{"signature": "def push_tag(tag):", "body": "_call('<STR_LIT>' + str(tag))<EOL>", "docstring": "Pushes a tag into the upstream", "id": "f1:m7"}
{"signature": "def is_committed():", "body": "return '<STR_LIT>' in _call('<STR_LIT>')<EOL>", "docstring": "Returns True if repository is committed, otherwise False", "id": "f1:m3"}
{"signature": "def confirm(tag):", "body": "click.echo()<EOL>if click.confirm('<STR_LIT>'.format(<EOL>tag=click.style(str(tag), fg='<STR_LIT>')),<EOL>default=True, abort=True):<EOL><INDENT>git.create_tag(tag)<EOL><DEDENT>if click.confirm(<EOL>'<STR_LIT>'.format(<EOL>tag=click.style(str(tag), fg='<STR_LIT>')),<EOL>default=True):<EOL><INDENT>git.push_tag(tag)<EOL>click.echo('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>git.delete_tag(tag)<EOL>click.echo('<STR_LIT>')<EOL><DEDENT>", "docstring": "Prompts user before proceeding", "id": "f2:m3"}
{"signature": "def bump(self, target):", "body": "if target == '<STR_LIT>':<EOL><INDENT>return Version(self.major, self.minor, self.patch + <NUM_LIT:1>)<EOL><DEDENT>if target == '<STR_LIT>':<EOL><INDENT>return Version(self.major, self.minor + <NUM_LIT:1>, <NUM_LIT:0>)<EOL><DEDENT>if target == '<STR_LIT>':<EOL><INDENT>return Version(self.major + <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>)<EOL><DEDENT>return self.clone()<EOL>", "docstring": "Bumps the Version given a target\n\nThe target can be either MAJOR, MINOR or PATCH", "id": "f3:c0:m5"}
{"signature": "def clone(self):", "body": "t = Tag(self.version.major, self.version.minor, self.version.patch)<EOL>if self.revision is not None:<EOL><INDENT>t.revision = self.revision.clone()<EOL><DEDENT>return t<EOL>", "docstring": "Returns a copy of this object", "id": "f3:c2:m4"}
{"signature": "@staticmethod<EOL><INDENT>def parse(s):<DEDENT>", "body": "try:<EOL><INDENT>m = _regex.match(s)<EOL>t = Tag(int(m.group('<STR_LIT>')),<EOL>int(m.group('<STR_LIT>')),<EOL>int(m.group('<STR_LIT>')))<EOL>return t if m.group('<STR_LIT:label>') is None else t.with_revision(m.group('<STR_LIT:label>'), int(m.group('<STR_LIT>')))<EOL><DEDENT>except AttributeError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Parses a string into a Tag", "id": "f3:c2:m8"}
{"signature": "def clone(self):", "body": "return Revision(self.label, self.number)<EOL>", "docstring": "Returns a copy of this object", "id": "f3:c1:m4"}
{"signature": "def bump(self):", "body": "return Revision(self.label, self.number + <NUM_LIT:1>)<EOL>", "docstring": "Bumps the Revision's number", "id": "f3:c1:m5"}
{"signature": "def clone(self):", "body": "return Version(self.major, self.minor, self.patch)<EOL>", "docstring": "Returns a copy of this object", "id": "f3:c0:m4"}
{"signature": "@invoke.task()<EOL>def build(ctx):", "body": "ctx.run(f'<STR_LIT>')<EOL>", "docstring": "Build the package into distributables.\n\n    This will create two distributables: source and wheel.", "id": "f5:m0"}
{"signature": "def lock(self):", "body": "data = self.data<EOL>data['<STR_LIT>']['<STR_LIT>'] = {\"<STR_LIT>\": self.hash}<EOL>data['<STR_LIT>']['<STR_LIT>'] = <NUM_LIT:6><EOL>return json.dumps(data, indent=<NUM_LIT:4>, separators=('<STR_LIT:U+002C>', '<STR_LIT>'))<EOL>", "docstring": "Returns a JSON representation of the Pipfile.", "id": "f10:c1:m5"}
{"signature": "def walk_up(bottom):", "body": "bottom = os.path.realpath(bottom)<EOL>try:<EOL><INDENT>names = os.listdir(bottom)<EOL><DEDENT>except Exception:<EOL><INDENT>return<EOL><DEDENT>dirs, nondirs = [], []<EOL>for name in names:<EOL><INDENT>if os.path.isdir(os.path.join(bottom, name)):<EOL><INDENT>dirs.append(name)<EOL><DEDENT>else:<EOL><INDENT>nondirs.append(name)<EOL><DEDENT><DEDENT>yield bottom, dirs, nondirs<EOL>new_path = os.path.realpath(os.path.join(bottom, '<STR_LIT:..>'))<EOL>if new_path == bottom:<EOL><INDENT>return<EOL><DEDENT>for x in walk_up(new_path):<EOL><INDENT>yield x<EOL><DEDENT>", "docstring": "mimic os.walk, but walk 'up' instead of down the directory tree.\n    From: https://gist.github.com/zdavkeos/1098474", "id": "f10:m1"}
{"signature": "def assert_requirements(self):", "body": "<EOL>if hasattr(sys, '<STR_LIT>'):<EOL><INDENT>implementation_version = format_full_version(sys.implementation.version)<EOL><DEDENT>else:<EOL><INDENT>implementation_version = \"<STR_LIT:0>\"<EOL><DEDENT>if hasattr(sys, '<STR_LIT>'):<EOL><INDENT>implementation_name = sys.implementation.name<EOL><DEDENT>else:<EOL><INDENT>implementation_name = '<STR_LIT>'<EOL><DEDENT>lookup = {<EOL>'<STR_LIT>': os.name,<EOL>'<STR_LIT>': sys.platform,<EOL>'<STR_LIT>': platform.machine(),<EOL>'<STR_LIT>': platform.python_implementation(),<EOL>'<STR_LIT>': platform.release(),<EOL>'<STR_LIT>': platform.system(),<EOL>'<STR_LIT>': platform.version(),<EOL>'<STR_LIT>': platform.python_version()[:<NUM_LIT:3>],<EOL>'<STR_LIT>': platform.python_version(),<EOL>'<STR_LIT>': implementation_name,<EOL>'<STR_LIT>': implementation_version<EOL>}<EOL>for marker, specifier in self.data['<STR_LIT>']['<STR_LIT>'].items():<EOL><INDENT>if marker in lookup:<EOL><INDENT>try:<EOL><INDENT>assert lookup[marker] == specifier<EOL><DEDENT>except AssertionError:<EOL><INDENT>raise AssertionError('<STR_LIT>'.format(marker, specifier))<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Asserts PEP 508 specifiers.", "id": "f10:c1:m6"}
{"signature": "@property<EOL><INDENT>def contents(self):<DEDENT>", "body": "with codecs.open(self.filename, '<STR_LIT:r>', '<STR_LIT:utf-8>') as f:<EOL><INDENT>return f.read()<EOL><DEDENT>", "docstring": "Returns the contents of the pipfile.", "id": "f10:c1:m4"}
{"signature": "@classmethod<EOL><INDENT>def load(klass, filename, inject_env=True):<DEDENT>", "body": "p = PipfileParser(filename=filename)<EOL>pipfile = klass(filename=filename)<EOL>pipfile.data = p.parse(inject_env=inject_env)<EOL>return pipfile<EOL>", "docstring": "Load a Pipfile from a given filename.", "id": "f10:c1:m2"}
{"signature": "def add_templates_to_message(self):", "body": "super(TemplatedHTMLEmailMessageViewTestCase, self).add_templates_to_message()<EOL>self.message.html_body_template = self.html_body_template<EOL>", "docstring": "Adds templates to the fixture message, ensuring it can be rendered.", "id": "f20:c2:m1"}
{"signature": "def send(self, extra_context=None, **kwargs):", "body": "message = self.render_to_message(extra_context=extra_context, **kwargs)<EOL>return message.send()<EOL>", "docstring": "Renders and sends an email message.\n\nAll keyword arguments other than ``extra_context`` are passed through\nas keyword arguments when constructing a new :attr:`message_class`\ninstance for this message.\n\nThis method exists primarily for convenience, and the proper\nrendering of your message should not depend on the behavior of this\nmethod. To alter how a message is created, override\n:meth:``render_to_message`` instead, since that should always be\ncalled, even if a message is not sent.\n\n:param extra_context: Any additional context data that will be used\n    when rendering this message.\n:type extra_context: :class:`dict`", "id": "f23:c0:m5"}
{"signature": "@property<EOL><INDENT>def headers(self):<DEDENT>", "body": "if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._headers = {}<EOL><DEDENT>return self._headers<EOL>", "docstring": "A dictionary containing the headers for this message.", "id": "f23:c0:m0"}
{"signature": "def render_to_message(self, extra_context=None, **kwargs):", "body": "if extra_context is None:<EOL><INDENT>extra_context = {}<EOL><DEDENT>kwargs.setdefault('<STR_LIT>', {}).update(self.headers)<EOL>context = self.get_context_data(**extra_context)<EOL>return self.message_class(<EOL>subject=self.render_subject(context),<EOL>body=self.render_body(context),<EOL>**kwargs)<EOL>", "docstring": "Renders and returns an unsent message with the provided context.\n\nAny extra keyword arguments passed will be passed through as keyword\narguments to the message constructor.\n\n:param extra_context: Any additional context to use when rendering the\n    templated content.\n:type extra_context: :class:`dict`\n:returns: A message instance.\n:rtype: :attr:`.message_class`", "id": "f23:c0:m4"}
{"signature": "def render_subject(self, context):", "body": "rendered = self.subject_template.render(unescape(context))<EOL>return rendered.strip()<EOL>", "docstring": "Renders the message subject for the given context.\n\nThe context data is automatically unescaped to avoid rendering HTML\nentities in ``text/plain`` content.\n\n:param context: The context to use when rendering the subject template.\n:type context: :class:`~django.template.Context`\n:returns: A rendered subject.\n:rtype: :class:`str`", "id": "f23:c1:m5"}
{"signature": "def register(self, cls):", "body": "preview = cls(site=self)<EOL>logger.debug('<STR_LIT>', preview, self)<EOL>index = self.__previews.setdefault(preview.module, {})<EOL>index[cls.__name__] = preview<EOL>", "docstring": "Adds a preview to the index.", "id": "f25:c0:m2"}
{"signature": "@property<EOL><INDENT>def url(self):<DEDENT>", "body": "return reverse('<STR_LIT>' % URL_NAMESPACE, kwargs={<EOL>'<STR_LIT>': self.module,<EOL>'<STR_LIT>': type(self).__name__,<EOL>})<EOL>", "docstring": "The URL to access this preview.", "id": "f25:c1:m4"}
{"signature": "def __iter__(self):", "body": "for module in sorted(self.__previews.keys()):<EOL><INDENT>previews = ModulePreviews(module, sorted(self.__previews[module].values(), key=str))<EOL>yield previews<EOL><DEDENT>", "docstring": "Returns an iterator of :class:`ModulePreviews` tuples, sorted by module name.", "id": "f25:c0:m1"}
{"signature": "def list_view(self, request):", "body": "return render(request, '<STR_LIT>', {<EOL>'<STR_LIT>': self,<EOL>})<EOL>", "docstring": "Returns a list view response containing all of the registered previews.", "id": "f25:c0:m4"}
{"signature": "def maybe_decode_header(header):", "body": "value, encoding = decode_header(header)[<NUM_LIT:0>]<EOL>if encoding:<EOL><INDENT>return value.decode(encoding)<EOL><DEDENT>else:<EOL><INDENT>return value<EOL><DEDENT>", "docstring": "Decodes an encoded 7-bit ASCII header value into its actual value.", "id": "f25:m0"}
{"signature": "def logout(self, revoke_oauth=False):", "body": "return self.api.logout(revoke_oauth=revoke_oauth)<EOL>", "docstring": "Log out the gmusicapi Musicmanager instance.\n\n        Parameters:\n                revoke_oauth (bool): If ``True``, oauth credentials will be revoked and the corresponding oauth file will be deleted.\n\n        Returns:\n                ``True`` on success.", "id": "f33:c0:m2"}
{"signature": "def login(self, username=None, password=None, android_id=None):", "body": "cls_name = type(self).__name__<EOL>if username is None:<EOL><INDENT>username = input(\"<STR_LIT>\")<EOL><DEDENT>if password is None:<EOL><INDENT>password = getpass.getpass(\"<STR_LIT>\")<EOL><DEDENT>if android_id is None:<EOL><INDENT>android_id = Mobileclient.FROM_MAC_ADDRESS<EOL><DEDENT>try:<EOL><INDENT>self.api.login(username, password, android_id)<EOL><DEDENT>except OSError:<EOL><INDENT>logger.exception(\"<STR_LIT>\".format(cls_name))<EOL><DEDENT>if not self.is_authenticated:<EOL><INDENT>logger.warning(\"<STR_LIT>\".format(cls_name))<EOL>return False<EOL><DEDENT>logger.info(\"<STR_LIT>\".format(cls_name))<EOL>return True<EOL>", "docstring": "Authenticate the gmusicapi Mobileclient instance.\n\n        Parameters:\n                username (Optional[str]): Your Google Music username. Will be prompted if not given.\n\n                password (Optional[str]): Your Google Music password. Will be prompted if not given.\n\n                android_id (Optional[str]): The 16 hex digits from an Android device ID.\n                        Default: Use gmusicapi.Mobileclient.FROM_MAC_ADDRESS to create ID from computer's MAC address.\n\n        Returns:\n                ``True`` on successful login or ``False`` on unsuccessful login.", "id": "f35:c0:m1"}
{"signature": "def get_google_playlist(self, playlist):", "body": "logger.info(\"<STR_LIT>\".format(playlist))<EOL>for google_playlist in self.api.get_all_user_playlist_contents():<EOL><INDENT>if google_playlist['<STR_LIT:name>'] == playlist or google_playlist['<STR_LIT:id>'] == playlist:<EOL><INDENT>return google_playlist<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.warning(\"<STR_LIT>\".format(playlist))<EOL>return {}<EOL><DEDENT>", "docstring": "Get playlist information of a user-generated Google Music playlist.\n\n        Parameters:\n                playlist (str): Name or ID of Google Music playlist. Names are case-sensitive.\n                        Google allows multiple playlists with the same name.\n                        If multiple playlists have the same name, the first one encountered is used.\n\n        Returns:\n                dict: The playlist dict as returned by Mobileclient.get_all_user_playlist_contents.", "id": "f35:c0:m5"}
{"signature": "@property<EOL><INDENT>def is_authenticated(self):<DEDENT>", "body": "return self.api.is_authenticated()<EOL>", "docstring": "Check the authentication status of the gmusicapi client instance.\n\n        Returns:\n                ``True`` if authenticated, ``False`` if not.", "id": "f36:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>@cast_to_list(<NUM_LIT:0>)<EOL>def get_local_playlists(filepaths, exclude_patterns=None, max_depth=float('<STR_LIT>')):<DEDENT>", "body": "logger.info(\"<STR_LIT>\")<EOL>included_playlists = []<EOL>excluded_playlists = []<EOL>supported_filepaths = get_supported_filepaths(filepaths, SUPPORTED_PLAYLIST_FORMATS, max_depth=max_depth)<EOL>included_playlists, excluded_playlists = exclude_filepaths(supported_filepaths, exclude_patterns=exclude_patterns)<EOL>logger.info(\"<STR_LIT>\".format(len(excluded_playlists)))<EOL>logger.info(\"<STR_LIT>\".format(len(included_playlists)))<EOL>return included_playlists, excluded_playlists<EOL>", "docstring": "Load playlists from local filepaths.\n\n        Parameters:\n                filepaths (list or str): Filepath(s) to search for music files.\n\n                exclude_patterns (list or str): Pattern(s) to exclude.\n                        Patterns are Python regex patterns.\n                        Filepaths are excluded if they match any of the exclude patterns.\n\n                max_depth (int): The depth in the directory tree to walk.\n                        A depth of '0' limits the walk to the top directory.\n                        Default: No limit.\n\n        Returns:\n                A list of local playlist filepaths matching criteria\n                and a list of local playlist filepaths excluded using exclusion criteria.", "id": "f36:c0:m3"}
{"signature": "def _check_field_value(field_value, pattern):", "body": "if isinstance(field_value, list):<EOL><INDENT>return any(re.search(pattern, str(value), re.I) for value in field_value)<EOL><DEDENT>else:<EOL><INDENT>return re.search(pattern, str(field_value), re.I)<EOL><DEDENT>", "docstring": "Check a song metadata field value for a pattern.", "id": "f38:m10"}
{"signature": "def _split_field_to_single_value(field):", "body": "split_field = re.match(r'<STR_LIT>', field)<EOL>return split_field.group(<NUM_LIT:1>) or field<EOL>", "docstring": "Convert number field values split by a '/' to a single number value.", "id": "f38:m3"}
{"signature": "def _normalize_metadata(metadata):", "body": "metadata = str(metadata)<EOL>metadata = metadata.lower()<EOL>metadata = re.sub(r'<STR_LIT>', '<STR_LIT>', metadata)  <EOL>metadata = re.sub(r'<STR_LIT>', r'<STR_LIT>', metadata)  <EOL>metadata = re.sub(r'<STR_LIT>', '<STR_LIT>', metadata)  <EOL>metadata = re.sub(r'<STR_LIT>', '<STR_LIT>', metadata)  <EOL>metadata = re.sub(r'<STR_LIT>', '<STR_LIT:U+0020>', metadata)  <EOL>metadata = re.sub(r'<STR_LIT>', '<STR_LIT>', metadata)  <EOL>metadata = re.sub(r'<STR_LIT>', '<STR_LIT>', metadata)  <EOL>metadata = re.sub(r'<STR_LIT>', '<STR_LIT>', metadata, re.I)  <EOL>return metadata<EOL>", "docstring": "Normalize metadata to improve match accuracy.", "id": "f38:m5"}
{"signature": "def compare_song_collections(src_songs, dst_songs):", "body": "def gather_field_values(song):<EOL><INDENT>return tuple((_normalize_metadata(song[field]) for field in _filter_comparison_fields(song)))<EOL><DEDENT>dst_songs_criteria = {gather_field_values(_normalize_song(dst_song)) for dst_song in dst_songs}<EOL>return [src_song for src_song in src_songs if gather_field_values(_normalize_song(src_song)) not in dst_songs_criteria]<EOL>", "docstring": "Compare two song collections to find missing songs.\n\n    Parameters:\n            src_songs (list): Google Music song dicts or filepaths of local songs.\n\n            dst_songs (list): Google Music song dicts or filepaths of local songs.\n\n    Returns:\n            A list of Google Music song dicts or local song filepaths from source missing in destination.", "id": "f38:m7"}
{"signature": "@cast_to_list(<NUM_LIT:0>)<EOL>def get_supported_filepaths(filepaths, supported_extensions, max_depth=float('<STR_LIT>')):", "body": "supported_filepaths = []<EOL>for path in filepaths:<EOL><INDENT>if os.name == '<STR_LIT>' and CYGPATH_RE.match(path):<EOL><INDENT>path = convert_cygwin_path(path)<EOL><DEDENT>if os.path.isdir(path):<EOL><INDENT>for root, __, files in walk_depth(path, max_depth):<EOL><INDENT>for f in files:<EOL><INDENT>if f.lower().endswith(supported_extensions):<EOL><INDENT>supported_filepaths.append(os.path.join(root, f))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif os.path.isfile(path) and path.lower().endswith(supported_extensions):<EOL><INDENT>supported_filepaths.append(path)<EOL><DEDENT><DEDENT>return supported_filepaths<EOL>", "docstring": "Get filepaths with supported extensions from given filepaths.\n\n    Parameters:\n            filepaths (list or str): Filepath(s) to check.\n\n            supported_extensions (tuple or str): Supported file extensions or a single file extension.\n\n            max_depth (int): The depth in the directory tree to walk.\n                    A depth of '0' limits the walk to the top directory.\n                    Default: No limit.\n\n    Returns:\n            A list of supported filepaths.", "id": "f38:m8"}
{"signature": "def convert_cygwin_path(path):", "body": "try:<EOL><INDENT>win_path = subprocess.check_output([\"<STR_LIT>\", \"<STR_LIT>\", path], universal_newlines=True).strip()<EOL><DEDENT>except (FileNotFoundError, subprocess.CalledProcessError):<EOL><INDENT>logger.exception(\"<STR_LIT>\")<EOL>raise<EOL><DEDENT>return win_path<EOL>", "docstring": "Convert Unix path from Cygwin to Windows path.", "id": "f38:m0"}
{"signature": "def _mutagen_fields_to_single_value(metadata):", "body": "return dict((k, v[<NUM_LIT:0>]) for k, v in metadata.items() if v)<EOL>", "docstring": "Replace mutagen metadata field list values in mutagen tags with the first list value.", "id": "f38:m2"}
{"signature": "def cast_to_list(position):", "body": "@wrapt.decorator<EOL>def wrapper(function, instance, args, kwargs):<EOL><INDENT>if not isinstance(args[position], list):<EOL><INDENT>args = list(args)<EOL>args[position] = [args[position]]<EOL>args = tuple(args)<EOL><DEDENT>return function(*args, **kwargs)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Cast the positional argument at given position into a list if not already a list.", "id": "f39:m0"}
{"signature": "@classmethod<EOL><INDENT>@coroutine<EOL>@nt_cursor<EOL>def insert(cls, cur, table: str, values: dict):<DEDENT>", "body": "keys = cls._COMMA.join(values.keys())<EOL>value_place_holder = cls._PLACEHOLDER * len(values)<EOL>query = cls._insert_string.format(table, keys, value_place_holder[:-<NUM_LIT:1>])<EOL>yield from cur.execute(query, tuple(values.values()))<EOL>return (yield from cur.fetchone())<EOL>", "docstring": "Creates an insert statement with only chosen fields\n\nArgs:\n    table: a string indicating the name of the table\n    values: a dict of fields and values to be inserted\n\nReturns:\n    A 'Record' object with table columns as properties", "id": "f41:c0:m5"}
{"signature": "@classmethod<EOL><INDENT>@coroutine<EOL>def get_cursor(cls, cursor_type=_CursorType.PLAIN) -> Cursor:<DEDENT>", "body": "_cur = None<EOL>if cls._use_pool:<EOL><INDENT>_connection_source = yield from cls.get_pool()<EOL><DEDENT>else:<EOL><INDENT>_connection_source = yield from aiopg.connect(echo=False, **cls._connection_params)<EOL><DEDENT>if cursor_type == _CursorType.PLAIN:<EOL><INDENT>_cur = yield from _connection_source.cursor()<EOL><DEDENT>if cursor_type == _CursorType.NAMEDTUPLE:<EOL><INDENT>_cur = yield from _connection_source.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)<EOL><DEDENT>if cursor_type == _CursorType.DICT:<EOL><INDENT>_cur = yield from _connection_source.cursor(cursor_factory=psycopg2.extras.DictCursor)<EOL><DEDENT>if not cls._use_pool:<EOL><INDENT>_cur = cursor_context_manager(_connection_source, _cur)<EOL><DEDENT>return _cur<EOL>", "docstring": "Yields:\n    new client-side cursor from existing db connection pool", "id": "f41:c0:m3"}
{"signature": "def nt_cursor(func):", "body": "@wraps(func)<EOL>def wrapper(cls, *args, **kwargs):<EOL><INDENT>with (yield from cls.get_cursor(_CursorType.NAMEDTUPLE)) as c:<EOL><INDENT>return (yield from func(cls, c, *args, **kwargs))<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Decorator that provides a namedtuple cursor to the calling function\n\nAdds the cursor as the second argument to the calling functions\n\nRequires that the function being decorated is an instance of a class or object\nthat yields a cursor from a get_cursor(cursor_type=CursorType.NAMEDTUPLE) coroutine or provides such an object\nas the first argument in its signature\n\nYields:\n    A client-side namedtuple cursor", "id": "f41:m2"}
{"signature": "@classmethod<EOL><INDENT>@coroutine<EOL>@cursor<EOL>def delete(cls, cur, table: str, where_keys: list):<DEDENT>", "body": "where_clause, values = cls._get_where_clause_with_values(where_keys)<EOL>query = cls._delete_query.format(table, where_clause)<EOL>yield from cur.execute(query, values)<EOL>return cur.rowcount<EOL>", "docstring": "Creates a delete query with where keys\nSupports multiple where clause with and or or both\n\nArgs:\n    table: a string indicating the name of the table\n    where_keys: list of dictionary\n    example of where keys: [{'name':('>', 'cip'),'url':('=', 'cip.com'},{'type':{'<=', 'manufacturer'}}]\n    where_clause will look like ((name>%s and url=%s) or (type <= %s))\n    items within each dictionary get 'AND'-ed and dictionaries themselves get 'OR'-ed\n\nReturns:\n    an integer indicating count of rows deleted", "id": "f41:c0:m8"}
{"signature": "@classmethod<EOL><INDENT>@coroutine<EOL>@nt_cursor<EOL>def raw_sql(cls, cur, query: str, values: tuple):<DEDENT>", "body": "yield from cur.execute(query, values)<EOL>return (yield from cur.fetchall())<EOL>", "docstring": "Run a raw sql query\n\nArgs:\n    query : query string to execute\n    values : tuple of values to be used with the query\n\nReturns:\n    result of query as list of named tuple", "id": "f41:c0:m10"}
{"signature": "def cursor(func):", "body": "@wraps(func)<EOL>def wrapper(cls, *args, **kwargs):<EOL><INDENT>with (yield from cls.get_cursor()) as c:<EOL><INDENT>return (yield from func(cls, c, *args, **kwargs))<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Decorator that provides a cursor to the calling function\n\nAdds the cursor as the second argument to the calling functions\n\nRequires that the function being decorated is an instance of a class or object\nthat yields a cursor from a get_cursor() coroutine or provides such an object\nas the first argument in its signature\n\nYields:\n    A client-side cursor", "id": "f41:m1"}
{"signature": "def transaction(func):", "body": "@wraps(func)<EOL>def wrapper(cls, *args, **kwargs):<EOL><INDENT>with (yield from cls.get_cursor(_CursorType.NAMEDTUPLE)) as c:<EOL><INDENT>try:<EOL><INDENT>yield from c.execute('<STR_LIT>')<EOL>result = (yield from func(cls, c, *args, **kwargs))<EOL><DEDENT>except Exception:<EOL><INDENT>yield from c.execute('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>yield from c.execute('<STR_LIT>')<EOL>return result<EOL><DEDENT><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Provides a transacted cursor which will run in autocommit=false mode\n\nFor any exception the transaction will be rolled back.\nRequires that the function being decorated is an instance of a class or object\nthat yields a cursor from a get_cursor(cursor_type=CursorType.NAMEDTUPLE) coroutine or provides such an object\nas the first argument in its signature\n\nYields:\n    A client-side transacted named cursor", "id": "f41:m3"}
{"signature": "@classmethod<EOL><INDENT>@coroutine<EOL>@cursor<EOL>def count(cls, cur, table:str, where_keys: list=None):<DEDENT>", "body": "if where_keys:<EOL><INDENT>where_clause, values = cls._get_where_clause_with_values(where_keys)<EOL>query = cls._count_query_where.format(table, where_clause)<EOL>q, t = query, values<EOL><DEDENT>else:<EOL><INDENT>query = cls._count_query.format(table)<EOL>q, t = query, ()<EOL><DEDENT>yield from cur.execute(q, t)<EOL>result = yield from cur.fetchone()<EOL>return int(result[<NUM_LIT:0>])<EOL>", "docstring": "gives the number of records in the table\n\nArgs:\n    table: a string indicating the name of the table\n\nReturns:\n    an integer indicating the number of records in the table", "id": "f41:c0:m4"}
{"signature": "def set_key(self, key, value, namespace=None, expire=<NUM_LIT:0>):", "body": "with (yield from self._pool) as redis:<EOL><INDENT>if namespace is not None:<EOL><INDENT>key = self._get_key(namespace, key)<EOL><DEDENT>yield from redis.set(key, value, expire=expire)<EOL><DEDENT>", "docstring": "Set a key in a cache.\n:param key: Key name\n:param value: Value\n:param namespace : Namespace to associate the key with\n:param expire: expiration\n:return:", "id": "f42:c0:m2"}
{"signature": "def indexesOptional(f):", "body": "stack = inspect.stack()<EOL>_NO_INDEX_CHECK_NEEDED.add('<STR_LIT>' % (f.__module__, stack[<NUM_LIT:1>][<NUM_LIT:3>], f.__name__))<EOL>del stack<EOL>return f<EOL>", "docstring": "Decorate test methods with this if you don't require strict index checking", "id": "f44:m1"}
{"signature": "def on_map_long_clicked(self, pos):", "body": "d = self.declaration<EOL>d.clicked({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': tuple(pos)<EOL>})<EOL>", "docstring": "Called when the map is clicked", "id": "f70:c24:m9"}
{"signature": "def on_marker(self, marker):", "body": "mid, pos = marker<EOL>self.marker = Marker(__id__=mid)<EOL>mapview = self.parent()<EOL>mapview.markers[mid] = self<EOL>self.marker.setTag(mid)<EOL>for w in self.child_widgets():<EOL><INDENT>mapview.init_info_window_adapter()<EOL>break<EOL><DEDENT>d = self.declaration<EOL>if d.show_info:<EOL><INDENT>self.set_show_info(d.show_info)<EOL><DEDENT>del self.options<EOL>", "docstring": "Convert our options into the actual marker object", "id": "f70:c26:m4"}
{"signature": "def handle_change(self, change):", "body": "op = change['<STR_LIT>']<EOL>if op in '<STR_LIT>':<EOL><INDENT>self.add(len(change['<STR_LIT:value>']), LatLng(*change['<STR_LIT>']))<EOL><DEDENT>elif op == '<STR_LIT>':<EOL><INDENT>self.add(change['<STR_LIT:index>'], LatLng(*change['<STR_LIT>']))<EOL><DEDENT>elif op == '<STR_LIT>':<EOL><INDENT>points = [LatLng(*p) for p in change['<STR_LIT>']]<EOL>self.addAll([bridge.encode(c) for c in points])<EOL><DEDENT>elif op == '<STR_LIT>':<EOL><INDENT>self.set(change['<STR_LIT:index>'], LatLng(*change['<STR_LIT>']))<EOL><DEDENT>elif op == '<STR_LIT>':<EOL><INDENT>self.remove(change['<STR_LIT:index>'])<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError(<EOL>\"<STR_LIT>\".format(op))<EOL><DEDENT>", "docstring": "Handle changes from atom ContainerLists", "id": "f70:c1:m1"}
{"signature": "def add_to_map(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Add this item to the map", "id": "f70:c25:m1"}
{"signature": "def create_widget(self):", "body": "self.options = PolygonOptions()<EOL>self.points = LatLngList()<EOL>", "docstring": "Create the MarkerOptions for this map marker\n        this later gets converted into a \"Marker\" instance when addMarker \n        is called", "id": "f70:c29:m0"}
{"signature": "def init_info_window_adapter(self):", "body": "adapter = self.adapter<EOL>if adapter:<EOL><INDENT>return  <EOL><DEDENT>adapter = GoogleMap.InfoWindowAdapter()<EOL>adapter.getInfoContents.connect(self.on_info_window_contents_requested)<EOL>adapter.getInfoWindow.connect(self.on_info_window_requested)<EOL>self.map.setInfoWindowAdapter(adapter)<EOL>", "docstring": "Initialize the info window adapter. Should only be done if one of \n        the markers defines a custom view.", "id": "f70:c24:m3"}
{"signature": "def on_marker(self, mid):", "body": "self.marker = Circle(__id__=mid)<EOL>self.parent().markers[mid] = self<EOL>self.marker.setTag(mid)<EOL>d = self.declaration<EOL>if d.clickable:<EOL><INDENT>self.set_clickable(d.clickable)<EOL><DEDENT>del self.options<EOL>", "docstring": "Convert our options into the actual circle object", "id": "f70:c27:m3"}
{"signature": "@observe('<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>','<STR_LIT>', '<STR_LIT>')<EOL><INDENT>def _update_proxy(self, change):<DEDENT>", "body": "<EOL>super(MapView, self)._update_proxy(change)<EOL>", "docstring": "An observer which sends the state change to the proxy.", "id": "f72:c7:m0"}
{"signature": "def decode_html_entities(html):", "body": "if not html:<EOL><INDENT>return html<EOL><DEDENT>for entity, char in six.iteritems(html_entity_map):<EOL><INDENT>html = html.replace(entity, char)<EOL><DEDENT>return html<EOL>", "docstring": "Decodes a limited set of HTML entities.", "id": "f94:m9"}
{"signature": "def x10_command(self, house_code, unit_number, state):", "body": "house_code = normalize_housecode(house_code)<EOL>if unit_number is not None:<EOL><INDENT>unit_number = normalize_unitnumber(unit_number)<EOL><DEDENT>return self._x10_command(house_code, unit_number, state)<EOL>", "docstring": "Send X10 command to ??? unit.\n\n        @param house_code (A-P) - example='A'\n        @param unit_number (1-16)- example=1 (or None to impact entire house code)\n        @param state - Mochad command/state, See\n                https://sourceforge.net/p/mochad/code/ci/master/tree/README\n                examples=OFF, 'OFF', 'ON', ALL_OFF, 'all_units_off', 'xdim 128', etc.\n\n        Examples:\n            x10_command('A', '1', ON)\n            x10_command('A', '1', OFF)\n            x10_command('A', '1', 'ON')\n            x10_command('A', '1', 'OFF')\n            x10_command('A', None, ON)\n            x10_command('A', None, OFF)\n            x10_command('A', None, 'all_lights_off')\n            x10_command('A', None, 'all_units_off')\n            x10_command('A', None, ALL_OFF)\n            x10_command('A', None, 'all_lights_on')\n            x10_command('A', 1, 'xdim 128')", "id": "f101:c3:m3"}
{"signature": "def normalize_housecode(house_code):", "body": "if house_code is None:<EOL><INDENT>raise X10InvalidHouseCode('<STR_LIT>' % house_code)<EOL><DEDENT>if not isinstance(house_code, basestring):<EOL><INDENT>raise X10InvalidHouseCode('<STR_LIT>' % house_code)<EOL><DEDENT>if len(house_code) != <NUM_LIT:1>:<EOL><INDENT>raise X10InvalidHouseCode('<STR_LIT>' % house_code)<EOL><DEDENT>house_code = house_code.upper()<EOL>if not ('<STR_LIT:A>' <= house_code <= '<STR_LIT:P>'):<EOL><INDENT>raise X10InvalidHouseCode('<STR_LIT>' % house_code)<EOL><DEDENT>return house_code<EOL>", "docstring": "Returns a normalized house code, i.e. upper case.\n    Raises exception X10InvalidHouseCode if house code appears to be invalid", "id": "f101:m0"}
{"signature": "def _x10_command(self, house_code, unit_number, state):", "body": "print('<STR_LIT>' % ((house_code, unit_number, state), ))<EOL>raise NotImplementedError()<EOL>", "docstring": "Real implementation", "id": "f101:c3:m4"}
{"signature": "def main(argv=None):", "body": "if len(argv):<EOL><INDENT>commands = '<STR_LIT:U+0020>'.join(argv)<EOL>comPort, commands = commands.split(None, <NUM_LIT:1>)<EOL>sendCommands(comPort, commands)<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Send X10 commands when module is used from the command line.\n\n    This uses syntax similar to sendCommands, for example:\n\n    x10.py com2 A1 On, A2 Off, B All Off", "id": "f102:m7"}
{"signature": "def _sendBinaryData(port, data):", "body": "_reset(port)<EOL>time.sleep(leadInOutDelay)<EOL>for digit in data:<EOL><INDENT>_sendBit(port, digit)<EOL><DEDENT>time.sleep(leadInOutDelay)<EOL>", "docstring": "Send a string of binary data to the FireCracker with proper timing.\n\n    See the diagram in the spec referenced above for timing information.\n    The module level variables leadInOutDelay and bitDelay represent how\n    long each type of delay should be in seconds. They may require tweaking\n    on some setups.", "id": "f102:m2"}
{"signature": "def sendCommands(comPort, commands):", "body": "mutex.acquire()<EOL>try:<EOL><INDENT>try:<EOL><INDENT>port = serial.Serial(port=comPort)<EOL>header = '<STR_LIT>'<EOL>footer = '<STR_LIT>'<EOL>for command in _translateCommands(commands):<EOL><INDENT>_sendBinaryData(port, header + command + footer)<EOL><DEDENT><DEDENT>except serial.SerialException:<EOL><INDENT>print('<STR_LIT>' % comPort)<EOL>print('<STR_LIT>')<EOL>raise<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>mutex.release()<EOL><DEDENT>", "docstring": "Send X10 commands using the FireCracker on comPort\n\n    comPort should be the name of a serial port on the host platform. On\n    Windows, for example, 'com1'.\n\n    commands should be a string consisting of X10 commands separated by\n    commas. For example. 'A1 On, A Dim, A Dim, A Dim, A Lamps Off'. The\n    letter is a house code (A-P) and the number is the device number (1-16).\n    Possible commands for a house code / device number combination are\n    'On' and 'Off'. The commands 'Bright' and 'Dim' should be used with a\n    house code alone after sending an On command to a specific device. The\n    'All On', 'All Off', 'Lamps On', and 'Lamps Off' commands should also\n    be used with a house code alone.\n\n    # Turn on module A1\n    >>> sendCommands('com1', 'A1 On')\n\n    # Turn all modules with house code A off\n    >>> sendCommands('com1', 'A All Off')\n\n    # Turn all lamp modules with house code B on\n    >>> sendCommands('com1', 'B Lamps On')\n\n    # Turn on module A1 and dim it 3 steps, then brighten it 1 step\n    >>> sendCommands('com1', 'A1 On, A Dim, A Dim, A Dim, A Bright')", "id": "f102:m6"}
{"signature": "def __init__(self, visible=False, timeout=<NUM_LIT:30>, app=None, args=None):", "body": "self.app = app or self.create_app(visible, args)<EOL>self.is_terminated = False<EOL>self.status = Status(None)<EOL>self.timeout = timeout<EOL>self.last_host = None<EOL>", "docstring": "Create an emulator instance\n\n`visible` controls which executable will be used.\n`timeout` controls the timeout paramater to any Wait() command sent\n    to x3270.\n`args` allows sending parameters to the emulator executable", "id": "f108:c14:m0"}
{"signature": "def terminate(self):", "body": "if not self.is_terminated:<EOL><INDENT>log.debug(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>self.exec_command(b\"<STR_LIT>\")<EOL><DEDENT>except BrokenPipeError:  <EOL><INDENT>pass<EOL><DEDENT>except socket.error as e:<EOL><INDENT>if e.errno != errno.ECONNRESET:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>self.app.close()<EOL>self.is_terminated = True<EOL><DEDENT>", "docstring": "terminates the underlying x3270 subprocess. Once called, this\nEmulator instance must no longer be used.", "id": "f108:c14:m4"}
{"signature": "def is_connected(self):", "body": "<EOL>try:<EOL><INDENT>self.exec_command(b\"<STR_LIT>\")<EOL>return self.status.connection_state.startswith(b\"<STR_LIT>\")<EOL><DEDENT>except NotConnectedException:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Return bool indicating connection state", "id": "f108:c14:m5"}
{"signature": "def connect(self, host):", "body": "return False<EOL>", "docstring": "this is a no-op for all but wc3270", "id": "f108:c7:m2"}
{"signature": "def __del__(self):", "body": "self.terminate()<EOL>", "docstring": "Since an emulator creates a process (and sometimes a socket handle), it is good practice\nto clean these up when done. Note, not terminating at this point will usually have no\nill effect - only Python 3+ on Windows had problems in this regard.", "id": "f108:c14:m1"}
{"signature": "def string_get(self, ypos, xpos, length):", "body": "<EOL>xpos -= <NUM_LIT:1><EOL>ypos -= <NUM_LIT:1><EOL>cmd = self.exec_command(<EOL>\"<STR_LIT>\".format(ypos, xpos, length).encode(\"<STR_LIT:ascii>\")<EOL>)<EOL>assert len(cmd.data) == <NUM_LIT:1>, cmd.data<EOL>return cmd.data[<NUM_LIT:0>].decode(\"<STR_LIT:ascii>\")<EOL>", "docstring": "Get a string of `length` at screen co-ordinates `ypos`/`xpos`\n\nCo-ordinates are 1 based, as listed in the status area of the\nterminal.", "id": "f108:c14:m19"}
{"signature": "def string_found(self, ypos, xpos, string):", "body": "found = self.string_get(ypos, xpos, len(string))<EOL>log.debug('<STR_LIT>'.format(found))<EOL>return found == string<EOL>", "docstring": "Return True if `string` is found at screen co-ordinates\n`ypos`/`xpos`, False otherwise.\n\nCo-ordinates are 1 based, as listed in the status area of the\nterminal.", "id": "f108:c14:m20"}
{"signature": "def move_to(self, ypos, xpos):", "body": "<EOL>xpos -= <NUM_LIT:1><EOL>ypos -= <NUM_LIT:1><EOL>self.exec_command(\"<STR_LIT>\".format(ypos, xpos).encode(\"<STR_LIT:ascii>\"))<EOL>", "docstring": "move the cursor to the given co-ordinates.  Co-ordinates are 1\nbased, as listed in the status area of the terminal.", "id": "f108:c14:m9"}
{"signature": "def reconnect(self):", "body": "self.exec_command(b\"<STR_LIT>\")<EOL>self.connect(self.last_host)<EOL>", "docstring": "Disconnect from the host and re-connect to the same host", "id": "f108:c14:m7"}
{"signature": "def get_object(self):", "body": "dataframe = self.filter_dataframe(self.get_dataframe())<EOL>assert self.lookup_url_kwarg in self.kwargs, (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(self.__class__.__name__, self.lookup_url_kwarg)<EOL>)<EOL>try:<EOL><INDENT>obj = self.index_row(dataframe)<EOL><DEDENT>except (IndexError, KeyError, ValueError):<EOL><INDENT>raise Http404<EOL><DEDENT>self.check_object_permissions(self.request, obj)<EOL>return obj<EOL>", "docstring": "Returns the row the view is displaying.\n\nYou may want to override this if you need to provide non-standard\nqueryset lookups.  Eg if objects are referenced using multiple\nkeyword arguments in the url conf.", "id": "f112:c0:m3"}
{"signature": "def paginate_dataframe(self, dataframe):", "body": "if self.paginator is None:<EOL><INDENT>return None<EOL><DEDENT>return self.paginator.paginate_dataframe(dataframe, self.request, view=self)<EOL>", "docstring": "Return a single page of results, or `None` if pagination is disabled.", "id": "f112:c0:m9"}
{"signature": "def get_serializer(self, *args, **kwargs):", "body": "serializer_class = self.get_serializer_class()<EOL>kwargs['<STR_LIT>'] = self.get_serializer_context()<EOL>return serializer_class(*args, **kwargs)<EOL>", "docstring": "Return the serializer instance that should be used for validating and\ndeserializing input, and for serializing output.", "id": "f112:c0:m4"}
{"signature": "def update_dataframe(self, dataframe):", "body": "return dataframe<EOL>", "docstring": "Indicates that the dataframe needs to be updated. The default implementation\njust returns the argument. This method has to be ovewritten to make changing\noperations stick.", "id": "f112:c0:m1"}
{"signature": "def _wrap_color(self, code, text, format=None, style=None):", "body": "color = None<EOL>if code[:<NUM_LIT:3>] == self.bg.PREFIX:<EOL><INDENT>color = self.bg.COLORS.get(code, None)<EOL><DEDENT>if not color:<EOL><INDENT>color = self.fg.COLORS.get(code, None)<EOL><DEDENT>if not color:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if format and format not in self.formats:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>fmt = \"<STR_LIT>\"<EOL>if format == '<STR_LIT>':<EOL><INDENT>fmt = \"<STR_LIT>\"<EOL><DEDENT>elif format == '<STR_LIT>':<EOL><INDENT>fmt = \"<STR_LIT>\"<EOL><DEDENT>parts = color.split('<STR_LIT:[>')<EOL>color = '<STR_LIT>'.format(parts[<NUM_LIT:0>], fmt, parts[<NUM_LIT:1>])<EOL>if self.has_colors and self.colors_enabled:<EOL><INDENT>st = '<STR_LIT>'<EOL>if style:<EOL><INDENT>st = self.st.COLORS.get(style, '<STR_LIT>')<EOL><DEDENT>return \"<STR_LIT>\".format(st, color, text, self.st.COLORS['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>return text<EOL><DEDENT>", "docstring": "Colors text with code and given format", "id": "f119:c4:m4"}
{"signature": "def __init__(self):", "body": "try:<EOL><INDENT>p = subprocess.Popen(['<STR_LIT>', '<STR_LIT>'],<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE)<EOL>num_colors = int(p.stdout.read())<EOL><DEDENT>except (OSError, ValueError):<EOL><INDENT>num_colors = <NUM_LIT:1><EOL><DEDENT>self.has_colors = False<EOL>if num_colors > <NUM_LIT:1>:<EOL><INDENT>self.has_colors = True<EOL><DEDENT>self.enable_colors()<EOL>self.COLORS = self.enumerate_colors()<EOL>", "docstring": "Checks if the shell supports colors", "id": "f119:c4:m0"}
{"signature": "def enable_colors(self):", "body": "self.colors_enabled = True<EOL>", "docstring": "Method to enable colors", "id": "f119:c4:m2"}
{"signature": "def get(<EOL>self,<EOL>key: str,<EOL>default: typing.Any = UNSET,<EOL>type_: typing.Type[typing.Any] = str,<EOL>subtype: typing.Type[typing.Any] = str,<EOL>mapper: typing.Optional[typing.Callable[[object], object]] = None,<EOL>) -> typing.Any:", "body": "value = self.environ.get(key, UNSET)<EOL>if value is UNSET and default is UNSET:<EOL><INDENT>raise ConfigError(\"<STR_LIT>\".format(key))<EOL><DEDENT>if value is UNSET:<EOL><INDENT>value = default<EOL><DEDENT>else:<EOL><INDENT>value = self.parse(typing.cast(str, value), type_, subtype)<EOL><DEDENT>if mapper:<EOL><INDENT>value = mapper(value)<EOL><DEDENT>return value<EOL>", "docstring": "Parse a value from an environment variable.\n\n.. code-block:: python\n\n   >>> os.environ['FOO']\n   <<< '12345'\n   >>>\n   >>> os.environ['BAR']\n   <<< '1,2,3,4'\n   >>>\n   >>> 'BAZ' in os.environ\n   <<< False\n   >>>\n   >>> parser = Config()\n   >>> parser.get('FOO', type_=int)\n   <<< 12345\n   >>>\n   >>> parser.get('BAR', type_=list, subtype=int)\n   <<< [1, 2, 3, 4]\n   >>>\n   >>> parser.get('BAZ', default='abc123')\n   <<< 'abc123'\n   >>>\n   >>> parser.get('FOO', type_=int, mapper=lambda x: x*10)\n   <<< 123450\n\n:param key: the key to look up the value under\n:param default: default value to return when when no value is present\n:param type\\\\_: the type to return\n:param subtype: subtype for iterator types\n:param mapper: a function to post-process the value with\n:return: the parsed config value", "id": "f127:c1:m3"}
{"signature": "def unsign(self, signed_value, ttl=None):", "body": "h_size, d_size = struct.calcsize('<STR_LIT>'), self.digest.digest_size<EOL>fmt = '<STR_LIT>' % (len(signed_value) - h_size - d_size, d_size)<EOL>try:<EOL><INDENT>version, timestamp, value, sig = struct.unpack(fmt, signed_value)<EOL><DEDENT>except struct.error:<EOL><INDENT>raise BadSignature('<STR_LIT>')<EOL><DEDENT>if version != self.version:<EOL><INDENT>raise BadSignature('<STR_LIT>')<EOL><DEDENT>if ttl is not None:<EOL><INDENT>if isinstance(ttl, datetime.timedelta):<EOL><INDENT>ttl = ttl.total_seconds()<EOL><DEDENT>age = abs(time.time() - timestamp)<EOL>if age > ttl + _MAX_CLOCK_SKEW:<EOL><INDENT>raise SignatureExpired('<STR_LIT>' % (age,<EOL>ttl))<EOL><DEDENT><DEDENT>try:<EOL><INDENT>self.signature(signed_value[:-d_size]).verify(sig)<EOL><DEDENT>except InvalidSignature:<EOL><INDENT>raise BadSignature(<EOL>'<STR_LIT>' % binascii.b2a_base64(sig))<EOL><DEDENT>return value<EOL>", "docstring": "Retrieve original value and check it wasn't signed more\nthan max_age seconds ago.\n\n:type signed_value: bytes\n:type ttl: int | datetime.timedelta", "id": "f144:c3:m3"}
{"signature": "def signature(self, value):", "body": "h = HMAC(self.key, self.digest, backend=settings.CRYPTOGRAPHY_BACKEND)<EOL>h.update(force_bytes(value))<EOL>return h<EOL>", "docstring": ":type value: any\n:rtype: HMAC", "id": "f144:c3:m1"}
{"signature": "def get_main_version(version=None):", "body": "version = get_complete_version(version)<EOL>parts = <NUM_LIT:2> if version[<NUM_LIT:2>] == <NUM_LIT:0> else <NUM_LIT:3><EOL>return '<STR_LIT:.>'.join(str(x) for x in version[:parts])<EOL>", "docstring": "Returns main version (X.Y[.Z]) from VERSION.", "id": "f145:m1"}
{"signature": "def get_git_changeset():", "body": "repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))<EOL>git_log = subprocess.Popen(<EOL>'<STR_LIT>',<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE,<EOL>shell=True,<EOL>cwd=repo_dir,<EOL>universal_newlines=True)<EOL>timestamp = git_log.communicate()[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))<EOL><DEDENT>except ValueError:<EOL><INDENT>return None<EOL><DEDENT>return timestamp.strftime('<STR_LIT>')<EOL>", "docstring": "Returns a numeric identifier of the latest git changeset.\n\nThe result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.\nThis value isn't guaranteed to be unique, but collisions are very unlikely,\nso it's sufficient for generating the development version numbers.", "id": "f145:m4"}
{"signature": "def decrypt(self, data, ttl=None):", "body": "data = self._signer.unsign(data, ttl)<EOL>iv = data[:<NUM_LIT:16>]<EOL>ciphertext = data[<NUM_LIT:16>:]<EOL>decryptor = Cipher(<EOL>algorithms.AES(self._encryption_key), modes.CBC(iv),<EOL>self._backend).decryptor()<EOL>plaintext_padded = decryptor.update(ciphertext)<EOL>try:<EOL><INDENT>plaintext_padded += decryptor.finalize()<EOL><DEDENT>except ValueError:<EOL><INDENT>raise InvalidToken<EOL><DEDENT>unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()<EOL>unpadded = unpadder.update(plaintext_padded)<EOL>try:<EOL><INDENT>unpadded += unpadder.finalize()<EOL><DEDENT>except ValueError:<EOL><INDENT>raise InvalidToken<EOL><DEDENT>return unpadded<EOL>", "docstring": ":type data: bytes\n:type ttl: int\n:rtype: bytes", "id": "f146:c1:m3"}
{"signature": "def salted_hmac(key_salt, value, secret=None):", "body": "if secret is None:<EOL><INDENT>secret = settings.SECRET_KEY<EOL><DEDENT>key_salt = force_bytes(key_salt)<EOL>secret = force_bytes(secret)<EOL>digest = hashes.Hash(<EOL>settings.CRYPTOGRAPHY_DIGEST, backend=settings.CRYPTOGRAPHY_BACKEND)<EOL>digest.update(key_salt + secret)<EOL>key = digest.finalize()<EOL>h = HMAC(<EOL>key,<EOL>settings.CRYPTOGRAPHY_DIGEST,<EOL>backend=settings.CRYPTOGRAPHY_BACKEND)<EOL>h.update(force_bytes(value))<EOL>return h<EOL>", "docstring": "Returns the HMAC-HASH of 'value', using a key generated from key_salt and a\nsecret (which defaults to settings.SECRET_KEY).\n\nA different key_salt should be passed in for every application of HMAC.\n\n:type key_salt: any\n:type value: any\n:type secret: any\n:rtype: HMAC", "id": "f146:m0"}
{"signature": "def constant_time_compare(val1, val2):", "body": "return constant_time.bytes_eq(force_bytes(val1), force_bytes(val2))<EOL>", "docstring": ":type val1: any\n:type val2: any\n:rtype: bool", "id": "f146:m1"}
{"signature": "def pbkdf2(password, salt, iterations, dklen=<NUM_LIT:0>, digest=None):", "body": "if digest is None:<EOL><INDENT>digest = settings.CRYPTOGRAPHY_DIGEST<EOL><DEDENT>if not dklen:<EOL><INDENT>dklen = digest.digest_size<EOL><DEDENT>password = force_bytes(password)<EOL>salt = force_bytes(salt)<EOL>kdf = PBKDF2HMAC(<EOL>algorithm=digest,<EOL>length=dklen,<EOL>salt=salt,<EOL>iterations=iterations,<EOL>backend=settings.CRYPTOGRAPHY_BACKEND)<EOL>return kdf.derive(password)<EOL>", "docstring": "Implements PBKDF2 with the same API as Django's existing\nimplementation, using cryptography.\n\n:type password: any\n:type salt: any\n:type iterations: int\n:type dklen: int\n:type digest: cryptography.hazmat.primitives.hashes.HashAlgorithm", "id": "f146:m2"}
{"signature": "def encrypt(self, data):", "body": "data = force_bytes(data)<EOL>iv = os.urandom(<NUM_LIT:16>)<EOL>return self._encrypt_from_parts(data, iv)<EOL>", "docstring": ":type data: any\n:rtype: any", "id": "f146:c1:m1"}
{"signature": "def log_file(self, url=None):", "body": "if url is None:<EOL><INDENT>url = self.url<EOL><DEDENT>f = re.sub(\"<STR_LIT>\", \"<STR_LIT>\", url)<EOL>try:<EOL><INDENT>with open(f, \"<STR_LIT:a>\") as of:<EOL><INDENT>of.write(str(self.store.get_json_tuples(True)))<EOL><DEDENT><DEDENT>except IOError as e:<EOL><INDENT>print(e)<EOL>print(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Write to a local log file", "id": "f155:c0:m3"}
{"signature": "def register_credentials(self, credentials=None, user=None, user_file=None, password=None, password_file=None):", "body": "<EOL>if credentials is not None:<EOL><INDENT>self.credentials = credentials<EOL><DEDENT>else:<EOL><INDENT>self.credentials = {}<EOL>if user:<EOL><INDENT>self.credentials[\"<STR_LIT:user>\"] = user<EOL><DEDENT>elif user_file:<EOL><INDENT>with open(user_file, \"<STR_LIT:r>\") as of:<EOL><INDENT>pattern = re.compile(\"<STR_LIT>\")<EOL>for l in of:<EOL><INDENT>if re.match(pattern, l):<EOL><INDENT>l = l[<NUM_LIT:0>:-<NUM_LIT:1>]<EOL>self.credentials[\"<STR_LIT:user>\"] = re.sub(pattern, \"<STR_LIT>\", l)<EOL><DEDENT><DEDENT><DEDENT>if self.credentials[\"<STR_LIT:user>\"][<NUM_LIT:0>:<NUM_LIT:1>] == '<STR_LIT:\">' andself.credentials[\"<STR_LIT:user>\"][-<NUM_LIT:1>:] == '<STR_LIT:\">':<EOL><INDENT>self.credentials[\"<STR_LIT:user>\"] = self.credentials[\"<STR_LIT:user>\"][<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>if password:<EOL><INDENT>self.credentials[\"<STR_LIT:password>\"] = password<EOL><DEDENT>elif password_file:<EOL><INDENT>with open(password_file, \"<STR_LIT:r>\") as of:<EOL><INDENT>pattern = re.compile(\"<STR_LIT>\")<EOL>for l in of:<EOL><INDENT>if re.match(pattern, l):<EOL><INDENT>l = l[<NUM_LIT:0>:-<NUM_LIT:1>]<EOL>self.credentials[\"<STR_LIT:password>\"] =re.sub(pattern, \"<STR_LIT>\", l)<EOL><DEDENT><DEDENT><DEDENT>if self.credentials[\"<STR_LIT:password>\"][<NUM_LIT:0>:<NUM_LIT:1>] == '<STR_LIT:\">' andself.credentials[\"<STR_LIT:password>\"][-<NUM_LIT:1>:] == '<STR_LIT:\">':<EOL><INDENT>self.credentials[\"<STR_LIT:password>\"] =self.credentials[\"<STR_LIT:password>\"][<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>if \"<STR_LIT:user>\" in self.credentials and \"<STR_LIT:password>\" in self.credentials:<EOL><INDENT>c = self.credentials[\"<STR_LIT:user>\"] + \"<STR_LIT::>\" + self.credentials[\"<STR_LIT:password>\"]<EOL>self.credentials[\"<STR_LIT>\"] = 
b64encode(c.encode()).decode(\"<STR_LIT:ascii>\")<EOL><DEDENT><DEDENT>", "docstring": "Helper method to store username and password", "id": "f155:c0:m6"}
{"signature": "def log_post(self, url=None, credentials=None, do_verify_certificate=True):", "body": "if url is None:<EOL><INDENT>url = self.url<EOL><DEDENT>if credentials is None:<EOL><INDENT>credentials = self.credentials<EOL><DEDENT>if do_verify_certificate is None:<EOL><INDENT>do_verify_certificate = self.do_verify_certificate<EOL><DEDENT>if credentials and \"<STR_LIT>\" in credentials:<EOL><INDENT>headers = {\"<STR_LIT:Content-Type>\": \"<STR_LIT:application/json>\",'<STR_LIT>': '<STR_LIT>' % credentials[\"<STR_LIT>\"]}<EOL><DEDENT>else:<EOL><INDENT>headers = {\"<STR_LIT:Content-Type>\": \"<STR_LIT:application/json>\"}<EOL><DEDENT>try:<EOL><INDENT>request = requests.post(url, headers=headers,data=self.store.get_json(), verify=do_verify_certificate)<EOL><DEDENT>except httplib.IncompleteRead as e:<EOL><INDENT>request = e.partial<EOL><DEDENT>", "docstring": "Write to a remote host via HTTP POST", "id": "f155:c0:m4"}
{"signature": "def halt(self):", "body": "self.do_run = False<EOL>", "docstring": "Tell the this object to stop working after the next round", "id": "f157:c0:m3"}
{"signature": "def age(self):", "body": "<EOL>if self.rounds == <NUM_LIT:1>:<EOL><INDENT>self.do_run = False<EOL><DEDENT>elif self.rounds > <NUM_LIT:1>:<EOL><INDENT>self.rounds -= <NUM_LIT:1><EOL><DEDENT>", "docstring": "Get closer to your EOL", "id": "f157:c0:m1"}
{"signature": "def __init__(self, device, baudrate, store, rounds=<NUM_LIT:100>, timeout=<NUM_LIT>):", "body": "threading.Thread.__init__(self)<EOL>self.baudrate = baudrate<EOL>self.store = store<EOL>self.rounds = rounds<EOL>self.do_run = True<EOL>self.device_name = device<EOL>try:<EOL><INDENT>if device:<EOL><INDENT>self.device = serial.Serial(device, self.baudrate, timeout=timeout);<EOL><DEDENT><DEDENT>except serial.serialutil.SerialException:<EOL><INDENT>print(\"<STR_LIT>\" + self.device_name)<EOL><DEDENT>", "docstring": "Initialize the serial reader class\n    device        device name to connect to\n    baudrate      the baud rate for the serial line\n    store        the data store object to send the data to\n    rounds        number of rounds to run / listen for input", "id": "f157:c0:m0"}
{"signature": "def sort_by_modified(files_or_folders: list) -> list:", "body": "return sorted(files_or_folders, key=os.path.getmtime, reverse=True)<EOL>", "docstring": "Sort files or folders by modified time\n\nArgs:\n    files_or_folders: list of files or folders\n\nReturns:\n    list", "id": "f161:m5"}
{"signature": "def abspath(cur_file, parent=<NUM_LIT:0>) -> str:", "body": "file_path = os.path.abspath(cur_file).replace('<STR_LIT:\\\\>', '<STR_LIT:/>')<EOL>if os.path.isdir(file_path) and parent == <NUM_LIT:0>: return file_path<EOL>adj = <NUM_LIT:1> - os.path.isdir(file_path)<EOL>return '<STR_LIT:/>'.join(file_path.split('<STR_LIT:/>')[:-(parent + adj)])<EOL>", "docstring": "Absolute path\n\nArgs:\n    cur_file: __file__ or file or path str\n    parent: level of parent to look for\n\nReturns:\n    str", "id": "f161:m1"}
{"signature": "def all_files(<EOL>path_name, keyword='<STR_LIT>', ext='<STR_LIT>', full_path=True,<EOL>has_date=False, date_fmt=DATE_FMT<EOL>) -> list:", "body": "if not os.path.exists(path=path_name): return []<EOL>path_name = path_name.replace('<STR_LIT:\\\\>', '<STR_LIT:/>')<EOL>if keyword or ext:<EOL><INDENT>keyword = f'<STR_LIT>' if keyword else '<STR_LIT:*>'<EOL>if not ext: ext = '<STR_LIT:*>'<EOL>files = sort_by_modified([<EOL>f.replace('<STR_LIT:\\\\>', '<STR_LIT:/>') for f in glob.iglob(f'<STR_LIT>')<EOL>if os.path.isfile(f) and (f.replace('<STR_LIT:\\\\>', '<STR_LIT:/>').split('<STR_LIT:/>')[-<NUM_LIT:1>][<NUM_LIT:0>] != '<STR_LIT>')<EOL>])<EOL><DEDENT>else:<EOL><INDENT>files = sort_by_modified([<EOL>f'<STR_LIT>' for f in os.listdir(path=path_name)<EOL>if os.path.isfile(f'<STR_LIT>') and (f[<NUM_LIT:0>] != '<STR_LIT>')<EOL>])<EOL><DEDENT>if has_date:<EOL><INDENT>files = filter_by_dates(files, date_fmt=date_fmt)<EOL><DEDENT>return files if full_path else [f.split('<STR_LIT:/>')[-<NUM_LIT:1>] for f in files]<EOL>", "docstring": "Search all files with criteria\nReturned list will be sorted by last modified\n\nArgs:\n    path_name: full path name\n    keyword: keyword to search\n    ext: file extensions, split by ','\n    full_path: whether return full path (default True)\n    has_date: whether has date in file name (default False)\n    date_fmt: date format to check for has_date parameter\n\nReturns:\n    list: all file names with criteria fulfilled", "id": "f161:m3"}
{"signature": "def exists(path) -> bool:", "body": "return os.path.exists(path=path)<EOL>", "docstring": "Check path or file exists (use os.path.exists)\n\nArgs:\n    path: path or file", "id": "f161:m0"}
{"signature": "def filter_by_dates(files_or_folders: list, date_fmt=DATE_FMT) -> list:", "body": "r = re.compile(f'<STR_LIT>')<EOL>return list(filter(<EOL>lambda vv: r.match(vv.replace('<STR_LIT:\\\\>', '<STR_LIT:/>').split('<STR_LIT:/>')[-<NUM_LIT:1>]) is not None,<EOL>files_or_folders,<EOL>))<EOL>", "docstring": "Filter files or dates by date patterns\n\nArgs:\n    files_or_folders: list of files or folders\n    date_fmt: date format\n\nReturns:\n    list", "id": "f161:m6"}
{"signature": "def file_modified_time(file_name) -> pd.Timestamp:", "body": "return pd.to_datetime(time.ctime(os.path.getmtime(filename=file_name)))<EOL>", "docstring": "File modified time in python\n\nArgs:\n    file_name: file name\n\nReturns:\n    pd.Timestamp", "id": "f161:m8"}
{"signature": "def to_hour(num) -> str:", "body": "to_str = str(int(num))<EOL>return pd.Timestamp(f'<STR_LIT>').strftime('<STR_LIT>')<EOL>", "docstring": "Convert YAML input to hours\n\nArgs:\n    num: number in YMAL file, e.g., 900, 1700, etc.\n\nReturns:\n    str\n\nExamples:\n    >>> to_hour(900)\n    '09:00'\n    >>> to_hour(1700)\n    '17:00'", "id": "f162:m2"}
{"signature": "def hist_file(ticker: str, dt, typ='<STR_LIT>') -> str:", "body": "data_path = os.environ.get(assist.BBG_ROOT, '<STR_LIT>').replace('<STR_LIT:\\\\>', '<STR_LIT:/>')<EOL>if not data_path: return '<STR_LIT>'<EOL>asset = ticker.split()[-<NUM_LIT:1>]<EOL>proper_ticker = ticker.replace('<STR_LIT:/>', '<STR_LIT:_>')<EOL>cur_dt = pd.Timestamp(dt).strftime('<STR_LIT>')<EOL>return f'<STR_LIT>'<EOL>", "docstring": "Data file location for Bloomberg historical data\n\nArgs:\n    ticker: ticker name\n    dt: date\n    typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]\n\nReturns:\n    file location\n\nExamples:\n    >>> os.environ['BBG_ROOT'] = ''\n    >>> hist_file(ticker='ES1 Index', dt='2018-08-01') == ''\n    True\n    >>> os.environ['BBG_ROOT'] = '/data/bbg'\n    >>> hist_file(ticker='ES1 Index', dt='2018-08-01')\n    '/data/bbg/Index/ES1 Index/TRADE/2018-08-01.parq'", "id": "f163:m0"}
{"signature": "def get_logger(name_or_func, level=LOG_LEVEL, types='<STR_LIT>', **kwargs):", "body": "if isinstance(level, str): level = getattr(logging, level.upper())<EOL>log_name = utils.func_scope(name_or_func) if callable(name_or_func) else name_or_func<EOL>logger = logging.getLogger(name=log_name)<EOL>logger.setLevel(level=level)<EOL>if not len(logger.handlers):<EOL><INDENT>formatter = logging.Formatter(fmt=kwargs.get('<STR_LIT>', LOG_FMT))<EOL>if '<STR_LIT:file>' in types and '<STR_LIT>' in kwargs:<EOL><INDENT>file_handler = logging.FileHandler(kwargs['<STR_LIT>'])<EOL>file_handler.setFormatter(fmt=formatter)<EOL>logger.addHandler(file_handler)<EOL><DEDENT>if '<STR_LIT>' in types:<EOL><INDENT>stream_handler = logging.StreamHandler()<EOL>stream_handler.setFormatter(fmt=formatter)<EOL>logger.addHandler(stream_handler)<EOL><DEDENT><DEDENT>return logger<EOL>", "docstring": "Generate logger\n\nArgs:\n    name_or_func: logger name or current running function\n    level: level of logs - debug, info, error\n    types: file or stream, or both\n\nReturns:\n    logger\n\nExamples:\n    >>> get_logger(name_or_func='download_data', level='debug', types='stream')\n    <Logger download_data (DEBUG)>\n    >>> get_logger(name_or_func='preprocess', log_file='pre.log', types='file|stream')\n    <Logger preprocess (CRITICAL)>", "id": "f164:m0"}
{"signature": "def format_output(data: pd.DataFrame, source, col_maps=None) -> pd.DataFrame:", "body": "if data.empty: return pd.DataFrame()<EOL>if source == '<STR_LIT>': req_cols = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT:value>']<EOL>else: req_cols = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT:name>', '<STR_LIT:value>', '<STR_LIT>']<EOL>if any(col not in data for col in req_cols): return pd.DataFrame()<EOL>if data.dropna(subset=['<STR_LIT:value>']).empty: return pd.DataFrame()<EOL>if source == '<STR_LIT>':<EOL><INDENT>res = pd.DataFrame(pd.concat([<EOL>pd.Series({**{'<STR_LIT>': t}, **grp.set_index('<STR_LIT>').value.to_dict()})<EOL>for t, grp in data.groupby('<STR_LIT>')<EOL>], axis=<NUM_LIT:1>, sort=False)).transpose().set_index('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>res = pd.DataFrame(pd.concat([<EOL>grp.loc[:, ['<STR_LIT:name>', '<STR_LIT:value>']].set_index('<STR_LIT:name>')<EOL>.transpose().reset_index(drop=True).assign(ticker=t)<EOL>for (t, _), grp in data.groupby(['<STR_LIT>', '<STR_LIT>'])<EOL>], sort=False)).reset_index(drop=True).set_index('<STR_LIT>')<EOL>res.columns.name = None<EOL><DEDENT>if col_maps is None: col_maps = dict()<EOL>return res.rename(<EOL>columns=lambda vv: col_maps.get(<EOL>vv, vv.lower().replace('<STR_LIT:U+0020>', '<STR_LIT:_>').replace('<STR_LIT:->', '<STR_LIT:_>')<EOL>)<EOL>).apply(pd.to_numeric, errors='<STR_LIT:ignore>', downcast='<STR_LIT:float>')<EOL>", "docstring": "Format `pdblp` outputs to column-based results\n\nArgs:\n    data: `pdblp` result\n    source: `bdp` or `bds`\n    col_maps: rename columns with these mappings\n\nReturns:\n    pd.DataFrame\n\nExamples:\n    >>> format_output(\n    ...     data=pd.read_pickle('xbbg/tests/data/sample_bdp.pkl'),\n    ...     source='bdp'\n    ... ).reset_index()\n              ticker                        name\n    0  QQQ US Equity  INVESCO QQQ TRUST SERIES 1\n    1  SPY US Equity      SPDR S&P 500 ETF TRUST\n    >>> format_output(\n    ...     
data=pd.read_pickle('xbbg/tests/data/sample_dvd.pkl'),\n    ...     source='bds', col_maps={'Dividend Frequency': 'dvd_freq'}\n    ... ).loc[:, ['ex_date', 'dividend_amount', 'dvd_freq']].reset_index()\n            ticker     ex_date  dividend_amount dvd_freq\n    0  C US Equity  2018-02-02             0.32  Quarter", "id": "f166:m3"}
{"signature": "def proc_ovrds(**kwargs):", "body": "return [<EOL>(k, v) for k, v in kwargs.items()<EOL>if k not in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()) + PRSV_COLS<EOL>]<EOL>", "docstring": "Bloomberg overrides\n\nArgs:\n    **kwargs: overrides\n\nReturns:\n    list of tuples\n\nExamples:\n    >>> proc_ovrds(DVD_Start_Dt='20180101')\n    [('DVD_Start_Dt', '20180101')]\n    >>> proc_ovrds(DVD_Start_Dt='20180101', cache=True, has_date=True)\n    [('DVD_Start_Dt', '20180101')]", "id": "f166:m0"}
{"signature": "def proc_elms(**kwargs) -> list:", "body": "return [<EOL>(ELEM_KEYS.get(k, k), ELEM_VALS.get(ELEM_KEYS.get(k, k), dict()).get(v, v))<EOL>for k, v in kwargs.items()<EOL>if (k in list(ELEM_KEYS.keys()) + list(ELEM_KEYS.values()))<EOL>and (k not in PRSV_COLS)<EOL>]<EOL>", "docstring": "Bloomberg overrides for elements\n\nArgs:\n    **kwargs: overrides\n\nReturns:\n    list of tuples\n\nExamples:\n    >>> proc_elms(PerAdj='A', Per='W')\n    [('periodicityAdjustment', 'ACTUAL'), ('periodicitySelection', 'WEEKLY')]\n    >>> proc_elms(Days='A', Fill='B')\n    [('nonTradingDayFillOption', 'ALL_CALENDAR_DAYS'), ('nonTradingDayFillMethod', 'NIL_VALUE')]\n    >>> proc_elms(CshAdjNormal=False, CshAdjAbnormal=True)\n    [('adjustmentNormal', False), ('adjustmentAbnormal', True)]\n    >>> proc_elms(Per='W', Quote='Average', start_date='2018-01-10')\n    [('periodicitySelection', 'WEEKLY'), ('overrideOption', 'OVERRIDE_OPTION_GPA')]\n    >>> proc_elms(QuoteType='Y')\n    [('pricingOption', 'PRICING_OPTION_YIELD')]\n    >>> proc_elms(QuoteType='Y', cache=True)\n    [('pricingOption', 'PRICING_OPTION_YIELD')]", "id": "f166:m1"}
{"signature": "def update_missing(**kwargs):", "body": "data_path = os.environ.get(BBG_ROOT, '<STR_LIT>').replace('<STR_LIT:\\\\>', '<STR_LIT:/>')<EOL>if not data_path: return<EOL>if len(kwargs) == <NUM_LIT:0>: return<EOL>log_path = f'<STR_LIT>'<EOL>cnt = len(files.all_files(log_path)) + <NUM_LIT:1><EOL>files.create_folder(log_path)<EOL>open(f'<STR_LIT>', '<STR_LIT:a>').close()<EOL>", "docstring": "Update number of trials for missing values", "id": "f167:m2"}
{"signature": "def with_bloomberg(func):", "body": "@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>scope = utils.func_scope(func=func)<EOL>param = inspect.signature(func).parameters<EOL>port = kwargs.pop('<STR_LIT:port>', _PORT_)<EOL>timeout = kwargs.pop('<STR_LIT>', _TIMEOUT_)<EOL>restart = kwargs.pop('<STR_LIT>', False)<EOL>all_kw = {<EOL>k: args[n] if n < len(args) else v.default<EOL>for n, (k, v) in enumerate(param.items()) if k != '<STR_LIT>'<EOL>}<EOL>all_kw.update(kwargs)<EOL>log_level = kwargs.get('<STR_LIT>', logs.LOG_LEVEL)<EOL>for to_list in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>conv = all_kw.get(to_list, None)<EOL>if hasattr(conv, '<STR_LIT>'):<EOL><INDENT>all_kw[to_list] = getattr(conv, '<STR_LIT>')()<EOL><DEDENT>if isinstance(conv, str):<EOL><INDENT>all_kw[to_list] = [conv]<EOL><DEDENT><DEDENT>cached_data = []<EOL>if scope in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>to_qry = cached.bdp_bds_cache(func=func.__name__, **all_kw)<EOL>cached_data += to_qry.cached_data<EOL>if not (to_qry.tickers and to_qry.flds):<EOL><INDENT>if not cached_data: return pd.DataFrame()<EOL>res = pd.concat(cached_data, sort=False).reset_index(drop=True)<EOL>if not all_kw.get('<STR_LIT>', False):<EOL><INDENT>res = assist.format_output(<EOL>data=res, source=func.__name__,<EOL>col_maps=all_kw.get('<STR_LIT>', dict())<EOL>)<EOL><DEDENT>return res<EOL><DEDENT>all_kw['<STR_LIT>'] = to_qry.tickers<EOL>all_kw['<STR_LIT>'] = to_qry.flds<EOL><DEDENT>if scope in ['<STR_LIT>']:<EOL><INDENT>data_file = storage.hist_file(<EOL>ticker=all_kw['<STR_LIT>'], dt=all_kw['<STR_LIT>'], typ=all_kw['<STR_LIT>'],<EOL>)<EOL>if files.exists(data_file):<EOL><INDENT>logger = logs.get_logger(func, level=log_level)<EOL>if all_kw.get('<STR_LIT>', False): return<EOL>logger.debug(f'<STR_LIT>')<EOL>return assist.format_intraday(data=pd.read_parquet(data_file), **all_kw)<EOL><DEDENT><DEDENT>_, new = create_connection(port=port, timeout=timeout, restart=restart)<EOL>res = func(**{<EOL>k: v for k, v in 
all_kw.items() if k not in ['<STR_LIT>', '<STR_LIT>']<EOL>})<EOL>if new: delete_connection()<EOL>if scope.startswith('<STR_LIT>') and isinstance(res, list):<EOL><INDENT>final = cached_data + res<EOL>if not final: return pd.DataFrame()<EOL>res = pd.DataFrame(pd.concat(final, sort=False))<EOL><DEDENT>if (scope in ['<STR_LIT>', '<STR_LIT>'])and (not all_kw.get('<STR_LIT>', False)):<EOL><INDENT>res = assist.format_output(<EOL>data=res.reset_index(drop=True), source=func.__name__,<EOL>col_maps=all_kw.get('<STR_LIT>', dict()),<EOL>)<EOL><DEDENT>return res<EOL><DEDENT>return wrapper<EOL>", "docstring": "Wrapper function for Bloomberg connection\n\nArgs:\n    func: function to wrap", "id": "f169:m0"}
{"signature": "def load_module(full_path):", "body": "from importlib import util<EOL>file_name = full_path.replace('<STR_LIT:\\\\>', '<STR_LIT:/>').split('<STR_LIT:/>')[-<NUM_LIT:1>]<EOL>if file_name[-<NUM_LIT:3>:] != '<STR_LIT>':<EOL><INDENT>raise ImportError(f'<STR_LIT>')<EOL><DEDENT>module_name = file_name[:-<NUM_LIT:3>]<EOL>spec = util.spec_from_file_location(name=module_name, location=full_path)<EOL>module = util.module_from_spec(spec=spec)<EOL>spec.loader.exec_module(module=module)<EOL>return module<EOL>", "docstring": "Load module from full path\nArgs:\n    full_path: module full path name\nReturns:\n    python module\nReferences:\n    https://stackoverflow.com/a/67692/1332656\nExamples:\n    >>> import os\n    >>>\n    >>> cur_file = os.path.abspath(__file__).replace('\\\\\\\\', '/')\n    >>> cur_path = '/'.join(cur_file.split('/')[:-1])\n    >>> load_module(f'{cur_path}/timezone.py').__name__\n    'timezone'\n    >>> load_module(f'{cur_path}/timezone.pyc')\n    Traceback (most recent call last):\n    ImportError: not a python file: timezone.pyc", "id": "f170:m7"}
{"signature": "def func_scope(func) -> str:", "body": "cur_mod = sys.modules[func.__module__]<EOL>return f'<STR_LIT>'<EOL>", "docstring": "Function scope name\n\nArgs:\n    func: python function\n\nReturns:\n    str: module_name.func_name\n\nExamples:\n    >>> func_scope(flatten)\n    'xbbg.core.utils.flatten'\n    >>> func_scope(time.strftime)\n    'time.strftime'", "id": "f170:m6"}
{"signature": "def cur_time(typ='<STR_LIT:date>', tz=DEFAULT_TZ) -> (datetime.date, str):", "body": "dt = pd.Timestamp('<STR_LIT>', tz=tz)<EOL>if typ == '<STR_LIT:date>': return dt.strftime('<STR_LIT>')<EOL>if typ == '<STR_LIT:time>': return dt.strftime('<STR_LIT>')<EOL>if typ == '<STR_LIT>': return dt.strftime('<STR_LIT>')<EOL>if typ == '<STR_LIT>': return dt<EOL>return dt.date()<EOL>", "docstring": "Current time\n\nArgs:\n    typ: one of ['date', 'time', 'time_path', 'raw', '']\n    tz: timezone\n\nReturns:\n    relevant current time or date\n\nExamples:\n    >>> cur_dt = pd.Timestamp('now')\n    >>> cur_time(typ='date') == cur_dt.strftime('%Y-%m-%d')\n    True\n    >>> cur_time(typ='time') == cur_dt.strftime('%Y-%m-%d %H:%M:%S')\n    True\n    >>> cur_time(typ='time_path') == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')\n    True\n    >>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)\n    True\n    >>> cur_time(typ='') == cur_dt.date()\n    True", "id": "f170:m3"}
{"signature": "def _to_gen_(iterable):", "body": "from collections import Iterable<EOL>for elm in iterable:<EOL><INDENT>if isinstance(elm, Iterable) and not isinstance(elm, (str, bytes)):<EOL><INDENT>yield from flatten(elm)<EOL><DEDENT>else: yield elm<EOL><DEDENT>", "docstring": "Recursively iterate lists and tuples", "id": "f170:m1"}
{"signature": "def market_exact(self, session, start_time: str, end_time: str) -> Session:", "body": "if session not in self.exch: return SessNA<EOL>ss = self.exch[session]<EOL>same_day = ss[<NUM_LIT:0>] < ss[-<NUM_LIT:1>]<EOL>if not start_time: s_time = ss[<NUM_LIT:0>]<EOL>else:<EOL><INDENT>s_time = param.to_hour(start_time)<EOL>if same_day: s_time = max(s_time, ss[<NUM_LIT:0>])<EOL><DEDENT>if not end_time: e_time = ss[-<NUM_LIT:1>]<EOL>else:<EOL><INDENT>e_time = param.to_hour(end_time)<EOL>if same_day: e_time = min(e_time, ss[-<NUM_LIT:1>])<EOL><DEDENT>if same_day and (s_time > e_time): return SessNA<EOL>return Session(start_time=s_time, end_time=e_time)<EOL>", "docstring": "Explicitly specify start time and end time\n\nArgs:\n    session: predefined session\n    start_time: start time in terms of HHMM string\n    end_time: end time in terms of HHMM string\n\nReturns:\n    Session of start_time and end_time", "id": "f171:c0:m4"}
{"signature": "def __init__(self, ticker):", "body": "self.ticker = ticker<EOL>self.exch = const.exch_info(ticker=ticker)<EOL>", "docstring": "Args:\n    ticker: ticker", "id": "f171:c0:m0"}
{"signature": "def market_timing(ticker, dt, timing='<STR_LIT>', tz='<STR_LIT>') -> str:", "body": "logger = logs.get_logger(market_timing)<EOL>exch = pd.Series(exch_info(ticker=ticker))<EOL>if any(req not in exch.index for req in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>logger.error(f'<STR_LIT>')<EOL>return '<STR_LIT>'<EOL><DEDENT>mkt_time = {<EOL>'<STR_LIT>': exch.day[<NUM_LIT:0>], '<STR_LIT>': exch.allday[-<NUM_LIT:1>]<EOL>}.get(timing, exch.day[-<NUM_LIT:1>])<EOL>cur_dt = pd.Timestamp(str(dt)).strftime('<STR_LIT>')<EOL>if tz == '<STR_LIT>':<EOL><INDENT>return f'<STR_LIT>'<EOL><DEDENT>return timezone.tz_convert(f'<STR_LIT>', to_tz=tz, from_tz=exch.tz)<EOL>", "docstring": "Market close time for ticker\n\nArgs:\n    ticker: ticker name\n    dt: date\n    timing: [EOD (default), BOD]\n    tz: conversion to timezone\n\nReturns:\n    str: date & time\n\nExamples:\n    >>> market_timing('7267 JT Equity', dt='2018-09-10')\n    '2018-09-10 14:58'\n    >>> market_timing('7267 JT Equity', dt='2018-09-10', tz=timezone.TimeZone.NY)\n    '2018-09-10 01:58:00-04:00'\n    >>> market_timing('7267 JT Equity', dt='2018-01-10', tz='NY')\n    '2018-01-10 00:58:00-05:00'\n    >>> market_timing('7267 JT Equity', dt='2018-09-10', tz='SPX Index')\n    '2018-09-10 01:58:00-04:00'\n    >>> market_timing('8035 JT Equity', dt='2018-09-10', timing='BOD')\n    '2018-09-10 09:01'\n    >>> market_timing('Z 1 Index', dt='2018-09-10', timing='FINISHED')\n    '2018-09-10 21:00'\n    >>> market_timing('TESTTICKER Corp', dt='2018-09-10')\n    ''", "id": "f172:m3"}
{"signature": "def ccy_pair(local, base='<STR_LIT>') -> CurrencyPair:", "body": "ccy_param = param.load_info(cat='<STR_LIT>')<EOL>if f'<STR_LIT>' in ccy_param:<EOL><INDENT>info = ccy_param[f'<STR_LIT>']<EOL><DEDENT>elif f'<STR_LIT>' in ccy_param:<EOL><INDENT>info = ccy_param[f'<STR_LIT>']<EOL>info['<STR_LIT>'] = <NUM_LIT:1.> / info.get('<STR_LIT>', <NUM_LIT:1.>)<EOL>info['<STR_LIT>'] = -info.get('<STR_LIT>', <NUM_LIT:1>)<EOL><DEDENT>elif base.lower() == local.lower():<EOL><INDENT>info = dict(ticker='<STR_LIT>')<EOL>info['<STR_LIT>'] = <NUM_LIT:1.><EOL>if base[-<NUM_LIT:1>].lower() == base[-<NUM_LIT:1>]:<EOL><INDENT>info['<STR_LIT>'] /= <NUM_LIT><EOL><DEDENT>if local[-<NUM_LIT:1>].lower() == local[-<NUM_LIT:1>]:<EOL><INDENT>info['<STR_LIT>'] *= <NUM_LIT><EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger = logs.get_logger(ccy_pair)<EOL>logger.error(f'<STR_LIT>')<EOL>return CurrencyPair(ticker='<STR_LIT>', factor=<NUM_LIT:1.>, power=<NUM_LIT:1>)<EOL><DEDENT>if '<STR_LIT>' not in info: info['<STR_LIT>'] = <NUM_LIT:1.><EOL>if '<STR_LIT>' not in info: info['<STR_LIT>'] = <NUM_LIT:1><EOL>return CurrencyPair(**info)<EOL>", "docstring": "Currency pair info\n\nArgs:\n    local: local currency\n    base: base currency\n\nReturns:\n    CurrencyPair\n\nExamples:\n    >>> ccy_pair(local='HKD', base='USD')\n    CurrencyPair(ticker='HKD Curncy', factor=1.0, power=1)\n    >>> ccy_pair(local='GBp')\n    CurrencyPair(ticker='GBP Curncy', factor=100, power=-1)\n    >>> ccy_pair(local='USD', base='GBp')\n    CurrencyPair(ticker='GBP Curncy', factor=0.01, power=1)\n    >>> ccy_pair(local='XYZ', base='USD')\n    CurrencyPair(ticker='', factor=1.0, power=1)\n    >>> ccy_pair(local='GBP', base='GBp')\n    CurrencyPair(ticker='', factor=0.01, power=1)\n    >>> ccy_pair(local='GBp', base='GBP')\n    CurrencyPair(ticker='', factor=100.0, power=1)", "id": "f172:m2"}
{"signature": "@with_bloomberg<EOL>def active_futures(ticker: str, dt) -> str:", "body": "t_info = ticker.split()<EOL>prefix, asset = '<STR_LIT:U+0020>'.join(t_info[:-<NUM_LIT:1>]), t_info[-<NUM_LIT:1>]<EOL>info = const.market_info(f'<STR_LIT>')<EOL>f1, f2 = f'<STR_LIT>', f'<STR_LIT>'<EOL>fut_2 = fut_ticker(gen_ticker=f2, dt=dt, freq=info['<STR_LIT>'])<EOL>fut_1 = fut_ticker(gen_ticker=f1, dt=dt, freq=info['<STR_LIT>'])<EOL>fut_tk = bdp(tickers=[fut_1, fut_2], flds='<STR_LIT>', cache=True)<EOL>if pd.Timestamp(dt).month < pd.Timestamp(fut_tk.last_tradeable_dt[<NUM_LIT:0>]).month: return fut_1<EOL>d1 = bdib(ticker=f1, dt=dt)<EOL>d2 = bdib(ticker=f2, dt=dt)<EOL>return fut_1 if d1[f1].volume.sum() > d2[f2].volume.sum() else fut_2<EOL>", "docstring": "Active futures contract\n\nArgs:\n    ticker: futures ticker, i.e., ESA Index, Z A Index, CLA Comdty, etc.\n    dt: date\n\nReturns:\n    str: ticker name", "id": "f174:m7"}
{"signature": "@with_bloomberg<EOL>def bdib(ticker, dt, typ='<STR_LIT>', **kwargs) -> pd.DataFrame:", "body": "from xbbg.core import missing<EOL>logger = logs.get_logger(bdib, level=kwargs.pop('<STR_LIT>', logs.LOG_LEVEL))<EOL>t_1 = pd.Timestamp('<STR_LIT>').date() - pd.Timedelta('<STR_LIT>')<EOL>whole_day = pd.Timestamp(dt).date() < t_1<EOL>batch = kwargs.pop('<STR_LIT>', False)<EOL>if (not whole_day) and batch:<EOL><INDENT>logger.warning(f'<STR_LIT>')<EOL>return pd.DataFrame()<EOL><DEDENT>cur_dt = pd.Timestamp(dt).strftime('<STR_LIT>')<EOL>asset = ticker.split()[-<NUM_LIT:1>]<EOL>info_log = f'<STR_LIT>'<EOL>if asset in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>exch = const.exch_info(ticker=ticker)<EOL>if exch.empty: return pd.DataFrame()<EOL><DEDENT>else:<EOL><INDENT>logger.error(f'<STR_LIT>')<EOL>return pd.DataFrame()<EOL><DEDENT>time_fmt = '<STR_LIT>'<EOL>time_idx = pd.DatetimeIndex([<EOL>f'<STR_LIT>', f'<STR_LIT>']<EOL>).tz_localize(exch.tz).tz_convert(DEFAULT_TZ).tz_convert('<STR_LIT>')<EOL>if time_idx[<NUM_LIT:0>] > time_idx[<NUM_LIT:1>]: time_idx -= pd.TimedeltaIndex(['<STR_LIT>', '<STR_LIT>'])<EOL>q_tckr = ticker<EOL>if exch.get('<STR_LIT>', False):<EOL><INDENT>if '<STR_LIT>' not in exch:<EOL><INDENT>logger.error(f'<STR_LIT>')<EOL><DEDENT>is_sprd = exch.get('<STR_LIT>', False) and (len(ticker[:-<NUM_LIT:1>]) != exch['<STR_LIT>'][<NUM_LIT:0>])<EOL>if not is_sprd:<EOL><INDENT>q_tckr = fut_ticker(gen_ticker=ticker, dt=dt, freq=exch['<STR_LIT>'])<EOL>if q_tckr == '<STR_LIT>':<EOL><INDENT>logger.error(f'<STR_LIT>')<EOL>return pd.DataFrame()<EOL><DEDENT><DEDENT><DEDENT>info_log = f'<STR_LIT>'<EOL>miss_kw = dict(ticker=ticker, dt=dt, typ=typ, func='<STR_LIT>')<EOL>cur_miss = missing.current_missing(**miss_kw)<EOL>if cur_miss >= <NUM_LIT:2>:<EOL><INDENT>if batch: return pd.DataFrame()<EOL>logger.info(f'<STR_LIT>')<EOL>return pd.DataFrame()<EOL><DEDENT>logger.info(f'<STR_LIT>')<EOL>con, _ = create_connection()<EOL>try:<EOL><INDENT>data = 
con.bdib(<EOL>ticker=q_tckr, event_type=typ, interval=<NUM_LIT:1>,<EOL>start_datetime=time_idx[<NUM_LIT:0>].strftime(time_fmt),<EOL>end_datetime=time_idx[<NUM_LIT:1>].strftime(time_fmt),<EOL>)<EOL><DEDENT>except KeyError:<EOL><INDENT>data = pd.DataFrame()<EOL><DEDENT>if not isinstance(data, pd.DataFrame):<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT>if data.empty:<EOL><INDENT>logger.warning(f'<STR_LIT>')<EOL>missing.update_missing(**miss_kw)<EOL>return pd.DataFrame()<EOL><DEDENT>data = data.tz_localize('<STR_LIT>').tz_convert(exch.tz)<EOL>storage.save_intraday(data=data, ticker=ticker, dt=dt, typ=typ)<EOL>return pd.DataFrame() if batch else assist.format_intraday(data=data, ticker=ticker)<EOL>", "docstring": "Bloomberg intraday bar data\n\nArgs:\n    ticker: ticker name\n    dt: date to download\n    typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]\n    **kwargs:\n        batch: whether is batch process to download data\n        log: level of logs\n\nReturns:\n    pd.DataFrame", "id": "f174:m3"}
{"signature": "@with_bloomberg<EOL>def check_hours(tickers, tz_exch, tz_loc=DEFAULT_TZ) -> pd.DataFrame:", "body": "cols = ['<STR_LIT>', '<STR_LIT>']<EOL>con, _ = create_connection()<EOL>hours = con.ref(tickers=tickers, flds=cols)<EOL>cur_dt = pd.Timestamp('<STR_LIT>').strftime('<STR_LIT>')<EOL>hours.loc[:, '<STR_LIT>'] = hours.value.astype(str).str[:-<NUM_LIT:3>]<EOL>hours.loc[:, '<STR_LIT>'] = pd.DatetimeIndex(<EOL>cur_dt + hours.value.astype(str)<EOL>).tz_localize(tz_loc).tz_convert(tz_exch).strftime('<STR_LIT>')<EOL>hours = pd.concat([<EOL>hours.set_index(['<STR_LIT>', '<STR_LIT>']).exch.unstack().loc[:, cols],<EOL>hours.set_index(['<STR_LIT>', '<STR_LIT>']).local.unstack().loc[:, cols],<EOL>], axis=<NUM_LIT:1>)<EOL>hours.columns = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>return hours<EOL>", "docstring": "Check exchange hours vs local hours\n\nArgs:\n    tickers: list of tickers\n    tz_exch: exchange timezone\n    tz_loc: local timezone\n\nReturns:\n    Local and exchange hours", "id": "f174:m9"}
{"signature": "@with_bloomberg<EOL>def bds(tickers, flds, **kwargs):", "body": "logger = logs.get_logger(bds, level=kwargs.pop('<STR_LIT>', logs.LOG_LEVEL))<EOL>con, _ = create_connection()<EOL>ovrds = assist.proc_ovrds(**kwargs)<EOL>logger.info(<EOL>f'<STR_LIT>'<EOL>f'<STR_LIT>'<EOL>)<EOL>data = con.bulkref(tickers=tickers, flds=flds, ovrds=ovrds)<EOL>if not kwargs.get('<STR_LIT>', False): return [data]<EOL>qry_data = []<EOL>for (ticker, fld), grp in data.groupby(['<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>data_file = storage.ref_file(<EOL>ticker=ticker, fld=fld, ext='<STR_LIT>',<EOL>has_date=kwargs.get('<STR_LIT>', True), **kwargs<EOL>)<EOL>if data_file:<EOL><INDENT>if not files.exists(data_file): qry_data.append(grp)<EOL>files.create_folder(data_file, is_file=True)<EOL>grp.reset_index(drop=True).to_pickle(data_file)<EOL><DEDENT><DEDENT>return qry_data<EOL>", "docstring": "Bloomberg block data\n\nArgs:\n    tickers: ticker(s)\n    flds: field(s)\n    **kwargs: other overrides for query\n      -> raw: raw output from `pdbdp` library, default False\n\nReturns:\n    pd.DataFrame: block data\n\nExamples:\n    >>> import os\n    >>>\n    >>> pd.options.display.width = 120\n    >>> s_dt, e_dt = '20180301', '20181031'\n    >>> dvd = bds(\n    ...     'NVDA US Equity', 'DVD_Hist_All',\n    ...     DVD_Start_Dt=s_dt, DVD_End_Dt=e_dt, raw=True,\n    ... )\n    >>> dvd.loc[:, ['ticker', 'name', 'value']].head(8)\n               ticker                name         value\n    0  NVDA US Equity       Declared Date    2018-08-16\n    1  NVDA US Equity             Ex-Date    2018-08-29\n    2  NVDA US Equity         Record Date    2018-08-30\n    3  NVDA US Equity        Payable Date    2018-09-21\n    4  NVDA US Equity     Dividend Amount          0.15\n    5  NVDA US Equity  Dividend Frequency       Quarter\n    6  NVDA US Equity       Dividend Type  Regular Cash\n    7  NVDA US Equity       Declared Date    2018-05-10\n    >>> dvd = bds(\n    ...     
'NVDA US Equity', 'DVD_Hist_All',\n    ...     DVD_Start_Dt=s_dt, DVD_End_Dt=e_dt,\n    ... )\n    >>> dvd.reset_index().loc[:, ['ticker', 'ex_date', 'dividend_amount']]\n               ticker     ex_date  dividend_amount\n    0  NVDA US Equity  2018-08-29             0.15\n    1  NVDA US Equity  2018-05-23             0.15\n    >>> if not os.environ.get('BBG_ROOT', ''):\n    ...     os.environ['BBG_ROOT'] = f'{files.abspath(__file__, 1)}/tests/data'\n    >>> idx_kw = dict(End_Dt='20181220', cache=True)\n    >>> idx_wt = bds('DJI Index', 'Indx_MWeight_Hist', **idx_kw)\n    >>> idx_wt.round(2).tail().reset_index(drop=True)\n      index_member  percent_weight\n    0         V UN            3.82\n    1        VZ UN            1.63\n    2       WBA UW            2.06\n    3       WMT UN            2.59\n    4       XOM UN            2.04\n    >>> idx_wt = bds('DJI Index', 'Indx_MWeight_Hist', **idx_kw)\n    >>> idx_wt.round(2).head().reset_index(drop=True)\n      index_member  percent_weight\n    0      AAPL UW            4.65\n    1       AXP UN            2.84\n    2        BA UN            9.29\n    3       CAT UN            3.61\n    4      CSCO UW            1.26", "id": "f174:m1"}
{"signature": "def parse_version(package):", "body": "init_file = f'<STR_LIT>'<EOL>with open(init_file, '<STR_LIT:r>', encoding='<STR_LIT:utf-8>') as f:<EOL><INDENT>for line in f.readlines():<EOL><INDENT>if '<STR_LIT>' in line:<EOL><INDENT>return line.split('<STR_LIT:=>')[<NUM_LIT:1>].strip()[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Parse versions", "id": "f175:m0"}
{"signature": "def parse_markdown():", "body": "readme_file = f'<STR_LIT>'<EOL>if path.exists(readme_file):<EOL><INDENT>with open(readme_file, '<STR_LIT:r>', encoding='<STR_LIT:utf-8>') as f:<EOL><INDENT>long_description = f.read()<EOL><DEDENT>return long_description<EOL><DEDENT>", "docstring": "Parse markdown as description", "id": "f175:m1"}
{"signature": "def import_submodules(package, recursive=True):", "body": "if isinstance(package, str):<EOL><INDENT>package = importlib.import_module(package)<EOL><DEDENT>results = {}<EOL>for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):<EOL><INDENT>full_name = package.__name__ + '<STR_LIT:.>' + name<EOL>results[full_name] = importlib.import_module(full_name)<EOL>if recursive and is_pkg:<EOL><INDENT>results.update(import_submodules(full_name))<EOL><DEDENT><DEDENT>return results<EOL>", "docstring": "Import all submodules of a module, recursively, including subpackages\n\n    :param package: package (name or actual module)\n    :type package: str | module\n    :rtype: dict[str, types.ModuleType]", "id": "f183:m1"}
{"signature": "@staticmethod<EOL><INDENT>def parse(config):<DEDENT>", "body": "if not isinstance(config, basestring):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>validator = ContainsValidator()<EOL>validator.contains_string = config<EOL>return validator<EOL>", "docstring": "Parse a contains validator, which takes as the config a simple string to find", "id": "f213:c0:m1"}
{"signature": "def bind_variable(self, variable_name, variable_value):", "body": "str_name = str(variable_name)<EOL>prev = self.variables.get(str_name)<EOL>if prev != variable_value:<EOL><INDENT>self.variables[str(variable_name)] = variable_value<EOL>self.mod_count = self.mod_count + <NUM_LIT:1><EOL><DEDENT>", "docstring": "Bind a named variable to a value within the context\n            This allows for passing in variables in testing", "id": "f215:c0:m0"}
{"signature": "def tearDown(self):", "body": "self.server_process.terminate()<EOL>self.server_process = None<EOL>", "docstring": "Stop the server process", "id": "f219:c0:m1"}
{"signature": "def setUp(self):", "body": "config_args = ('<STR_LIT>', os.path.join(<EOL>djangopath, '<STR_LIT>'))<EOL>proc = Process(target=call_command, args=config_args)<EOL>proc.start()<EOL>self.server_process = proc<EOL>time.sleep(<NUM_LIT:1>)<EOL>", "docstring": "Start a mini Django-tastypie REST webapp with test data for testing REST tests", "id": "f219:c0:m0"}
{"signature": "def realize_partial(self, context=None):", "body": "if not self.is_dynamic():<EOL><INDENT>return self<EOL><DEDENT>if self.is_context_modifier():<EOL><INDENT>return self<EOL><DEDENT>else:<EOL><INDENT>copyout = copy.cop<EOL><DEDENT>pass<EOL>", "docstring": "Attempt to template out what is possible for this benchmark", "id": "f223:m2"}
{"signature": "def main(args):", "body": "if '<STR_LIT>' in args and args['<STR_LIT>'] is not None:<EOL><INDENT>logger.setLevel(LOGGING_LEVELS.get(<EOL>args['<STR_LIT>'].lower(), logging.NOTSET))<EOL><DEDENT>if '<STR_LIT>' in args and args['<STR_LIT>']:<EOL><INDENT>extensions = args['<STR_LIT>'].split('<STR_LIT:;>')<EOL>working_folder = args['<STR_LIT>']<EOL>if working_folder not in sys.path:<EOL><INDENT>sys.path.insert(<NUM_LIT:0>, working_folder)<EOL><DEDENT>register_extensions(extensions)<EOL><DEDENT>test_file = args['<STR_LIT:test>']<EOL>test_structure = read_test_file(test_file)<EOL>my_vars = None<EOL>if '<STR_LIT>' in args and args['<STR_LIT>'] is not None:<EOL><INDENT>my_vars = yaml.safe_load(args['<STR_LIT>'])<EOL><DEDENT>if my_vars and not isinstance(my_vars, dict):<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>base_url = args['<STR_LIT:url>']<EOL>if '<STR_LIT>' in args and args['<STR_LIT>']:<EOL><INDENT>base_url = '<STR_LIT>'<EOL><DEDENT>tests = parse_testsets(base_url, test_structure,<EOL>working_directory=os.path.dirname(test_file), vars=my_vars)<EOL>for t in tests:<EOL><INDENT>if '<STR_LIT>' in args and args['<STR_LIT>'] is not None and bool(args['<STR_LIT>']):<EOL><INDENT>t.config.print_bodies = safe_to_bool(args['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in args and args['<STR_LIT>'] is not None and bool(args['<STR_LIT>']):<EOL><INDENT>t.config.print_headers = safe_to_bool(args['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in args and args['<STR_LIT>'] is not None:<EOL><INDENT>t.config.interactive = safe_to_bool(args['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in args and args['<STR_LIT>'] is not None:<EOL><INDENT>t.config.verbose = safe_to_bool(args['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in args and args['<STR_LIT>'] is not None:<EOL><INDENT>t.config.ssl_insecure = safe_to_bool(args['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in args and args['<STR_LIT>'] is not None:<EOL><INDENT>t.config.skip_term_colors = 
safe_to_bool(args['<STR_LIT>'])<EOL><DEDENT><DEDENT>failures = run_testsets(tests)<EOL>sys.exit(failures)<EOL>", "docstring": "Execute a test against the given base url.\n\nKeys allowed for args:\n    url           - REQUIRED - Base URL\n    test          - REQUIRED - Test file (yaml)\n    print_bodies  - OPTIONAL - print response body\n    print_headers  - OPTIONAL - print response headers\n    log           - OPTIONAL - set logging level {debug,info,warning,error,critical} (default=warning)\n    interactive   - OPTIONAL - mode that prints info before and after test exectuion and pauses for user input for each test\n    absolute_urls - OPTIONAL - mode that treats URLs in tests as absolute/full URLs instead of relative URLs\n    skip_term_colors - OPTIONAL - mode that turn off the output term colors", "id": "f231:m14"}
{"signature": "def analyze_benchmark_results(benchmark_result, benchmark):", "body": "output = BenchmarkResult()<EOL>output.name = benchmark_result.name<EOL>output.group = benchmark_result.group<EOL>output.failures = benchmark_result.failures<EOL>raw_results = benchmark_result.results<EOL>temp = dict()<EOL>for metric in benchmark.raw_metrics:<EOL><INDENT>temp[metric] = raw_results[metric]<EOL><DEDENT>output.results = temp<EOL>aggregate_results = list()<EOL>for metricname, aggregate_list in benchmark.aggregated_metrics.items():<EOL><INDENT>numbers = raw_results[metricname]<EOL>for aggregate_name in aggregate_list:<EOL><INDENT>if numbers:  <EOL><INDENT>aggregate_function = AGGREGATES[aggregate_name]<EOL>aggregate_results.append(<EOL>(metricname, aggregate_name, aggregate_function(numbers)))<EOL><DEDENT>else:<EOL><INDENT>aggregate_results.append((metricname, aggregate_name, None))<EOL><DEDENT><DEDENT><DEDENT>output.aggregates = aggregate_results<EOL>return output<EOL>", "docstring": "Take a benchmark result containing raw benchmark results, and do aggregation by\n    applying functions\n\n    Aggregates come out in format of metricname, aggregate_name, result", "id": "f231:m7"}
{"signature": "def parse_configuration(node, base_config=None):", "body": "test_config = base_config<EOL>if not test_config:<EOL><INDENT>test_config = TestConfig()<EOL><DEDENT>node = lowercase_keys(flatten_dictionaries(node))  <EOL>for key, value in node.items():<EOL><INDENT>if key == u'<STR_LIT>':<EOL><INDENT>test_config.timeout = int(value)<EOL><DEDENT>elif key == u'<STR_LIT>':<EOL><INDENT>test_config.print_bodies = safe_to_bool(value)<EOL><DEDENT>elif key == u'<STR_LIT>':<EOL><INDENT>test_config.retries = int(value)<EOL><DEDENT>elif key == u'<STR_LIT>':<EOL><INDENT>if not test_config.variable_binds:<EOL><INDENT>test_config.variable_binds = dict()<EOL><DEDENT>test_config.variable_binds.update(flatten_dictionaries(value))<EOL><DEDENT>elif key == u'<STR_LIT>':<EOL><INDENT>flat = flatten_dictionaries(value)<EOL>gen_map = dict()<EOL>for generator_name, generator_config in flat.items():<EOL><INDENT>gen = parse_generator(generator_config)<EOL>gen_map[str(generator_name)] = gen<EOL><DEDENT>test_config.generators = gen_map<EOL><DEDENT><DEDENT>return test_config<EOL>", "docstring": "Parse input config to configuration information", "id": "f231:m3"}
{"signature": "def read_file(path):", "body": "with open(path, \"<STR_LIT:r>\") as f:<EOL><INDENT>string = f.read()<EOL>f.close()<EOL><DEDENT>return string<EOL>", "docstring": "Read an input into a file, doing necessary conversions around relative path handling", "id": "f231:m4"}
{"signature": "def parse_headers(header_string):", "body": "<EOL>if not header_string:<EOL><INDENT>return list()<EOL><DEDENT>request, headers = header_string.split('<STR_LIT:\\r\\n>', <NUM_LIT:1>)<EOL>if not headers:<EOL><INDENT>return list()<EOL><DEDENT>if sys.version_info < (<NUM_LIT:2>,<NUM_LIT:7>):<EOL><INDENT>header_msg = message_from_string(headers.encode(HEADER_ENCODING))<EOL>return [(text_type(k.lower(), HEADER_ENCODING), text_type(v, HEADER_ENCODING))<EOL>for k, v in header_msg.items()]<EOL><DEDENT>else:<EOL><INDENT>header_msg = message_from_string(headers)<EOL>return [(k.lower(), v) for k, v in header_msg.items()]<EOL><DEDENT>", "docstring": "Parse a header-string into individual headers\n        Implementation based on: http://stackoverflow.com/a/5955949/95122\n        Note that headers are a list of (key, value) since duplicate headers are allowed\n\n        NEW NOTE: keys & values are unicode strings, but can only contain ISO-8859-1 characters", "id": "f231:m1"}
{"signature": "def get_readable_config(self, context=None):", "body": "string_frags = list()<EOL>string_frags.append(<EOL>\"<STR_LIT>\" + self.extractor.get_readable_config(context=context))<EOL>if isinstance(self.expected, AbstractExtractor):<EOL><INDENT>string_frags.append(\"<STR_LIT>\" +<EOL>self.expected.get_readable_config(context=context))<EOL><DEDENT>elif self.isTemplateExpected:<EOL><INDENT>string_frags.append(<EOL>'<STR_LIT>'.format(self.expected))<EOL><DEDENT>return os.linesep.join(string_frags)<EOL>", "docstring": "Get a human-readable config string", "id": "f232:c6:m0"}
{"signature": "def safe_length(var):", "body": "output = -<NUM_LIT:1><EOL>try:<EOL><INDENT>output = len(var)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>return output<EOL>", "docstring": "Exception-safe length check, returns -1 if no length on type or error", "id": "f232:m1"}
{"signature": "@staticmethod<EOL><INDENT>def parse(config):<DEDENT>", "body": "output = ComparatorValidator()<EOL>config = parsing.lowercase_keys(parsing.flatten_dictionaries(config))<EOL>output.config = config<EOL>output.extractor = _get_extractor(config)<EOL>if output.extractor is None:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>if '<STR_LIT>' not in config:  <EOL><INDENT>output.comparator_name = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>output.comparator_name = config['<STR_LIT>'].lower()<EOL><DEDENT>output.comparator = COMPARATORS[output.comparator_name]<EOL>if not output.comparator:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>expected = config['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>if isinstance(expected, basestring) or isinstance(expected, (int, long, float, complex)):<EOL><INDENT>output.expected = expected<EOL><DEDENT>elif isinstance(expected, dict):<EOL><INDENT>expected = parsing.lowercase_keys(expected)<EOL>template = expected.get('<STR_LIT>')<EOL>if template:  <EOL><INDENT>if not isinstance(template, basestring):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>output.isTemplateExpected = True<EOL>output.expected = template<EOL><DEDENT>else:  <EOL><INDENT>output.expected = _get_extractor(expected)<EOL>if not output.expected:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT>return output<EOL>", "docstring": "Create a validator that does an extract from body and applies a comparator,\n            Then does comparison vs expected value\n            Syntax sample:\n              { jsonpath_mini: 'node.child',\n                operator: 'eq',\n                expected: 'myValue'\n              }", "id": "f232:c6:m2"}
{"signature": "def register_validator(name, parse_function):", "body": "name = name.lower()<EOL>if name in VALIDATORS:<EOL><INDENT>raise Exception(\"<STR_LIT>\".format(name))<EOL><DEDENT>VALIDATORS[name] = parse_function<EOL>", "docstring": "Registers a validator for use by this library\n        Name is the string name for validator\n\n        Parse function does parse(config_node) and returns a Validator object\n        Validator functions have signature:\n            validate(response_body, context=None) - context is a bindings.Context object\n\n        Validators return true or false and optionally can return a Failure instead of false\n        This allows for passing more details", "id": "f232:m6"}
{"signature": "def _get_extractor(config_dict):", "body": "extractor = None<EOL>extract_config = None<EOL>for key, value in config_dict.items():<EOL><INDENT>if key in EXTRACTORS:<EOL><INDENT>return parse_extractor(key, value)<EOL><DEDENT><DEDENT>else:  <EOL><INDENT>raise Exception(<EOL>'<STR_LIT>'.format(config_dict))<EOL><DEDENT>", "docstring": "Utility function, get an extract function for a single valid extractor name in config\n        and error if more than one or none", "id": "f232:m3"}
{"signature": "def add_move(move):", "body": "setattr(_MovedItems, move.name, move)<EOL>", "docstring": "Add an item to six.moves.", "id": "f235:m2"}
{"signature": "def python_2_unicode_compatible(klass):", "body": "if PY2:<EOL><INDENT>if '<STR_LIT>' not in klass.__dict__:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" %<EOL>klass.__name__)<EOL><DEDENT>klass.__unicode__ = klass.__str__<EOL>klass.__str__ = lambda self: self.__unicode__().encode('<STR_LIT:utf-8>')<EOL><DEDENT>return klass<EOL>", "docstring": "A decorator that defines __unicode__ and __str__ methods under Python 2.\nUnder Python 3 it does nothing.\n\nTo support Python 2 and 3 with a single code base, define a __str__ method\nreturning text and apply this decorator to the class.", "id": "f235:m9"}
{"signature": "def get_code(self, fullname):", "body": "self.__get_module(fullname)  <EOL>return None<EOL>", "docstring": "Return None\n\n        Required, if is_package is implemented", "id": "f235:c4:m7"}
{"signature": "def remove_move(name):", "body": "try:<EOL><INDENT>delattr(_MovedItems, name)<EOL><DEDENT>except AttributeError:<EOL><INDENT>try:<EOL><INDENT>del moves.__dict__[name]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise AttributeError(\"<STR_LIT>\" % (name,))<EOL><DEDENT><DEDENT>", "docstring": "Remove item from six.moves.", "id": "f235:m3"}
{"signature": "def with_metaclass(meta, *bases):", "body": "<EOL>class metaclass(meta):<EOL><INDENT>def __new__(cls, name, this_bases, d):<EOL><INDENT>return meta(name, bases, d)<EOL><DEDENT><DEDENT>return type.__new__(metaclass, '<STR_LIT>', (), {})<EOL>", "docstring": "Create a base class with a metaclass.", "id": "f235:m7"}
{"signature": "def setup(self, input, is_file=False, is_template_path=False, is_template_content=False):", "body": "if not isinstance(input, basestring):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if is_file:<EOL><INDENT>input = os.path.abspath(input)<EOL><DEDENT>self.content = input<EOL>self.is_file = is_file<EOL>self.is_template_path = is_template_path<EOL>self.is_template_content = is_template_content<EOL>", "docstring": "Self explanatory, input is inline content or file path.", "id": "f239:c0:m3"}
{"signature": "def get_content(self, context=None):", "body": "if self.is_file:<EOL><INDENT>path = self.content<EOL>if self.is_template_path and context:<EOL><INDENT>path = string.Template(path).safe_substitute(<EOL>context.get_values())<EOL><DEDENT>data = None<EOL>with open(path, '<STR_LIT:r>') as f:<EOL><INDENT>data = f.read()<EOL><DEDENT>if self.is_template_content and context:<EOL><INDENT>return string.Template(data).safe_substitute(context.get_values())<EOL><DEDENT>else:<EOL><INDENT>return data<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if self.is_template_content and context:<EOL><INDENT>return safe_substitute_unicode_template(self.content, context.get_values())<EOL><DEDENT>else:<EOL><INDENT>return self.content<EOL><DEDENT><DEDENT>", "docstring": "Does all context binding and pathing to get content, templated out", "id": "f239:c0:m1"}
{"signature": "def create_noread_version(self):", "body": "if not self.is_file or self.is_template_path:<EOL><INDENT>return self<EOL><DEDENT>output = ContentHandler()<EOL>output.is_template_content = self.is_template_content<EOL>with open(self.content, '<STR_LIT:r>') as f:<EOL><INDENT>output.content = f.read()<EOL><DEDENT>return output<EOL>", "docstring": "Read file content if it is static and return content handler with no I/O", "id": "f239:c0:m2"}
{"signature": "def factory_fixed_sequence(values):", "body": "def seq_generator():<EOL><INDENT>my_list = list(values)<EOL>i = <NUM_LIT:0><EOL>while(True):<EOL><INDENT>yield my_list[i]<EOL>if i == len(my_list):<EOL><INDENT>i = <NUM_LIT:0><EOL><DEDENT><DEDENT><DEDENT>return seq_generator<EOL>", "docstring": "Return a generator that runs through a list of values in order, looping after end", "id": "f240:m4"}
{"signature": "def generator_random_int32():", "body": "rand = random.Random()<EOL>while (True):<EOL><INDENT>yield random.randint(<NUM_LIT:0>, INT32_MAX_VALUE)<EOL><DEDENT>", "docstring": "Random integer generator for up to 32-bit signed ints", "id": "f240:m2"}
{"signature": "def parse_generator(configuration):", "body": "configuration = lowercase_keys(flatten_dictionaries(configuration))<EOL>gen_type = str(configuration.get(u'<STR_LIT:type>')).lower()<EOL>if gen_type not in GENERATOR_TYPES:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'.format(gen_type))<EOL><DEDENT>if gen_type == u'<STR_LIT>':<EOL><INDENT>return factory_env_variable(configuration[u'<STR_LIT>'])()<EOL><DEDENT>elif gen_type == u'<STR_LIT>':<EOL><INDENT>return factory_env_string(configuration[u'<STR_LIT:string>'])()<EOL><DEDENT>elif gen_type == u'<STR_LIT>':<EOL><INDENT>start = configuration.get('<STR_LIT:start>')<EOL>increment = configuration.get('<STR_LIT>')<EOL>if not start:<EOL><INDENT>start = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>start = int(start)<EOL><DEDENT>if not increment:<EOL><INDENT>increment = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>increment = int(increment)<EOL><DEDENT>return factory_generate_ids(start, increment)()<EOL><DEDENT>elif gen_type == u'<STR_LIT>':<EOL><INDENT>return generator_random_int32()<EOL><DEDENT>elif gen_type == u'<STR_LIT>':<EOL><INDENT>return parse_random_text_generator(configuration)<EOL><DEDENT>elif gen_type in GENERATOR_TYPES:<EOL><INDENT>return GENERATOR_PARSING[gen_type](configuration)<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\".format('<STR_LIT>'))<EOL><DEDENT>", "docstring": "Parses a configuration built from yaml and returns a generator\n        Configuration should be a map", "id": "f240:m12"}
{"signature": "def parse_fixed_sequence(config):", "body": "vals = config['<STR_LIT>']<EOL>if not vals:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not isinstance(vals, list):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return factory_fixed_sequence(vals)()<EOL>", "docstring": "Parse fixed sequence string", "id": "f240:m5"}
{"signature": "def factory_generate_ids(starting_id=<NUM_LIT:1>, increment=<NUM_LIT:1>):", "body": "def generate_started_ids():<EOL><INDENT>val = starting_id<EOL>local_increment = increment<EOL>while(True):<EOL><INDENT>yield val<EOL>val += local_increment<EOL><DEDENT><DEDENT>return generate_started_ids<EOL>", "docstring": "Return function generator for ids starting at starting_id\n        Note: needs to be called with () to make generator", "id": "f240:m0"}
{"signature": "def factory_choice_generator(values):", "body": "def choice_generator():<EOL><INDENT>my_list = list(values)<EOL>rand = random.Random()<EOL>while(True):<EOL><INDENT>yield random.choice(my_list)<EOL><DEDENT><DEDENT>return choice_generator<EOL>", "docstring": "Return a generator that picks values from a list randomly", "id": "f240:m6"}
{"signature": "def run_configure(self, key, value, configurable, validator_func=None, converter_func=None, store_func=None, *args, **kwargs):", "body": "if validator_func and not validator(value):<EOL><INDENT>raise TypeError(\"<STR_LIT>\".format(value))<EOL><DEDENT>storeable = value<EOL>if converter_func:<EOL><INDENT>storeable = converter_func(value)<EOL><DEDENT>if store_func:<EOL><INDENT>store_func(configurable, key, storeable)<EOL><DEDENT>else:<EOL><INDENT>configurable.setattr(configurable, key, value)<EOL><DEDENT>", "docstring": "Run a single configuration element\n            Run a validator on the value, if supplied\n            Run a converter_funct to turn the value into something to storeable:\n                converter_func takes params (value) at least and throws exception if failed\n            If a  store_func is supplied, use that to store the option\n              store_func needs to take params (object, key, value, args, kwargs)\n            If store_func NOT supplied we do a setattr on object", "id": "f241:c0:m0"}
{"signature": "def encode_unicode_bytes(my_string):", "body": "if not isinstance(my_string, basestring):<EOL><INDENT>my_string = repr(my_string)<EOL><DEDENT>if PYTHON_MAJOR_VERSION == <NUM_LIT:2>:<EOL><INDENT>if isinstance(my_string, str):<EOL><INDENT>return my_string<EOL><DEDENT>elif isinstance(my_string, unicode):<EOL><INDENT>return my_string.encode('<STR_LIT:utf-8>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if isinstance(my_string, str):<EOL><INDENT>return my_string.encode('<STR_LIT:utf-8>')<EOL><DEDENT>elif isinstance(my_string, bytes):<EOL><INDENT>return my_string<EOL><DEDENT><DEDENT>", "docstring": "Shim function, converts Unicode to UTF-8 encoded bytes regardless of the source format\n        Intended for python 3 compatibility mode, and b/c PyCurl only takes raw bytes", "id": "f241:m0"}
{"signature": "def safe_to_json(in_obj):", "body": "if isinstance(in_obj, bytearray):<EOL><INDENT>return str(in_obj)<EOL><DEDENT>if hasattr(in_obj, '<STR_LIT>'):<EOL><INDENT>return in_obj.__dict__<EOL><DEDENT>try:<EOL><INDENT>return str(in_obj)<EOL><DEDENT>except:<EOL><INDENT>return repr(in_obj)<EOL><DEDENT>", "docstring": "Safely get dict from object if present for json dumping", "id": "f241:m2"}
{"signature": "def configure(self, configs, configurable, handler, *args, **kwargs):", "body": "for key, value in configs.items():<EOL><INDENT>handler[key] = config_options<EOL>self.run_configure(value, configurable)<EOL><DEDENT>", "docstring": "Use the configs and configurable to parse", "id": "f241:c0:m1"}
{"signature": "def is_context_modifier(self):", "body": "return self.variable_binds or self.generator_binds or self.extract_binds<EOL>", "docstring": "Returns true if context can be modified by this test\n            (disallows caching of templated test bodies)", "id": "f242:c0:m13"}
{"signature": "def configure_curl(self, timeout=DEFAULT_TIMEOUT, context=None, curl_handle=None):", "body": "if curl_handle:<EOL><INDENT>curl = curl_handle<EOL>try:  <EOL><INDENT>curl.getinfo(curl.HTTP_CODE)                <EOL>curl.reset()<EOL>curl.setopt(curl.COOKIELIST, \"<STR_LIT>\")<EOL><DEDENT>except pycurl.error:<EOL><INDENT>curl = pycurl.Curl()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>curl = pycurl.Curl()<EOL><DEDENT>curl.setopt(curl.URL, str(self.url))<EOL>curl.setopt(curl.TIMEOUT, timeout)<EOL>is_unicoded = False<EOL>bod = self.body<EOL>if isinstance(bod, text_type):  <EOL><INDENT>bod = bod.encode('<STR_LIT>')<EOL>is_unicoded = True<EOL><DEDENT>if bod and len(bod) > <NUM_LIT:0>:<EOL><INDENT>curl.setopt(curl.READFUNCTION, MyIO(bod).read)<EOL><DEDENT>if self.auth_username and self.auth_password:<EOL><INDENT>curl.setopt(pycurl.USERPWD, <EOL>parsing.encode_unicode_bytes(self.auth_username) + b'<STR_LIT::>' + <EOL>parsing.encode_unicode_bytes(self.auth_password))<EOL>if self.auth_type:<EOL><INDENT>curl.setopt(pycurl.HTTPAUTH, self.auth_type)<EOL><DEDENT><DEDENT>if self.method == u'<STR_LIT:POST>':<EOL><INDENT>curl.setopt(HTTP_METHODS[u'<STR_LIT:POST>'], <NUM_LIT:1>)<EOL>if bod is not None:<EOL><INDENT>curl.setopt(pycurl.POSTFIELDSIZE, len(bod))<EOL><DEDENT>else:<EOL><INDENT>curl.setopt(pycurl.POSTFIELDSIZE, <NUM_LIT:0>)<EOL><DEDENT><DEDENT>elif self.method == u'<STR_LIT>':<EOL><INDENT>curl.setopt(HTTP_METHODS[u'<STR_LIT>'], <NUM_LIT:1>)<EOL>if bod is not None:<EOL><INDENT>curl.setopt(pycurl.INFILESIZE, len(bod))<EOL><DEDENT>else:<EOL><INDENT>curl.setopt(pycurl.INFILESIZE, <NUM_LIT:0>)<EOL><DEDENT><DEDENT>elif self.method == u'<STR_LIT>':<EOL><INDENT>curl.setopt(curl.POSTFIELDS, bod)<EOL>curl.setopt(curl.CUSTOMREQUEST, '<STR_LIT>')<EOL>if bod is not None:<EOL><INDENT>curl.setopt(pycurl.INFILESIZE, len(bod))<EOL><DEDENT>else:<EOL><INDENT>curl.setopt(pycurl.INFILESIZE, <NUM_LIT:0>)<EOL><DEDENT><DEDENT>elif self.method == 
u'<STR_LIT>':<EOL><INDENT>curl.setopt(curl.CUSTOMREQUEST, '<STR_LIT>')<EOL>if bod is not None:<EOL><INDENT>curl.setopt(pycurl.POSTFIELDS, bod)<EOL>curl.setopt(pycurl.POSTFIELDSIZE, len(bod))<EOL><DEDENT><DEDENT>elif self.method == u'<STR_LIT>':<EOL><INDENT>curl.setopt(curl.NOBODY, <NUM_LIT:1>)<EOL>curl.setopt(curl.CUSTOMREQUEST, '<STR_LIT>')<EOL><DEDENT>elif self.method and self.method.upper() != '<STR_LIT:GET>':  <EOL><INDENT>curl.setopt(curl.CUSTOMREQUEST, self.method.upper())<EOL>if bod is not None:<EOL><INDENT>curl.setopt(pycurl.POSTFIELDS, bod)<EOL>curl.setopt(pycurl.POSTFIELDSIZE, len(bod))<EOL><DEDENT><DEDENT>head = self.get_headers(context=context)<EOL>head = copy.copy(head)  <EOL>if is_unicoded and u'<STR_LIT>' in head.keys():<EOL><INDENT>content = head[u'<STR_LIT>']<EOL>if u'<STR_LIT>' not in content:<EOL><INDENT>head[u'<STR_LIT>'] = content + u'<STR_LIT>'<EOL><DEDENT><DEDENT>if head:<EOL><INDENT>headers = [str(headername) + '<STR_LIT::>' + str(headervalue)<EOL>for headername, headervalue in head.items()]<EOL><DEDENT>else:<EOL><INDENT>headers = list()<EOL><DEDENT>headers.append(\"<STR_LIT>\")<EOL>headers.append(\"<STR_LIT>\")<EOL>curl.setopt(curl.HTTPHEADER, headers)<EOL>if self.curl_options:<EOL><INDENT>filterfunc = lambda x: x[<NUM_LIT:0>] is not None and x[<NUM_LIT:1>] is not None  <EOL>for (key, value) in ifilter(filterfunc, self.curl_options.items()):<EOL><INDENT>curl.setopt(getattr(curl, key), value)<EOL><DEDENT><DEDENT>return curl<EOL>", "docstring": "Create and mostly configure a curl object for test, reusing existing if possible", "id": "f242:c0:m19"}
{"signature": "def realize(self, context=None):", "body": "if not self.is_dynamic() or context is None:<EOL><INDENT>return self<EOL><DEDENT>else:<EOL><INDENT>selfcopy = self.ninja_copy()<EOL>selfcopy.templates = None<EOL>if isinstance(self._body, ContentHandler):<EOL><INDENT>selfcopy._body = self._body.get_content(context)<EOL><DEDENT>selfcopy._url = self.get_url(context=context)<EOL>selfcopy._headers = self.get_headers(context=context)<EOL>return selfcopy<EOL><DEDENT>", "docstring": "Return a fully-templated test object, for configuring curl\n            Warning: this is a SHALLOW copy, mutation of fields will cause problems!\n            Can accept a None context", "id": "f242:c0:m15"}
{"signature": "def set_body(self, value):", "body": "self._body = value<EOL>", "docstring": "Set body, directly", "id": "f242:c0:m5"}
{"signature": "def set_headers(self, value, isTemplate=False):", "body": "if isTemplate:<EOL><INDENT>self.set_template(self.NAME_HEADERS, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.del_template(self.NAME_HEADERS)<EOL><DEDENT>self._headers = value<EOL>", "docstring": "Set headers, passing flag if using a template", "id": "f242:c0:m9"}
{"signature": "def set_url(self, value, isTemplate=False):", "body": "if isTemplate:<EOL><INDENT>self.set_template(self.NAME_URL, value)<EOL><DEDENT>else:<EOL><INDENT>self.del_template(self.NAME_URL)<EOL><DEDENT>self._url = value<EOL>", "docstring": "Set URL, passing flag if using a template", "id": "f242:c0:m7"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(MailChimp, self).__init__(*args, **kwargs)<EOL>self.root = self.api_root = Root(self)<EOL>self.authorized_apps = AuthorizedApps(self)<EOL>self.automations = Automations(self)<EOL>self.automations.actions = AutomationActions(self)<EOL>self.automations.emails = AutomationEmails(self)<EOL>self.automations.emails.actions = AutomationEmailActions(self)<EOL>self.automations.emails.queues = AutomationEmailQueues(self)<EOL>self.automations.removed_subscribers = AutomationRemovedSubscribers(self)<EOL>self.batches = self.batch_operations = BatchOperations(self)<EOL>self.batch_webhooks = BatchWebhooks(self)<EOL>self.campaign_folders = CampaignFolders(self)<EOL>self.campaigns = Campaigns(self)<EOL>self.campaigns.actions = CampaignActions(self)<EOL>self.campaigns.content = CampaignContent(self)<EOL>self.campaigns.feedback = CampaignFeedback(self)<EOL>self.campaigns.send_checklist = CampaignSendChecklist(self)<EOL>self.conversations = Conversations(self)<EOL>self.conversations.messages = ConversationMessages(self)<EOL>self.stores = self.ecommerce = Stores(self)<EOL>self.stores.carts = StoreCarts(self)<EOL>self.stores.carts.lines = StoreCartLines(self)<EOL>self.stores.customers = StoreCustomers(self)<EOL>self.stores.orders = StoreOrders(self)<EOL>self.stores.orders.lines = StoreOrderLines(self)<EOL>self.stores.products = StoreProducts(self)<EOL>self.stores.products.images = StoreProductImages(self)<EOL>self.stores.products.variants = StoreProductVariants(self)<EOL>self.stores.promo_rules = StorePromoRules(self)<EOL>self.stores.promo_codes = StorePromoCodes(self)<EOL>self.files = FileManagerFiles(self)<EOL>self.folders = FileManagerFolders(self)<EOL>self.lists = Lists(self)<EOL>self.lists.abuse_reports = ListAbuseReports(self)<EOL>self.lists.activity = ListActivity(self)<EOL>self.lists.clients = ListClients(self)<EOL>self.lists.growth_history = ListGrowthHistory(self)<EOL>self.lists.interest_categories = 
ListInterestCategories(self)<EOL>self.lists.interest_categories.interests = ListInterestCategoryInterest(self)<EOL>self.lists.members = ListMembers(self)<EOL>self.lists.members.activity = ListMemberActivity(self)<EOL>self.lists.members.goals = ListMemberGoals(self)<EOL>self.lists.members.notes = ListMemberNotes(self)<EOL>self.lists.members.tags = ListMemberTags(self)<EOL>self.lists.merge_fields = ListMergeFields(self)<EOL>self.lists.segments = ListSegments(self)<EOL>self.lists.segments.members = ListSegmentMembers(self)<EOL>self.lists.signup_forms = ListSignupForms(self)<EOL>self.lists.webhooks = ListWebhooks(self)<EOL>self.ping = Ping(self)<EOL>self.reports = Reports(self)<EOL>self.reports.abuse_reports = ReportCampaignAbuseReports(self)<EOL>self.reports.advice = ReportCampaignAdvice(self)<EOL>self.reports.click_details = ReportClickDetailReports(self)<EOL>self.reports.click_details.members = ReportClickDetailMembers(self)<EOL>self.reports.domain_performance = ReportDomainPerformance(self)<EOL>self.reports.eepurl = ReportEepURL(self)<EOL>self.reports.email_activity = ReportEmailActivity(self)<EOL>self.reports.locations = ReportLocations(self)<EOL>self.reports.sent_to = ReportSentTo(self)<EOL>self.reports.subreports = ReportSubReports(self)<EOL>self.reports.unsubscribes = ReportUnsubscribes(self)<EOL>self.reports.open_details = ReportOpenDetails(self)<EOL>self.reports.google_analytics = ReportGoogleAnalytics(self)<EOL>self.search_campaigns = SearchCampaigns(self)<EOL>self.search_members = SearchMembers(self)<EOL>self.template_folders = TemplateFolders(self)<EOL>self.templates = Templates(self)<EOL>self.templates.default_content = TemplateDefaultContent(self)<EOL>", "docstring": "Initialize the class with your api_key and user_id and attach all of\nthe endpoints", "id": "f245:c0:m0"}
{"signature": "def check_subscriber_hash(potential_hash):", "body": "if re.match(r\"<STR_LIT>\", potential_hash):<EOL><INDENT>return potential_hash<EOL><DEDENT>else:<EOL><INDENT>return get_subscriber_hash(potential_hash)<EOL><DEDENT>", "docstring": "Check the passed value to see if it matches a 32 character hex number that\nMD5 generates as output, or compute that value assuming that the input is\nan email address.\n\n:param potential_hash: A value to be passed to any of the endpoints that\nexpect an MD5 of an email address\n:type potential_hash: :py:class:`str`\n:returns: A valid MD5 hash in hex\n:rtype: :py:class:`str`", "id": "f246:m1"}
{"signature": "def check_email(email):", "body": "if not re.match(r\"<STR_LIT>\", email):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return<EOL>", "docstring": "Function that verifies that the string passed is a valid email address.\n\nRegex for email validation based on MailChimp limits:\nhttp://kb.mailchimp.com/accounts/management/international-characters-in-mailchimp\n\n:param email: The potential email address\n:type email: :py:class:`str`\n:return: Nothing", "id": "f246:m2"}
{"signature": "def _iterate(self, url, **queryparams):", "body": "<EOL>if '<STR_LIT>' in queryparams:<EOL><INDENT>if '<STR_LIT>' not in queryparams['<STR_LIT>'].split('<STR_LIT:U+002C>'):<EOL><INDENT>queryparams['<STR_LIT>'] += '<STR_LIT>'<EOL><DEDENT><DEDENT>queryparams.pop(\"<STR_LIT>\", None)<EOL>queryparams.pop(\"<STR_LIT:count>\", None)<EOL>result = self._mc_client._get(url=url, offset=<NUM_LIT:0>, count=<NUM_LIT:1000>, **queryparams)<EOL>total = result['<STR_LIT>']<EOL>if total > <NUM_LIT:1000>:<EOL><INDENT>for offset in range(<NUM_LIT:1>, int(total / <NUM_LIT:1000>) + <NUM_LIT:1>):<EOL><INDENT>result = merge_results(result, self._mc_client._get(<EOL>url=url,<EOL>offset=int(offset * <NUM_LIT:1000>),<EOL>count=<NUM_LIT:1000>,<EOL>**queryparams<EOL>))<EOL><DEDENT>return result<EOL><DEDENT>else:  <EOL><INDENT>return result<EOL><DEDENT>", "docstring": "Iterate over all pages for the given url. Feed in the result of self._build_path as the url.\n\n:param url: The url of the endpoint\n:type url: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f247:c0:m2"}
{"signature": "def get(self, folder_id, **queryparams):", "body": "self.folder_id = folder_id<EOL>return self._mc_client._get(url=self._build_path(folder_id), **queryparams)<EOL>", "docstring": "Get information about a specific folder used to organize templates.\n\n:param folder_id: The unique id for the File Manager folder.\n:type folder_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f248:c0:m3"}
{"signature": "def delete(self, folder_id):", "body": "self.folder_id = folder_id<EOL>return self._mc_client._delete(url=self._build_path(folder_id))<EOL>", "docstring": "Delete a specific template folder, and mark all the templates in the\nfolder as \u2018unfiled\u2019.\n\n:param folder_id: The unique id for the File Manager folder.\n:type folder_id: :py:class:`str`", "id": "f248:c0:m5"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(TemplateFolders, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.folder_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f248:c0:m0"}
{"signature": "def all(self, get_all=False, **queryparams):", "body": "self.folder_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(), **queryparams)<EOL><DEDENT>", "docstring": "Get all folders used to organize templates.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f248:c0:m2"}
{"signature": "def create(self, data):", "body": "if '<STR_LIT:name>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>response = self._mc_client._post(url=self._build_path(), data=data)<EOL>if response is not None:<EOL><INDENT>self.folder_id = response['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.folder_id = None<EOL><DEDENT>return response<EOL>", "docstring": "Create a new template folder.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"name\": string*\n}", "id": "f248:c0:m1"}
{"signature": "def get(self, workflow_id, **queryparams):", "body": "self.workflow_id = workflow_id<EOL>return self._mc_client._get(url=self._build_path(workflow_id), **queryparams)<EOL>", "docstring": "Get a summary of an individual Automation workflow\u2019s settings and\ncontent. The trigger_settings object returns information for the first\nemail in the workflow.\n\n:param workflow_id: The unique id for the Automation workflow\n:type workflow_id: :py:class:`str`\n:param queryparams: the query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f249:c0:m2"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(Automations, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.workflow_id = None<EOL>self.actions = AutomationActions(self)<EOL>self.emails = AutomationEmails(self)<EOL>self.removed_subscribers = AutomationRemovedSubscribers(self)<EOL>", "docstring": "Initialize the endpoint", "id": "f249:c0:m0"}
{"signature": "def all(self, get_all=False, **queryparams):", "body": "self.workflow_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(), **queryparams)<EOL><DEDENT>", "docstring": "Get a summary of an account\u2019s Automations.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: the query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f249:c0:m1"}
{"signature": "def get(self, workflow_id, email_id):", "body": "self.workflow_id = workflow_id<EOL>self.email_id = email_id<EOL>return self._mc_client._get(url=self._build_path(workflow_id, '<STR_LIT>', email_id))<EOL>", "docstring": "Get information about an individual Automation workflow email.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param email_id: The unique id for the Automation workflow email.\n:type email_id: :py:class:`str`", "id": "f250:c0:m2"}
{"signature": "def all(self, workflow_id, get_all=False, **queryparams):", "body": "self.workflow_id = workflow_id<EOL>self.email_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(workflow_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(workflow_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get a summary of the emails in an Automation workflow.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: the query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f250:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(AutomationEmails, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.workflow_id = None<EOL>self.email_id = None<EOL>self.actions = AutomationEmailActions(self)<EOL>self.queues = AutomationEmailQueues(self)<EOL>", "docstring": "Initialize the endpoint", "id": "f250:c0:m0"}
{"signature": "def all(self, workflow_id, email_id):", "body": "self.workflow_id = workflow_id<EOL>self.email_id = email_id<EOL>self.subscriber_hash = None<EOL>return self._mc_client._get(url=self._build_path(workflow_id, '<STR_LIT>', email_id, '<STR_LIT>'))<EOL>", "docstring": "Get information about an Automation email queue.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param email_id: The unique id for the Automation workflow email.\n:type email_id: :py:class:`str`", "id": "f251:c0:m2"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(AutomationEmailQueues, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.workflow_id = None<EOL>self.email_id = None<EOL>self.subscriber_hash = None<EOL>", "docstring": "Initialize the endpoint", "id": "f251:c0:m0"}
{"signature": "def get(self, **queryparams):", "body": "return self._mc_client._get(url=self._build_path(), **queryparams)<EOL>", "docstring": "Search all campaigns for the specified query terms.\n\n:param queryparams: The query string parameters\nqueryparams['fields'] = array\nqueryparams['exclude_fields'] = array\nqueryparams['query'] = string\nqueryparams['snip_start'] = string\nqueryparams['snip_end'] = string\nqueryparams['offset'] = integer", "id": "f252:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(TemplateDefaultContent, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.template_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f253:c0:m0"}
{"signature": "def delete(self, template_id):", "body": "self.template_id = template_id<EOL>return self._mc_client._delete(url=self._build_path(template_id))<EOL>", "docstring": "Delete a specific template.\n\n:param template_id: The unique id for the template.\n:type template_id: :py:class:`str`", "id": "f254:c0:m5"}
{"signature": "def all(self, get_all=False, **queryparams):", "body": "self.template_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(), **queryparams)<EOL><DEDENT>", "docstring": "Get a list of an account\u2019s available templates.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['created_by'] = string\nqueryparams['before_created_at'] = string\nqueryparams['since_created_at'] = string\nqueryparams['type'] = string\nqueryparams['folder_id'] = string", "id": "f254:c0:m2"}
{"signature": "def get(self, list_id, segment_id):", "body": "return self._mc_client._get(url=self._build_path(list_id, '<STR_LIT>', segment_id))<EOL>", "docstring": "returns the specified list segment.", "id": "f256:c0:m2"}
{"signature": "def update(self, list_id, segment_id, data):", "body": "return self._mc_client._patch(url=self._build_path(list_id, '<STR_LIT>', segment_id), data=data)<EOL>", "docstring": "updates an existing list segment.", "id": "f256:c0:m3"}
{"signature": "def create(self, list_id, data):", "body": "return self._mc_client._post(url=self._build_path(list_id, '<STR_LIT>'), data=data)<EOL>", "docstring": "adds a new segment to the list.", "id": "f256:c0:m5"}
{"signature": "def all(self, get_all=False, **queryparams):", "body": "self.file_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(), **queryparams)<EOL><DEDENT>", "docstring": "Get a list of available images and files stored in the File Manager for the account.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['type'] = string\nqueryparams['created_by'] = string\nqueryparams['before_created_at'] = string\nqueryparams['since_created_at'] = string\nqueryparams['sort_field'] = string\nqueryparams['sort_dir'] = string", "id": "f257:c0:m2"}
{"signature": "def delete(self, file_id):", "body": "self.file_id = file_id<EOL>return self._mc_client._delete(url=self._build_path(file_id))<EOL>", "docstring": "Remove a specific file from the File Manager.\n\n:param file_id: The unique id for the File Manager file.\n:type file_id: :py:class:`str`", "id": "f257:c0:m5"}
{"signature": "def update(self, file_id, data):", "body": "self.file_id = file_id<EOL>if '<STR_LIT:name>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>return self._mc_client._patch(url=self._build_path(file_id), data=data)<EOL>", "docstring": "Update a file in the File Manager.\n\n:param file_id: The unique id for the File Manager file.\n:type file_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"name\": string*,\n    \"file_data\": string*\n}", "id": "f257:c0:m4"}
{"signature": "def get(self, file_id, **queryparams):", "body": "self.file_id = file_id<EOL>return self._mc_client._get(url=self._build_path(file_id), **queryparams)<EOL>", "docstring": "Get information about a specific file in the File Manager.\n\n:param file_id: The unique id for the File Manager file.\n:type file_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f257:c0:m3"}
{"signature": "def all(self, list_id, segment_id, get_all=False, **queryparams):", "body": "self.list_id = list_id<EOL>self.segment_id = segment_id<EOL>self.subscriber_hash = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(list_id, '<STR_LIT>', segment_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(list_id, '<STR_LIT>', segment_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get information about members in a saved segment.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param segment_id: The unique id for the segment.\n:type segment_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f258:c0:m2"}
{"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>self.subscriber_hash = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get a list of member\u2019s subscriber activity in a specific campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f259:c0:m1"}
{"signature": "def create(self, data):", "body": "if '<STR_LIT:url>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>response = self._mc_client._post(url=self._build_path(), data=data)<EOL>if response is not None:<EOL><INDENT>self.batch_webhook_id = response['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.batch_webhook_id = None<EOL><DEDENT>return response<EOL>", "docstring": "Configure a webhook that will fire whenever any batch request\ncompletes processing.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"url\": string*\n}", "id": "f260:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportSubReports, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.campaign_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f261:c0:m0"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportCampaignAbuseReports, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.campaign_id = None<EOL>self.report_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f263:c0:m0"}
{"signature": "def all(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>self.report_id = None<EOL>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL>", "docstring": "Get a list of abuse complaints for a specific campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f263:c0:m1"}
{"signature": "def get(self, app_id, **queryparams):", "body": "self.app_id = app_id<EOL>return self._mc_client._get(url=self._build_path(app_id), **queryparams)<EOL>", "docstring": "Get information about a specific authorized application\n\n:param app_id: The unique id for the connected authorized application\n:type app_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f264:c0:m3"}
{"signature": "def all(self, get_all=False, **queryparams):", "body": "self.app_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(), **queryparams)<EOL><DEDENT>", "docstring": "Get a list of an account\u2019s registered, connected applications.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f264:c0:m2"}
{"signature": "def create(self, data):", "body": "if '<STR_LIT:id>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT:name>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if not re.match(r\"<STR_LIT>\", data['<STR_LIT>']):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>response = self._mc_client._post(url=self._build_path(), data=data)<EOL>if response is not None:<EOL><INDENT>self.store_id = response['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.store_id = None<EOL><DEDENT>return response<EOL>", "docstring": "Add a new store to your MailChimp account.\n\nError checking on the currency code verifies that it is in the correct\nthree-letter, all-caps format as specified by ISO 4217 but does not\ncheck that it is a valid code as the list of valid codes changes over\ntime.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"id\": string*,\n    \"list_id\": string*,\n    \"name\": string*,\n    \"currency_code\": string*\n}", "id": "f266:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(Stores, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.store_id = None<EOL>self.carts = StoreCarts(self)<EOL>self.customers = StoreCustomers(self)<EOL>self.orders = StoreOrders(self)<EOL>self.products = StoreProducts(self)<EOL>", "docstring": "Initialize the endpoint", "id": "f266:c0:m0"}
{"signature": "def delete(self, store_id):", "body": "self.store_id = store_id<EOL>return self._mc_client._delete(url=self._build_path(store_id))<EOL>", "docstring": "Delete a store. Deleting a store will also delete any associated\nsubresources, including Customers, Orders, Products, and Carts.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`", "id": "f266:c0:m5"}
{"signature": "def all(self, get_all=False, **queryparams):", "body": "self.store_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(), **queryparams)<EOL><DEDENT>", "docstring": "Get information about all stores in the account.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f266:c0:m2"}
{"signature": "def all(self, campaign_id, link_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>self.link_id = link_id<EOL>self.subscriber_hash = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(campaign_id, '<STR_LIT>', link_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(<EOL>url=self._build_path(campaign_id, '<STR_LIT>', link_id, '<STR_LIT>'),<EOL>**queryparams<EOL>)<EOL><DEDENT>", "docstring": "Get information about list members who clicked on a specific link in a\ncampaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param link_id: The id for the link.\n:type link_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f267:c0:m1"}
{"signature": "def update(self, store_id, cart_id, data):", "body": "self.store_id = store_id<EOL>self.cart_id = cart_id<EOL>return self._mc_client._patch(url=self._build_path(store_id, '<STR_LIT>', cart_id), data=data)<EOL>", "docstring": "Update a specific cart.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param cart_id: The id for the cart.\n:type cart_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f268:c0:m4"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreCarts, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.store_id = None<EOL>self.cart_id = None<EOL>self.lines = StoreCartLines(self)<EOL>", "docstring": "Initialize the endpoint", "id": "f268:c0:m0"}
{"signature": "def all(self, store_id, get_all=False, **queryparams):", "body": "self.store_id = store_id<EOL>self.cart_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(store_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(store_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get information about a store\u2019s carts.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f268:c0:m2"}
{"signature": "def get(self, store_id, cart_id, **queryparams):", "body": "self.store_id = store_id<EOL>self.cart_id = cart_id<EOL>return self._mc_client._get(url=self._build_path(store_id, '<STR_LIT>', cart_id), **queryparams)<EOL>", "docstring": "Get information about a specific cart.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param cart_id: The id for the cart.\n:type cart_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f268:c0:m3"}
{"signature": "def update(self, store_id, product_id, data):", "body": "self.store_id = store_id<EOL>self.product_id = product_id<EOL>return self._mc_client._patch(<EOL>url=self._build_path(store_id, '<STR_LIT>', product_id),<EOL>data=data<EOL>)<EOL>", "docstring": "Update a product.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f270:c0:m4"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreProducts, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.store_id = None<EOL>self.product_id = None<EOL>self.images = StoreProductImages(self)<EOL>self.variants = StoreProductVariants(self)<EOL>", "docstring": "Initialize the endpoint", "id": "f270:c0:m0"}
{"signature": "def delete(self, store_id, product_id):", "body": "self.store_id = store_id<EOL>self.product_id = product_id<EOL>return self._mc_client._delete(url=self._build_path(store_id, '<STR_LIT>', product_id))<EOL>", "docstring": "Delete a product.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`", "id": "f270:c0:m5"}
{"signature": "def all(self, list_id, get_all=False, **queryparams):", "body": "self.list_id = list_id<EOL>self.month = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(list_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(list_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get a month-by-month summary of a specific list\u2019s growth activity.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f271:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(Conversations, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.conversation_id = None<EOL>self.messages = ConversationMessages(self)<EOL>", "docstring": "Initialize the endpoint", "id": "f272:c0:m0"}
{"signature": "def get(self, conversation_id, **queryparams):", "body": "self.conversation_id = conversation_id<EOL>return self._mc_client._get(url=self._build_path(conversation_id), **queryparams)<EOL>", "docstring": "Get details about an individual conversation.\n\n:param conversation_id: The unique id for the conversation.\n:type conversation_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f272:c0:m2"}
{"signature": "def pause(self, workflow_id):", "body": "self.workflow_id = workflow_id<EOL>return self._mc_client._post(url=self._build_path(workflow_id, '<STR_LIT>'))<EOL>", "docstring": "Pause all emails in a specific Automation workflow.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`", "id": "f274:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportLocations, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.campaign_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f277:c0:m0"}
{"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get top open locations for a specific campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f277:c0:m1"}
{"signature": "def all(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL>", "docstring": "Get statistics for the top-performing email domains in a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f278:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreOrderLines, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.store_id = None<EOL>self.order_id = None<EOL>self.line_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f279:c0:m0"}
{"signature": "def all(self, store_id, order_id, get_all=False, **queryparams):", "body": "self.store_id = store_id<EOL>self.order_id = order_id<EOL>self.line_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(store_id, '<STR_LIT>', order_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(store_id, '<STR_LIT>', order_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get information about an order\u2019s line items.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param order_id: The id for the order in a store.\n:type order_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f279:c0:m2"}
{"signature": "def get(self, store_id, order_id, line_id, **queryparams):", "body": "self.store_id = store_id<EOL>self.order_id = order_id<EOL>self.line_id = line_id<EOL>return self._mc_client._get(url=self._build_path(store_id, '<STR_LIT>', order_id, '<STR_LIT>', line_id), **queryparams)<EOL>", "docstring": "Get information about a specific order line item.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param order_id: The id for the order in a store.\n:type order_id: :py:class:`str`\n:param line_id: The id for the line item of a cart.\n:type line_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f279:c0:m3"}
{"signature": "def update(self, campaign_id, feedback_id, data):", "body": "self.campaign_id = campaign_id<EOL>self.feedback_id = feedback_id<EOL>if '<STR_LIT:message>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>return self._mc_client._patch(url=self._build_path(campaign_id, '<STR_LIT>', feedback_id), data=data)<EOL>", "docstring": "Update a specific feedback message for a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param feedback_id: The unique id for the feedback message.\n:type feedback_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"message\": string*\n}", "id": "f280:c0:m4"}
{"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>self.feedback_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get team feedback while you\u2019re working together on a MailChimp\ncampaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f280:c0:m2"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(CampaignFeedback, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.campaign_id = None<EOL>self.feedback_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f280:c0:m0"}
{"signature": "def delete(self, campaign_id, feedback_id):", "body": "self.campaign_id = campaign_id<EOL>self.feedback_id = feedback_id<EOL>return self._mc_client._delete(url=self._build_path(campaign_id, '<STR_LIT>', feedback_id))<EOL>", "docstring": "Remove a specific feedback message for a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param feedback_id: The unique id for the feedback message.\n:type feedback_id: :py:class:`str`", "id": "f280:c0:m5"}
{"signature": "def all(self, campaign_id, get_all=False,  **queryparams):", "body": "self.campaign_id = campaign_id<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get detailed information about any campaign emails that were opened by a list member.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['since'] = str", "id": "f281:c0:m1"}
{"signature": "def all(self, conversation_id, **queryparams):", "body": "self.conversation_id = conversation_id<EOL>self.message_id = None<EOL>return self._mc_client._get(url=self._build_path(conversation_id, '<STR_LIT>'), **queryparams)<EOL>", "docstring": "Get messages from a specific conversation.\n\nThis endpoint does not currently support count and offset, preventing\nit from having the get_all parameter that most all() methods have\n\n:param conversation_id: The unique id for the conversation.\n:type conversation_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = p[\nqueryparams['is_read'] = string\nqueryparams['before_timestamp'] = string\nqueryparams['since_timestamp'] = string", "id": "f282:c0:m2"}
{"signature": "def create(self, conversation_id, data):", "body": "self.conversation_id = conversation_id<EOL>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>check_email(data['<STR_LIT>'])<EOL>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if data['<STR_LIT>'] not in [True, False]:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>response =  self._mc_client._post(url=self._build_path(conversation_id, '<STR_LIT>'), data=data)<EOL>if response is not None:<EOL><INDENT>self.message_id = response['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.message_id = None<EOL><DEDENT>return response<EOL>", "docstring": "Post a new message to a conversation.\n\n:param conversation_id: The unique id for the conversation.\n:type conversation_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"from_email\": string*,\n    \"read\": boolean*\n}", "id": "f282:c0:m1"}
{"signature": "def delete(self, store_id, customer_id):", "body": "self.store_id = store_id<EOL>self.customer_id = customer_id<EOL>return self._mc_client._delete(url=self._build_path(store_id, '<STR_LIT>', customer_id))<EOL>", "docstring": "Delete a customer from a store.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param customer_id: The id for the customer of a store.\n:type customer_id: :py:class:`str`", "id": "f283:c0:m6"}
{"signature": "def create(self, store_id, data):", "body": "self.store_id = store_id<EOL>if '<STR_LIT:id>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>check_email(data['<STR_LIT>'])<EOL>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if data['<STR_LIT>'] not in [True, False]:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>response = self._mc_client._post(url=self._build_path(store_id, '<STR_LIT>'), data=data)<EOL>if response is not None:<EOL><INDENT>self.customer_id = response['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.customer_id = None<EOL><DEDENT>return response<EOL>", "docstring": "Add a new customer to a store.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"id\": string*,\n    \"email_address\": string*,\n    \"opt_in_status\": boolean*\n}", "id": "f283:c0:m1"}
{"signature": "def update(self, store_id, customer_id, data):", "body": "self.store_id = store_id<EOL>self.customer_id = customer_id<EOL>return self._mc_client._patch(url=self._build_path(store_id, '<STR_LIT>', customer_id), data=data)<EOL>", "docstring": "Update a customer.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param customer_id: The id for the customer of a store.\n:type customer_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f283:c0:m4"}
{"signature": "def delete(self, list_id, category_id, interest_id):", "body": "self.list_id = list_id<EOL>self.category_id = category_id<EOL>self.interest_id = interest_id<EOL>return self._mc_client._delete(<EOL>url=self._build_path(list_id, '<STR_LIT>', category_id, '<STR_LIT>', interest_id)<EOL>)<EOL>", "docstring": "Delete interests or group names in a specific category.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`\n:param interest_id: The specific interest or \u2018group name\u2019.\n:type interest_id: :py:class:`str`", "id": "f284:c0:m5"}
{"signature": "def update(self, list_id, category_id, interest_id, data):", "body": "self.list_id = list_id<EOL>self.category_id = category_id<EOL>self.interest_id = interest_id<EOL>if '<STR_LIT:name>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>return self._mc_client._patch(<EOL>url=self._build_path(list_id, '<STR_LIT>', category_id, '<STR_LIT>', interest_id),<EOL>data=data<EOL>)<EOL>", "docstring": "Update interests or \u2018group names\u2019 for a specific category.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`\n:param interest_id: The specific interest or \u2018group name\u2019.\n:type interest_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"name\": string*\n}", "id": "f284:c0:m4"}
{"signature": "def get(self, list_id, category_id, interest_id, **queryparams):", "body": "self.list_id = list_id<EOL>self.category_id = category_id<EOL>self.interest_id = interest_id<EOL>return self._mc_client._get(<EOL>url=self._build_path(list_id, '<STR_LIT>', category_id, '<STR_LIT>', interest_id),<EOL>**queryparams<EOL>)<EOL>", "docstring": "Get interests or \u2018group names\u2019 for a specific category.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`\n:param interest_id: The specific interest or \u2018group name\u2019.\n:type interest_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f284:c0:m3"}
{"signature": "def all(self, list_id, category_id, get_all=False, **queryparams):", "body": "self.list_id = list_id<EOL>self.category_id = category_id<EOL>self.interest_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(list_id, '<STR_LIT>', category_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(<EOL>url=self._build_path(list_id, '<STR_LIT>', category_id, '<STR_LIT>'),<EOL>**queryparams<EOL>)<EOL><DEDENT>", "docstring": "Get a list of this category\u2019s interests.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f284:c0:m2"}
{"signature": "def create(self, list_id, category_id, data):", "body": "self.list_id = list_id<EOL>self.category_id = category_id<EOL>if '<STR_LIT:name>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>response =  self._mc_client._post(<EOL>url=self._build_path(list_id, '<STR_LIT>', category_id, '<STR_LIT>'),<EOL>data=data<EOL>)<EOL>if response is not None:<EOL><INDENT>self.interest_id = response['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.interest_id = None<EOL><DEDENT>return response<EOL>", "docstring": "Create a new interest or \u2018group name\u2019 for a specific category.\n\nThe documentation lists only the name request body parameter so it is\nbeing documented and error-checked as if it were required based on the\ndescription of the method.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"name\": string*\n}", "id": "f284:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListInterestCategoryInterest, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.list_id = None<EOL>self.category_id = None<EOL>self.interest_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f284:c0:m0"}
{"signature": "def get(self, folder_id, **queryparams):", "body": "self.folder_id = folder_id<EOL>return self._mc_client._get(url=self._build_path(folder_id), **queryparams)<EOL>", "docstring": "Get information about a specific folder used to organize campaigns.\n\n:param folder_id: The unique id for the campaign folder.\n:type folder_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f285:c0:m3"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(CampaignFolders, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.folder_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f285:c0:m0"}
{"signature": "def all(self, get_all=False, **queryparams):", "body": "self.folder_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(), **queryparams)<EOL><DEDENT>", "docstring": "Get all folders used to organize campaigns.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f285:c0:m2"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListActivity, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.list_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f286:c0:m0"}
{"signature": "def all(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL>", "docstring": "Get feedback based on a campaign\u2019s statistics. Advice feedback is\nbased on campaign stats like opens, clicks, unsubscribes, bounces, and\nmore.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f288:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportCampaignAdvice, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.campaign_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f288:c0:m0"}
{"signature": "def pause(self, campaign_id):", "body": "self.campaign_id = campaign_id<EOL>return self._mc_client._post(url=self._build_path(campaign_id, '<STR_LIT>'))<EOL>", "docstring": "Pause an RSS-Driven campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`", "id": "f289:c0:m2"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(CampaignActions, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.campaign_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f289:c0:m0"}
{"signature": "def send(self, campaign_id):", "body": "self.campaign_id = campaign_id<EOL>return self._mc_client._post(url=self._build_path(campaign_id, '<STR_LIT>'))<EOL>", "docstring": "Send a MailChimp campaign. For RSS Campaigns, the campaign will send\naccording to its schedule. All other campaigns will send immediately.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`", "id": "f289:c0:m6"}
{"signature": "def update(self, campaign_id, data):", "body": "self.campaign_id = campaign_id<EOL>return self._mc_client._put(url=self._build_path(campaign_id, '<STR_LIT:content>'), data=data)<EOL>", "docstring": "Set the content for a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f290:c0:m2"}
{"signature": "def get(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT:content>'), **queryparams)<EOL>", "docstring": "Get the the HTML and plain-text content for a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f290:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportClickDetailReports, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.campaign_id = None<EOL>self.link_id = None<EOL>self.members = ReportClickDetailMembers(self)<EOL>", "docstring": "Initialize the endpoint", "id": "f291:c0:m0"}
{"signature": "def get(self, campaign_id, link_id, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>self.link_id = link_id<EOL>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>', link_id), **queryparams)<EOL>", "docstring": "Get click details for a specific link in a campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param link_id: The id for the link.\n:type link_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f291:c0:m2"}
{"signature": "def all(self, store_id, cart_id, get_all=False, **queryparams):", "body": "self.store_id = store_id<EOL>self.cart_id = cart_id<EOL>self.line_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(store_id, '<STR_LIT>', cart_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(store_id, '<STR_LIT>', cart_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get information about a cart\u2019s line items.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param cart_id: The id for the cart.\n:type cart_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f292:c0:m2"}
{"signature": "def delete(self, store_id, cart_id, line_id):", "body": "self.store_id = store_id<EOL>self.cart_id = cart_id<EOL>self.line_id = line_id<EOL>return self._mc_client._delete(url=self._build_path(store_id, '<STR_LIT>', cart_id, '<STR_LIT>', line_id))<EOL>", "docstring": "Delete a cart.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param cart_id: The id for the cart.\n:type cart_id: :py:class:`str`\n:param line_id: The id for the line item of a cart.\n:type line_id: :py:class:`str`", "id": "f292:c0:m5"}
{"signature": "def get(self, **queryparams):", "body": "if '<STR_LIT>' in queryparams:<EOL><INDENT>self.list_id = queryparams['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>self.list_id = None<EOL><DEDENT>return self._mc_client._get(url=self._build_path(), **queryparams)<EOL>", "docstring": "Search for list members. This search can be restricted to a specific\nlist, or can be used to search across all lists in an account.\n\n:param queryparams: The query string parameters\nqueryparams['fields'] = array\nqueryparams['exclude_fields'] = array\nqueryparams['query'] = string\nqueryparams['list_id'] = string\nqueryparams['offset'] = integer", "id": "f293:c0:m1"}
{"signature": "def get(self, list_id, segment_id, **queryparams):", "body": "self.list_id = list_id<EOL>self.segment_id = segment_id<EOL>return self._mc_client._get(url=self._build_path(list_id, '<STR_LIT>', segment_id), **queryparams)<EOL>", "docstring": "Get information about a specific segment.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param segment_id: The unique id for the segment.\n:type segment_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f294:c0:m3"}
{"signature": "def create(self, store_id, data):", "body": "self.store_id = store_id<EOL>if '<STR_LIT:id>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT:id>' not in data['<STR_LIT>']:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if not re.match(r\"<STR_LIT>\", data['<STR_LIT>']):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>for line in data['<STR_LIT>']:<EOL><INDENT>if '<STR_LIT:id>' not in line:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in line:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in line:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in line:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in line:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT><DEDENT>response = self._mc_client._post(url=self._build_path(store_id, '<STR_LIT>'), data=data)<EOL>if response is not None:<EOL><INDENT>self.order_id = response['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.order_id = None<EOL><DEDENT>return response<EOL>", "docstring": "Add a new order to a store.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"id\": string*,\n    \"customer\": object*\n    {\n        \"'id\": string*\n    },\n    \"curency_code\": string*,\n    \"order_total\": number*,\n    \"lines\": array*\n    [\n        {\n            \"id\": string*,\n            \"product_id\": string*,\n            \"product_variant_id\": string*,\n            \"quantity\": integer*,\n            \"price\": number*\n        }\n    ]\n}", 
"id": "f295:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreOrders, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.store_id = None<EOL>self.order_id = None<EOL>self.lines = StoreOrderLines(self)<EOL>", "docstring": "Initialize the endpoint", "id": "f295:c0:m0"}
{"signature": "def all(self, store_id, get_all=False, **queryparams):", "body": "self.store_id = store_id<EOL>self.order_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(store_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(store_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get information about a store\u2019s orders.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['customer_id'] = string", "id": "f295:c0:m2"}
{"signature": "def start(self, workflow_id, email_id):", "body": "self.workflow_id = workflow_id<EOL>self.email_id = email_id<EOL>return self._mc_client._post(url=self._build_path(workflow_id, '<STR_LIT>', email_id, '<STR_LIT>'))<EOL>", "docstring": "Start an automated email.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`\n:param email_id: The unique id for the Automation workflow email.\n:type email_id: :py:class:`str`", "id": "f296:c0:m2"}
{"signature": "def create(self, store_id, promo_rule_id, data):", "body": "self.store_id = store_id<EOL>if '<STR_LIT:id>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT:code>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>response = self._mc_client._post(url=self._build_path(store_id, '<STR_LIT>', promo_rule_id, '<STR_LIT>'), data=data)<EOL>if response is not None:<EOL><INDENT>return response<EOL><DEDENT>", "docstring": "Add a new promo code to a store.\n\n:param store_id: The store id\n:type store_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict'\ndata = {\n    \"id\": string*,\n    \"code\": string*,\n    \"redemption_url\": string*,\n    \"usage_count\": string,\n    \"enabled\": boolean,\n    \"created_at_foreign\": string,\n    \"updated_at_foreign\": string,\n}", "id": "f297:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(StorePromoCodes, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.store_id = None<EOL>", "docstring": "Initialize the Endpoint\n:param args:\n:param kwargs:", "id": "f297:c0:m0"}
{"signature": "def all(self, store_id, promo_rule_id, get_all=False, **queryparams):", "body": "self.store_id=store_id<EOL>self.promo_rule_id=promo_rule_id<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(store_id, '<STR_LIT>', promo_rule_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(store_id, '<STR_LIT>', promo_rule_id), **queryparams)<EOL><DEDENT>", "docstring": "Get information about a store\u2019s promo codes.\n\n:param store_id: The store's id\n:type store_id: `str`\n:param promo_rule_id: The store promo rule id\n:type store_id: `str`\n:param get_all:\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f297:c0:m2"}
{"signature": "def get(self):", "body": "return self._mc_client._get(url=self._build_path())<EOL>", "docstring": "A health check for the API that won\u2019t return any account-specific information.", "id": "f298:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(Ping, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>", "docstring": "Initialize the endpoint", "id": "f298:c0:m0"}
{"signature": "def delete(self, list_id, category_id):", "body": "self.list_id = list_id<EOL>self.category_id = category_id<EOL>return self._mc_client._delete(url=self._build_path(list_id, '<STR_LIT>', category_id))<EOL>", "docstring": "Delete a specific interest category.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param category_id: The unique id for the interest category.\n:type category_id: :py:class:`str`", "id": "f299:c0:m5"}
{"signature": "def all(self, list_id, get_all=False, **queryparams):", "body": "self.list_id = list_id<EOL>self.category_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(list_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(list_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get information about a list\u2019s interest categories.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['type'] = string", "id": "f299:c0:m2"}
{"signature": "def create(self, list_id, data):", "body": "self.list_id = list_id<EOL>if '<STR_LIT:title>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT:type>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if data['<STR_LIT:type>'] not in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>response = self._mc_client._post(url=self._build_path(list_id, '<STR_LIT>'), data=data)<EOL>if response is not None:<EOL><INDENT>self.category_id = response['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.category_id = None<EOL><DEDENT>return response<EOL>", "docstring": "Create a new interest category.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"title\": string*,\n    \"type\": string* (Must be one of 'checkboxes', 'dropdown', 'radio', or 'hidden')\n}", "id": "f299:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListInterestCategories, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.list_id = None<EOL>self.category_id = None<EOL>self.interests = ListInterestCategoryInterest(self)<EOL>", "docstring": "Initialize the endpoint", "id": "f299:c0:m0"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(ReportGoogleAnalytics, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.campaign_id = None<EOL>self.profile_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f300:c0:m0"}
{"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get a summary of Google Analytics reports for a specific campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams:  The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f300:c0:m1"}
{"signature": "def delete(self, list_id, merge_id):", "body": "self.list_id = list_id<EOL>self.merge_id = merge_id<EOL>return self._mc_client._delete(url=self._build_path(list_id, '<STR_LIT>', merge_id))<EOL>", "docstring": "Delete a specific merge field in a list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param merge_id: The id for the merge field.\n:type merge_id: :py:class:`str`", "id": "f301:c0:m5"}
{"signature": "def all(self, list_id, get_all=False, **queryparams):", "body": "self.list_id = list_id<EOL>self.merge_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(list_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(list_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get a list of all merge fields (formerly merge vars) for a list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['type'] = string\nqueryparams['required'] = boolean", "id": "f301:c0:m2"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListMergeFields, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.list_id = None<EOL>self.merge_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f301:c0:m0"}
{"signature": "def create(self, list_id, data):", "body": "self.list_id = list_id<EOL>if '<STR_LIT:name>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT:type>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>response = self._mc_client._post(url=self._build_path(list_id, '<STR_LIT>'), data=data)<EOL>if response is not None:<EOL><INDENT>self.merge_id = response['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>self.merge_id = None<EOL><DEDENT>return response<EOL>", "docstring": "Add a new merge field for a specific list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"name\": string*,\n    \"type\": string*\n}", "id": "f301:c0:m1"}
{"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>self.subscriber_hash = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get information about members who have unsubscribed from a specific\ncampaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f302:c0:m1"}
{"signature": "def get(self, campaign_id, subscriber_hash, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)<EOL>self.campaign_id = campaign_id<EOL>self.subscriber_hash = subscriber_hash<EOL>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>', subscriber_hash), **queryparams)<EOL>", "docstring": "Get information about a specific list member who unsubscribed from a\ncampaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n  list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f302:c0:m2"}
{"signature": "def update(self, list_id, subscriber_hash, data):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)<EOL>self.list_id = list_id<EOL>self.subscriber_hash = subscriber_hash<EOL>return self._mc_client._patch(url=self._build_path(list_id, '<STR_LIT>', subscriber_hash), data=data)<EOL>", "docstring": "Update information for a specific list member.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n    list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f303:c0:m4"}
{"signature": "def delete(self, list_id, subscriber_hash):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)<EOL>self.list_id = list_id<EOL>self.subscriber_hash = subscriber_hash<EOL>return self._mc_client._delete(url=self._build_path(list_id, '<STR_LIT>', subscriber_hash))<EOL>", "docstring": "Delete a member from a list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n  list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`", "id": "f303:c0:m6"}
{"signature": "def create(self, list_id, data):", "body": "self.list_id = list_id<EOL>if '<STR_LIT:status>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if data['<STR_LIT:status>'] not in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>check_email(data['<STR_LIT>'])<EOL>response = self._mc_client._post(url=self._build_path(list_id, '<STR_LIT>'), data=data)<EOL>if response is not None:<EOL><INDENT>self.subscriber_hash = response['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.subscriber_hash = None<EOL><DEDENT>return response<EOL>", "docstring": "Add a new member to the list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"status\": string*, (Must be one of 'subscribed', 'unsubscribed', 'cleaned',\n        'pending', or 'transactional')\n    \"email_address\": string*\n}", "id": "f303:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListMembers, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.list_id = None<EOL>self.subscriber_hash = None<EOL>self.activity = ListMemberActivity(self)<EOL>self.goals = ListMemberGoals(self)<EOL>self.notes = ListMemberNotes(self)<EOL>", "docstring": "Initialize the endpoint", "id": "f303:c0:m0"}
{"signature": "def all(self, get_all=False, **queryparams):", "body": "self.batch_id = None<EOL>self.operation_status = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(), **queryparams)<EOL><DEDENT>", "docstring": "Get a summary of batch requests that have been made.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f304:c0:m2"}
{"signature": "def create(self, data):", "body": "if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>for op in data['<STR_LIT>']:<EOL><INDENT>if '<STR_LIT>' not in op:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if op['<STR_LIT>'] not in ['<STR_LIT:GET>', '<STR_LIT:POST>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'.format(op['<STR_LIT>']))<EOL><DEDENT>if '<STR_LIT:path>' not in op:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT><DEDENT>return self._mc_client._post(url=self._build_path(), data=data)<EOL>", "docstring": "Begin processing a batch operations request.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"operations\": array*\n    [\n        {\n            \"method\": string* (Must be one of \"GET\", \"POST\", \"PUT\", \"PATCH\", or \"DELETE\")\n            \"path\": string*,\n        }\n    ]\n}", "id": "f304:c0:m1"}
{"signature": "def get(self, batch_id, **queryparams):", "body": "self.batch_id = batch_id<EOL>self.operation_status = None<EOL>return self._mc_client._get(url=self._build_path(batch_id), **queryparams)<EOL>", "docstring": "Get the status of a batch request.\n\n:param batch_id: The unique id for the batch operation.\n:type batch_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f304:c0:m3"}
{"signature": "def delete(self, batch_id):", "body": "self.batch_id = batch_id<EOL>self.operation_status = None<EOL>return self._mc_client._delete(url=self._build_path(batch_id))<EOL>", "docstring": "Stops a batch request from running. Since only one batch request is\nrun at a time, this can be used to cancel a long running request. The\nresults of any completed operations will not be available after this\ncall.\n\n:param batch_id: The unique id for the batch operation.\n:type batch_id: :py:class:`str`", "id": "f304:c0:m4"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(BatchOperations, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.batch_id = None<EOL>self.operation_status = None<EOL>", "docstring": "Initialize the endpoint", "id": "f304:c0:m0"}
{"signature": "def create(self, data):", "body": "if '<STR_LIT:name>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>response = self._mc_client._post(url=self._build_path(), data=data)<EOL>if response is not None:<EOL><INDENT>self.folder_id = response['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.folder_id = None<EOL><DEDENT>return response<EOL>", "docstring": "Create a new folder in the File Manager.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"name\": string*\n}", "id": "f305:c0:m1"}
{"signature": "def update(self, list_id, subscriber_hash, note_id, data):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)<EOL>self.list_id = list_id<EOL>self.subscriber_hash = subscriber_hash<EOL>self.note_id = note_id<EOL>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>return self._mc_client._patch(<EOL>url=self._build_path(list_id, '<STR_LIT>', subscriber_hash, '<STR_LIT>', note_id),<EOL>data=data<EOL>)<EOL>", "docstring": "Update a specific note for a specific list member.\n\nThe documentation lists only the note request body parameter so it is\nbeing documented and error-checked as if it were required based on the\ndescription of the method.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n  list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param note_id: The id for the note.\n:type note_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"note\": string*\n}", "id": "f306:c0:m4"}
{"signature": "def get(self, list_id, subscriber_hash, note_id, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)<EOL>self.list_id = list_id<EOL>self.subscriber_hash = subscriber_hash<EOL>self.note_id = note_id<EOL>return self._mc_client._get(<EOL>url=self._build_path(list_id, '<STR_LIT>', subscriber_hash, '<STR_LIT>', note_id),<EOL>**queryparams<EOL>)<EOL>", "docstring": "Get a specific note for a specific list member.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n  list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param note_id: The id for the note.\n:type note_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f306:c0:m3"}
{"signature": "def all(self, list_id, subscriber_hash, get_all=False, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)<EOL>self.list_id = list_id<EOL>self.subscriber_hash = subscriber_hash<EOL>self.note_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(list_id, '<STR_LIT>', subscriber_hash, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(list_id, '<STR_LIT>', subscriber_hash, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get recent notes for a specific list member.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n  list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f306:c0:m2"}
{"signature": "def all(self, store_id, get_all=False, **queryparams):", "body": "self.store_id = store_id<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(store_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(store_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get information about a store\u2019s promo rules.\n\n:param store_id: The store's id\n:type store_id: `str`\n:param get_all:\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f307:c0:m2"}
{"signature": "def get(self, store_id, promo_rule_id, **queryparams):", "body": "self.store_id = store_id<EOL>self.promo_rule_id = promo_rule_id<EOL>return self._mc_client._get(url=self._build_path(store_id, '<STR_LIT>', promo_rule_id), **queryparams)<EOL>", "docstring": "Get information about a specific promo rule.\n\n:param store_id: The store's id\n:type store_id: `string`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f307:c0:m3"}
{"signature": "def update(self, store_id, promo_rule_id, data):", "body": "self.store_id = store_id<EOL>self.promo_rule_id = promo_rule_id<EOL>return self._mc_client._patch(url=self._build_path(store_id, '<STR_LIT>', promo_rule_id), data=data)<EOL>", "docstring": "Update a promo rule\n\n:param store_id: The store id\n:type :py:class:`str`\n:param promo_rule_id: The id for the promo rule of a store.\n:type :py:class:`str`\n:param data:\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"id\": string,\n    \"title\": string,\n    \"description\": string,\n    \"starts_at\": string,\n    \"ends_at\": string,\n    \"amount\": number,\n    \"type\": string,\n    \"target\": string,\n    \"enabled\": boolean,\n    \"created_at_foreign\": string,\n    \"updated_at_foreign\": string,\n}", "id": "f307:c0:m4"}
{"signature": "def all(self, store_id, product_id, get_all=False, **queryparams):", "body": "self.store_id = store_id<EOL>self.product_id = product_id<EOL>self.image_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(store_id, '<STR_LIT>', product_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._post(url=self._build_path(store_id, '<STR_LIT>', product_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get information about a product\u2019s images.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f308:c0:m2"}
{"signature": "def create(self, store_id, product_id, data):", "body": "self.store_id = store_id<EOL>self.product_id = product_id<EOL>if '<STR_LIT:id>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT:title>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>response = self._mc_client._post(url=self._build_path(store_id, '<STR_LIT>', product_id, '<STR_LIT>'), data=data)<EOL>if response is not None:<EOL><INDENT>self.image_id = response['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.image_id = None<EOL><DEDENT>return response<EOL>", "docstring": "Add a new image to the product.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"id\": string*,\n    \"url\": string*\n}", "id": "f308:c0:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(ListMemberGoals, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.list_id = None<EOL>self.subscriber_hash = None<EOL>", "docstring": "Initialize the endpoint", "id": "f309:c0:m0"}
{"signature": "def all(self, list_id, subscriber_hash, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)<EOL>self.list_id = list_id<EOL>self.subscriber_hash = subscriber_hash<EOL>return self._mc_client._get(url=self._build_path(list_id, '<STR_LIT>', subscriber_hash, '<STR_LIT>'), **queryparams)<EOL>", "docstring": "Get the last 50 Goal events for a member on a specific list.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n  list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f309:c0:m1"}
{"signature": "def all(self, store_id, product_id, get_all=False, **queryparams):", "body": "self.store_id = store_id<EOL>self.product_id = product_id<EOL>self.variant_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(store_id, '<STR_LIT>', product_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(<EOL>url=self._build_path(store_id, '<STR_LIT>', product_id, '<STR_LIT>'),<EOL>**queryparams<EOL>)<EOL><DEDENT>", "docstring": "Get information about a product\u2019s variants.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f310:c0:m2"}
{"signature": "def delete(self, store_id, product_id, variant_id):", "body": "self.store_id = store_id<EOL>self.product_id = product_id<EOL>self.variant_id = variant_id<EOL>return self._mc_client._delete(url=self._build_path(store_id, '<STR_LIT>', product_id, '<STR_LIT>', variant_id))<EOL>", "docstring": "Delete a product variant.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param variant_id: The id for the product variant.\n:type variant_id: :py:class:`str`", "id": "f310:c0:m6"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(StoreProductVariants, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.store_id = None<EOL>self.product_id = None<EOL>self.variant_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f310:c0:m0"}
{"signature": "def update(self, store_id, product_id, variant_id, data):", "body": "self.store_id = store_id<EOL>self.product_id = product_id<EOL>self.variant_id = variant_id<EOL>return self._mc_client._patch(<EOL>url=self._build_path(store_id, '<STR_LIT>', product_id, '<STR_LIT>', variant_id),<EOL>data=data<EOL>)<EOL>", "docstring": "Update a product variant.\n\n:param store_id: The store id.\n:type store_id: :py:class:`str`\n:param product_id: The id for the product of a store.\n:type product_id: :py:class:`str`\n:param variant_id: The id for the product variant.\n:type variant_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`", "id": "f310:c0:m4"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(Root, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>", "docstring": "Initialize the endpoint", "id": "f311:c0:m0"}
{"signature": "def all(self, campaign_id, get_all=False, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>self.subscriber_hash = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL><DEDENT>", "docstring": "Get information about campaign recipients.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer", "id": "f312:c0:m1"}
{"signature": "def get(self, campaign_id, subscriber_hash, **queryparams):", "body": "subscriber_hash = check_subscriber_hash(subscriber_hash)<EOL>self.campaign_id = campaign_id<EOL>self.subscriber_hash = subscriber_hash<EOL>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>', subscriber_hash), **queryparams)<EOL>", "docstring": "Get information about a specific campaign recipient.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param subscriber_hash: The MD5 hash of the lowercase version of the\n  list member\u2019s email address.\n:type subscriber_hash: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f312:c0:m2"}
{"signature": "def get(self, list_id, **queryparams):", "body": "self.list_id = list_id<EOL>return self._mc_client._get(url=self._build_path(list_id), **queryparams)<EOL>", "docstring": "Get information about a specific list in your MailChimp account.\nResults include list members who have signed up but haven\u2019t confirmed\ntheir subscription yet and unsubscribed or cleaned.\n\n:param list_id: The unique id for the list.\n:type list_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f313:c0:m4"}
{"signature": "def all(self, get_all=False, **queryparams):", "body": "self.list_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(), **queryparams)<EOL><DEDENT>", "docstring": "Get information about all lists in the account.\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['before_date_created'] = string\nqueryparams['since_date_created'] = string\nqueryparams['before_campaign_last_sent'] = string\nqueryparams['since_campaign_last_sent'] = string\nqueryparams['email'] = string\nqueryparams['sort_field'] = string (Must be 'date_created')\nqueryparams['sort_dir'] = string (Must be one of 'ASC' or 'DESC')", "id": "f313:c0:m3"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(CampaignSendChecklist, self).__init__(*args, **kwargs)<EOL>self.endpoint = '<STR_LIT>'<EOL>self.campaign_id = None<EOL>", "docstring": "Initialize the endpoint", "id": "f314:c0:m0"}
{"signature": "def get(self, campaign_id, **queryparams):", "body": "self.campaign_id = campaign_id<EOL>return self._mc_client._get(url=self._build_path(campaign_id, '<STR_LIT>'), **queryparams)<EOL>", "docstring": "Review the send checklist for a campaign, and resolve any issues\nbefore sending.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []", "id": "f314:c0:m1"}
{"signature": "def create(self, data):", "body": "if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data['<STR_LIT>']:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data['<STR_LIT>']:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data['<STR_LIT>']:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data['<STR_LIT>']:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>check_email(data['<STR_LIT>']['<STR_LIT>'])<EOL>if '<STR_LIT:type>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if not data['<STR_LIT:type>'] in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if data['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data['<STR_LIT>']:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if data['<STR_LIT>']['<STR_LIT>'] not in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>if data['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data['<STR_LIT>']:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if not data['<STR_LIT>']['<STR_LIT>'] in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>response = self._mc_client._post(url=self._build_path(), data=data)<EOL>if response is not None:<EOL><INDENT>self.campaign_id = response['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.campaign_id = None<EOL><DEDENT>return response<EOL>", "docstring": "Create a new MailChimp campaign.\n\nThe ValueError raised by an invalid type in data does not 
mention\n'absplit' as a potential value because the documentation indicates\nthat the absplit type has been deprecated.\n\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"recipients\": object*\n    {\n        \"list_id\": string*\n    },\n    \"settings\": object*\n    {\n        \"subject_line\": string*,\n        \"from_name\": string*,\n        \"reply_to\": string*\n    },\n    \"variate_settings\": object* (Required if type is \"variate\")\n    {\n        \"winner_criteria\": string* (Must be one of \"opens\", \"clicks\", \"total_revenue\", or \"manual\")\n    },\n    \"rss_opts\": object* (Required if type is \"rss\")\n    {\n        \"feed_url\": string*,\n        \"frequency\": string* (Must be one of \"daily\", \"weekly\", or \"monthly\")\n    },\n    \"type\": string* (Must be one of \"regular\", \"plaintext\", \"rss\", \"variate\", or \"absplit\")\n}", "id": "f315:c0:m1"}
{"signature": "def update(self, campaign_id, data):", "body": "self.campaign_id = campaign_id<EOL>if '<STR_LIT>' not in data:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data['<STR_LIT>']:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data['<STR_LIT>']:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' not in data['<STR_LIT>']:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>check_email(data['<STR_LIT>']['<STR_LIT>'])<EOL>return self._mc_client._patch(url=self._build_path(campaign_id), data=data)<EOL>", "docstring": "Update some or all of the settings for a specific campaign.\n\n:param campaign_id: The unique id for the campaign.\n:type campaign_id: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:class:`dict`\ndata = {\n    \"settings\": object*\n    {\n        \"subject_line\": string*,\n        \"from_name\": string*,\n        \"reply_to\": string*\n    },\n}", "id": "f315:c0:m4"}
{"signature": "def all(self, get_all=False, **queryparams):", "body": "self.campaign_id = None<EOL>if get_all:<EOL><INDENT>return self._iterate(url=self._build_path(), **queryparams)<EOL><DEDENT>else:<EOL><INDENT>return self._mc_client._get(url=self._build_path(), **queryparams)<EOL><DEDENT>", "docstring": "Get all campaigns in an account.\n\n.. note::\n    The before_create_time, since_create_time, before_send_time, and\n    since_send_time queryparams expect times to be listed in the ISO\n    8601 format in UTC (ex. 2015-10-21T15:41:36+00:00).\n\n:param get_all: Should the query get all results\n:type get_all: :py:class:`bool`\n:param queryparams: The query string parameters\nqueryparams['fields'] = []\nqueryparams['exclude_fields'] = []\nqueryparams['count'] = integer\nqueryparams['offset'] = integer\nqueryparams['type'] = []\nqueryparams['status'] = []\nqueryparams['before_send_time'] = string\nqueryparams['since_send_time'] = string\nqueryparams['before_create_time'] = string\nqueryparams['since_create_time'] = string\nqueryparams['list_id'] = string\nqueryparams['folder_id'] = string\nqueryparams['sort_field'] = string\nqueryparams['sort_dir'] = string", "id": "f315:c0:m2"}
{"signature": "def all(self, workflow_id):", "body": "self.workflow_id = workflow_id<EOL>return self._mc_client._get(url=self._build_path(workflow_id, '<STR_LIT>'))<EOL>", "docstring": "Get information about subscribers who were removed from an Automation\nworkflow.\n\n:param workflow_id: The unique id for the Automation workflow.\n:type workflow_id: :py:class:`str`", "id": "f316:c0:m2"}
{"signature": "def __call__(self, r):", "body": "r.headers['<STR_LIT>'] = '<STR_LIT>' + self._access_token<EOL>return r<EOL>", "docstring": "Authorize with the access token provided in __init__", "id": "f317:c2:m1"}
{"signature": "@_enabled_or_noop<EOL><INDENT>def _post(self, url, data=None):<DEDENT>", "body": "url = urljoin(self.base_url, url)<EOL>try:<EOL><INDENT>r = self._make_request(**dict(<EOL>method='<STR_LIT:POST>',<EOL>url=url,<EOL>json=data,<EOL>auth=self.auth,<EOL>timeout=self.timeout,<EOL>hooks=self.request_hooks,<EOL>headers=self.request_headers<EOL>))<EOL><DEDENT>except requests.exceptions.RequestException as e:<EOL><INDENT>raise e<EOL><DEDENT>else:<EOL><INDENT>if r.status_code >= <NUM_LIT>:<EOL><INDENT>try:<EOL><INDENT>error_data = r.json()<EOL><DEDENT>except ValueError:<EOL><INDENT>error_data = { \"<STR_LIT>\": r }<EOL><DEDENT>raise MailChimpError(error_data)<EOL><DEDENT>if r.status_code == <NUM_LIT>:<EOL><INDENT>return None<EOL><DEDENT>return r.json()<EOL><DEDENT>", "docstring": "Handle authenticated POST requests\n\n:param url: The url for the endpoint including path parameters\n:type url: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:data:`none` or :py:class:`dict`\n:returns: The JSON output from the API or an error message", "id": "f317:c1:m2"}
{"signature": "@_enabled_or_noop<EOL><INDENT>def _get(self, url, **queryparams):<DEDENT>", "body": "url = urljoin(self.base_url, url)<EOL>if len(queryparams):<EOL><INDENT>url += '<STR_LIT:?>' + urlencode(queryparams)<EOL><DEDENT>try:<EOL><INDENT>r = self._make_request(**dict(<EOL>method='<STR_LIT:GET>',<EOL>url=url,<EOL>auth=self.auth,<EOL>timeout=self.timeout,<EOL>hooks=self.request_hooks,<EOL>headers=self.request_headers<EOL>))<EOL><DEDENT>except requests.exceptions.RequestException as e:<EOL><INDENT>raise e<EOL><DEDENT>else:<EOL><INDENT>if r.status_code >= <NUM_LIT>:<EOL><INDENT>raise MailChimpError(r.json())<EOL><DEDENT>return r.json()<EOL><DEDENT>", "docstring": "Handle authenticated GET requests\n\n:param url: The url for the endpoint including path parameters\n:type url: :py:class:`str`\n:param queryparams: The query string parameters\n:returns: The JSON output from the API", "id": "f317:c1:m3"}
{"signature": "@_enabled_or_noop<EOL><INDENT>def _patch(self, url, data=None):<DEDENT>", "body": "url = urljoin(self.base_url, url)<EOL>try:<EOL><INDENT>r = self._make_request(**dict(<EOL>method='<STR_LIT>',<EOL>url=url,<EOL>json=data,<EOL>auth=self.auth,<EOL>timeout=self.timeout,<EOL>hooks=self.request_hooks,<EOL>headers=self.request_headers<EOL>))<EOL><DEDENT>except requests.exceptions.RequestException as e:<EOL><INDENT>raise e<EOL><DEDENT>else:<EOL><INDENT>if r.status_code >= <NUM_LIT>:<EOL><INDENT>raise MailChimpError(r.json())<EOL><DEDENT>return r.json()<EOL><DEDENT>", "docstring": "Handle authenticated PATCH requests\n\n:param url: The url for the endpoint including path parameters\n:type url: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:data:`none` or :py:class:`dict`\n:returns: The JSON output from the API", "id": "f317:c1:m5"}
{"signature": "@_enabled_or_noop<EOL><INDENT>def _delete(self, url):<DEDENT>", "body": "url = urljoin(self.base_url, url)<EOL>try:<EOL><INDENT>r = self._make_request(**dict(<EOL>method='<STR_LIT>',<EOL>url=url,<EOL>auth=self.auth,<EOL>timeout=self.timeout,<EOL>hooks=self.request_hooks,<EOL>headers=self.request_headers<EOL>))<EOL><DEDENT>except requests.exceptions.RequestException as e:<EOL><INDENT>raise e<EOL><DEDENT>else:<EOL><INDENT>if r.status_code >= <NUM_LIT>:<EOL><INDENT>raise MailChimpError(r.json())<EOL><DEDENT>if r.status_code == <NUM_LIT>:<EOL><INDENT>return<EOL><DEDENT>return r.json()<EOL><DEDENT>", "docstring": "Handle authenticated DELETE requests\n\n:param url: The url for the endpoint including path parameters\n:type url: :py:class:`str`\n:returns: The JSON output from the API", "id": "f317:c1:m4"}
{"signature": "def get_base_url(self):", "body": "try:<EOL><INDENT>return self.get_metadata()['<STR_LIT>']<EOL><DEDENT>except requests.exceptions.RequestException:<EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Get the base_url from the authentication metadata", "id": "f317:c2:m3"}
{"signature": "def get_metadata(self):", "body": "try:<EOL><INDENT>r = requests.get('<STR_LIT>', auth=self)<EOL><DEDENT>except requests.exceptions.RequestException as e:<EOL><INDENT>raise e<EOL><DEDENT>else:<EOL><INDENT>r.raise_for_status()<EOL>output = r.json()<EOL>if '<STR_LIT:error>' in output:<EOL><INDENT>raise requests.exceptions.RequestException(output['<STR_LIT:error>'])<EOL><DEDENT>return output<EOL><DEDENT>", "docstring": "Get the metadata returned after authentication", "id": "f317:c2:m2"}
{"signature": "@_enabled_or_noop<EOL><INDENT>def _put(self, url, data=None):<DEDENT>", "body": "url = urljoin(self.base_url, url)<EOL>try:<EOL><INDENT>r = self._make_request(**dict(<EOL>method='<STR_LIT>',<EOL>url=url,<EOL>json=data,<EOL>auth=self.auth,<EOL>timeout=self.timeout,<EOL>hooks=self.request_hooks,<EOL>headers=self.request_headers<EOL>))<EOL><DEDENT>except requests.exceptions.RequestException as e:<EOL><INDENT>raise e<EOL><DEDENT>else:<EOL><INDENT>if r.status_code >= <NUM_LIT>:<EOL><INDENT>try:<EOL><INDENT>error_data = r.json()<EOL><DEDENT>except ValueError:<EOL><INDENT>error_data = { \"<STR_LIT>\": r }<EOL><DEDENT>raise MailChimpError(error_data)<EOL><DEDENT>return r.json()<EOL><DEDENT>", "docstring": "Handle authenticated PUT requests\n\n:param url: The url for the endpoint including path parameters\n:type url: :py:class:`str`\n:param data: The request body parameters\n:type data: :py:data:`none` or :py:class:`dict`\n:returns: The JSON output from the API", "id": "f317:c1:m6"}
{"signature": "def __init__(self, mc_api=None, mc_user='<STR_LIT>', access_token=None, enabled=True, timeout=None,<EOL>request_hooks=None, request_headers=None):", "body": "super(MailChimpClient, self).__init__()<EOL>self.enabled = enabled<EOL>self.timeout = timeout<EOL>if access_token:<EOL><INDENT>self.auth = MailChimpOAuth(access_token)<EOL>self.base_url = self.auth.get_base_url() + '<STR_LIT>'<EOL><DEDENT>elif mc_api:<EOL><INDENT>if not re.match(r\"<STR_LIT>\", mc_api.split('<STR_LIT:->')[<NUM_LIT:0>]):<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>self.auth = HTTPBasicAuth(mc_user, mc_api)<EOL>datacenter = mc_api.split('<STR_LIT:->').pop()<EOL>self.base_url = '<STR_LIT>'.format(datacenter)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>self.request_headers = request_headers or requests.utils.default_headers()<EOL>self.request_hooks = request_hooks or requests.hooks.default_hooks()<EOL>", "docstring": "Initialize the class with your optional user_id and required api_key.\n\nIf `enabled` is not True, these methods become no-ops. This is\nparticularly useful for testing or disabling with configuration.\n\n:param mc_api: Mailchimp API key\n:type mc_api: :py:class:`str`\n:param mc_user: Mailchimp user id\n:type mc_user: :py:class:`str`\n:param access_token: The OAuth access token\n:type access_token: :py:class:`str`\n:param enabled: Whether the API should execute any requests\n:type enabled: :py:class:`bool`\n:param timeout: (optional) How long to wait for the server to send\n    data before giving up, as a float, or a :ref:`(connect timeout,\n    read timeout) <timeouts>` tuple.\n:type timeout: float or tuple\n:param request_hooks: (optional) Hooks for :py:func:`requests.requests`.\n:type request_hooks: :py:class:`dict`\n:param request_headers: (optional) Headers for\n    :py:func:`requests.requests`.\n:type request_headers: :py:class:`dict`", "id": "f317:c1:m0"}
{"signature": "def load(self):", "body": "con = sqlite3.connect(self.tmp_cookie_file)<EOL>cur = con.cursor()<EOL>try:<EOL><INDENT>cur.execute('<STR_LIT>'<EOL>'<STR_LIT>'.format(self.domain_name))<EOL><DEDENT>except sqlite3.OperationalError:<EOL><INDENT>cur.execute('<STR_LIT>'<EOL>'<STR_LIT>'.format(self.domain_name))<EOL><DEDENT>cj = http.cookiejar.CookieJar()<EOL>for item in cur.fetchall():<EOL><INDENT>host, path, secure, expires, name = item[:<NUM_LIT:5>]<EOL>value = self._decrypt(item[<NUM_LIT:5>], item[<NUM_LIT:6>])<EOL>c = create_cookie(host, path, secure, expires, name, value)<EOL>cj.set_cookie(c)<EOL><DEDENT>con.close()<EOL>return cj<EOL>", "docstring": "Load sqlite cookies into a cookiejar", "id": "f320:c1:m3"}
{"signature": "def load(domain_name=\"<STR_LIT>\"):", "body": "cj = http.cookiejar.CookieJar()<EOL>for cookie_fn in [chrome, firefox]:<EOL><INDENT>try:<EOL><INDENT>for cookie in cookie_fn(domain_name=domain_name):<EOL><INDENT>cj.set_cookie(cookie)<EOL><DEDENT><DEDENT>except BrowserCookieError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return cj<EOL>", "docstring": "Try to load cookies from all supported browsers and return combined cookiejar\n    Optionally pass in a domain name to only load cookies from the specified domain", "id": "f320:m6"}
{"signature": "def chrome(cookie_file=None, domain_name=\"<STR_LIT>\"):", "body": "return Chrome(cookie_file, domain_name).load()<EOL>", "docstring": "Returns a cookiejar of the cookies used by Chrome. Optionally pass in a\n    domain name to only load cookies from the specified domain", "id": "f320:m4"}
{"signature": "def create_cookie(host, path, secure, expires, name, value):", "body": "return http.cookiejar.Cookie(<NUM_LIT:0>, name, value, None, False, host, host.startswith('<STR_LIT:.>'), host.startswith('<STR_LIT:.>'), path,<EOL>True, secure, expires, False, None, None, {})<EOL>", "docstring": "Shortcut function to create a cookie", "id": "f320:m3"}
{"signature": "def _decrypt(self, value, encrypted_value):", "body": "if sys.platform == '<STR_LIT:win32>':<EOL><INDENT>return self._decrypt_windows_chrome(value, encrypted_value)<EOL><DEDENT>if value or (encrypted_value[:<NUM_LIT:3>] != b'<STR_LIT>'):<EOL><INDENT>return value<EOL><DEDENT>encrypted_value = encrypted_value[<NUM_LIT:3>:]<EOL>encrypted_value_half_len = int(len(encrypted_value) / <NUM_LIT:2>)<EOL>cipher = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(self.key, self.iv))<EOL>decrypted = cipher.feed(encrypted_value[:encrypted_value_half_len])<EOL>decrypted += cipher.feed(encrypted_value[encrypted_value_half_len:])<EOL>decrypted += cipher.feed()<EOL>return decrypted.decode(\"<STR_LIT:utf-8>\")<EOL>", "docstring": "Decrypt encoded cookies", "id": "f320:c1:m5"}
{"signature": "@staticmethod<EOL><INDENT>def from_dict(name, values):<DEDENT>", "body": "<EOL>count = <NUM_LIT:1><EOL>count_value = values.get('<STR_LIT:count>', <NUM_LIT:1>)<EOL>if isinstance(count_value, int):<EOL><INDENT>count = max(count_value, <NUM_LIT:1>)<EOL><DEDENT>def with_index(name, idx):<EOL><INDENT>if name and idx:<EOL><INDENT>return '<STR_LIT>' % (name, idx)<EOL><DEDENT>return name<EOL><DEDENT>def get_instance(n, idx=None):<EOL><INDENT>return BlockadeContainerConfig(<EOL>with_index(n, idx),<EOL>values['<STR_LIT:image>'],<EOL>command=values.get('<STR_LIT>'),<EOL>links=values.get('<STR_LIT>'),<EOL>volumes=values.get('<STR_LIT>'),<EOL>publish_ports=values.get('<STR_LIT>'),<EOL>expose_ports=values.get('<STR_LIT>'),<EOL>environment=values.get('<STR_LIT>'),<EOL>hostname=values.get('<STR_LIT>'),<EOL>dns=values.get('<STR_LIT>'),<EOL>start_delay=values.get('<STR_LIT>', <NUM_LIT:0>),<EOL>neutral=values.get('<STR_LIT>', False),<EOL>holy=values.get('<STR_LIT>', False),<EOL>container_name=with_index(values.get('<STR_LIT>'), idx),<EOL>cap_add=values.get('<STR_LIT>'))<EOL><DEDENT>if count == <NUM_LIT:1>:<EOL><INDENT>yield get_instance(name)<EOL><DEDENT>else:<EOL><INDENT>for idx in range(<NUM_LIT:1>, count+<NUM_LIT:1>):<EOL><INDENT>yield get_instance(name, idx)<EOL><DEDENT><DEDENT>", "docstring": "Convert a dictionary of configuration values\ninto a sequence of BlockadeContainerConfig instances", "id": "f325:c0:m0"}
{"signature": "def dependency_sorted(containers):", "body": "if not isinstance(containers, collections.Mapping):<EOL><INDENT>containers = dict((c.name, c) for c in containers)<EOL><DEDENT>container_links = dict((name, set(c.links.keys()))<EOL>for name, c in containers.items())<EOL>sorted_names = _resolve(container_links)<EOL>return [containers[name] for name in sorted_names]<EOL>", "docstring": "Sort a dictionary or list of containers into dependency order\n\n    Returns a sequence", "id": "f325:m2"}
{"signature": "def wait_for_children():", "body": "wait(lambda: len(multiprocessing.active_children()) == <NUM_LIT:0>)<EOL>", "docstring": "Wait for child processes to exit\n\n    The testing system launches and terminates child processes, but\n    doesn't wait for them to actually die. So in a few places we need\n    this extra call", "id": "f338:m0"}
{"signature": "def cmd_restart(opts):", "body": "__with_containers(opts, Blockade.restart)<EOL>", "docstring": "Restart some or all containers", "id": "f342:m14"}
{"signature": "def cmd_status(opts):", "body": "config = load_config(opts.config)<EOL>b = get_blockade(config, opts)<EOL>containers = b.status()<EOL>print_containers(containers, opts.json)<EOL>", "docstring": "Print status of containers and networks", "id": "f342:m9"}
{"signature": "def cmd_join(opts):", "body": "config = load_config(opts.config)<EOL>b = get_blockade(config, opts)<EOL>b.join()<EOL>", "docstring": "Restore full networking between containers", "id": "f342:m21"}
{"signature": "def cmd_flaky(opts):", "body": "__with_containers(opts, Blockade.flaky)<EOL>", "docstring": "Make the network flaky for some or all containers", "id": "f342:m15"}
{"signature": "def cmd_logs(opts):", "body": "config = load_config(opts.config)<EOL>b = get_blockade(config, opts)<EOL>puts(b.logs(opts.container).decode(encoding='<STR_LIT>'))<EOL>", "docstring": "Fetch the logs of a container", "id": "f342:m22"}
{"signature": "def cmd_add(opts):", "body": "config = load_config(opts.config)<EOL>b = get_blockade(config, opts)<EOL>b.add_container(opts.containers)<EOL>", "docstring": "Add one or more existing Docker containers to a Blockade group", "id": "f342:m24"}
{"signature": "def cmd_partition(opts):", "body": "config = load_config(opts.config)<EOL>b = get_blockade(config, opts)<EOL>if opts.random:<EOL><INDENT>if opts.partitions:<EOL><INDENT>raise BlockadeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>b.random_partition()<EOL><DEDENT>else:<EOL><INDENT>partitions = []<EOL>for partition in opts.partitions:<EOL><INDENT>names = []<EOL>for name in partition.split(\"<STR_LIT:U+002C>\"):<EOL><INDENT>name = name.strip()<EOL>if name:<EOL><INDENT>names.append(name)<EOL><DEDENT><DEDENT>partitions.append(names)<EOL><DEDENT>if not partitions:<EOL><INDENT>raise BlockadeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>b.partition(partitions)<EOL><DEDENT>", "docstring": "Partition the network between containers\n\n    Replaces any existing partitions outright. Any containers NOT specified\n    in arguments will be globbed into a single implicit partition. For\n    example if you have three containers: c1, c2, and c3 and you run:\n\n        blockade partition c1\n\n    The result will be a partition with just c1 and another partition with\n    c2 and c3.\n\n    Alternatively, --random may be specified, and zero or more random\n    partitions will be generated by blockade.", "id": "f342:m20"}
{"signature": "def cmd_up(opts):", "body": "config = load_config(opts.config)<EOL>b = get_blockade(config, opts)<EOL>containers = b.create(verbose=opts.verbose, force=opts.force)<EOL>print_containers(containers, opts.json)<EOL>", "docstring": "Start the containers and link them together", "id": "f342:m7"}
{"signature": "def cmd_version(opts):", "body": "import blockade.version<EOL>puts(\"<STR_LIT>\" + blockade.version.__version__)<EOL>", "docstring": "Show the Blockade version information", "id": "f342:m25"}
{"signature": "def cmd_daemon(opts):", "body": "if opts.data_dir is None:<EOL><INDENT>raise BlockadeError(\"<STR_LIT>\")<EOL><DEDENT>rest.start(data_dir=opts.data_dir, port=opts.port, debug=opts.debug,<EOL>host_exec=get_host_exec())<EOL>", "docstring": "Start the Blockade REST API", "id": "f342:m23"}
{"signature": "def cmd_kill(opts):", "body": "kill_signal = opts.signal if hasattr(opts, '<STR_LIT>') else \"<STR_LIT>\"<EOL>__with_containers(opts, Blockade.kill, signal=kill_signal)<EOL>", "docstring": "Kill some or all containers", "id": "f342:m12"}
{"signature": "def cmd_destroy(opts):", "body": "config = load_config(opts.config)<EOL>b = get_blockade(config, opts)<EOL>b.destroy()<EOL>", "docstring": "Destroy all containers and restore networks", "id": "f342:m8"}
{"signature": "def create_chain(self, chain):", "body": "if not chain:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.call(\"<STR_LIT>\", chain)<EOL>", "docstring": "Create a new chain", "id": "f343:c2:m9"}
{"signature": "def _sm_start(self, *args, **kwargs):", "body": "millisec = random.randint(self._start_min_delay, self._start_max_delay)<EOL>self._timer = threading.Timer(millisec / <NUM_LIT>, self.event_timeout)<EOL>self._timer.start()<EOL>", "docstring": "Start the timer waiting for pain", "id": "f345:c2:m11"}
{"signature": "def _sm_relieve_pain(self, *args, **kwargs):", "body": "_logger.info(<EOL>\"<STR_LIT>\" % self._blockade_name)<EOL>self._do_reset_all()<EOL>millisec = random.randint(self._start_min_delay, self._start_max_delay)<EOL>self._timer = threading.Timer(millisec/<NUM_LIT>, self.event_timeout)<EOL>self._timer.start()<EOL>", "docstring": "End the blockade event and return to a steady state", "id": "f345:c2:m14"}
{"signature": "def _sm_cleanup(self, *args, **kwargs):", "body": "if self._done_notification_func is not None:<EOL><INDENT>self._done_notification_func()<EOL><DEDENT>self._timer.cancel()<EOL>", "docstring": "Delete all state associated with the chaos session", "id": "f345:c2:m16"}
{"signature": "def _sm_stop_from_pain(self, *args, **kwargs):", "body": "_logger.info(\"<STR_LIT>\" % self._blockade_name)<EOL>self._do_reset_all()<EOL>", "docstring": "Stop chaos while there is a blockade event in progress", "id": "f345:c2:m15"}
{"signature": "def _sm_to_pain(self, *args, **kwargs):", "body": "_logger.info(\"<STR_LIT>\" % self._blockade_name)<EOL>self._do_blockade_event()<EOL>millisec = random.randint(self._run_min_time, self._run_max_time)<EOL>self._timer = threading.Timer(millisec / <NUM_LIT>, self.event_timeout)<EOL>self._timer.start()<EOL>", "docstring": "Start the blockade event", "id": "f345:c2:m12"}
{"signature": "def _assure_dir(self):", "body": "try:<EOL><INDENT>os.makedirs(self._state_dir)<EOL><DEDENT>except OSError as err:<EOL><INDENT>if err.errno != errno.EEXIST:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>", "docstring": "Make sure the state directory exists", "id": "f351:c0:m11"}
{"signature": "def initialize(self, containers):", "body": "self._containers = deepcopy(containers)<EOL>self.__write(containers, initialize=True)<EOL>", "docstring": "Initialize a new state file with the given contents.\nThis function fails in case the state file already exists.", "id": "f351:c0:m5"}
{"signature": "def container_id(self, name):", "body": "container = self._containers.get(name, None)<EOL>if not container is None:<EOL><INDENT>return container.get('<STR_LIT:id>', None)<EOL><DEDENT>return None<EOL>", "docstring": "Try to find the container ID with the specified name", "id": "f351:c0:m4"}
{"signature": "@property<EOL><INDENT>def blockade_net_name(self):<DEDENT>", "body": "return \"<STR_LIT>\" % self._blockade_id<EOL>", "docstring": "Generate blockade nework name based on the blockade_id", "id": "f351:c0:m2"}
{"signature": "def load(self):", "body": "try:<EOL><INDENT>with open(self._state_file) as f:<EOL><INDENT>state = yaml.safe_load(f)<EOL>self._containers = state['<STR_LIT>']<EOL><DEDENT><DEDENT>except (IOError, OSError) as err:<EOL><INDENT>if err.errno == errno.ENOENT:<EOL><INDENT>raise NotInitializedError(\"<STR_LIT>\")<EOL><DEDENT>raise InconsistentStateError(\"<STR_LIT>\"<EOL>+ str(err))<EOL><DEDENT>except Exception as err:<EOL><INDENT>raise InconsistentStateError(\"<STR_LIT>\"<EOL>+ str(err))<EOL><DEDENT>", "docstring": "Try to load a blockade state file in the current directory", "id": "f351:c0:m8"}
{"signature": "def translate(self, frame=<NUM_LIT:0>):", "body": "return Fasta(self.id, '<STR_LIT>'.join([genetic_codes.codes[genetic_code].get(self.seq[x:x+<NUM_LIT:3>].upper(), '<STR_LIT:X>') for x in range(frame, len(self)-<NUM_LIT:1>-frame, <NUM_LIT:3>)]))<EOL>", "docstring": "Returns a Fasta sequence, translated into amino acids. Starts translating from 'frame', where frame expected to be 0,1 or 2", "id": "f364:c1:m30"}
{"signature": "def gc_content(self, as_decimal=True):", "body": "gc_total = <NUM_LIT:0.0><EOL>num_bases = <NUM_LIT:0.0><EOL>n_tuple = tuple('<STR_LIT>')<EOL>accepted_bases = tuple('<STR_LIT>')<EOL>for base, count in Counter(self.seq).items():<EOL><INDENT>if base not in n_tuple:<EOL><INDENT>num_bases += count<EOL>if base in accepted_bases:  <EOL><INDENT>gc_total += count<EOL><DEDENT><DEDENT><DEDENT>gc_content = gc_total / num_bases<EOL>if not as_decimal:  <EOL><INDENT>gc_content *= <NUM_LIT:100><EOL><DEDENT>return gc_content<EOL>", "docstring": "Returns the GC content for the sequence.\n        Notes:\n            This method ignores N when calculating the length of the sequence.\n            It does not, however ignore other ambiguous bases. It also only\n            includes the ambiguous base S (G or C). In this sense the method is\n            conservative with its calculation.\n\n        Args:\n            as_decimal (bool): Return the result as a decimal. Setting to False\n            will return as a percentage. i.e for the sequence GCAT it will\n            return 0.5 by default and 50.00 if set to False.\n\n        Returns:\n            float: GC content calculated as the number of G, C, and S divided\n            by the number of (non-N) bases (length).", "id": "f364:c1:m31"}
{"signature": "def expand_nucleotides(self):", "body": "s = list(self.seq)<EOL>for i in range(len(s)):<EOL><INDENT>if s[i] in redundant_nts:<EOL><INDENT>s[i] = '<STR_LIT>'.join(redundant_nts[s[i]])<EOL><DEDENT><DEDENT>seqs = []<EOL>for x in itertools.product(*s):<EOL><INDENT>seqs.append(Fasta(self.id + '<STR_LIT:.>' + str(len(seqs) + <NUM_LIT:1>), '<STR_LIT>'.join(x)))<EOL><DEDENT>return seqs<EOL>", "docstring": "Assumes sequence is nucleotides. Returns list of all combinations of redundant nucleotides. e.g. R is A or G, so CRT would have combinations CAT and CGT", "id": "f364:c1:m7"}
{"signature": "def subseq(self, start, end):", "body": "return Fastq(self.id, self.seq[start:end], self.qual[start:end])<EOL>", "docstring": "Returns Fastq object with the same name, of the bases from start to end, but not including end", "id": "f364:c3:m3"}
{"signature": "def subseq(self, start, end):", "body": "return Fasta(self.id, self.seq[start:end])<EOL>", "docstring": "Returns Fasta object with the same name, of the bases from start to end, but not including end", "id": "f364:c1:m5"}
{"signature": "def to_Fastq(self, qual_scores):", "body": "if len(self) != len(qual_scores):<EOL><INDENT>raise Error('<STR_LIT>', self.id)<EOL><DEDENT>return Fastq(self.id, self.seq, '<STR_LIT>'.join([chr(max(<NUM_LIT:0>, min(x, <NUM_LIT>)) + <NUM_LIT>) for x in qual_scores]))<EOL>", "docstring": "Returns a Fastq object. qual_scores expected to be a list of numbers, like you would get in a .qual file", "id": "f364:c1:m28"}
{"signature": "def replace_bases(self, old, new):", "body": "self.seq = self.seq.replace(old, new)<EOL>", "docstring": "Replaces all occurrences of 'old' with 'new", "id": "f364:c1:m14"}
{"signature": "def orfs(self, frame=<NUM_LIT:0>, revcomp=False):", "body": "assert frame in [<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:2>]<EOL>if revcomp:<EOL><INDENT>self.revcomp()<EOL><DEDENT>aa_seq = self.translate(frame=frame).seq.rstrip('<STR_LIT:X>')<EOL>if revcomp:<EOL><INDENT>self.revcomp()<EOL><DEDENT>orfs = _orfs_from_aa_seq(aa_seq)<EOL>for i in range(len(orfs)):<EOL><INDENT>if revcomp:<EOL><INDENT>start = len(self) - (orfs[i].end * <NUM_LIT:3> + <NUM_LIT:3>) - frame<EOL>end = len(self) - (orfs[i].start * <NUM_LIT:3>) - <NUM_LIT:1> - frame<EOL><DEDENT>else:<EOL><INDENT>start = orfs[i].start * <NUM_LIT:3> + frame<EOL>end = orfs[i].end * <NUM_LIT:3> + <NUM_LIT:2> + frame<EOL><DEDENT>orfs[i] = intervals.Interval(start, end)<EOL><DEDENT>return orfs<EOL>", "docstring": "Returns a list of ORFs that the sequence has, starting on the given\n           frame. Each returned ORF is an interval.Interval object.\n           If revomp=True, then finds the ORFs of the reverse complement\n           of the sequence.", "id": "f364:c1:m19"}
{"signature": "def replace_interval(self, start, end, new, qual_string):", "body": "if len(new) != len(qual_string):<EOL><INDENT>raise Error('<STR_LIT>')<EOL><DEDENT>super().replace_interval(start, end, new)<EOL>self.qual = self.qual[<NUM_LIT:0>:start] + qual_string + self.qual[end + <NUM_LIT:1>:]<EOL>", "docstring": "Replaces the sequence from start to end with the sequence \"new", "id": "f364:c3:m10"}
{"signature": "def strip_illumina_suffix(self):", "body": "if self.id.endswith('<STR_LIT>') or self.id.endswith('<STR_LIT>'):<EOL><INDENT>self.id = self.id[:-<NUM_LIT:2>]<EOL><DEDENT>", "docstring": "Removes any trailing /1 or /2 off the end of the name", "id": "f364:c1:m9"}
{"signature": "def strip_after_first_whitespace(self):", "body": "self.id = self.id.split()[<NUM_LIT:0>]<EOL>", "docstring": "Removes everything in the name after the first whitespace character", "id": "f364:c1:m8"}
{"signature": "def revcomp(self):", "body": "self.seq = self.seq.translate(str.maketrans(\"<STR_LIT>\", \"<STR_LIT>\"))[::-<NUM_LIT:1>]<EOL>", "docstring": "Reverse complements the sequence", "id": "f364:c1:m10"}
{"signature": "def gaps(self, min_length = <NUM_LIT:1>):", "body": "gaps = []<EOL>regex = re.compile('<STR_LIT>', re.IGNORECASE)<EOL>for m in regex.finditer(self.seq):<EOL><INDENT>if m.span()[<NUM_LIT:1>] - m.span()[<NUM_LIT:0>] + <NUM_LIT:1> >= min_length:<EOL><INDENT>gaps.append(intervals.Interval(m.span()[<NUM_LIT:0>], m.span()[<NUM_LIT:1>] - <NUM_LIT:1>))<EOL><DEDENT><DEDENT>return gaps<EOL>", "docstring": "Finds the positions of all gaps in the sequence that are at least min_length long. Returns a list of Intervals. Coords are zero-based", "id": "f364:c1:m17"}
{"signature": "def replace_non_acgt(self):", "body": "self.seq = re.sub(r'''<STR_LIT>''', '<STR_LIT:N>', self.seq)<EOL>", "docstring": "Replace all non acgt characters with an N (case insensitive)", "id": "f364:c1:m15"}
{"signature": "def trim(self, start, end):", "body": "super().trim(start, end)<EOL>self.qual = self.qual[start:len(self.qual) - end]<EOL>", "docstring": "Removes first 'start'/'end' bases off the start/end of the sequence", "id": "f364:c3:m6"}
{"signature": "def to_boulderio(infile, outfile):", "body": "seq_reader = sequences.file_reader(infile)<EOL>f_out = utils.open_file_write(outfile)<EOL>for sequence in seq_reader:<EOL><INDENT>print(\"<STR_LIT>\" + sequence.id, file=f_out)<EOL>print(\"<STR_LIT>\" + sequence.seq, file=f_out)<EOL>print(\"<STR_LIT:=>\", file=f_out)<EOL><DEDENT>utils.close(f_out)<EOL>", "docstring": "Converts input sequence file into a \"Boulder-IO format\", as used by primer3", "id": "f405:m38"}
{"signature": "def split_by_fixed_size(infile, outfiles_prefix, chunk_size, tolerance, skip_if_all_Ns=False):", "body": "file_count = <NUM_LIT:1><EOL>coords = []<EOL>small_sequences = []  <EOL>seq_reader = sequences.file_reader(infile)<EOL>f_coords = utils.open_file_write(outfiles_prefix + '<STR_LIT>')<EOL>for seq in seq_reader:<EOL><INDENT>if skip_if_all_Ns and seq.is_all_Ns():<EOL><INDENT>continue<EOL><DEDENT>if len(seq) < chunk_size:<EOL><INDENT>small_sequences.append(copy.copy(seq))<EOL><DEDENT>elif len(seq) <= chunk_size + tolerance:<EOL><INDENT>f = utils.open_file_write(outfiles_prefix + '<STR_LIT:.>' + str(file_count))<EOL>print(seq, file=f)<EOL>utils.close(f)<EOL>file_count += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>chunks = [(x,x+chunk_size) for x in range(<NUM_LIT:0>, len(seq), chunk_size)]<EOL>if chunks[-<NUM_LIT:1>][<NUM_LIT:1>] - <NUM_LIT:1> > len(seq):<EOL><INDENT>chunks[-<NUM_LIT:1>] = (chunks[-<NUM_LIT:1>][<NUM_LIT:0>], len(seq))<EOL><DEDENT>if len(chunks) > <NUM_LIT:1> and (chunks[-<NUM_LIT:1>][<NUM_LIT:1>] - chunks[-<NUM_LIT:1>][<NUM_LIT:0>]) <= tolerance:<EOL><INDENT>chunks[-<NUM_LIT:2>] = (chunks[-<NUM_LIT:2>][<NUM_LIT:0>], chunks[-<NUM_LIT:1>][<NUM_LIT:1>])<EOL>chunks.pop()<EOL><DEDENT>offset = <NUM_LIT:0><EOL>for chunk in chunks:<EOL><INDENT>if not(skip_if_all_Ns and seq.is_all_Ns(start=chunk[<NUM_LIT:0>], end=chunk[<NUM_LIT:1>]-<NUM_LIT:1>)):<EOL><INDENT>f = utils.open_file_write(outfiles_prefix + '<STR_LIT:.>' + str(file_count))<EOL>chunk_id = seq.id + '<STR_LIT::>' + str(chunk[<NUM_LIT:0>]+<NUM_LIT:1>) + '<STR_LIT:->' + str(chunk[<NUM_LIT:1>])<EOL>print(sequences.Fasta(chunk_id, seq[chunk[<NUM_LIT:0>]:chunk[<NUM_LIT:1>]]), file=f)<EOL>print(chunk_id, seq.id, offset, sep='<STR_LIT:\\t>', file=f_coords)<EOL>utils.close(f)<EOL>file_count += <NUM_LIT:1><EOL><DEDENT>offset += chunk[<NUM_LIT:1>] - chunk[<NUM_LIT:0>]<EOL><DEDENT><DEDENT><DEDENT>if len(small_sequences):<EOL><INDENT>f = utils.open_file_write(outfiles_prefix + '<STR_LIT:.>' + 
str(file_count))<EOL>file_count += <NUM_LIT:1><EOL>base_count = <NUM_LIT:0><EOL>for seq in small_sequences:<EOL><INDENT>if base_count > <NUM_LIT:0> and base_count + len(seq) > chunk_size + tolerance:<EOL><INDENT>utils.close(f)<EOL>f = utils.open_file_write(outfiles_prefix + '<STR_LIT:.>' + str(file_count))<EOL>file_count += <NUM_LIT:1><EOL>base_count = <NUM_LIT:0><EOL><DEDENT>print(seq, file=f)<EOL>base_count += len(seq)<EOL><DEDENT>utils.close(f)<EOL><DEDENT>", "docstring": "Splits  fasta/q file into separate files, with up to (chunk_size + tolerance) bases in each file", "id": "f405:m33"}
{"signature": "def scaffolds_to_contigs(infile, outfile, number_contigs=False):", "body": "seq_reader = sequences.file_reader(infile)<EOL>fout = utils.open_file_write(outfile)<EOL>for seq in seq_reader:<EOL><INDENT>contigs = seq.contig_coords()<EOL>counter = <NUM_LIT:1><EOL>for contig in contigs:<EOL><INDENT>if number_contigs:<EOL><INDENT>name = seq.id + '<STR_LIT:.>' + str(counter)<EOL>counter += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>name = '<STR_LIT:.>'.join([seq.id, str(contig.start + <NUM_LIT:1>), str(contig.end + <NUM_LIT:1>)])<EOL><DEDENT>print(sequences.Fasta(name, seq[contig.start:contig.end+<NUM_LIT:1>]), file=fout)<EOL><DEDENT><DEDENT>utils.close(fout)<EOL>", "docstring": "Makes a file of contigs from scaffolds by splitting at every N.\n       Use number_contigs=True to add .1, .2, etc onto end of each\n       contig, instead of default to append coordinates.", "id": "f405:m21"}
{"signature": "def split_by_base_count(infile, outfiles_prefix, max_bases, max_seqs=None):", "body": "seq_reader = sequences.file_reader(infile)<EOL>base_count = <NUM_LIT:0><EOL>file_count = <NUM_LIT:1><EOL>seq_count = <NUM_LIT:0><EOL>fout = None<EOL>if max_seqs is None:<EOL><INDENT>max_seqs = float('<STR_LIT>')<EOL><DEDENT>for seq in seq_reader:<EOL><INDENT>if base_count == <NUM_LIT:0>:<EOL><INDENT>fout = utils.open_file_write(outfiles_prefix + '<STR_LIT:.>' + str(file_count))<EOL>file_count += <NUM_LIT:1><EOL><DEDENT>if base_count + len(seq) > max_bases or seq_count >= max_seqs:<EOL><INDENT>if base_count == <NUM_LIT:0>:<EOL><INDENT>print(seq, file=fout)<EOL>utils.close(fout)<EOL><DEDENT>else:<EOL><INDENT>utils.close(fout)<EOL>fout = utils.open_file_write(outfiles_prefix + '<STR_LIT:.>' + str(file_count))<EOL>print(seq, file=fout)<EOL>base_count = len(seq)<EOL>file_count += <NUM_LIT:1><EOL>seq_count = <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>base_count += len(seq)<EOL>seq_count += <NUM_LIT:1><EOL>print(seq, file=fout)<EOL><DEDENT><DEDENT>utils.close(fout)<EOL>", "docstring": "Splits a fasta/q file into separate files, file size determined by number of bases.\n\n    Puts <= max_bases in each split file The exception is a single sequence >=max_bases\n    is put in its own file.  This does not split sequences.", "id": "f405:m32"}
{"signature": "def acgtn_only(infile, outfile):", "body": "f = utils.open_file_write(outfile)<EOL>for seq in sequences.file_reader(infile):<EOL><INDENT>seq.replace_non_acgt()<EOL>print(seq, file=f)<EOL><DEDENT>utils.close(f)<EOL>", "docstring": "Replace every non-acgtn (case insensitve) character with an N", "id": "f405:m0"}
{"signature": "def interleave(infile_1, infile_2, outfile, suffix1=None, suffix2=None):", "body": "seq_reader_1 = sequences.file_reader(infile_1)<EOL>seq_reader_2 = sequences.file_reader(infile_2)<EOL>f_out = utils.open_file_write(outfile)<EOL>for seq_1 in seq_reader_1:<EOL><INDENT>try:<EOL><INDENT>seq_2 = next(seq_reader_2)<EOL><DEDENT>except:<EOL><INDENT>utils.close(f_out)<EOL>raise Error('<STR_LIT>', seq_1.id, '<STR_LIT>')<EOL><DEDENT>if suffix1 is not None and not seq_1.id.endswith(suffix1):<EOL><INDENT>seq_1.id += suffix1<EOL><DEDENT>if suffix2 is not None and not seq_2.id.endswith(suffix2):<EOL><INDENT>seq_2.id += suffix2<EOL><DEDENT>print(seq_1, file=f_out)<EOL>print(seq_2, file=f_out)<EOL><DEDENT>try:<EOL><INDENT>seq_2 = next(seq_reader_2)<EOL><DEDENT>except:<EOL><INDENT>seq_2 = None<EOL><DEDENT>if seq_2 is not None:<EOL><INDENT>utils.close(f_out)<EOL>raise Error('<STR_LIT>', seq_2.id, '<STR_LIT>')<EOL><DEDENT>utils.close(f_out)<EOL>", "docstring": "Makes interleaved file from two sequence files. If used, will append suffix1 onto end\n    of every sequence name in infile_1, unless it already ends with suffix1. Similar for sufffix2.", "id": "f405:m16"}
{"signature": "def mean_length(infile, limit=None):", "body": "total = <NUM_LIT:0><EOL>count = <NUM_LIT:0><EOL>seq_reader = sequences.file_reader(infile)<EOL>for seq in seq_reader:<EOL><INDENT>total += len(seq)<EOL>count += <NUM_LIT:1><EOL>if limit is not None and count >= limit:<EOL><INDENT>break<EOL><DEDENT><DEDENT>assert count > <NUM_LIT:0><EOL>return total / count<EOL>", "docstring": "Returns the mean length of the sequences in the input file. By default uses all sequences. To limit to the first N sequences, use limit=N", "id": "f405:m18"}
{"signature": "def sort_by_size(infile, outfile, smallest_first=False):", "body": "seqs = {}<EOL>file_to_dict(infile, seqs)<EOL>seqs = list(seqs.values())<EOL>seqs.sort(key=lambda x: len(x), reverse=not smallest_first)<EOL>fout = utils.open_file_write(outfile)<EOL>for seq in seqs:<EOL><INDENT>print(seq, file=fout)<EOL><DEDENT>utils.close(fout)<EOL>", "docstring": "Sorts input sequence file by biggest sequence first, writes sorted output file. Set smallest_first=True to have smallest first", "id": "f405:m24"}
{"signature": "def count_sequences(infile):", "body": "seq_reader = sequences.file_reader(infile)<EOL>n = <NUM_LIT:0><EOL>for seq in seq_reader:<EOL><INDENT>n += <NUM_LIT:1><EOL><DEDENT>return n<EOL>", "docstring": "Returns the number of sequences in a file", "id": "f405:m3"}
{"signature": "def to_fastg(infile, outfile, circular=None):", "body": "if circular is None:<EOL><INDENT>to_circularise = set()<EOL><DEDENT>elif type(circular) is not set:<EOL><INDENT>f = utils.open_file_read(circular)<EOL>to_circularise = set([x.rstrip() for x in f.readlines()])<EOL>utils.close(f)<EOL><DEDENT>else:<EOL><INDENT>to_circularise = circular<EOL><DEDENT>seq_reader = sequences.file_reader(infile)<EOL>fout = utils.open_file_write(outfile)<EOL>nodes = <NUM_LIT:1><EOL>for seq in seq_reader:<EOL><INDENT>new_id = '<STR_LIT:_>'.join([<EOL>'<STR_LIT>', str(nodes),<EOL>'<STR_LIT>', str(len(seq)),<EOL>'<STR_LIT>', '<STR_LIT:1>',<EOL>'<STR_LIT>', seq.id<EOL>])<EOL>if seq.id in to_circularise:<EOL><INDENT>seq.id = new_id + '<STR_LIT::>' + new_id + '<STR_LIT:;>'<EOL>print(seq, file=fout)<EOL>seq.revcomp()<EOL>seq.id = new_id + \"<STR_LIT>\" + new_id + \"<STR_LIT>\"<EOL>print(seq, file=fout)<EOL><DEDENT>else:<EOL><INDENT>seq.id = new_id + '<STR_LIT:;>'<EOL>print(seq, file=fout)<EOL>seq.revcomp()<EOL>seq.id = new_id + \"<STR_LIT>\"<EOL>print(seq, file=fout)<EOL><DEDENT>nodes += <NUM_LIT:1><EOL><DEDENT>utils.close(fout)<EOL>", "docstring": "Writes a FASTG file in SPAdes format from input file. Currently only whether or not a sequence is circular is supported. Put circular=set of ids, or circular=filename to make those sequences circular in the output. Puts coverage=1 on all contigs", "id": "f405:m26"}
{"signature": "def split_by_fixed_size_onefile(infile, outfile, chunk_size, tolerance, skip_if_all_Ns=False):", "body": "seq_reader = sequences.file_reader(infile)<EOL>f_out = utils.open_file_write(outfile)<EOL>for seq in seq_reader:<EOL><INDENT>for i in range(<NUM_LIT:0>, len(seq), chunk_size):<EOL><INDENT>if i + chunk_size + tolerance >= len(seq):<EOL><INDENT>end = len(seq)<EOL><DEDENT>else:<EOL><INDENT>end = i + chunk_size<EOL><DEDENT>subseq = seq.subseq(i, end)<EOL>if not (skip_if_all_Ns and subseq.is_all_Ns()):<EOL><INDENT>subseq.id += '<STR_LIT:.>' + str(i+<NUM_LIT:1>) + '<STR_LIT:_>' + str(end)<EOL>print(subseq, file=f_out)<EOL><DEDENT>if end == len(seq):<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>utils.close(f_out)<EOL>", "docstring": "Splits each sequence in infile into chunks of fixed size, last chunk can be up to\n       (chunk_size + tolerance) in length", "id": "f405:m34"}
{"signature": "def caf_to_fastq(infile, outfile, min_length=<NUM_LIT:0>, trim=False):", "body": "caf_reader = caf.file_reader(infile)<EOL>fout = utils.open_file_write(outfile)<EOL>for c in caf_reader:<EOL><INDENT>if trim:<EOL><INDENT>if c.clip_start is not None and c.clip_end is not None:<EOL><INDENT>c.seq.seq = c.seq.seq[c.clip_start:c.clip_end + <NUM_LIT:1>]<EOL>c.seq.qual = c.seq.qual[c.clip_start:c.clip_end + <NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>', c.id, file=sys.stderr)<EOL><DEDENT><DEDENT>if len(c.seq) >= min_length:<EOL><INDENT>print(c.seq, file=fout)<EOL><DEDENT><DEDENT>utils.close(fout)<EOL>", "docstring": "Convert a CAF file to fastq. Reads shorter than min_length are not output. If clipping information is in the CAF file (with a line Clipping QUAL ...) and trim=True, then trim the reads", "id": "f405:m1"}
{"signature": "def intersection(self, i):", "body": "if self.intersects(i):<EOL><INDENT>return Interval(max(self.start, i.start), min(self.end, i.end))<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "If intervals intersect, returns their intersection, otherwise returns None", "id": "f408:c1:m12"}
{"signature": "def union_fill_gap(self, i):", "body": "return Interval(min(self.start, i.start), max(self.end, i.end))<EOL>", "docstring": "Like union, but ignores whether the two intervals intersect or not", "id": "f408:c1:m11"}
{"signature": "def remove_contained_in_list(l):", "body": "i = <NUM_LIT:0><EOL>l.sort()<EOL>while i < len(l) - <NUM_LIT:1>:<EOL><INDENT>if l[i+<NUM_LIT:1>].contains(l[i]):<EOL><INDENT>l.pop(i)<EOL><DEDENT>elif l[i].contains(l[i+<NUM_LIT:1>]):<EOL><INDENT>l.pop(i+<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>i += <NUM_LIT:1><EOL><DEDENT><DEDENT>", "docstring": "Sorts list in place, then removes any intervals that are completely\n       contained inside another interval", "id": "f408:m2"}
{"signature": "def intersection(l1, l2):", "body": "if len(l1) == <NUM_LIT:0> or len(l2) == <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>out = []<EOL>l2_pos = <NUM_LIT:0><EOL>for l in l1:<EOL><INDENT>while l2_pos < len(l2) and l2[l2_pos].end < l.start:<EOL><INDENT>l2_pos += <NUM_LIT:1><EOL><DEDENT>if l2_pos == len(l2):<EOL><INDENT>break<EOL><DEDENT>while l2_pos < len(l2) and l.intersects(l2[l2_pos]):<EOL><INDENT>out.append(l.intersection(l2[l2_pos]))<EOL>l2_pos += <NUM_LIT:1><EOL><DEDENT>l2_pos = max(<NUM_LIT:0>, l2_pos - <NUM_LIT:1>)<EOL><DEDENT>return out<EOL>", "docstring": "Returns intersection of two lists.  Assumes the lists are sorted by start positions", "id": "f408:m0"}
{"signature": "def length_sum_from_list(l):", "body": "return sum([len(x) for x in l])<EOL>", "docstring": "Returns total length of intervals from a list", "id": "f408:m3"}
{"signature": "def contains(self, i):", "body": "return self.start <= i.start and i.end <= self.end<EOL>", "docstring": "Returns true iff this interval contains the interval i", "id": "f408:c1:m9"}
{"signature": "def intersects(self, i):", "body": "return self.start <= i.end and i.start <= self.end<EOL>", "docstring": "Returns true iff this interval intersects the interval i", "id": "f408:c1:m8"}
{"signature": "def union(self, i):", "body": "if self.intersects(i) or self.end + <NUM_LIT:1> == i.start or i.end + <NUM_LIT:1> == self.start:<EOL><INDENT>return Interval(min(self.start, i.start), max(self.end, i.end))<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "If intervals intersect, returns their union, otherwise returns None", "id": "f408:c1:m10"}
{"signature": "@classmethod<EOL><INDENT>def to_value(cls, instance):<DEDENT>", "body": "if not isinstance(instance, OctaveUserClass) or not instance._attrs:<EOL><INDENT>return dict()<EOL><DEDENT>dtype = []<EOL>values = []<EOL>for attr in instance._attrs:<EOL><INDENT>dtype.append((str(attr), object))<EOL>values.append(getattr(instance, attr))<EOL><DEDENT>struct = np.array([tuple(values)], dtype)<EOL>return MatlabObject(struct, instance._name)<EOL>", "docstring": "Convert to a value to send to Octave.", "id": "f410:c7:m2"}
{"signature": "@classmethod<EOL><INDENT>def from_value(cls, value):<DEDENT>", "body": "instance = OctaveUserClass.__new__(cls)<EOL>instance._address = '<STR_LIT>' % (instance._name, id(instance))<EOL>instance._ref().push(instance._address, value)<EOL>return instance<EOL>", "docstring": "This is how an instance is created when we read a\n           MatlabObject from a MAT file.", "id": "f410:c7:m1"}
{"signature": "def _make_user_class(session, name):", "body": "attrs = session.eval('<STR_LIT>' % name, nout=<NUM_LIT:1>).ravel().tolist()<EOL>methods = session.eval('<STR_LIT>' % name, nout=<NUM_LIT:1>).ravel().tolist()<EOL>ref = weakref.ref(session)<EOL>doc = _DocDescriptor(ref, name)<EOL>values = dict(__doc__=doc, _name=name, _ref=ref, _attrs=attrs,<EOL>__module__='<STR_LIT>')<EOL>for method in methods:<EOL><INDENT>doc = _MethodDocDescriptor(ref, name, method)<EOL>cls_name = '<STR_LIT>' % (name, method)<EOL>method_values = dict(__doc__=doc)<EOL>method_cls = type(str(cls_name),<EOL>(OctaveUserClassMethod,), method_values)<EOL>values[method] = method_cls(ref, method, name)<EOL><DEDENT>for attr in attrs:<EOL><INDENT>values[attr] = OctaveUserClassAttr(ref, attr, attr)<EOL><DEDENT>return type(str(name), (OctaveUserClass,), values)<EOL>", "docstring": "Make an Octave class for a given class name", "id": "f410:m0"}
{"signature": "@classmethod<EOL><INDENT>def to_pointer(cls, instance):<DEDENT>", "body": "return OctavePtr(instance._ref, instance._name, instance._address)<EOL>", "docstring": "Get a pointer to the private object.", "id": "f410:c7:m3"}
{"signature": "def demo(delay=<NUM_LIT:1>, interactive=True):", "body": "script = \"\"\"<STR_LIT>\"\"\"<EOL>if not PY2:<EOL><INDENT>script = script.replace('<STR_LIT>', '<STR_LIT:input>')<EOL><DEDENT>for line in script.strip().split('<STR_LIT:\\n>'):<EOL><INDENT>line = line.strip()<EOL>if not '<STR_LIT>' in line:<EOL><INDENT>time.sleep(delay)<EOL>print(\"<STR_LIT>\".format(line))<EOL>time.sleep(delay)<EOL><DEDENT>if not interactive:<EOL><INDENT>if '<STR_LIT>' in line or '<STR_LIT>' in line or '<STR_LIT>' in line:<EOL><INDENT>line = '<STR_LIT>'<EOL><DEDENT><DEDENT>exec(line)<EOL><DEDENT>", "docstring": "Play a demo script showing most of the oct2py api features.\n\nParameters\n==========\ndelay : float\n    Time between each command in seconds.", "id": "f411:m0"}
{"signature": "def helper(self, base, keys, types):", "body": "for key, type_ in zip(keys, types):<EOL><INDENT>if not type(base[key]) == type_:<EOL><INDENT>try:<EOL><INDENT>assert type_(base[key]) == base[key], key<EOL><DEDENT>except ValueError:<EOL><INDENT>assert np.allclose(type_(base[key]), base[key])<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Perform type checking of the values\n\nParameters\n==========\nbase : dict\n    Sub-dictionary we are accessing.\nkeys : array-like\n    List of keys to test in base.\ntypes : array-like\n    List of expected return types for the keys.", "id": "f412:c0:m2"}
{"signature": "def __init__(self, shell):", "body": "super(OctaveMagics, self).__init__(shell)<EOL>self._oct = oct2py.octave<EOL>self._display = display<EOL>", "docstring": "Parameters\n----------\nshell : IPython shell", "id": "f418:c0:m0"}
{"signature": "def run(self):", "body": "octave = Oct2Py()<EOL>octave.push('<STR_LIT:name>', self.getName())<EOL>name = octave.pull('<STR_LIT:name>')<EOL>now = datetime.datetime.now()<EOL>print(\"<STR_LIT>\".format(self.getName(), name, now))<EOL>octave.exit()<EOL>try:<EOL><INDENT>assert self.getName() == name<EOL><DEDENT>except AssertionError:  <EOL><INDENT>raise Oct2PyError('<STR_LIT>')<EOL><DEDENT>return<EOL>", "docstring": "Create a unique instance of Octave and verify namespace uniqueness.\n\nRaises\n======\nOct2PyError\n    If the thread does not sucessfully demonstrate independence", "id": "f422:c0:m0"}
{"signature": "def _isobject(self, name, exist):", "body": "if exist in [<NUM_LIT:2>, <NUM_LIT:5>]:<EOL><INDENT>return False<EOL><DEDENT>cmd = '<STR_LIT>' % name<EOL>resp = self._engine.eval(cmd, silent=True).strip()<EOL>return resp == '<STR_LIT>'<EOL>", "docstring": "Test whether the name is an object.", "id": "f423:c0:m18"}
{"signature": "def _get_function_ptr(self, name):", "body": "func = _make_function_ptr_instance<EOL>self._function_ptrs.setdefault(name, func(self, name))<EOL>return self._function_ptrs[name]<EOL>", "docstring": "Get or create a function pointer of the given name.", "id": "f423:c0:m19"}
{"signature": "def pull(self, var, timeout=None, verbose=True):", "body": "if isinstance(var, (str, unicode)):<EOL><INDENT>var = [var]<EOL><DEDENT>outputs = []<EOL>for name in var:<EOL><INDENT>exist = self._exist(name)<EOL>if exist == <NUM_LIT:1>:<EOL><INDENT>outputs.append(self.feval('<STR_LIT>', '<STR_LIT>', name,<EOL>timeout=timeout, verbose=verbose))<EOL><DEDENT>else:<EOL><INDENT>outputs.append(self.get_pointer(name, timeout=timeout))<EOL><DEDENT><DEDENT>if len(outputs) == <NUM_LIT:1>:<EOL><INDENT>return outputs[<NUM_LIT:0>]<EOL><DEDENT>return outputs<EOL>", "docstring": "Retrieve a value or values from the Octave session.\n\nParameters\n----------\nvar : str or list\n    Name of the variable(s) to retrieve.\ntimeout : float, optional.\n    Time to wait for response from Octave (per line).\n**kwargs: Deprecated kwargs, ignored.\n\nReturns\n-------\nout : object\n    Object returned by Octave.\n\nRaises\n------\nOct2PyError\n    If the variable does not exist in the Octave session.\n\nExamples\n--------\n  >>> from oct2py import octave\n  >>> y = [1, 2]\n  >>> octave.push('y', y)\n  >>> octave.pull('y')\n  array([[ 1.,  2.]])\n  >>> octave.push(['x', 'y'], ['spam', [1, 2, 3, 4]])\n  >>> octave.pull(['x', 'y'])  # doctest: +SKIP\n  [u'spam', array([[1, 2, 3, 4]])]", "id": "f423:c0:m7"}
{"signature": "def get_pointer(self, name, timeout=None):", "body": "exist = self._exist(name)<EOL>isobject = self._isobject(name, exist)<EOL>if exist == <NUM_LIT:0>:<EOL><INDENT>raise Oct2PyError('<STR_LIT>' % name)<EOL><DEDENT>elif exist == <NUM_LIT:1>:<EOL><INDENT>return _make_variable_ptr_instance(self, name)<EOL><DEDENT>elif isobject:<EOL><INDENT>return self._get_user_class(name)<EOL><DEDENT>elif exist in [<NUM_LIT:2>, <NUM_LIT:3>, <NUM_LIT:5>]:<EOL><INDENT>return self._get_function_ptr(name)<EOL><DEDENT>raise Oct2PyError('<STR_LIT>' % name)<EOL>", "docstring": "Get a pointer to a named object in the Octave workspace.\n\n        Parameters\n        ----------\n        name: str\n            The name of the object in the Octave workspace.\n        timemout: float, optional.\n            Time to wait for response from Octave (per line).\n\n        Examples\n        --------\n        >>> from oct2py import octave\n        >>> octave.eval('foo = [1, 2];')\n        >>> ptr = octave.get_pointer('foo')\n        >>> ptr.value\n        array([[ 1.,  2.]])\n        >>> ptr.address\n        'foo'\n        >>> # Can be passed as an argument\n        >>> octave.disp(ptr)  # doctest: +SKIP\n        1  2\n\n        >>> from oct2py import octave\n        >>> sin = octave.get_pointer('sin')  # equivalent to `octave.sin`\n        >>> sin.address\n        '@sin'\n        >>> x = octave.quad(sin, 0, octave.pi())\n        >>> x\n        2.0\n\n        Notes\n        -----\n        Pointers can be passed to `feval` or dynamic functions as function arguments.  A pointer passed as a nested value will be passed by value instead.\n\n        Raises\n        ------\n        Oct2PyError\n            If the variable does not exist in the Octave session or is of\n            unknown type.\n\n        Returns\n        -------\n        A variable, object, user class, or function pointer as appropriate.", "id": "f423:c0:m8"}
{"signature": "def _parse_error(self, err):", "body": "self.logger.debug(err)<EOL>stack = err.get('<STR_LIT>', [])<EOL>if not err['<STR_LIT:message>'].startswith('<STR_LIT>'):<EOL><INDENT>err['<STR_LIT:message>'] = '<STR_LIT>' + err['<STR_LIT:message>']<EOL><DEDENT>errmsg = '<STR_LIT>' % err['<STR_LIT:message>']<EOL>if not isinstance(stack, StructArray):<EOL><INDENT>return errmsg<EOL><DEDENT>errmsg += '<STR_LIT>'<EOL>for item in stack[:-<NUM_LIT:1>]:<EOL><INDENT>errmsg += '<STR_LIT>' % item<EOL>try:<EOL><INDENT>errmsg += '<STR_LIT>' % item<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return errmsg<EOL>", "docstring": "Create a traceback for an Octave evaluation error.", "id": "f423:c0:m14"}
{"signature": "def _feval(self, func_name, func_args=(), dname='<STR_LIT>', nout=<NUM_LIT:0>,<EOL>timeout=None, stream_handler=None, store_as='<STR_LIT>', plot_dir=None):", "body": "engine = self._engine<EOL>if engine is None:<EOL><INDENT>raise Oct2PyError('<STR_LIT>')<EOL><DEDENT>out_file = osp.join(self.temp_dir, '<STR_LIT>')<EOL>out_file = out_file.replace(osp.sep, '<STR_LIT:/>')<EOL>in_file = osp.join(self.temp_dir, '<STR_LIT>')<EOL>in_file = in_file.replace(osp.sep, '<STR_LIT:/>')<EOL>func_args = list(func_args)<EOL>ref_indices = []<EOL>for (i, value) in enumerate(func_args):<EOL><INDENT>if isinstance(value, OctavePtr):<EOL><INDENT>ref_indices.append(i + <NUM_LIT:1>)<EOL>func_args[i] = value.address<EOL><DEDENT><DEDENT>ref_indices = np.array(ref_indices)<EOL>req = dict(func_name=func_name, func_args=tuple(func_args),<EOL>dname=dname or '<STR_LIT>', nout=nout,<EOL>store_as=store_as or '<STR_LIT>',<EOL>ref_indices=ref_indices)<EOL>write_file(req, out_file, oned_as=self._oned_as,<EOL>convert_to_float=self.convert_to_float)<EOL>engine.stream_handler = stream_handler or self.logger.info<EOL>if timeout is None:<EOL><INDENT>timeout = self.timeout<EOL><DEDENT>try:<EOL><INDENT>engine.eval('<STR_LIT>' % (out_file, in_file),<EOL>timeout=timeout)<EOL><DEDENT>except KeyboardInterrupt as e:<EOL><INDENT>stream_handler(engine.repl.interrupt())<EOL>raise<EOL><DEDENT>except TIMEOUT:<EOL><INDENT>stream_handler(engine.repl.interrupt())<EOL>raise Oct2PyError('<STR_LIT>')<EOL><DEDENT>except EOF:<EOL><INDENT>stream_handler(engine.repl.child.before)<EOL>self.restart()<EOL>raise Oct2PyError('<STR_LIT>')<EOL><DEDENT>resp = read_file(in_file, self)<EOL>if resp['<STR_LIT>']:<EOL><INDENT>msg = self._parse_error(resp['<STR_LIT>'])<EOL>raise Oct2PyError(msg)<EOL><DEDENT>result = resp['<STR_LIT:result>'].ravel().tolist()<EOL>if isinstance(result, list) and len(result) == <NUM_LIT:1>:<EOL><INDENT>result = result[<NUM_LIT:0>]<EOL><DEDENT>if (isinstance(result, Cell) and<EOL>result.size == 
<NUM_LIT:1> and<EOL>isinstance(result[<NUM_LIT:0>], string_types) and<EOL>result[<NUM_LIT:0>] == '<STR_LIT>'):<EOL><INDENT>result = None<EOL><DEDENT>if plot_dir:<EOL><INDENT>self._engine.make_figures(plot_dir)<EOL><DEDENT>return result<EOL>", "docstring": "Run the given function with the given args.", "id": "f423:c0:m13"}
{"signature": "def _get_user_class(self, name):", "body": "self._user_classes.setdefault(name, _make_user_class(self, name))<EOL>return self._user_classes[name]<EOL>", "docstring": "Get or create a user class of the given type.", "id": "f423:c0:m20"}
{"signature": "def eval(self, cmds, verbose=True, timeout=None, stream_handler=None,<EOL>temp_dir=None, plot_dir=None, plot_name='<STR_LIT>', plot_format='<STR_LIT>',<EOL>plot_width=None, plot_height=None, plot_res=None,<EOL>nout=<NUM_LIT:0>, **kwargs):", "body": "if isinstance(cmds, (str, unicode)):<EOL><INDENT>cmds = [cmds]<EOL><DEDENT>prev_temp_dir = self.temp_dir<EOL>self.temp_dir = temp_dir or self.temp_dir<EOL>prev_log_level = self.logger.level<EOL>if kwargs.get('<STR_LIT>') is False:<EOL><INDENT>self.logger.setLevel(logging.WARN)<EOL><DEDENT>for name in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if name not in kwargs:<EOL><INDENT>continue<EOL><DEDENT>msg = '<STR_LIT>'<EOL>warnings.warn(msg % name, stacklevel=<NUM_LIT:2>)<EOL><DEDENT>return_both = kwargs.pop('<STR_LIT>', False)<EOL>lines = []<EOL>if return_both and not stream_handler:<EOL><INDENT>stream_handler = lines.append<EOL><DEDENT>ans = None<EOL>for cmd in cmds:<EOL><INDENT>resp = self.feval('<STR_LIT>', '<STR_LIT>', cmd,<EOL>nout=nout, timeout=timeout,<EOL>stream_handler=stream_handler,<EOL>verbose=verbose, plot_dir=plot_dir,<EOL>plot_name=plot_name, plot_format=plot_format,<EOL>plot_width=plot_width, plot_height=plot_height,<EOL>plot_res=plot_res)<EOL>if resp is not None:<EOL><INDENT>ans = resp<EOL><DEDENT><DEDENT>self.temp_dir = prev_temp_dir<EOL>self.logger.setLevel(prev_log_level)<EOL>if return_both:<EOL><INDENT>return '<STR_LIT:\\n>'.join(lines), ans<EOL><DEDENT>return ans<EOL>", "docstring": "Evaluate an Octave command or commands.\n\nParameters\n----------\ncmds : str or list\n    Commands(s) to pass to Octave.\nverbose : bool, optional\n     Log Octave output at INFO level.  If False, log at DEBUG level.\nstream_handler: callable, optional\n    A function that is called for each line of output from the\n    evaluation.\ntimeout : float, optional\n    Time to wait for response from Octave (per line).  
If not given,\n    the instance `timeout` is used.\nnout : int, optional.\n    The desired number of returned values, defaults to 0.  If nout\n    is 0, the `ans` will be returned as the return value.\ntemp_dir: str, optional\n    If specified, the session's MAT files will be created in the\n    directory, otherwise a the instance `temp_dir` is used.\n    a shared memory (tmpfs) path.\nplot_dir: str, optional\n    If specificed, save the session's plot figures to the plot\n    directory instead of displaying the plot window.\nplot_name : str, optional\n    Saved plots will start with `plot_name` and\n    end with \"_%%.xxx' where %% is the plot number and\n    xxx is the `plot_format`.\nplot_format: str, optional\n    The format in which to save the plot (PNG by default).\nplot_width: int, optional\n    The plot with in pixels.\nplot_height: int, optional\n    The plot height in pixels.\nplot_res: int, optional\n    The plot resolution in pixels per inch.\n**kwargs Deprectated kwargs.\n\nExamples\n--------\n>>> from oct2py import octave\n>>> octave.eval('disp(\"hello\")') # doctest: +SKIP\nhello\n>>> x = octave.eval('round(quad(@sin, 0, pi/2));')\n>>> x\n1.0\n\n>>> a = octave.eval('disp(\"hello\");1;')  # doctest: +SKIP\nhello\n>>> a = octave.eval('disp(\"hello\");1;', verbose=False)\n>>> a\n1.0\n\n>>> from oct2py import octave\n>>> lines = []\n>>> octave.eval('for i = 1:3; disp(i);end', \\\n                stream_handler=lines.append)\n>>> lines  # doctest: +SKIP\n[' 1', ' 2', ' 3']\n\nReturns\n-------\nout : object\n    Octave \"ans\" variable, or None.\n\nNotes\n-----\nThe deprecated `log` kwarg will temporarily set the `logger` level to\n`WARN`.  Using the `logger` settings directly is preferred.\nThe deprecated `return_both` kwarg will still work, but the preferred\nmethod is to use the `stream_handler`.  
If `stream_handler` is given,\nthe `return_both` kwarg will be honored but will give an empty string\nas the reponse.\n\nRaises\n------\nOct2PyError\n    If the command(s) fail.", "id": "f423:c0:m11"}
{"signature": "def feval(self, func_path, *func_args, **kwargs):", "body": "if not self._engine:<EOL><INDENT>raise Oct2PyError('<STR_LIT>')<EOL><DEDENT>nout = kwargs.get('<STR_LIT>', None)<EOL>if nout is None:<EOL><INDENT>nout = <NUM_LIT:1><EOL><DEDENT>plot_dir = kwargs.get('<STR_LIT>')<EOL>settings = dict(backend='<STR_LIT>' if plot_dir else self.backend,<EOL>format=kwargs.get('<STR_LIT>'),<EOL>name=kwargs.get('<STR_LIT>'),<EOL>width=kwargs.get('<STR_LIT>'),<EOL>height=kwargs.get('<STR_LIT>'),<EOL>resolution=kwargs.get('<STR_LIT>'))<EOL>self._engine.plot_settings = settings<EOL>dname = osp.dirname(func_path)<EOL>fname = osp.basename(func_path)<EOL>func_name, ext = osp.splitext(fname)<EOL>if ext and not ext == '<STR_LIT>':<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if func_name == '<STR_LIT>':<EOL><INDENT>raise Oct2PyError('<STR_LIT>' +<EOL>'<STR_LIT>')<EOL><DEDENT>stream_handler = kwargs.get('<STR_LIT>')<EOL>verbose = kwargs.get('<STR_LIT>', True)<EOL>store_as = kwargs.get('<STR_LIT>', '<STR_LIT>')<EOL>timeout = kwargs.get('<STR_LIT>', self.timeout)<EOL>if not stream_handler:<EOL><INDENT>stream_handler = self.logger.info if verbose else self.logger.debug<EOL><DEDENT>return self._feval(func_name, func_args, dname=dname, nout=nout,<EOL>timeout=timeout, stream_handler=stream_handler,<EOL>store_as=store_as, plot_dir=plot_dir)<EOL>", "docstring": "Run a function in Octave and return the result.\n\n        Parameters\n        ----------\n        func_path: str\n            Name of function to run or a path to an m-file.\n        func_args: object, optional\n            Args to send to the function.\n        nout: int, optional\n            Desired number of return arguments, defaults to 1.\n        store_as: str, optional\n            If given, saves the result to the given Octave variable name\n            instead of returning it.\n        verbose : bool, optional\n            Log Octave output at INFO level.  
If False, log at DEBUG level.\n        stream_handler: callable, optional\n            A function that is called for each line of output from the\n            evaluation.\n        timeout: float, optional\n            The timeout in seconds for the call.\n        plot_dir: str, optional\n            If specificed, save the session's plot figures to the plot\n            directory instead of displaying the plot window.\n        plot_name : str, optional\n            Saved plots will start with `plot_name` and\n            end with \"_%%.xxx' where %% is the plot number and\n            xxx is the `plot_format`.\n        plot_format: str, optional\n            The format in which to save the plot.\n        plot_width: int, optional\n            The plot with in pixels.\n        plot_height: int, optional\n            The plot height in pixels.\n\n        Notes\n        -----\n        The function arguments passed follow Octave calling convention, not\n        Python. That is, all values must be passed as a comma separated list,\n        not using `x=foo` assignment.\n\n        Examples\n        --------\n        >>> from oct2py import octave\n        >>> cell = octave.feval('cell', 10, 10, 10)\n        >>> cell.shape\n        (10, 10, 10)\n\n        >>> from oct2py import octave\n        >>> x = octave.feval('linspace', 0, octave.pi() / 2)\n        >>> x.shape\n        (1, 100)\n\n        >>> from oct2py import octave\n        >>> x = octave.feval('svd', octave.hilb(3))\n        >>> x\n        array([[ 1.40831893],\n               [ 0.12232707],\n               [ 0.00268734]])\n        >>> # specify three return values\n        >>> (u, v, d) = octave.feval('svd', octave.hilb(3), nout=3)\n        >>> u.shape\n        (3, 3)\n\n        Returns\n        -------\n        The Python value(s) returned by the Octave function call.", "id": "f423:c0:m10"}
{"signature": "def _exist(self, name):", "body": "cmd = '<STR_LIT>' % name<EOL>resp = self._engine.eval(cmd, silent=True).strip()<EOL>exist = int(resp.split()[-<NUM_LIT:1>])<EOL>if exist == <NUM_LIT:0>:<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise Oct2PyError(msg % name)<EOL><DEDENT>return exist<EOL>", "docstring": "Test whether a name exists and return the name code.\n\n        Raises an error when the name does not exist.", "id": "f423:c0:m17"}
{"signature": "def get_log(name=None):", "body": "if name is None:<EOL><INDENT>name = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>name = '<STR_LIT>' + name<EOL><DEDENT>log = logging.getLogger(name)<EOL>log.setLevel(logging.INFO)<EOL>return log<EOL>", "docstring": "Return a console logger.\n\n    Output may be sent to the logger using the `debug`, `info`, `warning`,\n    `error` and `critical` methods.\n\n    Parameters\n    ----------\n    name : str\n        Name of the log.\n\n    References\n    ----------\n    .. [1] Logging facility for Python,\n           http://docs.python.org/library/logging.html", "id": "f424:m0"}
{"signature": "def large_array_put(self):", "body": "self.octave.push('<STR_LIT:x>', self.array)<EOL>", "docstring": "Create a large matrix and load it into the octave session.", "id": "f425:c0:m2"}
{"signature": "def raw_speed(self):", "body": "self.octave.eval(\"<STR_LIT>\")<EOL>", "docstring": "Run a fast Octave command and see how long it takes.", "id": "f425:c0:m1"}
{"signature": "def __new__(cls, value, session=None):", "body": "value = np.asarray(value)<EOL>if (value.shape[value.ndim - <NUM_LIT:1>] == <NUM_LIT:1>):<EOL><INDENT>value = value.squeeze(axis=value.ndim - <NUM_LIT:1>)<EOL><DEDENT>value = np.atleast_1d(value)<EOL>if not session:<EOL><INDENT>return value.view(cls)<EOL><DEDENT>obj = np.empty(value.size, dtype=value.dtype).view(cls)<EOL>for (i, item) in enumerate(value.ravel()):<EOL><INDENT>for name in value.dtype.names:<EOL><INDENT>obj[i][name] = _extract(item[name], session)<EOL><DEDENT><DEDENT>return obj.reshape(value.shape)<EOL>", "docstring": "Create a struct array from a value and optional Octave session.", "id": "f426:c1:m0"}
{"signature": "def __getitem__(self, item):", "body": "item = np.recarray.__getitem__(self, item)<EOL>if isinstance(item, np.ndarray) and item.dtype.kind == '<STR_LIT:O>':<EOL><INDENT>return Cell(item)<EOL><DEDENT>return item<EOL>", "docstring": "Return object arrays as cells and all other values unchanged.", "id": "f426:c1:m3"}
{"signature": "def __new__(cls, value, session=None):", "body": "value = np.asarray(value, dtype=object)<EOL>if (value.shape[value.ndim - <NUM_LIT:1>] == <NUM_LIT:1>):<EOL><INDENT>value = value.squeeze(axis=value.ndim - <NUM_LIT:1>)<EOL><DEDENT>value = np.atleast_1d(value)<EOL>if not session:<EOL><INDENT>return value.view(cls)<EOL><DEDENT>obj = np.empty(value.size, dtype=object).view(cls)<EOL>for (i, item) in enumerate(value.ravel()):<EOL><INDENT>obj[i] = _extract(item, session)<EOL><DEDENT>return obj.reshape(value.shape)<EOL>", "docstring": "Create a cell array from a value and optional Octave session.", "id": "f426:c2:m0"}
{"signature": "@register.inclusion_tag(\"<STR_LIT>\", takes_context=True)<EOL>def stored_messages_list(context, num_elements=<NUM_LIT:10>):", "body": "if \"<STR_LIT:user>\" in context:<EOL><INDENT>user = context[\"<STR_LIT:user>\"]<EOL>if user.is_authenticated():<EOL><INDENT>qs = Inbox.objects.select_related(\"<STR_LIT:message>\").filter(user=user)<EOL>return {<EOL>\"<STR_LIT>\": qs[:num_elements],<EOL>\"<STR_LIT:count>\": qs.count(),<EOL>}<EOL><DEDENT><DEDENT>", "docstring": "Renders a list of unread stored messages for the current user", "id": "f444:m0"}
{"signature": "@login_required<EOL>@api_view(['<STR_LIT:POST>'])<EOL>def mark_all_read(request):", "body": "from .settings import stored_messages_settings<EOL>backend = stored_messages_settings.STORAGE_BACKEND()<EOL>backend.inbox_purge(request.user)<EOL>return Response({\"<STR_LIT:message>\": \"<STR_LIT>\"})<EOL>", "docstring": "Mark all messages as read (i.e. delete from inbox) for current logged in user", "id": "f447:m0"}
{"signature": "def _prepare_messages(self, messages):", "body": "for message in messages:<EOL><INDENT>if not self.backend.can_handle(message):<EOL><INDENT>message._prepare()<EOL><DEDENT><DEDENT>", "docstring": "Like the base class method, prepares a list of messages for storage\nbut avoid to do this for `models.Message` instances.", "id": "f450:c0:m4"}
{"signature": "def _store(self, messages, response, *args, **kwargs):", "body": "contrib_messages = []<EOL>if self.user.is_authenticated():<EOL><INDENT>if not messages:<EOL><INDENT>self.backend.inbox_purge(self.user)<EOL><DEDENT>else:<EOL><INDENT>for m in messages:<EOL><INDENT>try:<EOL><INDENT>self.backend.inbox_store([self.user], m)<EOL><DEDENT>except MessageTypeNotSupported:<EOL><INDENT>contrib_messages.append(m)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>super(StorageMixin, self)._store(contrib_messages, response, *args, **kwargs)<EOL>", "docstring": "persistent messages are already in the database inside the 'archive',\nso we can say they're already \"stored\".\nHere we put them in the inbox, or remove from the inbox in case the\nmessages were iterated.\n\nmessages contains only new msgs if self.used==True\nelse contains both new and unread messages", "id": "f450:c0:m3"}
{"signature": "def _get(self, *args, **kwargs):", "body": "messages, all_retrieved = super(StorageMixin, self)._get(*args, **kwargs)<EOL>if self.user.is_authenticated():<EOL><INDENT>inbox_messages = self.backend.inbox_list(self.user)<EOL><DEDENT>else:<EOL><INDENT>inbox_messages = []<EOL><DEDENT>return messages + inbox_messages, all_retrieved<EOL>", "docstring": "Retrieve unread messages for current user, both from the inbox and\nfrom other storages", "id": "f450:c0:m1"}
{"signature": "def import_from_string(val, setting_name):", "body": "try:<EOL><INDENT>parts = val.split('<STR_LIT:.>')<EOL>module_path, class_name = '<STR_LIT:.>'.join(parts[:-<NUM_LIT:1>]), parts[-<NUM_LIT:1>]<EOL>module = importlib.import_module(module_path)<EOL>return getattr(module, class_name)<EOL><DEDENT>except ImportError as e:<EOL><INDENT>msg = \"<STR_LIT>\" % (val, setting_name,<EOL>e.__class__.__name__, e)<EOL>raise ImportError(msg)<EOL><DEDENT>", "docstring": "Attempt to import a class from a string representation.", "id": "f453:m1"}
{"signature": "def perform_import(val, setting_name):", "body": "if isinstance(val, six.string_types):<EOL><INDENT>return import_from_string(val, setting_name)<EOL><DEDENT>elif isinstance(val, (list, tuple)):<EOL><INDENT>return [import_from_string(item, setting_name) for item in val]<EOL><DEDENT>return val<EOL>", "docstring": "If the given setting is a string import notation,\nthen perform the necessary import or imports.", "id": "f453:m0"}
{"signature": "def _fromJSON(self, json_msg):", "body": "return Message(**json.loads(force_text(json_msg)))<EOL>", "docstring": "Return a Message instance built from data contained in a JSON string", "id": "f456:c0:m3"}
{"signature": "def create_message(self, level, msg_text, extra_tags, date=None):", "body": "raise NotImplementedError()<EOL>", "docstring": "Create and return a `Message` instance.\nInstance types depend on backends implementation.\n\nParams:\n    `level`: message level (see django.contrib.messages)\n    `msg_text`: what you think it is\n    `extra_tags`: see django.contrib.messages\n    `date`: a DateTime (optional)\n\nReturn:\n    `Message` instance", "id": "f457:c0:m0"}
{"signature": "def _flush(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Clear all backend data.\nWarning: heavily destructive! Here for convenience, not used by the API anyway.\n\nParams:\n    None\n\nReturn:\n    None", "id": "f457:c0:m10"}
{"signature": "def expired_messages_cleanup(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Remove messages that have been expired.\n\nParams:\n    None\n\nReturn:\n   None", "id": "f457:c0:m9"}
{"signature": "def inbox_list(self, user):", "body": "raise NotImplementedError()<EOL>", "docstring": "Retrieve all the messages in `user`'s Inbox.\n\nParams:\n    `user`: Django User instance\n\nReturn:\n    An iterable containing `Message` instances", "id": "f457:c0:m1"}
{"signature": "def mark_read(user, message):", "body": "BackendClass = stored_messages_settings.STORAGE_BACKEND<EOL>backend = BackendClass()<EOL>backend.inbox_delete(user, message)<EOL>", "docstring": "Mark message instance as read for user.\nReturns True if the message was `unread` and thus actually marked as `read` or False in case\nit is already `read` or it does not exist at all.\n\n:param user: user instance for the recipient\n:param message: a Message instance to mark as read", "id": "f463:m2"}
{"signature": "def get_version(package):", "body": "init_py = open(os.path.join(package, '<STR_LIT>')).read()<EOL>return re.search(\"<STR_LIT>\", init_py).group(<NUM_LIT:1>)<EOL>", "docstring": "Return package version as listed in `__version__` in `init.py`.", "id": "f466:m0"}
{"signature": "def can_send(self, user, notice_type):", "body": "from notification.models import NoticeSetting<EOL>return NoticeSetting.for_user(user, notice_type, self.medium_id).send<EOL>", "docstring": "Determines whether this backend is allowed to send a notification to\nthe given user and notice_type.", "id": "f480:c0:m1"}
{"signature": "def deliver(self, recipient, sender, notice_type, extra_context):", "body": "raise NotImplementedError()<EOL>", "docstring": "Deliver a notification to the given recipient.", "id": "f480:c0:m2"}
{"signature": "def i_am_locking(self):", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Return True if this object is locking the file.", "id": "f484:c8:m4"}
{"signature": "def acquire(self, timeout=None):", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Acquire the lock.\n\n* If timeout is omitted (or None), wait forever trying to lock the\n  file.\n\n* If timeout > 0, try to acquire the lock for that many seconds.  If\n  the lock period expires and the file is still locked, raise\n  LockTimeout.\n\n* If timeout <= 0, raise AlreadyLocked immediately if the file is\n  already locked.", "id": "f484:c8:m1"}
{"signature": "def __exit__(self, *_exc):", "body": "self.release()<EOL>", "docstring": "Context manager support.", "id": "f484:c8:m7"}
{"signature": "def break_lock(self):", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Remove a lock.  Useful if a locking thread failed to unlock.", "id": "f484:c8:m5"}
{"signature": "def is_locked(self):", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Tell whether or not the file is locked.", "id": "f484:c8:m3"}
{"signature": "def send_now(users, label, extra_context=None, sender=None):", "body": "sent = False<EOL>if extra_context is None:<EOL><INDENT>extra_context = {}<EOL><DEDENT>notice_type = NoticeType.objects.get(label=label)<EOL>current_language = get_language()<EOL>for user in users:<EOL><INDENT>try:<EOL><INDENT>language = get_notification_language(user)<EOL><DEDENT>except LanguageStoreNotAvailable:<EOL><INDENT>language = None<EOL><DEDENT>if language is not None:<EOL><INDENT>activate(language)<EOL><DEDENT>for backend in NOTIFICATION_BACKENDS.values():<EOL><INDENT>if backend.can_send(user, notice_type):<EOL><INDENT>backend.deliver(user, sender, notice_type, extra_context)<EOL>sent = True<EOL><DEDENT><DEDENT><DEDENT>activate(current_language)<EOL>return sent<EOL>", "docstring": "Creates a new notice.\n\nThis is intended to be how other apps create new notices.\n\nnotification.send(user, \"friends_invite_sent\", {\n    \"spam\": \"eggs\",\n    \"foo\": \"bar\",\n)", "id": "f485:m2"}
{"signature": "def divrank_scipy(G, alpha=<NUM_LIT>, d=<NUM_LIT>, personalization=None,<EOL>max_iter=<NUM_LIT:100>, tol=<NUM_LIT>, nstart=None, weight='<STR_LIT>',<EOL>dangling=None):", "body": "import scipy.sparse<EOL>N = len(G)<EOL>if N == <NUM_LIT:0>:<EOL><INDENT>return {}<EOL><DEDENT>nodelist = G.nodes()<EOL>M = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight,<EOL>dtype=float)<EOL>S = scipy.array(M.sum(axis=<NUM_LIT:1>)).flatten()<EOL>S[S != <NUM_LIT:0>] = <NUM_LIT:1.0> / S[S != <NUM_LIT:0>]<EOL>Q = scipy.sparse.spdiags(S.T, <NUM_LIT:0>, *M.shape, format='<STR_LIT>')<EOL>M = Q * M<EOL>M = scipy.sparse.lil_matrix(M)<EOL>M.setdiag(<NUM_LIT:0.0>)<EOL>M = alpha * M<EOL>M.setdiag(<NUM_LIT:1.0> - alpha)<EOL>x = scipy.repeat(<NUM_LIT:1.0> / N, N)<EOL>if personalization is None:<EOL><INDENT>p = scipy.repeat(<NUM_LIT:1.0> / N, N)<EOL><DEDENT>else:<EOL><INDENT>missing = set(nodelist) - set(personalization)<EOL>if missing:<EOL><INDENT>raise NetworkXError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % missing)<EOL><DEDENT>p = scipy.array([personalization[n] for n in nodelist],<EOL>dtype=float)<EOL>p = p / p.sum()<EOL><DEDENT>if dangling is None:<EOL><INDENT>dangling_weights = p<EOL><DEDENT>else:<EOL><INDENT>missing = set(nodelist) - set(dangling)<EOL>if missing:<EOL><INDENT>raise NetworkXError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % missing)<EOL><DEDENT>dangling_weights = scipy.array([dangling[n] for n in nodelist],<EOL>dtype=float)<EOL>dangling_weights /= dangling_weights.sum()<EOL><DEDENT>is_dangling = scipy.where(S == <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>for _ in range(max_iter):<EOL><INDENT>xlast = x<EOL>D_t =  M * x<EOL>x = (<EOL>d * (x / D_t * M * x + sum(x[is_dangling]) * dangling_weights)<EOL>+ (<NUM_LIT:1.0> - d) * p<EOL>)<EOL>err = scipy.absolute(x - xlast).sum()<EOL>if err < N * tol:<EOL><INDENT>return dict(list(zip(nodelist, list(map(float, x)))))<EOL><DEDENT><DEDENT>raise NetworkXError('<STR_LIT>'<EOL>'<STR_LIT>' % max_iter)<EOL>", "docstring": 
"Returns the DivRank (Diverse Rank) of the nodes in the graph.\nThis code is based on networkx.pagerank_scipy", "id": "f489:m1"}
{"signature": "@not_implemented_for('<STR_LIT>')<EOL>def divrank(G, alpha=<NUM_LIT>, d=<NUM_LIT>, personalization=None,<EOL>max_iter=<NUM_LIT:100>, tol=<NUM_LIT>, nstart=None, weight='<STR_LIT>',<EOL>dangling=None):", "body": "if len(G) == <NUM_LIT:0>:<EOL><INDENT>return {}<EOL><DEDENT>if not G.is_directed():<EOL><INDENT>D = G.to_directed()<EOL><DEDENT>else:<EOL><INDENT>D = G<EOL><DEDENT>W = nx.stochastic_graph(D, weight=weight)<EOL>N = W.number_of_nodes()<EOL>for n in W.nodes_iter():<EOL><INDENT>for n_ in W.nodes_iter():<EOL><INDENT>if n != n_ :<EOL><INDENT>if n_ in W[n]:<EOL><INDENT>W[n][n_][weight] *= alpha<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if n_ not in W[n]:<EOL><INDENT>W.add_edge(n, n_)<EOL><DEDENT>W[n][n_][weight] = <NUM_LIT:1.0> - alpha<EOL><DEDENT><DEDENT><DEDENT>if nstart is None:<EOL><INDENT>x = dict.fromkeys(W, <NUM_LIT:1.0> / N)<EOL><DEDENT>else:<EOL><INDENT>s = float(sum(nstart.values()))<EOL>x = dict((k, v / s) for k, v in list(nstart.items()))<EOL><DEDENT>if personalization is None:<EOL><INDENT>p = dict.fromkeys(W, <NUM_LIT:1.0> / N)<EOL><DEDENT>else:<EOL><INDENT>missing = set(G) - set(personalization)<EOL>if missing:<EOL><INDENT>raise NetworkXError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % missing)<EOL><DEDENT>s = float(sum(personalization.values()))<EOL>p = dict((k, v / s) for k, v in list(personalization.items()))<EOL><DEDENT>if dangling is None:<EOL><INDENT>dangling_weights = p<EOL><DEDENT>else:<EOL><INDENT>missing = set(G) - set(dangling)<EOL>if missing:<EOL><INDENT>raise NetworkXError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % missing)<EOL><DEDENT>s = float(sum(dangling.values()))<EOL>dangling_weights = dict((k, v/s) for k, v in list(dangling.items()))<EOL><DEDENT>dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == <NUM_LIT:0.0>]<EOL>for _ in range(max_iter):<EOL><INDENT>xlast = x<EOL>x = dict.fromkeys(list(xlast.keys()), <NUM_LIT:0>)<EOL>danglesum = d * sum(xlast[n] for n in dangling_nodes)<EOL>for n in 
x:<EOL><INDENT>D_t = sum(W[n][nbr][weight] * xlast[nbr] for nbr in W[n])<EOL>for nbr in W[n]:<EOL><INDENT>x[nbr] += (<EOL>d * (W[n][nbr][weight] * xlast[nbr] / D_t) * xlast[n]<EOL>)<EOL><DEDENT>x[n] += danglesum * dangling_weights[n] + (<NUM_LIT:1.0> - d) * p[n]<EOL><DEDENT>err = sum([abs(x[n] - xlast[n]) for n in x])<EOL>if err < N*tol:<EOL><INDENT>return x<EOL><DEDENT><DEDENT>raise NetworkXError('<STR_LIT>'<EOL>'<STR_LIT>' % max_iter)<EOL>", "docstring": "Returns the DivRank (Diverse Rank) of the nodes in the graph.\nThis code is based on networkx.pagerank.\n\nArgs: (diff from pagerank)\n  alpha: controls strength of self-link [0.0-1.0]\n  d: the damping factor\n\nReference:\n  Qiaozhu Mei and Jian Guo and Dragomir Radev,\n  DivRank: the Interplay of Prestige and Diversity in Information Networks,\n  http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.174.7982", "id": "f489:m0"}
{"signature": "def lexrank(sentences, continuous=False, sim_threshold=<NUM_LIT:0.1>, alpha=<NUM_LIT>,<EOL>use_divrank=False, divrank_alpha=<NUM_LIT>):", "body": "<EOL>ranker_params = {'<STR_LIT>': <NUM_LIT:1000>}<EOL>if use_divrank:<EOL><INDENT>ranker = divrank_scipy<EOL>ranker_params['<STR_LIT>'] = divrank_alpha<EOL>ranker_params['<STR_LIT:d>'] = alpha<EOL><DEDENT>else:<EOL><INDENT>ranker = networkx.pagerank_scipy<EOL>ranker_params['<STR_LIT>'] = alpha<EOL><DEDENT>graph = networkx.DiGraph()<EOL>sent_tf_list = []<EOL>for sent in sentences:<EOL><INDENT>words = tools.word_segmenter_ja(sent)<EOL>tf = collections.Counter(words)<EOL>sent_tf_list.append(tf)<EOL><DEDENT>sent_vectorizer = DictVectorizer(sparse=True)<EOL>sent_vecs = sent_vectorizer.fit_transform(sent_tf_list)<EOL>sim_mat = <NUM_LIT:1> - pairwise_distances(sent_vecs, sent_vecs, metric='<STR_LIT>')<EOL>if continuous:<EOL><INDENT>linked_rows, linked_cols = numpy.where(sim_mat > <NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>linked_rows, linked_cols = numpy.where(sim_mat >= sim_threshold)<EOL><DEDENT>graph.add_nodes_from(list(range(sent_vecs.shape[<NUM_LIT:0>])))<EOL>for i, j in zip(linked_rows, linked_cols):<EOL><INDENT>if i == j:<EOL><INDENT>continue<EOL><DEDENT>weight = sim_mat[i,j] if continuous else <NUM_LIT:1.0><EOL>graph.add_edge(i, j, {'<STR_LIT>': weight})<EOL><DEDENT>scores = ranker(graph, **ranker_params)<EOL>return scores, sim_mat<EOL>", "docstring": "compute centrality score of sentences.\n\nArgs:\n  sentences: [u'\u3053\u3093\u306b\u3061\u306f\uff0e', u'\u79c1\u306e\u540d\u524d\u306f\u98ef\u6cbc\u3067\u3059\uff0e', ... ]\n  continuous: if True, apply continuous LexRank. 
(see reference)\n  sim_threshold: if continuous is False and smilarity is greater or\n    equal to sim_threshold, link the sentences.\n  alpha: the damping factor of PageRank and DivRank\n  divrank: if True, apply DivRank instead of PageRank\n  divrank_alpha: strength of self-link [0.0-1.0]\n    (it's not the damping factor, see divrank.py)\n\nReturns: tuple\n  (\n    {\n      # sentence index -> score\n      0: 0.003,\n      1: 0.002,\n      ...\n    },\n    similarity_matrix\n  )\n\nReference:\n  G\u00fcnes Erkan and Dragomir R. Radev.\n  LexRank: graph-based lexical centrality as salience in text\n  summarization. (section 3)\n  http://www.cs.cmu.edu/afs/cs/project/jair/pub/volume22/erkan04a-html/erkan04a.html", "id": "f492:m0"}
{"signature": "def __init__(self, i2c, device_address, *, debug=False):", "body": "while not i2c.try_lock():<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>i2c.writeto(device_address, b'<STR_LIT>')<EOL><DEDENT>except OSError:<EOL><INDENT>try:<EOL><INDENT>result = bytearray(<NUM_LIT:1>)<EOL>i2c.readfrom_into(device_address, result)<EOL><DEDENT>except OSError:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % device_address)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>i2c.unlock()<EOL><DEDENT>self.i2c = i2c<EOL>self.device_address = device_address<EOL>self._debug = debug<EOL>", "docstring": "Try to read a byte from an address,\nif you get an OSError it means the device is not there", "id": "f501:c0:m0"}
{"signature": "def write_then_readinto(self, out_buffer, in_buffer, *,<EOL>out_start=<NUM_LIT:0>, out_end=None, in_start=<NUM_LIT:0>, in_end=None, stop=True):", "body": "if out_end is None:<EOL><INDENT>out_end = len(out_buffer)<EOL><DEDENT>if in_end is None:<EOL><INDENT>in_end = len(in_buffer)<EOL><DEDENT>if hasattr(self.i2c, '<STR_LIT>'):<EOL><INDENT>if self._debug:<EOL><INDENT>print(\"<STR_LIT>\",<EOL>[hex(i) for i in out_buffer[out_start:out_end]])<EOL><DEDENT>self.i2c.writeto_then_readfrom(self.device_address, out_buffer, in_buffer,<EOL>out_start=out_start, out_end=out_end,<EOL>in_start=in_start, in_end=in_end, stop=stop)<EOL>if self._debug:<EOL><INDENT>print(\"<STR_LIT>\",<EOL>[hex(i) for i in in_buffer[in_start:in_end]])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.write(out_buffer, start=out_start, end=out_end, stop=stop)<EOL>if self._debug:<EOL><INDENT>print(\"<STR_LIT>\",<EOL>[hex(i) for i in out_buffer[out_start:out_end]])<EOL><DEDENT>self.readinto(in_buffer, start=in_start, end=in_end)<EOL>if self._debug:<EOL><INDENT>print(\"<STR_LIT>\",<EOL>[hex(i) for i in in_buffer[in_start:in_end]])<EOL><DEDENT><DEDENT>", "docstring": "Write the bytes from ``out_buffer`` to the device, then immediately\nreads into ``in_buffer`` from the device. The number of bytes read\nwill be the length of ``in_buffer``.\nTransmits a stop bit after the write, if ``stop`` is set.\n\nIf ``out_start`` or ``out_end`` is provided, then the output buffer\nwill be sliced as if ``out_buffer[out_start:out_end]``. This will\nnot cause an allocation like ``buffer[out_start:out_end]`` will so\nit saves memory.\n\nIf ``in_start`` or ``in_end`` is provided, then the input buffer\nwill be sliced as if ``in_buffer[in_start:in_end]``. 
This will not\ncause an allocation like ``in_buffer[in_start:in_end]`` will so\nit saves memory.\n\n:param bytearray out_buffer: buffer containing the bytes to write\n:param bytearray in_buffer: buffer containing the bytes to read into\n:param int out_start: Index to start writing from\n:param int out_end: Index to read up to but not include\n:param int in_start: Index to start writing at\n:param int in_end: Index to write up to but not include\n:param bool stop: If true, output an I2C stop condition after the buffer is written", "id": "f501:c0:m3"}
{"signature": "def __iter__(self):", "body": "for id_ in self._items:<EOL><INDENT>yield self[id_]<EOL><DEDENT>", "docstring": "allows you to iterate and use for-loops\n\n        The objects in the iterator have the order in which they were appended.", "id": "f506:c0:m5"}
{"signature": "def __bool__(self):", "body": "return bool(self._items)<EOL>", "docstring": ":return: whether there is anything in the collection.\n        :rtype: bool", "id": "f506:c0:m4"}
{"signature": "def __init__(self):", "body": "self._items = OrderedDict()<EOL>", "docstring": "Create a new :class:`IdCollection` with no arguments.\n\n        You can add objects later using the method :meth:`append`.", "id": "f506:c0:m0"}
{"signature": "def __getitem__(self, id_):", "body": "return self._items[id_]<EOL>", "docstring": "Get the object with the :paramref:`id`\n\n        .. code:: python\n\n            ic = IdCollection()\n            ic.append(object_1)\n            ic.append(object_2)\n            assert ic[object_1.id] == object_1\n            assert ic[object_2.id] == object_1\n\n        :param id_: the id of an object\n        :return: the object with the :paramref:`id`\n        :raises KeyError: if no object with :paramref:`id` was found", "id": "f506:c0:m3"}
{"signature": "def __len__(self):", "body": "return len(self._items)<EOL>", "docstring": ":return: the number of objects in this collection", "id": "f506:c0:m6"}
{"signature": "def __init__(self, process=identity, chooses_path=true):", "body": "self._process = process<EOL>self._chooses_path = chooses_path<EOL>", "docstring": "Create a PathLoader object.\n\n        :param process: ``process(path)`` is called with the `path` to load.\n          The result of :paramref:`process` is returned to the caller. The\n          default value is :func:`identity`, so the paths are returned when\n          loaded.\n        :param chooses_path: ``chooses_path(path)`` is called before\n          :paramref:`process` and returns :obj:`True` or :obj:`False`\n          depending on whether a specific path should be loaded and passed to\n          :paramref:`process`.", "id": "f507:c0:m0"}
{"signature": "def example(self, relative_path):", "body": "example_path = os.path.join(\"<STR_LIT>\", relative_path)<EOL>return self.relative_file(__file__, example_path)<EOL>", "docstring": "Load an example from the knitting pattern examples.\n\n        :param str relative_path: the path to load\n        :return: the result of the processing\n\n        You can use :meth:`knittingpattern.Loader.PathLoader.examples`\n        to find out the paths of all examples.", "id": "f507:c0:m8"}
{"signature": "def string(self, string):", "body": "object_ = json.loads(string)<EOL>return self.object(object_)<EOL>", "docstring": "Load an object from a string and return the processed JSON content\n\n        :return: the result of the processing step\n        :param str string: the string to load the JSON from", "id": "f507:c2:m1"}
{"signature": "def relative_folder(self, module, folder):", "body": "folder = self._relative_to_absolute(module, folder)<EOL>return self.folder(folder)<EOL>", "docstring": "Load a folder located relative to a module and return the processed\n        result.\n\n        :param str module: can be\n\n          - a path to a folder\n          - a path to a file\n          - a module name\n\n        :param str folder: the path of a folder relative to :paramref:`module`\n        :return: a list of the results of the processing\n        :rtype: list\n\n        Depending on :meth:`chooses_path` some paths may not be loaded.\n        Every loaded path is processed and returned part of the returned list.\n        You can use :meth:`choose_paths` to find out which paths are chosen to\n        load.", "id": "f507:c0:m5"}
{"signature": "def choose_paths(self, paths):", "body": "return [path for path in paths if self._chooses_path(path)]<EOL>", "docstring": ":return: the paths that are chosen by :meth:`chooses_path`\n        :rtype: list", "id": "f507:c0:m7"}
{"signature": "def relative_file(self, module, file):", "body": "path = self._relative_to_absolute(module, file)<EOL>return self.path(path)<EOL>", "docstring": "Load a file relative to a module.\n\n        :param str module: can be\n\n          - a path to a folder\n          - a path to a file\n          - a module name\n\n        :param str folder: the path of a folder relative to :paramref:`module`\n        :return: the result of the processing", "id": "f507:c0:m6"}
{"signature": "def identity(object_):", "body": "return object_<EOL>", "docstring": ":return: the argument\n    :param object_: the object to be returned", "id": "f507:m0"}
{"signature": "def rows_in_knit_order(self):", "body": "return walk(self)<EOL>", "docstring": "Return the rows in the order that they should be knit.\n\n        :rtype: list\n        :return: the :attr:`rows` in the order that they should be knit\n\n        .. seealso:: :mod:`knittingpattern.walk`", "id": "f508:c0:m5"}
{"signature": "@property<EOL><INDENT>def name(self):<DEDENT>", "body": "return self._name<EOL>", "docstring": "a human readable name", "id": "f508:c0:m2"}
{"signature": "def add_row(self, id_):", "body": "row = self._parser.new_row(id_)<EOL>self._rows.append(row)<EOL>return row<EOL>", "docstring": "Add a new row to the pattern.\n\n        :param id_: the id of the row", "id": "f508:c0:m4"}
{"signature": "@property<EOL><INDENT>def rows(self):<DEDENT>", "body": "return self._rows<EOL>", "docstring": "a collection of rows that this pattern is made of\n\n        Usually this should be a\n        :class:`knittingpattern.IdCollection.IdCollection` of\n        :class:`knittingpattern.Row.Row`.", "id": "f508:c0:m3"}
{"signature": "def __getitem__(self, instruction_type):", "body": "return self.as_instruction({TYPE: instruction_type})<EOL>", "docstring": ":return: the specification for :paramref:`instruction_type`\n\n        .. seealso:: :meth:`as_instruction`", "id": "f509:c0:m7"}
{"signature": "@property<EOL><INDENT>def _instruction_class(self):<DEDENT>", "body": "return Instruction<EOL>", "docstring": ":return: the class for the specifications", "id": "f509:c0:m1"}
{"signature": "def __init__(self):", "body": "super().__init__()<EOL>self.load.relative_folder(__file__, self.INSTRUCTIONS_FOLDER)<EOL>", "docstring": "Create the default instruction library without arguments.\n\n        The default specifications are loaded automatically form this package.", "id": "f509:c1:m0"}
{"signature": "def _process_loaded_object(self, obj):", "body": "for instruction in obj:<EOL><INDENT>self.add_instruction(instruction)<EOL><DEDENT>return self<EOL>", "docstring": "add the loaded instructions from :attr:`load`", "id": "f509:c0:m4"}
{"signature": "@property<EOL><INDENT>def _loader_class(self):<DEDENT>", "body": "return JSONLoader<EOL>", "docstring": ":return: the class for loading the specifications with\n          :attr:`load`", "id": "f509:c0:m0"}
{"signature": "@fixture<EOL>def a1(charlotte):", "body": "return charlotte.patterns[\"<STR_LIT>\"]<EOL>", "docstring": ":return: the pattern ``\"A.1\"`` in charlotte", "id": "f512:m1"}
{"signature": "@fixture<EOL>def a1():", "body": "return _charlotte().patterns[\"<STR_LIT>\"]<EOL>", "docstring": ":return: the pattern ``\"A.1\"`` in charlotte", "id": "f524:m0"}
{"signature": "@fixture<EOL>def pattern(single_instruction_pattern_set):", "body": "return single_instruction_pattern_set.patterns[\"<STR_LIT>\"]<EOL>", "docstring": "The pattern which has only one instruction.", "id": "f527:m1"}
{"signature": "@fixture<EOL>def row(pattern):", "body": "return pattern.rows[<NUM_LIT:1>]<EOL>", "docstring": "The row with one instruction.", "id": "f527:m2"}
{"signature": "@fixture<EOL>def single_instruction_pattern_set():", "body": "return load_from_relative_file(HERE, \"<STR_LIT>\")<EOL>", "docstring": "Load the pattern set with only one instruction.", "id": "f527:m0"}
{"signature": "@classmethod<EOL><INDENT>def __repr__(cls):<DEDENT>", "body": "return \"<STR_LIT>\".format(cls.__module__, cls.__qualname__)<EOL>", "docstring": "The string representation of the object.\n\n        :return: the string representation\n        :rtype: str", "id": "f532:c1:m1"}
{"signature": "def __init__(self,<EOL>new_loader=JSONLoader,<EOL>new_parser=Parser,<EOL>new_parsing_error=ParsingError,<EOL>new_pattern_set=KnittingPatternSet,<EOL>new_pattern_collection=IdCollection,<EOL>new_row_collection=IdCollection,<EOL>new_pattern=KnittingPattern,<EOL>new_row=Row,<EOL>new_default_instructions=DefaultInstructions,<EOL>new_instruction_in_row=InstructionInRow):", "body": "self.new_loader = new_loader<EOL>self.new_parser = new_parser<EOL>self.new_parsing_error = new_parsing_error<EOL>self.new_pattern_set = new_pattern_set<EOL>self.new_pattern_collection = new_pattern_collection<EOL>self.new_row_collection = new_row_collection<EOL>self.new_pattern = new_pattern<EOL>self.new_row = new_row<EOL>self.new_default_instructions = new_default_instructions<EOL>self.new_instruction_in_row = new_instruction_in_row<EOL>", "docstring": "Create a new parsing specification.", "id": "f532:c0:m0"}
{"signature": "def new_knitting_pattern_set_loader(specification=DefaultSpecification()):", "body": "parser = specification.new_parser(specification)<EOL>loader = specification.new_loader(parser.knitting_pattern_set)<EOL>return loader<EOL>", "docstring": "Create a loader for a knitting pattern set.\n\n    :param specification: a :class:`specification\n      <knittingpattern.ParsingSpecification.ParsingSpecification>`\n      for the knitting pattern set, default\n      :class:`DefaultSpecification`", "id": "f532:m0"}
{"signature": "def load_from_file(file):", "body": "return load_from().file(file)<EOL>", "docstring": "Load a knitting pattern from a file-like object.\n\n    :rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet", "id": "f534:m3"}
{"signature": "def load_from():", "body": "from .ParsingSpecification import new_knitting_pattern_set_loader<EOL>return new_knitting_pattern_set_loader()<EOL>", "docstring": "Create a loader to load knitting patterns with.\n\n    :return: the loader to load objects with\n    :rtype: knittingpattern.Loader.JSONLoader\n\n    Example:\n\n    .. code:: python\n\n       import knittingpattern, webbrowser\n       k = knittingpattern.load_from().example(\"Cafe.json\")\n       webbrowser.open(k.to_svg(25).temporary_path(\".svg\"))", "id": "f534:m0"}
{"signature": "def load_from_object(object_):", "body": "return load_from().object(object_)<EOL>", "docstring": "Load a knitting pattern from an object.\n\n    :rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet", "id": "f534:m1"}
{"signature": "def convert_from_image(colors=(\"<STR_LIT>\", \"<STR_LIT>\")):", "body": "from .convert.image_to_knittingpattern importconvert_image_to_knitting_pattern<EOL>return convert_image_to_knitting_pattern(colors=colors)<EOL>", "docstring": "Convert and image to a knitting pattern.\n\n    :return: a loader\n    :rtype: knittingpattern.Loader.PathLoader\n    :param tuple colors: the colors to convert to\n\n    .. code:: python\n\n        convert_from_image().path(\"pattern.png\").path(\"pattern.json\")\n        convert_from_image().path(\"pattern.png\").knitting_pattern()\n\n    .. seealso:: :mod:`knittingoattern.convert.image_to_knitting_pattern`", "id": "f534:m7"}
{"signature": "def load_from_path(path):", "body": "return load_from().path(path)<EOL>", "docstring": "Load a knitting pattern from a file behind located at `path`.\n\n    :rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet", "id": "f534:m4"}
{"signature": "@property<EOL><INDENT>def last_produced_mesh(self):<DEDENT>", "body": "for instruction in reversed(self.instructions):<EOL><INDENT>if instruction.produces_meshes():<EOL><INDENT>return instruction.last_produced_mesh<EOL><DEDENT><DEDENT>raise IndexError(\"<STR_LIT>\".format(self))<EOL>", "docstring": "The last produced mesh.\n\n        :return: the last produced mesh\n        :rtype: knittingpattern.Mesh.Mesh\n        :raises IndexError: if no mesh is produced\n\n        .. seealso:: :attr:`number_of_produced_meshes`", "id": "f535:c0:m11"}
{"signature": "@property<EOL><INDENT>def color(self):<DEDENT>", "body": "return self.get(COLOR)<EOL>", "docstring": "The color of the row.\n\n        :return: the color of the row as specified or :obj:`None`", "id": "f535:c0:m9"}
{"signature": "@property<EOL><INDENT>def instruction_colors(self):<DEDENT>", "body": "return unique(instruction.colors for instruction in self.instructions)<EOL>", "docstring": "The colors of the instructions in the row in the order tehy appear.\n\n        :return: a list of colors of the knitting pattern in the order that\n          they appear in\n        :rtype: list", "id": "f535:c0:m10"}
{"signature": "@property<EOL><INDENT>def instructions(self):<DEDENT>", "body": "return self._instructions<EOL>", "docstring": "The instructions in this row.\n\n        :return: a collection of :class:`instructions inside the row\n          <knittingpattern.Instruction.InstructionInRow>`\n        :rtype: ObservableList.ObservableList", "id": "f535:c0:m3"}
{"signature": "def __repr__(self):", "body": "return \"<STR_LIT>\".format(self.__class__.__qualname__, self.id)<EOL>", "docstring": "The string representation of this row.\n\n        :return: a string representation of this row\n        :rtype: str", "id": "f535:c0:m8"}
{"signature": "@property<EOL><INDENT>def first_produced_mesh(self):<DEDENT>", "body": "for instruction in self.instructions:<EOL><INDENT>if instruction.produces_meshes():<EOL><INDENT>return instruction.first_produced_mesh<EOL><DEDENT><DEDENT>raise IndexError(\"<STR_LIT>\".format(self))<EOL>", "docstring": "The first produced mesh.\n\n        :return: the first produced mesh\n        :rtype: knittingpattern.Mesh.Mesh\n        :raises IndexError: if no mesh is produced\n\n        .. seealso:: :attr:`number_of_produced_meshes`", "id": "f535:c0:m13"}
{"signature": "@property<EOL><INDENT>def rows_before(self):<DEDENT>", "body": "rows_before = []<EOL>for mesh in self.consumed_meshes:<EOL><INDENT>if mesh.is_produced():<EOL><INDENT>row = mesh.producing_row<EOL>if rows_before not in rows_before:<EOL><INDENT>rows_before.append(row)<EOL><DEDENT><DEDENT><DEDENT>return rows_before<EOL>", "docstring": "The rows that produce meshes for this row.\n\n        :rtype: list\n        :return: a list of rows that produce meshes for this row. Each row\n          occurs only once. They are sorted by the first occurrence in the\n          instructions.", "id": "f535:c0:m15"}
{"signature": "@property<EOL><INDENT>def first_instruction(self):<DEDENT>", "body": "return self.instructions[<NUM_LIT:0>]<EOL>", "docstring": "The first instruction of the rows instructions.\n\n        :rtype: knittingpattern.Instruction.InstructionInRow\n        :return: the first instruction in this row's :attr:`instructions`", "id": "f535:c0:m17"}
{"signature": "@property<EOL><INDENT>def number_of_produced_meshes(self):<DEDENT>", "body": "return sum(instruction.number_of_produced_meshes<EOL>for instruction in self.instructions)<EOL>", "docstring": "The number of meshes that this row produces.\n\n        :return: the number of meshes that this row produces\n        :rtype: int\n\n        .. seealso::\n          :meth:`Instruction.number_of_produced_meshes()\n          <knittingpattern.Instruction.Instruction.number_of_produced_meshes>`,\n          :meth:`number_of_consumed_meshes`", "id": "f535:c0:m4"}
{"signature": "@property<EOL><INDENT>def rows_after(self):<DEDENT>", "body": "rows_after = []<EOL>for mesh in self.produced_meshes:<EOL><INDENT>if mesh.is_consumed():<EOL><INDENT>row = mesh.consuming_row<EOL>if rows_after not in rows_after:<EOL><INDENT>rows_after.append(row)<EOL><DEDENT><DEDENT><DEDENT>return rows_after<EOL>", "docstring": "The rows that consume meshes from this row.\n\n        :rtype: list\n        :return: a list of rows that consume meshes from this row. Each row\n          occurs only once. They are sorted by the first occurrence in the\n          instructions.", "id": "f535:c0:m16"}
{"signature": "@property<EOL><INDENT>def last_instruction(self):<DEDENT>", "body": "return self.instructions[-<NUM_LIT:1>]<EOL>", "docstring": "The last instruction of the rows instructions.\n\n        :rtype: knittingpattern.Instruction.InstructionInRow\n        :return: the last instruction in this row's :attr:`instructions`", "id": "f535:c0:m18"}
{"signature": "def __init__(self, knittingpattern, layout, instruction_to_svg, builder,<EOL>zoom):", "body": "self._knittingpattern = knittingpattern<EOL>self._layout = layout<EOL>self._instruction_to_svg = instruction_to_svg<EOL>self._builder = builder<EOL>self._zoom = zoom<EOL>self._instruction_type_color_to_symbol = OrderedDict()<EOL>self._symbol_id_to_scale = {}<EOL>", "docstring": ":param knittingpattern.KnittingPattern.KnittingPattern knittingpattern:\n  a knitting pattern\n:param knittingpattern.convert.Layout.GridLayout layout:\n:param instruction_to_svg: an\n  :class:`~knittingpattern.convert.InstructionToSVG.InstructionToSVG`\n  :class:`\n  ~knittingpattern.convert.InstructionToSVGCache.InstructionSVGCache`,\n  both with instructions already loaded.\n:param knittingpattern.convert.SVGBuilder.SVGBuilder builder:\n:param float zoom: the height and width of a knit instruction", "id": "f536:c0:m0"}
{"signature": "def place_svg_use(self, symbol_id, layer_id, group=None):", "body": "self.place_svg_use_coords(<NUM_LIT:0>, <NUM_LIT:0>, symbol_id, layer_id, group)<EOL>", "docstring": "Same as :meth:`place_svg_use_coords`.\n\n        With implicit `x`  and `y` which are set to `0` in this method and then\n        :meth:`place_svg_use_coords` is called.", "id": "f537:c0:m6"}
{"signature": "@property<EOL><INDENT>def bounding_box(self):<DEDENT>", "body": "return (self._min_x, self._min_y, self._max_x, self._max_y)<EOL>", "docstring": "the bounding box of this SVG\n        ``(min_x, min_y, max_x, max_y)``.\n\n        .. code:: python\n\n            svg_builder10x10.bounding_box = (0, 0, 10, 10)\n            assert svg_builder10x10.bounding_box == (0, 0, 10, 10)\n\n        ``viewBox``, ``width`` and ``height`` are computed from this.\n\n        If the bounding box was never set, the result is a tuple of four\n        :obj:`None`.", "id": "f537:c0:m1"}
{"signature": "def _get_layer(self, layer_id):", "body": "if layer_id not in self._layer_id_to_layer:<EOL><INDENT>self._svg.setdefault(\"<STR_LIT:g>\", [])<EOL>layer = {<EOL>\"<STR_LIT:g>\": [],<EOL>\"<STR_LIT>\": layer_id,<EOL>\"<STR_LIT>\": layer_id,<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\"<EOL>}<EOL>self._layer_id_to_layer[layer_id] = layer<EOL>self._svg[\"<STR_LIT:g>\"].append(layer)<EOL><DEDENT>return self._layer_id_to_layer[layer_id]<EOL>", "docstring": ":return: the layer with the :paramref:`layer_id`. If the layer\n  does not exist, it is created.\n:param str layer_id: the id of the layer", "id": "f537:c0:m7"}
{"signature": "def get_svg_dict(self):", "body": "return self._structure<EOL>", "docstring": "Return the SVG structure generated.", "id": "f537:c0:m9"}
{"signature": "def decorate_load_and_dump(create_loader, create_dumper):", "body": "return lambda func: load_and_dump(create_loader, create_dumper, func)<EOL>", "docstring": "Same as :func:`load_and_dump` but returns a function to enable decorator\n    syntax.\n\n    Examples:\n\n    .. code:: Python\n\n        @decorate_load_and_dump(ContentLoader, JSONDumper)\n        def convert_from_loader_to_dumper(loaded_stuff, other=\"arguments\"):\n            # convert\n            return converted_stuff\n\n        @decorate_load_and_dump(PathLoader, lambda dump: ContentDumper(dump,\n            encoding=None))\n        def convert_from_loader_to_dumper(loaded_stuff, to_file):\n            # convert\n            to_file.write(converted_stuff)", "id": "f538:m1"}
{"signature": "def load_and_dump(create_loader, create_dumper, load_and_dump_):", "body": "@wraps(load_and_dump_)<EOL>def load_and_dump__(*args1, **kw):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>def load(*args2):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>def dump(*args3):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return load_and_dump_(*(args2 + args3 + args1), **kw)<EOL><DEDENT>return create_dumper(dump)<EOL><DEDENT>return create_loader(load)<EOL><DEDENT>return load_and_dump__<EOL>", "docstring": ":return: a function that has the doc string of\n      :paramref:`load_and_dump_`\n      additional arguments to this function are passed on to\n      :paramref:`load_and_dump_`.\n\n    :param create_loader: a loader, e.g.\n      :class:`knittingpattern.Loader.PathLoader`\n    :param create_dumper: a dumper, e.g.\n      :class:`knittingpattern.Dumper.ContentDumper`\n    :param load_and_dump_: a function to call with the loaded content.\n      The arguments to both, :paramref:`create_dumper` and,\n      :paramref:`create_loader`\n      will be passed to :paramref:`load_and_dump_`.\n      Any additional arguments to the return value are also passed to\n      :paramref:`load_and_dump_`.\n      The return value of :paramref:`load_and_dump_` is passed back to the\n      :paramref:`Dumper`.\n\n    .. seealso:: :func:`decorate_load_and_dump`", "id": "f538:m0"}
{"signature": "def title(content):", "body": "if isinstance(content, str):<EOL><INDENT>return re.findall(\"<STR_LIT>\", content)[-<NUM_LIT:1>]<EOL><DEDENT>return content.title.cdata<EOL>", "docstring": "returns the title of the svg", "id": "f545:m0"}
{"signature": "def connections(layout):", "body": "return list(layout.walk_connections(lambda c: (c.start.xy, c.stop.xy)))<EOL>", "docstring": "The connections between the rows of the leyout.", "id": "f549:m4"}
{"signature": "def sizes(layout):", "body": "return list(layout.walk_instructions(lambda p: (p.width, p.height)))<EOL>", "docstring": "The sizes of the instructions of the layout.", "id": "f549:m1"}
{"signature": "@fixture(scope=\"<STR_LIT:class>\")<EOL><INDENT>def grid(self, pattern):<DEDENT>", "body": "return GridLayout(pattern)<EOL>", "docstring": "The computed grid for the pattern.", "id": "f549:c0:m1"}
{"signature": "def __init__(self, min_x, min_y, max_x, max_y,<EOL>default_color=\"<STR_LIT>\"):", "body": "self._min_x = min_x<EOL>self._min_y = min_y<EOL>self._max_x = max_x<EOL>self._max_y = max_y<EOL>self._default_color = default_color<EOL>self._image = PIL.Image.new(<EOL>\"<STR_LIT>\", (max_x - min_x, max_y - min_y),<EOL>self._convert_to_image_color(default_color))<EOL>", "docstring": "Initialize the builder with the bounding box and a default color.\n\n        .. _png-builder-bounds:\n\n        ``min_x <= x < max_x`` and ``min_y <= y < max_y`` are the bounds of the\n        instructions.\n        Instructions outside the bounds are not rendered.\n        Any Pixel that is not set has the :paramref:`default_color`.\n\n        :param int min_x: the lower bound of the x coordinates\n        :param int max_x: the upper bound of the x coordinates\n        :param int min_y: the lower bound of the y coordinates\n        :param int max_y: the upper bound of the y coordinates\n        :param default_color: a valid :ref:`color <png-color>`", "id": "f550:c0:m0"}
{"signature": "def set_pixel(self, x, y, color):", "body": "self._set_pixel_and_convert_color(x, y, color)<EOL>", "docstring": "set the pixel at ``(x, y)`` position to :paramref:`color`\n\n        If ``(x, y)`` is out of the :ref:`bounds <png-builder-bounds>`\n        this does not change the image.\n\n        .. seealso:: :meth:`set_color_in_grid`", "id": "f550:c0:m7"}
{"signature": "def _convert_rrggbb_to_image_color(self, rrggbb):", "body": "return webcolors.hex_to_rgb(rrggbb)<EOL>", "docstring": ":return: the color that is used by the image", "id": "f550:c0:m3"}
{"signature": "@property<EOL><INDENT>def default_color(self):<DEDENT>", "body": "return self._default_color<EOL>", "docstring": ":return: the :ref:`color <png-color>` of the pixels that are not set\n\n        You can set this color by passing it to the :meth:`constructor\n        <__init__>`.", "id": "f550:c0:m11"}
{"signature": "def _set_pixel_and_convert_color(self, x, y, color):", "body": "if color is None:<EOL><INDENT>return<EOL><DEDENT>color = self._convert_color_to_rrggbb(color)<EOL>self._set_pixel(x, y, color)<EOL>", "docstring": "set the pixel but convert the color before.", "id": "f550:c0:m5"}
{"signature": "def __init__(self, function_that_returns_a_knitting_pattern_set):", "body": "super().__init__(self._dump_knitting_pattern,<EOL>text_is_expected=False, encoding=None)<EOL>self.__on_dump = function_that_returns_a_knitting_pattern_set<EOL>", "docstring": "Initialize the Dumper with a\n        :paramref:`function_that_returns_a_knitting_pattern_set`.\n\n        :param function_that_returns_a_knitting_pattern_set: a function that\n          takes no arguments but returns a\n          :class:`knittinpattern.KnittingPatternSet.KnittingPatternSet`\n\n        When a dump is requested, the\n        :paramref:`function_that_returns_a_knitting_pattern_set`\n        is called and the knitting pattern set is converted and saved to the\n        specified location.", "id": "f552:c0:m0"}
{"signature": "def walk_connections(self, mapping=identity):", "body": "for start in self.walk_instructions():<EOL><INDENT>for stop_instruction in start.instruction.consuming_instructions:<EOL><INDENT>if stop_instruction is None:<EOL><INDENT>continue<EOL><DEDENT>stop = self._walk.instruction_in_grid(stop_instruction)<EOL>connection = Connection(start, stop)<EOL>if connection.is_visible():<EOL><INDENT>yield mapping(connection)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Iterate over connections between instructions.\n\n        :return: an iterator over :class:`connections <Connection>` between\n          :class:`instructions in grid <InstructionInGrid>`\n        :param mapping: funcion to map the result, see\n          :meth:`walk_instructions` for an example usage", "id": "f553:c5:m3"}
{"signature": "@property<EOL><INDENT>def y(self):<DEDENT>", "body": "return self._position.y<EOL>", "docstring": ":return: y coordinate in the grid\n        :rtype: float", "id": "f553:c0:m2"}
{"signature": "def walk_instructions(self, mapping=identity):", "body": "instructions = chain(*self.walk_rows(lambda row: row.instructions))<EOL>return map(mapping, instructions)<EOL>", "docstring": "Iterate over instructions.\n\n        :return: an iterator over :class:`instructions in grid\n          <InstructionInGrid>`\n        :param mapping: funcion to map the result\n\n        .. code:: python\n\n            for pos, c in layout.walk_instructions(lambda i: (i.xy, i.color)):\n                print(\"color {} at {}\".format(c, pos))", "id": "f553:c5:m1"}
{"signature": "def _walk(self):", "body": "while self._todo:<EOL><INDENT>args = self._todo.pop(<NUM_LIT:0>)<EOL>self._step(*args)<EOL><DEDENT>", "docstring": "Loop through all the instructions that are `_todo`.", "id": "f553:c3:m7"}
{"signature": "@property<EOL><INDENT>def start(self):<DEDENT>", "body": "return self._start<EOL>", "docstring": ":return: the start of the connection\n        :rtype: InstructionInGrid", "id": "f553:c4:m1"}
{"signature": "@property<EOL><INDENT>def instructions(self):<DEDENT>", "body": "x = self.x<EOL>y = self.y<EOL>result = []<EOL>for instruction in self._row.instructions:<EOL><INDENT>instruction_in_grid = InstructionInGrid(instruction, Point(x, y))<EOL>x += instruction_in_grid.width<EOL>result.append(instruction_in_grid)<EOL><DEDENT>return result<EOL>", "docstring": "The instructions in a grid.\n\n        :return: the :class:`instructions in a grid <InstructionInGrid>` of\n          this row\n        :rtype: list", "id": "f553:c2:m2"}
{"signature": "def row_in_grid(self, row):", "body": "return self._rows_in_grid[row]<EOL>", "docstring": "Returns an `RowInGrid` object for the `row`", "id": "f553:c3:m9"}
{"signature": "def __init__(self, row, position):", "body": "super().__init__(position)<EOL>self._row = row<EOL>", "docstring": "Create a new row in the grid.", "id": "f553:c2:m0"}
{"signature": "def _expand(self, row, consumed_position, passed):", "body": "self._todo.append((row, consumed_position, passed))<EOL>", "docstring": "Add the arguments `(args, kw)` to `_walk` to the todo list.", "id": "f553:c3:m1"}
{"signature": "def _row_should_be_placed(self, row, position):", "body": "placed_row = self._rows_in_grid.get(row)<EOL>return placed_row is None or placed_row.y < position.y<EOL>", "docstring": ":return: whether to place this instruction", "id": "f553:c3:m5"}
{"signature": "def _place_row(self, row, position):", "body": "self._rows_in_grid[row] = RowInGrid(row, position)<EOL>", "docstring": "place the instruction on a grid", "id": "f553:c3:m6"}
{"signature": "@property<EOL><INDENT>def width(self):<DEDENT>", "body": "return self._width<EOL>", "docstring": ":return: width of the object on the grid\n        :rtype: float", "id": "f553:c0:m5"}
{"signature": "def __init__(self, start, stop):", "body": "self._start = start<EOL>self._stop = stop<EOL>", "docstring": ":param InstructionInGrid start: the start of the connection\n:param InstructionInGrid stop: the end of the connection", "id": "f553:c4:m0"}
{"signature": "def row_in_grid(self, row):", "body": "return self._walk.row_in_grid(row)<EOL>", "docstring": "The a RowInGrid for the row with position information.\n\n        :return: a row in the grid\n        :rtype: RowInGrid", "id": "f553:c5:m5"}
{"signature": "@property<EOL><INDENT>def color(self):<DEDENT>", "body": "return self._instruction.color<EOL>", "docstring": "The color of the instruction.\n\n        :return: the color of the :attr:`instruction`", "id": "f553:c1:m3"}
{"signature": "@property<EOL><INDENT>def bounding_box(self):<DEDENT>", "body": "min_x, min_y, max_x, max_y = zip(*list(self.walk_rows(<EOL>lambda row: row.bounding_box)))<EOL>return min(min_x), min(min_y), max(max_x), max(max_y)<EOL>", "docstring": "The minimum and maximum bounds of this layout.\n\n        :return: ``(min_x, min_y, max_x, max_y)`` the bounding box\n          of this layout\n        :rtype: tuple", "id": "f553:c5:m4"}
{"signature": "@property<EOL><INDENT>def bounding_box(self):<DEDENT>", "body": "return self._bounding_box<EOL>", "docstring": "The bounding box of this object.\n\n        :return: (min x, min y, max x, max y)\n        :rtype: tuple", "id": "f553:c0:m8"}
{"signature": "def __init__(self, pattern):", "body": "self._pattern = pattern<EOL>self._rows = list(pattern.rows)<EOL>self._walk = _RecursiveWalk(self._rows[<NUM_LIT:0>].instructions[<NUM_LIT:0>])<EOL>self._rows.sort(key=lambda row: self._walk.row_in_grid(row).yx)<EOL>", "docstring": ":param knittingpattern.KnittingPattern.KnittingPattern pattern: the\n  pattern to layout", "id": "f553:c5:m0"}
{"signature": "@property<EOL><INDENT>def yx(self):<DEDENT>", "body": "return self._position.y, self._position.x<EOL>", "docstring": ":return: ``(y, x)`` coordinate in the grid\n        :rtype: tuple", "id": "f553:c0:m4"}
{"signature": "@property<EOL><INDENT>def instruction(self):<DEDENT>", "body": "return self._instruction<EOL>", "docstring": "The instruction.\n\n        :return: instruction that is placed on the grid\n        :rtype: knittingpattern.Instruction.InstructionInRow", "id": "f553:c1:m2"}
{"signature": "@property<EOL><INDENT>def height(self):<DEDENT>", "body": "return INSTRUCTION_HEIGHT<EOL>", "docstring": ":return: height of the object on the grid\n        :rtype: float", "id": "f553:c0:m6"}
{"signature": "def __init__(self, first_instruction):", "body": "self._rows_in_grid = {}<EOL>self._todo = []<EOL>self._expand(first_instruction.row, Point(<NUM_LIT:0>, <NUM_LIT:0>), [])<EOL>self._walk()<EOL>", "docstring": "Start walking the knitting pattern starting from first_instruction.", "id": "f553:c3:m0"}
{"signature": "def instruction_in_grid(self, instruction):", "body": "row_position = self._rows_in_grid[instruction.row].xy<EOL>x = instruction.index_of_first_consumed_mesh_in_row<EOL>position = Point(row_position.x + x, row_position.y)<EOL>return InstructionInGrid(instruction, position)<EOL>", "docstring": "Returns an `InstructionInGrid` object for the `instruction`", "id": "f553:c3:m8"}
{"signature": "def _new_svg_dumper(self, on_dump):", "body": "return SVGDumper(on_dump)<EOL>", "docstring": "Create a new SVGDumper with the function ``on_dump``.\n\n        :rtype: knittingpattern.Dumper.SVGDumper", "id": "f554:c0:m2"}
{"signature": "def instruction_to_svg_dict(self, instruction_or_id, copy_result=True):", "body": "instruction_id = self.get_instruction_id(instruction_or_id)<EOL>if instruction_id in self._cache:<EOL><INDENT>result = self._cache[instruction_id]<EOL><DEDENT>else:<EOL><INDENT>result = self._instruction_to_svg_dict(instruction_id)<EOL>self._cache[instruction_id] = result<EOL><DEDENT>if copy_result:<EOL><INDENT>result = deepcopy(result)<EOL><DEDENT>return result<EOL>", "docstring": "Return the SVG dict for the SVGBuilder.\n\n        :param instruction_or_id: the instruction or id, see\n          :meth:`get_instruction_id`\n        :param bool copy_result: whether to copy the result\n        :rtype: dict\n\n        The result is cached.", "id": "f554:c0:m4"}
{"signature": "def to_svg(self, instruction_or_id,<EOL>i_promise_not_to_change_the_result=False):", "body": "return self._new_svg_dumper(lambda: self.instruction_to_svg_dict(<EOL>instruction_or_id, not i_promise_not_to_change_the_result))<EOL>", "docstring": "Return the SVG for an instruction.\n\n        :param instruction_or_id: either an\n          :class:`~knittingpattern.Instruction.Instruction` or an id\n          returned by :meth:`get_instruction_id`\n        :param bool i_promise_not_to_change_the_result:\n\n          - :obj:`False`: the result is copied, you can alter it.\n          - :obj:`True`: the result is directly from the cache. If you change\n            the result, other calls of this function get the changed result.\n\n        :return: an SVGDumper\n        :rtype: knittingpattern.Dumper.SVGDumper", "id": "f554:c0:m3"}
{"signature": "def default_instructions_to_svg():", "body": "instruction_to_svg = InstructionToSVG()<EOL>instruction_to_svg.load.relative_folder(__name__, DEFAULT_SVG_FOLDER)<EOL>return instruction_to_svg<EOL>", "docstring": "load the default set of svg files for instructions\n\n    :return: the default svg files for the instructions in this package\n    :rtype: knittingpattern.InstructionToSVG.InstructionToSVG", "id": "f555:m0"}
{"signature": "def default_instruction_to_svg(self, instruction):", "body": "svg_dict = self.default_instruction_to_svg_dict(instruction)<EOL>return xmltodict.unparse(svg_dict)<EOL>", "docstring": "As :meth:`instruction_to_svg` but it only takes the ``default.svg``\n        file into account.\n\n        In case no file is found for an instruction in\n        :meth:`instruction_to_svg`,\n        this method is used to determine the default svg for it.\n\n        The content is created by replacing the text ``{instruction.type}`` in\n        the whole svg file named ``default.svg``.\n\n        If no file ``default.svg`` was loaded, an empty string is returned.", "id": "f555:c0:m8"}
{"signature": "@property<EOL><INDENT>def index_of_last_consumed_mesh_in_row(self):<DEDENT>", "body": "index = self.index_of_first_consumed_mesh_in_row<EOL>return index + self.number_of_consumed_meshes - <NUM_LIT:1><EOL>", "docstring": "The index of the last consumed mesh of this instruction in its row.\n\n        Same as :attr:`index_of_last_produced_mesh_in_row`\n        but for the last consumed mesh.", "id": "f558:c1:m16"}
{"signature": "@property<EOL><INDENT>def consuming_instructions(self):<DEDENT>", "body": "return [(mesh.consuming_instruction if mesh.is_consumed() else None)<EOL>for mesh in self.produced_meshes]<EOL>", "docstring": "Instructions that consume the meshes that this instruction produces.\n\n        :return: a list of :class:`instructions\n          <knittingpattern.Instruction.InstructionInRow>`\n        :rtype: list\n\n        .. seealso:: :attr:`producing_instructions`, :attr:`produced_meshes`", "id": "f558:c1:m21"}
{"signature": "@property<EOL><INDENT>def hex_color(self):<DEDENT>", "body": "if self.has_color():<EOL><INDENT>return convert_color_to_rrggbb(self.color)<EOL><DEDENT>return None<EOL>", "docstring": "The color in \"#RRGGBB\" format.\n\n        :return: the :attr:`color` in \"#RRGGBB\" format or none if no color is\n          given", "id": "f558:c0:m13"}
{"signature": "def produces_meshes(self):", "body": "return self.number_of_produced_meshes != <NUM_LIT:0><EOL>", "docstring": "Whether this institution produces meshes.\n\n        :return: whether this instruction produces any meshes\n        :rtype: bool\n\n        .. seealso:: :attr:`number_of_produced_meshes`", "id": "f558:c0:m10"}
{"signature": "@property<EOL><INDENT>def _new_produced_mesh(self):<DEDENT>", "body": "return ProducedMesh<EOL>", "docstring": ":return: the class of the produced meshes.", "id": "f558:c1:m2"}
{"signature": "def consumes_meshes(self):", "body": "return self.number_of_consumed_meshes != <NUM_LIT:0><EOL>", "docstring": "Whether this instruction consumes meshes.\n\n        :return: whether this instruction consumes any meshes\n        :rtype: bool\n\n        .. seealso:: :attr:`number_of_consumed_meshes`", "id": "f558:c0:m11"}
{"signature": "def does_knit(self):", "body": "return self.type == KNIT_TYPE<EOL>", "docstring": "Whether this instruction is a knit instruction.\n\n        :return: whether this instruction is a knit instruction\n        :rtype: bool", "id": "f558:c0:m8"}
{"signature": "def _raise_not_found_error(self):", "body": "raise InstructionNotFoundInRow(self._instruction_not_found_message)<EOL>", "docstring": "Raise an error that this instruction is in its row no longer.\n\n        :raises knittingpattern.Instruction.InstructionNotFoundInRow:\n          the instruction was not found\n\n        .. warning: private, do not use", "id": "f558:c1:m12"}
{"signature": "@property<EOL><INDENT>def colors(self):<DEDENT>", "body": "return [self.color]<EOL>", "docstring": "All the colors that an instruction has.\n\n        :return: a list of colors of the instruction. If the instruction has\n          no color, this is ``[None]``.\n        :rtype: list", "id": "f558:c0:m3"}
{"signature": "@property<EOL><INDENT>def type(self):<DEDENT>", "body": "return self.get(TYPE, DEFAULT_TYPE)<EOL>", "docstring": "The type of the instruction.\n\n        :return: the :data:`type <TYPE>` of the instruction or\n          :data:`DEFAULT_TYPE` if none is specified.\n        :rtype: str\n\n        The type should be a string.\n        Depending on the type, the instruction can receive additional\n        attributes.\n\n        .. seealso:: :mod:`knittingpattern.InstructionLibrary`", "id": "f558:c0:m1"}
{"signature": "def has_color(self):", "body": "return self.color is not None<EOL>", "docstring": "Whether this instruction has a color.\n\n        :return: whether a :data:`color <COLOR>` is specified\n        :rtype: bool", "id": "f558:c0:m7"}
{"signature": "@property<EOL><INDENT>def index_of_last_produced_mesh_in_row(self):<DEDENT>", "body": "index = self.index_of_first_produced_mesh_in_row<EOL>return index + self.number_of_produced_meshes - <NUM_LIT:1><EOL>", "docstring": "Index of the last mesh produced by this instruction in its row.\n\n        :return: an index of the last produced mesh of rows produced meshes\n        :rtype: int\n\n        .. note:: If this instruction :meth:`produces meshes\n          <Instruction.produces_meshes>`, this is the index of\n          its last produces mesh in the row. However, if this instruction does\n          not produce meshes, this is the index **before** the first mesh of\n          the instruction if it produced meshes.\n\n        .. seealso:: :attr:`index_of_first_produced_mesh_in_row`", "id": "f558:c1:m14"}
{"signature": "@property<EOL><INDENT>def _new_consumed_mesh(self):<DEDENT>", "body": "return ConsumedMesh<EOL>", "docstring": ":return: the class of the consumed meshes.", "id": "f558:c1:m3"}
{"signature": "def unique(iterables):", "body": "included_elements = set()<EOL>def included(element):<EOL><INDENT>result = element in included_elements<EOL>included_elements.add(element)<EOL>return result<EOL><DEDENT>return [element for elements in iterables for element in elements<EOL>if not included(element)]<EOL>", "docstring": "Create an iterable from the iterables that contains each element once.\n\n    :return: an iterable over the iterables. Each element of the result\n      appeared only once in the result. They are ordered by the first\n      occurrence in the iterables.", "id": "f559:m0"}
{"signature": "def new_pattern(self, id_, name, rows=None):", "body": "if rows is None:<EOL><INDENT>rows = self.new_row_collection()<EOL><DEDENT>return self._spec.new_pattern(id_, name, rows, self)<EOL>", "docstring": "Create a new knitting pattern.\n\n        If rows is :obj:`None` it is replaced with the\n        :meth:`new_row_collection`.", "id": "f560:c1:m16"}
{"signature": "def knitting_pattern_set(self, values):", "body": "self._start()<EOL>pattern_collection = self._new_pattern_collection()<EOL>self._fill_pattern_collection(pattern_collection, values)<EOL>self._create_pattern_set(pattern_collection, values)<EOL>return self._pattern_set<EOL>", "docstring": "Parse a knitting pattern set.\n\n        :param dict value: the specification of the knitting pattern set\n        :rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet\n        :raises knittingpattern.KnittingPatternSet.ParsingError: if\n          :paramref:`value` does not fulfill the :ref:`specification\n          <FileFormatSpecification>`.", "id": "f560:c1:m4"}
{"signature": "def _get_version(self, values):", "body": "return values[VERSION]<EOL>", "docstring": ":return: the version of :paramref:`values`.", "id": "f560:c1:m20"}
{"signature": "def _delay_instructions(self, row):", "body": "self._instruction_todos.append(row)<EOL>", "docstring": "Add a deleyed inheritance that is ti be resolved later.\n\n        When calling :meth:`_finish_instructions` this inheritance chain shall\n        be resolved.", "id": "f560:c1:m8"}
{"signature": "def instruction_in_row(self, row, specification):", "body": "whole_instruction_ = self._as_instruction(specification)<EOL>return self._spec.new_instruction_in_row(row, whole_instruction_)<EOL>", "docstring": "Parse an instruction.\n\n        :param row: the row of the instruction\n        :param specification: the specification of the instruction\n        :return: the instruction in the row", "id": "f560:c1:m14"}
{"signature": "def _row(self, values):", "body": "row_id = self._to_id(values[ID])<EOL>row = self._spec.new_row(row_id, values, self)<EOL>if SAME_AS in values:<EOL><INDENT>self._delay_inheritance(row, self._to_id(values[SAME_AS]))<EOL><DEDENT>self._delay_instructions(row)<EOL>self._id_cache[row_id] = row<EOL>return row<EOL>", "docstring": "Parse a row.", "id": "f560:c1:m12"}
{"signature": "def __init__(self, specification):", "body": "self._spec = specification<EOL>self._start()<EOL>", "docstring": "Create a parser with a specification.\n\n        :param specification: the types and classes to use for the resulting\n          object structure, preferably a\n          :class:`knittingpattern.ParsingSpecification.ParsingSpecification`", "id": "f560:c1:m0"}
{"signature": "def _fill_pattern_collection(self, pattern_collection, values):", "body": "pattern = values.get(PATTERNS, [])<EOL>for pattern_to_parse in pattern:<EOL><INDENT>parsed_pattern = self._pattern(pattern_to_parse)<EOL>pattern_collection.append(parsed_pattern)<EOL><DEDENT>", "docstring": "Fill a pattern collection.", "id": "f560:c1:m11"}
{"signature": "def _get_type(self, values):", "body": "if TYPE not in values:<EOL><INDENT>self._error(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(KNITTING_PATTERN_TYPE))<EOL><DEDENT>type_ = values[TYPE]<EOL>if type_ != KNITTING_PATTERN_TYPE:<EOL><INDENT>self._error(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(type_, KNITTING_PATTERN_TYPE))<EOL><DEDENT>return type_<EOL>", "docstring": ":return: the type of a knitting pattern set.", "id": "f560:c1:m19"}
{"signature": "def _delay_inheritance(self, prototype, parent_id):", "body": "self._inheritance_todos.append((prototype, parent_id))<EOL>", "docstring": "Add a deleyed inheritance that is ti be resolved later.\n\n        When calling :meth:`_finish_inheritance` this inheritance chain shall\n        be resolved.", "id": "f560:c1:m6"}
{"signature": "@staticmethod<EOL><INDENT>def _to_id(id_):<DEDENT>", "body": "return tuple(id_) if isinstance(id_, list) else id_<EOL>", "docstring": "Converts the argument to a object suitable as an identifier.\n\n        :return: a hashable object", "id": "f560:c1:m2"}
{"signature": "def _new_pattern_collection(self):", "body": "return self._spec.new_pattern_collection()<EOL>", "docstring": "Create a new pattern collection.\n\n        :return: a new specified pattern collection for\n          :meth:`knitting_pattern_set`", "id": "f560:c1:m9"}
{"signature": "def _start(self):", "body": "self._instruction_library = self._spec.new_default_instructions()<EOL>self._as_instruction = self._instruction_library.as_instruction<EOL>self._id_cache = {}<EOL>self._pattern_set = None<EOL>self._inheritance_todos = []<EOL>self._instruction_todos = []<EOL>", "docstring": "Initialize the parsing process.", "id": "f560:c1:m1"}
{"signature": "def _finish_inheritance(self):", "body": "while self._inheritance_todos:<EOL><INDENT>prototype, parent_id = self._inheritance_todos.pop()<EOL>parent = self._id_cache[parent_id]<EOL>prototype.inherit_from(parent)<EOL><DEDENT>", "docstring": "Finish those who still need to inherit.", "id": "f560:c1:m5"}
{"signature": "def new_row_collection(self):", "body": "return self._spec.new_row_collection()<EOL>", "docstring": "Create a new row collection.\n\n        :return: a new specified row collection for the\n          :meth:`knitting pattern <new_pattern>`", "id": "f560:c1:m10"}
{"signature": "@abstractmethod<EOL><INDENT>def _is_connected_to(self, other_mesh):<DEDENT>", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m11"}
{"signature": "@abstractmethod<EOL><INDENT>def _connect_to(self, other_mesh):<DEDENT>", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m8"}
{"signature": "@abstractmethod<EOL><INDENT>def _is_consumed(self):<DEDENT>", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m5"}
{"signature": "def disconnect(self):", "body": "if self.is_connected():<EOL><INDENT>self._disconnect()<EOL><DEDENT>", "docstring": "Remove the connection between two rows through this mesh.\n\n        After disconnecting this mesh, it can be connected anew.", "id": "f561:c0:m26"}
{"signature": "def can_connect_to(self, other):", "body": "assert other.is_mesh()<EOL>disconnected = not other.is_connected() and not self.is_connected()<EOL>types_differ = self._is_consumed_mesh() != other._is_consumed_mesh()<EOL>return disconnected and types_differ<EOL>", "docstring": "Whether a connection can be established between those two meshes.", "id": "f561:c0:m33"}
{"signature": "@property<EOL><INDENT>def index_in_consuming_instruction(self):<DEDENT>", "body": "self._assert_is_consumed()<EOL>return self._consuming_instruction_and_index()[<NUM_LIT:1>]<EOL>", "docstring": "Index in instruction as consumed mesh.\n\n        :return: the index of the mesh in the list of meshes that\n          :attr:`consuming_instruction` consumes\n        :rtype: int\n\n        .. code:: python\n\n            instruction = mesh.consuming_instruction\n            index = mesh.index_in_consuming_instruction\n            assert instruction.consumed_meshes[index] == mesh\n\n        .. seealso:: :attr:`consuming_instruction`,\n          :attr:`index_in_consuming_instruction`\n\n        .. warning:: Check with :meth:`is_consumed` before!", "id": "f561:c0:m23"}
{"signature": "@property<EOL><INDENT>def index_in_consuming_row(self):<DEDENT>", "body": "self._assert_is_consumed()<EOL>return self._consuming_row_and_index()[<NUM_LIT:1>]<EOL>", "docstring": "Index in row as consumed mesh.\n\n        :return: the index of the mesh in the list of meshes that\n          :attr:`consuming_row` consumes\n        :rtype: int\n\n        .. code:: python\n\n            row = mesh.consuming_row\n            index = mesh.index_in_consuming_row\n            assert row.consumed_meshes[index] == mesh\n\n        .. seealso:: :attr:`consuming_row`, :attr:`index_in_producing_row`\n\n        .. warning:: Check with :meth:`is_consumed` before!", "id": "f561:c0:m20"}
{"signature": "def __init__(self, consuming_instruction,<EOL>index_in_consuming_instruction):", "body": "self.__consuming_instruction_and_index = (<EOL>consuming_instruction,<EOL>index_in_consuming_instruction<EOL>)<EOL>self._produced_part = None<EOL>", "docstring": ":param consuming_instruction: the\n  :class:`instruction <knittingpattern.Instruction.InstructionInRow>`\n  that consumes the mesh\n:param int index_in_consuming_instruction: the index of the mesh\n  in the list of meshes that :attr:`consuming_instruction`\n  consumes\n\n.. note:: There should be no necessity to create instances of this\n  directly. You should be able to use\n  ``instruction.produced_meshes`` or ``instruction.consumed_meshes``\n  to access the :class:`meshes <knittingpattern.Mesh.Mesh>`.", "id": "f561:c2:m0"}
{"signature": "@abstractmethod<EOL><INDENT>def _consuming_instruction_and_index(self):<DEDENT>", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m2"}
{"signature": "def is_connected(self):", "body": "return self._is_consumed() and self._is_produced()<EOL>", "docstring": "Returns whether this mesh is already connected.\n\n        :return: whether this mesh is connected to an other.\n        :rtype: bool", "id": "f561:c0:m28"}
{"signature": "@abstractmethod<EOL><INDENT>def _is_produced(self):<DEDENT>", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m4"}
{"signature": "@property<EOL><INDENT>def consuming_row(self):<DEDENT>", "body": "self._assert_is_consumed()<EOL>return self._consuming_row_and_index()[<NUM_LIT:0>]<EOL>", "docstring": "Row which consumes this mesh.\n\n        :return: the row that consumes this mesh\n        :rtype: knittingpattern.Row.Row\n\n        .. seealso:: :attr:`index_in_consuming_row`,\n          :attr:`consuming_instruction`, :attr:`producing_row`\n\n        .. warning:: Check with :meth:`is_consumed` before!", "id": "f561:c0:m21"}
{"signature": "def is_consumed(self):", "body": "return self._is_consumed()<EOL>", "docstring": "Whether the mesh has an instruction that consumed it.\n\n        :return: whether the mesh is consumed by an instruction\n        :rtype: bool\n\n        If you get this mesh from\n        :attr:`knittingpattern.Instruction.InstructionInRow.consumed_meshes` or\n        :attr:`knittingpattern.Row.Row.consumed_meshes`,\n        this should be :obj:`True`.\n\n        .. warning:: Before you use any methods on how the mesh is consumed,\n          you should check with ``mesh.is_consumed()``.", "id": "f561:c0:m15"}
{"signature": "@property<EOL><INDENT>def index_in_producing_instruction(self):<DEDENT>", "body": "self._assert_is_produced()<EOL>return self._producing_instruction_and_index()[<NUM_LIT:1>]<EOL>", "docstring": "Index in instruction as a produced mesh.\n\n        :return: the index of the mesh in the list of meshes that\n          :attr:`producing_instruction` produces\n        :rtype: int\n\n        .. code:: python\n\n            instruction = mesh.producing_instruction\n            index = mesh.index_in_producing_instruction\n            assert instruction.produced_meshes[index] == mesh\n\n        .. seealso:: :attr:`producing_instruction`,\n          :attr:`index_in_consuming_instruction`\n\n        .. warning:: Check with :meth:`is_produced` before!", "id": "f561:c0:m16"}
{"signature": "@property<EOL><INDENT>def index_in_producing_row(self):<DEDENT>", "body": "self._assert_is_produced()<EOL>return self._producing_row_and_index()[<NUM_LIT:1>]<EOL>", "docstring": "Index in row as produced mesh.\n\n        :return: the index of the mesh in the :attr:`producing_row`\n        :rtype: int\n\n        .. code:: python\n\n            row = mesh.producing_row\n            index = mesh.index_in_producing_row\n            assert row[index] == mesh\n\n        .. seealso:: :attr:`producing_row`, :attr:`index_in_consuming_row`\n\n        .. warning:: Check with :meth:`is_produced` before!", "id": "f561:c0:m19"}
{"signature": "@abstractmethod<EOL><INDENT>def _as_produced_mesh(self):<DEDENT>", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m9"}
{"signature": "@abstractmethod<EOL><INDENT>def _producing_instruction_and_index(self):<DEDENT>", "body": "", "docstring": "Replace this method.", "id": "f561:c0:m0"}
{"signature": "def string(self):", "body": "if self.__text_is_expected:<EOL><INDENT>return self._string()<EOL><DEDENT>else:<EOL><INDENT>return self._bytes().decode(self.__encoding)<EOL><DEDENT>", "docstring": ":return: the dump as a string", "id": "f564:c0:m2"}
{"signature": "def binary_file(self, file=None):", "body": "if file is None:<EOL><INDENT>file = BytesIO()<EOL><DEDENT>self._binary_file(file)<EOL>return file<EOL>", "docstring": "Same as :meth:`file` but for binary content.", "id": "f564:c0:m8"}
{"signature": "@property<EOL><INDENT>def encoding(self):<DEDENT>", "body": "return self.__encoding<EOL>", "docstring": ":return: the encoding for byte to string conversion\n        :rtype: str", "id": "f564:c0:m1"}
{"signature": "def bytes(self):", "body": "if self.__text_is_expected:<EOL><INDENT>return self.string().encode(self.__encoding)<EOL><DEDENT>else:<EOL><INDENT>return self._bytes()<EOL><DEDENT>", "docstring": ":return: the dump as bytes.", "id": "f564:c0:m4"}
{"signature": "def temporary_file(self, delete_when_closed=True):", "body": "return self._temporary_file(delete_when_closed)<EOL>", "docstring": "Saves the dump in a temporary file and returns the open file object.\n\n        :param bool delete_when_closed: whether to delete the temporary file\n                                        when it is closed.\n        :return: a file-like object\n\n        If :paramref:`delete_when_closed` is :obj:`True` (default) the file\n        on the hard drive will be deleted if it is closed or not referenced\n        any more.\n\n        If :paramref:`delete_when_closed` is :obj:`False` the returned\n        temporary file is not deleted when closed or unreferenced.\n        The user of this method has then the responsibility to free the\n        space on the host system.\n\n        The returned file-like object has an attribute ``name`` that holds\n        the location of the file.", "id": "f564:c0:m15"}
{"signature": "def path(self, path):", "body": "self._path(path)<EOL>", "docstring": "Saves the dump in a file named :paramref:`path`.\n\n        :param str path: a valid path to a file location. The file can exist.", "id": "f564:c0:m11"}
{"signature": "def __repr__(self):", "body": "return \"<STR_LIT>\".format(<EOL>self.__class__.__name__,<EOL>self.__encoding<EOL>)<EOL>", "docstring": "the string representation for people to read\n\n        :return: the string representation of this object\n        :rtype: str", "id": "f564:c0:m18"}
{"signature": "def _string(self):", "body": "file = StringIO()<EOL>self.__dump_to_file(file)<EOL>file.seek(<NUM_LIT:0>)<EOL>return file.read()<EOL>", "docstring": ":return: the string from a :class:`io.StringIO`", "id": "f564:c0:m3"}
{"signature": "def __init__(self, on_dump):", "body": "super().__init__(self._dump_to_file)<EOL>self.__dump_object = on_dump<EOL>", "docstring": "Create a new JSONDumper object with the callable `on_dump`.\n\n        `on_dump` takes no arguments and returns the object that should be\n        serialized to JSON.", "id": "f565:c0:m0"}
{"signature": "def _dump_to_file(self, file):", "body": "json.dump(self.object(), file)<EOL>", "docstring": "dump to the file", "id": "f565:c0:m2"}
{"signature": "def write(self, string):", "body": "bytes_ = string.encode(self._encoding)<EOL>self._file.write(bytes_)<EOL>", "docstring": "Write a string to the file.", "id": "f567:c1:m1"}
{"signature": "def write(self, bytes_):", "body": "string = bytes_.decode(self._encoding)<EOL>self._file.write(string)<EOL>", "docstring": "Write bytes to the file.", "id": "f567:c0:m1"}
{"signature": "def inherit_from(self, new_specification):", "body": "self.__specification.insert(<NUM_LIT:1>, new_specification)<EOL>", "docstring": "Inherit from a :paramref:`new_specification`\n\n        :param new_specification: a specification as passed to :meth:`__init__`\n\n        The :paramref:`new_specification` is inserted before the first\n        :paramref:`inherited value <__init__.inherited_values>`.\n\n        If the order is\n\n        1. :paramref:`~__init__.specification`\n        2. :paramref:`~__init__.inherited_values`\n\n        after calling ``prototype.inherit_from(new_specification)`` the lookup\n        order is\n\n        1. :paramref:`~__init__.specification`\n        2. :paramref:`new_specification`\n        3. :paramref:`~__init__.inherited_values`", "id": "f568:c0:m4"}
{"signature": "def __init__(self, specification, inherited_values=()):", "body": "self.__specification = [specification] + list(inherited_values)<EOL>", "docstring": "create a new prototype\n\n        :param specification: the specification of the prototype.\n          This specification can be inherited by other prototypes.\n          It can be a :class:`dict` or an other\n          :class:`knittingpattern.Prototype.Prototype` or anything else that\n          supports :meth:`__contains__` and :meth:`__getitem__`\n\n        To look up a key in the specification it will be walked through\n\n        1. :paramref:`specification`\n        2. :paramref:`inherited_values` in order\n\n        However, new lookups can be inserted at before\n        :paramref:`inherited_values`, by calling :meth:`inherit_from`.", "id": "f568:c0:m0"}
{"signature": "def __init__(self, type_, version, patterns, parser, comment=None):", "body": "self._version = version<EOL>self._type = type_<EOL>self._patterns = patterns<EOL>self._comment = comment<EOL>self._parser = parser<EOL>", "docstring": "Create a new knitting pattern set.\n\n        This is the class for a set of :class:`knitting patterns\n        <knittingpattern.KnittingPattern.KnittingPattern>`.\n\n        :param str type: the type of the knitting pattern set, see the\n          :ref:`specification <FileFormatSpecification>`.\n        :param str version: the version of the knitting pattern set.\n          This is not the version of the library but the version of the\n          :ref:`specification <FileFormatSpecification>`.\n        :param patterns: a collection of patterns. This should be a\n          :class:`~knittingpattern.IdCollection.IdCollection` of\n          :class:`KnittingPatterns\n          <knittingpattern.KnittingPattern.KnittingPattern>`.\n        :param comment: a comment about the knitting pattern", "id": "f569:c0:m0"}
{"signature": "@property<EOL><INDENT>def comment(self):<DEDENT>", "body": "return self._comment<EOL>", "docstring": "The comment about the knitting pattern.\n\n        :return: the comment for the knitting pattern set or None,\n          see :meth:`__init__`.", "id": "f569:c0:m4"}
{"signature": "def add_new_pattern(self, id_, name=None):", "body": "if name is None:<EOL><INDENT>name = id_<EOL><DEDENT>pattern = self._parser.new_pattern(id_, name)<EOL>self._patterns.append(pattern)<EOL>return pattern<EOL>", "docstring": "Add a new, empty knitting pattern to the set.\n\n        :param id_: the id of the pattern\n        :param name: the name of the pattern to add or if :obj:`None`, the\n          :paramref:`id_` is used\n        :return: a new, empty knitting pattern\n        :rtype: knittingpattern.KnittingPattern.KnittingPattern", "id": "f569:c0:m7"}
{"signature": "def absjoin(*args):", "body": "return os.path.abspath(os.path.join(*args))<EOL>", "docstring": ":return: an absolute path to the joined arguments\n:param args: the parts of the path to join", "id": "f571:m0"}
{"signature": "@fixture<EOL>def warnings(sphinx_build):", "body": "return re.findall(WARNING_PATTERN, sphinx_build)<EOL>", "docstring": ":return: the warnings during the build process.", "id": "f573:m3"}
{"signature": "def print_bytes(bytes_):", "body": "try:<EOL><INDENT>print(bytes_.decode())<EOL><DEDENT>except UnicodeDecodeError:<EOL><INDENT>print(bytes_)<EOL><DEDENT>", "docstring": "Print bytes safely as string.", "id": "f573:m0"}
{"signature": "@fixture(scope=\"<STR_LIT>\")<EOL>def sphinx_build():", "body": "if os.path.exists(BUILD_DIRECTORY):<EOL><INDENT>shutil.rmtree(BUILD_DIRECTORY)<EOL><DEDENT>output = subprocess.check_output(<EOL>\"<STR_LIT>\", shell=True, cwd=DOCS_DIRECTORY,<EOL>stderr=subprocess.STDOUT<EOL>)<EOL>output += subprocess.check_output(<EOL>\"<STR_LIT>\", shell=True, cwd=DOCS_DIRECTORY,<EOL>stderr=subprocess.STDOUT<EOL>)<EOL>print(output.decode())<EOL>return output<EOL>", "docstring": "Build the documentation with sphinx and return the output.", "id": "f573:m1"}
{"signature": "def _encrypt(self, value):", "body": "value = json.dumps(value)<EOL>with warnings.catch_warnings():<EOL><INDENT>warnings.simplefilter(\"<STR_LIT:ignore>\")<EOL>encrypted_value = self.cipher.encrypt(value.encode('<STR_LIT:utf8>'))<EOL><DEDENT>hexified_value = binascii.hexlify(encrypted_value).decode('<STR_LIT:ascii>')<EOL>return hexified_value<EOL>", "docstring": "Turn a json serializable value into an jsonified, encrypted,\n        hexa string.", "id": "f575:c0:m19"}
{"signature": "def delete(self, key_name):", "body": "self._assert_valid_stash()<EOL>if key_name == '<STR_LIT>':<EOL><INDENT>raise GhostError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if not self.get(key_name):<EOL><INDENT>raise GhostError('<STR_LIT>'.format(key_name))<EOL><DEDENT>key = self._storage.get(key_name)<EOL>if key.get('<STR_LIT>'):<EOL><INDENT>raise GhostError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(key_name))<EOL><DEDENT>deleted = self._storage.delete(key_name)<EOL>audit(<EOL>storage=self._storage.db_path,<EOL>action='<STR_LIT>',<EOL>message=json.dumps(dict(key_name=key_name)))<EOL>if not deleted:<EOL><INDENT>raise GhostError('<STR_LIT>'.format(key_name))<EOL><DEDENT>", "docstring": "Delete a key if it exists.", "id": "f575:c0:m9"}
{"signature": "def put(self,<EOL>name,<EOL>value=None,<EOL>modify=False,<EOL>metadata=None,<EOL>description='<STR_LIT>',<EOL>encrypt=True,<EOL>lock=False,<EOL>key_type='<STR_LIT>',<EOL>add=False):", "body": "def assert_key_is_unlocked(existing_key):<EOL><INDENT>if existing_key and existing_key.get('<STR_LIT>'):<EOL><INDENT>raise GhostError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(name))<EOL><DEDENT><DEDENT>def assert_value_provided_for_new_key(value, existing_key):<EOL><INDENT>if not value and not existing_key.get('<STR_LIT:value>'):<EOL><INDENT>raise GhostError('<STR_LIT>')<EOL><DEDENT><DEDENT>self._assert_valid_stash()<EOL>self._validate_key_schema(value, key_type)<EOL>if value and encrypt and not isinstance(value, dict):<EOL><INDENT>raise GhostError('<STR_LIT>')<EOL><DEDENT>key = self._handle_existing_key(name, modify or add)<EOL>assert_key_is_unlocked(key)<EOL>assert_value_provided_for_new_key(value, key)<EOL>new_key = dict(name=name, lock=lock)<EOL>if value:<EOL><INDENT>if add:<EOL><INDENT>value = self._update_existing_key(key, value)<EOL><DEDENT>new_key['<STR_LIT:value>'] = self._encrypt(value) if encrypt else value<EOL><DEDENT>else:<EOL><INDENT>new_key['<STR_LIT:value>'] = key.get('<STR_LIT:value>')<EOL><DEDENT>new_key['<STR_LIT:description>'] = description or key.get('<STR_LIT:description>')<EOL>new_key['<STR_LIT>'] = key.get('<STR_LIT>') or _get_current_time()<EOL>new_key['<STR_LIT>'] = _get_current_time()<EOL>new_key['<STR_LIT>'] = metadata or key.get('<STR_LIT>')<EOL>new_key['<STR_LIT>'] = key.get('<STR_LIT>') or str(uuid.uuid4())<EOL>new_key['<STR_LIT:type>'] = key.get('<STR_LIT:type>') or key_type<EOL>key_id = self._storage.put(new_key)<EOL>audit(<EOL>storage=self._storage.db_path,<EOL>action='<STR_LIT>' if (modify or add) else 
'<STR_LIT>',<EOL>message=json.dumps(dict(<EOL>key_name=new_key['<STR_LIT:name>'],<EOL>value='<STR_LIT>',<EOL>description=new_key['<STR_LIT:description>'],<EOL>uid=new_key['<STR_LIT>'],<EOL>metadata=json.dumps(new_key['<STR_LIT>']),<EOL>lock=new_key['<STR_LIT>'],<EOL>type=new_key['<STR_LIT:type>'])))<EOL>return key_id<EOL>", "docstring": "Put a key inside the stash\n\n        if key exists and modify true: delete and create\n        if key exists and modify false: fail\n        if key doesn't exist and modify true: fail\n        if key doesn't exist and modify false: create\n\n        `name` is unique and cannot be changed.\n\n        `value` must be provided if the key didn't already exist, otherwise,\n        the previous value will be retained.\n\n        `created_at` will be left unmodified if the key\n        already existed. Otherwise, the current time will be used.\n\n        `modified_at` will be changed to the current time\n        if the field is being modified.\n\n        `metadata` will be updated if provided. If it wasn't\n        provided the field from the existing key will be used and the\n        same goes for the `uid` which will be generated if it didn't\n        previously exist.\n\n        `lock` will lock the key to prevent it from being modified or deleted\n\n        `add` allows to add values to an existing key instead of overwriting.\n\n        Returns the id of the key in the database", "id": "f575:c0:m4"}
{"signature": "def put(self, key):", "body": "self.client.write(self._key_path(key['<STR_LIT:name>']), **key)<EOL>return self._key_path(key['<STR_LIT:name>'])<EOL>", "docstring": "Put and return the only unique identifier possible, its path", "id": "f575:c4:m3"}
{"signature": "def put(self, key):", "body": "return self.db.insert(key)<EOL>", "docstring": "Insert the key and return its database id", "id": "f575:c1:m3"}
{"signature": "def delete(self, key_name):", "body": "self.client.delete_object(<EOL>Bucket=self.db_path,<EOL>Key=key_name)<EOL>return self.get(key_name) == {}<EOL>", "docstring": "Delete the key.\n        :return: True if it was deleted, False otherwise", "id": "f575:c6:m5"}
{"signature": "@main.command(name='<STR_LIT>', short_help='<STR_LIT>')<EOL>@click.argument('<STR_LIT>')<EOL>@click.argument('<STR_LIT>', nargs=-<NUM_LIT:1>, required=True)<EOL>@click.option('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>',<EOL>metavar='<STR_LIT>',<EOL>multiple=True,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>@click.option('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>is_flag=True,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>is_flag=True,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>',<EOL>is_flag=True,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>@click.option('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>type=click.Choice(['<STR_LIT>', '<STR_LIT>']),<EOL>default='<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>@stash_option<EOL>@passphrase_option<EOL>@backend_option<EOL>def put_key(key_name,<EOL>value,<EOL>description,<EOL>meta,<EOL>modify,<EOL>add,<EOL>lock,<EOL>key_type,<EOL>stash,<EOL>passphrase,<EOL>backend):", "body": "stash = _get_stash(backend, stash, passphrase)<EOL>try:<EOL><INDENT>click.echo('<STR_LIT>'.format(key_type))<EOL>stash.put(<EOL>name=key_name,<EOL>value=_build_dict_from_key_value(value),<EOL>modify=modify,<EOL>metadata=_build_dict_from_key_value(meta),<EOL>description=description,<EOL>lock=lock,<EOL>key_type=key_type,<EOL>add=add)<EOL>click.echo('<STR_LIT>')<EOL><DEDENT>except GhostError as ex:<EOL><INDENT>sys.exit(ex)<EOL><DEDENT>", "docstring": "Insert a key to the stash\n\n    `KEY_NAME` is the name of the key to insert\n\n    `VALUE` is a key=value argument which can be provided multiple times.\n    it is the encrypted value of your key", "id": "f575:m14"}
{"signature": "def load(self, origin_passphrase, keys=None, key_file=None):", "body": "<EOL>self._assert_valid_stash()<EOL>if not (bool(keys) ^ bool(key_file)):<EOL><INDENT>raise GhostError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if key_file:<EOL><INDENT>with open(key_file) as stash_file:<EOL><INDENT>keys = json.loads(stash_file.read())<EOL><DEDENT><DEDENT>decrypt = origin_passphrase != self.passphrase<EOL>if decrypt:<EOL><INDENT>stub = Stash(TinyDBStorage('<STR_LIT>'), origin_passphrase)<EOL><DEDENT>for key in keys:<EOL><INDENT>self.put(<EOL>name=key['<STR_LIT:name>'],<EOL>value=stub._decrypt(key['<STR_LIT:value>']) if decrypt else key['<STR_LIT:value>'],<EOL>metadata=key['<STR_LIT>'],<EOL>description=key['<STR_LIT:description>'],<EOL>lock=key.get('<STR_LIT>'),<EOL>key_type=key.get('<STR_LIT:type>'),<EOL>encrypt=decrypt)<EOL><DEDENT>", "docstring": "Import keys to the stash from either a list of keys or a file\n\n        `keys` is a list of dictionaries created by `self.export`\n        `stash_path` is a path to a file created by `self.export`", "id": "f575:c0:m16"}
{"signature": "@click.group(context_settings=CLICK_CONTEXT_SETTINGS)<EOL>def main():", "body": "", "docstring": "Ghost generates a secret-store in which you can\n    keep your secrets encrypted. Ghost isn't real. It's just in your head.", "id": "f575:m12"}
{"signature": "def list(self):", "body": "<EOL>return self.db.search(Query().name.matches('<STR_LIT>'))<EOL>", "docstring": "Return a list of all keys (not just key names, but rather the keys\n        themselves).\n\n        e.g.\n         {u'created_at': u'2016-10-10 08:31:53',\n          u'description': None,\n          u'metadata': None,\n          u'modified_at': u'2016-10-10 08:31:53',\n          u'name': u'aws',\n          u'uid': u'459f12c0-f341-413e-9d7e-7410f912fb74',\n          u'value': u'the_value'},\n         {u'created_at': u'2016-10-10 08:32:29',\n          u'description': u'my gcp token',\n          u'metadata': {u'owner': u'nir'},\n          u'modified_at': u'2016-10-10 08:32:29',\n          u'name': u'gcp',\n          u'uid': u'a51a0043-f241-4d52-93c1-266a3c5de15e',\n          u'value': u'the_value'}]", "id": "f575:c1:m5"}
{"signature": "def _decode(self, data):", "body": "return json.loads(base64.b64decode(data['<STR_LIT>']).decode('<STR_LIT:utf-8>'))<EOL>", "docstring": "Decode one key as returned by consul.\n\n        The format of the data returned is [{'Value': base-64-encoded-json,\n        'Key': keyname}]. We need to decode and return just the values.", "id": "f575:c3:m7"}
{"signature": "def get(self, key_name, decrypt=True):", "body": "self._assert_valid_stash()<EOL>key = self._storage.get(key_name).copy()<EOL>if not key.get('<STR_LIT:value>'):<EOL><INDENT>return None<EOL><DEDENT>if decrypt:<EOL><INDENT>key['<STR_LIT:value>'] = self._decrypt(key['<STR_LIT:value>'])<EOL><DEDENT>audit(<EOL>storage=self._storage.db_path,<EOL>action='<STR_LIT:GET>',<EOL>message=json.dumps(dict(key_name=key_name)))<EOL>return key<EOL>", "docstring": "Return a key with its parameters if it was found.", "id": "f575:c0:m7"}
{"signature": "@main.command(name='<STR_LIT>', short_help='<STR_LIT>')<EOL>@click.argument('<STR_LIT>')<EOL>@click.option('<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>@stash_option<EOL>@passphrase_option<EOL>@backend_option<EOL>def load_keys(key_file, origin_passphrase, stash, passphrase, backend):", "body": "stash = _get_stash(backend, stash, passphrase)<EOL>click.echo('<STR_LIT>'.format(key_file))<EOL>stash.load(origin_passphrase, key_file=key_file)<EOL>click.echo('<STR_LIT>')<EOL>", "docstring": "Load all keys from an exported key file to the stash\n\n    `KEY_FILE` is the exported stash file to load keys from", "id": "f575:m22"}
{"signature": "def lock(self, key_name):", "body": "self._change_lock_state(key_name, lock=True)<EOL>", "docstring": "Lock a key to prevent it from being deleted, purged and modified", "id": "f575:c0:m11"}
{"signature": "@main.command(name='<STR_LIT>')<EOL>@click.option('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>default='<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>@stash_option<EOL>@passphrase_option<EOL>@backend_option<EOL>def export_keys(output_path, stash, passphrase, backend):", "body": "stash = _get_stash(backend, stash, passphrase)<EOL>try:<EOL><INDENT>click.echo('<STR_LIT>'.format(output_path))<EOL>stash.export(output_path=output_path)<EOL>click.echo('<STR_LIT>')<EOL><DEDENT>except GhostError as ex:<EOL><INDENT>sys.exit(ex)<EOL><DEDENT>", "docstring": "Export all keys to a file", "id": "f575:m21"}
{"signature": "def _build_ssh_command(conn_info, no_tunnel=False):", "body": "command = ['<STR_LIT>', '<STR_LIT>', conn_info['<STR_LIT>'], conn_info['<STR_LIT>']]<EOL>if conn_info.get('<STR_LIT>') and not no_tunnel:<EOL><INDENT>command.insert(<NUM_LIT:1>, conn_info.get('<STR_LIT>'))<EOL>command.insert(<NUM_LIT:1>, '<STR_LIT>')<EOL>command.insert(<NUM_LIT:1>, '<STR_LIT>')<EOL><DEDENT>if conn_info.get('<STR_LIT>'):<EOL><INDENT>command.extend(_build_proxy_command(conn_info))<EOL><DEDENT>if conn_info.get('<STR_LIT>'):<EOL><INDENT>command.append(conn_info.get('<STR_LIT>'))<EOL><DEDENT>return command<EOL>", "docstring": "# TODO: Document clearly\nIndetityFile=\"~/.ssh/id_rsa\"\nProxyCommand=\"ssh -i ~/.ssh/id_rsa proxy_IP nc HOST_IP HOST_PORT\"", "id": "f575:m26"}
{"signature": "@main.command(name='<STR_LIT>', short_help='<STR_LIT>')<EOL>@click.argument('<STR_LIT>')<EOL>@stash_option<EOL>@passphrase_option<EOL>@backend_option<EOL>def lock_key(key_name,<EOL>stash,<EOL>passphrase,<EOL>backend):", "body": "stash = _get_stash(backend, stash, passphrase)<EOL>try:<EOL><INDENT>click.echo('<STR_LIT>')<EOL>stash.lock(key_name=key_name)<EOL>click.echo('<STR_LIT>')<EOL><DEDENT>except GhostError as ex:<EOL><INDENT>sys.exit(ex)<EOL><DEDENT>", "docstring": "Lock a key to prevent it from being deleted, purged or modified\n\n    `KEY_NAME` is the name of the key to lock", "id": "f575:m15"}
{"signature": "@property<EOL><INDENT>def is_initialized(self):<DEDENT>", "body": "return True<EOL>", "docstring": "...and therefore, this should always return true", "id": "f575:c3:m2"}
{"signature": "def init(self):", "body": "<EOL>self.es.indices.create(index=self.params['<STR_LIT:index>'], ignore=<NUM_LIT>)<EOL>", "docstring": "Create an Elasticsearch index if necessary", "id": "f575:c5:m1"}
{"signature": "def _key_path(self, key_name):", "body": "return '<STR_LIT>' + self._stash_name + '<STR_LIT:/>' + key_name<EOL>", "docstring": "Return a valid vault path\n\n        Note that we don't use os.path.join as the path is read by vault using\n        slashes even on Windows.", "id": "f575:c4:m7"}
{"signature": "def init(self):", "body": "", "docstring": "Consul creates directories on the fly, so no init is required.", "id": "f575:c3:m1"}
{"signature": "@main.command(name='<STR_LIT>',<EOL>short_help='<STR_LIT>')<EOL>@click.argument('<STR_LIT>', type=click.STRING)<EOL>@click.argument('<STR_LIT>', type=click.STRING)<EOL>@click.option('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>default=None,<EOL>type=click.STRING,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>type=click.Choice(STORAGE_MAPPING.keys()),<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>default=None,<EOL>type=click.STRING,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>type=click.Choice(STORAGE_MAPPING.keys()),<EOL>help='<STR_LIT>')<EOL>def migrate_stash(source_stash_path,<EOL>source_passphrase,<EOL>source_backend,<EOL>destination_stash_path,<EOL>destination_passphrase,<EOL>destination_backend):", "body": "click.echo('<STR_LIT>'.format(<EOL>source_stash_path, destination_stash_path))<EOL>try:<EOL><INDENT>migrate(<EOL>src_path=source_stash_path,<EOL>src_passphrase=source_passphrase,<EOL>src_backend=source_backend,<EOL>dst_path=destination_stash_path,<EOL>dst_passphrase=destination_passphrase,<EOL>dst_backend=destination_backend)<EOL><DEDENT>except GhostError as ex:<EOL><INDENT>sys.exit(ex)<EOL><DEDENT>click.echo('<STR_LIT>')<EOL>", "docstring": "Migrate all keys from a source stash to a destination stash.\n\n    `SOURCE_STASH_PATH` and `DESTINATION_STASH_PATH` are the paths\n    to the stashs you wish to perform the migration on.", "id": "f575:m23"}
{"signature": "def generate_passphrase(size=<NUM_LIT:12>):", "body": "chars = string.ascii_lowercase + string.ascii_uppercase + string.digits<EOL>return str('<STR_LIT>'.join(random.choice(chars) for _ in range(size)))<EOL>", "docstring": "Return a generate string `size` long based on lowercase, uppercase,\n    and digit chars", "id": "f575:m5"}
{"signature": "def init(self):", "body": "try:<EOL><INDENT>self.client.create_bucket(<EOL>Bucket=self.db_path,<EOL>CreateBucketConfiguration=self.bucket_configuration)<EOL><DEDENT>except botocore.exceptions.ClientError as e:<EOL><INDENT>if '<STR_LIT>' not in str(<EOL>e.response['<STR_LIT>']['<STR_LIT>']):<EOL><INDENT>raise e<EOL><DEDENT><DEDENT>", "docstring": "Create a bucket.", "id": "f575:c6:m1"}
{"signature": "def search(self, body, filter_path, **kwargs):", "body": "if '<STR_LIT>' in body['<STR_LIT>']:<EOL><INDENT>items = list(self.store.items())<EOL>for name, key in items:<EOL><INDENT>return self.store[name]<EOL><DEDENT>else:<EOL><INDENT>return {'<STR_LIT>': {'<STR_LIT>': []}}<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return self.store.get(body['<STR_LIT>']['<STR_LIT>']['<STR_LIT:name>'])<EOL><DEDENT>", "docstring": "{\n    u'hits': {\n        u'hits': [\n            {\n                u'_id': u'AVewADAWUnUKEMeMQ4QB',\n                u'_source': {\n                    u'description': None,\n                    u'created_at':\n                    u'2016-10-10 22:09:44',\n                    u'modified_at':\n                    u'2016-10-10 22:09:44',\n                    u'value': u'the_value',\n                    u'name': u'aws',\n                    u'uid': u'7a1caa7d-14d4-4045-842c-66adf22190b5',\n                    u'metadata': None\n                }\n            },\n        ]\n    }\n}", "id": "f576:c8:m1"}
{"signature": "def send_message(self, fakeid, content):", "body": "url = '<STR_LIT>'<EOL>payload = {<EOL>'<STR_LIT>': fakeid,<EOL>'<STR_LIT:type>': <NUM_LIT:1>,<EOL>'<STR_LIT>': self.__token,<EOL>'<STR_LIT:content>': content,<EOL>'<STR_LIT>': <NUM_LIT:1>,<EOL>}<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'.format(<EOL>fakeid=fakeid,<EOL>token=self.__token,<EOL>),<EOL>'<STR_LIT>': self.__cookies,<EOL>}<EOL>r = requests.post(url, data=payload, headers=headers)<EOL>try:<EOL><INDENT>message = json.loads(r.text)<EOL><DEDENT>except ValueError:<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>try:<EOL><INDENT>if message['<STR_LIT>']['<STR_LIT>'] == -<NUM_LIT>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if message['<STR_LIT>']['<STR_LIT>'] != <NUM_LIT:0>:<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>", "docstring": "\u4e3b\u52a8\u53d1\u9001\u6587\u672c\u6d88\u606f\n:param fakeid: \u7528\u6237\u7684 UID (\u5373 fakeid )\n:param content: \u53d1\u9001\u7684\u5185\u5bb9\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u5177\u4f53\u5185\u5bb9\u6709 ``fake id not exist``", "id": "f584:c0:m5"}
{"signature": "def login(self, verify_code='<STR_LIT>'):", "body": "url = '<STR_LIT>'<EOL>payload = {<EOL>'<STR_LIT:username>': self.__username,<EOL>'<STR_LIT>': self.__password,<EOL>'<STR_LIT>': verify_code,<EOL>'<STR_LIT:f>': '<STR_LIT>',<EOL>}<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': self.__cookies,<EOL>}<EOL>r = requests.post(url, data=payload, headers=headers)<EOL>s = re.search(r'<STR_LIT>', r.text)<EOL>if not s:<EOL><INDENT>try:<EOL><INDENT>error_code = json.loads(r.text)['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>except (KeyError, ValueError):<EOL><INDENT>raise LoginError(r.text)<EOL><DEDENT>if error_code in [-<NUM_LIT:8>, -<NUM_LIT>]:<EOL><INDENT>raise LoginVerifyCodeError(r.text)<EOL><DEDENT>elif re.search(r'<STR_LIT>', r.text):<EOL><INDENT>raise LoginError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise LoginError(r.text)<EOL><DEDENT><DEDENT>self.__token = int(s.group(<NUM_LIT:1>))<EOL>self.__cookies = '<STR_LIT>'<EOL>for cookie in r.cookies:<EOL><INDENT>self.__cookies += cookie.name + '<STR_LIT:=>' + cookie.value + '<STR_LIT:;>'<EOL><DEDENT>", "docstring": "\u767b\u5f55\u5fae\u4fe1\u516c\u4f17\u5e73\u53f0\n\u6ce8\u610f\u5728\u5b9e\u4f8b\u5316 ``WechatExt`` \u7684\u65f6\u5019\uff0c\u5982\u679c\u6ca1\u6709\u4f20\u5165 ``token`` \u53ca ``cookies`` \uff0c\u5c06\u4f1a\u81ea\u52a8\u8c03\u7528\u8be5\u65b9\u6cd5\uff0c\u65e0\u9700\u624b\u52a8\u8c03\u7528\n\u5f53\u4e14\u4ec5\u5f53\u6355\u83b7\u5230 ``NeedLoginError`` \u5f02\u5e38\u65f6\u624d\u9700\u8981\u8c03\u7528\u6b64\u65b9\u6cd5\u8fdb\u884c\u767b\u5f55\u91cd\u8bd5\n:param verify_code: \u9a8c\u8bc1\u7801, \u4e0d\u4f20\u5165\u5219\u4e3a\u65e0\u9a8c\u8bc1\u7801\n:raises LoginVerifyCodeError: \u9700\u8981\u9a8c\u8bc1\u7801\u6216\u9a8c\u8bc1\u7801\u51fa\u9519\uff0c\u8be5\u5f02\u5e38\u4e3a ``LoginError`` \u7684\u5b50\u7c7b\n:raises LoginError: 
\u767b\u5f55\u51fa\u9519\u5f02\u5e38\uff0c\u5f02\u5e38\u5185\u5bb9\u4e3a\u5fae\u4fe1\u670d\u52a1\u5668\u54cd\u5e94\u7684\u5185\u5bb9\uff0c\u53ef\u4f5c\u4e3a\u65e5\u5fd7\u8bb0\u5f55\u4e0b\u6765", "id": "f584:c0:m1"}
{"signature": "def _init_appid(self):", "body": "if not self.__appid:<EOL><INDENT>self._init_plugin_token_appid()<EOL><DEDENT>", "docstring": "\u521d\u59cb\u5316\u516c\u4f17\u53f7\u81ea\u8eab\u7684 ``appid`` \u503c\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m30"}
{"signature": "def add_news(self, news):", "body": "if not news:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>for item in news:<EOL><INDENT>if '<STR_LIT:title>' not in item or '<STR_LIT:content>' not in item:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>url = '<STR_LIT>'.format(<EOL>token=self.__token,<EOL>)<EOL>payload = {<EOL>'<STR_LIT>': self.__token,<EOL>'<STR_LIT:type>': <NUM_LIT:10>,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': <NUM_LIT:1>,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:error>': '<STR_LIT:false>',<EOL>}<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>'.format(<EOL>token=self.__token<EOL>),<EOL>'<STR_LIT>': self.__cookies,<EOL>}<EOL>i = <NUM_LIT:0><EOL>for item in news:<EOL><INDENT>payload['<STR_LIT:title>'+str(i)] = item.get('<STR_LIT:title>')<EOL>payload['<STR_LIT>'+str(i)] = item.get('<STR_LIT>')<EOL>payload['<STR_LIT>'+str(i)] = item.get('<STR_LIT>')<EOL>payload['<STR_LIT:content>'+str(i)] = item.get('<STR_LIT:content>')<EOL>payload['<STR_LIT>'+str(i)] = item.get('<STR_LIT>')<EOL>payload['<STR_LIT>'+str(i)] = item.get('<STR_LIT>')<EOL>i += <NUM_LIT:1><EOL><DEDENT>payload['<STR_LIT:count>'] = i<EOL>r = requests.post(url, data=payload, headers=headers)<EOL>try:<EOL><INDENT>message = json.loads(r.text)<EOL><DEDENT>except ValueError:<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>try:<EOL><INDENT>if message['<STR_LIT>'] != '<STR_LIT:0>':<EOL><INDENT>raise ValueError(r.text)<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>", "docstring": "\u5728\u7d20\u6750\u5e93\u4e2d\u521b\u5efa\u56fe\u6587\u6d88\u606f\n\n:param news: list \u5bf9\u8c61, \u5176\u4e2d\u7684\u6bcf\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a dict \u5bf9\u8c61, \u4ee3\u8868\u4e00\u6761\u56fe\u6587, key \u503c\u5206\u522b\u4e3a ``title``, ``author``, ``summary``,\n             ``content``, ``picture_id``, ``from_url``, \u5bf9\u5e94\u5185\u5bb9\u4e3a\u6807\u9898, \u4f5c\u8005, \u6458\u8981, 
\u5185\u5bb9, \u7d20\u6750\u5e93\u91cc\u7684\n             \u56fe\u7247ID(\u53ef\u901a\u8fc7 ``upload_file`` \u51fd\u6570\u4e0a\u4f20\u83b7\u53d6), \u6765\u6e90\u94fe\u63a5\u3002\n\n             \u5176\u4e2d\u5fc5\u987b\u63d0\u4f9b\u7684 key \u503c\u4e3a ``title`` \u548c ``content``\n\n             \u793a\u4f8b::\n\n                 [\n                     {\n                         'title': '\u56fe\u6587\u6807\u9898',\n                         'author': '\u56fe\u6587\u4f5c\u8005',\n                         'summary': '\u56fe\u6587\u6458\u8981',\n                         'content': '\u56fe\u6587\u5185\u5bb9',\n                         'picture_id': '23412341',\n                         'from_url': 'http://www.baidu.com',\n                     },\n                     {\n                         'title': '\u6700\u5c11\u56fe\u6587\u6807\u9898',\n                         'content': '\u56fe\u6587\u5185\u5bb9',\n                     }\n                 ]\n:raises ValueError: \u53c2\u6570\u63d0\u4f9b\u9519\u8bef\u65f6\u629b\u51fa\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m12"}
{"signature": "def send_audio(self, fakeid, fid):", "body": "return self.send_file(fakeid, fid, <NUM_LIT:3>)<EOL>", "docstring": "\u7ed9\u6307\u5b9a\u7528\u6237 fakeid \u53d1\u9001\u8bed\u97f3\u4fe1\u606f\n:param fakeid: \u7528\u6237\u7684 UID (\u5373 fakeid)\n:param fid: \u6587\u4ef6 ID\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u9519\u8bef\u539f\u56e0\u76f4\u63a5\u6253\u5370\u5f02\u5e38\u5373\u53ef (\u5e38\u89c1\u9519\u8bef\u5185\u5bb9: ``system error`` \u6216 ``can not send this type of msg``: \u6587\u4ef6\u7c7b\u578b\u4e0d\u5339\u914d, ``user not exist``: \u7528\u6237 fakeid \u4e0d\u5b58\u5728, ``file not exist``: \u6587\u4ef6 fid \u4e0d\u5b58\u5728, \u8fd8\u6709\u5176\u4ed6\u9519\u8bef\u8bf7\u81ea\u884c\u68c0\u67e5)", "id": "f584:c0:m17"}
{"signature": "def send_news(self, fakeid, msgid):", "body": "url = '<STR_LIT>'<EOL>payload = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:f>': '<STR_LIT>',<EOL>'<STR_LIT>': fakeid,<EOL>'<STR_LIT:type>': <NUM_LIT:10>,<EOL>'<STR_LIT>': self.__token,<EOL>'<STR_LIT>': msgid,<EOL>'<STR_LIT>': msgid,<EOL>'<STR_LIT:error>': '<STR_LIT:false>',<EOL>'<STR_LIT>': <NUM_LIT:1>,<EOL>'<STR_LIT>': random.random(),<EOL>}<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'.format(<EOL>fakeid=fakeid,<EOL>),<EOL>'<STR_LIT>': self.__cookies,<EOL>}<EOL>r = requests.post(url, data=payload, headers=headers)<EOL>try:<EOL><INDENT>message = json.loads(r.text)<EOL><DEDENT>except ValueError:<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>try:<EOL><INDENT>if message['<STR_LIT>']['<STR_LIT>'] == <NUM_LIT> or message['<STR_LIT>']['<STR_LIT>'] == -<NUM_LIT>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if message['<STR_LIT>']['<STR_LIT>'] == <NUM_LIT>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if message['<STR_LIT>']['<STR_LIT>'] != <NUM_LIT:0>:<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>", "docstring": "\u5411\u6307\u5b9a\u7528\u6237\u53d1\u9001\u56fe\u6587\u6d88\u606f \uff08\u5fc5\u987b\u4ece\u56fe\u6587\u5e93\u91cc\u9009\u53d6\u6d88\u606fID\u4f20\u5165)\n:param fakeid: \u7528\u6237\u7684 UID (\u5373 fakeid)\n:param msgid: \u56fe\u6587\u6d88\u606f ID\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u5177\u4f53\u5185\u5bb9\u6709 ``fake id not exist`` \u53ca ``message id not exist``", "id": "f584:c0:m11"}
{"signature": "def send_image(self, fakeid, fid):", "body": "return self.send_file(fakeid, fid, <NUM_LIT:2>)<EOL>", "docstring": "\u7ed9\u6307\u5b9a\u7528\u6237 fakeid \u53d1\u9001\u56fe\u7247\u4fe1\u606f\n:param fakeid: \u7528\u6237\u7684 UID (\u5373 fakeid)\n:param fid: \u6587\u4ef6 ID\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e\n:raises ValueError: \u53c2\u6570\u51fa\u9519, \u9519\u8bef\u539f\u56e0\u76f4\u63a5\u6253\u5370\u5f02\u5e38\u5373\u53ef (\u5e38\u89c1\u9519\u8bef\u5185\u5bb9: ``system error`` \u6216 ``can not send this type of msg``: \u6587\u4ef6\u7c7b\u578b\u4e0d\u5339\u914d, ``user not exist``: \u7528\u6237 fakeid \u4e0d\u5b58\u5728, ``file not exist``: \u6587\u4ef6 fid \u4e0d\u5b58\u5728, \u8fd8\u6709\u5176\u4ed6\u9519\u8bef\u8bf7\u81ea\u884c\u68c0\u67e5)", "id": "f584:c0:m16"}
{"signature": "def get_file_list(self, type, page, count=<NUM_LIT:10>):", "body": "url = '<STR_LIT>'.format(<EOL>token=self.__token,<EOL>type=type,<EOL>random=round(random.random(), <NUM_LIT:3>),<EOL>begin=page*count,<EOL>count=count,<EOL>)<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'.format(<EOL>token=self.__token,<EOL>),<EOL>'<STR_LIT>': self.__cookies,<EOL>}<EOL>r = requests.get(url, headers=headers)<EOL>try:<EOL><INDENT>message = json.dumps(json.loads(r.text)['<STR_LIT>'], ensure_ascii=False)<EOL><DEDENT>except (KeyError, ValueError):<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>return message<EOL>", "docstring": "\u83b7\u53d6\u7d20\u6750\u5e93\u6587\u4ef6\u5217\u8868\n\n\u8fd4\u56deJSON\u793a\u4f8b::\n\n    {\n        \"type\": 2,\n        \"file_item\": [\n            {\n                \"update_time\": 1408723089,\n                \"name\": \"Doraemonext.png\",\n                \"play_length\": 0,\n                \"file_id\": 206471048,\n                \"type\": 2,\n                \"size\": \"53.7 K\"\n            },\n            {\n                \"update_time\": 1408722328,\n                \"name\": \"Doraemonext.png\",\n                \"play_length\": 0,\n                \"file_id\": 206470809,\n                \"type\": 2,\n                \"size\": \"53.7 K\"\n            }\n        ],\n        \"file_cnt\": {\n            \"voice_cnt\": 1,\n            \"app_msg_cnt\": 10,\n            \"commondity_msg_cnt\": 0,\n            \"video_cnt\": 0,\n            \"img_cnt\": 29,\n            \"video_msg_cnt\": 0,\n            \"total\": 40\n        }\n    }\n\n:param type: \u6587\u4ef6\u7c7b\u578b (2: \u56fe\u7247, 3: \u97f3\u9891, 4: \u89c6\u9891)\n:param page: \u9875\u7801 (\u4ece 0 \u5f00\u59cb)\n:param count: \u6bcf\u9875\u5927\u5c0f\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, 
\u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m15"}
{"signature": "def stat_article_detail_list(self, page=<NUM_LIT:1>, start_date=str(date.today()+timedelta(days=-<NUM_LIT:30>)), end_date=str(date.today())):", "body": "self._init_plugin_token_appid()<EOL>url = '<STR_LIT>'.format(<EOL>page=page,<EOL>appid=self.__appid,<EOL>token=self.__plugin_token,<EOL>rnd=int(time.time()),<EOL>start_date=start_date,<EOL>end_date=end_date,<EOL>)<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'.format(<EOL>page=page,<EOL>appid=self.__appid,<EOL>token=self.__plugin_token,<EOL>rnd=int(time.time()),<EOL>start_date=start_date,<EOL>end_date=end_date,<EOL>),<EOL>'<STR_LIT>': self.__cookies,<EOL>}<EOL>r = requests.get(url, headers=headers)<EOL>if not re.search(r'<STR_LIT>', self.__cookies):<EOL><INDENT>for cookie in r.cookies:<EOL><INDENT>self.__cookies += cookie.name + '<STR_LIT:=>' + cookie.value + '<STR_LIT:;>'<EOL><DEDENT><DEDENT>try:<EOL><INDENT>data = json.loads(r.text)<EOL>if data.get('<STR_LIT>'):<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>message = json.dumps(data, ensure_ascii=False)<EOL><DEDENT>except (KeyError, ValueError):<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>return message<EOL>", "docstring": "\u83b7\u53d6\u56fe\u6587\u5206\u6790\u6570\u636e\n\n\u8fd4\u56deJSON\u793a\u4f8b ::\n\n    {\n        \"hasMore\": true,  // \u8bf4\u660e\u662f\u5426\u53ef\u4ee5\u589e\u52a0 page \u9875\u7801\u6765\u83b7\u53d6\u6570\u636e\n        \"data\": [\n            {\n                \"index\": [\n                    \"20,816\",  // \u9001\u8fbe\u4eba\u6570\n                    \"1,944\",  // \u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570\n                    \"2,554\",  // \u56fe\u6587\u9875\u9605\u8bfb\u6b21\u6570\n                    \"9.34%\",  // (\u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570 / \u9001\u8fbe\u4eba\u6570)\n                    \"0\",  // \u539f\u6587\u9875\u9605\u8bfb\u4eba\u6570\n                    \"0\",  // \u539f\u6587\u9875\u9605\u8bfb\u6b21\u6570\n                    \"0%\",  
// \uff08\u539f\u6587\u9875\u9605\u8bfb\u4eba\u6570 / \u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570)\n                    \"47\",  // \u5206\u4eab\u8f6c\u53d1\u4eba\u6570\n                    \"61\",  // \u5206\u4eab\u8f6c\u53d1\u6b21\u6570\n                    \"1\"  // \u5fae\u4fe1\u6536\u85cf\u4eba\u6570\n                ],\n                \"time\": \"2015-01-21\",\n                \"table_data\": \"{\\\"fields\\\":{\\\"TargetUser\\\":{\\\"thText\\\":\\\"\\\\u9001\\\\u8fbe\\\\u4eba\\\\u6570\\\",\\\"number\\\":false,\\\"colAlign\\\":\\\"center\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"IntPageReadUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"IntPageReadCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"PageConversion\\\":{\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":\\\"2\\\"},\\\"OriPageReadUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"OriPageReadCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"Conversion\\\":{\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":\\\"2\\\"},\\\"ShareUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"ShareCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"AddToFavUser\\\":{\\\"thText\\\":\\\"\
\\\u5fae\\\\u4fe1\\\\u6536\\\\u85cf\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0}},\\\"data\\\":[{\\\"MsgId\\\":\\\"205104027_1\\\",\\\"Title\\\":\\\"\\\\u56de\\\\u5bb6\\\\u5927\\\\u4f5c\\\\u6218 | \\\\u5feb\\\\u6765\\\\u5e26\\\\u6211\\\\u56de\\\\u5bb6\\\",\\\"RefDate\\\":\\\"20150121\\\",\\\"TargetUser\\\":\\\"20,816\\\",\\\"IntPageReadUser\\\":\\\"1,944\\\",\\\"IntPageReadCount\\\":\\\"2,554\\\",\\\"OriPageReadUser\\\":\\\"0\\\",\\\"OriPageReadCount\\\":\\\"0\\\",\\\"ShareUser\\\":\\\"47\\\",\\\"ShareCount\\\":\\\"61\\\",\\\"AddToFavUser\\\":\\\"1\\\",\\\"Conversion\\\":\\\"0%\\\",\\\"PageConversion\\\":\\\"9.34%\\\"}],\\\"fixedRow\\\":false,\\\"cssSetting\\\":{\\\"\\\":\\\"\\\"},\\\"complexHeader\\\":[[{\\\"field\\\":\\\"TargetUser\\\",\\\"thText\\\":\\\"\\\\u9001\\\\u8fbe\\\\u4eba\\\\u6570\\\",\\\"rowSpan\\\":2,\\\"colSpan\\\":1},{\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u9875\\\\u9605\\\\u8bfb\\\",\\\"colSpan\\\":3},{\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u9875\\\\u9605\\\\u8bfb\\\",\\\"colSpan\\\":3},{\\\"thText\\\":\\\"\\\\u5206\\\\u4eab\\\\u8f6c\\\\u53d1\\\",\\\"colSpan\\\":2},{\\\"field\\\":\\\"AddToFavUser\\\",\\\"thText\\\":\\\"\\\\u5fae\\\\u4fe1\\\\u6536\\\\u85cf\\\\u4eba\\\\u6570\\\",\\\"rowSpan\\\":2,\\\"enable\\\":true}],[{\\\"field\\\":\\\"IntPageReadUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"IntPageReadCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"},{\\\"field\\\":\\\"PageConversion\\\",\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\"},{\\\"field\\\":\\\"OriPageReadUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"OriPageReadCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"},{\\\"field\\\":\\\"Conversion\\\",\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\"},{\\\"field\\\":\\\"ShareUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"ShareCount\\\",\\\"thText\\\":\\
\"\\\\u6b21\\\\u6570\\\"}]]}\",\n                \"id\": \"205104027_1\",\n                \"title\": \"\u56de\u5bb6\u5927\u4f5c\u6218 | \u5feb\u6765\u5e26\u6211\u56de\u5bb6\"\n            },\n            {\n                \"index\": [\n                    \"20,786\",  // \u9001\u8fbe\u4eba\u6570\n                    \"2,598\",  // \u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570\n                    \"3,368\",  // \u56fe\u6587\u9875\u9605\u8bfb\u6b21\u6570\n                    \"12.5%\",  // (\u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570 / \u9001\u8fbe\u4eba\u6570)\n                    \"0\",  // \u539f\u6587\u9875\u9605\u8bfb\u4eba\u6570\n                    \"0\",  // \u539f\u6587\u9875\u9605\u8bfb\u6b21\u6570\n                    \"0%\",  // \uff08\u539f\u6587\u9875\u9605\u8bfb\u4eba\u6570 / \u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570)\n                    \"73\",  // \u5206\u4eab\u8f6c\u53d1\u4eba\u6570\n                    \"98\",  // \u5206\u4eab\u8f6c\u53d1\u6b21\u6570\n                    \"1\"  // \u5fae\u4fe1\u6536\u85cf\u4eba\u6570\n                ],\n                \"time\": \"2015-01-20\",\n                \"table_data\": 
\"{\\\"fields\\\":{\\\"TargetUser\\\":{\\\"thText\\\":\\\"\\\\u9001\\\\u8fbe\\\\u4eba\\\\u6570\\\",\\\"number\\\":false,\\\"colAlign\\\":\\\"center\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"IntPageReadUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"IntPageReadCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"PageConversion\\\":{\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":\\\"2\\\"},\\\"OriPageReadUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"OriPageReadCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"Conversion\\\":{\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":\\\"2\\\"},\\\"ShareUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"ShareCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"AddToFavUser\\\":{\\\"thText\\\":\\\"\\\\u5fae\\\\u4fe1\\\\u6536\\\\u85cf\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0}},\\\"data\\\":[{\\\"MsgId\\\":\\\"205066833_1\\\",\\\"Title\\\":\\\"\\\\u56de\\\\u5bb6\\\\u5927\\\\u4f5c\\\\u6218 | 
\\\\u5982\\\\u4f55\\\\u4f18\\\\u96c5\\\\u5730\\\\u53bb\\\\u5f80\\\\u8f66\\\\u7ad9\\\\u548c\\\\u673a\\\\u573a\\\",\\\"RefDate\\\":\\\"20150120\\\",\\\"TargetUser\\\":\\\"20,786\\\",\\\"IntPageReadUser\\\":\\\"2,598\\\",\\\"IntPageReadCount\\\":\\\"3,368\\\",\\\"OriPageReadUser\\\":\\\"0\\\",\\\"OriPageReadCount\\\":\\\"0\\\",\\\"ShareUser\\\":\\\"73\\\",\\\"ShareCount\\\":\\\"98\\\",\\\"AddToFavUser\\\":\\\"1\\\",\\\"Conversion\\\":\\\"0%\\\",\\\"PageConversion\\\":\\\"12.5%\\\"}],\\\"fixedRow\\\":false,\\\"cssSetting\\\":{\\\"\\\":\\\"\\\"},\\\"complexHeader\\\":[[{\\\"field\\\":\\\"TargetUser\\\",\\\"thText\\\":\\\"\\\\u9001\\\\u8fbe\\\\u4eba\\\\u6570\\\",\\\"rowSpan\\\":2,\\\"colSpan\\\":1},{\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u9875\\\\u9605\\\\u8bfb\\\",\\\"colSpan\\\":3},{\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u9875\\\\u9605\\\\u8bfb\\\",\\\"colSpan\\\":3},{\\\"thText\\\":\\\"\\\\u5206\\\\u4eab\\\\u8f6c\\\\u53d1\\\",\\\"colSpan\\\":2},{\\\"field\\\":\\\"AddToFavUser\\\",\\\"thText\\\":\\\"\\\\u5fae\\\\u4fe1\\\\u6536\\\\u85cf\\\\u4eba\\\\u6570\\\",\\\"rowSpan\\\":2,\\\"enable\\\":true}],[{\\\"field\\\":\\\"IntPageReadUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"IntPageReadCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"},{\\\"field\\\":\\\"PageConversion\\\",\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\"},{\\\"field\\\":\\\"OriPageReadUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"OriPageReadCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"},{\\\"field\\\":\\\"Conversion\\\",\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\"},{\\\"field\\\":\\\"ShareUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"ShareCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"}]]}\",\n                \"id\": \"205066833_1\",\n                \"title\": \"\u56de\u5bb6\u5927\u4f5c\u6218 | \u5982\u4f55\u4f18\u96c5\u5730\u53bb\u5f80\u8f66\u7ad9\u548c\u673a\u573a\"\n   
         },\n            {\n                \"index\": [\n                    \"20,745\",  // \u9001\u8fbe\u4eba\u6570\n                    \"1,355\",  // \u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570\n                    \"1,839\",  // \u56fe\u6587\u9875\u9605\u8bfb\u6b21\u6570\n                    \"6.53%\",  // (\u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570 / \u9001\u8fbe\u4eba\u6570)\n                    \"145\",  // \u539f\u6587\u9875\u9605\u8bfb\u4eba\u6570\n                    \"184\",  // \u539f\u6587\u9875\u9605\u8bfb\u6b21\u6570\n                    \"10.7%\",  // \uff08\u539f\u6587\u9875\u9605\u8bfb\u4eba\u6570 / \u56fe\u6587\u9875\u9605\u8bfb\u4eba\u6570)\n                    \"48\",  // \u5206\u4eab\u8f6c\u53d1\u4eba\u6570\n                    \"64\",  // \u5206\u4eab\u8f6c\u53d1\u6b21\u6570\n                    \"5\"  // \u5fae\u4fe1\u6536\u85cf\u4eba\u6570\n                ],\n                \"time\": \"2015-01-19\",\n                \"table_data\": \"{\\\"fields\\\":{\\\"TargetUser\\\":{\\\"thText\\\":\\\"\\\\u9001\\\\u8fbe\\\\u4eba\\\\u6570\\\",\\\"number\\\":false,\\\"colAlign\\\":\\\"center\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"IntPageReadUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"IntPageReadCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"PageConversion\\\":{\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":\\\"2\\\"},\\\"OriPageReadUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"OriPageReadCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false
,\\\"precision\\\":0},\\\"Conversion\\\":{\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":\\\"2\\\"},\\\"ShareUser\\\":{\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"ShareCount\\\":{\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0},\\\"AddToFavUser\\\":{\\\"thText\\\":\\\"\\\\u5fae\\\\u4fe1\\\\u6536\\\\u85cf\\\\u4eba\\\\u6570\\\",\\\"number\\\":true,\\\"colAlign\\\":\\\"right\\\",\\\"needOrder\\\":false,\\\"precision\\\":0}},\\\"data\\\":[{\\\"MsgId\\\":\\\"205028693_1\\\",\\\"Title\\\":\\\"\\\\u5145\\\\u7535\\\\u65f6\\\\u95f4 | \\\\u542c\\\\u542c\\\\u7535\\\\u53f0\\\\uff0c\\\\u4f18\\\\u96c5\\\\u5730\\\\u63d0\\\\u5347\\\\u5b66\\\\u4e60\\\\u6548\\\\u7387\\\",\\\"RefDate\\\":\\\"20150119\\\",\\\"TargetUser\\\":\\\"20,745\\\",\\\"IntPageReadUser\\\":\\\"1,355\\\",\\\"IntPageReadCount\\\":\\\"1,839\\\",\\\"OriPageReadUser\\\":\\\"145\\\",\\\"OriPageReadCount\\\":\\\"184\\\",\\\"ShareUser\\\":\\\"48\\\",\\\"ShareCount\\\":\\\"64\\\",\\\"AddToFavUser\\\":\\\"5\\\",\\\"Conversion\\\":\\\"10.7%\\\",\\\"PageConversion\\\":\\\"6.53%\\\"}],\\\"fixedRow\\\":false,\\\"cssSetting\\\":{\\\"\\\":\\\"\\\"},\\\"complexHeader\\\":[[{\\\"field\\\":\\\"TargetUser\\\",\\\"thText\\\":\\\"\\\\u9001\\\\u8fbe\\\\u4eba\\\\u6570\\\",\\\"rowSpan\\\":2,\\\"colSpan\\\":1},{\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u9875\\\\u9605\\\\u8bfb\\\",\\\"colSpan\\\":3},{\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u9875\\\\u9605\\\\u8bfb\\\",\\\"colSpan\\\":3},{\\\"thText\\\":\\\"\\\\u5206\\\\u4eab\\\\u8f6c\\\\u53d1\\\",\\\"colSpan\\\":2},{\\\"field\\\":\\\"AddToFavUser\\\",\\\"thText\\\":\\\"\\\\u5fae\\\\u4fe1\\\\u6536\\\\u85cf\\\\u4eba\\\\u6570\\\",\\\"rowSpan\\\":2,\\\"enable\\\":true}],[{\\\"field\\\":\\\"IntPageR
eadUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"IntPageReadCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"},{\\\"field\\\":\\\"PageConversion\\\",\\\"thText\\\":\\\"\\\\u56fe\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\"},{\\\"field\\\":\\\"OriPageReadUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"OriPageReadCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"},{\\\"field\\\":\\\"Conversion\\\",\\\"thText\\\":\\\"\\\\u539f\\\\u6587\\\\u8f6c\\\\u5316\\\\u7387\\\"},{\\\"field\\\":\\\"ShareUser\\\",\\\"thText\\\":\\\"\\\\u4eba\\\\u6570\\\"},{\\\"field\\\":\\\"ShareCount\\\",\\\"thText\\\":\\\"\\\\u6b21\\\\u6570\\\"}]]}\",\n                \"id\": \"205028693_1\",\n                \"title\": \"\u5145\u7535\u65f6\u95f4 | \u542c\u542c\u7535\u53f0\uff0c\u4f18\u96c5\u5730\u63d0\u5347\u5b66\u4e60\u6548\u7387\"\n            }\n        ]\n    }\n\n:param page: \u9875\u7801 (\u7531\u4e8e\u817e\u8baf\u63a5\u53e3\u9650\u5236\uff0cpage \u4ece 1 \u5f00\u59cb\uff0c3 \u6761\u6570\u636e\u4e3a 1 \u9875)\n:param start_date: \u5f00\u59cb\u65f6\u95f4\uff0c\u9ed8\u8ba4\u662f\u4eca\u5929-30\u5929 (\u7c7b\u578b: str \u683c\u5f0f\u793a\u4f8b: \"2015-01-15\")\n:param end_date: \u7ed3\u675f\u65f6\u95f4\uff0c\u9ed8\u8ba4\u662f\u4eca\u5929 (\u7c7b\u578b: str \u683c\u5f0f\u793a\u4f8b: \"2015-02-01\")\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\uff0c\u5177\u4f53\u7684\u5404\u9879\u5185\u5bb9\u89e3\u91ca\u53c2\u89c1\u4e0a\u9762\u7684 JSON \u8fd4\u56de\u793a\u4f8b\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m7"}
{"signature": "def get_user_info(self, fakeid):", "body": "url = '<STR_LIT>'<EOL>payloads = {<EOL>'<STR_LIT>': <NUM_LIT:1>,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': round(random.random(), <NUM_LIT:3>),<EOL>'<STR_LIT>': self.__token,<EOL>'<STR_LIT:t>': '<STR_LIT>',<EOL>'<STR_LIT>': fakeid,<EOL>}<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'.format(<EOL>token=self.__token,<EOL>),<EOL>'<STR_LIT>': self.__cookies,<EOL>}<EOL>r = requests.post(url, data=payloads, headers=headers)<EOL>try:<EOL><INDENT>message = json.dumps(json.loads(r.text)['<STR_LIT>'], ensure_ascii=False)<EOL><DEDENT>except (KeyError, ValueError):<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>return message<EOL>", "docstring": "\u83b7\u53d6\u6307\u5b9a\u7528\u6237\u7684\u4e2a\u4eba\u4fe1\u606f\n\n\u8fd4\u56deJSON\u793a\u4f8b::\n\n    {\n        \"province\": \"\u6e56\u5317\",\n        \"city\": \"\u6b66\u6c49\",\n        \"gender\": 1,\n        \"nick_name\": \"Doraemonext\",\n        \"country\": \"\u4e2d\u56fd\",\n        \"remark_name\": \"\",\n        \"fake_id\": 844735403,\n        \"signature\": \"\",\n        \"group_id\": 0,\n        \"user_name\": \"\"\n    }\n\n:param fakeid: \u7528\u6237\u7684 UID (\u5373 fakeid)\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m19"}
{"signature": "def _init_self_information(self):", "body": "url = '<STR_LIT>'.format(token=self.__token)<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': self.__cookies,<EOL>}<EOL>r = requests.get(url, headers=headers)<EOL>ticket_id = re.search(r'<STR_LIT>', r.text)<EOL>if not ticket_id:<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>self.__ticket_id = ticket_id.group(<NUM_LIT:1>)<EOL>ticket = re.search(r'<STR_LIT>', r.text)<EOL>if not ticket:<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>self.__ticket = ticket.group(<NUM_LIT:1>)<EOL>fakeid = re.search(r'<STR_LIT>', r.text)<EOL>if not fakeid:<EOL><INDENT>raise NeedLoginError(r.text)<EOL><DEDENT>self.__fakeid = fakeid.group(<NUM_LIT:1>)<EOL>", "docstring": "\u521d\u59cb\u5316\u516c\u4f17\u53f7\u81ea\u8eab\u7684\u5c5e\u6027\u503c (\u76ee\u524d\u5305\u62ec ``Ticket`` \u503c \u53ca \u516c\u4f17\u53f7\u81ea\u8eab\u7684 ``fakeid`` \u503c)\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m29"}
{"signature": "def get_top_message(self):", "body": "return self.get_message_list(count=<NUM_LIT:1>)<EOL>", "docstring": "\u83b7\u53d6\u6700\u65b0\u4e00\u6761\u6d88\u606f\n\n\u8fd4\u56deJSON\u793a\u4f8b::\n\n    {\n        \"msg_item\": [\n            {\n                \"id\": 206448489,\n                \"type\": 2,\n                \"fakeid\": \"844735403\",\n                \"nick_name\": \"Doraemonext\",\n                \"date_time\": 1408696938,\n                \"source\": \"\",\n                \"msg_status\": 4,\n                \"has_reply\": 0,\n                \"refuse_reason\": \"\",\n                \"multi_item\": [ ],\n                \"to_uin\": 2391068708,\n                \"send_stat\": {\n                    \"total\": 0,\n                    \"succ\": 0,\n                    \"fail\": 0\n                }\n            }\n        ]\n    }\n\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\n:raises NeedLoginError: \u64cd\u4f5c\u672a\u6267\u884c\u6210\u529f, \u9700\u8981\u518d\u6b21\u5c1d\u8bd5\u767b\u5f55, \u5f02\u5e38\u5185\u5bb9\u4e3a\u670d\u52a1\u5668\u8fd4\u56de\u7684\u9519\u8bef\u6570\u636e", "id": "f584:c0:m22"}
{"signature": "@classmethod<EOL><INDENT>def _transcoding_list(cls, data):<DEDENT>", "body": "if not isinstance(data, list):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>result = []<EOL>for item in data:<EOL><INDENT>if isinstance(item, dict):<EOL><INDENT>result.append(cls._transcoding_dict(item))<EOL><DEDENT>elif isinstance(item, list):<EOL><INDENT>result.append(cls._transcoding_list(item))<EOL><DEDENT>else:<EOL><INDENT>result.append(item)<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "\u7f16\u7801\u8f6c\u6362 for list\n        :param data: \u9700\u8981\u8f6c\u6362\u7684 list \u6570\u636e\n        :return: \u8f6c\u6362\u597d\u7684 list", "id": "f585:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def _transcoding(cls, data):<DEDENT>", "body": "if not data:<EOL><INDENT>return data<EOL><DEDENT>result = None<EOL>if isinstance(data, str) and hasattr(data, '<STR_LIT>'):<EOL><INDENT>result = data.decode('<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>result = data<EOL><DEDENT>return result<EOL>", "docstring": "\u7f16\u7801\u8f6c\u6362\n        :param data: \u9700\u8981\u8f6c\u6362\u7684\u6570\u636e\n        :return: \u8f6c\u6362\u597d\u7684\u6570\u636e", "id": "f585:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def _transcoding_dict(cls, data):<DEDENT>", "body": "if not isinstance(data, dict):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>result = {}<EOL>for k, v in data.items():<EOL><INDENT>k = cls._transcoding(k)<EOL>if isinstance(v, dict):<EOL><INDENT>v = cls._transcoding_dict(v)<EOL><DEDENT>elif isinstance(v, list):<EOL><INDENT>v = cls._transcoding_list(v)<EOL><DEDENT>else:<EOL><INDENT>v = cls._transcoding(v)<EOL><DEDENT>result.update({k: v})<EOL><DEDENT>return result<EOL>", "docstring": "\u7f16\u7801\u8f6c\u6362 for dict\n:param data: \u9700\u8981\u8f6c\u6362\u7684 dict \u6570\u636e\n:return: \u8f6c\u6362\u597d\u7684 dict", "id": "f585:c0:m2"}
{"signature": "@token.setter<EOL><INDENT>def token(self, token):<DEDENT>", "body": "self.__token = token<EOL>self._update_crypto()<EOL>", "docstring": "\u8bbe\u7f6e\u5f53\u524d Token", "id": "f587:c0:m2"}
{"signature": "@property<EOL><INDENT>def appid(self):<DEDENT>", "body": "return self.__appid<EOL>", "docstring": "\u83b7\u53d6\u5f53\u524d App ID", "id": "f587:c0:m3"}
{"signature": "@property<EOL><INDENT>def paysignkey(self):<DEDENT>", "body": "return self.__paysignkey<EOL>", "docstring": "\u83b7\u53d6\u5546\u6237\u7b7e\u540d\u5bc6\u94a5 Key", "id": "f587:c0:m15"}
{"signature": "def _update_crypto(self):", "body": "if self.__encrypt_mode in ['<STR_LIT>', '<STR_LIT>'] and self.__encoding_aes_key is not None:<EOL><INDENT>if self.__token is None or self.__appid is None:<EOL><INDENT>raise NeedParamError('<STR_LIT>')<EOL><DEDENT>self.__crypto = BasicCrypto(self.__token, self.__encoding_aes_key, self.__appid)<EOL><DEDENT>else:<EOL><INDENT>self.__crypto = None<EOL><DEDENT>", "docstring": "\u6839\u636e\u5f53\u524d\u914d\u7f6e\u5185\u5bb9\u66f4\u65b0 Crypto \u7c7b", "id": "f587:c0:m22"}
{"signature": "def set_appid_appsecret(self, appid, appsecret):", "body": "self.__appid = appid<EOL>self.__appsecret = appsecret<EOL>self._update_crypto()<EOL>", "docstring": "\u8bbe\u7f6e\u5f53\u524d App ID \u53ca App Secret", "id": "f587:c0:m5"}
{"signature": "@encoding_aes_key.setter<EOL><INDENT>def encoding_aes_key(self, encoding_aes_key):<DEDENT>", "body": "self.__encoding_aes_key = encoding_aes_key<EOL>self._update_crypto()<EOL>", "docstring": "\u8bbe\u7f6e\u5f53\u524d EncodingAESKey", "id": "f587:c0:m7"}
{"signature": "@property<EOL><INDENT>def crypto(self):<DEDENT>", "body": "return self.__crypto<EOL>", "docstring": "\u83b7\u53d6\u5f53\u524d Crypto \u5b9e\u4f8b", "id": "f587:c0:m10"}
{"signature": "def grant_jsapi_ticket(self):", "body": "self._check_appid_appsecret()<EOL>if callable(self.__jsapi_ticket_refreshfunc):<EOL><INDENT>self.__jsapi_ticket, self.__jsapi_ticket_expires_at = self.__jsapi_ticket_refreshfunc()<EOL>return<EOL><DEDENT>response_json = self.__request.get(<EOL>url=\"<STR_LIT>\",<EOL>params={<EOL>\"<STR_LIT:type>\": \"<STR_LIT>\",<EOL>},<EOL>access_token=self.access_token,<EOL>)<EOL>self.__jsapi_ticket = response_json['<STR_LIT>']<EOL>self.__jsapi_ticket_expires_at = int(time.time()) + response_json['<STR_LIT>']<EOL>if callable(self.__jsapi_ticket_setfunc):<EOL><INDENT>self.__jsapi_ticket_setfunc(self.__jsapi_ticket, self.__jsapi_ticket_expires_at)<EOL><DEDENT>return response_json<EOL>", "docstring": "\u83b7\u53d6 jsapi ticket \u5e76\u66f4\u65b0\u5f53\u524d\u914d\u7f6e\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305 (\u4f20\u5165 jsapi_ticket_refreshfunc \u53c2\u6570\u540e\u8fd4\u56de None)", "id": "f587:c0:m17"}
{"signature": "def grant_access_token(self):", "body": "self._check_appid_appsecret()<EOL>if callable(self.__access_token_refreshfunc):<EOL><INDENT>self.__access_token, self.__access_token_expires_at = self.__access_token_refreshfunc()<EOL>return<EOL><DEDENT>response_json = self.__request.get(<EOL>url=\"<STR_LIT>\",<EOL>params={<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": self.__appid,<EOL>\"<STR_LIT>\": self.__appsecret,<EOL>},<EOL>access_token=self.__access_token<EOL>)<EOL>self.__access_token = response_json['<STR_LIT>']<EOL>self.__access_token_expires_at = int(time.time()) + response_json['<STR_LIT>']<EOL>if callable(self.__access_token_setfunc):<EOL><INDENT>self.__access_token_setfunc(self.__access_token, self.__access_token_expires_at)<EOL><DEDENT>return response_json<EOL>", "docstring": "\u83b7\u53d6 access token \u5e76\u66f4\u65b0\u5f53\u524d\u914d\u7f6e\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305 (\u4f20\u5165 access_token_refreshfunc \u53c2\u6570\u540e\u8fd4\u56de None)", "id": "f587:c0:m16"}
{"signature": "@property<EOL><INDENT>def jsapi_ticket(self):<DEDENT>", "body": "self._check_appid_appsecret()<EOL>if callable(self.__jsapi_ticket_getfunc):<EOL><INDENT>self.__jsapi_ticket, self.__jsapi_ticket_expires_at = self.__jsapi_ticket_getfunc()<EOL><DEDENT>if self.__jsapi_ticket:<EOL><INDENT>now = time.time()<EOL>if self.__jsapi_ticket_expires_at - now > <NUM_LIT>:<EOL><INDENT>return self.__jsapi_ticket<EOL><DEDENT><DEDENT>self.grant_jsapi_ticket()  <EOL>return self.__jsapi_ticket<EOL>", "docstring": "\u83b7\u53d6\u5f53\u524d jsapi ticket \u503c, \u672c\u65b9\u6cd5\u4f1a\u81ea\u884c\u7ef4\u62a4 jsapi ticket \u6709\u6548\u6027", "id": "f587:c0:m12"}
{"signature": "def __init__(self, token=None, appid=None, appsecret=None, partnerid=None,<EOL>partnerkey=None, paysignkey=None, access_token=None, access_token_expires_at=None,<EOL>jsapi_ticket=None, jsapi_ticket_expires_at=None, checkssl=False, conf=None):", "body": "if conf is not None:<EOL><INDENT>self.__conf = conf<EOL><DEDENT>elif isinstance(token, WechatConf):  <EOL><INDENT>self.__conf = token<EOL><DEDENT>else:  <EOL><INDENT>self.__conf = WechatConf(<EOL>token=token,<EOL>appid=appid,<EOL>appsecret=appsecret,<EOL>access_token=access_token,<EOL>access_token_expires_at=access_token_expires_at,<EOL>jsapi_ticket=jsapi_ticket,<EOL>jsapi_ticket_expires_at=jsapi_ticket_expires_at,<EOL>encrypt_mode='<STR_LIT>',<EOL>partnerid=partnerid,<EOL>partnerkey=partnerkey,<EOL>paysignkey=paysignkey,<EOL>checkssl=checkssl,<EOL>)<EOL><DEDENT>self.__request = WechatRequest(conf=self.__conf)<EOL>self.__is_parse = False<EOL>self.__message = None<EOL>", "docstring": ":param token: \u5fae\u4fe1 Token\n:param appid: App ID\n:param appsecret: App Secret\n:param partnerid: \u8d22\u4ed8\u901a\u5546\u6237\u8eab\u4efd\u6807\u8bc6, \u652f\u4ed8\u6743\u9650\u4e13\u7528\n:param partnerkey: \u8d22\u4ed8\u901a\u5546\u6237\u6743\u9650\u5bc6\u94a5 Key, \u652f\u4ed8\u6743\u9650\u4e13\u7528\n:param paysignkey: \u5546\u6237\u7b7e\u540d\u5bc6\u94a5 Key, \u652f\u4ed8\u6743\u9650\u4e13\u7528\n:param access_token: \u76f4\u63a5\u5bfc\u5165\u7684 access_token \u503c, \u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u5904\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u9700\u8981\u65f6\u81ea\u52a8\u91cd\u65b0\u83b7\u53d6\n:param access_token_expires_at: \u76f4\u63a5\u5bfc\u5165\u7684 access_token \u7684\u8fc7\u671f\u65e5\u671f\uff0c\u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u5904\u4f20\u5165, 
\u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u9700\u8981\u65f6\u81ea\u52a8\u91cd\u65b0\u83b7\u53d6\n:param jsapi_ticket: \u76f4\u63a5\u5bfc\u5165\u7684 jsapi_ticket \u503c, \u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u5904\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u9700\u8981\u65f6\u81ea\u52a8\u91cd\u65b0\u83b7\u53d6\n:param jsapi_ticket_expires_at: \u76f4\u63a5\u5bfc\u5165\u7684 jsapi_ticket \u7684\u8fc7\u671f\u65e5\u671f\uff0c\u8be5\u503c\u9700\u8981\u5728\u4e0a\u4e00\u6b21\u8be5\u7c7b\u5b9e\u4f8b\u5316\u4e4b\u540e\u624b\u52a8\u8fdb\u884c\u7f13\u5b58\u5e76\u5728\u6b64\u5904\u4f20\u5165, \u5982\u679c\u4e0d\u4f20\u5165, \u5c06\u4f1a\u5728\u9700\u8981\u65f6\u81ea\u52a8\u91cd\u65b0\u83b7\u53d6\n:param checkssl: \u662f\u5426\u68c0\u67e5 SSL, \u9ed8\u8ba4\u4e3a False, \u53ef\u907f\u514d urllib3 \u7684 InsecurePlatformWarning \u8b66\u544a\n:param conf: WechatConf \u914d\u7f6e\u7c7b, \u63d0\u4f9b\u6b64\u53c2\u6570\u5c06\u9ed8\u8ba4\u5ffd\u7565\u5176\u4ed6\u6240\u6709\u53c2\u6570, \u6240\u6709\u6570\u636e\u5747\u4ece\u6b64\u914d\u7f6e\u7c7b\u4e2d\u83b7\u53d6", "id": "f588:c0:m0"}
{"signature": "def response_video(self, media_id, title=None, description=None):", "body": "self._check_parse()<EOL>title = self._transcoding(title)<EOL>description = self._transcoding(description)<EOL>response = VideoReply(message=self.__message, media_id=media_id, title=title, description=description).render()<EOL>return self._encrypt_response(response)<EOL>", "docstring": "\u5c06 media_id \u6240\u4ee3\u8868\u7684\u89c6\u9891\u7ec4\u88c5\u4e3a\u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684\u54cd\u5e94\u6570\u636e\n:param media_id: \u89c6\u9891\u7684 MediaID\n:param title: \u89c6\u9891\u6d88\u606f\u7684\u6807\u9898\n:param description: \u89c6\u9891\u6d88\u606f\u7684\u63cf\u8ff0\n:return: \u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684 XML \u54cd\u5e94\u6570\u636e", "id": "f588:c0:m16"}
{"signature": "def response_news(self, articles):", "body": "self._check_parse()<EOL>for article in articles:<EOL><INDENT>if article.get('<STR_LIT:title>'):<EOL><INDENT>article['<STR_LIT:title>'] = self._transcoding(article['<STR_LIT:title>'])<EOL><DEDENT>if article.get('<STR_LIT:description>'):<EOL><INDENT>article['<STR_LIT:description>'] = self._transcoding(article['<STR_LIT:description>'])<EOL><DEDENT>if article.get('<STR_LIT>'):<EOL><INDENT>article['<STR_LIT>'] = self._transcoding(article['<STR_LIT>'])<EOL><DEDENT>if article.get('<STR_LIT:url>'):<EOL><INDENT>article['<STR_LIT:url>'] = self._transcoding(article['<STR_LIT:url>'])<EOL><DEDENT><DEDENT>news = ArticleReply(message=self.__message)<EOL>for article in articles:<EOL><INDENT>article = Article(**article)<EOL>news.add_article(article)<EOL><DEDENT>response = news.render()<EOL>return self._encrypt_response(response)<EOL>", "docstring": "\u5c06\u65b0\u95fb\u4fe1\u606f\u7ec4\u88c5\u4e3a\u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684\u54cd\u5e94\u6570\u636e\n:param articles: list \u5bf9\u8c61, \u6bcf\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a dict \u5bf9\u8c61, key \u5305\u542b `title`, `description`, `picurl`, `url`\n:return: \u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684 XML \u54cd\u5e94\u6570\u636e", "id": "f588:c0:m18"}
{"signature": "def _check_parse(self):", "body": "if not self.__is_parse:<EOL><INDENT>raise NeedParseError()<EOL><DEDENT>", "docstring": "\u68c0\u67e5\u662f\u5426\u6210\u529f\u89e3\u6790\u5fae\u4fe1\u670d\u52a1\u5668\u4f20\u6765\u7684\u6570\u636e\n:raises NeedParseError: \u9700\u8981\u89e3\u6790\u5fae\u4fe1\u670d\u52a1\u5668\u4f20\u6765\u7684\u6570\u636e", "id": "f588:c0:m49"}
{"signature": "def _check_official_error(self, json_data):", "body": "if \"<STR_LIT>\" in json_data and json_data[\"<STR_LIT>\"] != <NUM_LIT:0>:<EOL><INDENT>raise OfficialAPIError(errcode=json_data.get('<STR_LIT>'), errmsg=json_data.get('<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>", "docstring": "\u68c0\u6d4b\u5fae\u4fe1\u516c\u4f17\u5e73\u53f0\u8fd4\u56de\u503c\u4e2d\u662f\u5426\u5305\u542b\u9519\u8bef\u7684\u8fd4\u56de\u7801\n:raises OfficialAPIError: \u5982\u679c\u8fd4\u56de\u7801\u63d0\u793a\u6709\u9519\u8bef\uff0c\u629b\u51fa\u5f02\u5e38\uff1b\u5426\u5219\u8fd4\u56de True", "id": "f588:c0:m50"}
{"signature": "def send_video_message(self, user_id, media_id, title=None, description=None):", "body": "video_data = {<EOL>'<STR_LIT>': media_id,<EOL>}<EOL>if title:<EOL><INDENT>video_data['<STR_LIT:title>'] = title<EOL><DEDENT>if description:<EOL><INDENT>video_data['<STR_LIT:description>'] = description<EOL><DEDENT>return self.request.post(<EOL>url='<STR_LIT>',<EOL>data={<EOL>'<STR_LIT>': user_id,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': video_data,<EOL>}<EOL>)<EOL>", "docstring": "\u53d1\u9001\u89c6\u9891\u6d88\u606f\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html\n:param user_id: \u7528\u6237 ID, \u5c31\u662f\u4f60\u6536\u5230\u7684 WechatMessage \u7684 source\n:param media_id: \u53d1\u9001\u7684\u89c6\u9891\u7684\u5a92\u4f53ID\u3002 \u53ef\u4ee5\u901a\u8fc7 :func:`upload_media` \u4e0a\u4f20\u3002\n:param title: \u89c6\u9891\u6d88\u606f\u7684\u6807\u9898\n:param description: \u89c6\u9891\u6d88\u606f\u7684\u63cf\u8ff0\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m39"}
{"signature": "def get_group_by_id(self, openid):", "body": "return self.request.post(<EOL>url='<STR_LIT>',<EOL>data={<EOL>'<STR_LIT>': openid,<EOL>}<EOL>)<EOL>", "docstring": "\u67e5\u8be2\u7528\u6237\u6240\u5728\u5206\u7ec4\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/13/be5272dc4930300ba561d927aead2569.html\n:param openid: \u7528\u6237\u7684OpenID\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m31"}
{"signature": "def show_qrcode(self, ticket):", "body": "return requests.get(<EOL>url='<STR_LIT>',<EOL>params={<EOL>'<STR_LIT>': ticket<EOL>}<EOL>)<EOL>", "docstring": "\u901a\u8fc7ticket\u6362\u53d6\u4e8c\u7ef4\u7801\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/18/28fc21e7ed87bec960651f0ce873ef8a.html\n:param ticket: \u4e8c\u7ef4\u7801 ticket \u3002\u53ef\u4ee5\u901a\u8fc7 :func:`create_qrcode` \u83b7\u53d6\u5230\n:return: \u8fd4\u56de\u7684 Request \u5bf9\u8c61", "id": "f588:c0:m43"}
{"signature": "def get_groups(self):", "body": "return self.request.get('<STR_LIT>')<EOL>", "docstring": "\u67e5\u8be2\u6240\u6709\u5206\u7ec4\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/13/be5272dc4930300ba561d927aead2569.html\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m30"}
{"signature": "def create_group(self, name):", "body": "return self.request.post(<EOL>url='<STR_LIT>',<EOL>data={<EOL>'<STR_LIT>': {<EOL>'<STR_LIT:name>': name,<EOL>},<EOL>}<EOL>)<EOL>", "docstring": "\u521b\u5efa\u5206\u7ec4\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/13/be5272dc4930300ba561d927aead2569.html\n:param name: \u5206\u7ec4\u540d\u5b57\uff0830\u4e2a\u5b57\u7b26\u4ee5\u5185\uff09\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305\n:raise HTTPError: \u5fae\u4fe1api http \u8bf7\u6c42\u5931\u8d25", "id": "f588:c0:m29"}
{"signature": "def parse_data(self, data, msg_signature=None, timestamp=None, nonce=None):", "body": "result = {}<EOL>if isinstance(data, six.text_type):  <EOL><INDENT>data = data.encode('<STR_LIT:utf-8>')<EOL><DEDENT>if self.conf.encrypt_mode == '<STR_LIT>':<EOL><INDENT>if not (msg_signature and timestamp and nonce):<EOL><INDENT>raise ParseError('<STR_LIT>')<EOL><DEDENT>data = self.conf.crypto.decrypt_message(<EOL>msg=data,<EOL>msg_signature=msg_signature,<EOL>timestamp=timestamp,<EOL>nonce=nonce,<EOL>)<EOL><DEDENT>try:<EOL><INDENT>xml = XMLStore(xmlstring=data)<EOL><DEDENT>except Exception:<EOL><INDENT>raise ParseError()<EOL><DEDENT>result = xml.xml2dict<EOL>result['<STR_LIT>'] = data<EOL>result['<STR_LIT:type>'] = result.pop('<STR_LIT>').lower()<EOL>message_type = MESSAGE_TYPES.get(result['<STR_LIT:type>'], UnknownMessage)<EOL>self.__message = message_type(result)<EOL>self.__is_parse = True<EOL>", "docstring": "\u89e3\u6790\u5fae\u4fe1\u670d\u52a1\u5668\u53d1\u9001\u8fc7\u6765\u7684\u6570\u636e\u5e76\u4fdd\u5b58\u7c7b\u4e2d\n:param data: HTTP Request \u7684 Body \u6570\u636e\n:param msg_signature: EncodingAESKey \u7684 msg_signature\n:param timestamp: EncodingAESKey \u7528\u65f6\u95f4\u6233\n:param nonce: EncodingAESKey \u7528\u968f\u673a\u6570\n:raises ParseError: \u89e3\u6790\u5fae\u4fe1\u670d\u52a1\u5668\u6570\u636e\u9519\u8bef, \u6570\u636e\u4e0d\u5408\u6cd5", "id": "f588:c0:m7"}
{"signature": "def send_image_message(self, user_id, media_id):", "body": "return self.request.post(<EOL>url='<STR_LIT>',<EOL>data={<EOL>'<STR_LIT>': user_id,<EOL>'<STR_LIT>': '<STR_LIT:image>',<EOL>'<STR_LIT:image>': {<EOL>'<STR_LIT>': media_id,<EOL>},<EOL>}<EOL>)<EOL>", "docstring": "\u53d1\u9001\u56fe\u7247\u6d88\u606f\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html\n:param user_id: \u7528\u6237 ID, \u5c31\u662f\u4f60\u6536\u5230\u7684 WechatMessage \u7684 source\n:param media_id: \u56fe\u7247\u7684\u5a92\u4f53ID\u3002 \u53ef\u4ee5\u901a\u8fc7 :func:`upload_media` \u4e0a\u4f20\u3002\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m37"}
{"signature": "def set_template_industry(self, industry_id1, industry_id2):", "body": "return self.request.post(<EOL>url='<STR_LIT>',<EOL>data={<EOL>'<STR_LIT>': str(industry_id1),<EOL>'<STR_LIT>': str(industry_id2),<EOL>}<EOL>)<EOL>", "docstring": "\u8bbe\u7f6e\u6240\u5c5e\u884c\u4e1a\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/17/304c1885ea66dbedf7dc170d84999a9d.html\n:param industry_id1: \u4e3b\u8425\u884c\u4e1a\u4ee3\u7801\n:param industry_id2: \u526f\u8425\u884c\u4e1a\u4ee3\u7801\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m44"}
{"signature": "def grant_token(self, **kwargs):", "body": "return self.conf.grant_access_token()<EOL>", "docstring": "\u83b7\u53d6 Access Token\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/11/0e4b294685f817b95cbed85ba5e82b8f.html\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m20"}
{"signature": "def send_article_message(self, user_id, articles=None, media_id=None):", "body": "<EOL>if articles is None and media_id is None:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if articles:<EOL><INDENT>articles_data = []<EOL>for article in articles:<EOL><INDENT>article = Article(**article)<EOL>articles_data.append({<EOL>'<STR_LIT:title>': article.title,<EOL>'<STR_LIT:description>': article.description,<EOL>'<STR_LIT:url>': article.url,<EOL>'<STR_LIT>': article.picurl,<EOL>})<EOL><DEDENT>return self.request.post(<EOL>url='<STR_LIT>',<EOL>data={<EOL>'<STR_LIT>': user_id,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': articles_data,<EOL>},<EOL>}<EOL>)<EOL><DEDENT>return self.request.post(<EOL>url='<STR_LIT>',<EOL>data={<EOL>'<STR_LIT>': user_id,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': media_id,<EOL>},<EOL>}<EOL>)<EOL>", "docstring": "\u53d1\u9001\u56fe\u6587\u6d88\u606f\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html\n:param user_id: \u7528\u6237 ID, \u5c31\u662f\u4f60\u6536\u5230\u7684 WechatMessage \u7684 source\n:param articles: list \u5bf9\u8c61, \u6bcf\u4e2a\u5143\u7d20\u4e3a\u4e00\u4e2a dict \u5bf9\u8c61, key \u5305\u542b `title`, `description`, `picurl`, `url`\n:param media_id: \u5f85\u53d1\u9001\u7684\u56fe\u6587 Media ID\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m41"}
{"signature": "@conf.setter<EOL><INDENT>def conf(self, conf):<DEDENT>", "body": "self.__conf = conf<EOL>self.__request = WechatRequest(conf=self.__conf)<EOL>", "docstring": "\u8bbe\u7f6e\u5f53\u524d WechatConf \u5b9e\u4f8b", "id": "f588:c0:m2"}
{"signature": "@request.setter<EOL><INDENT>def request(self, request):<DEDENT>", "body": "self.__request = request<EOL>", "docstring": "\u8bbe\u7f6e\u5f53\u524d WechatConf \u5b9e\u4f8b", "id": "f588:c0:m4"}
{"signature": "def get_user_info(self, user_id, lang='<STR_LIT>'):", "body": "return self.request.get(<EOL>url='<STR_LIT>',<EOL>params={<EOL>'<STR_LIT>': user_id,<EOL>'<STR_LIT>': lang,<EOL>}<EOL>)<EOL>", "docstring": "\u83b7\u53d6\u7528\u6237\u57fa\u672c\u4fe1\u606f\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/14/bb5031008f1494a59c6f71fa0f319c66.html\n:param user_id: \u7528\u6237 ID, \u5c31\u662f\u4f60\u6536\u5230\u7684 WechatMessage \u7684 source\n:param lang: \u8fd4\u56de\u56fd\u5bb6\u5730\u533a\u8bed\u8a00\u7248\u672c\uff0czh_CN \u7b80\u4f53\uff0czh_TW \u7e41\u4f53\uff0cen \u82f1\u8bed\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m34"}
{"signature": "def update_group(self, group_id, name):", "body": "return self.request.post(<EOL>url='<STR_LIT>',<EOL>data={<EOL>'<STR_LIT>': {<EOL>'<STR_LIT:id>': int(group_id),<EOL>'<STR_LIT:name>': name,<EOL>}<EOL>}<EOL>)<EOL>", "docstring": "\u4fee\u6539\u5206\u7ec4\u540d\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/13/be5272dc4930300ba561d927aead2569.html\n:param group_id: \u5206\u7ec4id\uff0c\u7531\u5fae\u4fe1\u5206\u914d\n:param name: \u5206\u7ec4\u540d\u5b57\uff0830\u4e2a\u5b57\u7b26\u4ee5\u5185\uff09\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m32"}
{"signature": "def get_menu(self):", "body": "return self.request.get('<STR_LIT>')<EOL>", "docstring": "\u67e5\u8be2\u81ea\u5b9a\u4e49\u83dc\u5355\n\u8be6\u60c5\u8bf7\u53c2\u8003 http://mp.weixin.qq.com/wiki/16/ff9b7b85220e1396ffa16794a9d95adc.html\n:return: \u8fd4\u56de\u7684 JSON \u6570\u636e\u5305", "id": "f588:c0:m23"}
{"signature": "@property<EOL><INDENT>def request(self):<DEDENT>", "body": "return self.__request<EOL>", "docstring": "\u83b7\u53d6\u5f53\u524d WechatConf \u914d\u7f6e\u5b9e\u4f8b", "id": "f588:c0:m3"}
{"signature": "def response_image(self, media_id):", "body": "self._check_parse()<EOL>response = ImageReply(message=self.__message, media_id=media_id).render()<EOL>return self._encrypt_response(response)<EOL>", "docstring": "\u5c06 media_id \u6240\u4ee3\u8868\u7684\u56fe\u7247\u7ec4\u88c5\u4e3a\u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684\u54cd\u5e94\u6570\u636e\n:param media_id: \u56fe\u7247\u7684 MediaID\n:return: \u7b26\u5408\u5fae\u4fe1\u670d\u52a1\u5668\u8981\u6c42\u7684 XML \u54cd\u5e94\u6570\u636e", "id": "f588:c0:m14"}
{"signature": "def _element2dict(self, parent):", "body": "d = {}<EOL>for node in parent.childNodes:<EOL><INDENT>if not isinstance(node, minidom.Element):<EOL><INDENT>continue<EOL><DEDENT>if not node.hasChildNodes():<EOL><INDENT>continue<EOL><DEDENT>if node.childNodes[<NUM_LIT:0>].nodeType == minidom.Node.ELEMENT_NODE:<EOL><INDENT>try:<EOL><INDENT>d[node.tagName]<EOL><DEDENT>except KeyError:<EOL><INDENT>d[node.tagName] = []<EOL><DEDENT>d[node.tagName].append(self._element2dict(node))<EOL><DEDENT>elif len(node.childNodes) == <NUM_LIT:1> and node.childNodes[<NUM_LIT:0>].nodeType in [minidom.Node.CDATA_SECTION_NODE, minidom.Node.TEXT_NODE]:<EOL><INDENT>d[node.tagName] = node.childNodes[<NUM_LIT:0>].data<EOL><DEDENT><DEDENT>return d<EOL>", "docstring": "\u5c06\u5355\u4e2a\u8282\u70b9\u8f6c\u6362\u4e3a dict", "id": "f590:c0:m2"}
{"signature": "def _check_signature(self, msg_signature, timestamp, nonce, echostr):", "body": "signature = get_sha1_signature(self.__token, timestamp, nonce, echostr)<EOL>if not signature == msg_signature:<EOL><INDENT>raise ValidateSignatureError()<EOL><DEDENT>try:<EOL><INDENT>return self.__pc.decrypt(echostr, self.__id)<EOL><DEDENT>except DecryptAESError as e:<EOL><INDENT>raise ValidateSignatureError(e)<EOL><DEDENT>", "docstring": "\u9a8c\u8bc1\u7b7e\u540d\u6709\u6548\u6027\n\n        :param msg_signature: \u7b7e\u540d\u4e32\uff0c\u5bf9\u5e94URL\u53c2\u6570\u7684msg_signature\n        :param timestamp: \u65f6\u95f4\u6233\uff0c\u5bf9\u5e94URL\u53c2\u6570\u7684timestamp\n        :param nonce: \u968f\u673a\u4e32\uff0c\u5bf9\u5e94URL\u53c2\u6570\u7684nonce\n        :param echostr: \u968f\u673a\u4e32\uff0c\u5bf9\u5e94URL\u53c2\u6570\u7684echostr\n        :return: \u89e3\u5bc6\u4e4b\u540e\u7684echostr\n        :raise ValidateSignatureError: \u7b7e\u540d\u65e0\u6548\u5f02\u5e38", "id": "f592:c0:m1"}
{"signature": "def __init__(self, token, encoding_aes_key, _id):", "body": "self.__key = base64.b64decode(to_binary(encoding_aes_key) + to_binary('<STR_LIT:=>'))<EOL>if len(self.__key) != <NUM_LIT:32>:<EOL><INDENT>raise ValidateAESKeyError(encoding_aes_key)<EOL><DEDENT>self.__token = to_binary(token)<EOL>self.__id = to_binary(_id)<EOL>self.__pc = BaseCrypto(self.__key)<EOL>", "docstring": "\u6784\u9020\u51fd\u6570\n\n        :param token: \u516c\u4f17\u5e73\u53f0\u4e0a\uff0c\u5f00\u53d1\u8005\u8bbe\u7f6e\u7684Token\n        :param encoding_aes_key: \u516c\u4f17\u5e73\u53f0\u4e0a\uff0c\u5f00\u53d1\u8005\u8bbe\u7f6e\u7684EncodingAESKey\n        :param _id: \u516c\u4f17\u53f7\u7684 appid \u6216\u4f01\u4e1a\u53f7\u7684 corpid", "id": "f592:c0:m0"}
{"signature": "def decrypt(self, text, appid):", "body": "try:<EOL><INDENT>cryptor = AES.new(self.key, self.mode, self.key[:<NUM_LIT:16>])<EOL>plain_text = cryptor.decrypt(base64.b64decode(text))<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise DecryptAESError(e)<EOL><DEDENT>try:<EOL><INDENT>if six.PY2:<EOL><INDENT>pad = ord(plain_text[-<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>pad = plain_text[-<NUM_LIT:1>]<EOL><DEDENT>content = plain_text[<NUM_LIT:16>:-pad]<EOL>xml_len = socket.ntohl(struct.unpack(\"<STR_LIT:I>\", content[: <NUM_LIT:4>])[<NUM_LIT:0>])<EOL>xml_content = content[<NUM_LIT:4>: xml_len + <NUM_LIT:4>]<EOL>from_appid = content[xml_len + <NUM_LIT:4>:]<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise IllegalBuffer(e)<EOL><DEDENT>if from_appid != appid:<EOL><INDENT>raise ValidateAppIDError()<EOL><DEDENT>return xml_content<EOL>", "docstring": "\u5bf9\u89e3\u5bc6\u540e\u7684\u660e\u6587\u8fdb\u884c\u8865\u4f4d\u5220\u9664\n\n        @param text: \u5bc6\u6587\n        @return: \u5220\u9664\u586b\u5145\u8865\u4f4d\u540e\u7684\u660e\u6587", "id": "f593:c0:m2"}
{"signature": "def encrypt(self, text, appid):", "body": "<EOL>text = self.get_random_str() + struct.pack(\"<STR_LIT:I>\", socket.htonl(len(text))) + to_binary(text) + appid<EOL>pkcs7 = PKCS7Encoder()<EOL>text = pkcs7.encode(text)<EOL>cryptor = AES.new(self.key, self.mode, self.key[:<NUM_LIT:16>])<EOL>try:<EOL><INDENT>ciphertext = cryptor.encrypt(text)<EOL>return base64.b64encode(ciphertext)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise EncryptAESError(e)<EOL><DEDENT>", "docstring": "\u5bf9\u660e\u6587\u8fdb\u884c\u52a0\u5bc6\n\n        @param text: \u9700\u8981\u52a0\u5bc6\u7684\u660e\u6587\n        @return: \u52a0\u5bc6\u5f97\u5230\u7684\u5b57\u7b26\u4e32", "id": "f593:c0:m1"}
{"signature": "def get_random_str(self):", "body": "rule = string.ascii_letters + string.digits<EOL>return \"<STR_LIT>\".join(random.sample(rule, <NUM_LIT:16>))<EOL>", "docstring": "\u968f\u673a\u751f\u621016\u4f4d\u5b57\u7b26\u4e32\n\n        @return: 16\u4f4d\u5b57\u7b26\u4e32", "id": "f593:c0:m3"}
{"signature": "def __init__(self, message, media_id, title=None, description=None):", "body": "title = title or '<STR_LIT>'<EOL>description = description or '<STR_LIT>'<EOL>super(VideoReply, self).__init__(message=message, media_id=media_id, title=title, description=description)<EOL>", "docstring": ":param message: WechatMessage\u5bf9\u8c61\n:param media_id: \u89c6\u9891\u7684 MediaID\n:param title: \u89c6\u9891\u6d88\u606f\u7684\u6807\u9898\n:param description: \u89c6\u9891\u6d88\u606f\u7684\u63cf\u8ff0", "id": "f598:c4:m0"}
{"signature": "def __init__(self, message):", "body": "super(GroupTransferReply, self).__init__(message=message)<EOL>", "docstring": ":param message: WechatMessage \u5bf9\u8c61", "id": "f598:c8:m0"}
{"signature": "def __init__(self, message, media_id):", "body": "super(VoiceReply, self).__init__(message=message, media_id=media_id)<EOL>", "docstring": ":param message: WechatMessage \u5bf9\u8c61\n:param media_id: \u8bed\u97f3\u7684 MediaID", "id": "f598:c3:m0"}
{"signature": "def convert_ext_to_mime(extension):", "body": "table = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>}<EOL>if extension in table:<EOL><INDENT>return table[extension]<EOL><DEDENT>raise ValueError(\"<STR_LIT>\")<EOL>", "docstring": "\u5c06\u6269\u5c55\u540d\u8f6c\u6362\u4e3a MIME \u683c\u5f0f\n    :return: mime string", "id": "f599:m5"}
{"signature": "def exists(self, openid):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "\u5f53 openid \u5b58\u5728\u65f6\u8fd4\u56de True", "id": "f604:c1:m26"}
{"signature": "@_check_player_is_active<EOL><INDENT>def show_video(self):<DEDENT>", "body": "self._player_interface.UnHideVideo()<EOL>", "docstring": "Shows the video (to undo a `hide_video`)", "id": "f624:c2:m48"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def previous(self):<DEDENT>", "body": "return self._player_interface.Previous()<EOL>", "docstring": "Skip to the previous chapter\n\nReturns:\n    bool: Whether the player skipped to the previous chapter", "id": "f624:c2:m61"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def playback_status(self):<DEDENT>", "body": "return self._player_interface_property('<STR_LIT>')<EOL>", "docstring": "Returns:\n    str: one of (\"Playing\" | \"Paused\" | \"Stopped\")", "id": "f624:c2:m19"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def set_aspect_mode(self, mode):<DEDENT>", "body": "self._player_interface.SetAspectMode(ObjectPath('<STR_LIT>'), String(mode))<EOL>", "docstring": "Set the aspect mode of the video\n\nArgs:\n    mode (str): One of (\"letterbox\" | \"fill\" | \"stretch\")", "id": "f624:c2:m43"}
{"signature": "def quit(self):", "body": "if self._process is None:<EOL><INDENT>logger.debug('<STR_LIT>')<EOL>return<EOL><DEDENT>try:<EOL><INDENT>logger.debug('<STR_LIT>')<EOL>process_group_id = os.getpgid(self._process.pid)<EOL>os.killpg(process_group_id, signal.SIGTERM)<EOL>logger.debug('<STR_LIT>' % process_group_id)<EOL>self._process_monitor.join()<EOL><DEDENT>except OSError:<EOL><INDENT>logger.error('<STR_LIT>')<EOL><DEDENT>self._process = None<EOL>", "docstring": "Quit the player, blocking until the process has died", "id": "f624:c2:m68"}
{"signature": "def position(self):", "body": "return self._position_us() / (<NUM_LIT> * <NUM_LIT>)<EOL>", "docstring": "Returns:\n    int: position in seconds", "id": "f624:c2:m23"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def can_pause(self):<DEDENT>", "body": "return self._player_interface_property('<STR_LIT>')<EOL>", "docstring": "Returns:\n    bool: whether the player can pause", "id": "f624:c2:m18"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def maximum_rate(self):<DEDENT>", "body": "return self._player_interface_property('<STR_LIT>')<EOL>", "docstring": "Returns:\n    float: maximum playback rate (as proportion of normal rate)", "id": "f624:c2:m25"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def next(self):<DEDENT>", "body": "return self._player_interface.Next()<EOL>", "docstring": "Skip to the next chapter\n\nReturns:\n    bool: Whether the player skipped to the next chapter", "id": "f624:c2:m60"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def set_video_crop(self, x1, y1, x2, y2):<DEDENT>", "body": "crop = \"<STR_LIT>\" % (str(x1),str(y1),str(x2),str(y2))<EOL>self._player_interface.SetVideoCropPos(ObjectPath('<STR_LIT>'), String(crop))<EOL>", "docstring": "Args:\n    x1 (int): Top left x coordinate (px)\n    y1 (int): Top left y coordinate (px)\n    x2 (int): Bottom right x coordinate (px)\n    y2 (int): Bottom right y coordinate (px)", "id": "f624:c2:m46"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def play(self):<DEDENT>", "body": "if not self.is_playing():<EOL><INDENT>self.play_pause()<EOL>self._is_playing = True<EOL>self.playEvent(self)<EOL><DEDENT>", "docstring": "Play the video asynchronously returning control immediately to the calling code", "id": "f624:c2:m59"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def list_audio(self):<DEDENT>", "body": "return self._player_interface.ListAudio()<EOL>", "docstring": "Returns:\n    [str]: A list of all known audio streams, each item is in the\n           format: ``<index>:<language>:<name>:<codec>:<active>``", "id": "f624:c2:m49"}
{"signature": "@_check_player_is_active<EOL><INDENT>def play_pause(self):<DEDENT>", "body": "self._player_interface.PlayPause()<EOL>self._is_playing = not self._is_playing<EOL>if self._is_playing:<EOL><INDENT>self.playEvent(self)<EOL><DEDENT>else:<EOL><INDENT>self.pauseEvent(self)<EOL><DEDENT>", "docstring": "Pause playback if currently playing, otherwise start playing if currently paused.", "id": "f624:c2:m36"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def can_seek(self):<DEDENT>", "body": "return self._player_interface_property('<STR_LIT>')<EOL>", "docstring": "Returns:\n    bool: whether the player can seek", "id": "f624:c2:m15"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def set_video_pos(self, x1, y1, x2, y2):<DEDENT>", "body": "position = \"<STR_LIT>\" % (str(x1),str(y1),str(x2),str(y2))<EOL>self._player_interface.VideoPos(ObjectPath('<STR_LIT>'), String(position))<EOL>", "docstring": "Set the video position on the screen\n\nArgs:\n    x1 (int): Top left x coordinate (px)\n    y1 (int): Top left y coordinate (px)\n    x2 (int): Bottom right x coordinate (px)\n    y2 (int): Bottom right y coordinate (px)", "id": "f624:c2:m44"}
{"signature": "@_check_player_is_active<EOL><INDENT>def show_subtitles(self):<DEDENT>", "body": "return self._player_interface.ShowSubtitles()<EOL>", "docstring": "Shows subtitles after :class:`hide_subtitles`", "id": "f624:c2:m54"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def set_alpha(self, alpha):<DEDENT>", "body": "self._player_interface.SetAlpha(ObjectPath('<STR_LIT>'), Int64(alpha))<EOL>", "docstring": "Set the transparency of the video overlay\n\nArgs:\n    alpha (float): The transparency (0..255)", "id": "f624:c2:m40"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def rate(self):<DEDENT>", "body": "return self._rate<EOL>", "docstring": "Returns:\n    float: playback rate, 1 is the normal rate, 2 would be double speed.", "id": "f624:c2:m26"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def set_rate(self, rate):<DEDENT>", "body": "self._rate = self._player_interface_property('<STR_LIT>', dbus.Double(rate))<EOL>return self._rate<EOL>", "docstring": "Set the playback rate of the video as a multiple of the default playback speed\n\nExamples:\n    >>> player.set_rate(2)\n    # Will play twice as fast as normal speed\n    >>> player.set_rate(0.5)\n    # Will play half speed", "id": "f624:c2:m27"}
{"signature": "@_check_player_is_active<EOL><INDENT>def select_audio(self, index):<DEDENT>", "body": "return self._player_interface.SelectAudio(dbus.Int32(index))<EOL>", "docstring": "Select audio stream specified by the index of the stream in :class:`list_audio`\n\nArgs:\n    index (int): index of audio stream returned by :class:`list_audio`", "id": "f624:c2:m53"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def set_volume(self, volume):<DEDENT>", "body": "<EOL>if volume == <NUM_LIT:0>:<EOL><INDENT>volume = <NUM_LIT><EOL><DEDENT>return self._player_interface_property('<STR_LIT>', dbus.Double(volume))<EOL>", "docstring": "Args:\n    float: volume in the interval [0, 10]", "id": "f624:c2:m21"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def stop(self):<DEDENT>", "body": "self._player_interface.Stop()<EOL>self.stopEvent(self)<EOL>", "docstring": "Stop the player, causing it to quit", "id": "f624:c2:m37"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def supported_uri_schemes(self):<DEDENT>", "body": "return self._root_interface_property('<STR_LIT>')<EOL>", "docstring": "Returns:\n    str: list of supported URI schemes\nExamples:\n    >>> player.supported_uri_schemes()\n    [\"file\", \"http\", \"rtsp\", \"rtmp\"]", "id": "f624:c2:m12"}
{"signature": "@_check_player_is_active<EOL><INDENT>def hide_subtitles(self):<DEDENT>", "body": "return self._player_interface.HideSubtitles()<EOL>", "docstring": "Hide subtitles", "id": "f624:c2:m55"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def can_quit(self):<DEDENT>", "body": "return self._root_interface_property('<STR_LIT>')<EOL>", "docstring": "Returns:\n    bool: whether the player can quit or not", "id": "f624:c2:m6"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def volume(self):<DEDENT>", "body": "if self._is_muted:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>return self._player_interface_property('<STR_LIT>')<EOL>", "docstring": "Returns:\n    float: current player volume", "id": "f624:c2:m20"}
{"signature": "@_check_player_is_active<EOL><INDENT>def mute(self):<DEDENT>", "body": "self._is_muted = True<EOL>self._player_interface.Mute()<EOL>", "docstring": "Mute audio. If already muted, then this does not do anything", "id": "f624:c2:m41"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def width(self):<DEDENT>", "body": "return self._player_interface_property('<STR_LIT>')<EOL>", "docstring": "Returns:\n    int: video width in px", "id": "f624:c2:m31"}
{"signature": "@_check_player_is_active<EOL><INDENT>def select_subtitle(self, index):<DEDENT>", "body": "return self._player_interface.SelectSubtitle(dbus.Int32(index))<EOL>", "docstring": "Enable a subtitle specified by the index it is listed in :class:`list_subtitles`\n\nArgs:\n    index (int): index of subtitle listing returned by :class:`list_subtitles`", "id": "f624:c2:m52"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def can_go_next(self):<DEDENT>", "body": "return self._player_interface_property('<STR_LIT>')<EOL>", "docstring": "Returns:\n    bool: whether the player can move to the next item in the playlist", "id": "f624:c2:m13"}
{"signature": "@_check_player_is_active<EOL><INDENT>def unmute(self):<DEDENT>", "body": "self._is_muted = False<EOL>self._player_interface.Unmute()<EOL>", "docstring": "Unmutes the video. If already unmuted, then this does not do anything", "id": "f624:c2:m42"}
{"signature": "@_check_player_is_active<EOL><INDENT>@_from_dbus_type<EOL>def has_track_list(self):<DEDENT>", "body": "return self._root_interface_property('<STR_LIT>')<EOL>", "docstring": "Returns:\n    bool: whether the player has a track list or not", "id": "f624:c2:m10"}
{"signature": "def find_address_file(self):", "body": "possible_address_files = []<EOL>while not possible_address_files:<EOL><INDENT>isnt_pid_file = lambda path: not path.endswith('<STR_LIT>')<EOL>possible_address_files = list(filter(isnt_pid_file,<EOL>glob('<STR_LIT>')))<EOL>possible_address_files.sort(key=lambda path: os.path.getmtime(path))<EOL>time.sleep(<NUM_LIT>)<EOL><DEDENT>self.path = possible_address_files[-<NUM_LIT:1>]<EOL>", "docstring": "Finds the OMXPlayer DBus connection\nAssumes there is an alive OMXPlayer process.\n:return:", "id": "f628:c0:m2"}
{"signature": "def exit_on_keyboard_interrupt(f):", "body": "@wraps(f)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>raise_exception = kwargs.pop('<STR_LIT>', False)<EOL>try:<EOL><INDENT>return f(*args, **kwargs)<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>if not raise_exception:<EOL><INDENT>sys.exit()<EOL><DEDENT>raise KeyboardInterrupt<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Decorator that allows user to exit script by sending a keyboard interrupt\n    (ctrl + c) without raising an exception.", "id": "f635:m0"}
{"signature": "def format_answers(self, fmt='<STR_LIT>'):", "body": "fmts = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>if fmt not in fmts:<EOL><INDENT>eprint(\"<STR_LIT>\".format(fmt, fmts))<EOL>return<EOL><DEDENT>def stringify(val):<EOL><INDENT>if type(val) in (list, tuple):<EOL><INDENT>return '<STR_LIT:U+002CU+0020>'.join(str(e) for e in val)<EOL><DEDENT>return val<EOL><DEDENT>if fmt == '<STR_LIT>':<EOL><INDENT>return json.dumps(self.answers)<EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>answers = [[k, v] for k, v in self.answers.items()]<EOL>return json.dumps(answers)<EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>answers = '<STR_LIT:\\n>'.join('<STR_LIT>'.format(k, stringify(v)) for k, v in self.answers.items())<EOL>return answers<EOL><DEDENT>", "docstring": "Formats answers depending on `fmt`.", "id": "f635:c2:m14"}
{"signature": "def remove(self, key):", "body": "return self.questions.pop(key)<EOL>", "docstring": "Remove all questions associated with `key`. Raises exception if `key`\n        doesn't exist.", "id": "f635:c2:m5"}
{"signature": "def add(self, *args, **kwargs):", "body": "if '<STR_LIT>' in kwargs and isinstance(kwargs['<STR_LIT>'], Question):<EOL><INDENT>question = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>question = Question(*args, **kwargs)<EOL><DEDENT>self.questions.setdefault(question.key, []).append(question)<EOL>return question<EOL>", "docstring": "Add a Question instance to the questions dict. Each key points\n        to a list of Question instances with that key. Use the `question`\n        kwarg to pass a Question instance if you want, or pass in the same\n        args you would pass to instantiate a question.", "id": "f635:c2:m1"}
{"signature": "@register(key='<STR_LIT>')<EOL>def one(prompt, *args, **kwargs):", "body": "indicator = '<STR_LIT>'<EOL>if sys.version_info < (<NUM_LIT:3>, <NUM_LIT:0>):<EOL><INDENT>indicator = '<STR_LIT:>>'<EOL><DEDENT>def go_back(picker):<EOL><INDENT>return None, -<NUM_LIT:1><EOL><DEDENT>options, verbose_options = prepare_options(args)<EOL>idx = kwargs.get('<STR_LIT>', <NUM_LIT:0>)<EOL>picker = Picker(verbose_options, title=prompt, indicator=indicator, default_index=idx)<EOL>picker.register_custom_handler(ord('<STR_LIT:h>'), go_back)<EOL>picker.register_custom_handler(curses.KEY_LEFT, go_back)<EOL>with stdout_redirected(sys.stderr):<EOL><INDENT>option, index = picker.start()<EOL>if index == -<NUM_LIT:1>:<EOL><INDENT>raise QuestionnaireGoBack<EOL><DEDENT>if kwargs.get('<STR_LIT>', False):<EOL><INDENT>return index<EOL><DEDENT>return options[index]<EOL><DEDENT>", "docstring": "Instantiates a picker, registers custom handlers for going back,\n    and starts the picker.", "id": "f636:m3"}
{"signature": "@register(key='<STR_LIT>')<EOL>def many(prompt, *args, **kwargs):", "body": "def get_options(options, chosen):<EOL><INDENT>return [options[i] for i, c in enumerate(chosen) if c]<EOL><DEDENT>def get_verbose_options(verbose_options, chosen):<EOL><INDENT>no, yes = '<STR_LIT:U+0020>', '<STR_LIT>'<EOL>if sys.version_info < (<NUM_LIT:3>, <NUM_LIT:3>):<EOL><INDENT>no, yes = '<STR_LIT:U+0020>', '<STR_LIT:@>'<EOL><DEDENT>opts = ['<STR_LIT>'.format(yes if c else no, verbose_options[i]) for i, c in enumerate(chosen)]<EOL>return opts + ['<STR_LIT>'.format('<STR_LIT:U+0020>', kwargs.get('<STR_LIT>', '<STR_LIT>'))]<EOL><DEDENT>options, verbose_options = prepare_options(args)<EOL>chosen = [False] * len(options)<EOL>index = kwargs.get('<STR_LIT>', <NUM_LIT:0>)<EOL>default = kwargs.get('<STR_LIT:default>', None)<EOL>if isinstance(default, list):<EOL><INDENT>for idx in default:<EOL><INDENT>chosen[idx] = True<EOL><DEDENT><DEDENT>if isinstance(default, int):<EOL><INDENT>chosen[default] = True<EOL><DEDENT>while True:<EOL><INDENT>try:<EOL><INDENT>index = one(prompt, *get_verbose_options(verbose_options, chosen), return_index=True, idx=index)<EOL><DEDENT>except QuestionnaireGoBack:<EOL><INDENT>if any(chosen):<EOL><INDENT>raise QuestionnaireGoBack(<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>raise QuestionnaireGoBack<EOL><DEDENT><DEDENT>if index == len(options):<EOL><INDENT>return get_options(options, chosen)<EOL><DEDENT>chosen[index] = not chosen[index]<EOL><DEDENT>", "docstring": "Calls `pick` in a while loop to allow user to pick many\n    options. Returns a list of chosen options.", "id": "f636:m4"}
{"signature": "def register(key='<STR_LIT>'):", "body": "def decorate(func):<EOL><INDENT>prompters[key] = func<EOL>return func<EOL><DEDENT>return decorate<EOL>", "docstring": "Add decorated functions to prompters dict.", "id": "f636:m2"}
{"signature": "def delete_file_on_return(path):", "body": "def decorator(func):<EOL><INDENT>@functools.wraps(func)<EOL>def wrapper(*args, **kwds):<EOL><INDENT>try:<EOL><INDENT>return func(*args, **kwds)<EOL><DEDENT>finally:<EOL><INDENT>try:<EOL><INDENT>os.remove(path)<EOL><DEDENT>except (IOError, OSError):<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>return wrapper<EOL><DEDENT>return decorator<EOL>", "docstring": "Decorator to run function at `path`.\n\n:type path: str\n:arg path: relative path from repository root (e.g., 'pyqode' or 'test').", "id": "f656:m1"}
{"signature": "def spam(self):", "body": "pass<EOL>", "docstring": "Spam", "id": "f660:c0:m1"}
{"signature": "def open_file(self, path, line=None):", "body": "editor = None<EOL>if path:<EOL><INDENT>interpreter, pyserver, args = self._get_backend_parameters()<EOL>editor = self.tabWidget.open_document(<EOL>path, None, interpreter=interpreter, server_script=pyserver,<EOL>args=args)<EOL>if editor:<EOL><INDENT>self.setup_editor(editor)<EOL><DEDENT>self.recent_files_manager.open_file(path)<EOL>self.menu_recents.update_actions()<EOL><DEDENT>if line is not None:<EOL><INDENT>TextHelper(self.tabWidget.current_widget()).goto_line(line)<EOL><DEDENT>return editor<EOL>", "docstring": "Creates a new GenericCodeEdit, opens the requested file and adds it\nto the tab widget.\n\n:param path: Path of the file to open\n\n:return The opened editor if open succeeded.", "id": "f666:c0:m8"}
{"signature": "def setup_mnu_edit(self, editor):", "body": "self.menuEdit.addActions(editor.actions())<EOL>self.menuEdit.addSeparator()<EOL>self.setup_mnu_style(editor)<EOL>", "docstring": "Setup the edit menu for the current editor. We show the current editor\ncontext menu and a menu to change the python interpreter.\n\n:param editor: new editor", "id": "f666:c0:m14"}
{"signature": "def on_goto_out_of_doc(self, assignment):", "body": "editor = self.open_file(assignment.module_path)<EOL>if editor:<EOL><INDENT>TextHelper(editor).goto_line(assignment.line, assignment.column)<EOL><DEDENT>", "docstring": "Open the a new tab when goto goes out of the current document.\n\n:param assignment: Destination", "id": "f666:c0:m27"}
{"signature": "def setup_recent_files_menu(self):", "body": "self.recent_files_manager = widgets.RecentFilesManager(<EOL>'<STR_LIT>', '<STR_LIT>')<EOL>self.menu_recents = widgets.MenuRecentFiles(<EOL>self.menuFile, title='<STR_LIT>',<EOL>recent_files_manager=self.recent_files_manager)<EOL>self.menu_recents.open_requested.connect(self.open_file)<EOL>self.menuFile.insertMenu(self.actionSave, self.menu_recents)<EOL>self.menuFile.insertSeparator(self.actionSave)<EOL>", "docstring": "Setup the recent files menu and manager", "id": "f666:c0:m5"}
{"signature": "def on_open(self):", "body": "filename, filter = QtWidgets.QFileDialog.getOpenFileName(self, '<STR_LIT>')<EOL>if filename:<EOL><INDENT>self.open_file(filename)<EOL><DEDENT>self.actionRun.setEnabled(True)<EOL>self.actionConfigure_run.setEnabled(True)<EOL>", "docstring": "Shows an open file dialog and open the file if the dialog was\naccepted.", "id": "f666:c0:m11"}
{"signature": "def closeEvent(self, QCloseEvent):", "body": "self.tabWidget.closeEvent(QCloseEvent)<EOL>", "docstring": "Delegates the close event to the tabWidget to be sure we do not quit\nthe application while there are some still some unsaved tabs.", "id": "f666:c0:m6"}
{"signature": "def on_run(self):", "body": "filename = self.tabWidget.current_widget().file.path<EOL>wd = os.path.dirname(filename)<EOL>args = Settings().get_run_config_for_file(filename)<EOL>self.interactiveConsole.start_process(<EOL>Settings().interpreter, args=[filename] + args, cwd=wd)<EOL>self.dockWidget.show()<EOL>self.actionRun.setEnabled(False)<EOL>self.actionConfigure_run.setEnabled(False)<EOL>", "docstring": "Run the current current script", "id": "f666:c0:m26"}
{"signature": "def setup_actions(self):", "body": "self.actionOpen.triggered.connect(self.on_open)<EOL>self.actionNew.triggered.connect(self.on_new)<EOL>self.actionSave.triggered.connect(self.on_save)<EOL>self.actionSave_as.triggered.connect(self.on_save_as)<EOL>self.actionQuit.triggered.connect(<EOL>QtWidgets.QApplication.instance().quit)<EOL>self.tabWidget.current_changed.connect(self.on_current_tab_changed)<EOL>self.tabWidget.last_tab_closed.connect(self.on_last_tab_closed)<EOL>self.actionAbout.triggered.connect(self.on_about)<EOL>self.actionRun.triggered.connect(self.on_run)<EOL>self.interactiveConsole.process_finished.connect(<EOL>self.on_process_finished)<EOL>self.actionConfigure_run.triggered.connect(self.on_configure_run)<EOL>", "docstring": "Connects slots to signals", "id": "f666:c0:m3"}
{"signature": "def on_panel_state_changed(self):", "body": "action = self.sender()<EOL>action.panel.enabled = action.isChecked()<EOL>action.panel.setVisible(action.isChecked())<EOL>", "docstring": "Enable disable the selected panel.", "id": "f666:c0:m23"}
{"signature": "@property<EOL><INDENT>def run_configs(self):<DEDENT>", "body": "string = self.settings.value('<STR_LIT>', '<STR_LIT:{}>')<EOL>return json.loads(string)<EOL>", "docstring": "Returns the dictionary of run configurations. A run configuration is\njust a list of arguments to append to the run command.\n\nThis is internally stored as a json object", "id": "f671:c0:m3"}
{"signature": "def detect_fold_level(self, prev_block, block):", "body": "<EOL>lvl = super(PythonFoldDetector, self).detect_fold_level(<EOL>prev_block, block)<EOL>prev_lvl = TextBlockHelper.get_fold_lvl(prev_block)<EOL>if prev_block and lvl > prev_lvl and not (<EOL>self._strip_comments(prev_block).endswith('<STR_LIT::>')):<EOL><INDENT>lvl = prev_lvl<EOL><DEDENT>lvl = self._handle_docstrings(block, lvl, prev_block)<EOL>lvl = self._handle_imports(block, lvl, prev_block)<EOL>return lvl<EOL>", "docstring": "Perfoms fold level detection for current block (take previous block\ninto account).\n\n:param prev_block: previous block, None if `block` is the first block.\n:param block: block to analyse.\n:return: block fold level", "id": "f694:c0:m3"}
{"signature": "def any(name, alternates):", "body": "return \"<STR_LIT>\" % name + \"<STR_LIT:|>\".join(alternates) + \"<STR_LIT:)>\"<EOL>", "docstring": "Return a named group pattern matching list of alternates.", "id": "f695:m0"}
{"signature": "def indent(self):", "body": "if not self.tab_always_indent:<EOL><INDENT>super(PyIndenterMode, self).indent()<EOL><DEDENT>else:<EOL><INDENT>cursor = self.editor.textCursor()<EOL>assert isinstance(cursor, QtGui.QTextCursor)<EOL>if cursor.hasSelection():<EOL><INDENT>self.indent_selection(cursor)<EOL><DEDENT>else:<EOL><INDENT>tab_len = self.editor.tab_length<EOL>cursor.beginEditBlock()<EOL>if self.editor.use_spaces_instead_of_tabs:<EOL><INDENT>cursor.insertText(tab_len * \"<STR_LIT:U+0020>\")<EOL><DEDENT>else:<EOL><INDENT>cursor.insertText('<STR_LIT:\\t>')<EOL><DEDENT>cursor.endEditBlock()<EOL>self.editor.setTextCursor(cursor)<EOL><DEDENT><DEDENT>", "docstring": "Performs an indentation", "id": "f698:c0:m3"}
{"signature": "def on_state_changed(self, state):", "body": "if state:<EOL><INDENT>self.action.triggered.connect(self.comment)<EOL>self.editor.add_action(self.action, sub_menu='<STR_LIT>')<EOL>if '<STR_LIT>' in os.environ['<STR_LIT>'].lower():<EOL><INDENT>self.editor.key_pressed.connect(self.on_key_pressed)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.editor.remove_action(self.action, sub_menu='<STR_LIT>')<EOL>self.action.triggered.disconnect(self.comment)<EOL>if '<STR_LIT>' in os.environ['<STR_LIT>'].lower():<EOL><INDENT>self.editor.key_pressed.disconnect(self.on_key_pressed)<EOL><DEDENT><DEDENT>", "docstring": "Called when the mode is activated/deactivated", "id": "f700:c0:m1"}
{"signature": "def update_terminal_colors(self):", "body": "self.color_scheme = self.create_color_scheme(<EOL>background=self.syntax_highlighter.color_scheme.background,<EOL>foreground=self.syntax_highlighter.color_scheme.formats['<STR_LIT>'].foreground().color())<EOL>", "docstring": "Update terminal color scheme based on the pygments color scheme colors", "id": "f708:c2:m3"}
{"signature": "def mousePressEvent(self, e):", "body": "super(PyInteractiveConsole, self).mousePressEvent(e)<EOL>cursor = self.cursorForPosition(e.pos())<EOL>p = cursor.positionInBlock()<EOL>usd = cursor.block().userData()<EOL>if usd and usd.start_pos_in_block <= p <= usd.end_pos_in_block:<EOL><INDENT>if e.button() == QtCore.Qt.LeftButton:<EOL><INDENT>self.open_file_requested.emit(usd.filename, usd.line)<EOL><DEDENT><DEDENT>", "docstring": "Emits open_file_requested if the press event occured  over\na file location string.", "id": "f709:c0:m4"}
{"signature": "def mouseMoveEvent(self, e):", "body": "super(PyInteractiveConsole, self).mouseMoveEvent(e)<EOL>cursor = self.cursorForPosition(e.pos())<EOL>assert isinstance(cursor, QtGui.QTextCursor)<EOL>p = cursor.positionInBlock()<EOL>usd = cursor.block().userData()<EOL>if usd and usd.start_pos_in_block <= p <= usd.end_pos_in_block:<EOL><INDENT>if QtWidgets.QApplication.overrideCursor() is None:<EOL><INDENT>QtWidgets.QApplication.setOverrideCursor(<EOL>QtGui.QCursor(QtCore.Qt.PointingHandCursor))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if QtWidgets.QApplication.overrideCursor() is not None:<EOL><INDENT>QtWidgets.QApplication.restoreOverrideCursor()<EOL><DEDENT><DEDENT>", "docstring": "Extends mouseMoveEvent to display a pointing hand cursor when the\nmouse cursor is over a file location", "id": "f709:c0:m3"}
{"signature": "def icon_from_typename(name, icon_type):", "body": "ICONS = {<EOL>'<STR_LIT>': ICON_CLASS,<EOL>'<STR_LIT>': ICON_NAMESPACE,<EOL>'<STR_LIT>': ICON_VAR,<EOL>'<STR_LIT>': ICON_VAR,<EOL>'<STR_LIT>': ICON_VAR,<EOL>'<STR_LIT>': ICON_VAR,<EOL>'<STR_LIT>': ICON_VAR,<EOL>'<STR_LIT>': ICON_NAMESPACE,<EOL>'<STR_LIT>': ICON_KEYWORD,<EOL>'<STR_LIT>': ICON_VAR,<EOL>'<STR_LIT>': ICON_VAR,<EOL>'<STR_LIT>': ICON_VAR,<EOL>'<STR_LIT>': ICON_VAR,<EOL>'<STR_LIT>': ICON_VAR,<EOL>'<STR_LIT>': ICON_VAR,<EOL>'<STR_LIT>': ICON_FUNC,<EOL>'<STR_LIT>': ICON_FUNC,<EOL>'<STR_LIT>': ICON_FUNC_PRIVATE,<EOL>'<STR_LIT>': ICON_FUNC_PROTECTED<EOL>}<EOL>ret_val = None<EOL>icon_type = icon_type.upper()<EOL>if hasattr(name, \"<STR_LIT:string>\"):<EOL><INDENT>name = name.string<EOL><DEDENT>if icon_type == \"<STR_LIT>\" or icon_type == \"<STR_LIT>\":<EOL><INDENT>icon_type = \"<STR_LIT>\"<EOL><DEDENT>if icon_type == \"<STR_LIT>\" or icon_type == \"<STR_LIT>\":<EOL><INDENT>if name.startswith(\"<STR_LIT>\"):<EOL><INDENT>icon_type += \"<STR_LIT>\"<EOL><DEDENT>elif name.startswith(\"<STR_LIT:_>\"):<EOL><INDENT>icon_type += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>if icon_type in ICONS:<EOL><INDENT>ret_val = ICONS[icon_type]<EOL><DEDENT>elif icon_type:<EOL><INDENT>_logger().warning(\"<STR_LIT>\", icon_type)<EOL><DEDENT>return ret_val<EOL>", "docstring": "Returns the icon resource filename that corresponds to the given typename.\n\n:param name: name of the completion. Use to make the distinction between\n    public and private completions (using the count of starting '_')\n:pram typename: the typename reported by jedi\n\n:returns: The associate icon resource filename or None.", "id": "f714:m8"}
{"signature": "def run_pyflakes(request_data):", "body": "global prev_results<EOL>from pyflakes import checker<EOL>import _ast<EOL>WARNING = <NUM_LIT:1><EOL>ERROR = <NUM_LIT:2><EOL>ret_val = []<EOL>code = request_data['<STR_LIT:code>']<EOL>path = request_data['<STR_LIT:path>']<EOL>encoding = request_data['<STR_LIT>']<EOL>if not encoding:<EOL><INDENT>encoding = '<STR_LIT:utf-8>'<EOL><DEDENT>if not path:<EOL><INDENT>path = os.path.join(tempfile.gettempdir(), '<STR_LIT>')<EOL><DEDENT>if not code:<EOL><INDENT>return []<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>tree = compile(code.encode(encoding), path, \"<STR_LIT>\",<EOL>_ast.PyCF_ONLY_AST)<EOL><DEDENT>except SyntaxError as value:<EOL><INDENT>msg = '<STR_LIT>' % value.args[<NUM_LIT:0>]<EOL>(lineno, offset, text) = value.lineno - <NUM_LIT:1>, value.offset, value.text<EOL>if text is None:<EOL><INDENT>_logger().warning(\"<STR_LIT>\",<EOL>path)<EOL><DEDENT>else:<EOL><INDENT>ret_val.append((msg, ERROR, lineno))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>w = checker.Checker(tree, os.path.split(path)[<NUM_LIT:1>])<EOL>w.messages.sort(key=lambda m: m.lineno)<EOL>for message in w.messages:<EOL><INDENT>msg = \"<STR_LIT>\" % str(message).split('<STR_LIT::>')[-<NUM_LIT:1>].strip()<EOL>line = message.lineno - <NUM_LIT:1><EOL>status = WARNINGif message.__class__ not in PYFLAKES_ERROR_MESSAGESelse ERROR<EOL>ret_val.append((msg, status, line))<EOL><DEDENT><DEDENT><DEDENT>prev_results = ret_val<EOL>return ret_val<EOL>", "docstring": "Worker that run a frosted (the fork of pyflakes) code analysis on the\ncurrent editor text.", "id": "f714:m7"}
{"signature": "@staticmethod<EOL><INDENT>def complete(code, line, column, path, encoding, prefix):<DEDENT>", "body": "ret_val = []<EOL>try:<EOL><INDENT>script = jedi.Script(code, line + <NUM_LIT:1>, column, path, encoding)<EOL>completions = script.completions()<EOL>print('<STR_LIT>' % completions)<EOL><DEDENT>except jedi.NotFoundError:<EOL><INDENT>completions = []<EOL><DEDENT>for completion in completions:<EOL><INDENT>ret_val.append({<EOL>'<STR_LIT:name>': completion.name,<EOL>'<STR_LIT>': icon_from_typename(<EOL>completion.name, completion.type),<EOL>'<STR_LIT>': completion.description})<EOL><DEDENT>return ret_val<EOL>", "docstring": "Completes python code using `jedi`_.\n\n:returns: a list of completion.", "id": "f714:c0:m0"}
{"signature": "def calltips(request_data):", "body": "code = request_data['<STR_LIT:code>']<EOL>line = request_data['<STR_LIT>'] + <NUM_LIT:1><EOL>column = request_data['<STR_LIT>']<EOL>path = request_data['<STR_LIT:path>']<EOL>encoding = '<STR_LIT:utf-8>'<EOL>script = jedi.Script(code, line, column, path, encoding)<EOL>signatures = script.call_signatures()<EOL>for sig in signatures:<EOL><INDENT>results = (str(sig.module_name), str(sig.name),<EOL>[p.description for p in sig.params], sig.index,<EOL>sig.bracket_start, column)<EOL>return results<EOL><DEDENT>return []<EOL>", "docstring": "Worker that returns a list of calltips.\n\nA calltips is a tuple made of the following parts:\n  - module_name: name of the module of the function invoked\n  - call_name: name of the function that is being called\n  - params: the list of parameter names.\n  - index: index of the current parameter\n  - bracket_start\n\n:returns tuple(module_name, call_name, params)", "id": "f714:m1"}
{"signature": "def acquire(self):", "body": "try:<EOL><INDENT>pidfile = open(self._pidfile, \"<STR_LIT:a>\")<EOL><DEDENT>except IOError as err:<EOL><INDENT>raise SystemExit(err)<EOL><DEDENT>try:<EOL><INDENT>fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)<EOL><DEDENT>except IOError:<EOL><INDENT>raise SystemExit('<STR_LIT>' + self._pidfile)<EOL><DEDENT>pidfile.seek(<NUM_LIT:0>)<EOL>pidfile.truncate()<EOL>pidfile.write(str(os.getpid()) + '<STR_LIT:\\n>')<EOL>pidfile.flush()<EOL>self.pidfile = pidfile<EOL>atexit.register(self.release)<EOL>", "docstring": "Acquire the pidfile.\n\n        Create the pidfile, lock it, write the pid into it\n        and register the release with atexit.\n\n\n        :return: None\n        :raise: SystemExit", "id": "f731:c0:m3"}
{"signature": "@property<EOL><INDENT>def _files_preserve(self):<DEDENT>", "body": "result = set()<EOL>files = [] if not self.files_preserve else self.files_preserve<EOL>files.extend([self.stdin, self.stdout, self.stderr])<EOL>for item in files:<EOL><INDENT>if hasattr(item, '<STR_LIT>'):<EOL><INDENT>result.add(item.fileno())<EOL><DEDENT>if isinstance(item, int):<EOL><INDENT>result.add(item)<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "create a set of protected files\n\n        create a set of files, based on self.files_preserve and\n        self.stdin, self,stdout and self.stderr, that should not get\n        closed while daemonizing.\n\n        :return: set", "id": "f733:c1:m4"}
{"signature": "def parent_is_inet():", "body": "result = False<EOL>sock = socket.fromfd(<EOL>sys.__stdin__.fileno(),<EOL>socket.AF_INET,<EOL>socket.SOCK_RAW)<EOL>try:<EOL><INDENT>sock.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)<EOL>result = True<EOL><DEDENT>except (OSError, socket.error) as err:<EOL><INDENT>if not err.args[<NUM_LIT:0>] == errno.ENOTSOCK:<EOL><INDENT>result = True<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Check if parent is inet\n\n    Check if our parent seems ot be a superserver, aka inetd/xinetd.\n\n    This is done by checking if sys.__stdin__ is a network socket.\n\n    :return: bool", "id": "f733:m3"}
{"signature": "@property<EOL><INDENT>def _signal_handler_map(self):<DEDENT>", "body": "result = {}<EOL>for signum, handler in self.signal_map.items():<EOL><INDENT>result[signum] = self._get_signal_handler(handler)<EOL><DEDENT>return result<EOL>", "docstring": "Create the signal handler map\n\n        create a dictionary with signal:handler mapping based on\n        self.signal_map\n\n        :return: dict", "id": "f733:c1:m5"}
{"signature": "def open(self):", "body": "if self.is_open:<EOL><INDENT>return<EOL><DEDENT>try:<EOL><INDENT>os.chdir(self.working_directory)<EOL>if self.chroot_directory:<EOL><INDENT>os.chroot(self.chroot_directory)<EOL><DEDENT>os.setgid(self.gid)<EOL>os.setuid(self.uid)<EOL>os.umask(self.umask)<EOL><DEDENT>except OSError as err:<EOL><INDENT>raise DaemonError('<STR_LIT>'<EOL>.format(err))<EOL><DEDENT>if self.prevent_core:<EOL><INDENT>try:<EOL><INDENT>resource.setrlimit(resource.RLIMIT_CORE, (<NUM_LIT:0>, <NUM_LIT:0>))<EOL><DEDENT>except Exception as err:<EOL><INDENT>raise DaemonError('<STR_LIT>'<EOL>.format(err))<EOL><DEDENT><DEDENT>if self.detach_process:<EOL><INDENT>try:<EOL><INDENT>if os.fork() > <NUM_LIT:0>:<EOL><INDENT>os._exit(<NUM_LIT:0>)<EOL><DEDENT><DEDENT>except OSError as err:<EOL><INDENT>raise DaemonError('<STR_LIT>'.format(err))<EOL><DEDENT>os.setsid()<EOL>try:<EOL><INDENT>if os.fork() > <NUM_LIT:0>:<EOL><INDENT>os._exit(<NUM_LIT:0>)<EOL><DEDENT><DEDENT>except OSError as err:<EOL><INDENT>raise DaemonError('<STR_LIT>'.format(err))<EOL><DEDENT><DEDENT>for (signal_number, handler) in self._signal_handler_map.items():<EOL><INDENT>signal.signal(signal_number, handler)<EOL><DEDENT>close_filenos(self._files_preserve)<EOL>redirect_stream(sys.stdin, self.stdin)<EOL>redirect_stream(sys.stdout, self.stdout)<EOL>redirect_stream(sys.stderr, self.stderr)<EOL>if self.pidfile:<EOL><INDENT>self.pidfile.acquire()<EOL><DEDENT>self._is_open = True<EOL>", "docstring": "Daemonize this process\n\n        Do everything that is needed to become a Unix daemon.\n\n        :return: None\n        :raise: DaemonError", "id": "f733:c1:m10"}
{"signature": "def parent_is_init():", "body": "if os.getppid() == <NUM_LIT:1>:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Check if parent is Init\n\n    Check if the parent process is init, or something else that\n    owns PID 1.\n\n    :return: bool", "id": "f733:m2"}
{"signature": "def close_filenos(preserve):", "body": "maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[<NUM_LIT:1>]<EOL>if maxfd == resource.RLIM_INFINITY:<EOL><INDENT>maxfd = <NUM_LIT><EOL><DEDENT>for fileno in range(maxfd):<EOL><INDENT>if fileno not in preserve:<EOL><INDENT>try:<EOL><INDENT>os.close(fileno)<EOL><DEDENT>except OSError as err:<EOL><INDENT>if not err.errno == errno.EBADF:<EOL><INDENT>raise DaemonError(<EOL>'<STR_LIT>'<EOL>.format(fileno, err))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Close unprotected file descriptors\n\n    Close all open file descriptors that are not in preserve.\n\n    If ulimit -nofile is \"unlimited\", all is defined filenos <= 4096,\n    else all is <= the output of resource.getrlimit().\n\n    :param preserve: set with protected files\n    :type preserve: set\n\n    :return: None", "id": "f733:m0"}
{"signature": "def __exit__(self, exc_type, exc_val, exc_tb):", "body": "self.close()<EOL>", "docstring": "Context Handler, wrapping self.close()\n\n        :return: None", "id": "f733:c1:m2"}
{"signature": "@property<EOL><INDENT>def is_open(self):<DEDENT>", "body": "return self._is_open<EOL>", "docstring": "True when this instances open method was called\n\n        :return: bool", "id": "f733:c1:m8"}
{"signature": "def __init__(<EOL>self, chroot_directory=None, working_directory='<STR_LIT:/>',<EOL>umask=<NUM_LIT:0>, uid=None, gid=None, prevent_core=True,<EOL>detach_process=None, files_preserve=None, pidfile=None,<EOL>stdin=None, stdout=None, stderr=None, signal_map=None):", "body": "self._is_open = False<EOL>self._working_directory = None<EOL>self.chroot_directory = chroot_directory<EOL>self.umask = umask<EOL>self.uid = uid if uid else os.getuid()<EOL>self.gid = gid if gid else os.getgid()<EOL>if detach_process is None:<EOL><INDENT>self.detach_process = detach_required()<EOL><DEDENT>else:<EOL><INDENT>self.detach_process = detach_process<EOL><DEDENT>self.signal_map = signal_map if signal_map else default_signal_map()<EOL>self.files_preserve = files_preserve<EOL>self.pidfile = pidfile<EOL>self.prevent_core = prevent_core<EOL>self.stdin = stdin<EOL>self.stdout = stdout<EOL>self.stderr = stderr<EOL>self.working_directory = working_directory<EOL>", "docstring": "Initialize a new Instance", "id": "f733:c1:m0"}
{"signature": "def default_signal_map():", "body": "name_map = {<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': '<STR_LIT>'}<EOL>signal_map = {}<EOL>for name, target in name_map.items():<EOL><INDENT>if hasattr(signal, name):<EOL><INDENT>signal_map[getattr(signal, name)] = target<EOL><DEDENT><DEDENT>return signal_map<EOL>", "docstring": "Create the default signal map for this system.\n\n    :return: dict", "id": "f733:m1"}
{"signature": "def redirect_stream(system, target):", "body": "if target is None:<EOL><INDENT>target_fd = os.open(os.devnull, os.O_RDWR)<EOL><DEDENT>else:<EOL><INDENT>target_fd = target.fileno()<EOL><DEDENT>try:<EOL><INDENT>os.dup2(target_fd, system.fileno())<EOL><DEDENT>except OSError as err:<EOL><INDENT>raise DaemonError('<STR_LIT>'<EOL>.format(system, target, err))<EOL><DEDENT>", "docstring": "Redirect Unix streams\n\n    If None, redirect Stream to /dev/null, else redirect to target.\n\n    :param system: ether sys.stdin, sys.stdout, or sys.stderr\n    :type system: file object\n\n    :param target: File like object, or None\n    :type target: None, File Object\n\n    :return: None\n    :raise: DaemonError", "id": "f733:m5"}
{"signature": "@working_directory.setter<EOL><INDENT>def working_directory(self, value):<DEDENT>", "body": "self._working_directory = value<EOL>", "docstring": "Set working directory\n\n        New value is ignored if already daemonized.\n\n        :param value: str\n        :return:", "id": "f733:c1:m7"}
{"signature": "def create_and_load(self):", "body": "create_db_and_user()<EOL>initialize_database()<EOL>populate_database(self.daily_files, self.quarterly_files)<EOL>", "docstring": "Use this to create a user, a database, and load the database with files.\nIt will take a while to run and will only work if your network allows FTP\nfile transfer.  It also requires you to have a postgres server running locally.", "id": "f737:c0:m1"}
{"signature": "def retrieve_document(file_path, directory='<STR_LIT>'):", "body": "ftp = FTP('<STR_LIT>', timeout=None)<EOL>ftp.login()<EOL>name = file_path.replace('<STR_LIT:/>', '<STR_LIT:_>')<EOL>if not os.path.exists(directory):<EOL><INDENT>os.makedirs(directory)<EOL><DEDENT>with tempfile.TemporaryFile() as temp:<EOL><INDENT>ftp.retrbinary('<STR_LIT>' % file_path, temp.write)<EOL>temp.seek(<NUM_LIT:0>)<EOL>with open('<STR_LIT>'.format(directory, name), '<STR_LIT>') as f:<EOL><INDENT>f.write(temp.read().decode(\"<STR_LIT:utf-8>\"))<EOL><DEDENT>f.closed<EOL>records = temp<EOL>retry = False<EOL><DEDENT>ftp.close()<EOL>", "docstring": "This function takes a file path beginning with edgar and stores the form in a directory.\nThe default directory is sec_filings but can be changed through a keyword argument.", "id": "f740:m14"}
{"signature": "def generate(grammar=None, num=<NUM_LIT:1>, output=sys.stdout, max_recursion=<NUM_LIT:10>, seed=None):", "body": "if seed is not None:<EOL><INDENT>gramfuzz.rand.seed(seed)<EOL><DEDENT>fuzzer = gramfuzz.GramFuzzer()<EOL>fuzzer.load_grammar(grammar)<EOL>cat_group = os.path.basename(grammar).replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>results = fuzzer.gen(cat_group=cat_group, num=num, max_recursion=max_recursion)<EOL>for res in results:<EOL><INDENT>output.write(res)<EOL><DEDENT>", "docstring": "Load and generate ``num`` number of top-level rules from the specified grammar.\n\n    :param list grammar: The grammar file to load and generate data from\n    :param int num: The number of times to generate data\n    :param output: The output destination (an open, writable stream-type object. default=``sys.stdout``)\n    :param int max_recursion: The maximum reference-recursion when generating data (default=``10``)\n    :param int seed: The seed to initialize the PRNG with. If None, will not initialize it.", "id": "f748:m0"}
{"signature": "def make_present_participles(verbs):", "body": "res = []<EOL>for verb in verbs:<EOL><INDENT>parts = verb.split()<EOL>if parts[<NUM_LIT:0>].endswith(\"<STR_LIT:e>\"):<EOL><INDENT>parts[<NUM_LIT:0>] = parts[<NUM_LIT:0>][:-<NUM_LIT:1>] + \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>parts[<NUM_LIT:0>] = parts[<NUM_LIT:0>] + \"<STR_LIT>\"<EOL><DEDENT>res.append(\"<STR_LIT:U+0020>\".join(parts))<EOL><DEDENT>return res<EOL>", "docstring": "Make the list of verbs into present participles\n\n    E.g.:\n\n        empower -> empowering\n        drive -> driving", "id": "f751:m1"}
{"signature": "def __init__(self, refname, **kwargs):", "body": "self.refname = refname<EOL>self.cat = kwargs.setdefault(\"<STR_LIT>\", self.cat)<EOL>self.failsafe = kwargs.setdefault(\"<STR_LIT>\", self.failsafe)<EOL>self.fuzzer = GramFuzzer.instance()<EOL>", "docstring": "Create a new ``Ref`` instance\n\n        :param str refname: The name of the rule to reference\n        :param str cat: The name of the category the rule is defined in", "id": "f756:c13:m0"}
{"signature": "def __init__(self, *values, **kwargs):", "body": "<EOL>self.shortest_vals = None<EOL>self.values = list(values)<EOL>if \"<STR_LIT>\" in kwargs and len(values) == <NUM_LIT:0>:<EOL><INDENT>self.values = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>self.rolling = kwargs.setdefault(\"<STR_LIT>\", False)<EOL>", "docstring": "Create a new ``Or`` instance with the provide values\n\n        :param list values: The list of values to choose randomly from", "id": "f756:c10:m0"}
{"signature": "def __init__(self, value=None, **kwargs):", "body": "super(String, self).__init__(value, **kwargs)<EOL>self.charset = kwargs.setdefault(\"<STR_LIT>\", self.charset)<EOL>", "docstring": "Create a new instance of the ``String`` field.\n\n        :param value: The hard-coded value of the String field\n        :param int min: The minimum size of the String when built\n        :param int max: The maximum size of the String when built\n        :param str charset: The character-set to be used when building the string", "id": "f756:c6:m0"}
{"signature": "def __init__(self, name, *values, **options):", "body": "self.name = name<EOL>self.options = options<EOL>self.values = list(values)<EOL>self.sep = self.options.setdefault(\"<STR_LIT>\", self.sep)<EOL>self.cat = self.options.setdefault(\"<STR_LIT>\", self.cat)<EOL>self.no_prune = self.options.setdefault(\"<STR_LIT>\", self.no_prune)<EOL>self.fuzzer = GramFuzzer.instance()<EOL>frame,mod_path,_,_,_,_ = inspect.stack()[<NUM_LIT:1>]<EOL>module_name = os.path.basename(mod_path).replace(\"<STR_LIT>\", \"<STR_LIT>\").replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>if \"<STR_LIT>\" in frame.f_locals:<EOL><INDENT>self.fuzzer.cat_group_defaults[module_name] = frame.f_locals[\"<STR_LIT>\"]<EOL><DEDENT>self.fuzzer.add_definition(self.cat, self.name, self, no_prune=self.no_prune, gram_file=module_name)<EOL>", "docstring": "Create a new rule definition. Simply instantiating a new rule definition\n        will add it to the current ``GramFuzzer`` instance.\n\n        :param str name: The name of the rule being defined\n        :param list values: The list of values that define the value of the rule\n            (will be concatenated when built)\n        :param str cat: The category to create the rule in (default=``\"default\"``).\n        :param bool no_prune: If this rule should not be pruned *EVEN IF* it is found to be\n            unreachable (default=``False``)", "id": "f756:c12:m0"}
{"signature": "def __init__(self, *values, **kwargs):", "body": "super(Q, self).__init__(*values, **kwargs)<EOL>self.escape = kwargs.setdefault(\"<STR_LIT>\", self.escape)<EOL>self.html_js_escape = kwargs.setdefault(\"<STR_LIT>\", self.html_js_escape)<EOL>self.quote = kwargs.setdefault(\"<STR_LIT>\", self.quote)<EOL>", "docstring": "Create the new ``Quote`` instance\n\n        :param bool escape: Whether or not quoted data should be escaped (default=``False``)\n        :param bool html_js_escape: Whether or not quoted data should be html-javascript escaped (default=``False``)\n        :param str quote: The quote character to be used if ``escape`` and ``html_js_escape`` are ``False``", "id": "f756:c9:m0"}
{"signature": "def _odds_val(self):", "body": "if len(self.odds) == <NUM_LIT:0>:<EOL><INDENT>self.odds = [(<NUM_LIT>, [self.min, self.max])]<EOL><DEDENT>rand_val = rand.random()<EOL>total = <NUM_LIT:0><EOL>for percent,v in self.odds:<EOL><INDENT>if total <= rand_val < total+percent:<EOL><INDENT>found_v = v<EOL>break<EOL><DEDENT>total += percent<EOL><DEDENT>res = None<EOL>if isinstance(v, (tuple,list)):<EOL><INDENT>rand_func = rand.randfloat if type(v[<NUM_LIT:0>]) is float else rand.randint<EOL>if len(v) == <NUM_LIT:2>:<EOL><INDENT>res = rand_func(v[<NUM_LIT:0>], v[<NUM_LIT:1>])<EOL><DEDENT>elif len(v) == <NUM_LIT:1>:<EOL><INDENT>res = v[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>res = v<EOL><DEDENT>return res<EOL>", "docstring": "Determine a new random value derived from the\n        defined :any:`gramfuzz.fields.Field.odds` value.\n\n        :returns: The derived value", "id": "f756:c1:m2"}
{"signature": "def seed(val):", "body": "RANDOM.seed(val)<EOL>", "docstring": "Set the seed for any subsequent random values/choices\n\n    :param val: The random seed value", "id": "f757:m0"}
{"signature": "def randint(a, b=None):", "body": "<EOL>if b is None:<EOL><INDENT>return _randint(<NUM_LIT:0>, a-<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>return _randint(a, b-<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Return a random integer\n\n    :param int a: Either the minimum value (inclusive) if ``b`` is set, or\n    the maximum value if ``b`` is not set (non-inclusive, in which case the minimum\n    is implicitly 0)\n    :param int b: The maximum value to generate (non-inclusive)\n    :returns: int", "id": "f757:m1"}
{"signature": "@classmethod<EOL><INDENT>def instance(cls):<DEDENT>", "body": "if cls.__instance__ is None:<EOL><INDENT>cls()<EOL><DEDENT>return cls.__instance__<EOL>", "docstring": "Return the singleton instance of the ``GramFuzzer``", "id": "f758:c0:m0"}
{"signature": "def add_to_cat_group(self, cat, cat_group, def_name):", "body": "self.cat_groups.setdefault(cat, {}).setdefault(cat_group, deque()).append(def_name)<EOL>", "docstring": "Associate the provided rule definition name ``def_name`` with the\n        category group ``cat_group`` in the category ``cat``.\n\n        :param str cat: The category the rule definition was declared in\n        :param str cat_group: The group within the category the rule belongs to\n        :param str def_name: The name of the rule definition", "id": "f758:c0:m12"}
{"signature": "def preprocess_rules(self):", "body": "to_prune = self._find_shortest_paths()<EOL>self._prune_rules(to_prune)<EOL>self._rules_processed = True<EOL>", "docstring": "Calculate shortest reference-paths of each rule (and Or field),\n        and prune all unreachable rules.", "id": "f758:c0:m4"}
{"signature": "def set_max_recursion(self, level):", "body": "import gramfuzz.fields<EOL>gramfuzz.fields.Ref.max_recursion = level<EOL>", "docstring": "Set the maximum reference-recursion depth (not the Python system maximum stack\n        recursion level). This controls how many levels deep of nested references are allowed\n        before gramfuzz attempts to generate the shortest (reference-wise) rules possible.\n\n        :param int level: The new maximum reference level", "id": "f758:c0:m3"}
{"signature": "def load_grammar(self, path):", "body": "if not os.path.exists(path):<EOL><INDENT>raise Exception(\"<STR_LIT>\".format(path))<EOL><DEDENT>grammar_path = os.path.dirname(path)<EOL>if grammar_path not in sys.path:<EOL><INDENT>sys.path.append(grammar_path)<EOL><DEDENT>with open(path, \"<STR_LIT:r>\") as f:<EOL><INDENT>data = f.read()<EOL><DEDENT>code = compile(data, path, \"<STR_LIT>\")<EOL>locals_ = {\"<STR_LIT>\": self, \"<STR_LIT>\": path}<EOL>exec(code, locals_)<EOL>if \"<STR_LIT>\" in locals_:<EOL><INDENT>cat_group = os.path.basename(path).replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>self.set_cat_group_top_level_cat(cat_group, locals_[\"<STR_LIT>\"])<EOL><DEDENT>", "docstring": "Load a grammar file (python file containing grammar definitions) by\n        file path. When loaded, the global variable ``GRAMFUZZER`` will be set\n        within the module. This is not always needed, but can be useful.\n\n        :param str path: The path to the grammar file", "id": "f758:c0:m2"}
{"signature": "def post_revert(self, cat, res, total_num, num, info):", "body": "if self._staged_defs is None:<EOL><INDENT>return<EOL><DEDENT>for cat,def_name,def_value in self._staged_defs:<EOL><INDENT>self.defs.setdefault(cat, {}).setdefault(def_name, deque()).append(def_value)<EOL><DEDENT>self._staged_defs = None<EOL>", "docstring": "Commit any staged rule definition changes (rule generation went\n        smoothly).", "id": "f758:c0:m16"}
{"signature": "def revert(self, info=None):", "body": "self._staged_defs = None<EOL>", "docstring": "Revert after a single def errored during generate (throw away all\n        staged rule definition changes)", "id": "f758:c0:m17"}
{"signature": "@property<EOL><INDENT>def safe(self):<DEDENT>", "body": "return self._dumper_class is yaml.SafeDumper<EOL>", "docstring": "Returns ``True`` if the safe mode is being used with (de)serialization.", "id": "f767:c0:m4"}
{"signature": "def default_decoder(self, obj):", "body": "typename, marshalled_state = self.unwrap_callback(obj)<EOL>if typename is None:<EOL><INDENT>return obj<EOL><DEDENT>try:<EOL><INDENT>cls, unmarshaller = self.serializer.unmarshallers[typename]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise LookupError('<STR_LIT>'.format(typename)) from None<EOL><DEDENT>if cls is not None:<EOL><INDENT>instance = cls.__new__(cls)<EOL>unmarshaller(instance, marshalled_state)<EOL>return instance<EOL><DEDENT>else:<EOL><INDENT>return unmarshaller(marshalled_state)<EOL><DEDENT>", "docstring": "Handle a dict that might contain a wrapped state for a custom type.", "id": "f771:c0:m2"}
{"signature": "@abstractmethod<EOL><INDENT>def serialize(self, obj) -> bytes:<DEDENT>", "body": "", "docstring": "Serialize a Python object into bytes.", "id": "f773:c0:m0"}
{"signature": "def register_custom_type(<EOL>self, cls: type, marshaller: Optional[Callable[[Any], Any]] = default_marshaller,<EOL>unmarshaller: Union[Callable[[Any, Any], None],<EOL>Callable[[Any], Any], None] = default_unmarshaller, *,<EOL>typename: str = None, wrap_state: bool = True) -> None:", "body": "assert check_argument_types()<EOL>typename = typename or qualified_name(cls)<EOL>if marshaller:<EOL><INDENT>self.marshallers[cls] = typename, marshaller, wrap_state<EOL>self.custom_type_codec.register_object_encoder_hook(self)<EOL><DEDENT>if unmarshaller and self.custom_type_codec is not None:<EOL><INDENT>target_cls = cls  <EOL>if len(signature(unmarshaller).parameters) == <NUM_LIT:1>:<EOL><INDENT>target_cls = None<EOL><DEDENT>self.unmarshallers[typename] = target_cls, unmarshaller<EOL>self.custom_type_codec.register_object_decoder_hook(self)<EOL><DEDENT>", "docstring": "Register a marshaller and/or unmarshaller for the given class.\n\nThe state object returned by the marshaller and passed to the unmarshaller can be any\nserializable type. Usually a dictionary mapping of attribute names to values is used.\n\n.. 
warning:: Registering marshallers/unmarshallers for any custom type will override any\n    serializer specific encoding/decoding hooks (respectively) already in place!\n\n:param cls: the class to register\n:param marshaller: a callable that takes the object to be marshalled as the argument and\n      returns a state object\n:param unmarshaller: a callable that either:\n\n    * takes an uninitialized instance of ``cls`` and its state object as arguments and\n      restores the state of the object\n    * takes a state object and returns a new instance of ``cls``\n:param typename: a unique identifier for the type (defaults to the ``module:varname``\n    reference to the class)\n:param wrap_state: ``True`` to wrap the marshalled state before serialization so that it\n    can be recognized later for unmarshalling, ``False`` to serialize it as is", "id": "f773:c1:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def deserialize(self, payload: bytes):<DEDENT>", "body": "", "docstring": "Deserialize bytes into a Python object.", "id": "f773:c0:m1"}
{"signature": "def get_response(word):", "body": "url = URL + \"<STR_LIT>\" + API_KEY + \"<STR_LIT>\" + word + \"<STR_LIT>\"<EOL>return requests.get(url)<EOL>", "docstring": "Fetch translate result from baidu api\n    Args:\n        word(str): query word\n    Returns:\n        (requests.models.Response): response object", "id": "f778:m0"}
{"signature": "@task<EOL>def travis_setpass():", "body": "print(\"<STR_LIT>\")<EOL>", "docstring": "Stores the PyPI password (encrypted) in the .travis.yml file.", "id": "f782:m2"}
{"signature": "def assert_equal_files(self, obtained_fn, expected_fn, fix_callback=lambda x:x, binary=False, encoding=None):", "body": "import os<EOL>from zerotk.easyfs import GetFileContents, GetFileLines<EOL>__tracebackhide__ = True<EOL>import io<EOL>def FindFile(filename):<EOL><INDENT>data_filename = self.get_filename(filename)<EOL>if os.path.isfile(data_filename):<EOL><INDENT>return data_filename<EOL><DEDENT>if os.path.isfile(filename):<EOL><INDENT>return filename<EOL><DEDENT>from ._exceptions import MultipleFilesNotFound<EOL>raise MultipleFilesNotFound([filename, data_filename])<EOL><DEDENT>obtained_fn = FindFile(obtained_fn)<EOL>expected_fn = FindFile(expected_fn)<EOL>if binary:<EOL><INDENT>obtained_lines = GetFileContents(obtained_fn, binary=True)<EOL>expected_lines = GetFileContents(expected_fn, binary=True)<EOL>assert obtained_lines == expected_lines<EOL><DEDENT>else:<EOL><INDENT>obtained_lines = fix_callback(GetFileLines(obtained_fn, encoding=encoding))<EOL>expected_lines = GetFileLines(expected_fn, encoding=encoding)<EOL>if obtained_lines != expected_lines:<EOL><INDENT>html_fn = os.path.splitext(obtained_fn)[<NUM_LIT:0>] + '<STR_LIT>'<EOL>html_diff = self._generate_html_diff(<EOL>expected_fn, expected_lines, obtained_fn, obtained_lines)<EOL>with io.open(html_fn, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(html_diff)<EOL><DEDENT>import difflib<EOL>diff = ['<STR_LIT>', obtained_fn, expected_fn]<EOL>diff += ['<STR_LIT>' % html_fn]<EOL>diff += difflib.context_diff(obtained_lines, expected_lines)<EOL>raise AssertionError('<STR_LIT:\\n>'.join(diff) + '<STR_LIT:\\n>')<EOL><DEDENT><DEDENT>", "docstring": "Compare two files contents. 
If the files differ, show the diff and write a nice HTML\ndiff file into the data directory.\n\nSearches for the filenames both inside and outside the data directory (in that order).\n\n:param unicode obtained_fn: basename to obtained file into the data directory, or full path.\n\n:param unicode expected_fn: basename to expected file into the data directory, or full path.\n\n:param bool binary:\n    Thread both files as binary files.\n\n:param unicode encoding:\n    File's encoding. If not None, contents obtained from file will be decoded using this\n    `encoding`.\n\n:param callable fix_callback:\n    A callback to \"fix\" the contents of the obtained (first) file.\n    This callback receives a list of strings (lines) and must also return a list of lines,\n    changed as needed.\n    The resulting lines will be used to compare with the contents of expected_fn.\n\n:param bool binary:\n    .. seealso:: zerotk.easyfs.GetFileContents", "id": "f785:c0:m6"}
{"signature": "def get_data_dir(self):", "body": "return self._data_dir<EOL>", "docstring": ":rtype: unicode\n:returns:\n    Returns the absolute path to data-directory name to use, standardized by StandardizePath.\n\n@remarks:\n    This method triggers the data-directory creation.", "id": "f785:c0:m3"}
{"signature": "def _generate_html_diff(self, expected_fn, expected_lines, obtained_fn, obtained_lines):", "body": "import difflib<EOL>differ = difflib.HtmlDiff()<EOL>return differ.make_file(<EOL>fromlines=expected_lines,<EOL>fromdesc=expected_fn,<EOL>tolines=obtained_lines,<EOL>todesc=obtained_fn,<EOL>)<EOL>", "docstring": "Returns a nice side-by-side diff of the given files, as a string.", "id": "f785:c0:m7"}
{"signature": "def _CopyFileLocal(source_filename, target_filename, copy_symlink=True):", "body": "import shutil<EOL>try:<EOL><INDENT>dir_name = os.path.dirname(target_filename)<EOL>if dir_name and not os.path.isdir(dir_name):<EOL><INDENT>os.makedirs(dir_name)<EOL><DEDENT>if copy_symlink and IsLink(source_filename):<EOL><INDENT>if os.path.isfile(target_filename) or IsLink(target_filename):<EOL><INDENT>DeleteFile(target_filename)<EOL><DEDENT>source_filename = ReadLink(source_filename)<EOL>CreateLink(source_filename, target_filename)<EOL><DEDENT>else:<EOL><INDENT>if sys.platform == '<STR_LIT:win32>':<EOL><INDENT>while IsLink(source_filename):<EOL><INDENT>link = ReadLink(source_filename)<EOL>if os.path.isabs(link):<EOL><INDENT>source_filename = link<EOL><DEDENT>else:<EOL><INDENT>source_filename = os.path.join(os.path.dirname(source_filename), link)<EOL><DEDENT><DEDENT><DEDENT>shutil.copyfile(source_filename, target_filename)<EOL>shutil.copymode(source_filename, target_filename)<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>reraise(e, '<STR_LIT>' % (source_filename, target_filename))<EOL><DEDENT>", "docstring": "Copy a file locally to a directory.\n\n:param unicode source_filename:\n    The filename to copy from.\n\n:param unicode target_filename:\n    The filename to copy to.\n\n:param bool copy_symlink:\n    If True and source_filename is a symlink, target_filename will also be created as\n    a symlink.\n\n    If False, the file being linked will be copied instead.", "id": "f788:m9"}
{"signature": "def CreateFile(filename, contents, eol_style=EOL_STYLE_NATIVE, create_dir=True, encoding=None, binary=False):", "body": "<EOL>if binary:<EOL><INDENT>if isinstance(contents, six.text_type):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if not isinstance(contents, six.text_type):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>contents = _HandleContentsEol(contents, eol_style)<EOL>encoding = encoding or sys.getfilesystemencoding()<EOL>contents = contents.encode(encoding)<EOL>binary = True<EOL><DEDENT>if create_dir:<EOL><INDENT>dirname = os.path.dirname(filename)<EOL>if dirname:<EOL><INDENT>CreateDirectory(dirname)<EOL><DEDENT><DEDENT>from six.moves.urllib.parse import urlparse<EOL>filename_url = urlparse(filename)<EOL>if _UrlIsLocal(filename_url):<EOL><INDENT>with open(filename, '<STR_LIT:wb>') as oss:<EOL><INDENT>oss.write(contents)<EOL><DEDENT><DEDENT>elif filename_url.scheme == '<STR_LIT>':<EOL><INDENT>from ._exceptions import NotImplementedProtocol<EOL>raise NotImplementedProtocol(directory_url.scheme)<EOL><DEDENT>else:<EOL><INDENT>from ._exceptions import NotImplementedProtocol<EOL>raise NotImplementedProtocol(filename_url.scheme)<EOL><DEDENT>return filename<EOL>", "docstring": "Create a file with the given contents.\n\n:param unicode filename:\n    Filename and path to be created.\n\n:param unicode contents:\n    The file contents as a string.\n\n:type eol_style: EOL_STYLE_XXX constant\n:param eol_style:\n    Replaces the EOL by the appropriate EOL depending on the eol_style value.\n    Considers that all content is using only \"\\n\" as EOL.\n\n:param bool create_dir:\n    If True, also creates directories needed in filename's path\n\n:param unicode encoding:\n    Target file's content encoding. Defaults to sys.getfilesystemencoding()\n    Ignored if `binary` = True\n\n:param bool binary:\n    If True, file is created in binary mode. 
In this case, `contents` must be `bytes` and not\n    `unicode`\n\n:return unicode:\n    Returns the name of the file created.\n\n:raises NotImplementedProtocol:\n    If file protocol is not local or FTP\n\n:raises ValueError:\n    If trying to mix unicode `contents` without `encoding`, or `encoding` without\n    unicode `contents`\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m27"}
{"signature": "def CreateMD5(source_filename, target_filename=None):", "body": "if target_filename is None:<EOL><INDENT>target_filename = source_filename + '<STR_LIT>'<EOL><DEDENT>from six.moves.urllib.parse import urlparse<EOL>source_url = urlparse(source_filename)<EOL>if _UrlIsLocal(source_url):<EOL><INDENT>md5_contents = Md5Hex(filename=source_filename)<EOL><DEDENT>else:<EOL><INDENT>md5_contents = Md5Hex(contents=GetFileContents(source_filename, binary=True))<EOL><DEDENT>CreateFile(target_filename, md5_contents)<EOL>", "docstring": "Creates a md5 file from a source file (contents are the md5 hash of source file)\n\n:param unicode source_filename:\n    Path to source file\n\n:type target_filename: unicode or None\n:param target_filename:\n    Name of the target file with the md5 contents\n\n    If None, defaults to source_filename + '.md5'", "id": "f788:m6"}
{"signature": "def MoveDirectory(source_dir, target_dir):", "body": "if not IsDir(source_dir):<EOL><INDENT>from ._exceptions import DirectoryNotFoundError<EOL>raise DirectoryNotFoundError(source_dir)<EOL><DEDENT>if Exists(target_dir):<EOL><INDENT>from ._exceptions import DirectoryAlreadyExistsError<EOL>raise DirectoryAlreadyExistsError(target_dir)<EOL><DEDENT>from six.moves.urllib.parse import urlparse<EOL>source_url = urlparse(source_dir)<EOL>target_url = urlparse(target_dir)<EOL>if _UrlIsLocal(source_url) and _UrlIsLocal(target_url):<EOL><INDENT>import shutil<EOL>shutil.move(source_dir, target_dir)<EOL><DEDENT>elif source_url.scheme == '<STR_LIT>' and target_url.scheme == '<STR_LIT>':<EOL><INDENT>from ._exceptions import NotImplementedProtocol<EOL>raise NotImplementedProtocol(target_url.scheme)<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Moves a directory.\n\n:param unicode source_dir:\n\n:param unicode target_dir:\n\n:raises NotImplementedError:\n    If trying to move anything other than:\n        Local dir -> local dir\n        FTP dir -> FTP dir (same host)", "id": "f788:m20"}
{"signature": "def CreateDirectory(directory):", "body": "from six.moves.urllib.parse import urlparse<EOL>directory_url = urlparse(directory)<EOL>if _UrlIsLocal(directory_url):<EOL><INDENT>if not os.path.exists(directory):<EOL><INDENT>os.makedirs(directory)<EOL><DEDENT>return directory<EOL><DEDENT>elif directory_url.scheme == '<STR_LIT>':<EOL><INDENT>from ._exceptions import NotImplementedProtocol<EOL>raise NotImplementedProtocol(directory_url.scheme)<EOL><DEDENT>else:<EOL><INDENT>from ._exceptions import NotImplementedProtocol<EOL>raise NotImplementedProtocol(directory_url.scheme)<EOL><DEDENT>", "docstring": "Create directory including any missing intermediate directory.\n\n:param unicode directory:\n\n:return unicode|urlparse.ParseResult:\n    Returns the created directory or url (see urlparse).\n\n:raises NotImplementedProtocol:\n    If protocol is not local or FTP.\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m29"}
{"signature": "def IsLink(path):", "body": "_AssertIsLocal(path)<EOL>if sys.platform != '<STR_LIT:win32>':<EOL><INDENT>return os.path.islink(path)<EOL><DEDENT>import jaraco.windows.filesystem<EOL>return jaraco.windows.filesystem.islink(path)<EOL>", "docstring": ":param unicode path:\n    Path being tested\n\n:returns bool:\n    True if `path` is a link", "id": "f788:m35"}
{"signature": "def CopyFiles(source_dir, target_dir, create_target_dir=False, md5_check=False):", "body": "import fnmatch<EOL>if IsDir(source_dir):<EOL><INDENT>source_mask = '<STR_LIT:*>'<EOL><DEDENT>else:<EOL><INDENT>source_dir, source_mask = os.path.split(source_dir)<EOL><DEDENT>if not IsDir(target_dir):<EOL><INDENT>if create_target_dir:<EOL><INDENT>CreateDirectory(target_dir)<EOL><DEDENT>else:<EOL><INDENT>from ._exceptions import DirectoryNotFoundError<EOL>raise DirectoryNotFoundError(target_dir)<EOL><DEDENT><DEDENT>filenames = ListFiles(source_dir)<EOL>if filenames is None:<EOL><INDENT>return<EOL><DEDENT>for i_filename in filenames:<EOL><INDENT>if md5_check and i_filename.endswith('<STR_LIT>'):<EOL><INDENT>continue  <EOL><DEDENT>if fnmatch.fnmatch(i_filename, source_mask):<EOL><INDENT>source_path = source_dir + '<STR_LIT:/>' + i_filename<EOL>target_path = target_dir + '<STR_LIT:/>' + i_filename<EOL>if IsDir(source_path):<EOL><INDENT>CopyFiles(source_path, target_path, create_target_dir=True, md5_check=md5_check)<EOL><DEDENT>else:<EOL><INDENT>CopyFile(source_path, target_path, md5_check=md5_check)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Copy files from the given source to the target.\n\n:param unicode source_dir:\n    A filename, URL or a file mask.\n    Ex.\n        x:\\coilib50\n        x:\\coilib50\\*\n        http://server/directory/file\n        ftp://server/directory/file\n\n\n:param unicode target_dir:\n    A directory or an URL\n    Ex.\n        d:\\Temp\n        ftp://server/directory\n\n:param bool create_target_dir:\n    If True, creates the target path if it doesn't exists.\n\n:param bool md5_check:\n    .. seealso:: CopyFile\n\n:raises DirectoryNotFoundError:\n    If target_dir does not exist, and create_target_dir is False\n\n.. seealso:: CopyFile for documentation on accepted protocols\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m10"}
{"signature": "def CopyDirectory(source_dir, target_dir, override=False):", "body": "_AssertIsLocal(source_dir)<EOL>_AssertIsLocal(target_dir)<EOL>if override and IsDir(target_dir):<EOL><INDENT>DeleteDirectory(target_dir, skip_on_error=False)<EOL><DEDENT>import shutil<EOL>shutil.copytree(source_dir, target_dir)<EOL>", "docstring": "Recursively copy a directory tree.\n\n:param unicode source_dir:\n    Where files will come from\n\n:param unicode target_dir:\n    Where files will go to\n\n:param bool override:\n    If True and target_dir already exists, it will be deleted before copying.\n\n:raises NotImplementedForRemotePathError:\n    If trying to copy to/from remote directories", "id": "f788:m16"}
{"signature": "def MoveFile(source_filename, target_filename):", "body": "_AssertIsLocal(source_filename)<EOL>_AssertIsLocal(target_filename)<EOL>import shutil<EOL>shutil.move(source_filename, target_filename)<EOL>", "docstring": "Moves a file.\n\n:param unicode source_filename:\n\n:param unicode target_filename:\n\n:raises NotImplementedForRemotePathError:\n    If trying to operate with non-local files.", "id": "f788:m19"}
{"signature": "def CreateLink(target_path, link_path, override=True):", "body": "_AssertIsLocal(target_path)<EOL>_AssertIsLocal(link_path)<EOL>if override and IsLink(link_path):<EOL><INDENT>DeleteLink(link_path)<EOL><DEDENT>dirname = os.path.dirname(link_path)<EOL>if dirname:<EOL><INDENT>CreateDirectory(dirname)<EOL><DEDENT>if sys.platform != '<STR_LIT:win32>':<EOL><INDENT>return os.symlink(target_path, link_path)  <EOL><DEDENT>else:<EOL><INDENT>import jaraco.windows.filesystem<EOL>return jaraco.windows.filesystem.symlink(target_path, link_path)<EOL>from ._easyfs_win32 import CreateSymbolicLink<EOL>try:<EOL><INDENT>dw_flags = <NUM_LIT:0><EOL>if target_path and os.path.isdir(target_path):<EOL><INDENT>dw_flags = <NUM_LIT:1><EOL><DEDENT>return CreateSymbolicLink(target_path, link_path, dw_flags)<EOL><DEDENT>except Exception as e:<EOL><INDENT>reraise(e, '<STR_LIT>' % locals())<EOL><DEDENT><DEDENT>", "docstring": "Create a symbolic link at `link_path` pointing to `target_path`.\n\n:param unicode target_path:\n    Link target\n\n:param unicode link_path:\n    Fullpath to link name\n\n:param bool override:\n    If True and `link_path` already exists as a link, that link is overridden.", "id": "f788:m34"}
{"signature": "def _GetNativeEolStyle(platform=sys.platform):", "body": "_NATIVE_EOL_STYLE_MAP = {<EOL>'<STR_LIT:win32>' : EOL_STYLE_WINDOWS,<EOL>'<STR_LIT>' : EOL_STYLE_UNIX,<EOL>'<STR_LIT>' : EOL_STYLE_UNIX,<EOL>'<STR_LIT>' : EOL_STYLE_MAC,<EOL>}<EOL>result = _NATIVE_EOL_STYLE_MAP.get(platform)<EOL>if result is None:<EOL><INDENT>from ._exceptions import UnknownPlatformError<EOL>raise UnknownPlatformError(platform)<EOL><DEDENT>return result<EOL>", "docstring": "Internal function that determines EOL_STYLE_NATIVE constant with the proper value for the\ncurrent platform.", "id": "f788:m0"}
{"signature": "def _DoCopyFile(source_filename, target_filename, copy_symlink=True):", "body": "from six.moves.urllib.parse import urlparse<EOL>source_url = urlparse(source_filename)<EOL>target_url = urlparse(target_filename)<EOL>if _UrlIsLocal(source_url):<EOL><INDENT>if not Exists(source_filename):<EOL><INDENT>from ._exceptions import FileNotFoundError<EOL>raise FileNotFoundError(source_filename)<EOL><DEDENT>if _UrlIsLocal(target_url):<EOL><INDENT>_CopyFileLocal(source_filename, target_filename, copy_symlink=copy_symlink)<EOL><DEDENT>elif target_url.scheme in ['<STR_LIT>']:<EOL><INDENT>from ._exceptions import NotImplementedProtocol<EOL>raise NotImplementedProtocol(target_url.scheme)<EOL><DEDENT>else:<EOL><INDENT>from ._exceptions import NotImplementedProtocol<EOL>raise NotImplementedProtocol(target_url.scheme)<EOL><DEDENT><DEDENT>elif source_url.scheme in ['<STR_LIT:http>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if _UrlIsLocal(target_url):<EOL><INDENT>from ._exceptions import NotImplementedProtocol<EOL>raise NotImplementedProtocol(target_url.scheme)<EOL><DEDENT>else:<EOL><INDENT>from ._exceptions import NotImplementedProtocol<EOL>raise NotImplementedProtocol(target_url.scheme)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>from ._exceptions import NotImplementedProtocol  <EOL>raise NotImplementedProtocol(source_url.scheme)<EOL><DEDENT>", "docstring": ":param unicode source_filename:\n    The source filename.\n    Schemas: local, ftp, http\n\n:param unicode target_filename:\n    Target filename.\n    Schemas: local, ftp\n\n:param  copy_symlink:\n    @see _CopyFileLocal\n\n:raises FileNotFoundError:\n    If source_filename does not exist", "id": "f788:m8"}
{"signature": "def _HandleContentsEol(contents, eol_style):", "body": "if eol_style == EOL_STYLE_NONE:<EOL><INDENT>return contents<EOL><DEDENT>if eol_style == EOL_STYLE_UNIX:<EOL><INDENT>return contents.replace('<STR_LIT:\\r\\n>', eol_style).replace('<STR_LIT:\\r>', eol_style)<EOL><DEDENT>if eol_style == EOL_STYLE_MAC:<EOL><INDENT>return contents.replace('<STR_LIT:\\r\\n>', eol_style).replace('<STR_LIT:\\n>', eol_style)<EOL><DEDENT>if eol_style == EOL_STYLE_WINDOWS:<EOL><INDENT>return contents.replace('<STR_LIT:\\r\\n>', '<STR_LIT:\\n>').replace('<STR_LIT:\\r>', '<STR_LIT:\\n>').replace('<STR_LIT:\\n>', EOL_STYLE_WINDOWS)<EOL><DEDENT>raise ValueError('<STR_LIT>' % (eol_style,))<EOL>", "docstring": "Replaces eol on each line by the given eol_style.\n\n:param unicode contents:\n:type eol_style: EOL_STYLE_XXX constant\n:param eol_style:", "id": "f788:m39"}
{"signature": "def GetFileLines(filename, newline=None, encoding=None):", "body": "return GetFileContents(<EOL>filename,<EOL>binary=False,<EOL>encoding=encoding,<EOL>newline=newline,<EOL>).split('<STR_LIT:\\n>')<EOL>", "docstring": "Reads a file and returns its contents as a list of lines. Works for both local and remote files.\n\n:param unicode filename:\n\n:param None|''|'\\n'|'\\r'|'\\r\\n' newline:\n    Controls universal newlines.\n    See 'io.open' newline parameter documentation for more details.\n\n:param unicode encoding:\n    File's encoding. If not None, contents obtained from file will be decoded using this\n    `encoding`.\n\n:returns list(unicode):\n    The file's lines\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m22"}
{"signature": "def CheckIsFile(filename):", "body": "if not IsFile(filename):<EOL><INDENT>from ._exceptions import FileNotFoundError<EOL>raise FileNotFoundError(filename)<EOL><DEDENT>", "docstring": "Check if the given file exists.\n\n@filename: unicode\n    The filename to check for existence.\n\n@raise: FileNotFoundError\n    Raises if the file does not exist.", "id": "f788:m25"}
{"signature": "def ReplaceInFile(filename, old, new, encoding=None):", "body": "contents = GetFileContents(filename, encoding=encoding)<EOL>contents = contents.replace(old, new)<EOL>CreateFile(filename, contents, encoding=encoding)<EOL>return contents<EOL>", "docstring": "Replaces all occurrences of \"old\" by \"new\" in the given file.\n\n:param unicode filename:\n    The name of the file.\n\n:param unicode old:\n    The string to search for.\n\n:param unicode new:\n    Replacement string.\n\n:return unicode:\n    The new contents of the file.", "id": "f788:m28"}
{"signature": "def ListFiles(directory):", "body": "from six.moves.urllib.parse import urlparse<EOL>directory_url = urlparse(directory)<EOL>if _UrlIsLocal(directory_url):<EOL><INDENT>if not os.path.isdir(directory):<EOL><INDENT>return None<EOL><DEDENT>return os.listdir(directory)<EOL><DEDENT>elif directory_url.scheme == '<STR_LIT>':<EOL><INDENT>from ._exceptions import NotImplementedProtocol<EOL>raise NotImplementedProtocol(directory_url.scheme)<EOL><DEDENT>else:<EOL><INDENT>from ._exceptions import NotImplementedProtocol<EOL>raise NotImplementedProtocol(directory_url.scheme)<EOL><DEDENT>", "docstring": "Lists the files in the given directory\n\n:type directory: unicode | unicode\n:param directory:\n    A directory or URL\n\n:rtype: list(unicode) | list(unicode)\n:returns:\n    List of filenames/directories found in the given directory.\n    Returns None if the given directory does not exists.\n\n    If `directory` is a unicode string, all files returned will also be unicode\n\n:raises NotImplementedProtocol:\n    If file protocol is not local or FTP\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m24"}
{"signature": "def GetFileContents(filename, binary=False, encoding=None, newline=None):", "body": "source_file = OpenFile(filename, binary=binary, encoding=encoding, newline=newline)<EOL>try:<EOL><INDENT>contents = source_file.read()<EOL><DEDENT>finally:<EOL><INDENT>source_file.close()<EOL><DEDENT>return contents<EOL>", "docstring": "Reads a file and returns its contents. Works for both local and remote files.\n\n:param unicode filename:\n\n:param bool binary:\n    If True returns the file as is, ignore any EOL conversion.\n\n:param unicode encoding:\n    File's encoding. If not None, contents obtained from file will be decoded using this\n    `encoding`.\n\n:param None|''|'\\n'|'\\r'|'\\r\\n' newline:\n    Controls universal newlines.\n    See 'io.open' newline parameter documentation for more details.\n\n:returns str|unicode:\n    The file's contents.\n    Returns unicode string when `encoding` is not None.\n\n.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information", "id": "f788:m21"}
{"signature": "def find_image(conn, name):", "body": "for item in conn.list_images()['<STR_LIT>']:<EOL><INDENT>if (item['<STR_LIT>']['<STR_LIT:location>'] == configuration.LOCATION and<EOL>item['<STR_LIT>']['<STR_LIT>'] == '<STR_LIT>' and<EOL>name in item['<STR_LIT>']['<STR_LIT:name>']):<EOL><INDENT>return item<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Find image by partial name and location.", "id": "f792:m1"}
{"signature": "def get_ipblock(self, ipblock_id):", "body": "response = self._perform_request('<STR_LIT>' % ipblock_id)<EOL>return response<EOL>", "docstring": "Retrieves a single IP block by ID.\n\n:param      ipblock_id: The unique ID of the IP block.\n:type       ipblock_id: ``str``", "id": "f811:c0:m21"}
{"signature": "def list_snapshots(self, depth=<NUM_LIT:1>):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % str(depth))<EOL>return response<EOL>", "docstring": "Retrieves a list of snapshots available in the account.", "id": "f811:c0:m66"}
{"signature": "def get_attached_cdroms(self, datacenter_id, server_id, depth=<NUM_LIT:1>):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % (<EOL>datacenter_id,<EOL>server_id,<EOL>str(depth)))<EOL>return response<EOL>", "docstring": "Retrieves a list of CDROMs attached to the server.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server_id: The unique ID of the server.\n:type       server_id: ``str``\n\n:param      depth: The depth of the response data.\n:type       depth: ``int``", "id": "f811:c0:m58"}
{"signature": "def get_attached_cdrom(self, datacenter_id, server_id, cdrom_id):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % (<EOL>datacenter_id,<EOL>server_id,<EOL>cdrom_id))<EOL>return response<EOL>", "docstring": "Retrieves an attached CDROM.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server_id: The unique ID of the server.\n:type       server_id: ``str``\n\n:param      cdrom_id: The unique ID of the CDROM.\n:type       cdrom_id: ``str``", "id": "f811:c0:m59"}
{"signature": "def remove_group_user(self, group_id, user_id):", "body": "response = self._perform_request(<EOL>url='<STR_LIT>' % (group_id, user_id),<EOL>method='<STR_LIT>')<EOL>return response<EOL>", "docstring": "Removes a user from a group.\n\n:param      group_id: The unique ID of the group.\n:type       group_id: ``str``\n\n:param      user_id: The unique ID of the user.\n:type       user_id: ``str``", "id": "f811:c0:m89"}
{"signature": "def get_snapshot(self, snapshot_id):", "body": "response = self._perform_request('<STR_LIT>' % snapshot_id)<EOL>return response<EOL>", "docstring": "Retrieves a single snapshot by ID.\n\n:param      snapshot_id: The unique ID of the snapshot.\n:type       snapshot_id: ``str``", "id": "f811:c0:m65"}
{"signature": "def get_image(self, image_id):", "body": "response = self._perform_request('<STR_LIT>' % image_id)<EOL>return response<EOL>", "docstring": "Retrieves a single image by ID.\n\n:param      image_id: The unique ID of the image.\n:type       image_id: ``str``", "id": "f811:c0:m17"}
{"signature": "def get_group(self, group_id, depth=<NUM_LIT:1>):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % (group_id, str(depth)))<EOL>return response<EOL>", "docstring": "Retrieves a single group by ID.\n\n:param      group_id: The unique ID of the group.\n:type       group_id: ``str``\n\n:param      depth: The depth of the response data.\n:type       depth: ``int``", "id": "f811:c0:m73"}
{"signature": "def get_loadbalanced_nic(self, datacenter_id,<EOL>loadbalancer_id, nic_id, depth=<NUM_LIT:1>):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % (<EOL>datacenter_id,<EOL>loadbalancer_id,<EOL>nic_id,<EOL>str(depth)))<EOL>return response<EOL>", "docstring": "Gets the properties of a load balanced NIC.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      loadbalancer_id: The unique ID of the load balancer.\n:type       loadbalancer_id: ``str``\n\n:param      nic_id: The unique ID of the NIC.\n:type       nic_id: ``str``\n\n:param      depth: The depth of the response data.\n:type       depth: ``int``", "id": "f811:c0:m38"}
{"signature": "def __init__(self, name=None, location=None, size=None):", "body": "self.name = name<EOL>self.location = location<EOL>self.size = size<EOL>", "docstring": "IPBlock class initializer.\n\n:param      name: The name of the IP block.\n:type       name: ``str``\n\n:param      location: The location for the IP block.\n:type       location: ``str``\n\n:param      size: The number of IPs in the block.\n:type       size: ``str``", "id": "f811:c3:m0"}
{"signature": "def remove_snapshot(self, snapshot_id):", "body": "response = self._perform_request(<EOL>url='<STR_LIT>' + snapshot_id, method='<STR_LIT>')<EOL>return response<EOL>", "docstring": "Removes a snapshot.\n\n:param      snapshot_id: The ID of the snapshot\n                         you wish to remove.\n:type       snapshot_id: ``str``", "id": "f811:c0:m71"}
{"signature": "def delete_datacenter(self, datacenter_id):", "body": "response = self._perform_request(<EOL>url='<STR_LIT>' % (datacenter_id),<EOL>method='<STR_LIT>')<EOL>return response<EOL>", "docstring": "Removes the data center and all its components such as servers, NICs,\nload balancers, volumes.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``", "id": "f811:c0:m9"}
{"signature": "def create_nic(self, datacenter_id, server_id, nic):", "body": "data = json.dumps(self._create_nic_dict(nic))<EOL>response = self._perform_request(<EOL>url='<STR_LIT>' % (<EOL>datacenter_id,<EOL>server_id),<EOL>method='<STR_LIT:POST>',<EOL>data=data)<EOL>return response<EOL>", "docstring": "Creates a NIC on the specified server.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server_id: The unique ID of the server.\n:type       server_id: ``str``\n\n:param      nic: A NIC dict.\n:type       nic: ``dict``", "id": "f811:c0:m45"}
{"signature": "def add_group_user(self, group_id, user_id):", "body": "data = {<EOL>\"<STR_LIT:id>\": user_id<EOL>}<EOL>response = self._perform_request(<EOL>url='<STR_LIT>' % group_id,<EOL>method='<STR_LIT:POST>',<EOL>data=json.dumps(data))<EOL>return response<EOL>", "docstring": "Adds an existing user to a group.\n\n:param      group_id: The unique ID of the group.\n:type       group_id: ``str``\n\n:param      user_id: The unique ID of the user.\n:type       user_id: ``str``", "id": "f811:c0:m88"}
{"signature": "def _read_config(self, filename=None):", "body": "if filename:<EOL><INDENT>self._config_filename = filename<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>import appdirs<EOL><DEDENT>except ImportError:<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>self._config_filename = appdirs.user_config_dir(_LIBRARY_NAME, \"<STR_LIT>\") + \"<STR_LIT>\"<EOL><DEDENT>if not self._config:<EOL><INDENT>self._config = configparser.ConfigParser()<EOL>self._config.optionxform = str<EOL>self._config.read(self._config_filename)<EOL><DEDENT>", "docstring": "Read the user configuration", "id": "f811:c0:m1"}
{"signature": "def update_image(self, image_id, **kwargs):", "body": "data = {}<EOL>for attr, value in kwargs.items():<EOL><INDENT>data[self._underscore_to_camelcase(attr)] = value<EOL><DEDENT>response = self._perform_request(url='<STR_LIT>' + image_id,<EOL>method='<STR_LIT>',<EOL>data=json.dumps(data))<EOL>return response<EOL>", "docstring": "Replace all properties of an image.", "id": "f811:c0:m20"}
{"signature": "def create_user(self, user):", "body": "data = self._create_user_dict(user=user)<EOL>response = self._perform_request(<EOL>url='<STR_LIT>',<EOL>method='<STR_LIT:POST>',<EOL>data=json.dumps(data))<EOL>return response<EOL>", "docstring": "Creates a new user.\n\n:param      user: The user object to be created.\n:type       user: ``dict``", "id": "f811:c0:m84"}
{"signature": "def create_datacenter(self, datacenter):", "body": "server_items = []<EOL>volume_items = []<EOL>lan_items = []<EOL>loadbalancer_items = []<EOL>entities = dict()<EOL>properties = {<EOL>\"<STR_LIT:name>\": datacenter.name<EOL>}<EOL>if datacenter.location:<EOL><INDENT>properties['<STR_LIT:location>'] = datacenter.location<EOL><DEDENT>if datacenter.description:<EOL><INDENT>properties['<STR_LIT:description>'] = datacenter.description<EOL><DEDENT>if datacenter.servers:<EOL><INDENT>for server in datacenter.servers:<EOL><INDENT>server_items.append(self._create_server_dict(server))<EOL><DEDENT>servers = {<EOL>\"<STR_LIT>\": server_items<EOL>}<EOL>server_entities = {<EOL>\"<STR_LIT>\": servers<EOL>}<EOL>entities.update(server_entities)<EOL><DEDENT>if datacenter.volumes:<EOL><INDENT>for volume in datacenter.volumes:<EOL><INDENT>volume_items.append(self._create_volume_dict(volume))<EOL><DEDENT>volumes = {<EOL>\"<STR_LIT>\": volume_items<EOL>}<EOL>volume_entities = {<EOL>\"<STR_LIT>\": volumes<EOL>}<EOL>entities.update(volume_entities)<EOL><DEDENT>if datacenter.loadbalancers:<EOL><INDENT>for loadbalancer in datacenter.loadbalancers:<EOL><INDENT>loadbalancer_items.append(<EOL>self._create_loadbalancer_dict(<EOL>loadbalancer<EOL>)<EOL>)<EOL><DEDENT>loadbalancers = {<EOL>\"<STR_LIT>\": loadbalancer_items<EOL>}<EOL>loadbalancer_entities = {<EOL>\"<STR_LIT>\": loadbalancers<EOL>}<EOL>entities.update(loadbalancer_entities)<EOL><DEDENT>if datacenter.lans:<EOL><INDENT>for lan in datacenter.lans:<EOL><INDENT>lan_items.append(<EOL>self._create_lan_dict(lan)<EOL>)<EOL><DEDENT>lans = {<EOL>\"<STR_LIT>\": lan_items<EOL>}<EOL>lan_entities = {<EOL>\"<STR_LIT>\": lans<EOL>}<EOL>entities.update(lan_entities)<EOL><DEDENT>if not entities:<EOL><INDENT>raw = {<EOL>\"<STR_LIT>\": properties,<EOL>}<EOL><DEDENT>else:<EOL><INDENT>raw = {<EOL>\"<STR_LIT>\": properties,<EOL>\"<STR_LIT>\": entities<EOL>}<EOL><DEDENT>data = json.dumps(raw)<EOL>response = 
self._perform_request(<EOL>url='<STR_LIT>',<EOL>method='<STR_LIT:POST>',<EOL>data=data)<EOL>return response<EOL>", "docstring": "Creates a data center -- both simple and complex are supported.", "id": "f811:c0:m10"}
{"signature": "def attach_cdrom(self, datacenter_id, server_id, cdrom_id):", "body": "data = '<STR_LIT>' + cdrom_id + '<STR_LIT>'<EOL>response = self._perform_request(<EOL>url='<STR_LIT>' % (<EOL>datacenter_id,<EOL>server_id),<EOL>method='<STR_LIT:POST>',<EOL>data=data)<EOL>return response<EOL>", "docstring": "Attaches a CDROM to a server.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server_id: The unique ID of the server.\n:type       server_id: ``str``\n\n:param      cdrom_id: The unique ID of the CDROM.\n:type       cdrom_id: ``str``", "id": "f811:c0:m60"}
{"signature": "def delete_user(self, user_id):", "body": "response = self._perform_request(<EOL>url='<STR_LIT>' % user_id,<EOL>method='<STR_LIT>')<EOL>return response<EOL>", "docstring": "Removes a user.\n\n:param      user_id: The unique ID of the user.\n:type       user_id: ``str``", "id": "f811:c0:m86"}
{"signature": "def add_share(self, group_id, resource_id, **kwargs):", "body": "properties = {}<EOL>for attr, value in kwargs.items():<EOL><INDENT>properties[self._underscore_to_camelcase(attr)] = value<EOL><DEDENT>data = {<EOL>\"<STR_LIT>\": properties<EOL>}<EOL>response = self._perform_request(<EOL>url='<STR_LIT>' % (group_id, resource_id),<EOL>method='<STR_LIT:POST>',<EOL>data=json.dumps(data))<EOL>return response<EOL>", "docstring": "Shares a resource through a group.\n\n:param      group_id: The unique ID of the group.\n:type       group_id: ``str``\n\n:param      resource_id: The unique ID of the resource.\n:type       resource_id: ``str``", "id": "f811:c0:m79"}
{"signature": "def get_attached_volume(self, datacenter_id, server_id, volume_id):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % (<EOL>datacenter_id,<EOL>server_id,<EOL>volume_id))<EOL>return response<EOL>", "docstring": "Retrieves volume information.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server_id: The unique ID of the server.\n:type       server_id: ``str``\n\n:param      volume_id: The unique ID of the volume.\n:type       volume_id: ``str``", "id": "f811:c0:m55"}
{"signature": "def __init__(self, name=None, ips=None,  <EOL>dhcp=None, lan=None, firewall_active=None,<EOL>firewall_rules=None, nat=None, **kwargs):", "body": "if firewall_rules is None:<EOL><INDENT>firewall_rules = []<EOL><DEDENT>self.name = name<EOL>self.nat = nat<EOL>self.ips = ips<EOL>self.dhcp = dhcp<EOL>self.lan = lan<EOL>self.firewall_active = firewall_active<EOL>self.firewall_rules = firewall_rules<EOL>", "docstring": "NIC class initializer.\n\n:param      name: The name of the NIC.\n:type       name: ``str``\n\n:param      ips: A list of IPs.\n:type       ips: ``list``\n\n:param      dhcp: Enable or disable DHCP. Default is enabled.\n:type       dhcp: ``bool``\n\n:param      lan: ID of the LAN in which the NIC should reside.\n:type       lan: ``str``\n\n:param      nat: Enable or disable NAT. Default is disabled.\n:type       nat: ``bool``\n\n:param      firewall_active: Turns the firewall on or off;\n                             default is disabled.\n:type       firewall_active: ``bool``\n\n:param      firewall_rules: List of firewall rule dicts.\n:type       firewall_rules: ``list``", "id": "f811:c6:m0"}
{"signature": "def get_resource(self, resource_type, resource_id, depth=<NUM_LIT:1>):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % (<EOL>resource_type, resource_id, str(depth)))<EOL>return response<EOL>", "docstring": "Retrieves a single resource of a particular type.\n\n:param      resource_type: The resource type: datacenter, image,\n                           snapshot or ipblock.\n:type       resource_type: ``str``\n\n:param      resource_id: The unique ID of the resource.\n:type       resource_id: ``str``\n\n:param      depth: The depth of the response data.\n:type       depth: ``int``", "id": "f811:c0:m91"}
{"signature": "def create_lan(self, datacenter_id, lan):", "body": "data = json.dumps(self._create_lan_dict(lan))<EOL>response = self._perform_request(<EOL>url='<STR_LIT>' % datacenter_id,<EOL>method='<STR_LIT:POST>',<EOL>data=data)<EOL>return response<EOL>", "docstring": "Creates a LAN in the data center.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      lan: The LAN object to be created.\n:type       lan: ``dict``", "id": "f811:c0:m28"}
{"signature": "def list_loadbalancers(self, datacenter_id, depth=<NUM_LIT:1>):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % (<EOL>datacenter_id, str(depth)))<EOL>return response<EOL>", "docstring": "Retrieves a list of load balancers in the data center.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      depth: The depth of the response data.\n:type       depth: ``int``", "id": "f811:c0:m32"}
{"signature": "def detach_volume(self, datacenter_id, server_id, volume_id):", "body": "response = self._perform_request(<EOL>url='<STR_LIT>' % (<EOL>datacenter_id,<EOL>server_id,<EOL>volume_id),<EOL>method='<STR_LIT>')<EOL>return response<EOL>", "docstring": "Detaches a volume from a server.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server_id: The unique ID of the server.\n:type       server_id: ``str``\n\n:param      volume_id: The unique ID of the volume.\n:type       volume_id: ``str``", "id": "f811:c0:m57"}
{"signature": "def get_attached_volumes(self, datacenter_id, server_id, depth=<NUM_LIT:1>):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % (<EOL>datacenter_id,<EOL>server_id,<EOL>str(depth)))<EOL>return response<EOL>", "docstring": "Retrieves a list of volumes attached to the server.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server_id: The unique ID of the server.\n:type       server_id: ``str``\n\n:param      depth: The depth of the response data.\n:type       depth: ``int``", "id": "f811:c0:m54"}
{"signature": "def list_ipblocks(self, depth=<NUM_LIT:1>):", "body": "response = self._perform_request('<STR_LIT>' % str(depth))<EOL>return response<EOL>", "docstring": "Retrieves a list of IP blocks available in the account.", "id": "f811:c0:m22"}
{"signature": "def create_snapshot(self, datacenter_id, volume_id,<EOL>name=None, description=None):", "body": "data = {'<STR_LIT:name>': name, '<STR_LIT:description>': description}<EOL>response = self._perform_request(<EOL>'<STR_LIT>' % (<EOL>datacenter_id, volume_id),<EOL>method='<STR_LIT>',<EOL>data=urlencode(data))<EOL>return response<EOL>", "docstring": "Creates a snapshot of the specified volume.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      volume_id: The unique ID of the volume.\n:type       volume_id: ``str``\n\n:param      name: The name given to the volume.\n:type       name: ``str``\n\n:param      description: The description given to the volume.\n:type       description: ``str``", "id": "f811:c0:m69"}
{"signature": "def delete_group(self, group_id):", "body": "response = self._perform_request(<EOL>url='<STR_LIT>' % group_id,<EOL>method='<STR_LIT>')<EOL>return response<EOL>", "docstring": "Removes a group.\n\n:param      group_id: The unique ID of the group.\n:type       group_id: ``str``", "id": "f811:c0:m76"}
{"signature": "def start_server(self, datacenter_id, server_id):", "body": "response = self._perform_request(<EOL>url='<STR_LIT>' % (<EOL>datacenter_id,<EOL>server_id),<EOL>method='<STR_LIT>')<EOL>return response<EOL>", "docstring": "Starts the server.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server_id: The unique ID of the server.\n:type       server_id: ``str``", "id": "f811:c0:m62"}
{"signature": "def remove_loadbalanced_nic(self, datacenter_id,<EOL>loadbalancer_id, nic_id):", "body": "response = self._perform_request(<EOL>url='<STR_LIT>' % (<EOL>datacenter_id,<EOL>loadbalancer_id,<EOL>nic_id),<EOL>method='<STR_LIT>')<EOL>return response<EOL>", "docstring": "Removes a NIC from the load balancer.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      loadbalancer_id: The unique ID of the load balancer.\n:type       loadbalancer_id: ``str``\n\n:param      nic_id: The unique ID of the NIC.\n:type       nic_id: ``str``", "id": "f811:c0:m39"}
{"signature": "def list_resources(self, resource_type=None, depth=<NUM_LIT:1>):", "body": "if resource_type is not None:<EOL><INDENT>response = self._perform_request(<EOL>'<STR_LIT>' % (resource_type, str(depth)))<EOL><DEDENT>else:<EOL><INDENT>response = self._perform_request(<EOL>'<STR_LIT>' + str(depth))<EOL><DEDENT>return response<EOL>", "docstring": "Retrieves a list of all resources.\n\n:param      resource_type: The resource type: datacenter, image,\n                           snapshot or ipblock. Default is None,\n                           i.e., all resources are listed.\n:type       resource_type: ``str``\n\n:param      depth: The depth of the response data.\n:type       depth: ``int``", "id": "f811:c0:m90"}
{"signature": "@staticmethod<EOL><INDENT>def _underscore_to_camelcase(value):<DEDENT>", "body": "def camelcase():<EOL><INDENT>yield str.lower<EOL>while True:<EOL><INDENT>yield str.capitalize<EOL><DEDENT><DEDENT>c = camelcase()<EOL>return \"<STR_LIT>\".join(next(c)(x) if x else '<STR_LIT:_>' for x in value.split(\"<STR_LIT:_>\"))<EOL>", "docstring": "Convert Python snake case back to mixed case.", "id": "f811:c0:m103"}
{"signature": "def get_firewall_rule(self, datacenter_id,<EOL>server_id, nic_id, firewall_rule_id):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % (<EOL>datacenter_id,<EOL>server_id,<EOL>nic_id,<EOL>firewall_rule_id))<EOL>return response<EOL>", "docstring": "Retrieves a single firewall rule by ID.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server_id: The unique ID of the server.\n:type       server_id: ``str``\n\n:param      nic_id: The unique ID of the NIC.\n:type       nic_id: ``str``\n\n:param      firewall_rule_id: The unique ID of the firewall rule.\n:type       firewall_rule_id: ``str``", "id": "f811:c0:m12"}
{"signature": "def list_images(self, depth=<NUM_LIT:1>):", "body": "response = self._perform_request('<STR_LIT>' + str(depth))<EOL>return response<EOL>", "docstring": "Retrieves a list of images available in the data center.\n\n:param      depth: The depth of the response data.\n:type       depth: ``int``", "id": "f811:c0:m18"}
{"signature": "def __init__(self, name=None, cores=None, ram=None, availability_zone=None,<EOL>boot_volume_id=None, boot_cdrom=None, cpu_family=None,<EOL>create_volumes=None, attach_volumes=None, nics=None):", "body": "if create_volumes is None:<EOL><INDENT>create_volumes = []<EOL><DEDENT>if attach_volumes is None:<EOL><INDENT>attach_volumes = []<EOL><DEDENT>if nics is None:<EOL><INDENT>nics = []<EOL><DEDENT>self.name = name<EOL>self.cores = cores<EOL>self.ram = ram<EOL>self.availability_zone = availability_zone<EOL>self.boot_volume_id = boot_volume_id<EOL>self.boot_cdrom = boot_cdrom<EOL>self.cpu_family = cpu_family<EOL>self.create_volumes = create_volumes<EOL>self.attach_volumes = attach_volumes<EOL>self.nics = nics<EOL>", "docstring": "Server class initializer.\n\n:param      name: The name of your server..\n:type       name: ``str``\n\n:param      cores: The number of cores for the server.\n:type       cores: ``str``\n\n:param      ram: The amount of memory for the server.\n:type       ram: ``str``\n\n:param      availability_zone: The availability zone for the server.\n:type       availability_zone: ``str``\n\n:param      boot_volume_id: The ID of the boot volume.\n:type       boot_volume_id: ``str``\n\n:param      boot_cdrom: Attach a CDROM.\n:type       boot_cdrom: ``str``\n\n:param      cpu_family: Set the desired CPU type.\n:type       cpu_family: ``str``\n\n:param      create_volumes: List of volume dicts to create.\n:type       create_volumes: ``list``\n\n:param      attach_volumes: List of volume IDs to attach.\n:type       attach_volumes: ``list``\n\n:param      nics: List of NIC dicts to create.\n:type       nics: ``list``", "id": "f811:c7:m0"}
{"signature": "def list_group_users(self, group_id, depth=<NUM_LIT:1>):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % (group_id, str(depth)))<EOL>return response<EOL>", "docstring": "Retrieves a list of all users that are members of a particular group.\n\n:param      group_id: The unique ID of the group.\n:type       group_id: ``str``\n\n:param      depth: The depth of the response data.\n:type       depth: ``int``", "id": "f811:c0:m87"}
{"signature": "def get_volume(self, datacenter_id, volume_id):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % (datacenter_id, volume_id))<EOL>return response<EOL>", "docstring": "Retrieves a single volume by ID.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      volume_id: The unique ID of the volume.\n:type       volume_id: ``str``", "id": "f811:c0:m92"}
{"signature": "def get_share(self, group_id, resource_id, depth=<NUM_LIT:1>):", "body": "response = self._perform_request(<EOL>'<STR_LIT>'<EOL>% (group_id, resource_id, str(depth)))<EOL>return response<EOL>", "docstring": "Retrieves a specific resource share available to a group.\n\n:param      group_id: The unique ID of the group.\n:type       group_id: ``str``\n\n:param      resource_id: The unique ID of the resource.\n:type       resource_id: ``str``\n\n:param      depth: The depth of the response data.\n:type       depth: ``int``", "id": "f811:c0:m78"}
{"signature": "def delete_snapshot(self, snapshot_id):", "body": "response = self._perform_request(<EOL>url='<STR_LIT>' + snapshot_id, method='<STR_LIT>')<EOL>return response<EOL>", "docstring": "Removes a snapshot from your account.\n\n:param      snapshot_id: The unique ID of the snapshot.\n:type       snapshot_id: ``str``", "id": "f811:c0:m67"}
{"signature": "def get_nic(self, datacenter_id, server_id, nic_id, depth=<NUM_LIT:1>):", "body": "response = self._perform_request(<EOL>'<STR_LIT>' % (<EOL>datacenter_id,<EOL>server_id,<EOL>nic_id,<EOL>str(depth)))<EOL>return response<EOL>", "docstring": "Retrieves a NIC by its ID.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server_id: The unique ID of the server.\n:type       server_id: ``str``\n\n:param      nic_id: The unique ID of the NIC.\n:type       nic_id: ``str``\n\n:param      depth: The depth of the response data.\n:type       depth: ``int``", "id": "f811:c0:m42"}
{"signature": "def __init__(self, name=None, public=None, nics=None):", "body": "if nics is None:<EOL><INDENT>nics = []<EOL><DEDENT>self.name = name<EOL>self.public = public<EOL>self.nics = nics<EOL>", "docstring": "LAN class initializer.\n\n:param      name: The name of the LAN.\n:type       name: ``str``\n\n:param      public: Indicates if the LAN is public.\n:type       public: ``bool``\n\n:param      nics: A list of NICs\n:type       nics: ``list``", "id": "f811:c4:m0"}
{"signature": "def wait_for_completion(self, response, timeout=<NUM_LIT>, initial_wait=<NUM_LIT:5>, scaleup=<NUM_LIT:10>):", "body": "if not response:<EOL><INDENT>return<EOL><DEDENT>logger = logging.getLogger(__name__)<EOL>wait_period = initial_wait<EOL>next_increase = time.time() + wait_period * scaleup<EOL>if timeout:<EOL><INDENT>timeout = time.time() + timeout<EOL><DEDENT>while True:<EOL><INDENT>request = self.get_request(request_id=response['<STR_LIT>'], status=True)<EOL>if request['<STR_LIT>']['<STR_LIT:status>'] == '<STR_LIT>':<EOL><INDENT>break<EOL><DEDENT>elif request['<STR_LIT>']['<STR_LIT:status>'] == '<STR_LIT>':<EOL><INDENT>raise PBFailedRequest(<EOL>'<STR_LIT>'.format(<EOL>response['<STR_LIT>'], request['<STR_LIT>']['<STR_LIT:message>']),<EOL>response['<STR_LIT>']<EOL>)<EOL><DEDENT>current_time = time.time()<EOL>if timeout and current_time > timeout:<EOL><INDENT>raise PBTimeoutError('<STR_LIT>'.format(<EOL>response['<STR_LIT>']), response['<STR_LIT>'])<EOL><DEDENT>if current_time > next_increase:<EOL><INDENT>wait_period *= <NUM_LIT:2><EOL>next_increase = time.time() + wait_period * scaleup<EOL>scaleup *= <NUM_LIT:2><EOL><DEDENT>logger.info(\"<STR_LIT>\",<EOL>response['<STR_LIT>'], request['<STR_LIT>']['<STR_LIT:status>'], wait_period)<EOL>time.sleep(wait_period)<EOL><DEDENT>", "docstring": "Poll resource request status until resource is provisioned.\n\n:param      response: A response dict, which needs to have a 'requestId' item.\n:type       response: ``dict``\n\n:param      timeout: Maximum waiting time in seconds. None means infinite waiting time.\n:type       timeout: ``int``\n\n:param      initial_wait: Initial polling interval in seconds.\n:type       initial_wait: ``int``\n\n:param      scaleup: Double polling interval every scaleup steps, which will be doubled.\n:type       scaleup: ``int``", "id": "f811:c0:m97"}
{"signature": "def __init__(self, name=None, location=None,  <EOL>description=None, volumes=None, servers=None, lans=None, loadbalancers=None,<EOL>**kwargs):", "body": "if volumes is None:<EOL><INDENT>volumes = []<EOL><DEDENT>if servers is None:<EOL><INDENT>servers = []<EOL><DEDENT>if lans is None:<EOL><INDENT>lans = []<EOL><DEDENT>if loadbalancers is None:<EOL><INDENT>loadbalancers = []<EOL><DEDENT>self.name = name<EOL>self.description = description<EOL>self.location = location<EOL>self.servers = servers<EOL>self.volumes = volumes<EOL>self.lans = lans<EOL>self.loadbalancers = loadbalancers<EOL>", "docstring": "The Datacenter class initializer.\n\n:param      name: The data center name..\n:type       name: ``str``\n\n:param      location: The data center geographical location.\n:type       location: ``str``\n\n:param      description: Optional description.\n:type       description: ``str``\n\n:param      volumes: List of volume dicts.\n:type       volumes: ``list``\n\n:param      servers: List of server dicts.\n:type       servers: ``list``\n\n:param      lans: List of LAN dicts.\n:type       lans: ``list``\n\n:param      loadbalancers: List of load balancer dicts.\n:type       loadbalancers: ``list``", "id": "f811:c1:m0"}
{"signature": "def delete_volume(self, datacenter_id, volume_id):", "body": "response = self._perform_request(<EOL>url='<STR_LIT>' % (<EOL>datacenter_id, volume_id), method='<STR_LIT>')<EOL>return response<EOL>", "docstring": "Removes a volume from the data center.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      volume_id: The unique ID of the volume.\n:type       volume_id: ``str``", "id": "f811:c0:m94"}
{"signature": "def update_firewall_rule(self, datacenter_id, server_id,<EOL>nic_id, firewall_rule_id, **kwargs):", "body": "data = {}<EOL>for attr, value in kwargs.items():<EOL><INDENT>data[self._underscore_to_camelcase(attr)] = value<EOL>if attr == '<STR_LIT>':<EOL><INDENT>data['<STR_LIT>'] = value<EOL><DEDENT>elif attr == '<STR_LIT>':<EOL><INDENT>data['<STR_LIT>'] = value<EOL><DEDENT>elif attr == '<STR_LIT>':<EOL><INDENT>data['<STR_LIT>'] = value<EOL><DEDENT>elif attr == '<STR_LIT>':<EOL><INDENT>data['<STR_LIT>'] = value<EOL><DEDENT>elif attr == '<STR_LIT>':<EOL><INDENT>data['<STR_LIT>'] = value<EOL><DEDENT>elif attr == '<STR_LIT>':<EOL><INDENT>data['<STR_LIT>'] = value<EOL><DEDENT>elif attr == '<STR_LIT>':<EOL><INDENT>data['<STR_LIT>'] = value<EOL><DEDENT>else:<EOL><INDENT>data[self._underscore_to_camelcase(attr)] = value<EOL><DEDENT><DEDENT>response = self._perform_request(<EOL>url='<STR_LIT>' % (<EOL>datacenter_id,<EOL>server_id,<EOL>nic_id,<EOL>firewall_rule_id),<EOL>method='<STR_LIT>',<EOL>data=json.dumps(data))<EOL>return response<EOL>", "docstring": "Updates a firewall rule.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server_id: The unique ID of the server.\n:type       server_id: ``str``\n\n:param      nic_id: The unique ID of the NIC.\n:type       nic_id: ``str``\n\n:param      firewall_rule_id: The unique ID of the firewall rule.\n:type       firewall_rule_id: ``str``", "id": "f811:c0:m16"}
{"signature": "def delete_lan(self, datacenter_id, lan_id):", "body": "response = self._perform_request(<EOL>url='<STR_LIT>' % (<EOL>datacenter_id, lan_id), method='<STR_LIT>')<EOL>return response<EOL>", "docstring": "Removes a LAN from the data center.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      lan_id: The unique ID of the LAN.\n:type       lan_id: ``str``", "id": "f811:c0:m27"}
{"signature": "def create_server(self, datacenter_id, server):", "body": "data = json.dumps(self._create_server_dict(server))<EOL>response = self._perform_request(<EOL>url='<STR_LIT>' % (datacenter_id),<EOL>method='<STR_LIT:POST>',<EOL>data=data)<EOL>return response<EOL>", "docstring": "Creates a server within the data center.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server: A dict of the server to be created.\n:type       server: ``dict``", "id": "f811:c0:m52"}
{"signature": "def __init__(self, name=None, protocol=None,  <EOL>source_mac=None, source_ip=None,<EOL>target_ip=None, port_range_start=None,<EOL>port_range_end=None, icmp_type=None,<EOL>icmp_code=None, **kwargs):", "body": "self.name = name<EOL>self.protocol = protocol<EOL>self.source_mac = source_mac<EOL>self.source_ip = source_ip<EOL>self.target_ip = target_ip<EOL>self.port_range_start = port_range_start<EOL>self.port_range_end = port_range_end<EOL>if icmp_type is not None:<EOL><INDENT>icmp_type = str(icmp_type)<EOL><DEDENT>self.icmp_type = icmp_type<EOL>if icmp_code is not None:<EOL><INDENT>icmp_code = str(icmp_code)<EOL><DEDENT>self.icmp_code = icmp_code<EOL>", "docstring": "FirewallRule class initializer.\n\n:param      name: The name of the firewall rule.\n:type       name: ``str``\n\n:param      protocol: Either TCP or UDP\n:type       protocol: ``str``\n\n:param      source_mac: Source MAC you want to restrict.\n:type       source_mac: ``str``\n\n:param      source_ip: Source IP you want to restrict.\n:type       source_ip: ``str``\n\n:param      target_ip: Target IP you want to restrict.\n:type       target_ip: ``str``\n\n:param      port_range_start: Optional port range.\n:type       port_range_start: ``str``\n\n:param      port_range_end: Optional port range.\n:type       port_range_end: ``str``\n\n:param      icmp_type: Defines the allowed type.\n:type       icmp_type: ``str``\n\n:param      icmp_code: Defines the allowed code.\n:type       icmp_code: ``str``", "id": "f811:c2:m0"}
{"signature": "def __init__(self, name=None, ip=None,  <EOL>dhcp=None, balancednics=None, **kwargs):", "body": "if balancednics is None:<EOL><INDENT>balancednics = []<EOL><DEDENT>self.name = name<EOL>self.ip = ip<EOL>self.dhcp = dhcp<EOL>self.balancednics = balancednics<EOL>", "docstring": "LoadBalancer class initializer.\n\n:param      name: The name of the load balancer.\n:type       name: ``str``\n\n:param      ip: The IP for the load balancer.\n:type       ip: ``str``\n\n:param      dhcp: Indicates if the load balancer\n                  uses DHCP or not.\n:type       dhcp: ``bool``\n\n:param      balancednics: A list of NICs associated\n                          with the load balancer.\n:type       balancednics: ``list``", "id": "f811:c5:m0"}
{"signature": "def list_locations(self, depth=<NUM_LIT:0>):", "body": "response = self._perform_request('<STR_LIT>' % (depth))<EOL>return response<EOL>", "docstring": "Retrieves a list of locations available in the account.", "id": "f811:c0:m41"}
{"signature": "def update_server(self, datacenter_id, server_id, **kwargs):", "body": "data = {}<EOL>for attr, value in kwargs.items():<EOL><INDENT>if attr == '<STR_LIT>':<EOL><INDENT>boot_volume_properties = {<EOL>\"<STR_LIT:id>\": value<EOL>}<EOL>boot_volume_entities = {<EOL>\"<STR_LIT>\": boot_volume_properties<EOL>}<EOL>data.update(boot_volume_entities)<EOL><DEDENT>else:<EOL><INDENT>data[self._underscore_to_camelcase(attr)] = value<EOL><DEDENT><DEDENT>response = self._perform_request(<EOL>url='<STR_LIT>' % (<EOL>datacenter_id,<EOL>server_id),<EOL>method='<STR_LIT>',<EOL>data=json.dumps(data))<EOL>return response<EOL>", "docstring": "Updates a server with the parameters provided.\n\n:param      datacenter_id: The unique ID of the data center.\n:type       datacenter_id: ``str``\n\n:param      server_id: The unique ID of the server.\n:type       server_id: ``str``", "id": "f811:c0:m53"}
{"signature": "def ask(question, options, default):", "body": "assert default in options<EOL>question += \"<STR_LIT>\".format(\"<STR_LIT:/>\".join(o.upper() if o == default else o for o in options))<EOL>selected = None<EOL>while selected not in options:<EOL><INDENT>selected = input(question).strip().lower()<EOL>if selected == \"<STR_LIT>\":<EOL><INDENT>selected = default<EOL><DEDENT>else:<EOL><INDENT>if selected not in options:<EOL><INDENT>question = \"<STR_LIT>\".format(<EOL>\"<STR_LIT>\".join(options[:-<NUM_LIT:1>]), options[-<NUM_LIT:1>],<EOL>comma='<STR_LIT:U+002C>' if len(options) > <NUM_LIT:2> else '<STR_LIT>',<EOL>)<EOL><DEDENT><DEDENT><DEDENT>return selected<EOL>", "docstring": "Ask the user a question with a list of allowed answers (like yes or no).\n\nThe user is presented with a question and asked to select an answer from\nthe given options list. The default will be returned if the user enters\nnothing. The user is asked to repeat his answer if his answer does not\nmatch any of the allowed anwsers.\n\n:param    question: Question to present to the user (without question mark)\n:type     question: ``str``\n\n:param    options: List of allowed anwsers\n:type     options: ``list``\n\n:param    default: Default answer (if the user enters no text)\n:type     default: ``str``", "id": "f813:m0"}
{"signature": "def wait_for_requests(pbclient, request_ids=None,<EOL>timeout=<NUM_LIT:0>, initial_wait=<NUM_LIT:5>, scaleup=<NUM_LIT:10>):", "body": "done = dict()<EOL>if not request_ids:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return done<EOL><DEDENT>total_wait = <NUM_LIT:0><EOL>wait_period = initial_wait<EOL>next_scaleup = scaleup * wait_period<EOL>wait = True<EOL>while wait:<EOL><INDENT>for request_id in request_ids:<EOL><INDENT>if request_id in done:<EOL><INDENT>continue<EOL><DEDENT>request_status = pbclient.get_request(request_id, status=True)<EOL>state = request_status['<STR_LIT>']['<STR_LIT:status>']<EOL>if state == \"<STR_LIT>\":<EOL><INDENT>done[request_id] = (<NUM_LIT:0>, state, request_status['<STR_LIT>']['<STR_LIT:message>'])<EOL>print(\"<STR_LIT>\".format(request_id, state))<EOL><DEDENT>if state == '<STR_LIT>':<EOL><INDENT>done[request_id] = (<NUM_LIT:1>, state, request_status['<STR_LIT>']['<STR_LIT:message>'])<EOL>print(\"<STR_LIT>\".format(request_id, state))<EOL><DEDENT><DEDENT>if len(done) == len(request_ids):<EOL><INDENT>wait = False<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>.format(len(done), len(request_ids), wait_period))<EOL>sleep(wait_period)<EOL>total_wait += wait_period<EOL>if timeout != <NUM_LIT:0> and total_wait > timeout:<EOL><INDENT>wait = False<EOL><DEDENT>next_scaleup -= wait_period<EOL>if next_scaleup == <NUM_LIT:0>:<EOL><INDENT>wait_period += initial_wait<EOL>next_scaleup = scaleup * wait_period<EOL>print(\"<STR_LIT>\"<EOL>.format(wait_period, next_scaleup))<EOL><DEDENT><DEDENT><DEDENT>if len(done) != len(request_ids):<EOL><INDENT>for request_id in request_ids:<EOL><INDENT>if request_id in done:<EOL><INDENT>continue<EOL><DEDENT>done[request_id] = (-<NUM_LIT:1>, state, \"<STR_LIT>\")<EOL><DEDENT><DEDENT>return done<EOL>", "docstring": "Waits for a list of requests to finish until timeout.\ntimeout==0 is interpreted as infinite wait time.\nReturns a dict of request_id -> result.\nresult is a tuple (return code, request 
status, message) where return code\n0  : request successful\n1  : request failed\n-1 : timeout exceeded\nThe wait_period is increased every scaleup steps to adjust for long\nrunning requests.", "id": "f814:m2"}
{"signature": "def _nsattr(self, attr, ns=None):", "body": "if ns is None:<EOL><INDENT>return attr<EOL><DEDENT>return '<STR_LIT:{>' + self._ns[ns] + '<STR_LIT:}>' + attr<EOL>", "docstring": "returns an attribute name w/ namespace prefix", "id": "f816:c1:m2"}
{"signature": "def __init__(self, file=None):", "body": "<EOL>self._ns = {<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>}<EOL>self.file = file<EOL>self.root = None<EOL>self.name = None<EOL>self.osid = None<EOL>self.licenseType = \"<STR_LIT>\"<EOL>self.cpus = None<EOL>self.ram = None<EOL>self.disks = []<EOL>self.lans = dict()<EOL>self.nics = []<EOL>self.resourceTypes = {<EOL>'<STR_LIT:1>': '<STR_LIT>',<EOL>'<STR_LIT:2>': '<STR_LIT>',<EOL>'<STR_LIT:3>': '<STR_LIT>',<EOL>'<STR_LIT:4>': '<STR_LIT>',<EOL>'<STR_LIT:5>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:..>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>}   <EOL>self.osTypeOther = {<EOL>'<STR_LIT:0>': '<STR_LIT>',<EOL>'<STR_LIT:1>': '<STR_LIT>',<EOL>'<STR_LIT:2>': '<STR_LIT>',<EOL>'<STR_LIT:3>': '<STR_LIT>',<EOL>'<STR_LIT:4>': '<STR_LIT>',<EOL>'<STR_LIT:5>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': 
'<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>}   <EOL>self.osTypeLinux = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': 
'<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:100>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>}   <EOL>self.osTypeWindows = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>}<EOL>", "docstring": "OVF meta data initializer", "id": "f816:c1:m0"}
{"signature": "def main(argv=None):                ", "body": "if argv is None:<EOL><INDENT>argv = sys.argv<EOL><DEDENT>else:<EOL><INDENT>sys.argv.extend(argv)<EOL><DEDENT>program_name = os.path.basename(sys.argv[<NUM_LIT:0>])<EOL>program_version = \"<STR_LIT>\" % __version__<EOL>program_build_date = str(__updated__)<EOL>program_version_message = '<STR_LIT>' % (program_version, program_build_date)<EOL>program_shortdesc = __import__('<STR_LIT:__main__>').__doc__.split(\"<STR_LIT:\\n>\")[<NUM_LIT:1>]<EOL>program_license =", "docstring": "Command line options.", "id": "f819:m7"}
{"signature": "def wait_for_datacenter(client, data_center_id):", "body": "total_sleep_time = <NUM_LIT:0><EOL>seconds = <NUM_LIT:5><EOL>while True:<EOL><INDENT>state = client.get_datacenter(data_center_id)['<STR_LIT>']['<STR_LIT:state>']<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\".format(state))<EOL><DEDENT>if state == \"<STR_LIT>\":<EOL><INDENT>break<EOL><DEDENT>time.sleep(seconds)<EOL>total_sleep_time += seconds<EOL>if total_sleep_time == <NUM_LIT>:<EOL><INDENT>seconds = <NUM_LIT:10><EOL><DEDENT>elif total_sleep_time == <NUM_LIT>:<EOL><INDENT>seconds = <NUM_LIT:20><EOL><DEDENT><DEDENT>", "docstring": "Poll the data center to become available (for the next provisionig job)", "id": "f820:m5"}
{"signature": "def getServerStates(pbclient=None, dc_id=None, serverid=None, servername=None):", "body": "if pbclient is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if dc_id is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>server = None<EOL>if serverid is None:<EOL><INDENT>if servername is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>server_info = select_where(getServerInfo(pbclient, dc_id),<EOL>['<STR_LIT:id>', '<STR_LIT:name>', '<STR_LIT:state>', '<STR_LIT>'],<EOL>name=servername)<EOL>if len(server_info) > <NUM_LIT:1>:<EOL><INDENT>raise NameError(\"<STR_LIT>\".format(servername))<EOL><DEDENT>if len(server_info) == <NUM_LIT:1>:<EOL><INDENT>server = server_info[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>server_info = pbclient.get_server(dc_id, serverid, <NUM_LIT:1>)<EOL>server = dict(id=server_info['<STR_LIT:id>'],<EOL>name=server_info['<STR_LIT>']['<STR_LIT:name>'],<EOL>state=server_info['<STR_LIT>']['<STR_LIT:state>'],<EOL>vmstate=server_info['<STR_LIT>']['<STR_LIT>'])<EOL><DEDENT>except Exception:<EOL><INDENT>ex = sys.exc_info()[<NUM_LIT:1>]<EOL>if ex.args[<NUM_LIT:0>] is not None and ex.args[<NUM_LIT:0>] == <NUM_LIT>:<EOL><INDENT>print(\"<STR_LIT>\".format(serverid))<EOL>server = None<EOL><DEDENT>else:<EOL><INDENT>raise ex<EOL><DEDENT><DEDENT><DEDENT>return server<EOL>", "docstring": "gets states of a server", "id": "f820:m3"}
{"signature": "def wait_for_server(pbclient=None, dc_id=None, serverid=None,<EOL>indicator='<STR_LIT:state>', state='<STR_LIT>', timeout=<NUM_LIT>):", "body": "if pbclient is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if dc_id is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if serverid is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>total_sleep_time = <NUM_LIT:0><EOL>seconds = <NUM_LIT:5><EOL>while total_sleep_time < timeout:<EOL><INDENT>time.sleep(seconds)<EOL>total_sleep_time += seconds<EOL>if total_sleep_time == <NUM_LIT>:<EOL><INDENT>seconds = <NUM_LIT:10><EOL><DEDENT>elif total_sleep_time == <NUM_LIT>:<EOL><INDENT>seconds = <NUM_LIT:20><EOL><DEDENT>server = getServerStates(pbclient, dc_id, serverid)<EOL>if server[indicator] == state:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return server<EOL>", "docstring": "wait for a server/VM to reach a defined state for a specified time\nindicator := {state|vmstate} specifies if server or VM stat is tested\nstate specifies the status the indicator should have", "id": "f820:m4"}
{"signature": "def getServerInfo(pbclient=None, dc_id=None):", "body": "if pbclient is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if dc_id is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>server_info = []<EOL>servers = pbclient.list_servers(dc_id, <NUM_LIT:1>)<EOL>for server in servers['<STR_LIT>']:<EOL><INDENT>props = server['<STR_LIT>']<EOL>info = dict(id=server['<STR_LIT:id>'], name=props['<STR_LIT:name>'],<EOL>state=server['<STR_LIT>']['<STR_LIT:state>'],<EOL>vmstate=props['<STR_LIT>'])<EOL>server_info.append(info)<EOL><DEDENT>return server_info<EOL>", "docstring": "gets info of servers of a data center", "id": "f822:m5"}
{"signature": "def main(argv=None):", "body": "if argv is None:<EOL><INDENT>argv = sys.argv<EOL><DEDENT>else:<EOL><INDENT>sys.argv.extend(argv)<EOL><DEDENT>program_name = os.path.basename(sys.argv[<NUM_LIT:0>])<EOL>program_version = \"<STR_LIT>\" % __version__<EOL>program_build_date = str(__updated__)<EOL>program_version_message = '<STR_LIT>' % (program_version, program_build_date)<EOL>program_shortdesc = __import__('<STR_LIT:__main__>').__doc__.split(\"<STR_LIT:\\n>\")[<NUM_LIT:1>]<EOL>program_license =", "docstring": "Parse command line options and dump a datacenter to snapshots and file.", "id": "f822:m10"}
{"signature": "def getServerStates(pbclient=None, dc_id=None, serverid=None, servername=None):", "body": "if pbclient is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if dc_id is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>server = None<EOL>if serverid is None:<EOL><INDENT>if servername is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>server_info = select_where(getServerInfo(pbclient, dc_id),<EOL>['<STR_LIT:id>', '<STR_LIT:name>', '<STR_LIT:state>', '<STR_LIT>'],<EOL>name=servername)<EOL>if len(server_info) > <NUM_LIT:1>:<EOL><INDENT>raise NameError(\"<STR_LIT>\".format(servername))<EOL><DEDENT>if len(server_info) == <NUM_LIT:1>:<EOL><INDENT>server = server_info[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>server_info = pbclient.get_server(dc_id, serverid, <NUM_LIT:1>)<EOL>server = dict(id=server_info['<STR_LIT:id>'],<EOL>name=server_info['<STR_LIT>']['<STR_LIT:name>'],<EOL>state=server_info['<STR_LIT>']['<STR_LIT:state>'],<EOL>vmstate=server_info['<STR_LIT>']['<STR_LIT>'])<EOL><DEDENT>except Exception:<EOL><INDENT>ex = sys.exc_info()[<NUM_LIT:1>]<EOL>if ex.args[<NUM_LIT:0>] is not None and ex.args[<NUM_LIT:0>] == <NUM_LIT>:<EOL><INDENT>print(\"<STR_LIT>\".format(serverid))<EOL>server = None<EOL><DEDENT>else:<EOL><INDENT>raise ex<EOL><DEDENT><DEDENT><DEDENT>return server<EOL>", "docstring": "gets states of a server", "id": "f822:m7"}
{"signature": "def wait_for_request(pbclient, request_id,<EOL>timeout=<NUM_LIT:0>, initial_wait=<NUM_LIT:5>, scaleup=<NUM_LIT:10>):", "body": "total_wait = <NUM_LIT:0><EOL>wait_period = initial_wait<EOL>next_scaleup = scaleup * wait_period<EOL>wait = True<EOL>while wait:<EOL><INDENT>request_status = pbclient.get_request(request_id, status=True)<EOL>state = request_status['<STR_LIT>']['<STR_LIT:status>']<EOL>if state == \"<STR_LIT>\":<EOL><INDENT>return(<NUM_LIT:0>, state, request_status['<STR_LIT>']['<STR_LIT:message>'])<EOL><DEDENT>if state == '<STR_LIT>':<EOL><INDENT>return(<NUM_LIT:1>, state, request_status['<STR_LIT>']['<STR_LIT:message>'])<EOL><DEDENT>print(\"<STR_LIT>\"<EOL>.format(request_id, state, wait_period))<EOL>sleep(wait_period)<EOL>total_wait += wait_period<EOL>if timeout != <NUM_LIT:0> and total_wait > timeout:<EOL><INDENT>wait = False<EOL><DEDENT>next_scaleup -= wait_period<EOL>if next_scaleup == <NUM_LIT:0>:<EOL><INDENT>wait_period += initial_wait<EOL>next_scaleup = scaleup * wait_period<EOL>print(\"<STR_LIT>\"<EOL>.format(wait_period, next_scaleup))<EOL><DEDENT><DEDENT>return(-<NUM_LIT:1>, state, \"<STR_LIT>\")<EOL>", "docstring": "Waits for a request to finish until timeout.\ntimeout==0 is interpreted as infinite wait time.\nReturns a tuple (return code, request status, message) where return code\n0  : request successful\n1  : request failed\n-1 : timeout exceeded\nThe wait_period is increased every scaleup steps to adjust for long\nrunning requests.", "id": "f822:m3"}
{"signature": "def wait_for_requests(pbclient, request_ids=None,<EOL>timeout=<NUM_LIT:0>, initial_wait=<NUM_LIT:5>, scaleup=<NUM_LIT:10>):", "body": "done = dict()<EOL>if not request_ids:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return done<EOL><DEDENT>total_wait = <NUM_LIT:0><EOL>wait_period = initial_wait<EOL>next_scaleup = scaleup * wait_period<EOL>wait = True<EOL>while wait:<EOL><INDENT>for request_id in request_ids:<EOL><INDENT>if request_id in done:<EOL><INDENT>continue<EOL><DEDENT>request_status = pbclient.get_request(request_id, status=True)<EOL>state = request_status['<STR_LIT>']['<STR_LIT:status>']<EOL>if state == \"<STR_LIT>\":<EOL><INDENT>done[request_id] = (<NUM_LIT:0>, state, request_status['<STR_LIT>']['<STR_LIT:message>'])<EOL>print(\"<STR_LIT>\".format(request_id, state))<EOL><DEDENT>if state == '<STR_LIT>':<EOL><INDENT>done[request_id] = (<NUM_LIT:1>, state, request_status['<STR_LIT>']['<STR_LIT:message>'])<EOL>print(\"<STR_LIT>\".format(request_id, state))<EOL><DEDENT><DEDENT>if len(done) == len(request_ids):<EOL><INDENT>wait = False<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>.format(len(done), len(request_ids), wait_period))<EOL>sleep(wait_period)<EOL>total_wait += wait_period<EOL>if timeout != <NUM_LIT:0> and total_wait > timeout:<EOL><INDENT>wait = False<EOL><DEDENT>next_scaleup -= wait_period<EOL>if next_scaleup == <NUM_LIT:0>:<EOL><INDENT>wait_period += initial_wait<EOL>next_scaleup = scaleup * wait_period<EOL>print(\"<STR_LIT>\"<EOL>.format(wait_period, next_scaleup))<EOL><DEDENT><DEDENT><DEDENT>if len(done) != len(request_ids):<EOL><INDENT>for request_id in request_ids:<EOL><INDENT>if request_id in done:<EOL><INDENT>continue<EOL><DEDENT>done[request_id] = (-<NUM_LIT:1>, state, \"<STR_LIT>\")<EOL><DEDENT><DEDENT>return done<EOL>", "docstring": "Waits for a list of requests to finish until timeout.\ntimeout==0 is interpreted as infinite wait time.\nReturns a dict of request_id -> result.\nresult is a tuple (return code, request 
status, message) where return code\n0  : request successful\n1  : request failed\n-1 : timeout exceeded\nThe wait_period is increased every scaleup steps to adjust for long\nrunning requests.", "id": "f822:m4"}
{"signature": "def getLogin(filename, user, passwd):", "body": "if filename is None:<EOL><INDENT>return (user, passwd)<EOL><DEDENT>if os.path.exists(filename):<EOL><INDENT>print(\"<STR_LIT>\".format(filename))<EOL>with open(filename, \"<STR_LIT:r>\") as loginfile:<EOL><INDENT>encoded_cred = loginfile.read()<EOL><INDENT>print(\"<STR_LIT>\".format(encoded_cred))<EOL><DEDENT>decoded_cred = b64decode(encoded_cred)<EOL>login = decoded_cred.split('<STR_LIT::>', <NUM_LIT:1>)<EOL>return (login[<NUM_LIT:0>], login[<NUM_LIT:1>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if user is None or passwd is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>print(\"<STR_LIT>\".format(filename))<EOL>with open(filename, \"<STR_LIT:w>\") as loginfile:<EOL><INDENT>encoded_cred = b64encode(user+\"<STR_LIT::>\"+passwd)<EOL><INDENT>print(\"<STR_LIT>\".format(encoded_cred))<EOL><DEDENT>loginfile.write(encoded_cred)<EOL><DEDENT>return (user, passwd)<EOL><DEDENT>", "docstring": "write user/passwd to login file or get them from file.\nThis method is not Py3 safe (byte vs. str)", "id": "f827:m0"}
{"signature": "def main(argv=None):", "body": "if argv is None:<EOL><INDENT>argv = sys.argv<EOL><DEDENT>else:<EOL><INDENT>sys.argv.extend(argv)<EOL><DEDENT>program_name = os.path.basename(sys.argv[<NUM_LIT:0>])<EOL>program_version = \"<STR_LIT>\" % __version__<EOL>program_build_date = str(__updated__)<EOL>program_version_message = '<STR_LIT>' % (program_version,<EOL>program_build_date)<EOL>program_shortdesc = __import__('<STR_LIT:__main__>').__doc__.split(\"<STR_LIT:\\n>\")[<NUM_LIT:1>]<EOL>program_license =", "docstring": "Command line options.", "id": "f827:m5"}
{"signature": "def main(argv=None):", "body": "if argv is None:<EOL><INDENT>argv = sys.argv<EOL><DEDENT>else:<EOL><INDENT>sys.argv.extend(argv)<EOL><DEDENT>program_name = os.path.basename(sys.argv[<NUM_LIT:0>])<EOL>program_version = \"<STR_LIT>\" % __version__<EOL>program_build_date = str(__updated__)<EOL>program_version_message = '<STR_LIT>' % (program_version,<EOL>program_build_date)<EOL>program_shortdesc = __import__('<STR_LIT:__main__>').__doc__.split(\"<STR_LIT:\\n>\")[<NUM_LIT:1>]<EOL>program_license =", "docstring": "Parse command line options and create a server/volume composite.", "id": "f828:m2"}
{"signature": "def getLogin(filename, user, passwd):", "body": "if filename is None:<EOL><INDENT>return (user, passwd)<EOL><DEDENT>if os.path.exists(filename):<EOL><INDENT>print(\"<STR_LIT>\".format(filename))<EOL>with open(filename, \"<STR_LIT:r>\") as loginfile:<EOL><INDENT>encoded_cred = loginfile.read()<EOL>decoded_cred = b64decode(encoded_cred)<EOL>login = decoded_cred.split('<STR_LIT::>', <NUM_LIT:1>)<EOL>return (login[<NUM_LIT:0>], login[<NUM_LIT:1>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if user is None or passwd is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>print(\"<STR_LIT>\".format(filename))<EOL>with open(filename, \"<STR_LIT:w>\") as loginfile:<EOL><INDENT>encoded_cred = b64encode(user+\"<STR_LIT::>\"+passwd)<EOL>loginfile.write(encoded_cred)<EOL><DEDENT>return (user, passwd)<EOL><DEDENT>", "docstring": "write user/passwd to login file or get them from file.\nThis method is not Py3 safe (byte vs. str)", "id": "f828:m0"}
{"signature": "def wait_for_request(pbclient, request_id,<EOL>timeout=<NUM_LIT:0>, initial_wait=<NUM_LIT:5>, scaleup=<NUM_LIT:10>):", "body": "total_wait = <NUM_LIT:0><EOL>wait_period = initial_wait<EOL>next_scaleup = scaleup * wait_period<EOL>wait = True<EOL>while wait:<EOL><INDENT>request_status = pbclient.get_request(request_id, status=True)<EOL>state = request_status['<STR_LIT>']['<STR_LIT:status>']<EOL>if state == \"<STR_LIT>\":<EOL><INDENT>return(<NUM_LIT:0>, state, request_status['<STR_LIT>']['<STR_LIT:message>'])<EOL><DEDENT>if state == '<STR_LIT>':<EOL><INDENT>return(<NUM_LIT:1>, state, request_status['<STR_LIT>']['<STR_LIT:message>'])<EOL><DEDENT>if verbose > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>.format(request_id, state, wait_period))<EOL><DEDENT>sleep(wait_period)<EOL>total_wait += wait_period<EOL>if timeout != <NUM_LIT:0> and total_wait > timeout:<EOL><INDENT>wait = False<EOL><DEDENT>next_scaleup -= wait_period<EOL>if next_scaleup == <NUM_LIT:0>:<EOL><INDENT>wait_period += initial_wait<EOL>next_scaleup = scaleup * wait_period<EOL>if verbose > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>.format(wait_period, next_scaleup))<EOL><DEDENT><DEDENT><DEDENT>return(-<NUM_LIT:1>, state, \"<STR_LIT>\")<EOL>", "docstring": "Waits for a request to finish until timeout.\ntimeout==0 is interpreted as infinite wait time.\nReturns a tuple (return code, request status, message) where return code\n0  : request successful\n1  : request failed\n-1 : timeout exceeded\nThe wait_period is increased every scaleup steps to adjust for long\nrunning requests.", "id": "f828:m1"}
{"signature": "def __init__(self, hub, zid, online, red, green, blue, level):", "body": "self._hub = hub<EOL>self._zid = zid<EOL>self._online = online == <NUM_LIT:1>  <EOL>self._red = int(red)<EOL>self._green = int(green)<EOL>self._blue = int(blue)<EOL>self._level = int(level)<EOL>", "docstring": "Construct a Bulb (light) based on current values.", "id": "f831:c1:m0"}
{"signature": "def turn_off(self):", "body": "command = \"<STR_LIT>\".format(self._zid)<EOL>response = self._hub.send_command(command)<EOL>_LOGGER.debug(\"<STR_LIT>\", repr(command), response)<EOL>return response<EOL>", "docstring": "Turn bulb off (zero brightness).", "id": "f831:c1:m7"}
{"signature": "@property<EOL><INDENT>def zid(self):<DEDENT>", "body": "return self._zid<EOL>", "docstring": "Return the bulb ID.", "id": "f831:c1:m3"}
{"signature": "def set_brightness(self, brightness):", "body": "command = \"<STR_LIT>\".format(self._zid, brightness)<EOL>response = self._hub.send_command(command)<EOL>_LOGGER.debug(\"<STR_LIT>\", repr(command), response)<EOL>return response<EOL>", "docstring": "Set brightness of bulb.", "id": "f831:c1:m9"}
{"signature": "@property<EOL><INDENT>def brightness(self):<DEDENT>", "body": "self.update()<EOL>return self._level<EOL>", "docstring": "Return the brightness level.", "id": "f831:c1:m1"}
{"signature": "def set_all(self, red, green, blue, brightness):", "body": "command = \"<STR_LIT>\".format(self._zid, red, green, blue,<EOL>brightness)<EOL>response = self._hub.send_command(command)<EOL>_LOGGER.debug(\"<STR_LIT>\", repr(command), response)<EOL>return response<EOL>", "docstring": "Set color and brightness of bulb.", "id": "f831:c1:m10"}
{"signature": "@property<EOL><INDENT>def available(self):<DEDENT>", "body": "response = self.send_command(\"<STR_LIT>\")<EOL>return \"<STR_LIT>\" in response<EOL>", "docstring": "Check if hub is responsive.", "id": "f831:c0:m2"}
{"signature": "def demo():", "body": "hub = Hub()<EOL>if hub.available:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\")<EOL><DEDENT>bulbs = hub.get_lights()<EOL>light = get_bulb(ZID_TO_TEST, bulbs)<EOL>if light is not None:<EOL><INDENT>if light.available:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\")<EOL>assert light.available<EOL>light.turn_on()<EOL>time.sleep(SECONDS_TO_WAIT)<EOL>assert light.is_on<EOL>light.update()<EOL>light.update()<EOL>light.set_rgb_color(<NUM_LIT:0>, <NUM_LIT:255>, <NUM_LIT:255>)<EOL>time.sleep(SECONDS_TO_WAIT)<EOL>light.update()<EOL>assert light.rgb_color == [<NUM_LIT:0>, <NUM_LIT:255>, <NUM_LIT:255>]<EOL>light.set_brightness(<NUM_LIT>)<EOL>time.sleep(SECONDS_TO_WAIT)<EOL>assert light.brightness == <NUM_LIT><EOL>assert light.is_on<EOL>light.turn_off()<EOL>time.sleep(SECONDS_TO_WAIT)<EOL>assert not light.is_on<EOL>light.set_all(<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>)<EOL>time.sleep(SECONDS_TO_WAIT)<EOL>LOGGER.info(\"<STR_LIT>\", light.rgb_color,<EOL>light.brightness)<EOL>assert light.brightness == <NUM_LIT><EOL>assert light.rgb_color == [<NUM_LIT>, <NUM_LIT>, <NUM_LIT>]<EOL>light.turn_off()<EOL><DEDENT>else:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGGER.error(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Demo some specific functionality. Needs to be customised.", "id": "f833:m1"}
{"signature": "def get_bulb(zid, bulbs):", "body": "for bulb in bulbs:<EOL><INDENT>if bulb.zid == zid:<EOL><INDENT>return bulb<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Retrieve a bulb by its zid from a list of Bulb objects.", "id": "f833:m2"}
{"signature": "def _generate_token( self, length=<NUM_LIT:32> ):", "body": "return '<STR_LIT>'.join( choice( ascii_letters + digits ) for x in range( length ) )<EOL>", "docstring": "_generate_token - internal function for generating randomized alphanumberic\nstrings of a given length", "id": "f835:c1:m6"}
{"signature": "def authenticate_token( self, token ):", "body": "token_data = self.data_store.fetch( '<STR_LIT>', token=token )<EOL>if not token_data:<EOL><INDENT>raise Proauth2Error( '<STR_LIT>',<EOL>'<STR_LIT>' )<EOL><DEDENT>return token_data['<STR_LIT>']<EOL>", "docstring": "authenticate_token checks the passed token and returns the user_id it is\nassociated with. it is assumed that this method won't be directly exposed to\nthe oauth client, but some kind of framework or wrapper. this allows the\nframework to have the user_id without doing additional DB calls.", "id": "f835:c1:m4"}
{"signature": "def request_access_token( self, client_id, key, code, grant_type,<EOL>redirect_uri=None, method='<STR_LIT>' ):", "body": "if grant_type != '<STR_LIT>':<EOL><INDENT>raise Proauth2Error( '<STR_LIT>',<EOL>'<STR_LIT>' )<EOL><DEDENT>self._auth( client_id, key, method )<EOL>user_id = self._validate_request_code( code, client_id )<EOL>access_token = self._generate_token( <NUM_LIT:64> )<EOL>self.data_store.store( '<STR_LIT>', token=access_token, user_id=user_id,<EOL>client_id=client_id )<EOL>return { '<STR_LIT>':access_token, '<STR_LIT>':'<STR_LIT>' }<EOL>", "docstring": "request_access_token validates the client_id and client_secret, using the\nprovided method, then generates an access_token, stores it with the user_id\nfrom the nonce, and returns a dictionary containing an access_token and\nbearer token.\n---\nfrom the spec, it looks like there are different types of\ntokens, but i don't understand the disctintions, so someone else can fix\nthis if need be.\nregarding the method: it appears that it is intended for there to be\nmultiple ways to verify the client_id. my assumption is that you use the\nsecret as the salt and pass the hashed of the client_id or something, and\nthen compare hashes on the server end. currently the only implemented method\nis direct comparison of the client_ids and client_secrets.\nadditional methods can be added to proauth2.auth_methods", "id": "f835:c1:m3"}
{"signature": "def _auth( self, client_id, key, method ):", "body": "available = auth_methods.keys()<EOL>if method not in available:<EOL><INDENT>raise Proauth2Error( '<STR_LIT>',<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'% ( method, '<STR_LIT:\\n>'.join( available ) ) )<EOL><DEDENT>client = self.data_store.fetch( '<STR_LIT>', client_id=client_id )<EOL>if not client: raise Proauth2Error( '<STR_LIT>' )<EOL>if not auth_methods[method]( key, client['<STR_LIT>'] ):<EOL><INDENT>raise Proauth2Error( '<STR_LIT>' )<EOL><DEDENT>", "docstring": "_auth - internal method to ensure the client_id and client_secret passed with\nthe nonce match", "id": "f835:c1:m7"}
{"signature": "def revoke_token( self, token ):", "body": "self.data_store.remove( '<STR_LIT>', token=token )<EOL>", "docstring": "revoke_token removes the access token from the data_store", "id": "f835:c1:m5"}
{"signature": "def request_authorization( self, client_id, user_id, response_type,<EOL>redirect_uri=None, scope=None, state=None,<EOL>expires=<NUM_LIT> ):", "body": "if response_type != '<STR_LIT:code>':<EOL><INDENT>raise Proauth2Error( '<STR_LIT>',<EOL>'<STR_LIT>', state=state )<EOL><DEDENT>client = self.data_store.fetch( '<STR_LIT>', client_id=client_id )<EOL>if not client: raise Proauth2Error( '<STR_LIT>' )<EOL>if redirect_uri and client['<STR_LIT>'] != redirect_uri:<EOL><INDENT>raise Proauth2Error( '<STR_LIT>', \"<STR_LIT>\" )<EOL><DEDENT>nonce_code = self._generate_token()<EOL>expires = time() + expires<EOL>try:<EOL><INDENT>self.data_store.store( '<STR_LIT>', code=nonce_code,<EOL>client_id=client_id, expires=expires,<EOL>user_id=user_id, scope=scope )<EOL><DEDENT>except Proauth2Error as e:<EOL><INDENT>e.state = state<EOL>raise e<EOL><DEDENT>return { '<STR_LIT:code>':nonce_code, '<STR_LIT:state>':state }<EOL>", "docstring": "request_authorization generates a nonce, and stores it in the data_store along with the\nclient_id, user_id, and expiration timestamp.\nIt then returns a dictionary containing the nonce as \"code,\" and the passed\nstate.\n---\nresponse_type MUST be \"code.\" this is directly from the OAuth2 spec.\nthis probably doesn't need to be checked here, but if it's in the spec I\nguess it should be verified somewhere.\nscope has not been implemented here. it will be stored, but there is no\nscope-checking built in here at this time.\nif a redirect_uri is passed, it must match the registered redirect_uri.\nagain, this is per spec.", "id": "f835:c1:m2"}
{"signature": "def _validate_request_code( self, code, client_id):", "body": "nonce = self.data_store.fetch( '<STR_LIT>', code=code )<EOL>if not nonce:<EOL><INDENT>raise Proauth2Error( '<STR_LIT>', '<STR_LIT>' % code )<EOL><DEDENT>if client_id != nonce['<STR_LIT>']: <EOL><INDENT>raise Proauth2Error( '<STR_LIT>', '<STR_LIT>' % code )<EOL><DEDENT>user_id = nonce['<STR_LIT>']<EOL>expires = nonce['<STR_LIT>']<EOL>self.data_store.remove( '<STR_LIT>', code=code, client_id=client_id,<EOL>user_id=user_id )<EOL>if time() > expires:<EOL><INDENT>raise Proauth2Error( '<STR_LIT>', '<STR_LIT>' % code )<EOL><DEDENT>return user_id<EOL>", "docstring": "_validate_request_code - internal method for verifying the the given nonce.\nalso removes the nonce from the data_store, as they are intended for\none-time use.", "id": "f835:c1:m8"}
{"signature": "@engine<EOL><INDENT>def register_app(self, name, redirect_uri, callback):<DEDENT>", "body": "client_id = self._generate_token()<EOL>client_secret = self._generate_token(<NUM_LIT:64>)<EOL>yield Task(self.data_store.store, '<STR_LIT>', client_id=client_id,<EOL>client_secret=client_secret, name=name,<EOL>redirect_uri=redirect_uri)<EOL>callback({'<STR_LIT>':client_id, '<STR_LIT>':client_secret})<EOL>", "docstring": "register_app takes an application name and redirect_uri\nIt generates client_id (client_key) and client_secret,\nthen stores all of the above in the data_store,\nand returns a dictionary containing the client_id and client_secret.", "id": "f836:c0:m1"}
{"signature": "def _generate_token(self, length=<NUM_LIT:32>):", "body": "return '<STR_LIT>'.join(choice(ascii_letters + digits) for x in range(length))<EOL>", "docstring": "_generate_token - internal function for generating randomized alphanumberic\nstrings of a given length", "id": "f836:c0:m8"}
{"signature": "def __init__(self, database='<STR_LIT>', host='<STR_LIT:localhost>', port=<NUM_LIT>,<EOL>user=None, pwd=None):", "body": "if user and pwd:<EOL><INDENT>connection_string = '<STR_LIT>' %(user, pwd, host, port)<EOL><DEDENT>else:<EOL><INDENT>connection_string = '<STR_LIT>' %(host, port)<EOL><DEDENT>self.db = MotorClient(connection_string).open_sync()[database]<EOL>", "docstring": "initialize a mongodb connection to mongodb://user:pass@host:port\nuse database", "id": "f838:c0:m0"}
{"signature": "@engine<EOL><INDENT>def store(self, collection, **kwargs):<DEDENT>", "body": "callback = kwargs.pop('<STR_LIT>')<EOL>key = validate(collection, **kwargs)<EOL>data = yield Task(self.fetch, collection, **{key: kwargs[key]})<EOL>if data is not None:<EOL><INDENT>raise Proauth2Error('<STR_LIT>')<EOL><DEDENT>yield Op(self.db[collection].insert, kwargs)<EOL>callback()<EOL>", "docstring": "validate the passed values in kwargs based on the collection,\nstore them in the mongodb collection", "id": "f838:c0:m3"}
{"signature": "def fetch( self, collection, **kwargs ):", "body": "return self.db[collection].find_one( kwargs )<EOL>", "docstring": "return one record from the collection whose parameters match kwargs\n---\nkwargs should be a dictionary whose keys match column names (in\ntraditional SQL / fields in NoSQL) and whose values are the values of\nthose fields.\ne.g. kwargs={name='my application name',client_id=12345}", "id": "f839:c0:m1"}
{"signature": "def remove( self, collection, **kwargs ):", "body": "self.db[collection].remove( kwargs )<EOL>", "docstring": "remove records from collection whose parameters match kwargs", "id": "f839:c0:m2"}
{"signature": "def validate( table, **data ):", "body": "if table not in good.keys():<EOL><INDENT>raise Proauth2Error( '<STR_LIT>', '<STR_LIT>' % table )<EOL><DEDENT>for req in good[table]['<STR_LIT>']:<EOL><INDENT>if not data.get( req, None ):<EOL><INDENT>raise Proauth2Error( '<STR_LIT>',<EOL>'<STR_LIT>' % req )<EOL><DEDENT><DEDENT>for key in data.keys():<EOL><INDENT>if key not in good[table]['<STR_LIT>'] andkey not in good[table]['<STR_LIT>']:<EOL><INDENT>raise Proauth2Error( '<STR_LIT>', '<STR_LIT>' % key )<EOL><DEDENT><DEDENT>return good[table]['<STR_LIT:key>']<EOL>", "docstring": "theoretically, any data store can be implemented to work with this package,\nwhich means basic data validation must be done in-package, so that weird\nstuff can't be stored in the data store.\nthis function raises an exception if an invalid table name is passed, not\nall of the required fields are in the data kwargs, or if a field that was\npassed is not expected.\nit also returns the key field name, for ensuring uniqueness (again, that may\nnot be built into whatever data store is impelemented.)", "id": "f840:m0"}
{"signature": "def direct_auth( key, secret ):", "body": "if key == secret: return True<EOL>return False<EOL>", "docstring": "directly compare the stored secret and the passed secret.", "id": "f841:m0"}
{"signature": "def clean_data_dir_async(self, *, data_dir):", "body": "return super().__getattr__('<STR_LIT>')(data_dir=data_dir)<EOL>", "docstring": "\u6e05\u7406\u6570\u636e\u76ee\u5f55 (\u5f02\u6b65\u7248\u672c)\n\n------------\n\n:param str data_dir: \u6536\u5230\u6e05\u7406\u7684\u76ee\u5f55\u540d\uff0c\u652f\u6301 `image`\u3001`record`\u3001`show`\u3001`bface`\n:return: None\n:rtype: None\n\n------------\n\n\u7528\u4e8e\u6e05\u7406\u79ef\u6512\u4e86\u592a\u591a\u65e7\u6587\u4ef6\u7684\u6570\u636e\u76ee\u5f55\uff0c\u5982 `image`\u3002\n\nHTTP API v3.3.4 \u65b0\u589e", "id": "f847:c0:m36"}
{"signature": "def get_cookies(self):", "body": "return super().__getattr__('<STR_LIT>')()<EOL>", "docstring": "\u83b7\u53d6 Cookies\n\n------------\n\n:return: { \"cookies\": (Cookies: str)}\n:rtype: dict[ str, str ]\n\n------------\n\n========  ===========  =========\n\u54cd\u5e94\u6570\u636e\n--------------------------------\n\u6570\u636e\u7c7b\u578b  \u5b57\u6bb5\u540d       \u8bf4\u660e\n========  ===========  =========\nstr       cookies      Cookies\n========  ===========  =========", "id": "f847:c0:m28"}
{"signature": "def set_group_card(self, *, group_id, user_id, card=None):", "body": "return super().__getattr__('<STR_LIT>')(group_id=group_id, user_id=user_id, card=card)<EOL>", "docstring": "\u8bbe\u7f6e\u7fa4\u540d\u7247\uff08\u7fa4\u5907\u6ce8\uff09\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param int user_id: \u8981\u8bbe\u7f6e\u7684 QQ \u53f7\n:param str | None card: \u7fa4\u540d\u7247\u5185\u5bb9\uff0c\u4e0d\u586b\u6216\u7a7a\u5b57\u7b26\u4e32\u8868\u793a\u5220\u9664\u7fa4\u540d\u7247\n:return: None\n:rtype: None", "id": "f847:c0:m17"}
{"signature": "def _get_friend_list(self):", "body": "return super().__getattr__('<STR_LIT>')()<EOL>", "docstring": "\u83b7\u53d6\u597d\u53cb\u5217\u8868 (\u5b9e\u9a8c\u6027\u529f\u80fd)\n\n------------\n\n:return: [{ \"friend_group_id\": (\u597d\u53cb\u5206\u7ec4 ID: int), \"friend_group_name\": (\u597d\u53cb\u5206\u7ec4\u540d\u79f0: str), \"friends\": (\u5206\u7ec4\u4e2d\u7684\u597d\u53cb: [{ \"nickname\": (\u597d\u53cb\u6635\u79f0: str), \"remark\": (\u597d\u53cb\u5907\u6ce8: str), \"user_id\": (\u597d\u53cb QQ \u53f7: int) }, ...]) }, ...]\n:rtype: list[ dict[ str, int | str | list[ dict[ str, int | str ] ] ] ]\n\n------------\n\n\u54cd\u5e94\u6570\u636e\u4ee5 **\u5217\u8868** \u5305\u88c5\u7684\u5b57\u5178\u7684\u5f62\u5f0f\u63d0\u4f9b\u3002`( List[ Dict[ ...] ] )`\n\n========  ==================  ===============================\n\u54cd\u5e94\u6570\u636e\n-------------------------------------------------------------\n\u6570\u636e\u7c7b\u578b  \u5b57\u6bb5\u540d                      \u8bf4\u660e\n========  ==================  ===============================\nint       friend_group_id     \u597d\u53cb\u5206\u7ec4 ID\nstr       friend_group_name   \u597d\u53cb\u5206\u7ec4\u540d\u79f0\nlist      friends             \u5206\u7ec4\u4e2d\u7684\u597d\u53cb\n========  ==================  ===============================\n\n\u5176\u4e2d\uff0c\u597d\u53cb\u4fe1\u606f\u7ed3\u6784\u4ee5 **\u5b57\u5178** \u7684\u5f62\u5f0f\u5b58\u50a8\u5728\u54cd\u5e94\u6570\u636e\u4e2d\u7684\u5206\u7ec4\u4e2d\u7684\u597d\u53cb `friends` \u7684 **\u5217\u8868** \u4e2d\u3002`( List[ Dict[ ...] 
] )`\n\n========  ==================  ===============================\n\u597d\u53cb\u4fe1\u606f\u7ed3\u6784\n-------------------------------------------------------------\n\u6570\u636e\u7c7b\u578b  \u5b57\u6bb5\u540d                      \u8bf4\u660e\n========  ==================  ===============================\nstr       nickname            \u597d\u53cb\u6635\u79f0\nstr       remark              \u597d\u53cb\u5907\u6ce8\nint       user_id             \u597d\u53cb QQ \u53f7\n========  ==================  ===============================", "id": "f847:c0:m37"}
{"signature": "def set_group_ban(self, *, group_id, user_id, duration=<NUM_LIT:30> * <NUM_LIT>):", "body": "return super().__getattr__('<STR_LIT>')(group_id=group_id, user_id=user_id, duration=duration)<EOL>", "docstring": "\u7fa4\u7ec4\u5355\u4eba\u7981\u8a00\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param int user_id: \u8981\u7981\u8a00\u7684 QQ \u53f7\n:param int duration: \u7981\u8a00\u65f6\u957f\uff0c\u5355\u4f4d\u79d2\uff0c0 \u8868\u793a\u53d6\u6d88\u7981\u8a00\n:return: None\n:rtype: None", "id": "f847:c0:m12"}
{"signature": "def delete_msg(self, *, message_id):", "body": "return super().__getattr__('<STR_LIT>')(message_id=message_id)<EOL>", "docstring": "\u64a4\u56de\u6d88\u606f\n\n------------\n\n:param int message_id: \u6d88\u606f ID\n:return: None\n:rtype: None", "id": "f847:c0:m9"}
{"signature": "def send_like(self, *, user_id, times=<NUM_LIT:1>):", "body": "return super().__getattr__('<STR_LIT>')(user_id=user_id, times=times)<EOL>", "docstring": "\u53d1\u9001\u597d\u53cb\u8d5e\n\n------------\n\n:param int user_id: \u5bf9\u65b9 QQ \u53f7\n:param int times: \u8d5e\u7684\u6b21\u6570\uff0c\u6bcf\u4e2a\u597d\u53cb\u6bcf\u5929\u6700\u591a 10 \u6b21\n:return: None\n:rtype: None", "id": "f847:c0:m10"}
{"signature": "def send_msg(self, *, message_type, user_id=None, group_id=None, discuss_id=None, message, auto_escape=False):", "body": "return super().__getattr__('<STR_LIT>')(message_type=message_type, user_id=user_id, group_id=group_id,<EOL>discuss_id=discuss_id, message=message, auto_escape=auto_escape)<EOL>", "docstring": "\u53d1\u9001\u6d88\u606f\n\n------------\n\n:param str message_type: \u6d88\u606f\u7c7b\u578b\uff0c\u652f\u6301 `private`\u3001`group`\u3001`discuss`\uff0c\u5206\u522b\u5bf9\u5e94\u79c1\u804a\u3001\u7fa4\u7ec4\u3001\u8ba8\u8bba\u7ec4\n:param int user_id: \u5bf9\u65b9 QQ \u53f7\uff08\u6d88\u606f\u7c7b\u578b\u4e3a `private` \u65f6\u9700\u8981\uff09\n:param int group_id: \u7fa4\u53f7\uff08\u6d88\u606f\u7c7b\u578b\u4e3a `group` \u65f6\u9700\u8981\uff09\n:param int discuss_id: \u8ba8\u8bba\u7ec4 ID\uff08\u9700\u8981\u4ece\u4e0a\u62a5\u6d88\u606f\u4e2d\u83b7\u53d6\uff0c\u6d88\u606f\u7c7b\u578b\u4e3a `discuss` \u65f6\u9700\u8981\uff09\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: {\"message_id\": int \u6d88\u606fID}\n:rtype: dict[string, int]", "id": "f847:c0:m7"}
{"signature": "def send_discuss_msg(self, *, discuss_id, message, auto_escape=False):", "body": "return super().__getattr__('<STR_LIT>')(discuss_id=discuss_id, message=message, auto_escape=auto_escape)<EOL>", "docstring": "\u53d1\u9001\u8ba8\u8bba\u7ec4\u6d88\u606f\n\n------------\n\n:param int discuss_id: \u8ba8\u8bba\u7ec4 ID\uff08\u6b63\u5e38\u60c5\u51b5\u4e0b\u770b\u4e0d\u5230\uff0c\u9700\u8981\u4ece\u8ba8\u8bba\u7ec4\u6d88\u606f\u4e0a\u62a5\u7684\u6570\u636e\u4e2d\u83b7\u5f97\uff09\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: {\"message_id\": int \u6d88\u606fID}\n:rtype: dict[string, int]", "id": "f847:c0:m5"}
{"signature": "def get_login_info(self):", "body": "return super().__getattr__('<STR_LIT>')()<EOL>", "docstring": "\u83b7\u53d6\u767b\u5f55\u53f7\u4fe1\u606f\n\n------------\n\n:return: { \"user_id\": (QQ \u53f7: int), \"nickname\": (QQ \u6635\u79f0: str) }\n:rtype: dict[ str, int | str ]\n\n------------\n\n=========  =========  =========\n\u54cd\u5e94\u6570\u636e\n-------------------------------\n\u6570\u636e\u7c7b\u578b   \u5b57\u6bb5\u540d      \u8bf4\u660e\n=========  =========  =========\nint        user_id    QQ \u53f7\nstr        nickname   QQ \u6635\u79f0\n=========  =========  =========", "id": "f847:c0:m23"}
{"signature": "def set_group_anonymous(self, *, group_id, enable=True):", "body": "return super().__getattr__('<STR_LIT>')(group_id=group_id, enable=enable)<EOL>", "docstring": "\u7fa4\u7ec4\u533f\u540d\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param bool enable: \u662f\u5426\u5141\u8bb8\u533f\u540d\u804a\u5929\n:return: None\n:rtype: None", "id": "f847:c0:m16"}
{"signature": "def clean_data_dir(self, *, data_dir):", "body": "return super().__getattr__('<STR_LIT>')(data_dir=data_dir)<EOL>", "docstring": "\u6e05\u7406\u6570\u636e\u76ee\u5f55\n\n------------\n\n:param str data_dir: \u6536\u5230\u6e05\u7406\u7684\u76ee\u5f55\u540d\uff0c\u652f\u6301 `image`\u3001`record`\u3001`show`\u3001`bface`\n:return: None\n:rtype: None\n\n------------\n\n\u7528\u4e8e\u6e05\u7406\u79ef\u6512\u4e86\u592a\u591a\u65e7\u6587\u4ef6\u7684\u6570\u636e\u76ee\u5f55\uff0c\u5982 `image`\u3002\n\nHTTP API v3.3.4 \u65b0\u589e", "id": "f847:c0:m35"}
{"signature": "def send_group_msg_async(self, *, group_id, message, auto_escape=False):", "body": "return super().__getattr__('<STR_LIT>')(group_id=group_id, message=message, auto_escape=auto_escape)<EOL>", "docstring": "\u53d1\u9001\u7fa4\u6d88\u606f (\u5f02\u6b65\u7248\u672c)\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: None\n:rtype: None", "id": "f847:c0:m4"}
{"signature": "def get_version_info(self):", "body": "return super().__getattr__('<STR_LIT>')()<EOL>", "docstring": "\u83b7\u53d6\u9177 Q \u53ca HTTP API \u63d2\u4ef6\u7684\u7248\u672c\u4fe1\u606f\n\n------------\n\n:return: { \"coolq_directory\": (\u9177Q\u6839\u76ee\u5f55\u8def\u5f84: str), \"coolq_edition\": (\u9177Q\u7248\u672c: str in ['air', 'pro']), \"plugin_version\": (API\u63d2\u4ef6\u7248\u672c: str), \"plugin_build_number\": (API\u63d2\u4ef6build\u53f7: int), \"plugin_build_configuration\": (API\u63d2\u4ef6\u7f16\u8bd1\u914d\u7f6e: str in ['debug', 'release']) }\n:rtype: dict[ str, int | str ]\n\n\n------------\n\n========  ==========================  ===============================\n\u54cd\u5e94\u6570\u636e\n---------------------------------------------------------------------\n\u6570\u636e\u7c7b\u578b  \u5b57\u6bb5\u540d                      \u8bf4\u660e\n========  ==========================  ===============================\nstr       coolq_directory             \u9177 Q \u6839\u76ee\u5f55\u8def\u5f84\nstr       coolq_edition               \u9177 Q \u7248\u672c\uff0c`air` \u6216 `pro`\nstr       plugin_version              HTTP API \u63d2\u4ef6\u7248\u672c\uff0c\u4f8b\u5982 2.1.3\nint       plugin_build_number         HTTP API \u63d2\u4ef6 build \u53f7\nstr       plugin_build_configuration  HTTP API \u63d2\u4ef6\u7f16\u8bd1\u914d\u7f6e\uff0c`debug` \u6216 `release`\n========  ==========================  ===============================", "id": "f847:c0:m32"}
{"signature": "def send_msg_async(self, *, message_type, user_id=None, group_id=None, discuss_id=None, message, auto_escape=False):", "body": "return super().__getattr__('<STR_LIT>')(message_type=message_type, user_id=user_id, group_id=group_id,<EOL>discuss_id=discuss_id, message=message, auto_escape=auto_escape)<EOL>", "docstring": "\u53d1\u9001\u6d88\u606f (\u5f02\u6b65\u7248\u672c)\n\n------------\n\n:param str message_type: \u6d88\u606f\u7c7b\u578b\uff0c\u652f\u6301 `private`\u3001`group`\u3001`discuss`\uff0c\u5206\u522b\u5bf9\u5e94\u79c1\u804a\u3001\u7fa4\u7ec4\u3001\u8ba8\u8bba\u7ec4\n:param int user_id: \u5bf9\u65b9 QQ \u53f7\uff08\u6d88\u606f\u7c7b\u578b\u4e3a `private` \u65f6\u9700\u8981\uff09\n:param int group_id: \u7fa4\u53f7\uff08\u6d88\u606f\u7c7b\u578b\u4e3a `group` \u65f6\u9700\u8981\uff09\n:param int discuss_id: \u8ba8\u8bba\u7ec4 ID\uff08\u9700\u8981\u4ece\u4e0a\u62a5\u6d88\u606f\u4e2d\u83b7\u53d6\uff0c\u6d88\u606f\u7c7b\u578b\u4e3a `discuss` \u65f6\u9700\u8981\uff09\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: None\n:rtype: None", "id": "f847:c0:m8"}
{"signature": "def set_group_whole_ban(self, *, group_id, enable=True):", "body": "return super().__getattr__('<STR_LIT>')(group_id=group_id, enable=enable)<EOL>", "docstring": "\u7fa4\u7ec4\u5168\u5458\u7981\u8a00\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param bool enable: \u662f\u5426\u7981\u8a00\n:return: None\n:rtype: None", "id": "f847:c0:m14"}
{"signature": "def send_private_msg(self, *, user_id, message, auto_escape=False):", "body": "return super().__getattr__('<STR_LIT>')(user_id=user_id, message=message, auto_escape=auto_escape)<EOL>", "docstring": "\u53d1\u9001\u79c1\u804a\u6d88\u606f\n\n------------\n\n:param int user_id: \u5bf9\u65b9 QQ \u53f7\n:param str | list[ dict[ str, unknown ] ] message: \u8981\u53d1\u9001\u7684\u5185\u5bb9\n:param bool auto_escape: \u6d88\u606f\u5185\u5bb9\u662f\u5426\u4f5c\u4e3a\u7eaf\u6587\u672c\u53d1\u9001\uff08\u5373\u4e0d\u89e3\u6790 CQ \u7801\uff09\uff0c`message` \u6570\u636e\u7c7b\u578b\u4e3a `list` \u65f6\u65e0\u6548\n:return: {\"message_id\": int \u6d88\u606fID}\n:rtype: dict[string, int]", "id": "f847:c0:m1"}
{"signature": "def set_group_leave(self, *, group_id, is_dismiss=False):", "body": "return super().__getattr__('<STR_LIT>')(group_id=group_id, is_dismiss=is_dismiss)<EOL>", "docstring": "\u9000\u51fa\u7fa4\u7ec4\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param bool is_dismiss: \u662f\u5426\u89e3\u6563\uff0c\u5982\u679c\u767b\u5f55\u53f7\u662f\u7fa4\u4e3b\uff0c\u5219\u4ec5\u5728\u6b64\u9879\u4e3a true \u65f6\u80fd\u591f\u89e3\u6563\n:return: None\n:rtype: None", "id": "f847:c0:m18"}
{"signature": "def set_group_special_title(self, *, group_id, user_id, special_title, duration=-<NUM_LIT:1>):", "body": "return super().__getattr__('<STR_LIT>')(group_id=group_id, user_id=user_id, special_title=special_title, duration=duration)<EOL>", "docstring": "\u8bbe\u7f6e\u7fa4\u7ec4\u4e13\u5c5e\u5934\u8854\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:param int user_id: \u8981\u8bbe\u7f6e\u7684 QQ \u53f7\n:param str special_title: \u4e13\u5c5e\u5934\u8854\uff0c\u4e0d\u586b\u6216\u7a7a\u5b57\u7b26\u4e32\u8868\u793a\u5220\u9664\u4e13\u5c5e\u5934\u8854\uff0c\u53ea\u80fd\u4fdd\u7559\u524d6\u4e2a\u82f1\u6587\u4e0e\u6c49\u5b57\uff0cEmoji \u6839\u636e\u5b57\u7b26\u5b9e\u9645\u5b57\u7b26\u957f\u5ea6\u5360\u7528\u53ea\u80fd\u653e\u6700\u591a3\u4e2a\u751a\u81f3\u66f4\u5c11\uff0c\u8d85\u51fa\u957f\u5ea6\u90e8\u5206\u4f1a\u88ab\u622a\u65ad\n:param int duration: \u4e13\u5c5e\u5934\u8854\u6709\u6548\u671f\uff0c\u5355\u4f4d\u79d2\uff0c-1 \u8868\u793a\u6c38\u4e45\uff0c\u4e0d\u8fc7\u6b64\u9879\u4f3c\u4e4e\u6ca1\u6709\u6548\u679c\uff0c\u53ef\u80fd\u662f\u53ea\u6709\u67d0\u4e9b\u7279\u6b8a\u7684\u65f6\u95f4\u957f\u5ea6\u6709\u6548\uff0c\u6709\u5f85\u6d4b\u8bd5\n:return: None\n:rtype: None", "id": "f847:c0:m19"}
{"signature": "def get_group_member_list(self, *, group_id):", "body": "return super().__getattr__('<STR_LIT>')(group_id=group_id)<EOL>", "docstring": "\u83b7\u53d6\u7fa4\u6210\u5458\u5217\u8868\n\n------------\n\n:param int group_id: \u7fa4\u53f7\n:return: [{ \"group_id\": (\u7fa4\u53f7: int), \"user_id\": (QQ \u53f7: int), \"nickname\": (\u6635\u79f0: str), \"card\": (\u7fa4\u540d\u7247/\u5907\u6ce8: str), \"sex\": (\u6027\u522b: str in ['male', 'female', 'unknown']), \"age\": (\u5e74\u9f84: int), \"area\": (\u5730\u533a: str), \"join_time\": (\u52a0\u7fa4\u65f6\u95f4\u6233: int), \"last_sent_time\": (\u6700\u540e\u53d1\u8a00\u65f6\u95f4\u6233: int), \"level\": (\u6210\u5458\u7b49\u7ea7: str), \"role\": (\u89d2\u8272: str in ['owner', 'admin', 'member']), \"unfriendly\": (\u662f\u5426\u4e0d\u826f\u8bb0\u5f55\u6210\u5458: bool), \"title\": (\u4e13\u5c5e\u5934\u8854: str), \"title_expire_time\": (\u4e13\u5c5e\u5934\u8854\u8fc7\u671f\u65f6\u95f4\u6233: int), \"card_changeable\": (\u662f\u5426\u5141\u8bb8\u4fee\u6539\u7fa4\u540d\u7247: bool) }, ...]\n:rtype: list[ dict[ str, int | str | bool ] ]\n\n------------\n\n\u54cd\u5e94\u6570\u636e\u4ee5 **\u5217\u8868** \u5305\u88c5\u7684\u5b57\u5178\u7684\u5f62\u5f0f\u63d0\u4f9b\u3002`( List[ Dict[ ...] 
] )`\n\n========  ===================  ======================================\n    \u54cd\u5e94\u6570\u636e\n---------------------------------------------------------------------\n\u6570\u636e\u7c7b\u578b  \u5b57\u6bb5\u540d               \u8bf4\u660e\n========  ===================  ======================================\nint       group_id             \u7fa4\u53f7\nint       user_id              QQ \u53f7\nstr       nickname             \u6635\u79f0\nstr       card                 \u7fa4\u540d\u7247/\u5907\u6ce8\nstr       sex                  \u6027\u522b\uff0c`male` \u6216 `female` \u6216 `unknown`\nint       age                  \u5e74\u9f84\nstr       area                 \u5730\u533a\nint       join_time            \u52a0\u7fa4\u65f6\u95f4\u6233\nint       last_sent_time       \u6700\u540e\u53d1\u8a00\u65f6\u95f4\u6233\nstr       level                \u6210\u5458\u7b49\u7ea7\nstr       role                 \u89d2\u8272\uff0c`owner` \u6216 `admin` \u6216 `member`\nbool      unfriendly           \u662f\u5426\u4e0d\u826f\u8bb0\u5f55\u6210\u5458\nstr       title                \u4e13\u5c5e\u5934\u8854\nint       title_expire_time    \u4e13\u5c5e\u5934\u8854\u8fc7\u671f\u65f6\u95f4\u6233\nbool      card_changeable      \u662f\u5426\u5141\u8bb8\u4fee\u6539\u7fa4\u540d\u7247\n========  ===================  ======================================\n\n**\u5907\u6ce8:** \u54cd\u5e94\u5185\u5bb9\u4e3a\u5305\u542b\u5b57\u5178\u7684\u5217\u8868 *( List[ Dict[] ] )* \uff0c\u6bcf\u4e2a\u5143\u7d20\u7684\u5185\u5bb9\u548c `get_group_member_info` \u63a5\u53e3\u76f8\u540c\uff0c\u4f46\u5bf9\u4e8e\u540c\u4e00\u4e2a\u7fa4\u7ec4\u7684\u540c\u4e00\u4e2a\u6210\u5458\uff0c\u83b7\u53d6\u5217\u8868\u65f6\u548c\u83b7\u53d6\u5355\u72ec\u7684\u6210\u5458\u4fe1\u606f\u65f6\uff0c\u67d0\u4e9b\u5b57\u6bb5\u53ef\u80fd\u6709\u6240\u4e0d\u540c\uff0c\u4f8b\u5982 `area`\u3001`title` 
\u7b49\u5b57\u6bb5\u5728\u83b7\u53d6\u5217\u8868\u65f6\u65e0\u6cd5\u83b7\u5f97\uff0c\u5177\u4f53\u5e94\u4ee5\u5355\u72ec\u7684\u6210\u5458\u4fe1\u606f\u4e3a\u51c6\u3002", "id": "f847:c0:m27"}
{"signature": "def set_discuss_leave(self, *, discuss_id):", "body": "return super().__getattr__('<STR_LIT>')(discuss_id=discuss_id)<EOL>", "docstring": "\u9000\u51fa\u8ba8\u8bba\u7ec4\n\n------------\n\n:param int discuss_id: \u8ba8\u8bba\u7ec4 ID\uff08\u6b63\u5e38\u60c5\u51b5\u4e0b\u770b\u4e0d\u5230\uff0c\u9700\u8981\u4ece\u8ba8\u8bba\u7ec4\u6d88\u606f\u4e0a\u62a5\u7684\u6570\u636e\u4e2d\u83b7\u5f97\uff09\n:return: None\n:rtype: None", "id": "f847:c0:m20"}
{"signature": "def _import_module(name):", "body": "__import__(name)<EOL>return sys.modules[name]<EOL>", "docstring": "Import module, returning the module after the last dot.", "id": "f851:m1"}
{"signature": "def iteritems(d, **kw):", "body": "return iter(getattr(d, _iteritems)(**kw))<EOL>", "docstring": "Return an iterator over the (key, value) pairs of a dictionary.", "id": "f851:m6"}
{"signature": "def _add_doc(func, doc):", "body": "func.__doc__ = doc<EOL>", "docstring": "Add documentation to a function.", "id": "f851:m0"}
{"signature": "def scanAllProcessesForMapping(searchPortion, isExactMatch=False, ignoreCase=False):", "body": "pids = getAllRunningPids()<EOL>mappingResults = [scanProcessForMapping(pid, searchPortion, isExactMatch, ignoreCase) for pid in pids]<EOL>ret = {}<EOL>for i in range(len(pids)):<EOL><INDENT>if mappingResults[i] is not None:<EOL><INDENT>ret[pids[i]] = mappingResults[i]<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "scanAllProcessesForMapping - Scans all processes on the system for a given search pattern.\n\n    @param searchPortion <str> - A mapping for which to search, example: libc or python or libz.so.1. Give empty string to return all mappings.\n    @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed.\n    @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively\n\n@return - <dict> - A dictionary of pid -> mappingResults for each pid that matched the search pattern. For format of \"mappingResults\", @see scanProcessForMapping", "id": "f857:m9"}
{"signature": "def getProcessCommandLineStr(pid):", "body": "try:<EOL><INDENT>with open('<STR_LIT>' %(int(pid),), '<STR_LIT:r>') as f:<EOL><INDENT>cmdline = f.read()<EOL><DEDENT>return cmdline.replace('<STR_LIT:\\x00>', '<STR_LIT:U+0020>')<EOL><DEDENT>except:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "getProcessCommandLineStr - Gets a the commandline (program + arguments) of a given pid\n\n@param pid <int> - Process ID\n\n@return - None if process not found or can't be determined. Otherwise a string of commandline.\n\n@note Caution, args may have spaces in them, and you cannot surmise from this method. If you care (like trying to replay a command), use getProcessCommandLineList instead", "id": "f857:m2"}
{"signature": "def scanProcessForMapping(pid, searchPortion, isExactMatch=False, ignoreCase=False):", "body": "try:   <EOL><INDENT>try:<EOL><INDENT>pid = int(pid)<EOL><DEDENT>except ValueError as e:<EOL><INDENT>sys.stderr.write('<STR_LIT>' %(str(type(pid)),))<EOL>raise e<EOL><DEDENT>with open('<STR_LIT>' %(pid,), '<STR_LIT:r>') as f:<EOL><INDENT>contents = f.read()<EOL><DEDENT>lines = contents.split('<STR_LIT:\\n>')<EOL>matchedMappings = []<EOL>if isExactMatch is True:<EOL><INDENT>if ignoreCase is False:<EOL><INDENT>isMatch = lambda searchFor, searchIn : bool(searchFor == searchIn)<EOL><DEDENT>else:<EOL><INDENT>isMatch = lambda searchFor, searchIn : bool(searchFor.lower() == searchIn.lower())<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if ignoreCase is False:<EOL><INDENT>isMatch = lambda searchFor, searchIn : bool(searchFor in searchIn)<EOL><DEDENT>else:<EOL><INDENT>isMatch = lambda searchFor, searchIn : bool(searchFor.lower() in searchIn.lower())<EOL><DEDENT><DEDENT>for line in lines:<EOL><INDENT>portion = '<STR_LIT:U+0020>'.join(line.split('<STR_LIT:U+0020>')[<NUM_LIT:5>:]).lstrip()<EOL>if isMatch(searchPortion, portion):<EOL><INDENT>matchedMappings.append('<STR_LIT:\\t>' + line)<EOL><DEDENT><DEDENT>if len(matchedMappings) == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>cmdline = getProcessCommandLineStr(pid)<EOL>owner   = getProcessOwnerStr(pid)<EOL>return {<EOL>'<STR_LIT>' : searchPortion,<EOL>'<STR_LIT>'           : pid,<EOL>'<STR_LIT>'         : owner,<EOL>'<STR_LIT>'       : cmdline,<EOL>'<STR_LIT>' : matchedMappings,<EOL>}<EOL><DEDENT>except OSError:<EOL><INDENT>return None<EOL><DEDENT>except IOError:<EOL><INDENT>return None<EOL><DEDENT>except FileNotFoundError:<EOL><INDENT>return None<EOL><DEDENT>except PermissionError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "scanProcessForMapping - Searches a given pid's mappings for a certain pattern.\n\n    @param pid <int> - A running process ID on this system\n    @param searchPortion <str> - A mapping for 
which to search, example: libc or python or libz.so.1. Give empty string to return all mappings.\n    @param isExactMatch <bool> Default False - If match should be exact, otherwise a partial match is performed.\n    @param ignoreCase <bool> Default False - If True, search will be performed case-insensitively\n\n    @return <dict> - If result is found, the following dict is returned. If no match found on the given pid, or pid is not found running, None is returned.\n        {\n            'searchPortion' : The passed search pattern\n            'pid'           : The passed pid (as an integer)\n            'owner'         : String of process owner, or uid if no mapping can be found, or \"unknown\" if neither could be determined.\n            'cmdline'       : Commandline string\n            'matchedMappings' : All mappings likes that matched the given search pattern\n        }", "id": "f857:m8"}
{"signature": "def getProcessCwd(pid):", "body": "try:<EOL><INDENT>cwd = os.readlink('<STR_LIT>' %(int(pid), ))<EOL>return cwd<EOL><DEDENT>except:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "getProcessCwd - Gets the cwd (current working directory) of a given pid\n\n@param pid <int> - Process ID\n\n@return <str/None> - None if process not found or can't be determined. Otherwise, a string of the CWD", "id": "f857:m4"}
{"signature": "def getAllRunningPids():", "body": "return [int(x) for x in os.listdir('<STR_LIT>') if x.isdigit()]<EOL>", "docstring": "getAllRunningPids - Gets list of all pids that are running on a given system\n\n@return <list<int>> - A list of pids (process IDs).", "id": "f857:m5"}
{"signature": "def getProcessCommandLineList(pid):", "body": "try:<EOL><INDENT>with open('<STR_LIT>' %(int(pid),), '<STR_LIT:r>') as f:<EOL><INDENT>cmdline = f.read()<EOL><DEDENT>return cmdline.split('<STR_LIT:\\x00>')<EOL><DEDENT>except:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "getProcessCommandLineList - Gets the commandline (program + argumentS) of a given pid as a list.\n\n@param pid <int> - Process ID\n\n@return - None if process not found or can't be determined. Otherwise a list representing argv. First argument is process name, remainder are arguments.\n\n@note - Use this if you care about whether a process had a space in the commands", "id": "f857:m3"}
{"signature": "def run(self):", "body": "config = config_creator()<EOL>debug = config.debug<EOL>branch_thread_sleep = config.branch_thread_sleep<EOL>while <NUM_LIT:1>:<EOL><INDENT>url = self.branch_queue.get()<EOL>if debug:<EOL><INDENT>print('<STR_LIT>'.format(url))<EOL><DEDENT>branch_spider = self.branch_spider(url)<EOL>sleep(random.randrange(*branch_thread_sleep))<EOL>branch_spider.request_page()<EOL>if debug:<EOL><INDENT>print('<STR_LIT>'.format(url))<EOL><DEDENT>self.branch_queue.task_done()<EOL><DEDENT>", "docstring": "run your main spider here\n        as for branch spider result data, you can return everything or do whatever with it\n        in your own code\n\n        :return: None", "id": "f867:c0:m1"}
{"signature": "def run(self):", "body": "global existed_urls_list<EOL>config = config_creator()<EOL>debug = config.debug<EOL>main_thread_sleep = config.main_thread_sleep<EOL>branch_thread_num = config.branch_thread_num<EOL>while <NUM_LIT:1>:<EOL><INDENT>url = self.main_queue.get()<EOL>if debug:<EOL><INDENT>print('<STR_LIT>'.format(url))<EOL><DEDENT>main_spider = self.main_spider(url)<EOL>sleep(random.randrange(*main_thread_sleep))<EOL>links = main_spider.request_urls()<EOL>try:<EOL><INDENT>assert type(links) in VALIDATE_URLS<EOL><DEDENT>except AssertionError:<EOL><INDENT>error_message('<STR_LIT>')<EOL>links = list()<EOL><DEDENT>branch_queue = queue.Queue(branch_thread_num)<EOL>for i in range(branch_thread_num):<EOL><INDENT>branch_thread = BranchThread(branch_queue=branch_queue,<EOL>branch_spider=self.branch_spider)<EOL>branch_thread.daemon = True<EOL>branch_thread.start()<EOL><DEDENT>for link in links:<EOL><INDENT>if link not in existed_urls_list:<EOL><INDENT>existed_urls_list.append(link)<EOL>branch_queue.put(link)<EOL><DEDENT><DEDENT>branch_queue.join()<EOL>if debug:<EOL><INDENT>print('<STR_LIT>'.format(url))<EOL><DEDENT>self.main_queue.task_done()<EOL><DEDENT>", "docstring": "run your main spider here, and get a list/tuple of url as result\n        then make the instance of branch thread\n\n        :return: None", "id": "f869:c0:m1"}
{"signature": "def error_message(message='<STR_LIT>'):", "body": "print(colorful_text(message, Fore.RED))<EOL>", "docstring": "print the error message in red color\n\n    :param message: error message\n    :return: None", "id": "f872:m1"}
{"signature": "def colorful_text(text, color=Fore.RESET):", "body": "return color + text + Fore.RESET<EOL>", "docstring": "make target text colorful\n\n    :param text: target text\n    :param color\n    :return: colored text", "id": "f872:m0"}
{"signature": "def value_from_datadict(self, *args, **kwargs):", "body": "value = super(RichTextWidget, self).value_from_datadict(<EOL>*args, **kwargs)<EOL>if value is not None:<EOL><INDENT>value = self.get_sanitizer()(value)<EOL><DEDENT>return value<EOL>", "docstring": "Pass the submitted value through the sanitizer before returning it.", "id": "f907:c0:m4"}
{"signature": "def clean(self, value, model_instance):", "body": "value = self.to_python(value)<EOL>if value is not None:<EOL><INDENT>value = self.get_sanitizer()(value)<EOL><DEDENT>self.validate(value, model_instance)<EOL>self.run_validators(value)<EOL>return value<EOL>", "docstring": "Convert the value's type, sanitize it, and run validation. Validation\nerrors from to_python() and validate() are propagated. Return the\ncorrect value if no error is raised.", "id": "f908:c0:m2"}
{"signature": "def setup(app):", "body": "lexer = MarkdownLexer()<EOL>for alias in lexer.aliases:<EOL><INDENT>app.add_lexer(alias, lexer)<EOL><DEDENT>return dict(version=__version__)<EOL>", "docstring": "Initializer for Sphinx extension API.\n\n        See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.", "id": "f916:m0"}
{"signature": "def _build_metadata(): ", "body": "<EOL>expected_keys = ('<STR_LIT:url>', '<STR_LIT:version>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>metadata = {}<EOL>with io.open(srcfile('<STR_LIT:src>', package_name, '<STR_LIT>'), encoding='<STR_LIT:utf-8>') as handle:<EOL><INDENT>pkg_init = handle.read()<EOL>metadata['<STR_LIT>'] = re.search(r'<STR_LIT>', pkg_init, re.DOTALL|re.MULTILINE).group(<NUM_LIT:1>)<EOL>for line in pkg_init.splitlines():<EOL><INDENT>match = re.match(r\"\"\"<STR_LIT>\"\"\".format('<STR_LIT:|>'.join(expected_keys)), line)<EOL>if match:<EOL><INDENT>metadata[match.group(<NUM_LIT:1>)] = match.group(<NUM_LIT:3>)<EOL><DEDENT><DEDENT><DEDENT>if not all(i in metadata for i in expected_keys):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>.format(name, '<STR_LIT:U+002CU+0020>'.join(sorted(set(expected_keys) - set(metadata.keys()))),))<EOL><DEDENT>text = metadata['<STR_LIT>'].strip()<EOL>if text:<EOL><INDENT>metadata['<STR_LIT:description>'], text = text.split('<STR_LIT:.>', <NUM_LIT:1>)<EOL>metadata['<STR_LIT:description>'] = '<STR_LIT:U+0020>'.join(metadata['<STR_LIT:description>'].split()).strip() + '<STR_LIT:.>' <EOL>metadata['<STR_LIT>'] = textwrap.dedent(text).strip()<EOL><DEDENT>metadata['<STR_LIT>'] = metadata['<STR_LIT>'].replace('<STR_LIT:U+002C>', '<STR_LIT:U+0020>').strip().split()<EOL>requirements_files = dict(<EOL>install = '<STR_LIT>',<EOL>setup = '<STR_LIT>',<EOL>test = '<STR_LIT>',<EOL>)<EOL>requires = {}<EOL>for key, filename in requirements_files.items():<EOL><INDENT>requires[key] = []<EOL>if os.path.exists(srcfile(filename)):<EOL><INDENT>with io.open(srcfile(filename), encoding='<STR_LIT:utf-8>') as handle:<EOL><INDENT>for line in handle:<EOL><INDENT>line = line.strip()<EOL>if line and not line.startswith('<STR_LIT:#>'):<EOL><INDENT>if any(line.startswith(i) for i in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')):<EOL><INDENT>line = 
line.split('<STR_LIT>')[<NUM_LIT:1>]<EOL><DEDENT>requires[key].append(line)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if not any('<STR_LIT>' == re.split('<STR_LIT>', i.lower())[<NUM_LIT:0>] for i in requires['<STR_LIT:test>']):<EOL><INDENT>requires['<STR_LIT:test>'].append('<STR_LIT>') <EOL><DEDENT>console_scripts = []<EOL>for path, dirs, files in os.walk(srcfile('<STR_LIT:src>', package_name)):<EOL><INDENT>dirs = [i for i in dirs if not i.startswith('<STR_LIT:.>')]<EOL>if '<STR_LIT>' in files:<EOL><INDENT>path = path[len(srcfile('<STR_LIT:src>') + os.sep):]<EOL>appname = path.split(os.sep)[-<NUM_LIT:1>]<EOL>with io.open(srcfile('<STR_LIT:src>', path, '<STR_LIT>'), encoding='<STR_LIT:utf-8>') as handle:<EOL><INDENT>for line in handle.readlines():<EOL><INDENT>match = re.match(r\"\"\"<STR_LIT>\"\"\", line)<EOL>if match:<EOL><INDENT>appname = match.group(<NUM_LIT:2>)<EOL><DEDENT><DEDENT><DEDENT>console_scripts.append('<STR_LIT>'.format(appname, path.replace(os.sep, '<STR_LIT:.>')))<EOL><DEDENT><DEDENT>candidate_files = [<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>]<EOL>data_files = defaultdict(list)<EOL>for filename in candidate_files:<EOL><INDENT>if os.path.exists(srcfile(filename)):<EOL><INDENT>data_files['<STR_LIT>'].append(filename)<EOL><DEDENT><DEDENT>classifiers = []<EOL>for classifiers_txt in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>classifiers_txt = srcfile(classifiers_txt)<EOL>if os.path.exists(classifiers_txt):<EOL><INDENT>with io.open(classifiers_txt, encoding='<STR_LIT:utf-8>') as handle:<EOL><INDENT>classifiers = [i.strip() for i in handle if i.strip() and not i.startswith('<STR_LIT:#>')]<EOL><DEDENT>break<EOL><DEDENT><DEDENT>entry_points.setdefault('<STR_LIT>', []).extend(console_scripts)<EOL>metadata.update(dict(<EOL>name = name,<EOL>package_dir = {'<STR_LIT>': '<STR_LIT:src>'},<EOL>packages = find_packages(srcfile('<STR_LIT:src>'), 
exclude=['<STR_LIT>']),<EOL>data_files = data_files.items(),<EOL>zip_safe = False,<EOL>include_package_data = True,<EOL>install_requires = requires['<STR_LIT>'],<EOL>setup_requires = requires['<STR_LIT>'],<EOL>tests_require =  requires['<STR_LIT:test>'],<EOL>classifiers = classifiers,<EOL>cmdclass = dict(<EOL>test = PyTest,<EOL>),<EOL>entry_points = entry_points,<EOL>))<EOL>return metadata<EOL>", "docstring": "Return project's metadata as a dict.", "id": "f921:m1"}
{"signature": "def predict_peptides(self, peptides):", "body": "<EOL>from mhcflurry.encodable_sequences import EncodableSequences<EOL>binding_predictions = []<EOL>encodable_sequences = EncodableSequences.create(peptides)<EOL>for allele in self.alleles:<EOL><INDENT>predictions_df = self.predictor.predict_to_dataframe(<EOL>encodable_sequences, allele=allele)<EOL>for (_, row) in predictions_df.iterrows():<EOL><INDENT>binding_prediction = BindingPrediction(<EOL>allele=allele,<EOL>peptide=row.peptide,<EOL>affinity=row.prediction,<EOL>percentile_rank=(<EOL>row.prediction_percentile<EOL>if '<STR_LIT>' in row else nan),<EOL>prediction_method_name=\"<STR_LIT>\"<EOL>)<EOL>binding_predictions.append(binding_prediction)<EOL><DEDENT><DEDENT>return BindingPredictionCollection(binding_predictions)<EOL>", "docstring": "Predict MHC affinity for peptides.", "id": "f925:c0:m1"}
{"signature": "def predict(self, sequences):", "body": "with tempfile.NamedTemporaryFile(suffix=\"<STR_LIT>\", mode=\"<STR_LIT:w>\") as input_fd:<EOL><INDENT>for (i, sequence) in enumerate(sequences):<EOL><INDENT>input_fd.write(\"<STR_LIT>\" % i)<EOL>input_fd.write(sequence)<EOL>input_fd.write(\"<STR_LIT:\\n>\")<EOL><DEDENT>input_fd.flush()<EOL>try:<EOL><INDENT>output = subprocess.check_output([\"<STR_LIT>\", input_fd.name])<EOL><DEDENT>except subprocess.CalledProcessError as e:<EOL><INDENT>logging.error(\"<STR_LIT>\" % (e, e.output))<EOL>raise<EOL><DEDENT><DEDENT>parsed = self.parse_netchop(output)<EOL>assert len(parsed) == len(sequences),\"<STR_LIT>\" % (<EOL>len(sequences), len(parsed))<EOL>assert [len(x) for x in parsed] == [len(x) for x in sequences]<EOL>return parsed<EOL>", "docstring": "Return netChop predictions for each position in each sequence.\n\nParameters\n-----------\nsequences : list of string\n    Amino acid sequences to predict cleavage for\n\nReturns\n-----------\nlist of list of float\n\nThe i'th list corresponds to the i'th sequence. Each list gives\nthe cleavage probability for each position in the sequence.", "id": "f928:c0:m0"}
{"signature": "def create_input_peptides_files(<EOL>peptides,<EOL>max_peptides_per_file=None,<EOL>group_by_length=False):", "body": "if group_by_length:<EOL><INDENT>peptide_lengths = {len(p) for p in peptides}<EOL>peptide_groups = {l: [] for l in peptide_lengths}<EOL>for p in peptides:<EOL><INDENT>peptide_groups[len(p)].append(p)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>peptide_groups = {\"<STR_LIT>\": peptides}<EOL><DEDENT>file_names = []<EOL>for key, group in peptide_groups.items():<EOL><INDENT>n_peptides = len(group)<EOL>if not max_peptides_per_file:<EOL><INDENT>max_peptides_per_file = n_peptides<EOL><DEDENT>input_file = None<EOL>for i, p in enumerate(group):<EOL><INDENT>if i % max_peptides_per_file == <NUM_LIT:0>:<EOL><INDENT>if input_file is not None:<EOL><INDENT>file_names.append(input_file.name)<EOL>input_file.close()<EOL><DEDENT>input_file = make_writable_tempfile(<EOL>prefix_number=i // max_peptides_per_file,<EOL>prefix_name=key,<EOL>suffix=\"<STR_LIT>\")<EOL><DEDENT>input_file.write(\"<STR_LIT>\" % p)<EOL><DEDENT>if input_file is not None:<EOL><INDENT>file_names.append(input_file.name)<EOL>input_file.close()<EOL><DEDENT><DEDENT>return file_names<EOL>", "docstring": "Creates one or more files containing one peptide per line,\nreturns names of files.", "id": "f929:m1"}
{"signature": "def __init__(<EOL>self,<EOL>peptide,<EOL>allele,<EOL>affinity,<EOL>percentile_rank,<EOL>source_sequence_name=None,<EOL>offset=<NUM_LIT:0>,<EOL>log_affinity=None,<EOL>prediction_method_name=\"<STR_LIT>\"):", "body": "<EOL>if invalid_affinity(affinity) and np.isfinite(log_affinity):<EOL><INDENT>affinity = <NUM_LIT> ** (-log_affinity + <NUM_LIT:1>)<EOL><DEDENT>if invalid_affinity(affinity):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\" % (<EOL>affinity,<EOL>peptide,<EOL>allele))<EOL><DEDENT>if invalid_percentile_rank(percentile_rank):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\" % (<EOL>percentile_rank, peptide, allele))<EOL><DEDENT>self.source_sequence_name = source_sequence_name<EOL>self.offset = offset<EOL>self.allele = allele<EOL>self.peptide = peptide<EOL>self.affinity = affinity<EOL>self.percentile_rank = percentile_rank<EOL>self.prediction_method_name = prediction_method_name<EOL>", "docstring": "Parameters\n----------\npeptide : str\n    Short amino acid sequence\n\nallele : str\n    HLA allele, e.g. HLA-A*02:01\n\naffinity : float\n    Predicted binding affinity\n\npercentile_rank : float\n    Percentile rank of the binding affinity for that allele\n\nsource_sequence_name : str\n    Name of sequence from which peptide was extracted\n\noffset : int\n    Base0 starting position in source sequence that all epitopes were\n    extracted from\n\nlog_affinity : float, optional\n    NetMHC sometimes gives invalid IC50 values but we can still\n    reconstruct the value from its (1.0 - log_50000(IC50)) score.\n\nprediction_method_name : str, optional\n    Name of predictor used to generate this prediction.", "id": "f930:c0:m0"}
{"signature": "def clone_with_updates(self, **kwargs):", "body": "fields_dict = self.to_dict()<EOL>fields_dict.update(kwargs)<EOL>return BindingPrediction(**fields_dict)<EOL>", "docstring": "Returns new BindingPrediction with updated fields", "id": "f930:c0:m2"}
{"signature": "def run_multiple_commands_redirect_stdout(<EOL>multiple_args_dict,<EOL>print_commands=True,<EOL>process_limit=-<NUM_LIT:1>,<EOL>polling_freq=<NUM_LIT:0.5>,<EOL>**kwargs):", "body": "assert len(multiple_args_dict) > <NUM_LIT:0><EOL>assert all(len(args) > <NUM_LIT:0> for args in multiple_args_dict.values())<EOL>assert all(hasattr(f, '<STR_LIT:name>') for f in multiple_args_dict.keys())<EOL>if process_limit < <NUM_LIT:0>:<EOL><INDENT>logger.debug(\"<STR_LIT>\" % cpu_count())<EOL>process_limit = cpu_count()<EOL><DEDENT>start_time = time.time()<EOL>processes = Queue(maxsize=process_limit)<EOL>def add_to_queue(process):<EOL><INDENT>process.start()<EOL>if print_commands:<EOL><INDENT>handler = logging.FileHandler(process.redirect_stdout_file.name)<EOL>handler.setLevel(logging.DEBUG)<EOL>logger.addHandler(handler)<EOL>logger.debug(\"<STR_LIT:U+0020>\".join(process.args))<EOL>logger.removeHandler(handler)<EOL><DEDENT>processes.put(process)<EOL><DEDENT>for f, args in multiple_args_dict.items():<EOL><INDENT>p = AsyncProcess(<EOL>args,<EOL>redirect_stdout_file=f,<EOL>**kwargs)<EOL>if not processes.full():<EOL><INDENT>add_to_queue(p)<EOL><DEDENT>else:<EOL><INDENT>while processes.full():<EOL><INDENT>to_remove = []<EOL>for possibly_done in processes.queue:<EOL><INDENT>if possibly_done.poll() is not None:<EOL><INDENT>possibly_done.wait()<EOL>to_remove.append(possibly_done)<EOL><DEDENT><DEDENT>if to_remove:<EOL><INDENT>for process_to_remove in to_remove:<EOL><INDENT>processes.queue.remove(process_to_remove)<EOL><DEDENT>break<EOL><DEDENT>time.sleep(polling_freq)<EOL><DEDENT>add_to_queue(p)<EOL><DEDENT><DEDENT>while not processes.empty():<EOL><INDENT>processes.get().wait()<EOL><DEDENT>elapsed_time = time.time() - start_time<EOL>logger.info(<EOL>\"<STR_LIT>\",<EOL>len(multiple_args_dict),<EOL>elapsed_time)<EOL>", "docstring": "Run multiple shell commands in parallel, write each of their\nstdout output to files associated with each 
command.\n\nParameters\n----------\nmultiple_args_dict : dict\n    A dictionary whose keys are files and values are args list.\n    Run each args list as a subprocess and write stdout to the\n    corresponding file.\n\nprint_commands : bool\n    Print shell commands before running them.\n\nprocess_limit : int\n    Limit the number of concurrent processes to this number. 0\n    if there is no limit, -1 to use max number of processors\n\npolling_freq : int\n    Number of seconds between checking for done processes, if\n    we have a process limit", "id": "f931:m1"}
{"signature": "def poll(self):", "body": "if self.process is None:<EOL><INDENT>self.start()<EOL><DEDENT>return self.process.poll()<EOL>", "docstring": "Peeks at whether the process is done or not, without\nwaiting for it. Leaves exception handling and such to wait().", "id": "f931:c0:m2"}
{"signature": "def predict_subsequences(self, sequence_dict, peptide_lengths=None):", "body": "sequence_dict = check_sequence_dictionary(sequence_dict)<EOL>peptide_lengths = self._check_peptide_lengths(peptide_lengths)<EOL>binding_predictions = []<EOL>expected_peptides = set([])<EOL>normalized_alleles = []<EOL>for key, amino_acid_sequence in sequence_dict.items():<EOL><INDENT>for l in peptide_lengths:<EOL><INDENT>for i in range(len(amino_acid_sequence) - l + <NUM_LIT:1>):<EOL><INDENT>expected_peptides.add(amino_acid_sequence[i:i + l])<EOL><DEDENT><DEDENT>self._check_peptide_inputs(expected_peptides)<EOL>for allele in self.alleles:<EOL><INDENT>allele = normalize_allele_name(allele, omit_dra1=True)<EOL>normalized_alleles.append(allele)<EOL>request = self._get_iedb_request_params(<EOL>amino_acid_sequence, allele)<EOL>logger.info(<EOL>\"<STR_LIT>\",<EOL>self.url,<EOL>request)<EOL>response_df = _query_iedb(request, self.url)<EOL>for _, row in response_df.iterrows():<EOL><INDENT>binding_predictions.append(<EOL>BindingPrediction(<EOL>source_sequence_name=key,<EOL>offset=row['<STR_LIT:start>'] - <NUM_LIT:1>,<EOL>allele=row['<STR_LIT>'],<EOL>peptide=row['<STR_LIT>'],<EOL>affinity=row['<STR_LIT>'],<EOL>percentile_rank=row['<STR_LIT>'],<EOL>prediction_method_name=\"<STR_LIT>\" + self.prediction_method))<EOL><DEDENT><DEDENT><DEDENT>self._check_results(<EOL>binding_predictions,<EOL>alleles=normalized_alleles,<EOL>peptides=expected_peptides)<EOL>return BindingPredictionCollection(binding_predictions)<EOL>", "docstring": "Given a dictionary mapping unique keys to amino acid sequences,\n        run MHC binding predictions on all candidate epitopes extracted from\n        sequences and return a EpitopeCollection.\n\n        Parameters\n        ----------\n        fasta_dictionary : dict or string\n            Mapping of protein identifiers to protein amino acid sequences.\n            If string then converted to dictionary.", "id": "f933:c0:m4"}
{"signature": "def prepare_allele_name(self, allele_name):", "body": "return allele_name.replace(\"<STR_LIT:*>\", \"<STR_LIT>\")<EOL>", "docstring": "How does the predictor expect to see allele names?", "id": "f934:c0:m2"}
{"signature": "def __init__(<EOL>self,<EOL>program_name,<EOL>alleles,<EOL>parse_output_fn,<EOL>supported_alleles_flag,<EOL>input_file_flag,<EOL>length_flag,<EOL>allele_flag,<EOL>peptide_mode_flags=[\"<STR_LIT>\"],<EOL>tempdir_flag=None,<EOL>extra_flags=[],<EOL>max_peptides_per_file=<NUM_LIT:10> ** <NUM_LIT:4>,<EOL>process_limit=-<NUM_LIT:1>,<EOL>default_peptide_lengths=[<NUM_LIT:9>],<EOL>group_peptides_by_length=False,<EOL>min_peptide_length=<NUM_LIT:8>,<EOL>max_peptide_length=None,):", "body": "require_string(program_name, \"<STR_LIT>\")<EOL>self.program_name = program_name<EOL>if supported_alleles_flag is not None:<EOL><INDENT>require_string(supported_alleles_flag, \"<STR_LIT>\")<EOL><DEDENT>self.supported_alleles_flag = supported_alleles_flag<EOL>require_string(input_file_flag, \"<STR_LIT>\")<EOL>self.input_file_flag = input_file_flag<EOL>require_string(length_flag, \"<STR_LIT>\")<EOL>self.length_flag = length_flag<EOL>require_string(allele_flag, \"<STR_LIT>\")<EOL>self.allele_flag = allele_flag<EOL>require_iterable_of(peptide_mode_flags, string_types)<EOL>self.peptide_mode_flags = peptide_mode_flags<EOL>if tempdir_flag is not None:<EOL><INDENT>require_string(tempdir_flag, \"<STR_LIT>\")<EOL><DEDENT>self.tempdir_flag = tempdir_flag<EOL>require_iterable_of(extra_flags, string_types)<EOL>self.extra_flags = extra_flags<EOL>require_integer(<EOL>max_peptides_per_file,<EOL>\"<STR_LIT>\")<EOL>self.max_peptides_per_file = max_peptides_per_file<EOL>require_integer(process_limit, \"<STR_LIT>\")<EOL>self.process_limit = process_limit<EOL>self.parse_output_fn = parse_output_fn<EOL>if isinstance(default_peptide_lengths, int):<EOL><INDENT>default_peptide_lengths = [default_peptide_lengths]<EOL><DEDENT>self.group_peptides_by_length = group_peptides_by_length<EOL>if self.supported_alleles_flag:<EOL><INDENT>valid_alleles = 
self._determine_supported_alleles(<EOL>self.program_name,<EOL>self.supported_alleles_flag)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>run_command([self.program_name])<EOL><DEDENT>except:<EOL><INDENT>raise SystemError(\"<STR_LIT>\" % self.program_name)<EOL><DEDENT>valid_alleles = None<EOL><DEDENT>try:<EOL><INDENT>BasePredictor.__init__(<EOL>self,<EOL>alleles=alleles,<EOL>valid_alleles=valid_alleles,<EOL>default_peptide_lengths=default_peptide_lengths,<EOL>min_peptide_length=min_peptide_length,<EOL>max_peptide_length=max_peptide_length)<EOL><DEDENT>except UnsupportedAllele as e:<EOL><INDENT>if self.supported_alleles_flag:<EOL><INDENT>additional_message = (<EOL>\"<STR_LIT>\" % (<EOL>self.program_name,<EOL>self.supported_alleles_flag))<EOL><DEDENT>else:<EOL><INDENT>additional_message = \"<STR_LIT>\"<EOL><DEDENT>raise UnsupportedAllele(str(e) + additional_message)<EOL><DEDENT>", "docstring": "Parameters\n----------\nprogram_name : str\n    Name of prediction program to run\n    (e.g. \"netMHCcons\" or \"netMHCIIpan\")\n\nalleles : list of str\n    MHC alleles\n\nsupported_alleles_flag : str\n    Flag to pass to the predictor to get a list of supported alleles\n    (e.g. \"-A\", \"-list\", \"-listMHC\")\n\nparse_output_fn : fn\n    Takes the stdout string from the predictor and returns a collection\n    of BindingPrediction objects\n\ninput_file_flag : str\n    How to specify the input FASTA file of source sequences (e.g. \"-f\")\n\nlength_flag : str\n    How to specify the desired predicted peptide length (e.g. \"-length\")\n\nallele_flag : str\n    How to specify the allele we want predictions for (e.g. \"-a\")\n\npeptide_mode_flags : list of str\n    How to switch from the default FASTA subsequences input mode to\n    where peptides are explicitly given one per line of a text file.\n\ntempdir_flag : str, optional\n    How to specify the predictor's temporary directory (e.g. 
\"-tdir\")\n\nextra_flags : list of str\n    Extra flags to pass to the predictor\n\nmax_peptides_per_file : int, optional\n    Maximum number of lines per file when predicting peptides directly.\n\nprocess_limit : int, optional\n    Maximum number of parallel processes to start\n    (0 for no limit, -1 for use all available processors)\n\ndefault_peptide_lengths : list of int, optional\n    When making predictions across subsequences of protein sequences,\n    what peptide lengths to predict for.\n\ngroup_peptides_by_length : bool\n    Run commandline predictor on groups of peptides of equal length\n\nmin_peptide_length : int\n    Shortest peptide this predictor can handle\n\nmax_peptide_length : int\n    Longest peptide this predictor can handle", "id": "f934:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def _determine_supported_alleles(command, supported_allele_flag):<DEDENT>", "body": "try:<EOL><INDENT>supported_alleles_output = check_output([<EOL>command, supported_allele_flag<EOL>])<EOL>supported_alleles_str = supported_alleles_output.decode(\"<STR_LIT:ascii>\", \"<STR_LIT:ignore>\")<EOL>assert len(supported_alleles_str) > <NUM_LIT:0>,'<STR_LIT>' % command<EOL>supported_alleles = set([])<EOL>for line in supported_alleles_str.split(\"<STR_LIT:\\n>\"):<EOL><INDENT>line = line.strip()<EOL>if not line.startswith('<STR_LIT:#>') and len(line) > <NUM_LIT:0>:<EOL><INDENT>try:<EOL><INDENT>supported_alleles.add(normalize_allele_name(line))<EOL><DEDENT>except AlleleParseError as error:<EOL><INDENT>logger.info(\"<STR_LIT>\", line, error)<EOL>continue<EOL><DEDENT><DEDENT><DEDENT>if len(supported_alleles) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return supported_alleles<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.exception(e)<EOL>raise SystemError(\"<STR_LIT>\" % (<EOL>command,<EOL>supported_allele_flag))<EOL><DEDENT>", "docstring": "Try asking the commandline predictor (e.g. netMHCpan)\nwhich alleles it supports.", "id": "f934:c0:m1"}
{"signature": "def predict_peptides(self, peptides):", "body": "raise NotImplementedError(\"<STR_LIT>\" % (<EOL>self.__class__.__name__))<EOL>", "docstring": "Given a list of peptide sequences, returns a BindingPredictionCollection", "id": "f935:c0:m3"}
{"signature": "def _check_peptide_lengths(self, peptide_lengths=None):", "body": "if not peptide_lengths:<EOL><INDENT>peptide_lengths = self.default_peptide_lengths<EOL><DEDENT>if not peptide_lengths:<EOL><INDENT>raise ValueError(<EOL>(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"))<EOL><DEDENT>if isinstance(peptide_lengths, int):<EOL><INDENT>peptide_lengths = [peptide_lengths]<EOL><DEDENT>require_iterable_of(peptide_lengths, int)<EOL>for peptide_length in peptide_lengths:<EOL><INDENT>if (self.min_peptide_length is not None and<EOL>peptide_length < self.min_peptide_length):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\" % (<EOL>peptide_length,<EOL>self.min_peptide_length))<EOL><DEDENT>elif (self.max_peptide_length is not None and<EOL>peptide_length > self.max_peptide_length):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\" % (<EOL>peptide_length,<EOL>self.max_peptide_length))<EOL><DEDENT><DEDENT>return peptide_lengths<EOL>", "docstring": "If peptide lengths not specified, then try using the default\nlengths associated with this predictor object. If those aren't\na valid non-empty sequence of integers, then raise an exception.\nOtherwise return the peptide lengths.", "id": "f935:c0:m5"}
{"signature": "def main(args_list=None):", "body": "args = parse_args(args_list)<EOL>binding_predictions = run_predictor(args)<EOL>df = binding_predictions.to_dataframe()<EOL>logger.info('<STR_LIT>', df)<EOL>if args.output_csv:<EOL><INDENT>df.to_csv(args.output_csv, index=False)<EOL>print(\"<STR_LIT>\" % args.output_csv)<EOL><DEDENT>", "docstring": "Script to make pMHC binding predictions from amino acid sequences.\n\nUsage example:\n    mhctools\n        --sequence SFFPIQQQQQAAALLLI \\\n        --sequence SILQQQAQAQQAQAASSSC \\\n        --extract-subsequences \\\n        --mhc-predictor netmhc \\\n        --mhc-alleles HLA-A0201 H2-Db \\\n        --mhc-predictor netmhc \\\n        --output-csv epitope.csv", "id": "f939:m4"}
{"signature": "def parse_int_list(string):", "body": "integers = []<EOL>for comma_part in string.split(\"<STR_LIT:U+002C>\"):<EOL><INDENT>for substring in comma_part.split(\"<STR_LIT:U+0020>\"):<EOL><INDENT>if len(substring) == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>if \"<STR_LIT:->\" in substring:<EOL><INDENT>left, right = substring.split(\"<STR_LIT:->\")<EOL>left_val = int(left.strip())<EOL>right_val = int(right.strip())<EOL>integers.extend(range(left_val, right_val + <NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>integers.append(int(substring.strip()))<EOL><DEDENT><DEDENT><DEDENT>return integers<EOL>", "docstring": "Parses a string of numbers and ranges into a list of integers. Ranges\nare separated by dashes and inclusive of both the start and end number.\n\nExample:\n    parse_int_list(\"8 9 10,11-13\") == [8,9,10,11,12,13]", "id": "f941:m0"}
{"signature": "def parse_netmhcpan4_stdout(<EOL>stdout,<EOL>prediction_method_name=\"<STR_LIT>\",<EOL>sequence_key_mapping=None):", "body": "<EOL>return parse_netmhcpan3_stdout(<EOL>stdout=stdout,<EOL>prediction_method_name=prediction_method_name,<EOL>sequence_key_mapping=sequence_key_mapping)<EOL>", "docstring": "# NetMHCpan version 4.0\n\n# Tmpdir made /var/folders/jc/fyrvcrcs3sb8g4mkdg6nl_t80000gp/T//netMHCpanuH3SvY\n# Input is in PEPTIDE format\n\n# Make binding affinity predictions\n\nHLA-A02:01 : Distance to training data  0.000 (using nearest neighbor HLA-A02:01)\n\n# Rank Threshold for Strong binding peptides   0.500\n# Rank Threshold for Weak binding peptides   2.000\n-----------------------------------------------------------------------------------\n  Pos          HLA         Peptide       Core Of Gp Gl Ip Il        Icore        Identity     Score Aff(nM)   %Rank  BindLevel\n-----------------------------------------------------------------------------------\n    1  HLA-A*02:01        SIINFEKL  SIINF-EKL  0  0  0  5  1     SIINFEKL         PEPLIST 0.1141340 14543.1 18.9860\n-----------------------------------------------------------------------------------\n\nProtein PEPLIST. Allele HLA-A*02:01. Number of high binders 0. Number of weak binders 0. Number of peptides 1", "id": "f943:m8"}
{"signature": "def NetMHCpan(<EOL>alleles,<EOL>program_name=\"<STR_LIT>\",<EOL>process_limit=-<NUM_LIT:1>,<EOL>default_peptide_lengths=[<NUM_LIT:9>],<EOL>extra_flags=[]):", "body": "<EOL>with open(os.devnull, '<STR_LIT:w>') as devnull:<EOL><INDENT>output = check_output([<EOL>program_name, \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>stderr=devnull)<EOL><DEDENT>output_str = output.decode(\"<STR_LIT:ascii>\", \"<STR_LIT:ignore>\")<EOL>common_kwargs = {<EOL>\"<STR_LIT>\": alleles,<EOL>\"<STR_LIT>\": default_peptide_lengths,<EOL>\"<STR_LIT>\": program_name,<EOL>\"<STR_LIT>\": process_limit,<EOL>\"<STR_LIT>\": extra_flags,<EOL>}<EOL>if \"<STR_LIT>\" in output_str:<EOL><INDENT>return NetMHCpan28(**common_kwargs)<EOL><DEDENT>elif \"<STR_LIT>\" in output_str:<EOL><INDENT>return NetMHCpan3(**common_kwargs)<EOL><DEDENT>elif \"<STR_LIT>\" in output_str:<EOL><INDENT>return NetMHCpan4(**common_kwargs)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "This function wraps NetMHCpan28 and NetMHCpan3 to automatically detect which class\nto use, with the help of the miraculous and strange '--version' netmhcpan argument.", "id": "f950:m0"}
{"signature": "def create_tables(self):", "body": "for cls in self:<EOL><INDENT>cls.create_table(fail_silently=True)<EOL><DEDENT>", "docstring": "Create database tables", "id": "f967:c0:m1"}
{"signature": "def get_paginator(self):", "body": "return self.paginator<EOL>", "docstring": "Return pagination for our model", "id": "f967:c8:m1"}
{"signature": "def to_cursor_ref(self):", "body": "fields = self._meta.get_primary_keys()<EOL>assert fields<EOL>values = {field.name:self.__data__[field.name] for field in fields}<EOL>return values<EOL>", "docstring": "Returns dict of values to uniquely reference this item", "id": "f967:c4:m4"}
{"signature": "def get_database(self, model):", "body": "for router in self.routers:<EOL><INDENT>r = router.get_database(model)<EOL>if r is not None:<EOL><INDENT>return r<EOL><DEDENT><DEDENT>return self.get('<STR_LIT:default>')<EOL>", "docstring": "Find matching database router", "id": "f967:c1:m3"}
{"signature": "@classmethod<EOL><INDENT>def from_cursor_ref(self, cursor):<DEDENT>", "body": "return self.get(**cursor)<EOL>", "docstring": "Returns model instance from unique cursor reference", "id": "f967:c4:m5"}
{"signature": "@classmethod<EOL><INDENT>def get_or_none(cls, **kwargs):<DEDENT>", "body": "try:<EOL><INDENT>return cls.get(**kwargs)<EOL><DEDENT>except cls.DoesNotExist:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "XXX: needs unit test", "id": "f967:c4:m2"}
{"signature": "def refetch(self):", "body": "ref = self.to_cursor_ref()<EOL>return self.from_cursor_ref(ref)<EOL>", "docstring": "Return new model instance with fresh data from database\nOnly works on models which have a primary or compound key\nSee https://github.com/coleifer/peewee/issues/638\n\nXXX: Add support for models without PK", "id": "f967:c4:m6"}
{"signature": "def list(self, filters, cursor, count):", "body": "assert isinstance(filters, dict), \"<STR_LIT>\"<EOL>assert isinstance(cursor, dict), \"<STR_LIT>\"<EOL>query = self.get_query()<EOL>assert isinstance(query, peewee.Query)<EOL>paginator = self.get_paginator()<EOL>assert isinstance(paginator, Pagination)<EOL>count += <NUM_LIT:1><EOL>pquery = paginator.filter_query(query, cursor, count)<EOL>items = [ item for item in pquery ]<EOL>next_item = items.pop(<NUM_LIT:1>)<EOL>next_cursor = next_item.to_cursor_ref()<EOL>'''<STR_LIT>'''<EOL>return items, next_cursor<EOL>", "docstring": "List items from query", "id": "f967:c8:m3"}
{"signature": "def utcnow_no_ms():", "body": "return datetime.datetime.utcnow().replace(microsecond=<NUM_LIT:0>)<EOL>", "docstring": "Returns utcnow without microseconds", "id": "f967:m0"}
{"signature": "@classmethod<EOL><INDENT>def paginate_query(self, query, count, offset=None, sort=None):<DEDENT>", "body": "assert isinstance(query, peewee.Query)<EOL>assert isinstance(count, int)<EOL>assert isinstance(offset, (str, int, type(None)))<EOL>assert isinstance(sort, (list, set, tuple, type(None)))<EOL>fields = query.model._meta.get_primary_keys()<EOL>if len(fields) == <NUM_LIT:0>:<EOL><INDENT>raise peewee.ProgrammingError(<EOL>'<STR_LIT>')<EOL><DEDENT>if len(fields) > <NUM_LIT:1>:<EOL><INDENT>raise peewee.ProgrammingError(<EOL>'<STR_LIT>')<EOL><DEDENT>if offset is not None:<EOL><INDENT>query = query.where(fields[<NUM_LIT:0>] >= offset)<EOL><DEDENT>order_bys = []<EOL>if sort:<EOL><INDENT>for field, direction in sort:<EOL><INDENT>if not isinstance(direction, str):<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(field))<EOL><DEDENT>direction = direction.lower().strip()<EOL>if direction not in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(field))<EOL><DEDENT>order_by = peewee.SQL(field)<EOL>order_by = getattr(order_by, direction)()<EOL>order_bys += [order_by]<EOL><DEDENT><DEDENT>order_bys += [fields[<NUM_LIT:0>].asc()]<EOL>query = query.order_by(*order_bys)<EOL>query = query.limit(count)<EOL>return query<EOL>", "docstring": "Apply pagination to query\n\n:attr query: Instance of `peewee.Query`\n:attr count: Max rows to return\n:attr offset: Pagination offset, str/int\n:attr sort: List of tuples, e.g. [('id', 'asc')]\n\n:returns: Instance of `peewee.Query`", "id": "f967:c7:m0"}
{"signature": "def retrieve(self, cursor):", "body": "assert isinstance(cursor, dict), \"<STR_LIT>\"<EOL>query = self.get_query()<EOL>assert isinstance(query, peewee.Query)<EOL>query<EOL>return query.get(**cursor)<EOL>", "docstring": "Retrieve items from query", "id": "f967:c8:m4"}
{"signature": "def get_query(self):", "body": "return self.query<EOL>", "docstring": "Return query for our model", "id": "f967:c8:m0"}
{"signature": "def populate_models(self):", "body": "fake = Faker()<EOL>fake.seed(<NUM_LIT:0>)<EOL>cities = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>items = []<EOL>for x in range(<NUM_LIT:100>):<EOL><INDENT>city = cities[x % len(cities)]<EOL>items += [dict(name=fake.name(), city=city)]<EOL><DEDENT>Person.insert_many(items).execute()<EOL>assert Person.select().count() == <NUM_LIT:100><EOL>", "docstring": "Populate test models with (predictable) fake data", "id": "f970:c1:m0"}
{"signature": "def upgrade(self):", "body": "if not self.is_valid:<EOL><INDENT>raise PolyaxonDeploymentConfigError(<EOL>'<STR_LIT>'.format(self.deployment_type))<EOL><DEDENT>if self.is_kubernetes:<EOL><INDENT>self.upgrade_on_kubernetes()<EOL><DEDENT>elif self.is_docker_compose:<EOL><INDENT>self.upgrade_on_docker_compose()<EOL><DEDENT>elif self.is_docker:<EOL><INDENT>self.upgrade_on_docker()<EOL><DEDENT>elif self.is_heroku:<EOL><INDENT>self.upgrade_on_heroku()<EOL><DEDENT>", "docstring": "Upgrade deployment.", "id": "f1019:c0:m23"}
{"signature": "def install(self):", "body": "if not self.is_valid:<EOL><INDENT>raise PolyaxonDeploymentConfigError(<EOL>'<STR_LIT>'.format(self.deployment_type))<EOL><DEDENT>if self.is_kubernetes:<EOL><INDENT>self.install_on_kubernetes()<EOL><DEDENT>elif self.is_docker_compose:<EOL><INDENT>self.install_on_docker_compose()<EOL><DEDENT>elif self.is_docker:<EOL><INDENT>self.install_on_docker()<EOL><DEDENT>elif self.is_heroku:<EOL><INDENT>self.install_on_heroku()<EOL><DEDENT>", "docstring": "Install polyaxon using the current config to the correct platform.", "id": "f1019:c0:m18"}
{"signature": "def teardown(self, hooks=True):", "body": "if not self.is_valid:<EOL><INDENT>raise PolyaxonDeploymentConfigError(<EOL>'<STR_LIT>'.format(self.deployment_type))<EOL><DEDENT>if self.is_kubernetes:<EOL><INDENT>self.teardown_on_kubernetes(hooks=hooks)<EOL><DEDENT>elif self.is_docker_compose:<EOL><INDENT>self.teardown_on_docker_compose()<EOL><DEDENT>elif self.is_docker:<EOL><INDENT>self.teardown_on_docker(hooks=hooks)<EOL><DEDENT>elif self.is_heroku:<EOL><INDENT>self.teardown_on_heroku(hooks=hooks)<EOL><DEDENT>", "docstring": "Teardown Polyaxon.", "id": "f1019:c0:m28"}
{"signature": "@classmethod<EOL><INDENT>def find_matching(cls, path, patterns):<DEDENT>", "body": "for pattern in patterns:<EOL><INDENT>if pattern.match(path):<EOL><INDENT>yield pattern<EOL><DEDENT><DEDENT>", "docstring": "Yield all matching patterns for path.", "id": "f1025:c1:m3"}
{"signature": "@staticmethod<EOL><INDENT>def _matches_patterns(path, patterns):<DEDENT>", "body": "for glob in patterns:<EOL><INDENT>try:<EOL><INDENT>if PurePath(path).match(glob):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>except TypeError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Given a list of patterns, returns a if a path matches any pattern.", "id": "f1025:c1:m9"}
{"signature": "@classmethod<EOL><INDENT>def _ignore_path(cls, path, ignore_list=None, white_list=None):<DEDENT>", "body": "ignore_list = ignore_list or []<EOL>white_list = white_list or []<EOL>return (cls._matches_patterns(path, ignore_list) and<EOL>not cls._matches_patterns(path, white_list))<EOL>", "docstring": "Returns a whether a path should be ignored or not.", "id": "f1025:c1:m10"}
{"signature": "@click.group()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, default=False, help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def cli(context, verbose):", "body": "configure_logger(verbose or GlobalConfigManager.get_value('<STR_LIT>'))<EOL>non_check_cmds = ['<STR_LIT>', '<STR_LIT:version>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>if context.invoked_subcommand not in non_check_cmds:<EOL><INDENT>check_cli_version()<EOL><DEDENT>", "docstring": "Polyaxon CLI tool to:\n\n        * Parse, Validate, and Check Polyaxonfiles.\n\n        * Interact with Polyaxon server.\n\n        * Run and Monitor experiments.\n\n    Check the help available for each command listed below.", "id": "f1033:m0"}
{"signature": "def pprint(value):", "body": "click.echo(<EOL>json.dumps(value,<EOL>sort_keys=True,<EOL>indent=<NUM_LIT:4>,<EOL>separators=('<STR_LIT:U+002C>', '<STR_LIT>')))<EOL>", "docstring": "Prints as formatted JSON", "id": "f1039:m3"}
{"signature": "@config.command()<EOL>@click.option('<STR_LIT>', type=bool, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=str, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=int, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=int, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=bool, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=bool,<EOL>help='<STR_LIT>')<EOL>@clean_outputs<EOL>def set(verbose,  <EOL>host,<EOL>http_port,<EOL>ws_port,<EOL>use_https,<EOL>verify_ssl):", "body": "_config = GlobalConfigManager.get_config_or_default()<EOL>if verbose is not None:<EOL><INDENT>_config.verbose = verbose<EOL><DEDENT>if host is not None:<EOL><INDENT>_config.host = host<EOL><DEDENT>if http_port is not None:<EOL><INDENT>_config.http_port = http_port<EOL><DEDENT>if ws_port is not None:<EOL><INDENT>_config.ws_port = ws_port<EOL><DEDENT>if use_https is not None:<EOL><INDENT>_config.use_https = use_https<EOL><DEDENT>if verify_ssl is False:<EOL><INDENT>_config.verify_ssl = verify_ssl<EOL><DEDENT>GlobalConfigManager.set_config(_config)<EOL>Printer.print_success('<STR_LIT>')<EOL>CliConfigManager.purge()<EOL>", "docstring": "Set the global config values.\n\n    Example:\n\n    \\b\n    ```bash\n    $ polyaxon config set --host=localhost --http_port=80\n    ```", "id": "f1040:m3"}
{"signature": "@click.group(invoke_without_command=True)<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, help='<STR_LIT>')<EOL>@clean_outputs<EOL>def config(list):  ", "body": "if list:<EOL><INDENT>_config = GlobalConfigManager.get_config_or_default()<EOL>Printer.print_header('<STR_LIT>')<EOL>dict_tabulate(_config.to_dict())<EOL><DEDENT>", "docstring": "Set and get the global configurations.", "id": "f1040:m1"}
{"signature": "@config.command()<EOL>@click.argument('<STR_LIT>', type=str, nargs=-<NUM_LIT:1>)<EOL>@clean_outputs<EOL>def get(keys):", "body": "_config = GlobalConfigManager.get_config_or_default()<EOL>if not keys:<EOL><INDENT>return<EOL><DEDENT>print_values = {}<EOL>for key in keys:<EOL><INDENT>if hasattr(_config, key):<EOL><INDENT>print_values[key] = getattr(_config, key)<EOL><DEDENT>else:<EOL><INDENT>click.echo('<STR_LIT>'.format(key))<EOL><DEDENT><DEDENT>dict_tabulate(print_values, )<EOL>", "docstring": "Get the global config values by keys.\n\n    Example:\n\n    \\b\n    ```bash\n    $ polyaxon config get host http_port\n    ```", "id": "f1040:m2"}
{"signature": "@click.command()<EOL>@clean_outputs<EOL>def upgrade():", "body": "try:<EOL><INDENT>pip_upgrade(PROJECT_CLI_NAME)<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.error(e)<EOL><DEDENT>", "docstring": "Install/Upgrade polyaxon-cli.", "id": "f1042:m8"}
{"signature": "@admin.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=click.Path(exists=True),<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=click.Path(exists=True),<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>')<EOL>@clean_outputs<EOL>def deploy(file, manager_path, check, dry_run):  ", "body": "config = read_deployment_config(file)<EOL>manager = DeployManager(config=config,<EOL>filepath=file,<EOL>manager_path=manager_path,<EOL>dry_run=dry_run)<EOL>exception = None<EOL>if check:<EOL><INDENT>manager.check()<EOL>Printer.print_success('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>manager.install()<EOL><DEDENT>except Exception as e:<EOL><INDENT>Printer.print_error('<STR_LIT>')<EOL>exception = e<EOL><DEDENT><DEDENT>if exception:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(exception))<EOL><DEDENT>", "docstring": "Deploy polyaxon.", "id": "f1043:m2"}
{"signature": "@click.group()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=str, help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=int, help=\"<STR_LIT>\")<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def build(ctx, project, build):  ", "body": "ctx.obj = ctx.obj or {}<EOL>ctx.obj['<STR_LIT>'] = project<EOL>ctx.obj['<STR_LIT>'] = build<EOL>", "docstring": "Commands for build jobs.", "id": "f1045:m1"}
{"signature": "@build.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def unbookmark(ctx):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get('<STR_LIT>'), ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>PolyaxonClient().build_job.unbookmark(user, project_name, _build)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_build))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\")<EOL>", "docstring": "Unbookmark build job.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon build unbookmark\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon build -b 2 unbookmark\n    ```", "id": "f1045:m7"}
{"signature": "@build.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def bookmark(ctx):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get('<STR_LIT>'), ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>PolyaxonClient().build_job.bookmark(user, project_name, _build)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_build))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\")<EOL>", "docstring": "Bookmark build job.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon build bookmark\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon build -b 2 bookmark\n    ```", "id": "f1045:m6"}
{"signature": "@build.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def resources(ctx, gpu):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get('<STR_LIT>'), ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>message_handler = Printer.gpu_resources if gpu else Printer.resources<EOL>PolyaxonClient().build_job.resources(user,<EOL>project_name,<EOL>_build,<EOL>message_handler=message_handler)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_build))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Get build job resources.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon build -b 2 resources\n    ```\n\n    For GPU resources\n\n    \\b\n    ```bash\n    $ polyaxon build -b 2 resources --gpu\n    ```", "id": "f1045:m9"}
{"signature": "@build.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, default=False,<EOL>help=\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def stop(ctx, yes):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get('<STR_LIT>'), ctx.obj.get('<STR_LIT>'))<EOL>if not yes and not click.confirm(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(_build)):<EOL><INDENT>click.echo('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>try:<EOL><INDENT>PolyaxonClient().build_job.stop(user, project_name, _build)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_build))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\")<EOL>", "docstring": "Stop build job.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon build stop\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon build -b 2 stop\n    ```", "id": "f1045:m5"}
{"signature": "@build.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def delete(ctx):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get('<STR_LIT>'), ctx.obj.get('<STR_LIT>'))<EOL>if not click.confirm(\"<STR_LIT>\".format(_build)):<EOL><INDENT>click.echo('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>try:<EOL><INDENT>response = PolyaxonClient().build_job.delete_build(<EOL>user, project_name, _build)<EOL>BuildJobManager.purge()<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_build))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if response.status_code == <NUM_LIT>:<EOL><INDENT>Printer.print_success(\"<STR_LIT>\".format(_build))<EOL><DEDENT>", "docstring": "Delete build job.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Example:\n\n    \\b\n    ```bash\n    $ polyaxon build delete\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon build -b 2 delete\n    ```", "id": "f1045:m3"}
{"signature": "@build.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, default=False,<EOL>help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help=\"<STR_LIT>\")<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def logs(ctx, past, follow, hide_time):", "body": "user, project_name, _build = get_build_or_local(ctx.obj.get('<STR_LIT>'), ctx.obj.get('<STR_LIT>'))<EOL>if past:<EOL><INDENT>try:<EOL><INDENT>response = PolyaxonClient().build_job.logs(<EOL>user, project_name, _build, stream=False)<EOL>get_logs_handler(handle_job_info=False,<EOL>show_timestamp=not hide_time,<EOL>stream=False)(response.content.decode().split('<STR_LIT:\\n>'))<EOL>print()<EOL>if not follow:<EOL><INDENT>return<EOL><DEDENT><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>if not follow:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_build))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>try:<EOL><INDENT>PolyaxonClient().build_job.logs(<EOL>user,<EOL>project_name,<EOL>_build,<EOL>message_handler=get_logs_handler(handle_job_info=False, show_timestamp=not hide_time))<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_build))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Get build logs.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon build -b 2 logs\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon build logs\n    ```", "id": "f1045:m10"}
{"signature": "@click.group()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=str, help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=int, help=\"<STR_LIT>\")<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def job(ctx, project, job):  ", "body": "ctx.obj = ctx.obj or {}<EOL>ctx.obj['<STR_LIT>'] = project<EOL>ctx.obj['<STR_LIT>'] = job<EOL>", "docstring": "Commands for jobs.", "id": "f1046:m1"}
{"signature": "@job.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def get(ctx):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get('<STR_LIT>'), ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>response = PolyaxonClient().job.get_job(user, project_name, _job)<EOL>cache.cache(config_manager=JobManager, response=response)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_job))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>get_job_details(response)<EOL>", "docstring": "Get job.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon job --job=1 get\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon job --job=1 --project=project_name get\n    ```", "id": "f1046:m2"}
{"signature": "@job.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def bookmark(ctx):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get('<STR_LIT>'), ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>PolyaxonClient().job.bookmark(user, project_name, _job)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_job))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\")<EOL>", "docstring": "Bookmark job.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon job bookmark\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon job -j 2 bookmark\n    ```", "id": "f1046:m12"}
{"signature": "@job.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def resources(ctx, gpu):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get('<STR_LIT>'), ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>message_handler = Printer.gpu_resources if gpu else Printer.resources<EOL>PolyaxonClient().job.resources(user,<EOL>project_name,<EOL>_job,<EOL>message_handler=message_handler)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_job))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Get job resources.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon job -j 2 resources\n    ```\n\n    For GPU resources\n\n    \\b\n    ```bash\n    $ polyaxon job -j 2 resources --gpu\n    ```", "id": "f1046:m9"}
{"signature": "@job.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def unbookmark(ctx):", "body": "user, project_name, _job = get_job_or_local(ctx.obj.get('<STR_LIT>'), ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>PolyaxonClient().job.unbookmark(user, project_name, _job)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_job))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\")<EOL>", "docstring": "Unbookmark job.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon job unbookmark\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon job -j 2 unbookmark\n    ```", "id": "f1046:m13"}
{"signature": "@job.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT:-c>', is_flag=True, default=False,<EOL>help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', '<STR_LIT>', multiple=True, type=click.Path(exists=True),<EOL>help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help=\"<STR_LIT>\")<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def restart(ctx, copy, file, u):  ", "body": "config = None<EOL>update_code = None<EOL>if file:<EOL><INDENT>config = rhea.read(file)<EOL><DEDENT>if u:<EOL><INDENT>ctx.invoke(upload, sync=False)<EOL>update_code = True<EOL><DEDENT>user, project_name, _job = get_job_or_local(ctx.obj.get('<STR_LIT>'), ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>if copy:<EOL><INDENT>response = PolyaxonClient().job.copy(<EOL>user, project_name, _job, config=config, update_code=update_code)<EOL><DEDENT>else:<EOL><INDENT>response = PolyaxonClient().job.restart(<EOL>user, project_name, _job, config=config, update_code=update_code)<EOL><DEDENT><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_job))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>get_job_details(response)<EOL>", "docstring": "Restart job.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon job --job=1 restart\n    ```", "id": "f1046:m6"}
{"signature": "@click.command()<EOL>@clean_outputs<EOL>def whoami():", "body": "try:<EOL><INDENT>user = PolyaxonClient().auth.get_user()<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>')<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>click.echo(\"<STR_LIT>\".format(**user.to_dict()))<EOL>", "docstring": "Show current logged Polyaxon user.", "id": "f1053:m2"}
{"signature": "@tensorboard.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', multiple=True, type=click.Path(exists=True),<EOL>help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def start(ctx, file):  ", "body": "specification = None<EOL>job_config = None<EOL>if file:<EOL><INDENT>specification = check_polyaxonfile(file, log=False).specification<EOL><DEDENT>if specification:<EOL><INDENT>check_polyaxonfile_kind(specification=specification, kind=specification._TENSORBOARD)<EOL>job_config = specification.parsed_data<EOL><DEDENT>user, project_name = get_project_or_local(ctx.obj.get('<STR_LIT>'))<EOL>group = ctx.obj.get('<STR_LIT>')<EOL>experiment = ctx.obj.get('<STR_LIT>')<EOL>if experiment:<EOL><INDENT>try:<EOL><INDENT>response = PolyaxonClient().experiment.start_tensorboard(<EOL>username=user,<EOL>project_name=project_name,<EOL>experiment_id=experiment,<EOL>job_config=job_config)<EOL>obj = '<STR_LIT>'.format(experiment)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(experiment))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>elif group:<EOL><INDENT>try:<EOL><INDENT>response = PolyaxonClient().experiment_group.start_tensorboard(<EOL>username=user,<EOL>project_name=project_name,<EOL>group_id=group,<EOL>job_config=job_config)<EOL>obj = '<STR_LIT>'.format(group)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(group))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>response = PolyaxonClient().project.start_tensorboard(<EOL>username=user,<EOL>project_name=project_name,<EOL>job_config=job_config)<EOL>obj = '<STR_LIT>'.format(project_name)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as 
e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(project_name))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>Printer.print_header(\"<STR_LIT>\".format(obj))<EOL>click.echo(get_tensorboard_url(user=user,<EOL>project_name=project_name,<EOL>experiment=experiment,<EOL>group=group))<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>if response.status_code != <NUM_LIT>:<EOL><INDENT>Printer.print_error('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success('<STR_LIT>'.format(obj))<EOL>clint.textui.puts(\"<STR_LIT>\")<EOL>clint.textui.puts(\"<STR_LIT>\")<EOL>with clint.textui.indent(<NUM_LIT:4>):<EOL><INDENT>clint.textui.puts(get_tensorboard_url(user, project_name, experiment, group))<EOL><DEDENT>", "docstring": "Start a tensorboard deployment for project/experiment/experiment group.\n\n    Project tensorboard will aggregate all experiments under the project.\n\n    Experiment group tensorboard will aggregate all experiments under the group.\n\n    Experiment tensorboard will show all metrics for an experiment.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Example: using the default tensorflow image 1.4.1.\n\n    \\b\n    ```bash\n    $ polyaxon tensorboard start\n    ```\n\n    Example: with custom image and resources\n\n    \\b\n    ```bash\n    $ polyaxon tensorboard start -f file -f file_override ...\n    ```\n\n    Example: starting a tensorboard for an experiment group\n\n    \\b\n    ```bash\n    $ polyaxon tensorboard -g 1 start -f file\n    ```\n\n    Example: starting a tensorboard for an experiment\n\n    \\b\n    ```bash\n    $ polyaxon tensorboard -xp 112 start -f file\n    ```", "id": "f1055:m3"}
{"signature": "@click.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=str)<EOL>@click.option('<STR_LIT>', '<STR_LIT>', multiple=True, type=click.Path(exists=True),<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=str,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=str, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=str,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=int,<EOL>help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def run(ctx, project, file, name, tags, description, ttl, u, l):  ", "body": "if not file:<EOL><INDENT>file = PolyaxonFile.check_default_path(path='<STR_LIT:.>')<EOL><DEDENT>if not file:<EOL><INDENT>file = '<STR_LIT>'<EOL><DEDENT>specification = check_polyaxonfile(file, log=False).specification<EOL>spec_cond = (specification.is_experiment or<EOL>specification.is_group or<EOL>specification.is_job or<EOL>specification.is_build)<EOL>if not spec_cond:<EOL><INDENT>Printer.print_error(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(specification.kind))<EOL>if specification.is_notebook:<EOL><INDENT>click.echo('<STR_LIT>')<EOL><DEDENT>elif specification.is_tensorboard:<EOL><INDENT>click.echo('<STR_LIT>')<EOL><DEDENT>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if u:<EOL><INDENT>if project:<EOL><INDENT>Printer.print_error('<STR_LIT>')<EOL>click.echo('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>ctx.invoke(upload, sync=False)<EOL><DEDENT>user, project_name = get_project_or_local(project)<EOL>project_client = PolyaxonClient().project<EOL>tags = validate_tags(tags)<EOL>def run_experiment():<EOL><INDENT>click.echo('<STR_LIT>')<EOL>experiment = ExperimentConfig(<EOL>name=name,<EOL>description=description,<EOL>tags=tags,<EOL>config=specification.parsed_data,<EOL>ttl=ttl)<EOL>try:<EOL><INDENT>response = 
PolyaxonClient().project.create_experiment(user,<EOL>project_name,<EOL>experiment)<EOL>cache.cache(config_manager=ExperimentManager, response=response)<EOL>Printer.print_success('<STR_LIT>'.format(response.id))<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>')<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>def run_group():<EOL><INDENT>click.echo('<STR_LIT>')<EOL>experiments_def = specification.experiments_def<EOL>get_group_experiments_info(**experiments_def)<EOL>experiment_group = ExperimentGroupConfig(<EOL>name=name,<EOL>description=description,<EOL>tags=tags,<EOL>content=specification._data)  <EOL>try:<EOL><INDENT>response = project_client.create_experiment_group(user,<EOL>project_name,<EOL>experiment_group)<EOL>cache.cache(config_manager=GroupManager, response=response)<EOL>Printer.print_success('<STR_LIT>'.format(response.id))<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>')<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>def run_job():<EOL><INDENT>click.echo('<STR_LIT>')<EOL>job = JobConfig(<EOL>name=name,<EOL>description=description,<EOL>tags=tags,<EOL>config=specification.parsed_data,<EOL>ttl=ttl)<EOL>try:<EOL><INDENT>response = project_client.create_job(user,<EOL>project_name,<EOL>job)<EOL>cache.cache(config_manager=JobManager, response=response)<EOL>Printer.print_success('<STR_LIT>'.format(response.id))<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>')<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>def run_build():<EOL><INDENT>click.echo('<STR_LIT>')<EOL>job = 
JobConfig(<EOL>name=name,<EOL>description=description,<EOL>tags=tags,<EOL>config=specification.parsed_data,<EOL>ttl=ttl)<EOL>try:<EOL><INDENT>response = project_client.create_build(user,<EOL>project_name,<EOL>job)<EOL>cache.cache(config_manager=BuildJobManager, response=response)<EOL>Printer.print_success('<STR_LIT>'.format(response.id))<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>')<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>logs = None<EOL>if specification.is_experiment:<EOL><INDENT>run_experiment()<EOL>logs = experiment_logs<EOL><DEDENT>elif specification.is_group:<EOL><INDENT>run_group()<EOL><DEDENT>elif specification.is_job:<EOL><INDENT>run_job()<EOL>logs = job_logs<EOL><DEDENT>elif specification.is_build:<EOL><INDENT>run_build()<EOL>logs = build_logs<EOL><DEDENT>if l and logs:<EOL><INDENT>ctx.obj = {'<STR_LIT>': '<STR_LIT>'.format(user, project_name)}<EOL>ctx.invoke(logs)<EOL><DEDENT>", "docstring": "Run polyaxonfile specification.\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon run -f file -f file_override ...\n    ```\n\n    Upload before running\n\n    \\b\n    ```bash\n    $ polyaxon run -f file -u\n    ```\n\n    Run and set description and tags for this run\n\n    \\b\n    ```bash\n    $ polyaxon run -f file -u --description=\"Description of the current run\" --tags=\"foo, bar, moo\"\n    ```\n    Run and set a unique name for this run\n\n    \\b\n    ```bash\n    polyaxon run --name=foo\n    ```\n\n    Run for a specific project\n\n    \\b\n    ```bash\n    $ polyaxon run -p project1 -f file.yaml\n    ```", "id": "f1057:m0"}
{"signature": "@user.command()<EOL>@click.argument('<STR_LIT:username>', type=str)<EOL>@clean_outputs<EOL>def delete(username):", "body": "try:<EOL><INDENT>PolyaxonClient().user.delete_user(username)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(username))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\".format(username))<EOL>", "docstring": "Delete a user.\n\n    Example:\n\n    \\b\n    ```bash\n    $ polyaxon user delete david\n    ```", "id": "f1058:m2"}
{"signature": "@click.group()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=str, help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=int, help=\"<STR_LIT>\")<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def experiment(ctx, project, experiment):  ", "body": "ctx.obj = ctx.obj or {}<EOL>ctx.obj['<STR_LIT>'] = project<EOL>ctx.obj['<STR_LIT>'] = experiment<EOL>", "docstring": "Commands for experiments.", "id": "f1059:m1"}
{"signature": "@experiment.command()<EOL>@click.option('<STR_LIT>', type=str,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=str, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=str, help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def update(ctx, name, description, tags):", "body": "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('<STR_LIT>'),<EOL>ctx.obj.get('<STR_LIT>'))<EOL>update_dict = {}<EOL>if name:<EOL><INDENT>update_dict['<STR_LIT:name>'] = name<EOL><DEDENT>if description:<EOL><INDENT>update_dict['<STR_LIT:description>'] = description<EOL><DEDENT>tags = validate_tags(tags)<EOL>if tags:<EOL><INDENT>update_dict['<STR_LIT>'] = tags<EOL><DEDENT>if not update_dict:<EOL><INDENT>Printer.print_warning('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>try:<EOL><INDENT>response = PolyaxonClient().experiment.update_experiment(<EOL>user, project_name, _experiment, update_dict)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_experiment))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\")<EOL>get_experiment_details(response)<EOL>", "docstring": "Update experiment.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon experiment -xp 2 update --description=\"new description for my experiments\"\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon experiment -xp 2 update --tags=\"foo, bar\" --name=\"unique-name\"\n    ```", "id": "f1059:m4"}
{"signature": "@experiment.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def outputs(ctx):", "body": "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('<STR_LIT>'),<EOL>ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>PolyaxonClient().experiment.download_outputs(user, project_name, _experiment)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_experiment))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success('<STR_LIT>')<EOL>", "docstring": "Download outputs for experiment.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon experiment -xp 1 outputs\n    ```", "id": "f1059:m12"}
{"signature": "@experiment.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, default=False,<EOL>help=\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def stop(ctx, yes):", "body": "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('<STR_LIT>'),<EOL>ctx.obj.get('<STR_LIT>'))<EOL>if not yes and not click.confirm(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(_experiment)):<EOL><INDENT>click.echo('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>try:<EOL><INDENT>PolyaxonClient().experiment.stop(user, project_name, _experiment)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_experiment))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\")<EOL>", "docstring": "Stop experiment.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon experiment stop\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon experiment -xp 2 stop\n    ```", "id": "f1059:m5"}
{"signature": "@experiment.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def unbookmark(ctx):", "body": "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('<STR_LIT>'),<EOL>ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>PolyaxonClient().experiment.unbookmark(user, project_name, _experiment)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_experiment))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\")<EOL>", "docstring": "Unbookmark experiment.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon experiment unbookmark\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon experiment -xp 2 unbookmark\n    ```", "id": "f1059:m14"}
{"signature": "@experiment.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=int, help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, default=False,<EOL>help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help=\"<STR_LIT>\")<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def logs(ctx, job, past, follow, hide_time):", "body": "def get_experiment_logs():<EOL><INDENT>if past:<EOL><INDENT>try:<EOL><INDENT>response = PolyaxonClient().experiment.logs(<EOL>user, project_name, _experiment, stream=False)<EOL>get_logs_handler(handle_job_info=True,<EOL>show_timestamp=not hide_time,<EOL>stream=False)(response.content.decode().split('<STR_LIT:\\n>'))<EOL>print()<EOL>if not follow:<EOL><INDENT>return<EOL><DEDENT><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>if not follow:<EOL><INDENT>Printer.print_error(<EOL>'<STR_LIT>'.format(_experiment))<EOL>Printer.print_error(<EOL>'<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>try:<EOL><INDENT>PolyaxonClient().experiment.logs(<EOL>user,<EOL>project_name,<EOL>_experiment,<EOL>message_handler=get_logs_handler(handle_job_info=True,<EOL>show_timestamp=not hide_time))<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_experiment))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>def get_experiment_job_logs():<EOL><INDENT>if past:<EOL><INDENT>try:<EOL><INDENT>response = PolyaxonClient().experiment_job.logs(<EOL>user,<EOL>project_name,<EOL>_experiment,<EOL>_job,<EOL>stream=False)<EOL>get_logs_handler(handle_job_info=True,<EOL>show_timestamp=not hide_time,<EOL>stream=False)(response.content.decode().split('<STR_LIT:\\n>'))<EOL>print()<EOL>if not 
follow:<EOL><INDENT>return<EOL><DEDENT><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>if not follow:<EOL><INDENT>Printer.print_error(<EOL>'<STR_LIT>'.format(_experiment))<EOL>Printer.print_error(<EOL>'<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>try:<EOL><INDENT>PolyaxonClient().experiment_job.logs(<EOL>user,<EOL>project_name,<EOL>_experiment,<EOL>_job,<EOL>message_handler=get_logs_handler(handle_job_info=True,<EOL>show_timestamp=not hide_time))<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_job))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('<STR_LIT>'),<EOL>ctx.obj.get('<STR_LIT>'))<EOL>if job:<EOL><INDENT>_job = get_experiment_job_or_local(job)<EOL>get_experiment_job_logs()<EOL><DEDENT>else:<EOL><INDENT>get_experiment_logs()<EOL><DEDENT>", "docstring": "Get experiment or experiment job logs.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples for getting experiment logs:\n\n    \\b\n    ```bash\n    $ polyaxon experiment logs\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon experiment -xp 10 -p mnist logs\n    ```\n\n    Examples for getting experiment job logs:\n\n    \\b\n    ```bash\n    $ polyaxon experiment -xp 1 -j 1 logs\n    ```", "id": "f1059:m11"}
{"signature": "@experiment.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT:-c>', is_flag=True, default=False,<EOL>help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', '<STR_LIT>', multiple=True, type=click.Path(exists=True),<EOL>help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help=\"<STR_LIT>\")<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def restart(ctx, copy, file, u):  ", "body": "config = None<EOL>update_code = None<EOL>if file:<EOL><INDENT>config = rhea.read(file)<EOL><DEDENT>if u:<EOL><INDENT>ctx.invoke(upload, sync=False)<EOL>update_code = True<EOL><DEDENT>user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('<STR_LIT>'),<EOL>ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>if copy:<EOL><INDENT>response = PolyaxonClient().experiment.copy(<EOL>user, project_name, _experiment, config=config, update_code=update_code)<EOL>Printer.print_success('<STR_LIT>'.format(response.id))<EOL><DEDENT>else:<EOL><INDENT>response = PolyaxonClient().experiment.restart(<EOL>user, project_name, _experiment, config=config, update_code=update_code)<EOL>Printer.print_success('<STR_LIT>'.format(response.id))<EOL><DEDENT><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_experiment))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Restart experiment.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon experiment --experiment=1 restart\n    ```", "id": "f1059:m6"}
{"signature": "@experiment.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def delete(ctx):", "body": "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('<STR_LIT>'),<EOL>ctx.obj.get('<STR_LIT>'))<EOL>if not click.confirm(\"<STR_LIT>\".format(_experiment)):<EOL><INDENT>click.echo('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>try:<EOL><INDENT>response = PolyaxonClient().experiment.delete_experiment(<EOL>user, project_name, _experiment)<EOL>ExperimentManager.purge()<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_experiment))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if response.status_code == <NUM_LIT>:<EOL><INDENT>Printer.print_success(\"<STR_LIT>\".format(_experiment))<EOL><DEDENT>", "docstring": "Delete experiment.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Example:\n\n    \\b\n    ```bash\n    $ polyaxon experiment delete\n    ```", "id": "f1059:m3"}
{"signature": "@notebook.command()<EOL>@click.option('<STR_LIT>', type=bool,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def stop(ctx, commit, yes):", "body": "user, project_name = get_project_or_local(ctx.obj.get('<STR_LIT>'))<EOL>if not yes and not click.confirm(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(user, project_name)):<EOL><INDENT>click.echo('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if commit is None:<EOL><INDENT>commit = True<EOL><DEDENT>try:<EOL><INDENT>PolyaxonClient().project.stop_notebook(user, project_name, commit)<EOL>Printer.print_success('<STR_LIT>')<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(project_name))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Stops the notebook deployment for this project if it exists.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)", "id": "f1060:m4"}
{"signature": "@notebook.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def url(ctx):", "body": "user, project_name = get_project_or_local(ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>response = PolyaxonClient().project.get_project(user, project_name)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(project_name))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if response.has_notebook:<EOL><INDENT>click.echo(get_notebook_url(user, project_name))<EOL><DEDENT>else:<EOL><INDENT>Printer.print_warning(<EOL>'<STR_LIT>'.format(project_name))<EOL>click.echo('<STR_LIT>')<EOL><DEDENT>", "docstring": "Prints the notebook url for this project.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Example:\n\n    \\b\n    ```bash\n    $ polyaxon notebook url\n    ```", "id": "f1060:m2"}
{"signature": "@notebook.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', multiple=True, type=click.Path(exists=True),<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def start(ctx, file, u):  ", "body": "specification = None<EOL>job_config = None<EOL>if file:<EOL><INDENT>specification = check_polyaxonfile(file, log=False).specification<EOL><DEDENT>if u:<EOL><INDENT>ctx.invoke(upload, sync=False)<EOL><DEDENT>if specification:<EOL><INDENT>check_polyaxonfile_kind(specification=specification, kind=specification._NOTEBOOK)<EOL>job_config = specification.parsed_data<EOL><DEDENT>user, project_name = get_project_or_local(ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>response = PolyaxonClient().project.start_notebook(user, project_name, job_config)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(project_name))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>Printer.print_header(\"<STR_LIT>\")<EOL>click.echo(get_notebook_url(user, project_name))<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>if response.status_code != <NUM_LIT>:<EOL><INDENT>Printer.print_error('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success('<STR_LIT>'.format(project_name))<EOL>clint.textui.puts(\"<STR_LIT>\")<EOL>clint.textui.puts(\"<STR_LIT>\")<EOL>with clint.textui.indent(<NUM_LIT:4>):<EOL><INDENT>clint.textui.puts(get_notebook_url(user, project_name))<EOL><DEDENT>", "docstring": "Start a notebook deployment for this project.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Example:\n\n    \\b\n    ```bash\n    $ polyaxon notebook start -f file -f file_override ...\n    ```\n\n    Example: upload before running\n\n    \\b\n    ```bash\n    $ polyaxon -p user12/mnist notebook start 
-f file -u\n    ```", "id": "f1060:m3"}
{"signature": "@project.command()<EOL>@click.option('<STR_LIT>', type=int, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=str,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=str, help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def tensorboards(ctx, query, sort, page):", "body": "user, project_name = get_project_or_local(ctx.obj.get('<STR_LIT>'))<EOL>page = page or <NUM_LIT:1><EOL>try:<EOL><INDENT>response = PolyaxonClient().project.list_tensorboards(username=user,<EOL>project_name=project_name,<EOL>query=query,<EOL>sort=sort,<EOL>page=page)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(project_name))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>meta = get_meta_response(response)<EOL>if meta:<EOL><INDENT>Printer.print_header('<STR_LIT>'.format(user, project_name))<EOL>Printer.print_header('<STR_LIT>')<EOL>dict_tabulate(meta)<EOL><DEDENT>else:<EOL><INDENT>Printer.print_header('<STR_LIT>'.format(user,<EOL>project_name))<EOL><DEDENT>objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))<EOL>for o in response['<STR_LIT>']]<EOL>objects = list_dicts_to_tabulate(objects)<EOL>if objects:<EOL><INDENT>Printer.print_header(\"<STR_LIT>\")<EOL>objects.pop('<STR_LIT>', None)<EOL>dict_tabulate(objects, is_list_dict=True)<EOL><DEDENT>", "docstring": "List tensorboard jobs for this project.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)", "id": "f1061:m11"}
{"signature": "@project.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def unbookmark(ctx):", "body": "user, project_name = get_project_or_local(ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>PolyaxonClient().project.unbookmark(user, project_name)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(user, project_name))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\".format(user, project_name))<EOL>", "docstring": "Unbookmark project.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)", "id": "f1061:m16"}
{"signature": "@project.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def get(ctx):", "body": "user, project_name = get_project_or_local(ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>response = PolyaxonClient().project.get_project(user, project_name)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(project_name))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>get_project_details(response)<EOL>", "docstring": "Get info for current project, by project_name, or user/project_name.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    To get current project:\n\n    \\b\n    ```bash\n    $ polyaxon project get\n    ```\n\n    To get a project by name\n\n    \\b\n    ```bash\n    $ polyaxon project get user/project\n    ```", "id": "f1061:m4"}
{"signature": "@project.command()<EOL>@click.option('<STR_LIT>', required=True, type=str,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=str, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=str, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', is_flag=True, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', is_flag=True, help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def create(ctx, name, description, tags, private, init):", "body": "try:<EOL><INDENT>tags = tags.split('<STR_LIT:U+002C>') if tags else None<EOL>project_dict = dict(name=name, description=description, is_public=not private, tags=tags)<EOL>project_config = ProjectConfig.from_dict(project_dict)<EOL><DEDENT>except ValidationError:<EOL><INDENT>Printer.print_error('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>try:<EOL><INDENT>_project = PolyaxonClient().project.create_project(project_config)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(name))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\".format(_project.name))<EOL>if init:<EOL><INDENT>ctx.obj = {}<EOL>ctx.invoke(init_project, project=name)<EOL><DEDENT>", "docstring": "Create a new project.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Example:\n\n    \\b\n    ```bash\n    $ polyaxon project create --name=cats-vs-dogs --description=\"Image Classification with DL\"\n    ```", "id": "f1061:m2"}
{"signature": "@bookmark.command()<EOL>@click.option('<STR_LIT>', type=int, help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def builds(ctx, page):", "body": "user = get_username_or_local(ctx.obj.get('<STR_LIT:username>'))<EOL>page = page or <NUM_LIT:1><EOL>try:<EOL><INDENT>response = PolyaxonClient().bookmark.builds(username=user, page=page)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error(<EOL>'<STR_LIT>'.format(user))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>meta = get_meta_response(response)<EOL>if meta:<EOL><INDENT>Printer.print_header('<STR_LIT>'.format(user))<EOL>Printer.print_header('<STR_LIT>')<EOL>dict_tabulate(meta)<EOL><DEDENT>else:<EOL><INDENT>Printer.print_header('<STR_LIT>'.format(user))<EOL><DEDENT>objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))<EOL>for o in response['<STR_LIT>']]<EOL>objects = list_dicts_to_tabulate(objects)<EOL>if objects:<EOL><INDENT>Printer.print_header(\"<STR_LIT>\")<EOL>dict_tabulate(objects, is_list_dict=True)<EOL><DEDENT>", "docstring": "List bookmarked builds for user.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon bookmark builds\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon bookmark -u adam builds\n    ```", "id": "f1062:m5"}
{"signature": "@click.group()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=str)<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def bookmark(ctx, username):  ", "body": "ctx.obj = ctx.obj or {}<EOL>ctx.obj['<STR_LIT:username>'] = username<EOL>", "docstring": "Commands for bookmarks.", "id": "f1062:m0"}
{"signature": "@bookmark.command()<EOL>@click.option('<STR_LIT>', type=int, help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def groups(ctx, page):", "body": "user = get_username_or_local(ctx.obj.get('<STR_LIT:username>'))<EOL>page = page or <NUM_LIT:1><EOL>try:<EOL><INDENT>response = PolyaxonClient().bookmark.groups(username=user, page=page)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error(<EOL>'<STR_LIT>'.format(user))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>meta = get_meta_response(response)<EOL>if meta:<EOL><INDENT>Printer.print_header('<STR_LIT>'.format(user))<EOL>Printer.print_header('<STR_LIT>')<EOL>dict_tabulate(meta)<EOL><DEDENT>else:<EOL><INDENT>Printer.print_header('<STR_LIT>'.format(user))<EOL><DEDENT>objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))<EOL>for o in response['<STR_LIT>']]<EOL>objects = list_dicts_to_tabulate(objects)<EOL>if objects:<EOL><INDENT>Printer.print_header(\"<STR_LIT>\")<EOL>dict_tabulate(objects, is_list_dict=True)<EOL><DEDENT>", "docstring": "List bookmarked experiment groups for user.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon bookmark groups\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon bookmark -u adam groups\n    ```", "id": "f1062:m2"}
{"signature": "@group.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def get(ctx):", "body": "user, project_name, _group = get_project_group_or_local(ctx.obj.get('<STR_LIT>'),<EOL>ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>response = PolyaxonClient().experiment_group.get_experiment_group(<EOL>user, project_name, _group)<EOL>cache.cache(config_manager=GroupManager, response=response)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_group))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>get_group_details(response)<EOL>", "docstring": "Get experiment group by uuid.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon group -g 13 get\n    ```", "id": "f1064:m2"}
{"signature": "@group.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def stop(ctx, yes, pending):", "body": "user, project_name, _group = get_project_group_or_local(ctx.obj.get('<STR_LIT>'),<EOL>ctx.obj.get('<STR_LIT>'))<EOL>if not yes and not click.confirm(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(_group)):<EOL><INDENT>click.echo('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>try:<EOL><INDENT>PolyaxonClient().experiment_group.stop(user, project_name, _group, pending=pending)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_group))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\")<EOL>", "docstring": "Stop experiments in the group.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples: stop only pending experiments\n\n    \\b\n    ```bash\n    $ polyaxon group stop --pending\n    ```\n\n    Examples: stop all unfinished\n\n    \\b\n    ```bash\n    $ polyaxon group stop\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon group -g 2 stop\n    ```", "id": "f1064:m6"}
{"signature": "@group.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def unbookmark(ctx):", "body": "user, project_name, _group = get_project_group_or_local(ctx.obj.get('<STR_LIT>'),<EOL>ctx.obj.get('<STR_LIT>'))<EOL>try:<EOL><INDENT>PolyaxonClient().experiment_group.unbookmark(user, project_name, _group)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_group))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\")<EOL>", "docstring": "Unbookmark group.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Examples:\n\n    \\b\n    ```bash\n    $ polyaxon group unbookmark\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon group -g 2 unbookmark\n    ```", "id": "f1064:m9"}
{"signature": "@group.command()<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def delete(ctx):", "body": "user, project_name, _group = get_project_group_or_local(ctx.obj.get('<STR_LIT>'),<EOL>ctx.obj.get('<STR_LIT>'))<EOL>if not click.confirm(\"<STR_LIT>\".format(_group)):<EOL><INDENT>click.echo('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>try:<EOL><INDENT>response = PolyaxonClient().experiment_group.delete_experiment_group(<EOL>user, project_name, _group)<EOL>GroupManager.purge()<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_group))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if response.status_code == <NUM_LIT>:<EOL><INDENT>Printer.print_success(\"<STR_LIT>\".format(_group))<EOL><DEDENT>", "docstring": "Delete experiment group.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)", "id": "f1064:m3"}
{"signature": "@group.command()<EOL>@click.option('<STR_LIT>', type=str,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=str, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=str, help='<STR_LIT>')<EOL>@click.pass_context<EOL>@clean_outputs<EOL>def update(ctx, name, description, tags):", "body": "user, project_name, _group = get_project_group_or_local(ctx.obj.get('<STR_LIT>'),<EOL>ctx.obj.get('<STR_LIT>'))<EOL>update_dict = {}<EOL>if name:<EOL><INDENT>update_dict['<STR_LIT:name>'] = name<EOL><DEDENT>if description:<EOL><INDENT>update_dict['<STR_LIT:description>'] = description<EOL><DEDENT>tags = validate_tags(tags)<EOL>if tags:<EOL><INDENT>update_dict['<STR_LIT>'] = tags<EOL><DEDENT>if not update_dict:<EOL><INDENT>Printer.print_warning('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>try:<EOL><INDENT>response = PolyaxonClient().experiment_group.update_experiment_group(<EOL>user, project_name, _group, update_dict)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(_group))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(\"<STR_LIT>\")<EOL>get_group_details(response)<EOL>", "docstring": "Update experiment group.\n\n    Uses [Caching](/references/polyaxon-cli/#caching)\n\n    Example:\n\n    \\b\n    ```bash\n    $ polyaxon group -g 2 update --description=\"new description for this group\"\n    ```\n\n    \\b\n    ```bash\n    $ polyaxon update --tags=\"foo, bar\"\n    ```", "id": "f1064:m4"}
{"signature": "@click.group()<EOL>@clean_outputs<EOL>def superuser():", "body": "", "docstring": "Commands for superuser role management.", "id": "f1065:m0"}
{"signature": "@superuser.command()<EOL>@click.argument('<STR_LIT:username>', type=str)<EOL>@clean_outputs<EOL>def grant(username):", "body": "try:<EOL><INDENT>PolyaxonClient().user.grant_superuser(username)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(username))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>Printer.print_success(<EOL>\"<STR_LIT>\".format(username))<EOL>", "docstring": "Grant superuser role to a user.\n\n    Example:\n\n    \\b\n    ```bash\n    $ polyaxon superuser grant david\n    ```", "id": "f1065:m1"}
{"signature": "@click.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=int, help='<STR_LIT>')<EOL>@clean_outputs<EOL>def cluster(node):", "body": "cluster_client = PolyaxonClient().cluster<EOL>if node:<EOL><INDENT>try:<EOL><INDENT>node_config = cluster_client.get_node(node)<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>'.format(node))<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>get_node_info(node_config)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>cluster_config = cluster_client.get_cluster()<EOL><DEDENT>except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:<EOL><INDENT>Printer.print_error('<STR_LIT>')<EOL>Printer.print_error('<STR_LIT>'.format(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>get_cluster_info(cluster_config)<EOL><DEDENT>", "docstring": "Get cluster and nodes info.", "id": "f1066:m2"}
{"signature": "def export(defn):", "body": "globals()[defn.__name__] = defn<EOL>__all__.append(defn.__name__)<EOL>return defn<EOL>", "docstring": "Decorator to explicitly mark functions that are exposed in a lib.", "id": "f1072:m0"}
{"signature": "def __load_linktype__(link_type):", "body": "try:<EOL><INDENT>filep, pathname, description = imp.find_module(link_type, sys.path)<EOL>link_type_module = imp.load_module(link_type, filep, pathname,<EOL>description)<EOL><DEDENT>except ImportError:<EOL><INDENT>return None<EOL><DEDENT>finally:<EOL><INDENT>if filep:<EOL><INDENT>filep.close()<EOL><DEDENT><DEDENT>return link_type_module<EOL>", "docstring": "Given a string for a given module, attempt to load it.", "id": "f1075:m4"}
{"signature": "def clookup(ll_type):", "body": "res = __get_ll_type__(ll_type)<EOL>if res:<EOL><INDENT>return res[<NUM_LIT:3>]<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT>", "docstring": "Given an ll_type, retrieve the linklayer constructor to decode\nthe packets.", "id": "f1075:m3"}
{"signature": "def __get_ll_type__(ll_type):", "body": "res = [llt for llt in __LL_TYPES__<EOL>if llt[<NUM_LIT:1>] == ll_type]<EOL>assert len(res) < <NUM_LIT:2>, '<STR_LIT>'<EOL>if res:<EOL><INDENT>return res[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Given an lltype value, retrieve its definition.", "id": "f1075:m0"}
{"signature": "def slookup(ll_type):", "body": "res = __get_ll_type__(ll_type)<EOL>if res:<EOL><INDENT>return res[<NUM_LIT:2>]<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT>", "docstring": "Given an ll_type, retrieve the short name for the link layer.", "id": "f1075:m2"}
{"signature": "def _generate_packets(file_h, header, layers=<NUM_LIT:0>):", "body": "hdrp = ctypes.pointer(header)<EOL>while True:<EOL><INDENT>pkt = _read_a_packet(file_h, hdrp, layers)<EOL>if pkt:<EOL><INDENT>yield pkt<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Read packets one by one from the capture file. Expects the file\nhandle to point to the location immediately after the header (24\nbytes).", "id": "f1076:m6"}
{"signature": "def init_capfile(self, layers=<NUM_LIT:0>):", "body": "self.capfile = savefile.load_savefile(open('<STR_LIT>', '<STR_LIT:r>'),<EOL>layers=layers)<EOL>", "docstring": "Initialise capture file.", "id": "f1078:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def setUpClass(cls):<DEDENT>", "body": "print('<STR_LIT>')<EOL>", "docstring": "Print a start message when loading the test suite.", "id": "f1081:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def setUpClass(cls):<DEDENT>", "body": "print('<STR_LIT>')<EOL>", "docstring": "Print a start message when loading the test suite.", "id": "f1082:c0:m0"}
{"signature": "def init_capfile(self, layers=<NUM_LIT:0>):", "body": "tfile = create_pcap()<EOL>self.capfile = savefile.load_savefile(tfile, layers=layers)<EOL>tfile.close()<EOL>if os.path.exists(tfile.name):<EOL><INDENT>os.unlink(tfile.name)<EOL><DEDENT>", "docstring": "Initialise the capture file.", "id": "f1083:c0:m0"}
{"signature": "def create_pcap():", "body": "tfile = tempfile.NamedTemporaryFile()<EOL>if sys.version_info[<NUM_LIT:0>] >= <NUM_LIT:3>:  <EOL><INDENT>capture = pickle.loads(base64.b64decode(fixture.TESTPCAP3))<EOL><DEDENT>else:  <EOL><INDENT>capture = pickle.loads(fixture.TESTPCAP2.decode('<STR_LIT>'))<EOL><DEDENT>with open(tfile.name, '<STR_LIT:wb>') as f:<EOL><INDENT>f.write(capture)<EOL><DEDENT>return tfile<EOL>", "docstring": "Create a capture file from the test fixtures.", "id": "f1083:m0"}
{"signature": "def setUp(self):", "body": "<EOL>if not self.capfile:<EOL><INDENT>self.init_capfile()<EOL><DEDENT>", "docstring": "Set up a default capture file.", "id": "f1083:c0:m2"}
{"signature": "def parse_ipv4(address):", "body": "raw = struct.pack('<STR_LIT:I>', address)<EOL>octets = struct.unpack('<STR_LIT>', raw)[::-<NUM_LIT:1>]<EOL>ipv4 = b'<STR_LIT:.>'.join([('<STR_LIT>' % o).encode('<STR_LIT:ascii>') for o in bytearray(octets)])<EOL>return ipv4<EOL>", "docstring": "Given a raw IPv4 address (i.e. as an unsigned integer), return it in\ndotted quad notation.", "id": "f1087:m0"}
{"signature": "def payload_type(ethertype):", "body": "if ethertype == <NUM_LIT>:<EOL><INDENT>from pcapfile.protocols.network.ip import IP<EOL>return (IP, '<STR_LIT>')<EOL>", "docstring": "Returns the appropriate payload constructor based on the supplied\nEtherType.", "id": "f1088:m1"}
{"signature": "def strip_llc(self, idx):", "body": "llc = {}<EOL>snap = <NUM_LIT><EOL>llc_dsap = struct.unpack('<STR_LIT:B>', self._packet[idx:idx + <NUM_LIT:1>])[<NUM_LIT:0>]<EOL>llc['<STR_LIT>'] = llc_dsap >> <NUM_LIT:1><EOL>llc['<STR_LIT>'] = llc_dsap & <NUM_LIT><EOL>idx += <NUM_LIT:1><EOL>llc_ssap = struct.unpack('<STR_LIT:B>', self._packet[idx:idx + <NUM_LIT:1>])[<NUM_LIT:0>]<EOL>llc['<STR_LIT>'] = llc_ssap >> <NUM_LIT:1><EOL>llc['<STR_LIT>'] = llc_ssap & <NUM_LIT><EOL>idx += <NUM_LIT:1><EOL>if llc_dsap == snap and llc_ssap == snap:<EOL><INDENT>llc_control = struct.unpack('<STR_LIT:B>', self._packet[idx:idx + <NUM_LIT:1>])[<NUM_LIT:0>]<EOL>llc['<STR_LIT>'] = llc_control >> <NUM_LIT:2><EOL>llc['<STR_LIT>'] = llc_control & <NUM_LIT><EOL>idx += <NUM_LIT:1><EOL>llc['<STR_LIT>'] = self._packet[idx:idx + <NUM_LIT:3>]<EOL>idx += <NUM_LIT:3><EOL>llc['<STR_LIT:type>'] = self._packet[idx:idx + <NUM_LIT:2>]<EOL>return <NUM_LIT:8>, llc<EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:4>, llc<EOL><DEDENT>", "docstring": "strip(4 or 8 byte) logical link control headers\n        :return: int\n            number of processed bytes\n        :return: dict\n            llc information\n        see -> http://www.wildpackets.com/resources/compendium/ethernet/frame_snap_iee8023\n        ABBRVS.\n        ssap: source service access point\n        dsap: destination service access point\n        SNAP(Subnetwork Acess Protocol)", "id": "f1089:c4:m4"}
{"signature": "def strip_dbm_tx_power(self, idx):", "body": "idx = Radiotap.align(idx, <NUM_LIT:1>)<EOL>dbm_tx_power, = struct.unpack_from('<STR_LIT>', self._rtap, idx)<EOL>return idx + <NUM_LIT:1>, dbm_tx_power<EOL>", "docstring": "strip(1 byte) dbm_tx_power\n        :return: int\n            idx\n        :return: int", "id": "f1089:c1:m15"}
{"signature": "def __init__(self, frame, no_rtap=False):", "body": "Wifi.__init__(self, frame, no_rtap)<EOL>self.tagged_params = []<EOL>self._raw_tagged_params = None<EOL>self.timestamp = None<EOL>self.interval = None<EOL>self.fixed_capabils = None<EOL>", "docstring": "Constructor Method\n        :frame: ctypes.Structure\n        :subtype: int", "id": "f1089:c5:m0"}
{"signature": "def strip_rx_flags(self, idx):", "body": "rx_flags = collections.namedtuple('<STR_LIT>', ['<STR_LIT>', '<STR_LIT>'])<EOL>idx = Radiotap.align(idx, <NUM_LIT:2>)<EOL>flags, = struct.unpack_from('<STR_LIT>', self._rtap, idx)<EOL>flag_bits = format(flags, '<STR_LIT>')[::-<NUM_LIT:1>]<EOL>rx_flags.reserved = int(flag_bits[<NUM_LIT:0>])<EOL>rx_flags.badplcp = int(flag_bits[<NUM_LIT:1>])<EOL>return idx + <NUM_LIT:2>, rx_flags<EOL>", "docstring": "strip(2 byte) radiotap.rxflags\n        :idx: int\n        :return: int\n            idx\n        :return: collections.namedtuple", "id": "f1089:c1:m19"}
{"signature": "def strip_mac_addrs(self):", "body": "qos_idx, seq_idx = <NUM_LIT:0>, <NUM_LIT:0><EOL>sa, ta, ra, da, bssid = None, None, None, None, None<EOL>if self.to_ds == <NUM_LIT:1> and self.from_ds == <NUM_LIT:1>:<EOL><INDENT>(ra, ta, da) = struct.unpack('<STR_LIT>', self._packet[<NUM_LIT:4>:<NUM_LIT>])<EOL>sa = struct.unpack('<STR_LIT>', self._packet[<NUM_LIT>:<NUM_LIT:30>])[<NUM_LIT:0>]<EOL>qos_idx = <NUM_LIT:30><EOL>seq_idx = <NUM_LIT><EOL><DEDENT>elif self.to_ds == <NUM_LIT:0> and self.from_ds == <NUM_LIT:1>:<EOL><INDENT>(ra, ta, sa) = struct.unpack('<STR_LIT>', self._packet[<NUM_LIT:4>:<NUM_LIT>])<EOL>qos_idx = <NUM_LIT><EOL>seq_idx = <NUM_LIT><EOL><DEDENT>elif self.to_ds == <NUM_LIT:1> and self.from_ds == <NUM_LIT:0>:<EOL><INDENT>(ra, ta, da) = struct.unpack('<STR_LIT>', self._packet[<NUM_LIT:4>:<NUM_LIT>])<EOL>qos_idx = <NUM_LIT><EOL>seq_idx = <NUM_LIT><EOL><DEDENT>elif self.to_ds == <NUM_LIT:0> and self.from_ds == <NUM_LIT:0>:<EOL><INDENT>(ra, ta, bssid) = struct.unpack('<STR_LIT>', self._packet[<NUM_LIT:4>:<NUM_LIT>])<EOL>qos_idx = <NUM_LIT><EOL>seq_idx = <NUM_LIT><EOL><DEDENT>if ta is not None:<EOL><INDENT>ta = Wifi.get_mac_addr(ta)<EOL><DEDENT>if ra is not None:<EOL><INDENT>ra = Wifi.get_mac_addr(ra)<EOL><DEDENT>if sa is not None:<EOL><INDENT>sa = Wifi.get_mac_addr(sa)<EOL><DEDENT>if da is not None:<EOL><INDENT>da = Wifi.get_mac_addr(da)<EOL><DEDENT>if bssid is not None:<EOL><INDENT>bssid = Wifi.get_mac_addr(bssid)<EOL><DEDENT>return seq_idx, qos_idx, sa, ta, ra, da, bssid<EOL>", "docstring": "strip mac address(each 6 byte) information.\n        (wlan.ta, wlan.ra, wlan.sa, wlan.da)\n        (transmitter, receiver, source, destination)\n        :return: int\n            index of sequence control\n        :return: int\n            index after mac addresses\n        :return: str\n            source address (sa)\n        :return: str\n            transmitter address (ta)\n        :return: str\n            receiver address (ra)\n        :return: str\n   
         destination address (da)\n        :return: str\n            basic service sed identifier (bssid)", "id": "f1089:c2:m4"}
{"signature": "def __init__(self, rtap_bytes):", "body": "super(Radiotap, self).__init__()<EOL>self._raw = {}   <EOL>self._bits = {}  <EOL>idx = <NUM_LIT:0><EOL>self._rtap = rtap_bytes<EOL>self.vers = Radiotap.strip_vers(self._rtap[idx:idx + <NUM_LIT:1>])<EOL>idx += <NUM_LIT:1><EOL>self.pad = Radiotap.strip_pad(self._rtap[idx:idx + <NUM_LIT:1>])<EOL>idx += <NUM_LIT:1><EOL>self.len = Radiotap.strip_len(self._rtap[idx:idx + <NUM_LIT:2>])<EOL>idx += <NUM_LIT:2><EOL>self.present, self.present_bits = Radiotap.strip_present(self._rtap[idx:idx + <NUM_LIT:4>])<EOL>idx += <NUM_LIT:4><EOL>if self.present.tsft:  <EOL><INDENT>idx, self.mactime = self.strip_tsft(idx)<EOL><DEDENT>if self.present.flags:  <EOL><INDENT>idx, self.flags = self.strip_flags(idx)<EOL><DEDENT>if self.present.rate:  <EOL><INDENT>idx, self.rate = self.strip_rate(idx)<EOL><DEDENT>if self.present.channel:  <EOL><INDENT>idx, self.chan = self.strip_chan(idx)<EOL><DEDENT>if self.present.fhss:  <EOL><INDENT>idx, self.fhss = self.strip_fhss(idx)<EOL><DEDENT>if self.present.dbm_antsignal:  <EOL><INDENT>idx, self.dbm_antsignal = self.strip_dbm_antsignal(idx)<EOL><DEDENT>if self.present.dbm_antnoise:  <EOL><INDENT>idx, self.dbm_antnoise = self.strip_dbm_antnoise(idx)<EOL><DEDENT>if self.present.lock_quality:  <EOL><INDENT>idx, self.lock_quality = self.strip_lock_quality(idx)<EOL><DEDENT>if self.present.tx_attenuation:  <EOL><INDENT>idx, self.tx_attenuation = self.strip_tx_attenuation(idx)<EOL><DEDENT>if self.present.db_tx_attenuation:  <EOL><INDENT>idx, self.db_tx_attenuation = self.strip_db_tx_attenuation(idx)<EOL><DEDENT>if self.present.dbm_tx_power:  <EOL><INDENT>idx, self.dbm_tx_power = self.strip_dbm_tx_power(idx)<EOL><DEDENT>if self.present.antenna:  <EOL><INDENT>idx, self.antenna = self.strip_antenna(idx)<EOL><DEDENT>if self.present.db_antsignal:  <EOL><INDENT>idx, self.db_antsignal = self.strip_db_antsignal(idx)<EOL><DEDENT>if self.present.db_antnoise:  <EOL><INDENT>idx, self.db_antnoise = 
self.strip_db_antnoise(idx)<EOL><DEDENT>if self.present.rxflags:  <EOL><INDENT>idx, self.rxflags = self.strip_rx_flags(idx)<EOL><DEDENT>if self.present.txflags:  <EOL><INDENT>idx, self.txflags = self.strip_tx_flags(idx)<EOL><DEDENT>if self.present.rts_retries:  <EOL><INDENT>idx, self.rts_retries = self.strip_rts_retries(idx)<EOL><DEDENT>if self.present.data_retries:  <EOL><INDENT>idx, self.data_retries = self.strip_data_retries(idx)<EOL><DEDENT>if self.present.xchannel:  <EOL><INDENT>idx, self.xchannel = self.strip_xchannel(idx)<EOL><DEDENT>if self.present.mcs:  <EOL><INDENT>idx, self.mcs = self.strip_mcs(idx)<EOL><DEDENT>if self.present.ampdu:  <EOL><INDENT>idx, self.ampdu = self.strip_ampdu(idx)<EOL><DEDENT>if self.present.vht:  <EOL><INDENT>idx, self.vht = self.strip_vht(idx)<EOL><DEDENT>self.prot_type = self.extract_protocol()<EOL>", "docstring": "Constructor method.\n        :rtap_bytes: ctypes.Structure", "id": "f1089:c1:m0"}
{"signature": "@staticmethod<EOL><INDENT>def strip_cntrl(payload):<DEDENT>", "body": "cntrl = struct.unpack('<STR_LIT:H>', payload)[<NUM_LIT:0>]<EOL>cntrl_bits = format(cntrl, '<STR_LIT>')[::-<NUM_LIT:1>]<EOL>ackpolicy = int(cntrl_bits[<NUM_LIT:0>])<EOL>multitid = int(cntrl_bits[<NUM_LIT:1>])<EOL>return ackpolicy, multitid<EOL>", "docstring": "strip(2 byte) wlan.ba.control\n        :payload: ctypes.structure\n        :return: int\n            multitid (tid: traffic indicator)\n        :return: int\n            ackpolicy", "id": "f1089:c12:m2"}
{"signature": "def strip_rts_retries(self, idx):", "body": "rts_retries, = struct.unpack_from('<STR_LIT>', self._rtap, idx)<EOL>return idx + <NUM_LIT:1>, rts_retries<EOL>", "docstring": "strip(1 byte) rts_retries\n        :idx: int\n        :return: int\n            idx\n        :return: int", "id": "f1089:c1:m21"}
{"signature": "def __init__(self, frame, no_rtap=False):", "body": "Control.__init__(self, frame, no_rtap)<EOL>(ra_mac, ta_mac) = struct.unpack('<STR_LIT>', self._packet[<NUM_LIT:4>:<NUM_LIT:16>])<EOL>self.ra = self.ta = None<EOL>self.ackpolicy = self.multitid = None<EOL>self.ssc_frag = self.ssc_seq = None<EOL>self.bitmap_str = None<EOL>self.acked_seqs = []<EOL>self.ra = Wifi.get_mac_addr(ra_mac)<EOL>self.ta = Wifi.get_mac_addr(ta_mac)<EOL>idx = <NUM_LIT:16><EOL>payload = self._packet[idx:idx + <NUM_LIT:2>]<EOL>self.ackpolicy, self.multitid = BACK.strip_cntrl(payload)<EOL>idx += <NUM_LIT:2><EOL>payload = self._packet[idx:idx + <NUM_LIT:2>]<EOL>self.ssc_seq, self.ssc_frag = BACK.strip_ssc(payload)<EOL>idx += <NUM_LIT:2><EOL>payload = self._packet[idx:idx + <NUM_LIT:8>]<EOL>self.bitmap_str = BACK.strip_bitmap_str(payload)<EOL>idx += <NUM_LIT:8><EOL>self.acked_seqs = BACK.extract_acked_seqs(self.bitmap_str, self.ssc_seq)<EOL>", "docstring": "Constructor method.\n        :frame: ctypes.Structure", "id": "f1089:c12:m0"}
{"signature": "def strip_ccmp(self, idx):", "body": "ccmp_extiv = None<EOL>if len(self._packet[idx:]) >= <NUM_LIT:8>:<EOL><INDENT>raw_bytes = self._packet[idx:idx + <NUM_LIT:8>]<EOL>ccmp_extiv, = struct.unpack_from('<STR_LIT>', raw_bytes, <NUM_LIT:0>)<EOL><DEDENT>return <NUM_LIT:8>, ccmp_extiv<EOL>", "docstring": "strip(8 byte) wlan.ccmp.extiv\n        CCMP Extended Initialization Vector\n        :return: int\n            number of processed bytes\n        :return: ctypes.raw\n            ccmp vector", "id": "f1089:c4:m2"}
{"signature": "def strip_xchannel(self, idx):", "body": "xchannel = collections.namedtuple(<EOL>'<STR_LIT>', ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>flags = collections.namedtuple(<EOL>'<STR_LIT>', ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>idx = Radiotap.align(idx, <NUM_LIT:2>)<EOL>flag_val, freq, channel, max_power = struct.unpack_from('<STR_LIT>', self._rtap, idx)<EOL>xchannel.freq = freq<EOL>xchannel.channel = channel<EOL>xchannel.max_power = max_power<EOL>bits = format(flag_val, '<STR_LIT>')[::-<NUM_LIT:1>]<EOL>flags.turbo = int(bits[<NUM_LIT:4>])<EOL>flags.cck = int(bits[<NUM_LIT:5>])<EOL>flags.ofdm = int(bits[<NUM_LIT:6>])<EOL>flags.two_g = int(bits[<NUM_LIT:7>])<EOL>flags.five_g = int(bits[<NUM_LIT:8>])<EOL>flags.passive = int(bits[<NUM_LIT:9>])<EOL>flags.dynamic = int(bits[<NUM_LIT:10>])<EOL>flags.gfsk = int(bits[<NUM_LIT:11>])<EOL>flags.gsm = int(bits[<NUM_LIT:12>])<EOL>flags.sturbo = int(bits[<NUM_LIT>])<EOL>flags.half = int(bits[<NUM_LIT>])<EOL>flags.quarter = int(bits[<NUM_LIT:15>])<EOL>flags.ht_20 = int(bits[<NUM_LIT:16>])<EOL>flags.ht_40u = int(bits[<NUM_LIT>])<EOL>flags.ht_40d = int(bits[<NUM_LIT>])<EOL>xchannel.flags = flags<EOL>return idx + <NUM_LIT:8>, xchannel<EOL>", "docstring": "strip(7 bytes) radiotap.xchannel.channel(1 byte),\n        radiotap.xchannel.freq(2 bytes) and radiotap.xchannel.flags(4 bytes)\n        :idx: int\n        :return: int\n            idx\n        :return: collections.namedtuple", "id": "f1089:c1:m23"}
{"signature": "def strip_flags(self, idx):", "body": "flags = collections.namedtuple(<EOL>'<STR_LIT>', ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>val, = struct.unpack_from('<STR_LIT>', self._rtap, idx)<EOL>bits = format(val, '<STR_LIT>')[::-<NUM_LIT:1>]<EOL>flags.cfp = int(bits[<NUM_LIT:0>])<EOL>flags.preamble = int(bits[<NUM_LIT:1>])<EOL>flags.wep = int(bits[<NUM_LIT:2>])<EOL>flags.fragmentation = int(bits[<NUM_LIT:3>])<EOL>flags.fcs = int(bits[<NUM_LIT:4>])<EOL>flags.datapad = int(bits[<NUM_LIT:5>])<EOL>flags.badfcs = int(bits[<NUM_LIT:6>])<EOL>flags.shortgi = int(bits[<NUM_LIT:7>])<EOL>return idx + <NUM_LIT:1>, flags<EOL>", "docstring": "strip(1 byte) radiotap.flags\n        :idx: int\n        :return: int\n            idx\n        :return: collections.namedtuple", "id": "f1089:c1:m6"}
{"signature": "@staticmethod<EOL><INDENT>def align(val, align):<DEDENT>", "body": "return (val + align - <NUM_LIT:1>) & ~(align - <NUM_LIT:1>)<EOL>", "docstring": ":val: int\n:align: int\n:return: int", "id": "f1089:c1:m28"}
{"signature": "@staticmethod<EOL><INDENT>def strip_present(payload):<DEDENT>", "body": "present = collections.namedtuple(<EOL>'<STR_LIT>', ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>val = struct.unpack('<STR_LIT>', payload)[<NUM_LIT:0>]<EOL>bits = format(val, '<STR_LIT>')[::-<NUM_LIT:1>]<EOL>present.tsft = int(bits[<NUM_LIT:0>])               <EOL>present.flags = int(bits[<NUM_LIT:1>])              <EOL>present.rate = int(bits[<NUM_LIT:2>])               <EOL>present.channel = int(bits[<NUM_LIT:3>])            <EOL>present.fhss = int(bits[<NUM_LIT:4>])               <EOL>present.dbm_antsignal = int(bits[<NUM_LIT:5>])      <EOL>present.dbm_antnoise = int(bits[<NUM_LIT:6>])       <EOL>present.lock_quality = int(bits[<NUM_LIT:7>])       <EOL>present.tx_attenuation = int(bits[<NUM_LIT:8>])     <EOL>present.db_tx_attenuation = int(bits[<NUM_LIT:9>])  <EOL>present.dbm_tx_power = int(bits[<NUM_LIT:10>])      <EOL>present.antenna = int(bits[<NUM_LIT:11>])           <EOL>present.db_antsignal = int(bits[<NUM_LIT:12>])      <EOL>present.db_antnoise = int(bits[<NUM_LIT>])       <EOL>present.rxflags = int(bits[<NUM_LIT>])           <EOL>present.txflags = int(bits[<NUM_LIT:15>])           <EOL>present.rts_retries = int(bits[<NUM_LIT:16>])       <EOL>present.data_retries = int(bits[<NUM_LIT>])      <EOL>present.xchannel = int(bits[<NUM_LIT>])          <EOL>present.mcs = int(bits[<NUM_LIT>])               <EOL>present.ampdu = int(bits[<NUM_LIT:20>])             <EOL>present.vht = int(bits[<NUM_LIT>])               <EOL>present.rtap_ns = int(bits[<NUM_LIT>])           <EOL>present.ven_ns = int(bits[<NUM_LIT:30>])            <EOL>present.ext = int(bits[<NUM_LIT>])               
<EOL>return present, bits<EOL>", "docstring": "strip(4 byte) radiotap.present. Those are flags that\n        identify existence of incoming radiotap meta-data.\n        :idx: int\n        :return: str\n        :return: namedtuple", "id": "f1089:c1:m4"}
{"signature": "def __init__(self, frame, no_rtap=False):", "body": "Management.__init__(self, frame, no_rtap)<EOL>idx = <NUM_LIT:0><EOL>self.timestamp = self.interval = None<EOL>self.ta = self.ra = self.bssid = None<EOL>self.seq_num = self.frag_num = None<EOL>self.ess = self.ibss = None<EOL>self.privacy = None<EOL>self.priv = self.short_preamble = self.pbcc = self.chan_agility = None<EOL>self.spec_man = self.short_slot = self.apsd = self.radio_meas = None<EOL>self.dss_ofdm = self.del_back = self.imm_back = None<EOL>seq_idx, _, _, self.ta, self.ra, _, self.bssid = self.strip_mac_addrs()<EOL>idx = seq_idx<EOL>self.seq_num, self.frag_num = self.strip_seq_cntrl(idx)<EOL>idx += <NUM_LIT:2><EOL>payload = self._packet[idx:idx + <NUM_LIT:12>]<EOL>timestamp, interval, fixed_capabils = self.strip_fixed_params(payload)<EOL>if all([timestamp, interval, fixed_capabils]):<EOL><INDENT>self.timestamp = timestamp<EOL>self.interval = interval<EOL>self.set_fixed_capabils(fixed_capabils)<EOL>idx += <NUM_LIT:12><EOL><DEDENT>else:<EOL><INDENT>logging.warning(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if idx < len(self._packet):<EOL><INDENT>self._raw_tagged_params = self._packet[idx:]<EOL>is_out_bound, tagged_params = self.parse_tagged_params(self._raw_tagged_params)<EOL>if len(tagged_params):<EOL><INDENT>self.tagged_params = tagged_params<EOL><DEDENT>if is_out_bound:<EOL><INDENT>logging.warning(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "Constructor method.\n        :frame: ctypes.Structure", "id": "f1089:c8:m0"}
{"signature": "def get_vendor_ies(self, mac_block=None, oui_type=None):", "body": "vendor_ies = []<EOL>if mac_block is not None:<EOL><INDENT>if Management.is_valid_mac_oui(mac_block):<EOL><INDENT>mac_block = mac_block.upper()<EOL>if '<STR_LIT::>' in mac_block:<EOL><INDENT>mac_block.replace('<STR_LIT::>', '<STR_LIT:->')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logging.warning(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT><DEDENT>for elem in self.tagged_params:<EOL><INDENT>tag_num = elem['<STR_LIT>']<EOL>if MNGMT_TAGS[tag_num] == '<STR_LIT>':<EOL><INDENT>if mac_block is None:<EOL><INDENT>vendor_ies.append(elem)<EOL><DEDENT>elif elem['<STR_LIT>']['<STR_LIT>'] == mac_block.encode('<STR_LIT:ascii>'):<EOL><INDENT>if oui_type is None:<EOL><INDENT>vendor_ies.append(elem)<EOL><DEDENT>elif elem['<STR_LIT>']['<STR_LIT>'] == oui_type:<EOL><INDENT>vendor_ies.append(elem)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return vendor_ies<EOL>", "docstring": "vendor information element querying\n        :mac_block: str\n            first 3 bytes of mac addresses in format of\n            00-11-22 or 00:11:22 or 001122\n        :oui_type: int\n            vendors ie type\n        :return: int\n            is valid mac_block  format\n            -1 is unknown\n        :return: dict[]\n            list of oui information elements\n            -1 on error (invalid v", "id": "f1089:c5:m10"}
{"signature": "def strip_msdu(self, idx):", "body": "<EOL>padding = <NUM_LIT:0><EOL>len_payload = <NUM_LIT:0><EOL>msdu = {<EOL>'<STR_LIT>': {},<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': <NUM_LIT:0><EOL>}<EOL>(da_mac, sa_mac) = struct.unpack('<STR_LIT>', self._packet[idx:idx + <NUM_LIT:12>])<EOL>msdu['<STR_LIT>'] = Wifi.get_mac_addr(da_mac)<EOL>msdu['<STR_LIT>'] = Wifi.get_mac_addr(sa_mac)<EOL>idx += <NUM_LIT:12><EOL>msdu['<STR_LIT>'] = struct.unpack('<STR_LIT>', self._packet[idx:idx + <NUM_LIT:2>])[<NUM_LIT:0>]<EOL>idx += <NUM_LIT:2><EOL>offset, msdu['<STR_LIT>'] = self.strip_llc(idx)<EOL>idx += offset<EOL>len_payload = msdu['<STR_LIT>'] - offset<EOL>msdu['<STR_LIT>'] = self._packet[idx:idx + len_payload]<EOL>padding = <NUM_LIT:4> - (len_payload % <NUM_LIT:4>)<EOL>return msdu, msdu['<STR_LIT>'] + padding + <NUM_LIT:12><EOL>", "docstring": "strip single mac servis data unit(msdu)\n        see -> https://mrncciew.com/2014/11/01/cwap-802-11-data-frame-aggregation/\n        :idx: int\n        :return: dict\n            msdu\n        :return: int\n            number of processed bytes", "id": "f1089:c4:m3"}
{"signature": "def strip_fhss(self, idx):", "body": "fhss = collections.namedtuple('<STR_LIT>', ['<STR_LIT>', '<STR_LIT>'])<EOL>fhss.hopset, fhss.pattern, = struct.unpack_from('<STR_LIT>', self._rtap, idx)<EOL>return idx + <NUM_LIT:2>, fhss<EOL>", "docstring": "strip (2 byte) radiotap.fhss.hopset(1 byte) and\n        radiotap.fhss.pattern(1 byte)\n        :idx: int\n        :return: int\n            idx\n        :return: collections.namedtuple", "id": "f1089:c1:m9"}
{"signature": "def strip_tx_attenuation(self, idx):", "body": "idx = Radiotap.align(idx, <NUM_LIT:2>)<EOL>tx_attenuation, = struct.unpack_from('<STR_LIT>', self._rtap, idx)<EOL>return idx + <NUM_LIT:2>, tx_attenuation<EOL>", "docstring": "strip(1 byte) tx_attenuation\n        :idx: int\n        :return: int\n            idx\n        :return: int", "id": "f1089:c1:m13"}
{"signature": "@staticmethod<EOL><INDENT>def get_interval(payload):<DEDENT>", "body": "if len(payload) != <NUM_LIT:2>:<EOL><INDENT>return None<EOL><DEDENT>interval = struct.unpack('<STR_LIT:H>', payload)[<NUM_LIT:0>]<EOL>return interval<EOL>", "docstring": "strip wlan_mgt.fixed.beacoN(2 bytes)\n        beacon interval\n        :payload: ctypes.structure\n        :return: int\n            None on error", "id": "f1089:c5:m6"}
{"signature": "def extract_protocol(self):", "body": "if self.present.mcs:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if self.present.vht:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if self.present.channel and hasattr(self, '<STR_LIT>'):<EOL><INDENT>if self.chan.five_g:<EOL><INDENT>if self.chan.ofdm:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT><DEDENT>elif self.chan.two_g:<EOL><INDENT>if self.chan.cck:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif self.chan.ofdm or self.chan.dynamic:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT><DEDENT><DEDENT>return '<STR_LIT:None>'<EOL>", "docstring": "extract 802.11 protocol from radiotap.channel.flags\n        :return: str\n            protocol name\n            one of below in success\n            [.11a, .11b, .11g, .11n, .11ac]\n            None in fail", "id": "f1089:c1:m27"}
{"signature": "@staticmethod<EOL><INDENT>def extract_acked_seqs(bitmap, ssc_seq):<DEDENT>", "body": "acked_seqs = []<EOL>for idx, val in enumerate(bitmap):<EOL><INDENT>if int(val) == <NUM_LIT:1>:<EOL><INDENT>seq = (ssc_seq + idx) % <NUM_LIT><EOL>acked_seqs.append(seq)<EOL><DEDENT><DEDENT>return acked_seqs<EOL>", "docstring": "extracts acknowledged sequences from bitmap and\n        starting sequence number.\n        :bitmap: str\n        :ssc_seq: int\n        :return: int[]\n            acknowledged sequence numbers", "id": "f1089:c12:m5"}
{"signature": "def strip_data_retries(self, idx):", "body": "data_retries, = struct.unpack_from('<STR_LIT>', self._rtap, idx)<EOL>return idx + <NUM_LIT:1>, data_retries<EOL>", "docstring": "strip(1 byte) data_retries\n        :idx: int\n        :return: int\n            idx\n        :return: int", "id": "f1089:c1:m22"}
{"signature": "def __init__(self, frame, no_rtap=False):", "body": "Wifi.__init__(self, frame, no_rtap)<EOL>", "docstring": "Constructor method.\n        :packet: ctypes.Structure\n        :no_rtap: Bool\n            shall parse radiotap headers", "id": "f1089:c3:m0"}
{"signature": "def strip_db_tx_attenuation(self, idx):", "body": "idx = Radiotap.align(idx, <NUM_LIT:2>)<EOL>db_tx_attenuation, = struct.unpack_from('<STR_LIT>', self._rtap, idx)<EOL>return idx + <NUM_LIT:2>, db_tx_attenuation<EOL>", "docstring": "strip(1 byte) db_tx_attenuation\n        :return: int\n            idx\n        :return: int", "id": "f1089:c1:m14"}
{"signature": "def strip_qos_cntrl(self, idx, prot_type):", "body": "qos_cntrl, = struct.unpack('<STR_LIT:H>', self._packet[idx:idx + <NUM_LIT:2>])<EOL>qos_cntrl_bits = format(qos_cntrl, '<STR_LIT>')[::-<NUM_LIT:1>]<EOL>qos_pri = qos_cntrl & <NUM_LIT><EOL>qos_bit = int(qos_cntrl_bits[<NUM_LIT:5>])<EOL>qos_ack = int(qos_cntrl_bits[<NUM_LIT:6>:<NUM_LIT:8>], <NUM_LIT:2>)<EOL>amsdupresent = <NUM_LIT:0><EOL>if prot_type == '<STR_LIT>':<EOL><INDENT>amsdupresent = int(qos_cntrl_bits[<NUM_LIT:7>])<EOL><DEDENT>return <NUM_LIT:2>, qos_pri, qos_bit, qos_ack, amsdupresent<EOL>", "docstring": "strip(2 byte) wlan.qos\n        :idx: int\n        :prot_type: string\n            802.11 protocol type(.11ac, .11a, .11n, etc)\n        :return: int\n            number of processed bytes\n        :return: int\n            qos priority\n        :return: int\n            qos bit\n        :return: int\n            qos acknowledgement\n        :return: int\n            amsdupresent(aggregated mac service data unit)", "id": "f1089:c4:m1"}
{"signature": "def WIFI(frame, no_rtap=False):", "body": "pack = None<EOL>try:<EOL><INDENT>pack = WiHelper.get_wifi_packet(frame, no_rtap)<EOL><DEDENT>except Exception as e:<EOL><INDENT>logging.exception(e)<EOL><DEDENT>return pack<EOL>", "docstring": "calls wifi packet discriminator and constructor.\n    :frame: ctypes.Structure\n    :no_rtap: Bool\n    :return: packet object in success\n    :return: int\n        -1 on known error\n    :return: int\n        -2 on unknown error", "id": "f1089:m0"}
{"signature": "def strip_vht(self, idx):", "body": "vht = collections.namedtuple(<EOL>'<STR_LIT>', ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>user = collections.namedtuple('<STR_LIT:user>', ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>idx = Radiotap.align(idx, <NUM_LIT:2>)<EOL>known, flags, bw = struct.unpack_from('<STR_LIT>', self._rtap, idx)<EOL>mcs_nss_0, mcs_nss_1, mcs_nss_2, mcs_nss_3 = struct.unpack_from('<STR_LIT>', self._rtap, idx + <NUM_LIT:4>)<EOL>coding, group_id, partial_id = struct.unpack_from('<STR_LIT>', self._rtap, idx + <NUM_LIT:8>)<EOL>known_bits = format(known, '<STR_LIT>')[::-<NUM_LIT:1>]<EOL>vht.known_bits = known_bits<EOL>vht.have_stbc = int(known_bits[<NUM_LIT:0>])         <EOL>vht.have_txop_ps = int(known_bits[<NUM_LIT:1>])      <EOL>vht.have_gi = int(known_bits[<NUM_LIT:2>])           <EOL>vht.have_sgi_nsym_da = int(known_bits[<NUM_LIT:3>])  <EOL>vht.have_ldpc_extra = int(known_bits[<NUM_LIT:4>])   <EOL>vht.have_beamformed = int(known_bits[<NUM_LIT:5>])   <EOL>vht.have_bw = int(known_bits[<NUM_LIT:6>])           <EOL>vht.have_gid = int(known_bits[<NUM_LIT:7>])          <EOL>vht.have_paid = int(known_bits[<NUM_LIT:8>])         <EOL>flag_bits = format(flags, '<STR_LIT>')[::-<NUM_LIT:1>]<EOL>vht.flag_bits = flag_bits<EOL>vht.stbc = int(flag_bits[<NUM_LIT:0>])<EOL>vht.txop_ps = int(flag_bits[<NUM_LIT:1>])<EOL>vht.gi = int(flag_bits[<NUM_LIT:2>])<EOL>vht.sgi_nysm_da = int(flag_bits[<NUM_LIT:3>])<EOL>vht.ldpc_extra = int(flag_bits[<NUM_LIT:4>])<EOL>vht.beamformed = int(flag_bits[<NUM_LIT:5>])<EOL>vht.group_id = group_id<EOL>vht.partial_id = partial_id<EOL>vht.bw = bw<EOL>vht.user_0 = user(None, None, None)<EOL>vht.user_1 = user(None, None, None)<EOL>vht.user_2 = user(None, None, 
None)<EOL>vht.user_3 = user(None, None, None)<EOL>for (i, mcs_nss) in enumerate([mcs_nss_0, mcs_nss_1, mcs_nss_2, mcs_nss_3]):<EOL><INDENT>if mcs_nss:<EOL><INDENT>nss = mcs_nss & <NUM_LIT> >> <NUM_LIT:4><EOL>mcs = (mcs_nss & <NUM_LIT>) >> <NUM_LIT:4><EOL>coding = (coding & <NUM_LIT:2>**i) >> i<EOL>if i == <NUM_LIT:0>:<EOL><INDENT>vht.user_0 = user(nss, mcs, coding)<EOL><DEDENT>elif i == <NUM_LIT:1>:<EOL><INDENT>vht.user_1 = user(nss, mcs, coding)<EOL><DEDENT>elif i == <NUM_LIT:2>:<EOL><INDENT>vht.user_2 = user(nss, mcs, coding)<EOL><DEDENT>elif i == <NUM_LIT:3>:<EOL><INDENT>vht.user_3 = user(nss, mcs, coding)<EOL><DEDENT><DEDENT><DEDENT>return idx + <NUM_LIT:12>, vht<EOL>", "docstring": "strip(12 byte) radiotap.vht\n        :idx: int\n        :return: int\n            idx\n        :return: collections.namedtuple", "id": "f1089:c1:m26"}
{"signature": "def __init__(self, frame, no_rtap=False):", "body": "Control.__init__(self, frame, no_rtap)<EOL>ra_mac = struct.unpack('<STR_LIT>', self._packet[<NUM_LIT:4>:<NUM_LIT:10>])[<NUM_LIT:0>]<EOL>self.ra = Wifi.get_mac_addr(ra_mac)<EOL>", "docstring": "Constructor method.\n        :frame: ctypes.Structure", "id": "f1089:c11:m0"}
{"signature": "def set_fixed_capabils(self, capabils):", "body": "self.ess = capabils['<STR_LIT>']<EOL>self.ibss = capabils['<STR_LIT>']<EOL>self.priv = capabils['<STR_LIT>']<EOL>self.short_preamble = capabils['<STR_LIT>']<EOL>self.pbcc = capabils['<STR_LIT>']<EOL>self.chan_agility = capabils['<STR_LIT>']<EOL>self.spec_man = capabils['<STR_LIT>']<EOL>self.short_slot = capabils['<STR_LIT>']<EOL>self.apsd = capabils['<STR_LIT>']<EOL>self.radio_meas = capabils['<STR_LIT>']<EOL>self.dss_ofdm = capabils['<STR_LIT>']<EOL>self.del_back = capabils['<STR_LIT>']<EOL>self.imm_back = capabils['<STR_LIT>']<EOL>", "docstring": "set keys of capabils into fields of object\n        :capabils: dict", "id": "f1089:c5:m9"}
{"signature": "def __init__(self, frame, no_rtap=False, parse_amsdu=True):", "body": "Data.__init__(self, frame, no_rtap)<EOL>idx = <NUM_LIT:0><EOL>self.sa = self.ta = self.ra = self.da = None<EOL>self.seq_num = self.frag_num = None<EOL>self.qos_pri = self.qos_bit = self.qos_ack = None<EOL>self.ccmp_extiv = None<EOL>self.payload = []<EOL>seq_idx, qos_idx, self.sa, self.ta, self.ra, self.da, _ = self.strip_mac_addrs()<EOL>self.seq_num, self.frag_num = self.strip_seq_cntrl(seq_idx)<EOL>idx = qos_idx<EOL>incr, self.qos_pri, self.qos_bit, self.qos_ack, self.amsdupresent =self.strip_qos_cntrl(idx, self.radiotap.prot_type)<EOL>idx += incr<EOL>if self.wep == <NUM_LIT:1>:<EOL><INDENT>incr, self.ccmp_extiv = self.strip_ccmp(idx)<EOL>idx += incr<EOL><DEDENT>if parse_amsdu:<EOL><INDENT>if self.amsdupresent != <NUM_LIT:0> and self.wep == <NUM_LIT:0>:<EOL><INDENT>while idx < len(self._packet):<EOL><INDENT>msdu, offset = self.strip_msdu(idx)<EOL>self.payload.append(msdu)<EOL>idx += offset<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if self.wep == <NUM_LIT:0>:<EOL><INDENT>msdu = {}<EOL>offset, llc = self.strip_llc(idx)<EOL>msdu['<STR_LIT>'] = llc<EOL>msdu['<STR_LIT>'] = self._packet[idx + offset:]<EOL>self.payload.append(msdu)<EOL><DEDENT>else:<EOL><INDENT>self.payload.append({'<STR_LIT>': self._packet[idx:]})<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Constructor method.\n        :frame: ctypes.Structure\n        :parse_amsdu: Bool\n            shall parse aggregated mac service data unit", "id": "f1089:c4:m0"}
{"signature": "@staticmethod<EOL><INDENT>def strip_len(payload):<DEDENT>", "body": "return struct.unpack('<STR_LIT:H>', payload)[<NUM_LIT:0>]<EOL>", "docstring": "strip(2 byte) radiotap.length\n        :payload: ctypes.Structure\n        :return: int", "id": "f1089:c1:m3"}
{"signature": "def strip_mcs(self, idx):", "body": "mcs = collections.namedtuple(<EOL>'<STR_LIT>', ['<STR_LIT>', '<STR_LIT:index>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>'])<EOL>idx = Radiotap.align(idx, <NUM_LIT:1>)<EOL>known, flags, index = struct.unpack_from('<STR_LIT>', self._rtap, idx)<EOL>bits = format(flags, '<STR_LIT>')[::-<NUM_LIT:1>]<EOL>mcs.known = known               <EOL>mcs.index = index               <EOL>mcs.have_bw = int(bits[<NUM_LIT:0>])      <EOL>mcs.have_mcs = int(bits[<NUM_LIT:1>])     <EOL>mcs.have_gi = int(bits[<NUM_LIT:2>])      <EOL>mcs.have_format = int(bits[<NUM_LIT:3>])  <EOL>mcs.have_fec = int(bits[<NUM_LIT:4>])     <EOL>mcs.have_stbc = int(bits[<NUM_LIT:5>])    <EOL>mcs.have_ness = int(bits[<NUM_LIT:6>])    <EOL>mcs.ness_bit1 = int(bits[<NUM_LIT:7>])    <EOL>return idx + <NUM_LIT:3>, mcs<EOL>", "docstring": "strip(3 byte) radiotap.mcs which contains 802.11n bandwidth,\n        mcs(modulation and coding scheme) and stbc(space time block coding)\n        information.\n        :idx: int\n        :return: int\n            idx\n        :return: collections.namedtuple", "id": "f1089:c1:m24"}
{"signature": "@staticmethod<EOL><INDENT>def get_mac_addr(mac_addr):<DEDENT>", "body": "mac_addr = bytearray(mac_addr)<EOL>mac = b'<STR_LIT::>'.join([('<STR_LIT>' % o).encode('<STR_LIT:ascii>') for o in mac_addr])<EOL>return mac<EOL>", "docstring": "converts bytes to mac addr format\n        :mac_addr: ctypes.structure\n        :return: str\n            mac addr in format\n            11:22:33:aa:bb:cc", "id": "f1089:c2:m2"}
{"signature": "def print_loading(self, wait, message):", "body": "tags = ['<STR_LIT:\\\\>', '<STR_LIT:|>', '<STR_LIT:/>', '<STR_LIT:->']<EOL>for i in range(wait):<EOL><INDENT>time.sleep(<NUM_LIT>)<EOL>sys.stdout.write(\"<STR_LIT>\" % {<EOL>'<STR_LIT:message>': message,<EOL>'<STR_LIT>': tags[i % <NUM_LIT:4>]<EOL>})<EOL>sys.stdout.flush()<EOL>pass<EOL><DEDENT>sys.stdout.write(\"<STR_LIT>\" % message)<EOL>sys.stdout.flush()<EOL>pass<EOL>", "docstring": "print loading message on screen\n\n.. note::\n\n    loading message only write to `sys.stdout`\n\n\n:param int wait: seconds to wait\n:param str message: message to print\n:return: None", "id": "f1105:c0:m2"}
{"signature": "def warn(self, *args, **kwargs):", "body": "return self.warn_message(*args, **kwargs)<EOL>", "docstring": "alias for `warn_message`", "id": "f1105:c0:m4"}
{"signature": "def warn_message(self, message, fh=None, prefix=\"<STR_LIT>\", suffix=\"<STR_LIT>\"):", "body": "msg = prefix + message + suffix<EOL>fh = fh or sys.stdout<EOL>if fh is sys.stdout:<EOL><INDENT>termcolor.cprint(msg, color=\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>fh.write(msg)<EOL><DEDENT>pass<EOL>", "docstring": "print warn type message,\nif file handle is `sys.stdout`, print color message\n\n\n:param str message: message to print\n:param file fh: file handle,default is `sys.stdout`\n:param str prefix: message prefix,default is `[warn]`\n:param str suffix: message suffix ,default is `...`\n:return: None", "id": "f1105:c0:m3"}
{"signature": "def print_message(self, message, fh=None):", "body": "return self.parser._print_message(message + \"<STR_LIT:\\n>\", fh)<EOL>", "docstring": "print message on screen\n\n:param str message:\n:param file fh: file handle,default is None\n:return: None", "id": "f1105:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def add_arguments(cls):<DEDENT>", "body": "return [<EOL>(('<STR_LIT>',), dict(help='<STR_LIT>')),<EOL>(('<STR_LIT:name>',), dict(nargs='<STR_LIT:?>', default='<STR_LIT>', help='<STR_LIT>')),<EOL>(('<STR_LIT>',),<EOL>dict(action='<STR_LIT:store_true>', help='<STR_LIT>')),<EOL>(('<STR_LIT>',),<EOL>dict(action='<STR_LIT:store_true>', help='<STR_LIT>')),<EOL>]<EOL>pass<EOL>", "docstring": "Create project.\n\nBy default cliez find github first,\nif not found,then try to search bitbucket\n\nif user define `--local` option. search local path first.\n\nif user define `--bitbucket`, search bitbucket first,\nthen search github.", "id": "f1106:c0:m1"}
{"signature": "def render(self, match_string, new_string):", "body": "current_dir = self.options.dir<EOL>if os.path.expanduser(current_dir) in ['<STR_LIT:/>', os.path.expanduser(\"<STR_LIT>\")]:<EOL><INDENT>self.error(\"<STR_LIT>\", -<NUM_LIT:1>)<EOL>pass<EOL><DEDENT>def match_directory(path):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>skip = False<EOL>for include_dir in ['<STR_LIT>' % s for s in<EOL>self.exclude_directories]:<EOL><INDENT>if path.find(include_dir) > -<NUM_LIT:1>:<EOL><INDENT>skip = True<EOL>break<EOL><DEDENT>pass<EOL><DEDENT>return skip<EOL><DEDENT>for v in os.walk(current_dir):<EOL><INDENT>if os.path.basename(v[<NUM_LIT:0>]) in self.exclude_directories:<EOL><INDENT>continue<EOL><DEDENT>if match_directory(v[<NUM_LIT:0>]):<EOL><INDENT>continue<EOL><DEDENT>for base_name in v[<NUM_LIT:2>]:<EOL><INDENT>file_name = os.path.join(v[<NUM_LIT:0>], base_name)<EOL>try:<EOL><INDENT>with open(file_name, '<STR_LIT:r>') as fh:<EOL><INDENT>buffer = fh.read()<EOL>buffer = buffer.replace(match_string, new_string)<EOL>pass<EOL><DEDENT>with open(file_name, '<STR_LIT:w>') as fh:<EOL><INDENT>fh.write(buffer)<EOL>pass<EOL><DEDENT><DEDENT>except UnicodeDecodeError:<EOL><INDENT>continue<EOL><DEDENT>pass<EOL><DEDENT>pass<EOL><DEDENT>redo_directories = []<EOL>redo_files = []<EOL>for v in os.walk(current_dir):<EOL><INDENT>if os.path.basename(v[<NUM_LIT:0>]) in self.exclude_directories:<EOL><INDENT>continue<EOL><DEDENT>if match_directory(v[<NUM_LIT:0>]):<EOL><INDENT>continue<EOL><DEDENT>for sub_dir in v[<NUM_LIT:1>]:<EOL><INDENT>if match_string in sub_dir:<EOL><INDENT>redo_directories.append(os.path.join(v[<NUM_LIT:0>], sub_dir))<EOL>pass<EOL><DEDENT><DEDENT>for f in v[<NUM_LIT:2>]:<EOL><INDENT>if match_string in f:<EOL><INDENT>redo_files.append(os.path.join(v[<NUM_LIT:0>], f))<EOL>pass<EOL><DEDENT><DEDENT>pass<EOL><DEDENT>redo_directories.reverse()<EOL>redo_files.reverse()<EOL>for v in redo_files:<EOL><INDENT>dir_name = os.path.dirname(v)<EOL>file_name = 
os.path.basename(v)<EOL>shutil.move(v, os.path.join(<EOL>dir_name,<EOL>file_name.replace(match_string, new_string)))<EOL>pass<EOL><DEDENT>for v in redo_directories:<EOL><INDENT>shutil.move(v, v.replace(match_string, new_string))<EOL>pass<EOL><DEDENT>pass<EOL>", "docstring": "render template string to user string\n:param str match_string: template string,syntax: '___VAR___'\n:param str new_string: user string\n:return:", "id": "f1107:c0:m1"}
{"signature": "def run(self, options):", "body": "self.logger.debug(\"<STR_LIT>\")<EOL>depends = ['<STR_LIT>']<EOL>nil_tools = []<EOL>self.logger.info(\"<STR_LIT>\", depends)<EOL>for v in depends:<EOL><INDENT>real_path = shutil.which(v)<EOL>if real_path:<EOL><INDENT>self.print_message(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(v,<EOL>real_path,<EOL>termcolor.colored(<EOL>'<STR_LIT>',<EOL>color='<STR_LIT>')))<EOL><DEDENT>else:<EOL><INDENT>nil_tools.append(v)<EOL>self.error_message(<EOL>'<STR_LIT>'.format(v, '<STR_LIT>'), prefix='<STR_LIT>',<EOL>suffix='<STR_LIT>')<EOL><DEDENT>pass<EOL><DEDENT>if nil_tools:<EOL><INDENT>self.print_message('<STR_LIT>')<EOL>self.error(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>self.print_message(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>self.logger.debug(\"<STR_LIT>\")<EOL>pass<EOL><DEDENT>pass<EOL>", "docstring": ".. todo::\n\n    check network connection\n\n:param Namespace options: parse result from argparse\n:return:", "id": "f1108:c0:m0"}
{"signature": "def hump_to_underscore(name):", "body": "new_name = '<STR_LIT>'<EOL>pos = <NUM_LIT:0><EOL>for c in name:<EOL><INDENT>if pos == <NUM_LIT:0>:<EOL><INDENT>new_name = c.lower()<EOL><DEDENT>elif <NUM_LIT> <= ord(c) <= <NUM_LIT>:<EOL><INDENT>new_name += '<STR_LIT:_>' + c.lower()<EOL>pass<EOL><DEDENT>else:<EOL><INDENT>new_name += c<EOL><DEDENT>pos += <NUM_LIT:1><EOL>pass<EOL><DEDENT>return new_name<EOL>", "docstring": "Convert Hump style to underscore\n\n:param name: Hump Character\n:return: str", "id": "f1110:m1"}
{"signature": "def include_file(filename, global_vars=None, local_vars=None):", "body": "if global_vars is None:<EOL><INDENT>global_vars = sys._getframe(<NUM_LIT:1>).f_globals<EOL><DEDENT>if local_vars is None:<EOL><INDENT>local_vars = sys._getframe(<NUM_LIT:1>).f_locals<EOL><DEDENT>with open(filename, '<STR_LIT:r>') as f:<EOL><INDENT>code = compile(f.read(), os.path.basename(filename), '<STR_LIT>')<EOL>exec(code, global_vars, local_vars)<EOL>pass<EOL><DEDENT>", "docstring": ".. deprecated 2.1::\n    Don't use this any more.\n\n    It's not pythonic.\n\n\ninclude file like php include.\n\ninclude is very useful when we need to split large config file", "id": "f1110:m0"}
{"signature": "def settings(path=None, with_path=None):", "body": "if path:<EOL><INDENT>Settings.bind(path, with_path=with_path)<EOL><DEDENT>return Settings._wrapped<EOL>", "docstring": "Get or set `Settings._wrapped`\n\n:param str path: a python module file,\n    if user set it,write config to `Settings._wrapped`\n:param str with_path: search path\n:return: A instance of `Settings`", "id": "f1111:m0"}
{"signature": "def check_exclusive_mode(self):", "body": "if self.options.exclusive_mode:<EOL><INDENT>import psutil<EOL>current_pid = os.getpid()<EOL>current = psutil.Process(current_pid).cmdline()<EOL>for pid in psutil.pids():<EOL><INDENT>p = psutil.Process(pid)<EOL>try:<EOL><INDENT>if current_pid != pid and current == p.cmdline():<EOL><INDENT>self.error_message(<EOL>\"<STR_LIT>\".format(p.pid))<EOL>sys.exit(-<NUM_LIT:1>)<EOL>pass<EOL><DEDENT><DEDENT>except psutil.ZombieProcess:<EOL><INDENT>pass<EOL><DEDENT>except psutil.AccessDenied:<EOL><INDENT>pass<EOL><DEDENT>pass<EOL><DEDENT>pass<EOL><DEDENT>pass<EOL>", "docstring": "\u68c0\u67e5\u662f\u5426\u662f\u72ec\u5360\u6a21\u5f0f\n\n\u53c2\u6570\u987a\u5e8f\u5fc5\u987b\u4e00\u81f4,\u4e5f\u5c31\u662f\u8bf4\u5982\u679c\u53c2\u6570\u987a\u5e8f\u4e0d\u4e00\u81f4,\u5219\u5224\u5b9a\u4e3a\u662f\u4e24\u4e2a\u4e0d\u540c\u7684\u8fdb\u7a0b\n\u8fd9\u4e48\u8bbe\u8ba1\u662f\u8003\u8651\u5230:\n\n- \u4e00\u822c\u800c\u8a00,\u6392\u4ed6\u6a21\u5f0f\u7684\u670d\u52a1\u542f\u52a8\u90fd\u662fcrontab\u7b49\u811a\u672c\u6765\u5b8c\u6210\u7684,\u4e0d\u5b58\u5728\u987a\u5e8f\u53d8\u66f4\u7684\u53ef\u80fd\n- \u8fd9\u5728\u8c03\u8bd5\u7684\u65f6\u5019\u53ef\u4ee5\u5e2e\u52a9\u6211\u4eec\u4e0d\u9700\u8981\u7ed3\u675f\u539f\u6709\u8fdb\u7a0b\u5c31\u53ef\u4ee5\u7ee7\u7eed\u8c03\u8bd5\n\n:return:", "id": "f1112:c0:m1"}
{"signature": "def run(self, options):", "body": "self.set_signal()<EOL>self.check_exclusive_mode()<EOL>slot = self.Handle(self)<EOL>i = <NUM_LIT:0><EOL>while i < options.threads:<EOL><INDENT>t = threading.Thread(target=self.worker, args=[slot])<EOL>if options.once is True or options.no_daemon is True:<EOL><INDENT>t.daemon = False<EOL><DEDENT>else:<EOL><INDENT>t.daemon = True<EOL><DEDENT>t.start()<EOL>i += <NUM_LIT:1><EOL><DEDENT>if options.once is False:<EOL><INDENT>while True:<EOL><INDENT>if threading.active_count() > <NUM_LIT:1>:<EOL><INDENT>sleep(<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>if threading.current_thread().name == \"<STR_LIT>\":<EOL><INDENT>sys.exit(<NUM_LIT:0>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>pass<EOL>", "docstring": "In general, you don't need to overwrite this method.\n\n:param options:\n:return:", "id": "f1112:c0:m2"}
{"signature": "def message_mentions_me(self, data):", "body": "return (data.get('<STR_LIT:type>') == '<STR_LIT:message>' and<EOL>self.full_name in data.get('<STR_LIT:text>', '<STR_LIT>'))<EOL>", "docstring": "If you send a message that mentions me", "id": "f1114:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def _unpack_message(msg):<DEDENT>", "body": "return json.loads(msg.data)<EOL>", "docstring": "Unpack the data from the message.\n\n        Arguments:\n          msg (:py:class:`aiohttp.websocket.Message`): The message to\n            unpack.\n\n        Returns:\n          :py:class:`dict`: The loaded data.\n\n        Raises:\n          :py:class:`AttributeError`: If there is no data attribute.\n          :py:class:`json.JSONDecodeError`: If the data isn't valid\n            JSON.", "id": "f1114:c0:m10"}
{"signature": "@classmethod<EOL><INDENT>def _validate_first_message(cls, msg):<DEDENT>", "body": "data = cls._unpack_message(msg)<EOL>logger.debug(data)<EOL>if data != cls.RTM_HANDSHAKE:<EOL><INDENT>raise SlackApiError('<STR_LIT>'.format(data))<EOL><DEDENT>logger.info('<STR_LIT>')<EOL>", "docstring": "Check the first message matches the expected handshake.\n\n        Note:\n          The handshake is provided as :py:attr:`RTM_HANDSHAKE`.\n\n        Arguments:\n          msg (:py:class:`aiohttp.Message`): The message to validate.\n\n        Raises:\n          :py:class:`SlackApiError`: If the data doesn't match the\n            expected handshake.", "id": "f1114:c0:m11"}
{"signature": "def message_is_to_me(self, data):", "body": "return (data.get('<STR_LIT:type>') == '<STR_LIT:message>' and<EOL>data.get('<STR_LIT:text>', '<STR_LIT>').startswith(self.address_as))<EOL>", "docstring": "If you send a message directly to me", "id": "f1114:c0:m4"}
{"signature": "def matches(self, data):", "body": "self.text = data.get('<STR_LIT:text>')<EOL>return True<EOL>", "docstring": "Whether the handler should handle the current message.\n\n        Args:\n          data: The data representing the current message.\n\n        Returns:\n          :py:class:`bool`: Whether it should be handled.", "id": "f1116:c0:m4"}
{"signature": "async def execute_method(self, method, **params):", "body": "url = self.url_builder(method, url_params=params)<EOL>logger.info('<STR_LIT>', method)<EOL>response = await aiohttp.get(url)<EOL>logger.info('<STR_LIT>', response.status)<EOL>if response.status == <NUM_LIT:200>:<EOL><INDENT>json = await response.json()<EOL>logger.debug('<STR_LIT>', json)<EOL>if json.get('<STR_LIT>'):<EOL><INDENT>return json<EOL><DEDENT>raise SlackApiError(json['<STR_LIT:error>'])<EOL><DEDENT>else:<EOL><INDENT>raise_for_status(response)<EOL><DEDENT>", "docstring": "Execute a specified Slack Web API method.\n\n        Arguments:\n          method (:py:class:`str`): The name of the method.\n          **params (:py:class:`dict`): Any additional parameters\n            required.\n\n        Returns:\n          :py:class:`dict`: The JSON data from the response.\n\n        Raises:\n          :py:class:`aiohttp.web_exceptions.HTTPException`: If the HTTP\n            request returns a code other than 200 (OK).\n          SlackApiError: If the Slack API is reached but the response\n           contains an error message.", "id": "f1117:c1:m0"}
{"signature": "@classmethod<EOL><INDENT>def method_exists(cls, method):<DEDENT>", "body": "methods = cls.API_METHODS<EOL>for key in method.split('<STR_LIT:.>'):<EOL><INDENT>methods = methods.get(key)<EOL>if methods is None:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if isinstance(methods, str):<EOL><INDENT>logger.debug('<STR_LIT>', method, methods)<EOL>return True<EOL><DEDENT>return False<EOL>", "docstring": "Whether a given method exists in the known API.\n\n        Arguments:\n          method (:py:class:`str`): The name of the method.\n\n        Returns:\n          :py:class:`bool`: Whether the method is in the known API.", "id": "f1117:c1:m1"}
{"signature": "def url_builder(self, endpoint, *, root=None, params=None, url_params=None):", "body": "if root is None:<EOL><INDENT>root = self.ROOT<EOL><DEDENT>scheme, netloc, path, _, _ = urlsplit(root)<EOL>return urlunsplit((<EOL>scheme,<EOL>netloc,<EOL>urljoin(path, endpoint),<EOL>urlencode(url_params or {}),<EOL>'<STR_LIT>',<EOL>)).format(**params or {})<EOL>", "docstring": "Create a URL for the specified endpoint.\n\n        Arguments:\n          endpoint (:py:class:`str`): The API endpoint to access.\n          root: (:py:class:`str`, optional): The root URL for the\n            service API.\n          params: (:py:class:`dict`, optional): The values for format\n            into the created URL (defaults to ``None``).\n          url_params: (:py:class:`dict`, optional): Parameters to add\n            to the end of the URL (defaults to ``None``).\n\n        Returns:\n          :py:class:`str`: The resulting URL.", "id": "f1119:c0:m2"}
{"signature": "@property<EOL><INDENT>def headers(self):<DEDENT>", "body": "return {}<EOL>", "docstring": "Get the headers for the service requests.\n\n        Returns:\n          :py:class:`dict`: The header mapping.", "id": "f1119:c0:m1"}
{"signature": "def truncate(text, max_len=<NUM_LIT>, end='<STR_LIT>'):", "body": "if len(text) <= max_len:<EOL><INDENT>return text<EOL><DEDENT>return text[:max_len].rsplit('<STR_LIT:U+0020>', maxsplit=<NUM_LIT:1>)[<NUM_LIT:0>] + end<EOL>", "docstring": "Truncate the supplied text for display.\n\n    Arguments:\n      text (:py:class:`str`): The text to truncate.\n      max_len (:py:class:`int`, optional): The maximum length of the\n        text before truncation (defaults to 350 characters).\n      end (:py:class:`str`, optional): The ending to use to show that\n        the text was truncated (defaults to ``'...'``).\n\n    Returns:\n      :py:class:`str`: The truncated text.", "id": "f1120:m1"}
{"signature": "def raise_for_status(response):", "body": "for err_name in web_exceptions.__all__:<EOL><INDENT>err = getattr(web_exceptions, err_name)<EOL>if err.status_code == response.status:<EOL><INDENT>payload = dict(<EOL>headers=response.headers,<EOL>reason=response.reason,<EOL>)<EOL>if issubclass(err, web_exceptions._HTTPMove):  <EOL><INDENT>raise err(response.headers['<STR_LIT>'], **payload)<EOL><DEDENT>raise err(**payload)<EOL><DEDENT><DEDENT>", "docstring": "Raise an appropriate error for a given response.\n\n    Arguments:\n      response (:py:class:`aiohttp.ClientResponse`): The API response.\n\n    Raises:\n      :py:class:`aiohttp.web_exceptions.HTTPException`: The appropriate\n        error for the response's status.", "id": "f1120:m0"}
{"signature": "def resorted(values):", "body": "if not values:<EOL><INDENT>return values<EOL><DEDENT>values = sorted(values)<EOL>first_word = next(<EOL>(cnt for cnt, val in enumerate(values)<EOL>if val and not val[<NUM_LIT:0>].isdigit()),<EOL>None<EOL>)<EOL>if first_word is None:<EOL><INDENT>return values<EOL><DEDENT>words = values[first_word:]<EOL>numbers = values[:first_word]<EOL>return words + numbers<EOL>", "docstring": "Sort values, but put numbers after alphabetically sorted words.\n\nThis function is here to make outputs diff-compatible with Aleph.\n\nExample::\n    >>> sorted([\"b\", \"1\", \"a\"])\n    ['1', 'a', 'b']\n    >>> resorted([\"b\", \"1\", \"a\"])\n    ['a', 'b', '1']\n\nArgs:\n    values (iterable): any iterable object/list/tuple/whatever.\n\nReturns:\n    list of sorted values, but with numbers after words", "id": "f1159:m0"}
{"signature": "def get_subfields(self, datafield, subfield, i1=None, i2=None,<EOL>exception=False):", "body": "if len(datafield) != <NUM_LIT:3>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if len(subfield) != <NUM_LIT:1>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if datafield not in self.datafields:<EOL><INDENT>if exception:<EOL><INDENT>raise KeyError(datafield + \"<STR_LIT>\")<EOL><DEDENT>return []<EOL><DEDENT>output = []<EOL>for datafield in self.datafields[datafield]:<EOL><INDENT>if subfield not in datafield:<EOL><INDENT>continue<EOL><DEDENT>for sfield in datafield[subfield]:<EOL><INDENT>if i1 and sfield.i1 != i1:<EOL><INDENT>continue<EOL><DEDENT>if i2 and sfield.i2 != i2:<EOL><INDENT>continue<EOL><DEDENT>output.append(sfield)<EOL><DEDENT><DEDENT>if not output and exception:<EOL><INDENT>raise KeyError(subfield + \"<STR_LIT>\")<EOL><DEDENT>return output<EOL>", "docstring": "Return content of given `subfield` in `datafield`.\n\nArgs:\n    datafield (str): Section name (for example \"001\", \"100\", \"700\").\n    subfield (str):  Subfield name (for example \"a\", \"1\", etc..).\n    i1 (str, default None): Optional i1/ind1 parameter value, which\n       will be used for search.\n    i2 (str, default None): Optional i2/ind2 parameter value, which\n       will be used for search.\n    exception (bool): If ``True``, :exc:`~exceptions.KeyError` is\n              raised when method couldn't found given `datafield` /\n              `subfield`. 
If ``False``, blank array ``[]`` is returned.\n\nReturns:\n    list: of :class:`.MARCSubrecord`.\n\nRaises:\n    KeyError: If the subfield or datafield couldn't be found.\n\nNote:\n    MARCSubrecord is practically same thing as string, but has defined\n    :meth:`.MARCSubrecord.i1` and :attr:`.MARCSubrecord.i2`\n    methods.\n\n    You may need to be able to get this, because MARC XML depends on\n    i/ind parameters from time to time (names of authors for example).", "id": "f1161:c0:m11"}
{"signature": "def _parse_control_fields(self, fields, tag_id=\"<STR_LIT>\"):", "body": "for field in fields:<EOL><INDENT>params = field.params<EOL>if tag_id not in params:<EOL><INDENT>continue<EOL><DEDENT>self.controlfields[params[tag_id]] = field.getContent().strip()<EOL><DEDENT>", "docstring": "Parse control fields.\n\nArgs:\n    fields (list): list of HTMLElements\n    tag_id (str):  parameter name, which holds the information, about\n                   field name this is normally \"tag\", but in case of\n                   oai_marc \"id\".", "id": "f1161:c0:m2"}
{"signature": "def add_data_field(self, name, i1, i2, subfields_dict):", "body": "if i1 not in self.valid_i_chars:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + i1 + \"<STR_LIT>\")<EOL><DEDENT>if i2 not in self.valid_i_chars:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + i2 + \"<STR_LIT>\")<EOL><DEDENT>if len(name) != <NUM_LIT:3>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if not subfields_dict:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if not isinstance(subfields_dict, dict):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>subrecords = []<EOL>for key, val in subfields_dict.items():<EOL><INDENT>if len(key) > <NUM_LIT:1>:<EOL><INDENT>raise KeyError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if not isinstance(val, list):<EOL><INDENT>val = [val]<EOL><DEDENT>subfields = map(<EOL>lambda x: MARCSubrecord(x, i1, i2, None),<EOL>val<EOL>)<EOL>subfields_dict[key] = subfields<EOL>subrecords.extend(subfields)<EOL><DEDENT>subfields_dict[self.i1_name] = i1<EOL>subfields_dict[self.i2_name] = i2<EOL>if name in self.datafields:<EOL><INDENT>self.datafields[name].append(subfields_dict)<EOL><DEDENT>else:<EOL><INDENT>self.datafields[name] = [subfields_dict]<EOL><DEDENT>other_subfields = self.datafields[name]<EOL>for record in subrecords:<EOL><INDENT>record.other_subfields = other_subfields<EOL><DEDENT>", "docstring": "Add new datafield into :attr:`datafields` and take care of OAI MARC\ndifferencies.\n\nArgs:\n    name (str): Name of datafield.\n    i1 (char): Value of i1/ind1 parameter.\n    i2 (char): Value of i2/ind2 parameter.\n    subfields_dict (dict): Dictionary containing subfields (as list).\n\n`subfields_dict` is expected to be in this format::\n\n    {\n        \"field_id\": [\"subfield data\",],\n        ...\n        \"z\": [\"X0456b\"]\n    }\n\nWarning:\n    For your own good, use OrderedDict for `subfields_dict`, or\n    constructor's `resort` parameter set to ``True`` (it is by\n    default).\n\nWarning:\n    
``field_id`` can be only one character long!", "id": "f1161:c0:m5"}
{"signature": "def _parse_string(self, xml):", "body": "if not isinstance(xml, HTMLElement):<EOL><INDENT>xml = dhtmlparser.parseString(str(xml))<EOL><DEDENT>record = xml.find(\"<STR_LIT>\")<EOL>if not record:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>record = record[<NUM_LIT:0>]<EOL>self.oai_marc = len(record.find(\"<STR_LIT>\")) > <NUM_LIT:0><EOL>if not self.oai_marc:<EOL><INDENT>leader = record.find(\"<STR_LIT>\")<EOL>if len(leader) >= <NUM_LIT:1>:<EOL><INDENT>self.leader = leader[<NUM_LIT:0>].getContent()<EOL><DEDENT><DEDENT>if self.oai_marc:<EOL><INDENT>self._parse_control_fields(record.find(\"<STR_LIT>\"), \"<STR_LIT:id>\")<EOL>self._parse_data_fields(record.find(\"<STR_LIT>\"), \"<STR_LIT:id>\", \"<STR_LIT:label>\")<EOL><DEDENT>else:<EOL><INDENT>self._parse_control_fields(record.find(\"<STR_LIT>\"), \"<STR_LIT>\")<EOL>self._parse_data_fields(record.find(\"<STR_LIT>\"), \"<STR_LIT>\", \"<STR_LIT:code>\")<EOL><DEDENT>if self.oai_marc and \"<STR_LIT>\" in self.controlfields:<EOL><INDENT>self.leader = self.controlfields[\"<STR_LIT>\"]<EOL><DEDENT>", "docstring": "Parse MARC XML document to dicts, which are contained in\nself.controlfields and self.datafields.\n\nArgs:\n    xml (str or HTMLElement): input data\n\nAlso detect if this is oai marc format or not (see elf.oai_marc).", "id": "f1161:c0:m1"}
{"signature": "def get_ctl_field(self, controlfield, alt=None):", "body": "if not alt:<EOL><INDENT>return self.controlfields[controlfield]<EOL><DEDENT>return self.controlfields.get(controlfield, alt)<EOL>", "docstring": "Method wrapper over :attr:`.controlfields` dictionary.\n\nArgs:\n    controlfield (str): Name of the controlfield.\n    alt (object, default None): Alternative value of the `controlfield`\n        when `controlfield` couldn't be found.\n\nReturns:\n    str: record from given `controlfield`", "id": "f1161:c0:m9"}
{"signature": "def get_internal_urls(self):", "body": "internal_urls = self.get_subfields(\"<STR_LIT>\", \"<STR_LIT:u>\", i1=\"<STR_LIT:4>\", i2=\"<STR_LIT:0>\")<EOL>internal_urls.extend(self.get_subfields(\"<STR_LIT>\", \"<STR_LIT:a>\"))<EOL>internal_urls.extend(self.get_subfields(\"<STR_LIT>\", \"<STR_LIT:u>\"))<EOL>return map(lambda x: x.replace(\"<STR_LIT>\", \"<STR_LIT:&>\"), internal_urls)<EOL>", "docstring": "URL's, which may point to edeposit, aleph, kramerius and so on.\n\nFields ``856u40``, ``998a`` and ``URLu``.\n\nReturns:\n    list: List of internal URLs.", "id": "f1163:c0:m26"}
{"signature": "def get_authors(self):", "body": "authors = self._parse_persons(\"<STR_LIT:100>\", \"<STR_LIT:a>\")<EOL>authors += self._parse_persons(\"<STR_LIT>\", \"<STR_LIT:a>\")<EOL>authors += self._parse_persons(\"<STR_LIT>\", \"<STR_LIT:a>\")<EOL>authors += self._parse_persons(\"<STR_LIT>\", \"<STR_LIT:a>\")<EOL>return authors<EOL>", "docstring": "Returns:\n    list: Authors represented as :class:`.Person` objects.", "id": "f1163:c0:m13"}
{"signature": "def get_ISSNs(self):", "body": "invalid_issns = set(self.get_invalid_ISSNs())<EOL>return [<EOL>self._clean_isbn(issn)<EOL>for issn in self[\"<STR_LIT>\"]<EOL>if self._clean_isbn(issn) not in invalid_issns<EOL>]<EOL>", "docstring": "Get list of VALID ISSNs (``022a``).\n\nReturns:\n    list: List with *valid* ISSN strings.", "id": "f1163:c0:m20"}
{"signature": "def get_pub_date(self, undefined=\"<STR_LIT>\"):", "body": "dates = self[\"<STR_LIT>\"] + self[\"<STR_LIT>\"]<EOL>def clean_date(date):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>out = \"<STR_LIT>\"<EOL>was_digit = False<EOL>for c in date:<EOL><INDENT>if c.isdigit() or (c == \"<STR_LIT:->\" and was_digit) or c == \"<STR_LIT:U+0020>\":<EOL><INDENT>out += c<EOL><DEDENT>was_digit = c.isdigit()<EOL><DEDENT>return out<EOL><DEDENT>dates = set([<EOL>clean_date(date)<EOL>for date in self[\"<STR_LIT>\"] + self[\"<STR_LIT>\"]<EOL>])<EOL>return _undefined_pattern(<EOL>\"<STR_LIT:U+002CU+0020>\".join(dates),<EOL>lambda x: x.strip() == \"<STR_LIT>\",<EOL>undefined<EOL>)<EOL>", "docstring": "Args:\n    undefined (optional): Argument, which will be returned if the\n              `pub_date` record is not found.\n\nReturns:\n    str: Date of publication (month and year usually) or `undefined` \\\n         if `pub_date` is not found.", "id": "f1163:c0:m9"}
{"signature": "def _clean_isbn(self, isbn):", "body": "return isbn.strip().split(\"<STR_LIT:U+0020>\", <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>", "docstring": "Clean ISBN from other information (binding).", "id": "f1163:c0:m16"}
{"signature": "def _undefined_pattern(value, fn, undefined):", "body": "if fn(value):<EOL><INDENT>return undefined<EOL><DEDENT>return value<EOL>", "docstring": "If ``fn(value) == True``, return `undefined`, else `value`.", "id": "f1163:m0"}
{"signature": "def is_continuing(self):", "body": "return self.get_pub_type() == PublicationType.continuing<EOL>", "docstring": "Returns:\n    bool: True if the record is continuing.", "id": "f1163:c0:m30"}
{"signature": "def get_pub_type(self):", "body": "INFO_CHAR_INDEX = <NUM_LIT:6><EOL>SECOND_INFO_CHAR_I = <NUM_LIT><EOL>if not len(self.leader) >= INFO_CHAR_INDEX + <NUM_LIT:1>:<EOL><INDENT>return PublicationType.monographic<EOL><DEDENT>if self.controlfields.get(\"<STR_LIT>\") == \"<STR_LIT>\":<EOL><INDENT>return PublicationType.continuing<EOL><DEDENT>info_char = self.leader[INFO_CHAR_INDEX]<EOL>multipart_n = self.get_subfields(\"<STR_LIT>\", \"<STR_LIT:n>\", exception=False)<EOL>multipart_p = self.get_subfields(\"<STR_LIT>\", \"<STR_LIT:p>\", exception=False)<EOL>if info_char in \"<STR_LIT>\":<EOL><INDENT>return PublicationType.monographic<EOL><DEDENT>elif info_char in \"<STR_LIT>\":<EOL><INDENT>return PublicationType.continuing<EOL><DEDENT>elif info_char == \"<STR_LIT:m>\" and (multipart_n or multipart_p):<EOL><INDENT>return PublicationType.multipart_monograph<EOL><DEDENT>elif info_char == \"<STR_LIT:m>\" and len(self.leader) >= SECOND_INFO_CHAR_I + <NUM_LIT:1>:<EOL><INDENT>if self.leader[SECOND_INFO_CHAR_I] == \"<STR_LIT:a>\":<EOL><INDENT>return PublicationType.multipart_monograph<EOL><DEDENT>elif self.leader[SECOND_INFO_CHAR_I] == \"<STR_LIT:U+0020>\":<EOL><INDENT>return PublicationType.single_unit<EOL><DEDENT><DEDENT>return PublicationType.monographic<EOL>", "docstring": "Returns:\n    PublicationType: :class:`.PublicationType` enum **value**.", "id": "f1163:c0:m27"}
{"signature": "@remove_hairs_decorator<EOL><INDENT>def get_pub_order(self, undefined=\"<STR_LIT>\"):<DEDENT>", "body": "return _undefined_pattern(<EOL>\"<STR_LIT>\".join(self.get_subfields(\"<STR_LIT>\", \"<STR_LIT:f>\")),<EOL>lambda x: x.strip() == \"<STR_LIT>\",<EOL>undefined<EOL>)<EOL>", "docstring": "Args:\n    undefined (optional): Argument, which will be returned if the\n              `pub_order` record is not found.\n\nReturns:\n    str: Information about order in which was the book published or \\\n         `undefined` if `pub_order` is not found.", "id": "f1163:c0:m10"}
{"signature": "def get_distributors(self):", "body": "return self.get_corporations(roles=[\"<STR_LIT>\"])<EOL>", "docstring": "Returns:\n    list: Distributors represented as :class:`.Corporation` object.", "id": "f1163:c0:m15"}
{"signature": "def get(self, item, alt=None):", "body": "try:<EOL><INDENT>val = self[item]<EOL><DEDENT>except ValueError:<EOL><INDENT>return alt<EOL><DEDENT>return val if val is not None else alt<EOL>", "docstring": "Standard dict-like .get() method.\n\nArgs:\n    item (str): See :meth:`.__getitem__` for details.\n    alt (default None): Alternative value, if item is not found.\n\nReturns:\n    obj: `item` or `alt`, if item is not found.", "id": "f1163:c0:m33"}
{"signature": "def get_ISBNs(self):", "body": "invalid_isbns = set(self.get_invalid_ISBNs())<EOL>valid_isbns = [<EOL>self._clean_isbn(isbn)<EOL>for isbn in self[\"<STR_LIT>\"]<EOL>if self._clean_isbn(isbn) not in invalid_isbns<EOL>]<EOL>if valid_isbns:<EOL><INDENT>return valid_isbns<EOL><DEDENT>return [<EOL>self._clean_isbn(isbn)<EOL>for isbn in self[\"<STR_LIT>\"]<EOL>]<EOL>", "docstring": "Get list of VALID ISBN.\n\nReturns:\n    list: List with *valid* ISBN strings.", "id": "f1163:c0:m18"}
{"signature": "def _parse_persons(self, datafield, subfield, roles=[\"<STR_LIT>\"]):", "body": "<EOL>parsed_persons = []<EOL>raw_persons = self.get_subfields(datafield, subfield)<EOL>for person in raw_persons:<EOL><INDENT>other_subfields = person.other_subfields<EOL>if \"<STR_LIT:4>\" in other_subfields and roles != [\"<STR_LIT>\"]:<EOL><INDENT>person_roles = other_subfields[\"<STR_LIT:4>\"]  <EOL>relevant = any(map(lambda role: role in roles, person_roles))<EOL>if not relevant:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>ind1 = person.i1<EOL>ind2 = person.i2<EOL>person = person.strip()<EOL>name = \"<STR_LIT>\"<EOL>second_name = \"<STR_LIT>\"<EOL>surname = \"<STR_LIT>\"<EOL>title = \"<STR_LIT>\"<EOL>if ind1 == \"<STR_LIT:1>\" and ind2 == \"<STR_LIT:U+0020>\":<EOL><INDENT>if \"<STR_LIT:U+002C>\" in person:<EOL><INDENT>surname, name = person.split(\"<STR_LIT:U+002C>\", <NUM_LIT:1>)<EOL><DEDENT>elif \"<STR_LIT:U+0020>\" in person:<EOL><INDENT>surname, name = person.split(\"<STR_LIT:U+0020>\", <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>surname = person<EOL><DEDENT>if \"<STR_LIT:c>\" in other_subfields:<EOL><INDENT>title = \"<STR_LIT:U+002C>\".join(other_subfields[\"<STR_LIT:c>\"])<EOL><DEDENT><DEDENT>elif ind1 == \"<STR_LIT:0>\" and ind2 == \"<STR_LIT:U+0020>\":<EOL><INDENT>name = person.strip()<EOL>if \"<STR_LIT:b>\" in other_subfields:<EOL><INDENT>second_name = \"<STR_LIT:U+002C>\".join(other_subfields[\"<STR_LIT:b>\"])<EOL><DEDENT>if \"<STR_LIT:c>\" in other_subfields:<EOL><INDENT>surname = \"<STR_LIT:U+002C>\".join(other_subfields[\"<STR_LIT:c>\"])<EOL><DEDENT><DEDENT>elif ind1 == \"<STR_LIT:1>\" and ind2 == \"<STR_LIT:0>\" or ind1 == \"<STR_LIT:0>\" and ind2 == \"<STR_LIT:0>\":<EOL><INDENT>name = person.strip()<EOL>if \"<STR_LIT:c>\" in other_subfields:<EOL><INDENT>title = 
\"<STR_LIT:U+002C>\".join(other_subfields[\"<STR_LIT:c>\"])<EOL><DEDENT><DEDENT>parsed_persons.append(<EOL>Person(<EOL>name.strip(),<EOL>second_name.strip(),<EOL>surname.strip(),<EOL>title.strip()<EOL>)<EOL>)<EOL><DEDENT>return parsed_persons<EOL>", "docstring": "Parse persons from given datafield.\n\nArgs:\n    datafield (str): code of datafield (\"010\", \"730\", etc..)\n    subfield (char):  code of subfield (\"a\", \"z\", \"4\", etc..)\n    role (list of str): set to [\"any\"] for any role, [\"aut\"] for\n         authors, etc.. For details see\n         http://www.loc.gov/marc/relators/relaterm.html\n\nMain records for persons are: \"100\", \"600\" and \"700\", subrecords \"c\".\n\nReturns:\n    list: Person objects.", "id": "f1163:c0:m2"}
{"signature": "@remove_hairs_decorator<EOL><INDENT>def get_publisher(self, undefined=\"<STR_LIT>\"):<DEDENT>", "body": "publishers = set([<EOL>remove_hairs_fn(publisher)<EOL>for publisher in self[\"<STR_LIT>\"] + self[\"<STR_LIT>\"]<EOL>])<EOL>return _undefined_pattern(<EOL>\"<STR_LIT:U+002CU+0020>\".join(publishers),<EOL>lambda x: x.strip() == \"<STR_LIT>\",<EOL>undefined<EOL>)<EOL>", "docstring": "Args:\n    undefined (optional): Argument, which will be returned if the\n              `publisher` record is not found.\n\nReturns:\n    str: Name of the publisher (\"``Grada``\" for example) or \\\n         `undefined` if `publisher` is not found.", "id": "f1163:c0:m8"}
{"signature": "def _parse_corporations(self, datafield, subfield, roles=[\"<STR_LIT>\"]):", "body": "if len(datafield) != <NUM_LIT:3>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if len(subfield) != <NUM_LIT:1>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>parsed_corporations = []<EOL>for corporation in self.get_subfields(datafield, subfield):<EOL><INDENT>other_subfields = corporation.other_subfields<EOL>if \"<STR_LIT:4>\" in other_subfields and roles != [\"<STR_LIT>\"]:<EOL><INDENT>corp_roles = other_subfields[\"<STR_LIT:4>\"]  <EOL>relevant = any(map(lambda role: role in roles, corp_roles))<EOL>if not relevant:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>name = \"<STR_LIT>\"<EOL>place = \"<STR_LIT>\"<EOL>date = \"<STR_LIT>\"<EOL>name = corporation<EOL>if \"<STR_LIT:c>\" in other_subfields:<EOL><INDENT>place = \"<STR_LIT:U+002C>\".join(other_subfields[\"<STR_LIT:c>\"])<EOL><DEDENT>if \"<STR_LIT:d>\" in other_subfields:<EOL><INDENT>date = \"<STR_LIT:U+002C>\".join(other_subfields[\"<STR_LIT:d>\"])<EOL><DEDENT>parsed_corporations.append(Corporation(name, place, date))<EOL><DEDENT>return parsed_corporations<EOL>", "docstring": "Parse informations about corporations from given field identified\nby `datafield` parameter.\n\nArgs:\n    datafield (str): MARC field ID (\"``110``\", \"``610``\", etc..)\n    subfield (str):  MARC subfield ID with name, which is typically\n                     stored in \"``a``\" subfield.\n    roles (str): specify which roles you need. Set to ``[\"any\"]`` for\n                 any role, ``[\"dst\"]`` for distributors, etc.. For\n                 details, see\n                 http://www.loc.gov/marc/relators/relaterm.html\n\nReturns:\n    list: :class:`Corporation` objects.", "id": "f1163:c0:m1"}
{"signature": "def is_single_unit(self):", "body": "return self.get_pub_type() == PublicationType.single_unit<EOL>", "docstring": "Returns:\n    bool: True if the record is single unit.", "id": "f1163:c0:m31"}
{"signature": "def is_multi_mono(self):", "body": "return self.get_pub_type() == PublicationType.multipart_monograph<EOL>", "docstring": "Returns:\n    bool: True if the record is multi_mono.", "id": "f1163:c0:m29"}
{"signature": "@remove_hairs_decorator<EOL><INDENT>def get_part_name(self, undefined=\"<STR_LIT>\"):<DEDENT>", "body": "return _undefined_pattern(<EOL>\"<STR_LIT>\".join(self.get_subfields(\"<STR_LIT>\", \"<STR_LIT:n>\")),<EOL>lambda x: x.strip() == \"<STR_LIT>\",<EOL>undefined<EOL>)<EOL>", "docstring": "Args:\n    undefined (optional): Argument, which will be returned if the\n              `part_name` record is not found.\n\nReturns:\n    str: Name of the part of the series. or `undefined` if `part_name`\\\n         is not found.", "id": "f1163:c0:m7"}
{"signature": "def get_invalid_ISSNs(self):", "body": "return [<EOL>self._clean_isbn(issn)<EOL>for issn in self[\"<STR_LIT>\"] + self[\"<STR_LIT>\"]<EOL>]<EOL>", "docstring": "Get list of invalid ISSNs (``022z`` + ``022y``).\n\nReturns:\n    list: List with INVALID ISSN strings.", "id": "f1163:c0:m19"}
{"signature": "@property<EOL><INDENT>def potential(self):<DEDENT>", "body": "if self._potential is not None and self._potential:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Getter for 'potential' property\n\nReturns:\n        bool: potential is required?", "id": "f1181:c0:m7"}
{"signature": "@details.setter<EOL><INDENT>def details(self, value):<DEDENT>", "body": "self._details = value<EOL>", "docstring": "Setter for 'details' property\n\nArgs:\n        value (str): Issue's details", "id": "f1181:c0:m10"}
{"signature": "@checker.setter<EOL><INDENT>def checker(self, value):<DEDENT>", "body": "self._checker_name = value<EOL>", "docstring": "Setter for 'checker' property\n\nArgs:\n        value (str): Issue's checker", "id": "f1181:c0:m12"}
{"signature": "def __todict__(self):", "body": "return {<EOL>\"<STR_LIT:name>\": self.name,<EOL>\"<STR_LIT:file>\": self.file,<EOL>\"<STR_LIT>\": self.details,<EOL>\"<STR_LIT>\": self.severity,<EOL>\"<STR_LIT>\": self.potential,<EOL>\"<STR_LIT>\": self.checker<EOL>}<EOL>", "docstring": "Returns a dictionary with the class representation\n\nReturns:\n        dict: class representarion", "id": "f1181:c0:m13"}
{"signature": "@property<EOL><INDENT>def command(self):<DEDENT>", "body": "return self._command<EOL>", "docstring": "Getter for 'command' property\n\nReturns:\n        str: Command to execute", "id": "f1184:c0:m1"}
{"signature": "def execute(self, shell = True):", "body": "process = Popen(self.command, stdout=PIPE, stderr=PIPE, shell=shell)<EOL>self.output, self.errors = process.communicate()<EOL>", "docstring": "Executes the command setted into class\n\nArgs:\n        shell (boolean): Set True if command is a shell command. Default: True", "id": "f1184:c0:m9"}
{"signature": "@property<EOL><INDENT>def errors(self):<DEDENT>", "body": "return self._errors<EOL>", "docstring": "Getter for 'errors' property\n\nReturns:\n        str: Stderr content", "id": "f1184:c0:m5"}
{"signature": "def __init__(self, command = None):", "body": "self._output = None<EOL>self._errors = None<EOL>self._command = None<EOL>self.command = command<EOL>", "docstring": "Class constructor. \n\nArgs:\n        command (str): Command to execute", "id": "f1184:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def parseConfig(cls, value):<DEDENT>", "body": "if '<STR_LIT>' in value:<EOL><INDENT>value['<STR_LIT>'] = bool(value['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in value:<EOL><INDENT>value['<STR_LIT>'] = [n.strip() for n in ast.literal_eval(value['<STR_LIT>'])]<EOL><DEDENT>return value<EOL>", "docstring": "Parse the config values\n\nArgs:\n        value (dict): Dictionary which contains the checker config\n\nReturns:\n        dict: The checker config with parsed values", "id": "f1187:c0:m14"}
{"signature": "def run(self):", "body": "pass<EOL>", "docstring": "Abstract method. This method will be executed for subclasses which have not implemented their own method", "id": "f1187:c0:m12"}
{"signature": "@property<EOL><INDENT>def dao(self):<DEDENT>", "body": "return self._dao<EOL>", "docstring": "Getter for 'dao' property\n\nReturns:\n        atomshield.helpers.DAO: Instance of DAO class", "id": "f1187:c0:m1"}
{"signature": "@property<EOL><INDENT>def issues(self):<DEDENT>", "body": "return self._issues<EOL>", "docstring": "Getter for 'issues' property\n\nReturns:\n        list<atomshields.helpers.Issue>: List of instances of Issue class", "id": "f1187:c0:m7"}
{"signature": "@property<EOL><INDENT>def path(self):<DEDENT>", "body": "return self._path<EOL>", "docstring": "Getter for 'path' property\n\nReturns:\n        str: Absolute path to scan", "id": "f1187:c0:m3"}
{"signature": "@project.setter<EOL><INDENT>def project(self, value):<DEDENT>", "body": "self._project = value<EOL>", "docstring": "Setter for 'project' property\n\nArgs:\n        value (str): Project's name", "id": "f1187:c0:m6"}
{"signature": "@staticmethod<EOL><INDENT>def isInstalled(value):<DEDENT>", "body": "function = \"\"\"<STR_LIT>\"\"\"bash -c '<STR_LIT>'", "docstring": "Check if a software is installed into machine.\n\nArgs:\n        value (str): Software's name\n\nReturns:\n        bool: True if the software is installed. False else", "id": "f1187:c0:m15"}
{"signature": "def report(func):", "body": "def execute(self, *args, **kwargs):<EOL><INDENT>try:<EOL><INDENT>print(\"<STR_LIT>\".format(n=self.__class__.NAME))<EOL>if hasattr(self, '<STR_LIT:test>'):<EOL><INDENT>if self.test():<EOL><INDENT>return func(self, *args, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>print(colored(\"<STR_LIT>\".format(c=self.__class__.__name__), \"<STR_LIT>\"))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return func(self, *args, **kwargs)<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>print(colored(\"<STR_LIT>\".format(n=self.__class__.NAME, e = e), \"<STR_LIT>\"))<EOL><DEDENT><DEDENT>return execute<EOL>", "docstring": "Decorator for method run. This method will be execute before the execution\nfrom the method with this decorator.", "id": "f1190:m0"}
{"signature": "@property<EOL><INDENT>def issues(self):<DEDENT>", "body": "if self._issues is None:<EOL><INDENT>return []<EOL><DEDENT>return self._issues<EOL>", "docstring": "Getter for 'issues' property\n\nReturns:\n        list: List of `Issue` instances", "id": "f1190:c0:m1"}
{"signature": "@issues.setter<EOL><INDENT>def issues(self, value):<DEDENT>", "body": "self._issues = value<EOL>", "docstring": "Setter for 'issues' property\n\nArgs:\n        value (list): List of `Issue` instances", "id": "f1190:c0:m2"}
{"signature": "@property<EOL><INDENT>def config(self):<DEDENT>", "body": "return self._config<EOL>", "docstring": "Getter for 'config' property\n\nReturns:\n        dict: Dictionary which contains the current values for this report config", "id": "f1190:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def setup():<DEDENT>", "body": "<EOL>if not os.path.isdir(AtomShieldsScanner.CHECKERS_DIR):<EOL><INDENT>os.makedirs(AtomShieldsScanner.CHECKERS_DIR)<EOL><DEDENT>if not os.path.isdir(AtomShieldsScanner.REPORTS_DIR):<EOL><INDENT>os.makedirs(AtomShieldsScanner.REPORTS_DIR)<EOL><DEDENT>for f in AtomShieldsScanner._getFiles(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"<STR_LIT>\"), \"<STR_LIT>\"):<EOL><INDENT>AtomShieldsScanner.installChecker(f)<EOL><DEDENT>for f in AtomShieldsScanner._getFiles(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"<STR_LIT>\"), \"<STR_LIT>\"):<EOL><INDENT>AtomShieldsScanner.installReport(f)<EOL><DEDENT>AtomShieldsScanner._executeMassiveMethod(path=AtomShieldsScanner.CHECKERS_DIR, method=\"<STR_LIT>\", args={})<EOL>config_dir = os.path.dirname(AtomShieldsScanner.CONFIG_PATH)<EOL>if not os.path.isdir(config_dir):<EOL><INDENT>os.makedirs(config_dir)<EOL><DEDENT>", "docstring": "Creates required directories and copy checkers and reports.", "id": "f1194:c0:m13"}
{"signature": "@configFile.setter<EOL><INDENT>def configFile(self, value):<DEDENT>", "body": "self._config_file = os.path.abspath(value)<EOL>", "docstring": "Setter for 'configFile' property\n\nArgs:\n        value (str): Path to config file", "id": "f1194:c0:m8"}
{"signature": "@property<EOL><INDENT>def issues(self):<DEDENT>", "body": "return self._issues<EOL>", "docstring": "Getter for 'issues' property\n\nReturns:\n        list: List of Issue instances", "id": "f1194:c0:m11"}
{"signature": "@config.setter<EOL><INDENT>def config(self, value):<DEDENT>", "body": "self._config = value<EOL>", "docstring": "Setter for 'config' property\n\nArgs:\n        value (dict): Dictionary which contains the config", "id": "f1194:c0:m10"}
{"signature": "@staticmethod<EOL><INDENT>def _addConfig(instance, config, parent_section):<DEDENT>", "body": "try:<EOL><INDENT>section_name = \"<STR_LIT>\".format(p = parent_section, n=instance.NAME.lower())<EOL>config.add_section(section_name)<EOL>for k in list(instance.CONFIG.keys()):<EOL><INDENT>config.set(section_name, k, instance.CONFIG[k])<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>print(\"<STR_LIT>\" % e)<EOL><DEDENT>", "docstring": "Writes a section for a plugin.\n\nArgs:\n        instance (object): Class instance for plugin\n        config (object): Object (ConfigParser) which the current config\n        parent_section (str): Parent section for plugin. Usually 'checkers' or 'reports'", "id": "f1194:c0:m21"}
{"signature": "@issues.setter<EOL><INDENT>def issues(self, value):<DEDENT>", "body": "self._issues = value<EOL>", "docstring": "Setter for 'issues' property\n\nArgs:\n        value (list): List of Issue instances", "id": "f1194:c0:m12"}
{"signature": "@path.setter<EOL><INDENT>def path(self, value):<DEDENT>", "body": "self._path = os.path.abspath(value)<EOL>", "docstring": "Setter for 'path' property\n\nArgs:\n        value (str): Path to target directory", "id": "f1194:c0:m4"}
{"signature": "@staticmethod<EOL><INDENT>def _debug(message, color=None, attrs=None):<DEDENT>", "body": "if attrs is None:<EOL><INDENT>attrs = []<EOL><DEDENT>if color is not None:<EOL><INDENT>print(colored(message, color, attrs=attrs))<EOL><DEDENT>else:<EOL><INDENT>if len(attrs) > <NUM_LIT:0>:<EOL><INDENT>print(colored(message, \"<STR_LIT>\", attrs=attrs))<EOL><DEDENT>else:<EOL><INDENT>print(message)<EOL><DEDENT><DEDENT>", "docstring": "Print a message if the class attribute 'verbose' is enabled\n\nArgs:\n        message (str): Message to print", "id": "f1194:c0:m1"}
{"signature": "@property<EOL><INDENT>def configFile(self):<DEDENT>", "body": "return self._config_file<EOL>", "docstring": "Getter for 'configFile' property\n\nReturns:\n        str: Path to config file", "id": "f1194:c0:m7"}
{"signature": "def getConfig(self, section = None):", "body": "data = {}<EOL>if section is None:<EOL><INDENT>for s in self.config.sections():<EOL><INDENT>if '<STR_LIT:/>' in s:<EOL><INDENT>parent, _s = s.split('<STR_LIT:/>')<EOL>data[parent][_s] = dict(self.config.items(s))<EOL><DEDENT>else:<EOL><INDENT>data[s] = dict(self.config.items(s))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>data = dict(self.config.items(section))<EOL><DEDENT>return data<EOL>", "docstring": "Returns a dictionary which contains the current config. If a section is set,\nonly the section config will be returned\n\nArgs:\n        section (str): (Optional) Section name.\n\nReturns:\n        dict: Representation of current config", "id": "f1194:c0:m25"}
{"signature": "def initialize(self, templates_path, global_data):", "body": "self.env = Environment(loader=FileSystemLoader(templates_path))<EOL>self.env.trim_blocks = True<EOL>self.global_data = global_data<EOL>", "docstring": "initialize with templates' path\n        parameters\n          templates_path    str    the position of templates directory\n          global_data       dict   global data can be got in any templates", "id": "f1200:c0:m0"}
{"signature": "def render_to(self, path, template, **data):", "body": "html = self.render(template, **data)<EOL>with open(path, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(html.encode(charset))<EOL><DEDENT>", "docstring": "Render data with template and then write to path", "id": "f1200:c0:m2"}
{"signature": "def block_code(self, text, lang):", "body": "if not lang:<EOL><INDENT>return self._code_no_lexer(text)<EOL><DEDENT>try:<EOL><INDENT>lexer = get_lexer_by_name(lang, stripall=True)<EOL><DEDENT>except ClassNotFound:  <EOL><INDENT>return self._code_no_lexer(text)<EOL><DEDENT>formatter = HtmlFormatter()<EOL>return highlight(text, lexer, formatter)<EOL>", "docstring": "text: unicode text to render", "id": "f1201:c0:m1"}
{"signature": "def parse(self, source):", "body": "rt, title, title_pic, markdown = libparser.parse(source)<EOL>if rt == -<NUM_LIT:1>:<EOL><INDENT>raise SeparatorNotFound<EOL><DEDENT>elif rt == -<NUM_LIT:2>:<EOL><INDENT>raise PostTitleNotFound<EOL><DEDENT>title, title_pic, markdown = map(to_unicode, (title, title_pic,<EOL>markdown))<EOL>html = self.markdown.render(markdown)<EOL>summary = self.markdown.render(markdown[:<NUM_LIT:200>])<EOL>return {<EOL>'<STR_LIT:title>': title,<EOL>'<STR_LIT>': markdown,<EOL>'<STR_LIT:html>': html,<EOL>'<STR_LIT>': summary,<EOL>'<STR_LIT>': title_pic<EOL>}<EOL>", "docstring": "Parse ascii post source, return dict", "id": "f1201:c1:m2"}
{"signature": "def run(self, port):", "body": "self.watcher.start()<EOL>self.run_server(port)<EOL>", "docstring": "start web server and watcher", "id": "f1203:c2:m4"}
{"signature": "def get_files_stat(self):", "body": "if not exists(Post.src_dir):<EOL><INDENT>logger.error(SourceDirectoryNotFound.__doc__)<EOL>sys.exit(SourceDirectoryNotFound.exit_code)<EOL><DEDENT>paths = []<EOL>for fn in ls(Post.src_dir):<EOL><INDENT>if fn.endswith(src_ext):<EOL><INDENT>paths.append(join(Post.src_dir, fn))<EOL><DEDENT><DEDENT>if exists(config.filepath):<EOL><INDENT>paths.append(config.filepath)<EOL><DEDENT>files = dict((p, stat(p).st_mtime) for p in paths)<EOL>return files<EOL>", "docstring": "get source files' update time", "id": "f1203:c2:m2"}
{"signature": "def watch_files(self):", "body": "try:<EOL><INDENT>while <NUM_LIT:1>:<EOL><INDENT>sleep(<NUM_LIT:1>)  <EOL>try:<EOL><INDENT>files_stat = self.get_files_stat()<EOL><DEDENT>except SystemExit:<EOL><INDENT>logger.error(\"<STR_LIT>\")<EOL>self.shutdown_server()<EOL><DEDENT>if self.files_stat != files_stat:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>generator.re_generate()<EOL>global _root<EOL>_root = generator.root<EOL><DEDENT>except SystemExit:  <EOL><INDENT>logger.error(\"<STR_LIT>\")<EOL>self.shutdown_server()<EOL><DEDENT>self.files_stat = files_stat  <EOL><DEDENT><DEDENT><DEDENT>except KeyboardInterrupt:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>self.shutdown_watcher()<EOL><DEDENT>", "docstring": "watch files for changes, if changed, rebuild blog. this thread\n        will quit if the main process ends", "id": "f1203:c2:m3"}
{"signature": "def run_server(self, port):", "body": "try:<EOL><INDENT>self.server = MultiThreadedHTTPServer(('<STR_LIT>', port), Handler)<EOL><DEDENT>except socket.error as e:  <EOL><INDENT>logger.error(str(e))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>logger.info(\"<STR_LIT>\"<EOL>% port)<EOL>try:<EOL><INDENT>self.server.serve_forever()<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>self.shutdown_server()<EOL><DEDENT>", "docstring": "run a server binding to port", "id": "f1203:c2:m1"}
{"signature": "def initialize(self):", "body": "<EOL>try:<EOL><INDENT>conf = config.parse()<EOL><DEDENT>except ConfigSyntaxError as e:<EOL><INDENT>logger.error(e.__doc__)<EOL>sys.exit(e.exit_code)<EOL><DEDENT>update_nested_dict(self.config, conf)<EOL>self.blog.__dict__.update(self.config['<STR_LIT>'])<EOL>self.author.__dict__.update(self.config['<STR_LIT>'])<EOL>self.root = self.config['<STR_LIT:root>']<EOL>templates = join(self.blog.theme, '<STR_LIT>')  <EOL>jinja2_global_data = {<EOL>'<STR_LIT>': self.blog,<EOL>'<STR_LIT>': self.author,<EOL>'<STR_LIT>': self.config,<EOL>'<STR_LIT:root>': self.root<EOL>}<EOL>renderer.initialize(templates, jinja2_global_data)<EOL>logger.success('<STR_LIT>')<EOL>", "docstring": "Initialize configuration and renderer environment", "id": "f1206:c0:m2"}
{"signature": "def join(*p):", "body": "return os.path.normpath(os.path.join(*p))<EOL>", "docstring": "return normpath version of path.join", "id": "f1207:m0"}
{"signature": "def parse(src):", "body": "rt = libparser.parse(byref(post), src)<EOL>return (<EOL>rt,<EOL>string_at(post.title, post.tsz),<EOL>string_at(post.tpic, post.tpsz),<EOL>post.body<EOL>)<EOL>", "docstring": "Note: src should be ascii string", "id": "f1208:m0"}
{"signature": "def replace_relative_url_to_absolute(self, content):", "body": "p =  os.path.join(os.getcwd(), '<STR_LIT>', '<STR_LIT>')<EOL>return content.replace('<STR_LIT>', p)<EOL>", "docstring": "Replace '../' leaded url with absolute uri.", "id": "f1210:c0:m3"}
{"signature": "def render(template, **data):", "body": "try:<EOL><INDENT>return renderer.render(template, **data)<EOL><DEDENT>except JinjaTemplateNotFound as e:<EOL><INDENT>logger.error(e.__doc__ + '<STR_LIT>' % template)<EOL>sys.exit(e.exit_code)<EOL><DEDENT>", "docstring": "shortcut to render data with `template`. Just add exception\n    catch to `renderer.render`", "id": "f1210:m0"}
{"signature": "def load_fixture(filename):", "body": "path = os.path.join(os.path.dirname(__file__), \"<STR_LIT>\", filename)<EOL>with open(path) as json_data:<EOL><INDENT>return json.load(json_data)<EOL><DEDENT>", "docstring": "Load some fixture JSON", "id": "f1216:m0"}
{"signature": "def fahrenheit_to_nuheat(fahrenheit):", "body": "return int(round_half(((fahrenheit - <NUM_LIT>) * <NUM_LIT>) + <NUM_LIT>))<EOL>", "docstring": "Convert Fahrenheit to a temperature value that NuHeat understands\nFormula f(x) = ((x - 33) * 56) + 33\n\n:param fahrenheit: The temperature to convert to NuHeat", "id": "f1219:m2"}
{"signature": "def nuheat_to_celsius(nuheat_temperature):", "body": "fahrenheit = nuheat_to_fahrenheit(nuheat_temperature)<EOL>return fahrenheit_to_celsius(fahrenheit)<EOL>", "docstring": "Convert the NuHeat temp value to Celsius\n\n:param nuheat_temperature: The temperature to convert to Celsius", "id": "f1219:m6"}
{"signature": "def celsius_to_nuheat(celsius):", "body": "fahrenheit = celsius_to_fahrenheit(celsius)<EOL>return int(round_half(((fahrenheit - <NUM_LIT>) * <NUM_LIT>) + <NUM_LIT>))<EOL>", "docstring": "Convert Celsius to a temperature value that NuHeat understands\nFormula f(x) = ((x - 33) * 56) + 33\n\n:param celsius: The temperature to convert to NuHeat", "id": "f1219:m4"}
{"signature": "def celsius_to_fahrenheit(celsius):", "body": "return int(round_half(celsius * <NUM_LIT> + <NUM_LIT:32>))<EOL>", "docstring": "Convert Celsius to Fahrenheit\n\n:param celsius: The temperature to convert to Fahrenheit", "id": "f1219:m3"}
{"signature": "@property<EOL><INDENT>def schedule_mode(self):<DEDENT>", "body": "return self._schedule_mode<EOL>", "docstring": "Return the mode that the thermostat is currently using", "id": "f1220:c0:m13"}
{"signature": "@property<EOL><INDENT>def max_fahrenheit(self):<DEDENT>", "body": "if not self.max_temperature:<EOL><INDENT>return None<EOL><DEDENT>return nuheat_to_fahrenheit(self.max_temperature)<EOL>", "docstring": "Return the thermostat's maximum temperature in Fahrenheit", "id": "f1220:c0:m6"}
{"signature": "@property<EOL><INDENT>def fahrenheit(self):<DEDENT>", "body": "if not self.temperature:<EOL><INDENT>return None<EOL><DEDENT>return nuheat_to_fahrenheit(self.temperature)<EOL>", "docstring": "Return the current temperature in Fahrenheit", "id": "f1220:c0:m2"}
{"signature": "def set_target_celsius(self, celsius, mode=config.SCHEDULE_HOLD):", "body": "temperature = celsius_to_nuheat(celsius)<EOL>self.set_target_temperature(temperature, mode)<EOL>", "docstring": "Set the target temperature to the desired celsius, with more granular control of the hold\nmode\n\n:param celsius: The desired temperature in C\n:param mode: The desired mode to operate in", "id": "f1220:c0:m17"}
{"signature": "def set_data(self, post_data):", "body": "params = {<EOL>\"<STR_LIT>\": self.serial_number<EOL>}<EOL>self._session.request(config.THERMOSTAT_URL, method=\"<STR_LIT:POST>\", data=post_data, params=params)<EOL>", "docstring": "Update (patch) the current instance's data on the NuHeat API", "id": "f1220:c0:m19"}
{"signature": "@schedule_mode.setter<EOL><INDENT>def schedule_mode(self, mode):<DEDENT>", "body": "modes = [config.SCHEDULE_RUN, config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD]<EOL>if mode not in modes:<EOL><INDENT>raise Exception(\"<STR_LIT>\".format(modes))<EOL><DEDENT>self.set_data({\"<STR_LIT>\": mode})<EOL>", "docstring": "Set the thermostat mode\n\n:param mode: The desired mode integer value.\n             Auto = 1\n             Temporary hold = 2\n             Permanent hold = 3", "id": "f1220:c0:m14"}
{"signature": "@property<EOL><INDENT>def celsius(self):<DEDENT>", "body": "if not self.temperature:<EOL><INDENT>return None<EOL><DEDENT>return nuheat_to_celsius(self.temperature)<EOL>", "docstring": "Return the current temperature in Celsius", "id": "f1220:c0:m3"}
{"signature": "@property<EOL><INDENT>def min_celsius(self):<DEDENT>", "body": "if not self.min_temperature:<EOL><INDENT>return None<EOL><DEDENT>return nuheat_to_celsius(self.min_temperature)<EOL>", "docstring": "Return the thermostat's minimum temperature in Celsius", "id": "f1220:c0:m5"}
{"signature": "@target_fahrenheit.setter<EOL><INDENT>def target_fahrenheit(self, fahrenheit):<DEDENT>", "body": "self.set_target_fahrenheit(fahrenheit)<EOL>", "docstring": "Helper to set and HOLD the target temperature to the desired fahrenheit\n\n:param fahrenheit: The desired temperature in F", "id": "f1220:c0:m10"}
{"signature": "def set_target_fahrenheit(self, fahrenheit, mode=config.SCHEDULE_HOLD):", "body": "temperature = fahrenheit_to_nuheat(fahrenheit)<EOL>self.set_target_temperature(temperature, mode)<EOL>", "docstring": "Set the target temperature to the desired fahrenheit, with more granular control of the\nhold mode\n\n:param fahrenheit: The desired temperature in F\n:param mode: The desired mode to operate in", "id": "f1220:c0:m16"}
{"signature": "def get_thermostat(self, serial_number):", "body": "return NuHeatThermostat(self, serial_number)<EOL>", "docstring": "Get a thermostat object by serial number\n\n:param serial_number: The serial number / ID of the desired thermostat", "id": "f1222:c0:m3"}
{"signature": "def request(self, url, method=\"<STR_LIT:GET>\", data=None, params=None, retry=True):", "body": "headers = config.REQUEST_HEADERS<EOL>if params and self._session_id:<EOL><INDENT>params['<STR_LIT>'] = self._session_id<EOL><DEDENT>if method == \"<STR_LIT:GET>\":<EOL><INDENT>response = requests.get(url, headers=headers, params=params)<EOL><DEDENT>elif method == \"<STR_LIT:POST>\":<EOL><INDENT>response = requests.post(url, headers=headers, params=params, data=data)<EOL><DEDENT>if response.status_code == <NUM_LIT> and retry:<EOL><INDENT>_LOGGER.warn(\"<STR_LIT>\")<EOL>self._session_id = None<EOL>self.authenticate()<EOL>return self.request(url, method=method, data=data, params=params, retry=False)<EOL><DEDENT>response.raise_for_status()<EOL>try:<EOL><INDENT>return response.json()<EOL><DEDENT>except ValueError:<EOL><INDENT>return response<EOL><DEDENT>", "docstring": "Make a request to the NuHeat API\n\n:param url: The URL to request\n:param method: The type of request to make (GET, POST)\n:param data: Data to be sent along with POST requests\n:param params: Querystring parameters\n:param retry: Attempt to re-authenticate and retry request if necessary", "id": "f1222:c0:m4"}
{"signature": "@lockdown(until_date=YESTERDAY, after_date=TOMORROW)<EOL>def locked_view_until_and_after(request):", "body": "return HttpResponse('<STR_LIT>')<EOL>", "docstring": "View, only not looked between yesterday and tomorrow.", "id": "f1225:m10"}
{"signature": "@lockdown(extra_context={'<STR_LIT:foo>': '<STR_LIT:bar>'})<EOL>def locked_view_with_extra_context(request):", "body": "return HttpResponse('<STR_LIT>')<EOL>", "docstring": "View, locked by the decorator with extra context.", "id": "f1225:m5"}
{"signature": "@lockdown(form=AuthForm, staff_only=False)<EOL>def user_locked_view(request):", "body": "return HttpResponse('<STR_LIT>')<EOL>", "docstring": "View, locked by the decorator with access for known users only.", "id": "f1225:m11"}
{"signature": "@lockdown(after_date=YESTERDAY)<EOL>def locked_view_after_yesterday(request):", "body": "return HttpResponse('<STR_LIT>')<EOL>", "docstring": "View, locked since yesterday.", "id": "f1225:m8"}
{"signature": "@lockdown(remote_addr_exceptions=['<STR_LIT>'])<EOL>def locked_view_with_ip_exception(request):", "body": "return HttpResponse('<STR_LIT>')<EOL>", "docstring": "View, locked except for the configured IP-address.", "id": "f1225:m4"}
{"signature": "def clean_answer(self):", "body": "if self.cleaned_data['<STR_LIT>'] == <NUM_LIT>:<EOL><INDENT>return <NUM_LIT><EOL><DEDENT>raise forms.ValidationError('<STR_LIT>')<EOL>", "docstring": "Clean the answer field, by checking its value.", "id": "f1226:c0:m0"}
{"signature": "def setUp(self):", "body": "super(MiddlewareTests, self).setUp()<EOL>self._old_middleware_classes = django_settings.MIDDLEWARE<EOL>django_settings.MIDDLEWARE.append(<EOL>'<STR_LIT>',<EOL>)<EOL>", "docstring": "Additional setup for middleware tests.", "id": "f1229:c2:m0"}
{"signature": "def clean(self):", "body": "cleaned_data = super(AuthForm, self).clean()<EOL>user = self.get_user()<EOL>if self.staff_only and (not user or not user.is_staff):<EOL><INDENT>raise forms.ValidationError('<STR_LIT>')<EOL><DEDENT>if self.superusers_only and (not user or not user.is_superuser):<EOL><INDENT>raise forms.ValidationError('<STR_LIT>')<EOL><DEDENT>return cleaned_data<EOL>", "docstring": "When receiving the filled out form, check for valid access.", "id": "f1230:c1:m1"}
{"signature": "def __init__(self, passwords=None, *args, **kwargs):", "body": "super(LockdownForm, self).__init__(*args, **kwargs)<EOL>if passwords is None:<EOL><INDENT>passwords = settings.PASSWORDS<EOL><DEDENT>self.valid_passwords = passwords<EOL>", "docstring": "Initialize the form by setting the valid passwords.", "id": "f1230:c0:m0"}
{"signature": "def clean_password(self):", "body": "value = self.cleaned_data.get('<STR_LIT:password>')<EOL>if value not in self.valid_passwords:<EOL><INDENT>raise forms.ValidationError('<STR_LIT>')<EOL><DEDENT>return value<EOL>", "docstring": "Check that the password is valid.", "id": "f1230:c0:m1"}
{"signature": "def show_form(self):", "body": "return bool(self.valid_passwords)<EOL>", "docstring": "Show the form if there are any valid passwords.", "id": "f1230:c0:m4"}
{"signature": "def generate_token(self):", "body": "return self.cleaned_data['<STR_LIT:password>']<EOL>", "docstring": "Save the password as the authentication token.\n\n        It's acceptable to store the password raw, as it is stored server-side\n        in the user's session.", "id": "f1230:c0:m2"}
{"signature": "def redirect(self, request):", "body": "url = request.path<EOL>querystring = request.GET.copy()<EOL>if self.logout_key and self.logout_key in request.GET:<EOL><INDENT>del querystring[self.logout_key]<EOL><DEDENT>if querystring:<EOL><INDENT>url = '<STR_LIT>' % (url, querystring.urlencode())<EOL><DEDENT>return HttpResponseRedirect(url)<EOL>", "docstring": "Handle redirects properly.", "id": "f1231:c0:m3"}
{"signature": "def __init__(self, get_response=None, form=None, until_date=None,<EOL>after_date=None, logout_key=None, session_key=None,<EOL>url_exceptions=None, view_exceptions=None,<EOL>remote_addr_exceptions=None, trusted_proxies=None,<EOL>extra_context=None, **form_kwargs):", "body": "if logout_key is None:<EOL><INDENT>logout_key = settings.LOGOUT_KEY<EOL><DEDENT>if session_key is None:<EOL><INDENT>session_key = settings.SESSION_KEY<EOL><DEDENT>self.get_response = get_response<EOL>self.form = form<EOL>self.form_kwargs = form_kwargs<EOL>self.until_date = until_date<EOL>self.after_date = after_date<EOL>self.logout_key = logout_key<EOL>self.session_key = session_key<EOL>self.url_exceptions = url_exceptions<EOL>self.remote_addr_exceptions = remote_addr_exceptions<EOL>self.trusted_proxies = trusted_proxies<EOL>self.extra_context = extra_context<EOL>", "docstring": "Initialize the middleware, by setting the configuration values.", "id": "f1231:c0:m0"}
{"signature": "def process_request(self, request):", "body": "try:<EOL><INDENT>session = request.session<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise ImproperlyConfigured('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if settings.ENABLED is False:<EOL><INDENT>return None<EOL><DEDENT>if self.remote_addr_exceptions:<EOL><INDENT>remote_addr_exceptions = self.remote_addr_exceptions<EOL><DEDENT>else:<EOL><INDENT>remote_addr_exceptions = settings.REMOTE_ADDR_EXCEPTIONS<EOL><DEDENT>if remote_addr_exceptions:<EOL><INDENT>trusted_proxies = self.trusted_proxies or settings.TRUSTED_PROXIES<EOL>remote_addr = request.META.get('<STR_LIT>')<EOL>if remote_addr in remote_addr_exceptions:<EOL><INDENT>return None<EOL><DEDENT>if remote_addr in trusted_proxies:<EOL><INDENT>x_forwarded_for = request.META.get('<STR_LIT>')<EOL>if x_forwarded_for:<EOL><INDENT>remote_addr = x_forwarded_for.split('<STR_LIT:U+002C>')[-<NUM_LIT:1>].strip()<EOL>if remote_addr in remote_addr_exceptions:<EOL><INDENT>return None<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if self.url_exceptions:<EOL><INDENT>url_exceptions = compile_url_exceptions(self.url_exceptions)<EOL><DEDENT>else:<EOL><INDENT>url_exceptions = compile_url_exceptions(settings.URL_EXCEPTIONS)<EOL><DEDENT>for pattern in url_exceptions:<EOL><INDENT>if pattern.search(request.path):<EOL><INDENT>return None<EOL><DEDENT><DEDENT>try:<EOL><INDENT>resolved_path = resolve(request.path)<EOL><DEDENT>except Resolver404:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if resolved_path.func in settings.VIEW_EXCEPTIONS:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>if self.until_date:<EOL><INDENT>until_date = self.until_date<EOL><DEDENT>else:<EOL><INDENT>until_date = settings.UNTIL_DATE<EOL><DEDENT>if self.after_date:<EOL><INDENT>after_date = self.after_date<EOL><DEDENT>else:<EOL><INDENT>after_date = settings.AFTER_DATE<EOL><DEDENT>if until_date or after_date:<EOL><INDENT>locked_date = False<EOL>if until_date and datetime.datetime.now() < until_date:<EOL><INDENT>locked_date = True<EOL><DEDENT>if after_date and datetime.datetime.now() > after_date:<EOL><INDENT>locked_date = True<EOL><DEDENT>if not locked_date:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>form_data = request.POST if request.method == '<STR_LIT:POST>' else None<EOL>if self.form:<EOL><INDENT>form_class = self.form<EOL><DEDENT>else:<EOL><INDENT>form_class = get_lockdown_form(settings.FORM)<EOL><DEDENT>form = form_class(data=form_data, **self.form_kwargs)<EOL>authorized = False<EOL>token = session.get(self.session_key)<EOL>if hasattr(form, '<STR_LIT>'):<EOL><INDENT>if form.authenticate(token):<EOL><INDENT>authorized = True<EOL><DEDENT><DEDENT>elif token is True:<EOL><INDENT>authorized = True<EOL><DEDENT>if authorized and self.logout_key and self.logout_key in request.GET:<EOL><INDENT>if self.session_key in session:<EOL><INDENT>del session[self.session_key]<EOL><DEDENT>querystring = request.GET.copy()<EOL>del querystring[self.logout_key]<EOL>return self.redirect(request)<EOL><DEDENT>if authorized:<EOL><INDENT>return None<EOL><DEDENT>if form.is_valid():<EOL><INDENT>if hasattr(form, '<STR_LIT>'):<EOL><INDENT>token = form.generate_token()<EOL><DEDENT>else:<EOL><INDENT>token = True<EOL><DEDENT>session[self.session_key] = token<EOL>return self.redirect(request)<EOL><DEDENT>page_data = {'<STR_LIT>': until_date, '<STR_LIT>': after_date}<EOL>if not hasattr(form, '<STR_LIT>') or form.show_form():<EOL><INDENT>page_data['<STR_LIT>'] = form<EOL><DEDENT>if self.extra_context:<EOL><INDENT>page_data.update(self.extra_context)<EOL><DEDENT>return render(request, '<STR_LIT>', page_data)<EOL>", "docstring": "Check if each request is allowed to access the current resource.", "id": "f1231:c0:m2"}
{"signature": "def lockdown(*args, **kwargs):", "body": "return decorator_from_middleware_with_args(LockdownMiddleware)(*args,<EOL>**kwargs)<EOL>", "docstring": "Define a decorator based on the LockdownMiddleware.\n\n    This decorator takes the same arguments as the middleware, but allows a\n    more granular locking than the middleware.", "id": "f1233:m0"}
{"signature": "@add.command()<EOL>@proto_dataset_uri_argument<EOL>@click.argument(\"<STR_LIT>\")<EOL>@click.argument(\"<STR_LIT:key>\")<EOL>@click.argument(\"<STR_LIT:value>\")<EOL>def metadata(proto_dataset_uri, relpath_in_dataset, key, value):", "body": "proto_dataset = dtoolcore.ProtoDataSet.from_uri(<EOL>uri=proto_dataset_uri,<EOL>config_path=CONFIG_PATH)<EOL>proto_dataset.add_item_metadata(<EOL>handle=relpath_in_dataset,<EOL>key=key,<EOL>value=value)<EOL>", "docstring": "Add metadata to a file in the proto dataset.", "id": "f1243:m13"}
{"signature": "@click.group()<EOL>def readme():", "body": "", "docstring": "Edit / show readme content.\n\n    The readme content is descriptive metadata describing the dataset.", "id": "f1243:m5"}
{"signature": "@click.group()<EOL>def add():", "body": "", "docstring": "Add items and item metadata to a proto dataset.", "id": "f1243:m11"}
{"signature": "@click.command()<EOL>@base_dataset_uri_argument<EOL>@click.argument(\"<STR_LIT>\", default=\"<STR_LIT>\")<EOL>def name(dataset_uri, new_name):", "body": "if new_name != \"<STR_LIT>\":<EOL><INDENT>_validate_name(new_name)<EOL>try:<EOL><INDENT>dataset = dtoolcore.ProtoDataSet.from_uri(<EOL>uri=dataset_uri,<EOL>config_path=CONFIG_PATH<EOL>)<EOL><DEDENT>except dtoolcore.DtoolCoreTypeError:<EOL><INDENT>dataset = dtoolcore.DataSet.from_uri(<EOL>uri=dataset_uri,<EOL>config_path=CONFIG_PATH<EOL>)<EOL><DEDENT>dataset.update_name(new_name)<EOL><DEDENT>admin_metadata = dtoolcore._admin_metadata_from_uri(<EOL>uri=dataset_uri,<EOL>config_path=CONFIG_PATH<EOL>)<EOL>click.secho(admin_metadata[\"<STR_LIT:name>\"])<EOL>", "docstring": "Report / update the name of the dataset.\n\nIt is only possible to update the name of a proto dataset,\ni.e. a dataset that has not yet been frozen.", "id": "f1243:m4"}
{"signature": "@click.command()<EOL>@click.option(\"<STR_LIT>\", is_flag=True, help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\", \"<STR_LIT>\", is_flag=True, help=\"<STR_LIT>\")<EOL>@dataset_uri_argument<EOL>@click.argument(\"<STR_LIT>\")<EOL>def copy(resume, quiet, dataset_uri, dest_base_uri):", "body": "click.secho(<EOL>\"<STR_LIT>\",<EOL>fg=\"<STR_LIT>\",<EOL>err=True<EOL>)<EOL>click.secho(<EOL>\"<STR_LIT>\",<EOL>fg=\"<STR_LIT>\",<EOL>err=True<EOL>)<EOL>_copy(resume, quiet, dataset_uri, dest_base_uri)<EOL>", "docstring": "DEPRECATED: Copy a dataset to a different location.", "id": "f1243:m16"}
{"signature": "@readme.command()<EOL>@proto_dataset_uri_argument<EOL>@click.argument('<STR_LIT:input>', type=click.File('<STR_LIT:r>'))<EOL>def write(proto_dataset_uri, input):", "body": "proto_dataset = dtoolcore.ProtoDataSet.from_uri(<EOL>uri=proto_dataset_uri<EOL>)<EOL>_validate_and_put_readme(proto_dataset, input.read())<EOL>", "docstring": "Use YAML from a file or stdin to populate the readme.\n\n    To stream content from stdin use \"-\", e.g.\n\n    echo \"desc: my data\" | dtool readme write <DS_URI> -", "id": "f1243:m10"}
{"signature": "@click.command()<EOL>@click.option(\"<STR_LIT>\", \"<STR_LIT>\", is_flag=True, help=\"<STR_LIT>\")<EOL>@click.argument(\"<STR_LIT:name>\")<EOL>@click.argument(\"<STR_LIT>\", default=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\", \"<STR_LIT>\", type=click.Path(exists=True))<EOL>def create(quiet, name, base_uri, symlink_path):", "body": "_validate_name(name)<EOL>admin_metadata = dtoolcore.generate_admin_metadata(name)<EOL>parsed_base_uri = dtoolcore.utils.generous_parse_uri(base_uri)<EOL>if parsed_base_uri.scheme == \"<STR_LIT>\":<EOL><INDENT>if symlink_path is None:<EOL><INDENT>raise click.UsageError(\"<STR_LIT>\")  <EOL><DEDENT><DEDENT>if symlink_path:<EOL><INDENT>base_uri = dtoolcore.utils.sanitise_uri(<EOL>\"<STR_LIT>\" + parsed_base_uri.path<EOL>)<EOL>parsed_base_uri = dtoolcore.utils.generous_parse_uri(base_uri)<EOL><DEDENT>proto_dataset = dtoolcore.generate_proto_dataset(<EOL>admin_metadata=admin_metadata,<EOL>base_uri=dtoolcore.utils.urlunparse(parsed_base_uri),<EOL>config_path=CONFIG_PATH)<EOL>if symlink_path:<EOL><INDENT>symlink_abspath = os.path.abspath(symlink_path)<EOL>proto_dataset._storage_broker.symlink_path = symlink_abspath<EOL><DEDENT>try:<EOL><INDENT>proto_dataset.create()<EOL><DEDENT>except dtoolcore.storagebroker.StorageBrokerOSError as err:<EOL><INDENT>raise click.UsageError(str(err))<EOL><DEDENT>proto_dataset.put_readme(\"<STR_LIT>\")<EOL>if quiet:<EOL><INDENT>click.secho(proto_dataset.uri)<EOL><DEDENT>else:<EOL><INDENT>click.secho(\"<STR_LIT>\", nl=False, fg=\"<STR_LIT>\")<EOL>click.secho(proto_dataset.uri)<EOL>click.secho(\"<STR_LIT>\")<EOL>step = <NUM_LIT:1><EOL>if parsed_base_uri.scheme != \"<STR_LIT>\":<EOL><INDENT>click.secho(\"<STR_LIT>\".format(step))<EOL>click.secho(<EOL>\"<STR_LIT>\".format(proto_dataset.uri),<EOL>fg=\"<STR_LIT>\")<EOL>if parsed_base_uri.scheme == \"<STR_LIT:file>\":<EOL><INDENT>data_path = 
proto_dataset._storage_broker._data_abspath<EOL>click.secho(\"<STR_LIT>\")<EOL>click.secho(<EOL>\"<STR_LIT>\".format(data_path),<EOL>fg=\"<STR_LIT>\"<EOL>)<EOL><DEDENT>step = step + <NUM_LIT:1><EOL><DEDENT>click.secho(\"<STR_LIT>\".format(step))<EOL>click.secho(<EOL>\"<STR_LIT>\".format(proto_dataset.uri),<EOL>fg=\"<STR_LIT>\")<EOL>step = step + <NUM_LIT:1><EOL>click.secho(<EOL>\"<STR_LIT>\".format(step)<EOL>)<EOL>click.secho(\"<STR_LIT>\".format(proto_dataset.uri), fg=\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Create a proto dataset.", "id": "f1243:m3"}
{"signature": "def valid_handle(handle):", "body": "if handle.find(\"<STR_LIT:\\n>\") != -<NUM_LIT:1>:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Return false if the handle is invalid.\n\n    For example if the handle contains a newline.", "id": "f1246:m0"}
{"signature": "def logsigmoid(a):", "body": "return -tf.nn.softplus(-a)<EOL>", "docstring": "Equivalent to tf.log(tf.sigmoid(a))", "id": "f1258:m0"}
{"signature": "def computeStatsEigen(self):", "body": "<EOL>with tf.device('<STR_LIT>'):<EOL><INDENT>def removeNone(tensor_list):<EOL><INDENT>local_list = []<EOL>for item in tensor_list:<EOL><INDENT>if item is not None:<EOL><INDENT>local_list.append(item)<EOL><DEDENT><DEDENT>return local_list<EOL><DEDENT>def copyStats(var_list):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>redundant_stats = {}<EOL>copied_list = []<EOL>for item in var_list:<EOL><INDENT>if item is not None:<EOL><INDENT>if item not in redundant_stats:<EOL><INDENT>if self._use_float64:<EOL><INDENT>redundant_stats[item] = tf.cast(<EOL>tf.identity(item), tf.float64)<EOL><DEDENT>else:<EOL><INDENT>redundant_stats[item] = tf.identity(item)<EOL><DEDENT><DEDENT>copied_list.append(redundant_stats[item])<EOL><DEDENT>else:<EOL><INDENT>copied_list.append(None)<EOL><DEDENT><DEDENT>return copied_list<EOL><DEDENT>stats_eigen = self.stats_eigen<EOL>computedEigen = {}<EOL>eigen_reverse_lookup = {}<EOL>updateOps = []<EOL>with tf.control_dependencies([]):<EOL><INDENT>for stats_var in stats_eigen:<EOL><INDENT>if stats_var not in computedEigen:<EOL><INDENT>eigens = tf.self_adjoint_eig(stats_var)<EOL>e = eigens[<NUM_LIT:0>]<EOL>Q = eigens[<NUM_LIT:1>]<EOL>if self._use_float64:<EOL><INDENT>e = tf.cast(e, tf.float32)<EOL>Q = tf.cast(Q, tf.float32)<EOL><DEDENT>updateOps.append(e)<EOL>updateOps.append(Q)<EOL>computedEigen[stats_var] = {'<STR_LIT:e>': e, '<STR_LIT>': Q}<EOL>eigen_reverse_lookup[e] = stats_eigen[stats_var]['<STR_LIT:e>']<EOL>eigen_reverse_lookup[Q] = stats_eigen[stats_var]['<STR_LIT>']<EOL><DEDENT><DEDENT><DEDENT>self.eigen_reverse_lookup = eigen_reverse_lookup<EOL>self.eigen_update_list = updateOps<EOL>if KFAC_DEBUG:<EOL><INDENT>self.eigen_update_list = [item for item in updateOps]<EOL>with tf.control_dependencies(updateOps):<EOL><INDENT>updateOps.append(tf.Print(tf.constant(<EOL><NUM_LIT:0.>), [tf.convert_to_tensor('<STR_LIT>')]))<EOL><DEDENT><DEDENT><DEDENT>return updateOps<EOL>", "docstring": "compute the eigen 
decomp using copied var stats to avoid concurrent read/write from other queue", "id": "f1267:c0:m8"}
{"signature": "def convert_episode_to_batch_major(episode):", "body": "episode_batch = {}<EOL>for key in episode.keys():<EOL><INDENT>val = np.array(episode[key]).copy()<EOL>episode_batch[key] = val.swapaxes(<NUM_LIT:0>, <NUM_LIT:1>)<EOL><DEDENT>return episode_batch<EOL>", "docstring": "Converts an episode to have the batch dimension in the major (first)\n    dimension.", "id": "f1268:m6"}
{"signature": "def store_args(method):", "body": "argspec = inspect.getfullargspec(method)<EOL>defaults = {}<EOL>if argspec.defaults is not None:<EOL><INDENT>defaults = dict(<EOL>zip(argspec.args[-len(argspec.defaults):], argspec.defaults))<EOL><DEDENT>if argspec.kwonlydefaults is not None:<EOL><INDENT>defaults.update(argspec.kwonlydefaults)<EOL><DEDENT>arg_names = argspec.args[<NUM_LIT:1>:]<EOL>@functools.wraps(method)<EOL>def wrapper(*positional_args, **keyword_args):<EOL><INDENT>self = positional_args[<NUM_LIT:0>]<EOL>args = defaults.copy()<EOL>for name, value in zip(arg_names, positional_args[<NUM_LIT:1>:]):<EOL><INDENT>args[name] = value<EOL><DEDENT>args.update(keyword_args)<EOL>self.__dict__.update(args)<EOL>return method(*positional_args, **keyword_args)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Stores provided method args as instance attributes.", "id": "f1268:m0"}
{"signature": "def transitions_in_episode_batch(episode_batch):", "body": "shape = episode_batch['<STR_LIT:u>'].shape<EOL>return shape[<NUM_LIT:0>] * shape[<NUM_LIT:1>]<EOL>", "docstring": "Number of transitions in a given episode batch.", "id": "f1268:m7"}
{"signature": "def import_function(spec):", "body": "mod_name, fn_name = spec.split('<STR_LIT::>')<EOL>module = importlib.import_module(mod_name)<EOL>fn = getattr(module, fn_name)<EOL>return fn<EOL>", "docstring": "Import a function identified by a string like \"pkg.module:fn_name\".", "id": "f1268:m1"}
{"signature": "def __init__(self, buffer_shapes, size_in_transitions, T, sample_transitions):", "body": "self.buffer_shapes = buffer_shapes<EOL>self.size = size_in_transitions // T<EOL>self.T = T<EOL>self.sample_transitions = sample_transitions<EOL>self.buffers = {key: np.empty([self.size, *shape])<EOL>for key, shape in buffer_shapes.items()}<EOL>self.current_size = <NUM_LIT:0><EOL>self.n_transitions_stored = <NUM_LIT:0><EOL>self.lock = threading.Lock()<EOL>", "docstring": "Creates a replay buffer.\n\n        Args:\n            buffer_shapes (dict of ints): the shape for all buffers that are used in the replay\n                buffer\n            size_in_transitions (int): the size of the buffer, measured in transitions\n            T (int): the time horizon for episodes\n            sample_transitions (function): a function that samples from the replay buffer", "id": "f1273:c0:m0"}
{"signature": "def sample(self, batch_size):", "body": "buffers = {}<EOL>with self.lock:<EOL><INDENT>assert self.current_size > <NUM_LIT:0><EOL>for key in self.buffers.keys():<EOL><INDENT>buffers[key] = self.buffers[key][:self.current_size]<EOL><DEDENT><DEDENT>buffers['<STR_LIT>'] = buffers['<STR_LIT:o>'][:, <NUM_LIT:1>:, :]<EOL>buffers['<STR_LIT>'] = buffers['<STR_LIT>'][:, <NUM_LIT:1>:, :]<EOL>transitions = self.sample_transitions(buffers, batch_size)<EOL>for key in (['<STR_LIT:r>', '<STR_LIT>', '<STR_LIT>'] + list(self.buffers.keys())):<EOL><INDENT>assert key in transitions, \"<STR_LIT>\" % key<EOL><DEDENT>return transitions<EOL>", "docstring": "Returns a dict {key: array(batch_size x shapes[key])}", "id": "f1273:c0:m2"}
{"signature": "def __init__(self, size, eps=<NUM_LIT>, default_clip_range=np.inf, sess=None):", "body": "self.size = size<EOL>self.eps = eps<EOL>self.default_clip_range = default_clip_range<EOL>self.sess = sess if sess is not None else tf.get_default_session()<EOL>self.local_sum = np.zeros(self.size, np.float32)<EOL>self.local_sumsq = np.zeros(self.size, np.float32)<EOL>self.local_count = np.zeros(<NUM_LIT:1>, np.float32)<EOL>self.sum_tf = tf.get_variable(<EOL>initializer=tf.zeros_initializer(), shape=self.local_sum.shape, name='<STR_LIT>',<EOL>trainable=False, dtype=tf.float32)<EOL>self.sumsq_tf = tf.get_variable(<EOL>initializer=tf.zeros_initializer(), shape=self.local_sumsq.shape, name='<STR_LIT>',<EOL>trainable=False, dtype=tf.float32)<EOL>self.count_tf = tf.get_variable(<EOL>initializer=tf.ones_initializer(), shape=self.local_count.shape, name='<STR_LIT:count>',<EOL>trainable=False, dtype=tf.float32)<EOL>self.mean = tf.get_variable(<EOL>initializer=tf.zeros_initializer(), shape=(self.size,), name='<STR_LIT>',<EOL>trainable=False, dtype=tf.float32)<EOL>self.std = tf.get_variable(<EOL>initializer=tf.ones_initializer(), shape=(self.size,), name='<STR_LIT>',<EOL>trainable=False, dtype=tf.float32)<EOL>self.count_pl = tf.placeholder(name='<STR_LIT>', shape=(<NUM_LIT:1>,), dtype=tf.float32)<EOL>self.sum_pl = tf.placeholder(name='<STR_LIT>', shape=(self.size,), dtype=tf.float32)<EOL>self.sumsq_pl = tf.placeholder(name='<STR_LIT>', shape=(self.size,), dtype=tf.float32)<EOL>self.update_op = tf.group(<EOL>self.count_tf.assign_add(self.count_pl),<EOL>self.sum_tf.assign_add(self.sum_pl),<EOL>self.sumsq_tf.assign_add(self.sumsq_pl)<EOL>)<EOL>self.recompute_op = tf.group(<EOL>tf.assign(self.mean, self.sum_tf / self.count_tf),<EOL>tf.assign(self.std, tf.sqrt(tf.maximum(<EOL>tf.square(self.eps),<EOL>self.sumsq_tf / self.count_tf - tf.square(self.sum_tf / self.count_tf)<EOL>))),<EOL>)<EOL>self.lock = threading.Lock()<EOL>", "docstring": "A normalizer that ensures that 
observations are approximately distributed according to\n        a standard Normal distribution (i.e. have mean zero and variance one).\n\n        Args:\n            size (int): the size of the observation to be normalized\n            eps (float): a small constant that avoids underflows\n            default_clip_range (float): normalized observations are clipped to be in\n                [-default_clip_range, default_clip_range]\n            sess (object): the TensorFlow session to be used", "id": "f1274:c0:m0"}
{"signature": "@store_args<EOL><INDENT>def __init__(self, venv, policy, dims, logger, T, rollout_batch_size=<NUM_LIT:1>,<EOL>exploit=False, use_target_net=False, compute_Q=False, noise_eps=<NUM_LIT:0>,<EOL>random_eps=<NUM_LIT:0>, history_len=<NUM_LIT:100>, render=False, monitor=False, **kwargs):<DEDENT>", "body": "assert self.T > <NUM_LIT:0><EOL>self.info_keys = [key.replace('<STR_LIT>', '<STR_LIT>') for key in dims.keys() if key.startswith('<STR_LIT>')]<EOL>self.success_history = deque(maxlen=history_len)<EOL>self.Q_history = deque(maxlen=history_len)<EOL>self.n_episodes = <NUM_LIT:0><EOL>self.reset_all_rollouts()<EOL>self.clear_history()<EOL>", "docstring": "Rollout worker generates experience by interacting with one or many environments.\n\n        Args:\n            make_env (function): a factory function that creates a new instance of the environment\n                when called\n            policy (object): the policy that is used to act\n            dims (dict of ints): the dimensions for observations (o), goals (g), and actions (u)\n            logger (object): the logger that is used by the rollout worker\n            rollout_batch_size (int): the number of parallel rollouts that should be used\n            exploit (boolean): whether or not to exploit, i.e. to act optimally according to the\n                current policy without any exploration\n            use_target_net (boolean): whether or not to use the target net for rollouts\n            compute_Q (boolean): whether or not to compute the Q values alongside the actions\n            noise_eps (float): scale of the additive Gaussian noise\n            random_eps (float): probability of selecting a completely random action\n            history_len (int): length of history for statistics smoothing\n            render (boolean): whether or not to render the rollouts", "id": "f1277:c0:m0"}
{"signature": "def clear_history(self):", "body": "self.success_history.clear()<EOL>self.Q_history.clear()<EOL>", "docstring": "Clears all histories that are used for statistics", "id": "f1277:c0:m3"}
{"signature": "def generate_rollouts(self):", "body": "self.reset_all_rollouts()<EOL>o = np.empty((self.rollout_batch_size, self.dims['<STR_LIT:o>']), np.float32)  <EOL>ag = np.empty((self.rollout_batch_size, self.dims['<STR_LIT:g>']), np.float32)  <EOL>o[:] = self.initial_o<EOL>ag[:] = self.initial_ag<EOL>obs, achieved_goals, acts, goals, successes = [], [], [], [], []<EOL>dones = []<EOL>info_values = [np.empty((self.T - <NUM_LIT:1>, self.rollout_batch_size, self.dims['<STR_LIT>' + key]), np.float32) for key in self.info_keys]<EOL>Qs = []<EOL>for t in range(self.T):<EOL><INDENT>policy_output = self.policy.get_actions(<EOL>o, ag, self.g,<EOL>compute_Q=self.compute_Q,<EOL>noise_eps=self.noise_eps if not self.exploit else <NUM_LIT:0.>,<EOL>random_eps=self.random_eps if not self.exploit else <NUM_LIT:0.>,<EOL>use_target_net=self.use_target_net)<EOL>if self.compute_Q:<EOL><INDENT>u, Q = policy_output<EOL>Qs.append(Q)<EOL><DEDENT>else:<EOL><INDENT>u = policy_output<EOL><DEDENT>if u.ndim == <NUM_LIT:1>:<EOL><INDENT>u = u.reshape(<NUM_LIT:1>, -<NUM_LIT:1>)<EOL><DEDENT>o_new = np.empty((self.rollout_batch_size, self.dims['<STR_LIT:o>']))<EOL>ag_new = np.empty((self.rollout_batch_size, self.dims['<STR_LIT:g>']))<EOL>success = np.zeros(self.rollout_batch_size)<EOL>obs_dict_new, _, done, info = self.venv.step(u)<EOL>o_new = obs_dict_new['<STR_LIT>']<EOL>ag_new = obs_dict_new['<STR_LIT>']<EOL>success = np.array([i.get('<STR_LIT>', <NUM_LIT:0.0>) for i in info])<EOL>if any(done):<EOL><INDENT>break<EOL><DEDENT>for i, info_dict in enumerate(info):<EOL><INDENT>for idx, key in enumerate(self.info_keys):<EOL><INDENT>info_values[idx][t, i] = info[i][key]<EOL><DEDENT><DEDENT>if np.isnan(o_new).any():<EOL><INDENT>self.logger.warn('<STR_LIT>')<EOL>self.reset_all_rollouts()<EOL>return 
self.generate_rollouts()<EOL><DEDENT>dones.append(done)<EOL>obs.append(o.copy())<EOL>achieved_goals.append(ag.copy())<EOL>successes.append(success.copy())<EOL>acts.append(u.copy())<EOL>goals.append(self.g.copy())<EOL>o[...] = o_new<EOL>ag[...] = ag_new<EOL><DEDENT>obs.append(o.copy())<EOL>achieved_goals.append(ag.copy())<EOL>episode = dict(o=obs,<EOL>u=acts,<EOL>g=goals,<EOL>ag=achieved_goals)<EOL>for key, value in zip(self.info_keys, info_values):<EOL><INDENT>episode['<STR_LIT>'.format(key)] = value<EOL><DEDENT>successful = np.array(successes)[-<NUM_LIT:1>, :]<EOL>assert successful.shape == (self.rollout_batch_size,)<EOL>success_rate = np.mean(successful)<EOL>self.success_history.append(success_rate)<EOL>if self.compute_Q:<EOL><INDENT>self.Q_history.append(np.mean(Qs))<EOL><DEDENT>self.n_episodes += self.rollout_batch_size<EOL>return convert_episode_to_batch_major(episode)<EOL>", "docstring": "Performs `rollout_batch_size` rollouts in parallel for time horizon `T` with the current\n        policy acting on it accordingly.", "id": "f1277:c0:m2"}
{"signature": "def logs(self, prefix='<STR_LIT>'):", "body": "logs = []<EOL>logs += [('<STR_LIT>', np.mean(self.success_history))]<EOL>if self.compute_Q:<EOL><INDENT>logs += [('<STR_LIT>', np.mean(self.Q_history))]<EOL><DEDENT>logs += [('<STR_LIT>', self.n_episodes)]<EOL>if prefix != '<STR_LIT>' and not prefix.endswith('<STR_LIT:/>'):<EOL><INDENT>return [(prefix + '<STR_LIT:/>' + key, val) for key, val in logs]<EOL><DEDENT>else:<EOL><INDENT>return logs<EOL><DEDENT>", "docstring": "Generates a dictionary that contains all collected statistics.", "id": "f1277:c0:m7"}
{"signature": "def save_policy(self, path):", "body": "with open(path, '<STR_LIT:wb>') as f:<EOL><INDENT>pickle.dump(self.policy, f)<EOL><DEDENT>", "docstring": "Pickles the current policy for later inspection.", "id": "f1277:c0:m6"}
{"signature": "def __init__(self, size, alpha):", "body": "super(PrioritizedReplayBuffer, self).__init__(size)<EOL>assert alpha >= <NUM_LIT:0><EOL>self._alpha = alpha<EOL>it_capacity = <NUM_LIT:1><EOL>while it_capacity < size:<EOL><INDENT>it_capacity *= <NUM_LIT:2><EOL><DEDENT>self._it_sum = SumSegmentTree(it_capacity)<EOL>self._it_min = MinSegmentTree(it_capacity)<EOL>self._max_priority = <NUM_LIT:1.0><EOL>", "docstring": "Create Prioritized Replay buffer.\n\n        Parameters\n        ----------\n        size: int\n            Max number of transitions to store in the buffer. When the buffer\n            overflows the old memories are dropped.\n        alpha: float\n            how much prioritization is used\n            (0 - no prioritization, 1 - full prioritization)\n\n        See Also\n        --------\n        ReplayBuffer.__init__", "id": "f1281:c1:m0"}
{"signature": "def __init__(self, size):", "body": "self._storage = []<EOL>self._maxsize = size<EOL>self._next_idx = <NUM_LIT:0><EOL>", "docstring": "Create Replay buffer.\n\n        Parameters\n        ----------\n        size: int\n            Max number of transitions to store in the buffer. When the buffer\n            overflows the old memories are dropped.", "id": "f1281:c0:m0"}
{"signature": "def update_priorities(self, idxes, priorities):", "body": "assert len(idxes) == len(priorities)<EOL>for idx, priority in zip(idxes, priorities):<EOL><INDENT>assert priority > <NUM_LIT:0><EOL>assert <NUM_LIT:0> <= idx < len(self._storage)<EOL>self._it_sum[idx] = priority ** self._alpha<EOL>self._it_min[idx] = priority ** self._alpha<EOL>self._max_priority = max(self._max_priority, priority)<EOL><DEDENT>", "docstring": "Update priorities of sampled transitions.\n\n        sets priority of transition at index idxes[i] in buffer\n        to priorities[i].\n\n        Parameters\n        ----------\n        idxes: [int]\n            List of idxes of sampled transitions\n        priorities: [float]\n            List of updated priorities corresponding to\n            transitions at the sampled idxes denoted by\n            variable `idxes`.", "id": "f1281:c1:m4"}
{"signature": "def make_feed_dict(self, data):", "body": "raise NotImplementedError<EOL>", "docstring": "Given data input it to the placeholder(s).", "id": "f1284:c0:m2"}
{"signature": "def __init__(self, placeholder):", "body": "super().__init__(placeholder.name)<EOL>self._placeholder = placeholder<EOL>", "docstring": "Wrapper for regular tensorflow placeholder.", "id": "f1284:c1:m0"}
{"signature": "def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=<NUM_LIT:1.0>,<EOL>double_q=True, scope=\"<STR_LIT>\", reuse=None, param_noise=False, param_noise_filter_func=None):", "body": "if param_noise:<EOL><INDENT>act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,<EOL>param_noise_filter_func=param_noise_filter_func)<EOL><DEDENT>else:<EOL><INDENT>act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)<EOL><DEDENT>with tf.variable_scope(scope, reuse=reuse):<EOL><INDENT>obs_t_input = make_obs_ph(\"<STR_LIT>\")<EOL>act_t_ph = tf.placeholder(tf.int32, [None], name=\"<STR_LIT:action>\")<EOL>rew_t_ph = tf.placeholder(tf.float32, [None], name=\"<STR_LIT>\")<EOL>obs_tp1_input = make_obs_ph(\"<STR_LIT>\")<EOL>done_mask_ph = tf.placeholder(tf.float32, [None], name=\"<STR_LIT>\")<EOL>importance_weights_ph = tf.placeholder(tf.float32, [None], name=\"<STR_LIT>\")<EOL>q_t = q_func(obs_t_input.get(), num_actions, scope=\"<STR_LIT>\", reuse=True)  <EOL>q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + \"<STR_LIT>\")<EOL>q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope=\"<STR_LIT>\")<EOL>target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + \"<STR_LIT>\")<EOL>q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), <NUM_LIT:1>)<EOL>if double_q:<EOL><INDENT>q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope=\"<STR_LIT>\", reuse=True)<EOL>q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, <NUM_LIT:1>)<EOL>q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>q_tp1_best = tf.reduce_max(q_tp1, <NUM_LIT:1>)<EOL><DEDENT>q_tp1_best_masked = (<NUM_LIT:1.0> - done_mask_ph) * q_tp1_best<EOL>q_t_selected_target = rew_t_ph + gamma * 
q_tp1_best_masked<EOL>td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)<EOL>errors = U.huber_loss(td_error)<EOL>weighted_error = tf.reduce_mean(importance_weights_ph * errors)<EOL>if grad_norm_clipping is not None:<EOL><INDENT>gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)<EOL>for i, (grad, var) in enumerate(gradients):<EOL><INDENT>if grad is not None:<EOL><INDENT>gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)<EOL><DEDENT><DEDENT>optimize_expr = optimizer.apply_gradients(gradients)<EOL><DEDENT>else:<EOL><INDENT>optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)<EOL><DEDENT>update_target_expr = []<EOL>for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),<EOL>sorted(target_q_func_vars, key=lambda v: v.name)):<EOL><INDENT>update_target_expr.append(var_target.assign(var))<EOL><DEDENT>update_target_expr = tf.group(*update_target_expr)<EOL>train = U.function(<EOL>inputs=[<EOL>obs_t_input,<EOL>act_t_ph,<EOL>rew_t_ph,<EOL>obs_tp1_input,<EOL>done_mask_ph,<EOL>importance_weights_ph<EOL>],<EOL>outputs=td_error,<EOL>updates=[optimize_expr]<EOL>)<EOL>update_target = U.function([], [], updates=[update_target_expr])<EOL>q_values = U.function([obs_t_input], q_t)<EOL>return act_f, train, update_target, {'<STR_LIT>': q_values}<EOL><DEDENT>", "docstring": "Creates the train function:\n\n    Parameters\n    ----------\n    make_obs_ph: str -> tf.placeholder or TfInput\n        a function that takes a name and creates a placeholder of input with that name\n    q_func: (tf.Variable, int, str, bool) -> tf.Variable\n        the model that takes the following inputs:\n            observation_in: object\n                the output of observation placeholder\n            num_actions: int\n                number of actions\n            scope: str\n            reuse: bool\n                should be passed to outer variable scope\n        and returns a tensor of shape (batch_size, num_actions) 
with values of every action.\n    num_actions: int\n        number of actions\n    reuse: bool\n        whether or not to reuse the graph variables\n    optimizer: tf.train.Optimizer\n        optimizer to use for the Q-learning objective.\n    grad_norm_clipping: float or None\n        clip gradient norms to this value. If None no clipping is performed.\n    gamma: float\n        discount rate.\n    double_q: bool\n        if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).\n        In general it is a good idea to keep it enabled.\n    scope: str or VariableScope\n        optional scope for variable_scope.\n    reuse: bool or None\n        whether or not the variables should be reused. To be able to reuse the scope must be given.\n    param_noise: bool\n        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)\n    param_noise_filter_func: tf.Variable -> bool\n        function that decides whether or not a variable should be perturbed. Only applicable\n        if param_noise is True. If set to None, default_param_noise_filter is used by default.\n\n    Returns\n    -------\n    act: (tf.Variable, bool, float) -> tf.Variable\n        function to select and action given observation.\n`       See the top of the file for details.\n    train: (object, np.array, np.array, object, np.array, np.array) -> np.array\n        optimize the error in Bellman's equation.\n`       See the top of the file for details.\n    update_target: () -> ()\n        copy the parameters from optimized Q function to the target Q function.\n`       See the top of the file for details.\n    debug: {str: function}\n        a bunch of functions to print debug data like q_values.", "id": "f1285:m6"}
{"signature": "def scope_name():", "body": "return tf.get_variable_scope().name<EOL>", "docstring": "Returns the name of current scope as a string, e.g. deepq/q_func", "id": "f1285:m1"}
{"signature": "def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=\"<STR_LIT>\", reuse=None, param_noise_filter_func=None):", "body": "if param_noise_filter_func is None:<EOL><INDENT>param_noise_filter_func = default_param_noise_filter<EOL><DEDENT>with tf.variable_scope(scope, reuse=reuse):<EOL><INDENT>observations_ph = make_obs_ph(\"<STR_LIT>\")<EOL>stochastic_ph = tf.placeholder(tf.bool, (), name=\"<STR_LIT>\")<EOL>update_eps_ph = tf.placeholder(tf.float32, (), name=\"<STR_LIT>\")<EOL>update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name=\"<STR_LIT>\")<EOL>update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name=\"<STR_LIT>\")<EOL>reset_ph = tf.placeholder(tf.bool, (), name=\"<STR_LIT>\")<EOL>eps = tf.get_variable(\"<STR_LIT>\", (), initializer=tf.constant_initializer(<NUM_LIT:0>))<EOL>param_noise_scale = tf.get_variable(\"<STR_LIT>\", (), initializer=tf.constant_initializer(<NUM_LIT>), trainable=False)<EOL>param_noise_threshold = tf.get_variable(\"<STR_LIT>\", (), initializer=tf.constant_initializer(<NUM_LIT>), trainable=False)<EOL>q_values = q_func(observations_ph.get(), num_actions, scope=\"<STR_LIT>\")<EOL>q_values_perturbed = q_func(observations_ph.get(), num_actions, scope=\"<STR_LIT>\")<EOL>def perturb_vars(original_scope, perturbed_scope):<EOL><INDENT>all_vars = scope_vars(absolute_scope_name(original_scope))<EOL>all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))<EOL>assert len(all_vars) == len(all_perturbed_vars)<EOL>perturb_ops = []<EOL>for var, perturbed_var in zip(all_vars, all_perturbed_vars):<EOL><INDENT>if param_noise_filter_func(perturbed_var):<EOL><INDENT>op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=<NUM_LIT:0.>, stddev=param_noise_scale))<EOL><DEDENT>else:<EOL><INDENT>op = tf.assign(perturbed_var, var)<EOL><DEDENT>perturb_ops.append(op)<EOL><DEDENT>assert len(perturb_ops) == len(all_vars)<EOL>return tf.group(*perturb_ops)<EOL><DEDENT>q_values_adaptive 
= q_func(observations_ph.get(), num_actions, scope=\"<STR_LIT>\")<EOL>perturb_for_adaption = perturb_vars(original_scope=\"<STR_LIT>\", perturbed_scope=\"<STR_LIT>\")<EOL>kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-<NUM_LIT:1>)<EOL>mean_kl = tf.reduce_mean(kl)<EOL>def update_scale():<EOL><INDENT>with tf.control_dependencies([perturb_for_adaption]):<EOL><INDENT>update_scale_expr = tf.cond(mean_kl < param_noise_threshold,<EOL>lambda: param_noise_scale.assign(param_noise_scale * <NUM_LIT>),<EOL>lambda: param_noise_scale.assign(param_noise_scale / <NUM_LIT>),<EOL>)<EOL><DEDENT>return update_scale_expr<EOL><DEDENT>update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= <NUM_LIT:0>,<EOL>lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))<EOL>deterministic_actions = tf.argmax(q_values_perturbed, axis=<NUM_LIT:1>)<EOL>batch_size = tf.shape(observations_ph.get())[<NUM_LIT:0>]<EOL>random_actions = tf.random_uniform(tf.stack([batch_size]), minval=<NUM_LIT:0>, maxval=num_actions, dtype=tf.int64)<EOL>chose_random = tf.random_uniform(tf.stack([batch_size]), minval=<NUM_LIT:0>, maxval=<NUM_LIT:1>, dtype=tf.float32) < eps<EOL>stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)<EOL>output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)<EOL>update_eps_expr = eps.assign(tf.cond(update_eps_ph >= <NUM_LIT:0>, lambda: update_eps_ph, lambda: eps))<EOL>updates = [<EOL>update_eps_expr,<EOL>tf.cond(reset_ph, lambda: perturb_vars(original_scope=\"<STR_LIT>\", perturbed_scope=\"<STR_LIT>\"), lambda: tf.group(*[])),<EOL>tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(<NUM_LIT:0.>, trainable=False)),<EOL>update_param_noise_threshold_expr,<EOL>]<EOL>_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, 
update_param_noise_threshold_ph, update_param_noise_scale_ph],<EOL>outputs=output_actions,<EOL>givens={update_eps_ph: -<NUM_LIT:1.0>, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},<EOL>updates=updates)<EOL>def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-<NUM_LIT:1>):<EOL><INDENT>return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)<EOL><DEDENT>return act<EOL><DEDENT>", "docstring": "Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):\n\n    Parameters\n    ----------\n    make_obs_ph: str -> tf.placeholder or TfInput\n        a function that take a name and creates a placeholder of input with that name\n    q_func: (tf.Variable, int, str, bool) -> tf.Variable\n        the model that takes the following inputs:\n            observation_in: object\n                the output of observation placeholder\n            num_actions: int\n                number of actions\n            scope: str\n            reuse: bool\n                should be passed to outer variable scope\n        and returns a tensor of shape (batch_size, num_actions) with values of every action.\n    num_actions: int\n        number of actions.\n    scope: str or VariableScope\n        optional scope for variable_scope.\n    reuse: bool or None\n        whether or not the variables should be reused. To be able to reuse the scope must be given.\n    param_noise_filter_func: tf.Variable -> bool\n        function that decides whether or not a variable should be perturbed. Only applicable\n        if param_noise is True. 
If set to None, default_param_noise_filter is used by default.\n\n    Returns\n    -------\n    act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable\n        function to select and action given observation.\n`       See the top of the file for details.", "id": "f1285:m5"}
{"signature": "def build_act(make_obs_ph, q_func, num_actions, scope=\"<STR_LIT>\", reuse=None):", "body": "with tf.variable_scope(scope, reuse=reuse):<EOL><INDENT>observations_ph = make_obs_ph(\"<STR_LIT>\")<EOL>stochastic_ph = tf.placeholder(tf.bool, (), name=\"<STR_LIT>\")<EOL>update_eps_ph = tf.placeholder(tf.float32, (), name=\"<STR_LIT>\")<EOL>eps = tf.get_variable(\"<STR_LIT>\", (), initializer=tf.constant_initializer(<NUM_LIT:0>))<EOL>q_values = q_func(observations_ph.get(), num_actions, scope=\"<STR_LIT>\")<EOL>deterministic_actions = tf.argmax(q_values, axis=<NUM_LIT:1>)<EOL>batch_size = tf.shape(observations_ph.get())[<NUM_LIT:0>]<EOL>random_actions = tf.random_uniform(tf.stack([batch_size]), minval=<NUM_LIT:0>, maxval=num_actions, dtype=tf.int64)<EOL>chose_random = tf.random_uniform(tf.stack([batch_size]), minval=<NUM_LIT:0>, maxval=<NUM_LIT:1>, dtype=tf.float32) < eps<EOL>stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)<EOL>output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)<EOL>update_eps_expr = eps.assign(tf.cond(update_eps_ph >= <NUM_LIT:0>, lambda: update_eps_ph, lambda: eps))<EOL>_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],<EOL>outputs=output_actions,<EOL>givens={update_eps_ph: -<NUM_LIT:1.0>, stochastic_ph: True},<EOL>updates=[update_eps_expr])<EOL>def act(ob, stochastic=True, update_eps=-<NUM_LIT:1>):<EOL><INDENT>return _act(ob, stochastic, update_eps)<EOL><DEDENT>return act<EOL><DEDENT>", "docstring": "Creates the act function:\n\n    Parameters\n    ----------\n    make_obs_ph: str -> tf.placeholder or TfInput\n        a function that take a name and creates a placeholder of input with that name\n    q_func: (tf.Variable, int, str, bool) -> tf.Variable\n        the model that takes the following inputs:\n            observation_in: object\n                the output of observation placeholder\n            num_actions: int\n             
   number of actions\n            scope: str\n            reuse: bool\n                should be passed to outer variable scope\n        and returns a tensor of shape (batch_size, num_actions) with values of every action.\n    num_actions: int\n        number of actions.\n    scope: str or VariableScope\n        optional scope for variable_scope.\n    reuse: bool or None\n        whether or not the variables should be reused. To be able to reuse the scope must be given.\n\n    Returns\n    -------\n    act: (tf.Variable, bool, float) -> tf.Variable\n        function to select and action given observation.\n`       See the top of the file for details.", "id": "f1285:m4"}
{"signature": "def mlp(hiddens=[], layer_norm=False):", "body": "return lambda *args, **kwargs: _mlp(hiddens, layer_norm=layer_norm, *args, **kwargs)<EOL>", "docstring": "This model takes as input an observation and returns values of all actions.\n\n    Parameters\n    ----------\n    hiddens: [int]\n        list of sizes of hidden layers\n    layer_norm: bool\n        if true applies layer normalization for every layer\n        as described in https://arxiv.org/abs/1607.06450\n\n    Returns\n    -------\n    q_func: function\n        q_function for DQN algorithm.", "id": "f1286:m1"}
{"signature": "def get_task(benchmark, env_id):", "body": "return next(filter(lambda task: task['<STR_LIT>'] == env_id, benchmark['<STR_LIT>']), None)<EOL>", "docstring": "Get a task by env_id. Return None if the benchmark doesn't have the env", "id": "f1300:m3"}
{"signature": "def learn(*,<EOL>network,<EOL>env,<EOL>total_timesteps,<EOL>timesteps_per_batch=<NUM_LIT>, <EOL>max_kl=<NUM_LIT>,<EOL>cg_iters=<NUM_LIT:10>,<EOL>gamma=<NUM_LIT>,<EOL>lam=<NUM_LIT:1.0>, <EOL>seed=None,<EOL>ent_coef=<NUM_LIT:0.0>,<EOL>cg_damping=<NUM_LIT>,<EOL>vf_stepsize=<NUM_LIT>,<EOL>vf_iters =<NUM_LIT:3>,<EOL>max_episodes=<NUM_LIT:0>, max_iters=<NUM_LIT:0>,  <EOL>callback=None,<EOL>load_path=None,<EOL>**network_kwargs<EOL>):", "body": "if MPI is not None:<EOL><INDENT>nworkers = MPI.COMM_WORLD.Get_size()<EOL>rank = MPI.COMM_WORLD.Get_rank()<EOL><DEDENT>else:<EOL><INDENT>nworkers = <NUM_LIT:1><EOL>rank = <NUM_LIT:0><EOL><DEDENT>cpus_per_worker = <NUM_LIT:1><EOL>U.get_session(config=tf.ConfigProto(<EOL>allow_soft_placement=True,<EOL>inter_op_parallelism_threads=cpus_per_worker,<EOL>intra_op_parallelism_threads=cpus_per_worker<EOL>))<EOL>policy = build_policy(env, network, value_network='<STR_LIT>', **network_kwargs)<EOL>set_global_seeds(seed)<EOL>np.set_printoptions(precision=<NUM_LIT:3>)<EOL>ob_space = env.observation_space<EOL>ac_space = env.action_space<EOL>ob = observation_placeholder(ob_space)<EOL>with tf.variable_scope(\"<STR_LIT>\"):<EOL><INDENT>pi = policy(observ_placeholder=ob)<EOL><DEDENT>with tf.variable_scope(\"<STR_LIT>\"):<EOL><INDENT>oldpi = policy(observ_placeholder=ob)<EOL><DEDENT>atarg = tf.placeholder(dtype=tf.float32, shape=[None]) <EOL>ret = tf.placeholder(dtype=tf.float32, shape=[None]) <EOL>ac = pi.pdtype.sample_placeholder([None])<EOL>kloldnew = oldpi.pd.kl(pi.pd)<EOL>ent = pi.pd.entropy()<EOL>meankl = tf.reduce_mean(kloldnew)<EOL>meanent = tf.reduce_mean(ent)<EOL>entbonus = ent_coef * meanent<EOL>vferr = tf.reduce_mean(tf.square(pi.vf - ret))<EOL>ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) <EOL>surrgain = tf.reduce_mean(ratio * atarg)<EOL>optimgain = surrgain + entbonus<EOL>losses = [optimgain, meankl, entbonus, surrgain, meanent]<EOL>loss_names = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", 
\"<STR_LIT>\"]<EOL>dist = meankl<EOL>all_var_list = get_trainable_variables(\"<STR_LIT>\")<EOL>var_list = get_pi_trainable_variables(\"<STR_LIT>\")<EOL>vf_var_list = get_vf_trainable_variables(\"<STR_LIT>\")<EOL>vfadam = MpiAdam(vf_var_list)<EOL>get_flat = U.GetFlat(var_list)<EOL>set_from_flat = U.SetFromFlat(var_list)<EOL>klgrads = tf.gradients(dist, var_list)<EOL>flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name=\"<STR_LIT>\")<EOL>shapes = [var.get_shape().as_list() for var in var_list]<EOL>start = <NUM_LIT:0><EOL>tangents = []<EOL>for shape in shapes:<EOL><INDENT>sz = U.intprod(shape)<EOL>tangents.append(tf.reshape(flat_tangent[start:start+sz], shape))<EOL>start += sz<EOL><DEDENT>gvp = tf.add_n([tf.reduce_sum(g*tangent) for (g, tangent) in zipsame(klgrads, tangents)]) <EOL>fvp = U.flatgrad(gvp, var_list)<EOL>assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)<EOL>for (oldv, newv) in zipsame(get_variables(\"<STR_LIT>\"), get_variables(\"<STR_LIT>\"))])<EOL>compute_losses = U.function([ob, ac, atarg], losses)<EOL>compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)])<EOL>compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)<EOL>compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list))<EOL>@contextmanager<EOL>def timed(msg):<EOL><INDENT>if rank == <NUM_LIT:0>:<EOL><INDENT>print(colorize(msg, color='<STR_LIT>'))<EOL>tstart = time.time()<EOL>yield<EOL>print(colorize(\"<STR_LIT>\"%(time.time() - tstart), color='<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>yield<EOL><DEDENT><DEDENT>def allmean(x):<EOL><INDENT>assert isinstance(x, np.ndarray)<EOL>if MPI is not None:<EOL><INDENT>out = np.empty_like(x)<EOL>MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)<EOL>out /= nworkers<EOL><DEDENT>else:<EOL><INDENT>out = np.copy(x)<EOL><DEDENT>return out<EOL><DEDENT>U.initialize()<EOL>if load_path is not None:<EOL><INDENT>pi.load(load_path)<EOL><DEDENT>th_init = get_flat()<EOL>if MPI is not 
None:<EOL><INDENT>MPI.COMM_WORLD.Bcast(th_init, root=<NUM_LIT:0>)<EOL><DEDENT>set_from_flat(th_init)<EOL>vfadam.sync()<EOL>print(\"<STR_LIT>\", th_init.sum(), flush=True)<EOL>seg_gen = traj_segment_generator(pi, env, timesteps_per_batch, stochastic=True)<EOL>episodes_so_far = <NUM_LIT:0><EOL>timesteps_so_far = <NUM_LIT:0><EOL>iters_so_far = <NUM_LIT:0><EOL>tstart = time.time()<EOL>lenbuffer = deque(maxlen=<NUM_LIT>) <EOL>rewbuffer = deque(maxlen=<NUM_LIT>) <EOL>if sum([max_iters><NUM_LIT:0>, total_timesteps><NUM_LIT:0>, max_episodes><NUM_LIT:0>])==<NUM_LIT:0>:<EOL><INDENT>return pi<EOL><DEDENT>assert sum([max_iters><NUM_LIT:0>, total_timesteps><NUM_LIT:0>, max_episodes><NUM_LIT:0>]) < <NUM_LIT:2>,'<STR_LIT>'<EOL>while True:<EOL><INDENT>if callback: callback(locals(), globals())<EOL>if total_timesteps and timesteps_so_far >= total_timesteps:<EOL><INDENT>break<EOL><DEDENT>elif max_episodes and episodes_so_far >= max_episodes:<EOL><INDENT>break<EOL><DEDENT>elif max_iters and iters_so_far >= max_iters:<EOL><INDENT>break<EOL><DEDENT>logger.log(\"<STR_LIT>\"%iters_so_far)<EOL>with timed(\"<STR_LIT>\"):<EOL><INDENT>seg = seg_gen.__next__()<EOL><DEDENT>add_vtarg_and_adv(seg, gamma, lam)<EOL>ob, ac, atarg, tdlamret = seg[\"<STR_LIT>\"], seg[\"<STR_LIT>\"], seg[\"<STR_LIT>\"], seg[\"<STR_LIT>\"]<EOL>vpredbefore = seg[\"<STR_LIT>\"] <EOL>atarg = (atarg - atarg.mean()) / atarg.std() <EOL>if hasattr(pi, \"<STR_LIT>\"): pi.ret_rms.update(tdlamret)<EOL>if hasattr(pi, \"<STR_LIT>\"): pi.ob_rms.update(ob) <EOL>args = seg[\"<STR_LIT>\"], seg[\"<STR_LIT>\"], atarg<EOL>fvpargs = [arr[::<NUM_LIT:5>] for arr in args]<EOL>def fisher_vector_product(p):<EOL><INDENT>return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p<EOL><DEDENT>assign_old_eq_new() <EOL>with timed(\"<STR_LIT>\"):<EOL><INDENT>*lossbefore, g = compute_lossandgrad(*args)<EOL><DEDENT>lossbefore = allmean(np.array(lossbefore))<EOL>g = allmean(g)<EOL>if np.allclose(g, 
<NUM_LIT:0>):<EOL><INDENT>logger.log(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>with timed(\"<STR_LIT>\"):<EOL><INDENT>stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=rank==<NUM_LIT:0>)<EOL><DEDENT>assert np.isfinite(stepdir).all()<EOL>shs = <NUM_LIT>*stepdir.dot(fisher_vector_product(stepdir))<EOL>lm = np.sqrt(shs / max_kl)<EOL>fullstep = stepdir / lm<EOL>expectedimprove = g.dot(fullstep)<EOL>surrbefore = lossbefore[<NUM_LIT:0>]<EOL>stepsize = <NUM_LIT:1.0><EOL>thbefore = get_flat()<EOL>for _ in range(<NUM_LIT:10>):<EOL><INDENT>thnew = thbefore + fullstep * stepsize<EOL>set_from_flat(thnew)<EOL>meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args)))<EOL>improve = surr - surrbefore<EOL>logger.log(\"<STR_LIT>\"%(expectedimprove, improve))<EOL>if not np.isfinite(meanlosses).all():<EOL><INDENT>logger.log(\"<STR_LIT>\")<EOL><DEDENT>elif kl > max_kl * <NUM_LIT>:<EOL><INDENT>logger.log(\"<STR_LIT>\")<EOL><DEDENT>elif improve < <NUM_LIT:0>:<EOL><INDENT>logger.log(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>logger.log(\"<STR_LIT>\")<EOL>break<EOL><DEDENT>stepsize *= <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>logger.log(\"<STR_LIT>\")<EOL>set_from_flat(thbefore)<EOL><DEDENT>if nworkers > <NUM_LIT:1> and iters_so_far % <NUM_LIT:20> == <NUM_LIT:0>:<EOL><INDENT>paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum())) <EOL>assert all(np.allclose(ps, paramsums[<NUM_LIT:0>]) for ps in paramsums[<NUM_LIT:1>:])<EOL><DEDENT><DEDENT>for (lossname, lossval) in zip(loss_names, meanlosses):<EOL><INDENT>logger.record_tabular(lossname, lossval)<EOL><DEDENT>with timed(\"<STR_LIT>\"):<EOL><INDENT>for _ in range(vf_iters):<EOL><INDENT>for (mbob, mbret) in dataset.iterbatches((seg[\"<STR_LIT>\"], seg[\"<STR_LIT>\"]),<EOL>include_final_partial_batch=False, batch_size=<NUM_LIT:64>):<EOL><INDENT>g = allmean(compute_vflossandgrad(mbob, mbret))<EOL>vfadam.update(g, vf_stepsize)<EOL><DEDENT><DEDENT><DEDENT>logger.record_tabular(\"<STR_LIT>\", 
explained_variance(vpredbefore, tdlamret))<EOL>lrlocal = (seg[\"<STR_LIT>\"], seg[\"<STR_LIT>\"]) <EOL>if MPI is not None:<EOL><INDENT>listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) <EOL><DEDENT>else:<EOL><INDENT>listoflrpairs = [lrlocal]<EOL><DEDENT>lens, rews = map(flatten_lists, zip(*listoflrpairs))<EOL>lenbuffer.extend(lens)<EOL>rewbuffer.extend(rews)<EOL>logger.record_tabular(\"<STR_LIT>\", np.mean(lenbuffer))<EOL>logger.record_tabular(\"<STR_LIT>\", np.mean(rewbuffer))<EOL>logger.record_tabular(\"<STR_LIT>\", len(lens))<EOL>episodes_so_far += len(lens)<EOL>timesteps_so_far += sum(lens)<EOL>iters_so_far += <NUM_LIT:1><EOL>logger.record_tabular(\"<STR_LIT>\", episodes_so_far)<EOL>logger.record_tabular(\"<STR_LIT>\", timesteps_so_far)<EOL>logger.record_tabular(\"<STR_LIT>\", time.time() - tstart)<EOL>if rank==<NUM_LIT:0>:<EOL><INDENT>logger.dump_tabular()<EOL><DEDENT><DEDENT>return pi<EOL>", "docstring": "learn a policy function with TRPO algorithm\n\nParameters:\n----------\n\nnetwork                 neural network to learn. 
Can be either string ('mlp', 'cnn', 'lstm', 'lnlstm' for basic types)\n                        or function that takes input placeholder and returns tuple (output, None) for feedforward nets\n                        or (output, (state_placeholder, state_output, mask_placeholder)) for recurrent nets\n\nenv                     environment (one of the gym environments or wrapped via baselines.common.vec_env.VecEnv-type class\n\ntimesteps_per_batch     timesteps per gradient estimation batch\n\nmax_kl                  max KL divergence between old policy and new policy ( KL(pi_old || pi) )\n\nent_coef                coefficient of policy entropy term in the optimization objective\n\ncg_iters                number of iterations of conjugate gradient algorithm\n\ncg_damping              conjugate gradient damping\n\nvf_stepsize             learning rate for adam optimizer used to optimie value function loss\n\nvf_iters                number of iterations of value function optimization iterations per each policy optimization step\n\ntotal_timesteps           max number of timesteps\n\nmax_episodes            max number of episodes\n\nmax_iters               maximum number of policy optimization iterations\n\ncallback                function to be called with (locals(), globals()) each policy optimization step\n\nload_path               str, path to load the model from (default: None, i.e. no model is loaded)\n\n**network_kwargs        keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network\n\nReturns:\n-------\n\nlearnt model", "id": "f1305:m2"}
{"signature": "def learn(<EOL>network,<EOL>env,<EOL>seed=None,<EOL>nsteps=<NUM_LIT:5>,<EOL>total_timesteps=int(<NUM_LIT>),<EOL>vf_coef=<NUM_LIT:0.5>,<EOL>ent_coef=<NUM_LIT>,<EOL>max_grad_norm=<NUM_LIT:0.5>,<EOL>lr=<NUM_LIT>,<EOL>lrschedule='<STR_LIT>',<EOL>epsilon=<NUM_LIT>,<EOL>alpha=<NUM_LIT>,<EOL>gamma=<NUM_LIT>,<EOL>log_interval=<NUM_LIT:100>,<EOL>load_path=None,<EOL>**network_kwargs):", "body": "set_global_seeds(seed)<EOL>nenvs = env.num_envs<EOL>policy = build_policy(env, network, **network_kwargs)<EOL>model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,<EOL>max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule)<EOL>if load_path is not None:<EOL><INDENT>model.load(load_path)<EOL><DEDENT>runner = Runner(env, model, nsteps=nsteps, gamma=gamma)<EOL>epinfobuf = deque(maxlen=<NUM_LIT:100>)<EOL>nbatch = nenvs*nsteps<EOL>tstart = time.time()<EOL>for update in range(<NUM_LIT:1>, total_timesteps//nbatch+<NUM_LIT:1>):<EOL><INDENT>obs, states, rewards, masks, actions, values, epinfos = runner.run()<EOL>epinfobuf.extend(epinfos)<EOL>policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)<EOL>nseconds = time.time()-tstart<EOL>fps = int((update*nbatch)/nseconds)<EOL>if update % log_interval == <NUM_LIT:0> or update == <NUM_LIT:1>:<EOL><INDENT>ev = explained_variance(values, rewards)<EOL>logger.record_tabular(\"<STR_LIT>\", update)<EOL>logger.record_tabular(\"<STR_LIT>\", update*nbatch)<EOL>logger.record_tabular(\"<STR_LIT>\", fps)<EOL>logger.record_tabular(\"<STR_LIT>\", float(policy_entropy))<EOL>logger.record_tabular(\"<STR_LIT>\", float(value_loss))<EOL>logger.record_tabular(\"<STR_LIT>\", float(ev))<EOL>logger.record_tabular(\"<STR_LIT>\", safemean([epinfo['<STR_LIT:r>'] for epinfo in epinfobuf]))<EOL>logger.record_tabular(\"<STR_LIT>\", safemean([epinfo['<STR_LIT:l>'] for epinfo in 
epinfobuf]))<EOL>logger.dump_tabular()<EOL><DEDENT><DEDENT>return model<EOL>", "docstring": "Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm.\n\nParameters:\n-----------\n\nnetwork:            policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)\n                    specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns\n                    tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward\n                    neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.\n                    See baselines.common/policies.py/lstm for more details on using recurrent nets in policies\n\n\nenv:                RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py)\n\n\nseed:               seed to make random number sequence in the alorightm reproducible. By default is None which means seed from system noise generator (not reproducible)\n\nnsteps:             int, number of steps of the vectorized environment per update (i.e. 
batch size is nsteps * nenv where\n                    nenv is number of environment copies simulated in parallel)\n\ntotal_timesteps:    int, total number of timesteps to train on (default: 80M)\n\nvf_coef:            float, coefficient in front of value function loss in the total loss function (default: 0.5)\n\nent_coef:           float, coeffictiant in front of the policy entropy in the total loss function (default: 0.01)\n\nmax_gradient_norm:  float, gradient is clipped to have global L2 norm no more than this value (default: 0.5)\n\nlr:                 float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)\n\nlrschedule:         schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and\n                    returns fraction of the learning rate (specified as lr) as output\n\nepsilon:            float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)\n\nalpha:              float, RMSProp decay parameter (default: 0.99)\n\ngamma:              float, reward discounting parameter (default: 0.99)\n\nlog_interval:       int, specifies how frequently the logs are printed out (default: 100)\n\n**network_kwargs:   keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network\n                    For instance, 'mlp' network architecture has arguments num_hidden and num_layers.", "id": "f1319:m0"}
{"signature": "def __init__(self, env):", "body": "gym.Wrapper.__init__(self, env)<EOL>self.lives = <NUM_LIT:0><EOL>self.was_real_done  = True<EOL>", "docstring": "Make end-of-life == end-of-episode, but only reset on true game over.\n        Done by DeepMind for the DQN and co. since it helps value estimation.", "id": "f1340:c2:m0"}
{"signature": "def reset(self, **kwargs):", "body": "self.env.reset(**kwargs)<EOL>if self.override_num_noops is not None:<EOL><INDENT>noops = self.override_num_noops<EOL><DEDENT>else:<EOL><INDENT>noops = self.unwrapped.np_random.randint(<NUM_LIT:1>, self.noop_max + <NUM_LIT:1>) <EOL><DEDENT>assert noops > <NUM_LIT:0><EOL>obs = None<EOL>for _ in range(noops):<EOL><INDENT>obs, _, done, _ = self.env.step(self.noop_action)<EOL>if done:<EOL><INDENT>obs = self.env.reset(**kwargs)<EOL><DEDENT><DEDENT>return obs<EOL>", "docstring": "Do no-op action for a number of steps in [1, noop_max].", "id": "f1340:c0:m1"}
{"signature": "def __init__(self, env, k):", "body": "gym.Wrapper.__init__(self, env)<EOL>self.k = k<EOL>self.frames = deque([], maxlen=k)<EOL>shp = env.observation_space.shape<EOL>self.observation_space = spaces.Box(low=<NUM_LIT:0>, high=<NUM_LIT:255>, shape=(shp[:-<NUM_LIT:1>] + (shp[-<NUM_LIT:1>] * k,)), dtype=env.observation_space.dtype)<EOL>", "docstring": "Stack k last frames.\n\n        Returns lazy array, which is much more memory efficient.\n\n        See Also\n        --------\n        baselines.common.atari_wrappers.LazyFrames", "id": "f1340:c6:m0"}
{"signature": "def __init__(self, env):", "body": "gym.Wrapper.__init__(self, env)<EOL>assert env.unwrapped.get_action_meanings()[<NUM_LIT:1>] == '<STR_LIT>'<EOL>assert len(env.unwrapped.get_action_meanings()) >= <NUM_LIT:3><EOL>", "docstring": "Take action on reset for environments that are fixed until firing.", "id": "f1340:c1:m0"}
{"signature": "def explained_variance(ypred,y):", "body": "assert y.ndim == <NUM_LIT:1> and ypred.ndim == <NUM_LIT:1><EOL>vary = np.var(y)<EOL>return np.nan if vary==<NUM_LIT:0> else <NUM_LIT:1> - np.var(y-ypred)/vary<EOL>", "docstring": "Computes fraction of variance that ypred explains about y.\nReturns 1 - Var[y-ypred] / Var[y]\n\ninterpretation:\n    ev=0  =>  might as well have predicted zero\n    ev=1  =>  perfect prediction\n    ev<0  =>  worse than just predicting zero", "id": "f1341:m1"}
{"signature": "def make_robotics_env(env_id, seed, rank=<NUM_LIT:0>):", "body": "set_global_seeds(seed)<EOL>env = gym.make(env_id)<EOL>env = FlattenDictWrapper(env, ['<STR_LIT>', '<STR_LIT>'])<EOL>env = Monitor(<EOL>env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),<EOL>info_keywords=('<STR_LIT>',))<EOL>env.seed(seed)<EOL>return env<EOL>", "docstring": "Create a wrapped, monitored gym.Env for MuJoCo.", "id": "f1342:m3"}
{"signature": "def robotics_arg_parser():", "body": "parser = arg_parser()<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', type=str, default='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', type=int, default=None)<EOL>parser.add_argument('<STR_LIT>', type=int, default=int(<NUM_LIT>))<EOL>return parser<EOL>", "docstring": "Create an argparse.ArgumentParser for run_mujoco.py.", "id": "f1342:m8"}
{"signature": "def parse_unknown_args(args):", "body": "retval = {}<EOL>preceded_by_key = False<EOL>for arg in args:<EOL><INDENT>if arg.startswith('<STR_LIT>'):<EOL><INDENT>if '<STR_LIT:=>' in arg:<EOL><INDENT>key = arg.split('<STR_LIT:=>')[<NUM_LIT:0>][<NUM_LIT:2>:]<EOL>value = arg.split('<STR_LIT:=>')[<NUM_LIT:1>]<EOL>retval[key] = value<EOL><DEDENT>else:<EOL><INDENT>key = arg[<NUM_LIT:2>:]<EOL>preceded_by_key = True<EOL><DEDENT><DEDENT>elif preceded_by_key:<EOL><INDENT>retval[key] = arg<EOL>preceded_by_key = False<EOL><DEDENT><DEDENT>return retval<EOL>", "docstring": "Parse arguments not consumed by arg parser into a dicitonary", "id": "f1342:m9"}
{"signature": "def make_vec_env(env_id, env_type, num_env, seed,<EOL>wrapper_kwargs=None,<EOL>start_index=<NUM_LIT:0>,<EOL>reward_scale=<NUM_LIT:1.0>,<EOL>flatten_dict_observations=True,<EOL>gamestate=None):", "body": "wrapper_kwargs = wrapper_kwargs or {}<EOL>mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else <NUM_LIT:0><EOL>seed = seed + <NUM_LIT> * mpi_rank if seed is not None else None<EOL>logger_dir = logger.get_dir()<EOL>def make_thunk(rank):<EOL><INDENT>return lambda: make_env(<EOL>env_id=env_id,<EOL>env_type=env_type,<EOL>mpi_rank=mpi_rank,<EOL>subrank=rank,<EOL>seed=seed,<EOL>reward_scale=reward_scale,<EOL>gamestate=gamestate,<EOL>flatten_dict_observations=flatten_dict_observations,<EOL>wrapper_kwargs=wrapper_kwargs,<EOL>logger_dir=logger_dir<EOL>)<EOL><DEDENT>set_global_seeds(seed)<EOL>if num_env > <NUM_LIT:1>:<EOL><INDENT>return SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)])<EOL><DEDENT>else:<EOL><INDENT>return DummyVecEnv([make_thunk(start_index)])<EOL><DEDENT>", "docstring": "Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.", "id": "f1342:m0"}
{"signature": "def atari_arg_parser():", "body": "print('<STR_LIT>')<EOL>return common_arg_parser()<EOL>", "docstring": "Create an argparse.ArgumentParser for run_atari.py.", "id": "f1342:m5"}
{"signature": "def common_arg_parser():", "body": "parser = arg_parser()<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', type=str, default='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', type=str)<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', type=int, default=None)<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', type=str, default='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', type=float, default=<NUM_LIT>),<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', default=None)<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', default=None)<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', default=None, type=int)<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', default=<NUM_LIT:1.0>, type=float)<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', default=None, type=str)<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', default=<NUM_LIT:0>, type=int)<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>', default=<NUM_LIT:200>, type=int)<EOL>parser.add_argument('<STR_LIT>', default=False, action='<STR_LIT:store_true>')<EOL>return parser<EOL>", "docstring": "Create an argparse.ArgumentParser for run_mujoco.py.", "id": "f1342:m7"}
{"signature": "def make_mujoco_env(env_id, seed, reward_scale=<NUM_LIT:1.0>):", "body": "rank = MPI.COMM_WORLD.Get_rank()<EOL>myseed = seed  + <NUM_LIT:1000> * rank if seed is not None else None<EOL>set_global_seeds(myseed)<EOL>env = gym.make(env_id)<EOL>logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))<EOL>env = Monitor(env, logger_path, allow_early_resets=True)<EOL>env.seed(seed)<EOL>if reward_scale != <NUM_LIT:1.0>:<EOL><INDENT>from baselines.common.retro_wrappers import RewardScaler<EOL>env = RewardScaler(env, reward_scale)<EOL><DEDENT>return env<EOL>", "docstring": "Create a wrapped, monitored gym.Env for MuJoCo.", "id": "f1342:m2"}
{"signature": "def arg_parser():", "body": "import argparse<EOL>return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)<EOL>", "docstring": "Create an empty argparse.ArgumentParser.", "id": "f1342:m4"}
{"signature": "def obs_to_dict(obs):", "body": "if isinstance(obs, dict):<EOL><INDENT>return obs<EOL><DEDENT>return {None: obs}<EOL>", "docstring": "Convert an observation into a dict.", "id": "f1343:m3"}
{"signature": "def copy_obs_dict(obs):", "body": "return {k: np.copy(v) for k, v in obs.items()}<EOL>", "docstring": "Deep-copy an observation dict.", "id": "f1343:m0"}
{"signature": "def __init__(self, env_fns, spaces=None, context='<STR_LIT>'):", "body": "self.waiting = False<EOL>self.closed = False<EOL>nenvs = len(env_fns)<EOL>ctx = mp.get_context(context)<EOL>self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(nenvs)])<EOL>self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))<EOL>for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]<EOL>for p in self.ps:<EOL><INDENT>p.daemon = True  <EOL>with clear_mpi_env_vars():<EOL><INDENT>p.start()<EOL><DEDENT><DEDENT>for remote in self.work_remotes:<EOL><INDENT>remote.close()<EOL><DEDENT>self.remotes[<NUM_LIT:0>].send(('<STR_LIT>', None))<EOL>observation_space, action_space, self.spec = self.remotes[<NUM_LIT:0>].recv()<EOL>self.viewer = None<EOL>VecEnv.__init__(self, len(env_fns), observation_space, action_space)<EOL>", "docstring": "Arguments:\n\nenv_fns: iterable of callables -  functions that create environments to run in subprocesses. Need to be cloud-pickleable", "id": "f1344:c0:m0"}
{"signature": "def close_extras(self):", "body": "pass<EOL>", "docstring": "Clean up the  extra resources, beyond what's in this base class.\nOnly runs when not self.closed.", "id": "f1352:c2:m4"}
{"signature": "def get_images(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Return RGB images from each environment", "id": "f1352:c2:m8"}
{"signature": "def step(self, actions):", "body": "self.step_async(actions)<EOL>return self.step_wait()<EOL>", "docstring": "Step the environments synchronously.\n\nThis is available for backwards compatibility.", "id": "f1352:c2:m6"}
{"signature": "def encode_observation(ob_space, placeholder):", "body": "if isinstance(ob_space, Discrete):<EOL><INDENT>return tf.to_float(tf.one_hot(placeholder, ob_space.n))<EOL><DEDENT>elif isinstance(ob_space, Box):<EOL><INDENT>return tf.to_float(placeholder)<EOL><DEDENT>elif isinstance(ob_space, MultiDiscrete):<EOL><INDENT>placeholder = tf.cast(placeholder, tf.int32)<EOL>one_hots = [tf.to_float(tf.one_hot(placeholder[..., i], ob_space.nvec[i])) for i in range(placeholder.shape[-<NUM_LIT:1>])]<EOL>return tf.concat(one_hots, axis=-<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError<EOL><DEDENT>", "docstring": "Encode input in the way that is appropriate to the observation space\n\nParameters:\n----------\n\nob_space: gym.Space             observation space\n\nplaceholder: tf.placeholder     observation input placeholder", "id": "f1356:m2"}
{"signature": "def observation_input(ob_space, batch_size=None, name='<STR_LIT>'):", "body": "placeholder = observation_placeholder(ob_space, batch_size, name)<EOL>return placeholder, encode_observation(ob_space, placeholder)<EOL>", "docstring": "Create placeholder to feed observations into of the size appropriate to the observation space, and add input\nencoder of the appropriate type.", "id": "f1356:m1"}
{"signature": "def min(self, start=<NUM_LIT:0>, end=None):", "body": "return super(MinSegmentTree, self).reduce(start, end)<EOL>", "docstring": "Returns min(arr[start], ...,  arr[end])", "id": "f1357:c2:m1"}
{"signature": "def adjust_shape(placeholder, data):", "body": "if not isinstance(data, np.ndarray) and not isinstance(data, list):<EOL><INDENT>return data<EOL><DEDENT>if isinstance(data, list):<EOL><INDENT>data = np.array(data)<EOL><DEDENT>placeholder_shape = [x or -<NUM_LIT:1> for x in placeholder.shape.as_list()]<EOL>assert _check_shape(placeholder_shape, data.shape),'<STR_LIT>'.format(data.shape, placeholder_shape)<EOL>return np.reshape(data, placeholder_shape)<EOL>", "docstring": "adjust shape of the data to the shape of the placeholder if possible.\nIf shape is incompatible, AssertionError is thrown\n\nParameters:\n    placeholder     tensorflow input placeholder\n\n    data            input data to be (potentially) reshaped to be fed into placeholder\n\nReturns:\n    reshaped data", "id": "f1363:m24"}
{"signature": "def launch_tensorboard_in_background(log_dir):", "body": "import subprocess<EOL>subprocess.Popen(['<STR_LIT>', '<STR_LIT>', log_dir])<EOL>", "docstring": "To log the Tensorflow graph when using rl-algs\nalgorithms, you can run the following code\nin your main script:\n    import threading, time\n    def start_tensorboard(session):\n        time.sleep(10) # Wait until graph is setup\n        tb_path = osp.join(logger.get_dir(), 'tb')\n        summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph)\n        summary_op = tf.summary.merge_all()\n        launch_tensorboard_in_background(tb_path)\n    session = tf.get_default_session()\n    t = threading.Thread(target=start_tensorboard, args=([session]))\n    t.start()", "id": "f1363:m27"}
{"signature": "def get_session(config=None):", "body": "sess = tf.get_default_session()<EOL>if sess is None:<EOL><INDENT>sess = make_session(config=config, make_default=True)<EOL><DEDENT>return sess<EOL>", "docstring": "Get default session or create one with a given config", "id": "f1363:m3"}
{"signature": "def _check_shape(placeholder_shape, data_shape):", "body": "return True<EOL>squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)<EOL>squeezed_data_shape = _squeeze_shape(data_shape)<EOL>for i, s_data in enumerate(squeezed_data_shape):<EOL><INDENT>s_placeholder = squeezed_placeholder_shape[i]<EOL>if s_placeholder != -<NUM_LIT:1> and s_data != s_placeholder:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)", "id": "f1363:m25"}
{"signature": "def function(inputs, outputs, updates=None, givens=None):", "body": "if isinstance(outputs, list):<EOL><INDENT>return _Function(inputs, outputs, updates, givens=givens)<EOL><DEDENT>elif isinstance(outputs, (dict, collections.OrderedDict)):<EOL><INDENT>f = _Function(inputs, outputs.values(), updates, givens=givens)<EOL>return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))<EOL><DEDENT>else:<EOL><INDENT>f = _Function(inputs, [outputs], updates, givens=givens)<EOL>return lambda *args, **kwargs: f(*args, **kwargs)[<NUM_LIT:0>]<EOL><DEDENT>", "docstring": "Just like Theano function. Take a bunch of tensorflow placeholders and expressions\n    computed based on those placeholders and produces f(inputs) -> outputs. Function f takes\n    values to be fed to the input's placeholders and produces the values of the expressions\n    in outputs.\n\n    Input values can be passed in the same order as inputs or can be provided as kwargs based\n    on placeholder name (passed to constructor or accessible via placeholder.op.name).\n\n    Example:\n        x = tf.placeholder(tf.int32, (), name=\"x\")\n        y = tf.placeholder(tf.int32, (), name=\"y\")\n        z = 3 * x + 2 * y\n        lin = function([x, y], z, givens={y: 0})\n\n        with single_threaded_session():\n            initialize()\n\n            assert lin(2) == 6\n            assert lin(x=3) == 9\n            assert lin(2, 2) == 10\n            assert lin(x=2, y=3) == 12\n\n    Parameters\n    ----------\n    inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]\n        list of input arguments\n    outputs: [tf.Variable] or tf.Variable\n        list of outputs or a single output to be returned from function. Returned\n        value will also have the same shape.\n    updates: [tf.Operation] or tf.Operation\n        list of update functions or single update function that will be run whenever\n        the function is called. 
The return is ignored.", "id": "f1363:m10"}
{"signature": "def symmetric_ema(xolds, yolds, low=None, high=None, n=<NUM_LIT>, decay_steps=<NUM_LIT:1.>, low_counts_threshold=<NUM_LIT>):", "body": "xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=<NUM_LIT:0>)<EOL>_,  ys2, count_ys2 = one_sided_ema(-xolds[::-<NUM_LIT:1>], yolds[::-<NUM_LIT:1>], -high, -low, n, decay_steps, low_counts_threshold=<NUM_LIT:0>)<EOL>ys2 = ys2[::-<NUM_LIT:1>]<EOL>count_ys2 = count_ys2[::-<NUM_LIT:1>]<EOL>count_ys = count_ys1 + count_ys2<EOL>ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys<EOL>ys[count_ys < low_counts_threshold] = np.nan<EOL>return xs, ys, count_ys<EOL>", "docstring": "perform symmetric EMA (exponential moving average)\nsmoothing and resampling to an even grid with n points.\nDoes not do extrapolation, so we assume\nxolds[0] <= low && high <= xolds[-1]\n\nArguments:\n\nxolds: array or list  - x values of data. Needs to be sorted in ascending order\nyolds: array of list  - y values of data. Has to have the same length as xolds\n\nlow: float            - min value of the new x grid. By default equals to xolds[0]\nhigh: float           - max value of the new x grid. By default equals to xolds[-1]\n\nn: int                - number of points in new x grid\n\ndecay_steps: float    - EMA decay factor, expressed in new x grid steps.\n\nlow_counts_threshold: float or int\n                      - y values with counts less than this value will be set to NaN\n\nReturns:\n    tuple sum_ys, count_ys where\n        xs        - array with new x grid\n        ys        - array of EMA of y at each point of the new x grid\n        count_ys  - array of EMA of y counts at each point of the new x grid", "id": "f1364:m2"}
{"signature": "def one_sided_ema(xolds, yolds, low=None, high=None, n=<NUM_LIT>, decay_steps=<NUM_LIT:1.>, low_counts_threshold=<NUM_LIT>):", "body": "low = xolds[<NUM_LIT:0>] if low is None else low<EOL>high = xolds[-<NUM_LIT:1>] if high is None else high<EOL>assert xolds[<NUM_LIT:0>] <= low, '<STR_LIT>'.format(low, xolds[<NUM_LIT:0>])<EOL>assert xolds[-<NUM_LIT:1>] >= high, '<STR_LIT>'.format(high, xolds[-<NUM_LIT:1>])<EOL>assert len(xolds) == len(yolds), '<STR_LIT>'.format(len(xolds), len(yolds))<EOL>xolds = xolds.astype('<STR_LIT>')<EOL>yolds = yolds.astype('<STR_LIT>')<EOL>luoi = <NUM_LIT:0> <EOL>sum_y = <NUM_LIT:0.><EOL>count_y = <NUM_LIT:0.><EOL>xnews = np.linspace(low, high, n)<EOL>decay_period = (high - low) / (n - <NUM_LIT:1>) * decay_steps<EOL>interstep_decay = np.exp(- <NUM_LIT:1.> / decay_steps)<EOL>sum_ys = np.zeros_like(xnews)<EOL>count_ys = np.zeros_like(xnews)<EOL>for i in range(n):<EOL><INDENT>xnew = xnews[i]<EOL>sum_y *= interstep_decay<EOL>count_y *= interstep_decay<EOL>while True:<EOL><INDENT>xold = xolds[luoi]<EOL>if xold <= xnew:<EOL><INDENT>decay = np.exp(- (xnew - xold) / decay_period)<EOL>sum_y += decay * yolds[luoi]<EOL>count_y += decay<EOL>luoi += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT>if luoi >= len(xolds):<EOL><INDENT>break<EOL><DEDENT><DEDENT>sum_ys[i] = sum_y<EOL>count_ys[i] = count_y<EOL><DEDENT>ys = sum_ys / count_ys<EOL>ys[count_ys < low_counts_threshold] = np.nan<EOL>return xnews, ys, count_ys<EOL>", "docstring": "perform one-sided (causal) EMA (exponential moving average)\nsmoothing and resampling to an even grid with n points.\nDoes not do extrapolation, so we assume\nxolds[0] <= low && high <= xolds[-1]\n\nArguments:\n\nxolds: array or list  - x values of data. Needs to be sorted in ascending order\nyolds: array of list  - y values of data. Has to have the same length as xolds\n\nlow: float            - min value of the new x grid. 
By default equals to xolds[0]\nhigh: float           - max value of the new x grid. By default equals to xolds[-1]\n\nn: int                - number of points in new x grid\n\ndecay_steps: float    - EMA decay factor, expressed in new x grid steps.\n\nlow_counts_threshold: float or int\n                      - y values with counts less than this value will be set to NaN\n\nReturns:\n    tuple sum_ys, count_ys where\n        xs        - array with new x grid\n        ys        - array of EMA of y at each point of the new x grid\n        count_ys  - array of EMA of y counts at each point of the new x grid", "id": "f1364:m1"}
{"signature": "def plot_results(<EOL>allresults, *,<EOL>xy_fn=default_xy_fn,<EOL>split_fn=default_split_fn,<EOL>group_fn=default_split_fn,<EOL>average_group=False,<EOL>shaded_std=True,<EOL>shaded_err=True,<EOL>figsize=None,<EOL>legend_outside=False,<EOL>resample=<NUM_LIT:0>,<EOL>smooth_step=<NUM_LIT:1.0><EOL>):", "body": "if split_fn is None: split_fn = lambda _ : '<STR_LIT>'<EOL>if group_fn is None: group_fn = lambda _ : '<STR_LIT>'<EOL>sk2r = defaultdict(list) <EOL>for result in allresults:<EOL><INDENT>splitkey = split_fn(result)<EOL>sk2r[splitkey].append(result)<EOL><DEDENT>assert len(sk2r) > <NUM_LIT:0><EOL>assert isinstance(resample, int), \"<STR_LIT>\"<EOL>nrows = len(sk2r)<EOL>ncols = <NUM_LIT:1><EOL>figsize = figsize or (<NUM_LIT:6>, <NUM_LIT:6> * nrows)<EOL>f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize)<EOL>groups = list(set(group_fn(result) for result in allresults))<EOL>default_samples = <NUM_LIT><EOL>if average_group:<EOL><INDENT>resample = resample or default_samples<EOL><DEDENT>for (isplit, sk) in enumerate(sorted(sk2r.keys())):<EOL><INDENT>g2l = {}<EOL>g2c = defaultdict(int)<EOL>sresults = sk2r[sk]<EOL>gresults = defaultdict(list)<EOL>ax = axarr[isplit][<NUM_LIT:0>]<EOL>for result in sresults:<EOL><INDENT>group = group_fn(result)<EOL>g2c[group] += <NUM_LIT:1><EOL>x, y = xy_fn(result)<EOL>if x is None: x = np.arange(len(y))<EOL>x, y = map(np.asarray, (x, y))<EOL>if average_group:<EOL><INDENT>gresults[group].append((x,y))<EOL><DEDENT>else:<EOL><INDENT>if resample:<EOL><INDENT>x, y, counts = symmetric_ema(x, y, x[<NUM_LIT:0>], x[-<NUM_LIT:1>], resample, decay_steps=smooth_step)<EOL><DEDENT>l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])<EOL>g2l[group] = l<EOL><DEDENT><DEDENT>if average_group:<EOL><INDENT>for group in sorted(groups):<EOL><INDENT>xys = gresults[group]<EOL>if not any(xys):<EOL><INDENT>continue<EOL><DEDENT>color = COLORS[groups.index(group) % len(COLORS)]<EOL>origxs = [xy[<NUM_LIT:0>] 
for xy in xys]<EOL>minxlen = min(map(len, origxs))<EOL>def allequal(qs):<EOL><INDENT>return all((q==qs[<NUM_LIT:0>]).all() for q in qs[<NUM_LIT:1>:])<EOL><DEDENT>if resample:<EOL><INDENT>low  = max(x[<NUM_LIT:0>] for x in origxs)<EOL>high = min(x[-<NUM_LIT:1>] for x in origxs)<EOL>usex = np.linspace(low, high, resample)<EOL>ys = []<EOL>for (x, y) in xys:<EOL><INDENT>ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[<NUM_LIT:1>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>assert allequal([x[:minxlen] for x in origxs]),'<STR_LIT>'<EOL>usex = origxs[<NUM_LIT:0>]<EOL>ys = [xy[<NUM_LIT:1>][:minxlen] for xy in xys]<EOL><DEDENT>ymean = np.mean(ys, axis=<NUM_LIT:0>)<EOL>ystd = np.std(ys, axis=<NUM_LIT:0>)<EOL>ystderr = ystd / np.sqrt(len(ys))<EOL>l, = axarr[isplit][<NUM_LIT:0>].plot(usex, ymean, color=color)<EOL>g2l[group] = l<EOL>if shaded_err:<EOL><INDENT>ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=<NUM_LIT>)<EOL><DEDENT>if shaded_std:<EOL><INDENT>ax.fill_between(usex, ymean - ystd,    ymean + ystd,    color=color, alpha=<NUM_LIT>)<EOL><DEDENT><DEDENT><DEDENT>plt.tight_layout()<EOL>if any(g2l.keys()):<EOL><INDENT>ax.legend(<EOL>g2l.values(),<EOL>['<STR_LIT>'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(),<EOL>loc=<NUM_LIT:2> if legend_outside else None,<EOL>bbox_to_anchor=(<NUM_LIT:1>,<NUM_LIT:1>) if legend_outside else None)<EOL><DEDENT>ax.set_title(sk)<EOL><DEDENT>return f, axarr<EOL>", "docstring": "Plot multiple Results objects\n\nxy_fn: function Result -> x,y           - function that converts results objects into tuple of x and y values.\n                                          By default, x is cumsum of episode lengths, and y is episode rewards\n\nsplit_fn: function Result -> hashable   - function that converts results objects into keys to split curves into sub-panels by.\n                                          That is, the results r for which split_fn(r) is different will be put on different 
sub-panels.\n                                          By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are\n                                          stacked vertically in the figure.\n\ngroup_fn: function Result -> hashable   - function that converts results objects into keys to group curves by.\n                                          That is, the results r for which group_fn(r) is the same will be put into the same group.\n                                          Curves in the same group have the same color (if average_group is False), or averaged over\n                                          (if average_group is True). The default value is the same as default value for split_fn\n\naverage_group: bool                     - if True, will average the curves in the same group and plot the mean. Enables resampling\n                                          (if resample = 0, will use 512 steps)\n\nshaded_std: bool                        - if True (default), the shaded region corresponding to standard deviation of the group of curves will be\n                                          shown (only applicable if average_group = True)\n\nshaded_err: bool                        - if True (default), the shaded region corresponding to error in mean estimate of the group of curves\n                                          (that is, standard deviation divided by square root of number of curves) will be\n                                          shown (only applicable if average_group = True)\n\nfigsize: tuple or None                  - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of\n                                          sub-panels.\n\n\nlegend_outside: bool                    - if True, will place the legend outside of the sub-panels.\n\nresample: int                           - if not zero, size of the uniform grid in x direction to resample onto. 
Resampling is performed via symmetric\n                                          EMA smoothing (see the docstring for symmetric_ema).\n                                          Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default\n                                          value is 512.\n\nsmooth_step: float                      - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).\n                                          See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.", "id": "f1364:m6"}
{"signature": "def cg(f_Ax, b, cg_iters=<NUM_LIT:10>, callback=None, verbose=False, residual_tol=<NUM_LIT>):", "body": "p = b.copy()<EOL>r = b.copy()<EOL>x = np.zeros_like(b)<EOL>rdotr = r.dot(r)<EOL>fmtstr =  \"<STR_LIT>\"<EOL>titlestr =  \"<STR_LIT>\"<EOL>if verbose: print(titlestr % (\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"))<EOL>for i in range(cg_iters):<EOL><INDENT>if callback is not None:<EOL><INDENT>callback(x)<EOL><DEDENT>if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x)))<EOL>z = f_Ax(p)<EOL>v = rdotr / p.dot(z)<EOL>x += v*p<EOL>r -= v*z<EOL>newrdotr = r.dot(r)<EOL>mu = newrdotr/rdotr<EOL>p = r + mu*p<EOL>rdotr = newrdotr<EOL>if rdotr < residual_tol:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if callback is not None:<EOL><INDENT>callback(x)<EOL><DEDENT>if verbose: print(fmtstr % (i+<NUM_LIT:1>, rdotr, np.linalg.norm(x)))  <EOL>return x<EOL>", "docstring": "Demmel p 312", "id": "f1365:m0"}
{"signature": "def setup_mpi_gpus():", "body": "if '<STR_LIT>' not in os.environ:<EOL><INDENT>if sys.platform == '<STR_LIT>': <EOL><INDENT>ids = []                 <EOL><DEDENT>else:<EOL><INDENT>lrank, _lsize = get_local_rank_size(MPI.COMM_WORLD)<EOL>ids = [lrank]<EOL><DEDENT>os.environ[\"<STR_LIT>\"] = \"<STR_LIT:U+002C>\".join(map(str, ids))<EOL><DEDENT>", "docstring": "Set CUDA_VISIBLE_DEVICES to MPI rank if not already set", "id": "f1366:m2"}
{"signature": "def dict_gather(comm, d, op='<STR_LIT>', assert_all_have_data=True):", "body": "if comm is None: return d<EOL>alldicts = comm.allgather(d)<EOL>size = comm.size<EOL>k2li = defaultdict(list)<EOL>for d in alldicts:<EOL><INDENT>for (k,v) in d.items():<EOL><INDENT>k2li[k].append(v)<EOL><DEDENT><DEDENT>result = {}<EOL>for (k,li) in k2li.items():<EOL><INDENT>if assert_all_have_data:<EOL><INDENT>assert len(li)==size, \"<STR_LIT>\" % (len(li), size, k)<EOL><DEDENT>if op=='<STR_LIT>':<EOL><INDENT>result[k] = np.mean(li, axis=<NUM_LIT:0>)<EOL><DEDENT>elif op=='<STR_LIT>':<EOL><INDENT>result[k] = np.sum(li, axis=<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>assert <NUM_LIT:0>, op<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Perform a reduction operation over dicts", "id": "f1366:m5"}
{"signature": "def sync_from_root(sess, variables, comm=None):", "body": "if comm is None: comm = MPI.COMM_WORLD<EOL>import tensorflow as tf<EOL>values = comm.bcast(sess.run(variables))<EOL>sess.run([tf.assign(var, val)<EOL>for (var, val) in zip(variables, values)])<EOL>", "docstring": "Send the root node's parameters to every worker.\nArguments:\n  sess: the TensorFlow session.\n  variables: all parameter variables including optimizer's", "id": "f1366:m0"}
{"signature": "def get_local_rank_size(comm):", "body": "this_node = platform.node()<EOL>ranks_nodes = comm.allgather((comm.Get_rank(), this_node))<EOL>node2rankssofar = defaultdict(int)<EOL>local_rank = None<EOL>for (rank, node) in ranks_nodes:<EOL><INDENT>if rank == comm.Get_rank():<EOL><INDENT>local_rank = node2rankssofar[node]<EOL><DEDENT>node2rankssofar[node] += <NUM_LIT:1><EOL><DEDENT>assert local_rank is not None<EOL>return local_rank, node2rankssofar[this_node]<EOL>", "docstring": "Returns the rank of each process on its machine\nThe processes on a given machine will be assigned ranks\n    0, 1, 2, ..., N-1,\nwhere N is the number of processes on this machine.\n\nUseful if you want to assign one gpu per machine", "id": "f1366:m3"}
{"signature": "def share_file(comm, path):", "body": "localrank, _ = get_local_rank_size(comm)<EOL>if comm.Get_rank() == <NUM_LIT:0>:<EOL><INDENT>with open(path, '<STR_LIT:rb>') as fh:<EOL><INDENT>data = fh.read()<EOL><DEDENT>comm.bcast(data)<EOL><DEDENT>else:<EOL><INDENT>data = comm.bcast(None)<EOL>if localrank == <NUM_LIT:0>:<EOL><INDENT>os.makedirs(os.path.dirname(path), exist_ok=True)<EOL>with open(path, '<STR_LIT:wb>') as fh:<EOL><INDENT>fh.write(data)<EOL><DEDENT><DEDENT><DEDENT>comm.Barrier()<EOL>", "docstring": "Copies the file from rank 0 to all other ranks\nPuts it in the same place on all machines", "id": "f1366:m4"}
{"signature": "def value(self, ob, *args, **kwargs):", "body": "return self._evaluate(self.vf, ob, *args, **kwargs)<EOL>", "docstring": "Compute value estimate(s) given the observation(s)\n\nParameters:\n----------\n\nobservation     observation data (either single or a batch)\n\n**extra_feed    additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)\n\nReturns:\n-------\nvalue estimate", "id": "f1367:c0:m3"}
{"signature": "def value(self, t):", "body": "return self._v<EOL>", "docstring": "See Schedule.value", "id": "f1370:c1:m1"}
{"signature": "def value(self, t):", "body": "raise NotImplementedError()<EOL>", "docstring": "Value of the schedule at time t", "id": "f1370:c0:m0"}
{"signature": "def __init__(self, value):", "body": "self._v = value<EOL>", "docstring": "Value remains constant over time.\n\n        Parameters\n        ----------\n        value: float\n            Constant value of the schedule", "id": "f1370:c1:m0"}
{"signature": "def tile_images(img_nhwc):", "body": "img_nhwc = np.asarray(img_nhwc)<EOL>N, h, w, c = img_nhwc.shape<EOL>H = int(np.ceil(np.sqrt(N)))<EOL>W = int(np.ceil(float(N)/H))<EOL>img_nhwc = np.array(list(img_nhwc) + [img_nhwc[<NUM_LIT:0>]*<NUM_LIT:0> for _ in range(N, H*W)])<EOL>img_HWhwc = img_nhwc.reshape(H, W, h, w, c)<EOL>img_HhWwc = img_HWhwc.transpose(<NUM_LIT:0>, <NUM_LIT:2>, <NUM_LIT:1>, <NUM_LIT:3>, <NUM_LIT:4>)<EOL>img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)<EOL>return img_Hh_Ww_c<EOL>", "docstring": "Tile N images into one big PxQ image\n(P,Q) are chosen to be as close as possible, and if N\nis square, then P=Q.\n\ninput: img_nhwc, list or array of images, ndim=4 once turned into array\n    n = batch index, h = height, w = width, c = channel\nreturns:\n    bigim_HWc, ndarray with ndim=3", "id": "f1371:m0"}
{"signature": "@register(\"<STR_LIT>\")<EOL>def conv_only(convs=[(<NUM_LIT:32>, <NUM_LIT:8>, <NUM_LIT:4>), (<NUM_LIT:64>, <NUM_LIT:4>, <NUM_LIT:2>), (<NUM_LIT:64>, <NUM_LIT:3>, <NUM_LIT:1>)], **conv_kwargs):", "body": "def network_fn(X):<EOL><INDENT>out = tf.cast(X, tf.float32) / <NUM_LIT><EOL>with tf.variable_scope(\"<STR_LIT>\"):<EOL><INDENT>for num_outputs, kernel_size, stride in convs:<EOL><INDENT>out = layers.convolution2d(out,<EOL>num_outputs=num_outputs,<EOL>kernel_size=kernel_size,<EOL>stride=stride,<EOL>activation_fn=tf.nn.relu,<EOL>**conv_kwargs)<EOL><DEDENT><DEDENT>return out<EOL><DEDENT>return network_fn<EOL>", "docstring": "convolutions-only net\n\nParameters:\n----------\n\nconv:       list of triples (filter_number, filter_size, stride) specifying parameters for each layer.\n\nReturns:\n\nfunction that takes tensorflow tensor as input and returns the output of the last convolutional layer", "id": "f1373:m8"}
{"signature": "@register(\"<STR_LIT>\")<EOL>def lstm(nlstm=<NUM_LIT>, layer_norm=False):", "body": "def network_fn(X, nenv=<NUM_LIT:1>):<EOL><INDENT>nbatch = X.shape[<NUM_LIT:0>]<EOL>nsteps = nbatch // nenv<EOL>h = tf.layers.flatten(X)<EOL>M = tf.placeholder(tf.float32, [nbatch]) <EOL>S = tf.placeholder(tf.float32, [nenv, <NUM_LIT:2>*nlstm]) <EOL>xs = batch_to_seq(h, nenv, nsteps)<EOL>ms = batch_to_seq(M, nenv, nsteps)<EOL>if layer_norm:<EOL><INDENT>h5, snew = utils.lnlstm(xs, ms, S, scope='<STR_LIT>', nh=nlstm)<EOL><DEDENT>else:<EOL><INDENT>h5, snew = utils.lstm(xs, ms, S, scope='<STR_LIT>', nh=nlstm)<EOL><DEDENT>h = seq_to_batch(h5)<EOL>initial_state = np.zeros(S.shape.as_list(), dtype=float)<EOL>return h, {'<STR_LIT:S>':S, '<STR_LIT:M>':M, '<STR_LIT:state>':snew, '<STR_LIT>':initial_state}<EOL><DEDENT>return network_fn<EOL>", "docstring": "Builds LSTM (Long-Short Term Memory) network to be used in a policy.\nNote that the resulting function returns not only the output of the LSTM\n(i.e. hidden state of lstm for each step in the sequence), but also a dictionary\nwith auxiliary tensors to be set as policy attributes.\n\nSpecifically,\n    S is a placeholder to feed current state (LSTM state has to be managed outside policy)\n    M is a placeholder for the mask (used to mask out observations after the end of the episode, but can be used for other purposes too)\n    initial_state is a numpy array containing initial lstm state (usually zeros)\n    state is the output LSTM state (to be fed into S at the next call)\n\n\nAn example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example\n\nParameters:\n----------\n\nnlstm: int          LSTM hidden state size\n\nlayer_norm: bool    if True, layer-normalized version of LSTM is used\n\nReturns:\n-------\n\nfunction that builds LSTM with a given input tensor / placeholder", "id": "f1373:m5"}
{"signature": "def nature_cnn(unscaled_images, **conv_kwargs):", "body": "scaled_images = tf.cast(unscaled_images, tf.float32) / <NUM_LIT><EOL>activ = tf.nn.relu<EOL>h = activ(conv(scaled_images, '<STR_LIT>', nf=<NUM_LIT:32>, rf=<NUM_LIT:8>, stride=<NUM_LIT:4>, init_scale=np.sqrt(<NUM_LIT:2>),<EOL>**conv_kwargs))<EOL>h2 = activ(conv(h, '<STR_LIT>', nf=<NUM_LIT:64>, rf=<NUM_LIT:4>, stride=<NUM_LIT:2>, init_scale=np.sqrt(<NUM_LIT:2>), **conv_kwargs))<EOL>h3 = activ(conv(h2, '<STR_LIT>', nf=<NUM_LIT:64>, rf=<NUM_LIT:3>, stride=<NUM_LIT:1>, init_scale=np.sqrt(<NUM_LIT:2>), **conv_kwargs))<EOL>h3 = conv_to_fc(h3)<EOL>return activ(fc(h3, '<STR_LIT>', nh=<NUM_LIT>, init_scale=np.sqrt(<NUM_LIT:2>)))<EOL>", "docstring": "CNN from Nature paper.", "id": "f1373:m1"}
{"signature": "def get_network_builder(name):", "body": "if callable(name):<EOL><INDENT>return name<EOL><DEDENT>elif name in mapping:<EOL><INDENT>return mapping[name]<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(name))<EOL><DEDENT>", "docstring": "If you want to register your own network outside models.py, you just need:\n\nUsage Example:\n-------------\nfrom baselines.common.models import register\n@register(\"your_network_name\")\ndef your_network_define(**net_kwargs):\n    ...\n    return network_fn", "id": "f1373:m10"}
{"signature": "def boolean_flag(parser, name, default=False, help=None):", "body": "dest = name.replace('<STR_LIT:->', '<STR_LIT:_>')<EOL>parser.add_argument(\"<STR_LIT>\" + name, action=\"<STR_LIT:store_true>\", default=default, dest=dest, help=help)<EOL>parser.add_argument(\"<STR_LIT>\" + name, action=\"<STR_LIT>\", dest=dest)<EOL>", "docstring": "Add a boolean flag to argparse parser.\n\n    Parameters\n    ----------\n    parser: argparse.Parser\n        parser to add the flag to\n    name: str\n        --<name> will enable the flag, while --no-<name> will disable it\n    default: bool or None\n        default value of the flag\n    help: str\n        help string for the flag", "id": "f1375:m3"}
{"signature": "def __float__(self):", "body": "return self._value<EOL>", "docstring": "Get the current estimate", "id": "f1375:c1:m2"}
{"signature": "def pretty_eta(seconds_left):", "body": "minutes_left = seconds_left // <NUM_LIT><EOL>seconds_left %= <NUM_LIT><EOL>hours_left = minutes_left // <NUM_LIT><EOL>minutes_left %= <NUM_LIT><EOL>days_left = hours_left // <NUM_LIT><EOL>hours_left %= <NUM_LIT><EOL>def helper(cnt, name):<EOL><INDENT>return \"<STR_LIT>\".format(str(cnt), name, ('<STR_LIT:s>' if cnt > <NUM_LIT:1> else '<STR_LIT>'))<EOL><DEDENT>if days_left > <NUM_LIT:0>:<EOL><INDENT>msg = helper(days_left, '<STR_LIT>')<EOL>if hours_left > <NUM_LIT:0>:<EOL><INDENT>msg += '<STR_LIT>' + helper(hours_left, '<STR_LIT>')<EOL><DEDENT>return msg<EOL><DEDENT>if hours_left > <NUM_LIT:0>:<EOL><INDENT>msg = helper(hours_left, '<STR_LIT>')<EOL>if minutes_left > <NUM_LIT:0>:<EOL><INDENT>msg += '<STR_LIT>' + helper(minutes_left, '<STR_LIT>')<EOL><DEDENT>return msg<EOL><DEDENT>if minutes_left > <NUM_LIT:0>:<EOL><INDENT>return helper(minutes_left, '<STR_LIT>')<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Print the number of seconds in human readable format.\n\n    Examples:\n    2 days\n    2 hours and 37 minutes\n    less than a minute\n\n    Paramters\n    ---------\n    seconds_left: int\n        Number of seconds to be converted to the ETA\n    Returns\n    -------\n    eta: str\n        String representing the pretty ETA.", "id": "f1375:m2"}
{"signature": "def __init__(self, gamma, init_value=None):", "body": "self._value = init_value<EOL>self._gamma = gamma<EOL>", "docstring": "Keep a running estimate of a quantity. This is a bit like mean\n        but more sensitive to recent changes.\n\n        Parameters\n        ----------\n        gamma: float\n            Must be between 0 and 1, where 0 is the most sensitive to recent\n            changes.\n        init_value: float or None\n            Initial value of the estimate. If None, it will be set on the first update.", "id": "f1375:c1:m0"}
{"signature": "def wrap_deepmind_retro(env, scale=True, frame_stack=<NUM_LIT:4>):", "body": "env = WarpFrame(env)<EOL>env = ClipRewardEnv(env)<EOL>if frame_stack > <NUM_LIT:1>:<EOL><INDENT>env = FrameStack(env, frame_stack)<EOL><DEDENT>if scale:<EOL><INDENT>env = ScaledFloatFrame(env)<EOL><DEDENT>return env<EOL>", "docstring": "Configure environment for retro games, using config similar to DeepMind-style Atari in wrap_deepmind", "id": "f1376:m1"}
{"signature": "def __init__(self, env, ratio):", "body": "gym.ObservationWrapper.__init__(self, env)<EOL>(oldh, oldw, oldc) = env.observation_space.shape<EOL>newshape = (oldh//ratio, oldw//ratio, oldc)<EOL>self.observation_space = gym.spaces.Box(low=<NUM_LIT:0>, high=<NUM_LIT:255>,<EOL>shape=newshape, dtype=np.uint8)<EOL>", "docstring": "Downsample images by a factor of ratio", "id": "f1376:c2:m0"}
{"signature": "def __init__(self, env):", "body": "gym.ObservationWrapper.__init__(self, env)<EOL>(oldh, oldw, _oldc) = env.observation_space.shape<EOL>self.observation_space = gym.spaces.Box(low=<NUM_LIT:0>, high=<NUM_LIT:255>,<EOL>shape=(oldh, oldw, <NUM_LIT:1>), dtype=np.uint8)<EOL>", "docstring": "Downsample images by a factor of ratio", "id": "f1376:c3:m0"}
{"signature": "def logkv_mean(key, val):", "body": "get_current().logkv_mean(key, val)<EOL>", "docstring": "The same as logkv(), but if called many times, values averaged.", "id": "f1379:m2"}
{"signature": "def logkv(key, val):", "body": "get_current().logkv(key, val)<EOL>", "docstring": "Log a value of some diagnostic\nCall this once for each diagnostic quantity, each iteration\nIf called many times, last value will be used.", "id": "f1379:m1"}
{"signature": "def dumpkvs():", "body": "return get_current().dumpkvs()<EOL>", "docstring": "Write all of the diagnostics from the current iteration", "id": "f1379:m4"}
{"signature": "def get_dir():", "body": "return get_current().get_dir()<EOL>", "docstring": "Get directory that log files are being written to.\nwill be None if there is no output directory (i.e., if you didn't call start)", "id": "f1379:m13"}
{"signature": "def log(*args, level=INFO):", "body": "get_current().log(*args, level=level)<EOL>", "docstring": "Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).", "id": "f1379:m6"}
{"signature": "def profile(n):", "body": "def decorator_with_name(func):<EOL><INDENT>def func_wrapper(*args, **kwargs):<EOL><INDENT>with profile_kv(n):<EOL><INDENT>return func(*args, **kwargs)<EOL><DEDENT><DEDENT>return func_wrapper<EOL><DEDENT>return decorator_with_name<EOL>", "docstring": "Usage:\n@profile(\"my_func\")\ndef my_func(): code", "id": "f1379:m15"}
{"signature": "@staticmethod<EOL><INDENT>def create_datapoint(name, columns, points):<DEDENT>", "body": "return {<EOL>\"<STR_LIT>\": \"<STR_LIT:s>\",<EOL>\"<STR_LIT:name>\": name,<EOL>\"<STR_LIT>\": columns,<EOL>\"<STR_LIT>\": points,<EOL>}<EOL>", "docstring": "Create datastructure in InfluxDB 0.8 data format\n:param name:\n:param columns:\n:param points:\n:return:", "id": "f1381:c0:m1"}
{"signature": "def batches(iterable, n=<NUM_LIT:1>):", "body": "l = len(iterable)<EOL>for ndx in range(<NUM_LIT:0>, l, n):<EOL><INDENT>yield iterable[ndx:min(ndx + n, l)]<EOL><DEDENT>", "docstring": "From http://stackoverflow.com/a/8290508/270334\n:param n:\n:param iterable:", "id": "f1382:m0"}
{"signature": "def __str__(self):", "body": "<EOL>statements = [<EOL>(Keyword.SELECT, self.select_stmt),<EOL>(Keyword.FROM, self.from_stmt)<EOL>]<EOL>if self.where_stmt:<EOL><INDENT>statements.append((Keyword.WHERE, self.where_stmt))<EOL><DEDENT>if self.limit_stmt:<EOL><INDENT>statements.append((Keyword.LIMIT, self.limit_stmt))<EOL><DEDENT>if self.group_by_stmt:<EOL><INDENT>statements.append((Keyword.GROUP_BY, ['<STR_LIT>', self.group_by_stmt]))<EOL><DEDENT>return self._format_statements(statements)<EOL>", "docstring": "Standard string representation of select query", "id": "f1390:c0:m6"}
{"signature": "def handle_error(self, request, client_address):", "body": "cls, e = sys.exc_info()[:<NUM_LIT:2>]<EOL>if cls is socket.error or cls is ssl.SSLError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>return HTTPServer.handle_error(self, request, client_address)<EOL><DEDENT>", "docstring": "Overwrite error handling to suppress socket/ssl related errors\n:param client_address: Address of client\n:param request: Request causing an error", "id": "f1396:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def get_queries(parameters):<DEDENT>", "body": "parsed_params = urlparse.parse_qs(parameters)<EOL>if '<STR_LIT:q>' not in parsed_params:<EOL><INDENT>return []<EOL><DEDENT>queries = parsed_params['<STR_LIT:q>']<EOL>if not isinstance(queries, list):<EOL><INDENT>queries = [queries]<EOL><DEDENT>return queries<EOL>", "docstring": "Get a list of all queries (q=... parameters) from an URL parameter string\n:param parameters: The url parameter list", "id": "f1397:c0:m6"}
{"signature": "def send_error(self, code, message=None):", "body": "message = message.strip()<EOL>self.log_error(\"<STR_LIT>\", code, message)<EOL>self.send_response(code)<EOL>self.send_header(\"<STR_LIT:Content-Type>\", \"<STR_LIT>\")<EOL>self.send_header('<STR_LIT>', '<STR_LIT>')<EOL>self.end_headers()<EOL>if message:<EOL><INDENT>self.wfile.write(message)<EOL><DEDENT>", "docstring": "Send and log plain text error reply.\n:param code:\n:param message:", "id": "f1397:c0:m11"}
{"signature": "def _return_response(self, response):", "body": "self.filter_headers(response.msg)<EOL>if \"<STR_LIT>\" in response.msg:<EOL><INDENT>del response.msg[\"<STR_LIT>\"]<EOL><DEDENT>self.send_response(response.status, response.reason)<EOL>for header_key, header_value in response.msg.items():<EOL><INDENT>self.send_header(header_key, header_value)<EOL><DEDENT>body = response.read()<EOL>self.send_header('<STR_LIT>', str(len(body)))<EOL>self.end_headers()<EOL>self.wfile.write(body)<EOL>", "docstring": ":type result: HTTPResponse", "id": "f1397:c0:m12"}
{"signature": "def naughty_strings(filepath=FILEPATH):", "body": "strings = []<EOL>with open(filepath, '<STR_LIT:r>') as f:<EOL><INDENT>strings = f.readlines()<EOL>strings = [x.strip(u'<STR_LIT:\\n>') for x in strings]<EOL>strings = [x for x in strings if x and not x.startswith(u'<STR_LIT:#>')]<EOL>strings.insert(<NUM_LIT:0>, u\"<STR_LIT>\")<EOL><DEDENT>return strings<EOL>", "docstring": "Get the list of naughty_strings.\n\n    By default this will get the strings from the blns.txt file\n\n    Code is a simple port of what is already in the /scripts directory\n\n    :param filepath: Optional filepath the the blns.txt file\n    :returns: The list of naughty strings", "id": "f1421:m0"}
{"signature": "def flatten(d, parent_key='<STR_LIT>', sep='<STR_LIT:_>'):", "body": "items = []<EOL>for k, v in d.items():<EOL><INDENT>new_key = parent_key + sep + k if parent_key else k<EOL>if isinstance(v, collections.MutableMapping):<EOL><INDENT>items.extend(flatten(v, new_key, sep=sep).items())<EOL><DEDENT>else:<EOL><INDENT>items.append((new_key, v))<EOL><DEDENT><DEDENT>return dict(items)<EOL>", "docstring": "Flatten keys in a dictionary\nExample:\nflatten({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]})\n=> {'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10}\n:param d:  Dictionary to flatten\n:param sep: Separator between keys\n:param parent_key: Key to merge with", "id": "f1424:m3"}
{"signature": "def load_config():", "body": "<EOL>config = flatten(default_config.DEFAULT_CONFIG)<EOL>cli_config = flatten(parse_args())<EOL>if \"<STR_LIT>\" in cli_config:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(cli_config['<STR_LIT>']))<EOL>configfile = parse_configfile(cli_config['<STR_LIT>'])<EOL>config = overwrite_config(config, configfile)<EOL><DEDENT>config = overwrite_config(config, cli_config)<EOL>if '<STR_LIT>' in config:<EOL><INDENT>if config['<STR_LIT>'] == <NUM_LIT:1>:<EOL><INDENT>logging.getLogger().setLevel(logging.INFO)<EOL><DEDENT>elif config['<STR_LIT>'] > <NUM_LIT:1>:<EOL><INDENT>logging.getLogger().setLevel(logging.DEBUG)<EOL><DEDENT><DEDENT>return ObjectView(config)<EOL>", "docstring": "Load settings from default config and optionally\noverwrite with config file and commandline parameters\n(in that order).", "id": "f1424:m0"}
{"signature": "def parse_configfile(configfile):", "body": "with open(configfile) as f:<EOL><INDENT>try:<EOL><INDENT>return yaml.safe_load(f)<EOL><DEDENT>except Exception as e:<EOL><INDENT>logging.fatal(\"<STR_LIT>\", e)<EOL>exit(-<NUM_LIT:1>)<EOL><DEDENT><DEDENT>", "docstring": "Read settings from file\n:param configfile:", "id": "f1424:m2"}
{"signature": "def check_write_permissions(file):", "body": "try:<EOL><INDENT>open(file, '<STR_LIT:a>')<EOL><DEDENT>except IOError:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(file))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Check if we can write to the given file\n\nOtherwise since we might detach the process to run in the background\nwe might never find out that writing failed and get an ugly\nexit message on startup. For example:\nERROR: Child exited immediately with non-zero exit code 127\n\nSo we catch this error upfront and print a nicer error message\nwith a hint on how to fix it.", "id": "f1428:m1"}
{"signature": "def show_rules():", "body": "from rules.loader import import_rules<EOL>from rules.rule_list import all_rules<EOL>rules = import_rules(all_rules)<EOL>print(\"<STR_LIT>\")<EOL>for name, rule in rules.iteritems():<EOL><INDENT>heading = \"<STR_LIT>\".format(rule.description(), name)<EOL>print(\"<STR_LIT>\".format(heading))<EOL>for line in rule.reason():<EOL><INDENT>print(line)<EOL><DEDENT>print(\"<STR_LIT>\")<EOL><DEDENT>sys.exit(<NUM_LIT:0>)<EOL>", "docstring": "Show the list of available rules and quit\n:return:", "id": "f1428:m2"}
{"signature": "def show_version():", "body": "from version import __version__<EOL>print(\"<STR_LIT>\".format(__package__, __version__))<EOL>sys.exit(<NUM_LIT:0>)<EOL>", "docstring": "Show program version an quit\n:return:", "id": "f1428:m3"}
{"signature": "def __init__(self, rules, whitelist=[], safe_mode=True):", "body": "self.parser = QueryParser()<EOL>self.guard = Guard(rules)<EOL>self.sanitizer = Sanitizer()<EOL>self.whitelist = whitelist<EOL>self.safe_mode = safe_mode<EOL>", "docstring": ":param rules: A list of rules to evaluate\n:param safe_mode: If set to True, allow the query in case it can not be parsed\n:return:", "id": "f1429:c0:m0"}
{"signature": "def check(self, query):", "body": "if query.get_type() in {Keyword.LIST, Keyword.DROP}:<EOL><INDENT>series = query.series_stmt<EOL><DEDENT>else:<EOL><INDENT>series = query.from_stmt<EOL><DEDENT>if len(series) >= self.min_series_name_length:<EOL><INDENT>return Ok(True)<EOL><DEDENT>return Err(\"<STR_LIT>\")<EOL>", "docstring": ":param query:", "id": "f1432:c0:m3"}
{"signature": "def check(self, query):", "body": "if query.get_type() not in {Keyword.SELECT}:<EOL><INDENT>return Ok(True)<EOL><DEDENT>earliest_date = query.get_earliest_date()<EOL>if earliest_date >= self.min_start_date:<EOL><INDENT>return Ok(True)<EOL><DEDENT>if query.limit_stmt:<EOL><INDENT>return Ok(True)<EOL><DEDENT>return Err((\"<STR_LIT>\"<EOL>\"<STR_LIT>\").format(self.min_start_date.strftime(\"<STR_LIT>\"),<EOL>earliest_date))<EOL>", "docstring": ":param query:", "id": "f1433:c0:m3"}
{"signature": "def check(self, query):", "body": "if query.get_type() not in {Keyword.SELECT, Keyword.DELETE}:<EOL><INDENT>return Ok(True)<EOL><DEDENT>datapoints = query.get_datapoints()<EOL>if datapoints <= self.max_datapoints:<EOL><INDENT>return Ok(True)<EOL><DEDENT>return Err((\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\").format(datapoints))<EOL>", "docstring": ":param query:", "id": "f1434:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def reason():<DEDENT>", "body": "pass<EOL>", "docstring": "When and why the rule is useful.\n\nThis should return a list of lines instead of a long string.\nIt's easier to format line breaks this way.\n\n:return: The reason for the rule", "id": "f1436:c0:m1"}
{"signature": "def check(self, query):", "body": "pass<EOL>", "docstring": "Check if a given query is permitted\n:param query:\n:return: result.Ok() if permitted, result.Err() if not.", "id": "f1436:c0:m2"}
{"signature": "def import_rule(path):", "body": "rule = importlib.import_module(path)<EOL>return rule<EOL>", "docstring": "Load the given rule\n:param path: Import path to rule", "id": "f1439:m1"}
{"signature": "def check(self, query):", "body": "if query.get_type() not in {Keyword.SELECT}:<EOL><INDENT>return Ok(True)<EOL><DEDENT>if query.get_resolution() > <NUM_LIT:0>:<EOL><INDENT>return Ok(True)<EOL><DEDENT>return Err(\"<STR_LIT>\")<EOL>", "docstring": ":param query:", "id": "f1441:c0:m2"}
{"signature": "def create_list_query(self, tokens):", "body": "if not tokens[Keyword.SERIES]:<EOL><INDENT>tokens[Keyword.SERIES] = '<STR_LIT>'<EOL><DEDENT>return ListQuery(self.parse_keyword(Keyword.SERIES, tokens))<EOL>", "docstring": "Parse tokens of list query\n:param tokens: A list of InfluxDB query tokens", "id": "f1443:c0:m10"}
{"signature": "def create_query_object(self, tokens):", "body": "try:<EOL><INDENT>query_type = tokens['<STR_LIT:type>']<EOL>return getattr(self, '<STR_LIT>' % query_type)(tokens)<EOL><DEDENT>except (KeyError, TypeError):<EOL><INDENT>return self.invalid_query(tokens)<EOL><DEDENT>", "docstring": "Analyze query tokens and create an InfluxDBStatement from them\nReturn None on error\n:param tokens: A list of InfluxDB query tokens", "id": "f1443:c0:m8"}
{"signature": "def _parse_datapoints(self, parsed_duration, parsed_resolution, limit):", "body": "return self.datapoints_parser.parse(parsed_duration, parsed_resolution, limit)<EOL>", "docstring": "Parse the number of datapoints of a query.\nThis can be calculated from the given duration and resolution of the query.\nE.g. if the query has a duation of 2*60*60 = 7200 seconds and a resolution of 10 seconds\nthen the number of datapoints would be 7200/10 => 7200 datapoints.\n\n:param parsed_duration:\n:param parsed_resolution:\n:param limit:\n:return:", "id": "f1443:c0:m17"}
{"signature": "def create_drop_query(self, tokens):", "body": "if not tokens[Keyword.SERIES]:<EOL><INDENT>return None<EOL><DEDENT>return DropQuery(self.parse_keyword(Keyword.SERIES, tokens))<EOL>", "docstring": "Parse tokens of drop query\n:param tokens: A list of InfluxDB query tokens", "id": "f1443:c0:m11"}
{"signature": "def _parse_resolution(self, tokens):", "body": "return self.resolution_parser.parse(self.parse_keyword(Keyword.GROUP_BY, tokens))<EOL>", "docstring": "Parse resolution from the GROUP BY statement.\nE.g. GROUP BY time(10s) would mean a 10 second resolution\n:param tokens:\n:return:", "id": "f1443:c0:m15"}
{"signature": "def parse(self, group_by_stmt):", "body": "if not group_by_stmt:<EOL><INDENT>return Resolution.MAX_RESOLUTION<EOL><DEDENT>m = self.GROUP_BY_TIME_PATTERN.match(group_by_stmt)<EOL>if not m:<EOL><INDENT>return None<EOL><DEDENT>value = int(m.group(<NUM_LIT:1>))<EOL>unit = m.group(<NUM_LIT:2>)<EOL>resolution = self.convert_to_seconds(value, unit)<EOL>return max(resolution, Resolution.MAX_RESOLUTION)<EOL>", "docstring": "Extract the data resolution of a query in seconds\nE.g. \"group by time(99s)\" => 99\n\n:param group_by_stmt: A raw InfluxDB group by statement", "id": "f1445:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def get_object(cls, api_token, cert_id):<DEDENT>", "body": "certificate = cls(token=api_token, id=cert_id)<EOL>certificate.load()<EOL>return certificate<EOL>", "docstring": "Class method that will return a Certificate object by its ID.", "id": "f1448:c0:m1"}
{"signature": "def destroy(self):", "body": "return self.get_data(\"<STR_LIT>\" % self.id, type=DELETE)<EOL>", "docstring": "Delete the Certificate", "id": "f1448:c0:m4"}
{"signature": "def load(self):", "body": "data = self.get_data(\"<STR_LIT>\" % self.id)<EOL>certificate = data[\"<STR_LIT>\"]<EOL>for attr in certificate.keys():<EOL><INDENT>setattr(self, attr, certificate[attr])<EOL><DEDENT>return self<EOL>", "docstring": "Load the Certificate object from DigitalOcean.\n\nRequires self.id to be set.", "id": "f1448:c0:m2"}
{"signature": "def assert_url_query_equal(self, url1, url2):", "body": "base1, qlist1 = self.split_url(url1)<EOL>base2, qlist2 = self.split_url(url2)<EOL>self.assertEqual(base1, base2)<EOL>self.assertEqual(qlist1, qlist2)<EOL>", "docstring": "Test if two URL queries are equal\n\n        The key=value pairs after the ? in a URL can occur in any order\n        (especially since dicts in python 3 are not deterministic across runs).\n        The method sorts the key=value pairs and then compares the URLs.", "id": "f1460:c0:m3"}
{"signature": "def create(self):", "body": "<EOL>data = {<EOL>\"<STR_LIT:name>\": self.name,<EOL>\"<STR_LIT>\": self.ip_address,<EOL>}<EOL>domain = self.get_data(\"<STR_LIT>\", type=POST, params=data)<EOL>return domain<EOL>", "docstring": "Create new doamin", "id": "f1464:c0:m5"}
{"signature": "@classmethod<EOL><INDENT>def get_object(cls, api_token, domain_name):<DEDENT>", "body": "domain = cls(token=api_token, name=domain_name)<EOL>domain.load()<EOL>return domain<EOL>", "docstring": "Class method that will return a Domain object by ID.", "id": "f1464:c0:m1"}
{"signature": "def assign(self, droplet_id):", "body": "return self.get_data(<EOL>\"<STR_LIT>\" % self.ip,<EOL>type=POST,<EOL>params={\"<STR_LIT:type>\": \"<STR_LIT>\", \"<STR_LIT>\": droplet_id}<EOL>)<EOL>", "docstring": "Assign a FloatingIP to a Droplet.\n\nArgs:\n    droplet_id: int - droplet id", "id": "f1465:c0:m6"}
{"signature": "def unassign(self):", "body": "return self.get_data(<EOL>\"<STR_LIT>\" % self.ip,<EOL>type=POST,<EOL>params={\"<STR_LIT:type>\": \"<STR_LIT>\"}<EOL>)<EOL>", "docstring": "Unassign a FloatingIP.", "id": "f1465:c0:m7"}
{"signature": "def create(self, *args, **kwargs):", "body": "data = self.get_data('<STR_LIT>',<EOL>type=POST,<EOL>params={'<STR_LIT>': self.droplet_id})<EOL>if data:<EOL><INDENT>self.ip = data['<STR_LIT>']['<STR_LIT>']<EOL>self.region = data['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>return self<EOL>", "docstring": "Creates a FloatingIP and assigns it to a Droplet.\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.\n\nArgs:\n    droplet_id: int - droplet id", "id": "f1465:c0:m3"}
{"signature": "def create(self, *args, **kwargs):", "body": "for attr in kwargs.keys():<EOL><INDENT>setattr(self, attr, kwargs[attr])<EOL><DEDENT>if not self.size_slug and self.size:<EOL><INDENT>self.size_slug = self.size<EOL><DEDENT>ssh_keys_id = Droplet.__get_ssh_keys_id_or_fingerprint(self.ssh_keys,<EOL>self.token,<EOL>self.name)<EOL>data = {<EOL>\"<STR_LIT:name>\": self.name,<EOL>\"<STR_LIT:size>\": self.size_slug,<EOL>\"<STR_LIT:image>\": self.image,<EOL>\"<STR_LIT>\": self.region,<EOL>\"<STR_LIT>\": ssh_keys_id,<EOL>\"<STR_LIT>\": bool(self.backups),<EOL>\"<STR_LIT>\": bool(self.ipv6),<EOL>\"<STR_LIT>\": bool(self.private_networking),<EOL>\"<STR_LIT>\": self.volumes,<EOL>\"<STR_LIT>\": self.tags,<EOL>\"<STR_LIT>\": bool(self.monitoring),<EOL>}<EOL>if self.user_data:<EOL><INDENT>data[\"<STR_LIT>\"] = self.user_data<EOL><DEDENT>data = self.get_data(\"<STR_LIT>\", type=POST, params=data)<EOL>if data:<EOL><INDENT>self.id = data['<STR_LIT>']['<STR_LIT:id>']<EOL>action_id = data['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT:id>']<EOL>self.action_ids = []<EOL>self.action_ids.append(action_id)<EOL><DEDENT>", "docstring": "Create the droplet with object properties.\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.", "id": "f1466:c3:m25"}
{"signature": "def change_kernel(self, kernel, return_dict=True):", "body": "if type(kernel) != Kernel:<EOL><INDENT>raise BadKernelObject(\"<STR_LIT>\")<EOL><DEDENT>return self._perform_action(<EOL>{'<STR_LIT:type>': '<STR_LIT>', '<STR_LIT>': kernel.id},<EOL>return_dict<EOL>)<EOL>", "docstring": "Change the kernel to a new one\n\n        Args:\n            kernel : instance of digitalocean.Kernel.Kernel\n\n        Optional Args:\n            return_dict (bool): Return a dict when True (default),\n                otherwise return an Action.\n\n        Returns dict or Action", "id": "f1466:c3:m23"}
{"signature": "def get_snapshots(self):", "body": "snapshots = list()<EOL>for id in self.snapshot_ids:<EOL><INDENT>snapshot = Image()<EOL>snapshot.id = id<EOL>snapshot.token = self.token<EOL>snapshots.append(snapshot)<EOL><DEDENT>return snapshots<EOL>", "docstring": "This method will return the snapshots/images connected to that\nspecific droplet.", "id": "f1466:c3:m29"}
{"signature": "def get_actions(self):", "body": "answer = self.get_data(\"<STR_LIT>\" % self.id, type=GET)<EOL>actions = []<EOL>for action_dict in answer['<STR_LIT>']:<EOL><INDENT>action = Action(**action_dict)<EOL>action.token = self.token<EOL>action.droplet_id = self.id<EOL>action.load()<EOL>actions.append(action)<EOL><DEDENT>return actions<EOL>", "docstring": "Returns a list of Action objects\nThis actions can be used to check the droplet's status", "id": "f1466:c3:m27"}
{"signature": "def enable_backups(self, return_dict=True):", "body": "return self._perform_action({'<STR_LIT:type>': '<STR_LIT>'}, return_dict)<EOL>", "docstring": "Enable automatic backups\n\nOptional Args:\n    return_dict (bool): Return a dict when True (default),\n        otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m17"}
{"signature": "def _perform_action(self, params, return_dict=True):", "body": "action = self.get_data(<EOL>\"<STR_LIT>\" % self.id,<EOL>type=POST,<EOL>params=params<EOL>)<EOL>if return_dict:<EOL><INDENT>return action<EOL><DEDENT>else:<EOL><INDENT>action = action[u'<STR_LIT:action>']<EOL>return_action = Action(token=self.token)<EOL>for attr in action.keys():<EOL><INDENT>setattr(return_action, attr, action[attr])<EOL><DEDENT>return return_action<EOL><DEDENT>", "docstring": "Perform a droplet action.\n\nArgs:\n    params (dict): parameters of the action\n\nOptional Args:\n    return_dict (bool): Return a dict when True (default),\n        otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m6"}
{"signature": "def resize(self, new_size_slug, return_dict=True, disk=True):", "body": "options = {\"<STR_LIT:type>\": \"<STR_LIT>\", \"<STR_LIT:size>\": new_size_slug}<EOL>if disk: options[\"<STR_LIT>\"] = \"<STR_LIT:true>\"<EOL>return self._perform_action(options, return_dict)<EOL>", "docstring": "Resize the droplet to a new size slug.\n        https://developers.digitalocean.com/documentation/v2/#resize-a-droplet\n\n        Args:\n            new_size_slug (str): name of new size\n\n        Optional Args:\n            return_dict (bool): Return a dict when True (default),\n                                otherwise return an Action.\n            disk (bool): If a permanent resize, with disk changes included.\n\n        Returns dict or Action", "id": "f1466:c3:m13"}
{"signature": "def disable_backups(self, return_dict=True):", "body": "return self._perform_action({'<STR_LIT:type>': '<STR_LIT>'}, return_dict)<EOL>", "docstring": "Disable automatic backups\n\nOptional Args:\n    return_dict (bool): Return a dict when True (default),\n        otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m18"}
{"signature": "def reset_root_password(self, return_dict=True):", "body": "return self._perform_action({'<STR_LIT:type>': '<STR_LIT>'}, return_dict)<EOL>", "docstring": "reset the root password\n\nOptional Args:\n    return_dict (bool): Return a dict when True (default),\n        otherwise return an Action.\n\nReturns dict or Action", "id": "f1466:c3:m12"}
{"signature": "def destroy(self):", "body": "return self.get_data(\"<STR_LIT>\" % self.id, type=DELETE)<EOL>", "docstring": "Destroy the Firewall", "id": "f1468:c5:m9"}
{"signature": "def add_droplets(self, droplet_ids):", "body": "return self.get_data(<EOL>\"<STR_LIT>\" % self.id,<EOL>type=POST,<EOL>params={\"<STR_LIT>\": droplet_ids}<EOL>)<EOL>", "docstring": "Add droplets to this Firewall.", "id": "f1468:c5:m5"}
{"signature": "def add_tags(self, tags):", "body": "return self.get_data(<EOL>\"<STR_LIT>\" % self.id,<EOL>type=POST,<EOL>params={\"<STR_LIT>\": tags}<EOL>)<EOL>", "docstring": "Add tags to this Firewall.", "id": "f1468:c5:m7"}
{"signature": "def load(self):", "body": "identifier = None<EOL>if self.id:<EOL><INDENT>identifier = self.id<EOL><DEDENT>elif self.fingerprint is not None:<EOL><INDENT>identifier = self.fingerprint<EOL><DEDENT>data = self.get_data(\"<STR_LIT>\" % identifier, type=GET)<EOL>ssh_key = data['<STR_LIT>']<EOL>for attr in ssh_key.keys():<EOL><INDENT>setattr(self, attr, ssh_key[attr])<EOL><DEDENT>self.id = ssh_key['<STR_LIT:id>']<EOL>", "docstring": "Load the SSHKey object from DigitalOcean.\n\nRequires either self.id or self.fingerprint to be set.", "id": "f1469:c0:m2"}
{"signature": "@classmethod<EOL><INDENT>def get_object(cls, api_token, ssh_key_id):<DEDENT>", "body": "ssh_key = cls(token=api_token, id=ssh_key_id)<EOL>ssh_key.load()<EOL>return ssh_key<EOL>", "docstring": "Class method that will return a SSHKey object by ID.", "id": "f1469:c0:m1"}
{"signature": "def destroy(self):", "body": "return self.get_data(\"<STR_LIT>\" % self.id, type=DELETE)<EOL>", "docstring": "Destroy the SSH Key", "id": "f1469:c0:m6"}
{"signature": "def wait(self, update_every_seconds=<NUM_LIT:1>):", "body": "while self.status == u'<STR_LIT>':<EOL><INDENT>sleep(update_every_seconds)<EOL>self.load()<EOL><DEDENT>return self.status == u'<STR_LIT>'<EOL>", "docstring": "Wait until the action is marked as completed or with an error.\nIt will return True in case of success, otherwise False.\n\nOptional Args:\n    update_every_seconds - int : number of seconds to wait before\n        checking if the action is completed.", "id": "f1470:c0:m4"}
{"signature": "def get_images(self, private=False, type=None):", "body": "params = {}<EOL>if private:<EOL><INDENT>params['<STR_LIT>'] = '<STR_LIT:true>'<EOL><DEDENT>if type:<EOL><INDENT>params['<STR_LIT:type>'] = type<EOL><DEDENT>data = self.get_data(\"<STR_LIT>\", params=params)<EOL>images = list()<EOL>for jsoned in data['<STR_LIT>']:<EOL><INDENT>image = Image(**jsoned)<EOL>image.token = self.token<EOL>images.append(image)<EOL><DEDENT>return images<EOL>", "docstring": "This function returns a list of Image object.", "id": "f1473:c0:m6"}
{"signature": "def get_droplet_snapshots(self):", "body": "data = self.get_data(\"<STR_LIT>\")<EOL>return [<EOL>Snapshot(token=self.token, **snapshot)<EOL>for snapshot in data['<STR_LIT>']<EOL>]<EOL>", "docstring": "This method returns a list of all Snapshots based on Droplets.", "id": "f1473:c0:m27"}
{"signature": "def get_load_balancer(self, id):", "body": "return LoadBalancer.get_object(api_token=self.token, id=id)<EOL>", "docstring": "Returns a Load Balancer object by its ID.\n\nArgs:\n    id (str): Load Balancer ID", "id": "f1473:c0:m22"}
{"signature": "def get_droplet(self, droplet_id):", "body": "return Droplet.get_object(api_token=self.token, droplet_id=droplet_id)<EOL>", "docstring": "Return a Droplet by its ID.", "id": "f1473:c0:m4"}
{"signature": "def get_all_certificates(self):", "body": "data = self.get_data(\"<STR_LIT>\")<EOL>certificates = list()<EOL>for jsoned in data['<STR_LIT>']:<EOL><INDENT>cert = Certificate(**jsoned)<EOL>cert.token = self.token<EOL>certificates.append(cert)<EOL><DEDENT>return certificates<EOL>", "docstring": "This function returns a list of Certificate objects.", "id": "f1473:c0:m24"}
{"signature": "def get_volume_snapshots(self):", "body": "data = self.get_data(\"<STR_LIT>\")<EOL>return [<EOL>Snapshot(token=self.token, **snapshot)<EOL>for snapshot in data['<STR_LIT>']<EOL>]<EOL>", "docstring": "This method returns a list of all Snapshots based on volumes.", "id": "f1473:c0:m28"}
{"signature": "def get_all_load_balancers(self):", "body": "data = self.get_data(\"<STR_LIT>\")<EOL>load_balancers = list()<EOL>for jsoned in data['<STR_LIT>']:<EOL><INDENT>load_balancer = LoadBalancer(**jsoned)<EOL>load_balancer.token = self.token<EOL>load_balancer.health_check = HealthCheck(**jsoned['<STR_LIT>'])<EOL>load_balancer.sticky_sessions = StickySesions(**jsoned['<STR_LIT>'])<EOL>forwarding_rules = list()<EOL>for rule in jsoned['<STR_LIT>']:<EOL><INDENT>forwarding_rules.append(ForwardingRule(**rule))<EOL><DEDENT>load_balancer.forwarding_rules = forwarding_rules<EOL>load_balancers.append(load_balancer)<EOL><DEDENT>return load_balancers<EOL>", "docstring": "Returns a list of Load Balancer objects.", "id": "f1473:c0:m21"}
{"signature": "def get_ssh_key(self, ssh_key_id):", "body": "return SSHKey.get_object(api_token=self.token, ssh_key_id=ssh_key_id)<EOL>", "docstring": "Return a SSHKey object by its ID.", "id": "f1473:c0:m16"}
{"signature": "def get_certificate(self, id):", "body": "return Certificate.get_object(api_token=self.token, cert_id=id)<EOL>", "docstring": "Returns a Certificate object by its ID.\n\nArgs:\n    id (str): Certificate ID", "id": "f1473:c0:m23"}
{"signature": "def get_distro_images(self):", "body": "images = self.get_images(type='<STR_LIT>')<EOL>return images<EOL>", "docstring": "This function returns a list of Image objects representing\npublic base distribution images.", "id": "f1473:c0:m11"}
{"signature": "def get_app_images(self):", "body": "images = self.get_images(type='<STR_LIT>')<EOL>return images<EOL>", "docstring": "This function returns a list of Image objectobjects representing\npublic DigitalOcean 'One-Click' application images.", "id": "f1473:c0:m12"}
{"signature": "def get_all_snapshots(self):", "body": "data = self.get_data(\"<STR_LIT>\")<EOL>return [<EOL>Snapshot(token=self.token, **snapshot)<EOL>for snapshot in data['<STR_LIT>']<EOL>]<EOL>", "docstring": "This method returns a list of all Snapshots.", "id": "f1473:c0:m26"}
{"signature": "def get_account(self):", "body": "return Account.get_object(api_token=self.token)<EOL>", "docstring": "Returns an Account object.", "id": "f1473:c0:m1"}
{"signature": "def get_global_images(self):", "body": "data = self.get_images()<EOL>images = list()<EOL>for i in data:<EOL><INDENT>if i.public:<EOL><INDENT>i.token = self.token<EOL>images.append(i)<EOL><DEDENT><DEDENT>return images<EOL>", "docstring": "This function returns a list of Image objects representing\npublic DigitalOcean images (e.g. base distribution images\nand 'One-Click' applications).", "id": "f1473:c0:m10"}
{"signature": "def get_floating_ip(self, ip):", "body": "return FloatingIP.get_object(api_token=self.token, ip=ip)<EOL>", "docstring": "Returns a of FloatingIP object by its IP address.", "id": "f1473:c0:m20"}
{"signature": "def get_all_floating_ips(self):", "body": "data = self.get_data(\"<STR_LIT>\")<EOL>floating_ips = list()<EOL>for jsoned in data['<STR_LIT>']:<EOL><INDENT>floating_ip = FloatingIP(**jsoned)<EOL>floating_ip.token = self.token<EOL>floating_ips.append(floating_ip)<EOL><DEDENT>return floating_ips<EOL>", "docstring": "This function returns a list of FloatingIP objects.", "id": "f1473:c0:m19"}
{"signature": "def destroy(self):", "body": "return self.get_data(\"<STR_LIT>\" % self.id, type=DELETE)<EOL>", "docstring": "Destroy the image", "id": "f1474:c0:m3"}
{"signature": "def get_data(self, url, type=GET, params=None):", "body": "if params is None:<EOL><INDENT>params = dict()<EOL><DEDENT>if type is GET:<EOL><INDENT>params.setdefault(\"<STR_LIT>\", <NUM_LIT:200>)<EOL><DEDENT>req = self.__perform_request(url, type, params)<EOL>if req.status_code == <NUM_LIT>:<EOL><INDENT>return True<EOL><DEDENT>if req.status_code == <NUM_LIT>:<EOL><INDENT>raise NotFoundError()<EOL><DEDENT>try:<EOL><INDENT>data = req.json()<EOL><DEDENT>except ValueError as e:<EOL><INDENT>raise JSONReadError(<EOL>'<STR_LIT>' % str(e)<EOL>)<EOL><DEDENT>if not req.ok:<EOL><INDENT>msg = [data[m] for m in (\"<STR_LIT:id>\", \"<STR_LIT:message>\") if m in data][<NUM_LIT:1>]<EOL>raise DataReadError(msg)<EOL><DEDENT>self.__init_ratelimit(req.headers)<EOL>pages = data.get(\"<STR_LIT>\", {}).get(\"<STR_LIT>\", {})<EOL>if pages.get(\"<STR_LIT>\") and \"<STR_LIT>\" not in params:<EOL><INDENT>return self.__deal_with_pagination(url, type, params, data)<EOL><DEDENT>else:<EOL><INDENT>return data<EOL><DEDENT>", "docstring": "This method is a basic implementation of __call_api that checks\nerrors too. In case of success the method will return True or the\ncontent of the response to the request.\n\nPagination is automatically detected and handled accordingly", "id": "f1475:c5:m7"}
{"signature": "def get_timeout(self):", "body": "timeout_str = os.environ.get(REQUEST_TIMEOUT_ENV_VAR)<EOL>if timeout_str:<EOL><INDENT>try:<EOL><INDENT>return float(timeout_str)<EOL><DEDENT>except:<EOL><INDENT>self._log.error('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>timeout_str)<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Checks if any timeout for the requests to DigitalOcean is required.\nTo set a timeout, use the REQUEST_TIMEOUT_ENV_VAR environment\nvariable.", "id": "f1475:c5:m6"}
{"signature": "def create(self):", "body": "params = {'<STR_LIT:name>': self.name,<EOL>'<STR_LIT>': self.region,<EOL>'<STR_LIT:url>': self.url,<EOL>'<STR_LIT>': self.distribution,<EOL>'<STR_LIT:description>': self.description,<EOL>'<STR_LIT>': self.tags}<EOL>data = self.get_data('<STR_LIT>', type=POST, params=params)<EOL>if data:<EOL><INDENT>for attr in data['<STR_LIT:image>'].keys():<EOL><INDENT>setattr(self, attr, data['<STR_LIT:image>'][attr])<EOL><DEDENT><DEDENT>return self<EOL>", "docstring": "Creates a new custom DigitalOcean Image from the Linux virtual machine\nimage located at the provided `url`.", "id": "f1476:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def _is_string(value):<DEDENT>", "body": "if type(value) in [type(u'<STR_LIT>'), type('<STR_LIT>')]:<EOL><INDENT>return True<EOL><DEDENT>elif type(value) in [int, type(<NUM_LIT:2> ** <NUM_LIT:64>)]:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Checks if the value provided is a string (True) or not integer\n(False) or something else (None).", "id": "f1476:c0:m2"}
{"signature": "def transfer(self, new_region_slug):", "body": "return self.get_data(<EOL>\"<STR_LIT>\" % self.id,<EOL>type=POST,<EOL>params={\"<STR_LIT:type>\": \"<STR_LIT>\", \"<STR_LIT>\": new_region_slug}<EOL>)<EOL>", "docstring": "Transfer the image", "id": "f1476:c0:m6"}
{"signature": "def rename(self, new_name):", "body": "return self.get_data(<EOL>\"<STR_LIT>\" % self.id,<EOL>type=PUT,<EOL>params={\"<STR_LIT:name>\": new_name}<EOL>)<EOL>", "docstring": "Rename an image", "id": "f1476:c0:m7"}
{"signature": "@classmethod<EOL><INDENT>def get_object(cls, api_token, image_id_or_slug):<DEDENT>", "body": "if cls._is_string(image_id_or_slug):<EOL><INDENT>image = cls(token=api_token, slug=image_id_or_slug)<EOL>image.load(use_slug=True)<EOL><DEDENT>else:<EOL><INDENT>image = cls(token=api_token, id=image_id_or_slug)<EOL>image.load()<EOL><DEDENT>return image<EOL>", "docstring": "Class method that will return an Image object by ID or slug.\n\nThis method is used to validate the type of the image. If it is a\nnumber, it will be considered as an Image ID, instead if it is a\nstring, it will considered as slug.", "id": "f1476:c0:m1"}
{"signature": "def detach(self, droplet_id, region):", "body": "return self.get_data(<EOL>\"<STR_LIT>\" % self.id,<EOL>type=POST,<EOL>params={\"<STR_LIT:type>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": droplet_id,<EOL>\"<STR_LIT>\": region}<EOL>)<EOL>", "docstring": "Detach a Volume to a Droplet.\n\nArgs:\n    droplet_id: int - droplet id\n    region: string - slug identifier for the region", "id": "f1478:c0:m7"}
{"signature": "def create(self, *args, **kwargs):", "body": "data = self.get_data('<STR_LIT>',<EOL>type=POST,<EOL>params={'<STR_LIT:name>': self.name,<EOL>'<STR_LIT>': self.region,<EOL>'<STR_LIT>': self.size_gigabytes,<EOL>'<STR_LIT:description>': self.description,<EOL>'<STR_LIT>': self.filesystem_type,<EOL>'<STR_LIT>': self.filesystem_label<EOL>})<EOL>if data:<EOL><INDENT>self.id = data['<STR_LIT>']['<STR_LIT:id>']<EOL>self.created_at = data['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>return self<EOL>", "docstring": "Creates a Block Storage volume\n\nNote: Every argument and parameter given to this method will be\nassigned to the object.\n\nArgs:\n    name: string - a name for the volume\n    region: string - slug identifier for the region\n    size_gigabytes: int - size of the Block Storage volume in GiB\n    filesystem_type: string, optional - name of the filesystem type the\n        volume will be formated with ('ext4' or 'xfs')\n    filesystem_label: string, optional - the label to be applied to the\n        filesystem, only used in conjunction with filesystem_type\n\nOptional Args:\n    description: string - text field to describe a volume", "id": "f1478:c0:m3"}
{"signature": "def destroy(self):", "body": "return self.get_data(<EOL>\"<STR_LIT>\" % (self.domain, self.id),<EOL>type=DELETE,<EOL>)<EOL>", "docstring": "Destroy the record", "id": "f1479:c0:m3"}
{"signature": "def create(self):", "body": "input_params = {<EOL>\"<STR_LIT:type>\": self.type,<EOL>\"<STR_LIT:data>\": self.data,<EOL>\"<STR_LIT:name>\": self.name,<EOL>\"<STR_LIT>\": self.priority,<EOL>\"<STR_LIT:port>\": self.port,<EOL>\"<STR_LIT>\": self.ttl,<EOL>\"<STR_LIT>\": self.weight,<EOL>\"<STR_LIT>\": self.flags,<EOL>\"<STR_LIT>\": self.tags<EOL>}<EOL>data = self.get_data(<EOL>\"<STR_LIT>\" % (self.domain),<EOL>type=POST,<EOL>params=input_params,<EOL>)<EOL>if data:<EOL><INDENT>self.id = data['<STR_LIT>']['<STR_LIT:id>']<EOL><DEDENT>", "docstring": "Creates a new record for a domain.\n\nArgs:\n    type (str): The type of the DNS record (e.g. A, CNAME, TXT).\n    name (str): The host name, alias, or service being defined by the\n        record.\n    data (int): Variable data depending on record type.\n    priority (int): The priority for SRV and MX records.\n    port (int): The port for SRV records.\n    ttl (int): The time to live for the record, in seconds.\n    weight (int): The weight for SRV records.\n    flags (int): An unsigned integer between 0-255 used for CAA records.\n    tags (string): The parameter tag for CAA records. Valid values are\n        \"issue\", \"wildissue\", or \"iodef\"", "id": "f1479:c0:m2"}
{"signature": "def save(self):", "body": "data = {<EOL>\"<STR_LIT:type>\": self.type,<EOL>\"<STR_LIT:data>\": self.data,<EOL>\"<STR_LIT:name>\": self.name,<EOL>\"<STR_LIT>\": self.priority,<EOL>\"<STR_LIT:port>\": self.port,<EOL>\"<STR_LIT>\": self.ttl,<EOL>\"<STR_LIT>\": self.weight,<EOL>\"<STR_LIT>\": self.flags,<EOL>\"<STR_LIT>\": self.tags<EOL>}<EOL>return self.get_data(<EOL>\"<STR_LIT>\" % (self.domain, self.id),<EOL>type=PUT,<EOL>params=data<EOL>)<EOL>", "docstring": "Save existing record", "id": "f1479:c0:m4"}
{"signature": "def load(self):", "body": "tags = self.get_data(\"<STR_LIT>\" % self.name)<EOL>tag = tags['<STR_LIT>']<EOL>for attr in tag.keys():<EOL><INDENT>setattr(self, attr, tag[attr])<EOL><DEDENT>return self<EOL>", "docstring": "Fetch data about tag", "id": "f1481:c0:m2"}
{"signature": "def __extract_resources_from_droplets(self, data):", "body": "resources = []<EOL>if not isinstance(data, list): return data<EOL>for a_droplet in data:<EOL><INDENT>res = {}<EOL>try:<EOL><INDENT>if isinstance(a_droplet, unicode):<EOL><INDENT>res = {\"<STR_LIT>\": a_droplet, \"<STR_LIT>\": \"<STR_LIT>\"}<EOL><DEDENT><DEDENT>except NameError:<EOL><INDENT>pass<EOL><DEDENT>if isinstance(a_droplet, str) or isinstance(a_droplet, int):<EOL><INDENT>res = {\"<STR_LIT>\": str(a_droplet), \"<STR_LIT>\": \"<STR_LIT>\"}<EOL><DEDENT>elif isinstance(a_droplet, Droplet):<EOL><INDENT>res = {\"<STR_LIT>\": str(a_droplet.id), \"<STR_LIT>\": \"<STR_LIT>\"}<EOL><DEDENT>if len(res) > <NUM_LIT:0>:<EOL><INDENT>resources.append(res)<EOL><DEDENT><DEDENT>return resources<EOL>", "docstring": "Private method to extract from a value, the resources.\nIt will check the type of object in the array provided and build\nthe right structure for the API.", "id": "f1481:c0:m8"}
{"signature": "def __remove_resources(self, resources):", "body": "return self.__get_resources(resources, method='<STR_LIT>')<EOL>", "docstring": "Remove resources from this tag.\n\nAttributes accepted at creation time:\n    resources: array - See API.", "id": "f1481:c0:m7"}
{"signature": "def __add_resources(self, resources):", "body": "return self.__get_resources(resources, method='<STR_LIT:POST>')<EOL>", "docstring": "Add to the resources to this tag.\n\nAttributes accepted at creation time:\n    resources: array - See API.", "id": "f1481:c0:m6"}
{"signature": "def remove_droplets(self, droplet):", "body": "droplets = droplet<EOL>if not isinstance(droplets, list):<EOL><INDENT>droplets = [droplet]<EOL><DEDENT>resources = self.__extract_resources_from_droplets(droplets)<EOL>if len(resources) > <NUM_LIT:0>:<EOL><INDENT>return self.__remove_resources(resources)<EOL><DEDENT>return False<EOL>", "docstring": "Remove the Tag from the Droplet.\n\nAttributes accepted at creation time:\n    droplet: array of string or array of int, or array of Droplets.", "id": "f1481:c0:m10"}
{"signature": "def add_droplets(self, droplet):", "body": "droplets = droplet<EOL>if not isinstance(droplets, list):<EOL><INDENT>droplets = [droplet]<EOL><DEDENT>resources = self.__extract_resources_from_droplets(droplets)<EOL>if len(resources) > <NUM_LIT:0>:<EOL><INDENT>return self.__add_resources(resources)<EOL><DEDENT>return False<EOL>", "docstring": "Add the Tag to a Droplet.\n\nAttributes accepted at creation time:\n    droplet: array of string or array of int, or array of Droplets.", "id": "f1481:c0:m9"}
{"signature": "def create(self, **kwargs):", "body": "for attr in kwargs.keys():<EOL><INDENT>setattr(self, attr, kwargs[attr])<EOL><DEDENT>params = {\"<STR_LIT:name>\": self.name}<EOL>output = self.get_data(\"<STR_LIT>\", type=\"<STR_LIT:POST>\", params=params)<EOL>if output:<EOL><INDENT>self.name = output['<STR_LIT>']['<STR_LIT:name>']<EOL>self.resources = output['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>", "docstring": "Create the tag.", "id": "f1481:c0:m3"}
{"signature": "def remove_droplets(self, droplet_ids):", "body": "return self.get_data(<EOL>\"<STR_LIT>\" % self.id,<EOL>type=DELETE,<EOL>params={\"<STR_LIT>\": droplet_ids}<EOL>)<EOL>", "docstring": "Unassign a LoadBalancer.\n\nArgs:\n    droplet_ids (obj:`list` of `int`): A list of Droplet IDs", "id": "f1482:c3:m7"}
{"signature": "def add_droplets(self, droplet_ids):", "body": "return self.get_data(<EOL>\"<STR_LIT>\" % self.id,<EOL>type=POST,<EOL>params={\"<STR_LIT>\": droplet_ids}<EOL>)<EOL>", "docstring": "Assign a LoadBalancer to a Droplet.\n\nArgs:\n    droplet_ids (obj:`list` of `int`): A list of Droplet IDs", "id": "f1482:c3:m6"}
{"signature": "def destroy(self):", "body": "return self.get_data('<STR_LIT>' % self.id, type=DELETE)<EOL>", "docstring": "Destroy the LoadBalancer", "id": "f1482:c3:m5"}
{"signature": "def add_forwarding_rules(self, forwarding_rules):", "body": "rules_dict = [rule.__dict__ for rule in forwarding_rules]<EOL>return self.get_data(<EOL>\"<STR_LIT>\" % self.id,<EOL>type=POST,<EOL>params={\"<STR_LIT>\": rules_dict}<EOL>)<EOL>", "docstring": "Adds new forwarding rules to a LoadBalancer.\n\nArgs:\n    forwarding_rules (obj:`list`): A list of `ForwrdingRules` objects", "id": "f1482:c3:m8"}
{"signature": "@classmethod<EOL><INDENT>def get_object(cls, api_token, id):<DEDENT>", "body": "load_balancer = cls(token=api_token, id=id)<EOL>load_balancer.load()<EOL>return load_balancer<EOL>", "docstring": "Class method that will return a LoadBalancer object by its ID.\n\nArgs:\n    api_token (str): DigitalOcean API token\n    id (str): Load Balancer ID", "id": "f1482:c3:m1"}
{"signature": "def parse_type_comment(type_comment):", "body": "try:<EOL><INDENT>result = ast3.parse(type_comment, '<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>except SyntaxError:<EOL><INDENT>raise ValueError(f\"<STR_LIT>\") from None<EOL><DEDENT>assert isinstance(result, ast3.Expression)<EOL>return result.body<EOL>", "docstring": "Parse a type comment string into AST nodes.", "id": "f1486:m50"}
{"signature": "def parse_signature_type_comment(type_comment):", "body": "try:<EOL><INDENT>result = ast3.parse(type_comment, '<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>except SyntaxError:<EOL><INDENT>raise ValueError(f\"<STR_LIT>\")<EOL><DEDENT>assert isinstance(result, ast3.FunctionType)<EOL>if len(result.argtypes) == <NUM_LIT:1>:<EOL><INDENT>argtypes = result.argtypes[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>argtypes = result.argtypes<EOL><DEDENT>return argtypes, result.returns<EOL>", "docstring": "Parse the fugly signature type comment into AST nodes.\n\n    Caveats: ASTifying **kwargs is impossible with the current grammar so we\n    hack it into unary subtraction (to differentiate from Starred in vararg).\n\n    For example from:\n    \"(str, int, *int, **Any) -> 'SomeReturnType'\"\n\n    To:\n    ([ast3.Name, ast.Name, ast3.Name, ast.Name], ast3.Str)", "id": "f1486:m49"}
{"signature": "def new(n, prefix=None):", "body": "if isinstance(n, Leaf):<EOL><INDENT>return Leaf(n.type, n.value, prefix=n.prefix if prefix is None else prefix)<EOL><DEDENT>n.parent = None<EOL>if prefix is not None:<EOL><INDENT>n.prefix = prefix<EOL><DEDENT>return n<EOL>", "docstring": "lib2to3's AST requires unique objects as children.", "id": "f1486:m70"}
{"signature": "def retype_path(<EOL>src, pyi_dir, targets, *, src_explicitly_given=False, quiet=False, hg=False<EOL>):", "body": "if src.is_dir():<EOL><INDENT>for child in src.iterdir():<EOL><INDENT>if child == pyi_dir or child == targets:<EOL><INDENT>continue<EOL><DEDENT>yield from retype_path(<EOL>child, pyi_dir / src.name, targets / src.name, quiet=quiet, hg=hg,<EOL>)<EOL><DEDENT><DEDENT>elif src.suffix == '<STR_LIT>' or src_explicitly_given:<EOL><INDENT>try:<EOL><INDENT>retype_file(src, pyi_dir, targets, quiet=quiet, hg=hg)<EOL><DEDENT>except Exception as e:<EOL><INDENT>yield (<EOL>src,<EOL>str(e),<EOL>type(e),<EOL>traceback.format_tb(e.__traceback__),<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "Recursively retype files or directories given. Generate errors.", "id": "f1486:m1"}
{"signature": "@singledispatch<EOL>def convert_annotation(ann):", "body": "raise NotImplementedError(f\"<STR_LIT>\")<EOL>", "docstring": "Converts an AST object into its lib2to3 equivalent.", "id": "f1486:m18"}
{"signature": "def copy_type_comments_to_annotations(args):", "body": "for arg in args.args:<EOL><INDENT>copy_type_comment_to_annotation(arg)<EOL><DEDENT>if args.vararg:<EOL><INDENT>copy_type_comment_to_annotation(args.vararg)<EOL><DEDENT>for arg in args.kwonlyargs:<EOL><INDENT>copy_type_comment_to_annotation(arg)<EOL><DEDENT>if args.kwarg:<EOL><INDENT>copy_type_comment_to_annotation(args.kwarg)<EOL><DEDENT>", "docstring": "Copies argument type comments from the legacy long form to annotations\n    in the entire function signature.", "id": "f1486:m53"}
{"signature": "def copy_arguments_to_annotations(args, type_comment, *, is_method=False):", "body": "if isinstance(type_comment, ast3.Ellipsis):<EOL><INDENT>return<EOL><DEDENT>expected = len(args.args)<EOL>if args.vararg:<EOL><INDENT>expected += <NUM_LIT:1><EOL><DEDENT>expected += len(args.kwonlyargs)<EOL>if args.kwarg:<EOL><INDENT>expected += <NUM_LIT:1><EOL><DEDENT>actual = len(type_comment) if isinstance(type_comment, list) else <NUM_LIT:1><EOL>if expected != actual:<EOL><INDENT>if is_method and expected - actual == <NUM_LIT:1>:<EOL><INDENT>pass  <EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>f\"<STR_LIT>\" +<EOL>f\"<STR_LIT>\"<EOL>)<EOL><DEDENT><DEDENT>if isinstance(type_comment, list):<EOL><INDENT>next_value = type_comment.pop<EOL><DEDENT>else:<EOL><INDENT>_tc = type_comment<EOL>def next_value(index: int = <NUM_LIT:0>) -> ast3.expr:<EOL><INDENT>return _tc<EOL><DEDENT><DEDENT>for arg in args.args[expected - actual:]:<EOL><INDENT>ensure_no_annotation(arg.annotation)<EOL>arg.annotation = next_value(<NUM_LIT:0>)<EOL><DEDENT>if args.vararg:<EOL><INDENT>ensure_no_annotation(args.vararg.annotation)<EOL>args.vararg.annotation = next_value(<NUM_LIT:0>)<EOL><DEDENT>for arg in args.kwonlyargs:<EOL><INDENT>ensure_no_annotation(arg.annotation)<EOL>arg.annotation = next_value(<NUM_LIT:0>)<EOL><DEDENT>if args.kwarg:<EOL><INDENT>ensure_no_annotation(args.kwarg.annotation)<EOL>args.kwarg.annotation = next_value(<NUM_LIT:0>)<EOL><DEDENT>", "docstring": "Copies AST nodes from `type_comment` into the ast3.arguments in `args`.\n\n    Does validaation of argument count (allowing for untyped self/cls)\n    and type (vararg and kwarg).", "id": "f1486:m52"}
{"signature": "def fix_remaining_type_comments(node):", "body": "assert node.type == syms.file_input<EOL>last_n = None<EOL>for n in node.post_order():<EOL><INDENT>if last_n is not None:<EOL><INDENT>if n.type == token.NEWLINE and is_assignment(last_n):<EOL><INDENT>fix_variable_annotation_type_comment(n, last_n)<EOL><DEDENT>elif n.type == syms.funcdef and last_n.type == syms.suite:<EOL><INDENT>fix_signature_annotation_type_comment(n, last_n, offset=<NUM_LIT:1>)<EOL><DEDENT>elif n.type == syms.async_funcdef and last_n.type == syms.suite:<EOL><INDENT>fix_signature_annotation_type_comment(n, last_n, offset=<NUM_LIT:2>)<EOL><DEDENT><DEDENT>last_n = n<EOL><DEDENT>", "docstring": "Converts type comments in `node` to proper annotated assignments.", "id": "f1486:m39"}
{"signature": "def retype_file(src, pyi_dir, targets, *, quiet=False, hg=False):", "body": "with tokenize.open(src) as src_buffer:<EOL><INDENT>src_encoding = src_buffer.encoding<EOL>src_node = lib2to3_parse(src_buffer.read())<EOL><DEDENT>try:<EOL><INDENT>with open((pyi_dir / src.name).with_suffix('<STR_LIT>')) as pyi_file:<EOL><INDENT>pyi_txt = pyi_file.read()<EOL><DEDENT><DEDENT>except FileNotFoundError:<EOL><INDENT>if not quiet:<EOL><INDENT>print(<EOL>f'<STR_LIT>',<EOL>file=sys.stderr,<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pyi_ast = ast3.parse(pyi_txt)<EOL>assert isinstance(pyi_ast, ast3.Module)<EOL>reapply_all(pyi_ast.body, src_node)<EOL><DEDENT>fix_remaining_type_comments(src_node)<EOL>targets.mkdir(parents=True, exist_ok=True)<EOL>with open(targets / src.name, '<STR_LIT:w>', encoding=src_encoding) as target_file:<EOL><INDENT>target_file.write(lib2to3_unparse(src_node, hg=hg))<EOL><DEDENT>return targets / src.name<EOL>", "docstring": "Retype `src`, finding types in `pyi_dir`. Save in `targets`.\n\n    The file should remain formatted exactly as it was before, save for:\n    - annotations\n    - additional imports needed to satisfy annotations\n    - additional module-level names needed to satisfy annotations\n\n    Type comments in sources are normalized to type annotations.", "id": "f1486:m2"}
{"signature": "def ensure_annotations_equal(name, expected, actual):", "body": "maybe_replace_any_if_equal(name, expected, actual)<EOL>", "docstring": "Raise ValueError if `expected` isn't equal to `actual`.\n\n    If --replace-any is used, the Any type in `actual` is considered equal.", "id": "f1486:m57"}
{"signature": "def __getitem__(self, slice_obj):", "body": "start, stop, step = normalize_slice(slice_obj, self.index.line_count)<EOL>if isinstance(slice_obj, slice):<EOL><INDENT>if step == <NUM_LIT:1>:<EOL><INDENT>return self._get_lines(start, stop)<EOL><DEDENT>return [self._get_lines(i)[<NUM_LIT:0>] for i in range(start, stop, step)]<EOL><DEDENT>if isinstance(slice_obj, int):<EOL><INDENT>return self._get_lines(start)[<NUM_LIT:0>]<EOL><DEDENT>return None<EOL>", "docstring": "Supports slice operations on the file\n\nFor example:\n\n    with IndexedOpen(filename) as f:\n        print f[6:-2]", "id": "f1492:c0:m4"}
{"signature": "@property<EOL><INDENT>def index_path(self):<DEDENT>", "body": "return Path(str(self.filepath) + \"<STR_LIT>\")<EOL>", "docstring": "the path to the index file", "id": "f1492:c0:m7"}
{"signature": "def create_index(file_path, index_path, index_ratio, index_width):", "body": "i = <NUM_LIT:0><EOL>with file_path.open() as f:<EOL><INDENT>with index_path.open(\"<STR_LIT:wb>\") as idx:<EOL><INDENT>idx.write(index_ratio.to_bytes(<NUM_LIT:1>, byteorder=\"<STR_LIT>\"))<EOL>idx.write(index_width.to_bytes(<NUM_LIT:1>, byteorder=\"<STR_LIT>\"))<EOL>idx.write((<NUM_LIT:0>).to_bytes(<NUM_LIT:32>, byteorder=\"<STR_LIT>\"))  <EOL>idx.write((<NUM_LIT:0>).to_bytes(index_width, byteorder=\"<STR_LIT>\"))<EOL>while f.readline():<EOL><INDENT>i += <NUM_LIT:1><EOL>if (i % index_ratio) == <NUM_LIT:0>:<EOL><INDENT>pointer = f.tell()<EOL>b = pointer.to_bytes(index_width, byteorder=\"<STR_LIT>\")<EOL>idx.write(b)<EOL><DEDENT><DEDENT>idx.seek(<NUM_LIT:2>)<EOL>idx.write(i.to_bytes(<NUM_LIT:32>, byteorder=\"<STR_LIT>\"))<EOL><DEDENT>t = file_path.stat().st_mtime<EOL>os.utime(str(index_path), (t, t))<EOL><DEDENT>", "docstring": "Index format:\n    1st byte: index_ratio\n    2nd byte: index_width\n    3rd byte: line_count", "id": "f1492:m0"}
{"signature": "def normalize_slice(slice_obj, length):", "body": "if isinstance(slice_obj, slice):<EOL><INDENT>start, stop, step = slice_obj.start, slice_obj.stop, slice_obj.step<EOL>if start is None:<EOL><INDENT>start = <NUM_LIT:0><EOL><DEDENT>if stop is None:<EOL><INDENT>stop = length<EOL><DEDENT>if step is None:<EOL><INDENT>step = <NUM_LIT:1><EOL><DEDENT>if start < <NUM_LIT:0>:<EOL><INDENT>start += length<EOL><DEDENT>if stop < <NUM_LIT:0>:<EOL><INDENT>stop += length<EOL><DEDENT><DEDENT>elif isinstance(slice_obj, int):<EOL><INDENT>start = slice_obj<EOL>if start < <NUM_LIT:0>:<EOL><INDENT>start += length<EOL><DEDENT>stop = start + <NUM_LIT:1><EOL>step = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>raise TypeError<EOL><DEDENT>if (<NUM_LIT:0> <= start <= length) and (<NUM_LIT:0> <= stop <= length):<EOL><INDENT>return start, stop, step<EOL><DEDENT>raise IndexError<EOL>", "docstring": "Given a slice object, return appropriate values for use in the range function\n\n:param slice_obj: The slice object or integer provided in the `[]` notation\n:param length: For negative indexing we need to know the max length of the object.", "id": "f1494:m0"}
{"signature": "def setUp(self):", "body": "global engine<EOL>engine = create_engine('<STR_LIT>', echo=False)<EOL>global Session<EOL>Session = sessionmaker(bind=engine)<EOL>global session<EOL>session = Session()<EOL>session._model_changes = {}<EOL>Base.metadata.create_all(bind=engine)<EOL>session.add_all([<EOL>User(name='<STR_LIT>', lastname='<STR_LIT>', uid='<STR_LIT>', city_id=<NUM_LIT:1>),<EOL>User(name='<STR_LIT>', lastname='<STR_LIT>', uid='<STR_LIT>', city_id=<NUM_LIT:2>),<EOL>User(name='<STR_LIT>', lastname='<STR_LIT>', uid='<STR_LIT>', city_id=<NUM_LIT:1>),<EOL>City(name='<STR_LIT>'),<EOL>City(name='<STR_LIT>')<EOL>])<EOL>session.commit()<EOL>", "docstring": "Initial setup for the test", "id": "f1495:c2:m0"}
{"signature": "def elastic_query(model, query, session=None, enabled_fields=None):", "body": "<EOL>instance = ElasticQuery(model, query, session, enabled_fields)<EOL>return instance.search()<EOL>", "docstring": "Public method for init the class ElasticQuery\n        :model: SQLAlchemy model\n        :query: valid string like a ElasticSearch\n        :session: SQLAlchemy session *optional\n        :enabled_fields: Fields allowed for make a query *optional", "id": "f1497:m0"}
{"signature": "def search(self):", "body": "try:<EOL><INDENT>filters = json.loads(self.query)<EOL><DEDENT>except ValueError:<EOL><INDENT>return False<EOL><DEDENT>result = self.model_query<EOL>if '<STR_LIT>'in filters.keys():<EOL><INDENT>result = self.parse_filter(filters['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>'in filters.keys():<EOL><INDENT>result = result.order_by(*self.sort(filters['<STR_LIT>']))<EOL><DEDENT>return result<EOL>", "docstring": "This is the most important method", "id": "f1497:c0:m1"}
{"signature": "def create_query(self, attr):", "body": "field = attr[<NUM_LIT:0>]<EOL>operator = attr[<NUM_LIT:1>]<EOL>value = attr[<NUM_LIT:2>]<EOL>model = self.model<EOL>if '<STR_LIT:.>' in field:<EOL><INDENT>field_items = field.split('<STR_LIT:.>')<EOL>field_name = getattr(model, field_items[<NUM_LIT:0>], None)<EOL>class_name = field_name.property.mapper.class_<EOL>new_model = getattr(class_name, field_items[<NUM_LIT:1>])<EOL>return field_name.has(OPERATORS[operator](new_model, value))<EOL><DEDENT>return OPERATORS[operator](getattr(model, field, None), value)<EOL>", "docstring": "Mix all values and make the query", "id": "f1497:c0:m6"}
{"signature": "def make_literal(s):", "body": "return partial(s, tri(string), s)<EOL>", "docstring": "returns a literal parser", "id": "f1511:m4"}
{"signature": "def caseless_literal(s):", "body": "return make_caseless_literal(s)()<EOL>", "docstring": "A literal string, case independant.", "id": "f1511:m7"}
{"signature": "def make_caseless_literal(s):", "body": "return partial(s, tri(caseless_string), s)<EOL>", "docstring": "returns a literal string, case independant parser.", "id": "f1511:m6"}
{"signature": "def literal(s):", "body": "return make_literal(s)()<EOL>", "docstring": "A literal string.", "id": "f1511:m5"}
{"signature": "def quoted(parser=any_token):", "body": "quote_char = quote()<EOL>value, _ = many_until(parser, partial(one_of, quote_char))<EOL>return build_string(value)<EOL>", "docstring": "Parses as much as possible until it encounters a matching closing quote.\n\n    By default matches any_token, but can be provided with a more specific parser if required.\n    Returns a string", "id": "f1511:m3"}
{"signature": "def satisfies(guard):", "body": "i = peek()<EOL>if (i is EndOfFile) or (not guard(i)):<EOL><INDENT>fail([\"<STR_LIT>\" + _fun_to_str(guard) + \"<STR_LIT:>>\"])<EOL><DEDENT>next()<EOL>return i<EOL>", "docstring": "Returns the current token if it satisfies the guard function provided.\n\n    Fails otherwise.\n    This is the a generalisation of one_of.", "id": "f1512:m10"}
{"signature": "def seq(*sequence):", "body": "results = {}<EOL>for p in sequence:<EOL><INDENT>if callable(p): <EOL><INDENT>p()<EOL>continue<EOL><DEDENT>k, v = p<EOL>results[k] = v()<EOL><DEDENT>return results<EOL>", "docstring": "Runs a series of parsers in sequence optionally storing results in a returned dictionary.\n\n    For example:\n    seq(whitespace, ('phone', digits), whitespace, ('name', remaining))", "id": "f1512:m25"}
{"signature": "def optional(parser, default=None):", "body": "return choice(parser, lambda: default)<EOL>", "docstring": "Tries to apply the provided parser, returning default if the parser fails.", "id": "f1512:m11"}
{"signature": "def string(string):", "body": "found = []<EOL>for c in string:<EOL><INDENT>found.append(one_of(c))<EOL><DEDENT>return found<EOL>", "docstring": "Iterates over string, matching input to the items provided.\n\n    The most obvious usage of this is to accept an entire string of characters,\n    However this is function is more general than that. It takes an iterable\n    and for each item, it tries one_of for that set. For example, \n       string(['aA','bB','cC'])\n    will accept 'abc' in either case. \n\n    note, If you wish to match caseless strings as in the example, use \n    picoparse.text.caseless_string.", "id": "f1512:m21"}
{"signature": "def many1(parser):", "body": "return [parser()] + many(parser)<EOL>", "docstring": "Like many, but must consume at least one of parser", "id": "f1512:m14"}
{"signature": "def compose(f, g):", "body": "return lambda *args, **kwargs: f(g(*args, **kwargs))<EOL>", "docstring": "Compose returns a two functions composed as a new function.\n\n    The first is called with the result of the second as its argument. Any arguments \n    are passed to the second.", "id": "f1512:m4"}
{"signature": "def not_followed_by(parser):", "body": "@tri<EOL>def not_followed_by_block():<EOL><INDENT>failed = object()<EOL>result = optional(tri(parser), failed)<EOL>if result != failed:<EOL><INDENT>fail([\"<STR_LIT>\" + _fun_to_str(parser)])<EOL><DEDENT><DEDENT>choice(not_followed_by_block)<EOL>", "docstring": "Succeeds if the given parser cannot consume input", "id": "f1512:m12"}
{"signature": "def allele_expectation(bgen, variant_idx):", "body": "geno = bgen[\"<STR_LIT>\"][variant_idx].compute()<EOL>if geno[\"<STR_LIT>\"]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>nalleles = bgen[\"<STR_LIT>\"].loc[variant_idx, \"<STR_LIT>\"].compute().item()<EOL>genotypes = get_genotypes(geno[\"<STR_LIT>\"], nalleles)<EOL>expec = []<EOL>for i in range(len(genotypes)):<EOL><INDENT>count = asarray(genotypes_to_allele_counts(genotypes[i]), float)<EOL>n = count.shape[<NUM_LIT:0>]<EOL>expec.append((count.T * geno[\"<STR_LIT>\"][i, :n]).sum(<NUM_LIT:1>))<EOL><DEDENT>return stack(expec, axis=<NUM_LIT:0>)<EOL>", "docstring": "r\"\"\" Allele expectation.\n\n    Compute the expectation of each allele from the genotype probabilities.\n\n    Parameters\n    ----------\n    bgen : bgen_file\n        Bgen file handler.\n    variant_idx : int\n        Variant index.\n\n    Returns\n    -------\n    :class:`numpy.ndarray`\n        Samples-by-alleles matrix of allele expectations.\n\n    Note\n    ----\n    This function supports unphased genotypes only.\n\n    Examples\n    --------\n    .. 
doctest::\n\n        >>> from bgen_reader import allele_expectation, example_files, read_bgen\n        >>>\n        >>> from texttable import Texttable\n        >>>\n        >>> # Download an example.\n        >>> example = example_files(\"example.32bits.bgen\")\n        >>> filepath = example.filepath\n        >>>\n        >>> # Read the example.\n        >>> bgen = read_bgen(filepath, verbose=False)\n        >>>\n        >>> variants = bgen[\"variants\"]\n        >>> samples = bgen[\"samples\"]\n        >>> genotype = bgen[\"genotype\"]\n        >>>\n        >>> genotype = bgen[\"genotype\"]\n        >>> # This `compute` call will return a pandas data frame,\n        >>> variant = variants[variants[\"rsid\"] == \"RSID_6\"].compute()\n        >>> # from which we retrieve the variant index.\n        >>> variant_idx = variant.index.item()\n        >>> print(variant)\n                id    rsid chrom   pos  nalleles allele_ids  vaddr\n        4  SNPID_6  RSID_6    01  6000         2        A,G  19377\n        >>> genotype = bgen[\"genotype\"]\n        >>> # Samples is a pandas series, and we retrieve the\n        >>> # sample index from the sample name.\n        >>> sample_idx = samples[samples == \"sample_005\"].index.item()\n        >>>\n        >>> genotype = bgen[\"genotype\"]\n        >>> # This `compute` call will return a dictionary from which\n        >>> # we can get the probability matrix the corresponding\n        >>> # variant.\n        >>> p = genotype[variant_idx].compute()[\"probs\"][sample_idx]\n        >>>\n        >>> genotype = bgen[\"genotype\"]\n        >>> # Allele expectation makes sense for unphased genotypes only,\n        >>> # which is the case here.\n        >>> e = allele_expectation(bgen, variant_idx)[sample_idx]\n        >>>\n        >>> genotype = bgen[\"genotype\"]\n        >>> alleles = variant[\"allele_ids\"].item().split(\",\")\n        >>>\n        >>> genotype = bgen[\"genotype\"]\n        >>>\n        >>> # Print what we have 
got in a nice format.\n        >>> table = Texttable()\n        >>> table = table.add_rows(\n        ...     [\n        ...         [\"\", \"AA\", \"AG\", \"GG\", \"E[.]\"],\n        ...         [\"p\"] + list(p) + [\"na\"],\n        ...         [\"#\" + alleles[0], 2, 1, 0, e[0]],\n        ...         [\"#\" + alleles[1], 0, 1, 2, e[1]],\n        ...     ]\n        ... )\n        >>> print(table.draw())\n        +----+-------+-------+-------+-------+\n        |    |  AA   |  AG   |  GG   | E[.]  |\n        +====+=======+=======+=======+=======+\n        | p  | 0.012 | 0.987 | 0.001 | na    |\n        +----+-------+-------+-------+-------+\n        | #A | 2     | 1     | 0     | 1.011 |\n        +----+-------+-------+-------+-------+\n        | #G | 0     | 1     | 2     | 0.989 |\n        +----+-------+-------+-------+-------+\n        >>>\n        >>> # Clean-up.\n        >>> example.close()", "id": "f1519:m2"}
{"signature": "def allele_frequency(expec):", "body": "expec = asarray(expec, float)<EOL>if expec.ndim != <NUM_LIT:2>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>ploidy = expec.shape[-<NUM_LIT:1>]<EOL>return expec.sum(-<NUM_LIT:2>) / ploidy<EOL>", "docstring": "r\"\"\" Compute allele frequency from its expectation.\n\n    Parameters\n    ----------\n    expec : array_like\n        Allele expectations encoded as a samples-by-alleles matrix.\n\n    Returns\n    -------\n    :class:`numpy.ndarray`\n        Allele frequencies encoded as a variants-by-alleles matrix.\n\n    Examples\n    --------\n    .. doctest::\n\n        >>> from bgen_reader import read_bgen, example_files\n        >>> from bgen_reader import allele_expectation, allele_frequency\n        >>>\n        >>> # Download an example\n        >>> example = example_files(\"example.32bits.bgen\")\n        >>> filepath = example.filepath\n        >>>\n        >>> bgen = read_bgen(filepath, verbose=False)\n        >>>\n        >>> variants = bgen[\"variants\"]\n        >>> samples = bgen[\"samples\"]\n        >>> genotype = bgen[\"genotype\"]\n        >>>\n        >>> variant = variants[variants[\"rsid\"] == \"RSID_6\"].compute()\n        >>> variant_idx = variant.index.item()\n        >>>\n        >>> p = genotype[variant_idx].compute()[\"probs\"]\n        >>> # For unphased genotypes only.\n        >>> e = allele_expectation(bgen, variant_idx)\n        >>> f = allele_frequency(e)\n        >>>\n        >>> alleles = variant[\"allele_ids\"].item().split(\",\")\n        >>> print(alleles[0] + \": {}\".format(f[0]))\n        A: 229.23103218810434\n        >>> print(alleles[1] + \": {}\".format(f[1]))\n        G: 270.7689678118956\n        >>> print(variant)\n                id    rsid chrom   pos  nalleles allele_ids  vaddr\n        4  SNPID_6  RSID_6    01  6000         2        A,G  19377\n        >>>\n        >>> # Clean-up the example\n        >>> example.close()", "id": "f1519:m0"}
{"signature": "def _touch(fname, mode=<NUM_LIT>, dir_fd=None, **kwargs):", "body": "flags = os.O_CREAT | os.O_APPEND<EOL>with os.fdopen(os.open(fname, flags=flags, mode=mode, dir_fd=dir_fd)) as f:<EOL><INDENT>os.utime(<EOL>f.fileno() if os.utime in os.supports_fd else fname,<EOL>dir_fd=None if os.supports_fd else dir_fd,<EOL>**kwargs,<EOL>)<EOL><DEDENT>", "docstring": "Touch a file.\n\n    Credits to <https://stackoverflow.com/a/1160227>.", "id": "f1521:m4"}
{"signature": "def point_distance(point1, point2):", "body": "return ((point1[<NUM_LIT:0>] - point2[<NUM_LIT:0>]) ** <NUM_LIT:2> + (point1[<NUM_LIT:1>] - point2[<NUM_LIT:1>]) ** <NUM_LIT:2>) ** <NUM_LIT:0.5><EOL>", "docstring": "Computes the distance beteen two points on a plane.\n\nArgs:\n    point1: Tuple or list, the x and y coordinate of the first point.\n\n    point2: Tuple or list, the x and y coordinate of the second point.\n\nReturns:\n    The distance between the two points as a floating point number.", "id": "f1541:m8"}
{"signature": "def rectangle_area(width, height):", "body": "return width * height<EOL>", "docstring": "Returns the area of a rectangle with the given width and height.\n\n    Args:\n        width:\n            Integer or float, width of the rectangle.\n\n        height: Integer or float, height of the rectangle.\n\n    Returns:\n        The area of a rectangle as an integer or float.", "id": "f1541:m3"}
{"signature": "def average(numbers, numtype='<STR_LIT:float>'):", "body": "if type == '<STR_LIT>':<EOL><INDENT>return Decimal(sum(numbers)) / len(numbers)<EOL><DEDENT>else:<EOL><INDENT>return float(sum(numbers)) / len(numbers)<EOL><DEDENT>", "docstring": "Calculates the average or mean of a list of numbers\n\nArgs:\n    numbers: a list of integers or floating point numbers.\n\n    numtype: string, 'decimal' or 'float'; the type of number to return.\n\nReturns:\n    The average (mean) of the numbers as a floating point number\n    or a Decimal object.\n\nRequires:\n    The math module", "id": "f1541:m13"}
{"signature": "def miles_to_feet(miles):", "body": "return miles * float(<NUM_LIT>)<EOL>", "docstring": "Converts a number of miles to feet.\n\nArgs:\n    miles: Number of miles we want to convert.\n\nReturns:\n    Floating point number as the number of\n    feet in the given miles.", "id": "f1541:m0"}
{"signature": "def get_percentage(a, b, i=False, r=False):", "body": "<EOL>if i is False and r is True:<EOL><INDENT>percentage = round(<NUM_LIT> * (float(a) / b), <NUM_LIT:2>)<EOL><DEDENT>elif (i is True and r is True) or (i is True and r is False):<EOL><INDENT>percentage = int(round(<NUM_LIT:100> * (float(a) / b)))<EOL>if r is False:<EOL><INDENT>warnings.warn(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>percentage = <NUM_LIT> * (float(a) / b)<EOL><DEDENT>return percentage<EOL>", "docstring": "Finds the percentage of one number over another.\n\nArgs:\n    a: The number that is a percent, int or float.\n\n    b: The base number that a is a percent of, int or float.\n\n    i: Optional boolean integer. True if the user wants the result returned as\n    a whole number. Assumes False.\n\n    r: Optional boolean round. True if the user wants the result rounded.\n    Rounds to the second decimal point on floating point numbers. Assumes False.\n\nReturns:\n    The argument a as a percentage of b. Throws a warning if integer is set to True\n    and round is set to False.", "id": "f1541:m16"}
{"signature": "def future_value(present_value, annual_rate, periods_per_year, years):", "body": "<EOL>rate_per_period = annual_rate / float(periods_per_year)<EOL>periods = periods_per_year * years<EOL>return present_value * (<NUM_LIT:1> + rate_per_period) ** periods<EOL>", "docstring": "Calculates the future value of money invested at an anual interest rate,\nx times per year, for a given number of years.\n\nArgs:\n    present_value: int or float, the current value of the money (principal).\n\n    annual_rate: float 0 to 1 e.g., .5 = 50%), the interest rate paid out.\n\n    periods_per_year: int, the number of times money is invested per year.\n\n    years: int, the number of years invested.\n\nReturns:\n    Float, the future value of the money invested with compound interest.", "id": "f1541:m7"}
{"signature": "def total_seconds(hours, minutes, seconds):", "body": "return (hours * <NUM_LIT> + minutes) * <NUM_LIT> + seconds<EOL>", "docstring": "Returns the number of seconds in the given number of hours,\nminutes, and seconds.\n\nArgs:\n    hours:\n        Integer, number of hours.\n\n    minutes:\n        Integer, number of minutes.\n\n    seconds:\n        Integer, number of seconds.\n\nReturns:\n    Integer, time in seconds.", "id": "f1541:m1"}
{"signature": "def get_full_binary_tree_nodes(height):", "body": "return <NUM_LIT:2> ** (height + <NUM_LIT:1>) - <NUM_LIT:1><EOL>", "docstring": "Calculate the number of internal nodes in a complete binary tree in which each\ninternal node has exactly two children. A full binary tree is complete if every\nleaf in the tree has the same depth. Internal nodes include both leaves and\ninternal nodes. The root node is also included in this calculation.\n\nArgs:\n    height: integer, the height of the tree. Height is defined by the number\n    of edges from the furthest child to the root. An edge is the line segment\n    that runs between and connects nodes.", "id": "f1541:m19"}
{"signature": "def is_leap_year(year):", "body": "if (year % <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>return True<EOL><DEDENT>elif (year % <NUM_LIT:100>) == <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>elif (year % <NUM_LIT:4>) == <NUM_LIT:0>:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Checks to see if a given year is a leap year.\n\nArgs:\n    Integer, the year to test.\n\nReturns:\n    Boolean", "id": "f1541:m10"}
{"signature": "def savings_rate(take_home_pay, spending, numtype='<STR_LIT:float>'):", "body": "if numtype == '<STR_LIT>':<EOL><INDENT>try:<EOL><INDENT>return (<EOL>(Decimal(take_home_pay) - Decimal(spending)) / (Decimal(take_home_pay))<EOL>) * Decimal(<NUM_LIT>)<EOL><DEDENT>except (InvalidOperation, DivisionByZero):<EOL><INDENT>return Decimal(<NUM_LIT:0.0>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>return (<EOL>(float(take_home_pay) - float(spending)) / (float(take_home_pay))<EOL>) * <NUM_LIT><EOL><DEDENT>except (ZeroDivisionError):<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT><DEDENT>", "docstring": "Calculate net take-home pay including employer retirement savings match\nusing the formula laid out by Mr. Money Mustache:\nhttp://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/\n\nArgs:\n    take_home_pay: float or int, monthly take-home pay\n\n    spending: float or int, monthly spending\n\n    numtype: string, 'decimal' or 'float'; the type of number to return.\n\nReturns:\n    your monthly savings rate expressed as a percentage.", "id": "f1541:m21"}
{"signature": "def get_full_binary_tree_leaves(height):", "body": "return <NUM_LIT:2> ** height<EOL>", "docstring": "Calculate the number of leaves in a complete binary tree in which each internal\nnode has exactly two children. A full binary tree is complete if every leaf\nin the tree has the same depth. A leaf is a node without children\n\nArgs:\n    height: integer, the height of the tree. Height is defined by the number\n    of edges from the furthest child to the root. An edge is the line segment\n    that runs between and connects nodes.", "id": "f1541:m18"}
{"signature": "def get_slope(point1, point2):", "body": "return (float(point2[<NUM_LIT:1>]) - point1[<NUM_LIT:1>]) / (float(point2[<NUM_LIT:0>]) - point1[<NUM_LIT:0>])<EOL>", "docstring": "Calculate the slope of the line connecting two points on a grid.\n\nArgs:\n    point1: Tuple or list, the x and y coordinate of the first point.\n\n    point2: Tuple or list, the x and y coordinate of the second point\n\nReturns:\n    the slope of a line connecting two points on a grid.", "id": "f1541:m17"}
{"signature": "def standard_deviation(variance):", "body": "return variance ** <NUM_LIT:0.5><EOL>", "docstring": "Calculates the standard deviation.\n\nArgs:\n    variance: The variance of a group of numbers.\n\nReturns:\n    The standard deviation as a floating point number.", "id": "f1541:m15"}
{"signature": "def take_home_pay(gross_pay, employer_match, taxes_and_fees, numtype='<STR_LIT:float>'):", "body": "if numtype == '<STR_LIT>':<EOL><INDENT>return (Decimal(gross_pay) + Decimal(employer_match)) - Decimal(<EOL>sum(taxes_and_fees)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>return (float(gross_pay) + float(employer_match)) - sum(taxes_and_fees)<EOL><DEDENT>", "docstring": "Calculate net take-home pay including employer retirement savings match\nusing the formula laid out by Mr. Money Mustache:\nhttp://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/\n\nArgs:\n    gross_pay: float or int, gross monthly pay.\n\n    employer_match: float or int, the 401(k) match from your employer.\n\n    taxes_and_fees: list, taxes and fees that are deducted from your paycheck.\n\n    numtype: string, 'decimal' or 'float'; the type of number to return.\n\nReturns:\n    your monthly take-home pay.", "id": "f1541:m20"}
{"signature": "def triangle_area(point1, point2, point3):", "body": "\"\"\"<STR_LIT>\"\"\"<EOL>a = point_distance(point1, point2)<EOL>b = point_distance(point1, point3)<EOL>c = point_distance(point2, point3)<EOL>\"\"\"<STR_LIT>\"\"\"<EOL>s = (a + b + c) / <NUM_LIT><EOL>\"\"\"<STR_LIT>\"\"\"<EOL>return math.sqrt(s * (s - a) * (s - b) * (s - c))<EOL>", "docstring": "Uses Heron's formula to find the area of a triangle\nbased on the coordinates of three points.\n\nArgs:\n    point1: list or tuple, the x y coordinate of point one.\n\n    point2: list or tuple, the x y coordinate of point two.\n\n    point3: list or tuple, the x y coordinate of point three.\n\nReturns:\n    The area of a triangle as a floating point number.\n\nRequires:\n    The math module, point_distance().", "id": "f1541:m9"}
{"signature": "def compound_interest(principal, annual_rate, years):", "body": "return principal * (<NUM_LIT:1> + <NUM_LIT> * annual_rate) ** years<EOL>", "docstring": "Returns the future value of money invested at an annual\ninterest rate, compounded annually for a given number of years.\n\nArgs:\n    principal: The beginning ammount of money invested\n\n    annual_rate: The interest rate paid out\n\n    years: The number of years invested\n\nReturns:\n    A basic calculation of compound interest.", "id": "f1541:m6"}
{"signature": "def foo(a):", "body": "assert a < <NUM_LIT:0><EOL>", "docstring": "Meaningless...", "id": "f1544:m0"}
{"signature": "def get_built_in(self, language, level, data):", "body": "<EOL>pp = pprint.PrettyPrinter(indent=level)<EOL>lookup = {'<STR_LIT>' : pp.pformat(data),<EOL>'<STR_LIT>' : str(json.dumps(data, sort_keys=True, indent=level, separators=('<STR_LIT:U+002C>', '<STR_LIT>')))}<EOL>self.data_structure = lookup[language]<EOL>", "docstring": "Gets the return string for a language that's supported by python.\nUsed in cases when python provides support for the conversion.\n\nArgs:\n    language: string the langage to return for.\n\n    level: integer, the indentation level.\n\n    data: python data structure being converted (list of tuples)\n\nReturns:\n    None, updates self.data_structure", "id": "f1547:c0:m3"}
{"signature": "def excel_to_html(path, sheetname='<STR_LIT>', css_classes='<STR_LIT>',caption='<STR_LIT>', details=[], row_headers=False, merge=False):", "body": "def get_data_on_merged_cells():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>merged_cells = xls.book.sheet_by_name(sheetname).merged_cells<EOL>ds = {}<EOL>for crange in merged_cells:<EOL><INDENT>rlo, rhi, clo, chi = crange<EOL>for rowx in range(rlo, rhi):<EOL><INDENT>for colx in range(clo, chi):<EOL><INDENT>parent_cell = (rlo,clo)<EOL>child_cell = (rowx,colx)<EOL>if not parent_cell in ds:<EOL><INDENT>ds[parent_cell] = [[<NUM_LIT:1>,<NUM_LIT:1>], set([])]<EOL><DEDENT>else:<EOL><INDENT>if parent_cell != child_cell and child_cell[<NUM_LIT:0>] == parent_cell[<NUM_LIT:0>]:<EOL><INDENT>ds[parent_cell][<NUM_LIT:0>][<NUM_LIT:0>] += <NUM_LIT:1><EOL>ds[parent_cell][<NUM_LIT:1>].add('<STR_LIT:right>')<EOL><DEDENT>elif parent_cell != child_cell and child_cell[<NUM_LIT:0>] > parent_cell[<NUM_LIT:0>]:<EOL><INDENT>if child_cell[<NUM_LIT:1>] == parent_cell[<NUM_LIT:1>]:<EOL><INDENT>ds[parent_cell][<NUM_LIT:0>][<NUM_LIT:1>] += <NUM_LIT:1><EOL><DEDENT>ds[parent_cell][<NUM_LIT:1>].add('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return ds<EOL><DEDENT>def mark_cells_going_right(cell, curr_cell, merged_cells):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>try:<EOL><INDENT>xcount = merged_cells[curr_cell][<NUM_LIT:0>][<NUM_LIT:0>]<EOL>if xcount > <NUM_LIT:1>: <EOL><INDENT>cell['<STR_LIT>'] = xcount<EOL><DEDENT>col_count = xcount - <NUM_LIT:1> <EOL>while col_count > <NUM_LIT:0>:<EOL><INDENT>cell = cell.find_next_sibling()<EOL>cell['<STR_LIT:class>'] = '<STR_LIT>'<EOL>col_count -= <NUM_LIT:1><EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>def mark_cells_going_down(cell, curr_cell, merged_cells):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if curr_cell in merged_cells and merged_cells[curr_cell][<NUM_LIT:1>] == set(['<STR_LIT>']):<EOL><INDENT>ycount = 
merged_cells[curr_cell][<NUM_LIT:0>][<NUM_LIT:1>]<EOL>cell['<STR_LIT>'] = ycount<EOL>row_count = ycount<EOL>for child_row in cell.parent.find_next_siblings(limit=row_count - <NUM_LIT:1>):<EOL><INDENT>i = <NUM_LIT:0><EOL>for child in child_row.find_all('<STR_LIT>'):<EOL><INDENT>if i == curr_cell[<NUM_LIT:1>]:<EOL><INDENT>child['<STR_LIT:class>'] = '<STR_LIT>'<EOL><DEDENT>i += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>def mark_cells_going_down_and_right(cell, curr_cell, merged_cells):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if curr_cell in merged_cells and('<STR_LIT>' in merged_cells[curr_cell][<NUM_LIT:1>] and'<STR_LIT:right>' in merged_cells[curr_cell][<NUM_LIT:1>]):<EOL><INDENT>xcount = merged_cells[curr_cell][<NUM_LIT:0>][<NUM_LIT:0>]<EOL>ycount = merged_cells[curr_cell][<NUM_LIT:0>][<NUM_LIT:1>]<EOL>row_count = ycount<EOL>col_count = xcount<EOL>mark_cells_going_right(cell, curr_cell, merged_cells)<EOL>flag = False<EOL>for child_row in [cell.parent] + cell.parent.find_all_next('<STR_LIT>', limit=row_count - <NUM_LIT:1>):<EOL><INDENT>i = <NUM_LIT:0><EOL>for child in child_row.find_all('<STR_LIT>'):<EOL><INDENT>if i == curr_cell[<NUM_LIT:1>]:<EOL><INDENT>mark_cells_going_right(child, curr_cell, merged_cells)<EOL>if not flag:<EOL><INDENT>child['<STR_LIT>'] = col_count<EOL>child['<STR_LIT>'] = row_count<EOL>flag = True<EOL><DEDENT>else:<EOL><INDENT>child['<STR_LIT:class>'] = '<STR_LIT>'<EOL><DEDENT><DEDENT>i += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>def is_empty_th(string):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if string[:<NUM_LIT:8>] == '<STR_LIT>':<EOL><INDENT>data = string.split('<STR_LIT:U+0020>')<EOL>if is_numeric(data[<NUM_LIT:1>]):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL><DEDENT>def mark_header_cells(html):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>th = html.find_all('<STR_LIT>')<EOL>for header in th:<EOL><INDENT>txt = header.string<EOL>if not is_empty_th(txt):<EOL><INDENT>header['<STR_LIT>'] = '<STR_LIT>'<EOL>count = 
<NUM_LIT:1><EOL>for sibling in header.find_next_siblings():<EOL><INDENT>if is_empty_th(sibling.string):<EOL><INDENT>count += <NUM_LIT:1><EOL>sibling['<STR_LIT:class>'] = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if count > <NUM_LIT:1>:<EOL><INDENT>header['<STR_LIT>'] = count<EOL>header['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT><DEDENT><DEDENT><DEDENT>def create_caption(html, caption):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>ctag = html.new_tag('<STR_LIT>')<EOL>ctag.insert(<NUM_LIT:0>, caption)<EOL>html.table.insert(<NUM_LIT:0>, ctag)<EOL><DEDENT>def create_summary_and_details(html, details):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if len(details) != <NUM_LIT:2>:<EOL><INDENT>msg = '<STR_LIT>'+ '<STR_LIT>'+ '<STR_LIT>'+ '<STR_LIT>'<EOL>raise RuntimeError(msg)<EOL><DEDENT>summary = details[<NUM_LIT:0>]<EOL>details = details[<NUM_LIT:1>]<EOL>if not caption:<EOL><INDENT>create_caption(html, caption)<EOL><DEDENT>dtag = html.new_tag('<STR_LIT>')<EOL>stag = html.new_tag('<STR_LIT>')<EOL>ptag = html.new_tag('<STR_LIT:p>')<EOL>stag.insert(<NUM_LIT:0>, summary)<EOL>ptag.insert(<NUM_LIT:0>, details)<EOL>dtag.insert(<NUM_LIT:0>, stag)<EOL>dtag.append(ptag) <EOL>html.table.caption.insert(<NUM_LIT:1>, dtag)   <EOL><DEDENT>def format_properly(html):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return html.replace('<STR_LIT>', '<STR_LIT>').replace('<STR_LIT>','<STR_LIT>').replace('<STR_LIT>', '<STR_LIT>').replace('<STR_LIT>','<STR_LIT>').replace('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>def add_row_headers(html):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>for row in html.tbody.find_all('<STR_LIT>'):<EOL><INDENT>spans_rows = '<STR_LIT>' in row.td.attrs<EOL>spans_columns = '<STR_LIT>' in row.td.attrs<EOL>new_tag = html.new_tag('<STR_LIT>')<EOL>new_tag['<STR_LIT>'] = '<STR_LIT>'<EOL>new_tag.string = row.td.string<EOL>if spans_rows:<EOL><INDENT>new_tag['<STR_LIT>'] = row.td.attrs['<STR_LIT>']<EOL>new_tag['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if 
spans_columns:<EOL><INDENT>new_tag['<STR_LIT>'] = row.td.attrs['<STR_LIT>']<EOL><DEDENT>row.td.replace_with(new_tag)<EOL><DEDENT><DEDENT>def beautify(html):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>table = html.find('<STR_LIT>')<EOL>first_tr = table.find('<STR_LIT>')<EOL>del table['<STR_LIT>']<EOL>del first_tr['<STR_LIT>']<EOL>return format_properly(html.prettify(formatter='<STR_LIT>'))<EOL><DEDENT>def parse_html(html, caption, details):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>new_html = BeautifulSoup(html, '<STR_LIT>')<EOL>if merge:<EOL><INDENT>row_num = <NUM_LIT:1><EOL>merged_cells = get_data_on_merged_cells()<EOL>rows = new_html.find('<STR_LIT>').find('<STR_LIT>').find_all('<STR_LIT>')<EOL>for row in rows:<EOL><INDENT>cell_num = <NUM_LIT:0> <EOL>cells = row.find_all('<STR_LIT>')<EOL>for cell in cells:<EOL><INDENT>curr_cell = (row_num, cell_num)<EOL>mark_cells_going_right(cell, curr_cell, merged_cells)  <EOL>mark_cells_going_down(cell, curr_cell, merged_cells)<EOL>mark_cells_going_down_and_right(cell, curr_cell, merged_cells)<EOL>cell_num += <NUM_LIT:1><EOL><DEDENT>row_num += <NUM_LIT:1><EOL><DEDENT>mark_header_cells(new_html)<EOL>destroy = new_html.find_all(attrs={'<STR_LIT:class>' : '<STR_LIT>' })<EOL>for item in destroy:<EOL><INDENT>item.extract()<EOL><DEDENT><DEDENT>if row_headers:<EOL><INDENT>add_row_headers(new_html)<EOL><DEDENT>if caption:<EOL><INDENT>create_caption(new_html, caption)<EOL><DEDENT>if details:<EOL><INDENT>create_summary_and_details(new_html, details)<EOL><DEDENT>return beautify(new_html)<EOL><DEDENT>pd.options.display.max_colwidth = -<NUM_LIT:1><EOL>xls = pd.ExcelFile(path)<EOL>df = xls.parse(sheetname)<EOL>panda_html = df.to_html(classes=css_classes, index=False, na_rep='<STR_LIT>')<EOL>return parse_html(panda_html, caption, details)<EOL>", "docstring": "Convert an excel spreadsheet to an html table.\nThis function supports the conversion of merged \ncells. It can be used in code or run from the \ncommand-line. 
If passed the correct arguments\nit can generate fully accessible html.\n\nArgs:\n    path: string, path to the spreadsheet.\n\n    sheetname: string, name of the sheet\n    to convert. \n\n    css_classes: string, space separated\n    classnames to append to the table.\n\n    caption: string, a short heading-like \n    description of the table.\n\n    details: list of strings, where the first\n    item in the list is a string for the html \n    summary element and the second item is\n    a string for the details element. The \n    summary should be very short, e.g. \"Help\",\n    where as the details element should be a \n    long description regarding the purpose or \n    how to navigate the table.\n\n    row_headers: boolean, defaults to False.\n    Does the table have row headers? If set\n    to True, the first element in each row\n    will be a <th scope=\"row\"> element \n    instead of a <td> element.\n\n    merge: boolean, whether or not to \n    combine cells that were merged in the \n    spreadsheet.\n\nReturns:\n    string, html table", "id": "f1549:m14"}
{"signature": "def add_newlines(f, output, char):", "body": "line_count = get_line_count(f)<EOL>f = open(f, '<STR_LIT>')<EOL>output = open(output, '<STR_LIT>')<EOL>for line in range(line_count):<EOL><INDENT>string = f.readline()<EOL>string = re.sub(char, char + '<STR_LIT:\\n>', string)<EOL>output.write(string)<EOL><DEDENT>", "docstring": "Adds line breaks after every occurance of a given character in a file.\n\n    Args:\n        f: string, path to input file.\n\n        output: string, path to output file.\n\n    Returns:\n        None.", "id": "f1549:m4"}
{"signature": "def get_line_count(fname):", "body": "i = <NUM_LIT:0><EOL>with open(fname) as f:<EOL><INDENT>for i, l in enumerate(f):<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return i + <NUM_LIT:1><EOL>", "docstring": "Counts the number of lines in a file.\n\n    Args:\n        fname: string, name of the file.\n\n    Returns:\n        integer, the number of lines in the file.", "id": "f1549:m2"}
{"signature": "def indent_css(f, output):", "body": "line_count = get_line_count(f)<EOL>f = open(f, '<STR_LIT>')<EOL>output = open(output, '<STR_LIT>')<EOL>for line in range(line_count):<EOL><INDENT>string = f.readline().rstrip()<EOL>if len(string) > <NUM_LIT:0>:<EOL><INDENT>if string[-<NUM_LIT:1>] == \"<STR_LIT:;>\":<EOL><INDENT>output.write(\"<STR_LIT:U+0020>\" + string + \"<STR_LIT:\\n>\")<EOL><DEDENT>else:<EOL><INDENT>output.write(string + \"<STR_LIT:\\n>\")<EOL><DEDENT><DEDENT><DEDENT>output.close()<EOL>f.close()<EOL>", "docstring": "Indentes css that has not been indented and saves it to a new file.\n    A new file is created if the output destination does not already exist.\n\n    Args:\n        f: string, path to file.\n\n        output: string, path/name of the output file (e.g. /directory/output.css).\n    print type(response.read())\n\n    Returns:\n        None.", "id": "f1549:m3"}
{"signature": "def is_numeric(string):", "body": "try:<EOL><INDENT>float(string)<EOL>return True<EOL><DEDENT>except ValueError:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Checks if a string is numeric. If the string value is an integer\nor a float, return True, otherwise False. Can be used to test \nsoley for floats as well. \n\nArgs:\n    string: a string to test.\n\nReturns: \n    boolean", "id": "f1549:m7"}
{"signature": "def clean_strings(iterable):", "body": "retval = []<EOL>for val in iterable:<EOL><INDENT>try:<EOL><INDENT>retval.append(val.strip())<EOL><DEDENT>except(AttributeError):<EOL><INDENT>retval.append(val)<EOL><DEDENT><DEDENT>return retval<EOL>", "docstring": "Take a list of strings and clear whitespace \non each one. If a value in the list is not a \nstring pass it through untouched.\n\nArgs:\n    iterable: mixed list\n\nReturns: \n    mixed list", "id": "f1549:m12"}
{"signature": "def is_int(string):", "body": "try:<EOL><INDENT>a = float(string)<EOL>b = int(a)<EOL><DEDENT>except ValueError:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return a == b<EOL><DEDENT>", "docstring": "Checks if a string is an integer. If the string value is an integer\nreturn True, otherwise return False. \n\nArgs:\n    string: a string to test.\n\nReturns: \n    boolean", "id": "f1549:m10"}
{"signature": "def pluralize_collection(base, local_cls, referred_cls, constraint):", "body": "\"<STR_LIT>\"<EOL>referred_name = referred_cls.__name__<EOL>uncamelized = re.sub(r'<STR_LIT>',<EOL>lambda m: \"<STR_LIT>\" % m.group(<NUM_LIT:0>).lower(),<EOL>referred_name)[<NUM_LIT:1>:]<EOL>pluralized = _pluralizer.plural(uncamelized)<EOL>return pluralized<EOL>", "docstring": "Produce an 'uncamelized', 'pluralized' class name, e.g.", "id": "f1564:m1"}
{"signature": "def is_populated(self) -> bool:", "body": "return <NUM_LIT:0> < self.count_model()<EOL>", "docstring": "Check if the database is already populated.", "id": "f1569:c1:m5"}
{"signature": "@classmethod<EOL><INDENT>def populate(cls):<DEDENT>", "body": "cls.manager.populate()<EOL>", "docstring": "Populate the database.", "id": "f1571:c3:m0"}
{"signature": "@classmethod<EOL><INDENT>def populate(cls):<DEDENT>", "body": "cls.manager.populate(return_true=True)<EOL>", "docstring": "Populate the database.", "id": "f1571:c4:m0"}
{"signature": "def setUp(self):", "body": "self.runner = CliRunner()<EOL>self.main = Manager.get_cli()<EOL>", "docstring": "Set up a CliRunner and an accompanying CLI for each test.", "id": "f1574:c0:m0"}
{"signature": "def setUp(self):", "body": "self.runner = CliRunner()<EOL>self.main = NamespaceManager.get_cli()<EOL>self.manager = Manager(connection=self.connection)<EOL>self.manager.populate()<EOL>", "docstring": "Set up a CliRunner and an accompanying CLI for each test.", "id": "f1575:c3:m0"}
{"signature": "def populate(self):", "body": "self.manager.populate()<EOL>", "docstring": "Populate the manager.", "id": "f1575:c2:m0"}
{"signature": "def make_df_getter(data_url: str, data_path: str, **kwargs) -> Callable[[Optional[str], bool, bool], pd.DataFrame]:", "body": "download_function = make_downloader(data_url, data_path)<EOL>def get_df(url: Optional[str] = None, cache: bool = True, force_download: bool = False) -> pd.DataFrame:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if url is None and cache:<EOL><INDENT>url = download_function(force_download=force_download)<EOL><DEDENT>return pd.read_csv(<EOL>url or data_url,<EOL>**kwargs<EOL>)<EOL><DEDENT>return get_df<EOL>", "docstring": "Build a function that handles downloading tabular data and parsing it into a pandas DataFrame.\n\n    :param data_url: The URL of the data\n    :param data_path: The path where the data should get stored\n    :param kwargs: Any other arguments to pass to :func:`pandas.read_csv`", "id": "f1576:m1"}
{"signature": "def build_engine_session(connection, echo=False, autoflush=None, autocommit=None, expire_on_commit=None,<EOL>scopefunc=None):", "body": "if connection is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>engine = create_engine(connection, echo=echo)<EOL>autoflush = autoflush if autoflush is not None else False<EOL>autocommit = autocommit if autocommit is not None else False<EOL>expire_on_commit = expire_on_commit if expire_on_commit is not None else True<EOL>log.debug('<STR_LIT>', autoflush, autocommit, expire_on_commit)<EOL>session_maker = sessionmaker(<EOL>bind=engine,<EOL>autoflush=autoflush,<EOL>autocommit=autocommit,<EOL>expire_on_commit=expire_on_commit,<EOL>)<EOL>session = scoped_session(<EOL>session_maker,<EOL>scopefunc=scopefunc<EOL>)<EOL>return engine, session<EOL>", "docstring": "Build an engine and a session.\n\n    :param str connection: An RFC-1738 database connection string\n    :param bool echo: Turn on echoing SQL\n    :param Optional[bool] autoflush: Defaults to True if not specified in kwargs or configuration.\n    :param Optional[bool] autocommit: Defaults to False if not specified in kwargs or configuration.\n    :param Optional[bool] expire_on_commit: Defaults to False if not specified in kwargs or configuration.\n    :param scopefunc: Scoped function to pass to :func:`sqlalchemy.orm.scoped_session`\n    :rtype: tuple[Engine,Session]\n\n    From the Flask-SQLAlchemy documentation:\n\n    An extra key ``'scopefunc'`` can be set on the ``options`` dict to\n    specify a custom scope function.  If it's not provided, Flask's app\n    context stack identity is used. This will ensure that sessions are\n    created and removed with the request/response cycle, and should be fine\n    in most cases.", "id": "f1578:m0"}
{"signature": "@property<EOL><INDENT>def connection(self) -> str:<DEDENT>", "body": "return str(self.engine.url)<EOL>", "docstring": "Return this manager's connection string.", "id": "f1578:c0:m1"}
{"signature": "def add_cli_write_bel_annotation(main: click.Group) -> click.Group:  ", "body": "@main.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=click.Path(file_okay=False, dir_okay=True), default=os.getcwd(),<EOL>help='<STR_LIT>')<EOL>@click.pass_obj<EOL>def write(manager: BELNamespaceManagerMixin, directory: str):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>with open(os.path.join(directory, manager.identifiers_namespace), '<STR_LIT:w>') as file:<EOL><INDENT>manager.write_bel_annotation(file)<EOL><DEDENT><DEDENT>return main<EOL>", "docstring": "Add a ``write_bel_annotation`` command to main :mod:`click` function.", "id": "f1579:m3"}
{"signature": "def write_directory(self, directory: str) -> bool:", "body": "current_md5_hash = self.get_namespace_hash()<EOL>md5_hash_path = os.path.join(directory, f'<STR_LIT>')<EOL>if not os.path.exists(md5_hash_path):<EOL><INDENT>old_md5_hash = None<EOL><DEDENT>else:<EOL><INDENT>with open(md5_hash_path) as file:<EOL><INDENT>old_md5_hash = file.read().strip()<EOL><DEDENT><DEDENT>if old_md5_hash == current_md5_hash:<EOL><INDENT>return False<EOL><DEDENT>with open(os.path.join(directory, f'<STR_LIT>'), '<STR_LIT:w>') as file:<EOL><INDENT>self.write_bel_namespace(file, use_names=False)<EOL><DEDENT>with open(md5_hash_path, '<STR_LIT:w>') as file:<EOL><INDENT>print(current_md5_hash, file=file)<EOL><DEDENT>if self.has_names:<EOL><INDENT>with open(os.path.join(directory, f'<STR_LIT>'), '<STR_LIT:w>') as file:<EOL><INDENT>self.write_bel_namespace(file, use_names=True)<EOL><DEDENT>with open(os.path.join(directory, f'<STR_LIT>'), '<STR_LIT:w>') as file:<EOL><INDENT>self.write_bel_namespace_mappings(file, desc='<STR_LIT>')<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Write a BEL namespace for identifiers, names, name hash, and mappings to the given directory.", "id": "f1579:c1:m21"}
{"signature": "def _iterate_namespace_models(self, **kwargs) -> Iterable:", "body": "return tqdm(<EOL>self._get_query(self.namespace_model),<EOL>total=self._count_model(self.namespace_model),<EOL>**kwargs<EOL>)<EOL>", "docstring": "Return an iterator over the models to be converted to the namespace.", "id": "f1579:c1:m5"}
{"signature": "@staticmethod<EOL><INDENT>def _cli_add_write_bel_namespace(main: click.Group) -> click.Group:<DEDENT>", "body": "return add_cli_write_bel_namespace(main)<EOL>", "docstring": "Add the write BEL namespace command.", "id": "f1579:c1:m28"}
{"signature": "def _add_annotation_to_graph(self, graph: BELGraph) -> None:", "body": "if '<STR_LIT>' not in graph.annotation_list:<EOL><INDENT>graph.annotation_list['<STR_LIT>'] = set()<EOL><DEDENT>graph.annotation_list['<STR_LIT>'].add(self.module_name)<EOL>", "docstring": "Add this manager as an annotation to the graph.", "id": "f1579:c1:m15"}
{"signature": "@staticmethod<EOL><INDENT>def _cli_add_write_bel_annotation(main: click.Group) -> click.Group:<DEDENT>", "body": "return add_cli_write_bel_annotation(main)<EOL>", "docstring": "Add the write BEL namespace command.", "id": "f1579:c1:m29"}
{"signature": "@staticmethod<EOL><INDENT>def _cli_add_to_bel_namespace(main: click.Group) -> click.Group:<DEDENT>", "body": "return add_cli_to_bel_namespace(main)<EOL>", "docstring": "Add the export BEL namespace command.", "id": "f1579:c1:m26"}
{"signature": "def write_bel_namespace(self, file: TextIO, use_names: bool = False) -> None:", "body": "if not self.is_populated():<EOL><INDENT>self.populate()<EOL><DEDENT>if use_names and not self.has_names:<EOL><INDENT>raise ValueError<EOL><DEDENT>values = (<EOL>self._get_namespace_name_to_encoding(desc='<STR_LIT>')<EOL>if use_names else<EOL>self._get_namespace_identifier_to_encoding(desc='<STR_LIT>')<EOL>)<EOL>write_namespace(<EOL>namespace_name=self._get_namespace_name(),<EOL>namespace_keyword=self._get_namespace_keyword(),<EOL>namespace_query_url=self.identifiers_url,<EOL>values=values,<EOL>file=file,<EOL>)<EOL>", "docstring": "Write as a BEL namespace file.", "id": "f1579:c1:m18"}
{"signature": "def add_cli_clear_bel_namespace(main: click.Group) -> click.Group:  ", "body": "@main.command()<EOL>@click.pass_obj<EOL>def drop(manager: BELNamespaceManagerMixin):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>namespace = manager.drop_bel_namespace()<EOL>if namespace:<EOL><INDENT>click.echo(f'<STR_LIT>')<EOL><DEDENT><DEDENT>return main<EOL>", "docstring": "Add a ``clear_bel_namespace`` command to main :mod:`click` function.", "id": "f1579:m1"}
{"signature": "@staticmethod<EOL><INDENT>def _get_name(model) -> str:<DEDENT>", "body": "return model.name<EOL>", "docstring": "Extract the name from an instance of namespace_model.\n\n        :param model: The model to convert", "id": "f1579:c1:m4"}
{"signature": "@staticmethod<EOL><INDENT>def _cli_add_clear_bel_namespace(main: click.Group) -> click.Group:<DEDENT>", "body": "return add_cli_clear_bel_namespace(main)<EOL>", "docstring": "Add the clear BEL namespace command.", "id": "f1579:c1:m27"}
{"signature": "def upload_bel_namespace(self, update: bool = False) -> Namespace:", "body": "if not self.is_populated():<EOL><INDENT>self.populate()<EOL><DEDENT>namespace = self._get_default_namespace()<EOL>if namespace is None:<EOL><INDENT>log.info('<STR_LIT>', self._get_namespace_name())<EOL>return self._make_namespace()<EOL><DEDENT>if update:<EOL><INDENT>self._update_namespace(namespace)<EOL><DEDENT>return namespace<EOL>", "docstring": "Upload the namespace to the PyBEL database.\n\n        :param update: Should the namespace be updated first?", "id": "f1579:c1:m16"}
{"signature": "@abstractmethod<EOL><INDENT>def _create_namespace_entry_from_model(self, model, namespace: Namespace) -> NamespaceEntry:<DEDENT>", "body": "", "docstring": "Create a PyBEL NamespaceEntry model from a Bio2BEL model.\n\n        :param model: The model to convert\n        :param namespace: The PyBEL namespace to add to", "id": "f1579:c1:m1"}
{"signature": "def add_cli_write_bel_namespace(main: click.Group) -> click.Group:  ", "body": "@main.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=click.Path(file_okay=False, dir_okay=True), default=os.getcwd(),<EOL>help='<STR_LIT>')<EOL>@click.pass_obj<EOL>def write(manager: BELNamespaceManagerMixin, directory: str):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>manager.write_directory(directory)<EOL><DEDENT>return main<EOL>", "docstring": "Add a ``write_bel_namespace`` command to main :mod:`click` function.", "id": "f1579:m2"}
{"signature": "@classmethod<EOL><INDENT>def _get_namespace_name(cls) -> str:<DEDENT>", "body": "return cls.identifiers_recommended or cls.module_name<EOL>", "docstring": "Get the nicely formatted name of this namespace.", "id": "f1579:c1:m6"}
{"signature": "def write_bel_namespace_mappings(self, file: TextIO, **kwargs) -> None:", "body": "json.dump(self._get_namespace_identifier_to_name(**kwargs), file, indent=<NUM_LIT:2>, sort_keys=True)<EOL>", "docstring": "Write a BEL namespace mapping file.", "id": "f1579:c1:m20"}
{"signature": "def write_bel_annotation(self, file: TextIO) -> None:", "body": "if not self.is_populated():<EOL><INDENT>self.populate()<EOL><DEDENT>values = self._get_namespace_name_to_encoding(desc='<STR_LIT>')<EOL>write_annotation(<EOL>keyword=self._get_namespace_keyword(),<EOL>citation_name=self._get_namespace_name(),<EOL>description='<STR_LIT>',<EOL>values=values,<EOL>file=file,<EOL>)<EOL>", "docstring": "Write as a BEL annotation file.", "id": "f1579:c1:m19"}
{"signature": "def add_cli_to_bel_namespace(main: click.Group) -> click.Group:  ", "body": "@main.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True)<EOL>@click.pass_obj<EOL>def upload(manager: BELNamespaceManagerMixin, update):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>namespace = manager.upload_bel_namespace(update=update)<EOL>click.echo(f'<STR_LIT>')<EOL><DEDENT>return main<EOL>", "docstring": "Add a ``upload_bel_namespace`` command to main :mod:`click` function.", "id": "f1579:m0"}
{"signature": "@classmethod<EOL><INDENT>def _get_identifier(cls, model) -> str:<DEDENT>", "body": "return getattr(model, f'<STR_LIT>')<EOL>", "docstring": "Extract the identifier from an instance of namespace_model.\n\n        :param model: The model to convert", "id": "f1579:c1:m2"}
{"signature": "@staticmethod<EOL><INDENT>def _cli_add_to_bel(main: click.Group) -> click.Group:<DEDENT>", "body": "return add_cli_to_bel(main)<EOL>", "docstring": "Add the export BEL command.", "id": "f1580:c1:m3"}
{"signature": "@classmethod<EOL><INDENT>def get_cli(cls) -> click.Group:<DEDENT>", "body": "main = super().get_cli()<EOL>@main.group()<EOL>def bel():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL><DEDENT>cls._cli_add_to_bel(bel)<EOL>cls._cli_add_upload_bel(bel)<EOL>return main<EOL>", "docstring": "Get a :mod:`click` main function with added BEL commands.", "id": "f1580:c1:m5"}
{"signature": "def to_indra_statements(self, *args, **kwargs):", "body": "graph = self.to_bel(*args, **kwargs)<EOL>return to_indra_statements(graph)<EOL>", "docstring": "Dump as a list of INDRA statements.\n\n        :rtype: List[indra.Statement]", "id": "f1580:c1:m2"}
{"signature": "def add_cli_flask(main: click.Group) -> click.Group:  ", "body": "@main.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True)<EOL>@click.option('<STR_LIT>', '<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', default=os.urandom(<NUM_LIT:8>))<EOL>@click.pass_obj<EOL>def web(manager, debug, port, host, secret_key):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if not manager.is_populated():<EOL><INDENT>click.echo('<STR_LIT>'.format(manager.module_name))<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>app = manager.get_flask_admin_app(url='<STR_LIT:/>', secret_key=secret_key)<EOL>app.run(debug=debug, host=host, port=port)<EOL><DEDENT>return main<EOL>", "docstring": "Add a ``web`` comand main :mod:`click` function.", "id": "f1582:m0"}
{"signature": "def _add_admin(self, app, **kwargs):", "body": "from flask_admin import Admin<EOL>from flask_admin.contrib.sqla import ModelView<EOL>admin = Admin(app, **kwargs)<EOL>for flask_admin_model in self.flask_admin_models:<EOL><INDENT>if isinstance(flask_admin_model, tuple):  <EOL><INDENT>if len(flask_admin_model) != <NUM_LIT:2>:<EOL><INDENT>raise TypeError<EOL><DEDENT>model, view = flask_admin_model<EOL>admin.add_view(view(model, self.session))<EOL><DEDENT>else:<EOL><INDENT>admin.add_view(ModelView(flask_admin_model, self.session))<EOL><DEDENT><DEDENT>return admin<EOL>", "docstring": "Add a Flask Admin interface to an application.\n\n        :param flask.Flask app: A Flask application\n        :param kwargs: Keyword arguments are passed through to :class:`flask_admin.Admin`\n        :rtype: flask_admin.Admin", "id": "f1582:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def get_cli(cls) -> click.Group:<DEDENT>", "body": "main = super().get_cli()<EOL>cls._cli_add_flask(main)<EOL>return main<EOL>", "docstring": "Add  a :mod:`click` main function to use as a command line interface.", "id": "f1582:c0:m4"}
{"signature": "@staticmethod<EOL><INDENT>def _cli_add_flask(main: click.Group) -> click.Group:<DEDENT>", "body": "return add_cli_flask(main)<EOL>", "docstring": "Add the web command.", "id": "f1582:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def _cli_add_drop(main: click.Group) -> click.Group:<DEDENT>", "body": "return add_cli_drop(main)<EOL>", "docstring": "Add the drop command.", "id": "f1583:c1:m12"}
{"signature": "@abstractmethod<EOL><INDENT>def summarize(self) -> Mapping[str, int]:<DEDENT>", "body": "", "docstring": "Summarize the database.", "id": "f1583:c1:m4"}
{"signature": "def create_all(self, check_first: bool = True):", "body": "self._metadata.create_all(self.engine, checkfirst=check_first)<EOL>", "docstring": "Create the empty database (tables).\n\n        :param bool check_first: Defaults to True, don't issue CREATEs for tables already present\n         in the target database. Defers to :meth:`sqlalchemy.sql.schema.MetaData.create_all`", "id": "f1583:c1:m6"}
{"signature": "def _get_query(self, model):", "body": "return self.session.query(model)<EOL>", "docstring": "Get a query for the given model using this manager's session.\n\n        :param model: A SQLAlchemy model class\n        :return: a SQLAlchemy query", "id": "f1583:c1:m8"}
{"signature": "@property<EOL><INDENT>@abstractmethod<EOL>def _base(self) -> DeclarativeMeta:<DEDENT>", "body": "", "docstring": "Return the declarative base.\n\n        It is usually sufficient to return an instance that is module-level.\n\n        How to build an instance of :class:`sqlalchemy.ext.declarative.api.DeclarativeMeta` by using\n        :func:`sqlalchemy.ext.declarative.declarative_base`:\n\n        >>> from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base\n        >>> Base: DeclarativeMeta = declarative_base()\n\n        Then just override this abstract property like:\n\n        >>> @property\n        >>> def _base(self) -> DeclarativeMeta:\n        >>>     return Base\n\n        Note that this property could effectively also be a static method.", "id": "f1583:c1:m0"}
{"signature": "@classmethod<EOL><INDENT>def get_cli(cls) -> click.Group:<DEDENT>", "body": "main = super().get_cli()<EOL>cls._cli_add_populate(main)<EOL>cls._cli_add_drop(main)<EOL>cls._cli_add_cache(main)<EOL>cls._cli_add_summarize(main)<EOL>return main<EOL>", "docstring": "Get the :mod:`click` main function to use as a command line interface.", "id": "f1583:c1:m15"}
{"signature": "def add_cli_cache(main: click.Group) -> click.Group:  ", "body": "@main.group()<EOL>def cache():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL><DEDENT>@cache.command()<EOL>@click.pass_obj<EOL>def locate(manager):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>data_dir = get_data_dir(manager.module_name)<EOL>click.echo(data_dir)<EOL><DEDENT>@cache.command()<EOL>@click.pass_obj<EOL>def ls(manager):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>data_dir = get_data_dir(manager.module_name)<EOL>for path in os.listdir(data_dir):<EOL><INDENT>click.echo(path)<EOL><DEDENT><DEDENT>@cache.command()<EOL>@click.pass_obj<EOL>def clear(manager):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>clear_cache(manager.module_name)<EOL><DEDENT>return main<EOL>", "docstring": "Add several commands to main :mod:`click` function for handling the cache.", "id": "f1583:m2"}
{"signature": "@staticmethod<EOL><INDENT>def _cli_add_populate(main: click.Group) -> click.Group:<DEDENT>", "body": "return add_cli_populate(main)<EOL>", "docstring": "Add the populate command.", "id": "f1583:c1:m11"}
{"signature": "@main.group()<EOL>def bel():", "body": "", "docstring": "Manage BEL.", "id": "f1584:m10"}
{"signature": "@main.command()<EOL>@connection_option<EOL>@click.option('<STR_LIT>', is_flag=True, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', is_flag=True, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', multiple=True, help='<STR_LIT>')<EOL>def populate(connection, reset, force, skip):", "body": "for idx, name, manager in _iterate_managers(connection, skip):<EOL><INDENT>click.echo(<EOL>click.style(f'<STR_LIT>', fg='<STR_LIT>', bold=True) +<EOL>click.style(f'<STR_LIT>', fg='<STR_LIT>', bold=True)<EOL>)<EOL>if reset:<EOL><INDENT>click.echo('<STR_LIT>')<EOL>manager.drop_all()<EOL>click.echo('<STR_LIT>')<EOL>manager.create_all()<EOL><DEDENT>elif manager.is_populated() and not force:<EOL><INDENT>click.echo(f'<STR_LIT>', color='<STR_LIT>')<EOL>continue<EOL><DEDENT>try:<EOL><INDENT>manager.populate()<EOL><DEDENT>except Exception:<EOL><INDENT>logger.exception('<STR_LIT>', name)<EOL>click.secho(f'<STR_LIT>', fg='<STR_LIT>', bold=True)<EOL><DEDENT><DEDENT>", "docstring": "Populate all.", "id": "f1584:m2"}
{"signature": "@belns.command()<EOL>@connection_option<EOL>@click.option('<STR_LIT>', '<STR_LIT>', multiple=True, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=click.Path(file_okay=False, dir_okay=True), default=os.getcwd(),<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True, help='<STR_LIT>')<EOL>def write(connection, skip, directory, force):", "body": "os.makedirs(directory, exist_ok=True)<EOL>from .manager.namespace_manager import BELNamespaceManagerMixin<EOL>for idx, name, manager in _iterate_managers(connection, skip):<EOL><INDENT>if not (isinstance(manager, AbstractManager) and isinstance(manager, BELNamespaceManagerMixin)):<EOL><INDENT>continue<EOL><DEDENT>click.secho(name, fg='<STR_LIT>', bold=True)<EOL>if force:<EOL><INDENT>try:<EOL><INDENT>click.echo(f'<STR_LIT>')<EOL>manager.drop_all()<EOL>click.echo('<STR_LIT>')<EOL>clear_cache(name)<EOL>click.echo('<STR_LIT>')<EOL>manager.populate()<EOL><DEDENT>except Exception:<EOL><INDENT>click.secho(f'<STR_LIT>', fg='<STR_LIT>')<EOL>continue<EOL><DEDENT><DEDENT>try:<EOL><INDENT>r = manager.write_directory(directory)<EOL><DEDENT>except TypeError as e:<EOL><INDENT>click.secho(f'<STR_LIT>'.rstrip(), fg='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>if not r:<EOL><INDENT>click.echo('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Write a BEL namespace names/identifiers to terminology store.", "id": "f1584:m9"}
{"signature": "@main.command()<EOL>@connection_option<EOL>def actions(connection):", "body": "session = _make_session(connection=connection)<EOL>for action in Action.ls(session=session):<EOL><INDENT>click.echo(f'<STR_LIT>')<EOL><DEDENT>", "docstring": "List all actions.", "id": "f1584:m13"}
{"signature": "@main.command()<EOL>@connection_option<EOL>@click.option('<STR_LIT>', '<STR_LIT>', multiple=True, help='<STR_LIT>')<EOL>def summarize(connection, skip):", "body": "for idx, name, manager in _iterate_managers(connection, skip):<EOL><INDENT>click.secho(name, fg='<STR_LIT>', bold=True)<EOL>if not manager.is_populated():<EOL><INDENT>click.echo('<STR_LIT>')<EOL>continue<EOL><DEDENT>if isinstance(manager, BELNamespaceManagerMixin):<EOL><INDENT>click.secho(f'<STR_LIT>', fg='<STR_LIT>')<EOL><DEDENT>if isinstance(manager, BELManagerMixin):<EOL><INDENT>try:<EOL><INDENT>click.secho(f'<STR_LIT>', fg='<STR_LIT>')<EOL><DEDENT>except TypeError as e:<EOL><INDENT>click.secho(str(e), fg='<STR_LIT>')<EOL><DEDENT><DEDENT>for field_name, count in sorted(manager.summarize().items()):<EOL><INDENT>click.echo(<EOL>click.style('<STR_LIT>', fg='<STR_LIT>', bold=True) + f\"<STR_LIT>\"<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "Summarize all.", "id": "f1584:m6"}
{"signature": "@cache.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', multiple=True, help='<STR_LIT>')<EOL>def clear(skip):", "body": "for name in sorted(MODULES):<EOL><INDENT>if name in skip:<EOL><INDENT>continue<EOL><DEDENT>click.secho(f'<STR_LIT>', fg='<STR_LIT>', bold=True)<EOL>clear_cache(name)<EOL><DEDENT>", "docstring": "Clear all caches.", "id": "f1584:m5"}
{"signature": "@main.group()<EOL>def cache():", "body": "", "docstring": "Manage caches.", "id": "f1584:m4"}
{"signature": "@main.command(help='<STR_LIT>')<EOL>@click.confirmation_option('<STR_LIT>')<EOL>@connection_option<EOL>@click.option('<STR_LIT>', '<STR_LIT>', multiple=True, help='<STR_LIT>')<EOL>def drop(connection, skip):", "body": "for idx, name, manager in _iterate_managers(connection, skip):<EOL><INDENT>click.secho(f'<STR_LIT>', fg='<STR_LIT>', bold=True)<EOL>manager.drop_all()<EOL><DEDENT>", "docstring": "Drop all.", "id": "f1584:m3"}
{"signature": "def get_global_connection() -> str:", "body": "return config.connection<EOL>", "docstring": "Return the global connection string.", "id": "f1585:m0"}
{"signature": "def load_module(self, fullname):", "body": "if fullname in sys.modules:<EOL><INDENT>return sys.modules[fullname]<EOL><DEDENT>end_name = fullname[len(self._group_with_dot):]<EOL>for entry_point in iter_entry_points(group=self.group, name=end_name):<EOL><INDENT>mod = entry_point.load()<EOL>sys.modules[fullname] = mod<EOL>return mod<EOL><DEDENT>", "docstring": "Load a module if its name starts with :code:`self.group` and is registered.", "id": "f1586:c0:m4"}
{"signature": "def __init__(self, group):", "body": "self.group = group<EOL>", "docstring": "Initialize the importer with the group name.\n\n        :param str group: a string representing the package resources entry_points group that will be used", "id": "f1586:c0:m0"}
{"signature": "def find_module(self, fullname, path=None):", "body": "if not fullname.startswith(self._group_with_dot):<EOL><INDENT>return<EOL><DEDENT>end_name = fullname[len(self._group_with_dot):]<EOL>for entry_point in iter_entry_points(group=self.group, name=None):<EOL><INDENT>if entry_point.name == end_name:<EOL><INDENT>return self<EOL><DEDENT><DEDENT>", "docstring": "Find a module if its name starts with :code:`self.group` and is registered.", "id": "f1586:c0:m3"}
{"signature": "@main.command()<EOL>@click.argument('<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=click.File('<STR_LIT:w>'))<EOL>@click.option('<STR_LIT>', '<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True)<EOL>def belns(keyword: str, file: TextIO, encoding: Optional[str], use_names: bool):", "body": "directory = get_data_dir(keyword)<EOL>obo_url = f'<STR_LIT>'<EOL>obo_path = os.path.join(directory, f'<STR_LIT>')<EOL>obo_cache_path = os.path.join(directory, f'<STR_LIT>')<EOL>obo_getter = make_obo_getter(obo_url, obo_path, preparsed_path=obo_cache_path)<EOL>graph = obo_getter()<EOL>convert_obo_graph_to_belns(<EOL>graph,<EOL>file=file,<EOL>encoding=encoding,<EOL>use_names=use_names,<EOL>)<EOL>", "docstring": "Write as a BEL namespace.", "id": "f1589:m2"}
{"signature": "@click.group()<EOL>def main():", "body": "", "docstring": "OBO Utilities.", "id": "f1589:m1"}
{"signature": "@classmethod<EOL><INDENT>def tearDownClass(cls):<DEDENT>", "body": "cls.manager.session.close()<EOL>super().tearDownClass()<EOL>", "docstring": "Close the connection in the manager and deletes the temporary database.", "id": "f1590:c4:m1"}
{"signature": "def setUp(self):", "body": "super().setUp()<EOL>def mock_connection() -> str:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return self.connection<EOL><DEDENT>self.mock_global_connection = mock.patch('<STR_LIT>', mock_connection)<EOL>self.mock_module_connection = mock.patch('<STR_LIT>', mock_connection)<EOL>", "docstring": "Set up the test with a mock connection string.\n\n        Add two class-level variables: ``mock_global_connection`` and ``mock_module_connection`` that can be\n        used as context managers to mock the bio2bel connection getter functions.", "id": "f1590:c2:m0"}
{"signature": "def tearDown(self):", "body": "os.close(self.fd)<EOL>os.remove(self.path)<EOL>", "docstring": "Close the connection to the database and removes the files created for it.", "id": "f1590:c0:m1"}
{"signature": "def make_temporary_cache_class_mixin(manager_cls: Type[AbstractManager]) -> Type[<EOL>AbstractTemporaryCacheClassMixin]:  ", "body": "class TemporaryCacheClassMixin(AbstractTemporaryCacheClassMixin):<EOL><INDENT>Manager = manager_cls<EOL><DEDENT>return TemporaryCacheClassMixin<EOL>", "docstring": "Build a testing class that has a Bio2BEL manager instance ready to go.", "id": "f1590:m0"}
{"signature": "@classmethod<EOL><INDENT>def setUpClass(cls):<DEDENT>", "body": "if cls.Manager is ...:<EOL><INDENT>raise Bio2BELTestMissingManagerError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if not issubclass(cls.Manager, AbstractManager):<EOL><INDENT>raise Bio2BELManagerTypeError('<STR_LIT>')<EOL><DEDENT>super().setUpClass()<EOL>cls.manager = cls.Manager(connection=cls.connection)<EOL>cls.populate()<EOL>", "docstring": "Set up the class with the given manager and allows an optional populate hook to be overridden.", "id": "f1590:c4:m0"}
{"signature": "def setUp(self):", "body": "super().setUp()<EOL>self.fd, self.path = tempfile.mkstemp()<EOL>self.connection = '<STR_LIT>' + self.path<EOL>log.info('<STR_LIT>', self.connection)<EOL>", "docstring": "Create a temporary file to use as a persistent database throughout tests in this class.", "id": "f1590:c0:m0"}
{"signature": "def get_connection(module_name: str, connection: Optional[str] = None) -> str:", "body": "<EOL>if connection is not None:<EOL><INDENT>return connection<EOL><DEDENT>module_name = module_name.lower()<EOL>module_config_cls = get_module_config_cls(module_name)<EOL>module_config = module_config_cls.load()<EOL>return module_config.connection or config.connection<EOL>", "docstring": "Return the SQLAlchemy connection string if it is set.\n\n    Order of operations:\n\n    1. Return the connection if given as a parameter\n    2. Check the environment for BIO2BEL_{module_name}_CONNECTION\n    3. Look in the bio2bel config file for module-specific connection. Create if doesn't exist. Check the\n       module-specific section for ``connection``\n    4. Look in the bio2bel module folder for a config file. Don't create if doesn't exist. Check the default section\n       for ``connection``\n    5. Check the environment for BIO2BEL_CONNECTION\n    6. Check the bio2bel config file for default\n    7. Fall back to standard default cache connection\n\n    :param module_name: The name of the module to get the configuration for\n    :param connection: get the SQLAlchemy connection string\n    :return: The SQLAlchemy connection string based on the configuration", "id": "f1592:m2"}
{"signature": "def get_module_config_cls(module_name: str) -> Type[_AbstractModuleConfig]:  ", "body": "class ModuleConfig(_AbstractModuleConfig):<EOL><INDENT>NAME = f'<STR_LIT>'<EOL>FILES = DEFAULT_CONFIG_PATHS + [<EOL>os.path.join(DEFAULT_CONFIG_DIRECTORY, module_name, '<STR_LIT>')<EOL>]<EOL><DEDENT>return ModuleConfig<EOL>", "docstring": "Build a module configuration class.", "id": "f1592:m1"}
{"signature": "def create_all(engine, checkfirst=True):", "body": "Base.metadata.create_all(bind=engine, checkfirst=checkfirst)<EOL>", "docstring": "Create the tables for Bio2BEL.", "id": "f1593:m2"}
{"signature": "@classmethod<EOL><INDENT>def ls(cls, session: Optional[Session] = None) -> List['<STR_LIT>']:<DEDENT>", "body": "if session is None:<EOL><INDENT>session = _make_session()<EOL><DEDENT>actions = session.query(cls).order_by(cls.created.desc()).all()<EOL>session.close()<EOL>return actions<EOL>", "docstring": "Get all actions.", "id": "f1593:c0:m7"}
{"signature": "def _store_helper(model: Action, session: Optional[Session] = None) -> None:", "body": "if session is None:<EOL><INDENT>session = _make_session()<EOL><DEDENT>session.add(model)<EOL>session.commit()<EOL>session.close()<EOL>", "docstring": "Help store an action.", "id": "f1593:m0"}
{"signature": "@classmethod<EOL><INDENT>def store_populate_failed(cls, resource: str, session: Optional[Session] = None) -> '<STR_LIT>':<DEDENT>", "body": "action = cls.make_populate_failed(resource)<EOL>_store_helper(action, session=session)<EOL>return action<EOL>", "docstring": "Store a \"populate failed\" event.\n\n        :param resource: The normalized name of the resource to store\n\n        Example:\n\n        >>> from bio2bel.models import Action\n        >>> Action.store_populate_failed('hgnc')", "id": "f1593:c0:m5"}
{"signature": "@staticmethod<EOL><INDENT>def make_drop(resource: str) -> '<STR_LIT>':<DEDENT>", "body": "return Action(resource=resource.lower(), action='<STR_LIT>')<EOL>", "docstring": "Make a ``drop`` instance of :class:`Action`.", "id": "f1593:c0:m3"}
{"signature": "def _make_session(connection: Optional[str] = None) -> Session:", "body": "if connection is None:<EOL><INDENT>connection = get_global_connection()<EOL><DEDENT>engine = create_engine(connection)<EOL>create_all(engine)<EOL>session_cls = sessionmaker(bind=engine)<EOL>session = session_cls()<EOL>return session<EOL>", "docstring": "Make a session.", "id": "f1593:m1"}
{"signature": "def create_application(connection: Optional[str] = None) -> Flask:", "body": "app = Flask(__name__)<EOL>flask_bootstrap.Bootstrap(app)<EOL>Admin(app)<EOL>connection = connection or DEFAULT_CACHE_CONNECTION<EOL>engine, session = build_engine_session(connection)<EOL>for name, add_admin in add_admins.items():<EOL><INDENT>url = '<STR_LIT>'.format(name)<EOL>add_admin(app, session, url=url, endpoint=name, name=name)<EOL>log.debug('<STR_LIT>', name, add_admin, url)<EOL><DEDENT>app.register_blueprint(ui)<EOL>return app<EOL>", "docstring": "Create a Flask application.", "id": "f1594:m1"}
{"signature": "def get_long_description():", "body": "with codecs.open(os.path.join(HERE, '<STR_LIT>'), encoding='<STR_LIT:utf-8>') as f:<EOL><INDENT>long_description = f.read()<EOL><DEDENT>return long_description<EOL>", "docstring": "Get the long_description from the README.rst file. Assume UTF-8 encoding.", "id": "f1597:m2"}
{"signature": "def find_meta(meta):", "body": "meta_match = re.search(<EOL>r'<STR_LIT>'.format(meta=meta),<EOL>META_FILE, re.M<EOL>)<EOL>if meta_match:<EOL><INDENT>return meta_match.group(<NUM_LIT:1>)<EOL><DEDENT>raise RuntimeError('<STR_LIT>'.format(meta=meta))<EOL>", "docstring": "Extract __*meta*__ from META_FILE.", "id": "f1597:m1"}
{"signature": "def cond_replace_value_some(ol,dst_value,*some,**kwargs):", "body": "cond_func = kwargs['<STR_LIT>']<EOL>if('<STR_LIT>' in kwargs):<EOL><INDENT>cond_func_args = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>cond_func_args = []<EOL><DEDENT>if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>seqs = list(some)<EOL>new = copy.deepcopy(ol)<EOL>selected = find_all(new,cond_func,*cond_func_args)<EOL>selected_indexes = array_map(selected,lambda ele:ele['<STR_LIT:index>'])<EOL>selected_indexes = select_seqs(selected_indexes,seqs)<EOL>new = replace_seqs(new,dst_value,selected_indexes)<EOL>if(mode == \"<STR_LIT>\"):<EOL><INDENT>return(new)<EOL><DEDENT>else:<EOL><INDENT>ol.clear()<EOL>ol.extend(new)<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\ndef afterCH(ele,ch):\n    cond = (ord(str(ele)) > ord(ch))\n    return(cond)\n\nnew = cond_replace_value_some(ol,\"REPLACED\",0,2,cond_func=afterCH,cond_func_args=['B'])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\nrslt = cond_replace_value_some(ol,\"REPLACED\",0,2,cond_func=afterCH,cond_func_args=['B'],mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m198"}
{"signature": "def broken_some(ol,*break_points):", "body": "bps = list(break_points)<EOL>return(broken_seqs(ol,bps))<EOL>", "docstring": "ol = initRange(0,20,1)\nol\nsecs = broken_some(ol,1,6,14,9)\nforEach(secs,print)", "id": "f1599:m207"}
{"signature": "def remove_somenot(ol,value,*seqs,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>seqs = list(seqs)<EOL>new = []<EOL>length = ol.__len__()<EOL>seq = -<NUM_LIT:1><EOL>cpol = copy.deepcopy(ol)<EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(not(cpol[i]==value)):<EOL><INDENT>seq = seq + <NUM_LIT:1><EOL>if(seq in seqs):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>new.append(cpol[i])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>new.append(cpol[i])<EOL><DEDENT><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>return(new) <EOL><DEDENT>else:<EOL><INDENT>ol.clear()<EOL>ol.extend(new)<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = remove_somenot(ol,'a',1,3)\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = remove_somenot(ol,'a',1,3,mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m119"}
{"signature": "def mapfio(ol,**kwargs):", "body": "diff_funcs_arr = kwargs['<STR_LIT>']<EOL>diff_args_arr = kwargs['<STR_LIT>']<EOL>lngth = ol.__len__()<EOL>rslt = []<EOL>for i in range(<NUM_LIT:0>,lngth):<EOL><INDENT>index = i<EOL>value = ol[i]<EOL>func = diff_funcs_arr[i]<EOL>args = diff_args_arr[i]<EOL>ele = func(index,*args)<EOL>rslt.append(ele)<EOL><DEDENT>return(rslt)<EOL>", "docstring": "#mapfio           v\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570                 NOT take value as a param for map_func\n#map_func         diff_func(index,*diff_args)", "id": "f1599:m2"}
{"signature": "def reverse(ol,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>new = copy.deepcopy(ol)<EOL>new.reverse()<EOL>return(new) <EOL><DEDENT>else:<EOL><INDENT>ol.reverse()<EOL>return(ol)<EOL><DEDENT>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nid(ol)\nnew = reverse(ol)\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,2,3,4]\nid(ol)\nrslt = reverse(ol,mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m137"}
{"signature": "def divide(ol,interval):", "body": "length = ol.__len__()<EOL>seqs = initRange(<NUM_LIT:0>,length,interval)<EOL>rslt = broken_seqs(ol,seqs)<EOL>return(rslt)<EOL>", "docstring": "ol = elel.initRange(0,20,1)\ninterval = 3\nrslt = elel.divide(ol,interval)\nrslt\nrslt = elel.divide(ol,4)\nrslt", "id": "f1599:m209"}
{"signature": "def where(ol,value):", "body": "si = None<EOL>ei = None<EOL>for i in range(<NUM_LIT:0>,ol.__len__()):<EOL><INDENT>ele = ol[i]<EOL>if(value >ele):<EOL><INDENT>si = i <EOL><DEDENT>elif(value == ele):<EOL><INDENT>return((i,i))<EOL><DEDENT>else:<EOL><INDENT>ei = i <EOL>return((si,ei))<EOL><DEDENT><DEDENT>return((si,ei))<EOL>", "docstring": "ol = [0, 4, 6, 8, 10, 14]\nwhere(ol,-1)\nwhere(ol,1)\nwhere(ol,2)\nwhere(ol,3)\nwhere(ol,4)\nwhere(ol,9)\nwhere(ol,14)\nwhere(ol,17)", "id": "f1599:m216"}
{"signature": "def pop_some(ol,*indexes,**kwargs):", "body": "length = ol.__len__()<EOL>indexes = list(map(lambda index:uniform_index(index,length),list(indexes)))<EOL>if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>cpol = copy.deepcopy(ol)<EOL>new = []<EOL>popped = []<EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(i in indexes):<EOL><INDENT>popped.append(cpol[i])<EOL><DEDENT>else:<EOL><INDENT>new.append(cpol[i])<EOL><DEDENT><DEDENT>return({'<STR_LIT>':popped,'<STR_LIT:list>':new})<EOL><DEDENT>else:<EOL><INDENT>tmp = []<EOL>popped = []<EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(i in indexes):<EOL><INDENT>popped.append(ol[i])<EOL><DEDENT>else:<EOL><INDENT>tmp.append(ol[i])<EOL><DEDENT><DEDENT>ol.clear()<EOL>for i in range(<NUM_LIT:0>,tmp.__len__()):<EOL><INDENT>ol.append(tmp[i])<EOL><DEDENT>return(popped)<EOL><DEDENT>", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,2,3,4,5,6]\nid(ol)\nrslt = pop_some(ol,0,2,5)\nol\nid(ol)\nid(rslt['list'])\n####\nol = [1,2,3,4,5,6]\nid(ol)\nrslt = pop_some(ol,0,2,5,mode=\"original\")\nrslt\nol\nid(ol)", "id": "f1599:m109"}
{"signature": "def findv(ol,cond_func,cond_func_args=[]):", "body": "rslt = []<EOL>for i in range(ol.__len__()):<EOL><INDENT>cond = cond_func(ol[i],*cond_func_args)<EOL>if(cond):<EOL><INDENT>rslt.append((i,ol[i]))<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return(rslt)<EOL>", "docstring": "#mapv     i\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570,\u5171\u4eab\u76f8\u540c\u7684f,\u5171\u4eab\u76f8\u540c\u7684o\n#         NOT take index as a param for map_func\n#         share common other_args\n#         share common cond_func\n#         common_func(value,*common_args)", "id": "f1599:m25"}
{"signature": "def cond_pop(ol,index,**kwargs):", "body": "cond_func = kwargs['<STR_LIT>']<EOL>cond_func_args = kwargs['<STR_LIT>']<EOL>index = uniform_index(index,ol.__len__())<EOL>if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>value = ol[index]<EOL>cond = cond_func(index,value,*cond_func_args)<EOL>if(mode == \"<STR_LIT>\"):<EOL><INDENT>new = copy.deepcopy(ol)<EOL>if(cond):<EOL><INDENT>popped = new.pop(index)<EOL><DEDENT>else:<EOL><INDENT>popped = new<EOL><DEDENT>return({'<STR_LIT>':popped,'<STR_LIT:list>':new})<EOL><DEDENT>else:<EOL><INDENT>if(cond):<EOL><INDENT>popped = ol.pop(index)<EOL><DEDENT>else:<EOL><INDENT>popped = ol<EOL><DEDENT>return(popped)<EOL><DEDENT>", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [{'data':0;'type':'number'},{'data':'x';'type':'str'},{'data':'y';'type':'str'},4]\n#cond_func_args is a array\ndef cond_func(index,value,cond_func_args):", "id": "f1599:m107"}
{"signature": "def insert(ol,start_index,ele,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>length = ol.__len__()<EOL>cpol = copy.deepcopy(ol)<EOL>si = uniform_index(start_index,length)<EOL>new = copy.deepcopy(cpol[<NUM_LIT:0>:si])<EOL>new.append(ele)<EOL>new.extend(cpol[si:])<EOL>return(new)<EOL><DEDENT>else:<EOL><INDENT>ol.insert(start_index,ele)<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nele = 5\nid(ol)\ninsert(ol,2,ele,mode=\"original\")\nol\nid(ol)\n####\nol = [1,2,3,4]\nele = 5\nid(ol)\nnew = insert(ol,2,ele)\nnew\nid(new)", "id": "f1599:m67"}
{"signature": "def rangize(break_points,length):", "body": "bps = array_map(break_points,uniform_index,length)<EOL>bps.sort()<EOL>bps = prepend(bps,<NUM_LIT:0>)<EOL>bps = append(bps,length)<EOL>bps = uniqualize(bps)<EOL>bpslen = bps.__len__()<EOL>secs=[(<NUM_LIT:0>,bps[<NUM_LIT:0>])]<EOL>for i in range(<NUM_LIT:0>,bpslen-<NUM_LIT:1>):<EOL><INDENT>r = (bps[i],bps[i+<NUM_LIT:1>])<EOL>secs.append(r)<EOL><DEDENT>secs.append((bps[bpslen-<NUM_LIT:1>],length))<EOL>if(secs[<NUM_LIT:0>][<NUM_LIT:0>] == secs[<NUM_LIT:0>][<NUM_LIT:1>]):<EOL><INDENT>secs.pop(<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>if(secs[-<NUM_LIT:1>][<NUM_LIT:0>] == secs[-<NUM_LIT:1>][<NUM_LIT:1>]):<EOL><INDENT>secs.pop(-<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>return(secs)<EOL>", "docstring": "break_points = [1,3,9,12,-2]\nlength = 15\nsecs = rangize(break_points,length)\nforEach(secs,print)", "id": "f1599:m199"}
{"signature": "def all_continuous_indexes_slices(ol,value):", "body": "rslt = []<EOL>length = ol.__len__()<EOL>cursor = <NUM_LIT:0><EOL>begin = None<EOL>slice = []<EOL>while(cursor < length):<EOL><INDENT>cond1 = (ol[cursor] == value)<EOL>cond2 = (begin == None)<EOL>if(cond1 & cond2):<EOL><INDENT>begin = cursor<EOL>slice.append(cursor)<EOL><DEDENT>elif(cond1 & (not(cond2))):<EOL><INDENT>slice.append(cursor)<EOL><DEDENT>elif((not(cond1)) & (not(cond2))):<EOL><INDENT>rslt.append(slice)<EOL>begin = None<EOL>slice = []<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>cursor = cursor + <NUM_LIT:1><EOL><DEDENT>if(slice):<EOL><INDENT>rslt.append(slice)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>return(rslt)<EOL>", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nall_continuous_indexes_slices(ol,\"a\")", "id": "f1599:m103"}
{"signature": "def some_continuous_indexesnot_slices(ol,value,*seqs):", "body": "seqs = list(seqs)<EOL>rslt = []<EOL>length = ol.__len__()<EOL>seq = -<NUM_LIT:1><EOL>cursor = <NUM_LIT:0><EOL>begin = None<EOL>slice = []<EOL>while(cursor < length):<EOL><INDENT>cond1 = not(ol[cursor] == value)<EOL>cond2 = (begin == None)<EOL>if(cond1 & cond2):<EOL><INDENT>begin = cursor<EOL>slice.append(cursor)<EOL><DEDENT>elif(cond1 & (not(cond2))):<EOL><INDENT>slice.append(cursor)<EOL><DEDENT>elif((not(cond1)) & (not(cond2))):<EOL><INDENT>seq = seq + <NUM_LIT:1><EOL>if(seq in seqs):<EOL><INDENT>rslt.append(slice)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>begin = None<EOL>slice = []<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>cursor = cursor + <NUM_LIT:1><EOL><DEDENT>if(slice):<EOL><INDENT>seq = seq + <NUM_LIT:1><EOL>if(seq in seqs):<EOL><INDENT>rslt.append(slice)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>return(rslt)<EOL>", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nsome_continuous_indexesnot_slices(ol,\"a\",0,2)", "id": "f1599:m100"}
{"signature": "def reduce_left(ol,callback,initialValue):", "body": "length = ol.__len__()<EOL>accumulator = initialValue<EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>accumulator = callback(accumulator,ol[i])<EOL><DEDENT>return(accumulator)<EOL>", "docstring": "from elist.elist import *\ndef callback(accumulator,currentValue):\n    accumulator.append(currentValue[0])\n    accumulator.append(currentValue[1])\n    return(accumulator)\n\nol = [(1,2),(\"a\",\"b\"),(\"x\",\"y\")]\nreduce_left(ol,callback,[])\n#array_reduce, reduceLeft ,reduce_left  are the same", "id": "f1599:m175"}
{"signature": "def toSource(ol):", "body": "return(ol.__repr__())<EOL>", "docstring": "from elist.elist import *\nol = [1,2,3,4]\ntoSource(ol)", "id": "f1599:m142"}
{"signature": "def index_first(ol,value):", "body": "return(ol.index('<STR_LIT:a>'))<EOL>", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindex_first(ol,'a')\n####index_first, array_index, indexOf  are the same\narray_index(ol,'a')\nindexOf(ol,'a')", "id": "f1599:m81"}
{"signature": "def setitem_via_pathlist(ol,value,pathlist):", "body": "this = ol<EOL>for i in range(<NUM_LIT:0>,pathlist.__len__()-<NUM_LIT:1>):<EOL><INDENT>key = pathlist[i]<EOL>this = this.__getitem__(key)<EOL><DEDENT>this.__setitem__(pathlist[-<NUM_LIT:1>],value)<EOL>return(ol)<EOL>", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\nsetitem_via_pathlist(y,\"500\",[1,1])\ny", "id": "f1599:m188"}
{"signature": "def delitem_via_pathlist(ol,pathlist):", "body": "this = ol<EOL>for i in range(<NUM_LIT:0>,pathlist.__len__()-<NUM_LIT:1>):<EOL><INDENT>key = pathlist[i]<EOL>this = this.__getitem__(key)<EOL><DEDENT>this.__delitem__(pathlist[-<NUM_LIT:1>])<EOL>return(ol)<EOL>", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\ndelitem_via_pathlist(y,[1,1])\ny", "id": "f1599:m190"}
{"signature": "def replace_value_some(ol,src_value,dst_value,*seqs,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>return(replace_value_seqs(ol,src_value,dst_value,list(seqs),mode=mode))<EOL>", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = replace_value_some(ol,'a','AAA',0,1)\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = replace_value_some(ol,'a','AAA',0,1,mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m195"}
{"signature": "def comprise(list1,list2,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>len_1 = list1.__len__()<EOL>len_2 = list2.__len__()<EOL>if(len_2>len_1):<EOL><INDENT>return(False)<EOL><DEDENT>else:<EOL><INDENT>if(mode==\"<STR_LIT:strict>\"):<EOL><INDENT>if(list2 == list1[:len_2]):<EOL><INDENT>return(True)<EOL><DEDENT>else:<EOL><INDENT>return(False)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>end = len_1 - len_2<EOL>for i in range(<NUM_LIT:0>,end+<NUM_LIT:1>):<EOL><INDENT>if(list2 == list1[i:(i+len_2)]):<EOL><INDENT>return(True)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return(False)<EOL>", "docstring": "from elist.elist import *\ncomprise([1,2,3,4,5],[2,3,4],mode=\"loose\")\ncomprise([1,2,3,4,5],[2,3,4])\ncomprise([1,2,3,4,5],[2,3,4],mode=\"strict\")\ncomprise([1,2,3,4,5],[1,2,3,4],mode=\"strict\")\n#not recursive ,only one level\n#please refer to ListTree.search for recursive support", "id": "f1599:m138"}
{"signature": "def pop_range(ol,start_index,end_index,**kwargs):", "body": "length = ol.__len__()<EOL>start_index = uniform_index(start_index,length)<EOL>end_index = uniform_index(end_index,length)<EOL>if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>cpol = copy.deepcopy(ol)<EOL>new = []<EOL>popped = []<EOL>for i in range(<NUM_LIT:0>,start_index):<EOL><INDENT>new.append(cpol[i])<EOL><DEDENT>for i in range(start_index,end_index):<EOL><INDENT>popped.append(cpol[i])<EOL><DEDENT>for i in range(end_index,length):<EOL><INDENT>new.append(cpol[i])<EOL><DEDENT>return({'<STR_LIT>':popped,'<STR_LIT:list>':new})<EOL><DEDENT>else:<EOL><INDENT>tmp = []<EOL>popped = []<EOL>for i in range(<NUM_LIT:0>,start_index):<EOL><INDENT>tmp.append(ol[i])<EOL><DEDENT>for i in range(start_index,end_index):<EOL><INDENT>popped.append(ol[i])<EOL><DEDENT>for i in range(end_index,length):<EOL><INDENT>tmp.append(ol[i])<EOL><DEDENT>ol.clear()<EOL>for i in range(<NUM_LIT:0>,tmp.__len__()):<EOL><INDENT>ol.append(tmp[i])<EOL><DEDENT>return(popped)<EOL><DEDENT>", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,2,3,4,5,6]\nid(ol)\nrslt = pop_range(ol,2,4)\nol\nid(ol)\nid(rslt['list'])\n####\nol = [1,2,3,4,5,6]\nid(ol)\nrslt = pop_range(ol,2,4,mode=\"original\")\nrslt\nol\nid(ol)", "id": "f1599:m108"}
{"signature": "def is_lop(ch,block_op_pairs_dict=get_block_op_pairs('<STR_LIT>')):", "body": "for i in range(<NUM_LIT:1>,block_op_pairs_dict.__len__()+<NUM_LIT:1>):<EOL><INDENT>if(ch == block_op_pairs_dict[i][<NUM_LIT:0>]):<EOL><INDENT>return(True)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return(False)<EOL>", "docstring": "# is_lop('{',block_op_pairs_dict)\n# is_lop('[',block_op_pairs_dict)\n# is_lop('}',block_op_pairs_dict)\n# is_lop(']',block_op_pairs_dict)\n# is_lop('a',block_op_pairs_dict)", "id": "f1599:m244"}
{"signature": "def is_leaf(obj):", "body": "if(is_list(obj)):<EOL><INDENT>length = obj.__len__()<EOL>if(length == <NUM_LIT:0>):<EOL><INDENT>return(True)<EOL><DEDENT>else:<EOL><INDENT>return(False)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return(True)<EOL><DEDENT>", "docstring": "the below is for nested-list\nany type is not list will be treated as a leaf\nempty list will be treated as a leaf\nfrom elist.elist import *\nis_leaf(1)\nis_leaf([1,2,3])\nis_leaf([])", "id": "f1599:m226"}
{"signature": "def delitem_via_sibseqs(ol,*sibseqs):", "body": "pathlist = list(sibseqs)<EOL>this = ol<EOL>for i in range(<NUM_LIT:0>,pathlist.__len__()-<NUM_LIT:1>):<EOL><INDENT>key = pathlist[i]<EOL>this = this.__getitem__(key)<EOL><DEDENT>this.__delitem__(pathlist[-<NUM_LIT:1>])<EOL>return(ol)<EOL>", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\ndelitem_via_sibseqs(y,1,1)\ny", "id": "f1599:m191"}
{"signature": "def cdr(ol,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>cpol = copy.deepcopy(ol)<EOL>return(cpol[<NUM_LIT:1>:])<EOL><DEDENT>else:<EOL><INDENT>ol.pop(<NUM_LIT:0>)<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol=[1,2,3,4]\nid(ol)\nnew = cdr(ol)\nnew\nid(new)\n####\nol=[1,2,3,4]\nid(ol)\nrslt = cdr(ol,mode=\"original\")\nrslt\nid(rslt)", "id": "f1599:m64"}
{"signature": "def cond_remove_seqs(ol,seqs,**kwargs):", "body": "cond_func = kwargs['<STR_LIT>']<EOL>if('<STR_LIT>' in kwargs):<EOL><INDENT>cond_func_args = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>cond_func_args = []<EOL><DEDENT>if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>new = copy.deepcopy(ol)<EOL>selected = find_all(new,cond_func,*cond_func_args)<EOL>selected_indexes = array_map(selected,lambda ele:ele['<STR_LIT:index>'])<EOL>selected_indexes = pop_indexes(selected_indexes,seqs)['<STR_LIT>']<EOL>new = pop_indexes(new,selected_indexes)['<STR_LIT:list>']<EOL>if(mode == \"<STR_LIT>\"):<EOL><INDENT>return(new)<EOL><DEDENT>else:<EOL><INDENT>ol.clear()<EOL>ol.extend(new)<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\ndef afterCH(ele,ch):\n    cond = (ord(str(ele)) > ord(ch))\n    return(cond)\n\nnew = cond_remove_seqs(ol,[0,2],cond_func=afterCH,cond_func_args=['B'])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\nrslt = cond_remove_seqs(ol,[0,2],cond_func=afterCH,cond_func_args=['B'],mode='original')\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m127"}
{"signature": "def indexes_seqs(ol,value,seqs):", "body": "seqs = list(seqs)<EOL>length = ol.__len__()<EOL>indexes =[]<EOL>seq = -<NUM_LIT:1><EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(value == ol[i]):<EOL><INDENT>seq = seq + <NUM_LIT:1><EOL>if(seq in seqs):<EOL><INDENT>indexes.append(i)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return(indexes)<EOL>", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindexes_seqs(ol,'a',{0,2})\nindexes_seqs(ol,'a',{0,1})\nindexes_seqs(ol,'a',{1,2})\nindexes_seqs(ol,'a',{3,4})", "id": "f1599:m91"}
{"signature": "def array_dualmap(ol,value_map_func,**kwargs):", "body": "def get_self(obj):<EOL><INDENT>return(obj)<EOL><DEDENT>if('<STR_LIT>' in kwargs):<EOL><INDENT>index_map_func_args = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>index_map_func_args = []<EOL><DEDENT>if('<STR_LIT>' in kwargs):<EOL><INDENT>value_map_func_args = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>value_map_func_args = []<EOL><DEDENT>if('<STR_LIT>' in kwargs):<EOL><INDENT>index_map_func = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>index_map_func = get_self<EOL><DEDENT>length = ol.__len__()<EOL>il = list(range(<NUM_LIT:0>,length))<EOL>nil = list(map(lambda ele:index_map_func(ele,*index_map_func_args),il))<EOL>nvl = []<EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>ele = ol[i]<EOL>v = value_map_func(nil[i],ele,*value_map_func_args)<EOL>nvl.append(v)<EOL><DEDENT>return(nvl)<EOL>", "docstring": "from elist.elist import *\nol = ['a','b','c','d']\ndef index_map_func(index,prefix,suffix):\n    s = prefix +str(index+97)+ suffix\n    return(s)\n\ndef value_map_func(mapped_index,ele,prefix,suffix):\n    s = prefix+mapped_index+': ' + str(ele) + suffix\n    return(s)\n\n####\nrslt = array_dualmap2(ol,index_map_func=index_map_func,index_map_func_args=[': ',' is '],value_map_func=value_map_func,value_map_func_args=['ord',' yes?'])\npobj(rslt)", "id": "f1599:m5"}
{"signature": "def split(ol,value,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>whiches = kwargs['<STR_LIT>']    <EOL><DEDENT>else:<EOL><INDENT>whiches = None<EOL><DEDENT>indexes =  indexes_all(ol,value)<EOL>if(whiches == None):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>indexes = select_indexes(indexes,whiches)<EOL><DEDENT>rslt = []<EOL>rslt.append(ol[:indexes[<NUM_LIT:0>]])<EOL>si = indexes[<NUM_LIT:0>]+<NUM_LIT:1><EOL>for i in range(<NUM_LIT:1>,indexes.__len__()):<EOL><INDENT>ei = indexes[i]<EOL>ele = ol[si:ei]<EOL>rslt.append(ele)<EOL>si = ei + <NUM_LIT:1><EOL><DEDENT>ele = ol[si:ol.__len__()]<EOL>rslt.append(ele)<EOL>return(rslt)<EOL>", "docstring": "ol = ['a',1,'a',2,'a',3,'a',4,'a']\nsplit(ol,'a')\nsplit(ol,'a',whiches=[0])\nsplit(ol,'a',whiches=[1])\nsplit(ol,'a',whiches=[2])\nsplit(ol,'a',whiches=[0,2])\nol = [1,'a',2,'a',3,'a',4]\nsplit(ol,'a')\nsplit('x=bcdsef=g','=',whiches=[0])", "id": "f1599:m214"}
{"signature": "def indexes_allnot(ol,value):", "body": "length = ol.__len__()<EOL>indexes =[]<EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(value == ol[i]):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>indexes.append(i)<EOL><DEDENT><DEDENT>return(indexes)<EOL>", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindexes_allnot(ol,'a')", "id": "f1599:m88"}
{"signature": "def getStr_to_pathlist(gs):", "body": "def numize(w):<EOL><INDENT>try:<EOL><INDENT>int(w)<EOL><DEDENT>except:<EOL><INDENT>try:<EOL><INDENT>float(w)<EOL><DEDENT>except:<EOL><INDENT>return(w)<EOL><DEDENT>else:<EOL><INDENT>return(float(w))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return(int(w))<EOL><DEDENT><DEDENT>def strip_quote(w):<EOL><INDENT>if(type(w) == type('<STR_LIT>')):<EOL><INDENT>if(w[<NUM_LIT:0>]==w[-<NUM_LIT:1>]):<EOL><INDENT>if((w[<NUM_LIT:0>]==\"<STR_LIT:'>\") |(w[<NUM_LIT:0>]=='<STR_LIT:\">')):<EOL><INDENT>return(w[<NUM_LIT:1>:-<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>return(w)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return(w)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return(w)<EOL><DEDENT><DEDENT>gs = gs[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL>pl = gs.split(\"<STR_LIT>\")<EOL>pl = array_map(pl,numize)<EOL>pl = array_map(pl,strip_quote)<EOL>return(pl)<EOL>", "docstring": "gs = \"[1]['1'][2]\"\ngetStr_to_pathlist(gs)\ngs = \"['u']['u1']\"\ngetStr_to_pathlist(gs)", "id": "f1599:m242"}
{"signature": "def mapio(ol,map_func,**kwargs):", "body": "lngth = ol.__len__()<EOL>diff_args_arr = kwargs['<STR_LIT>']<EOL>rslt = []<EOL>for i in range(<NUM_LIT:0>,lngth):<EOL><INDENT>index = i<EOL>value = ol[i]<EOL>func = map_func<EOL>args = diff_args_arr[i]<EOL>ele = func(index,*args)<EOL>rslt.append(ele)<EOL><DEDENT>return(rslt)<EOL>", "docstring": "#mapvo    \u5171\u4eab\u76f8\u540c\u7684f,i\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570\n#         share common map_func,NOT take index as a param for map_func\n#         common_func(value,*priv_args)", "id": "f1599:m13"}
{"signature": "def intlize(l):", "body": "return(list(map(lambda ele:int(ele),l)))<EOL>", "docstring": "from elist.elist import *\nl = [\"1\",\"3\",\"4\",\"5\"]\nintlize(l)", "id": "f1599:m131"}
{"signature": "def findfiv(ol,cond_func_args,**kwargs):", "body": "lngth = ol.__len__()<EOL>diff_funcs_arr = kwargs['<STR_LIT>']<EOL>common_args_arr = init(lngth,map_func_args)<EOL>rslt = findfivo(ol,cond_funcs=diff_funcs_arr,cond_func_args_array=common_args_arr)<EOL>return(rslt)<EOL>", "docstring": "#findfiv           \u5171\u4eab\u76f8\u540c\u7684o                         share common other_args\n#cond_func         diff_func(index,value,*common_args)", "id": "f1599:m24"}
{"signature": "def update_desc_rcin_path(desc,sibs_len,pdesc_level):", "body": "psibs_len = pdesc_level.__len__()<EOL>parent_breadth = desc['<STR_LIT>'][-<NUM_LIT:1>]<EOL>if(desc['<STR_LIT>']==(sibs_len - <NUM_LIT:1>)):<EOL><INDENT>if(parent_breadth==(psibs_len -<NUM_LIT:1>)):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>parent_rsib_breadth = parent_breadth + <NUM_LIT:1><EOL>prsib_desc = pdesc_level[parent_rsib_breadth]<EOL>if(prsib_desc['<STR_LIT>']):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>rcin_path = copy.deepcopy(prsib_desc['<STR_LIT:path>'])<EOL>rcin_path.append(<NUM_LIT:0>)<EOL>desc['<STR_LIT>'] = rcin_path<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>return(desc)<EOL>", "docstring": "rightCousin\nnextCousin\nrightCin\nnextCin\nrcin\nncin\n\nparents are neighbors,and on the right", "id": "f1599:m238"}
{"signature": "def rangize_supplement(spans,lngth):", "body": "rslt = []<EOL>si = <NUM_LIT:0><EOL>ei = spans[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>if(si == ei):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>rslt.append((si,ei))<EOL><DEDENT>prev_ei = spans[<NUM_LIT:0>][<NUM_LIT:1>]<EOL>for i in range(<NUM_LIT:1>,spans.__len__()):<EOL><INDENT>si = prev_ei<EOL>ei = spans[i][<NUM_LIT:0>]<EOL>rslt.append((si,ei))<EOL>prev_ei = spans[i][<NUM_LIT:1>]<EOL><DEDENT>if(prev_ei < lngth):<EOL><INDENT>rslt.append((prev_ei,lngth))<EOL><DEDENT>else:<EOL><INDENT>rslt.append((prev_ei,lngth+<NUM_LIT:1>))<EOL><DEDENT>return(rslt)<EOL>", "docstring": "spans = [(0, 3), (4, 7), (8, 10), (11, 12), (13, 16), (17, 20)]\nrangize_supplement(spans,24)", "id": "f1599:m201"}
{"signature": "def select_some(ol,*seqs):", "body": "seqs = list(seqs)<EOL>return(select_seqs(ol,seqs))<EOL>", "docstring": "from elist.elist import *\nol = ['a','b','c','d']\nselect_some(ol,1,2)", "id": "f1599:m53"}
{"signature": "def remove_lastnot(ol,value,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>length = ol.__len__()<EOL>if(mode == \"<STR_LIT>\"):<EOL><INDENT>new = copy.deepcopy(ol)<EOL>for i in range(length-<NUM_LIT:1>,-<NUM_LIT:1>,-<NUM_LIT:1>):<EOL><INDENT>if(new[i] == value):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>new.pop(i)<EOL>return(new)<EOL><DEDENT><DEDENT>return(new)<EOL><DEDENT>else:<EOL><INDENT>for i in range(length-<NUM_LIT:1>,-<NUM_LIT:1>,-<NUM_LIT:1>):<EOL><INDENT>if(ol[i] == value):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>ol.pop(i)<EOL>return(ol)<EOL><DEDENT><DEDENT>return(ol)<EOL><DEDENT>", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,'a',3,'a',5,'a']\nid(ol)\nnew = remove_lastnot(ol,'a')\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a']\nid(ol)\nrslt = remove_lastnot(ol,'a',mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m115"}
{"signature": "def range_decompress(cl):", "body": "def cond_func(ele):<EOL><INDENT>length = ele.__len__()<EOL>cond = (length == <NUM_LIT:1>)<EOL>if(cond):<EOL><INDENT>return(ord(ele))<EOL><DEDENT>else:<EOL><INDENT>x = ord(ele[<NUM_LIT:0>])<EOL>y = ord(ele[<NUM_LIT:1>])<EOL>return((x,y))<EOL><DEDENT><DEDENT>if(type(cl[<NUM_LIT:0>])==type(<NUM_LIT:0>)):<EOL><INDENT>T = True<EOL><DEDENT>elif(cl[<NUM_LIT:0>].__len__() == <NUM_LIT:1>):<EOL><INDENT>T = (type(cl[<NUM_LIT:0>]) == type(<NUM_LIT:0>))<EOL><DEDENT>else:<EOL><INDENT>T = (type(cl[<NUM_LIT:0>][<NUM_LIT:0>]) == type(<NUM_LIT:0>))<EOL><DEDENT>if(T):<EOL><INDENT>l = cl <EOL><DEDENT>else:<EOL><INDENT>l = array_map(cl,cond_func)<EOL><DEDENT>rslt = []<EOL>for i in range(<NUM_LIT:0>,l.__len__()):<EOL><INDENT>ele = l[i]<EOL>if(type(ele) == type(<NUM_LIT:0>)):<EOL><INDENT>arr = [ele]<EOL><DEDENT>elif(ele.__len__() == <NUM_LIT:1>):<EOL><INDENT>arr = [ele]<EOL><DEDENT>else:<EOL><INDENT>sv = ele[<NUM_LIT:0>]<EOL>ev = ele[<NUM_LIT:1>]<EOL>arr = init_range(sv,ev+<NUM_LIT:1>,<NUM_LIT:1>)<EOL><DEDENT>if(T):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>arr = array_map(arr,chr)<EOL><DEDENT>rslt.extend(arr)<EOL><DEDENT>return(rslt)<EOL>", "docstring": "#only support sorted-ints or sorted-ascii\ncl = [1, (5, 8), (13, 14), 18, (30, 34)]\nrange_decompress(cl)\ncl = [1, (5, 8), (13, 14), 18, (30, 34), 40]\nrange_decompress(cl)\ncl = [('a', 'd'), ('j', 'n'), 'u', ('y', 'z')]\nrange_decompress(cl)", "id": "f1599:m204"}
{"signature": "def remove_allnot(ol,value,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>new = []<EOL>length = ol.__len__()<EOL>cpol = copy.deepcopy(ol)<EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(cpol[i]==value):<EOL><INDENT>new.append(cpol[i])<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>return(new) <EOL><DEDENT>else:<EOL><INDENT>ol.clear()<EOL>ol.extend(new)<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = remove_allnot(ol,'a')\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = remove_allnot(ol,'a',mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m123"}
{"signature": "def diff_indexes(l1,l2):", "body": "rslt = []<EOL>for i in range(<NUM_LIT:0>,l1.__len__()):<EOL><INDENT>if(l1[i]!=l2[i]):<EOL><INDENT>rslt.append(i)<EOL><DEDENT><DEDENT>return(rslt)<EOL>", "docstring": "from elist.elist import *\nl1 = [1,2,3,5]\nl2 = [0,2,3,4]\ndiff_indexes(l1,l2)", "id": "f1599:m177"}
{"signature": "def indexes_some(ol,value,*seqs):", "body": "seqs = list(seqs)<EOL>length = ol.__len__()<EOL>indexes =[]<EOL>seq = -<NUM_LIT:1><EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(value == ol[i]):<EOL><INDENT>seq = seq + <NUM_LIT:1><EOL>if(seq in seqs):<EOL><INDENT>indexes.append(i)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return(indexes)<EOL>", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindexes_some(ol,'a',0,2)\nindexes_some(ol,'a',0,1)\nindexes_some(ol,'a',1,2)\nindexes_some(ol,'a',3,4)", "id": "f1599:m89"}
{"signature": "def getitem_via_sibseqs(ol,*sibseqs):", "body": "pathlist = list(sibseqs)<EOL>this = ol<EOL>for i in range(<NUM_LIT:0>,pathlist.__len__()):<EOL><INDENT>key = pathlist[i]<EOL>this = this.__getitem__(key)<EOL><DEDENT>return(this)<EOL>", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\ngetitem_via_sibseqs(y,1,1)", "id": "f1599:m185"}
{"signature": "def cond_replace_value_all(ol,dst_value,**kwargs):", "body": "cond_func = kwargs['<STR_LIT>']<EOL>if('<STR_LIT>' in kwargs):<EOL><INDENT>cond_func_args = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>cond_func_args = []<EOL><DEDENT>if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>new = copy.deepcopy(ol)<EOL>selected = find_all(new,cond_func,*cond_func_args)<EOL>selected_indexes = array_map(selected,lambda ele:ele['<STR_LIT:index>'])<EOL>new = replace_seqs(new,dst_value,selected_indexes)<EOL>if(mode == \"<STR_LIT>\"):<EOL><INDENT>return(new)<EOL><DEDENT>else:<EOL><INDENT>ol.clear()<EOL>ol.extend(new)<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\ndef afterCH(ele,ch):\n    cond = (ord(str(ele)) > ord(ch))\n    return(cond)\n\nnew = cond_replace_value_all(ol,\"REPLACED\",cond_func=afterCH,cond_func_args=['B'])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\nrslt = cond_replace_value_all(ol,\"REPLACED\",cond_func=afterCH,cond_func_args=['B'],mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m196"}
{"signature": "def brkl2kvlist(arr,interval,sub_pos=<NUM_LIT:1>,**kwargs):", "body": "lngth = arr.__len__()<EOL>brkseqs1 = init_range(<NUM_LIT:0>,lngth,interval)<EOL>brkseqs2 = init_range(sub_pos,lngth,interval)<EOL>brkseqs = interleave(brkseqs1,brkseqs2)<EOL>l = broken_seqs(arr,brkseqs)<EOL>kl = select_evens(l)<EOL>vl = select_odds(l)<EOL>if(\"<STR_LIT>\" in kwargs):<EOL><INDENT>single_key = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>single_key = True<EOL><DEDENT>if(sub_pos == <NUM_LIT:1>):<EOL><INDENT>if(single_key):<EOL><INDENT>kl = mapv(kl,lambda ele:ele[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>return((kl,vl))<EOL>", "docstring": "arr = [\"color1\",\"r1\",\"g1\",\"b1\",\"a1\",\"color2\",\"r2\",\"g2\",\"b2\",\"a2\"]\nbrkl2kvlist(arr,5)\n(['color1', 'color2'], [['r1', 'g1', 'b1', 'a1'], ['r2', 'g2', 'b2', 'a2']])\nbrkl2kvlist(arr,5,2)\n([['color1', 'r1'], ['color2', 'r2']], [['g1', 'b1', 'a1'], ['g2', 'b2', 'a2']])", "id": "f1599:m208"}
{"signature": "def insert_sections_many(ol,secs,locs,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>secs = copy.deepcopy(secs)<EOL>locs = copy.deepcopy(locs)<EOL>brked = broken_seqs(ol,locs)<EOL>seclen = secs.__len__()<EOL>brklen = brked.__len__()<EOL>if(locs[<NUM_LIT:0>]==<NUM_LIT:0>):<EOL><INDENT>new = secs[<NUM_LIT:0>]<EOL>length = seclen -<NUM_LIT:1><EOL>if(length < brklen):<EOL><INDENT>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>new.extend(brked[i])<EOL>new.extend(secs[i+<NUM_LIT:1>])<EOL><DEDENT>for i in range(length,brklen):<EOL><INDENT>new.extend(brked[i])<EOL><DEDENT><DEDENT>elif(length == brklen):<EOL><INDENT>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>new.extend(brked[i])<EOL>new.extend(secs[i+<NUM_LIT:1>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for i in range(<NUM_LIT:0>,brklen):<EOL><INDENT>new.extend(brked[i])<EOL>new.extend(secs[i+<NUM_LIT:1>])<EOL><DEDENT>for i in range(brklen,length):<EOL><INDENT>new.extend(secs[i])<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>new = brked[<NUM_LIT:0>]<EOL>length = brklen -<NUM_LIT:1><EOL>if(length < seclen):<EOL><INDENT>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>new.extend(secs[i])<EOL>new.extend(brked[i+<NUM_LIT:1>])<EOL><DEDENT>for i in range(length,seclen):<EOL><INDENT>new.extend(secs[i])<EOL><DEDENT><DEDENT>elif(length == seclen):<EOL><INDENT>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>new.extend(secs[i])<EOL>new.extend(brked[i+<NUM_LIT:1>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for i in range(<NUM_LIT:0>,seclen):<EOL><INDENT>new.extend(secs[i])<EOL>new.extend(brked[i+<NUM_LIT:1>])<EOL><DEDENT>for i in range(seclen,length):<EOL><INDENT>new.extend(brked[i])<EOL><DEDENT><DEDENT><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>return(new)<EOL><DEDENT>else:<EOL><INDENT>ol.clear()<EOL>ol.extend(new)<EOL>return(ol)<EOL><DEDENT>", "docstring": "ol = initRange(0,20,1)\nol\nlocs = [1,6,14,9]\nsecs = 
[\n    ['a','a','a'],\n    ['b','b'],\n    ['c','c','c','c'],\n    ['d','d']\n]\nrslt = insert_sections_many(ol,secs,locs)\nrslt\n####\nol\nlocs = [0,3,6,9,12,15,16]\nsecs = [\n    ['a','a','a'],\n    ['b','b'],\n    ['c','c','c','c'],\n    ['d','d']\n]\nrslt = insert_sections_many(ol,secs,locs)\nrslt\n####\nol\nlocs = [1,6,14,9]\nsecs = [\n    ['a','a','a'],\n    ['b','b'],\n    ['c','c','c','c'],\n    ['d','d'],\n    ['e'],\n    ['f','f','f','f'],\n    [777,777,777,777]\n]\nrslt = insert_sections_many(ol,secs,locs)\nrslt", "id": "f1599:m72"}
{"signature": "def deepcopy(ol):", "body": "return(copy.deepcopy(ol))<EOL>", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nid(ol)\nnew = deepcopy(ol)\nnew\nid(new)", "id": "f1599:m135"}
{"signature": "def pop(ol,index,**kwargs):", "body": "index = uniform_index(index,ol.__len__())<EOL>if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>new = copy.deepcopy(ol)<EOL>popped = new.pop(index)<EOL>return({'<STR_LIT>':popped,'<STR_LIT:list>':new})<EOL><DEDENT>else:<EOL><INDENT>popped = ol.pop(index)<EOL>return(popped)<EOL><DEDENT>", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,2,3,4]\nid(ol)\nrslt = pop(ol,2)\npobj(rslt)\nol\nid(ol)\nid(rslt['list'])\n####\nol = [1,2,3,4]\nid(ol)\nrslt = pop(ol,2,mode=\"original\")\nrslt\nol\nid(ol)", "id": "f1599:m106"}
{"signature": "def copy_within(ol,target, start=None, end=None):", "body": "length = ol.__len__()<EOL>if(start==None):<EOL><INDENT>start = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>if(end==None):<EOL><INDENT>end = length<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>target = uniform_index(target,length)<EOL>start = uniform_index(start,length)<EOL>end = uniform_index(end,length)<EOL>cplen = end - start<EOL>cpend = target+cplen<EOL>if(target+cplen > length):<EOL><INDENT>cpend = length<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>shift = start - target<EOL>if(shift>=<NUM_LIT:0>):<EOL><INDENT>for i in range(target,cpend):<EOL><INDENT>ol[i] = ol[i+shift]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for i in range(cpend-<NUM_LIT:1>,target-<NUM_LIT:1>,-<NUM_LIT:1>):<EOL><INDENT>ol[i] = ol[i+shift]<EOL><DEDENT><DEDENT>return(ol)<EOL>", "docstring": "from elist.elist import *\nol = [1, 2, 3, 4, 5]\nid(ol)\nrslt = copyWithin(ol,0,3,4)\nrslt\nid(rslt)\n####\nol = [1, 2, 3, 4, 5]\nid(ol)\nrslt = copyWithin(ol,0,3)\nrslt\nid(rslt)\n####\nol = [1, 2, 3, 4, 5]\nid(ol)\nrslt = copyWithin(ol,-2)\nrslt\nid(rslt)\n####copyWithin is the same as copy_within", "id": "f1599:m136"}
{"signature": "def is_list(obj):", "body": "if(type(obj)==type([])):<EOL><INDENT>return(True)<EOL><DEDENT>else:<EOL><INDENT>return(False)<EOL><DEDENT>", "docstring": "from elist.elist import *\nis_list([1,2,3])\nis_list(200)", "id": "f1599:m205"}
{"signature": "def rangize_supp(spans,lngth):", "body": "rslt = []<EOL>si = <NUM_LIT:0><EOL>ei = spans[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>if(si == ei):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>rslt.append((si,ei))<EOL><DEDENT>prev_ei = spans[<NUM_LIT:0>][<NUM_LIT:1>]<EOL>for i in range(<NUM_LIT:1>,spans.__len__()):<EOL><INDENT>si = prev_ei<EOL>ei = spans[i][<NUM_LIT:0>]<EOL>rslt.append((si,ei))<EOL>prev_ei = spans[i][<NUM_LIT:1>]<EOL><DEDENT>if(prev_ei < lngth):<EOL><INDENT>rslt.append((prev_ei,lngth))<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>return(rslt)<EOL>", "docstring": "spans = [(0, 3), (4, 7), (8, 10), (11, 12), (13, 16), (17, 20)]\nrangize_supplement(spans,24)", "id": "f1599:m202"}
{"signature": "def getitem_via_pathlist2(pathlist,ol):", "body": "this = ol<EOL>for i in range(<NUM_LIT:0>,pathlist.__len__()):<EOL><INDENT>key = pathlist[i]<EOL>this = this.__getitem__(key)<EOL><DEDENT>return(this)<EOL>", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\ngetitem_via_pathlist2([1,1],y)", "id": "f1599:m184"}
{"signature": "def value_interval(ol,value):", "body": "si,ei = where(ol,value)<EOL>if(si == None):<EOL><INDENT>sv = None<EOL><DEDENT>else:<EOL><INDENT>sv = ol[si]<EOL><DEDENT>if(ei == None):<EOL><INDENT>ev = None<EOL><DEDENT>else:<EOL><INDENT>ev = ol[ei]<EOL><DEDENT>return((sv,ev))<EOL>", "docstring": "ol = [0, 4, 6, 8, 10, 14]\nvalue_interval(ol,-1)\nvalue_interval(ol,1)\nvalue_interval(ol,2)\nvalue_interval(ol,3)\nvalue_interval(ol,4)\nvalue_interval(ol,9)\nvalue_interval(ol,14)\nvalue_interval(ol,17)", "id": "f1599:m217"}
{"signature": "def remove_seqsnot(ol,value,seqs,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>seqs = list(seqs)<EOL>new = []<EOL>length = ol.__len__()<EOL>cpol = copy.deepcopy(ol)<EOL>seq = -<NUM_LIT:1><EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(not(cpol[i]==value)):<EOL><INDENT>seq = seq + <NUM_LIT:1><EOL>if(seq in seqs):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>new.append(cpol[i])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>new.append(cpol[i])<EOL><DEDENT><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>return(new) <EOL><DEDENT>else:<EOL><INDENT>ol.clear()<EOL>ol.extend(new)<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = remove_seqsnot(ol,'a',{1,3})\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = remove_seqsnot(ol,'a',{1,3},mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m121"}
{"signature": "def value_indexes_mapping(l):", "body": "pt = copy.deepcopy(l)<EOL>desc = {}<EOL>vset = set({})<EOL>for v in pt:<EOL><INDENT>vset.add(v)<EOL><DEDENT>for v in vset:<EOL><INDENT>desc[v] = []<EOL><DEDENT>for i in range(<NUM_LIT:0>,l.__len__()):<EOL><INDENT>desc[l[i]].append(i)<EOL><DEDENT>return(desc)<EOL>", "docstring": "from elist.elist import *\nfrom elist.jprint import pobj\nl = ['a','b','b','a','c','b']\ndesc = value_indexes_mapping(l)\npobj(desc)", "id": "f1599:m181"}
{"signature": "def update_desc_rsib_path(desc,sibs_len):", "body": "if(desc['<STR_LIT>']<(sibs_len-<NUM_LIT:1>)):<EOL><INDENT>rsib_path = copy.deepcopy(desc['<STR_LIT:path>'])<EOL>rsib_path[-<NUM_LIT:1>] = desc['<STR_LIT>']+<NUM_LIT:1><EOL>desc['<STR_LIT>'] = rsib_path<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>return(desc)<EOL>", "docstring": "rightSibling\nnextSibling\nrightSib\nnextSib\nrsib\nnsib\n\nhave the same parent,and on the right", "id": "f1599:m236"}
{"signature": "def prepend(ol,ele,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>new = [ele]<EOL>cpol = copy.deepcopy(ol)<EOL>new.extend(cpol)<EOL>return(new)<EOL><DEDENT>else:<EOL><INDENT>length = ol.__len__()<EOL>ol.append(None)<EOL>for i in range(length-<NUM_LIT:1>,-<NUM_LIT:1>,-<NUM_LIT:1>):<EOL><INDENT>ol[i+<NUM_LIT:1>] = ol[i]<EOL><DEDENT>ol[<NUM_LIT:0>] = ele<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,2,3,4]\nele = 5\nid(ol)\nprepend(ol,ele,mode=\"original\")\nol\nid(ol)\n####\nol = [1,2,3,4]\nele = 5\nid(ol)\nnew = prepend(ol,ele)\nnew\nid(new)", "id": "f1599:m56"}
{"signature": "def array_of(*eles):", "body": "return(list(eles))<EOL>", "docstring": "from elist.elist import *\narray_of(1,2,4,5,6)", "id": "f1599:m134"}
{"signature": "def findfivo(ol,*args,**kwargs):", "body": "args = list(args)<EOL>lngth = args.__len__()<EOL>if(lngth==<NUM_LIT:0>):<EOL><INDENT>diff_funcs_arr = kwargs['<STR_LIT>']<EOL>diff_args_arr = kwargs['<STR_LIT>']<EOL><DEDENT>elif(lngth==<NUM_LIT:1>):<EOL><INDENT>if('<STR_LIT>' in kwargs):<EOL><INDENT>diff_funcs_arr = args[<NUM_LIT:0>]<EOL>diff_args_arr = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>diff_funcs_arr = kwargs['<STR_LIT>']<EOL>diff_args_arr = args[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>diff_funcs_arr = args[<NUM_LIT:0>]<EOL>diff_args_arr = args[<NUM_LIT:1>]<EOL><DEDENT>lngth = ol.__len__()<EOL>rslt = []<EOL>for i in range(<NUM_LIT:0>,lngth):<EOL><INDENT>index = i<EOL>value = ol[i]<EOL>func = diff_funcs_arr[i]<EOL>args = diff_args_arr[i]<EOL>cond = func(index,value,*args)<EOL>if(cond):<EOL><INDENT>rslt.append((index,value))<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return(rslt)<EOL>", "docstring": "#findfivo          f,i,v,o\u56db\u5143\u51b3\u5b9a                     fivo-4-tuple-engine\n#cond_func         diff_func(index,value,*diff_args)", "id": "f1599:m21"}
{"signature": "def get_children_handler(self,*args):", "body": "return(self.pdata)<EOL>", "docstring": "list's children list is self", "id": "f1599:c2:m0"}
{"signature": "def init_range(start,end,step):", "body": "return(list(range(start,end,step)))<EOL>", "docstring": "init_range(1,20,2)", "id": "f1599:m130"}
{"signature": "def cond_remove_all(ol,**kwargs):", "body": "cond_func = kwargs['<STR_LIT>']<EOL>if('<STR_LIT>' in kwargs):<EOL><INDENT>cond_func_args = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>cond_func_args = []<EOL><DEDENT>if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>new = copy.deepcopy(ol)<EOL>selected = find_all(new,cond_func,*cond_func_args)<EOL>selected_indexes = array_map(selected,lambda ele:ele['<STR_LIT:index>'])<EOL>new = pop_indexes(new,selected_indexes)['<STR_LIT:list>']<EOL>if(mode == \"<STR_LIT>\"):<EOL><INDENT>return(new)<EOL><DEDENT>else:<EOL><INDENT>ol.clear()<EOL>ol.extend(new)<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\ndef afterCH(ele,ch):\n    cond = (ord(str(ele)) > ord(ch))\n    return(cond)\n\nnew = cond_remove_all(ol,cond_func=afterCH,cond_func_args=['B'])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'X',3,'b',5,'c',6,'A',7,'b',8,'B',9]\nid(ol)\nrslt = cond_remove_all(ol,cond_func=afterCH,cond_func_args=['B'],mode='original')\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m126"}
{"signature": "def replace_seqs(ol,value,indexes,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>indexes = list(indexes)<EOL>new = []<EOL>length = ol.__len__()<EOL>cpol = copy.deepcopy(ol)<EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(i in indexes):<EOL><INDENT>new.append(value)<EOL><DEDENT>else:<EOL><INDENT>new.append(cpol[i])<EOL><DEDENT><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>return(new)<EOL><DEDENT>else:<EOL><INDENT>ol.clear()<EOL>ol.extend(new)<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nnew = replace_seqs(ol,'AAA',[1,3,7])\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a',6,'a']\nid(ol)\nrslt = replace_seqs(ol,'AAA',[1,3,7],mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)\n#replace_indexes = replace_seqs", "id": "f1599:m192"}
{"signature": "def insert_many(ol,eles,locs,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>eles = copy.deepcopy(eles)<EOL>locs = copy.deepcopy(locs)<EOL>new = []<EOL>length = ol.__len__()<EOL>cpol = copy.deepcopy(ol)<EOL>for i in range(<NUM_LIT:0>,locs.__len__()):<EOL><INDENT>if(locs[i]>=length):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>locs[i] = uniform_index(locs[i],length)<EOL><DEDENT><DEDENT>tmp = sorted_refer_to(eles,locs)<EOL>eles = tmp['<STR_LIT:list>']<EOL>locs = tmp['<STR_LIT>']<EOL>label = eles.__len__()<EOL>si = <NUM_LIT:0><EOL>ei = <NUM_LIT:0><EOL>for i in range(<NUM_LIT:0>,locs.__len__()):<EOL><INDENT>if(locs[i]>=length):<EOL><INDENT>label = i<EOL>break<EOL><DEDENT>else:<EOL><INDENT>ei = locs[i]<EOL>new.extend(cpol[si:ei])<EOL>new.append(eles[i])<EOL>si = ei<EOL><DEDENT><DEDENT>for i in range(label,locs.__len__()):<EOL><INDENT>new.append(eles[i])<EOL><DEDENT>new.extend(cpol[ei:])<EOL>if(mode == \"<STR_LIT>\"):<EOL><INDENT>return(new)<EOL><DEDENT>else:<EOL><INDENT>ol.clear()<EOL>ol.extend(new)<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,2,3,4,5]\neles = [7,77,777]\nlocs = [0,2,4]\nid(ol)\nnew = insert_many(ol,eles,locs)\nol\nnew\nid(new)\n####\nol = [1,2,3,4,5]\neles = [7,77,777]\nlocs = [0,2,4]\nid(ol)\nrslt = insert_many(ol,eles,locs,mode=\"original\")\nol\nrslt\nid(rslt)", "id": "f1599:m69"}
{"signature": "def first_continuous_indexesnot_slice(ol,value):", "body": "length = ol.__len__()<EOL>begin = None<EOL>slice = []<EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(not(ol[i]==value)):<EOL><INDENT>begin = i<EOL>break<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if(begin == None):<EOL><INDENT>return(None)<EOL><DEDENT>else:<EOL><INDENT>slice.append(begin)<EOL>for i in range(begin+<NUM_LIT:1>,length):<EOL><INDENT>if(not(ol[i]==value)):<EOL><INDENT>slice.append(i)<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return(slice)<EOL>", "docstring": "from elist.elist import *\nol = [\"a\",0,1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nfirst_continuous_indexesnot_slice(ol,\"a\")", "id": "f1599:m94"}
{"signature": "def sort(ol,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>new = copy.deepcopy(ol)<EOL>new.sort()<EOL>return(new) <EOL><DEDENT>else:<EOL><INDENT>ol.sort()<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,3,4,2]\nid(ol)\nnew = sort(ol)\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,3,4,2]\nid(ol)\nrslt = sort(ol,mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)", "id": "f1599:m74"}
{"signature": "def init_desc_matrix(l):", "body": "leaf = is_leaf(l)<EOL>root_desc = new_ele_description(leaf=leaf,depth=<NUM_LIT:0>,breadth_path=[],path=[],parent_path=[],parent_breadth_path=[])<EOL>if(leaf):<EOL><INDENT>root_desc['<STR_LIT>'] = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>desc_matrix = [<EOL>[root_desc]<EOL>]<EOL>return(desc_matrix)<EOL>", "docstring": "from elist.elist import *\nfrom elist.jprint import pobj\nl = [1,[4],2,[3,[5,6]]]\ndesc_matrix = init_desc_matrix(l)\npobj(desc_matrix)", "id": "f1599:m232"}
{"signature": "def mapfi(ol,map_func_args,**kwargs):", "body": "diff_funcs_arr = kwargs['<STR_LIT>']<EOL>lngth = ol.__len__()<EOL>rslt = []<EOL>for i in range(<NUM_LIT:0>,lngth):<EOL><INDENT>index = i<EOL>value = ol[i]<EOL>func = diff_funcs_arr[i]<EOL>args = map_func_args<EOL>ele = func(index,*args)<EOL>rslt.append(ele)<EOL><DEDENT>return(rslt)<EOL>", "docstring": "#mapfi            \u5171\u4eab\u76f8\u540c\u7684o,v\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570\n#                 share common other_args,NOT take value as a param for map_func\n#map_func         diff_func(index,*common_args)", "id": "f1599:m7"}
{"signature": "def index_which(ol,value,which):", "body": "length = ol.__len__()<EOL>seq = -<NUM_LIT:1><EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(value == ol[i]):<EOL><INDENT>seq = seq + <NUM_LIT:1><EOL>if(seq == which):<EOL><INDENT>return(i)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return(None)<EOL>", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindex_which(ol,'a',0)\nindex_which(ol,'a',1)\nindex_which(ol,'a',2)\nindex_which(ol,'a',3) == None", "id": "f1599:m85"}
{"signature": "def which_continuous_indexes_slice(ol,value,which):", "body": "length = ol.__len__()<EOL>seq = -<NUM_LIT:1><EOL>cursor = <NUM_LIT:0><EOL>begin = None<EOL>slice = []<EOL>while(cursor < length):<EOL><INDENT>cond1 = (ol[cursor] == value)<EOL>cond2 = (begin == None)<EOL>if(cond1 & cond2):<EOL><INDENT>begin = cursor<EOL>slice.append(cursor)<EOL>cursor = cursor + <NUM_LIT:1><EOL><DEDENT>elif(cond1 & (not(cond2))):<EOL><INDENT>slice.append(cursor)<EOL>cursor = cursor + <NUM_LIT:1><EOL><DEDENT>elif((not(cond1)) & (not(cond2))):<EOL><INDENT>seq = seq + <NUM_LIT:1><EOL>if(seq == which):<EOL><INDENT>return(slice)<EOL><DEDENT>else:<EOL><INDENT>cursor = cursor + <NUM_LIT:1><EOL>begin = None<EOL>slice = []<EOL><DEDENT><DEDENT>else:<EOL><INDENT>cursor = cursor + <NUM_LIT:1><EOL><DEDENT><DEDENT>if(slice):<EOL><INDENT>seq = seq + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>if(seq == which):<EOL><INDENT>return(slice)<EOL><DEDENT>else:<EOL><INDENT>return([])<EOL><DEDENT>", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nwhich_continuous_indexes_slice(ol,\"a\",0)\nwhich_continuous_indexes_slice(ol,\"a\",1)\nwhich_continuous_indexes_slice(ol,\"a\",2)\nwhich_continuous_indexes_slice(ol,\"a\",3)\nwhich_continuous_indexes_slice(ol,\"b\",0)", "id": "f1599:m97"}
{"signature": "def getitem_via_pathlist(ol,pathlist):", "body": "this = ol<EOL>for i in range(<NUM_LIT:0>,pathlist.__len__()):<EOL><INDENT>key = pathlist[i]<EOL>this = this.__getitem__(key)<EOL><DEDENT>return(this)<EOL>", "docstring": "from elist.elist import *\ny = ['a',['b',[\"bb\"]],'c']\ny[1][1]\ngetitem_via_pathlist(y,[1,1])", "id": "f1599:m183"}
{"signature": "def last_continuous_indexes_slice(ol,value):", "body": "length = ol.__len__()<EOL>end = None<EOL>slice = []<EOL>for i in range(length-<NUM_LIT:1>,-<NUM_LIT:1>,-<NUM_LIT:1>):<EOL><INDENT>if(ol[i]==value):<EOL><INDENT>end = i<EOL>break<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if(end == None):<EOL><INDENT>return(None)<EOL><DEDENT>else:<EOL><INDENT>slice.append(end)<EOL>for i in range(end-<NUM_LIT:1>,-<NUM_LIT:1>,-<NUM_LIT:1>):<EOL><INDENT>if(ol[i]==value):<EOL><INDENT>slice.append(i)<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>slice.reverse()<EOL>return(slice)<EOL>", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nlast_continuous_indexes_slice(ol,\"a\")", "id": "f1599:m95"}
{"signature": "def mapivo(ol,map_func,**kwargs):", "body": "lngth = ol.__len__()<EOL>common_funcs_arr = init(lngth,map_func)<EOL>diff_args_arr = kwargs['<STR_LIT>']<EOL>rslt = mapfivo(ol,map_funcs=common_funcs_arr,map_func_args_array=diff_args_arr)<EOL>return(rslt)<EOL>", "docstring": "#mapivo           \u5171\u4eab\u76f8\u540c\u7684f                         share common map_func\n#map_func         common_func(index,value,*diff_args)", "id": "f1599:m4"}
{"signature": "def join(ol,separator=\"<STR_LIT:U+002C>\"):", "body": "if(ol.__len__() == <NUM_LIT:0>):<EOL><INDENT>return(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>cond = (type(ol[<NUM_LIT:0>])==type(b'<STR_LIT>'))<EOL>if(cond):<EOL><INDENT>rslt = b'<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>rslt =\"<STR_LIT>\"<EOL><DEDENT>length = ol.__len__()<EOL>for i in range(<NUM_LIT:0>,length-<NUM_LIT:1>):<EOL><INDENT>ele = ol[i]<EOL>if(cond):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>ele = str(ele)<EOL><DEDENT>rslt = rslt + ele + separator<EOL><DEDENT>if(cond):<EOL><INDENT>rslt = rslt + ol[length - <NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>rslt = rslt + str(ol[length - <NUM_LIT:1>])<EOL><DEDENT>return(rslt)<EOL>", "docstring": "from elist.elist import *\nol = [1,2,3,4]\njoin(ol,separator=\"-\")", "id": "f1599:m145"}
{"signature": "def pop_indexes(ol,indexes,**kwargs):", "body": "length = ol.__len__()<EOL>indexes = list(map(lambda index:uniform_index(index,length),list(indexes)))<EOL>if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>cpol = copy.deepcopy(ol)<EOL>new = []<EOL>popped = []<EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(i in indexes):<EOL><INDENT>popped.append(cpol[i])<EOL><DEDENT>else:<EOL><INDENT>new.append(cpol[i])<EOL><DEDENT><DEDENT>return({'<STR_LIT>':popped,'<STR_LIT:list>':new})<EOL><DEDENT>else:<EOL><INDENT>tmp = []<EOL>popped = []<EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(i in indexes):<EOL><INDENT>popped.append(ol[i])<EOL><DEDENT>else:<EOL><INDENT>tmp.append(ol[i])<EOL><DEDENT><DEDENT>ol.clear()<EOL>for i in range(<NUM_LIT:0>,tmp.__len__()):<EOL><INDENT>ol.append(tmp[i])<EOL><DEDENT>return(popped)<EOL><DEDENT>", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,2,3,4,5,6]\nid(ol)\nrslt = pop_indexes(ol,{0,-3,5})\nol\nid(ol)\nid(rslt['list'])\n####\nol = [1,2,3,4,5,6]\nid(ol)\nrslt = pop_indexes(ol,{0,-3,5},mode=\"original\")\nrslt\nol\nid(ol)", "id": "f1599:m110"}
{"signature": "def remove_first(ol,value,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>mode = \"<STR_LIT>\"<EOL><DEDENT>if(mode == \"<STR_LIT>\"):<EOL><INDENT>new = copy.deepcopy(ol)<EOL>new.remove(value)<EOL>return(new)<EOL><DEDENT>else:<EOL><INDENT>ol.remove(value)<EOL>return(ol)<EOL><DEDENT>", "docstring": "from elist.jprint import pobj\nfrom elist.elist import *\nol = [1,'a',3,'a',5,'a']\nid(ol)\nnew = remove_first(ol,'a')\nol\nnew\nid(ol)\nid(new)\n####\nol = [1,'a',3,'a',5,'a']\nid(ol)\nrslt = remove_first(ol,'a',mode=\"original\")\nol\nrslt\nid(ol)\nid(rslt)\n####array_remove is the same as remove_first", "id": "f1599:m112"}
{"signature": "def all_continuous_indexesnot_slices(ol,value):", "body": "rslt = []<EOL>length = ol.__len__()<EOL>cursor = <NUM_LIT:0><EOL>begin = None<EOL>slice = []<EOL>while(cursor < length):<EOL><INDENT>cond1 = not(ol[cursor] == value)<EOL>cond2 = (begin == None)<EOL>if(cond1 & cond2):<EOL><INDENT>begin = cursor<EOL>slice.append(cursor)<EOL><DEDENT>elif(cond1 & (not(cond2))):<EOL><INDENT>slice.append(cursor)<EOL><DEDENT>elif((not(cond1)) & (not(cond2))):<EOL><INDENT>rslt.append(slice)<EOL>begin = None<EOL>slice = []<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>cursor = cursor + <NUM_LIT:1><EOL><DEDENT>if(slice):<EOL><INDENT>rslt.append(slice)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>return(rslt)<EOL>", "docstring": "from elist.elist import *\nol = [1,\"a\",\"a\",2,3,\"a\",4,\"a\",\"a\",\"a\",5]\nall_continuous_indexesnot_slices(ol,\"a\")", "id": "f1599:m104"}
{"signature": "def index_firstnot(ol,value):", "body": "length = ol.__len__()<EOL>for i in range(<NUM_LIT:0>,length):<EOL><INDENT>if(value == ol[i]):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>return(i)<EOL><DEDENT><DEDENT>return(None)<EOL>", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindex_firstnot(ol,'a')\n####index_firstnot, array_indexnot, indexOfnot  are the same\narray_indexnot(ol,'a')\nindexOfnot(ol,'a')", "id": "f1599:m82"}
{"signature": "def get_next_char_level_in_j_str(curr_lv,curr_seq,j_str,block_op_pairs_dict=get_block_op_pairs(\"<STR_LIT>\")):", "body": "curr_ch = j_str[curr_seq]<EOL>next_ch = j_str[curr_seq + <NUM_LIT:1>]<EOL>cond = <NUM_LIT:0><EOL>for i in range(<NUM_LIT:1>,block_op_pairs_dict.__len__()+<NUM_LIT:1>):<EOL><INDENT>if(curr_ch == block_op_pairs_dict[i][<NUM_LIT:0>]):<EOL><INDENT>if(next_ch == block_op_pairs_dict[i][<NUM_LIT:1>]):<EOL><INDENT>next_lv = curr_lv               <EOL><DEDENT>else:<EOL><INDENT>next_lv = curr_lv + <NUM_LIT:1><EOL><DEDENT>cond = <NUM_LIT:1><EOL>break<EOL><DEDENT>elif(curr_ch == block_op_pairs_dict[i][<NUM_LIT:1>]):<EOL><INDENT>if(is_rop(next_ch,block_op_pairs_dict)):<EOL><INDENT>next_lv = curr_lv - <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>next_lv = curr_lv<EOL><DEDENT>cond = <NUM_LIT:1><EOL>break<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if(cond == <NUM_LIT:1>):<EOL><INDENT>pass<EOL><DEDENT>elif(is_rop(next_ch,block_op_pairs_dict)):<EOL><INDENT>next_lv = curr_lv - <NUM_LIT:1><EOL><DEDENT>else:    <EOL><INDENT>next_lv = curr_lv<EOL><DEDENT>curr_lv = next_lv<EOL>curr_seq = curr_seq + <NUM_LIT:1><EOL>return(curr_lv,curr_lv,curr_seq)<EOL>", "docstring": "the first-char is level-1\n        when current is  non-op, next-char-level = curr-level\n        when current is  lop,  non-paired-rop-next-char-level = lop-level+1;\n        when current is  lop,  paired-rop-next-char-level = lop-level\n        when current is  rop,  next-char-level = rop-level - 1\n        # {\"key_4_UF0aJJ6v\": \"value_1\", \"key_2_Hd0t\": [\"value_16\", \"value_8\", \"value_8\", \"value_15\", \"value_14\", \"value_19\", {......\n        # 122222222222222222222222222222222222222222222333333333333333333333333333333333333333333333333333333333333333333333334......\n        # {\\n\"key_4_UF0aJJ6v\": \"value_1\", \\n\"key_2_Hd0t\": [\\n\"value_16\", \\n\"value_8\", \\n\"value_8\", \\n\"value_15\", \\n\"value_14\", \\n\"value_19\",...... 
\n        # 1 222222222222222222222222222222 2222222222222222 3333333333333 333333333333 333333333333 3333333333333 3333333333333 3333333333333......", "id": "f1599:m246"}
{"signature": "def leaf_handler(self,*args):", "body": "desc = self.desc<EOL>pdesc = self.pdesc<EOL>desc['<STR_LIT>'] = True<EOL>desc['<STR_LIT>'] = <NUM_LIT:0><EOL>pdesc['<STR_LIT>'].append(copy.deepcopy(desc['<STR_LIT:path>']))<EOL>pdesc['<STR_LIT>'].append(copy.deepcopy(desc['<STR_LIT:path>']))<EOL>", "docstring": "leaf child handler", "id": "f1599:c2:m3"}
{"signature": "def index_lastnot(ol,value):", "body": "length = ol.__len__()<EOL>for i in range(length-<NUM_LIT:1>,-<NUM_LIT:1>,-<NUM_LIT:1>):<EOL><INDENT>if(value == ol[i]):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>return(i)<EOL><DEDENT><DEDENT>return(None)<EOL>", "docstring": "from elist.elist import *\nol = [1,'a',3,'a',4,'a',5]\nindex_lastnot(ol,'a')\n####lastIndexOfnot is the same as index_lastnot\nlastIndexOfnot(ol,'a')", "id": "f1599:m84"}
{"signature": "def uniqualize(l,**kwargs):", "body": "if('<STR_LIT>' in kwargs):<EOL><INDENT>mode = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>mode = '<STR_LIT>'<EOL><DEDENT>pt = copy.deepcopy(l)<EOL>seqs =[]<EOL>freq = {}<EOL>for i in range(<NUM_LIT:0>,pt.__len__()):<EOL><INDENT>v = pt[i]<EOL>if(v in freq):<EOL><INDENT>freq[v] = freq[v] + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>freq[v] = <NUM_LIT:0><EOL>seqs.append(i)<EOL><DEDENT><DEDENT>npt = select_seqs(pt,seqs)<EOL>pt = npt<EOL>if(mode == '<STR_LIT>'):<EOL><INDENT>return(npt)<EOL><DEDENT>else:<EOL><INDENT>l.clear()<EOL>l.extend(npt)<EOL>return(l)<EOL><DEDENT>", "docstring": "from elist.elist import *\nl = [1, 2, 2]\nnew = uniqualize(l)\nnew\nid(l)\nid(new)\n####\nl = [1, 2, 2]\nrslt = uniqualize(l,mode=\"original\")\nrslt\nid(l)\nid(rslt)", "id": "f1599:m148"}
{"signature": "def interleave(*arrays,**kwargs):", "body": "anum = arrays.__len__()<EOL>rslt = []<EOL>length = arrays[<NUM_LIT:0>].__len__()<EOL>for j in range(<NUM_LIT:0>,length):<EOL><INDENT>for i in range(<NUM_LIT:0>,anum):<EOL><INDENT>array = arrays[i]<EOL>rslt.append(array[j])<EOL><DEDENT><DEDENT>return(rslt)<EOL>", "docstring": "arr1 = [1,2,3,4]\narr2 = ['a','b','c','d']\narr3 = ['@','#','%','*']\ninterleave(arr1,arr2,arr3)", "id": "f1599:m150"}
{"signature": "def mapfvo(ol,**kwargs):", "body": "diff_funcs_arr = kwargs['<STR_LIT>']<EOL>diff_args_arr = kwargs['<STR_LIT>']<EOL>lngth = ol.__len__()<EOL>rslt = []<EOL>for i in range(<NUM_LIT:0>,lngth):<EOL><INDENT>index = i<EOL>value = ol[i]<EOL>func = diff_funcs_arr[i]<EOL>args = diff_args_arr[i]<EOL>ele = func(value,*args)<EOL>rslt.append(ele)<EOL><DEDENT>return(rslt)<EOL>", "docstring": "#mapfvo           i\u4e0d\u4f5c\u4e3amap_func\u53c2\u6570                 NOT take index as a param for map_func\n#map_func         diff_func(value,*diff_args)", "id": "f1599:m3"}
{"signature": "def same_indexes(l1,l2):", "body": "rslt = []<EOL>for i in range(<NUM_LIT:0>,l1.__len__()):<EOL><INDENT>if(l1[i]==l2[i]):<EOL><INDENT>rslt.append(i)<EOL><DEDENT><DEDENT>return(rslt)<EOL>", "docstring": "from elist.elist import *\nl1 = [1,2,3,5]\nl2 = [0,2,3,4]\nsame_indexes(l1,l2)", "id": "f1599:m179"}
{"signature": "def select_regex_in(pl,regex):", "body": "def cond_func(ele,index,regex):<EOL><INDENT>if(type(ele)==type([])):<EOL><INDENT>cond = regex_in(ele,regex)<EOL><DEDENT>else:<EOL><INDENT>m = regex.search(ele)<EOL>if(m == None):<EOL><INDENT>cond = False<EOL><DEDENT>else:<EOL><INDENT>cond = True<EOL><DEDENT><DEDENT>return(cond)<EOL><DEDENT>arr = cond_select_values_all2(pl,cond_func=cond_func, cond_func_args =[regex])<EOL>return(arr)<EOL>", "docstring": "regex = re.compile(\"^x.*x$\")\npl = ['bcd','xabcxx','xx','y']\nselect_regex_in(pl,'abc')", "id": "f1599:m159"}
{"signature": "def car(ol):", "body": "return(ol[<NUM_LIT:0>])<EOL>", "docstring": "from elist.elist import *\nol=[1,2,3,4]\ncar(ol)", "id": "f1599:m63"}
{"signature": "def pipe_shell_cmds(shell_CMDs):", "body": "len = shell_CMDs.__len__()<EOL>p = {}<EOL>p[<NUM_LIT:1>] = subprocess.Popen(shlex.split(shell_CMDs[<NUM_LIT:1>]), stdout=subprocess.PIPE,stderr=subprocess.PIPE)<EOL>for i in range(<NUM_LIT:2>,len):<EOL><INDENT>p[i] = subprocess.Popen(shlex.split(shell_CMDs[i]), stdin=p[i-<NUM_LIT:1>].stdout, stdout=subprocess.PIPE,stderr=subprocess.PIPE)<EOL><DEDENT>if(len > <NUM_LIT:1>):<EOL><INDENT>p[len] = subprocess.Popen(shlex.split(shell_CMDs[len]), stdin=p[len-<NUM_LIT:1>].stdout, stdout=subprocess.PIPE,stderr=subprocess.PIPE)<EOL><DEDENT>result = p[len].communicate()<EOL>if(len > <NUM_LIT:1>):<EOL><INDENT>for i in range(<NUM_LIT:2>,len+<NUM_LIT:1>):<EOL><INDENT>returncode = p[i].wait()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>returncode = p[len].wait()<EOL><DEDENT>return(result)<EOL>", "docstring": "shell_CMDs = {}\nshell_CMDs[1] = 'netstat -n'\nshell_CMDs[2] = \"awk {'print $6'}\"", "id": "f1601:m0"}
{"signature": "def stage(self, pipeline_name, stage_name, pipeline_counter=None):", "body": "return Stage(self, pipeline_name, stage_name, pipeline_counter=pipeline_counter)<EOL>", "docstring": "Returns an instance of :class:`Stage`\n\n        Args:\n            pipeline_name (str): Name of the pipeline the stage belongs to\n            stage_name (str): Name of the stage to act on\n            pipeline_counter (int): The pipeline instance the stage is for.\n\n        Returns:\n          Stage: an instantiated :class:`Stage`.", "id": "f1613:c1:m8"}
{"signature": "def pipeline(self, name):", "body": "return Pipeline(self, name)<EOL>", "docstring": "Instantiates a :class:`Pipeline` with the given name.\n\n        Args:\n          name: The name of the pipeline you want to interact with\n\n        Returns:\n          Pipeline: an instantiated :class:`Pipeline`.", "id": "f1613:c1:m6"}
{"signature": "def add_logged_in_session(self, response=None):", "body": "if not response:<EOL><INDENT>response = self.get('<STR_LIT>')<EOL><DEDENT>self._set_session_cookie(response)<EOL>if not self._session_id:<EOL><INDENT>raise AuthenticationFailed('<STR_LIT>')<EOL><DEDENT>response = self.get('<STR_LIT>')<EOL>match = re.search(<EOL>r'<STR_LIT>',<EOL>response.read().decode('<STR_LIT:utf-8>')<EOL>)<EOL>if match:<EOL><INDENT>self._authenticity_token = match.group(<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>raise AuthenticationFailed('<STR_LIT>')<EOL><DEDENT>", "docstring": "Make the request appear to be coming from a browser\n\n        This is to interact with older parts of Go that doesn't have a\n        proper API call to be made. What will be done:\n\n        1. If no response passed in a call to `go/api/pipelines.xml` is\n           made to get a valid session\n        2. `JSESSIONID` will be populated from this request\n        3. A request to `go/pipelines` will be so the\n           `authenticity_token` (CSRF) can be extracted. It will then\n           silently be injected into `post_args` on any POST calls that\n           doesn't start with `go/api` from this point.\n\n        Args:\n          response: a :class:`Response` object from a previously successful\n            API call. So we won't have to query `go/api/pipelines.xml`\n            unnecessarily.\n\n        Raises:\n          HTTPError: when the HTTP request fails.\n          AuthenticationFailed: when failing to get the `session_id`\n            or the `authenticity_token`.", "id": "f1613:c1:m4"}
{"signature": "def get(self, path):", "body": "return self.request(path)<EOL>", "docstring": "Performs a HTTP GET request to the Go server\n\n        Args:\n          path (str): The full path on the Go server to request.\n            This includes any query string attributes.\n\n        Raises:\n          HTTPError: when the HTTP request fails.\n\n        Returns:\n          file like object: The response from a\n            :func:`urllib2.urlopen` call", "id": "f1613:c1:m1"}
{"signature": "def list(self):", "body": "return self._get('<STR_LIT>')<EOL>", "docstring": "Lists all available artifacts in this job.\n\n        See the `Go artifact list documentation`__ for example responses.\n\n        .. __: http://api.go.cd/current/#get-all-artifacts\n\n        Returns:\n          Response: :class:`gocd.api.response.Response` object", "id": "f1618:c0:m1"}
{"signature": "def get_directory(self, path_to_directory, timeout=<NUM_LIT:30>, backoff=<NUM_LIT>, max_wait=<NUM_LIT:4>):", "body": "response = None<EOL>started_at = None<EOL>time_elapsed = <NUM_LIT:0><EOL>i = <NUM_LIT:0><EOL>while time_elapsed < timeout:<EOL><INDENT>response = self._get('<STR_LIT>'.format(path_to_directory))<EOL>if response:<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>if started_at is None:<EOL><INDENT>started_at = time.time()<EOL><DEDENT>time.sleep(min(backoff * (<NUM_LIT:2> ** i), max_wait))<EOL>i += <NUM_LIT:1><EOL>time_elapsed = time.time() - started_at<EOL><DEDENT><DEDENT>return response<EOL>", "docstring": "Gets an artifact directory by its path.\n\n        See the `Go artifact directory documentation`__ for example responses.\n\n        .. __: http://api.go.cd/current/#get-artifact-directory\n\n        .. note::\n          Getting a directory relies on Go creating a zip file of the\n          directory in question. Because of this Go will zip the file in\n          the background and return a 202 Accepted response. It's then up\n          to the client to check again later and get the final file.\n\n          To work with normal assumptions this :meth:`get_directory` will\n          retry itself up to ``timeout`` seconds to get a 200 response to\n          return. At that point it will then return the response as is, no\n          matter whether it's still 202 or 200. The retry is done with an\n          exponential backoff with a max value between retries. 
See the\n          ``backoff`` and ``max_wait`` variables.\n\n          If you want to handle the retry logic yourself then use :meth:`get`\n          and add '.zip' as a suffix on the directory.\n\n        Args:\n          path_to_directory (str): The path to the directory to get.\n            It can be nested eg ``target/dist.zip``\n          timeout (int): How many seconds we will wait in total for a\n            successful response from Go when we're receiving 202\n          backoff (float): The initial value used for backoff, raises\n            exponentially until it reaches ``max_wait``\n          max_wait (int): The max time between retries\n\n        Returns:\n          Response: :class:`gocd.api.response.Response` object\n            A successful response is a zip-file.", "id": "f1618:c0:m3"}
{"signature": "def get(self, path_to_file):", "body": "return self._get(path_to_file)<EOL>", "docstring": "Gets an artifact directory by its path.\n\n        See the `Go artifact file documentation`__ for example responses.\n\n        .. __: http://api.go.cd/current/#get-artifact-file\n\n        Args:\n          path_to_file (str): The path to file to get. It can be nested eg\n            ``dist/foobar-widgets-1.2.0.jar``\n\n        Returns:\n          Response: :class:`gocd.api.response.Response` object", "id": "f1618:c0:m2"}
{"signature": "def stage(self, name, pipeline_counter=None):", "body": "return Stage(<EOL>self.server,<EOL>pipeline_name=self.name,<EOL>stage_name=name,<EOL>pipeline_counter=pipeline_counter,<EOL>)<EOL>", "docstring": "Helper to instantiate a :class:`gocd.api.stage.Stage` object\n\n        Args:\n            name: The name of the stage\n            pipeline_counter:\n\n        Returns:", "id": "f1619:c0:m10"}
{"signature": "def unpause(self):", "body": "return self._post('<STR_LIT>', headers={\"<STR_LIT>\": True})<EOL>", "docstring": "Unpauses the pipeline\n\n        See the `Go pipeline unpause documentation`__ for example responses.\n\n        .. __: http://api.go.cd/current/#unpause-a-pipeline\n\n        Returns:\n          Response: :class:`gocd.api.response.Response` object", "id": "f1619:c0:m4"}
{"signature": "def __init__(self, server, name):", "body": "self.server = server<EOL>self.name = name<EOL>", "docstring": "A wrapper for the `Go pipeline API`__\n\n        .. __: http://api.go.cd/current/#pipelines\n\n        Args:\n          server (Server): A configured instance of\n            :class:gocd.server.Server\n          name (str): The name of the pipeline we're working on", "id": "f1619:c0:m0"}
{"signature": "def console_output(self, instance=None):", "body": "if instance is None:<EOL><INDENT>instance = self.instance()<EOL><DEDENT>for stage in instance['<STR_LIT>']:<EOL><INDENT>for job in stage['<STR_LIT>']:<EOL><INDENT>if job['<STR_LIT:result>'] not in self.final_results:<EOL><INDENT>continue<EOL><DEDENT>artifact = self.artifact(<EOL>instance['<STR_LIT>'],<EOL>stage['<STR_LIT:name>'],<EOL>job['<STR_LIT:name>'],<EOL>stage['<STR_LIT>']<EOL>)<EOL>output = artifact.get('<STR_LIT>')<EOL>yield (<EOL>{<EOL>'<STR_LIT>': self.name,<EOL>'<STR_LIT>': instance['<STR_LIT>'],<EOL>'<STR_LIT>': stage['<STR_LIT:name>'],<EOL>'<STR_LIT>': stage['<STR_LIT>'],<EOL>'<STR_LIT>': job['<STR_LIT:name>'],<EOL>'<STR_LIT>': job['<STR_LIT:result>'],<EOL>},<EOL>output.body<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "Yields the output and metadata from all jobs in the pipeline\n\n        Args:\n          instance: The result of a :meth:`instance` call, if not supplied\n            the latest of the pipeline will be used.\n\n        Yields:\n          tuple: (metadata (dict), output (str)).\n\n          metadata contains:\n            - pipeline\n            - pipeline_counter\n            - stage\n            - stage_counter\n            - job\n            - job_result", "id": "f1619:c0:m9"}
{"signature": "def status(self):", "body": "return self._get('<STR_LIT>')<EOL>", "docstring": "Returns the current status of this pipeline\n\n        See the `Go pipeline status documentation`__ for example responses.\n\n        .. __: http://api.go.cd/current/#get-pipeline-status\n\n        Returns:\n          Response: :class:`gocd.api.response.Response` object", "id": "f1619:c0:m5"}
{"signature": "def artifact(self, counter, stage, job, stage_counter=<NUM_LIT:1>):", "body": "return Artifact(self.server, self.name, counter, stage, job, stage_counter)<EOL>", "docstring": "Helper to instantiate an :class:`gocd.api.artifact.Artifact` object\n\n        Args:\n          counter (int): The pipeline counter to get the artifact for\n          stage: Stage name\n          job: Job name\n          stage_counter: Defaults to 1\n\n        Returns:\n          Artifact: :class:`gocd.api.artifact.Artifact` object", "id": "f1619:c0:m8"}
{"signature": "def get(self):", "body": "return self._get(self.name, headers={\"<STR_LIT>\": self._accept_header_value})<EOL>", "docstring": "Get template config for specified template name.\n\n        See `The template config object`__ for example responses.\n\n        .. __: https://api.go.cd/current/#the-template-config-object\n\n        Returns:\n          Response: :class:`gocd.api.response.Response` object", "id": "f1620:c0:m1"}
{"signature": "def __init__(self, server, name, api_version=<NUM_LIT:2>):", "body": "self.server = server<EOL>self.name = name<EOL>self.api_version = api_version<EOL>", "docstring": "A wrapper for the `Go template config API`__\n\n        .. __: https://api.go.cd/current/#template-config\n\n        Args:\n          server (Server): A configured instance of\n            :class:gocd.server.Server\n          name (str): The name of the template we're working on", "id": "f1620:c0:m0"}
{"signature": "@property<EOL><INDENT>def pipelines(self):<DEDENT>", "body": "if not self.response:<EOL><INDENT>return set()<EOL><DEDENT>elif self._pipelines is None and self.response:<EOL><INDENT>self._pipelines = set()<EOL>for group in self.response.payload:<EOL><INDENT>for pipeline in group['<STR_LIT>']:<EOL><INDENT>self._pipelines.add(pipeline['<STR_LIT:name>'])<EOL><DEDENT><DEDENT><DEDENT>return self._pipelines<EOL>", "docstring": "Returns a set of all pipelines from the last response\n\n        Returns:\n          set: Response success: all the pipelines available in the response\n               Response failure: an empty set", "id": "f1622:c0:m3"}
{"signature": "def get(self):", "body": "return self._get(self.name, headers={\"<STR_LIT>\": self._accept_header_value})<EOL>", "docstring": "Gets SCM material for specified material name\n\n        See `The global scm config object`__ for example responses.\n\n        .. __: https://api.go.cd/current/#the-global-scm-config-object\n\n        Returns:\n          Response: :class:`gocd.api.response.Response` object", "id": "f1623:c0:m2"}
{"signature": "def __init__(self, server, name=\"<STR_LIT>\"):", "body": "self.server = server<EOL>self.name = name<EOL>", "docstring": "A wrapper for the `Go pluggable SCM API`__\n\n        .. __: https://api.go.cd/current/#scms\n\n        Args:\n          server (Server): A configured instance of\n            :class:gocd.server.Server\n          name (str): The name of the SCM material", "id": "f1623:c0:m0"}
{"signature": "def history(self, offset=<NUM_LIT:0>):", "body": "return self._get('<STR_LIT>'.format(offset=offset or <NUM_LIT:0>))<EOL>", "docstring": "Lists previous instances/runs of the stage\n\n        See the `Go stage history documentation`__ for example responses.\n\n        .. __: http://api.go.cd/current/#get-stage-history\n\n        Args:\n          offset (int, optional): How many instances to skip for this response.\n\n        Returns:\n          Response: :class:`gocd.api.response.Response` object", "id": "f1625:c0:m3"}
{"signature": "def instance(self, counter=None, pipeline_counter=None):", "body": "pipeline_counter = pipeline_counter or self.pipeline_counter<EOL>pipeline_instance = None<EOL>if not pipeline_counter:<EOL><INDENT>pipeline_instance = self.server.pipeline(self.pipeline_name).instance()<EOL>self.pipeline_counter = int(pipeline_instance['<STR_LIT>'])<EOL><DEDENT>if not counter:<EOL><INDENT>if pipeline_instance is None:<EOL><INDENT>pipeline_instance = (<EOL>self.server<EOL>.pipeline(self.pipeline_name)<EOL>.instance(pipeline_counter)<EOL>)<EOL><DEDENT>for stages in pipeline_instance['<STR_LIT>']:<EOL><INDENT>if stages['<STR_LIT:name>'] == self.stage_name:<EOL><INDENT>return self.instance(<EOL>counter=int(stages['<STR_LIT>']),<EOL>pipeline_counter=pipeline_counter<EOL>)<EOL><DEDENT><DEDENT><DEDENT>return self._get('<STR_LIT>'<EOL>.format(pipeline_counter=pipeline_counter, counter=counter))<EOL>", "docstring": "Returns all the information regarding a specific stage run\n\n        See the `Go stage instance documentation`__ for examples.\n\n        .. __: http://api.go.cd/current/#get-stage-instance\n\n        Args:\n          counter (int): The stage instance to fetch.\n            If falsey returns the latest stage instance from :meth:`history`.\n          pipeline_counter (int): The pipeline instance for which to fetch\n            the stage. If falsey returns the latest pipeline instance.\n\n        Returns:\n          Response: :class:`gocd.api.response.Response` object", "id": "f1625:c0:m4"}
{"signature": "def cancel(self):", "body": "return self._post('<STR_LIT>', headers={\"<STR_LIT>\": True})<EOL>", "docstring": "Cancels a currently running stage\n\n        Returns:\n          Response: :class:`gocd.api.response.Response` object", "id": "f1625:c0:m2"}
{"signature": "@property<EOL><INDENT>def is_ok(self):<DEDENT>", "body": "return self.status_code == self.ok_status<EOL>", "docstring": "Whether this response is considered successful\n\n        Returns\n          bool: True if `status_code` is `ok_status`", "id": "f1626:c0:m1"}
{"signature": "@property<EOL><INDENT>def payload(self):<DEDENT>", "body": "if self.is_json:<EOL><INDENT>if not self._body_parsed:<EOL><INDENT>if hasattr(self._body, '<STR_LIT>'):<EOL><INDENT>body = self._body.decode('<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>body = self._body<EOL><DEDENT>self._body_parsed = json.loads(body)<EOL><DEDENT>return self._body_parsed<EOL><DEDENT>else:<EOL><INDENT>return self._body<EOL><DEDENT>", "docstring": "Returns:\n  `str` when not json.\n  `dict` when json.", "id": "f1626:c0:m6"}
{"signature": "def make_formatter(format_name):", "body": "if \"<STR_LIT>\" in format_name:<EOL><INDENT>from json import dumps<EOL>import datetime<EOL>def jsonhandler(obj): obj.isoformat() if isinstance(obj, (datetime.datetime, datetime.date)) else obj<EOL>if format_name == \"<STR_LIT>\":<EOL><INDENT>def jsondumps(data): return dumps(data, default=jsonhandler, indent=<NUM_LIT:2>, separators=('<STR_LIT:U+002C>', '<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>def jsondumps(data): return dumps(data, default=jsonhandler)<EOL><DEDENT>def jsonify(data):<EOL><INDENT>if isinstance(data, dict):<EOL><INDENT>print(jsondumps(data))<EOL><DEDENT>elif isinstance(data, list):<EOL><INDENT>print(jsondumps([device._asdict() for device in data]))<EOL><DEDENT>else:<EOL><INDENT>print(dumps({'<STR_LIT:result>': data}))<EOL><DEDENT><DEDENT>return jsonify<EOL><DEDENT>else:<EOL><INDENT>def printer(data):<EOL><INDENT>if isinstance(data, dict):<EOL><INDENT>print(data)<EOL><DEDENT>else:<EOL><INDENT>for row in data:<EOL><INDENT>print(row)<EOL><DEDENT><DEDENT><DEDENT>return printer<EOL><DEDENT>", "docstring": "Returns a callable that outputs the data. Defaults to print.", "id": "f1630:m0"}
{"signature": "def main():", "body": "args = argparser().parse_args(sys.argv[<NUM_LIT:1>:])<EOL>password = os.environ.get('<STR_LIT>') or args.password<EOL>netgear = Netgear(password, args.host, args.user, args.port, args.ssl, args.url, args.force_login_v2)<EOL>results = run_subcommand(netgear, args)<EOL>formatter = make_formatter(args.format)<EOL>if results is None:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>formatter(results)<EOL><DEDENT>", "docstring": "Scan for devices and print results.", "id": "f1630:m3"}
{"signature": "def allow_block_device(self, mac_addr, device_status=BLOCK):", "body": "_LOGGER.info(\"<STR_LIT>\")<EOL>if self.config_started:<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>if not self.config_start():<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>success, _ = self._make_request(<EOL>SERVICE_DEVICE_CONFIG, \"<STR_LIT>\",<EOL>{\"<STR_LIT>\": device_status, \"<STR_LIT>\": mac_addr})<EOL>if not success:<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>if not self.config_finish():<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>return True<EOL>", "docstring": "Allow or Block a device via its Mac Address.\nPass in the mac address for the device that you want to set. Pass in the\ndevice_status you wish to set the device to: Allow (allow device to access the\nnetwork) or Block (block the device from accessing the network).", "id": "f1631:c0:m9"}
{"signature": "def _convert(value, to_type, default=None):", "body": "try:<EOL><INDENT>return default if value is None else to_type(value)<EOL><DEDENT>except ValueError:<EOL><INDENT>return default<EOL><DEDENT>", "docstring": "Convert value to to_type, returns default if fails.", "id": "f1631:m6"}
{"signature": "def login(self):", "body": "if not self.force_login_v2:<EOL><INDENT>v1_result = self.login_v1()<EOL>if v1_result:<EOL><INDENT>return v1_result<EOL><DEDENT><DEDENT>return self.login_v2()<EOL>", "docstring": "Login to the router.\n\nWill be called automatically by other actions.", "id": "f1631:c0:m1"}
{"signature": "def _xml_get(e, name):", "body": "r = e.find(name)<EOL>if r is not None:<EOL><INDENT>return r.text<EOL><DEDENT>return None<EOL>", "docstring": "Returns the value of the subnode \"name\" of element e.\n\nReturns None if the subnode doesn't exist", "id": "f1631:m2"}
{"signature": "def _make_request(self, service, method, params=None, body=\"<STR_LIT>\",<EOL>need_auth=True):", "body": "<EOL>if need_auth and not self.cookie:<EOL><INDENT>if not self.login():<EOL><INDENT>return False, None<EOL><DEDENT><DEDENT>headers = self._get_headers(service, method, need_auth)<EOL>if not body:<EOL><INDENT>if not params:<EOL><INDENT>params = \"<STR_LIT>\"<EOL><DEDENT>if isinstance(params, dict):<EOL><INDENT>_map = params<EOL>params = \"<STR_LIT>\"<EOL>for k in _map:<EOL><INDENT>params += \"<STR_LIT:<>\" + k + \"<STR_LIT:>>\" + _map[k] + \"<STR_LIT>\" + k + \"<STR_LIT>\"<EOL><DEDENT><DEDENT>body = CALL_BODY.format(service=SERVICE_PREFIX + service,<EOL>method=method, params=params)<EOL><DEDENT>message = SOAP_REQUEST.format(session_id=SESSION_ID, body=body)<EOL>try:<EOL><INDENT>response = requests.post(self.soap_url, headers=headers,<EOL>data=message, timeout=<NUM_LIT:30>, verify=False)<EOL>if need_auth and _is_unauthorized_response(response):<EOL><INDENT>self.cookie = None<EOL>_LOGGER.warning(\"<STR_LIT>\")<EOL>if self.login():<EOL><INDENT>headers = self._get_headers(service, method, need_auth)<EOL>response = requests.post(self.soap_url, headers=headers,<EOL>data=message, timeout=<NUM_LIT:30>, verify=False)<EOL><DEDENT><DEDENT>success = _is_valid_response(response)<EOL>if not success:<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL>_LOGGER.debug(\"<STR_LIT>\", response.status_code, str(response.headers), response.text)<EOL><DEDENT>return success, response<EOL><DEDENT>except requests.exceptions.RequestException:<EOL><INDENT>_LOGGER.exception(\"<STR_LIT>\")<EOL>return False, None<EOL><DEDENT>", "docstring": "Make an API request to the router.", "id": "f1631:c0:m11"}
{"signature": "def send(self, url, http_method, **client_args):", "body": "response = super(Resource, self).send(url, http_method, **client_args)<EOL>if response.status_code in (requests.codes.ok, requests.codes.created):<EOL><INDENT>try:<EOL><INDENT>self.update_from_dict(self.client.get_response_data(response, self.Meta.parse_json))<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return response if response is not None else None<EOL>", "docstring": "Make the actual request to the API, updating the resource if necessary\n:param url: Endpoint URL\n:param http_method: The method used to make the request to the API\n:param client_args: Arguments to be sent to the auth client\n:return:", "id": "f1650:c2:m5"}
{"signature": "def save(self, force_create=False, fields=None):", "body": "values = {}<EOL>fields = fields or self.fields<EOL>for field_name in fields:<EOL><INDENT>value = getattr(self, field_name)<EOL>if isinstance(value, Resource):<EOL><INDENT>value = value.get_id()<EOL><DEDENT>if isinstance(value, list):<EOL><INDENT>if len(value) > <NUM_LIT:0> and isinstance(value[<NUM_LIT:0>], Resource):<EOL><INDENT>value = None<EOL><DEDENT>else:<EOL><INDENT>final_value_list = []<EOL>for item in value:<EOL><INDENT>final_value_list.append(item.isoformat() if isinstance(item, datetime) else item)<EOL><DEDENT>value = final_value_list<EOL><DEDENT><DEDENT>if isinstance(value, datetime):<EOL><INDENT>value = value.isoformat()<EOL><DEDENT>if value is not None:<EOL><INDENT>values[field_name] = value<EOL><DEDENT><DEDENT>http_headers = {'<STR_LIT>': '<STR_LIT:application/json>'} if self.Meta.json_data is True else None<EOL>json = values if self.Meta.json_data is True else None<EOL>data = values if self.Meta.json_data is False else None<EOL>if self.get_resource_endpoint() is not None and force_create is False:<EOL><INDENT>return self.send(self.get_resource_endpoint(), \"<STR_LIT>\", headers=http_headers, json=json, data=data)<EOL><DEDENT>else:<EOL><INDENT>return self.send(self.get_collection_endpoint(), \"<STR_LIT>\", headers=http_headers, json=json, data=data)<EOL><DEDENT>", "docstring": "Saves (creates or updates) resource on the server\n:param force_create: If True, forces resource creation even if it already has an Id.\n:param fields: List of fields to be saved. If None, all fields will be saved.\n:return:", "id": "f1650:c2:m6"}
{"signature": "def filter(self, **search_args):", "body": "search_args = search_args or {}<EOL>raw_resources = []<EOL>for url, paginator_params in self.paginator.get_urls(self.get_collection_endpoint()):<EOL><INDENT>search_args.update(paginator_params)<EOL>response = self.paginator.process_response(self.send(url, \"<STR_LIT>\", params=search_args))<EOL>raw_resources += self.client.get_response_data(response, self.Meta.parse_json)[self.json_collection_attribute] if self.json_collection_attribute is not None else self.client.get_response_data(response, self.Meta.parse_json)<EOL><DEDENT>resources = []<EOL>for raw_resource in raw_resources:<EOL><INDENT>try:<EOL><INDENT>resource = self.resource_class(self.client)<EOL><DEDENT>except (ValueError, TypeError):<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>resource.update_from_dict(raw_resource)<EOL>resources.append(resource)<EOL><DEDENT><DEDENT>return resources<EOL>", "docstring": "Get a filtered list of resources\n:param search_args: To be translated into ?arg1=value1&arg2=value2...\n:return: A list of resources", "id": "f1650:c3:m3"}
{"signature": "@classmethod<EOL><INDENT>def get_collection_endpoint(cls):<DEDENT>", "body": "return cls.Meta.collection_endpoint if cls.Meta.collection_endpoint is not None else cls.__name__.lower() + \"<STR_LIT>\"<EOL>", "docstring": "Get the relative path to the API resource collection\n\nIf self.collection_endpoint is not set, it will default to the lowercase name of the resource class plus an \"s\" and the terminating \"/\"\n:param cls: Resource class\n:return: Relative path to the resource collection", "id": "f1650:c0:m2"}
{"signature": "def __init__(self, auth_client, **kwargs):", "body": "for name, value in iteritems(kwargs):<EOL><INDENT>setattr(self, name, value)<EOL><DEDENT>super(Resource, self).__init__(auth_client)<EOL>", "docstring": "Initializes the resource\n:param auth_client: Client to make (non)authorized requests\n:param kwargs: Initial value for attributes\n:return:", "id": "f1650:c2:m0"}
{"signature": "def get_resource_endpoint(self):", "body": "return super(Resource, self).get_resource_endpoint(self.get_id())<EOL>", "docstring": "Get the relative path to the specific API resource\n:return: Relative path to the resource", "id": "f1650:c2:m3"}
{"signature": "def get(self, resource_id):", "body": "response = self.send(self.get_resource_endpoint(resource_id), \"<STR_LIT>\")<EOL>try:<EOL><INDENT>resource = self.resource_class(self.client)<EOL><DEDENT>except (ValueError, TypeError):<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>resource.update_from_dict(self.client.get_response_data(response, self.Meta.parse_json))<EOL>return resource<EOL><DEDENT>", "docstring": "Get one single resource from the API\n:param resource_id: Id of the resource to be retrieved\n:return: Retrieved resource", "id": "f1650:c3:m2"}
{"signature": "def refresh(self):", "body": "if self.get_resource_endpoint() is not None:<EOL><INDENT>return self.send(self.get_resource_endpoint(), \"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Refreshes a resource by checking against the API\n:return:", "id": "f1650:c2:m7"}
{"signature": "def __init__(self, auth_client):", "body": "self.client = auth_client<EOL>", "docstring": "Initializes the instance\n:param auth_client: Client to make (non)authorized requests\n:return:", "id": "f1650:c0:m0"}
{"signature": "def send(self, relative_path, http_method, **requests_args):", "body": "url = urljoin(self.base_url, relative_path)<EOL>return self.session.request(http_method, url, **requests_args)<EOL>", "docstring": "Subclasses must implement this method, that will be used to send API requests with proper auth\n:param relative_path: URL path relative to self.base_url\n:param http_method: HTTP method\n:param requests_args: kargs to be sent to requests\n:return:", "id": "f1651:c0:m1"}
{"signature": "def __init__(self, many=False):", "body": "self.many = many<EOL>self.name = None<EOL>", "docstring": "Initialize the field\n:param many: Set to True if this field will host a list of items", "id": "f1652:c0:m0"}
{"signature": "def __get__(self, instance, owner):", "body": "if instance is not None and self.name is not None:<EOL><INDENT>return instance.__dict__.get(self.name)<EOL><DEDENT>else:<EOL><INDENT>return self<EOL><DEDENT>", "docstring": "Normal descriptor get method\n:param instance: Resource instance where the field lives\n:param instance: Resource class where the field lives\n:return: Value stored in instance.name (TODO: maybe change this in the future to instance.Cache.name)", "id": "f1652:c0:m1"}
{"signature": "def __set__(self, instance, value):", "body": "if instance is not None and self.name is not None:<EOL><INDENT>instance.__dict__[self.name] = value<EOL><DEDENT>", "docstring": "Normal descriptor set method\n:param instance: Resource instance where the field lives\n:param value: Value to store in instance.name (TODO: maybe change this in the future to instance.Cache.name)", "id": "f1652:c0:m2"}
{"signature": "def __set__(self, instance, value):", "body": "if self.many is False:<EOL><INDENT>if isinstance(value, str):<EOL><INDENT>value = parse(value)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>datetime_list = []<EOL>for datetime_value in value:<EOL><INDENT>if isinstance(datetime_value, str):<EOL><INDENT>datetime_value = parse(datetime_value)<EOL><DEDENT>datetime_list.append(datetime_value)<EOL><DEDENT>value = datetime_list<EOL><DEDENT>super(DateTimeField, self).__set__(instance, value)<EOL>", "docstring": "Normal descriptor set method\n:param instance: Resource instance where the field lives\n:param value: Might be a datetime object or a string to be parsed", "id": "f1652:c5:m0"}
{"signature": "def get_version():", "body": "contents = read_file(os.path.join('<STR_LIT>', '<STR_LIT>'))<EOL>version = re.search('<STR_LIT>', contents)<EOL>version = version.group(<NUM_LIT:1>).replace('<STR_LIT:U+002CU+0020>', '<STR_LIT:.>').strip()<EOL>return version<EOL>", "docstring": "Returns version number, without module import (which can lead to ImportError\n    if some dependencies are unavailable before install.", "id": "f1655:m1"}
{"signature": "def __init__(<EOL>self, name, default, category=None, field=None, verbose_name=None, help_text='<STR_LIT>', static=True,<EOL>readonly=False):", "body": "self.name = name<EOL>self.category = category<EOL>self.default = default<EOL>self.static = static<EOL>self.help_text = help_text<EOL>if static:<EOL><INDENT>readonly = True<EOL><DEDENT>self.readonly = readonly<EOL>if verbose_name is None:<EOL><INDENT>verbose_name = name.replace('<STR_LIT:_>', '<STR_LIT:U+0020>').capitalize()<EOL><DEDENT>self.verbose_name = verbose_name<EOL>if field is None:<EOL><INDENT>self.field = get_field_for_proxy(self)<EOL><DEDENT>else:<EOL><INDENT>self.field = field<EOL>update_field_from_proxy(self.field, self)<EOL><DEDENT>", "docstring": ":param str|unicode name: Preference name.\n\n:param default: Default (initial) value.\n\n:param str|unicode category: Category name the preference belongs to.\n\n:param Field field: Django model field to represent this preference.\n\n:param str|unicode verbose_name: Field verbose name.\n\n:param str|unicode help_text: Field help text.\n\n:param bool static: Leave this preference static (do not store in DB).\n\n:param bool readonly: Make this field read only.", "id": "f1668:c3:m0"}
{"signature": "def update_field_from_proxy(field_obj, pref_proxy):", "body": "attr_names = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT:default>')<EOL>for attr_name in attr_names:<EOL><INDENT>setattr(field_obj, attr_name, getattr(pref_proxy, attr_name))<EOL><DEDENT>", "docstring": "Updates field object with data from a PrefProxy object.\n\n    :param models.Field field_obj:\n\n    :param PrefProxy pref_proxy:", "id": "f1668:m1"}
{"signature": "def traverse_local_prefs(stepback=<NUM_LIT:0>):", "body": "locals_dict = get_frame_locals(stepback+<NUM_LIT:1>)<EOL>for k in locals_dict:<EOL><INDENT>if not k.startswith('<STR_LIT:_>') and k.upper() == k:<EOL><INDENT>yield k, locals_dict<EOL><DEDENT><DEDENT>", "docstring": "Generator to walk through variables considered as preferences\n    in locals dict of a given frame.\n\n    :param int stepback:\n\n    :rtype: tuple", "id": "f1668:m5"}
{"signature": "@classmethod<EOL><INDENT>def read_prefs(cls, mem_prefs):<DEDENT>", "body": "db_prefs = {<EOL>'<STR_LIT>' % (pref['<STR_LIT>'], pref['<STR_LIT:name>']): pref for pref in<EOL>cls.objects.values().order_by('<STR_LIT>', '<STR_LIT:name>')<EOL>}<EOL>new_prefs = []<EOL>for app, prefs in mem_prefs.items():<EOL><INDENT>for pref_name, pref_proxy in prefs.items():<EOL><INDENT>if not pref_proxy.static:  <EOL><INDENT>key = '<STR_LIT>' % (app, pref_name)<EOL>if key in db_prefs:<EOL><INDENT>pref_proxy.db_value = db_prefs[key]['<STR_LIT:text>']<EOL><DEDENT>else:<EOL><INDENT>new_prefs.append(cls(app=app, name=pref_name, text=pref_proxy.default))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if new_prefs:<EOL><INDENT>try:<EOL><INDENT>cls.objects.bulk_create(new_prefs)<EOL><DEDENT>except IntegrityError:  <EOL><INDENT>pass<EOL><DEDENT><DEDENT>", "docstring": "Initializes preferences entries in DB according to currently discovered prefs.\n\n        :param dict mem_prefs:", "id": "f1669:c0:m1"}
{"signature": "def bind_proxy(values, category=None, field=None, verbose_name=None, help_text='<STR_LIT>', static=True, readonly=False):", "body": "addrs = OrderedDict()<EOL>depth = <NUM_LIT:3><EOL>for local_name, locals_dict in traverse_local_prefs(depth):<EOL><INDENT>addrs[id(locals_dict[local_name])] = local_name<EOL><DEDENT>proxies = []<EOL>locals_dict = get_frame_locals(depth)<EOL>for value in values:  <EOL><INDENT>id_val = id(value)<EOL>if id_val in addrs:<EOL><INDENT>local_name = addrs[id_val]<EOL>local_val = locals_dict[local_name]<EOL>if isinstance(local_val, PatchedLocal) and not isinstance(local_val, PrefProxy):<EOL><INDENT>proxy = PrefProxy(<EOL>local_name, value.val,<EOL>category=category,<EOL>field=field,<EOL>verbose_name=verbose_name,<EOL>help_text=help_text,<EOL>static=static,<EOL>readonly=readonly,<EOL>)<EOL>app_name = locals_dict['<STR_LIT>'].split('<STR_LIT:.>')[-<NUM_LIT:2>]  <EOL>prefs = get_prefs()<EOL>if app_name not in prefs:<EOL><INDENT>prefs[app_name] = OrderedDict()<EOL><DEDENT>prefs[app_name][local_name.lower()] = proxy<EOL>locals_dict[local_name] = proxy<EOL>proxies.append(proxy)<EOL><DEDENT><DEDENT><DEDENT>return proxies<EOL>", "docstring": "Binds PrefProxy objects to module variables used by apps as preferences.\n\n    :param list|tuple values: Preference values.\n\n    :param str|unicode category: Category name the preference belongs to.\n\n    :param Field field: Django model field to represent this preference.\n\n    :param str|unicode verbose_name: Field verbose name.\n\n    :param str|unicode help_text: Field help text.\n\n    :param bool static: Leave this preference static (do not store in DB).\n\n    :param bool readonly: Make this field read only.\n\n    :rtype: list", "id": "f1670:m4"}
{"signature": "def register_prefs(*args, **kwargs):", "body": "swap_settings_module = bool(kwargs.get('<STR_LIT>', True))<EOL>if __PATCHED_LOCALS_SENTINEL not in get_frame_locals(<NUM_LIT:2>):<EOL><INDENT>raise SitePrefsException('<STR_LIT>')<EOL><DEDENT>bind_proxy(args, **kwargs)<EOL>unpatch_locals()<EOL>swap_settings_module and proxy_settings_module()<EOL>", "docstring": "Registers preferences that should be handled by siteprefs.\n\n    Expects preferences as *args.\n\n    Use keyword arguments to batch apply params supported by\n    ``PrefProxy`` to all preferences not constructed by ``pref`` and ``pref_group``.\n\n    Batch kwargs:\n\n        :param str|unicode help_text: Field help text.\n\n        :param bool static: Leave this preference static (do not store in DB).\n\n        :param bool readonly: Make this field read only.\n\n    :param bool swap_settings_module: Whether to automatically replace settings module\n        with a special ``ProxyModule`` object to access dynamic values of settings\n        transparently (so not to bother with calling ``.value`` of ``PrefProxy`` object).", "id": "f1670:m10"}
{"signature": "def autodiscover_siteprefs(admin_site=None):", "body": "if admin_site is None:<EOL><INDENT>admin_site = admin.site<EOL><DEDENT>if '<STR_LIT>' not in sys.argv[<NUM_LIT:0>] or (len(sys.argv) > <NUM_LIT:1> and sys.argv[<NUM_LIT:1>] in MANAGE_SAFE_COMMANDS):<EOL><INDENT>import_prefs()<EOL>Preference.read_prefs(get_prefs())<EOL>register_admin_models(admin_site)<EOL><DEDENT>", "docstring": "Automatically discovers and registers all preferences available in all apps.\n\n    :param admin.AdminSite admin_site: Custom AdminSite object.", "id": "f1670:m6"}
{"signature": "def on_pref_update(*args, **kwargs):", "body": "Preference.update_prefs(*args, **kwargs)<EOL>Preference.read_prefs(get_prefs())<EOL>", "docstring": "Triggered on dynamic preferences model save.\n     Issues DB save and reread.", "id": "f1670:m0"}
{"signature": "def patch_locals(depth=<NUM_LIT:2>):", "body": "for name, locals_dict in traverse_local_prefs(depth):<EOL><INDENT>locals_dict[name] = PatchedLocal(name, locals_dict[name])<EOL><DEDENT>get_frame_locals(depth)[__PATCHED_LOCALS_SENTINEL] = True<EOL>", "docstring": "Temporarily (see unpatch_locals()) replaces all module variables\n    considered preferences with PatchedLocal objects, so that every\n    variable has different hash returned by id().", "id": "f1670:m7"}
{"signature": "def proxy_settings_module(depth=<NUM_LIT:3>):", "body": "proxies = []<EOL>modules = sys.modules<EOL>module_name = get_frame_locals(depth)['<STR_LIT>']<EOL>module_real = modules[module_name]<EOL>for name, locals_dict in traverse_local_prefs(depth):<EOL><INDENT>value = locals_dict[name]<EOL>if isinstance(value, PrefProxy):<EOL><INDENT>proxies.append(name)<EOL><DEDENT><DEDENT>new_module = type(module_name, (ModuleType, ModuleProxy), {})(module_name)  <EOL>new_module.bind(module_real, proxies)<EOL>modules[module_name] = new_module<EOL>", "docstring": "Replaces a settings module with a Module proxy to intercept\n    an access to settings.\n\n    :param int depth: Frame count to go backward.", "id": "f1670:m9"}
{"signature": "def score(self):", "body": "return sum([self.scores[len(w)] for w in self.words()])<EOL>", "docstring": "The total score for the words found, according to the rules.", "id": "f1675:c8:m4"}
{"signature": "def expand(self, problem):", "body": "return [self.child_node(problem, action)<EOL>for action in problem.actions(self.state)]<EOL>", "docstring": "List the nodes reachable in one step from this node.", "id": "f1675:c1:m2"}
{"signature": "def recursive_best_first_search(problem, h=None):", "body": "h = memoize(h or problem.h, '<STR_LIT:h>')<EOL>def RBFS(problem, node, flimit):<EOL><INDENT>if problem.goal_test(node.state):<EOL><INDENT>return node, <NUM_LIT:0>   <EOL><DEDENT>successors = node.expand(problem)<EOL>if len(successors) == <NUM_LIT:0>:<EOL><INDENT>return None, infinity<EOL><DEDENT>for s in successors:<EOL><INDENT>s.f = max(s.path_cost + h(s), node.f)<EOL><DEDENT>while True:<EOL><INDENT>successors.sort(lambda x,y: cmp(x.f, y.f)) <EOL>best = successors[<NUM_LIT:0>]<EOL>if best.f > flimit:<EOL><INDENT>return None, best.f<EOL><DEDENT>if len(successors) > <NUM_LIT:1>:<EOL><INDENT>alternative = successors[<NUM_LIT:1>].f<EOL><DEDENT>else:<EOL><INDENT>alternative = infinity<EOL><DEDENT>result, best.f = RBFS(problem, best, min(flimit, alternative))<EOL>if result is not None:<EOL><INDENT>return result, best.f<EOL><DEDENT><DEDENT><DEDENT>node = Node(problem.initial)<EOL>node.f = h(node)<EOL>result, bestf = RBFS(problem, node, infinity)<EOL>return result<EOL>", "docstring": "[Fig. 3.26]", "id": "f1675:m11"}
{"signature": "def boggle_hill_climbing(board=None, ntimes=<NUM_LIT:100>, verbose=True):", "body": "finder = BoggleFinder()<EOL>if board is None:<EOL><INDENT>board = random_boggle()<EOL><DEDENT>best = len(finder.set_board(board))<EOL>for _ in range(ntimes):<EOL><INDENT>i, oldc = mutate_boggle(board)<EOL>new = len(finder.set_board(board))<EOL>if new > best:<EOL><INDENT>best = new<EOL>if verbose: print(best, _, board)<EOL><DEDENT>else:<EOL><INDENT>board[i] = oldc <EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print_boggle(board)<EOL><DEDENT>return board, best<EOL>", "docstring": "Solve inverse Boggle by hill-climbing: find a high-scoring board by\n    starting with a random one and changing it.", "id": "f1675:m26"}
{"signature": "def mate(self, other):", "body": "c = random.randrange(len(self.genes))<EOL>return self.__class__(self.genes[:c] + other.genes[c:])<EOL>", "docstring": "Return a new individual crossing self and other.", "id": "f1675:c3:m1"}
{"signature": "def nodes(self):", "body": "return list(self.dict.keys())<EOL>", "docstring": "Return a list of nodes in the graph.", "id": "f1675:c4:m5"}
{"signature": "def online_dfs_agent(s1):", "body": "unimplemented()<EOL>", "docstring": "[Fig. 4.21]", "id": "f1675:m16"}
{"signature": "def RandomGraph(nodes=list(range(<NUM_LIT:10>)), min_links=<NUM_LIT:2>, width=<NUM_LIT>, height=<NUM_LIT>,<EOL>curvature=lambda: random.uniform(<NUM_LIT>, <NUM_LIT>)):", "body": "g = UndirectedGraph()<EOL>g.locations = {}<EOL>for node in nodes:<EOL><INDENT>g.locations[node] = (random.randrange(width), random.randrange(height))<EOL><DEDENT>for i in range(min_links):<EOL><INDENT>for node in nodes:<EOL><INDENT>if len(g.get(node)) < min_links:<EOL><INDENT>here = g.locations[node]<EOL>def distance_to_node(n):<EOL><INDENT>if n is node or g.get(node,n): return infinity<EOL>return distance(g.locations[n], here)<EOL><DEDENT>neighbor = argmin(nodes, distance_to_node)<EOL>d = distance(g.locations[neighbor], here) * curvature()<EOL>g.connect(node, neighbor, int(d))<EOL><DEDENT><DEDENT><DEDENT>return g<EOL>", "docstring": "Construct a random graph, with the specified nodes, and random links.\n    The nodes are laid out randomly on a (width x height) rectangle.\n    Then each node is connected to the min_links nearest neighbors.\n    Because inverse links are added, some nodes will have more connections.\n    The distance between nodes is the hypotenuse times curvature(),\n    where curvature() defaults to a random number between 1.1 and 1.5.", "id": "f1675:m21"}
{"signature": "def compare_graph_searchers():", "body": "compare_searchers(problems=[GraphProblem('<STR_LIT:A>', '<STR_LIT:B>', romania),<EOL>GraphProblem('<STR_LIT:O>', '<STR_LIT:N>', romania),<EOL>GraphProblem('<STR_LIT>', '<STR_LIT>', australia)],<EOL>header=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>", "docstring": "Prints a table of results like this:\n    >>> compare_graph_searchers()\n    Searcher                      Romania(A, B)        Romania(O, N)         Australia          \n    breadth_first_tree_search     <  21/  22/  59/B>   <1158/1159/3288/N>    <   7/   8/  22/WA>\n    breadth_first_search          <   7/  11/  18/B>   <  19/  20/  45/N>    <   2/   6/   8/WA>\n    depth_first_graph_search      <   8/   9/  20/B>   <  16/  17/  38/N>    <   4/   5/  11/WA>\n    iterative_deepening_search    <  11/  33/  31/B>   < 656/1815/1812/N>    <   3/  11/  11/WA>\n    depth_limited_search          <  54/  65/ 185/B>   < 387/1012/1125/N>    <  50/  54/ 200/WA>\n    recursive_best_first_search   <   5/   6/  15/B>   <5887/5888/16532/N>   <  11/  12/  43/WA>", "id": "f1675:m30"}
{"signature": "def boggle_neighbors(n2, cache={}):", "body": "if cache.get(n2):<EOL><INDENT>return cache.get(n2)<EOL><DEDENT>n = exact_sqrt(n2)<EOL>neighbors = [None] * n2<EOL>for i in range(n2):<EOL><INDENT>neighbors[i] = []<EOL>on_top = i < n<EOL>on_bottom = i >= n2 - n<EOL>on_left = i % n == <NUM_LIT:0><EOL>on_right = (i+<NUM_LIT:1>) % n == <NUM_LIT:0><EOL>if not on_top:<EOL><INDENT>neighbors[i].append(i - n)<EOL>if not on_left:  neighbors[i].append(i - n - <NUM_LIT:1>)<EOL>if not on_right: neighbors[i].append(i - n + <NUM_LIT:1>)<EOL><DEDENT>if not on_bottom:<EOL><INDENT>neighbors[i].append(i + n)<EOL>if not on_left:  neighbors[i].append(i + n - <NUM_LIT:1>)<EOL>if not on_right: neighbors[i].append(i + n + <NUM_LIT:1>)<EOL><DEDENT>if not on_left: neighbors[i].append(i - <NUM_LIT:1>)<EOL>if not on_right: neighbors[i].append(i + <NUM_LIT:1>)<EOL><DEDENT>cache[n2] = neighbors<EOL>return neighbors<EOL>", "docstring": "Return a list of lists, where the i-th element is the list of indexes\n    for the neighbors of square i.", "id": "f1675:m24"}
{"signature": "def depth_first_tree_search(problem):", "body": "return tree_search(problem, Stack())<EOL>", "docstring": "Search the deepest nodes in the search tree first.", "id": "f1675:m3"}
{"signature": "def value(self, state):", "body": "abstract<EOL>", "docstring": "For optimization problems, each state has a value.  Hill-climbing\n        and related algorithms try to maximize this value.", "id": "f1675:c0:m5"}
{"signature": "def hill_climbing(problem):", "body": "current = Node(problem.initial)<EOL>while True:<EOL><INDENT>neighbors = current.expand(problem)<EOL>if not neighbors:<EOL><INDENT>break<EOL><DEDENT>neighbor = argmax_random_tie(neighbors,<EOL>lambda node: problem.value(node.state))<EOL>if problem.value(neighbor.state) <= problem.value(current.state):<EOL><INDENT>break<EOL><DEDENT>current = neighbor<EOL><DEDENT>return current.state<EOL>", "docstring": "From the initial node, keep choosing the neighbor with highest value,\n    stopping when no neighbor is better. [Fig. 4.2]", "id": "f1675:m12"}
{"signature": "def lrta_star_agent(s1):", "body": "unimplemented()<EOL>", "docstring": "[Fig. 4.24]", "id": "f1675:m17"}
{"signature": "def __init__(self, initial, goal=None):", "body": "self.initial = initial; self.goal = goal<EOL>", "docstring": "The constructor specifies the initial state, and possibly a goal\n        state, if there is a unique goal.  Your subclass's constructor can add\n        other arguments.", "id": "f1675:c0:m0"}
{"signature": "def __len__(self):", "body": "return len(self.found)<EOL>", "docstring": "The number of words found.", "id": "f1675:c8:m5"}
{"signature": "def breadth_first_tree_search(problem):", "body": "return tree_search(problem, FIFOQueue())<EOL>", "docstring": "Search the shallowest nodes in the search tree first.", "id": "f1675:m2"}
{"signature": "def connect(self, A, B, distance=<NUM_LIT:1>):", "body": "self.connect1(A, B, distance)<EOL>if not self.directed: self.connect1(B, A, distance)<EOL>", "docstring": "Add a link from A and B of given distance, and also add the inverse\n        link if the graph is undirected.", "id": "f1675:c4:m2"}
{"signature": "def result(self, state, row):", "body": "col = state.index(None)<EOL>new = state[:]<EOL>new[col] = row<EOL>return new<EOL>", "docstring": "Place the next queen at the given row.", "id": "f1675:c6:m2"}
{"signature": "def UndirectedGraph(dict=None):", "body": "return Graph(dict=dict, directed=False)<EOL>", "docstring": "Build a Graph where every edge (including future ones) goes both ways.", "id": "f1675:m20"}
{"signature": "def graph_search(problem, frontier):", "body": "frontier.append(Node(problem.initial))<EOL>explored = set()<EOL>while frontier:<EOL><INDENT>node = frontier.pop()<EOL>if problem.goal_test(node.state):<EOL><INDENT>return node<EOL><DEDENT>explored.add(node.state)<EOL>frontier.extend(child for child in node.expand(problem)<EOL>if child.state not in explored<EOL>and child not in frontier)<EOL><DEDENT>return None<EOL>", "docstring": "Search through the successors of a problem to find a goal.\n    The argument frontier should be an empty queue.\n    If two paths reach a state, only use the first one. [Fig. 3.7]", "id": "f1675:m1"}
{"signature": "def lookup(self, prefix, lo=<NUM_LIT:0>, hi=None):", "body": "words = self.words<EOL>if hi is None: hi = len(words)<EOL>i = bisect.bisect_left(words, prefix, lo, hi)<EOL>if i < len(words) and words[i].startswith(prefix):<EOL><INDENT>return i, (words[i] == prefix)<EOL><DEDENT>else:<EOL><INDENT>return None, False<EOL><DEDENT>", "docstring": "See if prefix is in dictionary, as a full word or as a prefix.\n        Return two values: the first is the lowest i such that\n        words[i].startswith(prefix), or is None; the second is\n        True iff prefix itself is in the Wordlist.", "id": "f1675:c7:m1"}
{"signature": "def mutate(self):", "body": "abstract<EOL>", "docstring": "Change a few of my genes.", "id": "f1675:c3:m2"}
{"signature": "def genetic_search(problem, fitness_fn, ngen=<NUM_LIT:1000>, pmut=<NUM_LIT:0.1>, n=<NUM_LIT:20>):", "body": "s = problem.initial_state<EOL>states = [problem.result(s, a) for a in problem.actions(s)]<EOL>random.shuffle(states)<EOL>return genetic_algorithm(states[:n], problem.value, ngen, pmut)<EOL>", "docstring": "Call genetic_algorithm on the appropriate parts of a problem.\n    This requires the problem to have states that can mate and mutate,\n    plus a value method that scores states.", "id": "f1675:m18"}
{"signature": "def child_node(self, problem, action):", "body": "next = problem.result(self.state, action)<EOL>return Node(next, self, action,<EOL>problem.path_cost(self.path_cost, self.state, action, next))<EOL>", "docstring": "Fig. 3.10", "id": "f1675:c1:m3"}
{"signature": "def depth_limited_search(problem, limit=<NUM_LIT:50>):", "body": "def recursive_dls(node, problem, limit):<EOL><INDENT>if problem.goal_test(node.state):<EOL><INDENT>return node<EOL><DEDENT>elif node.depth == limit:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>cutoff_occurred = False<EOL>for child in node.expand(problem):<EOL><INDENT>result = recursive_dls(child, problem, limit)<EOL>if result == '<STR_LIT>':<EOL><INDENT>cutoff_occurred = True<EOL><DEDENT>elif result is not None:<EOL><INDENT>return result<EOL><DEDENT><DEDENT>return if_(cutoff_occurred, '<STR_LIT>', None)<EOL><DEDENT><DEDENT>return recursive_dls(Node(problem.initial), problem, limit)<EOL>", "docstring": "[Fig. 3.17]", "id": "f1675:m8"}
{"signature": "def path_cost(self, c, state1, action, state2):", "body": "return c + <NUM_LIT:1><EOL>", "docstring": "Return the cost of a solution path that arrives at state2 from\n        state1 via action, assuming cost c to get up to state1. If the problem\n        is such that the path doesn't matter, this function will only look at\n        state2.  If the path does matter, it will consider c and maybe state1\n        and action. The default method costs 1 for every step in the path.", "id": "f1675:c0:m4"}
{"signature": "def simulated_annealing(problem, schedule=exp_schedule()):", "body": "current = Node(problem.initial)<EOL>for t in range(sys.maxsize):<EOL><INDENT>T = schedule(t)<EOL>if T == <NUM_LIT:0>:<EOL><INDENT>return current<EOL><DEDENT>neighbors = current.expand(problem)<EOL>if not neighbors:<EOL><INDENT>return current<EOL><DEDENT>next = random.choice(neighbors)<EOL>delta_e = problem.value(next.state) - problem.value(current.state)<EOL>if delta_e > <NUM_LIT:0> or probability(math.exp(delta_e/T)):<EOL><INDENT>current = next<EOL><DEDENT><DEDENT>", "docstring": "[Fig. 4.5]", "id": "f1675:m14"}
{"signature": "def ModelBasedReflexAgentProgram(rules, update_state):", "body": "def program(percept):<EOL><INDENT>program.state = update_state(program.state, program.action, percept)<EOL>rule = rule_match(program.state, rules)<EOL>action = rule.action<EOL>return action<EOL><DEDENT>program.state = program.action = None<EOL>return program<EOL>", "docstring": "This agent takes action based on the percept and state. [Fig. 2.12]", "id": "f1676:m4"}
{"signature": "def percept(self, agent):", "body": "return (agent.location, self.status[agent.location])<EOL>", "docstring": "Returns the agent's location, and the location status (Dirty/Clean).", "id": "f1676:c8:m2"}
{"signature": "def TableDrivenVacuumAgent():", "body": "table = {((loc_A, '<STR_LIT>'),): '<STR_LIT>',<EOL>((loc_A, '<STR_LIT>'),): '<STR_LIT>',<EOL>((loc_B, '<STR_LIT>'),): '<STR_LIT>',<EOL>((loc_B, '<STR_LIT>'),): '<STR_LIT>',<EOL>((loc_A, '<STR_LIT>'), (loc_A, '<STR_LIT>')): '<STR_LIT>',<EOL>((loc_A, '<STR_LIT>'), (loc_A, '<STR_LIT>')): '<STR_LIT>',<EOL>((loc_A, '<STR_LIT>'), (loc_A, '<STR_LIT>'), (loc_A, '<STR_LIT>')): '<STR_LIT>',<EOL>((loc_A, '<STR_LIT>'), (loc_A, '<STR_LIT>'), (loc_A, '<STR_LIT>')): '<STR_LIT>',<EOL>}<EOL>return Agent(TableDrivenAgentProgram(table))<EOL>", "docstring": "[Fig. 2.3]", "id": "f1676:m7"}
{"signature": "def delete_thing(self, thing):", "body": "try:<EOL><INDENT>self.things.remove(thing)<EOL><DEDENT>except ValueError as e:<EOL><INDENT>print(e)<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\" % (thing, thing.location))<EOL>print(\"<STR_LIT>\" % [(thing, thing.location)<EOL>for thing in self.things])<EOL><DEDENT>if thing in self.agents:<EOL><INDENT>self.agents.remove(thing)<EOL><DEDENT>", "docstring": "Remove a thing from the environment.", "id": "f1676:c2:m12"}
{"signature": "def RandomAgentProgram(actions):", "body": "return lambda percept: random.choice(actions)<EOL>", "docstring": "An agent that chooses an action at random, ignoring all percepts.", "id": "f1676:m2"}
{"signature": "def list_things_at(self, location, tclass=Thing):", "body": "return [thing for thing in self.things<EOL>if thing.location == location and isinstance(thing, tclass)]<EOL>", "docstring": "Return all things exactly at a given location.", "id": "f1676:c2:m9"}
{"signature": "def run(self, steps=<NUM_LIT:1000>):", "body": "for step in range(steps):<EOL><INDENT>if self.is_done(): return<EOL>self.step()<EOL><DEDENT>", "docstring": "Run the Environment for given number of time steps.", "id": "f1676:c2:m8"}
{"signature": "def execute_action(self, agent, action):", "body": "if action == '<STR_LIT>':<EOL><INDENT>agent.location = loc_B<EOL>agent.performance -= <NUM_LIT:1><EOL><DEDENT>elif action == '<STR_LIT>':<EOL><INDENT>agent.location = loc_A<EOL>agent.performance -= <NUM_LIT:1><EOL><DEDENT>elif action == '<STR_LIT>':<EOL><INDENT>if self.status[agent.location] == '<STR_LIT>':<EOL><INDENT>agent.performance += <NUM_LIT:10><EOL><DEDENT>self.status[agent.location] = '<STR_LIT>'<EOL><DEDENT>", "docstring": "Change agent's location and/or location's status; track performance.\n        Score 10 for each dirt cleaned; -1 for each move.", "id": "f1676:c8:m3"}
{"signature": "def turn_heading(self, heading, inc):", "body": "return turn_heading(heading, inc)<EOL>", "docstring": "Return the heading to the left (inc=+1) or right (inc=-1) of heading.", "id": "f1676:c3:m11"}
{"signature": "def rule_match(state, rules):", "body": "for rule in rules:<EOL><INDENT>if rule.matches(state):<EOL><INDENT>return rule<EOL><DEDENT><DEDENT>", "docstring": "Find the first rule that matches state.", "id": "f1676:m5"}
{"signature": "def default_location(self, thing):", "body": "return None<EOL>", "docstring": "Default location to place a new thing with unspecified location.", "id": "f1676:c2:m4"}
{"signature": "def add_walls(self):", "body": "for x in range(self.width):<EOL><INDENT>self.add_thing(Wall(), (x, <NUM_LIT:0>))<EOL>self.add_thing(Wall(), (x, self.height-<NUM_LIT:1>))<EOL><DEDENT>for y in range(self.height):<EOL><INDENT>self.add_thing(Wall(), (<NUM_LIT:0>, y))<EOL>self.add_thing(Wall(), (self.width-<NUM_LIT:1>, y))<EOL><DEDENT>", "docstring": "Put walls around the entire perimeter of the grid.", "id": "f1676:c3:m9"}
{"signature": "def SimpleReflexAgentProgram(rules, interpret_input):", "body": "def program(percept):<EOL><INDENT>state = interpret_input(percept)<EOL>rule = rule_match(state, rules)<EOL>action = rule.action<EOL>return action<EOL><DEDENT>return program<EOL>", "docstring": "This agent takes action based solely on the percept. [Fig. 2.10]", "id": "f1676:m3"}
{"signature": "def RandomVacuumAgent():", "body": "return Agent(RandomAgentProgram(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']))<EOL>", "docstring": "Randomly choose one of the actions from the vacuum environment.", "id": "f1676:m6"}
{"signature": "def move_to(self, thing, destination):", "body": "thing.bump = self.some_things_at(destination, Obstacle)<EOL>if not thing.bump:<EOL><INDENT>thing.location = destination<EOL>for o in self.observers:<EOL><INDENT>o.thing_moved(thing)<EOL><DEDENT><DEDENT>", "docstring": "Move a thing to a new location.", "id": "f1676:c3:m6"}
{"signature": "def can_grab(self, thing):", "body": "return False<EOL>", "docstring": "Returns True if this agent can grab this thing.\n        Override for appropriate subclasses of Agent and Thing.", "id": "f1676:c1:m1"}
{"signature": "def add_thing(self, thing, location=None):", "body": "if not isinstance(thing, Thing):<EOL><INDENT>thing = Agent(thing)<EOL><DEDENT>assert thing not in self.things, \"<STR_LIT>\"<EOL>thing.location = location or self.default_location(thing)<EOL>self.things.append(thing)<EOL>if isinstance(thing, Agent):<EOL><INDENT>thing.performance = <NUM_LIT:0><EOL>self.agents.append(thing)<EOL><DEDENT>", "docstring": "Add a thing to the environment, setting its location. For\n        convenience, if thing is an agent program we make a new agent\n        for it. (Shouldn't need to override this.", "id": "f1676:c2:m11"}
{"signature": "def present(self, results):", "body": "for (score, d) in results:<EOL><INDENT>doc = self.documents[d]<EOL>print (\"<STR_LIT>\"<EOL>% (<NUM_LIT:100> * score, doc.url, doc.title[:<NUM_LIT>].expandtabs()))<EOL><DEDENT>", "docstring": "Present the results as a list.", "id": "f1677:c2:m5"}
{"signature": "def all_shifts(text):", "body": "return [shift_encode(text, n) for n in range(len(alphabet))]<EOL>", "docstring": "Return a list of all 26 possible encodings of text by a shift cipher.", "id": "f1677:m7"}
{"signature": "def score(self, word, docid):", "body": "<EOL>return (math.log(<NUM_LIT:1> + self.index[word][docid])<EOL>/ math.log(<NUM_LIT:1> + self.documents[docid].nwords))<EOL>", "docstring": "Compute a score for this word on this docid.", "id": "f1677:c2:m4"}
{"signature": "def score(self, plaintext):", "body": "s = <NUM_LIT:1.0><EOL>for bi in bigrams(plaintext):<EOL><INDENT>s = s * self.P2[bi]<EOL><DEDENT>return s<EOL>", "docstring": "Return a score for text based on how common letters pairs are.", "id": "f1677:c5:m1"}
{"signature": "def index_collection(self, filenames):", "body": "for filename in filenames:<EOL><INDENT>self.index_document(open(filename).read(), filename)<EOL><DEDENT>", "docstring": "Index a whole collection of files.", "id": "f1677:c2:m1"}
{"signature": "def words(text, reg=re.compile('<STR_LIT>')):", "body": "return reg.findall(text.lower())<EOL>", "docstring": "Return a list of the words in text, ignoring punctuation and\n    converting everything to lowercase (to canonicalize).\n    >>> words(\"``EGAD!'' Edgar cried.\")\n    ['egad', 'edgar', 'cried']", "id": "f1677:m1"}
{"signature": "def add(self, ngram):", "body": "CountingProbDist.add(self, ngram)<EOL>self.cond_prob[ngram[:-<NUM_LIT:1>]].add(ngram[-<NUM_LIT:1>])<EOL>", "docstring": "Count 1 for P[(w1, ..., wn)] and for P(wn | (w1, ..., wn-1)", "id": "f1677:c1:m1"}
{"signature": "def add_sequence(self, words):", "body": "n = self.n<EOL>words = ['<STR_LIT>',] * (n-<NUM_LIT:1>) + words<EOL>for i in range(len(words)-n):<EOL><INDENT>self.add(tuple(words[i:i+n]))<EOL><DEDENT>", "docstring": "Add each of the tuple words[i:i+n], using a sliding window.\n        Prefix some copies of the empty word, '', to make the start work.", "id": "f1677:c1:m2"}
{"signature": "def __init__(self, stopwords='<STR_LIT>'):", "body": "<EOL>update(self, index=DefaultDict(DefaultDict(<NUM_LIT:0>)),<EOL>stopwords=set(words(stopwords)), documents=[])<EOL>", "docstring": "Create an IR System. Optionally specify stopwords.", "id": "f1677:c2:m0"}
{"signature": "def viterbi_segment(text, P):", "body": "<EOL>n = len(text)<EOL>words = ['<STR_LIT>'] + list(text)<EOL>best = [<NUM_LIT:1.0>] + [<NUM_LIT:0.0>] * n<EOL>for i in range(n+<NUM_LIT:1>):<EOL><INDENT>for j in range(<NUM_LIT:0>, i):<EOL><INDENT>w = text[j:i]<EOL>if P[w] * best[i - len(w)] >= best[i]:<EOL><INDENT>best[i] = P[w] * best[i - len(w)]<EOL>words[i] = w<EOL><DEDENT><DEDENT><DEDENT>sequence = []; i = len(words)-<NUM_LIT:1><EOL>while i > <NUM_LIT:0>:<EOL><INDENT>sequence[<NUM_LIT:0>:<NUM_LIT:0>] = [words[i]]<EOL>i = i - len(words[i])<EOL><DEDENT>return sequence, best[-<NUM_LIT:1>]<EOL>", "docstring": "Find the best segmentation of the string of characters, given the\n    UnigramTextModel P.", "id": "f1677:m0"}
{"signature": "def best_policy(mdp, U):", "body": "pi = {}<EOL>for s in mdp.states:<EOL><INDENT>pi[s] = argmax(mdp.actions(s), lambda a:expected_utility(a, s, U, mdp))<EOL><DEDENT>return pi<EOL>", "docstring": "Given an MDP and a utility function U, determine the best policy,\n    as a mapping from state to action. (Equation 17.4)", "id": "f1678:m1"}
{"signature": "def T(self, state, action):", "body": "abstract<EOL>", "docstring": "Transition model.  From a state and an action, return a list\n        of (probability, result-state) pairs.", "id": "f1678:c0:m2"}
{"signature": "def value_iteration(mdp, epsilon=<NUM_LIT>):", "body": "U1 = dict([(s, <NUM_LIT:0>) for s in mdp.states])<EOL>R, T, gamma = mdp.R, mdp.T, mdp.gamma<EOL>while True:<EOL><INDENT>U = U1.copy()<EOL>delta = <NUM_LIT:0><EOL>for s in mdp.states:<EOL><INDENT>U1[s] = R(s) + gamma * max([sum([p * U[s1] for (p, s1) in T(s, a)])<EOL>for a in mdp.actions(s)])<EOL>delta = max(delta, abs(U1[s] - U[s]))<EOL><DEDENT>if delta < epsilon * (<NUM_LIT:1> - gamma) / gamma:<EOL><INDENT>return U<EOL><DEDENT><DEDENT>", "docstring": "Solving an MDP by value iteration. [Fig. 17.4]", "id": "f1678:m0"}
{"signature": "def policy_evaluation(pi, U, mdp, k=<NUM_LIT:20>):", "body": "R, T, gamma = mdp.R, mdp.T, mdp.gamma<EOL>for i in range(k):<EOL><INDENT>for s in mdp.states:<EOL><INDENT>U[s] = R(s) + gamma * sum([p * U[s1] for (p, s1) in T(s, pi[s])])<EOL><DEDENT><DEDENT>return U<EOL>", "docstring": "Return an updated utility mapping U from each state in the MDP to its\n    utility, using an approximation (modified policy iteration).", "id": "f1678:m4"}
{"signature": "def WeightedMajority(predictors, weights):", "body": "def predict(example):<EOL><INDENT>return weighted_mode((predictor(example) for predictor in predictors),<EOL>weights)<EOL><DEDENT>return predict<EOL>", "docstring": "Return a predictor that takes a weighted vote.", "id": "f1680:m16"}
{"signature": "def setproblem(self, target, inputs=None, exclude=()):", "body": "self.target = self.attrnum(target)<EOL>exclude = list(map(self.attrnum, exclude))<EOL>if inputs:<EOL><INDENT>self.inputs = removeall(self.target, inputs)<EOL><DEDENT>else:<EOL><INDENT>self.inputs = [a for a in self.attrs<EOL>if a != self.target and a not in exclude]<EOL><DEDENT>if not self.values:<EOL><INDENT>self.values = list(map(unique, list(zip(*self.examples))))<EOL><DEDENT>self.check_me()<EOL>", "docstring": "Set (or change) the target and/or inputs.\n        This way, one DataSet can be used multiple ways. inputs, if specified,\n        is a list of attributes, or specify exclude as a list of attributes\n        to not use in inputs. Attributes can be -n .. n, or an attrname.\n        Also computes the list of possible values, if that wasn't done yet.", "id": "f1680:c0:m1"}
{"signature": "def replicated_dataset(dataset, weights, n=None):", "body": "n = n or len(dataset.examples)<EOL>result = copy.copy(dataset)<EOL>result.examples = weighted_replicate(dataset.examples, weights, n)<EOL>return result<EOL>", "docstring": "Copy dataset, replicating each example in proportion to its weight.", "id": "f1680:m19"}
{"signature": "def DecisionListLearner(dataset):", "body": "def decision_list_learning(examples):<EOL><INDENT>if not examples:<EOL><INDENT>return [(True, False)]<EOL><DEDENT>t, o, examples_t = find_examples(examples)<EOL>if not t:<EOL><INDENT>raise Failure<EOL><DEDENT>return [(t, o)] + decision_list_learning(examples - examples_t)<EOL><DEDENT>def find_examples(examples):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>unimplemented()<EOL><DEDENT>def passes(example, test):<EOL><INDENT>\"<STR_LIT>\"<EOL>unimplemented()<EOL><DEDENT>def predict(example):<EOL><INDENT>\"<STR_LIT>\"<EOL>for test, outcome in predict.decision_list:<EOL><INDENT>if passes(example, test):<EOL><INDENT>return outcome<EOL><DEDENT><DEDENT><DEDENT>predict.decision_list = decision_list_learning(set(dataset.examples))<EOL>return predict<EOL>", "docstring": "[Fig. 18.11]", "id": "f1680:m10"}
{"signature": "def EnsembleLearner(learners):", "body": "def train(dataset):<EOL><INDENT>predictors = [learner(dataset) for learner in learners]<EOL>def predict(example):<EOL><INDENT>return mode(predictor(example) for predictor in predictors)<EOL><DEDENT>return predict<EOL><DEDENT>return train<EOL>", "docstring": "Given a list of learning algorithms, have them vote.", "id": "f1680:m14"}
{"signature": "def weighted_mode(values, weights):", "body": "totals = defaultdict(int)<EOL>for v, w in zip(values, weights):<EOL><INDENT>totals[v] += w<EOL><DEDENT>return max(list(totals.keys()), key=totals.get)<EOL>", "docstring": "Return the value with the greatest total weight.\n    >>> weighted_mode('abbaa', [1,2,3,1,2])\n    'b", "id": "f1680:m17"}
{"signature": "def WeightedLearner(unweighted_learner):", "body": "def train(dataset, weights):<EOL><INDENT>return unweighted_learner(replicated_dataset(dataset, weights))<EOL><DEDENT>return train<EOL>", "docstring": "Given a learner that takes just an unweighted dataset, return\n    one that takes also a weight for each example. [p. 749 footnote 14]", "id": "f1680:m18"}
{"signature": "def NaiveBayesLearner(dataset):", "body": "targetvals = dataset.values[dataset.target]<EOL>target_dist = CountingProbDist(targetvals)<EOL>attr_dists = dict(((gv, attr), CountingProbDist(dataset.values[attr]))<EOL>for gv in targetvals<EOL>for attr in dataset.inputs)<EOL>for example in dataset.examples:<EOL><INDENT>targetval = example[dataset.target]<EOL>target_dist.add(targetval)<EOL>for attr in dataset.inputs:<EOL><INDENT>attr_dists[targetval, attr].add(example[attr])<EOL><DEDENT><DEDENT>def predict(example):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>def class_probability(targetval):<EOL><INDENT>return (target_dist[targetval]<EOL>* product(attr_dists[targetval, attr][example[attr]]<EOL>for attr in dataset.inputs))<EOL><DEDENT>return argmax(targetvals, class_probability)<EOL><DEDENT>return predict<EOL>", "docstring": "Just count how many times each value of each input attribute\n    occurs, conditional on the target value. Count the different\n    target values too.", "id": "f1680:m6"}
{"signature": "def add(self, o):", "body": "self.smooth_for(o)<EOL>self.dictionary[o] += <NUM_LIT:1><EOL>self.n_obs += <NUM_LIT:1><EOL>self.sampler = None<EOL>", "docstring": "Add an observation o to the distribution.", "id": "f1680:c1:m1"}
{"signature": "def AdaBoost(L, K):", "body": "def train(dataset):<EOL><INDENT>examples, target = dataset.examples, dataset.target<EOL>N = len(examples)<EOL>epsilon = <NUM_LIT:1.>/(<NUM_LIT:2>*N)<EOL>w = [<NUM_LIT:1.>/N] * N<EOL>h, z = [], []<EOL>for k in range(K):<EOL><INDENT>h_k = L(dataset, w)<EOL>h.append(h_k)<EOL>error = sum(weight for example, weight in zip(examples, w)<EOL>if example[target] != h_k(example))<EOL>error = clip(error, epsilon, <NUM_LIT:1>-epsilon)<EOL>for j, example in enumerate(examples):<EOL><INDENT>if example[target] == h_k(example):<EOL><INDENT>w[j] *= error / (<NUM_LIT:1.> - error)<EOL><DEDENT><DEDENT>w = normalize(w)<EOL>z.append(math.log((<NUM_LIT:1.> - error) / error))<EOL><DEDENT>return WeightedMajority(h, z)<EOL><DEDENT>return train<EOL>", "docstring": "[Fig. 18.34]", "id": "f1680:m15"}
{"signature": "def attrnum(self, attr):", "body": "if attr < <NUM_LIT:0>:<EOL><INDENT>return len(self.attrs) + attr<EOL><DEDENT>elif isinstance(attr, str):<EOL><INDENT>return self.attrnames.index(attr)<EOL><DEDENT>else:<EOL><INDENT>return attr<EOL><DEDENT>", "docstring": "Returns the number used for attr, which can be a name, or -n .. n-1.", "id": "f1680:c0:m5"}
{"signature": "def add_example(self, example):", "body": "self.check_example(example)<EOL>self.examples.append(example)<EOL>", "docstring": "Add an example to the list of examples, checking it first.", "id": "f1680:c0:m3"}
{"signature": "def parse_csv(input, delim='<STR_LIT:U+002C>'):", "body": "lines = [line for line in input.splitlines() if line.strip()]<EOL>return [list(map(num_or_str, line.split(delim))) for line in lines]<EOL>", "docstring": "r\"\"\"Input is a string consisting of lines, each line has comma-delimited\n    fields.  Convert this into a list of lists.  Blank lines are skipped.\n    Fields that look like numbers are converted to numbers.\n    The delim defaults to ',' but '\\t' and None are also reasonable values.\n    >>> parse_csv('1, 2, 3 \\n 0, 2, na')\n    [[1, 2, 3], [0, 2, 'na']]", "id": "f1680:m4"}
{"signature": "def Xor(n):", "body": "return Parity(<NUM_LIT:2>, n, name=\"<STR_LIT>\")<EOL>", "docstring": "Return a DataSet with n examples of 2-input xor.", "id": "f1680:m32"}
{"signature": "def NeuralNetLearner(dataset, sizes):", "body": "activations = [[<NUM_LIT:0.0> for i in range(n)] for n in sizes]<EOL>weights = []<EOL>def predict(example):<EOL><INDENT>unimplemented()<EOL><DEDENT>return predict<EOL>", "docstring": "Layered feed-forward network.", "id": "f1680:m11"}
{"signature": "def weighted_replicate(seq, weights, n):", "body": "assert len(seq) == len(weights)<EOL>weights = normalize(weights)<EOL>wholes = [int(w*n) for w in weights]<EOL>fractions = [(w*n) % <NUM_LIT:1> for w in weights]<EOL>return (flatten([x] * nx for x, nx in zip(seq, wholes))<EOL>+ weighted_sample_with_replacement(seq, fractions, n - sum(wholes)))<EOL>", "docstring": "Return n selections from seq, with the count of each element of\n    seq proportional to the corresponding weight (filling in fractions\n    randomly).\n    >>> weighted_replicate('ABC', [1,2,1], 4)\n    ['A', 'B', 'B', 'C']", "id": "f1680:m20"}
{"signature": "def __init__(self, examples=None, attrs=None, attrnames=None, target=-<NUM_LIT:1>,<EOL>inputs=None, values=None, distance=mean_boolean_error,<EOL>name='<STR_LIT>', source='<STR_LIT>', exclude=()):", "body": "update(self, name=name, source=source, values=values, distance=distance)<EOL>if isinstance(examples, str):<EOL><INDENT>self.examples = parse_csv(examples)<EOL><DEDENT>elif examples is None:<EOL><INDENT>self.examples = parse_csv(DataFile(name+'<STR_LIT>').read())<EOL><DEDENT>else:<EOL><INDENT>self.examples = examples<EOL><DEDENT>if not attrs and self.examples:<EOL><INDENT>attrs = list(range(len(self.examples[<NUM_LIT:0>])))<EOL><DEDENT>self.attrs = attrs<EOL>if isinstance(attrnames, str):<EOL><INDENT>self.attrnames = attrnames.split()<EOL><DEDENT>else:<EOL><INDENT>self.attrnames = attrnames or attrs<EOL><DEDENT>self.setproblem(target, inputs=inputs, exclude=exclude)<EOL>", "docstring": "Accepts any of DataSet's fields.  Examples can also be a\n        string or file from which to parse examples using parse_csv.\n        Optional parameter: exclude, as documented in .setproblem().\n        >>> DataSet(examples='1, 2, 3')\n        <DataSet(): 1 examples, 3 attributes>", "id": "f1680:c0:m0"}
{"signature": "def add(self, val, subtree):", "body": "self.branches[val] = subtree<EOL>", "docstring": "Add a branch.  If self.attr = val, go to the given subtree.", "id": "f1680:c2:m2"}
{"signature": "def leave1out(learner, dataset):", "body": "return cross_validation(learner, dataset, k=len(dataset.examples))<EOL>", "docstring": "Leave one out cross-validation over the dataset.", "id": "f1680:m25"}
{"signature": "def min_conflicts(csp, max_steps=<NUM_LIT>):", "body": "<EOL>csp.current = current = {}<EOL>for var in csp.vars:<EOL><INDENT>val = min_conflicts_value(csp, var, current)<EOL>csp.assign(var, val, current)<EOL><DEDENT>for i in range(max_steps):<EOL><INDENT>conflicted = csp.conflicted_vars(current)<EOL>if not conflicted:<EOL><INDENT>return current<EOL><DEDENT>var = random.choice(conflicted)<EOL>val = min_conflicts_value(csp, var, current)<EOL>csp.assign(var, val, current)<EOL><DEDENT>return None<EOL>", "docstring": "Solve a CSP by stochastic hillclimbing on the number of conflicts.", "id": "f1681:m11"}
{"signature": "def conflicted_vars(self, current):", "body": "return [var for var in self.vars<EOL>if self.nconflicts(var, current[var], current) > <NUM_LIT:0>]<EOL>", "docstring": "Return a list of variables in current assignment that are in conflict", "id": "f1681:c0:m14"}
{"signature": "def nconflicts(self, var, val, assignment):", "body": "n = len(self.vars)<EOL>c = self.rows[val] + self.downs[var+val] + self.ups[var-val+n-<NUM_LIT:1>]<EOL>if assignment.get(var, None) == val:<EOL><INDENT>c -= <NUM_LIT:3><EOL><DEDENT>return c<EOL>", "docstring": "The number of conflicts, as recorded with each assignment.\n        Count conflicts in row and in up, down diagonals. If there\n        is a queen there, it can't conflict with itself, so subtract 3.", "id": "f1681:c2:m1"}
{"signature": "def choices(self, var):", "body": "return (self.curr_domains or self.domains)[var]<EOL>", "docstring": "Return all values for var that aren't currently ruled out.", "id": "f1681:c0:m11"}
{"signature": "def MapColoringCSP(colors, neighbors):", "body": "if isinstance(neighbors, str):<EOL><INDENT>neighbors = parse_neighbors(neighbors)<EOL><DEDENT>return CSP(list(neighbors.keys()), UniversalDict(colors), neighbors,<EOL>different_values_constraint)<EOL>", "docstring": "Make a CSP for the problem of coloring a map with different colors\n    for any two adjacent regions.  Arguments are a list of colors, and a\n    dict of {region: [neighbor,...]} entries.  This dict may also be\n    specified as a string of the form defined by parse_neighbors.", "id": "f1681:m17"}
{"signature": "def revise(csp, Xi, Xj, removals):", "body": "revised = False<EOL>for x in csp.curr_domains[Xi][:]:<EOL><INDENT>if every(lambda y: not csp.constraints(Xi, x, Xj, y),<EOL>csp.curr_domains[Xj]):<EOL><INDENT>csp.prune(Xi, x, removals)<EOL>revised = True<EOL><DEDENT><DEDENT>return revised<EOL>", "docstring": "Return true if we remove a value.", "id": "f1681:m1"}
{"signature": "def min_conflicts_value(csp, var, current):", "body": "return argmin_random_tie(csp.domains[var],<EOL>lambda val: csp.nconflicts(var, val, current))<EOL>", "docstring": "Return the value that will give var the least number of conflicts.\n    If there is a tie, choose at random.", "id": "f1681:m12"}
{"signature": "def tree_csp_solver(csp):", "body": "n = len(csp.vars)<EOL>assignment = {}<EOL>root = csp.vars[<NUM_LIT:0>]<EOL>X, parent = topological_sort(csp.vars, root)<EOL>for Xj in reversed(X):<EOL><INDENT>if not make_arc_consistent(parent[Xj], Xj, csp):<EOL><INDENT>return None<EOL><DEDENT><DEDENT>for Xi in X:<EOL><INDENT>if not csp.curr_domains[Xi]:<EOL><INDENT>return None<EOL><DEDENT>assignment[Xi] = csp.curr_domains[Xi][<NUM_LIT:0>]<EOL><DEDENT>return assignment<EOL>", "docstring": "[Fig. 6.11]", "id": "f1681:m13"}
{"signature": "def unordered_domain_values(var, assignment, csp):", "body": "return csp.choices(var)<EOL>", "docstring": "The default value order.", "id": "f1681:m5"}
{"signature": "def support_pruning(self):", "body": "if self.curr_domains is None:<EOL><INDENT>self.curr_domains = dict((v, list(self.domains[v]))<EOL>for v in self.vars)<EOL><DEDENT>", "docstring": "Make sure we can prune values from domains. (We want to pay\n        for this only if we use it.)", "id": "f1681:c0:m8"}
{"signature": "def mrv(assignment, csp):", "body": "return argmin_random_tie(<EOL>[v for v in csp.vars if v not in assignment],<EOL>lambda var: num_legal_values(csp, var, assignment))<EOL>", "docstring": "Minimum-remaining-values heuristic.", "id": "f1681:m3"}
{"signature": "def result(self, state, xxx_todo_changeme):", "body": "(var, val) = xxx_todo_changeme<EOL>return state + ((var, val),)<EOL>", "docstring": "Perform an action and return the new state.", "id": "f1681:c0:m6"}
{"signature": "def forward_checking(csp, var, value, assignment, removals):", "body": "for B in csp.neighbors[var]:<EOL><INDENT>if B not in assignment:<EOL><INDENT>for b in csp.curr_domains[B][:]:<EOL><INDENT>if not csp.constraints(var, value, B, b):<EOL><INDENT>csp.prune(B, b, removals)<EOL><DEDENT><DEDENT>if not csp.curr_domains[B]:<EOL><INDENT>return False<EOL><DEDENT><DEDENT><DEDENT>return True<EOL>", "docstring": "Prune neighbor values inconsistent with var=value.", "id": "f1681:m8"}
{"signature": "def display(self, assignment):", "body": "<EOL>print('<STR_LIT>', self, '<STR_LIT>', assignment)<EOL>", "docstring": "Show a human-readable representation of the CSP.", "id": "f1681:c0:m4"}
{"signature": "def Zebra():", "body": "Colors = '<STR_LIT>'.split()<EOL>Pets = '<STR_LIT>'.split()<EOL>Drinks = '<STR_LIT>'.split()<EOL>Countries = '<STR_LIT>'.split()<EOL>Smokes = '<STR_LIT>'.split()<EOL>vars = Colors + Pets + Drinks + Countries + Smokes<EOL>domains = {}<EOL>for var in vars:<EOL><INDENT>domains[var] = list(range(<NUM_LIT:1>, <NUM_LIT:6>))<EOL><DEDENT>domains['<STR_LIT>'] = [<NUM_LIT:1>]<EOL>domains['<STR_LIT>'] = [<NUM_LIT:3>]<EOL>neighbors = parse_neighbors(\"\"\"<STR_LIT>\"\"\", vars)<EOL>for type in [Colors, Pets, Drinks, Countries, Smokes]:<EOL><INDENT>for A in type:<EOL><INDENT>for B in type:<EOL><INDENT>if A != B:<EOL><INDENT>if B not in neighbors[A]: neighbors[A].append(B)<EOL>if A not in neighbors[B]: neighbors[B].append(A)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>def zebra_constraint(A, a, B, b, recurse=<NUM_LIT:0>):<EOL><INDENT>same = (a == b)<EOL>next_to = abs(a - b) == <NUM_LIT:1><EOL>if A == '<STR_LIT>' and B == '<STR_LIT>': return same<EOL>if A == '<STR_LIT>' and B == '<STR_LIT>': return same<EOL>if A == '<STR_LIT>' and B == '<STR_LIT>': return next_to<EOL>if A == '<STR_LIT>' and B == '<STR_LIT>': return next_to<EOL>if A == '<STR_LIT>' and B == '<STR_LIT>': return same<EOL>if A == '<STR_LIT>' and B == '<STR_LIT>': return same<EOL>if A == '<STR_LIT>' and B == '<STR_LIT>': return same<EOL>if A == '<STR_LIT>' and B == '<STR_LIT>': return same<EOL>if A == '<STR_LIT>' and B == '<STR_LIT>': return same<EOL>if A == '<STR_LIT>' and B == '<STR_LIT>': return next_to<EOL>if A == '<STR_LIT>' and B == '<STR_LIT>': return same<EOL>if A == '<STR_LIT>' and B == '<STR_LIT>': return (a - <NUM_LIT:1>) == b<EOL>if recurse == <NUM_LIT:0>: return zebra_constraint(B, b, A, a, <NUM_LIT:1>)<EOL>if ((A in Colors and B in Colors) or<EOL>(A in Pets and B in Pets) or<EOL>(A in Drinks and B in Drinks) or<EOL>(A in Countries and B in Countries) or<EOL>(A in Smokes and B in Smokes)): return not same<EOL>raise '<STR_LIT:error>'<EOL><DEDENT>return CSP(vars, domains, neighbors, zebra_constraint)<EOL>", "docstring": "Return an instance of the Zebra Puzzle.", "id": "f1681:m21"}
{"signature": "def lcv(var, assignment, csp):", "body": "return sorted(csp.choices(var),<EOL>key=lambda val: csp.nconflicts(var, val, assignment))<EOL>", "docstring": "Least-constraining-values heuristic.", "id": "f1681:m6"}
{"signature": "def nconflicts(self, var, val, assignment):", "body": "<EOL>def conflict(var2):<EOL><INDENT>return (var2 in assignment<EOL>and not self.constraints(var, val, var2, assignment[var2]))<EOL><DEDENT>return count_if(conflict, self.neighbors[var])<EOL>", "docstring": "Return the number of conflicts var=val has with other variables.", "id": "f1681:c0:m3"}
{"signature": "def AC3(csp, queue=None, removals=None):", "body": "if queue is None:<EOL><INDENT>queue = [(Xi, Xk) for Xi in csp.vars for Xk in csp.neighbors[Xi]]<EOL><DEDENT>csp.support_pruning()<EOL>while queue:<EOL><INDENT>(Xi, Xj) = queue.pop()<EOL>if revise(csp, Xi, Xj, removals):<EOL><INDENT>if not csp.curr_domains[Xi]:<EOL><INDENT>return False<EOL><DEDENT>for Xk in csp.neighbors[Xi]:<EOL><INDENT>if Xk != Xi:<EOL><INDENT>queue.append((Xk, Xi))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return True<EOL>", "docstring": "[Fig. 6.3]", "id": "f1681:m0"}
{"signature": "def parse_neighbors(neighbors, vars=[]):", "body": "dict = DefaultDict([])<EOL>for var in vars:<EOL><INDENT>dict[var] = []<EOL><DEDENT>specs = [spec.split('<STR_LIT::>') for spec in neighbors.split('<STR_LIT:;>')]<EOL>for (A, Aneighbors) in specs:<EOL><INDENT>A = A.strip()<EOL>dict.setdefault(A, [])<EOL>for B in Aneighbors.split():<EOL><INDENT>dict[A].append(B)<EOL>dict[B].append(A)<EOL><DEDENT><DEDENT>return dict<EOL>", "docstring": "Convert a string of the form 'X: Y Z; Y: Z' into a dict mapping\n    regions to neighbors.  The syntax is a region name followed by a ':'\n    followed by zero or more region names, followed by ';', repeated for\n    each region name.  If you say 'X: Y' you don't need 'Y: X'.\n    >>> parse_neighbors('X: Y Z; Y: Z')\n    {'Y': ['X', 'Z'], 'X': ['Y', 'Z'], 'Z': ['X', 'Y']}", "id": "f1681:m18"}
{"signature": "def actions(self, state):", "body": "if len(state) == len(self.vars):<EOL><INDENT>return []<EOL><DEDENT>else:<EOL><INDENT>assignment = dict(state)<EOL>var = find_if(lambda v: v not in assignment, self.vars)<EOL>return [(var, val) for val in self.domains[var]<EOL>if self.nconflicts(var, val, assignment) == <NUM_LIT:0>]<EOL><DEDENT>", "docstring": "Return a list of applicable actions: nonconflicting\n        assignments to an unassigned variable.", "id": "f1681:c0:m5"}
{"signature": "def first_unassigned_variable(assignment, csp):", "body": "return find_if(lambda var: var not in assignment, csp.vars)<EOL>", "docstring": "The default variable order.", "id": "f1681:m2"}
{"signature": "def assign(self, var, val, assignment):", "body": "oldval = assignment.get(var, None)<EOL>if val != oldval:<EOL><INDENT>if oldval is not None: <EOL><INDENT>self.record_conflict(assignment, var, oldval, -<NUM_LIT:1>)<EOL><DEDENT>self.record_conflict(assignment, var, val, +<NUM_LIT:1>)<EOL>CSP.assign(self, var, val, assignment)<EOL><DEDENT>", "docstring": "Assign var, and keep track of conflicts.", "id": "f1681:c2:m2"}
{"signature": "def display(self, assignment):", "body": "n = len(self.vars)<EOL>for val in range(n):<EOL><INDENT>for var in range(n):<EOL><INDENT>if assignment.get(var,'<STR_LIT>') == val: ch = '<STR_LIT>'<EOL>elif (var+val) % <NUM_LIT:2> == <NUM_LIT:0>: ch = '<STR_LIT:.>'<EOL>else: ch = '<STR_LIT:->'<EOL>print(ch, end='<STR_LIT:U+0020>')<EOL><DEDENT>print('<STR_LIT:U+0020>', end='<STR_LIT:U+0020>')<EOL>for var in range(n):<EOL><INDENT>if assignment.get(var,'<STR_LIT>') == val: ch = '<STR_LIT:*>'<EOL>else: ch = '<STR_LIT:U+0020>'<EOL>print(str(self.nconflicts(var, val, assignment))+ch, end='<STR_LIT:U+0020>')<EOL><DEDENT>print()<EOL><DEDENT>", "docstring": "Print the queens and the nconflicts values (for debugging).", "id": "f1681:c2:m5"}
{"signature": "def __init__(self, vars, domains, neighbors, constraints):", "body": "vars = vars or list(domains.keys())<EOL>update(self, vars=vars, domains=domains,<EOL>neighbors=neighbors, constraints=constraints,<EOL>initial=(), curr_domains=None, nassigns=<NUM_LIT:0>)<EOL>", "docstring": "Construct a CSP problem. If vars is empty, it becomes domains.keys().", "id": "f1681:c0:m0"}
{"signature": "def pl_fc_entails(KB, q):", "body": "count = dict([(c, len(conjuncts(c.args[<NUM_LIT:0>]))) for c in KB.clauses<EOL>if c.op == '<STR_LIT>'])<EOL>inferred = DefaultDict(False)<EOL>agenda = [s for s in KB.clauses if is_prop_symbol(s.op)]<EOL>while agenda:<EOL><INDENT>p = agenda.pop()<EOL>if p == q: return True<EOL>if not inferred[p]:<EOL><INDENT>inferred[p] = True<EOL>for c in KB.clauses_with_premise(p):<EOL><INDENT>count[c] -= <NUM_LIT:1><EOL>if count[c] == <NUM_LIT:0>:<EOL><INDENT>agenda.append(c.args[<NUM_LIT:1>])<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return False<EOL>", "docstring": "Use forward chaining to see if a PropDefiniteKB entails symbol q.\n    [Fig. 7.15]\n    >>> pl_fc_entails(Fig[7,15], expr('Q'))\n    True", "id": "f1683:m23"}
{"signature": "def expr(s):", "body": "if isinstance(s, Expr): return s<EOL>if isnumber(s): return Expr(s)<EOL>s = s.replace('<STR_LIT>', '<STR_LIT>').replace('<STR_LIT>', '<STR_LIT>')<EOL>s = s.replace('<STR_LIT>', '<STR_LIT:%>').replace('<STR_LIT>', '<STR_LIT>')<EOL>s = re.sub(r'<STR_LIT>', r'<STR_LIT>', s)<EOL>return eval(s, {'<STR_LIT>':Expr})<EOL>", "docstring": "Create an Expr representing a logic expression by parsing the input\n    string. Symbols and numbers are automatically converted to Exprs.\n    In addition you can use alternative spellings of these operators:\n      'x ==> y'   parses as   (x >> y)    # Implication\n      'x <== y'   parses as   (x << y)    # Reverse implication\n      'x <=> y'   parses as   (x % y)     # Logical equivalence\n      'x =/= y'   parses as   (x ^ y)     # Logical disequality (xor)\n    But BE CAREFUL; precedence of implication is wrong. expr('P & Q ==> R & S')\n    is ((P & (Q >> R)) & S); so you must use expr('(P & Q) ==> (R & S)').\n    >>> expr('P <=> Q(1)')\n    (P <=> Q(1))\n    >>> expr('P & Q | ~R(x, F(x))')\n    ((P & Q) | ~R(x, F(x)))", "id": "f1683:m1"}
{"signature": "def ask_generator(self, query):", "body": "if pl_fc_entails(self.clauses, query):<EOL><INDENT>yield {}<EOL><DEDENT>", "docstring": "Yield the empty substitution if KB implies query; else nothing.", "id": "f1683:c3:m1"}
{"signature": "def variables(s):", "body": "result = set([])<EOL>def walk(s):<EOL><INDENT>if is_variable(s):<EOL><INDENT>result.add(s)<EOL><DEDENT>else:<EOL><INDENT>for arg in s.args:<EOL><INDENT>walk(arg)<EOL><DEDENT><DEDENT><DEDENT>walk(s)<EOL>return result<EOL>", "docstring": "Return a set of the variables in expression s.\n    >>> ppset(variables(F(x, A, y)))\n    set([x, y])\n    >>> ppset(variables(F(G(x), z)))\n    set([x, z])\n    >>> ppset(variables(expr('F(x, x) & G(x, y) & H(y, z) & R(A, z, z)')))\n    set([x, y, z])", "id": "f1683:m5"}
{"signature": "def move_not_inwards(s):", "body": "if s.op == '<STR_LIT>':<EOL><INDENT>NOT = lambda b: move_not_inwards(~b)<EOL>a = s.args[<NUM_LIT:0>]<EOL>if a.op == '<STR_LIT>': return move_not_inwards(a.args[<NUM_LIT:0>]) <EOL>if a.op =='<STR_LIT:&>': return associate('<STR_LIT:|>', list(map(NOT, a.args)))<EOL>if a.op =='<STR_LIT:|>': return associate('<STR_LIT:&>', list(map(NOT, a.args)))<EOL>return s<EOL><DEDENT>elif is_symbol(s.op) or not s.args:<EOL><INDENT>return s<EOL><DEDENT>else:<EOL><INDENT>return Expr(s.op, *list(map(move_not_inwards, s.args)))<EOL><DEDENT>", "docstring": "Rewrite sentence s by moving negation sign inward.\n    >>> move_not_inwards(~(A | B))\n    (~A & ~B)\n    >>> move_not_inwards(~(A & B))\n    (~A | ~B)\n    >>> move_not_inwards(~(~(A | ~B) | ~~C))\n    ((A | ~B) & ~C)", "id": "f1683:m15"}
{"signature": "def subst(s, x):", "body": "if isinstance(x, list):<EOL><INDENT>return [subst(s, xi) for xi in x]<EOL><DEDENT>elif isinstance(x, tuple):<EOL><INDENT>return tuple([subst(s, xi) for xi in x])<EOL><DEDENT>elif not isinstance(x, Expr):<EOL><INDENT>return x<EOL><DEDENT>elif is_var_symbol(x.op):<EOL><INDENT>return s.get(x, x)<EOL><DEDENT>else:<EOL><INDENT>return Expr(x.op, *[subst(s, arg) for arg in x.args])<EOL><DEDENT>", "docstring": "Substitute the substitution s into the expression x.\n    >>> subst({x: 42, y:0}, F(x) + y)\n    (F(42) + 0)", "id": "f1683:m40"}
{"signature": "def is_definite_clause(s):", "body": "if is_symbol(s.op):<EOL><INDENT>return True<EOL><DEDENT>elif s.op == '<STR_LIT>':<EOL><INDENT>antecedent, consequent = s.args<EOL>return (is_symbol(consequent.op)<EOL>and every(lambda arg: is_symbol(arg.op), conjuncts(antecedent)))<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "returns True for exprs s of the form A & B & ... & C ==> D,\n    where all literals are positive.  In clause form, this is\n    ~A | ~B | ... | ~C | D, where exactly one clause is positive.\n    >>> is_definite_clause(expr('Farmer(Mac)'))\n    True\n    >>> is_definite_clause(expr('~Farmer(Mac)'))\n    False\n    >>> is_definite_clause(expr('(Farmer(f) & Rabbit(r)) ==> Hates(f, r)'))\n    True\n    >>> is_definite_clause(expr('(Farmer(f) & ~Rabbit(r)) ==> Hates(f, r)'))\n    False\n    >>> is_definite_clause(expr('(Farmer(f) | Rabbit(r)) ==> Hates(f, r)'))\n    False", "id": "f1683:m6"}
{"signature": "def ask(self, query):", "body": "for result in self.ask_generator(query):<EOL><INDENT>return result<EOL><DEDENT>return False<EOL>", "docstring": "Return a substitution that makes the query true, or,\n        failing that, return False.", "id": "f1683:c0:m2"}
{"signature": "def tell(self, sentence):", "body": "abstract<EOL>", "docstring": "Add the sentence to the KB.", "id": "f1683:c0:m1"}
{"signature": "def standardize_variables(sentence, dic=None):", "body": "if dic is None: dic = {}<EOL>if not isinstance(sentence, Expr):<EOL><INDENT>return sentence<EOL><DEDENT>elif is_var_symbol(sentence.op):<EOL><INDENT>if sentence in dic:<EOL><INDENT>return dic[sentence]<EOL><DEDENT>else:<EOL><INDENT>v = Expr('<STR_LIT>' % next(standardize_variables.counter))<EOL>dic[sentence] = v<EOL>return v<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return Expr(sentence.op,<EOL>*[standardize_variables(a, dic) for a in sentence.args])<EOL><DEDENT>", "docstring": "Replace all the variables in sentence with new variables.\n    >>> e = expr('F(a, b, c) & G(c, A, 23)')\n    >>> len(variables(standardize_variables(e)))\n    3\n    >>> variables(e).intersection(variables(standardize_variables(e)))\n    set([])\n    >>> is_variable(standardize_variables(expr('x')))\n    True", "id": "f1683:m42"}
{"signature": "def diff(y, x):", "body": "if y == x: return ONE<EOL>elif not y.args: return ZERO<EOL>else:<EOL><INDENT>u, op, v = y.args[<NUM_LIT:0>], y.op, y.args[-<NUM_LIT:1>]<EOL>if op == '<STR_LIT:+>': return diff(u, x) + diff(v, x)<EOL>elif op == '<STR_LIT:->' and len(args) == <NUM_LIT:1>: return -diff(u, x)<EOL>elif op == '<STR_LIT:->': return diff(u, x) - diff(v, x)<EOL>elif op == '<STR_LIT:*>': return u * diff(v, x) + v * diff(u, x)<EOL>elif op == '<STR_LIT:/>': return (v*diff(u, x) - u*diff(v, x)) / (v * v)<EOL>elif op == '<STR_LIT>' and isnumber(x.op):<EOL><INDENT>return (v * u ** (v - <NUM_LIT:1>) * diff(u, x))<EOL><DEDENT>elif op == '<STR_LIT>': return (v * u ** (v - <NUM_LIT:1>) * diff(u, x)<EOL>+ u ** v * Expr('<STR_LIT>')(u) * diff(v, x))<EOL>elif op == '<STR_LIT>': return diff(u, x) / u<EOL>else: raise ValueError(\"<STR_LIT>\" % (op, y, x))<EOL><DEDENT>", "docstring": "Return the symbolic derivative, dy/dx, as an Expr.\n    However, you probably want to simplify the results with simp.\n    >>> diff(x * x, x)\n    ((x * 1) + (x * 1))\n    >>> simp(diff(x * x, x))\n    (2 * x)", "id": "f1683:m47"}
{"signature": "def is_var_symbol(s):", "body": "return is_symbol(s) and s[<NUM_LIT:0>].islower()<EOL>", "docstring": "A logic variable symbol is an initial-lowercase string.", "id": "f1683:m3"}
{"signature": "def to_cnf(s):", "body": "if isinstance(s, str): s = expr(s)<EOL>s = eliminate_implications(s) <EOL>s = move_not_inwards(s) <EOL>return distribute_and_over_or(s)<EOL>", "docstring": "Convert a propositional logical sentence s to conjunctive normal form.\n    That is, to the form ((A | ~B | ...) & (B | C | ...) & ...) [p. 253]\n    >>> to_cnf(\"~(B|C)\")\n    (~B & ~C)\n    >>> to_cnf(\"B <=> (P1|P2)\")\n    ((~P1 | B) & (~P2 | B) & (P1 | P2 | ~B))\n    >>> to_cnf(\"a | (b & c) | d\")\n    ((b | a | d) & (c | a | d))\n    >>> to_cnf(\"A & (B | (D & E))\")\n    (A & (D | B) & (E | B))\n    >>> to_cnf(\"A | (B | (C | (D & E)))\")\n    ((D | A | B | C) & (E | A | B | C))", "id": "f1683:m13"}
{"signature": "def dpll(clauses, symbols, model):", "body": "unknown_clauses = [] <EOL>for c in clauses:<EOL><INDENT>val =  pl_true(c, model)<EOL>if val == False:<EOL><INDENT>return False<EOL><DEDENT>if val != True:<EOL><INDENT>unknown_clauses.append(c)<EOL><DEDENT><DEDENT>if not unknown_clauses:<EOL><INDENT>return model<EOL><DEDENT>P, value = find_pure_symbol(symbols, unknown_clauses)<EOL>if P:<EOL><INDENT>return dpll(clauses, removeall(P, symbols), extend(model, P, value))<EOL><DEDENT>P, value = find_unit_clause(clauses, model)<EOL>if P:<EOL><INDENT>return dpll(clauses, removeall(P, symbols), extend(model, P, value))<EOL><DEDENT>P, symbols = symbols[<NUM_LIT:0>], symbols[<NUM_LIT:1>:]<EOL>return (dpll(clauses, symbols, extend(model, P, True)) or<EOL>dpll(clauses, symbols, extend(model, P, False)))<EOL>", "docstring": "See if the clauses are true in a partial model.", "id": "f1683:m25"}
{"signature": "def __hash__(self):", "body": "return hash(self.op) ^ hash(tuple(self.args))<EOL>", "docstring": "Need a hash method so Exprs can live in dicts.", "id": "f1683:c2:m5"}
{"signature": "def tell(self, sentence):", "body": "assert is_definite_clause(sentence), \"<STR_LIT>\"<EOL>self.clauses.append(sentence)<EOL>", "docstring": "Add a definite clause to this KB.", "id": "f1683:c3:m0"}
{"signature": "def eliminate_implications(s):", "body": "if not s.args or is_symbol(s.op): return s     <EOL>args = list(map(eliminate_implications, s.args))<EOL>a, b = args[<NUM_LIT:0>], args[-<NUM_LIT:1>]<EOL>if s.op == '<STR_LIT>':<EOL><INDENT>return (b | ~a)<EOL><DEDENT>elif s.op == '<STR_LIT>':<EOL><INDENT>return (a | ~b)<EOL><DEDENT>elif s.op == '<STR_LIT>':<EOL><INDENT>return (a | ~b) & (b | ~a)<EOL><DEDENT>elif s.op == '<STR_LIT>':<EOL><INDENT>assert len(args) == <NUM_LIT:2>   <EOL>return (a & ~b) | (~a & b)<EOL><DEDENT>else:<EOL><INDENT>assert s.op in ('<STR_LIT:&>', '<STR_LIT:|>', '<STR_LIT>')<EOL>return Expr(s.op, *args)<EOL><DEDENT>", "docstring": "Change >>, <<, and <=> into &, |, and ~. That is, return an Expr\n    that is equivalent to s, but has only &, |, and ~ as logical operators.\n    >>> eliminate_implications(A >> (~B << C))\n    ((~B | ~C) | ~A)\n    >>> eliminate_implications(A ^ B)\n    ((A & ~B) | (~A & B))", "id": "f1683:m14"}
{"signature": "def pl_true(exp, model={}):", "body": "op, args = exp.op, exp.args<EOL>if exp == TRUE:<EOL><INDENT>return True<EOL><DEDENT>elif exp == FALSE:<EOL><INDENT>return False<EOL><DEDENT>elif is_prop_symbol(op):<EOL><INDENT>return model.get(exp)<EOL><DEDENT>elif op == '<STR_LIT>':<EOL><INDENT>p = pl_true(args[<NUM_LIT:0>], model)<EOL>if p is None: return None<EOL>else: return not p<EOL><DEDENT>elif op == '<STR_LIT:|>':<EOL><INDENT>result = False<EOL>for arg in args:<EOL><INDENT>p = pl_true(arg, model)<EOL>if p is True: return True<EOL>if p is None: result = None<EOL><DEDENT>return result<EOL><DEDENT>elif op == '<STR_LIT:&>':<EOL><INDENT>result = True<EOL>for arg in args:<EOL><INDENT>p = pl_true(arg, model)<EOL>if p is False: return False<EOL>if p is None: result = None<EOL><DEDENT>return result<EOL><DEDENT>p, q = args<EOL>if op == '<STR_LIT>':<EOL><INDENT>return pl_true(~p | q, model)<EOL><DEDENT>elif op == '<STR_LIT>':<EOL><INDENT>return pl_true(p | ~q, model)<EOL><DEDENT>pt = pl_true(p, model)<EOL>if pt is None: return None<EOL>qt = pl_true(q, model)<EOL>if qt is None: return None<EOL>if op == '<STR_LIT>':<EOL><INDENT>return pt == qt<EOL><DEDENT>elif op == '<STR_LIT>':<EOL><INDENT>return pt != qt<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + str(exp))<EOL><DEDENT>", "docstring": "Return True if the propositional logic expression is true in the model,\n    and False if it is false. If the model does not specify the value for\n    every proposition, this may return None to indicate 'not obvious';\n    this may happen even when the expression is tautological.", "id": "f1683:m12"}
{"signature": "def tt_check_all(kb, alpha, symbols, model):", "body": "if not symbols:<EOL><INDENT>if pl_true(kb, model):<EOL><INDENT>result = pl_true(alpha, model)<EOL>assert result in (True, False)<EOL>return result<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>P, rest = symbols[<NUM_LIT:0>], symbols[<NUM_LIT:1>:]<EOL>return (tt_check_all(kb, alpha, rest, extend(model, P, True)) and<EOL>tt_check_all(kb, alpha, rest, extend(model, P, False)))<EOL><DEDENT>", "docstring": "Auxiliary routine to implement tt_entails.", "id": "f1683:m9"}
{"signature": "def KB_AgentProgram(KB):", "body": "steps = itertools.count()<EOL>def program(percept):<EOL><INDENT>t = next(steps)<EOL>KB.tell(make_percept_sentence(percept, t))<EOL>action = KB.ask(make_action_query(t))<EOL>KB.tell(make_action_sentence(action, t))<EOL>return action<EOL><DEDENT>def make_percept_sentence(self, percept, t):<EOL><INDENT>return Expr(\"<STR_LIT>\")(percept, t)<EOL><DEDENT>def make_action_query(self, t):<EOL><INDENT>return expr(\"<STR_LIT>\" % t)<EOL><DEDENT>def make_action_sentence(self, action, t):<EOL><INDENT>return Expr(\"<STR_LIT>\")(action[expr('<STR_LIT:action>')], t)<EOL><DEDENT>return program<EOL>", "docstring": "A generic logical knowledge-based agent program. [Fig. 7.1]", "id": "f1683:m0"}
{"signature": "def ask_generator(self, query):", "body": "abstract<EOL>", "docstring": "Yield all the substitutions that make query true.", "id": "f1683:c0:m3"}
{"signature": "def parse_definite_clause(s):", "body": "assert is_definite_clause(s)<EOL>if is_symbol(s.op):<EOL><INDENT>return [], s<EOL><DEDENT>else:<EOL><INDENT>antecedent, consequent = s.args<EOL>return conjuncts(antecedent), consequent<EOL><DEDENT>", "docstring": "Return the antecedents and the consequent of a definite clause.", "id": "f1683:m7"}
{"signature": "def disjuncts(s):", "body": "return dissociate('<STR_LIT:|>', [s])<EOL>", "docstring": "Return a list of the disjuncts in the sentence s.\n    >>> disjuncts(A | B)\n    [A, B]\n    >>> disjuncts(A & B)\n    [(A & B)]", "id": "f1683:m20"}
{"signature": "def ppsubst(s):", "body": "ppdict(s)<EOL>", "docstring": "Pretty-print substitution s", "id": "f1683:m54"}
{"signature": "def unit_clause_assign(clause, model):", "body": "P, value = None, None<EOL>for literal in disjuncts(clause):<EOL><INDENT>sym, positive = inspect_literal(literal)<EOL>if sym in model:<EOL><INDENT>if model[sym] == positive:<EOL><INDENT>return None, None  <EOL><DEDENT><DEDENT>elif P:<EOL><INDENT>return None, None      <EOL><DEDENT>else:<EOL><INDENT>P, value = sym, positive<EOL><DEDENT><DEDENT>return P, value<EOL>", "docstring": "Return a single variable/value pair that makes clause true in\n    the model, if possible.\n    >>> unit_clause_assign(A|B|C, {A:True})\n    (None, None)\n    >>> unit_clause_assign(B|~C, {A:True})\n    (None, None)\n    >>> unit_clause_assign(~A|~B, {A:True})\n    (B, False)", "id": "f1683:m28"}
{"signature": "def unify(x, y, s):", "body": "if s is None:<EOL><INDENT>return None<EOL><DEDENT>elif x == y:<EOL><INDENT>return s<EOL><DEDENT>elif is_variable(x):<EOL><INDENT>return unify_var(x, y, s)<EOL><DEDENT>elif is_variable(y):<EOL><INDENT>return unify_var(y, x, s)<EOL><DEDENT>elif isinstance(x, Expr) and isinstance(y, Expr):<EOL><INDENT>return unify(x.args, y.args, unify(x.op, y.op, s))<EOL><DEDENT>elif isinstance(x, str) or isinstance(y, str):<EOL><INDENT>return None<EOL><DEDENT>elif issequence(x) and issequence(y) and len(x) == len(y):<EOL><INDENT>if not x: return s<EOL>return unify(x[<NUM_LIT:1>:], y[<NUM_LIT:1>:], unify(x[<NUM_LIT:0>], y[<NUM_LIT:0>], s))<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Unify expressions x,y with substitution s; return a substitution that\n    would make x,y equal, or None if x,y can not unify. x and y can be\n    variables (e.g. Expr('x')), constants, lists, or Exprs. [Fig. 9.1]\n    >>> ppsubst(unify(x + y, y + C, {}))\n    {x: y, y: C}", "id": "f1683:m35"}
{"signature": "def __init__(self, name, rules, lexicon):", "body": "update(self, name=name, rules=rules, lexicon=lexicon)<EOL>self.categories = DefaultDict([])<EOL>for lhs in lexicon:<EOL><INDENT>for word in lexicon[lhs]:<EOL><INDENT>self.categories[word].append(lhs)<EOL><DEDENT><DEDENT>", "docstring": "A grammar has a set of rules and a lexicon.", "id": "f1684:c0:m0"}
{"signature": "def extender(self, edge):", "body": "(j, k, B, _, _) = edge<EOL>for (i, j, A, alpha, B1b) in self.chart[j]:<EOL><INDENT>if B1b and B == B1b[<NUM_LIT:0>]:<EOL><INDENT>self.add_edge([i, k, A, alpha + [edge], B1b[<NUM_LIT:1>:]])<EOL><DEDENT><DEDENT>", "docstring": "See what edges can be extended by this edge.", "id": "f1684:c1:m6"}
{"signature": "def isa(self, word, cat):", "body": "return cat in self.categories[word]<EOL>", "docstring": "Return True iff word is of category cat", "id": "f1684:c0:m2"}
{"signature": "def generate_random(grammar=E_, s='<STR_LIT:S>'):", "body": "import random<EOL>def rewrite(tokens, into):<EOL><INDENT>for token in tokens:<EOL><INDENT>if token in grammar.rules:<EOL><INDENT>rewrite(random.choice(grammar.rules[token]), into)<EOL><DEDENT>elif token in grammar.lexicon:<EOL><INDENT>into.append(random.choice(grammar.lexicon[token]))<EOL><DEDENT>else:<EOL><INDENT>into.append(token)<EOL><DEDENT><DEDENT>return into<EOL><DEDENT>return '<STR_LIT:U+0020>'.join(rewrite(s.split(), []))<EOL>", "docstring": "Replace each token in s by a random entry in grammar (recursively).\n    This is useful for testing a grammar, e.g. generate_random(E_)", "id": "f1684:m2"}
{"signature": "def rewrites_for(self, cat):", "body": "return self.rules.get(cat, ())<EOL>", "docstring": "Return a sequence of possible rhs's that cat can be rewritten as.", "id": "f1684:c0:m1"}
{"signature": "def Lexicon(**rules):", "body": "for (lhs, rhs) in list(rules.items()):<EOL><INDENT>rules[lhs] = [word.strip() for word in rhs.split('<STR_LIT:|>')]<EOL><DEDENT>return rules<EOL>", "docstring": "Create a dictionary mapping symbols to alternative words.\n    >>> Lexicon(Art = \"the | a | an\")\n    {'Art': ['the', 'a', 'an']}", "id": "f1684:m1"}
{"signature": "def __init__(self, grammar, trace=False):", "body": "update(self, grammar=grammar, trace=trace)<EOL>", "docstring": "A datastructure for parsing a string; and methods to do the parse.\n        self.chart[i] holds the edges that end just before the i'th word.\n        Edges are 5-element lists of [start, end, lhs, [found], [expects]].", "id": "f1684:c1:m0"}
{"signature": "def Rules(**rules):", "body": "for (lhs, rhs) in list(rules.items()):<EOL><INDENT>rules[lhs] = [alt.strip().split() for alt in rhs.split('<STR_LIT:|>')]<EOL><DEDENT>return rules<EOL>", "docstring": "Create a dictionary mapping symbols to alternative sequences.\n    >>> Rules(A = \"B C | D E\")\n    {'A': [['B', 'C'], ['D', 'E']]}", "id": "f1684:m0"}
{"signature": "def actions(self, state):", "body": "abstract<EOL>", "docstring": "Return a list of the allowable moves at this point.", "id": "f1685:c0:m0"}
{"signature": "def alphabeta_full_search(state, game):", "body": "player = game.to_move(state)<EOL>def max_value(state, alpha, beta):<EOL><INDENT>if game.terminal_test(state):<EOL><INDENT>return game.utility(state, player)<EOL><DEDENT>v = -infinity<EOL>for a in game.actions(state):<EOL><INDENT>v = max(v, min_value(game.result(state, a), alpha, beta))<EOL>if v >= beta:<EOL><INDENT>return v<EOL><DEDENT>alpha = max(alpha, v)<EOL><DEDENT>return v<EOL><DEDENT>def min_value(state, alpha, beta):<EOL><INDENT>if game.terminal_test(state):<EOL><INDENT>return game.utility(state, player)<EOL><DEDENT>v = infinity<EOL>for a in game.actions(state):<EOL><INDENT>v = min(v, max_value(game.result(state, a), alpha, beta))<EOL>if v <= alpha:<EOL><INDENT>return v<EOL><DEDENT>beta = min(beta, v)<EOL><DEDENT>return v<EOL><DEDENT>return argmax(game.actions(state),<EOL>lambda a: min_value(game.result(state, a),<EOL>-infinity, infinity))<EOL>", "docstring": "Search game to determine best action; use alpha-beta pruning.\n    As in [Fig. 5.7], this version searches all the way to the leaves.", "id": "f1685:m1"}
{"signature": "def result(self, state, move):", "body": "abstract<EOL>", "docstring": "Return the state that results from making a move from a state.", "id": "f1685:c0:m1"}
{"signature": "def random_player(game, state):", "body": "return random.choice(game.actions(state))<EOL>", "docstring": "A player that chooses a legal move at random.", "id": "f1685:m4"}
{"signature": "def minimax_decision(state, game):", "body": "player = game.to_move(state)<EOL>def max_value(state):<EOL><INDENT>if game.terminal_test(state):<EOL><INDENT>return game.utility(state, player)<EOL><DEDENT>v = -infinity<EOL>for a in game.actions(state):<EOL><INDENT>v = max(v, min_value(game.result(state, a)))<EOL><DEDENT>return v<EOL><DEDENT>def min_value(state):<EOL><INDENT>if game.terminal_test(state):<EOL><INDENT>return game.utility(state, player)<EOL><DEDENT>v = infinity<EOL>for a in game.actions(state):<EOL><INDENT>v = min(v, max_value(game.result(state, a)))<EOL><DEDENT>return v<EOL><DEDENT>return argmax(game.actions(state),<EOL>lambda a: min_value(game.result(state, a)))<EOL>", "docstring": "Given a state in a game, calculate the best move by searching\n    forward all the way to the terminal states. [Fig. 5.3]", "id": "f1685:m0"}
{"signature": "def k_in_row(self, board, move, player, xxx_todo_changeme):", "body": "(delta_x, delta_y) = xxx_todo_changeme<EOL>x, y = move<EOL>n = <NUM_LIT:0> <EOL>while board.get((x, y)) == player:<EOL><INDENT>n += <NUM_LIT:1><EOL>x, y = x + delta_x, y + delta_y<EOL><DEDENT>x, y = move<EOL>while board.get((x, y)) == player:<EOL><INDENT>n += <NUM_LIT:1><EOL>x, y = x - delta_x, y - delta_y<EOL><DEDENT>n -= <NUM_LIT:1> <EOL>return n >= self.k<EOL>", "docstring": "Return true if there is a line through move on board for player.", "id": "f1685:c2:m7"}
{"signature": "def to_move(self, state):", "body": "return state.to_move<EOL>", "docstring": "Return the player whose move it is in this state.", "id": "f1685:c0:m4"}
{"signature": "def play_game(game, *players):", "body": "state = game.initial<EOL>while True:<EOL><INDENT>for player in players:<EOL><INDENT>move = player(game, state)<EOL>state = game.result(state, move)<EOL>if game.terminal_test(state):<EOL><INDENT>return game.utility(state, game.to_move(game.initial))<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Play an n-person, move-alternating game.\n    >>> play_game(Fig52Game(), alphabeta_player, alphabeta_player)\n    3", "id": "f1685:m6"}
{"signature": "def markov_blanket_sample(X, e, bn):", "body": "Xnode = bn.variable_node(X)<EOL>Q = ProbDist(X)<EOL>for xi in bn.variable_values(X):<EOL><INDENT>ei = extend(e, X, xi)<EOL>Q[xi] = Xnode.p(xi, e) * product(Yj.p(ei[Yj.variable], ei)<EOL>for Yj in Xnode.children)<EOL><DEDENT>return probability(Q.normalize()[True])<EOL>", "docstring": "Return a sample from P(X | mb) where mb denotes that the\n    variables in the Markov blanket of X take their values from event\n    e (which must assign a value to each). The Markov blanket of X is\n    X's parents, children, and children's parents.", "id": "f1686:m18"}
{"signature": "def pointwise_product(self, other, bn):", "body": "vars = list(set(self.vars) | set(other.vars))<EOL>cpt = dict((event_values(e, vars), self.p(e) * other.p(e))<EOL>for e in all_events(vars, bn, {}))<EOL>return Factor(vars, cpt)<EOL>", "docstring": "Multiply two factors, combining their variables.", "id": "f1686:c4:m1"}
{"signature": "def all_events(vars, bn, e):", "body": "if not vars:<EOL><INDENT>yield e<EOL><DEDENT>else:<EOL><INDENT>X, rest = vars[<NUM_LIT:0>], vars[<NUM_LIT:1>:]<EOL>for e1 in all_events(rest, bn, e):<EOL><INDENT>for x in bn.variable_values(X):<EOL><INDENT>yield extend(e1, X, x)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Yield every way of extending e with values for all vars.", "id": "f1686:m11"}
{"signature": "def __init__(self, node_specs=[]):", "body": "update(self, nodes=[], vars=[])<EOL>for node_spec in node_specs:<EOL><INDENT>self.add(node_spec)<EOL><DEDENT>", "docstring": "nodes must be ordered with parents before children.", "id": "f1686:c2:m0"}
{"signature": "def add(self, node_spec):", "body": "node = BayesNode(*node_spec)<EOL>assert node.variable not in self.vars<EOL>assert every(lambda parent: parent in self.vars, node.parents)<EOL>self.nodes.append(node)<EOL>self.vars.append(node.variable)<EOL>for parent in node.parents:<EOL><INDENT>self.variable_node(parent).children.append(node)<EOL><DEDENT>", "docstring": "Add a node to the net. Its parents must already be in the\n        net, and its variable must not.", "id": "f1686:c2:m1"}
{"signature": "def __setitem__(self, val, p):", "body": "if val not in self.values:<EOL><INDENT>self.values.append(val)<EOL><DEDENT>self.prob[val] = p<EOL>", "docstring": "Set P(val) = p.", "id": "f1686:c0:m2"}
{"signature": "def prior_sample(bn):", "body": "event = {}<EOL>for node in bn.nodes:<EOL><INDENT>event[node.variable] = node.sample(event)<EOL><DEDENT>return event<EOL>", "docstring": "Randomly sample from bn's full joint distribution. The result\n    is a {variable: value} dict. [Fig. 14.13]", "id": "f1686:m12"}
{"signature": "def elimination_ask(X, e, bn):", "body": "assert X not in e, \"<STR_LIT>\"<EOL>factors = []<EOL>for var in reversed(bn.vars):<EOL><INDENT>factors.append(make_factor(var, e, bn))<EOL>if is_hidden(var, X, e):<EOL><INDENT>factors = sum_out(var, factors, bn)<EOL><DEDENT><DEDENT>return pointwise_product(factors, bn).normalize()<EOL>", "docstring": "Compute bn's P(X|e) by variable elimination. [Fig. 14.11]\n    >>> elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary\n    ...  ).show_approx()\n    'False: 0.716, True: 0.284", "id": "f1686:m6"}
{"signature": "def p(self, value, event):", "body": "assert isinstance(value, bool)<EOL>ptrue = self.cpt[event_values(event, self.parents)]<EOL>return if_(value, ptrue, <NUM_LIT:1> - ptrue)<EOL>", "docstring": "Return the conditional probability\n        P(X=value | parents=parent_values), where parent_values\n        are the values of parents in event. (event must assign each\n        parent a value.)\n        >>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625})\n        >>> bn.p(False, {'Burglary': False, 'Earthquake': True})\n        0.375", "id": "f1686:c3:m1"}
{"signature": "def normalize(self):", "body": "total = float(sum(self.prob.values()))<EOL>if not (<NUM_LIT:1.0>-epsilon < total < <NUM_LIT:1.0>+epsilon):<EOL><INDENT>for val in self.prob:<EOL><INDENT>self.prob[val] /= total<EOL><DEDENT><DEDENT>return self<EOL>", "docstring": "Make sure the probabilities of all values sum to 1.\n        Returns the normalized distribution.\n        Raises a ZeroDivisionError if the sum of the values is 0.\n        >>> P = ProbDist('Flip'); P['H'], P['T'] = 35, 65\n        >>> P = P.normalize()\n        >>> print '%5.3f %5.3f' % (P.prob['H'], P.prob['T'])\n        0.350 0.650", "id": "f1686:c0:m3"}
{"signature": "def enumerate_joint_ask(X, e, P):", "body": "assert X not in e, \"<STR_LIT>\"<EOL>Q = ProbDist(X) <EOL>Y = [v for v in P.variables if v != X and v not in e] <EOL>for xi in P.values(X):<EOL><INDENT>Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)<EOL><DEDENT>return Q.normalize()<EOL>", "docstring": "Return a probability distribution over the values of the variable X,\n    given the {var:val} observations e, in the JointProbDist P. [Section 13.3]\n    >>> P = JointProbDist(['X', 'Y'])\n    >>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[2,1] = 0.125\n    >>> enumerate_joint_ask('X', dict(Y=1), P).show_approx()\n    '0: 0.667, 1: 0.167, 2: 0.167'", "id": "f1686:m2"}
{"signature": "def fixed_lag_smoothing(e_t, hmm, d):", "body": "unimplemented()<EOL>", "docstring": "[Fig. 15.6]", "id": "f1686:m20"}
{"signature": "def values(self, var):", "body": "return self.vals[var]<EOL>", "docstring": "Return the set of possible values for a variable.", "id": "f1686:c1:m3"}
{"signature": "def enumerate_joint(vars, e, P):", "body": "if not vars:<EOL><INDENT>return P[e]<EOL><DEDENT>Y, rest = vars[<NUM_LIT:0>], vars[<NUM_LIT:1>:]<EOL>return sum([enumerate_joint(rest, extend(e, Y, y), P)<EOL>for y in P.values(Y)])<EOL>", "docstring": "Return the sum of those entries in P consistent with e,\n    provided vars is P's remaining variables (the ones not in e).", "id": "f1686:m3"}
{"signature": "def likelihood_weighting(X, e, bn, N):", "body": "W = dict((x, <NUM_LIT:0>) for x in bn.variable_values(X))<EOL>for j in range(N):<EOL><INDENT>sample, weight = weighted_sample(bn, e) <EOL>W[sample[X]] += weight<EOL><DEDENT>return ProbDist(X, W)<EOL>", "docstring": "Estimate the probability distribution of variable X given\n    evidence e in BayesNet bn.  [Fig. 14.15]\n    >>> seed(1017)\n    >>> likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T),\n    ...   burglary, 10000).show_approx()\n    'False: 0.702, True: 0.298'", "id": "f1686:m15"}
{"signature": "def assert_amnesty(self, input_code, errors, expected):", "body": "input_code = textwrap.dedent(input_code)<EOL>expected = textwrap.dedent(expected)<EOL>errors_by_line = defaultdict(list)<EOL>for error in errors:<EOL><INDENT>errors_by_line[error.linenum].append(error)<EOL><DEDENT>output_lines = itertools.chain.from_iterable(<EOL>fix_pylint(line, errors_by_line[lineno])<EOL>for lineno, line<EOL>in enumerate(StringIO(input_code), start=<NUM_LIT:1>)<EOL>)<EOL>self.assertEqual(expected.split(u'<STR_LIT:\\n>'), \"<STR_LIT>\".join(output_lines).split(u'<STR_LIT:\\n>'))<EOL>", "docstring": "Assert that fix_pylint produces ``expected`` when fed ``input_code`` and the\nlist of errors ``errors``.\n\nArguments:\n    input_code: A string of python code. Will be textwrap.dedented.\n    errors: A list of PylintErrors\n    expected: A string of python code. Will be textwrap.dedented.", "id": "f1688:c0:m0"}
{"signature": "def find_line_markers(source):", "body": "markers = {}<EOL>for lineno, line in enumerate(source.splitlines(), start=<NUM_LIT:1>):<EOL><INDENT>m = re.search(r\"<STR_LIT>\", line)<EOL>if m:<EOL><INDENT>markers[lineno] = m.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>return markers<EOL>", "docstring": "Find line markers in program source.\n\n    Returns a dict mapping line numbers to the marker on that line.", "id": "f1700:m0"}
{"signature": "def assert_not_file(self, filename):", "body": "self.assertFalse(os.path.isfile(filename))<EOL>", "docstring": "Assert that a file doesn't exist.", "id": "f1702:c0:m3"}
{"signature": "def merge_configs(main, tweaks):", "body": "for section in tweaks.sections():<EOL><INDENT>for option in tweaks.options(section):<EOL><INDENT>value = tweaks.get(section, option)<EOL>if option.endswith(\"<STR_LIT:+>\"):<EOL><INDENT>option = option[:-<NUM_LIT:1>]<EOL>value = main.get(section, option) + value<EOL><DEDENT>main.set(section, option, value)<EOL><DEDENT><DEDENT>", "docstring": "Merge tweaks into a main config file.", "id": "f1703:m0"}
{"signature": "def list_main(argv_unused):  ", "body": "print(\"<STR_LIT>\")<EOL>for filename in pkg_resources.resource_listdir(\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>print(filename)<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "list\n    List the FILENAMEs that edx_lint can provide.", "id": "f1704:m0"}
{"signature": "@click.command()<EOL>@click.option(<EOL>'<STR_LIT>', default=sys.stdin, type=click.File(),<EOL>help=\"<STR_LIT>\"<EOL>)<EOL>@click_log.simple_verbosity_option(default=u'<STR_LIT>')<EOL>def pylint_amnesty(pylint_output):", "body": "errors = defaultdict(lambda: defaultdict(set))<EOL>for pylint_error in parse_pylint_output(pylint_output):<EOL><INDENT>errors[pylint_error.filename][pylint_error.linenum].add(pylint_error)<EOL><DEDENT>for file_with_errors in sorted(errors):<EOL><INDENT>try:<EOL><INDENT>opened_file = open(file_with_errors)<EOL><DEDENT>except IOError:<EOL><INDENT>LOG.warning(u\"<STR_LIT>\", file_with_errors, exc_info=True)<EOL><DEDENT>else:<EOL><INDENT>with opened_file as input_file:<EOL><INDENT>output_lines = []<EOL>for line_num, line in enumerate(input_file, start=<NUM_LIT:1>):<EOL><INDENT>output_lines.extend(<EOL>fix_pylint(<EOL>line,<EOL>errors[file_with_errors][line_num]<EOL>)<EOL>)<EOL><DEDENT><DEDENT>with open(file_with_errors, '<STR_LIT:w>') as output_file:<EOL><INDENT>output_file.writelines(output_lines)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Add ``# pylint: disable`` clauses to add exceptions to all existing pylint errors in a codebase.", "id": "f1705:m3"}
{"signature": "def format_pylint_disables(error_names, tag=True):", "body": "tag_str = \"<STR_LIT>\" if tag else \"<STR_LIT>\"<EOL>if error_names:<EOL><INDENT>return u\"<STR_LIT>\".format(<EOL>disabled=\"<STR_LIT:U+002CU+0020>\".join(sorted(error_names)),<EOL>tag=tag_str,<EOL>)<EOL><DEDENT>else:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>", "docstring": "Format a list of error_names into a 'pylint: disable=' line.", "id": "f1705:m1"}
{"signature": "def visit_module(self, node):", "body": "with open(FILENAME, \"<STR_LIT:a>\") as f:<EOL><INDENT>f.write(node.file)<EOL>f.write(\"<STR_LIT:\\n>\")<EOL><DEDENT>", "docstring": "Called for each module being examined.", "id": "f1710:c0:m0"}
{"signature": "def register_checkers(linter):", "body": "if FILENAME:<EOL><INDENT>linter.register_checker(ModuleTracingChecker(linter))<EOL><DEDENT>", "docstring": "Register checkers.", "id": "f1710:m0"}
{"signature": "def register_checkers(linter):", "body": "linter.register_checker(RangeChecker(linter))<EOL>", "docstring": "Register checkers.", "id": "f1714:m0"}
{"signature": "@utils.check_messages(MESSAGE_ID)<EOL><INDENT>def visit_call(self, node):<DEDENT>", "body": "if not isinstance(node.func, astroid.Name):<EOL><INDENT>return<EOL><DEDENT>if node.func.name not in self.RANGE_FUNCTIONS:<EOL><INDENT>return<EOL><DEDENT>first = node.args[<NUM_LIT:0>]<EOL>if not isinstance(first, astroid.Const):<EOL><INDENT>return<EOL><DEDENT>if not isinstance(first.value, int):<EOL><INDENT>return<EOL><DEDENT>three1 = False<EOL>if len(node.args) == <NUM_LIT:3>:<EOL><INDENT>third = node.args[<NUM_LIT:2>]<EOL>if isinstance(third, astroid.Const):<EOL><INDENT>if isinstance(third.value, int) and third.value == <NUM_LIT:1>:<EOL><INDENT>three1 = True<EOL><DEDENT><DEDENT><DEDENT>if first.value == <NUM_LIT:0>:<EOL><INDENT>if len(node.args) == <NUM_LIT:2>:<EOL><INDENT>self.add_message(self.MESSAGE_ID, args=(node.func.name, \"<STR_LIT>\"), node=node)<EOL><DEDENT>elif three1:<EOL><INDENT>self.add_message(self.MESSAGE_ID, args=(node.func.name, \"<STR_LIT>\"), node=node)<EOL><DEDENT><DEDENT>elif three1:<EOL><INDENT>self.add_message(self.MESSAGE_ID, args=(node.func.name, \"<STR_LIT>\"), node=node)<EOL><DEDENT>", "docstring": "Called for every function call in the source code.", "id": "f1714:c0:m0"}
{"signature": "def check_visitors(cls):", "body": "for name in dir(cls):<EOL><INDENT>if name.startswith(\"<STR_LIT>\"):<EOL><INDENT>if name[<NUM_LIT:6>:] not in CLASS_NAMES:<EOL><INDENT>raise Exception(u\"<STR_LIT>\".format(name))<EOL><DEDENT><DEDENT><DEDENT>return cls<EOL>", "docstring": "Check that a checker's visitors are correctly named.\n\n    A checker has methods named visit_NODETYPE, but it's easy to mis-name\n    a visit method, and it will never be called.  This decorator checks\n    the class to see that all of its visitors are named after an existing\n    node class.", "id": "f1716:m0"}
{"signature": "def register_checkers(linter):", "body": "linter.register_checker(UnicodeFormatStringChecker(linter))<EOL>", "docstring": "Register checkers.", "id": "f1718:m0"}
{"signature": "def process_module(self, node):", "body": "self._unicode_literals = \"<STR_LIT>\" in node.future_imports<EOL>", "docstring": "Called for each module being examined.", "id": "f1718:c0:m1"}
{"signature": "def register(linter):", "body": "<EOL>for mod in MODS:<EOL><INDENT>mod.register_checkers(linter)<EOL><DEDENT>", "docstring": "Registering additional checkers.\n    However, we will also use it to amend existing checker config.", "id": "f1719:m0"}
{"signature": "def validate(self):", "body": "with open(self.filename, \"<STR_LIT:rb>\") as f:<EOL><INDENT>text = f.read()<EOL><DEDENT>start_last_line = text.rfind(b\"<STR_LIT:\\n>\", <NUM_LIT:0>, -<NUM_LIT:1>)<EOL>if start_last_line == -<NUM_LIT:1>:<EOL><INDENT>return False<EOL><DEDENT>original_text = text[:start_last_line+<NUM_LIT:1>]<EOL>last_line = text[start_last_line+<NUM_LIT:1>:]<EOL>expected_hash = hashlib.sha1(original_text).hexdigest().encode('<STR_LIT:utf8>')<EOL>match = re.search(b\"<STR_LIT>\", last_line)<EOL>if not match:<EOL><INDENT>return False<EOL><DEDENT>actual_hash = match.group(<NUM_LIT:0>)<EOL>return actual_hash == expected_hash<EOL>", "docstring": "Check if the file still has its original contents.\n\nReturns True if the file is unchanged, False if it has been tampered\nwith.", "id": "f1722:c0:m2"}
{"signature": "def guess_array_memory_usage( bam_readers, dtype, use_strand=False ):", "body": "ARRAY_COUNT = <NUM_LIT:5><EOL>if not isinstance( bam_readers, list ):<EOL><INDENT>bam_readers = [ bam_readers ]<EOL><DEDENT>if isinstance( dtype, basestring ):<EOL><INDENT>dtype = NUMPY_DTYPES.get( dtype, None )<EOL><DEDENT>use_strand = use_strand + <NUM_LIT:1> <EOL>dtypes = guess_numpy_dtypes_from_idxstats( bam_readers, default=None, force_dtype=False )<EOL>if not [ dt for dt in dtypes if dt is not None ]:<EOL><INDENT>dtypes = guess_numpy_dtypes_from_idxstats( bam_readers, default=dtype or numpy.uint64, force_dtype=True )<EOL><DEDENT>elif dtype:<EOL><INDENT>dtypes = [ dtype if dt else None for dt in dtypes ]<EOL><DEDENT>read_groups = []<EOL>no_read_group = False<EOL>for bam in bam_readers:<EOL><INDENT>rgs = bam.get_read_groups()<EOL>if rgs:<EOL><INDENT>for rg in rgs:<EOL><INDENT>if rg not in read_groups:<EOL><INDENT>read_groups.append( rg )<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>no_read_group = True<EOL><DEDENT><DEDENT>read_groups = len( read_groups ) + no_read_group<EOL>max_ref_size = <NUM_LIT:0><EOL>array_byte_overhead = sys.getsizeof( numpy.zeros( ( <NUM_LIT:0> ), dtype=numpy.uint64 ) )<EOL>array_count = ARRAY_COUNT * use_strand * read_groups<EOL>for bam in bam_readers:<EOL><INDENT>for i, ( name, length ) in enumerate( bam.get_references() ):<EOL><INDENT>if dtypes[i] is not None:<EOL><INDENT>max_ref_size = max( max_ref_size, ( length + length * dtypes[i]().nbytes * array_count + ( array_byte_overhead * ( array_count + <NUM_LIT:1> ) ) ) )<EOL><DEDENT><DEDENT><DEDENT>return max_ref_size<EOL>", "docstring": "Returns an estimate for the maximum amount of memory to be consumed by numpy arrays.", "id": "f1726:m4"}
{"signature": "def _extractall(self, path=\"<STR_LIT:.>\", members=None):", "body": "import copy<EOL>import operator<EOL>from tarfile import ExtractError<EOL>directories = []<EOL>if members is None:<EOL><INDENT>members = self<EOL><DEDENT>for tarinfo in members:<EOL><INDENT>if tarinfo.isdir():<EOL><INDENT>directories.append(tarinfo)<EOL>tarinfo = copy.copy(tarinfo)<EOL>tarinfo.mode = <NUM_LIT>  <EOL><DEDENT>self.extract(tarinfo, path)<EOL><DEDENT>if sys.version_info < (<NUM_LIT:2>, <NUM_LIT:4>):<EOL><INDENT>def sorter(dir1, dir2):<EOL><INDENT>return cmp(dir1.name, dir2.name)<EOL><DEDENT>directories.sort(sorter)<EOL>directories.reverse()<EOL><DEDENT>else:<EOL><INDENT>directories.sort(key=operator.attrgetter('<STR_LIT:name>'), reverse=True)<EOL><DEDENT>for tarinfo in directories:<EOL><INDENT>dirpath = os.path.join(path, tarinfo.name)<EOL>try:<EOL><INDENT>self.chown(tarinfo, dirpath)<EOL>self.utime(tarinfo, dirpath)<EOL>self.chmod(tarinfo, dirpath)<EOL><DEDENT>except ExtractError:<EOL><INDENT>e = sys.exc_info()[<NUM_LIT:1>]<EOL>if self.errorlevel > <NUM_LIT:1>:<EOL><INDENT>raise<EOL><DEDENT>else:<EOL><INDENT>self._dbg(<NUM_LIT:1>, \"<STR_LIT>\" % e)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Extract all members from the archive to the current working\n       directory and set owner, modification time and permissions on\n       directories afterwards. `path' specifies a different directory\n       to extract to. `members' is optional and must be a subset of the\n       list returned by getmembers().", "id": "f1732:m17"}
{"signature": "def main(version=DEFAULT_VERSION):", "body": "options = _parse_args()<EOL>tarball = download_setuptools(download_base=options.download_base)<EOL>return _install(tarball, _build_install_args(options))<EOL>", "docstring": "Install or upgrade setuptools and EasyInstall", "id": "f1732:m20"}
{"signature": "def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,<EOL>to_dir=os.curdir, delay=<NUM_LIT:15>):", "body": "<EOL>to_dir = os.path.abspath(to_dir)<EOL>try:<EOL><INDENT>from urllib.request import urlopen<EOL><DEDENT>except ImportError:<EOL><INDENT>from urllib2 import urlopen<EOL><DEDENT>tgz_name = \"<STR_LIT>\" % version<EOL>url = download_base + tgz_name<EOL>saveto = os.path.join(to_dir, tgz_name)<EOL>src = dst = None<EOL>if not os.path.exists(saveto):  <EOL><INDENT>try:<EOL><INDENT>log.warn(\"<STR_LIT>\", url)<EOL>src = urlopen(url)<EOL>data = src.read()<EOL>dst = open(saveto, \"<STR_LIT:wb>\")<EOL>dst.write(data)<EOL><DEDENT>finally:<EOL><INDENT>if src:<EOL><INDENT>src.close()<EOL><DEDENT>if dst:<EOL><INDENT>dst.close()<EOL><DEDENT><DEDENT><DEDENT>return os.path.realpath(saveto)<EOL>", "docstring": "Download distribute from a specified location and return its filename\n\n    `version` should be a valid distribute version number that is available\n    as an egg for download under the `download_base` URL (which should end\n    with a '/'). `to_dir` is the directory where the egg will be downloaded.\n    `delay` is the number of seconds to pause before an actual download\n    attempt.", "id": "f1732:m4"}
{"signature": "def show():", "body": "<EOL>sys.stdout.write(colorful.bold('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.dimmed('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.italic('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.underlined('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.inversed('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.concealed('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.struckthrough('<STR_LIT>') + '<STR_LIT:\\n>')<EOL>sys.stdout.write(colorful.red('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.green('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.yellow('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.blue('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.magenta('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.cyan('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.white('<STR_LIT>') + '<STR_LIT:\\n>')<EOL>sys.stdout.write(colorful.on_red('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.on_green('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.on_yellow('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.on_blue('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.on_magenta('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.on_cyan('<STR_LIT>') + '<STR_LIT:U+0020>')<EOL>sys.stdout.write(colorful.on_white('<STR_LIT>') + '<STR_LIT:\\n>')<EOL>", "docstring": "Show the modifiers and colors", "id": "f1741:m0"}
{"signature": "def translate_style(style, colormode, colorpalette):", "body": "style_parts = iter(style.split('<STR_LIT:_>'))<EOL>ansi_start_sequence = []<EOL>ansi_end_sequence = []<EOL>try:<EOL><INDENT>part = None<EOL>for mod_part in style_parts:<EOL><INDENT>part = mod_part<EOL>if part not in ansi.MODIFIERS:<EOL><INDENT>break  <EOL><DEDENT>mod_start_code, mod_end_code = resolve_modifier_to_ansi_code(part, colormode)<EOL>ansi_start_sequence.append(mod_start_code)<EOL>ansi_end_sequence.append(mod_end_code)<EOL><DEDENT>else:  <EOL><INDENT>raise StopIteration()<EOL><DEDENT>if part != '<STR_LIT>':<EOL><INDENT>ansi_start_code, ansi_end_code = translate_colorname_to_ansi_code(<EOL>part, ansi.FOREGROUND_COLOR_OFFSET, colormode, colorpalette)<EOL>ansi_start_sequence.append(ansi_start_code)<EOL>ansi_end_sequence.append(ansi_end_code)<EOL>next(style_parts)<EOL><DEDENT>part = next(style_parts)<EOL>ansi_start_code, ansi_end_code = translate_colorname_to_ansi_code(<EOL>part, ansi.BACKGROUND_COLOR_OFFSET, colormode, colorpalette)<EOL>ansi_start_sequence.append(ansi_start_code)<EOL>ansi_end_sequence.append(ansi_end_code)<EOL><DEDENT>except StopIteration:  <EOL><INDENT>pass<EOL><DEDENT>return '<STR_LIT>'.join(ansi_start_sequence), '<STR_LIT>'.join(ansi_end_sequence)<EOL>", "docstring": "Translate the given style to an ANSI escape code\nsequence.\n\n``style`` examples are:\n\n* green\n* bold\n* red_on_black\n* bold_green\n* italic_yellow_on_cyan\n\n:param str style: the style to translate\n:param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``\n:parma dict colorpalette: the color palette to use for the color name mapping", "id": "f1747:m3"}
{"signature": "def update_palette(self, colorpalette):", "body": "self.colorpalette.update(colors.sanitize_color_palette(colorpalette))<EOL>", "docstring": "Update the currently active color palette\nwith the given color palette", "id": "f1747:c2:m10"}
{"signature": "@colorpalette.setter<EOL><INDENT>def colorpalette(self, colorpalette):<DEDENT>", "body": "if isinstance(colorpalette, str):  <EOL><INDENT>colorpalette = colors.parse_colors(colorpalette)<EOL><DEDENT>self._colorpalette = colors.sanitize_color_palette(colorpalette)<EOL>", "docstring": "Set the colorpalette which should be used", "id": "f1747:c2:m2"}
{"signature": "def str(self, string):", "body": "return ColorfulString(string, string)<EOL>", "docstring": "Create a new ColorfulString instance of the given\nunstyled string.\n\nThis method should be used to create a ColorfulString\nwhich is actually not styled yet but can safely be concatinated\nwith other ColorfulStrings like:\n\n>>> s = colorful.str('Hello ')\n>>> s =+ colorful.black('World')\n>>> str(s)\n'Hello \\033[30mWorld\\033[39m'\n\n:param str string: the string to use for the ColorfulString", "id": "f1747:c2:m13"}
{"signature": "def setup(self, colormode=None, colorpalette=None, extend_colors=False):", "body": "if colormode:<EOL><INDENT>self.colormode = colormode<EOL><DEDENT>if colorpalette:<EOL><INDENT>if extend_colors:<EOL><INDENT>self.update_palette(colorpalette)<EOL><DEDENT>else:<EOL><INDENT>self.colorpalette = colorpalette<EOL><DEDENT><DEDENT>", "docstring": "Setup this colorful object by setting a ``colormode`` and\nthe ``colorpalette`. The ``extend_colors`` flag is used\nto extend the currently active color palette instead of\nreplacing it.\n\n:param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``\n:parma dict colorpalette: the colorpalette to use. This ``dict`` should map\n                          color names to it's corresponding RGB value\n:param bool extend_colors: extend the active color palette instead of replacing it", "id": "f1747:c2:m3"}
{"signature": "def use_256_ansi_colors(self):", "body": "self.colormode = terminal.ANSI_256_COLORS<EOL>", "docstring": "Use 256 ANSI colors for this colorful object", "id": "f1747:c2:m7"}
{"signature": "def translate_rgb_to_ansi_code(red, green, blue, offset, colormode):", "body": "if colormode == terminal.NO_COLORS:  <EOL><INDENT>return '<STR_LIT>', '<STR_LIT>'<EOL><DEDENT>if colormode == terminal.ANSI_8_COLORS or colormode == terminal.ANSI_16_COLORS:<EOL><INDENT>color_code = ansi.rgb_to_ansi16(red, green, blue)<EOL>start_code = ansi.ANSI_ESCAPE_CODE.format(<EOL>code=color_code + offset - ansi.FOREGROUND_COLOR_OFFSET)<EOL>end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)<EOL>return start_code, end_code<EOL><DEDENT>if colormode == terminal.ANSI_256_COLORS:<EOL><INDENT>color_code = ansi.rgb_to_ansi256(red, green, blue)<EOL>start_code = ansi.ANSI_ESCAPE_CODE.format(code='<STR_LIT>'.format(<EOL>base=<NUM_LIT:8> + offset, code=color_code))<EOL>end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)<EOL>return start_code, end_code<EOL><DEDENT>if colormode == terminal.TRUE_COLORS:<EOL><INDENT>start_code = ansi.ANSI_ESCAPE_CODE.format(code='<STR_LIT>'.format(<EOL>base=<NUM_LIT:8> + offset, red=red, green=green, blue=blue))<EOL>end_code = ansi.ANSI_ESCAPE_CODE.format(code=offset + ansi.COLOR_CLOSE_OFFSET)<EOL>return start_code, end_code<EOL><DEDENT>raise ColorfulError('<STR_LIT>'.format(colormode))<EOL>", "docstring": "Translate the given RGB color into the appropriate ANSI escape code\nfor the given color mode.\nThe offset is used for the base color which is used.\n\nThe ``colormode`` has to be one of:\n    * 0: no colors / disabled\n    * 8: use ANSI 8 colors\n    * 16: use ANSI 16 colors (same as 8 but with brightness)\n    * 256: use ANSI 256 colors\n    * 0xFFFFFF / 16777215: use 16 Million true colors\n\n:param int red: the red channel value\n:param int green: the green channel value\n:param int blue: the blue channel value\n:param int offset: the offset to use for the base color\n:param int colormode: the color mode to use. See explanation above", "id": "f1747:m0"}
{"signature": "def resolve_modifier_to_ansi_code(modifiername, colormode):", "body": "if colormode == terminal.NO_COLORS:  <EOL><INDENT>return '<STR_LIT>', '<STR_LIT>'<EOL><DEDENT>try:<EOL><INDENT>start_code, end_code = ansi.MODIFIERS[modifiername]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise ColorfulError('<STR_LIT>'.format(<EOL>modifiername, ansi.MODIFIERS.keys()))<EOL><DEDENT>else:<EOL><INDENT>return ansi.ANSI_ESCAPE_CODE.format(<EOL>code=start_code), ansi.ANSI_ESCAPE_CODE.format(<EOL>code=end_code)<EOL><DEDENT>", "docstring": "Resolve the given modifier name to a valid\nANSI escape code.\n\n:param str modifiername: the name of the modifier to resolve\n:param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``\n\n:returns str: the ANSI escape code for the modifier\n\n:raises ColorfulError: if the given modifier name is invalid", "id": "f1747:m2"}
{"signature": "def use_true_colors(self):", "body": "self.colormode = terminal.TRUE_COLORS<EOL>", "docstring": "Use true colors for this colorful object", "id": "f1747:c2:m8"}
{"signature": "def style_string(string, ansi_style, colormode, nested=False):", "body": "ansi_start_code, ansi_end_code = ansi_style<EOL>if PY2:<EOL><INDENT>if isinstance(string, str):<EOL><INDENT>string = string.decode(DEFAULT_ENCODING)<EOL><DEDENT><DEDENT>string = UNICODE(string).replace(ansi.NEST_PLACEHOLDER, ansi_start_code)<EOL>return '<STR_LIT>'.format(<EOL>start_code=ansi_start_code,<EOL>string=string,<EOL>end_code=ansi_end_code,<EOL>nest_ph=ansi.NEST_PLACEHOLDER if nested else '<STR_LIT>')<EOL>", "docstring": "Style the given string according to the given\nANSI style string.\n\n:param str string: the string to style\n:param tuple ansi_style: the styling string returned by ``translate_style``\n:param int colormode: the color mode to use. See ``translate_rgb_to_ansi_code``\n\n:returns: a string containing proper ANSI sequence", "id": "f1747:m4"}
{"signature": "@property<EOL><INDENT>def colorpalette(self):<DEDENT>", "body": "return self._colorpalette<EOL>", "docstring": "Get the current used color palette", "id": "f1747:c2:m1"}
{"signature": "def parse_colors(path):", "body": "if path.endswith(\"<STR_LIT>\"):<EOL><INDENT>return parse_rgb_txt_file(path)<EOL><DEDENT>elif path.endswith(\"<STR_LIT>\"):<EOL><INDENT>return parse_json_color_file(path)<EOL><DEDENT>raise TypeError(\"<STR_LIT>\")<EOL>", "docstring": "Parse the given color files.\n\n    Supported are:\n        * .txt for X11 colors\n        * .json for colornames", "id": "f1749:m0"}
{"signature": "def split_phonemes(letter, onset=True, nucleus=True, coda=True):", "body": "if len(letter) != <NUM_LIT:1> or not is_hangul(letter):<EOL><INDENT>raise ValueError('<STR_LIT>' % letter)<EOL><DEDENT>offset = ord(letter) - FIRST_HANGUL_OFFSET<EOL>phonemes = [None] * <NUM_LIT:3><EOL>if onset:<EOL><INDENT>phonemes[<NUM_LIT:0>] = ONSETS[offset // (NUM_NUCLEUSES * NUM_CODAS)]<EOL><DEDENT>if nucleus:<EOL><INDENT>phonemes[<NUM_LIT:1>] = NUCLEUSES[(offset // NUM_CODAS) % NUM_NUCLEUSES]<EOL><DEDENT>if coda:<EOL><INDENT>phonemes[<NUM_LIT:2>] = CODAS[offset % NUM_CODAS]<EOL><DEDENT>return tuple(phonemes)<EOL>", "docstring": "Splits Korean phonemes as known as \"\uc790\uc18c\" from a Hangul letter.\n\n    :returns: (onset, nucleus, coda)\n    :raises ValueError: `letter` is not a Hangul single letter.", "id": "f1751:m3"}
{"signature": "def pick(word, morph, **kwargs):", "body": "return registry.pick(word, morph, **kwargs)<EOL>", "docstring": "Shortcut for :class:`ParticleRegistry.pick` of the default registry.", "id": "f1753:m2"}
{"signature": "def postfix(word, morph, **kwargs):", "body": "return registry.postfix(word, morph, **kwargs)<EOL>", "docstring": "Shortcut for :class:`ParticleRegistry.postfix` of the default registry.", "id": "f1753:m3"}
{"signature": "def guess_coda(word):", "body": "word = filter_only_significant(word)<EOL>return guess_coda_from_significant_word(word)<EOL>", "docstring": "Guesses the coda of the given word as correct as possible.  If it fails\n    to guess the coda, returns ``None``.", "id": "f1755:m0"}
{"signature": "def pick_coda_from_letter(letter):", "body": "try:<EOL><INDENT>__, __, coda =split_phonemes(letter, onset=False, nucleus=False, coda=True)<EOL><DEDENT>except ValueError:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return coda<EOL><DEDENT>", "docstring": "Picks only a coda from a Hangul letter.  It returns ``None`` if the\n    given letter is not Hangul.", "id": "f1755:m3"}
{"signature": "@cached_property<EOL><INDENT>def morphs(self):<DEDENT>", "body": "seen = set()<EOL>saw = seen.add<EOL>morphs = chain([self.morph1, self.morph2], self.tolerances)<EOL>unique_morphs = (x for x in morphs if x and not (x in seen or saw(x)))<EOL>return tuple(sorted(unique_morphs, key=len, reverse=True))<EOL>", "docstring": "The tuple containing the given morphs and all the possible tolerant\n        morphs.  Longer is first.", "id": "f1757:c0:m7"}
{"signature": "def __getitem__(self, key):", "body": "if isinstance(key, slice):<EOL><INDENT>word = key.start<EOL>morph = key.stop or self.morph1<EOL>tolerance_style = key.step or DEFAULT_TOLERANCE_STYLE<EOL><DEDENT>else:<EOL><INDENT>word, morph = key, self.morph1<EOL>tolerance_style = DEFAULT_TOLERANCE_STYLE<EOL><DEDENT>return self.allomorph(word, morph, tolerance_style)<EOL>", "docstring": "The syntax sugar to determine one of allomorphic morphs based on a\n        word::\n\n           eun = Particle(u'\uc740', u'\ub294')\n           assert eun[u'\ub098\uc624'] == u'\ub294'\n           assert eun[u'\ubaa8\ub9ac\uc548'] == u'\uc740'", "id": "f1757:c0:m5"}
{"signature": "def singleton_particle(*bases):", "body": "return with_metaclass(SingletonParticleMeta, SingletonParticle, *bases)<EOL>", "docstring": "Defines a singleton instance immediately when defining the class.  The\n    name of the class will refer the instance instead.", "id": "f1757:m0"}
{"signature": "def rule(self, coda):", "body": "if coda:<EOL><INDENT>return self.morph1<EOL><DEDENT>else:<EOL><INDENT>return self.morph2<EOL><DEDENT>", "docstring": "Determines one of allomorphic morphs based on a coda.", "id": "f1757:c0:m3"}
{"signature": "def _add_magic(self, data):", "body": "if self.magic:<EOL><INDENT>return self.magic + data<EOL><DEDENT>return data<EOL>", "docstring": "Add magic", "id": "f1760:c0:m27"}
{"signature": "@staticmethod<EOL><INDENT>def _aes_encrypt(data, algorithm, key):<DEDENT>", "body": "if algorithm['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>mode = AES.MODE_CBC<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>'<EOL>% algorithm['<STR_LIT>'])<EOL><DEDENT>iv_size = algorithm['<STR_LIT>']<EOL>block_size = iv_size<EOL>include_iv = True<EOL>if '<STR_LIT>'in algorithm and algorithm['<STR_LIT>']:<EOL><INDENT>if len(algorithm['<STR_LIT>']) != algorithm['<STR_LIT>']:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>iv_value = algorithm['<STR_LIT>']<EOL>include_iv = False<EOL><DEDENT>else:<EOL><INDENT>iv_value = get_random_bytes(iv_size)<EOL><DEDENT>numpad = block_size - (len(data) % block_size)<EOL>data = data + numpad * chr(numpad)<EOL>enc = AES.new(key, mode, iv_value).encrypt(data)<EOL>if include_iv:<EOL><INDENT>enc = iv_value + enc<EOL><DEDENT>return enc<EOL>", "docstring": "AES encrypt", "id": "f1760:c0:m38"}
{"signature": "def unseal(self, data, return_options=False):", "body": "data = self._remove_magic(data)<EOL>data = urlsafe_nopadding_b64decode(data)<EOL>options = self._read_header(data)<EOL>data = self._add_magic(data)<EOL>data = self._unsign_data(data, options)<EOL>data = self._remove_magic(data)<EOL>data = self._remove_header(data, options)<EOL>data = self._decrypt_data(data, options)<EOL>data = self._decompress_data(data, options)<EOL>data = self._unserialize_data(data, options)<EOL>if return_options:<EOL><INDENT>return data, options<EOL><DEDENT>else:<EOL><INDENT>return data<EOL><DEDENT>", "docstring": "Unseal data", "id": "f1760:c0:m13"}
{"signature": "def get_options(self):", "body": "return self.options<EOL>", "docstring": "Get options used for sealing", "id": "f1760:c0:m9"}
{"signature": "def _read_version(self, data):", "body": "version = ord(data[<NUM_LIT:0>])<EOL>if version not in self.VERSIONS:<EOL><INDENT>raise Exception('<STR_LIT>' % version)<EOL><DEDENT>return version<EOL>", "docstring": "Read header version from data", "id": "f1760:c0:m31"}
{"signature": "@staticmethod<EOL><INDENT>def _generate_key(pass_id, passphrases, salt, algorithm):<DEDENT>", "body": "if pass_id not in passphrases:<EOL><INDENT>raise Exception('<STR_LIT>' % pass_id)<EOL><DEDENT>passphrase = passphrases[pass_id]<EOL>if len(passphrase) < <NUM_LIT:32>:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>digestmod = EncryptedPickle._get_hashlib(algorithm['<STR_LIT>'])<EOL>encoder = PBKDF2(passphrase, salt,<EOL>iterations=algorithm['<STR_LIT>'],<EOL>digestmodule=digestmod)<EOL>return encoder.read(algorithm['<STR_LIT>'])<EOL>", "docstring": "Generate and return PBKDF2 key", "id": "f1760:c0:m34"}
{"signature": "def _unsign_data(self, data, options):", "body": "if options['<STR_LIT>'] not in self.signature_algorithms:<EOL><INDENT>raise Exception('<STR_LIT>'<EOL>% options['<STR_LIT>'])<EOL><DEDENT>signature_algorithm =self.signature_algorithms[options['<STR_LIT>']]<EOL>algorithm = self._get_algorithm_info(signature_algorithm)<EOL>key_salt = '<STR_LIT>'<EOL>if algorithm['<STR_LIT>']:<EOL><INDENT>key_salt = data[-algorithm['<STR_LIT>']:]<EOL>data = data[:-algorithm['<STR_LIT>']]<EOL><DEDENT>key = self._generate_key(options['<STR_LIT>'],<EOL>self.signature_passphrases, key_salt, algorithm)<EOL>data = self._decode(data, algorithm, key)<EOL>return data<EOL>", "docstring": "Verify and remove signature", "id": "f1760:c0:m19"}
{"signature": "def verify_signature(self, data):", "body": "data = self._remove_magic(data)<EOL>data = urlsafe_nopadding_b64decode(data)<EOL>options = self._read_header(data)<EOL>data = self._add_magic(data)<EOL>self._unsign_data(data, options)<EOL>", "docstring": "Verify sealed data signature", "id": "f1760:c0:m14"}
{"signature": "def set_magic(self, magic):", "body": "if magic is None or isinstance(magic, str):<EOL><INDENT>self.magic = magic<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Set magic (prefix)", "id": "f1760:c0:m10"}
{"signature": "def seal(self, data, options=None):", "body": "options = self._set_options(options)<EOL>data = self._serialize_data(data, options)<EOL>data = self._compress_data(data, options)<EOL>data = self._encrypt_data(data, options)<EOL>data = self._add_header(data, options)<EOL>data = self._add_magic(data)<EOL>data = self._sign_data(data, options)<EOL>data = self._remove_magic(data)<EOL>data = urlsafe_nopadding_b64encode(data)<EOL>data = self._add_magic(data)<EOL>return data<EOL>", "docstring": "Seal data", "id": "f1760:c0:m12"}
{"signature": "def _decode(self, data, algorithm, key=None):", "body": "if algorithm['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>verify_signature = data[-algorithm['<STR_LIT>']:]<EOL>data = data[:-algorithm['<STR_LIT>']]<EOL>signature = self._hmac_generate(data, algorithm, key)<EOL>if not const_equal(verify_signature, signature):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return data<EOL><DEDENT>elif algorithm['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>return self._aes_decrypt(data, algorithm, key)<EOL><DEDENT>elif algorithm['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>return data<EOL><DEDENT>elif algorithm['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>return json.loads(data)<EOL><DEDENT>elif algorithm['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>return data<EOL><DEDENT>elif algorithm['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>return self._zlib_decompress(data, algorithm)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>' % algorithm['<STR_LIT:type>'])<EOL><DEDENT>", "docstring": "Decode data with specific algorithm", "id": "f1760:c0:m17"}
{"signature": "def _decrypt_data(self, data, options):", "body": "if options['<STR_LIT>'] not in self.encryption_algorithms:<EOL><INDENT>raise Exception('<STR_LIT>'<EOL>% options['<STR_LIT>'])<EOL><DEDENT>encryption_algorithm =self.encryption_algorithms[options['<STR_LIT>']]<EOL>algorithm = self._get_algorithm_info(encryption_algorithm)<EOL>key_salt = '<STR_LIT>'<EOL>if algorithm['<STR_LIT>']:<EOL><INDENT>key_salt = data[-algorithm['<STR_LIT>']:]<EOL>data = data[:-algorithm['<STR_LIT>']]<EOL><DEDENT>key = self._generate_key(options['<STR_LIT>'],<EOL>self.encryption_passphrases, key_salt, algorithm)<EOL>data = self._decode(data, algorithm, key)<EOL>return data<EOL>", "docstring": "Decrypt data", "id": "f1760:c0:m21"}
{"signature": "def _set_options(self, options):", "body": "if not options:<EOL><INDENT>return self.options.copy()<EOL><DEDENT>options = options.copy()<EOL>if '<STR_LIT>' in options:<EOL><INDENT>self.set_magic(options['<STR_LIT>'])<EOL>del(options['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in options:<EOL><INDENT>flags = options['<STR_LIT>']<EOL>del(options['<STR_LIT>'])<EOL>for key, value in flags.iteritems():<EOL><INDENT>if not isinstance(value, bool):<EOL><INDENT>raise TypeError('<STR_LIT>' % key)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>flags = self.options['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT:info>' in options:<EOL><INDENT>del(options['<STR_LIT:info>'])<EOL><DEDENT>for key, value in options.iteritems():<EOL><INDENT>if not isinstance(value, int):<EOL><INDENT>raise TypeError('<STR_LIT>' % key)<EOL><DEDENT>if value < <NUM_LIT:0> or value > <NUM_LIT:255>:<EOL><INDENT>raise ValueError('<STR_LIT>' % key)<EOL><DEDENT><DEDENT>new_options = self.options.copy()<EOL>new_options.update(options)<EOL>new_options['<STR_LIT>'].update(flags)<EOL>return new_options<EOL>", "docstring": "Private function for setting options used for sealing", "id": "f1760:c0:m8"}
{"signature": "@staticmethod<EOL><INDENT>def _zlib_compress(data, algorithm):<DEDENT>", "body": "if algorithm['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>encoder = zlib.compressobj(algorithm['<STR_LIT>'], zlib.DEFLATED, -<NUM_LIT:15>)<EOL>compressed = encoder.compress(data)<EOL>compressed += encoder.flush()<EOL>return compressed<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>'<EOL>% algorithm['<STR_LIT>'])<EOL><DEDENT>", "docstring": "GZIP compress", "id": "f1760:c0:m40"}
{"signature": "def set_algorithms(self, signature=None, encryption=None,<EOL>serialization=None, compression=None):", "body": "self.signature_algorithms =self._update_dict(signature, self.DEFAULT_SIGNATURE)<EOL>self.encryption_algorithms =self._update_dict(encryption, self.DEFAULT_ENCRYPTION)<EOL>self.serialization_algorithms =self._update_dict(serialization, self.DEFAULT_SERIALIZATION)<EOL>self.compression_algorithms =self._update_dict(compression, self.DEFAULT_COMPRESSION)<EOL>", "docstring": "Set algorithms used for sealing. Defaults can not be overridden.", "id": "f1760:c0:m5"}
{"signature": "def threadFunc(root):", "body": "<EOL>th = threading.currentThread()<EOL>auto.Logger.WriteLine('<STR_LIT>'.format(th.ident, th.name), auto.ConsoleColor.Cyan)<EOL>time.sleep(<NUM_LIT:2>)<EOL>auto.InitializeUIAutomationInCurrentThread()<EOL>auto.GetConsoleWindow().CaptureToImage('<STR_LIT>')<EOL>newRoot = auto.GetRootControl()    <EOL>auto.EnumAndLogControl(newRoot, <NUM_LIT:1>)<EOL>auto.UninitializeUIAutomationInCurrentThread()<EOL>auto.Logger.WriteLine('<STR_LIT>'.format(th.ident, th.name), auto.ConsoleColor.Cyan)<EOL>", "docstring": "If you want to use functionalities related to Controls and Patterns in a new thread.\nYou must call InitializeUIAutomationInCurrentThread first in the thread\n    and call UninitializeUIAutomationInCurrentThread when the thread exits.\nBut you can't use use a Control or a Pattern created in a different thread.\nSo you can't create a Control or a Pattern in main thread and then pass it to a new thread and use it.", "id": "f1777:m0"}
{"signature": "@property<EOL><INDENT>def RowSpan(self) -> int:<DEDENT>", "body": "return self.pattern.CurrentRowSpan<EOL>", "docstring": "Property RowSpan.\nCall IUIAutomationGridItemPattern::get_CurrentRowSpan.\nReturn int, the number of rows spanned by the grid item.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgriditempattern-get_currentrowspan", "id": "f1782:c49:m5"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationvaluepattern", "id": "f1782:c75:m0"}
{"signature": "def RightClick(x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "SetCursorPos(x, y)<EOL>screenWidth, screenHeight = GetScreenSize()<EOL>mouse_event(MouseEventFlag.RightDown | MouseEventFlag.Absolute, x * <NUM_LIT> // screenWidth, y * <NUM_LIT> // screenHeight, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>time.sleep(<NUM_LIT>)<EOL>mouse_event(MouseEventFlag.RightUp | MouseEventFlag.Absolute, x * <NUM_LIT> // screenWidth, y * <NUM_LIT> // screenHeight, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>time.sleep(waitTime)<EOL>", "docstring": "Simulate mouse right click at point x, y.\nx: int.\ny: int.\nwaitTime: float.", "id": "f1782:m14"}
{"signature": "@property<EOL><INDENT>def CurrentView(self) -> int:<DEDENT>", "body": "return self.pattern.CurrentCurrentView<EOL>", "docstring": "Property CurrentView.\nCall IUIAutomationMultipleViewPattern::get_CurrentCurrentView.\nReturn int, the control-specific identifier of the current view of the control.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationmultipleviewpattern-get_currentcurrentview", "id": "f1782:c54:m1"}
{"signature": "def SetValue(self, value: str, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.SetValue(value) == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationTransformPattern2::IUIAutomationValuePattern::SetValue.\nSet the value of the element.\nvalue: str.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationvaluepattern-setvalue", "id": "f1782:c75:m3"}
{"signature": "@property<EOL><INDENT>def Orientation(self) -> int:<DEDENT>", "body": "return self.Element.CurrentOrientation<EOL>", "docstring": "Property Orientation.\nReturn int, a value in class `OrientationType`.\nCall IUIAutomationElement::get_CurrentOrientation.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentorientation", "id": "f1782:c78:m35"}
{"signature": "def SetCursorPos(x: int, y: int) -> bool:", "body": "return bool(ctypes.windll.user32.SetCursorPos(x, y))<EOL>", "docstring": "SetCursorPos from Win32.\nSet mouse cursor to point x, y.\nx: int.\ny: int.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m6"}
{"signature": "def RightClick(self, x: int = None, y: int = None, ratioX: float = <NUM_LIT:0.5>, ratioY: float = <NUM_LIT:0.5>, simulateMove: bool = True, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "point = self.MoveCursorToInnerPos(x, y, ratioX, ratioY, simulateMove)<EOL>if point:<EOL><INDENT>RightClick(point[<NUM_LIT:0>], point[<NUM_LIT:1>], waitTime)<EOL><DEDENT>", "docstring": "x: int, if < 0, right click self.BoundingRectangle.right + x, if not None, ignore ratioX.\ny: int, if < 0, right click self.BoundingRectangle.bottom + y, if not None, ignore ratioY.\nratioX: float.\nratioY: float.\nsimulateMove: bool, if True, first move cursor to control smoothly.\nwaitTime: float.\n\nRightClick(), RightClick(ratioX=0.5, ratioY=0.5): right click center.\nRightClick(10, 10): right click left+10, top+10.\nRightClick(-10, -10): right click right-10, bottom-10.", "id": "f1782:c78:m65"}
{"signature": "def FromControl(self, control: '<STR_LIT>', x: int = <NUM_LIT:0>, y: int = <NUM_LIT:0>, width: int = <NUM_LIT:0>, height: int = <NUM_LIT:0>) -> bool:", "body": "rect = control.BoundingRectangle<EOL>while rect.width() == <NUM_LIT:0> or rect.height() == <NUM_LIT:0>:<EOL><INDENT>control = control.GetParentControl()<EOL>if not control:<EOL><INDENT>return False<EOL><DEDENT>rect = control.BoundingRectangle<EOL><DEDENT>if width <= <NUM_LIT:0>:<EOL><INDENT>width = rect.width() + width<EOL><DEDENT>if height <= <NUM_LIT:0>:<EOL><INDENT>height = rect.height() + height<EOL><DEDENT>handle = control.NativeWindowHandle<EOL>if handle:<EOL><INDENT>left = x<EOL>top = y<EOL>right = left + width<EOL>bottom = top + height<EOL><DEDENT>else:<EOL><INDENT>while True:<EOL><INDENT>control = control.GetParentControl()<EOL>handle = control.NativeWindowHandle<EOL>if handle:<EOL><INDENT>pRect = control.BoundingRectangle<EOL>left = rect.left - pRect.left + x<EOL>top = rect.top - pRect.top + y<EOL>right = left + width<EOL>bottom = top + height<EOL>break<EOL><DEDENT><DEDENT><DEDENT>return self.FromHandle(handle, left, top, right, bottom)<EOL>", "docstring": "Capture a control to Bitmap.\ncontrol: `Control` or its subclass.\nx: int.\ny: int.\nwidth: int.\nheight: int.\nx, y: the point in control's internal position(from 0,0)\nwidth, height: image's width and height from x, y, use 0 for entire area,\nIf width(or height) < 0, image size will be control's width(or height) - width(or height).\nReturn bool, True if succeed otherwise False.", "id": "f1782:c42:m7"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtableitempattern", "id": "f1782:c65:m0"}
{"signature": "def GetGridItemPattern(self) -> GridItemPattern:", "body": "return self.GetPattern(PatternId.GridItemPattern)<EOL>", "docstring": "Return `GridItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c95:m3"}
{"signature": "def GetPixelColor(x: int, y: int, handle: int = <NUM_LIT:0>) -> int:", "body": "hdc = ctypes.windll.user32.GetWindowDC(ctypes.c_void_p(handle))<EOL>bgr = ctypes.windll.gdi32.GetPixel(hdc, x, y)<EOL>ctypes.windll.user32.ReleaseDC(ctypes.c_void_p(handle), hdc)<EOL>return bgr<EOL>", "docstring": "Get pixel color of a native window.\nx: int.\ny: int.\nhandle: int, the handle of a native window.\nReturn int, the bgr value of point (x,y).\nr = bgr & 0x0000FF\ng = (bgr & 0x00FF00) >> 8\nb = (bgr & 0xFF0000) >> 16\nIf handle is 0, get pixel from Desktop window(root control).\nNote:\nNot all devices support GetPixel.\nAn application should call GetDeviceCaps to determine whether a specified device supports this function.\nFor example, console window doesn't support.", "id": "f1782:m25"}
{"signature": "def EnumAndLogControl(control: Control, maxDepth: int = <NUM_LIT>, showAllName: bool = True, startDepth: int = <NUM_LIT:0>) -> None:", "body": "for c, d in WalkControl(control, True, maxDepth):<EOL><INDENT>LogControl(c, d + startDepth, showAllName)<EOL><DEDENT>", "docstring": "Print and log control and its descendants' propertyies.\ncontrol: `Control` or its subclass.\nmaxDepth: int, enum depth.\nshowAllName: bool, if False, print the first 30 characters of control.Name.\nstartDepth: int, control's current depth.", "id": "f1782:m83"}
{"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)<EOL>", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c83:m1"}
{"signature": "def SetTopmost(self, isTopmost: bool = True, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "if self.IsTopLevel():<EOL><INDENT>ret = SetWindowTopmost(self.NativeWindowHandle, isTopmost)<EOL>time.sleep(waitTime)<EOL>return ret<EOL><DEDENT>return False<EOL>", "docstring": "Set top level window topmost.\nisTopmost: bool.\nwaitTime: float.", "id": "f1782:c99:m0"}
{"signature": "def Toggle(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Toggle() == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationTogglePattern::Toggle.\nCycle through the toggle states of the control.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtogglepattern-toggle", "id": "f1782:c72:m2"}
{"signature": "@property<EOL><INDENT>def Role(self) -> int:<DEDENT>", "body": "return self.pattern.CurrentRole<EOL>", "docstring": "Property Role.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentRole.\nReturn int, a value in calss `AccessibleRole`, the Microsoft Active Accessibility role identifier.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currentrole", "id": "f1782:c53:m7"}
{"signature": "def GetParentControl(self) -> '<STR_LIT>':", "body": "ele = _AutomationClient.instance().ViewWalker.GetParentElement(self.Element)<EOL>return Control.CreateControlFromElement(ele)<EOL>", "docstring": "Return `Control` subclass or None.", "id": "f1782:c78:m50"}
{"signature": "def Navigate(self, direction: int) -> '<STR_LIT>':", "body": "ele = self.pattern.Navigate(direction)<EOL>return Control.CreateControlFromElement(ele)<EOL>", "docstring": "Call IUIAutomationCustomNavigationPattern::Navigate.\nGet the next control in the specified direction within the logical UI tree.\ndirection: int, a value in class `NavigateDirection`.\nReturn `Control` subclass or None.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationcustomnavigationpattern-navigate", "id": "f1782:c44:m1"}
{"signature": "def RemoveFromSelection(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.textRange.RemoveFromSelection() == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationTextRange::RemoveFromSelection.\nRemove the text range from an existing collection of selected text in a text container that supports multiple, disjoint selections.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-removefromselection", "id": "f1782:c67:m16"}
{"signature": "@property<EOL><INDENT>def Help(self) -> str:<DEDENT>", "body": "return self.pattern.CurrentHelp<EOL>", "docstring": "Property Help.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentHelp.\nReturn str, the Microsoft Active Accessibility help string for the element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currenthelp", "id": "f1782:c53:m4"}
{"signature": "def GetSelectionPattern(self) -> SelectionPattern:", "body": "return self.GetPattern(PatternId.SelectionPattern)<EOL>", "docstring": "Return `SelectionPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c118:m2"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationdockpattern", "id": "f1782:c45:m0"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtextchildpattern", "id": "f1782:c68:m0"}
{"signature": "@property<EOL><INDENT>def IsReadOnly(self) -> bool:<DEDENT>", "body": "return self.pattern.CurrentIsReadOnly<EOL>", "docstring": "Property IsReadOnly.\nCall IUIAutomationRangeValuePattern::get_CurrentIsReadOnly.\nReturn bool, indicates whether the value of the element can be changed.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationrangevaluepattern-get_currentisreadonly", "id": "f1782:c56:m1"}
{"signature": "@property<EOL><INDENT>def ZoomMinimum(self) -> float:<DEDENT>", "body": "return self.pattern.CurrentZoomMinimum<EOL>", "docstring": "Property ZoomMinimum.\nCall IUIAutomationTransformPattern2::get_CurrentZoomMinimum.\nReturn float, the minimum zoom level of the control's viewport.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-get_currentzoomminimum", "id": "f1782:c74:m4"}
{"signature": "def GetTablePattern(self) -> TablePattern:", "body": "return self.GetPattern(PatternId.TablePattern)<EOL>", "docstring": "Return `TablePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c85:m4"}
{"signature": "def FromFile(self, filePath: str) -> bool:", "body": "self.Release()<EOL>self._bitmap = _DllClient.instance().dll.BitmapFromFile(ctypes.c_wchar_p(filePath))<EOL>self._getsize()<EOL>return self._bitmap > <NUM_LIT:0><EOL>", "docstring": "Load image from a file.\nfilePath: str.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c42:m8"}
{"signature": "def ToFile(self, savePath: str) -> bool:", "body": "name, ext = os.path.splitext(savePath)<EOL>extMap = {'<STR_LIT>': '<STR_LIT>'<EOL>, '<STR_LIT>': '<STR_LIT>'<EOL>, '<STR_LIT>': '<STR_LIT>'<EOL>, '<STR_LIT>': '<STR_LIT>'<EOL>, '<STR_LIT>': '<STR_LIT>'<EOL>, '<STR_LIT>': '<STR_LIT>'<EOL>, '<STR_LIT>': '<STR_LIT>'<EOL>}<EOL>gdiplusImageFormat = extMap.get(ext.lower(), '<STR_LIT>')<EOL>return bool(_DllClient.instance().dll.BitmapToFile(self._bitmap, ctypes.c_wchar_p(savePath), ctypes.c_wchar_p(gdiplusImageFormat)))<EOL>", "docstring": "Save to a file.\nsavePath: str, should end with .bmp, .jpg, .jpeg, .png, .gif, .tif, .tiff.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c42:m9"}
{"signature": "def GetSubBitmap(self, x: int, y: int, width: int, height: int) -> '<STR_LIT>':", "body": "colors = self.GetPixelColorsOfRect(x, y, width, height)<EOL>bitmap = Bitmap(width, height)<EOL>bitmap.SetPixelColorsOfRect(<NUM_LIT:0>, <NUM_LIT:0>, width, height, colors)<EOL>return bitmap<EOL>", "docstring": "x: int.\ny: int.\nwidth: int.\nheight: int.\nReturn `Bitmap`, a sub bitmap of the input rect.", "id": "f1782:c42:m22"}
{"signature": "def SetFocus(self) -> bool:", "body": "return self.Element.SetFocus() == S_OK<EOL>", "docstring": "Call IUIAutomationElement::SetFocus.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-setfocus", "id": "f1782:c78:m44"}
{"signature": "@property<EOL><INDENT>def WindowInteractionState(self) -> int:<DEDENT>", "body": "return self.pattern.CurrentWindowInteractionState<EOL>", "docstring": "Property WindowInteractionState.\nCall IUIAutomationWindowPattern::get_CurrentWindowInteractionState.\nReturn int, a value in class `WindowInteractionState`,\n            the current state of the window for the purposes of user interaction.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-get_currentwindowinteractionstate", "id": "f1782:c77:m6"}
{"signature": "def GetInvokePattern(self) -> InvokePattern:", "body": "return self.GetPattern(PatternId.InvokePattern)<EOL>", "docstring": "Return `InvokePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c108:m2"}
{"signature": "def SetGlobalSearchTimeout(seconds: float) -> None:", "body": "global TIME_OUT_SECOND<EOL>TIME_OUT_SECOND = seconds<EOL>", "docstring": "seconds: float.\nTo make this available, you need explicitly import uiautomation:\n    from uiautomation import uiautomation as auto\n    auto.SetGlobalSearchTimeout(10)", "id": "f1782:m67"}
{"signature": "@property<EOL><INDENT>def ChildId(self) -> int:<DEDENT>", "body": "return self.pattern.CurrentChildId<EOL>", "docstring": "Property ChildId.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentChildId.\nReturn int, the Microsoft Active Accessibility child identifier for the element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currentchildid", "id": "f1782:c53:m1"}
{"signature": "def SetWindowVisualState(self, state: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.SetWindowVisualState(state) == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationWindowPattern::SetWindowVisualState.\nMinimize, maximize, or restore the window.\nstate: int, a value in class `WindowVisualState`.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-setwindowvisualstate", "id": "f1782:c77:m8"}
{"signature": "def SetValue(self, value: str, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.SetValue(value) == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationLegacyIAccessiblePattern::SetValue.\nSet the Microsoft Active Accessibility value property for the element.\nvalue: str.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-setvalue", "id": "f1782:c53:m14"}
{"signature": "def GetInvokePattern(self) -> InvokePattern:", "body": "return self.GetPattern(PatternId.InvokePattern)<EOL>", "docstring": "Return `InvokePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c95:m4"}
{"signature": "@property<EOL><INDENT>def ExtendedProperties(self) -> str:<DEDENT>", "body": "return self.pattern.CurrentExtendedProperties<EOL>", "docstring": "Property ExtendedProperties.\nCall IUIAutomationStylesPattern::get_CurrentExtendedProperties.\nReturn str, a localized string that contains the list of extended properties for an element in a document.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationstylespattern-get_currentextendedproperties", "id": "f1782:c63:m1"}
{"signature": "@property<EOL><INDENT>def DocumentRange(self) -> TextRange:<DEDENT>", "body": "return TextRange(self.pattern.DocumentRange)<EOL>", "docstring": "Property DocumentRange.\nCall IUIAutomationTextPattern::get_DocumentRange.\nReturn `TextRange`, a text range that encloses the main text of a document.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextpattern-get_documentrange", "id": "f1782:c70:m1"}
{"signature": "def WalkControl(control: Control, includeTop: bool = False, maxDepth: int = <NUM_LIT>):", "body": "if includeTop:<EOL><INDENT>yield control, <NUM_LIT:0><EOL><DEDENT>if maxDepth <= <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>depth = <NUM_LIT:0><EOL>child = control.GetFirstChildControl()<EOL>controlList = [child]<EOL>while depth >= <NUM_LIT:0>:<EOL><INDENT>lastControl = controlList[-<NUM_LIT:1>]<EOL>if lastControl:<EOL><INDENT>yield lastControl, depth + <NUM_LIT:1><EOL>child = lastControl.GetNextSiblingControl()<EOL>controlList[depth] = child<EOL>if depth + <NUM_LIT:1> < maxDepth:<EOL><INDENT>child = lastControl.GetFirstChildControl()<EOL>if child:<EOL><INDENT>depth += <NUM_LIT:1><EOL>controlList.append(child)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>del controlList[depth]<EOL>depth -= <NUM_LIT:1><EOL><DEDENT><DEDENT>", "docstring": "control: `Control` or its subclass.\nincludeTop: bool, if True, yield (control, 0) first.\nmaxDepth: int, enum depth.\nYield 2 items tuple(control: Control, depth: int).", "id": "f1782:m81"}
{"signature": "def GetPixelColorsOfRects(self, rects: list) -> list:", "body": "rects2 = [(x, y, x + width, y + height) for x, y, width, height in rects]<EOL>left, top, right, bottom = zip(*rects2)<EOL>left, top, right, bottom = min(left), min(top), max(right), max(bottom)<EOL>width, height = right - left, bottom - top<EOL>allColors = self.GetPixelColorsOfRect(left, top, width, height)<EOL>colorsOfRects = []<EOL>for x, y, w, h in rects:<EOL><INDENT>x -= left<EOL>y -= top<EOL>colors = []<EOL>for row in range(h):<EOL><INDENT>colors.extend(allColors[(y + row) * width + x:(y + row) * width + x + w])<EOL><DEDENT>colorsOfRects.append(colors)<EOL><DEDENT>return colorsOfRects<EOL>", "docstring": "rects: a list of rects, such as [(0,0,10,10), (10,10,20,20),(x,y,width,height)].\nReturn list, a list whose elements are ctypes.Array which is an iterable array of int values in argb.", "id": "f1782:c42:m20"}
{"signature": "def GetScrollItemPattern(self) -> ScrollItemPattern:", "body": "return self.GetPattern(PatternId.ScrollItemPattern)<EOL>", "docstring": "Return `ScrollItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c119:m3"}
{"signature": "@property<EOL><INDENT>def AutomationId(self) -> str:<DEDENT>", "body": "return self.Element.CurrentAutomationId<EOL>", "docstring": "Property AutomationId.\nCall IUIAutomationElement::get_CurrentAutomationId.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentautomationid", "id": "f1782:c78:m14"}
{"signature": "def GetTogglePattern(self) -> TogglePattern:", "body": "return self.GetPattern(PatternId.TogglePattern)<EOL>", "docstring": "Return `TogglePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c95:m6"}
{"signature": "def GetInvokePattern(self) -> InvokePattern:", "body": "return self.GetPattern(PatternId.InvokePattern)<EOL>", "docstring": "Return `InvokePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c80:m2"}
{"signature": "def GetGridPattern(self) -> GridPattern:", "body": "return self.GetPattern(PatternId.GridPattern)<EOL>", "docstring": "Return `GridPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c109:m1"}
{"signature": "def GetPixelColorsHorizontally(self, x: int, y: int, count: int) -> ctypes.Array:", "body": "arrayType = ctypes.c_uint32 * count<EOL>values = arrayType()<EOL>_DllClient.instance().dll.BitmapGetPixelsHorizontally(ctypes.c_size_t(self._bitmap), x, y, values, count)<EOL>return values<EOL>", "docstring": "x: int.\ny: int.\ncount: int.\nReturn `ctypes.Array`, an iterable array of int values in argb form point x,y horizontally.", "id": "f1782:c42:m12"}
{"signature": "def GetSelectionItemPattern(self) -> SelectionItemPattern:", "body": "return self.GetPattern(PatternId.SelectionItemPattern)<EOL>", "docstring": "Return `SelectionItemPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c102:m1"}
{"signature": "@property<EOL><INDENT>def SupportedTextSelection(self) -> bool:<DEDENT>", "body": "return bool(self.pattern.SupportedTextSelection)<EOL>", "docstring": "Property SupportedTextSelection.\nCall IUIAutomationTextPattern::get_SupportedTextSelection.\nReturn bool, specifies the type of text selection that is supported by the control.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextpattern-get_supportedtextselection", "id": "f1782:c70:m2"}
{"signature": "def Hide(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "return self.ShowWindow(SW.Hide, waitTime)<EOL>", "docstring": "Call native `ShowWindow(SW.Hide)`.\nwaitTime: float\nReturn bool, True if succeed otherwise False.", "id": "f1782:c78:m71"}
{"signature": "def GetDockPattern(self) -> DockPattern:", "body": "return self.GetPattern(PatternId.DockPattern)<EOL>", "docstring": "Return `DockPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c100:m1"}
{"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)<EOL>", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c97:m2"}
{"signature": "def __init__(self, textRange=None):", "body": "self.textRange = textRange<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtextrange", "id": "f1782:c67:m0"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtablepattern", "id": "f1782:c66:m0"}
{"signature": "def FindText(self, text: str, backward: bool, ignoreCase: bool) -> '<STR_LIT>':", "body": "textRange = self.textRange.FindText(text, int(backward), int(ignoreCase))<EOL>if textRange:<EOL><INDENT>return TextRange(textRange=textRange)<EOL><DEDENT>", "docstring": "Call IUIAutomationTextRange::FindText.\ntext: str,\nbackward: bool, True if the last occurring text range should be returned instead of the first; otherwise False.\nignoreCase: bool, True if case should be ignored; otherwise False.\nreturn `TextRange` or None, a text range subset that contains the specified text.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-findtext", "id": "f1782:c67:m7"}
{"signature": "def GetWindowPattern(self) -> WindowPattern:", "body": "return self.GetPattern(PatternId.WindowPattern)<EOL>", "docstring": "Return `WindowPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c117:m2"}
{"signature": "def ToBitmap(self, x: int = <NUM_LIT:0>, y: int = <NUM_LIT:0>, width: int = <NUM_LIT:0>, height: int = <NUM_LIT:0>) -> Bitmap:", "body": "bitmap = Bitmap()<EOL>bitmap.FromControl(self, x, y, width, height)<EOL>return bitmap<EOL>", "docstring": "Capture control to a Bitmap object.\nx, y: int, the point in control's internal position(from 0,0).\nwidth, height: int, image's width and height from x, y, use 0 for entire area.\n               If width(or height) < 0, image size will be control's width(or height) - width(or height).", "id": "f1782:c78:m78"}
{"signature": "def __init__(self, width: int = <NUM_LIT:0>, height: int = <NUM_LIT:0>):", "body": "self._width = width<EOL>self._height = height<EOL>self._bitmap = <NUM_LIT:0><EOL>if width > <NUM_LIT:0> and height > <NUM_LIT:0>:<EOL><INDENT>self._bitmap = _DllClient.instance().dll.BitmapCreate(width, height)<EOL><DEDENT>", "docstring": "Create a black bimap of size(width, height).", "id": "f1782:c42:m0"}
{"signature": "def keybd_event(bVk: int, bScan: int, dwFlags: int, dwExtraInfo: int) -> None:", "body": "ctypes.windll.user32.keybd_event(bVk, bScan, dwFlags, dwExtraInfo)<EOL>", "docstring": "keybd_event from Win32.", "id": "f1782:m9"}
{"signature": "def Select(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Select() == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationSelectionItemPattern::Select.\nClear any selected items and then select the current element.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationselectionitempattern-select", "id": "f1782:c59:m5"}
{"signature": "def GetFirstChildControl(self) -> '<STR_LIT>':", "body": "ele = _AutomationClient.instance().ViewWalker.GetFirstChildElement(self.Element)<EOL>return Control.CreateControlFromElement(ele)<EOL>", "docstring": "Return `Control` subclass or None.", "id": "f1782:c78:m51"}
{"signature": "def Click(x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "SetCursorPos(x, y)<EOL>screenWidth, screenHeight = GetScreenSize()<EOL>mouse_event(MouseEventFlag.LeftDown | MouseEventFlag.Absolute, x * <NUM_LIT> // screenWidth, y * <NUM_LIT> // screenHeight, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>time.sleep(<NUM_LIT>)<EOL>mouse_event(MouseEventFlag.LeftUp | MouseEventFlag.Absolute, x * <NUM_LIT> // screenWidth, y * <NUM_LIT> // screenHeight, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>time.sleep(waitTime)<EOL>", "docstring": "Simulate mouse click at point x, y.\nx: int.\ny: int.\nwaitTime: float.", "id": "f1782:m12"}
{"signature": "def FindAttribute(self, textAttributeId: int, val, backward: bool) -> '<STR_LIT>':", "body": "textRange = self.textRange.FindAttribute(textAttributeId, val, int(backward))<EOL>if textRange:<EOL><INDENT>return TextRange(textRange=textRange)<EOL><DEDENT>", "docstring": "Call IUIAutomationTextRange::FindAttribute.\ntextAttributeID: int, a value in class `TextAttributeId`.\nval: COM VARIANT according to textAttributeId? todo.\nbackward: bool, True if the last occurring text range should be returned instead of the first; otherwise False.\nreturn `TextRange` or None, a text range subset that has the specified text attribute value.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-findattribute", "id": "f1782:c67:m6"}
{"signature": "def MoveToCenter(self) -> bool:", "body": "if self.IsTopLevel():<EOL><INDENT>rect = self.BoundingRectangle<EOL>screenWidth, screenHeight = GetScreenSize()<EOL>x, y = (screenWidth - rect.width()) // <NUM_LIT:2>, (screenHeight - rect.height()) // <NUM_LIT:2><EOL>if x < <NUM_LIT:0>: x = <NUM_LIT:0><EOL>if y < <NUM_LIT:0>: y = <NUM_LIT:0><EOL>return SetWindowPos(self.NativeWindowHandle, SWP.HWND_Top, x, y, <NUM_LIT:0>, <NUM_LIT:0>, SWP.SWP_NoSize)<EOL><DEDENT>return False<EOL>", "docstring": "Move window to screen center.", "id": "f1782:c99:m8"}
{"signature": "def Disappears(self, maxSearchSeconds: float = <NUM_LIT:5>, searchIntervalSeconds: float = SEARCH_INTERVAL, printIfNotDisappear: bool = False) -> bool:", "body": "global DEBUG_EXIST_DISAPPEAR<EOL>start = ProcessTime()<EOL>while True:<EOL><INDENT>temp = DEBUG_EXIST_DISAPPEAR<EOL>DEBUG_EXIST_DISAPPEAR = False  <EOL>if not self.Exists(<NUM_LIT:0>, <NUM_LIT:0>, False):<EOL><INDENT>DEBUG_EXIST_DISAPPEAR = temp<EOL>return True<EOL><DEDENT>DEBUG_EXIST_DISAPPEAR = temp<EOL>remain = start + maxSearchSeconds - ProcessTime()<EOL>if remain > <NUM_LIT:0>:<EOL><INDENT>time.sleep(min(remain, searchIntervalSeconds))<EOL><DEDENT>else:<EOL><INDENT>if printIfNotDisappear or DEBUG_EXIST_DISAPPEAR:<EOL><INDENT>Logger.ColorfullyWriteLine(self.GetColorfulSearchPropertiesStr() + '<STR_LIT>')<EOL><DEDENT>return False<EOL><DEDENT><DEDENT>", "docstring": "maxSearchSeconds: float\nsearchIntervalSeconds: float\nCheck if control disappears every searchIntervalSeconds seconds in maxSearchSeconds seconds.\nReturn bool, True if control disappears.", "id": "f1782:c78:m59"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationitemcontainerpattern", "id": "f1782:c52:m0"}
{"signature": "def WaitForInputIdle(self, milliseconds: int) -> bool:", "body": "return self.pattern.WaitForInputIdle(milliseconds) == S_OK<EOL>", "docstring": "Call IUIAutomationWindowPattern::WaitForInputIdle.\nCause the calling code to block for the specified time or\n    until the associated process enters an idle state, whichever completes first.\nmilliseconds: int.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-waitforinputidle", "id": "f1782:c77:m9"}
{"signature": "@property<EOL><INDENT>def Author(self) -> str:<DEDENT>", "body": "return self.pattern.CurrentAuthor<EOL>", "docstring": "Property Author.\nCall IUIAutomationAnnotationPattern::get_CurrentAuthor.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationannotationpattern-get_currentauthor", "id": "f1782:c43:m3"}
{"signature": "@property<EOL><INDENT>def LocalizedControlType(self) -> str:<DEDENT>", "body": "return self.Element.CurrentLocalizedControlType<EOL>", "docstring": "Property LocalizedControlType.\nCall IUIAutomationElement::get_CurrentLocalizedControlType.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentlocalizedcontroltype", "id": "f1782:c78:m32"}
{"signature": "def GetRootControl() -> PaneControl:", "body": "return Control.CreateControlFromElement(_AutomationClient.instance().IUIAutomation.GetRootElement())<EOL>", "docstring": "Get root control, the Desktop window.\nReturn `PaneControl`.", "id": "f1782:m71"}
{"signature": "def WindowFromPoint(x: int, y: int) -> int:", "body": "return ctypes.windll.user32.WindowFromPoint(ctypes.wintypes.POINT(x, y))<EOL>", "docstring": "WindowFromPoint from Win32.\nReturn int, a native window handle.", "id": "f1782:m4"}
{"signature": "@property<EOL><INDENT>def HelpText(self) -> str:<DEDENT>", "body": "return self.Element.CurrentHelpText<EOL>", "docstring": "Property HelpText.\nCall IUIAutomationElement::get_CurrentHelpText.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currenthelptext", "id": "f1782:c78:m21"}
{"signature": "@property<EOL><INDENT>def ProcessId(self) -> int:<DEDENT>", "body": "return self.Element.CurrentProcessId<EOL>", "docstring": "Property ProcessId.\nCall IUIAutomationElement::get_CurrentProcessId.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentprocessid", "id": "f1782:c78:m36"}
{"signature": "def GetConsoleWindow() -> WindowControl:", "body": "return ControlFromHandle(ctypes.windll.kernel32.GetConsoleWindow())<EOL>", "docstring": "Return `WindowControl`, a console window that runs python.", "id": "f1782:m74"}
{"signature": "@staticmethod<EOL><INDENT>def ColorfullyWrite(log: str, consoleColor: int = -<NUM_LIT:1>, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None) -> None:<DEDENT>", "body": "text = []<EOL>start = <NUM_LIT:0><EOL>while True:<EOL><INDENT>index1 = log.find('<STR_LIT>', start)<EOL>if index1 >= <NUM_LIT:0>:<EOL><INDENT>if index1 > start:<EOL><INDENT>text.append((log[start:index1], consoleColor))<EOL><DEDENT>index2 = log.find('<STR_LIT:>>', index1)<EOL>colorName = log[index1+<NUM_LIT:7>:index2]<EOL>index3 = log.find('<STR_LIT>', index2 + <NUM_LIT:1>)<EOL>text.append((log[index2 + <NUM_LIT:1>:index3], Logger.ColorNames[colorName]))<EOL>start = index3 + <NUM_LIT:8><EOL><DEDENT>else:<EOL><INDENT>if start < len(log):<EOL><INDENT>text.append((log[start:], consoleColor))<EOL><DEDENT>break<EOL><DEDENT><DEDENT>for t, c in text:<EOL><INDENT>Logger.Write(t, c, writeToFile, printToStdout, logFile)<EOL><DEDENT>", "docstring": "log: str.\nconsoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.\nwriteToFile: bool.\nprintToStdout: bool.\nlogFile: str, log file path.\nColorfullyWrite('Hello <Color=Green>Green</Color> !!!'), color name must be in Logger.ColorNames.", "id": "f1782:c41:m3"}
{"signature": "def GetAnnotationObjects(self) -> list:", "body": "eleArray = self.pattern.GetCurrentAnnotationObjects()<EOL>if eleArray:<EOL><INDENT>controls = []<EOL>for i in range(eleArray.Length):<EOL><INDENT>ele = eleArray.GetElement(i)<EOL>con = Control.CreateControlFromElement(element=ele)<EOL>if con:<EOL><INDENT>controls.append(con)<EOL><DEDENT><DEDENT>return controls<EOL><DEDENT>return []<EOL>", "docstring": "Call IUIAutomationSelectionPattern::GetCurrentAnnotationObjects.\nReturn list, a list of `Control` subclasses representing the annotations associated with this spreadsheet cell.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationspreadsheetitempattern-getcurrentannotationobjects", "id": "f1782:c61:m2"}
{"signature": "def GetActiveComposition(self) -> TextRange:", "body": "textRange = self.pattern.GetActiveComposition()<EOL>if textRange:<EOL><INDENT>return TextRange(textRange=textRange)<EOL><DEDENT>", "docstring": "Call IUIAutomationTextEditPattern::GetActiveComposition.\nReturn `TextRange` or None, the active composition.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtexteditpattern-getactivecomposition", "id": "f1782:c69:m1"}
{"signature": "def GetTableItemPattern(self) -> TableItemPattern:", "body": "return self.GetPattern(PatternId.TableItemPattern)<EOL>", "docstring": "Return `TableItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c93:m2"}
{"signature": "def ControlFromPoint(x: int, y: int) -> Control:", "body": "element = _AutomationClient.instance().IUIAutomation.ElementFromPoint(ctypes.wintypes.POINT(x, y))<EOL>return Control.CreateControlFromElement(element)<EOL>", "docstring": "Call IUIAutomation ElementFromPoint x,y. May return None if mouse is over cmd's title bar icon.\nReturn `Control` subclass or None.", "id": "f1782:m75"}
{"signature": "def SetWindowLong(handle: int, index: int, value: int) -> int:", "body": "return ctypes.windll.user32.SetWindowLongW(ctypes.c_void_p(handle), index, value)<EOL>", "docstring": "SetWindowLong from Win32.\nhandle: int, the handle of a native window.\nindex: int.\nvalue: int.\nReturn int, the previous value before set.", "id": "f1782:m33"}
{"signature": "def ShowDesktop(waitTime: float = <NUM_LIT:1>) -> None:", "body": "SendKeys('<STR_LIT>')<EOL>time.sleep(waitTime)<EOL>", "docstring": "Show Desktop by pressing win + d", "id": "f1782:m86"}
{"signature": "def Move(self, unit: int, count: int, waitTime: float = OPERATION_WAIT_TIME) -> int:", "body": "ret = self.textRange.Move(unit, count)<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationTextRange::Move.\nMove the text range forward or backward by the specified number of text units.\nunit: int, a value in class `TextUnit`.\ncount: int, the number of text units to move.\n       A positive value moves the text range forward.\n       A negative value moves the text range backward. Zero has no effect.\nwaitTime: float.\nReturn: int, the number of text units actually moved.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-move", "id": "f1782:c67:m13"}
{"signature": "def Move(self, x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Move(x, y) == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationTransformPattern::Move.\nMove the UI Automation element.\nx: int.\ny: int.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern-move", "id": "f1782:c73:m4"}
{"signature": "def GetGrabbedItems(self) -> list:", "body": "eleArray = self.pattern.GetCurrentGrabbedItems()<EOL>if eleArray:<EOL><INDENT>controls = []<EOL>for i in range(eleArray.Length):<EOL><INDENT>ele = eleArray.GetElement(i)<EOL>con = Control.CreateControlFromElement(element=ele)<EOL>if con:<EOL><INDENT>controls.append(con)<EOL><DEDENT><DEDENT>return controls<EOL><DEDENT>return []<EOL>", "docstring": "Call IUIAutomationDragPattern::GetCurrentGrabbedItems.\nReturn list, a list of `Control` subclasses that represent the full set of items\n             that the user is dragging as part of a drag operation.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdragpattern-getcurrentgrabbeditems", "id": "f1782:c46:m4"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationscrollitempattern", "id": "f1782:c57:m0"}
{"signature": "@property<EOL><INDENT>def CanMaximize(self) -> bool:<DEDENT>", "body": "return bool(self.pattern.CurrentCanMaximize)<EOL>", "docstring": "Property CanMaximize.\nCall IUIAutomationWindowPattern::get_CurrentCanMaximize.\nReturn bool, indicates whether the window can be maximized.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-get_currentcanmaximize", "id": "f1782:c77:m2"}
{"signature": "def WalkTree(top, getChildren: Callable = None, getFirstChild: Callable = None, getNextSibling: Callable = None, yieldCondition: Callable = None, includeTop: bool = False, maxDepth: int = <NUM_LIT>):", "body": "if maxDepth <= <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>depth = <NUM_LIT:0><EOL>if getChildren:<EOL><INDENT>if includeTop:<EOL><INDENT>if not yieldCondition or yieldCondition(top, <NUM_LIT:0>):<EOL><INDENT>yield top, <NUM_LIT:0>, <NUM_LIT:0><EOL><DEDENT><DEDENT>children = getChildren(top)<EOL>childList = [children]<EOL>while depth >= <NUM_LIT:0>:   <EOL><INDENT>lastItems = childList[-<NUM_LIT:1>]<EOL>if lastItems:<EOL><INDENT>if not yieldCondition or yieldCondition(lastItems[<NUM_LIT:0>], depth + <NUM_LIT:1>):<EOL><INDENT>yield lastItems[<NUM_LIT:0>], depth + <NUM_LIT:1>, len(lastItems) - <NUM_LIT:1><EOL><DEDENT>if depth + <NUM_LIT:1> < maxDepth:<EOL><INDENT>children = getChildren(lastItems[<NUM_LIT:0>])<EOL>if children:<EOL><INDENT>depth += <NUM_LIT:1><EOL>childList.append(children)<EOL><DEDENT><DEDENT>del lastItems[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>del childList[depth]<EOL>depth -= <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>elif getFirstChild and getNextSibling:<EOL><INDENT>if includeTop:<EOL><INDENT>if not yieldCondition or yieldCondition(top, <NUM_LIT:0>):<EOL><INDENT>yield top, <NUM_LIT:0><EOL><DEDENT><DEDENT>child = getFirstChild(top)<EOL>childList = [child]<EOL>while depth >= <NUM_LIT:0>:  <EOL><INDENT>lastItem = childList[-<NUM_LIT:1>]<EOL>if lastItem:<EOL><INDENT>if not yieldCondition or yieldCondition(lastItem, depth + <NUM_LIT:1>):<EOL><INDENT>yield lastItem, depth + <NUM_LIT:1><EOL><DEDENT>child = getNextSibling(lastItem)<EOL>childList[depth] = child<EOL>if depth + <NUM_LIT:1> < maxDepth:<EOL><INDENT>child = getFirstChild(lastItem)<EOL>if child:<EOL><INDENT>depth += <NUM_LIT:1><EOL>childList.append(child)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>del childList[depth]<EOL>depth -= 
<NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Walk a tree not using recursive algorithm.\ntop: a tree node.\ngetChildren: function(treeNode) -> list.\ngetNextSibling: function(treeNode) -> treeNode.\ngetNextSibling: function(treeNode) -> treeNode.\nyieldCondition: function(treeNode, depth) -> bool.\nincludeTop: bool, if True yield top first.\nmaxDepth: int, enum depth.\n\nIf getChildren is valid, ignore getFirstChild and getNextSibling,\n    yield 3 items tuple: (treeNode, depth, remain children count in current depth).\nIf getChildren is not valid, using getFirstChild and getNextSibling,\n    yield 2 items tuple: (treeNode, depth).\nIf yieldCondition is not None, only yield tree nodes that yieldCondition(treeNode, depth)->bool returns True.\n\nFor example:\ndef GetDirChildren(dir_):\n    if os.path.isdir(dir_):\n        return [os.path.join(dir_, it) for it in os.listdir(dir_)]\nfor it, depth, leftCount in WalkTree('D:\\\\', getChildren= GetDirChildren):\n    print(it, depth, leftCount)", "id": "f1782:m70"}
{"signature": "@property<EOL><INDENT>def ExpandCollapseState(self) -> int:<DEDENT>", "body": "return self.pattern.CurrentExpandCollapseState<EOL>", "docstring": "Property ExpandCollapseState.\nCall IUIAutomationExpandCollapsePattern::get_CurrentExpandCollapseState.\nReturn int, a value in class ExpandCollapseState.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationexpandcollapsepattern-get_currentexpandcollapsestate", "id": "f1782:c48:m1"}
{"signature": "def GetValuePattern(self) -> ValuePattern:", "body": "return self.GetPattern(PatternId.ValuePattern)<EOL>", "docstring": "Return `ValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c107:m3"}
{"signature": "def GetLastChildControl(self) -> '<STR_LIT>':", "body": "ele = _AutomationClient.instance().ViewWalker.GetLastChildElement(self.Element)<EOL>return Control.CreateControlFromElement(ele)<EOL>", "docstring": "Return `Control` subclass or None.", "id": "f1782:c78:m52"}
{"signature": "def GetSupportedViews(self) -> list:", "body": "return self.pattern.GetCurrentSupportedViews()<EOL>", "docstring": "Call IUIAutomationMultipleViewPattern::GetCurrentSupportedViews, todo.\nReturn list, a list of int, control-specific view identifiers.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationmultipleviewpattern-getcurrentsupportedviews", "id": "f1782:c54:m2"}
{"signature": "@property<EOL><INDENT>def ContainingGrid(self) -> '<STR_LIT>':<DEDENT>", "body": "return Control.CreateControlFromElement(self.pattern.CurrentContainingGrid)<EOL>", "docstring": "Property ContainingGrid.\nCall IUIAutomationGridItemPattern::get_CurrentContainingGrid.\nReturn `Control` subclass, the element that contains the grid item.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgriditempattern-get_currentcontaininggrid", "id": "f1782:c49:m3"}
{"signature": "def RangeFromChild(self, child) -> TextRange:", "body": "textRange = self.pattern.RangeFromChild(Control.Element)<EOL>if textRange:<EOL><INDENT>return TextRange(textRange=textRange)<EOL><DEDENT>", "docstring": "Call IUIAutomationTextPattern::RangeFromChild.\nchild: `Control` or its subclass.\nReturn `TextRange` or None, a text range enclosing a child element such as an image,\n    hyperlink, Microsoft Excel spreadsheet, or other embedded object.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextpattern-rangefromchild", "id": "f1782:c70:m5"}
{"signature": "@property<EOL><INDENT>def Minimum(self) -> float:<DEDENT>", "body": "return self.pattern.CurrentMinimum<EOL>", "docstring": "Property Minimum.\nCall IUIAutomationRangeValuePattern::get_CurrentMinimum.\nReturn float, the minimum value of the control.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationrangevaluepattern-get_currentminimum", "id": "f1782:c56:m4"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationspreadsheetpattern", "id": "f1782:c62:m0"}
{"signature": "def GetSelectionPattern(self) -> SelectionPattern:", "body": "return self.GetPattern(PatternId.SelectionPattern)<EOL>", "docstring": "Return `SelectionPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c94:m4"}
{"signature": "@property<EOL><INDENT>def AriaProperties(self) -> str:<DEDENT>", "body": "return self.Element.CurrentAriaProperties<EOL>", "docstring": "Property AriaProperties.\nCall IUIAutomationElement::get_CurrentAriaProperties.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentariaproperties", "id": "f1782:c78:m12"}
{"signature": "def Resize(self, width: int, height: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Resize(width, height) == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationTransformPattern::Resize.\nResize the UI Automation element.\nwidth: int.\nheight: int.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern-resize", "id": "f1782:c73:m5"}
{"signature": "@staticmethod<EOL><INDENT>def Write(log: Any, consoleColor: int = ConsoleColor.Default, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None, printTruncateLen: int = <NUM_LIT:0>) -> None:<DEDENT>", "body": "if not isinstance(log, str):<EOL><INDENT>log = str(log)<EOL><DEDENT>if printToStdout and sys.stdout:<EOL><INDENT>isValidColor = (consoleColor >= ConsoleColor.Black and consoleColor <= ConsoleColor.White)<EOL>if isValidColor:<EOL><INDENT>SetConsoleColor(consoleColor)<EOL><DEDENT>try:<EOL><INDENT>if printTruncateLen > <NUM_LIT:0> and len(log) > printTruncateLen:<EOL><INDENT>sys.stdout.write(log[:printTruncateLen] + '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>sys.stdout.write(log)<EOL><DEDENT><DEDENT>except Exception as ex:<EOL><INDENT>SetConsoleColor(ConsoleColor.Red)<EOL>isValidColor = True<EOL>sys.stdout.write(ex.__class__.__name__ + '<STR_LIT>')<EOL>if log.endswith('<STR_LIT:\\n>'):<EOL><INDENT>sys.stdout.write('<STR_LIT:\\n>')<EOL><DEDENT><DEDENT>if isValidColor:<EOL><INDENT>ResetConsoleColor()<EOL><DEDENT>sys.stdout.flush()<EOL><DEDENT>if not writeToFile:<EOL><INDENT>return<EOL><DEDENT>fileName = logFile if logFile else Logger.FileName<EOL>try:<EOL><INDENT>fout = open(fileName, '<STR_LIT>', encoding='<STR_LIT:utf-8>')<EOL>fout.write(log)<EOL><DEDENT>except Exception as ex:<EOL><INDENT>if sys.stdout:<EOL><INDENT>sys.stdout.write(ex.__class__.__name__ + '<STR_LIT>')<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>if fout:<EOL><INDENT>fout.close()<EOL><DEDENT><DEDENT>", "docstring": "log: any type.\nconsoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.\nwriteToFile: bool.\nprintToStdout: bool.\nlogFile: str, log file path.\nprintTruncateLen: int, if <= 0, log is not truncated when print.", "id": "f1782:c41:m1"}
{"signature": "def GetRangeValuePattern(self) -> RangeValuePattern:", "body": "return self.GetPattern(PatternId.RangeValuePattern)<EOL>", "docstring": "Return `RangeValuePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c103:m1"}
{"signature": "def Select(self, flagsSelect: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Select(flagsSelect) == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationLegacyIAccessiblePattern::Select.\nPerform a Microsoft Active Accessibility selection.\nflagsSelect: int, a value in `AccessibleSelection`.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-select", "id": "f1782:c53:m13"}
{"signature": "def GetTransformPattern(self) -> TransformPattern:", "body": "return self.GetPattern(PatternId.TransformPattern)<EOL>", "docstring": "Return `TransformPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c90:m1"}
{"signature": "def MetroClose(self, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "if self.ClassName == METRO_WINDOW_CLASS_NAME:<EOL><INDENT>screenWidth, screenHeight = GetScreenSize()<EOL>MoveTo(screenWidth // <NUM_LIT:2>, <NUM_LIT:0>, waitTime=<NUM_LIT:0>)<EOL>DragDrop(screenWidth // <NUM_LIT:2>, <NUM_LIT:0>, screenWidth // <NUM_LIT:2>, screenHeight, waitTime=waitTime)<EOL><DEDENT>else:<EOL><INDENT>Logger.WriteLine('<STR_LIT>', ConsoleColor.Yellow)<EOL><DEDENT>", "docstring": "Only work on Windows 8/8.1, if current window is Metro UI.\nwaitTime: float.", "id": "f1782:c120:m4"}
{"signature": "def GetGridItemPattern(self) -> GridItemPattern:", "body": "return self.GetPattern(PatternId.GridItemPattern)<EOL>", "docstring": "Return `GridItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c93:m1"}
{"signature": "def ControlsAreSame(control1: Control, control2: Control) -> bool:", "body": "return bool(_AutomationClient.instance().IUIAutomation.CompareElements(control1.Element, control2.Element))<EOL>", "docstring": "control1: `Control` or its subclass.\ncontrol2: `Control` or its subclass.\nReturn bool, True if control1 and control2 represent the same control otherwise False.", "id": "f1782:m80"}
{"signature": "@property<EOL><INDENT>def Value(self) -> float:<DEDENT>", "body": "return self.pattern.CurrentValue<EOL>", "docstring": "Property Value.\nCall IUIAutomationRangeValuePattern::get_CurrentValue.\nReturn float, the value of the control.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationrangevaluepattern-get_currentvalue", "id": "f1782:c56:m6"}
{"signature": "@property<EOL><INDENT>def IsGrabbed(self) -> bool:<DEDENT>", "body": "return bool(self.pattern.CurrentIsGrabbed)<EOL>", "docstring": "Property IsGrabbed.\nCall IUIAutomationDragPattern::get_CurrentIsGrabbed.\nReturn bool, indicates whether the user has grabbed this element as part of a drag-and-drop operation.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdragpattern-get_currentisgrabbed", "id": "f1782:c46:m3"}
{"signature": "@property<EOL><INDENT>def HorizontalScrollPercent(self) -> float:<DEDENT>", "body": "return self.pattern.CurrentHorizontalScrollPercent<EOL>", "docstring": "Property HorizontalScrollPercent.\nCall IUIAutomationScrollPattern::get_CurrentHorizontalScrollPercent.\nReturn float, the horizontal scroll position.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-get_currenthorizontalscrollpercent", "id": "f1782:c58:m2"}
{"signature": "@classmethod<EOL><INDENT>def instance(cls) -> '<STR_LIT>':<DEDENT>", "body": "if cls._instance is None:<EOL><INDENT>cls._instance = cls()<EOL><DEDENT>return cls._instance<EOL>", "docstring": "Singleton instance (this prevents com creation on import).", "id": "f1782:c0:m0"}
{"signature": "def Maximize(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "if self.IsTopLevel():<EOL><INDENT>return self.ShowWindow(SW.ShowMaximized, waitTime)<EOL><DEDENT>return False<EOL>", "docstring": "Set top level window maximize.", "id": "f1782:c99:m3"}
{"signature": "@property<EOL><INDENT>def DefaultAction(self) -> str:<DEDENT>", "body": "return self.pattern.CurrentDefaultAction<EOL>", "docstring": "Property DefaultAction.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentDefaultAction.\nReturn str, the Microsoft Active Accessibility current default action for the element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currentdefaultaction", "id": "f1782:c53:m2"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationdragpattern", "id": "f1782:c46:m0"}
{"signature": "def ExpandToEnclosingUnit(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.textRange.ExpandToEnclosingUnit() == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationTextRange::ExpandToEnclosingUnit.\nNormalize the text range by the specified text unit.\n    The range is expanded if it is smaller than the specified unit,\n    or shortened if it is longer than the specified unit.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-expandtoenclosingunit", "id": "f1782:c67:m5"}
{"signature": "@property<EOL><INDENT>def Description(self) -> str:<DEDENT>", "body": "return self.pattern.CurrentDescription<EOL>", "docstring": "Property Description.\nCall IUIAutomationLegacyIAccessiblePattern::get_CurrentDescription.\nReturn str, the Microsoft Active Accessibility description of the element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-get_currentdescription", "id": "f1782:c53:m3"}
{"signature": "@property<EOL><INDENT>def HorizontalViewSize(self) -> float:<DEDENT>", "body": "return self.pattern.CurrentHorizontalViewSize<EOL>", "docstring": "Property HorizontalViewSize.\nCall IUIAutomationScrollPattern::get_CurrentHorizontalViewSize.\nReturn float, the horizontal size of the viewable region of a scrollable element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-get_currenthorizontalviewsize", "id": "f1782:c58:m3"}
{"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)<EOL>", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c86:m2"}
{"signature": "def GetTransformPattern(self) -> TransformPattern:", "body": "return self.GetPattern(PatternId.TransformPattern)<EOL>", "docstring": "Return `TransformPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c120:m1"}
{"signature": "@property<EOL><INDENT>def IsPassword(self) -> bool:<DEDENT>", "body": "return self.Element.CurrentIsPassword<EOL>", "docstring": "Property IsPassword.\nCall IUIAutomationElement::get_CurrentIsPassword.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentispassword", "id": "f1782:c78:m28"}
{"signature": "def GetConsoleOriginalTitle() -> str:", "body": "if IsNT6orHigher:<EOL><INDENT>arrayType = ctypes.c_wchar * MAX_PATH<EOL>values = arrayType()<EOL>ctypes.windll.kernel32.GetConsoleOriginalTitleW(values, MAX_PATH)<EOL>return values.value<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "GetConsoleOriginalTitle from Win32.\nReturn str.\nOnly available on Windows Vista or higher.", "id": "f1782:m44"}
{"signature": "@property<EOL><INDENT>def ClassName(self) -> str:<DEDENT>", "body": "return self.Element.CurrentClassName<EOL>", "docstring": "Property ClassName.\nCall IUIAutomationElement::get_CurrentClassName.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentclassname", "id": "f1782:c78:m16"}
{"signature": "def SendInput(*inputs) -> int:", "body": "nInputs = len(inputs)<EOL>LPINPUT = INPUT * nInputs<EOL>pInputs = LPINPUT(*inputs)<EOL>cbSize = ctypes.c_int(ctypes.sizeof(INPUT))<EOL>return ctypes.windll.user32.SendInput(nInputs, pInputs, cbSize)<EOL>", "docstring": "SendInput from Win32.\ninput: `INPUT`.\nReturn int, the number of events that it successfully inserted into the keyboard or mouse input stream.\n            If the function returns zero, the input was already blocked by another thread.", "id": "f1782:m61"}
{"signature": "def GetItemByName(self, name: str) -> '<STR_LIT>':", "body": "ele = self.pattern.GetItemByName(name)<EOL>return Control.CreateControlFromElement(element=ele)<EOL>", "docstring": "Call IUIAutomationSpreadsheetPattern::GetItemByName.\nname: str.\nReturn `Control` subclass or None, represents the spreadsheet cell that has the specified name..\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationspreadsheetpattern-getitembyname", "id": "f1782:c62:m1"}
{"signature": "def PressMouse(x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "SetCursorPos(x, y)<EOL>screenWidth, screenHeight = GetScreenSize()<EOL>mouse_event(MouseEventFlag.LeftDown | MouseEventFlag.Absolute, x * <NUM_LIT> // screenWidth, y * <NUM_LIT> // screenHeight, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>time.sleep(waitTime)<EOL>", "docstring": "Press left mouse.\nx: int.\ny: int.\nwaitTime: float.", "id": "f1782:m15"}
{"signature": "@property<EOL><INDENT>def DropEffects(self) -> list:<DEDENT>", "body": "return self.pattern.CurrentDropEffects<EOL>", "docstring": "Property DropEffects.\nCall IUIAutomationDragPattern::get_CurrentDropEffects, todo SAFEARRAY.\nReturn list, a list of localized strings that enumerate the full set of effects\n             that can happen when this element as part of a drag-and-drop operation.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdragpattern-get_currentdropeffects", "id": "f1782:c46:m2"}
{"signature": "def GetTablePattern(self) -> TablePattern:", "body": "return self.GetPattern(PatternId.TablePattern)<EOL>", "docstring": "Return `TablePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c81:m2"}
{"signature": "@property<EOL><INDENT>def AccessKey(self) -> str:<DEDENT>", "body": "return self.Element.CurrentAccessKey<EOL>", "docstring": "Property AccessKey.\nCall IUIAutomationElement::get_CurrentAccessKey.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentaccesskey", "id": "f1782:c78:m11"}
{"signature": "def GetTransformPattern(self) -> TransformPattern:", "body": "return self.GetPattern(PatternId.TransformPattern)<EOL>", "docstring": "Return `TransformPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c114:m1"}
{"signature": "def SetConsoleTitle(text: str) -> bool:", "body": "return bool(ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(text)))<EOL>", "docstring": "SetConsoleTitle from Win32.\ntext: str.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m46"}
{"signature": "@property<EOL><INDENT>def Row(self) -> int:<DEDENT>", "body": "return self.pattern.CurrentRow<EOL>", "docstring": "Property Row.\nCall IUIAutomationGridItemPattern::get_CurrentRow.\nReturn int, the zero-based index of the row that contains the grid item.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgriditempattern-get_currentrow", "id": "f1782:c49:m4"}
{"signature": "def GetFocusedControl() -> Control:", "body": "return Control.CreateControlFromElement(_AutomationClient.instance().IUIAutomation.GetFocusedElement())<EOL>", "docstring": "Return `Control` subclass.", "id": "f1782:m72"}
{"signature": "def ControlFromHandle(handle: int) -> Control:", "body": "return Control.CreateControlFromElement(_AutomationClient.instance().IUIAutomation.ElementFromHandle(handle))<EOL>", "docstring": "Call IUIAutomation.ElementFromHandle with a native handle.\nhandle: int, a native window handle.\nReturn `Control` subclass.", "id": "f1782:m79"}
{"signature": "def Restore(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "if self.IsTopLevel():<EOL><INDENT>return self.ShowWindow(SW.Restore, waitTime)<EOL><DEDENT>return False<EOL>", "docstring": "Restore window to normal state.\nSimilar to SwitchToThisWindow.", "id": "f1782:c99:m7"}
{"signature": "def GetSelectionPattern(self) -> SelectionPattern:", "body": "return self.GetPattern(PatternId.SelectionPattern)<EOL>", "docstring": "Return `SelectionPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c110:m1"}
{"signature": "@property<EOL><INDENT>def VerticallyScrollable(self) -> bool:<DEDENT>", "body": "return bool(self.pattern.CurrentVerticallyScrollable)<EOL>", "docstring": "Property VerticallyScrollable.\nCall IUIAutomationScrollPattern::get_CurrentVerticallyScrollable.\nReturn bool, indicates whether the element can scroll vertically.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-get_currentverticallyscrollable", "id": "f1782:c58:m4"}
{"signature": "def SetPixelColorsVertically(self, x: int, y: int, colors: Iterable) -> bool:", "body": "count = len(colors)<EOL>arrayType = ctypes.c_uint32 * count<EOL>values = arrayType(*colors)<EOL>return _DllClient.instance().dll.BitmapSetPixelsVertically(ctypes.c_size_t(self._bitmap), x, y, values, count)<EOL>", "docstring": "Set pixel colors form x,y vertically.\nx: int.\ny: int.\ncolors: Iterable, an iterable list of int color values in argb.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c42:m15"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtextpattern2", "id": "f1782:c71:m0"}
{"signature": "def WheelUp(self, wheelTimes: int = <NUM_LIT:1>, interval: float = <NUM_LIT>, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "x, y = GetCursorPos()<EOL>self.SetFocus()<EOL>self.MoveCursorToMyCenter(False)<EOL>WheelUp(wheelTimes, interval, waitTime)<EOL>SetCursorPos(x, y)<EOL>", "docstring": "Make control have focus first, move cursor to center and mouse wheel up.\nwheelTimes: int.\ninterval: float.\nwaitTime: float.", "id": "f1782:c78:m68"}
{"signature": "def WaitHotKeyReleased(hotkey: tuple) -> None:", "body": "mod = {ModifierKey.Alt: Keys.VK_MENU,<EOL>ModifierKey.Control: Keys.VK_CONTROL,<EOL>ModifierKey.Shift: Keys.VK_SHIFT,<EOL>ModifierKey.Win: Keys.VK_LWIN<EOL>}<EOL>while True:<EOL><INDENT>time.sleep(<NUM_LIT>)<EOL>if IsKeyPressed(hotkey[<NUM_LIT:1>]):<EOL><INDENT>continue<EOL><DEDENT>for k, v in mod.items():<EOL><INDENT>if k & hotkey[<NUM_LIT:0>]:<EOL><INDENT>if IsKeyPressed(v):<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "hotkey: tuple, two ints tuple(modifierKey, key)", "id": "f1782:m89"}
{"signature": "def AddToSelection(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.textRange.AddToSelection() == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationTextRange::AddToSelection.\nAdd the text range to the collection of selected text ranges in a control that supports multiple, disjoint spans of selected text.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-addtoselection", "id": "f1782:c67:m1"}
{"signature": "def MiddleClick(x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "SetCursorPos(x, y)<EOL>screenWidth, screenHeight = GetScreenSize()<EOL>mouse_event(MouseEventFlag.MiddleDown | MouseEventFlag.Absolute, x * <NUM_LIT> // screenWidth, y * <NUM_LIT> // screenHeight, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>time.sleep(<NUM_LIT>)<EOL>mouse_event(MouseEventFlag.MiddleUp | MouseEventFlag.Absolute, x * <NUM_LIT> // screenWidth, y * <NUM_LIT> // screenHeight, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>time.sleep(waitTime)<EOL>", "docstring": "Simulate mouse middle click at point x, y.\nx: int.\ny: int.\nwaitTime: float.", "id": "f1782:m13"}
{"signature": "@property<EOL><INDENT>def DockPosition(self) -> int:<DEDENT>", "body": "return self.pattern.CurrentDockPosition<EOL>", "docstring": "Property DockPosition.\nCall IUIAutomationDockPattern::get_CurrentDockPosition.\nReturn int, a value in class `DockPosition`.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdockpattern-get_currentdockposition", "id": "f1782:c45:m1"}
{"signature": "@property<EOL><INDENT>def IsTopmost(self) -> bool:<DEDENT>", "body": "return bool(self.pattern.CurrentIsTopmost)<EOL>", "docstring": "Property IsTopmost.\nCall IUIAutomationWindowPattern::get_CurrentIsTopmost.\nReturn bool, indicates whether the window is the topmost element in the z-order.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-get_currentistopmost", "id": "f1782:c77:m5"}
{"signature": "@property<EOL><INDENT>def Shape(self) -> str:<DEDENT>", "body": "return self.pattern.CurrentShape<EOL>", "docstring": "Property Shape.\nCall IUIAutomationStylesPattern::get_CurrentShape.\nReturn str, the shape of an element in a document.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationstylespattern-get_currentshape", "id": "f1782:c63:m4"}
{"signature": "def GetPixelColor(self, x: int, y: int) -> int:", "body": "handle = self.NativeWindowHandle<EOL>if handle:<EOL><INDENT>return GetPixelColor(x, y, handle)<EOL><DEDENT>", "docstring": "Call native `GetPixelColor` if control has a valid native handle.\nUse `self.ToBitmap` if control doesn't have a valid native handle or you get many pixels.\nx: int, internal x position.\ny: int, internal y position.\nReturn int, a color value in bgr.\nr = bgr & 0x0000FF\ng = (bgr & 0x00FF00) >> 8\nb = (bgr & 0xFF0000) >> 16", "id": "f1782:c78:m77"}
{"signature": "def ReleaseMouse(waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "x, y = GetCursorPos()<EOL>screenWidth, screenHeight = GetScreenSize()<EOL>mouse_event(MouseEventFlag.LeftUp | MouseEventFlag.Absolute, x * <NUM_LIT> // screenWidth, y * <NUM_LIT> // screenHeight, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>time.sleep(waitTime)<EOL>", "docstring": "Release left mouse.\nwaitTime: float.", "id": "f1782:m16"}
{"signature": "@staticmethod<EOL><INDENT>def CreateControlFromControl(control: '<STR_LIT>') -> '<STR_LIT>':<DEDENT>", "body": "newControl = Control.CreateControlFromElement(control.Element)<EOL>return newControl<EOL>", "docstring": "Create a concreate `Control` from a control instance, copy it.\ncontrol: `Control` or its subclass.\nReturn a subclass of `Control`, an instance of the control's real type.\nFor example: if control's ControlType is EditControl, return an EditControl.", "id": "f1782:c78:m3"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtransformpattern2", "id": "f1782:c74:m0"}
{"signature": "@property<EOL><INDENT>def ControlTypeName(self) -> str:<DEDENT>", "body": "return ControlTypeNames[self.ControlType]<EOL>", "docstring": "Property ControlTypeName.", "id": "f1782:c78:m46"}
{"signature": "def GetTransformPattern(self) -> TransformPattern:", "body": "return self.GetPattern(PatternId.TransformPattern)<EOL>", "docstring": "Return `TransformPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c91:m2"}
{"signature": "def GetTogglePattern(self) -> TogglePattern:", "body": "return self.GetPattern(PatternId.TogglePattern)<EOL>", "docstring": "Return `TogglePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c80:m3"}
{"signature": "def GetRowHeaderItems(self) -> list:", "body": "eleArray = self.pattern.GetCurrentRowHeaderItems()<EOL>if eleArray:<EOL><INDENT>controls = []<EOL>for i in range(eleArray.Length):<EOL><INDENT>ele = eleArray.GetElement(i)<EOL>con = Control.CreateControlFromElement(element=ele)<EOL>if con:<EOL><INDENT>controls.append(con)<EOL><DEDENT><DEDENT>return controls<EOL><DEDENT>return []<EOL>", "docstring": "Call IUIAutomationTableItemPattern::GetCurrentRowHeaderItems.\nReturn list, a list of `Control` subclasses, the row headers associated with a table item or cell.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtableitempattern-getcurrentrowheaderitems", "id": "f1782:c65:m2"}
{"signature": "def MoveWindow(self, x: int, y: int, width: int, height: int, repaint: bool = True) -> bool:", "body": "handle = self.NativeWindowHandle<EOL>if handle:<EOL><INDENT>return MoveWindow(handle, x, y, width, height, int(repaint))<EOL><DEDENT>return False<EOL>", "docstring": "Call native MoveWindow if control has a valid native handle.\nx: int.\ny: int.\nwidth: int.\nheight: int.\nrepaint: bool.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c78:m72"}
{"signature": "def GetPropertyValue(self, propertyId: int) -> Any:", "body": "return self.Element.GetCurrentPropertyValue(propertyId)<EOL>", "docstring": "Call IUIAutomationElement::GetCurrentPropertyValue.\npropertyId: int, a value in class `PropertyId`.\nReturn Any, corresponding type according to propertyId.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getcurrentpropertyvalue", "id": "f1782:c78:m41"}
{"signature": "def GetExpandCollapsePattern(self) -> ExpandCollapsePattern:", "body": "return self.GetPattern(PatternId.ExpandCollapsePattern)<EOL>", "docstring": "Return `ExpandCollapsePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c119:m1"}
{"signature": "@classmethod<EOL><INDENT>def instance(cls) -> '<STR_LIT>':<DEDENT>", "body": "if cls._instance is None:<EOL><INDENT>cls._instance = cls()<EOL><DEDENT>return cls._instance<EOL>", "docstring": "Singleton instance (this prevents com creation on import).", "id": "f1782:c1:m0"}
{"signature": "def Cancel(self) -> bool:", "body": "return self.pattern.Cancel() == S_OK<EOL>", "docstring": "Call IUIAutomationSynchronizedInputPattern::Cancel.\nCause the Microsoft UI Automation provider to stop listening for mouse or keyboard input.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationsynchronizedinputpattern-cancel", "id": "f1782:c64:m1"}
{"signature": "def GetSelectionItemPattern(self) -> SelectionItemPattern:", "body": "return self.GetPattern(PatternId.SelectionItemPattern)<EOL>", "docstring": "Return `SelectionItemPattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c86:m1"}
{"signature": "def IsKeyPressed(key: int) -> bool:", "body": "state = ctypes.windll.user32.GetAsyncKeyState(key)<EOL>return bool(state & <NUM_LIT>)<EOL>", "docstring": "key: int, a value in class `Keys`.\nReturn bool.", "id": "f1782:m56"}
{"signature": "def ShowWindow(self, cmdShow: int, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "handle = self.NativeWindowHandle<EOL>if not handle:<EOL><INDENT>control = self<EOL>while not handle:<EOL><INDENT>control = control.GetParentControl()<EOL>handle = control.NativeWindowHandle<EOL><DEDENT><DEDENT>if handle:<EOL><INDENT>ret = ShowWindow(handle, cmdShow)<EOL>time.sleep(waitTime)<EOL>return ret<EOL><DEDENT>", "docstring": "Get a native handle from self or ancestors until valid and call native `ShowWindow` with cmdShow.\ncmdShow: int, a value in in class `SW`.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c78:m69"}
{"signature": "@property<EOL><INDENT>def CanZoom(self) -> bool:<DEDENT>", "body": "return bool(self.pattern.CurrentCanZoom)<EOL>", "docstring": "Property CanZoom.\nCall IUIAutomationTransformPattern2::get_CurrentCanZoom.\nReturn bool, indicates whether the control supports zooming of its viewport.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-get_CurrentCanZoom", "id": "f1782:c74:m1"}
{"signature": "@property<EOL><INDENT>def BoundingRectangle(self) -> Rect:<DEDENT>", "body": "rect = self.Element.CurrentBoundingRectangle<EOL>return Rect(rect.left, rect.top, rect.right, rect.bottom)<EOL>", "docstring": "Property BoundingRectangle.\nCall IUIAutomationElement::get_CurrentBoundingRectangle.\nReturn `Rect`.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentboundingrectangle\n\nrect = control.BoundingRectangle\nprint(rect.left, rect.top, rect.right, rect.bottom, rect.width(), rect.height(), rect.xcenter(), rect.ycenter())", "id": "f1782:c78:m15"}
{"signature": "def MiddleClick(self, x: int = None, y: int = None, ratioX: float = <NUM_LIT:0.5>, ratioY: float = <NUM_LIT:0.5>, simulateMove: bool = True, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "point = self.MoveCursorToInnerPos(x, y, ratioX, ratioY, simulateMove)<EOL>if point:<EOL><INDENT>MiddleClick(point[<NUM_LIT:0>], point[<NUM_LIT:1>], waitTime)<EOL><DEDENT>", "docstring": "x: int, if < 0, middle click self.BoundingRectangle.right + x, if not None, ignore ratioX.\ny: int, if < 0, middle click self.BoundingRectangle.bottom + y, if not None, ignore ratioY.\nratioX: float.\nratioY: float.\nsimulateMove: bool, if True, first move cursor to control smoothly.\nwaitTime: float.\n\nMiddleClick(), MiddleClick(ratioX=0.5, ratioY=0.5): middle click center.\nMiddleClick(10, 10): middle click left+10, top+10.\nMiddleClick(-10, -10): middle click right-10, bottom-10.", "id": "f1782:c78:m64"}
{"signature": "def GetIAccessible(self):", "body": "return self.pattern.GetIAccessible()<EOL>", "docstring": "Call IUIAutomationLegacyIAccessiblePattern::GetIAccessible, todo.\nReturn an IAccessible object that corresponds to the Microsoft UI Automation element.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationlegacyiaccessiblepattern-getiaccessible\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/oleacc/nn-oleacc-iaccessible", "id": "f1782:c53:m12"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationtexteditpattern", "id": "f1782:c69:m0"}
{"signature": "def GetTableItemPattern(self) -> TableItemPattern:", "body": "return self.GetPattern(PatternId.TableItemPattern)<EOL>", "docstring": "Return `TableItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c86:m5"}
{"signature": "@property<EOL><INDENT>def Maximum(self) -> float:<DEDENT>", "body": "return self.pattern.CurrentMaximum<EOL>", "docstring": "Property Maximum.\nCall IUIAutomationRangeValuePattern::get_CurrentMaximum.\nReturn float, the maximum value of the control.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationrangevaluepattern-get_currentmaximum", "id": "f1782:c56:m3"}
{"signature": "def ShowWindow(handle: int, cmdShow: int) -> bool:", "body": "return ctypes.windll.user32.ShowWindow(ctypes.c_void_p(handle), cmdShow)<EOL>", "docstring": "ShowWindow from Win32.\nhandle: int, the handle of a native window.\ncmdShow: int, a value in clas `SW`.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m37"}
{"signature": "def GetTopLevelControl(self) -> '<STR_LIT>':", "body": "handle = self.NativeWindowHandle<EOL>if handle:<EOL><INDENT>topHandle = GetAncestor(handle, GAFlag.Root)<EOL>if topHandle:<EOL><INDENT>if topHandle == handle:<EOL><INDENT>return self<EOL><DEDENT>else:<EOL><INDENT>return ControlFromHandle(topHandle)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>control = self<EOL>while True:<EOL><INDENT>control = control.GetParentControl()<EOL>handle = control.NativeWindowHandle<EOL>if handle:<EOL><INDENT>topHandle = GetAncestor(handle, GAFlag.Root)<EOL>return ControlFromHandle(topHandle)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Get the top level control which current control lays.\nIf current control is top level, return self.\nIf current control is root control, return None.\nReturn `PaneControl` or `WindowControl` or None.", "id": "f1782:c78:m81"}
{"signature": "@staticmethod<EOL><INDENT>def WriteLine(log: Any, consoleColor: int = -<NUM_LIT:1>, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None) -> None:<DEDENT>", "body": "Logger.Write('<STR_LIT>'.format(log), consoleColor, writeToFile, printToStdout, logFile)<EOL>", "docstring": "log: any type.\nconsoleColor: int, a value in class `ConsoleColor`, such as `ConsoleColor.DarkGreen`.\nwriteToFile: bool.\nprintToStdout: bool.\nlogFile: str, log file path.", "id": "f1782:c41:m2"}
{"signature": "def SetConsoleColor(color: int) -> bool:", "body": "global _ConsoleOutputHandle<EOL>global _DefaultConsoleColor<EOL>if not _DefaultConsoleColor:<EOL><INDENT>if not _ConsoleOutputHandle:<EOL><INDENT>_ConsoleOutputHandle = ctypes.windll.kernel32.GetStdHandle(_StdOutputHandle)<EOL><DEDENT>bufferInfo = ConsoleScreenBufferInfo()<EOL>ctypes.windll.kernel32.GetConsoleScreenBufferInfo(_ConsoleOutputHandle, ctypes.byref(bufferInfo))<EOL>_DefaultConsoleColor = int(bufferInfo.wAttributes & <NUM_LIT>)<EOL><DEDENT>if sys.stdout:<EOL><INDENT>sys.stdout.flush()<EOL><DEDENT>bool(ctypes.windll.kernel32.SetConsoleTextAttribute(_ConsoleOutputHandle, color))<EOL>", "docstring": "Change the text color on console window.\ncolor: int, a value in class `ConsoleColor`.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m2"}
{"signature": "def PlayWaveFile(filePath: str = r'<STR_LIT>', isAsync: bool = False, isLoop: bool = False) -> bool:", "body": "if filePath:<EOL><INDENT>SND_ASYNC = <NUM_LIT><EOL>SND_NODEFAULT = <NUM_LIT><EOL>SND_LOOP = <NUM_LIT><EOL>SND_FILENAME = <NUM_LIT><EOL>flags = SND_NODEFAULT | SND_FILENAME<EOL>if isAsync:<EOL><INDENT>flags |= SND_ASYNC<EOL><DEDENT>if isLoop:<EOL><INDENT>flags |= SND_LOOP<EOL>flags |= SND_ASYNC<EOL><DEDENT>return bool(ctypes.windll.winmm.PlaySoundW(ctypes.c_wchar_p(filePath), ctypes.c_void_p(<NUM_LIT:0>), flags))<EOL><DEDENT>else:<EOL><INDENT>return bool(ctypes.windll.winmm.PlaySoundW(ctypes.c_wchar_p(<NUM_LIT:0>), ctypes.c_void_p(<NUM_LIT:0>), <NUM_LIT:0>))<EOL><DEDENT>", "docstring": "Call PlaySound from Win32.\nfilePath: str, if emtpy, stop playing the current sound.\nisAsync: bool, if True, the sound is played asynchronously and returns immediately.\nisLoop: bool, if True, the sound plays repeatedly until PlayWaveFile(None) is called again, must also set isAsync to True.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m49"}
{"signature": "def GetAnnotationTypes(self) -> list:", "body": "return self.pattern.GetCurrentAnnotationTypes()<EOL>", "docstring": "Call IUIAutomationSelectionPattern::GetCurrentAnnotationTypes.\nReturn list, a list of int values in class `AnnotationType`,\n             indicating the types of annotations that are associated with this spreadsheet cell.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationselectionpattern-getcurrentannotationtypes", "id": "f1782:c61:m3"}
{"signature": "@property<EOL><INDENT>def Column(self) -> int:<DEDENT>", "body": "return self.pattern.CurrentColumn<EOL>", "docstring": "Property Column.\nCall IUIAutomationGridItemPattern::get_CurrentColumn.\nReturn int, the zero-based index of the column that contains the item.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgriditempattern-get_currentcolumn", "id": "f1782:c49:m1"}
{"signature": "def GetPattern(self, patternId: int):", "body": "try:<EOL><INDENT>pattern = self.Element.GetCurrentPattern(patternId)<EOL>if pattern:<EOL><INDENT>subPattern = CreatePattern(patternId, pattern)<EOL>self._supportedPatterns[patternId] = subPattern<EOL>return subPattern<EOL><DEDENT><DEDENT>except comtypes.COMError as ex:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Call IUIAutomationElement::GetCurrentPattern.\nGet a new pattern by pattern id if it supports the pattern.\npatternId: int, a value in class `PatternId`.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-getcurrentpattern", "id": "f1782:c78:m39"}
{"signature": "def SetWindowPos(handle: int, hWndInsertAfter: int, x: int, y: int, width: int, height: int, flags: int) -> bool:", "body": "return ctypes.windll.user32.SetWindowPos(ctypes.c_void_p(handle), ctypes.c_void_p(hWndInsertAfter), x, y, width, height, flags)<EOL>", "docstring": "SetWindowPos from Win32.\nhandle: int, the handle of a native window.\nhWndInsertAfter: int, a value whose name starts with 'HWND' in class SWP.\nx: int.\ny: int.\nwidth: int.\nheight: int.\nflags: int, values whose name starts with 'SWP' in class `SWP`.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m39"}
{"signature": "def GetTransformPattern(self) -> TransformPattern:", "body": "return self.GetPattern(PatternId.TransformPattern)<EOL>", "docstring": "Return `TransformPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c100:m3"}
{"signature": "def GetColumnHeaderItems(self) -> list:", "body": "eleArray = self.pattern.GetCurrentColumnHeaderItems()<EOL>if eleArray:<EOL><INDENT>controls = []<EOL>for i in range(eleArray.Length):<EOL><INDENT>ele = eleArray.GetElement(i)<EOL>con = Control.CreateControlFromElement(element=ele)<EOL>if con:<EOL><INDENT>controls.append(con)<EOL><DEDENT><DEDENT>return controls<EOL><DEDENT>return []<EOL>", "docstring": "Call IUIAutomationTableItemPattern::GetCurrentColumnHeaderItems.\nReturn list, a list of `Control` subclasses, the column headers associated with a table item or cell.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtableitempattern-getcurrentcolumnheaderitems", "id": "f1782:c65:m1"}
{"signature": "@property<EOL><INDENT>def RowCount(self) -> int:<DEDENT>", "body": "return self.pattern.CurrentRowCount<EOL>", "docstring": "Property RowCount.\nCall IUIAutomationGridPattern::get_CurrentRowCount.\nReturn int, the number of rows in the grid.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationgridpattern-get_currentrowcount", "id": "f1782:c50:m2"}
{"signature": "@property<EOL><INDENT>def CanMinimize(self) -> bool:<DEDENT>", "body": "return bool(self.pattern.CurrentCanMinimize)<EOL>", "docstring": "Property CanMinimize.\nCall IUIAutomationWindowPattern::get_CurrentCanMinimize.\nReturn bool, indicates whether the window can be minimized.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationwindowpattern-get_currentismodal", "id": "f1782:c77:m3"}
{"signature": "def RightDragDrop(x1: int, y1: int, x2: int, y2: int, moveSpeed: float = <NUM_LIT:1>, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "RightPressMouse(x1, y1, <NUM_LIT>)<EOL>MoveTo(x2, y2, moveSpeed, <NUM_LIT>)<EOL>RightReleaseMouse(waitTime)<EOL>", "docstring": "Simulate mouse right button drag from point x1, y1 drop to point x2, y2.\nx1: int.\ny1: int.\nx2: int.\ny2: int.\nmoveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster.\nwaitTime: float.", "id": "f1782:m21"}
{"signature": "def EnumAndLogControlAncestors(control: Control, showAllName: bool = True) -> None:", "body": "lists = []<EOL>while control:<EOL><INDENT>lists.insert(<NUM_LIT:0>, control)<EOL>control = control.GetParentControl()<EOL><DEDENT>for i, control in enumerate(lists):<EOL><INDENT>LogControl(control, i, showAllName)<EOL><DEDENT>", "docstring": "Print and log control and its ancestors' propertyies.\ncontrol: `Control` or its subclass.\nshowAllName: bool, if False, print the first 30 characters of control.Name.", "id": "f1782:m84"}
{"signature": "def FindItemByProperty(control: '<STR_LIT>', propertyId: int, propertyValue) -> '<STR_LIT>':", "body": "ele = self.pattern.FindItemByProperty(control.Element, propertyId, propertyValue)<EOL>return Control.CreateControlFromElement(ele)<EOL>", "docstring": "Call IUIAutomationItemContainerPattern::FindItemByProperty.\ncontrol: `Control` or its subclass.\npropertyValue: COM VARIANT according to propertyId? todo.\npropertyId: int, a value in class `PropertyId`.\nReturn `Control` subclass, a control within a containing element, based on a specified property value.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationitemcontainerpattern-finditembyproperty", "id": "f1782:c52:m1"}
{"signature": "def GetTogglePattern(self) -> TogglePattern:", "body": "return self.GetPattern(PatternId.TogglePattern)<EOL>", "docstring": "Return `TogglePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c86:m6"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationgridpattern", "id": "f1782:c50:m0"}
{"signature": "def RangeFromPoint(self, x: int, y: int) -> TextRange:", "body": "textRange = self.pattern.RangeFromPoint(ctypes.wintypes.POINT(x, y))<EOL>if textRange:<EOL><INDENT>return TextRange(textRange=textRange)<EOL><DEDENT>", "docstring": "Call IUIAutomationTextPattern::RangeFromPoint.\nchild: `Control` or its subclass.\nReturn `TextRange` or None, the degenerate (empty) text range nearest to the specified screen coordinates.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextpattern-rangefrompoint", "id": "f1782:c70:m6"}
{"signature": "def SetClipboardText(text: str) -> bool:", "body": "if ctypes.windll.user32.OpenClipboard(<NUM_LIT:0>):<EOL><INDENT>ctypes.windll.user32.EmptyClipboard()<EOL>textByteLen = (len(text) + <NUM_LIT:1>) * <NUM_LIT:2><EOL>hClipboardData = ctypes.windll.kernel32.GlobalAlloc(<NUM_LIT:0>, textByteLen)  <EOL>hDestText = ctypes.windll.kernel32.GlobalLock(hClipboardData)<EOL>ctypes.cdll.msvcrt.wcsncpy(ctypes.c_wchar_p(hDestText), ctypes.c_wchar_p(text), textByteLen // <NUM_LIT:2>)<EOL>ctypes.windll.kernel32.GlobalUnlock(hClipboardData)<EOL>ctypes.windll.user32.SetClipboardData(<NUM_LIT>, hClipboardData)  <EOL>ctypes.windll.user32.CloseClipboard()<EOL>return True<EOL><DEDENT>return False<EOL>", "docstring": "Return bool, True if succeed otherwise False.", "id": "f1782:m1"}
{"signature": "def GetAncestorControl(self, condition: Callable) -> '<STR_LIT>':", "body": "ancestor = self<EOL>depth = <NUM_LIT:0><EOL>while True:<EOL><INDENT>ancestor = ancestor.GetParentControl()<EOL>depth -= <NUM_LIT:1><EOL>if ancestor:<EOL><INDENT>if condition(ancestor, depth):<EOL><INDENT>return ancestor<EOL><DEDENT><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Get a ancestor control that matches the condition.\ncondition: Callable, function (control: Control, depth: int)->bool,\n           depth starts with -1 and decreses when search goes up.\nReturn `Control` subclass or None.", "id": "f1782:c78:m49"}
{"signature": "def RightReleaseMouse(waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "x, y = GetCursorPos()<EOL>screenWidth, screenHeight = GetScreenSize()<EOL>mouse_event(MouseEventFlag.RightUp | MouseEventFlag.Absolute, x * <NUM_LIT> // screenWidth, y * <NUM_LIT> // screenHeight, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>time.sleep(waitTime)<EOL>", "docstring": "Release right mouse.\nwaitTime: float.", "id": "f1782:m18"}
{"signature": "@property<EOL><INDENT>def CanMove(self) -> bool:<DEDENT>", "body": "return bool(self.pattern.CurrentCanMove)<EOL>", "docstring": "Property CanMove.\nCall IUIAutomationTransformPattern::get_CurrentCanMove.\nReturn bool, indicates whether the element can be moved.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern-get_currentcanmove", "id": "f1782:c73:m1"}
{"signature": "@property<EOL><INDENT>def IsSelected(self) -> bool:<DEDENT>", "body": "return bool(self.pattern.CurrentIsSelected)<EOL>", "docstring": "Property IsSelected.\nCall IUIAutomationScrollPattern::get_CurrentIsSelected.\nReturn bool, indicates whether this item is selected.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationscrollpattern-get_currentisselected", "id": "f1782:c59:m2"}
{"signature": "@property<EOL><INDENT>def IsKeyboardFocusable(self) -> bool:<DEDENT>", "body": "return self.Element.CurrentIsKeyboardFocusable<EOL>", "docstring": "Property IsKeyboardFocusable.\nCall IUIAutomationElement::get_CurrentIsKeyboardFocusable.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentiskeyboardfocusable", "id": "f1782:c78:m26"}
{"signature": "@property<EOL><INDENT>def IsOffscreen(self) -> bool:<DEDENT>", "body": "return self.Element.CurrentIsOffscreen<EOL>", "docstring": "Property IsOffscreen.\nCall IUIAutomationElement::get_CurrentIsOffscreen.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentisoffscreen", "id": "f1782:c78:m27"}
{"signature": "@property<EOL><INDENT>def Target(self) -> '<STR_LIT>':<DEDENT>", "body": "ele = self.pattern.CurrentTarget<EOL>return Control.CreateControlFromElement(ele)<EOL>", "docstring": "Property Target.\nCall IUIAutomationAnnotationPattern::get_CurrentTarget.\nReturn `Control` subclass, the element that is being annotated.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationannotationpattern-get_currenttarget", "id": "f1782:c43:m5"}
{"signature": "def AddSearchProperties(self, **searchProperties) -> None:", "body": "self.searchProperties.update(searchProperties)<EOL>if '<STR_LIT>' in searchProperties:<EOL><INDENT>self.searchDepth = searchProperties['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in searchProperties:<EOL><INDENT>regName = searchProperties['<STR_LIT>']<EOL>self.regexName = re.compile(regName) if regName else None<EOL><DEDENT>", "docstring": "Add search properties using `dict.update`.\nsearchProperties: dict, same as searchProperties in `Control.__init__`.", "id": "f1782:c78:m6"}
{"signature": "def SwitchToThisWindow(handle: int) -> None:", "body": "ctypes.windll.user32.SwitchToThisWindow(ctypes.c_void_p(handle), <NUM_LIT:1>)<EOL>", "docstring": "SwitchToThisWindow from Win32.\nhandle: int, the handle of a native window.", "id": "f1782:m29"}
{"signature": "def IsUserAnAdmin() -> bool:", "body": "return bool(ctypes.windll.shell32.IsUserAnAdmin())<EOL>", "docstring": "IsUserAnAdmin from Win32.\nReturn bool.\nMinimum supported OS: Windows XP, Windows Server 2003", "id": "f1782:m51"}
{"signature": "def Realize(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Realize() == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationVirtualizedItemPattern::Realize.\nCreate a full UI Automation element for a virtualized item.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationvirtualizeditempattern-realize", "id": "f1782:c76:m1"}
{"signature": "def SendKey(self, key: int, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "self.SetFocus()<EOL>SendKey(key, waitTime)<EOL>", "docstring": "Make control have focus first and type a key.\n`self.SetFocus` may not work for some controls, you may need to click it to make it have focus.\nkey: int, a key code value in class Keys.\nwaitTime: float.", "id": "f1782:c78:m75"}
{"signature": "def GetInvokePattern(self) -> InvokePattern:", "body": "return self.GetPattern(PatternId.InvokePattern)<EOL>", "docstring": "Return `InvokePattern` if it supports the pattern else None(Must support according to MSDN).", "id": "f1782:c92:m1"}
{"signature": "def SetActive(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "if self.IsTopLevel():<EOL><INDENT>handle = self.NativeWindowHandle<EOL>if IsIconic(handle):<EOL><INDENT>ret = ShowWindow(handle, SW.Restore)<EOL><DEDENT>elif not IsWindowVisible(handle):<EOL><INDENT>ret = ShowWindow(handle, SW.Show)<EOL><DEDENT>ret = SetForegroundWindow(handle)  <EOL>time.sleep(waitTime)<EOL>return ret<EOL><DEDENT>return False<EOL>", "docstring": "Set top level window active.", "id": "f1782:c99:m9"}
{"signature": "def SetPixelColor(self, x: int, y: int, argb: int) -> bool:", "body": "return _DllClient.instance().dll.BitmapSetPixel(self._bitmap, x, y, argb)<EOL>", "docstring": "Set color value of a pixel.\nx: int.\ny: int.\nargb: int, color value.\nReturn bool, True if succeed otherwise False.", "id": "f1782:c42:m11"}
{"signature": "def GetTableItemPattern(self) -> TableItemPattern:", "body": "return self.GetPattern(PatternId.TableItemPattern)<EOL>", "docstring": "Return `TableItemPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c113:m2"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationrangevaluepattern", "id": "f1782:c56:m0"}
{"signature": "def Click(self, x: int = None, y: int = None, ratioX: float = <NUM_LIT:0.5>, ratioY: float = <NUM_LIT:0.5>, simulateMove: bool = True, waitTime: float = OPERATION_WAIT_TIME) -> None:", "body": "point = self.MoveCursorToInnerPos(x, y, ratioX, ratioY, simulateMove)<EOL>if point:<EOL><INDENT>Click(point[<NUM_LIT:0>], point[<NUM_LIT:1>], waitTime)<EOL><DEDENT>", "docstring": "x: int, if < 0, click self.BoundingRectangle.right + x, if not None, ignore ratioX.\ny: int, if < 0, click self.BoundingRectangle.bottom + y, if not None, ignore ratioY.\nratioX: float.\nratioY: float.\nsimulateMove: bool, if True, first move cursor to control smoothly.\nwaitTime: float.\n\nClick(), Click(ratioX=0.5, ratioY=0.5): click center.\nClick(10, 10): click left+10, top+10.\nClick(-10, -10): click right-10, bottom-10.", "id": "f1782:c78:m63"}
{"signature": "@property<EOL><INDENT>def CanSelectMultiple(self) -> bool:<DEDENT>", "body": "return bool(self.pattern.CurrentCanSelectMultiple)<EOL>", "docstring": "Property CanSelectMultiple.\nCall IUIAutomationSelectionPattern::get_CurrentCanSelectMultiple.\nReturn bool, indicates whether more than one item in the container can be selected at one time.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationselectionpattern-get_currentcanselectmultiple", "id": "f1782:c60:m1"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationselectionpattern", "id": "f1782:c60:m0"}
{"signature": "@property<EOL><INDENT>def Element(self):<DEDENT>", "body": "if not self._element:<EOL><INDENT>self.Refind(maxSearchSeconds=TIME_OUT_SECOND, searchIntervalSeconds=self.searchWaitTime)<EOL><DEDENT>return self._element<EOL>", "docstring": "Property Element.\nReturn `ctypes.POINTER(IUIAutomationElement)`.", "id": "f1782:c78:m45"}
{"signature": "@staticmethod<EOL><INDENT>def ColorfullyLog(log: str = '<STR_LIT>', consoleColor: int = -<NUM_LIT:1>, writeToFile: bool = True, printToStdout: bool = True, logFile: str = None) -> None:<DEDENT>", "body": "t = datetime.datetime.now()<EOL>frame = sys._getframe(<NUM_LIT:1>)<EOL>log = '<STR_LIT>'.format(t.year, t.month, t.day,<EOL>t.hour, t.minute, t.second, t.microsecond // <NUM_LIT:1000>, frame.f_code.co_name, frame.f_lineno, log)<EOL>Logger.ColorfullyWrite(log, consoleColor, writeToFile, printToStdout, logFile)<EOL>", "docstring": "log: any type.\nconsoleColor: int, a value in class ConsoleColor, such as ConsoleColor.DarkGreen.\nwriteToFile: bool.\nprintToStdout: bool.\nlogFile: str, log file path.\n\nColorfullyLog('Hello <Color=Green>Green</Color> !!!'), color name must be in Logger.ColorNames", "id": "f1782:c41:m6"}
{"signature": "def BringWindowToTop(handle: int) -> bool:", "body": "return bool(ctypes.windll.user32.BringWindowToTop(ctypes.c_void_p(handle)))<EOL>", "docstring": "BringWindowToTop from Win32.\nhandle: int, the handle of a native window.\nReturn bool, True if succeed otherwise False.", "id": "f1782:m28"}
{"signature": "@property<EOL><INDENT>def ControlType(self) -> int:<DEDENT>", "body": "return self.Element.CurrentControlType<EOL>", "docstring": "Property ControlType.\nReturn int, a value in class `ControlType`.\nCall IUIAutomationElement::get_CurrentControlType.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentcontroltype", "id": "f1782:c78:m17"}
{"signature": "def Clone(self) -> '<STR_LIT>':", "body": "return TextRange(textRange=self.textRange.Clone())<EOL>", "docstring": "Call IUIAutomationTextRange::Clone.\nreturn `TextRange`, identical to the original and inheriting all properties of the original.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextrange-clone", "id": "f1782:c67:m2"}
{"signature": "def GetScrollPattern(self) -> ScrollPattern:", "body": "return self.GetPattern(PatternId.ScrollPattern)<EOL>", "docstring": "Return `ScrollPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c94:m3"}
{"signature": "def GetTextPattern(self) -> TextPattern:", "body": "return self.GetPattern(PatternId.TextPattern)<EOL>", "docstring": "Return `TextPattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c113:m3"}
{"signature": "def GetTogglePattern(self) -> TogglePattern:", "body": "return self.GetPattern(PatternId.TogglePattern)<EOL>", "docstring": "Return `TogglePattern` if it supports the pattern else None(Conditional support according to MSDN).", "id": "f1782:c98:m4"}
{"signature": "@property<EOL><INDENT>def FrameworkId(self) -> str:<DEDENT>", "body": "return self.Element.CurrentFrameworkId<EOL>", "docstring": "Property FrameworkId.\nCall IUIAutomationElement::get_CurrentFrameworkId.\nReturn str, such as Win32, WPF...\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentframeworkid", "id": "f1782:c78:m19"}
{"signature": "@property<EOL><INDENT>def Name(self) -> str:<DEDENT>", "body": "return self.Element.CurrentName or '<STR_LIT>'<EOL>", "docstring": "Property Name.\nCall IUIAutomationElement::get_CurrentName.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentname", "id": "f1782:c78:m33"}
{"signature": "def GetDoubleClickTime() -> int:", "body": "return ctypes.windll.user32.GetDoubleClickTime()<EOL>", "docstring": "GetDoubleClickTime from Win32.\nReturn int, in milliseconds.", "id": "f1782:m7"}
{"signature": "def SetDockPosition(self, dockPosition: int, waitTime: float = OPERATION_WAIT_TIME) -> int:", "body": "ret = self.pattern.SetDockPosition(dockPosition)<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationDockPattern::SetDockPosition.\ndockPosition: int, a value in class `DockPosition`.\nwaitTime: float.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationdockpattern-setdockposition", "id": "f1782:c45:m2"}
{"signature": "def __init__(self, pattern=None):", "body": "self.pattern = pattern<EOL>", "docstring": "Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nn-uiautomationclient-iuiautomationscrollpattern", "id": "f1782:c58:m0"}
{"signature": "@property<EOL><INDENT>def Culture(self) -> int:<DEDENT>", "body": "return self.Element.CurrentCulture<EOL>", "docstring": "Property Culture.\nCall IUIAutomationElement::get_CurrentCulture.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationelement-get_currentculture", "id": "f1782:c78:m18"}
{"signature": "def Collapse(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:", "body": "ret = self.pattern.Collapse() == S_OK<EOL>time.sleep(waitTime)<EOL>return ret<EOL>", "docstring": "Call IUIAutomationExpandCollapsePattern::Collapse.\nwaitTime: float.\nReturn bool, True if succeed otherwise False.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationexpandcollapsepattern-collapse", "id": "f1782:c48:m2"}
{"signature": "@property<EOL><INDENT>def TextContainer(self) -> '<STR_LIT>':<DEDENT>", "body": "return Control.CreateControlFromElement(self.pattern.TextContainer)<EOL>", "docstring": "Property TextContainer.\nCall IUIAutomationTextChildPattern::get_TextContainer.\nReturn `Control` subclass, the nearest ancestor element that supports the Text control pattern.\nRefer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtextchildpattern-get_textcontainer", "id": "f1782:c68:m1"}
{"signature": "def format(self, record):", "body": "data = record._raw.copy()<EOL>data['<STR_LIT:time>'] = data['<STR_LIT:time>'].isoformat()<EOL>if data.get('<STR_LIT>'):<EOL><INDENT>data['<STR_LIT>'] = self.formatException(data['<STR_LIT>'])<EOL><DEDENT>return json.dumps(data)<EOL>", "docstring": "JSON-encode a record for serializing through redis.\n\nConvert date to iso format, and stringify any exceptions.", "id": "f1786:c0:m0"}
{"signature": "def emit(self, record):", "body": "try:<EOL><INDENT>if self.max_messages:<EOL><INDENT>p = self.redis_client.pipeline()<EOL>p.rpush(self.key, self.format(record))<EOL>p.ltrim(self.key, -self.max_messages, -<NUM_LIT:1>)<EOL>p.execute()<EOL><DEDENT>else:<EOL><INDENT>self.redis_client.rpush(self.key, self.format(record))<EOL><DEDENT><DEDENT>except redis.RedisError:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Publish record to redis logging list", "id": "f1786:c2:m2"}
{"signature": "def _getCallingContext():", "body": "frames = inspect.stack()<EOL>if len(frames) > <NUM_LIT:4>:<EOL><INDENT>context = frames[<NUM_LIT:5>]<EOL><DEDENT>else:<EOL><INDENT>context = frames[<NUM_LIT:0>]<EOL><DEDENT>modname = context[<NUM_LIT:1>]<EOL>lineno = context[<NUM_LIT:2>]<EOL>if context[<NUM_LIT:3>]:<EOL><INDENT>funcname = context[<NUM_LIT:3>]<EOL><DEDENT>else:<EOL><INDENT>funcname = \"<STR_LIT>\"<EOL><DEDENT>del context<EOL>del frames<EOL>return modname, funcname, lineno<EOL>", "docstring": "Utility function for the RedisLogRecord.\n\nReturns the module, function, and lineno of the function \nthat called the logger.  \n\nWe look way up in the stack.  The stack at this point is:\n[0] logger.py _getCallingContext (hey, that's me!)\n[1] logger.py __init__\n[2] logger.py makeRecord\n[3] _log\n[4] <logging method>\n[5] caller of logging method", "id": "f1787:m1"}
{"signature": "def _unicode(string):", "body": "for encoding in ['<STR_LIT:utf-8>', '<STR_LIT>']:<EOL><INDENT>try:<EOL><INDENT>result = unicode(string, encoding)<EOL>return result<EOL><DEDENT>except UnicodeDecodeError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>result = unicode(string, '<STR_LIT:utf-8>', '<STR_LIT:replace>')<EOL>return result<EOL>", "docstring": "Try to convert a string to unicode using different encodings", "id": "f1791:m2"}
{"signature": "def request(key, features, query, timeout=<NUM_LIT:5>):", "body": "data = {}<EOL>data['<STR_LIT:key>'] = key<EOL>data['<STR_LIT>'] = '<STR_LIT:/>'.join([f for f in features if f in FEATURES])<EOL>data['<STR_LIT>'] = quote(query)<EOL>data['<STR_LIT>'] = '<STR_LIT>'<EOL>r = requests.get(API_URL.format(**data), timeout=timeout)<EOL>results = json.loads(_unicode(r.content))<EOL>return results<EOL>", "docstring": "Make an API request\n\n    :param string key: API key to use\n    :param list features: features to request. It must be a subset of :data:`FEATURES`\n    :param string query: query to send\n    :param integer timeout: timeout of the request\n    :returns: result of the API request\n    :rtype: dict", "id": "f1791:m1"}
{"signature": "def satisfied_by_checked(self, req):", "body": "req_man = RequirementsManager([req])<EOL>return any(req_man.check(*checked) for checked in self.checked)<EOL>", "docstring": "Check if requirement is already satisfied by what was previously checked\n\n:param Requirement req: Requirement to check", "id": "f1794:c2:m8"}
{"signature": "def update_requirements(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Update/persist requirements from `self.bumps`", "id": "f1794:c4:m7"}
{"signature": "@classmethod<EOL><INDENT>def parse(cls, s, required=False):<DEDENT>", "body": "req = pkg_resources.Requirement.parse(s)<EOL>return cls(req, required=required)<EOL>", "docstring": "Parse string to create an instance\n\n:param str s: String with requirement to parse\n:param bool required: Is this requirement required to be fulfilled? If not, then it is a filter.", "id": "f1794:c1:m1"}
{"signature": "@classmethod<EOL><INDENT>def bump_message(self, bumps, include_changes=False):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Compose a bump message for the given bumps\n\n:param list bumps: List of :class:`Bump` instances\n:param bool include_changes: Indicate if the message should include detailed changes.", "id": "f1794:c4:m5"}
{"signature": "def reverse(self):", "body": "if self._original_target_content:<EOL><INDENT>with open(self.target, '<STR_LIT:w>') as fp:<EOL><INDENT>fp.write(self._original_target_content)<EOL><DEDENT><DEDENT>", "docstring": "Restore content in target file to be before any changes", "id": "f1794:c4:m16"}
{"signature": "def requirements(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Return a list of existing requirements (as :class:`pkg_resources.Requirement`)", "id": "f1794:c4:m6"}
{"signature": "def bump(self, filter_requirements, required=False, show_summary=True, show_detail=False, **kwargs):", "body": "found_targets = [target for target in self.targets if os.path.exists(target)]<EOL>if not found_targets:<EOL><INDENT>raise BumpAccident('<STR_LIT>' % '<STR_LIT:U+002CU+0020>'.join(self.targets))<EOL><DEDENT>bump_reqs = RequirementsManager()<EOL>if filter_requirements:<EOL><INDENT>requirements = parse_requirements(filter_requirements)<EOL>bump_reqs.add(requirements, required=required)<EOL><DEDENT>try:<EOL><INDENT>for target in found_targets:<EOL><INDENT>log.debug('<STR_LIT>', target)<EOL>target_bumpers = []<EOL>target_bump_reqs = RequirementsManager(bump_reqs)<EOL>loops = <NUM_LIT:0><EOL>while True:<EOL><INDENT>loops += <NUM_LIT:1><EOL>if loops > <NUM_LIT:5>:<EOL><INDENT>log.debug('<STR_LIT>')<EOL>break<EOL><DEDENT>if not target_bumpers:<EOL><INDENT>target_bumpers = [model(target, detail=self.detail, test_drive=self.test_drive)<EOL>for model in self.bumper_models if model.likes(target)]<EOL>if not target_bumpers:<EOL><INDENT>log.debug('<STR_LIT>', target, self.default_model)<EOL>target_bumpers = [self.default_model(target, detail=self.detail,<EOL>test_drive=self.test_drive)]<EOL><DEDENT>self.bumpers.extend(target_bumpers)<EOL><DEDENT>new_target_bump_reqs = RequirementsManager()<EOL>for bumper in target_bumpers:<EOL><INDENT>target_bumps = bumper.bump(target_bump_reqs)<EOL>self.bumps.update(dict((b.name, b) for b in target_bumps))<EOL>for bump in target_bumps:<EOL><INDENT>for new_req in bump.requirements:<EOL><INDENT>if not (bump_reqs.satisfied_by_checked(new_req) or<EOL>target_bump_reqs.satisfied_by_checked(new_req)):<EOL><INDENT>new_target_bump_reqs.add(new_req)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>bump_reqs.matched_name |= target_bump_reqs.matched_name<EOL>bump_reqs.checked.extend(target_bump_reqs.checked)<EOL>if new_target_bump_reqs:<EOL><INDENT>bump_reqs.add(new_target_bump_reqs)<EOL><DEDENT>target_bump_reqs = 
RequirementsManager(list(<EOL>r for r in new_target_bump_reqs if r.project_name not in self.bumps))<EOL>if not target_bump_reqs:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>if not self.bumpers:<EOL><INDENT>raise BumpAccident('<STR_LIT>' % '<STR_LIT:U+002CU+0020>'.join(found_targets))<EOL><DEDENT>if bump_reqs and not bump_reqs.matched_name:<EOL><INDENT>raise BumpAccident('<STR_LIT>' % '<STR_LIT:U+002CU+0020>'.join(found_targets))<EOL><DEDENT>if self.bumps:<EOL><INDENT>for bump in self.bumps.values():<EOL><INDENT>bump_reqs.check(bump)<EOL><DEDENT>for reqs in bump_reqs.required_requirements().values():<EOL><INDENT>for req in reqs:<EOL><INDENT>if not self.full_throttle:<EOL><INDENT>use_force = '<STR_LIT>' if req.required_by else '<STR_LIT>'<EOL>raise BumpAccident('<STR_LIT>'<EOL>'<STR_LIT>' % (req, use_force))<EOL><DEDENT><DEDENT><DEDENT>if self.test_drive:<EOL><INDENT>log.info(\"<STR_LIT>\")<EOL><DEDENT>messages = {}<EOL>for bumper in self.bumpers:<EOL><INDENT>if bumper.bumps:<EOL><INDENT>if not self.test_drive:<EOL><INDENT>bumper.update_requirements()<EOL><DEDENT>if self.test_drive or show_summary:<EOL><INDENT>msg = bumper.bump_message(self.test_drive or show_detail)<EOL>if self.test_drive:<EOL><INDENT>print(msg)<EOL><DEDENT>else:<EOL><INDENT>rewords = [('<STR_LIT>', '<STR_LIT>'), ('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>')]<EOL>for word, new_word in rewords:<EOL><INDENT>if msg.startswith(word):<EOL><INDENT>msg = msg.replace(word, new_word, <NUM_LIT:1>)<EOL>break<EOL><DEDENT><DEDENT>log.info(msg)<EOL><DEDENT><DEDENT>messages[bumper.target] = bumper.bump_message(True)<EOL><DEDENT><DEDENT>return messages, self.bumps<EOL><DEDENT>else:<EOL><INDENT>log.info('<STR_LIT>')<EOL>return {}, []<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>if not self.test_drive and self.bumps:<EOL><INDENT>map(lambda b: b.reverse(), self.bumpers)<EOL><DEDENT>raise<EOL><DEDENT>", "docstring": "Bump dependency requirements using filter.\n\n:param list filter_requirements: List 
of dependency filter requirements.\n:param bool required: Require the filter_requirements to be met (by adding if possible).\n:param bool show_summary: Show summary for each bump made.\n:param bool show_detail: Show detail for each bump made if available.\n:return: Tuple with two elements: Dict of target file to bump message, List of :class:`Bump`\n:raise BumpAccident: for any bump errors", "id": "f1795:c0:m1"}
{"signature": "def reverse(self):", "body": "if not self.test_drive and self.bumps:<EOL><INDENT>map(lambda b: b.reverse(), self.bumpers)<EOL><DEDENT>", "docstring": "Reverse all bumpers", "id": "f1795:c0:m2"}
{"signature": "def bump():", "body": "parser = argparse.ArgumentParser(description=bump.__doc__)<EOL>parser.add_argument('<STR_LIT>', nargs='<STR_LIT:*>', help=\"\"\"<STR_LIT>\"\"\")<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>',<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', action='<STR_LIT:store_true>',<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>',<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>', help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', action='<STR_LIT:store_true>', help='<STR_LIT>')<EOL>args = parser.parse_args()<EOL>targets = [args.file] if args.file else ['<STR_LIT>', '<STR_LIT>']<EOL>level = logging.DEBUG if args.debug else logging.INFO<EOL>logging.basicConfig(level=level, format='<STR_LIT>')<EOL>try:<EOL><INDENT>bumper = BumperDriver(targets, full_throttle=args.force, detail=args.detail, test_drive=args.dry_run)<EOL>bumper.bump(args.names, required=args.add, show_detail=args.detail)<EOL><DEDENT>except Exception as e:<EOL><INDENT>if args.debug:<EOL><INDENT>raise<EOL><DEDENT>else:<EOL><INDENT>log.error(e)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>", "docstring": "CLI entry point to bump requirements in requirements.txt or pinned.txt", "id": "f1795:m0"}
{"signature": "def freeze(self):", "body": "data = super(IndexBuilder, self).freeze()<EOL>try:<EOL><INDENT>base_file_names = data['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>base_file_names = data['<STR_LIT>']<EOL><DEDENT>store = {}<EOL>c = itertools.count()<EOL>for prefix, items in iteritems(data['<STR_LIT>']):<EOL><INDENT>for name, (index, typeindex, _, shortanchor) in iteritems(items):<EOL><INDENT>objtype = data['<STR_LIT>'][typeindex]<EOL>if objtype.startswith('<STR_LIT>'):<EOL><INDENT>split =  name.rsplit('<STR_LIT>', <NUM_LIT:1>)<EOL>if len(split) != <NUM_LIT:2>:<EOL><INDENT>warnings.warn(\"<STR_LIT>\" % str((prefix, name, objtype)))<EOL>continue<EOL><DEDENT>prefix, name = split<EOL>last_prefix = prefix.split('<STR_LIT>')[-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>last_prefix = prefix.split('<STR_LIT:.>')[-<NUM_LIT:1>]<EOL><DEDENT>store[next(c)] = {<EOL>'<STR_LIT:filename>': base_file_names[index],<EOL>'<STR_LIT>': objtype,<EOL>'<STR_LIT>': prefix,<EOL>'<STR_LIT>': last_prefix,<EOL>'<STR_LIT:name>': name,<EOL>'<STR_LIT>': shortanchor,<EOL>}<EOL><DEDENT><DEDENT>data.update({'<STR_LIT:store>': store})<EOL>return data<EOL>", "docstring": "Create a usable data structure for serializing.", "id": "f1803:c0:m0"}
{"signature": "def _word_ngrams(self, tokens):", "body": "<EOL>if self.stop_words is not None:<EOL><INDENT>tokens = [w for w in tokens if w not in self.stop_words]<EOL><DEDENT>min_n, max_n = self.ngram_range<EOL>if max_n != <NUM_LIT:1>:<EOL><INDENT>original_tokens = tokens<EOL>if min_n == <NUM_LIT:1>:<EOL><INDENT>tokens = list(original_tokens)<EOL>min_n += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>tokens = []<EOL><DEDENT>n_original_tokens = len(original_tokens)<EOL>tokens_append = tokens.append<EOL>space_join = \"<STR_LIT:U+0020>\".join<EOL>for n in range(min_n,<EOL>min(max_n + <NUM_LIT:1>, n_original_tokens + <NUM_LIT:1>)):<EOL><INDENT>for i in range(n_original_tokens - n + <NUM_LIT:1>):<EOL><INDENT>tokens_append(space_join(original_tokens[i: i + n]))<EOL><DEDENT><DEDENT><DEDENT>return tokens<EOL>", "docstring": "Turn tokens into a tokens of n-grams\n\nref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L124-L153", "id": "f1806:c0:m1"}
{"signature": "def _document_frequency(X):", "body": "if sp.isspmatrix_csr(X):<EOL><INDENT>return np.bincount(X.indices, minlength=X.shape[<NUM_LIT:1>])<EOL><DEDENT>return np.diff(sp.csc_matrix(X, copy=False).indptr)<EOL>", "docstring": "Count the number of non-zero values for each feature in sparse X.", "id": "f1806:m2"}
{"signature": "def _check_stop_list(stop):", "body": "if stop == \"<STR_LIT>\":<EOL><INDENT>return THAI_STOP_WORDS<EOL><DEDENT>elif isinstance(stop, six.string_types):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % stop)<EOL><DEDENT>elif stop is None:<EOL><INDENT>return None<EOL><DEDENT>return frozenset(stop)<EOL>", "docstring": "Check stop words list\nref: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L87-L95", "id": "f1806:m3"}
{"signature": "def evaluate(best_processed_path, model):", "body": "x_test_char, x_test_type, y_test = prepare_feature(best_processed_path, option='<STR_LIT:test>')<EOL>y_predict = model.predict([x_test_char, x_test_type])<EOL>y_predict = (y_predict.ravel() > <NUM_LIT:0.5>).astype(int)<EOL>f1score = f1_score(y_test, y_predict)<EOL>precision = precision_score(y_test, y_predict)<EOL>recall = recall_score(y_test, y_predict)<EOL>return f1score, precision, recall<EOL>", "docstring": "Evaluate model on splitted 10 percent testing set", "id": "f1807:m5"}
{"signature": "def generate_best_dataset(best_path, output_path='<STR_LIT>', create_val=False):", "body": "if not os.path.isdir(output_path):<EOL><INDENT>os.mkdir(output_path)<EOL><DEDENT>if not os.path.isdir(os.path.join(output_path, '<STR_LIT:train>')):<EOL><INDENT>os.makedirs(os.path.join(output_path, '<STR_LIT:train>'))<EOL><DEDENT>if not os.path.isdir(os.path.join(output_path, '<STR_LIT:test>')):<EOL><INDENT>os.makedirs(os.path.join(output_path, '<STR_LIT:test>'))<EOL><DEDENT>if not os.path.isdir(os.path.join(output_path, '<STR_LIT>')) and create_val:<EOL><INDENT>os.makedirs(os.path.join(output_path, '<STR_LIT>'))<EOL><DEDENT>for article_type in article_types:<EOL><INDENT>files = glob(os.path.join(best_path, article_type, '<STR_LIT>'))<EOL>files_train, files_test = train_test_split(files, random_state=<NUM_LIT:0>, test_size=<NUM_LIT:0.1>)<EOL>if create_val:<EOL><INDENT>files_train, files_val = train_test_split(files_train, random_state=<NUM_LIT:0>, test_size=<NUM_LIT:0.1>)<EOL>val_words = generate_words(files_val)<EOL>val_df = create_char_dataframe(val_words)<EOL>val_df.to_csv(os.path.join(output_path, '<STR_LIT>', '<STR_LIT>'.format(article_type)), index=False)<EOL><DEDENT>train_words = generate_words(files_train)<EOL>test_words = generate_words(files_test)<EOL>train_df = create_char_dataframe(train_words)<EOL>test_df = create_char_dataframe(test_words)<EOL>train_df.to_csv(os.path.join(output_path, '<STR_LIT:train>', '<STR_LIT>'.format(article_type)), index=False)<EOL>test_df.to_csv(os.path.join(output_path, '<STR_LIT:test>', '<STR_LIT>'.format(article_type)), index=False)<EOL>print(\"<STR_LIT>\".format(article_type))<EOL><DEDENT>", "docstring": "Generate CSV file for training and testing data\n\nInput\n=====\nbest_path: str, path to BEST folder which contains unzipped subfolder\n    'article', 'encyclopedia', 'news', 'novel'\n\ncleaned_data: str, path to output folder, the cleaned data will be saved\n    in the given folder name where training set will be 
stored in `train` folder\n    and testing set will be stored in `test` folder\n\ncreate_val: boolean, True or False, if True, divide training set into training set and\n    validation set in `val` folder", "id": "f1807:m2"}
{"signature": "def prepare_feature(best_processed_path, option='<STR_LIT:train>'):", "body": "<EOL>n_pad = <NUM_LIT><EOL>n_pad_2 = int((n_pad - <NUM_LIT:1>)/<NUM_LIT:2>)<EOL>pad = [{'<STR_LIT>': '<STR_LIT:U+0020>', '<STR_LIT:type>': '<STR_LIT:p>', '<STR_LIT:target>': True}]<EOL>df_pad = pd.DataFrame(pad * n_pad_2)<EOL>df = []<EOL>for article_type in article_types:<EOL><INDENT>df.append(pd.read_csv(os.path.join(best_processed_path, option, '<STR_LIT>'.format(article_type, option))))<EOL><DEDENT>df = pd.concat(df)<EOL>df = pd.concat((df_pad, df, df_pad)) <EOL>df['<STR_LIT>'] = df['<STR_LIT>'].map(lambda x: CHARS_MAP.get(x, <NUM_LIT>))<EOL>df['<STR_LIT:type>'] = df['<STR_LIT:type>'].map(lambda x: CHAR_TYPES_MAP.get(x, <NUM_LIT:4>))<EOL>df_pad = create_n_gram_df(df, n_pad=n_pad)<EOL>char_row = ['<STR_LIT>' + str(i + <NUM_LIT:1>) for i in range(n_pad_2)] +['<STR_LIT>' + str(i + <NUM_LIT:1>) for i in range(n_pad_2)] + ['<STR_LIT>']<EOL>type_row = ['<STR_LIT:type>' + str(i + <NUM_LIT:1>) for i in range(n_pad_2)] +['<STR_LIT>' + str(i + <NUM_LIT:1>) for i in range(n_pad_2)] + ['<STR_LIT:type>']<EOL>x_char = df_pad[char_row].as_matrix()<EOL>x_type = df_pad[type_row].as_matrix()<EOL>y = df_pad['<STR_LIT:target>'].astype(int).as_matrix()<EOL>return x_char, x_type, y<EOL>", "docstring": "Transform processed path into feature matrix and output array\n\nInput\n=====\nbest_processed_path: str, path to processed BEST dataset\n\noption: str, 'train' or 'test'", "id": "f1807:m3"}
{"signature": "def create_n_gram_df(df, n_pad):", "body": "n_pad_2 = int((n_pad - <NUM_LIT:1>)/<NUM_LIT:2>)<EOL>for i in range(n_pad_2):<EOL><INDENT>df['<STR_LIT>'.format(i+<NUM_LIT:1>)] = df['<STR_LIT>'].shift(i + <NUM_LIT:1>)<EOL>df['<STR_LIT>'.format(i+<NUM_LIT:1>)] = df['<STR_LIT:type>'].shift(i + <NUM_LIT:1>)<EOL>df['<STR_LIT>'.format(i+<NUM_LIT:1>)] = df['<STR_LIT>'].shift(-i - <NUM_LIT:1>)<EOL>df['<STR_LIT>'.format(i+<NUM_LIT:1>)] = df['<STR_LIT:type>'].shift(-i - <NUM_LIT:1>)<EOL><DEDENT>return df[n_pad_2: -n_pad_2]<EOL>", "docstring": "Given input dataframe, create feature dataframe of shifted characters", "id": "f1809:m1"}
{"signature": "@blog.command()<EOL>def update():", "body": "run('<STR_LIT>')<EOL>run('<STR_LIT>')<EOL>", "docstring": "Gets other people's changes from GitHub", "id": "f1826:m0"}
{"signature": "@blog.command()<EOL>@click.pass_context<EOL>def lint(context):", "body": "config = context.obj<EOL>try:<EOL><INDENT>run('<STR_LIT>'.format(<EOL>dir=config['<STR_LIT>'],<EOL>exclude='<STR_LIT:U+002C>'.join(EXCLUDE),<EOL>))<EOL><DEDENT>except SubprocessError:<EOL><INDENT>context.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Looks for errors in source code of your blog", "id": "f1827:m0"}
{"signature": "@blog.command()<EOL>@click.pass_context<EOL>def preview(context):", "body": "config = context.obj<EOL>pelican(config, '<STR_LIT>', '<STR_LIT>')<EOL>server_proc = None<EOL>os.chdir(config['<STR_LIT>'])<EOL>try:<EOL><INDENT>try:<EOL><INDENT>command = '<STR_LIT>' + str(PORT)<EOL>server_proc = run(command, bg=True)<EOL>time.sleep(<NUM_LIT:3>)<EOL>click.launch('<STR_LIT>')<EOL>time.sleep(<NUM_LIT:5>)<EOL>pelican(config, '<STR_LIT>')<EOL><DEDENT>except Exception:<EOL><INDENT>if server_proc is not None:<EOL><INDENT>server_proc.kill()<EOL><DEDENT>raise<EOL><DEDENT><DEDENT>except KeyboardInterrupt:<EOL><INDENT>abort(context)<EOL><DEDENT>", "docstring": "Opens local preview of your blog website", "id": "f1830:m0"}
{"signature": "def backwards(self, orm):", "body": "", "docstring": "Write your backwards methods here.", "id": "f1851:c0:m1"}
{"signature": "def forwards(self, orm):", "body": "<EOL>print(\"<STR_LIT>\")<EOL>ja_akt_stan=orm.JednostkaAdministracyjna.objects.all().aggregate(Max('<STR_LIT>'))['<STR_LIT>']<EOL>orm.JednostkaAdministracyjna.objects.filter(stan_na__exact=ja_akt_stan).update(aktywny=True)<EOL>orm.JednostkaAdministracyjna.objects.exclude(stan_na__exact=ja_akt_stan).update(aktywny=False)<EOL>print(\"<STR_LIT>\")<EOL>m_akt_stan=orm.Miejscowosc.objects.all().aggregate(Max('<STR_LIT>'))['<STR_LIT>']<EOL>orm.Miejscowosc.objects.filter(stan_na__exact=m_akt_stan).update(aktywny=True)<EOL>orm.Miejscowosc.objects.exclude(stan_na__exact=m_akt_stan).update(aktywny=False)<EOL>print(\"<STR_LIT>\")<EOL>rm_akt_stan=orm.RodzajMiejsowosci.objects.all().aggregate(Max('<STR_LIT>'))['<STR_LIT>']<EOL>orm.RodzajMiejsowosci.objects.filter(stan_na__exact=rm_akt_stan).update(aktywny=True)<EOL>orm.RodzajMiejsowosci.objects.exclude(stan_na__exact=rm_akt_stan).update(aktywny=False)<EOL>print(\"<STR_LIT>\")<EOL>u_akt_stan=orm.Ulica.objects.all().aggregate(Max('<STR_LIT>'))['<STR_LIT>']<EOL>orm.Ulica.objects.filter(stan_na__exact=u_akt_stan).update(aktywny=True)<EOL>orm.Ulica.objects.exclude(stan_na__exact=u_akt_stan).update(aktywny=False)<EOL>", "docstring": "Write your forwards methods here.", "id": "f1851:c0:m0"}
{"signature": "def forwards(self, orm):", "body": "<EOL>LEN_TYPE = {<EOL><NUM_LIT:7>: '<STR_LIT>',<EOL><NUM_LIT:4>: '<STR_LIT>',<EOL><NUM_LIT:2>: '<STR_LIT>',<EOL>}<EOL>for ja in orm.JednostkaAdministracyjna.objects.all():<EOL><INDENT>ja.typ = LEN_TYPE[len(ja.id)]<EOL>ja.save()<EOL><DEDENT>", "docstring": "Write your forwards methods here.", "id": "f1854:c0:m0"}
{"signature": "def do_filter(qs, keywords, exclude=False):", "body": "and_q = Q()<EOL>for keyword, value in iteritems(keywords):<EOL><INDENT>try:<EOL><INDENT>values = value.split(\"<STR_LIT:U+002C>\")<EOL>if len(values) > <NUM_LIT:0>:<EOL><INDENT>or_q = Q()<EOL>for value in values:<EOL><INDENT>or_q |= Q(**{keyword: value})<EOL><DEDENT>and_q &= or_q<EOL><DEDENT><DEDENT>except AttributeError:<EOL><INDENT>and_q &= Q(**{keyword: value})<EOL><DEDENT><DEDENT>if exclude:<EOL><INDENT>qs = qs.exclude(and_q)<EOL><DEDENT>else:<EOL><INDENT>qs = qs.filter(and_q)<EOL><DEDENT>return qs<EOL>", "docstring": "Filter queryset based on keywords.\nSupport for multiple-selected parent values.", "id": "f1868:m1"}
{"signature": "@property<EOL><INDENT>def media(self):<DEDENT>", "body": "media = super(JqueryMediaMixin, self).media<EOL>js = []<EOL>if JQUERY_URL:<EOL><INDENT>js.append(JQUERY_URL)<EOL><DEDENT>elif JQUERY_URL is not False:<EOL><INDENT>vendor = '<STR_LIT>' if django.VERSION < (<NUM_LIT:1>, <NUM_LIT:9>, <NUM_LIT:0>) else '<STR_LIT>'<EOL>extra = '<STR_LIT>' if settings.DEBUG else '<STR_LIT>'<EOL>jquery_paths = [<EOL>'<STR_LIT>'.format(vendor, extra),<EOL>'<STR_LIT>',<EOL>]<EOL>if USE_DJANGO_JQUERY:<EOL><INDENT>jquery_paths = ['<STR_LIT>'.format(path) for path in jquery_paths]<EOL><DEDENT>js.extend(jquery_paths)<EOL><DEDENT>media += Media(js=js)<EOL>return media<EOL>", "docstring": "Media defined as a dynamic property instead of an inner class.", "id": "f1870:c0:m0"}
{"signature": "def _get_available_choices(self, queryset, value):", "body": "item = queryset.filter(pk=value).first()<EOL>if item:<EOL><INDENT>try:<EOL><INDENT>pk = getattr(item, self.chained_model_field + \"<STR_LIT>\")<EOL>filter = {self.chained_model_field: pk}<EOL><DEDENT>except AttributeError:<EOL><INDENT>try:  <EOL><INDENT>pks = getattr(item, self.chained_model_field).all().values_list('<STR_LIT>', flat=True)<EOL>filter = {self.chained_model_field + \"<STR_LIT>\": pks}<EOL><DEDENT>except AttributeError:<EOL><INDENT>try:  <EOL><INDENT>pks = getattr(item, self.chained_model_field + \"<STR_LIT>\").all().values_list('<STR_LIT>', flat=True)<EOL>filter = {self.chained_model_field + \"<STR_LIT>\": pks}<EOL><DEDENT>except AttributeError:  <EOL><INDENT>filter = {}<EOL><DEDENT><DEDENT><DEDENT>filtered = list(get_model(self.to_app_name, self.to_model_name).objects.filter(**filter).distinct())<EOL>if self.sort:<EOL><INDENT>sort_results(filtered)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>filtered = []<EOL><DEDENT>return filtered<EOL>", "docstring": "get possible choices for selection", "id": "f1870:c1:m3"}
{"signature": "def command_callback(result=None):", "body": "print(\"<STR_LIT>\".format(opendnp3.TaskCompletionToString(result.summary)))<EOL>result.ForeachItem(collection_callback)<EOL>", "docstring": ":type result: opendnp3.ICommandTaskResult", "id": "f1888:m2"}
{"signature": "def OnReceiveIIN(self, iin):", "body": "self.iin_field = dict(<EOL>LSB=iin.LSB,<EOL>MSB=iin.MSB<EOL>)<EOL>", "docstring": "Called when a response or unsolicited response is receive from the outstation.", "id": "f1888:c0:m1"}
{"signature": "def OnTaskComplete(self, info):", "body": "self.task_info = dict(<EOL>type=info.type,<EOL>result=info.result<EOL>)<EOL>", "docstring": "Task completion notification.", "id": "f1888:c0:m3"}
{"signature": "def OnStateChange(self, state):", "body": "self.state = state<EOL>", "docstring": "State change notification.", "id": "f1888:c1:m1"}
{"signature": "def run_master(hang=False):", "body": "logger = asiodnp3.ConsoleLogger().Create()<EOL>manager = asiodnp3.DNP3Manager(<NUM_LIT:1>, asiodnp3.ConsoleLogger().Create())<EOL>channel = manager.AddTCPClient(\"<STR_LIT>\",<EOL>FILTERS,<EOL>asiopal.ChannelRetry(),<EOL>HOST,<EOL>LOCAL,<EOL>PORT,<EOL>asiodnp3.PrintingChannelListener().Create())<EOL>stack_config = asiodnp3.MasterStackConfig()<EOL>stack_config.master.responseTimeout = openpal.TimeDuration().Seconds(<NUM_LIT:2>)<EOL>stack_config.link.RemoteAddr = <NUM_LIT:10><EOL>soe_handler = asiodnp3.PrintingSOEHandler().Create()<EOL>default_master_app = asiodnp3.DefaultMasterApplication().Create()<EOL>master = channel.AddMaster(\"<STR_LIT>\",<EOL>soe_handler,<EOL>default_master_app,<EOL>stack_config)<EOL>master.Enable()<EOL>time.sleep(<NUM_LIT:2>)<EOL>if not hang:<EOL><INDENT>del channel<EOL>del master<EOL><DEDENT>print(\"<STR_LIT>\".format(hang))<EOL>manager.Shutdown()<EOL>", "docstring": "Demonstrate hanging when channel and master are not deleted prior to manager.Shutdown()", "id": "f1889:m0"}
{"signature": "def do_quit(self, line):", "body": "self.application.shutdown()<EOL>exit()<EOL>", "docstring": "Quit the command-line interface. Command syntax is: quit", "id": "f1897:c0:m19"}
{"signature": "def do_chan_log_normal(self, line):", "body": "self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.NORMAL))<EOL>print('<STR_LIT>'.format(opendnp3.levels.NORMAL))<EOL>", "docstring": "Set the channel log level to NORMAL. Command syntax is: chan_log_normal", "id": "f1897:c0:m4"}
{"signature": "def do_disable_unsol(self, line):", "body": "headers = [opendnp3.Header().AllObjects(<NUM_LIT>, <NUM_LIT:2>),<EOL>opendnp3.Header().AllObjects(<NUM_LIT>, <NUM_LIT:3>),<EOL>opendnp3.Header().AllObjects(<NUM_LIT>, <NUM_LIT:4>)]<EOL>self.application.master.PerformFunction(\"<STR_LIT>\",<EOL>opendnp3.FunctionCode.DISABLE_UNSOLICITED,<EOL>headers,<EOL>opendnp3.TaskConfig().Default())<EOL>", "docstring": "Perform the function DISABLE_UNSOLICITED. Command syntax is: disable_unsol", "id": "f1897:c0:m5"}
{"signature": "def do_o1(self, line):", "body": "self.application.send_direct_operate_command(opendnp3.ControlRelayOutputBlock(opendnp3.ControlCode.LATCH_ON),<EOL><NUM_LIT:5>,<EOL>command_callback)<EOL>", "docstring": "Send a DirectOperate BinaryOutput (group 12) index 5 LATCH_ON to the Outstation. Command syntax is: o1", "id": "f1897:c0:m8"}
{"signature": "def do_scan_fast(self, line):", "body": "self.application.fast_scan.Demand()<EOL>", "docstring": "Demand an immediate fast scan. Command syntax is: scan_fast", "id": "f1897:c0:m15"}
{"signature": "def do_chan_log_all(self, line):", "body": "self.application.channel.SetLogFilters(openpal.LogFilters(opendnp3.levels.ALL_COMMS))<EOL>print('<STR_LIT>'.format(opendnp3.levels.ALL_COMMS))<EOL>", "docstring": "Set the channel log level to ALL_COMMS. Command syntax is: chan_log_all", "id": "f1897:c0:m3"}
{"signature": "def do_write_time(self, line):", "body": "millis_since_epoch = int((datetime.now() - datetime.utcfromtimestamp(<NUM_LIT:0>)).total_seconds() * <NUM_LIT>)<EOL>self.application.master.Write(opendnp3.TimeAndInterval(opendnp3.DNPTime(millis_since_epoch),<EOL><NUM_LIT:100>,<EOL>opendnp3.IntervalUnits.Seconds),<EOL><NUM_LIT:0>,                            <EOL>opendnp3.TaskConfig().Default())<EOL>", "docstring": "Write a TimeAndInterval to the Outstation. Command syntax is: write_time", "id": "f1897:c0:m18"}
{"signature": "def do_mast_log_normal(self, line):", "body": "self.application.master.SetLogFilters(openpal.LogFilters(opendnp3.levels.NORMAL))<EOL>_log.debug('<STR_LIT>'.format(opendnp3.levels.NORMAL))<EOL>", "docstring": "Set the master log level to NORMAL. Command syntax is: mast_log_normal", "id": "f1897:c0:m7"}
{"signature": "def startup(self):", "body": "print('<STR_LIT>')<EOL>self.do_menu('<STR_LIT>')<EOL>self.cmdloop('<STR_LIT>')<EOL>exit()<EOL>", "docstring": "Display the command-line interface's menu and issue a prompt.", "id": "f1897:c0:m1"}
{"signature": "def send_select_and_operate_command(self, command, index, callback=asiodnp3.PrintingCommandCallback.Get(),<EOL>config=opendnp3.TaskConfig().Default()):", "body": "self.master.SelectAndOperate(command, index, callback, config)<EOL>", "docstring": "Select and operate a single command\n\n:param command: command to operate\n:param index: index of the command\n:param callback: callback that will be invoked upon completion or failure\n:param config: optional configuration that controls normal callbacks and allows the user to be specified for SA", "id": "f1899:c0:m3"}
{"signature": "def send_select_and_operate_command_set(self, command_set, callback=asiodnp3.PrintingCommandCallback.Get(),<EOL>config=opendnp3.TaskConfig().Default()):", "body": "self.master.SelectAndOperate(command_set, callback, config)<EOL>", "docstring": "Select and operate a set of commands\n\n:param command_set: set of command headers\n:param callback: callback that will be invoked upon completion or failure\n:param config: optional configuration that controls normal callbacks and allows the user to be specified for SA", "id": "f1899:c0:m4"}
{"signature": "def send_direct_operate_command_set(self, command_set, callback=asiodnp3.PrintingCommandCallback.Get(),<EOL>config=opendnp3.TaskConfig().Default()):", "body": "self.master.DirectOperate(command_set, callback, config)<EOL>", "docstring": "Direct operate a set of commands\n\n:param command_set: set of command headers\n:param callback: callback that will be invoked upon completion or failure\n:param config: optional configuration that controls normal callbacks and allows the user to be specified for SA", "id": "f1899:c0:m2"}
{"signature": "def command_callback(result=None):", "body": "print(\"<STR_LIT>\".format(opendnp3.TaskCompletionToString(result.summary)))<EOL>result.ForeachItem(collection_callback)<EOL>", "docstring": ":type result: opendnp3.ICommandTaskResult", "id": "f1899:m1"}
{"signature": "def main():", "body": "<EOL>app = MyMaster(log_handler=MyLogger(),<EOL>listener=AppChannelListener(),<EOL>soe_handler=SOEHandler(),<EOL>master_application=MasterApplication())<EOL>_log.debug('<STR_LIT>')<EOL>app.shutdown()<EOL>_log.debug('<STR_LIT>')<EOL>exit()<EOL>", "docstring": "The Master has been started from the command line. Execute ad-hoc tests if desired.", "id": "f1899:m3"}
{"signature": "def Process(self, info, values):", "body": "visitor_class_types = {<EOL>opendnp3.ICollectionIndexedBinary: VisitorIndexedBinary,<EOL>opendnp3.ICollectionIndexedDoubleBitBinary: VisitorIndexedDoubleBitBinary,<EOL>opendnp3.ICollectionIndexedCounter: VisitorIndexedCounter,<EOL>opendnp3.ICollectionIndexedFrozenCounter: VisitorIndexedFrozenCounter,<EOL>opendnp3.ICollectionIndexedAnalog: VisitorIndexedAnalog,<EOL>opendnp3.ICollectionIndexedBinaryOutputStatus: VisitorIndexedBinaryOutputStatus,<EOL>opendnp3.ICollectionIndexedAnalogOutputStatus: VisitorIndexedAnalogOutputStatus,<EOL>opendnp3.ICollectionIndexedTimeAndInterval: VisitorIndexedTimeAndInterval<EOL>}<EOL>visitor_class = visitor_class_types[type(values)]<EOL>visitor = visitor_class()<EOL>values.Foreach(visitor)<EOL>for index, value in visitor.index_and_value:<EOL><INDENT>log_string = '<STR_LIT>'<EOL>_log.debug(log_string.format(info.gv, info.headerIndex, type(values).__name__, index, value))<EOL><DEDENT>", "docstring": "Process measurement data.\n\n:param info: HeaderInfo\n:param values: A collection of values received from the Outstation (various data types are possible).", "id": "f1899:c3:m1"}
{"signature": "def apply_update(self, value, index):", "body": "_log.debug('<STR_LIT>'.format(type(value).__name__, index, value.value))<EOL>builder = asiodnp3.UpdateBuilder()<EOL>builder.Update(value, index)<EOL>update = builder.Build()<EOL>OutstationApplication.get_outstation().Apply(update)<EOL>", "docstring": "Record an opendnp3 data value (Analog, Binary, etc.) in the outstation's database.\n\n    The data value gets sent to the Master as a side-effect.\n\n:param value: An instance of Analog, Binary, or another opendnp3 data value.\n:param index: (integer) Index of the data definition in the opendnp3 database.", "id": "f1900:c0:m13"}
{"signature": "@classmethod<EOL><INDENT>def process_point_value(cls, command_type, command, index, op_type):<DEDENT>", "body": "_log.debug('<STR_LIT>'.format(index, command))<EOL>", "docstring": "A PointValue was received from the Master. Process its payload.\n\n:param command_type: (string) Either 'Select' or 'Operate'.\n:param command: A ControlRelayOutputBlock or else a wrapped data value (AnalogOutputInt16, etc.).\n:param index: (integer) DNP3 index of the payload's data definition.\n:param op_type: An OperateType, or None if command_type == 'Select'.", "id": "f1900:c0:m12"}
{"signature": "def GetApplicationIIN(self):", "body": "application_iin = opendnp3.ApplicationIIN()<EOL>application_iin.configCorrupt = False<EOL>application_iin.deviceTrouble = False<EOL>application_iin.localControl = False<EOL>application_iin.needTime = False<EOL>iin_field = application_iin.ToIIN()<EOL>_log.debug('<STR_LIT>'.format(iin_field.LSB,<EOL>iin_field.MSB))<EOL>return application_iin<EOL>", "docstring": "Return the application-controlled IIN field.", "id": "f1900:c0:m7"}
{"signature": "def do_b(self, line):", "body": "index, value_string = self.index_and_value_from_line(line)<EOL>if index and value_string:<EOL><INDENT>if value_string.lower() == '<STR_LIT:true>' or value_string.lower() == '<STR_LIT:false>':<EOL><INDENT>self.application.apply_update(opendnp3.Binary(value_string == '<STR_LIT:true>'), index)<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Send the Master a BinaryInput (group 2) value. Command syntax is: 'b index true' or 'b index false", "id": "f1901:c0:m4"}
{"signature": "def do_a(self, line):", "body": "index, value_string = self.index_and_value_from_line(line)<EOL>if index and value_string:<EOL><INDENT>try:<EOL><INDENT>self.application.apply_update(opendnp3.Analog(float(value_string)), index)<EOL><DEDENT>except ValueError:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Send the Master an AnalogInput (group 32) value. Command syntax is: a index value", "id": "f1901:c0:m2"}
{"signature": "def startup(self):", "body": "print('<STR_LIT>')<EOL>self.do_menu('<STR_LIT>')<EOL>self.cmdloop('<STR_LIT>')<EOL>exit()<EOL>", "docstring": "Display the command-line interface's menu and issue a prompt.", "id": "f1901:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def index_and_value_from_line(line):<DEDENT>", "body": "try:<EOL><INDENT>index = int(line.split('<STR_LIT:U+0020>')[<NUM_LIT:0>])<EOL><DEDENT>except (ValueError, IndexError):<EOL><INDENT>print('<STR_LIT>')<EOL>index = None<EOL><DEDENT>try:<EOL><INDENT>value_string = line.split('<STR_LIT:U+0020>')[<NUM_LIT:1>]<EOL><DEDENT>except (ValueError, IndexError):<EOL><INDENT>print('<STR_LIT>')<EOL>value_string = None<EOL><DEDENT>return index, value_string<EOL>", "docstring": "Parse an index (integer) and value (string) from command line args and return them.", "id": "f1901:c0:m10"}
{"signature": "def do_a2(self, line):", "body": "self.application.apply_update(opendnp3.Analog(<NUM_LIT:2>), index=<NUM_LIT:4>)<EOL>", "docstring": "Send the Master an AnalogInput (group 32) value of 2 at index 4. Command syntax is: a2", "id": "f1901:c0:m3"}
{"signature": "def do_d(self, line):", "body": "index = self.index_from_line(line)<EOL>if index:<EOL><INDENT>self.application.apply_update(opendnp3.DoubleBitBinary(opendnp3.DoubleBit.DETERMINED_ON), index)<EOL><DEDENT>", "docstring": "Send the Master a DoubleBitBinaryInput (group 4) value of DETERMINED_ON. Command syntax is: d index", "id": "f1901:c0:m7"}
{"signature": "def __init__(self, routing, default, python_path):", "body": "def make(x):<EOL><INDENT>if isinstance(x, (list, str)):<EOL><INDENT>return ActionList(x)<EOL><DEDENT>assert isinstance(x, dict)<EOL>if '<STR_LIT>' in x or '<STR_LIT>' in x:<EOL><INDENT>x = dict(default, **x)<EOL>return construct.construct_type(x, python_path)<EOL><DEDENT>return {k: make(v) for k, v in x.items()}<EOL><DEDENT>routing = flatten.unflatten(routing)<EOL>self.routing = make(routing)<EOL>", "docstring": ":param dict routing: `routing` is a dict that maps addresses\n   to lists of actions.\n\n   The values in the input dictionary `routing` are recursively visited\n   to build the routing table:\n\n   * values that are strings or lists are used to construct ActionLists\n   * dictionaries that contain \"typename\" or \"datatype\" keys are\n     used to construct a class of that type.\n   * otherwise, dictionaries are visited recursively\n   * all other types are forbidden", "id": "f1907:c0:m0"}
{"signature": "def multi(method):", "body": "@functools.wraps(method)<EOL>def multi(self, address='<STR_LIT>'):<EOL><INDENT>values = flask.request.values<EOL>address = urllib.parse.unquote_plus(address)<EOL>if address and values and not address.endswith('<STR_LIT:.>'):<EOL><INDENT>address += '<STR_LIT:.>'<EOL><DEDENT>result = {}<EOL>for a in values or '<STR_LIT>':<EOL><INDENT>try:<EOL><INDENT>if not self.project:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>ed = editor.Editor(address + a, self.project)<EOL>result[address + a] = {'<STR_LIT:value>': method(self, ed, a)}<EOL><DEDENT>except:<EOL><INDENT>if self.project:<EOL><INDENT>traceback.print_exc()<EOL><DEDENT>result[address + a] = {'<STR_LIT:error>': '<STR_LIT>' % a}<EOL><DEDENT><DEDENT>return flask.jsonify(result)<EOL><DEDENT>return multi<EOL>", "docstring": "Decorator for RestServer methods that take multiple addresses", "id": "f1908:m1"}
{"signature": "def extract(self, msg):", "body": "def normal(key):<EOL><INDENT>v = msg.get(key)<EOL>if v is None:<EOL><INDENT>return v<EOL><DEDENT>normalizer = self.normalizers.get(key, lambda x: x)<EOL>return normalizer(v)<EOL><DEDENT>def odict(keys):<EOL><INDENT>return collections.OrderedDict((k, normal(k)) for k in keys)<EOL><DEDENT>def match(m):<EOL><INDENT>return (msg.get(k) in v for k, v in m.items()) if m else ()<EOL><DEDENT>accept = all(match(self.accept))<EOL>reject = any(match(self.reject))<EOL>if reject or not accept:<EOL><INDENT>keys = ()<EOL><DEDENT>elif self.keys_by_type is None:<EOL><INDENT>keys = [k for k in msg.keys() if k not in self.omit]<EOL><DEDENT>else:<EOL><INDENT>keys = self.keys_by_type.get(msg.get('<STR_LIT:type>'))<EOL><DEDENT>return odict(keys)<EOL>", "docstring": "Yield an ordered dictionary if msg['type'] is in keys_by_type.", "id": "f1918:c0:m1"}
{"signature": "def stop(self=None):", "body": "if not self:<EOL><INDENT>instance = getattr(Runner.instance(), '<STR_LIT>', None)<EOL>self = instance and instance()<EOL>if not self:<EOL><INDENT>return<EOL><DEDENT><DEDENT>self._runner.stop()<EOL>if self.project:<EOL><INDENT>self.project.stop()<EOL>self.project = None<EOL><DEDENT>", "docstring": "Stop the builder if it's running.", "id": "f1925:c0:m2"}
{"signature": "def update(desc, other=None, **kwds):", "body": "other = other and _as_dict(other) or {}<EOL>for i in other, kwds:<EOL><INDENT>for k, v in i.items():<EOL><INDENT>if isinstance(v, dict):<EOL><INDENT>old_v = desc[k]<EOL>for k2, v2 in v.items():<EOL><INDENT>if v2 is None:<EOL><INDENT>old_v.pop(k2, None)<EOL><DEDENT>else:<EOL><INDENT>old_v[k2] = v2<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>set_one(desc, k, v)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Update sections in a Project description", "id": "f1926:m1"}
{"signature": "def clear(self):", "body": "self._desc = {}<EOL>for key, value in merge.DEFAULT_PROJECT.items():<EOL><INDENT>if key not in self._HIDDEN:<EOL><INDENT>self._desc[key] = type(value)()<EOL><DEDENT><DEDENT>", "docstring": "Clear description to default values", "id": "f1928:c0:m1"}
{"signature": "def as_dict(self):", "body": "return {k: v for k, v in self.items() if v}<EOL>", "docstring": "Returns a dictionary of non-empty description", "id": "f1928:c0:m4"}
{"signature": "def __getitem__(self, index):", "body": "index = self._check_index(index)<EOL>return self.layout.get(*index)<EOL>", "docstring": "Returns the r, g, b pixel at a location in the layout.  May only be\ncalled if self.is_running is true.", "id": "f1929:c0:m1"}
{"signature": "def clear(self):", "body": "self.desc.clear()<EOL>", "docstring": "Clear description to default values", "id": "f1930:c0:m3"}
{"signature": "@classmethod<EOL><INDENT>def instance(cls):<DEDENT>", "body": "return cls._INSTANCE and cls._INSTANCE()<EOL>", "docstring": "Return the unique instance of Runner, if any, or None", "id": "f1931:c0:m3"}
{"signature": "def stop(self):", "body": "if self.is_running:<EOL><INDENT>log.info('<STR_LIT>')<EOL>self.is_running = False<EOL>self.__class__._INSTANCE = None<EOL>try:<EOL><INDENT>self.thread and self.thread.stop()<EOL><DEDENT>except:<EOL><INDENT>log.error('<STR_LIT>')<EOL>traceback.print_exc()<EOL><DEDENT>self.thread = None<EOL>return True<EOL><DEDENT>", "docstring": "Stop the Runner if it's running.\nCalled as a classmethod, stop the running instance if any.", "id": "f1931:c0:m2"}
{"signature": "def __init__(self, *args, limit=None, **kwds):", "body": "super().__init__(*args, **kwds)<EOL>self.limit = Limit(**(limit or {}))<EOL>self._math = color_list.Math(self.color_list)<EOL>", "docstring": ":param dict limit: A construction dictionary for a Limit.", "id": "f1947:c0:m0"}
{"signature": "@property<EOL><INDENT>def _led(self):<DEDENT>", "body": "return self.layout<EOL>", "docstring": "Many BiblioPixelAnimations use the \"protected\" variable _led.", "id": "f1952:c0:m3"}
{"signature": "@classmethod<EOL><INDENT>def construct(cls, project, *, run=None, name=None, data=None, **desc):<DEDENT>", "body": "from . failed import Failed<EOL>exception = desc.pop('<STR_LIT>', None)<EOL>if exception:<EOL><INDENT>a = Failed(project.layout, desc, exception)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>a = cls(project.layout, **desc)<EOL>a._set_runner(run or {})<EOL><DEDENT>except Exception as e:<EOL><INDENT>if cls.FAIL_ON_EXCEPTION:<EOL><INDENT>raise<EOL><DEDENT>a = Failed(project.layout, desc, e)<EOL><DEDENT><DEDENT>a.name = name<EOL>a.data = data<EOL>return a<EOL>", "docstring": "Construct an animation, set the runner, and add in the two\n\"reserved fields\" `name` and `data`.", "id": "f1952:c0:m0"}
{"signature": "def __init__(self, *args, overlay=False, detach=True, **kwds):", "body": "super().__init__(*args, **kwds)<EOL>if detach:<EOL><INDENT>self.detach(overlay)<EOL><DEDENT>", "docstring": "If overlay is True, then preclear is set to False for everything\nother than the first animation.", "id": "f1954:c0:m0"}
{"signature": "def raw_opener(ip_address, port, delay=<NUM_LIT:1>):", "body": "def target():<EOL><INDENT>time.sleep(delay)<EOL>url = '<STR_LIT>' % (ip_address, port)<EOL>webbrowser.open(url, new=<NUM_LIT:0>, autoraise=True)<EOL><DEDENT>threading.Thread(target=target, daemon=True).start()<EOL>", "docstring": "Wait a little and then open a web browser page for the control panel.", "id": "f1966:m1"}
{"signature": "def _clean_animation(desc, parent):", "body": "desc = load.load_if_filename(desc) or desc<EOL>if isinstance(desc, str):<EOL><INDENT>animation = {'<STR_LIT>': desc}<EOL><DEDENT>elif not isinstance(desc, dict):<EOL><INDENT>raise TypeError('<STR_LIT>' % type(desc))<EOL><DEDENT>elif '<STR_LIT>' in desc or '<STR_LIT>' not in desc:<EOL><INDENT>animation = desc<EOL><DEDENT>else:<EOL><INDENT>animation = desc.pop('<STR_LIT>', {})<EOL>if isinstance(animation, str):<EOL><INDENT>animation = {'<STR_LIT>': animation}<EOL><DEDENT>animation['<STR_LIT>'] = desc.pop('<STR_LIT>', {})<EOL>if desc:<EOL><INDENT>raise ValueError('<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(desc))<EOL><DEDENT><DEDENT>animation.setdefault('<STR_LIT>', DEFAULT_ANIMATION)<EOL>animation = construct.to_type_constructor(animation, ANIMATION_PATH)<EOL>datatype = animation.setdefault('<STR_LIT>', failed.Failed)<EOL>animation.setdefault('<STR_LIT:name>', datatype.__name__)<EOL>run = animation.setdefault('<STR_LIT>', {})<EOL>run_parent = parent.setdefault('<STR_LIT>', {})<EOL>if not ('<STR_LIT>' in run or '<STR_LIT>' in run):<EOL><INDENT>if '<STR_LIT>' in run_parent:<EOL><INDENT>run.update(fps=run_parent['<STR_LIT>'])<EOL><DEDENT>elif '<STR_LIT>' in run_parent:<EOL><INDENT>run.update(sleep_time=run_parent['<STR_LIT>'])<EOL><DEDENT><DEDENT>return animation<EOL>", "docstring": "Cleans up all sorts of special cases that humans want when entering\nan animation from a yaml file.\n\n1. Loading it from a file\n2. Using just a typename instead of a dict\n3. A single dict representing an animation, with a run: section.\n4. (Legacy) Having a dict with parallel elements run: and animation:\n5. (Legacy) A tuple or list: (animation, run )", "id": "f1968:m0"}
{"signature": "def detach(self, overlay):", "body": "<EOL>for i, a in enumerate(self.animations):<EOL><INDENT>a.layout = a.layout.clone()<EOL>if overlay and i:<EOL><INDENT>a.preclear = False<EOL><DEDENT><DEDENT>", "docstring": "Give each animation a unique, mutable layout so they can run\nindependently.", "id": "f1968:c0:m6"}
{"signature": "def __init__(self, *args, size=<NUM_LIT:100>, **kwds):", "body": "super().__init__(*args, detach=False, **kwds)<EOL>if not size:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.size = size if isinstance(size, list) else [size]<EOL>self.is_numpy = hasattr(self.color_list, '<STR_LIT>')<EOL>for animation, begin, end in self._foreach():<EOL><INDENT>animation.layout = Strip([], color_list=self.color_list[begin:end])<EOL><DEDENT>", "docstring": "Arguments --\n    size: a number or a list of numbers representing the size of each\n        segment from the original layout.  If there aren't enough sizes\n        for each segment, the list of sizes is reused repeatedly.", "id": "f1971:c0:m0"}
{"signature": "@property<EOL><INDENT>def index(self):<DEDENT>", "body": "return self._index<EOL>", "docstring": ":returns int: index of the current animation within the Collection.", "id": "f1972:c0:m0"}
{"signature": "def step(self, amt=<NUM_LIT:1>):", "body": "if not self._stop_event.isSet():<EOL><INDENT>self._hold_for_data.wait()<EOL>self._hold_for_data.clear()<EOL><DEDENT>", "docstring": "This may seem silly, but on a Receiver step() need not do anything.\nInstead, receive the data on the receive thread and set it on the buffer\nthen call self._hold_for_data.set()", "id": "f1973:c1:m6"}
{"signature": "def adapt_animation_layout(animation):", "body": "layout = animation.layout<EOL>required = getattr(animation, '<STR_LIT>', None)<EOL>if not required or isinstance(layout, required):<EOL><INDENT>return<EOL><DEDENT>msg = LAYOUT_WARNING % (<EOL>type(animation).__name__, required.__name__, type(layout).__name__)<EOL>setter = layout.set<EOL>adaptor = None<EOL>if required is strip.Strip:<EOL><INDENT>if isinstance(layout, matrix.Matrix):<EOL><INDENT>width = layout.width<EOL>def adaptor(pixel, color=None):<EOL><INDENT>y, x = divmod(pixel, width)<EOL>setter(x, y, color or BLACK)<EOL><DEDENT><DEDENT>elif isinstance(layout, cube.Cube):<EOL><INDENT>lx, ly = layout.x, layout.y<EOL>def adaptor(pixel, color=None):<EOL><INDENT>yz, x = divmod(pixel, lx)<EOL>z, y = divmod(yz, ly)<EOL>setter(x, y, z, color or BLACK)<EOL><DEDENT><DEDENT>elif isinstance(layout, circle.Circle):<EOL><INDENT>def adaptor(pixel, color=None):<EOL><INDENT>layout._set_base(pixel, color or BLACK)<EOL><DEDENT><DEDENT><DEDENT>elif required is matrix.Matrix:<EOL><INDENT>if isinstance(layout, strip.Strip):<EOL><INDENT>width = animation.width<EOL>def adaptor(x, y, color=None):<EOL><INDENT>setter(x + y * width, color or BLACK)<EOL><DEDENT><DEDENT><DEDENT>if not adaptor:<EOL><INDENT>raise ValueError(msg)<EOL><DEDENT>log.warning(msg)<EOL>animation.layout.set = adaptor<EOL>", "docstring": "Adapt the setter in an animation's layout so that Strip animations can run\non on Matrix, Cube, or Circle layout, and Matrix or Cube animations can run\non a Strip layout.", "id": "f1979:m0"}
{"signature": "def read_from(self, data, pad=<NUM_LIT:0>):", "body": "for i in range(self.BEGIN, self.END + <NUM_LIT:1>):<EOL><INDENT>index = self.index(i, len(data))<EOL>yield pad if index is None else data[index]<EOL><DEDENT>", "docstring": "Returns a generator with the elements \"data\" taken by offset, restricted\nby self.begin and self.end, and padded on either end by `pad` to get\nback to the original length of `data`", "id": "f1982:c0:m3"}
{"signature": "def __init__(self, offset=<NUM_LIT:0>, begin=None, end=None):", "body": "self.begin = self.BEGIN if begin is None else begin<EOL>self.end = self.END if end is None else end<EOL>if not (self.BEGIN <= self.begin <= self.end <= self.END):<EOL><INDENT>raise ValueError('<STR_LIT>' %<EOL>(self.BEGIN, self.begin, self.end, self.END))<EOL><DEDENT>self.offset = offset<EOL>", "docstring": "Unlike a `range`, an OffsetRange includes both its begin *and* its end,\nso it's closer to how regular people think of a range - for example\nthat DMX channels are in the range 1-512.", "id": "f1982:c0:m0"}
{"signature": "def pointOnCircle(cx, cy, radius, angle):", "body": "angle = math.radians(angle) - (math.pi / <NUM_LIT:2>)<EOL>x = cx + radius * math.cos(angle)<EOL>if x < cx:<EOL><INDENT>x = math.ceil(x)<EOL><DEDENT>else:<EOL><INDENT>x = math.floor(x)<EOL><DEDENT>y = cy + radius * math.sin(angle)<EOL>if y < cy:<EOL><INDENT>y = math.ceil(y)<EOL><DEDENT>else:<EOL><INDENT>y = math.floor(y)<EOL><DEDENT>return (int(x), int(y))<EOL>", "docstring": "Calculates the coordinates of a point on a circle given the center point,\nradius, and angle.", "id": "f1983:m6"}
{"signature": "def parse(s):", "body": "parts = s.replace('<STR_LIT:U+002C>', '<STR_LIT:U+0020>').split()<EOL>if not parts:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>pieces = []<EOL>for part in parts:<EOL><INDENT>m = PART_MATCH(part)<EOL>pieces.extend(m.groups() if m else [part])<EOL><DEDENT>if len(pieces) == <NUM_LIT:1>:<EOL><INDENT>pieces.append('<STR_LIT:s>')<EOL><DEDENT>if len(pieces) % <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>' % (s, parts, pieces))<EOL><DEDENT>result = <NUM_LIT:0><EOL>for number, units in zip(*[iter(pieces)] * <NUM_LIT:2>):<EOL><INDENT>number = float(number)<EOL>if number < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>result += number * _get_units(units)<EOL><DEDENT>return result<EOL>", "docstring": "Parse a string representing a time interval or duration into seconds,\nor raise an exception\n\n:param str s: a string representation of a time interval\n:raises ValueError: if ``s`` can't be interpreted as a duration", "id": "f1987:m1"}
{"signature": "def __init__(self, filename):", "body": "self.__filename = filename<EOL>data = data_file.load(filename) if os.path.exists(filename) else {}<EOL>super().__init__(data)<EOL>", "docstring": ":param c: the filename to store the DATA_FILE in", "id": "f1988:c0:m0"}
{"signature": "def __init__(self, constructor, **kwds):", "body": "self.servers = {}<EOL>self.constructor = constructor<EOL>self.kwds = kwds<EOL>", "docstring": ":param constructor: a function which takes a key and some keywords,\n    and returns a new server\n:param kwds: keywords to the ``constructor`` function", "id": "f1989:c1:m0"}
{"signature": "def __init__(self, ratio=<NUM_LIT:1>, knee=<NUM_LIT:0>, gain=<NUM_LIT:1>, enable=True):", "body": "<EOL>self.ratio = ratio<EOL>self.knee = knee<EOL>self.gain = gain<EOL>self.enable = enable<EOL>", "docstring": ":param float ratio: the compression ratio (1 means no compression).\n    ratio should usually between 0 and 1.\n\n:param float knee: the ratio where the compression starts to kick in.\n    knee should usually be 0 <= knee <= ratio\n\n:param float gain: post limiter output gain. gain should usually be >= 0", "id": "f1990:c0:m0"}
{"signature": "@property<EOL><INDENT>def running(self):<DEDENT>", "body": "return self.run_event.is_set() and not self.stop_event.is_set()<EOL>", "docstring": "Is this Runnable expected to make any progress from here?\n\nThe Runnable might still execute a little code after it has stopped\nrunning.", "id": "f1993:c0:m1"}
{"signature": "def cleanup(self):", "body": "", "docstring": "Cleans up resources after the Runnable.\n\nself.cleanup() may not throw an exception.", "id": "f1993:c0:m6"}
{"signature": "@contextlib.contextmanager<EOL><INDENT>def run_until_stop(self):<DEDENT>", "body": "self.start()<EOL>try:<EOL><INDENT>yield self<EOL><DEDENT>finally:<EOL><INDENT>self.stop()<EOL><DEDENT>self.wait()<EOL>", "docstring": "A context manager that starts this Runnable, yields,\nand then waits for it to finish.", "id": "f1993:c0:m9"}
{"signature": "def run(function, *args, use_subprocess=False, daemon=True, **kwds):", "body": "if use_subprocess:<EOL><INDENT>Creator, Queue = multiprocessing.Process, multiprocessing.Queue<EOL><DEDENT>else:<EOL><INDENT>Creator, Queue = threading.Thread, queue.Queue<EOL><DEDENT>input, output = Queue(), Queue()<EOL>args = input, output, function, args<EOL>sub = Creator(target=_run_locally, args=args, kwargs=kwds, daemon=daemon)<EOL>sub.start()<EOL>return sub, input, output<EOL>", "docstring": "Create input, output queues, call `function` in a subprocess or a thread.\n\n``function`` is called like this: ``function(input, output, *args, **kwds)``\n\n:param use_subprocess: if true, create a new multiprocess;\n                       if false, create a new thread\n:param function: the function to call\n:param daemon: is the thread or subprocess run as a daemon or not?\n\n:param args: positional arguments to the function\n:param kwds: keyword arguments to the function\n:returns: a tuple with three elements: the subprocess or thread, an input\n          queue, and an output queue.", "id": "f1995:m1"}
{"signature": "def __init__(self, function, errors):", "body": "assert isinstance(errors, int) or errors in (<EOL>'<STR_LIT>', '<STR_LIT:ignore>', '<STR_LIT>')<EOL>self.function = function<EOL>self.errors = errors<EOL>self.error_count = <NUM_LIT:0><EOL>", "docstring": ":param function: the function to wrap\n:param errors: either a number, indicating how many errors to report\n   before ignoring them, or one of these strings:\n   'raise', meaning to raise an exception\n   'ignore', meaning to ignore all errors\n   'report', meaning to report all errors", "id": "f1997:c0:m0"}
{"signature": "def set_log_level(level):", "body": "if isinstance(level, str):<EOL><INDENT>level = LOG_NAMES[level.lower()]<EOL><DEDENT>logger.setLevel(level)<EOL>", "docstring": ":param level: the level to set - either a string level name from\n              'frame', 'debug', 'info', 'warning', 'error'\n              or an integer log level from:\n              log.FRAME, log.DEBUG, log.INFO, log.WARNING, log.ERROR", "id": "f1998:m4"}
{"signature": "def dumps(data, use_yaml=None, safe=True, **kwds):", "body": "if use_yaml is None:<EOL><INDENT>use_yaml = ALWAYS_DUMP_YAML<EOL><DEDENT>if use_yaml:<EOL><INDENT>dumps = yaml.safe_dump if safe else yaml.dump<EOL><DEDENT>else:<EOL><INDENT>dumps = json.dumps<EOL>kwds.update(indent=<NUM_LIT:4>, sort_keys=True)<EOL>if not safe:<EOL><INDENT>kwds.update(default=repr)<EOL><DEDENT><DEDENT>return dumps(data, **kwds)<EOL>", "docstring": "Dumps data into a nicely formatted JSON string.\n\n:param dict data: a dictionary to dump\n:param kwds: keywords to pass to json.dumps\n:returns: a string with formatted data\n:rtype: str", "id": "f1999:m0"}
{"signature": "def load(file, use_yaml=None):", "body": "if isinstance(file, str):<EOL><INDENT>fp = open(file)<EOL>filename = file<EOL><DEDENT>else:<EOL><INDENT>fp = file<EOL>filename = getattr(fp, '<STR_LIT:name>', '<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>return loads(fp.read(), use_yaml, filename)<EOL><DEDENT>except Exception as e:<EOL><INDENT>e.args = ('<STR_LIT>', filename) + e.args<EOL>raise<EOL><DEDENT>", "docstring": "Loads not only JSON files but also YAML files ending in .yml.\n\n:param file: a filename or file handle to read from\n:returns: the data loaded from the JSON or YAML file\n:rtype: dict", "id": "f1999:m3"}
{"signature": "def dump(data, file=sys.stdout, use_yaml=None, **kwds):", "body": "if use_yaml is None:<EOL><INDENT>use_yaml = ALWAYS_DUMP_YAML<EOL><DEDENT>def dump(fp):<EOL><INDENT>if use_yaml:<EOL><INDENT>yaml.safe_dump(data, stream=fp, **kwds)<EOL><DEDENT>else:<EOL><INDENT>json.dump(data, fp, indent=<NUM_LIT:4>, sort_keys=True, **kwds)<EOL><DEDENT><DEDENT>if not isinstance(file, str):<EOL><INDENT>return dump(file)<EOL><DEDENT>if os.path.isabs(file):<EOL><INDENT>parent = os.path.dirname(file)<EOL>if not os.path.exists(parent):<EOL><INDENT>os.makedirs(parent, exist_ok=True)<EOL><DEDENT><DEDENT>with open(file, '<STR_LIT:w>') as fp:<EOL><INDENT>return dump(fp)<EOL><DEDENT>", "docstring": "Dumps data as nicely formatted JSON string to a file or file handle\n\n:param dict data: a dictionary to dump\n:param file: a filename or file handle to write to\n:param kwds: keywords to pass to json.dump", "id": "f1999:m1"}
{"signature": "def advance_permutation(a, increasing=True, forward=True):", "body": "if not forward:<EOL><INDENT>a.reverse()<EOL><DEDENT>cmp = operator.lt if increasing else operator.gt<EOL>try:<EOL><INDENT>i = next(i for i in reversed(range(len(a) - <NUM_LIT:1>)) if cmp(a[i], a[i + <NUM_LIT:1>]))<EOL>j = next(j for j in reversed(range(i + <NUM_LIT:1>, len(a))) if cmp(a[i], a[j]))<EOL><DEDENT>except StopIteration:<EOL><INDENT>if forward:<EOL><INDENT>a.reverse()<EOL><DEDENT>return False<EOL><DEDENT>a[i], a[j] = a[j], a[i]<EOL>a[i + <NUM_LIT:1>:] = reversed(a[i + <NUM_LIT:1>:])<EOL>if not forward:<EOL><INDENT>a.reverse()<EOL><DEDENT>return True<EOL>", "docstring": "Advance a list of unique, ordered elements in-place, lexicographically\nincreasing or backward, by rightmost or leftmost digit.\n\nReturns False if the permutation wrapped around - i.e. went from\nlexicographically greatest to least, and True in all other cases.\n\nIf the length of the list is N, then this function will repeat values after\nN! steps, and will return False exactly once.\n\nSee also https://stackoverflow.com/a/34325140/43839", "id": "f2003:m0"}
{"signature": "def get_pid(pid_filename=None):", "body": "return int(open(pid_filename or DEFAULT_PID_FILENAME).read(<NUM_LIT:16>))<EOL>", "docstring": "Return the integer PID for the current bp process, or raise an exception if\nthere is no such process or it hasn't registered a PID.", "id": "f2005:m2"}
{"signature": "def report(function, *args, **kwds):", "body": "try:<EOL><INDENT>function(*args, **kwds)<EOL><DEDENT>except Exception:<EOL><INDENT>traceback.print_exc()<EOL><DEDENT>", "docstring": "Run a function, catch, report and discard exceptions", "id": "f2007:m1"}
{"signature": "@contextlib.contextmanager<EOL>def add(*args):", "body": "try:<EOL><INDENT>yield<EOL><DEDENT>except Exception as e:<EOL><INDENT>e.args = args + e.args<EOL>raise<EOL><DEDENT>", "docstring": "A context manager that appends arguments to any exception thrown\n\n:param args: Arguments to be appended to the ``.args`` attribute of any\n             exception that is thrown while the context manager is active", "id": "f2007:m0"}
{"signature": "def resize(image, x, y, stretch=False, top=None, left=None, mode='<STR_LIT>',<EOL>resample=None):", "body": "if x <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if y <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>from PIL import Image<EOL>resample = Image.ANTIALIAS if resample is None else resample<EOL>if not isinstance(resample, numbers.Number):<EOL><INDENT>try:<EOL><INDENT>resample = getattr(Image, resample.upper())<EOL><DEDENT>except:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % resample)<EOL><DEDENT>if not isinstance(resample, numbers.Number):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % resample)<EOL><DEDENT><DEDENT>size = x, y<EOL>if stretch:<EOL><INDENT>return image.resize(size, resample=resample)<EOL><DEDENT>result = Image.new(mode, size)<EOL>ratios = [d1 / d2 for d1, d2 in zip(size, image.size)]<EOL>if ratios[<NUM_LIT:0>] < ratios[<NUM_LIT:1>]:<EOL><INDENT>new_size = (size[<NUM_LIT:0>], int(image.size[<NUM_LIT:1>] * ratios[<NUM_LIT:0>]))<EOL><DEDENT>else:<EOL><INDENT>new_size = (int(image.size[<NUM_LIT:0>] * ratios[<NUM_LIT:1>]), size[<NUM_LIT:1>])<EOL><DEDENT>image = image.resize(new_size, resample=resample)<EOL>if left is None:<EOL><INDENT>box_x = int((x - new_size[<NUM_LIT:0>]) / <NUM_LIT:2>)<EOL><DEDENT>elif left:<EOL><INDENT>box_x = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>box_x = x - new_size[<NUM_LIT:0>]<EOL><DEDENT>if top is None:<EOL><INDENT>box_y = int((y - new_size[<NUM_LIT:1>]) / <NUM_LIT:2>)<EOL><DEDENT>elif top:<EOL><INDENT>box_y = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>box_y = y - new_size[<NUM_LIT:1>]<EOL><DEDENT>result.paste(image, box=(box_x, box_y))<EOL>return result<EOL>", "docstring": "Return an image resized.", "id": "f2011:m1"}
{"signature": "def show_image(setter, width, height,<EOL>image_path='<STR_LIT>', image_obj=None, offset=(<NUM_LIT:0>, <NUM_LIT:0>),<EOL>bgcolor=COLORS.Off, brightness=<NUM_LIT:255>):", "body": "bgcolor = color_scale(bgcolor, brightness)<EOL>img = image_obj<EOL>if image_path and not img:<EOL><INDENT>from PIL import Image<EOL>img = Image.open(image_path)<EOL><DEDENT>elif not img:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>w = min(width - offset[<NUM_LIT:0>], img.size[<NUM_LIT:0>])<EOL>h = min(height - offset[<NUM_LIT:1>], img.size[<NUM_LIT:1>])<EOL>ox = offset[<NUM_LIT:0>]<EOL>oy = offset[<NUM_LIT:1>]<EOL>for x in range(ox, w + ox):<EOL><INDENT>for y in range(oy, h + oy):<EOL><INDENT>r, g, b, a = (<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:255>)<EOL>rgba = img.getpixel((x - ox, y - oy))<EOL>if isinstance(rgba, int):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if len(rgba) == <NUM_LIT:3>:<EOL><INDENT>r, g, b = rgba<EOL><DEDENT>elif len(rgba) == <NUM_LIT:4>:<EOL><INDENT>r, g, b, a = rgba<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if a == <NUM_LIT:0>:<EOL><INDENT>r, g, b = bgcolor<EOL><DEDENT>else:<EOL><INDENT>r, g, b = color_scale((r, g, b), a)<EOL><DEDENT>if brightness != <NUM_LIT:255>:<EOL><INDENT>r, g, b = color_scale((r, g, b), brightness)<EOL><DEDENT>setter(x, y, (r, g, b))<EOL><DEDENT><DEDENT>", "docstring": "Display an image on a matrix.", "id": "f2015:m0"}
{"signature": "def loadImage(layout, imagePath=\"<STR_LIT>\", imageObj=None, offset=(<NUM_LIT:0>, <NUM_LIT:0>),<EOL>bgcolor=COLORS.Off, brightness=<NUM_LIT:255>):", "body": "if not isinstance(layout, Matrix):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>texture = [[COLORS.Off for x in range(layout.width)]<EOL>for y in range(layout.height)]<EOL>def setter(x, y, pixel):<EOL><INDENT>if y >= <NUM_LIT:0> and x >= <NUM_LIT:0>:<EOL><INDENT>texture[y][x] = pixel<EOL><DEDENT><DEDENT>show_image(setter, layout.width, layout.height, imagePath, imageObj,<EOL>offset, bgcolor, brightness)<EOL>return texture<EOL>", "docstring": "Display an image on the matrix", "id": "f2015:m2"}
{"signature": "def showImage(layout, imagePath=\"<STR_LIT>\", imageObj=None, offset=(<NUM_LIT:0>, <NUM_LIT:0>),<EOL>bgcolor=COLORS.Off, brightness=<NUM_LIT:255>):", "body": "if not isinstance(layout, Matrix):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>layout.all_off()<EOL>return show_image(layout.set, layout.width, layout.height, imagePath,<EOL>imageObj, offset, bgcolor, brightness)<EOL>", "docstring": "Display an image on the matrix", "id": "f2015:m1"}
{"signature": "def __init__(self, address):", "body": "super().__init__()<EOL>self.address = address<EOL>", "docstring": ":param str address: a pair (ip_address, port) to pass to socket.connect", "id": "f2021:c0:m0"}
{"signature": "def __init__(self, address, **kwds):", "body": "super().__init__(**kwds)<EOL>self.sender = Sender(address)<EOL>", "docstring": ":param str address: a pair (ip_address, port) to pass to socket.connect", "id": "f2021:c1:m0"}
{"signature": "def unflatten(master):", "body": "result = {}<EOL>for k, v in master.items():<EOL><INDENT>*first, last = k.split('<STR_LIT:.>')<EOL>r = result<EOL>for i in first:<EOL><INDENT>r = r.setdefault(i, {})<EOL><DEDENT>r[last] = v<EOL><DEDENT>return result<EOL>", "docstring": ":param dict master: a multilevel dictionary\n:return: a unflattened dictionary\n:rtype: dict\n\nUnflattens a single-level dictionary a multilevel into one so that::\n\n    {'foo.bar.a': 1,\n     'foo.bar.b': True,\n     'foo.bar.a': 1,\n     }\n\nwould become::\n\n    {'foo':\n        {'bar':\n           {\n               'a': 1,\n               'b': True,\n               'c': 'hello',\n            },\n        },\n    }", "id": "f2023:m1"}
{"signature": "def pop_legacy_palette(kwds, *color_defaults):", "body": "palette = kwds.pop('<STR_LIT>', None)<EOL>if palette:<EOL><INDENT>legacy = [k for k, _ in color_defaults if k in kwds]<EOL>if legacy:<EOL><INDENT>raise ValueError('<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(legacy))<EOL><DEDENT>return palette<EOL><DEDENT>values = [kwds.pop(k, v) for k, v in color_defaults]<EOL>if values and color_defaults[<NUM_LIT:0>][<NUM_LIT:0>] in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>values = values[<NUM_LIT:0>]<EOL><DEDENT>return make.colors(values or None)<EOL>", "docstring": "Older animations in BPA and other areas use all sorts of different names for\nwhat we are now representing with palettes.\n\nThis function mutates a kwds dictionary to remove these legacy fields and\nextract a palette from it, which it returns.", "id": "f2026:m0"}
{"signature": "def wheel_color(position):", "body": "return _WHEEL[round(position) % len(_WHEEL)]<EOL>", "docstring": "Get color from wheel value (0 - 384).\n    Provided for those used to using it from Adafruit libraries", "id": "f2027:m1"}
{"signature": "def hsv2rgb_rainbow(hsv):", "body": "def nscale8x3_video(r, g, b, scale):<EOL><INDENT>nonzeroscale = <NUM_LIT:0><EOL>if scale != <NUM_LIT:0>:<EOL><INDENT>nonzeroscale = <NUM_LIT:1><EOL><DEDENT>if r != <NUM_LIT:0>:<EOL><INDENT>r = ((r * scale) >> <NUM_LIT:8>) + nonzeroscale<EOL><DEDENT>if g != <NUM_LIT:0>:<EOL><INDENT>g = ((g * scale) >> <NUM_LIT:8>) + nonzeroscale<EOL><DEDENT>if b != <NUM_LIT:0>:<EOL><INDENT>b = ((b * scale) >> <NUM_LIT:8>) + nonzeroscale<EOL><DEDENT>return (r, g, b)<EOL><DEDENT>def scale8_video_LEAVING_R1_DIRTY(i, scale):<EOL><INDENT>nonzeroscale = <NUM_LIT:0><EOL>if scale != <NUM_LIT:0>:<EOL><INDENT>nonzeroscale = <NUM_LIT:1><EOL><DEDENT>if i != <NUM_LIT:0>:<EOL><INDENT>i = ((i * scale) >> <NUM_LIT:8>) + nonzeroscale<EOL><DEDENT>return i<EOL><DEDENT>h, s, v = hsv<EOL>offset = h & <NUM_LIT>  <EOL>offset8 = offset * <NUM_LIT:8><EOL>third = (offset8 * (<NUM_LIT> // <NUM_LIT:3>)) >> <NUM_LIT:8><EOL>r, g, b = (<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>if not (h & <NUM_LIT>):<EOL><INDENT>if not (h & <NUM_LIT>):<EOL><INDENT>if not (h & <NUM_LIT>):<EOL><INDENT>r = <NUM_LIT:255> - third<EOL>g = third<EOL>b = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>r = <NUM_LIT><EOL>g = <NUM_LIT> + third<EOL>b = <NUM_LIT><EOL><DEDENT><DEDENT>else:<EOL><INDENT>if not (h & <NUM_LIT>):<EOL><INDENT>twothirds = (third << <NUM_LIT:1>)<EOL>r = <NUM_LIT> - twothirds<EOL>g = <NUM_LIT> + third<EOL>b = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>r = <NUM_LIT:0><EOL>g = <NUM_LIT:255> - third<EOL>b = third<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if not (h & <NUM_LIT>):<EOL><INDENT>if not (h & <NUM_LIT>):<EOL><INDENT>r = <NUM_LIT><EOL>twothirds = (third << <NUM_LIT:1>)<EOL>g = <NUM_LIT> - twothirds<EOL>b = <NUM_LIT> + twothirds<EOL><DEDENT>else:<EOL><INDENT>r = third<EOL>g = <NUM_LIT:0><EOL>b = <NUM_LIT:255> - third<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if not (h & <NUM_LIT>):<EOL><INDENT>r = <NUM_LIT> + third<EOL>g = <NUM_LIT:0><EOL>b = <NUM_LIT> - 
third<EOL><DEDENT>else:<EOL><INDENT>r = <NUM_LIT> + third<EOL>g = <NUM_LIT><EOL>b = <NUM_LIT> - third<EOL><DEDENT><DEDENT><DEDENT>if s != <NUM_LIT:255>:<EOL><INDENT>r, g, b = nscale8x3_video(r, g, b, s)<EOL>desat = <NUM_LIT:255> - s<EOL>desat = (desat * desat) >> <NUM_LIT:8><EOL>brightness_floor = desat<EOL>r = r + brightness_floor<EOL>g = g + brightness_floor<EOL>b = b + brightness_floor<EOL><DEDENT>if v != <NUM_LIT:255>:<EOL><INDENT>v = scale8_video_LEAVING_R1_DIRTY(v, v)<EOL>r, g, b = nscale8x3_video(r, g, b, v)<EOL><DEDENT>return (r, g, b)<EOL>", "docstring": "Generates RGB values from HSV that have an even visual\n    distribution.  Be careful as this method is only have as fast as\n    hsv2rgb_spectrum.", "id": "f2030:m2"}
{"signature": "def color_cmp(a, b):", "body": "if a == b:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>a, b = rgb_to_hsv(a), rgb_to_hsv(b)<EOL>return -<NUM_LIT:1> if a < b else <NUM_LIT:1><EOL>", "docstring": "Order colors by hue, saturation and value, in that order.\n\n    Returns -1 if a < b, 0 if a == b and 1 if a < b.", "id": "f2030:m12"}
{"signature": "def hsv2rgb_raw(hsv):", "body": "HSV_SECTION_3 = <NUM_LIT><EOL>h, s, v = hsv<EOL>invsat = <NUM_LIT:255> - s<EOL>brightness_floor = (v * invsat) // <NUM_LIT><EOL>color_amplitude = v - brightness_floor<EOL>section = h // HSV_SECTION_3  <EOL>offset = h % HSV_SECTION_3  <EOL>rampup = offset<EOL>rampdown = (HSV_SECTION_3 - <NUM_LIT:1>) - offset<EOL>rampup_amp_adj = (rampup * color_amplitude) // (<NUM_LIT> // <NUM_LIT:4>)<EOL>rampdown_amp_adj = (rampdown * color_amplitude) // (<NUM_LIT> // <NUM_LIT:4>)<EOL>rampup_adj_with_floor = rampup_amp_adj + brightness_floor<EOL>rampdown_adj_with_floor = rampdown_amp_adj + brightness_floor<EOL>r, g, b = (<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>if section:<EOL><INDENT>if section == <NUM_LIT:1>:<EOL><INDENT>r = brightness_floor<EOL>g = rampdown_adj_with_floor<EOL>b = rampup_adj_with_floor<EOL><DEDENT>else:<EOL><INDENT>r = rampup_adj_with_floor<EOL>g = brightness_floor<EOL>b = rampdown_adj_with_floor<EOL><DEDENT><DEDENT>else:<EOL><INDENT>r = rampdown_adj_with_floor<EOL>g = rampup_adj_with_floor<EOL>b = brightness_floor<EOL><DEDENT>return (r, g, b)<EOL>", "docstring": "Converts an HSV tuple to RGB. Intended for internal use.\nYou should use hsv2rgb_spectrum or hsv2rgb_rainbow instead.", "id": "f2030:m0"}
{"signature": "def hsv2rgb_360(hsv):", "body": "h, s, v = hsv<EOL>r, g, b = colorsys.hsv_to_rgb(h / <NUM_LIT>, s, v)<EOL>return (int(r * <NUM_LIT>), int(g * <NUM_LIT>), int(b * <NUM_LIT>))<EOL>", "docstring": "Python default hsv to rgb conversion for when hue values in the\n    range 0-359 are preferred.  Due to requiring float math, this method\n    is slower than hsv2rgb_rainbow and hsv2rgb_spectrum.", "id": "f2030:m3"}
{"signature": "def euclidean(c1, c2):", "body": "diffs = ((i - j) for i, j in zip(c1, c2))<EOL>return sum(x * x for x in diffs)<EOL>", "docstring": "Square of the euclidean distance", "id": "f2032:m1"}
{"signature": "def __init__(self, colors=(), continuous=False, serpentine=False, scale=<NUM_LIT:1>,<EOL>offset=<NUM_LIT:0>, autoscale=False, length=None):", "body": "super().__init__(colors)<EOL>if not self:<EOL><INDENT>self.append(Black)<EOL><DEDENT>self.continuous = continuous<EOL>self.serpentine = serpentine<EOL>self.scale = scale<EOL>self.offset = offset<EOL>self.autoscale = autoscale<EOL>self.length = length<EOL>", "docstring": "Arguments:\n    colors: an iterable of colors\n\n    continuous: if True, interpolate linearly between colors; if False,\n      use the nearest color from the original list\n\n    serpentine: if True, palette colors are used in reverse order every\n      other iteration, giving a back-and-forth effect.  If False,\n      palette colors always restart on each iteration\n\n    scale: Scales the incoming index ``i``.  As ``i`` moves from 0\n      to ``len(colors) - 1``, the whole palette repeats itself\n      ``self.scale`` times\n\n    offset: offset to the incoming index ``i``, applied after scaling\n\n    autoscale: If True, automatically rescale the Palette size to\n      match the length of the output.  ``autoscale`` happens before\n      ``scale``, so the two work well together to give banding or\n      striping effects across your display\n\n   ``length``:\n     The length of the output color_list.  If None, use the length of\n     the palette itself.  If autoscale=True, ``length`` is used to scale\n     the palette to match the output.", "id": "f2033:c0:m0"}
{"signature": "def toggle(s):", "body": "is_numeric = '<STR_LIT:U+002C>' in s or s.startswith('<STR_LIT>') or s.startswith('<STR_LIT:#>')<EOL>c = name_to_color(s)<EOL>return color_to_name(c) if is_numeric else str(c)<EOL>", "docstring": "Toggle back and forth between a name and a tuple representation.\n\n:param str s: a string which is either a text name, or a tuple-string:\n              a string with three numbers separated by commas\n\n:returns: if the string was a text name, return a tuple.  If it's a\n          tuple-string and it corresponds to a text name, return the text\n          name, else return the original tuple-string.", "id": "f2035:m2"}
{"signature": "def color_blend(a, b):", "body": "return (<NUM_LIT:255> - (((<NUM_LIT:255> - a[<NUM_LIT:0>]) * (<NUM_LIT:255> - b[<NUM_LIT:0>])) >> <NUM_LIT:8>),<EOL><NUM_LIT:255> - (((<NUM_LIT:255> - a[<NUM_LIT:1>]) * (<NUM_LIT:255> - b[<NUM_LIT:1>])) >> <NUM_LIT:8>),<EOL><NUM_LIT:255> - (((<NUM_LIT:255> - a[<NUM_LIT:2>]) * (<NUM_LIT:255> - b[<NUM_LIT:2>])) >> <NUM_LIT:8>))<EOL>", "docstring": "Performs a Screen blend on RGB color tuples, a and b", "id": "f2036:m0"}
{"signature": "def get(self, i):", "body": "return self.table[max(<NUM_LIT:0>, min(<NUM_LIT:255>, int(i)))]<EOL>", "docstring": ":returns: The gamma table entry\n:param int i: the index into the table", "id": "f2037:c0:m1"}
{"signature": "def get(name=None):", "body": "if name is None or name == '<STR_LIT:default>':<EOL><INDENT>return _DEFAULT_PALETTE<EOL><DEDENT>if isinstance(name, str):<EOL><INDENT>return PROJECT_PALETTES.get(name) or BUILT_IN_PALETTES.get(name)<EOL><DEDENT>", "docstring": "Return a named Palette, or None if no such name exists.\n\nIf ``name`` is omitted, the default value is used.", "id": "f2041:m0"}
{"signature": "def push_to_driver(self):", "body": "self.wait_for_update()<EOL>self.update_colors()<EOL>", "docstring": "Push the current pixel state to the driver", "id": "f2042:c2:m3"}
{"signature": "def bresenham_line(setter, x0, y0, x1, y1, color=None, colorFunc=None):", "body": "steep = abs(y1 - y0) > abs(x1 - x0)<EOL>if steep:<EOL><INDENT>x0, y0 = y0, x0<EOL>x1, y1 = y1, x1<EOL><DEDENT>if x0 > x1:<EOL><INDENT>x0, x1 = x1, x0<EOL>y0, y1 = y1, y0<EOL><DEDENT>dx = x1 - x0<EOL>dy = abs(y1 - y0)<EOL>err = dx / <NUM_LIT:2><EOL>if y0 < y1:<EOL><INDENT>ystep = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>ystep = -<NUM_LIT:1><EOL><DEDENT>count = <NUM_LIT:0><EOL>for x in range(x0, x1 + <NUM_LIT:1>):<EOL><INDENT>if colorFunc:<EOL><INDENT>color = colorFunc(count)<EOL>count += <NUM_LIT:1><EOL><DEDENT>if steep:<EOL><INDENT>setter(y0, x, color)<EOL><DEDENT>else:<EOL><INDENT>setter(x, y0, color)<EOL><DEDENT>err -= dy<EOL>if err < <NUM_LIT:0>:<EOL><INDENT>y0 += ystep<EOL>err += dx<EOL><DEDENT><DEDENT>", "docstring": "Draw line from point x0,y0 to x,1,y1. Will draw beyond matrix bounds.", "id": "f2044:m5"}
{"signature": "def fillScreen(self, color=None):", "body": "md.fill_rect(self.set, <NUM_LIT:0>, <NUM_LIT:0>, self.width, self.height, color)<EOL>", "docstring": "Fill the matrix with the given RGB color", "id": "f2045:c0:m20"}
{"signature": "@property<EOL><INDENT>def shape(self):<DEDENT>", "body": "return self.width, self.height<EOL>", "docstring": "Returns ``width, height``", "id": "f2045:c0:m1"}
{"signature": "def bresenham_line(self, x0, y0, x1, y1, color=None, colorFunc=None):", "body": "md.bresenham_line(self.set, x0, y0, x1, y1, color, colorFunc)<EOL>", "docstring": "Draw line from point x0, y0 to x1, y1 using Bresenham's algorithm.\n\nWill draw beyond matrix bounds.", "id": "f2045:c0:m16"}
{"signature": "def drawRect(self, x, y, w, h, color=None, aa=False):", "body": "md.draw_rect(self.set, x, y, w, h, color, aa)<EOL>", "docstring": "Draw rectangle with top-left corner at x,y, width w and height h\n\n:param aa: if True, use Bresenham's algorithm for line drawing;\n    otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m18"}
{"signature": "def set(self, x, y, color):", "body": "<EOL>raise NotImplementedError<EOL>", "docstring": "Set the pixel color at position x, y.", "id": "f2045:c0:m3"}
{"signature": "def drawText(self, text, x=<NUM_LIT:0>, y=<NUM_LIT:0>, color=None,<EOL>bg=colors.COLORS.Off, aa=False, font=font.default_font,<EOL>font_scale=<NUM_LIT:1>):", "body": "md.draw_text(self.fonts, self.set, text, self.width, self.height,<EOL>x, y, color, bg, aa, font, font_scale)<EOL>", "docstring": "Draw a line of text starting at (x, y) in an RGB color.\n\n:param colorFunc: a function that takes an integer from x0 to x1 and\n    returns a color corresponding to that point\n:param aa: if True, use Bresenham's algorithm for line drawing;\n    otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m26"}
{"signature": "def fillTriangle(self, x0, y0, x1, y1, x2, y2, color=None, aa=False):", "body": "md.fill_triangle(self.set, x0, y0, x1, y1, x2, y2, color, aa)<EOL>", "docstring": "Draw filled triangle with points x0,y0 - x1,y1 - x2,y2\n\n:param aa: if True, use Bresenham's algorithm for line drawing;\n    otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m24"}
{"signature": "def drawCircle(self, x0, y0, r, color=None):", "body": "md.draw_circle(self.set, x0, y0, r, color)<EOL>", "docstring": "Draw a circle in an RGB color, with center x0, y0 and radius r.", "id": "f2045:c0:m13"}
{"signature": "def __init__(self, drivers, width=<NUM_LIT:0>, height=<NUM_LIT:0>,<EOL>rotation=<NUM_LIT:0>, vert_flip=False, y_flip=False,<EOL>serpentine=True,<EOL>threadedUpdate=False, brightness=<NUM_LIT:255>,<EOL>pixelSize=(<NUM_LIT:1>, <NUM_LIT:1>), **kwargs):", "body": "self.gen_multi = make_matrix_coord_map_multi<EOL>super().__init__(drivers, threadedUpdate, brightness, **kwargs)<EOL>rot_mod = rotation % <NUM_LIT><EOL>self.rotation = <NUM_LIT> * round(rot_mod / <NUM_LIT>)<EOL>if self.rotation != rot_mod:<EOL><INDENT>log.warning(ROTATION_WARNING, rotation, self.rotation)<EOL><DEDENT>self.width = width or getattr(self.drivers[<NUM_LIT:0>], '<STR_LIT:width>') or <NUM_LIT:0><EOL>self.height = height or getattr(self.drivers[<NUM_LIT:0>], '<STR_LIT>') or <NUM_LIT:0><EOL>self.vert_flip = vert_flip<EOL>self.y_flip = y_flip<EOL>self.serpentine = serpentine<EOL>self.pixelSize = pixelSize<EOL>pw, ph = self.pixelSize<EOL>if not (self.width or self.height):<EOL><INDENT>square = int(math.sqrt(self.numLEDs))<EOL>if (square * square) == self.numLEDs:<EOL><INDENT>self.width = self.height = square<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>if self.width * self.height > self.numLEDs:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>% (self.width, self.height, self.numLEDs))<EOL><DEDENT>if not self.coord_map:<EOL><INDENT>if len(self.drivers) == <NUM_LIT:1>:<EOL><INDENT>log.debug(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>y_flip = y_flip or vert_flip<EOL>self.coord_map = make_matrix_coord_map(<EOL>self.width, self.height,<EOL>serpentine=serpentine,<EOL>rotation=rotation,<EOL>y_flip=vert_flip)<EOL><DEDENT>elif self.drivers:<EOL><INDENT>raise TypeError(<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>self.set_pixel_positions(<EOL>make_matrix_coord_map_positions(self.coord_map))<EOL>if rotation in (<NUM_LIT>, <NUM_LIT>):<EOL><INDENT>w = self.width<EOL>h = self.height<EOL>self.width = h<EOL>self.height = w<EOL><DEDENT>self.texture = 
None<EOL>self.set = self._setColor<EOL>if pw < <NUM_LIT:0> or pw > self.width or ph < <NUM_LIT:0> or ph > self.height:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if self.width % pw != <NUM_LIT:0> or self.height % ph != <NUM_LIT:0>:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>if pw == <NUM_LIT:1> and ph == <NUM_LIT:1>:<EOL><INDENT>self._set = self.__setNormal<EOL><DEDENT>else:<EOL><INDENT>self._set = self.__setScaled<EOL>self.width = self.width / pw<EOL>self.height = self.height / ph<EOL>self.numLEDs = self.width * self.height<EOL><DEDENT>self.fonts = font.fonts<EOL>", "docstring": "Main class for matricies.\n\n        driver --  instance that inherits from DriverBase\n        width --  X axis size of matrix\n        height --  Y axis size of matrix\n        coord_map --  a 2D matrix defining the X,Y to strip index mapping.\n            Not needed in most cases\n        rotation -- how to rotate when generating the map.\n            Not used if coord_map specified\n        vert_flip - flips the generated map along the Y axis.\n            This along with rotation can achieve any orientation", "id": "f2045:c0:m0"}
{"signature": "def drawLine(self, x0, y0, x1, y1, color=None, colorFunc=None, aa=False):", "body": "md.draw_line(self.set, x0, y0, x1, y1, color, colorFunc, aa)<EOL>", "docstring": "Draw a between x0, y0 and x1, y1 in an RGB color.\n\n:param colorFunc: a function that takes an integer from x0 to x1 and\n    returns a color corresponding to that point\n:param aa: if True, use Bresenham's algorithm for line drawing;\n    otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m15"}
{"signature": "def fillRoundRect(self, x, y, w, h, r, color=None, aa=False):", "body": "md.fill_round_rect(self.set, x, y, w, h, r, color, aa)<EOL>", "docstring": "Draw a rounded rectangle with top-left corner at (x, y), width w,\nheight h, and corner radius r\n\n:param aa: if True, use Bresenham's algorithm for line drawing;\n    otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m22"}
{"signature": "def get(self, x, y):", "body": "try:<EOL><INDENT>pixel = self.coord_map[y][x]<EOL>return self._get_base(pixel)<EOL><DEDENT>except IndexError:<EOL><INDENT>return colors.COLORS.Black<EOL><DEDENT>", "docstring": "Return the pixel color at position (x, y), or Colors.black if that\nposition is out-of-bounds.", "id": "f2045:c0:m2"}
{"signature": "def fillRect(self, x, y, w, h, color=None, aa=False):", "body": "md.fill_rect(self.set, x, y, w, h, color, aa)<EOL>", "docstring": "Draw a solid rectangle with top-left corner at (x, y), width w and\nheight h.\n\n:param aa: if True, use Bresenham's algorithm for line drawing;\n    otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m19"}
{"signature": "def drawTriangle(self, x0, y0, x1, y1, x2, y2, color=None, aa=False):", "body": "md.draw_triangle(self.set, x0, y0, x1, y1, x2, y2, color, aa)<EOL>", "docstring": "Draw triangle with vertices (x0, y0), (x1, y1) and (x2, y2)\n\n:param aa: if True, use Bresenham's algorithm for line drawing;\n    Otherwise use Xiaolin Wu's algorithm", "id": "f2045:c0:m23"}
{"signature": "@property<EOL><INDENT>def shape(self):<DEDENT>", "body": "return self.ringCount, self.ringSteps<EOL>", "docstring": "Returns ``ringCount, ringSteps``.", "id": "f2046:c0:m2"}
{"signature": "def set(self, ring, angle, color):", "body": "pixel = self.angleToPixel(angle, ring)<EOL>self._set_base(pixel, color)<EOL>", "docstring": "Set pixel to RGB color tuple", "id": "f2046:c0:m5"}
{"signature": "def apply(self, function):", "body": "for cut in self.cuts:<EOL><INDENT>value = self.read(cut)<EOL>function(value)<EOL>self.write(cut, value)<EOL><DEDENT>", "docstring": "For each row or column in cuts, read a list of its colors,\napply the function to that list of colors, then write it back\nto the layout.", "id": "f2050:c0:m1"}
{"signature": "def setRGB(self, pixel, r, g, b):", "body": "self.set(pixel, (r, g, b))<EOL>", "docstring": "Set single pixel using individual RGB values instead of tuple", "id": "f2051:c0:m7"}
{"signature": "def set(self, pixel, color):", "body": "<EOL>raise NotImplementedError<EOL>", "docstring": "Set the pixel color at position x in the strip.", "id": "f2051:c0:m3"}
{"signature": "def get(self, pixel):", "body": "return self._get_base(pixel * self.pixelWidth)<EOL>", "docstring": "Get RGB color tuple of color at index pixel", "id": "f2051:c0:m2"}
{"signature": "def setOff(self, pixel):", "body": "self.set(pixel, (<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>))<EOL>", "docstring": "Set single pixel off", "id": "f2051:c0:m9"}
{"signature": "def fillRGB(self, r, g, b, start=<NUM_LIT:0>, end=-<NUM_LIT:1>):", "body": "self.fill((r, g, b), start, end)<EOL>", "docstring": "Fill entire strip by giving individual RGB values instead of tuple", "id": "f2052:c0:m24"}
{"signature": "@classmethod<EOL><INDENT>def construct(cls, project, **desc):<DEDENT>", "body": "return cls(project.drivers, maker=project.maker, **desc)<EOL>", "docstring": "Construct a layout.\n        SHOULD BE PRIVATE", "id": "f2052:c0:m0"}
{"signature": "def set_colors(self, buf):", "body": "deprecated.deprecated('<STR_LIT>')<EOL>if len(self._colors) != len(buf):<EOL><INDENT>raise IOError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>.format(len(self._colors), len(buf)))<EOL><DEDENT>self._colors[:] = buf<EOL>", "docstring": "DEPRECATED: use self.color_list\n\nUse with extreme caution!\nDirectly sets the internal buffer and bypasses all brightness and\nrotation control buf must also be in the exact format required by the\ndisplay type.", "id": "f2052:c1:m2"}
{"signature": "def set_pixel_positions(self, pixel_positions):", "body": "for d in self.drivers:<EOL><INDENT>d.set_pixel_positions(pixel_positions)<EOL><DEDENT>", "docstring": "SHOULD BE PRIVATE", "id": "f2052:c0:m2"}
{"signature": "def reflect_y(x, y, matrix):", "body": "return x, matrix.rows - <NUM_LIT:1> - y<EOL>", "docstring": "Reflect the index horizontally.", "id": "f2053:m1"}
{"signature": "def transpose(x, y, _):", "body": "return y, x<EOL>", "docstring": "Transpose rows and columns.", "id": "f2053:m4"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def __setitem__(self, index, value):<DEDENT>", "body": "pass<EOL>", "docstring": "`index` must be an integer, not a slice.", "id": "f2060:c0:m1"}
{"signature": "def recurse(desc, pre='<STR_LIT>', post=None, python_path=None):", "body": "def call(f, desc):<EOL><INDENT>if isinstance(f, str):<EOL><INDENT>f = getattr(datatype, f, None)<EOL><DEDENT>return f and f(desc)<EOL><DEDENT>desc = load.load_if_filename(desc) or desc<EOL>desc = construct.to_type_constructor(desc, python_path)<EOL>datatype = desc.get('<STR_LIT>')<EOL>desc = call(pre, desc) or desc<EOL>for child_name in getattr(datatype, '<STR_LIT>', []):<EOL><INDENT>child = desc.get(child_name)<EOL>if child:<EOL><INDENT>is_plural = child_name.endswith('<STR_LIT:s>')<EOL>remove_s = is_plural and child_name != '<STR_LIT>'<EOL>cname = child_name[:-<NUM_LIT:1>] if remove_s else child_name<EOL>new_path = python_path or ('<STR_LIT>' + cname)<EOL>if is_plural:<EOL><INDENT>if isinstance(child, (dict, str)):<EOL><INDENT>child = [child]<EOL><DEDENT>for i, c in enumerate(child):<EOL><INDENT>child[i] = recurse(c, pre, post, new_path)<EOL><DEDENT>desc[child_name] = child<EOL><DEDENT>else:<EOL><INDENT>desc[child_name] = recurse(child, pre, post, new_path)<EOL><DEDENT><DEDENT><DEDENT>d = call(post, desc)<EOL>return desc if d is None else d<EOL>", "docstring": "Depth first recursion through a dictionary containing type constructors\n\nThe arguments pre, post and children are independently either:\n\n* None, which means to do nothing\n* a string, which means to use the static class method of that name on the\n  class being constructed, or\n* a callable, to be called at each recursion\n\nArguments:\n\ndictionary -- a project dictionary or one of its subdictionaries\npre -- called before children are visited node in the recursion\npost -- called after children are visited in the recursion\npython_path -- relative path to start resolving typenames", "id": "f2061:m0"}
{"signature": "def put_edit(self, f, *args, **kwds):", "body": "self.put_nowait(functools.partial(f, *args, **kwds))<EOL>", "docstring": "Defer an edit to run on the EditQueue.\n\n:param callable f: The function to be called\n:param tuple args: Positional arguments to the function\n:param tuple kwds: Keyword arguments to the function\n:throws queue.Full: if the queue is full", "id": "f2076:c0:m0"}
{"signature": "def get_and_run_edits(self):", "body": "if self.empty():<EOL><INDENT>return<EOL><DEDENT>edits = []<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>edits.append(self.get_nowait())<EOL><DEDENT>except queue.Empty:<EOL><INDENT>break<EOL><DEDENT><DEDENT>for e in edits:<EOL><INDENT>try:<EOL><INDENT>e()<EOL><DEDENT>except:<EOL><INDENT>log.error('<STR_LIT>', e)<EOL>traceback.print_exc()<EOL><DEDENT><DEDENT>", "docstring": "Get all the edits in the queue, then execute them.\n\nThe algorithm gets all edits, and then executes all of them.  It does\n*not* pull off one edit, execute, repeat until the queue is empty, and\nthat means that the queue might not be empty at the end of\n``run_edits``, because new edits might have entered the queue\nwhile the previous edits are being executed.\n\nThis has the advantage that if edits enter the queue faster than they\ncan be processed, ``get_and_run_edits`` won't go into an infinite loop,\nbut rather the queue will grow unboundedly, which that can be\ndetected, and mitigated and reported on - or if Queue.maxsize is\nset, ``bp`` will report a fairly clear error and just dump the edits\non the ground.", "id": "f2076:c0:m1"}
{"signature": "def __init__(self, *,<EOL>drivers, layout, maker, path, animation, controls,<EOL>edit_queue_maxsize=EDIT_QUEUE_MAXSIZE, **kwds):", "body": "self.needs_cleanup = False<EOL>def create(root, name):<EOL><INDENT>def post(desc):<EOL><INDENT>exception = desc.get('<STR_LIT>')<EOL>if exception:<EOL><INDENT>raise exception<EOL><DEDENT>return self.construct_child(name, **desc)<EOL><DEDENT>with exception.add('<STR_LIT>' + name):<EOL><INDENT>return recurse.recurse(<EOL>root,<EOL>pre=None,<EOL>post=post,<EOL>python_path='<STR_LIT>' + name)<EOL><DEDENT><DEDENT>attributes.check(kwds, '<STR_LIT>')<EOL>self.path = path<EOL>layout = layout or fill.fill_layout(animation)<EOL>self.maker = self.construct_child('<STR_LIT>', **maker)<EOL>self.drivers = [create(d, '<STR_LIT>') for d in drivers]<EOL>with exception.add('<STR_LIT>'):<EOL><INDENT>self.layout = self.construct_child('<STR_LIT>', **layout)<EOL><DEDENT>self.animation = create(animation, '<STR_LIT>')<EOL>self.running = False<EOL>self.clock = clock.Clock()<EOL>eq = edit_queue.EditQueue(maxsize=edit_queue_maxsize)<EOL>self.layout.edit_queue = self.animation.edit_queue = eq<EOL>self.animation.add_preframe_callback(eq.get_and_run_edits)<EOL>self.controls = [create(c, '<STR_LIT>') for c in controls]<EOL>for d in self.drivers:<EOL><INDENT>d.set_project(self)<EOL><DEDENT>self.animation.set_project(self)<EOL>", "docstring": ":param int edit_queue_maxsize: maxsize parameter to queue.Queue.\n    0 means an unbounded queue.", "id": "f2080:c0:m2"}
{"signature": "def project(*descs, root_file=None):", "body": "load.ROOT_FILE = root_file<EOL>desc = merge.merge(merge.DEFAULT_PROJECT, *descs)<EOL>path = desc.get('<STR_LIT:path>', '<STR_LIT>')<EOL>if root_file:<EOL><INDENT>project_path = os.path.dirname(root_file)<EOL>if path:<EOL><INDENT>path += '<STR_LIT::>' + project_path<EOL><DEDENT>else:<EOL><INDENT>path = project_path<EOL><DEDENT><DEDENT>with load.extender(path):<EOL><INDENT>desc = recurse.recurse(desc)<EOL><DEDENT>project = construct.construct(**desc)<EOL>project.desc = desc<EOL>return project<EOL>", "docstring": "Make a new project, using recursion and alias resolution.\n\nUse this function in preference to calling Project() directly.", "id": "f2080:m0"}
{"signature": "def __init__(self, *args, filename='<STR_LIT>', render=None,<EOL>divide=<NUM_LIT:1>, frames=<NUM_LIT:0>, time=<NUM_LIT:10>, speed=<NUM_LIT:1.0>, options=None,<EOL>gif_dir=None, **kwds):", "body": "super().__init__(*args, **kwds)<EOL>self.cur_step = <NUM_LIT:1><EOL>self.movie_writer = _movie_writer.MovieWriter(<EOL>filename, render, divide, frames, time, speed, options, gif_dir)<EOL>", "docstring": ":param str filename: Base filename to write the animated GIF file\n\n:param dict render: Parameters to the renderer function -\n    see ``bibliopixel.util.image.render.renderer``\n\n:param int divide: If greater than 1, only rendered one in ``divide``\n    frames\n\n:param int frames: Number of frames to write\n\n:param float time: Total time to write.  If non-zero, takes precedence\n    over `frames`\n\n:param float speed: the speed of the GIF is scaled up by this factor,\n    so if speed=2 then a 2 second animation will become a 1 second GIF.\n\n:param dict options: Options to\n    ``bibliopixel.util.image.gif.write_animation``\n\n:param str gif_dir: If set, write individual GIF frame files to this\n    directory, and do not delete them when done.  For testing purposes.", "id": "f2089:c0:m0"}
{"signature": "def set_device_id(self, dev, id):", "body": "if id < <NUM_LIT:0> or id > <NUM_LIT:255>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>com, code, ok = io.send_packet(<EOL>CMDTYPE.SETID, <NUM_LIT:1>, dev, self.baudrate, <NUM_LIT:5>, id)<EOL>if not ok:<EOL><INDENT>raise_error(code)<EOL><DEDENT>", "docstring": "Set device ID to new value.\n\n        :param str dev: Serial device address/path\n        :param id: Device ID to set", "id": "f2090:c0:m4"}
{"signature": "def error(self, fail=True, action='<STR_LIT>'):", "body": "e = '<STR_LIT>'<EOL>if action:<EOL><INDENT>e = '<STR_LIT>' % (action, e)<EOL><DEDENT>log.error(e)<EOL>if fail:<EOL><INDENT>raise IOError(e)<EOL><DEDENT>", "docstring": "SHOULD BE PRIVATE METHOD", "id": "f2090:c0:m3"}
{"signature": "def get_device(self, id=None):", "body": "if id is None:<EOL><INDENT>if not self.devices:<EOL><INDENT>raise ValueError('<STR_LIT>' % self.hardware_id)<EOL><DEDENT>id, (device, version) = sorted(self.devices.items())[<NUM_LIT:0>]<EOL><DEDENT>elif id in self.devices:<EOL><INDENT>device, version = self.devices[id]<EOL><DEDENT>else:<EOL><INDENT>error = '<STR_LIT>' % id<EOL>log.error(error)<EOL>raise ValueError(error)<EOL><DEDENT>log.info(\"<STR_LIT>\",<EOL>device, id, version)<EOL>return id, device, version<EOL>", "docstring": "Returns details of either the first or specified device\n\n        :param int id: Identifier of desired device. If not given, first device\n            found will be returned\n\n        :returns tuple: Device ID, Device Address, Firmware Version", "id": "f2090:c0:m2"}
{"signature": "def get_device_id(self, dev):", "body": "com, code, ok = io.send_packet(CMDTYPE.GETID, <NUM_LIT:0>, dev, self.baudrate, <NUM_LIT:5>)<EOL>if code is None:<EOL><INDENT>self.error(action='<STR_LIT>')<EOL><DEDENT>return code<EOL>", "docstring": "Get device ID at given address/path.\n\n        :param str dev: Serial device address/path\n        :param baudrate: Baudrate to use when connecting (optional)", "id": "f2090:c0:m5"}
{"signature": "def __init__(self, num=<NUM_LIT>, delay=<NUM_LIT:0>, **kwds):", "body": "super().__init__(num)<EOL>self._kwds = kwds<EOL>self._delay = delay<EOL>", "docstring": "Args\n    delay: time to wait in seconds to simulate actual hardware\n    interface time", "id": "f2103:c0:m0"}
{"signature": "def send_packet(self, data):", "body": "pass<EOL>", "docstring": "do nothing", "id": "f2108:c4:m0"}
{"signature": "def send_packet(self, data):", "body": "raise NotImplementedError<EOL>", "docstring": "SHOULD BE PRIVATE", "id": "f2108:c0:m1"}
{"signature": "def send_packet(self, data):", "body": "package_size = <NUM_LIT>  <EOL>for i in range(int(math.ceil(len(data) / package_size))):<EOL><INDENT>start = i * package_size<EOL>end = (i + <NUM_LIT:1>) * package_size<EOL>self._spi.transfer(data[start:end])<EOL><DEDENT>", "docstring": "SHOULD BE PRIVATE", "id": "f2108:c2:m1"}
{"signature": "def error(self, text):", "body": "msg = '<STR_LIT>'.format(<EOL>self._dev, self._spi_speed, text)<EOL>log.error(msg)<EOL>raise IOError(msg)<EOL>", "docstring": "SHOULD BE PRIVATE", "id": "f2108:c0:m3"}
{"signature": "def send_packet(self, data):", "body": "package_size = <NUM_LIT>  <EOL>for i in range(int(math.ceil(len(data) / package_size))):<EOL><INDENT>start = i * package_size<EOL>end = (i + <NUM_LIT:1>) * package_size<EOL>self._spi.write(data[start:end])<EOL>self._spi.flush()<EOL><DEDENT>", "docstring": "SHOULD BE PRIVATE", "id": "f2108:c1:m1"}
{"signature": "def __init__(self, num=<NUM_LIT:32>, port=<NUM_LIT>, **kwds):", "body": "super().__init__(num, address=port, **kwds)<EOL>", "docstring": "Args:\n    num:  number of LEDs being visualizer.\n    port:  the port on which the SimPixel server is running.\n    pixel_positions:  the positions of the LEDs in 3-d space.\n    **kwds:  keywords passed to DriverBase.", "id": "f2117:c1:m1"}
{"signature": "def sendFragment(self, data):", "body": "self._sendMessage(True, STREAM, data)<EOL>", "docstring": "see sendFragmentStart()\n\nIf data is a unicode object then the frame is sent as Text.\nIf the data is a bytearray object then the frame is sent as Binary.", "id": "f2119:c1:m9"}
{"signature": "def handleMessage(self):", "body": "pass<EOL>", "docstring": "Called when websocket frame is received.\nTo access the frame data call self.data.\n\nIf the frame is Text then self.data is a unicode object.\nIf the frame is Binary then self.data is a bytearray object.", "id": "f2119:c1:m1"}
{"signature": "def sendMessage(self, data):", "body": "opcode = BINARY<EOL>if _check_unicode(data):<EOL><INDENT>opcode = TEXT<EOL><DEDENT>self._sendMessage(False, opcode, data)<EOL>", "docstring": "Send websocket data frame to the client.\n\nIf data is a unicode object then the frame is sent as Text.\nIf the data is a bytearray object then the frame is sent as Binary.", "id": "f2119:c1:m11"}
{"signature": "def set_device_brightness(self, brightness):", "body": "packet = util.generate_header(CMDTYPE.BRIGHTNESS, <NUM_LIT:1>)<EOL>packet.append(self._brightness)<EOL>s = self._connect()<EOL>s.sendall(packet)<EOL>resp = ord(s.recv(<NUM_LIT:1>))<EOL>return resp == RETURN_CODES.SUCCESS<EOL>", "docstring": "Hardware specific method to set the global brightness for\n        this driver's output. This method is required to be implemented,\n        however, users should call\n        :py:meth:`.driver_base.DriverBase.set_brightness`\n        instead of calling this method directly.\n\n        :param int brightness: 0-255 value representing the desired\n            brightness level", "id": "f2122:c1:m4"}
{"signature": "def __init__(self, *args, ip_address='<STR_LIT>', port=artnet_message.UDP_PORT,<EOL>filter_dupes=True, offset=<NUM_LIT:0>, **kwds):", "body": "super().__init__(*args, address=(ip_address, port), **kwds)<EOL>self.filter_dupes = filter_dupes<EOL>self.offset = offset_range.DMXChannel(offset)<EOL>self.msg = artnet_message.dmx_message()<EOL>self.last_message = None<EOL>", "docstring": ":param dict channel_map: maps DMX channels to positions in\n    the color_list\n:param int offset: a DMX channel offset, positive, negative or zero", "id": "f2124:c0:m0"}
{"signature": "def _render(self):", "body": "if self.set_device_brightness:<EOL><INDENT>level = <NUM_LIT:1.0><EOL><DEDENT>else:<EOL><INDENT>level = self._brightness / <NUM_LIT><EOL><DEDENT>gam, (r, g, b) = self.gamma.get, self.c_order<EOL>for i in range(min(self.numLEDs, len(self._buf) / <NUM_LIT:3>)):<EOL><INDENT>c = [int(level * x) for x in self._colors[i + self._pos]]<EOL>self._buf[i * <NUM_LIT:3>:(i + <NUM_LIT:1>) * <NUM_LIT:3>] = gam(c[r]), gam(c[g]), gam(c[b])<EOL><DEDENT>", "docstring": "Typically called from :py:func:`_compute_packet` this applies\n        brightness and gamma correction to the pixels controlled by this\n        driver.", "id": "f2125:c0:m15"}
{"signature": "@classmethod<EOL><INDENT>def construct(cls, project, **desc):<DEDENT>", "body": "return cls(maker=project.maker, **desc)<EOL>", "docstring": "Construct a driver from a project and a description.", "id": "f2125:c0:m0"}
{"signature": "def join(self, timeout=None):", "body": "", "docstring": "Called to join threads.", "id": "f2125:c0:m8"}
{"signature": "def _send_packet(self):", "body": "", "docstring": "Send the packet to the driver.\n\n        Eventually, this will run on an I/O thread.", "id": "f2125:c0:m12"}
{"signature": "def cleanup(self):", "body": "", "docstring": "Called to shut this driver down, and stop all threads and processes.", "id": "f2125:c0:m7"}
{"signature": "def bufByteCount(self):", "body": "return <NUM_LIT:3> * self.numLEDs<EOL>", "docstring": "Total number of bytes that the pixel buffer represents.\nMainly used for drivers such as :py:mod:`bibliopixel.drivers.serial`\nand :py:mod:`.network`", "id": "f2125:c0:m9"}
{"signature": "def start(self):", "body": "", "docstring": "Called right before this driver will run.  This is the place\nto do things like start threads, not in the constructor.", "id": "f2125:c0:m5"}
{"signature": "def sync(self):", "body": "", "docstring": "The sync() method is called after the entire frame has been\nsent to the device to indicate that it may now be displayed.\n\nThis is particularly useful when there are multiple drivers comprising\none display which all need to display the next frame at exactly the same\ntime.", "id": "f2125:c0:m10"}
{"signature": "def set_pixel_positions(self, pixel_positions):", "body": "pass<EOL>", "docstring": "Internal Use Only\n\nPlaceholder callback for sending physical pixel layout data to the\n``SimPixel`` driver.", "id": "f2125:c0:m2"}
{"signature": "def stop(self):", "body": "", "docstring": "Called to request any threads or resources to shut down.", "id": "f2125:c0:m6"}
{"signature": "def get_all_items(obj):", "body": "if hasattr(obj, '<STR_LIT>'):<EOL><INDENT>items = []<EOL>for key in obj:<EOL><INDENT>for value in obj.getlist(key):<EOL><INDENT>items.append((key, value))<EOL><DEDENT><DEDENT>return items<EOL><DEDENT>else:<EOL><INDENT>return obj.items()<EOL><DEDENT>", "docstring": "dict.items() but with a separate row for each value in a MultiValueDict", "id": "f2250:m6"}
{"signature": "def parse_json_path(path):", "body": "<EOL>original_path = path<EOL>steps = []<EOL>failed = [<EOL>JsonStep(<EOL>type=\"<STR_LIT:object>\",<EOL>key=original_path,<EOL>last=True,<EOL>failed=True,<EOL>)<EOL>]<EOL>digit_re = re.compile(r'<STR_LIT>')<EOL>key_re = re.compile(r'<STR_LIT>')<EOL>parts = path.split(\"<STR_LIT:[>\")<EOL>first_key = parts[<NUM_LIT:0>]<EOL>if parts[<NUM_LIT:1>:]:<EOL><INDENT>path = \"<STR_LIT:[>\" + \"<STR_LIT:[>\".join(parts[<NUM_LIT:1>:])<EOL><DEDENT>else:<EOL><INDENT>path = \"<STR_LIT>\"<EOL><DEDENT>steps.append(JsonStep(<EOL>type=\"<STR_LIT:object>\",<EOL>key=first_key,<EOL>))<EOL>if not path:<EOL><INDENT>steps[-<NUM_LIT:1>].last = True<EOL>return steps<EOL><DEDENT>while path:<EOL><INDENT>if path[:<NUM_LIT:2>] == \"<STR_LIT>\":<EOL><INDENT>path = path[<NUM_LIT:2>:]<EOL>steps.append(JsonStep(<EOL>type=\"<STR_LIT>\",<EOL>key=<NUM_LIT:0>,<EOL>))<EOL>continue<EOL><DEDENT>digit_match = digit_re.match(path)<EOL>if digit_match:<EOL><INDENT>path = digit_re.sub(\"<STR_LIT>\", path)<EOL>steps.append(JsonStep(<EOL>type=\"<STR_LIT>\",<EOL>key=int(digit_match.group(<NUM_LIT:1>)),<EOL>))<EOL>continue<EOL><DEDENT>key_match = key_re.match(path)<EOL>if key_match:<EOL><INDENT>path = key_re.sub(\"<STR_LIT>\", path)<EOL>steps.append(JsonStep(<EOL>type=\"<STR_LIT:object>\",<EOL>key=key_match.group(<NUM_LIT:1>),<EOL>))<EOL>continue<EOL><DEDENT>return failed<EOL><DEDENT>next_step = None<EOL>for step in reversed(steps):<EOL><INDENT>if next_step:<EOL><INDENT>step.next_type = next_step.type<EOL><DEDENT>else:<EOL><INDENT>step.last = True<EOL><DEDENT>next_step = step<EOL><DEDENT>return steps<EOL>", "docstring": "Parse a string as a JSON path\nAn implementation of \"steps to parse a JSON encoding path\"\nhttp://www.w3.org/TR/html-json-forms/#dfn-steps-to-parse-a-json-encoding-path", "id": "f2250:m1"}
{"signature": "def clean_empty_string(obj):", "body": "if obj == '<STR_LIT>':<EOL><INDENT>return None<EOL><DEDENT>if isinstance(obj, list):<EOL><INDENT>return [<EOL>None if item == '<STR_LIT>' else item<EOL>for item in obj<EOL>]<EOL><DEDENT>if isinstance(obj, dict):<EOL><INDENT>for key in obj:<EOL><INDENT>obj[key] = clean_empty_string(obj[key])<EOL><DEDENT><DEDENT>return obj<EOL>", "docstring": "Replace empty form values with None, since the is_html_input() check in\nField won't work after we convert to JSON.\n(FIXME: What about allow_blank=True?)", "id": "f2250:m5"}
{"signature": "def clone(self):", "body": "new_object = copy.copy(self)<EOL>if new_object.next:<EOL><INDENT>new_object.next = new_object.next.clone()<EOL><DEDENT>return new_object<EOL>", "docstring": "Self-cloning. All its next Pipe objects are cloned too.\n\n        :returns: cloned object", "id": "f2258:c1:m4"}
{"signature": "@staticmethod<EOL><INDENT>def func(generator):<DEDENT>", "body": "return Pipe(generator)<EOL>", "docstring": "Wrap a generator function to Pipe object.\n\n        :param generator: The generator function to be wrapped.\n        :type generator: generator\n        :returns: Pipe object", "id": "f2258:c2:m0"}
{"signature": "def __init__(self, func, *args, **kw):", "body": "self.__name__ = func.__name__<EOL>self.__doc__ = func.__doc__<EOL>self.func = func<EOL>self.next = None<EOL>self.chained = False<EOL>self.args = args<EOL>self.kw = kw<EOL>", "docstring": "Constructor of Pipe. It takes first argument as a generator function.\n        args and kw are default arguments to be used if the Pipe object is\n        cascaded directly. The default arguments are replaced by the arguments of\n        __call__ operator.\n\n        :param self: self reference.\n        :param func: The generator function to be be wrapped.\n        :param args: The default arguments to be used for generator function.\n        :param kw:  The default keyword arguments to be used for generator function.", "id": "f2258:c1:m0"}
{"signature": "def unregister_all_types():", "body": "Pipe.pipe_item_types.clear()<EOL>", "docstring": "Unregister all data types from Pipe class.", "id": "f2258:m2"}
{"signature": "def append(self, next):", "body": "next.chained = True<EOL>if self.next:<EOL><INDENT>self.next.append(next)<EOL><DEDENT>else:<EOL><INDENT>self.next = next<EOL><DEDENT>", "docstring": "Append next object to pipe tail.\n\n        :param next: The Pipe object to be appended to tail.\n        :type next: Pipe object.", "id": "f2258:c1:m5"}
{"signature": "@staticmethod<EOL><INDENT>def map(func):<DEDENT>", "body": "def wrapper(prev, *argv, **kw):<EOL><INDENT>if prev is None:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>for i in prev:<EOL><INDENT>yield func(i, *argv, **kw)<EOL><DEDENT><DEDENT>return Pipe(wrapper)<EOL>", "docstring": "Wrap a map function to Pipe object. Map function is a function with\n        at least one argument. It is used to convert data. The first argument\n        is the data to be converted. The return data from map function will\n        be sent to next generator.\n\n        :param func: The map function to be wrapped.\n        :type func: function object\n        :param args: The default arguments to be used for map function.\n        :param kw:  The default keyword arguments to be used for map function.\n        :returns: Pipe object", "id": "f2258:c2:m1"}
{"signature": "def unregister_type(item_type):", "body": "if item_type not in Pipe.pipe_item_types:<EOL><INDENT>return<EOL><DEDENT>del Pipe.pipe_item_types[item_type]<EOL>", "docstring": "Unregister data type from Pipe class. Check Pipe.__or__ and Pipe.__ror__ for\n    detail.\n\n    :param item_type: The type of data object which used in pipe cascading.", "id": "f2258:m1"}
{"signature": "def __or__(self, next):", "body": "if not isinstance(next, Pipe):<EOL><INDENT>item_creator = get_item_creator(type(next))<EOL>if item_creator is None:<EOL><INDENT>raise UnregisteredPipeType(type(next))<EOL><DEDENT>next = item_creator(next)<EOL><DEDENT>clone = self.clone()<EOL>if not next.chained:<EOL><INDENT>clone.append(next)<EOL><DEDENT>else:<EOL><INDENT>clone.append(next(*next.args, **next.kw))<EOL><DEDENT>return clone<EOL>", "docstring": "Set operand of right-hand side to be next Pipe object. Type convertion\n        will be applied automatically if next is not a Pipe object and its type\n        is registered in Pipe.pipe_item_types. Otherwise, UnregisteredPipeType\n        will be raised.\n\n        :param next: The next Pipe object to be cascaded.\n        :type next: Pipe object or any object whose type is registered.\n\n        :returns: The clone of self.", "id": "f2258:c1:m1"}
{"signature": "@pipe.func<EOL>def resplit(prev, pattern, *args, **kw):", "body": "maxsplit = <NUM_LIT:0> if '<STR_LIT>' not in kw else kw.pop('<STR_LIT>')<EOL>pattern_obj = re.compile(pattern, *args, **kw)<EOL>for s in prev:<EOL><INDENT>yield pattern_obj.split(s, maxsplit=maxsplit)<EOL><DEDENT>", "docstring": "The resplit pipe split previous pipe input by regular expression.\n\n    Use 'maxsplit' keyword argument to limit the number of split.\n\n    :param prev: The previous iterator of pipe.\n    :type prev: Pipe\n    :param pattern: The pattern which used to split string.\n    :type pattern: str|unicode", "id": "f2259:m15"}
{"signature": "@pipe.func<EOL>def readline(prev, filename=None, mode='<STR_LIT:r>', trim=str.rstrip, start=<NUM_LIT:1>, end=sys.maxsize):", "body": "if prev is None:<EOL><INDENT>if filename is None:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>elif is_str_type(filename):<EOL><INDENT>file_list = [filename, ]<EOL><DEDENT>else:<EOL><INDENT>file_list = filename<EOL><DEDENT><DEDENT>else:<EOL><INDENT>file_list = prev<EOL><DEDENT>for fn in file_list:<EOL><INDENT>if isinstance(fn, file_type):<EOL><INDENT>fd = fn<EOL><DEDENT>else:<EOL><INDENT>fd = open(fn, mode)<EOL><DEDENT>try:<EOL><INDENT>if start <= <NUM_LIT:1> and end == sys.maxsize:<EOL><INDENT>for line in fd:<EOL><INDENT>yield trim(line)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for line_no, line in enumerate(fd, <NUM_LIT:1>):<EOL><INDENT>if line_no < start:<EOL><INDENT>continue<EOL><DEDENT>yield trim(line)<EOL>if line_no >= end:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>if fd != fn:<EOL><INDENT>fd.close()<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "This pipe get filenames or file object from previous pipe and read the\n    content of file. Then, send the content of file line by line to next pipe.\n\n    The start and end parameters are used to limit the range of reading from file.\n\n    :param prev: The previous iterator of pipe.\n    :type prev: Pipe\n    :param filename: The files to be read. If None, use previous pipe input as filenames.\n    :type filename: None|str|unicode|list|tuple\n    :param mode: The mode to open file. default is 'r'\n    :type mode: str\n    :param trim: The function to trim the line before send to next pipe.\n    :type trim: function object.\n    :param start: if star is specified, only line number larger or equal to start will be sent.\n    :type start: integer\n    :param end: The last line number to read.\n    :type end: integer\n    :returns: generator", "id": "f2259:m21"}
{"signature": "@pipe.func<EOL>def subn(prev, pattern, repl, *args, **kw):", "body": "count = <NUM_LIT:0> if '<STR_LIT:count>' not in kw else kw.pop('<STR_LIT:count>')<EOL>pattern_obj = re.compile(pattern, *args, **kw)<EOL>for s in prev:<EOL><INDENT>yield pattern_obj.subn(repl, s, count=count)<EOL><DEDENT>", "docstring": "subn pipe is a wrapper of re.subn method.\n\n    :param prev: The previous iterator of pipe.\n    :type prev: Pipe\n    :param pattern: The pattern string.\n    :type pattern: str|unicode\n    :param repl: Check repl argument in re.sub method.\n    :type repl: str|unicode|callable", "id": "f2259:m17"}
{"signature": "def register_default_types():", "body": "register_type(type, pipe.map)<EOL>register_type(types.FunctionType, pipe.map)<EOL>register_type(types.MethodType, pipe.map)<EOL>register_type(tuple, seq)<EOL>register_type(list, seq)<EOL>register_type(types.GeneratorType, seq)<EOL>register_type(string_type, sh)<EOL>register_type(unicode_type, sh)<EOL>register_type(file_type, fileobj)<EOL>if is_py3:<EOL><INDENT>register_type(range, seq)<EOL>register_type(map, seq)<EOL><DEDENT>", "docstring": "Regiser all default type-to-pipe convertors.", "id": "f2259:m29"}
{"signature": "@pipe.func<EOL>def safe_substitute(prev, *args, **kw):", "body": "template_obj = string.Template(*args, **kw)<EOL>for data in prev:<EOL><INDENT>yield template_obj.safe_substitute(data)<EOL><DEDENT>", "docstring": "alias of string.Template.safe_substitute", "id": "f2259:m27"}
{"signature": "@pipe.func<EOL>def format(prev, format_string):", "body": "for i in prev:<EOL><INDENT>yield (format_string % i)<EOL><DEDENT>", "docstring": "The pipe formats the data passed from previous generator according to\n    given format_string argument.\n\n    :param prev: The previous iterator of pipe.\n    :type prev: Pipe\n    :param format_string: format string which used to format the data from\n                          previous iterator.\n    :type sequence: str\n    :returns: generator", "id": "f2259:m12"}
{"signature": "def run(cmd):", "body": "return cmd.run()<EOL>", "docstring": "Run pipe object and return its last result.\n\n    :param cmd: The Pipe object to be executed.\n    :type cmd: Pipe\n    :returns: The last result.\n\n    .. seealso::\n        :py:meth:`cmdlet.Pipe.run`", "id": "f2259:m0"}
{"signature": "@pipe.func<EOL>def stdout(prev, endl='<STR_LIT:\\n>', thru=False):", "body": "for i in prev:<EOL><INDENT>sys.stdout.write(str(i) + endl)<EOL>if thru:<EOL><INDENT>yield i<EOL><DEDENT><DEDENT>", "docstring": "This pipe read data from previous iterator and write it to stdout.\n\n    :param prev: The previous iterator of pipe.\n    :type prev: Pipe\n    :param endl: The end-of-line symbol for each output.\n    :type endl: str\n    :param thru: If true, data will passed to next generator. If false, data\n                 will be dropped.\n    :type thru: bool\n    :returns: generator", "id": "f2259:m19"}
{"signature": "@pipe.func<EOL>def grep(prev, pattern, *args, **kw):", "body": "inv = False if '<STR_LIT>' not in kw else kw.pop('<STR_LIT>')<EOL>pattern_obj = re.compile(pattern, *args, **kw)<EOL>for data in prev:<EOL><INDENT>if bool(inv) ^ bool(pattern_obj.match(data)):<EOL><INDENT>yield data<EOL><DEDENT><DEDENT>", "docstring": "The pipe greps the data passed from previous generator according to\n    given regular expression.\n\n    :param prev: The previous iterator of pipe.\n    :type prev: Pipe\n    :param pattern: The pattern which used to filter out data.\n    :type pattern: str|unicode|re pattern object\n    :param inv: If true, invert the match condition.\n    :type inv: boolean\n    :param kw:\n    :type kw: dict\n    :returns: generator", "id": "f2259:m13"}
{"signature": "@pipe.func<EOL>def pack(prev, n, rest=False, **kw):", "body": "if '<STR_LIT>' in kw:<EOL><INDENT>use_padding = True<EOL>padding = kw['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>use_padding = False<EOL>padding = None<EOL><DEDENT>items = []<EOL>for i, data in enumerate(prev, <NUM_LIT:1>):<EOL><INDENT>items.append(data)<EOL>if (i % n) == <NUM_LIT:0>:<EOL><INDENT>yield items<EOL>items = []<EOL><DEDENT><DEDENT>if len(items) != <NUM_LIT:0> and rest:<EOL><INDENT>if use_padding:<EOL><INDENT>items.extend([padding, ] * (n - (i % n)))<EOL><DEDENT>yield items<EOL><DEDENT>", "docstring": "pack pipe takes n elements from previous generator and yield one\n    list to next.\n\n    :param prev: The previous iterator of pipe.\n    :type prev: Pipe\n    :param rest: Set True to allow to output the rest part of last elements.\n    :type prev: boolean\n    :param padding: Specify the padding element for the rest part of last elements.\n    :type prev: boolean\n    :returns: generator\n\n    :Example:\n    >>> result([1,2,3,4,5,6,7] | pack(3))\n    [[1, 2, 3], [4, 5, 6]]\n\n    >>> result([1,2,3,4,5,6,7] | pack(3, rest=True))\n    [[1, 2, 3], [4, 5, 6], [7,]]\n\n    >>> result([1,2,3,4,5,6,7] | pack(3, padding=None))\n    [[1, 2, 3], [4, 5, 6], [7, None, None]]", "id": "f2259:m11"}
{"signature": "@pipe.func<EOL>def fileobj(prev, file_handle, endl='<STR_LIT>', thru=False):", "body": "if prev is not None:<EOL><INDENT>for i in prev:<EOL><INDENT>file_handle.write(str(i)+endl)<EOL>if thru:<EOL><INDENT>yield i<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for data in file_handle:<EOL><INDENT>yield data<EOL><DEDENT><DEDENT>", "docstring": "This pipe read/write data from/to file object which specified by\n    file_handle.\n\n    :param prev: The previous iterator of pipe.\n    :type prev: Pipe\n    :param file_handle: The file object to read or write.\n    :type file_handle: file object\n    :param endl: The end-of-line symbol for each output.\n    :type endl: str\n    :param thru: If true, data will passed to next generator. If false, data\n                 will be dropped.\n    :type thru: bool\n    :returns: generator", "id": "f2259:m22"}
{"signature": "@pipe.func<EOL>def attr(prev, attr_name):", "body": "for obj in prev:<EOL><INDENT>if hasattr(obj, attr_name):<EOL><INDENT>yield getattr(obj, attr_name)<EOL><DEDENT><DEDENT>", "docstring": "attr pipe can extract attribute value of object.\n\n    :param prev: The previous iterator of pipe.\n    :type prev: Pipe\n    :param attr_name: The name of attribute\n    :type attr_name: str\n    :returns: generator", "id": "f2259:m4"}
{"signature": "@pipe.func<EOL>def attrs(prev, attr_names):", "body": "for obj in prev:<EOL><INDENT>attr_values = []<EOL>for name in attr_names:<EOL><INDENT>if hasattr(obj, name):<EOL><INDENT>attr_values.append(getattr(obj, name))<EOL><DEDENT><DEDENT>yield attr_values<EOL><DEDENT>", "docstring": "attrs pipe can extract attribute values of object.\n\n    If attr_names is a list and its item is not a valid attribute of\n    prev's object. It will be excluded from yielded dict.\n\n    :param prev: The previous iterator of pipe.\n    :type prev: Pipe\n    :param attr_names: The list of attribute names\n    :type attr_names: str of list\n    :returns: generator", "id": "f2259:m5"}
{"signature": "def _get_env(self, env_var):", "body": "value = os.environ.get(env_var)<EOL>if not value:<EOL><INDENT>raise ValueError('<STR_LIT>' % env_var)<EOL><DEDENT>return value<EOL>", "docstring": "Helper to read an environment variable", "id": "f2265:c2:m1"}
{"signature": "def output_password(self, password):", "body": "print(password, file=sys.stdout)<EOL>", "docstring": "Output the password to the user.\n\n        This mostly exists to ease the testing process.", "id": "f2277:c0:m5"}
{"signature": "@classmethod<EOL><INDENT>def pass_from_pipe(cls):<DEDENT>", "body": "is_pipe = not sys.stdin.isatty()<EOL>return is_pipe and cls.strip_last_newline(sys.stdin.read())<EOL>", "docstring": "Return password from pipe if not on TTY, else False.", "id": "f2277:c0:m3"}
{"signature": "def once(func):", "body": "def wrapper(*args, **kwargs):<EOL><INDENT>if not hasattr(func, '<STR_LIT>'):<EOL><INDENT>func.always_returns = func(*args, **kwargs)<EOL><DEDENT>return func.always_returns<EOL><DEDENT>return functools.wraps(func)(wrapper)<EOL>", "docstring": "Decorate func so it's only ever called the first time.\n\nThis decorator can ensure that an expensive or non-idempotent function\nwill not be expensive on subsequent calls and is idempotent.\n\n>>> func = once(lambda a: a+3)\n>>> func(3)\n6\n>>> func(9)\n6\n>>> func('12')\n6", "id": "f2280:m0"}
{"signature": "def _check_old_config_root():", "body": "<EOL>globals()['<STR_LIT>'] = lambda: None<EOL>config_file_new = os.path.join(_config_root_Linux(), '<STR_LIT>')<EOL>config_file_old = os.path.join(_data_root_Linux(), '<STR_LIT>')<EOL>if os.path.isfile(config_file_old) and not os.path.isfile(config_file_new):<EOL><INDENT>msg = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>raise RuntimeError(msg.format(**locals()))<EOL><DEDENT>", "docstring": "Prior versions of keyring would search for the config\nin XDG_DATA_HOME, but should probably have been\nsearching for config in XDG_CONFIG_HOME. If the\nconfig exists in the former but not in the latter,\nraise a RuntimeError to force the change.", "id": "f2281:m4"}
{"signature": "def load_env():", "body": "try:<EOL><INDENT>return load_keyring(os.environ['<STR_LIT>'])<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Load a keyring configured in the environment variable.", "id": "f2284:m11"}
{"signature": "def set_keyring(keyring):", "body": "global _keyring_backend<EOL>if not isinstance(keyring, backend.KeyringBackend):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>_keyring_backend = keyring<EOL>", "docstring": "Set current keyring backend.", "id": "f2284:m0"}
{"signature": "def init_backend(limit=None):", "body": "<EOL>backend._limit = limit<EOL>keyrings = filter(limit, backend.get_all_keyring())<EOL>set_keyring(<EOL>load_env()<EOL>or load_config()<EOL>or max(keyrings, default=fail.Keyring(), key=backend.by_priority)<EOL>)<EOL>", "docstring": "Load a keyring specified in the config file or infer the best available.\n\nLimit, if supplied, should be a callable taking a backend and returning\nTrue if that backend should be included for consideration.", "id": "f2284:m8"}
{"signature": "def get_password(service_name, username):", "body": "return _keyring_backend.get_password(service_name, username)<EOL>", "docstring": "Get password from the specified service.", "id": "f2284:m3"}
{"signature": "def delete_password(self, service, username):", "body": "if not self.connected(service):<EOL><INDENT>raise PasswordDeleteError(\"<STR_LIT>\")<EOL><DEDENT>if not self.iface.hasEntry(self.handle, service, username, self.appid):<EOL><INDENT>raise PasswordDeleteError(\"<STR_LIT>\")<EOL><DEDENT>self.iface.removeEntry(self.handle, service, username, self.appid)<EOL>", "docstring": "Delete the password for the username of the service.", "id": "f2285:c0:m6"}
{"signature": "def get_password(self, service, username):", "body": "if not self.connected(service):<EOL><INDENT>raise KeyringLocked(\"<STR_LIT>\")<EOL><DEDENT>if not self.iface.hasEntry(self.handle, service, username, self.appid):<EOL><INDENT>return None<EOL><DEDENT>password = self.iface.readPassword(<EOL>self.handle, service, username, self.appid)<EOL>return str(password)<EOL>", "docstring": "Get password of the username for the service", "id": "f2285:c0:m4"}
{"signature": "@staticmethod<EOL><INDENT>def unpack(word):<DEDENT>", "body": "if not isinstance(word, str):<EOL><INDENT>return word<EOL><DEDENT>val, = struct.unpack('<STR_LIT>', word.encode('<STR_LIT:ascii>'))<EOL>return val<EOL>", "docstring": "r\"\"\"\n        >>> PackedAttributes.unpack(0)\n        0\n        >>> PackedAttributes.unpack('\\x00\\x00\\x00\\x01')\n        1\n        >>> PackedAttributes.unpack('abcd')\n        1633837924", "id": "f2287:c5:m1"}
{"signature": "def set_password(self, service, username, password):", "body": "collection = self.get_preferred_collection()<EOL>attributes = {<EOL>\"<STR_LIT>\": self.appid,<EOL>\"<STR_LIT>\": service,<EOL>\"<STR_LIT:username>\": username<EOL>}<EOL>label = \"<STR_LIT>\".format(username, service)<EOL>collection.create_item(label, attributes, password, replace=True)<EOL>", "docstring": "Set password for the username of the service", "id": "f2288:c0:m3"}
{"signature": "def get_preferred_collection(self):", "body": "bus = secretstorage.dbus_init()<EOL>try:<EOL><INDENT>if hasattr(self, '<STR_LIT>'):<EOL><INDENT>collection = secretstorage.Collection(<EOL>bus, self.preferred_collection)<EOL><DEDENT>else:<EOL><INDENT>collection = secretstorage.get_default_collection(bus)<EOL><DEDENT><DEDENT>except exceptions.SecretStorageException as e:<EOL><INDENT>raise InitError(\"<STR_LIT>\" % e)<EOL><DEDENT>if collection.is_locked():<EOL><INDENT>collection.unlock()<EOL>if collection.is_locked():  <EOL><INDENT>raise KeyringLocked(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return collection<EOL>", "docstring": "If self.preferred_collection contains a D-Bus path,\n        the collection at that address is returned. Otherwise,\n        the default collection is returned.", "id": "f2288:c0:m1"}
{"signature": "def get_password(self, service, username):", "body": "collection = self.get_preferred_collection()<EOL>items = collection.search_items(<EOL>{\"<STR_LIT:username>\": username, \"<STR_LIT>\": service})<EOL>for item in items:<EOL><INDENT>if hasattr(item, '<STR_LIT>'):<EOL><INDENT>item.unlock()<EOL><DEDENT>if item.is_locked():  <EOL><INDENT>raise KeyringLocked('<STR_LIT>')<EOL><DEDENT>return item.get_secret().decode('<STR_LIT:utf-8>')<EOL><DEDENT>", "docstring": "Get password of the username for the service", "id": "f2288:c0:m2"}
{"signature": "def delete_password(self, service, username):", "body": "collection = self.get_preferred_collection()<EOL>items = collection.search_items(<EOL>{\"<STR_LIT:username>\": username, \"<STR_LIT>\": service})<EOL>for item in items:<EOL><INDENT>return item.delete()<EOL><DEDENT>raise PasswordDeleteError(\"<STR_LIT>\")<EOL>", "docstring": "Delete the stored password (only the first one)", "id": "f2288:c0:m4"}
{"signature": "@properties.ClassProperty<EOL><INDENT>@classmethod<EOL>def priority(cls):<DEDENT>", "body": "if missing_deps:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>return <NUM_LIT:5><EOL>", "docstring": "If available, the preferred backend on Windows.", "id": "f2289:c0:m0"}
{"signature": "@properties.ClassProperty<EOL><INDENT>@classmethod<EOL>def backends(cls):<DEDENT>", "body": "allowed = (<EOL>keyring<EOL>for keyring in filter(backend._limit, backend.get_all_keyring())<EOL>if not isinstance(keyring, ChainerBackend)<EOL>and keyring.priority > <NUM_LIT:0><EOL>)<EOL>return sorted(allowed, key=backend.by_priority, reverse=True)<EOL>", "docstring": "Discover all keyrings for chaining.", "id": "f2291:c0:m1"}
{"signature": "@properties.ClassProperty<EOL><INDENT>@classmethod<EOL>def priority(cls):<DEDENT>", "body": "return <NUM_LIT:10> * (len(cls.backends) > <NUM_LIT:1>)<EOL>", "docstring": "High-priority if there are backends to chain, otherwise 0.", "id": "f2291:c0:m0"}
{"signature": "def get_credential(self, service, username):", "body": "<EOL>if username is not None:<EOL><INDENT>password = self.get_password(service, username)<EOL>if password is not None:<EOL><INDENT>return credentials.SimpleCredential(<EOL>username,<EOL>password,<EOL>)<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Gets the username and password for the service.\n        Returns a Credential instance.\n\n        The *username* argument is optional and may be omitted by\n        the caller or ignored by the backend. Callers must use the\n        returned username.", "id": "f2293:c1:m8"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def encrypt(self, value):<DEDENT>", "body": "pass<EOL>", "docstring": "Encrypt the value.", "id": "f2293:c2:m0"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def get_password(self, service, username):<DEDENT>", "body": "return None<EOL>", "docstring": "Get password of the username for the service", "id": "f2293:c1:m5"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def decrypt(self, value):<DEDENT>", "body": "pass<EOL>", "docstring": "Decrypt the value.", "id": "f2293:c2:m1"}
{"signature": "def get_queryset(self):", "body": "return Question.objects.filter(pub_date__lte=timezone.now())<EOL>", "docstring": "Excludes any questions that aren't published yet.", "id": "f2297:c1:m0"}
{"signature": "def get_queryset(self):", "body": "return Question.objects.filter(pub_date__lte=timezone.now())<EOL>", "docstring": "Excludes any questions that aren't published yet.", "id": "f2297:c2:m0"}
{"signature": "def create_question(question_text, days):", "body": "time = timezone.now() + datetime.timedelta(days=days)<EOL>return Question.objects.create(question_text=question_text, pub_date=time)<EOL>", "docstring": "Creates a question with the given `question_text` published the given\nnumber of `days` offset to now (negative for questions published\nin the past, positive for questions that have yet to be published).", "id": "f2300:m0"}
{"signature": "def get_field_value(self, field, value_verbose=True):", "body": "if not value_verbose:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>value = field._get_val_from_obj(self)<EOL><DEDENT>else:<EOL><INDENT>if isinstance(field, ForeignKey):<EOL><INDENT>value = getattr(self, field.name)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>value =  self._get_FIELD_display(field)<EOL><DEDENT>except :<EOL><INDENT>value = field._get_val_from_obj(self)<EOL><DEDENT><DEDENT><DEDENT>if(value == True or value == False or isinstance(value, (int, float))):<EOL><INDENT>return value<EOL><DEDENT>return unicode(value)<EOL>", "docstring": "\u8fd4\u56de\u663e\u793a\u7684\u503c\uff0c\u800c\u4e0d\u662f\u5355\u7eaf\u7684\u6570\u636e\u5e93\u4e2d\u7684\u503c\nfield  \u662fmodel\u4e2d\u7684field type\nvalue_verbose \u4e3aTrue\uff0c\u8fd4\u56de\u6570\u636e\u7684\u663e\u793a\u6570\u636e\uff0c\u4f1a\u8f6c\u6362\u4e3achoice\u7684\u5185\u5bb9\uff0c\n\u5982\u679cvalue_verbose \u4e3aFalse\uff0c \u8fd4\u56de\u6570\u636e\u7684\u5b9e\u9645\u503c", "id": "f2309:c0:m1"}
{"signature": "def get_querydict(self):", "body": "if self.method:<EOL><INDENT>querydict =  getattr(self.request, self.method.upper())<EOL><DEDENT>else:<EOL><INDENT>querydict =  getattr(self.request, '<STR_LIT:POST>'.upper())<EOL><DEDENT>query_dict =  dict(list(querydict.items()))<EOL>return query_dict<EOL>", "docstring": "\u8fd9\u4e2a\u51fd\u6570\u8ddf self.method\u6709\u5173\nself.method \u6682\u65f6\u6ca1\u7528, querydict\u90fd\u662fPOST\u7684", "id": "f2310:c1:m1"}
{"signature": "def get_limit_queryset(self):", "body": "queryset = self.get_queryset()<EOL>limit_queryset = queryset.all()[self.get_slice_start() :self.get_slice_end()] <EOL>return  limit_queryset<EOL>", "docstring": "\u8fd4\u56de\u5206\u9875\u4e4b\u540e\u7684queryset", "id": "f2310:c1:m6"}
{"signature": "def get_slice_start(self):", "body": "value = None<EOL>if self.easyui_page:<EOL><INDENT>value = (self.easyui_page -<NUM_LIT:1>) * self.easyui_rows<EOL><DEDENT>return  value<EOL>", "docstring": "\u8fd4\u56dequeryset\u5207\u7247\u7684\u5934", "id": "f2310:c1:m3"}
{"signature": "def get_queryset(self):", "body": "filter_dict = self.get_filter_dict()<EOL>queryset = super(EasyUIListMixin, self).get_queryset()<EOL>queryset =  queryset.filter(**filter_dict)<EOL>if self.easyui_order:<EOL><INDENT>queryset = queryset.order_by(self.easyui_order)<EOL><DEDENT>return queryset<EOL>", "docstring": "queryset", "id": "f2310:c1:m5"}
{"signature": "def get_template_names(self):", "body": "names = super(EasyUIDeleteView, self).get_template_names()<EOL>names.append('<STR_LIT>')<EOL>return names<EOL>", "docstring": "datagrid\u7684\u9ed8\u8ba4\u6a21\u677f", "id": "f2311:c3:m0"}
{"signature": "def success(request):", "body": "return HttpResponse('<STR_LIT:success>')<EOL>", "docstring": "\u589e\u5220\u6539\u64cd\u4f5c\u6210\u529f\u4e4b\u540e\u8fd4\u56de\u8fd9\u4e2a\u9875\u9762", "id": "f2313:m0"}
{"signature": "def get_url(request):", "body": "menu_id = request.GET.get('<STR_LIT>')<EOL>m_object = Menu.objects.get(pk=menu_id)<EOL>namespace = m_object.namespace<EOL>viewname = m_object.viewname<EOL>url_string = '<STR_LIT>' %(namespace, viewname)<EOL>url = reverse(url_string)<EOL>return HttpResponse(url)<EOL>", "docstring": "\u901a\u8fc7menu_id\uff0c\u83b7\u53d6\u5bf9\u5e94\u7684URL\neg. /easyui/MenuListView/", "id": "f2313:m1"}
{"signature": "def register_views(app_name, view_filename, urlpatterns=None):", "body": "app_module = __import__(app_name)<EOL>view_module = getattr(app_module, view_filename)<EOL>views = dir(view_module)<EOL>for view_name in views:<EOL><INDENT>if view_name.endswith('<STR_LIT>'):<EOL><INDENT>view = getattr(view_module, view_name)<EOL>if isinstance(view, object):<EOL><INDENT>if urlpatterns:<EOL><INDENT>urlpatterns  += patterns('<STR_LIT>',<EOL>url(r'<STR_LIT>' % view_name, view.as_view(),  name=view_name),<EOL>)<EOL><DEDENT>else:<EOL><INDENT>urlpatterns = patterns('<STR_LIT>',<EOL>url(r'<STR_LIT>' % view_name, view.as_view(),  name=view_name),<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>return urlpatterns<EOL>", "docstring": "app_name       APP\u540d\nview_filename  views \u6240\u5728\u7684\u6587\u4ef6\nurlpatterns    url\u4e2d\u5df2\u7ecf\u5b58\u5728\u7684urlpatterns\n\nreturn urlpatterns\n\n\u53ea\u5bfc\u5165View\u7ed3\u5c3e\u7684\uff0c\u662f\u7c7b\u7684\u89c6\u56fe", "id": "f2316:m1"}
{"signature": "def sentimentDF(symbol, type='<STR_LIT>', date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "ret = sentiment(symbol, type, date, token, version)<EOL>if type == '<STR_LIT>':<EOL><INDENT>ret = [ret]<EOL><DEDENT>df = pd.DataFrame(ret)<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "This endpoint provides social sentiment data from StockTwits. Data can be viewed as a daily value, or by minute for a given date.\n\n    https://iexcloud.io/docs/api/#social-sentiment\n    Continuous\n\n    Args:\n        symbol (string); Ticker to request\n        type (string); 'daily' or 'minute'\n        date (string); date in YYYYMMDD or datetime\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2329:m3"}
{"signature": "def cryptoDF(token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(crypto(token, version))<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "This will return an array of quotes for all Cryptocurrencies supported by the IEX API. Each element is a standard quote object with four additional keys.\n\n    https://iexcloud.io/docs/api/#crypto\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2329:m1"}
{"signature": "def crypto(token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "This will return an array of quotes for all Cryptocurrencies supported by the IEX API. Each element is a standard quote object with four additional keys.\n\n    https://iexcloud.io/docs/api/#crypto\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2329:m0"}
{"signature": "def largestTrades(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>', token, version)<EOL>", "docstring": "This returns 15 minute delayed, last sale eligible trades.\n\n    https://iexcloud.io/docs/api/#largest-trades\n    9:30-4pm ET M-F during regular market hours\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m48"}
{"signature": "def _statsToDF(s):", "body": "if s:<EOL><INDENT>df = pd.io.json.json_normalize(s)<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>df = pd.DataFrame()<EOL><DEDENT>return df<EOL>", "docstring": "internal", "id": "f2330:m46"}
{"signature": "def batchDF(symbols, fields=None, range_='<STR_LIT>', last=<NUM_LIT:10>, token='<STR_LIT>', version='<STR_LIT>'):", "body": "x = batch(symbols, fields, range_, last, token, version)<EOL>ret = {}<EOL>if isinstance(symbols, str):<EOL><INDENT>for field in x.keys():<EOL><INDENT>ret[field] = _MAPPING[field](x[field])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for symbol in x.keys():<EOL><INDENT>for field in x[symbol].keys():<EOL><INDENT>if field not in ret:<EOL><INDENT>ret[field] = pd.DataFrame()<EOL><DEDENT>dat = x[symbol][field]<EOL>dat = _MAPPING[field](dat)<EOL>dat['<STR_LIT>'] = symbol<EOL>ret[field] = pd.concat([ret[field], dat], sort=True)<EOL><DEDENT><DEDENT><DEDENT>return ret<EOL>", "docstring": "Batch several data requests into one invocation\n\n    https://iexcloud.io/docs/api/#batch-requests\n\n\n    Args:\n        symbols (list); List of tickers to request\n        fields (list); List of fields to request\n        range_ (string); Date range for chart\n        last (int);\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: results in json", "id": "f2330:m3"}
{"signature": "def shortInterestDF(symbol, date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(shortInterest(symbol, date, token, version))<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report.\n\n    The report data will be published daily at 4:00pm ET.\n\n    https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev\n\n    Args:\n        symbol (string); Ticker to request\n        date (datetime); Effective Datetime\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m91"}
{"signature": "def logoNotebook(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>url = logo(symbol, token, version)['<STR_LIT:url>']<EOL>return ImageI(url=url)<EOL>", "docstring": "This is a helper function, but the google APIs url is standardized.\n\n    https://iexcloud.io/docs/api/#logo\n    8am UTC daily\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        image: result", "id": "f2330:m54"}
{"signature": "def marketVolumeDF(token='<STR_LIT>', version='<STR_LIT>'):", "body": "return pd.DataFrame(marketVolume())<EOL>", "docstring": "This endpoint returns real time traded volume on U.S. markets.\n\n    https://iexcloud.io/docs/api/#market-volume-u-s\n    7:45am-5:15pm ET Mon-Fri\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m56"}
{"signature": "def sectorPerformance(token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "This returns an array of each sector and performance for the current trading day. Performance is based on each sector ETF.\n\n    https://iexcloud.io/docs/api/#sector-performance\n    8am-5pm ET Mon-Fri\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m81"}
{"signature": "def priceTargetDF(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.io.json.json_normalize(priceTarget(symbol, token, version))<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "Provides the latest avg, high, and low analyst price target for a symbol.\n\n    https://iexcloud.io/docs/api/#price-target\n    Updates at 10am, 11am, 12pm UTC every day\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m76"}
{"signature": "def volumeByVenueDF(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(volumeByVenue(symbol, token, version))<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "This returns 15 minute delayed and 30 day average consolidated volume percentage of a stock, by market.\n    This call will always return 13 values, and will be sorted in ascending order by current day trading volume percentage.\n\n    https://iexcloud.io/docs/api/#volume-by-venue\n    Updated during regular market hours 9:30am-4pm ET\n\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m87"}
{"signature": "def _splitsToDF(s):", "body": "df = pd.DataFrame(s)<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "internal", "id": "f2330:m84"}
{"signature": "def logoPNG(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>response = requests.get(logo(symbol, token, version)['<STR_LIT:url>'])<EOL>return ImageP.open(BytesIO(response.content))<EOL>", "docstring": "This is a helper function, but the google APIs url is standardized.\n\n    https://iexcloud.io/docs/api/#logo\n    8am UTC daily\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        image: result as png", "id": "f2330:m53"}
{"signature": "def batch(symbols, fields=None, range_='<STR_LIT>', last=<NUM_LIT:10>, token='<STR_LIT>', version='<STR_LIT>'):", "body": "fields = fields or _BATCH_TYPES[:<NUM_LIT:10>]  <EOL>if not isinstance(symbols, [].__class__):<EOL><INDENT>if not isinstance(symbols, str):<EOL><INDENT>raise PyEXception('<STR_LIT>')<EOL><DEDENT><DEDENT>if isinstance(fields, str):<EOL><INDENT>fields = [fields]<EOL><DEDENT>if range_ not in _TIMEFRAME_CHART:<EOL><INDENT>raise PyEXception('<STR_LIT>' % str(_TIMEFRAME_CHART))<EOL><DEDENT>if isinstance(symbols, str):<EOL><INDENT>route = '<STR_LIT>'.format(symbols, '<STR_LIT:U+002C>'.join(fields), range_, last)<EOL>return _getJson(route, token, version)<EOL><DEDENT>if len(symbols) > <NUM_LIT:100>:<EOL><INDENT>raise PyEXception('<STR_LIT>')<EOL><DEDENT>route = '<STR_LIT>'.format('<STR_LIT:U+002C>'.join(symbols), '<STR_LIT:U+002C>'.join(fields), range_, last)<EOL>return _getJson(route, token, version)<EOL>", "docstring": "Batch several data requests into one invocation\n\n    https://iexcloud.io/docs/api/#batch-requests\n\n\n    Args:\n        symbols (list); List of tickers to request\n        fields (list); List of fields to request\n        range_ (string); Date range for chart\n        last (int);\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: results in json", "id": "f2330:m2"}
{"signature": "def threshold(date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "if date:<EOL><INDENT>date = _strOrDate(date)<EOL>return _getJson('<STR_LIT>' + date, token, version)<EOL><DEDENT>return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "The following are IEX-listed securities that have an aggregate fail to deliver position for five consecutive settlement days at a registered clearing agency, totaling 10,000 shares or more and equal to at least 0.5% of the issuer\u2019s total shares outstanding (i.e., \u201cthreshold securities\u201d).\n    The report data will be published to the IEX website daily at 8:30 p.m. ET with data for that trading day.\n\n    https://iexcloud.io/docs/api/#listed-regulation-sho-threshold-securities-list-in-dev\n\n    Args:\n        date (datetime); Effective Datetime\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m88"}
{"signature": "def book(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>', token, version)<EOL>", "docstring": "Book data\n\n    https://iextrading.com/developer/docs/#book\n    realtime during Investors Exchange market hours\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m6"}
{"signature": "def logo(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>', token, version)<EOL>", "docstring": "This is a helper function, but the google APIs url is standardized.\n\n    https://iexcloud.io/docs/api/#logo\n    8am UTC daily\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m52"}
{"signature": "def balanceSheet(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>', token, version)<EOL>", "docstring": "Pulls balance sheet data. Available quarterly (4 quarters) and annually (4 years)\n\n    https://iexcloud.io/docs/api/#balance-sheet\n    Updates at 8am, 9am UTC daily\n\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m0"}
{"signature": "def _peersToDF(p):", "body": "df = pd.DataFrame(p, columns=['<STR_LIT>'])<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>df['<STR_LIT>'] = df.index<EOL>return df<EOL>", "docstring": "internal", "id": "f2330:m67"}
{"signature": "def _bookToDF(b):", "body": "quote = b.get('<STR_LIT>', [])<EOL>asks = b.get('<STR_LIT>', [])<EOL>bids = b.get('<STR_LIT>', [])<EOL>trades = b.get('<STR_LIT>', [])<EOL>df1 = pd.io.json.json_normalize(quote)<EOL>df1['<STR_LIT:type>'] = '<STR_LIT>'<EOL>df2 = pd.io.json.json_normalize(asks)<EOL>df2['<STR_LIT>'] = quote['<STR_LIT>']<EOL>df2['<STR_LIT:type>'] = '<STR_LIT>'<EOL>df3 = pd.io.json.json_normalize(bids)<EOL>df3['<STR_LIT>'] = quote['<STR_LIT>']<EOL>df3['<STR_LIT:type>'] = '<STR_LIT>'<EOL>df4 = pd.io.json.json_normalize(trades)<EOL>df4['<STR_LIT>'] = quote['<STR_LIT>']<EOL>df3['<STR_LIT:type>'] = '<STR_LIT>'<EOL>df = pd.concat([df1, df2, df3, df4], sort=True)<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "internal", "id": "f2330:m7"}
{"signature": "def marketNews(count=<NUM_LIT:10>, token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _getJson('<STR_LIT>' + str(count), token, version)<EOL>", "docstring": "News about market\n\n    https://iexcloud.io/docs/api/#news\n    Continuous\n\n    Args:\n        count (int): limit number of results\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m60"}
{"signature": "def news(symbol, count=<NUM_LIT:10>, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>' + str(count), token, version)<EOL>", "docstring": "News about company\n\n    https://iexcloud.io/docs/api/#news\n    Continuous\n\n    Args:\n        symbol (string); Ticker to request\n        count (int): limit number of results\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m57"}
{"signature": "def shortInterest(symbol, date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>if date:<EOL><INDENT>date = _strOrDate(date)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>' + date, token, version)<EOL><DEDENT>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>', token, version)<EOL>", "docstring": "The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report.\n\n    The report data will be published daily at 4:00pm ET.\n\n    https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev\n\n    Args:\n        symbol (string); Ticker to request\n        date (datetime); Effective Datetime\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m90"}
{"signature": "def chart(symbol, timeframe='<STR_LIT>', date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>if timeframe is not None and timeframe != '<STR_LIT>':<EOL><INDENT>if timeframe not in _TIMEFRAME_CHART:<EOL><INDENT>raise PyEXception('<STR_LIT>' % str(_TIMEFRAME_CHART))<EOL><DEDENT>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>' + '<STR_LIT:/>' + timeframe, token, version)<EOL><DEDENT>if date:<EOL><INDENT>date = _strOrDate(date)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>' + '<STR_LIT>' + date, token, version)<EOL><DEDENT>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>', token, version)<EOL>", "docstring": "Historical price/volume data, daily and intraday\n\n    https://iexcloud.io/docs/api/#historical-prices\n    Data Schedule\n    1d: -9:30-4pm ET Mon-Fri on regular market trading days\n        -9:30-1pm ET on early close trading days\n    All others:\n        -Prior trading day available after 4am ET Tue-Sat\n\n    Args:\n        symbol (string); Ticker to request\n        timeframe (string); Timeframe to request e.g. 1m\n        date (datetime): date, if requesting intraday\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m11"}
{"signature": "def thresholdDF(date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(threshold(date, token, version))<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "The following are IEX-listed securities that have an aggregate fail to deliver position for five consecutive settlement days at a registered clearing agency, totaling 10,000 shares or more and equal to at least 0.5% of the issuer\u2019s total shares outstanding (i.e., \u201cthreshold securities\u201d).\n    The report data will be published to the IEX website daily at 8:30 p.m. ET with data for that trading day.\n\n    https://iexcloud.io/docs/api/#listed-regulation-sho-threshold-securities-list-in-dev\n\n    Args:\n        date (datetime); Effective Datetime\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m89"}
{"signature": "def chartDF(symbol, timeframe='<STR_LIT>', date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "c = chart(symbol, timeframe, date, token, version)<EOL>df = pd.DataFrame(c)<EOL>_toDatetime(df)<EOL>if timeframe is not None and timeframe != '<STR_LIT>':<EOL><INDENT>_reindex(df, '<STR_LIT:date>')<EOL><DEDENT>else:<EOL><INDENT>if not df.empty:<EOL><INDENT>df.set_index(['<STR_LIT:date>', '<STR_LIT>'], inplace=True)<EOL><DEDENT>else:<EOL><INDENT>return pd.DataFrame()<EOL><DEDENT><DEDENT>return df<EOL>", "docstring": "Historical price/volume data, daily and intraday\n\n    https://iexcloud.io/docs/api/#historical-prices\n    Data Schedule\n    1d: -9:30-4pm ET Mon-Fri on regular market trading days\n        -9:30-1pm ET on early close trading days\n    All others:\n        -Prior trading day available after 4am ET Tue-Sat\n\n    Args:\n        symbol (string); Ticker to request\n        timeframe (string); Timeframe to request e.g. 1m\n        date (datetime): date, if requesting intraday\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m13"}
{"signature": "def yesterdayDF(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "y = yesterday(symbol, token, version)<EOL>if y:<EOL><INDENT>df = pd.io.json.json_normalize(y)<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>df = pd.DataFrame()<EOL><DEDENT>return df<EOL>", "docstring": "This returns previous day adjusted price data for one or more stocks\n\n    https://iexcloud.io/docs/api/#previous-day-prices\n    Available after 4am ET Tue-Sat\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m70"}
{"signature": "def spreadDF(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(spread(symbol, token, version))<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "This returns an array of effective spread, eligible volume, and price improvement of a stock, by market.\n    Unlike volume-by-venue, this will only return a venue if effective spread is not \u2018N/A\u2019. Values are sorted in descending order by effectiveSpread.\n    Lower effectiveSpread and higher priceImprovement values are generally considered optimal.\n\n    Effective spread is designed to measure marketable orders executed in relation to the market center\u2019s\n    quoted spread and takes into account hidden and midpoint liquidity available at each market center.\n    Effective Spread is calculated by using eligible trade prices recorded to the consolidated tape and\n    comparing those trade prices to the National Best Bid and Offer (\u201cNBBO\u201d) at the time of the execution.\n\n    View the data disclaimer at the bottom of the stocks app for more information about how these values are calculated.\n\n\n    https://iexcloud.io/docs/api/#earnings-today\n    8am ET M-F\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m32"}
{"signature": "def marketOhlcDF(token='<STR_LIT>', version='<STR_LIT>'):", "body": "x = marketOhlc(token, version)<EOL>data = []<EOL>for key in x:<EOL><INDENT>data.append(x[key])<EOL>data[-<NUM_LIT:1>]['<STR_LIT>'] = key<EOL><DEDENT>df = pd.io.json.json_normalize(data)<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "Returns the official open and close for whole market.\n\n    https://iexcloud.io/docs/api/#news\n    9:30am-5pm ET Mon-Fri\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m65"}
{"signature": "def ipoUpcoming(token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "This returns a list of upcoming or today IPOs scheduled for the current and next month. The response is split into two structures:\n    rawData and viewData. rawData represents all available data for an IPO. viewData represents data structured for display to a user.\n\n    https://iexcloud.io/docs/api/#ipo-calendar\n    10am, 10:30am UTC daily\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m43"}
{"signature": "def _earningsToDF(e):", "body": "if e:<EOL><INDENT>df = pd.io.json.json_normalize(e, '<STR_LIT>', '<STR_LIT>')<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>df = pd.DataFrame()<EOL><DEDENT>return df<EOL>", "docstring": "internal", "id": "f2330:m27"}
{"signature": "def splits(symbol, timeframe='<STR_LIT>', token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>if timeframe not in _TIMEFRAME_DIVSPLIT:<EOL><INDENT>raise PyEXception('<STR_LIT>' % str(_TIMEFRAME_DIVSPLIT))<EOL><DEDENT>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>' + timeframe, token, version)<EOL>", "docstring": "Stock split history\n\n    https://iexcloud.io/docs/api/#splits\n    Updated at 9am UTC every day\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m83"}
{"signature": "def financials(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>', token, version)<EOL>", "docstring": "Pulls income statement, balance sheet, and cash flow data from the four most recent reported quarters.\n\n    https://iexcloud.io/docs/api/#financials\n    Updates at 8am, 9am UTC daily\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m36"}
{"signature": "def companyDF(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "c = company(symbol, token, version)<EOL>df = _companyToDF(c)<EOL>return df<EOL>", "docstring": "Company reference data\n\n    https://iexcloud.io/docs/api/#company\n    Updates at 4am and 5am UTC every day\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m20"}
{"signature": "def marketNewsDF(count=<NUM_LIT:10>, token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(marketNews(count, token, version))<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "News about market\n\n    https://iexcloud.io/docs/api/#news\n    Continuous\n\n    Args:\n        count (int): limit number of results\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m61"}
{"signature": "def earnings(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>', token, version)<EOL>", "docstring": "Earnings data for a given company including the actual EPS, consensus, and fiscal period. Earnings are available quarterly (last 4 quarters) and annually (last 4 years).\n\n    https://iexcloud.io/docs/api/#earnings\n    Updates at 9am, 11am, 12pm UTC every day\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m26"}
{"signature": "def incomeStatement(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>', token, version)<EOL>", "docstring": "Pulls income statement data. Available quarterly (4 quarters) or annually (4 years).\n\n    https://iexcloud.io/docs/api/#income-statement\n    Updates at 8am, 9am UTC daily\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m39"}
{"signature": "def ipoToday(token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "This returns a list of upcoming or today IPOs scheduled for the current and next month. The response is split into two structures:\n    rawData and viewData. rawData represents all available data for an IPO. viewData represents data structured for display to a user.\n\n    https://iexcloud.io/docs/api/#ipo-calendar\n    10am, 10:30am UTC daily\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m41"}
{"signature": "def priceTarget(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>', token, version)<EOL>", "docstring": "Provides the latest avg, high, and low analyst price target for a symbol.\n\n    https://iexcloud.io/docs/api/#price-target\n    Updates at 10am, 11am, 12pm UTC every day\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m75"}
{"signature": "def marketYesterdayDF(token='<STR_LIT>', version='<STR_LIT>'):", "body": "x = marketYesterday(token, version)<EOL>data = []<EOL>for key in x:<EOL><INDENT>data.append(x[key])<EOL>data[-<NUM_LIT:1>]['<STR_LIT>'] = key<EOL><DEDENT>df = pd.DataFrame(data)<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "This returns previous day adjusted price data for whole market\n\n    https://iexcloud.io/docs/api/#previous-day-prices\n    Available after 4am ET Tue-Sat\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m72"}
{"signature": "def delayedQuoteDF(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.io.json.json_normalize(delayedQuote(symbol, token, version))<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "This returns the 15 minute delayed market quote.\n\n    https://iexcloud.io/docs/api/#delayed-quote\n    15min delayed\n    4:30am - 8pm ET M-F when market is open\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m22"}
{"signature": "def _dividendsToDF(d):", "body": "df = pd.DataFrame(d)<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "internal", "id": "f2330:m24"}
{"signature": "def volumeByVenue(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>', token, version)<EOL>", "docstring": "This returns 15 minute delayed and 30 day average consolidated volume percentage of a stock, by market.\n    This call will always return 13 values, and will be sorted in ascending order by current day trading volume percentage.\n\n    https://iexcloud.io/docs/api/#volume-by-venue\n    Updated during regular market hours 9:30am-4pm ET\n\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m86"}
{"signature": "def marketShortInterestDF(date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(marketShortInterest(date, token, version))<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report.\n\n    The report data will be published daily at 4:00pm ET.\n\n    https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev\n\n    Args:\n        date (datetime); Effective Datetime\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m93"}
{"signature": "def marketOhlc(token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "Returns the official open and close for whole market.\n\n    https://iexcloud.io/docs/api/#news\n    9:30am-5pm ET Mon-Fri\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m64"}
{"signature": "def delayedQuote(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>return _getJson('<STR_LIT>' + symbol + '<STR_LIT>', token, version)<EOL>", "docstring": "This returns the 15 minute delayed market quote.\n\n    https://iexcloud.io/docs/api/#delayed-quote\n    15min delayed\n    4:30am - 8pm ET M-F when market is open\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2330:m21"}
{"signature": "def peersDF(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "p = peers(symbol, token, version)<EOL>df = _peersToDF(p)<EOL>return df<EOL>", "docstring": "Peers of ticker\n\n    https://iexcloud.io/docs/api/#peers\n    8am UTC daily\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m68"}
{"signature": "def _estimatesToDF(f):", "body": "if f:<EOL><INDENT>df = pd.io.json.json_normalize(f, '<STR_LIT>', '<STR_LIT>')<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>df = pd.DataFrame()<EOL><DEDENT>return df<EOL>", "docstring": "internal", "id": "f2330:m34"}
{"signature": "def sectorPerformanceDF(token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(sectorPerformance(token, version))<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT:name>')<EOL>return df<EOL>", "docstring": "This returns an array of each sector and performance for the current trading day. Performance is based on each sector ETF.\n\n    https://iexcloud.io/docs/api/#sector-performance\n    8am-5pm ET Mon-Fri\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m82"}
{"signature": "def quoteDF(symbol, token='<STR_LIT>', version='<STR_LIT>'):", "body": "q = quote(symbol, token, version)<EOL>if q:<EOL><INDENT>df = pd.io.json.json_normalize(q)<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>df = pd.DataFrame()<EOL><DEDENT>return df<EOL>", "docstring": "Get quote for ticker\n\n    https://iexcloud.io/docs/api/#quote\n    4:30am-8pm ET Mon-Fri\n\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m78"}
{"signature": "def listDF(option='<STR_LIT>', token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(list(option, token, version))<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "Returns an array of quotes for the top 10 symbols in a specified list.\n\n\n    https://iexcloud.io/docs/api/#list\n    Updated intraday\n\n    Args:\n        option (string); Option to query\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2330:m51"}
{"signature": "def internationalSymbolsDF(region='<STR_LIT>', exchange='<STR_LIT>', token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(internationalSymbols(region, exchange, token, version))<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "This call returns an array of international symbols that IEX Cloud supports for API calls.\n\n    https://iexcloud.io/docs/api/#international-symbols\n    8am, 9am, 12pm, 1pm UTC daily\n\n    Args:\n        region (string); region, 2 letter case insensitive string of country codes using ISO 3166-1 alpha-2\n        exchange (string): Case insensitive string of Exchange using IEX Supported Exchanges list\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2331:m15"}
{"signature": "def internationalSymbolsList(region='<STR_LIT>', exchange='<STR_LIT>', token='<STR_LIT>', version='<STR_LIT>'):", "body": "return internationalSymbolsDF(region, exchange, token, version).index.tolist()<EOL>", "docstring": "This call returns an array of international symbols that IEX Cloud supports for API calls.\n\n    https://iexcloud.io/docs/api/#international-symbols\n    8am, 9am, 12pm, 1pm UTC daily\n\n    Args:\n        region (string); region, 2 letter case insensitive string of country codes using ISO 3166-1 alpha-2\n        exchange (string): Case insensitive string of Exchange using IEX Supported Exchanges list\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        list: result", "id": "f2331:m20"}
{"signature": "@deprecated(details='<STR_LIT>')<EOL>def directory(date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "if date:<EOL><INDENT>date = _strOrDate(date)<EOL>return _getJson('<STR_LIT>' + date, token, version)<EOL><DEDENT>return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "Args:\n    date (datetime): Effective date\n    token (string); Access token\n    version (string); API version\n\nReturns:\n    dict: result", "id": "f2331:m27"}
{"signature": "def calendar(type='<STR_LIT>', direction='<STR_LIT>', last=<NUM_LIT:1>, startDate=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "if startDate:<EOL><INDENT>startDate = _strOrDate(startDate)<EOL>return _getJson('<STR_LIT>'.format(type=type, direction=direction, last=last, date=startDate), token, version)<EOL><DEDENT>return _getJson('<STR_LIT>' + type + '<STR_LIT:/>' + direction + '<STR_LIT:/>' + str(last), token, version)<EOL>", "docstring": "This call allows you to fetch a number of trade dates or holidays from a given date. For example, if you want the next trading day, you would call /ref-data/us/dates/trade/next/1.\n\n    https://iexcloud.io/docs/api/#u-s-exchanges\n    8am, 9am, 12pm, 1pm UTC daily\n\n    Args:\n        type (string); \"holiday\" or \"trade\"\n        direction (string); \"next\" or \"last\"\n        last (int); number to move in direction\n        startDate (date); start date for next or last, YYYYMMDD\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2331:m2"}
{"signature": "def mutualFundSymbolsDF(token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(mutualFundSymbols(token, version))<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "This call returns an array of mutual fund symbols that IEX Cloud supports for API calls.\n\n    https://iexcloud.io/docs/api/#mutual-fund-symbols\n    8am, 9am, 12pm, 1pm UTC daily\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2331:m13"}
{"signature": "@deprecated(details='<STR_LIT>')<EOL>def corporateActions(date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "if date:<EOL><INDENT>date = _strOrDate(date)<EOL>return _getJson('<STR_LIT>' + date, token, version)<EOL><DEDENT>return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "Args:\n    date (datetime): Effective date\n    token (string); Access token\n    version (string); API version\n\nReturns:\n    dict: result", "id": "f2331:m21"}
{"signature": "@deprecated(details='<STR_LIT>')<EOL>def nextDayExtDate(date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "if date:<EOL><INDENT>date = _strOrDate(date)<EOL>return _getJson('<STR_LIT>' + date, token, version)<EOL><DEDENT>return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "Args:\n    date (datetime): Effective date\n    token (string); Access token\n    version (string); API version\n\nReturns:\n    dict: result", "id": "f2331:m25"}
{"signature": "def iexSymbolsList(token='<STR_LIT>', version='<STR_LIT>'):", "body": "return iexSymbolsDF(token, version).index.tolist()<EOL>", "docstring": "This call returns an array of symbols the Investors Exchange supports for trading.\n    This list is updated daily as of 7:45 a.m. ET. Symbols may be added or removed by the Investors Exchange after the list was produced.\n\n    https://iexcloud.io/docs/api/#iex-symbols\n    8am, 9am, 12pm, 1pm UTC daily\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        list: result", "id": "f2331:m17"}
{"signature": "def calendarDF(type='<STR_LIT>', direction='<STR_LIT>', last=<NUM_LIT:1>, startDate=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "dat = pd.DataFrame(calendar(type, direction, last, startDate, token, version))<EOL>_toDatetime(dat)<EOL>return dat<EOL>", "docstring": "This call allows you to fetch a number of trade dates or holidays from a given date. For example, if you want the next trading day, you would call /ref-data/us/dates/trade/next/1.\n\n    https://iexcloud.io/docs/api/#u-s-exchanges\n    8am, 9am, 12pm, 1pm UTC daily\n\n    Args:\n        type (string); \"holiday\" or \"trade\"\n        direction (string); \"next\" or \"last\"\n        last (int); number to move in direction\n        startDate (date); start date for next or last, YYYYMMDD\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2331:m3"}
{"signature": "@deprecated(details='<STR_LIT>')<EOL>def directoryDF(date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(directory(date, token, version))<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "Args:\n    date (datetime): Effective date\n    token (string); Access token\n    version (string); API version\n\nReturns:\n    dict: result", "id": "f2331:m28"}
{"signature": "@deprecated(details='<STR_LIT>')<EOL>def dividendsDF(date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(dividends(date, token, version))<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "Args:\n    date (datetime): Effective date\n    token (string); Access token\n    version (string); API version\n\nReturns:\n    DataFrame: result", "id": "f2331:m24"}
{"signature": "@deprecated(details='<STR_LIT>')<EOL>def dividends(date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "if date:<EOL><INDENT>date = _strOrDate(date)<EOL>return _getJson('<STR_LIT>' + date, token, version)<EOL><DEDENT>return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "Args:\n    date (datetime): Effective date\n    token (string); Access token\n    version (string); API version\n\nReturns:\n    dict: result", "id": "f2331:m23"}
{"signature": "def exchangesDF(token='<STR_LIT>', version='<STR_LIT>'):", "body": "return pd.DataFrame(exchanges())<EOL>", "docstring": "Returns an array of U.S. exchanges.\n\n    https://iexcloud.io/docs/api/#u-s-exchanges\n    8am, 9am, 12pm, 1pm UTC daily\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2331:m1"}
{"signature": "def systemEventSSE(symbols=None, on_data=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _runSSE('<STR_LIT>', symbols, on_data, token, version)<EOL>", "docstring": "The System event message is used to indicate events that apply to the market or the data feed.\n\n    There will be a single message disseminated per channel for each System Event type within a given trading session.\n\n    https://iexcloud.io/docs/api/#deep-system-event\n\n    Args:\n        symbols (string); Tickers to request\n        on_data (function): Callback on data\n        token (string); Access token\n        version (string); API version", "id": "f2332:m11"}
{"signature": "def securityEventSSE(symbols=None, on_data=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _runSSE('<STR_LIT>', symbols, on_data, token, version)<EOL>", "docstring": "The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs\n\n    https://iexcloud.io/docs/api/#deep-security-event\n\n    Args:\n        symbols (string); Tickers to request\n        on_data (function): Callback on data\n        token (string); Access token\n        version (string); API version", "id": "f2332:m9"}
{"signature": "def officialPriceSSE(symbols=None, on_data=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _runSSE('<STR_LIT>', symbols, on_data, token, version)<EOL>", "docstring": "The Official Price message is used to disseminate the IEX Official Opening and Closing Prices.\n\n    These messages will be provided only for IEX Listed Securities.\n\n    https://iexcloud.io/docs/api/#deep-official-price\n\n    Args:\n        symbols (string); Tickers to request\n        on_data (function): Callback on data\n        token (string); Access token\n        version (string); API version", "id": "f2332:m8"}
{"signature": "def auctionSSE(symbols=None, on_data=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _runSSE('<STR_LIT>', symbols, on_data, token, version)<EOL>", "docstring": "DEEP broadcasts an Auction Information Message every one second between the Lock-in Time and the auction match for Opening and Closing Auctions,\n    and during the Display Only Period for IPO, Halt, and Volatility Auctions. Only IEX listed securities are eligible for IEX Auctions.\n\n    https://iexcloud.io/docs/api/#deep-auction\n\n    Args:\n        symbols (string); Tickers to request\n        on_data (function): Callback on data\n        token (string); Access token\n        version (string); API version", "id": "f2332:m5"}
{"signature": "def tradeBreaksSSE(symbols=None, on_data=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _runSSE('<STR_LIT>', symbols, on_data, token, version)<EOL>", "docstring": "Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.\n\n    https://iexcloud.io/docs/api/#deep-trades\n\n    Args:\n        symbols (string); Tickers to request\n        on_data (function): Callback on data\n        token (string); Access token\n        version (string); API version", "id": "f2332:m12"}
{"signature": "def bookSSE(symbols=None, on_data=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _runSSE('<STR_LIT>', symbols, on_data, token, version)<EOL>", "docstring": "Book shows IEX\u2019s bids and asks for given symbols.\n\n    https://iexcloud.io/docs/api/#deep-book\n\n    Args:\n        symbols (string); Tickers to request\n        on_data (function): Callback on data\n        token (string); Access token\n        version (string); API version", "id": "f2332:m6"}
{"signature": "def opHaltStatusSSE(symbols=None, on_data=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _runSSE('<STR_LIT>', symbols, on_data, token, version)<EOL>", "docstring": "The Exchange may suspend trading of one or more securities on IEX for operational reasons and indicates such operational halt using the Operational halt status message.\n\n    IEX disseminates a full pre-market spin of Operational halt status messages indicating the operational halt status of all securities.\n    In the spin, IEX will send out an Operational Halt Message with \u201cN\u201d (Not operationally halted on IEX) for all securities that are eligible for trading at the start of the Pre-Market Session.\n    If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System at the start of the Pre-Market Session.\n\n    After the pre-market spin, IEX will use the Operational halt status message to relay changes in operational halt status for an individual security.\n\n    https://iexcloud.io/docs/api/#deep-operational-halt-status\n\n    Args:\n        symbols (string); Tickers to request\n        on_data (function): Callback on data\n        token (string); Access token\n        version (string); API version", "id": "f2332:m7"}
{"signature": "def deepSSE(symbols=None, channels=None, on_data=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "symbols = _strCommaSeparatedString(symbols)<EOL>channels = channels or []<EOL>if isinstance(channels, str):<EOL><INDENT>if channels not in DeepChannelsSSE.options():<EOL><INDENT>raise PyEXception('<STR_LIT>', type(channels))<EOL><DEDENT>channels = [channels]<EOL><DEDENT>elif isinstance(channels, DeepChannelsSSE):<EOL><INDENT>channels = [channels.value]<EOL><DEDENT>elif isinstance(channels, list):<EOL><INDENT>for i, c in enumerate(channels):<EOL><INDENT>if isinstance(c, DeepChannelsSSE):<EOL><INDENT>channels[i] = c.value<EOL><DEDENT>elif not isinstance(c, str) or isinstance(c, str) and c not in DeepChannelsSSE.options():<EOL><INDENT>raise PyEXception('<STR_LIT>', c)<EOL><DEDENT><DEDENT><DEDENT>channels = _strCommaSeparatedString(channels)<EOL>return _streamSSE(_SSE_DEEP_URL_PREFIX.format(symbols=symbols, channels=channels, token=token, version=version), on_data)<EOL>", "docstring": "DEEP is used to receive real-time depth of book quotations direct from IEX.\n    The depth of book quotations received via DEEP provide an aggregated size of resting displayed orders at a price and side,\n    and do not indicate the size or number of individual orders at any price level.\n    Non-displayed orders and non-displayed portions of reserve orders are not represented in DEEP.\n\n    DEEP also provides last trade price and size information. Trades resulting from either displayed or non-displayed orders matching on IEX will be reported. Routed executions will not be reported.\n\n    https://iexcloud.io/docs/api/#deep\n\n    Args:\n        symbols (string); Tickers to request\n        on_data (function): Callback on data\n        token (string); Access token\n        version (string); API version", "id": "f2332:m3"}
{"signature": "def deep(symbol=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>if symbol:<EOL><INDENT>return _getJson('<STR_LIT>' + symbol, token, version)<EOL><DEDENT>return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "DEEP is used to receive real-time depth of book quotations direct from IEX.\n    The depth of book quotations received via DEEP provide an aggregated size of resting displayed orders at a price and side,\n    and do not indicate the size or number of individual orders at any price level.\n    Non-displayed orders and non-displayed portions of reserve orders are not represented in DEEP.\n\n    DEEP also provides last trade price and size information. Trades resulting from either displayed or non-displayed orders matching on IEX will be reported. Routed executions will not be reported.\n\n    https://iexcloud.io/docs/api/#deep\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2333:m4"}
{"signature": "def tradingStatusDF(symbol=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "x = tradingStatus(symbol, token, version)<EOL>data = []<EOL>for key in x:<EOL><INDENT>d = x[key]<EOL>d['<STR_LIT>'] = key<EOL>data.append(d)<EOL><DEDENT>df = pd.DataFrame(data)<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "The Trading status message is used to indicate the current trading status of a security.\n     For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons.\n     For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable.\n\n    IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities.\n     In the spin, IEX will send out a Trading status message with \u201cT\u201d (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session.\n     If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System.\n\n    After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. Messages will be sent when a security is:\n\n    Halted\n    Paused*\n    Released into an Order Acceptance Period*\n    Released for trading\n    *The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. Trading pauses on non-IEX-listed securities will be treated simply as a halt.\n\n    https://iexcloud.io/docs/api/#deep-trading-status\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2333:m25"}
{"signature": "def bookDF(symbol=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "x = book(symbol, token, version)<EOL>data = []<EOL>for key in x:<EOL><INDENT>d = x[key]<EOL>d['<STR_LIT>'] = key<EOL>data.append(d)<EOL><DEDENT>df = pd.io.json.json_normalize(data)<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "Book shows IEX\u2019s bids and asks for given symbols.\n\n    https://iexcloud.io/docs/api/#deep-book\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2333:m9"}
{"signature": "def tradeBreak(symbol=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>if symbol:<EOL><INDENT>return _getJson('<STR_LIT>' + symbol, token, version)<EOL><DEDENT>return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "Trade break messages are sent when an execution on IEX is broken on that same trading day. Trade breaks are rare and only affect applications that rely upon IEX execution based data.\n\n    https://iexcloud.io/docs/api/#deep-trade-break\n\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2333:m22"}
{"signature": "def topsDF(symbols=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.io.json.json_normalize(tops(symbols, token, version))<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "TOPS provides IEX\u2019s aggregated best quoted bid and offer position in near real time for all securities on IEX\u2019s displayed limit order book.\n    TOPS is ideal for developers needing both quote and trade data.\n\n    https://iexcloud.io/docs/api/#tops\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2333:m1"}
{"signature": "def opHaltStatusDF(symbol=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "x = opHaltStatus(symbol, token, version)<EOL>data = []<EOL>for key in x:<EOL><INDENT>d = x[key]<EOL>d['<STR_LIT>'] = key<EOL>data.append(d)<EOL><DEDENT>df = pd.DataFrame(data)<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "The Exchange may suspend trading of one or more securities on IEX for operational reasons and indicates such operational halt using the Operational halt status message.\n\n    IEX disseminates a full pre-market spin of Operational halt status messages indicating the operational halt status of all securities.\n    In the spin, IEX will send out an Operational Halt Message with \u201cN\u201d (Not operationally halted on IEX) for all securities that are eligible for trading at the start of the Pre-Market Session.\n    If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System at the start of the Pre-Market Session.\n\n    After the pre-market spin, IEX will use the Operational halt status message to relay changes in operational halt status for an individual security.\n\n    https://iexcloud.io/docs/api/#deep-operational-halt-status\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2333:m11"}
{"signature": "def lastDF(symbols=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.io.json.json_normalize(last(symbols, token, version))<EOL>_toDatetime(df)<EOL>_reindex(df, '<STR_LIT>')<EOL>return df<EOL>", "docstring": "Last provides trade data for executions on IEX. It is a near real time, intraday API that provides IEX last sale price, size and time.\n    Last is ideal for developers that need a lightweight stock quote.\n\n    https://iexcloud.io/docs/api/#last\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2333:m3"}
{"signature": "def ssrStatus(symbol=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "_raiseIfNotStr(symbol)<EOL>if symbol:<EOL><INDENT>return _getJson('<STR_LIT>' + symbol, token, version)<EOL><DEDENT>return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "In association with Rule 201 of Regulation SHO, the Short Sale Price Test Message is used to indicate when a short sale price test restriction is in effect for a security.\n\n    IEX disseminates a full pre-market spin of Short sale price test status messages indicating the Rule 201 status of all securities.\n     After the pre-market spin, IEX will use the Short sale price test status message in the event of an intraday status change.\n\n    The IEX Trading System will process orders based on the latest short sale price test restriction status.\n\n    https://iexcloud.io/docs/api/#deep-short-sale-price-test-status\n\n    Args:\n        symbol (string); Ticker to request\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2333:m16"}
{"signature": "def systemEventDF(token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.io.json.json_normalize(systemEvent(token, version))<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "The System event message is used to indicate events that apply to the market or the data feed.\n\n    There will be a single message disseminated per channel for each System Event type within a given trading session.\n\n    https://iexcloud.io/docs/api/#deep-system-event\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        DataFrame: result", "id": "f2333:m19"}
{"signature": "@deprecated(details='<STR_LIT>')<EOL>def tradingStatusWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)<EOL>sendinit = ({'<STR_LIT>': symbols, '<STR_LIT>': ['<STR_LIT>']},)<EOL>return _stream(_wsURL('<STR_LIT>'), sendinit, on_data)<EOL>", "docstring": "https://iextrading.com/developer/docs/#trading-status", "id": "f2334:m6"}
{"signature": "@deprecated(details='<STR_LIT>')<EOL>def tradesWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)<EOL>sendinit = ({'<STR_LIT>': symbols, '<STR_LIT>': ['<STR_LIT>']},)<EOL>return _stream(_wsURL('<STR_LIT>'), sendinit, on_data)<EOL>", "docstring": "https://iextrading.com/developer/docs/#trades", "id": "f2334:m4"}
{"signature": "@deprecated(details='<STR_LIT>')<EOL>def opHaltStatusWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)<EOL>sendinit = ({'<STR_LIT>': symbols, '<STR_LIT>': ['<STR_LIT>']},)<EOL>return _stream(_wsURL('<STR_LIT>'), sendinit, on_data)<EOL>", "docstring": "https://iextrading.com/developer/docs/#operational-halt-status", "id": "f2334:m7"}
{"signature": "@deprecated(details='<STR_LIT>')<EOL>def bookWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)<EOL>sendinit = ({'<STR_LIT>': symbols, '<STR_LIT>': ['<STR_LIT>']},)<EOL>return _stream(_wsURL('<STR_LIT>'), sendinit, on_data)<EOL>", "docstring": "https://iextrading.com/developer/docs/#book51", "id": "f2334:m3"}
{"signature": "@deprecated(details='<STR_LIT>')<EOL>def auctionWS(symbols=None, on_data=None):", "body": "symbols = _strToList(symbols)<EOL>sendinit = ({'<STR_LIT>': symbols, '<STR_LIT>': ['<STR_LIT>']},)<EOL>return _stream(_wsURL('<STR_LIT>'), sendinit, on_data)<EOL>", "docstring": "https://iextrading.com/developer/docs/#auction", "id": "f2334:m11"}
{"signature": "@deprecated(details='<STR_LIT>')<EOL>def marketsDF(token='<STR_LIT>', version='<STR_LIT>'):", "body": "df = pd.DataFrame(markets(token, version))<EOL>_toDatetime(df)<EOL>return df<EOL>", "docstring": "https://iextrading.com/developer/docs/#intraday", "id": "f2337:m1"}
{"signature": "def __init__(self, addr, sendinit=None, on_data=None, on_open=None, on_close=None, raw=True):", "body": "self.addr = addr<EOL>self.sendinit = sendinit<EOL>on_data = on_data or print<EOL>class Namespace(BaseNamespace):<EOL><INDENT>def on_connect(self, *data):<EOL><INDENT>if on_open:<EOL><INDENT>on_open(_tryJson(data, raw))<EOL><DEDENT><DEDENT>def on_disconnect(self, *data):<EOL><INDENT>if on_close:<EOL><INDENT>on_close(_tryJson(data, raw))<EOL><DEDENT><DEDENT>def on_message(self, data):<EOL><INDENT>on_data(_tryJson(data, raw))<EOL><DEDENT><DEDENT>self._Namespace = Namespace<EOL>", "docstring": "addr: path to sio\nsendinit: tuple to emit\non_data, on_open, on_close: functions to call", "id": "f2338:c1:m0"}
{"signature": "def _raiseIfNotStr(s):", "body": "if s is not None and not isinstance(s, string_types):<EOL><INDENT>raise PyEXception('<STR_LIT>' % str(type(s)))<EOL><DEDENT>", "docstring": "internal", "id": "f2338:m7"}
{"signature": "def _stream(url, sendinit=None, on_data=print):", "body": "cl = WSClient(url, sendinit=sendinit, on_data=on_data)<EOL>return cl<EOL>", "docstring": "internal", "id": "f2338:m9"}
{"signature": "def _streamSSE(url, on_data=print, accrue=False):", "body": "messages = SSEClient(url)<EOL>if accrue:<EOL><INDENT>ret = []<EOL><DEDENT>for msg in messages:<EOL><INDENT>data = msg.data<EOL>on_data(json.loads(data))<EOL>if accrue:<EOL><INDENT>ret.append(msg)<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "internal", "id": "f2338:m10"}
{"signature": "def _strToList(st):", "body": "if isinstance(st, string_types):<EOL><INDENT>return [st]<EOL><DEDENT>return st<EOL>", "docstring": "internal", "id": "f2338:m4"}
{"signature": "def _tryJson(data, raw=True):", "body": "if raw:<EOL><INDENT>return data<EOL><DEDENT>try:<EOL><INDENT>return json.loads(data)<EOL><DEDENT>except ValueError:<EOL><INDENT>return data<EOL><DEDENT>", "docstring": "internal", "id": "f2338:m8"}
{"signature": "def _reindex(df, col):", "body": "if col in df.columns:<EOL><INDENT>df.set_index(col, inplace=True)<EOL><DEDENT>", "docstring": "internal", "id": "f2338:m11"}
{"signature": "def summary(date=None, token='<STR_LIT>', version='<STR_LIT>'):", "body": "if date:<EOL><INDENT>if isinstance(date, str):<EOL><INDENT>return _getJson('<STR_LIT>' + date, token, version)<EOL><DEDENT>elif isinstance(date, datetime):<EOL><INDENT>return _getJson('<STR_LIT>' + date.strftime('<STR_LIT>'), token, version)<EOL><DEDENT>else:<EOL><INDENT>raise PyEXception(\"<STR_LIT>\" % str(type(date)), token, version)<EOL><DEDENT><DEDENT>return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "https://iexcloud.io/docs/api/#stats-historical-summary\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2339:m6"}
{"signature": "def daily(date=None, last='<STR_LIT>', token='<STR_LIT>', version='<STR_LIT>'):", "body": "if date:<EOL><INDENT>date = _strOrDate(date)<EOL>return _getJson('<STR_LIT>' + date, token, version)<EOL><DEDENT>elif last:<EOL><INDENT>return _getJson('<STR_LIT>' + last, token, version)<EOL><DEDENT>return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "https://iexcloud.io/docs/api/#stats-historical-daily\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2339:m8"}
{"signature": "def records(token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "https://iexcloud.io/docs/api/#stats-records\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2339:m4"}
{"signature": "def stats(token='<STR_LIT>', version='<STR_LIT>'):", "body": "return _getJson('<STR_LIT>', token, version)<EOL>", "docstring": "https://iexcloud.io/docs/api/#stats-intraday\n\n    Args:\n        token (string); Access token\n        version (string); API version\n\n    Returns:\n        dict: result", "id": "f2339:m0"}
{"signature": "def get_config_bool(name):", "body": "cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX)<EOL>return cli_config.getboolean('<STR_LIT>', name, False)<EOL>", "docstring": "Checks if a config value is set to a valid bool value.", "id": "f2344:m1"}
{"signature": "def aad_cache():", "body": "return jsonpickle.decode(get_config_value('<STR_LIT>', fallback=None)),jsonpickle.decode(get_config_value('<STR_LIT>', fallback=None))<EOL>", "docstring": "AAD token cache.", "id": "f2344:m11"}
{"signature": "def client_endpoint():", "body": "return get_config_value('<STR_LIT>', None)<EOL>", "docstring": "Cluster HTTP gateway endpoint address and port, represented as a URL.", "id": "f2344:m3"}
{"signature": "def set_auth(pem=None, cert=None, key=None, aad=False):", "body": "if any([cert, key]) and pem:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if any([cert, key]) and not all([cert, key]):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if pem:<EOL><INDENT>set_config_value('<STR_LIT>', '<STR_LIT>')<EOL>set_config_value('<STR_LIT>', pem)<EOL><DEDENT>elif cert or key:<EOL><INDENT>set_config_value('<STR_LIT>', '<STR_LIT>')<EOL>set_config_value('<STR_LIT>', cert)<EOL>set_config_value('<STR_LIT>', key)<EOL><DEDENT>elif aad:<EOL><INDENT>set_config_value('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>set_config_value('<STR_LIT>', '<STR_LIT:none>')<EOL><DEDENT>", "docstring": "Set certificate usage paths", "id": "f2344:m15"}
{"signature": "def set_aad_metadata(uri, resource, client):", "body": "set_config_value('<STR_LIT>', uri)<EOL>set_config_value('<STR_LIT>', resource)<EOL>set_config_value('<STR_LIT>', client)<EOL>", "docstring": "Set AAD metadata.", "id": "f2344:m14"}
{"signature": "def set_aad_cache(token, cache):", "body": "set_config_value('<STR_LIT>', jsonpickle.encode(token))<EOL>set_config_value('<STR_LIT>', jsonpickle.encode(cache))<EOL>", "docstring": "Set AAD token cache.", "id": "f2344:m12"}
{"signature": "def set_no_verify(no_verify):", "body": "if no_verify:<EOL><INDENT>set_config_value('<STR_LIT>', '<STR_LIT:true>')<EOL><DEDENT>else:<EOL><INDENT>set_config_value('<STR_LIT>', '<STR_LIT:false>')<EOL><DEDENT>", "docstring": "Configure if cert verification should be skipped.", "id": "f2344:m7"}
{"signature": "def no_verify_setting():", "body": "return get_config_bool('<STR_LIT>')<EOL>", "docstring": "True to skip certificate SSL validation and verification", "id": "f2344:m6"}
{"signature": "def set_ca_cert(ca_path=None):", "body": "if ca_path:<EOL><INDENT>set_config_value('<STR_LIT>', ca_path)<EOL>set_config_value('<STR_LIT>', '<STR_LIT:true>')<EOL><DEDENT>else:<EOL><INDENT>set_config_value('<STR_LIT>', '<STR_LIT:false>')<EOL><DEDENT>", "docstring": "Configure paths to CA cert(s).", "id": "f2344:m9"}
{"signature": "def cert_info():", "body": "sec_type = security_type()<EOL>if sec_type == '<STR_LIT>':<EOL><INDENT>return get_config_value('<STR_LIT>', fallback=None)<EOL><DEDENT>if sec_type == '<STR_LIT>':<EOL><INDENT>cert_path = get_config_value('<STR_LIT>', fallback=None)<EOL>key_path = get_config_value('<STR_LIT>', fallback=None)<EOL>return cert_path, key_path<EOL><DEDENT>return None<EOL>", "docstring": "Path to certificate related files, either a single file path or a\n    tuple. In the case of no security, returns None.", "id": "f2344:m10"}
{"signature": "def select(endpoint, cert=None, key=None, pem=None, ca=None, <EOL>aad=False, no_verify=False):<EOL>", "body": "from sfctl.config import (set_ca_cert, set_auth, set_aad_cache,<EOL>set_cluster_endpoint,<EOL>set_no_verify)<EOL>from msrest import ServiceClient, Configuration<EOL>from sfctl.auth import ClientCertAuthentication, AdalAuthentication<EOL>select_arg_verify(endpoint, cert, key, pem, ca, aad, no_verify)<EOL>if aad:<EOL><INDENT>new_token, new_cache = get_aad_token(endpoint, no_verify)<EOL>set_aad_cache(new_token, new_cache)<EOL>rest_client = ServiceClient(<EOL>AdalAuthentication(no_verify),<EOL>Configuration(endpoint)<EOL>)<EOL>rest_client.send(rest_client.get('<STR_LIT:/>')).raise_for_status()<EOL><DEDENT>else:<EOL><INDENT>client_cert = None<EOL>if pem:<EOL><INDENT>client_cert = pem<EOL><DEDENT>elif cert:<EOL><INDENT>client_cert = (cert, key)<EOL><DEDENT>rest_client = ServiceClient(<EOL>ClientCertAuthentication(client_cert, ca, no_verify),<EOL>Configuration(endpoint)<EOL>)<EOL>rest_client.send(rest_client.get('<STR_LIT:/>')).raise_for_status()<EOL><DEDENT>set_cluster_endpoint(endpoint)<EOL>set_no_verify(no_verify)<EOL>set_ca_cert(ca)<EOL>set_auth(pem, cert, key, aad)<EOL>", "docstring": "Connects to a Service Fabric cluster endpoint.\nIf connecting to secure cluster specify an absolute path to a cert (.crt)\nand key file (.key) or a single file with both (.pem). 
Do not specify both.\nOptionally, if connecting to a secure cluster, specify also an absolute\npath to a CA bundle file or directory of trusted CA certs.\n:param str endpoint: Cluster endpoint URL, including port and HTTP or HTTPS\nprefix\n:param str cert: Absolute path to a client certificate file\n:param str key: Absolute path to client certificate key file\n:param str pem: Absolute path to client certificate, as a .pem file\n:param str ca: Absolute path to CA certs directory to treat as valid\nor CA bundle\nfile\n:param bool aad: Use Azure Active Directory for authentication\n:param bool no_verify: Disable verification for certificates when using\nHTTPS, note: this is an insecure option and should not be used for\nproduction environments", "id": "f2345:m1"}
{"signature": "def get_aad_token(endpoint, no_verify):<EOL>", "body": "from azure.servicefabric.service_fabric_client_ap_is import (<EOL>ServiceFabricClientAPIs<EOL>)<EOL>from sfctl.auth import ClientCertAuthentication<EOL>from sfctl.config import set_aad_metadata<EOL>auth = ClientCertAuthentication(None, None, no_verify)<EOL>client = ServiceFabricClientAPIs(auth, base_url=endpoint)<EOL>aad_metadata = client.get_aad_metadata()<EOL>if aad_metadata.type != \"<STR_LIT>\":<EOL><INDENT>raise CLIError(\"<STR_LIT>\")<EOL><DEDENT>aad_resource = aad_metadata.metadata<EOL>tenant_id = aad_resource.tenant<EOL>authority_uri = aad_resource.login + '<STR_LIT:/>' + tenant_id<EOL>context = adal.AuthenticationContext(authority_uri,<EOL>api_version=None)<EOL>cluster_id = aad_resource.cluster<EOL>client_id = aad_resource.client<EOL>set_aad_metadata(authority_uri, cluster_id, client_id)<EOL>code = context.acquire_user_code(cluster_id, client_id)<EOL>print(code['<STR_LIT:message>'])<EOL>token = context.acquire_token_with_device_code(<EOL>cluster_id, code, client_id)<EOL>print(\"<STR_LIT>\")<EOL>return token, context.cache<EOL>", "docstring": "Get AAD token", "id": "f2345:m2"}
{"signature": "def launch():", "body": "cli_env = cli()<EOL>return cli_env.invoke(sys.argv[<NUM_LIT:1>:])<EOL>", "docstring": "Entry point for Service Fabric CLI.\n\n    Configures and invokes CLI with arguments passed during the time the python\n    session is launched", "id": "f2346:m1"}
{"signature": "def cli():", "body": "return VersionedCLI(cli_name=SF_CLI_NAME,<EOL>config_dir=SF_CLI_CONFIG_DIR,<EOL>config_env_var_prefix=SF_CLI_ENV_VAR_PREFIX,<EOL>commands_loader_cls=SFCommandLoader,<EOL>help_cls=SFCommandHelp)<EOL>", "docstring": "Create CLI environment", "id": "f2346:m0"}
{"signature": "def signed_session(self, session=None):", "body": "if session:<EOL><INDENT>session = super(ClientCertAuthentication, self).signed_session(session)<EOL><DEDENT>else:<EOL><INDENT>session = super(ClientCertAuthentication, self).signed_session()<EOL><DEDENT>if self.cert is not None:<EOL><INDENT>session.cert = self.cert<EOL><DEDENT>if self.ca_cert is not None:<EOL><INDENT>session.verify = self.ca_cert<EOL><DEDENT>if self.no_verify:<EOL><INDENT>session.verify = False<EOL><DEDENT>return session<EOL>", "docstring": "Create requests session with any required auth headers\n        applied.\n\n        :rtype: requests.Session.", "id": "f2347:c0:m1"}
{"signature": "def signed_session(self, session=None):", "body": "from sfctl.config import (aad_metadata, aad_cache)<EOL>if session:<EOL><INDENT>session = super(AdalAuthentication, self).signed_session(session)<EOL><DEDENT>else:<EOL><INDENT>session = super(AdalAuthentication, self).signed_session()<EOL><DEDENT>if self.no_verify:<EOL><INDENT>session.verify = False<EOL><DEDENT>authority_uri, cluster_id, client_id = aad_metadata()<EOL>existing_token, existing_cache = aad_cache()<EOL>context = adal.AuthenticationContext(authority_uri,<EOL>cache=existing_cache)<EOL>new_token = context.acquire_token(cluster_id,<EOL>existing_token['<STR_LIT>'], client_id)<EOL>header = \"<STR_LIT>\".format(\"<STR_LIT>\", new_token['<STR_LIT>'])<EOL>session.headers['<STR_LIT>'] = header<EOL>return session<EOL>", "docstring": "Create requests session with AAD auth headers\n\n        :rtype: requests.Session.", "id": "f2347:c1:m1"}
{"signature": "def load_command_table(self, args): ", "body": "<EOL>with CommandSuperGroup(__name__, self,<EOL>'<STR_LIT>') as super_group:<EOL><INDENT>with super_group.group('<STR_LIT>') as group:<EOL><INDENT>group.command('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT><DEDENT>with CommandSuperGroup(__name__, self, '<STR_LIT>',<EOL>client_factory=client_create) as super_group: <EOL><INDENT>with super_group.group('<STR_LIT>') as group:<EOL><INDENT>group.command('<STR_LIT>', '<STR_LIT>')<EOL>group.command('<STR_LIT>', '<STR_LIT>')<EOL>group.command('<STR_LIT>', '<STR_LIT>')<EOL>group.command('<STR_LIT:list>', '<STR_LIT>')<EOL>group.command('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT><DEDENT>with ArgumentsContext(self, '<STR_LIT>') as ac:<EOL><INDENT>ac.argument('<STR_LIT>', options_list=['<STR_LIT>', '<STR_LIT>'])<EOL>ac.argument('<STR_LIT>', options_list=['<STR_LIT>', '<STR_LIT>'])<EOL>ac.argument('<STR_LIT>', options_list=['<STR_LIT>', '<STR_LIT>'])<EOL>ac.argument('<STR_LIT>', options_list=['<STR_LIT>', '<STR_LIT>'])<EOL>ac.argument('<STR_LIT>', options_list=['<STR_LIT>', '<STR_LIT>'])<EOL>ac.argument('<STR_LIT>', options_list=['<STR_LIT>', '<STR_LIT>'])<EOL>ac.argument('<STR_LIT>', options_list=['<STR_LIT>', '<STR_LIT>'])<EOL><DEDENT>return OrderedDict(self.command_table)<EOL>", "docstring": "Load all Service Fabric commands", "id": "f2349:c1:m0"}
{"signature": "def get_reliabledictionary_type_schema(client, application_name, service_name, dictionary_name, type_name, output_file=None):", "body": "cluster = Cluster.from_sfclient(client)<EOL>dictionary = cluster.get_application(application_name).get_service(service_name).get_dictionary(dictionary_name)<EOL>result = json.dumps(dictionary.get_complex_type(type_name), indent=<NUM_LIT:4>)<EOL>if (output_file == None):<EOL><INDENT>output_file = \"<STR_LIT>\".format(application_name, service_name, dictionary_name, type_name)<EOL><DEDENT>with open(output_file, \"<STR_LIT:w>\") as output:<EOL><INDENT>output.write(result)<EOL><DEDENT>print('<STR_LIT>' + output_file)<EOL>print(result)<EOL>", "docstring": "Query complex type information existing reliable dictionaries for given application and service. Make sure to provide entire namespace for your type if necessary.\n\n    :param application_name: Name of the application.\n    :type application_name: str\n    :param service_name: Name of the service.\n    :type service_name: str\n    :param dictionary_name: Name of the reliable dictionary.\n    :type dictionary_name: str\n    :param type_name: Name of the complex type.\n    :type type_name: str\n    :param output_file: Optional file to save the schema.", "id": "f2353:m2"}
{"signature": "def get_reliabledictionary_list(client, application_name, service_name):", "body": "cluster = Cluster.from_sfclient(client)<EOL>service = cluster.get_application(application_name).get_service(service_name)<EOL>for dictionary in service.get_dictionaries():<EOL><INDENT>print(dictionary.name)<EOL><DEDENT>", "docstring": "List existing reliable dictionaries.\n\n    List existing reliable dictionaries and respective schema for given application and service.\n\n    :param application_name: Name of the application.\n    :type application_name: str\n    :param service_name: Name of the service.\n    :type service_name: str", "id": "f2353:m0"}
{"signature": "def query_reliabledictionary(client, application_name, service_name, dictionary_name, query_string, partition_key=None, partition_id=None, output_file=None):", "body": "cluster = Cluster.from_sfclient(client)<EOL>dictionary = cluster.get_application(application_name).get_service(service_name).get_dictionary(dictionary_name)<EOL>start = time.time()<EOL>if (partition_id != None):<EOL><INDENT>result = dictionary.query(query_string, PartitionLookup.ID, partition_id)<EOL><DEDENT>elif (partition_key != None):<EOL><INDENT>result = dictionary.query(query_string, PartitionLookup.KEY, partition_key)<EOL><DEDENT>else:<EOL><INDENT>result = dictionary.query(query_string)<EOL><DEDENT>if type(result) is str:<EOL><INDENT>print(result)<EOL>return<EOL><DEDENT>else:<EOL><INDENT>result = json.dumps(result.get(\"<STR_LIT:value>\"), indent=<NUM_LIT:4>)<EOL><DEDENT>print(\"<STR_LIT>\" + str(time.time() - start) + \"<STR_LIT>\")<EOL>if (output_file == None):<EOL><INDENT>output_file = \"<STR_LIT>\".format(application_name, service_name, dictionary_name)<EOL><DEDENT>with open(output_file, \"<STR_LIT:w>\") as output:<EOL><INDENT>output.write(result)<EOL><DEDENT>print()<EOL>print('<STR_LIT>' + output_file)<EOL>print(result)<EOL>", "docstring": "Query existing reliable dictionary.\n\n    Query existing reliable dictionaries for given application and service.\n\n    :param application_name: Name of the application.\n    :type application_name: str\n    :param service_name: Name of the service.\n    :type service_name: str\n    :param dictionary_name: Name of the reliable dictionary.\n    :type dictionary_name: str\n    :param query_string: An OData query string. For example $top=10. 
Check https://www.odata.org/documentation/ for more information.\n    :type query_string: str\n    :param partition_key: Optional partition key of the desired partition, either a string if named schema or int if Int64 schema\n    :type partition_id: str\n    :param partition_id: Optional partition GUID of the owning reliable dictionary.\n    :type partition_id: str\n    :param output_file: Optional file to save the schema.", "id": "f2353:m3"}
{"signature": "def find(self, y):", "body": "node = self.root<EOL>while True:<EOL><INDENT>edge = self._edgeLabel(node, node.parent)<EOL>if edge.startswith(y):<EOL><INDENT>return node.idx<EOL><DEDENT>i = <NUM_LIT:0><EOL>while(i < len(edge) and edge[i] == y[<NUM_LIT:0>]):<EOL><INDENT>y = y[<NUM_LIT:1>:]<EOL>i += <NUM_LIT:1><EOL><DEDENT>if i != <NUM_LIT:0>:<EOL><INDENT>if i == len(edge) and y != '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT><DEDENT>node = node._get_transition_link(y[<NUM_LIT:0>])<EOL>if not node:<EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT><DEDENT>", "docstring": "Returns starting position of the substring y in the string used for\n        building the Suffix tree.\n\n        :param y: String\n        :return: Index of the starting position of string y in the string used for building the Suffix tree\n                 -1 if y is not a substring.", "id": "f2362:c0:m15"}
{"signature": "def _terminalSymbolsGenerator(self):", "body": "py2 = sys.version[<NUM_LIT:0>] < '<STR_LIT:3>'<EOL>UPPAs = list(list(range(<NUM_LIT>,<NUM_LIT>+<NUM_LIT:1>)) + list(range(<NUM_LIT>,<NUM_LIT>+<NUM_LIT:1>)) + list(range(<NUM_LIT>, <NUM_LIT>+<NUM_LIT:1>)))<EOL>for i in UPPAs:<EOL><INDENT>if py2:<EOL><INDENT>yield(unichr(i))<EOL><DEDENT>else:<EOL><INDENT>yield(chr(i))<EOL><DEDENT><DEDENT>raise ValueError(\"<STR_LIT>\")<EOL>", "docstring": "Generator of unique terminal symbols used for building the Generalized Suffix Tree.\n        Unicode Private Use Area U+E000..U+F8FF is used to ensure that terminal symbols\n        are not part of the input string.", "id": "f2362:c0:m18"}
{"signature": "def _find_lcs(self, node, stringIdxs):", "body": "nodes = [self._find_lcs(n, stringIdxs)<EOL>for (n,_) in node.transition_links<EOL>if n.generalized_idxs.issuperset(stringIdxs)]<EOL>if nodes == []:<EOL><INDENT>return node<EOL><DEDENT>deepestNode = max(nodes, key=lambda n: n.depth)<EOL>return deepestNode<EOL>", "docstring": "Helper method that finds LCS by traversing the labeled GSD.", "id": "f2362:c0:m13"}
{"signature": "def _build_McCreight(self, x):", "body": "u = self.root<EOL>d = <NUM_LIT:0><EOL>for i in range(len(x)):<EOL><INDENT>while u.depth == d and u._has_transition(x[d+i]):<EOL><INDENT>u = u._get_transition_link(x[d+i])<EOL>d = d + <NUM_LIT:1><EOL>while d < u.depth and x[u.idx + d] == x[i + d]:<EOL><INDENT>d = d + <NUM_LIT:1><EOL><DEDENT><DEDENT>if d < u.depth:<EOL><INDENT>u = self._create_node(x, u, d)<EOL><DEDENT>self._create_leaf(x, i, u, d)<EOL>if not u._get_suffix_link():<EOL><INDENT>self._compute_slink(x, u)<EOL><DEDENT>u = u._get_suffix_link()<EOL>d = d - <NUM_LIT:1><EOL>if d < <NUM_LIT:0>:<EOL><INDENT>d = <NUM_LIT:0><EOL><DEDENT><DEDENT>", "docstring": "Builds a Suffix tree using McCreight O(n) algorithm.\n\n        Algorithm based on:\n        McCreight, Edward M. \"A space-economical suffix tree construction algorithm.\" - ACM, 1976.\n        Implementation based on:\n        UH CS - 58093 String Processing Algorithms Lecture Notes", "id": "f2362:c0:m4"}
{"signature": "def _build_generalized(self, xs):", "body": "terminal_gen = self._terminalSymbolsGenerator()<EOL>_xs = '<STR_LIT>'.join([x + next(terminal_gen) for x in xs])<EOL>self.word = _xs<EOL>self._generalized_word_starts(xs)<EOL>self._build(_xs)<EOL>self.root._traverse(self._label_generalized)<EOL>", "docstring": "Builds a Generalized Suffix Tree (GST) from the array of strings provided.", "id": "f2362:c0:m9"}
{"signature": "def _generalized_word_starts(self, xs):", "body": "self.word_starts = []<EOL>i = <NUM_LIT:0><EOL>for n in range(len(xs)):<EOL><INDENT>self.word_starts.append(i)<EOL>i += len(xs[n]) + <NUM_LIT:1><EOL><DEDENT>", "docstring": "Helper method returns the starting indexes of strings in GST", "id": "f2362:c0:m14"}
{"signature": "def _edgeLabel(self, node, parent):", "body": "return self.word[node.idx + parent.depth : node.idx + node.depth]<EOL>", "docstring": "Helper method, returns the edge label between a node and it's parent", "id": "f2362:c0:m17"}
{"signature": "def _label_generalized(self, node):", "body": "if node.is_leaf():<EOL><INDENT>x = {self._get_word_start_index(node.idx)}<EOL><DEDENT>else:<EOL><INDENT>x = {n for ns in node.transition_links for n in ns[<NUM_LIT:0>].generalized_idxs}<EOL><DEDENT>node.generalized_idxs = x<EOL>", "docstring": "Helper method that labels the nodes of GST with indexes of strings\n        found in their descendants.", "id": "f2362:c0:m10"}
{"signature": "def unsafe_method(self, x):", "body": "pass<EOL>", "docstring": "Docstring.", "id": "f2370:c10:m4"}
{"signature": "def execute_undo(self, message):", "body": "info(\"<STR_LIT>\")<EOL>with self.world._unlock_temporarily():<EOL><INDENT>message._undo(self.world)<EOL>self.world._react_to_undo_response(message)<EOL><DEDENT>for actor in self.actors:<EOL><INDENT>actor._react_to_undo_response(message)<EOL><DEDENT>", "docstring": "Manage the response when the server rejects a message.\n\nAn undo is when required this client sends a message that the server \nrefuses to pass on to the other clients playing the game.  When this \nhappens, the client must undo the changes that the message made to the \nworld before being sent or crash.  Note that unlike sync requests, undo \nrequests are only reported to the client that sent the offending \nmessage.", "id": "f2375:c0:m5"}
{"signature": "def update (self):", "body": "delta_velocity = Vector.null()<EOL>target_position = self.target.get_position()<EOL>sprite_position = self.sprite.get_position()<EOL>desired_direction = target_position - sprite_position<EOL>if <NUM_LIT:0.0> == self.los or desired_direction.magnitude <= self.los:<EOL><INDENT>desired_normal = desired_direction.normal<EOL>desired_velocity = desired_normal * self.sprite.get_max_velocity()<EOL>delta_velocity = desired_velocity - self.sprite.get_velocity()<EOL><DEDENT>self.last_delta_velocity = delta_velocity<EOL>return delta_velocity, self.power<EOL>", "docstring": "Calculate what the desired change in velocity is. \n        delta_velocity = acceleration * delta_time\n        Time will be dealt with by the sprite.", "id": "f2380:c4:m1"}
{"signature": "@read_only<EOL><INDENT>def get_last_id(self):<DEDENT>", "body": "return max(self._tokens)<EOL>", "docstring": "Return the largest token id registered with the world.  If no tokens \nhave been added to the world, the id for the world itself (0) is \nreturned.  This means that the first \"real\" token id is 1.", "id": "f2381:c3:m8"}
{"signature": "def _set_actors(self, actors):", "body": "self._actors = actors<EOL>", "docstring": "Tell the world which actors are running on this machine.  This \ninformation is used to create extensions for new tokens.", "id": "f2381:c3:m19"}
{"signature": "@debug_only<EOL>def require_token(object):", "body": "require_instance(Token(), object)<EOL>", "docstring": "Raise an ApiUsageError if the given object is not a fully constructed \ninstance of a Token subclass.", "id": "f2381:m2"}
{"signature": "@read_only<EOL><INDENT>def get_token(self, id):<DEDENT>", "body": "return self._tokens[id]<EOL>", "docstring": "Return the token with the given id.  If no token with the given id is \nregistered to the world, an IndexError is thrown.", "id": "f2381:c3:m7"}
{"signature": "@read_only<EOL><INDENT>def watch_method(self, method_name, callback):<DEDENT>", "body": "<EOL>try:<EOL><INDENT>method = getattr(self, method_name)<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise ApiUsageError(\"\"\"<STR_LIT>\"\"\")<EOL><DEDENT>if not isinstance(method, Token.WatchedMethod):<EOL><INDENT>setattr(self, method_name, Token.WatchedMethod(method))<EOL>method = getattr(self, method_name)<EOL><DEDENT>method.add_watcher(callback)<EOL>", "docstring": "Register the given callback to be called whenever the method with the \ngiven name is called.  You can easily take advantage of this feature in \ntoken extensions by using the @watch_token decorator.", "id": "f2381:c2:m12"}
{"signature": "def watch_token(method):", "body": "method._kxg_watch_token = True<EOL>return method<EOL>", "docstring": "Mark a token extension method that should automatically be called when a \ntoken method of the same name is called.\n\nThis decorator must only be used on TokenExtension methods, otherwise it \nwill silently do nothing.  The reason is that the decorator itself can't do \nanything but label the given method, because at the time of decoration the \ntoken to watch isn't known.  The method is actually setup to watch a token \nin the TokenExtension constructor, which searches for the label added here.  \nBut other classes won't make this search and will silently do nothing.", "id": "f2381:m1"}
{"signature": "@staticmethod<EOL><INDENT>def add_safety_check(member_name, member_value):<DEDENT>", "body": "import functools<EOL>from types import FunctionType<EOL>is_method = isinstance(member_value, FunctionType)<EOL>is_read_only = hasattr(member_value, '<STR_LIT>')<EOL>is_private = member_name.startswith('<STR_LIT:_>')<EOL>if not is_method or is_read_only or is_private:<EOL><INDENT>return member_value<EOL><DEDENT>def safety_checked_method(self, *args, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>world = getattr(self, '<STR_LIT>', None)<EOL>if world and world.is_locked():<EOL><INDENT>nonlocal member_name<EOL>raise ApiUsageError(\"\"\"<STR_LIT>\"\"\")<EOL><DEDENT>return member_value(self, *args, **kwargs)<EOL><DEDENT>functools.update_wrapper(<EOL>safety_checked_method, member_value,<EOL>assigned=functools.WRAPPER_ASSIGNMENTS + (<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>)<EOL>)<EOL>return safety_checked_method<EOL>", "docstring": "If the given member is a method that is public (i.e. doesn't start with \nan underscore) and hasn't been marked as read-only, replace it with a \nversion that will check to make sure the world is locked.  This ensures \nthat methods that alter the token are only called from update methods \nor messages.", "id": "f2381:c0:m2"}
{"signature": "def _check_if_forum_observation_enabled(self):", "body": "try:<EOL><INDENT>super()._check_if_forum_observation_enabled()<EOL><DEDENT>except ApiUsageError:<EOL><INDENT>raise ApiUsageError(\"\"\"<STR_LIT>\"\"\")<EOL><DEDENT>", "docstring": "Give a helpful error if the user attempts to subscribe or unsubscribe \nfrom messages while the token is not registered with a world.  This can \neasily happen if the user attempts to subscribe to messages in the \nconstructor.  However, because the constructor is only called on one \nclient and message handlers cannot be pickled, subscribing at this time \nwould create hard-to-find synchronization bugs.", "id": "f2381:c2:m18"}
{"signature": "@debug_only<EOL>def require_active_token(object):", "body": "require_token(object)<EOL>token = object<EOL>if not token.has_id:<EOL><INDENT>raise ApiUsageError(\"\"\"<STR_LIT>\"\"\")<EOL><DEDENT>if not token.has_world:<EOL><INDENT>raise ApiUsageError(\"\"\"<STR_LIT>\"\"\")<EOL><DEDENT>", "docstring": "Raise an ApiUsageError if the given object is not a token that is currently \nparticipating in the game.  To be participating in the game, the given \ntoken must have an id number and be associated with the world.", "id": "f2381:m3"}
{"signature": "def on_exit_stage(self):", "body": "pass<EOL>", "docstring": "Give the stage a chance to react before it is stopped and the next \nstage is started.\n\nYou can define the next stage by setting the Stage.successor attribute.  \nIf the successor is static, you can just set it in the constructor.  \nBut if it will differ depending on the context, this method may be a \ngood place to calculate it because it is called only once and just \nbefore the theater queries for the successor.", "id": "f2382:c2:m6"}
{"signature": "def _assign_token_ids(self, id_factory):", "body": "for token in self.tokens_to_add():<EOL><INDENT>token._give_id(id_factory)<EOL><DEDENT>", "docstring": "Assign id numbers to any tokens that will be added to the world by this \nmessage.\n\nThis method is called by Actor but not by ServerActor, so it's \nguaranteed to be called exactly once.  In fact, this method is not \nreally different from the constructor, except that the id_factory \nobject is nicely provided.  That's useful for assigning ids to tokens \nbut probably nothing else.  This method is called before _check() so \nthat _check() can make sure that valid ids were assigned (although by \ndefault it doesn't).", "id": "f2384:c0:m17"}
{"signature": "def _run_supervisor(self):", "body": "import time<EOL>still_supervising = lambda: (<EOL>multiprocessing.active_children()<EOL>or not self.log_queue.empty()<EOL>or not self.exception_queue.empty())<EOL>try:<EOL><INDENT>while still_supervising():<EOL><INDENT>try:<EOL><INDENT>record = self.log_queue.get_nowait()<EOL>logger = logging.getLogger(record.name)<EOL>logger.handle(record)<EOL><DEDENT>except queue.Empty:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>exception = self.exception_queue.get_nowait()<EOL><DEDENT>except queue.Empty:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise exception<EOL><DEDENT>time.sleep(<NUM_LIT:1>/self.frame_rate)<EOL>self.elapsed_time += <NUM_LIT:1>/self.frame_rate<EOL>if self.time_limit and self.elapsed_time > self.time_limit:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>for process in multiprocessing.active_children():<EOL><INDENT>process.terminate()<EOL><DEDENT><DEDENT>", "docstring": "Poll the queues that the worker can use to communicate with the \nsupervisor, until all the workers are done and all the queues are \nempty.  Handle messages as they appear.", "id": "f2386:c3:m5"}
{"signature": "def main(world_cls, referee_cls, gui_cls, gui_actor_cls, ai_actor_cls,<EOL>theater_cls=PygletTheater, default_host=DEFAULT_HOST,<EOL>default_port=DEFAULT_PORT, argv=None):", "body": "import sys, os, docopt, nonstdlib<EOL>exe_name = os.path.basename(sys.argv[<NUM_LIT:0>])<EOL>usage = main.__doc__.format(**locals()).strip()<EOL>args = docopt.docopt(usage, argv or sys.argv[<NUM_LIT:1>:])<EOL>num_guis = int(args['<STR_LIT>'] or <NUM_LIT:1>)<EOL>num_ais = int(args['<STR_LIT>'] or <NUM_LIT:0>)<EOL>host, port = args['<STR_LIT>'], int(args['<STR_LIT>'])<EOL>logging.basicConfig(<EOL>format='<STR_LIT>',<EOL>level=nonstdlib.verbosity(args['<STR_LIT>']),<EOL>)<EOL>if args['<STR_LIT>']:<EOL><INDENT>print(\"\"\"<STR_LIT>\"\"\")<EOL>game = MultiplayerDebugger(<EOL>world_cls, referee_cls, gui_cls, gui_actor_cls, num_guis,<EOL>ai_actor_cls, num_ais, theater_cls, host, port)<EOL><DEDENT>else:<EOL><INDENT>game = theater_cls()<EOL>ai_actors = [ai_actor_cls() for i in range(num_ais)]<EOL>if args['<STR_LIT>']:<EOL><INDENT>game.gui = gui_cls()<EOL>game.initial_stage = UniplayerGameStage(<EOL>world_cls(), referee_cls(), gui_actor_cls(), ai_actors)<EOL>game.initial_stage.successor = PostgameSplashStage()<EOL><DEDENT>if args['<STR_LIT>']:<EOL><INDENT>game.gui = gui_cls()<EOL>game.initial_stage = ClientConnectionStage(<EOL>world_cls(), gui_actor_cls(), host, port)<EOL><DEDENT>if args['<STR_LIT>']:<EOL><INDENT>game.initial_stage = ServerConnectionStage(<EOL>world_cls(), referee_cls(), num_guis, ai_actors,<EOL>host, port)<EOL><DEDENT><DEDENT>game.play()<EOL>", "docstring": "Run a game being developed with the kxg game engine.\n\nUsage:\n    {exe_name} sandbox [<num_ais>] [-v...]\n    {exe_name} client [--host HOST] [--port PORT] [-v...]\n    {exe_name} server <num_guis> [<num_ais>] [--host HOST] [--port PORT] [-v...] 
\n    {exe_name} debug <num_guis> [<num_ais>] [--host HOST] [--port PORT] [-v...]\n    {exe_name} --help\n\nCommands:\n    sandbox\n        Play a single-player game with the specified number of AIs.  None of \n        the multiplayer machinery will be used.\n\n    client\n        Launch a client that will try to connect to a server on the given host \n        and port.  Once it connects and the game starts, the client will allow \n        you to play the game against any other connected clients.\n\n    server\n        Launch a server that will manage a game between the given number of \n        human and AI players.  The human players must connect using this \n        command's client mode.\n\n    debug\n        Debug a multiplayer game locally.  This command launches a server and \n        the given number of clients all in different processes, and configures \n        the logging system such that the output from each process can be easily \n        distinguished.\n\nArguments:\n    <num_guis>\n        The number of human players that will be playing the game.  Only needed \n        by commands that will launch some sort of multiplayer server.\n\n    <num_ais>\n        The number of AI players that will be playing the game.  Only needed by \n        commands that will launch single-player games or multiplayer servers.\n\nOptions:\n    -x --host HOST          [default: {default_host}]\n        The address of the machine running the server.  Must be accessible from \n        the machines running the clients.\n\n    -p --port PORT          [default: {default_port}]\n        The port that the server should listen on.  Don't specify a value less \n        than 1024 unless the server is running with root permissions.\n\n    -v --verbose \n        Have the game engine log more information about what it's doing.  
You \n        can specify this option several times to get more and more information.\n\nThis command is provided so that you can start writing your game with the least \npossible amount of boilerplate code.  However, the clients and servers provided \nby this command are not capable of running a production game.  Once you have \nwritten your game and want to give it a polished set of menus and options, \nyou'll have to write new Stage subclasses encapsulating that logic and you'll \nhave to call those stages yourself by interacting more directly with the \nTheater class.  The online documentation has more information on this process.", "id": "f2386:m0"}
{"signature": "def pre_poll(self):", "body": "pass<EOL>", "docstring": "Called before polling for process status", "id": "f2388:c0:m1"}
{"signature": "def input(self, input, song):", "body": "try:<EOL><INDENT>cmd = getattr(self, self.CMD_MAP[input][<NUM_LIT:1>])<EOL><DEDENT>except (IndexError, KeyError):<EOL><INDENT>return self.screen.print_error(<EOL>\"<STR_LIT>\".format(input))<EOL><DEDENT>cmd(song)<EOL>", "docstring": "Input callback, handles key presses", "id": "f2388:c1:m17"}
{"signature": "def post_poll(self):", "body": "pass<EOL>", "docstring": "Called after polling for process status", "id": "f2388:c0:m2"}
{"signature": "def input(self, value, song):", "body": "pass<EOL>", "docstring": "Called after user input during song playback", "id": "f2388:c0:m3"}
{"signature": "def play(self, song):", "body": "pass<EOL>", "docstring": "Called once when a song starts playing", "id": "f2388:c0:m0"}
{"signature": "def _post_start(self):", "body": "return<EOL>", "docstring": "Optionally, do something after the audio backend is started", "id": "f2389:c3:m6"}
{"signature": "def _loop_hook(self):", "body": "return<EOL>", "docstring": "Optionally, do something each main loop iteration", "id": "f2389:c3:m7"}
{"signature": "def _send_cmd(self, cmd):", "body": "self._process.stdin.write(\"<STR_LIT>\".format(cmd).encode(\"<STR_LIT:utf-8>\"))<EOL>self._process.stdin.flush()<EOL>", "docstring": "Write command to remote process", "id": "f2389:c3:m9"}
{"signature": "def _read_from_process(self, handle):", "body": "return handle.readline().strip()<EOL>", "docstring": "Read a line from the process and clean it\n\n        Different audio backends return text in different formats so provides a\n        hook for each subclass to customize reader behaviour.", "id": "f2389:c3:m8"}
{"signature": "def __init__(self, callbacks, control_channel):", "body": "self._control_channel = control_channel<EOL>self._control_fd = control_channel.fileno()<EOL>self._callbacks = callbacks<EOL>self._process = None<EOL>self._cmd = [self._find_path()]<EOL>", "docstring": "Constructor\n\n        Will attempt to find the player binary on construction and fail if it\n        is not found. Subclasses should append any additional arguments to\n        _cmd.", "id": "f2389:c3:m0"}
{"signature": "def _player_stopped(self, value):", "body": "raise NotImplementedError<EOL>", "docstring": "Determine if player has stopped", "id": "f2389:c3:m3"}
{"signature": "def play(self, song):", "body": "self._callbacks.play(song)<EOL>self._load_track(song)<EOL>time.sleep(<NUM_LIT:2>)  <EOL>while True:<EOL><INDENT>try:<EOL><INDENT>self._callbacks.pre_poll()<EOL>self._ensure_started()<EOL>self._loop_hook()<EOL>readers, _, _ = select.select(<EOL>self._get_select_readers(), [], [], <NUM_LIT:1>)<EOL>for handle in readers:<EOL><INDENT>if handle.fileno() == self._control_fd:<EOL><INDENT>self._callbacks.input(handle.readline().strip(), song)<EOL><DEDENT>else:<EOL><INDENT>value = self._read_from_process(handle)<EOL>if self._player_stopped(value):<EOL><INDENT>return<EOL><DEDENT><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>self._callbacks.post_poll()<EOL><DEDENT><DEDENT>", "docstring": "Play a new song from a Pandora model\n\n        Returns once the stream starts but does not shut down the remote audio\n        output backend process. Calls the input callback when the user has\n        input.", "id": "f2389:c3:m16"}
{"signature": "def lower_volume(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Lower the volume of the audio output\n\n        The player backend may not support this functionality in which case it\n        should not override this method.", "id": "f2389:c3:m5"}
{"signature": "def pause(self):", "body": "self._send_cmd(\"<STR_LIT>\")<EOL>", "docstring": "Pause the player", "id": "f2389:c3:m11"}
{"signature": "def end_station(self):", "body": "raise StopIteration<EOL>", "docstring": "Stop playing the station", "id": "f2389:c3:m17"}
{"signature": "def iterate_forever(func, *args, **kwargs):", "body": "output = func(*args, **kwargs)<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>playlist_item = next(output)<EOL>playlist_item.prepare_playback()<EOL>yield playlist_item<EOL><DEDENT>except StopIteration:<EOL><INDENT>output = func(*args, **kwargs)<EOL><DEDENT><DEDENT>", "docstring": "Iterate over a finite iterator forever\n\n    When the iterator is exhausted will call the function again to generate a\n    new iterator and keep iterating.", "id": "f2390:m0"}
{"signature": "@staticmethod<EOL><INDENT>def get_integer(prompt):<DEDENT>", "body": "while True:<EOL><INDENT>try:<EOL><INDENT>return int(input(prompt).strip())<EOL><DEDENT>except ValueError:<EOL><INDENT>print(Colors.red(\"<STR_LIT>\"))<EOL><DEDENT><DEDENT>", "docstring": "Gather user input and convert it to an integer\n\n        Will keep trying till the user enters an interger or until they ^C the\n        program.", "id": "f2390:c4:m7"}
{"signature": "def formatter(self, api_client, data, newval):", "body": "raise NotImplementedError<EOL>", "docstring": "Format Value for Model\n\n        The return value of this method is used as a value for the field in the\n        model of which this field is a member\n\n        api_client\n            instance of a Pandora API client\n        data\n            complete JSON data blob for the parent model of which this field is\n            a member\n        newval\n            the value of this field as retrieved from the JSON data after\n            having resolved default value logic", "id": "f2402:c1:m0"}
{"signature": "@classmethod<EOL><INDENT>def from_json_list(cls, api_client, data):<DEDENT>", "body": "return [cls.from_json(api_client, item) for item in data]<EOL>", "docstring": "Convert a list of JSON values to a list of models", "id": "f2402:c4:m0"}
{"signature": "def prepare_playback(self):", "body": "return self<EOL>", "docstring": "Prepare Track for Playback\n\n        This method must be called by clients before beginning playback\n        otherwise the track recieved may not be playable.", "id": "f2404:c4:m1"}
{"signature": "def retries(max_tries, exceptions=(Exception,)):", "body": "def decorator(func):<EOL><INDENT>def function(*args, **kwargs):<EOL><INDENT>retries_left = max_tries<EOL>while retries_left > <NUM_LIT:0>:<EOL><INDENT>try:<EOL><INDENT>retries_left -= <NUM_LIT:1><EOL>return func(*args, **kwargs)<EOL><DEDENT>except exceptions as exc:<EOL><INDENT>if isinstance(exc, PandoraException):<EOL><INDENT>raise<EOL><DEDENT>if retries_left > <NUM_LIT:0>:<EOL><INDENT>time.sleep(delay_exponential(<EOL><NUM_LIT:0.5>, <NUM_LIT:2>, max_tries - retries_left))<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return function<EOL><DEDENT>return decorator<EOL>", "docstring": "Function decorator implementing retrying logic.\n\n    exceptions: A tuple of exception classes; default (Exception,)\n\n    The decorator will call the function up to max_tries times if it raises\n    an exception.\n\n    By default it catches instances of the Exception class and subclasses.\n    This will recover after all but the most fatal errors. You may specify a\n    custom tuple of exception classes with the 'exceptions' argument; the\n    function will only be retried if it raises one of the specified\n    exceptions.", "id": "f2408:m0"}
{"signature": "def __init__(self, headers=None, data=None):", "body": "if headers is None:<EOL><INDENT>headers = {'<STR_LIT:status>': http_client.OK}<EOL><DEDENT>self.data = data<EOL>self.response_headers = headers<EOL>self.headers = None<EOL>self.uri = None<EOL>self.method = None<EOL>self.body = None<EOL>self.headers = None<EOL>self.requests = <NUM_LIT:0><EOL>", "docstring": "HttpMock constructor.\n\n        Args:\n            headers: dict, header to return with response", "id": "f2444:c1:m0"}
{"signature": "def __init__(self, iterable):", "body": "self._iterable = iterable<EOL>self.requests = []<EOL>", "docstring": "HttpMockSequence constructor.\n\n        Args:\n            iterable: iterable, a sequence of pairs of (headers, body)", "id": "f2444:c2:m0"}
{"signature": "def _parse_pem_key(raw_key_input):", "body": "offset = raw_key_input.find(b'<STR_LIT>')<EOL>if offset != -<NUM_LIT:1>:<EOL><INDENT>return raw_key_input[offset:]<EOL><DEDENT>", "docstring": "Identify and extract PEM keys.\n\n    Determines whether the given key is in the format of PEM key, and extracts\n    the relevant part of the key if it is.\n\n    Args:\n        raw_key_input: The contents of a private key file (either PEM or\n                       PKCS12).\n\n    Returns:\n        string, The actual key if the contents are from a PEM file, or\n        else None.", "id": "f2445:m7"}
{"signature": "def scopes_to_string(scopes):", "body": "if isinstance(scopes, six.string_types):<EOL><INDENT>return scopes<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT:U+0020>'.join(scopes)<EOL><DEDENT>", "docstring": "Converts scope value to a string.\n\n    If scopes is a string then it is simply passed through. If scopes is an\n    iterable then a string is returned that is all the individual scopes\n    concatenated with spaces.\n\n    Args:\n        scopes: string or iterable of strings, the scopes.\n\n    Returns:\n        The scopes formatted as a single string.", "id": "f2445:m1"}
{"signature": "def _from_bytes(value):", "body": "result = (value.decode('<STR_LIT:utf-8>')<EOL>if isinstance(value, six.binary_type) else value)<EOL>if isinstance(result, six.text_type):<EOL><INDENT>return result<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'.format(value))<EOL><DEDENT>", "docstring": "Converts bytes to a string value, if necessary.\n\n    Args:\n        value: The string/bytes value to be converted.\n\n    Returns:\n        The original value converted to unicode (if bytes) or as passed in\n        if it started out as unicode.\n\n    Raises:\n        ValueError if the value could not be converted to unicode.", "id": "f2445:m10"}
{"signature": "def update_query_params(uri, params):", "body": "parts = urllib.parse.urlparse(uri)<EOL>query_params = parse_unique_urlencoded(parts.query)<EOL>query_params.update(params)<EOL>new_query = urllib.parse.urlencode(query_params)<EOL>new_parts = parts._replace(query=new_query)<EOL>return urllib.parse.urlunparse(new_parts)<EOL>", "docstring": "Updates a URI with new query parameters.\n\n    If a given key from ``params`` is repeated in the ``uri``, then\n    the URI will be considered invalid and an error will occur.\n\n    If the URI is valid, then each value from ``params`` will\n    replace the corresponding value in the query parameters (if\n    it exists).\n\n    Args:\n        uri: string, A valid URI, with potential existing query parameters.\n        params: dict, A dictionary of query parameters.\n\n    Returns:\n        The same URI but with the new query parameters added.", "id": "f2445:m4"}
{"signature": "def parse_unique_urlencoded(content):", "body": "urlencoded_params = urllib.parse.parse_qs(content)<EOL>params = {}<EOL>for key, value in six.iteritems(urlencoded_params):<EOL><INDENT>if len(value) != <NUM_LIT:1>:<EOL><INDENT>msg = ('<STR_LIT>'<EOL>'<STR_LIT>' % (key, '<STR_LIT:U+002CU+0020>'.join(value)))<EOL>raise ValueError(msg)<EOL><DEDENT>params[key] = value[<NUM_LIT:0>]<EOL><DEDENT>return params<EOL>", "docstring": "Parses unique key-value parameters from urlencoded content.\n\n    Args:\n        content: string, URL-encoded key-value pairs.\n\n    Returns:\n        dict, The key-value pairs from ``content``.\n\n    Raises:\n        ValueError: if one of the keys is repeated.", "id": "f2445:m3"}
{"signature": "def positional(max_positional_args):", "body": "def positional_decorator(wrapped):<EOL><INDENT>@functools.wraps(wrapped)<EOL>def positional_wrapper(*args, **kwargs):<EOL><INDENT>if len(args) > max_positional_args:<EOL><INDENT>plural_s = '<STR_LIT>'<EOL>if max_positional_args != <NUM_LIT:1>:<EOL><INDENT>plural_s = '<STR_LIT:s>'<EOL><DEDENT>message = ('<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>function=wrapped.__name__,<EOL>args_max=max_positional_args,<EOL>args_given=len(args),<EOL>plural=plural_s))<EOL>if positional_parameters_enforcement == POSITIONAL_EXCEPTION:<EOL><INDENT>raise TypeError(message)<EOL><DEDENT>elif positional_parameters_enforcement == POSITIONAL_WARNING:<EOL><INDENT>logger.warning(message)<EOL><DEDENT><DEDENT>return wrapped(*args, **kwargs)<EOL><DEDENT>return positional_wrapper<EOL><DEDENT>if isinstance(max_positional_args, six.integer_types):<EOL><INDENT>return positional_decorator<EOL><DEDENT>else:<EOL><INDENT>args, _, _, defaults = inspect.getargspec(max_positional_args)<EOL>return positional(len(args) - len(defaults))(max_positional_args)<EOL><DEDENT>", "docstring": "A decorator to declare that only the first N arguments my be positional.\n\n    This decorator makes it easy to support Python 3 style keyword-only\n    parameters. 
For example, in Python 3 it is possible to write::\n\n        def fn(pos1, *, kwonly1=None, kwonly1=None):\n            ...\n\n    All named parameters after ``*`` must be a keyword::\n\n        fn(10, 'kw1', 'kw2')  # Raises exception.\n        fn(10, kwonly1='kw1')  # Ok.\n\n    Example\n    ^^^^^^^\n\n    To define a function like above, do::\n\n        @positional(1)\n        def fn(pos1, kwonly1=None, kwonly2=None):\n            ...\n\n    If no default value is provided to a keyword argument, it becomes a\n    required keyword argument::\n\n        @positional(0)\n        def fn(required_kw):\n            ...\n\n    This must be called with the keyword parameter::\n\n        fn()  # Raises exception.\n        fn(10)  # Raises exception.\n        fn(required_kw=10)  # Ok.\n\n    When defining instance or class methods always remember to account for\n    ``self`` and ``cls``::\n\n        class MyClass(object):\n\n            @positional(2)\n            def my_method(self, pos1, kwonly1=None):\n                ...\n\n            @classmethod\n            @positional(2)\n            def my_method(cls, pos1, kwonly1=None):\n                ...\n\n    The positional decorator behavior is controlled by\n    ``_helpers.positional_parameters_enforcement``, which may be set to\n    ``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or\n    ``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do\n    nothing, respectively, if a declaration is violated.\n\n    Args:\n        max_positional_arguments: Maximum number of positional arguments. 
All\n                                  parameters after the this index must be\n                                  keyword only.\n\n    Returns:\n        A decorator that prevents using arguments after max_positional_args\n        from being used as positional parameters.\n\n    Raises:\n        TypeError: if a key-word only argument is provided as a positional\n                   parameter, but only if\n                   _helpers.positional_parameters_enforcement is set to\n                   POSITIONAL_EXCEPTION.", "id": "f2445:m0"}
{"signature": "def _to_bytes(value, encoding='<STR_LIT:ascii>'):", "body": "result = (value.encode(encoding)<EOL>if isinstance(value, six.text_type) else value)<EOL>if isinstance(result, six.binary_type):<EOL><INDENT>return result<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(value))<EOL><DEDENT>", "docstring": "Converts a string value to bytes, if necessary.\n\n    Unfortunately, ``six.b`` is insufficient for this task since in\n    Python2 it does not modify ``unicode`` objects.\n\n    Args:\n        value: The string/bytes value to be converted.\n        encoding: The encoding to use to convert unicode to bytes. Defaults\n                  to \"ascii\", which will not allow any characters from ordinals\n                  larger than 127. Other useful values are \"latin-1\", which\n                  which will only allows byte ordinals (up to 255) and \"utf-8\",\n                  which will encode any unicode that needs to be.\n\n    Returns:\n        The original value converted to bytes (if unicode) or as passed in\n        if it started out as bytes.\n\n    Raises:\n        ValueError if the value could not be converted to bytes.", "id": "f2445:m9"}
{"signature": "def sign(self, message):", "body": "message = _helpers._to_bytes(message, encoding='<STR_LIT:utf-8>')<EOL>return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))<EOL>", "docstring": "Signs a message.\n\n        Args:\n            message: string, Message to be signed.\n\n        Returns:\n            string, The signature of the message for the given key.", "id": "f2446:c1:m1"}
{"signature": "@staticmethod<EOL><INDENT>def from_string(key_pem, is_x509_cert):<DEDENT>", "body": "if is_x509_cert:<EOL><INDENT>key_pem = _helpers._to_bytes(key_pem)<EOL>pemLines = key_pem.replace(b'<STR_LIT:U+0020>', b'<STR_LIT>').split()<EOL>certDer = _helpers._urlsafe_b64decode(b'<STR_LIT>'.join(pemLines[<NUM_LIT:1>:-<NUM_LIT:1>]))<EOL>certSeq = DerSequence()<EOL>certSeq.decode(certDer)<EOL>tbsSeq = DerSequence()<EOL>tbsSeq.decode(certSeq[<NUM_LIT:0>])<EOL>pubkey = RSA.importKey(tbsSeq[<NUM_LIT:6>])<EOL><DEDENT>else:<EOL><INDENT>pubkey = RSA.importKey(key_pem)<EOL><DEDENT>return PyCryptoVerifier(pubkey)<EOL>", "docstring": "Construct a Verified instance from a string.\n\n        Args:\n            key_pem: string, public key in PEM format.\n            is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it\n                          is expected to be an RSA key in PEM format.\n\n        Returns:\n            Verifier instance.", "id": "f2446:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def from_string(key, password='<STR_LIT>'):<DEDENT>", "body": "parsed_pem_key = _helpers._parse_pem_key(_helpers._to_bytes(key))<EOL>if parsed_pem_key:<EOL><INDENT>pkey = RSA.importKey(parsed_pem_key)<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>return PyCryptoSigner(pkey)<EOL>", "docstring": "Construct a Signer instance from a string.\n\n        Args:\n            key: string, private key in PEM format.\n            password: string, password for private key file. Unused for PEM\n                      files.\n\n        Returns:\n            Signer instance.\n\n        Raises:\n            NotImplementedError if the key isn't in PEM format.", "id": "f2446:c1:m2"}
{"signature": "def __init__(self, pkey):", "body": "self._key = pkey<EOL>", "docstring": "Constructor.\n\n        Args:\n            pkey, OpenSSL.crypto.PKey (or equiv), The private key to sign with.", "id": "f2446:c1:m0"}
{"signature": "def __init__(self, pubkey):", "body": "self._pubkey = pubkey<EOL>", "docstring": "Constructor.\n\n        Args:\n            pubkey: OpenSSL.crypto.PKey (or equiv), The public key to verify\n            with.", "id": "f2446:c0:m0"}
{"signature": "def oauth2_callback(request):", "body": "if '<STR_LIT:error>' in request.GET:<EOL><INDENT>reason = request.GET.get(<EOL>'<STR_LIT>', request.GET.get('<STR_LIT:error>', '<STR_LIT>'))<EOL>reason = html.escape(reason)<EOL>return http.HttpResponseBadRequest(<EOL>'<STR_LIT>'.format(reason))<EOL><DEDENT>try:<EOL><INDENT>encoded_state = request.GET['<STR_LIT:state>']<EOL>code = request.GET['<STR_LIT:code>']<EOL><DEDENT>except KeyError:<EOL><INDENT>return http.HttpResponseBadRequest(<EOL>'<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>server_csrf = request.session[_CSRF_KEY]<EOL><DEDENT>except KeyError:<EOL><INDENT>return http.HttpResponseBadRequest(<EOL>'<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>state = json.loads(encoded_state)<EOL>client_csrf = state['<STR_LIT>']<EOL>return_url = state['<STR_LIT>']<EOL><DEDENT>except (ValueError, KeyError):<EOL><INDENT>return http.HttpResponseBadRequest('<STR_LIT>')<EOL><DEDENT>if client_csrf != server_csrf:<EOL><INDENT>return http.HttpResponseBadRequest('<STR_LIT>')<EOL><DEDENT>flow = _get_flow_for_token(client_csrf, request)<EOL>if not flow:<EOL><INDENT>return http.HttpResponseBadRequest('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>credentials = flow.step2_exchange(code)<EOL><DEDENT>except client.FlowExchangeError as exchange_error:<EOL><INDENT>return http.HttpResponseBadRequest(<EOL>'<STR_LIT>'.format(exchange_error))<EOL><DEDENT>get_storage(request).put(credentials)<EOL>signals.oauth2_authorized.send(sender=signals.oauth2_authorized,<EOL>request=request, credentials=credentials)<EOL>return shortcuts.redirect(return_url)<EOL>", "docstring": "View that handles the user's return from OAuth2 provider.\n\n    This view verifies the CSRF state and OAuth authorization code, and on\n    success stores the credentials obtained in the storage provider,\n    and redirects to the return_url specified in the authorize view and\n    stored in the session.\n\n    Args:\n        request: Django request.\n\n    Returns:\n         A redirect 
response back to the return_url.", "id": "f2449:m2"}
{"signature": "def __init__(self, model_class, key_name, key_value, property_name):", "body": "super(DjangoORMStorage, self).__init__()<EOL>self.model_class = model_class<EOL>self.key_name = key_name<EOL>self.key_value = key_value<EOL>self.property_name = property_name<EOL>", "docstring": "Constructor for Storage.\n\n        Args:\n            model: string, fully qualified name of db.Model model class.\n            key_name: string, key name for the entity that has the credentials\n            key_value: string, key value for the entity that has the\n               credentials.\n            property_name: string, name of the property that is an\n                           CredentialsProperty.", "id": "f2450:c0:m0"}
{"signature": "def locked_delete(self):", "body": "query = {self.key_name: self.key_value}<EOL>self.model_class.objects.filter(**query).delete()<EOL>", "docstring": "Delete Credentials from the datastore.", "id": "f2450:c0:m3"}
{"signature": "def _get_scopes(self):", "body": "if _credentials_from_request(self.request):<EOL><INDENT>return (self._scopes |<EOL>_credentials_from_request(self.request).scopes)<EOL><DEDENT>else:<EOL><INDENT>return self._scopes<EOL><DEDENT>", "docstring": "Returns the scopes associated with this object, kept up to\n         date for incremental auth.", "id": "f2452:c1:m3"}
{"signature": "@property<EOL><INDENT>def credentials(self):<DEDENT>", "body": "return _credentials_from_request(self.request)<EOL>", "docstring": "Gets the authorized credentials for this flow, if they exist.", "id": "f2452:c1:m5"}
{"signature": "def has_credentials(self):", "body": "credentials = _credentials_from_request(self.request)<EOL>return (credentials and not credentials.invalid and<EOL>credentials.has_scopes(self._get_scopes()))<EOL>", "docstring": "Returns True if there are valid credentials for the current user\n        and required scopes.", "id": "f2452:c1:m2"}
{"signature": "@property<EOL><INDENT>def scopes(self):<DEDENT>", "body": "<EOL>return self._get_scopes()<EOL>", "docstring": "Returns the scopes associated with this OAuth2 object.", "id": "f2452:c1:m4"}
{"signature": "def get_authorize_redirect(self):", "body": "get_params = {<EOL>'<STR_LIT>': self.return_url,<EOL>'<STR_LIT>': self._get_scopes()<EOL>}<EOL>return _redirect_with_params('<STR_LIT>', **get_params)<EOL>", "docstring": "Creates a URl to start the OAuth2 authorization flow.", "id": "f2452:c1:m1"}
{"signature": "def _redirect_with_params(url_name, *args, **kwargs):", "body": "url = urlresolvers.reverse(url_name, args=args)<EOL>params = parse.urlencode(kwargs, True)<EOL>return \"<STR_LIT>\".format(url, params)<EOL>", "docstring": "Helper method to create a redirect response with URL params.\n\n    This builds a redirect string that converts kwargs into a\n    query string.\n\n    Args:\n        url_name: The name of the url to redirect to.\n        kwargs: the query string param and their values to build.\n\n    Returns:\n        A properly formatted redirect string.", "id": "f2452:m4"}
{"signature": "def _get_storage_model():", "body": "storage_model_settings = getattr(django.conf.settings,<EOL>'<STR_LIT>', None)<EOL>if storage_model_settings is not None:<EOL><INDENT>return (storage_model_settings['<STR_LIT>'],<EOL>storage_model_settings['<STR_LIT>'],<EOL>storage_model_settings['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>return None, None, None<EOL><DEDENT>", "docstring": "This configures whether the credentials will be stored in the session\n    or the Django ORM based on the settings. By default, the credentials\n    will be stored in the session, unless `GOOGLE_OAUTH2_STORAGE_MODEL`\n    is found in the settings. Usually, the ORM storage is used to integrate\n    credentials into an existing Django user system.\n\n    Returns:\n        A tuple containing three strings, or None. If\n        ``GOOGLE_OAUTH2_STORAGE_MODEL`` is configured, the tuple\n        will contain the fully qualifed path of the `django.db.model`,\n        the name of the ``django.contrib.auth.models.User`` field on the\n        model, and the name of the\n        :class:`oauth2client.contrib.django_util.models.CredentialsField`\n        field on the model. If Django ORM storage is not configured,\n        this function returns None.", "id": "f2452:m2"}
{"signature": "def get_storage(request):", "body": "storage_model = oauth2_settings.storage_model<EOL>user_property = oauth2_settings.storage_model_user_property<EOL>credentials_property = oauth2_settings.storage_model_credentials_property<EOL>if storage_model:<EOL><INDENT>module_name, class_name = storage_model.rsplit('<STR_LIT:.>', <NUM_LIT:1>)<EOL>module = importlib.import_module(module_name)<EOL>storage_model_class = getattr(module, class_name)<EOL>return storage.DjangoORMStorage(storage_model_class,<EOL>user_property,<EOL>request.user,<EOL>credentials_property)<EOL><DEDENT>else:<EOL><INDENT>return dictionary_storage.DictionaryStorage(<EOL>request.session, key=_CREDENTIALS_KEY)<EOL><DEDENT>", "docstring": "Gets a Credentials storage object provided by the Django OAuth2 Helper\n    object.\n\n    Args:\n        request: Reference to the current request object.\n\n    Returns:\n       An :class:`oauth2.client.Storage` object.", "id": "f2452:m3"}
{"signature": "@property<EOL><INDENT>def http(self):<DEDENT>", "body": "if self.has_credentials():<EOL><INDENT>return self.credentials.authorize(transport.get_http_object())<EOL><DEDENT>return None<EOL>", "docstring": "Helper: create HTTP client authorized with OAuth2 credentials.", "id": "f2452:c1:m6"}
{"signature": "def _credentials_from_request(request):", "body": "<EOL>if (oauth2_settings.storage_model is None or<EOL>request.user.is_authenticated()):<EOL><INDENT>return get_storage(request).get()<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Gets the authorized credentials for this flow, if they exist.", "id": "f2452:m5"}
{"signature": "def from_db_value(self, value, expression, connection, context):", "body": "return self.to_python(value)<EOL>", "docstring": "Overrides ``models.Field`` method. This converts the value\n        returned from the database to an instance of this class.", "id": "f2453:c0:m2"}
{"signature": "def get_prep_value(self, value):", "body": "if value is None:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return encoding.smart_text(<EOL>base64.b64encode(jsonpickle.encode(value).encode()))<EOL><DEDENT>", "docstring": "Overrides ``models.Field`` method. This is used to convert\n        the value from an instances of this class to bytes that can be\n        inserted into the database.", "id": "f2453:c0:m4"}
{"signature": "def oauth_required(decorated_function=None, scopes=None, **decorator_kwargs):", "body": "def curry_wrapper(wrapped_function):<EOL><INDENT>@wraps(wrapped_function)<EOL>def required_wrapper(request, *args, **kwargs):<EOL><INDENT>if not (django_util.oauth2_settings.storage_model is None or<EOL>request.user.is_authenticated()):<EOL><INDENT>redirect_str = '<STR_LIT>'.format(<EOL>django.conf.settings.LOGIN_URL,<EOL>parse.quote(request.path))<EOL>return shortcuts.redirect(redirect_str)<EOL><DEDENT>return_url = decorator_kwargs.pop('<STR_LIT>',<EOL>request.get_full_path())<EOL>user_oauth = django_util.UserOAuth2(request, scopes, return_url)<EOL>if not user_oauth.has_credentials():<EOL><INDENT>return shortcuts.redirect(user_oauth.get_authorize_redirect())<EOL><DEDENT>setattr(request, django_util.oauth2_settings.request_prefix,<EOL>user_oauth)<EOL>return wrapped_function(request, *args, **kwargs)<EOL><DEDENT>return required_wrapper<EOL><DEDENT>if decorated_function:<EOL><INDENT>return curry_wrapper(decorated_function)<EOL><DEDENT>else:<EOL><INDENT>return curry_wrapper<EOL><DEDENT>", "docstring": "Decorator to require OAuth2 credentials for a view.\n\n\n    .. 
code-block:: python\n       :caption: views.py\n       :name: views_required_2\n\n\n       from oauth2client.django_util.decorators import oauth_required\n\n       @oauth_required\n       def requires_default_scopes(request):\n          email = request.credentials.id_token['email']\n          service = build(serviceName='calendar', version='v3',\n                       http=request.oauth.http,\n                       developerKey=API_KEY)\n          events = service.events().list(\n                                    calendarId='primary').execute()['items']\n          return HttpResponse(\n              \"email: {0}, calendar: {1}\".format(email, str(events)))\n\n    Args:\n        decorated_function: View function to decorate, must have the Django\n           request object as the first argument.\n        scopes: Scopes to require, will default.\n        decorator_kwargs: Can include ``return_url`` to specify the URL to\n           return to after OAuth2 authorization is complete.\n\n    Returns:\n        An OAuth2 Authorize view if credentials are not found or if the\n        credentials are missing the required scopes. Otherwise,\n        the decorated view.", "id": "f2454:m0"}
{"signature": "def oauth_enabled(decorated_function=None, scopes=None, **decorator_kwargs):", "body": "def curry_wrapper(wrapped_function):<EOL><INDENT>@wraps(wrapped_function)<EOL>def enabled_wrapper(request, *args, **kwargs):<EOL><INDENT>return_url = decorator_kwargs.pop('<STR_LIT>',<EOL>request.get_full_path())<EOL>user_oauth = django_util.UserOAuth2(request, scopes, return_url)<EOL>setattr(request, django_util.oauth2_settings.request_prefix,<EOL>user_oauth)<EOL>return wrapped_function(request, *args, **kwargs)<EOL><DEDENT>return enabled_wrapper<EOL><DEDENT>if decorated_function:<EOL><INDENT>return curry_wrapper(decorated_function)<EOL><DEDENT>else:<EOL><INDENT>return curry_wrapper<EOL><DEDENT>", "docstring": "Decorator to enable OAuth Credentials if authorized, and setup\n    the oauth object on the request object to provide helper functions\n    to start the flow otherwise.\n\n    .. code-block:: python\n       :caption: views.py\n       :name: views_enabled3\n\n       from oauth2client.django_util.decorators import oauth_enabled\n\n       @oauth_enabled\n       def optional_oauth2(request):\n           if request.oauth.has_credentials():\n               # this could be passed into a view\n               # request.oauth.http is also initialized\n               return HttpResponse(\"User email: {0}\".format(\n                                   request.oauth.credentials.id_token['email'])\n           else:\n               return HttpResponse('Here is an OAuth Authorize link:\n               <a href=\"{0}\">Authorize</a>'.format(\n                   request.oauth.get_authorize_redirect()))\n\n\n    Args:\n        decorated_function: View function to decorate.\n        scopes: Scopes to require, will default.\n        decorator_kwargs: Can include ``return_url`` to specify the URL to\n           return to after OAuth2 authorization is complete.\n\n    Returns:\n         The decorated view function.", "id": "f2454:m1"}
{"signature": "def locked_put(self, credentials):", "body": "filters = {self.key_name: self.key_value}<EOL>query = self.session.query(self.model_class).filter_by(**filters)<EOL>entity = query.first()<EOL>if not entity:<EOL><INDENT>entity = self.model_class(**filters)<EOL><DEDENT>setattr(entity, self.property_name, credentials)<EOL>self.session.add(entity)<EOL>", "docstring": "Write a credentials to the SQLAlchemy datastore.\n\n        Args:\n            credentials: :class:`oauth2client.Credentials`", "id": "f2455:c1:m2"}
{"signature": "def __init__(self, session, model_class, key_name,<EOL>key_value, property_name):", "body": "super(Storage, self).__init__()<EOL>self.session = session<EOL>self.model_class = model_class<EOL>self.key_name = key_name<EOL>self.key_value = key_value<EOL>self.property_name = property_name<EOL>", "docstring": "Constructor for Storage.\n\n        Args:\n            session: An instance of :class:`sqlalchemy.orm.Session`.\n            model_class: SQLAlchemy declarative mapping.\n            key_name: string, key name for the entity that has the credentials\n            key_value: key value for the entity that has the credentials\n            property_name: A string indicating which property on the\n                           ``model_class`` to store the credentials.\n                           This property must be a\n                           :class:`CredentialsType` column.", "id": "f2455:c1:m0"}
{"signature": "def locked_delete(self):", "body": "filters = {self.key_name: self.key_value}<EOL>self.session.query(self.model_class).filter_by(**filters).delete()<EOL>", "docstring": "Delete credentials from the SQLAlchemy datastore.", "id": "f2455:c1:m3"}
{"signature": "@_helpers.positional(<NUM_LIT:2>)<EOL>def generate_token(key, user_id, action_id='<STR_LIT>', when=None):", "body": "digester = hmac.new(_helpers._to_bytes(key, encoding='<STR_LIT:utf-8>'))<EOL>digester.update(_helpers._to_bytes(str(user_id), encoding='<STR_LIT:utf-8>'))<EOL>digester.update(DELIMITER)<EOL>digester.update(_helpers._to_bytes(action_id, encoding='<STR_LIT:utf-8>'))<EOL>digester.update(DELIMITER)<EOL>when = _helpers._to_bytes(str(when or int(time.time())), encoding='<STR_LIT:utf-8>')<EOL>digester.update(when)<EOL>digest = digester.digest()<EOL>token = base64.urlsafe_b64encode(digest + DELIMITER + when)<EOL>return token<EOL>", "docstring": "Generates a URL-safe token for the given user, action, time tuple.\n\n    Args:\n        key: secret key to use.\n        user_id: the user ID of the authenticated user.\n        action_id: a string identifier of the action they requested\n                   authorization for.\n        when: the time in seconds since the epoch at which the user was\n              authorized for this action. If not set the current time is used.\n\n    Returns:\n        A string XSRF protection token.", "id": "f2456:m0"}
{"signature": "@_helpers.positional(<NUM_LIT:3>)<EOL>def validate_token(key, token, user_id, action_id=\"<STR_LIT>\", current_time=None):", "body": "if not token:<EOL><INDENT>return False<EOL><DEDENT>try:<EOL><INDENT>decoded = base64.urlsafe_b64decode(token)<EOL>token_time = int(decoded.split(DELIMITER)[-<NUM_LIT:1>])<EOL><DEDENT>except (TypeError, ValueError, binascii.Error):<EOL><INDENT>return False<EOL><DEDENT>if current_time is None:<EOL><INDENT>current_time = time.time()<EOL><DEDENT>if current_time - token_time > DEFAULT_TIMEOUT_SECS:<EOL><INDENT>return False<EOL><DEDENT>expected_token = generate_token(key, user_id, action_id=action_id,<EOL>when=token_time)<EOL>if len(token) != len(expected_token):<EOL><INDENT>return False<EOL><DEDENT>different = <NUM_LIT:0><EOL>for x, y in zip(bytearray(token), bytearray(expected_token)):<EOL><INDENT>different |= x ^ y<EOL><DEDENT>return not different<EOL>", "docstring": "Validates that the given token authorizes the user for the action.\n\n    Tokens are invalid if the time of issue is too old or if the token\n    does not match what generateToken outputs (i.e. the token was forged).\n\n    Args:\n        key: secret key to use.\n        token: a string of the token generated by generateToken.\n        user_id: the user ID of the authenticated user.\n        action_id: a string identifier of the action they requested\n                   authorization for.\n\n    Returns:\n        A boolean - True if the user is authorized for the action, False\n        otherwise.", "id": "f2456:m1"}
{"signature": "def __init__(self, dictionary, key, lock=None):", "body": "super(DictionaryStorage, self).__init__(lock=lock)<EOL>self._dictionary = dictionary<EOL>self._key = key<EOL>", "docstring": "Construct a DictionaryStorage instance.", "id": "f2457:c0:m0"}
{"signature": "def locked_delete(self):", "body": "self._dictionary.pop(self._key, None)<EOL>", "docstring": "Remove the credentials from the dictionary, if they exist.", "id": "f2457:c0:m3"}
{"signature": "def retrieve_scopes(self, http):", "body": "self._retrieve_info(http)<EOL>return self.scopes<EOL>", "docstring": "Retrieves the canonical list of scopes for this access token.\n\n        Overrides client.Credentials.retrieve_scopes. Fetches scopes info\n        from the metadata server.\n\n        Args:\n            http: httplib2.Http, an http object to be used to make the refresh\n                  request.\n\n        Returns:\n            A set of strings containing the canonical list of scopes.", "id": "f2458:c0:m3"}
{"signature": "def sign_blob(self, blob):", "body": "raise NotImplementedError(<EOL>'<STR_LIT>')<EOL>", "docstring": "Cryptographically sign a blob (of bytes).\n\n        This method is provided to support a common interface, but\n        the actual key used for a Google Compute Engine service account\n        is not available, so it can't be used to sign content.\n\n        Args:\n            blob: bytes, Message to be signed.\n\n        Raises:\n            NotImplementedError, always.", "id": "f2458:c0:m8"}
{"signature": "def _refresh(self, http):", "body": "try:<EOL><INDENT>self._retrieve_info(http)<EOL>self.access_token, self.token_expiry = _metadata.get_token(<EOL>http, service_account=self.service_account_email)<EOL><DEDENT>except http_client.HTTPException as err:<EOL><INDENT>raise client.HttpAccessTokenRefreshError(str(err))<EOL><DEDENT>", "docstring": "Refreshes the access token.\n\n        Skip all the storage hoops and just refresh using the API.\n\n        Args:\n            http: an object to be used to make HTTP requests.\n\n        Raises:\n            HttpAccessTokenRefreshError: When the refresh fails.", "id": "f2458:c0:m5"}
{"signature": "@classmethod<EOL><INDENT>def _get_kind(cls):<DEDENT>", "body": "return '<STR_LIT>'<EOL>", "docstring": "Return the kind name for this class.", "id": "f2459:c3:m0"}
{"signature": "def _validate(self, value):", "body": "_LOGGER.info('<STR_LIT>', type(value))<EOL>if value is not None and not isinstance(value, client.Credentials):<EOL><INDENT>raise TypeError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(self._name, value))<EOL><DEDENT>", "docstring": "Validates a value as a proper credentials object.\n\n        Args:\n            value: A value to be set on the property.\n\n        Raises:\n            TypeError if the value is not an instance of Credentials.", "id": "f2459:c2:m0"}
{"signature": "def _from_base_type(self, value):", "body": "if not value:<EOL><INDENT>return None<EOL><DEDENT>try:<EOL><INDENT>credentials = client.Credentials.new_from_json(value)<EOL><DEDENT>except ValueError:<EOL><INDENT>credentials = None<EOL><DEDENT>return credentials<EOL>", "docstring": "Converts our stored JSON string back to the desired type.\n\n        Args:\n            value: A value from the datastore to be converted to the\n                   desired type.\n\n        Returns:\n            A deserialized Credentials (or subclass) object, else None if\n            the value can't be parsed.", "id": "f2459:c2:m2"}
{"signature": "def _validate(self, value):", "body": "_LOGGER.info('<STR_LIT>', type(value))<EOL>if value is not None and not isinstance(value, client.Flow):<EOL><INDENT>raise TypeError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(self._name, value))<EOL><DEDENT>", "docstring": "Validates a value as a proper Flow object.\n\n        Args:\n            value: A value to be set on the property.\n\n        Raises:\n            TypeError if the value is not an instance of Flow.", "id": "f2459:c1:m0"}
{"signature": "def _load_credentials(self):", "body": "if not self._file:<EOL><INDENT>return<EOL><DEDENT>loaded_credentials = _load_credentials_file(self._file)<EOL>self._credentials.update(loaded_credentials)<EOL>logger.debug('<STR_LIT>')<EOL>", "docstring": "(Re-)loads the credentials from the file.", "id": "f2461:c0:m1"}
{"signature": "def locked_put(self, credentials):", "body": "return self._backend.locked_put(self._key, credentials)<EOL>", "docstring": "Writes the given credentials to the store.\n\n        Args:\n            credentials: an instance of\n                :class:`oauth2client.client.Credentials`.", "id": "f2461:c1:m4"}
{"signature": "def has_credentials(self):", "body": "return self.credentials is not None and not self.credentials.invalid<EOL>", "docstring": "True if for the logged in user there are valid access Credentials.\n\n        Must only be called from with a webapp.RequestHandler subclassed method\n        that had been decorated with either @oauth_required or @oauth_aware.", "id": "f2463:c6:m9"}
{"signature": "def _is_ndb(self):", "body": "<EOL>if isinstance(self._model, type):<EOL><INDENT>if _NDB_MODEL is not None and issubclass(self._model, _NDB_MODEL):<EOL><INDENT>return True<EOL><DEDENT>elif issubclass(self._model, db.Model):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>raise TypeError(<EOL>'<STR_LIT>'.format(self._model))<EOL>", "docstring": "Determine whether the model of the instance is an NDB model.\n\n        Returns:\n            Boolean indicating whether or not the model is an NDB or DB model.", "id": "f2463:c4:m1"}
{"signature": "@_helpers.positional(<NUM_LIT:4>)<EOL><INDENT>def __init__(self, model, key_name, property_name, cache=None, user=None):<DEDENT>", "body": "super(StorageByKeyName, self).__init__()<EOL>if key_name is None:<EOL><INDENT>if user is None:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>key_name = user.user_id()<EOL><DEDENT>self._model = model<EOL>self._key_name = key_name<EOL>self._property_name = property_name<EOL>self._cache = cache<EOL>", "docstring": "Constructor for Storage.\n\n        Args:\n            model: db.Model or ndb.Model, model class\n            key_name: string, key name for the entity that has the credentials\n            property_name: string, name of the property that is a\n                           CredentialsProperty or CredentialsNDBProperty.\n            cache: memcache, a write-through cache to put in front of the\n                   datastore. If the model you are using is an NDB model, using\n                   a cache will be redundant since the model uses an instance\n                   cache and memcache for you.\n            user: users.User object, optional. Can be used to grab user ID as a\n                  key_name if no key name is specified.", "id": "f2463:c4:m0"}
{"signature": "@db.non_transactional(allow_existing=True)<EOL><INDENT>def locked_put(self, credentials):<DEDENT>", "body": "entity = self._model.get_or_insert(self._key_name)<EOL>setattr(entity, self._property_name, credentials)<EOL>entity.put()<EOL>if self._cache:<EOL><INDENT>self._cache.set(self._key_name, credentials.to_json())<EOL><DEDENT>", "docstring": "Write a Credentials to the datastore.\n\n        Args:\n            credentials: Credentials, the credentials to store.", "id": "f2463:c4:m5"}
{"signature": "@db.non_transactional(allow_existing=True)<EOL><INDENT>def locked_delete(self):<DEDENT>", "body": "if self._cache:<EOL><INDENT>self._cache.delete(self._key_name)<EOL><DEDENT>self._delete_entity()<EOL>", "docstring": "Delete Credential from datastore.", "id": "f2463:c4:m6"}
{"signature": "def get_flow(self):", "body": "return getattr(self._tls, '<STR_LIT>', None)<EOL>", "docstring": "A thread local Flow object.\n\n        Returns:\n            A credentials.Flow object, or None if the flow hasn't been set in\n            this thread yet, which happens in _create_flow() since Flows are\n            created lazily.", "id": "f2463:c6:m3"}
{"signature": "def oauth_aware(self, method):", "body": "def setup_oauth(request_handler, *args, **kwargs):<EOL><INDENT>if self._in_error:<EOL><INDENT>self._display_error_message(request_handler)<EOL>return<EOL><DEDENT>user = users.get_current_user()<EOL>if not user:<EOL><INDENT>request_handler.redirect(users.create_login_url(<EOL>request_handler.request.uri))<EOL>return<EOL><DEDENT>self._create_flow(request_handler)<EOL>self.flow.params['<STR_LIT:state>'] = _build_state_value(request_handler,<EOL>user)<EOL>self.credentials = self._storage_class(<EOL>self._credentials_class, None,<EOL>self._credentials_property_name, user=user).get()<EOL>try:<EOL><INDENT>resp = method(request_handler, *args, **kwargs)<EOL><DEDENT>finally:<EOL><INDENT>self.credentials = None<EOL><DEDENT>return resp<EOL><DEDENT>return setup_oauth<EOL>", "docstring": "Decorator that sets up for OAuth 2.0 dance, but doesn't do it.\n\n        Does all the setup for the OAuth dance, but doesn't initiate it.\n        This decorator is useful if you want to create a page that knows\n        whether or not the user has granted access to this application.\n        From within a method decorated with @oauth_aware the has_credentials()\n        and authorize_url() methods can be called.\n\n        Args:\n            method: callable, to be decorated method of a webapp.RequestHandler\n                    instance.", "id": "f2463:c6:m8"}
{"signature": "@_helpers.positional(<NUM_LIT:2>)<EOL><INDENT>def __init__(self, scope, **kwargs):<DEDENT>", "body": "self.scope = _helpers.scopes_to_string(scope)<EOL>self._kwargs = kwargs<EOL>self.service_account_id = kwargs.get('<STR_LIT>', None)<EOL>self._service_account_email = None<EOL>super(AppAssertionCredentials, self).__init__(None)<EOL>", "docstring": "Constructor for AppAssertionCredentials\n\n        Args:\n            scope: string or iterable of strings, scope(s) of the credentials\n                   being requested.\n            **kwargs: optional keyword args, including:\n            service_account_id: service account id of the application. If None\n                                or unspecified, the default service account for\n                                the app is used.", "id": "f2463:c1:m0"}
{"signature": "@_helpers.positional(<NUM_LIT:2>)<EOL>def oauth2decorator_from_clientsecrets(filename, scope,<EOL>message=None, cache=None):", "body": "return OAuth2DecoratorFromClientSecrets(filename, scope,<EOL>message=message, cache=cache)<EOL>", "docstring": "Creates an OAuth2Decorator populated from a clientsecrets file.\n\n    Args:\n        filename: string, File name of client secrets.\n        scope: string or list of strings, scope(s) of the credentials being\n               requested.\n        message: string, A friendly string to display to the user if the\n                 clientsecrets file is missing or invalid. The message may\n                 contain HTML and will be presented on the web interface for\n                 any method that uses the decorator.\n        cache: An optional cache service client that implements get() and set()\n               methods. See clientsecrets.loadfile() for details.\n\n    Returns: An OAuth2Decorator", "id": "f2463:m5"}
{"signature": "def has_credentials(self):", "body": "if not self.credentials:<EOL><INDENT>return False<EOL><DEDENT>elif (self.credentials.access_token_expired and<EOL>not self.credentials.refresh_token):<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Returns True if there are valid credentials for the current user.", "id": "f2464:c0:m9"}
{"signature": "def _get_flow_for_token(csrf_token):", "body": "flow_pickle = session.pop(<EOL>_FLOW_KEY.format(csrf_token), None)<EOL>if flow_pickle is None:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return pickle.loads(flow_pickle)<EOL><DEDENT>", "docstring": "Retrieves the flow instance associated with a given CSRF token from\n    the Flask session.", "id": "f2464:m0"}
{"signature": "def authorize_view(self):", "body": "args = request.args.to_dict()<EOL>args['<STR_LIT>'] = request.args.getlist('<STR_LIT>')<EOL>return_url = args.pop('<STR_LIT>', None)<EOL>if return_url is None:<EOL><INDENT>return_url = request.referrer or '<STR_LIT:/>'<EOL><DEDENT>flow = self._make_flow(return_url=return_url, **args)<EOL>auth_url = flow.step1_get_authorize_url()<EOL>return redirect(auth_url)<EOL>", "docstring": "Flask view that starts the authorization flow.\n\n        Starts flow by redirecting the user to the OAuth2 provider.", "id": "f2464:c0:m6"}
{"signature": "@property<EOL><INDENT>def credentials(self):<DEDENT>", "body": "ctx = _app_ctx_stack.top<EOL>if not hasattr(ctx, _CREDENTIALS_KEY):<EOL><INDENT>ctx.google_oauth2_credentials = self.storage.get()<EOL><DEDENT>return ctx.google_oauth2_credentials<EOL>", "docstring": "The credentials for the current user or None if unavailable.", "id": "f2464:c0:m8"}
{"signature": "def authorize_url(self, return_url, **kwargs):", "body": "return url_for('<STR_LIT>', return_url=return_url, **kwargs)<EOL>", "docstring": "Creates a URL that can be used to start the authorization flow.\n\n        When the user is directed to the URL, the authorization flow will\n        begin. Once complete, the user will be redirected to the specified\n        return URL.\n\n        Any kwargs are passed into the flow constructor.", "id": "f2464:c0:m12"}
{"signature": "def __init__(self, service_name, user_name):", "body": "super(Storage, self).__init__(lock=threading.Lock())<EOL>self._service_name = service_name<EOL>self._user_name = user_name<EOL>", "docstring": "Constructor.\n\n        Args:\n            service_name: string, The name of the service under which the\n                          credentials are stored.\n            user_name: string, The name of the user to store credentials for.", "id": "f2465:c0:m0"}
{"signature": "def locked_delete(self):", "body": "keyring.set_password(self._service_name, self._user_name, '<STR_LIT>')<EOL>", "docstring": "Delete Credentials file.\n\n        Args:\n            credentials: Credentials, the credentials to store.", "id": "f2465:c0:m3"}
{"signature": "def verify(self, message, signature):", "body": "message = _helpers._to_bytes(message, encoding='<STR_LIT:utf-8>')<EOL>try:<EOL><INDENT>return rsa.pkcs1.verify(message, signature, self._pubkey)<EOL><DEDENT>except (ValueError, rsa.pkcs1.VerificationError):<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Verifies a message against a signature.\n\n        Args:\n            message: string or bytes, The message to verify. If string, will be\n                     encoded to bytes as utf-8.\n            signature: string or bytes, The signature on the message. If\n                       string, will be encoded to bytes as utf-8.\n\n        Returns:\n            True if message was signed by the private key associated with the\n            public key that this object was constructed with.", "id": "f2467:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def from_string(cls, key, password='<STR_LIT>'):<DEDENT>", "body": "key = _helpers._from_bytes(key)  <EOL>marker_id, key_bytes = pem.readPemBlocksFromFile(<EOL>six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)<EOL>if marker_id == <NUM_LIT:0>:<EOL><INDENT>pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,<EOL>format='<STR_LIT>')<EOL><DEDENT>elif marker_id == <NUM_LIT:1>:<EOL><INDENT>key_info, remaining = decoder.decode(<EOL>key_bytes, asn1Spec=_PKCS8_SPEC)<EOL>if remaining != b'<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>', remaining)<EOL><DEDENT>pkey_info = key_info.getComponentByName('<STR_LIT>')<EOL>pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),<EOL>format='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return cls(pkey)<EOL>", "docstring": "Construct an RsaSigner instance from a string.\n\n        Args:\n            key: string, private key in PEM format.\n            password: string, password for private key file. Unused for PEM\n                      files.\n\n        Returns:\n            RsaSigner instance.\n\n        Raises:\n            ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in\n            PEM format.", "id": "f2467:c1:m2"}
{"signature": "def verify_signed_jwt_with_certs(jwt, certs, audience=None):", "body": "jwt = _helpers._to_bytes(jwt)<EOL>if jwt.count(b'<STR_LIT:.>') != <NUM_LIT:2>:<EOL><INDENT>raise AppIdentityError(<EOL>'<STR_LIT>'.format(jwt))<EOL><DEDENT>header, payload, signature = jwt.split(b'<STR_LIT:.>')<EOL>message_to_sign = header + b'<STR_LIT:.>' + payload<EOL>signature = _helpers._urlsafe_b64decode(signature)<EOL>payload_bytes = _helpers._urlsafe_b64decode(payload)<EOL>try:<EOL><INDENT>payload_dict = json.loads(_helpers._from_bytes(payload_bytes))<EOL><DEDENT>except:<EOL><INDENT>raise AppIdentityError('<STR_LIT>'.format(payload_bytes))<EOL><DEDENT>_verify_signature(message_to_sign, signature, certs.values())<EOL>_verify_time_range(payload_dict)<EOL>_check_audience(payload_dict, audience)<EOL>return payload_dict<EOL>", "docstring": "Verify a JWT against public certs.\n\n    See http://self-issued.info/docs/draft-jones-json-web-token.html.\n\n    Args:\n        jwt: string, A JWT.\n        certs: dict, Dictionary where values of public keys in PEM format.\n        audience: string, The audience, 'aud', that this JWT should contain. If\n                  None then the JWT's 'aud' parameter is not verified.\n\n    Returns:\n        dict, The deserialized JSON payload in the JWT.\n\n    Raises:\n        AppIdentityError: if any checks are failed.", "id": "f2468:m5"}
{"signature": "def _verify_signature(message, signature, certs):", "body": "for pem in certs:<EOL><INDENT>verifier = Verifier.from_string(pem, is_x509_cert=True)<EOL>if verifier.verify(message, signature):<EOL><INDENT>return<EOL><DEDENT><DEDENT>raise AppIdentityError('<STR_LIT>')<EOL>", "docstring": "Verifies signed content using a list of certificates.\n\n    Args:\n        message: string or bytes, The message to verify.\n        signature: string or bytes, The signature on the message.\n        certs: iterable, certificates in PEM format.\n\n    Raises:\n        AppIdentityError: If none of the certificates can verify the message\n                          against the signature.", "id": "f2468:m2"}
{"signature": "def loadfile(filename, cache=None):", "body": "_SECRET_NAMESPACE = '<STR_LIT>'<EOL>if not cache:<EOL><INDENT>return _loadfile(filename)<EOL><DEDENT>obj = cache.get(filename, namespace=_SECRET_NAMESPACE)<EOL>if obj is None:<EOL><INDENT>client_type, client_info = _loadfile(filename)<EOL>obj = {client_type: client_info}<EOL>cache.set(filename, obj, namespace=_SECRET_NAMESPACE)<EOL><DEDENT>return next(six.iteritems(obj))<EOL>", "docstring": "Loading of client_secrets JSON file, optionally backed by a cache.\n\n    Typical cache storage would be App Engine memcache service,\n    but you can pass in any other cache client that implements\n    these methods:\n\n    * ``get(key, namespace=ns)``\n    * ``set(key, value, namespace=ns)``\n\n    Usage::\n\n        # without caching\n        client_type, client_info = loadfile('secrets.json')\n        # using App Engine memcache service\n        from google.appengine.api import memcache\n        client_type, client_info = loadfile('secrets.json', cache=memcache)\n\n    Args:\n        filename: string, Path to a client_secrets.json file on a filesystem.\n        cache: An optional cache service client that implements get() and set()\n        methods. If not specified, the file is always being loaded from\n                 a filesystem.\n\n    Raises:\n        InvalidClientSecretsError: In case of a validation error or some\n                                   I/O failure. Can happen only on cache miss.\n\n    Returns:\n        (client_type, client_info) tuple, as _loadfile() normally would.\n        JSON contents is validated only during first load. Cache hits are not\n        validated.", "id": "f2469:m4"}
{"signature": "def code_challenge(verifier):", "body": "digest = hashlib.sha256(verifier).digest()<EOL>return base64.urlsafe_b64encode(digest).rstrip(b'<STR_LIT:=>')<EOL>", "docstring": "Creates a 'code_challenge' as described in section 4.2 of RFC 7636\nby taking the sha256 hash of the verifier and then urlsafe\nbase64-encoding it.\n\nArgs:\n    verifier: bytestring, representing a code_verifier as generated by\n        code_verifier().\n\nReturns:\n    Bytestring, representing a urlsafe base64-encoded sha256 hash digest,\n        without '=' padding.", "id": "f2470:m1"}
{"signature": "def code_verifier(n_bytes=<NUM_LIT:64>):", "body": "verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'<STR_LIT:=>')<EOL>if len(verifier) < <NUM_LIT>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>elif len(verifier) > <NUM_LIT>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>return verifier<EOL><DEDENT>", "docstring": "Generates a 'code_verifier' as described in section 4.1 of RFC 7636.\n\nThis is a 'high-entropy cryptographic random string' that will be\nimpractical for an attacker to guess.\n\nArgs:\n    n_bytes: integer between 31 and 96, inclusive. default: 64\n        number of bytes of entropy to include in verifier.\n\nReturns:\n    Bytestring, representing urlsafe base64-encoded random data.", "id": "f2470:m0"}
{"signature": "def sign(self, message):", "body": "message = _helpers._to_bytes(message, encoding='<STR_LIT:utf-8>')<EOL>return crypto.sign(self._key, message, '<STR_LIT>')<EOL>", "docstring": "Signs a message.\n\n        Args:\n            message: bytes, Message to be signed.\n\n        Returns:\n            string, The signature of the message for the given key.", "id": "f2471:c1:m1"}
{"signature": "def __init__(self, pkey):", "body": "self._key = pkey<EOL>", "docstring": "Constructor.\n\n        Args:\n            pkey: OpenSSL.crypto.PKey (or equiv), The private key to sign with.", "id": "f2471:c1:m0"}
{"signature": "def verify(self, message, signature):", "body": "message = _helpers._to_bytes(message, encoding='<STR_LIT:utf-8>')<EOL>signature = _helpers._to_bytes(signature, encoding='<STR_LIT:utf-8>')<EOL>try:<EOL><INDENT>crypto.verify(self._pubkey, signature, message, '<STR_LIT>')<EOL>return True<EOL><DEDENT>except crypto.Error:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Verifies a message against a signature.\n\n        Args:\n        message: string or bytes, The message to verify. If string, will be\n                 encoded to bytes as utf-8.\n        signature: string or bytes, The signature on the message. If string,\n                   will be encoded to bytes as utf-8.\n\n        Returns:\n            True if message was signed by the private key associated with the\n            public key that this object was constructed with.", "id": "f2471:c0:m1"}
{"signature": "def __init__(self, pubkey):", "body": "self._pubkey = pubkey<EOL>", "docstring": "Constructor.\n\n        Args:\n            pubkey: OpenSSL.crypto.PKey, The public key to verify with.", "id": "f2471:c0:m0"}
{"signature": "def save_to_well_known_file(credentials, well_known_file=None):", "body": "<EOL>if well_known_file is None:<EOL><INDENT>well_known_file = _get_well_known_file()<EOL><DEDENT>config_dir = os.path.dirname(well_known_file)<EOL>if not os.path.isdir(config_dir):<EOL><INDENT>raise OSError(<EOL>'<STR_LIT>'.format(config_dir))<EOL><DEDENT>credentials_data = credentials.serialization_data<EOL>_save_private_file(well_known_file, credentials_data)<EOL>", "docstring": "Save the provided GoogleCredentials to the well known file.\n\n    Args:\n        credentials: the credentials to be saved to the well known file;\n                     it should be an instance of GoogleCredentials\n        well_known_file: the name of the file where the credentials are to be\n                         saved; this parameter is supposed to be used for\n                         testing only", "id": "f2472:m5"}
{"signature": "@_helpers.positional(<NUM_LIT:8>)<EOL><INDENT>def __init__(self, access_token, client_id, client_secret, refresh_token,<EOL>token_expiry, token_uri, user_agent, revoke_uri=None,<EOL>id_token=None, token_response=None, scopes=None,<EOL>token_info_uri=None, id_token_jwt=None):<DEDENT>", "body": "self.access_token = access_token<EOL>self.client_id = client_id<EOL>self.client_secret = client_secret<EOL>self.refresh_token = refresh_token<EOL>self.store = None<EOL>self.token_expiry = token_expiry<EOL>self.token_uri = token_uri<EOL>self.user_agent = user_agent<EOL>self.revoke_uri = revoke_uri<EOL>self.id_token = id_token<EOL>self.id_token_jwt = id_token_jwt<EOL>self.token_response = token_response<EOL>self.scopes = set(_helpers.string_to_scopes(scopes or []))<EOL>self.token_info_uri = token_info_uri<EOL>self.invalid = False<EOL>", "docstring": "Create an instance of OAuth2Credentials.\n\n        This constructor is not usually called by the user, instead\n        OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.\n\n        Args:\n            access_token: string, access token.\n            client_id: string, client identifier.\n            client_secret: string, client secret.\n            refresh_token: string, refresh token.\n            token_expiry: datetime, when the access_token expires.\n            token_uri: string, URI of token endpoint.\n            user_agent: string, The HTTP User-Agent to provide for this\n                        application.\n            revoke_uri: string, URI for revoke endpoint. Defaults to None; a\n                        token can't be revoked if this is None.\n            id_token: object, The identity of the resource owner.\n            token_response: dict, the decoded response to the token request.\n                            None if a token hasn't been requested yet. Stored\n                            because some providers (e.g. 
wordpress.com) include\n                            extra fields that clients may want.\n            scopes: list, authorized scopes for these credentials.\n            token_info_uri: string, the URI for the token info endpoint.\n                            Defaults to None; scopes can not be refreshed if\n                            this is None.\n            id_token_jwt: string, the encoded and signed identity JWT. The\n                          decoded version of this is stored in id_token.\n\n        Notes:\n            store: callable, A callable that when passed a Credential\n                   will store the credential back to where it came from.\n                   This is needed to store the latest access_token if it\n                   has expired and been refreshed.", "id": "f2472:c16:m0"}
{"signature": "def retrieve_scopes(self, http):", "body": "self._retrieve_scopes(http)<EOL>return self.scopes<EOL>", "docstring": "Retrieves the canonical list of scopes for this access token.\n\n        Gets the scopes from the OAuth2 provider.\n\n        Args:\n            http: httplib2.Http, an http object to be used to make the refresh\n                  request.\n\n        Returns:\n            A set of strings containing the canonical list of scopes.", "id": "f2472:c16:m6"}
{"signature": "def _updateFromCredential(self, other):", "body": "self.__dict__.update(other.__getstate__())<EOL>", "docstring": "Update this Credential from another instance.", "id": "f2472:c16:m12"}
{"signature": "@_helpers.positional(<NUM_LIT:2>)<EOL><INDENT>def __init__(self, assertion_type, user_agent=None,<EOL>token_uri=oauth2client.GOOGLE_TOKEN_URI,<EOL>revoke_uri=oauth2client.GOOGLE_REVOKE_URI,<EOL>**unused_kwargs):<DEDENT>", "body": "super(AssertionCredentials, self).__init__(<EOL>None,<EOL>None,<EOL>None,<EOL>None,<EOL>None,<EOL>token_uri,<EOL>user_agent,<EOL>revoke_uri=revoke_uri)<EOL>self.assertion_type = assertion_type<EOL>", "docstring": "Constructor for AssertionFlowCredentials.\n\n        Args:\n            assertion_type: string, assertion type that will be declared to the\n                            auth server\n            user_agent: string, The HTTP User-Agent to provide for this\n                        application.\n            token_uri: string, URI for token endpoint. For convenience defaults\n                       to Google's endpoints but any OAuth 2.0 provider can be\n                       used.\n            revoke_uri: string, URI for revoke endpoint.", "id": "f2472:c19:m0"}
{"signature": "@staticmethod<EOL><INDENT>def from_stream(credential_filename):<DEDENT>", "body": "if credential_filename and os.path.isfile(credential_filename):<EOL><INDENT>try:<EOL><INDENT>return _get_application_default_credential_from_file(<EOL>credential_filename)<EOL><DEDENT>except (ApplicationDefaultCredentialsError, ValueError) as error:<EOL><INDENT>extra_help = ('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>_raise_exception_for_reading_json(credential_filename,<EOL>extra_help,<EOL>error)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ApplicationDefaultCredentialsError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Create a Credentials object by reading information from a file.\n\n        It returns an object of type GoogleCredentials.\n\n        Args:\n            credential_filename: the path to the file from where the\n                                 credentials are to be read\n\n        Raises:\n            ApplicationDefaultCredentialsError: raised when the credentials\n                                                fail to be retrieved.", "id": "f2472:c18:m10"}
{"signature": "def _do_revoke(self, http, token):", "body": "logger.info('<STR_LIT>')<EOL>query_params = {'<STR_LIT>': token}<EOL>token_revoke_uri = _helpers.update_query_params(<EOL>self.revoke_uri, query_params)<EOL>resp, content = transport.request(http, token_revoke_uri)<EOL>if resp.status == http_client.METHOD_NOT_ALLOWED:<EOL><INDENT>body = urllib.parse.urlencode(query_params)<EOL>resp, content = transport.request(http, token_revoke_uri,<EOL>method='<STR_LIT:POST>', body=body)<EOL><DEDENT>if resp.status == http_client.OK:<EOL><INDENT>self.invalid = True<EOL><DEDENT>else:<EOL><INDENT>error_msg = '<STR_LIT>'.format(resp.status)<EOL>try:<EOL><INDENT>d = json.loads(_helpers._from_bytes(content))<EOL>if '<STR_LIT:error>' in d:<EOL><INDENT>error_msg = d['<STR_LIT:error>']<EOL><DEDENT><DEDENT>except (TypeError, ValueError):<EOL><INDENT>pass<EOL><DEDENT>raise TokenRevokeError(error_msg)<EOL><DEDENT>if self.store:<EOL><INDENT>self.store.delete()<EOL><DEDENT>", "docstring": "Revokes this credential and deletes the stored copy (if it exists).\n\n        Args:\n            http: an object to be used to make HTTP requests.\n            token: A string used as the token to be revoked. Can be either an\n                   access_token or refresh_token.\n\n        Raises:\n            TokenRevokeError: If the revoke request does not return with a\n                              200 OK.", "id": "f2472:c16:m20"}
{"signature": "def _parse_exchange_token_response(content):", "body": "resp = {}<EOL>content = _helpers._from_bytes(content)<EOL>try:<EOL><INDENT>resp = json.loads(content)<EOL><DEDENT>except Exception:<EOL><INDENT>resp = _helpers.parse_unique_urlencoded(content)<EOL><DEDENT>if resp and '<STR_LIT>' in resp:<EOL><INDENT>resp['<STR_LIT>'] = resp.pop('<STR_LIT>')<EOL><DEDENT>return resp<EOL>", "docstring": "Parses response of an exchange token request.\n\n    Most providers return JSON but some (e.g. Facebook) return a\n    url-encoded string.\n\n    Args:\n        content: The body of a response\n\n    Returns:\n        Content as a dictionary object. Note that the dict could be empty,\n        i.e. {}. That basically indicates a failure.", "id": "f2472:m16"}
{"signature": "def _get_application_default_credential_from_file(filename):", "body": "<EOL>with open(filename) as file_obj:<EOL><INDENT>client_credentials = json.load(file_obj)<EOL><DEDENT>credentials_type = client_credentials.get('<STR_LIT:type>')<EOL>if credentials_type == AUTHORIZED_USER:<EOL><INDENT>required_fields = set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL><DEDENT>elif credentials_type == SERVICE_ACCOUNT:<EOL><INDENT>required_fields = set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>raise ApplicationDefaultCredentialsError(<EOL>\"<STR_LIT>\" +<EOL>AUTHORIZED_USER + \"<STR_LIT>\" + SERVICE_ACCOUNT + \"<STR_LIT>\")<EOL><DEDENT>missing_fields = required_fields.difference(client_credentials.keys())<EOL>if missing_fields:<EOL><INDENT>_raise_exception_for_missing_fields(missing_fields)<EOL><DEDENT>if client_credentials['<STR_LIT:type>'] == AUTHORIZED_USER:<EOL><INDENT>return GoogleCredentials(<EOL>access_token=None,<EOL>client_id=client_credentials['<STR_LIT>'],<EOL>client_secret=client_credentials['<STR_LIT>'],<EOL>refresh_token=client_credentials['<STR_LIT>'],<EOL>token_expiry=None,<EOL>token_uri=oauth2client.GOOGLE_TOKEN_URI,<EOL>user_agent='<STR_LIT>')<EOL><DEDENT>else:  <EOL><INDENT>from oauth2client import service_account<EOL>return service_account._JWTAccessCredentials.from_json_keyfile_dict(<EOL>client_credentials)<EOL><DEDENT>", "docstring": "Build the Application Default Credentials from file.", "id": "f2472:m8"}
{"signature": "def authorize(self, http):", "body": "raise NotImplementedError<EOL>", "docstring": "Take an httplib2.Http instance (or equivalent) and authorizes it.\n\n        Authorizes it for the set of credentials, usually by replacing\n        http.request() with a method that adds in the appropriate headers and\n        then delegates to the original Http.request() method.\n\n        Args:\n            http: httplib2.Http, an http object to be used to make the refresh\n                  request.", "id": "f2472:c13:m0"}
{"signature": "def _get_well_known_file():", "body": "<EOL>default_config_dir = os.getenv(_CLOUDSDK_CONFIG_ENV_VAR)<EOL>if default_config_dir is None:<EOL><INDENT>if os.name == '<STR_LIT>':<EOL><INDENT>try:<EOL><INDENT>default_config_dir = os.path.join(os.environ['<STR_LIT>'],<EOL>_CLOUDSDK_CONFIG_DIRECTORY)<EOL><DEDENT>except KeyError:<EOL><INDENT>drive = os.environ.get('<STR_LIT>', '<STR_LIT>')<EOL>default_config_dir = os.path.join(drive, '<STR_LIT:\\\\>',<EOL>_CLOUDSDK_CONFIG_DIRECTORY)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>default_config_dir = os.path.join(os.path.expanduser('<STR_LIT>'),<EOL>'<STR_LIT>',<EOL>_CLOUDSDK_CONFIG_DIRECTORY)<EOL><DEDENT><DEDENT>return os.path.join(default_config_dir, _WELL_KNOWN_CREDENTIALS_FILE)<EOL>", "docstring": "Get the well known file produced by command 'gcloud auth login'.", "id": "f2472:m7"}
{"signature": "@_helpers.positional(<NUM_LIT:2>)<EOL>def flow_from_clientsecrets(filename, scope, redirect_uri=None,<EOL>message=None, cache=None, login_hint=None,<EOL>device_uri=None, pkce=None, code_verifier=None,<EOL>prompt=None):", "body": "try:<EOL><INDENT>client_type, client_info = clientsecrets.loadfile(filename,<EOL>cache=cache)<EOL>if client_type in (clientsecrets.TYPE_WEB,<EOL>clientsecrets.TYPE_INSTALLED):<EOL><INDENT>constructor_kwargs = {<EOL>'<STR_LIT>': redirect_uri,<EOL>'<STR_LIT>': client_info['<STR_LIT>'],<EOL>'<STR_LIT>': client_info['<STR_LIT>'],<EOL>'<STR_LIT>': login_hint,<EOL>}<EOL>revoke_uri = client_info.get('<STR_LIT>')<EOL>optional = (<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>)<EOL>for param in optional:<EOL><INDENT>if locals()[param] is not None:<EOL><INDENT>constructor_kwargs[param] = locals()[param]<EOL><DEDENT><DEDENT>return OAuth2WebServerFlow(<EOL>client_info['<STR_LIT>'], client_info['<STR_LIT>'],<EOL>scope, **constructor_kwargs)<EOL><DEDENT><DEDENT>except clientsecrets.InvalidClientSecretsError as e:<EOL><INDENT>if message is not None:<EOL><INDENT>if e.args:<EOL><INDENT>message = ('<STR_LIT>'<EOL>'<STR_LIT>'.format(e, message))<EOL><DEDENT>sys.exit(message)<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise UnknownClientSecretsFlowError(<EOL>'<STR_LIT>'.format(client_type))<EOL><DEDENT>", "docstring": "Create a Flow from a clientsecrets file.\n\n    Will create the right kind of Flow based on the contents of the\n    clientsecrets file or will raise InvalidClientSecretsError for unknown\n    types of Flows.\n\n    Args:\n        filename: string, File name of client secrets.\n        scope: string or iterable of strings, scope(s) to request.\n        redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for\n                      a non-web-based application, or a URI that handles the\n                      callback from the authorization 
server.\n        message: string, A friendly string to display to the user if the\n                 clientsecrets file is missing or invalid. If message is\n                 provided then sys.exit will be called in the case of an error.\n                 If message in not provided then\n                 clientsecrets.InvalidClientSecretsError will be raised.\n        cache: An optional cache service client that implements get() and set()\n               methods. See clientsecrets.loadfile() for details.\n        login_hint: string, Either an email address or domain. Passing this\n                    hint will either pre-fill the email box on the sign-in form\n                    or select the proper multi-login session, thereby\n                    simplifying the login flow.\n        device_uri: string, URI for device authorization endpoint. For\n                    convenience defaults to Google's endpoints but any\n                    OAuth 2.0 provider can be used.\n\n    Returns:\n        A Flow object.\n\n    Raises:\n        UnknownClientSecretsFlowError: if the file describes an unknown kind of\n                                       Flow.\n        clientsecrets.InvalidClientSecretsError: if the clientsecrets file is\n                                                 invalid.", "id": "f2472:m20"}
{"signature": "def revoke(self, http):", "body": "self._revoke(http)<EOL>", "docstring": "Revokes a refresh_token and makes the credentials void.\n\n        Args:\n            http: httplib2.Http, an http object to be used to make the revoke\n                  request.", "id": "f2472:c16:m3"}
{"signature": "@_helpers.positional(<NUM_LIT:2>)<EOL><INDENT>def step2_exchange(self, code=None, http=None, device_flow_info=None):<DEDENT>", "body": "if code is None and device_flow_info is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if code is not None and device_flow_info is not None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if code is None:<EOL><INDENT>code = device_flow_info.device_code<EOL><DEDENT>elif not isinstance(code, (six.string_types, six.binary_type)):<EOL><INDENT>if '<STR_LIT:code>' not in code:<EOL><INDENT>raise FlowExchangeError(code.get(<EOL>'<STR_LIT:error>', '<STR_LIT>'))<EOL><DEDENT>code = code['<STR_LIT:code>']<EOL><DEDENT>post_data = {<EOL>'<STR_LIT>': self.client_id,<EOL>'<STR_LIT:code>': code,<EOL>'<STR_LIT>': self.scope,<EOL>}<EOL>if self.client_secret is not None:<EOL><INDENT>post_data['<STR_LIT>'] = self.client_secret<EOL><DEDENT>if self._pkce:<EOL><INDENT>post_data['<STR_LIT>'] = self.code_verifier<EOL><DEDENT>if device_flow_info is not None:<EOL><INDENT>post_data['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>post_data['<STR_LIT>'] = '<STR_LIT>'<EOL>post_data['<STR_LIT>'] = self.redirect_uri<EOL><DEDENT>body = urllib.parse.urlencode(post_data)<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>}<EOL>if self.authorization_header is not None:<EOL><INDENT>headers['<STR_LIT>'] = self.authorization_header<EOL><DEDENT>if self.user_agent is not None:<EOL><INDENT>headers['<STR_LIT>'] = self.user_agent<EOL><DEDENT>if http is None:<EOL><INDENT>http = transport.get_http_object()<EOL><DEDENT>resp, content = transport.request(<EOL>http, self.token_uri, method='<STR_LIT:POST>', body=body, headers=headers)<EOL>d = _parse_exchange_token_response(content)<EOL>if resp.status == http_client.OK and '<STR_LIT>' in d:<EOL><INDENT>access_token = d['<STR_LIT>']<EOL>refresh_token = d.get('<STR_LIT>', None)<EOL>if not refresh_token:<EOL><INDENT>logger.info(<EOL>'<STR_LIT>'<EOL>\"<STR_LIT>\")<EOL><DEDENT>token_expiry = 
None<EOL>if '<STR_LIT>' in d:<EOL><INDENT>delta = datetime.timedelta(seconds=int(d['<STR_LIT>']))<EOL>token_expiry = delta + _UTCNOW()<EOL><DEDENT>extracted_id_token = None<EOL>id_token_jwt = None<EOL>if '<STR_LIT>' in d:<EOL><INDENT>extracted_id_token = _extract_id_token(d['<STR_LIT>'])<EOL>id_token_jwt = d['<STR_LIT>']<EOL><DEDENT>logger.info('<STR_LIT>')<EOL>return OAuth2Credentials(<EOL>access_token, self.client_id, self.client_secret,<EOL>refresh_token, token_expiry, self.token_uri, self.user_agent,<EOL>revoke_uri=self.revoke_uri, id_token=extracted_id_token,<EOL>id_token_jwt=id_token_jwt, token_response=d, scopes=self.scope,<EOL>token_info_uri=self.token_info_uri)<EOL><DEDENT>else:<EOL><INDENT>logger.info('<STR_LIT>', content)<EOL>if '<STR_LIT:error>' in d:<EOL><INDENT>error_msg = (str(d['<STR_LIT:error>']) +<EOL>str(d.get('<STR_LIT>', '<STR_LIT>')))<EOL><DEDENT>else:<EOL><INDENT>error_msg = '<STR_LIT>'.format(str(resp.status))<EOL><DEDENT>raise FlowExchangeError(error_msg)<EOL><DEDENT>", "docstring": "Exchanges a code for OAuth2Credentials.\n\n        Args:\n            code: string, a dict-like object, or None. For a non-device\n                  flow, this is either the response code as a string, or a\n                  dictionary of query parameters to the redirect_uri. For a\n                  device flow, this should be None.\n            http: httplib2.Http, optional http instance to use when fetching\n                  credentials.\n            device_flow_info: DeviceFlowInfo, return value from step1 in the\n                              case of a device flow.\n\n        Returns:\n            An OAuth2Credentials object that can be used to authorize requests.\n\n        Raises:\n            FlowExchangeError: if a problem occurred exchanging the code for a\n                               refresh_token.\n            ValueError: if code and device_flow_info are both provided or both\n                        missing.", "id": "f2472:c21:m3"}
{"signature": "def apply(self, headers):", "body": "headers['<STR_LIT>'] = '<STR_LIT>' + self.access_token<EOL>", "docstring": "Add the authorization to the headers.\n\n        Args:\n            headers: dict, the headers to add the Authorization header to.", "id": "f2472:c16:m4"}
{"signature": "def _to_json(self, strip, to_serialize=None):", "body": "curr_type = self.__class__<EOL>if to_serialize is None:<EOL><INDENT>to_serialize = copy.copy(self.__dict__)<EOL><DEDENT>else:<EOL><INDENT>to_serialize = copy.copy(to_serialize)<EOL><DEDENT>for member in strip:<EOL><INDENT>if member in to_serialize:<EOL><INDENT>del to_serialize[member]<EOL><DEDENT><DEDENT>to_serialize['<STR_LIT>'] = _parse_expiry(<EOL>to_serialize.get('<STR_LIT>'))<EOL>to_serialize['<STR_LIT>'] = curr_type.__name__<EOL>to_serialize['<STR_LIT>'] = curr_type.__module__<EOL>for key, val in to_serialize.items():<EOL><INDENT>if isinstance(val, bytes):<EOL><INDENT>to_serialize[key] = val.decode('<STR_LIT:utf-8>')<EOL><DEDENT>if isinstance(val, set):<EOL><INDENT>to_serialize[key] = list(val)<EOL><DEDENT><DEDENT>return json.dumps(to_serialize)<EOL>", "docstring": "Utility function that creates JSON repr. of a Credentials object.\n\n        Args:\n            strip: array, An array of names of members to exclude from the\n                   JSON.\n            to_serialize: dict, (Optional) The properties for this object\n                          that will be serialized. This allows callers to\n                          modify before serializing.\n\n        Returns:\n            string, a JSON representation of this instance, suitable to pass to\n            from_json().", "id": "f2472:c13:m4"}
{"signature": "def _in_gce_environment():", "body": "if SETTINGS.env_name is not None:<EOL><INDENT>return SETTINGS.env_name == '<STR_LIT>'<EOL><DEDENT>if NO_GCE_CHECK != '<STR_LIT:True>' and _detect_gce_environment():<EOL><INDENT>SETTINGS.env_name = '<STR_LIT>'<EOL>return True<EOL><DEDENT>return False<EOL>", "docstring": "Detect if the code is running in the Compute Engine environment.\n\n    Returns:\n        True if running in the GCE environment, False otherwise.", "id": "f2472:m3"}
{"signature": "def _detect_gce_environment():", "body": "<EOL>http = transport.get_http_object(timeout=GCE_METADATA_TIMEOUT)<EOL>try:<EOL><INDENT>response, _ = transport.request(<EOL>http, _GCE_METADATA_URI, headers=_GCE_HEADERS)<EOL>return (<EOL>response.status == http_client.OK and<EOL>response.get(_METADATA_FLAVOR_HEADER) == _DESIRED_METADATA_FLAVOR)<EOL><DEDENT>except socket.error:  <EOL><INDENT>logger.info('<STR_LIT>')<EOL>return False<EOL><DEDENT>", "docstring": "Determine if the current environment is Compute Engine.\n\n    Returns:\n        Boolean indicating whether or not the current environment is Google\n        Compute Engine.", "id": "f2472:m1"}
{"signature": "def create_scoped_required(self):", "body": "return False<EOL>", "docstring": "Whether this Credentials object is scopeless.\n\n        create_scoped(scopes) method needs to be called in order to create\n        a Credentials object for API calls.", "id": "f2472:c18:m1"}
{"signature": "def _oauth2_web_server_flow_params(kwargs):", "body": "params = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:code>',<EOL>}<EOL>params.update(kwargs)<EOL>approval_prompt = params.get('<STR_LIT>')<EOL>if approval_prompt is not None:<EOL><INDENT>logger.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>if approval_prompt == '<STR_LIT>':<EOL><INDENT>logger.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>params['<STR_LIT>'] = '<STR_LIT>'<EOL>del params['<STR_LIT>']<EOL><DEDENT><DEDENT>return params<EOL>", "docstring": "Configures redirect URI parameters for OAuth2WebServerFlow.", "id": "f2472:m19"}
{"signature": "def _do_refresh_request(self, http):", "body": "body = self._generate_refresh_request_body()<EOL>headers = self._generate_refresh_request_headers()<EOL>logger.info('<STR_LIT>')<EOL>resp, content = transport.request(<EOL>http, self.token_uri, method='<STR_LIT:POST>',<EOL>body=body, headers=headers)<EOL>content = _helpers._from_bytes(content)<EOL>if resp.status == http_client.OK:<EOL><INDENT>d = json.loads(content)<EOL>self.token_response = d<EOL>self.access_token = d['<STR_LIT>']<EOL>self.refresh_token = d.get('<STR_LIT>', self.refresh_token)<EOL>if '<STR_LIT>' in d:<EOL><INDENT>delta = datetime.timedelta(seconds=int(d['<STR_LIT>']))<EOL>self.token_expiry = delta + _UTCNOW()<EOL><DEDENT>else:<EOL><INDENT>self.token_expiry = None<EOL><DEDENT>if '<STR_LIT>' in d:<EOL><INDENT>self.id_token = _extract_id_token(d['<STR_LIT>'])<EOL>self.id_token_jwt = d['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>self.id_token = None<EOL>self.id_token_jwt = None<EOL><DEDENT>self.invalid = False<EOL>if self.store:<EOL><INDENT>self.store.locked_put(self)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.info('<STR_LIT>', content)<EOL>error_msg = '<STR_LIT>'.format(resp.status)<EOL>try:<EOL><INDENT>d = json.loads(content)<EOL>if '<STR_LIT:error>' in d:<EOL><INDENT>error_msg = d['<STR_LIT:error>']<EOL>if '<STR_LIT>' in d:<EOL><INDENT>error_msg += '<STR_LIT>' + d['<STR_LIT>']<EOL><DEDENT>self.invalid = True<EOL>if self.store is not None:<EOL><INDENT>self.store.locked_put(self)<EOL><DEDENT><DEDENT><DEDENT>except (TypeError, ValueError):<EOL><INDENT>pass<EOL><DEDENT>raise HttpAccessTokenRefreshError(error_msg, status=resp.status)<EOL><DEDENT>", "docstring": "Refresh the access_token using the refresh_token.\n\n        Args:\n            http: an object to be used to make HTTP requests.\n\n        Raises:\n            HttpAccessTokenRefreshError: When the refresh fails.", "id": "f2472:c16:m18"}
{"signature": "def _revoke(self, http):", "body": "self._do_revoke(http, self.refresh_token or self.access_token)<EOL>", "docstring": "Revokes this credential and deletes the stored copy (if it exists).\n\n        Args:\n            http: an object to be used to make HTTP requests.", "id": "f2472:c16:m19"}
{"signature": "@_helpers.positional(<NUM_LIT:3>)<EOL>def credentials_from_clientsecrets_and_code(filename, scope, code,<EOL>message=None,<EOL>redirect_uri='<STR_LIT>',<EOL>http=None,<EOL>cache=None,<EOL>device_uri=None):", "body": "flow = flow_from_clientsecrets(filename, scope, message=message,<EOL>cache=cache, redirect_uri=redirect_uri,<EOL>device_uri=device_uri)<EOL>credentials = flow.step2_exchange(code, http=http)<EOL>return credentials<EOL>", "docstring": "Returns OAuth2Credentials from a clientsecrets file and an auth code.\n\n    Will create the right kind of Flow based on the contents of the\n    clientsecrets file or will raise InvalidClientSecretsError for unknown\n    types of Flows.\n\n    Args:\n        filename: string, File name of clientsecrets.\n        scope: string or iterable of strings, scope(s) to request.\n        code: string, An authorization code, most likely passed down from\n              the client\n        message: string, A friendly string to display to the user if the\n                 clientsecrets file is missing or invalid. If message is\n                 provided then sys.exit will be called in the case of an error.\n                 If message in not provided then\n                 clientsecrets.InvalidClientSecretsError will be raised.\n        redirect_uri: string, this is generally set to 'postmessage' to match\n                      the redirect_uri that the client specified\n        http: httplib2.Http, optional http instance to use to do the fetch\n        cache: An optional cache service client that implements get() and set()\n               methods. See clientsecrets.loadfile() for details.\n        device_uri: string, OAuth 2.0 device authorization endpoint\n        pkce: boolean, default: False, Generate and include a \"Proof Key\n              for Code Exchange\" (PKCE) with your authorization and token\n              requests. 
This adds security for installed applications that\n              cannot protect a client_secret. See RFC 7636 for details.\n        code_verifier: bytestring or None, default: None, parameter passed\n                       as part of the code exchange when pkce=True. If\n                       None, a code_verifier will automatically be\n                       generated as part of step1_get_authorize_url(). See\n                       RFC 7636 for details.\n\n    Returns:\n        An OAuth2Credentials object.\n\n    Raises:\n        FlowExchangeError: if the authorization code cannot be exchanged for an\n                           access token\n        UnknownClientSecretsFlowError: if the file describes an unknown kind\n                                       of Flow.\n        clientsecrets.InvalidClientSecretsError: if the clientsecrets file is\n                                                 invalid.", "id": "f2472:m18"}
{"signature": "def __init__(self, access_token, user_agent, revoke_uri=None):", "body": "super(AccessTokenCredentials, self).__init__(<EOL>access_token,<EOL>None,<EOL>None,<EOL>None,<EOL>None,<EOL>None,<EOL>user_agent,<EOL>revoke_uri=revoke_uri)<EOL>", "docstring": "Create an instance of OAuth2Credentials\n\n        This is one of the few types if Credentials that you should contrust,\n        Credentials objects are usually instantiated by a Flow.\n\n        Args:\n            access_token: string, access token.\n            user_agent: string, The HTTP User-Agent to provide for this\n                        application.\n            revoke_uri: string, URI for revoke endpoint. Defaults to None; a\n                        token can't be revoked if this is None.", "id": "f2472:c17:m0"}
{"signature": "def _in_gae_environment():", "body": "if SETTINGS.env_name is not None:<EOL><INDENT>return SETTINGS.env_name in ('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>import google.appengine  <EOL><DEDENT>except ImportError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>server_software = os.environ.get(_SERVER_SOFTWARE, '<STR_LIT>')<EOL>if server_software.startswith('<STR_LIT>'):<EOL><INDENT>SETTINGS.env_name = '<STR_LIT>'<EOL>return True<EOL><DEDENT>elif server_software.startswith('<STR_LIT>'):<EOL><INDENT>SETTINGS.env_name = '<STR_LIT>'<EOL>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Detects if the code is running in the App Engine environment.\n\n    Returns:\n        True if running in the GAE environment, False otherwise.", "id": "f2472:m2"}
{"signature": "def locked_get(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Retrieve credential.\n\n        The Storage lock must be held when this is called.\n\n        Returns:\n            oauth2client.client.Credentials", "id": "f2472:c15:m3"}
{"signature": "def __init__(self, access_token, client_id, client_secret, refresh_token,<EOL>token_expiry, token_uri, user_agent,<EOL>revoke_uri=oauth2client.GOOGLE_REVOKE_URI):", "body": "super(GoogleCredentials, self).__init__(<EOL>access_token, client_id, client_secret, refresh_token,<EOL>token_expiry, token_uri, user_agent, revoke_uri=revoke_uri)<EOL>", "docstring": "Create an instance of GoogleCredentials.\n\n        This constructor is not usually called by the user, instead\n        GoogleCredentials objects are instantiated by\n        GoogleCredentials.from_stream() or\n        GoogleCredentials.get_application_default().\n\n        Args:\n            access_token: string, access token.\n            client_id: string, client identifier.\n            client_secret: string, client secret.\n            refresh_token: string, refresh token.\n            token_expiry: datetime, when the access_token expires.\n            token_uri: string, URI of token endpoint.\n            user_agent: string, The HTTP User-Agent to provide for this\n                        application.\n            revoke_uri: string, URI for revoke endpoint. Defaults to\n                        oauth2client.GOOGLE_REVOKE_URI; a token can't be\n                        revoked if this is None.", "id": "f2472:c18:m0"}
{"signature": "def apply(self, headers):", "body": "raise NotImplementedError<EOL>", "docstring": "Add the authorization to the headers.\n\n        Args:\n            headers: dict, the headers to add the Authorization header to.", "id": "f2472:c13:m3"}
{"signature": "def __init__(self, lock=None):", "body": "self._lock = lock<EOL>", "docstring": "Create a Storage instance.\n\n        Args:\n            lock: An optional threading.Lock-like object. Must implement at\n                  least acquire() and release(). Does not need to be\n                  re-entrant.", "id": "f2472:c15:m0"}
{"signature": "def get(self):", "body": "self.acquire_lock()<EOL>try:<EOL><INDENT>return self.locked_get()<EOL><DEDENT>finally:<EOL><INDENT>self.release_lock()<EOL><DEDENT>", "docstring": "Retrieve credential.\n\n        The Storage lock must *not* be held when this is called.\n\n        Returns:\n            oauth2client.client.Credentials", "id": "f2472:c15:m6"}
{"signature": "def set_store(self, store):", "body": "self.store = store<EOL>", "docstring": "Set the Storage for the credential.\n\n        Args:\n            store: Storage, an implementation of Storage object.\n                   This is needed to store the latest access_token if it\n                   has expired and been refreshed. This implementation uses\n                   locking to check for updates before updating the\n                   access_token.", "id": "f2472:c16:m10"}
{"signature": "def _do_retrieve_scopes(self, http, token):", "body": "logger.info('<STR_LIT>')<EOL>query_params = {'<STR_LIT>': token, '<STR_LIT>': '<STR_LIT>'}<EOL>token_info_uri = _helpers.update_query_params(<EOL>self.token_info_uri, query_params)<EOL>resp, content = transport.request(http, token_info_uri)<EOL>content = _helpers._from_bytes(content)<EOL>if resp.status == http_client.OK:<EOL><INDENT>d = json.loads(content)<EOL>self.scopes = set(_helpers.string_to_scopes(d.get('<STR_LIT>', '<STR_LIT>')))<EOL><DEDENT>else:<EOL><INDENT>error_msg = '<STR_LIT>'.format(resp.status)<EOL>try:<EOL><INDENT>d = json.loads(content)<EOL>if '<STR_LIT>' in d:<EOL><INDENT>error_msg = d['<STR_LIT>']<EOL><DEDENT><DEDENT>except (TypeError, ValueError):<EOL><INDENT>pass<EOL><DEDENT>raise Error(error_msg)<EOL><DEDENT>", "docstring": "Retrieves the list of authorized scopes from the OAuth2 provider.\n\n        Args:\n            http: an object to be used to make HTTP requests.\n            token: A string used as the token to identify the credentials to\n                   the provider.\n\n        Raises:\n            Error: When refresh fails, indicating the the access token is\n                   invalid.", "id": "f2472:c16:m22"}
{"signature": "def acquire_lock(self):", "body": "if self._lock is not None:<EOL><INDENT>self._lock.acquire()<EOL><DEDENT>", "docstring": "Acquires any lock necessary to access this Storage.\n\n        This lock is not reentrant.", "id": "f2472:c15:m1"}
{"signature": "def authorize(self, http):", "body": "transport.wrap_http_for_auth(self, http)<EOL>return http<EOL>", "docstring": "Authorize an httplib2.Http instance with these credentials.\n\n        The modified http.request method will add authentication headers to\n        each request and will refresh access_tokens when a 401 is received on a\n        request. In addition the http.request method has a credentials\n        property, http.request.credentials, which is the Credentials object\n        that authorized it.\n\n        Args:\n            http: An instance of ``httplib2.Http`` or something that acts\n                  like it.\n\n        Returns:\n            A modified instance of http that was passed in.\n\n        Example::\n\n            h = httplib2.Http()\n            h = credentials.authorize(h)\n\n        You can't create a new OAuth subclass of httplib2.Authentication\n        because it never gets passed the absolute URI, which is needed for\n        signing. So instead we have to overload 'request' with a closure\n        that adds in the Authorization header and then calls the original\n        version of 'request()'.", "id": "f2472:c16:m1"}
{"signature": "def release_lock(self):", "body": "if self._lock is not None:<EOL><INDENT>self._lock.release()<EOL><DEDENT>", "docstring": "Release the Storage lock.\n\n        Trying to release a lock that isn't held will result in a\n        RuntimeError in the case of a threading.Lock or multiprocessing.Lock.", "id": "f2472:c15:m2"}
{"signature": "def create_scoped(self, scopes):", "body": "return self<EOL>", "docstring": "Create a Credentials object for the given scopes.\n\n        The Credentials type is preserved.", "id": "f2472:c18:m2"}
{"signature": "def sign_blob(self, blob):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Cryptographically sign a blob (of bytes).\n\n        Args:\n            blob: bytes, Message to be signed.\n\n        Returns:\n            tuple, A pair of the private key ID used to sign the blob and\n            the signed contents.", "id": "f2472:c19:m4"}
{"signature": "def revoke(self, http):", "body": "raise NotImplementedError<EOL>", "docstring": "Revokes a refresh_token and makes the credentials void.\n\n        Args:\n            http: httplib2.Http, an http object to be used to make the revoke\n                  request.", "id": "f2472:c13:m2"}
{"signature": "def to_json(self):", "body": "return self._to_json(self.NON_SERIALIZED_MEMBERS)<EOL>", "docstring": "Creating a JSON representation of an instance of Credentials.\n\n        Returns:\n            string, a JSON representation of this instance, suitable to pass to\n            from_json().", "id": "f2472:c13:m5"}
{"signature": "@_helpers.positional(<NUM_LIT:2>)<EOL>def verify_id_token(id_token, audience, http=None,<EOL>cert_uri=ID_TOKEN_VERIFICATION_CERTS):", "body": "_require_crypto_or_die()<EOL>if http is None:<EOL><INDENT>http = transport.get_cached_http()<EOL><DEDENT>resp, content = transport.request(http, cert_uri)<EOL>if resp.status == http_client.OK:<EOL><INDENT>certs = json.loads(_helpers._from_bytes(content))<EOL>return crypt.verify_signed_jwt_with_certs(id_token, certs, audience)<EOL><DEDENT>else:<EOL><INDENT>raise VerifyJwtTokenError('<STR_LIT>'.format(resp.status))<EOL><DEDENT>", "docstring": "Verifies a signed JWT id_token.\n\n    This function requires PyOpenSSL and because of that it does not work on\n    App Engine.\n\n    Args:\n        id_token: string, A Signed JWT.\n        audience: string, The audience 'aud' that the token should be for.\n        http: httplib2.Http, instance to use to make the HTTP request. Callers\n              should supply an instance that has caching enabled.\n        cert_uri: string, URI of the certificates in JSON format to\n                  verify the JWT against.\n\n    Returns:\n        The deserialized JSON in the JWT.\n\n    Raises:\n        oauth2client.crypt.AppIdentityError: if the JWT fails to verify.\n        CryptoUnavailableError: if no crypto library is available.", "id": "f2472:m14"}
{"signature": "@_helpers.positional(<NUM_LIT:1>)<EOL><INDENT>def step1_get_device_and_user_codes(self, http=None):<DEDENT>", "body": "if self.device_uri is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>body = urllib.parse.urlencode({<EOL>'<STR_LIT>': self.client_id,<EOL>'<STR_LIT>': self.scope,<EOL>})<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>}<EOL>if self.user_agent is not None:<EOL><INDENT>headers['<STR_LIT>'] = self.user_agent<EOL><DEDENT>if http is None:<EOL><INDENT>http = transport.get_http_object()<EOL><DEDENT>resp, content = transport.request(<EOL>http, self.device_uri, method='<STR_LIT:POST>', body=body, headers=headers)<EOL>content = _helpers._from_bytes(content)<EOL>if resp.status == http_client.OK:<EOL><INDENT>try:<EOL><INDENT>flow_info = json.loads(content)<EOL><DEDENT>except ValueError as exc:<EOL><INDENT>raise OAuth2DeviceCodeError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(content, exc))<EOL><DEDENT>return DeviceFlowInfo.FromResponse(flow_info)<EOL><DEDENT>else:<EOL><INDENT>error_msg = '<STR_LIT>'.format(resp.status)<EOL>try:<EOL><INDENT>error_dict = json.loads(content)<EOL>if '<STR_LIT:error>' in error_dict:<EOL><INDENT>error_msg += '<STR_LIT>'.format(error_dict['<STR_LIT:error>'])<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT>raise OAuth2DeviceCodeError(error_msg)<EOL><DEDENT>", "docstring": "Returns a user code and the verification URL where to enter it\n\n        Returns:\n            A user code as a string for the user to authorize the application\n            An URL as a string where the user has to enter the code", "id": "f2472:c21:m2"}
{"signature": "def wrap_http_for_auth(credentials, http):", "body": "orig_request_method = http.request<EOL>def new_request(uri, method='<STR_LIT:GET>', body=None, headers=None,<EOL>redirections=httplib2.DEFAULT_MAX_REDIRECTS,<EOL>connection_type=None):<EOL><INDENT>if not credentials.access_token:<EOL><INDENT>_LOGGER.info('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>credentials._refresh(orig_request_method)<EOL><DEDENT>headers = _initialize_headers(headers)<EOL>credentials.apply(headers)<EOL>_apply_user_agent(headers, credentials.user_agent)<EOL>body_stream_position = None<EOL>if all(getattr(body, stream_prop, None) for stream_prop in<EOL>_STREAM_PROPERTIES):<EOL><INDENT>body_stream_position = body.tell()<EOL><DEDENT>resp, content = request(orig_request_method, uri, method, body,<EOL>clean_headers(headers),<EOL>redirections, connection_type)<EOL>max_refresh_attempts = <NUM_LIT:2><EOL>for refresh_attempt in range(max_refresh_attempts):<EOL><INDENT>if resp.status not in REFRESH_STATUS_CODES:<EOL><INDENT>break<EOL><DEDENT>_LOGGER.info('<STR_LIT>',<EOL>resp.status, refresh_attempt + <NUM_LIT:1>,<EOL>max_refresh_attempts)<EOL>credentials._refresh(orig_request_method)<EOL>credentials.apply(headers)<EOL>if body_stream_position is not None:<EOL><INDENT>body.seek(body_stream_position)<EOL><DEDENT>resp, content = request(orig_request_method, uri, method, body,<EOL>clean_headers(headers),<EOL>redirections, connection_type)<EOL><DEDENT>return resp, content<EOL><DEDENT>http.request = new_request<EOL>http.request.credentials = credentials<EOL>", "docstring": "Prepares an HTTP object's request method for auth.\n\n    Wraps HTTP requests with logic to catch auth failures (typically\n    identified via a 401 status code). 
In the event of failure, tries\n    to refresh the token used and then retry the original request.\n\n    Args:\n        credentials: Credentials, the credentials used to identify\n                     the authenticated user.\n        http: httplib2.Http, an http object to be used to make\n              auth requests.", "id": "f2474:m5"}
{"signature": "def get_http_object(*args, **kwargs):", "body": "return httplib2.Http(*args, **kwargs)<EOL>", "docstring": "Return a new HTTP object.\n\n    Args:\n        *args: tuple, The positional arguments to be passed when\n               contructing a new HTTP object.\n        **kwargs: dict, The keyword arguments to be passed when\n                  contructing a new HTTP object.\n\n    Returns:\n        httplib2.Http, an HTTP object.", "id": "f2474:m1"}
{"signature": "def request(http, uri, method='<STR_LIT:GET>', body=None, headers=None,<EOL>redirections=httplib2.DEFAULT_MAX_REDIRECTS,<EOL>connection_type=None):", "body": "<EOL>http_callable = getattr(http, '<STR_LIT>', http)<EOL>return http_callable(uri, method=method, body=body, headers=headers,<EOL>redirections=redirections,<EOL>connection_type=connection_type)<EOL>", "docstring": "Make an HTTP request with an HTTP object and arguments.\n\n    Args:\n        http: httplib2.Http, an http object to be used to make requests.\n        uri: string, The URI to be requested.\n        method: string, The HTTP method to use for the request. Defaults\n                to 'GET'.\n        body: string, The payload / body in HTTP request. By default\n              there is no payload.\n        headers: dict, Key-value pairs of request headers. By default\n                 there are no headers.\n        redirections: int, The number of allowed 203 redirects for\n                      the request. Defaults to 5.\n        connection_type: httplib.HTTPConnection, a subclass to be used for\n                         establishing connection. If not set, the type\n                         will be determined from the ``uri``.\n\n    Returns:\n        tuple, a pair of a httplib2.Response with the status code and other\n        headers and the bytes of the content returned.", "id": "f2474:m7"}
{"signature": "def clean_headers(headers):", "body": "clean = {}<EOL>try:<EOL><INDENT>for k, v in six.iteritems(headers):<EOL><INDENT>if not isinstance(k, six.binary_type):<EOL><INDENT>k = str(k)<EOL><DEDENT>if not isinstance(v, six.binary_type):<EOL><INDENT>v = str(v)<EOL><DEDENT>clean[_helpers._to_bytes(k)] = _helpers._to_bytes(v)<EOL><DEDENT><DEDENT>except UnicodeEncodeError:<EOL><INDENT>from oauth2client.client import NonAsciiHeaderError<EOL>raise NonAsciiHeaderError(k, '<STR_LIT>', v)<EOL><DEDENT>return clean<EOL>", "docstring": "Forces header keys and values to be strings, i.e not unicode.\n\n    The httplib module just concats the header keys and values in a way that\n    may make the message header a unicode string, which, if it then tries to\n    contatenate to a binary request body may result in a unicode decode error.\n\n    Args:\n        headers: dict, A dictionary of headers.\n\n    Returns:\n        The same dictionary but with all the keys converted to strings.", "id": "f2474:m4"}
{"signature": "def wrap_http_for_jwt_access(credentials, http):", "body": "orig_request_method = http.request<EOL>wrap_http_for_auth(credentials, http)<EOL>authenticated_request_method = http.request<EOL>def new_request(uri, method='<STR_LIT:GET>', body=None, headers=None,<EOL>redirections=httplib2.DEFAULT_MAX_REDIRECTS,<EOL>connection_type=None):<EOL><INDENT>if '<STR_LIT>' in credentials._kwargs:<EOL><INDENT>if (credentials.access_token is None or<EOL>credentials.access_token_expired):<EOL><INDENT>credentials.refresh(None)<EOL><DEDENT>return request(authenticated_request_method, uri,<EOL>method, body, headers, redirections,<EOL>connection_type)<EOL><DEDENT>else:<EOL><INDENT>headers = _initialize_headers(headers)<EOL>_apply_user_agent(headers, credentials.user_agent)<EOL>uri_root = uri.split('<STR_LIT:?>', <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>token, unused_expiry = credentials._create_token({'<STR_LIT>': uri_root})<EOL>headers['<STR_LIT>'] = '<STR_LIT>' + token<EOL>return request(orig_request_method, uri, method, body,<EOL>clean_headers(headers),<EOL>redirections, connection_type)<EOL><DEDENT><DEDENT>http.request = new_request<EOL>http.request.credentials = credentials<EOL>", "docstring": "Prepares an HTTP object's request method for JWT access.\n\n    Wraps HTTP requests with logic to catch auth failures (typically\n    identified via a 401 status code). In the event of failure, tries\n    to refresh the token used and then retry the original request.\n\n    Args:\n        credentials: _JWTAccessCredentials, the credentials used to identify\n                     a service account that uses JWT access tokens.\n        http: httplib2.Http, an http object to be used to make\n              auth requests.", "id": "f2474:m6"}
{"signature": "def _apply_user_agent(headers, user_agent):", "body": "if user_agent is not None:<EOL><INDENT>if '<STR_LIT>' in headers:<EOL><INDENT>headers['<STR_LIT>'] = (user_agent + '<STR_LIT:U+0020>' + headers['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>headers['<STR_LIT>'] = user_agent<EOL><DEDENT><DEDENT>return headers<EOL>", "docstring": "Adds a user-agent to the headers.\n\n    Args:\n        headers: dict, request headers to add / modify user\n                 agent within.\n        user_agent: str, the user agent to add.\n\n    Returns:\n        dict, the original headers passed in, but modified if the\n        user agent is not None.", "id": "f2474:m3"}
{"signature": "def _create_file_if_needed(self):", "body": "if not os.path.exists(self._filename):<EOL><INDENT>old_umask = os.umask(<NUM_LIT>)<EOL>try:<EOL><INDENT>open(self._filename, '<STR_LIT>').close()<EOL><DEDENT>finally:<EOL><INDENT>os.umask(old_umask)<EOL><DEDENT><DEDENT>", "docstring": "Create an empty file if necessary.\n\n        This method will not initialize the file. Instead it implements a\n        simple version of \"touch\" to ensure the file has been created.", "id": "f2475:c0:m2"}
{"signature": "def locked_delete(self):", "body": "os.unlink(self._filename)<EOL>", "docstring": "Delete Credentials file.\n\n        Args:\n            credentials: Credentials, the credentials to store.", "id": "f2475:c0:m4"}
{"signature": "def do_GET(self):", "body": "self.send_response(http_client.OK)<EOL>self.send_header('<STR_LIT>', '<STR_LIT>')<EOL>self.end_headers()<EOL>parts = urllib.parse.urlparse(self.path)<EOL>query = _helpers.parse_unique_urlencoded(parts.query)<EOL>self.server.query_params = query<EOL>self.wfile.write(<EOL>b'<STR_LIT>')<EOL>self.wfile.write(<EOL>b'<STR_LIT>')<EOL>self.wfile.write(b'<STR_LIT>')<EOL>", "docstring": "Handle a GET request.\n\n        Parses the query parameters and prints a message\n        if the flow has completed. Note that we can't detect\n        if an error occurred.", "id": "f2476:c1:m0"}
{"signature": "@_helpers.positional(<NUM_LIT:3>)<EOL>def run_flow(flow, storage, flags=None, http=None):", "body": "if flags is None:<EOL><INDENT>flags = argparser.parse_args()<EOL><DEDENT>logging.getLogger().setLevel(getattr(logging, flags.logging_level))<EOL>if not flags.noauth_local_webserver:<EOL><INDENT>success = False<EOL>port_number = <NUM_LIT:0><EOL>for port in flags.auth_host_port:<EOL><INDENT>port_number = port<EOL>try:<EOL><INDENT>httpd = ClientRedirectServer((flags.auth_host_name, port),<EOL>ClientRedirectHandler)<EOL><DEDENT>except socket.error:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>success = True<EOL>break<EOL><DEDENT><DEDENT>flags.noauth_local_webserver = not success<EOL>if not success:<EOL><INDENT>print(_FAILED_START_MESSAGE)<EOL><DEDENT><DEDENT>if not flags.noauth_local_webserver:<EOL><INDENT>oauth_callback = '<STR_LIT>'.format(<EOL>host=flags.auth_host_name, port=port_number)<EOL><DEDENT>else:<EOL><INDENT>oauth_callback = client.OOB_CALLBACK_URN<EOL><DEDENT>flow.redirect_uri = oauth_callback<EOL>authorize_url = flow.step1_get_authorize_url()<EOL>if not flags.noauth_local_webserver:<EOL><INDENT>import webbrowser<EOL>webbrowser.open(authorize_url, new=<NUM_LIT:1>, autoraise=True)<EOL>print(_BROWSER_OPENED_MESSAGE.format(address=authorize_url))<EOL><DEDENT>else:<EOL><INDENT>print(_GO_TO_LINK_MESSAGE.format(address=authorize_url))<EOL><DEDENT>code = None<EOL>if not flags.noauth_local_webserver:<EOL><INDENT>httpd.handle_request()<EOL>if '<STR_LIT:error>' in httpd.query_params:<EOL><INDENT>sys.exit('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT:code>' in httpd.query_params:<EOL><INDENT>code = httpd.query_params['<STR_LIT:code>']<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>sys.exit('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>code = input('<STR_LIT>').strip()<EOL><DEDENT>try:<EOL><INDENT>credential = flow.step2_exchange(code, http=http)<EOL><DEDENT>except client.FlowExchangeError as 
e:<EOL><INDENT>sys.exit('<STR_LIT>'.format(e))<EOL><DEDENT>storage.put(credential)<EOL>credential.set_store(storage)<EOL>print('<STR_LIT>')<EOL>return credential<EOL>", "docstring": "Core code for a command-line application.\n\n    The ``run()`` function is called from your application and runs\n    through all the steps to obtain credentials. It takes a ``Flow``\n    argument and attempts to open an authorization server page in the\n    user's default web browser. The server asks the user to grant your\n    application access to the user's data. If the user grants access,\n    the ``run()`` function returns new credentials. The new credentials\n    are also stored in the ``storage`` argument, which updates the file\n    associated with the ``Storage`` object.\n\n    It presumes it is run from a command-line application and supports the\n    following flags:\n\n        ``--auth_host_name`` (string, default: ``localhost``)\n           Host name to use when running a local web server to handle\n           redirects during OAuth authorization.\n\n        ``--auth_host_port`` (integer, default: ``[8080, 8090]``)\n           Port to use when running a local web server to handle redirects\n           during OAuth authorization. Repeat this option to specify a list\n           of values.\n\n        ``--[no]auth_local_webserver`` (boolean, default: ``True``)\n           Run a local web server to handle redirects during OAuth\n           authorization.\n\n    The tools module defines an ``ArgumentParser`` the already contains the\n    flag definitions that ``run()`` requires. 
You can pass that\n    ``ArgumentParser`` to your ``ArgumentParser`` constructor::\n\n        parser = argparse.ArgumentParser(\n            description=__doc__,\n            formatter_class=argparse.RawDescriptionHelpFormatter,\n            parents=[tools.argparser])\n        flags = parser.parse_args(argv)\n\n    Args:\n        flow: Flow, an OAuth 2.0 Flow to step through.\n        storage: Storage, a ``Storage`` to store the credential in.\n        flags: ``argparse.Namespace``, (Optional) The command-line flags. This\n               is the object returned from calling ``parse_args()`` on\n               ``argparse.ArgumentParser`` as described above. Defaults\n               to ``argparser.parse_args()``.\n        http: An instance of ``httplib2.Http.request`` or something that\n              acts like it.\n\n    Returns:\n        Credentials, the obtained credential.", "id": "f2476:m1"}
{"signature": "def revoke(self, http):", "body": "pass<EOL>", "docstring": "Cannot revoke JWTAccessCredentials tokens.", "id": "f2477:c1:m3"}
{"signature": "@classmethod<EOL><INDENT>def _from_p12_keyfile_contents(cls, service_account_email,<EOL>private_key_pkcs12,<EOL>private_key_password=None, scopes='<STR_LIT>',<EOL>token_uri=oauth2client.GOOGLE_TOKEN_URI,<EOL>revoke_uri=oauth2client.GOOGLE_REVOKE_URI):<DEDENT>", "body": "if private_key_password is None:<EOL><INDENT>private_key_password = _PASSWORD_DEFAULT<EOL><DEDENT>if crypt.Signer is not crypt.OpenSSLSigner:<EOL><INDENT>raise NotImplementedError(_PKCS12_ERROR)<EOL><DEDENT>signer = crypt.Signer.from_string(private_key_pkcs12,<EOL>private_key_password)<EOL>credentials = cls(service_account_email, signer, scopes=scopes,<EOL>token_uri=token_uri, revoke_uri=revoke_uri)<EOL>credentials._private_key_pkcs12 = private_key_pkcs12<EOL>credentials._private_key_password = private_key_password<EOL>return credentials<EOL>", "docstring": "Factory constructor from JSON keyfile.\n\n        Args:\n            service_account_email: string, The email associated with the\n                                   service account.\n            private_key_pkcs12: string, The contents of a PKCS#12 keyfile.\n            private_key_password: string, (Optional) Password for PKCS#12\n                                  private key. Defaults to ``notasecret``.\n            scopes: List or string, (Optional) Scopes to use when acquiring an\n                    access token.\n            token_uri: string, URI for token endpoint. For convenience defaults\n                       to Google's endpoints but any OAuth 2.0 provider can be\n                       used.\n            revoke_uri: string, URI for revoke endpoint. 
For convenience\n                        defaults to Google's endpoints but any OAuth 2.0\n                        provider can be used.\n\n        Returns:\n            ServiceAccountCredentials, a credentials object created from\n            the keyfile.\n\n        Raises:\n            NotImplementedError if pyOpenSSL is not installed / not the\n            active crypto library.", "id": "f2477:c0:m5"}
{"signature": "def get_access_token(self, http=None, additional_claims=None):", "body": "if additional_claims is None:<EOL><INDENT>if self.access_token is None or self.access_token_expired:<EOL><INDENT>self.refresh(None)<EOL><DEDENT>return client.AccessTokenInfo(<EOL>access_token=self.access_token, expires_in=self._expires_in())<EOL><DEDENT>else:<EOL><INDENT>token, unused_expiry = self._create_token(additional_claims)<EOL>return client.AccessTokenInfo(<EOL>access_token=token, expires_in=self._MAX_TOKEN_LIFETIME_SECS)<EOL><DEDENT>", "docstring": "Create a signed jwt.\n\n        Args:\n            http: unused\n            additional_claims: dict, additional claims to add to\n                the payload of the JWT.\n        Returns:\n            An AccessTokenInfo with the signed jwt", "id": "f2477:c1:m2"}
{"signature": "@classmethod<EOL><INDENT>def from_p12_keyfile(cls, service_account_email, filename,<EOL>private_key_password=None, scopes='<STR_LIT>',<EOL>token_uri=oauth2client.GOOGLE_TOKEN_URI,<EOL>revoke_uri=oauth2client.GOOGLE_REVOKE_URI):<DEDENT>", "body": "with open(filename, '<STR_LIT:rb>') as file_obj:<EOL><INDENT>private_key_pkcs12 = file_obj.read()<EOL><DEDENT>return cls._from_p12_keyfile_contents(<EOL>service_account_email, private_key_pkcs12,<EOL>private_key_password=private_key_password, scopes=scopes,<EOL>token_uri=token_uri, revoke_uri=revoke_uri)<EOL>", "docstring": "Factory constructor from JSON keyfile.\n\n        Args:\n            service_account_email: string, The email associated with the\n                                   service account.\n            filename: string, The location of the PKCS#12 keyfile.\n            private_key_password: string, (Optional) Password for PKCS#12\n                                  private key. Defaults to ``notasecret``.\n            scopes: List or string, (Optional) Scopes to use when acquiring an\n                    access token.\n            token_uri: string, URI for token endpoint. For convenience defaults\n                       to Google's endpoints but any OAuth 2.0 provider can be\n                       used.\n            revoke_uri: string, URI for revoke endpoint. For convenience\n                        defaults to Google's endpoints but any OAuth 2.0\n                        provider can be used.\n\n        Returns:\n            ServiceAccountCredentials, a credentials object created from\n            the keyfile.\n\n        Raises:\n            NotImplementedError if pyOpenSSL is not installed / not the\n            active crypto library.", "id": "f2477:c0:m6"}
{"signature": "@classmethod<EOL><INDENT>def _from_parsed_json_keyfile(cls, keyfile_dict, scopes,<EOL>token_uri=None, revoke_uri=None):<DEDENT>", "body": "creds_type = keyfile_dict.get('<STR_LIT:type>')<EOL>if creds_type != client.SERVICE_ACCOUNT:<EOL><INDENT>raise ValueError('<STR_LIT>', creds_type,<EOL>'<STR_LIT>', client.SERVICE_ACCOUNT)<EOL><DEDENT>service_account_email = keyfile_dict['<STR_LIT>']<EOL>private_key_pkcs8_pem = keyfile_dict['<STR_LIT>']<EOL>private_key_id = keyfile_dict['<STR_LIT>']<EOL>client_id = keyfile_dict['<STR_LIT>']<EOL>if not token_uri:<EOL><INDENT>token_uri = keyfile_dict.get('<STR_LIT>',<EOL>oauth2client.GOOGLE_TOKEN_URI)<EOL><DEDENT>if not revoke_uri:<EOL><INDENT>revoke_uri = keyfile_dict.get('<STR_LIT>',<EOL>oauth2client.GOOGLE_REVOKE_URI)<EOL><DEDENT>signer = crypt.Signer.from_string(private_key_pkcs8_pem)<EOL>credentials = cls(service_account_email, signer, scopes=scopes,<EOL>private_key_id=private_key_id,<EOL>client_id=client_id, token_uri=token_uri,<EOL>revoke_uri=revoke_uri)<EOL>credentials._private_key_pkcs8_pem = private_key_pkcs8_pem<EOL>return credentials<EOL>", "docstring": "Helper for factory constructors from JSON keyfile.\n\n        Args:\n            keyfile_dict: dict-like object, The parsed dictionary-like object\n                          containing the contents of the JSON keyfile.\n            scopes: List or string, Scopes to use when acquiring an\n                    access token.\n            token_uri: string, URI for OAuth 2.0 provider token endpoint.\n                       If unset and not present in keyfile_dict, defaults\n                       to Google's endpoints.\n            revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.\n                       If unset and not present in keyfile_dict, defaults\n                       to Google's endpoints.\n\n        Returns:\n            ServiceAccountCredentials, a credentials object created from\n            the keyfile contents.\n\n        
Raises:\n            ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.\n            KeyError, if one of the expected keys is not present in\n                the keyfile.", "id": "f2477:c0:m2"}
{"signature": "@classmethod<EOL><INDENT>def from_json_keyfile_dict(cls, keyfile_dict, scopes='<STR_LIT>',<EOL>token_uri=None, revoke_uri=None):<DEDENT>", "body": "return cls._from_parsed_json_keyfile(keyfile_dict, scopes,<EOL>token_uri=token_uri,<EOL>revoke_uri=revoke_uri)<EOL>", "docstring": "Factory constructor from parsed JSON keyfile.\n\n        Args:\n            keyfile_dict: dict-like object, The parsed dictionary-like object\n                          containing the contents of the JSON keyfile.\n            scopes: List or string, (Optional) Scopes to use when acquiring an\n                    access token.\n            token_uri: string, URI for OAuth 2.0 provider token endpoint.\n                       If unset and not present in keyfile_dict, defaults\n                       to Google's endpoints.\n            revoke_uri: string, URI for OAuth 2.0 provider revoke endpoint.\n                       If unset and not present in keyfile_dict, defaults\n                       to Google's endpoints.\n\n        Returns:\n            ServiceAccountCredentials, a credentials object created from\n            the keyfile.\n\n        Raises:\n            ValueError, if the credential type is not :data:`SERVICE_ACCOUNT`.\n            KeyError, if one of the expected keys is not present in\n                the keyfile.", "id": "f2477:c0:m4"}
{"signature": "@classmethod<EOL><INDENT>def from_p12_keyfile_buffer(cls, service_account_email, file_buffer,<EOL>private_key_password=None, scopes='<STR_LIT>',<EOL>token_uri=oauth2client.GOOGLE_TOKEN_URI,<EOL>revoke_uri=oauth2client.GOOGLE_REVOKE_URI):<DEDENT>", "body": "private_key_pkcs12 = file_buffer.read()<EOL>return cls._from_p12_keyfile_contents(<EOL>service_account_email, private_key_pkcs12,<EOL>private_key_password=private_key_password, scopes=scopes,<EOL>token_uri=token_uri, revoke_uri=revoke_uri)<EOL>", "docstring": "Factory constructor from JSON keyfile.\n\n        Args:\n            service_account_email: string, The email associated with the\n                                   service account.\n            file_buffer: stream, A buffer that implements ``read()``\n                         and contains the PKCS#12 key contents.\n            private_key_password: string, (Optional) Password for PKCS#12\n                                  private key. Defaults to ``notasecret``.\n            scopes: List or string, (Optional) Scopes to use when acquiring an\n                    access token.\n            token_uri: string, URI for token endpoint. For convenience defaults\n                       to Google's endpoints but any OAuth 2.0 provider can be\n                       used.\n            revoke_uri: string, URI for revoke endpoint. For convenience\n                        defaults to Google's endpoints but any OAuth 2.0\n                        provider can be used.\n\n        Returns:\n            ServiceAccountCredentials, a credentials object created from\n            the keyfile.\n\n        Raises:\n            NotImplementedError if pyOpenSSL is not installed / not the\n            active crypto library.", "id": "f2477:c0:m7"}
{"signature": "def create_delegated(self, sub):", "body": "return self.create_with_claims({'<STR_LIT>': sub})<EOL>", "docstring": "Create credentials that act as domain-wide delegation of authority.\n\n        Use the ``sub`` parameter as the subject to delegate on behalf of\n        that user.\n\n        For example::\n\n          >>> account_sub = 'foo@email.com'\n          >>> delegate_creds = creds.create_delegated(account_sub)\n\n        Args:\n            sub: string, An email address that this service account will\n                 act on behalf of (via domain-wide delegation).\n\n        Returns:\n            ServiceAccountCredentials, a copy of the current service account\n            updated to act on behalf of ``sub``.", "id": "f2477:c0:m16"}
{"signature": "def discover_modules(self):", "body": "modules = [self.package_name]<EOL>for dirpath, dirnames, filenames in os.walk(self.root_path):<EOL><INDENT>root_uri = self._path2uri(os.path.join(self.root_path,<EOL>dirpath))<EOL>filenames = [f[:-<NUM_LIT:3>] for f in filenames if<EOL>f.endswith('<STR_LIT>') and not f.startswith('<STR_LIT>')]<EOL>for filename in filenames:<EOL><INDENT>package_uri = '<STR_LIT:/>'.join((dirpath, filename))<EOL><DEDENT>for subpkg_name in dirnames + filenames:<EOL><INDENT>package_uri = '<STR_LIT:.>'.join((root_uri, subpkg_name))<EOL>package_path = self._uri2path(package_uri)<EOL>if (package_path and<EOL>self._survives_exclude(package_uri, '<STR_LIT>')):<EOL><INDENT>modules.append(package_uri)<EOL><DEDENT><DEDENT><DEDENT>return sorted(modules)<EOL>", "docstring": "Return module sequence discovered from ``self.package_name``\n\n\n        Parameters\n        ----------\n        None\n\n        Returns\n        -------\n        mods : sequence\n            Sequence of module names within ``self.package_name``\n\n        Examples\n        --------\n        >>> dw = ApiDocWriter('sphinx')\n        >>> mods = dw.discover_modules()\n        >>> 'sphinx.util' in mods\n        True\n        >>> dw.package_skip_patterns.append('\\.util$')\n        >>> 'sphinx.util' in dw.discover_modules()\n        False\n        >>>", "id": "f2494:c0:m12"}
{"signature": "def _get_object_name(self, line):", "body": "name = line.split()[<NUM_LIT:1>].split('<STR_LIT:(>')[<NUM_LIT:0>].strip()<EOL>return name.rstrip('<STR_LIT::>')<EOL>", "docstring": "Get second token in line\n        >>> docwriter = ApiDocWriter('sphinx')\n        >>> docwriter._get_object_name(\"  def func():  \")\n        'func'\n        >>> docwriter._get_object_name(\"  class Klass(object):  \")\n        'Klass'\n        >>> docwriter._get_object_name(\"  class Klass:  \")\n        'Klass'", "id": "f2494:c0:m4"}
{"signature": "def _parse_lines(self, linesource):", "body": "functions = []<EOL>classes = []<EOL>for line in linesource:<EOL><INDENT>if line.startswith('<STR_LIT>') and line.count('<STR_LIT:(>'):<EOL><INDENT>name = self._get_object_name(line)<EOL>if not name.startswith('<STR_LIT:_>'):<EOL><INDENT>functions.append(name)<EOL><DEDENT><DEDENT>elif line.startswith('<STR_LIT>'):<EOL><INDENT>name = self._get_object_name(line)<EOL>if not name.startswith('<STR_LIT:_>'):<EOL><INDENT>classes.append(name)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>functions.sort()<EOL>classes.sort()<EOL>return functions, classes<EOL>", "docstring": "Parse lines of text for functions and classes", "id": "f2494:c0:m9"}
{"signature": "def write_index(self, outdir, froot='<STR_LIT>', relative_to=None):", "body": "if self.written_modules is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>path = os.path.join(outdir, froot+self.rst_extension)<EOL>if relative_to is not None:<EOL><INDENT>relpath = (outdir + os.path.sep).replace(relative_to + os.path.sep, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>relpath = outdir<EOL><DEDENT>idx = open(path,'<STR_LIT>')<EOL>w = idx.write<EOL>w('<STR_LIT>')<EOL>title = \"<STR_LIT>\"<EOL>w(title + \"<STR_LIT:\\n>\")<EOL>w(\"<STR_LIT:=>\" * len(title) + \"<STR_LIT>\")<EOL>w('<STR_LIT>')<EOL>for f in self.written_modules:<EOL><INDENT>w('<STR_LIT>' % os.path.join(relpath,f))<EOL><DEDENT>idx.close()<EOL>", "docstring": "Make a reST API index file from written files\n\n        Parameters\n        ----------\n        path : string\n            Filename to write index to\n        outdir : string\n            Directory to which to write generated index file\n        froot : string, optional\n            root (filename without extension) of filename to write to\n            Defaults to 'gen'.  We add ``self.rst_extension``.\n        relative_to : string\n            path to which written filenames are relative.  This\n            component of the written file path will be removed from\n            outdir, in the generated index.  Default is None, meaning,\n            leave path as it is.", "id": "f2494:c0:m15"}
{"signature": "def _import(self, name):", "body": "mod = __import__(name)<EOL>components = name.split('<STR_LIT:.>')<EOL>for comp in components[<NUM_LIT:1>:]:<EOL><INDENT>mod = getattr(mod, comp)<EOL><DEDENT>return mod<EOL>", "docstring": "Import namespace package", "id": "f2494:c0:m3"}
{"signature": "def set_package_name(self, package_name):", "body": "<EOL>self._package_name = package_name<EOL>root_module = self._import(package_name)<EOL>self.root_path = root_module.__path__[-<NUM_LIT:1>]<EOL>self.written_modules = None<EOL>", "docstring": "Set package_name\n\n        >>> docwriter = ApiDocWriter('sphinx')\n        >>> import sphinx\n        >>> docwriter.root_path == sphinx.__path__[0]\n        True\n        >>> docwriter.package_name = 'docutils'\n        >>> import docutils\n        >>> docwriter.root_path == docutils.__path__[0]\n        True", "id": "f2494:c0:m2"}
{"signature": "def _bias_correction(V_IJ, inbag, pred_centered, n_trees):", "body": "n_train_samples = inbag.shape[<NUM_LIT:0>]<EOL>n_var = np.mean(np.square(inbag[<NUM_LIT:0>:n_trees]).mean(axis=<NUM_LIT:1>).T.view() -<EOL>np.square(inbag[<NUM_LIT:0>:n_trees].mean(axis=<NUM_LIT:1>)).T.view())<EOL>boot_var = np.square(pred_centered).sum(axis=<NUM_LIT:1>) / n_trees<EOL>bias_correction = n_train_samples * n_var * boot_var / n_trees<EOL>V_IJ_unbiased = V_IJ - bias_correction<EOL>return V_IJ_unbiased<EOL>", "docstring": "Helper functions that implements bias correction\n\nParameters\n----------\nV_IJ : ndarray\n    Intermediate result in the computation.\n\ninbag : ndarray\n    The inbag matrix that fit the data. If set to `None` (default) it\n    will be inferred from the forest. However, this only works for trees\n    for which bootstrapping was set to `True`. That is, if sampling was\n    done with replacement. Otherwise, users need to provide their own\n    inbag matrix.\n\npred_centered : ndarray\n    Centered predictions that are an intermediate result in the\n    computation.\n\nn_trees : int\n    The number of trees in the forest object.", "id": "f2501:m2"}
{"signature": "def calc_inbag(n_samples, forest):", "body": "if not forest.bootstrap:<EOL><INDENT>e_s = \"<STR_LIT>\"<EOL>e_s = \"<STR_LIT>\"<EOL>raise ValueError(e_s)<EOL><DEDENT>n_trees = forest.n_estimators<EOL>inbag = np.zeros((n_samples, n_trees))<EOL>sample_idx = []<EOL>for t_idx in range(n_trees):<EOL><INDENT>sample_idx.append(<EOL>_generate_sample_indices(forest.estimators_[t_idx].random_state,<EOL>n_samples))<EOL>inbag[:, t_idx] = np.bincount(sample_idx[-<NUM_LIT:1>], minlength=n_samples)<EOL><DEDENT>return inbag<EOL>", "docstring": "Derive samples used to create trees in scikit-learn RandomForest objects.\n\nRecovers the samples in each tree from the random state of that tree using\n:func:`forest._generate_sample_indices`.\n\nParameters\n----------\nn_samples : int\n    The number of samples used to fit the scikit-learn RandomForest object.\n\nforest : RandomForest\n    Regressor or Classifier object that is already fit by scikit-learn.\n\nReturns\n-------\nArray that records how many times a data point was placed in a tree.\nColumns are individual trees. Rows are the number of times a sample was\nused in a tree.", "id": "f2501:m0"}
{"signature": "def gbayes(x0, g_est, sigma):", "body": "Kx = norm().pdf((g_est[<NUM_LIT:0>] - x0) / sigma)<EOL>post = Kx * g_est[<NUM_LIT:1>]<EOL>post /= sum(post)<EOL>return sum(post * g_est[<NUM_LIT:0>])<EOL>", "docstring": "Estimate Bayes posterior with Gaussian noise [Efron2014]_.\n\nParameters\n----------\nx0: ndarray\n    an observation\ng_est: float\n    a prior density, as returned by gfit\nsigma: int\n    noise estimate\n\nReturns\n-------\nAn array of the posterior estimate E[mu | x0]", "id": "f2502:m1"}
{"signature": "def gfit(X, sigma, p=<NUM_LIT:5>, nbin=<NUM_LIT:200>, unif_fraction=<NUM_LIT:0.1>):", "body": "min_x = min(min(X) - <NUM_LIT:2> * np.std(X, ddof=<NUM_LIT:1>), <NUM_LIT:0>)<EOL>max_x = max(max(X) + <NUM_LIT:2> * np.std(X, ddof=<NUM_LIT:1>),<EOL>np.std(X, ddof=<NUM_LIT:1>))<EOL>xvals = np.linspace(min_x, max_x, nbin)<EOL>binw = (max_x - min_x) / (nbin - <NUM_LIT:1>)<EOL>zero_idx = max(np.where(xvals <= <NUM_LIT:0>)[<NUM_LIT:0>])<EOL>noise_kernel = norm().pdf(xvals / sigma) * binw / sigma<EOL>if zero_idx > <NUM_LIT:0>:<EOL><INDENT>noise_rotate = noise_kernel[list(np.arange(zero_idx, len(xvals))) +<EOL>list(np.arange(<NUM_LIT:0>, zero_idx))]<EOL><DEDENT>else:<EOL><INDENT>noise_rotate = noise_kernel<EOL><DEDENT>XX = np.zeros((p, len(xvals)), dtype=np.float)<EOL>for ind, exp in enumerate(range(<NUM_LIT:1>, p+<NUM_LIT:1>)):<EOL><INDENT>mask = np.ones_like(xvals)<EOL>mask[np.where(xvals <= <NUM_LIT:0>)[<NUM_LIT:0>]] = <NUM_LIT:0><EOL>XX[ind, :] = pow(xvals, exp) * mask<EOL><DEDENT>XX = XX.T<EOL>def neg_loglik(eta):<EOL><INDENT>mask = np.ones_like(xvals)<EOL>mask[np.where(xvals <= <NUM_LIT:0>)[<NUM_LIT:0>]] = <NUM_LIT:0><EOL>g_eta_raw = np.exp(np.dot(XX, eta)) * mask<EOL>if ((np.sum(g_eta_raw) == np.inf) |<EOL>(np.sum(g_eta_raw) <=<EOL><NUM_LIT:100> * np.finfo(np.double).tiny)):<EOL><INDENT>return (<NUM_LIT:1000> * (len(X) + sum(eta ** <NUM_LIT:2>)))<EOL><DEDENT>g_eta_main = g_eta_raw / sum(g_eta_raw)<EOL>g_eta = ((<NUM_LIT:1> - unif_fraction) * g_eta_main +<EOL>unif_fraction * mask / sum(mask))<EOL>f_eta = fftconvolve(g_eta, noise_rotate, mode='<STR_LIT>')<EOL>return np.sum(np.interp(X, xvals,<EOL>-np.log(np.maximum(f_eta, <NUM_LIT>))))<EOL><DEDENT>eta_hat = minimize(neg_loglik,<EOL>list(itertools.repeat(-<NUM_LIT:1>, p))).x<EOL>g_eta_raw = np.exp(np.dot(XX, eta_hat)) * mask<EOL>g_eta_main = g_eta_raw / sum(g_eta_raw)<EOL>g_eta = ((<NUM_LIT:1> - unif_fraction) * g_eta_main +<EOL>unif_fraction * mask) / sum(mask)<EOL>return xvals, g_eta<EOL>", "docstring": 
"Fit empirical Bayes prior in the hierarchical model [Efron2014]_.\n\n.. math::\n\n    mu ~ G, X ~ N(mu, sigma^2)\n\nParameters\n----------\nX: ndarray\n    A 1D array of observations.\nsigma: float\n    Noise estimate on X.\np: int\n    Number of parameters used to fit G. Default: 5\nnbin: int\n    Number of bins used for discrete approximation.\n    Default: 200\nunif_fraction: float\n    Fraction of G modeled as \"slab\". Default: 0.1\n\nReturns\n-------\nAn array of the posterior density estimate g.", "id": "f2502:m0"}
{"signature": "def calibrateEB(variances, sigma2):", "body": "if (sigma2 <= <NUM_LIT:0> or min(variances) == max(variances)):<EOL><INDENT>return(np.maximum(variances, <NUM_LIT:0>))<EOL><DEDENT>sigma = np.sqrt(sigma2)<EOL>eb_prior = gfit(variances, sigma)<EOL>part = functools.partial(gbayes, g_est=eb_prior,<EOL>sigma=sigma)<EOL>if len(variances) >= <NUM_LIT:200>:<EOL><INDENT>calib_x = np.percentile(variances,<EOL>np.arange(<NUM_LIT:0>, <NUM_LIT>, <NUM_LIT:2>))<EOL>calib_y = list(map(part, calib_x))<EOL>calib_all = np.interp(variances, calib_x, calib_y)<EOL><DEDENT>else:<EOL><INDENT>calib_all = list(map(part, variances))<EOL><DEDENT>return np.asarray(calib_all)<EOL>", "docstring": "Calibrate noisy variance estimates with empirical Bayes.\n\nParameters\n----------\nvars: ndarray\n    List of variance estimates.\nsigma2: int\n    Estimate of the Monte Carlo noise in vars.\n\nReturns\n-------\nAn array of the calibrated variance estimates", "id": "f2502:m2"}
{"signature": "def dcite(self, *args, **kwargs):", "body": "def nondecorating_decorator(func):<EOL><INDENT>return func<EOL><DEDENT>return nondecorating_decorator<EOL>", "docstring": "If I could cite I would", "id": "f2504:c0:m1"}
{"signature": "def repr_failure(self, excinfo):", "body": "return \"<STR_LIT>\".format(<EOL>excinfo,<EOL>self.indent(self.code),<EOL>excinfo.getrepr(funcargs=True, style='<STR_LIT>')<EOL>)<EOL>", "docstring": "called when self.runtest() raises an exception.", "id": "f2522:c1:m4"}
{"signature": "def backend_extras(*requirements):", "body": "return [\"<STR_LIT>\"] + list(requirements)<EOL>", "docstring": "Construct list of requirements for backend integration.\n\n    All built-in backends depend on PyOpenGL so add it as default requirement.", "id": "f2536:m3"}
{"signature": "def __init__(self, shape, cols=[]):", "body": "self._arrays = {}<EOL>self._names = []<EOL>self.shape = shape<EOL>for colname, value in cols:<EOL><INDENT>self.set(colname, value)<EOL><DEDENT>self._dtype = None<EOL>", "docstring": "cols is a list of (colname, values), shape has to be 1D.", "id": "f2538:c1:m0"}
{"signature": "def read(self, n):", "body": "while len(self.pool) < n:<EOL><INDENT>self.cur = self.files.next()<EOL>self.pool = numpy.append(self.pool,<EOL>self.fetch(self.cur), axis=<NUM_LIT:0>)<EOL><DEDENT>rt = self.pool[:n]<EOL>if n == len(self.pool):<EOL><INDENT>self.pool = self.fetch(None)<EOL><DEDENT>else:<EOL><INDENT>self.pool = self.pool[n:]<EOL><DEDENT>return rt<EOL>", "docstring": "return at most n array items, move the cursor.", "id": "f2540:c0:m2"}
{"signature": "def flatten_dtype(dtype, _next=None):", "body": "types = []<EOL>if _next is None: <EOL><INDENT>_next = [<NUM_LIT:0>, '<STR_LIT>']<EOL>primary = True<EOL><DEDENT>else:<EOL><INDENT>primary = False<EOL><DEDENT>prefix = _next[<NUM_LIT:1>]<EOL>if dtype.names is None:<EOL><INDENT>for i in numpy.ndindex(dtype.shape):<EOL><INDENT>if dtype.base == dtype:<EOL><INDENT>types.append(('<STR_LIT>' % (prefix, simplerepr(i)), dtype))<EOL>_next[<NUM_LIT:0>] += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>_next[<NUM_LIT:1>] = '<STR_LIT>' % (prefix, simplerepr(i))<EOL>types.extend(flatten_dtype(dtype.base, _next))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for field in dtype.names:<EOL><INDENT>typ_fields = dtype.fields[field]<EOL>if len(prefix) > <NUM_LIT:0>:<EOL><INDENT>_next[<NUM_LIT:1>] = prefix + '<STR_LIT:.>' + field<EOL><DEDENT>else:<EOL><INDENT>_next[<NUM_LIT:1>] = '<STR_LIT>' + field<EOL><DEDENT>flat_dt = flatten_dtype(typ_fields[<NUM_LIT:0>], _next)<EOL>types.extend(flat_dt)<EOL><DEDENT><DEDENT>_next[<NUM_LIT:1>] = prefix<EOL>if primary:<EOL><INDENT>return numpy.dtype(types)<EOL><DEDENT>else:<EOL><INDENT>return types<EOL><DEDENT>", "docstring": "Unpack a structured data-type.", "id": "f2541:m8"}
{"signature": "def __init__(self, func, ins=None, outdtype=None, altreduce=None):", "body": "if isinstance(func, numpy.ufunc):<EOL><INDENT>self.ufunc = func<EOL>self.nin = func.nin<EOL>self.ins = (<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>)[:func.nin]<EOL>self.nout = func.nout<EOL>self.outdtype = None<EOL>self.altreduce = None<EOL><DEDENT>else:<EOL><INDENT>self.ufunc = func<EOL>self.nin = len(ins)<EOL>self.ins = ins<EOL>self.nout = <NUM_LIT:1><EOL>self.outdtype = outdtype<EOL>self.altreduce = altreduce<EOL><DEDENT>self.__doc__ = func.__doc__<EOL>if self.nout != <NUM_LIT:1>:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "if func is not ufunc, a bit complicated:\n            ins tells which positional argument will be striped\n            after done, reducefunc is called on the results", "id": "f2542:c0:m0"}
{"signature": "def __new__(cls, array, start=None, end=None):", "body": "self = array.view(type=cls)<EOL>if end is None and start is None:<EOL><INDENT>start = numpy.array([len(arr) for arr in array], dtype='<STR_LIT>')<EOL>array = numpy.concatenate(array)<EOL><DEDENT>if end is None:<EOL><INDENT>sizes = start<EOL>self.start = numpy.zeros(shape=len(sizes), dtype='<STR_LIT>')<EOL>self.end = numpy.zeros(shape=len(sizes), dtype='<STR_LIT>')<EOL>self.end[:] = sizes.cumsum()<EOL>self.start[<NUM_LIT:1>:] = self.end[:-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>self.start = start<EOL>self.end = end<EOL><DEDENT>self.A = array<EOL>return self<EOL>", "docstring": "if end is none, start contains the sizes. \n            if start is also none, array is a list of arrays to concatenate", "id": "f2542:c1:m0"}
{"signature": "def total_memory():", "body": "with file('<STR_LIT>', '<STR_LIT:r>') as f:<EOL><INDENT>for line in f:<EOL><INDENT>words = line.split()<EOL><DEDENT>if words[<NUM_LIT:0>].upper() == '<STR_LIT>':<EOL><INDENT>return int(words[<NUM_LIT:1>]) * <NUM_LIT><EOL><DEDENT><DEDENT>raise IOError('<STR_LIT>')<EOL>", "docstring": "Returns the the amount of memory available for use.\n\n        The memory is obtained from MemTotal entry in /proc/meminfo.\n\n        Notes\n        =====\n        This function is not very useful and not very portable.", "id": "f2546:m2"}
{"signature": "def get(self, Q):", "body": "while self.Errors.empty():<EOL><INDENT>try:<EOL><INDENT>return Q.get(timeout=<NUM_LIT:1>)<EOL><DEDENT>except queue.Empty:<EOL><INDENT>if not self.is_alive():<EOL><INDENT>try:<EOL><INDENT>return Q.get(timeout=<NUM_LIT:0>)<EOL><DEDENT>except queue.Empty:<EOL><INDENT>raise StopProcessGroup<EOL><DEDENT><DEDENT>else:<EOL><INDENT>continue<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise StopProcessGroup<EOL><DEDENT>", "docstring": "Protected get. Get an item from Q.\n            Will block. but if the process group has errors,\n            raise an StopProcessGroup exception.\n\n            A slave process will terminate upon StopProcessGroup.\n            The master process shall read the error from the process group.", "id": "f2546:c3:m8"}
{"signature": "def copy(a):", "body": "shared = anonymousmemmap(a.shape, dtype=a.dtype)<EOL>shared[:] = a[:]<EOL>return shared<EOL>", "docstring": "Copy an array to the shared memory. \n\n        Notes\n        -----\n        copy is not always necessary because the private memory is always copy-on-write.\n\n        Use :code:`a = copy(a)` to immediately dereference the old 'a' on private memory", "id": "f2546:m9"}
{"signature": "def full_like(array, value, dtype=None):", "body": "shared = empty_like(array, dtype)<EOL>shared[:] = value<EOL>return shared<EOL>", "docstring": "Create a shared memory array with the same shape and type as a given array, filled with `value`.", "id": "f2546:m7"}
{"signature": "def empty_like(array, dtype=None):", "body": "array = numpy.asarray(array)<EOL>if dtype is None: <EOL><INDENT>dtype = array.dtype<EOL><DEDENT>return anonymousmemmap(array.shape, dtype)<EOL>", "docstring": "Create a shared memory array from the shape of array.", "id": "f2546:m5"}
{"signature": "def start(self):", "body": "self.thread = Thread(target=self.main)<EOL>self.thread.daemon = True<EOL>self.thread.start()<EOL>", "docstring": "master only", "id": "f2547:c3:m3"}
{"signature": "def slaveraise(self, type, error, traceback):", "body": "message = '<STR_LIT:E>' * <NUM_LIT:1> + pickle.dumps((type,<EOL>'<STR_LIT>'.join(tb.format_exception(type, error, traceback))))<EOL>if self.pipe is not None:<EOL><INDENT>self.pipe.put(message)<EOL><DEDENT>", "docstring": "slave only", "id": "f2547:c3:m5"}
{"signature": "def abort(self):", "body": "self.mutex.release()<EOL>self.turnstile.release()<EOL>self.mutex.release()<EOL>self.turnstile2.release()<EOL>", "docstring": "ensure the master exit from Barrier", "id": "f2547:c5:m1"}
{"signature": "def haserror(self):", "body": "return self.message is not None<EOL>", "docstring": "master only", "id": "f2547:c3:m2"}
{"signature": "def kill_all(self):", "body": "for pid in self.children:<EOL><INDENT>try:<EOL><INDENT>os.kill(pid, signal.SIGTRAP)<EOL><DEDENT>except OSError:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>self.join()<EOL>", "docstring": "kill all slaves and reap the monitor", "id": "f2547:c2:m3"}
{"signature": "@staticmethod<EOL><INDENT>def _get_classes(package_name, base_class):<DEDENT>", "body": "classes = {}<EOL>base_dir = os.getcwd()<EOL>root_module_name = base_dir.split('<STR_LIT:/>')[-<NUM_LIT:1>]<EOL>package_dir = base_dir + '<STR_LIT>' % package_name<EOL>if os.path.isdir(package_dir):<EOL><INDENT>for module_path in os.listdir(package_dir):<EOL><INDENT>if not module_path.endswith('<STR_LIT>'):<EOL><INDENT>continue<EOL><DEDENT>module_name = os.path.splitext(module_path)[<NUM_LIT:0>]<EOL>module_full_name = '<STR_LIT>' % (root_module_name, package_name, module_name)<EOL>__import__(module_full_name)<EOL>work_module = sys.modules[module_full_name]<EOL>for module_item in work_module.__dict__.values():<EOL><INDENT>if type(module_item) is typeand issubclass(module_item, base_class)and module_item is not base_classand hasattr(module_item, '<STR_LIT:name>') and module_item.name:<EOL><INDENT>classes.setdefault(module_item.name, []).append(module_item)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>for work_name, work_modules in classes.items():<EOL><INDENT>if len(work_modules) > <NUM_LIT:1>:<EOL><INDENT>raise DuplicatedNameException('<STR_LIT>' % (<EOL>'<STR_LIT>'.join(map(str, work_modules)),<EOL>work_name<EOL>))<EOL><DEDENT><DEDENT>return tuple([(work_name, work_modules[<NUM_LIT:0>]) for work_name, work_modules in classes.items()])<EOL>", "docstring": "search monits or works classes. Class must have 'name' attribute\n:param package_name: 'monits' or 'works'\n:param base_class: Monit or Work\n:return: tuple of tuples monit/work-name and class", "id": "f2558:c4:m3"}
{"signature": "def format_time_point(<EOL>time_point_string):", "body": "time_point = dateutil.parser.parse(time_point_string)<EOL>if not is_aware(time_point):<EOL><INDENT>time_point = make_aware(time_point)<EOL><DEDENT>time_point = local_time_point(time_point)<EOL>return time_point.strftime(\"<STR_LIT>\")<EOL>", "docstring": ":param str time_point_string: String representation of a time point\n    to format\n:return: Formatted time point\n:rtype: str\n:raises ValueError: If *time_point_string* is not formatted by\n    dateutil.parser.parse\n\nSee :py:meth:`datetime.datetime.isoformat` function for supported formats.", "id": "f2570:m1"}
{"signature": "def format_pathname(<EOL>pathname,<EOL>max_length):", "body": "if max_length <= <NUM_LIT:3>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if len(pathname) > max_length:<EOL><INDENT>pathname = \"<STR_LIT>\".format(pathname[-(max_length-<NUM_LIT:3>):])<EOL><DEDENT>return pathname<EOL>", "docstring": "Format a pathname\n\n:param str pathname: Pathname to format\n:param int max_length: Maximum length of result pathname (> 3)\n:return: Formatted pathname\n:rtype: str\n:raises ValueError: If *max_length* is not larger than 3\n\nThis function formats a pathname so it is not longer than *max_length*\ncharacters. The resulting pathname is returned. It does so by replacing\ncharacters at the start of the *pathname* with three dots, if necessary.\nThe idea is that the end of the *pathname* is the most important part\nto be able to identify the file.", "id": "f2570:m0"}
{"signature": "def register(<EOL>app):", "body": "error_code.register(app)<EOL>", "docstring": "Register all available error handlers\n\n:param flask.Flask app: Application instance", "id": "f2573:m0"}
{"signature": "def response(<EOL>code,<EOL>description):", "body": "payload = jsonify({<EOL>\"<STR_LIT>\": code,<EOL>\"<STR_LIT:message>\": description<EOL>})<EOL>return payload, code<EOL>", "docstring": "Format a response\n\n:param int code: HTTP error code\n:param str description: Error message\n:return: Tuple of a wrapped JSON snippet and the error code\n:rtype: Tuple of :py:class:`flask.Response` containing a JSON snippet,\n    and the error code\n\nThe JSON snippet is formatted like this:\n\n.. code-block:: json\n\n   {\n       \"status_code\": 404,\n       \"message\": \"The requested URL was not found on the server\"\n   }", "id": "f2574:m0"}
{"signature": "def consume_message(<EOL>method):", "body": "def wrapper(<EOL>self,<EOL>channel,<EOL>method_frame,<EOL>header_frame,<EOL>body):<EOL><INDENT>sys.stdout.write(\"<STR_LIT>\".format(body))<EOL>sys.stdout.flush()<EOL>try:<EOL><INDENT>body = body.decode(\"<STR_LIT:utf-8>\")<EOL>data = json.loads(body)<EOL>method(self, data)<EOL><DEDENT>except Exception as exception:<EOL><INDENT>sys.stderr.write(\"<STR_LIT>\".format(traceback.format_exc()))<EOL>sys.stderr.flush()<EOL><DEDENT>channel.basic_ack(delivery_tag=method_frame.delivery_tag)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorator for methods handling requests from RabbitMQ\n\nThe goal of this decorator is to perform the tasks common to all\nmethods handling requests:\n\n- Log the raw message to *stdout*\n- Decode the message into a Python dictionary\n- Log errors to *stderr*\n- Signal the broker that we're done handling the request\n\nThe method passed in will be called with the message body as a\ndictionary. It is assumed here that the message body is a JSON string\nencoded in UTF8.", "id": "f2576:m1"}
{"signature": "def consume_message_with_notify(<EOL>notifier_uri_getter):", "body": "def consume_message_with_notify_decorator(<EOL>method):<EOL><INDENT>@consume_message<EOL>def wrapper(<EOL>self,<EOL>data):<EOL><INDENT>notifier_uri = notifier_uri_getter(self)<EOL>client_id = data[\"<STR_LIT>\"]<EOL>try:<EOL><INDENT>method(self, data)<EOL>notify_client(notifier_uri, client_id, <NUM_LIT:200>)<EOL><DEDENT>except Exception as exception:<EOL><INDENT>notify_client(notifier_uri, client_id, <NUM_LIT>, str(exception))<EOL>raise<EOL><DEDENT><DEDENT>return wrapper<EOL><DEDENT>return consume_message_with_notify_decorator<EOL>", "docstring": "Decorator for methods handling requests from RabbitMQ\n\nThis decorator builds on the :py:func:`consume_message` decorator. It extents\nit by logic for notifying a client of the result of handling the\nrequest.\n\nThe *notifier_uri_getter* argument must be a callable which accepts\n*self* and returns the uri of the notifier service.", "id": "f2576:m2"}
{"signature": "def make_aware(<EOL>time_point):", "body": "assert not is_aware(time_point)<EOL>return time_point.replace(tzinfo=UTC)<EOL>", "docstring": "Return an aware time point\n\n:param datetime.datetime time_point: Unaware time point in UTC\n:return: Aware time point in UTC timezone\n:rtype: datetime.datetime", "id": "f2578:m2"}
{"signature": "def utc_now():", "body": "return datetime.now(timezone.utc)<EOL>", "docstring": "Return an aware :py:class:`datetime.datetime` instance of the current\ndate and time, in UTC timezone\n\n:return: Current date and time, in UTC timezone\n:rtype: datetime.datetime", "id": "f2578:m0"}
{"signature": "def predict(list_items):", "body": "return [i*<NUM_LIT:2> for i in list_items]<EOL>", "docstring": "Returns the double of the items", "id": "f2580:m0"}
{"signature": "@classmethod<EOL><INDENT>def get_data_manager(cls):<DEDENT>", "body": "from parsl.dataflow.dflow import DataFlowKernelLoader<EOL>dfk = DataFlowKernelLoader.dfk()<EOL>return dfk.executors['<STR_LIT>']<EOL>", "docstring": "Return the DataManager of the currently loaded DataFlowKernel.", "id": "f2586:c0:m0"}
{"signature": "def shutdown(self, block=False):", "body": "x = self.executor.shutdown(wait=block)<EOL>logger.debug(\"<STR_LIT>\")<EOL>return x<EOL>", "docstring": "Shutdown the ThreadPool.\n\n        Kwargs:\n            - block (bool): To block for confirmations or not", "id": "f2586:c0:m6"}
{"signature": "def stage_in(self, file, executor):", "body": "if file.scheme == '<STR_LIT>':<EOL><INDENT>working_dir = self.dfk.executors[executor].working_dir<EOL>stage_in_app = self._ftp_stage_in_app(executor=executor)<EOL>app_fut = stage_in_app(working_dir, outputs=[file])<EOL>return app_fut._outputs[<NUM_LIT:0>]<EOL><DEDENT>elif file.scheme == '<STR_LIT:http>' or file.scheme == '<STR_LIT>':<EOL><INDENT>working_dir = self.dfk.executors[executor].working_dir<EOL>stage_in_app = self._http_stage_in_app(executor=executor)<EOL>app_fut = stage_in_app(working_dir, outputs=[file])<EOL>return app_fut._outputs[<NUM_LIT:0>]<EOL><DEDENT>elif file.scheme == '<STR_LIT>':<EOL><INDENT>globus_ep = self._get_globus_endpoint(executor)<EOL>stage_in_app = self._globus_stage_in_app()<EOL>app_fut = stage_in_app(globus_ep, outputs=[file])<EOL>return app_fut._outputs[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>'.format(file.scheme))<EOL><DEDENT>", "docstring": "Transport the file from the input source to the executor.\n\n        This function returns a DataFuture.\n\n        Args:\n            - self\n            - file (File) : file to stage in\n            - executor (str) : an executor the file is going to be staged in to.\n                                If the executor argument is not specified for a file\n                                with 'globus' scheme, the file will be staged in to\n                                the first executor with the \"globus\" key in a config.", "id": "f2586:c0:m10"}
{"signature": "@abstractproperty<EOL><INDENT>def script_dir(self):<DEDENT>", "body": "pass<EOL>", "docstring": "This is a property. Returns the directory assigned for storing all internal scripts such as\n        scheduler submit scripts. This is usually where error logs from the scheduler would reside on the\n        channel destination side.\n\n        Args:\n            - None\n\n        Returns:\n            - Channel script dir", "id": "f2589:c0:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def close(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Closes the channel. Clean out any auth credentials.\n\n        Args:\n            None\n\n        Returns:\n            Bool", "id": "f2589:c0:m4"}
{"signature": "@abstractmethod<EOL><INDENT>def abspath(self, path):<DEDENT>", "body": "pass<EOL>", "docstring": "Return the absolute path.\n\n        Parameters\n        ----------\n        path : str\n            Path for which the absolute path will be returned.", "id": "f2589:c0:m7"}
{"signature": "@abstractmethod<EOL><INDENT>def makedirs(self, path, mode=<NUM_LIT>, exist_ok=False):<DEDENT>", "body": "pass<EOL>", "docstring": "Create a directory.\n\n        If intermediate directories do not exist, they will be created.\n\n        Parameters\n        ----------\n        path : str\n            Path of directory to create.\n        mode : int\n            Permissions (posix-style) for the newly-created directory.\n        exist_ok : bool\n            If False, raise an OSError if the target directory already exists.", "id": "f2589:c0:m5"}
{"signature": "@abstractmethod<EOL><INDENT>def execute_no_wait(self, cmd, walltime, envs={}, *args, **kwargs):<DEDENT>", "body": "pass<EOL>", "docstring": "Execute asynchronousely without waiting for exitcode\n\n        Args:\n            - cmd (string): Command string to execute over the channel\n            - walltime (int) : Timeout in seconds\n\n        KWargs:\n            - envs (dict) : Environment variables to push to the remote side\n\n        Returns:\n            - the type of return value is channel specific", "id": "f2589:c0:m2"}
{"signature": "def close(self):", "body": "return False<EOL>", "docstring": "There's nothing to close here, and this really doesn't do anything\n\n        Returns:\n             - False, because it really did not \"close\" this channel.", "id": "f2590:c0:m4"}
{"signature": "def makedirs(self, path, mode=<NUM_LIT>, exist_ok=False):", "body": "return os.makedirs(path, mode, exist_ok)<EOL>", "docstring": "Create a directory.\n\n        If intermediate directories do not exist, they will be created.\n\n        Parameters\n        ----------\n        path : str\n            Path of directory to create.\n        mode : int\n            Permissions (posix-style) for the newly-created directory.\n        exist_ok : bool\n            If False, raise an OSError if the target directory already exists.", "id": "f2590:c0:m6"}
{"signature": "def isdir(self, path):", "body": "return os.path.isdir(path)<EOL>", "docstring": "Return true if the path refers to an existing directory.\n\n        Parameters\n        ----------\n        path : str\n            Path of directory to check.", "id": "f2590:c0:m5"}
{"signature": "def execute_no_wait(self, cmd, walltime, envs={}):", "body": "current_env = copy.deepcopy(self._envs)<EOL>current_env.update(envs)<EOL>try:<EOL><INDENT>proc = subprocess.Popen(<EOL>cmd,<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE,<EOL>cwd=self.userhome,<EOL>env=current_env,<EOL>shell=True,<EOL>preexec_fn=os.setpgrp<EOL>)<EOL>pid = proc.pid<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.warn(\"<STR_LIT>\", (cmd, e))<EOL>raise<EOL><DEDENT>return pid, proc<EOL>", "docstring": "Synchronously execute a commandline string on the shell.\n\n        Args:\n            - cmd (string) : Commandline string to execute\n            - walltime (int) : walltime in seconds, this is not really used now.\n\n        Returns a tuple containing:\n\n           - pid : process id\n           - proc : a subprocess.Popen object\n\n        Raises:\n         None.", "id": "f2590:c0:m2"}
{"signature": "def abspath(self, path):", "body": "return os.path.abspath(path)<EOL>", "docstring": "Return the absolute path.\n\n        Parameters\n        ----------\n        path : str\n            Path for which the absolute path will be returned.", "id": "f2590:c0:m7"}
{"signature": "def abspath(self, path):", "body": "return self.sftp_client.normalize(path)<EOL>", "docstring": "Return the absolute path on the remote side.\n\n        Parameters\n        ----------\n        path : str\n            Path for which the absolute path will be returned.", "id": "f2592:c1:m9"}
{"signature": "def __init__(self, hostname, username=None, password=None, script_dir=None, envs=None, gssapi_auth=False, skip_auth=False, **kwargs):", "body": "self.hostname = hostname<EOL>self.username = username<EOL>self.password = password<EOL>self.kwargs = kwargs<EOL>self.script_dir = script_dir<EOL>self.skip_auth = skip_auth<EOL>self.gssapi_auth = gssapi_auth<EOL>if self.skip_auth:<EOL><INDENT>self.ssh_client = NoAuthSSHClient()<EOL><DEDENT>else:<EOL><INDENT>self.ssh_client = paramiko.SSHClient()<EOL><DEDENT>self.ssh_client.load_system_host_keys()<EOL>self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())<EOL>self.envs = {}<EOL>if envs is not None:<EOL><INDENT>self.envs = envs<EOL><DEDENT>try:<EOL><INDENT>self.ssh_client.connect(<EOL>hostname,<EOL>username=username,<EOL>password=password,<EOL>allow_agent=True,<EOL>gss_auth=gssapi_auth,<EOL>gss_kex=gssapi_auth,<EOL>)<EOL>t = self.ssh_client.get_transport()<EOL>self.sftp_client = paramiko.SFTPClient.from_transport(t)<EOL><DEDENT>except paramiko.BadHostKeyException as e:<EOL><INDENT>raise BadHostKeyException(e, self.hostname)<EOL><DEDENT>except paramiko.AuthenticationException as e:<EOL><INDENT>raise AuthException(e, self.hostname)<EOL><DEDENT>except paramiko.SSHException as e:<EOL><INDENT>raise SSHException(e, self.hostname)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise SSHException(e, self.hostname)<EOL><DEDENT>", "docstring": "Initialize a persistent connection to the remote system.\n        We should know at this point whether ssh connectivity is possible\n\n        Args:\n            - hostname (String) : Hostname\n\n        KWargs:\n            - username (string) : Username on remote system\n            - password (string) : Password for remote system\n            - script_dir (string) : Full path to a script dir where\n              generated scripts could be sent to.\n            - envs (dict) : A dictionary of environment variables to be set when executing commands\n\n        
Raises:", "id": "f2592:c1:m0"}
{"signature": "def isdir(self, path):", "body": "result = True<EOL>try:<EOL><INDENT>self.sftp_client.lstat(path)<EOL><DEDENT>except FileNotFoundError:<EOL><INDENT>result = False<EOL><DEDENT>return result<EOL>", "docstring": "Return true if the path refers to an existing directory.\n\n        Parameters\n        ----------\n        path : str\n            Path of directory on the remote side to check.", "id": "f2592:c1:m7"}
{"signature": "def execute_no_wait(self, cmd, walltime=<NUM_LIT:2>, envs={}):", "body": "<EOL>stdin, stdout, stderr = self.ssh_client.exec_command(<EOL>self.prepend_envs(cmd, envs), bufsize=-<NUM_LIT:1>, timeout=walltime<EOL>)<EOL>return None, stdout, stderr<EOL>", "docstring": "Execute asynchronousely without waiting for exitcode\n\n        Args:\n            - cmd (string): Commandline string to be executed on the remote side\n            - walltime (int): timeout to exec_command\n\n        KWargs:\n            - envs (dict): A dictionary of env variables\n\n        Returns:\n            - None, stdout (readable stream), stderr (readable stream)\n\n        Raises:\n            - ChannelExecFailed (reason)", "id": "f2592:c1:m3"}
{"signature": "def ping_time(ip, n=<NUM_LIT:5>):", "body": "cmd = \"<STR_LIT>\".format(ip, n)<EOL>p = subprocess.Popen(cmd.split(\"<STR_LIT:U+0020>\"), stdout=subprocess.PIPE)<EOL>output = str(p.communicate()[<NUM_LIT:0>])<EOL>stats = output.split(\"<STR_LIT:\\n>\")[-<NUM_LIT:1>].split(\"<STR_LIT>\")[-<NUM_LIT:1>].split(\"<STR_LIT:/>\")<EOL>avg_ping_time = float(stats[<NUM_LIT:1>])  <EOL>return avg_ping_time * <NUM_LIT:1000><EOL>", "docstring": "Returns the average ping time in microseconds.\n\nNote: This function is inherently platform specific.\nIt currently works on Midway.", "id": "f2620:m0"}
{"signature": "def __init__(self,<EOL>host=None,<EOL>port=None,<EOL>logging_server_host='<STR_LIT:localhost>',<EOL>logging_server_port=<NUM_LIT>):", "body": "self.host = host<EOL>self.port = port<EOL>self.logging_server_host = logging_server_host<EOL>self.logging_server_port = logging_server_port<EOL>", "docstring": "Parameters\n----------\nhost : str\n     The hostname for running the visualization interface.\nport : int\n    The port for the visualization interface.\nlogging_server_host : str\n     The hostname for the logging server.\nlogging_server_port : int\n    The port for the logging server.", "id": "f2750:c1:m0"}
{"signature": "def __init__(self, monitoring_url, source_id=None, timeout=<NUM_LIT:10>):", "body": "self.monitoring_url = monitoring_url<EOL>self.sock_timeout = timeout<EOL>self.source_id = source_id<EOL>try:<EOL><INDENT>self.scheme, self.ip, port = (x.strip('<STR_LIT:/>') for x in monitoring_url.split('<STR_LIT::>'))<EOL>self.port = int(port)<EOL><DEDENT>except Exception:<EOL><INDENT>raise Exception(\"<STR_LIT>\".format(monitoring_url))<EOL><DEDENT>self.sock = socket.socket(socket.AF_INET,<EOL>socket.SOCK_DGRAM,<EOL>socket.IPPROTO_UDP)  <EOL>self.sock.settimeout(self.sock_timeout)<EOL>", "docstring": "Parameters\n----------\n\nmonitoring_url : str\n    URL of the form <scheme>://<IP>:<PORT>\nmessage : py obj\n    Python object to send, this will be pickled\nsource_id : str\n    String identifier of the source\ntimeout : int\n    timeout, default=10s", "id": "f2764:c0:m0"}
{"signature": "def monitor(pid, task_id, monitoring_hub_url, run_id, sleep_dur=<NUM_LIT:10>):", "body": "import psutil<EOL>radio = UDPRadio(monitoring_hub_url,<EOL>source_id=task_id)<EOL>simple = [\"<STR_LIT>\", '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:name>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:status>', '<STR_LIT:username>']<EOL>summable_values = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>pm = psutil.Process(pid)<EOL>pm.cpu_percent()<EOL>first_msg = True<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>d = {\"<STR_LIT>\" + str(k): v for k, v in pm.as_dict().items() if k in simple}<EOL>d[\"<STR_LIT>\"] = run_id<EOL>d[\"<STR_LIT>\"] = task_id<EOL>d['<STR_LIT>'] = sleep_dur<EOL>d['<STR_LIT>'] = first_msg<EOL>d['<STR_LIT>'] = datetime.datetime.now()<EOL>children = pm.children(recursive=True)<EOL>d[\"<STR_LIT>\"] = psutil.cpu_count()<EOL>d['<STR_LIT>'] = pm.memory_info().vms<EOL>d['<STR_LIT>'] = pm.memory_info().rss<EOL>d['<STR_LIT>'] = pm.cpu_times().user<EOL>d['<STR_LIT>'] = pm.cpu_times().system<EOL>d['<STR_LIT>'] = len(children)<EOL>try:<EOL><INDENT>d['<STR_LIT>'] = pm.io_counters().write_bytes<EOL>d['<STR_LIT>'] = pm.io_counters().read_bytes<EOL><DEDENT>except psutil._exceptions.AccessDenied:<EOL><INDENT>d['<STR_LIT>'] = <NUM_LIT:0><EOL>d['<STR_LIT>'] = <NUM_LIT:0><EOL><DEDENT>for child in children:<EOL><INDENT>for k, v in child.as_dict(attrs=summable_values).items():<EOL><INDENT>d['<STR_LIT>' + str(k)] += v<EOL><DEDENT>d['<STR_LIT>'] += child.cpu_times().user<EOL>d['<STR_LIT>'] += child.cpu_times().system<EOL>d['<STR_LIT>'] += child.memory_info().vms<EOL>d['<STR_LIT>'] += child.memory_info().rss<EOL>try:<EOL><INDENT>d['<STR_LIT>'] += child.io_counters().write_bytes<EOL>d['<STR_LIT>'] += child.io_counters().read_bytes<EOL><DEDENT>except psutil._exceptions.AccessDenied:<EOL><INDENT>d['<STR_LIT>'] += <NUM_LIT:0><EOL>d['<STR_LIT>'] += 
<NUM_LIT:0><EOL><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>radio.send(MessageType.TASK_INFO, task_id, d)<EOL>time.sleep(sleep_dur)<EOL>first_msg = False<EOL><DEDENT><DEDENT>", "docstring": "Internal\n    Monitors the Parsl task's resources by pointing psutil to the task's pid and watching it and its children.", "id": "f2764:m2"}
{"signature": "def send(self, message_type, task_id, message):", "body": "x = <NUM_LIT:0><EOL>try:<EOL><INDENT>buffer = pickle.dumps((self.source_id,   <EOL>int(time.time()),  <EOL>message_type,<EOL>message))<EOL><DEDENT>except Exception as e:<EOL><INDENT>print(\"<STR_LIT>\".format(e))<EOL>return<EOL><DEDENT>try:<EOL><INDENT>x = self.sock.sendto(buffer, (self.ip, self.port))<EOL><DEDENT>except socket.timeout:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>return x<EOL>", "docstring": "Sends a message to the UDP receiver\n\n        Parameter\n        ---------\n\n        message_type: monitoring.MessageType (enum)\n            In this case message type is RESOURCE_INFO most often\n        task_id: int\n            Task identifier of the task for which resource monitoring is being reported\n        message: object\n            Arbitrary pickle-able object that is to be sent\n\n        Returns:\n            # bytes sent", "id": "f2764:c0:m1"}
{"signature": "def __init__(self,<EOL>hub_address,<EOL>hub_port=None,<EOL>hub_port_range=(<NUM_LIT>, <NUM_LIT>),<EOL>database=None,              <EOL>visualization_server=None,  <EOL>client_address=\"<STR_LIT:127.0.0.1>\",<EOL>client_port=None,<EOL>monitoring_hub_address=\"<STR_LIT:127.0.0.1>\",<EOL>logdir=\"<STR_LIT:.>\",<EOL>logging_level=logging.DEBUG,<EOL>atexit_timeout=<NUM_LIT:3>    <EOL>):", "body": "try:<EOL><INDENT>os.makedirs(logdir)<EOL><DEDENT>except FileExistsError:<EOL><INDENT>pass<EOL><DEDENT>self.logger = start_file_logger(\"<STR_LIT>\".format(logdir),<EOL>name=\"<STR_LIT>\",<EOL>level=logging_level)<EOL>self.logger.debug(\"<STR_LIT>\")<EOL>if not hub_port:<EOL><INDENT>self.logger.critical(\"<STR_LIT>\")<EOL><DEDENT>self.hub_port = hub_port<EOL>self.hub_address = hub_address<EOL>self.database = database<EOL>self.visualization_server = visualization_server<EOL>self.atexit_timeout = atexit_timeout<EOL>self.loop_freq = <NUM_LIT>  <EOL>self.logger.debug(\"<STR_LIT>\".format(hub_port))<EOL>try:<EOL><INDENT>self.sock = socket.socket(socket.AF_INET,<EOL>socket.SOCK_DGRAM,<EOL>socket.IPPROTO_UDP)<EOL>self.sock.bind(('<STR_LIT>', hub_port))<EOL>self.sock.settimeout(self.loop_freq / <NUM_LIT:1000>)<EOL><DEDENT>except OSError:<EOL><INDENT>self.logger.critical(\"<STR_LIT>\")<EOL>self.hub_port = -<NUM_LIT:1><EOL><DEDENT>self._context = zmq.Context()<EOL>self.dfk_channel = self._context.socket(zmq.DEALER)<EOL>self.dfk_channel.set_hwm(<NUM_LIT:0>)<EOL>self.dfk_channel.RCVTIMEO = int(self.loop_freq)  <EOL>self.dfk_channel.connect(\"<STR_LIT>\".format(client_address, client_port))<EOL>", "docstring": "Initializes a monitoring configuration class.\n\n        Parameters\n        ----------\n        address : str\n            IP address of the node on which the monitoring hub will run, this address must be\n            reachable from the Parsl client as well as the worker nodes. Eg. 
<NNN>.<NNN>.<NNN>.<NNN>\n\n        port : int\n            Used with Elasticsearch logging, the port of where to access Elasticsearch. Required when using logging_type = 'elasticsearch'.\n\n        logging_endpoint : Endpoint object\n            This is generally a database object to which logging data can be pushed to from the\n            monitoring HUB.\n\n        workflow_name : str, optional\n            Name to record as the workflow base name, defaults to the name of the parsl script file if left as None.\n\n        workflow_version : str, optional\n            Optional workflow identification to distinguish between workflows with the same name, not used internally only for display to user.\n\n        atexit_timeout : float, optional\n            The amount of time in seconds to terminate the hub without receiving any messages, after the last dfk workflow message is received.", "id": "f2764:c2:m0"}
{"signature": "def hash_lookup(self, hashsum):", "body": "return self.memo_lookup_table[hashsum]<EOL>", "docstring": "Lookup a hash in the memoization table.\n\n        Will raise a KeyError if hash is not in the memoization lookup table.\n\n        Args:\n            - hashsum (str?): The same hashes used to uniquely identify apps+inputs\n\n        Returns:\n            - Lookup result, this is unlikely to be None, since the hashes are set by this\n              library and could not miss entried in it's dict.\n\n        Raises:\n            - KeyError: if hash not in table", "id": "f2767:c0:m3"}
{"signature": "def __init__(self, dfk, memoize=True, checkpoint={}):", "body": "self.dfk = dfk<EOL>self.memoize = memoize<EOL>if self.memoize:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>self.memo_lookup_table = checkpoint<EOL><DEDENT>else:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>self.memo_lookup_table = {}<EOL><DEDENT>", "docstring": "Initialize the memoizer.\n\n        Args:\n            - dfk (DFK obj): The DFK object\n\n        KWargs:\n            - memoize (Bool): enable memoization or not.\n            - checkpoint (Dict): A checkpoint loaded as a dict.", "id": "f2767:c0:m0"}
{"signature": "def make_hash(self, task):", "body": "<EOL>t = [serialize_object(task['<STR_LIT>'])[<NUM_LIT:0>],<EOL>serialize_object(task['<STR_LIT>'])[<NUM_LIT:0>],<EOL>serialize_object(task['<STR_LIT:args>'])[<NUM_LIT:0>],<EOL>serialize_object(task['<STR_LIT>'])[<NUM_LIT:0>],<EOL>serialize_object(task['<STR_LIT>'])[<NUM_LIT:0>]]<EOL>x = b'<STR_LIT>'.join(t)<EOL>hashedsum = hashlib.md5(x).hexdigest()<EOL>return hashedsum<EOL>", "docstring": "Create a hash of the task inputs.\n\n        This uses a serialization library borrowed from ipyparallel.\n        If this fails here, then all ipp calls are also likely to fail due to failure\n        at serialization.\n\n        Args:\n            - task (dict) : Task dictionary from dfk.tasks\n\n        Returns:\n            - hash (str) : A unique hash string", "id": "f2767:c0:m1"}
{"signature": "def unset_logging(self):", "body": "if self.logger_flag is True:<EOL><INDENT>return<EOL><DEDENT>root_logger = logging.getLogger()<EOL>for hndlr in root_logger.handlers:<EOL><INDENT>if hndlr not in self.prior_loghandlers:<EOL><INDENT>hndlr.setLevel(logging.ERROR)<EOL><DEDENT><DEDENT>self.logger_flag = True<EOL>", "docstring": "Mute newly added handlers to the root level, right after calling executor.status", "id": "f2768:c0:m3"}
{"signature": "def _strategy_noop(self, tasks, *args, kind=None, **kwargs):", "body": "", "docstring": "Do nothing.\n\n        Args:\n            - tasks (task_ids): Not used here.\n\n        KWargs:\n            - kind (Not used)", "id": "f2768:c0:m2"}
{"signature": "@property<EOL><INDENT>def config(self):<DEDENT>", "body": "return self._config<EOL>", "docstring": "Returns the fully initialized config that the DFK is actively using.\n\n        DO *NOT* update.\n\n        Returns:\n             - config (dict)", "id": "f2769:c0:m3"}
{"signature": "def wait_for_current_tasks(self):", "body": "logger.info(\"<STR_LIT>\")<EOL>for task_id in self.tasks:<EOL><INDENT>fut = self.tasks[task_id]['<STR_LIT>']<EOL>if not fut.done():<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(task_id))<EOL>fut.exception()<EOL><DEDENT><DEDENT>logger.info(\"<STR_LIT>\")<EOL>", "docstring": "Waits for all tasks in the task list to be completed, by waiting for their\n        AppFuture to be completed. This method will not necessarily wait for any tasks\n        added after cleanup has started (such as data stageout?)", "id": "f2769:c0:m15"}
{"signature": "def _create_task_log_info(self, task_id, fail_mode=None):", "body": "info_to_monitor = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT:status>', '<STR_LIT:id>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>task_log_info = {\"<STR_LIT>\" + k: self.tasks[task_id][k] for k in info_to_monitor}<EOL>task_log_info['<STR_LIT>'] = self.run_id<EOL>task_log_info['<STR_LIT>'] = datetime.datetime.now()<EOL>task_log_info['<STR_LIT>'] = self.tasks[task_id]['<STR_LIT:status>'].name<EOL>task_log_info['<STR_LIT>'] = self.tasks_failed_count<EOL>task_log_info['<STR_LIT>'] = self.tasks_completed_count<EOL>task_log_info['<STR_LIT>'] = str(self.tasks[task_id]['<STR_LIT>'].get('<STR_LIT>', None))<EOL>task_log_info['<STR_LIT>'] = str(self.tasks[task_id]['<STR_LIT>'].get('<STR_LIT>', None))<EOL>task_log_info['<STR_LIT>'] = self.tasks[task_id]['<STR_LIT>'].get('<STR_LIT>', None)<EOL>task_log_info['<STR_LIT>'] = self.tasks[task_id]['<STR_LIT>'].get('<STR_LIT>', None)<EOL>task_log_info['<STR_LIT>'] = None<EOL>if self.tasks[task_id]['<STR_LIT>'] is not None:<EOL><INDENT>task_log_info['<STR_LIT>'] = \"<STR_LIT:U+002C>\".join([str(t._tid) for t in self.tasks[task_id]['<STR_LIT>']])<EOL><DEDENT>task_log_info['<STR_LIT>'] = None<EOL>if self.tasks[task_id]['<STR_LIT>'] is not None:<EOL><INDENT>task_log_info['<STR_LIT>'] = (self.tasks[task_id]['<STR_LIT>'] -<EOL>self.tasks[task_id]['<STR_LIT>']).total_seconds()<EOL><DEDENT>if fail_mode is not None:<EOL><INDENT>task_log_info['<STR_LIT>'] = fail_mode<EOL><DEDENT>return task_log_info<EOL>", "docstring": "Create the dictionary that will be included in the log.", "id": "f2769:c0:m1"}
{"signature": "def __init__(self, config=Config()):", "body": "<EOL>self.cleanup_called = False<EOL>if isinstance(config, dict):<EOL><INDENT>raise ConfigurationError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>self._config = config<EOL>self.run_dir = make_rundir(config.run_dir)<EOL>parsl.set_file_logger(\"<STR_LIT>\".format(self.run_dir), level=logging.DEBUG)<EOL>logger.debug(\"<STR_LIT>\".format(config))<EOL>logger.info(\"<STR_LIT>\".format(get_version()))<EOL>self.checkpoint_lock = threading.Lock()<EOL>self.usage_tracker = UsageTracker(self)<EOL>self.usage_tracker.send_message()<EOL>self.tasks_completed_count = <NUM_LIT:0><EOL>self.tasks_failed_count = <NUM_LIT:0><EOL>self.monitoring = config.monitoring<EOL>if self.monitoring:<EOL><INDENT>if self.monitoring.logdir is None:<EOL><INDENT>self.monitoring.logdir = self.run_dir<EOL><DEDENT>self.monitoring.start()<EOL><DEDENT>self.time_began = datetime.datetime.now()<EOL>self.time_completed = None<EOL>self.run_id = str(uuid4())<EOL>logger.info(\"<STR_LIT>\" + self.run_id)<EOL>self.workflow_name = None<EOL>if self.monitoring is not None and self.monitoring.workflow_name is not None:<EOL><INDENT>self.workflow_name = self.monitoring.workflow_name<EOL><DEDENT>else:<EOL><INDENT>for frame in inspect.stack():<EOL><INDENT>fname = os.path.basename(str(frame.filename))<EOL>parsl_file_names = ['<STR_LIT>']<EOL>if fname not in parsl_file_names:<EOL><INDENT>self.workflow_name = fname<EOL>break<EOL><DEDENT><DEDENT><DEDENT>self.workflow_version = str(self.time_began)<EOL>if self.monitoring is not None and self.monitoring.workflow_version is not None:<EOL><INDENT>self.workflow_version = self.monitoring.workflow_version<EOL><DEDENT>workflow_info = {<EOL>'<STR_LIT>': \"<STR_LIT>\".format(sys.version_info.major,<EOL>sys.version_info.minor,<EOL>sys.version_info.micro),<EOL>'<STR_LIT>': get_version(),<EOL>\"<STR_LIT>\": self.time_began,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self.run_id,<EOL>'<STR_LIT>': 
self.workflow_name,<EOL>'<STR_LIT>': self.workflow_version,<EOL>'<STR_LIT>': self.run_dir,<EOL>'<STR_LIT>': self.tasks_completed_count,<EOL>'<STR_LIT>': self.tasks_failed_count,<EOL>'<STR_LIT:user>': getuser(),<EOL>'<STR_LIT:host>': gethostname(),<EOL>}<EOL>if self.monitoring:<EOL><INDENT>self.monitoring.send(MessageType.WORKFLOW_INFO,<EOL>workflow_info)<EOL><DEDENT>checkpoints = self.load_checkpoints(config.checkpoint_files)<EOL>self.memoizer = Memoizer(self, memoize=config.app_cache, checkpoint=checkpoints)<EOL>self.checkpointed_tasks = <NUM_LIT:0><EOL>self._checkpoint_timer = None<EOL>self.checkpoint_mode = config.checkpoint_mode<EOL>self.data_manager = DataManager(self, max_threads=config.data_management_max_threads)<EOL>self.executors = {}<EOL>self.add_executors(config.executors + [self.data_manager])<EOL>if self.checkpoint_mode == \"<STR_LIT>\":<EOL><INDENT>try:<EOL><INDENT>h, m, s = map(int, config.checkpoint_period.split('<STR_LIT::>'))<EOL>checkpoint_period = (h * <NUM_LIT>) + (m * <NUM_LIT>) + s<EOL>self._checkpoint_timer = Timer(self.checkpoint, interval=checkpoint_period)<EOL><DEDENT>except Exception:<EOL><INDENT>logger.error(\"<STR_LIT>\".format(config.checkpoint_period))<EOL>self._checkpoint_timer = Timer(self.checkpoint, interval=(<NUM_LIT:30> * <NUM_LIT>))<EOL><DEDENT><DEDENT>if any([x.managed for x in config.executors]):<EOL><INDENT>self.flowcontrol = FlowControl(self)<EOL><DEDENT>else:<EOL><INDENT>self.flowcontrol = FlowNoControl(self)<EOL><DEDENT>self.task_count = <NUM_LIT:0><EOL>self.tasks = {}<EOL>self.submitter_lock = threading.Lock()<EOL>atexit.register(self.atexit_cleanup)<EOL>", "docstring": "Initialize the DataFlowKernel.\n\n        Parameters\n        ----------\n        config : Config\n            A specification of all configuration options. For more details see the\n            :class:~`parsl.config.Config` documentation.", "id": "f2769:c0:m0"}
{"signature": "def launch_task(self, task_id, executable, *args, **kwargs):", "body": "self.tasks[task_id]['<STR_LIT>'] = datetime.datetime.now()<EOL>hit, memo_fu = self.memoizer.check_memo(task_id, self.tasks[task_id])<EOL>if hit:<EOL><INDENT>logger.info(\"<STR_LIT>\".format(task_id))<EOL>return memo_fu<EOL><DEDENT>executor_label = self.tasks[task_id][\"<STR_LIT>\"]<EOL>try:<EOL><INDENT>executor = self.executors[executor_label]<EOL><DEDENT>except Exception:<EOL><INDENT>logger.exception(\"<STR_LIT>\".format(task_id, executor_label, self._config))<EOL><DEDENT>if self.monitoring is not None and self.monitoring.resource_monitoring_enabled:<EOL><INDENT>executable = self.monitoring.monitor_wrapper(executable, task_id,<EOL>self.monitoring.monitoring_hub_url,<EOL>self.run_id,<EOL>self.monitoring.resource_monitoring_interval)<EOL><DEDENT>with self.submitter_lock:<EOL><INDENT>exec_fu = executor.submit(executable, *args, **kwargs)<EOL><DEDENT>self.tasks[task_id]['<STR_LIT:status>'] = States.launched<EOL>if self.monitoring is not None:<EOL><INDENT>task_log_info = self._create_task_log_info(task_id, '<STR_LIT>')<EOL>self.monitoring.send(MessageType.TASK_INFO, task_log_info)<EOL><DEDENT>exec_fu.retries_left = self._config.retries -self.tasks[task_id]['<STR_LIT>']<EOL>logger.info(\"<STR_LIT>\".format(task_id, executor.label))<EOL>return exec_fu<EOL>", "docstring": "Handle the actual submission of the task to the executor layer.\n\n        If the app task has the executors attributes not set (default=='all')\n        the task is launched on a randomly selected executor from the\n        list of executors. 
This behavior could later be updated to support\n        binding to executors based on user specified criteria.\n\n        If the app task specifies a particular set of executors, it will be\n        targeted at those specific executors.\n\n        Args:\n            task_id (uuid string) : A uuid string that uniquely identifies the task\n            executable (callable) : A callable object\n            args (list of positional args)\n            kwargs (arbitrary keyword arguments)\n\n\n        Returns:\n            Future that tracks the execution of the submitted executable", "id": "f2769:c0:m7"}
{"signature": "def _count_deps(self, depends):", "body": "count = <NUM_LIT:0><EOL>for dep in depends:<EOL><INDENT>if isinstance(dep, Future):<EOL><INDENT>if not dep.done():<EOL><INDENT>count += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return count<EOL>", "docstring": "Internal.\n\n        Count the number of unresolved futures in the list depends.", "id": "f2769:c0:m2"}
{"signature": "def _load_checkpoints(self, checkpointDirs):", "body": "memo_lookup_table = {}<EOL>for checkpoint_dir in checkpointDirs:<EOL><INDENT>logger.info(\"<STR_LIT>\".format(checkpoint_dir))<EOL>checkpoint_file = os.path.join(checkpoint_dir, '<STR_LIT>')<EOL>try:<EOL><INDENT>with open(checkpoint_file, '<STR_LIT:rb>') as f:<EOL><INDENT>while True:<EOL><INDENT>try:<EOL><INDENT>data = pickle.load(f)<EOL>memo_fu = Future()<EOL>if data['<STR_LIT>']:<EOL><INDENT>memo_fu.set_exception(data['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>memo_fu.set_result(data['<STR_LIT:result>'])<EOL><DEDENT>memo_lookup_table[data['<STR_LIT>']] = memo_fu<EOL><DEDENT>except EOFError:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>except FileNotFoundError:<EOL><INDENT>reason = \"<STR_LIT>\".format(<EOL>checkpoint_file)<EOL>logger.error(reason)<EOL>raise BadCheckpoint(reason)<EOL><DEDENT>except Exception:<EOL><INDENT>reason = \"<STR_LIT>\".format(<EOL>checkpoint_file)<EOL>logger.error(reason)<EOL>raise BadCheckpoint(reason)<EOL><DEDENT>logger.info(\"<STR_LIT>\".format(checkpoint_file,<EOL>len(memo_lookup_table.keys())))<EOL><DEDENT>return memo_lookup_table<EOL>", "docstring": "Load a checkpoint file into a lookup table.\n\n        The data being loaded from the pickle file mostly contains input\n        attributes of the task: func, args, kwargs, env...\n        To simplify the check of whether the exact task has been completed\n        in the checkpoint, we hash these input params and use it as the key\n        for the memoized lookup table.\n\n        Args:\n            - checkpointDirs (list) : List of filepaths to checkpoints\n              Eg. ['runinfo/001', 'runinfo/002']\n\n        Returns:\n            - memoized_lookup_table (dict)", "id": "f2769:c0:m18"}
{"signature": "@classmethod<EOL><INDENT>@typeguard.typechecked<EOL>def load(cls, config: Optional[Config] = None):<DEDENT>", "body": "if cls._dfk is not None:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>if config is None:<EOL><INDENT>cls._dfk = DataFlowKernel(Config())<EOL><DEDENT>else:<EOL><INDENT>cls._dfk = DataFlowKernel(config)<EOL><DEDENT>return cls._dfk<EOL>", "docstring": "Load a DataFlowKernel.\n\n        Args:\n            - config (Config) : Configuration to load. This config will be passed to a\n              new DataFlowKernel instantiation which will be set as the active DataFlowKernel.\n        Returns:\n            - DataFlowKernel : The loaded DataFlowKernel object.", "id": "f2769:c1:m1"}
{"signature": "def handle_exec_update(self, task_id, future):", "body": "try:<EOL><INDENT>res = future.result()<EOL>if isinstance(res, RemoteExceptionWrapper):<EOL><INDENT>res.reraise()<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>logger.exception(\"<STR_LIT>\".format(task_id))<EOL>self.tasks[task_id]['<STR_LIT>'].append(future._exception)<EOL>self.tasks[task_id]['<STR_LIT>'] += <NUM_LIT:1><EOL>if not self._config.lazy_errors:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self.tasks[task_id]['<STR_LIT:status>'] = States.failed<EOL>if self.monitoring:<EOL><INDENT>task_log_info = self._create_task_log_info(task_id, '<STR_LIT>')<EOL>self.monitoring.send(MessageType.TASK_INFO, task_log_info)<EOL><DEDENT>return<EOL><DEDENT>if self.tasks[task_id]['<STR_LIT>'] <= self._config.retries:<EOL><INDENT>self.tasks[task_id]['<STR_LIT:status>'] = States.pending<EOL>logger.debug(\"<STR_LIT>\".format(task_id))<EOL><DEDENT>else:<EOL><INDENT>logger.info(\"<STR_LIT>\".format(task_id,<EOL>self._config.retries))<EOL>self.tasks[task_id]['<STR_LIT:status>'] = States.failed<EOL>self.tasks_failed_count += <NUM_LIT:1><EOL>self.tasks[task_id]['<STR_LIT>'] = datetime.datetime.now()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.tasks[task_id]['<STR_LIT:status>'] = States.done<EOL>self.tasks_completed_count += <NUM_LIT:1><EOL>logger.info(\"<STR_LIT>\".format(task_id))<EOL>self.tasks[task_id]['<STR_LIT>'] = datetime.datetime.now()<EOL><DEDENT>if self.monitoring:<EOL><INDENT>task_log_info = self._create_task_log_info(task_id, '<STR_LIT>')<EOL>self.monitoring.send(MessageType.TASK_INFO, task_log_info)<EOL><DEDENT>if self.tasks[task_id]['<STR_LIT:status>'] == States.pending:<EOL><INDENT>self.launch_if_ready(task_id)<EOL><DEDENT>return<EOL>", "docstring": "This function is called only as a callback from an execution\n        attempt reaching a final state (either successfully or failing).\n\n        It will launch retries if necessary, and update the task\n        structure.\n\n        Args:\n         
    task_id (string) : Task id which is a uuid string\n             future (Future) : The future object corresponding to the task which\n             makes this callback\n\n        KWargs:\n             memo_cbk(Bool) : Indicates that the call is coming from a memo update,\n             that does not require additional memo updates.", "id": "f2769:c0:m4"}
{"signature": "def launch_if_ready(self, task_id):", "body": "if self._count_deps(self.tasks[task_id]['<STR_LIT>']) == <NUM_LIT:0>:<EOL><INDENT>new_args, kwargs, exceptions = self.sanitize_and_wrap(task_id,<EOL>self.tasks[task_id]['<STR_LIT:args>'],<EOL>self.tasks[task_id]['<STR_LIT>'])<EOL>self.tasks[task_id]['<STR_LIT:args>'] = new_args<EOL>self.tasks[task_id]['<STR_LIT>'] = kwargs<EOL>if not exceptions:<EOL><INDENT>exec_fu = None<EOL>with self.tasks[task_id]['<STR_LIT>']:<EOL><INDENT>if self.tasks[task_id]['<STR_LIT:status>'] == States.pending:<EOL><INDENT>exec_fu = self.launch_task(<EOL>task_id, self.tasks[task_id]['<STR_LIT>'], *new_args, **kwargs)<EOL><DEDENT><DEDENT>if exec_fu:<EOL><INDENT>try:<EOL><INDENT>exec_fu.add_done_callback(partial(self.handle_exec_update, task_id))<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.error(\"<STR_LIT>\".format(e))<EOL><DEDENT>self.tasks[task_id]['<STR_LIT>'] = exec_fu<EOL>try:<EOL><INDENT>self.tasks[task_id]['<STR_LIT>'].update_parent(exec_fu)<EOL>self.tasks[task_id]['<STR_LIT>'] = exec_fu<EOL><DEDENT>except AttributeError as e:<EOL><INDENT>logger.error(<EOL>\"<STR_LIT>\".format(task_id))<EOL>raise e<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>logger.info(<EOL>\"<STR_LIT>\".format(task_id))<EOL>self.tasks[task_id]['<STR_LIT:status>'] = States.dep_fail<EOL>if self.monitoring is not None:<EOL><INDENT>task_log_info = self._create_task_log_info(task_id, '<STR_LIT>')<EOL>self.monitoring.send(MessageType.TASK_INFO, task_log_info)<EOL><DEDENT>try:<EOL><INDENT>fu = Future()<EOL>fu.retries_left = <NUM_LIT:0><EOL>self.tasks[task_id]['<STR_LIT>'] = fu<EOL>self.tasks[task_id]['<STR_LIT>'].update_parent(fu)<EOL>fu.set_exception(DependencyError(exceptions,<EOL>task_id,<EOL>None))<EOL><DEDENT>except AttributeError as e:<EOL><INDENT>logger.error(<EOL>\"<STR_LIT>\".format(task_id))<EOL>raise e<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "launch_if_ready will launch the specified task, if it is ready\nto run (for example, without 
dependencies, and in pending state).\n\nThis should be called by any piece of the DataFlowKernel that\nthinks a task may have become ready to run.\n\nIt is not an error to call launch_if_ready on a task that is not\nready to run - launch_if_ready will not incorrectly launch that\ntask.\n\nlaunch_if_ready is thread safe, so may be called from any thread\nor callback.", "id": "f2769:c0:m6"}
{"signature": "def sanitize_and_wrap(self, task_id, args, kwargs):", "body": "dep_failures = []<EOL>new_args = []<EOL>for dep in args:<EOL><INDENT>if isinstance(dep, Future):<EOL><INDENT>try:<EOL><INDENT>new_args.extend([dep.result()])<EOL><DEDENT>except Exception as e:<EOL><INDENT>if self.tasks[dep.tid]['<STR_LIT:status>'] in FINAL_FAILURE_STATES:<EOL><INDENT>dep_failures.extend([e])<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>new_args.extend([dep])<EOL><DEDENT><DEDENT>for key in kwargs:<EOL><INDENT>dep = kwargs[key]<EOL>if isinstance(dep, Future):<EOL><INDENT>try:<EOL><INDENT>kwargs[key] = dep.result()<EOL><DEDENT>except Exception as e:<EOL><INDENT>if self.tasks[dep.tid]['<STR_LIT:status>'] in FINAL_FAILURE_STATES:<EOL><INDENT>dep_failures.extend([e])<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if '<STR_LIT>' in kwargs:<EOL><INDENT>new_inputs = []<EOL>for dep in kwargs['<STR_LIT>']:<EOL><INDENT>if isinstance(dep, Future):<EOL><INDENT>try:<EOL><INDENT>new_inputs.extend([dep.result()])<EOL><DEDENT>except Exception as e:<EOL><INDENT>if self.tasks[dep.tid]['<STR_LIT:status>'] in FINAL_FAILURE_STATES:<EOL><INDENT>dep_failures.extend([e])<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>new_inputs.extend([dep])<EOL><DEDENT><DEDENT>kwargs['<STR_LIT>'] = new_inputs<EOL><DEDENT>return new_args, kwargs, dep_failures<EOL>", "docstring": "This function should be called **ONLY** when all the futures we track have been resolved.\n\n        If the user hid futures a level below, we will not catch\n        it, and will (most likely) result in a type error.\n\n        Args:\n             task_id (uuid str) : Task id\n             func (Function) : App function\n             args (List) : Positional args to app function\n             kwargs (Dict) : Kwargs to app function\n\n        Return:\n             partial function evaluated with all dependencies in  args, kwargs and kwargs['inputs'] evaluated.", "id": "f2769:c0:m10"}
{"signature": "def _wake_up_timer(self, kill_event):", "body": "<EOL>while True:<EOL><INDENT>prev = self._wake_up_time<EOL>time_to_die = kill_event.wait(float(max(prev - time.time(), <NUM_LIT:0>)))<EOL>if time_to_die:<EOL><INDENT>return<EOL><DEDENT>if prev == self._wake_up_time:<EOL><INDENT>self.make_callback(kind='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "Internal. This is the function that the thread will execute.\n        waits on an event so that the thread can make a quick exit when close() is called\n\n        Args:\n            - kill_event (threading.Event) : Event to wait on", "id": "f2770:c2:m1"}
{"signature": "def close(self):", "body": "self._kill_event.set()<EOL>self._thread.join()<EOL>", "docstring": "Merge the threads and terminate.", "id": "f2770:c2:m3"}
{"signature": "def make_callback(self, kind=None):", "body": "self._wake_up_time = time.time() + self.interval<EOL>self.callback(*self.cb_args)<EOL>", "docstring": "Makes the callback and resets the timer.", "id": "f2770:c2:m2"}
{"signature": "def notify(self, event_id):", "body": "self._event_buffer.extend([event_id])<EOL>self._event_count += <NUM_LIT:1><EOL>if self._event_count >= self.threshold:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self.make_callback(kind=\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Let the FlowControl system know that there is an event.", "id": "f2770:c1:m2"}
{"signature": "def close(self):", "body": "pass<EOL>", "docstring": "This close fn does nothing.", "id": "f2770:c0:m2"}
{"signature": "def __init__(self, dfk, ip='<STR_LIT>', port=<NUM_LIT>,<EOL>domain_name='<STR_LIT>'):", "body": "self.domain_name = domain_name<EOL>self.ip = ip<EOL>self.sock_timeout = <NUM_LIT:5><EOL>self.UDP_PORT = port<EOL>self.UDP_IP = None<EOL>self.procs = []<EOL>self.dfk = dfk<EOL>self.config = self.dfk.config<EOL>self.uuid = str(uuid.uuid4())<EOL>self.parsl_version = PARSL_VERSION<EOL>self.python_version = \"<STR_LIT>\".format(sys.version_info.major,<EOL>sys.version_info.minor,<EOL>sys.version_info.micro)<EOL>self.test_mode, self.tracking_enabled = self.check_tracking_enabled()<EOL>logger.debug(\"<STR_LIT>\".format(self.tracking_enabled))<EOL>logger.debug(\"<STR_LIT>\".format(self.test_mode))<EOL>self.initialized = False<EOL>", "docstring": "Initialize usage tracking unless the user has opted-out.\n\n        We will try to resolve the hostname specified in kwarg:domain_name\n        and if that fails attempt to use the kwarg:ip. Determining the\n        IP and sending message is threaded to avoid slowing down DFK\n        initialization.\n\n        Tracks usage stats by inspecting the internal state of the dfk.\n\n        Args:\n             - dfk (DFK object) : Data Flow Kernel object\n\n        KWargs:\n             - ip (string) : IP address\n             - port (int) : Port number, Default:50077\n             - domain_name (string) : Domain name, will override IP\n                  Default: tracking.parsl-project.org", "id": "f2771:c0:m0"}
{"signature": "def check_tracking_enabled(self):", "body": "track = True   <EOL>test = False  <EOL>testvar = str(os.environ.get(\"<STR_LIT>\", '<STR_LIT:None>')).lower()<EOL>if testvar == '<STR_LIT:true>':<EOL><INDENT>test = True<EOL><DEDENT>if not self.config.usage_tracking:<EOL><INDENT>track = False<EOL><DEDENT>envvar = str(os.environ.get(\"<STR_LIT>\", True)).lower()<EOL>if envvar == \"<STR_LIT:false>\":<EOL><INDENT>track = False<EOL><DEDENT>return test, track<EOL>", "docstring": "By default tracking is enabled.\n\n        If Test mode is set via env variable PARSL_TESTING, a test flag is set\n\n        Tracking is disabled if :\n            1. config[\"globals\"][\"usageTracking\"] is set to False (Bool)\n            2. Environment variable PARSL_TRACKING is set to false (case insensitive)", "id": "f2771:c0:m1"}
{"signature": "def async_process(fn):", "body": "def run(*args, **kwargs):<EOL><INDENT>proc = mp.Process(target=fn, args=args, kwargs=kwargs)<EOL>proc.start()<EOL>return proc<EOL><DEDENT>return run<EOL>", "docstring": "Decorator function to launch a function as a separate process", "id": "f2771:m0"}
{"signature": "def close(self):", "body": "for proc in self.procs:<EOL><INDENT>proc.terminate()<EOL><DEDENT>", "docstring": "We terminate (SIGTERM) the processes added to the self.procs list", "id": "f2771:c0:m7"}
{"signature": "def construct_end_message(self):", "body": "app_count = self.dfk.task_count<EOL>site_count = len([x for x in self.dfk.config.executors if x.managed])<EOL>app_fails = len([t for t in self.dfk.tasks if<EOL>self.dfk.tasks[t]['<STR_LIT:status>'] in FINAL_FAILURE_STATES])<EOL>message = {'<STR_LIT>': self.uuid,<EOL>'<STR_LIT:end>': time.time(),<EOL>'<STR_LIT>': app_count,<EOL>'<STR_LIT>': site_count,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': app_fails,<EOL>'<STR_LIT:test>': self.test_mode,<EOL>}<EOL>return json.dumps(message)<EOL>", "docstring": "Collect the final run information at the time of DFK cleanup.\n\n        Returns:\n             - Message dict dumped as json string, ready for UDP", "id": "f2771:c0:m3"}
{"signature": "def construct_start_message(self):", "body": "uname = getpass.getuser().encode('<STR_LIT>')<EOL>hashed_username = hashlib.sha256(uname).hexdigest()[<NUM_LIT:0>:<NUM_LIT:10>]<EOL>hname = socket.gethostname().encode('<STR_LIT>')<EOL>hashed_hostname = hashlib.sha256(hname).hexdigest()[<NUM_LIT:0>:<NUM_LIT:10>]<EOL>message = {'<STR_LIT>': self.uuid,<EOL>'<STR_LIT>': hashed_username,<EOL>'<STR_LIT>': hashed_hostname,<EOL>'<STR_LIT:test>': self.test_mode,<EOL>'<STR_LIT>': self.parsl_version,<EOL>'<STR_LIT>': self.python_version,<EOL>'<STR_LIT>': platform.system(),<EOL>'<STR_LIT>': platform.release(),<EOL>'<STR_LIT:start>': time.time()}<EOL>return json.dumps(message)<EOL>", "docstring": "Collect preliminary run info at the start of the DFK.\n\n        Returns :\n              - Message dict dumped as json string, ready for UDP", "id": "f2771:c0:m2"}
{"signature": "def make_rundir(path):", "body": "try:<EOL><INDENT>if not os.path.exists(path):<EOL><INDENT>os.makedirs(path)<EOL><DEDENT>prev_rundirs = glob(os.path.join(path, \"<STR_LIT>\"))<EOL>current_rundir = os.path.join(path, '<STR_LIT>')<EOL>if prev_rundirs:<EOL><INDENT>x = sorted([int(os.path.basename(x)) for x in prev_rundirs])[-<NUM_LIT:1>]<EOL>current_rundir = os.path.join(path, '<STR_LIT>'.format(x + <NUM_LIT:1>))<EOL><DEDENT>os.makedirs(current_rundir)<EOL>logger.debug(\"<STR_LIT>\".format(current_rundir))<EOL>return os.path.abspath(current_rundir)<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.error(\"<STR_LIT>\")<EOL>logger.error(\"<STR_LIT>\".format(e))<EOL>raise<EOL><DEDENT>", "docstring": "When a path has not been specified, make the run directory.\n\n    Creates a rundir with the following hierarchy:\n        ./runinfo <- Home of all run directories\n          |----000\n          |----001 <- Directories for each run\n          | ....\n          |----NNN\n\n    Kwargs:\n        - path (str): String path to a specific run dir\n               Default : None.", "id": "f2774:m0"}
{"signature": "def remote_side_bash_executor(func, *args, **kwargs):", "body": "import os<EOL>import time<EOL>import subprocess<EOL>import logging<EOL>import parsl.app.errors as pe<EOL>logging.basicConfig(filename='<STR_LIT>'.format(time.time()), level=logging.DEBUG)<EOL>func_name = func.__name__<EOL>partial_cmdline = None<EOL>try:<EOL><INDENT>partial_cmdline = func(*args, **kwargs)<EOL>executable = partial_cmdline.format(*args, **kwargs)<EOL><DEDENT>except AttributeError as e:<EOL><INDENT>if partial_cmdline is not None:<EOL><INDENT>raise pe.AppBadFormatting(\"<STR_LIT>\".format(func_name, e))<EOL><DEDENT>else:<EOL><INDENT>raise pe.BashAppNoReturn(\"<STR_LIT>\".format(func_name, e), None)<EOL><DEDENT><DEDENT>except IndexError as e:<EOL><INDENT>raise pe.AppBadFormatting(\"<STR_LIT>\".format(func_name, e))<EOL><DEDENT>except Exception as e:<EOL><INDENT>logging.error(\"<STR_LIT>\".format(func_name, e))<EOL>raise e<EOL><DEDENT>logging.debug(\"<STR_LIT>\", executable)<EOL>def open_std_fd(fdname):<EOL><INDENT>stdfspec = kwargs.get(fdname)  <EOL>if stdfspec is None:<EOL><INDENT>return None<EOL><DEDENT>elif isinstance(stdfspec, str):<EOL><INDENT>fname = stdfspec<EOL>mode = '<STR_LIT>'<EOL><DEDENT>elif isinstance(stdfspec, tuple):<EOL><INDENT>if len(stdfspec) != <NUM_LIT:2>:<EOL><INDENT>raise pe.BadStdStreamFile(\"<STR_LIT>\" % (fdname, len(stdfspec)), TypeError('<STR_LIT>'))<EOL><DEDENT>fname, mode = stdfspec<EOL><DEDENT>else:<EOL><INDENT>raise pe.BadStdStreamFile(\"<STR_LIT>\" % (fdname, str(type(stdfspec))), TypeError('<STR_LIT>'))<EOL><DEDENT>try:<EOL><INDENT>fd = open(fname, mode)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise pe.BadStdStreamFile(fname, e)<EOL><DEDENT>return fd<EOL><DEDENT>std_out = open_std_fd('<STR_LIT>')<EOL>std_err = open_std_fd('<STR_LIT>')<EOL>timeout = kwargs.get('<STR_LIT>')<EOL>returncode = None<EOL>try:<EOL><INDENT>proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, 
executable='<STR_LIT>')<EOL>proc.wait(timeout=timeout)<EOL>returncode = proc.returncode<EOL><DEDENT>except subprocess.TimeoutExpired:<EOL><INDENT>raise pe.AppTimeout(\"<STR_LIT>\".format(func_name, timeout))<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise pe.AppException(\"<STR_LIT>\".format(func_name, proc.returncode), e)<EOL><DEDENT>if returncode != <NUM_LIT:0>:<EOL><INDENT>raise pe.AppFailure(\"<STR_LIT>\".format(func_name, proc.returncode), proc.returncode)<EOL><DEDENT>missing = []<EOL>for outputfile in kwargs.get('<STR_LIT>', []):<EOL><INDENT>fpath = outputfile<EOL>if type(outputfile) != str:<EOL><INDENT>fpath = outputfile.filepath<EOL><DEDENT>if not os.path.exists(fpath):<EOL><INDENT>missing.extend([outputfile])<EOL><DEDENT><DEDENT>if missing:<EOL><INDENT>raise pe.MissingOutputs(\"<STR_LIT>\".format(func_name), missing)<EOL><DEDENT>return returncode<EOL>", "docstring": "Execute the bash app type function and return the command line string.\n\n    This string is reformatted with the *args, and **kwargs\n    from call time.", "id": "f2777:m0"}
{"signature": "@property<EOL><INDENT>def tid(self):<DEDENT>", "body": "return self._tid<EOL>", "docstring": "Returns the task_id of the task that will resolve this DataFuture.", "id": "f2779:c0:m2"}
{"signature": "@property<EOL><INDENT>def filepath(self):<DEDENT>", "body": "return self.file_obj.filepath<EOL>", "docstring": "Filepath of the File object this datafuture represents.", "id": "f2779:c0:m3"}
{"signature": "@property<EOL><INDENT>def filename(self):<DEDENT>", "body": "return self.filepath<EOL>", "docstring": "Filepath of the File object this datafuture represents.", "id": "f2779:c0:m4"}
{"signature": "def __init__(self, func, data_flow_kernel=None, walltime=<NUM_LIT>, executors='<STR_LIT:all>', cache=False):", "body": "self.__name__ = func.__name__<EOL>self.func = func<EOL>self.data_flow_kernel = data_flow_kernel<EOL>self.status = '<STR_LIT>'<EOL>self.executors = executors<EOL>self.cache = cache<EOL>if not (isinstance(executors, list) or isinstance(executors, str)):<EOL><INDENT>logger.error(\"<STR_LIT>\".format(<EOL>func.__name__))<EOL><DEDENT>if cache is True:<EOL><INDENT>try:<EOL><INDENT>self.fn_source = getsource(func)<EOL><DEDENT>except OSError:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self.fn_source = func.__name__<EOL><DEDENT>self.func_hash = md5(self.fn_source.encode('<STR_LIT:utf-8>')).hexdigest()<EOL><DEDENT>else:<EOL><INDENT>self.func_hash = func.__name__<EOL><DEDENT>params = signature(func).parameters<EOL>self.kwargs = {}<EOL>if '<STR_LIT>' in params:<EOL><INDENT>self.kwargs['<STR_LIT>'] = params['<STR_LIT>'].default<EOL><DEDENT>if '<STR_LIT>' in params:<EOL><INDENT>self.kwargs['<STR_LIT>'] = params['<STR_LIT>'].default<EOL><DEDENT>self.outputs = params['<STR_LIT>'].default if '<STR_LIT>' in params else []<EOL>self.inputs = params['<STR_LIT>'].default if '<STR_LIT>' in params else []<EOL>", "docstring": "Construct the App object.\n\n        Args:\n             - func (function): Takes the function to be made into an App\n\n        Kwargs:\n             - data_flow_kernel (DataFlowKernel): The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for\n               managing this app. This can be omitted only\n               after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`.\n             - walltime (int) : Walltime in seconds for the app execution.\n             - executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'.\n             - cache (Bool) : Enable caching of this app ?\n\n        Returns:\n             - App object.", "id": "f2780:c0:m0"}
{"signature": "def python_app(function=None, data_flow_kernel=None, walltime=<NUM_LIT>, cache=False, executors='<STR_LIT:all>'):", "body": "from parsl.app.python import PythonApp<EOL>def decorator(func):<EOL><INDENT>def wrapper(f):<EOL><INDENT>return PythonApp(f,<EOL>data_flow_kernel=data_flow_kernel,<EOL>walltime=walltime,<EOL>cache=cache,<EOL>executors=executors)<EOL><DEDENT>return wrapper(func)<EOL><DEDENT>if function is not None:<EOL><INDENT>return decorator(function)<EOL><DEDENT>return decorator<EOL>", "docstring": "Decorator function for making python apps.\n\n    Parameters\n    ----------\n    function : function\n        Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis,\n        for example, `@python_app` if using all defaults or `@python_app(walltime=120)`. If the\n        decorator is used alone, function will be the actual function being decorated, whereas if it\n        is called with arguments, function will be None. Default is None.\n    data_flow_kernel : DataFlowKernel\n        The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can\n        be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None.\n    walltime : int\n        Walltime for app in seconds. Default is 60.\n    executors : string or list\n        Labels of the executors that this app can execute over. Default is 'all'.\n    cache : bool\n        Enable caching of the app call. Default is False.", "id": "f2780:m1"}
{"signature": "@typeguard.typechecked<EOL>def set_file_logger(filename: str, name: str = '<STR_LIT>', level: int = logging.DEBUG, format_string: Optional[str] = None):", "body": "if format_string is None:<EOL><INDENT>format_string = \"<STR_LIT>\"<EOL><DEDENT>logger = logging.getLogger(name)<EOL>logger.setLevel(logging.DEBUG)<EOL>handler = logging.FileHandler(filename)<EOL>handler.setLevel(level)<EOL>formatter = logging.Formatter(format_string, datefmt='<STR_LIT>')<EOL>handler.setFormatter(formatter)<EOL>logger.addHandler(handler)<EOL>futures_logger = logging.getLogger(\"<STR_LIT>\")<EOL>futures_logger.addHandler(handler)<EOL>", "docstring": "Add a stream log handler.\n\n    Args:\n        - filename (string): Name of the file to write logs to\n        - name (string): Logger name\n        - level (logging.LEVEL): Set the logging level.\n        - format_string (string): Set the format string\n\n    Returns:\n       -  None", "id": "f2781:m1"}
{"signature": "def submit(self, command='<STR_LIT>', blocksize=<NUM_LIT:1>, tasks_per_node=<NUM_LIT:1>, job_name=\"<STR_LIT>\"):", "body": "job_name = \"<STR_LIT>\".format(time.time())<EOL>wrapped_cmd = self.launcher(command,<EOL>tasks_per_node,<EOL>self.nodes_per_block)<EOL>[instance, *rest] = self.spin_up_instance(command=wrapped_cmd, job_name=job_name)<EOL>if not instance:<EOL><INDENT>logger.error(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>logger.debug(\"<STR_LIT>\".format(instance.instance_id))<EOL>state = translate_table.get(instance.state['<STR_LIT:Name>'], \"<STR_LIT>\")<EOL>self.resources[instance.instance_id] = {<EOL>\"<STR_LIT>\": instance.instance_id,<EOL>\"<STR_LIT>\": instance,<EOL>\"<STR_LIT:status>\": state<EOL>}<EOL>return instance.instance_id<EOL>", "docstring": "Submit the command onto a freshly instantiated AWS EC2 instance.\n\n        Submit returns an ID that corresponds to the task that was just submitted.\n\n        Parameters\n        ----------\n        command : str\n            Command to be invoked on the remote side.\n        blocksize : int\n            Number of blocks requested.\n        tasks_per_node : int (default=1)\n            Number of command invocations to be launched per node\n        job_name : str\n            Prefix for the job name.\n\n        Returns\n        -------\n        None or str\n            If at capacity, None will be returned. Otherwise, the job identifier will be returned.", "id": "f2782:c0:m13"}
{"signature": "def write_state_file(self):", "body": "fh = open('<STR_LIT>', '<STR_LIT:w>')<EOL>state = {}<EOL>state['<STR_LIT>'] = self.vpc_id<EOL>state['<STR_LIT>'] = self.sg_id<EOL>state['<STR_LIT>'] = self.sn_ids<EOL>state['<STR_LIT>'] = self.instances<EOL>state[\"<STR_LIT>\"] = self.instance_states<EOL>fh.write(json.dumps(state, indent=<NUM_LIT:4>))<EOL>", "docstring": "Save information that must persist to a file.\n\n        We do not want to create a new VPC and new identical security groups, so we save\n        information about them in a file between runs.", "id": "f2782:c0:m3"}
{"signature": "def get_instance_state(self, instances=None):", "body": "if instances:<EOL><INDENT>desc = self.client.describe_instances(InstanceIds=instances)<EOL><DEDENT>else:<EOL><INDENT>desc = self.client.describe_instances(InstanceIds=self.instances)<EOL><DEDENT>for i in range(len(desc['<STR_LIT>'])):<EOL><INDENT>instance = desc['<STR_LIT>'][i]['<STR_LIT>'][<NUM_LIT:0>]<EOL>self.instance_states[instance['<STR_LIT>']] = instance['<STR_LIT>']['<STR_LIT:Name>']<EOL><DEDENT>return self.instance_states<EOL>", "docstring": "Get states of all instances on EC2 which were started by this file.", "id": "f2782:c0:m11"}
{"signature": "def cancel(self, job_ids):", "body": "if self.linger is True:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return [False for x in job_ids]<EOL><DEDENT>try:<EOL><INDENT>self.client.terminate_instances(InstanceIds=list(job_ids))<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.error(\"<STR_LIT>\".format(job_ids))<EOL>raise e<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(job_ids))<EOL><DEDENT>for job_id in job_ids:<EOL><INDENT>self.resources[job_id][\"<STR_LIT:status>\"] = \"<STR_LIT>\"<EOL><DEDENT>for job_id in job_ids:<EOL><INDENT>self.instances.remove(job_id)<EOL><DEDENT>return [True for x in job_ids]<EOL>", "docstring": "Cancel the jobs specified by a list of job ids.\n\n        Parameters\n        ----------\n        job_ids : list of str\n            List of of job identifiers\n\n        Returns\n        -------\n        list of bool\n            Each entry in the list will contain False if the operation fails. Otherwise, the entry will be True.", "id": "f2782:c0:m14"}
{"signature": "def config_route_table(self, vpc, internet_gateway):", "body": "route_table = vpc.create_route_table()<EOL>route_table.create_route(<EOL>DestinationCidrBlock='<STR_LIT>', GatewayId=internet_gateway.internet_gateway_id<EOL>)<EOL>return route_table<EOL>", "docstring": "Configure route table for Virtual Private Cloud (VPC).\n\n        Parameters\n        ----------\n        vpc : dict\n            Representation of the VPC (created by create_vpc()).\n        internet_gateway : dict\n            Representation of the internet gateway (created by create_vpc()).", "id": "f2782:c0:m7"}
{"signature": "def submit(self, command, blocksize, tasks_per_node, job_name=\"<STR_LIT>\"):", "body": "if self.provisioned_blocks >= self.max_blocks:<EOL><INDENT>logger.warn(\"<STR_LIT>\".format(self.label))<EOL>return None<EOL><DEDENT>job_name = \"<STR_LIT>\".format(job_name, time.time())<EOL>script_path = \"<STR_LIT>\".format(self.script_dir, job_name)<EOL>script_path = os.path.abspath(script_path)<EOL>logger.debug(\"<STR_LIT>\".format(self.nodes_per_block))<EOL>job_config = {}<EOL>job_config[\"<STR_LIT>\"] = self.channel.script_dir<EOL>job_config[\"<STR_LIT>\"] = self.nodes_per_block<EOL>job_config[\"<STR_LIT>\"] = tasks_per_node<EOL>job_config[\"<STR_LIT>\"] = wtime_to_minutes(self.walltime)<EOL>job_config[\"<STR_LIT>\"] = self.scheduler_options<EOL>job_config[\"<STR_LIT>\"] = self.worker_init<EOL>job_config[\"<STR_LIT>\"] = self.partition<EOL>job_config[\"<STR_LIT>\"] = command<EOL>job_config[\"<STR_LIT>\"] = self.launcher(command,<EOL>tasks_per_node,<EOL>self.nodes_per_block)<EOL>logger.debug(\"<STR_LIT>\")<EOL>self._write_submit_script(template_string, script_path, job_name, job_config)<EOL>if self.move_files:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>channel_script_path = script_path<EOL><DEDENT>retcode, stdout, stderr = super().execute_wait(\"<STR_LIT>\".format(channel_script_path))<EOL>job_id = None<EOL>if retcode == <NUM_LIT:0>:<EOL><INDENT>for line in stdout.split('<STR_LIT:\\n>'):<EOL><INDENT>if line.startswith(\"<STR_LIT>\"):<EOL><INDENT>job_id = line.split(\"<STR_LIT>\")[<NUM_LIT:1>].strip()<EOL>self.resources[job_id] = {'<STR_LIT>': job_id, '<STR_LIT:status>': '<STR_LIT>', '<STR_LIT>': blocksize}<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>logger.error(\"<STR_LIT>\", retcode, stdout.strip(), stderr.strip())<EOL><DEDENT>return job_id<EOL>", "docstring": "Submit the command as a 
slurm job of blocksize parallel elements.\n\n        Parameters\n        ----------\n        command : str\n            Command to be made on the remote side.\n        blocksize : int\n            Not implemented.\n        tasks_per_node : int\n            Command invocations to be launched per node\n        job_name : str\n            Name for the job (must be unique).\n        Returns\n        -------\n        None or str\n            If at capacity, returns None; otherwise, a string identifier for the job", "id": "f2785:c0:m2"}
{"signature": "def _status(self):", "body": "cmd = \"<STR_LIT>\"<EOL>retcode, stdout, stderr = super().execute_wait(cmd)<EOL>if retcode != <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>jobs_missing = list(self.resources.keys())<EOL>for line in stdout.split('<STR_LIT:\\n>'):<EOL><INDENT>parts = line.split()<EOL>if parts and parts[<NUM_LIT:0>].lower().lower() != '<STR_LIT>'and not parts[<NUM_LIT:0>].startswith('<STR_LIT>'):<EOL><INDENT>job_id = parts[<NUM_LIT:0>]<EOL>status = translate_table.get(parts[<NUM_LIT:4>].lower(), '<STR_LIT>')<EOL>if job_id in self.resources:<EOL><INDENT>self.resources[job_id]['<STR_LIT:status>'] = status<EOL>jobs_missing.remove(job_id)<EOL><DEDENT><DEDENT><DEDENT>for missing_job in jobs_missing:<EOL><INDENT>if self.resources[missing_job]['<STR_LIT:status>'] in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>self.resources[missing_job]['<STR_LIT:status>'] = '<STR_LIT>'<EOL><DEDENT><DEDENT>", "docstring": "Get the status of a list of jobs identified by the job identifiers\n        returned from the submit request.\n\n        Returns:\n             - A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',\n               'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.\n\n        Raises:\n             - ExecutionProviderException or its subclasses", "id": "f2787:c0:m3"}
{"signature": "def cancel(self, job_ids):", "body": "job_id_list = '<STR_LIT:U+0020>'.join(job_ids)<EOL>cmd = \"<STR_LIT>\".format(job_id_list)<EOL>retcode, stdout, stderr = super().execute_wait(cmd, <NUM_LIT:3>)<EOL>rets = None<EOL>if retcode == <NUM_LIT:0>:<EOL><INDENT>for jid in job_ids:<EOL><INDENT>self.resources[jid]['<STR_LIT:status>'] = \"<STR_LIT>\"<EOL><DEDENT>rets = [True for i in job_ids]<EOL><DEDENT>else:<EOL><INDENT>rets = [False for i in job_ids]<EOL><DEDENT>return rets<EOL>", "docstring": "Cancels the resources identified by the job_ids provided by the user.\n\n        Args:\n             - job_ids (list): A list of job identifiers\n\n        Returns:\n             - A list of status from cancelling the job which can be True, False\n\n        Raises:\n             - ExecutionProviderException or its subclasses", "id": "f2787:c0:m4"}
{"signature": "def _create_deployment_object(self, job_name, job_image,<EOL>deployment_name, port=<NUM_LIT>,<EOL>replicas=<NUM_LIT:1>,<EOL>cmd_string=None,<EOL>engine_json_file='<STR_LIT>',<EOL>engine_dir='<STR_LIT:.>',<EOL>volumes=[]):", "body": "<EOL>security_context = None<EOL>if self.user_id and self.group_id:<EOL><INDENT>security_context = client.V1SecurityContext(run_as_group=self.group_id,<EOL>run_as_user=self.user_id,<EOL>run_as_non_root=self.run_as_non_root)<EOL><DEDENT>environment_vars = client.V1EnvVar(name=\"<STR_LIT>\", value=\"<STR_LIT>\")<EOL>launch_args = [\"<STR_LIT:-c>\", \"<STR_LIT>\".format(cmd_string)]<EOL>volume_mounts = []<EOL>for volume in volumes:<EOL><INDENT>volume_mounts.append(client.V1VolumeMount(mount_path=volume[<NUM_LIT:1>],<EOL>name=volume[<NUM_LIT:0>]))<EOL><DEDENT>container = None<EOL>if security_context:<EOL><INDENT>container = client.V1Container(<EOL>name=job_name,<EOL>image=job_image,<EOL>ports=[client.V1ContainerPort(container_port=port)],<EOL>volume_mounts=volume_mounts,<EOL>command=['<STR_LIT>'],<EOL>args=launch_args,<EOL>env=[environment_vars],<EOL>security_context=security_context)<EOL><DEDENT>else:<EOL><INDENT>container = client.V1Container(<EOL>name=job_name,<EOL>image=job_image,<EOL>ports=[client.V1ContainerPort(container_port=port)],<EOL>volume_mounts=volume_mounts,<EOL>command=['<STR_LIT>'],<EOL>args=launch_args,<EOL>env=[environment_vars])<EOL><DEDENT>secret = None<EOL>if self.secret:<EOL><INDENT>secret = client.V1LocalObjectReference(name=self.secret)<EOL><DEDENT>volume_defs = []<EOL>for volume in volumes:<EOL><INDENT>volume_defs.append(client.V1Volume(name=volume[<NUM_LIT:0>],<EOL>persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(<EOL>claim_name=volume[<NUM_LIT:0>])))<EOL><DEDENT>template = client.V1PodTemplateSpec(<EOL>metadata=client.V1ObjectMeta(labels={\"<STR_LIT>\": 
job_name}),<EOL>spec=client.V1PodSpec(containers=[container],<EOL>image_pull_secrets=[secret],<EOL>volumes=volume_defs<EOL>))<EOL>spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,<EOL>template=template)<EOL>deployment = client.ExtensionsV1beta1Deployment(<EOL>api_version=\"<STR_LIT>\",<EOL>kind=\"<STR_LIT>\",<EOL>metadata=client.V1ObjectMeta(name=deployment_name),<EOL>spec=spec)<EOL>return deployment<EOL>", "docstring": "Create a kubernetes deployment for the job.\n        Args:\n              - job_name (string) : Name of the job and deployment\n              - job_image (string) : Docker image to launch\n        KWargs:\n             - port (integer) : Container port\n             - replicas : Number of replica containers to maintain\n        Returns:\n              - True: The deployment object to launch", "id": "f2788:c0:m5"}
{"signature": "def cancel(self, job_ids):", "body": "for job in job_ids:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(job))<EOL>self._delete_deployment(job)<EOL>self.resources[job]['<STR_LIT:status>'] = '<STR_LIT>'<EOL><DEDENT>rets = [True for i in job_ids]<EOL>return rets<EOL>", "docstring": "Cancels the jobs specified by a list of job ids\n        Args:\n        job_ids : [<job_id> ...]\n        Returns :\n        [True/False...] : If the cancel operation fails the entire list will be False.", "id": "f2788:c0:m3"}
{"signature": "def _create_deployment(self, deployment):", "body": "api_response = self.kube_client.create_namespaced_deployment(<EOL>body=deployment,<EOL>namespace=self.namespace)<EOL>logger.debug(\"<STR_LIT>\".format(str(api_response.status)))<EOL>", "docstring": "Create the kubernetes deployment", "id": "f2788:c0:m6"}
{"signature": "def _status(self):", "body": "jobs_ids = list(self.resources.keys())<EOL>return jobs_ids<EOL>", "docstring": "Internal: Do not call. Returns the status list for a list of job_ids\n        Args:\n              self\n        Returns:\n              [status...] : Status list of all jobs", "id": "f2788:c0:m4"}
{"signature": "def _delete_deployment(self, deployment_name):", "body": "api_response = self.kube_client.delete_namespaced_deployment(<EOL>name=deployment_name,<EOL>namespace=self.namespace,<EOL>body=client.V1DeleteOptions(<EOL>propagation_policy='<STR_LIT>',<EOL>grace_period_seconds=<NUM_LIT:5>))<EOL>logger.debug(\"<STR_LIT>\".format(<EOL>str(api_response.status)))<EOL>", "docstring": "Delete deployment", "id": "f2788:c0:m7"}
{"signature": "def submit(self, command, blocksize, tasks_per_node, job_name=\"<STR_LIT>\"):", "body": "job_name = \"<STR_LIT>\".format(job_name, time.time())<EOL>script_path = \"<STR_LIT>\".format(self.script_dir, job_name)<EOL>script_path = os.path.abspath(script_path)<EOL>wrap_command = self.worker_init + '<STR_LIT:\\n>' + self.launcher(command, tasks_per_node, self.nodes_per_block)<EOL>self._write_submit_script(wrap_command, script_path)<EOL>job_id = None<EOL>proc = None<EOL>remote_pid = None<EOL>if (self.move_files is None and not isinstance(self.channel, LocalChannel)) or (self.move_files):<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>script_path = self.channel.push_file(script_path, self.channel.script_dir)<EOL><DEDENT>if not isinstance(self.channel, LocalChannel):<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>cmd = '<STR_LIT>'.format(script_path)<EOL>retcode, stdout, stderr = self.channel.execute_wait(cmd, self.cmd_timeout)<EOL>for line in stdout.split('<STR_LIT:\\n>'):<EOL><INDENT>if line.startswith(\"<STR_LIT>\"):<EOL><INDENT>remote_pid = line.split(\"<STR_LIT>\")[<NUM_LIT:1>].strip()<EOL>job_id = remote_pid<EOL><DEDENT><DEDENT>if job_id is None:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>job_id, proc = self.channel.execute_no_wait('<STR_LIT>'.format(script_path), self.cmd_timeout)<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(self.channel, e))<EOL>raise<EOL><DEDENT><DEDENT>self.resources[job_id] = {'<STR_LIT>': job_id, '<STR_LIT:status>': '<STR_LIT>',<EOL>'<STR_LIT>': blocksize,<EOL>'<STR_LIT>': remote_pid,<EOL>'<STR_LIT>': proc}<EOL>return job_id<EOL>", "docstring": "Submits the command onto an Local Resource Manager job of blocksize parallel elements.\n        Submit returns an ID that corresponds to the task that was just submitted.\n\n        If tasks_per_node <  1:\n             1/tasks_per_node is provisioned\n\n        If tasks_per_node == 1:\n             A 
single node is provisioned\n\n        If tasks_per_node >  1 :\n             tasks_per_node * blocksize number of nodes are provisioned.\n\n        Args:\n             - command  :(String) Commandline invocation to be made on the remote side.\n             - blocksize   :(float) - Not really used for local\n             - tasks_per_node (int) : command invocations to be launched per node\n\n        Kwargs:\n             - job_name (String): Name for job, must be unique\n\n        Returns:\n             - None: At capacity, cannot provision more\n             - job_id: (string) Identifier for the job", "id": "f2792:c0:m3"}
{"signature": "def _write_submit_script(self, script_string, script_filename):", "body": "try:<EOL><INDENT>with open(script_filename, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(script_string)<EOL><DEDENT><DEDENT>except KeyError as e:<EOL><INDENT>logger.error(\"<STR_LIT>\", e)<EOL>raise (SchedulerMissingArgs(e.args, self.label))<EOL><DEDENT>except IOError as e:<EOL><INDENT>logger.error(\"<STR_LIT>\", script_filename)<EOL>raise (ScriptPathError(script_filename, e))<EOL><DEDENT>return True<EOL>", "docstring": "Load the template string with config values and write the generated submit script to\na submit script file.\n\nArgs:\n      - template_string (string) : The template string to be used for the writing submit script\n      - script_filename (string) : Name of the submit script\n\nReturns:\n      - True: on success\n\nRaises:\n      SchedulerMissingArgs : If template is missing args\n      ScriptPathError : Unable to write submit script out", "id": "f2792:c0:m2"}
{"signature": "def cancel(self, job_ids):", "body": "for job in job_ids:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(job))<EOL>if self.resources[job]['<STR_LIT>']:<EOL><INDENT>proc = self.resources[job]['<STR_LIT>']<EOL>os.killpg(os.getpgid(proc.pid), signal.SIGTERM)<EOL>self.resources[job]['<STR_LIT:status>'] = '<STR_LIT>'<EOL><DEDENT>elif self.resources[job]['<STR_LIT>']:<EOL><INDENT>cmd = \"<STR_LIT>\".format(self.resources[job]['<STR_LIT>'])<EOL>retcode, stdout, stderr = self.channel.execute_wait(cmd, self.cmd_timeout)<EOL>if retcode != <NUM_LIT:0>:<EOL><INDENT>logger.warning(\"<STR_LIT>\".format(self.resources[job]['<STR_LIT>'],<EOL>self.label))<EOL><DEDENT><DEDENT><DEDENT>rets = [True for i in job_ids]<EOL>return rets<EOL>", "docstring": "Cancels the jobs specified by a list of job ids\n\n        Args:\n        job_ids : [<job_id> ...]\n\n        Returns :\n        [True/False...] : If the cancel operation fails the entire list will be False.", "id": "f2792:c0:m4"}
{"signature": "def submit(self, command, blocksize, tasks_per_node, job_name=\"<STR_LIT>\"):", "body": "wrapped_cmd = self.launcher(command,<EOL>tasks_per_node,<EOL><NUM_LIT:1>)<EOL>instance, name = self.create_instance(command=wrapped_cmd)<EOL>self.provisioned_blocks += <NUM_LIT:1><EOL>self.resources[name] = {\"<STR_LIT>\": name, \"<STR_LIT:status>\": translate_table[instance['<STR_LIT:status>']]}<EOL>return name<EOL>", "docstring": "The submit method takes the command string to be executed upon\n        instantiation of a resource most often to start a pilot.\n\n        Args :\n             - command (str) : The bash command string to be executed.\n             - blocksize (int) : Blocksize to be requested\n             - tasks_per_node (int) : command invocations to be launched per node\n\n        KWargs:\n             - job_name (str) : Human friendly name to be assigned to the job request\n\n        Returns:\n             - A job identifier, this could be an integer, string etc\n\n        Raises:\n             - ExecutionProviderException or its subclasses", "id": "f2793:c0:m1"}
{"signature": "@property<EOL><INDENT>def current_capacity(self):<DEDENT>", "body": "return self.provisioned_blocks<EOL>", "docstring": "Returns the number of currently provisioned blocks.", "id": "f2793:c0:m5"}
{"signature": "def submit(self, command, blocksize, tasks_per_node, job_name=\"<STR_LIT>\"):", "body": "if self.provisioned_blocks >= self.max_blocks:<EOL><INDENT>logger.warn(\"<STR_LIT>\", self.label)<EOL>return None<EOL><DEDENT>if blocksize < self.nodes_per_block:<EOL><INDENT>blocksize = self.nodes_per_block<EOL><DEDENT>job_name = \"<STR_LIT>\".format(job_name, time.time())<EOL>script_path = \"<STR_LIT>\".format(self.script_dir, job_name)<EOL>script_path = os.path.abspath(script_path)<EOL>logger.debug(\"<STR_LIT>\", blocksize, self.nodes_per_block,<EOL>tasks_per_node)<EOL>job_config = {}<EOL>job_config[\"<STR_LIT>\"] = self.channel.script_dir<EOL>job_config[\"<STR_LIT>\"] = self.nodes_per_block<EOL>job_config[\"<STR_LIT>\"] = self.nodes_per_block * tasks_per_node<EOL>job_config[\"<STR_LIT>\"] = self.nodes_per_block<EOL>job_config[\"<STR_LIT>\"] = tasks_per_node<EOL>job_config[\"<STR_LIT>\"] = self.walltime<EOL>job_config[\"<STR_LIT>\"] = self.scheduler_options<EOL>job_config[\"<STR_LIT>\"] = self.worker_init<EOL>job_config[\"<STR_LIT>\"] = command<EOL>job_config[\"<STR_LIT>\"] = self.launcher(command,<EOL>tasks_per_node,<EOL>self.nodes_per_block)<EOL>logger.debug(\"<STR_LIT>\")<EOL>self._write_submit_script(template_string, script_path, job_name, job_config)<EOL>channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)<EOL>submit_options = '<STR_LIT>'<EOL>if self.queue is not None:<EOL><INDENT>submit_options = '<STR_LIT>'.format(submit_options, self.queue)<EOL><DEDENT>if self.account is not None:<EOL><INDENT>submit_options = '<STR_LIT>'.format(submit_options, self.account)<EOL><DEDENT>launch_cmd = \"<STR_LIT>\".format(submit_options, channel_script_path)<EOL>retcode, stdout, stderr = super().execute_wait(launch_cmd)<EOL>job_id = None<EOL>if retcode == <NUM_LIT:0>:<EOL><INDENT>for line in stdout.split('<STR_LIT:\\n>'):<EOL><INDENT>if line.strip():<EOL><INDENT>job_id = line.strip()<EOL>self.resources[job_id] = {'<STR_LIT>': job_id, 
'<STR_LIT:status>': '<STR_LIT>', '<STR_LIT>': blocksize}<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>message = \"<STR_LIT>\".format(launch_cmd, retcode)<EOL>if (stdout is not None) and (stderr is not None):<EOL><INDENT>message += \"<STR_LIT>\".format(stderr.strip(), stdout.strip())<EOL><DEDENT>logger.error(message)<EOL><DEDENT>return job_id<EOL>", "docstring": "Submits the command onto an Local Resource Manager job of blocksize parallel elements.\n        Submit returns an ID that corresponds to the task that was just submitted.\n\n        If tasks_per_node <  1 : ! This is illegal. tasks_per_node should be integer\n\n        If tasks_per_node == 1:\n             A single node is provisioned\n\n        If tasks_per_node >  1 :\n             tasks_per_node * blocksize number of nodes are provisioned.\n\n        Args:\n             - command  :(String) Commandline invocation to be made on the remote side.\n             - blocksize   :(float)\n             - tasks_per_node (int) : command invocations to be launched per node\n\n        Kwargs:\n             - job_name (String): Name for job, must be unique\n\n        Returns:\n             - None: At capacity, cannot provision more\n             - job_id: (string) Identifier for the job", "id": "f2795:c0:m2"}
{"signature": "def _status(self):", "body": "job_id_list = '<STR_LIT:U+0020>'.join(self.resources.keys())<EOL>jobs_missing = list(self.resources.keys())<EOL>retcode, stdout, stderr = super().execute_wait(\"<STR_LIT>\".format(job_id_list))<EOL>for line in stdout.split('<STR_LIT:\\n>'):<EOL><INDENT>parts = line.split()<EOL>if not parts or parts[<NUM_LIT:0>].upper().startswith('<STR_LIT>') or parts[<NUM_LIT:0>].startswith('<STR_LIT>'):<EOL><INDENT>continue<EOL><DEDENT>job_id = parts[<NUM_LIT:0>]<EOL>status = translate_table.get(parts[<NUM_LIT:4>], '<STR_LIT>')<EOL>self.resources[job_id]['<STR_LIT:status>'] = status<EOL>jobs_missing.remove(job_id)<EOL><DEDENT>for missing_job in jobs_missing:<EOL><INDENT>if self.resources[missing_job]['<STR_LIT:status>'] in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>self.resources[missing_job]['<STR_LIT:status>'] = translate_table['<STR_LIT:E>']<EOL><DEDENT><DEDENT>", "docstring": "Internal: Do not call. Returns the status list for a list of job_ids\n\n        Args:\n              self\n\n        Returns:\n              [status...] : Status list of all jobs", "id": "f2795:c0:m1"}
{"signature": "def scale_in(self, blocks=<NUM_LIT:0>, machines=<NUM_LIT:0>, strategy=None):", "body": "count = <NUM_LIT:0><EOL>instances = self.client.servers.list()<EOL>for instance in instances[<NUM_LIT:0>:machines]:<EOL><INDENT>print(\"<STR_LIT>\", instance)<EOL>instance.delete()<EOL>count += <NUM_LIT:1><EOL><DEDENT>return count<EOL>", "docstring": "Scale in resources", "id": "f2798:c0:m2"}
{"signature": "def scale_out(self, blocks=<NUM_LIT:1>, block_size=<NUM_LIT:1>):", "body": "self.config['<STR_LIT>'.format(self.pool)]['<STR_LIT>']<EOL>count = <NUM_LIT:0><EOL>if blocks == <NUM_LIT:1>:<EOL><INDENT>block_id = len(self.blocks)<EOL>self.blocks[block_id] = []<EOL>for instance_id in range(<NUM_LIT:0>, block_size):<EOL><INDENT>instances = self.server_manager.create(<EOL>'<STR_LIT>'.format(block_id, instance_id),  <EOL>self.client.images.get('<STR_LIT>'),  <EOL>self.client.flavors.list()[<NUM_LIT:0>],<EOL>min_count=<NUM_LIT:1>,<EOL>max_count=<NUM_LIT:1>,<EOL>userdata=setup_script.format(engine_config=self.engine_config),<EOL>key_name='<STR_LIT>',<EOL>security_groups=['<STR_LIT>'],<EOL>nics=[{<EOL>\"<STR_LIT>\": '<STR_LIT>',<EOL>\"<STR_LIT>\": '<STR_LIT>',<EOL>\"<STR_LIT>\": '<STR_LIT>'<EOL>}])<EOL>self.blocks[block_id].extend([instances])<EOL>count += <NUM_LIT:1><EOL><DEDENT><DEDENT>return count<EOL>", "docstring": "Scale out the existing resources.", "id": "f2798:c0:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def cancel(self, job_ids):<DEDENT>", "body": "pass<EOL>", "docstring": "Cancels the resources identified by the job_ids provided by the user.\n\n        Args:\n             - job_ids (list): A list of job identifiers\n\n        Returns:\n             - A list of status from cancelling the job which can be True, False\n\n        Raises:\n             - ExecutionProviderException or its subclasses", "id": "f2799:c0:m2"}
{"signature": "def _write_submit_script(self, template, script_filename, job_name, configs):", "body": "try:<EOL><INDENT>submit_script = Template(template).substitute(jobname=job_name, **configs)<EOL>with open(script_filename, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(submit_script)<EOL><DEDENT><DEDENT>except KeyError as e:<EOL><INDENT>logger.error(\"<STR_LIT>\", e)<EOL>raise (SchedulerMissingArgs(e.args, self.sitename))<EOL><DEDENT>except IOError as e:<EOL><INDENT>logger.error(\"<STR_LIT>\", script_filename)<EOL>raise (ScriptPathError(script_filename, e))<EOL><DEDENT>except Exception as e:<EOL><INDENT>print(\"<STR_LIT>\", template)<EOL>print(\"<STR_LIT>\", job_name)<EOL>print(\"<STR_LIT>\", configs)<EOL>logger.error(\"<STR_LIT>\", e)<EOL>raise (e)<EOL><DEDENT>return True<EOL>", "docstring": "Generate submit script and write it to a file.\n\n        Args:\n              - template (string) : The template string to be used for the writing submit script\n              - script_filename (string) : Name of the submit script\n              - job_name (string) : job name\n              - configs (dict) : configs that get pushed into the template\n\n        Returns:\n              - True: on success\n\n        Raises:\n              SchedulerMissingArgs : If template is missing args\n              ScriptPathError : Unable to write submit script out", "id": "f2800:c0:m2"}
{"signature": "@property<EOL><INDENT>def scaling_enabled(self):<DEDENT>", "body": "return self._scaling_enabled<EOL>", "docstring": "The callers of ParslExecutors need to differentiate between Executors\n        and Executors wrapped in a resource provider\n\n        Returns:\n              - Status (Bool)", "id": "f2800:c0:m7"}
{"signature": "def cancel(self, job_ids):", "body": "job_id_list = '<STR_LIT:U+0020>'.join(job_ids)<EOL>retcode, stdout, stderr = super().execute_wait(\"<STR_LIT>\".format(job_id_list))<EOL>rets = None<EOL>if retcode == <NUM_LIT:0>:<EOL><INDENT>for jid in job_ids:<EOL><INDENT>self.resources[jid]['<STR_LIT:status>'] = translate_table['<STR_LIT>']  <EOL><DEDENT>rets = [True for i in job_ids]<EOL><DEDENT>else:<EOL><INDENT>rets = [False for i in job_ids]<EOL><DEDENT>return rets<EOL>", "docstring": "Cancels the jobs specified by a list of job ids\n\n        Args:\n        job_ids : [<job_id> ...]\n\n        Returns :\n        [True/False...] : If the cancel operation fails the entire list will be False.", "id": "f2802:c0:m3"}
{"signature": "def submit(self, command, blocksize, tasks_per_node, job_name=\"<STR_LIT>\"):", "body": "if self.provisioned_blocks >= self.max_blocks:<EOL><INDENT>logger.warn(\"<STR_LIT>\", self.label)<EOL>return None<EOL><DEDENT>if blocksize < self.nodes_per_block:<EOL><INDENT>blocksize = self.nodes_per_block<EOL><DEDENT>account_opt = '<STR_LIT>'.format(self.account) if self.account is not None else '<STR_LIT>'<EOL>job_name = \"<STR_LIT>\".format(job_name, time.time())<EOL>script_path = \"<STR_LIT>\".format(self.script_dir, job_name)<EOL>script_path = os.path.abspath(script_path)<EOL>job_config = {}<EOL>job_config[\"<STR_LIT>\"] = self.scheduler_options<EOL>job_config[\"<STR_LIT>\"] = self.worker_init<EOL>logger.debug(\"<STR_LIT>\",<EOL>blocksize, self.nodes_per_block, tasks_per_node)<EOL>job_config[\"<STR_LIT>\"] = self.launcher(command, tasks_per_node, self.nodes_per_block)<EOL>queue_opt = '<STR_LIT>'.format(self.queue) if self.queue is not None else '<STR_LIT>'<EOL>logger.debug(\"<STR_LIT>\")<EOL>self._write_submit_script(template_string, script_path, job_name, job_config)<EOL>channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)<EOL>command = '<STR_LIT>'.format(<EOL>self.nodes_per_block, queue_opt, wtime_to_minutes(self.walltime), account_opt, channel_script_path)<EOL>logger.debug(\"<STR_LIT>\".format(command))<EOL>retcode, stdout, stderr = super().execute_wait(command)<EOL>if retcode != <NUM_LIT:0>:<EOL><INDENT>logger.error(\"<STR_LIT>\".format(command))<EOL>logger.error(\"<STR_LIT>\".format(stdout, stderr))<EOL><DEDENT>logger.debug(\"<STR_LIT>\", retcode, stdout.strip(), stderr.strip())<EOL>job_id = None<EOL>if retcode == <NUM_LIT:0>:<EOL><INDENT>job_id = stdout.strip()<EOL>self.resources[job_id] = {'<STR_LIT>': job_id, '<STR_LIT:status>': '<STR_LIT>', '<STR_LIT>': blocksize}<EOL><DEDENT>else:<EOL><INDENT>logger.error(\"<STR_LIT>\".format(stderr))<EOL>raise (ScaleOutFailed(self.__class__, 
\"<STR_LIT>\"))<EOL><DEDENT>logger.debug(\"<STR_LIT>\".format(job_id))<EOL>return job_id<EOL>", "docstring": "Submits the command onto an Local Resource Manager job of blocksize parallel elements.\n        Submit returns an ID that corresponds to the task that was just submitted.\n\n        If tasks_per_node <  1 : ! This is illegal. tasks_per_node should be integer\n\n        If tasks_per_node == 1:\n             A single node is provisioned\n\n        If tasks_per_node >  1 :\n             tasks_per_node * blocksize number of nodes are provisioned.\n\n        Args:\n             - command  :(String) Commandline invocation to be made on the remote side.\n             - blocksize   :(float)\n             - tasks_per_node (int) : command invocations to be launched per node\n\n        Kwargs:\n             - job_name (String): Name for job, must be unique\n\n        Returns:\n             - None: At capacity, cannot provision more\n             - job_id: (string) Identifier for the job", "id": "f2802:c0:m2"}
{"signature": "def scale_out(self, workers=<NUM_LIT:1>):", "body": "raise NotImplementedError<EOL>", "docstring": "Scales out the number of active workers by 1.\n\n        This method is notImplemented for threads and will raise the error if called.\n\n        Raises:\n             NotImplemented exception", "id": "f2803:c0:m4"}
{"signature": "def submit(self, *args, **kwargs):", "body": "return self.executor.submit(*args, **kwargs)<EOL>", "docstring": "Submits work to the thread pool.\n\n        This method is simply pass through and behaves like a submit call as described\n        here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_", "id": "f2803:c0:m3"}
{"signature": "def shutdown(self, block=False):", "body": "x = self.executor.shutdown(wait=block)<EOL>logger.debug(\"<STR_LIT>\")<EOL>return x<EOL>", "docstring": "Shutdown the ThreadPool.\n\n        Kwargs:\n            - block (Bool): To block for confirmations or not", "id": "f2803:c0:m6"}
{"signature": "def close(self):", "body": "if self.reuse:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if self.mode == \"<STR_LIT>\":<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>try:<EOL><INDENT>pgid = os.getpgid(self.proc.pid)<EOL>os.killpg(pgid, signal.SIGTERM)<EOL>time.sleep(<NUM_LIT>)<EOL>os.killpg(pgid, signal.SIGKILL)<EOL>try:<EOL><INDENT>self.proc.wait(timeout=<NUM_LIT:1>)<EOL>x = self.proc.returncode<EOL>if x == <NUM_LIT:0>:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(x))<EOL><DEDENT>else:<EOL><INDENT>logger.error(\"<STR_LIT>\".format(x))<EOL><DEDENT><DEDENT>except subprocess.TimeoutExpired:<EOL><INDENT>logger.warn(\"<STR_LIT>\".format(self.proc.pid))<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>logger.warn(\"<STR_LIT>\".format(self.proc.pid, e))<EOL><DEDENT>", "docstring": "Terminate the controller process and its child processes.\n\n        Args:\n              - None", "id": "f2805:c0:m4"}
{"signature": "@property<EOL><INDENT>def engine_file(self):<DEDENT>", "body": "return os.path.join(self.ipython_dir,<EOL>'<STR_LIT>'.format(self.profile),<EOL>'<STR_LIT>')<EOL>", "docstring": "Specify path to the ipcontroller-engine.json file.\n\n        This file is stored in in the ipython_dir/profile folders.\n\n        Returns :\n              - str, File path to engine file", "id": "f2805:c0:m2"}
{"signature": "@property<EOL><INDENT>def client_file(self):<DEDENT>", "body": "return os.path.join(self.ipython_dir,<EOL>'<STR_LIT>'.format(self.profile),<EOL>'<STR_LIT>')<EOL>", "docstring": "Specify path to the ipcontroller-client.json file.\n\n        This file is stored in in the ipython_dir/profile folders.\n\n        Returns :\n              - str, File path to client file", "id": "f2805:c0:m3"}
{"signature": "def start(self):", "body": "if self.mode == \"<STR_LIT>\":<EOL><INDENT>return<EOL><DEDENT>if self.ipython_dir != '<STR_LIT>':<EOL><INDENT>self.ipython_dir = os.path.abspath(os.path.expanduser(self.ipython_dir))<EOL><DEDENT>if self.log:<EOL><INDENT>stdout = open(os.path.join(self.ipython_dir, \"<STR_LIT>\".format(self.profile)), '<STR_LIT:w>')<EOL>stderr = open(os.path.join(self.ipython_dir, \"<STR_LIT>\".format(self.profile)), '<STR_LIT:w>')<EOL><DEDENT>else:<EOL><INDENT>stdout = open(os.devnull, '<STR_LIT:w>')<EOL>stderr = open(os.devnull, '<STR_LIT:w>')<EOL><DEDENT>try:<EOL><INDENT>opts = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' if self.ipython_dir == '<STR_LIT>' else '<STR_LIT>'.format(self.ipython_dir),<EOL>self.interfaces if self.interfaces is not None else '<STR_LIT>',<EOL>'<STR_LIT>' if self.profile == '<STR_LIT:default>' else '<STR_LIT>'.format(self.profile),<EOL>'<STR_LIT>' if self.reuse else '<STR_LIT>',<EOL>'<STR_LIT>'.format(self.public_ip) if self.public_ip else '<STR_LIT>',<EOL>'<STR_LIT>'.format(self.port) if self.port is not None else '<STR_LIT>'<EOL>]<EOL>if self.port_range is not None:<EOL><INDENT>opts += [<EOL>'<STR_LIT>'.format(self.hb_ping, self.hb_pong),<EOL>'<STR_LIT>'.format(self.control_client, self.control_engine),<EOL>'<STR_LIT>'.format(self.mux_client, self.mux_engine),<EOL>'<STR_LIT>'.format(self.task_client, self.task_engine)<EOL>]<EOL><DEDENT>logger.debug(\"<STR_LIT>\".format('<STR_LIT:U+0020>'.join([str(x) for x in opts])))<EOL>self.proc = subprocess.Popen(opts, stdout=stdout, stderr=stderr, preexec_fn=os.setsid)<EOL><DEDENT>except FileNotFoundError:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>logger.error(msg)<EOL>raise ControllerError(msg)<EOL><DEDENT>except Exception as e:<EOL><INDENT>msg = \"<STR_LIT>\".format(e)<EOL>logger.error(msg)<EOL>raise ControllerError(msg)<EOL><DEDENT>", "docstring": "Start the controller.", "id": "f2805:c0:m1"}
{"signature": "def __init__(self, ip_address, port_range):", "body": "self.context = zmq.Context()<EOL>self.zmq_socket = self.context.socket(zmq.DEALER)<EOL>self.zmq_socket.set_hwm(<NUM_LIT:0>)<EOL>self.port = self.zmq_socket.bind_to_random_port(<EOL>\"<STR_LIT>\".format(ip_address),<EOL>min_port=port_range[<NUM_LIT:0>],<EOL>max_port=port_range[<NUM_LIT:1>])<EOL>", "docstring": "TODO: docstring", "id": "f2806:c1:m0"}
{"signature": "def execute_task(f, args, kwargs, user_ns):", "body": "fname = getattr(f, '<STR_LIT>', '<STR_LIT:f>')<EOL>prefix = \"<STR_LIT>\"<EOL>fname = prefix + \"<STR_LIT:f>\"<EOL>argname = prefix + \"<STR_LIT:args>\"<EOL>kwargname = prefix + \"<STR_LIT>\"<EOL>resultname = prefix + \"<STR_LIT:result>\"<EOL>user_ns.update({fname: f,<EOL>argname: args,<EOL>kwargname: kwargs,<EOL>resultname: resultname})<EOL>code = \"<STR_LIT>\".format(resultname, fname,<EOL>argname, kwargname)<EOL>try:<EOL><INDENT>exec(code, user_ns, user_ns)<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.warning(\"<STR_LIT>\".format(e))<EOL>raise e<EOL><DEDENT>else:<EOL><INDENT>return user_ns.get(resultname)<EOL><DEDENT>", "docstring": "Deserialize the buffer and execute the task.\n\n# Returns the result or exception.", "id": "f2807:m0"}
{"signature": "def start(self):", "body": "logger.info(\"<STR_LIT>\")<EOL>while True:<EOL><INDENT>socks = dict(self.poller.poll(<NUM_LIT:1>))<EOL>if socks.get(self.task_incoming) == zmq.POLLIN:<EOL><INDENT>message = self.task_incoming.recv_multipart()<EOL>logger.debug(\"<STR_LIT>\")<EOL>self.worker_messages.send_multipart(message)<EOL>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>if socks.get(self.worker_messages) == zmq.POLLIN:<EOL><INDENT>message = self.worker_messages.recv_multipart()<EOL>logger.debug(\"<STR_LIT>\")<EOL>self.result_outgoing.send_multipart(message[<NUM_LIT:1>:])<EOL>logger.debug(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "TODO: docstring", "id": "f2808:c0:m1"}
{"signature": "def starter(comm_q, *args, **kwargs):", "body": "<EOL>ic = Interchange(*args, **kwargs)<EOL>comm_q.put(ic.worker_port)<EOL>ic.start()<EOL>logger.debug(\"<STR_LIT>\")<EOL>", "docstring": "Start the interchange process\n\n    The executor is expected to call this function. The args, kwargs match that of the Interchange.__init__", "id": "f2808:m1"}
{"signature": "def status(self):", "body": "status = []<EOL>if self.provider:<EOL><INDENT>status = self.provider.status(self.blocks)<EOL><DEDENT>return status<EOL>", "docstring": "Return status of all blocks.", "id": "f2809:c0:m9"}
{"signature": "def scale_out(self, blocks=<NUM_LIT:1>):", "body": "r = []<EOL>for i in range(blocks):<EOL><INDENT>if self.provider:<EOL><INDENT>block = self.provider.submit(<EOL>self.launch_cmd, <NUM_LIT:1>, self.workers_per_node)<EOL>logger.debug(\"<STR_LIT>\".format(i, block))<EOL>if not block:<EOL><INDENT>raise(ScalingFailed(self.provider.label,<EOL>\"<STR_LIT>\"))<EOL><DEDENT>self.blocks.extend([block])<EOL><DEDENT>else:<EOL><INDENT>logger.error(\"<STR_LIT>\")<EOL>r = None<EOL><DEDENT><DEDENT>return r<EOL>", "docstring": "Scales out the number of active workers by the number of blocks specified.\n\n        Parameters\n        ----------\n\n        blocks : int\n             # of blocks to scale out. Default=1\n\n        Raises:\n             NotImplementedError", "id": "f2809:c0:m7"}
{"signature": "def _queue_management_worker(self):", "body": "logger.debug(\"<STR_LIT>\")<EOL>while True:<EOL><INDENT>task_id, buf = self.incoming_q.get()  <EOL>msg = deserialize_object(buf)[<NUM_LIT:0>]<EOL>task_fut = self.tasks[task_id]<EOL>logger.debug(\"<STR_LIT>\".format(task_id))<EOL>if \"<STR_LIT:result>\" in msg:<EOL><INDENT>task_fut.set_result(msg[\"<STR_LIT:result>\"])<EOL><DEDENT>elif \"<STR_LIT>\" in msg:<EOL><INDENT>pass<EOL><DEDENT>elif '<STR_LIT>' in msg:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>s, _ = deserialize_object(msg['<STR_LIT>'])<EOL>exception = ValueError(\"<STR_LIT>\".format(s))<EOL>task_fut.set_exception(exception)<EOL><DEDENT>except Exception as e:<EOL><INDENT>task_fut.set_exception(<EOL>DeserializationError(\"<STR_LIT>\".format(e)))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise BadMessage(<EOL>\"<STR_LIT>\")<EOL><DEDENT>if not self.is_alive:<EOL><INDENT>break<EOL><DEDENT><DEDENT>logger.info(\"<STR_LIT>\")<EOL>", "docstring": "TODO: docstring", "id": "f2809:c0:m4"}
{"signature": "def start(self):", "body": "self.outgoing_q = zmq_pipes.TasksOutgoing(<EOL>\"<STR_LIT:127.0.0.1>\", self.interchange_port_range)<EOL>self.incoming_q = zmq_pipes.ResultsIncoming(<EOL>\"<STR_LIT:127.0.0.1>\", self.interchange_port_range)<EOL>self.is_alive = True<EOL>self._queue_management_thread = None<EOL>self._start_queue_management_thread()<EOL>self._start_local_queue_process()<EOL>logger.debug(\"<STR_LIT>\"<EOL>.format(self._queue_management_thread))<EOL>if self.provider:<EOL><INDENT>l_cmd = self.launch_cmd.format(  <EOL>task_url=self.worker_task_url,<EOL>workers_per_node=self.workers_per_node,<EOL>logdir=\"<STR_LIT>\".format(self.run_dir, self.label))<EOL>self.launch_cmd = l_cmd<EOL>logger.debug(\"<STR_LIT>\".format(self.launch_cmd))<EOL>self._scaling_enabled = self.provider.scaling_enabled<EOL>logger.debug(<EOL>\"<STR_LIT>\", self.provider)<EOL>if hasattr(self.provider, '<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>for i in range(self.provider.init_blocks):<EOL><INDENT>block = self.provider.submit(<EOL>self.launch_cmd, <NUM_LIT:1>, self.workers_per_node)<EOL>logger.debug(\"<STR_LIT>\".format(i, block))<EOL>if not block:<EOL><INDENT>raise(ScalingFailed(self.provider.label,<EOL>\"<STR_LIT>\"))<EOL><DEDENT>self.blocks.extend([block])<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>logger.error(\"<STR_LIT>\".format(e))<EOL>raise e<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self._scaling_enabled = False<EOL>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Create the Interchange process and connect to it.", "id": "f2809:c0:m1"}
{"signature": "def scale_in(self, blocks):", "body": "to_kill = self.blocks[:blocks]<EOL>if self.provider:<EOL><INDENT>r = self.provider.cancel(to_kill)<EOL><DEDENT>return r<EOL>", "docstring": "Scale in the number of active blocks by specified amount.\n\n        The scale in method here is very rude. It doesn't give the workers\n        the opportunity to finish current tasks or cleanup. This is tracked\n        in issue #530\n\n        Raises:\n             NotImplementedError", "id": "f2809:c0:m8"}
{"signature": "@property<EOL><INDENT>def run_dir(self):<DEDENT>", "body": "return self._run_dir<EOL>", "docstring": "Path to the run directory.", "id": "f2810:c0:m6"}
{"signature": "@abstractmethod<EOL><INDENT>def submit(self, *args, **kwargs):<DEDENT>", "body": "pass<EOL>", "docstring": "Submit.\n\n        We haven't yet decided on what the args to this can be,\n        whether it should just be func, args, kwargs or be the partially evaluated\n        fn", "id": "f2810:c0:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def start(self, *args, **kwargs):<DEDENT>", "body": "pass<EOL>", "docstring": "Start the executor.\n\n        Any spin-up operations (for example: starting thread pools) should be performed here.", "id": "f2810:c0:m0"}
{"signature": "@abstractproperty<EOL><INDENT>def scaling_enabled(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Specify if scaling is enabled.\n\n        The callers of ParslExecutors need to differentiate between Executors\n        and Executors wrapped in a resource provider", "id": "f2810:c0:m5"}
{"signature": "def submit(self, func, *args, **kwargs):", "body": "task_id = uuid.uuid4()<EOL>logger.debug(\"<STR_LIT>\".format(func, args))<EOL>self.tasks[task_id] = Future()<EOL>fn_buf = pack_apply_message(func, args, kwargs,<EOL>buffer_threshold=<NUM_LIT> * <NUM_LIT>,<EOL>item_threshold=<NUM_LIT>)<EOL>msg = {\"<STR_LIT>\": task_id,<EOL>\"<STR_LIT>\": fn_buf}<EOL>self.outgoing_q.put(msg)<EOL>return self.tasks[task_id]<EOL>", "docstring": "Submits work to the the outgoing_q.\n\n        The outgoing_q is an external process listens on this\n        queue for new work. This method is simply pass through and behaves like a\n        submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_\n\n        Args:\n            - func (callable) : Callable function\n            - *args (list) : List of arbitrary positional arguments.\n\n        Kwargs:\n            - **kwargs (dict) : A dictionary of arbitrary keyword args for func.\n\n        Returns:\n              Future", "id": "f2812:c0:m6"}
{"signature": "def runner(incoming_q, outgoing_q):", "body": "logger.debug(\"<STR_LIT>\")<EOL>def execute_task(bufs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>user_ns = locals()<EOL>user_ns.update({'<STR_LIT>': __builtins__})<EOL>f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)<EOL>fname = getattr(f, '<STR_LIT>', '<STR_LIT:f>')<EOL>prefix = \"<STR_LIT>\"<EOL>fname = prefix + \"<STR_LIT:f>\"<EOL>argname = prefix + \"<STR_LIT:args>\"<EOL>kwargname = prefix + \"<STR_LIT>\"<EOL>resultname = prefix + \"<STR_LIT:result>\"<EOL>user_ns.update({fname: f,<EOL>argname: args,<EOL>kwargname: kwargs,<EOL>resultname: resultname})<EOL>code = \"<STR_LIT>\".format(resultname, fname,<EOL>argname, kwargname)<EOL>try:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(code))<EOL>exec(code, user_ns, user_ns)<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.warning(\"<STR_LIT>\".format(e))<EOL>raise e<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(user_ns.get(resultname)))<EOL>return user_ns.get(resultname)<EOL><DEDENT><DEDENT>while True:<EOL><INDENT>try:<EOL><INDENT>msg = incoming_q.get(block=True, timeout=<NUM_LIT:10>)<EOL><DEDENT>except queue.Empty:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>except IOError as e:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(e))<EOL>try:<EOL><INDENT>outgoing_q.put(None)<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT>break<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(e))<EOL><DEDENT>else:<EOL><INDENT>if not msg:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>outgoing_q.put(None)<EOL>break<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(msg[\"<STR_LIT>\"]))<EOL>try:<EOL><INDENT>response_obj = execute_task(msg['<STR_LIT>'])<EOL>response = {\"<STR_LIT>\": msg[\"<STR_LIT>\"],<EOL>\"<STR_LIT:result>\": serialize_object(response_obj)}<EOL>logger.debug(\"<STR_LIT>\".format(<EOL>deserialize_object(response[\"<STR_LIT:result>\"])))<EOL><DEDENT>except Exception as 
e:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(e))<EOL>response = {\"<STR_LIT>\": msg[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": serialize_object(e)}<EOL><DEDENT>outgoing_q.put(response)<EOL><DEDENT><DEDENT><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>", "docstring": "This is a function that mocks the Swift-T side.\n\n    It listens on the the incoming_q for tasks and posts returns on the outgoing_q.\n\n    Args:\n         - incoming_q (Queue object) : The queue to listen on\n         - outgoing_q (Queue object) : Queue to post results on\n\n    The messages posted on the incoming_q will be of the form :\n\n    .. code:: python\n\n       {\n          \"task_id\" : <uuid.uuid4 string>,\n          \"buffer\"  : serialized buffer containing the fn, args and kwargs\n       }\n\n    If ``None`` is received, the runner will exit.\n\n    Response messages should be of the form:\n\n    .. code:: python\n\n       {\n          \"task_id\" : <uuid.uuid4 string>,\n          \"result\"  : serialized buffer containing result\n          \"exception\" : serialized exception object\n       }\n\n    On exiting the runner will post ``None`` to the outgoing_q", "id": "f2812:m0"}
{"signature": "def _start_queue_management_thread(self):", "body": "logging.debug(\"<STR_LIT>\", \"<STR_LIT:*>\" * <NUM_LIT>)<EOL>if self._queue_management_thread is None:<EOL><INDENT>logging.debug(\"<STR_LIT>\")<EOL>self._queue_management_thread = threading.Thread(target=self._queue_management_worker)<EOL>self._queue_management_thread.daemon = True<EOL>self._queue_management_thread.start()<EOL><DEDENT>else:<EOL><INDENT>logging.debug(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Method to start the management thread as a daemon.\n\n        Checks if a thread already exists, then starts it.\n        Could be used later as a restart if the management thread dies.", "id": "f2812:c0:m4"}
{"signature": "def scale_in(self, workers):", "body": "raise NotImplementedError<EOL>", "docstring": "Scale in the number of active blocks by specified amount.\n\n        This method is not implemented for turbine and will raise an error if called.\n\n        Raises:\n             NotImplementedError", "id": "f2812:c0:m9"}
{"signature": "def __init__(self, label='<STR_LIT>', storage_access=None, working_dir=None, managed=True):", "body": "logger.debug(\"<STR_LIT>\")<EOL>self.label = label<EOL>self.storage_access = storage_access if storage_access is not None else []<EOL>if len(self.storage_access) > <NUM_LIT:1>:<EOL><INDENT>raise ConfigurationError('<STR_LIT>')<EOL><DEDENT>self.working_dir = working_dir<EOL>self.managed = managed<EOL>", "docstring": "Initialize the thread pool.\n\n        Trying to implement the emews model.", "id": "f2812:c0:m0"}
{"signature": "def scale_out(self, blocks=<NUM_LIT:1>):", "body": "r = []<EOL>for i in range(blocks):<EOL><INDENT>if self.provider:<EOL><INDENT>block = self.provider.submit(self.launch_cmd, <NUM_LIT:1>, self.workers_per_node)<EOL>logger.debug(\"<STR_LIT>\".format(i, block))<EOL>if not block:<EOL><INDENT>raise(ScalingFailed(self.provider.label,<EOL>\"<STR_LIT>\"))<EOL><DEDENT>self.engines.extend([block])<EOL>r.extend([block])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.error(\"<STR_LIT>\")<EOL>r = None<EOL><DEDENT>return r<EOL>", "docstring": "Scales out the number of active workers by 1.\n\n        This method is notImplemented for threads and will raise the error if called.\n\n        Parameters:\n            blocks : int\n               Number of blocks to be provisioned.", "id": "f2813:c0:m8"}
{"signature": "def submit(self, *args, **kwargs):", "body": "return self.lb_view.apply_async(*args, **kwargs)<EOL>", "docstring": "Submits work to the thread pool.\n\n        This method is simply pass through and behaves like a submit call as described\n        here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_\n\n        Returns:\n              Future", "id": "f2813:c0:m7"}
{"signature": "def scale_in(self, blocks):", "body": "status = dict(zip(self.engines, self.provider.status(self.engines)))<EOL>to_kill = [engine for engine in status if status[engine] == \"<STR_LIT>\"][:blocks]<EOL>if self.provider:<EOL><INDENT>r = self.provider.cancel(to_kill)<EOL><DEDENT>else:<EOL><INDENT>logger.error(\"<STR_LIT>\")<EOL>r = None<EOL><DEDENT>return r<EOL>", "docstring": "Scale in the number of active blocks by the specified number.", "id": "f2813:c0:m9"}
{"signature": "def serialize_object(obj, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS):", "body": "buffers = []<EOL>if istype(obj, sequence_types) and len(obj) < item_threshold:<EOL><INDENT>cobj = can_sequence(obj)<EOL>for c in cobj:<EOL><INDENT>buffers.extend(_extract_buffers(c, buffer_threshold))<EOL><DEDENT><DEDENT>elif istype(obj, dict) and len(obj) < item_threshold:<EOL><INDENT>cobj = {}<EOL>for k in sorted(obj):<EOL><INDENT>c = can(obj[k])<EOL>buffers.extend(_extract_buffers(c, buffer_threshold))<EOL>cobj[k] = c<EOL><DEDENT><DEDENT>else:<EOL><INDENT>cobj = can(obj)<EOL>buffers.extend(_extract_buffers(cobj, buffer_threshold))<EOL><DEDENT>buffers.insert(<NUM_LIT:0>, pickle.dumps(cobj, PICKLE_PROTOCOL))<EOL>return buffers<EOL>", "docstring": "Serialize an object into a list of sendable buffers.\n\n    Parameters\n    ----------\n\n    obj : object\n        The object to be serialized\n    buffer_threshold : int\n        The threshold (in bytes) for pulling out data buffers\n        to avoid pickling them.\n    item_threshold : int\n        The maximum number of items over which canning will iterate.\n        Containers (lists, dicts) larger than this will be pickled without\n        introspection.\n\n    Returns\n    -------\n    [bufs] : list of buffers representing the serialized object.", "id": "f2814:m3"}
{"signature": "def _nbytes(buf):", "body": "if isinstance(buf, memoryview):<EOL><INDENT>if PY3:<EOL><INDENT>return buf.nbytes<EOL><DEDENT>else:<EOL><INDENT>size = buf.itemsize<EOL>for dim in buf.shape:<EOL><INDENT>size *= dim<EOL><DEDENT>return size<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return len(buf)<EOL><DEDENT>", "docstring": "Return byte-size of a memoryview or buffer.", "id": "f2814:m0"}
{"signature": "def _extract_buffers(obj, threshold=MAX_BYTES):", "body": "buffers = []<EOL>if isinstance(obj, CannedObject) and obj.buffers:<EOL><INDENT>for i, buf in enumerate(obj.buffers):<EOL><INDENT>nbytes = _nbytes(buf)<EOL>if nbytes > threshold:<EOL><INDENT>obj.buffers[i] = None<EOL>buffers.append(buf)<EOL><DEDENT>elif isinstance(buf, memoryview):<EOL><INDENT>obj.buffers[i] = buf.tobytes()<EOL><DEDENT>elif isinstance(buf, buffer):<EOL><INDENT>obj.buffers[i] = bytes(buf)<EOL><DEDENT><DEDENT><DEDENT>return buffers<EOL>", "docstring": "Extract buffers larger than a certain threshold.", "id": "f2814:m1"}
{"signature": "def unpack_apply_message(bufs, g=None, copy=True):", "body": "bufs = list(bufs)  <EOL>assert len(bufs) >= <NUM_LIT:2>, \"<STR_LIT>\"<EOL>pf = buffer_to_bytes_py2(bufs.pop(<NUM_LIT:0>))<EOL>f = uncan(pickle.loads(pf), g)<EOL>pinfo = buffer_to_bytes_py2(bufs.pop(<NUM_LIT:0>))<EOL>info = pickle.loads(pinfo)<EOL>arg_bufs, kwarg_bufs = bufs[:info['<STR_LIT>']], bufs[info['<STR_LIT>']:]<EOL>args = []<EOL>for i in range(info['<STR_LIT>']):<EOL><INDENT>arg, arg_bufs = deserialize_object(arg_bufs, g)<EOL>args.append(arg)<EOL><DEDENT>args = tuple(args)<EOL>assert not arg_bufs, \"<STR_LIT>\"<EOL>kwargs = {}<EOL>for key in info['<STR_LIT>']:<EOL><INDENT>kwarg, kwarg_bufs = deserialize_object(kwarg_bufs, g)<EOL>kwargs[key] = kwarg<EOL><DEDENT>assert not kwarg_bufs, \"<STR_LIT>\"<EOL>return f, args, kwargs<EOL>", "docstring": "Unpack f,args,kwargs from buffers packed by pack_apply_message().\n\n    Returns: original f,args,kwargs", "id": "f2814:m6"}
{"signature": "def _restore_buffers(obj, buffers):", "body": "if isinstance(obj, CannedObject) and obj.buffers:<EOL><INDENT>for i, buf in enumerate(obj.buffers):<EOL><INDENT>if buf is None:<EOL><INDENT>obj.buffers[i] = buffers.pop(<NUM_LIT:0>)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Restore extracted buffers.", "id": "f2814:m2"}
{"signature": "def use_pickle():", "body": "from . import serialize<EOL>serialize.pickle = serialize._stdlib_pickle<EOL>can_map[FunctionType] = _original_can_map[FunctionType]<EOL>", "docstring": "Revert to using stdlib pickle.\n\n    Reverts custom serialization enabled by use_dill|cloudpickle.", "id": "f2817:m4"}
{"signature": "def _import_mapping(mapping, original=None):", "body": "<EOL>for key, value in list(mapping.items()):<EOL><INDENT>if isinstance(key, string_types):<EOL><INDENT>try:<EOL><INDENT>cls = import_item(key)<EOL><DEDENT>except Exception:<EOL><INDENT>if original and key not in original:<EOL><INDENT>print(\"<STR_LIT>\", key, exc_info=True)<EOL><DEDENT>mapping.pop(key)<EOL><DEDENT>else:<EOL><INDENT>mapping[cls] = mapping.pop(key)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Import any string-keys in a type mapping.", "id": "f2817:m5"}
{"signature": "def __init__(self, obj, keys=[], hook=None):", "body": "self.keys = keys<EOL>self.obj = copy.copy(obj)<EOL>self.hook = can(hook)<EOL>for key in keys:<EOL><INDENT>setattr(self.obj, key, can(getattr(obj, key)))<EOL><DEDENT>self.buffers = []<EOL>", "docstring": "Can an object for safe pickling.\n\n        Parameters\n        ==========\n\n        obj:\n            The object to be canned\n        keys: list (optional)\n            list of attribute names that will be explicitly canned / uncanned\n        hook: callable (optional)\n            An optional extra callable,\n            which can do additional processing of the uncanned object.\n\n        Large data may be offloaded into the buffers list,\n        used for zero-copy transfers.", "id": "f2817:c0:m0"}
{"signature": "def _get_cell_type(a=None):", "body": "def inner():<EOL><INDENT>return a<EOL><DEDENT>return type(py3compat.get_closure(inner)[<NUM_LIT:0>])<EOL>", "docstring": "The type of a closure cell doesn't seem to be importable, so just create one.", "id": "f2817:m0"}
{"signature": "def istype(obj, check):", "body": "if isinstance(check, tuple):<EOL><INDENT>for cls in check:<EOL><INDENT>if type(obj) is cls:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return type(obj) is check<EOL><DEDENT>", "docstring": "Like isinstance(obj, check), but strict.\n\n    This won't catch subclasses.", "id": "f2817:m6"}
{"signature": "def __init__(self, ip_address, port_range):", "body": "self.context = zmq.Context()<EOL>self.zmq_socket = self.context.socket(zmq.DEALER)<EOL>self.zmq_socket.set_hwm(<NUM_LIT:0>)<EOL>self.port = self.zmq_socket.bind_to_random_port(\"<STR_LIT>\".format(ip_address),<EOL>min_port=port_range[<NUM_LIT:0>],<EOL>max_port=port_range[<NUM_LIT:1>])<EOL>self.poller = zmq.Poller()<EOL>self.poller.register(self.zmq_socket, zmq.POLLOUT)<EOL>", "docstring": "Parameters\n----------\n\nip_address: str\n   IP address of the client (where Parsl runs)\nport_range: tuple(int, int)\n   Port range for the comms between client and interchange", "id": "f2819:c1:m0"}
{"signature": "def run(self, message):", "body": "self.zmq_socket.send_pyobj(message, copy=True)<EOL>reply = self.zmq_socket.recv_pyobj()<EOL>return reply<EOL>", "docstring": "This function needs to be fast at the same time aware of the possibility of\n        ZMQ pipes overflowing.\n\n        The timeout increases slowly if contention is detected on ZMQ pipes.\n        We could set copy=False and get slightly better latency but this results\n        in ZMQ sockets reaching a broken state once there are ~10k tasks in flight.\n        This issue can be magnified if each the serialized buffer itself is larger.", "id": "f2819:c0:m1"}
{"signature": "def create_reg_message(self):", "body": "msg = {'<STR_LIT>': PARSL_VERSION,<EOL>'<STR_LIT>': \"<STR_LIT>\".format(sys.version_info.major,<EOL>sys.version_info.minor,<EOL>sys.version_info.micro),<EOL>'<STR_LIT>': self.worker_count,<EOL>'<STR_LIT>': self.block_id,<EOL>'<STR_LIT>': self.prefetch_capacity,<EOL>'<STR_LIT>': self.worker_count + self.prefetch_capacity,<EOL>'<STR_LIT>': platform.system(),<EOL>'<STR_LIT>': platform.node(),<EOL>'<STR_LIT>': os.getcwd(),<EOL>}<EOL>b_msg = json.dumps(msg).encode('<STR_LIT:utf-8>')<EOL>return b_msg<EOL>", "docstring": "Creates a registration message to identify the worker to the interchange", "id": "f2820:c0:m1"}
{"signature": "def worker(worker_id, pool_id, task_queue, result_queue, worker_queue):", "body": "start_file_logger('<STR_LIT>'.format(args.logdir, pool_id, worker_id),<EOL>worker_id,<EOL>name=\"<STR_LIT>\",<EOL>level=logging.DEBUG if args.debug else logging.INFO)<EOL>logger.info('<STR_LIT>'.format(worker_id))<EOL>if args.debug:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>while True:<EOL><INDENT>worker_queue.put(worker_id)<EOL>req = task_queue.get()<EOL>tid = req['<STR_LIT>']<EOL>logger.info(\"<STR_LIT>\".format(tid))<EOL>try:<EOL><INDENT>worker_queue.get()<EOL><DEDENT>except queue.Empty:<EOL><INDENT>logger.warning(\"<STR_LIT>\".format(worker_id))<EOL>pass<EOL><DEDENT>try:<EOL><INDENT>result = execute_task(req['<STR_LIT>'])<EOL>serialized_result = serialize_object(result)<EOL><DEDENT>except Exception:<EOL><INDENT>result_package = {'<STR_LIT>': tid, '<STR_LIT>': serialize_object(RemoteExceptionWrapper(*sys.exc_info()))}<EOL><DEDENT>else:<EOL><INDENT>result_package = {'<STR_LIT>': tid, '<STR_LIT:result>': serialized_result}<EOL><DEDENT>logger.info(\"<STR_LIT>\".format(tid))<EOL>pkl_package = pickle.dumps(result_package)<EOL>result_queue.put(pkl_package)<EOL><DEDENT>", "docstring": "Put request token into queue\nGet task from task_queue\nPop request from queue\nPut result into result_queue", "id": "f2820:m1"}
{"signature": "def push_results(self, kill_event):", "body": "logger.debug(\"<STR_LIT>\")<EOL>push_poll_period = max(<NUM_LIT:10>, self.poll_period) / <NUM_LIT:1000>    <EOL>logger.debug(\"<STR_LIT>\".format(push_poll_period))<EOL>last_beat = time.time()<EOL>items = []<EOL>while not kill_event.is_set():<EOL><INDENT>try:<EOL><INDENT>r = self.pending_result_queue.get(block=True, timeout=push_poll_period)<EOL>items.append(r)<EOL><DEDENT>except queue.Empty:<EOL><INDENT>pass<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.exception(\"<STR_LIT>\".format(e))<EOL><DEDENT>if len(items) >= self.max_queue_size or time.time() > last_beat + push_poll_period:<EOL><INDENT>last_beat = time.time()<EOL>if items:<EOL><INDENT>self.result_outgoing.send_multipart(items)<EOL>items = []<EOL><DEDENT><DEDENT><DEDENT>logger.critical(\"<STR_LIT>\")<EOL>", "docstring": "Listens on the pending_result_queue and sends out results via 0mq\n\n        Parameters:\n        -----------\n        kill_event : threading.Event\n              Event to let the thread know when it is time to die.", "id": "f2820:c0:m4"}
{"signature": "def __init__(self,<EOL>task_q_url=\"<STR_LIT>\",<EOL>result_q_url=\"<STR_LIT>\",<EOL>cores_per_worker=<NUM_LIT:1>,<EOL>max_workers=float('<STR_LIT>'),<EOL>prefetch_capacity=<NUM_LIT:0>,<EOL>uid=None,<EOL>block_id=None,<EOL>heartbeat_threshold=<NUM_LIT>,<EOL>heartbeat_period=<NUM_LIT:30>,<EOL>poll_period=<NUM_LIT:10>):", "body": "logger.info(\"<STR_LIT>\")<EOL>self.context = zmq.Context()<EOL>self.task_incoming = self.context.socket(zmq.DEALER)<EOL>self.task_incoming.setsockopt(zmq.IDENTITY, uid.encode('<STR_LIT:utf-8>'))<EOL>self.task_incoming.setsockopt(zmq.LINGER, <NUM_LIT:0>)<EOL>self.task_incoming.connect(task_q_url)<EOL>self.result_outgoing = self.context.socket(zmq.DEALER)<EOL>self.result_outgoing.setsockopt(zmq.IDENTITY, uid.encode('<STR_LIT:utf-8>'))<EOL>self.result_outgoing.setsockopt(zmq.LINGER, <NUM_LIT:0>)<EOL>self.result_outgoing.connect(result_q_url)<EOL>logger.info(\"<STR_LIT>\")<EOL>self.uid = uid<EOL>self.block_id = block_id<EOL>cores_on_node = multiprocessing.cpu_count()<EOL>self.max_workers = max_workers<EOL>self.prefetch_capacity = prefetch_capacity<EOL>self.worker_count = min(max_workers,<EOL>math.floor(cores_on_node / cores_per_worker))<EOL>logger.info(\"<STR_LIT>\".format(self.worker_count))<EOL>self.pending_task_queue = multiprocessing.Queue()<EOL>self.pending_result_queue = multiprocessing.Queue()<EOL>self.ready_worker_queue = multiprocessing.Queue()<EOL>self.max_queue_size = self.prefetch_capacity + self.worker_count<EOL>self.tasks_per_round = <NUM_LIT:1><EOL>self.heartbeat_period = heartbeat_period<EOL>self.heartbeat_threshold = heartbeat_threshold<EOL>self.poll_period = poll_period<EOL>", "docstring": "Parameters\n----------\nworker_url : str\n     Worker url on which workers will attempt to connect back\n\nuid : str\n     string unique identifier\n\nblock_id : str\n     Block identifier that maps managers to the provider blocks they belong to.\n\ncores_per_worker : float\n     cores to be assigned to each worker. 
Oversubscription is possible\n     by setting cores_per_worker < 1.0. Default=1\n\nmax_workers : int\n     caps the maximum number of workers that can be launched.\n     default: infinity\n\nprefetch_capacity : int\n     Number of tasks that could be prefetched over available worker capacity.\n     When there are a few tasks (<100) or when tasks are long running, this option should\n     be set to 0 for better load balancing. Default is 0.\n\nheartbeat_threshold : int\n     Seconds since the last message from the interchange after which the\n     interchange is assumed to be un-available, and the manager initiates shutdown. Default:120s\n\n     Number of seconds since the last message from the interchange after which the worker\n     assumes that the interchange is lost and the manager shuts down. Default:120\n\nheartbeat_period : int\n     Number of seconds after which a heartbeat message is sent to the interchange\n\npoll_period : int\n     Timeout period used by the manager in milliseconds. Default: 10ms", "id": "f2820:c0:m0"}
{"signature": "def set_stream_logger(name='<STR_LIT>', level=logging.DEBUG, format_string=None):", "body": "if format_string is None:<EOL><INDENT>format_string = \"<STR_LIT>\"<EOL><DEDENT>global logger<EOL>logger = logging.getLogger(name)<EOL>logger.setLevel(logging.DEBUG)<EOL>handler = logging.StreamHandler()<EOL>handler.setLevel(level)<EOL>formatter = logging.Formatter(format_string, datefmt='<STR_LIT>')<EOL>handler.setFormatter(formatter)<EOL>logger.addHandler(handler)<EOL>", "docstring": "Add a stream log handler.\n\n    Args:\n         - name (string) : Set the logger name.\n         - level (logging.LEVEL) : Set to logging.DEBUG by default.\n         - format_string (sting) : Set to None by default.\n\n    Returns:\n         - None", "id": "f2820:m3"}
{"signature": "def start_file_logger(filename, rank, name='<STR_LIT>', level=logging.DEBUG, format_string=None):", "body": "if format_string is None:<EOL><INDENT>format_string = \"<STR_LIT>\".format(rank)<EOL><DEDENT>global logger<EOL>logger = logging.getLogger(name)<EOL>logger.setLevel(logging.DEBUG)<EOL>handler = logging.FileHandler(filename)<EOL>handler.setLevel(level)<EOL>formatter = logging.Formatter(format_string, datefmt='<STR_LIT>')<EOL>handler.setFormatter(formatter)<EOL>logger.addHandler(handler)<EOL>", "docstring": "Add a stream log handler.\n\n    Args:\n        - filename (string): Name of the file to write logs to\n        - name (string): Logger name\n        - level (logging.LEVEL): Set the logging level.\n        - format_string (string): Set the format string\n\n    Returns:\n       -  None", "id": "f2820:m2"}
{"signature": "def __init__(self,<EOL>client_address=\"<STR_LIT:127.0.0.1>\",<EOL>interchange_address=\"<STR_LIT:127.0.0.1>\",<EOL>client_ports=(<NUM_LIT>, <NUM_LIT>, <NUM_LIT>),<EOL>worker_ports=None,<EOL>worker_port_range=(<NUM_LIT>, <NUM_LIT>),<EOL>heartbeat_threshold=<NUM_LIT>,<EOL>logdir=\"<STR_LIT:.>\",<EOL>logging_level=logging.INFO,<EOL>poll_period=<NUM_LIT:10>,<EOL>suppress_failure=False,<EOL>):", "body": "self.logdir = logdir<EOL>try:<EOL><INDENT>os.makedirs(self.logdir)<EOL><DEDENT>except FileExistsError:<EOL><INDENT>pass<EOL><DEDENT>start_file_logger(\"<STR_LIT>\".format(self.logdir), level=logging_level)<EOL>logger.debug(\"<STR_LIT>\")<EOL>self.client_address = client_address<EOL>self.interchange_address = interchange_address<EOL>self.suppress_failure = suppress_failure<EOL>self.poll_period = poll_period<EOL>logger.info(\"<STR_LIT>\".format(<EOL>client_address, client_ports[<NUM_LIT:0>], client_ports[<NUM_LIT:1>], client_ports[<NUM_LIT:2>]))<EOL>self.context = zmq.Context()<EOL>self.task_incoming = self.context.socket(zmq.DEALER)<EOL>self.task_incoming.set_hwm(<NUM_LIT:0>)<EOL>self.task_incoming.RCVTIMEO = <NUM_LIT:10>  <EOL>self.task_incoming.connect(\"<STR_LIT>\".format(client_address, client_ports[<NUM_LIT:0>]))<EOL>self.results_outgoing = self.context.socket(zmq.DEALER)<EOL>self.results_outgoing.set_hwm(<NUM_LIT:0>)<EOL>self.results_outgoing.connect(\"<STR_LIT>\".format(client_address, client_ports[<NUM_LIT:1>]))<EOL>self.command_channel = self.context.socket(zmq.REP)<EOL>self.command_channel.RCVTIMEO = <NUM_LIT:1000>  <EOL>self.command_channel.connect(\"<STR_LIT>\".format(client_address, client_ports[<NUM_LIT:2>]))<EOL>logger.info(\"<STR_LIT>\")<EOL>self.pending_task_queue = queue.Queue(maxsize=<NUM_LIT:10> ** <NUM_LIT:6>)<EOL>self.worker_ports = worker_ports<EOL>self.worker_port_range = worker_port_range<EOL>self.task_outgoing = self.context.socket(zmq.ROUTER)<EOL>self.task_outgoing.set_hwm(<NUM_LIT:0>)<EOL>self.results_incoming = 
self.context.socket(zmq.ROUTER)<EOL>self.results_incoming.set_hwm(<NUM_LIT:0>)<EOL>if self.worker_ports:<EOL><INDENT>self.worker_task_port = self.worker_ports[<NUM_LIT:0>]<EOL>self.worker_result_port = self.worker_ports[<NUM_LIT:1>]<EOL>self.task_outgoing.bind(\"<STR_LIT>\".format(self.worker_task_port))<EOL>self.results_incoming.bind(\"<STR_LIT>\".format(self.worker_result_port))<EOL><DEDENT>else:<EOL><INDENT>self.worker_task_port = self.task_outgoing.bind_to_random_port('<STR_LIT>',<EOL>min_port=worker_port_range[<NUM_LIT:0>],<EOL>max_port=worker_port_range[<NUM_LIT:1>], max_tries=<NUM_LIT:100>)<EOL>self.worker_result_port = self.results_incoming.bind_to_random_port('<STR_LIT>',<EOL>min_port=worker_port_range[<NUM_LIT:0>],<EOL>max_port=worker_port_range[<NUM_LIT:1>], max_tries=<NUM_LIT:100>)<EOL><DEDENT>logger.info(\"<STR_LIT>\".format(<EOL>self.worker_task_port, self.worker_result_port))<EOL>self._ready_manager_queue = {}<EOL>self.heartbeat_threshold = heartbeat_threshold<EOL>self.current_platform = {'<STR_LIT>': PARSL_VERSION,<EOL>'<STR_LIT>': \"<STR_LIT>\".format(sys.version_info.major,<EOL>sys.version_info.minor,<EOL>sys.version_info.micro),<EOL>'<STR_LIT>': platform.system(),<EOL>'<STR_LIT>': platform.node(),<EOL>'<STR_LIT>': os.getcwd()}<EOL>logger.info(\"<STR_LIT>\".format(self.current_platform))<EOL>", "docstring": "Parameters\n----------\nclient_address : str\n     The ip address at which the parsl client can be reached. Default: \"127.0.0.1\"\n\ninterchange_address : str\n     The ip address at which the workers will be able to reach the Interchange. Default: \"127.0.0.1\"\n\nclient_ports : triple(int, int, int)\n     The ports at which the client can be reached\n\nworker_ports : tuple(int, int)\n     The specific two ports at which workers will connect to the Interchange. 
Default: None\n\nworker_port_range : tuple(int, int)\n     The interchange picks ports at random from the range which will be used by workers.\n     This is overridden when the worker_ports option is set. Defauls: (54000, 55000)\n\nheartbeat_threshold : int\n     Number of seconds since the last heartbeat after which worker is considered lost.\n\nlogdir : str\n     Parsl log directory paths. Logs and temp files go here. Default: '.'\n\nlogging_level : int\n     Logging level as defined in the logging module. Default: logging.INFO (20)\n\npoll_period : int\n     The main thread polling period, in milliseconds. Default: 10ms\n\nsuppress_failure : Bool\n     When set to True, the interchange will attempt to suppress failures. Default: False", "id": "f2821:c3:m0"}
{"signature": "def start(self, poll_period=None):", "body": "logger.info(\"<STR_LIT>\")<EOL>if poll_period is None:<EOL><INDENT>poll_period = self.poll_period<EOL><DEDENT>start = time.time()<EOL>count = <NUM_LIT:0><EOL>self._kill_event = threading.Event()<EOL>self._task_puller_thread = threading.Thread(target=self.migrate_tasks_to_internal,<EOL>args=(self._kill_event,))<EOL>self._task_puller_thread.start()<EOL>self._command_thread = threading.Thread(target=self._command_server,<EOL>args=(self._kill_event,))<EOL>self._command_thread.start()<EOL>poller = zmq.Poller()<EOL>poller.register(self.task_outgoing, zmq.POLLIN)<EOL>poller.register(self.results_incoming, zmq.POLLIN)<EOL>interesting_managers = set()<EOL>while not self._kill_event.is_set():<EOL><INDENT>self.socks = dict(poller.poll(timeout=poll_period))<EOL>if self.task_outgoing in self.socks and self.socks[self.task_outgoing] == zmq.POLLIN:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>message = self.task_outgoing.recv_multipart()<EOL>manager = message[<NUM_LIT:0>]<EOL>if manager not in self._ready_manager_queue:<EOL><INDENT>reg_flag = False<EOL>try:<EOL><INDENT>msg = json.loads(message[<NUM_LIT:1>].decode('<STR_LIT:utf-8>'))<EOL>reg_flag = True<EOL><DEDENT>except Exception:<EOL><INDENT>logger.warning(\"<STR_LIT>\".format(<EOL>manager))<EOL>logger.debug(\"<STR_LIT>\".format(message[<NUM_LIT:0>]))<EOL><DEDENT>self._ready_manager_queue[manager] = {'<STR_LIT>': time.time(),<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': True,<EOL>'<STR_LIT>': []}<EOL>if reg_flag is True:<EOL><INDENT>interesting_managers.add(manager)<EOL>logger.info(\"<STR_LIT>\".format(manager))<EOL>self._ready_manager_queue[manager].update(msg)<EOL>logger.info(\"<STR_LIT>\".format(manager, msg))<EOL>if (msg['<STR_LIT>'].rsplit(\"<STR_LIT:.>\", <NUM_LIT:1>)[<NUM_LIT:0>] != self.current_platform['<STR_LIT>'].rsplit(\"<STR_LIT:.>\", <NUM_LIT:1>)[<NUM_LIT:0>] or<EOL>msg['<STR_LIT>'] != 
self.current_platform['<STR_LIT>']):<EOL><INDENT>logger.warn(\"<STR_LIT>\".format(manager))<EOL>if self.suppress_failure is False:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self._kill_event.set()<EOL>e = ManagerLost(manager)<EOL>result_package = {'<STR_LIT>': -<NUM_LIT:1>, '<STR_LIT>': serialize_object(e)}<EOL>pkl_package = pickle.dumps(result_package)<EOL>self.results_outgoing.send(pkl_package)<EOL>logger.warning(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.info(\"<STR_LIT>\".format(manager, msg['<STR_LIT>']))<EOL>logger.info(\"<STR_LIT>\".format(manager,<EOL>msg['<STR_LIT>'].rsplit(\"<STR_LIT:.>\", <NUM_LIT:1>)[<NUM_LIT:0>]))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if self.suppress_failure is False:<EOL><INDENT>self._kill_event.set()<EOL>e = BadRegistration(manager, critical=True)<EOL>result_package = {'<STR_LIT>': -<NUM_LIT:1>, '<STR_LIT>': serialize_object(e)}<EOL>pkl_package = pickle.dumps(result_package)<EOL>self.results_outgoing.send(pkl_package)<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(<EOL>manager))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>tasks_requested = int.from_bytes(message[<NUM_LIT:1>], \"<STR_LIT>\")<EOL>self._ready_manager_queue[manager]['<STR_LIT>'] = time.time()<EOL>if tasks_requested == HEARTBEAT_CODE:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(manager))<EOL>self.task_outgoing.send_multipart([manager, b'<STR_LIT>', PKL_HEARTBEAT_CODE])<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(manager, tasks_requested))<EOL>self._ready_manager_queue[manager]['<STR_LIT>'] = tasks_requested<EOL>interesting_managers.add(manager)<EOL><DEDENT><DEDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>logger.debug(\"<STR_LIT>\".format(len(self._ready_manager_queue),<EOL>len(interesting_managers)))<EOL>if interesting_managers and not self.pending_task_queue.empty():<EOL><INDENT>shuffled_managers = 
list(interesting_managers)<EOL>random.shuffle(shuffled_managers)<EOL>while shuffled_managers and not self.pending_task_queue.empty():  <EOL><INDENT>manager = shuffled_managers.pop()<EOL>tasks_inflight = len(self._ready_manager_queue[manager]['<STR_LIT>'])<EOL>real_capacity = min(self._ready_manager_queue[manager]['<STR_LIT>'],<EOL>self._ready_manager_queue[manager]['<STR_LIT>'] - tasks_inflight)<EOL>if (real_capacity and self._ready_manager_queue[manager]['<STR_LIT>']):<EOL><INDENT>tasks = self.get_tasks(real_capacity)<EOL>if tasks:<EOL><INDENT>self.task_outgoing.send_multipart([manager, b'<STR_LIT>', pickle.dumps(tasks)])<EOL>task_count = len(tasks)<EOL>count += task_count<EOL>tids = [t['<STR_LIT>'] for t in tasks]<EOL>self._ready_manager_queue[manager]['<STR_LIT>'] -= task_count<EOL>self._ready_manager_queue[manager]['<STR_LIT>'].extend(tids)<EOL>logger.debug(\"<STR_LIT>\".format(tids, manager))<EOL>if self._ready_manager_queue[manager]['<STR_LIT>'] > <NUM_LIT:0>:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(manager, self._ready_manager_queue[manager]['<STR_LIT>']))<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(manager))<EOL>interesting_managers.remove(manager)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>interesting_managers.remove(manager)<EOL><DEDENT><DEDENT>logger.debug(\"<STR_LIT>\".format(len(interesting_managers)))<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>if self.results_incoming in self.socks and self.socks[self.results_incoming] == zmq.POLLIN:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>manager, *b_messages = self.results_incoming.recv_multipart()<EOL>if manager not in self._ready_manager_queue:<EOL><INDENT>logger.warning(\"<STR_LIT>\".format(manager))<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(len(b_messages)))<EOL>for b_message in b_messages:<EOL><INDENT>r = 
pickle.loads(b_message)<EOL>self._ready_manager_queue[manager]['<STR_LIT>'].remove(r['<STR_LIT>'])<EOL><DEDENT>self.results_outgoing.send_multipart(b_messages)<EOL>logger.debug(\"<STR_LIT>\".format(self._ready_manager_queue[manager]['<STR_LIT>']))<EOL><DEDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>bad_managers = [manager for manager in self._ready_manager_queue if<EOL>time.time() - self._ready_manager_queue[manager]['<STR_LIT>'] > self.heartbeat_threshold]<EOL>for manager in bad_managers:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(self._ready_manager_queue[manager]['<STR_LIT>'], time.time()))<EOL>logger.warning(\"<STR_LIT>\".format(manager))<EOL>for tid in self._ready_manager_queue[manager]['<STR_LIT>']:<EOL><INDENT>try:<EOL><INDENT>raise ManagerLost(manager)<EOL><DEDENT>except Exception:<EOL><INDENT>result_package = {'<STR_LIT>': tid, '<STR_LIT>': serialize_object(RemoteExceptionWrapper(*sys.exc_info()))}<EOL>pkl_package = pickle.dumps(result_package)<EOL>self.results_outgoing.send(pkl_package)<EOL>logger.warning(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>self._ready_manager_queue.pop(manager, '<STR_LIT:None>')<EOL><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>delta = time.time() - start<EOL>logger.info(\"<STR_LIT>\".format(count, delta))<EOL>logger.warning(\"<STR_LIT>\")<EOL>", "docstring": "Start the NeedNameQeueu\n\n        Parameters:\n        ----------\n\n        TODO: Move task receiving to a thread", "id": "f2821:c3:m4"}
{"signature": "def start_file_logger(filename, name='<STR_LIT>', level=logging.DEBUG, format_string=None):", "body": "if format_string is None:<EOL><INDENT>format_string = \"<STR_LIT>\"<EOL><DEDENT>global logger<EOL>logger = logging.getLogger(name)<EOL>logger.setLevel(level)<EOL>handler = logging.FileHandler(filename)<EOL>handler.setLevel(level)<EOL>formatter = logging.Formatter(format_string, datefmt='<STR_LIT>')<EOL>handler.setFormatter(formatter)<EOL>logger.addHandler(handler)<EOL>", "docstring": "Add a stream log handler.\n\n    Parameters\n    ---------\n\n    filename: string\n        Name of the file to write logs to. Required.\n    name: string\n        Logger name. Default=\"parsl.executors.interchange\"\n    level: logging.LEVEL\n        Set the logging level. Default=logging.DEBUG\n        - format_string (string): Set the format string\n    format_string: string\n        Format string to use.\n\n    Returns\n    -------\n        None.", "id": "f2821:m0"}
{"signature": "def get_tasks(self, count):", "body": "tasks = []<EOL>for i in range(<NUM_LIT:0>, count):<EOL><INDENT>try:<EOL><INDENT>x = self.pending_task_queue.get(block=False)<EOL><DEDENT>except queue.Empty:<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>tasks.append(x)<EOL><DEDENT><DEDENT>return tasks<EOL>", "docstring": "Obtains a batch of tasks from the internal pending_task_queue\n\n        Parameters\n        ----------\n        count: int\n            Count of tasks to get from the queue\n\n        Returns\n        -------\n        List of upto count tasks. May return fewer than count down to an empty list\n            eg. [{'task_id':<x>, 'buffer':<buf>} ... ]", "id": "f2821:c3:m1"}
{"signature": "def _start_local_queue_process(self):", "body": "comm_q = Queue(maxsize=<NUM_LIT:10>)<EOL>self.queue_proc = Process(target=interchange.starter,<EOL>args=(comm_q,),<EOL>kwargs={\"<STR_LIT>\": (self.outgoing_q.port,<EOL>self.incoming_q.port,<EOL>self.command_client.port),<EOL>\"<STR_LIT>\": self.worker_ports,<EOL>\"<STR_LIT>\": self.worker_port_range,<EOL>\"<STR_LIT>\": \"<STR_LIT>\".format(self.run_dir, self.label),<EOL>\"<STR_LIT>\": self.suppress_failure,<EOL>\"<STR_LIT>\": self.heartbeat_threshold,<EOL>\"<STR_LIT>\": self.poll_period,<EOL>\"<STR_LIT>\": logging.DEBUG if self.worker_debug else logging.INFO<EOL>},<EOL>)<EOL>self.queue_proc.start()<EOL>try:<EOL><INDENT>(worker_task_port, worker_result_port) = comm_q.get(block=True, timeout=<NUM_LIT>)<EOL><DEDENT>except queue.Empty:<EOL><INDENT>logger.error(\"<STR_LIT>\")<EOL>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>self.worker_task_url = \"<STR_LIT>\".format(self.address, worker_task_port)<EOL>self.worker_result_url = \"<STR_LIT>\".format(self.address, worker_result_port)<EOL>", "docstring": "Starts the interchange process locally\n\n        Starts the interchange process locally and uses an internal command queue to\n        get the worker task and result ports that the interchange has bound to.", "id": "f2822:c0:m5"}
{"signature": "def _queue_management_worker(self):", "body": "logger.debug(\"<STR_LIT>\")<EOL>while not self._executor_bad_state.is_set():<EOL><INDENT>try:<EOL><INDENT>msgs = self.incoming_q.get(timeout=<NUM_LIT:1>)<EOL><DEDENT>except queue.Empty:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>pass<EOL><DEDENT>except IOError as e:<EOL><INDENT>logger.exception(\"<STR_LIT>\".format(e.errno, e))<EOL>return<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.exception(\"<STR_LIT>\".format(e))<EOL>return<EOL><DEDENT>else:<EOL><INDENT>if msgs is None:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>else:<EOL><INDENT>for serialized_msg in msgs:<EOL><INDENT>try:<EOL><INDENT>msg = pickle.loads(serialized_msg)<EOL>tid = msg['<STR_LIT>']<EOL><DEDENT>except pickle.UnpicklingError:<EOL><INDENT>raise BadMessage(\"<STR_LIT>\")<EOL><DEDENT>except Exception:<EOL><INDENT>raise BadMessage(\"<STR_LIT>\")<EOL><DEDENT>if tid == -<NUM_LIT:1> and '<STR_LIT>' in msg:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL>self._executor_exception, _ = deserialize_object(msg['<STR_LIT>'])<EOL>logger.exception(\"<STR_LIT>\".format(self._executor_exception))<EOL>self._executor_bad_state.set()<EOL>for task in self.tasks:<EOL><INDENT>self.tasks[task].set_exception(self._executor_exception)<EOL><DEDENT>break<EOL><DEDENT>task_fut = self.tasks[tid]<EOL>if '<STR_LIT:result>' in msg:<EOL><INDENT>result, _ = deserialize_object(msg['<STR_LIT:result>'])<EOL>task_fut.set_result(result)<EOL><DEDENT>elif '<STR_LIT>' in msg:<EOL><INDENT>try:<EOL><INDENT>s, _ = deserialize_object(msg['<STR_LIT>'])<EOL>try:<EOL><INDENT>s.reraise()<EOL><DEDENT>except Exception as e:<EOL><INDENT>task_fut.set_exception(e)<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>task_fut.set_exception(<EOL>DeserializationError(\"<STR_LIT>\".format(e)))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise BadMessage(\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if not 
self.is_alive:<EOL><INDENT>break<EOL><DEDENT><DEDENT>logger.info(\"<STR_LIT>\")<EOL>", "docstring": "Listen to the queue for task status messages and handle them.\n\n        Depending on the message, tasks will be updated with results, exceptions,\n        or updates. It expects the following messages:\n\n        .. code:: python\n\n            {\n               \"task_id\" : <task_id>\n               \"result\"  : serialized result object, if task succeeded\n               ... more tags could be added later\n            }\n\n            {\n               \"task_id\" : <task_id>\n               \"exception\" : serialized exception object, on failure\n            }\n\n        We do not support these yet, but they could be added easily.\n\n        .. code:: python\n\n            {\n               \"task_id\" : <task_id>\n               \"cpu_stat\" : <>\n               \"mem_stat\" : <>\n               \"io_stat\"  : <>\n               \"started\"  : tstamp\n            }\n\n        The `None` message is a die request.", "id": "f2822:c0:m3"}
{"signature": "def _hold_block(self, block_id):", "body": "managers = self.connected_managers<EOL>for manager in managers:<EOL><INDENT>if manager['<STR_LIT>'] == block_id:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(manager['<STR_LIT>']))<EOL>self.hold_worker(manager['<STR_LIT>'])<EOL><DEDENT><DEDENT>", "docstring": "Sends hold command to all managers which are in a specific block\n\n        Parameters\n        ----------\n        block_id : str\n             Block identifier of the block to be put on hold", "id": "f2822:c0:m11"}
{"signature": "def initialize_scaling(self):", "body": "debug_opts = \"<STR_LIT>\" if self.worker_debug else \"<STR_LIT>\"<EOL>max_workers = \"<STR_LIT>\" if self.max_workers == float('<STR_LIT>') else \"<STR_LIT>\".format(self.max_workers)<EOL>worker_logdir = \"<STR_LIT>\".format(self.run_dir, self.label)<EOL>if self.worker_logdir_root is not None:<EOL><INDENT>worker_logdir = \"<STR_LIT>\".format(self.worker_logdir_root, self.label)<EOL><DEDENT>l_cmd = self.launch_cmd.format(debug=debug_opts,<EOL>prefetch_capacity=self.prefetch_capacity,<EOL>task_url=self.worker_task_url,<EOL>result_url=self.worker_result_url,<EOL>cores_per_worker=self.cores_per_worker,<EOL>max_workers=max_workers,<EOL>nodes_per_block=self.provider.nodes_per_block,<EOL>heartbeat_period=self.heartbeat_period,<EOL>heartbeat_threshold=self.heartbeat_threshold,<EOL>poll_period=self.poll_period,<EOL>logdir=worker_logdir)<EOL>self.launch_cmd = l_cmd<EOL>logger.debug(\"<STR_LIT>\".format(self.launch_cmd))<EOL>self._scaling_enabled = self.provider.scaling_enabled<EOL>logger.debug(\"<STR_LIT>\", self.provider)<EOL>if hasattr(self.provider, '<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>self.scale_out(blocks=self.provider.init_blocks)<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.error(\"<STR_LIT>\".format(e))<EOL>raise e<EOL><DEDENT><DEDENT>", "docstring": "Compose the launch command and call the scale_out\n\n        This should be implemented in the child classes to take care of\n        executor specific oddities.", "id": "f2822:c0:m1"}
{"signature": "def hold_worker(self, worker_id):", "body": "c = self.command_client.run(\"<STR_LIT>\".format(worker_id))<EOL>logger.debug(\"<STR_LIT>\".format(worker_id))<EOL>return c<EOL>", "docstring": "Puts a worker on hold, preventing scheduling of additional tasks to it.\n\n        This is called \"hold\" mostly because this only stops scheduling of tasks,\n        and does not actually kill the worker.\n\n        Parameters\n        ----------\n\n        worker_id : str\n            Worker id to be put on hold", "id": "f2822:c0:m7"}
{"signature": "def _start_queue_management_thread(self):", "body": "if self._queue_management_thread is None:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self._queue_management_thread = threading.Thread(target=self._queue_management_worker)<EOL>self._queue_management_thread.daemon = True<EOL>self._queue_management_thread.start()<EOL>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Method to start the management thread as a daemon.\n\n        Checks if a thread already exists, then starts it.\n        Could be used later as a restart if the management thread dies.", "id": "f2822:c0:m6"}
{"signature": "def scale_out(self, blocks=<NUM_LIT:1>):", "body": "r = []<EOL>for i in range(blocks):<EOL><INDENT>if self.provider:<EOL><INDENT>external_block_id = str(len(self.blocks))<EOL>launch_cmd = self.launch_cmd.format(block_id=external_block_id)<EOL>internal_block = self.provider.submit(launch_cmd, <NUM_LIT:1>, <NUM_LIT:1>)<EOL>logger.debug(\"<STR_LIT>\".format(external_block_id, internal_block))<EOL>if not internal_block:<EOL><INDENT>raise(ScalingFailed(self.provider.label,<EOL>\"<STR_LIT>\"))<EOL><DEDENT>r.extend([external_block_id])<EOL>self.blocks[external_block_id] = internal_block<EOL><DEDENT>else:<EOL><INDENT>logger.error(\"<STR_LIT>\")<EOL>r = None<EOL><DEDENT><DEDENT>return r<EOL>", "docstring": "Scales out the number of blocks by \"blocks\"\n\n        Raises:\n             NotImplementedError", "id": "f2822:c0:m14"}
{"signature": "def start(self):", "body": "self.outgoing_q = zmq_pipes.TasksOutgoing(\"<STR_LIT:127.0.0.1>\", self.interchange_port_range)<EOL>self.incoming_q = zmq_pipes.ResultsIncoming(\"<STR_LIT:127.0.0.1>\", self.interchange_port_range)<EOL>self.command_client = zmq_pipes.CommandClient(\"<STR_LIT:127.0.0.1>\", self.interchange_port_range)<EOL>self.is_alive = True<EOL>self._executor_bad_state = threading.Event()<EOL>self._executor_exception = None<EOL>self._queue_management_thread = None<EOL>self._start_queue_management_thread()<EOL>self._start_local_queue_process()<EOL>logger.debug(\"<STR_LIT>\".format(self._queue_management_thread))<EOL>if self.provider:<EOL><INDENT>self.initialize_scaling()<EOL><DEDENT>else:<EOL><INDENT>self._scaling_enabled = False<EOL>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Create the Interchange process and connect to it.", "id": "f2822:c0:m2"}
{"signature": "def pull_tasks(self, kill_event):", "body": "logger.info(\"<STR_LIT>\")<EOL>poller = zmq.Poller()<EOL>poller.register(self.task_incoming, zmq.POLLIN)<EOL>msg = self.create_reg_message()<EOL>logger.debug(\"<STR_LIT>\".format(msg))<EOL>self.task_incoming.send(msg)<EOL>last_beat = time.time()<EOL>last_interchange_contact = time.time()<EOL>task_recv_counter = <NUM_LIT:0><EOL>poll_timer = <NUM_LIT:1><EOL>while not kill_event.is_set():<EOL><INDENT>time.sleep(LOOP_SLOWDOWN)<EOL>ready_worker_count = self.ready_worker_queue.qsize()<EOL>pending_task_count = self.pending_task_queue.qsize()<EOL>logger.debug(\"<STR_LIT>\".format(ready_worker_count,<EOL>pending_task_count))<EOL>if time.time() > last_beat + self.heartbeat_period:<EOL><INDENT>self.heartbeat()<EOL>last_beat = time.time()<EOL><DEDENT>if pending_task_count < self.max_queue_size and ready_worker_count > <NUM_LIT:0>:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(ready_worker_count))<EOL>msg = ((ready_worker_count).to_bytes(<NUM_LIT:4>, \"<STR_LIT>\"))<EOL>self.task_incoming.send(msg)<EOL><DEDENT>socks = dict(poller.poll(timeout=poll_timer))<EOL>if self.task_incoming in socks and socks[self.task_incoming] == zmq.POLLIN:<EOL><INDENT>_, pkl_msg = self.task_incoming.recv_multipart()<EOL>tasks = pickle.loads(pkl_msg)<EOL>last_interchange_contact = time.time()<EOL>if tasks == '<STR_LIT>':<EOL><INDENT>logger.critical(\"<STR_LIT>\")<EOL>kill_event.set()<EOL>break<EOL><DEDENT>elif tasks == HEARTBEAT_CODE:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>poll_timer = <NUM_LIT:1><EOL>task_recv_counter += len(tasks)<EOL>logger.debug(\"<STR_LIT>\".format([t['<STR_LIT>'] for t in tasks],<EOL>task_recv_counter))<EOL>for task in tasks:<EOL><INDENT>self.pending_task_queue.put(task)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>poll_timer = min(self.heartbeat_period * <NUM_LIT:1000>, poll_timer * <NUM_LIT:2>)<EOL>if time.time() > last_interchange_contact + 
self.heartbeat_threshold:<EOL><INDENT>logger.critical(\"<STR_LIT>\")<EOL>kill_event.set()<EOL>logger.critical(\"<STR_LIT>\")<EOL>break<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Pulls tasks from the incoming tasks 0mq pipe onto the internal\n        pending task queue\n\n        Parameters:\n        -----------\n        kill_event : threading.Event\n              Event to let the thread know when it is time to die.", "id": "f2823:c0:m5"}
{"signature": "def set_stream_logger(name='<STR_LIT>', level=logging.DEBUG, format_string=None):", "body": "if format_string is None:<EOL><INDENT>format_string = \"<STR_LIT>\"<EOL><DEDENT>global logger<EOL>logger = logging.getLogger(name)<EOL>logger.setLevel(logging.DEBUG)<EOL>handler = logging.StreamHandler()<EOL>handler.setLevel(level)<EOL>formatter = logging.Formatter(format_string, datefmt='<STR_LIT>')<EOL>handler.setFormatter(formatter)<EOL>logger.addHandler(handler)<EOL>", "docstring": "Add a stream log handler.\n\n    Args:\n         - name (string) : Set the logger name.\n         - level (logging.LEVEL) : Set to logging.DEBUG by default.\n         - format_string (sting) : Set to None by default.\n\n    Returns:\n         - None", "id": "f2823:m3"}
{"signature": "def recv_result_from_workers(self):", "body": "info = MPI.Status()<EOL>result = self.comm.recv(source=MPI.ANY_SOURCE, tag=RESULT_TAG, status=info)<EOL>logger.debug(\"<STR_LIT>\".format(result))<EOL>return result<EOL>", "docstring": "Receives a results from the MPI worker pool and send it out via 0mq\n\n        Returns:\n        --------\n            result: task result from the workers", "id": "f2823:c0:m3"}
{"signature": "def __call__(self, command, tasks_per_node, nodes_per_block, walltime=None):", "body": "task_blocks = tasks_per_node * nodes_per_block<EOL>x =", "docstring": "Args:\n- command (string): The command string to be launched\n- task_block (string) : bash evaluated string.\n\nKWargs:\n- walltime (int) : This is not used by this launcher.", "id": "f2826:c5:m1"}
{"signature": "def __call__(self, command, tasks_per_node, nodes_per_block, walltime=None):", "body": "task_blocks = tasks_per_node * nodes_per_block<EOL>x =", "docstring": "Args:\n- command (string): The command string to be launched\n- task_block (string) : bash evaluated string.\n\nKWargs:\n- walltime (int) : This is not used by this launcher.", "id": "f2826:c6:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def __call__(self, command, tasks_per_node, nodes_per_block, walltime=None):<DEDENT>", "body": "pass<EOL>", "docstring": "Wraps the command with the Launcher calls.\n        *MUST* be implemented by the concrete child classes", "id": "f2826:c0:m0"}
{"signature": "def __call__(self, command, tasks_per_node, nodes_per_block, walltime=None):", "body": "task_blocks = tasks_per_node * nodes_per_block<EOL>x =", "docstring": "Args:\n- command (string): The command string to be launched\n- task_block (string) : bash evaluated string.\n\nKWargs:\n- walltime (int) : This is not used by this launcher.", "id": "f2826:c4:m0"}
{"signature": "def get_last_checkpoint(rundir=\"<STR_LIT>\"):", "body": "if not os.path.isdir(rundir):<EOL><INDENT>return []<EOL><DEDENT>dirs = sorted(os.listdir(rundir))<EOL>if len(dirs) == <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>last_runid = dirs[-<NUM_LIT:1>]<EOL>last_checkpoint = os.path.abspath('<STR_LIT>'.format(rundir, last_runid))<EOL>if(not(os.path.isdir(last_checkpoint))):<EOL><INDENT>return []<EOL><DEDENT>return [last_checkpoint]<EOL>", "docstring": "Find the checkpoint from the last run, if one exists.\n\n    Note that checkpoints are incremental, and this helper will not find\n    previous checkpoints from earlier than the most recent run. It probably\n    should be made to do so.\n\n    Kwargs:\n       - rundir(str) : Path to the runinfo directory\n\n    Returns:\n     - a list suitable for checkpointFiles parameter of DataFlowKernel\n       constructor, with 0 or 1 elements", "id": "f2829:m2"}
{"signature": "def get_all_checkpoints(rundir=\"<STR_LIT>\"):", "body": "if(not os.path.isdir(rundir)):<EOL><INDENT>return []<EOL><DEDENT>dirs = sorted(os.listdir(rundir))<EOL>checkpoints = []<EOL>for runid in dirs:<EOL><INDENT>checkpoint = os.path.abspath('<STR_LIT>'.format(rundir, runid))<EOL>if os.path.isdir(checkpoint):<EOL><INDENT>checkpoints.append(checkpoint)<EOL><DEDENT><DEDENT>return checkpoints<EOL>", "docstring": "Finds the checkpoints from all last runs.\n\n    Note that checkpoints are incremental, and this helper will not find\n    previous checkpoints from earlier than the most recent run. It probably\n    should be made to do so.\n\n    Kwargs:\n       - rundir(str) : Path to the runinfo directory\n\n    Returns:\n       - a list suitable for the checkpointFiles parameter of DataFlowKernel\n         constructor", "id": "f2829:m1"}
{"signature": "def teardown_module(module):", "body": "p_this = Path(__file__)<EOL>to_remove_list = list(p_this.parent.select_by_ext(\"<STR_LIT>\"))<EOL>for p in to_remove_list:<EOL><INDENT>if p.exists():<EOL><INDENT>p.remove()<EOL><DEDENT><DEDENT>", "docstring": "Remove temp file and dir for test.", "id": "f2875:m0"}
{"signature": "def setup_module(module):", "body": "p = Path(__file__).change(new_basename=\"<STR_LIT>\")<EOL>try:<EOL><INDENT>shutil.copytree(p.abspath, p.change(new_basename=\"<STR_LIT>\").abspath)<EOL><DEDENT>except Exception as e:<EOL><INDENT>pass<EOL><DEDENT>p = Path(__file__).change(new_basename=\"<STR_LIT>\")<EOL>with open(p.abspath, \"<STR_LIT:wb>\") as f:<EOL><INDENT>f.write(\"<STR_LIT>\".encode(\"<STR_LIT:utf-8>\"))<EOL><DEDENT>p = Path(__file__).change(new_basename=\"<STR_LIT>\")<EOL>with open(p.abspath, \"<STR_LIT:wb>\") as f:<EOL><INDENT>f.write(\"<STR_LIT>\".encode(\"<STR_LIT:utf-8>\"))<EOL><DEDENT>", "docstring": "Create temp file and dir for test.\n\n- create a new folder ``/wow``\n- create two file `/`wow/file_to_move.txt``, ``wow/file_to_copy.txt``", "id": "f2877:m0"}
{"signature": "def get_text_fingerprint(text, hash_meth, encoding=\"<STR_LIT:utf-8>\"):  ", "body": "m = hash_meth()<EOL>m.update(text.encode(encoding))<EOL>return m.hexdigest()<EOL>", "docstring": "Use default hash method to return hash value of a piece of string\ndefault setting use 'utf-8' encoding.", "id": "f2879:m0"}
{"signature": "def sha256file(abspath, nbytes=<NUM_LIT:0>, chunk_size=DEFAULT_CHUNK_SIZE):", "body": "return get_file_fingerprint(abspath, hashlib.sha256, nbytes=nbytes, chunk_size=chunk_size)<EOL>", "docstring": "Return sha256 hash value of a piece of a file\n\nEstimate processing time on:\n\n:param abspath: the absolute path to the file\n:param nbytes: only has first N bytes of the file. if 0 or None,\n  hash all file", "id": "f2879:m3"}
{"signature": "def get_partial_md5(self, nbytes):", "body": "return md5file(abspath=self.abspath, nbytes=nbytes)<EOL>", "docstring": "Return md5 check sum of first n bytes of this file.", "id": "f2880:c0:m0"}
{"signature": "def get_partial_sha256(self, nbytes):", "body": "return sha256file(abspath=self.abspath, nbytes=nbytes)<EOL>", "docstring": "Return sha256 check sum of first n bytes of this file.", "id": "f2880:c0:m2"}
{"signature": "def make_zip_archive(self,<EOL>dst=None,<EOL>filters=all_true,<EOL>compress=True,<EOL>overwrite=False,<EOL>makedirs=False,<EOL>verbose=False):  ", "body": "self.assert_exists()<EOL>if dst is None:<EOL><INDENT>dst = self._auto_zip_archive_dst()<EOL><DEDENT>else:<EOL><INDENT>dst = self.change(new_abspath=dst)<EOL><DEDENT>if not dst.basename.lower().endswith(\"<STR_LIT>\"):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if dst.exists():<EOL><INDENT>if not overwrite:<EOL><INDENT>raise IOError(\"<STR_LIT>\" % dst)<EOL><DEDENT><DEDENT>if compress:<EOL><INDENT>compression = ZIP_DEFLATED<EOL><DEDENT>else:<EOL><INDENT>compression = ZIP_STORED<EOL><DEDENT>if not dst.parent.exists():<EOL><INDENT>if makedirs:<EOL><INDENT>os.makedirs(dst.parent.abspath)<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>msg = \"<STR_LIT>\" % self<EOL>print(msg)<EOL><DEDENT>current_dir = os.getcwd()<EOL>if self.is_dir():<EOL><INDENT>total_size = <NUM_LIT:0><EOL>selected = list()<EOL>for p in self.glob(\"<STR_LIT>\"):<EOL><INDENT>if filters(p):<EOL><INDENT>selected.append(p)<EOL>total_size += p.size<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>msg = \"<STR_LIT>\".format(<EOL>len(selected), repr_data_size(total_size),<EOL>)<EOL>print(msg)<EOL><DEDENT>with ZipFile(dst.abspath, \"<STR_LIT:w>\", compression) as f:<EOL><INDENT>os.chdir(self.abspath)<EOL>for p in selected:<EOL><INDENT>relpath = p.relative_to(self).__str__()<EOL>f.write(relpath)<EOL><DEDENT><DEDENT><DEDENT>elif self.is_file():<EOL><INDENT>with ZipFile(dst.abspath, \"<STR_LIT:w>\", compression) as f:<EOL><INDENT>os.chdir(self.parent.abspath)<EOL>f.write(self.basename)<EOL><DEDENT><DEDENT>os.chdir(current_dir)<EOL>if verbose:<EOL><INDENT>msg = \"<STR_LIT>\".format(dst.size_in_text)<EOL>print(msg)<EOL><DEDENT>", "docstring": "Make a zip archive.\n\n:param dst: output file path. if not given, will be automatically assigned.\n:param filters: custom path filter. 
By default it allows any file.\n:param compress: compress or not.\n:param overwrite: overwrite exists or not.\n:param verbose: display log or not.\n:return:", "id": "f2881:c0:m1"}
{"signature": "def backup(self,<EOL>dst=None,<EOL>ignore=None,<EOL>ignore_ext=None,<EOL>ignore_pattern=None,<EOL>ignore_size_smaller_than=None,<EOL>ignore_size_larger_than=None,<EOL>case_sensitive=False):  ", "body": "def preprocess_arg(arg):  <EOL><INDENT>if arg is None:<EOL><INDENT>return []<EOL><DEDENT>if isinstance(arg, (tuple, list)):<EOL><INDENT>return list(arg)<EOL><DEDENT>else:<EOL><INDENT>return [arg, ]<EOL><DEDENT><DEDENT>self.assert_is_dir_and_exists()<EOL>ignore = preprocess_arg(ignore)<EOL>for i in ignore:<EOL><INDENT>if i.startswith(\"<STR_LIT:/>\") or i.startswith(\"<STR_LIT:\\\\>\"):<EOL><INDENT>raise ValueError<EOL><DEDENT><DEDENT>ignore_ext = preprocess_arg(ignore_ext)<EOL>for ext in ignore_ext:<EOL><INDENT>if not ext.startswith(\"<STR_LIT:.>\"):<EOL><INDENT>raise ValueError<EOL><DEDENT><DEDENT>ignore_pattern = preprocess_arg(ignore_pattern)<EOL>if case_sensitive:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>ignore = [i.lower() for i in ignore]<EOL>ignore_ext = [i.lower() for i in ignore_ext]<EOL>ignore_pattern = [i.lower() for i in ignore_pattern]<EOL><DEDENT>def filters(p):<EOL><INDENT>relpath = p.relative_to(self).abspath<EOL>if not case_sensitive:<EOL><INDENT>relpath = relpath.lower()<EOL><DEDENT>for i in ignore:<EOL><INDENT>if relpath.startswith(i):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>if case_sensitive:<EOL><INDENT>ext = p.ext<EOL><DEDENT>else:<EOL><INDENT>ext = p.ext.lower()<EOL><DEDENT>if ext in ignore_ext:<EOL><INDENT>return False<EOL><DEDENT>for pattern in ignore_pattern:<EOL><INDENT>if pattern in relpath:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>if ignore_size_smaller_than:<EOL><INDENT>if p.size < ignore_size_smaller_than:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>if ignore_size_larger_than:<EOL><INDENT>if p.size > ignore_size_larger_than:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL><DEDENT>self.make_zip_archive(<EOL>dst=dst, filters=filters, compress=True, overwrite=False, verbose=True,<EOL>)<EOL>", 
"docstring": "Create a compressed zip archive backup for a directory.\n\n:param dst: the output file path.\n:param ignore: file or directory defined in this list will be ignored.\n:param ignore_ext: file with extensions defined in this list will be ignored.\n:param ignore_pattern: any file or directory that contains this pattern\n    will be ignored.\n:param ignore_size_smaller_than: any file size smaller than this\n    will be ignored.\n:param ignore_size_larger_than: any file size larger than this\n    will be ignored.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u4e3a\u4e00\u4e2a\u76ee\u5f55\u521b\u5efa\u4e00\u4e2a\u5907\u4efd\u538b\u7f29\u5305\u3002\u53ef\u4ee5\u901a\u8fc7\u8fc7\u6ee4\u5668\u9009\u62e9\u4f60\u8981\u5907\u4efd\u7684\u6587\u4ef6\u3002", "id": "f2881:c0:m2"}
{"signature": "@property<EOL><INDENT>def fname_hexstr(self):<DEDENT>", "body": "return encode_hexstr(self.fname)<EOL>", "docstring": "File name encoded in hex string.", "id": "f2882:c0:m9"}
{"signature": "@property<EOL><INDENT>def mtime(self):<DEDENT>", "body": "try:<EOL><INDENT>return self._stat.st_mtime<EOL><DEDENT>except:  <EOL><INDENT>self._stat = self.stat()<EOL>return self.mtime<EOL><DEDENT>", "docstring": "Get most recent modify time in timestamp.", "id": "f2882:c0:m13"}
{"signature": "@property<EOL><INDENT>def basename_hexstr(self):<DEDENT>", "body": "return encode_hexstr(self.basename)<EOL>", "docstring": "File name with extension encoded in hex string.", "id": "f2882:c0:m7"}
{"signature": "@property<EOL><INDENT>def ctime(self):<DEDENT>", "body": "try:<EOL><INDENT>return self._stat.st_ctime<EOL><DEDENT>except:  <EOL><INDENT>self._stat = self.stat()<EOL>return self.ctime<EOL><DEDENT>", "docstring": "Get most recent create time in timestamp.", "id": "f2882:c0:m15"}
{"signature": "@property<EOL><INDENT>def create_datetime(self):<DEDENT>", "body": "return datetime.fromtimestamp(self.ctime)<EOL>", "docstring": "Get most recent create time in datetime.", "id": "f2882:c0:m18"}
{"signature": "@property<EOL><INDENT>def abspath(self):<DEDENT>", "body": "return self.absolute().__str__()<EOL>", "docstring": "r\"\"\"\n        Absolute path.\n\n        Example: ``C:\\User\\admin\\readme.txt`` for ``C:\\User\\admin\\readme.txt``", "id": "f2882:c0:m0"}
{"signature": "@property<EOL><INDENT>def dirpath_hexstr(self):<DEDENT>", "body": "return encode_hexstr(self.dirpath)<EOL>", "docstring": "Return dir full absolute path encoded in hex string.", "id": "f2882:c0:m3"}
{"signature": "@property<EOL><INDENT>def fname(self):<DEDENT>", "body": "return self.stem<EOL>", "docstring": "r\"\"\"\n        File name without extension.\n\n        Example: ``readme`` for ``C:\\User\\admin\\readme.txt``", "id": "f2882:c0:m8"}
{"signature": "@property<EOL><INDENT>def ext(self):<DEDENT>", "body": "return self.suffix<EOL>", "docstring": "r\"\"\"\n        File extension. If it's a dir, then return empty str.\n\n        Example: ``.txt`` for ``C:\\User\\admin\\readme.txt``", "id": "f2882:c0:m10"}
{"signature": "def touch(self, mode=<NUM_LIT>, exist_ok=True):", "body": "if self._closed:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>if exist_ok:<EOL><INDENT>try:<EOL><INDENT>self._accessor.utime(self, None)<EOL><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>return<EOL><DEDENT><DEDENT>flags = os.O_CREAT | os.O_WRONLY<EOL>if not exist_ok:<EOL><INDENT>flags |= os.O_EXCL<EOL><DEDENT>fd = self._raw_open(flags, mode)<EOL>os.close(fd)<EOL>", "docstring": "Create this file with the given access mode, if it doesn't exist.", "id": "f2883:c14:m24"}
{"signature": "def rmdir(self):", "body": "if self._closed:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>self._accessor.rmdir(self)<EOL>", "docstring": "Remove this directory.  The directory must be empty.", "id": "f2883:c14:m29"}
{"signature": "def joinpath(self, *args):", "body": "return self._make_child(args)<EOL>", "docstring": "Combine this path with one or several arguments, and return a\n        new path representing either a subpath (if all arguments are relative\n        paths) or a totally different path (if one of the arguments is\n        anchored).", "id": "f2883:c11:m31"}
{"signature": "def with_suffix(self, suffix):", "body": "<EOL>f = self._flavour<EOL>if f.sep in suffix or f.altsep and f.altsep in suffix:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (suffix))<EOL><DEDENT>if suffix and not suffix.startswith('<STR_LIT:.>') or suffix == '<STR_LIT:.>':<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (suffix))<EOL><DEDENT>name = self.name<EOL>if not name:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (self,))<EOL><DEDENT>old_suffix = self.suffix<EOL>if not old_suffix:<EOL><INDENT>name = name + suffix<EOL><DEDENT>else:<EOL><INDENT>name = name[:-len(old_suffix)] + suffix<EOL><DEDENT>return self._from_parsed_parts(self._drv, self._root,<EOL>self._parts[:-<NUM_LIT:1>] + [name])<EOL>", "docstring": "Return a new path with the file suffix changed (or added, if\n        none).", "id": "f2883:c11:m28"}
{"signature": "def chmod(self, mode):", "body": "if self._closed:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>self._accessor.chmod(self, mode)<EOL>", "docstring": "Change the permissions of the path, like os.chmod().", "id": "f2883:c14:m26"}
{"signature": "def is_fifo(self):", "body": "try:<EOL><INDENT>return S_ISFIFO(self.stat().st_mode)<EOL><DEDENT>except OSError as e:<EOL><INDENT>if e.errno not in (ENOENT, ENOTDIR):<EOL><INDENT>raise<EOL><DEDENT>return False<EOL><DEDENT>", "docstring": "Whether this path is a FIFO.", "id": "f2883:c14:m40"}
{"signature": "def stat(self):", "body": "return self._accessor.stat(self)<EOL>", "docstring": "Return the result of the stat() system call on this path, like\nos.stat() does.", "id": "f2883:c14:m16"}
{"signature": "def match(self, path_pattern):", "body": "cf = self._flavour.casefold<EOL>path_pattern = cf(path_pattern)<EOL>drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))<EOL>if not pat_parts:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if drv and drv != cf(self._drv):<EOL><INDENT>return False<EOL><DEDENT>if root and root != cf(self._root):<EOL><INDENT>return False<EOL><DEDENT>parts = self._cparts<EOL>if drv or root:<EOL><INDENT>if len(pat_parts) != len(parts):<EOL><INDENT>return False<EOL><DEDENT>pat_parts = pat_parts[<NUM_LIT:1>:]<EOL><DEDENT>elif len(pat_parts) > len(parts):<EOL><INDENT>return False<EOL><DEDENT>for part, pat in zip(reversed(parts), reversed(pat_parts)):<EOL><INDENT>if not fnmatch.fnmatchcase(part, pat):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Return True if this path matches the given pattern.", "id": "f2883:c11:m38"}
{"signature": "def rename(self, target):", "body": "if self._closed:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>self._accessor.rename(self, target)<EOL>", "docstring": "Rename this path to the given path.", "id": "f2883:c14:m31"}
{"signature": "def exists(self):", "body": "try:<EOL><INDENT>self.stat()<EOL><DEDENT>except OSError as e:<EOL><INDENT>if e.errno not in (ENOENT, ENOTDIR):<EOL><INDENT>raise<EOL><DEDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Whether this path exists.", "id": "f2883:c14:m34"}
{"signature": "def __str__(self):", "body": "try:<EOL><INDENT>return self._str<EOL><DEDENT>except AttributeError:<EOL><INDENT>self._str = self._format_parsed_parts(self._drv, self._root,<EOL>self._parts) or '<STR_LIT:.>'<EOL>return self._str<EOL><DEDENT>", "docstring": "Return the string representation of the path, suitable for\n        passing to system calls.", "id": "f2883:c11:m8"}
{"signature": "@property<EOL><INDENT>def parent(self):<DEDENT>", "body": "drv = self._drv<EOL>root = self._root<EOL>parts = self._parts<EOL>if len(parts) == <NUM_LIT:1> and (drv or root):<EOL><INDENT>return self<EOL><DEDENT>return self._from_parsed_parts(drv, root, parts[:-<NUM_LIT:1>])<EOL>", "docstring": "The logical parent of the path.", "id": "f2883:c11:m34"}
{"signature": "def write_bytes(self, data):", "body": "if not isinstance(data, six.binary_type):<EOL><INDENT>raise TypeError(<EOL>'<STR_LIT>' %<EOL>(six.binary_type.__name__, data.__class__.__name__))<EOL><DEDENT>with self.open(mode='<STR_LIT:wb>') as f:<EOL><INDENT>return f.write(data)<EOL><DEDENT>", "docstring": "Open the file in bytes mode, write to it, and close the file.", "id": "f2883:c14:m22"}
{"signature": "def replace(self, target):", "body": "if sys.version_info < (<NUM_LIT:3>, <NUM_LIT:3>):<EOL><INDENT>raise NotImplementedError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>if self._closed:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>self._accessor.replace(self, target)<EOL>", "docstring": "Rename this path to the given path, clobbering the existing\ndestination if it exists.", "id": "f2883:c14:m32"}
{"signature": "@property<EOL><INDENT>def suffixes(self):<DEDENT>", "body": "name = self.name<EOL>if name.endswith('<STR_LIT:.>'):<EOL><INDENT>return []<EOL><DEDENT>name = name.lstrip('<STR_LIT:.>')<EOL>return ['<STR_LIT:.>' + suffix for suffix in name.split('<STR_LIT:.>')[<NUM_LIT:1>:]]<EOL>", "docstring": "A list of the final component's suffixes, if any.", "id": "f2883:c11:m25"}
{"signature": "@property<EOL><INDENT>def name(self):<DEDENT>", "body": "parts = self._parts<EOL>if len(parts) == (<NUM_LIT:1> if (self._drv or self._root) else <NUM_LIT:0>):<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return parts[-<NUM_LIT:1>]<EOL>", "docstring": "The final path component, if any.", "id": "f2883:c11:m23"}
{"signature": "def lchmod(self, mode):", "body": "if self._closed:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>self._accessor.lchmod(self, mode)<EOL>", "docstring": "Like chmod(), except if the path points to a symlink, the symlink's\npermissions are changed, rather than its target's.", "id": "f2883:c14:m27"}
{"signature": "def is_symlink(self):", "body": "try:<EOL><INDENT>return S_ISLNK(self.lstat().st_mode)<EOL><DEDENT>except OSError as e:<EOL><INDENT>if e.errno not in (ENOENT, ENOTDIR):<EOL><INDENT>raise<EOL><DEDENT>return False<EOL><DEDENT>", "docstring": "Whether this path is a symbolic link.", "id": "f2883:c14:m37"}
{"signature": "def is_reserved(self):", "body": "return self._flavour.is_reserved(self._parts)<EOL>", "docstring": "Return True if the path contains one of the special names reserved\n        by the system, if any.", "id": "f2883:c11:m37"}
{"signature": "def symlink_to(self, target, target_is_directory=False):", "body": "if self._closed:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>self._accessor.symlink(target, self, target_is_directory)<EOL>", "docstring": "Make this path a symlink pointing to the given path.\nNote the order of arguments (self, target) is the reverse of\nos.symlink's.", "id": "f2883:c14:m33"}
{"signature": "def as_posix(self):", "body": "f = self._flavour<EOL>return str(self).replace(f.sep, '<STR_LIT:/>')<EOL>", "docstring": "Return the string representation of the path with forward (/)\n        slashes.", "id": "f2883:c11:m10"}
{"signature": "def absolute(self):", "body": "<EOL>if self._closed:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>if self.is_absolute():<EOL><INDENT>return self<EOL><DEDENT>obj = self._from_parts([os.getcwd()] + self._parts, init=False)<EOL>obj._init(template=self)<EOL>return obj<EOL>", "docstring": "Return an absolute version of this path.  This function works\n        even if the path doesn't point to anything.\n\n        No normalization is done, i.e. all '.' and '..' will be kept along.\n        Use resolve() to get the canonical path to a file.", "id": "f2883:c14:m14"}
{"signature": "@classmethod<EOL><INDENT>def cwd(cls):<DEDENT>", "body": "return cls(os.getcwd())<EOL>", "docstring": "Return a new path pointing to the current working directory\n        (as returned by os.getcwd()).", "id": "f2883:c14:m8"}
{"signature": "def is_block_device(self):", "body": "try:<EOL><INDENT>return S_ISBLK(self.stat().st_mode)<EOL><DEDENT>except OSError as e:<EOL><INDENT>if e.errno not in (ENOENT, ENOTDIR):<EOL><INDENT>raise<EOL><DEDENT>return False<EOL><DEDENT>", "docstring": "Whether this path is a block device.", "id": "f2883:c14:m38"}
{"signature": "def glob(self, pattern):", "body": "if not pattern:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(pattern))<EOL><DEDENT>pattern = self._flavour.casefold(pattern)<EOL>drv, root, pattern_parts = self._flavour.parse_parts((pattern,))<EOL>if drv or root:<EOL><INDENT>raise NotImplementedError(\"<STR_LIT>\")<EOL><DEDENT>selector = _make_selector(tuple(pattern_parts))<EOL>for p in selector.select_from(self):<EOL><INDENT>yield p<EOL><DEDENT>", "docstring": "Iterate over this subtree and yield all existing files (of any\n        kind, including directories) matching the given pattern.", "id": "f2883:c14:m12"}
{"signature": "def expanduser(self):", "body": "if (not (self._drv or self._root)<EOL>and self._parts and self._parts[<NUM_LIT:0>][:<NUM_LIT:1>] == '<STR_LIT>'):<EOL><INDENT>homedir = self._flavour.gethomedir(self._parts[<NUM_LIT:0>][<NUM_LIT:1>:])<EOL>return self._from_parts([homedir] + self._parts[<NUM_LIT:1>:])<EOL><DEDENT>return self<EOL>", "docstring": "Return a new path with expanded ~ and ~user constructs\n        (as returned by os.path.expanduser)", "id": "f2883:c14:m42"}
{"signature": "def lstat(self):", "body": "if self._closed:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>return self._accessor.lstat(self)<EOL>", "docstring": "Like stat(), except if the path points to a symlink, the symlink's\nstatus information is returned, rather than its target's.", "id": "f2883:c14:m30"}
{"signature": "def unlink(self):", "body": "if self._closed:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>self._accessor.unlink(self)<EOL>", "docstring": "Remove this file or link.\nIf the path is a directory, use rmdir() instead.", "id": "f2883:c14:m28"}
{"signature": "@property<EOL><INDENT>def n_subdir(self):<DEDENT>", "body": "self.assert_is_dir_and_exists()<EOL>n = <NUM_LIT:0><EOL>for _ in self.select_dir(recursive=False):<EOL><INDENT>n += <NUM_LIT:1><EOL><DEDENT>return n<EOL>", "docstring": "Count how many folders in this directory (doesn't include folder in\nsub folders).", "id": "f2884:c0:m9"}
{"signature": "def select_video(self, recursive=True):  ", "body": "return self.select_by_ext(self._video_ext, recursive)<EOL>", "docstring": "Select video file.", "id": "f2884:c0:m19"}
{"signature": "def _sort_by(key):", "body": "@staticmethod<EOL>def sort_by(p_list, reverse=False):<EOL><INDENT>return sorted(<EOL>p_list,<EOL>key=lambda p: getattr(p, key),<EOL>reverse=reverse,<EOL>)<EOL><DEDENT>return sort_by<EOL>", "docstring": "High order function for sort methods.", "id": "f2884:m1"}
{"signature": "def select_by_ctime(self, min_time=<NUM_LIT:0>, max_time=ts_2100,<EOL>recursive=True):", "body": "def filters(p): return min_time <= p.ctime <= max_time<EOL>return self.select_file(filters, recursive)<EOL>", "docstring": "Select file path by create time.\n\n:param min_time: lower bound timestamp\n:param max_time: upper bound timestamp\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9009\u62e9\u6240\u6709 :attr:`pathlib_mate.pathlib2.Path.ctime` \u5728\u4e00\u5b9a\u8303\u56f4\u5185\u7684\u6587\u4ef6\u3002", "id": "f2884:c0:m16"}
{"signature": "@property<EOL><INDENT>def n_file(self):<DEDENT>", "body": "self.assert_is_dir_and_exists()<EOL>n = <NUM_LIT:0><EOL>for _ in self.select_file(recursive=True):<EOL><INDENT>n += <NUM_LIT:1><EOL><DEDENT>return n<EOL>", "docstring": "Count how many files in this directory. Including file in sub folder.", "id": "f2884:c0:m6"}
{"signature": "def assert_exists(self):", "body": "if not self.exists():<EOL><INDENT>msg = \"<STR_LIT>\" % self<EOL>raise EnvironmentError(msg)<EOL><DEDENT>", "docstring": "Assert it exists.", "id": "f2884:c0:m2"}
{"signature": "def select_audio(self, recursive=True):  ", "body": "return self.select_by_ext(self._audio_ext, recursive)<EOL>", "docstring": "Select audio file.", "id": "f2884:c0:m18"}
{"signature": "def select_file(self, filters=all_true, recursive=True):", "body": "for p in self.select(filters, recursive):<EOL><INDENT>if p.is_file():<EOL><INDENT>yield p<EOL><DEDENT><DEDENT>", "docstring": "Select file path by criterion.\n\n        **\u4e2d\u6587\u6587\u6863**\n\n        \u6839\u636efilters\u4e2d\u5b9a\u4e49\u7684\u6761\u4ef6\u9009\u62e9\u6587\u4ef6\u3002", "id": "f2884:c0:m4"}
{"signature": "def assert_is_dir_and_exists(self):", "body": "if not self.is_dir():<EOL><INDENT>msg = \"<STR_LIT>\" % self<EOL>raise EnvironmentError(msg)<EOL><DEDENT>", "docstring": "Assert it is a directory and exists in file system.", "id": "f2884:c0:m1"}
{"signature": "def select_by_pattern_in_abspath(self,<EOL>pattern,<EOL>recursive=True,<EOL>case_sensitive=False):", "body": "if case_sensitive:<EOL><INDENT>def filters(p):<EOL><INDENT>return pattern in p.abspath<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pattern = pattern.lower()<EOL>def filters(p):<EOL><INDENT>return pattern in p.abspath.lower()<EOL><DEDENT><DEDENT>return self.select_file(filters, recursive)<EOL>", "docstring": "Select file path by text pattern in absolute path.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9009\u62e9\u7edd\u5bf9\u8def\u5f84\u4e2d\u5305\u542b\u6307\u5b9a\u5b50\u5b57\u7b26\u4e32\u7684\u6587\u4ef6\u3002", "id": "f2884:c0:m12"}
{"signature": "def assert_is_file_and_exists(self):", "body": "if not self.is_file():<EOL><INDENT>msg = \"<STR_LIT>\" % self<EOL>raise EnvironmentError(msg)<EOL><DEDENT>", "docstring": "Assert it is a directory and exists in file system.", "id": "f2884:c0:m0"}
{"signature": "def select_image(self, recursive=True):", "body": "return self.select_by_ext(self._image_ext, recursive)<EOL>", "docstring": "Select image file.", "id": "f2884:c0:m17"}
{"signature": "def ensure_list(path_or_path_list):", "body": "if isinstance(path_or_path_list, (tuple, list, set)):<EOL><INDENT>return [ensure_str(path) for path in path_or_path_list]<EOL><DEDENT>else:<EOL><INDENT>return [ensure_str(path_or_path_list), ]<EOL><DEDENT>", "docstring": "Pre-process input argument, whether if it is:\n\n1. abspath\n2. Path instance\n3. string\n4. list or set of any of them\n\nIt returns list of path.\n\n:return path_or_path_list: always return list of path in string\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9884\u5904\u7406\u8f93\u5165\u53c2\u6570\u3002", "id": "f2885:m1"}
{"signature": "def repr_data_size(size_in_bytes, precision=<NUM_LIT:2>):", "body": "if size_in_bytes < <NUM_LIT>:<EOL><INDENT>return \"<STR_LIT>\" % size_in_bytes<EOL><DEDENT>index = <NUM_LIT:0><EOL>while <NUM_LIT:1>:<EOL><INDENT>index += <NUM_LIT:1><EOL>size_in_bytes, mod = divmod(size_in_bytes, <NUM_LIT>)<EOL>if size_in_bytes < <NUM_LIT>:<EOL><INDENT>break<EOL><DEDENT><DEDENT>template = \"<STR_LIT>\" % precision<EOL>s = template.format(size_in_bytes + mod / <NUM_LIT>, MAGNITUDE_OF_DATA[index])<EOL>return s<EOL>", "docstring": "Return human readable string represent of a file size. Doesn't support\nsize greater than 1EB.\n\nFor example:\n\n- 100 bytes => 100 B\n- 100,000 bytes => 97.66 KB\n- 100,000,000 bytes => 95.37 MB\n- 100,000,000,000 bytes => 93.13 GB\n- 100,000,000,000,000 bytes => 90.95 TB\n- 100,000,000,000,000,000 bytes => 88.82 PB\n...\n\nMagnitude of data::\n\n    1000         kB    kilobyte\n    1000 ** 2    MB    megabyte\n    1000 ** 3    GB    gigabyte\n    1000 ** 4    TB    terabyte\n    1000 ** 5    PB    petabyte\n    1000 ** 6    EB    exabyte\n    1000 ** 7    ZB    zettabyte\n    1000 ** 8    YB    yottabyte", "id": "f2885:m2"}
{"signature": "def zip_many_files(list_of_abspath, dst):", "body": "if os.path.exists(dst):<EOL><INDENT>print(\"<STR_LIT>\" % dst)<EOL>return<EOL><DEDENT>base_dir = os.getcwd()<EOL>with ZipFile(dst, \"<STR_LIT:w>\") as f:<EOL><INDENT>for abspath in list_of_abspath:<EOL><INDENT>dirname, basename = os.path.split(abspath)<EOL>os.chdir(dirname)<EOL>f.write(basename)<EOL><DEDENT><DEDENT>os.chdir(base_dir)<EOL>", "docstring": "Add many files to a zip archive.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c06\u4e00\u7cfb\u5217\u7684\u6587\u4ef6\u538b\u7f29\u5230\u4e00\u4e2a\u538b\u7f29\u5305\u4e2d, \u82e5\u6709\u91cd\u590d\u7684\u6587\u4ef6\u540d, \u5728zip\u4e2d\u4fdd\u7559\u6240\u6709\u7684\u526f\u672c\u3002", "id": "f2887:m2"}
{"signature": "def zip_a_folder(src, dst):", "body": "if os.path.exists(dst):<EOL><INDENT>print(\"<STR_LIT>\" % dst)<EOL>return<EOL><DEDENT>src, dst = os.path.abspath(src), os.path.abspath(dst)<EOL>cwd = os.getcwd()<EOL>todo = list()<EOL>dirname, basename = os.path.split(src)<EOL>os.chdir(dirname)<EOL>for dirname, _, fnamelist in os.walk(basename):<EOL><INDENT>for fname in fnamelist:<EOL><INDENT>newname = os.path.join(dirname, fname)<EOL>todo.append(newname)<EOL><DEDENT><DEDENT>with ZipFile(dst, \"<STR_LIT:w>\") as f:<EOL><INDENT>for newname in todo:<EOL><INDENT>f.write(newname)<EOL><DEDENT><DEDENT>os.chdir(cwd)<EOL>", "docstring": "Add a folder and everything inside to zip archive.\n\nExample::\n\n    |---paper\n        |--- algorithm.pdf\n        |--- images\n            |--- 1.jpg\n\n    zip_a_folder(\"paper\", \"paper.zip\")\n\n    paper.zip\n        |---paper\n            |--- algorithm.pdf\n            |--- images\n                |--- 1.jpg\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c06\u6574\u4e2a\u6587\u4ef6\u5939\u6dfb\u52a0\u5230\u538b\u7f29\u5305, \u5305\u62ec\u6839\u76ee\u5f55\u672c\u8eab\u3002", "id": "f2887:m0"}
{"signature": "def zip_everything_in_a_folder(src, dst):", "body": "if os.path.exists(dst):<EOL><INDENT>print(\"<STR_LIT>\" % dst)<EOL>return<EOL><DEDENT>src, dst = os.path.abspath(src), os.path.abspath(dst)<EOL>cwd = os.getcwd()<EOL>todo = list()<EOL>os.chdir(src)<EOL>for dirname, _, fnamelist in os.walk(os.getcwd()):<EOL><INDENT>for fname in fnamelist:<EOL><INDENT>newname = os.path.relpath(os.path.join(dirname, fname), src)<EOL>todo.append(newname)<EOL><DEDENT><DEDENT>with ZipFile(dst, \"<STR_LIT:w>\") as f:<EOL><INDENT>for newname in todo:<EOL><INDENT>f.write(newname)<EOL><DEDENT><DEDENT>os.chdir(cwd)<EOL>", "docstring": "Add everything in a folder except the root folder it self to zip archive.\n\nExample::\n\n    |---paper\n        |--- algorithm.pdf\n        |--- images\n            |--- 1.jpg\n\n    zip_everything_in_folder(\"paper\", \"paper.zip\")\n\n    paper.zip\n        |--- algorithm.pdf\n        |--- images\n            |--- 1.jpg\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c06\u76ee\u5f55\u5185\u90e8\u7684\u6240\u6709\u6587\u4ef6\u6dfb\u52a0\u5230\u538b\u7f29\u5305, \u4e0d\u5305\u62ec\u6839\u76ee\u5f55\u672c\u8eab\u3002", "id": "f2887:m1"}
{"signature": "def is_empty(self, strict=True):", "body": "if self.exists():<EOL><INDENT>if self.is_file():<EOL><INDENT>return self.size == <NUM_LIT:0><EOL><DEDENT>elif self.is_dir():<EOL><INDENT>if strict:<EOL><INDENT>return len(list(self.select(recursive=True))) == <NUM_LIT:0><EOL><DEDENT>else:  <EOL><INDENT>return len(list(self.select_file(recursive=True))) == <NUM_LIT:0><EOL><DEDENT><DEDENT>else:  <EOL><INDENT>msg = \"<STR_LIT>\" % self<EOL>raise EnvironmentError(msg)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise EnvironmentError(\"<STR_LIT>\" % self)<EOL><DEDENT>", "docstring": "- If it's a file, check if it is a empty file. (0 bytes content)\n- If it's a directory, check if there's no file and dir in it.\n    But if ``strict = False``, then only check if there's no file in it.\n\n:param strict: only useful when it is a directory. if True, only\n    return True if this dir has no dir and file. if False, return True\n    if it doesn't have any file.", "id": "f2888:c0:m4"}
{"signature": "def file_stat(self, filters=all_true):", "body": "self.assert_is_dir_and_exists()<EOL>stat = {\"<STR_LIT:file>\": <NUM_LIT:0>, \"<STR_LIT>\": <NUM_LIT:0>, \"<STR_LIT:size>\": <NUM_LIT:0>}<EOL>for p in self.select(filters=filters, recursive=True):<EOL><INDENT>if p.is_file():<EOL><INDENT>stat[\"<STR_LIT:file>\"] += <NUM_LIT:1><EOL>stat[\"<STR_LIT:size>\"] += p.size<EOL><DEDENT>elif p.is_dir():<EOL><INDENT>stat[\"<STR_LIT>\"] += <NUM_LIT:1><EOL><DEDENT><DEDENT>return stat<EOL>", "docstring": "Find out how many files, directorys and total size (Include file in\n        it's sub-folder).\n\n        :returns: stat, a dict like ``{\"file\": number of files,\n          \"dir\": number of directorys, \"size\": total size in bytes}``\n\n        **\u4e2d\u6587\u6587\u6863**\n\n        \u8fd4\u56de\u4e00\u4e2a\u76ee\u5f55\u4e2d\u7684\u6587\u4ef6, \u6587\u4ef6\u5939, \u5927\u5c0f\u7684\u7edf\u8ba1\u6570\u636e\u3002", "id": "f2888:c0:m10"}
{"signature": "def print_big_dir_and_big_file(self, top_n=<NUM_LIT:5>):", "body": "self.assert_is_dir_and_exists()<EOL>size_table1 = sorted(<EOL>[(p, p.dirsize) for p in self.select_dir(recursive=False)],<EOL>key=lambda x: x[<NUM_LIT:1>],<EOL>reverse=True,<EOL>)<EOL>for p1, size1 in size_table1[:top_n]:<EOL><INDENT>print(\"<STR_LIT>\".format(repr_data_size(size1), p1.abspath))<EOL>size_table2 = sorted(<EOL>[(p, p.size) for p in p1.select_file(recursive=True)],<EOL>key=lambda x: x[<NUM_LIT:1>],<EOL>reverse=True,<EOL>)<EOL>for p2, size2 in size_table2[:top_n]:<EOL><INDENT>print(\"<STR_LIT>\".format(<EOL>repr_data_size(size2), p2.abspath))<EOL><DEDENT><DEDENT>", "docstring": "Print ``top_n`` big dir and ``top_n`` big file in each dir.", "id": "f2888:c0:m8"}
{"signature": "def print_big_file(self, top_n=<NUM_LIT:5>):", "body": "self.assert_is_dir_and_exists()<EOL>size_table = sorted(<EOL>[(p, p.size) for p in self.select_file(recursive=True)],<EOL>key=lambda x: x[<NUM_LIT:1>],<EOL>reverse=True,<EOL>)<EOL>for p, size in size_table[:top_n]:<EOL><INDENT>print(\"<STR_LIT>\".format(repr_data_size(size), p.abspath))<EOL><DEDENT>", "docstring": "Print ``top_n`` big file in this dir.", "id": "f2888:c0:m7"}
{"signature": "def trail_space(self, filters=lambda p: p.ext == \"<STR_LIT>\"):  ", "body": "self.assert_is_dir_and_exists()<EOL>for p in self.select_file(filters):<EOL><INDENT>try:<EOL><INDENT>with open(p.abspath, \"<STR_LIT:rb>\") as f:<EOL><INDENT>lines = list()<EOL>for line in f:<EOL><INDENT>lines.append(line.decode(\"<STR_LIT:utf-8>\").rstrip())<EOL><DEDENT><DEDENT>with open(p.abspath, \"<STR_LIT:wb>\") as f:<EOL><INDENT>f.write(\"<STR_LIT:\\n>\".join(lines).encode(\"<STR_LIT:utf-8>\"))<EOL><DEDENT><DEDENT>except Exception as e:  <EOL><INDENT>raise e<EOL><DEDENT><DEDENT>", "docstring": "Trail white space at end of each line for every ``.py`` file.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c06\u76ee\u5f55\u4e0b\u7684\u6240\u6709\u88ab\u9009\u62e9\u7684\u6587\u4ef6\u4e2d\u884c\u672b\u7684\u7a7a\u683c\u5220\u9664\u3002", "id": "f2888:c0:m13"}
{"signature": "def decode_hexstr(text):", "body": "return binascii.a2b_hex(text.encode(\"<STR_LIT:utf-8>\")).decode(\"<STR_LIT:utf-8>\")<EOL>", "docstring": "Reverse operation of :func:`encode_hexstr`.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u5c0616\u8fdb\u5236\u5b57\u7b26\u4e32\u89e3\u7801\u4e3a\u539f\u5b57\u7b26\u4e32\u3002", "id": "f2889:m1"}
{"signature": "def copyto(self,<EOL>new_abspath=None,<EOL>new_dirpath=None,<EOL>new_dirname=None,<EOL>new_basename=None,<EOL>new_fname=None,<EOL>new_ext=None,<EOL>overwrite=False,<EOL>makedirs=False):", "body": "self.assert_exists()<EOL>p = self.change(<EOL>new_abspath=new_abspath,<EOL>new_dirpath=new_dirpath,<EOL>new_dirname=new_dirname,<EOL>new_basename=new_basename,<EOL>new_fname=new_fname,<EOL>new_ext=new_ext,<EOL>)<EOL>if p.is_not_exist_or_allow_overwrite(overwrite=overwrite):<EOL><INDENT>if self.abspath != p.abspath:<EOL><INDENT>try:<EOL><INDENT>shutil.copy(self.abspath, p.abspath)<EOL><DEDENT>except IOError as e:<EOL><INDENT>if makedirs:<EOL><INDENT>os.makedirs(p.parent.abspath)<EOL>shutil.copy(self.abspath, p.abspath)<EOL><DEDENT>else:<EOL><INDENT>raise e<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return p<EOL>", "docstring": "Copy this file to other place.", "id": "f2890:c0:m5"}
{"signature": "def moveto(self,<EOL>new_abspath=None,<EOL>new_dirpath=None,<EOL>new_dirname=None,<EOL>new_basename=None,<EOL>new_fname=None,<EOL>new_ext=None,<EOL>overwrite=False,<EOL>makedirs=False):", "body": "self.assert_exists()<EOL>p = self.change(<EOL>new_abspath=new_abspath,<EOL>new_dirpath=new_dirpath,<EOL>new_dirname=new_dirname,<EOL>new_basename=new_basename,<EOL>new_fname=new_fname,<EOL>new_ext=new_ext,<EOL>)<EOL>if p.is_not_exist_or_allow_overwrite(overwrite=overwrite):<EOL><INDENT>if self.abspath != p.abspath:<EOL><INDENT>if makedirs:<EOL><INDENT>parent = p.parent<EOL>if not parent.exists():<EOL><INDENT>os.makedirs(parent.abspath)<EOL><DEDENT><DEDENT>self.rename(p)<EOL><DEDENT><DEDENT>return p<EOL>", "docstring": "An advanced :meth:`pathlib_mate.pathlib2.Path.rename` method provide ability to rename by\neach components of a path. A new ``Path`` instance will returns.\n\n**\u4e2d\u6587\u6587\u6863**\n\n\u9ad8\u7ea7\u91cd\u547d\u540d\u51fd\u6570, \u5141\u8bb8\u7528\u4e8e\u6839\u636e\u8def\u5f84\u7684\u5404\u4e2a\u7ec4\u6210\u90e8\u5206\u8fdb\u884c\u91cd\u547d\u540d\u3002\u4f46\u548cos.rename\n\u65b9\u6cd5\u4e00\u6837, \u9700\u8981\u4fdd\u8bc1\u6bcd\u6587\u4ef6\u5939\u5b58\u5728\u3002", "id": "f2890:c0:m4"}
{"signature": "def append_parts(self, *parts):", "body": "return self.__class__(self, *parts)<EOL>", "docstring": "Append some parts to the end of this path.\n\n:returns: a new Path object.\n\nExample::\n\n    >>> self.__class__(\"/usr/bin/python\").append_parts(\"lib\")\n    \"/user/bin/python/lib\"\n\n    >>> self.__class__(\"/usr/bin/python\").append_parts(\"lib\", \"core.py\")\n    \"/user/bin/python/lib/core.py\"", "id": "f2890:c0:m1"}
{"signature": "def is_not_exist_or_allow_overwrite(self, overwrite=False):", "body": "if self.exists() and overwrite is False:<EOL><INDENT>return False<EOL><DEDENT>else:  <EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Test whether a file target is not exists or it exists but allow\noverwrite.", "id": "f2890:c0:m3"}
{"signature": "@property<EOL><INDENT>def format(self):<DEDENT>", "body": "formats = {}<EOL>for subpod in self.root.findall('<STR_LIT>'):<EOL><INDENT>for elem in list(subpod):<EOL><INDENT>if elem.tag == '<STR_LIT:state>':<EOL><INDENT>continue<EOL><DEDENT>content = elem.text<EOL>if elem.tag == '<STR_LIT>':<EOL><INDENT>content = {'<STR_LIT:url>': elem.get('<STR_LIT:src>'),<EOL>'<STR_LIT>': elem.get('<STR_LIT>'),<EOL>'<STR_LIT:title>': elem.get('<STR_LIT:title>'),<EOL>'<STR_LIT:width>': int(elem.get('<STR_LIT:width>', <NUM_LIT:0>)),<EOL>'<STR_LIT>': int(elem.get('<STR_LIT>', <NUM_LIT:0>))}<EOL><DEDENT>if elem.tag not in formats:<EOL><INDENT>formats[elem.tag] = [content]<EOL><DEDENT>else:<EOL><INDENT>formats[elem.tag].append(content)<EOL><DEDENT><DEDENT><DEDENT>return formats<EOL>", "docstring": "Dictionary of available formats, corresponding to a list of the values\nExample: pod.format['plaintext'] will return a list of every plaintext\n         content in the pod's subpods", "id": "f2901:c2:m4"}
{"signature": "def kill_connections(self, name):", "body": "log.info('<STR_LIT>' % name)<EOL>self._run_stmt(\"\"\"<STR_LIT>\"\"\" % name)<EOL>", "docstring": "Drops all connections to the specified database.", "id": "f2908:c0:m11"}
{"signature": "def connections(self, name):", "body": "stmt = \"\"\"<STR_LIT>\"\"\".format(fields='<STR_LIT:U+002CU+0020>'.join(CONNECTION_FIELDS), datname=name)<EOL>return list(Connection(**x) for x in self._iter_results(stmt))<EOL>", "docstring": "Returns a list of existing connections to the named database.", "id": "f2908:c0:m10"}
{"signature": "def rename(self, from_name, to_name):", "body": "log.info('<STR_LIT>' % (from_name, to_name))<EOL>self._run_stmt('<STR_LIT>' % (from_name, to_name))<EOL>", "docstring": "Renames an existing database.", "id": "f2908:c0:m9"}
{"signature": "def settings(self):", "body": "stmt = \"<STR_LIT>\".format(fields='<STR_LIT:U+002CU+0020>'.join(SETTINGS_FIELDS))<EOL>settings = []<EOL>for row in self._iter_results(stmt):<EOL><INDENT>row['<STR_LIT>'] = self._vartype_map[row['<STR_LIT>']](row['<STR_LIT>'])<EOL>settings.append(Settings(**row))<EOL><DEDENT>return settings<EOL>", "docstring": "Returns settings from the server.", "id": "f2908:c0:m19"}
{"signature": "def connection_url(self, name=None):", "body": "return '<STR_LIT>'.format(**{k: v for k, v in self._connect_options(name)})<EOL>", "docstring": "Provides a connection string for database as a sqlalchemy compatible URL.\n\nNB - this doesn't include special arguments related to SSL connectivity (which are outside the scope\nof the connection URL format).\n\nParameters\n----------\nname: str, optional\n    an override database name for the connection string.\n\nReturns\n-------\nstr: the connection URL (e.g. postgresql://user1@localhost:5432/db1)", "id": "f2908:c0:m17"}
{"signature": "def map_attr(self, mapping, attr, obj):", "body": "if attr not in mapping and hasattr(self, attr):<EOL><INDENT>if not callable(getattr(self, attr)):<EOL><INDENT>mapping[attr] = getattr(self, attr)<EOL><DEDENT>else:<EOL><INDENT>mapping[attr] = getattr(self, attr)(obj)<EOL><DEDENT><DEDENT>", "docstring": "A kind of cheesy method that allows for callables or attributes to\nbe used interchangably", "id": "f2914:c4:m10"}
{"signature": "def preprocess(self, obj, mapping, **kwargs):", "body": "pass<EOL>", "docstring": "Pre-processing hook.  Called by map_to_dictionary()", "id": "f2914:c4:m15"}
{"signature": "def request_resource(self, url, **kwargs):", "body": "obj = self.get_object(url)<EOL>mapping = self.map_to_dictionary(url, obj, **kwargs)<EOL>resource = OEmbedResource.create(mapping)<EOL>resource.content_object = obj<EOL>return resource<EOL>", "docstring": "Request an OEmbedResource for a given url.  Some valid keyword args:\n- format\n- maxwidth\n- maxheight", "id": "f2914:c4:m18"}
{"signature": "def provider_from_url(self, url):", "body": "domain = get_domain(url)<EOL>site_tuples = self.get_cleaned_sites().values()<EOL>for domain_re, name, normalized_domain in site_tuples:<EOL><INDENT>if re.match(domain_re, domain):<EOL><INDENT>return normalized_domain, name<EOL><DEDENT><DEDENT>site = Site.objects.get_current()<EOL>return site.domain, site.name<EOL>", "docstring": "Given a URL for any of our sites, try and match it to one, returning\nthe domain & name of the match.  If no match is found, return current.\n\nReturns a tuple of domain, site name -- used to determine 'provider'", "id": "f2914:c4:m5"}
{"signature": "def map_to_dictionary(self, url, obj, **kwargs):", "body": "maxwidth = kwargs.get('<STR_LIT>', None)<EOL>maxheight = kwargs.get('<STR_LIT>', None)<EOL>provider_url, provider_name = self.provider_from_url(url)<EOL>mapping = {<EOL>'<STR_LIT:version>': '<STR_LIT:1.0>',<EOL>'<STR_LIT:url>': url,<EOL>'<STR_LIT>': provider_name,<EOL>'<STR_LIT>': provider_url,<EOL>'<STR_LIT:type>': self.resource_type<EOL>}<EOL>self.preprocess(obj, mapping, **kwargs)<EOL>if self.resource_type == '<STR_LIT>' and self.get_image(obj):<EOL><INDENT>self.resize_photo(obj, mapping, maxwidth, maxheight)<EOL><DEDENT>elif self.resource_type in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>width, height = size_to_nearest(<EOL>maxwidth,<EOL>maxheight,<EOL>self._meta.valid_sizes,<EOL>self._meta.force_fit<EOL>)<EOL>mapping.update(width=width, height=height)<EOL><DEDENT>if self.get_image(obj):<EOL><INDENT>self.thumbnail(obj, mapping)<EOL><DEDENT>for attr in ('<STR_LIT:title>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:html>'):<EOL><INDENT>self.map_attr(mapping, attr, obj)<EOL><DEDENT>if '<STR_LIT:url>' in mapping:<EOL><INDENT>mapping['<STR_LIT:url>'] = relative_to_full(mapping['<STR_LIT:url>'], url)<EOL><DEDENT>if '<STR_LIT>' in mapping:<EOL><INDENT>mapping['<STR_LIT>'] = relative_to_full(mapping['<STR_LIT>'], url)<EOL><DEDENT>if '<STR_LIT:html>' not in mapping and mapping['<STR_LIT:type>'] in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>mapping['<STR_LIT:html>'] = self.render_html(obj, context=Context(mapping))<EOL><DEDENT>self.postprocess(obj, mapping, **kwargs)<EOL>return mapping<EOL>", "docstring": "Build a dictionary of metadata for the requested object.", "id": "f2914:c4:m17"}
{"signature": "def get_image(self, obj):", "body": "if self._meta.image_field:<EOL><INDENT>return getattr(obj, self._meta.image_field)<EOL><DEDENT>", "docstring": "Return an ImageFileField instance", "id": "f2914:c4:m11"}
{"signature": "def get_sites(self):", "body": "return Site.objects.all()<EOL>", "docstring": "Return sites whose domains should be checked against", "id": "f2914:c4:m3"}
{"signature": "def _fetch(self, url):", "body": "return fetch_url(url)<EOL>", "docstring": "Fetches from a URL, respecting GZip encoding, etc.\n\nReturns an OEmbedResource instance", "id": "f2914:c1:m2"}
{"signature": "def _image_field(self):", "body": "for field in self.model._meta.fields:<EOL><INDENT>if isinstance(field, ImageField):<EOL><INDENT>return field.name<EOL><DEDENT><DEDENT>", "docstring": "Try to automatically detect an image field", "id": "f2914:c2:m2"}
{"signature": "def postprocess(self, obj, mapping, **kwargs):", "body": "pass<EOL>", "docstring": "Post-processing hook.  Called by map_to_dictionary()", "id": "f2914:c4:m16"}
{"signature": "def _build_regex(self):", "body": "<EOL>url_patterns = resolver.reverse_dict.get(self._meta.named_view)<EOL>try:<EOL><INDENT>regex = url_patterns[<NUM_LIT:1>]<EOL><DEDENT>except TypeError:<EOL><INDENT>raise OEmbedException('<STR_LIT>' % self._meta.named_view)<EOL><DEDENT>cleaned_sites = self.get_cleaned_sites()<EOL>site_regexes = []<EOL>for site in self.get_sites():<EOL><INDENT>site_regexes.append(cleaned_sites[site.pk][<NUM_LIT:0>])<EOL><DEDENT>sites = '<STR_LIT:|>'.join(site_regexes)<EOL>regex = re.compile('<STR_LIT>' % (sites, regex))<EOL>return regex<EOL>", "docstring": "Performs a reverse lookup on a named view and generates\na list of regexes that match that object.  It generates\nregexes with the domain name included, using sites provided\nby the get_sites() method.\n\n>>> regex = provider.regex\n>>> regex.pattern\n'http://(www2.kusports.com|www2.ljworld.com|www.lawrence.com)/photos/(?P<year>\\\\d{4})/(?P<month>\\\\w{3})/(?P<day>\\\\d{1,2})/(?P<object_id>\\\\d+)/$'", "id": "f2914:c4:m2"}
{"signature": "def render_html(self, obj, context=None):", "body": "provided_context = context or Context()<EOL>context = RequestContext(mock_request())<EOL>context.update(provided_context)<EOL>context.push()<EOL>context[self._meta.context_varname] = obj<EOL>rendered = render_to_string(self._meta.template_name, context)<EOL>context.pop()<EOL>return rendered<EOL>", "docstring": "Generate the 'html' attribute of an oembed resource using a template.\nSort of a corollary to the parser's render_oembed method.  By default,\nthe current mapping will be passed in as the context.\n\nOEmbed templates are stored in:\n\noembed/provider/[app_label]_[model].html\n\n-- or --\n\noembed/provider/media_video.html", "id": "f2914:c4:m9"}
{"signature": "def setUp(self):", "body": "super(ConsumerTestCase, self).setUp()<EOL>self.oembed_client = OEmbedConsumer()<EOL>", "docstring": "Set up test environment", "id": "f2923:c0:m0"}
{"signature": "def setUp(self):", "body": "<EOL>oembed.autodiscover()<EOL>oembed.site._db_updated = None<EOL>self.storage = DummyMemoryStorage()<EOL>self.orig_default_storage = storage.default_storage<EOL>storage.default_storage = self.storage<EOL>self.media_root, self.media_url = settings.MEDIA_ROOT, settings.MEDIA_URL<EOL>settings.MEDIA_ROOT = MEDIA_ROOT<EOL>settings.MEDIA_URL = MEDIA_URL<EOL>self.template_dirs = settings.TEMPLATE_DIRS<EOL>cur_dir = os.path.dirname(__file__)<EOL>settings.TEMPLATE_DIRS = [os.path.join(os.path.dirname(cur_dir), '<STR_LIT>')]<EOL>self.orig_file_storage = settings.DEFAULT_FILE_STORAGE<EOL>settings.DEFAULT_FILE_STORAGE = DEFAULT_FILE_STORAGE<EOL>test_image = Image.new('<STR_LIT>', (<NUM_LIT>, <NUM_LIT>), (<NUM_LIT:255>, <NUM_LIT:255>, <NUM_LIT:255>, <NUM_LIT:255>))<EOL>self.test_img_buffer = StringIO()<EOL>test_image.save(self.test_img_buffer, '<STR_LIT>')<EOL>self.test_img_file = ContentFile(self.test_img_buffer.getvalue())<EOL>self.test_img_location = '<STR_LIT>'<EOL>storage.default_storage.save(self.test_img_location, self.test_img_file)<EOL>", "docstring": "Set up test environment", "id": "f2925:c0:m0"}
{"signature": "def do_url_scheme(parser, token):", "body": "args = token.split_contents()<EOL>if len(args) != <NUM_LIT:1>:<EOL><INDENT>raise template.TemplateSyntaxError('<STR_LIT>' % args[<NUM_LIT:0>])<EOL><DEDENT>return OEmbedURLSchemeNode()<EOL>", "docstring": "Generates a &lt;link&gt; tag with oembed autodiscovery bits.\n\n{% oembed_url_scheme %}", "id": "f2937:m5"}
{"signature": "@register.filter<EOL>def strip_oembeds(text, args=None):", "body": "resource_type = width = height = None<EOL>if args:<EOL><INDENT>dimensions = args.lower().split('<STR_LIT:x>')<EOL>if len(dimensions) in (<NUM_LIT:3>, <NUM_LIT:1>):<EOL><INDENT>resource_type = dimensions.pop()<EOL><DEDENT>if len(dimensions) == <NUM_LIT:2>:<EOL><INDENT>width, height = map(lambda x: int(x), dimensions)<EOL><DEDENT><DEDENT>client = OEmbedConsumer()<EOL>return mark_safe(client.strip(text, width, height, resource_type))<EOL>", "docstring": "Take a block of text and strip all the embeds from it, optionally taking\na maxwidth, maxheight / resource_type\n\nUsage:\n{{ post.content|strip_embeds }}\n\n{{ post.content|strip_embeds:\"600x600xphoto\" }}\n\n{{ post.content|strip_embeds:\"video\" }}", "id": "f2937:m2"}
{"signature": "def json(request, *args, **kwargs):", "body": "<EOL>params = dict(list(request.GET.items()))<EOL>callback = params.pop('<STR_LIT>', None)<EOL>url = params.pop('<STR_LIT:url>', None)<EOL>if not url:<EOL><INDENT>return HttpResponseBadRequest('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>provider = oembed.site.provider_for_url(url)<EOL>if not provider.provides:<EOL><INDENT>raise OEmbedMissingEndpoint()<EOL><DEDENT><DEDENT>except OEmbedMissingEndpoint:<EOL><INDENT>raise Http404('<STR_LIT>' % url)<EOL><DEDENT>query = dict([(smart_str(k), smart_str(v)) for k, v in list(params.items()) if v])<EOL>try:<EOL><INDENT>resource = oembed.site.embed(url, **query)<EOL><DEDENT>except OEmbedException as e:<EOL><INDENT>raise Http404('<STR_LIT>' % (url, str(e)))<EOL><DEDENT>response = HttpResponse(mimetype='<STR_LIT:application/json>')<EOL>json = resource.json<EOL>if callback:<EOL><INDENT>response.write('<STR_LIT>' % (defaultfilters.force_escape(callback), json))<EOL><DEDENT>else:<EOL><INDENT>response.write(json)<EOL><DEDENT>return response<EOL>", "docstring": "The oembed endpoint, or the url to which requests for metadata are passed.\nThird parties will want to access this view with URLs for your site's\ncontent and be returned OEmbed metadata.", "id": "f2940:m0"}
{"signature": "def store_providers(self, provider_data):", "body": "if not hasattr(provider_data, '<STR_LIT>'):<EOL><INDENT>raise OEmbedException('<STR_LIT>')<EOL><DEDENT>provider_pks = []<EOL>for provider in provider_data:<EOL><INDENT>if '<STR_LIT>' not in provider or'<STR_LIT>' not in provider:<EOL><INDENT>continue<EOL><DEDENT>resource_type = provider.get('<STR_LIT:type>')<EOL>if resource_type not in RESOURCE_TYPES:<EOL><INDENT>continue<EOL><DEDENT>stored_provider, created = StoredProvider.objects.get_or_create(<EOL>wildcard_regex=provider['<STR_LIT>']<EOL>)<EOL>if created:<EOL><INDENT>stored_provider.endpoint_url = relative_to_full(    <EOL>provider['<STR_LIT>'],<EOL>provider['<STR_LIT>']<EOL>)<EOL>stored_provider.resource_type = resource_type<EOL>stored_provider.save()<EOL><DEDENT>provider_pks.append(stored_provider.pk)<EOL><DEDENT>return StoredProvider.objects.filter(pk__in=provider_pks)<EOL>", "docstring": "Iterate over the returned json and try to sort out any new providers", "id": "f2942:c0:m13"}
{"signature": "def invalidate_stored_oembeds(self, sender, instance, created, **kwargs):", "body": "ctype = ContentType.objects.get_for_model(instance)<EOL>StoredOEmbed.objects.filter(<EOL>object_id=instance.pk,<EOL>content_type=ctype).delete()<EOL>", "docstring": "A hook for django-based oembed providers to delete any stored oembeds", "id": "f2942:c0:m10"}
{"signature": "def get_providers(self):", "body": "return self.get_registry().keys()<EOL>", "docstring": "Provide a list of all oembed providers that are being used.", "id": "f2942:c0:m8"}
{"signature": "def ensure_populated(self):", "body": "if not self._populated:<EOL><INDENT>self.populate()<EOL><DEDENT>", "docstring": "Ensure not only that the internal registry of Python-class providers is\npopulated, but also make sure the cached queryset of database-providers\nis up-to-date", "id": "f2942:c0:m6"}
{"signature": "def autodiscover(self, url):", "body": "headers, response = fetch_url(url)<EOL>if headers['<STR_LIT>'].split('<STR_LIT:;>')[<NUM_LIT:0>] in ('<STR_LIT:application/json>', '<STR_LIT>'):<EOL><INDENT>provider_data = json.loads(response)<EOL>return self.store_providers(provider_data)<EOL><DEDENT>", "docstring": "Load up StoredProviders from url if it is an oembed scheme", "id": "f2942:c0:m12"}
{"signature": "def register_field(cls, field):", "body": "FieldRegistry.add_field(cls, field)<EOL>signals.post_save.connect(handle_save_embeds, sender=cls,<EOL>dispatch_uid='<STR_LIT>' %(cls._meta.app_label, cls._meta.module_name, field.name))<EOL>", "docstring": "Handles registering the fields with the FieldRegistry and creating a \npost-save signal for the model.", "id": "f2943:m0"}
{"signature": "def contribute_to_class(self, cls, name):", "body": "super(EmbeddedMediaField, self).contribute_to_class(cls, name)<EOL>register_field(cls, self)<EOL>cls._meta.add_virtual_field(EmbeddedSignalCreator(self))<EOL>", "docstring": "I need a way to ensure that this signal gets created for all child\nmodels, and since model inheritance doesn't have a 'contrubite_to_class'\nstyle hook, I am creating a fake virtual field which will be added to\nall subclasses and handles creating the signal", "id": "f2943:c2:m1"}
{"signature": "def autodiscover():", "body": "import imp<EOL>from django.conf import settings<EOL>for app in settings.INSTALLED_APPS:<EOL><INDENT>try:<EOL><INDENT>app_path = __import__(app, {}, {}, [app.split('<STR_LIT:.>')[-<NUM_LIT:1>]]).__path__<EOL><DEDENT>except AttributeError:<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>imp.find_module('<STR_LIT>', app_path)<EOL><DEDENT>except ImportError:<EOL><INDENT>continue<EOL><DEDENT>__import__(\"<STR_LIT>\" % app)<EOL><DEDENT>", "docstring": "Automatically build the provider index.", "id": "f2944:m0"}
{"signature": "def render_oembed(self, oembed_resource, original_url, template_dir=None,<EOL>context=None):", "body": "provided_context = context or Context()<EOL>context = RequestContext(context.get(\"<STR_LIT>\") or mock_request())<EOL>context.update(provided_context)<EOL>template_name = '<STR_LIT>' % oembed_resource.type<EOL>templates = [os.path.join('<STR_LIT>', template_name), '<STR_LIT>']<EOL>if template_dir:<EOL><INDENT>templates.insert(<NUM_LIT:0>, os.path.join('<STR_LIT>', template_dir, template_name))<EOL><DEDENT>template = select_template(templates)<EOL>context.push()<EOL>context['<STR_LIT>'] = oembed_resource<EOL>context['<STR_LIT>'] = original_url<EOL>rendered = template.render(context)<EOL>context.pop()<EOL>return rendered.strip()<EOL>", "docstring": "Render the oembed resource and return as a string.\n\nTemplate directory will always fall back to 'oembed/[type].html', but\na custom template dir can be passed in using the kwargs.\n\nTemplates are given two context variables:\n- response: an OEmbedResource\n- original_url: the url that was passed to the consumer", "id": "f2946:c0:m0"}
{"signature": "def parse(self, text, maxwidth=None, maxheight=None, template_dir=None,<EOL>context=None, urlize_all_links=CONSUMER_URLIZE_ALL):", "body": "context = context or Context()<EOL>context['<STR_LIT>'] = maxwidth<EOL>context['<STR_LIT>'] = maxheight<EOL>try:<EOL><INDENT>text = unicode(text)<EOL><DEDENT>except UnicodeDecodeError:<EOL><INDENT>text = unicode(text.decode('<STR_LIT:utf-8>'))<EOL><DEDENT>return self.parse_data(text, maxwidth, maxheight, template_dir,<EOL>context, urlize_all_links)<EOL>", "docstring": "Scans a block of text, replacing anything matching a provider pattern\nwith an OEmbed html snippet, if possible.\n\nTemplates should be stored at oembed/{format}.html, so for example:\n\n    oembed/video.html\n\nAn optional template_dir can be provided, allowing for\n\n    oembed/[template_dir]/video.html\n\nThese templates are passed a context variable, ``response``, which is\nan OEmbedResource, as well as the ``original_url``", "id": "f2946:c0:m1"}
{"signature": "def parse_data(self, text, maxwidth, maxheight, template_dir, context,<EOL>urlize_all_links):", "body": "<EOL>replacements = {}<EOL>user_urls = set(re.findall(URL_RE, text))<EOL>for user_url in user_urls:<EOL><INDENT>try:<EOL><INDENT>resource = oembed.site.embed(user_url, maxwidth=maxwidth, maxheight=maxheight)<EOL><DEDENT>except OEmbedException:<EOL><INDENT>if urlize_all_links:<EOL><INDENT>replacements[user_url] = '<STR_LIT>' % {'<STR_LIT>': user_url}<EOL><DEDENT><DEDENT>else:<EOL><INDENT>context['<STR_LIT>'] = min(maxwidth, resource.width)<EOL>context['<STR_LIT>'] = min(maxheight, resource.height)<EOL>replacement = self.render_oembed(<EOL>resource, <EOL>user_url, <EOL>template_dir=template_dir, <EOL>context=context<EOL>)<EOL>replacements[user_url] = replacement.strip()<EOL><DEDENT><DEDENT>user_urls = re.finditer(URL_RE, text)<EOL>matches = []<EOL>for match in user_urls:<EOL><INDENT>if match.group() in replacements:<EOL><INDENT>matches.append([match.start(), match.end(), match.group()])<EOL><DEDENT><DEDENT>for indx, (start, end, user_url) in enumerate(matches):<EOL><INDENT>replacement = replacements[user_url]<EOL>difference = len(replacement) - len(user_url)<EOL>text = text[:start] + replacement + text[end:]<EOL>for j in xrange(indx + <NUM_LIT:1>, len(matches)):<EOL><INDENT>matches[j][<NUM_LIT:0>] += difference<EOL>matches[j][<NUM_LIT:1>] += difference<EOL><DEDENT><DEDENT>return mark_safe(text)<EOL>", "docstring": "Parses a block of text indiscriminately", "id": "f2947:c0:m0"}
{"signature": "def fetch_url(url, method='<STR_LIT:GET>', user_agent='<STR_LIT>', timeout=SOCKET_TIMEOUT):", "body": "sock = httplib2.Http(timeout=timeout)<EOL>request_headers = {<EOL>'<STR_LIT>': user_agent,<EOL>'<STR_LIT>': '<STR_LIT>'}<EOL>try:<EOL><INDENT>headers, raw = sock.request(url, headers=request_headers, method=method)<EOL><DEDENT>except:<EOL><INDENT>raise OEmbedHTTPException('<STR_LIT>' % url)<EOL><DEDENT>return headers, raw<EOL>", "docstring": "Fetch response headers and data from a URL, raising a generic exception\nfor any kind of failure.", "id": "f2950:m2"}
{"signature": "def load_class(path):", "body": "package, klass = path.rsplit('<STR_LIT:.>', <NUM_LIT:1>)<EOL>module = import_module(package)<EOL>return getattr(module, klass)<EOL>", "docstring": "dynamically load a class given a string of the format\n\npackage.Class", "id": "f2950:m6"}
{"signature": "def getDevicesReadableNames():", "body": "return [{'<STR_LIT>': s,<EOL>'<STR_LIT>': config.get(s).get('<STR_LIT>')}<EOL>for s in getDevicesCodenames()]<EOL>", "docstring": "Returns codename and readable name for each device", "id": "f2959:m1"}
{"signature": "def encryptPassword(self, login, passwd):", "body": "<EOL>binaryKey = b64decode(config.GOOGLE_PUBKEY)<EOL>i = utils.readInt(binaryKey, <NUM_LIT:0>)<EOL>modulus = utils.toBigInt(binaryKey[<NUM_LIT:4>:][<NUM_LIT:0>:i])<EOL>j = utils.readInt(binaryKey, i + <NUM_LIT:4>)<EOL>exponent = utils.toBigInt(binaryKey[i + <NUM_LIT:8>:][<NUM_LIT:0>:j])<EOL>digest = hashes.Hash(hashes.SHA1(), backend=default_backend())<EOL>digest.update(binaryKey)<EOL>h = b'<STR_LIT:\\x00>' + digest.finalize()[<NUM_LIT:0>:<NUM_LIT:4>]<EOL>der_data = encode_dss_signature(modulus, exponent)<EOL>publicKey = load_der_public_key(der_data, backend=default_backend())<EOL>to_be_encrypted = login.encode() + b'<STR_LIT:\\x00>' + passwd.encode()<EOL>ciphertext = publicKey.encrypt(<EOL>to_be_encrypted,<EOL>padding.OAEP(<EOL>mgf=padding.MGF1(algorithm=hashes.SHA1()),<EOL>algorithm=hashes.SHA1(),<EOL>label=None<EOL>)<EOL>)<EOL>return urlsafe_b64encode(h + ciphertext)<EOL>", "docstring": "Encrypt credentials using the google publickey, with the\n        RSA algorithm", "id": "f2960:c3:m3"}
{"signature": "def bulkDetails(self, packageNames):", "body": "params = {'<STR_LIT>': '<STR_LIT:1>'}<EOL>req = googleplay_pb2.BulkDetailsRequest()<EOL>req.docid.extend(packageNames)<EOL>data = req.SerializeToString()<EOL>message = self.executeRequestApi2(BULK_URL,<EOL>post_data=data.decode(\"<STR_LIT:utf-8>\"),<EOL>content_type=CONTENT_TYPE_PROTO,<EOL>params=params)<EOL>response = message.payload.bulkDetailsResponse<EOL>return [None if not utils.hasDoc(entry) else<EOL>utils.parseProtobufObj(entry.doc)<EOL>for entry in response.entry]<EOL>", "docstring": "Get several apps details from a list of package names.\n\n        This is much more efficient than calling N times details() since it\n        requires only one request. If an item is not found it returns an empty object\n        instead of throwing a RequestError('Item not found') like the details() function\n\n        Args:\n            packageNames (list): a list of app IDs (usually starting with 'com.').\n\n        Returns:\n            a list of dictionaries containing docv2 data, or None\n            if the app doesn't exist", "id": "f2960:c3:m15"}
{"signature": "def login(self, email=None, password=None, gsfId=None, authSubToken=None):", "body": "if email is not None and password is not None:<EOL><INDENT>encryptedPass = self.encryptPassword(email, password).decode('<STR_LIT:utf-8>')<EOL>params = self.deviceBuilder.getLoginParams(email, encryptedPass)<EOL>params['<STR_LIT>'] = '<STR_LIT>'<EOL>params['<STR_LIT>'] = '<STR_LIT:1>'<EOL>params['<STR_LIT>'] = '<STR_LIT>'<EOL>headers = self.deviceBuilder.getAuthHeaders(self.gsfId)<EOL>headers['<STR_LIT>'] = '<STR_LIT>'<EOL>response = requests.post(AUTH_URL, data=params, verify=ssl_verify,<EOL>proxies=self.proxies_config)<EOL>data = response.text.split()<EOL>params = {}<EOL>for d in data:<EOL><INDENT>if \"<STR_LIT:=>\" not in d:<EOL><INDENT>continue<EOL><DEDENT>k, v = d.split(\"<STR_LIT:=>\", <NUM_LIT:1>)<EOL>params[k.strip().lower()] = v.strip()<EOL><DEDENT>if \"<STR_LIT>\" in params:<EOL><INDENT>ac2dmToken = params[\"<STR_LIT>\"]<EOL><DEDENT>elif \"<STR_LIT:error>\" in params:<EOL><INDENT>if \"<STR_LIT>\" in params[\"<STR_LIT:error>\"]:<EOL><INDENT>raise SecurityCheckError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>raise LoginError(\"<STR_LIT>\" + params[\"<STR_LIT:error>\"])<EOL><DEDENT>else:<EOL><INDENT>raise LoginError(\"<STR_LIT>\")<EOL><DEDENT>self.gsfId = self.checkin(email, ac2dmToken)<EOL>self.getAuthSubToken(email, encryptedPass)<EOL>self.uploadDeviceConfig()<EOL><DEDENT>elif gsfId is not None and authSubToken is not None:<EOL><INDENT>self.gsfId = gsfId<EOL>self.setAuthSubToken(authSubToken)<EOL>self.search('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise LoginError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Login to your Google Account.\n        For first time login you should provide:\n            * email\n            * password\n        For the following logins you need to provide:\n            * gsfId\n            * authSubToken", "id": "f2960:c3:m8"}
{"signature": "def details(self, packageName):", "body": "path = DETAILS_URL + \"<STR_LIT>\".format(requests.utils.quote(packageName))<EOL>data = self.executeRequestApi2(path)<EOL>return utils.parseProtobufObj(data.payload.detailsResponse.docV2)<EOL>", "docstring": "Get app details from a package name.\n\n        packageName is the app unique ID (usually starting with 'com.').", "id": "f2960:c3:m14"}
{"signature": "def getHeaders(self, upload_fields=False):", "body": "if upload_fields:<EOL><INDENT>headers = self.deviceBuilder.getDeviceUploadHeaders()<EOL><DEDENT>else:<EOL><INDENT>headers = self.deviceBuilder.getBaseHeaders()<EOL><DEDENT>if self.gsfId is not None:<EOL><INDENT>headers[\"<STR_LIT>\"] = \"<STR_LIT>\".format(self.gsfId)<EOL><DEDENT>if self.authSubToken is not None:<EOL><INDENT>headers[\"<STR_LIT>\"] = \"<STR_LIT>\" % self.authSubToken<EOL><DEDENT>if self.device_config_token is not None:<EOL><INDENT>headers[\"<STR_LIT>\"] = self.device_config_token<EOL><DEDENT>if self.deviceCheckinConsistencyToken is not None:<EOL><INDENT>headers[\"<STR_LIT>\"] = self.deviceCheckinConsistencyToken<EOL><DEDENT>if self.dfeCookie is not None:<EOL><INDENT>headers[\"<STR_LIT>\"] = self.dfeCookie<EOL><DEDENT>return headers<EOL>", "docstring": "Return the default set of request headers, which\n        can later be expanded, based on the request type", "id": "f2960:c3:m5"}
{"signature": "def text_rank (path):", "body": "graph = build_graph(json_iter(path))<EOL>ranks = nx.pagerank(graph)<EOL>return graph, ranks<EOL>", "docstring": "run the TextRank algorithm", "id": "f2966:m12"}
{"signature": "def json_iter (path):", "body": "with open(path, '<STR_LIT:r>') as f:<EOL><INDENT>for line in f.readlines():<EOL><INDENT>yield json.loads(line)<EOL><DEDENT><DEDENT>", "docstring": "iterator for JSON-per-line in a file pattern", "id": "f2966:m29"}
{"signature": "def render_ranks (graph, ranks, dot_file=\"<STR_LIT>\"):", "body": "if dot_file:<EOL><INDENT>write_dot(graph, ranks, path=dot_file)<EOL><DEDENT>", "docstring": "render the TextRank graph for visual formats", "id": "f2966:m11"}
{"signature": "def normalize_key_phrases (path, ranks, stopwords=None, spacy_nlp=None, skip_ner=True):", "body": "global STOPWORDS, SPACY_NLP<EOL>if (type(stopwords) is list) or (type(stopwords) is set):<EOL><INDENT>stopwords = set(stopwords)<EOL><DEDENT>else:<EOL><INDENT>if not STOPWORDS:<EOL><INDENT>STOPWORDS = load_stopwords(stopwords)<EOL><DEDENT>stopwords = STOPWORDS<EOL><DEDENT>if not spacy_nlp:<EOL><INDENT>if not SPACY_NLP:<EOL><INDENT>SPACY_NLP = spacy.load(\"<STR_LIT>\")<EOL><DEDENT>spacy_nlp = SPACY_NLP<EOL><DEDENT>single_lex = {}<EOL>phrase_lex = {}<EOL>if isinstance(path, str):<EOL><INDENT>path = json_iter(path)<EOL><DEDENT>for meta in path:<EOL><INDENT>sent = [w for w in map(WordNode._make, meta[\"<STR_LIT>\"])]<EOL>for rl in collect_keyword(sent, ranks, stopwords):<EOL><INDENT>id = str(rl.ids)<EOL>if id not in single_lex:<EOL><INDENT>single_lex[id] = rl<EOL><DEDENT>else:<EOL><INDENT>prev_lex = single_lex[id]<EOL>single_lex[id] = rl._replace(count = prev_lex.count + <NUM_LIT:1>)<EOL><DEDENT><DEDENT>if not skip_ner:<EOL><INDENT>for rl in collect_entities(sent, ranks, stopwords, spacy_nlp):<EOL><INDENT>id = str(rl.ids)<EOL>if id not in phrase_lex:<EOL><INDENT>phrase_lex[id] = rl<EOL><DEDENT>else:<EOL><INDENT>prev_lex = phrase_lex[id]<EOL>phrase_lex[id] = rl._replace(count = prev_lex.count + <NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>for rl in collect_phrases(sent, ranks, spacy_nlp):<EOL><INDENT>id = str(rl.ids)<EOL>if id not in phrase_lex:<EOL><INDENT>phrase_lex[id] = rl<EOL><DEDENT>else:<EOL><INDENT>prev_lex = phrase_lex[id]<EOL>phrase_lex[id] = rl._replace(count = prev_lex.count + <NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>rank_list = [rl.rank for rl in single_lex.values()]<EOL>if len(rank_list) < <NUM_LIT:1>:<EOL><INDENT>max_single_rank = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>max_single_rank = max(rank_list)<EOL><DEDENT>repeated_roots = {}<EOL>for rl in sorted(phrase_lex.values(), key=lambda rl: len(rl), reverse=True):<EOL><INDENT>rank_list = 
[]<EOL>for i in iter(range(<NUM_LIT:0>, len(rl.ids))):<EOL><INDENT>id = rl.ids[i]<EOL>if not id in repeated_roots:<EOL><INDENT>repeated_roots[id] = <NUM_LIT:1.0><EOL>rank_list.append(rl.rank[i])<EOL><DEDENT>else:<EOL><INDENT>repeated_roots[id] += <NUM_LIT:1.0><EOL>rank_list.append(rl.rank[i] / repeated_roots[id])<EOL><DEDENT><DEDENT>phrase_rank = calc_rms(rank_list)<EOL>single_lex[str(rl.ids)] = rl._replace(rank = phrase_rank)<EOL><DEDENT>sum_ranks = sum([rl.rank for rl in single_lex.values()])<EOL>for rl in sorted(single_lex.values(), key=lambda rl: rl.rank, reverse=True):<EOL><INDENT>if sum_ranks > <NUM_LIT:0.0>:<EOL><INDENT>rl = rl._replace(rank=rl.rank / sum_ranks)<EOL><DEDENT>elif rl.rank == <NUM_LIT:0.0>:<EOL><INDENT>rl = rl._replace(rank=<NUM_LIT:0.1>)<EOL><DEDENT>rl = rl._replace(text=re.sub(r\"<STR_LIT>\", r\"<STR_LIT>\", rl.text))<EOL>yield rl<EOL><DEDENT>", "docstring": "collect keyphrases, named entities, etc., while removing stop words", "id": "f2966:m22"}
{"signature": "def collect_keyword (sent, ranks, stopwords):", "body": "for w in sent:<EOL><INDENT>if (w.word_id > <NUM_LIT:0>) and (w.root in ranks) and (w.pos[<NUM_LIT:0>] in \"<STR_LIT>\") and (w.root not in stopwords):<EOL><INDENT>rl = RankedLexeme(text=w.raw.lower(), rank=ranks[w.root]/<NUM_LIT>, ids=[w.word_id], pos=w.pos.lower(), count=<NUM_LIT:1>)<EOL>if DEBUG:<EOL><INDENT>print(rl)<EOL><DEDENT>yield rl<EOL><DEDENT><DEDENT>", "docstring": "iterator for collecting the single-word keyphrases", "id": "f2966:m17"}
{"signature": "def build_graph (json_iter):", "body": "global DEBUG, WordNode<EOL>graph = nx.DiGraph()<EOL>for meta in json_iter:<EOL><INDENT>if DEBUG:<EOL><INDENT>print(meta[\"<STR_LIT>\"])<EOL><DEDENT>for pair in get_tiles(map(WordNode._make, meta[\"<STR_LIT>\"])):<EOL><INDENT>if DEBUG:<EOL><INDENT>print(pair)<EOL><DEDENT>for word_id in pair:<EOL><INDENT>if not graph.has_node(word_id):<EOL><INDENT>graph.add_node(word_id)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>graph.edge[pair[<NUM_LIT:0>]][pair[<NUM_LIT:1>]][\"<STR_LIT>\"] += <NUM_LIT:1.0><EOL><DEDENT>except KeyError:<EOL><INDENT>graph.add_edge(pair[<NUM_LIT:0>], pair[<NUM_LIT:1>], weight=<NUM_LIT:1.0>)<EOL><DEDENT><DEDENT><DEDENT>return graph<EOL>", "docstring": "construct the TextRank graph from parsed paragraphs", "id": "f2966:m9"}
{"signature": "def write_dot (graph, ranks, path=\"<STR_LIT>\"):", "body": "dot = Digraph()<EOL>for node in graph.nodes():<EOL><INDENT>dot.node(node, \"<STR_LIT>\" % (node, ranks[node]))<EOL><DEDENT>for edge in graph.edges():<EOL><INDENT>dot.edge(edge[<NUM_LIT:0>], edge[<NUM_LIT:1>], constraint=\"<STR_LIT:false>\")<EOL><DEDENT>with open(path, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(dot.source)<EOL><DEDENT>", "docstring": "output the graph in Dot file format", "id": "f2966:m10"}
{"signature": "def parse_doc (json_iter):", "body": "global DEBUG<EOL>for meta in json_iter:<EOL><INDENT>base_idx = <NUM_LIT:0><EOL>for graf_text in filter_quotes(meta[\"<STR_LIT:text>\"], is_email=False):<EOL><INDENT>if DEBUG:<EOL><INDENT>print(\"<STR_LIT>\", graf_text)<EOL><DEDENT>grafs, new_base_idx = parse_graf(meta[\"<STR_LIT:id>\"], graf_text, base_idx)<EOL>base_idx = new_base_idx<EOL>for graf in grafs:<EOL><INDENT>yield graf<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "parse one document to prep for TextRank", "id": "f2966:m7"}
{"signature": "def top_sentences (kernel, path):", "body": "key_sent = {}<EOL>i = <NUM_LIT:0><EOL>if isinstance(path, str):<EOL><INDENT>path = json_iter(path)<EOL><DEDENT>for meta in path:<EOL><INDENT>graf = meta[\"<STR_LIT>\"]<EOL>tagged_sent = [WordNode._make(x) for x in graf]<EOL>text = \"<STR_LIT:U+0020>\".join([w.raw for w in tagged_sent])<EOL>m_sent = mh_digest([str(w.word_id) for w in tagged_sent])<EOL>dist = sum([m_sent.jaccard(m) * rl.rank for rl, m in kernel])<EOL>key_sent[text] = (dist, i)<EOL>i += <NUM_LIT:1><EOL><DEDENT>for text, (dist, i) in sorted(key_sent.items(), key=lambda x: x[<NUM_LIT:1>][<NUM_LIT:0>], reverse=True):<EOL><INDENT>yield SummarySent(dist=dist, idx=i, text=text)<EOL><DEDENT>", "docstring": "determine distance for each sentence", "id": "f2966:m25"}
{"signature": "def enumerate_chunks (phrase, spacy_nlp):", "body": "if (len(phrase) > <NUM_LIT:1>):<EOL><INDENT>found = False<EOL>text = \"<STR_LIT:U+0020>\".join([rl.text for rl in phrase])<EOL>doc = spacy_nlp(text.strip(), parse=True)<EOL>for np in doc.noun_chunks:<EOL><INDENT>if np.text != text:<EOL><INDENT>found = True<EOL>yield np.text, find_chunk(phrase, np.text.split(\"<STR_LIT:U+0020>\"))<EOL><DEDENT><DEDENT>if not found and all([rl.pos[<NUM_LIT:0>] != \"<STR_LIT:v>\" for rl in phrase]):<EOL><INDENT>yield text, phrase<EOL><DEDENT><DEDENT>", "docstring": "iterate through the noun phrases", "id": "f2966:m16"}
{"signature": "def limit_sentences (path, word_limit=<NUM_LIT:100>):", "body": "word_count = <NUM_LIT:0><EOL>if isinstance(path, str):<EOL><INDENT>path = json_iter(path)<EOL><DEDENT>for meta in path:<EOL><INDENT>if not isinstance(meta, SummarySent):<EOL><INDENT>p = SummarySent(**meta)<EOL><DEDENT>else:<EOL><INDENT>p = meta<EOL><DEDENT>sent_text = p.text.strip().split(\"<STR_LIT:U+0020>\")<EOL>sent_len = len(sent_text)<EOL>if (word_count + sent_len) > word_limit:<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>word_count += sent_len<EOL>yield sent_text, p.idx<EOL><DEDENT><DEDENT>", "docstring": "iterator for the most significant sentences, up to a specified limit", "id": "f2966:m27"}
{"signature": "def fix_microsoft (foo):", "body": "i = <NUM_LIT:0><EOL>bar = []<EOL>while i < len(foo):<EOL><INDENT>text, lemma, pos, tag = foo[i]<EOL>if (text == \"<STR_LIT:#>\") and (i > <NUM_LIT:0>):<EOL><INDENT>prev_tok = bar[-<NUM_LIT:1>]<EOL>prev_tok[<NUM_LIT:0>] += \"<STR_LIT:#>\"<EOL>prev_tok[<NUM_LIT:1>] += \"<STR_LIT:#>\"<EOL>bar[-<NUM_LIT:1>] = prev_tok<EOL><DEDENT>else:<EOL><INDENT>bar.append(foo[i])<EOL><DEDENT>i += <NUM_LIT:1><EOL><DEDENT>return bar<EOL>", "docstring": "fix special case for `c#`, `f#`, etc.; thanks Microsoft", "id": "f2966:m4"}
{"signature": "def make_sentence (sent_text):", "body": "lex = []<EOL>idx = <NUM_LIT:0><EOL>for word in sent_text:<EOL><INDENT>if len(word) > <NUM_LIT:0>:<EOL><INDENT>if (idx > <NUM_LIT:0>) and not (word[<NUM_LIT:0>] in \"<STR_LIT>\"):<EOL><INDENT>lex.append(\"<STR_LIT:U+0020>\")<EOL><DEDENT>lex.append(word)<EOL><DEDENT>idx += <NUM_LIT:1><EOL><DEDENT>return \"<STR_LIT>\".join(lex)<EOL>", "docstring": "construct a sentence text, with proper spacing", "id": "f2966:m28"}
{"signature": "def cleanup_text (text):", "body": "x = \"<STR_LIT:U+0020>\".join(map(lambda s: s.strip(), text.split(\"<STR_LIT:\\n>\"))).strip()<EOL>x = x.replace('<STR_LIT>', '<STR_LIT:\">').replace('<STR_LIT>', '<STR_LIT:\">')<EOL>x = x.replace(\"<STR_LIT>\", \"<STR_LIT:'>\").replace(\"<STR_LIT>\", \"<STR_LIT:'>\").replace(\"<STR_LIT>\", \"<STR_LIT:'>\")<EOL>x = x.replace('<STR_LIT>', '<STR_LIT>').replace('<STR_LIT>', '<STR_LIT:->')<EOL>x = str(unicodedata.normalize('<STR_LIT>', x).encode('<STR_LIT:ascii>', '<STR_LIT:ignore>').decode('<STR_LIT:ascii>'))<EOL>try:<EOL><INDENT>assert type(x).__name__ == '<STR_LIT:str>'<EOL><DEDENT>except AssertionError:<EOL><INDENT>print(\"<STR_LIT>\", type(line), line)<EOL><DEDENT>return x<EOL>", "docstring": "It scrubs the garbled from its stream...\nOr it gets the debugger again.", "id": "f2970:m0"}
{"signature": "@contextmanager<EOL><INDENT>def visit(self, key):<DEDENT>", "body": "self[key] = key<EOL>try:<EOL><INDENT>yield key<EOL><DEDENT>finally:<EOL><INDENT>del self[key]<EOL><DEDENT>", "docstring": "Visits key and marks as visited.\n        Support context manager interface.\n\n        :param key: key being visited.", "id": "f2981:c0:m0"}
{"signature": "def construct_mapping(self, node, deep=False):", "body": "mapping = super(ExtendedSafeConstructor, self).construct_mapping(<EOL>node, deep)<EOL>return {<EOL>(str(key) if isinstance(key, int) else key): mapping[key]<EOL>for key in mapping<EOL>}<EOL>", "docstring": "While yaml supports integer keys, these are not valid in\n        json, and will break jsonschema. This method coerces all keys\n        to strings.", "id": "f2986:c0:m0"}
{"signature": "def __add__(self, other):", "body": "other = Position(other)<EOL>return Position((self.line + other.line,<EOL>self.column + other.column))<EOL>", "docstring": "(1, 1) + (1, 1) -> (2, 2)", "id": "f3011:c0:m5"}
{"signature": "def __lt__(self, other):", "body": "other = Position(other)<EOL>return (self.line, self.column) < (other.line, other.column)<EOL>", "docstring": "Compares Position with Position or indexable object", "id": "f3011:c0:m11"}
{"signature": "def advance_line(self):", "body": "self.line += <NUM_LIT:1><EOL>self.column = <NUM_LIT:1><EOL>", "docstring": "(3, 10) -> (4, 1)", "id": "f3011:c0:m2"}
{"signature": "def __eq__(self, other):", "body": "if not (hasattr(other, '<STR_LIT>') and hasattr(other, '<STR_LIT>')) and len(other) < <NUM_LIT:2>:<EOL><INDENT>return False<EOL><DEDENT>other = Position(other)<EOL>return self.line == other.line and self.column == other.column<EOL>", "docstring": "Compares Positions or Position and tuple\n\n        Will not fail if other is an unsupported type", "id": "f3011:c0:m10"}
{"signature": "def node_to_bounding_box(node):", "body": "return BoundingBoxFinder().compute(node)<EOL>", "docstring": "Bounding box of the given node\n\n    The bounding box of a node represents its left most and right most\n    position in the rendered source code. Its left position is here\n    always (1, 1).", "id": "f3011:m3"}
{"signature": "def __eq__(self, other):", "body": "if not (hasattr(other, '<STR_LIT>') and hasattr(other, '<STR_LIT>')) and len(other) < <NUM_LIT:2>:<EOL><INDENT>return False<EOL><DEDENT>other = BoundingBox(other)<EOL>return self.top_left == other.top_left and self.bottom_right == other.bottom_right<EOL>", "docstring": "Compares BoundingBox with BoundingBox or indexable object", "id": "f3011:c1:m1"}
{"signature": "@property<EOL><INDENT>def right(self):<DEDENT>", "body": "return Position((self.line, self.column + <NUM_LIT:1>))<EOL>", "docstring": "(3, 10) -> (3, 11)", "id": "f3011:c0:m4"}
{"signature": "def position_to_path(tree, position):", "body": "return PositionFinder().find(tree, position)<EOL>", "docstring": "Path to the node located at the given line and column\n\n    This function locates a node in the rendered source code", "id": "f3011:m0"}
{"signature": "def position_to_node(tree, position):", "body": "return path_to_node(tree, position_to_path(tree, position))<EOL>", "docstring": "FST node located at the given line and column", "id": "f3011:m2"}
{"signature": "def path_to_node(tree, path):", "body": "if path is None:<EOL><INDENT>return None<EOL><DEDENT>node = tree<EOL>for key in path:<EOL><INDENT>node = child_by_key(node, key)<EOL><DEDENT>return node<EOL>", "docstring": "FST node located at the given path", "id": "f3011:m1"}
{"signature": "def get_space(node):", "body": "if len(node) < <NUM_LIT:4> or len(node[<NUM_LIT:3>]) == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>return transform_tabs_to_spaces(node[<NUM_LIT:3>][<NUM_LIT:0>][<NUM_LIT:1>])<EOL>", "docstring": "Return space formatting information of node.\n\n    If the node does not have a third formatting item - like in\n    a ('ENDL', '\\n') node - then we return None as a flag value. This is\n    maybe not the best behavior but it seems to work for now.", "id": "f3016:m2"}
{"signature": "def gettokentype(self):", "body": "return self.name<EOL>", "docstring": "Returns the type or name of the token.", "id": "f3029:c0:m5"}
{"signature": "def getstr(self):", "body": "return self.value<EOL>", "docstring": "Returns the string represented by this token.", "id": "f3029:c0:m6"}
{"signature": "@classmethod<EOL><INDENT>def regular_polygon(cls, center, radius, n_vertices, start_angle=<NUM_LIT:0>, **kwargs):<DEDENT>", "body": "angles = (np.arange(n_vertices) * <NUM_LIT:2> * np.pi / n_vertices) + start_angle<EOL>return cls(center + radius * np.array([np.cos(angles), np.sin(angles)]).T, **kwargs)<EOL>", "docstring": "Construct a regular polygon.\n\n        Parameters\n        ----------\n        center : array-like\n        radius : float\n        n_vertices : int\n        start_angle : float, optional\n            Where to put the first point, relative to `center`,\n            in radians counter-clockwise starting from the horizontal axis.\n        kwargs\n            Other keyword arguments are passed to the |Shape| constructor.", "id": "f3037:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def circle(cls, center, radius, n_vertices=<NUM_LIT:50>, **kwargs):<DEDENT>", "body": "return cls.regular_polygon(center, radius, n_vertices, **kwargs)<EOL>", "docstring": "Construct a circle.\n\n        Parameters\n        ----------\n        center : array-like\n        radius : float\n        n_vertices : int, optional\n            Number of points to draw.\n            Decrease for performance, increase for appearance.\n        kwargs\n            Other keyword arguments are passed to the |Shape| constructor.", "id": "f3037:c0:m2"}
{"signature": "def update(self, dt):", "body": "self.translate(dt * self.velocity)<EOL>self.rotate(dt * self.angular_velocity)<EOL>", "docstring": "Update the shape's position by moving it forward according to its velocity.\n\n        Parameters\n        ----------\n        dt : float", "id": "f3037:c0:m24"}
{"signature": "def covers(self, other):", "body": "return bool(self.poly.covers(other.poly))<EOL>", "docstring": "Check if the shape completely covers another shape.\n\n        Parameters\n        ----------\n        other : |Shape|\n\n        Returns\n        -------\n        bool", "id": "f3037:c0:m27"}
{"signature": "@property<EOL><INDENT>def _kwargs(self):<DEDENT>", "body": "return dict(color=self.color, velocity=self.velocity, colors=self.colors)<EOL>", "docstring": "Keyword arguments for recreating the Shape from the vertices.", "id": "f3037:c0:m8"}
{"signature": "def enable(self, enabled):", "body": "self.enabled = enabled<EOL>return self<EOL>", "docstring": "Set whether the shape should be drawn.\n\n        Parameters\n        ----------\n\n        enabled : bool", "id": "f3037:c0:m25"}
{"signature": "def flip(self, angle, center=None):", "body": "return self.rotate(-angle, center=center).flip_y(center=center).rotate(angle, center=center)<EOL>", "docstring": "Flip the shape in an arbitrary direction.\n\n        Parameters\n        ----------\n        angle : array-like\n            The angle, in radians counter-clockwise from the horizontal axis,\n            defining the angle about which to flip the shape (of a line through `center`).\n        center : array-like, optional\n            The point about which to flip.\n            If not passed, the center of the shape will be used.", "id": "f3037:c0:m21"}
{"signature": "def flip_x(self, center=None):", "body": "if center is None:<EOL><INDENT>self.poly.flip()<EOL><DEDENT>else:<EOL><INDENT>self.poly.flip(center[<NUM_LIT:0>])<EOL><DEDENT>", "docstring": "Flip the shape in the x direction, in-place.\n\n        Parameters\n        ----------\n        center : array-like, optional\n            Point about which to flip.\n            If not passed, the center of the shape will be used.", "id": "f3037:c0:m19"}
{"signature": "def overlaps(self, other):", "body": "return bool(self.poly.overlaps(other.poly))<EOL>", "docstring": "Check if two shapes overlap.\n\n        Parameters\n        ----------\n        other : |Shape|\n\n        Returns\n        -------\n        bool", "id": "f3037:c0:m26"}
{"signature": "def enqueue_task(self, task):", "body": "data = dumps(task)<EOL>if self._async:<EOL><INDENT>self.publisher_client.publish(self.topic_path, data=data)<EOL>logger.info('<STR_LIT>'.format(task.id))<EOL><DEDENT>else:<EOL><INDENT>unpickled_task = unpickle(data)<EOL>logger.info(<EOL>'<STR_LIT>'.format(unpickled_task.id)<EOL>)<EOL>with measure_time() as summary, self.queue_context():<EOL><INDENT>unpickled_task.execute(queue=self)<EOL>summary(unpickled_task.summary())<EOL><DEDENT><DEDENT>return TaskResult(task.id, self)<EOL>", "docstring": "Enqueues a task directly. This is used when a task is retried or if\n        a task was manually created.\n\n        Note that this does not store the task.", "id": "f3044:c0:m5"}
{"signature": "def task_context(self):", "body": "return task_context(self)<EOL>", "docstring": "Returns a context manager that sets this task as the current_task\nglobal. Similar to flask's app.request_context. This is used by the\nworkers to make the global available inside of task functions.", "id": "f3045:c2:m9"}
{"signature": "def cleanup(self):", "body": "if self.subscription:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>self.subscriber_client.delete_subscription(self.subscription)<EOL><DEDENT>", "docstring": "Deletes this worker's subscription.", "id": "f3046:c0:m2"}
{"signature": "def _get_or_create_subscription(self):", "body": "topic_path = self._get_topic_path()<EOL>subscription_name = '<STR_LIT>'.format(<EOL>queue.PUBSUB_OBJECT_PREFIX, self.name, uuid4().hex)<EOL>subscription_path = self.subscriber_client.subscription_path(<EOL>self.project, subscription_name)<EOL>try:<EOL><INDENT>self.subscriber_client.get_subscription(subscription_path)<EOL><DEDENT>except google.cloud.exceptions.NotFound:<EOL><INDENT>logger.info(\"<STR_LIT>\".format(<EOL>subscription_name))<EOL>self.subscriber_client.create_subscription(<EOL>subscription_path, topic_path)<EOL><DEDENT>return subscription_path<EOL>", "docstring": "In a broadcast queue, workers have a unique subscription ensuring\n        that every worker recieves a copy of every task.", "id": "f3046:c0:m1"}
{"signature": "def unpickle(pickled_string):", "body": "try:<EOL><INDENT>obj = loads(pickled_string)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise UnpickleError('<STR_LIT>', pickled_string, e)<EOL><DEDENT>return obj<EOL>", "docstring": "Unpickles a string, but raises a unified UnpickleError in case anything\n    fails.\n    This is a helper method to not have to deal with the fact that `loads()`\n    potentially raises many types of exceptions (e.g. AttributeError,\n    IndexError, TypeError, KeyError, etc.)", "id": "f3055:m0"}
{"signature": "def busybox_single_app_bundle_fixture(num_bundles=<NUM_LIT:1>, command=['<STR_LIT>'], app_name_transformer=None):", "body": "if app_name_transformer is None:<EOL><INDENT>app_name_transformer = lambda x: x<EOL><DEDENT>app_dict = {'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:image>': '<STR_LIT>',<EOL>'<STR_LIT>': {'<STR_LIT>': command},<EOL>'<STR_LIT:test>': {'<STR_LIT:image>': '<STR_LIT>',<EOL>'<STR_LIT>': ['<STR_LIT>'],<EOL>'<STR_LIT>': [{'<STR_LIT:name>': '<STR_LIT>',<EOL>'<STR_LIT>': ['<STR_LIT>'],<EOL>'<STR_LIT>': '<STR_LIT:.>'},<EOL>{'<STR_LIT:name>': '<STR_LIT>',<EOL>'<STR_LIT>': ['<STR_LIT>'],<EOL>'<STR_LIT>': '<STR_LIT:.>'},<EOL>{'<STR_LIT:name>': '<STR_LIT>',<EOL>'<STR_LIT>': ['<STR_LIT>'],<EOL>'<STR_LIT>': '<STR_LIT:.>'}]}}<EOL>for bundle in range(num_bundles):<EOL><INDENT>app_name = app_name_transformer('<STR_LIT>'.format(_num_to_alpha(bundle)))<EOL>bundle_name = '<STR_LIT>'.format(_num_to_alpha(bundle))<EOL>_write('<STR_LIT>', bundle_name, {'<STR_LIT:description>': '<STR_LIT>', '<STR_LIT>': [app_name]})<EOL>_write('<STR_LIT>', app_name, app_dict)<EOL><DEDENT>", "docstring": "Fixture for use in integration tests. The local repo at\n    /tmp/fake-repo should be set up before using this fixture. Optionally takes in\n    a name transformer function which is applied to the default names of the apps.", "id": "f3059:m9"}
{"signature": "@patch('<STR_LIT>')<EOL><INDENT>def run_command(self, args, fake_exit, raise_on_error=True):<DEDENT>", "body": "with patch('<STR_LIT>', wraps=self.exec_docker_patch) as fake_exec_docker:<EOL><INDENT>fake_exit.side_effect = SysExit('<STR_LIT>')<EOL>self.fake_exec_docker = fake_exec_docker<EOL>sys.argv = ['<STR_LIT>'] + args.split('<STR_LIT:U+0020>')<EOL>try:<EOL><INDENT>client_entrypoint()<EOL><DEDENT>except SysExit:<EOL><INDENT>pass<EOL><DEDENT>for call in fake_exit.mock_calls:<EOL><INDENT>name, args, kwargs = call<EOL>if len(args) == <NUM_LIT:1> and args[<NUM_LIT:0>] > <NUM_LIT:0> and raise_on_error:<EOL><INDENT>self._clear_stdout()<EOL>raise CommandError('<STR_LIT>'.format('<STR_LIT:U+0020>'.join(sys.argv), args[<NUM_LIT:0>]))<EOL><DEDENT><DEDENT>result = self.stdout<EOL>self._clear_stdout()<EOL>return result<EOL><DEDENT>", "docstring": "Run a command through the Dusty client entrypoint, e.g. simulating\n        the Dusty CLI as close as possible without having to call a subprocess.\n        This command raises if the command fails, otherwise it returns the\n        stdout generated by the command.", "id": "f3121:c4:m7"}
{"signature": "def _load_ssh_auth_post_yosemite(mac_username):", "body": "user_id = subprocess.check_output(['<STR_LIT:id>', '<STR_LIT>', mac_username])<EOL>ssh_auth_sock = subprocess.check_output(['<STR_LIT>', '<STR_LIT>', user_id, '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']).rstrip()<EOL>_set_ssh_auth_sock(ssh_auth_sock)<EOL>", "docstring": "Starting with Yosemite, launchd was rearchitected and now only one\n    launchd process runs for all users. This allows us to much more easily\n    impersonate a user through launchd and extract the environment\n    variables from their running processes.", "id": "f3124:m13"}
{"signature": "def check_and_load_ssh_auth():", "body": "mac_username = get_config_value(constants.CONFIG_MAC_USERNAME_KEY)<EOL>if not mac_username:<EOL><INDENT>logging.info(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if not _running_on_mac(): <EOL><INDENT>logging.info(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if _mac_version_is_post_yosemite():<EOL><INDENT>_load_ssh_auth_post_yosemite(mac_username)<EOL><DEDENT>else:<EOL><INDENT>_load_ssh_auth_pre_yosemite()<EOL><DEDENT>", "docstring": "Will check the mac_username config value; if it is present, will load that user's\nSSH_AUTH_SOCK environment variable to the current environment.  This allows git clones\nto behave the same for the daemon as they do for the user", "id": "f3124:m16"}
{"signature": "def _lib_install_commands_for_lib(app_name, assembled_specs):", "body": "libs = assembled_specs['<STR_LIT>'][app_name]['<STR_LIT>']['<STR_LIT>']<EOL>return _lib_install_commands_for_libs(assembled_specs, libs)<EOL>", "docstring": "This returns a list of all the commands that will install libraries for a\n    given lib", "id": "f3127:m12"}
{"signature": "def _compile_docker_commands(app_name, assembled_specs, port_spec):", "body": "app_spec = assembled_specs['<STR_LIT>'][app_name]<EOL>commands = ['<STR_LIT>']<EOL>commands += _lib_install_commands_for_app(app_name, assembled_specs)<EOL>if app_spec['<STR_LIT>']:<EOL><INDENT>commands.append(\"<STR_LIT>\".format(container_code_path(app_spec)))<EOL>commands.append(\"<STR_LIT>\".format(container_code_path(app_spec)))<EOL><DEDENT>commands += _copy_assets_commands_for_app(app_spec, assembled_specs)<EOL>commands += _get_once_commands(app_spec, port_spec)<EOL>commands += _get_always_commands(app_spec)<EOL>return commands<EOL>", "docstring": "This is used to compile the command that will be run when the docker container starts\n    up. This command has to install any libs that the app uses, run the `always` command, and\n    run the `once` command if the container is being launched for the first time", "id": "f3127:m8"}
{"signature": "def _lib_install_commands(lib_spec):", "body": "if not lib_spec['<STR_LIT>']:<EOL><INDENT>return []<EOL><DEDENT>return [\"<STR_LIT>\".format(lib_spec['<STR_LIT>'])] + lib_spec['<STR_LIT>']<EOL>", "docstring": "This returns a single commmand that will install a library in a docker container", "id": "f3127:m14"}
{"signature": "def repo_mount_validator():", "body": "def validator(document):<EOL><INDENT>if '<STR_LIT>' in document and '<STR_LIT>' in document:<EOL><INDENT>return<EOL><DEDENT>elif '<STR_LIT>' not in document and '<STR_LIT>' not in document:<EOL><INDENT>return<EOL><DEDENT>return '<STR_LIT>'<EOL><DEDENT>return validator<EOL>", "docstring": "If either repo or mount are provided, they must both be provided.", "id": "f3133:m1"}
{"signature": "def _ensure_managed_repos_dir_exists():", "body": "if not os.path.exists(constants.REPOS_DIR):<EOL><INDENT>os.makedirs(constants.REPOS_DIR)<EOL><DEDENT>", "docstring": "Our exports file will be invalid if this folder doesn't exist, and the NFS server\nwill not run correctly.", "id": "f3137:m2"}
{"signature": "def configure_nfs_server():", "body": "repos_for_export = get_all_repos(active_only=True, include_specs_repo=False)<EOL>current_exports = _get_current_exports()<EOL>needed_exports = _get_exports_for_repos(repos_for_export)<EOL>_ensure_managed_repos_dir_exists()<EOL>if not needed_exports.difference(current_exports):<EOL><INDENT>if not _server_is_running():<EOL><INDENT>_restart_server()<EOL><DEDENT>return<EOL><DEDENT>_write_exports_config(needed_exports)<EOL>_restart_server()<EOL>", "docstring": "This function is used with `dusty up`.  It will check all active repos to see if\nthey are exported.  If any are missing, it will replace current dusty exports with\nexports that are needed for currently active repos, and restart\nthe nfs server", "id": "f3137:m0"}
{"signature": "def vm_path_is_directory(remote_path):", "body": "try:<EOL><INDENT>check_call_on_vm('<STR_LIT>'.format(remote_path))<EOL><DEDENT>except CalledProcessError:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "A weak check of whether a path in the Dusty VM is a directory.\n    This function returns False on any process error, so False may indicate\n    other failures such as the path not actually existing.", "id": "f3139:m2"}
{"signature": "@memoized<EOL>def get_authed_registries():", "body": "result = set()<EOL>if not os.path.exists(constants.DOCKER_CONFIG_PATH):<EOL><INDENT>return result<EOL><DEDENT>config = json.load(open(constants.DOCKER_CONFIG_PATH, '<STR_LIT:r>'))<EOL>for registry in config.get('<STR_LIT>', {}).iterkeys():<EOL><INDENT>try:<EOL><INDENT>parsed = urlparse(registry)<EOL><DEDENT>except Exception:<EOL><INDENT>log_to_client('<STR_LIT>').format(registry)<EOL><DEDENT>result.add(parsed.netloc) if parsed.netloc else result.add(parsed.path)<EOL><DEDENT>return result<EOL>", "docstring": "Reads the local Docker client config for the current user\n    and returns all registries to which the user may be logged in.\n    This is intended to be run client-side, not by the daemon.", "id": "f3140:m1"}
{"signature": "def registry_from_image(image_name):", "body": "if '<STR_LIT:/>' not in image_name: <EOL><INDENT>return constants.PUBLIC_DOCKER_REGISTRY<EOL><DEDENT>prefix = image_name.split('<STR_LIT:/>')[<NUM_LIT:0>]<EOL>if '<STR_LIT:.>' not in prefix: <EOL><INDENT>return constants.PUBLIC_DOCKER_REGISTRY<EOL><DEDENT>return prefix<EOL>", "docstring": "Returns the Docker registry host associated with\n    a given image name.", "id": "f3140:m0"}
{"signature": "def get_dusty_containers(services, include_exited=False):", "body": "client = get_docker_client()<EOL>if services:<EOL><INDENT>containers = [get_container_for_app_or_service(service, include_exited=include_exited) for service in services]<EOL>return [container for container in containers if container]<EOL><DEDENT>else:<EOL><INDENT>return [container<EOL>for container in client.containers(all=include_exited)<EOL>if any(name.startswith('<STR_LIT>') for name in container.get('<STR_LIT>', []))]<EOL><DEDENT>", "docstring": "Get a list of containers associated with the list\n    of services. If no services are provided, attempts to\n    return all containers associated with Dusty.", "id": "f3144:m5"}
{"signature": "def _compose_restart(services):", "body": "def _restart_container(client, container):<EOL><INDENT>log_to_client('<STR_LIT>'.format(get_canonical_container_name(container)))<EOL>client.restart(container['<STR_LIT>'], timeout=<NUM_LIT:1>)<EOL><DEDENT>assembled_specs = get_assembled_specs()<EOL>if services == []:<EOL><INDENT>services = [spec.name for spec in assembled_specs.get_apps_and_services()]<EOL><DEDENT>logging.info('<STR_LIT>'.format(services))<EOL>client = get_docker_client()<EOL>for service in services:<EOL><INDENT>container = get_container_for_app_or_service(service, include_exited=True)<EOL>if container is None:<EOL><INDENT>log_to_client('<STR_LIT>'.format(service))<EOL>continue<EOL><DEDENT>stopped_linked_containers = _check_stopped_linked_containers(container, assembled_specs)<EOL>if stopped_linked_containers:<EOL><INDENT>log_to_client('<STR_LIT>'.format(<EOL>stopped_linked_containers, service))<EOL><DEDENT>else:<EOL><INDENT>_restart_container(client, container)<EOL><DEDENT><DEDENT>", "docstring": "Well, this is annoying. Compose 1.2 shipped with the\n    restart functionality fucking broken, so we can't set a faster\n    timeout than 10 seconds (which is way too long) using Compose.\n    We are therefore resigned to trying to hack this together\n    ourselves. Lame.\n\n    Relevant fix which will make it into the next release:\n    https://github.com/docker/compose/pull/1318", "id": "f3145:m6"}
{"signature": "def update_running_containers_from_spec(compose_config, recreate_containers=True):", "body": "write_composefile(compose_config, constants.COMPOSEFILE_PATH)<EOL>compose_up(constants.COMPOSEFILE_PATH, '<STR_LIT>', recreate_containers=recreate_containers)<EOL>", "docstring": "Takes in a Compose spec from the Dusty Compose compiler,\n    writes it to the Compose spec folder so Compose can pick it\n    up, then does everything needed to make sure the Docker VM is\n    up and running containers with the updated config.", "id": "f3145:m7"}
{"signature": "def stop_running_services(services=None):", "body": "if services is None:<EOL><INDENT>services = []<EOL><DEDENT>_compose_stop(constants.COMPOSEFILE_PATH, '<STR_LIT>', services)<EOL>", "docstring": "Stop running containers owned by Dusty, or a specific\n    list of Compose services if provided.\n\n    Here, \"services\" refers to the Compose version of the term,\n    so any existing running container, by name. This includes Dusty\n    apps and services.", "id": "f3145:m8"}
{"signature": "def remove_exited_dusty_containers():", "body": "client = get_docker_client()<EOL>exited_containers = get_exited_dusty_containers()<EOL>removed_containers = []<EOL>for container in exited_containers:<EOL><INDENT>log_to_client(\"<STR_LIT>\".format(container['<STR_LIT>'][<NUM_LIT:0>]))<EOL>try:<EOL><INDENT>client.remove_container(container['<STR_LIT>'], v=True)<EOL>removed_containers.append(container)<EOL><DEDENT>except Exception as e:<EOL><INDENT>log_to_client(e.message or str(e))<EOL><DEDENT><DEDENT>return removed_containers<EOL>", "docstring": "Removed all dusty containers with 'Exited' in their status", "id": "f3146:m1"}
{"signature": "def remove_current_dusty_config(config):", "body": "return constants.DUSTY_CONFIG_REGEX.sub(\"<STR_LIT>\", config)<EOL>", "docstring": "Given a string representing the contents of a\n    file, this function strips out the Dusty config section\n    denominated by the Dusty header and footer. Returns\n    the stripped string.", "id": "f3148:m2"}
{"signature": "def _ip_for_mac_from_ip_addr_show(ip_addr_show, target_mac):", "body": "return_next_ip = False<EOL>for line in ip_addr_show.splitlines():<EOL><INDENT>line = line.strip()<EOL>if line.startswith('<STR_LIT>'):<EOL><INDENT>line_mac = line.split('<STR_LIT:U+0020>')[<NUM_LIT:1>].replace('<STR_LIT::>', '<STR_LIT>')<EOL>if line_mac == target_mac:<EOL><INDENT>return_next_ip = True<EOL><DEDENT><DEDENT>elif return_next_ip and line.startswith('<STR_LIT>') and not line.startswith('<STR_LIT>'):<EOL><INDENT>ip = line.split('<STR_LIT:U+0020>')[<NUM_LIT:1>].split('<STR_LIT:/>')[<NUM_LIT:0>]<EOL>return ip<EOL><DEDENT><DEDENT>", "docstring": "Given the rather-complex output from an 'ip addr show' command\n    on the VM, parse the output to determine the IP address\n    assigned to the interface with the given MAC.", "id": "f3150:m28"}
{"signature": "def _apply_nic_fix():", "body": "log_to_client('<STR_LIT>')<EOL>check_call_demoted(['<STR_LIT>', '<STR_LIT>', constants.VM_MACHINE_NAME, '<STR_LIT>', constants.VM_NIC_TYPE])<EOL>", "docstring": "Set NIC 1 to use PCnet-FAST III. The host-only NIC type is\n    set during docker-machine create (and Machine will change it\n    back if it is changed manually), which is why we only change\n    NIC 1 here.", "id": "f3150:m20"}
{"signature": "def _get_localhost_ssh_port():", "body": "for line in _get_vm_config():<EOL><INDENT>if line.startswith('<STR_LIT>'):<EOL><INDENT>spec = line.split('<STR_LIT:=>')[<NUM_LIT:1>].strip('<STR_LIT:\">')<EOL>name, protocol, host, host_port, target, target_port = spec.split('<STR_LIT:U+002C>')<EOL>if name == '<STR_LIT>' and protocol == '<STR_LIT>' and target_port == '<STR_LIT>':<EOL><INDENT>return host_port<EOL><DEDENT><DEDENT><DEDENT>raise ValueError('<STR_LIT>')<EOL>", "docstring": "Something in the VM chain, either VirtualBox or Machine, helpfully\n    sets up localhost-to-VM forwarding on port 22. We can inspect this\n    rule to determine the port on localhost which gets forwarded to\n    22 in the VM.", "id": "f3150:m26"}
{"signature": "def delete_docker_vm_host_only_interface():", "body": "adapter_name = get_vm_hostonly_adapter()<EOL>log_to_client('<STR_LIT>'.format(adapter_name))<EOL>check_call_demoted(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', adapter_name])<EOL>", "docstring": "Attempt to delete the host-only interface attached\n    to the current Dusty VM. VM should be stopped\n    before calling this.", "id": "f3150:m21"}
{"signature": "def _stop_docker_vm():", "body": "check_call_demoted(['<STR_LIT>', '<STR_LIT>', constants.VM_MACHINE_NAME], redirect_stderr=True)<EOL>", "docstring": "Stop the Dusty VM if it is not already stopped.", "id": "f3150:m16"}
{"signature": "def regenerate_docker_vm_certificates():", "body": "log_to_client('<STR_LIT>')<EOL>check_call_demoted(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', constants.VM_MACHINE_NAME])<EOL>", "docstring": "Regenerate certificates for a running VM through Docker Machine.\n    This may be necessary following a restart if there were previously\n    networking issues preventing Machine from doing this as part\n    of normal startup.", "id": "f3150:m22"}
{"signature": "def _apply_nat_net_less_greedy_subnet():", "body": "check_and_log_output_and_error_demoted(<EOL>['<STR_LIT>', '<STR_LIT>', constants.VM_MACHINE_NAME, '<STR_LIT>', '<STR_LIT>'],<EOL>quiet_on_success=True)<EOL>", "docstring": "By default, VirtualBox claims 10.0.2.x for itself as part of its NAT routing\n    scheme. This subnet is commonly used on internal networks, making this a pretty\n    damn greedy choice. We instead alter the VM to use the less greedy subnet of\n    10.174.249.x which is less likely to conflict.", "id": "f3150:m13"}
{"signature": "def docker_vm_is_running():", "body": "running_vms = check_output_demoted(['<STR_LIT>', '<STR_LIT:list>', '<STR_LIT>'])<EOL>for line in running_vms.splitlines():<EOL><INDENT>if '<STR_LIT>'.format(constants.VM_MACHINE_NAME) in line:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Using VBoxManage is 0.5 seconds or so faster than Machine.", "id": "f3150:m18"}
{"signature": "def _apply_nat_dns_host_resolver():", "body": "check_and_log_output_and_error_demoted(<EOL>['<STR_LIT>', '<STR_LIT>', constants.VM_MACHINE_NAME, '<STR_LIT>', '<STR_LIT>'],<EOL>quiet_on_success=True)<EOL>", "docstring": "This will make the Dusty VM always use the host's DNS resolver for lookups.\nIt solves an issue we were seeing where the VM's resolving settings would get\nout of date when a laptop was moved between routers with different settings,\nresulting in DNS lookup failures on the VM.", "id": "f3150:m12"}
{"signature": "def _get_host_only_ip():", "body": "mac = _get_host_only_mac_address()<EOL>ip_addr_show = check_output_demoted(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', _vm_key_path(), '<STR_LIT>', _get_localhost_ssh_port(),<EOL>'<STR_LIT>', '<STR_LIT>'])<EOL>return _ip_for_mac_from_ip_addr_show(ip_addr_show, mac)<EOL>", "docstring": "Determine the host-only IP of the Dusty VM through Virtualbox and SSH\n    directly, bypassing Docker Machine. We do this because Docker Machine is\n    much slower, taking about 600ms total. We are basically doing the same\n    flow Docker Machine does in its own code.", "id": "f3150:m29"}
{"signature": "def _dusty_vm_exists():", "body": "existing_vms = check_output_demoted(['<STR_LIT>', '<STR_LIT:list>', '<STR_LIT>'])<EOL>for line in existing_vms.splitlines():<EOL><INDENT>if '<STR_LIT>'.format(constants.VM_MACHINE_NAME) in line:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "We use VBox directly instead of Docker Machine because it\n    shaves about 0.5 seconds off the runtime of this check.", "id": "f3150:m11"}
{"signature": "def get_nginx_configuration_spec(port_spec_dict, docker_bridge_ip):", "body": "nginx_http_config, nginx_stream_config = \"<STR_LIT>\", \"<STR_LIT>\"<EOL>for port_spec in port_spec_dict['<STR_LIT>']:<EOL><INDENT>if port_spec['<STR_LIT:type>'] == '<STR_LIT:http>':<EOL><INDENT>nginx_http_config += _nginx_http_spec(port_spec, docker_bridge_ip)<EOL><DEDENT>elif port_spec['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>nginx_stream_config += _nginx_stream_spec(port_spec, docker_bridge_ip)<EOL><DEDENT><DEDENT>return {'<STR_LIT:http>': nginx_http_config, '<STR_LIT>': nginx_stream_config}<EOL>", "docstring": "This function will take in a port spec as specified by the port_spec compiler and\n    will output an nginx web proxy config string. This string can then be written to a file\n    and used running nginx", "id": "f3153:m8"}
{"signature": "def _nginx_http_spec(port_spec, bridge_ip):", "body": "server_string_spec = \"<STR_LIT>\"<EOL>server_string_spec += \"<STR_LIT>\".format(_nginx_max_file_size_string())<EOL>server_string_spec += \"<STR_LIT>\".format(_nginx_listen_string(port_spec))<EOL>server_string_spec += \"<STR_LIT>\".format(_nginx_server_name_string(port_spec))<EOL>server_string_spec += _nginx_location_spec(port_spec, bridge_ip)<EOL>server_string_spec += _custom_502_page()<EOL>server_string_spec += \"<STR_LIT>\"<EOL>return server_string_spec<EOL>", "docstring": "This will output the nginx HTTP config string for specific port spec", "id": "f3153:m6"}
{"signature": "def get_lib_volume_mounts(base_lib_name, assembled_specs):", "body": "volumes = [_get_lib_repo_volume_mount(assembled_specs['<STR_LIT>'][base_lib_name])]<EOL>volumes.append(get_command_files_volume_mount(base_lib_name, test=True))<EOL>for lib_name in assembled_specs['<STR_LIT>'][base_lib_name]['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>lib_spec = assembled_specs['<STR_LIT>'][lib_name]<EOL>volumes.append(_get_lib_repo_volume_mount(lib_spec))<EOL><DEDENT>return volumes<EOL>", "docstring": "Returns a list of the formatted volume specs for a lib", "id": "f3155:m4"}
{"signature": "def _get_app_repo_volume_mount(app_spec):", "body": "if app_spec['<STR_LIT>']:<EOL><INDENT>return \"<STR_LIT>\".format(Repo(app_spec['<STR_LIT>']).vm_path, container_code_path(app_spec))<EOL><DEDENT>", "docstring": "This returns the formatted volume mount spec to mount the local code for an app in the\n    container", "id": "f3155:m5"}
{"signature": "def _get_lib_repo_volume_mount(lib_spec):", "body": "return \"<STR_LIT>\".format(Repo(lib_spec['<STR_LIT>']).vm_path, container_code_path(lib_spec))<EOL>", "docstring": "This returns the formatted volume mount spec to mount the local code for a lib in the\n    container", "id": "f3155:m6"}
{"signature": "def _get_app_libs_volume_mounts(app_name, assembled_specs):", "body": "volumes = []<EOL>for lib_name in assembled_specs['<STR_LIT>'][app_name]['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>lib_spec = assembled_specs['<STR_LIT>'][lib_name]<EOL>volumes.append(\"<STR_LIT>\".format(Repo(lib_spec['<STR_LIT>']).vm_path, container_code_path(lib_spec)))<EOL><DEDENT>return volumes<EOL>", "docstring": "Returns a list of the formatted volume mounts for all libs that an app uses", "id": "f3155:m8"}
{"signature": "def _composed_service_dict(service_spec):", "body": "compose_dict = service_spec.plain_dict()<EOL>_apply_env_overrides(env_overrides_for_app_or_service(service_spec.name), compose_dict)<EOL>compose_dict.setdefault('<STR_LIT>', []).append(_get_cp_volume_mount(service_spec.name))<EOL>compose_dict['<STR_LIT>'] = \"<STR_LIT>\".format(service_spec.name)<EOL>return compose_dict<EOL>", "docstring": "This function returns a dictionary of the docker_compose specifications\n    for one service. Currently, this is just the Dusty service spec with\n    an additional volume mount to support Dusty's cp functionality.", "id": "f3156:m11"}
{"signature": "def get_compose_dict(assembled_specs, port_specs):", "body": "compose_dict = _compose_dict_for_nginx(port_specs)<EOL>for app_name in assembled_specs['<STR_LIT>'].keys():<EOL><INDENT>compose_dict[app_name] = _composed_app_dict(app_name, assembled_specs, port_specs)<EOL><DEDENT>for service_spec in assembled_specs['<STR_LIT>'].values():<EOL><INDENT>compose_dict[service_spec.name] = _composed_service_dict(service_spec)<EOL><DEDENT>return compose_dict<EOL>", "docstring": "This function returns a dictionary representation of a docker-compose.yml file, based on assembled_specs from\n    the spec_assembler, and port_specs from the port_spec compiler", "id": "f3156:m2"}
{"signature": "def _get_expanded_active_specs(specs):", "body": "_filter_active(constants.CONFIG_BUNDLES_KEY, specs)<EOL>_filter_active('<STR_LIT>', specs)<EOL>_expand_libs_in_apps(specs)<EOL>_filter_active('<STR_LIT>', specs)<EOL>_filter_active('<STR_LIT>', specs)<EOL>_add_active_assets(specs)<EOL>", "docstring": "This function removes any unnecessary bundles, apps, libs, and services that aren't needed by\nthe activated_bundles.  It also expands inside specs.apps.depends.libs all libs that are needed\nindirectly by each app", "id": "f3157:m9"}
{"signature": "def _get_referenced_services(specs):", "body": "active_services = set()<EOL>for app_spec in specs['<STR_LIT>'].values():<EOL><INDENT>for service in app_spec['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>active_services.add(service)<EOL><DEDENT><DEDENT>for bundle_spec in specs['<STR_LIT>'].values():<EOL><INDENT>for service in bundle_spec['<STR_LIT>']:<EOL><INDENT>active_services.add(service)<EOL><DEDENT><DEDENT>return active_services<EOL>", "docstring": "Returns all services that are referenced in specs.apps.depends.services,\nor in specs.bundles.services", "id": "f3157:m6"}
{"signature": "def get_same_container_repos_from_spec(app_or_library_spec):", "body": "repos = set()<EOL>app_or_lib_repo = get_repo_of_app_or_library(app_or_library_spec.name)<EOL>if app_or_lib_repo is not None:<EOL><INDENT>repos.add(app_or_lib_repo)<EOL><DEDENT>for dependent_name in app_or_library_spec['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>repos.add(get_repo_of_app_or_library(dependent_name))<EOL><DEDENT>return repos<EOL>", "docstring": "Given the spec of an app or library, returns all repos that are guaranteed\n    to live in the same container", "id": "f3157:m19"}
{"signature": "@contextmanager<EOL>def parallel_task_queue(pool_size=multiprocessing.cpu_count()):", "body": "task_queue = TaskQueue(pool_size)<EOL>yield task_queue<EOL>task_queue.execute()<EOL>", "docstring": "Context manager for setting up a TaskQueue. Upon leaving the\n    context manager, all tasks that were enqueued will be executed\n    in parallel subject to `pool_size` concurrency constraints.", "id": "f3158:m0"}
{"signature": "@contextlib.contextmanager<EOL>def streaming_to_client():", "body": "for handler in client_logger.handlers:<EOL><INDENT>if hasattr(handler, '<STR_LIT>'):<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>handler = None<EOL><DEDENT>old_propagate = client_logger.propagate<EOL>client_logger.propagate = False<EOL>if handler is not None:<EOL><INDENT>old_append = handler.append_newlines<EOL>handler.append_newlines = False<EOL><DEDENT>yield<EOL>client_logger.propagate = old_propagate<EOL>if handler is not None:<EOL><INDENT>handler.append_newlines = old_append<EOL><DEDENT>", "docstring": "Puts the client logger into streaming mode, which sends\n    unbuffered input through to the socket one character at a time.\n    We also disable propagation so the root logger does not\n    receive many one-byte emissions. This context handler\n    was originally created for streaming Compose up's\n    terminal output through to the client and should only be\n    used for similarly complex circumstances.", "id": "f3160:m5"}
{"signature": "@daemon_command<EOL>def update_managed_repos(force=False):", "body": "log_to_client('<STR_LIT>')<EOL>update_specs_repo_and_known_hosts()<EOL>repos_to_update = get_all_repos(active_only=True, include_specs_repo=False)<EOL>with parallel_task_queue() as queue:<EOL><INDENT>log_to_client('<STR_LIT>')<EOL>for repo in repos_to_update:<EOL><INDENT>if not repo.is_overridden:<EOL><INDENT>repo.update_local_repo_async(queue, force=force)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "For any active, managed repos, update the Dusty-managed\n    copy to bring it up to date with the latest master.", "id": "f3169:m9"}
{"signature": "@daemon_command<EOL>def start_local_env(recreate_containers):", "body": "assembled_spec = spec_assembler.get_assembled_specs()<EOL>required_absent_assets = virtualbox.required_absent_assets(assembled_spec)<EOL>if required_absent_assets:<EOL><INDENT>raise RuntimeError('<STR_LIT>'.format(required_absent_assets))<EOL><DEDENT>docker_ip = virtualbox.get_docker_vm_ip()<EOL>if os.path.exists(constants.COMPOSEFILE_PATH):<EOL><INDENT>try:<EOL><INDENT>stop_apps_or_services(rm_containers=recreate_containers)<EOL><DEDENT>except CalledProcessError as e:<EOL><INDENT>log_to_client(\"<STR_LIT>\")<EOL>log_to_client(str(e))<EOL><DEDENT><DEDENT>daemon_warnings.clear_namespace('<STR_LIT>')<EOL>df_info = virtualbox.get_docker_vm_disk_info(as_dict=True)<EOL>if '<STR_LIT:M>' in df_info['<STR_LIT>'] or '<STR_LIT>' in df_info['<STR_LIT>']:<EOL><INDENT>warning_msg = '<STR_LIT>'.format(df_info['<STR_LIT>'])<EOL>daemon_warnings.warn('<STR_LIT>', warning_msg)<EOL>log_to_client(warning_msg)<EOL><DEDENT>log_to_client(\"<STR_LIT>\")<EOL>active_repos = spec_assembler.get_all_repos(active_only=True, include_specs_repo=False)<EOL>log_to_client(\"<STR_LIT>\")<EOL>port_spec = port_spec_compiler.get_port_spec_document(assembled_spec, docker_ip)<EOL>log_to_client(\"<STR_LIT>\")<EOL>docker_bridge_ip = virtualbox.get_docker_bridge_ip()<EOL>nginx_config = nginx_compiler.get_nginx_configuration_spec(port_spec, docker_bridge_ip)<EOL>log_to_client(\"<STR_LIT>\")<EOL>make_up_command_files(assembled_spec, port_spec)<EOL>log_to_client(\"<STR_LIT>\")<EOL>compose_config = compose_compiler.get_compose_dict(assembled_spec, port_spec)<EOL>log_to_client(\"<STR_LIT>\")<EOL>hosts.update_hosts_file_from_port_spec(port_spec)<EOL>log_to_client(\"<STR_LIT>\")<EOL>nfs.configure_nfs()<EOL>log_to_client(\"<STR_LIT>\")<EOL>nginx.update_nginx_from_config(nginx_config)<EOL>log_to_client(\"<STR_LIT>\")<EOL>compose.update_running_containers_from_spec(compose_config, 
recreate_containers=recreate_containers)<EOL>log_to_client(\"<STR_LIT>\")<EOL>", "docstring": "This command will use the compilers to get compose specs\n    will pass those specs to the systems that need them. Those\n    systems will in turn launch the services needed to make the\n    local environment go.", "id": "f3172:m2"}
{"signature": "@daemon_command<EOL>def restart_apps_or_services(app_or_service_names=None):", "body": "if app_or_service_names:<EOL><INDENT>log_to_client(\"<STR_LIT>\".format('<STR_LIT:U+002CU+0020>'.join(app_or_service_names)))<EOL><DEDENT>else:<EOL><INDENT>log_to_client(\"<STR_LIT>\")<EOL><DEDENT>if app_or_service_names:<EOL><INDENT>specs = spec_assembler.get_assembled_specs()<EOL>specs_list = [specs['<STR_LIT>'][app_name] for app_name in app_or_service_names if app_name in specs['<STR_LIT>']]<EOL>repos = set()<EOL>for spec in specs_list:<EOL><INDENT>if spec['<STR_LIT>']:<EOL><INDENT>repos = repos.union(spec_assembler.get_same_container_repos_from_spec(spec))<EOL><DEDENT><DEDENT>nfs.update_nfs_with_repos(repos)<EOL><DEDENT>else:<EOL><INDENT>nfs.update_nfs_with_repos(spec_assembler.get_all_repos(active_only=True, include_specs_repo=False))<EOL><DEDENT>compose.restart_running_services(app_or_service_names)<EOL>", "docstring": "Restart any containers associated with Dusty, or associated with\n    the provided app_or_service_names.", "id": "f3172:m4"}
{"signature": "@daemon_command<EOL>def prep_for_start_local_env(pull_repos):", "body": "if pull_repos:<EOL><INDENT>update_managed_repos(force=True)<EOL><DEDENT>assembled_spec = spec_assembler.get_assembled_specs()<EOL>if not assembled_spec[constants.CONFIG_BUNDLES_KEY]:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>virtualbox.initialize_docker_vm()<EOL>", "docstring": "Daemon-side command to ensure we're running the latest\n    versions of any managed repos, including the\n    specs repo, before we do anything else in the up flow.", "id": "f3172:m0"}
{"signature": "def _env_vars_from_file(filename):", "body": "def split_env(env):<EOL><INDENT>if '<STR_LIT:=>' in env:<EOL><INDENT>return env.split('<STR_LIT:=>', <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>return env, None<EOL><DEDENT><DEDENT>env = {}<EOL>for line in open(filename, '<STR_LIT:r>'):<EOL><INDENT>line = line.strip()<EOL>if line and not line.startswith('<STR_LIT:#>'):<EOL><INDENT>k, v = split_env(line)<EOL>env[k] = v<EOL><DEDENT><DEDENT>return env<EOL>", "docstring": "This code is copied from Docker Compose, so that we're exactly compatible\nwith their `env_file` option", "id": "f3174:m4"}
{"signature": "def validate_specs_from_path(specs_path):", "body": "<EOL>log_to_client(\"<STR_LIT>\".format(specs_path))<EOL>if not os.path.exists(specs_path):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\".format(specs_path))<EOL><DEDENT>specs = get_specs_from_path(specs_path)<EOL>_check_bare_minimum(specs)<EOL>_validate_spec_names(specs)<EOL>_validate_cycle_free(specs)<EOL>log_to_client(\"<STR_LIT>\")<EOL>", "docstring": "Validates Dusty specs at the given path. The following checks are performed:\n    -That the given path exists\n    -That there are bundles in the given path\n    -That the fields in the specs match those allowed in our schemas\n    -That references to apps, libs, and services point at defined specs\n    -That there are no cycles in app and lib dependencies", "id": "f3176:m9"}
{"signature": "@daemon_command<EOL>def copy_between_containers(source_name, source_path, dest_name, dest_path):", "body": "if not container_path_exists(source_name, source_path):<EOL><INDENT>raise RuntimeError('<STR_LIT>'.format(source_path, source_name))<EOL><DEDENT>temp_path = os.path.join(tempfile.mkdtemp(), str(uuid.uuid1()))<EOL>with _cleanup_path(temp_path):<EOL><INDENT>copy_to_local(temp_path, source_name, source_path, demote=False)<EOL>copy_from_local(temp_path, dest_name, dest_path, demote=False)<EOL><DEDENT>", "docstring": "Copy a file from the source container to an intermediate staging\n    area on the local filesystem, then from that staging area to the\n    destination container.\n\n    These moves take place without demotion for two reasons:\n      1. There should be no permissions vulnerabilities with copying\n         between containers because it is assumed the non-privileged\n         user has full access to all Dusty containers.\n      2. The temp dir created by mkdtemp is owned by the owner of the\n         Dusty daemon process, so if we demoted our moves to/from that location\n         they would encounter permission errors.", "id": "f3179:m1"}
{"signature": "def update_local_repo_async(self, task_queue, force=False):", "body": "self.ensure_local_repo()<EOL>task_queue.enqueue_task(self.update_local_repo, force=force)<EOL>", "docstring": "Local repo updating suitable for asynchronous, parallel execution.\n        We still need to run `ensure_local_repo` synchronously because it\n        does a bunch of non-threadsafe filesystem operations.", "id": "f3182:c0:m19"}
{"signature": "def init_yaml_constructor():", "body": "def utf_encoding_string_constructor(loader, node):<EOL><INDENT>return loader.construct_scalar(node).encode('<STR_LIT:utf-8>')<EOL><DEDENT>yaml.SafeLoader.add_constructor(u'<STR_LIT>', utf_encoding_string_constructor)<EOL>", "docstring": "This dark magic is used to make yaml.safe_load encode all strings as utf-8,\nwhere otherwise python unicode strings would be returned for non-ascii chars", "id": "f3207:m3"}
{"signature": "def _increase_file_handle_limit():", "body": "logging.info('<STR_LIT>'.format(constants.FILE_HANDLE_LIMIT))<EOL>resource.setrlimit(resource.RLIMIT_NOFILE,<EOL>(constants.FILE_HANDLE_LIMIT, resource.RLIM_INFINITY))<EOL>", "docstring": "Raise the open file handles permitted by the Dusty daemon process\n    and its child processes. The number we choose here needs to be within\n    the OS X default kernel hard limit, which is 10240.", "id": "f3208:m7"}
{"signature": "def _start_http_server():", "body": "logging.info('<STR_LIT>'.format(constants.DAEMON_HTTP_BIND_IP,<EOL>constants.DAEMON_HTTP_BIND_PORT))<EOL>thread = threading.Thread(target=http_server.app.run, args=(constants.DAEMON_HTTP_BIND_IP,<EOL>constants.DAEMON_HTTP_BIND_PORT))<EOL>thread.daemon = True<EOL>thread.start()<EOL>", "docstring": "Start the daemon's HTTP server on a separate thread.\n    This server is only used for servicing container status\n    requests from Dusty's custom 502 page.", "id": "f3208:m9"}
{"signature": "@app.route('<STR_LIT>', methods=['<STR_LIT:GET>'])<EOL>def consume(consumer_id):", "body": "global _consumers<EOL>consumer = _consumers[consumer_id]<EOL>client = get_docker_client()<EOL>try:<EOL><INDENT>status = client.inspect_container(consumer.container_id)['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>except Exception as e:<EOL><INDENT>status = '<STR_LIT>'<EOL><DEDENT>new_logs = client.logs(consumer.container_id,<EOL>stdout=True,<EOL>stderr=True,<EOL>stream=False,<EOL>timestamps=False,<EOL>since=calendar.timegm(consumer.offset.timetuple()))<EOL>updated_consumer = Consumer(consumer.container_id, datetime.utcnow())<EOL>_consumers[str(consumer_id)] = updated_consumer<EOL>response = jsonify({'<STR_LIT>': new_logs, '<STR_LIT:status>': status})<EOL>response.headers['<STR_LIT>'] = '<STR_LIT:*>'<EOL>response.headers['<STR_LIT>'] = '<STR_LIT>'<EOL>return response<EOL>", "docstring": "Given an existing consumer ID, return any new lines from the\n    log since the last time the consumer was consumed.", "id": "f3209:m3"}
{"signature": "@app.route('<STR_LIT>', methods=['<STR_LIT:POST>'])<EOL>def register_consumer():", "body": "global _consumers<EOL>hostname, port = request.form['<STR_LIT>'], request.form['<STR_LIT:port>']<EOL>app_name = _app_name_from_forwarding_info(hostname, port)<EOL>containers = get_dusty_containers([app_name], include_exited=True)<EOL>if not containers:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(app_name))<EOL><DEDENT>container = containers[<NUM_LIT:0>]<EOL>new_id = uuid1()<EOL>new_consumer = Consumer(container['<STR_LIT>'], datetime.utcnow())<EOL>_consumers[str(new_id)] = new_consumer<EOL>response = jsonify({'<STR_LIT>': app_name, '<STR_LIT>': new_id})<EOL>response.headers['<STR_LIT>'] = '<STR_LIT:*>'<EOL>response.headers['<STR_LIT>'] = '<STR_LIT>'<EOL>return response<EOL>", "docstring": "Given a hostname and port attempting to be accessed,\n    return a unique consumer ID for accessing logs from\n    the referenced container.", "id": "f3209:m2"}
{"signature": "def __getitem__(self, index):", "body": "return self._row[index]<EOL>", "docstring": "Retrieve the row at index.", "id": "f3227:c0:m5"}
{"signature": "def __setitem__(self, index, value):", "body": "if not isinstance(value, dict):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>for val in value.values():<EOL><INDENT>self._detect_or_validate(val)<EOL><DEDENT>self._row[index] = value<EOL>", "docstring": "Replace the row at index.", "id": "f3227:c0:m7"}
{"signature": "def __repr__(self): <EOL>", "body": "parts = [u'<STR_LIT>' % self.ver_str]<EOL>if bool(self.metadata):<EOL><INDENT>parts.append(u'<STR_LIT>' % self.metadata)<EOL><DEDENT>column_meta = []<EOL>for col, col_meta in self.column.items():<EOL><INDENT>if bool(col_meta):<EOL><INDENT>column_meta.append(u'<STR_LIT>' % (col, col_meta))<EOL><DEDENT>else:<EOL><INDENT>column_meta.append(u'<STR_LIT>' % col)<EOL><DEDENT><DEDENT>if bool(column_meta):<EOL><INDENT>parts.append(u'<STR_LIT>' % '<STR_LIT:\\n>'.join(column_meta))<EOL><DEDENT>elif len(self.column):<EOL><INDENT>parts.append(u'<STR_LIT>' % '<STR_LIT:U+002CU+0020>'.join(self.column.keys()))<EOL><DEDENT>else:<EOL><INDENT>parts.append(u'<STR_LIT>')<EOL><DEDENT>if bool(self):<EOL><INDENT>parts.extend([<EOL>u'<STR_LIT>' % (row, u'<STR_LIT>'.join([<EOL>((u'<STR_LIT>' % (col, data[col]))if col in data else(u'<STR_LIT>' % col)) for colin self.column.keys()]))<EOL>for (row, data) in enumerate(self)<EOL>])<EOL><DEDENT>else:<EOL><INDENT>parts.append(u'<STR_LIT>')<EOL><DEDENT>class_name = self.__class__.__name__<EOL>return u'<STR_LIT>' % (<EOL>class_name, u'<STR_LIT:\\n>'.join(parts), class_name<EOL>)<EOL>", "docstring": "Return a representation of this grid.", "id": "f3227:c0:m4"}
{"signature": "def pop_at(self, index):", "body": "return self.pop(self.at(index))<EOL>", "docstring": "Remove the key at the given index and return its value.", "id": "f3230:c0:m13"}
{"signature": "def at(self, index):", "body": "return self._order[index]<EOL>", "docstring": "Return the key at the given index.", "id": "f3230:c0:m8"}
{"signature": "def add_item(self, key, value, after=False, index=None, pos_key=None,<EOL>replace=True):", "body": "if self._validate_fn:<EOL><INDENT>self._validate_fn(value)<EOL><DEDENT>if (index is not None) and (pos_key is not None):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>elif pos_key is not None:<EOL><INDENT>try:<EOL><INDENT>index = self.index(pos_key)<EOL><DEDENT>except ValueError:<EOL><INDENT>raise KeyError('<STR_LIT>' % pos_key)<EOL><DEDENT><DEDENT>if after and (index is not None):<EOL><INDENT>index += <NUM_LIT:1><EOL><DEDENT>if key in self._values:<EOL><INDENT>if not replace:<EOL><INDENT>raise KeyError('<STR_LIT>' % key)<EOL><DEDENT>if index is not None:<EOL><INDENT>del self[key]<EOL><DEDENT>else:<EOL><INDENT>self._values[key] = value<EOL>return<EOL><DEDENT><DEDENT>if index is not None:<EOL><INDENT>self._order.insert(index, key)<EOL><DEDENT>else:<EOL><INDENT>self._order.append(key)<EOL><DEDENT>self._values[key] = value<EOL>", "docstring": "Add an item at a specific location, possibly replacing the\nexisting item.\n\nIf after is True, we insert *after* the given index, otherwise we\ninsert before.\n\nThe position is specified using either index or pos_key, the former\nspecifies the position from the start of the array (base 0).  pos_key\nspecifies the name of another key, and positions the new key relative\nto that key.\n\nWhen replacing, the position will be left un-changed unless a location\nis specified explicitly.", "id": "f3230:c0:m7"}
{"signature": "def define_haystack_units():", "body": "ureg = UnitRegistry()<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>ureg.define('<STR_LIT>')<EOL>return ureg<EOL>", "docstring": "Missing units found in project-haystack\nAdded to the registry", "id": "f3234:m2"}
{"signature": "def dump_grid(grid):", "body": "header = '<STR_LIT>' % dump_str(str(grid._version), version=grid._version)<EOL>if bool(grid.metadata):<EOL><INDENT>header += '<STR_LIT:U+0020>' + dump_meta(grid.metadata, version=grid._version)<EOL><DEDENT>columns = dump_columns(grid.column, version=grid._version)<EOL>rows = dump_rows(grid)<EOL>return '<STR_LIT:\\n>'.join([header, columns] + rows + ['<STR_LIT>'])<EOL>", "docstring": "Dump a single grid to its ZINC representation.", "id": "f3236:m2"}
{"signature": "def parse_scalar(scalar_data, version):", "body": "try:<EOL><INDENT>return hs_scalar[version].parseString(scalar_data, parseAll=True)[<NUM_LIT:0>]<EOL><DEDENT>except pp.ParseException as pe:<EOL><INDENT>raise ZincParseException(<EOL>'<STR_LIT>' % reformat_exception(pe),<EOL>scalar_data, <NUM_LIT:1>, pe.col)<EOL><DEDENT>except:<EOL><INDENT>LOG.debug('<STR_LIT>',<EOL>scalar_data, version)<EOL><DEDENT>", "docstring": "Parse a Project Haystack scalar in ZINC format.", "id": "f3237:m7"}
{"signature": "def _unescape(s, uri=False):", "body": "out = '<STR_LIT>'<EOL>while len(s) > <NUM_LIT:0>:<EOL><INDENT>c = s[<NUM_LIT:0>]<EOL>if c == '<STR_LIT:\\\\>':<EOL><INDENT>esc_c = s[<NUM_LIT:1>]<EOL>if esc_c in ('<STR_LIT:u>', '<STR_LIT>'):<EOL><INDENT>out += six.unichr(int(s[<NUM_LIT:2>:<NUM_LIT:6>], base=<NUM_LIT:16>))<EOL>s = s[<NUM_LIT:6>:]<EOL>continue<EOL><DEDENT>else:<EOL><INDENT>if esc_c == '<STR_LIT:b>':<EOL><INDENT>out += '<STR_LIT>'<EOL><DEDENT>elif esc_c == '<STR_LIT:f>':<EOL><INDENT>out += '<STR_LIT>'<EOL><DEDENT>elif esc_c == '<STR_LIT:n>':<EOL><INDENT>out += '<STR_LIT:\\n>'<EOL><DEDENT>elif esc_c == '<STR_LIT:r>':<EOL><INDENT>out += '<STR_LIT:\\r>'<EOL><DEDENT>elif esc_c == '<STR_LIT:t>':<EOL><INDENT>out += '<STR_LIT:\\t>'<EOL><DEDENT>else:<EOL><INDENT>if uri and (esc_c == '<STR_LIT:#>'):<EOL><INDENT>out += '<STR_LIT:\\\\>'<EOL><DEDENT>out += esc_c<EOL><DEDENT>s = s[<NUM_LIT:2>:]<EOL>continue<EOL><DEDENT><DEDENT>else:<EOL><INDENT>out += c<EOL>s = s[<NUM_LIT:1>:]<EOL><DEDENT><DEDENT>return out<EOL>", "docstring": "Iterative parser for string escapes.", "id": "f3237:m2"}
{"signature": "def parse_grid(grid_data):", "body": "try:<EOL><INDENT>grid_parts = NEWLINE_RE.split(grid_data)<EOL>if len(grid_parts) < <NUM_LIT:2>:<EOL><INDENT>raise ZincParseException('<STR_LIT>',<EOL>grid_data, <NUM_LIT:1>, <NUM_LIT:1>)<EOL><DEDENT>grid_meta_str = grid_parts.pop(<NUM_LIT:0>)<EOL>col_meta_str = grid_parts.pop(<NUM_LIT:0>)<EOL>ver_match = VERSION_RE.match(grid_meta_str)<EOL>if ver_match is None:<EOL><INDENT>raise ZincParseException(<EOL>'<STR_LIT>' % grid_meta_str,<EOL>grid_data, <NUM_LIT:1>, <NUM_LIT:1>)<EOL><DEDENT>version = Version(ver_match.group(<NUM_LIT:1>))<EOL>try:<EOL><INDENT>grid_meta = hs_gridMeta[version].parseString(grid_meta_str, parseAll=True)[<NUM_LIT:0>]<EOL><DEDENT>except pp.ParseException as pe:<EOL><INDENT>raise ZincParseException(<EOL>'<STR_LIT>' % pe,<EOL>grid_data, <NUM_LIT:1>, pe.col)<EOL><DEDENT>except: <EOL><INDENT>LOG.debug('<STR_LIT>', grid_meta_str)<EOL>raise<EOL><DEDENT>try:<EOL><INDENT>col_meta = hs_cols[version].parseString(col_meta_str, parseAll=True)[<NUM_LIT:0>]<EOL><DEDENT>except pp.ParseException as pe:<EOL><INDENT>raise ZincParseException(<EOL>'<STR_LIT>'% reformat_exception(pe, <NUM_LIT:2>),<EOL>grid_data, <NUM_LIT:2>, pe.col)<EOL><DEDENT>except: <EOL><INDENT>LOG.debug('<STR_LIT>', col_meta_str)<EOL>raise<EOL><DEDENT>row_grammar = hs_row[version]<EOL>def _parse_row(row_num_and_data):<EOL><INDENT>(row_num, row) = row_num_and_data<EOL>line_num = row_num + <NUM_LIT:3><EOL>try:<EOL><INDENT>return dict(zip(col_meta.keys(),<EOL>row_grammar.parseString(row, parseAll=True)[<NUM_LIT:0>].asList()))<EOL><DEDENT>except pp.ParseException as pe:<EOL><INDENT>raise ZincParseException(<EOL>'<STR_LIT>'% reformat_exception(pe, line_num),<EOL>grid_data, line_num, pe.col)<EOL><DEDENT>except: <EOL><INDENT>LOG.debug('<STR_LIT>', row)<EOL>raise<EOL><DEDENT><DEDENT>g = Grid(version=grid_meta.pop('<STR_LIT>'),<EOL>metadata=grid_meta,<EOL>columns=list(col_meta.items()))<EOL>g.extend(map(_parse_row, filter(lambda gp : 
bool(gp[<NUM_LIT:1>]), enumerate(grid_parts))))<EOL>return g<EOL><DEDENT>except:<EOL><INDENT>LOG.debug('<STR_LIT>', grid_data)<EOL>raise<EOL><DEDENT>", "docstring": "Parse the incoming grid.", "id": "f3237:m6"}
{"signature": "def extend(self, items, replace=True):", "body": "if isinstance(items, dict) or isinstance(items, SortableDict):<EOL><INDENT>items = list(items.items())<EOL><DEDENT>for (key, value) in items:<EOL><INDENT>self.append(key, value, replace=replace)<EOL><DEDENT>", "docstring": "Append the items to the metadata.", "id": "f3238:c0:m1"}
{"signature": "def is_valid_ip(ip_address):", "body": "try:<EOL><INDENT>ip = ipaddress.ip_address(u'<STR_LIT>' + ip_address)<EOL>return True<EOL><DEDENT>except ValueError as e:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Check Validity of an IP address", "id": "f3244:m0"}
{"signature": "def _lstat(self, path):", "body": "if path not in self.entries:<EOL><INDENT>return OverlayStat(*self.originals['<STR_LIT>'](path)[:<NUM_LIT:10>], st_overlay=<NUM_LIT:0>)<EOL><DEDENT>return self.entries[path].stat<EOL>", "docstring": "IMPORTANT: expects `path`'s parent to already be deref()'erenced.", "id": "f3252:c4:m15"}
{"signature": "def __init__(self, install=False, passthru=None):", "body": "self.entries    = {}<EOL>self._installed = False<EOL>self.impostors  = dict()<EOL>self.originals  = dict()<EOL>self.vaporized  = None<EOL>self.fds        = dict()<EOL>self.passthru   = passthru or []<EOL>if self.passthru:<EOL><INDENT>if not morph.isseq(self.passthru):<EOL><INDENT>self.passthru = [self.passthru]<EOL><DEDENT>self.passthru = [re.compile(expr) if morph.isstr(expr) else expr<EOL>for expr in self.passthru]<EOL><DEDENT>self._makeImpostors()<EOL>if install:<EOL><INDENT>self.install()<EOL><DEDENT>", "docstring": ":Parameters:\n\ninstall : bool, optional, default: false\n\n  Flag indicating whether or not this overlay should be\n  installed upon instantiation.\n\npassthru : list({str, regex}), optional, default: none\n\n  A regular expression (or list thereof) that will be matched\n  against any file that is operated on; if it matches, no overlay\n  will be applied, i.e. this list excludes a set of files. The\n  specified regexes can be either strings or re.RegexObject\n  instances. Note that these regexes will be given only the\n  fully-dereferenced paths to be tested.", "id": "f3252:c4:m0"}
{"signature": "def _lexists(self, path):", "body": "try:<EOL><INDENT>return bool(self._lstat(path))<EOL><DEDENT>except os.error:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "IMPORTANT: expects `path` to already be deref()'erenced.", "id": "f3252:c4:m20"}
{"signature": "def fso_readlink(self, path):", "body": "path = self.deref(path, to_parent=True)<EOL>st = self.fso_lstat(path)<EOL>if not stat.S_ISLNK(st.st_mode):<EOL><INDENT>raise OSError(<NUM_LIT>, '<STR_LIT>', path)<EOL><DEDENT>if st.st_overlay:<EOL><INDENT>return self.entries[path].content<EOL><DEDENT>return self.originals['<STR_LIT>'](path)<EOL>", "docstring": "overlays os.readlink()", "id": "f3252:c4:m28"}
{"signature": "def fso_stat(self, path):", "body": "return self.fso_anystat(path, link=False)<EOL>", "docstring": "overlays os.stat()", "id": "f3252:c4:m18"}
{"signature": "def fso_remove(self, path):", "body": "return self.fso_unlink(path)<EOL>", "docstring": "overlays os.remove()", "id": "f3252:c4:m31"}
{"signature": "def _stat(self, path):", "body": "if path not in self.entries:<EOL><INDENT>return OverlayStat(*self.originals['<STR_LIT>'](path)[:<NUM_LIT:10>], st_overlay=<NUM_LIT:0>)<EOL><DEDENT>st = self.entries[path].stat<EOL>if stat.S_ISLNK(st.st_mode):<EOL><INDENT>return self._stat(self.deref(path))<EOL><DEDENT>return st<EOL>", "docstring": "IMPORTANT: expects `path`'s parent to already be deref()'erenced.", "id": "f3252:c4:m14"}
{"signature": "def fso_mkdir(self, path, mode=None):", "body": "path = self.deref(path, to_parent=True)<EOL>if self._lexists(path):<EOL><INDENT>raise OSError(<NUM_LIT>, '<STR_LIT>', path)<EOL><DEDENT>self._addentry(OverlayEntry(self, path, stat.S_IFDIR))<EOL>", "docstring": "overlays os.mkdir()", "id": "f3252:c4:m25"}
{"signature": "def no_ansi(text):", "body": "return re.sub(r\"<STR_LIT>\", \"<STR_LIT>\", text)<EOL>", "docstring": "Kill any ANSI escape sequences.", "id": "f3261:m0"}
{"signature": "def assert_sets_equal(s1, s2):", "body": "assert list(sorted(s1)) == list(sorted(s2))<EOL>", "docstring": "Helper to compare sets.", "id": "f3270:m1"}
{"signature": "def load():", "body": "cfg = Bunch(DEFAULTS)<EOL>cfg.project_root = get_project_root()<EOL>if not cfg.project_root:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>cfg.rootjoin = lambda *names: os.path.join(cfg.project_root, *names)<EOL>cfg.srcjoin = lambda *names: cfg.rootjoin(cfg.srcdir, *names)<EOL>cfg.testjoin = lambda *names: cfg.rootjoin(cfg.testdir, *names)<EOL>cfg.cwd = os.getcwd()<EOL>os.chdir(cfg.project_root)<EOL>if cfg.project_root not in sys.path:<EOL><INDENT>sys.path.append(cfg.project_root)<EOL><DEDENT>try:<EOL><INDENT>from setup import project <EOL><DEDENT>except ImportError:<EOL><INDENT>from setup import setup_args as project <EOL><DEDENT>cfg.project = Bunch(project)<EOL>return cfg<EOL>", "docstring": "Load and return configuration as a ``Bunch``.\n\n        Values are based on ``DEFAULTS``, and metadata from ``setup.py``.", "id": "f3273:m1"}
{"signature": "def set_flat_layout():", "body": "DEFAULTS.update(<EOL>srcdir = '<STR_LIT:.>',<EOL>testdir = '<STR_LIT>',<EOL>)<EOL>", "docstring": "Switch default project layout to everything top-level.", "id": "f3273:m3"}
{"signature": "def get_project_root():", "body": "try:<EOL><INDENT>tasks_py = sys.modules['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return os.path.abspath(os.path.dirname(tasks_py.__file__))<EOL><DEDENT>", "docstring": "Determine location of `tasks.py`.", "id": "f3273:m0"}
{"signature": "@task(help={<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>})<EOL>def confluence(ctx, no_publish=False, clean=False, opts='<STR_LIT>'):", "body": "cfg = config.load()<EOL>if clean:<EOL><INDENT>ctx.run(\"<STR_LIT>\")<EOL><DEDENT>cmd = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>cmd.extend(['<STR_LIT>', '<STR_LIT>'])  <EOL>if opts:<EOL><INDENT>cmd.append(opts)<EOL><DEDENT>cmd.extend(['<STR_LIT:.>', ctx.rituals.docs.build + '<STR_LIT>'])<EOL>if no_publish:<EOL><INDENT>cmd.extend(['<STR_LIT>'])<EOL><DEDENT>notify.info(\"<STR_LIT>\")<EOL>with pushd(ctx.rituals.docs.sources):<EOL><INDENT>ctx.run('<STR_LIT:U+0020>'.join(cmd), pty=True)<EOL><DEDENT>", "docstring": "Build Sphinx docs and publish to Confluence.", "id": "f3275:m3"}
{"signature": "def watchdogctl(ctx, kill=False, verbose=True):", "body": "tries = <NUM_LIT> if kill else <NUM_LIT:0><EOL>cmd = '<STR_LIT>'.format(ctx.rituals.docs.watchdog.port)<EOL>pidno = <NUM_LIT:0><EOL>pidinfo = capture(cmd, ignore_failures=True)<EOL>while pidinfo:<EOL><INDENT>pidline = next(filter(None, [re.match(r'<STR_LIT>', x) for x in pidinfo.splitlines()]))<EOL>if not pidline:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(pidinfo))<EOL><DEDENT>pidno = int(pidline.group(<NUM_LIT:1>), <NUM_LIT:10>)<EOL>if verbose:<EOL><INDENT>ctx.run(\"<STR_LIT>\".format(pidno), echo=False)<EOL>verbose = False<EOL><DEDENT>tries -= <NUM_LIT:1><EOL>if tries <= <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>os.kill(pidno, <NUM_LIT:0>)<EOL><DEDENT>except OSError as exc:  <EOL><INDENT>if exc.errno == <NUM_LIT:3>:<EOL><INDENT>break<EOL><DEDENT>raise<EOL><DEDENT>else:<EOL><INDENT>notify.info(\"<STR_LIT>\".format(pidno))<EOL>ctx.run(\"<STR_LIT>\".format(pidno), echo=False)<EOL>time.sleep(<NUM_LIT>)<EOL><DEDENT><DEDENT>pid = capture(cmd, ignore_failures=True)<EOL><DEDENT>return pidno<EOL>", "docstring": "Control / check a running Sphinx autobuild process.", "id": "f3275:m1"}
{"signature": "def get_pypi_auth(configfile='<STR_LIT>'):", "body": "pypi_cfg = ConfigParser()<EOL>if pypi_cfg.read(os.path.expanduser(configfile)):<EOL><INDENT>try:<EOL><INDENT>user = pypi_cfg.get('<STR_LIT>', '<STR_LIT:username>')<EOL>pwd = pypi_cfg.get('<STR_LIT>', '<STR_LIT:password>')<EOL>return user, pwd<EOL><DEDENT>except ConfigError:<EOL><INDENT>notify.warning(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(configfile))<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Read auth from pip config.", "id": "f3275:m0"}
{"signature": "@task(help=dict(<EOL>docs=\"<STR_LIT>\",<EOL>))<EOL>def build(ctx, docs=False):", "body": "cfg = config.load()<EOL>ctx.run(\"<STR_LIT>\")<EOL>if docs:<EOL><INDENT>for doc_path in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if os.path.exists(cfg.rootjoin(doc_path, '<STR_LIT>')):<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>doc_path = None<EOL><DEDENT>if doc_path:<EOL><INDENT>ctx.run(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>notify.warning(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "Build the project.", "id": "f3277:m2"}
{"signature": "@task(help=dict(<EOL>docs=\"<STR_LIT>\",<EOL>backups=\"<STR_LIT>\",<EOL>bytecode=\"<STR_LIT>\",<EOL>dist=\"<STR_LIT>\",<EOL>all=\"<STR_LIT>\",<EOL>venv=\"<STR_LIT>\",<EOL>tox=\"<STR_LIT>\",<EOL>extra=\"<STR_LIT>\",<EOL>))<EOL>def clean(_dummy_ctx, docs=False, backups=False, bytecode=False, dist=False, <EOL>all=False, venv=False, tox=False, extra='<STR_LIT>'): ", "body": "cfg = config.load()<EOL>notify.banner(\"<STR_LIT>\")<EOL>venv_dirs = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>patterns = ['<STR_LIT>', '<STR_LIT>']<EOL>excludes = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>if docs or all:<EOL><INDENT>patterns.extend(['<STR_LIT>', '<STR_LIT>'])<EOL><DEDENT>if dist or all:<EOL><INDENT>patterns.append('<STR_LIT>')<EOL><DEDENT>if backups or all:<EOL><INDENT>patterns.extend(['<STR_LIT>'])<EOL><DEDENT>if bytecode or all:<EOL><INDENT>patterns.extend([<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>cfg.srcjoin('<STR_LIT>')[len(cfg.project_root)+<NUM_LIT:1>:],<EOL>])<EOL><DEDENT>if venv:<EOL><INDENT>patterns.extend([i + '<STR_LIT:/>' for i in venv_dirs])<EOL><DEDENT>if tox:<EOL><INDENT>patterns.append('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>excludes.append('<STR_LIT>')<EOL><DEDENT>if extra:<EOL><INDENT>patterns.extend(shlex.split(extra))<EOL><DEDENT>patterns = [antglob.includes(i) for i in patterns] + [antglob.excludes(i) for i in excludes]<EOL>if not venv:<EOL><INDENT>patterns.extend([antglob.excludes(i + '<STR_LIT:/>') for i in venv_dirs])<EOL><DEDENT>fileset = antglob.FileSet(cfg.project_root, patterns)<EOL>for name in fileset:<EOL><INDENT>notify.info('<STR_LIT>'.format(name))<EOL>if name.endswith('<STR_LIT:/>'):<EOL><INDENT>shutil.rmtree(os.path.join(cfg.project_root, name))<EOL><DEDENT>else:<EOL><INDENT>os.unlink(os.path.join(cfg.project_root, name))<EOL><DEDENT><DEDENT>", "docstring": "Perform house-keeping.", "id": "f3277:m1"}
{"signature": "@task(help=dict(<EOL>local=\"<STR_LIT>\",<EOL>))<EOL>def freeze(ctx, local=False):", "body": "cmd = '<STR_LIT>'.format('<STR_LIT>' if local else '<STR_LIT>')<EOL>frozen = ctx.run(cmd, hide='<STR_LIT>').stdout.replace('<STR_LIT>', '<STR_LIT:#>')<EOL>with io.open('<STR_LIT>', '<STR_LIT:w>', encoding='<STR_LIT:ascii>') as out:<EOL><INDENT>out.write(\"<STR_LIT>\".format(isodate()))<EOL>out.write(frozen)<EOL><DEDENT>notify.info(\"<STR_LIT>\".format(len(frozen.splitlines()),))<EOL>", "docstring": "Freeze currently installed requirements.", "id": "f3277:m3"}
{"signature": "@task(help={<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>})<EOL>def tox(ctx, verbose=False, clean=False, env_list='<STR_LIT>', opts='<STR_LIT>'):", "body": "cfg = config.load()<EOL>add_dir2pypath(cfg.project_root)<EOL>snakepits = ctx.rituals.snakepits.split(os.pathsep)<EOL>cmd = []<EOL>snakepits = [i for i in snakepits if os.path.isdir(i)]<EOL>if snakepits:<EOL><INDENT>cmd += ['<STR_LIT>'.format(os.pathsep.join(snakepits),)]<EOL><DEDENT>if clean and os.path.exists(cfg.rootjoin('<STR_LIT>')):<EOL><INDENT>shutil.rmtree(cfg.rootjoin('<STR_LIT>'))<EOL><DEDENT>cmd += ['<STR_LIT>']<EOL>if verbose:<EOL><INDENT>cmd += ['<STR_LIT>']<EOL><DEDENT>if env_list:<EOL><INDENT>cmd += ['<STR_LIT>', env_list]<EOL><DEDENT>cmd += opts<EOL>ctx.run('<STR_LIT:U+0020>'.join(cmd))<EOL>", "docstring": "Perform multi-environment tests.", "id": "f3278:m1"}
{"signature": "@task(help=dict(<EOL>verbose=\"<STR_LIT>\",<EOL>pypi=\"<STR_LIT>\",<EOL>))<EOL>def bump(ctx, verbose=False, pypi=False):", "body": "cfg = config.load()<EOL>scm = scm_provider(cfg.project_root, commit=False, ctx=ctx)<EOL>if not scm.workdir_is_clean():<EOL><INDENT>notify.warning(\"<STR_LIT>\")<EOL><DEDENT>pep440 = scm.pep440_dev_version(verbose=verbose, non_local=pypi)<EOL>setup_cfg = cfg.rootjoin('<STR_LIT>')<EOL>if not pep440:<EOL><INDENT>notify.info(\"<STR_LIT>\")<EOL><DEDENT>elif os.path.exists(setup_cfg):<EOL><INDENT>with io.open(setup_cfg, encoding='<STR_LIT:utf-8>') as handle:<EOL><INDENT>data = handle.readlines()<EOL><DEDENT>changed = False<EOL>for i, line in enumerate(data):<EOL><INDENT>if re.match(r\"<STR_LIT>\", line):<EOL><INDENT>verb, _ = data[i].split('<STR_LIT:=>', <NUM_LIT:1>)<EOL>data[i] = '<STR_LIT>'.format(verb, pep440)<EOL>changed = True<EOL><DEDENT><DEDENT>if changed:<EOL><INDENT>notify.info(\"<STR_LIT>\")<EOL>with io.open(setup_cfg, '<STR_LIT:w>', encoding='<STR_LIT:utf-8>') as handle:<EOL><INDENT>handle.write('<STR_LIT>'.join(data))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>notify.warning(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>notify.warning(\"<STR_LIT>\")<EOL><DEDENT>if os.path.exists(setup_cfg):<EOL><INDENT>egg_info = shell.capture(\"<STR_LIT>\", echo=True if verbose else None)<EOL>for line in egg_info.splitlines():<EOL><INDENT>if line.endswith('<STR_LIT>'):<EOL><INDENT>pkg_info_file = line.split(None, <NUM_LIT:1>)[<NUM_LIT:1>]<EOL>with io.open(pkg_info_file, encoding='<STR_LIT:utf-8>') as handle:<EOL><INDENT>notify.info('<STR_LIT:\\n>'.join(i for i in handle.readlines() if i.startswith('<STR_LIT>')).strip())<EOL><DEDENT><DEDENT><DEDENT>ctx.run(\"<STR_LIT>\", echo=True if verbose else None)<EOL><DEDENT>", "docstring": "Bump a development version.", "id": "f3283:m1"}
{"signature": "@task(help=dict(<EOL>pyrun=\"<STR_LIT>\",<EOL>upload=\"<STR_LIT>\",<EOL>opts=\"<STR_LIT>\",<EOL>))<EOL>def pex(ctx, pyrun='<STR_LIT>', upload=False, opts='<STR_LIT>'):", "body": "cfg = config.load()<EOL>ctx.run(\"<STR_LIT>\")<EOL>pkg_info = get_egg_info(cfg)<EOL>version = pkg_info.version if pkg_info else cfg.project.version<EOL>pex_files = []<EOL>for script in cfg.project.entry_points['<STR_LIT>']:<EOL><INDENT>script, entry_point = script.split('<STR_LIT:=>', <NUM_LIT:1>)<EOL>script, entry_point = script.strip(), entry_point.strip()<EOL>pex_file = cfg.rootjoin('<STR_LIT>', '<STR_LIT>'.format(script, version))<EOL>cmd = ['<STR_LIT>', '<STR_LIT>', cfg.rootjoin('<STR_LIT>'), cfg.project_root, '<STR_LIT:-c>', script, '<STR_LIT>', pex_file]<EOL>if opts:<EOL><INDENT>cmd.append(opts)<EOL><DEDENT>ctx.run('<STR_LIT:U+0020>'.join(cmd))<EOL>non_universal = set()<EOL>with closing(zipfile.ZipFile(pex_file, mode=\"<STR_LIT:r>\")) as pex_contents:<EOL><INDENT>for pex_name in pex_contents.namelist():  <EOL><INDENT>if pex_name.endswith('<STR_LIT>') and '<STR_LIT>' not in pex_name:<EOL><INDENT>non_universal.add(pex_name.split('<STR_LIT>')[<NUM_LIT:0>].split('<STR_LIT:/>')[-<NUM_LIT:1>])<EOL><DEDENT><DEDENT><DEDENT>if non_universal:<EOL><INDENT>notify.warning(\"<STR_LIT>\"<EOL>.format(pex_file.replace(os.getcwd(), '<STR_LIT:.>'), '<STR_LIT>'.join(sorted(non_universal))))<EOL>envs = [i.split('<STR_LIT:->')[-<NUM_LIT:3>:] for i in non_universal]<EOL>envs = {i[<NUM_LIT:0>]: i[<NUM_LIT:1>:] for i in envs}<EOL>if len(envs) > <NUM_LIT:1>:<EOL><INDENT>envs = {k: v for k, v in envs.items() if not k.startswith('<STR_LIT>')}<EOL><DEDENT>env_id = []<EOL>for k, v in sorted(envs.items()):<EOL><INDENT>env_id.append(k)<EOL>env_id.extend(v)<EOL><DEDENT>env_id = '<STR_LIT:->'.join(env_id)<EOL><DEDENT>else:<EOL><INDENT>env_id = '<STR_LIT>'<EOL><DEDENT>new_pex_file = pex_file.replace('<STR_LIT>', 
'<STR_LIT>'.format(env_id))<EOL>notify.info(\"<STR_LIT>\".format(os.path.basename(new_pex_file)))<EOL>os.rename(pex_file, new_pex_file)<EOL>pex_file = new_pex_file<EOL>pex_files.append(pex_file)<EOL><DEDENT>if not pex_files:<EOL><INDENT>notify.warning(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>if pyrun:<EOL><INDENT>if any(pyrun.startswith(i) for i in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')):<EOL><INDENT>pyrun_url = pyrun<EOL><DEDENT>else:<EOL><INDENT>pyrun_cfg = dict(ctx.rituals.pyrun)<EOL>pyrun_cfg.update(parse_qsl(pyrun.replace(os.pathsep, '<STR_LIT:&>')))<EOL>pyrun_url = (pyrun_cfg['<STR_LIT>'] + '<STR_LIT:/>' +<EOL>pyrun_cfg['<STR_LIT>']).format(**pyrun_cfg)<EOL><DEDENT>notify.info(\"<STR_LIT>\".format(pyrun_url))<EOL>with url_as_file(pyrun_url, ext='<STR_LIT>') as pyrun_tarball:<EOL><INDENT>pyrun_tar = tarfile.TarFile.gzopen(pyrun_tarball)<EOL>for pex_file in pex_files[:]:<EOL><INDENT>pyrun_exe = pyrun_tar.extractfile('<STR_LIT>')<EOL>with open(pex_file, '<STR_LIT:rb>') as pex_handle:<EOL><INDENT>pyrun_pex_file = '<STR_LIT>'.format(<EOL>pex_file[:-<NUM_LIT:4>], pyrun_url.rsplit('<STR_LIT>')[-<NUM_LIT:1>][:-<NUM_LIT:4>])<EOL>with open(pyrun_pex_file, '<STR_LIT:wb>') as pyrun_pex:<EOL><INDENT>pyrun_pex.write(INSTALLER_BASH.replace('<STR_LIT>', '<STR_LIT>'.format(len(INSTALLER_BASH) + <NUM_LIT:1>)))<EOL>shutil.copyfileobj(pyrun_exe, pyrun_pex)<EOL>shutil.copyfileobj(pex_handle, pyrun_pex)<EOL><DEDENT>shutil.copystat(pex_file, pyrun_pex_file)<EOL>notify.info(\"<STR_LIT>\".format(pretty_path(pyrun_pex_file)))<EOL>pex_files.append(pyrun_pex_file)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if upload:<EOL><INDENT>base_url = ctx.rituals.release.upload.base_url.rstrip('<STR_LIT:/>')<EOL>if not base_url:<EOL><INDENT>notify.failure(\"<STR_LIT>\")<EOL><DEDENT>for pex_file in pex_files:<EOL><INDENT>url = base_url + '<STR_LIT:/>' + ctx.rituals.release.upload.path.lstrip('<STR_LIT:/>').format(<EOL>name=cfg.project.name, version=cfg.project.version, 
filename=os.path.basename(pex_file))<EOL>notify.info(\"<STR_LIT>\".format(url))<EOL>with io.open(pex_file, '<STR_LIT:rb>') as handle:<EOL><INDENT>reply = requests.put(url, data=handle.read())<EOL>if reply.status_code in range(<NUM_LIT:200>, <NUM_LIT>):<EOL><INDENT>notify.info(\"<STR_LIT>\".format(**vars(reply)))<EOL><DEDENT>else:<EOL><INDENT>notify.warning(\"<STR_LIT>\".format(**vars(reply)))<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Package the project with PEX.", "id": "f3283:m3"}
{"signature": "def get_egg_info(cfg, verbose=False):", "body": "result = Bunch()<EOL>setup_py = cfg.rootjoin('<STR_LIT>')<EOL>if not os.path.exists(setup_py):<EOL><INDENT>return result<EOL><DEDENT>egg_info = shell.capture(\"<STR_LIT>\".format(setup_py), echo=True if verbose else None)<EOL>for info_line in egg_info.splitlines():<EOL><INDENT>if info_line.endswith('<STR_LIT>'):<EOL><INDENT>pkg_info_file = info_line.split(None, <NUM_LIT:1>)[<NUM_LIT:1>]<EOL>result['<STR_LIT>'] = pkg_info_file<EOL>with io.open(pkg_info_file, encoding='<STR_LIT:utf-8>') as handle:<EOL><INDENT>lastkey = None<EOL>for line in handle:<EOL><INDENT>if line.lstrip() != line:<EOL><INDENT>assert lastkey, \"<STR_LIT>\".format(pkg_info_file, line)<EOL>result[lastkey] += '<STR_LIT:\\n>' + line<EOL><DEDENT>else:<EOL><INDENT>lastkey, value = line.split('<STR_LIT::>', <NUM_LIT:1>)<EOL>lastkey = lastkey.strip().lower().replace('<STR_LIT:->', '<STR_LIT:_>')<EOL>value = value.strip()<EOL>if lastkey in result:<EOL><INDENT>try:<EOL><INDENT>result[lastkey].append(value)<EOL><DEDENT>except AttributeError:<EOL><INDENT>result[lastkey] = [result[lastkey], value]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>result[lastkey] = value<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>for multikey in PKG_INFO_MULTIKEYS:<EOL><INDENT>if not isinstance(result.get(multikey, []), list):<EOL><INDENT>result[multikey] = [result[multikey]]<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Call 'setup egg_info' and return the parsed meta-data.", "id": "f3283:m0"}
{"signature": "@task(help=dict(<EOL>devpi=\"<STR_LIT>\",<EOL>egg=\"<STR_LIT>\",<EOL>wheel=\"<STR_LIT>\",<EOL>auto=\"<STR_LIT>\",<EOL>))<EOL>def dist(ctx, devpi=False, egg=False, wheel=False, auto=True):", "body": "config.load()<EOL>cmd = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]<EOL>if auto:<EOL><INDENT>egg = sys.version_info.major == <NUM_LIT:2><EOL>try:<EOL><INDENT>import wheel as _<EOL>wheel = True<EOL><DEDENT>except ImportError:<EOL><INDENT>wheel = False<EOL><DEDENT><DEDENT>if egg:<EOL><INDENT>cmd.append(\"<STR_LIT>\")<EOL><DEDENT>if wheel:<EOL><INDENT>cmd.append(\"<STR_LIT>\")<EOL><DEDENT>ctx.run(\"<STR_LIT>\")<EOL>ctx.run('<STR_LIT:U+0020>'.join(cmd))<EOL>if devpi:<EOL><INDENT>ctx.run(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Distribute the project.", "id": "f3283:m2"}
{"signature": "def _samefile(fname1, fname2):", "body": "if sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>return os.path.normpath(os.path.normcase(fname1)) == os.path.normpath(os.path.normcase(fname2))<EOL><DEDENT>else:<EOL><INDENT>return os.path.samefile(fname1, fname2)<EOL><DEDENT>", "docstring": "OS independent `samefile` implementation.", "id": "f3285:m1"}
{"signature": "def whichgen(command, path=None, verbose=<NUM_LIT:0>, exts=None): ", "body": "matches = []<EOL>if path is None:<EOL><INDENT>using_given_path = <NUM_LIT:0><EOL>path = os.environ.get(\"<STR_LIT>\", \"<STR_LIT>\").split(os.pathsep)<EOL>if sys.platform.startswith(\"<STR_LIT>\"):<EOL><INDENT>path.insert(<NUM_LIT:0>, os.curdir)  <EOL><DEDENT><DEDENT>else:<EOL><INDENT>using_given_path = <NUM_LIT:1><EOL><DEDENT>if sys.platform.startswith(\"<STR_LIT>\"):<EOL><INDENT>if exts is None:<EOL><INDENT>exts = os.environ.get(\"<STR_LIT>\", \"<STR_LIT>\").split(os.pathsep)<EOL>for ext in exts:<EOL><INDENT>if ext.lower() == \"<STR_LIT>\":<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>exts = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL><DEDENT><DEDENT>elif not isinstance(exts, list):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if exts is not None:<EOL><INDENT>raise WhichError(\"<STR_LIT>\" % sys.platform)<EOL><DEDENT>exts = []<EOL><DEDENT>if os.sep in command or os.altsep and os.altsep in command:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>for i, dir_name in enumerate(path):<EOL><INDENT>if sys.platform.startswith(\"<STR_LIT>\") and len(dir_name) >= <NUM_LIT:2> and dir_name[<NUM_LIT:0>] == '<STR_LIT:\">' and dir_name[-<NUM_LIT:1>] == '<STR_LIT:\">':<EOL><INDENT>dir_name = dir_name[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>for ext in ['<STR_LIT>']+exts:<EOL><INDENT>abs_name = os.path.abspath(os.path.normpath(os.path.join(dir_name, command+ext)))<EOL>if os.path.isfile(abs_name):<EOL><INDENT>if using_given_path:<EOL><INDENT>from_where = \"<STR_LIT>\" % i<EOL><DEDENT>elif not sys.platform.startswith(\"<STR_LIT>\"):<EOL><INDENT>from_where = \"<STR_LIT>\" % i<EOL><DEDENT>elif i == <NUM_LIT:0>:<EOL><INDENT>from_where = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>from_where = \"<STR_LIT>\" % (i-<NUM_LIT:1>)<EOL><DEDENT>match = _cull((abs_name, from_where), matches, verbose)<EOL>if match:<EOL><INDENT>if verbose:<EOL><INDENT>yield 
match<EOL><DEDENT>else:<EOL><INDENT>yield match[<NUM_LIT:0>]<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>match = _get_registered_executable(command)<EOL>if match is not None:<EOL><INDENT>match = _cull(match, matches, verbose)<EOL>if match:<EOL><INDENT>if verbose:<EOL><INDENT>yield match<EOL><DEDENT>else:<EOL><INDENT>yield match[<NUM_LIT:0>]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Return a generator of full paths to the given command.\n\n    \"command\" is a the name of the executable to search for.\n    \"path\" is an optional alternate path list to search. The default it\n        to use the PATH environment variable.\n    \"verbose\", if true, will cause a 2-tuple to be returned for each\n        match. The second element is a textual description of where the\n        match was found.\n    \"exts\" optionally allows one to specify a list of extensions to use\n        instead of the standard list for this system. This can\n        effectively be used as an optimization to, for example, avoid\n        stat's of \"foo.vbs\" when searching for \"foo\" and you know it is\n        not a VisualBasic script but \".vbs\" is on PATHEXT. This option\n        is only supported on Windows.\n\n    This method returns a generator which yields either full paths to\n    the given command or, if verbose, tuples of the form (<path to\n    command>, <where path found>).", "id": "f3285:m3"}
{"signature": "def _get_registered_executable(exe_name):", "body": "registered = None<EOL>if sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>if os.path.splitext(exe_name)[<NUM_LIT:1>].lower() != '<STR_LIT>':<EOL><INDENT>exe_name += '<STR_LIT>'<EOL><DEDENT>import _winreg <EOL>try:<EOL><INDENT>key = \"<STR_LIT>\" + exe_name<EOL>value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)<EOL>registered = (value, \"<STR_LIT>\"+key)<EOL><DEDENT>except _winreg.error:<EOL><INDENT>pass<EOL><DEDENT>if registered and not os.path.exists(registered[<NUM_LIT:0>]):<EOL><INDENT>registered = None<EOL><DEDENT><DEDENT>return registered<EOL>", "docstring": "Windows allow application paths to be registered in the registry.", "id": "f3285:m0"}
{"signature": "def _cull(potential, matches, verbose=<NUM_LIT:0>):", "body": "for match in matches: <EOL><INDENT>if _samefile(potential[<NUM_LIT:0>], match[<NUM_LIT:0>]):<EOL><INDENT>if verbose:<EOL><INDENT>sys.stderr.write(\"<STR_LIT>\" % potential)<EOL><DEDENT>return None<EOL><DEDENT><DEDENT>if not stat.S_ISREG(os.stat(potential[<NUM_LIT:0>]).st_mode):<EOL><INDENT>if verbose:<EOL><INDENT>sys.stderr.write(\"<STR_LIT>\" % potential)<EOL><DEDENT><DEDENT>elif not os.access(potential[<NUM_LIT:0>], os.X_OK):<EOL><INDENT>if verbose:<EOL><INDENT>sys.stderr.write(\"<STR_LIT>\" % potential)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>matches.append(potential)<EOL>return potential<EOL><DEDENT>return None<EOL>", "docstring": "Cull inappropriate matches. Possible reasons:\n        - a duplicate of a previous match\n        - not a disk file\n        - not executable (non-Windows)\n    If 'potential' is approved it is returned and added to 'matches'.\n    Otherwise, None is returned.", "id": "f3285:m2"}
{"signature": "def whichall(command, path=None, verbose=<NUM_LIT:0>, exts=None):", "body": "return list(whichgen(command, path, verbose, exts))<EOL>", "docstring": "Return a list of full paths to all matches of the given command on the path.\n\n    \"command\" is a the name of the executable to search for.\n    \"path\" is an optional alternate path list to search. The default it\n        to use the PATH environment variable.\n    \"verbose\", if true, will cause a 2-tuple to be returned for each\n        match. The second element is a textual description of where the\n        match was found.\n    \"exts\" optionally allows one to specify a list of extensions to use\n        instead of the standard list for this system. This can\n        effectively be used as an optimization to, for example, avoid\n        stat's of \"foo.vbs\" when searching for \"foo\" and you know it is\n        not a VisualBasic script but \".vbs\" is on PATHEXT. This option\n        is only supported on Windows.", "id": "f3285:m5"}
{"signature": "def workdir_is_clean(self, quiet=False):", "body": "<EOL>self.run('<STR_LIT>', **RUN_KWARGS)<EOL>unchanged = True<EOL>try:<EOL><INDENT>self.run('<STR_LIT>', report_error=False, **RUN_KWARGS)<EOL><DEDENT>except exceptions.Failure:<EOL><INDENT>unchanged = False<EOL>if not quiet:<EOL><INDENT>notify.warning('<STR_LIT>')<EOL>self.run('<STR_LIT>', **RUN_KWARGS)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>self.run('<STR_LIT>', report_error=False, **RUN_KWARGS)<EOL><DEDENT>except exceptions.Failure:<EOL><INDENT>unchanged = False<EOL>if not quiet:<EOL><INDENT>notify.warning('<STR_LIT>')<EOL>self.run('<STR_LIT>', **RUN_KWARGS)<EOL><DEDENT><DEDENT>return unchanged<EOL>", "docstring": "Check for uncommitted changes, return `True` if everything is clean.\n\n            Inspired by http://stackoverflow.com/questions/3878624/.", "id": "f3286:c0:m0"}
{"signature": "def commit(self, message):", "body": "self.run_elective('<STR_LIT>'.format(message))<EOL>", "docstring": "Commit pending changes.", "id": "f3286:c0:m2"}
{"signature": "def add_file(self, filename):", "body": "notify.warning('<STR_LIT>'.format(filename))<EOL>", "docstring": "Stage a file for committing, or commit it directly (depending on the SCM).", "id": "f3287:c0:m1"}
{"signature": "def tag(self, label, message=None):", "body": "notify.warning('<STR_LIT>'.format(<EOL>label, '<STR_LIT>'.format(message) if message else '<STR_LIT>',<EOL>))<EOL>", "docstring": "Tag the current workdir state.", "id": "f3287:c0:m3"}
{"signature": "def run_elective(self, cmd, *args, **kwargs):", "body": "if self._commit:<EOL><INDENT>return self.run(cmd, *args, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>notify.warning(\"<STR_LIT>\".format(cmd))<EOL>kwargs = kwargs.copy()<EOL>kwargs['<STR_LIT>'] = False<EOL>return self.run('<STR_LIT:true>', *args, **kwargs)<EOL><DEDENT>", "docstring": "Run a command, or just echo it, depending on `commit`.", "id": "f3288:c0:m2"}
{"signature": "def matches(self, path):", "body": "return bool(self.compiled.match(path))<EOL>", "docstring": "Check this pattern against given `path`.", "id": "f3290:c0:m2"}
{"signature": "def excludes(pattern):", "body": "return Pattern(pattern, inclusive=False)<EOL>", "docstring": "A single exclusive glob pattern.", "id": "f3290:m4"}
{"signature": "def parse_glob(pattern):", "body": "if not pattern:<EOL><INDENT>return<EOL><DEDENT>bits = pattern.split(\"<STR_LIT:/>\")<EOL>dirs, filename = bits[:-<NUM_LIT:1>], bits[-<NUM_LIT:1>]<EOL>for dirname in dirs:<EOL><INDENT>if dirname == \"<STR_LIT>\":<EOL><INDENT>yield  \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>yield glob2re(dirname) + \"<STR_LIT:/>\"<EOL><DEDENT><DEDENT>yield glob2re(filename)<EOL>", "docstring": "Generate parts of regex transformed from glob pattern.", "id": "f3290:m1"}
{"signature": "def run(cmd, **kw):", "body": "kw = kw.copy()<EOL>kw.setdefault('<STR_LIT>', False)  <EOL>report_error = kw.pop('<STR_LIT>', True)<EOL>runner = kw.pop('<STR_LIT>', invoke_run)<EOL>try:<EOL><INDENT>return runner(cmd, **kw)<EOL><DEDENT>except exceptions.Failure as exc:<EOL><INDENT>sys.stdout.flush()<EOL>sys.stderr.flush()<EOL>if report_error:<EOL><INDENT>notify.error(\"<STR_LIT>\".format(cmd, exc.result.return_code,))<EOL><DEDENT>raise<EOL><DEDENT>finally:<EOL><INDENT>sys.stdout.flush()<EOL>sys.stderr.flush()<EOL><DEDENT>", "docstring": "Run a command and flush its output.", "id": "f3292:m1"}
{"signature": "def search_file_upwards(name, base=None):", "body": "base = base or os.getcwd()<EOL>while base != os.path.dirname(base):<EOL><INDENT>if os.path.exists(os.path.join(base, name)):<EOL><INDENT>return base<EOL><DEDENT>base = os.path.dirname(base)<EOL><DEDENT>return None<EOL>", "docstring": "Search for a file named `name` from cwd or given directory to root.\n        Return None if nothing's found.", "id": "f3293:m0"}
{"signature": "def failure(msg):", "body": "error(msg)<EOL>sys.exit(<NUM_LIT:1>)<EOL>", "docstring": "Emit a fatal message and exit.", "id": "f3294:m5"}
{"signature": "def warning(msg):", "body": "_flush()<EOL>sys.stderr.write(\"<STR_LIT>\".format(msg))<EOL>sys.stderr.flush()<EOL>", "docstring": "Emit a warning message.", "id": "f3294:m3"}
{"signature": "def _flush():", "body": "sys.stdout.flush()<EOL>sys.stderr.flush()<EOL>", "docstring": "Flush all console output.", "id": "f3294:m0"}
{"signature": "@contextmanager<EOL>def pushd(path):", "body": "saved = os.getcwd()<EOL>os.chdir(path)<EOL>try:<EOL><INDENT>yield saved<EOL><DEDENT>finally:<EOL><INDENT>os.chdir(saved)<EOL><DEDENT>", "docstring": "A context that enters a given directory and restores the old state on exit.\n\n        The original directory is returned as the context variable.", "id": "f3295:m1"}
{"signature": "def pretty_path(path, _home_re=re.compile('<STR_LIT>' + re.escape(os.path.expanduser('<STR_LIT>') + os.sep))):", "body": "path = decode_filename(path)<EOL>path = _home_re.sub('<STR_LIT>' + os.sep, path)<EOL>return path<EOL>", "docstring": "Prettify path for humans, and make it Unicode.", "id": "f3295:m0"}
{"signature": "def try_until_even(req):", "body": "response = yield req<EOL>while response % <NUM_LIT:2>:<EOL><INDENT>try:<EOL><INDENT>response = yield '<STR_LIT>'<EOL><DEDENT>except GeneratorExit:<EOL><INDENT>return<EOL><DEDENT>except ValueError:<EOL><INDENT>yield '<STR_LIT>'<EOL><DEDENT><DEDENT>return response<EOL>", "docstring": "an example relay", "id": "f3299:m3"}
{"signature": "@py2_compatible<EOL>def mymax(val):", "body": "while val < <NUM_LIT:100>:<EOL><INDENT>try:<EOL><INDENT>sent = yield val<EOL><DEDENT>except GeneratorExit:<EOL><INDENT>return<EOL><DEDENT>except ValueError:<EOL><INDENT>sent = yield '<STR_LIT>'<EOL><DEDENT>except TypeError:<EOL><INDENT>return_('<STR_LIT>')<EOL><DEDENT>if sent > val:<EOL><INDENT>val = sent<EOL><DEDENT><DEDENT>return_(val * <NUM_LIT:3>)<EOL>", "docstring": "an example generator function", "id": "f3300:m4"}
{"signature": "@py2_compatible<EOL>def try_until_positive(req):", "body": "response = yield req<EOL>while response < <NUM_LIT:0>:<EOL><INDENT>try:<EOL><INDENT>response = yield '<STR_LIT>'<EOL><DEDENT>except GeneratorExit:<EOL><INDENT>return<EOL><DEDENT>except ValueError:<EOL><INDENT>yield '<STR_LIT>'<EOL><DEDENT><DEDENT>return_(response)<EOL>", "docstring": "an example relay", "id": "f3300:m2"}
{"signature": "@py2_compatible<EOL>def try_until_even(req):", "body": "response = yield req<EOL>while response % <NUM_LIT:2>:<EOL><INDENT>try:<EOL><INDENT>response = yield '<STR_LIT>'<EOL><DEDENT>except GeneratorExit:<EOL><INDENT>return<EOL><DEDENT>except ValueError:<EOL><INDENT>yield '<STR_LIT>'<EOL><DEDENT><DEDENT>return_(response)<EOL>", "docstring": "an example relay", "id": "f3300:m3"}
{"signature": "def with_generator(name):", "body": "gens = [getattr(common, name)]<EOL>if not PY2:<EOL><INDENT>from . import py3<EOL>gens.append(getattr(py3, name))<EOL><DEDENT>return pytest.mark.parametrize(name, gens)<EOL>", "docstring": "use a python 2/3 parametrized generator", "id": "f3302:m0"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def __iter__(self):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Returns\n-------\n~typing.Generator[T_yield, T_send, T_return]\n    the generator iterator", "id": "f3303:c0:m0"}
{"signature": "def __call__(self, *args, **kwargs):", "body": "raise NotImplementedError()<EOL>", "docstring": "Returns\n-------\n~typing.Generator[T_yield, T_send, T_return]\n    the resulting generator", "id": "f3303:c1:m0"}
{"signature": "@py2_compatible<EOL>def imap_yield(func, gen):", "body": "gen = iter(gen)<EOL>assert _is_just_started(gen)<EOL>yielder = yield_from(gen)<EOL>for item in yielder:<EOL><INDENT>with yielder:<EOL><INDENT>yielder.send((yield func(item)))<EOL><DEDENT><DEDENT>return_(yielder.result)<EOL>", "docstring": "Apply a function to all ``yield`` values of a generator\n\n    Parameters\n    ----------\n    func: ~typing.Callable[[T_yield], T_mapped]\n        the function to apply\n    gen: Generable[T_yield, T_send, T_return]\n        the generator iterable.\n\n    Returns\n    -------\n    ~typing.Generator[T_mapped, T_send, T_return]\n        the mapped generator", "id": "f3305:m6"}
{"signature": "@py2_compatible<EOL>def irelay(gen, thru):", "body": "gen = iter(gen)<EOL>assert _is_just_started(gen)<EOL>yielder = yield_from(gen)<EOL>for item in yielder:<EOL><INDENT>with yielder:<EOL><INDENT>subgen = thru(item)<EOL>subyielder = yield_from(subgen)<EOL>for subitem in subyielder:<EOL><INDENT>with subyielder:<EOL><INDENT>subyielder.send((yield subitem))<EOL><DEDENT><DEDENT>yielder.send(subyielder.result)<EOL><DEDENT><DEDENT>return_(yielder.result)<EOL>", "docstring": "Create a new generator by relaying yield/send interactions\n    through another generator\n\n    Parameters\n    ----------\n    gen: Generable[T_yield, T_send, T_return]\n        the original generator\n    thru: ~typing.Callable[[T_yield], ~typing.Generator]\n        the generator callable through which each interaction is relayed\n\n    Returns\n    -------\n    ~typing.Generator\n        the relayed generator", "id": "f3305:m9"}
{"signature": "def reusable(func):", "body": "sig = signature(func)<EOL>origin = func<EOL>while hasattr(origin, '<STR_LIT>'):<EOL><INDENT>origin = origin.__wrapped__<EOL><DEDENT>return type(<EOL>origin.__name__,<EOL>(ReusableGenerator, ),<EOL>dict([<EOL>('<STR_LIT>',       origin.__doc__),<EOL>('<STR_LIT>',    origin.__module__),<EOL>('<STR_LIT>', sig),<EOL>('<STR_LIT>',   staticmethod(func)),<EOL>] + [<EOL>(name, property(compose(itemgetter(name),<EOL>attrgetter('<STR_LIT>'))))<EOL>for name in sig.parameters<EOL>] + ([<EOL>('<STR_LIT>',  origin.__qualname__),<EOL>] if sys.version_info > (<NUM_LIT:3>, ) else [])))<EOL>", "docstring": "Create a reusable class from a generator function\n\n    Parameters\n    ----------\n    func: GeneratorCallable[T_yield, T_send, T_return]\n        the function to wrap\n\n    Note\n    ----\n    * the callable must have an inspectable signature\n    * If bound to a class, the new reusable generator is callable as a method.\n      To opt out of this, add a :func:`staticmethod` decorator above\n      this decorator.", "id": "f3305:m3"}
{"signature": "@py2_compatible<EOL>def imap_send(func, gen):", "body": "gen = iter(gen)<EOL>assert _is_just_started(gen)<EOL>yielder = yield_from(gen)<EOL>for item in yielder:<EOL><INDENT>with yielder:<EOL><INDENT>yielder.send(func((yield item)))<EOL><DEDENT><DEDENT>return_(yielder.result)<EOL>", "docstring": "Apply a function to all ``send`` values of a generator\n\n    Parameters\n    ----------\n    func: ~typing.Callable[[T_send], T_mapped]\n        the function to apply\n    gen: Generable[T_yield, T_mapped, T_return]\n        the generator iterable.\n\n    Returns\n    -------\n    ~typing.Generator[T_yield, T_send, T_return]\n        the mapped generator", "id": "f3305:m7"}
{"signature": "def sendreturn(gen, value):", "body": "try:<EOL><INDENT>gen.send(value)<EOL><DEDENT>except StopIteration as e:<EOL><INDENT>return stopiter_value(e)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Send an item into a generator expecting a final return value\n\n    Parameters\n    ----------\n    gen: ~typing.Generator[T_yield, T_send, T_return]\n        the generator to send the value to\n    value: T_send\n        the value to send\n\n    Raises\n    ------\n    RuntimeError\n        if the generator did not return as expected\n\n    Returns\n    -------\n    T_return\n        the generator's return value", "id": "f3305:m5"}
{"signature": "def __call__(self, product_name, **properties):", "body": "if not product_name in self:<EOL><INDENT>return self.default(tag=product_name, **properties)<EOL><DEDENT>return self[product_name](**properties)<EOL>", "docstring": "Builds and returns a Blok object", "id": "f3315:c0:m2"}
{"signature": "def __new__(metaclass, name, parents, class_dict, *kargs, **kwargs):", "body": "attributes = {name: attribute for name, attribute in class_dict.items() if isinstance(attribute,<EOL>AbstractAttribute)}<EOL>if attributes:<EOL><INDENT>if hasattr(parents[<NUM_LIT:0>], '<STR_LIT>'):<EOL><INDENT>full_attributes = parents[<NUM_LIT:0>].attribute_descriptors.copy()<EOL>full_attributes.update(attributes)<EOL>attributes = full_attributes<EOL><DEDENT>blok_attributes = {}<EOL>render_attributes = []<EOL>direct_attributes = []<EOL>init_attributes = []<EOL>accessor_attributes = []<EOL>attribute_map = {}<EOL>for attribute_name, attribute in attributes.items():<EOL><INDENT>if not hasattr(attribute, '<STR_LIT:name>'):<EOL><INDENT>attribute.name = attribute_name<EOL><DEDENT>if isinstance(attribute, DirectAttribute):<EOL><INDENT>direct_attributes.append(attribute)<EOL>if hasattr(attribute, '<STR_LIT>'):<EOL><INDENT>render_attributes.append(attribute)<EOL><DEDENT>if not hasattr(attribute, '<STR_LIT>'):<EOL><INDENT>attribute.object_attribute = '<STR_LIT>'.format(attribute_name)<EOL><DEDENT>if getattr(attribute, '<STR_LIT>', False):<EOL><INDENT>init_attributes.append(attribute_name)<EOL><DEDENT><DEDENT>if isinstance(attribute, (BlokAttribute, NestedBlokAttribute)) and hasattr(attribute.type, '<STR_LIT>'):<EOL><INDENT>blok_attributes[attribute.type.tag] = attribute<EOL><DEDENT>if isinstance(attribute, AccessorAttribute):<EOL><INDENT>accessor_attributes.append(attribute)<EOL>if not hasattr(attribute, '<STR_LIT>'):<EOL><INDENT>attribute.parent_attribute = '<STR_LIT>'.format(attribute_name)<EOL><DEDENT><DEDENT>attribute_map[attribute.name] = attribute_name<EOL><DEDENT>if direct_attributes and not name == '<STR_LIT>' and '<STR_LIT>' in class_dict:<EOL><INDENT>class_dict['<STR_LIT>'] += tuple(attribute.object_attribute for attribute in direct_attributes)<EOL>class_dict['<STR_LIT>'] += tuple(attribute.parent_attribute for attribute in accessor_attributes)<EOL><DEDENT>if 
render_attributes:<EOL><INDENT>if hasattr(parents[<NUM_LIT:0>], '<STR_LIT>'):<EOL><INDENT>render_attributes = list(parents[<NUM_LIT:0>].render_attributes) + render_attributes<EOL><DEDENT>class_dict['<STR_LIT>'] = set(render_attributes)<EOL><DEDENT>if init_attributes:<EOL><INDENT>if hasattr(parents[<NUM_LIT:0>], '<STR_LIT>'):<EOL><INDENT>init_attributes = list(parents[<NUM_LIT:0>].init_attributes) + init_attributes<EOL><DEDENT>class_dict['<STR_LIT>'] = init_attributes<EOL><DEDENT>if blok_attributes:<EOL><INDENT>if hasattr(parents[<NUM_LIT:0>], '<STR_LIT>'):<EOL><INDENT>full_blok_attributes = dict(parents[<NUM_LIT:0>].blok_attributes)<EOL>full_blok_attributes.update(blok_attributes)<EOL>blok_attributes = full_blok_attributes<EOL><DEDENT>class_dict['<STR_LIT>'] = blok_attributes<EOL><DEDENT>if attribute_map:<EOL><INDENT>if hasattr(parents[<NUM_LIT:0>], '<STR_LIT>'):<EOL><INDENT>full_attribute_map = dict(parents[<NUM_LIT:0>].attribute_map)<EOL>full_attribute_map.update(attribute_map)<EOL>attribute_map = full_attribute_map<EOL><DEDENT>class_dict['<STR_LIT>'] = attribute_map<EOL><DEDENT>class_dict['<STR_LIT>'] = attributes<EOL>attribute_signals = (attribute.signal for attribute in attributes.values() if getattr(attribute, '<STR_LIT>'))<EOL>if attribute_signals:<EOL><INDENT>class_dict['<STR_LIT>'] = class_dict.get('<STR_LIT>', ()) + tuple(attribute_signals)<EOL><DEDENT><DEDENT>return super(TagAttributes, metaclass).__new__(metaclass, name, parents, class_dict, *kargs, **kwargs)<EOL>", "docstring": "Updates a tag class to automatically register all signals", "id": "f3317:c1:m0"}
{"signature": "def render(self, *args, **kwargs):", "body": "render_to = StringIO()<EOL>self.output(render_to, *args, **kwargs)<EOL>return render_to.getvalue()<EOL>", "docstring": "Renders as a str", "id": "f3317:c2:m1"}
{"signature": "@property<EOL><INDENT>def attributes(self):<DEDENT>", "body": "if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._attributes = {}<EOL><DEDENT>return self._attributes<EOL>", "docstring": "Lazily creates and returns a tags attributes", "id": "f3317:c5:m1"}
{"signature": "def render(self, *args, **kwargs):", "body": "render_to = StringIO()<EOL>self.output(render_to, *args, **kwargs)<EOL>return render_to.getvalue()<EOL>", "docstring": "Renders as a str", "id": "f3317:c0:m4"}
{"signature": "def output(self, to=None, formatted=False, indent=<NUM_LIT:0>, indentation='<STR_LIT:U+0020>', *args, **kwargs):", "body": "if formatted:<EOL><INDENT>to.write(self.start_tag)<EOL>to.write('<STR_LIT:\\n>')<EOL>if not self.tag_self_closes:<EOL><INDENT>for blok in self.blox:<EOL><INDENT>to.write(indentation * (indent + <NUM_LIT:1>))<EOL>blok.output(to=to, indent=indent + <NUM_LIT:1>, formatted=True, indentation=indentation, *args, **kwargs)<EOL>to.write('<STR_LIT:\\n>')<EOL><DEDENT><DEDENT>to.write(indentation * indent)<EOL>to.write(self.end_tag)<EOL>if not indentation:<EOL><INDENT>to.write('<STR_LIT:\\n>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>to.write(self.start_tag)<EOL>if not self.tag_self_closes:<EOL><INDENT>for blok in self.blox:<EOL><INDENT>blok.output(to=to, *args, **kwargs)<EOL><DEDENT><DEDENT>to.write(self.end_tag)<EOL><DEDENT>", "docstring": "Outputs to a stream (like a file or request)", "id": "f3317:c8:m1"}
{"signature": "def __call__(self, *blox, position=None):", "body": "if position is not None:<EOL><INDENT>for blok in blox:<EOL><INDENT>self.blox_container.blox.insert(position, blok)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for blok in blox:<EOL><INDENT>self.blox_container.blox.append(blok)<EOL><DEDENT><DEDENT>return blok<EOL>", "docstring": "Adds a nested blok to this blok", "id": "f3317:c4:m3"}
{"signature": "def output(self, to=None, *args, **kwargs):", "body": "to.write(cgi.escape(str(self._value)))<EOL>", "docstring": "Outputs the set text", "id": "f3318:c1:m0"}
{"signature": "def file(file_object, start_on=None, ignore=(), use_short=True, **queries):", "body": "return string(file_object.read(), start_on=start_on, ignore=ignore, use_short=use_short, **queries)<EOL>", "docstring": "Returns a blox template from a file stream object", "id": "f3320:m1"}
{"signature": "def string(html, start_on=None, ignore=(), use_short=True, **queries):", "body": "if use_short:<EOL><INDENT>html = grow_short(html)<EOL><DEDENT>return _to_template(fromstring(html), start_on=start_on,<EOL>ignore=ignore, **queries)<EOL>", "docstring": "Returns a blox template from an html string", "id": "f3320:m0"}
{"signature": "def _pop_none(self, kwargs):", "body": "for key, value in copy(kwargs).items():<EOL><INDENT>if value is None or value == ():<EOL><INDENT>kwargs.pop(key)<EOL><DEDENT>if hasattr(value, '<STR_LIT>'):<EOL><INDENT>kwargs[key] = value.read()<EOL><DEDENT><DEDENT>", "docstring": "Remove default values (anything where the value is None). click is unfortunately bad at the way it\n        sends through unspecified defaults.", "id": "f3329:c1:m0"}
{"signature": "@resources.command(ignore_defaults=True, no_args_is_help=False)<EOL><INDENT>@click.option('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', is_flag=True, default=False, show_default=True,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', default=<NUM_LIT:1>, type=int, show_default=True,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=int, show_default=True, required=False,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', required=False, nargs=<NUM_LIT:2>, multiple=True,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>def list(self, all_pages=False, **kwargs):<DEDENT>", "body": "<EOL>if kwargs.get('<STR_LIT:status>', None) and '<STR_LIT:U+002C>' in kwargs['<STR_LIT:status>']:<EOL><INDENT>all_status = kwargs.pop('<STR_LIT:status>').strip('<STR_LIT:U+002C>').split('<STR_LIT:U+002C>')<EOL>queries = list(kwargs.pop('<STR_LIT>', ()))<EOL>for status in all_status:<EOL><INDENT>if status in STATUS_CHOICES:<EOL><INDENT>queries.append(('<STR_LIT>', status))<EOL><DEDENT>else:<EOL><INDENT>raise exc.TowerCLIError('<STR_LIT>'.format(status))<EOL><DEDENT><DEDENT>kwargs['<STR_LIT>'] = tuple(queries)<EOL><DEDENT>if all_pages:<EOL><INDENT>kwargs.pop('<STR_LIT>', None)<EOL>kwargs.pop('<STR_LIT>', None)<EOL><DEDENT>debug.log('<STR_LIT>', header='<STR_LIT>')<EOL>response = self.read(**kwargs)<EOL>self._convert_pagenum(response)<EOL>if all_pages and response['<STR_LIT>']:<EOL><INDENT>cursor = copy(response)<EOL>while cursor['<STR_LIT>']:<EOL><INDENT>cursor = self.read(**dict(kwargs, page=cursor['<STR_LIT>']))<EOL>self._convert_pagenum(cursor)<EOL>response['<STR_LIT>'] += cursor['<STR_LIT>']<EOL>response['<STR_LIT:count>'] += cursor['<STR_LIT:count>']<EOL><DEDENT>response['<STR_LIT>'] = None<EOL><DEDENT>return response<EOL>", "docstring": "Return a list of objects.\n\n        If one or more filters are provided through keyword arguments, filter the results accordingly.\n\n        If no filters are provided, return all results.\n\n        
=====API DOCS=====\n        Retrieve a list of objects.\n\n        :param all_pages: Flag that if set, collect all pages of content from the API when returning results.\n        :type all_pages: bool\n        :param page: The page to show. Ignored if all_pages is set.\n        :type page: int\n        :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n        :type query: list\n        :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n        :returns: A JSON object containing details of all resource objects returned by Tower backend.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3329:c1:m8"}
{"signature": "def _disassoc(self, url_fragment, me, other):", "body": "<EOL>url = self.endpoint + '<STR_LIT>' % (me, url_fragment)<EOL>r = client.get(url, params={'<STR_LIT:id>': other}).json()<EOL>if r['<STR_LIT:count>'] == <NUM_LIT:0>:<EOL><INDENT>return {'<STR_LIT>': False}<EOL><DEDENT>r = client.post(url, data={'<STR_LIT>': True, '<STR_LIT:id>': other})<EOL>return {'<STR_LIT>': True}<EOL>", "docstring": "Disassociate the `other` record from the `me` record.", "id": "f3329:c1:m10"}
{"signature": "def _lookup(self, fail_on_missing=False, fail_on_found=False, include_debug_header=True, **kwargs):", "body": "read_params = {}<EOL>for field_name in self.identity:<EOL><INDENT>if field_name in kwargs:<EOL><INDENT>read_params[field_name] = kwargs[field_name]<EOL><DEDENT><DEDENT>if '<STR_LIT:id>' in self.identity and len(self.identity) == <NUM_LIT:1>:<EOL><INDENT>return {}<EOL><DEDENT>if not read_params:<EOL><INDENT>raise exc.BadRequest('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>existing_data = self.get(include_debug_header=include_debug_header, **read_params)<EOL>if fail_on_found:<EOL><INDENT>raise exc.Found('<STR_LIT>' %<EOL>read_params)<EOL><DEDENT>return existing_data<EOL><DEDENT>except exc.NotFound:<EOL><INDENT>if fail_on_missing:<EOL><INDENT>raise exc.NotFound('<STR_LIT>' %<EOL>read_params)<EOL><DEDENT>return {}<EOL><DEDENT>", "docstring": "=====API DOCS=====\nAttempt to perform a lookup that is expected to return a single result, and return the record.\n\nThis method is a wrapper around `get` that strips out non-unique keys, and is used internally by\n`write` and `delete`.\n\n:param fail_on_missing: Flag that raise exception if no resource is found.\n:type fail_on_missing: bool\n:param fail_on_found: Flag that raise exception if a resource is found.\n:type fail_on_found: bool\n:param include_debug_header: Flag determining whether to print debug messages when querying\n                             Tower backend.\n:type include_debug_header: bool\n:param `**kwargs`: Keyword arguments list of available fields used for searching resource.\n:returns: A JSON object containing details of the resource returned by Tower backend.\n:rtype: dict\n\n:raises tower_cli.exceptions.BadRequest: When no field are provided in kwargs.\n:raises tower_cli.exceptions.Found: When a resource is found and fail_on_found flag is on.\n:raises tower_cli.exceptions.NotFound: When no resource is found and fail_on_missing flag\n                                    
   is on.\n=====API DOCS=====", "id": "f3329:c1:m1"}
{"signature": "@resources.command(ignore_defaults=True)<EOL><INDENT>def get(self, pk=None, **kwargs):<DEDENT>", "body": "if kwargs.pop('<STR_LIT>', True):<EOL><INDENT>debug.log('<STR_LIT>', header='<STR_LIT>')<EOL><DEDENT>response = self.read(pk=pk, fail_on_no_results=True, fail_on_multiple_results=True, **kwargs)<EOL>return response['<STR_LIT>'][<NUM_LIT:0>]<EOL>", "docstring": "Return one and exactly one object.\n\n        Lookups may be through a primary key, specified as a positional argument, and/or through filters specified\n        through keyword arguments.\n\n        If the number of results does not equal one, raise an exception.\n\n        =====API DOCS=====\n        Retrieve one and exactly one object.\n\n        :param pk: Primary key of the resource to be read. Tower CLI will only attempt to read *that* object\n                   if ``pk`` is provided (not ``None``).\n        :type pk: int\n        :param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.\n        :returns: loaded JSON of the retrieved resource object.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3329:c1:m7"}
{"signature": "def _get_patch_url(self, url, pk):", "body": "return url + '<STR_LIT>' % pk<EOL>", "docstring": "Overwrite this method to handle specific corner cases to the url passed to PATCH method.", "id": "f3329:c1:m4"}
{"signature": "@resources.command<EOL><INDENT>@click.option('<STR_LIT>', default=False, show_default=True, type=bool, is_flag=True,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', default=False, show_default=True, type=bool, is_flag=True,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>def create(self, **kwargs):<DEDENT>", "body": "return self.write(create_on_missing=True, **kwargs)<EOL>", "docstring": "Create an object.\n\n        Fields in the resource's `identity` tuple are used for a lookup; if a match is found, then no-op\n        (unless `force_on_exists` is set) but do not fail (unless `fail_on_found` is set).\n\n        =====API DOCS=====\n        Create an object.\n\n        :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria\n                              already exists.\n        :type fail_on_found: bool\n        :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will\n                                be updated to the provided values.; If unset, a match causes the request to be\n                                a no-op.\n        :type force_on_exists: bool\n        :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to create the\n                           resource object.\n        :returns: A dictionary combining the JSON output of the created resource, as well as two extra fields:\n                  \"changed\", a flag indicating if the resource is created successfully; \"id\", an integer which\n                  is the primary key of the created object.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3329:c2:m0"}
{"signature": "@property<EOL><INDENT>def help(self):<DEDENT>", "body": "if self.help_text:<EOL><INDENT>return self.help_text<EOL><DEDENT>return '<STR_LIT>' % self.name<EOL>", "docstring": "Return the help text that was passed to the constructor, or a\n        sensible default if none was provided.", "id": "f3330:c1:m3"}
{"signature": "def configure_model(self, attrs, field_name):", "body": "self.relationship = field_name<EOL>self._set_method_names(relationship=field_name)<EOL>if self.res_name is None:<EOL><INDENT>self.res_name = grammar.singularize(attrs.get('<STR_LIT>', '<STR_LIT>').strip('<STR_LIT:/>'))<EOL><DEDENT>", "docstring": "Hook for ResourceMeta class to call when initializing model class.\nSaves fields obtained from resource class backlinks", "id": "f3330:c2:m2"}
{"signature": "def ordered_dump(data, Dumper=yaml.Dumper, **kws):", "body": "class OrderedDumper(Dumper):<EOL><INDENT>pass<EOL><DEDENT>def _dict_representer(dumper, data):<EOL><INDENT>return dumper.represent_mapping(<EOL>yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,<EOL>data.items())<EOL><DEDENT>OrderedDumper.add_representer(OrderedDict,<EOL>_dict_representer)<EOL>return yaml.dump(data, None, OrderedDumper, **kws)<EOL>", "docstring": "Expand PyYAML's built-in dumper to support parsing OrderedDict. Return\n    a string as parse result of the original data structure, which includes\n    OrderedDict.\n\n    Args:\n        data: the data structure to be dumped(parsed) which is supposed to\n        contain OrderedDict.\n        Dumper: the yaml serializer to be expanded and used.\n        kws: extra key-value arguments to be passed to yaml.dump.", "id": "f3333:m3"}
{"signature": "def string_to_dict(var_string, allow_kv=True, require_dict=True):", "body": "<EOL>try:<EOL><INDENT>return_dict = yaml.load(var_string, Loader=yaml.SafeLoader)<EOL>if require_dict:<EOL><INDENT>assert type(return_dict) is dict<EOL><DEDENT><DEDENT>except (AttributeError, yaml.YAMLError, AssertionError):<EOL><INDENT>try:<EOL><INDENT>assert allow_kv<EOL>return_dict = parse_kv(var_string)<EOL><DEDENT>except Exception:<EOL><INDENT>raise exc.TowerCLIError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % var_string<EOL>)<EOL><DEDENT><DEDENT>return return_dict<EOL>", "docstring": "Returns a dictionary given a string with yaml or json syntax.\n    If data is not present in a key: value format, then it return\n    an empty dictionary.\n\n    Attempts processing string by 3 different methods in order:\n        1. as JSON      2. as YAML      3. as custom key=value syntax\n    Throws an error if all of these fail in the standard ways.", "id": "f3333:m1"}
{"signature": "def log(s, header='<STR_LIT>', file=sys.stderr, nl=<NUM_LIT:1>, **kwargs):", "body": "<EOL>if not settings.verbose:<EOL><INDENT>return<EOL><DEDENT>if header:<EOL><INDENT>word_arr = s.split('<STR_LIT:U+0020>')<EOL>multi = []<EOL>word_arr.insert(<NUM_LIT:0>, '<STR_LIT>' % header.upper())<EOL>i = <NUM_LIT:0><EOL>while i < len(word_arr):<EOL><INDENT>to_add = ['<STR_LIT>']<EOL>count = <NUM_LIT:3><EOL>while count <= <NUM_LIT>:<EOL><INDENT>count += len(word_arr[i]) + <NUM_LIT:1><EOL>if count <= <NUM_LIT>:<EOL><INDENT>to_add.append(word_arr[i])<EOL>i += <NUM_LIT:1><EOL>if i == len(word_arr):<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>if len(to_add) == <NUM_LIT:1>:<EOL><INDENT>to_add.append(word_arr[i])<EOL>i += <NUM_LIT:1><EOL><DEDENT>if i != len(word_arr):<EOL><INDENT>count -= len(word_arr[i]) + <NUM_LIT:1><EOL><DEDENT>to_add.append('<STR_LIT:*>' * (<NUM_LIT> - count))<EOL>multi.append('<STR_LIT:U+0020>'.join(to_add))<EOL><DEDENT>s = '<STR_LIT:\\n>'.join(multi)<EOL>lines = len(multi)<EOL><DEDENT>else:<EOL><INDENT>lines = <NUM_LIT:1><EOL><DEDENT>if isinstance(nl, int) and nl > lines:<EOL><INDENT>s += '<STR_LIT:\\n>' * (nl - lines)<EOL><DEDENT>return secho(s, file=file, **kwargs)<EOL>", "docstring": "Log the given output to stderr if and only if we are in\n    verbose mode.\n\n    If we are not in verbose mode, this is a no-op.", "id": "f3336:m0"}
{"signature": "def unified_job_template_options(method):", "body": "jt_dec = click.option(<EOL>'<STR_LIT>', type=types.Related('<STR_LIT>'),<EOL>help='<STR_LIT>')<EOL>prj_dec = click.option(<EOL>'<STR_LIT>', type=types.Related('<STR_LIT>'),<EOL>help='<STR_LIT>')<EOL>inv_src_dec = click.option(<EOL>'<STR_LIT>', type=types.Related('<STR_LIT>'),<EOL>help='<STR_LIT>')<EOL>def ujt_translation(_method):<EOL><INDENT>def _ujt_translation(*args, **kwargs):<EOL><INDENT>for fd in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if fd in kwargs and kwargs[fd] is not None:<EOL><INDENT>kwargs['<STR_LIT>'] = kwargs.pop(fd)<EOL><DEDENT><DEDENT>return _method(*args, **kwargs)<EOL><DEDENT>return functools.wraps(_method)(_ujt_translation)<EOL><DEDENT>return ujt_translation(<EOL>inv_src_dec(<EOL>prj_dec(<EOL>jt_dec(<EOL>method<EOL>)<EOL>)<EOL>)<EOL>)<EOL>", "docstring": "Adds the decorators for all types of unified job templates,\nand if the non-unified type is specified, converts it into the\nunified_job_template kwarg.", "id": "f3338:m0"}
{"signature": "def get_resource(name):", "body": "module = importlib.import_module('<STR_LIT>' % name)<EOL>return module.Resource()<EOL>", "docstring": "Return an instance of the requested Resource class.\n\n    Since all of the resource classes are named `Resource`, this provides\n    a slightly cleaner interface for using these classes via. importing rather\n    than through the CLI.", "id": "f3339:m0"}
{"signature": "@click.command()<EOL>@with_global_options<EOL>@click.option('<STR_LIT>', required=False, multiple=True)<EOL>@click.option('<STR_LIT>', required=False, multiple=True)<EOL>@click.option('<STR_LIT>', required=False, multiple=True)<EOL>@click.option('<STR_LIT>', required=False, multiple=True)<EOL>@click.option('<STR_LIT>', required=False, multiple=True)<EOL>@click.option('<STR_LIT>', required=False, multiple=True)<EOL>@click.option('<STR_LIT>', required=False, multiple=True)<EOL>@click.option('<STR_LIT>', required=False, multiple=True)<EOL>@click.option('<STR_LIT>', required=False, multiple=True)<EOL>@click.option('<STR_LIT>', required=False, multiple=True)<EOL>@click.option('<STR_LIT>', required=False, multiple=True)<EOL>@click.option('<STR_LIT>', is_flag=True)<EOL>def receive(organization=None, user=None, team=None, credential_type=None, credential=None,<EOL>notification_template=None, inventory_script=None, inventory=None, project=None, job_template=None,<EOL>workflow=None, all=None):", "body": "from tower_cli.cli.transfer.receive import Receiver<EOL>receiver = Receiver()<EOL>assets_to_export = {}<EOL>for asset_type in SEND_ORDER:<EOL><INDENT>assets_to_export[asset_type] = locals()[asset_type]<EOL><DEDENT>receiver.receive(all=all, asset_input=assets_to_export)<EOL>", "docstring": "Export assets from Tower.\n\n    'tower receive' exports one or more assets from a Tower instance\n\n    For all of the possible assets types the TEXT can either be the assets name\n    (or username for the case of a user) or the keyword all. Specifying all\n    will export all of the assets of that type.", "id": "f3340:m5"}
{"signature": "@click.command()<EOL>def logout():", "body": "if not supports_oauth():<EOL><INDENT>raise exc.TowerCLIError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>config.main(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>", "docstring": "Removes an OAuth2 personal auth token from config.", "id": "f3340:m4"}
{"signature": "def _echo_setting(key):", "body": "value = getattr(settings, key)<EOL>secho('<STR_LIT>' % key, fg='<STR_LIT>', bold=True, nl=False)<EOL>secho(<EOL>six.text_type(value),<EOL>bold=True,<EOL>fg='<STR_LIT>' if isinstance(value, six.text_type) else '<STR_LIT>',<EOL>)<EOL>", "docstring": "Echo a setting to the CLI.", "id": "f3340:m1"}
{"signature": "def _format_id(self, payload):", "body": "if '<STR_LIT:id>' in payload:<EOL><INDENT>return str(payload['<STR_LIT:id>'])<EOL><DEDENT>if '<STR_LIT>' in payload:<EOL><INDENT>return '<STR_LIT:U+0020>'.join([six.text_type(item['<STR_LIT:id>']) for item in payload['<STR_LIT>']])<EOL><DEDENT>raise MultipleRelatedError('<STR_LIT>')<EOL>", "docstring": "Echos only the id", "id": "f3341:c0:m6"}
{"signature": "def get_command(self, ctx, name):", "body": "<EOL>if not hasattr(self.resource, name):<EOL><INDENT>return None<EOL><DEDENT>method = getattr(self.resource, name)<EOL>attrs = getattr(method, '<STR_LIT>', {})<EOL>help_text = inspect.getdoc(method)<EOL>attrs['<STR_LIT>'] = self._auto_help_text(help_text or '<STR_LIT>')<EOL>ignore_defaults = attrs.pop('<STR_LIT>', False)<EOL>new_method = self._echo_method(method)<EOL>click_params = getattr(method, '<STR_LIT>', [])<EOL>new_method.__click_params__ = copy(click_params)<EOL>new_method = with_global_options(new_method)<EOL>fao = attrs.pop('<STR_LIT>', True)<EOL>if fao:<EOL><INDENT>for field in reversed(self.resource.fields):<EOL><INDENT>if not field.is_option:<EOL><INDENT>continue<EOL><DEDENT>if not isinstance(fao, bool) and field.name not in fao:<EOL><INDENT>continue<EOL><DEDENT>args = [field.option]<EOL>if field.key:<EOL><INDENT>args.insert(<NUM_LIT:0>, field.key)<EOL><DEDENT>short_fields = {<EOL>'<STR_LIT:name>': '<STR_LIT:n>',<EOL>'<STR_LIT:description>': '<STR_LIT:d>',<EOL>'<STR_LIT>': '<STR_LIT:i>',<EOL>'<STR_LIT>': '<STR_LIT:e>'<EOL>}<EOL>if field.name in short_fields:<EOL><INDENT>args.append('<STR_LIT:->'+short_fields[field.name])<EOL><DEDENT>option_help = field.help<EOL>if isinstance(field.type, StructuredInput):<EOL><INDENT>option_help += '<STR_LIT>'<EOL><DEDENT>if field.required:<EOL><INDENT>option_help = '<STR_LIT>' + option_help<EOL><DEDENT>elif field.read_only:<EOL><INDENT>option_help = '<STR_LIT>' + option_help<EOL><DEDENT>option_help = '<STR_LIT>' + option_help<EOL>click.option(<EOL>*args,<EOL>default=field.default if not ignore_defaults else None,<EOL>help=option_help,<EOL>type=field.type,<EOL>show_default=field.show_default,<EOL>multiple=field.multiple,<EOL>is_eager=False<EOL>)(new_method)<EOL><DEDENT><DEDENT>cmd = click.command(name=name, cls=ActionSubcommand, **attrs)(new_method)<EOL>code = six.get_function_code(method)<EOL>if '<STR_LIT>' in 
code.co_varnames:<EOL><INDENT>click.argument('<STR_LIT>', nargs=<NUM_LIT:1>, required=False, type=str, metavar='<STR_LIT>')(cmd)<EOL><DEDENT>return cmd<EOL>", "docstring": "Retrieve the appropriate method from the Resource,\n        decorate it as a click command, and return that method.", "id": "f3341:c0:m9"}
{"signature": "def list_commands(self, ctx):", "body": "return self.resource.commands<EOL>", "docstring": "Return a list of all methods decorated with the\n        @resources.command decorator.", "id": "f3341:c0:m1"}
{"signature": "def _auto_help_text(self, help_text):", "body": "<EOL>api_doc_delimiter = '<STR_LIT>'<EOL>begin_api_doc = help_text.find(api_doc_delimiter)<EOL>if begin_api_doc >= <NUM_LIT:0>:<EOL><INDENT>end_api_doc = help_text.rfind(api_doc_delimiter) + len(api_doc_delimiter)<EOL>help_text = help_text[:begin_api_doc] + help_text[end_api_doc:]<EOL><DEDENT>an_prefix = ('<STR_LIT:a>', '<STR_LIT:e>', '<STR_LIT:i>', '<STR_LIT:o>')<EOL>if not self.resource_name.lower().startswith(an_prefix):<EOL><INDENT>help_text = help_text.replace('<STR_LIT>',<EOL>'<STR_LIT>' % self.resource_name)<EOL><DEDENT>if self.resource_name.lower().endswith('<STR_LIT:y>'):<EOL><INDENT>help_text = help_text.replace(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' % self.resource_name[:-<NUM_LIT:1>],<EOL>)<EOL><DEDENT>help_text = help_text.replace('<STR_LIT:object>', self.resource_name)<EOL>help_text = help_text.replace('<STR_LIT>', '<STR_LIT>')<EOL>help_text = help_text.replace('<STR_LIT>',<EOL>'<STR_LIT>')<EOL>for match in re.findall(r'<STR_LIT>', help_text):<EOL><INDENT>option = '<STR_LIT>' % match.replace('<STR_LIT:_>', '<STR_LIT:->')<EOL>help_text = help_text.replace('<STR_LIT>' % match, option)<EOL><DEDENT>return help_text<EOL>", "docstring": "Given a method with a docstring, convert the docstring\n        to more CLI appropriate wording, and also disambiguate the\n        word \"object\" on the base class docstrings.", "id": "f3341:c0:m2"}
{"signature": "def _format_json(self, payload):", "body": "return json.dumps(payload, indent=<NUM_LIT:2>)<EOL>", "docstring": "Convert the payload into a JSON string with proper\n        indentation and return it.", "id": "f3341:c0:m4"}
{"signature": "def list_commands(self, ctx):", "body": "commands = set(self.list_resource_commands())<EOL>commands.union(set(self.list_misc_commands()))<EOL>return sorted(commands)<EOL>", "docstring": "Return a list of commands present in the commands and resources\n        folders, but not subcommands.", "id": "f3342:c0:m4"}
{"signature": "def list_resource_commands(self):", "body": "resource_path = os.path.abspath(os.path.join(<EOL>os.path.dirname(__file__),<EOL>os.pardir,<EOL>'<STR_LIT>'<EOL>))<EOL>answer = set([])<EOL>for _, name, _ in pkgutil.iter_modules([resource_path]):<EOL><INDENT>res = tower_cli.get_resource(name)<EOL>if not getattr(res, '<STR_LIT>', False):<EOL><INDENT>answer.add(name)<EOL><DEDENT><DEDENT>return sorted(answer)<EOL>", "docstring": "Returns a list of multi-commands for each resource type.", "id": "f3342:c0:m5"}
{"signature": "def convert(self, value, param, ctx):", "body": "resource = tower_cli.get_resource(self.resource_name)<EOL>if value is None:<EOL><INDENT>return None<EOL><DEDENT>if isinstance(value, int):<EOL><INDENT>return value<EOL><DEDENT>if re.match(r'<STR_LIT>', value):<EOL><INDENT>return int(value)<EOL><DEDENT>if value == '<STR_LIT:null>':<EOL><INDENT>return value<EOL><DEDENT>try:<EOL><INDENT>debug.log('<STR_LIT>'<EOL>'<STR_LIT>' % param.name, header='<STR_LIT>')<EOL>lookup_data = {resource.identity[-<NUM_LIT:1>]: value}<EOL>rel = resource.get(**lookup_data)<EOL><DEDENT>except exc.MultipleResults:<EOL><INDENT>raise exc.MultipleRelatedError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(self.resource_name,<EOL>value),<EOL>)<EOL><DEDENT>except exc.TowerCLIError as ex:<EOL><INDENT>raise exc.RelatedError('<STR_LIT>' %<EOL>(self.resource_name, str(ex)))<EOL><DEDENT>return rel['<STR_LIT:id>']<EOL>", "docstring": "Return the appropriate integer value. If a non-integer is\n        provided, attempt a name-based lookup and return the primary key.", "id": "f3343:c4:m1"}
{"signature": "def parse_args(self, ctx, args):", "body": "if not args and self.no_args_is_help and not ctx.resilient_parsing:<EOL><INDENT>click.echo(ctx.get_help())<EOL>ctx.exit()<EOL><DEDENT>return super(ActionSubcommand, self).parse_args(ctx, args)<EOL>", "docstring": "Parse arguments sent to this command.\n\n        The code for this method is taken from MultiCommand:\n        https://github.com/mitsuhiko/click/blob/master/click/core.py\n\n        It is Copyright (c) 2014 by Armin Ronacher.\n        See the license:\n        https://github.com/mitsuhiko/click/blob/master/LICENSE", "id": "f3345:c0:m1"}
{"signature": "def format_options(self, ctx, formatter):", "body": "field_opts = []<EOL>global_opts = []<EOL>local_opts = []<EOL>other_opts = []<EOL>for param in self.params:<EOL><INDENT>if param.name in SETTINGS_PARMS:<EOL><INDENT>opts = global_opts<EOL><DEDENT>elif getattr(param, '<STR_LIT>', None) and param.help.startswith('<STR_LIT>'):<EOL><INDENT>opts = field_opts<EOL>param.help = param.help[len('<STR_LIT>'):]<EOL><DEDENT>else:<EOL><INDENT>opts = local_opts<EOL><DEDENT>rv = param.get_help_record(ctx)<EOL>if rv is None:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>opts.append(rv)<EOL><DEDENT><DEDENT>if self.add_help_option:<EOL><INDENT>help_options = self.get_help_option_names(ctx)<EOL>if help_options:<EOL><INDENT>other_opts.append([join_options(help_options)[<NUM_LIT:0>], '<STR_LIT>'])<EOL><DEDENT><DEDENT>if field_opts:<EOL><INDENT>with formatter.section('<STR_LIT>'):<EOL><INDENT>formatter.write_dl(field_opts)<EOL><DEDENT><DEDENT>if local_opts:<EOL><INDENT>with formatter.section('<STR_LIT>'):<EOL><INDENT>formatter.write_dl(local_opts)<EOL><DEDENT><DEDENT>if global_opts:<EOL><INDENT>with formatter.section('<STR_LIT>'):<EOL><INDENT>formatter.write_dl(global_opts)<EOL><DEDENT><DEDENT>if other_opts:<EOL><INDENT>with formatter.section('<STR_LIT>'):<EOL><INDENT>formatter.write_dl(other_opts)<EOL><DEDENT><DEDENT>", "docstring": "Monkey-patch click's format_options method to support option categorization.", "id": "f3345:c0:m2"}
{"signature": "@property<EOL><INDENT>def _parsers(self):<DEDENT>", "body": "return tuple([getattr(self, '<STR_LIT>' % i) for i in self._parser_names])<EOL>", "docstring": "Return a tuple of all parsers, in order.\n\n        This is referenced at runtime, to avoid gleefully ignoring the\n        `runtime_values` context manager.", "id": "f3351:c1:m3"}
{"signature": "def set_or_reset_runtime_param(self, key, value):", "body": "if self._runtime.has_option('<STR_LIT>', key):<EOL><INDENT>self._runtime = self._new_parser()<EOL><DEDENT>if value is None:<EOL><INDENT>return<EOL><DEDENT>settings._runtime.set('<STR_LIT>', key.replace('<STR_LIT>', '<STR_LIT>'),<EOL>six.text_type(value))<EOL>", "docstring": "Maintains the context of the runtime settings for invoking\n        a command.\n\n        This should be called by a click.option callback, and only\n        called once for each setting for each command invocation.\n\n        If the setting exists, it follows that the runtime settings are\n        stale, so the entire runtime settings are reset.", "id": "f3351:c1:m4"}
{"signature": "def _read(self, fp, fpname):", "body": "<EOL>if os.path.isfile(fpname):<EOL><INDENT>file_permission = os.stat(fpname)<EOL>if fpname != os.path.join(tower_dir, '<STR_LIT>') and (<EOL>(file_permission.st_mode & stat.S_IRGRP) or<EOL>(file_permission.st_mode & stat.S_IROTH)<EOL>):<EOL><INDENT>warnings.warn('<STR_LIT>'<EOL>.format(fpname), RuntimeWarning)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>return configparser.ConfigParser._read(self, fp, fpname)<EOL><DEDENT>except configparser.MissingSectionHeaderError:<EOL><INDENT>fp.seek(<NUM_LIT:0>)<EOL>string = '<STR_LIT>' % fp.read()<EOL>flo = StringIO(string)  <EOL>return configparser.ConfigParser._read(self, flo, fpname)<EOL><DEDENT>", "docstring": "Read the configuration from the given file.\n\n        If the file lacks any section header, add a [general] section\n        header that encompasses the whole thing.", "id": "f3351:c0:m0"}
{"signature": "def with_global_options(method):", "body": "<EOL>method = click.option(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>required=False, callback=_apply_runtime_setting,<EOL>is_eager=True,<EOL>expose_value=False<EOL>)(method)<EOL>method = click.option(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>required=False, callback=_apply_runtime_setting,<EOL>is_eager=True,<EOL>expose_value=False<EOL>)(method)<EOL>method = click.option(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>required=False, callback=_apply_runtime_setting,<EOL>is_eager=True,<EOL>expose_value=False<EOL>)(method)<EOL>method = click.option(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>required=False, callback=_apply_runtime_setting,<EOL>is_eager=True,<EOL>expose_value=False<EOL>)(method)<EOL>method = click.option(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>type=click.Choice(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:id>']),<EOL>required=False, callback=_apply_runtime_setting,<EOL>is_eager=True,<EOL>expose_value=False<EOL>)(method)<EOL>method = click.option(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>default=None,<EOL>help='<STR_LIT>',<EOL>is_flag=True,<EOL>required=False, callback=_apply_runtime_setting,<EOL>is_eager=True,<EOL>expose_value=False<EOL>)(method)<EOL>method = click.option(<EOL>'<STR_LIT>',<EOL>default=None,<EOL>help='<STR_LIT>',<EOL>is_flag=True,<EOL>required=False, callback=_apply_runtime_setting,<EOL>is_eager=True,<EOL>expose_value=False<EOL>)(method)<EOL>method = click.option(<EOL>'<STR_LIT>',<EOL>default=None,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>is_flag=True,<EOL>required=False, callback=_apply_runtime_setting,<EOL>is_eager=True,<EOL>expose_value=False<EOL>)(method)<EOL>method = 
click.option(<EOL>'<STR_LIT>',<EOL>default=None,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>required=False, callback=_apply_runtime_setting,<EOL>is_eager=True,<EOL>expose_value=False<EOL>)(method)<EOL>method = click.option(<EOL>'<STR_LIT>',<EOL>default=None,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>is_flag=True,<EOL>required=False, callback=_apply_runtime_setting,<EOL>is_eager=True,<EOL>expose_value=False<EOL>)(method)<EOL>method = runtime_context_manager(method)<EOL>return method<EOL>", "docstring": "Apply the global options that we desire on every method within\n    tower-cli to the given click command.", "id": "f3351:m3"}
{"signature": "def __init__(self):", "body": "self._cache = {}<EOL>defaults = {}<EOL>for key in CONFIG_OPTIONS:<EOL><INDENT>defaults[key] = '<STR_LIT>'<EOL><DEDENT>defaults.update({<EOL>'<STR_LIT>': '<STR_LIT:true>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:host>': '<STR_LIT:127.0.0.1>',<EOL>'<STR_LIT>': '<STR_LIT:true>',<EOL>'<STR_LIT>': '<STR_LIT:false>',<EOL>'<STR_LIT>': '<STR_LIT:false>',<EOL>'<STR_LIT>': '<STR_LIT:false>',<EOL>})<EOL>self._defaults = self._new_parser(defaults=defaults)<EOL>self._environment = self._new_parser(defaults=config_from_environment())<EOL>self._global = self._new_parser()<EOL>if os.path.isdir(tower_dir):<EOL><INDENT>try:<EOL><INDENT>os.listdir(tower_dir)<EOL><DEDENT>except OSError:<EOL><INDENT>warnings.warn('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>RuntimeWarning)<EOL><DEDENT>self._global.read(os.path.join(tower_dir, '<STR_LIT>'))<EOL><DEDENT>self._user = self._new_parser()<EOL>user_filename = os.path.join(user_dir, CONFIG_FILENAME)<EOL>self._user.read(user_filename)<EOL>self._local = self._new_parser()<EOL>local_dir = os.getcwd()<EOL>local_dirs = [local_dir] if local_dir not in (user_dir, tower_dir) else []<EOL>while os.path.split(local_dir)[<NUM_LIT:1>]:<EOL><INDENT>local_dir, _ = os.path.split(local_dir)<EOL>if local_dir not in (user_dir, tower_dir):<EOL><INDENT>local_dirs = [local_dir] + local_dirs<EOL><DEDENT><DEDENT>for local_dir in local_dirs:<EOL><INDENT>local_filename = os.path.join(local_dir, CONFIG_FILENAME)<EOL>self._local.read(local_filename)<EOL><DEDENT>self._runtime = self._new_parser()<EOL>", "docstring": "Create the settings object, and read from appropriate files as\n        well as from `sys.argv`.", "id": "f3351:c1:m1"}
{"signature": "@resources.command(ignore_defaults=True, no_args_is_help=False)<EOL><INDENT>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', help='<STR_LIT>')<EOL>def list(self, root=False, **kwargs):<DEDENT>", "body": "<EOL>if kwargs.get('<STR_LIT>', None):<EOL><INDENT>self.set_child_endpoint(parent=kwargs['<STR_LIT>'], inventory=kwargs.get('<STR_LIT>', None))<EOL>kwargs.pop('<STR_LIT>')<EOL><DEDENT>if root and not kwargs.get('<STR_LIT>', None):<EOL><INDENT>raise exc.UsageError('<STR_LIT>')<EOL><DEDENT>if root:<EOL><INDENT>inventory_id = kwargs['<STR_LIT>']<EOL>r = client.get('<STR_LIT>' % inventory_id)<EOL>return r.json()<EOL><DEDENT>return super(Resource, self).list(**kwargs)<EOL>", "docstring": "Return a list of groups.\n\n        =====API DOCS=====\n        Retrieve a list of groups.\n\n        :param root: Flag that if set, only root groups of a specific inventory will be listed.\n        :type root: bool\n        :param parent: Primary key or name of the group whose child groups will be listed.\n        :type parent: str\n        :param all_pages: Flag that if set, collect all pages of content from the API when returning results.\n        :type all_pages: bool\n        :param page: The page to show. Ignored if all_pages is set.\n        :type page: int\n        :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n        :type query: list\n        :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n        :returns: A JSON object containing details of all resource objects returned by Tower backend.\n        :rtype: dict\n        :raises tower_cli.exceptions.UsageError: When ``root`` flag is on and ``inventory`` is not present in\n                                                 ``**kwargs``.\n\n        =====API DOCS=====", "id": "f3355:c0:m3"}
{"signature": "def __getattribute__(self, name):", "body": "if name in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise AttributeError<EOL><DEDENT>else:<EOL><INDENT>return object.__getattribute__(self, name)<EOL><DEDENT>", "docstring": "Disable inherited methods that cannot be applied to this\n        particular resource.", "id": "f3356:c0:m0"}
{"signature": "def configure_display(self, data, kwargs=None, write=False):", "body": "if settings.format != '<STR_LIT>':<EOL><INDENT>return  <EOL><DEDENT>if write:<EOL><INDENT>obj, obj_type, res, res_type = self.obj_res(kwargs)<EOL>data['<STR_LIT:type>'] = kwargs['<STR_LIT:type>']<EOL>data[obj_type] = obj<EOL>data[res_type] = res<EOL>self.set_display_columns(<EOL>set_false=['<STR_LIT>' if obj_type == '<STR_LIT:user>' else '<STR_LIT:user>'],<EOL>set_true=['<STR_LIT>' if res_type == '<STR_LIT>' else res_type])<EOL><DEDENT>else:<EOL><INDENT>self.set_display_columns(<EOL>set_false=['<STR_LIT:user>', '<STR_LIT>'],<EOL>set_true=['<STR_LIT>', '<STR_LIT>'])<EOL>if '<STR_LIT>' in data:<EOL><INDENT>for i in range(len(data['<STR_LIT>'])):<EOL><INDENT>self.populate_resource_columns(data['<STR_LIT>'][i])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.populate_resource_columns(data)<EOL><DEDENT><DEDENT>", "docstring": "Populates columns and sets display attribute as needed.\n        Operates on data.", "id": "f3356:c0:m5"}
{"signature": "@resources.command(<EOL>use_fields_as_options=ACTOR_FIELDS+RESOURCE_FIELDS+['<STR_LIT:type>'])<EOL><INDENT>def list(self, **kwargs):<DEDENT>", "body": "data, self.endpoint = self.data_endpoint(kwargs)<EOL>r = super(Resource, self).list(**data)<EOL>self.configure_display(r)<EOL>return r<EOL>", "docstring": "Return a list of roles.\n\n        =====API DOCS=====\n        Retrieve a list of objects.\n\n        :param all_pages: Flag that if set, collect all pages of content from the API when returning results.\n        :type all_pages: bool\n        :param page: The page to show. Ignored if all_pages is set.\n        :type page: int\n        :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n        :type query: list\n        :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n        :returns: A JSON object containing details of all resource objects returned by Tower backend.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3356:c0:m7"}
{"signature": "@classmethod<EOL><INDENT>def data_endpoint(cls, in_data, ignore=[]):<DEDENT>", "body": "obj, obj_type, res, res_type = cls.obj_res(in_data, fail_on=[])<EOL>data = {}<EOL>if '<STR_LIT>' in ignore:<EOL><INDENT>obj = None<EOL><DEDENT>if '<STR_LIT>' in ignore:<EOL><INDENT>res = None<EOL><DEDENT>if obj and obj_type == '<STR_LIT:user>':<EOL><INDENT>data['<STR_LIT>'] = obj<EOL><DEDENT>if obj and obj_type == '<STR_LIT>':<EOL><INDENT>endpoint = '<STR_LIT>' % (grammar.pluralize(obj_type), obj)<EOL>if res is not None:<EOL><INDENT>data['<STR_LIT>'] = res<EOL><DEDENT><DEDENT>elif res:<EOL><INDENT>endpoint = '<STR_LIT>' % (grammar.pluralize(res_type), res)<EOL><DEDENT>else:<EOL><INDENT>endpoint = '<STR_LIT>'<EOL><DEDENT>if in_data.get('<STR_LIT:type>', False):<EOL><INDENT>data['<STR_LIT>'] = '<STR_LIT>' % in_data['<STR_LIT:type>'].lower()<EOL><DEDENT>for key, value in in_data.items():<EOL><INDENT>if key not in RESOURCE_FIELDS and key not in ['<STR_LIT:type>', '<STR_LIT:user>', '<STR_LIT>']:<EOL><INDENT>data[key] = value<EOL><DEDENT><DEDENT>return data, endpoint<EOL>", "docstring": "Converts a set of CLI input arguments, `in_data`, into\nrequest data and an endpoint that can be used to look\nup a role or list of roles.\n\nAlso changes the format of `type` in data to what the server\nexpects for the role model, as it exists in the database.", "id": "f3356:c0:m2"}
{"signature": "def role_write(self, fail_on_found=False, disassociate=False, **kwargs):", "body": "<EOL>data, self.endpoint = self.data_endpoint(kwargs, ignore=['<STR_LIT>'])<EOL>debug.log('<STR_LIT>', header='<STR_LIT>')<EOL>response = self.read(pk=None, fail_on_no_results=True,<EOL>fail_on_multiple_results=True, **data)<EOL>role_data = response['<STR_LIT>'][<NUM_LIT:0>]<EOL>role_id = role_data['<STR_LIT:id>']<EOL>self.configure_display(role_data, kwargs, write=True)<EOL>obj, obj_type, res, res_type = self.obj_res(kwargs)<EOL>debug.log('<STR_LIT>' % obj_type,<EOL>header='<STR_LIT>')<EOL>data, self.endpoint = self.data_endpoint(kwargs)<EOL>data['<STR_LIT>'] = res_type.replace('<STR_LIT:_>', '<STR_LIT>')<EOL>response = self.read(pk=None, fail_on_no_results=False,<EOL>fail_on_multiple_results=False, **data)<EOL>msg = '<STR_LIT>'<EOL>if response['<STR_LIT:count>'] > <NUM_LIT:0> and not disassociate:<EOL><INDENT>msg = '<STR_LIT>' % obj_type<EOL><DEDENT>elif response['<STR_LIT:count>'] == <NUM_LIT:0> and disassociate:<EOL><INDENT>msg = '<STR_LIT>' % obj_type<EOL><DEDENT>if msg:<EOL><INDENT>role_data['<STR_LIT>'] = False<EOL>if fail_on_found:<EOL><INDENT>raise exc.NotFound(msg)<EOL><DEDENT>else:<EOL><INDENT>debug.log(msg, header='<STR_LIT>')<EOL>return role_data<EOL><DEDENT><DEDENT>debug.log('<STR_LIT>' % (<EOL>'<STR_LIT>' if disassociate else '<STR_LIT>', obj_type), header='<STR_LIT>')<EOL>post_data = {'<STR_LIT:id>': role_id}<EOL>if disassociate:<EOL><INDENT>post_data['<STR_LIT>'] = True<EOL><DEDENT>client.post('<STR_LIT>' % (grammar.pluralize(obj_type), obj),<EOL>data=post_data)<EOL>role_data['<STR_LIT>'] = True<EOL>return role_data<EOL>", "docstring": "Re-implementation of the parent `write` method specific to roles.\n        Adds a grantee (user or team) to the resource's role.", "id": "f3356:c0:m6"}
{"signature": "@resources.command(<EOL>use_fields_as_options=ACTOR_FIELDS+RESOURCE_FIELDS+['<STR_LIT:type>'])<EOL><INDENT>@click.option('<STR_LIT>', default=False,<EOL>show_default=True, type=bool, is_flag=True,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>def revoke(self, fail_on_found=False, **kwargs):<DEDENT>", "body": "return self.role_write(fail_on_found=fail_on_found,<EOL>disassociate=True, **kwargs)<EOL>", "docstring": "Remove a user or a team from a role. Required information:\n        1) Type of the role\n        2) Resource of the role, inventory, credential, or any other\n        3) A user or a team to add to the role\n\n        =====API DOCS=====\n        Remove a user or a team from a role. Required information:\n        * Type of the role.\n        * Resource of the role, inventory, credential, or any other.\n        * A user or a team to add to the role.\n\n        :param fail_on_found: Flag that if set, the operation fails if a user/team dose not have the role.\n        :type fail_on_found: bool\n        :param `**kwargs`: The user to be disassociated and the role to disassociate.\n        :returns: parsed JSON of role revoke.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3356:c0:m10"}
{"signature": "def _configuration(self, kwargs, config_item):", "body": "if '<STR_LIT>' not in config_item:<EOL><INDENT>if '<STR_LIT>' not in kwargs:<EOL><INDENT>return<EOL><DEDENT>nc = kwargs['<STR_LIT>'] = {}<EOL>for field in Resource.configuration[kwargs['<STR_LIT>']]:<EOL><INDENT>if field not in config_item:<EOL><INDENT>raise exc.TowerCLIError('<STR_LIT>'<EOL>'<STR_LIT>' % field)<EOL><DEDENT>else:<EOL><INDENT>nc[field] = config_item[field]<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>kwargs['<STR_LIT>'] =config_item['<STR_LIT>']<EOL><DEDENT>", "docstring": "Combine configuration-related keyworded arguments into\n        notification_configuration.", "id": "f3357:c0:m1"}
{"signature": "@resources.command<EOL><INDENT>def list(self, all_pages=False, **kwargs):<DEDENT>", "body": "self._separate(kwargs)<EOL>return super(Resource, self).list(all_pages=all_pages, **kwargs)<EOL>", "docstring": "Return a list of notification templates.\n\n        Note here configuration-related fields like\n        'notification_configuration' and 'channels' will not be\n        used even provided.\n\n        If one or more filters are provided through keyword arguments,\n        filter the results accordingly.\n\n        If no filters are provided, return all results.\n\n        =====API DOCS=====\n        Retrieve a list of objects.\n\n        :param all_pages: Flag that if set, collect all pages of content from the API when returning results.\n        :type all_pages: bool\n        :param page: The page to show. Ignored if all_pages is set.\n        :type page: int\n        :param query: Contains 2-tuples used as query parameters to filter resulting resource objects.\n        :type query: list\n        :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n        :returns: A JSON object containing details of all resource objects returned by Tower backend.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3357:c0:m5"}
{"signature": "@resources.command<EOL><INDENT>@click.option('<STR_LIT>', type=types.Related('<STR_LIT>'),<EOL>required=False, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', type=click.Choice(['<STR_LIT:error>', '<STR_LIT:success>']),<EOL>required=False, help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>def create(self, fail_on_found=False, force_on_exists=False, **kwargs):<DEDENT>", "body": "config_item = self._separate(kwargs)<EOL>jt_id = kwargs.pop('<STR_LIT>', None)<EOL>status = kwargs.pop('<STR_LIT:status>', '<STR_LIT>')<EOL>old_endpoint = self.endpoint<EOL>if jt_id is not None:<EOL><INDENT>jt = get_resource('<STR_LIT>')<EOL>jt.get(pk=jt_id)<EOL>try:<EOL><INDENT>nt_id = self.get(**copy.deepcopy(kwargs))['<STR_LIT:id>']<EOL><DEDENT>except exc.NotFound:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if fail_on_found:<EOL><INDENT>raise exc.TowerCLIError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>debug.log('<STR_LIT>'<EOL>'<STR_LIT>',<EOL>header='<STR_LIT>')<EOL>return jt.associate_notification_template(<EOL>jt_id, nt_id, status=status)<EOL><DEDENT><DEDENT>self.endpoint = '<STR_LIT>' %(jt_id, status)<EOL><DEDENT>self._configuration(kwargs, config_item)<EOL>result = super(Resource, self).create(**kwargs)<EOL>self.endpoint = old_endpoint<EOL>return result<EOL>", "docstring": "Create a notification template.\n\n        All required configuration-related fields (required according to\n        notification_type) must be provided.\n\n        There are two types of notification template creation: isolatedly\n        creating a new notification template and creating a new notification\n        template under a job template. Here the two types are discriminated by\n        whether to provide --job-template option. 
--status option controls\n        more specific, job-run-status-related association.\n\n        Fields in the resource's `identity` tuple are used for a lookup;\n        if a match is found, then no-op (unless `force_on_exists` is set) but\n        do not fail (unless `fail_on_found` is set).\n\n        =====API DOCS=====\n        Create an object.\n\n        :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria\n                              already exists.\n        :type fail_on_found: bool\n        :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will\n                                be updated to the provided values.; If unset, a match causes the request to be\n                                a no-op.\n        :type force_on_exists: bool\n        :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to create the\n                           resource object.\n        :returns: A dictionary combining the JSON output of the created resource, as well as two extra fields:\n                  \"changed\", a flag indicating if the resource is created successfully; \"id\", an integer which\n                  is the primary key of the created object.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3357:c0:m2"}
{"signature": "def _separate(self, kwargs):", "body": "self._pop_none(kwargs)<EOL>result = {}<EOL>for field in Resource.config_fields:<EOL><INDENT>if field in kwargs:<EOL><INDENT>result[field] = kwargs.pop(field)<EOL>if field in Resource.json_fields:<EOL><INDENT>if not isinstance(result[field], six.string_types):<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>data = json.loads(result[field])<EOL>result[field] = data<EOL><DEDENT>except ValueError:<EOL><INDENT>raise exc.TowerCLIError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return result<EOL>", "docstring": "Remove None-valued and configuration-related keyworded arguments", "id": "f3357:c0:m0"}
{"signature": "@resources.command<EOL><INDENT>def get(self, pk=None, **kwargs):<DEDENT>", "body": "self._separate(kwargs)<EOL>return super(Resource, self).get(pk=pk, **kwargs)<EOL>", "docstring": "Return one and exactly one notification template.\n\n        Note here configuration-related fields like\n        'notification_configuration' and 'channels' will not be\n        used even provided.\n\n        Lookups may be through a primary key, specified as a positional\n        argument, and/or through filters specified through keyword arguments.\n\n        If the number of results does not equal one, raise an exception.\n\n        =====API DOCS=====\n        Retrieve one and exactly one object.\n\n        :param pk: Primary key of the resource to be read. Tower CLI will only attempt to read *that* object\n                   if ``pk`` is provided (not ``None``).\n        :type pk: int\n        :param `**kwargs`: Keyword arguments used to look up resource object to retrieve if ``pk`` is not provided.\n        :returns: loaded JSON of the retrieved resource object.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3357:c0:m6"}
{"signature": "@resources.command<EOL><INDENT>def delete(self, pk=None, fail_on_missing=False, **kwargs):<DEDENT>", "body": "self._separate(kwargs)<EOL>return super(Resource, self).delete(pk=pk, fail_on_missing=fail_on_missing, **kwargs)<EOL>", "docstring": "Remove the given notification template.\n\n        Note here configuration-related fields like\n        'notification_configuration' and 'channels' will not be\n        used even provided.\n\n        If `fail_on_missing` is True, then the object's not being found is\n        considered a failure; otherwise, a success with no change is reported.\n\n        =====API DOCS=====\n        Remove the given object.\n\n        :param pk: Primary key of the resource to be deleted.\n        :type pk: int\n        :param fail_on_missing: Flag that if set, the object's not being found is considered a failure; otherwise,\n                                a success with no change is reported.\n        :type fail_on_missing: bool\n        :param `**kwargs`: Keyword arguments used to look up resource object to delete if ``pk`` is not provided.\n        :returns: dictionary of only one field \"changed\", which is a flag indicating whether the specified resource\n                  is successfully deleted.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3357:c0:m4"}
{"signature": "@click.argument('<STR_LIT>', type=types.Related('<STR_LIT>'))<EOL><INDENT>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', required=False, type=int,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>@resources.command(use_fields_as_options=False, no_args_is_help=True)<EOL>def update(self, inventory_source, monitor=False, wait=False,<EOL>timeout=None, **kwargs):<DEDENT>", "body": "<EOL>debug.log('<STR_LIT>', header='<STR_LIT>')<EOL>r = client.get('<STR_LIT>' % (self.endpoint, inventory_source))<EOL>if not r.json()['<STR_LIT>']:<EOL><INDENT>raise exc.BadRequest('<STR_LIT>')<EOL><DEDENT>debug.log('<STR_LIT>', header='<STR_LIT>')<EOL>r = client.post('<STR_LIT>' % (self.endpoint, inventory_source), data={})<EOL>inventory_update_id = r.json()['<STR_LIT>']<EOL>if monitor or wait:<EOL><INDENT>if monitor:<EOL><INDENT>result = self.monitor(inventory_update_id, parent_pk=inventory_source, timeout=timeout)<EOL><DEDENT>elif wait:<EOL><INDENT>result = self.wait(inventory_update_id, parent_pk=inventory_source, timeout=timeout)<EOL><DEDENT>inventory = client.get('<STR_LIT>' % result['<STR_LIT>']).json()['<STR_LIT>']<EOL>result['<STR_LIT>'] = int(inventory)<EOL>return result<EOL><DEDENT>return {<EOL>'<STR_LIT:id>': inventory_update_id,<EOL>'<STR_LIT:status>': '<STR_LIT>'<EOL>}<EOL>", "docstring": "Update the given inventory source.\n\n        =====API DOCS=====\n        Update the given inventory source.\n\n        :param inventory_source: Primary key or name of the inventory source to be updated.\n        :type inventory_source: str\n        :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched inventory update\n                        rather than exiting with a success.\n        :type monitor: bool\n        :param wait: Flag that if set, monitor the status of the 
inventory update, but do not print while it is\n                     in progress.\n        :type wait: bool\n        :param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number\n                        of seconds.\n        :type timeout: int\n        :param `**kwargs`: Fields used to override underlyingl inventory source fields when creating and launching\n                           an inventory update.\n        :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent ``wait``\n                  call if ``wait`` flag is on; dictionary of \"status\" if none of the two flags are on.\n        :rtype: dict\n        :raises tower_cli.exceptions.BadRequest: When the inventory source cannot be updated.\n\n        =====API DOCS=====", "id": "f3358:c0:m0"}
{"signature": "@resources.command<EOL><INDENT>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>')<EOL>def status(self, pk, detail=False, **kwargs):<DEDENT>", "body": "<EOL>job = self.last_job_data(pk, **kwargs)<EOL>if detail:<EOL><INDENT>return job<EOL><DEDENT>return {<EOL>'<STR_LIT>': job['<STR_LIT>'],<EOL>'<STR_LIT>': job['<STR_LIT>'],<EOL>'<STR_LIT:status>': job['<STR_LIT:status>'],<EOL>}<EOL>", "docstring": "Print the status of the most recent sync.\n\n        =====API DOCS=====\n        Retrieve the current inventory update status.\n\n        :param pk: Primary key of the resource to retrieve status from.\n        :type pk: int\n        :param detail: Flag that if set, return the full JSON of the job resource rather than a status summary.\n        :type detail: bool\n        :param `**kwargs`: Keyword arguments used to look up resource object to retrieve status from if ``pk``\n                           is not provided.\n        :returns: full loaded JSON of the specified unified job if ``detail`` flag is on; trimed JSON containing\n                  only \"elapsed\", \"failed\" and \"status\" fields of the unified job if ``detail`` flag is off.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3358:c0:m1"}
{"signature": "@resources.command<EOL><INDENT>@click.option('<STR_LIT>', type=types.Related('<STR_LIT>'),<EOL>required=False, help='<STR_LIT>')<EOL>def create(self, fail_on_found=False, force_on_exists=False, **kwargs):<DEDENT>", "body": "jt_id = kwargs.pop('<STR_LIT>', None)<EOL>old_endpoint = self.endpoint<EOL>if jt_id is not None:<EOL><INDENT>jt = get_resource('<STR_LIT>')<EOL>jt.get(pk=jt_id)<EOL>try:<EOL><INDENT>label_id = self.get(name=kwargs.get('<STR_LIT:name>', None), organization=kwargs.get('<STR_LIT>', None))['<STR_LIT:id>']<EOL><DEDENT>except exc.NotFound:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if fail_on_found:<EOL><INDENT>raise exc.TowerCLIError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>debug.log('<STR_LIT>', header='<STR_LIT>')<EOL>return jt.associate_label(job_template=jt_id, label=label_id)<EOL><DEDENT><DEDENT>self.endpoint = '<STR_LIT>' % jt_id<EOL><DEDENT>result = super(Resource, self).create(fail_on_found=fail_on_found, force_on_exists=force_on_exists, **kwargs)<EOL>self.endpoint = old_endpoint<EOL>return result<EOL>", "docstring": "Create a new label.\n\n        There are two types of label creation: isolatedly creating a new label and creating a new label under\n        a job template. 
Here the two types are discriminated by whether to provide --job-template option.\n\n        Fields in the resource's `identity` tuple are used for a lookup; if a match is found, then no-op (unless\n        `force_on_exists` is set) but do not fail (unless `fail_on_found` is set).\n\n        =====API DOCS=====\n        Create a label.\n\n        :param job_template: Primary key or name of the job template for the created label to associate to.\n        :type job_template: str\n        :param fail_on_found: Flag that if set, the operation fails if an object matching the unique criteria\n                              already exists.\n        :type fail_on_found: bool\n        :param force_on_exists: Flag that if set, then if a match is found on unique fields, other fields will\n                                be updated to the provided values.; If unset, a match causes the request to be\n                                a no-op.\n        :type force_on_exists: bool\n        :param `**kwargs`: Keyword arguments which, all together, will be used as POST body to create the\n                           resource object.\n        :returns: A dictionary combining the JSON output of the created resource, as well as two extra fields:\n                  \"changed\", a flag indicating if the resource is created successfully; \"id\", an integer which\n                  is the primary key of the created object.\n        :rtype: dict\n        :raises tower_cli.exceptions.TowerCLIError: When the label already exists and ``fail_on_found`` flag is on.\n\n        =====API DOCS=====", "id": "f3359:c0:m1"}
{"signature": "def _get_schema(self, wfjt_id):", "body": "node_res = get_resource('<STR_LIT>')<EOL>node_results = node_res.list(workflow_job_template=wfjt_id,<EOL>all_pages=True)['<STR_LIT>']<EOL>return self._workflow_node_structure(node_results)<EOL>", "docstring": "Returns a dictionary that represents the node network of the\nworkflow job template", "id": "f3361:c1:m1"}
{"signature": "@resources.command(use_fields_as_options=False)<EOL><INDENT>@click.argument('<STR_LIT>', type=types.Related('<STR_LIT>'))<EOL>@click.argument('<STR_LIT>', type=types.Variables(), required=False)<EOL>def schema(self, wfjt, node_network=None):<DEDENT>", "body": "existing_network = self._get_schema(wfjt)<EOL>if not isinstance(existing_network, list):<EOL><INDENT>existing_network = []<EOL><DEDENT>if node_network is None:<EOL><INDENT>if settings.format == '<STR_LIT>':<EOL><INDENT>settings.format = '<STR_LIT>'<EOL><DEDENT>return existing_network<EOL><DEDENT>if hasattr(node_network, '<STR_LIT>'):<EOL><INDENT>node_network = node_network.read()<EOL><DEDENT>node_network = string_to_dict(<EOL>node_network, allow_kv=False, require_dict=False)<EOL>if not isinstance(node_network, list):<EOL><INDENT>node_network = []<EOL><DEDENT>_update_workflow([TreeNode(x, wfjt, include_id=True) for x in existing_network],<EOL>[TreeNode(x, wfjt) for x in node_network])<EOL>if settings.format == '<STR_LIT>':<EOL><INDENT>settings.format = '<STR_LIT>'<EOL><DEDENT>return self._get_schema(wfjt)<EOL>", "docstring": "Convert YAML/JSON content into workflow node objects if\nnode_network param is given.\nIf not, print a YAML representation of the node network.\n\n=====API DOCS=====\nConvert YAML/JSON content into workflow node objects if ``node_network`` param is given. If not,\nprint a YAML representation of the node network.\n\n:param wfjt: Primary key or name of the workflow job template to run schema against.\n:type wfjt: str\n:param node_network: JSON- or YAML-formatted string representing the topology of the workflow job\n                     template be updated to.\n:type node_network: str\n:returns: The latest topology (possibly after modification) of the workflow job template.\n:rtype: dict\n\n=====API DOCS=====", "id": "f3361:c1:m2"}
{"signature": "@staticmethod<EOL><INDENT>def _workflow_node_structure(node_results):<DEDENT>", "body": "<EOL>node_list_pos = {}<EOL>for i, node_result in enumerate(node_results):<EOL><INDENT>for rel in ['<STR_LIT:success>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>node_result['<STR_LIT>'.format(rel)] = []<EOL><DEDENT>node_list_pos[node_result['<STR_LIT:id>']] = i<EOL><DEDENT>for node_result in node_results:<EOL><INDENT>for rel in ['<STR_LIT:success>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>for sub_node_id in node_result['<STR_LIT>'.format(rel)]:<EOL><INDENT>j = node_list_pos[sub_node_id]<EOL>node_results[j]['<STR_LIT>'.format(rel)].append(<EOL>node_result['<STR_LIT:id>'])<EOL><DEDENT><DEDENT><DEDENT>root_nodes = []<EOL>for node_result in node_results:<EOL><INDENT>is_root = True<EOL>for rel in ['<STR_LIT:success>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if node_result['<STR_LIT>'.format(rel)] != []:<EOL><INDENT>is_root = False<EOL>break<EOL><DEDENT><DEDENT>if is_root:<EOL><INDENT>root_nodes.append(node_result['<STR_LIT:id>'])<EOL><DEDENT><DEDENT>def branch_schema(node_id):<EOL><INDENT>i = node_list_pos[node_id]<EOL>node_dict = node_results[i]<EOL>ret_dict = {\"<STR_LIT:id>\": node_id}<EOL>for fd in NODE_STANDARD_FIELDS:<EOL><INDENT>val = node_dict.get(fd, None)<EOL>if val is not None:<EOL><INDENT>if fd == '<STR_LIT>':<EOL><INDENT>job_type = node_dict['<STR_LIT>'][<EOL>'<STR_LIT>']['<STR_LIT>']<EOL>ujt_key = JOB_TYPES[job_type]<EOL>ret_dict[ujt_key] = val<EOL><DEDENT>else:<EOL><INDENT>ret_dict[fd] = val<EOL><DEDENT><DEDENT><DEDENT>for rel in ['<STR_LIT:success>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>sub_node_id_list = node_dict['<STR_LIT>'.format(rel)]<EOL>if len(sub_node_id_list) == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>relationship_name = '<STR_LIT>'.format(rel)<EOL>ret_dict[relationship_name] = []<EOL>for sub_node_id in sub_node_id_list:<EOL><INDENT>ret_dict[relationship_name].append(<EOL>branch_schema(sub_node_id))<EOL><DEDENT><DEDENT>return 
ret_dict<EOL><DEDENT>schema_dict = []<EOL>for root_node_id in root_nodes:<EOL><INDENT>schema_dict.append(branch_schema(root_node_id))<EOL><DEDENT>return schema_dict<EOL>", "docstring": "Takes the list results from the API in `node_results` and\ntranslates this data into a dictionary organized in a\nhuman-readable heirarchial structure", "id": "f3361:c1:m0"}
{"signature": "@resources.command(use_fields_as_options=False)<EOL><INDENT>@click.option('<STR_LIT>', type=types.Related('<STR_LIT>'))<EOL>@click.option('<STR_LIT>',<EOL>type=types.Related('<STR_LIT>'))<EOL>@click.option('<STR_LIT>', type=click.Choice(['<STR_LIT>', '<STR_LIT:error>', '<STR_LIT:success>']),<EOL>required=False, default='<STR_LIT>', help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>def associate_notification_template(self, workflow,<EOL>notification_template, status):<DEDENT>", "body": "return self._assoc('<STR_LIT>' % status,<EOL>workflow, notification_template)<EOL>", "docstring": "Associate a notification template from this workflow.\n\n        =====API DOCS=====\n        Associate a notification template from this workflow job template.\n\n        :param workflow: The workflow job template to associate to.\n        :type workflow: str\n        :param notification_template: The notification template to be associated.\n        :type notification_template: str\n        :param status: type of notification this notification template should be associated to.\n        :type status: str\n        :returns: Dictionary of only one key \"changed\", which indicates whether the association succeeded.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3361:c1:m3"}
{"signature": "@resources.command(use_fields_as_options=False)<EOL><INDENT>@click.option('<STR_LIT>', type=types.Related('<STR_LIT>'))<EOL>@click.option('<STR_LIT>',<EOL>type=types.Related('<STR_LIT>'))<EOL>@click.option('<STR_LIT>', type=click.Choice(['<STR_LIT>', '<STR_LIT:error>', '<STR_LIT:success>']),<EOL>required=False, default='<STR_LIT>', help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>def disassociate_notification_template(self, workflow,<EOL>notification_template, status):<DEDENT>", "body": "return self._disassoc('<STR_LIT>' % status,<EOL>workflow, notification_template)<EOL>", "docstring": "Disassociate a notification template from this workflow.\n\n        =====API DOCS=====\n        Disassociate a notification template from this workflow job template.\n\n        :param job_template: The workflow job template to disassociate from.\n        :type job_template: str\n        :param notification_template: The notification template to be disassociated.\n        :type notification_template: str\n        :param status: type of notification this notification template should be disassociated from.\n        :type status: str\n        :returns: Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3361:c1:m4"}
{"signature": "def read(self, *args, **kwargs):", "body": "if '<STR_LIT>' in kwargs:<EOL><INDENT>kwargs['<STR_LIT>'] = kwargs.pop('<STR_LIT>')<EOL><DEDENT>r = super(Resource, self).read(*args, **kwargs)<EOL>if '<STR_LIT>' in r:<EOL><INDENT>for d in r['<STR_LIT>']:<EOL><INDENT>self._promote_actor(d)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._promote_actor(d)<EOL><DEDENT>return r<EOL>", "docstring": "Do extra processing so we can display the actor field as\na top-level field", "id": "f3363:c0:m3"}
{"signature": "@resources.command(<EOL>use_fields_as_options=(<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>)<EOL>)<EOL><INDENT>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>@click.option('<STR_LIT>', required=False, type=int,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>def launch(self, monitor=False, wait=False, timeout=None, **kwargs):<DEDENT>", "body": "<EOL>r = client.get('<STR_LIT:/>')<EOL>if '<STR_LIT>' not in r.json():<EOL><INDENT>raise exc.TowerCLIError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>self._pop_none(kwargs)<EOL>debug.log('<STR_LIT>', header='<STR_LIT>')<EOL>result = client.post(self.endpoint, data=kwargs)<EOL>command = result.json()<EOL>command_id = command['<STR_LIT:id>']<EOL>if monitor:<EOL><INDENT>return self.monitor(command_id, timeout=timeout)<EOL><DEDENT>elif wait:<EOL><INDENT>return self.wait(command_id, timeout=timeout)<EOL><DEDENT>answer = OrderedDict((<EOL>('<STR_LIT>', True),<EOL>('<STR_LIT:id>', command_id),<EOL>))<EOL>answer.update(result.json())<EOL>return answer<EOL>", "docstring": "Launch a new ad-hoc command.\n\n        Runs a user-defined command from Ansible Tower, immediately starts it,\n        and returns back an ID in order for its status to be monitored.\n\n        =====API DOCS=====\n        Launch a new ad-hoc command.\n\n        :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched command rather\n                        than exiting with a success.\n        :type monitor: bool\n        :param wait: Flag that if set, monitor the status of the job, but do not print while job is in progress.\n        :type wait: bool\n        :param timeout: If provided with ``monitor`` flag set, this attempt will time out 
after the given number\n                        of seconds.\n        :type timeout: int\n        :param `**kwargs`: Fields needed to create and launch an ad hoc command.\n        :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent ``wait``\n                  call if ``wait`` flag is on; dictionary of \"id\" and \"changed\" if none of the two flags are on.\n        :rtype: dict\n        :raises tower_cli.exceptions.TowerCLIError: When ad hoc commands are not available in Tower backend.\n\n        =====API DOCS=====", "id": "f3365:c0:m0"}
{"signature": "@resources.command(use_fields_as_options=False)<EOL><INDENT>@click.option('<STR_LIT>', type=types.Related('<STR_LIT>'))<EOL>@click.option('<STR_LIT>',<EOL>type=types.Related('<STR_LIT>'))<EOL>@click.option('<STR_LIT>', type=click.Choice(['<STR_LIT>', '<STR_LIT:error>', '<STR_LIT:success>']),<EOL>required=False, default='<STR_LIT>', help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>def disassociate_notification_template(self, job_template,<EOL>notification_template, status):<DEDENT>", "body": "return self._disassoc('<STR_LIT>' % status,<EOL>job_template, notification_template)<EOL>", "docstring": "Disassociate a notification template from this job template.\n\n        =====API DOCS=====\n        Disassociate a notification template from this job template.\n\n        :param job_template: The job template to disassociate from.\n        :type job_template: str\n        :param notification_template: The notification template to be disassociated.\n        :type notification_template: str\n        :param status: type of notification this notification template should be disassociated from.\n        :type status: str\n        :returns: Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3368:c0:m4"}
{"signature": "@resources.command(use_fields_as_options=('<STR_LIT>'))<EOL><INDENT>@click.option('<STR_LIT>', help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>def callback(self, pk=None, host_config_key='<STR_LIT>', extra_vars=None):<DEDENT>", "body": "url = self.endpoint + '<STR_LIT>' % pk<EOL>if not host_config_key:<EOL><INDENT>host_config_key = client.get(url).json()['<STR_LIT>']<EOL><DEDENT>post_data = {'<STR_LIT>': host_config_key}<EOL>if extra_vars:<EOL><INDENT>post_data['<STR_LIT>'] = parser.process_extra_vars(list(extra_vars), force_json=True)<EOL><DEDENT>r = client.post(url, data=post_data, auth=None)<EOL>if r.status_code == <NUM_LIT>:<EOL><INDENT>return {'<STR_LIT>': True}<EOL><DEDENT>", "docstring": "Contact Tower and request a configuration update using this job template.\n\n        =====API DOCS=====\n        Contact Tower and request a provisioning callback using this job template.\n\n        :param pk: Primary key of the job template to run provisioning callback against.\n        :type pk: int\n        :param host_config_key: Key string used to authenticate the callback host.\n        :type host_config_key: str\n        :param extra_vars: Extra variables that are passed to provisioning callback.\n        :type extra_vars: array of str\n        :returns: A dictionary of a single key \"changed\", which indicates whether the provisioning callback\n                  is successful.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3368:c0:m5"}
{"signature": "def command(method=None, **kwargs):", "body": "<EOL>def actual_decorator(method):<EOL><INDENT>method._cli_command = True<EOL>method._cli_command_attrs = kwargs<EOL>return method<EOL><DEDENT>if method and isinstance(method, types.FunctionType):<EOL><INDENT>return actual_decorator(method)<EOL><DEDENT>else:<EOL><INDENT>return actual_decorator<EOL><DEDENT>", "docstring": "Mark this method as a CLI command.\n\n    This will only have any meaningful effect in methods that are members of a\n    Resource subclass.", "id": "f3374:m0"}
{"signature": "@resources.command(use_fields_as_options=False)<EOL><INDENT>def get(self, pk):<DEDENT>", "body": "<EOL>try:<EOL><INDENT>return next(s for s in self.list()['<STR_LIT>'] if s['<STR_LIT:id>'] == pk)<EOL><DEDENT>except StopIteration:<EOL><INDENT>raise exc.NotFound('<STR_LIT>')<EOL><DEDENT>", "docstring": "Return one and exactly one object\n\n        =====API DOCS=====\n        Return one and exactly one Tower setting.\n\n        :param pk: Primary key of the Tower setting to retrieve\n        :type pk: int\n        :returns: loaded JSON of the retrieved Tower setting object.\n        :rtype: dict\n        :raises tower_cli.exceptions.NotFound: When no specified Tower setting exists.\n\n        =====API DOCS=====", "id": "f3376:c0:m1"}
{"signature": "def __getattribute__(self, name):", "body": "if name in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise AttributeError<EOL><DEDENT>else:<EOL><INDENT>return object.__getattribute__(self, name)<EOL><DEDENT>", "docstring": "Disable inherited methods that cannot be applied to this\n        particular resource.", "id": "f3376:c0:m5"}
{"signature": "@resources.command(use_fields_as_options=False)<EOL><INDENT>@click.argument('<STR_LIT>')<EOL>@click.argument('<STR_LIT:value>', default=None, required=False,<EOL>type=types.Variables())<EOL>def modify(self, setting, value):<DEDENT>", "body": "prev_value = new_value = self.get(setting)['<STR_LIT:value>']<EOL>answer = OrderedDict()<EOL>encrypted = '<STR_LIT>' in six.text_type(prev_value)<EOL>if encrypted or six.text_type(prev_value) != six.text_type(value):<EOL><INDENT>if setting == '<STR_LIT>':<EOL><INDENT>r = client.post('<STR_LIT>',<EOL>data=self.coerce_type(setting, value))<EOL>new_value = r.json()<EOL><DEDENT>else:<EOL><INDENT>r = client.patch(<EOL>self.endpoint,<EOL>data={setting: self.coerce_type(setting, value)}<EOL>)<EOL>new_value = r.json()[setting]<EOL><DEDENT>answer.update(r.json())<EOL><DEDENT>changed = encrypted or (prev_value != new_value)<EOL>answer.update({<EOL>'<STR_LIT>': changed,<EOL>'<STR_LIT:id>': setting,<EOL>'<STR_LIT:value>': new_value,<EOL>})<EOL>return answer<EOL>", "docstring": "Modify an already existing object.\n\n        Positional argument SETTING is the setting name and VALUE is its value,\n        which can be provided directly or obtained from a file name if prefixed with '@'.\n\n        =====API DOCS=====\n        Modify an already existing Tower setting.\n\n        :param setting: The name of the Tower setting to be modified.\n        :type setting: str\n        :param value: The new value of the Tower setting.\n        :type value: str\n        :returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:\n                  \"changed\", a flag indicating if the resource is successfully updated; \"id\", an integer which\n                  is the primary key of the updated object.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3376:c0:m2"}
{"signature": "def jt_aggregate(func, is_create=False, has_pk=False):", "body": "def helper(kwargs, obj):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>unified_job_template = None<EOL>for item in UNIFIED_JT:<EOL><INDENT>if kwargs.get(item, None) is not None:<EOL><INDENT>jt_id = kwargs.pop(item)<EOL>if unified_job_template is None:<EOL><INDENT>unified_job_template = (item, jt_id)<EOL><DEDENT>else:<EOL><INDENT>raise exc.UsageError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT><DEDENT><DEDENT>if unified_job_template is not None:<EOL><INDENT>kwargs['<STR_LIT>'] = unified_job_template[<NUM_LIT:1>]<EOL>obj.identity = tuple(list(obj.identity) + ['<STR_LIT>'])<EOL>return '<STR_LIT:/>'.join([UNIFIED_JT[unified_job_template[<NUM_LIT:0>]],<EOL>str(unified_job_template[<NUM_LIT:1>]), '<STR_LIT>'])<EOL><DEDENT>elif is_create:<EOL><INDENT>raise exc.UsageError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>def decorator_without_pk(obj, *args, **kwargs):<EOL><INDENT>old_endpoint = obj.endpoint<EOL>new_endpoint = helper(kwargs, obj)<EOL>if is_create:<EOL><INDENT>obj.endpoint = new_endpoint<EOL><DEDENT>result = func(obj, *args, **kwargs)<EOL>obj.endpoint = old_endpoint<EOL>return result<EOL><DEDENT>def decorator_with_pk(obj, pk=None, *args, **kwargs):<EOL><INDENT>old_endpoint = obj.endpoint<EOL>new_endpoint = helper(kwargs, obj)<EOL>if is_create:<EOL><INDENT>obj.endpoint = new_endpoint<EOL><DEDENT>result = func(obj, pk=pk, *args, **kwargs)<EOL>obj.endpoint = old_endpoint<EOL>return result<EOL><DEDENT>decorator = decorator_with_pk if has_pk else decorator_without_pk<EOL>for item in CLICK_ATTRS:<EOL><INDENT>setattr(decorator, item, getattr(func, item, []))<EOL><DEDENT>decorator.__doc__ = func.__doc__<EOL>return decorator<EOL>", "docstring": "Decorator to aggregate unified_jt-related fields.\n\n    Args:\n        func: The CURD method to be decorated.\n        is_create: Boolean flag showing whether this method is create.\n        has_pk: Boolean flag showing whether this method uses pk 
as argument.\n\n    Returns:\n        A function with necessary click-related attributes whose keyworded\n        arguments are aggregated.\n\n    Raises:\n        exc.UsageError: Either more than one unified jt fields are\n            provided, or none is provided when is_create flag is set.", "id": "f3379:m0"}
{"signature": "@resources.command<EOL><INDENT>def summary(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Placeholder to get swapped out for `stdout`.\n\n        =====API DOCS=====\n        foobar\n        =====API DOCS=====", "id": "f3380:c0:m2"}
{"signature": "@resources.command(use_fields_as_options=(<EOL>'<STR_LIT:name>', '<STR_LIT:description>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'<EOL>))<EOL><INDENT>def modify(self, pk=None, create_on_missing=False, **kwargs):<DEDENT>", "body": "<EOL>if '<STR_LIT>' in kwargs and '<STR_LIT>' not in kwargs:<EOL><INDENT>kwargs['<STR_LIT>'] = kwargs.pop('<STR_LIT>')<EOL><DEDENT>return super(Resource, self).write(<EOL>pk, create_on_missing=create_on_missing,<EOL>force_on_exists=True, **kwargs<EOL>)<EOL>", "docstring": "Modify an already existing.\n\n        To edit the project's organizations, see help for organizations.\n\n        Fields in the resource's `identity` tuple can be used in lieu of a\n        primary key for a lookup; in such a case, only other fields are\n        written.\n\n        To modify unique fields, you must use the primary key for the lookup.\n\n        =====API DOCS=====\n        Modify an already existing project.\n\n        :param pk: Primary key of the resource to be modified.\n        :type pk: int\n        :param create_on_missing: Flag that if set, a new object is created if ``pk`` is not set and objects\n                                  matching the appropriate unique criteria is not found.\n        :type create_on_missing: bool\n        :param `**kwargs`: Keyword arguments which, all together, will be used as PATCH body to modify the\n                           resource object. 
if ``pk`` is not set, key-value pairs of ``**kwargs`` which are\n                           also in resource's identity will be used to lookup existing reosource.\n        :returns: A dictionary combining the JSON output of the modified resource, as well as two extra fields:\n                  \"changed\", a flag indicating if the resource is successfully updated; \"id\", an integer which\n                  is the primary key of the updated object.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3383:c0:m1"}
{"signature": "@resources.command(use_fields_as_options=('<STR_LIT:name>', '<STR_LIT>'))<EOL><INDENT>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', required=False, type=int,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>def update(self, pk=None, create_on_missing=False, monitor=False,<EOL>wait=False, timeout=None, name=None, organization=None):<DEDENT>", "body": "<EOL>project = self.get(pk, name=name, organization=organization)<EOL>pk = project['<STR_LIT:id>']<EOL>debug.log('<STR_LIT>',<EOL>header='<STR_LIT>')<EOL>result = client.get('<STR_LIT>' % pk)<EOL>if not result.json()['<STR_LIT>']:<EOL><INDENT>raise exc.CannotStartJob('<STR_LIT>')<EOL><DEDENT>debug.log('<STR_LIT>', header='<STR_LIT>')<EOL>result = client.post('<STR_LIT>' % pk)<EOL>project_update_id = result.json()['<STR_LIT>']<EOL>if monitor:<EOL><INDENT>return self.monitor(project_update_id, parent_pk=pk,<EOL>timeout=timeout)<EOL><DEDENT>elif wait:<EOL><INDENT>return self.wait(project_update_id, parent_pk=pk, timeout=timeout)<EOL><DEDENT>return {<EOL>'<STR_LIT:id>': project_update_id,<EOL>'<STR_LIT>': True,<EOL>}<EOL>", "docstring": "Trigger a project update job within Ansible Tower.\n        Only meaningful on non-manual projects.\n\n        =====API DOCS=====\n        Update the given project.\n\n        :param pk: Primary key of the project to be updated.\n        :type pk: int\n        :param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched project update\n                        rather than exiting with a success.\n        :type monitor: bool\n        :param wait: Flag that if set, monitor the status of the project update, but do not print while it is\n                     in progress.\n        :type wait: bool\n        :param timeout: If provided with ``monitor`` flag set, this attempt will time out 
after the given number\n                        of seconds.\n        :type timeout: int\n        :param name: Name of the project to be updated if ``pk`` is not set.\n        :type name: str\n        :param organization: Primary key or name of the organization the project to be updated belonging to if\n                             ``pk`` is not set.\n        :type organization: str\n        :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent ``wait``\n                  call if ``wait`` flag is on; dictionary of \"status\" if none of the two flags are on.\n        :rtype: dict\n        :raises tower_cli.exceptions.CannotStartJob: When the project cannot be updated.\n\n        =====API DOCS=====", "id": "f3383:c0:m2"}
{"signature": "@resources.command<EOL><INDENT>@click.option('<STR_LIT>', is_flag=True, default=False,<EOL>help='<STR_LIT>')<EOL>def status(self, pk=None, detail=False, **kwargs):<DEDENT>", "body": "<EOL>job = self.last_job_data(pk, **kwargs)<EOL>if detail:<EOL><INDENT>return job<EOL><DEDENT>return {<EOL>'<STR_LIT>': job['<STR_LIT>'],<EOL>'<STR_LIT>': job['<STR_LIT>'],<EOL>'<STR_LIT:status>': job['<STR_LIT:status>'],<EOL>}<EOL>", "docstring": "Print the status of the most recent update.\n\n        =====API DOCS=====\n        Print the status of the most recent update.\n\n        :param pk: Primary key of the resource to retrieve status from.\n        :type pk: int\n        :param detail: Flag that if set, return the full JSON of the job resource rather than a status summary.\n        :type detail: bool\n        :param `**kwargs`: Keyword arguments used to look up resource object to retrieve status from if ``pk``\n                           is not provided.\n        :returns: full loaded JSON of the specified unified job if ``detail`` flag is on; trimed JSON containing\n                  only \"elapsed\", \"failed\" and \"status\" fields of the unified job if ``detail`` flag is off.\n        :rtype: dict\n        =====API DOCS=====", "id": "f3383:c0:m3"}
{"signature": "@resources.command<EOL><INDENT>@unified_job_template_options<EOL>@click.argument('<STR_LIT>', type=types.Related('<STR_LIT>'))<EOL>@click.argument('<STR_LIT>', type=types.Related('<STR_LIT>'), required=False)<EOL>def associate_always_node(self, parent, child=None, **kwargs):<DEDENT>", "body": "return self._assoc_or_create('<STR_LIT>', parent, child, **kwargs)<EOL>", "docstring": "Add a node to always run after the parent is finished.\n\n        =====API DOCS=====\n        Add a node to always run after the parent is finished.\n\n        :param parent: Primary key of parent node to associate always node to.\n        :type parent: int\n        :param child: Primary key of child node to be associated.\n        :type child: int\n        :param `**kwargs`: Fields used to create child node if ``child`` is not provided.\n        :returns: Dictionary of only one key \"changed\", which indicates whether the association succeeded.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3384:c0:m10"}
{"signature": "@resources.command(use_fields_as_options=False)<EOL><INDENT>@click.argument('<STR_LIT>', type=types.Related('<STR_LIT>'))<EOL>@click.argument('<STR_LIT>', type=types.Related('<STR_LIT>'))<EOL>def disassociate_failure_node(self, parent, child):<DEDENT>", "body": "return self._disassoc(<EOL>self._forward_rel_name('<STR_LIT>'), parent, child)<EOL>", "docstring": "Remove a failure node link.\n        The resulatant 2 nodes will both become root nodes.\n\n        =====API DOCS=====\n        Remove a failure node link.\n\n        :param parent: Primary key of parent node to disassociate failure node from.\n        :type parent: int\n        :param child: Primary key of child node to be disassociated.\n        :type child: int\n        :returns: Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3384:c0:m9"}
{"signature": "def _parent_filter(self, parent, relationship, **kwargs):", "body": "if parent is None or relationship is None:<EOL><INDENT>return {}<EOL><DEDENT>parent_filter_kwargs = {}<EOL>query_params = ((self._reverse_rel_name(relationship), parent),)<EOL>parent_filter_kwargs['<STR_LIT>'] = query_params<EOL>if kwargs.get('<STR_LIT>', None) is None:<EOL><INDENT>parent_data = self.read(pk=parent)['<STR_LIT>'][<NUM_LIT:0>]<EOL>parent_filter_kwargs['<STR_LIT>'] = parent_data[<EOL>'<STR_LIT>']<EOL><DEDENT>return parent_filter_kwargs<EOL>", "docstring": "Returns filtering parameters to limit a search to the children\nof a particular node by a particular relationship.", "id": "f3384:c0:m3"}
{"signature": "@resources.command(use_fields_as_options=False)<EOL><INDENT>@click.argument('<STR_LIT>', type=types.Related('<STR_LIT>'))<EOL>@click.argument('<STR_LIT>', type=types.Related('<STR_LIT>'))<EOL>def disassociate_always_node(self, parent, child):<DEDENT>", "body": "return self._disassoc(<EOL>self._forward_rel_name('<STR_LIT>'), parent, child)<EOL>", "docstring": "Remove an always node link.\n        The resultant 2 nodes will both become root nodes.\n\n        =====API DOCS=====\n        Remove an always node link.\n\n        :param parent: Primary key of parent node to disassociate always node from.\n        :type parent: int\n        :param child: Primary key of child node to be disassociated.\n        :type child: int\n        :returns: Dictionary of only one key \"changed\", which indicates whether the disassociation succeeded.\n        :rtype: dict\n\n        =====API DOCS=====", "id": "f3384:c0:m11"}
{"signature": "@resources.command(ignore_defaults=True)<EOL><INDENT>def list_facts(self, pk=None, **kwargs):<DEDENT>", "body": "res = self.get(pk=pk, **kwargs)<EOL>url = self.endpoint + '<STR_LIT>' % (res['<STR_LIT:id>'], '<STR_LIT>')<EOL>return client.get(url, params={}).json()<EOL>", "docstring": "Return a JSON object of all available facts of the given host.\n\n        Note global option --format is not available here, as the output would always be JSON-formatted.\n\n        =====API DOCS=====\n        List all available facts of the given host.\n\n        :param pk: Primary key of the target host.\n        :type pk: int\n        :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.\n        :returns: A JSON object of all available facts of the given host.\n        :rtype: dict\n        =====API DOCS=====", "id": "f3385:c0:m1"}
{"signature": "def register_get(t):", "body": "t.register_json('<STR_LIT>',<EOL>{<EOL>'<STR_LIT:id>': <NUM_LIT>, '<STR_LIT>': <NUM_LIT:1>, '<STR_LIT:status>': '<STR_LIT>',<EOL>'<STR_LIT>': <NUM_LIT>, '<STR_LIT>': <NUM_LIT:0.0>,<EOL>}, method='<STR_LIT:GET>')<EOL>", "docstring": "After starting job, the launch method may grab info about\n    the job just launched from this endpoint", "id": "f3399:m0"}
{"signature": "def jt_vars_registration(t, extra_vars):", "body": "t.register_json('<STR_LIT>', {<EOL>'<STR_LIT>': True,<EOL>'<STR_LIT>': extra_vars,<EOL>'<STR_LIT:id>': <NUM_LIT:1>,<EOL>'<STR_LIT:name>': '<STR_LIT>',<EOL>'<STR_LIT>': {'<STR_LIT>': '<STR_LIT>'},<EOL>})<EOL>register_get(t)<EOL>t.register_json('<STR_LIT>', {'<STR_LIT:version>': '<STR_LIT>'}, method='<STR_LIT:GET>')<EOL>t.register_json('<STR_LIT>', {}, method='<STR_LIT:GET>')<EOL>t.register_json('<STR_LIT>', {'<STR_LIT:id>': <NUM_LIT>},<EOL>method='<STR_LIT:POST>')<EOL>", "docstring": "Endpoints that are needed to get information from job template.\n    This particular combination also entails\n    1) version of Tower - 2.2.0\n    2) successful job launch, id=42\n    3) prompts user for variables on launch", "id": "f3399:m2"}
{"signature": "def setUp(self):", "body": "class BasicResource(models.Resource):<EOL><INDENT>endpoint = '<STR_LIT>'<EOL>name = models.Field(unique=True)<EOL><DEDENT>self.resource = BasicResource()<EOL>self.command = ResSubcommand(self.resource)<EOL>", "docstring": "Install a resource instance sufficient for testing common\n        things with subcommands.", "id": "f3414:c0:m0"}
{"signature": "def parse_requirements(filename):", "body": "reqs = []<EOL>version_spec_in_play = None<EOL>for line in open(filename, '<STR_LIT:r>').read().strip().split('<STR_LIT:\\n>'):<EOL><INDENT>if not line.strip():<EOL><INDENT>continue<EOL><DEDENT>if not line.startswith('<STR_LIT:#>'):<EOL><INDENT>reqs.append(line)<EOL>continue<EOL><DEDENT>match = re.search(r'<STR_LIT>'<EOL>r'<STR_LIT>', line)<EOL>if match:<EOL><INDENT>version_spec_in_play = match.groupdict()<EOL>for key in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>version_spec_in_play[key] = int(version_spec_in_play[key])<EOL><DEDENT>continue<EOL><DEDENT>if '<STR_LIT:U+0020>' not in line[<NUM_LIT:1>:].strip() and version_spec_in_play:<EOL><INDENT>package = line[<NUM_LIT:1>:].strip()<EOL>op = version_spec_in_play['<STR_LIT>']<EOL>vspec = (version_spec_in_play['<STR_LIT>'],<EOL>version_spec_in_play['<STR_LIT>'])<EOL>if '<STR_LIT:=>' in op and sys.version_info[<NUM_LIT:0>:<NUM_LIT:2>] == vspec:<EOL><INDENT>reqs.append(package)<EOL><DEDENT>elif '<STR_LIT:>>' in op and sys.version_info[<NUM_LIT:0>:<NUM_LIT:2>] > vspec:<EOL><INDENT>reqs.append(package)<EOL><DEDENT>elif '<STR_LIT:<>' in op and sys.version_info[<NUM_LIT:0>:<NUM_LIT:2>] < vspec:<EOL><INDENT>reqs.append(package)<EOL><DEDENT><DEDENT><DEDENT>return reqs<EOL>", "docstring": "Parse out a list of requirements from the given requirements\n    requirements file.", "id": "f3422:m0"}
{"signature": "@classmethod<EOL><INDENT>def keys(cls):<DEDENT>", "body": "return cls._item_dict.keys()<EOL>", "docstring": "Returns all of the Enum keys", "id": "f3426:c2:m5"}
{"signature": "@classmethod<EOL><INDENT>def lookup(cls, key, get=False):<DEDENT>", "body": "if get:<EOL><INDENT>item = cls._item_dict.get(key)<EOL>return item.name if item else key<EOL><DEDENT>return cls._item_dict[key].name<EOL>", "docstring": "Returns the label for a given Enum key", "id": "f3426:c2:m4"}
{"signature": "@classmethod<EOL><INDENT>def verbose(cls, key=False, default='<STR_LIT>'):<DEDENT>", "body": "if key is False:<EOL><INDENT>items = cls._item_dict.values()<EOL>return [(x.key, x.value) for x in sorted(items, key=lambda x:x.sort or x.key)]<EOL><DEDENT>item = cls._item_dict.get(key)<EOL>return item.value if item else default<EOL>", "docstring": "Returns the verbose name for a given enum value", "id": "f3426:c2:m8"}
{"signature": "@classmethod<EOL><INDENT>def values(cls):<DEDENT>", "body": "return [x.name for x in cls._item_dict.values()]<EOL>", "docstring": "Returns all of the Enum values", "id": "f3426:c2:m6"}
{"signature": "def balls(timeout=timeout):", "body": "rc = requests.get(messages_url, timeout=timeout)<EOL>rc.encoding = '<STR_LIT:utf-8>'  <EOL>rc = rc.text<EOL>data = re.findall('<STR_LIT>', rc)<EOL>balls = {}<EOL>for i in data:<EOL><INDENT>balls[int(i[<NUM_LIT:0>])] = i[<NUM_LIT:1>]<EOL><DEDENT>return balls<EOL>", "docstring": "Return all balls in dict {id0: ball0, id1: ball1}.", "id": "f3437:m6"}
{"signature": "@property<EOL><INDENT>def stadiums(self):<DEDENT>", "body": "if not self._stadiums:<EOL><INDENT>self._stadiums = stadiums()<EOL><DEDENT>return self._stadiums<EOL>", "docstring": "Return all stadiums in dict {id0: stadium0, id1: stadium1}.\n\n        :params year: Year.", "id": "f3437:c0:m11"}
{"signature": "def leagues(year=<NUM_LIT>, timeout=timeout):", "body": "rc = requests.get(messages_url, timeout=timeout)<EOL>rc.encoding = '<STR_LIT:utf-8>'  <EOL>rc = rc.text<EOL>data = re.findall('<STR_LIT>' % year, rc)<EOL>leagues = {}<EOL>for i in data:<EOL><INDENT>leagues[int(i[<NUM_LIT:0>])] = i[<NUM_LIT:1>]<EOL><DEDENT>return leagues<EOL>", "docstring": "Return all leagues in dict {id0: league0, id1: legaue1}.\n\n    :params year: Year.", "id": "f3437:m3"}
{"signature": "def bid(self, trade_id, bid, fast=False):", "body": "method = '<STR_LIT>'<EOL>url = '<STR_LIT>' % trade_id<EOL>if not fast:<EOL><INDENT>rc = self.tradeStatus(trade_id)[<NUM_LIT:0>]<EOL>if rc['<STR_LIT>'] >= bid or self.credits < bid:<EOL><INDENT>return False  <EOL><DEDENT><DEDENT>data = {'<STR_LIT>': bid}<EOL>try:<EOL><INDENT>rc = self.__request__(method, url, data=json.dumps(data), params={'<STR_LIT>': self.sku_b}, fast=fast)[<EOL>'<STR_LIT>'][<NUM_LIT:0>]<EOL><DEDENT>except PermissionDenied:  <EOL><INDENT>return False<EOL><DEDENT>if rc['<STR_LIT>'] == '<STR_LIT>' or (<EOL>rc['<STR_LIT>'] == '<STR_LIT>' and rc['<STR_LIT>'] == '<STR_LIT>'):  <EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Make a bid.\n\n        :params trade_id: Trade id.\n        :params bid: Amount of credits You want to spend.\n        :params fast: True for fastest bidding (skips trade status & credits check).", "id": "f3437:c0:m18"}
{"signature": "def clubStaff(self):", "body": "method = '<STR_LIT:GET>'<EOL>url = '<STR_LIT>'<EOL>rc = self.__request__(method, url)<EOL>return rc<EOL>", "docstring": "Return staff in your club.", "id": "f3437:c0:m20"}
{"signature": "def search(self, ctype, level=None, category=None, assetId=None, defId=None,<EOL>min_price=None, max_price=None, min_buy=None, max_buy=None,<EOL>league=None, club=None, position=None, zone=None, nationality=None,<EOL>rare=False, playStyle=None, start=<NUM_LIT:0>, page_size=itemsPerPage['<STR_LIT>'],<EOL>fast=False):", "body": "<EOL>method = '<STR_LIT:GET>'<EOL>url = '<STR_LIT>'<EOL>if start == <NUM_LIT:0>:<EOL><INDENT>events = [self.pin.event('<STR_LIT>', '<STR_LIT>'), self.pin.event('<STR_LIT>', '<STR_LIT>')]<EOL>self.pin.send(events, fast=fast)<EOL><DEDENT>params = {<EOL>'<STR_LIT:start>': start,<EOL>'<STR_LIT>': page_size,<EOL>'<STR_LIT:type>': ctype,  <EOL>}<EOL>if level:<EOL><INDENT>params['<STR_LIT>'] = level<EOL><DEDENT>if category:<EOL><INDENT>params['<STR_LIT>'] = category<EOL><DEDENT>if assetId:<EOL><INDENT>params['<STR_LIT>'] = assetId<EOL><DEDENT>if defId:<EOL><INDENT>params['<STR_LIT>'] = defId<EOL><DEDENT>if min_price:<EOL><INDENT>params['<STR_LIT>'] = min_price<EOL><DEDENT>if max_price:<EOL><INDENT>params['<STR_LIT>'] = max_price<EOL><DEDENT>if min_buy:<EOL><INDENT>params['<STR_LIT>'] = min_buy<EOL><DEDENT>if max_buy:<EOL><INDENT>params['<STR_LIT>'] = max_buy<EOL><DEDENT>if league:<EOL><INDENT>params['<STR_LIT>'] = league<EOL><DEDENT>if club:<EOL><INDENT>params['<STR_LIT>'] = club<EOL><DEDENT>if position:<EOL><INDENT>params['<STR_LIT>'] = position<EOL><DEDENT>if zone:<EOL><INDENT>params['<STR_LIT>'] = zone<EOL><DEDENT>if nationality:<EOL><INDENT>params['<STR_LIT>'] = nationality<EOL><DEDENT>if rare:<EOL><INDENT>params['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if playStyle:<EOL><INDENT>params['<STR_LIT>'] = playStyle<EOL><DEDENT>rc = self.__request__(method, url, params=params, fast=fast)<EOL>if start == <NUM_LIT:0>:<EOL><INDENT>events = [self.pin.event('<STR_LIT>', '<STR_LIT>'), self.pin.event('<STR_LIT>', '<STR_LIT>')]<EOL>self.pin.send(events, fast=fast)<EOL><DEDENT>return [itemParse(i) for i in rc.get('<STR_LIT>', ())]<EOL>", 
"docstring": "Prepare search request, send and return parsed data as a dict.\n\n        :param ctype: [development / ? / ?] Card type.\n        :param level: (optional) [?/?/gold] Card level.\n        :param category: (optional) [fitness/?/?] Card category.\n        :param assetId: (optional) Asset id.\n        :param defId: (optional) Definition id.\n        :param min_price: (optional) Minimal price.\n        :param max_price: (optional) Maximum price.\n        :param min_buy: (optional) Minimal buy now price.\n        :param max_buy: (optional) Maximum buy now price.\n        :param league: (optional) League id.\n        :param club: (optional) Club id.\n        :param position: (optional) Position.\n        :param nationality: (optional) Nation id.\n        :param rare: (optional) [boolean] True for searching special cards.\n        :param playStyle: (optional) Play style.\n        :param start: (optional) Start page sent to server so it supposed to be 12/15, 24/30 etc. (default platform page_size*n)\n        :param page_size: (optional) Page size (items per page).", "id": "f3437:c0:m16"}
{"signature": "def stadiums(year=<NUM_LIT>, timeout=timeout):", "body": "rc = requests.get(messages_url, timeout=timeout)<EOL>rc.encoding = '<STR_LIT:utf-8>'  <EOL>rc = rc.text<EOL>data = re.findall('<STR_LIT>' % year, rc)<EOL>stadiums = {}<EOL>for i in data:<EOL><INDENT>stadiums[int(i[<NUM_LIT:0>])] = i[<NUM_LIT:1>]<EOL><DEDENT>return stadiums<EOL>", "docstring": "Return all stadium in dict {id0: stadium0, id1: stadium1}.\n\n    :params year: Year.", "id": "f3437:m5"}
{"signature": "def __sendToPile__(self, pile, trade_id=None, item_id=None):", "body": "method = '<STR_LIT>'<EOL>url = '<STR_LIT>'<EOL>if not isinstance(item_id, (list, tuple)):<EOL><INDENT>item_id = (item_id,)<EOL><DEDENT>data = {\"<STR_LIT>\": [{'<STR_LIT>': pile, '<STR_LIT:id>': str(i)} for i in item_id]}<EOL>rc = self.__request__(method, url, data=json.dumps(data))<EOL>if rc['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT:success>']:<EOL><INDENT>self.logger.info(\"<STR_LIT>\".format(trade_id, item_id, pile))<EOL><DEDENT>else:<EOL><INDENT>self.logger.error(\"<STR_LIT>\".format(trade_id, item_id, pile,<EOL>rc['<STR_LIT>'][<NUM_LIT:0>][<EOL>'<STR_LIT>']))<EOL><DEDENT>return rc['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT:success>']<EOL>", "docstring": "Send to pile.\n\n        :params trade_id: (optional?) Trade id.\n        :params item_id: Iteam id.", "id": "f3437:c0:m4"}
{"signature": "def sendToSbs(self, challenge_id, item_id):", "body": "<EOL>method = '<STR_LIT>'<EOL>url = '<STR_LIT>' % challenge_id<EOL>squad = self.sbsSquad(challenge_id)<EOL>players = []<EOL>moved = False<EOL>n = <NUM_LIT:0><EOL>for i in squad['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>if i['<STR_LIT>']['<STR_LIT:id>'] == item_id:  <EOL><INDENT>return False<EOL><DEDENT>if i['<STR_LIT>']['<STR_LIT:id>'] == <NUM_LIT:0> and not moved:<EOL><INDENT>i['<STR_LIT>']['<STR_LIT:id>'] = item_id<EOL>moved = True<EOL><DEDENT>players.append({\"<STR_LIT:index>\": n,<EOL>\"<STR_LIT>\": {\"<STR_LIT:id>\": i['<STR_LIT>']['<STR_LIT:id>'],<EOL>\"<STR_LIT>\": False}})<EOL>n += <NUM_LIT:1><EOL><DEDENT>data = {'<STR_LIT>': players}<EOL>if not moved:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>self.__request__(method, url, data=json.dumps(data))<EOL>return True<EOL><DEDENT>", "docstring": "Send card FROM CLUB to first free slot in sbs squad.", "id": "f3437:c0:m35"}
{"signature": "def tradepileClear(self):", "body": "method = '<STR_LIT>'<EOL>url = '<STR_LIT>'<EOL>self.__request__(method, url)<EOL>", "docstring": "Removes all sold items from tradepile.", "id": "f3437:c0:m31"}
{"signature": "def baseId(self, *args, **kwargs):", "body": "return baseId(*args, **kwargs)<EOL>", "docstring": "Calculate base id and version from a resource id.", "id": "f3437:c0:m13"}
{"signature": "def clubConsumables(self, fast=False):", "body": "method = '<STR_LIT:GET>'<EOL>url = '<STR_LIT>'<EOL>rc = self.__request__(method, url)<EOL>events = [self.pin.event('<STR_LIT>', '<STR_LIT>')]<EOL>self.pin.send(events, fast=fast)<EOL>events = [self.pin.event('<STR_LIT>', '<STR_LIT>')]<EOL>self.pin.send(events, fast=fast)<EOL>events = [self.pin.event('<STR_LIT>', '<STR_LIT>')]<EOL>self.pin.send(events, fast=fast)<EOL>return [itemParse(i) for i in rc.get('<STR_LIT>', ())]<EOL>", "docstring": "Return all consumables from club.", "id": "f3437:c0:m21"}
{"signature": "@property<EOL><INDENT>def playstyles(self, year=<NUM_LIT>):<DEDENT>", "body": "if not self._playstyles:<EOL><INDENT>self._playstyles = playstyles()<EOL><DEDENT>return self._playstyles<EOL>", "docstring": "Return all playstyles in dict {id0: playstyle0, id1: playstyle1}.\n\n        :params year: Year.", "id": "f3437:c0:m7"}
{"signature": "@property<EOL><INDENT>def teams(self, year=<NUM_LIT>):<DEDENT>", "body": "if year not in self._teams:<EOL><INDENT>self._teams[year] = teams(year)<EOL><DEDENT>return self._teams[year]<EOL>", "docstring": "Return all teams in dict {id0: team0, id1: team1}.\n\n        :params year: Year.", "id": "f3437:c0:m10"}
{"signature": "def tradepile(self):", "body": "method = '<STR_LIT:GET>'<EOL>url = '<STR_LIT>'<EOL>rc = self.__request__(method, url)<EOL>events = [self.pin.event('<STR_LIT>', '<STR_LIT>'), self.pin.event('<STR_LIT>', '<STR_LIT>')]<EOL>if rc.get('<STR_LIT>'):<EOL><INDENT>events.append(self.pin.event('<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>self.pin.send(events)<EOL>return [itemParse(i) for i in rc.get('<STR_LIT>', ())]<EOL>", "docstring": "Return items in tradepile.", "id": "f3437:c0:m24"}
{"signature": "def saveSession(self):", "body": "if self.cookies_file:<EOL><INDENT>self.r.cookies.save(ignore_discard=True)<EOL>with open(self.token_file, '<STR_LIT:w>') as f:<EOL><INDENT>f.write('<STR_LIT>' % (self.token_type, self.access_token))<EOL><DEDENT><DEDENT>", "docstring": "Save cookies/session.", "id": "f3437:c0:m12"}
{"signature": "def messages(self):", "body": "method = '<STR_LIT:GET>'<EOL>url = '<STR_LIT>'<EOL>rc = self.__request__(method, url)<EOL>return rc['<STR_LIT>']<EOL>", "docstring": "Return active messages.", "id": "f3437:c0:m40"}
{"signature": "def cardInfo(self, resource_id):", "body": "<EOL>base_id = baseId(resource_id)<EOL>if base_id in self.players:<EOL><INDENT>return self.players[base_id]<EOL><DEDENT>else:  <EOL><INDENT>url = '<STR_LIT>'.format(card_info_url, base_id)<EOL>return requests.get(url, timeout=self.timeout).json()<EOL><DEDENT>", "docstring": "Return card info.\n\n        :params resource_id: Resource id.", "id": "f3437:c0:m14"}
{"signature": "def watchlistDelete(self, trade_id):", "body": "method = '<STR_LIT>'<EOL>url = '<STR_LIT>'<EOL>if not isinstance(trade_id, (list, tuple)):<EOL><INDENT>trade_id = (trade_id,)<EOL><DEDENT>trade_id = (str(i) for i in trade_id)<EOL>params = {'<STR_LIT>': '<STR_LIT:U+002C>'.join(trade_id)}<EOL>self.__request__(method, url, params=params)  <EOL>return True<EOL>", "docstring": "Remove cards from watchlist.\n\n        :params trade_id: Trade id.", "id": "f3437:c0:m29"}
{"signature": "def playstyles(year=<NUM_LIT>, timeout=timeout):", "body": "rc = requests.get(messages_url, timeout=timeout)<EOL>rc.encoding = '<STR_LIT:utf-8>'  <EOL>rc = rc.text<EOL>data = re.findall('<STR_LIT>' % year, rc)<EOL>playstyles = {}<EOL>for i in data:<EOL><INDENT>playstyles[int(i[<NUM_LIT:0>])] = i[<NUM_LIT:1>]<EOL><DEDENT>return playstyles<EOL>", "docstring": "Return all playstyles in dict {id0: playstyle0, id1: playstyle1}.\n\n    :params year: Year.", "id": "f3437:m8"}
{"signature": "def build_options(self):", "body": "return None<EOL>", "docstring": "Default value for optional column configuration. Only child\nclasses will have non-None values for this field. Value\ndepends on child class implementation.\n\nNote: Some child classes do not require any extra options\n(e.g. OrganicChemicalFormula), in that case, this implementation\nwill be invoked and no options will be present in the dictionary.\n\n:return: Options dictionary, or None if not implemented in child\n:rtype: dict or None", "id": "f3455:c0:m2"}
{"signature": "def to_dict(self):", "body": "return {<EOL>\"<STR_LIT:type>\": self.type,<EOL>\"<STR_LIT:name>\": self.name,<EOL>\"<STR_LIT>\": self.group_by_key,<EOL>\"<STR_LIT>\": self.role,<EOL>\"<STR_LIT>\": self.units,<EOL>\"<STR_LIT>\": self.build_options()<EOL>}<EOL>", "docstring": "Converts the column to a dictionary representation accepted\nby the Citrination server.\n\n:return: Dictionary with basic options, plus any column type specific\n    options held under the \"options\" key\n:rtype: dict", "id": "f3455:c0:m1"}
{"signature": "def __init__(self, name, role, group_by_key=False, units=None, length=None):", "body": "super(VectorColumn, self).__init__(name=name,<EOL>role=role,<EOL>group_by_key=group_by_key,<EOL>units=units)<EOL>self.length = length<EOL>", "docstring": "Constructor.\n\n:param name: The name of the column\n:type name: str\n:param role: The role the column will play in machine learning:\n               \"Input\"\n               \"Output\"\n               \"Latent Variable\"\n               \"Ignore\"\n:type role: str\n:param group_by_key: Whether or not this column should be used for\n    grouping during cross validation\n:type group_by_key: bool\n:param units: Optionally, the units for this column\n:type units: str\n:param length: The length of vectors in this column\n:type length: int", "id": "f3457:c0:m0"}
{"signature": "def __init__(self, key, value, loss=None):", "body": "self._key = key<EOL>self._value = value<EOL>self._loss = loss<EOL>", "docstring": "Constructor.\n\n:param key: The descriptor key for the prediction\n:type key: str\n:param value: The predicted value\n:type value: str or float\n:param loss: The loss for the prediction\n:type loss: float", "id": "f3467:c0:m0"}
{"signature": "def get_projection(self, key):", "body": "return self._projections.get(key)<EOL>", "docstring": "Retrieves the projection registered under a particular\ndescriptor key.\n\n:param key: A descriptor key\n:return: A :class:`Projection`", "id": "f3468:c0:m3"}
{"signature": "def projections(self):", "body": "return self._projections.keys()<EOL>", "docstring": "List the descriptor keys with registered projections.\n\n:return: List of descriptor keys", "id": "f3468:c0:m2"}
{"signature": "def add_projection(self, key, projection):", "body": "self._projections[key] = projection<EOL>", "docstring": "Register a projection under a descriptor key.\n\n:param key: The descriptor key for the projection\n:type key: str\n:param projection: The projection for the provided descriptor key\n:type projection: :class:`Projection`", "id": "f3468:c0:m1"}
{"signature": "def check_predict_status(self, view_id, predict_request_id):", "body": "failure_message = \"<STR_LIT>\"<EOL>bare_response = self._get_success_json(self._get(<EOL>'<STR_LIT>' + str(view_id) + '<STR_LIT>' + str(predict_request_id) + '<STR_LIT>',<EOL>None, failure_message=failure_message))<EOL>result = bare_response[\"<STR_LIT:data>\"]<EOL>return result<EOL>", "docstring": "Returns a string indicating the status of the prediction job\n\n:param view_id: The data view id returned from data view create\n:param predict_request_id: The id returned from predict\n:return: Status data, also includes results if state is finished", "id": "f3469:c0:m9"}
{"signature": "def predict(self, data_view_id, candidates, method=\"<STR_LIT>\", use_prior=True):", "body": "uid = self.submit_predict_request(data_view_id, candidates, method, use_prior)<EOL>while self.check_predict_status(data_view_id, uid)['<STR_LIT:status>'] not in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>time.sleep(<NUM_LIT:1>)<EOL><DEDENT>result = self.check_predict_status(data_view_id, uid)<EOL>if result[\"<STR_LIT:status>\"] == \"<STR_LIT>\":<EOL><INDENT>paired = zip(result[\"<STR_LIT>\"][\"<STR_LIT>\"], result[\"<STR_LIT>\"][\"<STR_LIT>\"])<EOL>prediction_result_format = [{k: (p[<NUM_LIT:0>][k], p[<NUM_LIT:1>][k]) for k in p[<NUM_LIT:0>].keys()} for p in paired]<EOL>return list(map(<EOL>lambda c: _get_prediction_result_from_candidate(c), prediction_result_format<EOL>))<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\".format(uid, result[\"<STR_LIT:status>\"])<EOL>)<EOL><DEDENT>", "docstring": "Predict endpoint. This simply wraps the async methods (submit and poll for status/results).\n\n:param data_view_id: The ID of the data view to use for prediction\n:type data_view_id: str\n:param candidates: A list of candidates to make predictions on\n:type candidates: list of dicts\n:param method: Method for propagating predictions through model graphs. \"scalar\" uses linearized uncertainty\npropagation, whereas \"scalar_from_distribution\" still returns scalar predictions but uses sampling to\npropagate uncertainty without a linear approximation.\n:type method: str (\"scalar\" or \"scalar_from_distribution\")\n:param use_prior:  Whether to apply prior values implied by the property descriptors\n:type use_prior: bool\n:return: The results of the prediction\n:rtype: list of :class:`PredictionResult`", "id": "f3469:c0:m2"}
{"signature": "def _data_analysis(self, data_view_id):", "body": "failure_message = \"<STR_LIT>\".format(data_view_id)<EOL>return self._get_success_json(self._get(routes.data_analysis(data_view_id), failure_message=failure_message))<EOL>", "docstring": "Data analysis endpoint.\n\n:param data_view_id: The model identifier (id number for data views)\n:type data_view_id: str\n:return: dictionary containing information about the data, e.g. dCorr and tsne", "id": "f3469:c0:m6"}
{"signature": "def get_data_view(self, data_view_id):", "body": "url = routes.get_data_view(data_view_id)<EOL>response = self._get(url).json()<EOL>result = response[\"<STR_LIT:data>\"][\"<STR_LIT>\"]<EOL>datasets_list = []<EOL>for dataset in result[\"<STR_LIT>\"]:<EOL><INDENT>datasets_list.append(Dataset(<EOL>name=dataset[\"<STR_LIT:name>\"],<EOL>id=dataset[\"<STR_LIT:id>\"],<EOL>description=dataset[\"<STR_LIT:description>\"]<EOL>))<EOL><DEDENT>columns_list = []<EOL>for column in result[\"<STR_LIT>\"]:<EOL><INDENT>columns_list.append(ColumnFactory.from_dict(column))<EOL><DEDENT>return DataView(<EOL>view_id=data_view_id,<EOL>name=result[\"<STR_LIT:name>\"],<EOL>description=result[\"<STR_LIT:description>\"],<EOL>datasets=datasets_list,<EOL>columns=columns_list,<EOL>)<EOL>", "docstring": "Retrieves a summary of information for a given data view\n    - view id\n    - name\n    - description\n    - columns\n\n:param data_view_id: The ID number of the data view to which the\n    run belongs, as a string\n:type data_view_id: str", "id": "f3469:c0:m13"}
{"signature": "def get_design_run_status(self, data_view_id, run_uuid):", "body": "url = routes.get_data_view_design_status(data_view_id, run_uuid)<EOL>response = self._get(url).json()<EOL>status = response[\"<STR_LIT:data>\"]<EOL>return ProcessStatus(<EOL>result=status.get(\"<STR_LIT:result>\"),<EOL>progress=status.get(\"<STR_LIT>\"),<EOL>status=status.get(\"<STR_LIT:status>\"),<EOL>messages=status.get(\"<STR_LIT>\")<EOL>)<EOL>", "docstring": "Retrieves the status of an in progress or completed design run\n\n:param data_view_id: The ID number of the data view to which the\n    run belongs, as a string\n:type data_view_id: str\n:param run_uuid: The UUID of the design run to retrieve status for\n:type run_uuid: str\n:return: A :class:`ProcessStatus` object", "id": "f3469:c0:m11"}
{"signature": "def is_ready(self):", "body": "return self.ready == True<EOL>", "docstring": "Indicates whether or not the service is ready to be used.\n\n:return: A boolean\n:rtype: bool", "id": "f3473:c0:m14"}
{"signature": "def __init__(self, xs, ys, responses, tags, uids):", "body": "self._xs = xs<EOL>self._ys = ys<EOL>self._responses = responses<EOL>self._tags = tags<EOL>self._uids = uids<EOL>", "docstring": "Constructor.\n\n:param xs: A list of x values of the projection.\n:type xs: list of floats\n:param ys: A list of y values of the projection.\n:type ys: list of floats\n:param responses: A list of z values of the projection.\n:type responses: list of floats\n:param tags: A list of tags for the projected points\n:type tags: list of strings\n:param uids: A list of record UIDs for the projected points\n:type uids: list of strings", "id": "f3474:c0:m0"}
{"signature": "def get_data_view_status(data_view_id):", "body": "return \"<STR_LIT>\".format(data_view_id)<EOL>", "docstring": "URL for retrieving the statuses of all services\nassociated with a data view.\n\n:param data_view_id: The ID of the desired data views\n:type data_view_id: str", "id": "f3475:m7"}
{"signature": "def __init__(self, title, normalized_progress, subtitle=None, subevent=None):", "body": "self._title = title<EOL>self._subtitle = subtitle<EOL>self._subevent = subevent<EOL>self._normalized_progress = normalized_progress<EOL>", "docstring": "Constructor.\n\n:param title: The title of the event\n:type title: str\n:param subtitle: More detail about the event\n:type subtitle: str\n:param subevent: An event object describing the current state of the service's\n    progress toward readiness\n:type subevent: Event\n:param normalized_progress: The fractional representation of the status of the event\n:type normalized_progress: float", "id": "f3476:c0:m0"}
{"signature": "def get_value(self, key):", "body": "try:<EOL><INDENT>return self._values[key]<EOL><DEDENT>except KeyError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Retrieves a predicted value.\n\n:param key: A descriptor key for a registered predicted value.\n:type key: str\n:return: The value stored at the provided descriptor key. None if no key is provided.\n:rtype: :class:`PredictedValue`", "id": "f3477:c0:m2"}
{"signature": "def __init__(self, result, progress, status, messages=None):", "body": "self._status = status<EOL>self._result = result<EOL>self._progress = progress<EOL>self._messages = messages<EOL>", "docstring": "Constructor.\n\n:param result: The result of the process\n:type result: any\n:param progress: The progress of the process as as percentage\n:type progress: int\n:param status: The status string for the process\n:type status: str\n:param messages: A list of messages representing the steps the process\n    has already progressed through\n:type messages: list of str", "id": "f3478:c0:m0"}
{"signature": "def __init__(self, uuid):", "body": "self._uuid = uuid<EOL>", "docstring": "Constructor.\n\n:param uuid: The UUID of an in progress design run.\n:type uuid: str", "id": "f3480:c0:m0"}
{"signature": "def __init__(self, best_materials, next_experiments):", "body": "self._best_materials = best_materials<EOL>self._next_experiments = next_experiments<EOL>", "docstring": "Constructor.\n\n:param best_materials: An array of candidate dictionaries\n:type best_materials: list of dictionaries\n:param next_experiments: An array of candidate dictionaries\n:type next_experiments: list of dictionaries", "id": "f3481:c0:m0"}
{"signature": "def __init__(self, name, accepted_categories):", "body": "self._type = \"<STR_LIT>\"<EOL>self._name = name<EOL>self._categories = accepted_categories<EOL>", "docstring": "Constructor.\n\n:param name: The name of the column in the data\n    view to which this constraint should be applied\n:type name: str\n:param accepted_categories: An array of categories to constrain the name to\n:type accepted_categories: list of str", "id": "f3486:c0:m0"}
{"signature": "def __init__(self, name, elements, minimum, maximum):", "body": "if not <NUM_LIT:0> <= minimum <= <NUM_LIT:100>:<EOL><INDENT>raise CitrinationClientError(\"<STR_LIT>\")<EOL><DEDENT>if not <NUM_LIT:0> <= maximum <= <NUM_LIT:100>:<EOL><INDENT>raise CitrinationClientError(\"<STR_LIT>\")<EOL><DEDENT>if not maximum >= minimum:<EOL><INDENT>raise CitrinationClientError(\"<STR_LIT>\")<EOL><DEDENT>self._type = \"<STR_LIT>\"<EOL>self._elements = elements<EOL>self._name = name<EOL>self._min = minimum<EOL>self._max = maximum<EOL>", "docstring": "Constructor.\n\n:param name: The name of the column in the data\n    view to which this constraint should be applied\n:type name: str\n:param elements: An array of element abbreviations as\n    strings, e.g. [\"Mg\", \"C\"]\n:type elements: list of str\n:param minimum: The minimum value (<= 100) as a percentage\n    at which the specified elements should appear in\n    candidate compositions\n:type minimum: float\n:param maximum: The maximum value (<= 100) as a percentage\n    at which the specified elements should appear in\n    candidate compositions\n:type maximum: float", "id": "f3490:c0:m0"}
{"signature": "def __init__(self, name, value=None):", "body": "self._type = \"<STR_LIT>\"        <EOL>self._name = name<EOL>self._value = value<EOL>", "docstring": "Constructor.\n\n:param name: The name of the column in the data\n    view to which this constraint should be applied\n:type name: str\n:param value: The value the column should be constrained to\n:type value: float", "id": "f3491:c0:m0"}
{"signature": "def _get_qualified_route(self, route):", "body": "return \"<STR_LIT>\".format(self.api_url, route)<EOL>", "docstring": "Get a fully qualified api route.\n:param route: the route (e.g., /model)\n:return: the fully qualified route (e.g., https://citrination.com/model)", "id": "f3496:c0:m3"}
{"signature": "def _delete(self, route, headers=None, failure_message=None):", "body": "headers = self._get_headers(headers)<EOL>response_lambda = (lambda: requests.delete(<EOL>self._get_qualified_route(route), headers=headers, verify=False, proxies=self.proxies)<EOL>)<EOL>response = check_for_rate_limiting(response_lambda(), response_lambda)<EOL>return self._handle_response(response, failure_message)<EOL>", "docstring": "Execute a delete request and return the result\n:param headers:\n:return:", "id": "f3496:c0:m13"}
{"signature": "def _post(self, route, data, headers=None, failure_message=None):", "body": "headers = self._get_headers(headers)<EOL>response_lambda = (<EOL>lambda: requests.post(<EOL>self._get_qualified_route(route), headers=headers, data=data, verify=False, proxies=self.proxies<EOL>)<EOL>)<EOL>response = check_for_rate_limiting(response_lambda(), response_lambda)<EOL>return self._handle_response(response, failure_message)<EOL>", "docstring": "Execute a post request and return the result\n:param data:\n:param headers:\n:return:", "id": "f3496:c0:m8"}
{"signature": "def _get(self, route, headers=None, failure_message=None):", "body": "headers = self._get_headers(headers)<EOL>response_lambda = (<EOL>lambda: requests.get(self._get_qualified_route(route), headers=headers, verify=False, proxies=self.proxies)<EOL>)<EOL>response = check_for_rate_limiting(response_lambda(), response_lambda)<EOL>return self._handle_response(response, failure_message)<EOL>", "docstring": "Execute a get request and return the result\n:param headers:\n:return:", "id": "f3496:c0:m6"}
{"signature": "def _put(self, route, data, headers=None, failure_message=None):", "body": "headers = self._get_headers(headers)<EOL>response_lambda = (<EOL>lambda: requests.put(<EOL>self._get_qualified_route(route), headers=headers, data=data, verify=False, proxies=self.proxies<EOL>)<EOL>)<EOL>response = check_for_rate_limiting(response_lambda(), response_lambda)<EOL>return self._handle_response(response, failure_message)<EOL>", "docstring": "Execute a put request and return the result\n:param data:\n:param headers:\n:return:", "id": "f3496:c0:m10"}
{"signature": "def create_dataset(self, name=None, description=None, public=False):", "body": "data = {<EOL>\"<STR_LIT>\": _convert_bool_to_public_value(public)<EOL>}<EOL>if name:<EOL><INDENT>data[\"<STR_LIT:name>\"] = name<EOL><DEDENT>if description:<EOL><INDENT>data[\"<STR_LIT:description>\"] = description<EOL><DEDENT>dataset = {\"<STR_LIT>\": data}<EOL>failure_message = \"<STR_LIT>\"<EOL>result = self._get_success_json(self._post_json(routes.create_dataset(), dataset, failure_message=failure_message))<EOL>return _dataset_from_response_dict(result)<EOL>", "docstring": "Create a new data set.\n\n:param name: name of the dataset\n:type name: str\n:param description: description for the dataset\n:type description: str\n:param public: A boolean indicating whether or not the dataset should be public.\n:type public: bool\n:return: The newly created dataset.\n:rtype: :class:`Dataset`", "id": "f3511:c0:m9"}
{"signature": "def download_files(self, dataset_files, destination='<STR_LIT:.>'):", "body": "if not isinstance(dataset_files, list):<EOL><INDENT>dataset_files = [dataset_files]<EOL><DEDENT>for f in dataset_files:<EOL><INDENT>filename = f.path.lstrip('<STR_LIT:/>')<EOL>local_path = os.path.join(destination, filename)<EOL>if not os.path.isdir(os.path.dirname(local_path)):<EOL><INDENT>os.makedirs(os.path.dirname(local_path))<EOL><DEDENT>r = requests.get(f.url, stream=True)<EOL>with open(local_path, '<STR_LIT:wb>') as output_file:<EOL><INDENT>shutil.copyfileobj(r.raw, output_file)<EOL><DEDENT><DEDENT>", "docstring": "Downloads file(s) to a local destination.\n\n:param dataset_files:\n:type dataset_files: list of :class: `DatasetFile`\n:param destination: The path to the desired local download destination\n:type destination: str\n:param chunk: Whether or not to chunk the file. Default True\n:type chunk: bool", "id": "f3511:c0:m7"}
{"signature": "def list_files(self, dataset_id, glob=\"<STR_LIT:.>\", is_dir=False):", "body": "data = {<EOL>\"<STR_LIT:list>\": {<EOL>\"<STR_LIT>\": glob,<EOL>\"<STR_LIT>\": is_dir<EOL>}<EOL>}<EOL>return self._get_success_json(self._post_json(routes.list_files(dataset_id), data, failure_message=\"<STR_LIT>\".format(dataset_id)))['<STR_LIT>']<EOL>", "docstring": "List matched filenames in a dataset on Citrination.\n\n:param dataset_id: The ID of the dataset to search for files.\n:type dataset_id: int\n:param glob: A pattern which will be matched against files in the dataset.\n:type glob: str\n:param is_dir: A boolean indicating whether or not the pattern should match against the beginning of paths in the dataset.\n:type is_dir: bool\n:return: A list of filepaths in the dataset matching the provided glob.\n:rtype: list of strings", "id": "f3511:c0:m2"}
{"signature": "def upload(self, dataset_id, source_path, dest_path=None):", "body": "upload_result = UploadResult()<EOL>source_path = str(source_path)<EOL>if not dest_path:<EOL><INDENT>dest_path = source_path<EOL><DEDENT>else:<EOL><INDENT>dest_path = str(dest_path)<EOL><DEDENT>if os.path.isdir(source_path):<EOL><INDENT>for path, subdirs, files in os.walk(source_path):<EOL><INDENT>relative_path = os.path.relpath(path, source_path)<EOL>current_dest_prefix = dest_path<EOL>if relative_path is not \"<STR_LIT:.>\":<EOL><INDENT>current_dest_prefix = os.path.join(current_dest_prefix, relative_path)<EOL><DEDENT>for name in files:<EOL><INDENT>current_dest_path = os.path.join(current_dest_prefix, name)<EOL>current_source_path = os.path.join(path, name)<EOL>try:<EOL><INDENT>if self.upload(dataset_id, current_source_path, current_dest_path).successful():<EOL><INDENT>upload_result.add_success(current_source_path)<EOL><DEDENT>else:<EOL><INDENT>upload_result.add_failure(current_source_path,\"<STR_LIT>\")<EOL><DEDENT><DEDENT>except (CitrinationClientError, ValueError) as e:<EOL><INDENT>upload_result.add_failure(current_source_path, str(e))<EOL><DEDENT><DEDENT><DEDENT>return upload_result<EOL><DEDENT>elif os.path.isfile(source_path):<EOL><INDENT>file_data = { \"<STR_LIT>\": str(dest_path), \"<STR_LIT>\": str(source_path)}<EOL>j = self._get_success_json(self._post_json(routes.upload_to_dataset(dataset_id), data=file_data))<EOL>s3url = _get_s3_presigned_url(j)<EOL>with open(source_path, '<STR_LIT:rb>') as f:<EOL><INDENT>if os.stat(source_path).st_size == <NUM_LIT:0>:<EOL><INDENT>data = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>data = f<EOL><DEDENT>r = requests.put(s3url, data=data, headers=j[\"<STR_LIT>\"])<EOL>if r.status_code == <NUM_LIT:200>:<EOL><INDENT>data = {'<STR_LIT>': j['<STR_LIT:url>']['<STR_LIT:path>'], '<STR_LIT>': j['<STR_LIT>']}<EOL>self._post_json(routes.update_file(j['<STR_LIT>']), data=data)<EOL>upload_result.add_success(source_path)<EOL>return upload_result<EOL><DEDENT>else:<EOL><INDENT>raise CitrinationClientError(\"<STR_LIT>\".format(source_path))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(source_path))<EOL><DEDENT>", "docstring": "Upload a file, specifying source and dest paths (acts as the scp command).\n\n:param source_path: The path to the file on the source host\n:type source_path: str\n:param dest_path: The path to the file where the contents of the upload will be written (on the dest host)\n:type dest_path: str\n:return: The result of the upload process\n:rtype: :class:`UploadResult`", "id": "f3511:c0:m1"}
{"signature": "def successful(self):", "body": "return len(self._failures) == <NUM_LIT:0><EOL>", "docstring": "Indicates whether or not the entire upload was successful.\n\n:return: Whether or not the upload was successful\n:rtype: bool", "id": "f3514:c0:m3"}
{"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, id=None, is_featured=None,<EOL>name=None, description=None, owner=None, email=None, updated_at=None, query=None, **kwargs):", "body": "self._logic = None<EOL>self.logic = logic<EOL>self._weight = None<EOL>self.weight = weight<EOL>self._simple = None<EOL>self.simple = simple<EOL>self._simple_weight = None<EOL>self.simple_weight = simple_weight<EOL>self._id = None<EOL>self.id = id<EOL>self._is_featured = None<EOL>self.is_featured = is_featured<EOL>self._name = None<EOL>self.name = name<EOL>self._description = None<EOL>self.description = description<EOL>self._owner = None<EOL>self.owner = owner<EOL>self._email = None<EOL>self.email = email<EOL>self._updated_at = None<EOL>self.updated_at = updated_at<EOL>self._query = None<EOL>self.query = query<EOL>", "docstring": "Constructor.\n\n:param logic: The logic to apply to the query ('SHOULD', 'MUST', 'MUST_NOT', or 'OPTIONAL').\n:param weight: Weight for the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param id: One or more :class:`Filter` objects with filters against the id field.\n:param is_featured:  One or more :class:`BooleanFilter` objects with filters against the isFeatured field.\n:param name: One or more :class:`Filter` objects with filters against the name field.\n:param description: One or more :class:`Filter` objects with filters against the description field.\n:param owner: One or more :class:`Filter` objects with filters against the owner field.\n:param email: One or more :class:`Filter` objects with filters against the email field.\n:param updated_at: One or more :class:`Filter` objects with filters against the time that the dataset was last updated.\n:param query: One or more :class:`DatasetQuery` objects with nested queries.", "id": "f3518:c0:m0"}
{"signature": "def __init__(self, query=None, from_index=None, size=None, random_results=None, random_seed=None,<EOL>score_relevance=None, return_max_score=None, timeout=None, count_pifs=None, **kwargs):", "body": "super(DatasetReturningQuery, self).__init__(<EOL>query=query, from_index=from_index, size=size, random_results=random_results, random_seed=random_seed,<EOL>score_relevance=score_relevance, return_max_score=return_max_score, timeout=timeout, **kwargs)<EOL>self._count_pifs = None<EOL>self.count_pifs = count_pifs<EOL>", "docstring": "Constructor.\n\n:param query: One or more :class:`DataQuery` objects with the queries to run.\n:param from_index: Index of the first hit that should be returned.\n:param size: Total number of hits the should be returned.\n:param random_results: Whether to return a random set of records.\n:param random_seed: The random seed to use.\n:param score_relevance: Whether to use relevance scoring.\n:param return_max_score: Whether to return the maximum score.\n:param timeout: The number of milliseconds to wait for the query to execute.\n:param count_pifs: Whether to return counts of PIFs for each dataset.", "id": "f3519:c0:m0"}
{"signature": "def __init__(self, took=None, total_num_hits=None, max_score=None, hits=None, **kwargs):", "body": "super(DatasetSearchResult, self).__init__(<EOL>took=took, total_num_hits=total_num_hits, max_score=max_score,<EOL>hits=self._get_object(DatasetSearchHit, hits), **kwargs)<EOL>", "docstring": "Constructor.\n\n:param took: Number of milliseconds that the query took to execute.\n:param total_num_hits: Total number of hits.\n:param max_score: The maximum score.\n:param hits: List of :class:`DatasetSearchHit` objects.", "id": "f3520:c0:m0"}
{"signature": "def __init__(self, id=None, score=None, is_featured=None, name=None, description=None, owner=None, email=None, <EOL>num_pifs=None, updated_at=None, **kwargs):", "body": "self._id = None<EOL>self.id = id<EOL>self._score = None<EOL>self.score = score<EOL>self._is_featured = None<EOL>self.is_featured = is_featured<EOL>self._name = None<EOL>self.name = name<EOL>self._description = None<EOL>self.description = description<EOL>self._owner = None<EOL>self.owner = owner<EOL>self._email = None<EOL>self.email = email<EOL>self._num_pifs = None<EOL>self.num_pifs = num_pifs<EOL>self._updated_at = None<EOL>self.updated_at = updated_at<EOL>", "docstring": "Constructor.\n\n:param id: String with the ID of the record.\n:param score: Score with the relevancy of the result.\n:param is_featured: Whether the dataset is a featured one.\n:param name: Name of the dataset.\n:param description: Description of the dataset.\n:param owner: Name of the owner of the dataset.\n:param email: Email address of the owner of the dataset.\n:param num_pifs: Number of PIFs in the dataset.\n:param updated_at: String with the last time that the dataset was updated.", "id": "f3521:c0:m0"}
{"signature": "def __init__(self, took=None, results=None, **kwargs):", "body": "self._took = None<EOL>self.took = took<EOL>self._results = None<EOL>self.results = results<EOL>", "docstring": "Constructor.\n\n:param took: Number of milliseconds that the query took to execute.\n:param results: List of :class:`DatasetMultiSearchResultElement` objects.", "id": "f3523:c0:m0"}
{"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, id=None, name=None, content=None,<EOL>updated_at=None, query=None, **kwargs):", "body": "self._logic = None<EOL>self.logic = logic<EOL>self._weight = None<EOL>self.weight = weight<EOL>self._simple = None<EOL>self.simple = simple<EOL>self._simple_weight = None<EOL>self.simple_weight = simple_weight<EOL>self._id = None<EOL>self.id = id<EOL>self._name = None<EOL>self.name = name<EOL>self._content = None<EOL>self.content = content<EOL>self._updated_at = None<EOL>self.updated_at = updated_at<EOL>self._query = None<EOL>self.query = query<EOL>", "docstring": "Constructor.\n\n:param logic: The logic to apply to the query ('SHOULD', 'MUST', 'MUST_NOT', or 'OPTIONAL').\n:param weight: Weight for the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param id: One or more :class:`Filter` objects with filters against the id field.\n:param name: One or more :class:`Filter` objects with filters against the name field.\n:param content: One or more :class:`Filter` objects with filters against the content field.\n:param updated_at: One or more :class:`Filter` objects with filters against the time that the dataset was last updated.\n:param query: One or more :class:`DatasetQuery` objects with nested queries.", "id": "f3525:c0:m0"}
{"signature": "def __init__(self, took=None, results=None, **kwargs):", "body": "self._took = None<EOL>self.took = took<EOL>self._results = None<EOL>self.results = results<EOL>", "docstring": "Constructor.\n\n:param took: Number of milliseconds that the query took to execute.\n:param results: List of :class:`FileMultiSearchResultElement` objects.", "id": "f3527:c0:m0"}
{"signature": "def __init__(self, result=None, status=None, **kwargs):", "body": "self._result = None<EOL>self.result = result<EOL>self._status = None<EOL>self.status = status<EOL>", "docstring": "Constructor.\n\n:param result: A single :class:`FileSearchResult` object with the query results.\n:param status: 'SUCCESS', 'ERROR', or 'NOT_EXECUTED'.", "id": "f3529:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def _get_list(values):<DEDENT>", "body": "if values is None:<EOL><INDENT>return []<EOL><DEDENT>elif isinstance(values, list):<EOL><INDENT>return values<EOL><DEDENT>else:<EOL><INDENT>return [values]<EOL><DEDENT>", "docstring": "Helper method that wraps values in a list. If the input is a list then it is returned. If the input is None then an empty list is returned. For anything else, the input value is wrapped as a single-element list.\n\n:param values: Value to make sure exists in a list.\n:return: List with the input values.", "id": "f3532:c0:m9"}
{"signature": "def dataset_search(self, dataset_returning_query):", "body": "self._validate_search_query(dataset_returning_query)<EOL>return self._execute_search_query(<EOL>dataset_returning_query,<EOL>DatasetSearchResult<EOL>)<EOL>", "docstring": "Run a dataset query against Citrination.\n\n:param dataset_returning_query: :class:`DatasetReturningQuery` to execute.\n:type dataset_returning_query: :class:`DatasetReturningQuery`\n:return: Dataset search result object with the results of the query.\n:rtype: :class:`DatasetSearchResult`", "id": "f3532:c0:m4"}
{"signature": "def pif_multi_search(self, multi_query):", "body": "failure_message = \"<STR_LIT>\"<EOL>response_dict = self._get_success_json(<EOL>self._post(routes.pif_multi_search, data=json.dumps(multi_query, cls=QueryEncoder),<EOL>failure_message=failure_message))<EOL>return PifMultiSearchResult(**keys_to_snake_case(response_dict['<STR_LIT>']))<EOL>", "docstring": "Run each in a list of PIF queries against Citrination.\n\n:param multi_query: :class:`MultiQuery` object to execute.\n:return: :class:`PifMultiSearchResult` object with the results of the query.", "id": "f3532:c0:m7"}
{"signature": "def __init__(self, logic=None, weight=None, exists=None, equal=None, filter=None, **kwargs):", "body": "self._logic = None<EOL>self.logic = logic<EOL>self._weight = None<EOL>self.weight = weight<EOL>self._exists = None<EOL>self.exists = exists<EOL>self._equal = None<EOL>self.equal = equal<EOL>self._filter = None<EOL>self.filter = filter<EOL>", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight for the filter.\n:param exists: True/False to simply test whether the field exists and has a non-null value.\n:param equal: String with the phrase to match against.\n:param filter: List of :class:`BooleanFilter` objects with sub-filters.", "id": "f3533:c0:m0"}
{"signature": "def __init__(self, query=None, extraction_sort=None, from_index=None, size=None, random_results=None,<EOL>random_seed=None, score_relevance=None, return_max_score=None, timeout=None, **kwargs):", "body": "super(BaseReturningQuery, self).__init__(query=query, extraction_sort=extraction_sort, **kwargs)<EOL>if '<STR_LIT>' in '<STR_LIT>':<EOL><INDENT>self.from_index = kwargs['<STR_LIT>']<EOL><DEDENT>self._from = None<EOL>self.from_index = from_index<EOL>self._size = None<EOL>self.size = size<EOL>self._random_results = None<EOL>self.random_results = random_results<EOL>self._random_seed = None<EOL>self.random_seed = random_seed<EOL>self._score_relevance = None<EOL>self.score_relevance = score_relevance<EOL>self._return_max_score = None<EOL>self.return_max_score = return_max_score<EOL>self._timeout = None<EOL>self.timeout = timeout<EOL>", "docstring": "Base class for all queries against datasets and the items that they contain on Citrination.\n\n:param query: One or more :class:`DataQuery` objects with the queries to run.\n:param extraction_sort: A single :class:`ExtractionSort` object for sorting.\n:param from_index: Index of the first hit that should be returned.\n:param size: Total number of hits the should be returned.\n:param random_results: Whether to return a random set of records.\n:param random_seed: The random seed to use.\n:param score_relevance: Whether to use relevance scoring.\n:param return_max_score: Whether to return the maximum score.\n:param timeout: The number of milliseconds to wait for the query to execute.", "id": "f3534:c0:m0"}
{"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, dataset=None, system=None, file=None,<EOL>query=None, **kwargs):", "body": "self._logic = None<EOL>self.logic = logic<EOL>self._weight = None<EOL>self.weight = weight<EOL>self._simple = None<EOL>self.simple = simple<EOL>self._simple_weight = None<EOL>self.simple_weight = simple_weight<EOL>self._dataset = None<EOL>self.dataset = dataset<EOL>self._system = None<EOL>self.system = system<EOL>self._file = None<EOL>self.file = file<EOL>self._query = None<EOL>self.query = query<EOL>", "docstring": "Constructor.\n\n:param logic: The logic to apply to the query ('SHOULD', 'MUST', 'MUST_NOT', or 'OPTIONAL').\n:param weight: Weight for the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param dataset: One or more :class:`DatasetQuery` objects with queries against dataset metadata.\n:param system: One or more :class:`PifSystemQuery` objects with queries against PIF systems\n:param file: One or more :class:`FileQuery` objects with queries against file content or metadata.\n:type file: :class:`FileQuery`\n:param query: Nested list of :class:`DataQuery` objects.", "id": "f3537:c0:m0"}
{"signature": "def __init__(self, queries=None, **kwargs):", "body": "self._queries = None<EOL>self.queries = queries<EOL>", "docstring": "Constructor.\n\n:param queries: One or more queries to run.", "id": "f3538:c0:m0"}
{"signature": "def __init__(self, took=None, total_num_hits=None, max_score=None, hits=None, **kwargs):", "body": "self._took = None<EOL>self.took = took<EOL>self._total_num_hits = None<EOL>self.total_num_hits = total_num_hits<EOL>self._max_score = None<EOL>self.max_score = max_score<EOL>self._hits = None<EOL>self.hits = hits<EOL>", "docstring": "Constructor.\n\n:param took: Number of milliseconds that the query took to execute.\n:param total_num_hits: Total number of hits.\n:param max_score: The maximum score.\n:param hits: List of hits.", "id": "f3539:c0:m0"}
{"signature": "def __init__(self, sort=None, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None,<EOL>extract_all=None, extract_when_missing=None, length=None, offset=None, filter=None, **kwargs):", "body": "super(ChemicalFieldQuery, self).__init__(<EOL>sort=sort, weight=weight, logic=logic, simple=simple, simple_weight=simple_weight, extract_as=extract_as,<EOL>extract_all=extract_all, extract_when_missing=extract_when_missing, length=length, offset=offset, **kwargs)<EOL>self._filter = None<EOL>self.filter = filter<EOL>", "docstring": "Constructor.\n\n:param sort: ASCENDING or DESCENDING to set the sort order on this field.\n:param logic: Logic for this query. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight for the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param length: One or more :class:`FieldOperation` objects against the length field.\n:param offset: One or more :class:`FieldOperation` objects against the offset field.\n:param filter: One or more :class:`ChemicalFilter` objects against this field.", "id": "f3542:c0:m0"}
{"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None,<EOL>extract_all=None, extract_when_missing=None, tags=None, length=None, offset=None, element=None,<EOL>actual_weight_percent=None, actual_atomic_percent=None, ideal_weight_percent=None,<EOL>ideal_atomic_percent=None, query=None, **kwargs):", "body": "super(CompositionQuery, self).__init__(<EOL>logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,<EOL>extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,<EOL>offset=offset, **kwargs)<EOL>self._element = None<EOL>self.element = element<EOL>self._actual_weight_percent = None<EOL>self.actual_weight_percent = actual_weight_percent<EOL>self._actual_atomic_percent = None<EOL>self.actual_atomic_percent = actual_atomic_percent<EOL>self._ideal_weight_percent = None<EOL>self.ideal_weight_percent = ideal_weight_percent<EOL>self._ideal_atomic_percent = None<EOL>self.ideal_atomic_percent = ideal_atomic_percent<EOL>self._query = None<EOL>self.query = query<EOL>", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. 
This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param element: One or more :class:`ChemicalFieldQuery` operations against the element field.\n:param actual_weight_percent: One or more :class:`FieldQuery` operations against the actual weight percent field.\n:param actual_atomic_percent: One or more :class:`FieldQuery` operations against the actual atomic percent field.\n:param ideal_weight_percent: One or more :class:`FieldQuery` operations against the ideal weight percent field.\n:param ideal_atomic_percent: One or more :class:`FieldQuery` operations against the ideal atomic percent field.\n:param query: One or more :class:`CompositionQuery` objects with the nest queries.", "id": "f3543:c0:m0"}
{"signature": "def __init__(self, key=None, order=None, **kwargs):", "body": "self._key = None<EOL>self.key = key<EOL>self._order = None<EOL>self.order = order<EOL>", "docstring": "Constructor.\n\n:param key: String with the key that will be sorted on.\n:param order: The order to use. Either ASCENDING or DESCENDING.", "id": "f3545:c0:m0"}
{"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,<EOL>extract_when_missing=None, tags=None, length=None, offset=None, name=None, value=None,<EOL>query=None, **kwargs):", "body": "super(IdQuery, self).__init__(<EOL>logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,<EOL>extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,<EOL>offset=offset, **kwargs)<EOL>self._name = None<EOL>self.name = name<EOL>self._value = None<EOL>self.value = value<EOL>self._query = None<EOL>self.query = query<EOL>", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the query to run over all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param name: One or more :class:`FieldQuery` operations against the name field.\n:param value: One or more :class:`FieldQuery` operations against the value field.\n:param query: One or more :class:`IdQuery` objects with nested queries.", "id": "f3550:c0:m0"}
{"signature": "def __init__(self, sort=None, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None,<EOL>extract_all=None, extract_when_missing=None, length=None, offset=None, **kwargs):", "body": "self._sort = None<EOL>self.sort = sort<EOL>self._logic = None<EOL>self.logic = logic<EOL>self._weight = None<EOL>self.weight = weight<EOL>self._simple = None<EOL>self.simple = simple<EOL>self._simple_weight = None<EOL>self.simple_weight = simple_weight<EOL>self._extract_as = None<EOL>self.extract_as = extract_as<EOL>self._extract_all = None<EOL>self.extract_all = extract_all<EOL>self._extract_when_missing = None<EOL>self.extract_when_missing = extract_when_missing<EOL>self._length = None<EOL>self.length = length<EOL>self._offset = None<EOL>self.offset = offset<EOL>", "docstring": "Constructor.\n\n:param sort: ASCENDING or DESCENDING to set the sort order on this field.\n:param logic: Logic for this query. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight for the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.", "id": "f3551:c0:m0"}
{"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,<EOL>extract_when_missing=None, tags=None, length=None, offset=None, number=None, title=None,<EOL>caption=None, query=None, **kwargs):", "body": "super(DisplayItemQuery, self).__init__(<EOL>logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,<EOL>extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,<EOL>offset=offset, **kwargs)<EOL>self._title = None<EOL>self.title = title<EOL>self._number = None<EOL>self.number = number<EOL>self._caption = None<EOL>self.caption = caption<EOL>self._query = None<EOL>self.query = query<EOL>", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. 
This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param number: One or more :class:`FieldQuery` operations against the number field.\n:param title: One or more :class:`FieldQuery` operations against the title field.\n:param caption: One or more :class:`FieldQuery` operations against the caption field.\n:param query: One or more :class:`DisplayItemQuery` objects as nested queries.", "id": "f3554:c0:m0"}
{"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,<EOL>extract_when_missing=None, tags=None, length=None, offset=None, producer=None, url=None,<EOL>query=None, **kwargs):", "body": "super(SourceQuery, self).__init__(<EOL>logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,<EOL>extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,<EOL>offset=offset, **kwargs)<EOL>self._producer = None<EOL>self.producer = producer<EOL>self._url = None<EOL>self.url = url<EOL>self._query = None<EOL>self.query = query<EOL>", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param producer: One or more :class:`FieldQuery` operations against the producer field.\n:param url: One or more :class:`FieldQuery` operations against the url field.\n:param query: One or more :class:`SourceQuery` objects with nested queries.", "id": "f3556:c0:m0"}
{"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,<EOL>extract_when_missing=None, tags=None, length=None, offset=None, doi=None, isbn=None, issn=None,<EOL>url=None, title=None, publisher=None, journal=None, volume=None, issue=None, year=None,<EOL>figure=None, table=None, pages=None, authors=None, editors=None, affiliations=None,<EOL>acknowledgements=None, references=None, query=None, **kwargs):", "body": "super(ReferenceQuery, self).__init__(<EOL>logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,<EOL>extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,<EOL>offset=offset, **kwargs)<EOL>self._doi = None<EOL>self.doi = doi<EOL>self._isbn = None<EOL>self.isbn = isbn<EOL>self._issn = None<EOL>self.issn = issn<EOL>self._url = None<EOL>self.url = url<EOL>self._title = None<EOL>self.title = title<EOL>self._publisher = None<EOL>self.publisher = publisher<EOL>self._journal = None<EOL>self.journal = journal<EOL>self._volume = None<EOL>self.volume = volume<EOL>self._issue = None<EOL>self.issue = issue<EOL>self._year = None<EOL>self.year = year<EOL>self._figure = None<EOL>self.figure = figure<EOL>self._table = None<EOL>self.table = table<EOL>self._pages = None<EOL>self.pages = pages<EOL>self._authors = None<EOL>self.authors = authors<EOL>self._editors = None<EOL>self.editors = editors<EOL>self._affiliations = None<EOL>self.affiliations = affiliations<EOL>self._acknowledgements = None<EOL>self.acknowledgements = acknowledgements<EOL>self._references = None<EOL>self.references = references<EOL>self._query = None<EOL>self.query = query<EOL>", "docstring": "Constructor.\n\n:param logic: Logic for this filter. 
Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight of the query.\n:param simple: String with the simple query to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.\n:param doi: One or more :class:`FieldQuery` operations against the doi field.\n:param isbn: One or more :class:`FieldQuery` operations against the isbn field.\n:param issn: One or more :class:`FieldQuery` operations against the issn field.\n:param url: One or more :class:`FieldQuery` operations against the url field.\n:param title: One or more :class:`FieldQuery` operations against the title field.\n:param publisher: One or more :class:`FieldQuery` operations against the publisher field.\n:param journal: One or more :class:`FieldQuery` operations against the journal field.\n:param volume: One or more :class:`FieldQuery` operations against the volume field.\n:param issue: One or more :class:`FieldQuery` operations against the issue field.\n:param year: One or more :class:`FieldQuery` operations against the year field.\n:param figure: One or more :class:`DisplayItemQuery` operations against the figure field.\n:param table: One or more :class:`DisplayItemQuery` operations against the table field.\n:param pages: One or more :class:`PagesQuery` operations against the pages field.\n:param authors: One or more 
:class:`NameQuery` operations against the authors field.\n:param editors: One or more :class:`NameQuery` operations against the editors field.\n:param affiliations: One or more :class:`FieldQuery` operations against the affiliations field.\n:param acknowledgements: One or more :class:`FieldQuery` operations against the acknowledgements field.\n:param references: One or more :class:`ReferenceQuery` operations against the references field.\n:param query: One or more :class:`ReferenceQuery` objects with nested queries.", "id": "f3557:c0:m0"}
{"signature": "def __init__(self, sort=None, weight=None, logic=None, simple=None, simple_weight=None, extract_as=None,<EOL>extract_all=None, extract_when_missing=None, length=None, offset=None, filter=None, **kwargs):", "body": "super(FieldQuery, self).__init__(<EOL>sort=sort, weight=weight, logic=logic, simple=simple, simple_weight=simple_weight, extract_as=extract_as,<EOL>extract_all=extract_all, extract_when_missing=extract_when_missing, length=length, offset=offset, **kwargs)<EOL>self._filter = None<EOL>self.filter = filter<EOL>", "docstring": "Constructor.\n\n:param sort: ASCENDING or DESCENDING to set the sort order on this field.\n:param weight: Weight of the query.\n:param logic: Logic for this query. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param length: One or more :class:`FieldQuery` objects against the length field.\n:param offset: One or more :class:`FieldQuery` objects against the offset field.\n:param filter: One or more :class:`Filter` objects against this field.", "id": "f3558:c0:m0"}
{"signature": "def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,<EOL>extract_when_missing=None, tags=None, length=None, offset=None, **kwargs):", "body": "self._logic = None<EOL>self.logic = logic<EOL>self._weight = None<EOL>self.weight = weight<EOL>self._simple = None<EOL>self.simple = simple<EOL>self._simple_weight = None<EOL>self.simple_weight = simple_weight<EOL>self._extract_as = None<EOL>self.extract_as = extract_as<EOL>self._extract_all = None<EOL>self.extract_all = extract_all<EOL>self._extract_when_missing = None<EOL>self.extract_when_missing = extract_when_missing<EOL>self._tags = None<EOL>self.tags = tags<EOL>self._length = None<EOL>self.length = length<EOL>self._offset = None<EOL>self.offset = offset<EOL>", "docstring": "Constructor.\n\n:param logic: Logic for this filter. Must be equal to one of \"MUST\", \"MUST_NOT\", \"SHOULD\", or \"OPTIONAL\".\n:param weight: Weight for the query.\n:param simple: String with the simple search to run against all fields.\n:param simple_weight: Dictionary of relative paths to their weights for simple queries.\n:param extract_as: String with the alias to save this field under.\n:param extract_all: Boolean setting whether all values in an array should be extracted.\n:param extract_when_missing: Any valid JSON-supported object or PIF object. This value is returned when a value is missing that should be extracted (and the overall query is still satisfied).\n:param tags: One or more :class:`FieldQuery` operations against the tags field.\n:param length: One or more :class:`FieldQuery` operations against the length field.\n:param offset: One or more :class:`FieldQuery` operations against the offset field.", "id": "f3559:c0:m0"}
{"signature": "def __init__(self, id=None, dataset=None, dataset_version=None, score=None, updated_at=None, system=None, <EOL>extracted=None, extracted_path=None, **kwargs):", "body": "self._id = None<EOL>self.id = id<EOL>self._dataset = None<EOL>self.dataset = dataset<EOL>self._dataset_version = None<EOL>self.dataset_version = dataset_version<EOL>self._score = None<EOL>self.score = score<EOL>self._updated_at = None<EOL>self.updated_at = updated_at<EOL>self._system = None<EOL>self.system = system<EOL>self._extracted = None<EOL>self.extracted = extracted<EOL>self._extracted_path = None<EOL>self.extracted_path = extracted_path<EOL>", "docstring": "Constructor.\n\n:param id: String with the ID of the record.\n:param dataset: Integer with the dataset of the record.\n:param dataset_version: Integer with the dataset version of the record.\n:param score: Score with the relevancy of the result.\n:param updated_at: String with the last time that the record was updated.\n:param system: Pif System object that matched.\n:param extracted: Dictionary with a map of extracted property names to values.\n:param extracted_path: Dictionary with a map of extracted property names to paths in a PIF.", "id": "f3564:c0:m0"}
{"signature": "def load_file_as_json(path):", "body": "with open(path, \"<STR_LIT:r>\") as f:<EOL><INDENT>parsed_dict = json.load(f)<EOL><DEDENT>return parsed_dict<EOL>", "docstring": "Given a filepath, loads the file as a dictionary from JSON\n\n:param path: The path to a JSON file", "id": "f3568:m0"}
{"signature": "def __get_ml_configuration_status(self, job_id):", "body": "failure_message = \"<STR_LIT>\"<EOL>response = self._get_success_json(self._get(<EOL>'<STR_LIT>' + job_id + '<STR_LIT>', None, failure_message=failure_message))[<EOL>'<STR_LIT:data>']<EOL>return response<EOL>", "docstring": "After invoking the create_ml_configuration async method, you can use this method to\ncheck on the status of the builder job.\n\n:param job_id: The identifier returned from create_ml_configuration\n:return: Job status", "id": "f3579:c0:m10"}
{"signature": "def create_ml_configuration(self, search_template, extract_as_keys, dataset_ids):", "body": "data = {<EOL>\"<STR_LIT>\":<EOL>search_template,<EOL>\"<STR_LIT>\":<EOL>extract_as_keys<EOL>}<EOL>failure_message = \"<STR_LIT>\"<EOL>config_job_id = self._get_success_json(self._post_json(<EOL>'<STR_LIT>', data, failure_message=failure_message))['<STR_LIT:data>'][<EOL>'<STR_LIT:result>']['<STR_LIT>']<EOL>while True:<EOL><INDENT>config_status = self.__get_ml_configuration_status(config_job_id)<EOL>print('<STR_LIT>', config_status)<EOL>if config_status['<STR_LIT:status>'] == '<STR_LIT>':<EOL><INDENT>ml_config = self.__convert_response_to_configuration(config_status['<STR_LIT:result>'], dataset_ids)<EOL>return ml_config<EOL><DEDENT>time.sleep(<NUM_LIT:5>)<EOL><DEDENT>", "docstring": "This method will spawn a server job to create a default ML configuration based on a search template and\nthe extract as keys.\nThis function will submit the request to build, and wait for the configuration to finish before returning.\n\n:param search_template: A search template defining the query (properties, datasets etc)\n:param extract_as_keys: Array of extract-as keys defining the descriptors\n:param dataset_ids: Array of dataset identifiers to make search template from\n:return: An identifier used to request the status of the builder job (get_ml_configuration_status)", "id": "f3579:c0:m7"}
{"signature": "def create_ml_configuration_from_datasets(self, dataset_ids):", "body": "available_columns = self.search_template_client.get_available_columns(dataset_ids)<EOL>search_template = self.search_template_client.create(dataset_ids, available_columns)<EOL>return self.create_ml_configuration(search_template, available_columns, dataset_ids)<EOL>", "docstring": "Creates an ml configuration from dataset_ids and extract_as_keys\n\n:param dataset_ids: Array of dataset identifiers to make search template from\n:return: An identifier used to request the status of the builder job (get_ml_configuration_status)", "id": "f3579:c0:m6"}
{"signature": "def validate(self, ml_template):", "body": "data = {<EOL>\"<STR_LIT>\":<EOL>ml_template<EOL>}<EOL>failure_message = \"<STR_LIT>\"<EOL>res = self._get_success_json(self._post_json(<EOL>'<STR_LIT>', data, failure_message=failure_message))['<STR_LIT:data>']<EOL>if res['<STR_LIT>']:<EOL><INDENT>return '<STR_LIT:OK>'<EOL><DEDENT>return res['<STR_LIT>']<EOL>", "docstring": "Runs the template against the validation endpoint, returns a message indicating status of the templte\n\n:param ml_template: Template to validate\n:return: OK or error message if validation failed", "id": "f3580:c0:m1"}
{"signature": "def add_descriptor(self, descriptor, role='<STR_LIT:ignore>', group_by_key=False):", "body": "descriptor.validate()<EOL>if descriptor.key in self.configuration[\"<STR_LIT>\"]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.configuration['<STR_LIT>'].append(descriptor.as_dict())<EOL>self.configuration[\"<STR_LIT>\"][descriptor.key] = role<EOL>if group_by_key:<EOL><INDENT>self.configuration[\"<STR_LIT>\"].append(descriptor.key)<EOL><DEDENT>", "docstring": "Add a descriptor column.\n\n:param descriptor: A Descriptor instance (e.g., RealDescriptor, InorganicDescriptor, etc.)\n:param role: Specify a role (input, output, latentVariable, or ignore)\n:param group_by_key: Whether or not to group by this key during cross validation", "id": "f3581:c0:m3"}
{"signature": "def dataset_ids(self, dataset_ids):", "body": "self.configuration['<STR_LIT>'] = dataset_ids<EOL>", "docstring": "Sets the dataset ids to use for the view\n\n:param dataset_ids: Array of strings, one for each dataset id", "id": "f3581:c0:m1"}
{"signature": "def get_available_columns(self, dataset_ids):", "body": "if not isinstance(dataset_ids, list):<EOL><INDENT>dataset_ids = [dataset_ids]<EOL><DEDENT>data = {<EOL>\"<STR_LIT>\":<EOL>dataset_ids<EOL>}<EOL>failure_message = \"<STR_LIT>\".format(dataset_ids)<EOL>return self._get_success_json(self._post_json(<EOL>'<STR_LIT>', data, failure_message=failure_message))['<STR_LIT:data>']<EOL>", "docstring": "Retrieves the set of columns from the combination of dataset ids given\n\n:param dataset_ids: The id of the dataset to retrieve columns from\n:type dataset_ids: list of int\n:return: A list of column names from the dataset ids given.\n:rtype: list of str", "id": "f3582:c0:m2"}
{"signature": "def __int__(self):", "body": "return self._ip_dec<EOL>", "docstring": "Return the decimal representation of the address/netmask.", "id": "f3601:c0:m16"}
{"signature": "def _dot_to_dec(ip, check=True):", "body": "if check and not is_dot(ip):<EOL><INDENT>raise ValueError('<STR_LIT>' % ip)<EOL><DEDENT>octets = str(ip).split('<STR_LIT:.>')<EOL>dec = <NUM_LIT:0><EOL>dec |= int(octets[<NUM_LIT:0>]) << <NUM_LIT><EOL>dec |= int(octets[<NUM_LIT:1>]) << <NUM_LIT:16><EOL>dec |= int(octets[<NUM_LIT:2>]) << <NUM_LIT:8><EOL>dec |= int(octets[<NUM_LIT:3>])<EOL>return dec<EOL>", "docstring": "Dotted decimal notation to decimal conversion.", "id": "f3601:m15"}
{"signature": "def get_network_ip(self):", "body": "return self._net_ip<EOL>", "docstring": "Return the network address.", "id": "f3601:c3:m9"}
{"signature": "def _check_nm(nm, notation):", "body": "<EOL>_NM_CHECK_FUNCT = {<EOL>NM_DOT: _dot_to_dec,<EOL>NM_HEX: _hex_to_dec,<EOL>NM_BIN: _bin_to_dec,<EOL>NM_OCT: _oct_to_dec,<EOL>NM_DEC: _dec_to_dec_long}<EOL>try:<EOL><INDENT>dec = _NM_CHECK_FUNCT[notation](nm, check=True)<EOL><DEDENT>except ValueError:<EOL><INDENT>return False<EOL><DEDENT>if dec in _NETMASKS_VALUES:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Function internally used to check if the given netmask\n    is of the specified notation.", "id": "f3601:m7"}
{"signature": "def set_ip(self, ip):", "body": "self.set(ip=ip, netmask=self._nm)<EOL>", "docstring": "Change the current IP.", "id": "f3601:c3:m3"}
{"signature": "def is_hex_nm(nm):", "body": "return _check_nm(nm, NM_HEX)<EOL>", "docstring": "Return true if the netmask is in hexadecimal notatation.", "id": "f3601:m9"}
{"signature": "def is_dec_nm(nm):", "body": "return _check_nm(nm, NM_DEC)<EOL>", "docstring": "Return true if the netmask is in decimal notatation.", "id": "f3601:m12"}
{"signature": "def is_hex(ip):", "body": "try:<EOL><INDENT>dec = int(str(ip), <NUM_LIT:16>)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>return False<EOL><DEDENT>if dec > <NUM_LIT> or dec < <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Return true if the IP address is in hexadecimal notation.", "id": "f3601:m3"}
{"signature": "def detect(ip):", "body": "return _detect(ip, _isnm=False)<EOL>", "docstring": "Detect the notation of an IP address.\n\n    @param ip: the IP address.\n    @type ip: integers, strings or object with an appropriate __str()__ method.\n    @return: one of the IP_* constants; IP_UNKNOWN if undetected.", "id": "f3601:m34"}
{"signature": "def p_detect(ip):", "body": "return NOTATION_MAP[detect(ip)][<NUM_LIT:0>]<EOL>", "docstring": "Return the notation of an IP address (string).", "id": "f3601:m36"}
{"signature": "def _dec_to_hex(ip):", "body": "return hex(ip)<EOL>", "docstring": "Decimal to hexadecimal conversion.", "id": "f3601:m18"}
{"signature": "def is_bin_nm(nm):", "body": "return _check_nm(nm, NM_BIN)<EOL>", "docstring": "Return true if the netmask is in binary notatation.", "id": "f3601:m10"}
{"signature": "def __iadd__(self, other):", "body": "self.set(self._add(other), notation=IP_DEC)<EOL>return self<EOL>", "docstring": "Augmented arithmetic sum.", "id": "f3601:c1:m3"}
{"signature": "def _dec_to_oct(ip):", "body": "return oct(ip)<EOL>", "docstring": "Decimal to octal conversion.", "id": "f3601:m20"}
{"signature": "def detect_nm(nm):", "body": "return _detect(nm, _isnm=True)<EOL>", "docstring": "Detect the notation of a netmask.\n    @param nm: the netmask.\n    @type nm: integers, strings or object with an appropriate __str()__ method.\n    @return: one of the NM_* constants; NM_UNKNOWN if undetected.", "id": "f3601:m35"}
{"signature": "def is_dot(ip):", "body": "octets = str(ip).split('<STR_LIT:.>')<EOL>if len(octets) != <NUM_LIT:4>:<EOL><INDENT>return False<EOL><DEDENT>for i in octets:<EOL><INDENT>try:<EOL><INDENT>val = int(i)<EOL><DEDENT>except ValueError:<EOL><INDENT>return False<EOL><DEDENT>if val > <NUM_LIT:255> or val < <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Return true if the IP address is in dotted decimal notation.", "id": "f3601:m2"}
{"signature": "def is_dec(ip):", "body": "try:<EOL><INDENT>dec = int(str(ip))<EOL><DEDENT>except ValueError:<EOL><INDENT>return False<EOL><DEDENT>if dec > <NUM_LIT> or dec < <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Return true if the IP address is in decimal notation.", "id": "f3601:m6"}
{"signature": "def _hex_to_dec(ip, check=True):", "body": "if check and not is_hex(ip):<EOL><INDENT>raise ValueError('<STR_LIT>' % ip)<EOL><DEDENT>if isinstance(ip, int):<EOL><INDENT>ip = hex(ip)<EOL><DEDENT>return int(str(ip), <NUM_LIT:16>)<EOL>", "docstring": "Hexadecimal to decimal conversion.", "id": "f3601:m17"}
{"signature": "def _cmp_prepare(self, other):", "body": "if isinstance(other, self.__class__):<EOL><INDENT>return other._ip_dec<EOL><DEDENT>elif isinstance(other, int):<EOL><INDENT>return other<EOL><DEDENT>return self.__class__(other)._ip_dec<EOL>", "docstring": "Prepare the item to be compared with this address/netmask.", "id": "f3601:c0:m9"}
{"signature": "def get_ip(self):", "body": "return self._ip<EOL>", "docstring": "Return the given address.", "id": "f3601:c3:m4"}
{"signature": "def is_notation(ip, notation):", "body": "return _is_notation(ip, notation, _isnm=False)<EOL>", "docstring": "Return true if the given address is in the given notation.", "id": "f3601:m31"}
{"signature": "def get_dot(self):", "body": "return self._ip<EOL>", "docstring": "Return the dotted decimal notation of the address/netmask.", "id": "f3601:c0:m3"}
{"signature": "def is_oct(ip):", "body": "try:<EOL><INDENT>dec = int(str(ip), <NUM_LIT:8>)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>return False<EOL><DEDENT>if dec > <NUM_LIT> or dec < <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Return true if the IP address is in octal notation.", "id": "f3601:m5"}
{"signature": "def _wildcard_to_dec(nm, check=False):", "body": "if check and not is_wildcard_nm(nm):<EOL><INDENT>raise ValueError('<STR_LIT>' % nm)<EOL><DEDENT>return <NUM_LIT> - _dot_to_dec(nm, check=False)<EOL>", "docstring": "Wildcard bits to decimal conversion.", "id": "f3601:m28"}
{"signature": "def __init__(self, ip, notation=IP_UNKNOWN):", "body": "self.set(ip, notation)<EOL>", "docstring": "Initialize the object.", "id": "f3601:c0:m0"}
{"signature": "def set(self, ip, netmask=None):", "body": "if isinstance(ip, str) and netmask is None:<EOL><INDENT>ipnm = ip.split('<STR_LIT:/>')<EOL>if len(ipnm) != <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>' % ip)<EOL><DEDENT>ip = ipnm[<NUM_LIT:0>]<EOL>netmask = ipnm[<NUM_LIT:1>]<EOL><DEDENT>if isinstance(ip, IPv4Address):<EOL><INDENT>self._ip = ip<EOL><DEDENT>else:<EOL><INDENT>self._ip = IPv4Address(ip)<EOL><DEDENT>if isinstance(netmask, IPv4NetMask):<EOL><INDENT>self._nm = netmask<EOL><DEDENT>else:<EOL><INDENT>self._nm = IPv4NetMask(netmask)<EOL><DEDENT>ipl = int(self._ip)<EOL>nml = int(self._nm)<EOL>base_add = ipl & nml<EOL>self._ip_num = <NUM_LIT> - <NUM_LIT:1> - nml<EOL>if self._ip_num in (-<NUM_LIT:1>, <NUM_LIT:0>):<EOL><INDENT>if self._ip_num == -<NUM_LIT:1>:<EOL><INDENT>self._ip_num = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>self._ip_num = <NUM_LIT:2><EOL><DEDENT>self._net_ip = None<EOL>self._bc_ip = None<EOL>self._first_ip_dec = base_add<EOL>self._first_ip = IPv4Address(self._first_ip_dec, notation=IP_DEC)<EOL>if self._ip_num == <NUM_LIT:1>:<EOL><INDENT>last_ip_dec = self._first_ip_dec<EOL><DEDENT>else:<EOL><INDENT>last_ip_dec = self._first_ip_dec + <NUM_LIT:1><EOL><DEDENT>self._last_ip = IPv4Address(last_ip_dec, notation=IP_DEC)<EOL>return<EOL><DEDENT>self._net_ip = IPv4Address(base_add, notation=IP_DEC)<EOL>self._bc_ip = IPv4Address(base_add + self._ip_num + <NUM_LIT:1>, notation=IP_DEC)<EOL>self._first_ip_dec = base_add + <NUM_LIT:1><EOL>self._first_ip = IPv4Address(self._first_ip_dec, notation=IP_DEC)<EOL>self._last_ip = IPv4Address(base_add + self._ip_num, notation=IP_DEC)<EOL>", "docstring": "Set the IP address and the netmask.", "id": "f3601:c3:m1"}
{"signature": "def get_wildcard(self):", "body": "return _convert(self._ip, notation=NM_WILDCARD,<EOL>inotation=IP_DOT, _check=False, _isnm=self._isnm)<EOL>", "docstring": "Return the wildcard bits notation of the netmask.", "id": "f3601:c2:m1"}
{"signature": "def p_detect_nm(nm):", "body": "return NOTATION_MAP[detect_nm(nm)][<NUM_LIT:0>]<EOL>", "docstring": "Return the notation of a netmask (string).", "id": "f3601:m37"}
{"signature": "def _dec_to_dot(ip):", "body": "first = int((ip >> <NUM_LIT>) & <NUM_LIT:255>)<EOL>second = int((ip >> <NUM_LIT:16>) & <NUM_LIT:255>)<EOL>third = int((ip >> <NUM_LIT:8>) & <NUM_LIT:255>)<EOL>fourth = int(ip & <NUM_LIT:255>)<EOL>return '<STR_LIT>' % (first, second, third, fourth)<EOL>", "docstring": "Decimal to dotted decimal notation conversion.", "id": "f3601:m16"}
{"signature": "def _convert(ip, notation, inotation, _check, _isnm):", "body": "inotation_orig = inotation<EOL>notation_orig = notation<EOL>inotation = _get_notation(inotation)<EOL>notation = _get_notation(notation)<EOL>if inotation is None:<EOL><INDENT>raise ValueError('<STR_LIT>' % inotation_orig)<EOL><DEDENT>if notation is None:<EOL><INDENT>raise ValueError('<STR_LIT>' % notation_orig)<EOL><DEDENT>docheck = _check or False<EOL>if inotation == IP_UNKNOWN:<EOL><INDENT>inotation = _detect(ip, _isnm)<EOL>if inotation == IP_UNKNOWN:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if _check is None:<EOL><INDENT>docheck = True<EOL><DEDENT><DEDENT>if _isnm:<EOL><INDENT>docheck = False<EOL><DEDENT>dec = <NUM_LIT:0><EOL>if inotation == IP_DOT:<EOL><INDENT>dec = _dot_to_dec(ip, docheck)<EOL><DEDENT>elif inotation == IP_HEX:<EOL><INDENT>dec = _hex_to_dec(ip, docheck)<EOL><DEDENT>elif inotation == IP_BIN:<EOL><INDENT>dec = _bin_to_dec(ip, docheck)<EOL><DEDENT>elif inotation == IP_OCT:<EOL><INDENT>dec = _oct_to_dec(ip, docheck)<EOL><DEDENT>elif inotation == IP_DEC:<EOL><INDENT>dec = _dec_to_dec_long(ip, docheck)<EOL><DEDENT>elif _isnm and inotation == NM_BITS:<EOL><INDENT>dec = _bits_to_dec(ip, docheck)<EOL><DEDENT>elif _isnm and inotation == NM_WILDCARD:<EOL><INDENT>dec = _wildcard_to_dec(ip, docheck)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % inotation_orig)<EOL><DEDENT>if _isnm and dec not in _NETMASKS_VALUES:<EOL><INDENT>raise ValueError('<STR_LIT>' % ip)<EOL><DEDENT>if notation == IP_DOT:<EOL><INDENT>return _dec_to_dot(dec)<EOL><DEDENT>elif notation == IP_HEX:<EOL><INDENT>return _dec_to_hex(dec)<EOL><DEDENT>elif notation == IP_BIN:<EOL><INDENT>return _dec_to_bin(dec)<EOL><DEDENT>elif notation == IP_OCT:<EOL><INDENT>return _dec_to_oct(dec)<EOL><DEDENT>elif notation == IP_DEC:<EOL><INDENT>return _dec_to_dec_str(dec)<EOL><DEDENT>elif _isnm and notation == NM_BITS:<EOL><INDENT>return _dec_to_bits(dec)<EOL><DEDENT>elif _isnm and notation == 
NM_WILDCARD:<EOL><INDENT>return _dec_to_wildcard(dec)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % notation_orig)<EOL><DEDENT>", "docstring": "Internally used to convert IPs and netmasks to other notations.", "id": "f3601:m38"}
{"signature": "def is_notation_nm(nm, notation):", "body": "return _is_notation(nm, notation, _isnm=True)<EOL>", "docstring": "Return true if the given netmask is in the given notation.", "id": "f3601:m32"}
{"signature": "def __isub__(self, other):", "body": "self.set(self._sub(other), notation=IP_DEC)<EOL>return self<EOL>", "docstring": "Augmented arithmetic subtraction.", "id": "f3601:c1:m6"}
{"signature": "def convert_nm(nm, notation=IP_DOT, inotation=IP_UNKNOWN, check=True):", "body": "return _convert(nm, notation, inotation, _check=check, _isnm=True)<EOL>", "docstring": "Convert a netmask to another notation.", "id": "f3601:m40"}
{"signature": "def __len__(self):", "body": "return self.get_ip_number()<EOL>", "docstring": "Return the number of usable IP address.", "id": "f3601:c3:m16"}
{"signature": "def _oct_to_dec(ip, check=True):", "body": "if check and not is_oct(ip):<EOL><INDENT>raise ValueError('<STR_LIT>' % ip)<EOL><DEDENT>if isinstance(ip, int):<EOL><INDENT>ip = oct(ip)<EOL><DEDENT>return int(str(ip), <NUM_LIT:8>)<EOL>", "docstring": "Octal to decimal conversion.", "id": "f3601:m19"}
{"signature": "def __str__(self):", "body": "return self.get()<EOL>", "docstring": "Print this address/netmask.", "id": "f3601:c0:m8"}
{"signature": "def get_ip_number(self):", "body": "return self._ip_num<EOL>", "docstring": "Return the number of usable IP addresses.", "id": "f3601:c3:m11"}
{"signature": "def _get_notation(notation):", "body": "return _NOTATION_KEYS.get(notation, None)<EOL>", "docstring": "Given a numeric value or string value, returns one in IP_DOT, IP_HEX,\n    IP_BIN, etc., or None if unable to convert to the internally\n    used numeric convention.", "id": "f3601:m0"}
{"signature": "def _dec_to_bin(ip):", "body": "bits = []<EOL>while ip:<EOL><INDENT>bits.append(_BYTES_TO_BITS[ip & <NUM_LIT:255>])<EOL>ip >>= <NUM_LIT:8><EOL><DEDENT>bits.reverse()<EOL>return '<STR_LIT>'.join(bits) or <NUM_LIT:32>*'<STR_LIT:0>'<EOL>", "docstring": "Decimal to binary conversion.", "id": "f3601:m23"}
{"signature": "def get_netmask(self):", "body": "return self._nm<EOL>", "docstring": "Return the netmask.", "id": "f3601:c3:m6"}
{"signature": "def generate(length=DEFAULT_LENGTH):", "body": "return '<STR_LIT>'.join(random.SystemRandom().choice(ALPHABET)<EOL>for _ in range(length))<EOL>", "docstring": "Generate a random string of the specified length.\n\nThe returned string is composed of an alphabet that shouldn't include any\ncharacters that are easily mistakeable for one another (I, 1, O, 0), and\nhopefully won't accidentally contain any English-language curse words.", "id": "f3605:m0"}
{"signature": "@classmethod<EOL><INDENT>def coerce(cls, key, value):<DEDENT>", "body": "if not isinstance(value, MutableDict):<EOL><INDENT>if isinstance(value, dict):<EOL><INDENT>return MutableDict(value)<EOL><DEDENT>return Mutable.coerce(key, value)<EOL><DEDENT>else:<EOL><INDENT>return value<EOL><DEDENT>", "docstring": "Convert plain dictionaries to MutableDict.", "id": "f3607:c4:m0"}
{"signature": "def __setitem__(self, key, value):", "body": "dict.__setitem__(self, key, value)<EOL>self.changed()<EOL>", "docstring": "Detect dictionary set events and emit change events.", "id": "f3607:c4:m1"}
{"signature": "@compiles(utcnow)<EOL>def _default_utcnow(element, compiler, **kw):", "body": "return \"<STR_LIT>\"<EOL>", "docstring": "default compilation handler.\n\n    Note that there is no SQL \"utcnow()\" function; this is a\n    \"fake\" string so that we can produce SQL strings that are dialect-agnostic,\n    such as within tests.", "id": "f3608:m0"}
{"signature": "@classmethod<EOL><INDENT>def _reference_table(cls, ref_table):<DEDENT>", "body": "<EOL>cols = [(sa.Column(), refcol) for refcol in ref_table.primary_key]<EOL>for col, refcol in cols:<EOL><INDENT>setattr(cls, \"<STR_LIT>\" % (ref_table.name, refcol.name), col)<EOL><DEDENT>cls.__table__.append_constraint(sa.ForeignKeyConstraint(*zip(*cols)))<EOL>", "docstring": "Create a foreign key reference from the local class to the given remote\n        table.\n\n        Adds column references to the declarative class and adds a\n        ForeignKeyConstraint.", "id": "f3608:c0:m1"}
{"signature": "def get_tm_session(session_factory, transaction_manager):", "body": "dbsession = session_factory()<EOL>zope.sqlalchemy.register(<EOL>dbsession, transaction_manager=transaction_manager)<EOL>return dbsession<EOL>", "docstring": "Get a ``sqlalchemy.orm.Session`` instance backed by a transaction.\n\nThis function will hook the session to the transaction manager which\nwill take care of committing any changes.\n\n- When using pyramid_tm it will automatically be committed or aborted\n  depending on whether an exception is raised.\n\n- When using scripts you should wrap the session in a manager yourself.\n  For example::\n\n      import transaction\n\n      engine = get_engine(settings)\n      session_factory = get_session_factory(engine)\n      with transaction.manager:\n          dbsession = get_tm_session(session_factory, transaction.manager)", "id": "f3609:m2"}
{"signature": "def many_to_one(clsname, **kw):", "body": "@declared_attr<EOL>def m2o(cls):<EOL><INDENT>cls._references((cls.__name__, clsname))<EOL>return relationship(clsname, **kw)<EOL><DEDENT>return m2o<EOL>", "docstring": "Use an event to build a many-to-one relationship on a class.\n\n    This makes use of the :meth:`.References._reference_table` method\n    to generate a full foreign key relationship to the remote table.", "id": "f3611:m0"}
{"signature": "def render_git_describe_long(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL><DEDENT>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG-DISTANCE-gHEX[-dirty].\n\n    Like 'git describe --tags --dirty --always -long'.\n    The distance/hash is unconditional.\n\n    Exceptions:\n    1: no tags. HEX[-dirty]  (note: no 'g' prefix)", "id": "f3617:m14"}
{"signature": "def render_pep440(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"],<EOL>pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>return rendered<EOL>", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n    Exceptions:\n    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f3617:m9"}
{"signature": "def register_vcs_handler(vcs, method):  ", "body": "def decorate(f):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if vcs not in HANDLERS:<EOL><INDENT>HANDLERS[vcs] = {}<EOL><DEDENT>HANDLERS[vcs][method] = f<EOL>return f<EOL><DEDENT>return decorate<EOL>", "docstring": "Decorator to mark a method as the handler for a particular VCS.", "id": "f3617:m2"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_versions_from_keywords(keywords, tag_prefix, verbose):", "body": "if not keywords:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>date = keywords.get(\"<STR_LIT:date>\")<EOL>if date is not None:<EOL><INDENT>date = date.strip().replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:T>\", <NUM_LIT:1>).replace(\"<STR_LIT:U+0020>\", \"<STR_LIT>\", <NUM_LIT:1>)<EOL><DEDENT>refnames = keywords[\"<STR_LIT>\"].strip()<EOL>if refnames.startswith(\"<STR_LIT>\"):<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>refs = set([r.strip() for r in refnames.strip(\"<STR_LIT>\").split(\"<STR_LIT:U+002C>\")])<EOL>TAG = \"<STR_LIT>\"<EOL>tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])<EOL>if not tags:<EOL><INDENT>tags = set([r for r in refs if re.search(r'<STR_LIT>', r)])<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % \"<STR_LIT:U+002C>\".join(refs - tags))<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % \"<STR_LIT:U+002C>\".join(sorted(tags)))<EOL><DEDENT>for ref in sorted(tags):<EOL><INDENT>if ref.startswith(tag_prefix):<EOL><INDENT>r = ref[len(tag_prefix):]<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % r)<EOL><DEDENT>return {\"<STR_LIT:version>\": r,<EOL>\"<STR_LIT>\": keywords[\"<STR_LIT>\"].strip(),<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": None,<EOL>\"<STR_LIT:date>\": date}<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": keywords[\"<STR_LIT>\"].strip(),<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": \"<STR_LIT>\", \"<STR_LIT:date>\": None}<EOL>", "docstring": "Get version information from git keywords.", "id": "f3617:m6"}
{"signature": "def render_git_describe(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL><DEDENT>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[-DISTANCE-gHEX][-dirty].\n\n    Like 'git describe --tags --dirty --always'.\n\n    Exceptions:\n    1: no tags. HEX[-dirty]  (note: no 'g' prefix)", "id": "f3617:m13"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_get_keywords(versionfile_abs):", "body": "<EOL>keywords = {}<EOL>try:<EOL><INDENT>f = open(versionfile_abs, \"<STR_LIT:r>\")<EOL>for line in f.readlines():<EOL><INDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT:date>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>f.close()<EOL><DEDENT>except EnvironmentError:<EOL><INDENT>pass<EOL><DEDENT>return keywords<EOL>", "docstring": "Extract version information from the given file.", "id": "f3617:m5"}
{"signature": "def get_config():", "body": "<EOL>cfg = VersioneerConfig()<EOL>cfg.VCS = \"<STR_LIT>\"<EOL>cfg.style = \"<STR_LIT>\"<EOL>cfg.tag_prefix = \"<STR_LIT>\"<EOL>cfg.parentdir_prefix = \"<STR_LIT>\"<EOL>cfg.versionfile_source = \"<STR_LIT>\"<EOL>cfg.verbose = False<EOL>return cfg<EOL>", "docstring": "Create, populate and return the VersioneerConfig() object.", "id": "f3617:m1"}
{"signature": "def get_keywords():", "body": "<EOL>git_refnames = \"<STR_LIT>\"<EOL>git_full = \"<STR_LIT>\"<EOL>git_date = \"<STR_LIT>\"<EOL>keywords = {\"<STR_LIT>\": git_refnames, \"<STR_LIT>\": git_full, \"<STR_LIT:date>\": git_date}<EOL>return keywords<EOL>", "docstring": "Get the keywords needed to look up the version information.", "id": "f3617:m0"}
{"signature": "def parse_line(line, document=None):", "body": "result = re.match(line_pattern, line)<EOL>if result:<EOL><INDENT>_, lineno, offset, severity, msg = result.groups()<EOL>lineno = int(lineno or <NUM_LIT:1>)<EOL>offset = int(offset or <NUM_LIT:0>)<EOL>errno = <NUM_LIT:2><EOL>if severity == '<STR_LIT:error>':<EOL><INDENT>errno = <NUM_LIT:1><EOL><DEDENT>diag = {<EOL>'<STR_LIT:source>': '<STR_LIT>',<EOL>'<STR_LIT>': {<EOL>'<STR_LIT:start>': {'<STR_LIT>': lineno - <NUM_LIT:1>, '<STR_LIT>': offset},<EOL>'<STR_LIT:end>': {'<STR_LIT>': lineno - <NUM_LIT:1>, '<STR_LIT>': offset + <NUM_LIT:1>}<EOL>},<EOL>'<STR_LIT:message>': msg,<EOL>'<STR_LIT>': errno<EOL>}<EOL>if document:<EOL><INDENT>word = document.word_at_position(diag['<STR_LIT>']['<STR_LIT:start>'])<EOL>if word:<EOL><INDENT>diag['<STR_LIT>']['<STR_LIT:end>']['<STR_LIT>'] = (<EOL>diag['<STR_LIT>']['<STR_LIT:start>']['<STR_LIT>'] + len(word))<EOL><DEDENT><DEDENT>return diag<EOL><DEDENT>", "docstring": "Return a language-server diagnostic from a line of the Mypy error report;\noptionally, use the whole document to provide more context on it.", "id": "f3619:m0"}
{"signature": "def versions_from_file(filename):", "body": "try:<EOL><INDENT>with open(filename) as f:<EOL><INDENT>contents = f.read()<EOL><DEDENT><DEDENT>except EnvironmentError:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>mo = re.search(r\"<STR_LIT>\",<EOL>contents, re.M | re.S)<EOL>if not mo:<EOL><INDENT>mo = re.search(r\"<STR_LIT>\",<EOL>contents, re.M | re.S)<EOL><DEDENT>if not mo:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>return json.loads(mo.group(<NUM_LIT:1>))<EOL>", "docstring": "Try to determine the version from _version.py if present.", "id": "f3620:m9"}
{"signature": "def render_pep440_post(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] .\n\n    The \".dev0\" means dirty. Note that .dev0 sorts backwards\n    (a dirty tree will appear \"older\" than the corresponding clean one),\n    but you shouldn't be releasing software with -dirty anyways.\n\n    Exceptions:\n    1: no tags. 0.postDISTANCE[.dev0]", "id": "f3620:m14"}
{"signature": "def scan_setup_py():", "body": "found = set()<EOL>setters = False<EOL>errors = <NUM_LIT:0><EOL>with open(\"<STR_LIT>\", \"<STR_LIT:r>\") as f:<EOL><INDENT>for line in f.readlines():<EOL><INDENT>if \"<STR_LIT>\" in line:<EOL><INDENT>found.add(\"<STR_LIT>\")<EOL><DEDENT>if \"<STR_LIT>\" in line:<EOL><INDENT>found.add(\"<STR_LIT>\")<EOL><DEDENT>if \"<STR_LIT>\" in line:<EOL><INDENT>found.add(\"<STR_LIT>\")<EOL><DEDENT>if \"<STR_LIT>\" in line:<EOL><INDENT>setters = True<EOL><DEDENT>if \"<STR_LIT>\" in line:<EOL><INDENT>setters = True<EOL><DEDENT><DEDENT><DEDENT>if len(found) != <NUM_LIT:3>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>errors += <NUM_LIT:1><EOL><DEDENT>if setters:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>errors += <NUM_LIT:1><EOL><DEDENT>return errors<EOL>", "docstring": "Validate the contents of setup.py against Versioneer's expectations.", "id": "f3620:m23"}
{"signature": "def plus_or_dot(pieces):", "body": "if \"<STR_LIT:+>\" in pieces.get(\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>return \"<STR_LIT:.>\"<EOL><DEDENT>return \"<STR_LIT:+>\"<EOL>", "docstring": "Return a + if we don't already have one, else return a .", "id": "f3620:m11"}
{"signature": "def render_pep440(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"],<EOL>pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>return rendered<EOL>", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n    Exceptions:\n    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f3620:m12"}
{"signature": "def do_vcs_install(manifest_in, versionfile_source, ipy):", "body": "GITS = [\"<STR_LIT>\"]<EOL>if sys.platform == \"<STR_LIT:win32>\":<EOL><INDENT>GITS = [\"<STR_LIT>\", \"<STR_LIT>\"]<EOL><DEDENT>files = [manifest_in, versionfile_source]<EOL>if ipy:<EOL><INDENT>files.append(ipy)<EOL><DEDENT>try:<EOL><INDENT>me = __file__<EOL>if me.endswith(\"<STR_LIT>\") or me.endswith(\"<STR_LIT>\"):<EOL><INDENT>me = os.path.splitext(me)[<NUM_LIT:0>] + \"<STR_LIT>\"<EOL><DEDENT>versioneer_file = os.path.relpath(me)<EOL><DEDENT>except NameError:<EOL><INDENT>versioneer_file = \"<STR_LIT>\"<EOL><DEDENT>files.append(versioneer_file)<EOL>present = False<EOL>try:<EOL><INDENT>f = open(\"<STR_LIT>\", \"<STR_LIT:r>\")<EOL>for line in f.readlines():<EOL><INDENT>if line.strip().startswith(versionfile_source):<EOL><INDENT>if \"<STR_LIT>\" in line.strip().split()[<NUM_LIT:1>:]:<EOL><INDENT>present = True<EOL><DEDENT><DEDENT><DEDENT>f.close()<EOL><DEDENT>except EnvironmentError:<EOL><INDENT>pass<EOL><DEDENT>if not present:<EOL><INDENT>f = open(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>f.write(\"<STR_LIT>\" % versionfile_source)<EOL>f.close()<EOL>files.append(\"<STR_LIT>\")<EOL><DEDENT>run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\"] + files)<EOL>", "docstring": "Git-specific installation logic for Versioneer.\n\n    For Git, this means creating/changing .gitattributes to mark _version.py\n    for export-subst keyword substitution.", "id": "f3620:m7"}
{"signature": "def render(pieces, style):", "body": "if pieces[\"<STR_LIT:error>\"]:<EOL><INDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": pieces.get(\"<STR_LIT>\"),<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT:error>\": pieces[\"<STR_LIT:error>\"],<EOL>\"<STR_LIT:date>\": None}<EOL><DEDENT>if not style or style == \"<STR_LIT:default>\":<EOL><INDENT>style = \"<STR_LIT>\"  <EOL><DEDENT>if style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_pre(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_post(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_old(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_git_describe(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_git_describe_long(pieces)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % style)<EOL><DEDENT>return {\"<STR_LIT:version>\": rendered, \"<STR_LIT>\": pieces[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": pieces[\"<STR_LIT>\"], \"<STR_LIT:error>\": None,<EOL>\"<STR_LIT:date>\": pieces.get(\"<STR_LIT:date>\")}<EOL>", "docstring": "Render the given version pieces into the requested style.", "id": "f3620:m18"}
{"signature": "def write_to_version_file(filename, versions):", "body": "os.unlink(filename)<EOL>contents = json.dumps(versions, sort_keys=True,<EOL>indent=<NUM_LIT:1>, separators=(\"<STR_LIT:U+002C>\", \"<STR_LIT>\"))<EOL>with open(filename, \"<STR_LIT:w>\") as f:<EOL><INDENT>f.write(SHORT_VERSION_PY % contents)<EOL><DEDENT>print(\"<STR_LIT>\" % (filename, versions[\"<STR_LIT:version>\"]))<EOL>", "docstring": "Write the given version number to the given _version.py file.", "id": "f3620:m10"}
{"signature": "def render_git_describe_long(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL><DEDENT>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG-DISTANCE-gHEX[-dirty].\n\n    Like 'git describe --tags --dirty --always -long'.\n    The distance/hash is unconditional.\n\n    Exceptions:\n    1: no tags. HEX[-dirty]  (note: no 'g' prefix)", "id": "f3620:m17"}
{"signature": "def render_pep440_pre(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[.post.devDISTANCE] -- No -dirty.\n\n    Exceptions:\n    1: no tags. 0.post.devDISTANCE", "id": "f3620:m13"}
{"signature": "def get_config_from_root(root):", "body": "<EOL>setup_cfg = os.path.join(root, \"<STR_LIT>\")<EOL>parser = configparser.SafeConfigParser()<EOL>with open(setup_cfg, \"<STR_LIT:r>\") as f:<EOL><INDENT>parser.readfp(f)<EOL><DEDENT>VCS = parser.get(\"<STR_LIT>\", \"<STR_LIT>\")  <EOL>def get(parser, name):<EOL><INDENT>if parser.has_option(\"<STR_LIT>\", name):<EOL><INDENT>return parser.get(\"<STR_LIT>\", name)<EOL><DEDENT>return None<EOL><DEDENT>cfg = VersioneerConfig()<EOL>cfg.VCS = VCS<EOL>cfg.style = get(parser, \"<STR_LIT>\") or \"<STR_LIT>\"<EOL>cfg.versionfile_source = get(parser, \"<STR_LIT>\")<EOL>cfg.versionfile_build = get(parser, \"<STR_LIT>\")<EOL>cfg.tag_prefix = get(parser, \"<STR_LIT>\")<EOL>if cfg.tag_prefix in (\"<STR_LIT>\", '<STR_LIT>'):<EOL><INDENT>cfg.tag_prefix = \"<STR_LIT>\"<EOL><DEDENT>cfg.parentdir_prefix = get(parser, \"<STR_LIT>\")<EOL>cfg.verbose = get(parser, \"<STR_LIT>\")<EOL>return cfg<EOL>", "docstring": "Read the project setup.cfg file to determine Versioneer config.", "id": "f3620:m1"}
{"signature": "def get_versions(verbose=False):", "body": "if \"<STR_LIT>\" in sys.modules:<EOL><INDENT>del sys.modules[\"<STR_LIT>\"]<EOL><DEDENT>root = get_root()<EOL>cfg = get_config_from_root(root)<EOL>assert cfg.VCS is not None, \"<STR_LIT>\"<EOL>handlers = HANDLERS.get(cfg.VCS)<EOL>assert handlers, \"<STR_LIT>\" % cfg.VCS<EOL>verbose = verbose or cfg.verbose<EOL>assert cfg.versionfile_source is not None,\"<STR_LIT>\"<EOL>assert cfg.tag_prefix is not None, \"<STR_LIT>\"<EOL>versionfile_abs = os.path.join(root, cfg.versionfile_source)<EOL>get_keywords_f = handlers.get(\"<STR_LIT>\")<EOL>from_keywords_f = handlers.get(\"<STR_LIT>\")<EOL>if get_keywords_f and from_keywords_f:<EOL><INDENT>try:<EOL><INDENT>keywords = get_keywords_f(versionfile_abs)<EOL>ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % ver)<EOL><DEDENT>return ver<EOL><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>try:<EOL><INDENT>ver = versions_from_file(versionfile_abs)<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % (versionfile_abs, ver))<EOL><DEDENT>return ver<EOL><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT>from_vcs_f = handlers.get(\"<STR_LIT>\")<EOL>if from_vcs_f:<EOL><INDENT>try:<EOL><INDENT>pieces = from_vcs_f(cfg.tag_prefix, root, verbose)<EOL>ver = render(pieces, cfg.style)<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % ver)<EOL><DEDENT>return ver<EOL><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>try:<EOL><INDENT>if cfg.parentdir_prefix:<EOL><INDENT>ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % ver)<EOL><DEDENT>return ver<EOL><DEDENT><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\", \"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None, \"<STR_LIT:error>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:date>\": 
None}<EOL>", "docstring": "Get the project version from whatever source is available.\n\n    Returns dict with two keys: 'version' and 'full'.", "id": "f3620:m19"}
{"signature": "def _default_json_default(obj):", "body": "if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):<EOL><INDENT>return obj.strftime(default_date_fmt)<EOL><DEDENT>else:<EOL><INDENT>return str(obj)<EOL><DEDENT>", "docstring": "Coerce everything to strings.\n    All objects representing time get output according to default_date_fmt.", "id": "f3624:m0"}
{"signature": "@_log_fn()<EOL>def notice(**kwargs):", "body": "pass<EOL>", "docstring": "log with pyzlog level NOTICE", "id": "f3624:m6"}
{"signature": "def init_logs(self, path=None, target=None, level=None,<EOL>server_hostname=None, extra=None):", "body": "path = path if path is not None else self.path<EOL>target = target if target is not None else self.target<EOL>level = level if level is not None else logging.DEBUG<EOL>server_hostname = (server_hostname if server_hostname is not None<EOL>else '<STR_LIT:localhost>')<EOL>extra = extra if extra is not None else {'<STR_LIT>': None}<EOL>init_logs(path=path, target=target, level=level,<EOL>server_hostname=server_hostname, fields=extra)<EOL>", "docstring": "Simple canned way to initialize pyzlog.\n\n        Initialize pyslog for tests. If path or target are not\n        specified, will default to path and target properties on the\n        object. leve will default to logging.DEBUG, server_hostname\n        defaults to localhost, and extra defaults to {'extra': None}\n\n        :param path: path to find the log file\n        :param target: name of the log file\n        :param level: log level for this instance\n        :param server_hostname: hostname to put in each entry\n        :param extra: whitelist/defaults of extra fields to add to each entry\n        :type path: string\n        :type target: string\n        :type level: int\n        :type server_hostname: string\n        :type extra: dict", "id": "f3624:c0:m2"}
{"signature": "@_log_fn()<EOL>def critical(**kwargs):", "body": "pass<EOL>", "docstring": "log with pyzlog level CRITICAL", "id": "f3624:m10"}
{"signature": "@_log_fn()<EOL>def emergency(**kwargs):", "body": "pass<EOL>", "docstring": "log with pyzlog level EMERGENCY", "id": "f3624:m4"}
{"signature": "def init_logs(path=None,<EOL>target=None,<EOL>logger_name='<STR_LIT:root>',<EOL>level=logging.DEBUG,<EOL>maxBytes=<NUM_LIT:1>*<NUM_LIT>*<NUM_LIT>,<EOL>backupCount=<NUM_LIT:5>,<EOL>application_name='<STR_LIT:default>',<EOL>server_hostname=None,<EOL>fields=None):", "body": "log_file = os.path.abspath(<EOL>os.path.join(path, target))<EOL>logger = logging.getLogger(logger_name)<EOL>logger.setLevel(level)<EOL>handler = logging.handlers.RotatingFileHandler(<EOL>log_file, maxBytes=maxBytes, backupCount=backupCount)<EOL>handler.setLevel(level)<EOL>handler.setFormatter(<EOL>JsonFormatter(<EOL>application_name=application_name,<EOL>server_hostname=server_hostname,<EOL>fields=fields))<EOL>logger.addHandler(handler)<EOL>", "docstring": "Initialize the zlogger.\n\n    Sets up a rotating file handler to the specified path and file with\n    the given size and backup count limits, sets the default\n    application_name, server_hostname, and default/whitelist fields.\n\n    :param path: path to write the log file\n    :param target: name of the log file\n    :param logger_name: name of the logger (defaults to root)\n    :param level: log level for this logger (defaults to logging.DEBUG)\n    :param maxBytes: size of the file before rotation (default 1MB)\n    :param application_name: app name to add to each log entry\n    :param server_hostname: hostname to add to each log entry\n    :param fields: default/whitelist fields.\n    :type path: string\n    :type target: string\n    :type logger_name: string\n    :type level: int\n    :type maxBytes: int\n    :type backupCount: int\n    :type application_name: string\n    :type server_hostname: string\n    :type fields: dict", "id": "f3624:m1"}
{"signature": "def render_to_response(self, context, **response_kwargs):", "body": "if self.request.is_ajax():<EOL><INDENT>template = self.page_template<EOL><DEDENT>else:<EOL><INDENT>template = self.get_template_names()<EOL><DEDENT>return self.response_class(<EOL>request=self.request,<EOL>template=template,<EOL>context=context,<EOL>**response_kwargs<EOL>)<EOL>", "docstring": "Returns a response with a template depending if the request is ajax \nor not and it renders with the given context.", "id": "f3630:c2:m3"}
{"signature": "def aggregate_history(self, ip, days_limit=None):", "body": "first_date = None<EOL>last_date = None<EOL>prec_asn = None<EOL>prec_block = None<EOL>for entry in self.history(ip, days_limit):<EOL><INDENT>if entry is None:<EOL><INDENT>continue<EOL><DEDENT>date, asn, block = entry<EOL>if first_date is None:<EOL><INDENT>last_date = date<EOL>first_date = date<EOL>prec_asn = asn<EOL>prec_block = block<EOL><DEDENT>elif prec_asn == asn and prec_block == block:<EOL><INDENT>first_date = date<EOL><DEDENT>else:<EOL><INDENT>yield first_date, last_date, prec_asn, prec_block<EOL>last_date = date<EOL>first_date = date<EOL>prec_asn = asn<EOL>prec_block = block<EOL><DEDENT><DEDENT>if first_date is not None:<EOL><INDENT>yield first_date, last_date, prec_asn, prec_block<EOL><DEDENT>", "docstring": "Get the full history of an IP, aggregate the result instead of\nreturning one line per day.\n\n:param ip: IP address to search for\n:param days_limit: Max amount of days to query. (None means no limit)\n\n:rtype: list. For each change: FirstDay, LastDay, ASN, Block", "id": "f3639:c0:m9"}
{"signature": "def downloadURL(url):", "body": "urlretrieve(url, os.path.join(c.raw_data, path_temp_bviewfile))<EOL>os.rename(os.path.join(c.raw_data, path_temp_bviewfile), c.path_bviewfile)<EOL>", "docstring": "Inconditianilly download the URL in a temporary directory.\nWhen finished, the file is moved in the real directory.\nLike this an other process will not attempt to extract an inclomplete file.", "id": "f3641:m1"}
{"signature": "def downloadURL(url, filename):", "body": "path_temp_bviewfile = os.path.join(c.raw_data, c.bview_dir, '<STR_LIT>', filename)<EOL>path_bviewfile = os.path.join(c.raw_data, c.bview_dir, filename)<EOL>try:<EOL><INDENT>f = urlopen(url)<EOL><DEDENT>except:<EOL><INDENT>return False<EOL><DEDENT>if f.getcode() != <NUM_LIT:200>:<EOL><INDENT>publisher.warning('<STR_LIT>'.format(url, f.getcode()))<EOL>return False<EOL><DEDENT>try:<EOL><INDENT>with open(path_temp_bviewfile, '<STR_LIT:w>') as outfile:<EOL><INDENT>outfile.write(f.read())<EOL><DEDENT>os.rename(path_temp_bviewfile, path_bviewfile)<EOL><DEDENT>except:<EOL><INDENT>os.remove(path_temp_bviewfile)<EOL>return False<EOL><DEDENT>return True<EOL>", "docstring": "Inconditianilly download the URL in a temporary directory.\nWhen finished, the file is moved in the real directory.\nLike this an other process will not attempt to extract an inclomplete file.", "id": "f3643:m1"}
{"signature": "def already_downloaded(filename):", "body": "cur_file = os.path.join(c.bview_dir, filename)<EOL>old_file = os.path.join(c.bview_dir, '<STR_LIT>', filename)<EOL>if not os.path.exists(cur_file) and not os.path.exists(old_file):<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Verify that the file has not already been downloaded.", "id": "f3643:m2"}
{"signature": "def run_splitted_processing(max_simultaneous_processes, process_name,<EOL>filenames):", "body": "pids = []<EOL>while len(filenames) > <NUM_LIT:0>:<EOL><INDENT>while len(filenames) > <NUM_LIT:0> and len(pids) < max_simultaneous_processes:<EOL><INDENT>filename = filenames.pop()<EOL>pids.append(service_start(service=process_name,<EOL>param=['<STR_LIT>', filename, '<STR_LIT>',<EOL>imported_day]))<EOL><DEDENT>while len(pids) == max_simultaneous_processes:<EOL><INDENT>time.sleep(sleep_timer)<EOL>pids = update_running_pids(pids)<EOL><DEDENT><DEDENT>while len(pids) > <NUM_LIT:0>:<EOL><INDENT>time.sleep(sleep_timer)<EOL>pids = update_running_pids(pids)<EOL><DEDENT>", "docstring": "Run processes which push the routing dump of the RIPE in a redis\ndatabase.\nThe dump has been splitted in multiple files and each process run\non one of this files.", "id": "f3646:m3"}
{"signature": "@app.route('<STR_LIT>', methods=['<STR_LIT:POST>'])<EOL>def __entry_point():", "body": "ip = request.remote_addr<EOL>ua = request.headers.get('<STR_LIT>', '<STR_LIT>')<EOL>method = request.json.get('<STR_LIT>')<EOL>if method is None:<EOL><INDENT>__query_logging(ip, ua, method, level='<STR_LIT>')<EOL>return json.dumps({'<STR_LIT:error>': '<STR_LIT>'})<EOL><DEDENT>if method not in authorized_methods:<EOL><INDENT>__query_logging(ip, ua, method, level='<STR_LIT>')<EOL>return json.dumps({'<STR_LIT:error>': '<STR_LIT>'})<EOL><DEDENT>fct = globals().get(method)<EOL>if fct is None:<EOL><INDENT>__query_logging(ip, ua, method, level='<STR_LIT>')<EOL>return json.dumps({'<STR_LIT:error>': '<STR_LIT>'})<EOL><DEDENT>if request.json.get('<STR_LIT>') is None:<EOL><INDENT>__query_logging(ip, ua, method, level='<STR_LIT>')<EOL>return json.dumps({'<STR_LIT:error>': '<STR_LIT>'})<EOL><DEDENT>try:<EOL><INDENT>result = fct(request.json)<EOL>__query_logging(ip, ua, method, request.json.get('<STR_LIT>'),<EOL>request.json.get('<STR_LIT>'), request.json.get('<STR_LIT>'))<EOL>return result<EOL><DEDENT>except Exception:<EOL><INDENT>__query_logging(ip, ua, method, request.json.get('<STR_LIT>'), level='<STR_LIT:error>')<EOL>return json.dumps({'<STR_LIT:error>': '<STR_LIT>'})<EOL><DEDENT>", "docstring": "Function called when an query is made on /json. Expects a JSON\nobject with at least a 'method' entry.", "id": "f3650:m2"}
{"signature": "def compile(fmt, names=None):", "body": "if names is None:<EOL><INDENT>return CompiledFormat(fmt)<EOL><DEDENT>else:<EOL><INDENT>return CompiledFormatDict(fmt, names)<EOL><DEDENT>", "docstring": "Compile given format string `fmt` and return a compiled format\n    object that can be used to pack and/or unpack data multiple times.\n\n    Returns a :class:`~bitstruct.CompiledFormat` object if `names` is\n    ``None``, and otherwise a :class:`~bitstruct.CompiledFormatDict`\n    object.\n\n    See :func:`~bitstruct.pack_dict()` for details on `names`.", "id": "f3652:m13"}
{"signature": "def pack_into(fmt, buf, offset, *args, **kwargs):", "body": "return CompiledFormat(fmt).pack_into(buf,<EOL>offset,<EOL>*args,<EOL>**kwargs)<EOL>", "docstring": "Pack given values v1, v2, ... into given bytearray `buf`, starting\n    at given bit offset `offset`. Pack according to given format\n    string `fmt`. Give `fill_padding` as ``False`` to leave padding\n    bits in `buf` unmodified.", "id": "f3652:m5"}
{"signature": "def pack(fmt, *args):", "body": "return CompiledFormat(fmt).pack(*args)<EOL>", "docstring": "Return a bytes object containing the values v1, v2, ... packed\n    according to given format string `fmt`. If the total number of\n    bits are not a multiple of 8, padding will be added at the end of\n    the last byte.\n\n    `fmt` is a string of bitorder-type-length groups, and optionally a\n    byteorder identifier after the groups. Bitorder and byteorder may\n    be omitted.\n\n    Bitorder is either ``>`` or ``<``, where ``>`` means MSB first and\n    ``<`` means LSB first. If bitorder is omitted, the previous\n    values' bitorder is used for the current value. For example, in\n    the format string ``'u1<u2u3'``, ``u1`` is MSB first and both\n    ``u2`` and ``u3`` are LSB first.\n\n    Byteorder is either ``>`` or ``<``, where ``>`` means most\n    significant byte first and ``<`` means least significant byte\n    first. If byteorder is omitted, most significant byte first is\n    used.\n\n    There are eight types; ``u``, ``s``, ``f``, ``b``, ``t``, ``r``,\n    ``p`` and ``P``.\n\n    - ``u`` -- unsigned integer\n    - ``s`` -- signed integer\n    - ``f`` -- floating point number of 16, 32, or 64 bits\n    - ``b`` -- boolean\n    - ``t`` -- text (ascii or utf-8)\n    - ``r`` -- raw, bytes\n    - ``p`` -- padding with zeros, ignore\n    - ``P`` -- padding with ones, ignore\n\n    Length is the number of bits to pack the value into.\n\n    Example format string with default bit and byte ordering:\n    ``'u1u3p7s16'``\n\n    Same format string, but with least significant byte first:\n    ``'u1u3p7s16<'``\n\n    Same format string, but with LSB first (``<`` prefix) and least\n    significant byte first (``<`` suffix): ``'<u1u3p7s16<'``\n\n    It is allowed to separate groups with a single space for better\n    readability.", "id": "f3652:m3"}
{"signature": "def pack_into(self, buf, offset, *args, **kwargs):", "body": "<EOL>if len(args) < self._number_of_arguments:<EOL><INDENT>raise Error(<EOL>\"<STR_LIT>\".format(<EOL>self._number_of_arguments,<EOL>len(args)))<EOL><DEDENT>self.pack_into_any(buf, offset, args, **kwargs)<EOL>", "docstring": "See :func:`~bitstruct.pack_into()`.", "id": "f3652:c12:m3"}
{"signature": "def unpack_from(fmt, data, offset=<NUM_LIT:0>):", "body": "return CompiledFormat(fmt).unpack_from(data, offset)<EOL>", "docstring": "Unpack `data` (bytes or bytearray) according to given format string\n    `fmt`, starting at given bit offset `offset`. The result is a\n    tuple even if it contains exactly one item.", "id": "f3652:m6"}
{"signature": "def unpack_from_dict(fmt, names, data, offset=<NUM_LIT:0>):", "body": "return CompiledFormatDict(fmt, names).unpack_from(data, offset)<EOL>", "docstring": "Same as :func:`~bitstruct.unpack_from_dict()`, but returns a\n    dictionary.\n\n    See :func:`~bitstruct.pack_dict()` for details on `names`.", "id": "f3652:m10"}
{"signature": "def make_seekable(fileobj):", "body": "if sys.version_info < (<NUM_LIT:3>, <NUM_LIT:0>) and isinstance(fileobj, file):<EOL><INDENT>filename = fileobj.name<EOL>fileobj = io.FileIO(fileobj.fileno(), closefd=False)<EOL>fileobj.name = filename<EOL><DEDENT>assert isinstance(fileobj, io.IOBase),\"<STR_LIT>\"% type(fileobj)<EOL>return fileobj if fileobj.seekable()else ArchiveTemp(fileobj)<EOL>", "docstring": "If the file-object is not seekable, return  ArchiveTemp of the fileobject,\notherwise return the file-object itself", "id": "f3671:m0"}
{"signature": "def __init__(self, field, *byfields):", "body": "fieldstrs = []<EOL>if len(byfields) == <NUM_LIT:1> and isinstance(byfields[<NUM_LIT:0>], type) andissubclass(byfields[<NUM_LIT:0>], SortDirection):<EOL><INDENT>byfields = [byfields[<NUM_LIT:0>](field)]<EOL><DEDENT>for f in byfields:<EOL><INDENT>fieldstrs += [f.field, f.DIRSTRING]<EOL><DEDENT>args = [field]<EOL>if fieldstrs:<EOL><INDENT>args += ['<STR_LIT>'] + fieldstrs<EOL><DEDENT>super(first_value, self).__init__(*args)<EOL>self._field = field<EOL>", "docstring": "Selects the first value of the given field within the group.\n\n### Parameter\n\n- **field**: Source field used for the value\n- **byfields**: How to sort the results. This can be either the\n    *class* of `aggregation.Asc` or `aggregation.Desc` in which\n    case the field `field` is also used as the sort input.\n\n    `byfields` can also be one or more *instances* of `Asc` or `Desc`\n    indicating the sort order for these fields", "id": "f3676:c11:m0"}
{"signature": "def drop_index(self):", "body": "return self.redis.execute_command(self.DROP_CMD, self.index_name)<EOL>", "docstring": "Drop the index if it exists", "id": "f3679:c5:m3"}
{"signature": "def search(self, query):", "body": "args, query = self._mk_query_args(query)<EOL>st = time.time()<EOL>res = self.redis.execute_command(self.SEARCH_CMD, *args)<EOL>return Result(res,<EOL>not query._no_content,<EOL>duration=(time.time() - st) * <NUM_LIT>,<EOL>has_payload=query._with_payloads)<EOL>", "docstring": "Search the index for a given query, and return a result of documents\n\n### Parameters\n\n- **query**: the search query. Either a text for simple queries with default parameters, or a Query object for complex queries.\n             See RediSearch's documentation on query format\n- **snippet_sizes**: A dictionary of {field: snippet_size} used to trim and format the result. e.g.e {'body': 500}", "id": "f3679:c5:m10"}
{"signature": "def add_document(self, doc_id, nosave=False, score=<NUM_LIT:1.0>, payload=None,<EOL>replace=False, partial=False, language=None, **fields):", "body": "return self._add_document(doc_id, conn=None, nosave=nosave, score=score, <EOL>payload=payload, replace=replace,<EOL>partial=partial, language=language, **fields)<EOL>", "docstring": "Add a single document to the index.\n\n### Parameters\n\n- **doc_id**: the id of the saved document.\n- **nosave**: if set to true, we just index the document, and don't save a copy of it. This means that searches will just return ids.\n- **score**: the document ranking, between 0.0 and 1.0 \n- **payload**: optional inner-index payload we can save for fast access in scoring functions\n- **replace**: if True, and the document already is in the index, we perform an update and reindex the document\n- **partial**: if True, the fields specified will be added to the existing document.\n               This has the added benefit that any fields specified with `no_index`\n               will not be reindexed again. Implies `replace`\n- **language**: Specify the language used for document tokenization.\n- **fields** kwargs dictionary of the document fields to be saved and/or indexed. \n             NOTE: Geo points shoule be encoded as strings of \"lon,lat\"", "id": "f3679:c5:m5"}
{"signature": "def in_order(self):", "body": "self._in_order = True<EOL>return self<EOL>", "docstring": "Match only documents where the query terms appear in the same order in the document.\ni.e. for the query 'hello world', we do not match 'world hello'", "id": "f3680:c0:m9"}
{"signature": "def slop(self, slop):", "body": "self._slop = slop<EOL>return self<EOL>", "docstring": "Allow a masimum of N intervening non matched terms between phrase terms (0 means exact phrase)", "id": "f3680:c0:m8"}
{"signature": "def paging(self, offset, num):", "body": "self._offset = offset<EOL>self._num = num<EOL>return self<EOL>", "docstring": "Set the paging for the query (defaults to 0..10).\n\n- **offset**: Paging offset for the results. Defaults to 0\n- **num**: How many results do we want", "id": "f3680:c0:m11"}
{"signature": "def get_args(self):", "body": "args = [self._query_string]<EOL>if self._no_content:<EOL><INDENT>args.append('<STR_LIT>')<EOL><DEDENT>if self._fields:<EOL><INDENT>args.append('<STR_LIT>')<EOL>args.append(len(self._fields))<EOL>args += self._fields<EOL><DEDENT>if self._verbatim:<EOL><INDENT>args.append('<STR_LIT>')<EOL><DEDENT>if self._no_stopwords:<EOL><INDENT>args.append('<STR_LIT>')<EOL><DEDENT>if self._filters:<EOL><INDENT>for flt in self._filters:<EOL><INDENT>assert isinstance(flt, Filter)<EOL>args += flt.args<EOL><DEDENT><DEDENT>if self._with_payloads:<EOL><INDENT>args.append('<STR_LIT>')<EOL><DEDENT>if self._ids:<EOL><INDENT>args.append('<STR_LIT>')<EOL>args.append(len(self._ids))<EOL>args += self._ids<EOL><DEDENT>if self._slop >= <NUM_LIT:0>:<EOL><INDENT>args += ['<STR_LIT>', self._slop]<EOL><DEDENT>if self._in_order:<EOL><INDENT>args.append('<STR_LIT>')<EOL><DEDENT>if self._return_fields:<EOL><INDENT>args.append('<STR_LIT>')<EOL>args.append(len(self._return_fields))<EOL>args += self._return_fields<EOL><DEDENT>if self._sortby:<EOL><INDENT>assert isinstance(self._sortby, SortbyField)<EOL>args.append('<STR_LIT>')<EOL>args += self._sortby.args<EOL><DEDENT>if self._language:<EOL><INDENT>args += ['<STR_LIT>', self._language]<EOL><DEDENT>args += self._summarize_fields + self._highlight_fields<EOL>args += [\"<STR_LIT>\", self._offset, self._num]<EOL>return args<EOL>", "docstring": "Format the redis arguments for this query and return them", "id": "f3680:c0:m10"}
{"signature": "def summarize(self, fields=None, context_len=None, num_frags=None, sep=None):", "body": "args = ['<STR_LIT>']<EOL>fields = self._mk_field_list(fields)<EOL>if fields:<EOL><INDENT>args += ['<STR_LIT>', str(len(fields))] + fields<EOL><DEDENT>if context_len is not None:<EOL><INDENT>args += ['<STR_LIT>', str(context_len)]<EOL><DEDENT>if num_frags is not None:<EOL><INDENT>args += ['<STR_LIT>', str(num_frags)]<EOL><DEDENT>if sep is not None:<EOL><INDENT>args += ['<STR_LIT>', sep]<EOL><DEDENT>self._summarize_fields = args<EOL>return self<EOL>", "docstring": "Return an abridged format of the field, containing only the segments of\nthe field which contain the matching term(s).\n\nIf `fields` is specified, then only the mentioned fields are\nsummarized; otherwise all results are summarized.\n\nServer side defaults are used for each option (except `fields`) if not specified\n\n- **fields** List of fields to summarize. All fields are summarized if not specified\n- **context_len** Amount of context to include with each fragment\n- **num_frags** Number of fragments per document\n- **sep** Separator string to separate fragments", "id": "f3680:c0:m5"}
{"signature": "def add_filter(self, flt):", "body": "self._filters.append(flt)<EOL>return self<EOL>", "docstring": "Add a numeric or geo filter to the query. \n**Currently only one of each filter is supported by the engine**\n\n- **flt**: A NumericFilter or GeoFilter object, used on a corresponding field", "id": "f3680:c0:m17"}
{"signature": "def highlight(self, fields=None, tags=None):", "body": "args = ['<STR_LIT>']<EOL>fields = self._mk_field_list(fields)<EOL>if fields:<EOL><INDENT>args += ['<STR_LIT>', str(len(fields))] + fields<EOL><DEDENT>if tags:<EOL><INDENT>args += ['<STR_LIT>'] + list(tags)<EOL><DEDENT>self._highlight_fields = args<EOL>return self<EOL>", "docstring": "Apply specified markup to matched term(s) within the returned field(s)\n\n- **fields** If specified then only those mentioned fields are highlighted, otherwise all fields are highlighted\n- **tags** A list of two strings to surround the match.", "id": "f3680:c0:m6"}
{"signature": "def __init__(self, query_string):", "body": "self._query_string = query_string<EOL>self._offset = <NUM_LIT:0><EOL>self._num = <NUM_LIT:10><EOL>self._no_content = False<EOL>self._no_stopwords = False<EOL>self._fields = None<EOL>self._verbatim = False<EOL>self._with_payloads = False<EOL>self._filters = list()<EOL>self._ids = None<EOL>self._slop = -<NUM_LIT:1><EOL>self._in_order = False<EOL>self._sortby = None<EOL>self._return_fields = []<EOL>self._summarize_fields = []<EOL>self._highlight_fields = []<EOL>self._language = None<EOL>", "docstring": "Create a new query object. \nThe query string is set in the constructor, and other options have setter functions.", "id": "f3680:c0:m0"}
{"signature": "def query_string(self):", "body": "return self._query_string<EOL>", "docstring": "Return the query string of this query only", "id": "f3680:c0:m1"}
{"signature": "def sort_by(self, field, asc=True):", "body": "self._sortby = SortbyField(field, asc)<EOL>return self<EOL>", "docstring": "Add a sortby field to the query\n\n- **field** - the name of the field to sort by\n- **asc** - when `True`, sorting will be done in asceding order", "id": "f3680:c0:m18"}
{"signature": "def limit_ids(self, *ids):", "body": "self._ids = ids<EOL>return self<EOL>", "docstring": "Limit the results to a specific set of pre-known document ids of any length", "id": "f3680:c0:m2"}
{"signature": "def lt(n):", "body": "return between(None, n, inclusive_max=False)<EOL>", "docstring": "Match any value less than n", "id": "f3682:m3"}
{"signature": "def delete(self, string):", "body": "return self.redis.execute_command(AutoCompleter.SUGDEL_COMMAND, self.key, string)<EOL>", "docstring": "Delete a string from the AutoCompleter index.\nReturns 1 if the string was found and deleted, 0 otherwise", "id": "f3684:c2:m3"}
{"signature": "def __init__(self, key, host='<STR_LIT:localhost>', port=<NUM_LIT>, conn = None):", "body": "self.key = key<EOL>self.redis = conn if conn is not None else Redis(<EOL>connection_pool = ConnectionPool(host=host, port=port))<EOL>", "docstring": "Create a new AutoCompleter client for the given key, and optional host and port\n\nIf conn is not None, we employ an already existing redis connection", "id": "f3684:c2:m0"}
{"signature": "def load(self, *fields):", "body": "self._loadfields.extend(fields)<EOL>return self<EOL>", "docstring": "Indicate the fields to be returned in the response. These fields are\nreturned in addition to any others implicitly specified.\n\n### Parameters\n\n- **fields**: One or more fields in the format of `@field`", "id": "f3685:c6:m1"}
{"signature": "def sort_by(self, *fields, **kwargs):", "body": "self._max = kwargs.get('<STR_LIT>', <NUM_LIT:0>)<EOL>if isinstance(fields, (string_types, SortDirection)):<EOL><INDENT>fields = [fields]<EOL><DEDENT>for f in fields:<EOL><INDENT>if isinstance(f, SortDirection):<EOL><INDENT>self._sortby += [f.field, f.DIRSTRING]<EOL><DEDENT>else:<EOL><INDENT>self._sortby.append(f)<EOL><DEDENT><DEDENT>return self<EOL>", "docstring": "Indicate how the results should be sorted. This can also be used for\n*top-N* style queries\n\n### Parameters\n\n- **fields**: The fields by which to sort. This can be either a single\n    field or a list of fields. If you wish to specify order, you can\n    use the `Asc` or `Desc` wrapper classes.\n- **max**: Maximum number of results to return. This can be used instead\n    of `LIMIT` and is also faster.\n\n\nExample of sorting by `foo` ascending and `bar` descending:\n\n```\nsort_by(Asc('@foo'), Desc('@bar'))\n```\n\nReturn the top 10 customers:\n\n```\nAggregateRequest()\\\n    .group_by('@customer', r.sum('@paid').alias(FIELDNAME))\\\n    .sort_by(Desc('@paid'), max=10)\n```", "id": "f3685:c6:m5"}
{"signature": "@require_template_debug<EOL>@register.simple_tag<EOL>def attributes(var):", "body": "attrs = get_attributes(var)<EOL>pprint(attrs)<EOL>return attrs<EOL>", "docstring": "Given a variable in the template's context, print and return the list of\nattributes thare accessible inside of the template. For example, private\nattributes or callables that require arguments are excluded.", "id": "f3701:m3"}
{"signature": "@require_template_debug<EOL>@register.simple_tag(takes_context=True)<EOL>def set_trace(context):", "body": "try:<EOL><INDENT>import ipdb as pdb<EOL><DEDENT>except ImportError:<EOL><INDENT>import pdb<EOL>print(\"<STR_LIT>\")<EOL><DEDENT>print(\"<STR_LIT>\")<EOL>render = lambda s: template.Template(s).render(context)<EOL>availables = get_variables(context)<EOL>pprint(availables)<EOL>print('<STR_LIT>')<EOL>print('<STR_LIT>')<EOL>print('<STR_LIT>')<EOL>for var in availables:<EOL><INDENT>locals()[var] = context[var]<EOL><DEDENT>pdb.set_trace()<EOL>return '<STR_LIT>'<EOL>", "docstring": "Start a pdb set_trace inside of the template with the context available as\n'context'. Uses ipdb if available.", "id": "f3701:m5"}
{"signature": "def _display_details(var_data):", "body": "meta_keys = (key for key in list(var_data.keys())<EOL>if key.startswith('<STR_LIT>'))<EOL>for key in meta_keys:<EOL><INDENT>display_key = key[<NUM_LIT:5>:].capitalize()<EOL>pprint('<STR_LIT>'.format(display_key, var_data.pop(key)))<EOL><DEDENT>pprint(var_data)<EOL>", "docstring": "Given a dictionary of variable attribute data from get_details display the\ndata in the terminal.", "id": "f3701:m1"}
{"signature": "def get_attributes(var):", "body": "is_valid = partial(is_valid_in_template, var)<EOL>return list(filter(is_valid, dir(var)))<EOL>", "docstring": "Given a varaible, return the list of attributes that are available inside\nof a template", "id": "f3703:m4"}
{"signature": "def _get_detail_value(var, attr):", "body": "value = getattr(var, attr)<EOL>kls = getattr(getattr(value, '<STR_LIT>', '<STR_LIT>'), '<STR_LIT>', '<STR_LIT>')<EOL>if kls in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>return kls<EOL><DEDENT>if callable(value):<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return value<EOL>", "docstring": "Given a variable and one of its attributes that are available inside of\na template, return its 'method' if it is a callable, its class name if it\nis a model manager, otherwise return its value", "id": "f3703:m3"}
{"signature": "def get_details(var):", "body": "var_data = {}<EOL>module = getattr(var, '<STR_LIT>', '<STR_LIT>')<EOL>kls = getattr(getattr(var, '<STR_LIT>', '<STR_LIT>'), '<STR_LIT>', '<STR_LIT>')<EOL>if module:<EOL><INDENT>var_data['<STR_LIT>'] = module<EOL><DEDENT>if kls:<EOL><INDENT>var_data['<STR_LIT>'] = kls<EOL><DEDENT>for attr in get_attributes(var):<EOL><INDENT>value = _get_detail_value(var, attr)<EOL>if value is not None:<EOL><INDENT>var_data[attr] = value<EOL><DEDENT><DEDENT>return var_data<EOL>", "docstring": "Given a variable inside the context, obtain the attributes/callables,\ntheir values where possible, and the module name and class name if possible", "id": "f3703:m2"}
{"signature": "def _flatten(iterable):", "body": "for i in iterable:<EOL><INDENT>if isinstance(i, Iterable) and not isinstance(i, string_types):<EOL><INDENT>for sub_i in _flatten(i):<EOL><INDENT>yield sub_i<EOL><DEDENT><DEDENT>else:<EOL><INDENT>yield i<EOL><DEDENT><DEDENT>", "docstring": "Given an iterable with nested iterables, generate a flat iterable", "id": "f3703:m0"}
{"signature": "def volcano(differential_dfs, title='<STR_LIT>', scripts_mode=\"<STR_LIT>\", data_mode=\"<STR_LIT>\",<EOL>organism=\"<STR_LIT>\", q_value_column_name=\"<STR_LIT:q>\", log2FC_column_name=\"<STR_LIT>\",<EOL>output_dir=\"<STR_LIT:.>\", filename=\"<STR_LIT>\", version=this_version):", "body": "output_dir = Path(output_dir)<EOL>output_dir.mkdir(exist_ok=True, parents=True)<EOL>if isinstance(differential_dfs, pd.DataFrame):<EOL><INDENT>differential_dfs = {'<STR_LIT>': differential_dfs}<EOL><DEDENT>for name, df in differential_dfs.items():<EOL><INDENT>df = df[[q_value_column_name, log2FC_column_name]]<EOL>df.columns = ['<STR_LIT:q>', '<STR_LIT>']<EOL>df = df.round(<NUM_LIT:2>)<EOL>_verify_differential_df(df)<EOL>del differential_dfs[name]<EOL>differential_dfs[_sanitize(name)] = df<EOL><DEDENT>names_and_differentials = f\"<STR_LIT>\"<EOL>data_block = _data_block(data_mode, [('<STR_LIT>', names_and_differentials)], output_dir, include_gene_sets=False, organism=organism)<EOL>scripts = third_party_scripts + [CDN_url(version)+\"<STR_LIT>\", CDN_url(version)+\"<STR_LIT>\", CDN_url(version)+\"<STR_LIT>\"]<EOL>scripts_block = _scripts_block(scripts, scripts_mode, output_dir)<EOL>html = templateEnv.get_template('<STR_LIT>').render(title=title, scripts_block=scripts_block+'<STR_LIT:\\n>'+data_block, organism=\"<STR_LIT>\")<EOL>(output_dir / filename).write_text(html)<EOL>return (output_dir / filename).resolve()<EOL>", "docstring": "Arguments:\n    differential_dfs (dict or pandas.DataFrame): python dict of names to pandas dataframes, or a single dataframe, indexed by gene symbols which must have columns named log2FC and qval.\n    title (str): The title of the plot (to be embedded in the html).\n    scripts_mode (str): Choose from [`\"CDN\"`, `\"directory\"`, `\"inline\"`]:\n\n        - `\"CDN\"` compiles a single HTML page with links to scripts hosted on a CDN,\n\n        - `\"directory\"` compiles a directory with all scripts locally cached,\n\n        - 
`\"inline\"` compiles a single HTML file with all scripts/styles inlined.\n\n    data_mode (str): Choose from [\"directory\", \"inline\"]:\n\n        - \"directory\" compiles a directory with all data locally cached,\n\n        - \"inline\" compiles a single HTML file with all data inlined.\n\n    organism (str): `\"human\"` or `\"mouse\"`\n    q_value_column_name (str):\n    log2FC_column_name (str):\n    output_dir (str): the directory in which to output the file\n    filename (str): the filename of the output file\n    version (str): the version of the javascripts to use.\n        Leave the default to pin the version, or choose \"latest\" to get updates,\n        or choose part of the version string to get minor updates.\nReturns:\n    Path: The filepath which the html was outputted to.", "id": "f3712:m9"}
{"signature": "def heatmap(genes_by_samples_matrix, sample_attributes, title='<STR_LIT>', scripts_mode=\"<STR_LIT>\", data_mode=\"<STR_LIT>\",<EOL>organism=\"<STR_LIT>\", separate_zscore_by=[\"<STR_LIT>\"],<EOL>output_dir=\"<STR_LIT:.>\", filename=\"<STR_LIT>\", version=this_version):", "body": "output_dir = Path(output_dir)<EOL>output_dir.mkdir(exist_ok=True, parents=True)<EOL>_verify_sample_by_genes_matrix(genes_by_samples_matrix)<EOL>_verify_sample_attributes(genes_by_samples_matrix, sample_attributes)<EOL>genes_by_samples_matrix = genes_by_samples_matrix.round(<NUM_LIT:2>)<EOL>matrix = f\"<STR_LIT>\"<EOL>classes = f\"<STR_LIT>\"<EOL>data_block = _data_block(data_mode, [('<STR_LIT>', matrix), ('<STR_LIT>', classes)], output_dir, organism=organism)<EOL>scripts = third_party_scripts + [CDN_url(version)+\"<STR_LIT>\", CDN_url(version)+\"<STR_LIT>\", CDN_url(version)+\"<STR_LIT>\"]<EOL>scripts_block = _scripts_block(scripts, scripts_mode, output_dir)<EOL>html = templateEnv.get_template('<STR_LIT>').render(title=title, scripts_block=scripts_block+'<STR_LIT:\\n>'+data_block, separate_zscore_by=separate_zscore_by)<EOL>(output_dir / filename).write_text(html)<EOL>return (output_dir / filename).resolve()<EOL>", "docstring": "Arguments:\n    genes_by_samples_matrix (pandas.DataFrame): dataframe indexed by genes, columns are samples\n    sample_attributes (pandas.DataFrame): dataframe indexed by samples, columns are sample attributes (e.g. 
classes)\n    title (str): The title of the plot (to be embedded in the html).\n    scripts_mode (str): Choose from [`\"CDN\"`, `\"directory\"`, `\"inline\"`]:\n\n        - `\"CDN\"` compiles a single HTML page with links to scripts hosted on a CDN,\n\n        - `\"directory\"` compiles a directory with all scripts locally cached,\n\n        - `\"inline\"` compiles a single HTML file with all scripts/styles inlined.\n\n    data_mode (str): Choose from [\"directory\", \"inline\"]:\n\n        - \"directory\" compiles a directory with all data locally cached,\n\n        - \"inline\" compiles a single HTML file with all data inlined.\n\n    organism (str): `\"human\"` or `\"mouse\"`\n    separate_zscore_by (list):\n    output_dir (str): the directory in which to output the file\n    filename (str): the filename of the output file\n    version (str): the version of the javascripts to use.\n        Leave the default to pin the version, or choose \"latest\" to get updates,\n        or choose part of the version string to get minor updates.\nReturns:\n    Path: The filepath which the html was outputted to.", "id": "f3712:m12"}
{"signature": "def graph(networkx_graph, title='<STR_LIT>', scripts_mode=\"<STR_LIT>\", data_mode=\"<STR_LIT>\",<EOL>output_dir=\"<STR_LIT:.>\", filename=\"<STR_LIT>\", version=this_version):", "body": "output_dir = Path(output_dir)<EOL>output_dir.mkdir(exist_ok=True, parents=True)<EOL>scripts = third_party_scripts + [CDN_url(version)+\"<STR_LIT>\", CDN_url(version)+\"<STR_LIT>\"]<EOL>scripts_block = _scripts_block(scripts, scripts_mode, output_dir)<EOL>graph_json = nx_json.node_link_data(networkx_graph)<EOL>for node in graph_json['<STR_LIT>']:<EOL><INDENT>for attr, val in node.items():<EOL><INDENT>if isinstance(val, numbers.Number):<EOL><INDENT>node[attr] = round(val, <NUM_LIT:2>)<EOL><DEDENT><DEDENT><DEDENT>for link in graph_json['<STR_LIT>']:<EOL><INDENT>for attr, val in link.items():<EOL><INDENT>if isinstance(val, numbers.Number):<EOL><INDENT>link[attr] = round(val, <NUM_LIT:2>)<EOL><DEDENT><DEDENT><DEDENT>graph_json = f\"<STR_LIT>\"<EOL>data_block = _data_block(data_mode, [('<STR_LIT>', graph_json)], output_dir)<EOL>html = templateEnv.get_template('<STR_LIT>').render(title=title, scripts_block=scripts_block+'<STR_LIT:\\n>'+data_block, nodes=networkx_graph.nodes())<EOL>(output_dir / filename).write_text(html)<EOL>return (output_dir / filename).resolve()<EOL>", "docstring": "Arguments:\n    networkx_graph (networkx.Graph): any instance of networkx.Graph\n    title (str): The title of the plot (to be embedded in the html).\n    scripts_mode (str): Choose from [`\"CDN\"`, `\"directory\"`, `\"inline\"`]:\n\n        - `\"CDN\"` compiles a single HTML page with links to scripts hosted on a CDN,\n\n        - `\"directory\"` compiles a directory with all scripts locally cached,\n\n        - `\"inline\"` compiles a single HTML file with all scripts/styles inlined.\n\n    data_mode (str): Choose from [\"directory\", \"inline\"]:\n\n        - \"directory\" compiles a directory with all data locally cached,\n\n        - \"inline\" compiles a single HTML file with all data 
inlined.\n\n    output_dir (str): the directory in which to output the file\n    filename (str): the filename of the output file\n    version (str): the version of the javascripts to use.\n        Leave the default to pin the version, or choose \"latest\" to get updates,\n        or choose part of the version string to get minor updates.\nReturns:\n    Path: The filepath which the html was outputted to.", "id": "f3712:m13"}
{"signature": "def version(self):", "body": "ver = Version()<EOL>ver.conn = self.conn<EOL>ver.attrs = {<EOL>'<STR_LIT>': self.attrs['<STR_LIT:id>'],<EOL>}<EOL>ver.save()<EOL>return ver<EOL>", "docstring": "Create a new version under this service.", "id": "f3722:c1:m2"}
{"signature": "def vcl(self, name, content):", "body": "vcl = VCL()<EOL>vcl.conn = self.conn<EOL>vcl.attrs = {<EOL>'<STR_LIT>': self.attrs['<STR_LIT>'],<EOL>'<STR_LIT:version>': self.attrs['<STR_LIT>'],<EOL>'<STR_LIT:name>': name,<EOL>'<STR_LIT:content>': content,<EOL>}<EOL>vcl.save()<EOL>return vcl<EOL>", "docstring": "Create a new VCL under this version.", "id": "f3722:c2:m8"}
{"signature": "def update(dst, src):", "body": "stack = [(dst, src)]<EOL>def isdict(o):<EOL><INDENT>return hasattr(o, '<STR_LIT>')<EOL><DEDENT>while stack:<EOL><INDENT>current_dst, current_src = stack.pop()<EOL>for key in current_src:<EOL><INDENT>if key not in current_dst:<EOL><INDENT>current_dst[key] = current_src[key]<EOL><DEDENT>else:<EOL><INDENT>if isdict(current_src[key]) and isdict(current_dst[key]):<EOL><INDENT>stack.append((current_dst[key], current_src[key]))<EOL><DEDENT>else:<EOL><INDENT>current_dst[key] = current_src[key]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return dst<EOL>", "docstring": "Recursively update the destination dict-like object with the source dict-like object.\n\n    Useful for merging options and Bunches together!\n\n    Based on:\n    http://code.activestate.com/recipes/499335-recursively-update-a-dictionary-without-hitting-py/#c1", "id": "f3724:m6"}
{"signature": "def rmDirPatterns(*patterns, **kwargs):", "body": "kwargs['<STR_LIT:action>'] = '<STR_LIT>'<EOL>kwargs['<STR_LIT>'] = '<STR_LIT>'<EOL>return _walkWithAction(*patterns, **kwargs)<EOL>", "docstring": "Remove all directories under the current path with the given patterns.", "id": "f3724:m3"}
{"signature": "def pip_install(*args):", "body": "download_cache = ('<STR_LIT>' % options.paved.pip.download_cache) if options.paved.pip.download_cache else '<STR_LIT>'<EOL>shv('<STR_LIT>' % (download_cache, '<STR_LIT:U+0020>'.join(args)))<EOL>", "docstring": "Send the given arguments to `pip install`.", "id": "f3724:m7"}
{"signature": "@task<EOL>def shell(info):", "body": "cmd = '<STR_LIT>'<EOL>try:<EOL><INDENT>import django_extensions<EOL>cmd = '<STR_LIT>'<EOL><DEDENT>except ImportError:<EOL><INDENT>info(\"<STR_LIT>\")<EOL><DEDENT>call_manage(cmd)<EOL>", "docstring": "Run the ipython shell. Shorthand for `paver manage shell`.\n\n    Uses `django_extensions <http://pypi.python.org/pypi/django-extensions/0.5>`, if\n    available, to provide `shell_plus`.", "id": "f3725:m4"}
{"signature": "@task<EOL>@consume_args<EOL>def schema(args):", "body": "try:<EOL><INDENT>import south<EOL>cmd = args and '<STR_LIT>' % '<STR_LIT:U+0020>'.join(options.args) or '<STR_LIT>'<EOL>call_manage(cmd)<EOL><DEDENT>except ImportError:<EOL><INDENT>error('<STR_LIT>')<EOL><DEDENT>", "docstring": "Run South's schemamigration command.", "id": "f3725:m6"}
{"signature": "@task<EOL>def pychecker():", "body": "<EOL>packages = [x for x in options.setup.packages if '<STR_LIT:.>' not in x]<EOL>sh('<STR_LIT>'.format(param=options.paved.pycheck.pychecker.param, files='<STR_LIT:U+0020>'.join(packages)))<EOL>", "docstring": "check of python programs by pychecker.\n\n    requirements:\n     - pychecker_ should be installed.\n\n    options.paved.pycheck.pychecker.param\n\n    .. _pychecker: http://pychecker.sourceforge.net/", "id": "f3726:m3"}
{"signature": "@task<EOL>def findimports():", "body": "<EOL>packages = [x for x in options.setup.packages if '<STR_LIT:.>' not in x]<EOL>sh('<STR_LIT>'.format(param=options.paved.pycheck.findimports.param, files='<STR_LIT:U+0020>'.join(packages)))<EOL>", "docstring": "print python module dependencies by findimports.\n\n    requirements:\n     - findimports_ should be installed. ``easy_install findimports``\n\n    options.paved.pycheck.findimports.param\n\n    .. _findimports: http://pypi.python.org/pypi/findimports", "id": "f3726:m1"}
{"signature": "@task<EOL>def nose():", "body": "sh('<STR_LIT>'.format(param=options.paved.pycheck.nose.param))<EOL>", "docstring": "Run unit tests using nosetests.\n\n    requirements:\n     - nose_ should be installed.\n\n    options.paved.pycheck.nose.param\n\n    .. _nose: http://somethingaboutorange.com/mrl/projects/nose/1.0.0/", "id": "f3726:m4"}
{"signature": "@task<EOL>@needs('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def pycheckall():", "body": "", "docstring": "All pycheck tasks.", "id": "f3726:m5"}
{"signature": "@task<EOL>@needs('<STR_LIT>')<EOL>def clean(options, info):", "body": "info(\"<STR_LIT>\", options.paved.clean.patterns)<EOL>for wd in options.paved.clean.dirs:<EOL><INDENT>info(\"<STR_LIT>\", wd)<EOL>for p in options.paved.clean.patterns:<EOL><INDENT>for f in wd.walkfiles(p):<EOL><INDENT>f.remove()<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Clean up extra files littering the source tree.\n\n    options.paved.clean.dirs: directories to search recursively\n    options.paved.clean.patterns: patterns to search for and remove", "id": "f3727:m0"}
{"signature": "@task<EOL>def printoptions():", "body": "x = json.dumps(environment.options,<EOL>indent=<NUM_LIT:4>,<EOL>sort_keys=True,<EOL>skipkeys=True,<EOL>cls=MyEncoder)<EOL>print(x)<EOL>", "docstring": "print paver options.\n\n    Prettified by json.\n    `long_description` is removed", "id": "f3727:m1"}
{"signature": "def open_s3(bucket):", "body": "conn = boto.connect_s3(options.paved.s3.access_id, options.paved.s3.secret)<EOL>try:<EOL><INDENT>bucket = conn.get_bucket(bucket)<EOL><DEDENT>except boto.exception.S3ResponseError:<EOL><INDENT>bucket = conn.create_bucket(bucket)<EOL><DEDENT>return bucket<EOL>", "docstring": "Opens connection to S3 returning bucket and key", "id": "f3729:m0"}
{"signature": "def download_s3(bucket_name, file_key, file_path, force=False):", "body": "file_path = path(file_path)<EOL>bucket = open_s3(bucket_name)<EOL>file_dir = file_path.dirname()<EOL>file_dir.makedirs()<EOL>s3_key = bucket.get_key(file_key)<EOL>if file_path.exists():<EOL><INDENT>file_data = file_path.bytes()<EOL>file_md5, file_md5_64 = s3_key.get_md5_from_hexdigest(hashlib.md5(file_data).hexdigest())<EOL>try:<EOL><INDENT>s3_md5 = s3_key.etag.replace('<STR_LIT:\">', '<STR_LIT>')<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if s3_md5 == file_md5:<EOL><INDENT>info('<STR_LIT>' % file_path)<EOL>return<EOL><DEDENT>elif not force:<EOL><INDENT>s3_datetime = datetime.datetime(*time.strptime(<EOL>s3_key.last_modified, '<STR_LIT>')[<NUM_LIT:0>:<NUM_LIT:6>])<EOL>local_datetime = datetime.datetime.utcfromtimestamp(file_path.stat().st_mtime)<EOL>if s3_datetime < local_datetime:<EOL><INDENT>info(\"<STR_LIT>\" % (file_key))<EOL>return<EOL><DEDENT><DEDENT><DEDENT><DEDENT>info(\"<STR_LIT>\" % (file_key))<EOL>try:<EOL><INDENT>with open(file_path, '<STR_LIT:w>') as fo:<EOL><INDENT>s3_key.get_contents_to_file(fo)<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>error(\"<STR_LIT>\" % e)<EOL>raise<EOL><DEDENT>", "docstring": "Download a remote file from S3.", "id": "f3729:m2"}
{"signature": "def sphinx_make(*targets):", "body": "sh('<STR_LIT>' % '<STR_LIT:U+0020>'.join(targets), cwd=options.paved.docs.path)<EOL>", "docstring": "Call the Sphinx Makefile with the specified targets.\n\n    `options.paved.docs.path`: the path to the Sphinx folder (where the Makefile resides).", "id": "f3730:m0"}
{"signature": "@task<EOL>def docs():", "body": "sphinx_make(*options.paved.docs.targets)<EOL>", "docstring": "Make Sphinx docs.\n\n    `options.paved.docs.path`: the path to the Sphinx folder (where the Makefile resides).\n\n    `options.paved.docs.targets`: the Make targets to send to `sphinx_make`. Default is `html`.", "id": "f3730:m1"}
{"signature": "@task<EOL>@needs('<STR_LIT>')<EOL>def rsync_docs():", "body": "assert options.paved.docs.rsync_location, \"<STR_LIT>\"<EOL>sh('<STR_LIT>' % (path(options.paved.docs.path) / options.paved.docs.build_rel,<EOL>options.paved.docs.rsync_location))<EOL>", "docstring": "Upload the docs to a remote location via rsync.\n\n    `options.paved.docs.rsync_location`: the target location to rsync files to.\n\n    `options.paved.docs.path`: the path to the Sphinx folder (where the Makefile resides).\n\n    `options.paved.docs.build_rel`: the path of the documentation\n        build folder, relative to `options.paved.docs.path`.", "id": "f3730:m3"}
{"signature": "@task<EOL>@consume_args<EOL>def pip_install(args):", "body": "util.pip_install(*args)<EOL>", "docstring": "Send the given arguments to `pip install`.", "id": "f3732:m0"}
{"signature": "@task<EOL>def easy_install(args):", "body": "util.easy_install(*args)<EOL>", "docstring": "Send the given arguments to `easy_install`.", "id": "f3732:m1"}
{"signature": "def validate(self, messages):", "body": "messages = self.validate_creators(messages)<EOL>messages = self.validate_created(messages)<EOL>return messages<EOL>", "docstring": "Returns True if the fields are valid according to the SPDX standard.\n        Appends user friendly messages to the messages parameter.", "id": "f3735:c4:m6"}
{"signature": "def load_license_list(file_name):", "body": "licenses_map = {}<EOL>with codecs.open(file_name, '<STR_LIT:rb>', encoding='<STR_LIT:utf-8>') as lics:<EOL><INDENT>licenses = json.load(lics)<EOL>version = licenses['<STR_LIT>'].split('<STR_LIT:.>')<EOL>for lic in licenses['<STR_LIT>']:<EOL><INDENT>if lic.get('<STR_LIT>'):<EOL><INDENT>continue<EOL><DEDENT>name = lic['<STR_LIT:name>']<EOL>identifier = lic['<STR_LIT>']<EOL>licenses_map[name] = identifier<EOL>licenses_map[identifier] = name<EOL><DEDENT><DEDENT>return version, licenses_map<EOL>", "docstring": "Return the licenses list version tuple and a mapping of licenses\nname->id and id->name loaded from a JSON file\nfrom https://github.com/spdx/license-list-data", "id": "f3736:m0"}
{"signature": "def _add_parens(required, text):", "body": "return '<STR_LIT>'.format(text) if required else text<EOL>", "docstring": "Add parens around a license expression if `required` is True, otherwise\nreturn `text` unmodified.", "id": "f3738:m0"}
{"signature": "def validate(self, messages):", "body": "messages = self.validate_version(messages)<EOL>messages = self.validate_data_lics(messages)<EOL>messages = self.validate_name(messages)<EOL>messages = self.validate_spdx_id(messages)<EOL>messages = self.validate_namespace(messages)<EOL>messages = self.validate_ext_document_references(messages)<EOL>messages = self.validate_creation_info(messages)<EOL>messages = self.validate_package(messages)<EOL>messages = self.validate_extracted_licenses(messages)<EOL>messages = self.validate_reviews(messages)<EOL>return messages<EOL>", "docstring": "Validate all fields of the document and update the\nmessages list with user friendly error messages for display.", "id": "f3738:c5:m8"}
{"signature": "@classmethod<EOL><INDENT>def from_identifier(cls, identifier):<DEDENT>", "body": "if identifier in config.LICENSE_MAP.keys():<EOL><INDENT>return cls(config.LICENSE_MAP[identifier], identifier)<EOL><DEDENT>else:<EOL><INDENT>return cls(identifier, identifier)<EOL><DEDENT>", "docstring": "If identifier exists in config.LICENSE_MAP\n        the full_name is retrieved from it. Otherwise\n        the full_name is the same as the identifier.", "id": "f3738:c1:m1"}
{"signature": "@classmethod<EOL><INDENT>def from_full_name(cls, full_name):<DEDENT>", "body": "if full_name in config.LICENSE_MAP.keys():<EOL><INDENT>return cls(full_name, config.LICENSE_MAP[full_name])<EOL><DEDENT>else:<EOL><INDENT>return cls(full_name, full_name)<EOL><DEDENT>", "docstring": "Returna new License for a full_name. If the full_name exists in\nconfig.LICENSE_MAP the identifier is retrieved from it.\nOtherwise the identifier is the same as the full_name.", "id": "f3738:c1:m2"}
{"signature": "def validate(self, messages):", "body": "messages = self.validate_ext_doc_id(messages)<EOL>messages = self.validate_spdx_doc_uri(messages)<EOL>messages = self.validate_checksum(messages)<EOL>return messages<EOL>", "docstring": "Validate all fields of the ExternalDocumentRef class and update the\nmessages list with user friendly error messages for display.", "id": "f3738:c0:m3"}
{"signature": "def tv_to_rdf(infile_name, outfile_name):", "body": "parser = Parser(Builder(), StandardLogger())<EOL>parser.build()<EOL>with open(infile_name) as infile:<EOL><INDENT>data = infile.read()<EOL>document, error = parser.parse(data)<EOL>if not error:<EOL><INDENT>with open(outfile_name, mode='<STR_LIT:w>') as outfile:<EOL><INDENT>write_document(document, outfile)<EOL><DEDENT>return True<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL>messages = []<EOL>document.validate(messages)<EOL>print('<STR_LIT:\\n>'.join(messages))<EOL>return False<EOL><DEDENT><DEDENT>", "docstring": "Convert a SPDX file from tag/value format to RDF format.\nReturn True on sucess, False otherwise.", "id": "f3740:m0"}
{"signature": "def create_doc(self):", "body": "doc_node = URIRef('<STR_LIT>')<EOL>self.graph.add((doc_node, RDF.type, self.spdx_namespace.SpdxDocument))<EOL>vers_literal = Literal(str(self.document.version))<EOL>self.graph.add((doc_node, self.spdx_namespace.specVersion, vers_literal))<EOL>data_lics = URIRef(self.document.data_license.url)<EOL>self.graph.add((doc_node, self.spdx_namespace.dataLicense, data_lics))<EOL>doc_name = URIRef(self.document.name)<EOL>self.graph.add((doc_node, self.spdx_namespace.name, doc_name))<EOL>return doc_node<EOL>", "docstring": "Add and return the root document node to graph.", "id": "f3742:c8:m1"}
{"signature": "def __init__(self, document, out):", "body": "super(Writer, self).__init__(document, out)<EOL>", "docstring": "- document is spdx.document instance that will be written.\n- out is a file-like object that will be written to.", "id": "f3742:c8:m0"}
{"signature": "def add_file_dependencies(self):", "body": "for doc_file in self.document.files:<EOL><INDENT>self.add_file_dependencies_helper(doc_file)<EOL><DEDENT>", "docstring": "Add file dependencies to the graph.\nCalled after all files have been added.", "id": "f3742:c2:m4"}
{"signature": "def create_conjunction_node(self, conjunction):", "body": "node = BNode()<EOL>type_triple = (node, RDF.type, self.spdx_namespace.ConjunctiveLicenseSet)<EOL>self.graph.add(type_triple)<EOL>licenses = self.licenses_from_tree(conjunction)<EOL>for lic in licenses:<EOL><INDENT>member_triple = (node, self.spdx_namespace.member, lic)<EOL>self.graph.add(member_triple)<EOL><DEDENT>return node<EOL>", "docstring": "Return a node representing a conjunction of licenses.", "id": "f3742:c1:m3"}
{"signature": "def create_extracted_license(self, lic):", "body": "licenses = list(self.graph.triples((None, self.spdx_namespace.licenseId, lic.identifier)))<EOL>if len(licenses) != <NUM_LIT:0>:<EOL><INDENT>return licenses[<NUM_LIT:0>][<NUM_LIT:0>]  <EOL><DEDENT>else:<EOL><INDENT>license_node = BNode()<EOL>type_triple = (license_node, RDF.type, self.spdx_namespace.ExtractedLicensingInfo)<EOL>self.graph.add(type_triple)<EOL>ident_triple = (license_node, self.spdx_namespace.licenseId, Literal(lic.identifier))<EOL>self.graph.add(ident_triple)<EOL>text_triple = (license_node, self.spdx_namespace.extractedText, Literal(lic.text))<EOL>self.graph.add(text_triple)<EOL>if lic.full_name is not None:<EOL><INDENT>name_triple = (license_node, self.spdx_namespace.licenseName, self.to_special_value(lic.full_name))<EOL>self.graph.add(name_triple)<EOL><DEDENT>for ref in lic.cross_ref:<EOL><INDENT>triple = (license_node, RDFS.seeAlso, URIRef(ref))<EOL>self.graph.add(triple)<EOL><DEDENT>if lic.comment is not None:<EOL><INDENT>comment_triple = (license_node, RDFS.comment, Literal(lic.comment))<EOL>self.graph.add(comment_triple)<EOL><DEDENT>return license_node<EOL><DEDENT>", "docstring": "Handle extracted license.\nReturn the license node.", "id": "f3742:c1:m6"}
{"signature": "def create_review_node(self, review):", "body": "review_node = BNode()<EOL>type_triple = (review_node, RDF.type, self.spdx_namespace.Review)<EOL>self.graph.add(type_triple)<EOL>reviewer_node = Literal(review.reviewer.to_value())<EOL>self.graph.add((review_node, self.spdx_namespace.reviewer, reviewer_node))<EOL>reviewed_date_node = Literal(review.review_date_iso_format)<EOL>reviewed_triple = (review_node, self.spdx_namespace.reviewDate, reviewed_date_node)<EOL>self.graph.add(reviewed_triple)<EOL>if review.has_comment:<EOL><INDENT>comment_node = Literal(review.comment)<EOL>comment_triple = (review_node, RDFS.comment, comment_node)<EOL>self.graph.add(comment_triple)<EOL><DEDENT>return review_node<EOL>", "docstring": "Return a review node.", "id": "f3742:c3:m1"}
{"signature": "def handle_package_has_file_helper(self, pkg_file):", "body": "nodes = list(self.graph.triples((None, self.spdx_namespace.fileName, Literal(pkg_file.name))))<EOL>if len(nodes) == <NUM_LIT:1>:<EOL><INDENT>return nodes[<NUM_LIT:0>][<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>raise InvalidDocumentError('<STR_LIT>' +<EOL>'<STR_LIT>'.format(pkg_file.name))<EOL><DEDENT>", "docstring": "Return node representing pkg_file\npkg_file should be instance of spdx.file.", "id": "f3742:c7:m6"}
{"signature": "def reviews(self):", "body": "return map(self.create_review_node, self.document.reviews)<EOL>", "docstring": "Returns a list of review nodes", "id": "f3742:c3:m2"}
{"signature": "def to_special_value(self, value):", "body": "if isinstance(value, utils.NoAssert):<EOL><INDENT>return self.spdx_namespace.noassertion<EOL><DEDENT>elif isinstance(value, utils.SPDXNone):<EOL><INDENT>return self.spdx_namespace.none<EOL><DEDENT>else:<EOL><INDENT>return Literal(value)<EOL><DEDENT>", "docstring": "Return proper spdx term or Literal", "id": "f3742:c0:m2"}
{"signature": "def write_document(document, out, validate=True):", "body": "if validate:<EOL><INDENT>messages = []<EOL>messages = document.validate(messages)<EOL>if messages:<EOL><INDENT>raise InvalidDocumentError(messages)<EOL><DEDENT><DEDENT>writer = Writer(document, out)<EOL>writer.write()<EOL>", "docstring": "Write an SPDX RDF document.\n- document - spdx.document instance.\n- out - file like object that will be written to.\nOptionally `validate` the document before writing and raise\nInvalidDocumentError if document.validate returns False.", "id": "f3742:m0"}
{"signature": "def files(self):", "body": "return map(self.create_file_node, self.document.files)<EOL>", "docstring": "Return list of file nodes.", "id": "f3742:c2:m2"}
{"signature": "def creators(self):", "body": "return map(lambda c: Literal(c.to_value()), self.document.creation_info.creators)<EOL>", "docstring": "Return a list of creator nodes.\nNote: Does not add anything to the graph.", "id": "f3742:c5:m1"}
{"signature": "def create_file_node(self, doc_file):", "body": "file_node = URIRef('<STR_LIT>'.format(<EOL>id=str(doc_file.spdx_id)))<EOL>type_triple = (file_node, RDF.type, self.spdx_namespace.File)<EOL>self.graph.add(type_triple)<EOL>name_triple = (file_node, self.spdx_namespace.fileName, Literal(doc_file.name))<EOL>self.graph.add(name_triple)<EOL>if doc_file.has_optional_field('<STR_LIT>'):<EOL><INDENT>comment_triple = (file_node, RDFS.comment, Literal(doc_file.comment))<EOL>self.graph.add(comment_triple)<EOL><DEDENT>if doc_file.has_optional_field('<STR_LIT:type>'):<EOL><INDENT>ftype = self.spdx_namespace[self.FILE_TYPES[doc_file.type]]<EOL>ftype_triple = (file_node, self.spdx_namespace.fileType, ftype)<EOL>self.graph.add(ftype_triple)<EOL><DEDENT>self.graph.add((file_node, self.spdx_namespace.checksum, self.create_checksum_node(doc_file.chk_sum)))<EOL>conc_lic_node = self.license_or_special(doc_file.conc_lics)<EOL>conc_lic_triple = (file_node, self.spdx_namespace.licenseConcluded, conc_lic_node)<EOL>self.graph.add(conc_lic_triple)<EOL>license_info_nodes = map(self.license_or_special, doc_file.licenses_in_file)<EOL>for lic in license_info_nodes:<EOL><INDENT>triple = (file_node, self.spdx_namespace.licenseInfoInFile, lic)<EOL>self.graph.add(triple)<EOL><DEDENT>if doc_file.has_optional_field('<STR_LIT>'):<EOL><INDENT>comment_triple = (file_node, self.spdx_namespace.licenseComments, Literal(doc_file.license_comment))<EOL>self.graph.add(comment_triple)<EOL><DEDENT>cr_text_node = self.to_special_value(doc_file.copyright)<EOL>cr_text_triple = (file_node, self.spdx_namespace.copyrightText, cr_text_node)<EOL>self.graph.add(cr_text_triple)<EOL>if doc_file.has_optional_field('<STR_LIT>'):<EOL><INDENT>notice_triple = (file_node, self.spdx_namespace.noticeText, doc_file.notice)<EOL>self.graph.add(notice_triple)<EOL><DEDENT>contrib_nodes = map(lambda c: Literal(c), doc_file.contributors)<EOL>contrib_triples = [(file_node, self.spdx_namespace.fileContributor, node) for node 
in contrib_nodes]<EOL>for triple in contrib_triples:<EOL><INDENT>self.graph.add(triple)<EOL><DEDENT>return file_node<EOL>", "docstring": "Create a node for spdx.file.", "id": "f3742:c2:m1"}
{"signature": "def add_file_dependencies_helper(self, doc_file):", "body": "subj_triples = list(self.graph.triples((None, self.spdx_namespace.fileName, Literal(doc_file.name))))<EOL>if len(subj_triples) != <NUM_LIT:1>:<EOL><INDENT>raise InvalidDocumentError('<STR_LIT>'.format(doc_file.name))<EOL><DEDENT>subject_node = subj_triples[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>for dependency in doc_file.dependencies:<EOL><INDENT>dep_triples = list(self.graph.triples((None, self.spdx_namespace.fileName, Literal(dependency))))<EOL>if len(dep_triples) == <NUM_LIT:1>:<EOL><INDENT>dep_node = dep_triples[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>dep_triple = (subject_node, self.spdx_namespace.fileDependency, dep_node)<EOL>self.graph.add(dep_triple)<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>'.format(doc_file.name, dependency))<EOL><DEDENT><DEDENT>", "docstring": "Handle dependencies for a single file.\n- doc_file - instance of spdx.file.File.", "id": "f3742:c2:m3"}
{"signature": "def write_package(package, out):", "body": "out.write('<STR_LIT>')<EOL>write_value('<STR_LIT>', package.name, out)<EOL>if package.has_optional_field('<STR_LIT:version>'):<EOL><INDENT>write_value('<STR_LIT>', package.version, out)<EOL><DEDENT>write_value('<STR_LIT>', package.download_location, out)<EOL>if package.has_optional_field('<STR_LIT>'):<EOL><INDENT>write_text_value('<STR_LIT>', package.summary, out)<EOL><DEDENT>if package.has_optional_field('<STR_LIT>'):<EOL><INDENT>write_text_value('<STR_LIT>', package.source_info, out)<EOL><DEDENT>if package.has_optional_field('<STR_LIT>'):<EOL><INDENT>write_value('<STR_LIT>', package.file_name, out)<EOL><DEDENT>if package.has_optional_field('<STR_LIT>'):<EOL><INDENT>write_value('<STR_LIT>', package.supplier, out)<EOL><DEDENT>if package.has_optional_field('<STR_LIT>'):<EOL><INDENT>write_value('<STR_LIT>', package.originator, out)<EOL><DEDENT>if package.has_optional_field('<STR_LIT>'):<EOL><INDENT>write_value('<STR_LIT>', package.check_sum.to_tv(), out)<EOL><DEDENT>write_value('<STR_LIT>', format_verif_code(package), out)<EOL>if package.has_optional_field('<STR_LIT:description>'):<EOL><INDENT>write_text_value('<STR_LIT>', package.description, out)<EOL><DEDENT>if isinstance(package.license_declared, (document.LicenseConjunction,<EOL>document.LicenseDisjunction)):<EOL><INDENT>write_value('<STR_LIT>', u'<STR_LIT>'.format(package.license_declared), out)<EOL><DEDENT>else:<EOL><INDENT>write_value('<STR_LIT>', package.license_declared, out)<EOL><DEDENT>if isinstance(package.conc_lics, (document.LicenseConjunction,<EOL>document.LicenseDisjunction)):<EOL><INDENT>write_value('<STR_LIT>', u'<STR_LIT>'.format(package.conc_lics), out)<EOL><DEDENT>else:<EOL><INDENT>write_value('<STR_LIT>', package.conc_lics, out)<EOL><DEDENT>for lics in sorted(package.licenses_from_files):<EOL><INDENT>write_value('<STR_LIT>', lics, out)<EOL><DEDENT>if package.has_optional_field('<STR_LIT>'):<EOL><INDENT>write_text_value('<STR_LIT>', 
package.license_comment, out)<EOL><DEDENT>if isinstance(package.cr_text, six.string_types):<EOL><INDENT>write_text_value('<STR_LIT>', package.cr_text, out)<EOL><DEDENT>else:<EOL><INDENT>write_value('<STR_LIT>', package.cr_text, out)<EOL><DEDENT>if package.has_optional_field('<STR_LIT>'):<EOL><INDENT>write_value('<STR_LIT>', package.homepage, out)<EOL><DEDENT>for spdx_file in sorted(package.files):<EOL><INDENT>write_separators(out)<EOL>write_file(spdx_file, out)<EOL><DEDENT>", "docstring": "Write a package fields to out.", "id": "f3743:m9"}
{"signature": "def write_extracted_licenses(lics, out):", "body": "write_value('<STR_LIT>', lics.identifier, out)<EOL>if lics.full_name is not None:<EOL><INDENT>write_value('<STR_LIT>', lics.full_name, out)<EOL><DEDENT>if lics.comment is not None:<EOL><INDENT>write_text_value('<STR_LIT>', lics.comment, out)<EOL><DEDENT>for xref in sorted(lics.cross_ref):<EOL><INDENT>write_value('<STR_LIT>', xref, out)<EOL><DEDENT>write_text_value('<STR_LIT>', lics.text, out)<EOL>", "docstring": "Write extracted licenses fields to out.", "id": "f3743:m10"}
{"signature": "def write_review(review, out):", "body": "out.write('<STR_LIT>')<EOL>write_value('<STR_LIT>', review.reviewer, out)<EOL>write_value('<STR_LIT>', review.review_date_iso_format, out)<EOL>if review.has_comment:<EOL><INDENT>write_text_value('<STR_LIT>', review.comment, out)<EOL><DEDENT>", "docstring": "Write the fields of a single review to out.", "id": "f3743:m5"}
{"signature": "def write_annotation(annotation, out):", "body": "out.write('<STR_LIT>')<EOL>write_value('<STR_LIT>', annotation.annotator, out)<EOL>write_value('<STR_LIT>', annotation.annotation_date_iso_format, out)<EOL>if annotation.has_comment:<EOL><INDENT>write_text_value('<STR_LIT>', annotation.comment, out)<EOL><DEDENT>write_value('<STR_LIT>', annotation.annotation_type, out)<EOL>write_value('<STR_LIT>', annotation.spdx_id, out)<EOL>", "docstring": "Write the fields of a single annotation to out.", "id": "f3743:m6"}
{"signature": "def add_artifact(self, symbol, value):", "body": "symbol = '<STR_LIT>'.format(symbol)<EOL>artifact = getattr(self, symbol)<EOL>artifact.append(value)<EOL>", "docstring": "Add value as artifact_of_project{symbol}.", "id": "f3745:c1:m6"}
{"signature": "def validate_optional_str_fields(self, messages):", "body": "FIELDS = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT:version>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT:description>'<EOL>]<EOL>messages = self.validate_str_fields(FIELDS, True, messages)<EOL>return messages<EOL>", "docstring": "Fields marked as optional and of type string in class\n        docstring must be of a type that provides __str__ method.", "id": "f3746:c0:m8"}
{"signature": "def validate_str_fields(self, fields, optional, messages):", "body": "for field_str in fields:<EOL><INDENT>field = getattr(self, field_str)<EOL>if field is not None:<EOL><INDENT>attr = getattr(field, '<STR_LIT>', None)<EOL>if not callable(attr):<EOL><INDENT>messages = messages + [<EOL>'<STR_LIT>'.format(field)<EOL>]<EOL><DEDENT><DEDENT>elif not optional:<EOL><INDENT>messages = messages + [<EOL>'<STR_LIT>'.format(field_str)<EOL>]<EOL><DEDENT><DEDENT>return messages<EOL>", "docstring": "Helper for validate_mandatory_str_field and\n        validate_optional_str_fields", "id": "f3746:c0:m10"}
{"signature": "def set_file_license_comment(self, doc, text):", "body": "if self.has_package(doc) and self.has_file(doc):<EOL><INDENT>if not self.file_license_comment_set:<EOL><INDENT>self.file_license_comment_set = True<EOL>self.file(doc).license_comment = text<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Raises OrderError if no package or file defined.\nRaises CardinalityError if more than one per file.", "id": "f3750:c5:m2"}
{"signature": "def reset_document(self):", "body": "<EOL>self.doc_version_set = False<EOL>self.doc_comment_set = False<EOL>self.doc_namespace_set = False<EOL>self.doc_data_lics_set = False<EOL>self.doc_name_set = False<EOL>self.doc_spdx_id_set = False<EOL>", "docstring": "Reset the internal state to allow building new document", "id": "f3750:c0:m7"}
{"signature": "def set_pkg_source_info(self, doc, text):", "body": "self.assert_package_exists()<EOL>if not self.package_source_info_set:<EOL><INDENT>self.package_source_info_set = True<EOL>doc.package.source_info = text<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the package's source information, if not already set.\n        text - Free form text.\n        Raises CardinalityError if already defined.\n        Raises OrderError if no package previously defined.", "id": "f3750:c4:m2"}
{"signature": "def set_doc_comment(self, doc, comment):", "body": "if not self.doc_comment_set:<EOL><INDENT>self.doc_comment_set = True<EOL>doc.comment = comment<EOL><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets document comment, Raises CardinalityError if\n        comment already set.", "id": "f3750:c0:m5"}
{"signature": "def set_pkg_chk_sum(self, doc, chk_sum):", "body": "self.assert_package_exists()<EOL>if not self.package_chk_sum_set:<EOL><INDENT>self.package_chk_sum_set = True<EOL>doc.package.check_sum = checksum.Algorithm('<STR_LIT>', chk_sum)<EOL><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the package check sum, if not already set.\n        chk_sum - A string\n        Raises CardinalityError if already defined.\n        Raises OrderError if no package previously defined.", "id": "f3750:c4:m1"}
{"signature": "def set_pkg_cr_text(self, doc, text):", "body": "self.assert_package_exists()<EOL>if not self.package_cr_text_set:<EOL><INDENT>self.package_cr_text_set = True<EOL>doc.package.cr_text = text<EOL><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the package's license comment.\n        Raises OrderError if no package previously defined.\n        Raises CardinalityError if already set.", "id": "f3750:c4:m6"}
{"signature": "def reset(self):", "body": "<EOL>self.reset_creation_info()<EOL>self.reset_document()<EOL>self.reset_package()<EOL>self.reset_file_stat()<EOL>self.reset_reviews()<EOL>self.reset_annotations()<EOL>", "docstring": "Resets builder's state for building new documents.\n        Must be called between usage with different documents.", "id": "f3750:c8:m1"}
{"signature": "def set_chksum(self, doc, chk_sum):", "body": "if chk_sum:<EOL><INDENT>doc.ext_document_references[-<NUM_LIT:1>].check_sum = checksum.Algorithm(<EOL>'<STR_LIT>', chk_sum)<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the external document reference's check sum, if not already set.\nchk_sum - The checksum value in the form of a string.", "id": "f3750:c1:m0"}
{"signature": "def set_file_notice(self, doc, text):", "body": "if self.has_package(doc) and self.has_file(doc):<EOL><INDENT>if not self.file_notice_set:<EOL><INDENT>self.file_notice_set = True<EOL>self.file(doc).notice = tagvaluebuilders.str_from_text(text)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Raises OrderError if no package or file defined.\n        Raises CardinalityError if more than one.", "id": "f3750:c5:m5"}
{"signature": "def set_pkg_desc(self, doc, text):", "body": "self.assert_package_exists()<EOL>if not self.package_desc_set:<EOL><INDENT>self.package_desc_set = True<EOL>doc.package.description = text<EOL><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Set's the package's description.\n        Raises CardinalityError if description already set.\n        Raises OrderError if no package previously defined.", "id": "f3750:c4:m8"}
{"signature": "def set_pkg_verif_code(self, doc, code):", "body": "self.assert_package_exists()<EOL>if not self.package_verif_set:<EOL><INDENT>self.package_verif_set = True<EOL>doc.package.verif_code = code<EOL><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the package verification code, if not already set.\n        code - A string.\n        Raises CardinalityError if already defined.\n        Raises OrderError if no package previously defined.", "id": "f3750:c4:m3"}
{"signature": "def get_extr_lics_comment(self, extr_lics):", "body": "comment_list = list(self.graph.triples(<EOL>(extr_lics, RDFS.comment, None)))<EOL>if len(comment_list) > <NUM_LIT:1> :<EOL><INDENT>self.more_than_one_error('<STR_LIT>')<EOL>return<EOL><DEDENT>elif len(comment_list) == <NUM_LIT:1>:<EOL><INDENT>return comment_list[<NUM_LIT:0>][<NUM_LIT:2>]<EOL><DEDENT>else:<EOL><INDENT>return<EOL><DEDENT>", "docstring": "Return license comment or None.", "id": "f3751:c1:m6"}
{"signature": "def p_file_notice(self, f_term, predicate):", "body": "try:<EOL><INDENT>for _, _, notice in self.graph.triples((f_term, predicate, None)):<EOL><INDENT>self.builder.set_file_notice(self.doc, six.text_type(notice))<EOL><DEDENT><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets file notice text.", "id": "f3751:c3:m5"}
{"signature": "def get_annotation_date(self, r_term):", "body": "annotation_date_list = list(self.graph.triples((r_term, self.spdx_namespace['<STR_LIT>'], None)))<EOL>if len(annotation_date_list) != <NUM_LIT:1>:<EOL><INDENT>self.error = True<EOL>msg = '<STR_LIT>'<EOL>self.logger.log(msg)<EOL>return<EOL><DEDENT>return six.text_type(annotation_date_list[<NUM_LIT:0>][<NUM_LIT:2>])<EOL>", "docstring": "Returns annotation date or None if not found.\n        Reports error on failure.\n        Note does not check value format.", "id": "f3751:c5:m4"}
{"signature": "def p_file_comment(self, f_term, predicate):", "body": "try:<EOL><INDENT>for _, _, comment in self.graph.triples((f_term, predicate, None)):<EOL><INDENT>self.builder.set_file_comment(self.doc, six.text_type(comment))<EOL><DEDENT><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets file comment text.", "id": "f3751:c3:m6"}
{"signature": "def p_file_lic_info(self, f_term, predicate):", "body": "for _, _, info in self.graph.triples((f_term, predicate, None)):<EOL><INDENT>lic = self.handle_lics(info)<EOL>if lic is not None:<EOL><INDENT>self.builder.set_file_license_in_file(self.doc, lic)<EOL><DEDENT><DEDENT>", "docstring": "Sets file license information.", "id": "f3751:c3:m11"}
{"signature": "def get_file_name(self, f_term):", "body": "for _, _, name in self.graph.triples((f_term, self.spdx_namespace['<STR_LIT>'], None)):<EOL><INDENT>return name<EOL><DEDENT>return<EOL>", "docstring": "Returns first found fileName property or None if not found.", "id": "f3751:c3:m2"}
{"signature": "def p_file_contributor(self, f_term, predicate):", "body": "for _, _, contributor in self.graph.triples((f_term, predicate, None)):<EOL><INDENT>self.builder.add_file_contribution(self.doc, six.text_type(contributor))<EOL><DEDENT>", "docstring": "Parse all file contributors and adds them to the model.", "id": "f3751:c3:m4"}
{"signature": "def parse_only_extr_license(self, extr_lic):", "body": "<EOL>ident = self.get_extr_license_ident(extr_lic)<EOL>text = self.get_extr_license_text(extr_lic)<EOL>comment = self.get_extr_lics_comment(extr_lic)<EOL>xrefs = self.get_extr_lics_xref(extr_lic)<EOL>name = self.get_extr_lic_name(extr_lic)<EOL>if not ident:<EOL><INDENT>return<EOL><DEDENT>lic = document.ExtractedLicense(ident)<EOL>if text is not None:<EOL><INDENT>lic.text = text<EOL><DEDENT>if name is not None:<EOL><INDENT>lic.full_name = name<EOL><DEDENT>if comment is not None:<EOL><INDENT>lic.comment = comment<EOL><DEDENT>lic.cross_ref = map(lambda x: six.text_type(x), xrefs)<EOL>return lic<EOL>", "docstring": "Return an ExtractedLicense object to represent a license object.\nBut does not add it to the SPDXDocument model.\nReturn None if failed.", "id": "f3751:c1:m7"}
{"signature": "def handle_pkg_lic(self, p_term, predicate, builder_func):", "body": "try:<EOL><INDENT>for _, _, licenses in self.graph.triples((p_term, predicate, None)):<EOL><INDENT>if (licenses, RDF.type, self.spdx_namespace['<STR_LIT>']) in self.graph:<EOL><INDENT>lics = self.handle_conjunctive_list(licenses)<EOL>builder_func(self.doc, lics)<EOL><DEDENT>elif (licenses, RDF.type, self.spdx_namespace['<STR_LIT>']) in self.graph:<EOL><INDENT>lics = self.handle_disjunctive_list(licenses)<EOL>builder_func(self.doc, lics)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>lics = self.handle_lics(licenses)<EOL>builder_func(self.doc, lics)<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', licenses)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>'.format(predicate))<EOL><DEDENT>", "docstring": "Handles package lics concluded or declared.", "id": "f3751:c2:m8"}
{"signature": "def p_file_comments_on_lics(self, f_term, predicate):", "body": "try:<EOL><INDENT>for _, _, comment in self.graph.triples((f_term, predicate, None)):<EOL><INDENT>self.builder.set_file_license_comment(self.doc, six.text_type(comment))<EOL><DEDENT><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets file license comment.", "id": "f3751:c3:m10"}
{"signature": "def p_file_lic_conc(self, f_term, predicate):", "body": "try:<EOL><INDENT>for _, _, licenses in self.graph.triples((f_term, predicate, None)):<EOL><INDENT>if (licenses, RDF.type, self.spdx_namespace['<STR_LIT>']) in self.graph:<EOL><INDENT>lics = self.handle_conjunctive_list(licenses)<EOL>self.builder.set_concluded_license(self.doc, lics)<EOL><DEDENT>elif (licenses, RDF.type, self.spdx_namespace['<STR_LIT>']) in self.graph:<EOL><INDENT>lics = self.handle_disjunctive_list(licenses)<EOL>self.builder.set_concluded_license(self.doc, lics)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>lics = self.handle_lics(licenses)<EOL>self.builder.set_concluded_license(self.doc, lics)<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', licenses)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>'.format(predicate))<EOL><DEDENT>", "docstring": "Sets file licenses concluded.", "id": "f3751:c3:m15"}
{"signature": "def get_annotation_type(self, r_term):", "body": "for _, _, typ in self.graph.triples((<EOL>r_term, self.spdx_namespace['<STR_LIT>'], None)):<EOL><INDENT>if typ is not None:<EOL><INDENT>return typ<EOL><DEDENT>else:<EOL><INDENT>self.error = True<EOL>msg = '<STR_LIT>'<EOL>self.logger.log(msg)<EOL>return<EOL><DEDENT><DEDENT>", "docstring": "Returns annotation type or None if found none or more than one.\n        Reports errors on failure.", "id": "f3751:c5:m2"}
{"signature": "def get_review_date(self, r_term):", "body": "reviewed_list = list(self.graph.triples((r_term, self.spdx_namespace['<STR_LIT>'], None)))<EOL>if len(reviewed_list) != <NUM_LIT:1>:<EOL><INDENT>self.error = True<EOL>msg = '<STR_LIT>'<EOL>self.logger.log(msg)<EOL>return<EOL><DEDENT>return six.text_type(reviewed_list[<NUM_LIT:0>][<NUM_LIT:2>])<EOL>", "docstring": "Returns review date or None if not found.\n        Reports error on failure.\n        Note does not check value format.", "id": "f3751:c4:m3"}
{"signature": "def get_extr_lic_name(self, extr_lic):", "body": "extr_name_list = list(self.graph.triples((extr_lic, self.spdx_namespace['<STR_LIT>'], None)))<EOL>if len(extr_name_list) > <NUM_LIT:1>:<EOL><INDENT>self.more_than_one_error('<STR_LIT>')<EOL>return<EOL><DEDENT>elif len(extr_name_list) == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>return self.to_special_value(extr_name_list[<NUM_LIT:0>][<NUM_LIT:2>])<EOL>", "docstring": "Return the license name from an ExtractedLicense or None", "id": "f3751:c1:m4"}
{"signature": "def parse_creation_info(self, ci_term):", "body": "for _s, _p, o in self.graph.triples((ci_term, self.spdx_namespace['<STR_LIT>'], None)):<EOL><INDENT>try:<EOL><INDENT>ent = self.builder.create_entity(self.doc, six.text_type(o))<EOL>self.builder.add_creator(self.doc, ent)<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', o)<EOL><DEDENT><DEDENT>for _s, _p, o in self.graph.triples((ci_term, self.spdx_namespace['<STR_LIT>'], None)):<EOL><INDENT>try:<EOL><INDENT>self.builder.set_created_date(self.doc, six.text_type(o))<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', o)<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT>for _s, _p, o in self.graph.triples((ci_term, RDFS.comment, None)):<EOL><INDENT>try:<EOL><INDENT>self.builder.set_creation_comment(self.doc, six.text_type(o))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT>for _s, _p, o in self.graph.triples((ci_term, self.spdx_namespace['<STR_LIT>'], None)):<EOL><INDENT>try:<EOL><INDENT>self.builder.set_lics_list_ver(self.doc, six.text_type(o))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>')<EOL>break<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', o)<EOL><DEDENT><DEDENT>", "docstring": "Parse creators, created and comment.", "id": "f3751:c6:m2"}
{"signature": "def to_special_value(self, value):", "body": "if value == self.spdx_namespace.none:<EOL><INDENT>return utils.SPDXNone()<EOL><DEDENT>elif value == self.spdx_namespace.noassertion:<EOL><INDENT>return utils.NoAssert()<EOL><DEDENT>elif value == self.spdx_namespace.unknown:<EOL><INDENT>return utils.UnKnown()<EOL><DEDENT>else:<EOL><INDENT>return value<EOL><DEDENT>", "docstring": "Checks if value is a special SPDX value such as\n        NONE, NOASSERTION or UNKNOWN if so returns proper model.\n        else returns value", "id": "f3751:c0:m3"}
{"signature": "def parse_doc_fields(self, doc_term):", "body": "try:<EOL><INDENT>self.builder.set_doc_spdx_id(self.doc, doc_term)<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', doc_term)<EOL><DEDENT>try:<EOL><INDENT>if doc_term.count('<STR_LIT:#>', <NUM_LIT:0>, len(doc_term)) <= <NUM_LIT:1>:<EOL><INDENT>doc_namespace = doc_term.split('<STR_LIT:#>')[<NUM_LIT:0>]<EOL>self.builder.set_doc_namespace(self.doc, doc_namespace)<EOL><DEDENT>else:<EOL><INDENT>self.value_error('<STR_LIT>', doc_term)<EOL><DEDENT><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', doc_term)<EOL><DEDENT>for _s, _p, o in self.graph.triples((doc_term, self.spdx_namespace['<STR_LIT>'], None)):<EOL><INDENT>try:<EOL><INDENT>self.builder.set_doc_version(self.doc, six.text_type(o))<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', o)<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT>for _s, _p, o in self.graph.triples((doc_term, self.spdx_namespace['<STR_LIT>'], None)):<EOL><INDENT>try:<EOL><INDENT>self.builder.set_doc_data_lic(self.doc, six.text_type(o))<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', o)<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT>for _s, _p, o in self.graph.triples(<EOL>(doc_term, self.spdx_namespace['<STR_LIT:name>'], None)):<EOL><INDENT>try:<EOL><INDENT>self.builder.set_doc_name(self.doc, six.text_type(o))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT:name>')<EOL>break<EOL><DEDENT><DEDENT>for _s, _p, o in self.graph.triples((doc_term, RDFS.comment, None)):<EOL><INDENT>try:<EOL><INDENT>self.builder.set_doc_comment(self.doc, six.text_type(o))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT>", "docstring": "Parses the version, data license, name, SPDX Identifier, namespace,\n        and comment.", "id": "f3751:c6:m3"}
{"signature": "def more_than_one_error(self, field):", "body": "msg = '<STR_LIT>'.format(field)<EOL>self.logger.log(msg)<EOL>self.error = True<EOL>", "docstring": "Logs a more than one error.\n        field is the field/property that has more than one defined.", "id": "f3751:c0:m1"}
{"signature": "def parse_ext_doc_ref(self, ext_doc_ref_term):", "body": "for _s, _p, o in self.graph.triples(<EOL>(ext_doc_ref_term,<EOL>self.spdx_namespace['<STR_LIT>'],<EOL>None)):<EOL><INDENT>try:<EOL><INDENT>self.builder.set_ext_doc_id(self.doc, six.text_type(o))<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', '<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT>for _s, _p, o in self.graph.triples(<EOL>(ext_doc_ref_term,<EOL>self.spdx_namespace['<STR_LIT>'],<EOL>None)):<EOL><INDENT>try:<EOL><INDENT>self.builder.set_spdx_doc_uri(self.doc, six.text_type(o))<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', '<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT>for _s, _p, checksum in self.graph.triples(<EOL>(ext_doc_ref_term, self.spdx_namespace['<STR_LIT>'], None)):<EOL><INDENT>for _, _, value in self.graph.triples(<EOL>(checksum, self.spdx_namespace['<STR_LIT>'], None)):<EOL><INDENT>try:<EOL><INDENT>self.builder.set_chksum(self.doc, six.text_type(value))<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', '<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Parses the External Document ID, SPDX Document URI and Checksum.", "id": "f3751:c6:m4"}
{"signature": "def p_file_type(self, f_term, predicate):", "body": "try:<EOL><INDENT>for _, _, ftype in self.graph.triples((f_term, predicate, None)):<EOL><INDENT>try:<EOL><INDENT>if ftype.endswith('<STR_LIT>'):<EOL><INDENT>ftype = '<STR_LIT>'<EOL><DEDENT>elif ftype.endswith('<STR_LIT:source>'):<EOL><INDENT>ftype = '<STR_LIT>'<EOL><DEDENT>elif ftype.endswith('<STR_LIT>'):<EOL><INDENT>ftype = '<STR_LIT>'<EOL><DEDENT>elif ftype.endswith('<STR_LIT>'):<EOL><INDENT>ftype = '<STR_LIT>'<EOL><DEDENT>self.builder.set_file_type(self.doc, ftype)<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.value_error('<STR_LIT>', ftype)<EOL><DEDENT><DEDENT><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets file type.", "id": "f3751:c3:m13"}
{"signature": "def p_file_project(self, project):", "body": "for _, _, name in self.graph.triples((project, self.doap_namespace['<STR_LIT:name>'], None)):<EOL><INDENT>self.builder.set_file_atrificat_of_project(self.doc, '<STR_LIT:name>', six.text_type(name))<EOL><DEDENT>for _, _, homepage in self.graph.triples(<EOL>(project, self.doap_namespace['<STR_LIT>'], None)):<EOL><INDENT>self.builder.set_file_atrificat_of_project(self.doc, '<STR_LIT>', six.text_type(homepage))<EOL><DEDENT>", "docstring": "Helper function for parsing doap:project name and homepage.\n        and setting them using the file builder.", "id": "f3751:c3:m8"}
{"signature": "def p_entity_3(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:1>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:1>]<EOL><DEDENT>p[<NUM_LIT:0>] = self.builder.build_person(self.document, value)<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>msg = ERROR_MESSAGES['<STR_LIT>'].format(p[<NUM_LIT:1>], p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>self.error = True<EOL>p[<NUM_LIT:0>] = None<EOL><DEDENT>", "docstring": "entity : PERSON_VALUE", "id": "f3753:c0:m149"}
{"signature": "def p_review_comment_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.add_review_comment(self.document, value)<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "review_comment : REVIEW_COMMENT TEXT", "id": "f3753:c0:m115"}
{"signature": "def p_creator_comment_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "creator_comment : CREATOR_COMMENT error", "id": "f3753:c0:m142"}
{"signature": "def p_file_artificat_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:2>))<EOL>self.logger.log(msg)<EOL>", "docstring": "file_artifact : prj_name_art error", "id": "f3753:c0:m20"}
{"signature": "def p_extr_lic_id_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "extr_lic_id : LICS_ID error", "id": "f3753:c0:m17"}
{"signature": "def p_package_version_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "package_version : PKG_VERSION error", "id": "f3753:c0:m108"}
{"signature": "def p_prj_uri_art_1(self, p):", "body": "try:<EOL><INDENT>self.builder.set_file_atrificat_of_project(self.document,<EOL>'<STR_LIT>', utils.UnKnown())<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "prj_uri_art : ART_PRJ_URI UN_KNOWN", "id": "f3753:c0:m22"}
{"signature": "def p_file_name_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "file_name : FILE_NAME error", "id": "f3753:c0:m52"}
{"signature": "def p_extr_lic_text_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.set_lic_text(self.document, value)<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "extr_lic_text : LICS_TEXT TEXT", "id": "f3753:c0:m14"}
{"signature": "def p_annotation_type_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(<EOL>p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "annotation_type : ANNOTATION_TYPE error", "id": "f3753:c0:m124"}
{"signature": "def p_conc_license_1(self, p):", "body": "p[<NUM_LIT:0>] = utils.NoAssert()<EOL>", "docstring": "conc_license : NO_ASSERT", "id": "f3753:c0:m48"}
{"signature": "def p_extr_lic_name_value_2(self, p):", "body": "p[<NUM_LIT:0>] = utils.NoAssert()<EOL>", "docstring": "extr_lic_name_value : NO_ASSERT", "id": "f3753:c0:m13"}
{"signature": "def p_file_artifact_1(self, p):", "body": "pass<EOL>", "docstring": "file_artifact : prj_name_art file_art_rest\n                         | prj_name_art", "id": "f3753:c0:m19"}
{"signature": "def p_annotation_date_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.add_annotation_date(self.document, value)<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "annotation_date : ANNOTATION_DATE DATE", "id": "f3753:c0:m119"}
{"signature": "def p_ext_doc_refs_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>doc_ref_id = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL>doc_uri = p[<NUM_LIT:3>].decode(encoding='<STR_LIT:utf-8>')<EOL>ext_doc_chksum = p[<NUM_LIT:4>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>doc_ref_id = p[<NUM_LIT:2>]<EOL>doc_uri = p[<NUM_LIT:3>]<EOL>ext_doc_chksum = p[<NUM_LIT:4>]<EOL><DEDENT>self.builder.add_ext_doc_refs(self.document, doc_ref_id, doc_uri,<EOL>ext_doc_chksum)<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:2>))<EOL>self.logger.log(msg)<EOL><DEDENT>", "docstring": "ext_doc_ref : EXT_DOC_REF DOC_REF_ID DOC_URI EXT_DOC_REF_CHKSUM", "id": "f3753:c0:m137"}
{"signature": "def p_entity_2(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:1>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:1>]<EOL><DEDENT>p[<NUM_LIT:0>] = self.builder.build_org(self.document, value)<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>msg = ERROR_MESSAGES['<STR_LIT>'].format(p[<NUM_LIT:1>], p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>self.error = True<EOL>p[<NUM_LIT:0>] = None<EOL><DEDENT>", "docstring": "entity : ORG_VALUE", "id": "f3753:c0:m148"}
{"signature": "def p_pkg_cr_text_1(self, p):", "body": "try:<EOL><INDENT>self.builder.set_pkg_cr_text(self.document, p[<NUM_LIT:2>])<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "pkg_cr_text : PKG_CPY_TEXT pkg_cr_text_value", "id": "f3753:c0:m67"}
{"signature": "def p_creator_comment_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.set_creation_comment(self.document, value)<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "creator_comment : CREATOR_COMMENT TEXT", "id": "f3753:c0:m141"}
{"signature": "def p_file_cr_value_2(self, p):", "body": "p[<NUM_LIT:0>] = utils.SPDXNone()<EOL>", "docstring": "file_cr_value : NONE", "id": "f3753:c0:m39"}
{"signature": "def p_pkg_down_location_1(self, p):", "body": "try:<EOL><INDENT>self.builder.set_pkg_down_location(self.document, p[<NUM_LIT:2>])<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "pkg_down_location : PKG_DOWN pkg_down_value", "id": "f3753:c0:m94"}
{"signature": "def p_annotation_date_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "annotation_date : ANNOTATION_DATE error", "id": "f3753:c0:m120"}
{"signature": "def p_annotation_spdx_id_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(<EOL>p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "annotation_spdx_id : ANNOTATION_SPDX_ID error", "id": "f3753:c0:m126"}
{"signature": "def p_file_cr_text_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "file_cr_text : FILE_CR_TEXT error", "id": "f3753:c0:m37"}
{"signature": "def p_package_name_1(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "package_name : PKG_NAME error", "id": "f3753:c0:m110"}
{"signature": "def p_attrib(self, p):", "body": "pass<EOL>", "docstring": "attrib : spdx_version\n                  | spdx_id\n                  | data_lics\n                  | doc_name\n                  | ext_doc_ref\n                  | doc_comment\n                  | doc_namespace\n                  | creator\n                  | created\n                  | creator_comment\n                  | locs_list_ver\n                  | reviewer\n                  | review_date\n                  | review_comment\n                  | annotator\n                  | annotation_date\n                  | annotation_comment\n                  | annotation_type\n                  | annotation_spdx_id\n                  | package_name\n                  | package_version\n                  | pkg_down_location\n                  | pkg_home\n                  | pkg_summary\n                  | pkg_src_info\n                  | pkg_file_name\n                  | pkg_supplier\n                  | pkg_orig\n                  | pkg_chksum\n                  | pkg_verif\n                  | pkg_desc\n                  | pkg_lic_decl\n                  | pkg_lic_conc\n                  | pkg_lic_ff\n                  | pkg_lic_comment\n                  | pkg_cr_text\n                  | file_name\n                  | file_type\n                  | file_chksum\n                  | file_conc\n                  | file_lics_info\n                  | file_cr_text\n                  | file_lics_comment\n                  | file_notice\n                  | file_comment\n                  | file_contrib\n                  | file_dep\n                  | file_artifact\n                  | extr_lic_id\n                  | extr_lic_text\n                  | extr_lic_name\n                  | lic_xref\n                  | lic_comment\n                  | unknown_tag", "id": "f3753:c0:m3"}
{"signature": "def p_file_dep_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.add_file_dep(self.document, value)<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "file_dep : FILE_DEP LINE", "id": "f3753:c0:m30"}
{"signature": "def p_lics_list_ver_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "locs_list_ver : LIC_LIST_VER error", "id": "f3753:c0:m128"}
{"signature": "def p_file_lic_info_value_3(self, p):", "body": "if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:1>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:1>]<EOL><DEDENT>p[<NUM_LIT:0>] = document.License.from_identifier(value)<EOL>", "docstring": "file_lic_info_value : LINE", "id": "f3753:c0:m47"}
{"signature": "def p_pkg_src_info_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.set_pkg_source_info(self.document, value)<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "pkg_src_info : PKG_SRC_INFO TEXT", "id": "f3753:c0:m83"}
{"signature": "def p_file_dep_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "file_dep : FILE_DEP error", "id": "f3753:c0:m31"}
{"signature": "def p_pkg_lic_decl_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "pkg_lic_decl : PKG_LICS_DECL error", "id": "f3753:c0:m75"}
{"signature": "def p_lic_xref_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "lic_xref : LICS_CRS_REF error", "id": "f3753:c0:m7"}
{"signature": "def p_file_cr_value_3(self, p):", "body": "p[<NUM_LIT:0>] = utils.NoAssert()<EOL>", "docstring": "file_cr_value : NO_ASSERT", "id": "f3753:c0:m40"}
{"signature": "def p_file_cr_text_1(self, p):", "body": "try:<EOL><INDENT>self.builder.set_file_copyright(self.document, p[<NUM_LIT:2>])<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "file_cr_text : FILE_CR_TEXT file_cr_value", "id": "f3753:c0:m36"}
{"signature": "def p_doc_namespace_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "doc_namespace : DOC_NAMESPACE error", "id": "f3753:c0:m132"}
{"signature": "def p_created_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "created : CREATED error", "id": "f3753:c0:m146"}
{"signature": "def p_file_conc_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "file_conc : FILE_LICS_CONC error", "id": "f3753:c0:m61"}
{"signature": "def p_lics_list_ver_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.set_lics_list_ver(self.document, value)<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(<EOL>p[<NUM_LIT:2>], p.lineno(<NUM_LIT:2>))<EOL>self.logger.log(msg)<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "locs_list_ver : LIC_LIST_VER LINE", "id": "f3753:c0:m127"}
{"signature": "def p_doc_name_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.set_doc_name(self.document, value)<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "doc_name : DOC_NAME LINE", "id": "f3753:c0:m135"}
{"signature": "def p_file_type_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "file_type : FILE_TYPE error", "id": "f3753:c0:m57"}
{"signature": "def p_pkg_lic_conc_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "pkg_lic_conc : PKG_LICS_CONC error", "id": "f3753:c0:m82"}
{"signature": "def p_pkg_home_value_1(self, p):", "body": "if six.PY2:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL><DEDENT>", "docstring": "pkg_home_value : LINE", "id": "f3753:c0:m91"}
{"signature": "def p_file_conc_1(self, p):", "body": "try:<EOL><INDENT>self.builder.set_concluded_license(self.document, p[<NUM_LIT:2>])<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "file_conc : FILE_LICS_CONC conc_license", "id": "f3753:c0:m60"}
{"signature": "def p_file_comment_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "file_comment : FILE_COMMENT error", "id": "f3753:c0:m55"}
{"signature": "def p_package_name(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.create_package(self.document, value)<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "package_name : PKG_NAME LINE", "id": "f3753:c0:m109"}
{"signature": "def p_file_notice_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "file_notice : FILE_NOTICE error", "id": "f3753:c0:m35"}
{"signature": "def p_pkg_lic_ff_1(self, p):", "body": "try:<EOL><INDENT>self.builder.set_pkg_license_from_file(self.document, p[<NUM_LIT:2>])<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL><DEDENT>", "docstring": "pkg_lic_ff : PKG_LICS_FFILE pkg_lic_ff_value", "id": "f3753:c0:m76"}
{"signature": "def p_file_lics_comment_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.set_file_license_comment(self.document, value)<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "file_lics_comment : FILE_LICS_COMMENT TEXT", "id": "f3753:c0:m41"}
{"signature": "def p_annotation_comment_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.add_annotation_comment(self.document, value)<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "annotation_comment : ANNOTATION_COMMENT TEXT", "id": "f3753:c0:m121"}
{"signature": "def p_pkg_supplier_values_1(self, p):", "body": "p[<NUM_LIT:0>] = utils.NoAssert()<EOL>", "docstring": "pkg_supplier_values : NO_ASSERT", "id": "f3753:c0:m103"}
{"signature": "def p_reviewer_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "reviewer : REVIEWER error", "id": "f3753:c0:m112"}
{"signature": "def p_prj_home_art_1(self, p):", "body": "try:<EOL><INDENT>self.builder.set_file_atrificat_of_project(self.document, '<STR_LIT>', p[<NUM_LIT:2>])<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "prj_home_art : ART_PRJ_HOME LINE", "id": "f3753:c0:m25"}
{"signature": "def p_pkg_lic_ff_value_1(self, p):", "body": "p[<NUM_LIT:0>] = utils.SPDXNone()<EOL>", "docstring": "pkg_lic_ff_value : NONE", "id": "f3753:c0:m77"}
{"signature": "def p_pkg_desc_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "pkg_desc : PKG_DESC error", "id": "f3753:c0:m64"}
{"signature": "def p_annotation_spdx_id_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.set_annotation_spdx_id(self.document, value)<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "annotation_spdx_id : ANNOTATION_SPDX_ID LINE", "id": "f3753:c0:m125"}
{"signature": "def p_file_chksum_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.set_file_chksum(self.document, value)<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "file_chksum : FILE_CHKSUM CHKSUM", "id": "f3753:c0:m58"}
{"signature": "def p_reviewer_1(self, p):", "body": "self.builder.add_reviewer(self.document, p[<NUM_LIT:2>])<EOL>", "docstring": "reviewer : REVIEWER entity", "id": "f3753:c0:m111"}
{"signature": "def p_pkg_file_name_1(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "pkg_file_name : PKG_FILE_NAME error", "id": "f3753:c0:m106"}
{"signature": "def p_file_comment_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.set_file_comment(self.document, value)<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "file_comment : FILE_COMMENT TEXT", "id": "f3753:c0:m54"}
{"signature": "def p_pkg_down_value_2(self, p):", "body": "p[<NUM_LIT:0>] = utils.SPDXNone()<EOL>", "docstring": "pkg_down_value : NONE", "id": "f3753:c0:m97"}
{"signature": "def p_creator_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "creator : CREATOR error", "id": "f3753:c0:m144"}
{"signature": "def p_extr_lic_text_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "extr_lic_text : LICS_TEXT error", "id": "f3753:c0:m15"}
{"signature": "def p_file_lic_info_value_2(self, p):", "body": "p[<NUM_LIT:0>] = utils.NoAssert()<EOL>", "docstring": "file_lic_info_value : NO_ASSERT", "id": "f3753:c0:m46"}
{"signature": "def p_pkg_down_value_3(self, p):", "body": "p[<NUM_LIT:0>] = utils.NoAssert()<EOL>", "docstring": "pkg_down_value : NO_ASSERT", "id": "f3753:c0:m98"}
{"signature": "def p_file_contrib_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.add_file_contribution(self.document, value)<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "file_contrib : FILE_CONTRIB LINE", "id": "f3753:c0:m32"}
{"signature": "def p_pkg_chksum_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.set_pkg_chk_sum(self.document, value)<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "pkg_chksum : PKG_CHKSUM CHKSUM", "id": "f3753:c0:m85"}
{"signature": "def p_doc_comment_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "doc_comment : DOC_COMMENT error", "id": "f3753:c0:m130"}
{"signature": "def p_pkg_verif_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "pkg_verif : PKG_VERF_CODE error", "id": "f3753:c0:m88"}
{"signature": "def p_pkg_verif_1(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.set_pkg_verif_code(self.document, value)<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except SPDXValueError:<EOL><INDENT>self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL><DEDENT>", "docstring": "pkg_verif : PKG_VERF_CODE LINE", "id": "f3753:c0:m87"}
{"signature": "def p_spdx_id(self, p):", "body": "if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>if not self.builder.doc_spdx_id_set:<EOL><INDENT>self.builder.set_doc_spdx_id(self.document, value)<EOL><DEDENT>else:<EOL><INDENT>self.builder.set_file_spdx_id(self.document, value)<EOL><DEDENT>", "docstring": "spdx_id : SPDX_ID LINE", "id": "f3753:c0:m53"}
{"signature": "def p_pkg_file_name(self, p):", "body": "try:<EOL><INDENT>if six.PY2:<EOL><INDENT>value = p[<NUM_LIT:2>].decode(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>value = p[<NUM_LIT:2>]<EOL><DEDENT>self.builder.set_pkg_file_name(self.document, value)<EOL><DEDENT>except OrderError:<EOL><INDENT>self.order_error('<STR_LIT>', '<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>except CardinalityError:<EOL><INDENT>self.more_than_one_error('<STR_LIT>', p.lineno(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "pkg_file_name : PKG_FILE_NAME LINE", "id": "f3753:c0:m105"}
{"signature": "def p_pkg_supplier_2(self, p):", "body": "self.error = True<EOL>msg = ERROR_MESSAGES['<STR_LIT>'].format(p.lineno(<NUM_LIT:1>))<EOL>self.logger.log(msg)<EOL>", "docstring": "pkg_supplier : PKG_SUPPL error", "id": "f3753:c0:m102"}
{"signature": "def has_package(self, doc):", "body": "return doc.package is not None<EOL>", "docstring": "Returns true if the document has a package.", "id": "f3754:c7:m16"}
{"signature": "def set_doc_data_lics(self, doc, lics):", "body": "if not self.doc_data_lics_set:<EOL><INDENT>self.doc_data_lics_set = True<EOL>if validations.validate_data_lics(lics):<EOL><INDENT>doc.data_license = document.License.from_identifier(lics)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the document data license.\n        Raises value error if malformed value, CardinalityError\n        if already defined.", "id": "f3754:c0:m2"}
{"signature": "def add_lic_xref(self, doc, ref):", "body": "if self.has_extr_lic(doc):<EOL><INDENT>self.extr_lic(doc).add_xref(ref)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Adds a license cross reference.\n        Raises OrderError if no License ID defined.", "id": "f3754:c8:m7"}
{"signature": "def add_review_date(self, doc, reviewed):", "body": "if len(doc.reviews) != <NUM_LIT:0>:<EOL><INDENT>if not self.review_date_set:<EOL><INDENT>self.review_date_set = True<EOL>date = utils.datetime_from_iso_format(reviewed)<EOL>if date is not None:<EOL><INDENT>doc.reviews[-<NUM_LIT:1>].review_date = date<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the review date. Raises CardinalityError if\n        already set. OrderError if no reviewer defined before.\n        Raises SPDXValueError if invalid reviewed value.", "id": "f3754:c4:m3"}
{"signature": "def set_pkg_file_name(self, doc, name):", "body": "self.assert_package_exists()<EOL>if not self.package_file_name_set:<EOL><INDENT>self.package_file_name_set = True<EOL>doc.package.file_name = name<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the package file name, if not already set.\n        name - Any string.\n        Raises CardinalityError if already has a file_name.\n        Raises OrderError if no pacakge previously defined.", "id": "f3754:c6:m4"}
{"signature": "def set_file_spdx_id(self, doc, spdx_id):", "body": "if self.has_package(doc) and self.has_file(doc):<EOL><INDENT>if not self.file_spdx_id_set:<EOL><INDENT>self.file_spdx_id_set = True<EOL>if validations.validate_file_spdx_id(spdx_id):<EOL><INDENT>self.file(doc).spdx_id = spdx_id<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the file SPDX Identifier.\nRaises OrderError if no package or no file defined.\nRaises SPDXValueError if malformed value.\nRaises CardinalityError if more than one spdx_id set.", "id": "f3754:c7:m2"}
{"signature": "def build_org(self, doc, entity):", "body": "match = self.org_re.match(entity)<EOL>if match and validations.validate_org_name(match.group(self.ORG_NAME_GROUP)):<EOL><INDENT>name = match.group(self.ORG_NAME_GROUP).strip()<EOL>email = match.group(self.ORG_EMAIL_GROUP)<EOL>if (email is not None) and (len(email) != <NUM_LIT:0>):<EOL><INDENT>return creationinfo.Organization(name=name, email=email.strip())<EOL><DEDENT>else:<EOL><INDENT>return creationinfo.Organization(name=name, email=None)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Builds an organization object of of a string representation.\n        Returns built organization. Raises SPDXValueError if failed to extract\n        name.", "id": "f3754:c2:m1"}
{"signature": "def set_lic_name(self, doc, name):", "body": "if self.has_extr_lic(doc):<EOL><INDENT>if not self.extr_lic_name_set:<EOL><INDENT>self.extr_lic_name_set = True<EOL>if validations.validate_extr_lic_name(name):<EOL><INDENT>self.extr_lic(doc).full_name = name<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets license name.\n        Raises SPDXValueError if name is not str or utils.NoAssert\n        Raises OrderError if no license id defined.", "id": "f3754:c8:m5"}
{"signature": "def reset(self):", "body": "<EOL>self.reset_creation_info()<EOL>self.reset_document()<EOL>self.reset_package()<EOL>self.reset_file_stat()<EOL>self.reset_reviews()<EOL>self.reset_annotations()<EOL>self.reset_extr_lics()<EOL>", "docstring": "Resets builder's state for building new documents.\n        Must be called between usage with different documents.", "id": "f3754:c9:m1"}
{"signature": "def set_chksum(self, doc, chksum):", "body": "doc.ext_document_references[-<NUM_LIT:1>].check_sum = checksum_from_sha1(<EOL>chksum)<EOL>", "docstring": "Sets the `check_sum` attribute of the `ExternalDocumentRef`\nobject.", "id": "f3754:c1:m2"}
{"signature": "def set_doc_namespace(self, doc, namespace):", "body": "if not self.doc_namespace_set:<EOL><INDENT>self.doc_namespace_set = True<EOL>if validations.validate_doc_namespace(namespace):<EOL><INDENT>doc.namespace = namespace<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the document namespace.\n        Raise SPDXValueError if malformed value, CardinalityError\n        if already defined.", "id": "f3754:c0:m6"}
{"signature": "def set_pkg_source_info(self, doc, text):", "body": "self.assert_package_exists()<EOL>if not self.package_source_info_set:<EOL><INDENT>self.package_source_info_set = True<EOL>if validations.validate_pkg_src_info(text):<EOL><INDENT>doc.package.source_info = str_from_text(text)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the package's source information, if not already set.\n        text - Free form text.\n        Raises CardinalityError if already defined.\n        Raises OrderError if no package previously defined.\n        SPDXValueError if text is not free form text.", "id": "f3754:c6:m11"}
{"signature": "def set_file_copyright(self, doc, text):", "body": "if self.has_package(doc) and self.has_file(doc):<EOL><INDENT>if not self.file_copytext_set:<EOL><INDENT>self.file_copytext_set = True<EOL>if validations.validate_file_cpyright(text):<EOL><INDENT>if isinstance(text, string_types):<EOL><INDENT>self.file(doc).copyright = str_from_text(text)<EOL><DEDENT>else:<EOL><INDENT>self.file(doc).copyright = text  <EOL><DEDENT>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Raises OrderError if no package or file defined.\n        Raises SPDXValueError if not free form text or NONE or NO_ASSERT.\n        Raises CardinalityError if more than one.", "id": "f3754:c7:m9"}
{"signature": "def set_pkg_summary(self, doc, text):", "body": "self.assert_package_exists()<EOL>if not self.package_summary_set:<EOL><INDENT>self.package_summary_set = True<EOL>if validations.validate_pkg_summary(text):<EOL><INDENT>doc.package.summary = str_from_text(text)<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Set's the package summary.\n        Raises SPDXValueError if text is not free form text.\n        Raises CardinalityError if summary already set.\n        Raises OrderError if no package previously defined.", "id": "f3754:c6:m17"}
{"signature": "def add_reviewer(self, doc, reviewer):", "body": "<EOL>self.reset_reviews()<EOL>if validations.validate_reviewer(reviewer):<EOL><INDENT>doc.add_review(review.Review(reviewer=reviewer))<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Adds a reviewer to the SPDX Document.\n        Reviwer is an entity created by an EntityBuilder.\n        Raises SPDXValueError if not a valid reviewer type.", "id": "f3754:c4:m2"}
{"signature": "def set_annotation_spdx_id(self, doc, spdx_id):", "body": "if len(doc.annotations) != <NUM_LIT:0>:<EOL><INDENT>if not self.annotation_spdx_id_set:<EOL><INDENT>self.annotation_spdx_id_set = True<EOL>doc.annotations[-<NUM_LIT:1>].spdx_id = spdx_id<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the annotation SPDX Identifier.\n        Raises CardinalityError if already set. OrderError if no annotator\n        defined before.", "id": "f3754:c5:m6"}
{"signature": "def checksum_from_sha1(value):", "body": "<EOL>CHECKSUM_RE = re.compile('<STR_LIT>', re.UNICODE)<EOL>match = CHECKSUM_RE.match(value)<EOL>if match:<EOL><INDENT>return checksum.Algorithm(identifier='<STR_LIT>', value=match.group(<NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Return an spdx.checksum.Algorithm instance representing the SHA1\nchecksum or None if does not match CHECKSUM_RE.", "id": "f3754:m0"}
{"signature": "def set_file_comment(self, doc, text):", "body": "if self.has_package(doc) and self.has_file(doc):<EOL><INDENT>if not self.file_comment_set:<EOL><INDENT>self.file_comment_set = True<EOL>if validations.validate_file_comment(text):<EOL><INDENT>self.file(doc).comment = str_from_text(text)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Raises OrderError if no package or no file defined.\nRaises CardinalityError if more than one comment set.\nRaises SPDXValueError if text is not free form text.", "id": "f3754:c7:m3"}
{"signature": "def reset_reviews(self):", "body": "<EOL>self.review_date_set = False<EOL>self.review_comment_set = False<EOL>", "docstring": "Resets the builder's state to allow building new reviews.", "id": "f3754:c4:m1"}
{"signature": "def extr_lic(self, doc):", "body": "return doc.extracted_licenses[-<NUM_LIT:1>]<EOL>", "docstring": "Retrieves last license in extracted license list", "id": "f3754:c8:m1"}
{"signature": "def set_created_date(self, doc, created):", "body": "if not self.created_date_set:<EOL><INDENT>self.created_date_set = True<EOL>date = utils.datetime_from_iso_format(created)<EOL>if date is not None:<EOL><INDENT>doc.creation_info.created = date<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets created date, Raises CardinalityError if\n        created date already set.\n        Raises SPDXValueError if created is not a date.", "id": "f3754:c3:m2"}
{"signature": "def set_file_license_in_file(self, doc, lic):", "body": "if self.has_package(doc) and self.has_file(doc):<EOL><INDENT>if validations.validate_file_lics_in_file(lic):<EOL><INDENT>self.file(doc).add_lics(lic)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Raises OrderError if no package or file defined.\nRaises SPDXValueError if malformed value.", "id": "f3754:c7:m7"}
{"signature": "def add_file_dep(self, doc, value):", "body": "if self.has_package(doc) and self.has_file(doc):<EOL><INDENT>self.file(doc).add_depend(value)<EOL><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Raises OrderError if no package or file defined.", "id": "f3754:c7:m12"}
{"signature": "def add_annotation_type(self, doc, annotation_type):", "body": "if len(doc.annotations) != <NUM_LIT:0>:<EOL><INDENT>if not self.annotation_type_set:<EOL><INDENT>self.annotation_type_set = True<EOL>if validations.validate_annotation_type(annotation_type):<EOL><INDENT>doc.annotations[-<NUM_LIT:1>].annotation_type = annotation_type<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the annotation type. Raises CardinalityError if\n        already set. OrderError if no annotator defined before.\n        Raises SPDXValueError if invalid value.", "id": "f3754:c5:m5"}
{"signature": "def build_person(self, doc, entity):", "body": "match = self.person_re.match(entity)<EOL>if match and validations.validate_person_name(match.group(self.PERSON_NAME_GROUP)):<EOL><INDENT>name = match.group(self.PERSON_NAME_GROUP).strip()<EOL>email = match.group(self.PERSON_EMAIL_GROUP)<EOL>if (email is not None) and (len(email) != <NUM_LIT:0>):<EOL><INDENT>return creationinfo.Person(name=name, email=email.strip())<EOL><DEDENT>else:<EOL><INDENT>return creationinfo.Person(name=name, email=None)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Builds an organization object of of a string representation.\n        Returns built organization. Raises SPDXValueError if failed to extract\n        name.", "id": "f3754:c2:m2"}
{"signature": "def set_doc_comment(self, doc, comment):", "body": "if not self.doc_comment_set:<EOL><INDENT>self.doc_comment_set = True<EOL>if validations.validate_doc_comment(comment):<EOL><INDENT>doc.comment = str_from_text(comment)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets document comment, Raises CardinalityError if\n        comment already set.\n        Raises SPDXValueError if comment is not free form text.", "id": "f3754:c0:m5"}
{"signature": "def reset_annotations(self):", "body": "<EOL>self.annotation_date_set = False<EOL>self.annotation_comment_set = False<EOL>self.annotation_type_set = False<EOL>self.annotation_spdx_id_set = False<EOL>", "docstring": "Resets the builder's state to allow building new annotations.", "id": "f3754:c5:m1"}
{"signature": "def set_pkg_license_declared(self, doc, lic):", "body": "self.assert_package_exists()<EOL>if not self.package_license_declared_set:<EOL><INDENT>self.package_license_declared_set = True<EOL>if validations.validate_lics_conc(lic):<EOL><INDENT>doc.package.license_declared = lic<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the package's declared license.\n        Raises SPDXValueError if data malformed.\n        Raises OrderError if no package previously defined.\n        Raises CardinalityError if already set.", "id": "f3754:c6:m14"}
{"signature": "def set_doc_name(self, doc, name):", "body": "if not self.doc_name_set:<EOL><INDENT>doc.name = name<EOL>self.doc_name_set = True<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the document name.\n        Raises CardinalityError if already defined.", "id": "f3754:c0:m3"}
{"signature": "def set_pkg_chk_sum(self, doc, chk_sum):", "body": "self.assert_package_exists()<EOL>if not self.package_chk_sum_set:<EOL><INDENT>self.package_chk_sum_set = True<EOL>doc.package.check_sum = checksum_from_sha1(chk_sum)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the package check sum, if not already set.\n        chk_sum - A string\n        Raises CardinalityError if already defined.\n        Raises OrderError if no package previously defined.", "id": "f3754:c6:m10"}
{"signature": "def add_review_comment(self, doc, comment):", "body": "if len(doc.reviews) != <NUM_LIT:0>:<EOL><INDENT>if not self.review_comment_set:<EOL><INDENT>self.review_comment_set = True<EOL>if validations.validate_review_comment(comment):<EOL><INDENT>doc.reviews[-<NUM_LIT:1>].comment = str_from_text(comment)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise SPDXValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CardinalityError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise OrderError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets the review comment. Raises CardinalityError if\n        already set. OrderError if no reviewer defined before.\n        Raises SPDXValueError if comment is not free form text.", "id": "f3754:c4:m4"}
{"signature": "def t_text(self, t):", "body": "t.lexer.text_start = t.lexer.lexpos - len('<STR_LIT>')<EOL>t.lexer.begin('<STR_LIT:text>')<EOL>", "docstring": "r':\\s*<text>", "id": "f3755:c0:m0"}
{"signature": "def t_newline(self, t):", "body": "t.lexer.lineno += len(t.value)<EOL>", "docstring": "r'\\n+", "id": "f3755:c0:m15"}
{"signature": "def t_PERSON_VALUE(self, t):", "body": "t.value = t.value[<NUM_LIT:1>:].strip()<EOL>return t<EOL>", "docstring": "r':\\s*Person:.+", "id": "f3755:c0:m10"}
{"signature": "def t_DOC_URI(self, t):", "body": "t.value = t.value.strip()<EOL>return t<EOL>", "docstring": "r'\\s*((ht|f)tps?:\\/\\/\\S*)", "id": "f3755:c0:m6"}
{"signature": "def t_comment(self, t):", "body": "pass<EOL>", "docstring": "r'\\#.*", "id": "f3755:c0:m14"}
{"signature": "def t_LINE_OR_KEYWORD_VALUE(self, t):", "body": "t.value = t.value[<NUM_LIT:1>:].strip()<EOL>if t.value in self.reserved.keys():<EOL><INDENT>t.type = self.reserved[t.value]<EOL><DEDENT>else:<EOL><INDENT>t.type = '<STR_LIT>'<EOL><DEDENT>return t<EOL>", "docstring": "r':.+", "id": "f3755:c0:m13"}
{"signature": "def build(self, **kwargs):", "body": "self.yacc = yacc.yacc(module=self, **kwargs)<EOL>", "docstring": "Must be called before parse.", "id": "f3756:c4:m8"}
{"signature": "def t_LP(self, t):", "body": "return t<EOL>", "docstring": "r'\\(", "id": "f3756:c3:m0"}
{"signature": "def parse(self, data):", "body": "try:<EOL><INDENT>return self.yacc.parse(data, lexer=self.lex)<EOL><DEDENT>except:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Parses a license list and returns a License or None if it failed.", "id": "f3756:c4:m9"}
{"signature": "def t_whitespace(self, t):", "body": "pass<EOL>", "docstring": "r'\\s+", "id": "f3756:c3:m4"}
{"signature": "def t_AND(self, t):", "body": "t.value = t.value.strip()<EOL>return t<EOL>", "docstring": "r'\\s(and|AND)\\s", "id": "f3756:c3:m2"}
{"signature": "def p_conjunction_1(self, p):", "body": "p[<NUM_LIT:0>] = document.LicenseConjunction(p[<NUM_LIT:1>], p[<NUM_LIT:3>])<EOL>", "docstring": "conjunction : conjunction AND license_atom", "id": "f3756:c4:m3"}
{"signature": "def p_disjunction_1(self, p):", "body": "p[<NUM_LIT:0>] = document.LicenseDisjunction(p[<NUM_LIT:1>], p[<NUM_LIT:3>])<EOL>", "docstring": "disjunction : disjunction OR conjunction", "id": "f3756:c4:m1"}
{"signature": "def p_conjunction_2(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "conjunction : license_atom", "id": "f3756:c4:m4"}
{"signature": "def t_LICENSE(self, t):", "body": "t.value = t.value.strip()<EOL>return t<EOL>", "docstring": "r'[A-Za-z.0-9\\-+]+", "id": "f3756:c3:m5"}
{"signature": "def datetime_from_iso_format(string):", "body": "match = DATE_ISO_REGEX.match(string)<EOL>if match:<EOL><INDENT>date = datetime.datetime(year=int(match.group(DATE_ISO_YEAR_GRP)),<EOL>month=int(match.group(DATE_ISO_MONTH_GRP)),<EOL>day=int(match.group(DATE_ISO_DAY_GRP)),<EOL>hour=int(match.group(DATE_ISO_HOUR_GRP)),<EOL>second=int(match.group(DATE_ISO_SEC_GRP)),<EOL>minute=int(match.group(DATE_ISO_MIN_GRP)))<EOL>return date<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Return a datetime object from an iso 8601 representation.\nReturn None if string is non conforming.", "id": "f3756:m1"}
{"signature": "def token(self):", "body": "return self.lexer.token()<EOL>", "docstring": "Get the next token or None if exhausted input.", "id": "f3756:c3:m8"}
{"signature": "def p_license_atom_1(self, p):", "body": "p[<NUM_LIT:0>] = document.License.from_identifier(p[<NUM_LIT:1>])<EOL>", "docstring": "license_atom : LICENSE", "id": "f3756:c4:m5"}
{"signature": "def validate(self, messages):", "body": "messages = self.validate_annotator(messages)<EOL>messages = self.validate_annotation_date(messages)<EOL>messages = self.validate_annotation_type(messages)<EOL>messages = self.validate_spdx_id(messages)<EOL>return messages<EOL>", "docstring": "Returns True if all the fields are valid.\n        Appends any error messages to messages parameter.", "id": "f3757:c0:m6"}
{"signature": "def get_temp_file(extension='<STR_LIT>'):", "body": "if extension and not extension.startswith('<STR_LIT:.>'):<EOL><INDENT>extension = '<STR_LIT:.>' + extension<EOL><DEDENT>file_name = '<STR_LIT>' + extension<EOL>temp_dir = tempfile.mkdtemp()<EOL>return os.path.join(temp_dir, file_name)<EOL>", "docstring": "Return a unique new temporary file location to a non-existing\ntemporary file that can safely be created without a risk of name\ncollision.", "id": "f3762:m0"}
{"signature": "def sort_nested(data):", "body": "if isinstance(data, dict):<EOL><INDENT>new_data = {}<EOL>for k, v in data.items():<EOL><INDENT>if isinstance(v, list):<EOL><INDENT>v = sorted(v)<EOL><DEDENT>if isinstance(v, dict):<EOL><INDENT>v = sort_nested(v)<EOL><DEDENT>new_data[k] = v<EOL><DEDENT>return new_data<EOL><DEDENT>elif isinstance(data, list):<EOL><INDENT>new_data = []<EOL>for v in sorted(data):<EOL><INDENT>if isinstance(v, list):<EOL><INDENT>v = sort_nested(v)<EOL><DEDENT>if isinstance(v, dict):<EOL><INDENT>v = sort_nested(v)<EOL><DEDENT>new_data.append(v)<EOL><DEDENT>return new_data<EOL><DEDENT>", "docstring": "Return a new dict with any nested list sorted recursively.", "id": "f3764:m4"}
{"signature": "def check_rdf_scan(expected_file, result_file, regen=False):", "body": "import json<EOL>result = load_and_clean_rdf(result_file)<EOL>if regen:<EOL><INDENT>expected = result<EOL>with codecs.open(expected_file, '<STR_LIT:w>', encoding='<STR_LIT:utf-8>') as o:<EOL><INDENT>json.dump(expected, o, indent=<NUM_LIT:2>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>with codecs.open(expected_file, '<STR_LIT:r>', encoding='<STR_LIT:utf-8>') as i:<EOL><INDENT>expected = sort_nested(json.load(i))<EOL><DEDENT><DEDENT>assert expected == result<EOL>", "docstring": "Check that expected and result_file are equal.\nBoth are paths to SPDX RDF XML files, UTF-8 encoded.", "id": "f3764:m5"}
{"signature": "def load_and_clean_tv(location):", "body": "content = codecs.open(location, encoding='<STR_LIT:utf-8>').read()<EOL>content = [l for l in content.splitlines(False)<EOL>if l and l.strip() and not l.startswith(('<STR_LIT>', '<STR_LIT>',))]<EOL>return '<STR_LIT:\\n>'.join(content)<EOL>", "docstring": "Return a mapping for the SPDX TV file at location suitable for\ncomparison. The file content is cleaned from variable parts such as\ndates, generated UUIDs and versions", "id": "f3764:m6"}
{"signature": "def strip_variable_text(rdf_text):", "body": "replace_nid = re.compile('<STR_LIT>').sub<EOL>rdf_text = replace_nid('<STR_LIT>', rdf_text)<EOL>replace_creation = re.compile('<STR_LIT>', re.DOTALL).sub<EOL>rdf_text = replace_creation('<STR_LIT>', rdf_text)<EOL>replace_pcc = re.compile('<STR_LIT>', re.DOTALL).sub<EOL>rdf_text = replace_pcc('<STR_LIT>', rdf_text)<EOL>return rdf_text<EOL>", "docstring": "Return rdf_text stripped from variable parts such as rdf nodeids", "id": "f3764:m2"}
{"signature": "def load_and_clean_rdf(location):", "body": "content = codecs.open(location, encoding='<STR_LIT:utf-8>').read()<EOL>content = strip_variable_text(content)<EOL>data = xmltodict.parse(content, dict_constructor=dict)<EOL>return sort_nested(data)<EOL>", "docstring": "Return plain Python nested data for the SPDX RDF file at location\nsuitable for comparison. The file content is cleaned from variable\nparts such as dates, generated UUIDs and versions\n\nNOTE: we use plain dicts to avoid ordering issues in XML. the SPDX\ntool and lxml do not seem to return a consistent ordering that is\nneeded for tests.", "id": "f3764:m3"}
{"signature": "def __init__(self, default_args):", "body": "self._default_args = default_args<EOL>", "docstring": ":param default_args: default arguments\n:type default_args: string or list of string", "id": "f3777:c0:m0"}
{"signature": "@property<EOL><INDENT>def timeout(self):<DEDENT>", "body": "return self._timeout<EOL>", "docstring": ":return: seconds to wait I/O.\n:rtype: float", "id": "f3780:c0:m20"}
{"signature": "def send_keys(self, keys, wait=True):", "body": "self._process.stdin.write(bytearray(keys, self._encoding))<EOL>self._process.stdin.flush()<EOL>if wait:<EOL><INDENT>self.wait()<EOL><DEDENT>", "docstring": "Send a raw key sequence to *Vim*.\n\n.. note:: *Vim* style key sequence notation (like ``<Esc>``)\n          is not recognized.\n          Use escaped characters (like ``'\\033'``) instead.\n\nExample:\n\n>>> import headlessvim\n>>> with headlessvim.open() as vim:\n...     vim.send_keys('ispam\\033')\n...     str(vim.display_lines()[0].strip())\n...\n'spam'\n\n:param strgin keys: key sequence to send\n:param boolean wait: whether if wait a response", "id": "f3780:c0:m9"}
{"signature": "def close(self):", "body": "self._tempfile.close()<EOL>self._process.terminate()<EOL>if self._process.is_alive():<EOL><INDENT>self._process.kill()<EOL><DEDENT>", "docstring": "Disconnect and close *Vim*.", "id": "f3780:c0:m5"}
{"signature": "def install_plugin(self, dir, entry_script=None):", "body": "self.runtimepath.append(dir)<EOL>if entry_script is not None:<EOL><INDENT>self.command('<STR_LIT>'.format(entry_script), False)<EOL><DEDENT>", "docstring": "Install *Vim* plugin.\n\n:param string dir: the root directory contains *Vim* script\n:param string entry_script: path to the initializing script", "id": "f3780:c0:m11"}
{"signature": "def wait(self, timeout=None):", "body": "if timeout is None:<EOL><INDENT>timeout = self._timeout<EOL><DEDENT>while self._process.check_readable(timeout):<EOL><INDENT>self._flush()<EOL><DEDENT>", "docstring": "Wait for response until timeout.\nIf timeout is specified to None, ``self.timeout`` is used.\n\n:param float timeout: seconds to wait I/O", "id": "f3780:c0:m10"}
{"signature": "def display_lines(self):", "body": "return self._screen.display<EOL>", "docstring": "Shows the terminal screen splitted by newlines.\n\nAlmost equals to ``self.display().splitlines()``\n\n:return: screen as a list of strings\n:rtype: list of string", "id": "f3780:c0:m8"}
{"signature": "@property<EOL><INDENT>def encoding(self):<DEDENT>", "body": "return self._encoding<EOL>", "docstring": ":return: internal encoding of *Vim*.\n:rtype: string", "id": "f3780:c0:m17"}
{"signature": "@property<EOL><INDENT>def executable(self):<DEDENT>", "body": "return self._process.executable<EOL>", "docstring": ":return: the absolute path to the process.\n:rtype: string", "id": "f3780:c0:m15"}
{"signature": "@property<EOL><INDENT>def stdin(self):<DEDENT>", "body": "return self._stdin<EOL>", "docstring": ":return: file-like object representing the standard input\n         of the process\n:rtype: flie-like object", "id": "f3781:c0:m7"}
{"signature": "@property<EOL><INDENT>def executable(self):<DEDENT>", "body": "return self._executable<EOL>", "docstring": ":return: the absolute path to the process.\n:rtype: strIng", "id": "f3781:c0:m5"}
{"signature": "def terminate(self):", "body": "with self._close():<EOL><INDENT>self._process.terminate()<EOL><DEDENT>", "docstring": "Terminate this process.\nUse this method rather than ``self.kill``.", "id": "f3781:c0:m1"}
{"signature": "def is_alive(self):", "body": "return self._process.poll() is None<EOL>", "docstring": "Check if the process is alive.\n\n:return: True if the process is alive, else False\n:rtype: boolean", "id": "f3781:c0:m4"}
{"signature": "@property<EOL><INDENT>def args(self):<DEDENT>", "body": "return self._args<EOL>", "docstring": ":return: launch arguments of the process.\n:rtype: string or list of string", "id": "f3781:c0:m6"}
{"signature": "@property<EOL><INDENT>def stdout(self):<DEDENT>", "body": "return self._stdout<EOL>", "docstring": ":return: non blocking file-like object\n         representing the standard output of the process\n:rtype: file-like object", "id": "f3781:c0:m8"}
{"signature": "def __init__(self, executable, args, env):", "body": "self._executable = distutils.spawn.find_executable(executable)<EOL>self._args = args<EOL>self._env = env<EOL>self._open_process()<EOL>", "docstring": ":param str executable: command name to execute *Vim*\n:param args: arguments to execute *Vim*\n:type args: None or string or list of string\n:param env: environment variables to execute *Vim*\n:type env: None or dict of (string, string)", "id": "f3781:c0:m0"}
{"signature": "def run(self):", "body": "run_once = True<EOL>while (run_once or self._threaded) and self.end is False:<EOL><INDENT>self.service_tx_queue()<EOL>self.parse_messages()<EOL>run_once = False<EOL>if self._threaded:<EOL><INDENT>time.sleep(self._timeout)<EOL><DEDENT><DEDENT>if self._threaded:<EOL><INDENT>logger.info('<STR_LIT>')<EOL><DEDENT>", "docstring": "Receives the serial data into the self._raw buffer\n:return:", "id": "f3786:c0:m23"}
{"signature": "def _remove_esc_chars(self, raw_message):", "body": "message = []<EOL>escape_next = False<EOL>for c in raw_message:<EOL><INDENT>if escape_next:<EOL><INDENT>message.append(c ^ self._ESC_XOR)<EOL>escape_next = False<EOL><DEDENT>else:<EOL><INDENT>if c == self._ESC:<EOL><INDENT>escape_next = True<EOL><DEDENT>else:<EOL><INDENT>message.append(c)<EOL><DEDENT><DEDENT><DEDENT>return message<EOL>", "docstring": "Removes any escape characters from the message\n:param raw_message: a list of bytes containing the un-processed data\n:return: a message that has the escaped characters appropriately un-escaped", "id": "f3788:c0:m6"}
{"signature": "def rx(self):", "body": "if not self._threaded:<EOL><INDENT>self.run()<EOL><DEDENT>try:<EOL><INDENT>return tuple(self._messages.pop(<NUM_LIT:0>))<EOL><DEDENT>except IndexError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Receive a series of bytes that have been verified\n:return: a series of bytes as a tuple or None if empty", "id": "f3788:c0:m2"}
{"signature": "def run(self):", "body": "run_once = True<EOL>while run_once or self._threaded:<EOL><INDENT>waiting = self._port.in_waiting<EOL>if waiting > <NUM_LIT:0>:<EOL><INDENT>temp = [int(c) for c in self._port.read(waiting)]<EOL>self._raw += temp<EOL><DEDENT>self._parse_raw_data()<EOL>run_once = False<EOL>if self._threaded:<EOL><INDENT>time.sleep(self._timeout)<EOL><DEDENT><DEDENT>", "docstring": "Receives the serial data into the self._raw buffer\n:return:", "id": "f3788:c0:m7"}
{"signature": "def _parse_raw_data(self):", "body": "if self._START_OF_FRAME in self._raw and self._END_OF_FRAME in self._raw:<EOL><INDENT>while self._raw[<NUM_LIT:0>] != self._START_OF_FRAME and len(self._raw) > <NUM_LIT:0>:<EOL><INDENT>self._raw.pop(<NUM_LIT:0>)<EOL><DEDENT>if self._raw[<NUM_LIT:0>] == self._START_OF_FRAME:<EOL><INDENT>self._raw.pop(<NUM_LIT:0>)<EOL><DEDENT>eof_index = self._raw.index(self._END_OF_FRAME)<EOL>raw_message = self._raw[:eof_index]<EOL>self._raw = self._raw[eof_index:]<EOL>logger.debug('<STR_LIT>'.format(raw_message))<EOL>message = self._remove_esc_chars(raw_message)<EOL>logger.debug('<STR_LIT>'.format(message))<EOL>expected_checksum = (message[-<NUM_LIT:1>] << <NUM_LIT:8>) | message[-<NUM_LIT:2>]<EOL>logger.debug('<STR_LIT>'.format(expected_checksum))<EOL>message = message[:-<NUM_LIT:2>]  <EOL>logger.debug('<STR_LIT>'.format(message))<EOL>sum1, sum2 = self._fletcher16_checksum(message)<EOL>calculated_checksum = (sum2 << <NUM_LIT:8>) | sum1<EOL>if expected_checksum == calculated_checksum:<EOL><INDENT>message = message[<NUM_LIT:2>:]  <EOL>logger.debug('<STR_LIT>'.format(message))<EOL>self._messages.append(message)<EOL><DEDENT>else:<EOL><INDENT>logger.warning('<STR_LIT>'.format(message))<EOL>logger.debug('<STR_LIT>'.format(expected_checksum, calculated_checksum))<EOL><DEDENT><DEDENT>try:<EOL><INDENT>while self._raw[<NUM_LIT:0>] != self._START_OF_FRAME and len(self._raw) > <NUM_LIT:0>:<EOL><INDENT>self._raw.pop(<NUM_LIT:0>)<EOL><DEDENT><DEDENT>except IndexError:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Parses the incoming data and determines if it is valid.  Valid\ndata gets placed into self._messages\n:return: None", "id": "f3788:c0:m4"}
{"signature": "def convert_to_float(value):", "body": "try:<EOL><INDENT>ret_val = float(value)<EOL>return ret_val, True<EOL><DEDENT>except ValueError:<EOL><INDENT>return <NUM_LIT:0.0>, False<EOL><DEDENT>", "docstring": "Convert a string to FLOAT", "id": "f3794:m5"}
{"signature": "def convert_words_to_uint(high_word, low_word):", "body": "try:<EOL><INDENT>low_num = int(low_word)<EOL>if low_num < <NUM_LIT:0>:<EOL><INDENT>low_num = abs(low_num) + <NUM_LIT:2>**<NUM_LIT:15><EOL><DEDENT>number = (int(high_word) << <NUM_LIT:16>) | low_num<EOL>return number, True<EOL><DEDENT>except:<EOL><INDENT>return <NUM_LIT:0>, False<EOL><DEDENT>", "docstring": "Convert two words to a floating point", "id": "f3794:m7"}
{"signature": "def check_pid(pid, debug):", "body": "try:<EOL><INDENT>os.kill(pid, <NUM_LIT:0>)<EOL>if debug > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return True<EOL><DEDENT>except OSError:<EOL><INDENT>if debug > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return False<EOL><DEDENT>", "docstring": "This function will check whether a PID is currently running", "id": "f3794:m2"}
{"signature": "def convert_int32(high_word, low_word):", "body": "return convert_words_to_uint(high_word, low_word)<EOL>", "docstring": "Convert two words to a 32 bit unsigned integer", "id": "f3794:m6"}
{"signature": "def check_pidfile(pidfile, debug):", "body": "<EOL>if os.path.isfile(pidfile):<EOL><INDENT>pidfile_handle = open(pidfile, '<STR_LIT:r>')<EOL>try:<EOL><INDENT>pid = int(pidfile_handle.read())<EOL>pidfile_handle.close()<EOL>if check_pid(pid, debug):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>os.unlink(pidfile)<EOL><DEDENT>pid = str(os.getpid())<EOL>open(pidfile, '<STR_LIT:w>').write(pid)<EOL>return False<EOL>", "docstring": "Check that a process is not running more than once, using PIDFILE", "id": "f3794:m1"}
{"signature": "def run_program(prog_list, debug, shell):", "body": "try:<EOL><INDENT>if not shell:<EOL><INDENT>process = Popen(prog_list, stdout=PIPE, stderr=PIPE)<EOL>stdout, stderr = process.communicate()<EOL>retcode = process.returncode<EOL>if debug >= <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\", \"<STR_LIT:U+0020>\".join(prog_list))<EOL>print(\"<STR_LIT>\", retcode)<EOL>print(\"<STR_LIT>\", stdout)<EOL>print(\"<STR_LIT>\", stderr)<EOL><DEDENT>return bool(retcode)<EOL><DEDENT>else:<EOL><INDENT>command = \"<STR_LIT:U+0020>\".join(prog_list)<EOL>os.system(command)<EOL>return True<EOL><DEDENT><DEDENT>except:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Run a  program and check program return code Note that some commands don't work\n    well with Popen.  So if this function is specifically called with 'shell=True',\n    then it will run the old 'os.system'. In which case, there is no program output", "id": "f3794:m10"}
{"signature": "def contains(self, logger, level, message, is_regex=False):", "body": "for record in self.records:<EOL><INDENT>if record.name != logger or record.levelno != level:<EOL><INDENT>continue<EOL><DEDENT>if is_regex:<EOL><INDENT>if re.search(message, (record.msg % record.args)):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if message in (record.msg % record.args):<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT>return False<EOL>", "docstring": "Checks whether a message has been logged to a specific logger with a\nspecific level.\n\n:param logger: The logger.\n:param level: The log level.\n:param messgae: The message contents.\n:param is_regex: Whether the expected message is a regex or not.\n    Non-regex messages are simply tested for inclusion.", "id": "f3795:c0:m4"}
{"signature": "def emit(self, record):", "body": "self.records.append(record)<EOL>", "docstring": "Overrides :py:meth:`logging.Handler.emit`.", "id": "f3795:c0:m1"}
{"signature": "@contextmanager<EOL>def environment(**kwargs):", "body": "old_values = {}<EOL>nonexistent = set()<EOL>for key in kwargs:<EOL><INDENT>if key not in os.environ:<EOL><INDENT>nonexistent.add(key)<EOL><DEDENT>else:<EOL><INDENT>old_values[key] = os.environ[key]<EOL><DEDENT>os.environ[key] = kwargs[key]<EOL><DEDENT>try:<EOL><INDENT>yield<EOL><DEDENT>finally:<EOL><INDENT>for key in old_values:<EOL><INDENT>os.environ[key] = old_values[key]<EOL><DEDENT>for key in nonexistent:<EOL><INDENT>os.environ.pop(key)<EOL><DEDENT><DEDENT>", "docstring": "Context manager to tempolrarily change environment variables. On exit all\nvariables are set to their original value.", "id": "f3795:m0"}
{"signature": "def check_file(self, filename):<EOL>", "body": "can_read = super(SecuredConfig, self).check_file(filename)<EOL>if not can_read:<EOL><INDENT>return False<EOL><DEDENT>mode = get_stat(filename).st_mode<EOL>if (mode & stat.S_IRGRP) or (mode & stat.S_IROTH):<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>self._log.warning(msg, filename)<EOL>return False<EOL><DEDENT>return True<EOL>", "docstring": "Overrides :py:meth:`.Config.check_file`", "id": "f3801:c1:m0"}
{"signature": "def load(self, reload=False, require_load=False):<EOL>", "body": "if reload:  <EOL><INDENT>self.config = None<EOL><DEDENT>if self.config:  <EOL><INDENT>self._log.debug('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return<EOL><DEDENT>path = self._effective_path()<EOL>config_filename = self._effective_filename()<EOL>self._active_path = [join(_, config_filename) for _ in path]<EOL>for dirname in path:<EOL><INDENT>conf_name = join(dirname, config_filename)<EOL>readable = self.check_file(conf_name)<EOL>if readable:<EOL><INDENT>action = '<STR_LIT>' if self._loaded_files else '<STR_LIT>'<EOL>self._log.info('<STR_LIT>', action, conf_name)<EOL>self.read(conf_name)<EOL>if conf_name == expanduser(\"<STR_LIT>\" % (<EOL>self.group_name, self.app_name, self.filename)):<EOL><INDENT>self._log.warning(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\", expanduser(\"<STR_LIT>\"), self.group_name,<EOL>self.app_name, expanduser(\"<STR_LIT>\"), self.group_name,<EOL>self.app_name)<EOL><DEDENT>self._loaded_files.append(conf_name)<EOL><DEDENT><DEDENT>if not self._loaded_files and not require_load:<EOL><INDENT>self._log.warning(<EOL>\"<STR_LIT>\",<EOL>config_filename,<EOL>path)<EOL><DEDENT>elif not self._loaded_files and require_load:<EOL><INDENT>raise IOError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (config_filename, path))<EOL><DEDENT>", "docstring": "Searches for an appropriate config file. If found, loads the file into\nthe current instance. This method can also be used to reload a\nconfiguration. Note that you may want to set ``reload`` to ``True`` to\nclear the configuration before loading in that case.  
Without doing\nthat, values will remain available even if they have been removed from\nthe config files.\n\n:param reload: if set to ``True``, the existing values are cleared\n               before reloading.\n:param require_load: If set to ``True`` this will raise a\n                     :py:exc:`IOError` if no config file has been found\n                     to load.", "id": "f3801:c0:m9"}
{"signature": "def get_xdg_dirs(self):<EOL>", "body": "config_dirs = getenv('<STR_LIT>', '<STR_LIT>')<EOL>if config_dirs:<EOL><INDENT>self._log.debug('<STR_LIT>', config_dirs)<EOL>output = []<EOL>for path in reversed(config_dirs.split('<STR_LIT::>')):<EOL><INDENT>output.append(join(path, self.group_name, self.app_name))<EOL><DEDENT>return output<EOL><DEDENT>return ['<STR_LIT>' % (self.group_name, self.app_name)]<EOL>", "docstring": "Returns a list of paths specified by the XDG_CONFIG_DIRS environment\nvariable or the appropriate default.\n\nThe list is sorted by precedence, with the most important item coming\n*last* (required by the existing config_resolver logic).", "id": "f3801:c0:m3"}
{"signature": "def get_xdg_home(self):<EOL>", "body": "config_home = getenv('<STR_LIT>', '<STR_LIT>')<EOL>if config_home:<EOL><INDENT>self._log.debug('<STR_LIT>', config_home)<EOL>return expanduser(join(config_home, self.group_name, self.app_name))<EOL><DEDENT>return expanduser('<STR_LIT>' % (self.group_name, self.app_name))<EOL>", "docstring": "Returns the value specified in the XDG_CONFIG_HOME environment variable\nor the appropriate default.", "id": "f3801:c0:m4"}
{"signature": "def _effective_path(self):<EOL>", "body": "<EOL>path = (['<STR_LIT>' % (self.group_name, self.app_name)] +<EOL>self.get_xdg_dirs() +<EOL>[expanduser('<STR_LIT>' % (self.group_name, self.app_name)),<EOL>self.get_xdg_home(),<EOL>join(getcwd(), '<STR_LIT>'.format(self.group_name), self.app_name)])<EOL>if self.search_path:<EOL><INDENT>path = self.search_path.split(pathsep)<EOL><DEDENT>env_path = getenv(self.env_path_name)<EOL>if env_path and env_path.startswith('<STR_LIT:+>'):<EOL><INDENT>additional_paths = env_path[<NUM_LIT:1>:].split(pathsep)<EOL>self._log.info('<STR_LIT>'<EOL>'<STR_LIT>',<EOL>additional_paths,<EOL>self.env_path_name)<EOL>path.extend(additional_paths)<EOL><DEDENT>elif env_path:<EOL><INDENT>self._log.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>env_path,<EOL>self.env_path_name)<EOL>path = env_path.split(pathsep)<EOL><DEDENT>return path<EOL>", "docstring": "Returns a list of paths to search for config files in reverse order of\nprecedence.  In other words: the last path element will override the\nsettings from the first one.", "id": "f3801:c0:m6"}
{"signature": "def _validate_num_channels(input_filepath_list, combine_type):", "body": "channels = [<EOL>file_info.channels(f) for f in input_filepath_list<EOL>]<EOL>if not core.all_equal(channels):<EOL><INDENT>raise IOError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>.format(combine_type)<EOL>)<EOL><DEDENT>", "docstring": "Check if files in input file list have the same number of channels", "id": "f3808:m2"}
{"signature": "def set_input_format(self, file_type=None, rate=None, bits=None,<EOL>channels=None, encoding=None, ignore_length=None):", "body": "if file_type is not None and not isinstance(file_type, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if file_type is not None:<EOL><INDENT>if not all([f in VALID_FORMATS for f in file_type]):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(VALID_FORMATS)<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>file_type = []<EOL><DEDENT>if rate is not None and not isinstance(rate, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if rate is not None:<EOL><INDENT>if not all([is_number(r) and r > <NUM_LIT:0> for r in rate]):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rate = []<EOL><DEDENT>if bits is not None and not isinstance(bits, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if bits is not None:<EOL><INDENT>if not all([isinstance(b, int) and b > <NUM_LIT:0> for b in bits]):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>bits = []<EOL><DEDENT>if channels is not None and not isinstance(channels, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if channels is not None:<EOL><INDENT>if not all([isinstance(c, int) and c > <NUM_LIT:0> for c in channels]):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>channels = []<EOL><DEDENT>if encoding is not None and not isinstance(encoding, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if encoding is not None:<EOL><INDENT>if not all([e in ENCODING_VALS for e in encoding]):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(ENCODING_VALS)<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>encoding = []<EOL><DEDENT>if ignore_length is not None and not isinstance(ignore_length, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if ignore_length is not None:<EOL><INDENT>if not 
all([isinstance(l, bool) for l in ignore_length]):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>ignore_length = []<EOL><DEDENT>max_input_arg_len = max([<EOL>len(file_type), len(rate), len(bits), len(channels),<EOL>len(encoding), len(ignore_length)<EOL>])<EOL>input_format = []<EOL>for _ in range(max_input_arg_len):<EOL><INDENT>input_format.append([])<EOL><DEDENT>for i, f in enumerate(file_type):<EOL><INDENT>input_format[i].extend(['<STR_LIT>', '<STR_LIT:{}>'.format(f)])<EOL><DEDENT>for i, r in enumerate(rate):<EOL><INDENT>input_format[i].extend(['<STR_LIT>', '<STR_LIT:{}>'.format(r)])<EOL><DEDENT>for i, b in enumerate(bits):<EOL><INDENT>input_format[i].extend(['<STR_LIT>', '<STR_LIT:{}>'.format(b)])<EOL><DEDENT>for i, c in enumerate(channels):<EOL><INDENT>input_format[i].extend(['<STR_LIT:-c>', '<STR_LIT:{}>'.format(c)])<EOL><DEDENT>for i, e in enumerate(encoding):<EOL><INDENT>input_format[i].extend(['<STR_LIT>', '<STR_LIT:{}>'.format(e)])<EOL><DEDENT>for i, l in enumerate(ignore_length):<EOL><INDENT>if l is True:<EOL><INDENT>input_format[i].append('<STR_LIT>')<EOL><DEDENT><DEDENT>self.input_format = input_format<EOL>return self<EOL>", "docstring": "Sets input file format arguments. This is primarily useful when\n        dealing with audio files without a file extension. Overwrites any\n        previously set input file arguments.\n\n        If this function is not explicity called the input format is inferred\n        from the file extension or the file's header.\n\n        Parameters\n        ----------\n        file_type : list of str or None, default=None\n            The file type of the input audio file. Should be the same as what\n            the file extension would be, for ex. 'mp3' or 'wav'.\n        rate : list of float or None, default=None\n            The sample rate of the input audio file. 
If None the sample rate\n            is inferred.\n        bits : list of int or None, default=None\n            The number of bits per sample. If None, the number of bits per\n            sample is inferred.\n        channels : list of int or None, default=None\n            The number of channels in the audio file. If None the number of\n            channels is inferred.\n        encoding : list of str or None, default=None\n            The audio encoding type. Sometimes needed with file-types that\n            support more than one encoding type. One of:\n                * signed-integer : PCM data stored as signed (\u2018two\u2019s\n                    complement\u2019) integers. Commonly used with a 16 or 24\u2212bit\n                    encoding size. A value of 0 represents minimum signal\n                    power.\n                * unsigned-integer : PCM data stored as unsigned integers.\n                    Commonly used with an 8-bit encoding size. A value of 0\n                    represents maximum signal power.\n                * floating-point : PCM data stored as IEEE 753 single precision\n                    (32-bit) or double precision (64-bit) floating-point\n                    (\u2018real\u2019) numbers. A value of 0 represents minimum signal\n                    power.\n                * a-law : International telephony standard for logarithmic\n                    encoding to 8 bits per sample. It has a precision\n                    equivalent to roughly 13-bit PCM and is sometimes encoded\n                    with reversed bit-ordering.\n                * u-law : North American telephony standard for logarithmic\n                    encoding to 8 bits per sample. A.k.a. \u03bc-law. It has a\n                    precision equivalent to roughly 14-bit PCM and is sometimes\n                    encoded with reversed bit-ordering.\n                * oki-adpcm : OKI (a.k.a. 
VOX, Dialogic, or Intel) 4-bit ADPCM;\n                    it has a precision equivalent to roughly 12-bit PCM. ADPCM\n                    is a form of audio compression that has a good compromise\n                    between audio quality and encoding/decoding speed.\n                * ima-adpcm : IMA (a.k.a. DVI) 4-bit ADPCM; it has a precision\n                    equivalent to roughly 13-bit PCM.\n                * ms-adpcm : Microsoft 4-bit ADPCM; it has a precision\n                    equivalent to roughly 14-bit PCM.\n                * gsm-full-rate : GSM is currently used for the vast majority\n                    of the world\u2019s digital wireless telephone calls. It\n                    utilises several audio formats with different bit-rates and\n                    associated speech quality. SoX has support for GSM\u2019s\n                    original 13kbps \u2018Full Rate\u2019 audio format. It is usually\n                    CPU-intensive to work with GSM audio.\n        ignore_length : list of bool or None, default=None\n            If True, overrides an (incorrect) audio length given in an audio\n            file\u2019s header. If this option is given then SoX will keep reading\n            audio until it reaches the end of the input file.", "id": "f3808:c0:m3"}
{"signature": "def rate(self, samplerate, quality='<STR_LIT:h>'):", "body": "quality_vals = ['<STR_LIT:q>', '<STR_LIT:l>', '<STR_LIT:m>', '<STR_LIT:h>', '<STR_LIT:v>']<EOL>if not is_number(samplerate) or samplerate <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if quality not in quality_vals:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\".format('<STR_LIT:U+0020>'.join(quality_vals))<EOL>)<EOL><DEDENT>effect_args = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'.format(quality),<EOL>'<STR_LIT>'.format(samplerate)<EOL>]<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Change the audio sampling rate (i.e. resample the audio) to any\n        given `samplerate`. Better the resampling quality = slower runtime.\n\n        Parameters\n        ----------\n        samplerate : float\n            Desired sample rate.\n        quality : str\n            Resampling quality. One of:\n             * q : Quick - very low quality,\n             * l : Low,\n             * m : Medium,\n             * h : High (default),\n             * v : Very high\n\n        See Also\n        --------\n        upsample, downsample, convert", "id": "f3809:c0:m43"}
{"signature": "def delay(self, positions):", "body": "if not isinstance(positions, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not all((is_number(p) and p >= <NUM_LIT:0>) for p in positions):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args = ['<STR_LIT>']<EOL>effect_args.extend(['<STR_LIT>'.format(p) for p in positions])<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Delay one or more audio channels such that they start at the given\n        positions.\n\n        Parameters\n        ----------\n        positions: list of floats\n            List of times (in seconds) to delay each audio channel.\n            If fewer positions are given than the number of channels, the\n            remaining channels will be unaffected.", "id": "f3809:c0:m20"}
{"signature": "def noisered(self, profile_path, amount=<NUM_LIT:0.5>):", "body": "if not os.path.exists(profile_path):<EOL><INDENT>raise IOError(<EOL>\"<STR_LIT>\".format(profile_path))<EOL><DEDENT>if not is_number(amount) or amount < <NUM_LIT:0> or amount > <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args = [<EOL>'<STR_LIT>',<EOL>profile_path,<EOL>'<STR_LIT>'.format(amount)<EOL>]<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Reduce noise in the audio signal by profiling and filtering.\n        This effect is moderately effective at removing consistent\n        background noise such as hiss or hum.\n\n        Parameters\n        ----------\n        profile_path : str\n            Path to a noise profile file.\n            This file can be generated using the `noiseprof` effect.\n        amount : float, default=0.5\n            How much noise should be removed is specified by amount. Should\n            be between 0 and 1.  Higher numbers will remove more noise but\n            present a greater likelihood  of  removing wanted  components  of\n            the  audio  signal.\n\n        See Also\n        --------\n        noiseprof", "id": "f3809:c0:m36"}
{"signature": "def vad(self, location=<NUM_LIT:1>, normalize=True, activity_threshold=<NUM_LIT>,<EOL>min_activity_duration=<NUM_LIT>, initial_search_buffer=<NUM_LIT:1.0>,<EOL>max_gap=<NUM_LIT>, initial_pad=<NUM_LIT:0.0>):", "body": "if location not in [-<NUM_LIT:1>, <NUM_LIT:1>]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(normalize, bool):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(activity_threshold):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(min_activity_duration) or min_activity_duration < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(initial_search_buffer) or initial_search_buffer < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(max_gap) or max_gap < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(initial_pad) or initial_pad < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args = []<EOL>if normalize:<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>if location == -<NUM_LIT:1>:<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>effect_args.extend([<EOL>'<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'.format(activity_threshold),<EOL>'<STR_LIT>', '<STR_LIT>'.format(min_activity_duration),<EOL>'<STR_LIT>', '<STR_LIT>'.format(initial_search_buffer),<EOL>'<STR_LIT>', '<STR_LIT>'.format(max_gap),<EOL>'<STR_LIT>', '<STR_LIT>'.format(initial_pad)<EOL>])<EOL>if location == -<NUM_LIT:1>:<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Voice Activity Detector. Attempts to trim silence and quiet\n        background sounds from the ends of recordings of speech. 
The algorithm\n        currently uses a simple cepstral power measurement to detect voice, so\n        may be fooled by other things, especially music.\n\n        The effect can trim only from the front of the audio, so in order to\n        trim from the back, the reverse effect must also be used.\n\n        Parameters\n        ----------\n        location : 1 or -1, default=1\n            If 1, trims silence from the beginning\n            If -1, trims silence from the end\n        normalize : bool, default=True\n            If true, normalizes audio before processing.\n        activity_threshold : float, default=7.0\n            The measurement level used to trigger activity detection. This may\n            need to be cahnged depending on the noise level, signal level, and\n            other characteristics of the input audio.\n        min_activity_duration : float, default=0.25\n            The time constant (in seconds) used to help ignore short bursts of\n            sound.\n        initial_search_buffer : float, default=1.0\n            The amount of audio (in seconds) to search for quieter/shorter\n            bursts of audio to include prior to the detected trigger point.\n        max_gap : float, default=0.25\n            The allowed gap (in seconds) between quiteter/shorter bursts of\n            audio to include prior to the detected trigger point\n        initial_pad : float, default=0.0\n            The amount of audio (in seconds) to preserve before the trigger\n            point and any found quieter/shorter bursts.\n\n        See Also\n        --------\n        silence\n\n        Examples\n        --------\n        >>> tfm = sox.Transformer()\n\n        Remove silence from the beginning of speech\n\n        >>> tfm.vad(initial_pad=0.3)\n\n        Remove silence from the end of speech\n\n        >>> tfm.vad(location=-1, initial_pad=0.2)", "id": "f3809:c0:m61"}
{"signature": "def set_input_format(self, file_type=None, rate=None, bits=None,<EOL>channels=None, encoding=None, ignore_length=False):", "body": "if file_type not in VALID_FORMATS + [None]:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'.format(VALID_FORMATS)<EOL>)<EOL><DEDENT>if not is_number(rate) and rate is not None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if rate is not None and rate <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not isinstance(bits, int) and bits is not None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if bits is not None and bits <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not isinstance(channels, int) and channels is not None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if channels is not None and channels <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if encoding not in ENCODING_VALS + [None]:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'.format(ENCODING_VALS)<EOL>)<EOL><DEDENT>if not isinstance(ignore_length, bool):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>input_format = []<EOL>if file_type is not None:<EOL><INDENT>input_format.extend(['<STR_LIT>', '<STR_LIT:{}>'.format(file_type)])<EOL><DEDENT>if rate is not None:<EOL><INDENT>input_format.extend(['<STR_LIT>', '<STR_LIT>'.format(rate)])<EOL><DEDENT>if bits is not None:<EOL><INDENT>input_format.extend(['<STR_LIT>', '<STR_LIT:{}>'.format(bits)])<EOL><DEDENT>if channels is not None:<EOL><INDENT>input_format.extend(['<STR_LIT:-c>', '<STR_LIT:{}>'.format(channels)])<EOL><DEDENT>if encoding is not None:<EOL><INDENT>input_format.extend(['<STR_LIT>', '<STR_LIT:{}>'.format(encoding)])<EOL><DEDENT>if ignore_length:<EOL><INDENT>input_format.append('<STR_LIT>')<EOL><DEDENT>self.input_format = input_format<EOL>return self<EOL>", "docstring": "Sets input file format arguments. This is primarily useful when\n        dealing with audio files without a file extension. 
Overwrites any\n        previously set input file arguments.\n\n        If this function is not explicity called the input format is inferred\n        from the file extension or the file's header.\n\n        Parameters\n        ----------\n        file_type : str or None, default=None\n            The file type of the input audio file. Should be the same as what\n            the file extension would be, for ex. 'mp3' or 'wav'.\n        rate : float or None, default=None\n            The sample rate of the input audio file. If None the sample rate\n            is inferred.\n        bits : int or None, default=None\n            The number of bits per sample. If None, the number of bits per\n            sample is inferred.\n        channels : int or None, default=None\n            The number of channels in the audio file. If None the number of\n            channels is inferred.\n        encoding : str or None, default=None\n            The audio encoding type. Sometimes needed with file-types that\n            support more than one encoding type. One of:\n                * signed-integer : PCM data stored as signed (\u2018two\u2019s\n                    complement\u2019) integers. Commonly used with a 16 or 24\u2212bit\n                    encoding size. A value of 0 represents minimum signal\n                    power.\n                * unsigned-integer : PCM data stored as unsigned integers.\n                    Commonly used with an 8-bit encoding size. A value of 0\n                    represents maximum signal power.\n                * floating-point : PCM data stored as IEEE 753 single precision\n                    (32-bit) or double precision (64-bit) floating-point\n                    (\u2018real\u2019) numbers. A value of 0 represents minimum signal\n                    power.\n                * a-law : International telephony standard for logarithmic\n                    encoding to 8 bits per sample. 
It has a precision\n                    equivalent to roughly 13-bit PCM and is sometimes encoded\n                    with reversed bit-ordering.\n                * u-law : North American telephony standard for logarithmic\n                    encoding to 8 bits per sample. A.k.a. \u03bc-law. It has a\n                    precision equivalent to roughly 14-bit PCM and is sometimes\n                    encoded with reversed bit-ordering.\n                * oki-adpcm : OKI (a.k.a. VOX, Dialogic, or Intel) 4-bit ADPCM;\n                    it has a precision equivalent to roughly 12-bit PCM. ADPCM\n                    is a form of audio compression that has a good compromise\n                    between audio quality and encoding/decoding speed.\n                * ima-adpcm : IMA (a.k.a. DVI) 4-bit ADPCM; it has a precision\n                    equivalent to roughly 13-bit PCM.\n                * ms-adpcm : Microsoft 4-bit ADPCM; it has a precision\n                    equivalent to roughly 14-bit PCM.\n                * gsm-full-rate : GSM is currently used for the vast majority\n                    of the world\u2019s digital wireless telephone calls. It\n                    utilises several audio formats with different bit-rates and\n                    associated speech quality. SoX has support for GSM\u2019s\n                    original 13kbps \u2018Full Rate\u2019 audio format. It is usually\n                    CPU-intensive to work with GSM audio.\n        ignore_length : bool, default=False\n            If True, overrides an (incorrect) audio length given in an audio\n            file\u2019s header. If this option is given then SoX will keep reading\n            audio until it reaches the end of the input file.", "id": "f3809:c0:m2"}
{"signature": "def dcshift(self, shift=<NUM_LIT:0.0>):", "body": "if not is_number(shift) or shift < -<NUM_LIT:2> or shift > <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>effect_args = ['<STR_LIT>', '<STR_LIT>'.format(shift)]<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Apply a DC shift to the audio.\n\n        Parameters\n        ----------\n        shift : float\n            Amount to shift audio between -2 and 2. (Audio is between -1 and 1)\n\n        See Also\n        --------\n        highpass", "id": "f3809:c0:m18"}
{"signature": "def contrast(self, amount=<NUM_LIT>):", "body": "if not is_number(amount) or amount < <NUM_LIT:0> or amount > <NUM_LIT:100>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>effect_args = ['<STR_LIT>', '<STR_LIT>'.format(amount)]<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Comparable with compression, this effect modifies an audio signal to\n        make it sound louder.\n\n        Parameters\n        ----------\n        amount : float\n            Amount of enhancement between 0 and 100.\n\n        See Also\n        --------\n        compand, mcompand", "id": "f3809:c0:m16"}
{"signature": "def gain(self, gain_db=<NUM_LIT:0.0>, normalize=True, limiter=False, balance=None):", "body": "if not is_number(gain_db):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(normalize, bool):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(limiter, bool):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if balance not in [None, '<STR_LIT:e>', '<STR_LIT:B>', '<STR_LIT:b>']:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args = ['<STR_LIT>']<EOL>if balance is not None:<EOL><INDENT>effect_args.append('<STR_LIT>'.format(balance))<EOL><DEDENT>if normalize:<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>if limiter:<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>effect_args.append('<STR_LIT>'.format(gain_db))<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Apply amplification or attenuation to the audio signal.\n\n        Parameters\n        ----------\n        gain_db : float, default=0.0\n            Gain adjustment in decibels (dB).\n        normalize : bool, default=True\n            If True, audio is normalized to gain_db relative to full scale.\n            If False, simply adjusts the audio power level by gain_db.\n        limiter : bool, default=False\n            If True, a simple limiter is invoked to prevent clipping.\n        balance : str or None, default=None\n            Balance gain across channels. 
Can be one of:\n             * None applies no balancing (default)\n             * 'e' applies gain to all channels other than that with the\n                highest peak level, such that all channels attain the same\n                peak level\n             * 'B' applies gain to all channels other than that with the\n                highest RMS level, such that all channels attain the same\n                RMS level\n             * 'b' applies gain with clipping protection to all channels other\n                than that with the highest RMS level, such that all channels\n                attain the same RMS level\n            If normalize=True, 'B' and 'b' are equivalent.\n\n        See Also\n        --------\n        loudness", "id": "f3809:c0:m29"}
{"signature": "def pitch(self, n_semitones, quick=False):", "body": "if not is_number(n_semitones):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if n_semitones < -<NUM_LIT:12> or n_semitones > <NUM_LIT:12>:<EOL><INDENT>logger.warning(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if not isinstance(quick, bool):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args = ['<STR_LIT>']<EOL>if quick:<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>effect_args.append('<STR_LIT>'.format(n_semitones * <NUM_LIT>))<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Pitch shift the audio without changing the tempo.\n\n        This effect uses the WSOLA algorithm. The audio is chopped up into\n        segments which are then shifted in the time domain and overlapped\n        (cross-faded) at points where their waveforms are most similar as\n        determined by measurement of least squares.\n\n        Parameters\n        ----------\n        n_semitones : float\n            The number of semitones to shift. Can be positive or negative.\n        quick : bool, default=False\n            If True, this effect will run faster but with lower sound quality.\n\n        See Also\n        --------\n        bend, speed, tempo", "id": "f3809:c0:m42"}
{"signature": "def stat(self, input_filepath, scale=None, rms=False):", "body": "effect_args = ['<STR_LIT>', '<STR_LIT:1>', '<STR_LIT>']<EOL>if scale is not None:<EOL><INDENT>if not is_number(scale) or scale <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args.extend(['<STR_LIT>', '<STR_LIT>'.format(scale)])<EOL><DEDENT>if rms:<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>_, _, stat_output = self.build(<EOL>input_filepath, None, extra_args=effect_args, return_output=True<EOL>)<EOL>stat_dict = {}<EOL>lines = stat_output.split('<STR_LIT:\\n>')<EOL>for line in lines:<EOL><INDENT>split_line = line.split()<EOL>if len(split_line) == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>value = split_line[-<NUM_LIT:1>]<EOL>key = '<STR_LIT:U+0020>'.join(split_line[:-<NUM_LIT:1>])<EOL>stat_dict[key.strip('<STR_LIT::>')] = value<EOL><DEDENT>return stat_dict<EOL>", "docstring": "Display time and frequency domain statistical information about the\n        audio. Audio is passed unmodified through the SoX processing chain.\n\n        Unlike other Transformer methods, this does not modify the transformer\n        effects chain. Instead it computes statistics on the output file that\n        would be created if the build command were invoked.\n\n        Note: The file is downmixed to mono prior to computation.\n\n        Parameters\n        ----------\n        input_filepath : str\n            Path to input file to compute stats on.\n        scale : float or None, default=None\n            If not None, scales the input by the given scale factor.\n        rms : bool, default=False\n            If True, scales all values by the average rms amplitude.\n\n        Returns\n        -------\n        stat_dict : dict\n            Dictionary of statistics.\n\n        See Also\n        --------\n        stats, power_spectrum, sox.file_info", "id": "f3809:c0:m51"}
{"signature": "def bandreject(self, frequency, width_q=<NUM_LIT>):", "body": "if not is_number(frequency) or frequency <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(width_q) or width_q <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args = [<EOL>'<STR_LIT>', '<STR_LIT>'.format(frequency), '<STR_LIT>'.format(width_q)<EOL>]<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Apply a two-pole Butterworth band-reject filter with the given\n        central frequency, and (3dB-point) band-width. The filter rolls off at\n        6dB per octave (20dB per decade) and is described in detail in\n        http://musicdsp.org/files/Audio-EQ-Cookbook.txt\n\n        Parameters\n        ----------\n        frequency : float\n            The filter's center frequency in Hz.\n        width_q : float, default=2.0\n            The filter's width as a Q-factor.\n        constant_skirt : bool, default=False\n            If True, selects constant skirt gain (peak gain = width_q).\n            If False, selects constant 0dB peak gain.\n\n        See Also\n        --------\n        bandreject, sinc", "id": "f3809:c0:m9"}
{"signature": "def echos(self, gain_in=<NUM_LIT>, gain_out=<NUM_LIT>, n_echos=<NUM_LIT:1>, delays=[<NUM_LIT>],<EOL>decays=[<NUM_LIT>]):", "body": "if not is_number(gain_in) or gain_in <= <NUM_LIT:0> or gain_in > <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(gain_out) or gain_out <= <NUM_LIT:0> or gain_out > <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(n_echos, int) or n_echos <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(delays, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if len(delays) != n_echos:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if any((not is_number(p) or p <= <NUM_LIT:0>) for p in delays):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(decays, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if len(decays) != n_echos:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if any((not is_number(p) or p <= <NUM_LIT:0> or p > <NUM_LIT:1>) for p in decays):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>effect_args = [<EOL>'<STR_LIT>', '<STR_LIT>'.format(gain_in), '<STR_LIT>'.format(gain_out)<EOL>]<EOL>for i in range(n_echos):<EOL><INDENT>effect_args.extend([<EOL>'<STR_LIT>'.format(delays[i]),<EOL>'<STR_LIT>'.format(decays[i])<EOL>])<EOL><DEDENT>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Add a sequence of echoes to the audio.\n\n        Like the echo effect, echos stand for \u2018ECHO in Sequel\u2019, that is the\n        first echos takes the input, the second the input and the first echos,\n        the third the input and the first and the second echos, ... 
and so on.\n        Care should be taken using many echos; a single echos has the same\n        effect as a single echo.\n\n        Parameters\n        ----------\n        gain_in : float, default=0.8\n            Input volume, between 0 and 1\n        gain_out : float, default=0.9\n            Output volume, between 0 and 1\n        n_echos : int, default=1\n            Number of reflections\n        delays : list, default=[60]\n            List of delays in miliseconds\n        decays : list, default=[0.4]\n            List of decays, relative to gain in between 0 and 1\n\n        See Also\n        --------\n        echo, reverb, chorus", "id": "f3809:c0:m24"}
{"signature": "def reverb(self, reverberance=<NUM_LIT:50>, high_freq_damping=<NUM_LIT:50>, room_scale=<NUM_LIT:100>,<EOL>stereo_depth=<NUM_LIT:100>, pre_delay=<NUM_LIT:0>, wet_gain=<NUM_LIT:0>, wet_only=False):", "body": "if (not is_number(reverberance) or reverberance < <NUM_LIT:0> or<EOL>reverberance > <NUM_LIT:100>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if (not is_number(high_freq_damping) or high_freq_damping < <NUM_LIT:0> or<EOL>high_freq_damping > <NUM_LIT:100>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if (not is_number(room_scale) or room_scale < <NUM_LIT:0> or<EOL>room_scale > <NUM_LIT:100>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if (not is_number(stereo_depth) or stereo_depth < <NUM_LIT:0> or<EOL>stereo_depth > <NUM_LIT:100>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(pre_delay) or pre_delay < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(wet_gain):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(wet_only, bool):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args = ['<STR_LIT>']<EOL>if wet_only:<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>effect_args.extend([<EOL>'<STR_LIT>'.format(reverberance),<EOL>'<STR_LIT>'.format(high_freq_damping),<EOL>'<STR_LIT>'.format(room_scale),<EOL>'<STR_LIT>'.format(stereo_depth),<EOL>'<STR_LIT>'.format(pre_delay),<EOL>'<STR_LIT>'.format(wet_gain)<EOL>])<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Add reverberation to the audio using the \u2018freeverb\u2019 algorithm.\n        A reverberation effect is sometimes desirable for concert halls that\n        are too small or contain so many people that the hall\u2019s natural\n        reverberance is diminished. 
Applying a small amount of stereo reverb\n        to a (dry) mono signal will usually make it sound more natural.\n\n        Parameters\n        ----------\n        reverberance : float, default=50\n            Percentage of reverberance\n        high_freq_damping : float, default=50\n            Percentage of high-frequency damping.\n        room_scale : float, default=100\n            Scale of the room as a percentage.\n        stereo_depth : float, default=100\n            Stereo depth as a percentage.\n        pre_delay : float, default=0\n            Pre-delay in milliseconds.\n        wet_gain : float, default=0\n            Amount of wet gain in dB\n        wet_only : bool, default=False\n            If True, only outputs the wet signal.\n\n        See Also\n        --------\n        echo", "id": "f3809:c0:m46"}
{"signature": "def channels(self, n_channels):", "body": "if not isinstance(n_channels, int) or n_channels <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>effect_args = ['<STR_LIT>', '<STR_LIT:{}>'.format(n_channels)]<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Change the number of channels in the audio signal. If decreasing the\n        number of channels it mixes channels together, if increasing the number\n        of channels it duplicates.\n\n        Note: This overrides arguments used in the convert effect!\n\n        Parameters\n        ----------\n        n_channels : int\n            Desired number of channels.\n\n        See Also\n        --------\n        convert", "id": "f3809:c0:m13"}
{"signature": "def stats(self, input_filepath):", "body": "effect_args = ['<STR_LIT>', '<STR_LIT:1>', '<STR_LIT>']<EOL>_, _, stats_output = self.build(<EOL>input_filepath, None, extra_args=effect_args, return_output=True<EOL>)<EOL>stats_dict = {}<EOL>lines = stats_output.split('<STR_LIT:\\n>')<EOL>for line in lines:<EOL><INDENT>split_line = line.split()<EOL>if len(split_line) == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>value = split_line[-<NUM_LIT:1>]<EOL>key = '<STR_LIT:U+0020>'.join(split_line[:-<NUM_LIT:1>])<EOL>stats_dict[key] = value<EOL><DEDENT>return stats_dict<EOL>", "docstring": "Display time domain statistical information about the audio\n        channels. Audio is passed unmodified through the SoX processing chain.\n        Statistics are calculated and displayed for each audio channel\n\n        Unlike other Transformer methods, this does not modify the transformer\n        effects chain. Instead it computes statistics on the output file that\n        would be created if the build command were invoked.\n\n        Note: The file is downmixed to mono prior to computation.\n\n        Parameters\n        ----------\n        input_filepath : str\n            Path to input file to compute stats on.\n\n        Returns\n        -------\n        stats_dict : dict\n            List of frequency (Hz), amplitude pairs.\n\n        See Also\n        --------\n        stat, sox.file_info", "id": "f3809:c0:m53"}
{"signature": "def power_spectrum(self, input_filepath):", "body": "effect_args = ['<STR_LIT>', '<STR_LIT:1>', '<STR_LIT>', '<STR_LIT>']<EOL>_, _, stat_output = self.build(<EOL>input_filepath, None, extra_args=effect_args, return_output=True<EOL>)<EOL>power_spectrum = []<EOL>lines = stat_output.split('<STR_LIT:\\n>')<EOL>for line in lines:<EOL><INDENT>split_line = line.split()<EOL>if len(split_line) != <NUM_LIT:2>:<EOL><INDENT>continue<EOL><DEDENT>freq, amp = split_line<EOL>power_spectrum.append([float(freq), float(amp)])<EOL><DEDENT>return power_spectrum<EOL>", "docstring": "Calculates the power spectrum (4096 point DFT). This method\n        internally invokes the stat command with the -freq option.\n\n        Note: The file is downmixed to mono prior to computation.\n\n        Parameters\n        ----------\n        input_filepath : str\n            Path to input file to compute stats on.\n\n        Returns\n        -------\n        power_spectrum : list\n            List of frequency (Hz), amplitude pairs.\n\n        See Also\n        --------\n        stat, stats, sox.file_info", "id": "f3809:c0:m52"}
{"signature": "def chorus(self, gain_in=<NUM_LIT:0.5>, gain_out=<NUM_LIT>, n_voices=<NUM_LIT:3>, delays=None,<EOL>decays=None, speeds=None, depths=None, shapes=None):", "body": "if not is_number(gain_in) or gain_in <= <NUM_LIT:0> or gain_in > <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(gain_out) or gain_out <= <NUM_LIT:0> or gain_out > <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(n_voices, int) or n_voices <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not (delays is None or isinstance(delays, list)):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if delays is not None:<EOL><INDENT>if len(delays) != n_voices:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if any((not is_number(p) or p < <NUM_LIT:20>) for p in delays):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>delays = [random.uniform(<NUM_LIT>, <NUM_LIT>) for _ in range(n_voices)]<EOL><DEDENT>if not (decays is None or isinstance(decays, list)):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if decays is not None:<EOL><INDENT>if len(decays) != n_voices:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if any((not is_number(p) or p <= <NUM_LIT:0> or p > <NUM_LIT:1>) for p in decays):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>decays = [random.uniform(<NUM_LIT>, <NUM_LIT>) for _ in range(n_voices)]<EOL><DEDENT>if not (speeds is None or isinstance(speeds, list)):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if speeds is not None:<EOL><INDENT>if len(speeds) != n_voices:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if any((not is_number(p) or p <= <NUM_LIT:0>) for p in speeds):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>speeds = [random.uniform(<NUM_LIT>, <NUM_LIT>) for _ in range(n_voices)]<EOL><DEDENT>if not (depths is None or 
isinstance(depths, list)):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if depths is not None:<EOL><INDENT>if len(depths) != n_voices:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if any((not is_number(p) or p <= <NUM_LIT:0>) for p in depths):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>depths = [random.uniform(<NUM_LIT:1.0>, <NUM_LIT>) for _ in range(n_voices)]<EOL><DEDENT>if not (shapes is None or isinstance(shapes, list)):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if shapes is not None:<EOL><INDENT>if len(shapes) != n_voices:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if any((p not in ['<STR_LIT:t>', '<STR_LIT:s>']) for p in shapes):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>shapes = [random.choice(['<STR_LIT:t>', '<STR_LIT:s>']) for _ in range(n_voices)]<EOL><DEDENT>effect_args = ['<STR_LIT>', '<STR_LIT:{}>'.format(gain_in), '<STR_LIT:{}>'.format(gain_out)]<EOL>for i in range(n_voices):<EOL><INDENT>effect_args.extend([<EOL>'<STR_LIT>'.format(delays[i]),<EOL>'<STR_LIT>'.format(decays[i]),<EOL>'<STR_LIT>'.format(speeds[i]),<EOL>'<STR_LIT>'.format(depths[i]),<EOL>'<STR_LIT>'.format(shapes[i])<EOL>])<EOL><DEDENT>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Add a chorus effect to the audio. This can makeasingle vocal sound\n        like a chorus, but can also be applied to instrumentation.\n\n        Chorus resembles an echo effect with a short delay, but whereas with\n        echo the delay is constant, with chorus, it is varied using sinusoidal\n        or triangular modulation. The modulation depth defines the range the\n        modulated delay is played before or after the delay. 
Hence the delayed\n        sound will sound slower or faster, that is the delayed sound tuned\n        around the original one, like in a chorus where some vocals are\n        slightly off key.\n\n        Parameters\n        ----------\n        gain_in : float, default=0.3\n            The time in seconds over which the instantaneous level of the input\n            signal is averaged to determine increases in volume.\n        gain_out : float, default=0.8\n            The time in seconds over which the instantaneous level of the input\n            signal is averaged to determine decreases in volume.\n        n_voices : int, default=3\n            The number of voices in the chorus effect.\n        delays : list of floats > 20 or None, default=None\n            If a list, the list of delays (in miliseconds) of length n_voices.\n            If None, the individual delay parameters are chosen automatically\n            to be between 40 and 60 miliseconds.\n        decays : list of floats or None, default=None\n            If a list, the list of decays (as a fraction of gain_in) of length\n            n_voices.\n            If None, the individual decay parameters are chosen automatically\n            to be between 0.3 and 0.4.\n        speeds : list of floats or None, default=None\n            If a list, the list of modulation speeds (in Hz) of length n_voices\n            If None, the individual speed parameters are chosen automatically\n            to be between 0.25 and 0.4 Hz.\n        depths : list of floats or None, default=None\n            If a list, the list of depths (in miliseconds) of length n_voices.\n            If None, the individual delay parameters are chosen automatically\n            to be between 1 and 3 miliseconds.\n        shapes : list of 's' or 't' or None, deault=None\n            If a list, the list of modulation shapes - 's' for sinusoidal or\n            't' for triangular - of length n_voices.\n            If None, the individual shapes 
are chosen automatically.", "id": "f3809:c0:m14"}
{"signature": "def trim(self, start_time, end_time=None):", "body": "if not is_number(start_time) or start_time < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'.format(start_time)<EOL>]<EOL>if end_time is not None:<EOL><INDENT>if not is_number(end_time) or end_time < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if start_time >= end_time:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args.append('<STR_LIT>'.format(end_time - start_time))<EOL><DEDENT>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Excerpt a clip from an audio file, given the start timestamp and end timestamp of the clip within the file, expressed in seconds. If the end timestamp is set to `None` or left unspecified, it defaults to the duration of the audio file.\n\n        Parameters\n        ----------\n        start_time : float\n            Start time of the clip (seconds)\n        end_time : float or None, default=None\n            End time of the clip (seconds)", "id": "f3809:c0:m59"}
{"signature": "def overdrive(self, gain_db=<NUM_LIT>, colour=<NUM_LIT>):", "body": "if not is_number(gain_db):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not is_number(colour):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>effect_args = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'.format(gain_db),<EOL>'<STR_LIT>'.format(colour)<EOL>]<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Apply non-linear distortion.\n\n        Parameters\n        ----------\n        gain_db : float, default=20\n            Controls the amount of distortion (dB).\n        colour : float, default=20\n            Controls the amount of even harmonic content in the output (dB).", "id": "f3809:c0:m39"}
{"signature": "def sinc(self, filter_type='<STR_LIT>', cutoff_freq=<NUM_LIT>,<EOL>stop_band_attenuation=<NUM_LIT>, transition_bw=None,<EOL>phase_response=None):", "body": "filter_types = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>if filter_type not in filter_types:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\".format('<STR_LIT:U+002CU+0020>'.join(filter_types))<EOL>)<EOL><DEDENT>if not (is_number(cutoff_freq) or isinstance(cutoff_freq, list)):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if filter_type in ['<STR_LIT>', '<STR_LIT>'] and isinstance(cutoff_freq, list):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if filter_type in ['<STR_LIT>', '<STR_LIT>'] and is_number(cutoff_freq):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if is_number(cutoff_freq) and cutoff_freq <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if isinstance(cutoff_freq, list):<EOL><INDENT>if len(cutoff_freq) != <NUM_LIT:2>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if any([not is_number(f) or f <= <NUM_LIT:0> for f in cutoff_freq]):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>cutoff_freq = sorted(cutoff_freq)<EOL><DEDENT>if not is_number(stop_band_attenuation) or stop_band_attenuation < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not (is_number(transition_bw) or<EOL>isinstance(transition_bw, list) or transition_bw is None):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if filter_type in ['<STR_LIT>', '<STR_LIT>'] and isinstance(transition_bw, list):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if is_number(transition_bw) and transition_bw <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if isinstance(transition_bw, list):<EOL><INDENT>if any([not is_number(f) or f <= <NUM_LIT:0> for f in transition_bw]):<EOL><INDENT>raise 
ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if len(transition_bw) != <NUM_LIT:2>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT><DEDENT>if phase_response is not None and not is_number(phase_response):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if (is_number(phase_response) and<EOL>(phase_response < <NUM_LIT:0> or phase_response > <NUM_LIT:100>)):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args = ['<STR_LIT>']<EOL>effect_args.extend(['<STR_LIT>', '<STR_LIT>'.format(stop_band_attenuation)])<EOL>if phase_response is not None:<EOL><INDENT>effect_args.extend(['<STR_LIT>', '<STR_LIT>'.format(phase_response)])<EOL><DEDENT>if filter_type == '<STR_LIT>':<EOL><INDENT>if transition_bw is not None:<EOL><INDENT>effect_args.extend(['<STR_LIT>', '<STR_LIT>'.format(transition_bw)])<EOL><DEDENT>effect_args.append('<STR_LIT>'.format(cutoff_freq))<EOL><DEDENT>elif filter_type == '<STR_LIT>':<EOL><INDENT>effect_args.append('<STR_LIT>'.format(cutoff_freq))<EOL>if transition_bw is not None:<EOL><INDENT>effect_args.extend(['<STR_LIT>', '<STR_LIT>'.format(transition_bw)])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if is_number(transition_bw):<EOL><INDENT>effect_args.extend(['<STR_LIT>', '<STR_LIT>'.format(transition_bw)])<EOL><DEDENT>elif isinstance(transition_bw, list):<EOL><INDENT>effect_args.extend(['<STR_LIT>', '<STR_LIT>'.format(transition_bw[<NUM_LIT:0>])])<EOL><DEDENT><DEDENT>if filter_type == '<STR_LIT>':<EOL><INDENT>effect_args.append(<EOL>'<STR_LIT>'.format(cutoff_freq[<NUM_LIT:0>], cutoff_freq[<NUM_LIT:1>])<EOL>)<EOL><DEDENT>elif filter_type == '<STR_LIT>':<EOL><INDENT>effect_args.append(<EOL>'<STR_LIT>'.format(cutoff_freq[<NUM_LIT:1>], cutoff_freq[<NUM_LIT:0>])<EOL>)<EOL><DEDENT>if isinstance(transition_bw, list):<EOL><INDENT>effect_args.extend(['<STR_LIT>', '<STR_LIT>'.format(transition_bw[<NUM_LIT:1>])])<EOL><DEDENT>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": 
"Apply a sinc kaiser-windowed low-pass, high-pass, band-pass, or\n        band-reject filter to the signal.\n\n        Parameters\n        ----------\n        filter_type : str, default='high'\n            Type of filter. One of:\n                - 'high' for a high-pass filter\n                - 'low' for a low-pass filter\n                - 'pass' for a band-pass filter\n                - 'reject' for a band-reject filter\n        cutoff_freq : float or list, default=3000\n            A scalar or length 2 list indicating the filter's critical\n            frequencies. The critical frequencies are given in Hz and must be\n            positive. For a high-pass or low-pass filter, cutoff_freq\n            must be a scalar. For a band-pass or band-reject filter, it must be\n            a length 2 list.\n        stop_band_attenuation : float, default=120\n            The stop band attenuation in dB\n        transition_bw : float, list or None, default=None\n            The transition band-width in Hz.\n            If None, sox's default of 5% of the total bandwith is used.\n            If a float, the given transition bandwith is used for both the\n            upper and lower bands (if applicable).\n            If a list, the first argument is used for the lower band and the\n            second for the upper band.\n        phase_response : float or None\n            The filter's phase response between 0 (minimum) and 100 (maximum).\n            If None, sox's default phase repsonse is used.\n\n        See Also\n        --------\n        band, bandpass, bandreject, highpass, lowpass", "id": "f3809:c0:m49"}
{"signature": "def fade(self, fade_in_len=<NUM_LIT:0.0>, fade_out_len=<NUM_LIT:0.0>, fade_shape='<STR_LIT:q>'):", "body": "fade_shapes = ['<STR_LIT:q>', '<STR_LIT:h>', '<STR_LIT:t>', '<STR_LIT:l>', '<STR_LIT:p>']<EOL>if fade_shape not in fade_shapes:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\".format(\"<STR_LIT:U+0020>\".join(fade_shapes))<EOL>)<EOL><DEDENT>if not is_number(fade_in_len) or fade_in_len < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(fade_out_len) or fade_out_len < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args = []<EOL>if fade_in_len > <NUM_LIT:0>:<EOL><INDENT>effect_args.extend([<EOL>'<STR_LIT>', '<STR_LIT:{}>'.format(fade_shape), '<STR_LIT>'.format(fade_in_len)<EOL>])<EOL><DEDENT>if fade_out_len > <NUM_LIT:0>:<EOL><INDENT>effect_args.extend([<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT:{}>'.format(fade_shape),<EOL>'<STR_LIT>'.format(fade_out_len), '<STR_LIT>'<EOL>])<EOL><DEDENT>if len(effect_args) > <NUM_LIT:0>:<EOL><INDENT>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL><DEDENT>return self<EOL>", "docstring": "Add a fade in and/or fade out to an audio file.\n        Default fade shape is 1/4 sine wave.\n\n        Parameters\n        ----------\n        fade_in_len : float, default=0.0\n            Length of fade-in (seconds). If fade_in_len = 0,\n            no fade in is applied.\n        fade_out_len : float, defaut=0.0\n            Length of fade-out (seconds). If fade_out_len = 0,\n            no fade in is applied.\n        fade_shape : str, default='q'\n            Shape of fade. Must be one of\n             * 'q' for quarter sine (default),\n             * 'h' for half sine,\n             * 't' for linear,\n             * 'l' for logarithmic\n             * 'p' for inverted parabola.\n\n        See Also\n        --------\n        splice", "id": "f3809:c0:m26"}
{"signature": "def vol(self, gain, gain_type='<STR_LIT>', limiter_gain=None):", "body": "if not is_number(gain):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if limiter_gain is not None:<EOL><INDENT>if (not is_number(limiter_gain) or<EOL>limiter_gain <= <NUM_LIT:0> or limiter_gain >= <NUM_LIT:1>):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT><DEDENT>if gain_type in ['<STR_LIT>', '<STR_LIT>'] and gain < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>effect_args = ['<STR_LIT>']<EOL>effect_args.append('<STR_LIT>'.format(gain))<EOL>if gain_type == '<STR_LIT>':<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>elif gain_type == '<STR_LIT>':<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>elif gain_type == '<STR_LIT>':<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if limiter_gain is not None:<EOL><INDENT>if gain_type in ['<STR_LIT>', '<STR_LIT>'] and gain > <NUM_LIT:1>:<EOL><INDENT>effect_args.append('<STR_LIT>'.format(limiter_gain))<EOL><DEDENT>elif gain_type == '<STR_LIT>' and gain > <NUM_LIT:0>:<EOL><INDENT>effect_args.append('<STR_LIT>'.format(limiter_gain))<EOL><DEDENT><DEDENT>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Apply an amplification or an attenuation to the audio signal.\n\n        Parameters\n        ----------\n        gain : float\n            Interpreted according to the given `gain_type`.\n            If `gain_type' = 'amplitude', `gain' is a positive amplitude ratio.\n            If `gain_type' = 'power', `gain' is a power (voltage squared).\n            If `gain_type' = 'db', `gain' is in decibels.\n        gain_type : string, default='amplitude'\n            Type of gain. 
One of:\n                - 'amplitude'\n                - 'power'\n                - 'db'\n        limiter_gain : float or None, default=None\n            If specified, a limiter is invoked on peaks greater than\n            `limiter_gain' to prevent clipping.\n            `limiter_gain` should be a positive value much less than 1.\n\n        See Also\n        --------\n        gain, compand", "id": "f3809:c0:m62"}
{"signature": "def stretch(self, factor, window=<NUM_LIT:20>):", "body": "if not is_number(factor) or factor <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if factor < <NUM_LIT:0.5> or factor > <NUM_LIT:2>:<EOL><INDENT>logger.warning(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if abs(factor - <NUM_LIT:1.0>) > <NUM_LIT:0.1>:<EOL><INDENT>logger.warning(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if not is_number(window) or window <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>effect_args = ['<STR_LIT>', '<STR_LIT>'.format(factor), '<STR_LIT>'.format(window)]<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Change the audio duration (but not its pitch).\n        **Unless factor is close to 1, use the tempo effect instead.**\n\n        This effect is broadly equivalent to the tempo effect with search set\n        to zero, so in general, its results are comparatively poor; it is\n        retained as it can sometimes out-perform tempo for small factors.\n\n        Parameters\n        ----------\n        factor : float\n            The ratio of the new tempo to the old tempo.\n            For ex. 1.1 speeds up the tempo by 10%; 0.9 slows it down by 10%.\n            Note - this argument is the inverse of what is passed to the sox\n            stretch effect for consistency with tempo.\n        window : float, default=20\n            Window size in miliseconds\n\n        See Also\n        --------\n        tempo, speed, pitch", "id": "f3809:c0:m54"}
{"signature": "def set_output_format(self, file_type=None, rate=None, bits=None,<EOL>channels=None, encoding=None, comments=None,<EOL>append_comments=True):", "body": "if file_type not in VALID_FORMATS + [None]:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'.format(VALID_FORMATS)<EOL>)<EOL><DEDENT>if not is_number(rate) and rate is not None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if rate is not None and rate <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not isinstance(bits, int) and bits is not None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if bits is not None and bits <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not isinstance(channels, int) and channels is not None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if channels is not None and channels <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if encoding not in ENCODING_VALS + [None]:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'.format(ENCODING_VALS)<EOL>)<EOL><DEDENT>if comments is not None and not isinstance(comments, str):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not isinstance(append_comments, bool):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>output_format = []<EOL>if file_type is not None:<EOL><INDENT>output_format.extend(['<STR_LIT>', '<STR_LIT:{}>'.format(file_type)])<EOL><DEDENT>if rate is not None:<EOL><INDENT>output_format.extend(['<STR_LIT>', '<STR_LIT>'.format(rate)])<EOL><DEDENT>if bits is not None:<EOL><INDENT>output_format.extend(['<STR_LIT>', '<STR_LIT:{}>'.format(bits)])<EOL><DEDENT>if channels is not None:<EOL><INDENT>output_format.extend(['<STR_LIT:-c>', '<STR_LIT:{}>'.format(channels)])<EOL><DEDENT>if encoding is not None:<EOL><INDENT>output_format.extend(['<STR_LIT>', '<STR_LIT:{}>'.format(encoding)])<EOL><DEDENT>if comments is not None:<EOL><INDENT>if append_comments:<EOL><INDENT>output_format.extend(['<STR_LIT>', 
comments])<EOL><DEDENT>else:<EOL><INDENT>output_format.extend(['<STR_LIT>', comments])<EOL><DEDENT><DEDENT>self.output_format = output_format<EOL>return self<EOL>", "docstring": "Sets output file format arguments. These arguments will overwrite\n        any format related arguments supplied by other effects (e.g. rate).\n\n        If this function is not explicity called the output format is inferred\n        from the file extension or the file's header.\n\n        Parameters\n        ----------\n        file_type : str or None, default=None\n            The file type of the output audio file. Should be the same as what\n            the file extension would be, for ex. 'mp3' or 'wav'.\n        rate : float or None, default=None\n            The sample rate of the output audio file. If None the sample rate\n            is inferred.\n        bits : int or None, default=None\n            The number of bits per sample. If None, the number of bits per\n            sample is inferred.\n        channels : int or None, default=None\n            The number of channels in the audio file. If None the number of\n            channels is inferred.\n        encoding : str or None, default=None\n            The audio encoding type. Sometimes needed with file-types that\n            support more than one encoding type. One of:\n                * signed-integer : PCM data stored as signed (\u2018two\u2019s\n                    complement\u2019) integers. Commonly used with a 16 or 24\u2212bit\n                    encoding size. A value of 0 represents minimum signal\n                    power.\n                * unsigned-integer : PCM data stored as unsigned integers.\n                    Commonly used with an 8-bit encoding size. 
A value of 0\n                    represents maximum signal power.\n                * floating-point : PCM data stored as IEEE 753 single precision\n                    (32-bit) or double precision (64-bit) floating-point\n                    (\u2018real\u2019) numbers. A value of 0 represents minimum signal\n                    power.\n                * a-law : International telephony standard for logarithmic\n                    encoding to 8 bits per sample. It has a precision\n                    equivalent to roughly 13-bit PCM and is sometimes encoded\n                    with reversed bit-ordering.\n                * u-law : North American telephony standard for logarithmic\n                    encoding to 8 bits per sample. A.k.a. \u03bc-law. It has a\n                    precision equivalent to roughly 14-bit PCM and is sometimes\n                    encoded with reversed bit-ordering.\n                * oki-adpcm : OKI (a.k.a. VOX, Dialogic, or Intel) 4-bit ADPCM;\n                    it has a precision equivalent to roughly 12-bit PCM. ADPCM\n                    is a form of audio compression that has a good compromise\n                    between audio quality and encoding/decoding speed.\n                * ima-adpcm : IMA (a.k.a. DVI) 4-bit ADPCM; it has a precision\n                    equivalent to roughly 13-bit PCM.\n                * ms-adpcm : Microsoft 4-bit ADPCM; it has a precision\n                    equivalent to roughly 14-bit PCM.\n                * gsm-full-rate : GSM is currently used for the vast majority\n                    of the world\u2019s digital wireless telephone calls. It\n                    utilises several audio formats with different bit-rates and\n                    associated speech quality. SoX has support for GSM\u2019s\n                    original 13kbps \u2018Full Rate\u2019 audio format. 
It is usually\n                    CPU-intensive to work with GSM audio.\n        comments : str or None, default=None\n            If not None, the string is added as a comment in the header of the\n            output audio file. If None, no comments are added.\n        append_comments : bool, default=True\n            If True, comment strings are appended to SoX's default comments. If\n            False, the supplied comment replaces the existing comment.", "id": "f3809:c0:m3"}
{"signature": "def oops(self):", "body": "effect_args = ['<STR_LIT>']<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Out Of Phase Stereo effect. Mixes stereo to twin-mono where each\n        mono channel contains the difference between the left and right stereo\n        channels. This is sometimes known as the 'karaoke' effect as it often\n        has the effect of removing most or all of the vocals from a recording.", "id": "f3809:c0:m38"}
{"signature": "def compand(self, attack_time=<NUM_LIT>, decay_time=<NUM_LIT>, soft_knee_db=<NUM_LIT>,<EOL>tf_points=[(-<NUM_LIT>, -<NUM_LIT>), (-<NUM_LIT>, -<NUM_LIT:20>), (<NUM_LIT:0>, <NUM_LIT:0>)],<EOL>):", "body": "if not is_number(attack_time) or attack_time <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(decay_time) or decay_time <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if attack_time > decay_time:<EOL><INDENT>logger.warning(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if not (is_number(soft_knee_db) or soft_knee_db is None):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(tf_points, list):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if len(tf_points) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if any(not isinstance(pair, tuple) for pair in tf_points):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if any(len(pair) != <NUM_LIT:2> for pair in tf_points):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if any(not (is_number(p[<NUM_LIT:0>]) and is_number(p[<NUM_LIT:1>])) for p in tf_points):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if any((p[<NUM_LIT:0>] > <NUM_LIT:0> or p[<NUM_LIT:1>] > <NUM_LIT:0>) for p in tf_points):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if len(tf_points) > len(set([p[<NUM_LIT:0>] for p in tf_points])):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>tf_points = sorted(<EOL>tf_points,<EOL>key=lambda tf_points: tf_points[<NUM_LIT:0>]<EOL>)<EOL>transfer_list = []<EOL>for point in tf_points:<EOL><INDENT>transfer_list.extend([<EOL>\"<STR_LIT>\".format(point[<NUM_LIT:0>]), \"<STR_LIT>\".format(point[<NUM_LIT:1>])<EOL>])<EOL><DEDENT>effect_args = [<EOL>'<STR_LIT>',<EOL>\"<STR_LIT>\".format(attack_time, decay_time)<EOL>]<EOL>if soft_knee_db is not 
None:<EOL><INDENT>effect_args.append(<EOL>\"<STR_LIT>\".format(soft_knee_db, \"<STR_LIT:U+002C>\".join(transfer_list))<EOL>)<EOL><DEDENT>else:<EOL><INDENT>effect_args.append(\"<STR_LIT:U+002C>\".join(transfer_list))<EOL><DEDENT>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Compand (compress or expand) the dynamic range of the audio.\n\n        Parameters\n        ----------\n        attack_time : float, default=0.3\n            The time in seconds over which the instantaneous level of the input\n            signal is averaged to determine increases in volume.\n        decay_time : float, default=0.8\n            The time in seconds over which the instantaneous level of the input\n            signal is averaged to determine decreases in volume.\n        soft_knee_db : float or None, default=6.0\n            The ammount (in dB) for which the points at where adjacent line\n            segments on the transfer function meet will be rounded.\n            If None, no soft_knee is applied.\n        tf_points : list of tuples\n            Transfer function points as a list of tuples corresponding to\n            points in (dB, dB) defining the compander's transfer function.\n\n        See Also\n        --------\n        mcompand, contrast", "id": "f3809:c0:m15"}
{"signature": "def allpass(self, frequency, width_q=<NUM_LIT>):", "body": "if not is_number(frequency) or frequency <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(width_q) or width_q <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>effect_args = [<EOL>'<STR_LIT>', '<STR_LIT>'.format(frequency), '<STR_LIT>'.format(width_q)<EOL>]<EOL>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Apply a two-pole all-pass filter. An all-pass filter changes the\n        audio\u2019s frequency to phase relationship without changing its frequency\n        to amplitude relationship. The filter is described in detail in at\n        http://musicdsp.org/files/Audio-EQ-Cookbook.txt\n\n        Parameters\n        ----------\n        frequency : float\n            The filter's center frequency in Hz.\n        width_q : float, default=2.0\n            The filter's width as a Q-factor.\n\n        See Also\n        --------\n        equalizer, highpass, lowpass, sinc", "id": "f3809:c0:m7"}
{"signature": "def phaser(self, gain_in=<NUM_LIT>, gain_out=<NUM_LIT>, delay=<NUM_LIT:3>, decay=<NUM_LIT>, speed=<NUM_LIT:0.5>,<EOL>modulation_shape='<STR_LIT>'):", "body": "if not is_number(gain_in) or gain_in <= <NUM_LIT:0> or gain_in > <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(gain_out) or gain_out <= <NUM_LIT:0> or gain_out > <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(delay) or delay <= <NUM_LIT:0> or delay > <NUM_LIT:5>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(decay) or decay < <NUM_LIT:0.1> or decay > <NUM_LIT:0.5>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_number(speed) or speed < <NUM_LIT:0.1> or speed > <NUM_LIT:2>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if modulation_shape not in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>effect_args = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'.format(gain_in),<EOL>'<STR_LIT>'.format(gain_out),<EOL>'<STR_LIT>'.format(delay),<EOL>'<STR_LIT>'.format(decay),<EOL>'<STR_LIT>'.format(speed)<EOL>]<EOL>if modulation_shape == '<STR_LIT>':<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>elif modulation_shape == '<STR_LIT>':<EOL><INDENT>effect_args.append('<STR_LIT>')<EOL><DEDENT>self.effects.extend(effect_args)<EOL>self.effects_log.append('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Apply a phasing effect to the audio.\n\n        Parameters\n        ----------\n        gain_in : float, default=0.8\n            Input volume between 0 and 1\n        gain_out: float, default=0.74\n            Output volume between 0 and 1\n        delay : float, default=3\n            Delay in miliseconds between 0 and 5\n        decay : float, default=0.4\n            Decay relative to gain_in, between 0.1 and 0.5.\n        speed : float, default=0.5\n            Modulation speed in Hz, between 0.1 and 2\n        modulation_shape : str, 
defaul='sinusoidal'\n            Modulation shpae. One of 'sinusoidal' or 'triangular'\n\n        See Also\n        --------\n        flanger, tremolo", "id": "f3809:c0:m41"}
{"signature": "def build(self, input_filepath, output_filepath, extra_args=None,<EOL>return_output=False):", "body": "file_info.validate_input_file(input_filepath)<EOL>if output_filepath is not None:<EOL><INDENT>file_info.validate_output_file(output_filepath)<EOL><DEDENT>else:<EOL><INDENT>output_filepath = '<STR_LIT>'<EOL><DEDENT>if input_filepath == output_filepath:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>args = []<EOL>args.extend(self.globals)<EOL>args.extend(self.input_format)<EOL>args.append(input_filepath)<EOL>args.extend(self.output_format)<EOL>args.append(output_filepath)<EOL>args.extend(self.effects)<EOL>if extra_args is not None:<EOL><INDENT>if not isinstance(extra_args, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>args.extend(extra_args)<EOL><DEDENT>status, out, err = sox(args)<EOL>if status != <NUM_LIT:0>:<EOL><INDENT>raise SoxError(<EOL>\"<STR_LIT>\".format(out, err)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>logger.info(<EOL>\"<STR_LIT>\",<EOL>output_filepath,<EOL>\"<STR_LIT:U+0020>\".join(self.effects_log)<EOL>)<EOL>if out is not None:<EOL><INDENT>logger.info(\"<STR_LIT>\".format(out))<EOL><DEDENT>if return_output:<EOL><INDENT>return status, out, err<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>", "docstring": "Builds the output_file by executing the current set of commands.\n\n        Parameters\n        ----------\n        input_filepath : str\n            Path to input audio file.\n        output_filepath : str or None\n            Path to desired output file. 
If a file already exists at the given\n            path, the file will be overwritten.\n            If None, no file will be created.\n        extra_args : list or None, default=None\n            If a list is given, these additional arguments are passed to SoX\n            at the end of the list of effects.\n            Don't use this argument unless you know exactly what you're doing!\n        return_output : bool, default=False\n            If True, returns the status and information sent to stderr and\n            stdout as a tuple (status, stdout, stderr).\n            Otherwise returns True on success.", "id": "f3809:c0:m5"}
{"signature": "def num_samples(input_filepath):", "body": "validate_input_file(input_filepath)<EOL>output = soxi(input_filepath, '<STR_LIT:s>')<EOL>if output == '<STR_LIT:0>':<EOL><INDENT>logger.warning(\"<STR_LIT>\", input_filepath)<EOL><DEDENT>return int(output)<EOL>", "docstring": "Show number of samples (0 if unavailable).\n\nParameters\n----------\ninput_filepath : str\n    Path to audio file.\n\nReturns\n-------\nn_samples : int\n    total number of samples in audio file.\n    Returns 0 if empty or unavailable", "id": "f3811:m6"}
{"signature": "def validate_input_file_list(input_filepath_list):", "body": "if not isinstance(input_filepath_list, list):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>elif len(input_filepath_list) < <NUM_LIT:2>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>for input_filepath in input_filepath_list:<EOL><INDENT>validate_input_file(input_filepath)<EOL><DEDENT>", "docstring": "Input file list validation function. Checks that object is a list and\n    contains valid filepaths that can be processed by SoX.\n\n    Parameters\n    ----------\n    input_filepath_list : list\n        A list of filepaths.", "id": "f3811:m10"}
{"signature": "def comments(input_filepath):", "body": "validate_input_file(input_filepath)<EOL>output = soxi(input_filepath, '<STR_LIT:a>')<EOL>return str(output)<EOL>", "docstring": "Show file comments (annotations) if available.\n\nParameters\n----------\ninput_filepath : str\n    Path to audio file.\n\nReturns\n-------\ncomments : str\n    File comments from header.\n    If no comments are present, returns an empty string.", "id": "f3811:m2"}
{"signature": "def stat(filepath):", "body": "stat_output = _stat_call(filepath)<EOL>stat_dictionary = _parse_stat(stat_output)<EOL>return stat_dictionary<EOL>", "docstring": "Returns a dictionary of audio statistics.\n\n    Parameters\n    ----------\n    filepath : str\n        File path.\n\n    Returns\n    -------\n    stat_dictionary : dict\n        Dictionary of audio statistics.", "id": "f3811:m14"}
{"signature": "def validate_output_file(output_filepath):", "body": "nowrite_conditions = [<EOL>bool(os.path.dirname(output_filepath)) ornot os.access(os.getcwd(), os.W_OK),<EOL>not os.access(os.path.dirname(output_filepath), os.W_OK)]<EOL>if all(nowrite_conditions):<EOL><INDENT>raise IOError(<EOL>\"<STR_LIT>\".format(output_filepath)<EOL>)<EOL><DEDENT>ext = file_extension(output_filepath)<EOL>if ext not in VALID_FORMATS:<EOL><INDENT>logger.info(\"<STR_LIT>\", \"<STR_LIT:U+0020>\".join(VALID_FORMATS))<EOL>logger.warning(<EOL>\"<STR_LIT>\".format(ext)<EOL>)<EOL><DEDENT>if os.path.exists(output_filepath):<EOL><INDENT>logger.warning(<EOL>'<STR_LIT>',<EOL>output_filepath<EOL>)<EOL><DEDENT>", "docstring": "Output file validation function. Checks that file can be written, and\n    has a valid file extension. Throws a warning if the path already exists,\n    as it will be overwritten on build.\n\n    Parameters\n    ----------\n    output_filepath : str\n        The output filepath.\n\n    Returns:\n    --------\n    output_filepath : str\n        The output filepath.", "id": "f3811:m11"}
{"signature": "def info(filepath):", "body": "info_dictionary = {<EOL>'<STR_LIT>': channels(filepath),<EOL>'<STR_LIT>': sample_rate(filepath),<EOL>'<STR_LIT>': bitrate(filepath),<EOL>'<STR_LIT>': duration(filepath),<EOL>'<STR_LIT>': num_samples(filepath),<EOL>'<STR_LIT>': encoding(filepath),<EOL>'<STR_LIT>': silent(filepath)<EOL>}<EOL>return info_dictionary<EOL>", "docstring": "Get a dictionary of file information\n\n    Parameters\n    ----------\n    filepath : str\n        File path.\n\n    Returns:\n    --------\n    info_dictionary : dict\n        Dictionary of file information. Fields are:\n            * channels\n            * sample_rate\n            * bitrate\n            * duration\n            * num_samples\n            * encoding\n            * silent", "id": "f3811:m13"}
{"signature": "def channels(input_filepath):", "body": "validate_input_file(input_filepath)<EOL>output = soxi(input_filepath, '<STR_LIT:c>')<EOL>return int(output)<EOL>", "docstring": "Show number of channels.\n\nParameters\n----------\ninput_filepath : str\n    Path to audio file.\n\nReturns\n-------\nchannels : int\n    number of channels", "id": "f3811:m1"}
{"signature": "def sample_rate(input_filepath):", "body": "validate_input_file(input_filepath)<EOL>output = soxi(input_filepath, '<STR_LIT:r>')<EOL>return float(output)<EOL>", "docstring": "Show sample-rate.\n\nParameters\n----------\ninput_filepath : str\n    Path to audio file.\n\nReturns\n-------\nsamplerate : float\n    number of samples/second", "id": "f3811:m7"}
{"signature": "def _stat_call(filepath):", "body": "validate_input_file(filepath)<EOL>args = ['<STR_LIT>', filepath, '<STR_LIT>', '<STR_LIT>']<EOL>_, _, stat_output = sox(args)<EOL>return stat_output<EOL>", "docstring": "Call sox's stat function.\n\n    Parameters\n    ----------\n    filepath : str\n        File path.\n\n    Returns\n    -------\n    stat_output : str\n        Sox output from stderr.", "id": "f3811:m15"}
{"signature": "def silent(input_filepath, threshold=<NUM_LIT>):", "body": "validate_input_file(input_filepath)<EOL>stat_dictionary = stat(input_filepath)<EOL>mean_norm = stat_dictionary['<STR_LIT>']<EOL>if mean_norm is not float('<STR_LIT>'):<EOL><INDENT>if mean_norm >= threshold:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Determine if an input file is silent.\n\nParameters\n----------\ninput_filepath : str\n    The input filepath.\nthreshold : float\n    Threshold for determining silence\n\nReturns\n-------\nis_silent : bool\n    True if file is determined silent.", "id": "f3811:m8"}
{"signature": "def duration(input_filepath):", "body": "validate_input_file(input_filepath)<EOL>output = soxi(input_filepath, '<STR_LIT:D>')<EOL>if output == '<STR_LIT:0>':<EOL><INDENT>logger.warning(\"<STR_LIT>\", input_filepath)<EOL><DEDENT>return float(output)<EOL>", "docstring": "Show duration in seconds (0 if unavailable).\n\nParameters\n----------\ninput_filepath : str\n    Path to audio file.\n\nReturns\n-------\nduration : float\n    Duration of audio file in seconds.\n    If unavailable or empty, returns 0.", "id": "f3811:m3"}
{"signature": "def _get_valid_formats():", "body": "if NO_SOX:<EOL><INDENT>return []<EOL><DEDENT>so = subprocess.check_output(['<STR_LIT>', '<STR_LIT>'])<EOL>if type(so) is not str:<EOL><INDENT>so = str(so, encoding='<STR_LIT>')<EOL><DEDENT>so = so.split('<STR_LIT:\\n>')<EOL>idx = [i for i in range(len(so)) if '<STR_LIT>' in so[i]][<NUM_LIT:0>]<EOL>formats = so[idx].split('<STR_LIT:U+0020>')[<NUM_LIT:3>:]<EOL>return formats<EOL>", "docstring": "Calls SoX help for a lists of audio formats available with the current\n    install of SoX.\n\n    Returns:\n    --------\n    formats : list\n        List of audio file extensions that SoX can process.", "id": "f3813:m1"}
{"signature": "def is_number(var):", "body": "try:<EOL><INDENT>float(var)<EOL>return True<EOL><DEDENT>except ValueError:<EOL><INDENT>return False<EOL><DEDENT>except TypeError:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Check if variable is a numeric value.\n\n    Parameters\n    ----------\n    var : object\n\n    Returns:\n    --------\n    bool\n        True if var is numeric, False otherwise.", "id": "f3813:m4"}
{"signature": "def get(self, request):", "body": "pass<EOL>", "docstring": "responses:\n  200:\n    description: A list of organisations.\n    examples:\n      [{\"name\": \"Foo Corp.\"}, {\"name\": \"Acme Ltd.\"}]", "id": "f3834:c0:m0"}
{"signature": "@app.route(\"<STR_LIT>\")<EOL>def regular_docstring_and_schema(request):", "body": "pass<EOL>", "docstring": "This a regular docstring example (not included in schema)\n\n---\n\nresponses:\n  200:\n    description: This is included in the schema.", "id": "f3834:m3"}
{"signature": "@app.route(\"<STR_LIT>\")<EOL>def regular_docstring(request):", "body": "pass<EOL>", "docstring": "This a regular docstring example (not included in schema)", "id": "f3834:m4"}
{"signature": "def get_long_description():", "body": "with open(\"<STR_LIT>\", encoding=\"<STR_LIT:utf8>\") as f:<EOL><INDENT>return f.read()<EOL><DEDENT>", "docstring": "Return the README.", "id": "f3842:m1"}
{"signature": "def get_version(package):", "body": "with open(os.path.join(package, \"<STR_LIT>\")) as f:<EOL><INDENT>return re.search(\"<STR_LIT>\", f.read()).group(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Return package version as listed in `__version__` in `init.py`.", "id": "f3842:m0"}
{"signature": "async def on_connect(self, websocket: WebSocket) -> None:", "body": "await websocket.accept()<EOL>", "docstring": "Override to handle an incoming websocket connection", "id": "f3845:c1:m4"}
{"signature": "def append(self, key: str, value: str) -> None:", "body": "append_key = key.lower().encode(\"<STR_LIT>\")<EOL>append_value = value.encode(\"<STR_LIT>\")<EOL>self._list.append((append_key, append_value))<EOL>", "docstring": "Append a header, preserving any duplicate entries.", "id": "f3859:c10:m5"}
{"signature": "def setdefault(self, key: str, value: str) -> str:", "body": "set_key = key.lower().encode(\"<STR_LIT>\")<EOL>set_value = value.encode(\"<STR_LIT>\")<EOL>for idx, (item_key, item_value) in enumerate(self._list):<EOL><INDENT>if item_key == set_key:<EOL><INDENT>return item_value.decode(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>self._list.append((set_key, set_value))<EOL>return value<EOL>", "docstring": "If the header `key` does not exist, then set it to `value`.\nReturns the header value.", "id": "f3859:c10:m3"}
{"signature": "def parse_docstring(self, func_or_method: typing.Callable) -> dict:", "body": "docstring = func_or_method.__doc__<EOL>if not docstring:<EOL><INDENT>return {}<EOL><DEDENT>docstring = docstring.split(\"<STR_LIT>\")[-<NUM_LIT:1>]<EOL>parsed = yaml.safe_load(docstring)<EOL>if not isinstance(parsed, dict):<EOL><INDENT>return {}<EOL><DEDENT>return parsed<EOL>", "docstring": "Given a function, parse the docstring as YAML and return a dictionary of info.", "id": "f3862:c2:m2"}
{"signature": "async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:", "body": "assert scope[\"<STR_LIT:type>\"] == \"<STR_LIT:http>\"<EOL>if not self.config_checked:<EOL><INDENT>await self.check_config()<EOL>self.config_checked = True<EOL><DEDENT>path = self.get_path(scope)<EOL>response = await self.get_response(path, scope)<EOL>await response(scope, receive, send)<EOL>", "docstring": "The ASGI entry point.", "id": "f3863:c1:m2"}
{"signature": "def get_path(self, scope: Scope) -> str:", "body": "return os.path.normpath(os.path.join(*scope[\"<STR_LIT:path>\"].split(\"<STR_LIT:/>\")))<EOL>", "docstring": "Given the ASGI scope, return the `path` string to serve up,\nwith OS specific path seperators, and any '..', '.' components removed.", "id": "f3863:c1:m3"}
{"signature": "def is_not_modified(<EOL>self, response_headers: Headers, request_headers: Headers<EOL>) -> bool:", "body": "try:<EOL><INDENT>if_none_match = request_headers[\"<STR_LIT>\"]<EOL>etag = response_headers[\"<STR_LIT>\"]<EOL>if if_none_match == etag:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>if_modified_since = parsedate(request_headers[\"<STR_LIT>\"])<EOL>last_modified = parsedate(response_headers[\"<STR_LIT>\"])<EOL>if (<EOL>if_modified_since is not None<EOL>and last_modified is not None<EOL>and if_modified_since >= last_modified<EOL>):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>return False<EOL>", "docstring": "Given the request and response headers, return `True` if an HTTP\n\"Not Modified\" response could be returned instead.", "id": "f3863:c1:m8"}
{"signature": "def _run(self) -> None:", "body": "scope = self.scope<EOL>receive = self._asgi_receive<EOL>send = self._asgi_send<EOL>try:<EOL><INDENT>self._loop.run_until_complete(self.app(scope, receive, send))<EOL><DEDENT>except BaseException as exc:<EOL><INDENT>self._send_queue.put(exc)<EOL><DEDENT>", "docstring": "The sub-thread in which the websocket session runs.", "id": "f3871:c5:m3"}
{"signature": "async def send(self, message: Message) -> None:", "body": "if self.application_state == WebSocketState.CONNECTING:<EOL><INDENT>message_type = message[\"<STR_LIT:type>\"]<EOL>assert message_type in {\"<STR_LIT>\", \"<STR_LIT>\"}<EOL>if message_type == \"<STR_LIT>\":<EOL><INDENT>self.application_state = WebSocketState.DISCONNECTED<EOL><DEDENT>else:<EOL><INDENT>self.application_state = WebSocketState.CONNECTED<EOL><DEDENT>await self._send(message)<EOL><DEDENT>elif self.application_state == WebSocketState.CONNECTED:<EOL><INDENT>message_type = message[\"<STR_LIT:type>\"]<EOL>assert message_type in {\"<STR_LIT>\", \"<STR_LIT>\"}<EOL>if message_type == \"<STR_LIT>\":<EOL><INDENT>self.application_state = WebSocketState.DISCONNECTED<EOL><DEDENT>await self._send(message)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Send ASGI websocket messages, ensuring valid state transitions.", "id": "f3872:c2:m2"}
{"signature": "@property<EOL><INDENT>def width(self):<DEDENT>", "body": "return self.size[<NUM_LIT:0>]<EOL>", "docstring": "Get the current terminal width.", "id": "f3874:c0:m5"}
{"signature": "def set_text(self, point, text):", "body": "if not self.option.legend:<EOL><INDENT>return<EOL><DEDENT>if not isinstance(point, Point):<EOL><INDENT>point = Point(point)<EOL><DEDENT>for offset, char in enumerate(str(text)):<EOL><INDENT>self.screen.canvas[point.y][point.x + offset] = char<EOL><DEDENT>", "docstring": "Set a text value in the screen canvas.", "id": "f3874:c3:m13"}
{"signature": "@property<EOL><INDENT>def colors(self):<DEDENT>", "body": "number = curses.tigetnum('<STR_LIT>') or <NUM_LIT:0><EOL>return <NUM_LIT:16> if number == <NUM_LIT:8> else number<EOL>", "docstring": "Get the number of colors supported by this terminal.", "id": "f3874:c0:m1"}
{"signature": "def __init__(self, dg_option=None, ostream=None, data=None):", "body": "<EOL>self.dg_option = dg_option<EOL>if self.dg_option == None:<EOL><INDENT>self.dg_option = DOption()<EOL><DEDENT>self.ostream = ostream<EOL>if self.ostream == None:<EOL><INDENT>try:<EOL><INDENT>self.ostream = sys.stdout.buffer<EOL><DEDENT>except AttributeError:<EOL><INDENT>self.ostream = sys.stdout<EOL><DEDENT><DEDENT>if self.dg_option.mode == '<STR_LIT:h>':<EOL><INDENT>self.dg = HorizontalBarGraph(self.dg_option.size,<EOL>self.dg_option)<EOL><DEDENT>elif self.dg_option.mode == '<STR_LIT:v>':<EOL><INDENT>self.dg = VerticalBarGraph(self.dg_option.size,<EOL>self.dg_option)<EOL><DEDENT>else:<EOL><INDENT>self.dg = AxisGraph(self.dg_option.size,<EOL>self.dg_option)<EOL><DEDENT>self.dg.update(data[<NUM_LIT:0>], data[<NUM_LIT:1>])<EOL>", "docstring": "Handle some of the setup functions for the graph in the\n        diagram package. Specifically hide all of the requirements that\n        are computed in run() inside diagram.py.", "id": "f3874:c10:m0"}
{"signature": "def human(self, size, base=<NUM_LIT:1000>, units='<STR_LIT>'):", "body": "sign = '<STR_LIT:+>' if size >= <NUM_LIT:0> else '<STR_LIT:->'<EOL>size = abs(size)<EOL>if size < <NUM_LIT:1000>:<EOL><INDENT>return '<STR_LIT>' % (sign, size)<EOL><DEDENT>for i, suffix in enumerate(units):<EOL><INDENT>unit = <NUM_LIT:1000> ** (i + <NUM_LIT:1>)<EOL>if size < unit:<EOL><INDENT>return ('<STR_LIT>' % (<EOL>sign,<EOL>size / float(unit) * base,<EOL>suffix,<EOL>)).strip()<EOL><DEDENT><DEDENT>raise OverflowError<EOL>", "docstring": "Convert the input ``size`` to human readable, short form.", "id": "f3874:c3:m6"}
{"signature": "def set(self, point):", "body": "if not isinstance(point, Point):<EOL><INDENT>point = Point(point)<EOL><DEDENT>rx = self.round(point.x)<EOL>ry = self.round(point.y)<EOL>item = Point((rx >> <NUM_LIT:1>, min(ry >> <NUM_LIT:2>, self.size.y)))<EOL>self.screen[item] |= self.pixels[ry & <NUM_LIT:3>][rx & <NUM_LIT:1>]<EOL>", "docstring": "Set pixel at (x, y) point.", "id": "f3874:c5:m7"}
{"signature": "def render(self, stream):", "body": "raise NotImplementedError()<EOL>", "docstring": "Render the graph to the selected output stream.", "id": "f3874:c3:m11"}
{"signature": "def usage_function(parser):", "body": "parser.print_usage()<EOL>print('<STR_LIT>')<EOL>print('<STR_LIT>')<EOL>for function in sorted(FUNCTION):<EOL><INDENT>doc = FUNCTION[function].__doc__.strip().splitlines()[<NUM_LIT:0>]<EOL>print('<STR_LIT>' % (function + '<STR_LIT::>', doc))<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Show usage and available curve functions.", "id": "f3874:m2"}
{"signature": "def __getitem__(self, point):", "body": "if not isinstance(point, Point):<EOL><INDENT>point = Point(point)<EOL><DEDENT>return self.canvas[point.y][point.x]<EOL>", "docstring": "Get a point value or None.", "id": "f3874:c2:m6"}
{"signature": "@property<EOL><INDENT>def height(self):<DEDENT>", "body": "return self.size.y<EOL>", "docstring": "Get the buffer height.", "id": "f3874:c2:m2"}
{"signature": "@property<EOL><INDENT>def encoding(self):<DEDENT>", "body": "_, encoding = locale.getdefaultlocale()<EOL>return encoding<EOL>", "docstring": "Get the current terminal encoding.", "id": "f3874:c0:m2"}
{"signature": "@property<EOL><INDENT>def null(self):<DEDENT>", "body": "if not self.option.axis:<EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>return self.screen.height - (<EOL>-self.minimum * <NUM_LIT> / self.extents * self.size.y<EOL>)<EOL><DEDENT>", "docstring": "Zero crossing value.", "id": "f3874:c5:m6"}
{"signature": "def usage_palette(parser):", "body": "parser.print_usage()<EOL>print('<STR_LIT>')<EOL>print('<STR_LIT>')<EOL>for palette in sorted(PALETTE):<EOL><INDENT>print('<STR_LIT>' % (palette,))<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Show usage and available palettes.", "id": "f3874:m3"}
{"signature": "def _normalised_python(self):", "body": "dx = (self.screen.width / float(len(self.points)))<EOL>oy = (self.screen.height)<EOL>for x, point in enumerate(self.points):<EOL><INDENT>y = (point - self.minimum) * <NUM_LIT> / self.extents * self.size.y<EOL>yield Point((<EOL>dx * x,<EOL>min(oy, oy - y),<EOL>))<EOL><DEDENT>", "docstring": "Normalised data points using pure Python.", "id": "f3874:c5:m4"}
{"signature": "@property<EOL><INDENT>def maximum_points(self):<DEDENT>", "body": "return self.size.x<EOL>", "docstring": "Maximum width.", "id": "f3874:c5:m5"}
{"signature": "def consume_line(self, line):", "body": "data = RE_VALUE_KEY.split(line.strip(), <NUM_LIT:1>)<EOL>if len(data) == <NUM_LIT:1>:<EOL><INDENT>return float(data[<NUM_LIT:0>]), None<EOL><DEDENT>else:<EOL><INDENT>return float(data[<NUM_LIT:0>]), data[<NUM_LIT:1>].strip()<EOL><DEDENT>", "docstring": "Consume data from a line.", "id": "f3874:c3:m2"}
{"signature": "def color(self, index):", "body": "if self.colors == <NUM_LIT:16>:<EOL><INDENT>if index >= <NUM_LIT:8>:<EOL><INDENT>return self.csi('<STR_LIT>') + self.csi('<STR_LIT>', index - <NUM_LIT:8>)<EOL><DEDENT>else:<EOL><INDENT>return self.csi('<STR_LIT>') + self.csi('<STR_LIT>', index)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return self.csi('<STR_LIT>', index)<EOL><DEDENT>", "docstring": "Get the escape sequence for indexed color ``index``.\n\n        The ``index`` is a color index in the 256 color space. The color space\n        consists of:\n\n        * 0x00-0x0f: default EGA colors\n        * 0x10-0xe7: 6x6x6 RGB cubes\n        * 0xe8-0xff: gray scale ramp", "id": "f3874:c0:m7"}
{"signature": "def __init__(self):", "body": "curses.setupterm()<EOL>", "docstring": "Initialize curses.", "id": "f3874:c0:m0"}
{"signature": "def __contains__(self, point):", "body": "if not isinstance(point, Point):<EOL><INDENT>point = Point(point)<EOL><DEDENT>if point.y not in self.canvas:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return point.x in self.canvas[point.y]<EOL><DEDENT>", "docstring": "Check if a point has a value.", "id": "f3874:c2:m3"}
{"signature": "def _function_argument(self, value):", "body": "if value in FUNCTION_CONSTANT:<EOL><INDENT>return FUNCTION_CONSTANT[value]<EOL><DEDENT>else:<EOL><INDENT>return float(value)<EOL><DEDENT>", "docstring": "Resolve function, convert to float if not found.", "id": "f3874:c3:m8"}
{"signature": "def filter_savitzky_golay(y, window_size=<NUM_LIT:5>, order=<NUM_LIT:2>, deriv=<NUM_LIT:0>, rate=<NUM_LIT:1>):", "body": "try:<EOL><INDENT>window_size = np.abs(np.int(window_size))<EOL>order = np.abs(np.int(order))<EOL><DEDENT>except ValueError:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if window_size % <NUM_LIT:2> != <NUM_LIT:1> or window_size < <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if window_size < order + <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>order_range = range(order + <NUM_LIT:1>)<EOL>half_window = (window_size - <NUM_LIT:1>) // <NUM_LIT:2><EOL>minimum = np.min(y)<EOL>maximum = np.max(y)<EOL>b = np.mat([<EOL>[k ** i for i in order_range]<EOL>for k in range(-half_window, half_window + <NUM_LIT:1>)<EOL>])<EOL>m = np.linalg.pinv(b).A[deriv] * rate ** deriv * math.factorial(deriv)<EOL>firstvals = y[<NUM_LIT:0>] - np.abs(y[<NUM_LIT:1>:half_window+<NUM_LIT:1>][::-<NUM_LIT:1>] - y[<NUM_LIT:0>])<EOL>lastvals = y[-<NUM_LIT:1>] + np.abs(y[-half_window-<NUM_LIT:1>:-<NUM_LIT:1>][::-<NUM_LIT:1>] - y[-<NUM_LIT:1>])<EOL>y = np.concatenate((firstvals, y, lastvals))<EOL>return np.clip(<EOL>np.convolve(m[::-<NUM_LIT:1>], y, mode='<STR_LIT>'),<EOL>minimum,<EOL>maximum,<EOL>)<EOL>", "docstring": "Smooth (and optionally differentiate) with a Savitzky-Golay filter.", "id": "f3874:m1"}
{"signature": "@property<EOL><INDENT>def normalised(self):<DEDENT>", "body": "if np is None:<EOL><INDENT>return self._normalised_python()<EOL><DEDENT>else:<EOL><INDENT>return self._normalised_numpy()<EOL><DEDENT>", "docstring": "Normalised data points.", "id": "f3874:c5:m2"}
{"signature": "@property<EOL><INDENT>def scale(self):<DEDENT>", "body": "return <NUM_LIT:1><EOL>", "docstring": "Graph scale.", "id": "f3874:c3:m3"}
{"signature": "@property<EOL><INDENT>def size(self):<DEDENT>", "body": "for fd in range(<NUM_LIT:3>):<EOL><INDENT>cr = self._ioctl_GWINSZ(fd)<EOL>if cr:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if not cr:<EOL><INDENT>try:<EOL><INDENT>fd = os.open(os.ctermid(), os.O_RDONLY)<EOL>cr = self._ioctl_GWINSZ(fd)<EOL>os.close(fd)<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if not cr:<EOL><INDENT>env = os.environ<EOL>cr = (env.get('<STR_LIT>', <NUM_LIT>), env.get('<STR_LIT>', <NUM_LIT>))<EOL><DEDENT>return int(cr[<NUM_LIT:1>]), int(cr[<NUM_LIT:0>])<EOL>", "docstring": "Get the current terminal size.", "id": "f3874:c0:m4"}
{"signature": "def fetch_deputies(data_dir):", "body": "deputies = DeputiesDataset()<EOL>df = deputies.fetch()<EOL>save_to_csv(df, data_dir, \"<STR_LIT>\")<EOL>holders = df.condition == '<STR_LIT>'<EOL>substitutes = df.condition == '<STR_LIT>'<EOL>log.info(\"<STR_LIT>\", len(df))<EOL>log.info(\"<STR_LIT>\", len(df[holders]))<EOL>log.info(\"<STR_LIT>\", len(df[substitutes]))<EOL>return df<EOL>", "docstring": ":param data_dir: (str) directory in which the output file will be saved", "id": "f3892:m0"}
{"signature": "def fetch(self):", "body": "xml = urllib.request.urlopen(self.URL)<EOL>tree = ET.ElementTree(file=xml)<EOL>records = self._parse_deputies(tree.getroot())<EOL>df = pd.DataFrame(records, columns=(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT:state>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT:email>'<EOL>))<EOL>return self._translate(df)<EOL>", "docstring": "Fetches the list of deputies for the current term.", "id": "f3892:c0:m0"}
{"signature": "def fetch_session_start_times(data_dir, pivot, session_dates):", "body": "session_start_times = SessionStartTimesDataset()<EOL>df = session_start_times.fetch(pivot, session_dates)<EOL>save_to_csv(df, data_dir, \"<STR_LIT>\")<EOL>log.info(\"<STR_LIT>\", len(session_dates))<EOL>found = pd.to_datetime(df['<STR_LIT:date>'], format=\"<STR_LIT>\").dt.date.unique()<EOL>log.info(\"<STR_LIT>\", len(found))<EOL>return df<EOL>", "docstring": ":param data_dir: (str) directory in which the output file will be saved\n:param pivot: (int) congressperson document to use as a pivot for scraping the data\n:param session_dates: (list) datetime objects to fetch the start times for", "id": "f3894:m0"}
{"signature": "def fetch(self, pivot, session_dates):", "body": "records = self._all_start_times(pivot, session_dates)<EOL>return pd.DataFrame(records, columns=(<EOL>'<STR_LIT:date>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>))<EOL>", "docstring": ":param pivot: (int) a congressperson document to use as a pivot for scraping the data\n:param session_dates: (list) datetime objects to fetch the start times for", "id": "f3894:c0:m0"}
{"signature": "def fetch(self, start_date, end_date):", "body": "records = []<EOL>for two_months_range in self._generate_ranges(start_date, end_date):<EOL><INDENT>log.debug(two_months_range)<EOL>for record in self._fetch_missions_for_range(two_months_range[<NUM_LIT:0>], two_months_range[<NUM_LIT:1>]):<EOL><INDENT>records.append(record)<EOL><DEDENT><DEDENT>df = pd.DataFrame(records, columns=[<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT:start>',<EOL>'<STR_LIT:end>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>])<EOL>translate_column(df, '<STR_LIT>', {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>})<EOL>translate_column(df, '<STR_LIT>', {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>})<EOL>return df.drop_duplicates()<EOL>", "docstring": "Fetches official missions within the given date range", "id": "f3895:c0:m0"}
{"signature": "def fetch(self, deputies, start_date, end_date):", "body": "log.debug(\"<STR_LIT>\".format(len(deputies), start_date, end_date))<EOL>records = self._all_presences(deputies, start_date, end_date)<EOL>df = pd.DataFrame(records, columns=(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT:state>',<EOL>'<STR_LIT:date>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>))<EOL>return self._translate(df)<EOL>", "docstring": ":param deputies: (pandas.DataFrame) a dataframe with deputies data\n:param date_start: (str) date in the format dd/mm/yyyy\n:param date_end: (str) date in the format dd/mm/yyyy", "id": "f3897:c0:m1"}
{"signature": "def fetch_presences(data_dir, deputies, date_start, date_end):", "body": "presences = PresencesDataset()<EOL>df = presences.fetch(deputies, date_start, date_end)<EOL>save_to_csv(df, data_dir, \"<STR_LIT>\")<EOL>log.info(\"<STR_LIT>\", len(df))<EOL>log.info(\"<STR_LIT>\", len(df[df.presence == '<STR_LIT>']))<EOL>log.info(\"<STR_LIT>\", len(df[df.presence == '<STR_LIT>']))<EOL>return df<EOL>", "docstring": ":param data_dir: (str) directory in which the output file will be saved\n:param deputies: (pandas.DataFrame) a dataframe with deputies data\n:param date_start: (str) a date in the format dd/mm/yyyy\n:param date_end: (str) a date in the format dd/mm/yyyy", "id": "f3897:m0"}
{"signature": "def translate_column(df, column, translations):", "body": "df[column] = df[column].astype('<STR_LIT>')<EOL>translations = [translations[cat]<EOL>for cat in df[column].cat.categories]<EOL>df[column].cat.rename_categories(translations, inplace=True)<EOL>", "docstring": ":param df: (pandas.Dataframe) the dataframe to be translated\n:param column: (str) the column to be translated\n:param translations: (dict) a dictionary of the strings to be categorized and translated", "id": "f3902:m3"}
{"signature": "def render(self, obj):", "body": "self.obj = obj<EOL>attrs = '<STR_LIT:U+0020>'.join([<EOL>'<STR_LIT>' % (attr_name, attr.resolve(obj))<EOL>if isinstance(attr, Accessor)<EOL>else '<STR_LIT>' % (attr_name, attr)<EOL>for attr_name, attr in self.attrs.items()<EOL>])<EOL>return mark_safe(u'<STR_LIT>' % (attrs, self.text))<EOL>", "docstring": "Render link as HTML output tag <a>.", "id": "f3914:c1:m4"}
{"signature": "def get_days_span(self, month_index):", "body": "is_first_month = month_index == <NUM_LIT:0><EOL>is_last_month = month_index == self.__len__() - <NUM_LIT:1><EOL>y = int(self.start_date.year + (self.start_date.month + month_index) / <NUM_LIT>)<EOL>m = int((self.start_date.month + month_index) % <NUM_LIT:12> or <NUM_LIT:12>)<EOL>total = calendar.monthrange(y, m)[<NUM_LIT:1>]<EOL>if is_first_month and is_last_month:<EOL><INDENT>return (self.end_date - self.start_date).days + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>if is_first_month:<EOL><INDENT>return total - self.start_date.day + <NUM_LIT:1><EOL><DEDENT>elif is_last_month:<EOL><INDENT>return self.end_date.day<EOL><DEDENT>else:<EOL><INDENT>return total<EOL><DEDENT><DEDENT>", "docstring": "Calculate how many days the month spans.", "id": "f3915:c5:m4"}
{"signature": "def get_context_data(self, **kwargs):", "body": "sEcho = self.query_data[\"<STR_LIT>\"]<EOL>context = super(BaseListView, self).get_context_data(**kwargs)<EOL>queryset = context[\"<STR_LIT>\"]<EOL>if queryset is not None:<EOL><INDENT>total_length = self.get_queryset_length(queryset)<EOL>queryset = self.filter_queryset(queryset)<EOL>display_length = self.get_queryset_length(queryset)<EOL>queryset = self.sort_queryset(queryset)<EOL>queryset = self.paging_queryset(queryset)<EOL>values_list = self.convert_queryset_to_values_list(queryset)<EOL>context = {<EOL>\"<STR_LIT>\": sEcho,<EOL>\"<STR_LIT>\": total_length,<EOL>\"<STR_LIT>\": display_length,<EOL>\"<STR_LIT>\": values_list,<EOL>}<EOL><DEDENT>else:<EOL><INDENT>context = {<EOL>\"<STR_LIT>\": sEcho,<EOL>\"<STR_LIT>\": <NUM_LIT:0>,<EOL>\"<STR_LIT>\": <NUM_LIT:0>,<EOL>\"<STR_LIT>\": [],<EOL>}<EOL><DEDENT>return context<EOL>", "docstring": "Get context data for datatable server-side response.\nSee http://www.datatables.net/usage/server-side", "id": "f3923:c1:m7"}
{"signature": "def render_to_json_response(self, context, **response_kwargs):", "body": "return HttpResponse(<EOL>self.convert_context_to_json(context),<EOL>content_type='<STR_LIT:application/json>',<EOL>**response_kwargs<EOL>)<EOL>", "docstring": "Returns a JSON response, transforming 'context' to make the payload.", "id": "f3923:c0:m0"}
{"signature": "def get_meta_image(self):", "body": "return None<EOL>", "docstring": "Get the image to use for this object.\nCan be None if there is no relevant image.", "id": "f3960:c0:m3"}
{"signature": "def train_encoder(X, y, fold_count, encoder):", "body": "kf = StratifiedKFold(n_splits=fold_count, shuffle=True, random_state=<NUM_LIT>)<EOL>encoder = deepcopy(encoder)  <EOL>imputer = SimpleImputer(strategy='<STR_LIT>')<EOL>scaler = StandardScaler()<EOL>folds = []<EOL>fit_encoder_time = <NUM_LIT:0><EOL>score_encoder_time = <NUM_LIT:0><EOL>for train_index, test_index in kf.split(X, y):<EOL><INDENT>X_train, X_test = X.iloc[train_index, :].reset_index(drop=True), X.iloc[test_index, :].reset_index(drop=True)<EOL>y_train, y_test = y[train_index].reset_index(drop=True), y[test_index].reset_index(drop=True)<EOL>start_time = time.time()<EOL>X_train = encoder.fit_transform(X_train, y_train)<EOL>fit_encoder_time += time.time() - start_time<EOL>X_train = imputer.fit_transform(X_train)<EOL>X_train = scaler.fit_transform(X_train)<EOL>start_time = time.time()<EOL>X_test = encoder.transform(X_test)<EOL>score_encoder_time += time.time() - start_time<EOL>X_test = imputer.transform(X_test)<EOL>X_test = scaler.transform(X_test)<EOL>folds.append([X_train, y_train, X_test, y_test])<EOL><DEDENT>return folds, fit_encoder_time/fold_count, score_encoder_time/fold_count<EOL>", "docstring": "Defines folds and performs the data preprocessing (categorical encoding, NaN imputation, normalization)\nReturns a list with {X_train, y_train, X_test, y_test}, average fit_encoder_time and average score_encoder_time\n\nNote: We normalize all features (not only numerical features) because otherwise SVM would\n    get stuck for hours on ordinal encoded cylinder.bands.arff dataset due to presence of\n    unproportionally high values.\n\nNote: The fold count is variable because there are datasets, which have less than 10 samples in the minority class.\n\nNote: We do not use pipelines because of:\n    https://github.com/scikit-learn/scikit-learn/issues/11832", "id": "f3962:m0"}
{"signature": "def train_model(folds, model):", "body": "scores = []<EOL>fit_model_time = <NUM_LIT:0>      <EOL>score_model_time = <NUM_LIT:0>    <EOL>for X_train, y_train, X_test, y_test in folds:<EOL><INDENT>start_time = time.time()<EOL>with ignore_warnings(category=ConvergenceWarning):  <EOL><INDENT>model.fit(X_train, y_train)<EOL><DEDENT>fit_model_time += time.time() - start_time<EOL>prediction_train_proba = model.predict_proba(X_train)[:, <NUM_LIT:1>]<EOL>prediction_train = (prediction_train_proba >= <NUM_LIT:0.5>).astype('<STR_LIT>')<EOL>start_time = time.time()<EOL>prediction_test_proba = model.predict_proba(X_test)[:, <NUM_LIT:1>]<EOL>score_model_time += time.time() - start_time<EOL>prediction_test = (prediction_test_proba >= <NUM_LIT:0.5>).astype('<STR_LIT>')<EOL>with warnings.catch_warnings():<EOL><INDENT>warnings.simplefilter(\"<STR_LIT:ignore>\")<EOL>scores.append([<EOL>sklearn.metrics.matthews_corrcoef(y_test, prediction_test),<EOL>sklearn.metrics.matthews_corrcoef(y_train, prediction_train),<EOL>sklearn.metrics.roc_auc_score(y_test, prediction_test_proba),<EOL>sklearn.metrics.roc_auc_score(y_train, prediction_train_proba),<EOL>sklearn.metrics.brier_score_loss(y_test, prediction_test_proba),<EOL>sklearn.metrics.brier_score_loss(y_train, prediction_train_proba)<EOL>])<EOL><DEDENT><DEDENT>return np.mean(scores, axis=<NUM_LIT:0>), fit_model_time/len(folds), score_model_time/len(folds)<EOL>", "docstring": "Evaluation with:\n  Matthews correlation coefficient: represents thresholding measures\n  AUC: represents ranking measures\n  Brier score: represents calibration measures", "id": "f3962:m1"}
{"signature": "def main(loader, name):", "body": "scores = []<EOL>raw_scores_ds = {}<EOL>X, y, mapping = loader()<EOL>clf = linear_model.LogisticRegression(solver='<STR_LIT>', multi_class='<STR_LIT>', max_iter=<NUM_LIT:200>, random_state=<NUM_LIT:0>)<EOL>encoders = (set(category_encoders.__all__) - {'<STR_LIT>'})  <EOL>for encoder_name in encoders:<EOL><INDENT>encoder = getattr(category_encoders, encoder_name)<EOL>start_time = time.time()<EOL>score, stds, raw_scores, dim = score_models(clf, X, y, encoder)<EOL>scores.append([encoder_name, name, dim, score, stds, time.time() - start_time])<EOL>raw_scores_ds[encoder_name] = raw_scores<EOL>gc.collect()<EOL><DEDENT>results = pd.DataFrame(scores, columns=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>raw = pd.DataFrame.from_dict(raw_scores_ds)<EOL>ax = raw.plot(kind='<STR_LIT>', return_type='<STR_LIT>')<EOL>plt.title('<STR_LIT>' % (name,))<EOL>plt.ylabel('<STR_LIT>')<EOL>for tick in ax.get_xticklabels():<EOL><INDENT>tick.set_rotation(<NUM_LIT>)<EOL><DEDENT>plt.grid()<EOL>plt.tight_layout()<EOL>plt.show()<EOL>return results, raw<EOL>", "docstring": "Here we iterate through the datasets and score them with a classifier using different encodings.", "id": "f3967:m1"}
{"signature": "def get_splice_data():", "body": "df = pd.read_csv('<STR_LIT>')<EOL>X = df.reindex(columns=[x for x in df.columns.values if x != '<STR_LIT:class>'])<EOL>X['<STR_LIT>'] = X['<STR_LIT>'].map(lambda x: list(str(x).strip()))<EOL>for idx in range(<NUM_LIT>):<EOL><INDENT>X['<STR_LIT>' % (idx, )] = X['<STR_LIT>'].map(lambda x: x[idx])<EOL><DEDENT>del X['<STR_LIT>']<EOL>y = df.reindex(columns=['<STR_LIT:class>'])<EOL>y = preprocessing.LabelEncoder().fit_transform(y.values.reshape(-<NUM_LIT:1>, ))<EOL>mapping = None<EOL>return X, y, mapping<EOL>", "docstring": "Load the mushroom dataset, split it into X and y, and then call the label encoder to get an integer y column.\n\n:return:", "id": "f3969:m2"}
{"signature": "def create_dataset(n_rows=<NUM_LIT:1000>, extras=False, has_none=True):", "body": "random.seed(<NUM_LIT>)<EOL>ds = [[<EOL>random.random(),                                                                        <EOL>random.choice([float('<STR_LIT>'), float('<STR_LIT>'), float('<STR_LIT>'), -<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, -<NUM_LIT:1>, math.pi]),      <EOL>row,                                                                                    <EOL>str(row),                                                                               <EOL>random.choice(['<STR_LIT:A>', '<STR_LIT:B>']) if extras else '<STR_LIT:A>',                                           <EOL>random.choice(['<STR_LIT:A>', '<STR_LIT>', '<STR_LIT>']),                                                   <EOL>random.choice(['<STR_LIT:A>', '<STR_LIT:B>', '<STR_LIT:C>', None]) if has_none else random.choice(['<STR_LIT:A>', '<STR_LIT:B>', '<STR_LIT:C>']),   <EOL>random.choice(['<STR_LIT:A>', '<STR_LIT:B>', '<STR_LIT:C>', '<STR_LIT:D>']) if extras else random.choice(['<STR_LIT:A>', '<STR_LIT:B>', '<STR_LIT:C>']),      <EOL>random.choice([<NUM_LIT:12>, <NUM_LIT>, -<NUM_LIT:32>]),                                                           <EOL>random.choice(['<STR_LIT:A>', '<STR_LIT:B>', '<STR_LIT:C>']),                                                         <EOL>random.choice(['<STR_LIT:A>', '<STR_LIT:B>', '<STR_LIT:C>', np.nan])                                                  <EOL>] for row in range(n_rows)]<EOL>df = pd.DataFrame(ds, columns=['<STR_LIT:float>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:none>', '<STR_LIT>', <NUM_LIT>, '<STR_LIT>', '<STR_LIT>'])<EOL>df['<STR_LIT>'] = pd.Categorical(df['<STR_LIT>'], categories=['<STR_LIT:A>', '<STR_LIT:B>', '<STR_LIT:C>'])<EOL>df['<STR_LIT>'] = pd.Categorical(df['<STR_LIT>'], categories=['<STR_LIT:A>', '<STR_LIT:B>', '<STR_LIT:C>'])<EOL>return df<EOL>", "docstring": "Creates a dataset with some categorical 
variables.", "id": "f3981:m2"}
{"signature": "def verify_inverse_transform(x, x_inv):", "body": "assert x.equals(x_inv)<EOL>", "docstring": "Verify x is equal to x_inv. The test returns true for NaN.equals(NaN) as it should.", "id": "f3981:m3"}
{"signature": "def fit(self, X, y=None, **kwargs):", "body": "<EOL>X = util.convert_input(X)<EOL>self._dim = X.shape[<NUM_LIT:1>]<EOL>if self.cols is None:<EOL><INDENT>self.cols = util.get_obj_cols(X)<EOL><DEDENT>else:<EOL><INDENT>self.cols = util.convert_cols_to_list(self.cols)<EOL><DEDENT>if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>_, categories = self.ordinal_encoding(<EOL>X,<EOL>mapping=self.mapping,<EOL>cols=self.cols,<EOL>handle_unknown=self.handle_unknown,<EOL>handle_missing=self.handle_missing<EOL>)<EOL>self.mapping = categories<EOL>X_temp = self.transform(X, override_return_df=True)<EOL>self.feature_names = X_temp.columns.tolist()<EOL>if self.drop_invariant:<EOL><INDENT>self.drop_cols = []<EOL>generated_cols = util.get_generated_cols(X, X_temp, self.cols)<EOL>self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= <NUM_LIT>]<EOL>try:<EOL><INDENT>[self.feature_names.remove(x) for x in self.drop_cols]<EOL><DEDENT>except KeyError as e:<EOL><INDENT>if self.verbose > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(e))<EOL><DEDENT><DEDENT><DEDENT>return self<EOL>", "docstring": "Fit encoder according to X and y.\n\n        Parameters\n        ----------\n\n        X : array-like, shape = [n_samples, n_features]\n            Training vectors, where n_samples is the number of samples\n            and n_features is the number of features.\n        y : array-like, shape = [n_samples]\n            Target values.\n\n        Returns\n        -------\n\n        self : encoder\n            Returns self.", "id": "f3990:c0:m2"}
{"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>return self.feature_names<EOL><DEDENT>", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n    A list with all feature names transformed or added.\n    Note: potentially dropped features are not included!", "id": "f3990:c0:m6"}
{"signature": "def fit(self, X, y, **kwargs):", "body": "<EOL>X = util.convert_input(X)<EOL>y = util.convert_input_vector(y, X.index)<EOL>if X.shape[<NUM_LIT:0>] != y.shape[<NUM_LIT:0>]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + str(X.shape[<NUM_LIT:0>]) + \"<STR_LIT>\" + str(y.shape[<NUM_LIT:0>]) + \"<STR_LIT:.>\")<EOL><DEDENT>self._dim = X.shape[<NUM_LIT:1>]<EOL>if self.cols is None:<EOL><INDENT>self.cols = util.get_obj_cols(X)<EOL><DEDENT>else:<EOL><INDENT>self.cols = util.convert_cols_to_list(self.cols)<EOL><DEDENT>if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>self.ordinal_encoder = OrdinalEncoder(<EOL>verbose=self.verbose,<EOL>cols=self.cols,<EOL>handle_unknown='<STR_LIT:value>',<EOL>handle_missing='<STR_LIT:value>'<EOL>)<EOL>self.ordinal_encoder = self.ordinal_encoder.fit(X)<EOL>X_ordinal = self.ordinal_encoder.transform(X)<EOL>self.mapping = self.fit_target_encoding(X_ordinal, y)<EOL>X_temp = self.transform(X, override_return_df=True)<EOL>self.feature_names = list(X_temp.columns)<EOL>if self.drop_invariant:<EOL><INDENT>self.drop_cols = []<EOL>X_temp = self.transform(X)<EOL>generated_cols = util.get_generated_cols(X, X_temp, self.cols)<EOL>self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= <NUM_LIT>]<EOL>try:<EOL><INDENT>[self.feature_names.remove(x) for x in self.drop_cols]<EOL><DEDENT>except KeyError as e:<EOL><INDENT>if self.verbose > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(e))<EOL><DEDENT><DEDENT><DEDENT>return self<EOL>", "docstring": "Fit encoder according to X and y.\n\n        Parameters\n        ----------\n        X : array-like, shape = [n_samples, n_features]\n            Training vectors, where n_samples is the number of samples\n            and n_features is the number of features.\n        y : array-like, shape = [n_samples]\n            Target values.\n\n        Returns\n        
-------\n        self : encoder\n            Returns self.", "id": "f3991:c0:m1"}
{"signature": "def fit_transform(self, X, y=None, **fit_params):", "body": "return self.fit(X, y, **fit_params).transform(X, y)<EOL>", "docstring": "Encoders that utilize the target must make sure that the training data are transformed with:\n     transform(X, y)\nand not with:\n    transform(X)", "id": "f3991:c0:m4"}
{"signature": "def transform(self, X, y=None, override_return_df=False):", "body": "if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>if self._dim is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>X = util.convert_input(X)<EOL>if X.shape[<NUM_LIT:1>] != self._dim:<EOL><INDENT>raise ValueError('<STR_LIT>' % (X.shape[<NUM_LIT:1>], self._dim,))<EOL><DEDENT>if y is not None:<EOL><INDENT>y = util.convert_input_vector(y, X.index)<EOL>if X.shape[<NUM_LIT:0>] != y.shape[<NUM_LIT:0>]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + str(X.shape[<NUM_LIT:0>]) + \"<STR_LIT>\" + str(y.shape[<NUM_LIT:0>]) + \"<STR_LIT:.>\")<EOL><DEDENT><DEDENT>if not self.cols:<EOL><INDENT>return X<EOL><DEDENT>X = self.ordinal_encoder.transform(X)<EOL>if self.handle_unknown == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isin([-<NUM_LIT:1>]).any().any():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>X = self.target_encode(X)<EOL>if self.drop_invariant:<EOL><INDENT>for col in self.drop_cols:<EOL><INDENT>X.drop(col, <NUM_LIT:1>, inplace=True)<EOL><DEDENT><DEDENT>if self.return_df or override_return_df:<EOL><INDENT>return X<EOL><DEDENT>else:<EOL><INDENT>return X.values<EOL><DEDENT>", "docstring": "Perform the transformation to new categorical data.\n\n        Parameters\n        ----------\n        X : array-like, shape = [n_samples, n_features]\n        y : array-like, shape = [n_samples] when transform by leave one out\n            None, when transform without target info (such as transform test set)\n\n        Returns\n        -------\n        p : array, shape = [n_samples, n_numeric + N]\n            Transformed values with encoding applied.", "id": "f3991:c0:m3"}
{"signature": "def fit(self, X, y=None, **kwargs):", "body": "<EOL>X = util.convert_input(X)<EOL>self._dim = X.shape[<NUM_LIT:1>]<EOL>if self.cols is None:<EOL><INDENT>self.cols = util.get_obj_cols(X)<EOL><DEDENT>else:<EOL><INDENT>self.cols = util.convert_cols_to_list(self.cols)<EOL><DEDENT>if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>self.ordinal_encoder = OrdinalEncoder(<EOL>verbose=self.verbose,<EOL>cols=self.cols,<EOL>handle_unknown='<STR_LIT:value>',<EOL>handle_missing='<STR_LIT:value>'<EOL>)<EOL>self.ordinal_encoder = self.ordinal_encoder.fit(X)<EOL>self.mapping = self.fit_base_n_encoding(X)<EOL>X_temp = self.transform(X, override_return_df=True)<EOL>self._encoded_columns = X_temp.columns.values<EOL>self.feature_names = list(X_temp.columns)<EOL>if self.drop_invariant:<EOL><INDENT>self.drop_cols = []<EOL>generated_cols = util.get_generated_cols(X, X_temp, self.cols)<EOL>self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= <NUM_LIT>]<EOL>try:<EOL><INDENT>[self.feature_names.remove(x) for x in self.drop_cols]<EOL><DEDENT>except KeyError as e:<EOL><INDENT>if self.verbose > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(e))<EOL><DEDENT><DEDENT><DEDENT>return self<EOL>", "docstring": "Fit encoder according to X and y.\n\n        Parameters\n        ----------\n\n        X : array-like, shape = [n_samples, n_features]\n            Training vectors, where n_samples is the number of samples\n            and n_features is the number of features.\n        y : array-like, shape = [n_samples]\n            Target values.\n\n        Returns\n        -------\n\n        self : encoder\n            Returns self.", "id": "f3992:c0:m1"}
{"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return self.feature_names<EOL><DEDENT>", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n    A list with all feature names transformed or added.\n    Note: potentially dropped features are not included!", "id": "f3992:c0:m10"}
{"signature": "def basen_encode(self, X_in, cols=None):", "body": "X = X_in.copy(deep=True)<EOL>cols = X.columns.values.tolist()<EOL>for switch in self.mapping:<EOL><INDENT>col = switch.get('<STR_LIT>')<EOL>mod = switch.get('<STR_LIT>')<EOL>base_df = mod.reindex(X[col])<EOL>base_df.set_index(X.index, inplace=True)<EOL>X = pd.concat([base_df, X], axis=<NUM_LIT:1>)<EOL>old_column_index = cols.index(col)<EOL>cols[old_column_index: old_column_index + <NUM_LIT:1>] = mod.columns<EOL><DEDENT>return X.reindex(columns=cols)<EOL>", "docstring": "Basen encoding encodes the integers as basen code with one column per digit.\n\nParameters\n----------\nX_in: DataFrame\ncols: list-like, default None\n    Column names in the DataFrame to be encoded\n\nReturns\n-------\ndummies : DataFrame", "id": "f3992:c0:m6"}
{"signature": "def transform(self, X, override_return_df=False):", "body": "if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>if self._dim is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>X = util.convert_input(X)<EOL>if X.shape[<NUM_LIT:1>] != self._dim:<EOL><INDENT>raise ValueError('<STR_LIT>' % (X.shape[<NUM_LIT:1>], self._dim,))<EOL><DEDENT>if not self.cols:<EOL><INDENT>return X<EOL><DEDENT>X_out = self.ordinal_encoder.transform(X)<EOL>if self.handle_unknown == '<STR_LIT:error>':<EOL><INDENT>if X_out[self.cols].isin([-<NUM_LIT:1>]).any().any():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>X_out = self.basen_encode(X_out, cols=self.cols)<EOL>if self.drop_invariant:<EOL><INDENT>for col in self.drop_cols:<EOL><INDENT>X_out.drop(col, <NUM_LIT:1>, inplace=True)<EOL><DEDENT><DEDENT>if self.return_df or override_return_df:<EOL><INDENT>return X_out<EOL><DEDENT>else:<EOL><INDENT>return X_out.values<EOL><DEDENT>", "docstring": "Perform the transformation to new categorical data.\n\n        Parameters\n        ----------\n\n        X : array-like, shape = [n_samples, n_features]\n\n        Returns\n        -------\n\n        p : array, shape = [n_samples, n_numeric + N]\n            Transformed values with encoding applied.", "id": "f3992:c0:m3"}
{"signature": "def col_transform(self, col, digits):", "body": "if col is None or float(col) < <NUM_LIT:0.0>:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>col = self.number_to_base(int(col), self.base, digits)<EOL>if len(col) == digits:<EOL><INDENT>return col<EOL><DEDENT>else:<EOL><INDENT>return [<NUM_LIT:0> for _ in range(digits - len(col))] + col<EOL><DEDENT><DEDENT>", "docstring": "The lambda body to transform the column values", "id": "f3992:c0:m8"}
{"signature": "def inverse_transform(self, X_in):", "body": "X = X_in.copy(deep=True)<EOL>X = util.convert_input(X)<EOL>if self._dim is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>X = self.basen_to_integer(X, self.cols, self.base)<EOL>if X.shape[<NUM_LIT:1>] != self._dim:<EOL><INDENT>if self.drop_invariant:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (X.shape[<NUM_LIT:1>],))<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % (X.shape[<NUM_LIT:1>], self._dim,))<EOL><DEDENT><DEDENT>if not self.cols:<EOL><INDENT>return X if self.return_df else X.values<EOL><DEDENT>for switch in self.ordinal_encoder.mapping:<EOL><INDENT>column_mapping = switch.get('<STR_LIT>')<EOL>inverse = pd.Series(data=column_mapping.index, index=column_mapping.get_values())<EOL>X[switch.get('<STR_LIT>')] = X[switch.get('<STR_LIT>')].map(inverse).astype(switch.get('<STR_LIT>'))<EOL>if self.handle_unknown == '<STR_LIT>' and self.handle_missing == '<STR_LIT>':<EOL><INDENT>for col in self.cols:<EOL><INDENT>if X[switch.get('<STR_LIT>')].isnull().any():<EOL><INDENT>warnings.warn(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (col,))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return X if self.return_df else X.values<EOL>", "docstring": "Perform the inverse transformation to encoded data.\n\nParameters\n----------\nX_in : array-like, shape = [n_samples, n_features]\n\nReturns\n-------\np: array, the same size of X_in", "id": "f3992:c0:m4"}
{"signature": "def fit(self, X, y=None, **kwargs):", "body": "<EOL>X = util.convert_input(X)<EOL>self._dim = X.shape[<NUM_LIT:1>]<EOL>if self.cols is None:<EOL><INDENT>self.cols = util.get_obj_cols(X)<EOL><DEDENT>else:<EOL><INDENT>self.cols = util.convert_cols_to_list(self.cols)<EOL><DEDENT>if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>self.ordinal_encoder = OrdinalEncoder(<EOL>verbose=self.verbose,<EOL>cols=self.cols,<EOL>handle_unknown='<STR_LIT:value>',<EOL>handle_missing='<STR_LIT:value>'<EOL>)<EOL>self.ordinal_encoder = self.ordinal_encoder.fit(X)<EOL>ordinal_mapping = self.ordinal_encoder.category_mapping<EOL>mappings_out = []<EOL>for switch in ordinal_mapping:<EOL><INDENT>values = switch.get('<STR_LIT>')<EOL>col = switch.get('<STR_LIT>')<EOL>column_mapping = self.fit_polynomial_coding(col, values, self.handle_missing, self.handle_unknown)<EOL>mappings_out.append({'<STR_LIT>': switch.get('<STR_LIT>'), '<STR_LIT>': column_mapping, })<EOL><DEDENT>self.mapping = mappings_out<EOL>X_temp = self.transform(X, override_return_df=True)<EOL>self.feature_names = X_temp.columns.tolist()<EOL>if self.drop_invariant:<EOL><INDENT>self.drop_cols = []<EOL>generated_cols = util.get_generated_cols(X, X_temp, self.cols)<EOL>self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= <NUM_LIT>]<EOL>try:<EOL><INDENT>[self.feature_names.remove(x) for x in self.drop_cols]<EOL><DEDENT>except KeyError as e:<EOL><INDENT>if self.verbose > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(e))<EOL><DEDENT><DEDENT><DEDENT>return self<EOL>", "docstring": "Fit encoder according to X and y.\n\n        Parameters\n        ----------\n\n        X : array-like, shape = [n_samples, n_features]\n            Training vectors, where n_samples is the number of samples\n            and n_features is the number of features.\n        y : array-like, shape = 
[n_samples]\n            Target values.\n\n        Returns\n        -------\n\n        self : encoder\n            Returns self.", "id": "f3994:c0:m1"}
{"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>return self.feature_names<EOL><DEDENT>", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n    A list with all feature names transformed or added.\n    Note: potentially dropped features are not included!", "id": "f3995:c0:m5"}
{"signature": "def fit(self, X, y, **kwargs):", "body": "<EOL>X = util.convert_input(X)<EOL>y = util.convert_input_vector(y, X.index).astype(float)<EOL>if X.shape[<NUM_LIT:0>] != y.shape[<NUM_LIT:0>]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + str(X.shape[<NUM_LIT:0>]) + \"<STR_LIT>\" + str(y.shape[<NUM_LIT:0>]) + \"<STR_LIT:.>\")<EOL><DEDENT>self._dim = X.shape[<NUM_LIT:1>]<EOL>if self.use_default_cols:<EOL><INDENT>self.cols = util.get_obj_cols(X)<EOL><DEDENT>else:<EOL><INDENT>self.cols = util.convert_cols_to_list(self.cols)<EOL><DEDENT>if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>categories = self.fit_leave_one_out(<EOL>X, y,<EOL>cols=self.cols<EOL>)<EOL>self.mapping = categories<EOL>X_temp = self.transform(X, override_return_df=True)<EOL>self.feature_names = X_temp.columns.tolist()<EOL>if self.drop_invariant:<EOL><INDENT>self.drop_cols = []<EOL>generated_cols = util.get_generated_cols(X, X_temp, self.cols)<EOL>self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= <NUM_LIT>]<EOL>try:<EOL><INDENT>[self.feature_names.remove(x) for x in self.drop_cols]<EOL><DEDENT>except KeyError as e:<EOL><INDENT>if self.verbose > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(e))<EOL><DEDENT><DEDENT><DEDENT>return self<EOL>", "docstring": "Fit encoder according to X and y.\n\n        Parameters\n        ----------\n\n        X : array-like, shape = [n_samples, n_features]\n            Training vectors, where n_samples is the number of samples\n            and n_features is the number of features.\n        y : array-like, shape = [n_samples]\n            Target values.\n\n        Returns\n        -------\n\n        self : encoder\n            Returns self.", "id": "f3996:c0:m1"}
{"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return self.feature_names<EOL><DEDENT>", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n    A list with all feature names transformed or added.\n    Note: potentially dropped features are not included!", "id": "f3996:c0:m7"}
{"signature": "def fit(self, X, y, **kwargs):", "body": "<EOL>X = util.convert_input(X)<EOL>y = util.convert_input_vector(y, X.index).astype(float)<EOL>if X.shape[<NUM_LIT:0>] != y.shape[<NUM_LIT:0>]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + str(X.shape[<NUM_LIT:0>]) + \"<STR_LIT>\" + str(y.shape[<NUM_LIT:0>]) + \"<STR_LIT:.>\")<EOL><DEDENT>unique = y.unique()<EOL>if len(unique) != <NUM_LIT:2>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + str(len(unique)) + \"<STR_LIT>\")<EOL><DEDENT>if y.isnull().any():<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if np.max(unique) < <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if np.min(unique) > <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self._dim = X.shape[<NUM_LIT:1>]<EOL>if self.cols is None:<EOL><INDENT>self.cols = util.get_obj_cols(X)<EOL><DEDENT>else:<EOL><INDENT>self.cols = util.convert_cols_to_list(self.cols)<EOL><DEDENT>if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>self.ordinal_encoder = OrdinalEncoder(<EOL>verbose=self.verbose,<EOL>cols=self.cols,<EOL>handle_unknown='<STR_LIT:value>',<EOL>handle_missing='<STR_LIT:value>'<EOL>)<EOL>self.ordinal_encoder = self.ordinal_encoder.fit(X)<EOL>X_ordinal = self.ordinal_encoder.transform(X)<EOL>self.mapping = self._train(X_ordinal, y)<EOL>X_temp = self.transform(X, override_return_df=True)<EOL>self.feature_names = X_temp.columns.tolist()<EOL>if self.drop_invariant:<EOL><INDENT>self.drop_cols = []<EOL>generated_cols = util.get_generated_cols(X, X_temp, self.cols)<EOL>self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= <NUM_LIT>]<EOL>try:<EOL><INDENT>[self.feature_names.remove(x) for x in self.drop_cols]<EOL><DEDENT>except KeyError as e:<EOL><INDENT>if self.verbose > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(e))<EOL><DEDENT><DEDENT><DEDENT>return self<EOL>", 
"docstring": "Fit encoder according to X and binary y.\n\n        Parameters\n        ----------\n\n        X : array-like, shape = [n_samples, n_features]\n            Training vectors, where n_samples is the number of samples\n            and n_features is the number of features.\n        y : array-like, shape = [n_samples]\n            Binary target values.\n\n        Returns\n        -------\n\n        self : encoder\n            Returns self.", "id": "f3997:c0:m1"}
{"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return self.feature_names<EOL><DEDENT>", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n    A list with all feature names transformed or added.\n    Note: potentially dropped features are not included!", "id": "f3998:c0:m8"}
{"signature": "def fit(self, X, y=None, **kwargs):", "body": "<EOL>X = util.convert_input(X)<EOL>self._dim = X.shape[<NUM_LIT:1>]<EOL>if self.cols is None:<EOL><INDENT>self.cols = util.get_obj_cols(X)<EOL><DEDENT>else:<EOL><INDENT>self.cols = util.convert_cols_to_list(self.cols)<EOL><DEDENT>if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>self.ordinal_encoder = OrdinalEncoder(<EOL>verbose=self.verbose,<EOL>cols=self.cols,<EOL>handle_unknown='<STR_LIT:value>',<EOL>handle_missing='<STR_LIT:value>'<EOL>)<EOL>self.ordinal_encoder = self.ordinal_encoder.fit(X)<EOL>self.mapping = self.generate_mapping()<EOL>X_temp = self.transform(X, override_return_df=True)<EOL>self.feature_names = list(X_temp.columns)<EOL>if self.drop_invariant:<EOL><INDENT>self.drop_cols = []<EOL>generated_cols = util.get_generated_cols(X, X_temp, self.cols)<EOL>self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= <NUM_LIT>]<EOL>try:<EOL><INDENT>[self.feature_names.remove(x) for x in self.drop_cols]<EOL><DEDENT>except KeyError as e:<EOL><INDENT>if self.verbose > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(e))<EOL><DEDENT><DEDENT><DEDENT>return self<EOL>", "docstring": "Fit encoder according to X and y.\n\n        Parameters\n        ----------\n\n        X : array-like, shape = [n_samples, n_features]\n            Training vectors, where n_samples is the number of samples\n            and n_features is the number of features.\n        y : array-like, shape = [n_samples]\n            Target values.\n\n        Returns\n        -------\n\n        self : encoder\n            Returns self.", "id": "f3998:c0:m2"}
{"signature": "def get_generated_cols(X_original, X_transformed, to_transform):", "body": "original_cols = list(X_original.columns)<EOL>if len(to_transform) > <NUM_LIT:0>:<EOL><INDENT>[original_cols.remove(c) for c in to_transform]<EOL><DEDENT>current_cols = list(X_transformed.columns)<EOL>if len(original_cols) > <NUM_LIT:0>:<EOL><INDENT>[current_cols.remove(c) for c in original_cols]<EOL><DEDENT>return current_cols<EOL>", "docstring": "Returns a list of the generated/transformed columns.\n\nArguments:\n    X_original: df\n        the original (input) DataFrame.\n    X_transformed: df\n        the transformed (current) DataFrame.\n    to_transform: [str]\n        a list of columns that were transformed (as in the original DataFrame), commonly self.cols.\n\nOutput:\n    a list of columns that were transformed (as in the current DataFrame).", "id": "f3999:m5"}
{"signature": "def convert_input_vector(y, index):", "body": "if y is None:<EOL><INDENT>return None<EOL><DEDENT>if isinstance(y, pd.Series):<EOL><INDENT>return y<EOL><DEDENT>elif isinstance(y, np.ndarray):<EOL><INDENT>if len(np.shape(y))==<NUM_LIT:1>:  <EOL><INDENT>return pd.Series(y, name='<STR_LIT:target>', index=index)<EOL><DEDENT>elif len(np.shape(y))==<NUM_LIT:2> and np.shape(y)[<NUM_LIT:0>]==<NUM_LIT:1>:  <EOL><INDENT>return pd.Series(y[<NUM_LIT:0>, :], name='<STR_LIT:target>', index=index)<EOL><DEDENT>elif len(np.shape(y))==<NUM_LIT:2> and np.shape(y)[<NUM_LIT:1>]==<NUM_LIT:1>:  <EOL><INDENT>return pd.Series(y[:, <NUM_LIT:0>], name='<STR_LIT:target>', index=index)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % (str(np.shape(y))))<EOL><DEDENT><DEDENT>elif np.isscalar(y):<EOL><INDENT>return pd.Series([y], name='<STR_LIT:target>', index=index)<EOL><DEDENT>elif isinstance(y, list):<EOL><INDENT>if len(y)==<NUM_LIT:0> or (len(y)><NUM_LIT:0> and not isinstance(y[<NUM_LIT:0>], list)): <EOL><INDENT>return pd.Series(y, name='<STR_LIT:target>', index=index)<EOL><DEDENT>elif len(y)><NUM_LIT:0> and isinstance(y[<NUM_LIT:0>], list) and len(y[<NUM_LIT:0>])==<NUM_LIT:1>: <EOL><INDENT>flatten = lambda y: [item for sublist in y for item in sublist]<EOL>return pd.Series(flatten(y), name='<STR_LIT:target>', index=index)<EOL><DEDENT>elif len(y)==<NUM_LIT:1> and isinstance(y[<NUM_LIT:0>], list): <EOL><INDENT>return pd.Series(y[<NUM_LIT:0>], name='<STR_LIT:target>', index=index)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif isinstance(y, pd.DataFrame):<EOL><INDENT>if len(list(y))==<NUM_LIT:0>: <EOL><INDENT>return pd.Series(y, name='<STR_LIT:target>')<EOL><DEDENT>if len(list(y))==<NUM_LIT:1>: <EOL><INDENT>return y.iloc[:, <NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % (str(y.shape)))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return pd.Series(y, name='<STR_LIT:target>', index=index)<EOL><DEDENT>", 
"docstring": "Unite target data type into a Series.\nIf the target is a Series or a DataFrame, we preserve its index.\nBut if the target does not contain index attribute, we use the index from the argument.", "id": "f3999:m4"}
{"signature": "def get_obj_cols(df):", "body": "obj_cols = []<EOL>for idx, dt in enumerate(df.dtypes):<EOL><INDENT>if dt == '<STR_LIT:object>' or is_category(dt):<EOL><INDENT>obj_cols.append(df.columns.values[idx])<EOL><DEDENT><DEDENT>return obj_cols<EOL>", "docstring": "Returns names of 'object' columns in the DataFrame.", "id": "f3999:m1"}
{"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return self.feature_names<EOL><DEDENT>", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n    A list with all feature names transformed or added.\n    Note: potentially dropped features are not included!", "id": "f4000:c0:m4"}
{"signature": "@staticmethod<EOL><INDENT>def hashing_trick(X_in, hashing_method='<STR_LIT>', N=<NUM_LIT:2>, cols=None, make_copy=False):<DEDENT>", "body": "try:<EOL><INDENT>if hashing_method not in hashlib.algorithms_available:<EOL><INDENT>raise ValueError('<STR_LIT>' % (<EOL>hashing_method,<EOL>'<STR_LIT:U+002CU+0020>'.join([str(x) for x in hashlib.algorithms_available])<EOL>))<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>try:<EOL><INDENT>_ = hashlib.new(hashing_method)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>if make_copy:<EOL><INDENT>X = X_in.copy(deep=True)<EOL><DEDENT>else:<EOL><INDENT>X = X_in<EOL><DEDENT>if cols is None:<EOL><INDENT>cols = X.columns.values<EOL><DEDENT>def hash_fn(x):<EOL><INDENT>tmp = [<NUM_LIT:0> for _ in range(N)]<EOL>for val in x.values:<EOL><INDENT>if val is not None:<EOL><INDENT>hasher = hashlib.new(hashing_method)<EOL>if sys.version_info[<NUM_LIT:0>] == <NUM_LIT:2>:<EOL><INDENT>hasher.update(str(val))<EOL><DEDENT>else:<EOL><INDENT>hasher.update(bytes(str(val), '<STR_LIT:utf-8>'))<EOL><DEDENT>tmp[int(hasher.hexdigest(), <NUM_LIT:16>) % N] += <NUM_LIT:1><EOL><DEDENT><DEDENT>return pd.Series(tmp, index=new_cols)<EOL><DEDENT>new_cols = ['<STR_LIT>' % d for d in range(N)]<EOL>X_cat = X.loc[:, cols]<EOL>X_num = X.loc[:, [x for x in X.columns.values if x not in cols]]<EOL>X_cat = X_cat.apply(hash_fn, axis=<NUM_LIT:1>)<EOL>X_cat.columns = new_cols<EOL>X = pd.concat([X_cat, X_num], axis=<NUM_LIT:1>)<EOL>return X<EOL>", "docstring": "A basic hashing implementation with configurable dimensionality/precision\n\n        Performs the hashing trick on a pandas dataframe, `X`, using the hashing method from hashlib\n        identified by `hashing_method`.  
The number of output dimensions (`N`), and columns to hash (`cols`) are\n        also configurable.\n\n        Parameters\n        ----------\n\n        X_in: pandas dataframe\n            description text\n        hashing_method: string, optional\n            description text\n        N: int, optional\n            description text\n        cols: list, optional\n            description text\n        make_copy: bool, optional\n            description text\n\n        Returns\n        -------\n\n        out : dataframe\n            A hashing encoded dataframe.\n\n        References\n        ----------\n        Cite the relevant literature, e.g. [1]_.  You may also cite these\n        references in the notes section above.\n        .. [1] Kilian Weinberger; Anirban Dasgupta; John Langford; Alex Smola; Josh Attenberg (2009). Feature Hashing\n        for Large Scale Multitask Learning. Proc. ICML.", "id": "f4000:c0:m3"}
{"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>return self.feature_names<EOL><DEDENT>", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n    A list with all feature names transformed or added.\n    Note: potentially dropped features are not included!", "id": "f4001:c0:m6"}
{"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return self.feature_names<EOL><DEDENT>", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n    A list with all feature names transformed or added.\n    Note: potentially dropped features are not included!", "id": "f4002:c0:m5"}
{"signature": "def fit_transform(self, X, y=None, **fit_params):", "body": "return self.fit(X, y, **fit_params).transform(X, y)<EOL>", "docstring": "Encoders that utilize the target must make sure that the training data are transformed with:\n    transform(X, y)\nand not with:\n    transform(X)", "id": "f4003:c0:m3"}
{"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>return self.feature_names<EOL><DEDENT>", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n    A list with all feature names transformed or added.\n    Note: potentially dropped features are not included!", "id": "f4003:c0:m9"}
{"signature": "def transform(self, X, y=None, override_return_df=False):", "body": "if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>if self._dim is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>X = util.convert_input(X)<EOL>if X.shape[<NUM_LIT:1>] != self._dim:<EOL><INDENT>raise ValueError('<STR_LIT>' % (X.shape[<NUM_LIT:1>], self._dim,))<EOL><DEDENT>if y is not None:<EOL><INDENT>y = util.convert_input_vector(y, X.index).astype(float)<EOL>if X.shape[<NUM_LIT:0>] != y.shape[<NUM_LIT:0>]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + str(X.shape[<NUM_LIT:0>]) + \"<STR_LIT>\" + str(y.shape[<NUM_LIT:0>]) + \"<STR_LIT:.>\")<EOL><DEDENT><DEDENT>if not self.cols:<EOL><INDENT>return X<EOL><DEDENT>X = X.copy(deep=True)<EOL>X = self.ordinal_encoder.transform(X)<EOL>if self.handle_unknown == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isin([-<NUM_LIT:1>]).any().any():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>X = self._score(X, y)<EOL>if self.drop_invariant:<EOL><INDENT>for col in self.drop_cols:<EOL><INDENT>X.drop(col, <NUM_LIT:1>, inplace=True)<EOL><DEDENT><DEDENT>if self.return_df or override_return_df:<EOL><INDENT>return X<EOL><DEDENT>else:<EOL><INDENT>return X.values<EOL><DEDENT>", "docstring": "Perform the transformation to new categorical data. When the data are used for model training,\n        it is important to also pass the target in order to apply leave one out.\n\n        Parameters\n        ----------\n\n        X : array-like, shape = [n_samples, n_features]\n        y : array-like, shape = [n_samples] when transform by leave one out\n            None, when transform without target information (such as transform test set)\n\n\n\n        Returns\n        -------\n\n        p : array, shape = [n_samples, n_numeric + N]\n            Transformed values with encoding applied.", "id": "f4003:c0:m2"}
{"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return self.feature_names<EOL><DEDENT>", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n    A list with all feature names transformed or added.\n    Note: potentially dropped features are not included!", "id": "f4004:c0:m7"}
{"signature": "def fit(self, X, y, **kwargs):", "body": "<EOL>X = util.convert_input(X)<EOL>y = util.convert_input_vector(y, X.index).astype(float)<EOL>if X.shape[<NUM_LIT:0>] != y.shape[<NUM_LIT:0>]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + str(X.shape[<NUM_LIT:0>]) + \"<STR_LIT>\" + str(y.shape[<NUM_LIT:0>]) + \"<STR_LIT:.>\")<EOL><DEDENT>self._dim = X.shape[<NUM_LIT:1>]<EOL>if self.use_default_cols:<EOL><INDENT>self.cols = util.get_obj_cols(X)<EOL><DEDENT>else:<EOL><INDENT>self.cols = util.convert_cols_to_list(self.cols)<EOL><DEDENT>if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>categories = self.fit_leave_one_out(<EOL>X, y,<EOL>cols=self.cols<EOL>)<EOL>self.mapping = categories<EOL>X_temp = self.transform(X, y, override_return_df=True)<EOL>self.feature_names = X_temp.columns.tolist()<EOL>if self.drop_invariant:<EOL><INDENT>self.drop_cols = []<EOL>generated_cols = util.get_generated_cols(X, X_temp, self.cols)<EOL>self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= <NUM_LIT>]<EOL>try:<EOL><INDENT>[self.feature_names.remove(x) for x in self.drop_cols]<EOL><DEDENT>except KeyError as e:<EOL><INDENT>if self.verbose > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(e))<EOL><DEDENT><DEDENT><DEDENT>return self<EOL>", "docstring": "Fit encoder according to X and y.\n\n        Parameters\n        ----------\n\n        X : array-like, shape = [n_samples, n_features]\n            Training vectors, where n_samples is the number of samples\n            and n_features is the number of features.\n        y : array-like, shape = [n_samples]\n            Target values.\n\n        Returns\n        -------\n\n        self : encoder\n            Returns self.", "id": "f4004:c0:m1"}
{"signature": "def fit_transform(self, X, y=None, **fit_params):", "body": "return self.fit(X, y, **fit_params).transform(X, y)<EOL>", "docstring": "Encoders that utilize the target must make sure that the training data are transformed with:\n     transform(X, y)\nand not with:\n    transform(X)", "id": "f4004:c0:m3"}
{"signature": "def inverse_transform(self, X_in):", "body": "return self.base_n_encoder.inverse_transform(X_in)<EOL>", "docstring": "Perform the inverse transformation to encoded data.\n\nParameters\n----------\nX_in : array-like, shape = [n_samples, n_features]\n\nReturns\n-------\np: array, the same size of X_in", "id": "f4005:c0:m3"}
{"signature": "def transform(self, X, override_return_df=False):", "body": "return self.base_n_encoder.transform(X)<EOL>", "docstring": "Perform the transformation to new categorical data.\n\n        Parameters\n        ----------\n\n        X : array-like, shape = [n_samples, n_features]\n\n        Returns\n        -------\n\n        p : array, shape = [n_samples, n_numeric + N]\n            Transformed values with encoding applied.", "id": "f4005:c0:m2"}
{"signature": "def transform(self, X, override_return_df=False):", "body": "if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>if self._dim is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>X = util.convert_input(X)<EOL>if X.shape[<NUM_LIT:1>] != self._dim:<EOL><INDENT>raise ValueError('<STR_LIT>' % (X.shape[<NUM_LIT:1>], self._dim, ))<EOL><DEDENT>if not self.cols:<EOL><INDENT>return X<EOL><DEDENT>X = self.ordinal_encoder.transform(X)<EOL>if self.handle_unknown == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isin([-<NUM_LIT:1>]).any().any():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>X = self.backward_difference_coding(X, mapping=self.mapping)<EOL>if self.drop_invariant:<EOL><INDENT>for col in self.drop_cols:<EOL><INDENT>X.drop(col, <NUM_LIT:1>, inplace=True)<EOL><DEDENT><DEDENT>if self.return_df or override_return_df:<EOL><INDENT>return X<EOL><DEDENT>else:<EOL><INDENT>return X.values<EOL><DEDENT>", "docstring": "Perform the transformation to new categorical data.\n\n        Parameters\n        ----------\n\n        X : array-like, shape = [n_samples, n_features]\n\n        Returns\n        -------\n\n        p : array, shape = [n_samples, n_numeric + N]\n            Transformed values with encoding applied.", "id": "f4006:c0:m2"}
{"signature": "def get_feature_names(self):", "body": "if not isinstance(self.feature_names, list):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return self.feature_names<EOL><DEDENT>", "docstring": "Returns the names of all transformed / added columns.\n\nReturns\n-------\nfeature_names: list\n    A list with all feature names transformed or added.\n    Note: potentially dropped features are not included!", "id": "f4006:c0:m5"}
{"signature": "def fit(self, X, y=None, **kwargs):", "body": "<EOL>X = util.convert_input(X)<EOL>self._dim = X.shape[<NUM_LIT:1>]<EOL>if self.cols is None:<EOL><INDENT>self.cols = util.get_obj_cols(X)<EOL><DEDENT>else:<EOL><INDENT>self.cols = util.convert_cols_to_list(self.cols)<EOL><DEDENT>if self.handle_missing == '<STR_LIT:error>':<EOL><INDENT>if X[self.cols].isnull().any().bool():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>self.ordinal_encoder = OrdinalEncoder(<EOL>verbose=self.verbose,<EOL>cols=self.cols,<EOL>handle_unknown='<STR_LIT:value>',<EOL>handle_missing='<STR_LIT:value>'<EOL>)<EOL>self.ordinal_encoder = self.ordinal_encoder.fit(X)<EOL>ordinal_mapping = self.ordinal_encoder.category_mapping<EOL>mappings_out = []<EOL>for switch in ordinal_mapping:<EOL><INDENT>values = switch.get('<STR_LIT>')<EOL>col = switch.get('<STR_LIT>')<EOL>column_mapping = self.fit_backward_difference_coding(col, values, self.handle_missing, self.handle_unknown)<EOL>mappings_out.append({'<STR_LIT>': col, '<STR_LIT>': column_mapping, })<EOL><DEDENT>self.mapping = mappings_out<EOL>X_temp = self.transform(X, override_return_df=True)<EOL>self.feature_names = X_temp.columns.tolist()<EOL>if self.drop_invariant:<EOL><INDENT>self.drop_cols = []<EOL>generated_cols = util.get_generated_cols(X, X_temp, self.cols)<EOL>self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= <NUM_LIT>]<EOL>try:<EOL><INDENT>[self.feature_names.remove(x) for x in self.drop_cols]<EOL><DEDENT>except KeyError as e:<EOL><INDENT>if self.verbose > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(e))<EOL><DEDENT><DEDENT><DEDENT>return self<EOL>", "docstring": "Fits an ordinal encoder to produce a consistent mapping across applications and optionally finds\n        generally invariant columns to drop consistently.\n\n        Parameters\n        ----------\n\n        X : array-like, shape = [n_samples, n_features]\n            Training vectors, where n_samples is the number 
of samples\n            and n_features is the number of features.\n        y : array-like, shape = [n_samples]\n            Target values.\n\n        Returns\n        -------\n\n        self : encoder\n            Returns self.", "id": "f4006:c0:m1"}
{"signature": "def djeffify_html(rendered_string):", "body": "parser = DjeffParser()<EOL>parser.feed(rendered_string)<EOL>return parser.djhtml<EOL>", "docstring": "This function contains the core logic for a\nmiddleware, template tag or Template engine approach", "id": "f4011:m1"}
{"signature": "def djeffify_string(string_to_djeff):", "body": "string_to_djeff = re.sub(r'<STR_LIT>', '<STR_LIT:d>', string_to_djeff, flags=re.IGNORECASE)  <EOL>string_to_djeff = re.sub(r'<STR_LIT>', '<STR_LIT>', string_to_djeff, flags=re.IGNORECASE)  <EOL>string_to_djeff = re.sub(r'<STR_LIT>', '<STR_LIT>', string_to_djeff, flags=re.IGNORECASE)  <EOL>return string_to_djeff<EOL>", "docstring": "Djeffifies string_to_djeff", "id": "f4011:m0"}
{"signature": "def __init__(self, convert_charrefs=True, *args, **kwargs):", "body": "<EOL>try:<EOL><INDENT>HTMLParser.__init__(self, convert_charrefs=convert_charrefs)<EOL><DEDENT>except TypeError:<EOL><INDENT>HTMLParser.__init__(self)<EOL><DEDENT>self.djhtml = '<STR_LIT>'<EOL>", "docstring": "Explicitly set convert_charrefs to keep deprecation warnings at bay.\n\nSee:\nhttps://docs.python.org/3/library/html.parser.html#html.parser.HTMLParser", "id": "f4011:c2:m0"}
{"signature": "def prepend_line(filepath, line):", "body": "with open(filepath) as f:<EOL><INDENT>lines = f.readlines()<EOL><DEDENT>lines.insert(<NUM_LIT:0>, line)<EOL>with open(filepath, '<STR_LIT:w>') as f:<EOL><INDENT>f.writelines(lines)<EOL><DEDENT>", "docstring": "Rewrite a file adding a line to its beginning.", "id": "f4013:m3"}
{"signature": "def facts(self, **kwargs):", "body": "return self.__api.facts(query=EqualsOperator(\"<STR_LIT>\", self.name),<EOL>**kwargs)<EOL>", "docstring": "Get all facts of this node. Additional arguments may also be\n        specified that will be passed to the query function.", "id": "f4024:c4:m4"}
{"signature": "def events(self, **kwargs):", "body": "return self.__api.events(query=EqualsOperator(\"<STR_LIT>\", self.hash_),<EOL>**kwargs)<EOL>", "docstring": "Get all events for this report. Additional arguments may also be\n        specified that will be passed to the query function.", "id": "f4024:c1:m4"}
{"signature": "def resource(self, type_, title, **kwargs):", "body": "resources = self.__api.resources(<EOL>type_=type_,<EOL>title=title,<EOL>query=EqualsOperator(\"<STR_LIT>\", self.name),<EOL>**kwargs)<EOL>return next(resource for resource in resources)<EOL>", "docstring": "Get a resource matching the supplied type and title. Additional\n        arguments may also be specified that will be passed to the query\n        function.", "id": "f4024:c4:m7"}
{"signature": "def connect(host='<STR_LIT:localhost>', port=<NUM_LIT>, ssl_verify=False, ssl_key=None,<EOL>ssl_cert=None, timeout=<NUM_LIT:10>, protocol=None, url_path='<STR_LIT:/>',<EOL>username=None, password=None, token=None):", "body": "return BaseAPI(host=host, port=port,<EOL>timeout=timeout, ssl_verify=ssl_verify, ssl_key=ssl_key,<EOL>ssl_cert=ssl_cert, protocol=protocol, url_path=url_path,<EOL>username=username, password=password, token=token)<EOL>", "docstring": "Connect with PuppetDB. This will return an object allowing you\n    to query the API through its methods.\n\n    :param host: (Default: 'localhost;) Hostname or IP of PuppetDB.\n    :type host: :obj:`string`\n\n    :param port: (Default: '8080') Port on which to talk to PuppetDB.\n    :type port: :obj:`int`\n\n    :param ssl_verify: (optional) Verify PuppetDB server certificate.\n    :type ssl_verify: :obj:`bool` or :obj:`string` True, False or filesystem \\\n            path to CA certificate.\n\n    :param ssl_key: (optional) Path to our client secret key.\n    :type ssl_key: :obj:`None` or :obj:`string` representing a filesystem\\\n            path.\n\n    :param ssl_cert: (optional) Path to our client certificate.\n    :type ssl_cert: :obj:`None` or :obj:`string` representing a filesystem\\\n            path.\n\n    :param timeout: (Default: 10) Number of seconds to wait for a response.\n    :type timeout: :obj:`int`\n\n    :param protocol: (optional) Explicitly specify the protocol to be used\n            (especially handy when using HTTPS with ssl_verify=False and\n            without certs)\n    :type protocol: :obj:`None` or :obj:`string`\n\n    :param url_path: (Default: '/') The URL path where PuppetDB is served\n    :type url_path: :obj:`None` or :obj:`string`\n\n    :param username: (optional) The username to use for HTTP basic\n            authentication\n    :type username: :obj:`None` or :obj:`string`\n\n    :param password: (optional) The password to use for HTTP basic\n            
authentication\n    :type password: :obj:`None` or :obj:`string`\n\n    :param token: (optional) The x-auth token to use for X-Authentication\n    :type token: :obj:`None` or :obj:`string`", "id": "f4025:m0"}
{"signature": "def versioncmp(v1, v2):", "body": "def normalize(v):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return [int(x) for x in re.sub(r'<STR_LIT>', '<STR_LIT>', v).split(\"<STR_LIT:.>\")]<EOL><DEDENT>try:<EOL><INDENT>return cmp(normalize(v1), normalize(v2))<EOL><DEDENT>except NameError:<EOL><INDENT>return (normalize(v1) > normalize(v2)) - (<EOL>normalize(v1) < normalize(v2))<EOL><DEDENT>", "docstring": "Compares two objects, x and y, and returns an integer according to the\n    outcome. The return value is negative if x < y, zero if x == y and\n    positive if x > y.\n\n    :param v1: The first object to compare.\n    :param v2: The second object to compare.\n\n    :returns: -1, 0 or 1.\n    :rtype: :obj:`int`", "id": "f4027:m1"}
{"signature": "def _normalize_resource_type(self, type_):", "body": "return '<STR_LIT>'.join([s.capitalize() for s in type_.split('<STR_LIT>')])<EOL>", "docstring": "Normalizes the type passed to the api by capitalizing each part\n        of the type. For example:\n\n        sysctl::value -> Sysctl::Value\n        user -> User", "id": "f4028:c0:m4"}
{"signature": "def _query(self, endpoint, path=None, query=None,<EOL>order_by=None, limit=None, offset=None, include_total=False,<EOL>summarize_by=None, count_by=None, count_filter=None,<EOL>request_method='<STR_LIT:GET>'):", "body": "log.debug('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(endpoint, path, query, limit,<EOL>offset, summarize_by, count_by,<EOL>count_filter))<EOL>url = self._url(endpoint, path=path)<EOL>payload = {}<EOL>if query is not None:<EOL><INDENT>payload['<STR_LIT>'] = query<EOL><DEDENT>if order_by is not None:<EOL><INDENT>payload[PARAMETERS['<STR_LIT>']] = order_by<EOL><DEDENT>if limit is not None:<EOL><INDENT>payload['<STR_LIT>'] = limit<EOL><DEDENT>if include_total is True:<EOL><INDENT>payload[PARAMETERS['<STR_LIT>']] =json.dumps(include_total)<EOL><DEDENT>if offset is not None:<EOL><INDENT>payload['<STR_LIT>'] = offset<EOL><DEDENT>if summarize_by is not None:<EOL><INDENT>payload[PARAMETERS['<STR_LIT>']] = summarize_by<EOL><DEDENT>if count_by is not None:<EOL><INDENT>payload[PARAMETERS['<STR_LIT>']] = count_by<EOL><DEDENT>if count_filter is not None:<EOL><INDENT>payload[PARAMETERS['<STR_LIT>']] = count_filter<EOL><DEDENT>if not (payload):<EOL><INDENT>payload = None<EOL><DEDENT>if not self.token:<EOL><INDENT>auth = (self.username, self.password)<EOL><DEDENT>else:<EOL><INDENT>auth = None<EOL><DEDENT>try:<EOL><INDENT>if request_method.upper() == '<STR_LIT:GET>':<EOL><INDENT>r = self._session.get(url, params=payload,<EOL>verify=self.ssl_verify,<EOL>cert=(self.ssl_cert, self.ssl_key),<EOL>timeout=self.timeout,<EOL>auth=auth)<EOL><DEDENT>elif request_method.upper() == '<STR_LIT:POST>':<EOL><INDENT>r = self._session.post(url,<EOL>data=json.dumps(payload, default=str),<EOL>verify=self.ssl_verify,<EOL>cert=(self.ssl_cert, self.ssl_key),<EOL>timeout=self.timeout,<EOL>auth=auth)<EOL><DEDENT>else:<EOL><INDENT>log.error(\"<STR_LIT>\".format(<EOL>request_method))<EOL>raise APIError<EOL><DEDENT>r.raise_for_status()<EOL>if '<STR_LIT>' in 
r.headers:<EOL><INDENT>self.last_total = r.headers['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>self.last_total = None<EOL><DEDENT>json_body = r.json()<EOL>if json_body is not None:<EOL><INDENT>return json_body<EOL><DEDENT>else:<EOL><INDENT>del json_body<EOL>raise EmptyResponseError<EOL><DEDENT><DEDENT>except requests.exceptions.Timeout:<EOL><INDENT>log.error(\"<STR_LIT>\".format(ERROR_STRINGS['<STR_LIT>'],<EOL>self.host, self.port,<EOL>self.protocol.upper()))<EOL>raise<EOL><DEDENT>except requests.exceptions.ConnectionError:<EOL><INDENT>log.error(\"<STR_LIT>\".format(ERROR_STRINGS['<STR_LIT>'],<EOL>self.host, self.port,<EOL>self.protocol.upper()))<EOL>raise<EOL><DEDENT>except requests.exceptions.HTTPError as err:<EOL><INDENT>log.error(\"<STR_LIT>\".format(err.response.text,<EOL>self.host, self.port,<EOL>self.protocol.upper()))<EOL>raise<EOL><DEDENT>", "docstring": "This method actually querries PuppetDB. Provided an endpoint and an\n        optional path and/or query it will fire a request at PuppetDB. If\n        PuppetDB can be reached and answers within the timeout we'll decode\n        the response and give it back or raise for the HTTP Status Code\n        PuppetDB gave back.\n\n        :param endpoint: The PuppetDB API endpoint we want to query.\n        :type endpoint: :obj:`string`\n        :param path: An additional path if we don't wish to query the\\\n                bare endpoint.\n        :type path: :obj:`string`\n        :param query: (optional) A query to further narrow down the resultset.\n        :type query: :obj:`string`\n        :param order_by: (optional) Set the order parameters for the resultset.\n        :type order_by: :obj:`string`\n        :param limit: (optional) Tell PuppetDB to limit it's response to this\\\n                number of objects.\n        :type limit: :obj:`int`\n        :param offset: (optional) Tell PuppetDB to start it's response from\\\n                the given offset. 
This is useful for implementing pagination\\\n                but is not supported just yet.\n        :type offset: :obj:`string`\n        :param include_total: (optional) Include the total number of results\n        :type order_by: :obj:`bool`\n        :param summarize_by: (optional) Specify what type of object you'd like\\\n                to see counts at the event-counts and aggregate-event-counts \\\n                endpoints\n        :type summarize_by: :obj:`string`\n        :param count_by: (optional) Specify what type of object is counted\n        :type count_by: :obj:`string`\n        :param count_filter: (optional) Specify a filter for the results\n        :type count_filter: :obj:`string`\n\n        :raises: :class:`~pypuppetdb.errors.EmptyResponseError`\n\n        :returns: The decoded response from PuppetDB\n        :rtype: :obj:`dict` or :obj:`list`", "id": "f4028:c0:m6"}
{"signature": "def event_counts(self, summarize_by, **kwargs):", "body": "return self._query('<STR_LIT>',<EOL>summarize_by=summarize_by,<EOL>**kwargs)<EOL>", "docstring": "Get event counts from puppetdb.\n\n        :param summarize_by: (Required) The object type to be counted on.\n                             Valid values are 'containing_class', 'resource'\n                             and 'certname'.\n        :type summarize_by: :obj:`string`\n        :param count_by: (Optional) The object type that is counted when\n                         building the counts of 'successes', 'failures',\n                         'noops' and 'skips'. Support values are 'certname'\n                         and 'resource' (default)\n        :type count_by: :obj:`string`\n        :param count_filter: (Optional) A JSON query that is applied to the\n                             event-counts output but before the results are\n                             aggregated. Supported operators are `=`, `>`,\n                             `<`, `>=`, and `<=`. Supported fields are\n                             `failures`, `successes`, `noops`, and `skips`.\n        :type count_filter: :obj:`string`\n        :param \\*\\*kwargs: The rest of the keyword arguments are passed\n                           to the _query function.\n\n        :returns: A list of dictionaries containing the results.\n        :rtype: :obj:`list`", "id": "f4028:c0:m19"}
{"signature": "def current_version(self):", "body": "return self._query('<STR_LIT:version>')['<STR_LIT:version>']<EOL>", "docstring": "Get version information about the running PuppetDB server.\n\n        :returns: A string representation of the PuppetDB version.\n        :rtype: :obj:`string`", "id": "f4028:c0:m22"}
{"signature": "def fact_names(self):", "body": "return self._query('<STR_LIT>')<EOL>", "docstring": "Get a list of all known facts.", "id": "f4028:c0:m23"}
{"signature": "def _url(self, endpoint, path=None):", "body": "log.debug('<STR_LIT>'.format(<EOL>endpoint, path))<EOL>try:<EOL><INDENT>endpoint = ENDPOINTS[endpoint]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise APIError<EOL><DEDENT>url = '<STR_LIT>'.format(<EOL>base_url=self.base_url,<EOL>endpoint=endpoint,<EOL>)<EOL>if path is not None:<EOL><INDENT>url = '<STR_LIT>'.format(url, quote(path))<EOL><DEDENT>return url<EOL>", "docstring": "The complete URL we will end up querying. Depending on the\n        endpoint we pass in  this will result in different URL's with\n        different prefixes.\n\n        :param endpoint: The PuppetDB API endpoint we want to query.\n        :type endpoint: :obj:`string`\n        :param path: An additional path if we don't wish to query the\\\n                bare endpoint.\n        :type path: :obj:`string`\n\n        :returns: A URL constructed from :func:`base_url` with the\\\n                apropraite API version/prefix and the rest of the path added\\\n                to it.\n        :rtype: :obj:`string`", "id": "f4028:c0:m5"}
{"signature": "def nodes(self, unreported=<NUM_LIT:2>, with_status=False, **kwargs):", "body": "nodes = self._query('<STR_LIT>', **kwargs)<EOL>now = datetime.datetime.utcnow()<EOL>if type(nodes) == dict:<EOL><INDENT>nodes = [nodes, ]<EOL><DEDENT>if with_status:<EOL><INDENT>latest_events = self.event_counts(<EOL>query=EqualsOperator(\"<STR_LIT>\", True),<EOL>summarize_by='<STR_LIT>'<EOL>)<EOL><DEDENT>for node in nodes:<EOL><INDENT>node['<STR_LIT>'] = None<EOL>node['<STR_LIT>'] = None<EOL>if with_status:<EOL><INDENT>status = [s for s in latest_events<EOL>if s['<STR_LIT>']['<STR_LIT:title>'] == node['<STR_LIT>']]<EOL>try:<EOL><INDENT>node['<STR_LIT>'] = node['<STR_LIT>']<EOL>if status:<EOL><INDENT>node['<STR_LIT>'] = status[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>if status:<EOL><INDENT>node['<STR_LIT>'] = status = status[<NUM_LIT:0>]<EOL>if status['<STR_LIT>'] > <NUM_LIT:0>:<EOL><INDENT>node['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if status['<STR_LIT>'] > <NUM_LIT:0>:<EOL><INDENT>node['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if status['<STR_LIT>'] > <NUM_LIT:0>:<EOL><INDENT>node['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>node['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT><DEDENT>if node['<STR_LIT>'] is not None:<EOL><INDENT>try:<EOL><INDENT>last_report = json_to_datetime(<EOL>node['<STR_LIT>'])<EOL>last_report = last_report.replace(tzinfo=None)<EOL>unreported_border = now - timedelta(hours=unreported)<EOL>if last_report < unreported_border:<EOL><INDENT>delta = (now - last_report)<EOL>node['<STR_LIT>'] = True<EOL>node['<STR_LIT>'] = '<STR_LIT>'.format(<EOL>delta.days,<EOL>int(delta.seconds / <NUM_LIT>),<EOL>int((delta.seconds % <NUM_LIT>) / <NUM_LIT>)<EOL>)<EOL><DEDENT><DEDENT>except AttributeError:<EOL><INDENT>node['<STR_LIT>'] = True<EOL><DEDENT><DEDENT>if not node['<STR_LIT>']:<EOL><INDENT>node['<STR_LIT>'] = True<EOL><DEDENT><DEDENT>yield 
Node(self,<EOL>name=node['<STR_LIT>'],<EOL>deactivated=node['<STR_LIT>'],<EOL>expired=node['<STR_LIT>'],<EOL>report_timestamp=node['<STR_LIT>'],<EOL>catalog_timestamp=node['<STR_LIT>'],<EOL>facts_timestamp=node['<STR_LIT>'],<EOL>status_report=node['<STR_LIT>'],<EOL>noop=node.get('<STR_LIT>'),<EOL>noop_pending=node.get('<STR_LIT>'),<EOL>events=node['<STR_LIT>'],<EOL>unreported=node.get('<STR_LIT>'),<EOL>unreported_time=node.get('<STR_LIT>'),<EOL>report_environment=node['<STR_LIT>'],<EOL>catalog_environment=node['<STR_LIT>'],<EOL>facts_environment=node['<STR_LIT>'],<EOL>latest_report_hash=node.get('<STR_LIT>'),<EOL>cached_catalog_status=node.get('<STR_LIT>')<EOL>)<EOL><DEDENT>", "docstring": "Query for nodes by either name or query. If both aren't\n        provided this will return a list of all nodes. This method\n        also fetches the nodes status and event counts of the latest\n        report from puppetdb.\n\n        :param with_status: (optional) include the node status in the\\\n                           returned nodes\n        :type with_status: :bool:\n        :param unreported: (optional) amount of hours when a node gets\n                           marked as unreported\n        :type unreported: :obj:`None` or integer\n        :param \\*\\*kwargs: The rest of the keyword arguments are passed\n                           to the _query function\n\n        :returns: A generator yieling Nodes.\n        :rtype: :class:`pypuppetdb.types.Node`", "id": "f4028:c0:m7"}
{"signature": "@property<EOL><INDENT>def version(self):<DEDENT>", "body": "return self.api_version<EOL>", "docstring": "The version of the API we're querying against.\n\n        :returns: Current API version.\n        :rtype: :obj:`string`", "id": "f4028:c0:m1"}
{"signature": "def catalog(self, node):", "body": "catalogs = self.catalogs(path=node)<EOL>return next(x for x in catalogs)<EOL>", "docstring": "Get the available catalog for a given node.\n\n        :param node: (Required) The name of the PuppetDB node.\n        :type: :obj:`string`\n\n        :returns: An instance of Catalog\n        :rtype: :class:`pypuppetdb.types.Catalog`", "id": "f4028:c0:m16"}
{"signature": "def node(self, name):", "body": "nodes = self.nodes(path=name)<EOL>return next(node for node in nodes)<EOL>", "docstring": "Gets a single node from PuppetDB.\n\n        :param name: The name of the node search.\n        :type name: :obj:`string`\n\n        :return: An instance of Node\n        :rtype: :class:`pypuppetdb.types.Node`", "id": "f4028:c0:m8"}
{"signature": "def metric(self, metric=None):", "body": "return self._query('<STR_LIT>', path=metric)<EOL>", "docstring": "Query for a specific metric.\n\n        :param metric: The name of the metric we want.\n        :type metric: :obj:`string`\n\n        :returns: The return of :meth:`~pypuppetdb.api.BaseAPI._query`.", "id": "f4028:c0:m24"}
{"signature": "def recursive_glob(base_directory, regex=None):", "body": "if regex is None:<EOL><INDENT>regex = '<STR_LIT>'<EOL><DEDENT>files = glob(os.path.join(base_directory, regex))<EOL>for path, dirlist, filelist in os.walk(base_directory):<EOL><INDENT>for ignored in IGNORE:<EOL><INDENT>try:<EOL><INDENT>dirlist.remove(ignored)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>for dir_name in dirlist:<EOL><INDENT>files.extend(glob(os.path.join(path, dir_name, regex)))<EOL><DEDENT><DEDENT>return files<EOL>", "docstring": "Uses glob to find all files that match the regex in base_directory.\n\n    @param base_directory: string\n\n    @param regex: string\n\n    @return: list", "id": "f4053:m1"}
{"signature": "def get_requirements(*args):", "body": "install_deps = []<EOL>try:<EOL><INDENT>for fpath in args:<EOL><INDENT>install_deps.extend([str(d.req or d.url) for d in parse_requirements(fpath)])<EOL><DEDENT><DEDENT>except:<EOL><INDENT>print('<STR_LIT>'.format(fpath))<EOL><DEDENT>return [dep for dep in install_deps if dep != '<STR_LIT:None>']<EOL>", "docstring": "Parse all requirements files given and return a list of the dependencies", "id": "f4053:m0"}
{"signature": "def read_xl(xl_path: str):", "body": "xl_path, choice = _check_xl_path(xl_path)<EOL>reader = XL_READERS[choice]<EOL>return reader(xl_path)<EOL>", "docstring": "Return the workbook from the Excel file in `xl_path`.", "id": "f4056:m4"}
{"signature": "def concat_sheets(xl_path: str, sheetnames=None, add_tab_names=False):", "body": "xl_path, choice = _check_xl_path(xl_path)<EOL>if sheetnames is None:<EOL><INDENT>sheetnames = get_sheet_list(xl_path)<EOL><DEDENT>sheets = pd.read_excel(xl_path, sheetname=sheetnames)<EOL>if add_tab_names:<EOL><INDENT>for tab in sheets:<EOL><INDENT>sheets[tab]['<STR_LIT>'] = [tab] * len(sheets[tab])<EOL><DEDENT><DEDENT>return pd.concat([sheets[tab] for tab in sheets])<EOL>", "docstring": "Return a pandas DataFrame with the concat'ed\n    content of the `sheetnames` from the Excel file in\n    `xl_path`.\n\n    Parameters\n    ----------\n    xl_path: str\n        Path to the Excel file\n\n    sheetnames: list of str\n        List of existing sheet names of `xl_path`.\n        If None, will use all sheets from `xl_path`.\n\n    add_tab_names: bool\n        If True will add a 'Tab' column which says from which\n        tab the row comes from.\n\n    Returns\n    -------\n    df: pandas.DataFrame", "id": "f4056:m6"}
{"signature": "def duplicated(values: Sequence):", "body": "vals = pd.Series(values)<EOL>return vals[vals.duplicated()]<EOL>", "docstring": "Return the duplicated items in `values`", "id": "f4056:m10"}
{"signature": "def _check_cols(df, col_names):", "body": "for col in col_names:<EOL><INDENT>if not hasattr(df, col):<EOL><INDENT>raise AttributeError(\"<STR_LIT>\".format(col,<EOL>df.columns))<EOL><DEDENT><DEDENT>", "docstring": "Raise an AttributeError if `df` does not have a column named as an item of\n    the list of strings `col_names`.", "id": "f4056:m7"}
{"signature": "def _openpyxl_read_xl(xl_path: str):", "body": "try:<EOL><INDENT>wb = load_workbook(filename=xl_path, read_only=True)<EOL><DEDENT>except:<EOL><INDENT>raise<EOL><DEDENT>else:<EOL><INDENT>return wb<EOL><DEDENT>", "docstring": "Use openpyxl to read an Excel file.", "id": "f4056:m0"}
{"signature": "def get_sheet_list(xl_path: str) -> List:", "body": "wb = read_xl(xl_path)<EOL>if hasattr(wb, '<STR_LIT>'):<EOL><INDENT>return wb.sheetnames<EOL><DEDENT>else:<EOL><INDENT>return wb.sheet_names()<EOL><DEDENT>", "docstring": "Return a list with the name of the sheets in\n    the Excel file in `xl_path`.", "id": "f4056:m5"}
{"signature": "def col_values(df, col_name):", "body": "_check_cols(df, [col_name])<EOL>if '<STR_LIT:O>' in df[col_name] or pd.np.issubdtype(df[col_name].dtype, str): <EOL><INDENT>return [nom.lower() for nom in df[pd.notnull(df)][col_name] if not pd.isnull(nom)]<EOL><DEDENT>else:<EOL><INDENT>return [nom for nom in df[pd.notnull(df)][col_name] if not pd.isnull(nom)]<EOL><DEDENT>", "docstring": "Return a list of not null values from the `col_name` column of `df`.", "id": "f4056:m8"}
{"signature": "def duplicated_rows(df, col_name):", "body": "_check_cols(df, [col_name])<EOL>dups = df[pd.notnull(df[col_name]) & df.duplicated(subset=[col_name])]<EOL>return dups<EOL>", "docstring": "Return a DataFrame with the duplicated values of the column `col_name`\n    in `df`.", "id": "f4056:m9"}
{"signature": "def get_last_created_file(input_dir, glob_pattern='<STR_LIT:*>'):", "body": "return get_last_file(input_dir, glob_pattern, key=op.getctime)<EOL>", "docstring": "Return the path to the last created file in `input_dir`.\n    See `get_last_file` docstring for description of the parameters.", "id": "f4057:m16"}
{"signature": "def recursive_glob(base_directory, regex='<STR_LIT>'):", "body": "files = glob(op.join(base_directory, regex))<EOL>for path, dirlist, filelist in os.walk(base_directory):<EOL><INDENT>for dir_name in dirlist:<EOL><INDENT>files.extend(glob(op.join(path, dir_name, regex)))<EOL><DEDENT><DEDENT>return files<EOL>", "docstring": "Uses glob to find all files or folders that match the regex\nstarting from the base_directory.\n\nParameters\n----------\nbase_directory: str\n\nregex: str\n\nReturns\n-------\nfiles: list", "id": "f4057:m11"}
{"signature": "def find_match(base_directory, regex='<STR_LIT>'):", "body": "return glob(op.join(base_directory, regex))<EOL>", "docstring": "Uses glob to find all files that match the regex\nin base_directory.\n\n@param base_directory: string\n\n@param regex: string\n\n@return: list", "id": "f4057:m10"}
{"signature": "def get_file_list(file_dir, regex='<STR_LIT>'):", "body": "file_list = os.listdir(file_dir)<EOL>file_list.sort()<EOL>if regex:<EOL><INDENT>file_list = search_list(file_list, regex)<EOL><DEDENT>file_list = [op.join(file_dir, fname) for fname in file_list]<EOL>return file_list<EOL>", "docstring": "Creates a list of files that match the search_regex within file_dir.\nThe list of files will have file_dir as path prefix.\n\nParameters\n----------\n@param file_dir:\n\n@param search_regex:\n\nReturns:\n--------\nList of paths to files that match the search_regex", "id": "f4057:m4"}
{"signature": "def recursive_dir_match(folder_path, regex='<STR_LIT>'):", "body": "outlist = []<EOL>for root, dirs, files in os.walk(folder_path):<EOL><INDENT>outlist.extend([op.join(root, f) for f in dirs<EOL>if re.match(regex, f)])<EOL><DEDENT>return outlist<EOL>", "docstring": "Returns absolute paths of folders that match the regex within folder_path and\nall its children folders.\n\nNote: The regex matching is done using the match function\nof the re module.\n\nParameters\n----------\nfolder_path: string\n\nregex: string\n\nReturns\n-------\nA list of strings.", "id": "f4057:m3"}
{"signature": "def recursive_find_match(folder_path, regex='<STR_LIT>'):", "body": "outlist = []<EOL>for root, dirs, files in os.walk(folder_path):<EOL><INDENT>outlist.extend([op.join(root, f) for f in files<EOL>if re.match(regex, f)])<EOL><DEDENT>return outlist<EOL>", "docstring": "Returns absolute paths of files that match the regex within folder_path and\nall its children folders.\n\nNote: The regex matching is done using the match function\nof the re module.\n\nParameters\n----------\nfolder_path: string\n\nregex: string\n\nReturns\n-------\nA list of strings.", "id": "f4057:m6"}
{"signature": "def recursive_find_search(folder_path, regex='<STR_LIT>'):", "body": "outlist = []<EOL>for root, dirs, files in os.walk(folder_path):<EOL><INDENT>outlist.extend([op.join(root, f) for f in files<EOL>if re.search(regex, f)])<EOL><DEDENT>return outlist<EOL>", "docstring": "Returns absolute paths of files that match the regex within file_dir and\nall its children folders.\n\nNote: The regex matching is done using the search function\nof the re module.\n\nParameters\n----------\nfolder_path: string\n\nregex: string\n\nReturns\n-------\nA list of strings.", "id": "f4057:m7"}
{"signature": "def get_common_filepath(self, nodepath):", "body": "return commonprefix(self.get_node_filepaths(nodepath))<EOL>", "docstring": "Returns the common filepath between all leaves in the filetree.", "id": "f4058:c1:m11"}
{"signature": "def remove_hidden_files(file_lst):", "body": "return [fnom for fnom in file_lst if not fnom.startswith('<STR_LIT:.>')]<EOL>", "docstring": "Removes the filenames that start with '.'\n\n:param file_lst: list of strings\n\n:return: list of strings", "id": "f4058:m1"}
{"signature": "@staticmethod<EOL><INDENT>def _import_config(filepath):<DEDENT>", "body": "if not op.isfile(filepath):<EOL><INDENT>raise IOError('<STR_LIT>'<EOL>'<STR_LIT>'.format(filepath))<EOL><DEDENT>cfg = import_pyfile(filepath)<EOL>if not hasattr(cfg, '<STR_LIT>'):<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>if not hasattr(cfg, '<STR_LIT>'):<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>return cfg.root_path, cfg.filetree<EOL>", "docstring": "Imports filetree and root_path variable values from the filepath.\n\n:param filepath:\n:return: root_path and filetree", "id": "f4058:c1:m7"}
{"signature": "def get_root_nodes(self):", "body": "return self._filetree.keys()<EOL>", "docstring": "Return a list of the names of the root nodes.", "id": "f4058:c1:m8"}
{"signature": "def remove_nodes(self, pattern, adict):", "body": "mydict = self._filetree if adict is None else adict<EOL>if isinstance(mydict, dict):<EOL><INDENT>for nom in mydict.keys():<EOL><INDENT>if isinstance(mydict[nom], dict):<EOL><INDENT>matchs = filter_list(mydict[nom], pattern)<EOL>for nom in matchs:<EOL><INDENT>mydict = self.remove_nodes(pattern, mydict[nom])<EOL>mydict.pop(nom)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>mydict[nom] = filter_list(mydict[nom], pattern)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>matchs = set(filter_list(mydict, pattern))<EOL>mydict = set(mydict) - matchs<EOL><DEDENT>return mydict<EOL>", "docstring": "Remove the nodes that match the pattern.", "id": "f4058:c1:m12"}
{"signature": "def filter_list(lst, pattern):", "body": "if is_fnmatch_regex(pattern) and not is_regex(pattern):<EOL><INDENT>log.info('<STR_LIT>'.format(pattern))<EOL>filst = fnmatch.filter(lst, pattern)<EOL><DEDENT>else:<EOL><INDENT>log.info('<STR_LIT>'.format(pattern))<EOL>filst = match_list(lst, pattern)<EOL><DEDENT>if filst:<EOL><INDENT>filst.sort()<EOL><DEDENT>return filst<EOL>", "docstring": "Filters the lst using pattern.\nIf pattern starts with '(' it will be considered a re regular expression,\notherwise it will use fnmatch filter.\n\n:param lst: list of strings\n\n:param pattern: string\n\n:return: list of strings\nFiltered list of strings", "id": "f4058:m0"}
{"signature": "def get_possible_paths(base_path, path_regex):", "body": "if not path_regex:<EOL><INDENT>return []<EOL><DEDENT>if len(path_regex) < <NUM_LIT:1>:<EOL><INDENT>return []<EOL><DEDENT>if path_regex[<NUM_LIT:0>] == os.sep:<EOL><INDENT>path_regex = path_regex[<NUM_LIT:1>:]<EOL><DEDENT>rest_files = '<STR_LIT>'<EOL>if os.sep in path_regex:<EOL><INDENT>node_names = path_regex.partition(os.sep)<EOL>first_node = node_names[<NUM_LIT:0>]<EOL>rest_nodes = node_names[<NUM_LIT:2>]<EOL>folder_names = filter_list(os.listdir(base_path), first_node)<EOL>for nom in folder_names:<EOL><INDENT>new_base = op.join(base_path, nom)<EOL>if op.isdir(new_base):<EOL><INDENT>rest_files = get_possible_paths(new_base, rest_nodes)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rest_files = filter_list(os.listdir(base_path), path_regex)<EOL><DEDENT>files = []<EOL>if rest_files:<EOL><INDENT>files = [op.join(base_path, f) for f in rest_files]<EOL><DEDENT>return files<EOL>", "docstring": "Looks for path_regex within base_path. Each match is append\nin the returned list.\npath_regex may contain subfolder structure.\nIf any part of the folder structure is a\n\n:param base_path: str\n\n:param path_regex: str\n\n:return list of strings", "id": "f4058:m5"}
{"signature": "def populate_subtree(basepath, treemap, verbose=False):", "body": "file_nodes = OrderedDict()<EOL>if isinstance(treemap, tuple):<EOL><INDENT>try:<EOL><INDENT>file_nodes = process_tuple_node(basepath, treemap)<EOL><DEDENT>except:<EOL><INDENT>raise FileTreeMapError('<STR_LIT>'<EOL>'<STR_LIT>'.format(basepath, treemap))<EOL><DEDENT><DEDENT>if isinstance(treemap, list):<EOL><INDENT>for node in treemap:<EOL><INDENT>try:<EOL><INDENT>file_nodes.update(process_tuple_node(basepath, node))<EOL><DEDENT>except:<EOL><INDENT>raise FileTreeMapError('<STR_LIT>'<EOL>'<STR_LIT>'.format(basepath, node))<EOL><DEDENT><DEDENT><DEDENT>elif isinstance(treemap, dict):<EOL><INDENT>for k in treemap.keys():<EOL><INDENT>cname = k<EOL>child_map = treemap[k]<EOL>if isinstance(child_map, tuple) or isinstance(child_map, dict):<EOL><INDENT>try:<EOL><INDENT>file_nodes[cname] = populate_subtree(basepath, child_map)<EOL><DEDENT>except:<EOL><INDENT>raise FileTreeMapError('<STR_LIT>'<EOL>'<STR_LIT>'.format(basepath,<EOL>child_map))<EOL><DEDENT><DEDENT>elif isinstance(child_map, str):<EOL><INDENT>if child_map[<NUM_LIT:0>] == os.sep:<EOL><INDENT>raise FileTreeMapError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(str(child_map),<EOL>os.sep))<EOL><DEDENT>subpaths = get_possible_paths(basepath, child_map)<EOL>if subpaths:<EOL><INDENT>file_nodes[cname] = subpaths<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if verbose:<EOL><INDENT>log.info('<STR_LIT>'.format(basepath, file_nodes.keys()))<EOL><DEDENT>return file_nodes<EOL>", "docstring": ":param path: str\n\n:param treemap: dict\n\n:return: dict", "id": "f4058:m7"}
{"signature": "def get_dict_leaves(data):", "body": "result = []<EOL>if isinstance(data, dict):<EOL><INDENT>for item in data.values():<EOL><INDENT>result.extend(get_dict_leaves(item))<EOL><DEDENT><DEDENT>elif isinstance(data, list):<EOL><INDENT>result.extend(data)<EOL><DEDENT>else:<EOL><INDENT>result.append(data)<EOL><DEDENT>return result<EOL>", "docstring": "Given a nested dictionary, this returns all its leaf elements in a list.\n\n:param data:\n\n:return: list", "id": "f4058:m4"}
{"signature": "@staticmethod<EOL><INDENT>def create_folder(dirpath, overwrite=False):<DEDENT>", "body": "if not overwrite:<EOL><INDENT>while op.exists(dirpath):<EOL><INDENT>dirpath += '<STR_LIT:+>'<EOL><DEDENT><DEDENT>os.makedirs(dirpath, exist_ok=overwrite)<EOL>return dirpath<EOL>", "docstring": "Will create dirpath folder. If dirpath already exists and overwrite is False,\n        will append a '+' suffix to dirpath until dirpath does not exist.", "id": "f4058:c1:m6"}
{"signature": "def get_node_filepaths(self, nodepath):", "body": "files = self.get_node(nodepath)<EOL>return get_dict_leaves(files)<EOL>", "docstring": "Returns all leaves in filetree.", "id": "f4058:c1:m10"}
{"signature": "def count_node_match(self, pattern, adict=None):", "body": "mydict = self._filetree if adict is None else adict<EOL>k = <NUM_LIT:0><EOL>if isinstance(mydict, dict):<EOL><INDENT>names = mydict.keys()<EOL>k += len(filter_list(names, pattern))<EOL>for nom in names:<EOL><INDENT>k += self.count_node_match(pattern, mydict[nom])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>k = len(filter_list(mydict, pattern))<EOL><DEDENT>return k<EOL>", "docstring": "Return the number of nodes that match the pattern.\n\n:param pattern:\n\n:param adict:\n:return: int", "id": "f4058:c1:m13"}
{"signature": "def __init__(self, config_map):", "body": "for key in config_map:<EOL><INDENT>if config_map[key] == '<STR_LIT:None>':<EOL><INDENT>config_map[key] = None<EOL><DEDENT>setattr(self, key, config_map[key])<EOL><DEDENT>", "docstring": ":param config_map: dict", "id": "f4059:c0:m0"}
{"signature": "def ux_file_len(filepath):", "body": "p = subprocess.Popen(['<STR_LIT>', '<STR_LIT>', filepath], stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE)<EOL>result, err = p.communicate()<EOL>if p.returncode != <NUM_LIT:0>:<EOL><INDENT>raise IOError(err)<EOL><DEDENT>l = result.strip()<EOL>l = int(l.split()[<NUM_LIT:0>])<EOL>return l<EOL>", "docstring": "Returns the length of the file using the 'wc' GNU command\n\n    Parameters\n    ----------\n    filepath: str\n\n    Returns\n    -------\n    float", "id": "f4061:m15"}
{"signature": "def get_abspath(folderpath):", "body": "if not op.exists(folderpath):<EOL><INDENT>raise FolderNotFound(folderpath)<EOL><DEDENT>return op.abspath(folderpath)<EOL>", "docstring": "Returns the absolute path of folderpath.\n    If the path does not exist, will raise IOError.", "id": "f4061:m0"}
{"signature": "def remove_all(filelist, folder='<STR_LIT>'):", "body": "if not folder:<EOL><INDENT>for f in filelist:<EOL><INDENT>os.remove(f)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for f in filelist:<EOL><INDENT>os.remove(op.join(folder, f))<EOL><DEDENT><DEDENT>", "docstring": "Deletes all files in filelist\n\n    Parameters\n    ----------\n    filelist: list of str\n        List of the file paths to be removed\n\n    folder: str\n        Path to be used as common directory for all file paths in filelist", "id": "f4061:m11"}
{"signature": "def fileobj_size(file_obj):", "body": "file_obj.seek(<NUM_LIT:0>, os.SEEK_END)<EOL>return file_obj.tell()<EOL>", "docstring": "Returns the length of the size of the file\n\n    Parameters\n    ----------\n    file_obj: file-like object\n\n    Returns\n    -------\n    float", "id": "f4061:m18"}
{"signature": "def create_subjects_file(filelist, labels, output_file, split='<STR_LIT::>'):", "body": "if len(filelist) != len(labels):<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'.format(len(filelist), len(labels)))<EOL><DEDENT>lines = []<EOL>for i, subj in enumerate(filelist):<EOL><INDENT>lab  = labels[i]<EOL>line = subj + split + str(lab)<EOL>lines.append(line)<EOL><DEDENT>lines = np.array(lines)<EOL>np.savetxt(output_file, lines, fmt='<STR_LIT:%s>')<EOL>", "docstring": "Creates a file where each line is <subject_file>:<subject_class_label>.\n\n    Parameters\n    ----------\n    filelist: list of str\n    List of filepaths\n\n    labels: list of int, str or labels that can be transformed with str()\n    List of labels\n\n    output_file: str\n    Output file path\n\n    split: str\n    Split character for each line", "id": "f4061:m9"}
{"signature": "def parse_subjects_list(filepath, datadir='<STR_LIT>', split='<STR_LIT::>', labelsf=None):", "body": "labels = []<EOL>subjs  = []<EOL>if datadir:<EOL><INDENT>datadir += op.sep<EOL><DEDENT>with open(filepath, '<STR_LIT:r>') as f:<EOL><INDENT>for s in f:<EOL><INDENT>line = s.strip().split(split)<EOL>if len(line) == <NUM_LIT:2>:<EOL><INDENT>labels.append(np.float(line[<NUM_LIT:1>]))<EOL>subjf = line[<NUM_LIT:0>].strip()<EOL><DEDENT>else:<EOL><INDENT>subjf = line.strip()<EOL><DEDENT>if not op.isabs(subjf):<EOL><INDENT>subjs.append(datadir + subjf)<EOL><DEDENT>else:<EOL><INDENT>subjs.append(subjf)<EOL><DEDENT><DEDENT><DEDENT>if labelsf is not None:<EOL><INDENT>labels = np.loadtxt(labelsf)<EOL><DEDENT>return [labels, subjs]<EOL>", "docstring": "Parses a file with a list of: <subject_file>:<subject_class_label>.\n\n    Parameters\n    ----------\n    filepath: str\n    Path to file with a list of: <subject_file>:<subject_class_label>.\n    Where ':' can be any split character\n\n    datadir: str\n    String to be path prefix of each line of the fname content,\n    only in case the lines are relative file paths.\n\n    split: str\n    Split character for each line\n\n    labelsf: str\n    Path to file with a list of the labels if it is not included in\n    fname. It will overwrite the labels from fname.\n\n    Returns\n    -------\n    [labels, subjs] where labels is a list of labels and subjs a list of\n    filepaths", "id": "f4061:m8"}
{"signature": "def get_folder_subpath(path, folder_depth):", "body": "if path[<NUM_LIT:0>] == op.sep:<EOL><INDENT>folder_depth += <NUM_LIT:1><EOL><DEDENT>return op.sep.join(path.split(op.sep)[<NUM_LIT:0>:folder_depth])<EOL>", "docstring": "Returns a folder path of path with depth given by folder_depth:\n\nParameters\n----------\npath: str\n\nfolder_depth: int > 0\n\nReturns\n-------\nA folder path\n\nExample\n-------\n>>> get_folder_subpath('/home/user/mydoc/work/notes.txt', 3)\n>>> '/home/user/mydoc'", "id": "f4061:m12"}
{"signature": "def get_temp_file(dirpath=None, suffix='<STR_LIT>'):", "body": "return tempfile.NamedTemporaryFile(dir=dirpath, suffix=suffix)<EOL>", "docstring": "Uses tempfile to create a NamedTemporaryFile using\nthe default arguments.\n\nParameters\n----------\ndirpath: str\nDirectory where it must be created.\nIf dir is specified, the file will be created\nin that directory, otherwise, a default directory is used.\nThe default directory is chosen from a platform-dependent\nlist, but the user of the application can control the\ndirectory location by setting the TMPDIR, TEMP or TMP\nenvironment variables.\n\nsuffix: str\nFile name suffix.\nIt does not put a dot between the file name and the\nsuffix; if you need one, put it at the beginning of suffix.\n\nReturns\n-------\nfile object\n\nNote\n----\nPlease, close it once you have used the file.", "id": "f4061:m13"}
{"signature": "def add_extension_if_needed(filepath, ext, check_if_exists=False):", "body": "if not filepath.endswith(ext):<EOL><INDENT>filepath += ext<EOL><DEDENT>if check_if_exists:<EOL><INDENT>if not op.exists(filepath):<EOL><INDENT>raise IOError('<STR_LIT>' + filepath)<EOL><DEDENT><DEDENT>return filepath<EOL>", "docstring": "Add the extension ext to fpath if it doesn't have it.\n\n    Parameters\n    ----------\n    filepath: str\n    File name or path\n\n    ext: str\n    File extension\n\n    check_if_exists: bool\n\n    Returns\n    -------\n    File name or path with extension added, if needed.", "id": "f4061:m4"}
{"signature": "def grep_one(srch_str, filepath):", "body": "for line in open(filepath):<EOL><INDENT>if srch_str in line:<EOL><INDENT>return line<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Return the first line in file defined by filepath\n    that contains srch_str\n\n    Parameters\n    ----------\n    srch_str: str\n\n    filepath: str\n\n    Returns\n    ----------\n    str", "id": "f4061:m7"}
{"signature": "def join_path_to_filelist(path, filelist):", "body": "return [op.join(path, str(item)) for item in filelist]<EOL>", "docstring": "Joins path to each line in filelist\n\n    Parameters\n    ----------\n    path: str\n\n    filelist: list of str\n\n    Returns\n    -------\n    list of filepaths", "id": "f4061:m10"}
{"signature": "def write_lines(filepath, lines):", "body": "with open(filepath, '<STR_LIT:w>') as f:<EOL><INDENT>f.writelines(lines)<EOL><DEDENT>", "docstring": "Write the given lines to the file in filepath\n\n    Parameters\n    ----------\n    filepath: str\n\n    lines: list of str", "id": "f4061:m6"}
{"signature": "def append_dict_values(list_of_dicts, keys=None):", "body": "if keys is None:<EOL><INDENT>keys = list(list_of_dicts[<NUM_LIT:0>].keys())<EOL><DEDENT>dict_of_lists = DefaultOrderedDict(list)<EOL>for d in list_of_dicts:<EOL><INDENT>for k in keys:<EOL><INDENT>dict_of_lists[k].append(d[k])<EOL><DEDENT><DEDENT>return dict_of_lists<EOL>", "docstring": "Return a dict of lists from a list of dicts with the same keys.\nFor each dict in list_of_dicts with look for the values of the\ngiven keys and append it to the output dict.\n\nParameters\n----------\nlist_of_dicts: list of dicts\n\nkeys: list of str\n    List of keys to create in the output dict\n    If None will use all keys in the first element of list_of_dicts\nReturns\n-------\nDefaultOrderedDict of lists", "id": "f4062:m2"}
{"signature": "def create_dataset(self, ds_name, data, attrs=None, dtype=None):", "body": "if ds_name in self._datasets:<EOL><INDENT>ds = self._datasets[ds_name]<EOL>if ds.dtype != data.dtype:<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if dtype is None:<EOL><INDENT>dtype = data.dtype<EOL><DEDENT>ds = self._group.create_dataset(ds_name, data.shape,<EOL>dtype=dtype)<EOL>if attrs is not None:<EOL><INDENT>for key in attrs:<EOL><INDENT>setattr(ds.attrs, key, attrs[key])<EOL><DEDENT><DEDENT><DEDENT>ds.read_direct(data)<EOL>self._datasets[ds_name] = ds<EOL>return ds<EOL>", "docstring": "Saves a Numpy array in a dataset in the HDF file, registers it as\nds_name and returns the h5py dataset.\n\n:param ds_name: string\nRegistration name of the dataset to be registered.\n\n:param data: Numpy ndarray\n\n:param dtype: dtype\nDatatype of the dataset\n\n:return: h5py dataset", "id": "f4063:c0:m6"}
{"signature": "def __del__(self):", "body": "self._hdf_file.close()<EOL>if self._remove_on_destroy:<EOL><INDENT>os.remove(self._fname)<EOL><DEDENT>", "docstring": "Class destructor", "id": "f4063:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def get_temp_file(dir=None, suffix='<STR_LIT>'):<DEDENT>", "body": "return tempfile.NamedTemporaryFile(dir=dir, suffix=suffix)<EOL>", "docstring": "Uses tempfile to create a NamedTemporaryFile using\nthe default arguments.\n\n@param dir: string\nDirectory where it must be created.\nIf dir is specified, the file will be created\nin that directory, otherwise, a default directory is used.\nThe default directory is chosen from a platform-dependent\nlist, but the user of the application can control the\ndirectory location by setting the TMPDIR, TEMP or TMP\nenvironment variables.\n\n@param suffix: string\nFile name suffix.\nIt does not put a dot between the file name and the\nsuffix; if you need one, put it at the beginning of suffix.\n\n@return: file object\n\n@note:\nClose it once you have used the file.", "id": "f4063:c0:m1"}
{"signature": "def get(self, key):", "body": "node = self.get_node(key)<EOL>if node is None:<EOL><INDENT>raise KeyError('<STR_LIT>' % key)<EOL><DEDENT>if hasattr(node, '<STR_LIT>'):<EOL><INDENT>if '<STR_LIT>' in node.attrs:<EOL><INDENT>return self._read_group(node)<EOL><DEDENT><DEDENT>return self._read_array(node)<EOL>", "docstring": "Retrieve pandas object or group of Numpy ndarrays\nstored in file\n\nParameters\n----------\nkey : object\n\nReturns\n-------\nobj : type of object stored in file", "id": "f4063:c1:m3"}
{"signature": "def create_empty_dataset(self, ds_name, dtype=np.float32):", "body": "if ds_name in self._datasets:<EOL><INDENT>return self._datasets[ds_name]<EOL><DEDENT>ds = self._group.create_dataset(ds_name, (<NUM_LIT:1>, <NUM_LIT:1>), maxshape=None,<EOL>dtype=dtype)<EOL>self._datasets[ds_name] = ds<EOL>return ds<EOL>", "docstring": "Creates a Dataset with unknown size.\nResize it before using.\n\n:param ds_name: string\n\n:param dtype: dtype\nDatatype of the dataset\n\n:return: h5py DataSet", "id": "f4063:c0:m5"}
{"signature": "@staticmethod<EOL><INDENT>def _fill_missing_values(df, range_values, fill_value=<NUM_LIT:0>, fill_method=None):<DEDENT>", "body": "idx_colnames  = df.index.names<EOL>idx_colranges = [range_values[x] for x in idx_colnames]<EOL>fullindex = pd.Index([p for p in product(*idx_colranges)],<EOL>name=tuple(idx_colnames))<EOL>fulldf = df.reindex(index=fullindex, fill_value=fill_value,<EOL>method=fill_method)<EOL>fulldf.index.names = idx_colnames<EOL>return fulldf, idx_colranges<EOL>", "docstring": "Will get the names of the index colums of df, obtain their ranges from\nrange_values dict and return a reindexed version of df with the given\nrange values.\n\n:param df: pandas DataFrame\n\n:param range_values: dict or array-like\nMust contain for each index column of df an entry with all the values\nwithin the range of the column.\n\n:param fill_value: scalar or 'nearest', default 0\nValue to use for missing values. Defaults to 0, but can be any\n\"compatible\" value, e.g., NaN.\nThe 'nearest' mode will fill the missing value with the nearest value in\n the column.\n\n:param fill_method:  {'backfill', 'bfill', 'pad', 'ffill', None}, default None\nMethod to use for filling holes in reindexed DataFrame\n'pad' / 'ffill': propagate last valid observation forward to next valid\n'backfill' / 'bfill': use NEXT valid observation to fill gap\n\n:return: pandas Dataframe and used column ranges\nreindexed DataFrame and dict with index column ranges", "id": "f4063:c1:m2"}
{"signature": "def put(self, key, value, attrs=None, format=None, append=False, **kwargs):", "body": "if not isinstance(value, np.ndarray):<EOL><INDENT>super(NumpyHDFStore, self).put(key, value, format, append, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>group = self.get_node(key)<EOL>if group is not None and not append:<EOL><INDENT>self._handle.removeNode(group, recursive=True)<EOL>group = None<EOL><DEDENT>if group is None:<EOL><INDENT>paths = key.split('<STR_LIT:/>')<EOL>path = '<STR_LIT:/>'<EOL>for p in paths:<EOL><INDENT>if not len(p):<EOL><INDENT>continue<EOL><DEDENT>new_path = path<EOL>if not path.endswith('<STR_LIT:/>'):<EOL><INDENT>new_path += '<STR_LIT:/>'<EOL><DEDENT>new_path += p<EOL>group = self.get_node(new_path)<EOL>if group is None:<EOL><INDENT>group = self._handle.createGroup(path, p)<EOL><DEDENT>path = new_path<EOL><DEDENT><DEDENT>ds_name = kwargs.get('<STR_LIT>', self._array_dsname)<EOL>ds = self._handle.createArray(group, ds_name, value)<EOL>if attrs is not None:<EOL><INDENT>for key in attrs:<EOL><INDENT>setattr(ds.attrs, key, attrs[key])<EOL><DEDENT><DEDENT>self._handle.flush()<EOL>return ds<EOL><DEDENT>", "docstring": "Store object in HDFStore\n\nParameters\n----------\nkey : str\n\nvalue : {Series, DataFrame, Panel, Numpy ndarray}\n\nformat : 'fixed(f)|table(t)', default is 'fixed'\n    fixed(f) : Fixed format\n        Fast writing/reading. Not-appendable, nor searchable\n\n    table(t) : Table format\n        Write as a PyTables Table structure which may perform worse but allow more flexible operations\n        like searching/selecting subsets of the data\n\nappend : boolean, default False\n    This will force Table format, append the input data to the\n    existing.\n\nencoding : default None, provide an encoding for strings", "id": "f4063:c1:m4"}
{"signature": "def save_variables_to_shelve(file_path, variables):", "body": "mashelf = shelve.open(file_path, '<STR_LIT:n>')<EOL>for vn in variables.keys():<EOL><INDENT>try:<EOL><INDENT>mashelf[vn] = variables[vn]<EOL><DEDENT>except KeyError as ke:<EOL><INDENT>raise Exception('<STR_LIT>'.format(vn)) from ke<EOL><DEDENT><DEDENT>mashelf.close()<EOL>", "docstring": "Parameters\n----------\nfile_path: str\n\nvariables: dict\n    Dictionary with objects. Object name -> object\n\nNotes\n-----\n    Before calling this function, create a varlist this way:\n\n    shelfvars = []\n    for v in varnames:\n        shelfvars.append(eval(v))\n\n    #to_restore variables from shelf\n    my_shelf = shelve.open(filename)\n    for key in my_shelf:\n       globals()[key]=my_shelf[key]\n    my_shelf.close()", "id": "f4065:m2"}
{"signature": "@staticmethod<EOL><INDENT>def save_variables(filename, variables):<DEDENT>", "body": "ext = get_extension(filename).lower()<EOL>out_exts = {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}<EOL>output_file = filename<EOL>if not ext in out_exts:<EOL><INDENT>output_file = add_extension_if_needed(filename, '<STR_LIT>')<EOL>ext = get_extension(filename)<EOL><DEDENT>if ext == '<STR_LIT>' or ext == '<STR_LIT>':<EOL><INDENT>save_variables_to_shelve(output_file, variables)<EOL><DEDENT>elif ext == '<STR_LIT>':<EOL><INDENT>save_variables_to_mat(output_file, variables)<EOL><DEDENT>elif ext == '<STR_LIT>' or ext == '<STR_LIT>':<EOL><INDENT>from .hdf5 import save_variables_to_hdf5<EOL>save_variables_to_hdf5(output_file, variables)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(ext))<EOL><DEDENT>", "docstring": "Save given variables in a file.\n        Valid extensions: '.pyshelf' or '.shelf' (Python shelve)\n                          '.mat' (Matlab archive),\n                          '.hdf5' or '.h5' (HDF5 file)\n\n        Parameters\n        ----------\n        filename: str\n            Output file path.\n\n        variables: dict\n            Dictionary varname -> variable\n\n        Raises\n        ------\n        ValueError: if the extension of the filesname is not recognized.", "id": "f4065:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def save_varlist(filename, varnames, varlist):<DEDENT>", "body": "variables = {}<EOL>for i, vn in enumerate(varnames):<EOL><INDENT>variables[vn] = varlist[i]<EOL><DEDENT>ExportData.save_variables(filename, variables)<EOL>", "docstring": "Valid extensions '.pyshelf', '.mat', '.hdf5' or '.h5'\n\n@param filename: string\n\n@param varnames: list of strings\nNames of the variables\n\n@param varlist: list of objects\nThe objects to be saved", "id": "f4065:c0:m2"}
{"signature": "def get_group_names(h5file, h5path='<STR_LIT:/>'):", "body": "return _get_node_names(h5file, h5path, node_type=h5py.Group)<EOL>", "docstring": "Return the groups names within h5file/h5path\n\n    Parameters\n    ----------\n    h5file: h5py.File or path to hdf5 file\n        HDF5 file object\n\n    h5path: str\n        HDF5 group path to get the group names from\n\n    Returns\n    -------\n    groupnames: list of str\n        List of group names", "id": "f4066:m2"}
{"signature": "def _get_node_names(h5file, h5path='<STR_LIT:/>', node_type=h5py.Dataset):", "body": "if isinstance(h5file, str):<EOL><INDENT>_h5file = get_h5file(h5file, mode='<STR_LIT:r>')<EOL><DEDENT>else:<EOL><INDENT>_h5file = h5file<EOL><DEDENT>if not h5path.startswith('<STR_LIT:/>'):<EOL><INDENT>h5path = '<STR_LIT:/>' + h5path<EOL><DEDENT>names = []<EOL>try:<EOL><INDENT>h5group = _h5file.require_group(h5path)<EOL>for node in _hdf5_walk(h5group, node_type=node_type):<EOL><INDENT>names.append(node.name)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>raise RuntimeError('<STR_LIT>'.format(_h5file.filename, h5path))<EOL><DEDENT>finally:<EOL><INDENT>if isinstance(h5file, str):<EOL><INDENT>_h5file.close()<EOL><DEDENT><DEDENT>return names<EOL>", "docstring": "Return the node of type node_type names within h5path of h5file.\n\n    Parameters\n    ----------\n    h5file: h5py.File\n        HDF5 file object\n\n    h5path: str\n        HDF5 group path to get the group names from\n\n    node_type: h5py object type\n        HDF5 object type\n\n    Returns\n    -------\n    names: list of str\n        List of names", "id": "f4066:m7"}
{"signature": "def get_h5file(file_path, mode='<STR_LIT:r>'):", "body": "if not op.exists(file_path):<EOL><INDENT>raise IOError('<STR_LIT>'.format(file_path))<EOL><DEDENT>try:<EOL><INDENT>h5file = h5py.File(file_path, mode=mode)<EOL><DEDENT>except:<EOL><INDENT>raise<EOL><DEDENT>else:<EOL><INDENT>return h5file<EOL><DEDENT>", "docstring": "Return the h5py.File given its file path.\n\n    Parameters\n    ----------\n    file_path: string\n        HDF5 file path\n\n    mode: string\n        r   Readonly, file must exist\n        r+  Read/write, file must exist\n        w   Create file, truncate if exists\n        w-  Create file, fail if exists\n        a   Read/write if exists, create otherwise (default)\n\n    Returns\n    -------\n    h5file: h5py.File", "id": "f4066:m1"}
{"signature": "def _get_nodes(h5file, h5path='<STR_LIT:/>', node_type=h5py.Dataset):", "body": "if isinstance(h5file, str):<EOL><INDENT>_h5file = get_h5file(h5file, mode='<STR_LIT:r>')<EOL><DEDENT>else:<EOL><INDENT>_h5file = h5file<EOL><DEDENT>if not h5path.startswith('<STR_LIT:/>'):<EOL><INDENT>h5path = '<STR_LIT:/>' + h5path<EOL><DEDENT>names = []<EOL>try:<EOL><INDENT>h5group = _h5file.require_group(h5path)<EOL>for node in _hdf5_walk(h5group, node_type=node_type):<EOL><INDENT>names.append(node)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>raise RuntimeError('<STR_LIT>'.format(str(node_type), _h5file.filename, h5path))<EOL><DEDENT>finally:<EOL><INDENT>if isinstance(h5file, str):<EOL><INDENT>_h5file.close()<EOL><DEDENT><DEDENT>return names<EOL>", "docstring": "Return the nodes within h5path of the h5file.\n\n    Parameters\n    ----------\n    h5file: h5py.File\n        HDF5 file object\n\n    h5path: str\n        HDF5 group path to get the nodes from\n\n    node_type: h5py object type\n        The type of the nodes that you want to get\n\n    Returns\n    -------\n    nodes: list of node_type objects", "id": "f4066:m8"}
{"signature": "@staticmethod<EOL><INDENT>def get_dcm_reader(store_metadata=True, header_fields=None):<DEDENT>", "body": "if not store_metadata:<EOL><INDENT>return lambda fpath: fpath<EOL><DEDENT>if header_fields is None:<EOL><INDENT>build_dcm = lambda fpath: DicomFile(fpath)<EOL><DEDENT>else:<EOL><INDENT>dicom_header = namedtuple('<STR_LIT>', header_fields)<EOL>build_dcm = lambda fpath: dicom_header._make(DicomFile(fpath).get_attributes(header_fields))<EOL><DEDENT>return build_dcm<EOL>", "docstring": "Creates a lambda function to read DICOM files.\nIf store_store_metadata is False, will only return the file path.\nElse if you give header_fields, will return only the set of of\nheader_fields within a DicomFile object or the whole DICOM file if\nNone.\n\n:return: function\nThis function has only one parameter: file_path", "id": "f4067:c1:m1"}
{"signature": "def from_folders(self, folders):", "body": "self.items = []<EOL>self._store_dicom_paths(folders)<EOL>", "docstring": "Restart the self.items and stores all dicom file paths found\nwithin folders\n\nParameters\n----------\nfolders: str or list of str", "id": "f4067:c0:m2"}
{"signature": "def from_set(self, fileset, check_if_dicoms=True):", "body": "if check_if_dicoms:<EOL><INDENT>self.items = []<EOL>for f in fileset:<EOL><INDENT>if is_dicom_file(f):<EOL><INDENT>self.items.append(f)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self.items = fileset<EOL><DEDENT>", "docstring": "Overwrites self.items with the given set of files.\n        Will filter the fileset and keep only Dicom files.\n\n        Parameters\n        ----------\n        fileset: iterable of str\n        Paths to files\n\n        check_if_dicoms: bool\n        Whether to check if the items in fileset are dicom file paths", "id": "f4067:c0:m3"}
{"signature": "def __init__(self, folders, read_metadata=True, header_fields=None):", "body": "DicomFileSet.__init__(self,  folders)<EOL>self.read_dcm = self.get_dcm_reader(read_metadata, header_fields)<EOL>", "docstring": ":param folders: str or list of strs\nPath or paths to folders to be searched for Dicom files\n\n:param read_metadata: bool\nIf True, will make a list of DicomFiles, otherwise will store\na simple DICOM header (namedtuples) with the fields specified\nin header_fields.\n\n:param header_fields: set of strings\nSet of header fields to be read from each DICOM file in a DicomHeader.\nIf store_metadata is False, this won't be used. Else and if this is\nNone, will store the whole DicomFile.", "id": "f4067:c1:m0"}
{"signature": "def rename_file_group_to_serial_nums(file_lst):", "body": "file_lst.sort()<EOL>c = <NUM_LIT:1><EOL>for f in file_lst:<EOL><INDENT>dirname = get_abspath(f.dirname())<EOL>fdest = f.joinpath(dirname, \"<STR_LIT>\".format(c) +<EOL>OUTPUT_DICOM_EXTENSION)<EOL>log.info('<STR_LIT>'.format(f, fdest))<EOL>f.rename(fdest)<EOL>c += <NUM_LIT:1><EOL><DEDENT>", "docstring": "Will rename all files in file_lst to a padded serial\n    number plus its extension\n\n    :param file_lst: list of path.py paths", "id": "f4067:m1"}
{"signature": "def _store_dicom_paths(self, folders):", "body": "if isinstance(folders, str):<EOL><INDENT>folders = [folders]<EOL><DEDENT>for folder in folders:<EOL><INDENT>if not os.path.exists(folder):<EOL><INDENT>raise FolderNotFound(folder)<EOL><DEDENT>self.items.extend(list(find_all_dicom_files(folder)))<EOL><DEDENT>", "docstring": "Search for dicoms in folders and save file paths into\n        self.dicom_paths set.\n\n        :param folders: str or list of str", "id": "f4067:c0:m1"}
{"signature": "def update(self, dicomset):", "body": "if not isinstance(dicomset, DicomFileSet):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.items = list(set(self.items).update(dicomset))<EOL>", "docstring": "Update this set with the union of itself and dicomset.\n\n        Parameters\n        ----------\n        dicomset: DicomFileSet", "id": "f4067:c0:m4"}
{"signature": "def set_dicom_file2(self, dcm_file):", "body": "self.dcmf2 = self._read_dcmfile(dcm_file)<EOL>", "docstring": "Parameters\n----------\ndcm_file: str (path to file) or DicomFile or namedtuple", "id": "f4068:c1:m3"}
{"signature": "def merge_groups(self, indices):", "body": "try:<EOL><INDENT>merged = merge_dict_of_lists(self.dicom_groups, indices,<EOL>pop_later=True, copy=True)<EOL>self.dicom_groups = merged<EOL><DEDENT>except IndexError:<EOL><INDENT>raise IndexError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Extend the lists within the DICOM groups dictionary.\n        The indices will indicate which list have to be extended by which\n        other list.\n\n        Parameters\n        ----------\n        indices: list or tuple of 2 iterables of int, bot having the same len\n             The indices of the lists that have to be merged, both iterables\n             items will be read pair by pair, the first is the index to the\n             list that will be extended with the list of the second index.\n             The indices can be constructed with Numpy e.g.,\n             indices = np.where(square_matrix)", "id": "f4068:c3:m7"}
{"signature": "def group_dicom_files(dicom_file_paths, header_fields):", "body": "dist = SimpleDicomFileDistance(field_weights=header_fields)<EOL>path_list = dicom_file_paths.copy()<EOL>path_groups = DefaultOrderedDict(DicomFileSet)<EOL>while len(path_list) > <NUM_LIT:0>:<EOL><INDENT>file_path1 = path_list.pop()<EOL>file_subgroup = [file_path1]<EOL>dist.set_dicom_file1(file_path1)<EOL>j = len(path_list)-<NUM_LIT:1><EOL>while j >= <NUM_LIT:0>:<EOL><INDENT>file_path2 = path_list[j]<EOL>dist.set_dicom_file2(file_path2)<EOL>if dist.transform():<EOL><INDENT>file_subgroup.append(file_path2)<EOL>path_list.pop(j)<EOL><DEDENT>j -= <NUM_LIT:1><EOL><DEDENT>path_groups[file_path1].from_set(file_subgroup, check_if_dicoms=False)<EOL><DEDENT>return path_groups<EOL>", "docstring": "Gets a list of DICOM file absolute paths and returns a list of lists of\nDICOM file paths. Each group contains a set of DICOM files that have\nexactly the same headers.\n\nParameters\n----------\ndicom_file_paths: list of str\n    List or set of DICOM file paths\n\nheader_fields: list of str\n    List of header field names to check on the comparisons of the DICOM files.\n\nReturns\n-------\ndict of DicomFileSets\n    The key is one filepath representing the group (the first found).", "id": "f4068:m0"}
{"signature": "def fit(self, dcm_file1, dcm_file2):", "body": "self.set_dicom_file1(dcm_file1)<EOL>self.set_dicom_file2(dcm_file2)<EOL>", "docstring": "Parameters\n----------\ndcm_file1: str (path to file) or DicomFile or namedtuple\n\ndcm_file2: str (path to file) or DicomFile or namedtuple", "id": "f4068:c1:m1"}
{"signature": "def get_groups_in_same_folder(self, folder_depth=<NUM_LIT:3>):", "body": "group_pairs = []<EOL>key_dicoms = list(self.dicom_groups.keys())<EOL>idx = len(key_dicoms)<EOL>while idx > <NUM_LIT:0>:<EOL><INDENT>group1 = key_dicoms.pop()<EOL>dir_group1 = get_folder_subpath(group1, folder_depth)<EOL>for group in key_dicoms:<EOL><INDENT>if group.startswith(dir_group1):<EOL><INDENT>group_pairs.append((group1, group))<EOL><DEDENT><DEDENT>idx -= <NUM_LIT:1><EOL><DEDENT>return group_pairs<EOL>", "docstring": "Returns a list of 2-tuples with pairs of dicom groups that\nare in the same folder within given depth.\n\nParameters\n----------\nfolder_depth: int\nPath depth to check for folder equality.\n\nReturns\n-------\nlist of tuples of str", "id": "f4068:c3:m3"}
{"signature": "def remove_dcm2nii_underprocessed(filepaths):", "body": "cln_flist = []<EOL>len_sorted = sorted(filepaths, key=len)<EOL>for idx, fpath in enumerate(len_sorted):<EOL><INDENT>remove = False<EOL>fname = op.basename(fpath)<EOL>rest  = len_sorted[idx+<NUM_LIT:1>:]<EOL>for rest_fpath in rest:<EOL><INDENT>rest_file = op.basename(rest_fpath)<EOL>if rest_file.endswith(fname):<EOL><INDENT>remove = True<EOL>break<EOL><DEDENT><DEDENT>if not remove:<EOL><INDENT>cln_flist.append(fpath)<EOL><DEDENT><DEDENT>return cln_flist<EOL>", "docstring": "Return a subset of `filepaths`. Keep only the files that have a basename longer than the\n    others with same suffix.\n    This works based on that dcm2nii appends a preffix character for each processing\n    step it does automatically in the DICOM to NifTI conversion.\n\n    Parameters\n    ----------\n    filepaths: iterable of str\n\n    Returns\n    -------\n    cleaned_paths: iterable of str", "id": "f4069:m4"}
{"signature": "def convert_dcm2nii(input_dir, output_dir, filename):", "body": "<EOL>if not op.exists(input_dir):<EOL><INDENT>raise IOError('<STR_LIT>'.format(input_dir))<EOL><DEDENT>if not op.exists(output_dir):<EOL><INDENT>raise IOError('<STR_LIT>'.format(output_dir))<EOL><DEDENT>tmpdir = tempfile.TemporaryDirectory(prefix='<STR_LIT>')<EOL>arguments = '<STR_LIT>'.format(tmpdir.name)<EOL>try:<EOL><INDENT>call_out = call_dcm2nii(input_dir, arguments)<EOL><DEDENT>except:<EOL><INDENT>raise<EOL><DEDENT>else:<EOL><INDENT>log.info('<STR_LIT>'.format(input_dir))<EOL>filenames  = glob(op.join(tmpdir.name, '<STR_LIT>'))<EOL>cleaned_filenames = remove_dcm2nii_underprocessed(filenames)<EOL>filepaths = []<EOL>for srcpath in cleaned_filenames:<EOL><INDENT>dstpath = op.join(output_dir, filename)<EOL>realpath = copy_w_plus(srcpath, dstpath)<EOL>filepaths.append(realpath)<EOL>basename = op.basename(remove_ext(srcpath))<EOL>aux_files = set(glob(op.join(tmpdir.name, '<STR_LIT>'     .format(basename)))) -set(glob(op.join(tmpdir.name, '<STR_LIT>'.format(basename))))<EOL>for aux_file in aux_files:<EOL><INDENT>aux_dstpath = copy_w_ext(aux_file, output_dir, remove_ext(op.basename(realpath)))<EOL>filepaths.append(aux_dstpath)<EOL><DEDENT><DEDENT>return filepaths<EOL><DEDENT>", "docstring": "Call MRICron's `dcm2nii` to convert the DICOM files inside `input_dir`\n    to Nifti and save the Nifti file in `output_dir` with a `filename` prefix.\n\n    Parameters\n    ----------\n    input_dir: str\n        Path to the folder that contains the DICOM files\n\n    output_dir: str\n        Path to the folder where to save the NifTI file\n\n    filename: str\n        Output file basename\n\n    Returns\n    -------\n    filepaths: list of str\n        List of file paths created in `output_dir`.", "id": "f4069:m3"}
{"signature": "def treefall(iterable):", "body": "num_elems = len(iterable)<EOL>for i in range(num_elems, -<NUM_LIT:1>, -<NUM_LIT:1>):<EOL><INDENT>for c in combinations(iterable, i):<EOL><INDENT>yield c<EOL><DEDENT><DEDENT>", "docstring": "Generate all combinations of the elements of iterable and its subsets.\n\nParameters\n----------\niterable: list, set or dict or any iterable object\n\nReturns\n-------\nA generator of all possible combinations of the iterable.\n\nExample:\n-------\n>>> for i in treefall([1, 2, 3, 4, 5]): print(i)\n>>> (1, 2, 3)\n>>> (1, 2)\n>>> (1, 3)\n>>> (2, 3)\n>>> (1,)\n>>> (2,)\n>>> (3,)\n>>> ()", "id": "f4070:m0"}
{"signature": "def get_attributes(self, attributes, default='<STR_LIT>'):", "body": "if isinstance(attributes, str):<EOL><INDENT>attributes = [attributes]<EOL><DEDENT>attrs = [getattr(self, attr, default) for attr in attributes]<EOL>if len(attrs) == <NUM_LIT:1>:<EOL><INDENT>return attrs[<NUM_LIT:0>]<EOL><DEDENT>return tuple(attrs)<EOL>", "docstring": "Return the attributes values from this DicomFile\n\n        Parameters\n        ----------\n        attributes: str or list of str\n         DICOM field names\n\n        default: str\n         Default value if the attribute does not exist.\n\n        Returns\n        -------\n        Value of the field or list of values.", "id": "f4071:c0:m1"}
{"signature": "def as_ndarray(arr, copy=False, dtype=None, order='<STR_LIT>'):", "body": "if order not in ('<STR_LIT:C>', '<STR_LIT:F>', '<STR_LIT:A>', '<STR_LIT>', None):<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(str(order)))<EOL><DEDENT>if isinstance(arr, np.memmap):<EOL><INDENT>if dtype is None:<EOL><INDENT>if order in ('<STR_LIT>', '<STR_LIT:A>', None):<EOL><INDENT>ret = np.array(np.asarray(arr), copy=True)<EOL><DEDENT>else:<EOL><INDENT>ret = np.array(np.asarray(arr), copy=True, order=order)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if order in ('<STR_LIT>', '<STR_LIT:A>', None):<EOL><INDENT>ret = np.asarray(arr).astype(dtype)<EOL><DEDENT>else:<EOL><INDENT>ret = _asarray(np.array(arr, copy=True), dtype=dtype, order=order)<EOL><DEDENT><DEDENT><DEDENT>elif isinstance(arr, np.ndarray):<EOL><INDENT>ret = _asarray(arr, dtype=dtype, order=order)<EOL>if np.may_share_memory(ret, arr) and copy:<EOL><INDENT>ret = ret.T.copy().T if ret.flags['<STR_LIT>'] else ret.copy()<EOL><DEDENT><DEDENT>elif isinstance(arr, (list, tuple)):<EOL><INDENT>if order in (\"<STR_LIT:A>\", \"<STR_LIT>\"):<EOL><INDENT>ret = np.asarray(arr, dtype=dtype)<EOL><DEDENT>else:<EOL><INDENT>ret = np.asarray(arr, dtype=dtype, order=order)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(arr.__class__))<EOL><DEDENT>return ret<EOL>", "docstring": "Convert an arbitrary array to numpy.ndarray.\n\n    In the case of a memmap array, a copy is automatically made to break the\n    link with the underlying file (whatever the value of the \"copy\" keyword).\n\n    The purpose of this function is mainly to get rid of memmap objects, but\n    it can be used for other purposes. 
In particular, combining copying and\n    casting can lead to performance improvements in some cases, by avoiding\n    unnecessary copies.\n\n    If not specified, input array order is preserved, in all cases, even when\n    a copy is requested.\n\n    Caveat: this function does not copy during bool to/from 1-byte dtype\n    conversions. This can lead to some surprising results in some rare cases.\n    Example:\n\n        a = numpy.asarray([0, 1, 2], dtype=numpy.int8)\n        b = as_ndarray(a, dtype=bool)  # array([False, True, True], dtype=bool)\n        c = as_ndarray(b, dtype=numpy.int8)  # array([0, 1, 2], dtype=numpy.int8)\n\n    The usually expected result for the last line would be array([0, 1, 1])\n    because True evaluates to 1. Since there is no copy made here, the original\n    array is recovered.\n\n    Parameters\n    ----------\n    arr: array-like\n        input array. Any value accepted by numpy.asarray is valid.\n\n    copy: bool\n        if True, force a copy of the array. Always True when arr is a memmap.\n\n    dtype: any numpy dtype\n        dtype of the returned array. Performing copy and type conversion at the\n        same time can in some cases avoid an additional copy.\n\n    order: string\n        gives the order of the returned array.\n        Valid values are: \"C\", \"F\", \"A\", \"K\", None.\n        default is \"K\". See ndarray.copy() for more information.\n\n    Returns\n    -------\n    ret: np.ndarray\n        Numpy array containing the same data as arr, always of class\n        numpy.ndarray, and with no link to any underlying file.", "id": "f4072:m1"}
{"signature": "def _num_samples(x):", "body": "if not hasattr(x, '<STR_LIT>') and not hasattr(x, '<STR_LIT>'):<EOL><INDENT>if hasattr(x, '<STR_LIT>'):<EOL><INDENT>x = np.asarray(x)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % x)<EOL><DEDENT><DEDENT>return x.shape[<NUM_LIT:0>] if hasattr(x, '<STR_LIT>') else len(x)<EOL>", "docstring": "Return number of samples in array-like x.", "id": "f4075:m3"}
{"signature": "def warn_if_not_float(X, estimator='<STR_LIT>'):", "body": "if not isinstance(estimator, str):<EOL><INDENT>estimator = estimator.__class__.__name__<EOL><DEDENT>if X.dtype.kind != '<STR_LIT:f>':<EOL><INDENT>warnings.warn(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (estimator, X.dtype))<EOL>return True<EOL><DEDENT>return False<EOL>", "docstring": "Warning utility function to check that data type is floating point.\n\n    Returns True if a warning was raised (i.e. the input is not float) and\n    False otherwise, for easier input validation.", "id": "f4075:m10"}
{"signature": "def check_array(array, accept_sparse=None, dtype=None, order=None, copy=False,<EOL>force_all_finite=True, ensure_2d=True, allow_nd=False):", "body": "if isinstance(accept_sparse, str):<EOL><INDENT>accept_sparse = [accept_sparse]<EOL><DEDENT>if sp.issparse(array):<EOL><INDENT>array = _ensure_sparse_format(array, accept_sparse, dtype, order,<EOL>copy, force_all_finite)<EOL><DEDENT>else:<EOL><INDENT>if ensure_2d:<EOL><INDENT>array = np.atleast_2d(array)<EOL><DEDENT>array = np.array(array, dtype=dtype, order=order, copy=copy)<EOL>if not allow_nd and array.ndim >= <NUM_LIT:3>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" %<EOL>array.ndim)<EOL><DEDENT>if force_all_finite:<EOL><INDENT>_assert_all_finite(array)<EOL><DEDENT><DEDENT>return array<EOL>", "docstring": "Input validation on an array, list, sparse matrix or similar.\n\n    By default, the input is converted to an at least 2nd numpy array.\n\n    Parameters\n    ----------\n    array : object\n        Input object to check / convert.\n\n    accept_sparse : string, list of string or None (default=None)\n        String[s] representing allowed sparse matrix formats, such as 'csc',\n        'csr', etc.  None means that sparse matrix input will raise an error.\n        If the input is sparse but not in the allowed format, it will be\n        converted to the first listed format.\n\n    order : 'F', 'C' or None (default=None)\n        Whether an array will be forced to be fortran or c-style.\n\n    copy : boolean (default=False)\n        Whether a forced copy will be triggered. 
If copy=False, a copy might\n        be triggered by a conversion.\n\n    force_all_finite : boolean (default=True)\n        Whether to raise an error on np.inf and np.nan in X.\n\n    ensure_2d : boolean (default=True)\n        Whether to make X at least 2d.\n\n    allow_nd : boolean (default=False)\n        Whether to allow X.ndim > 2.\n\n    Returns\n    -------\n    X_converted : object\n        The converted and validated X.", "id": "f4075:m7"}
{"signature": "def assert_all_finite(X):", "body": "<EOL>_assert_all_finite(X.data if sp.issparse(X) else X)<EOL>", "docstring": "Throw a ValueError if X contains NaN or infinity.\n\n    Input MUST be an np.ndarray instance or a scipy.sparse matrix.", "id": "f4075:m1"}
{"signature": "def remove_from_string(string, values):", "body": "for v in values:<EOL><INDENT>string = string.replace(v, '<STR_LIT>')<EOL><DEDENT>return string<EOL>", "docstring": "Parameters\n----------\nstring:\nvalues:\n\nReturns\n-------", "id": "f4077:m9"}
{"signature": "def is_fnmatch_regex(string):", "body": "is_regex = False<EOL>regex_chars = ['<STR_LIT:!>', '<STR_LIT:*>', '<STR_LIT:$>']<EOL>for c in regex_chars:<EOL><INDENT>if string.find(c) > -<NUM_LIT:1>:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return is_regex<EOL>", "docstring": "Returns True if the given string is considered a fnmatch\nregular expression, False otherwise.\nIt will look for\n\n:param string: str", "id": "f4077:m8"}
{"signature": "def append_to_list(lst, preffix):", "body": "return [preffix + str(item) for item in lst]<EOL>", "docstring": "Parameters\n----------\nlst:\npreffix:\n\nReturns\n-------", "id": "f4077:m5"}
{"signature": "def match_list(lst, pattern, group_names=[]):", "body": "filtfn = re.compile(pattern).match<EOL>filtlst = filter_list(lst, filtfn)<EOL>if not group_names:<EOL><INDENT>return [m.string for m in filtlst]<EOL><DEDENT>else:<EOL><INDENT>return [m.group(group_names) for m in filtlst]<EOL><DEDENT>", "docstring": "Parameters\n----------\nlst: list of str\n\nregex: string\n\ngroup_names: list of strings\n    See re.MatchObject group docstring\n\nReturns\n-------\nlist of strings\n    Filtered list, with the strings that match the pattern", "id": "f4077:m2"}
{"signature": "def is_valid_regex(string):", "body": "try:<EOL><INDENT>re.compile(string)<EOL>is_valid = True<EOL><DEDENT>except re.error:<EOL><INDENT>is_valid = False<EOL><DEDENT>return is_valid<EOL>", "docstring": "Checks whether the re module can compile the given regular expression.\n\nParameters\n----------\nstring: str\n\nReturns\n-------\nboolean", "id": "f4077:m6"}
{"signature": "def search_list(lst, pattern):", "body": "filt = re.compile(pattern).search<EOL>return filter_list(lst, filt)<EOL>", "docstring": "Parameters\n----------\npattern: string\n\nlst: list of strings\n\nReturns\n-------\nfiltered_list: list of str\n    Filtered lists with the strings in which the pattern is found.", "id": "f4077:m3"}
{"signature": "def merge(dict_1, dict_2):", "body": "return dict((str(key), dict_1.get(key) or dict_2.get(key))<EOL>for key in set(dict_2) | set(dict_1))<EOL>", "docstring": "Merge two dictionaries.\n\n    Values that evaluate to true take priority over falsy values.\n    `dict_1` takes priority over `dict_2`.", "id": "f4080:m0"}
{"signature": "def get_rcfile_variable_value(var_name, app_name, section_name=None):", "body": "cfg = get_rcfile_section(app_name, section_name)<EOL>if var_name in cfg:<EOL><INDENT>raise KeyError('<STR_LIT>'<EOL>'<STR_LIT>'.format(var_name, section_name))<EOL><DEDENT>return cfg[var_name]<EOL>", "docstring": "Return the value of the variable in the section_name section of the\n    app_name rc file.\n\n    Parameters\n    ----------\n    var_name: str\n        Name of the variable to be searched for.\n\n    section_name: str\n        Name of the section in the rcfiles.\n\n    app_name: str\n        Name of the application to look for its rcfiles.\n\n    Returns\n    -------\n    var_value: str\n        The value of the variable with given var_name.", "id": "f4080:m8"}
{"signature": "def rcfile(appname, section=None, args={}, strip_dashes=True):", "body": "if strip_dashes:<EOL><INDENT>for k in args.keys():<EOL><INDENT>args[k.lstrip('<STR_LIT:->')] = args.pop(k)<EOL><DEDENT><DEDENT>environ = get_environment(appname)<EOL>if section is None:<EOL><INDENT>section = appname<EOL><DEDENT>config = get_config(appname,<EOL>section,<EOL>args.get('<STR_LIT>', '<STR_LIT>'),<EOL>args.get('<STR_LIT:path>', '<STR_LIT>'))<EOL>config = merge(merge(args, config), environ)<EOL>if not config:<EOL><INDENT>raise IOError('<STR_LIT>'<EOL>'<STR_LIT>'.format(appname))<EOL><DEDENT>return config<EOL>", "docstring": "Read environment variables and config files and return them merged with\n    predefined list of arguments.\n\n    Parameters\n    ----------\n    appname: str\n        Application name, used for config files and environment variable\n        names.\n\n    section: str\n        Name of the section to be read. If this is not set: appname.\n\n    args:\n        arguments from command line (optparse, docopt, etc).\n\n    strip_dashes: bool\n        Strip dashes prefixing key names from args dict.\n\n    Returns\n    --------\n    dict\n        containing the merged variables of environment variables, config\n        files and args.\n\n    Raises\n    ------\n    IOError\n        In case the return value is empty.\n\n    Notes\n    -----\n    Environment variables are read if they start with appname in uppercase\n    with underscore, for example:\n\n        TEST_VAR=1\n\n    Config files compatible with ConfigParser are read and the section name\n    appname is read, example:\n\n        [appname]\n        var=1\n\n    We can also have host-dependent configuration values, which have\n    priority over the default appname values.\n\n        [appname]\n        var=1\n\n        [appname:mylinux]\n        var=3\n\n\n    For boolean flags do not try to use: 'True' or 'False',\n                                         'on' or 'off',\n                          
               '1' or '0'.\n    Unless you are willing to parse this values by yourself.\n    We recommend commenting the variables out with '#' if you want to set a\n    flag to False and check if it is in the rcfile cfg dict, i.e.:\n\n        flag_value = 'flag_variable' in cfg\n\n\n    Files are read from: /etc/appname/config,\n                         /etc/appfilerc,\n                         ~/.config/appname/config,\n                         ~/.config/appname,\n                         ~/.appname/config,\n                         ~/.appnamerc,\n                         appnamerc,\n                         .appnamerc,\n                         appnamerc file found in 'path' folder variable in args,\n                         .appnamerc file found in 'path' folder variable in args,\n                         file provided by 'config' variable in args.\n\n    Example\n    -------\n        args = rcfile(__name__, docopt(__doc__, version=__version__))", "id": "f4080:m6"}
{"signature": "def get_rcfile_section(app_name, section_name):", "body": "try:<EOL><INDENT>settings = rcfile(app_name, section_name)<EOL><DEDENT>except IOError:<EOL><INDENT>raise<EOL><DEDENT>except:<EOL><INDENT>raise KeyError('<STR_LIT>'<EOL>'<STR_LIT>'.format(section_name, app_name))<EOL><DEDENT>else:<EOL><INDENT>return settings<EOL><DEDENT>", "docstring": "Return the dictionary containing the rcfile section configuration\n    variables.\n\n    Parameters\n    ----------\n    section_name: str\n        Name of the section in the rcfiles.\n\n    app_name: str\n        Name of the application to look for its rcfiles.\n\n    Returns\n    -------\n    settings: dict\n        Dict with variable values", "id": "f4080:m7"}
{"signature": "def get_sys_path(rcpath, app_name, section_name=None):", "body": "<EOL>if op.exists(rcpath):<EOL><INDENT>return op.realpath(op.expanduser(rcpath))<EOL><DEDENT>try:<EOL><INDENT>settings = rcfile(app_name, section_name)<EOL><DEDENT>except:<EOL><INDENT>raise<EOL><DEDENT>try:<EOL><INDENT>sys_path = op.expanduser(settings[rcpath])<EOL><DEDENT>except KeyError:<EOL><INDENT>raise IOError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(rcpath,<EOL>section_name,<EOL>app_name))<EOL><DEDENT>else:<EOL><INDENT>if not op.exists(sys_path):<EOL><INDENT>raise IOError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(rcpath, section_name, app_name,<EOL>sys_path))<EOL><DEDENT>return op.realpath(op.expanduser(sys_path))<EOL><DEDENT>", "docstring": "Return a folder path if it exists.\n\n    First will check if it is an existing system path, if it is, will return it\n    expanded and absoluted.\n\n    If this fails will look for the rcpath variable in the app_name rcfiles or\n    exclusively within the given section_name, if given.\n\n    Parameters\n    ----------\n    rcpath: str\n        Existing folder path or variable name in app_name rcfile with an\n        existing one.\n\n    section_name: str\n        Name of a section in the app_name rcfile to look exclusively there for\n        variable names.\n\n    app_name: str\n        Name of the application to look for rcfile configuration files.\n\n    Returns\n    -------\n    sys_path: str\n        A expanded absolute file or folder path if the path exists.\n\n    Raises\n    ------\n    IOError if the proposed sys_path does not exist.", "id": "f4080:m5"}
{"signature": "def _cache(self, func, func_memory_level=<NUM_LIT:1>, **kwargs):", "body": "verbose = getattr(self, '<STR_LIT>', <NUM_LIT:0>)<EOL>if not hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>self.memory_level = <NUM_LIT:0><EOL><DEDENT>if not hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>self.memory = Memory(cachedir=None, verbose=verbose)<EOL><DEDENT>if isinstance(self.memory, _basestring):<EOL><INDENT>self.memory = Memory(cachedir=self.memory, verbose=verbose)<EOL><DEDENT>if self.memory_level == <NUM_LIT:0>:<EOL><INDENT>if (isinstance(self.memory, _basestring)<EOL>or self.memory.cachedir is not None):<EOL><INDENT>warnings.warn(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>self.memory_level = <NUM_LIT:1><EOL><DEDENT><DEDENT>return cache(func, self.memory, func_memory_level=func_memory_level,<EOL>memory_level=self.memory_level, **kwargs)<EOL>", "docstring": "Return a joblib.Memory object.\n\n        The memory_level determines the level above which the wrapped\n        function output is cached. By specifying a numeric value for\n        this level, the user can to control the amount of cache memory\n        used. This function will cache the function call or not\n        depending on the cache level.\n\n        Parameters\n        ----------\n        func: function\n            The function the output of which is to be cached.\n\n        memory_level: int\n            The memory_level from which caching must be enabled for the wrapped\n            function.\n\n        Returns\n        -------\n        mem: joblib.Memory\n            object that wraps the function func. This object may be\n            a no-op, if the requested level is lower than the value given\n            to _cache()). For consistency, a joblib.Memory object is always\n            returned.", "id": "f4082:c0:m0"}
{"signature": "def check_call(cmd_args):", "body": "p = subprocess.Popen(cmd_args, stdout=subprocess.PIPE)<EOL>(output, err) = p.communicate()<EOL>return output<EOL>", "docstring": "Calls the command\n\nParameters\n----------\ncmd_args: list of str\n    Command name to call and its arguments in a list.\n\nReturns\n-------\nCommand output", "id": "f4083:m6"}
{"signature": "def whosdaddy():", "body": "return inspect.stack()[<NUM_LIT:2>][<NUM_LIT:3>]<EOL>", "docstring": "Get the name of the current function", "id": "f4083:m4"}
{"signature": "def call_command(cmd_name, args_strings):", "body": "if not op.isabs(cmd_name):<EOL><INDENT>cmd_fullpath = which(cmd_name)<EOL><DEDENT>else:<EOL><INDENT>cmd_fullpath = cmd_name<EOL><DEDENT>try:<EOL><INDENT>cmd_line = [cmd_fullpath] + args_strings<EOL>log.info('<STR_LIT>'.format(cmd_line))<EOL>retval = subprocess.check_call(cmd_line)<EOL><DEDENT>except CalledProcessError as ce:<EOL><INDENT>log.exception(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(cmd_name, args_strings,<EOL>ce.returncode))<EOL>raise<EOL><DEDENT>else:<EOL><INDENT>return retval<EOL><DEDENT>", "docstring": "Call CLI command with arguments and returns its return value.\n\n    Parameters\n    ----------\n    cmd_name: str\n        Command name or full path to the binary file.\n\n    arg_strings: list of str\n        Argument strings list.\n\n    Returns\n    -------\n    return_value\n        Command return value.", "id": "f4083:m7"}
{"signature": "def whoami():", "body": "return inspect.stack()[<NUM_LIT:1>][<NUM_LIT:3>]<EOL>", "docstring": "Get the name of the current function", "id": "f4083:m3"}
{"signature": "def which(program):", "body": "if (sys.version_info > (<NUM_LIT:3>, <NUM_LIT:0>)):<EOL><INDENT>return which_py3(program)<EOL><DEDENT>else:<EOL><INDENT>return which_py2(program)<EOL><DEDENT>", "docstring": "Returns the absolute path of the given CLI program name.", "id": "f4083:m0"}
{"signature": "def check_compatibility(self, one_img, another_img=None):", "body": "if another_img is None:<EOL><INDENT>if len(self.items) > <NUM_LIT:0>:<EOL><INDENT>another_img = self.items[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'.format(repr_imgs(one_img)))<EOL><DEDENT><DEDENT>try:<EOL><INDENT>if self.all_compatible:<EOL><INDENT>check_img_compatibility(one_img, another_img)<EOL><DEDENT>if self.mask is not None:<EOL><INDENT>check_img_compatibility(one_img, self.mask, only_check_3d=True)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Parameters\n----------\none_img: str or img-like object.\n    See NeuroImage constructor docstring.\n\nanoter_img: str or img-like object.\n    See NeuroImage constructor docstring.\n    If None will use the first image of self.images, if there is any.\n\nRaises\n------\nNiftiFilesNotCompatible\n    If one_img and another_img aren't compatible.\n\nValueError\n    If another_img is None and there are no other images in this set.", "id": "f4085:c0:m7"}
{"signature": "def from_dict(self, subj_files):", "body": "for group_label in subj_files:<EOL><INDENT>try:<EOL><INDENT>group_files = subj_files[group_label]<EOL>self.items.extend([self._load_image(get_abspath(imgf)) for imgf in group_files])<EOL>self.labels.extend([group_label]*len(group_files))<EOL><DEDENT>except Exception as exc:<EOL><INDENT>raise Exception('<STR_LIT>'<EOL>'<STR_LIT>'.format(group_label)) from exc<EOL><DEDENT><DEDENT>", "docstring": "Parameters\n----------\nsubj_files: dict of str\n    file_path -> int/str", "id": "f4085:c1:m5"}
{"signature": "def set_labels(self, labels):", "body": "if not isinstance(labels, string_types) and len(labels) != self.n_subjs:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'.format(len(labels), self.n_subjs))<EOL><DEDENT>self.labels = labels<EOL>", "docstring": "Parameters\n----------\nlabels: list of int or str\n    This list will be checked to have the same size as\n\nRaises\n------\nValueError\n    if len(labels) != self.n_subjs", "id": "f4085:c0:m9"}
{"signature": "def _init_subj_data(self, subj_files):", "body": "try:<EOL><INDENT>if isinstance(subj_files, list):<EOL><INDENT>self.from_list(subj_files)<EOL><DEDENT>elif isinstance(subj_files, dict):<EOL><INDENT>self.from_dict(subj_files)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>except Exception as exc:<EOL><INDENT>raise Exception('<STR_LIT>') from exc<EOL><DEDENT>", "docstring": "Parameters\n----------\nsubj_files: list or dict of str\n    file_path -> int/str", "id": "f4085:c1:m1"}
{"signature": "def to_matrix(self, smooth_fwhm=<NUM_LIT:0>, outdtype=None):", "body": "if not self.all_compatible:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not outdtype:<EOL><INDENT>outdtype = self.items[<NUM_LIT:0>].dtype<EOL><DEDENT>n_voxels     = None<EOL>mask_indices = None<EOL>mask_shape   = self.items[<NUM_LIT:0>].shape[:<NUM_LIT:3>]<EOL>if self.has_mask:<EOL><INDENT>mask_arr     = self.mask.get_data()<EOL>mask_indices = np.nonzero(mask_arr)<EOL>mask_shape   = self.mask.shape<EOL>n_voxels     = np.count_nonzero(mask_arr)<EOL><DEDENT>if n_voxels is None:<EOL><INDENT>log.debug('<STR_LIT>'.format(self.mask))<EOL>n_voxels     = np.prod(mask_shape)<EOL>mask_indices = None<EOL><DEDENT>ndims = self.items[<NUM_LIT:0>].ndim<EOL>if ndims == <NUM_LIT:3>:<EOL><INDENT>subj_flat_shape = (n_voxels, )<EOL><DEDENT>elif ndims == <NUM_LIT:4>:<EOL><INDENT>subj_flat_shape = (n_voxels, self.items[<NUM_LIT:0>].shape[<NUM_LIT:3>])<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError('<STR_LIT>'<EOL>'<STR_LIT>'.format(ndims))<EOL><DEDENT>outmat = np.zeros((self.n_subjs, ) + subj_flat_shape, dtype=outdtype)<EOL>try:<EOL><INDENT>for i, image in enumerate(self.items):<EOL><INDENT>if smooth_fwhm > <NUM_LIT:0>:<EOL><INDENT>image.fwhm = smooth_fwhm<EOL><DEDENT>if self.has_mask:<EOL><INDENT>image.set_mask(self.mask)<EOL><DEDENT>outmat[i, :], _, _ = image.mask_and_flatten()<EOL>image.clear_data()<EOL><DEDENT><DEDENT>except Exception as exc:<EOL><INDENT>raise Exception('<STR_LIT>'.format(image)) from exc<EOL><DEDENT>else:<EOL><INDENT>return outmat, mask_indices, mask_shape<EOL><DEDENT>", "docstring": "Return numpy.ndarray with the masked or flatten image data and\n           the relevant information (mask indices and volume shape).\n\n        Parameters\n        ----------\n        smooth__fwhm: int\n            Integer indicating the size of the FWHM Gaussian smoothing kernel\n            to smooth the subject volumes before creating the data matrix\n\n        outdtype: 
dtype\n            Type of the elements of the array, if None will obtain the dtype from\n            the first nifti file.\n\n        Returns\n        -------\n        outmat, mask_indices, vol_shape\n\n        outmat: Numpy array with shape N x prod(vol.shape)\n                containing the N files as flat vectors.\n\n        mask_indices: matrix with indices of the voxels in the mask\n\n        vol_shape: Tuple with shape of the volumes, for reshaping.", "id": "f4085:c0:m11"}
{"signature": "@mask.setter<EOL><INDENT>def mask(self, image):<DEDENT>", "body": "if image is None:<EOL><INDENT>self._mask = None<EOL><DEDENT>try:<EOL><INDENT>mask = load_mask(image)<EOL><DEDENT>except Exception as exc:<EOL><INDENT>raise Exception('<STR_LIT>'.format(image)) from exc<EOL><DEDENT>else:<EOL><INDENT>self._mask = mask<EOL><DEDENT>", "docstring": "self.mask setter\n\n        Parameters\n        ----------\n        image: str or img-like object.\n            See NeuroImage constructor docstring.", "id": "f4085:c0:m4"}
{"signature": "def all_childnodes_to_nifti1img(h5group):", "body": "child_nodes = []<EOL>def append_parent_if_dataset(name, obj):<EOL><INDENT>if isinstance(obj, h5py.Dataset):<EOL><INDENT>if name.split('<STR_LIT:/>')[-<NUM_LIT:1>] == '<STR_LIT:data>':<EOL><INDENT>child_nodes.append(obj.parent)<EOL><DEDENT><DEDENT><DEDENT>vols = []<EOL>h5group.visititems(append_parent_if_dataset)<EOL>for c in child_nodes:<EOL><INDENT>vols.append(hdfgroup_to_nifti1image(c))<EOL><DEDENT>return vols<EOL>", "docstring": "Returns in a list all images found under h5group.\n\n    Parameters\n    ----------\n    h5group: h5py.Group\n        HDF group\n\n    Returns\n    -------\n    list of nifti1Image", "id": "f4087:m6"}
{"signature": "def get_nifti1hdr_from_h5attrs(h5attrs):", "body": "hdr = nib.Nifti1Header()<EOL>for k in list(h5attrs.keys()):<EOL><INDENT>hdr[str(k)] = np.array(h5attrs[k])<EOL><DEDENT>return hdr<EOL>", "docstring": "Transforms an H5py Attributes set to a dict.\n    Converts unicode string keys into standard strings\n    and each value into a numpy array.\n\n    Parameters\n    ----------\n    h5attrs: H5py Attributes\n\n    Returns\n    --------\n    dict", "id": "f4087:m5"}
{"signature": "def hdfgroup_to_nifti1image(h5group):", "body": "try:<EOL><INDENT>data   = h5group['<STR_LIT:data>'][:]<EOL>affine = h5group['<STR_LIT>'][:]<EOL>extra = None<EOL>if '<STR_LIT>' in h5group:<EOL><INDENT>extra = h5group['<STR_LIT>'][:]<EOL><DEDENT>header = get_nifti1hdr_from_h5attrs(h5group['<STR_LIT:data>'].attrs)<EOL>img = nib.Nifti1Image(data, affine, header=header, extra=extra)<EOL>return img<EOL><DEDENT>except KeyError as ke:<EOL><INDENT>raise Exception('<STR_LIT>' + h5group.name) from ke<EOL><DEDENT>", "docstring": "Returns a nibabel Nifti1Image from a HDF5 group datasets\n\n    Parameters\n    ----------\n    h5group: h5py.Group\n        HDF5 group\n\n    Returns\n    -------\n    nibabel Nifti1Image", "id": "f4087:m4"}
{"signature": "def spatialimg_to_hdfpath(file_path, spatial_img, h5path=None, append=True):", "body": "if h5path is None:<EOL><INDENT>h5path = '<STR_LIT>'<EOL><DEDENT>mode = '<STR_LIT:w>'<EOL>if os.path.exists(file_path):<EOL><INDENT>if append:<EOL><INDENT>mode = '<STR_LIT:a>'<EOL><DEDENT><DEDENT>with h5py.File(file_path, mode) as f:<EOL><INDENT>try:<EOL><INDENT>h5img = f.create_group(h5path)<EOL>spatialimg_to_hdfgroup(h5img, spatial_img)<EOL><DEDENT>except ValueError as ve:<EOL><INDENT>raise Exception('<STR_LIT>' + h5path) from ve<EOL><DEDENT><DEDENT>", "docstring": "Saves a Nifti1Image into an HDF5 file.\n\n    Parameters\n    ----------\n    file_path: string\n        Output HDF5 file path\n\n    spatial_img: nibabel SpatialImage\n        Image to be saved\n\n    h5path: string\n        HDF5 group path where the image data will be saved.\n        Datasets will be created inside the given group path:\n        'data', 'extra', 'affine', the header information will\n        be set as attributes of the 'data' dataset.\n        Default: '/img'\n\n    append: bool\n        True if you don't want to erase the content of the file\n        if it already exists, False otherwise.\n\n    Note\n    ----\n    HDF5 open modes\n    >>> 'r' Readonly, file must exist\n    >>> 'r+' Read/write, file must exist\n    >>> 'w' Create file, truncate if exists\n    >>> 'w-' Create file, fail if exists\n    >>> 'a' Read/write if exists, create otherwise (default)", "id": "f4087:m2"}
{"signature": "def partition_timeseries(image, roi_img, mask_img=None, zeroe=True, roi_values=None, outdict=False):", "body": "img  = read_img(image)<EOL>rois = read_img(roi_img)<EOL>check_img_compatibility(img, rois, only_check_3d=True)<EOL>roi_data = rois.get_data()<EOL>if roi_values is not None:<EOL><INDENT>for rv in roi_values:<EOL><INDENT>if not np.any(roi_data == rv):<EOL><INDENT>raise ValueError('<STR_LIT>'.format(rv, repr_imgs(roi_img)))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>roi_values = get_unique_nonzeros(roi_data)<EOL><DEDENT>if mask_img is None:<EOL><INDENT>mask_data = None<EOL><DEDENT>else:<EOL><INDENT>mask = load_mask(mask_img)<EOL>check_img_compatibility(img, mask, only_check_3d=True)<EOL>mask_data = mask.get_data()<EOL><DEDENT>if outdict:<EOL><INDENT>extract_data = _extract_timeseries_dict<EOL><DEDENT>else:<EOL><INDENT>extract_data = _extract_timeseries_list<EOL><DEDENT>try:<EOL><INDENT>return extract_data(img.get_data(), rois.get_data(), mask_data,<EOL>roi_values=roi_values, zeroe=zeroe)<EOL><DEDENT>except:<EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Partition the timeseries in tsvol according to the ROIs in roivol.\n    If a mask is given, will use it to exclude any voxel outside of it.\n\n    The outdict indicates whether you want a dictionary for each set of timeseries keyed by the ROI value\n    or a list of timeseries sets. 
If True and roi_img is not None will return an OrderedDict, if False\n    or roi_img or roi_list is None will return a list.\n\n    Background value is assumed to be 0 and won't be used here.\n\n    Parameters\n    ----------\n    image: img-like object or str\n        4D timeseries volume\n\n    roi_img: img-like object or str\n        3D volume defining different ROIs.\n\n    mask_img: img-like object or str\n        3D mask volume\n\n    zeroe: bool\n        If true will remove the null timeseries voxels.\n\n    roi_values: list of ROI values (int?)\n        List of the values of the ROIs to indicate the\n        order and which ROIs will be processed.\n\n    outdict: bool\n        If True will return an OrderedDict of timeseries sets, otherwise a list.\n\n    Returns\n    -------\n    timeseries: list or OrderedDict\n        A dict with the timeseries as items and keys as the ROIs voxel values or\n        a list where each element is the timeseries set ordered by the sorted values in roi_img or by the roi_values\n        argument.", "id": "f4088:m8"}
{"signature": "def create_rois_mask(roislist, filelist):", "body": "roifiles = []<EOL>for roi in roislist:<EOL><INDENT>try:<EOL><INDENT>roi_file = search_list(roi, filelist)[<NUM_LIT:0>]<EOL><DEDENT>except Exception as exc:<EOL><INDENT>raise Exception('<STR_LIT>'.format(str(exc)))<EOL><DEDENT>else:<EOL><INDENT>roifiles.append(roi_file)<EOL><DEDENT><DEDENT>return binarise(roifiles)<EOL>", "docstring": "Look for the files in filelist containing the names in roislist, these files will be opened, binarised\n    and merged in one mask.\n\n    Parameters\n    ----------\n    roislist: list of strings\n        Names of the ROIs, which will have to be in the names of the files in filelist.\n\n    filelist: list of strings\n        List of paths to the volume files containing the ROIs.\n\n    Returns\n    -------\n    numpy.ndarray\n        Mask volume", "id": "f4088:m4"}
{"signature": "def get_roilist_from_atlas(atlas_img):", "body": "return get_unique_nonzeros(check_img(atlas_img).get_data())<EOL>", "docstring": "Extract unique values from the atlas and returns them as an ordered list.\n\nParameters\n----------\natlas_img: img-like object or str\n    Volume defining different ROIs.\n    Can either be:\n    - a file path to a Nifti image\n    - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n    If niimg is a string, consider it as a path to Nifti image and\n    call nibabel.load on it. If it is an object, check if get_data()\n    and get_affine() methods are present, raise TypeError otherwise.\n\nReturns\n-------\nnp.ndarray\n    An 1D array of roi values from atlas volume.\n\nNote\n----\nThe roi with value 0 will be considered background so will be removed.", "id": "f4088:m6"}
{"signature": "def _partition_data(datavol, roivol, roivalue, maskvol=None, zeroe=True):", "body": "if maskvol is not None:<EOL><INDENT>indices = (roivol == roivalue) * (maskvol > <NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>indices = roivol == roivalue<EOL><DEDENT>if datavol.ndim == <NUM_LIT:4>:<EOL><INDENT>ts = datavol[indices, :]<EOL><DEDENT>else:<EOL><INDENT>ts = datavol[indices]<EOL><DEDENT>if zeroe:<EOL><INDENT>if datavol.ndim == <NUM_LIT:4>:<EOL><INDENT>ts = ts[ts.sum(axis=<NUM_LIT:1>) != <NUM_LIT:0>, :]<EOL><DEDENT><DEDENT>return ts<EOL>", "docstring": "Extracts the values in `datavol` that are in the ROI with value `roivalue` in `roivol`.\n    The ROI can be masked by `maskvol`.\n\n    Parameters\n    ----------\n    datavol: numpy.ndarray\n        4D timeseries volume or a 3D volume to be partitioned\n\n    roivol: numpy.ndarray\n        3D ROIs volume\n\n    roivalue: int or float\n        A value from roivol that represents the ROI to be used for extraction.\n\n    maskvol: numpy.ndarray\n        3D mask volume\n\n    zeroe: bool\n        If true will remove the null timeseries voxels.  Only applied to timeseries (4D) data.\n\n    Returns\n    -------\n    values: np.array\n        An array of the values in the indicated ROI.\n        A 2D matrix if `datavol` is 4D or a 1D vector if `datavol` is 3D.", "id": "f4088:m11"}
{"signature": "def largest_connected_component(volume):", "body": "<EOL>volume = np.asarray(volume)<EOL>labels, num_labels = scn.label(volume)<EOL>if not num_labels:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if num_labels == <NUM_LIT:1>:<EOL><INDENT>return volume.astype(np.bool)<EOL><DEDENT>label_count = np.bincount(labels.ravel().astype(np.int))<EOL>label_count[<NUM_LIT:0>] = <NUM_LIT:0><EOL>return labels == label_count.argmax()<EOL>", "docstring": "Return the largest connected component of a 3D array.\n\n    Parameters\n    -----------\n    volume: numpy.array\n        3D boolean array.\n\n    Returns\n    --------\n    volume: numpy.array\n        3D boolean array with only one connected component.", "id": "f4088:m2"}
{"signature": "def get_unique_nonzeros(arr):", "body": "rois = np.unique(arr)<EOL>rois = rois[np.nonzero(rois)]<EOL>rois.sort()<EOL>return rois<EOL>", "docstring": "Return a sorted list of the non-zero unique values of arr.\n\n    Parameters\n    ----------\n    arr: numpy.ndarray\n        The data array\n\n    Returns\n    -------\n    list of items of arr.", "id": "f4088:m5"}
{"signature": "def large_clusters_mask(volume, min_cluster_size):", "body": "labels, num_labels = scn.label(volume)<EOL>labels_to_keep = set([i for i in range(num_labels)<EOL>if np.sum(labels == i) >= min_cluster_size])<EOL>clusters_mask = np.zeros_like(volume, dtype=int)<EOL>for l in range(num_labels):<EOL><INDENT>if l in labels_to_keep:<EOL><INDENT>clusters_mask[labels == l] = <NUM_LIT:1><EOL><DEDENT><DEDENT>return clusters_mask<EOL>", "docstring": "Return as mask for `volume` that includes only areas where\n    the connected components have a size bigger than `min_cluster_size`\n    in number of voxels.\n\n    Parameters\n    -----------\n    volume: numpy.array\n        3D boolean array.\n\n    min_cluster_size: int\n        Minimum size in voxels that the connected component must have.\n\n    Returns\n    --------\n    volume: numpy.array\n        3D int array with a mask excluding small connected components.", "id": "f4088:m3"}
{"signature": "def get_3D_from_4D(image, vol_idx=<NUM_LIT:0>):", "body": "img      = check_img(image)<EOL>hdr, aff = get_img_info(img)<EOL>if len(img.shape) != <NUM_LIT:4>:<EOL><INDENT>raise AttributeError('<STR_LIT>'.format(repr_imgs(img)))<EOL><DEDENT>if not <NUM_LIT:0> <= vol_idx < img.shape[<NUM_LIT:3>]:<EOL><INDENT>raise IndexError('<STR_LIT>'<EOL>'<STR_LIT>'.format(repr_imgs(img), img.shape[<NUM_LIT:3>], vol_idx))<EOL><DEDENT>img_data = img.get_data()<EOL>new_vol  = img_data[:, :, :, vol_idx].copy()<EOL>hdr.set_data_shape(hdr.get_data_shape()[:<NUM_LIT:3>])<EOL>return new_vol, hdr, aff<EOL>", "docstring": "Pick one 3D volume from a 4D nifti image file\n\n    Parameters\n    ----------\n    image: img-like object or str\n        Volume defining different ROIs.\n        Can either be:\n        - a file path to a Nifti image\n        - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n        If niimg is a string, consider it as a path to Nifti image and\n        call nibabel.load on it. If it is an object, check if get_data()\n        and get_affine() methods are present, raise TypeError otherwise.\n\n    vol_idx: int\n        Index of the 3D volume to be extracted from the 4D volume.\n\n    Returns\n    -------\n    vol, hdr, aff\n        The data array, the image header and the affine transform matrix.", "id": "f4088:m14"}
{"signature": "def _extract_timeseries_dict(tsvol, roivol, maskvol=None, roi_values=None, zeroe=True):", "body": "_check_for_partition(tsvol, roivol, maskvol)<EOL>if roi_values is None:<EOL><INDENT>roi_values = get_unique_nonzeros(roivol)<EOL><DEDENT>ts_dict = OrderedDict()<EOL>for r in roi_values:<EOL><INDENT>ts = _partition_data(tsvol, roivol, r, maskvol, zeroe)<EOL>if len(ts) == <NUM_LIT:0>:<EOL><INDENT>ts = np.zeros(tsvol.shape[-<NUM_LIT:1>])<EOL><DEDENT>ts_dict[r] = ts<EOL><DEDENT>return ts_dict<EOL>", "docstring": "Partition the timeseries in tsvol according to the ROIs in roivol.\n    If a mask is given, will use it to exclude any voxel outside of it.\n\n    Parameters\n    ----------\n    tsvol: numpy.ndarray\n        4D timeseries volume or a 3D volume to be partitioned\n\n    roivol: numpy.ndarray\n        3D ROIs volume\n\n    maskvol: numpy.ndarray\n        3D mask volume\n\n    zeroe: bool\n        If true will remove the null timeseries voxels.\n\n    roi_values: list of ROI values (int?)\n        List of the values of the ROIs to indicate the\n        order and which ROIs will be processed.\n\n    Returns\n    -------\n    ts_dict: OrderedDict\n        A dict with the timeseries as items and keys as the ROIs voxel values.", "id": "f4088:m12"}
{"signature": "def union_mask(filelist):", "body": "firstimg = check_img(filelist[<NUM_LIT:0>])<EOL>mask     = np.zeros_like(firstimg.get_data())<EOL>try:<EOL><INDENT>for volf in filelist:<EOL><INDENT>roiimg = check_img(volf)<EOL>check_img_compatibility(firstimg, roiimg)<EOL>mask  += get_img_data(roiimg)<EOL><DEDENT><DEDENT>except Exception as exc:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(repr_imgs(firstimg), repr_imgs(volf))) from exc<EOL><DEDENT>else:<EOL><INDENT>return as_ndarray(mask > <NUM_LIT:0>, dtype=bool)<EOL><DEDENT>", "docstring": "Creates a binarised mask with the union of the files in filelist.\n\nParameters\n----------\nfilelist: list of img-like object or boyle.nifti.NeuroImage or str\n    List of paths to the volume files containing the ROIs.\n    Can either be:\n    - a file path to a Nifti image\n    - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n    If niimg is a string, consider it as a path to Nifti image and\n    call nibabel.load on it. If it is an object, check if get_data()\n    and get_affine() methods are present, raise TypeError otherwise.\n\nReturns\n-------\nndarray of bools\n    Mask volume\n\nRaises\n------\nValueError", "id": "f4089:m3"}
{"signature": "def _apply_mask_to_4d_data(vol_data, mask_img):", "body": "mask_data = load_mask_data(mask_img)<EOL>return vol_data[mask_data], mask_data<EOL>", "docstring": "Parameters\n----------\nvol_data:\nmask_img:\n\nReturns\n-------\nmasked_data, mask_indices\n\nmasked_data: numpy.ndarray\n    2D array of series with shape (image number, voxel number)\n\nNote\n----\nvol_data and mask_file must have the same shape.", "id": "f4089:m6"}
{"signature": "def binarise(image, threshold=<NUM_LIT:0>):", "body": "img = check_img(image)<EOL>return img.get_data() > threshold<EOL>", "docstring": "Binarise image with the given threshold\n\n    Parameters\n    ----------\n    image: img-like object or boyle.nifti.NeuroImage or str\n        Can either be:\n        - a file path to a Nifti image\n        - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n        If niimg is a string, consider it as a path to Nifti image and\n        call nibabel.load on it. If it is an object, check if get_data()\n        and get_affine() methods are present, raise TypeError otherwise.\n\n    threshold: float\n\n    Returns\n    -------\n    binarised_img: numpy.ndarray\n        Mask volume", "id": "f4089:m2"}
{"signature": "def load_mask(image, allow_empty=True):", "body": "img    = check_img(image, make_it_3d=True)<EOL>values = np.unique(img.get_data())<EOL>if len(values) == <NUM_LIT:1>:<EOL><INDENT>if values[<NUM_LIT:0>] == <NUM_LIT:0> and not allow_empty:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif len(values) == <NUM_LIT:2>:<EOL><INDENT>if <NUM_LIT:0> not in values:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'.format(values))<EOL><DEDENT><DEDENT>elif len(values) != <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'.format(values))<EOL><DEDENT>return nib.Nifti1Image(as_ndarray(get_img_data(img), dtype=bool), img.get_affine(), img.get_header())<EOL>", "docstring": "Load a Nifti mask volume.\n\n    Parameters\n    ----------\n    image: img-like object or boyle.nifti.NeuroImage or str\n        Can either be:\n        - a file path to a Nifti image\n        - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n        If niimg is a string, consider it as a path to Nifti image and\n        call nibabel.load on it. If it is an object, check if get_data()\n        and get_affine() methods are present, raise TypeError otherwise.\n\n    allow_empty: boolean, optional\n        Allow loading an empty mask (full of 0 values)\n\n    Returns\n    -------\n    nibabel.Nifti1Image with boolean data.", "id": "f4089:m0"}
{"signature": "def vector_to_volume(arr, mask, order='<STR_LIT:C>'):", "body": "if mask.dtype != np.bool:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if arr.ndim != <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if arr.ndim == <NUM_LIT:2> and any(v == <NUM_LIT:1> for v in arr.shape):<EOL><INDENT>log.debug('<STR_LIT>'.format(arr.shape))<EOL>arr = arr.flatten()<EOL><DEDENT>volume = np.zeros(mask.shape[:<NUM_LIT:3>], dtype=arr.dtype, order=order)<EOL>volume[mask] = arr<EOL>return volume<EOL>", "docstring": "Transform a given vector to a volume. This is a reshape function for\n    3D flattened and maybe masked vectors.\n\n    Parameters\n    ----------\n    arr: np.array\n        1-Dimensional array\n\n    mask: numpy.ndarray\n        Mask image. Must have 3 dimensions, bool dtype.\n\n    Returns\n    -------\n    np.ndarray", "id": "f4089:m7"}
{"signature": "def matrix_to_4dvolume(arr, mask, order='<STR_LIT:C>'):", "body": "if mask.dtype != np.bool:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if arr.ndim != <NUM_LIT:2>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if mask.sum() != arr.shape[<NUM_LIT:0>]:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(mask.sum(), arr.shape))<EOL><DEDENT>data = np.zeros(mask.shape + (arr.shape[<NUM_LIT:1>],), dtype=arr.dtype,<EOL>order=order)<EOL>data[mask, :] = arr<EOL>return data<EOL>", "docstring": "Transform a given vector to a volume. This is a reshape function for\n    4D flattened masked matrices where the second dimension of the matrix\n    corresponds to the original 4th dimension.\n\n    Parameters\n    ----------\n    arr: numpy.array\n        2D numpy.array\n\n    mask: numpy.ndarray\n        Mask image. Must have 3 dimensions, bool dtype.\n\n    dtype: return type\n        If None, will get the type from vector\n\n    Returns\n    -------\n    data: numpy.ndarray\n        Unmasked data.\n        Shape: (mask.shape[0], mask.shape[1], mask.shape[2], X.shape[1])", "id": "f4089:m8"}
{"signature": "def smooth_volume(image, smoothmm):", "body": "return smooth_imgs(image, smoothmm)<EOL>", "docstring": "See smooth_img.", "id": "f4090:m2"}
{"signature": "def sigma2fwhm(sigma):", "body": "sigma = np.asarray(sigma)<EOL>return np.sqrt(<NUM_LIT:8> * np.log(<NUM_LIT:2>)) * sigma<EOL>", "docstring": "Convert a sigma in a Gaussian kernel to a FWHM value.\n\n    Parameters\n    ----------\n    sigma: float or numpy.array\n       sigma value or values\n\n    Returns\n    -------\n    fwhm: float or numpy.array\n       fwhm values corresponding to `sigma` values", "id": "f4090:m1"}
{"signature": "def _smooth_data_array(arr, affine, fwhm, copy=True):", "body": "if arr.dtype.kind == '<STR_LIT:i>':<EOL><INDENT>if arr.dtype == np.int64:<EOL><INDENT>arr = arr.astype(np.float64)<EOL><DEDENT>else:<EOL><INDENT>arr = arr.astype(np.float32)<EOL><DEDENT><DEDENT>if copy:<EOL><INDENT>arr = arr.copy()<EOL><DEDENT>arr[np.logical_not(np.isfinite(arr))] = <NUM_LIT:0><EOL>try:<EOL><INDENT>affine = affine[:<NUM_LIT:3>, :<NUM_LIT:3>]<EOL>fwhm_sigma_ratio = np.sqrt(<NUM_LIT:8> * np.log(<NUM_LIT:2>))<EOL>vox_size         = np.sqrt(np.sum(affine ** <NUM_LIT:2>, axis=<NUM_LIT:0>))<EOL>sigma            = fwhm / (fwhm_sigma_ratio * vox_size)<EOL>for n, s in enumerate(sigma):<EOL><INDENT>ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return arr<EOL><DEDENT>", "docstring": "Smooth images with a a Gaussian filter.\n\n    Apply a Gaussian filter along the three first dimensions of arr.\n\n    Parameters\n    ----------\n    arr: numpy.ndarray\n        3D or 4D array, with image number as last dimension.\n\n    affine: numpy.ndarray\n        Image affine transformation matrix for image.\n\n    fwhm: scalar, numpy.ndarray\n        Smoothing kernel size, as Full-Width at Half Maximum (FWHM) in millimeters.\n        If a scalar is given, kernel width is identical on all three directions.\n        A numpy.ndarray must have 3 elements, giving the FWHM along each axis.\n\n    copy: bool\n        if True, will make a copy of the input array. Otherwise will directly smooth the input array.\n\n    Returns\n    -------\n    smooth_arr: numpy.ndarray", "id": "f4090:m3"}
{"signature": "def fwhm2sigma(fwhm):", "body": "fwhm = np.asarray(fwhm)<EOL>return fwhm / np.sqrt(<NUM_LIT:8> * np.log(<NUM_LIT:2>))<EOL>", "docstring": "Convert a FWHM value to sigma in a Gaussian kernel.\n\n    Parameters\n    ----------\n    fwhm: float or numpy.array\n       fwhm value or values\n\n    Returns\n    -------\n    fwhm: float or numpy.array\n       sigma values", "id": "f4090:m0"}
{"signature": "def smooth_img(imgs, fwhm, **kwargs):", "body": "<EOL>if hasattr(imgs, \"<STR_LIT>\")and not isinstance(imgs, string_types):<EOL><INDENT>single_img = False<EOL><DEDENT>else:<EOL><INDENT>single_img = True<EOL>imgs = [imgs]<EOL><DEDENT>ret = []<EOL>for img in imgs:<EOL><INDENT>img = check_niimg(img)<EOL>affine = img.get_affine()<EOL>filtered = _smooth_array(img.get_data(), affine, fwhm=fwhm,<EOL>ensure_finite=True, copy=True, **kwargs)<EOL>ret.append(new_img_like(img, filtered, affine, copy_header=True))<EOL><DEDENT>if single_img:<EOL><INDENT>return ret[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return ret<EOL><DEDENT>", "docstring": "Smooth images by applying a Gaussian filter.\n    Apply a Gaussian filter along the three first dimensions of arr.\n    In all cases, non-finite values in input image are replaced by zeros.\n\n    This is copied and slightly modified from nilearn:\n    https://github.com/nilearn/nilearn/blob/master/nilearn/image/image.py\n    Added the **kwargs argument.\n\n    Parameters\n    ==========\n    imgs: Niimg-like object or iterable of Niimg-like objects\n        See http://nilearn.github.io/manipulating_images/manipulating_images.html#niimg.\n        Image(s) to smooth.\n    fwhm: scalar, numpy.ndarray, 'fast' or None\n        Smoothing strength, as a Full-Width at Half Maximum, in millimeters.\n        If a scalar is given, width is identical on all three directions.\n        A numpy.ndarray must have 3 elements, giving the FWHM along each axis.\n        If fwhm == 'fast', a fast smoothing will be performed with\n        a filter [0.2, 1, 0.2] in each direction and a normalisation\n        to preserve the scale.\n        If fwhm is None, no filtering is performed (useful when just removal\n        of non-finite values is needed)\n    Returns\n    =======\n    filtered_img: nibabel.Nifti1Image or list of.\n        Input image, filtered. If imgs is an iterable, then filtered_img is a\n        list.", "id": "f4090:m6"}
{"signature": "def are_compatible_imgs(one_img, another_img):", "body": "try:<EOL><INDENT>check_img_compatibility(one_img, another_img)<EOL><DEDENT>except :<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Return true if one_img and another_img have the same shape.\n    False otherwise.\n    If both are nibabel.Nifti1Image will also check for affine matrices.\n\n    Parameters\n    ----------\n    one_img: nibabel.Nifti1Image or np.ndarray\n\n    another_img: nibabel.Nifti1Image  or np.ndarray\n\n    Returns\n    -------\n    bool", "id": "f4091:m4"}
{"signature": "def have_same_affine(one_img, another_img, only_check_3d=False):", "body": "img1 = check_img(one_img)<EOL>img2 = check_img(another_img)<EOL>ndim1 = len(img1.shape)<EOL>ndim2 = len(img2.shape)<EOL>if ndim1 < <NUM_LIT:3>:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(repr_imgs(img1), ndim1))<EOL><DEDENT>if ndim2 < <NUM_LIT:3>:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(repr_imgs(img2), ndim1))<EOL><DEDENT>affine1 = img1.get_affine()<EOL>affine2 = img2.get_affine()<EOL>if only_check_3d:<EOL><INDENT>affine1 = affine1[:<NUM_LIT:3>, :<NUM_LIT:3>]<EOL>affine2 = affine2[:<NUM_LIT:3>, :<NUM_LIT:3>]<EOL><DEDENT>try:<EOL><INDENT>return np.allclose(affine1, affine2)<EOL><DEDENT>except ValueError:<EOL><INDENT>return False<EOL><DEDENT>except:<EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Return True if the affine matrix of one_img is close to the affine matrix of another_img.\n    False otherwise.\n\n    Parameters\n    ----------\n    one_img: nibabel.Nifti1Image\n\n    another_img: nibabel.Nifti1Image\n\n    only_check_3d: bool\n        If True will extract only the 3D part of the affine matrices when they have more dimensions.\n\n    Returns\n    -------\n    bool\n\n    Raises\n    ------\n    ValueError", "id": "f4091:m6"}
{"signature": "def get_shape(img):", "body": "if hasattr(img, '<STR_LIT>'):<EOL><INDENT>shape = img.shape<EOL><DEDENT>else:<EOL><INDENT>shape = img.get_data().shape<EOL><DEDENT>return shape<EOL>", "docstring": "Return the shape of img.\n\n    Paramerers\n    -----------\n    img:\n\n    Returns\n    -------\n    shape: tuple", "id": "f4091:m2"}
{"signature": "def have_same_geometry(fname1, fname2):", "body": "img1shape = nib.load(fname1).get_shape()<EOL>img2shape = nib.load(fname2).get_shape()<EOL>return have_same_shape(img1shape, img2shape)<EOL>", "docstring": "@param fname1: string\nFile path of an image\n\n@param fname2: string\nFile path of an image\n\n@return: bool\nTrue if both have the same geometry", "id": "f4091:m12"}
{"signature": "def have_same_shape(array1, array2, nd_to_check=None):", "body": "shape1 = array1.shape<EOL>shape2 = array2.shape<EOL>if nd_to_check is not None:<EOL><INDENT>if len(shape1) < nd_to_check:<EOL><INDENT>msg = '<STR_LIT>'.format(shape1)<EOL>raise ValueError(msg)<EOL><DEDENT>elif len(shape2) < nd_to_check:<EOL><INDENT>msg = '<STR_LIT>'.format(shape2)<EOL>raise ValueError(msg)<EOL><DEDENT>shape1 = shape1[:nd_to_check]<EOL>shape2 = shape2[:nd_to_check]<EOL><DEDENT>return shape1 == shape2<EOL>", "docstring": "Returns true if array1 and array2 have the same shapes, false\notherwise.\n\nParameters\n----------\narray1: numpy.ndarray\n\narray2: numpy.ndarray\n\nnd_to_check: int\n    Number of the dimensions to check, i.e., if == 3 then will check only the 3 first numbers of array.shape.\nReturns\n-------\nbool", "id": "f4091:m11"}
{"signature": "def xfm_atlas_to_functional(atlas_filepath, anatbrain_filepath, meanfunc_filepath,<EOL>atlas2anat_nonlin_xfm_filepath, is_atlas2anat_inverted,<EOL>anat2func_lin_xfm_filepath,<EOL>atlasinanat_out_filepath, atlasinfunc_out_filepath,<EOL>interp='<STR_LIT>', rewrite=True, parallel=False):", "body": "if is_atlas2anat_inverted:<EOL><INDENT>anat_to_mni_nl_inv = atlas2anat_nonlin_xfm_filepath<EOL><DEDENT>else:<EOL><INDENT>output_dir         = op.abspath   (op.dirname(atlasinanat_out_filepath))<EOL>ext                = get_extension(atlas2anat_nonlin_xfm_filepath)<EOL>anat_to_mni_nl_inv = op.join(output_dir, remove_ext(op.basename(atlas2anat_nonlin_xfm_filepath)) + '<STR_LIT>' + ext)<EOL><DEDENT>invwarp_cmd   = op.join('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>applywarp_cmd = op.join('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>fslsub_cmd    = op.join('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>if parallel:<EOL><INDENT>invwarp_cmd   = fslsub_cmd + '<STR_LIT:U+0020>' + invwarp_cmd<EOL>applywarp_cmd = fslsub_cmd + '<STR_LIT:U+0020>' + applywarp_cmd<EOL><DEDENT>if rewrite or (not is_atlas2anat_inverted and not op.exists(anat_to_mni_nl_inv)):<EOL><INDENT>log.debug('<STR_LIT>'.format(anat_to_mni_nl_inv))<EOL>cmd  = invwarp_cmd + '<STR_LIT:U+0020>'<EOL>cmd += '<STR_LIT>'.format(atlas2anat_nonlin_xfm_filepath)<EOL>cmd += '<STR_LIT>'.format(anat_to_mni_nl_inv)<EOL>cmd += '<STR_LIT>'.format(anatbrain_filepath)<EOL>log.debug('<STR_LIT>'.format(cmd))<EOL>check_call(cmd)<EOL><DEDENT>if rewrite or not op.exists(atlasinanat_out_filepath):<EOL><INDENT>log.debug('<STR_LIT>'.format(atlasinanat_out_filepath))<EOL>cmd  = applywarp_cmd + '<STR_LIT:U+0020>'<EOL>cmd += '<STR_LIT>'.format(atlas_filepath)<EOL>cmd += '<STR_LIT>'.format(anatbrain_filepath)<EOL>cmd += '<STR_LIT>'.format(anat_to_mni_nl_inv)<EOL>cmd += '<STR_LIT>'.format(interp)<EOL>cmd += '<STR_LIT>'.format(atlasinanat_out_filepath)<EOL>log.debug('<STR_LIT>'.format(cmd))<EOL>check_call(cmd)<EOL><DEDENT>if rewrite or not 
op.exists(atlasinfunc_out_filepath):<EOL><INDENT>log.debug('<STR_LIT>'.format(atlasinfunc_out_filepath))<EOL>cmd  = applywarp_cmd + '<STR_LIT:U+0020>'<EOL>cmd += '<STR_LIT>'.format(atlasinanat_out_filepath)<EOL>cmd += '<STR_LIT>'.format(meanfunc_filepath)<EOL>cmd += '<STR_LIT>'.format(anat2func_lin_xfm_filepath)<EOL>cmd += '<STR_LIT>'.format(interp)<EOL>cmd += '<STR_LIT>'.format(atlasinfunc_out_filepath)<EOL>log.debug('<STR_LIT>'.format(cmd))<EOL>check_call(cmd)<EOL><DEDENT>", "docstring": "Call FSL tools to apply transformations to a given atlas to a functional image.\n    Given the transformation matrices.\n\n    Parameters\n    ----------\n    atlas_filepath: str\n        Path to the 3D atlas volume file.\n\n    anatbrain_filepath: str\n        Path to the anatomical brain volume file (skull-stripped and registered to the same space as the atlas,\n        e.g., MNI).\n\n    meanfunc_filepath: str\n        Path to the average functional image to be used as reference in the last applywarp step.\n\n    atlas2anat_nonlin_xfm_filepath: str\n        Path to the atlas to anatomical brain linear transformation .mat file.\n        If you have the inverse transformation, i.e., anatomical brain to atlas, set is_atlas2anat_inverted to True.\n\n    is_atlas2anat_inverted: bool\n        If False will have to calculate the inverse atlas2anat transformation to apply the transformations.\n        This step will be performed with FSL invwarp.\n\n    anat2func_lin_xfm_filepath: str\n        Path to the anatomical to functional .mat linear transformation file.\n\n    atlasinanat_out_filepath: str\n        Path to output file which will contain the 3D atlas in the subject anatomical space.\n\n    atlasinfunc_out_filepath: str\n        Path to output file which will contain the 3D atlas in the subject functional space.\n\n    verbose: bool\n        If verbose will show DEBUG log info.\n\n    rewrite: bool\n        If True will re-run all the commands overwriting any existing file. 
Otherwise will check if\n        each file exists and if it does won't run the command.\n\n    parallel: bool\n        If True will launch the commands using ${FSLDIR}/fsl_sub to use the cluster infrastructure you have setup\n        with FSL (SGE or HTCondor).", "id": "f4092:m0"}
{"signature": "@nifti_out<EOL>def div_img(img1, div2):", "body": "if is_img(div2):<EOL><INDENT>return img1.get_data()/div2.get_data()<EOL><DEDENT>elif isinstance(div2, (float, int)):<EOL><INDENT>return img1.get_data()/div2<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError('<STR_LIT>'<EOL>'<STR_LIT>'.format(type(img1),<EOL>img1,<EOL>type(div2),<EOL>div2))<EOL><DEDENT>", "docstring": "Pixelwise division or divide by a number", "id": "f4093:m8"}
{"signature": "@nifti_out<EOL>def positive_img(img):", "body": "bool_img = read_img(img).get_data() > <NUM_LIT:0><EOL>return bool_img.astype(int)<EOL>", "docstring": "Return an image with the positive voxels of the data of `img`.", "id": "f4093:m4"}
{"signature": "def filter_icc(icc, mask=None, thr=<NUM_LIT:2>, zscore=True, mode=\"<STR_LIT:+>\"):", "body": "if zscore:<EOL><INDENT>icc_filt = thr_img(icc_img_to_zscore(icc), thr=thr, mode=mode)<EOL><DEDENT>else:<EOL><INDENT>icc_filt = thr_img(icc, thr=thr, mode=mode)<EOL><DEDENT>if mask is not None:<EOL><INDENT>icc_filt = apply_mask(icc_filt, mask)<EOL><DEDENT>return icc_filt<EOL>", "docstring": "Threshold then mask an IC correlation map.\n    Parameters\n    ----------\n    icc: img-like\n        The 'raw' ICC map.\n\n    mask: img-like\n        If not None. Will apply this masks in the end of the process.\n\n    thr: float\n        The threshold value.\n\n    zscore: bool\n        If True will calculate the z-score of the ICC before thresholding.\n\n    mode: str\n        Choices: '+' for positive threshold,\n                 '+-' for positive and negative threshold and\n                 '-' for negative threshold.\n\n    Returns\n    -------\n    icc_filt: nibabel.NiftiImage\n        Thresholded and masked ICC.", "id": "f4093:m13"}
{"signature": "@nifti_out<EOL>def thr_img(img, thr=<NUM_LIT>, mode='<STR_LIT:+>'):", "body": "vol  = read_img(img).get_data()<EOL>if mode == '<STR_LIT:+>':<EOL><INDENT>mask = vol > thr<EOL><DEDENT>elif mode == '<STR_LIT>' or mode == '<STR_LIT>':<EOL><INDENT>mask = np.abs(vol) > thr<EOL><DEDENT>elif mode == '<STR_LIT:->':<EOL><INDENT>mask = vol < -thr<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(mode))<EOL><DEDENT>return vol * mask<EOL>", "docstring": "Use the given magic function name `func` to threshold with value `thr`\n    the data of `img` and return a new nibabel.Nifti1Image.\n    Parameters\n    ----------\n    img: img-like\n\n    thr: float or int\n        The threshold value.\n\n    mode: str\n        Choices: '+' for positive threshold,\n                 '+-' for positive and negative threshold and\n                 '-' for negative threshold.\n    Returns\n    -------\n    thr_img: nibabel.Nifti1Image\n        Thresholded image", "id": "f4093:m2"}
{"signature": "@nifti_out<EOL>def abs_img(img):", "body": "bool_img = np.abs(read_img(img).get_data())<EOL>return bool_img.astype(int)<EOL>", "docstring": "Return an image with the binarised version of the data of `img`.", "id": "f4093:m10"}
{"signature": "@nifti_out<EOL>def negative_img(img):", "body": "bool_img = read_img(img).get_data() < <NUM_LIT:0><EOL>return bool_img.astype(int)<EOL>", "docstring": "Return an image with the negative voxels of the data of `img`.", "id": "f4093:m5"}
{"signature": "def get_nii_info(img_file):", "body": "warnings.warn(\"<STR_LIT>\",<EOL>DeprecationWarning)<EOL>return get_img_info(img_file)<EOL>", "docstring": "See get_img_info", "id": "f4094:m1"}
{"signature": "def get_nii_data(nii_file):", "body": "warnings.warn(\"<STR_LIT>\",<EOL>DeprecationWarning)<EOL>return get_img_data(nii_file)<EOL>", "docstring": "See get_img_data", "id": "f4094:m2"}
{"signature": "def new_img_like(ref_niimg, data, affine=None, copy_header=False):", "body": "<EOL>if not (hasattr(ref_niimg, '<STR_LIT>')<EOL>and hasattr(ref_niimg,'<STR_LIT>')):<EOL><INDENT>if isinstance(ref_niimg, _basestring):<EOL><INDENT>ref_niimg = nib.load(ref_niimg)<EOL><DEDENT>elif operator.isSequenceType(ref_niimg):<EOL><INDENT>ref_niimg = nib.load(ref_niimg[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(('<STR_LIT>'<EOL>'<STR_LIT>') % ref_niimg )<EOL><DEDENT><DEDENT>if affine is None:<EOL><INDENT>affine = ref_niimg.get_affine()<EOL><DEDENT>if data.dtype == bool:<EOL><INDENT>default_dtype = np.int8<EOL>if (LooseVersion(nib.__version__) >= LooseVersion('<STR_LIT>') and<EOL>isinstance(ref_niimg, nib.freesurfer.mghformat.MGHImage)):<EOL><INDENT>default_dtype = np.uint8<EOL><DEDENT>data = as_ndarray(data, dtype=default_dtype)<EOL><DEDENT>header = None<EOL>if copy_header:<EOL><INDENT>header = copy.copy(ref_niimg.get_header())<EOL>header['<STR_LIT>'] = <NUM_LIT:0.><EOL>header['<STR_LIT>'] = <NUM_LIT:0.><EOL>header['<STR_LIT>'] = <NUM_LIT:0.><EOL>header['<STR_LIT>'] = np.max(data) if data.size > <NUM_LIT:0> else <NUM_LIT:0.><EOL>header['<STR_LIT>'] = np.min(data) if data.size > <NUM_LIT:0> else <NUM_LIT:0.><EOL><DEDENT>return ref_niimg.__class__(data, affine, header=header)<EOL>", "docstring": "Create a new image of the same class as the reference image\n\n    Parameters\n    ----------\n    ref_niimg: image\n        Reference image. The new image will be of the same type.\n\n    data: numpy array\n        Data to be stored in the image\n\n    affine: 4x4 numpy array, optional\n        Transformation matrix\n\n    copy_header: boolean, optional\n        Indicated if the header of the reference image should be used to\n        create the new image\n\n    Returns\n    -------\n    new_img: image\n        A loaded image with the same type (and header) as the reference image.", "id": "f4094:m9"}
{"signature": "def _make_it_3d(img):", "body": "shape = img.shape<EOL>if len(shape) == <NUM_LIT:3>:<EOL><INDENT>return img<EOL><DEDENT>elif len(shape) == <NUM_LIT:4> and shape[<NUM_LIT:3>] == <NUM_LIT:1>:<EOL><INDENT>return img[:, :, :, <NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(shape))<EOL><DEDENT>", "docstring": "Enforce that img is a 3D img-like object, if it is not, raise a TypeError.\n    i.e., remove dimensions of size 1.\n\n    Parameters\n    ----------\n    img: numpy.ndarray\n        Image data array\n\n    Returns\n    -------\n    3D numpy ndarray object", "id": "f4096:m1"}
{"signature": "def write_mhd_file(filename, data, shape=None, meta_dict=None):", "body": "<EOL>ext = get_extension(filename)<EOL>fname = op.basename(filename)<EOL>if ext != '<STR_LIT>' or ext != '<STR_LIT>':<EOL><INDENT>mhd_filename = fname + '<STR_LIT>'<EOL>raw_filename = fname + '<STR_LIT>'<EOL><DEDENT>elif ext == '<STR_LIT>':<EOL><INDENT>mhd_filename = fname<EOL>raw_filename = remove_ext(fname) + '<STR_LIT>'<EOL><DEDENT>elif ext == '<STR_LIT>':<EOL><INDENT>mhd_filename = remove_ext(fname) + '<STR_LIT>'<EOL>raw_filename = fname<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'.format(ext, filename))<EOL><DEDENT>if meta_dict is None:<EOL><INDENT>meta_dict = {}<EOL><DEDENT>if shape is None:<EOL><INDENT>shape = data.shape<EOL><DEDENT>meta_dict['<STR_LIT>']             = meta_dict.get('<STR_LIT>',             '<STR_LIT>')<EOL>meta_dict['<STR_LIT>']             = meta_dict.get('<STR_LIT>',             '<STR_LIT:True>' )<EOL>meta_dict['<STR_LIT>'] = meta_dict.get('<STR_LIT>', '<STR_LIT:False>')<EOL>meta_dict['<STR_LIT>']            = meta_dict.get('<STR_LIT>',            NUMPY_TO_MHD_TYPE[data.dtype.type])<EOL>meta_dict['<STR_LIT>']                  = meta_dict.get('<STR_LIT>',                  str(len(shape)))<EOL>meta_dict['<STR_LIT>']                = meta_dict.get('<STR_LIT>',                '<STR_LIT:U+0020>'.join([str(i) for i in shape]))<EOL>meta_dict['<STR_LIT>']        = meta_dict.get('<STR_LIT>',        raw_filename)<EOL>mhd_filename = op.join(op.dirname(filename), mhd_filename)<EOL>raw_filename = op.join(op.dirname(filename), raw_filename)<EOL>write_meta_header(mhd_filename, meta_dict)<EOL>dump_raw_data(raw_filename, data)<EOL>return mhd_filename, raw_filename<EOL>", "docstring": "Write the `data` and `meta_dict` in two files with names\n    that use `filename` as a prefix.\n\n    Parameters\n    ----------\n    filename: str\n        Path to the output file.\n        This is going to be used as a preffix.\n        Two files will be 
created, one with a '.mhd' extension\n        and another with '.raw'. If `filename` has any of these already\n        they will be taken into account to build the filenames.\n\n    data: numpy.ndarray\n        n-dimensional image data array.\n\n    shape: tuple\n        Tuple describing the shape of `data`\n        Default: data.shape\n\n    meta_dict: dict\n        Dictionary with the fields of the metadata .mhd file\n        Default: {}\n\n    Returns\n    -------\n    mhd_filename: str\n        Path to the .mhd file\n\n    raw_filename: str\n        Path to the .raw file", "id": "f4098:m2"}
{"signature": "def write_meta_header(filename, meta_dict):", "body": "header = '<STR_LIT>'<EOL>for tag in MHD_TAGS:<EOL><INDENT>if tag in meta_dict.keys():<EOL><INDENT>header += '<STR_LIT>'.format(tag, meta_dict[tag])<EOL><DEDENT><DEDENT>with open(filename, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(header)<EOL><DEDENT>", "docstring": "Write the content of the `meta_dict` into `filename`.\n\n    Parameters\n    ----------\n    filename: str\n        Path to the output file\n\n    meta_dict: dict\n        Dictionary with the fields of the metadata .mhd file", "id": "f4098:m0"}
{"signature": "def copy_mhd_and_raw(src, dst):", "body": "<EOL>if not op.exists(src):<EOL><INDENT>raise IOError('<STR_LIT>'.format(src))<EOL><DEDENT>ext = get_extension(src)<EOL>if ext != '<STR_LIT>':<EOL><INDENT>msg = '<STR_LIT>'.format(src)<EOL>raise ValueError(msg)<EOL><DEDENT>meta_src = _read_meta_header(src)<EOL>src_raw = meta_src['<STR_LIT>']<EOL>if not op.isabs(src_raw):<EOL><INDENT>src_raw = op.join(op.dirname(src), src_raw)<EOL><DEDENT>if op.isdir(dst):<EOL><INDENT>shutil.copyfile(src, dst)<EOL>shutil.copyfile(src_raw, dst)<EOL>return dst<EOL><DEDENT>dst_raw = op.join(op.dirname(dst), remove_ext(op.basename(dst))) + '<STR_LIT>'<EOL>if get_extension(dst) != '<STR_LIT>':<EOL><INDENT>dst += '<STR_LIT>'<EOL><DEDENT>log.debug('<STR_LIT>'.format(src,     dst))<EOL>log.debug('<STR_LIT>'.format(src_raw, dst_raw))<EOL>shutil.copyfile(src, dst)<EOL>shutil.copyfile(src_raw, dst_raw)<EOL>if op.basename(dst) != op.basename(src):<EOL><INDENT>log.debug('<STR_LIT>'.format(dst, src_raw,<EOL>op.basename(dst_raw)))<EOL>meta_dst = _read_meta_header(dst)<EOL>meta_dst['<STR_LIT>'] = op.basename(dst_raw)<EOL>write_meta_header(dst, meta_dst)<EOL><DEDENT>return dst<EOL>", "docstring": "Copy .mhd and .raw files to dst.\n\n    If dst is a folder, won't change the file, but if dst is another filepath,\n    will modify the ElementDataFile field in the .mhd to point to the\n    new renamed .raw file.\n\n    Parameters\n    ----------\n    src: str\n        Path to the .mhd file to be copied\n\n    dst: str\n        Path to the destination of the .mhd and .raw files.\n        If a new file name is given, the extension will be ignored.\n\n    Returns\n    -------\n    dst: str", "id": "f4098:m3"}
{"signature": "def dump_raw_data(filename, data):", "body": "if data.ndim == <NUM_LIT:3>:<EOL><INDENT>data = data.reshape([data.shape[<NUM_LIT:0>], data.shape[<NUM_LIT:1>]*data.shape[<NUM_LIT:2>]])<EOL><DEDENT>a = array.array('<STR_LIT:f>')<EOL>for o in data:<EOL><INDENT>a.fromlist(list(o.flatten()))<EOL><DEDENT>with open(filename, '<STR_LIT:wb>') as rawf:<EOL><INDENT>a.tofile(rawf)<EOL><DEDENT>", "docstring": "Write the data into a raw format file. Big endian is always used.\n\n    Parameters\n    ----------\n    filename: str\n        Path to the output file\n\n    data: numpy.ndarray\n        n-dimensional image data array.", "id": "f4098:m1"}
{"signature": "def load_raw_data_with_mhd(filename):", "body": "meta_dict = _read_meta_header(filename)<EOL>dim       = int(meta_dict['<STR_LIT>'])<EOL>assert (meta_dict['<STR_LIT>'] in MHD_TO_NUMPY_TYPE)<EOL>arr = [int(i) for i in meta_dict['<STR_LIT>'].split()]<EOL>volume = reduce(lambda x, y: x*y, arr[<NUM_LIT:0>:dim-<NUM_LIT:1>], <NUM_LIT:1>)<EOL>pwd       = op.dirname(filename)<EOL>raw_file  = meta_dict['<STR_LIT>']<EOL>data_file = op.join(pwd, raw_file)<EOL>ndtype    = MHD_TO_NUMPY_TYPE[meta_dict['<STR_LIT>']]<EOL>arrtype   = NDARRAY_TO_ARRAY_TYPE[ndtype]<EOL>with open(data_file, '<STR_LIT:rb>') as fid:<EOL><INDENT>binvalues = array.array(arrtype)<EOL>binvalues.fromfile(fid, volume*arr[dim-<NUM_LIT:1>])<EOL><DEDENT>data = np.array  (binvalues, ndtype)<EOL>data = np.reshape(data, (arr[dim-<NUM_LIT:1>], volume))<EOL>if dim >= <NUM_LIT:3>:<EOL><INDENT>dimensions = [int(i) for i in meta_dict['<STR_LIT>'].split()]<EOL>data       = data.reshape(dimensions)<EOL><DEDENT>return data, meta_dict<EOL>", "docstring": "Return a dictionary of meta data from meta header file.\n\n    Parameters\n    ----------\n    filename: str\n        Path to a .mhd file\n\n    Returns\n    -------\n    data: numpy.ndarray\n        n-dimensional image data array.\n\n    meta_dict: dict\n        A dictionary with the .mhd header content.", "id": "f4099:m1"}
{"signature": "def tabulate(self, tablefmt='<STR_LIT>'):", "body": "return tabulate(self, tablefmt=tablefmt)<EOL>", "docstring": ":param tablefmt: string\n Supported table formats are:\n\"plain\"\n\"simple\"\n\"grid\"\n\"pipe\"\n\"orgtbl\"\n\"rst\"\n\"mediawiki\"\n\"latex\"\n\n:return: tabulate\nTabulated content", "id": "f4100:c0:m1"}
{"signature": "def tabulate(self, tablefmt='<STR_LIT>'):", "body": "return tabulate(list(self.items()), tablefmt=tablefmt)<EOL>", "docstring": ":param tablefmt: string\n Supported table formats are:\n\"plain\"\n\"simple\"\n\"grid\"\n\"pipe\"\n\"orgtbl\"\n\"rst\"\n\"mediawiki\"\n\"latex\"\n\n:return: tabulate\nTabulated content", "id": "f4100:c1:m1"}
{"signature": "def apply_smoothing(self, smooth_fwhm):", "body": "if smooth_fwhm <= <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>old_smooth_fwhm   = self._smooth_fwhm<EOL>self._smooth_fwhm = smooth_fwhm<EOL>try:<EOL><INDENT>data = self.get_data(smoothed=True, masked=True, safe_copy=True)<EOL><DEDENT>except ValueError as ve:<EOL><INDENT>self._smooth_fwhm = old_smooth_fwhm<EOL>raise<EOL><DEDENT>else:<EOL><INDENT>self._smooth_fwhm = smooth_fwhm<EOL>return data<EOL><DEDENT>", "docstring": "Set self._smooth_fwhm and then smooths the data.\n        See boyle.nifti.smooth.smooth_imgs.\n\n        Returns\n        -------\n        the smoothed data deepcopied.", "id": "f4101:c1:m16"}
{"signature": "def apply_mask(self, mask_img):", "body": "self.set_mask(mask_img)<EOL>return self.get_data(masked=True, smoothed=True, safe_copy=True)<EOL>", "docstring": "First set_mask and the get_masked_data.\n\n        Parameters\n        ----------\n        mask_img:  nifti-like image, NeuroImage or str\n            3D mask array: True where a voxel should be used.\n            Can either be:\n            - a file path to a Nifti image\n            - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n            If niimg is a string, consider it as a path to Nifti image and\n            call nibabel.load on it. If it is an object, check if get_data()\n            and get_affine() methods are present, raise TypeError otherwise.\n\n        Returns\n        -------\n        The masked data deepcopied", "id": "f4101:c1:m13"}
{"signature": "def to_file(self, outpath):", "body": "if not self.has_mask() and not self.is_smoothed():<EOL><INDENT>save_niigz(outpath, self.img)<EOL><DEDENT>else:<EOL><INDENT>save_niigz(outpath, self.get_data(masked=True, smoothed=True),<EOL>self.get_header(), self.get_affine())<EOL><DEDENT>", "docstring": "Save this object instance in outpath.\n\n        Parameters\n        ----------\n        outpath: str\n            Output file path", "id": "f4101:c1:m19"}
{"signature": "def open_volume_file(filepath):", "body": "<EOL>if not op.exists(filepath):<EOL><INDENT>raise IOError('<STR_LIT>'.format(filepath))<EOL><DEDENT>def open_nifti_file(filepath):<EOL><INDENT>return NiftiImage(filepath)<EOL><DEDENT>def open_mhd_file(filepath):<EOL><INDENT>return MedicalImage(filepath)<EOL>vol_data, hdr_data = load_raw_data_with_mhd(filepath)<EOL>return vol_data, hdr_data<EOL><DEDENT>def open_mha_file(filepath):<EOL><INDENT>raise NotImplementedError('<STR_LIT>')<EOL><DEDENT>def _load_file(filepath, loader):<EOL><INDENT>return loader(filepath)<EOL><DEDENT>filext_loader = {<EOL>'<STR_LIT>': open_nifti_file,<EOL>'<STR_LIT>': open_mhd_file,<EOL>'<STR_LIT>': open_mha_file,<EOL>}<EOL>ext = get_extension(filepath)<EOL>loader = None<EOL>for e in filext_loader:<EOL><INDENT>if ext in e:<EOL><INDENT>loader = filext_loader[e]<EOL><DEDENT><DEDENT>if loader is None:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(filepath))<EOL><DEDENT>return _load_file(filepath, loader)<EOL>", "docstring": "Open a volumetric file using the tools following the file extension.\n\n    Parameters\n    ----------\n    filepath: str\n        Path to a volume file\n\n    Returns\n    -------\n    volume_data: np.ndarray\n        Volume data\n\n    pixdim: 1xN np.ndarray\n        Vector with the description of the voxels physical size (usually in mm) for each volume dimension.\n\n    Raises\n    ------\n    IOError\n        In case the file is not found.", "id": "f4102:m0"}
{"signature": "def compose_err_msg(msg, **kwargs):", "body": "updated_msg = msg<EOL>for k, v in sorted(kwargs.items()):<EOL><INDENT>if isinstance(v, _basestring):  <EOL><INDENT>updated_msg += \"<STR_LIT:\\n>\" + k + \"<STR_LIT>\" + v<EOL><DEDENT><DEDENT>return updated_msg<EOL>", "docstring": "Append key-value pairs to msg, for display.\n\n    Parameters\n    ----------\n    msg: string\n        arbitrary message\n    kwargs: dict\n        arbitrary dictionary\n\n    Returns\n    -------\n    updated_msg: string\n        msg, with \"key: value\" appended. Only string values are appended.\n\n    Example\n    -------\n    >>> compose_err_msg('Error message with arguments...', arg_num=123, \\\n        arg_str='filename.nii', arg_bool=True)\n    'Error message with arguments...\\\\narg_str: filename.nii'\n    >>>", "id": "f4103:m0"}
{"signature": "def insert_unique(self, table_name, data, unique_fields=None, *, raise_if_found=False):", "body": "return insert_unique(table=self.table(table_name),<EOL>data=_to_string(data),<EOL>unique_fields=unique_fields,<EOL>raise_if_found=raise_if_found)<EOL>", "docstring": "Insert `data` into `table` ensuring that data has unique values\n        in `table` for the fields listed in `unique_fields`.\n\n        If `raise_if_found` is True, will raise an NotUniqueItemError if\n        another item with the same `unique_fields` values are found\n        previously in `table`.\n        If False, will return the `eid` from the item found.\n\n        Parameters\n        ----------\n        table_name: str\n\n        data: dict\n\n        unique_fields: list of str\n            Name of fields (keys) from `data` which are going to be used to build\n            a sample to look for exactly the same values in the database.\n            If None, will use every key in `data`.\n\n        raise_if_found: bool\n\n        Returns\n        -------\n        eid: int\n            Id of the object inserted or the one found with same `unique_fields`.\n\n        Raises\n        ------\n        MoreThanOneItemError\n            Raise even with `raise_with_found` == False if it finds more than one item\n            with the same values as the sample.\n\n        NotUniqueItemError\n            If `raise_if_found` is True and an item with the same `unique_fields`\n            values from `data` is found in `table`.", "id": "f4104:c2:m2"}
{"signature": "def timestamp_with_tzinfo(dt):", "body": "utc = tzutc()<EOL>if dt.tzinfo:<EOL><INDENT>dt = dt.astimezone(utc).replace(tzinfo=None)<EOL><DEDENT>return dt.isoformat() + '<STR_LIT>'<EOL>", "docstring": "Serialize a date/time value into an ISO8601 text representation\nadjusted (if needed) to UTC timezone.\n\nFor instance:\n>>> serialize_date(datetime(2012, 4, 10, 22, 38, 20, 604391))\n'2012-04-10T22:38:20.604391Z'", "id": "f4104:m0"}
{"signature": "def search_unique(table, sample, unique_fields=None):", "body": "if unique_fields is None:<EOL><INDENT>unique_fields = list(sample.keys())<EOL><DEDENT>query = _query_data(sample, field_names=unique_fields, operators='<STR_LIT>')<EOL>items = table.search(query)<EOL>if len(items) == <NUM_LIT:1>:<EOL><INDENT>return items[<NUM_LIT:0>]<EOL><DEDENT>if len(items) == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>raise MoreThanOneItemError('<STR_LIT>'<EOL>'<STR_LIT>'.format(len(items)))<EOL>", "docstring": "Search for items in `table` that have the same field sub-set values as in `sample`.\n    Expecting it to be unique, otherwise will raise an exception.\n\n    Parameters\n    ----------\n    table: tinydb.table\n    sample: dict\n        Sample data\n\n    Returns\n    -------\n    search_result: tinydb.database.Element\n        Unique item result of the search.\n\n    Raises\n    ------\n    KeyError:\n        If the search returns for more than one entry.", "id": "f4104:m5"}
{"signature": "def insert_unique(table, data, unique_fields=None, *, raise_if_found=False):", "body": "item = find_unique(table, data, unique_fields)<EOL>if item is not None:<EOL><INDENT>if raise_if_found:<EOL><INDENT>raise NotUniqueItemError('<STR_LIT>'<EOL>'<STR_LIT>'.format(unique_fields,<EOL>data,<EOL>table.get(eid=item),<EOL>item))<EOL><DEDENT>else:<EOL><INDENT>return item<EOL><DEDENT><DEDENT>return table.insert(data)<EOL>", "docstring": "Insert `data` into `table` ensuring that data has unique values\n    in `table` for the fields listed in `unique_fields`.\n\n    If `raise_if_found` is True, will raise an NotUniqueItemError if\n    another item with the same `unique_fields` values are found\n    previously in `table`.\n    If False, will return the `eid` from the item found.\n\n    Parameters\n    ----------\n    table: tinydb.Table\n\n    data: dict\n\n    unique_fields: list of str\n        Name of fields (keys) from `data` which are going to be used to build\n        a sample to look for exactly the same values in the database.\n        If None, will use every key in `data`.\n\n    raise_if_found: bool\n\n    Returns\n    -------\n    eid: int\n        Id of the object inserted or the one found with same `unique_fields`.\n\n    Raises\n    ------\n    MoreThanOneItemError\n        Raise even with `raise_with_found` == False if it finds more than one item\n        with the same values as the sample.\n\n    NotUniqueItemError\n        If `raise_if_found` is True and an item with the same `unique_fields`\n        values from `data` is found in `table`.", "id": "f4104:m3"}
{"signature": "def search_sample(self, table_name, sample):", "body": "return search_sample(table=self.table(table_name),<EOL>sample=sample)<EOL>", "docstring": "Search for items in `table` that have the same field sub-set values as in `sample`.\n\n        Parameters\n        ----------\n        table_name: str\n\n        sample: dict\n            Sample data\n\n        Returns\n        -------\n        search_result: list of dict\n            List of the items found. The list is empty if no item is found.", "id": "f4104:c2:m4"}
{"signature": "def _query_data(data, field_names=None, operators='<STR_LIT>'):", "body": "if field_names is None:<EOL><INDENT>field_names = list(data.keys())<EOL><DEDENT>if isinstance(field_names, str):<EOL><INDENT>field_names = [field_names]<EOL><DEDENT>sample = OrderedDict([(fn, data[fn]) for fn in field_names])<EOL>return _query_sample(sample, operators=operators)<EOL>", "docstring": "Create a tinyDB Query object that looks for items that confirms the correspondent operator\n    from `operators` for each `field_names` field values from `data`.\n\n    Parameters\n    ----------\n    data: dict\n        The data sample\n\n    field_names: str or list of str\n        The name of the fields in `data` that will be used for the query.\n\n    operators: str or list of str\n        A list of comparison operations for each field value in `field_names`.\n        If this is a str, will use the same operator for all `field_names`.\n        If you want different operators for each field, remember to use an OrderedDict for `data`.\n        Check TinyDB.Query class for possible choices.\n\n    Returns\n    -------\n    query: tinydb.database.Query", "id": "f4104:m8"}
{"signature": "def get_requirements(*args):", "body": "install_deps = []<EOL>try:<EOL><INDENT>for fpath in args:<EOL><INDENT>install_deps.extend([str(d.req or d.url) for d in parse_requirements(fpath)])<EOL><DEDENT><DEDENT>except:<EOL><INDENT>print('<STR_LIT>'.format(fpath))<EOL><DEDENT>return [dep for dep in install_deps if dep != '<STR_LIT:None>']<EOL>", "docstring": "Parse all requirements files given and return a list of the dependencies", "id": "f4105:m0"}
{"signature": "@baker.command(default=True,<EOL>shortopts={'<STR_LIT>': '<STR_LIT:c>',<EOL>'<STR_LIT>': '<STR_LIT:d>',<EOL>'<STR_LIT>': '<STR_LIT:s>',<EOL>'<STR_LIT>': '<STR_LIT:o>'})<EOL>def copy(configfile='<STR_LIT>', destpath='<STR_LIT>', overwrite=False, sub_node='<STR_LIT>'):", "body": "log.info('<STR_LIT>'.format(os.path.basename(__file__),<EOL>whoami(),<EOL>locals()))<EOL>assert(os.path.isfile(configfile))<EOL>if os.path.exists(destpath):<EOL><INDENT>if os.listdir(destpath):<EOL><INDENT>raise FolderAlreadyExists('<STR_LIT>'<EOL>'<STR_LIT>'.format(destpath))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>log.info('<STR_LIT>'.format(destpath))<EOL>path(destpath).makedirs_p()<EOL><DEDENT>from boyle.files.file_tree_map import FileTreeMap<EOL>file_map = FileTreeMap()<EOL>try:<EOL><INDENT>file_map.from_config_file(configfile)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise FileTreeMapError(str(e))<EOL><DEDENT>if sub_node:<EOL><INDENT>sub_map = file_map.get_node(sub_node)<EOL>if not sub_map:<EOL><INDENT>raise FileTreeMapError('<STR_LIT>'<EOL>'<STR_LIT>'.format(sub_node))<EOL><DEDENT>file_map._filetree = {}<EOL>file_map._filetree[sub_node] = sub_map<EOL><DEDENT>try:<EOL><INDENT>file_map.copy_to(destpath, overwrite=overwrite)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise FileTreeMapError(str(e))<EOL><DEDENT>", "docstring": "Copies the files in the built file tree map\n    to despath.\n\n    :param configfile: string\n     Path to the FileTreeMap config file\n\n    :param destpath: string\n     Path to the files destination\n\n    :param overwrite: bool\n     Overwrite files if they already exist.\n\n    :param sub_node: string\n     Tree map configuration sub path.\n     Will copy only the contents within this sub-node", "id": "f4107:m0"}
{"signature": "def print_compare_idsets_one_ref(self, idset1_name, idset2_name):", "body": "try:<EOL><INDENT>idset1 = self[idset1_name]<EOL>idset2 = self[idset2_name]<EOL><DEDENT>except KeyError as ke:<EOL><INDENT>log.error('<STR_LIT>'.format(idset1_name,<EOL>idset2_name))<EOL>import sys, pdb<EOL>pdb.post_mortem(sys.exc_info()[<NUM_LIT:2>])<EOL>raise<EOL><DEDENT>assert(isinstance(idset1, idset_with_reference))<EOL>assert(isinstance(idset2, idset))<EOL>self._print_general_vs_table(idset1, idset2)<EOL>self._print_foreign_repetition_table(idset1, idset2)<EOL>", "docstring": "idset1_name: string\nkey of an idset_with_reference\n\nidset2_name: string\nkey of an idset", "id": "f4108:c2:m7"}
{"signature": "@baker.command(name='<STR_LIT>',<EOL>params={\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"\"\"<STR_LIT>\"\"\",<EOL>\"<STR_LIT>\": \"\"\"<STR_LIT>\"\"\"},<EOL>shortopts={'<STR_LIT>': '<STR_LIT:i>', '<STR_LIT>': '<STR_LIT:o>', <EOL>'<STR_LIT>': '<STR_LIT:m>', '<STR_LIT>': '<STR_LIT:t>'})<EOL>def convert_sav(inputfile, outputfile=None, method='<STR_LIT>', otype='<STR_LIT>'):", "body": "assert(os.path.isfile(inputfile))<EOL>assert(method=='<STR_LIT>' or method=='<STR_LIT>')<EOL>if method == '<STR_LIT>':<EOL><INDENT>df = sav_to_pandas_rpy2(inputfile)<EOL><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>df = sav_to_pandas_savreader(inputfile)<EOL><DEDENT>otype_exts = {'<STR_LIT>': '<STR_LIT>', <EOL>'<STR_LIT>': '<STR_LIT>', <EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:html>': '<STR_LIT>'}<EOL>if outputfile is None:<EOL><INDENT>outputfile = inputfile.replace(path(inputfile).ext, '<STR_LIT>')<EOL><DEDENT>outputfile = add_extension_if_needed(outputfile, otype_exts[otype])<EOL>if otype == '<STR_LIT>':<EOL><INDENT>df.to_csv(outputfile)<EOL><DEDENT>elif otype == '<STR_LIT>':<EOL><INDENT>df.to_hdf(outputfile, os.path.basename(outputfile))<EOL><DEDENT>elif otype == '<STR_LIT>':<EOL><INDENT>df.to_stata(outputfile)<EOL><DEDENT>elif otype == '<STR_LIT>':<EOL><INDENT>df.to_json(outputfile)<EOL><DEDENT>elif otype == '<STR_LIT>':<EOL><INDENT>df.to_pickle(outputfile)<EOL><DEDENT>elif otype == '<STR_LIT>':<EOL><INDENT>df.to_excel(outputfile)<EOL><DEDENT>elif otype == '<STR_LIT:html>':<EOL><INDENT>df.to_html(outputfile)<EOL><DEDENT>else:<EOL><INDENT>df.to_csv(outputfile)<EOL><DEDENT>", "docstring": "Transforms the input .sav SPSS file into other format.\n    If you don't specify an outputfile, it will use the\n    inputfile and change its extension to .csv", "id": "f4109:m0"}
{"signature": "def setDefaultIREncoding(encoding):", "body": "try:<EOL><INDENT>b'<STR_LIT>'.decode(encoding)<EOL><DEDENT>except:<EOL><INDENT>raise ValueError('<STR_LIT>' %(str(encoding), ))<EOL><DEDENT>global defaultIREncoding<EOL>defaultIREncoding = encoding<EOL>", "docstring": "setDefaultIREncoding - Sets the default encoding used by IndexedRedis.\n  This will be the default encoding used for field data. You can override this on a\n  per-field basis by using an IRField (such as IRUnicodeField or IRRawField)\n\n@param encoding - An encoding (like utf-8)", "id": "f4140:m0"}
{"signature": "def __new__(self, val='<STR_LIT>'):", "body": "return IrNullBaseType.__new__(self, '<STR_LIT>')<EOL>", "docstring": "Don't let this be assigned a value.", "id": "f4144:c0:m0"}
{"signature": "def getObj(self):", "body": "if self.obj is None:<EOL><INDENT>if not self.pk:<EOL><INDENT>return None<EOL><DEDENT>self.obj = self.foreignModel.objects.get(self.pk)<EOL><DEDENT>return self.obj<EOL>", "docstring": "getObj - Fetch (if not fetched) and return the obj associated with this data.", "id": "f4146:c1:m3"}
{"signature": "def __init__(self, pk=None, foreignModel=None, obj=None):", "body": "self.pk = pk<EOL>self.obj = obj<EOL>if foreignModel is not None:<EOL><INDENT>if issubclass(foreignModel.__class__, weakref.ReferenceType):<EOL><INDENT>foreignModel = foreignModel()<EOL><DEDENT>self._foreignModel = weakref.ref(foreignModel)<EOL><DEDENT>else:<EOL><INDENT>self._foreignModel = None<EOL><DEDENT>", "docstring": "__init__ - Create a ForeignLinkData object\n\n@param pk <int> - The primary key of the foreign object\n@param obj <None/IndexedRedisModel> - The resolved object, or None if not yet resolved", "id": "f4146:c1:m0"}
{"signature": "def isMulti(self):", "body": "return False<EOL>", "docstring": "isMulti - Returns True if this is a MultiLink object (expects lists), otherwise False (expects object)\n\n@return <bool>", "id": "f4146:c4:m8"}
{"signature": "def isFetched(self):", "body": "return not bool(self.obj is None)<EOL>", "docstring": "isFetched - Check if the associated obj has been fetched or not.", "id": "f4146:c1:m7"}
{"signature": "def getObjs(self):", "body": "return self.getObj()<EOL>", "docstring": "getObjs - @see ForeignLinkData.getObjs", "id": "f4146:c2:m4"}
{"signature": "def __init__(self, name='<STR_LIT>', foreignModel=None):", "body": "IRField.__init__(self, name, valueType=int, defaultValue=irNull)<EOL>if foreignModel:<EOL><INDENT>if not isinstance(foreignModel, type):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not hasattr(foreignModel, '<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>self._foreignModel = weakref.ref(foreignModel)<EOL>", "docstring": "__init__ - Create an IRForeignLinkField. Only takes a name\n\n@param name <str> - Field name\n\nThis field type does not support indexing.", "id": "f4146:c4:m0"}
{"signature": "def __init__(self, name='<STR_LIT>', defaultValue=irNull):", "body": "self.valueType = None<EOL>self.defaultValue = defaultValue<EOL>", "docstring": "__init__ - Create an IRPickleField\n\n@param name <str> - Field name\n\n@param defaultValue - The default value of this field\n\nBecause even with the same format, python2 and python3 can output different pickle strings for the same object,\n  as well as different host configurations may lead to different output, this field type is not indexable.", "id": "f4147:c0:m0"}
{"signature": "def __init__(self, name='<STR_LIT>', decimalPlaces=<NUM_LIT:5>, defaultValue=irNull):", "body": "self.decimalPlaces = decimalPlaces<EOL>if isinstance(defaultValue, int):<EOL><INDENT>defaultValue = float(defaultValue)<EOL><DEDENT>elif isinstance(defaultValue, float):<EOL><INDENT>defaultValue = round(defaultValue, decimalPlaces)<EOL><DEDENT>self.defaultValue = defaultValue<EOL>", "docstring": "__init__ - Create this object.\n\n@param name <str> - Field name (or blank if used in an IRFieldChain)\n\n@param decimalPlaces <int> - The number of decimal places to use (precision). Values will be rounded to this many places, and always have\n  this many digits after the decimal point.\n\n@param defaultValue - The default value for this field\n\nAn IRFixedPointField is indexable, and has no option to hash the index.", "id": "f4148:c0:m0"}
{"signature": "def toIndex(self, value):", "body": "if self._isIrNull(value):<EOL><INDENT>ret = IR_NULL_STR<EOL><DEDENT>else:<EOL><INDENT>ret = self._toIndex(value)<EOL><DEDENT>if self.isIndexHashed is False:<EOL><INDENT>return ret<EOL><DEDENT>return md5(tobytes(ret)).hexdigest()<EOL>", "docstring": "toIndex - An optional method which will return the value prepped for index.\n\nBy default, \"toStorage\" will be called. If you provide \"hashIndex=True\" on the constructor,\nthe field will be md5summed for indexing purposes. This is useful for large strings, etc.", "id": "f4149:c0:m8"}
{"signature": "def toStorage(self, value):", "body": "if value == irNull or None:<EOL><INDENT>return IR_NULL_STR<EOL><DEDENT>return self._toStorage(value)<EOL>", "docstring": "toStorage - Convert the value to a string representation for storage.\n\n  The default implementation will work here for basic types.\n\n@param value - The value of the item to convert\n@return A string value suitable for storing.", "id": "f4149:c0:m1"}
{"signature": "def _getReprProperties(self):", "body": "ret = []<EOL>if getattr(self, '<STR_LIT>', None) is not None:<EOL><INDENT>ret.append('<STR_LIT>' %(self.valueType.__name__, ))<EOL><DEDENT>if hasattr(self, '<STR_LIT>'):<EOL><INDENT>ret.append('<STR_LIT>' %(self.hashIndex, ))<EOL><DEDENT>return ret<EOL>", "docstring": "_getReprProperties - Get the properties of this field to display in repr().\n\n        These should be in the form of $propertyName=$propertyRepr\n\n        The default IRField implementation handles just the \"hashIndex\" property.\n\n        defaultValue is part of \"__repr__\" impl. You should just extend this method\n        with your object's properties instead of rewriting repr.", "id": "f4149:c0:m18"}
{"signature": "def _toStorage(self, value):", "body": "return to_unicode(value)<EOL>", "docstring": "_toStorage - Convert the value to a string for storage.\n\nThe default implementation works for most valueTypes within IRField, override this for extending types.\n\nYou don't need to handle null\n\n@param value - Value of item to convert\n\n@return - A string value suitable for storing", "id": "f4149:c0:m2"}
{"signature": "@property<EOL><INDENT>def name(self):<DEDENT>", "body": "return str(self)<EOL>", "docstring": "name - Property, return this field's name\n\n@return <str> - Field name", "id": "f4149:c0:m11"}
{"signature": "def copy(self):", "body": "return self.__class__(name=self.name, valueType=self.valueType, defaultValue=self.defaultValue, hashIndex=self.hashIndex)<EOL>", "docstring": "copy - Create a copy of this IRField.\n\n  Each subclass should implement this, as you'll need to pass in the args to constructor.\n\n@return <IRField (or subclass)> - Another IRField that has all the same values as this one.", "id": "f4149:c0:m20"}
{"signature": "@property<EOL><INDENT>def isIndexHashed(self):<DEDENT>", "body": "return bool(self.hashIndex)<EOL>", "docstring": "isIndexHashed - Returns if the index value should be hashed\n\n@return <bool> - True if this field should be hashed before indexing / filtering", "id": "f4149:c0:m10"}
{"signature": "def fromInput(self, value):", "body": "if value == irNull:<EOL><INDENT>return irNull<EOL><DEDENT>return self._fromInput(value)<EOL>", "docstring": "fromInput - Convert the value from input (like assigning this through constructor or as an item assignment on the object\n\n@param value - Value to convert\n\n@return - Converted value", "id": "f4149:c0:m6"}
{"signature": "def _fromStorage(self, value):", "body": "return self.valueType(value)<EOL>", "docstring": "_fromStorage - Convert the value from storage to the value type.\n\n  This default impl works fine for most value types, should be implemented by extending types.\n\n  @param value - Value to convert\n\n  @return - Converted value", "id": "f4149:c0:m4"}
{"signature": "def fromStorage(self, value):", "body": "if value in IR_NULL_STRINGS:<EOL><INDENT>return irNull<EOL><DEDENT>return self._fromStorage(value)<EOL>", "docstring": "fromStorage - Convert the value from storage to the value type.\n\n@param value - Value to convert\n\n@return - The converted value", "id": "f4149:c0:m3"}
{"signature": "def getEncoding(self):", "body": "if not self.encoding:<EOL><INDENT>return getDefaultIREncoding()<EOL><DEDENT>return self.encoding<EOL>", "docstring": "getEncoding - Get the encoding codec associated with this field.\n\n        If you provided None, this will return the defaultIREncoding\n\n@return <str> - Encoding", "id": "f4152:c0:m1"}
{"signature": "def __init__(self, name='<STR_LIT>', defaultValue=irNull, encoding=None):", "body": "self.valueType = None<EOL>self.defaultValue = defaultValue<EOL>self.encoding = encoding<EOL>", "docstring": "__init__ - Create an IRBytesField object\n\n@param name <str> - Field name\n\n@param defaultValue <any> default irNull - Default value for this field\n\n@param encoding <None/str> - If None, defaultIREncoding will be used when converting to bytes,\n  otherwise you can provide an explicit encoding\n\nAn IRBytesField is indexable, and the index is forced to be hashed.", "id": "f4154:c0:m0"}
{"signature": "def reload(self):", "body": "if len(self) == <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>ret = []<EOL>for obj in self:<EOL><INDENT>res = None<EOL>try:<EOL><INDENT>res = obj.reload()<EOL><DEDENT>except Exception as e:<EOL><INDENT>res = e<EOL><DEDENT>ret.append(res)<EOL><DEDENT>return ret<EOL>", "docstring": "reload - Reload all objects in this list. \n        Updates in-place. To just fetch all these objects again, use \"refetch\"\n\n@return - List (same order as current objects) of either exception (KeyError) if operation failed,\n  or a dict of fields changed -> (old, new)", "id": "f4155:c0:m5"}
{"signature": "@staticmethod<EOL><INDENT>def __validate_model(mdl):<DEDENT>", "body": "if not hasattr(mdl, '<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>' %(str(mdl.__class__.__name__),))<EOL><DEDENT>", "docstring": "__validate_model - Internal function to check that model is of correct type.\n\nUses a class variable that has been defined for IndexedRedisModel s for a long time, not the type itself, to prevent circular imports etc.\n\n@param mdl - type to validate", "id": "f4155:c0:m1"}
{"signature": "def refetch(self):", "body": "if len(self) == <NUM_LIT:0>:<EOL><INDENT>return IRQueryableList()<EOL><DEDENT>mdl = self.getModel()<EOL>pks = [item._id for item in self if item._id]<EOL>return mdl.objects.getMultiple(pks)<EOL>", "docstring": "refetch - Fetch a fresh copy of all items in this list.\n        Returns a new list. To update in-place, use \"reload\".\n\n@return IRQueryableList<IndexedRedisModel> - List of fetched items", "id": "f4155:c0:m6"}
{"signature": "def delete(self):", "body": "if len(self) == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>mdl = self.getModel()<EOL>return mdl.deleter.deleteMultiple(self)<EOL>", "docstring": "delete - Delete all objects in this list.\n\n@return <int> - Number of objects deleted", "id": "f4155:c0:m3"}
{"signature": "def _add_id_to_keys(self, pk, conn=None):", "body": "if conn is None:<EOL><INDENT>conn = self._get_connection()<EOL><DEDENT>conn.sadd(self._get_ids_key(), pk)<EOL>", "docstring": "_add_id_to_keys - Adds primary key to table\ninternal", "id": "f4156:c2:m5"}
{"signature": "def diff(firstObj, otherObj, includeMeta=False):", "body": "if not isIndexedRedisModel(firstObj): <EOL><INDENT>raise ValueError('<STR_LIT>' %( type(firstObj).__name__ , ) )<EOL><DEDENT>if not isIndexedRedisModel(otherObj): <EOL><INDENT>raise ValueError('<STR_LIT>' %( type(otherObj).__name__ , ) )<EOL><DEDENT>firstObj.validateModel()<EOL>otherObj.validateModel()<EOL>if getattr(firstObj, '<STR_LIT>') != getattr(otherObj, '<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>' %( firstObj.__class__, otherObj.__class__) )<EOL><DEDENT>diffFields = {}<EOL>for thisField in firstObj.FIELDS:<EOL><INDENT>thisFieldStr = str(thisField)<EOL>firstVal = object.__getattribute__( firstObj, thisFieldStr )<EOL>otherVal = object.__getattribute__( otherObj, thisFieldStr )<EOL>if firstVal != otherVal:<EOL><INDENT>diffFields[ thisFieldStr ] = ( (firstVal, otherVal) )<EOL><DEDENT><DEDENT>if includeMeta:<EOL><INDENT>firstPk = firstObj.getPk()<EOL>otherPk = otherObj.getPk()<EOL>if firstPk != otherPk:<EOL><INDENT>diffFields['<STR_LIT>'] = ( firstPk, otherPk )<EOL><DEDENT><DEDENT>return diffFields<EOL>", "docstring": "diff - Compare the field values on two IndexedRedisModels.\n\n@param firstObj <IndexedRedisModel instance> - First object (or self)\n\n@param otherObj <IndexedRedisModel instance> - Second object\n\n@param includeMeta <bool> - If meta information (like pk) should be in the diff results.\n\n\n@return <dict> - Dict of  'field' : ( value_firstObjForField, value_otherObjForField ).\n\n        Keys are names of fields with different values.\n        Value is a tuple of ( value_firstObjForField, value_otherObjForField )\n\nCan be called statically, like: IndexedRedisModel.diff ( obj1, obj2 )\n\n  or in reference to an obj   : obj1.diff(obj2)", "id": "f4156:c1:m7"}
{"signature": "@classproperty<EOL><INDENT>def saver(cls):<DEDENT>", "body": "return IndexedRedisSave(cls)<EOL>", "docstring": "saver - Get an IndexedRedisSave associated with this model", "id": "f4156:c1:m9"}
{"signature": "def saveMultiple(self, objs):", "body": "<EOL>return self.save(objs)<EOL>", "docstring": "saveMultiple - Save a list of objects using a pipeline.\n\n@param objs < list<IndexedRedisModel> > - List of objects to save", "id": "f4156:c4:m1"}
{"signature": "def getDefaultRedisConnectionParams():", "body": "global _defaultRedisConnectionParams<EOL>return copy.copy(_defaultRedisConnectionParams)<EOL>", "docstring": "getDefaultRedisConnectionParams - Gets A COPY OF the default Redis connection params.\n\n@see setDefaultRedisConnectionParams for more info\n\n@return <dict> - copy of default Redis connection parameters", "id": "f4156:m1"}
{"signature": "def saveToExternal(self, redisCon):", "body": "if type(redisCon) == dict:<EOL><INDENT>conn = redis.Redis(**redisCon)<EOL><DEDENT>elif hasattr(conn, '<STR_LIT>') and issubclass(conn.__class__, redis.Redis):<EOL><INDENT>conn = redisCon<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>saver = self.saver<EOL>forceID = saver._getNextID(conn) <EOL>myCopy = self.copy(False)<EOL>return saver.save(myCopy, usePipeline=True, forceID=forceID, conn=conn)<EOL>", "docstring": "saveToExternal - Saves this object to a different Redis than that specified by REDIS_CONNECTION_PARAMS on this model.\n\n@param redisCon <dict/redis.Redis> - Either a dict of connection params, a la REDIS_CONNECTION_PARAMS, or an existing Redis connection.\n        If you are doing a lot of bulk copies, it is recommended that you create a Redis connection and pass it in rather than establish a new\n        connection with each call.\n\n@note - You will generate a new primary key relative to the external Redis environment. If you need to reference a \"shared\" primary key, it is better\n                to use an indexed field than the internal pk.", "id": "f4156:c1:m23"}
{"signature": "def all(self, cascadeFetch=False):", "body": "matchedKeys = self.getPrimaryKeys()<EOL>if matchedKeys:<EOL><INDENT>return self.getMultiple(matchedKeys, cascadeFetch=cascadeFetch)<EOL><DEDENT>return IRQueryableList([], mdl=self.mdl)<EOL>", "docstring": "all - Get the underlying objects which match the filter criteria.\n\nExample:   objs = Model.objects.filter(field1='value', field2='value2').all()\n\n@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model\n   will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n@return - Objects of the Model instance associated with this query.", "id": "f4156:c3:m9"}
{"signature": "def __setattr__(self, keyName, value):", "body": "oga = object.__getattribute__<EOL>if keyName not in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>fields = oga(self, '<STR_LIT>')<EOL>try:<EOL><INDENT>idx = fields.index(keyName)<EOL><DEDENT>except:<EOL><INDENT>idx = -<NUM_LIT:1><EOL><DEDENT>if idx != -<NUM_LIT:1>:<EOL><INDENT>value = fields[idx].fromInput(value)<EOL><DEDENT><DEDENT>object.__setattr__(self, keyName, value)<EOL>", "docstring": "__setattr__ - Will be used to set an attribute on this object.\n\n  If the attribute is a field (in self.FIELDS), it will be converted via the field type's #fromInput method.\n\n  Otherwise, it will just set the attribute on this object.", "id": "f4156:c1:m1"}
{"signature": "def reload(self, cascadeObjects=True):", "body": "_id = self._id<EOL>if not _id:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>currentData = self.asDict(False, forStorage=False)<EOL>newDataObj = self.objects.get(_id)<EOL>if not newDataObj:<EOL><INDENT>raise KeyError('<STR_LIT>' %(_id,))<EOL><DEDENT>newData = newDataObj.asDict(False, forStorage=False)<EOL>if currentData == newData and not self.foreignFields:<EOL><INDENT>return []<EOL><DEDENT>updatedFields = {}<EOL>for thisField, newValue in newData.items():<EOL><INDENT>defaultValue = thisField.getDefaultValue()<EOL>currentValue = currentData.get(thisField, defaultValue)<EOL>fieldIsUpdated = False<EOL>if currentValue != newValue:<EOL><INDENT>fieldIsUpdated = True<EOL><DEDENT>elif cascadeObjects is True and issubclass(thisField.__class__, IRForeignLinkFieldBase):<EOL><INDENT>if currentValue.isFetched():<EOL><INDENT>oldObjs = currentValue.getObjs()<EOL>newObjs = newValue.getObjs()<EOL>if oldObjs != newObjs: <EOL><INDENT>fieldIsUpdated = True<EOL><DEDENT>else:<EOL><INDENT>for i in range(len(oldObjs)):<EOL><INDENT>if not oldObjs[i].hasSameValues(newObjs[i], cascadeObjects=True):<EOL><INDENT>fieldIsUpdated = True<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if fieldIsUpdated is True:<EOL><INDENT>updatedFields[thisField] = ( currentValue, newValue) <EOL>setattr(self, thisField, newValue)<EOL>self._origData[thisField] = newDataObj._origData[thisField]<EOL><DEDENT><DEDENT>return updatedFields<EOL>", "docstring": "reload - Reload this object from the database, overriding any local changes and merging in any updates.\n\n\n    @param cascadeObjects <bool> Default True. If True, foreign-linked objects will be reloaded if their values have changed\n      since last save/fetch. If False, only if the pk changed will the foreign linked objects be reloaded.\n\n    @raises KeyError - if this object has not been saved (no primary key)\n\n    @return - Dict with the keys that were updated. 
Key is field name that was updated,\n       and value is tuple of (old value, new value). \n\n    NOTE: Currently, this will cause a fetch of all Foreign Link objects, one level", "id": "f4156:c1:m24"}
{"signature": "def save(self, cascadeSave=True):", "body": "saver = IndexedRedisSave(self.__class__)<EOL>return saver.save(self, cascadeSave=cascadeSave)<EOL>", "docstring": "save - Save this object.\n\nWill perform an \"insert\" if this object had not been saved before,\n  otherwise will update JUST the fields changed on THIS INSTANCE of the model.\n\n  i.e. If you have two processes fetch the same object and change different fields, they will not overwrite\n  eachother, but only save the ones each process changed.\n\nIf you want to save multiple objects of type MyModel in a single transaction,\nand you have those objects in a list, myObjs, you can do the following:\n\n        MyModel.saver.save(myObjs)\n\n@param cascadeSave <bool> Default True - If True, any Foreign models linked as attributes that have been altered\n   or created will be saved with this object. If False, only this object (and the reference to an already-saved foreign model) will be saved.\n\n@see #IndexedRedisSave.save\n\n@return <list> - Single element list, id of saved object (if successful)", "id": "f4156:c1:m11"}
{"signature": "@deprecated('<STR_LIT>')<EOL><INDENT>@classmethod<EOL>def connect(cls, redisConnectionParams):<DEDENT>", "body": "return cls.connectAlt(redisConnectionParams)<EOL>", "docstring": "connect - DEPRECATED NAME - @see connectAlt\n  Create a class of this model which will use an alternate connection than the one specified by REDIS_CONNECTION_PARAMS on this model.\n\n@param redisConnectionParams <dict> - Dictionary of arguments to redis.Redis, same as REDIS_CONNECTION_PARAMS.\n\n@return - A class that can be used in all the same ways as the existing IndexedRedisModel, but that connects to a different instance.", "id": "f4156:c1:m30"}
{"signature": "def random(self, cascadeFetch=False):", "body": "matchedKeys = list(self.getPrimaryKeys())<EOL>obj = None<EOL>while matchedKeys and not obj:<EOL><INDENT>key = matchedKeys.pop(random.randint(<NUM_LIT:0>, len(matchedKeys)-<NUM_LIT:1>))<EOL>obj = self.get(key, cascadeFetch=cascadeFetch)<EOL><DEDENT>return obj<EOL>", "docstring": "Random - Returns a random record in current filterset.\n\n\n@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model\n   will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n@return - Instance of Model object, or None if no items math current filters", "id": "f4156:c3:m15"}
{"signature": "@staticmethod<EOL><INDENT>def _doCascadeFetch(obj):<DEDENT>", "body": "obj.validateModel()<EOL>if not obj.foreignFields:<EOL><INDENT>return<EOL><DEDENT>NOTE: Currently this fetches using one transaction per object. Implementation for actual resolution is in<EOL><INDENT>IndexedRedisModel.__getattribute__ <EOL><DEDENT>for foreignField in obj.foreignFields:<EOL><INDENT>subObjsData = object.__getattribute__(obj, foreignField)<EOL>if not subObjsData:<EOL><INDENT>setattr(obj, str(foreignField), irNull)<EOL>continue<EOL><DEDENT>subObjs = subObjsData.getObjs()<EOL>for subObj in subObjs:<EOL><INDENT>if isIndexedRedisModel(subObj):<EOL><INDENT>IndexedRedisQuery._doCascadeFetch(subObj)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "_doCascadeFetch - Takes an object and performs a cascading fetch on all foreign links, and all theirs, and so on.\n\n@param obj <IndexedRedisModel> - A fetched model", "id": "f4156:c3:m18"}
{"signature": "@classmethod<EOL><INDENT>def copyModel(mdl):<DEDENT>", "body": "copyNum = _modelCopyMap[mdl]<EOL>_modelCopyMap[mdl] += <NUM_LIT:1><EOL>mdlCopy = type(mdl.__name__ + '<STR_LIT>' + str(copyNum), mdl.__bases__, copy.deepcopy(dict(mdl.__dict__)))<EOL>mdlCopy.FIELDS = [field.copy() for field in mdl.FIELDS]<EOL>mdlCopy.INDEXED_FIELDS = [str(idxField) for idxField in mdl.INDEXED_FIELDS] <EOL>mdlCopy.validateModel()<EOL>return mdlCopy<EOL>", "docstring": "copyModel - Copy this model, and return that copy.\n\n  The copied model will have all the same data, but will have a fresh instance of the FIELDS array and all members,\n    and the INDEXED_FIELDS array.\n\n  This is useful for converting, like changing field types or whatever, where you can load from one model and save into the other.\n\n@return <IndexedRedisModel> - A copy class of this model class with a unique name.", "id": "f4156:c1:m28"}
{"signature": "def getMultipleOnlyFields(self, pks, fields, cascadeFetch=False):", "body": "if type(pks) == set:<EOL><INDENT>pks = list(pks)<EOL><DEDENT>if len(pks) == <NUM_LIT:1>:<EOL><INDENT>return IRQueryableList([self.getOnlyFields(pks[<NUM_LIT:0>], fields, cascadeFetch=cascadeFetch)], mdl=self.mdl)<EOL><DEDENT>conn = self._get_connection()<EOL>pipeline = conn.pipeline()<EOL>for pk in pks:<EOL><INDENT>key = self._get_key_for_id(pk)<EOL>pipeline.hmget(key, fields)<EOL><DEDENT>res = pipeline.execute()<EOL>ret = IRQueryableList(mdl=self.mdl)<EOL>pksLen = len(pks)<EOL>i = <NUM_LIT:0><EOL>numFields = len(fields)<EOL>while i < pksLen:<EOL><INDENT>objDict = {}<EOL>anyNotNone = False<EOL>thisRes = res[i]<EOL>if thisRes is None or type(thisRes) != list:<EOL><INDENT>ret.append(None)<EOL>i += <NUM_LIT:1><EOL>continue<EOL><DEDENT>j = <NUM_LIT:0><EOL>while j < numFields:<EOL><INDENT>objDict[fields[j]] = thisRes[j]<EOL>if thisRes[j] != None:<EOL><INDENT>anyNotNone = True<EOL><DEDENT>j += <NUM_LIT:1><EOL><DEDENT>if anyNotNone is False:<EOL><INDENT>ret.append(None)<EOL>i += <NUM_LIT:1><EOL>continue<EOL><DEDENT>objDict['<STR_LIT>'] = pks[i]<EOL>obj = self._redisResultToObj(objDict)<EOL>ret.append(obj)<EOL>i += <NUM_LIT:1><EOL><DEDENT>if cascadeFetch is True:<EOL><INDENT>for obj in ret:<EOL><INDENT>self._doCascadeFetch(obj)<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "getMultipleOnlyFields - Gets only certain fields from a list of  primary keys. For working on entire filter set, see allOnlyFields\n\n@param pks list<str> - Primary Keys\n\n@param fields list<str> - List of fields\n\n\n@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model\n   will be fetched immediately. If False, foreign objects will be fetched on-access.\n\nreturn - List of partial objects with only fields applied", "id": "f4156:c3:m21"}
{"signature": "def delete(self):", "body": "deleter = IndexedRedisDelete(self.__class__)<EOL>return deleter.deleteOne(self)<EOL>", "docstring": "delete - Delete this object", "id": "f4156:c1:m12"}
{"signature": "def save(self, obj, usePipeline=True, forceID=False, cascadeSave=True, conn=None):", "body": "if conn is None:<EOL><INDENT>conn = self._get_connection()<EOL><DEDENT>if usePipeline is True:<EOL><INDENT>idConn = conn<EOL><DEDENT>else:<EOL><INDENT>idConn = self._get_new_connection()<EOL><DEDENT>if issubclass(obj.__class__, (list, tuple)):<EOL><INDENT>objs = obj<EOL><DEDENT>else:<EOL><INDENT>objs = [obj]<EOL><DEDENT>if usePipeline is True:<EOL><INDENT>pipeline = conn.pipeline()<EOL><DEDENT>else:<EOL><INDENT>pipeline = conn<EOL><DEDENT>oga = object.__getattribute__<EOL>if cascadeSave is True:<EOL>ignPipelines = OrderedDict()<EOL><INDENT>foreignSavers = {}<EOL>for thisObj in objs:<EOL><INDENT>if not thisObj.foreignFields:<EOL><INDENT>continue<EOL><DEDENT>foreignFields = thisObj.foreignFields<EOL>for foreignField in foreignFields:<EOL><INDENT>rawObj = oga(thisObj, str(foreignField))<EOL>if rawObj in (None, irNull) or not rawObj.isFetched():<EOL><INDENT>continue<EOL><DEDENT>foreignObjects = oga(thisObj, str(foreignField)).getObjs()<EOL>for foreignObject in foreignObjects:<EOL><INDENT>doSaveForeign = False<EOL>if getattr(foreignObject, '<STR_LIT>', None):<EOL><INDENT>if foreignObject.hasUnsavedChanges(cascadeObjects=True):<EOL><INDENT>doSaveForeign = True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>doSaveForeign = True<EOL><DEDENT>if doSaveForeign is True:<EOL><INDENT>if foreignField not in foreignSavers:<EOL><DEDENT><DEDENT><DEDENT>foreignPipelines[foreignField] = self._get_new_connection().pipeline()<EOL><INDENT>foreignSavers[foreignField] = IndexedRedisSave(foreignObject.__class__)<EOL>", "docstring": "save - Save an object / objects associated with this model. 
\n\nYou probably want to just do object.save() instead of this, but to save multiple objects at once in a single transaction, \n   you can use:\n\n        MyModel.saver.save(myObjs)\n\n@param obj <IndexedRedisModel or list<IndexedRedisModel> - The object to save, or a list of objects to save\n\n@param usePipeline - Use a pipeline for saving. You should always want this, unless you are calling this function from within an existing pipeline.\n\n@param forceID - if not False, force ID to this. If obj is list, this is also list. Forcing IDs also forces insert. Up to you to ensure ID will not clash.\n@param cascadeSave <bool> Default True - If True, any Foreign models linked as attributes that have been altered\n   or created will be saved with this object. If False, only this object (and the reference to an already-saved foreign model) will be saved.\n\n@param conn - A connection or None\n\n@note - if no ID is specified\n\n@return - List of pks", "id": "f4156:c4:m0"}
{"signature": "@classmethod<EOL><INDENT>def connectAlt(cls, redisConnectionParams):<DEDENT>", "body": "if not isinstance(redisConnectionParams, dict):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>hashVal = hashDictOneLevel(redisConnectionParams)<EOL>modelDictCopy = copy.deepcopy(dict(cls.__dict__))<EOL>modelDictCopy['<STR_LIT>'] = redisConnectionParams<EOL>ConnectedIndexedRedisModel = type('<STR_LIT>' + cls.__name__ + str(hashVal), cls.__bases__, modelDictCopy)<EOL>return ConnectedIndexedRedisModel<EOL>", "docstring": "connectAlt - Create a class of this model which will use an alternate connection than the one specified by REDIS_CONNECTION_PARAMS on this model.\n\n@param redisConnectionParams <dict> - Dictionary of arguments to redis.Redis, same as REDIS_CONNECTION_PARAMS.\n\n@return - A class that can be used in all the same ways as the existing IndexedRedisModel, but that connects to a different instance.\n\n  The fields and key will be the same here, but the connection will be different. use #copyModel if you want an independent class for the model", "id": "f4156:c1:m31"}
{"signature": "def __eq__(self, other):", "body": "<EOL>if type(self) != type(other):<EOL><INDENT>return False<EOL><DEDENT>if not self.hasSameValues(other):<EOL><INDENT>return False<EOL><DEDENT>if getattr(self, '<STR_LIT>', None) != getattr(other, '<STR_LIT>', None):<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "__eq__ - Check if two IndexedRedisModels are equal.\n\nThey are equal if they have the same type and same field values (including id).\n\nTo check if two models have the same values (but can have different ids), use #hasSameValues method.", "id": "f4156:c1:m16"}
{"signature": "def __repr__(self):", "body": "myDict = self.asDict(True, forStorage=False, strKeys=True)<EOL>myClassName = self.__class__.__name__<EOL>ret = [myClassName, '<STR_LIT:(>']<EOL>_id = myDict.pop('<STR_LIT>', '<STR_LIT>')<EOL>if _id:<EOL><INDENT>ret += ['<STR_LIT>', to_unicode(_id), '<STR_LIT>']<EOL><DEDENT>key = None<EOL>for key, value in myDict.items():<EOL><INDENT>ret += [key, '<STR_LIT:=>', repr(value), '<STR_LIT:U+002CU+0020>']<EOL><DEDENT>if key is not None or not _id:<EOL><INDENT>ret.pop()<EOL><DEDENT>ret.append('<STR_LIT:)>')<EOL>return '<STR_LIT>'.join(ret)<EOL>", "docstring": "__repr__ - Returns a string of the constructor/params to recreate this object.\n    Example: objCopy = eval(repr(obj))\n\n    @return - String of python init call to recreate this object", "id": "f4156:c1:m19"}
{"signature": "def _rem_id_from_index(self, indexedField, pk, val, conn=None):", "body": "if conn is None:<EOL><INDENT>conn = self._get_connection()<EOL><DEDENT>conn.srem(self._get_key_for_index(indexedField, val), pk)<EOL>", "docstring": "_rem_id_from_index - Removes an id from an index\ninternal", "id": "f4156:c2:m8"}
{"signature": "def filterInline(self, **kwargs):", "body": "return IndexedRedisQuery._filter(self, **kwargs)<EOL>", "docstring": "filterInline - @see IndexedRedisQuery.filter. This is the same as filter, but works inline on this object instead of creating a copy.\n        Use this is you do not need to retain the previous filter object.", "id": "f4156:c3:m4"}
{"signature": "def getPk(self):", "body": "return self._id<EOL>", "docstring": "getPk - Gets the internal primary key associated with this object", "id": "f4156:c1:m13"}
{"signature": "def _get_connection(self):", "body": "if self._connection is None:<EOL><INDENT>self._connection = self._get_new_connection() <EOL><DEDENT>return self._connection<EOL>", "docstring": "_get_connection - Maybe get a new connection, or reuse if passed in.\n        Will share a connection with a model\ninternal", "id": "f4156:c2:m3"}
{"signature": "def asDict(self, includeMeta=False, forStorage=False, strKeys=False):", "body": "ret = {}<EOL>for thisField in self.FIELDS:<EOL><INDENT>val = object.__getattribute__(self, thisField)<EOL>if forStorage is True:<EOL><INDENT>val = thisField.toStorage(val)<EOL><DEDENT>if strKeys:<EOL><INDENT>ret[str(thisField)] = val<EOL><DEDENT>else:<EOL><INDENT>ret[thisField] = val<EOL><DEDENT><DEDENT>if includeMeta is True:<EOL><INDENT>ret['<STR_LIT>'] = getattr(self, '<STR_LIT>', None)<EOL><DEDENT>return ret<EOL>", "docstring": "toDict / asDict - Get a dictionary representation of this model.\n\n@param includeMeta - Include metadata in return. For now, this is only pk stored as \"_id\"\n\n@param convertValueTypes <bool> - default True. If False, fields with fieldValue defined will be converted to that type.\n        Use True when saving, etc, as native type is always either str or bytes.\n\n@param strKeys <bool> Default False - If True, just the string value of the field name will be used as the key.\n        Otherwise, the IRField itself will be (although represented and indexed by string)\n\n@return - Dictionary reprensetation of this object and all fields", "id": "f4156:c1:m3"}
{"signature": "def __setstate__(self, stateDict):", "body": "self.__class__.validateModel()<EOL>for key, value in stateDict.items():<EOL><INDENT>setattr(self, key, value)<EOL><DEDENT>self._origData = stateDict['<STR_LIT>']<EOL>", "docstring": "pickle uses this", "id": "f4156:c1:m27"}
{"signature": "def _get_new_connection(self):", "body": "pool = getRedisPool(self.mdl.REDIS_CONNECTION_PARAMS)<EOL>return redis.Redis(connection_pool=pool)<EOL>", "docstring": "_get_new_connection - Get a new connection\ninternal", "id": "f4156:c2:m2"}
{"signature": "def allByAge(self, cascadeFetch=False):", "body": "matchedKeys = self.getPrimaryKeys(sortByAge=True)<EOL>if matchedKeys:<EOL><INDENT>return self.getMultiple(matchedKeys, cascadeFetch=cascadeFetch)<EOL><DEDENT>return IRQueryableList([], mdl=self.mdl)<EOL>", "docstring": "allByAge - Get the underlying objects which match the filter criteria, ordered oldest -> newest\n        If you are doing a queue or just need the head/tail, consider .first() and .last() instead.\n\n\n@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model\n   will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n@return - Objects of the Model instance associated with this query, sorted oldest->newest", "id": "f4156:c3:m10"}
{"signature": "def __init__(self, mdl):", "body": "mdl.validateModel()<EOL>self.mdl = mdl<EOL>self.keyName = mdl.KEY_NAME<EOL>fields = mdl.FIELDS<EOL>self.fields = mdl.FIELDS<EOL>self.indexedFields = [fields[fieldName] for fieldName in mdl.INDEXED_FIELDS]<EOL>self._connection = None<EOL>", "docstring": "Internal constructor\n\n@param mdl - IndexedRedisModel implementer", "id": "f4156:c2:m0"}
{"signature": "def hasSameValues(self, other, cascadeObject=True):", "body": "if self.FIELDS != other.FIELDS:<EOL><INDENT>return False<EOL><DEDENT>oga = object.__getattribute__<EOL>for field in self.FIELDS:<EOL><INDENT>thisVal = oga(self, field)<EOL>otherVal = oga(other, field)<EOL>if thisVal != otherVal:<EOL><INDENT>return False<EOL><DEDENT>if cascadeObject is True and issubclass(field.__class__, IRForeignLinkFieldBase):<EOL><INDENT>if thisVal and thisVal.isFetched():<EOL><INDENT>if otherVal and otherVal.isFetched():<EOL><INDENT>theseForeign = thisVal.getObjs()<EOL>othersForeign = otherVal.getObjs()<EOL>for i in range(len(theseForeign)):<EOL><INDENT>if not theseForeign[i].hasSameValues(othersForeign[i]):<EOL><INDENT>return False<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>theseForeign = thisVal.getObjs()<EOL>for i in range(len(theseForeign)):<EOL><INDENT>if theseForeign[i].hasUnsavedChanges(cascadeObjects=True):<EOL><INDENT>return False<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if otherVal and otherVal.isFetched():<EOL><INDENT>othersForeign = otherVal.getObjs()<EOL>for i in range(len(othersForeign)):<EOL><INDENT>if othersForeign[i].hasUnsavedChanges(cascadeObjects=True):<EOL><INDENT>return False<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return True<EOL>", "docstring": "hasSameValues - Check if this and another model have the same fields and values.\n\nThis does NOT include id, so the models can have the same values but be different objects in the database.\n\n@param other <IndexedRedisModel> - Another model\n\n@param cascadeObject <bool> default True - If True, foreign link values with changes will be considered a difference.\n        Otherwise, only the immediate values are checked.\n\n@return <bool> - True if all fields have the same value, otherwise False", "id": "f4156:c1:m15"}
{"signature": "def count(self):", "body": "conn = self._get_connection()<EOL>numFilters = len(self.filters)<EOL>numNotFilters = len(self.notFilters)<EOL>if numFilters + numNotFilters == <NUM_LIT:0>:<EOL><INDENT>return conn.scard(self._get_ids_key())<EOL><DEDENT>if numNotFilters == <NUM_LIT:0>:<EOL><INDENT>if numFilters == <NUM_LIT:1>:<EOL><INDENT>(filterFieldName, filterValue) = self.filters[<NUM_LIT:0>]<EOL>return conn.scard(self._get_key_for_index(filterFieldName, filterValue))<EOL><DEDENT>indexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.filters]<EOL>return len(conn.sinter(indexKeys))<EOL><DEDENT>notIndexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.notFilters]<EOL>if numFilters == <NUM_LIT:0>:<EOL><INDENT>return len(conn.sdiff(self._get_ids_key(), *notIndexKeys))<EOL><DEDENT>indexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.filters]<EOL>tempKey = self._getTempKey()<EOL>pipeline = conn.pipeline()<EOL>pipeline.sinterstore(tempKey, *indexKeys)<EOL>pipeline.sdiff(tempKey, *notIndexKeys)<EOL>pipeline.delete(tempKey)<EOL>pks = pipeline.execute()[<NUM_LIT:1>] <EOL>return len(pks)<EOL>", "docstring": "count - gets the number of records matching the filter criteria\n\nExample:\n        theCount = Model.objects.filter(field1='value').count()", "id": "f4156:c3:m6"}
{"signature": "def setDefaultRedisConnectionParams( connectionParams ):", "body": "global _defaultRedisConnectionParams<EOL>_defaultRedisConnectionParams.clear()<EOL>for key, value in connectionParams.items():<EOL><INDENT>_defaultRedisConnectionParams[key] = value<EOL><DEDENT>clearRedisPools()<EOL>", "docstring": "setDefaultRedisConnectionParams - Sets the default parameters used when connecting to Redis.\n\n  This should be the args to redis.Redis in dict (kwargs) form.\n\n  @param connectionParams <dict> - A dict of connection parameters.\n    Common keys are:\n\n       host <str> - hostname/ip of Redis server (default '127.0.0.1')\n       port <int> - Port number\t\t\t(default 6379)\n       db  <int>  - Redis DB number\t\t(default 0)\n\n   Omitting any of those keys will ensure the default value listed is used.\n\n  This connection info will be used by default for all connections to Redis, unless explicitly set otherwise.\n  The common way to override is to define REDIS_CONNECTION_PARAMS on a model, or use AltConnectedModel = MyModel.connectAlt( PARAMS )\n\n  Any omitted fields in these connection overrides will inherit the value from the global default.\n\n  For example, if your global default connection params define host = 'example.com', port=15000, and db=0, \n    and then one of your models has\n\n       REDIS_CONNECTION_PARAMS = { 'db' : 1 }\n\n    as an attribute, then that model's connection will inherit host='example.com\" and port=15000 but override db and use db=1\n\n\n    NOTE: Calling this function will clear the connection_pool attribute of all stored managed connections, disconnect all managed connections,\n      and close-out the connection pool.\n     It may not be safe to call this function while other threads are potentially hitting Redis (not that it would make sense anyway...)\n\n     @see clearRedisPools   for more info", "id": "f4156:m0"}
{"signature": "def cascadeFetch(self):", "body": "IndexedRedisQuery._doCascadeFetch(self)<EOL>", "docstring": "cascadeFetch - Immediately fetch all foreign links on this field, and all their links, etc.\n\n  Normally, this would be done on access of the foreign members, or at .all() time by passing cascadeFetch=True into\n   the fetch function\n\n   e.x.    MyModel.objects.filter(...).all(cascadeFetch=True)", "id": "f4156:c1:m25"}
{"signature": "def allOnlyFields(self, fields, cascadeFetch=False):", "body": "matchedKeys = self.getPrimaryKeys()<EOL>if matchedKeys:<EOL><INDENT>return self.getMultipleOnlyFields(matchedKeys, fields, cascadeFetch=cascadeFetch)<EOL><DEDENT>return IRQueryableList([], mdl=self.mdl)<EOL>", "docstring": "allOnlyFields - Get the objects which match the filter criteria, only fetching given fields.\n\n@param fields - List of fields to fetch\n\n@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model\n   will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n\n@return - Partial objects with only the given fields fetched", "id": "f4156:c3:m11"}
{"signature": "@classmethod<EOL><INDENT>def validateModel(model):<DEDENT>", "body": "if model == IndexedRedisModel:<EOL><INDENT>import re<EOL>if re.match('<STR_LIT>', sys.argv[<NUM_LIT:0>]):<EOL><INDENT>return<EOL><DEDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>global validatedModels<EOL>keyName = model.KEY_NAME<EOL>if not keyName:<EOL><INDENT>raise InvalidModelException('<STR_LIT>' %(str(model.__name__), ) )<EOL><DEDENT>if model in validatedModels:<EOL><INDENT>return True<EOL><DEDENT>failedValidationStr = '<STR_LIT>' %(str(model.__name__), ) <EOL>fieldSet = set(model.FIELDS)<EOL>indexedFieldSet = set(model.INDEXED_FIELDS)<EOL>if not fieldSet:<EOL><INDENT>raise InvalidModelException('<STR_LIT>' %(failedValidationStr,))<EOL><DEDENT>if hasattr(model, '<STR_LIT>'):<EOL><INDENT>raise InvalidModelException('<STR_LIT>')<EOL><DEDENT>if hasattr(model, '<STR_LIT>'):<EOL><INDENT>raise InvalidModelException('<STR_LIT>')<EOL><DEDENT>newFields = []<EOL>updatedFields = []<EOL>mustUpdateFields = False<EOL>foreignFields = []<EOL>for thisField in fieldSet:<EOL><INDENT>if thisField == '<STR_LIT>':<EOL><INDENT>raise InvalidModelException('<STR_LIT>' %(failedValidationStr,))<EOL><DEDENT>try:<EOL><INDENT>codecs.ascii_encode(thisField)<EOL><DEDENT>except UnicodeDecodeError as e:<EOL><INDENT>raise InvalidModelException('<STR_LIT>' %(failedValidationStr, to_unicode(thisField), str(e)))<EOL><DEDENT>if issubclass(thisField.__class__, IRForeignLinkFieldBase):<EOL><INDENT>foreignFields.append(thisField)<EOL><DEDENT>if issubclass(thisField.__class__, IRField):<EOL><INDENT>newFields.append(thisField)<EOL><DEDENT>else:<EOL><INDENT>mustUpdateFields = True<EOL>newField = IRClassicField(thisField)<EOL>newFields.append(newField)<EOL>updatedFields.append(thisField)<EOL>thisField = newField<EOL><DEDENT>if str(thisField) == '<STR_LIT>':<EOL><INDENT>raise InvalidModelException('<STR_LIT>' %(failedValidationStr, str(type(thisField)), repr(thisField)   ) )<EOL><DEDENT>if thisField in indexedFieldSet and 
thisField.CAN_INDEX is False:<EOL><INDENT>raise InvalidModelException('<STR_LIT>' %(failedValidationStr, str(thisField.__class__.__name__), repr(thisField)))<EOL><DEDENT>if hasattr(IndexedRedisModel, thisField) is True:<EOL><INDENT>raise InvalidModelException('<STR_LIT>' %(failedValidationStr, str(thisField)))<EOL><DEDENT><DEDENT>if mustUpdateFields is True:<EOL><INDENT>model.FIELDS = newFields<EOL>deprecatedMessage('<STR_LIT>' %(model.__name__, repr(updatedFields)), '<STR_LIT>' + model.__name__)<EOL><DEDENT>model.FIELDS = KeyList(model.FIELDS)<EOL>if bool(indexedFieldSet - fieldSet):<EOL><INDENT>raise InvalidModelException('<STR_LIT>' %(failedValidationStr, str(list(indexedFieldSet - fieldSet)), ) )<EOL><DEDENT>model.foreignFields = foreignFields<EOL>validatedModels.add(model)<EOL>return True<EOL>", "docstring": "validateModel - Class method that validates a given model is implemented correctly. Will only be validated once, on first model instantiation.\n\n@param model - Implicit of own class\n\n@return - True\n\n@raises - InvalidModelException if there is a problem with the model, and the message contains relevant information.", "id": "f4156:c1:m29"}
{"signature": "def last(self, cascadeFetch=False):", "body": "obj = None<EOL>matchedKeys = self.getPrimaryKeys(sortByAge=True)<EOL>if matchedKeys:<EOL><INDENT>while matchedKeys and obj is None:<EOL><INDENT>obj = self.get(matchedKeys.pop(), cascadeFetch=cascadeFetch)<EOL><DEDENT><DEDENT>return obj<EOL>", "docstring": "Last - Returns the newest record (highest primary key) with current filters.\n        This makes an efficient queue, as it only fetches a single object.\n\n\n@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model\n   will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n@return - Instance of Model object, or None if no items match current filters", "id": "f4156:c3:m14"}
{"signature": "def _rem_id_from_keys(self, pk, conn=None):", "body": "if conn is None:<EOL><INDENT>conn = self._get_connection()<EOL><DEDENT>conn.srem(self._get_ids_key(), pk)<EOL>", "docstring": "_rem_id_from_keys - Remove primary key from table\ninternal", "id": "f4156:c2:m6"}
{"signature": "def getRedisPool(params):", "body": "global RedisPools<EOL>global _defaultRedisConnectionParams<EOL>global _redisManagedConnectionParams<EOL>if not params:<EOL><INDENT>params = _defaultRedisConnectionParams<EOL>isDefaultParams = True<EOL><DEDENT>else:<EOL><INDENT>isDefaultParams = bool(params is _defaultRedisConnectionParams)<EOL><DEDENT>if '<STR_LIT>' in params:<EOL><INDENT>return params['<STR_LIT>']<EOL><DEDENT>hashValue = hashDictOneLevel(params)<EOL>if hashValue in RedisPools:<EOL><INDENT>params['<STR_LIT>'] = RedisPools[hashValue]<EOL>return RedisPools[hashValue]<EOL><DEDENT>if not isDefaultParams:<EOL><INDENT>origParams = params<EOL>params = copy.copy(params)<EOL><DEDENT>else:<EOL><INDENT>origParams = params<EOL><DEDENT>checkAgain = False<EOL>if '<STR_LIT:host>' not in params:<EOL><INDENT>if not isDefaultParams and '<STR_LIT:host>' in _defaultRedisConnectionParams:<EOL><INDENT>params['<STR_LIT:host>'] = _defaultRedisConnectionParams['<STR_LIT:host>']<EOL><DEDENT>else:<EOL><INDENT>params['<STR_LIT:host>'] = '<STR_LIT:127.0.0.1>'<EOL><DEDENT>checkAgain = True<EOL><DEDENT>if '<STR_LIT:port>' not in params:<EOL><INDENT>if not isDefaultParams and '<STR_LIT:port>' in _defaultRedisConnectionParams:<EOL><INDENT>params['<STR_LIT:port>'] = _defaultRedisConnectionParams['<STR_LIT:port>']<EOL><DEDENT>else:<EOL><INDENT>params['<STR_LIT:port>'] = <NUM_LIT><EOL><DEDENT>checkAgain = True<EOL><DEDENT>if '<STR_LIT>' not in params:<EOL><INDENT>if not isDefaultParams and '<STR_LIT>' in _defaultRedisConnectionParams:<EOL><INDENT>params['<STR_LIT>'] = _defaultRedisConnectionParams['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>params['<STR_LIT>'] = <NUM_LIT:0><EOL><DEDENT>checkAgain = True<EOL><DEDENT>if not isDefaultParams:<EOL><INDENT>otherGlobalKeys = set(_defaultRedisConnectionParams.keys()) - set(params.keys())<EOL>for otherKey in otherGlobalKeys:<EOL><INDENT>if otherKey == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>params[otherKey] = 
_defaultRedisConnectionParams[otherKey]<EOL>checkAgain = True<EOL><DEDENT><DEDENT>if checkAgain:<EOL><INDENT>hashValue = hashDictOneLevel(params)<EOL>if hashValue in RedisPools:<EOL><INDENT>params['<STR_LIT>'] = RedisPools[hashValue]<EOL>return RedisPools[hashValue]<EOL><DEDENT><DEDENT>connectionPool = redis.ConnectionPool(**params)<EOL>origParams['<STR_LIT>'] = params['<STR_LIT>'] = connectionPool<EOL>RedisPools[hashValue] = connectionPool<EOL>origParamsHash = hashDictOneLevel(origParams)<EOL>if origParamsHash not in _redisManagedConnectionParams:<EOL><INDENT>_redisManagedConnectionParams[origParamsHash] = [origParams]<EOL><DEDENT>elif origParams not in _redisManagedConnectionParams[origParamsHash]:<EOL><INDENT>_redisManagedConnectionParams[origParamsHash].append(origParams)<EOL><DEDENT>return connectionPool<EOL>", "docstring": "getRedisPool - Returns and possibly also creates a Redis connection pool\n        based on the REDIS_CONNECTION_PARAMS passed in.\n\n        The goal of this method is to keep a small connection pool rolling\n        to each unique Redis instance, otherwise during network issues etc\n        python-redis will leak connections and in short-order can exhaust\n        all the ports on a system. There's probably also some minor\n        performance gain in sharing Pools.\n\n        Will modify \"params\", if \"host\" and/or \"port\" are missing, will fill\n        them in with defaults, and prior to return will set \"connection_pool\"\n        on params, which will allow immediate return on the next call,\n        and allow access to the pool directly from the model object.\n\n        @param params <dict> - REDIS_CONNECTION_PARAMS - kwargs to redis.Redis\n\n        @return redis.ConnectionPool corrosponding to this unique server.", "id": "f4156:m3"}
{"signature": "def getOnlyFields(self, pk, fields, cascadeFetch=False):", "body": "conn = self._get_connection()<EOL>key = self._get_key_for_id(pk)<EOL>res = conn.hmget(key, fields)<EOL>if type(res) != list or not len(res):<EOL><INDENT>return None<EOL><DEDENT>objDict = {}<EOL>numFields = len(fields)<EOL>i = <NUM_LIT:0><EOL>anyNotNone = False<EOL>while i < numFields:<EOL><INDENT>objDict[fields[i]] = res[i]<EOL>if res[i] != None:<EOL><INDENT>anyNotNone = True<EOL><DEDENT>i += <NUM_LIT:1><EOL><DEDENT>if anyNotNone is False:<EOL><INDENT>return None<EOL><DEDENT>objDict['<STR_LIT>'] = pk<EOL>ret = self._redisResultToObj(objDict)<EOL>if cascadeFetch is True:<EOL><INDENT>self._doCascadeFetch(ret)<EOL><DEDENT>return ret<EOL>", "docstring": "getOnlyFields - Gets only certain fields from a paticular primary key. For working on entire filter set, see allOnlyFields\n\n@param pk <int> - Primary Key\n\n@param fields list<str> - List of fields\n\n@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model\n   will be fetched immediately. If False, foreign objects will be fetched on-access.\n\n\nreturn - Partial objects with only fields applied", "id": "f4156:c3:m20"}
{"signature": "def pprint(self, stream=None):", "body": "pprint.pprint(self.asDict(includeMeta=True, forStorage=False, strKeys=True), stream=stream)<EOL>", "docstring": "pprint - Pretty-print a dict representation of this object.\n\n@param stream <file/None> - Either a stream to output, or None to default to sys.stdout", "id": "f4156:c1:m4"}
{"signature": "def _get_key_for_index(self, indexedField, val):", "body": "<EOL>if hasattr(indexedField, '<STR_LIT>'):<EOL><INDENT>val = indexedField.toIndex(val)<EOL><DEDENT>else:<EOL><INDENT>val = self.fields[indexedField].toIndex(val)<EOL><DEDENT>return '<STR_LIT>'.join( [INDEXED_REDIS_PREFIX, self.keyName, '<STR_LIT>', indexedField, '<STR_LIT::>', val] )<EOL>", "docstring": "_get_key_for_index - Returns the key name that would hold the indexes on a value\nInternal - does not validate that indexedFields is actually indexed. Trusts you. Don't let it down.\n\n@param indexedField - string of field name\n@param val - Value of field\n\n@return - Key name string, potentially hashed.", "id": "f4156:c2:m9"}
{"signature": "def __getstate__(self):", "body": "myData = self.asDict(True, forStorage=False)<EOL>myData['<STR_LIT>'] = self._origData<EOL>return myData<EOL>", "docstring": "pickle uses this", "id": "f4156:c1:m26"}
{"signature": "def deprecatedMessage(msg, key=None, printStack=False):", "body": "if __deprecatedMessagesEnabled is False:<EOL><INDENT>return<EOL><DEDENT>if not _alreadyWarned:<EOL><INDENT>sys.stderr.write('<STR_LIT>')<EOL><DEDENT>if key is None:<EOL><INDENT>from .compat_str import tobytes<EOL>key = md5(tobytes(msg)).hexdigest()<EOL><DEDENT>if key not in _alreadyWarned:<EOL><INDENT>_alreadyWarned[key] = True<EOL>sys.stderr.write('<STR_LIT>' %(msg, ))<EOL>if printStack:<EOL><INDENT>sys.stderr.write('<STR_LIT>')<EOL>curStack = traceback.extract_stack()<EOL>sys.stderr.write('<STR_LIT:U+0020>' + '<STR_LIT>'.join(traceback.format_list(curStack[:-<NUM_LIT:2>])).replace('<STR_LIT:\\t>', '<STR_LIT:U+0020>') + '<STR_LIT:\\n>')<EOL><DEDENT><DEDENT>", "docstring": "deprecatedMessage - Print a deprecated messsage (unless they are toggled off). Will print a message only once (based on \"key\")\n\n@param msg <str> - Deprecated message to possibly print\n\n@param key <anything> - A key that is specific to this message. \n        If None is provided (default), one will be generated from the md5 of the message.\n        However, better to save cycles and provide a unique key if at all possible.\n        The decorator uses the function itself as the key.\n\n@param printStack <bool> Default False, if True print a stack trace", "id": "f4157:m1"}
{"signature": "def hashDictOneLevel(myDict):", "body": "keys = [str(x) for x in myDict.keys()]<EOL>keys.sort()<EOL>lst = []<EOL>for key in keys:<EOL><INDENT>lst.append(str(myDict[key]) + '<STR_LIT>')<EOL><DEDENT>return '<STR_LIT>'.join(lst).__hash__()<EOL>", "docstring": "A function which can generate a hash of a one-level \n  dict containing strings (like REDIS_CONNECTION_PARAMS)\n\n@param myDict <dict> - Dict with string keys and values\n\n@return <long> - Hash of myDict", "id": "f4158:m0"}
{"signature": "def raw_from_delimited(msgs: DelimitedMsg) -> RawMsgs:", "body": "delim = _rindex(msgs, b'<STR_LIT>')<EOL>return tuple(msgs[:delim]), tuple(msgs[delim + <NUM_LIT:1>:])<EOL>", "docstring": "\\\n    From a message consisting of header frames, delimiter frame, and payload frames, return a tuple `(header, payload)`.\n    The payload frames may be returned as sequences of bytes (raw) or as `Message`s.", "id": "f4166:m3"}
{"signature": "def parse(self, data: RawMessage) -> Message:", "body": "try:<EOL><INDENT>return self.receiver.parse(data)<EOL><DEDENT>except KeyError as err:<EOL><INDENT>raise UnknownCommandError from err<EOL><DEDENT>except DecodeError as err:<EOL><INDENT>raise UnknownCommandError(f\"<STR_LIT>\") from err<EOL><DEDENT>", "docstring": "\\\n        Parses a binary protobuf message into a Message object.", "id": "f4168:c0:m1"}
{"signature": "async def get_ltd_product(session, slug=None, url=None):", "body": "if url is None:<EOL><INDENT>url = '<STR_LIT>'.format(slug)<EOL><DEDENT>async with session.get(url) as response:<EOL><INDENT>data = await response.json()<EOL><DEDENT>return data<EOL>", "docstring": "Get the product resource (JSON document) from the LSST the Docs API.\n\n    Parameters\n    ----------\n    session : `aiohttp.ClientSession`\n        Your application's aiohttp client session.\n        See http://aiohttp.readthedocs.io/en/stable/client.html.\n    slug : `str`, optional\n        Slug identfying the product. This is the same as the subdomain.\n        For example, ``'ldm-151'`` is the slug for ``ldm-151.lsst.io``.\n        A full product URL can be provided instead, see ``url``.\n    url : `str`, optional\n        The full LTD Keeper URL for the product resource. For example,\n        ``'https://keeper.lsst.codes/products/ldm-151'``. The ``slug``\n        can be provided instead.\n\n    Returns\n    -------\n    product : `dict`\n        Product dataset. See\n        https://ltd-keeper.lsst.io/products.html#get--products-(slug)\n        for fields.", "id": "f4201:m1"}
{"signature": "async def get_ltd_product_urls(session):", "body": "product_url = '<STR_LIT>'<EOL>async with session.get(product_url) as response:<EOL><INDENT>data = await response.json()<EOL><DEDENT>return data['<STR_LIT>']<EOL>", "docstring": "Get URLs for LSST the Docs (LTD) products from the LTD Keeper API.\n\n    Parameters\n    ----------\n    session : `aiohttp.ClientSession`\n        Your application's aiohttp client session.\n        See http://aiohttp.readthedocs.io/en/stable/client.html.\n\n    Returns\n    -------\n    product_urls : `list`\n        List of product URLs.", "id": "f4201:m0"}
{"signature": "def ensure_pandoc(func):", "body": "logger = logging.getLogger(__name__)<EOL>@functools.wraps(func)<EOL>def _install_and_run(*args, **kwargs):<EOL><INDENT>try:<EOL><INDENT>result = func(*args, **kwargs)<EOL><DEDENT>except OSError:<EOL><INDENT>message = \"<STR_LIT>\"<EOL>logger.warning(message)<EOL>pypandoc.download_pandoc(version='<STR_LIT>')<EOL>logger.debug(\"<STR_LIT>\")<EOL>result = func(*args, **kwargs)<EOL><DEDENT>return result<EOL><DEDENT>return _install_and_run<EOL>", "docstring": "Decorate a function that uses pypandoc to ensure that pandoc is\n    installed if necessary.", "id": "f4202:m0"}
{"signature": "def __call__(self, tex_source):", "body": "for linker in self._linkers:<EOL><INDENT>tex_source = linker(tex_source)<EOL><DEDENT>return tex_source<EOL>", "docstring": "r\"\"\"Convert citations in LaTeX source to Hyperref links.\n\n        Parameters\n        ----------\n        tex_source : `str`\n            LaTeX document source.\n\n        Returns\n        -------\n        processed_tex : `str`\n            LaTeX document source with all citation commands converted to\n            ``\\hyperref`` commands.", "id": "f4206:c0:m1"}
{"signature": "def remove_comments(tex_source):", "body": "<EOL>return re.sub(r'<STR_LIT>', r'<STR_LIT>', tex_source, flags=re.M)<EOL>", "docstring": "Delete latex comments from TeX source.\n\n    Parameters\n    ----------\n    tex_source : str\n        TeX source content.\n\n    Returns\n    -------\n    tex_source : str\n        TeX source without comments.", "id": "f4207:m0"}
{"signature": "def remove_trailing_whitespace(tex_source):", "body": "<EOL>return re.sub(r'<STR_LIT>', '<STR_LIT>', tex_source, flags=re.M)<EOL>", "docstring": "Delete trailing whitespace from TeX source.\n\n    Parameters\n    ----------\n    tex_source : str\n        TeX source content.\n\n    Returns\n    -------\n    tex_source : str\n        TeX source without trailing whitespace.", "id": "f4207:m1"}
{"signature": "def process_inputs(tex_source, root_dir=None):", "body": "logger = logging.getLogger(__name__)<EOL>def _sub_line(match):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>fname = match.group('<STR_LIT:filename>')<EOL>if not fname.endswith('<STR_LIT>'):<EOL><INDENT>full_fname = \"<STR_LIT:.>\".join((fname, '<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>full_fname = fname<EOL><DEDENT>full_path = os.path.abspath(os.path.join(root_dir, full_fname))<EOL>try:<EOL><INDENT>included_source = read_tex_file(full_path, root_dir=root_dir)<EOL><DEDENT>except IOError:<EOL><INDENT>logger.error(\"<STR_LIT>\".format(full_path))<EOL>raise<EOL><DEDENT>else:<EOL><INDENT>return included_source<EOL><DEDENT><DEDENT>tex_source = input_include_pattern.sub(_sub_line, tex_source)<EOL>return tex_source<EOL>", "docstring": "r\"\"\"Insert referenced TeX file contents (from  ``\\input`` and ``\\include``\n    commands) into the source.\n\n    Parameters\n    ----------\n    tex_source : `str`\n        TeX source where referenced source files will be found and inserted.\n    root_dir : `str`, optional\n        Name of the directory containing the TeX project's root file. Files\n        referenced by TeX ``\\input`` and ``\\include`` commands are relative to\n        this directory. If not set, the current working directory is assumed.\n\n    Returns\n    -------\n    tex_source : `str`\n        TeX source.\n\n    See also\n    --------\n    `read_tex_file`\n        Recommended API for reading a root TeX source file and inserting\n        referenced files.", "id": "f4207:m3"}
{"signature": "def _parse_doc_ref(self):", "body": "command = LatexCommand(<EOL>'<STR_LIT>',<EOL>{'<STR_LIT:name>': '<STR_LIT>', '<STR_LIT>': True, '<STR_LIT>': '<STR_LIT:{>'})<EOL>try:<EOL><INDENT>parsed = next(command.parse(self._tex))<EOL><DEDENT>except StopIteration:<EOL><INDENT>self._logger.warning('<STR_LIT>')<EOL>self._handle = None<EOL>self._series = None<EOL>self._serial = None<EOL>return<EOL><DEDENT>self._handle = parsed['<STR_LIT>']<EOL>try:<EOL><INDENT>self._series, self._serial = self._handle.split('<STR_LIT:->', <NUM_LIT:1>)<EOL><DEDENT>except ValueError:<EOL><INDENT>self._logger.warning('<STR_LIT>'<EOL>'<STR_LIT>', self._handle)<EOL>self._series = None<EOL>self._serial = None<EOL><DEDENT>", "docstring": "Parse the document handle.\n\n        Sets the ``_series``, ``_serial``, and ``_handle`` attributes.", "id": "f4210:c0:m29"}
{"signature": "def _parse_abstract(self):", "body": "command = LatexCommand(<EOL>'<STR_LIT>',<EOL>{'<STR_LIT:name>': '<STR_LIT>', '<STR_LIT>': True, '<STR_LIT>': '<STR_LIT:{>'})<EOL>try:<EOL><INDENT>parsed = next(command.parse(self._tex))<EOL><DEDENT>except StopIteration:<EOL><INDENT>self._logger.warning('<STR_LIT>')<EOL>self._abstract = None<EOL>return<EOL><DEDENT>try:<EOL><INDENT>content = parsed['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>self._logger.warning('<STR_LIT>')<EOL>self._abstract = None<EOL>return<EOL><DEDENT>content = content.strip()<EOL>self._abstract = content<EOL>", "docstring": "Parse the abstract from the TeX source.\n\n        Sets the ``_abstract`` attribute.", "id": "f4210:c0:m31"}
{"signature": "@property<EOL><INDENT>def title(self):<DEDENT>", "body": "if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._parse_title()<EOL><DEDENT>return self._title<EOL>", "docstring": "LaTeX-formatted document title (`str`).", "id": "f4210:c0:m5"}
{"signature": "@property<EOL><INDENT>def html_abstract(self):<DEDENT>", "body": "return self.format_abstract(format='<STR_LIT>', deparagraph=False,<EOL>mathjax=False, smart=True)<EOL>", "docstring": "HTML5-formatted document abstract (`str`).", "id": "f4210:c0:m12"}
{"signature": "def _parse_revision_date(self):", "body": "doc_datetime = None<EOL>if not self.is_draft:<EOL><INDENT>date_command = LatexCommand(<EOL>'<STR_LIT:date>',<EOL>{'<STR_LIT:name>': '<STR_LIT:content>', '<STR_LIT>': True, '<STR_LIT>': '<STR_LIT:{>'})<EOL>try:<EOL><INDENT>parsed = next(date_command.parse(self._tex))<EOL>command_content = parsed['<STR_LIT:content>'].strip()<EOL><DEDENT>except StopIteration:<EOL><INDENT>command_content = None<EOL>self._logger.warning('<STR_LIT>')<EOL><DEDENT>if command_content is not None and command_content != r'<STR_LIT>':<EOL><INDENT>try:<EOL><INDENT>doc_datetime = datetime.datetime.strptime(command_content,<EOL>'<STR_LIT>')<EOL>project_tz = timezone('<STR_LIT>')<EOL>localized_datetime = project_tz.localize(doc_datetime)<EOL>doc_datetime = localized_datetime.astimezone(pytz.utc)<EOL>self._revision_datetime_source = '<STR_LIT>'<EOL><DEDENT>except ValueError:<EOL><INDENT>self._logger.warning('<STR_LIT>'<EOL>'<STR_LIT>',<EOL>command_content)<EOL><DEDENT><DEDENT><DEDENT>if doc_datetime is None:<EOL><INDENT>content_extensions = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>try:<EOL><INDENT>doc_datetime = get_content_commit_date(<EOL>content_extensions,<EOL>root_dir=self._root_dir)<EOL>self._revision_datetime_source = '<STR_LIT>'<EOL><DEDENT>except RuntimeError:<EOL><INDENT>self._logger.warning('<STR_LIT>'<EOL>'<STR_LIT>',<EOL>self._root_dir)<EOL><DEDENT><DEDENT>if doc_datetime is None:<EOL><INDENT>doc_datetime = pytz.utc.localize(datetime.datetime.now())<EOL>self._revision_datetime_source = '<STR_LIT>'<EOL><DEDENT>self._datetime = doc_datetime<EOL>", "docstring": "r\"\"\"Parse the ``\\date`` command, falling back to getting the\n        most recent Git commit date and the current datetime.\n\n        Result is available from the `revision_datetime` attribute.", "id": "f4210:c0:m34"}
{"signature": "def format_short_title(self, format='<STR_LIT>', deparagraph=True,<EOL>mathjax=False, smart=True, extra_args=None):", "body": "if self.short_title is None:<EOL><INDENT>return None<EOL><DEDENT>output_text = convert_lsstdoc_tex(<EOL>self.short_title, '<STR_LIT>',<EOL>deparagraph=deparagraph,<EOL>mathjax=mathjax,<EOL>smart=smart,<EOL>extra_args=extra_args)<EOL>return output_text<EOL>", "docstring": "Get the document short title in the specified markup format.\n\n        Parameters\n        ----------\n        format : `str`, optional\n            Output format (such as ``'html5'`` or ``'plain'``).\n        deparagraph : `bool`, optional\n            Remove the paragraph tags from single paragraph content.\n        mathjax : `bool`, optional\n            Allow pandoc to use MathJax math markup.\n        smart : `True`, optional\n            Allow pandoc to create \"smart\" unicode punctuation.\n        extra_args : `list`, optional\n            Additional command line flags to pass to Pandoc. See\n            `lsstprojectmeta.pandoc.convert.convert_text`.\n\n        Returns\n        -------\n        output_text : `str`\n            Converted content or `None` if the short title is not available in\n            the document.", "id": "f4210:c0:m24"}
{"signature": "@property<EOL><INDENT>def plain_title(self):<DEDENT>", "body": "return self.format_title(format='<STR_LIT>', deparagraph=True,<EOL>mathjax=False, smart=True)<EOL>", "docstring": "Plain-text-formatted document title (`str`).", "id": "f4210:c0:m4"}
{"signature": "def _parse_title(self):", "body": "command = LatexCommand(<EOL>'<STR_LIT:title>',<EOL>{'<STR_LIT:name>': '<STR_LIT>', '<STR_LIT>': False, '<STR_LIT>': '<STR_LIT:[>'},<EOL>{'<STR_LIT:name>': '<STR_LIT>', '<STR_LIT>': True, '<STR_LIT>': '<STR_LIT:{>'})<EOL>try:<EOL><INDENT>parsed = next(command.parse(self._tex))<EOL><DEDENT>except StopIteration:<EOL><INDENT>self._logger.warning('<STR_LIT>')<EOL>self._title = None<EOL>self._short_title = None<EOL><DEDENT>self._title = parsed['<STR_LIT>']<EOL>try:<EOL><INDENT>self._short_title = parsed['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>self._logger.warning('<STR_LIT>')<EOL>self._short_title = None<EOL><DEDENT>", "docstring": "Parse the title from TeX source.\n\n        Sets these attributes:\n\n        - ``_title``\n        - ``_short_title``", "id": "f4210:c0:m28"}
{"signature": "def format_authors(self, format='<STR_LIT>', deparagraph=True, mathjax=False,<EOL>smart=True, extra_args=None):", "body": "formatted_authors = []<EOL>for latex_author in self.authors:<EOL><INDENT>formatted_author = convert_lsstdoc_tex(<EOL>latex_author, format,<EOL>deparagraph=deparagraph,<EOL>mathjax=mathjax,<EOL>smart=smart,<EOL>extra_args=extra_args)<EOL>formatted_author = formatted_author.strip()<EOL>formatted_authors.append(formatted_author)<EOL><DEDENT>return formatted_authors<EOL>", "docstring": "Get the document authors in the specified markup format.\n\n        Parameters\n        ----------\n        format : `str`, optional\n            Output format (such as ``'html5'`` or ``'plain'``).\n        deparagraph : `bool`, optional\n            Remove the paragraph tags from single paragraph content.\n        mathjax : `bool`, optional\n            Allow pandoc to use MathJax math markup.\n        smart : `True`, optional\n            Allow pandoc to create \"smart\" unicode punctuation.\n        extra_args : `list`, optional\n            Additional command line flags to pass to Pandoc. See\n            `lsstprojectmeta.pandoc.convert.convert_text`.\n\n        Returns\n        -------\n        output_text : `list` of `str`\n            Sequence of author names in the specified output markup format.", "id": "f4210:c0:m26"}
{"signature": "@property<EOL><INDENT>def is_draft(self):<DEDENT>", "body": "if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._parse_documentclass()<EOL><DEDENT>if '<STR_LIT>' in self._document_options:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Document is a draft if ``'lsstdoc'`` is included in the\n        documentclass options (`bool`).", "id": "f4210:c0:m18"}
{"signature": "@property<EOL><INDENT>def plain_content(self):<DEDENT>", "body": "return self.format_content(format='<STR_LIT>', mathjax=False, smart=True)<EOL>", "docstring": "Plain-text-formatted document content (`str`).", "id": "f4210:c0:m2"}
{"signature": "def format_abstract(self, format='<STR_LIT>', deparagraph=False, mathjax=False,<EOL>smart=True, extra_args=None):", "body": "if self.abstract is None:<EOL><INDENT>return None<EOL><DEDENT>abstract_latex = self._prep_snippet_for_pandoc(self.abstract)<EOL>output_text = convert_lsstdoc_tex(<EOL>abstract_latex, format,<EOL>deparagraph=deparagraph,<EOL>mathjax=mathjax,<EOL>smart=smart,<EOL>extra_args=extra_args)<EOL>return output_text<EOL>", "docstring": "Get the document abstract in the specified markup format.\n\n        Parameters\n        ----------\n        format : `str`, optional\n            Output format (such as ``'html5'`` or ``'plain'``).\n        deparagraph : `bool`, optional\n            Remove the paragraph tags from single paragraph content.\n        mathjax : `bool`, optional\n            Allow pandoc to use MathJax math markup.\n        smart : `True`, optional\n            Allow pandoc to create \"smart\" unicode punctuation.\n        extra_args : `list`, optional\n            Additional command line flags to pass to Pandoc. See\n            `lsstprojectmeta.pandoc.convert.convert_text`.\n\n        Returns\n        -------\n        output_text : `str`\n            Converted content or `None` if the title is not available in\n            the document.", "id": "f4210:c0:m25"}
{"signature": "@property<EOL><INDENT>def plain_short_title(self):<DEDENT>", "body": "return self.format_short_title(format='<STR_LIT>', deparagraph=True,<EOL>mathjax=False, smart=True)<EOL>", "docstring": "Plaintext-formatted document short title (`str`).", "id": "f4210:c0:m7"}
{"signature": "@property<EOL><INDENT>def authors(self):<DEDENT>", "body": "if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._parse_author()<EOL><DEDENT>return self._authors<EOL>", "docstring": "LaTeX-formatted authors (`list` of `str`).", "id": "f4210:c0:m11"}
{"signature": "def _load_bib_db(self):", "body": "<EOL>command = LatexCommand(<EOL>'<STR_LIT>',<EOL>{'<STR_LIT:name>': '<STR_LIT>', '<STR_LIT>': True, '<STR_LIT>': '<STR_LIT:{>'})<EOL>try:<EOL><INDENT>parsed = next(command.parse(self._tex))<EOL>bib_names = [n.strip() for n in parsed['<STR_LIT>'].split('<STR_LIT:U+002C>')]<EOL><DEDENT>except StopIteration:<EOL><INDENT>self._logger.warning('<STR_LIT>')<EOL>bib_names = []<EOL><DEDENT>custom_bib_names = [n for n in bib_names<EOL>if n not in KNOWN_LSSTTEXMF_BIB_NAMES]<EOL>custom_bibs = []<EOL>for custom_bib_name in custom_bib_names:<EOL><INDENT>custom_bib_path = os.path.join(<EOL>os.path.join(self._root_dir),<EOL>custom_bib_name + '<STR_LIT>'<EOL>)<EOL>if not os.path.exists(custom_bib_path):<EOL><INDENT>self._logger.warning('<STR_LIT>',<EOL>custom_bib_path)<EOL>continue<EOL><DEDENT>with open(custom_bib_path, '<STR_LIT:r>') as file_handle:<EOL><INDENT>custom_bibs.append(file_handle.read())<EOL><DEDENT><DEDENT>if len(custom_bibs) > <NUM_LIT:0>:<EOL><INDENT>custom_bibtex = '<STR_LIT>'.join(custom_bibs)<EOL><DEDENT>else:<EOL><INDENT>custom_bibtex = None<EOL><DEDENT>db = get_bibliography(bibtex=custom_bibtex)<EOL>self._bib_db = db<EOL>", "docstring": "r\"\"\"Load the BibTeX bibliography referenced by the document.\n\n        This method triggered by the `bib_db` attribute and populates the\n        `_bib_db` private attribute.\n\n        The ``\\bibliography`` command is parsed to identify the bibliographies\n        referenced by the document.", "id": "f4210:c0:m33"}
{"signature": "@property<EOL><INDENT>def abstract(self):<DEDENT>", "body": "if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._parse_abstract()<EOL><DEDENT>return self._abstract<EOL>", "docstring": "LaTeX-formatted abstract (`str`).", "id": "f4210:c0:m14"}
{"signature": "@staticmethod<EOL><INDENT>def _parse_whitespace_argument(source, name):<DEDENT>", "body": "<EOL>command_pattern = r'<STR_LIT>' + name + r'<STR_LIT>'<EOL>command_match = re.search(command_pattern, source)<EOL>if command_match is not None:<EOL><INDENT>source = source[command_match.end(<NUM_LIT:1>):]<EOL><DEDENT>pattern = r'<STR_LIT>'<EOL>match = re.search(pattern, source)<EOL>if match is None:<EOL><INDENT>message = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL>raise CommandParserError(message.format(name))<EOL><DEDENT>content = match.group('<STR_LIT:content>')<EOL>content.strip()<EOL>return content<EOL>", "docstring": "r\"\"\"Attempt to parse a single token on the first line of this source.\n\n        This method is used for parsing whitespace-delimited arguments, like\n        ``\\input file``. The source should ideally contain `` file`` along\n        with a newline character.\n\n        >>> source = 'Line 1\\n' r'\\input test.tex' '\\nLine 2'\n        >>> LatexCommand._parse_whitespace_argument(source, 'input')\n        'test.tex'\n\n        Bracket delimited arguments (``\\input{test.tex}``) are handled in\n        the normal logic of `_parse_command`.", "id": "f4211:c0:m4"}
{"signature": "def parse(self, source):", "body": "command_regex = self._make_command_regex(self.name)<EOL>for match in re.finditer(command_regex, source):<EOL><INDENT>self._logger.debug(match)<EOL>start_index = match.start(<NUM_LIT:0>)<EOL>yield self._parse_command(source, start_index)<EOL><DEDENT>", "docstring": "Parse command content from the LaTeX source.\n\n        Parameters\n        ----------\n        source : `str`\n            The full source of the tex document.\n\n        Yields\n        ------\n        parsed_command : `ParsedCommand`\n            Yields parsed commands instances for each occurence of the command\n            in the source.", "id": "f4211:c0:m1"}
{"signature": "def get_authoryear_from_entry(entry, paren=False):", "body": "def _format_last(person):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return '<STR_LIT:U+0020>'.join([n.strip('<STR_LIT:{}>') for n in person.last_names])<EOL><DEDENT>if len(entry.persons['<STR_LIT>']) > <NUM_LIT:0>:<EOL><INDENT>persons = entry.persons['<STR_LIT>']<EOL><DEDENT>elif len(entry.persons['<STR_LIT>']) > <NUM_LIT:0>:<EOL><INDENT>persons = entry.persons['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>raise AuthorYearError<EOL><DEDENT>try:<EOL><INDENT>year = entry.fields['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>raise AuthorYearError<EOL><DEDENT>if paren and len(persons) == <NUM_LIT:1>:<EOL><INDENT>template = '<STR_LIT>'<EOL>return template.format(author=_format_last(persons[<NUM_LIT:0>]),<EOL>year=year)<EOL><DEDENT>elif not paren and len(persons) == <NUM_LIT:1>:<EOL><INDENT>template = '<STR_LIT>'<EOL>return template.format(author=_format_last(persons[<NUM_LIT:0>]),<EOL>year=year)<EOL><DEDENT>elif paren and len(persons) == <NUM_LIT:2>:<EOL><INDENT>template = '<STR_LIT>'<EOL>return template.format(author1=_format_last(persons[<NUM_LIT:0>]),<EOL>author2=_format_last(persons[<NUM_LIT:1>]),<EOL>year=year)<EOL><DEDENT>elif not paren and len(persons) == <NUM_LIT:2>:<EOL><INDENT>template = '<STR_LIT>'<EOL>return template.format(author1=_format_last(persons[<NUM_LIT:0>]),<EOL>author2=_format_last(persons[<NUM_LIT:1>]),<EOL>year=year)<EOL><DEDENT>elif not paren and len(persons) > <NUM_LIT:2>:<EOL><INDENT>template = '<STR_LIT>'<EOL>return template.format(author=_format_last(persons[<NUM_LIT:0>]),<EOL>year=year)<EOL><DEDENT>elif paren and len(persons) > <NUM_LIT:2>:<EOL><INDENT>template = '<STR_LIT>'<EOL>return template.format(author=_format_last(persons[<NUM_LIT:0>]),<EOL>year=year)<EOL><DEDENT>", "docstring": "Get and format author-year text from a pybtex entry to emulate\n    natbib citations.\n\n    Parameters\n    ----------\n    entry : `pybtex.database.Entry`\n        A pybtex 
bibliography entry.\n    parens : `bool`, optional\n        Whether to add parentheses around the year. Default is `False`.\n\n    Returns\n    -------\n    authoryear : `str`\n        The author-year citation text.", "id": "f4212:m5"}
{"signature": "def get_lsst_bibtex(bibtex_filenames=None):", "body": "logger = logging.getLogger(__name__)<EOL>if bibtex_filenames is None:<EOL><INDENT>bibtex_names = KNOWN_LSSTTEXMF_BIB_NAMES<EOL><DEDENT>else:<EOL><INDENT>bibtex_names = []<EOL>for filename in bibtex_filenames:<EOL><INDENT>name = os.path.basename(os.path.splitext(filename)[<NUM_LIT:0>])<EOL>if name not in KNOWN_LSSTTEXMF_BIB_NAMES:<EOL><INDENT>logger.warning('<STR_LIT>',<EOL>name)<EOL>continue<EOL><DEDENT>bibtex_names.append(name)<EOL><DEDENT><DEDENT>uncached_names = [name for name in bibtex_names<EOL>if name not in _LSSTTEXMF_BIB_CACHE]<EOL>if len(uncached_names) > <NUM_LIT:0>:<EOL><INDENT>loop = asyncio.get_event_loop()<EOL>future = asyncio.ensure_future(_download_lsst_bibtex(uncached_names))<EOL>loop.run_until_complete(future)<EOL>for name, text in zip(bibtex_names, future.result()):<EOL><INDENT>_LSSTTEXMF_BIB_CACHE[name] = text<EOL><DEDENT><DEDENT>return {name: _LSSTTEXMF_BIB_CACHE[name] for name in bibtex_names}<EOL>", "docstring": "Get content of lsst-texmf bibliographies.\n\n    BibTeX content is downloaded from GitHub (``master`` branch of\n    https://github.com/lsst/lsst-texmf or retrieved from an in-memory cache.\n\n    Parameters\n    ----------\n    bibtex_filenames : sequence of `str`, optional\n        List of lsst-texmf BibTeX files to retrieve. These can be the filenames\n        of lsst-bibtex files (for example, ``['lsst.bib', 'lsst-dm.bib']``)\n        or names without an extension (``['lsst', 'lsst-dm']``). The default\n        (recommended) is to get *all* lsst-texmf bibliographies:\n\n        .. code-block:: python\n\n           ['lsst', 'lsst-dm', 'refs', 'books', 'refs_ads']\n\n    Returns\n    -------\n    bibtex : `dict`\n        Dictionary with keys that are bibtex file names (such as ``'lsst'``,\n        ``'lsst-dm'``). Values are the corresponding bibtex file content\n        (`str`).", "id": "f4212:m2"}
{"signature": "def get_installation_token(installation_id, integration_jwt):", "body": "api_root = '<STR_LIT>'<EOL>url = '<STR_LIT>'.format(<EOL>api_root=api_root,<EOL>id_=installation_id)<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>'.format(integration_jwt.decode('<STR_LIT:utf-8>')),<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>}<EOL>resp = requests.post(url, headers=headers)<EOL>resp.raise_for_status()<EOL>return resp.json()<EOL>", "docstring": "Create a GitHub token for an integration installation.\n\n    Parameters\n    ----------\n    installation_id : `int`\n        Installation ID. This is available in the URL of the integration's\n        **installation** ID.\n    integration_jwt : `bytes`\n        The integration's JSON Web Token (JWT). You can create this with\n        `create_jwt`.\n\n    Returns\n    -------\n    token_obj : `dict`\n        GitHub token object. Includes the fields:\n\n        - ``token``: the token string itself.\n        - ``expires_at``: date time string when the token expires.\n\n    Example\n    -------\n    The typical workflow for authenticating to an integration installation is:\n\n    .. code-block:: python\n\n       from dochubadapter.github import auth\n       jwt = auth.create_jwt(integration_id, private_key_path)\n       token_obj = auth.get_installation_token(installation_id, jwt)\n       print(token_obj['token'])\n\n    Notes\n    -----\n    See\n    https://developer.github.com/early-access/integrations/authentication/#as-an-installation\n    for more information", "id": "f4213:m0"}
{"signature": "def create_jwt(integration_id, private_key_path):", "body": "integration_id = int(integration_id)<EOL>with open(private_key_path, '<STR_LIT:rb>') as f:<EOL><INDENT>cert_bytes = f.read()<EOL><DEDENT>now = datetime.datetime.now()<EOL>expiration_time = now + datetime.timedelta(minutes=<NUM_LIT:9>)<EOL>payload = {<EOL>'<STR_LIT>': int(now.timestamp()),<EOL>'<STR_LIT>': int(expiration_time.timestamp()),<EOL>'<STR_LIT>': integration_id<EOL>}<EOL>return jwt.encode(payload, cert_bytes, algorithm='<STR_LIT>')<EOL>", "docstring": "Create a JSON Web Token to authenticate a GitHub Integration or\n    installation.\n\n    Parameters\n    ----------\n    integration_id : `int`\n        Integration ID. This is available from the GitHub integration's\n        homepage.\n    private_key_path : `str`\n        Path to the integration's private key (a ``.pem`` file).\n\n    Returns\n    -------\n    jwt : `bytes`\n        JSON Web Token that is good for 9 minutes.\n\n    Notes\n    -----\n    The JWT is encoded with the RS256 algorithm. It includes a payload with\n    fields:\n\n    - ``'iat'``: The current time, as an `int` timestamp.\n    - ``'exp'``: Expiration time, as an `int timestamp. The expiration\n      time is set of 9 minutes in the future (maximum allowance is 10 minutes).\n    - ``'iss'``: The integration ID (`int`).\n\n    For more information, see\n    https://developer.github.com/early-access/integrations/authentication/.", "id": "f4213:m1"}
{"signature": "def make_raw_content_url(repo_slug, git_ref, file_path):", "body": "if isinstance(repo_slug, RepoSlug):<EOL><INDENT>slug_str = repo_slug.full<EOL><DEDENT>else:<EOL><INDENT>slug_str = repo_slug<EOL><DEDENT>if file_path.startswith('<STR_LIT:/>'):<EOL><INDENT>file_path = file_path.lstrip('<STR_LIT:/>')<EOL><DEDENT>template = '<STR_LIT>'<EOL>return template.format(<EOL>slug=slug_str,<EOL>git_ref=git_ref,<EOL>path=file_path)<EOL>", "docstring": "Make a raw content (raw.githubusercontent.com) URL to a file.\n\n    Parameters\n    ----------\n    repo_slug : `str` or `RepoSlug`\n        The repository slug, formatted as either a `str` (``'owner/name'``)\n        or a `RepoSlug` object (created by `parse_repo_slug_from_url`).\n    git_ref : `str`\n        The git ref: a branch name, commit hash, or tag name.\n    file_path : `str`\n        The POSIX path of the file in the repository tree.", "id": "f4215:m1"}
{"signature": "def normalize_repo_root_url(url):", "body": "<EOL>if url.endswith('<STR_LIT>'):<EOL><INDENT>url = url[:-<NUM_LIT:4>]<EOL><DEDENT>return url<EOL>", "docstring": "Normalize a GitHub URL into the root repository URL.\n\n    Parameters\n    ----------\n    url : `str`\n        A GitHub URL\n\n    Returns\n    -------\n    url : `str`\n        Normalized URL of a GitHub repository.\n\n    Examples\n    --------\n    >>> normalize_repo_root_url('https://github.com/lsst/LDM-151.git')\n    'https://github.com/lsst/LDM-151'", "id": "f4215:m2"}
{"signature": "async def process_ltd_doc_products(session, product_urls, github_api_token,<EOL>mongo_collection=None):", "body": "tasks = [asyncio.ensure_future(<EOL>process_ltd_doc(session, github_api_token,<EOL>product_url,<EOL>mongo_collection=mongo_collection))<EOL>for product_url in product_urls]<EOL>await asyncio.gather(*tasks)<EOL>", "docstring": "Run a pipeline to process extract, transform, and load metadata for\n    multiple LSST the Docs-hosted projects\n\n    Parameters\n    ----------\n    session : `aiohttp.ClientSession`\n        Your application's aiohttp client session.\n        See http://aiohttp.readthedocs.io/en/stable/client.html.\n    product_urls : `list` of `str`\n        List of LSST the Docs product URLs.\n    github_api_token : `str`\n        A GitHub personal API token. See the `GitHub personal access token\n        guide`_.\n    mongo_collection : `motor.motor_asyncio.AsyncIOMotorCollection`, optional\n        MongoDB collection. This should be the common MongoDB collection for\n        LSST projectmeta JSON-LD records.", "id": "f4218:m3"}
{"signature": "def main():", "body": "parser = argparse.ArgumentParser(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>dest='<STR_LIT>',<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>default='<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>default='<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>args = parser.parse_args()<EOL>stream_handler = logging.StreamHandler()<EOL>stream_formatter = logging.Formatter(<EOL>'<STR_LIT>')<EOL>stream_handler.setFormatter(stream_formatter)<EOL>root_logger = logging.getLogger()<EOL>root_logger.addHandler(stream_handler)<EOL>root_logger.setLevel(logging.WARNING)<EOL>app_logger = logging.getLogger('<STR_LIT>')<EOL>app_logger.setLevel(logging.DEBUG)<EOL>if args.mongodb_uri is not None:<EOL><INDENT>mongo_client = AsyncIOMotorClient(args.mongodb_uri, ssl=True)<EOL>collection = mongo_client[args.mongodb_db][args.mongodb_collection]<EOL><DEDENT>else:<EOL><INDENT>collection = None<EOL><DEDENT>loop = asyncio.get_event_loop()<EOL>if args.ltd_product_url is not None:<EOL><INDENT>loop.run_until_complete(run_single_ltd_doc(args.ltd_product_url,<EOL>args.github_token,<EOL>collection))<EOL><DEDENT>else:<EOL><INDENT>loop.run_until_complete(run_bulk_etl(args.github_token,<EOL>collection))<EOL><DEDENT>", "docstring": "Command line entrypoint to reduce technote metadata.", "id": "f4218:m0"}
{"signature": "def _encode_datetime(self, dt):", "body": "if dt.tzinfo is None:<EOL><INDENT>dt = dt.replace(tzinfo=datetime.timezone.utc)<EOL><DEDENT>dt = dt.astimezone(datetime.timezone.utc)<EOL>return dt.strftime('<STR_LIT>')<EOL>", "docstring": "Encode a datetime in the format '%Y-%m-%dT%H:%M:%SZ'.\n\n        The datetime can be naieve (doesn't have timezone info) or aware\n        (it does have a tzinfo attribute set). Regardless, the datetime\n        is transformed into UTC.", "id": "f4219:c0:m1"}
{"signature": "def decode_jsonld(jsonld_text):", "body": "decoder = json.JSONDecoder(object_pairs_hook=_decode_object_pairs)<EOL>return decoder.decode(jsonld_text)<EOL>", "docstring": "Decode a JSON-LD dataset, including decoding datetime\n    strings into `datetime.datetime` objects.\n\n    Parameters\n    ----------\n    encoded_dataset : `str`\n        The JSON-LD dataset encoded as a string.\n\n    Returns\n    -------\n    jsonld_dataset : `dict`\n        A JSON-LD dataset.\n\n    Examples\n    --------\n\n    >>> doc = '{\"dt\": \"2018-01-01T12:00:00Z\"}'\n    >>> decode_jsonld(doc)\n    {'dt': datetime.datetime(2018, 1, 1, 12, 0, tzinfo=datetime.timezone.utc)}", "id": "f4219:m1"}
{"signature": "def default(self, obj):", "body": "if isinstance(obj, datetime.datetime):<EOL><INDENT>return self._encode_datetime(obj)<EOL><DEDENT>return json.JSONEncoder.default(self, obj)<EOL>", "docstring": "Encode values as JSON strings.\n\n        This method overrides the default implementation from\n        `json.JSONEncoder`.", "id": "f4219:c0:m0"}
{"signature": "def reduce_technote_metadata(github_url, metadata, github_data,<EOL>ltd_product_data):", "body": "repo_slug = parse_repo_slug_from_url(github_url)<EOL>jsonld = {<EOL>'<STR_LIT>': [<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\"],<EOL>'<STR_LIT>': ['<STR_LIT>', '<STR_LIT>'],<EOL>'<STR_LIT>': github_url<EOL>}<EOL>if '<STR_LIT:url>' in metadata:<EOL><INDENT>url = metadata['<STR_LIT:url>']<EOL><DEDENT>elif '<STR_LIT>' in ltd_product_data:<EOL><INDENT>url = ltd_product_data['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>'<EOL>'<STR_LIT:{}>'.format(github_url))<EOL><DEDENT>jsonld['<STR_LIT>'] = url<EOL>jsonld['<STR_LIT:url>'] = url<EOL>if '<STR_LIT>' in metadata and '<STR_LIT>' in metadata:<EOL><INDENT>jsonld['<STR_LIT>'] = '<STR_LIT>'.format(**metadata)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>'.format(github_url))<EOL><DEDENT>if '<STR_LIT>' in metadata:<EOL><INDENT>jsonld['<STR_LIT:name>'] = metadata['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT:description>' in metadata:<EOL><INDENT>jsonld['<STR_LIT:description>'] = metadata['<STR_LIT:description>']<EOL><DEDENT>if '<STR_LIT>' in metadata:<EOL><INDENT>jsonld['<STR_LIT>'] = [{'<STR_LIT>': '<STR_LIT>', '<STR_LIT:name>': author_name}<EOL>for author_name in metadata['<STR_LIT>']]<EOL><DEDENT>if '<STR_LIT>' in metadata:<EOL><INDENT>jsonld['<STR_LIT>'] = datetime.datetime.strptime(<EOL>metadata['<STR_LIT>'],<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>_repo_data = github_data['<STR_LIT:data>']['<STR_LIT>']<EOL>_master_data = _repo_data['<STR_LIT>']<EOL>jsonld['<STR_LIT>'] = datetime.datetime.strptime(<EOL>_master_data['<STR_LIT:target>']['<STR_LIT>'],<EOL>'<STR_LIT>')<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>try:<EOL><INDENT>_license_data = github_data['<STR_LIT:data>']['<STR_LIT>']['<STR_LIT>']<EOL>_spdxId = _license_data['<STR_LIT>']<EOL>if _spdxId is not None:<EOL><INDENT>_spdx_url = 
'<STR_LIT>'.format(_spdxId)<EOL>jsonld['<STR_LIT>'] = _spdx_url<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>_master_data = github_data['<STR_LIT:data>']['<STR_LIT>']['<STR_LIT>']<EOL>_files = _master_data['<STR_LIT:target>']['<STR_LIT>']['<STR_LIT>']<EOL>for _node in _files:<EOL><INDENT>filename = _node['<STR_LIT:name>']<EOL>normalized_filename = filename.lower()<EOL>if normalized_filename.startswith('<STR_LIT>'):<EOL><INDENT>readme_url = make_raw_content_url(repo_slug, '<STR_LIT>',<EOL>filename)<EOL>jsonld['<STR_LIT>'] = readme_url<EOL>break<EOL><DEDENT><DEDENT><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>travis_url = '<STR_LIT>'.format(repo_slug.full)<EOL>jsonld['<STR_LIT>'] = travis_url<EOL>return jsonld<EOL>", "docstring": "Reduce a technote project's metadata from multiple sources into a\n    single JSON-LD resource.\n\n    Parameters\n    ----------\n    github_url : `str`\n        URL of the technote's GitHub repository.\n    metadata : `dict`\n        The parsed contents of ``metadata.yaml`` found in a technote's\n        repository.\n    github_data : `dict`\n        The contents of the ``technote_repo`` GitHub GraphQL API query.\n    ltd_product_data : `dict`\n        JSON dataset for the technote corresponding to the\n        ``/products/<product>`` of LTD Keeper.\n\n    Returns\n    -------\n    metadata : `dict`\n        JSON-LD-formatted dictionary.\n\n    .. `GitHub personal access token guide`: https://ls.st/41d", "id": "f4221:m1"}
{"signature": "async def _upload_to_mongodb(collection, jsonld):", "body": "document = {<EOL>'<STR_LIT:data>': jsonld<EOL>}<EOL>query = {<EOL>'<STR_LIT>': jsonld['<STR_LIT>']<EOL>}<EOL>await collection.update(query, document, upsert=True, multi=False)<EOL>", "docstring": "Upsert the technote resource into the projectmeta MongoDB collection.\n\n    Parameters\n    ----------\n    collection : `motor.motor_asyncio.AsyncIOMotorCollection`\n        The MongoDB collection.\n    jsonld : `dict`\n        The JSON-LD document that represents the document resource.", "id": "f4222:m1"}
{"signature": "def read_git_commit_timestamp(repo_path=None, repo=None):", "body": "if repo is None:<EOL><INDENT>repo = git.repo.base.Repo(path=repo_path,<EOL>search_parent_directories=True)<EOL><DEDENT>head_commit = repo.head.commit<EOL>return head_commit.committed_datetime<EOL>", "docstring": "Obtain the timestamp from the current head commit of a Git repository.\n\n    Parameters\n    ----------\n    repo_path : `str`, optional\n        Path to the Git repository. Leave as `None` to use the current working\n        directory.\n\n    Returns\n    -------\n    commit_timestamp : `datetime.datetime`\n        The datetime of the head commit.", "id": "f4223:m0"}
{"signature": "def read_git_commit_timestamp_for_file(filepath, repo_path=None, repo=None):", "body": "logger = logging.getLogger(__name__)<EOL>if repo is None:<EOL><INDENT>repo = git.repo.base.Repo(path=repo_path,<EOL>search_parent_directories=True)<EOL><DEDENT>repo_path = repo.working_tree_dir<EOL>head_commit = repo.head.commit<EOL>logger.debug('<STR_LIT>', repo_path)<EOL>filepath = os.path.relpath(<EOL>os.path.abspath(filepath),<EOL>start=repo_path)<EOL>logger.debug('<STR_LIT>', filepath)<EOL>for commit in head_commit.iter_items(repo,<EOL>head_commit,<EOL>[filepath],<EOL>skip=<NUM_LIT:0>):<EOL><INDENT>return commit.committed_datetime<EOL><DEDENT>raise IOError('<STR_LIT>'.format(filepath))<EOL>", "docstring": "Obtain the timestamp for the most recent commit to a given file in a\n    Git repository.\n\n    Parameters\n    ----------\n    filepath : `str`\n        Absolute or repository-relative path for a file.\n    repo_path : `str`, optional\n        Path to the Git repository. Leave as `None` to use the current working\n        directory or if a ``repo`` argument is provided.\n    repo : `git.Repo`, optional\n        A `git.Repo` instance.\n\n    Returns\n    -------\n    commit_timestamp : `datetime.datetime`\n        The datetime of the most recent commit to the given file.\n\n    Raises\n    ------\n    IOError\n        Raised if the ``filepath`` does not exist in the Git repository.", "id": "f4223:m1"}
{"signature": "def __init__(self, path_to_tagger):", "body": "self._path_to_tagger = path_to_tagger<EOL>self._dir_to_tagger = os.path.dirname(path_to_tagger)<EOL>self._tagger = subprocess.Popen('<STR_LIT>'+os.path.basename(path_to_tagger),<EOL>cwd=self._dir_to_tagger,<EOL>stdin=subprocess.PIPE, stdout=subprocess.PIPE)<EOL>", "docstring": "Arguments:\n- `path_to_tagger`:", "id": "f4225:c0:m0"}
{"signature": "def count(s, limit=<NUM_LIT:20>):", "body": "return _gen(parse(s), limit, count=True)<EOL>", "docstring": "Counts all matching strings to a given regular expression\n\n    :param s: Regular expression\n    :type s: str\n    :param limit: Range limit\n    :type limit: int\n    :rtype: int\n    :returns: number of matching strings", "id": "f4227:m14"}
{"signature": "def _gen(d, limit=<NUM_LIT:20>, count=False, grouprefs=None):", "body": "if grouprefs is None:<EOL><INDENT>grouprefs = {}<EOL><DEDENT>ret = ['<STR_LIT>']<EOL>strings = <NUM_LIT:0><EOL>literal = False<EOL>for i in d:<EOL><INDENT>if i[<NUM_LIT:0>] == sre_parse.IN:<EOL><INDENT>subs = _in(i[<NUM_LIT:1>])<EOL>if count:<EOL><INDENT>strings = (strings or <NUM_LIT:1>) * len(subs)<EOL><DEDENT>ret = comb(ret, subs)<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.LITERAL:<EOL><INDENT>literal = True<EOL>ret = mappend(ret, unichr(i[<NUM_LIT:1>]))<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.CATEGORY:<EOL><INDENT>subs = CATEGORIES.get(i[<NUM_LIT:1>], ['<STR_LIT>'])<EOL>if count:<EOL><INDENT>strings = (strings or <NUM_LIT:1>) * len(subs)<EOL><DEDENT>ret = comb(ret, subs)<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.ANY:<EOL><INDENT>subs = CATEGORIES['<STR_LIT>']<EOL>if count:<EOL><INDENT>strings = (strings or <NUM_LIT:1>) * len(subs)<EOL><DEDENT>ret = comb(ret, subs)<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.MAX_REPEAT or i[<NUM_LIT:0>] == sre_parse.MIN_REPEAT:<EOL><INDENT>items = list(i[<NUM_LIT:1>][<NUM_LIT:2>])<EOL>if i[<NUM_LIT:1>][<NUM_LIT:1>] + <NUM_LIT:1> - i[<NUM_LIT:1>][<NUM_LIT:0>] >= limit:<EOL><INDENT>r1 = i[<NUM_LIT:1>][<NUM_LIT:0>]<EOL>r2 = i[<NUM_LIT:1>][<NUM_LIT:0>] + limit<EOL><DEDENT>else:<EOL><INDENT>r1 = i[<NUM_LIT:1>][<NUM_LIT:0>]<EOL>r2 = i[<NUM_LIT:1>][<NUM_LIT:1>] + <NUM_LIT:1><EOL><DEDENT>ran = range(r1, r2)<EOL>if count:<EOL><INDENT>branch_count = <NUM_LIT:0><EOL>for p in ran:<EOL><INDENT>branch_count += pow(_gen(items, limit, True, grouprefs), p)<EOL><DEDENT>strings = (strings or <NUM_LIT:1>) * branch_count<EOL><DEDENT>ret = prods(ret, ran, items, limit, grouprefs)<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.BRANCH:<EOL><INDENT>if count:<EOL><INDENT>for x in i[<NUM_LIT:1>][<NUM_LIT:1>]:<EOL><INDENT>strings += _gen(x, limit, True, grouprefs) or <NUM_LIT:1><EOL><DEDENT><DEDENT>ret = concit(ret, i[<NUM_LIT:1>][<NUM_LIT:1>], limit, 
grouprefs)<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.SUBPATTERN or i[<NUM_LIT:0>] == sre_parse.ASSERT:<EOL><INDENT>subexpr = i[<NUM_LIT:1>][<NUM_LIT:1>]<EOL>if IS_PY36_OR_GREATER and i[<NUM_LIT:0>] == sre_parse.SUBPATTERN:<EOL><INDENT>subexpr = i[<NUM_LIT:1>][<NUM_LIT:3>]<EOL><DEDENT>if count:<EOL><INDENT>strings = (<EOL>strings or <NUM_LIT:1>) * (sum(ggen([<NUM_LIT:0>], _gen, subexpr, limit=limit, count=True, grouprefs=grouprefs)) or <NUM_LIT:1>)<EOL><DEDENT>ret = ggen(ret, _gen, subexpr, limit=limit, count=False, grouprefs=grouprefs, groupref=i[<NUM_LIT:1>][<NUM_LIT:0>])<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.AT:<EOL><INDENT>continue<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.NOT_LITERAL:<EOL><INDENT>subs = list(CATEGORIES['<STR_LIT>'])<EOL>if unichr(i[<NUM_LIT:1>]) in subs:<EOL><INDENT>subs.remove(unichr(i[<NUM_LIT:1>]))<EOL><DEDENT>if count:<EOL><INDENT>strings = (strings or <NUM_LIT:1>) * len(subs)<EOL><DEDENT>ret = comb(ret, subs)<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.GROUPREF:<EOL><INDENT>ret = dappend(ret, grouprefs, i[<NUM_LIT:1>])<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.ASSERT_NOT:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>' + repr(i))<EOL><DEDENT><DEDENT>if count:<EOL><INDENT>if strings == <NUM_LIT:0> and literal:<EOL><INDENT>inc = True<EOL>for i in d:<EOL><INDENT>if i[<NUM_LIT:0>] not in (sre_parse.AT, sre_parse.LITERAL):<EOL><INDENT>inc = False<EOL><DEDENT><DEDENT>if inc:<EOL><INDENT>strings = <NUM_LIT:1><EOL><DEDENT><DEDENT>return strings<EOL><DEDENT>return ret<EOL>", "docstring": "docstring for _gen", "id": "f4227:m8"}
{"signature": "def generate(s, limit=<NUM_LIT:20>):", "body": "return _gen(parse(s), limit)<EOL>", "docstring": "Creates a generator that generates all matching strings to a given regular expression\n\n    :param s: Regular expression\n    :type s: str\n    :param limit: Range limit\n    :type limit: int\n    :returns: string generator object", "id": "f4227:m13"}
{"signature": "def sre_to_string(sre_obj, paren=True):", "body": "ret = u'<STR_LIT>'<EOL>for i in sre_obj:<EOL><INDENT>if i[<NUM_LIT:0>] == sre_parse.IN:<EOL><INDENT>prefix = '<STR_LIT>'<EOL>if len(i[<NUM_LIT:1>]) and i[<NUM_LIT:1>][<NUM_LIT:0>][<NUM_LIT:0>] == sre_parse.NEGATE:<EOL><INDENT>prefix = '<STR_LIT>'<EOL><DEDENT>ret += u'<STR_LIT>'.format(prefix, sre_to_string(i[<NUM_LIT:1>], paren=paren))<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.LITERAL:<EOL><INDENT>u = unichr(i[<NUM_LIT:1>])<EOL>ret += u if u not in sre_parse.SPECIAL_CHARS else '<STR_LIT>'.format(u)<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.CATEGORY:<EOL><INDENT>ret += REVERSE_CATEGORIES[i[<NUM_LIT:1>]]<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.ANY:<EOL><INDENT>ret += '<STR_LIT:.>'<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.BRANCH:<EOL><INDENT>parts = [sre_to_string(x, paren=paren) for x in i[<NUM_LIT:1>][<NUM_LIT:1>]]<EOL>if not any(parts):<EOL><INDENT>continue<EOL><DEDENT>if i[<NUM_LIT:1>][<NUM_LIT:0>]:<EOL><INDENT>if len(parts) == <NUM_LIT:1>:<EOL><INDENT>paren = False<EOL><DEDENT>prefix = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>prefix = '<STR_LIT>'<EOL><DEDENT>branch = '<STR_LIT:|>'.join(parts)<EOL>if paren:<EOL><INDENT>ret += '<STR_LIT>'.format(prefix, branch)<EOL><DEDENT>else:<EOL><INDENT>ret += '<STR_LIT>'.format(branch)<EOL><DEDENT><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.SUBPATTERN:<EOL><INDENT>subexpr = i[<NUM_LIT:1>][<NUM_LIT:1>]<EOL>if IS_PY36_OR_GREATER and i[<NUM_LIT:0>] == sre_parse.SUBPATTERN:<EOL><INDENT>subexpr = i[<NUM_LIT:1>][<NUM_LIT:3>]<EOL><DEDENT>if i[<NUM_LIT:1>][<NUM_LIT:0>]:<EOL><INDENT>ret += '<STR_LIT>'.format(sre_to_string(subexpr, paren=False))<EOL><DEDENT>else:<EOL><INDENT>ret += '<STR_LIT>'.format(sre_to_string(subexpr, paren=paren))<EOL><DEDENT><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.NOT_LITERAL:<EOL><INDENT>ret += '<STR_LIT>'.format(unichr(i[<NUM_LIT:1>]))<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.MAX_REPEAT:<EOL><INDENT>if 
i[<NUM_LIT:1>][<NUM_LIT:0>] == i[<NUM_LIT:1>][<NUM_LIT:1>]:<EOL><INDENT>range_str = '<STR_LIT>'.format(i[<NUM_LIT:1>][<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>if i[<NUM_LIT:1>][<NUM_LIT:0>] == <NUM_LIT:0> and i[<NUM_LIT:1>][<NUM_LIT:1>] - i[<NUM_LIT:1>][<NUM_LIT:0>] == sre_parse.MAXREPEAT:<EOL><INDENT>range_str = '<STR_LIT:*>'<EOL><DEDENT>elif i[<NUM_LIT:1>][<NUM_LIT:0>] == <NUM_LIT:1> and i[<NUM_LIT:1>][<NUM_LIT:1>] - i[<NUM_LIT:1>][<NUM_LIT:0>] == sre_parse.MAXREPEAT - <NUM_LIT:1>:<EOL><INDENT>range_str = '<STR_LIT:+>'<EOL><DEDENT>else:<EOL><INDENT>range_str = '<STR_LIT>'.format(i[<NUM_LIT:1>][<NUM_LIT:0>], i[<NUM_LIT:1>][<NUM_LIT:1>])<EOL><DEDENT><DEDENT>ret += sre_to_string(i[<NUM_LIT:1>][<NUM_LIT:2>], paren=paren) + range_str<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.MIN_REPEAT:<EOL><INDENT>if i[<NUM_LIT:1>][<NUM_LIT:0>] == <NUM_LIT:0> and i[<NUM_LIT:1>][<NUM_LIT:1>] == sre_parse.MAXREPEAT:<EOL><INDENT>range_str = '<STR_LIT>'<EOL><DEDENT>elif i[<NUM_LIT:1>][<NUM_LIT:0>] == <NUM_LIT:1> and i[<NUM_LIT:1>][<NUM_LIT:1>] == sre_parse.MAXREPEAT:<EOL><INDENT>range_str = '<STR_LIT>'<EOL><DEDENT>elif i[<NUM_LIT:1>][<NUM_LIT:1>] == sre_parse.MAXREPEAT:<EOL><INDENT>range_str = '<STR_LIT>'.format(i[<NUM_LIT:1>][<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>range_str = '<STR_LIT>'.format(i[<NUM_LIT:1>][<NUM_LIT:0>], i[<NUM_LIT:1>][<NUM_LIT:1>])<EOL><DEDENT>ret += sre_to_string(i[<NUM_LIT:1>][<NUM_LIT:2>], paren=paren) + range_str<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.GROUPREF:<EOL><INDENT>ret += '<STR_LIT>'.format(i[<NUM_LIT:1>])<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.AT:<EOL><INDENT>if i[<NUM_LIT:1>] == sre_parse.AT_BEGINNING:<EOL><INDENT>ret += '<STR_LIT>'<EOL><DEDENT>elif i[<NUM_LIT:1>] == sre_parse.AT_END:<EOL><INDENT>ret += '<STR_LIT:$>'<EOL><DEDENT><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.NEGATE:<EOL><INDENT>pass<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.RANGE:<EOL><INDENT>ret += '<STR_LIT>'.format(unichr(i[<NUM_LIT:1>][<NUM_LIT:0>]), 
unichr(i[<NUM_LIT:1>][<NUM_LIT:1>]))<EOL><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.ASSERT:<EOL><INDENT>if i[<NUM_LIT:1>][<NUM_LIT:0>]:<EOL><INDENT>ret += '<STR_LIT>'.format(sre_to_string(i[<NUM_LIT:1>][<NUM_LIT:1>], paren=False))<EOL><DEDENT>else:<EOL><INDENT>ret += '<STR_LIT>'.format(sre_to_string(i[<NUM_LIT:1>][<NUM_LIT:1>], paren=paren))<EOL><DEDENT><DEDENT>elif i[<NUM_LIT:0>] == sre_parse.ASSERT_NOT:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>' % str(i))<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "sre_parse object to string\n\n    :param sre_obj: Output of sre_parse.parse()\n    :type sre_obj: list\n    :rtype: str", "id": "f4227:m10"}
{"signature": "def _update_settings(self, new_settings, enforce_helpstring=True):", "body": "for raw_setting_name, value in six.iteritems(new_settings):<EOL><INDENT>setting_name = raw_setting_name.replace(\"<STR_LIT:_>\", \"<STR_LIT:->\")<EOL>setting_already_exists = setting_name in self._instance_settings<EOL>value_is_list_len_2 = isinstance(value, list) and len(value) == <NUM_LIT:2><EOL>treat_as_tuple = not setting_already_exists and value_is_list_len_2<EOL>if isinstance(value, tuple) or treat_as_tuple:<EOL><INDENT>self._instance_settings[setting_name] = value<EOL><DEDENT>else:<EOL><INDENT>if setting_name not in self._instance_settings:<EOL><INDENT>if enforce_helpstring:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise InternalCashewException(msg % setting_name)<EOL><DEDENT>else:<EOL><INDENT>self._instance_settings[setting_name] = ('<STR_LIT>', value,)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>orig = self._instance_settings[setting_name]<EOL>self._instance_settings[setting_name] = (orig[<NUM_LIT:0>], value,)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "This method does the work of updating settings. Can be passed with\nenforce_helpstring = False which you may want if allowing end users to\nadd arbitrary metadata via the settings system.\n\nPreferable to use update_settings (without leading _) in code to do the\nright thing and always have docstrings.", "id": "f4241:c0:m11"}
{"signature": "def standardize_alias_or_aliases(cls, alias_or_aliases):", "body": "if isinstance(alias_or_aliases, string_types):<EOL><INDENT>return [alias_or_aliases]<EOL><DEDENT>else:<EOL><INDENT>return alias_or_aliases<EOL><DEDENT>", "docstring": "Make sure we don't attempt to iterate over an alias string thinking\nit's an array.", "id": "f4241:c1:m4"}
{"signature": "def settings_and_attributes(self):", "body": "attrs = self.setting_values()<EOL>attrs.update(self.__dict__)<EOL>skip = [\"<STR_LIT>\", \"<STR_LIT>\"]<EOL>for a in skip:<EOL><INDENT>del attrs[a]<EOL><DEDENT>return attrs<EOL>", "docstring": "Return a combined dictionary of setting values and attribute values.", "id": "f4241:c0:m12"}
{"signature": "def setting_values(self, skip=None):", "body": "if not skip:<EOL><INDENT>skip = []<EOL><DEDENT>return dict(<EOL>(k, v[<NUM_LIT:1>])<EOL>for k, v in six.iteritems(self._instance_settings)<EOL>if not k in skip)<EOL>", "docstring": "Returns dict of all setting values (removes the helpstrings).", "id": "f4241:c0:m9"}
{"signature": "def check_docstring(cls):", "body": "docstring = inspect.getdoc(cls)<EOL>if not docstring:<EOL><INDENT>breadcrumbs = \"<STR_LIT>\".join(t.__name__ for t in inspect.getmro(cls)[:-<NUM_LIT:1>][::-<NUM_LIT:1>])<EOL>msg = \"<STR_LIT>\"<EOL>args = (cls.__name__, breadcrumbs, cls.__module__)<EOL>raise InternalCashewException(msg % args)<EOL><DEDENT>max_line_length = cls._class_settings.get('<STR_LIT>')<EOL>if max_line_length:<EOL><INDENT>for i, line in enumerate(docstring.splitlines()):<EOL><INDENT>if len(line) > max_line_length:<EOL><INDENT>msg = \"<STR_LIT>\" <EOL>args = (i, cls.__name__, len(line) - max_line_length)<EOL>raise Exception(msg % args)<EOL><DEDENT><DEDENT><DEDENT>return docstring<EOL>", "docstring": "Asserts that the class has a docstring, returning it if successful.", "id": "f4241:c1:m7"}
{"signature": "def expect(self, bytes, stream=None):", "body": "if stream is None:<EOL><INDENT>stream = self.std_out<EOL><DEDENT>", "docstring": "Block until given bytes appear in the stream.", "id": "f4246:c1:m6"}
{"signature": "@property<EOL><INDENT>def pid(self):<DEDENT>", "body": "return self._process.pid<EOL>", "docstring": "The process' PID.", "id": "f4246:c1:m4"}
{"signature": "def block(self):", "body": "self._status_code = self._process.wait()<EOL>", "docstring": "Blocks until command finishes. Returns Response instance.", "id": "f4246:c1:m8"}
{"signature": "def run(command, data=None, timeout=None, kill_timeout=None, env=None, cwd=None):", "body": "command = expand_args(command)<EOL>history = []<EOL>for c in command:<EOL><INDENT>if len(history):<EOL><INDENT>data = history[-<NUM_LIT:1>].std_out[<NUM_LIT:0>:<NUM_LIT:10>*<NUM_LIT>]<EOL><DEDENT>cmd = Command(c)<EOL>try:<EOL><INDENT>out, err = cmd.run(data, timeout, kill_timeout, env, cwd)<EOL>status_code = cmd.returncode<EOL><DEDENT>except OSError as e:<EOL><INDENT>out, err = '<STR_LIT>', u\"<STR_LIT:\\n>\".join([e.strerror, traceback.format_exc()])<EOL>status_code = <NUM_LIT><EOL><DEDENT>r = Response(process=cmd)<EOL>r.command = c<EOL>r.std_out = out<EOL>r.std_err = err<EOL>r.status_code = status_code<EOL>history.append(r)<EOL><DEDENT>r = history.pop()<EOL>r.history = history<EOL>return r<EOL>", "docstring": "Executes a given commmand and returns Response.\n\n    Blocks until process is complete, or timeout is reached.", "id": "f4246:m4"}
{"signature": "def send(self, str, end='<STR_LIT:\\n>'):", "body": "return self._process.stdin.write(str+end)<EOL>", "docstring": "Sends a line to std_in.", "id": "f4246:c1:m7"}
{"signature": "def user_line(self, frame):", "body": "self.get_stack_data(frame, None, '<STR_LIT>')<EOL>", "docstring": "This function is called when we stop or break at this line.", "id": "f4257:c0:m3"}
{"signature": "def user_return(self, frame, return_value):", "body": "self.get_stack_data(frame, None, '<STR_LIT>')<EOL>", "docstring": "This function is called when a return trap is set here.", "id": "f4257:c0:m4"}
{"signature": "def get_type_info(obj):", "body": "if isinstance(obj, primitive_types):<EOL><INDENT>return ('<STR_LIT>', type(obj).__name__)<EOL><DEDENT>if isinstance(obj, sequence_types):<EOL><INDENT>return ('<STR_LIT>', type(obj).__name__)<EOL><DEDENT>if isinstance(obj, array_types):<EOL><INDENT>return ('<STR_LIT>', type(obj).__name__)<EOL><DEDENT>if isinstance(obj, key_value_types):<EOL><INDENT>return ('<STR_LIT>', type(obj).__name__)<EOL><DEDENT>if isinstance(obj, types.ModuleType):<EOL><INDENT>return ('<STR_LIT>', type(obj).__name__)<EOL><DEDENT>if isinstance(obj, (types.FunctionType, types.MethodType)):<EOL><INDENT>return ('<STR_LIT>', type(obj).__name__)<EOL><DEDENT>if isinstance(obj, type):<EOL><INDENT>if hasattr(obj, '<STR_LIT>'):<EOL><INDENT>return ('<STR_LIT:class>', obj.__name__)<EOL><DEDENT><DEDENT>if isinstance(type(obj), type):<EOL><INDENT>if hasattr(obj, '<STR_LIT>'):<EOL><INDENT>cls_name = type(obj).__name__<EOL>if cls_name == '<STR_LIT>':<EOL><INDENT>cls_name = obj.__name__<EOL>return ('<STR_LIT:class>', '<STR_LIT:{}>'.format(cls_name))<EOL><DEDENT>if cls_name == '<STR_LIT>':<EOL><INDENT>cls_name = obj.__class__.__name__<EOL><DEDENT>return ('<STR_LIT>', '<STR_LIT>'.format(cls_name))<EOL><DEDENT><DEDENT>return ('<STR_LIT>', type(obj).__name__)<EOL>", "docstring": "Get type information for a Python object\n\n    Args:\n        obj: The Python object\n\n    Returns:\n        tuple: (object type \"catagory\", object type name)", "id": "f4259:m3"}
{"signature": "@contextmanager<EOL>def redirect_stdout(new_stdout):", "body": "old_stdout, sys.stdout = sys.stdout, new_stdout<EOL>try:<EOL><INDENT>yield None<EOL><DEDENT>finally:<EOL><INDENT>sys.stdout = old_stdout<EOL><DEDENT>", "docstring": "Redirect the stdout\n\n    Args:\n        new_stdout (io.StringIO): New stdout to use instead", "id": "f4259:m1"}
{"signature": "def filter_dict(d, exclude):", "body": "ret = {}<EOL>for key, value in d.items():<EOL><INDENT>if key not in exclude:<EOL><INDENT>ret.update({key: value})<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "Return a new dict with specified keys excluded from the origional dict\n\n    Args:\n        d (dict): origional dict\n        exclude (list): The keys that are excluded", "id": "f4259:m0"}
{"signature": "def which(program):", "body": "if os.path.split(program)[<NUM_LIT:0>]:<EOL><INDENT>program_path = find_exe(program)<EOL>if program_path:<EOL><INDENT>return program_path<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for path in get_path_list():<EOL><INDENT>program_path = find_exe(os.path.join(path, program))<EOL>if program_path:<EOL><INDENT>return program_path<EOL><DEDENT><DEDENT><DEDENT>return None<EOL>", "docstring": "Identify the location of an executable file.", "id": "f4266:m0"}
{"signature": "def print_unicode(text):", "body": "if sys.version_info[<NUM_LIT:0>] < <NUM_LIT:3>:<EOL><INDENT>text = text.encode('<STR_LIT:utf-8>')<EOL><DEDENT>print(text)<EOL>", "docstring": "Print in a portable manner.", "id": "f4269:m3"}
{"signature": "@classmethod<EOL><INDENT>def _get_languages(cls) -> set:<DEDENT>", "body": "cls._start_server_if_needed()<EOL>url = urllib.parse.urljoin(cls._url, '<STR_LIT>')<EOL>languages = set()<EOL>for e in cls._get_root(url, num_tries=<NUM_LIT:1>):<EOL><INDENT>languages.add(e.get('<STR_LIT>'))<EOL>languages.add(e.get('<STR_LIT>'))<EOL><DEDENT>return languages<EOL>", "docstring": "Get supported languages (by querying the server).", "id": "f4271:c5:m14"}
{"signature": "def correct(self, text: str, srctext=None) -> str:", "body": "return correct(text, self.check(text, srctext))<EOL>", "docstring": "Automatically apply suggestions to the text.", "id": "f4271:c5:m11"}
{"signature": "def disable_spellchecking(self):", "body": "self.disabled.update(self._spell_checking_rules)<EOL>", "docstring": "Disable spell-checking rules.", "id": "f4271:c5:m13"}
{"signature": "@classmethod<EOL><INDENT>def _get_attrib(cls):<DEDENT>", "body": "cls._start_server_if_needed()<EOL>params = {'<STR_LIT>': FAILSAFE_LANGUAGE, '<STR_LIT:text>': '<STR_LIT>'}<EOL>data = urllib.parse.urlencode(params).encode()<EOL>root = cls._get_root(cls._url, data, num_tries=<NUM_LIT:1>)<EOL>return root.attrib<EOL>", "docstring": "Get matches element attributes.", "id": "f4271:c5:m15"}
{"signature": "def get_locale_language():", "body": "return locale.getlocale()[<NUM_LIT:0>] or locale.getdefaultlocale()[<NUM_LIT:0>]<EOL>", "docstring": "Get the language code for the current locale setting.", "id": "f4271:m12"}
{"signature": "def get_languages() -> set:", "body": "try:<EOL><INDENT>languages = cache['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>languages = LanguageTool._get_languages()<EOL>cache['<STR_LIT>'] = languages<EOL><DEDENT>return languages<EOL>", "docstring": "Get supported languages.", "id": "f4271:m7"}
{"signature": "def _check_api(self, text: str, srctext=None) -> bytes:", "body": "root = self._get_root(self._url, self._encode(text, srctext))<EOL>return (b'<STR_LIT>' +<EOL>ElementTree.tostring(root) + b\"<STR_LIT:\\n>\")<EOL>", "docstring": "Match text against enabled rules (result in XML format).", "id": "f4271:c5:m9"}
{"signature": "def set_directory(path=None):", "body": "old_path = get_directory()<EOL>terminate_server()<EOL>cache.clear()<EOL>if path:<EOL><INDENT>cache['<STR_LIT>'] = path<EOL>try:<EOL><INDENT>get_jar_info()<EOL><DEDENT>except Error:<EOL><INDENT>cache['<STR_LIT>'] = old_path<EOL>raise<EOL><DEDENT><DEDENT>", "docstring": "Set LanguageTool directory.", "id": "f4271:m9"}
{"signature": "def which(program, win_allow_cross_arch=True):", "body": "def is_exe(path):<EOL><INDENT>return os.path.isfile(path) and os.access(path, os.X_OK)<EOL><DEDENT>def _get_path_list():<EOL><INDENT>return os.environ['<STR_LIT>'].split(os.pathsep)<EOL><DEDENT>if os.name == '<STR_LIT>':<EOL><INDENT>def find_exe(program):<EOL><INDENT>root, ext = os.path.splitext(program)<EOL>if ext:<EOL><INDENT>if is_exe(program):<EOL><INDENT>return program<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for ext in os.environ['<STR_LIT>'].split(os.pathsep):<EOL><INDENT>program_path = root + ext.lower()<EOL>if is_exe(program_path):<EOL><INDENT>return program_path<EOL><DEDENT><DEDENT><DEDENT>return None<EOL><DEDENT>def get_path_list():<EOL><INDENT>paths = _get_path_list()<EOL>if win_allow_cross_arch:<EOL><INDENT>alt_sys_path = os.path.expandvars(r\"<STR_LIT>\")<EOL>if os.path.isdir(alt_sys_path):<EOL><INDENT>paths.insert(<NUM_LIT:0>, alt_sys_path)<EOL><DEDENT>else:<EOL><INDENT>alt_sys_path = os.path.expandvars(r\"<STR_LIT>\")<EOL>if os.path.isdir(alt_sys_path):<EOL><INDENT>paths.append(alt_sys_path)<EOL><DEDENT><DEDENT><DEDENT>return paths<EOL><DEDENT><DEDENT>else:<EOL><INDENT>def find_exe(program):<EOL><INDENT>return program if is_exe(program) else None<EOL><DEDENT>get_path_list = _get_path_list<EOL><DEDENT>if os.path.split(program)[<NUM_LIT:0>]:<EOL><INDENT>program_path = find_exe(program)<EOL>if program_path:<EOL><INDENT>return program_path<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for path in get_path_list():<EOL><INDENT>program_path = find_exe(os.path.join(path, program))<EOL>if program_path:<EOL><INDENT>return program_path<EOL><DEDENT><DEDENT><DEDENT>return None<EOL>", "docstring": "Identify the location of an executable file.", "id": "f4275:m1"}
{"signature": "def default_hook(config):", "body": "if (any(arg.startswith('<STR_LIT>') for arg in sys.argv) and<EOL>os.path.isdir(PY2K_DIR) != IS_PY2K and os.path.isdir(LIB_DIR)):<EOL><INDENT>shutil.rmtree(LIB_DIR)<EOL><DEDENT>if IS_PY2K and any(arg.startswith(('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:test>'))<EOL>for arg in sys.argv):<EOL><INDENT>generate_py2k(config)<EOL>packages_root = get_cfg_value(config, '<STR_LIT>', '<STR_LIT>')<EOL>packages_root = os.path.join(PY2K_DIR, packages_root)<EOL>set_cfg_value(config, '<STR_LIT>', '<STR_LIT>', packages_root)<EOL><DEDENT>", "docstring": "Default setup hook.", "id": "f4275:m16"}
{"signature": "def split_elements(value):", "body": "items = [v.strip() for v in value.split('<STR_LIT:U+002C>')]<EOL>if len(items) == <NUM_LIT:1>:<EOL><INDENT>items = value.split()<EOL><DEDENT>return items<EOL>", "docstring": "Split a string with comma or space-separated elements into a list.", "id": "f4275:m3"}
{"signature": "def run_3to2(args=None):", "body": "args = BASE_ARGS_3TO2 if args is None else BASE_ARGS_3TO2 + args<EOL>try:<EOL><INDENT>proc = subprocess.Popen(['<STR_LIT>'] + args, stderr=subprocess.PIPE)<EOL><DEDENT>except OSError:<EOL><INDENT>for path in glob.glob('<STR_LIT>'):<EOL><INDENT>if os.path.isdir(path) and path not in sys.path:<EOL><INDENT>sys.path.append(path)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>from lib3to2.main import main as lib3to2_main<EOL><DEDENT>except ImportError:<EOL><INDENT>raise OSError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>if lib3to2_main('<STR_LIT>', args):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>num_errors = <NUM_LIT:0><EOL>while proc.poll() is None:<EOL><INDENT>line = proc.stderr.readline()<EOL>sys.stderr.write(line)<EOL>num_errors += line.count('<STR_LIT>')<EOL><DEDENT>if proc.returncode or num_errors:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Convert Python files using lib3to2.", "id": "f4275:m11"}
{"signature": "def split_multiline(value):", "body": "return [element for element in (line.strip() for line in value.split('<STR_LIT:\\n>'))<EOL>if element]<EOL>", "docstring": "Split a multiline string into a list, excluding blank lines.", "id": "f4275:m2"}
{"signature": "def write_py2k_header(file_list):", "body": "if not isinstance(file_list, list):<EOL><INDENT>file_list = [file_list]<EOL><DEDENT>python_re = re.compile(br\"<STR_LIT>\")<EOL>coding_re = re.compile(br\"<STR_LIT>\")<EOL>new_line_re = re.compile(br\"<STR_LIT>\")<EOL>version_3 = LooseVersion('<STR_LIT:3>')<EOL>for file in file_list:<EOL><INDENT>if not os.path.getsize(file):<EOL><INDENT>continue<EOL><DEDENT>rewrite_needed = False<EOL>python_found = False<EOL>coding_found = False<EOL>lines = []<EOL>f = open(file, '<STR_LIT:rb>')<EOL>try:<EOL><INDENT>while len(lines) < <NUM_LIT:2>:<EOL><INDENT>line = f.readline()<EOL>match = python_re.match(line)<EOL>if match:<EOL><INDENT>python_found = True<EOL>version = LooseVersion(match.group(<NUM_LIT:2>).decode() or '<STR_LIT:2>')<EOL>try:<EOL><INDENT>version_test = version >= version_3<EOL><DEDENT>except TypeError:<EOL><INDENT>version_test = True<EOL><DEDENT>if version_test:<EOL><INDENT>line = python_re.sub(br\"<STR_LIT>\", line)<EOL>rewrite_needed = True<EOL><DEDENT><DEDENT>elif coding_re.search(line):<EOL><INDENT>coding_found = True<EOL><DEDENT>lines.append(line)<EOL><DEDENT>if not coding_found:<EOL><INDENT>match = new_line_re.search(lines[<NUM_LIT:0>])<EOL>newline = match.group(<NUM_LIT:1>) if match else b\"<STR_LIT:\\n>\"<EOL>line = b\"<STR_LIT>\" + newline<EOL>lines.insert(<NUM_LIT:1> if python_found else <NUM_LIT:0>, line)<EOL>rewrite_needed = True<EOL><DEDENT>if rewrite_needed:<EOL><INDENT>lines += f.readlines()<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>f.close()<EOL><DEDENT>if rewrite_needed:<EOL><INDENT>f = open(file, '<STR_LIT:wb>')<EOL>try:<EOL><INDENT>f.writelines(lines)<EOL><DEDENT>finally:<EOL><INDENT>f.close()<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Write Python 2 shebang and add encoding cookie if needed.", "id": "f4275:m12"}
{"signature": "def download_file_insecure(url, target):", "body": "try:<EOL><INDENT>from urllib.request import urlopen<EOL><DEDENT>except ImportError:<EOL><INDENT>from urllib2 import urlopen<EOL><DEDENT>src = dst = None<EOL>try:<EOL><INDENT>src = urlopen(url)<EOL>data = src.read()<EOL>dst = open(target, \"<STR_LIT:wb>\")<EOL>dst.write(data)<EOL><DEDENT>finally:<EOL><INDENT>if src:<EOL><INDENT>src.close()<EOL><DEDENT>if dst:<EOL><INDENT>dst.close()<EOL><DEDENT><DEDENT>", "docstring": "Use Python to download the file, even though it cannot authenticate the\nconnection.", "id": "f4276:m14"}
{"signature": "def download_file_powershell(url, target):", "body": "target = os.path.abspath(target)<EOL>cmd = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>\"<STR_LIT>\" % vars(),<EOL>]<EOL>_clean_check(cmd, target)<EOL>", "docstring": "Download the file at url to target using Powershell (which will validate\ntrust). Raise an exception if the command cannot complete.", "id": "f4276:m8"}
{"signature": "def main():", "body": "options = _parse_args()<EOL>archive = download_setuptools(<EOL>version=options.version,<EOL>download_base=options.download_base,<EOL>downloader_factory=options.downloader_factory,<EOL>)<EOL>return _install(archive, _build_install_args(options))<EOL>", "docstring": "Install or upgrade setuptools and EasyInstall", "id": "f4276:m19"}
{"signature": "def _parse_args():", "body": "parser = optparse.OptionParser()<EOL>parser.add_option(<EOL>'<STR_LIT>', dest='<STR_LIT>', action='<STR_LIT:store_true>', default=False,<EOL>help='<STR_LIT>')<EOL>parser.add_option(<EOL>'<STR_LIT>', dest='<STR_LIT>', metavar=\"<STR_LIT>\",<EOL>default=DEFAULT_URL,<EOL>help='<STR_LIT>')<EOL>parser.add_option(<EOL>'<STR_LIT>', dest='<STR_LIT>', action='<STR_LIT>',<EOL>const=lambda: download_file_insecure, default=get_best_downloader,<EOL>help='<STR_LIT>'<EOL>)<EOL>parser.add_option(<EOL>'<STR_LIT>', help=\"<STR_LIT>\",<EOL>default=DEFAULT_VERSION,<EOL>)<EOL>options, args = parser.parse_args()<EOL>return options<EOL>", "docstring": "Parse the command line for options", "id": "f4276:m18"}
{"signature": "def _build_install_args(options):", "body": "return ['<STR_LIT>'] if options.user_install else []<EOL>", "docstring": "Build the arguments to 'python setup.py install' on the setuptools package", "id": "f4276:m17"}
{"signature": "def make_aware(value, timezone):", "body": "if hasattr(timezone, '<STR_LIT>') and value not in (datetime.datetime.min, datetime.datetime.max):<EOL><INDENT>return timezone.localize(value, is_dst=None)<EOL><DEDENT>else:<EOL><INDENT>return value.replace(tzinfo=timezone)<EOL><DEDENT>", "docstring": "Makes a naive datetime.datetime in a given time zone aware.", "id": "f4281:m3"}
{"signature": "def includes(self, query):", "body": "query = self.to_timezone(query)<EOL>return any(self.intervals(range_start=query, range_end=query))<EOL>", "docstring": "Does this schedule include the provided time?\n        query should be a datetime (naive or timezone-aware)", "id": "f4285:c0:m6"}
{"signature": "def intervals(self, range_start=datetime.datetime.min, range_end=datetime.datetime.max):", "body": "<EOL>current_period = None<EOL>max_continuous_days = <NUM_LIT><EOL>range_start = self.to_timezone(range_start)<EOL>range_end = self.to_timezone(range_end)<EOL>for period in self._daily_periods(range_start.date(), range_end.date()):<EOL><INDENT>if period.end < range_start or period.start > range_end:<EOL><INDENT>continue<EOL><DEDENT>if current_period is None:<EOL><INDENT>current_period = period<EOL><DEDENT>else:<EOL><INDENT>if ( ((period.start < current_period.end)<EOL>or (period.start - current_period.end) <= datetime.timedelta(minutes=<NUM_LIT:1>))<EOL>and (current_period.end - current_period.start) < datetime.timedelta(days=max_continuous_days)):<EOL><INDENT>current_period = Period(current_period.start, period.end)<EOL><DEDENT>else:<EOL><INDENT>yield current_period<EOL>current_period = period<EOL><DEDENT><DEDENT><DEDENT>if current_period:<EOL><INDENT>yield current_period<EOL><DEDENT>", "docstring": "Returns an iterator of Period tuples for continuous stretches of time during\n        which this event is in effect, between range_start and range_end.", "id": "f4285:c2:m5"}
{"signature": "def includes(self, query_date, query_time=None):", "body": "if self.start_date and query_date < self.start_date:<EOL><INDENT>return False<EOL><DEDENT>if self.end_date and query_date > self.end_date:<EOL><INDENT>return False<EOL><DEDENT>if query_date.weekday() not in self.weekdays:<EOL><INDENT>return False<EOL><DEDENT>if not query_time:<EOL><INDENT>return True<EOL><DEDENT>if query_time >= self.period.start and query_time <= self.period.end:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Does this schedule include the provided time?\n        query_date and query_time are date and time objects, interpreted\n        in this schedule's timezone", "id": "f4285:c3:m1"}
{"signature": "def next_interval(self, after=None):", "body": "if after is None:<EOL><INDENT>after = timezone.now()<EOL><DEDENT>after = self.to_timezone(after)<EOL>return next(self.intervals(range_start=after), None)<EOL>", "docstring": "Returns the next Period this event is in effect, or None if the event\n        has no remaining periods.", "id": "f4285:c0:m4"}
{"signature": "@property<EOL><INDENT>@memoize_method<EOL>def exceptions(self):<DEDENT>", "body": "ex = {}<EOL>for sd in self.root.xpath('<STR_LIT>'):<EOL><INDENT>bits = str(sd.text).split('<STR_LIT:U+0020>')<EOL>date = text_to_date(bits.pop(<NUM_LIT:0>))<EOL>ex.setdefault(date, []).extend([<EOL>_time_text_to_period(t)<EOL>for t in bits<EOL>])<EOL><DEDENT>return ex<EOL>", "docstring": "A dict of dates -> [Period time tuples] representing exceptions\n        to the base recurrence pattern.", "id": "f4285:c2:m1"}
{"signature": "@staticmethod<EOL><INDENT>def from_element(root, timezone):<DEDENT>", "body": "assert root.tag == '<STR_LIT>'<EOL>if root.xpath('<STR_LIT>'):<EOL><INDENT>return _ScheduleIntervals(root, timezone)<EOL><DEDENT>elif root.xpath('<STR_LIT>'):<EOL><INDENT>return _ScheduleRecurring(root, timezone)<EOL><DEDENT>raise NotImplementedError<EOL>", "docstring": "Return a Schedule object based on an lxml Element for the <schedule>\n        tag. timezone is a tzinfo object, ideally from pytz.", "id": "f4285:c0:m1"}
{"signature": "def to_timezone(self, dt):", "body": "if timezone.is_aware(dt):<EOL><INDENT>return dt.astimezone(self.timezone)<EOL><DEDENT>else:<EOL><INDENT>return timezone.make_aware(dt, self.timezone)<EOL><DEDENT>", "docstring": "Converts a datetime to the timezone of this Schedule.", "id": "f4285:c0:m2"}
{"signature": "@property<EOL><INDENT>@memoize_method<EOL>def period(self):<DEDENT>", "body": "start_time = self.root.findtext('<STR_LIT>')<EOL>if start_time:<EOL><INDENT>return Period(text_to_time(start_time), text_to_time(self.root.findtext('<STR_LIT>')))<EOL><DEDENT>return Period(datetime.time(<NUM_LIT:0>, <NUM_LIT:0>), datetime.time(<NUM_LIT>, <NUM_LIT>))<EOL>", "docstring": "A Period tuple representing the daily start and end time.", "id": "f4285:c3:m3"}
{"signature": "def _daily_periods(self, range_start, range_end):", "body": "specific = set(self.exceptions.keys())<EOL>return heapq.merge(self.exception_periods(range_start, range_end), *[<EOL>sched.daily_periods(range_start=range_start, range_end=range_end, exclude_dates=specific)<EOL>for sched in self._recurring_schedules<EOL>])<EOL>", "docstring": "Returns an iterator of Period tuples for every day this event is in effect, between range_start\n        and range_end.", "id": "f4285:c2:m4"}
{"signature": "def _tmdd_datetime_to_iso(dt, include_offset=True, include_seconds=True):", "body": "datestring = dt.findtext('<STR_LIT:date>')<EOL>timestring = dt.findtext('<STR_LIT:time>')<EOL>assert len(datestring) == <NUM_LIT:8><EOL>assert len(timestring) >= <NUM_LIT:6><EOL>iso = datestring[<NUM_LIT:0>:<NUM_LIT:4>] + '<STR_LIT:->' + datestring[<NUM_LIT:4>:<NUM_LIT:6>] + '<STR_LIT:->' + datestring[<NUM_LIT:6>:<NUM_LIT:8>] + '<STR_LIT:T>'+ timestring[<NUM_LIT:0>:<NUM_LIT:2>] + '<STR_LIT::>' + timestring[<NUM_LIT:2>:<NUM_LIT:4>]<EOL>if include_seconds:<EOL><INDENT>iso += '<STR_LIT::>' + timestring[<NUM_LIT:4>:<NUM_LIT:6>]<EOL><DEDENT>if include_offset:<EOL><INDENT>offset = dt.findtext('<STR_LIT>')<EOL>if offset:<EOL><INDENT>assert len(offset) == <NUM_LIT:5><EOL>iso += offset[<NUM_LIT:0>:<NUM_LIT:3>] + '<STR_LIT::>' + offset[<NUM_LIT:3>:<NUM_LIT:5>]<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\" % etree.tostring(dt))<EOL><DEDENT><DEDENT>return iso<EOL>", "docstring": "dt is an xml Element with <date>, <time>, and optionally <offset> children.\nreturns an ISO8601 string", "id": "f4292:m2"}
{"signature": "def _get_severity(c):", "body": "severities = c.feu.xpath('<STR_LIT>')<EOL>impacts = c.feu.xpath('<STR_LIT>')<EOL>severities = [convert_severity[s] for s in severities]<EOL>impacts = [convert_impact[i] for i in impacts]<EOL>return ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'][max(itertools.chain(severities, impacts))]<EOL>", "docstring": "1. Collect all <severity> and <impact-level> values.\n2. Convert impact-level of 1-3 to MINOR, 4-7 to MODERATE, 8-10 to MAJOR\n3. Map severity -> none to MINOR, natural-disaster to MAJOR, other to UNKNOWN\n4. Pick the highest severity.", "id": "f4292:m13"}
{"signature": "def ensure_format(doc, format):", "body": "assert format in ('<STR_LIT>', '<STR_LIT>')<EOL>if getattr(doc, '<STR_LIT>', None) == '<STR_LIT>':<EOL><INDENT>if format == '<STR_LIT>':<EOL><INDENT>return xml_to_json(doc)<EOL><DEDENT><DEDENT>elif isinstance(doc, dict) and '<STR_LIT>' in doc:<EOL><INDENT>if format == '<STR_LIT>':<EOL><INDENT>return json_doc_to_xml(doc)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return doc<EOL>", "docstring": "Ensures that the provided document is an lxml Element or json dict.", "id": "f4293:m0"}
{"signature": "def xml_to_json(root):", "body": "j = {}<EOL>if len(root) == <NUM_LIT:0>:  <EOL><INDENT>return _maybe_intify(root.text)<EOL><DEDENT>if len(root) == <NUM_LIT:1> and root[<NUM_LIT:0>].tag.startswith('<STR_LIT:{>' + NS_GML):  <EOL><INDENT>return gml_to_geojson(root[<NUM_LIT:0>])<EOL><DEDENT>if root.tag == '<STR_LIT>':<EOL><INDENT>j['<STR_LIT>'] = {'<STR_LIT:version>': root.get('<STR_LIT:version>')}<EOL><DEDENT>for elem in root:<EOL><INDENT>name = elem.tag<EOL>if name == '<STR_LIT>' and elem.get('<STR_LIT>'):<EOL><INDENT>name = elem.get('<STR_LIT>') + '<STR_LIT>'<EOL>if name == '<STR_LIT>':<EOL><INDENT>name = '<STR_LIT:url>'<EOL><DEDENT>if root.tag == '<STR_LIT>':<EOL><INDENT>j['<STR_LIT>'][name] = elem.get('<STR_LIT>')<EOL>continue<EOL><DEDENT><DEDENT>elif name.startswith('<STR_LIT:{>' + NS_PROTECTED):<EOL><INDENT>name = '<STR_LIT:!>' + name[name.index('<STR_LIT:}>') + <NUM_LIT:1>:] <EOL><DEDENT>elif name[<NUM_LIT:0>] == '<STR_LIT:{>':<EOL><INDENT>name = '<STR_LIT:+>' + name[name.index('<STR_LIT:}>') + <NUM_LIT:1>:]<EOL><DEDENT>if name in j:<EOL><INDENT>continue  <EOL><DEDENT>elif elem.tag == '<STR_LIT>' and not elem.text:<EOL><INDENT>j[name] = elem.get('<STR_LIT>')<EOL><DEDENT>elif len(elem):<EOL><INDENT>if name == '<STR_LIT>':<EOL><INDENT>j[name] = [xml_link_to_json(child, to_dict=False) for child in elem]<EOL><DEDENT>elif name in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>j[name] = [xml_link_to_json(child, to_dict=True) for child in elem]<EOL><DEDENT>elif all((name == pluralize(child.tag) for child in elem)):<EOL><INDENT>j[name] = [xml_to_json(child) for child in elem]<EOL><DEDENT>else:<EOL><INDENT>j[name] = xml_to_json(elem)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if root.tag == '<STR_LIT>' and name.endswith('<STR_LIT:s>') and not elem.text:<EOL><INDENT>j[name] = []<EOL><DEDENT>else:<EOL><INDENT>j[name] = _maybe_intify(elem.text)<EOL><DEDENT><DEDENT><DEDENT>return j<EOL>", "docstring": "Convert an Open511 XML document or document fragment to JSON.\n\n    
Takes an lxml Element object. Returns a dict ready to be JSON-serialized.", "id": "f4294:m2"}
{"signature": "def _gmlv2_to_geojson(el):", "body": "tag = el.tag.replace('<STR_LIT>' % NS_GML, '<STR_LIT>')<EOL>if tag == '<STR_LIT>':<EOL><INDENT>coordinates = [float(c) for c in el.findtext('<STR_LIT>' % NS_GML).split('<STR_LIT:U+002C>')]<EOL><DEDENT>elif tag == '<STR_LIT>':<EOL><INDENT>coordinates = [<EOL>[float(x) for x in pair.split('<STR_LIT:U+002C>')]<EOL>for pair in el.findtext('<STR_LIT>' % NS_GML).split('<STR_LIT:U+0020>')<EOL>]<EOL><DEDENT>elif tag == '<STR_LIT>':<EOL><INDENT>coordinates = []<EOL>for ring in el.xpath('<STR_LIT>', namespaces=NSMAP)+ el.xpath('<STR_LIT>', namespaces=NSMAP):<EOL><INDENT>coordinates.append([<EOL>[float(x) for x in pair.split('<STR_LIT:U+002C>')]<EOL>for pair in ring.text.split('<STR_LIT:U+0020>')<EOL>])<EOL><DEDENT><DEDENT>elif tag in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if tag == '<STR_LIT>':<EOL><INDENT>single_type = '<STR_LIT>'<EOL>member_tag = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>single_type = tag[<NUM_LIT:5>:]<EOL>member_tag = single_type[<NUM_LIT:0>].lower() + single_type[<NUM_LIT:1>:] + '<STR_LIT>'<EOL><DEDENT>coordinates = [<EOL>gml_to_geojson(member)['<STR_LIT>']<EOL>for member in el.xpath('<STR_LIT>' % (member_tag, single_type), namespaces=NSMAP)<EOL>]<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError<EOL><DEDENT>return {<EOL>'<STR_LIT:type>': tag,<EOL>'<STR_LIT>': coordinates<EOL>}<EOL>", "docstring": "Translates a deprecated GML 2.0 geometry to GeoJSON", "id": "f4294:m6"}
{"signature": "def geom_to_xml_element(geom):", "body": "if geom.srs.srid != <NUM_LIT>:<EOL><INDENT>raise NotImplementedError(\"<STR_LIT>\")<EOL><DEDENT>return geojson_to_gml(json.loads(geom.geojson))<EOL>", "docstring": "Transform a GEOS or OGR geometry object into an lxml Element\n    for the GML geometry.", "id": "f4295:m6"}
{"signature": "def json_struct_to_xml(json_obj, root, custom_namespace=None):", "body": "if isinstance(root, (str, unicode)):<EOL><INDENT>if root.startswith('<STR_LIT:!>'):<EOL><INDENT>root = etree.Element('<STR_LIT>' % (NS_PROTECTED, root[<NUM_LIT:1>:]))<EOL><DEDENT>elif root.startswith('<STR_LIT:+>'):<EOL><INDENT>if not custom_namespace:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>root = etree.Element('<STR_LIT>' % (custom_namespace, root[<NUM_LIT:1>:]))<EOL><DEDENT>else:<EOL><INDENT>root = etree.Element(root)<EOL><DEDENT><DEDENT>if root.tag in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>for link in json_obj:<EOL><INDENT>root.append(json_link_to_xml(link))<EOL><DEDENT><DEDENT>elif isinstance(json_obj, (str, unicode)):<EOL><INDENT>root.text = json_obj<EOL><DEDENT>elif isinstance(json_obj, (int, float)):<EOL><INDENT>root.text = unicode(json_obj)<EOL><DEDENT>elif isinstance(json_obj, dict):<EOL><INDENT>if frozenset(json_obj.keys()) == frozenset(('<STR_LIT:type>', '<STR_LIT>')):<EOL><INDENT>root.append(geojson_to_gml(json_obj))<EOL><DEDENT>else:<EOL><INDENT>for key, val in json_obj.items():<EOL><INDENT>if key == '<STR_LIT:url>' or key.endswith('<STR_LIT>'):<EOL><INDENT>el = json_link_to_xml(val, json_link_key_to_xml_rel(key))<EOL><DEDENT>else:<EOL><INDENT>el = json_struct_to_xml(val, key, custom_namespace=custom_namespace)<EOL><DEDENT>if el is not None:<EOL><INDENT>root.append(el)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif isinstance(json_obj, list):<EOL><INDENT>tag_name = root.tag<EOL>if tag_name.endswith('<STR_LIT>'):<EOL><INDENT>tag_name = tag_name[:-<NUM_LIT:3>] + '<STR_LIT:y>'<EOL><DEDENT>elif tag_name.endswith('<STR_LIT:s>'):<EOL><INDENT>tag_name = tag_name[:-<NUM_LIT:1>]<EOL><DEDENT>for val in json_obj:<EOL><INDENT>el = json_struct_to_xml(val, tag_name, custom_namespace=custom_namespace)<EOL>if el is not None:<EOL><INDENT>root.append(el)<EOL><DEDENT><DEDENT><DEDENT>elif json_obj is None:<EOL><INDENT>return 
None<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError<EOL><DEDENT>return root<EOL>", "docstring": "Converts a Open511 JSON fragment to XML.\n\n    Takes a dict deserialized from JSON, returns an lxml Element.\n\n    This won't provide a conforming document if you pass in a full JSON document;\n    it's for translating little fragments, and is mostly used internally.", "id": "f4295:m1"}
{"signature": "def shuffle(self, count=None, utc=None):", "body": "url = PATHS['<STR_LIT>'] % self.profile_id<EOL>post_data = '<STR_LIT>'<EOL>if count:<EOL><INDENT>post_data += '<STR_LIT>' % count<EOL><DEDENT>if utc:<EOL><INDENT>post_data += '<STR_LIT>' % utc<EOL><DEDENT>return self.api.post(url=url, data=post_data)<EOL>", "docstring": "Randomize the order at which statuses for the specified social media\nprofile will be sent out of the buffer.", "id": "f4304:c0:m3"}
{"signature": "@property<EOL><INDENT>def pending(self):<DEDENT>", "body": "pending_updates = []<EOL>url = PATHS['<STR_LIT>'] % self.profile_id<EOL>response = self.api.get(url=url)<EOL>for update in response['<STR_LIT>']:<EOL><INDENT>pending_updates.append(Update(api=self.api, raw_response=update))<EOL><DEDENT>self.__pending = pending_updates<EOL>return self.__pending<EOL>", "docstring": "Returns an array of updates that are currently in the buffer for an\nindividual social media profile.", "id": "f4304:c0:m1"}
{"signature": "@property<EOL><INDENT>def interactions(self):<DEDENT>", "body": "interactions = []<EOL>url = PATHS['<STR_LIT>'] % self.id<EOL>response = self.api.get(url=url)<EOL>for interaction in response['<STR_LIT>']:<EOL><INDENT>interactions.append(ResponseObject(interaction))<EOL><DEDENT>self.__interactions = interactions<EOL>return self.__interactions<EOL>", "docstring": "Returns the detailed information on individual interactions with the social\nmedia update such as favorites, retweets and likes.", "id": "f4306:c0:m1"}
{"signature": "@schedules.setter<EOL><INDENT>def schedules(self, schedules):<DEDENT>", "body": "url = PATHS['<STR_LIT>'] % self.id<EOL>data_format = \"<STR_LIT>\"<EOL>post_data = \"<STR_LIT>\"<EOL>for format_type, values in schedules.iteritems():<EOL><INDENT>for value in values:<EOL><INDENT>post_data += data_format % (format_type, value)<EOL><DEDENT><DEDENT>self.api.post(url=url, data=post_data)<EOL>", "docstring": "Set the posting schedules for the specified social media profile.", "id": "f4308:c0:m2"}
{"signature": "@property<EOL><INDENT>def schedules(self):<DEDENT>", "body": "url = PATHS['<STR_LIT>'] % self.id<EOL>self.__schedules = self.api.get(url=url)<EOL>return self.__schedules<EOL>", "docstring": "Returns details of the posting schedules associated with a social media\nprofile.", "id": "f4308:c0:m1"}
{"signature": "def set_mongonaut_base(self):", "body": "if hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>return None<EOL><DEDENT>self.app_label = self.kwargs.get('<STR_LIT>')<EOL>self.document_name = self.kwargs.get('<STR_LIT>')<EOL>self.models_name = self.kwargs.get('<STR_LIT>', '<STR_LIT>')<EOL>self.model_name = \"<STR_LIT>\".format(self.app_label, self.models_name)<EOL>self.models = import_module(self.model_name)<EOL>", "docstring": "Sets a number of commonly used attributes", "id": "f4350:c1:m3"}
{"signature": "def set_embedded_doc(self, document, form_key, current_key, remaining_key):", "body": "embedded_doc = getattr(document, current_key, False)<EOL>if not embedded_doc:<EOL><INDENT>embedded_doc = document._fields[current_key].document_type_obj()<EOL><DEDENT>new_key, new_remaining_key_array = trim_field_key(embedded_doc, remaining_key)<EOL>self.process_document(embedded_doc, form_key, make_key(new_key, new_remaining_key_array))<EOL>setattr(document, current_key, embedded_doc)<EOL>", "docstring": "Get the existing embedded document if it exists, else created it.", "id": "f4350:c2:m2"}
{"signature": "def set_mongoadmin(self):", "body": "if hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>return None<EOL><DEDENT>if not hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>self.set_mongonaut_base()<EOL><DEDENT>for mongoadmin in self.get_mongoadmins():<EOL><INDENT>for model in mongoadmin['<STR_LIT>'].models:<EOL><INDENT>if model.name == self.document_name:<EOL><INDENT>self.mongoadmin = model.mongoadmin<EOL>break<EOL><DEDENT><DEDENT><DEDENT>if not hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>raise NoMongoAdminSpecified(\"<STR_LIT>\".format(self.app_label, self.document_name))<EOL><DEDENT>", "docstring": "Returns the MongoAdmin object for an app_label/document_name style view", "id": "f4350:c1:m4"}
{"signature": "def get_mongoadmins(self):", "body": "apps = []<EOL>for app_name in settings.INSTALLED_APPS:<EOL><INDENT>mongoadmin = \"<STR_LIT>\".format(app_name)<EOL>try:<EOL><INDENT>module = import_module(mongoadmin)<EOL><DEDENT>except ImportError as e:<EOL><INDENT>if str(e).startswith(\"<STR_LIT>\"):<EOL><INDENT>continue<EOL><DEDENT>raise e<EOL><DEDENT>app_store = AppStore(module)<EOL>apps.append(dict(<EOL>app_name=app_name,<EOL>obj=app_store<EOL>))<EOL><DEDENT>return apps<EOL>", "docstring": "Returns a list of all mongoadmin implementations for the site", "id": "f4350:c1:m2"}
{"signature": "def post(self, request, *args, **kwargs):", "body": "<EOL>form_class = self.get_form_class()<EOL>form = self.get_form(form_class)<EOL>mongo_ids = self.get_initial()['<STR_LIT>']<EOL>for form_mongo_id in form.data.getlist('<STR_LIT>'):<EOL><INDENT>for mongo_id in mongo_ids:<EOL><INDENT>if form_mongo_id == mongo_id:<EOL><INDENT>self.document.objects.get(pk=mongo_id).delete()<EOL><DEDENT><DEDENT><DEDENT>return self.form_invalid(form)<EOL>", "docstring": "Creates new mongoengine records.", "id": "f4352:c1:m4"}
{"signature": "@cached_property<EOL><INDENT>def get_queryset(self):<DEDENT>", "body": "if hasattr(self, \"<STR_LIT>\") and self.queryset:<EOL><INDENT>return self.queryset<EOL><DEDENT>self.set_mongonaut_base()<EOL>self.set_mongoadmin()<EOL>self.document = getattr(self.models, self.document_name)<EOL>queryset = self.document.objects.all()<EOL>if self.mongoadmin.ordering:<EOL><INDENT>queryset = queryset.order_by(*self.mongoadmin.ordering)<EOL><DEDENT>q = self.request.GET.get('<STR_LIT:q>')<EOL>queryset = self.get_qset(queryset, q)<EOL>try:<EOL><INDENT>self.page = int(self.request.GET.get('<STR_LIT>', '<STR_LIT:1>'))<EOL><DEDENT>except ValueError:<EOL><INDENT>self.page = <NUM_LIT:1><EOL><DEDENT>obj_count = queryset.count()<EOL>self.total_pages = math.ceil(obj_count / self.documents_per_page)<EOL>if self.page > self.total_pages:<EOL><INDENT>self.page = self.total_pages<EOL><DEDENT>if self.page < <NUM_LIT:1>:<EOL><INDENT>self.page = <NUM_LIT:1><EOL><DEDENT>start = (self.page - <NUM_LIT:1>) * self.documents_per_page<EOL>end = self.page * self.documents_per_page<EOL>queryset = queryset[start:end] if obj_count else queryset<EOL>self.queryset = queryset<EOL>return queryset<EOL>", "docstring": "Replicates Django CBV `get_queryset()` method, but for MongoEngine.", "id": "f4352:c1:m1"}
{"signature": "def get_context_data(self, **kwargs):", "body": "context = super(DocumentAddFormView, self).get_context_data(**kwargs)<EOL>self.set_mongoadmin()<EOL>context = self.set_permissions_in_context(context)<EOL>self.document_type = getattr(self.models, self.document_name)<EOL>context['<STR_LIT>'] = self.app_label<EOL>context['<STR_LIT>'] = self.document_name<EOL>context['<STR_LIT>'] = reverse('<STR_LIT>', args=[self.kwargs.get('<STR_LIT>'),<EOL>self.kwargs.get('<STR_LIT>')])<EOL>return context<EOL>", "docstring": "TODO - possibly inherit this from DocumentEditFormView. This is same thing minus:\n            self.ident = self.kwargs.get('id')\n            self.document = self.document_type.objects.get(pk=self.ident)", "id": "f4352:c4:m1"}
{"signature": "def get_qset(self, queryset, q):", "body": "if self.mongoadmin.search_fields and q:<EOL><INDENT>params = {}<EOL>for field in self.mongoadmin.search_fields:<EOL><INDENT>if field == '<STR_LIT:id>':<EOL><INDENT>if is_valid_object_id(q):<EOL><INDENT>return queryset.filter(pk=q)<EOL><DEDENT>continue<EOL><DEDENT>search_key = \"<STR_LIT>\".format(field=field)<EOL>params[search_key] = q<EOL><DEDENT>queryset = queryset.filter(**params)<EOL><DEDENT>return queryset<EOL>", "docstring": "Performs filtering against the default queryset returned by\n            mongoengine.", "id": "f4352:c1:m0"}
{"signature": "def has_add_permission(self, request):", "body": "return request.user.is_authenticated and request.user.is_active and request.user.is_staff<EOL>", "docstring": "Can add this object", "id": "f4353:c0:m2"}
{"signature": "def has_view_permission(self, request):", "body": "return request.user.is_authenticated and request.user.is_active<EOL>", "docstring": "Returns True if the given HttpRequest has permission to view\n*at least one* page in the mongonaut site.", "id": "f4353:c0:m0"}
{"signature": "def make_key(*args, **kwargs):", "body": "sep = kwargs.get('<STR_LIT>', u\"<STR_LIT:_>\")<EOL>exclude_last_string = kwargs.get('<STR_LIT>', False)<EOL>string_array = []<EOL>for arg in args:<EOL><INDENT>if isinstance(arg, list):<EOL><INDENT>string_array.append(six.text_type(sep.join(arg)))<EOL><DEDENT>else:<EOL><INDENT>if exclude_last_string:<EOL><INDENT>new_key_array = arg.split(sep)[:-<NUM_LIT:1>]<EOL>if len(new_key_array) > <NUM_LIT:0>:<EOL><INDENT>string_array.append(make_key(new_key_array))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>string_array.append(six.text_type(arg))<EOL><DEDENT><DEDENT><DEDENT>return sep.join(string_array)<EOL>", "docstring": "Given any number of lists and strings will join them in order as one\nstring separated by the sep kwarg.  sep defaults to u\"_\".\n\nAdd exclude_last_string=True as a kwarg to exclude the last item in a\ngiven string after being split by sep.  Note if you only have one word\nin your string you can end up getting an empty string.\n\nExample uses:\n\n>>> from mongonaut.forms.form_utils import make_key\n>>> make_key('hi', 'my', 'firend')\n>>> u'hi_my_firend'\n\n>>> make_key('hi', 'my', 'firend', sep='i')\n>>> 'hiimyifirend'\n\n>>> make_key('hi', 'my', 'firend',['this', 'be', 'what'], sep='i')\n>>> 'hiimyifirendithisibeiwhat'\n\n>>> make_key('hi', 'my', 'firend',['this', 'be', 'what'])\n>>> u'hi_my_firend_this_be_what'", "id": "f4354:m1"}
{"signature": "def create_list_dict(self, document, list_field, doc_key):", "body": "list_dict = {\"<STR_LIT>\": document}<EOL>if isinstance(list_field.field, EmbeddedDocumentField):<EOL><INDENT>list_dict.update(self.create_document_dictionary(document=list_field.field.document_type_obj,<EOL>owner_document=document))<EOL><DEDENT>list_dict.update({\"<STR_LIT>\": list_field.field,<EOL>\"<STR_LIT>\": doc_key,<EOL>\"<STR_LIT>\": ListField,<EOL>\"<STR_LIT>\": get_widget(list_field.field),<EOL>\"<STR_LIT>\": getattr(document, doc_key, None)})<EOL>return list_dict<EOL>", "docstring": "Genereates a dictionary representation of the list field. Document\nshould be the document the list_field comes from.\n\nDO NOT CALL DIRECTLY", "id": "f4355:c0:m5"}
{"signature": "def create_document_dictionary(self, document, document_key=None,<EOL>owner_document=None):", "body": "doc_dict = self.create_doc_dict(document, document_key, owner_document)<EOL>for doc_key, doc_field in doc_dict.items():<EOL><INDENT>if doc_key.startswith(\"<STR_LIT:_>\"):<EOL><INDENT>continue<EOL><DEDENT>if isinstance(doc_field, ListField):<EOL><INDENT>doc_dict[doc_key] = self.create_list_dict(document, doc_field, doc_key)<EOL><DEDENT>elif isinstance(doc_field, EmbeddedDocumentField):<EOL><INDENT>doc_dict[doc_key] = self.create_document_dictionary(doc_dict[doc_key].document_type_obj,<EOL>doc_key)<EOL><DEDENT>else:<EOL><INDENT>doc_dict[doc_key] = {\"<STR_LIT>\": document,<EOL>\"<STR_LIT>\": doc_key,<EOL>\"<STR_LIT>\": doc_field,<EOL>\"<STR_LIT>\": get_widget(doc_dict[doc_key], getattr(doc_field, '<STR_LIT>', False))}<EOL><DEDENT><DEDENT>return doc_dict<EOL>", "docstring": "Given document generates a dictionary representation of the document.\nIncludes the widget for each for each field in the document.", "id": "f4355:c0:m6"}
{"signature": "def get_document_unicode(document):", "body": "try:<EOL><INDENT>return document.__unicode__()<EOL><DEDENT>except AttributeError:<EOL><INDENT>return six.text_type(document)<EOL><DEDENT>", "docstring": "Safely converts MongoDB document strings to unicode.", "id": "f4358:m0"}
{"signature": "def set_form_fields(self, form_field_dict, parent_key=None, field_type=None):", "body": "for form_key, field_value in form_field_dict.items():<EOL><INDENT>form_key = make_key(parent_key, form_key) if parent_key is not None else form_key<EOL>if isinstance(field_value, tuple):<EOL><INDENT>set_list_class = False<EOL>base_key = form_key<EOL>if ListField in (field_value.field_type, field_type):<EOL><INDENT>if parent_key is None or ListField == field_value.field_type:<EOL><INDENT>if field_type != EmbeddedDocumentField:<EOL><INDENT>field_value.widget.attrs['<STR_LIT:class>'] += '<STR_LIT>'.format(form_key)<EOL><DEDENT>set_list_class = True<EOL><DEDENT>else:<EOL><INDENT>field_value.widget.attrs['<STR_LIT:class>'] += '<STR_LIT>'<EOL><DEDENT>list_keys = [field_key for field_key in self.form.fields.keys()<EOL>if has_digit(field_key)]<EOL>key_int = <NUM_LIT:0><EOL>while form_key in list_keys:<EOL><INDENT>key_int += <NUM_LIT:1><EOL><DEDENT>form_key = make_key(form_key, key_int)<EOL><DEDENT>if parent_key is not None:<EOL><INDENT>valid_base_keys = [model_key for model_key in self.model_map_dict.keys()<EOL>if not model_key.startswith(\"<STR_LIT:_>\")]<EOL>while base_key not in valid_base_keys and base_key:<EOL><INDENT>base_key = make_key(base_key, exclude_last_string=True)<EOL><DEDENT>embedded_key_class = None<EOL>if set_list_class:<EOL><INDENT>field_value.widget.attrs['<STR_LIT:class>'] += \"<STR_LIT>\".format(base_key)<EOL>embedded_key_class = make_key(field_key, exclude_last_string=True)<EOL><DEDENT>field_value.widget.attrs['<STR_LIT:class>'] += \"<STR_LIT>\"<EOL>if base_key == parent_key:<EOL><INDENT>field_value.widget.attrs['<STR_LIT:class>'] += '<STR_LIT>'.format(base_key)<EOL><DEDENT>else:<EOL><INDENT>field_value.widget.attrs['<STR_LIT:class>'] += '<STR_LIT>'.format(base_key, parent_key)<EOL><DEDENT>if embedded_key_class is not None:<EOL><INDENT>field_value.widget.attrs['<STR_LIT:class>'] += 
'<STR_LIT>'.format(embedded_key_class)<EOL><DEDENT><DEDENT>default_value = self.get_field_value(form_key)<EOL>if isinstance(default_value, list) and len(default_value) > <NUM_LIT:0>:<EOL><INDENT>key_index = int(form_key.split(\"<STR_LIT:_>\")[-<NUM_LIT:1>])<EOL>new_base_key = make_key(form_key, exclude_last_string=True)<EOL>for list_value in default_value:<EOL><INDENT>list_widget = deepcopy(field_value.widget)<EOL>new_key = make_key(new_base_key, six.text_type(key_index))<EOL>list_widget.attrs['<STR_LIT:class>'] += \"<STR_LIT>\".format(make_key(base_key, key_index))<EOL>self.set_form_field(list_widget, field_value.document_field, new_key, list_value)<EOL>key_index += <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.set_form_field(field_value.widget, field_value.document_field,<EOL>form_key, default_value)<EOL><DEDENT><DEDENT>elif isinstance(field_value, dict):<EOL><INDENT>self.set_form_fields(field_value, form_key, field_value.get(\"<STR_LIT>\", None))<EOL><DEDENT><DEDENT>", "docstring": "Set the form fields for every key in the form_field_dict.\n\nParams:\n  form_field_dict -- a dictionary created by get_form_field_dict\n  parent_key -- the key for the previous key in the recursive call\n  field_type -- used to determine what kind of field we are setting", "id": "f4358:c0:m2"}
{"signature": "def is_valid_object_id(value):", "body": "try:<EOL><INDENT>OBJECT_ID.validate(value)<EOL>return True<EOL><DEDENT>except ValidationError:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Validates BSON IDs using mongoengine's ObjectIdField field.", "id": "f4361:m0"}
{"signature": "def stats(self):", "body": "return {'<STR_LIT:size>': self._size, '<STR_LIT>': self._hits, '<STR_LIT>': self._misses}<EOL>", "docstring": ">>> c = LRUCache()\n>>> sorted(c.stats().keys())\n['hits', 'misses', 'size']", "id": "f4367:c0:m7"}
{"signature": "def delete(self, key):", "body": "(_value, mem) = LRUCache.get(self, key)<EOL>self._mem -= mem<EOL>LRUCache.delete(self, key)<EOL>", "docstring": ">>> c = MemSizeLRUCache()\n>>> c.put(1, 1)\n>>> c.mem()\n24\n>>> c.delete(1)\n>>> c.mem()\n0", "id": "f4367:c1:m4"}
{"signature": "def put(self, key, value):", "body": "mem = sys.getsizeof(value)<EOL>if self._mem + mem > self._maxmem:<EOL><INDENT>self.delete(self.last())<EOL><DEDENT>LRUCache.put(self, key, (value, mem))<EOL>self._mem += mem<EOL>", "docstring": ">>> c = MemSizeLRUCache(maxmem=24*4)\n>>> c.put(1, 1)\n>>> c.mem() # 24-bytes per integer\n24\n>>> c.put(2, 2)\n>>> c.put(3, 3)\n>>> c.put(4, 4)\n>>> c.get(1)\n1\n>>> c.mem()\n96\n>>> c.size()\n4\n>>> c.put(5, 5)\n>>> c.size()\n4\n>>> c.get(2)\nTraceback (most recent call last):\n    ...\nKeyError: 2", "id": "f4367:c1:m2"}
{"signature": "def put(self, key, value):", "body": "self._cache[key] = value<EOL>self._order.push(key)<EOL>self._size += <NUM_LIT:1><EOL>", "docstring": ">>> c = LRUCache()\n>>> c.put(1, 'one')\n>>> c.get(1)\n'one'\n>>> c.size()\n1\n>>> c.put(2, 'two')\n>>> c.put(3, 'three')\n>>> c.put(4, 'four')\n>>> c.put(5, 'five')\n>>> c.get(5)\n'five'\n>>> c.size()\n5", "id": "f4367:c0:m2"}
{"signature": "def delete(self, key):", "body": "del self._cache[key]<EOL>self._order.delete(key)<EOL>self._size -= <NUM_LIT:1><EOL>", "docstring": ">>> c = LRUCache()\n>>> c.put(1, 'one')\n>>> c.get(1)\n'one'\n>>> c.delete(1)\n>>> c.get(1)\nTraceback (most recent call last):\n    ...\nKeyError: 1\n>>> c.delete(1)\nTraceback (most recent call last):\n    ...\nKeyError: 1", "id": "f4367:c0:m3"}
{"signature": "def put(self, key, value):", "body": "<EOL>if self.size() == self._maxsize:<EOL><INDENT>self.delete(self.last())<EOL><DEDENT>LRUCache.put(self, key, value)<EOL>", "docstring": ">>> c = FixedSizeLRUCache(maxsize=5)\n>>> c.put(1, 'one')\n>>> c.get(1)\n'one'\n>>> c.size()\n1\n>>> c.put(2, 'two')\n>>> c.put(3, 'three')\n>>> c.put(4, 'four')\n>>> c.put(5, 'five')\n>>> c.get(5)\n'five'\n>>> c.size()\n5\n>>> c.put(6, 'six')\n>>> c.size()\n5\n>>> c.get(1)\nTraceback (most recent call last):\n    ...\nKeyError: 1\n>>> c.get(2)\n'two'\n>>> c.put(7, 'seven')\n>>> c.get(2)\n'two'\n>>> c.get(3)\nTraceback (most recent call last):\n    ...\nKeyError: 3", "id": "f4367:c2:m1"}
{"signature": "def get(self, key):", "body": "try:<EOL><INDENT>value = self._cache[key]<EOL>self._order.push(key)<EOL>self._hits += <NUM_LIT:1><EOL>return value<EOL><DEDENT>except KeyError as e:<EOL><INDENT>self._misses += <NUM_LIT:1><EOL>raise<EOL><DEDENT>", "docstring": ">>> c = LRUCache()\n>>> c.get('toto')\nTraceback (most recent call last):\n    ...\nKeyError: 'toto'\n>>> c.stats()['misses']\n1\n>>> c.put('toto', 'tata')\n>>> c.get('toto')\n'tata'\n>>> c.stats()['hits']\n1", "id": "f4367:c0:m1"}
{"signature": "def size(self):", "body": "return self._size<EOL>", "docstring": ">>> l = DLL()\n>>> l.size()\n0", "id": "f4370:c0:m7"}
{"signature": "def first(self):", "body": "return self._first<EOL>", "docstring": ">>> l = DLL()\n>>> l.first()", "id": "f4370:c0:m6"}
{"signature": "def delete(self, k):", "body": "self.deletenode(self._index[k])<EOL>", "docstring": ">>> l = DLL()\n>>> l.push(1)\n>>> l.delete(1)\n>>> l\n[]\n>>> l.push(1)\n>>> l.push(2)\n>>> l.push(3)\n>>> l\n[3, 2, 1]\n>>> l.delete(2)\n>>> l\n[3, 1]", "id": "f4370:c0:m2"}
{"signature": "def deletenode(self, node):", "body": "if self._last == node:<EOL><INDENT>self._last = node.previous<EOL><DEDENT>if self._first == node:<EOL><INDENT>self._first = node.next<EOL><DEDENT>node.pop()<EOL>del self._index[node.value]<EOL>self._size -= <NUM_LIT:1><EOL>", "docstring": ">>> l = DLL()\n>>> l.push(1)\n>>> l\n[1]\n>>> l.size()\n1\n>>> l.deletenode(l._first)\n>>> l\n[]\n>>> l.size()\n0\n>>> l._index\n{}\n>>> l._first", "id": "f4370:c0:m3"}
{"signature": "def push(self, k):", "body": "if not self._first:<EOL><INDENT>self._first = self._last = node = DLL.Node(k)<EOL><DEDENT>elif self._first.value == k:<EOL><INDENT>return<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>self.delete(k) <EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>self._first = node = self._first.insert_before(k)<EOL><DEDENT>self._index[k] = node<EOL>self._size += <NUM_LIT:1><EOL>", "docstring": "Push k to the top of the list\n\n        >>> l = DLL()\n        >>> l.push(1)\n        >>> l\n        [1]\n        >>> l.push(2)\n        >>> l\n        [2, 1]\n        >>> l.push(3)\n        >>> l\n        [3, 2, 1]", "id": "f4370:c0:m1"}
{"signature": "def pop(self):", "body": "k = self._last.value<EOL>self.deletenode(self._last)<EOL>return k<EOL>", "docstring": ">>> l = DLL()\n>>> l.push(1)\n>>> l.pop()\n1", "id": "f4370:c0:m4"}
{"signature": "@staticmethod<EOL><INDENT>def _handle_array(toks):<DEDENT>", "body": "if len(toks) == <NUM_LIT:5> and toks[<NUM_LIT:1>] == '<STR_LIT:{>' and toks[<NUM_LIT:4>] == '<STR_LIT:}>':<EOL><INDENT>return strategies.dictionaries(<EOL>keys=toks[<NUM_LIT:2>], values=toks[<NUM_LIT:3>], max_size=<NUM_LIT:20>)<EOL><DEDENT>if len(toks) == <NUM_LIT:2>:<EOL><INDENT>return strategies.lists(elements=toks[<NUM_LIT:1>], max_size=<NUM_LIT:20>)<EOL><DEDENT>raise ValueError(\"<STR_LIT>\")<EOL>", "docstring": "Generate the correct strategy for an array signature.\n\n:param toks: the list of parsed tokens\n:returns: strategy that generates an array or dict as appropriate\n:rtype: strategy", "id": "f4371:c0:m0"}
{"signature": "def _descending(dbus_object):", "body": "<EOL>if isinstance(dbus_object, dbus.Dictionary):<EOL><INDENT>key_levels = [_descending(x) for x in dbus_object.keys()]<EOL>value_levels = [_descending(x) for x in dbus_object.values()]<EOL>if any(k is None for k in key_levels) orany(v is None for v in value_levels):<EOL><INDENT>return None<EOL><DEDENT>max_key_level = max(key_levels) if key_levels != [] else <NUM_LIT:0><EOL>max_value_level = max(value_levels) if value_levels != [] else <NUM_LIT:0><EOL>max_level = max(max_key_level, max_value_level)<EOL>variant_level = dbus_object.variant_level<EOL>if variant_level == <NUM_LIT:0>:<EOL><INDENT>return max_level<EOL><DEDENT>return None if variant_level < max_level + <NUM_LIT:1> else variant_level<EOL><DEDENT>if isinstance(dbus_object, (dbus.Array, dbus.Struct)):<EOL><INDENT>levels = [_descending(x) for x in dbus_object]<EOL>if any(l is None for l in levels):<EOL><INDENT>return None<EOL><DEDENT>max_level = max(levels) if levels != [] else <NUM_LIT:0><EOL>variant_level = dbus_object.variant_level<EOL>if variant_level == <NUM_LIT:0>:<EOL><INDENT>return max_level<EOL><DEDENT>return None if variant_level < max_level + <NUM_LIT:1> else variant_level<EOL><DEDENT>return dbus_object.variant_level<EOL>", "docstring": "Verify levels of variant values always descend.\n\n:param object dbus_object: a dbus object\n:returns: None if there was a failure of the property, otherwise the level\n:rtype: int or NoneType\n\nNone is a better choice than False, for 0, a valid variant level, is always\ninterpreted as False.", "id": "f4371:m0"}
{"signature": "def get_command(namespace):", "body": "cmd = [\"<STR_LIT>\", namespace.package] + arg_map[namespace.package]<EOL>if namespace.ignore:<EOL><INDENT>cmd.append(\"<STR_LIT>\" % namespace.ignore)<EOL><DEDENT>return cmd<EOL>", "docstring": "Get the pylint command for these arguments.\n\n:param `Namespace` namespace: the namespace", "id": "f4372:m1"}
{"signature": "@staticmethod<EOL><INDENT>def _handle_array(toks):<DEDENT>", "body": "if len(toks) == <NUM_LIT:5> and toks[<NUM_LIT:1>] == '<STR_LIT:{>' and toks[<NUM_LIT:4>] == '<STR_LIT:}>':<EOL><INDENT>subtree = toks[<NUM_LIT:2>:<NUM_LIT:4>]<EOL>signature = '<STR_LIT>'.join(s for (_, s) in subtree)<EOL>[key_func, value_func] = [f for (f, _) in subtree]<EOL>def the_dict_func(a_dict, variant=<NUM_LIT:0>):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>elements =[(key_func(x), value_func(y)) for (x, y) in a_dict.items()]<EOL>level = <NUM_LIT:0> if elements == []else max(max(x, y) for ((_, x), (_, y)) in elements)<EOL>(obj_level, func_level) =_ToDbusXformer._variant_levels(level, variant)<EOL>return (dbus.types.Dictionary(<EOL>((x, y) for ((x, _), (y, _)) in elements),<EOL>signature=signature,<EOL>variant_level=obj_level), func_level)<EOL><DEDENT>return (the_dict_func, '<STR_LIT>' + signature + '<STR_LIT:}>')<EOL><DEDENT>if len(toks) == <NUM_LIT:2>:<EOL><INDENT>(func, sig) = toks[<NUM_LIT:1>]<EOL>def the_array_func(a_list, variant=<NUM_LIT:0>):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if isinstance(a_list, dict):<EOL><INDENT>raise IntoDPValueError(a_list, \"<STR_LIT>\",<EOL>\"<STR_LIT>\")<EOL><DEDENT>elements = [func(x) for x in a_list]<EOL>level = <NUM_LIT:0> if elements == [] else max(x for (_, x) in elements)<EOL>(obj_level, func_level) =_ToDbusXformer._variant_levels(level, variant)<EOL>return (dbus.types.Array(<EOL>(x for (x, _) in elements),<EOL>signature=sig,<EOL>variant_level=obj_level), func_level)<EOL><DEDENT>return (the_array_func, '<STR_LIT:a>' + sig)<EOL><DEDENT>raise IntoDPValueError(toks, \"<STR_LIT>\",<EOL>\"<STR_LIT>\")<EOL>", "docstring": "Generate the correct function for an array signature.\n\n:param toks: the list of parsed tokens\n:returns: function that returns an Array or Dictionary value\n:rtype: ((or list dict) -> ((or Array Dictionary) * int)) * str", "id": "f4373:c0:m2"}
{"signature": "def _handle_variant(self):", "body": "def the_func(a_tuple, variant=<NUM_LIT:0>):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>(signature, an_obj) = a_tuple<EOL>(func, sig) = self.COMPLETE.parseString(signature)[<NUM_LIT:0>]<EOL>assert sig == signature<EOL>(xformed, _) = func(an_obj, variant=variant + <NUM_LIT:1>)<EOL>return (xformed, xformed.variant_level)<EOL><DEDENT>return (the_func, '<STR_LIT:v>')<EOL>", "docstring": "Generate the correct function for a variant signature.\n\n:returns: function that returns an appropriate value\n:rtype: ((str * object) or list)-> object", "id": "f4373:c0:m1"}
{"signature": "def _wrapper(func):", "body": "@functools.wraps(func)<EOL>def the_func(expr):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>try:<EOL><INDENT>return func(expr)<EOL><DEDENT>except (TypeError, ValueError) as err:<EOL><INDENT>raise IntoDPValueError(expr, \"<STR_LIT>\", \"<STR_LIT>\")from err<EOL><DEDENT><DEDENT>return the_func<EOL>", "docstring": "Wraps a generated function so that it catches all Type- and ValueErrors\nand raises IntoDPValueErrors.\n\n:param func: the transforming function", "id": "f4373:m0"}
{"signature": "@staticmethod<EOL><INDENT>def _handle_base_case(klass, symbol):<DEDENT>", "body": "def the_func(value, variant=<NUM_LIT:0>):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>(obj_level, func_level) = _ToDbusXformer._variant_levels(<EOL><NUM_LIT:0>, variant)<EOL>return (klass(value, variant_level=obj_level), func_level)<EOL><DEDENT>return lambda: (the_func, symbol)<EOL>", "docstring": "Handle a base case.\n\n:param type klass: the class constructor\n:param str symbol: the type code", "id": "f4373:c0:m4"}
{"signature": "@staticmethod<EOL><INDENT>def _variant_levels(level, variant):<DEDENT>", "body": "return (level + variant, level + variant)if variant != <NUM_LIT:0> else (variant, level)<EOL>", "docstring": "Gets the level for the variant.\n\n:param int level: the current variant level\n:param int variant: the value for this level if variant\n\n:returns: a level for the object and one for the function\n:rtype: int * int", "id": "f4373:c0:m0"}
{"signature": "def __init__(self, value, param, msg=None):  ", "body": "<EOL>self._value = value<EOL>self._param = param<EOL>self._msg = msg<EOL>", "docstring": "Initializer.\n\n            :param object value: the value\n            :param str param: the parameter\n            :param str msg: an explanatory message", "id": "f4376:c1:m0"}
{"signature": "def read_file(self, changeset_file):", "body": "if isfile(changeset_file):<EOL><INDENT>self.filename = changeset_file<EOL><DEDENT>else:<EOL><INDENT>self.path = mkdtemp()<EOL>self.filename = join(self.path, basename(changeset_file))<EOL>download(changeset_file, self.path)<EOL><DEDENT>self.xml = ET.fromstring(gzip.open(self.filename).read())<EOL>if not isfile(changeset_file):<EOL><INDENT>rmtree(self.path)<EOL><DEDENT>", "docstring": "Download the replication changeset file or read it directly from the\n        filesystem (to test purposes).", "id": "f4379:c1:m1"}
{"signature": "def get_changeset(changeset):", "body": "url = '<STR_LIT>'.format(<EOL>changeset<EOL>)<EOL>return ET.fromstring(requests.get(url).content)<EOL>", "docstring": "Get the changeset using the OSM API and return the content as a XML\n    ElementTree.\n\n    Args:\n        changeset: the id of the changeset.", "id": "f4379:m2"}
{"signature": "def changeset_info(changeset):", "body": "keys = [tag.attrib.get('<STR_LIT:k>') for tag in changeset.getchildren()]<EOL>keys += ['<STR_LIT:id>', '<STR_LIT:user>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>values = [tag.attrib.get('<STR_LIT:v>') for tag in changeset.getchildren()]<EOL>values += [<EOL>changeset.get('<STR_LIT:id>'), changeset.get('<STR_LIT:user>'), changeset.get('<STR_LIT>'),<EOL>get_bounds(changeset), changeset.get('<STR_LIT>')<EOL>]<EOL>return dict(zip(keys, values))<EOL>", "docstring": "Return a dictionary with id, user, user_id, bounds, date of creation\n    and all the tags of the changeset.\n\n    Args:\n        changeset: the XML string of the changeset.", "id": "f4379:m1"}
{"signature": "def full_analysis(self):", "body": "self.count()<EOL>self.verify_words()<EOL>self.verify_user()<EOL>if self.review_requested == '<STR_LIT:yes>':<EOL><INDENT>self.label_suspicious('<STR_LIT>')<EOL><DEDENT>", "docstring": "Execute the count and verify_words methods.", "id": "f4379:c2:m3"}
{"signature": "def label_suspicious(self, reason):", "body": "self.suspicion_reasons.append(reason)<EOL>self.is_suspect = True<EOL>", "docstring": "Add suspicion reason and set the suspicious flag.", "id": "f4379:c2:m2"}
{"signature": "def verify_words(self):", "body": "if self.comment:<EOL><INDENT>if find_words(self.comment, self.suspect_words, self.excluded_words):<EOL><INDENT>self.label_suspicious('<STR_LIT>')<EOL><DEDENT><DEDENT>if self.source:<EOL><INDENT>for word in self.illegal_sources:<EOL><INDENT>if word in self.source.lower():<EOL><INDENT>self.label_suspicious('<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT><DEDENT>if self.imagery_used:<EOL><INDENT>for word in self.illegal_sources:<EOL><INDENT>if word in self.imagery_used.lower():<EOL><INDENT>self.label_suspicious('<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT><DEDENT>self.suspicion_reasons = list(set(self.suspicion_reasons))<EOL>", "docstring": "Verify the fields source, imagery_used and comment of the changeset\n        for some suspect words.", "id": "f4379:c2:m5"}
{"signature": "def verify_editor(self):", "body": "powerful_editors = [<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'<EOL>]<EOL>if self.editor is not None:<EOL><INDENT>for editor in powerful_editors:<EOL><INDENT>if editor in self.editor.lower():<EOL><INDENT>self.powerfull_editor = True<EOL>break<EOL><DEDENT><DEDENT>if '<STR_LIT>' in self.editor:<EOL><INDENT>trusted_hosts = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>]<EOL>if self.host.split('<STR_LIT>')[-<NUM_LIT:1>].strip('<STR_LIT:/>') not in trusted_hosts:<EOL><INDENT>self.label_suspicious('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self.powerfull_editor = True<EOL>self.label_suspicious('<STR_LIT>')<EOL><DEDENT>", "docstring": "Verify if the software used in the changeset is a powerfull_editor.", "id": "f4379:c2:m6"}
{"signature": "@click.command('<STR_LIT>')<EOL>@click.argument('<STR_LIT:id>', type=int, metavar='<STR_LIT>')<EOL>def cli(id):", "body": "ch = Analyse(id)<EOL>ch.full_analysis()<EOL>click.echo(<EOL>'<STR_LIT>' % (ch.create, ch.modify, ch.delete)<EOL>)<EOL>if ch.is_suspect:<EOL><INDENT>click.echo('<STR_LIT>'.format(<EOL>id,<EOL>'<STR_LIT:U+002CU+0020>'.join(ch.suspicion_reasons)<EOL>))<EOL><DEDENT>else:<EOL><INDENT>click.echo('<STR_LIT>' % id)<EOL><DEDENT>", "docstring": "Analyse an OpenStreetMap changeset.", "id": "f4381:m0"}
{"signature": "def process_args():", "body": "import argparse<EOL>parser = argparse.ArgumentParser()<EOL>parser.add_argument('<STR_LIT>', nargs='<STR_LIT:*>', help='<STR_LIT>')<EOL>return parser.parse_args()<EOL>", "docstring": "Return processed arguments (options and positional arguments).", "id": "f4385:m4"}
{"signature": "def main():", "body": "return <NUM_LIT:0> if check(process_args()) else <NUM_LIT:1><EOL>", "docstring": "Run main.", "id": "f4385:m6"}
{"signature": "def version():", "body": "with open('<STR_LIT>') as input_file:<EOL><INDENT>for line in input_file:<EOL><INDENT>if line.startswith('<STR_LIT>'):<EOL><INDENT>return ast.parse(line).body[<NUM_LIT:0>].value.s<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Return version string.", "id": "f4387:m0"}
{"signature": "def untokenize(tokens):", "body": "text = '<STR_LIT>'<EOL>previous_line = '<STR_LIT>'<EOL>last_row = <NUM_LIT:0><EOL>last_column = -<NUM_LIT:1><EOL>last_non_whitespace_token_type = None<EOL>for (token_type, token_string, start, end, line) in tokens:<EOL><INDENT>if TOKENIZE_HAS_ENCODING and token_type == tokenize.ENCODING:<EOL><INDENT>continue<EOL><DEDENT>(start_row, start_column) = start<EOL>(end_row, end_column) = end<EOL>if (<EOL>last_non_whitespace_token_type != tokenize.COMMENT and<EOL>start_row > last_row and<EOL>previous_line.endswith(('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'))<EOL>):<EOL><INDENT>text += previous_line[len(previous_line.rstrip('<STR_LIT>')):]<EOL><DEDENT>if start_row > last_row:<EOL><INDENT>last_column = <NUM_LIT:0><EOL><DEDENT>if start_column > last_column:<EOL><INDENT>text += line[last_column:start_column]<EOL><DEDENT>text += token_string<EOL>previous_line = line<EOL>last_row = end_row<EOL>last_column = end_column<EOL>if token_type not in WHITESPACE_TOKENS:<EOL><INDENT>last_non_whitespace_token_type = token_type<EOL><DEDENT><DEDENT>return text<EOL>", "docstring": "Return source code based on tokens.\n\n    This is like tokenize.untokenize(), but it preserves spacing between\n    tokens. So if the original soure code had multiple spaces between\n    some tokens or if escaped newlines were used, those things will be\n    reflected by untokenize().", "id": "f4388:m0"}
{"signature": "def get(self, q=None, page=None):", "body": "<EOL>etag = generate_etag(current_ext.content_version.encode('<STR_LIT:utf8>'))<EOL>self.check_etag(etag, weak=True)<EOL>res = jsonify(current_ext.styles)<EOL>res.set_etag(etag)<EOL>return res<EOL>", "docstring": "Get styles.", "id": "f4394:c0:m0"}
{"signature": "def __init__(self, app=None):", "body": "if app:<EOL><INDENT>self._state = self.init_app(app)<EOL><DEDENT>", "docstring": "Extension initialization.", "id": "f4395:c1:m0"}
{"signature": "def init_app(self, app):", "body": "state = _InvenioCSLRESTState(app)<EOL>app.extensions['<STR_LIT>'] = state<EOL>return state<EOL>", "docstring": "Flask application initialization.", "id": "f4395:c1:m1"}
{"signature": "def element_or_none(self, using, value):", "body": "try:<EOL><INDENT>return self._execute(Command.FIND_ELEMENT, {<EOL>'<STR_LIT>': using,<EOL>'<STR_LIT:value>': value<EOL>})<EOL><DEDENT>except:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Check if an element in the current context.\n\n        Support:\n            Android iOS Web(WebView)\n\n        Args:\n            using(str): The element location strategy.\n            value(str): The value of the location strategy.\n\n        Returns:\n            Return Element if the element does exists and return None otherwise.\n\n        Raises:\n            WebDriverException.", "id": "f4426:c0:m54"}
{"signature": "def get_window_size(self, window_handle='<STR_LIT>'):", "body": "return self._execute(Command.GET_WINDOW_SIZE,<EOL>{'<STR_LIT>': window_handle})<EOL>", "docstring": "Gets the width and height of the current window.\n\n        Support:\n            Web(WebView)\n\n        Args:\n            window_handle(str): Identifier of window_handle,\n                default to 'current'.\n\n        Returns:\n            A dict contains width and height", "id": "f4426:c0:m21"}
{"signature": "def element(self, using, value):", "body": "return self._execute(Command.FIND_ELEMENT, {<EOL>'<STR_LIT>': using,<EOL>'<STR_LIT:value>': value<EOL>})<EOL>", "docstring": "Find an element in the current context.\n\n        Support:\n            Android iOS Web(WebView)\n\n        Args:\n            using(str): The element location strategy.\n            value(str): The value of the location strategy.\n\n        Returns:\n            WebElement Object.\n\n        Raises:\n            WebDriverException.", "id": "f4426:c0:m52"}
{"signature": "@fluent<EOL><INDENT>def set_window_size(self, width, height, window_handle='<STR_LIT>'):<DEDENT>", "body": "self._execute(Command.SET_WINDOW_SIZE, {<EOL>'<STR_LIT:width>': int(width),<EOL>'<STR_LIT>': int(height),<EOL>'<STR_LIT>': window_handle})<EOL>", "docstring": "Sets the width and height of the current window.\n\n        Support:\n            Web(WebView)\n\n        Args:\n            width(int): the width in pixels.\n            height(int): the height in pixels.\n            window_handle(str): Identifier of window_handle,\n                default to 'current'.\n\n        Returns:\n            WebDriver Object.", "id": "f4426:c0:m20"}
{"signature": "@property<EOL><INDENT>def source(self):<DEDENT>", "body": "return self._execute(Command.GET_PAGE_SOURCE)<EOL>", "docstring": "Gets the source of the current page.\n\n        Support:\n            Android iOS Web(WebView)\n\n        Returns:\n            Return the source of the current page.", "id": "f4426:c0:m35"}
{"signature": "@fluent<EOL><INDENT>def add_cookie(self, cookie_dict):<DEDENT>", "body": "if not isinstance(cookie_dict, dict):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if not cookie_dict.get(<EOL>'<STR_LIT:name>', None<EOL>) or not cookie_dict.get(<EOL>'<STR_LIT:value>', None):<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>self._execute(Command.ADD_COOKIE, {'<STR_LIT>': cookie_dict})<EOL>", "docstring": "Set a cookie.\n\n        Support:\n            Web(WebView)\n\n        Args:\n            cookie_dict: A dictionary contain keys: \"name\", \"value\",\n                [\"path\"], [\"domain\"], [\"secure\"], [\"httpOnly\"], [\"expiry\"].\n\n        Returns:\n            WebElement Object.", "id": "f4426:c0:m42"}
{"signature": "@fluent<EOL><INDENT>def alert_keys(self, keys):<DEDENT>", "body": "self._execute(Command.SET_ALERT_VALUE, {<EOL>'<STR_LIT:text>': keys<EOL>})<EOL>", "docstring": "Sends keystrokes to a JavaScript prompt() dialog.\n\n        Support:\n            iOS\n\n        Args:\n            keys(str): The keys send to.", "id": "f4426:c0:m49"}
{"signature": "@property<EOL><INDENT>def contexts(self):<DEDENT>", "body": "return self._execute(Command.CONTEXT_HANDLES)<EOL>", "docstring": "returns a list of available contexts\n\n        Support:\n            Android iOS", "id": "f4426:c0:m25"}
{"signature": "def _unwrap_el(self, value):", "body": "if isinstance(value, dict) and '<STR_LIT>' in value:<EOL><INDENT>element_id = value.get('<STR_LIT>')<EOL>return WebElement(element_id, self)<EOL><DEDENT>elif isinstance(value, list) and not isinstance(value, str):<EOL><INDENT>return [self._unwrap_el(item) for item in value]<EOL><DEDENT>else:<EOL><INDENT>return value<EOL><DEDENT>", "docstring": "Convert {'Element': 1234} to WebElement Object\n\n        Args:\n            value(str|list|dict): The value field in the json response.\n\n        Returns:\n            The unwrapped value.", "id": "f4426:c0:m3"}
{"signature": "@fluent<EOL><INDENT>def switch_to_window(self, window_name):<DEDENT>", "body": "data = {<EOL>'<STR_LIT:name>': window_name<EOL>}<EOL>self._execute(Command.SWITCH_TO_WINDOW, data)<EOL>", "docstring": "Switch to the given window.\n\n        Support:\n            Web(WebView)\n\n        Args:\n            window_name(str): The window to change focus to.\n\n        Returns:\n            WebDriver Object.", "id": "f4426:c0:m17"}
{"signature": "@property<EOL><INDENT>def current_window_handle(self):<DEDENT>", "body": "return self._execute(Command.GET_CURRENT_WINDOW_HANDLE)<EOL>", "docstring": "Returns the handle of the current window.\n\n        Support:\n            Web(WebView)", "id": "f4426:c0:m15"}
{"signature": "@fluent<EOL><INDENT>def move_to(self, element, x=<NUM_LIT:0>, y=<NUM_LIT:0>):<DEDENT>", "body": "self._execute(Command.MOVE_TO, {<EOL>'<STR_LIT>': element.element_id,<EOL>'<STR_LIT:x>': x,<EOL>'<STR_LIT:y>': y<EOL>})<EOL>", "docstring": "Deprecated use element.touch('drag', { toX, toY, duration(s) }) instead.\n            Move the mouse by an offset of the specificed element.\n\n        Support:\n            Android\n\n        Args:\n            element(WebElement): WebElement Object.\n            x(float): X offset to move to, relative to the\n                      top-left corner of the element.\n            y(float): Y offset to move to, relative to the\n                      top-left corner of the element.\n\n        Returns:\n            WebDriver object.", "id": "f4426:c0:m27"}
{"signature": "@fluent<EOL><INDENT>def attach(self, session_id):<DEDENT>", "body": "self.session_id = session_id<EOL>", "docstring": "Attach to given Session.\n\n        Support:\n            Android iOS Web(WebView)\n\n        Args:\n            session_id(str): The given session ID\n\n        Returns:\n            WebDriver Object.", "id": "f4426:c0:m6"}
{"signature": "@fluent<EOL><INDENT>def tap(self, element):<DEDENT>", "body": "self._execute(Command.SINGLE_TAP, {<EOL>'<STR_LIT>': element.element_id,<EOL>})<EOL>", "docstring": "Deprecated use element.touch('tap') instead.\n            Single tap on the touch enabled device.\n\n        Support:\n            Android iOS\n\n        Args:\n            element(WebElement): WebElement Object to single tap on.\n\n        Returns:\n            WebDriver object.", "id": "f4426:c0:m29"}
{"signature": "def execute_script(self, script, *args):", "body": "return self._execute(Command.EXECUTE_SCRIPT, {<EOL>'<STR_LIT>': script,<EOL>'<STR_LIT:args>': list(args)})<EOL>", "docstring": "Execute JavaScript Synchronously in current context.\n\n        Support:\n            Web(WebView)\n\n        Args:\n            script: The JavaScript to execute.\n            *args: Arguments for your JavaScript.\n\n        Returns:\n            Returns the return value of the function.", "id": "f4426:c0:m36"}
{"signature": "@fluent<EOL><INDENT>def dismiss_alert(self):<DEDENT>", "body": "self._execute(Command.DISMISS_ALERT)<EOL>", "docstring": "Dismisses the alert available.\n\n        Support:\n            iOS", "id": "f4426:c0:m47"}
{"signature": "def element_if_exists(self, using, value):", "body": "try:<EOL><INDENT>self._execute(Command.FIND_ELEMENT, {<EOL>'<STR_LIT>': using,<EOL>'<STR_LIT:value>': value<EOL>})<EOL>return True<EOL><DEDENT>except:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Check if an element in the current context.\n\n        Support:\n            Android iOS Web(WebView)\n\n        Args:\n            using(str): The element location strategy.\n            value(str): The value of the location strategy.\n\n        Returns:\n            Return True if the element does exists and return False otherwise.\n\n        Raises:\n            WebDriverException.", "id": "f4426:c0:m53"}
{"signature": "def get_active_element(self):", "body": "return self._execute(Command.GET_ACTIVE_ELEMENT)<EOL>", "docstring": "Returns the active element in current context.\n\n        Support:\n            Web(WebView)", "id": "f4426:c0:m34"}
{"signature": "def take_screenshot(self):", "body": "return self._execute(Command.SCREENSHOT)<EOL>", "docstring": "Gets the screenshot of the current window\n           as a base64 encoded string.\n\n        Support:\n            Android iOS Web(WebView)\n\n        Returns:\n            Base64 encoded string of the screenshot.", "id": "f4426:c0:m50"}
{"signature": "@fluent<EOL><INDENT>def close(self):<DEDENT>", "body": "self._execute(Command.CLOSE)<EOL>", "docstring": "Closes the current window.\n\n        Support:\n            Web(WebView)\n\n        Returns:\n            WebDriver Object.", "id": "f4426:c0:m16"}
{"signature": "@context.setter<EOL><INDENT>def context(self, new_context):<DEDENT>", "body": "self._execute(Command.SWITCH_TO_CONTEXT, {\"<STR_LIT:name>\": new_context})<EOL>", "docstring": "sets the current context\n\n        Support:\n            Android iOS", "id": "f4426:c0:m26"}
{"signature": "@fluent<EOL><INDENT>def init(self):<DEDENT>", "body": "resp = self._execute(Command.NEW_SESSION, {<EOL>'<STR_LIT>': self.desired_capabilities<EOL>}, False)<EOL>resp.raise_for_status()<EOL>self.session_id = str(resp.session_id)<EOL>self.capabilities = resp.value<EOL>", "docstring": "Create Session by desiredCapabilities\n\n        Support:\n            Android iOS Web(WebView)\n\n        Returns:\n            WebDriver Object.", "id": "f4426:c0:m7"}
{"signature": "@property<EOL><INDENT>def sessions(self):<DEDENT>", "body": "return self._execute(Command.GET_ALL_SESSIONS)<EOL>", "docstring": "Gets all the sessions of the webdriver server.\n\n        Support:\n            Android iOS Web(WebView)\n\n        Returns:\n            Return the URL of the current page.", "id": "f4426:c0:m5"}
{"signature": "def wait_for_element(<EOL>self, using, value, timeout=<NUM_LIT>,<EOL>interval=<NUM_LIT:1000>, asserter=is_displayed):", "body": "if not callable(asserter):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>@retry(<EOL>retry_on_exception=lambda ex: isinstance(ex, WebDriverException),<EOL>stop_max_delay=timeout,<EOL>wait_fixed=interval<EOL>)<EOL>def _wait_for_element(ctx, using, value):<EOL><INDENT>el = ctx.element(using, value)<EOL>asserter(el)<EOL>return el<EOL><DEDENT>return _wait_for_element(self, using, value)<EOL>", "docstring": "Wait for element till satisfy the given condition\n\n        Support:\n            Android iOS Web(WebView)\n\n        Args:\n            using(str): The element location strategy.\n            value(str): The value of the location strategy.\n            timeout(int): How long we should be retrying stuff.\n            interval(int): How long between retries.\n            asserter(callable): The asserter func to determine the result.\n\n        Returns:\n            Return the Element.\n\n        Raises:\n            WebDriverException.", "id": "f4426:c0:m57"}
{"signature": "@property<EOL><INDENT>def current_url(self):<DEDENT>", "body": "return self._execute(Command.GET_CURRENT_URL)<EOL>", "docstring": "Gets the URL of the current page.\n\n        Support:\n            Web(WebView)\n\n        Returns:\n            Return the URL of the current page.", "id": "f4426:c0:m10"}
{"signature": "@fluent<EOL><INDENT>def maximize_window(self):<DEDENT>", "body": "self._execute(Command.MAXIMIZE_WINDOW,<EOL>{\"<STR_LIT>\": \"<STR_LIT>\"})<EOL>", "docstring": "Maximizes the current window.\n\n        Support:\n            Web(WebView)\n\n        Returns:\n            WebDriver Object.", "id": "f4426:c0:m19"}
{"signature": "@property<EOL><INDENT>def window_handles(self):<DEDENT>", "body": "return self._execute(Command.GET_WINDOW_HANDLES)<EOL>", "docstring": "Returns the handles of all windows within the current session.\n\n        Support:\n            Web(WebView)", "id": "f4426:c0:m18"}
{"signature": "def wait_for_elements(<EOL>self, using, value, timeout=<NUM_LIT>,<EOL>interval=<NUM_LIT:1000>, asserter=is_displayed):", "body": "if not callable(asserter):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>@retry(<EOL>retry_on_exception=lambda ex: isinstance(ex, WebDriverException),<EOL>stop_max_delay=timeout,<EOL>wait_fixed=interval<EOL>)<EOL>def _wait_for_elements(ctx, using, value):<EOL><INDENT>els = ctx.elements(using, value)<EOL>if not len(els):<EOL><INDENT>raise WebDriverException('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>el = els[<NUM_LIT:0>]<EOL>asserter(el)<EOL>return els<EOL><DEDENT><DEDENT>return _wait_for_elements(self, using, value)<EOL>", "docstring": "Wait for elements till satisfy the given condition\n\n        Support:\n            Android iOS Web(WebView)\n\n        Args:\n            using(str): The element location strategy.\n            value(str): The value of the location strategy.\n            timeout(int): How long we should be retrying stuff.\n            interval(int): How long between retries.\n            asserter(callable): The asserter func to determine the result.\n\n        Returns:\n            Return the list of Element if any of them satisfy the condition.\n\n        Raises:\n            WebDriverException.", "id": "f4426:c0:m58"}
{"signature": "@fluent<EOL><INDENT>def send_keys(self, value):<DEDENT>", "body": "self._execute(Command.SEND_KEYS_TO_ACTIVE_ELEMENT, {<EOL>'<STR_LIT:value>': value_to_key_strokes(value)<EOL>})<EOL>", "docstring": "Send a sequence of key strokes.\n\n        Support:\n            Android iOS Web(WebView)\n\n        Args:\n            value(str|int|list): value can be a string,\n              int or a list contains defined Keys.", "id": "f4426:c0:m31"}
{"signature": "@fluent<EOL><INDENT>def get(self, url):<DEDENT>", "body": "self._execute(Command.GET, {'<STR_LIT:url>': url})<EOL>", "docstring": "Loads a web page in the current browser session.\n\n        Support:\n            Web(WebView)\n\n        Args:\n            url(str): The URL to navigate to.\n\n        Returns:\n            WebDriver Object.\n\n        Raises:\n            WebDriverException.", "id": "f4426:c0:m9"}
{"signature": "def _wrap_el(self, value):", "body": "if isinstance(value, dict):<EOL><INDENT>return {k: self._wrap_el(v) for k, v in value.items()}<EOL><DEDENT>elif isinstance(value, WebElement):<EOL><INDENT>return {'<STR_LIT>': value.element_id}<EOL><DEDENT>elif isinstance(value, list) and not isinstance(value, str):<EOL><INDENT>return [self._wrap_el(item) for item in value]<EOL><DEDENT>else:<EOL><INDENT>return value<EOL><DEDENT>", "docstring": "Convert WebElement Object to {'Element': 1234}\n\n        Args:\n            value(str|list|dict): The local value.\n\n        Returns:\n            The wrapped value.", "id": "f4426:c0:m4"}
{"signature": "@fluent<EOL><INDENT>def execute_async_script(self, script, *args):<DEDENT>", "body": "return self._execute(Command.EXECUTE_ASYNC_SCRIPT, {<EOL>'<STR_LIT>': script,<EOL>'<STR_LIT:args>': list(args)})<EOL>", "docstring": "Execute JavaScript Asynchronously in current context.\n\n        Support:\n            Web(WebView)\n\n        Args:\n            script: The JavaScript to execute.\n            *args: Arguments for your JavaScript.\n\n        Returns:\n            Returns the return value of the function.", "id": "f4426:c0:m37"}
{"signature": "def get_window_position(self, window_handle='<STR_LIT>'):", "body": "return self._execute(Command.GET_WINDOW_POSITION, {<EOL>'<STR_LIT>': window_handle})<EOL>", "docstring": "Gets the x,y position of the current window.\n\n        Support:\n            Web(WebView)\n\n        Args:\n            window_handle(str): Identifier of window_handle,\n                default to 'current'.\n\n        Usage:\n            driver.get_window_position()", "id": "f4426:c0:m23"}
{"signature": "def format_map(self, format_string, mapping):", "body": "return self.vformat(format_string, args=None, kwargs=mapping)<EOL>", "docstring": "format a string by a map\n\n        Args:\n            format_string(str): A format string\n            mapping(dict): A map to format the string\n\n        Returns:\n            A formatted string.\n\n        Raises:\n            KeyError: if key is not provided by the given map.", "id": "f4428:c0:m3"}
{"signature": "def add_element_extension_method(Klass):", "body": "def add_element_method(Klass, using):<EOL><INDENT>locator = using.name.lower()<EOL>find_element_name = \"<STR_LIT>\" + locator<EOL>find_element_if_exists_name = \"<STR_LIT>\" + locator + \"<STR_LIT>\"<EOL>find_element_or_none_name = \"<STR_LIT>\" + locator + \"<STR_LIT>\"<EOL>wait_for_element_name = \"<STR_LIT>\" + locator<EOL>find_elements_name = \"<STR_LIT>\" + locator<EOL>wait_for_elements_name = \"<STR_LIT>\" + locator<EOL>def find_element(self, value):<EOL><INDENT>return self.element(using.value, value)<EOL><DEDENT>find_element.__name__ = find_element_name<EOL>find_element.__doc__ = (<EOL>\"<STR_LIT>\".format(using.value) +<EOL>\"<STR_LIT>\"<EOL>)<EOL>def find_element_if_exists(self, value):<EOL><INDENT>return self.element_if_exists(using.value, value)<EOL><DEDENT>find_element_if_exists.__name__ = find_element_if_exists_name<EOL>find_element_if_exists.__doc__ = (<EOL>\"<STR_LIT>\".format(using.value) +<EOL>\"<STR_LIT>\"<EOL>)<EOL>def find_element_or_none(self, value):<EOL><INDENT>return self.element_or_none(using.value, value)<EOL><DEDENT>find_element_or_none.__name__ = find_element_or_none_name<EOL>find_element_or_none.__doc__ = (<EOL>\"<STR_LIT>\".format(using.value) +<EOL>\"<STR_LIT>\"<EOL>)<EOL>def wait_for_element_by(self, *args, **kwargs):<EOL><INDENT>return self.wait_for_element(using.value, *args, **kwargs)<EOL><DEDENT>wait_for_element_by.__name__ = wait_for_element_name<EOL>wait_for_element_by.__doc__ = (<EOL>\"<STR_LIT>\".format(using.value) +<EOL>\"<STR_LIT>\"<EOL>)<EOL>def find_elements(self, value):<EOL><INDENT>return self.elements(using.value, value)<EOL><DEDENT>find_elements.__name__ = find_elements_name<EOL>find_elements.__doc__ = (<EOL>\"<STR_LIT>\".format(using.value) +<EOL>\"<STR_LIT>\"<EOL>)<EOL>def wait_for_elements_available(self, *args, **kwargs):<EOL><INDENT>return self.wait_for_elements(using.value, *args, **kwargs)<EOL><DEDENT>wait_for_elements_available.__name__ = 
wait_for_elements_name<EOL>wait_for_elements_available.__doc__ = (<EOL>\"<STR_LIT>\".format(using.value) +<EOL>\"<STR_LIT>\"<EOL>)<EOL>setattr(Klass, find_element_name, find_element)<EOL>setattr(Klass, find_element_if_exists_name, find_element_if_exists)<EOL>setattr(Klass, find_element_or_none_name, find_element_or_none)<EOL>setattr(Klass, wait_for_element_name, wait_for_element_by)<EOL>setattr(Klass, find_elements_name, find_elements)<EOL>setattr(Klass, wait_for_elements_name, wait_for_elements_available)<EOL><DEDENT>for locator in iter(Locator):<EOL><INDENT>add_element_method(Klass, locator)<EOL><DEDENT>", "docstring": "Add element_by alias and extension' methods(if_exists/or_none).", "id": "f4428:m0"}
{"signature": "def fluent(func):", "body": "@wraps(func)<EOL>def fluent_interface(instance, *args, **kwargs):<EOL><INDENT>ret = func(instance, *args, **kwargs)<EOL>if ret is not None:<EOL><INDENT>return ret<EOL><DEDENT>return instance<EOL><DEDENT>return fluent_interface<EOL>", "docstring": "Fluent interface decorator to return self if method return None.", "id": "f4428:m1"}
{"signature": "def value_to_single_key_strokes(value):", "body": "result = []<EOL>if isinstance(value, Integral):<EOL><INDENT>value = str(value)<EOL><DEDENT>for v in value:<EOL><INDENT>if isinstance(v, Keys):<EOL><INDENT>result.append(v.value)<EOL><DEDENT>elif isinstance(v, Integral):<EOL><INDENT>result.append(str(v))<EOL><DEDENT>else:<EOL><INDENT>result.append(v)<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Convert value to a list of key strokes\n    >>> value_to_single_key_strokes(123)\n    ['1', '2', '3']\n    >>> value_to_single_key_strokes('123')\n    ['1', '2', '3']\n    >>> value_to_single_key_strokes([1, 2, 3])\n    ['1', '2', '3']\n    >>> value_to_single_key_strokes(['1', '2', '3'])\n    ['1', '2', '3']\n    Args:\n        value(int|str|list)\n    Returns:\n        A list of string.", "id": "f4428:m3"}
{"signature": "def get_used_kwargs(self):", "body": "return self._used_kwargs<EOL>", "docstring": "Get used kwargs after formatting.", "id": "f4428:c0:m4"}
{"signature": "def __init__(self, error=None, message=None, screen=None, stacktrace=None):", "body": "self.error = error<EOL>self.message = message<EOL>self.screen = screen<EOL>self.stacktrace = stacktrace<EOL>", "docstring": "Initialize the WebDriverException", "id": "f4429:c1:m0"}
{"signature": "@property<EOL><INDENT>def text(self):<DEDENT>", "body": "return self._execute(Command.GET_ELEMENT_TEXT)<EOL>", "docstring": "Return the text of the element.\n           This is equivalent to calling element.innerText.\n\n        Support:\n            Android iOS Web(WebView)", "id": "f4435:c0:m19"}
{"signature": "@fluent<EOL><INDENT>def tap(self):<DEDENT>", "body": "self._driver.tap(self)<EOL>", "docstring": "Deprecated use touch('tap') instead.\n            Single tap on the touch enabled device.\n\n        Support:\n            Android iOS\n\n        Args:\n            element(WebElement): WebElement Object to single tap on.\n\n        Returns:\n            WebElement object.", "id": "f4435:c0:m29"}
{"signature": "@fluent<EOL><INDENT>def touch(self, name, args=None):<DEDENT>", "body": "if isinstance(name, list) and not isinstance(name, str):<EOL><INDENT>for obj in name:<EOL><INDENT>obj['<STR_LIT>'] = self.element_id<EOL><DEDENT>actions = name<EOL><DEDENT>elif isinstance(name, str):<EOL><INDENT>if not args:<EOL><INDENT>args = {}<EOL><DEDENT>args['<STR_LIT:type>'] = name<EOL>args['<STR_LIT>'] = self.element_id<EOL>actions = [args]<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>self._driver._execute(Command.PERFORM_ACTIONS, {<EOL>'<STR_LIT>': actions<EOL>})<EOL>", "docstring": "Apply touch actions on devices. Such as, tap/doubleTap/press/pinch/rotate/drag.\n            See more on https://github.com/alibaba/macaca/issues/366.\n\n        Support:\n            Android iOS\n\n        Args:\n            name(str): Name of the action\n            args(dict): Arguments of the action\n\n        Returns:\n            WebDriver Object.\n\n        Raises:\n            WebDriverException.", "id": "f4435:c0:m32"}
{"signature": "@fluent<EOL><INDENT>def save_screenshot(self, filename, quietly = False):<DEDENT>", "body": "imgData = self.take_screenshot()<EOL>try:<EOL><INDENT>with open(filename, \"<STR_LIT:wb>\") as f:<EOL><INDENT>f.write(b64decode(imgData.encode('<STR_LIT:ascii>')))<EOL><DEDENT><DEDENT>except IOError as err:<EOL><INDENT>if not quietly:<EOL><INDENT>raise err<EOL><DEDENT><DEDENT>", "docstring": "Save the screenshot to local.\n\n        Support:\n            Android iOS Web(WebView)\n\n        Args:\n            filename(str): The path to save the image.\n            quietly(bool): If True, omit the IOError when\n                failed to save the image.\n\n        Returns:\n            WebElement Object.\n\n        Raises:\n            WebDriverException.\n            IOError.", "id": "f4435:c0:m31"}
{"signature": "@fluent<EOL><INDENT>def send_keys(self, value):<DEDENT>", "body": "self._execute(Command.SEND_KEYS_TO_ELEMENT, {<EOL>'<STR_LIT:value>': value_to_key_strokes(value)})<EOL>", "docstring": "Send a sequence of key strokes to an element.\n\n        Support:\n            Android iOS Web(WebView)\n\n        Args:\n            value(str|int|list): value can be a string,\n              int or a list contains defined Keys.", "id": "f4435:c0:m26"}
{"signature": "def wait_for(<EOL>self, timeout=<NUM_LIT>, interval=<NUM_LIT:1000>,<EOL>asserter=lambda x: x):", "body": "if not callable(asserter):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>@retry(<EOL>retry_on_exception=lambda ex: isinstance(ex, WebDriverException),<EOL>stop_max_delay=timeout,<EOL>wait_fixed=interval<EOL>)<EOL>def _wait_for(el):<EOL><INDENT>asserter(el)<EOL>return el<EOL><DEDENT>return _wait_for(self)<EOL>", "docstring": "Wait for element till given condition\n\n        Support:\n            Android iOS Web(WebView)\n\n        Args:\n            timeout(int): How long we should be retrying stuff.\n            interval(int): How long between retries.\n            asserter(callable): The asserter func to determine the result.\n\n        Returns:\n            Return the Element.\n\n        Raises:\n            WebDriverException.", "id": "f4435:c0:m11"}
{"signature": "@fluent<EOL><INDENT>def clear(self):<DEDENT>", "body": "self._execute(Command.CLEAR_ELEMENT)<EOL>", "docstring": "The Element Clear command scrolls into view\n           a submittable element excluding buttons or\n           editable element, and then attempts to clear\n           its value, checkedness, or text content.\n\n        Support:\n            Android iOS Web(WebView)", "id": "f4435:c0:m24"}
{"signature": "@property<EOL><INDENT>def size(self):<DEDENT>", "body": "return self._execute(Command.GET_ELEMENT_SIZE)<EOL>", "docstring": "The size of the given web element in pixels.\n\n        Support:\n            Web(WebView)\n\n        Returns:\n            A dict contains:\n            height(float): Height of the web element's bounding rectangle.\n            width(float): Width of the web element's bounding rectangle.", "id": "f4435:c0:m22"}
{"signature": "def is_displayed(self):", "body": "return self._execute(Command.IS_ELEMENT_DISPLAYED)<EOL>", "docstring": "Whether the element is visible.\n\n        Support:\n            Android Web(WebView)", "id": "f4435:c0:m14"}
{"signature": "def take_screenshot(self):", "body": "return self._execute(Command.ELEMENT_SCREENSHOT)<EOL>", "docstring": "Gets the screenshot of the current element\n           as a base64 encoded string.\n\n        Support:\n            Android iOS Web(WebView)\n\n        Returns:\n            Base64 encoded string of the screenshot.", "id": "f4435:c0:m30"}
{"signature": "def elements(self, using, value):", "body": "return self._execute(Command.FIND_CHILD_ELEMENTS, {<EOL>'<STR_LIT>': using,<EOL>'<STR_LIT:value>': value<EOL>})<EOL>", "docstring": "find elements in the current element.\n\n        Support:\n            Android iOS Web(WebView)\n\n        Args:\n            using(str): The element location strategy.\n            value(str): The value of the location strategy.\n\n        Returns:\n            Return a List<Element | None>, if no element matched, the list is empty.\n\n        Raises:\n            WebDriverException.", "id": "f4435:c0:m10"}
{"signature": "@fluent<EOL><INDENT>def click(self):<DEDENT>", "body": "self._execute(Command.CLICK_ELEMENT)<EOL>", "docstring": "The Element Click command scrolls into view\n           the element and then attempts to click the\n           centre of the visible area of the first element\n           of the DOMRect sequence. In case the element is\n           not displayed, an element not visible error is returned.\n\n        Support:\n            Android iOS Web(WebView)", "id": "f4435:c0:m23"}
{"signature": "def execute(self, command, data={}):", "body": "method, uri = command<EOL>try:<EOL><INDENT>path = self._formatter.format_map(uri, data)<EOL>body = self._formatter.get_unused_kwargs()<EOL>url = \"<STR_LIT>\".format(self._url, path)<EOL>return self._request(method, url, body)<EOL><DEDENT>except KeyError as err:<EOL><INDENT>LOGGER.debug(<EOL>'<STR_LIT>'.format(uri, err))<EOL>raise<EOL><DEDENT>", "docstring": "Format the endpoint url by data and then request the remote server.\n\n        Args:\n            command(Command): WebDriver command to be executed.\n            data(dict): Data fulfill the uri template and json body.\n\n        Returns:\n            A dict represent the json body from server response.\n\n        Raises:\n            KeyError: Data cannot fulfill the variable which command needed.\n            ConnectionError: Meet network problem (e.g. DNS failure,\n                refused connection, etc).\n            Timeout: A request times out.\n            HTTPError: HTTP request returned an unsuccessful status code.", "id": "f4436:c0:m1"}
{"signature": "def __init__(self, url='<STR_LIT>'):", "body": "self._timeout = None<EOL>if isinstance(url, str):<EOL><INDENT>parsed_url = urlparse(url)<EOL>scheme = parsed_url.scheme<EOL>netloc = parsed_url.netloc<EOL>if not scheme or not netloc:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(url))<EOL><DEDENT>elif scheme not in ('<STR_LIT:http>', '<STR_LIT>'):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(url, scheme))<EOL><DEDENT>else:<EOL><INDENT>self._url = url<EOL><DEDENT><DEDENT>elif isinstance(url, dict):<EOL><INDENT>scheme = url.get('<STR_LIT>', None)or url.get('<STR_LIT>', None)or '<STR_LIT:http>'<EOL>if scheme not in ('<STR_LIT:http>', '<STR_LIT>'):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(scheme))<EOL><DEDENT>hostname = url.get('<STR_LIT>', '<STR_LIT:127.0.0.1>')<EOL>port = url.get('<STR_LIT:port>', <NUM_LIT>)<EOL>path = url.get('<STR_LIT:path>', '<STR_LIT>')<EOL>username = url.get('<STR_LIT:username>', None)<EOL>password = url.get('<STR_LIT:password>', None)<EOL>if username and password:<EOL><INDENT>netloc = '<STR_LIT>'.format(<EOL>username, password, hostname, port)<EOL><DEDENT>else:<EOL><INDENT>netloc = '<STR_LIT>'.format(hostname, port)<EOL><DEDENT>self._url = urlunparse((scheme, netloc, path, '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(<EOL>'<STR_LIT>'.format(url))<EOL><DEDENT>self._formatter = MemorizeFormatter()<EOL>", "docstring": "Init the RemoteInvoker by remote url\n\n        Args:\n            url(str|dict): The url of remote server.\n        Defaults:\n            if url is str:\n                url = http://127.0.0.1:3456/wd/hub\n            if url is dict:\n                url = {\n                    'protocol': 'http',\n                    'hostname': '127.0.0.1',\n                    'port': 3456,\n                    'path': '/wd/hub'\n                }\n        Examples:\n            r = 
RemoteInvoker('http://127.0.0.1:3456/wd/hub')\n            r = RemoteInvoker({\n                'protocol': 'https',\n                'hostname': '127.0.0.1',\n                'port': 5678,\n                'username': 'macaca',\n                'password': '123456',\n                'path': '/how/r/u'\n            }) => \"https://macaca:123456@127.0.0.1:5678/how/r/u\"", "id": "f4436:c0:m0"}
{"signature": "def _request(self, method, url, body):", "body": "if method != '<STR_LIT:POST>' and method != '<STR_LIT>':<EOL><INDENT>body = None<EOL><DEDENT>s = Session()<EOL>LOGGER.debug(<EOL>'<STR_LIT>'.format(method, url, body))<EOL>req = Request(method, url, json=body)<EOL>prepped = s.prepare_request(req)<EOL>res = s.send(prepped, timeout=self._timeout or None)<EOL>res.raise_for_status()<EOL>return res.json()<EOL>", "docstring": "Internal method to send request to the remote server.\n\n        Args:\n            method(str): HTTP Method(GET/POST/PUT/DELET/HEAD).\n            url(str): The request url.\n            body(dict): The JSON object to be sent.\n\n        Returns:\n            A dict represent the json body from server response.\n\n        Raises:\n            ConnectionError: Meet network problem (e.g. DNS failure,\n                refused connection, etc).\n            Timeout: A request times out.\n            HTTPError: HTTP request returned an unsuccessful status code.", "id": "f4436:c0:m2"}
{"signature": "@property<EOL><INDENT>def stitched(self):<DEDENT>", "body": "return glob(_pattern(self.path, '<STR_LIT>'))<EOL>", "docstring": "List of stitched images if they are in experiment folder.", "id": "f4440:c0:m5"}
{"signature": "def field_metadata(self, well_row=<NUM_LIT:0>, well_column=<NUM_LIT:0>,<EOL>field_row=<NUM_LIT:0>, field_column=<NUM_LIT:0>):", "body": "def condition(path):<EOL><INDENT>attrs = attributes(path)<EOL>return (attrs.u == well_column and attrs.v == well_row<EOL>and attrs.x == field_column and attrs.y == field_row)<EOL><DEDENT>field = [f for f in self.fields if condition(f)]<EOL>if field:<EOL><INDENT>field = field[<NUM_LIT:0>]<EOL>filename = _pattern(field, '<STR_LIT>',<EOL>_image, extension='<STR_LIT>')<EOL>filename = glob(filename)[<NUM_LIT:0>] <EOL>return objectify.parse(filename).getroot()<EOL><DEDENT>", "docstring": "Get OME-XML metadata of given field.\n\n        Parameters\n        ----------\n        well_row : int\n            Y well coordinate. Same as --V in files.\n        well_column : int\n            X well coordinate. Same as --U in files.\n        field_row : int\n            Y field coordinate. Same as --Y in files.\n        field_column : int\n            X field coordinate. Same as --X in files.\n\n        Returns\n        -------\n        lxml.objectify.ObjectifiedElement\n            lxml object of OME-XML found in slide/chamber/field/metadata.", "id": "f4440:c0:m17"}
{"signature": "@property<EOL><INDENT>def fields(self):<DEDENT>", "body": "return glob(self._field_path)<EOL>", "docstring": "List of paths to fields.", "id": "f4440:c0:m3"}
{"signature": "@property<EOL><INDENT>def wells(self):<DEDENT>", "body": "return glob(self._well_path)<EOL>", "docstring": "List of paths to wells.", "id": "f4440:c0:m2"}
{"signature": "def well_images(self, well_row, well_column):", "body": "return list(i for i in self.images<EOL>if attribute(i, '<STR_LIT:u>') == well_column and<EOL>attribute(i, '<STR_LIT:v>') == well_row)<EOL>", "docstring": "Get list of paths to images in specified well.\n\n\n        Parameters\n        ----------\n        well_row : int\n            Starts at 0. Same as --V in files.\n        well_column : int\n            Starts at 0. Save as --U in files.\n\n        Returns\n        -------\n        list of strings\n            Paths to images or empty list if no images are found.", "id": "f4440:c0:m12"}
{"signature": "def decompress(images, delete_png=False, delete_json=False, folder=None):", "body": "if type(images) == str:<EOL><INDENT>return decompress([images])<EOL><DEDENT>filenames = copy(images) <EOL>decompressed_images = []<EOL>for orig_filename in filenames:<EOL><INDENT>debug('<STR_LIT>'.format(orig_filename))<EOL>try:<EOL><INDENT>filename, extension = os.path.splitext(orig_filename)<EOL>if folder:<EOL><INDENT>basename = os.path.basename(filename)<EOL>new_filename = os.path.join(folder, basename + '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>new_filename = filename + '<STR_LIT>'<EOL><DEDENT>if os.path.isfile(new_filename):<EOL><INDENT>decompressed_images.append(new_filename)<EOL>msg = \"<STR_LIT>\"\"<STR_LIT>\".format(orig_filename)<EOL>raise AssertionError(msg)<EOL><DEDENT>if extension != '<STR_LIT>':<EOL><INDENT>msg = \"<STR_LIT>\"\"<STR_LIT>\".format(orig_filename)<EOL>raise AssertionError(msg)<EOL><DEDENT>img = Image.open(orig_filename)<EOL>img.load() <EOL>info = {}<EOL>with open(filename + '<STR_LIT>', '<STR_LIT:r>') as f:<EOL><INDENT>tags = json.load(f)<EOL>for tag,val in tags.items():<EOL><INDENT>if tag == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>if type(val) == list:<EOL><INDENT>val = tuple(val)<EOL><DEDENT>if type(val[<NUM_LIT:0>]) == list:<EOL><INDENT>val = tuple(tuple(x) for x in val)<EOL><DEDENT>info[int(tag)] = val<EOL><DEDENT><DEDENT>if '<STR_LIT>' in tags:<EOL><INDENT>img.putpalette(tags['<STR_LIT>'])<EOL><DEDENT>debug('<STR_LIT>'.format(new_filename))<EOL>img.save(new_filename, tiffinfo=info)<EOL>decompressed_images.append(new_filename)<EOL>if delete_png:<EOL><INDENT>os.remove(orig_filename)<EOL><DEDENT>if delete_json:<EOL><INDENT>os.remove(filename + '<STR_LIT>')<EOL><DEDENT><DEDENT>except (IOError, AssertionError) as e:<EOL><INDENT>print('<STR_LIT>'.format(e))<EOL><DEDENT><DEDENT>return decompressed_images<EOL>", "docstring": "Reverse compression from tif to png and save them in original format\n    (ome.tif). 
TIFF-tags are gotten from json-files named the same as given\n    images.\n\n\n    Parameters\n    ----------\n    images : list of filenames\n        Image to decompress.\n    delete_png : bool\n        Wheter to delete PNG images.\n    delete_json : bool\n        Wheter to delete TIFF-tags stored in json files on compress.\n\n    Returns\n    -------\n    list of filenames\n        List of decompressed files.", "id": "f4440:m4"}
{"signature": "def _pattern(*names, **kwargs):", "body": "if '<STR_LIT>' not in kwargs:<EOL><INDENT>kwargs['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>return os.path.join(*names) + kwargs['<STR_LIT>']<EOL>", "docstring": "Returns globbing pattern for name1/name2/../lastname + '--*' or\n    name1/name2/../lastname + extension if parameter `extension` it set.\n\n    Parameters\n    ----------\n    names : strings\n        Which path to join. Example: _pattern('path', 'to', 'experiment') will\n        return `path/to/experiment--*`.\n    extension : string\n        If other extension then --* is wanted.\n        Example: _pattern('path', 'to', 'image', extension='*.png') will return\n        `path/to/image*.png`.\n\n    Returns\n    -------\n    string\n        Joined glob pattern string.", "id": "f4440:m8"}
{"signature": "def image(self, well_row, well_column, field_row, field_column):", "body": "return next((i for i in self.images<EOL>if attribute(i, '<STR_LIT:u>') == well_column and<EOL>attribute(i, '<STR_LIT:v>') == well_row and<EOL>attribute(i, '<STR_LIT:x>') == field_column and<EOL>attribute(i, '<STR_LIT:y>') == field_row), '<STR_LIT>')<EOL>", "docstring": "Get path of specified image.\n\n        Parameters\n        ----------\n        well_row : int\n            Starts at 0. Same as --U in files.\n        well_column : int\n            Starts at 0. Same as --V in files.\n        field_row : int\n            Starts at 0. Same as --Y in files.\n        field_column : int\n            Starts at 0. Same as --X in files.\n\n        Returns\n        -------\n        string\n            Path to image or empty string if image is not found.", "id": "f4440:c0:m11"}
{"signature": "def compress(images, delete_tif=False, folder=None):", "body": "if type(images) == str:<EOL><INDENT>return [compress_blocking(images, delete_tif, folder)]<EOL><DEDENT>filenames = copy(images) <EOL>return Parallel(n_jobs=_pools)(delayed(compress_blocking)<EOL>(image=image, delete_tif=delete_tif, folder=folder)<EOL>for image in filenames)<EOL>", "docstring": "Lossless compression. Save images as PNG and TIFF tags to json. Can be\n    reversed with `decompress`. Will run in multiprocessing, where\n    number of workers is decided by ``leicaexperiment.experiment._pools``.\n\n    Parameters\n    ----------\n    images : list of filenames\n        Images to lossless compress.\n    delete_tif : bool\n        Wheter to delete original images.\n    folder : string\n        Where to store images. Basename will be kept.\n\n    Returns\n    -------\n    list of filenames\n        List of compressed files.", "id": "f4440:m2"}
{"signature": "def attribute_as_str(path, name):", "body": "matches = re.findall('<STR_LIT>' + name.upper() + '<STR_LIT>', path)<EOL>if matches:<EOL><INDENT>return matches[-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Returns the two numbers found behind --[A-Z] in path. If several matches\n    are found, the last one is returned.\n\n    Parameters\n    ----------\n    path : string\n        String with path of file/folder to get attribute from.\n    name : string\n        Name of attribute to get. Should be A-Z or a-z (implicit converted to\n        uppercase).\n\n    Returns\n    -------\n    string\n        Returns two digit number found in path behind --name.", "id": "f4440:m6"}
{"signature": "def attributes(path):", "body": "<EOL>matches = re.findall('<STR_LIT>', path)<EOL>keys = []<EOL>values = []<EOL>for k,v in matches:<EOL><INDENT>if k in keys:<EOL><INDENT>i = keys.index(k)<EOL>del keys[i]<EOL>del values[i]<EOL><DEDENT>keys.append(k)<EOL>values.append(v)<EOL><DEDENT>lower_keys = [k.lower() for k in keys]<EOL>int_values= [int(v) for v in values]<EOL>attributes = namedtuple('<STR_LIT>', keys + lower_keys)<EOL>return attributes(*values + int_values)<EOL>", "docstring": "Get attributes from path based on format --[A-Z]. Returns namedtuple\n    with upper case attributes equal to what found in path (string) and lower\n    case as int. If path holds several occurrences of same character, only the\n    last one is kept.\n\n        >>> attrs = attributes('/folder/file--X00-X01.tif')\n        >>> print(attrs)\n        namedtuple('attributes', 'X x')('01', 1)\n        >>> print(attrs.x)\n        1\n\n    Parameters\n    ----------\n    path : string\n\n    Returns\n    -------\n    collections.namedtuple", "id": "f4440:m7"}
{"signature": "@property<EOL><INDENT>def private_ip(self):<DEDENT>", "body": "ip = None<EOL>for eth in self.networks['<STR_LIT>']:<EOL><INDENT>if eth['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>ip = eth['<STR_LIT>']<EOL>break<EOL><DEDENT><DEDENT>if ip is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return ip<EOL>", "docstring": "Private ip_address", "id": "f4446:c1:m28"}
{"signature": "def power_on(self, wait=True):", "body": "return self._action('<STR_LIT>', wait)<EOL>", "docstring": "Turn on this droplet.\n\nParameters\n----------\nwait: bool, default True\n    Whether to block until the pending action is completed\n\nRaises\n------\nAPIError if droplet is already powered on", "id": "f4446:c1:m10"}
{"signature": "def power_off(self, wait=True):", "body": "return self._action('<STR_LIT>', wait)<EOL>", "docstring": "Equivalent to hitting the power button. This is a \"hard shutoff\"\n\nParameters\n----------\nwait: bool, default True\n    Whether to block until the pending action is completed", "id": "f4446:c1:m9"}
{"signature": "def take_snapshot(self, name, wait=True):", "body": "return self._action('<STR_LIT>', name=name, wait=wait)<EOL>", "docstring": "Take a snapshot of this droplet (must be powered off)\n\nParameters\n----------\nname: str\n    Name of the snapshot\nwait: bool, default True\n    Whether to block until the pending action is completed", "id": "f4446:c1:m20"}
{"signature": "def create(self, name, region, size, image, ssh_keys=None,<EOL>backups=None, ipv6=None, private_networking=None, wait=True):", "body": "if ssh_keys and not isinstance(ssh_keys, (list, tuple)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>resp = self.post(name=name, region=region, size=size, image=image,<EOL>ssh_keys=ssh_keys,<EOL>private_networking=private_networking,<EOL>backups=backups, ipv6=ipv6)<EOL>droplet = self.get(resp[self.singular]['<STR_LIT:id>'])<EOL>if wait:<EOL><INDENT>droplet.wait()<EOL><DEDENT>droplet = self.get(resp[self.singular]['<STR_LIT:id>'])<EOL>return droplet<EOL>", "docstring": "Create a new droplet\n\nParameters\n----------\nname: str\n    Name of new droplet\nregion: str\n    slug for region (e.g., sfo1, nyc1)\nsize: str\n    slug for droplet size (e.g., 512mb, 1024mb)\nimage: int or str\n    image id (e.g., 12352) or slug (e.g., 'ubuntu-14-04-x64')\nssh_keys: list, optional\n    default SSH keys to be added on creation\n    this is highly recommended for ssh access\nbackups: bool, optional\n    whether automated backups should be enabled for the Droplet.\n    Automated backups can only be enabled when the Droplet is created.\nipv6: bool, optional\n    whether IPv6 is enabled on the Droplet\nprivate_networking: bool, optional\n    whether private networking is enabled for the Droplet. Private\n    networking is currently only available in certain regions\nwait: bool, default True\n    if True then block until creation is complete", "id": "f4446:c0:m5"}
{"signature": "@property<EOL><INDENT>def ip_address(self):<DEDENT>", "body": "ip = None<EOL>for eth in self.networks['<STR_LIT>']:<EOL><INDENT>if eth['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>ip = eth['<STR_LIT>']<EOL>break<EOL><DEDENT><DEDENT>if ip is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return ip<EOL>", "docstring": "Public ip_address", "id": "f4446:c1:m27"}
{"signature": "def reboot(self, wait=True):", "body": "return self._action('<STR_LIT>', wait)<EOL>", "docstring": "According to DigitalOcean API this is best efforts only\n\nParameters\n----------\nwait: bool, default True\n    Whether to block until the pending action is completed", "id": "f4446:c1:m6"}
{"signature": "def rename(self, name, wait=True):", "body": "return self._action('<STR_LIT>', name=name, wait=wait)<EOL>", "docstring": "Change the name of this droplet\n\nParameters\n----------\nname: str\n    New name for the droplet\nwait: bool, default True\n    Whether to block until the pending action is completed\n\nRaises\n------\nAPIError if region does not support private networking", "id": "f4446:c1:m18"}
{"signature": "def get(self, id):", "body": "info = self._get_droplet_info(id)<EOL>return DropletActions(self.api, self, **info)<EOL>", "docstring": "Retrieve a droplet by id\n\nParameters\n----------\nid: int\n    droplet id\n\nReturns\n-------\ndroplet: DropletActions", "id": "f4446:c0:m6"}
{"signature": "def by_name(self, name):", "body": "for d in self.list():<EOL><INDENT>if d['<STR_LIT:name>'] == name:<EOL><INDENT>return self.get(d['<STR_LIT:id>'])<EOL><DEDENT><DEDENT>raise KeyError(\"<STR_LIT>\" % name)<EOL>", "docstring": "Retrieve a droplet by name (return first if duplicated)\n\nParameters\n----------\nname: str\n    droplet name\n\nReturns\n-------\ndroplet: DropletActions", "id": "f4446:c0:m8"}
{"signature": "def actions(self, id):", "body": "return self._prop(id, '<STR_LIT>')<EOL>", "docstring": "Return all actions for a given droplet (completed and otherwise)\n\nParameters\n----------\nid: int\n    Droplet id", "id": "f4446:c0:m3"}
{"signature": "def enable_ipv6(self, wait=True):", "body": "return self._action('<STR_LIT>', wait)<EOL>", "docstring": "Turn on IPv6 networking for this droplet\n\nParameters\n----------\nwait: bool, default True\n    Whether to block until the pending action is completed\n\nRaises\n------\nAPIError if region does not support IPv6", "id": "f4446:c1:m12"}
{"signature": "def actions(self):", "body": "return self.parent.actions(self.id)<EOL>", "docstring": "Action history on this droplet", "id": "f4446:c1:m24"}
{"signature": "def update(self, id, **kwargs):", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "A droplet cannot be updated via POST", "id": "f4446:c0:m9"}
{"signature": "def get_action(self, action_id):", "body": "return self.get((action_id,)).get('<STR_LIT:action>')<EOL>", "docstring": "Retrieve a single action based on action_id", "id": "f4446:c1:m4"}
{"signature": "def password_reset(self, wait=True):", "body": "return self._action('<STR_LIT>', wait)<EOL>", "docstring": "Send password reset email for this droplet\n\nParameters\n----------\nwait: bool, default True\n    Whether to block until the pending action is completed", "id": "f4446:c1:m11"}
{"signature": "def records(self, name):", "body": "if self.get(name):<EOL><INDENT>return DomainRecords(self.api, name)<EOL><DEDENT>", "docstring": "Get a list of all domain records for the given domain name\n\nParameters\n----------\nname: str\n    domain name", "id": "f4452:c9:m1"}
{"signature": "def get_response(self, resp):", "body": "try:<EOL><INDENT>return resp.json()<EOL><DEDENT>except JSON_ERROR:<EOL><INDENT>return {}<EOL><DEDENT>", "docstring": "Retrieve response as json and deserialize as dict\n\nParameters\n----------\nresp: requests.models.Response", "id": "f4452:c1:m1"}
{"signature": "def send_request(self, kind, resource, url_components, **kwargs):", "body": "url = self.format_request_url(resource, *url_components)<EOL>meth = getattr(requests, kind)<EOL>headers = self.get_request_headers()<EOL>req_data = self.format_parameters(**kwargs)<EOL>response = meth(url, headers=headers, data=req_data)<EOL>data = self.get_response(response)<EOL>if response.status_code >= <NUM_LIT>:<EOL><INDENT>msg = data.pop('<STR_LIT:message>', '<STR_LIT>')<EOL>raise APIError(msg, response.status_code, **data)<EOL><DEDENT>return data<EOL>", "docstring": "Send a request to the REST API\n\nParameters\n----------\nkind: str, {get, delete, put, post, head}\nresource: str\nurl_components: list or tuple to be appended to the request URL\n\nNotes\n-----\nkwargs contain request parameters to be sent as request data", "id": "f4452:c1:m0"}
{"signature": "@property<EOL><INDENT>def result_key(self):<DEDENT>", "body": "return self.resource_path<EOL>", "docstring": "Key value for response contents", "id": "f4452:c4:m1"}
{"signature": "def create(self, name, ip_address):", "body": "return (self.post(name=name, ip_address=ip_address)<EOL>.get(self.singular, None))<EOL>", "docstring": "Creates a new domain\n\nParameters\n----------\nname: str\n    new domain name\nip_address: str\n    IP address for the new domain", "id": "f4452:c9:m0"}
{"signature": "def send_request(self, kind, url_components, **kwargs):", "body": "return self.api.send_request(kind, self.resource_path, url_components,<EOL>**kwargs)<EOL>", "docstring": "Send a request for this resource to the API\n\nParameters\n----------\nkind: str, {'get', 'delete', 'put', 'post', 'head'}", "id": "f4452:c3:m1"}
{"signature": "@property<EOL><INDENT>def singular(self):<DEDENT>", "body": "return self.result_key[:-<NUM_LIT:1>]<EOL>", "docstring": "Key value for response contents when requesting single unit of\ncollection", "id": "f4452:c4:m2"}
{"signature": "def __init__(self, api_key=None, api_url=API_URL, api_version=API_VERSION):", "body": "if api_key is None:<EOL><INDENT>api_key = os.environ.get('<STR_LIT>', None)<EOL><DEDENT>if api_key is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.api_key = api_key<EOL>self.api_url = api_url<EOL>self.api_version = api_version<EOL>", "docstring": "Parameters\n----------\napi_key: str, optional\n    If not supplied uses value of envvar DIGITALOCEAN_API_KEY\napi_url: str, optional\napi_version: str, optional", "id": "f4452:c2:m0"}
{"signature": "def get(self, id, **kwargs):", "body": "return (super(MutableCollection, self).get((id,), **kwargs)<EOL>.get(self.singular, None))<EOL>", "docstring": "Get single unit of collection", "id": "f4452:c5:m3"}
{"signature": "def delete(self, url_components=(), **kwargs):", "body": "return self.send_request('<STR_LIT>', url_components, **kwargs)<EOL>", "docstring": "Send delete request", "id": "f4452:c3:m3"}
{"signature": "def update(self, id, **kwargs):", "body": "raise NotImplementedError()<EOL>", "docstring": "Domain cannot be updated", "id": "f4452:c9:m2"}
{"signature": "def get_request_headers(self):", "body": "header = {}<EOL>header['<STR_LIT>'] = '<STR_LIT>' % self.api_key<EOL>return header<EOL>", "docstring": "Format headers for the request", "id": "f4452:c2:m1"}
{"signature": "def wait(self, cmd, raise_on_error=True):", "body": "_, stdout, stderr = self.exec_command(cmd)<EOL>stdout.channel.recv_exit_status()<EOL>output = stdout.read()<EOL>if self.interactive:<EOL><INDENT>print(output)<EOL><DEDENT>errors = stderr.read()<EOL>if self.interactive:<EOL><INDENT>print(errors)<EOL><DEDENT>if errors and raise_on_error:<EOL><INDENT>raise ValueError(errors)<EOL><DEDENT>return output<EOL>", "docstring": "Execute command and wait for it to finish. Proceed with caution because\nif you run a command that causes a prompt this will hang", "id": "f4453:c0:m7"}
{"signature": "def unsudo(self):", "body": "self.wait('<STR_LIT>')<EOL>", "docstring": "Assume already in sudo", "id": "f4453:c0:m11"}
{"signature": "def pip(self, package_names, raise_on_error=True):", "body": "if isinstance(package_names, basestring):<EOL><INDENT>package_names = [package_names]<EOL><DEDENT>cmd = \"<STR_LIT>\" % ('<STR_LIT:U+0020>'.join(package_names))<EOL>return self.wait(cmd, raise_on_error=raise_on_error)<EOL>", "docstring": "Install specified python packages using pip. -U option added\nWaits for command to finish.\n\nParameters\n----------\npackage_names: list-like of str\nraise_on_error: bool, default True\n    If True then raise ValueError if stderr is not empty", "id": "f4453:c0:m14"}
{"signature": "def pip_r(self, requirements, raise_on_error=True):", "body": "cmd = \"<STR_LIT>\" % requirements<EOL>return self.wait(cmd, raise_on_error=raise_on_error)<EOL>", "docstring": "Install all requirements contained in the given file path\nWaits for command to finish.\n\nParameters\n----------\nrequirements: str\n    Path to requirements.txt\nraise_on_error: bool, default True\n    If True then raise ValueError if stderr is not empty", "id": "f4453:c0:m16"}
{"signature": "def chdir(self, new_pwd, relative=True):", "body": "if new_pwd and self.pwd and relative:<EOL><INDENT>new_pwd = os.path.join(self.pwd, new_pwd)<EOL><DEDENT>self.pwd = new_pwd<EOL>", "docstring": "Parameters\n----------\nnew_pwd: str,\n    Directory to change to\nrelative: bool, default True\n    If True then the given directory is treated as relative to the\n    current directory", "id": "f4453:c0:m3"}
{"signature": "def get_first():", "body": "client = po.connect() <EOL>all_droplets = client.droplets.list()<EOL>id = all_droplets[<NUM_LIT:0>]['<STR_LIT:id>'] <EOL>return client.droplets.get(id)<EOL>", "docstring": "return first droplet", "id": "f4455:m0"}
{"signature": "def take_snapshot(droplet, name):", "body": "print(\"<STR_LIT>\")<EOL>droplet.power_off()<EOL>droplet.wait() <EOL>print(\"<STR_LIT>\")<EOL>droplet.take_snapshot(name)<EOL>droplet.wait()<EOL>snapshots = droplet.snapshots()<EOL>print(\"<STR_LIT>\")<EOL>print(snapshots)<EOL>", "docstring": "Take a snapshot of a droplet\n\nParameters\n----------\nname: str\n    name for snapshot", "id": "f4455:m1"}
{"signature": "def fetch_public_key(repo):", "body": "keyurl = '<STR_LIT>'.format(repo)<EOL>data = json.loads(urlopen(keyurl).read().decode())<EOL>if '<STR_LIT:key>' not in data:<EOL><INDENT>errmsg = \"<STR_LIT>\".format(repo)<EOL>errmsg += \"<STR_LIT>\"<EOL>raise ValueError(errmsg)<EOL><DEDENT>return data['<STR_LIT:key>']<EOL>", "docstring": "Download RSA public key Travis will use for this repo.\n\n    Travis API docs: http://docs.travis-ci.com/api/#repository-keys", "id": "f4459:m2"}
{"signature": "def update_travis_deploy_password(encrypted_password):", "body": "config = load_yaml_config(TRAVIS_CONFIG_FILE)<EOL>config['<STR_LIT>']['<STR_LIT:password>'] = dict(secure=encrypted_password)<EOL>save_yaml_config(TRAVIS_CONFIG_FILE, config)<EOL>line = ('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>prepend_line(TRAVIS_CONFIG_FILE, line)<EOL>", "docstring": "Update the deploy section of the .travis.yml file\n    to use the given encrypted password.", "id": "f4459:m6"}
{"signature": "def encrypt(pubkey, password):", "body": "key = load_key(pubkey)<EOL>encrypted_password = key.encrypt(password, PKCS1v15())<EOL>return base64.b64encode(encrypted_password)<EOL>", "docstring": "Encrypt password using given RSA public key and encode it with base64.\n\n    The encrypted password can only be decrypted by someone with the\n    private key (in this case, only Travis).", "id": "f4459:m1"}
{"signature": "def prepend_line(filepath, line):", "body": "with open(filepath) as f:<EOL><INDENT>lines = f.readlines()<EOL><DEDENT>lines.insert(<NUM_LIT:0>, line)<EOL>with open(filepath, '<STR_LIT:w>') as f:<EOL><INDENT>f.writelines(lines)<EOL><DEDENT>", "docstring": "Rewrite a file adding a line to its beginning.", "id": "f4459:m3"}
{"signature": "def log_update(entity, update):", "body": "p = {'<STR_LIT>': entity, '<STR_LIT>': update}<EOL>_log(TYPE_CODES.UPDATE, p)<EOL>", "docstring": "Logs an update done on an entity", "id": "f4461:m4"}
{"signature": "def log_entity_deletion(entity, params=None):", "body": "p = {'<STR_LIT>': entity}<EOL>if params:<EOL><INDENT>p['<STR_LIT>'] = params<EOL><DEDENT>_log(TYPE_CODES.DELETE, p)<EOL>", "docstring": "Logs an entity creation", "id": "f4461:m1"}
{"signature": "def log_operation(entities, operation_name, params=None):", "body": "if isinstance(entities, (list, tuple)):<EOL><INDENT>entities = list(entities)<EOL><DEDENT>else:<EOL><INDENT>entities = [entities]<EOL><DEDENT>p = {'<STR_LIT:name>': operation_name, '<STR_LIT>': entities}<EOL>if params:<EOL><INDENT>p['<STR_LIT>'] = params<EOL><DEDENT>_log(TYPE_CODES.OPERATION, p)<EOL>", "docstring": "Logs an operation done on an entity, possibly with other arguments", "id": "f4461:m2"}
{"signature": "@staticmethod<EOL><INDENT>def _proc_error(ifn: str, e: Exception) -> None:<DEDENT>", "body": "type_, value_, traceback_ = sys.exc_info()<EOL>traceback.print_tb(traceback_, file=sys.stderr)<EOL>print(file=sys.stderr)<EOL>print(\"<STR_LIT>\" % ifn, file=sys.stderr)<EOL>print(str(e), file=sys.stderr)<EOL>", "docstring": "Report an error\n        :param ifn: Input file name\n        :param e: Exception to report", "id": "f4468:c0:m2"}
{"signature": "def run(self,<EOL>proc: Callable[[Optional[str], Optional[str], argparse.Namespace], Optional[bool]],<EOL>file_filter: Optional[Callable[[str], bool]]=None,<EOL>file_filter_2: Optional[Callable[[Optional[str], str, argparse.Namespace], bool]]=None)-> Tuple[int, int]:", "body": "nfiles = <NUM_LIT:0><EOL>nsuccess = <NUM_LIT:0><EOL>if self.opts.infile:<EOL><INDENT>for file_idx in range(len(self.opts.infile)):<EOL><INDENT>in_f = self.opts.infile[file_idx]<EOL>if self._check_filter(in_f, self.opts.indir, file_filter, file_filter_2):<EOL><INDENT>fn = os.path.join(self.opts.indir, in_f) if self.opts.indir else in_f<EOL>nfiles += <NUM_LIT:1><EOL>if self._call_proc(proc, fn, self._outfile_name('<STR_LIT>', fn, outfile_idx=file_idx)):<EOL><INDENT>nsuccess += <NUM_LIT:1><EOL><DEDENT>elif self.opts.stoponerror:<EOL><INDENT>return nfiles, nsuccess<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif not self.opts.indir:<EOL><INDENT>if self._check_filter(None, None, file_filter, file_filter_2):<EOL><INDENT>nfiles += <NUM_LIT:1><EOL>if self._call_proc(proc, None, self._outfile_name('<STR_LIT>', '<STR_LIT>')):<EOL><INDENT>nsuccess += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for dirpath, _, filenames in os.walk(self.opts.indir):<EOL><INDENT>for fn in filenames:<EOL><INDENT>if self._check_filter(fn, dirpath, file_filter, file_filter_2):<EOL><INDENT>nfiles += <NUM_LIT:1><EOL>if self._call_proc(proc, os.path.join(dirpath, fn), self._outfile_name(dirpath, fn)):<EOL><INDENT>nsuccess += <NUM_LIT:1><EOL><DEDENT>elif self.opts.stoponerror:<EOL><INDENT>return nfiles, nsuccess<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return nfiles, nsuccess<EOL>", "docstring": "Run the directory list processor calling a function per file.\n        :param proc: Process to invoke. Args: input_file_name, output_file_name, argparse options. 
Return pass or fail.\n                     No return also means pass\n        :param file_filter: Additional filter for testing file names, types, etc.\n        :param file_filter_2: File filter that includes directory, filename and opts\n                        (separate for backwards compatibility)\n        :return: tuple - (number of files passed to proc: int, number of files that passed proc)", "id": "f4468:c0:m5"}
{"signature": "def _outfile_name(self, dirpath: str, infile: str, outfile_idx: int=<NUM_LIT:0>) -> Optional[str]:", "body": "if not self.opts.outfile and not self.opts.outdir:<EOL><INDENT>return None<EOL><DEDENT>if self.opts.outfile:<EOL><INDENT>outfile_element = self.opts.outfile[<NUM_LIT:0>] if len(self.opts.outfile) == <NUM_LIT:1> else self.opts.outfile[outfile_idx]<EOL><DEDENT>elif self.opts.infile:<EOL><INDENT>if '<STR_LIT>' in infile:<EOL><INDENT>outfile_element = \"<STR_LIT>\".format(outfile_idx + <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>outfile_element = os.path.basename(infile).rsplit('<STR_LIT:.>', <NUM_LIT:1>)[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>relpath = dirpath[len(self.opts.indir) + <NUM_LIT:1>:] if not self.opts.flatten and self.opts.indir else '<STR_LIT>'<EOL>outfile_element = os.path.join(relpath, os.path.split(infile)[<NUM_LIT:1>][:-len(self.infile_suffix)])<EOL><DEDENT>return (os.path.join(self.opts.outdir, outfile_element) if self.opts.outdir else outfile_element) +(self.outfile_suffix if not self.opts.outfile and self.outfile_suffix else '<STR_LIT>')<EOL>", "docstring": "Construct the output file name from the input file.  If a single output file was named and there isn't a\n        directory, return the output file.\n        :param dirpath: Directory path to infile\n        :param infile: Name of input file\n        :param outfile_idx: Index into output file list (for multiple input/output files)\n        :return: Full name of output file or None if output is not otherwise supplied", "id": "f4468:c0:m6"}
{"signature": "def _parser_exit(parser: argparse.ArgumentParser, proc: \"<STR_LIT>\", _=<NUM_LIT:0>,<EOL>message: Optional[str]=None) -> None:", "body": "if message:<EOL><INDENT>parser._print_message(message, sys.stderr)<EOL><DEDENT>proc.successful_parse = False<EOL>", "docstring": "Override the default exit in the parser.\n:param parser:\n:param _: exit code.  Unused because we don't exit\n:param message: Optional message", "id": "f4468:m0"}
{"signature": "def _call_proc(self,<EOL>proc: Callable[[Optional[str], Optional[str], argparse.Namespace], bool],<EOL>ifn: Optional[str],<EOL>ofn: Optional[str]) -> bool:", "body": "rslt = False<EOL>try:<EOL><INDENT>rslt = proc(ifn, ofn, self.opts)<EOL><DEDENT>except Exception as e:<EOL><INDENT>self._proc_error(ifn, e)<EOL><DEDENT>return True if rslt or rslt is None else False<EOL>", "docstring": "Call the actual processor and intercept anything that goes wrong\n        :param proc: Process to call\n        :param ifn: Input file name to process.  If absent, typical use is stdin\n        :param ofn: Output file name. If absent, typical use is stdout\n        :return: true means process was successful", "id": "f4468:c0:m3"}
{"signature": "def _detect_sse3(self):", "body": "self._print_support_start('<STR_LIT>')<EOL>result = self.hasfunction('<STR_LIT>',<EOL>include='<STR_LIT>',<EOL>extra_postargs=['<STR_LIT>'])<EOL>self._print_support_end('<STR_LIT>', result)<EOL>return result<EOL>", "docstring": "Does this compiler support SSE3 intrinsics?", "id": "f4474:c0:m6"}
{"signature": "@classmethod<EOL><INDENT>def fromdict(cls, config, check_fields=True):<DEDENT>", "body": "m = super(Config, cls).__new__(cls)<EOL>m.path = '<STR_LIT:.>'<EOL>m.verbose = False<EOL>m.config = m._merge_defaults(config)<EOL>if check_fields:<EOL><INDENT>m._check_fields()<EOL><DEDENT>return m<EOL>", "docstring": "Create a Config object from config dict directly.", "id": "f4478:c0:m3"}
{"signature": "def get_section(self, section):", "body": "return self.config.get(section, {})<EOL>", "docstring": "Sections are top-level entries in the config tree", "id": "f4478:c0:m4"}
{"signature": "def sha1(self):", "body": "with open(self.path, '<STR_LIT:rb>') as f:<EOL><INDENT>return hashlib.sha1(f.read()).hexdigest()<EOL><DEDENT>", "docstring": "SHA1 hash of the config file itself.", "id": "f4478:c0:m18"}
{"signature": "@staticmethod<EOL><INDENT>def is_repeated_suggestion(params, history):<DEDENT>", "body": "if any(params == hparams and hstatus == '<STR_LIT>'<EOL>for hparams, hscore, hstatus in history):<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Parameters\n----------\nparams : dict\n    Trial param set\nhistory : list of 3-tuples\n    History of past function evaluations. Each element in history\n    should be a tuple `(params, score, status)`, where `params` is a\n    dict mapping parameter names to values\n\nReturns\n-------\nis_repeated_suggestion : bool", "id": "f4481:c0:m1"}
{"signature": "def suggest(self, history, searchspace):", "body": "<EOL>if '<STR_LIT>' not in sys.modules:<EOL><INDENT>raise ImportError('<STR_LIT>')<EOL><DEDENT>random = check_random_state(self.seed)<EOL>hp_searchspace = searchspace.to_hyperopt()<EOL>trials = Trials()<EOL>for i, (params, scores, status) in enumerate(history):<EOL><INDENT>if status == '<STR_LIT>':<EOL><INDENT>result = {'<STR_LIT>': -np.mean(scores), '<STR_LIT:status>': STATUS_OK}<EOL><DEDENT>elif status == '<STR_LIT>':<EOL><INDENT>result = {'<STR_LIT:status>': STATUS_RUNNING}<EOL><DEDENT>elif status == '<STR_LIT>':<EOL><INDENT>result = {'<STR_LIT:status>': STATUS_FAIL}<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>' % status)<EOL><DEDENT>vals = {}<EOL>for var in searchspace:<EOL><INDENT>if isinstance(var, EnumVariable):<EOL><INDENT>matches = [<EOL>i for i, c in enumerate(var.choices)<EOL>if c == params[var.name]<EOL>]<EOL>assert len(matches) == <NUM_LIT:1><EOL>vals[var.name] = matches<EOL><DEDENT>else:<EOL><INDENT>vals[var.name] = [params[var.name]]<EOL><DEDENT><DEDENT>trials.insert_trial_doc({<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': dict((k, [i]) for k in hp_searchspace.keys()),<EOL>'<STR_LIT>': i,<EOL>'<STR_LIT>': vals,<EOL>'<STR_LIT>': None<EOL>},<EOL>'<STR_LIT:result>': result,<EOL>'<STR_LIT>': i,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT:state>': <NUM_LIT:2>,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT:version>': <NUM_LIT:0><EOL>})<EOL><DEDENT>trials.refresh()<EOL>chosen_params_container = []<EOL>def suggest(*args, **kwargs):<EOL><INDENT>return tpe.suggest(*args,<EOL>**kwargs,<EOL>gamma=self.gamma,<EOL>n_startup_jobs=self.seeds)<EOL><DEDENT>def mock_fn(x):<EOL><INDENT>chosen_params_container.append(x)<EOL>return <NUM_LIT:0><EOL><DEDENT>fmin(fn=mock_fn,<EOL>algo=tpe.suggest,<EOL>space=hp_searchspace,<EOL>trials=trials,<EOL>max_evals=len(trials.trials) + 
<NUM_LIT:1>,<EOL>**self._hyperopt_fmin_random_kwarg(random))<EOL>chosen_params = chosen_params_container[<NUM_LIT:0>]<EOL>return chosen_params<EOL>", "docstring": "Suggest params to maximize an objective function based on the\nfunction evaluation history using a tree of Parzen estimators (TPE),\nas implemented in the hyperopt package.\n\nUse of this function requires that hyperopt be installed.", "id": "f4481:c2:m1"}
{"signature": "def suggest(self, history, searchspace):", "body": "return searchspace.rvs(self.seed)<EOL>", "docstring": "Randomly suggest params from searchspace.", "id": "f4481:c1:m1"}
{"signature": "def fit(self):", "body": "self.model.optimize_restarts(num_restarts=self.num_restarts, verbose=False)<EOL>", "docstring": "Fits the model with random restarts.\n:return:", "id": "f4482:c0:m1"}
{"signature": "def plot_3(data, ss, *args):", "body": "if len(data) <= <NUM_LIT:1>:<EOL><INDENT>warnings.warn(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>scores = np.array([d['<STR_LIT>'] for d in data])<EOL>warped = np.array([ss.point_to_unit(d['<STR_LIT>']) for d in data])<EOL>X = TSNE(n_components=<NUM_LIT:2>).fit_transform(warped)<EOL>e_scores = np.exp(scores)<EOL>mine, maxe = np.min(e_scores), np.max(e_scores)<EOL>color = (e_scores - mine) / (maxe - mine)<EOL>mapped_colors = list(map(rgb2hex, cm.get_cmap('<STR_LIT>')(color)))<EOL>p = bk.figure(title='<STR_LIT>', tools=TOOLS)<EOL>df_params = nonconstant_parameters(data)<EOL>df_params['<STR_LIT>'] = scores<EOL>df_params['<STR_LIT:x>'] = X[:, <NUM_LIT:0>]<EOL>df_params['<STR_LIT:y>'] = X[:, <NUM_LIT:1>]<EOL>df_params['<STR_LIT>'] = mapped_colors<EOL>df_params['<STR_LIT>'] = <NUM_LIT:1><EOL>p.circle(<EOL>x='<STR_LIT:x>', y='<STR_LIT:y>', color='<STR_LIT>', radius='<STR_LIT>',<EOL>source=ColumnDataSource(data=df_params), fill_alpha=<NUM_LIT>,<EOL>line_color=None)<EOL>cp = p<EOL>hover = cp.select(dict(type=HoverTool))<EOL>format_tt = [(s, '<STR_LIT>' % s) for s in df_params.columns]<EOL>hover.tooltips = OrderedDict([(\"<STR_LIT:index>\", \"<STR_LIT>\")] + format_tt)<EOL>xax, yax = p.axis<EOL>xax.axis_label = '<STR_LIT>'<EOL>yax.axis_label = '<STR_LIT>'<EOL>return p<EOL>", "docstring": "t-SNE embedding of the parameters, colored by score", "id": "f4494:m4"}
{"signature": "def plot_1(data, *args):", "body": "df_all = pd.DataFrame(data)<EOL>df_params = nonconstant_parameters(data)<EOL>return build_scatter_tooltip(<EOL>x=df_all['<STR_LIT:id>'], y=df_all['<STR_LIT>'], tt=df_params,<EOL>title='<STR_LIT>')<EOL>", "docstring": "Plot 1. All iterations (scatter plot)", "id": "f4494:m2"}
{"signature": "def init_subclass_by_name(baseclass, short_name, params):", "body": "sc = baseclass.__subclasses__()<EOL>for kls in sc:<EOL><INDENT>if kls.short_name == short_name or(_is_collection(kls.short_name) and short_name in kls.short_name):<EOL><INDENT>try:<EOL><INDENT>return kls(**params)<EOL><DEDENT>except TypeError as e:<EOL><INDENT>spec = inspect.getargspec(kls.__init__)<EOL>if '<STR_LIT>' in str(e):<EOL><INDENT>avail = join_quoted(spec.args[<NUM_LIT:1>:])<EOL>raise RuntimeError(<EOL>\"<STR_LIT>\"<EOL>% (short_name, str(e), avail))<EOL><DEDENT>elif '<STR_LIT>' in str(e):<EOL><INDENT>required = join_quoted(spec.args[<NUM_LIT:1>:-len(spec.defaults)])<EOL>raise RuntimeError(<EOL>\"<STR_LIT>\"<EOL>% (short_name, str(e), required))<EOL><DEDENT>elif '<STR_LIT>' in str(e):<EOL><INDENT>required = join_quoted(spec.args[<NUM_LIT:1>:-len(spec.defaults)])<EOL>optional = join_quoted(spec.args[-len(spec.defaults):])<EOL>raise RuntimeError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (<EOL>short_name, str(e), required, optional))<EOL><DEDENT>raise<EOL><DEDENT><DEDENT><DEDENT>chain = itertools.chain.from_iterable(<EOL>e.short_name if _is_collection(e.short_name) else [e.short_name]<EOL>for e in sc)<EOL>avail_names = '<STR_LIT:U+002CU+0020>'.join(str(n) for n in chain)<EOL>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % (short_name, avail_names))<EOL>", "docstring": "Find the subclass, `kls` of baseclass with class attribute `short_name`\nthat matches the supplied `short_name`, and then instantiate and return\nthat class with:\n\n    return kls(**params)\n\nThis function also tries its best to catch any possible TypeErrors due\nto binding of the arguments, and rethrows them as nicely formatted\nRuntimeErrors that are suitable for showing to users.", "id": "f4495:m0"}
{"signature": "def run_osprey(self, config):", "body": "fh, filename = tempfile.mkstemp(dir=self.temp_dir)<EOL>with open(filename, '<STR_LIT:wb>') as f:<EOL><INDENT>f.write(config)<EOL><DEDENT>args = Namespace(config=filename, n_iters=<NUM_LIT:1>, output='<STR_LIT>')<EOL>execute_worker.execute(args, None)<EOL>dump = json.loads(execute_dump.execute(args, None))<EOL>assert len(dump) == <NUM_LIT:1><EOL>assert dump[<NUM_LIT:0>]['<STR_LIT:status>'] == '<STR_LIT>', dump[<NUM_LIT:0>]['<STR_LIT:status>']<EOL>", "docstring": "Run osprey-worker.\n\nParameters\n----------\nconfig : str\n    Configuration string.", "id": "f4500:c0:m2"}
{"signature": "def predict(self, X):", "body": "return np.argmax(self._predict(X), axis=<NUM_LIT:1>)<EOL>", "docstring": "Get model predictions.\n\nParameters\n----------\nX : array_like\n    Test dataset.", "id": "f4501:c1:m2"}
{"signature": "def predict_proba(self, X):", "body": "return self._predict(X)<EOL>", "docstring": "Get model predictions.\n\nParameters\n----------\nX : array_like\n    Test dataset.", "id": "f4501:c1:m1"}
{"signature": "def _get_labels(self, y):", "body": "y = np.asarray(y)<EOL>assert y.ndim == <NUM_LIT:1><EOL>labels = np.unique(y).tolist()<EOL>oh = np.zeros((y.size, len(labels)), dtype=float)<EOL>for i, label in enumerate(y):<EOL><INDENT>oh[i, labels.index(label)] = <NUM_LIT:1.><EOL><DEDENT>return oh<EOL>", "docstring": "Construct pylearn2 dataset labels.\n\nParameters\n----------\ny : array_like, optional\n    Labels.", "id": "f4501:c1:m0"}
{"signature": "def score(self, X, y):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Score predictions.\n\nParameters\n----------\nX : array_like\n    Test examples.\ny : array_like, optional\n    Labels.", "id": "f4501:c0:m7"}
{"signature": "def _get_param_names(self):", "body": "template = Template(self.yaml_string)<EOL>names = ['<STR_LIT>']  <EOL>for match in re.finditer(template.pattern, template.template):<EOL><INDENT>name = match.group('<STR_LIT>') or match.group('<STR_LIT>')<EOL>assert name is not None<EOL>names.append(name)<EOL><DEDENT>return names<EOL>", "docstring": "Get mappable parameters from YAML.", "id": "f4501:c0:m1"}
{"signature": "def _squeeze_time(t):", "body": "if sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>return max(<NUM_LIT:0>, t - <NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>return t<EOL><DEDENT>", "docstring": "Remove .1s to the time under Windows: this is the time it take to\n    stat files. This is needed to make results similar to timings under\n    Unix, for tests", "id": "f4510:m6"}
{"signature": "@contextlib.contextmanager<EOL>def prepend_syspath(path):", "body": "sys.path.insert(<NUM_LIT:0>, path)<EOL>yield<EOL>sys.path.pop(<NUM_LIT:0>)<EOL>", "docstring": "Contect manager (with statement) that prepends path to sys.path", "id": "f4510:m3"}
{"signature": "def check_arrays(*arrays, **options):", "body": "sparse_format = options.pop('<STR_LIT>', None)<EOL>if sparse_format not in (None, '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>' % sparse_format)<EOL><DEDENT>copy = options.pop('<STR_LIT>', False)<EOL>check_ccontiguous = options.pop('<STR_LIT>', False)<EOL>dtype = options.pop('<STR_LIT>', None)<EOL>warn_nans = options.pop('<STR_LIT>', False)<EOL>replace_nans = options.pop('<STR_LIT>', False)<EOL>allow_lists = options.pop('<STR_LIT>', False)<EOL>allow_nans = options.pop('<STR_LIT>', False)<EOL>allow_nd = options.pop('<STR_LIT>', False)<EOL>if options:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % options.keys())<EOL><DEDENT>if len(arrays) == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>n_samples = num_samples(arrays[<NUM_LIT:0>])<EOL>checked_arrays = []<EOL>for array in arrays:<EOL><INDENT>array_orig = array<EOL>if array is None:<EOL><INDENT>checked_arrays.append(array)<EOL>continue<EOL><DEDENT>size = num_samples(array)<EOL>if size != n_samples:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>% (size, n_samples))<EOL><DEDENT>if not allow_lists or hasattr(array, \"<STR_LIT>\"):<EOL><INDENT>if sp.issparse(array):<EOL><INDENT>if sparse_format == '<STR_LIT>':<EOL><INDENT>array = array.tocsr()<EOL><DEDENT>elif sparse_format == '<STR_LIT>':<EOL><INDENT>array = array.tocsc()<EOL><DEDENT>elif sparse_format == '<STR_LIT>':<EOL><INDENT>raise TypeError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if check_ccontiguous:<EOL><INDENT>array.data = np.ascontiguousarray(array.data, dtype=dtype)<EOL><DEDENT>elif hasattr(array, '<STR_LIT:data>'):<EOL><INDENT>array.data = np.asarray(array.data, dtype=dtype)<EOL><DEDENT>elif array.dtype != dtype:<EOL><INDENT>array = array.astype(dtype)<EOL><DEDENT>if not allow_nans:<EOL><INDENT>if hasattr(array, 
'<STR_LIT:data>'):<EOL><INDENT>_assert_all_finite(array.data)<EOL><DEDENT>else:<EOL><INDENT>_assert_all_finite(array.values())<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if check_ccontiguous:<EOL><INDENT>array = np.ascontiguousarray(array, dtype=dtype)<EOL><DEDENT>else:<EOL><INDENT>array = np.asarray(array, dtype=dtype)<EOL><DEDENT>if warn_nans:<EOL><INDENT>allow_nans = True<EOL>_warn_if_not_finite(array)<EOL><DEDENT>if replace_nans:<EOL><INDENT>array = np.nan_to_num(array)<EOL><DEDENT>if not allow_nans:<EOL><INDENT>_assert_all_finite(array)<EOL><DEDENT><DEDENT>if not allow_nd and array.ndim >= <NUM_LIT:3>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" %<EOL>array.ndim)<EOL><DEDENT><DEDENT>if copy and array is array_orig:<EOL><INDENT>array = array.copy()<EOL><DEDENT>checked_arrays.append(array)<EOL><DEDENT>return checked_arrays<EOL>", "docstring": "Check that all arrays have consistent first dimensions.\n\n    Checks whether all objects in arrays have the same shape or length.\n    By default lists and tuples are converted to numpy arrays.\n\n    It is possible to enforce certain properties, such as dtype, continguity\n    and sparse matrix format (if a sparse matrix is passed).\n\n    Converting lists to arrays can be disabled by setting ``allow_lists=True``.\n    Lists can then contain arbitrary objects and are not checked for dtype,\n    finiteness or anything else but length. 
Arrays are still checked\n    and possibly converted.\n\n    Parameters\n    ----------\n    *arrays : sequence of arrays or scipy.sparse matrices with same shape[0]\n        Python lists or tuples occurring in arrays are converted to 1D numpy\n        arrays, unless allow_lists is specified.\n    sparse_format : 'csr', 'csc' or 'dense', None by default\n        If not None, any scipy.sparse matrix is converted to\n        Compressed Sparse Rows or Compressed Sparse Columns representations.\n        If 'dense', an error is raised when a sparse array is\n        passed.\n    copy : boolean, False by default\n        If copy is True, ensure that returned arrays are copies of the original\n        (if not already converted to another format earlier in the process).\n    check_ccontiguous : boolean, False by default\n        Check that the arrays are C contiguous\n    dtype : a numpy dtype instance, None by default\n        Enforce a specific dtype.\n    warn_nans : boolean, False by default\n        Prints warning if nans in the arrays\n        Disables allow_nans\n    replace_nans : boolean, False by default\n        Replace nans in the arrays with zeros\n    allow_lists : bool\n        Allow lists of arbitrary objects as input, just check their length.\n        Disables\n    allow_nans : boolean, False by default\n        Allows nans in the arrays\n    allow_nd : boolean, False by default\n        Allows arrays of more than 2 dimensions.", "id": "f4510:m15"}
{"signature": "def add_int(self, name, min, max, warp=None):", "body": "min, max = map(int, (min, max))<EOL>if max < min:<EOL><INDENT>raise ValueError('<STR_LIT>' % name)<EOL><DEDENT>if warp not in (None, '<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % (name, warp))<EOL><DEDENT>if min <= <NUM_LIT:0> and warp == '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.variables[name] = IntVariable(name, min, max, warp)<EOL>", "docstring": "An integer-valued dimension bounded between `min` <= x <= `max`.\n        Note that the right endpoint of the interval includes `max`.\n\n        When `warp` is None, the base measure associated with this dimension\n        is a categorical distribution with each weight on each of the integers\n        in [min, max]. With `warp == 'log'`, the base measure is a uniform\n        distribution on the log of the variable, with bounds at `log(min)` and\n        `log(max)`. This is appropriate for variables that are \"naturally\" in\n        log-space. Other `warp` functions are not supported (yet), but may be\n        at a later time. Please note that this functionality is not supported\n        for `hyperopt_tpe`.", "id": "f4515:c0:m3"}
{"signature": "def add_jump(self, name, min, max, num, warp=None, var_type=float):", "body": "if not isinstance(var_type, type):<EOL><INDENT>if var_type == '<STR_LIT:int>':<EOL><INDENT>var_type = int<EOL><DEDENT>elif var_type == '<STR_LIT:float>':<EOL><INDENT>var_type = float<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % (var_type))<EOL><DEDENT><DEDENT>min, max = map(var_type, (min, max))<EOL>num = int(num)<EOL>if not warp:<EOL><INDENT>choices = np.linspace(min, max, num=num, dtype=var_type)<EOL><DEDENT>elif (min >= <NUM_LIT:0>) and warp == '<STR_LIT>':<EOL><INDENT>choices = np.logspace(np.log10(min), np.log10(max), num=num,<EOL>dtype=var_type)<EOL><DEDENT>elif (min <= <NUM_LIT:0>)and warp == '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % (name, warp))<EOL><DEDENT>self.variables[name] = EnumVariable(name, choices.tolist())<EOL>", "docstring": "An integer/float-valued enumerable with `num` items, bounded\n        between [`min`, `max`]. Note that the right endpoint of the interval\n        includes `max`. This is a wrapper around the add_enum. `jump` can be\n        a float or int.", "id": "f4515:c0:m2"}
{"signature": "def ga_context(context_func):", "body": "def decorator(func):<EOL><INDENT>@wraps(func)<EOL>def wrapper(self, *args, **kwargs):<EOL><INDENT>context = func(self, *args, **kwargs)<EOL>self.request.google_analytics.update(context_func(context))<EOL>return context<EOL><DEDENT>return wrapper<EOL><DEDENT>return decorator<EOL>", "docstring": "A decorator for Cornice views that allows one to set extra parameters\nfor Google Analytics tracking::\n    @ga_context(lambda context: {'dt': context['category'].title, })\n    @view_config(route_name='page')\n    def view(request):\n        return {\n            'category': self.workspace.S(Category).filter(title='foo')[0],\n        }\n:param func context_func:\n    A function which takes one argument, a context dictionary made\n    available to the template.\n:returns:\n    A dict containing the extra variables for Google Analytics\n    tracking.", "id": "f4534:m1"}
{"signature": "def get_featured_pages(<EOL>self, limit=<NUM_LIT:5>, order_by=('<STR_LIT>', '<STR_LIT>')):", "body": "return self._get_featured_pages(self.locale, limit, order_by)<EOL>", "docstring": "Return featured pages the GitModel knows about.\n:param str locale:\n    The locale string, like `eng_UK`.\n:param int limit:\n    The number of pages to return, defaults to 5.\n:param tuple order_by:\n    The attributes to order on,\n    defaults to ('position', '-modified_at').", "id": "f4535:c0:m17"}
{"signature": "def initialize_minor_angle(self):", "body": "num_groups = len(self.nodes.keys())<EOL>self.minor_angle = <NUM_LIT:2> * np.pi / (<NUM_LIT:6> * num_groups)<EOL>", "docstring": "Computes the minor angle: 2pi radians / 3 * number of groups.", "id": "f4539:c0:m3"}
{"signature": "def set_minor_angle(self, angle):", "body": "assert angle < self.major_angle,\"<STR_LIT>\"<EOL>self.minor_angle = angle<EOL>", "docstring": "Sets the major angle of the hive plot. I have restricted this to be\nless than the major angle.", "id": "f4539:c0:m4"}
{"signature": "def axis_length(self, group):", "body": "return len(self.nodes[group])<EOL>", "docstring": "Computes the length of the axis for a given group.", "id": "f4539:c0:m6"}
{"signature": "def plot_axis(self, rs, theta):", "body": "xs, ys = get_cartesian(rs, theta)<EOL>self.ax.plot(xs, ys, '<STR_LIT>', alpha=<NUM_LIT>)<EOL>", "docstring": "Renders the axis.", "id": "f4539:c0:m8"}
{"signature": "def plot_radius(self):", "body": "plot_rad = <NUM_LIT:0><EOL>for group, nodelist in self.nodes.items():<EOL><INDENT>proposed_radius = len(nodelist) * self.scale<EOL>if proposed_radius > plot_rad:<EOL><INDENT>plot_rad = proposed_radius<EOL><DEDENT><DEDENT>return plot_rad + self.internal_radius<EOL>", "docstring": "Computes the plot radius: maximum of length of each list of nodes.", "id": "f4539:c0:m5"}
{"signature": "def add_axes_and_nodes(self):", "body": "for i, (group, nodelist) in enumerate(self.nodes.items()):<EOL><INDENT>theta = self.group_theta(group)<EOL>if self.has_edge_within_group(group):<EOL><INDENT>theta = theta - self.minor_angle<EOL>self.plot_nodes(nodelist, theta, group)<EOL>theta = theta + <NUM_LIT:2> * self.minor_angle<EOL>self.plot_nodes(nodelist, theta, group)<EOL><DEDENT>else:<EOL><INDENT>self.plot_nodes(nodelist, theta, group)<EOL><DEDENT><DEDENT>", "docstring": "Adds the axes (i.e. 2 or 3 axes, not to be confused with matplotlib\naxes) and the nodes that belong to each axis.", "id": "f4539:c0:m11"}
{"signature": "def group_theta(self, group):", "body": "for i, g in enumerate(self.nodes.keys()):<EOL><INDENT>if g == group:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return i * self.major_angle<EOL>", "docstring": "Computes the theta along which a group's nodes are aligned.", "id": "f4539:c0:m10"}
{"signature": "def adjust_angles(self, start_node, start_angle, end_node, end_angle):", "body": "start_group = self.find_node_group_membership(start_node)<EOL>end_group = self.find_node_group_membership(end_node)<EOL>if start_group == <NUM_LIT:0> and end_group == len(self.nodes.keys())-<NUM_LIT:1>:<EOL><INDENT>if self.has_edge_within_group(start_group):<EOL><INDENT>start_angle = correct_negative_angle(start_angle -<EOL>self.minor_angle)<EOL><DEDENT>if self.has_edge_within_group(end_group):<EOL><INDENT>end_angle = correct_negative_angle(end_angle +<EOL>self.minor_angle)<EOL><DEDENT><DEDENT>elif start_group == len(self.nodes.keys())-<NUM_LIT:1> and end_group == <NUM_LIT:0>:<EOL><INDENT>if self.has_edge_within_group(start_group):<EOL><INDENT>start_angle = correct_negative_angle(start_angle +<EOL>self.minor_angle)<EOL><DEDENT>if self.has_edge_within_group(end_group):<EOL><INDENT>end_angle = correct_negative_angle(end_angle -<EOL>self.minor_angle)<EOL><DEDENT><DEDENT>elif start_group < end_group:<EOL><INDENT>if self.has_edge_within_group(end_group):<EOL><INDENT>end_angle = correct_negative_angle(end_angle -<EOL>self.minor_angle)<EOL><DEDENT>if self.has_edge_within_group(start_group):<EOL><INDENT>start_angle = correct_negative_angle(start_angle +<EOL>self.minor_angle)<EOL><DEDENT><DEDENT>elif end_group < start_group:<EOL><INDENT>if self.has_edge_within_group(start_group):<EOL><INDENT>start_angle = correct_negative_angle(start_angle -<EOL>self.minor_angle)<EOL><DEDENT>if self.has_edge_within_group(end_group):<EOL><INDENT>end_angle = correct_negative_angle(end_angle +<EOL>self.minor_angle)<EOL><DEDENT><DEDENT>return start_angle, end_angle<EOL>", "docstring": "This function adjusts the start and end angles to correct for\nduplicated axes.", "id": "f4539:c0:m19"}
{"signature": "def node_theta(self, node):", "body": "group = self.find_node_group_membership(node)<EOL>return self.group_theta(group)<EOL>", "docstring": "Convenience function to find the node's theta angle.", "id": "f4539:c0:m15"}
{"signature": "def plot_nodes(self, nodelist, theta, group):", "body": "for i, node in enumerate(nodelist):<EOL><INDENT>r = self.internal_radius + i * self.scale<EOL>x, y = get_cartesian(r, theta)<EOL>circle = plt.Circle(xy=(x, y), radius=self.dot_radius,<EOL>color=self.node_colormap[group], linewidth=<NUM_LIT:0>)<EOL>self.ax.add_patch(circle)<EOL><DEDENT>", "docstring": "Plots nodes to screen.", "id": "f4539:c0:m9"}
{"signature": "def initialize_major_angle(self):", "body": "num_groups = len(self.nodes.keys())<EOL>self.major_angle = <NUM_LIT:2> * np.pi / num_groups<EOL>", "docstring": "Computes the major angle: 2pi radians / number of groups.", "id": "f4539:c0:m2"}
{"signature": "def draw(self):", "body": "self.ax.set_xlim(-self.plot_radius(), self.plot_radius())<EOL>self.ax.set_ylim(-self.plot_radius(), self.plot_radius())<EOL>self.add_axes_and_nodes()<EOL>self.add_edges()<EOL>self.ax.axis('<STR_LIT>')<EOL>", "docstring": "The master function that is called that draws everything.", "id": "f4539:c0:m18"}
{"signature": "def find_node_group_membership(self, node):", "body": "for group, nodelist in self.nodes.items():<EOL><INDENT>if node in nodelist:<EOL><INDENT>return group<EOL><DEDENT><DEDENT>", "docstring": "Identifies the group for which a node belongs to.", "id": "f4539:c0:m12"}
{"signature": "def simplified_edges(self):", "body": "for group, edgelist in self.edges.items():<EOL><INDENT>for u, v, d in edgelist:<EOL><INDENT>yield (u, v)<EOL><DEDENT><DEDENT>", "docstring": "A generator for getting all of the edges without consuming extra\nmemory.", "id": "f4539:c0:m1"}
{"signature": "def reversals(series, left=False, right=False):", "body": "series = iter(series)<EOL>x_last, x = next(series), next(series)<EOL>d_last = (x - x_last)<EOL>if left:<EOL><INDENT>yield x_last<EOL><DEDENT>for x_next in series:<EOL><INDENT>if x_next == x:<EOL><INDENT>continue<EOL><DEDENT>d_next = x_next - x<EOL>if d_last * d_next < <NUM_LIT:0>:<EOL><INDENT>yield x<EOL><DEDENT>x_last, x = x, x_next<EOL>d_last = d_next<EOL><DEDENT>if right:<EOL><INDENT>yield x_next<EOL><DEDENT>", "docstring": "Iterate reversal points in the series.\n\n    A reversal point is a point in the series at which the first derivative\n    changes sign. Reversal is undefined at the first (last) point because the\n    derivative before (after) this point is undefined. The first and the last\n    points may be treated as reversals by setting the optional parameters\n    `left` and `right` to True.\n\n    Parameters\n    ----------\n    series : iterable sequence of numbers\n    left: bool, optional\n        If True, yield the first point in the series (treat it as a reversal).\n    right: bool, optional\n        If True, yield the last point in the series (treat it as a reversal).\n\n    Yields\n    ------\n    float\n        Reversal points.", "id": "f4542:m1"}
{"signature": "def _sort_lows_and_highs(func):", "body": "@functools.wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>for low, high, mult in func(*args, **kwargs):<EOL><INDENT>if low < high:<EOL><INDENT>yield low, high, mult<EOL><DEDENT>else:<EOL><INDENT>yield high, low, mult<EOL><DEDENT><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Decorator for extract_cycles", "id": "f4542:m2"}
{"signature": "def CherryPyWSGIServer(bind_addr,<EOL>wsgi_app,<EOL>numthreads = <NUM_LIT:10>,<EOL>server_name = None,<EOL>max = -<NUM_LIT:1>,<EOL>request_queue_size = <NUM_LIT:5>,<EOL>timeout = <NUM_LIT:10>,<EOL>shutdown_timeout = <NUM_LIT:5>):", "body": "max_threads = max<EOL>if max_threads < <NUM_LIT:0>:<EOL><INDENT>max_threads = <NUM_LIT:0><EOL><DEDENT>return Rocket(bind_addr, '<STR_LIT>', {'<STR_LIT>': wsgi_app},<EOL>min_threads = numthreads,<EOL>max_threads = max_threads,<EOL>queue_size = request_queue_size,<EOL>timeout = timeout)<EOL>", "docstring": "A Cherrypy wsgiserver-compatible wrapper.", "id": "f4560:m0"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "Worker.__init__(self, *args, **kwargs)<EOL>if isinstance(self.app_info, dict):<EOL><INDENT>multithreaded = self.app_info.get('<STR_LIT>') != <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>multithreaded = False<EOL><DEDENT>self.base_environ = dict({'<STR_LIT>': self.app_info['<STR_LIT>'],<EOL>'<STR_LIT>': multithreaded,<EOL>})<EOL>self.base_environ.update(BASE_ENV)<EOL>self.app = self.app_info.get('<STR_LIT>')<EOL>if not hasattr(self.app, \"<STR_LIT>\"):<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % repr(self.app))<EOL><DEDENT>if has_futures and self.app_info.get('<STR_LIT>'):<EOL><INDENT>executor = self.app_info['<STR_LIT>']<EOL>self.base_environ.update({\"<STR_LIT>\": executor,<EOL>\"<STR_LIT>\": executor.futures})<EOL><DEDENT>", "docstring": "Builds some instance variables that will last the life of the\n        thread.", "id": "f4562:c0:m0"}
{"signature": "def main(argv, version=DEFAULT_VERSION):", "body": "tarball = download_setuptools()<EOL>_install(tarball)<EOL>", "docstring": "Install or upgrade setuptools and EasyInstall", "id": "f4571:m18"}
{"signature": "def update_config(new_config):", "body": "flask_app.base_config.update(new_config)<EOL>if '<STR_LIT>' in new_config:<EOL><INDENT>wd = os.path.abspath(new_config['<STR_LIT>'])<EOL>if nbmanager.notebook_dir != wd:<EOL><INDENT>if not os.path.exists(wd):<EOL><INDENT>raise IOError('<STR_LIT>' % wd)<EOL><DEDENT>nbmanager.notebook_dir = wd<EOL><DEDENT><DEDENT>", "docstring": "Update config options with the provided dictionary of options.", "id": "f4577:m1"}
{"signature": "def set_config(new_config={}):", "body": "<EOL>flask_app.base_config = dict(working_directory='<STR_LIT:.>',<EOL>template='<STR_LIT>',<EOL>debug=False,<EOL>port=None)<EOL>update_config(new_config)<EOL>", "docstring": "Reset config options to defaults, and then update (optionally)\n    with the provided dictionary of options.", "id": "f4577:m2"}
{"signature": "def write_tersoff_potential(parameters):", "body": "lines = []<EOL>for (e1, e2, e3), params in parameters.items():<EOL><INDENT>if len(params) != <NUM_LIT>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>lines.append('<STR_LIT:U+0020>'.join([e1, e2, e3] + ['<STR_LIT>'.format(_) for _ in params]))<EOL><DEDENT>return '<STR_LIT:\\n>'.join(lines)<EOL>", "docstring": "Write tersoff potential file from parameters to string\n\n    Parameters\n    ----------\n    parameters: dict\n       keys are tuple of elements with the values being the parameters length 14", "id": "f4590:m1"}
{"signature": "def write_comb_3_potential(parameters):", "body": "lines = []<EOL>for (e1, e2, e3), params in parameters.items():<EOL><INDENT>if len(params) != <NUM_LIT>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>lines.append('<STR_LIT:U+0020>'.join([e1, e2, e3] + ['<STR_LIT>'.format(_) for _ in params]))<EOL><DEDENT>return '<STR_LIT:\\n>'.join(lines)<EOL>", "docstring": "Write comb 3 potential file from parameters\n\n    Parameters\n    ----------\n    parameters: dict\n           keys are tuple of elements with the values being the parameters length 71", "id": "f4590:m6"}
{"signature": "def write_comb_potential(parameters):", "body": "lines = []<EOL>for (e1, e2, e3), params in parameters.items():<EOL><INDENT>if len(params) != <NUM_LIT>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>lines.append('<STR_LIT:U+0020>'.join([e1, e2, e3] + ['<STR_LIT>'.format(_) for _ in params]))<EOL><DEDENT>return '<STR_LIT:\\n>'.join(lines)<EOL>", "docstring": "Write comb potential file from parameters\n\n    Parameters\n    ----------\n    parameters: dict\n           keys are tuple of elements with the values being the parameters length 46", "id": "f4590:m5"}
{"signature": "def is_cache_valid(self):", "body": "is_expired = time.time() - self._cache_timestamp < self._cache_ttl<EOL>cache_info_set = self._cache_info is not None<EOL>cache_ip_set = self._cache_ip is not None<EOL>return is_expired and cache_info_set and cache_ip_set<EOL>", "docstring": "Evaluates the validity of the cache", "id": "f4601:c1:m7"}
{"signature": "def _fetch_from_sources(self, required_info_keys=None):", "body": "srces = self._sources.keys()<EOL>random.shuffle(srces)<EOL>for source in srces:<EOL><INDENT>try:<EOL><INDENT>source.fetch()<EOL>ip_address = source.ip_address<EOL>info = source.info<EOL>if self._verify_required_keys(info, required_info_keys):<EOL><INDENT>return ip_address, info<EOL><DEDENT><DEDENT>except (ValueError, requests.ConnectionError):<EOL><INDENT>continue<EOL><DEDENT><DEDENT>raise NullResponseFromSourcesError(\"<STR_LIT>\")<EOL>", "docstring": "Internal method that fetches from configured sources.\n:param required_info_keys: Keys that are required in the response\n:type required_info_keys: list\n:return: None\n:rtype: None", "id": "f4601:c1:m4"}
{"signature": "def get_info(self, required_info_keys=None):", "body": "", "docstring": "Round robin through the configured providers until\none returns the keys required, then cache it.", "id": "f4601:c0:m3"}
{"signature": "def add_source(self, source):", "body": "", "docstring": "Adds a source to the set being used by the provider", "id": "f4601:c0:m1"}
{"signature": "@property<EOL><INDENT>def num_sources(self):<DEDENT>", "body": "return len(self._sources)<EOL>", "docstring": "Returns the number of configured sources in the provider", "id": "f4601:c1:m8"}
{"signature": "def get_info(self, required_info_keys=None):", "body": "if not self.is_cache_valid or not self._cache_infoor not self._verify_required_keys(self._cache_info, required_info_keys):<EOL><INDENT>self._cache_ip, self._cache_info = self._fetch_from_sources(required_info_keys)<EOL>self._cache_timestamp = time.time()<EOL><DEDENT>return self._cache_info<EOL>", "docstring": "Round robin through the configured providers until one returns the keys\nrequired, then cache it\n:param required_info_keys: The keys required for the fetch to be valid\n:type required_info_keys: list(tuple)\n:return: The info dictionary returned by the provider\n:rtype: dict", "id": "f4601:c1:m3"}
{"signature": "def add_source(self, source_class, *constructor_args):", "body": "if not IIPSource.implementedBy(source_class):<EOL><INDENT>raise TypeError(\"<STR_LIT>\".format(source_class))<EOL><DEDENT>else:<EOL><INDENT>self._sources.add((source_class, constructor_args))<EOL><DEDENT>", "docstring": "Adds a source to the factory provided it's type and constructor arguments\n:param source_class: The class used to instantiate the source\n:type source_class: type\n:param constructor_args: Arguments to be passed into the constructor\n:type constructor_args: Iterable", "id": "f4603:c3:m1"}
{"signature": "@property<EOL><INDENT>def info(self):<DEDENT>", "body": "self.fetch()<EOL>return self._info<EOL>", "docstring": "Returns a dictionary containing any additional information returned by the API\n:return: any additional information returned by the API\n:rtype: dict", "id": "f4603:c1:m2"}
{"signature": "def get_sources(self, limit=sys.maxsize, types_list=None):", "body": "if types_list and not isinstance(types_list, (tuple, list)):<EOL><INDENT>types_list = [types_list]<EOL><DEDENT>sources = list(self._sources)<EOL>random.shuffle(sources)<EOL>for source in sources:<EOL><INDENT>if not types_list or source[<NUM_LIT:0>] in types_list:<EOL><INDENT>limit -= <NUM_LIT:1><EOL>yield source[<NUM_LIT:0>](*source[<NUM_LIT:1>])<EOL><DEDENT>if limit <= <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Generates instantiated sources from the factory\n:param limit: the max number of sources to yield\n:type limit: int\n:param types_list: filter by types so the constructor can be used to accomidate many types\n:type types_list: class or list of classes\n:return: Yields types added by add_source\n:rtype: generator", "id": "f4603:c3:m2"}
{"signature": "def __init__(self):", "body": "", "docstring": "Empty Constructor that shouldn't be ran for PyLint", "id": "f4603:c0:m0"}
{"signature": "def fetch(self):", "body": "response = requests.get(self._ip_url, headers = {\"<STR_LIT>\": \"<STR_LIT>\"})<EOL>self._ip_address = ipaddress.ip_address(unicode(response.content.strip()))<EOL>self._info = dict()<EOL>", "docstring": "Performs a refresh from source\n:return: None\n:rtype: None", "id": "f4603:c1:m3"}
{"signature": "def __init__(self, ip_url, ip_key):", "body": "super(JSONIPSource, self).__init__(ip_url)<EOL>self._ip_key = ip_key<EOL>", "docstring": ":param ip_url: The URL used to get the IP\n:type ip_url: str\n:param ip_key: The key in the json response used to encapsulate the IP\n:type ip_key: str", "id": "f4603:c2:m0"}
{"signature": "def fetch(self):", "body": "", "docstring": "Performs a refresh from source", "id": "f4603:c0:m1"}
{"signature": "@try_delegation<EOL><INDENT>def apply_handler(self, method_data, *args, **kwargs):<DEDENT>", "body": "if isinstance(method_data, tuple):<EOL><INDENT>len_method = len(method_data)<EOL>method = method_data[<NUM_LIT:0>]<EOL>if <NUM_LIT:1> < len_method:<EOL><INDENT>args = method_data[<NUM_LIT:1>]<EOL><DEDENT>if <NUM_LIT:2> < len_method:<EOL><INDENT>kwargs = method_data[<NUM_LIT:2>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>method = method_data<EOL><DEDENT>return method(*args, **kwargs)<EOL>", "docstring": "Call the dispatched function, optionally with other data\n        stored/created during .register and .prepare. Assume the arguments\n        passed in by the dispathcer are the only ones required.", "id": "f4609:c0:m3"}
{"signature": "@try_delegation<EOL><INDENT>def get_method(self, *args, **kwargs):<DEDENT>", "body": "for method in self.gen_methods(*args, **kwargs):<EOL><INDENT>return method<EOL><DEDENT>msg = '<STR_LIT>'<EOL>raise self.DispatchError(msg % ((args, kwargs), self.inst))<EOL>", "docstring": "Find the first method this input dispatches to.", "id": "f4610:c4:m10"}
{"signature": "@try_delegation<EOL><INDENT>def yield_from_handler(self, result):<DEDENT>", "body": "return result<EOL>", "docstring": "Given an applied function result, yield from it.", "id": "f4610:c4:m14"}
{"signature": "@try_delegation<EOL><INDENT>def gen_dispatch(self, *args, **kwargs):<DEDENT>", "body": "dispatched = False<EOL>for method_data in self.gen_methods(*args, **kwargs):<EOL><INDENT>dispatched = True<EOL>result = self.apply_handler(method_data, *args, **kwargs)<EOL>yield result<EOL><DEDENT>if dispatched:<EOL><INDENT>return<EOL><DEDENT>msg = '<STR_LIT>'<EOL>raise self.DispatchError(msg % ((args, kwargs), self.inst))<EOL>", "docstring": "Find and evaluate/yield every method this input dispatches to.", "id": "f4610:c4:m12"}
{"signature": "@try_delegation<EOL><INDENT>def register(self, method, args, kwargs):<DEDENT>", "body": "invoc = self.dump_invoc(*args, **kwargs)<EOL>self.registry.append((invoc, method.__name__))<EOL>", "docstring": "Given a single decorated handler function,\n        prepare, append desired data to self.registry.", "id": "f4610:c4:m7"}
{"signature": "@try_delegation<EOL><INDENT>def gen_methods(self, *args, **kwargs):<DEDENT>", "body": "dispatched = False<EOL>for invoc, methodname in self.registry:<EOL><INDENT>args, kwargs = self.loads(invoc)<EOL>yield getattr(self.inst, methodname), args, kwargs<EOL>dispatched = True<EOL><DEDENT>if dispatched:<EOL><INDENT>return<EOL><DEDENT>generic_handler = getattr(self.inst, '<STR_LIT>', None)<EOL>if generic_handler is not None:<EOL><INDENT>yield generic_handler, args, kwargs<EOL><DEDENT>msg = '<STR_LIT>'<EOL>raise self.DispatchError(msg % ((args, kwargs), self.inst))<EOL>", "docstring": "Find all method names this input dispatches to. This method\n        can accept *args, **kwargs, but it's the gen_dispatch method's\n        job of passing specific args to handler methods.", "id": "f4610:c4:m9"}
{"signature": "@try_delegation<EOL><INDENT>@dedupe<EOL>def gen_methods(self, *args, **kwargs):<DEDENT>", "body": "token = args[<NUM_LIT:0>]<EOL>inst = self.inst<EOL>prefix = self._method_prefix<EOL>for method_key in self.gen_method_keys(*args, **kwargs):<EOL><INDENT>method = getattr(inst, prefix + method_key, None)<EOL>if method is not None:<EOL><INDENT>yield method<EOL><DEDENT><DEDENT>typename = type(token).__name__<EOL>yield from self.check_basetype(<EOL>token, typename, self.builtins.get(typename))<EOL>for basetype_name in self.interp_types:<EOL><INDENT>yield from self.check_basetype(<EOL>token, basetype_name, getattr(self.types, basetype_name, None))<EOL><DEDENT>for basetype_name in self.abc_types:<EOL><INDENT>yield from self.check_basetype(<EOL>token, basetype_name, getattr(self.collections, basetype_name, None))<EOL><DEDENT>yield from self.gen_generic()<EOL>", "docstring": "Find all method names this input dispatches to.", "id": "f4610:c5:m2"}
{"signature": "def activate_selected(self, request, queryset):", "body": "queryset.update(active=True)<EOL>count = queryset.count()<EOL>if count == <NUM_LIT:1>:<EOL><INDENT>message = _('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>message = _('<STR_LIT>')<EOL>message.format(count=count)  <EOL><DEDENT>self.message_user(request, message)<EOL>", "docstring": "Activate the selected entries.", "id": "f4613:c0:m1"}
{"signature": "def create_organization(active=True):", "body": "Organization.objects.create(<EOL>name='<STR_LIT>',<EOL>description='<STR_LIT>',<EOL>active=active<EOL>)<EOL>", "docstring": "Create an organization.", "id": "f4615:m0"}
{"signature": "def serialize_organization(organization):", "body": "return {<EOL>'<STR_LIT:id>': organization.id,<EOL>'<STR_LIT:name>': organization.name,<EOL>'<STR_LIT>': organization.short_name,<EOL>'<STR_LIT:description>': organization.description,<EOL>'<STR_LIT>': organization.logo<EOL>}<EOL>", "docstring": "Organization object-to-dict serialization", "id": "f4622:m0"}
{"signature": "def course_key_is_valid(course_key):", "body": "if course_key is None:<EOL><INDENT>return False<EOL><DEDENT>try:<EOL><INDENT>CourseKey.from_string(text_type(course_key))<EOL><DEDENT>except (InvalidKeyError, UnicodeDecodeError):<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Course key object validation", "id": "f4629:m0"}
{"signature": "def _activate_organization_course_relationship(relationship):  ", "body": "<EOL>relationship = internal.OrganizationCourse.objects.get(<EOL>id=relationship.id,<EOL>active=False,<EOL>organization__active=True<EOL>)<EOL>_activate_record(relationship)<EOL>", "docstring": "Activates an inactive organization-course relationship", "id": "f4630:m4"}
{"signature": "def fetch_organizations():", "body": "return serializers.serialize_organizations(internal.Organization.objects.filter(active=True))<EOL>", "docstring": "Retrieves the set of active organizations from app/local state\nReturns a list-of-dicts representation of the object", "id": "f4630:m11"}
{"signature": "def create_organization_course(organization, course_key):", "body": "organization_obj = serializers.deserialize_organization(organization)<EOL>try:<EOL><INDENT>relationship = internal.OrganizationCourse.objects.get(<EOL>organization=organization_obj,<EOL>course_id=text_type(course_key)<EOL>)<EOL>if not relationship.active:<EOL><INDENT>_activate_organization_course_relationship(relationship)<EOL><DEDENT><DEDENT>except internal.OrganizationCourse.DoesNotExist:<EOL><INDENT>relationship = internal.OrganizationCourse.objects.create(<EOL>organization=organization_obj,<EOL>course_id=text_type(course_key),<EOL>active=True<EOL>)<EOL><DEDENT>", "docstring": "Inserts a new organization-course relationship into app/local state\nNo response currently defined for this operation", "id": "f4630:m12"}
{"signature": "def _inactivate_organization(organization):", "body": "[_inactivate_organization_course_relationship(record) for record<EOL>in internal.OrganizationCourse.objects.filter(organization_id=organization.id, active=True)]<EOL>[_inactivate_record(record) for record<EOL>in internal.Organization.objects.filter(id=organization.id, active=True)]<EOL>", "docstring": "Inactivates an activated organization as well as any active relationships", "id": "f4630:m3"}
{"signature": "def fetch_organization(organization_id):", "body": "organization = {'<STR_LIT:id>': organization_id}<EOL>if not organization_id:<EOL><INDENT>exceptions.raise_exception(\"<STR_LIT>\", organization, exceptions.InvalidOrganizationException)<EOL><DEDENT>organizations = serializers.serialize_organizations(<EOL>internal.Organization.objects.filter(id=organization_id, active=True)<EOL>)<EOL>if not organizations:<EOL><INDENT>exceptions.raise_exception(\"<STR_LIT>\", organization, exceptions.InvalidOrganizationException)<EOL><DEDENT>return organizations[<NUM_LIT:0>]<EOL>", "docstring": "Retrieves a specific organization from app/local state\nReturns a dictionary representation of the object", "id": "f4630:m9"}
{"signature": "def _inactivate_record(record):", "body": "record.active = False<EOL>record.save()<EOL>", "docstring": "Disables database records by setting the 'active' attribute to False\nThe queries in this module filter out inactive records as part of their criteria\nThis effectively allows us to soft-delete records so they are not lost forever", "id": "f4630:m1"}
{"signature": "def remove_organization_course(organization, course_key):", "body": "_validate_organization_data(organization)<EOL>_validate_course_key(course_key)<EOL>return data.delete_organization_course(course_key=course_key, organization=organization)<EOL>", "docstring": "Removes the specfied course from the specified organization", "id": "f4637:m10"}
{"signature": "def remove_course_references(course_key):", "body": "_validate_course_key(course_key)<EOL>data.delete_course_references(course_key)<EOL>", "docstring": "Removes course references from application state\nSee edx-platform/lms/djangoapps/courseware/management/commands/delete_course_references.py", "id": "f4637:m12"}
{"signature": "def get_organization(organization_id):", "body": "return data.fetch_organization(organization_id)<EOL>", "docstring": "Retrieves the specified organization", "id": "f4637:m4"}
{"signature": "def get_organization_by_short_name(organization_short_name):", "body": "return data.fetch_organization_by_short_name(organization_short_name)<EOL>", "docstring": "Retrieves the organization filtered by short name", "id": "f4637:m5"}
{"signature": "def raise_exception(entity_type, entity, exception):", "body": "raise exception(<EOL>u'<STR_LIT>'.format(entity_type, entity).encode('<STR_LIT:utf-8>')<EOL>)<EOL>", "docstring": "Exception helper", "id": "f4641:m0"}
{"signature": "@register.simple_tag<EOL>def get_upcoming_events_count(days=<NUM_LIT>, featured=False):", "body": "from happenings.models import Event<EOL>start_period = today - datetime.timedelta(days=<NUM_LIT:2>)<EOL>end_period = today + datetime.timedelta(days=days)<EOL>if featured:<EOL><INDENT>return Event.objects.filter(<EOL>featured=True,<EOL>start_date__gte=start_period,<EOL>start_date__lte=end_period<EOL>).count()<EOL><DEDENT>return Event.objects.filter(start_date__gte=start_period, start_date__lte=end_period).count()<EOL>", "docstring": "Returns count of upcoming events for a given number of days, either featured or all\nUsage:\n{% get_upcoming_events_count DAYS as events_count %}\nwith days being the number of days you want, or 5 by default", "id": "f4646:m0"}
{"signature": "@register.inclusion_tag('<STR_LIT>')<EOL>def paginate_update(update):", "body": "from happenings.models import Update<EOL>time = update.pub_time<EOL>event = update.event<EOL>try:<EOL><INDENT>next = Update.objects.filter(<EOL>event=event,<EOL>pub_time__gt=time<EOL>).order_by('<STR_LIT>').only('<STR_LIT:title>')[<NUM_LIT:0>]<EOL><DEDENT>except:<EOL><INDENT>next = None<EOL><DEDENT>try:<EOL><INDENT>previous = Update.objects.filter(<EOL>event=event,<EOL>pub_time__lt=time<EOL>).order_by('<STR_LIT>').only('<STR_LIT:title>')[<NUM_LIT:0>]<EOL><DEDENT>except:<EOL><INDENT>previous = None<EOL><DEDENT>return {'<STR_LIT>': next, '<STR_LIT>': previous, '<STR_LIT>': event}<EOL>", "docstring": "attempts to get next and previous on updates", "id": "f4646:m5"}
{"signature": "def create_ical(request, slug):", "body": "event = get_object_or_404(Event, slug=slug)<EOL>start = event.start_date<EOL>start = datetime.datetime(start.year, start.month, start.day)<EOL>if event.end_date:<EOL><INDENT>end = event.end_date<EOL>end = datetime.datetime(end.year, end.month, end.day)<EOL><DEDENT>else:<EOL><INDENT>end = start<EOL><DEDENT>cal = card_me.iCalendar()<EOL>cal.add('<STR_LIT>').value = '<STR_LIT>'<EOL>vevent = cal.add('<STR_LIT>')<EOL>vevent.add('<STR_LIT>').value = start<EOL>vevent.add('<STR_LIT>').value = end<EOL>vevent.add('<STR_LIT>').value = datetime.datetime.now()<EOL>vevent.add('<STR_LIT>').value = event.name<EOL>response = HttpResponse(cal.serialize(), content_type='<STR_LIT>')<EOL>response['<STR_LIT>'] = '<STR_LIT>'<EOL>response['<STR_LIT>'] = '<STR_LIT>'<EOL>return response<EOL>", "docstring": "Creates an ical .ics file for an event using python-card-me.", "id": "f4647:m0"}
{"signature": "def event_all_comments_list(request, slug):", "body": "event = get_object_or_404(Event, slug=slug)<EOL>comments = event.all_comments<EOL>page = int(request.GET.get('<STR_LIT>', <NUM_LIT>))  <EOL>is_paginated = False<EOL>if comments:<EOL><INDENT>paginator = Paginator(comments, <NUM_LIT:50>)  <EOL>try:<EOL><INDENT>comments = paginator.page(page)<EOL><DEDENT>except EmptyPage:<EOL><INDENT>comments = paginator.page(paginator.num_pages)<EOL><DEDENT>is_paginated = comments.has_other_pages()<EOL><DEDENT>return render(request, '<STR_LIT>', {<EOL>\"<STR_LIT>\": event,<EOL>\"<STR_LIT>\": comments,<EOL>\"<STR_LIT>\": comments,<EOL>\"<STR_LIT>\": comments,<EOL>\"<STR_LIT>\": page,<EOL>\"<STR_LIT>\": is_paginated,<EOL>\"<STR_LIT:key>\": key<EOL>})<EOL>", "docstring": "Returns a list view of all comments for a given event.\nCombines event comments and update comments in one list.", "id": "f4647:m1"}
{"signature": "@login_required<EOL>def add_event(request):", "body": "form = AddEventForm(request.POST or None)<EOL>if form.is_valid():<EOL><INDENT>instance = form.save(commit=False)<EOL>instance.sites = settings.SITE_ID<EOL>instance.submitted_by = request.user<EOL>instance.approved = True<EOL>instance.slug = slugify(instance.name)<EOL>instance.save()<EOL>messages.success(request, '<STR_LIT>')<EOL>return HttpResponseRedirect(reverse('<STR_LIT>'))<EOL><DEDENT>return render(request, '<STR_LIT>', {<EOL>'<STR_LIT>': form,<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>})<EOL>", "docstring": "Public form to add an event.", "id": "f4647:m4"}
{"signature": "def process_upload(upload_file, instance, form, event, request):", "body": "caption = form.cleaned_data.get('<STR_LIT>')<EOL>upload_name = upload_file.name.lower()<EOL>if upload_name.endswith('<STR_LIT>') or upload_name.endswith('<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>upload = Image(<EOL>event=event,<EOL>image=upload_file,<EOL>caption=caption,<EOL>)<EOL>upload.save()<EOL>instance.photos.add(upload)<EOL><DEDENT>except Exception as error:<EOL><INDENT>messages.error(request, '<STR_LIT>'.format(error))<EOL><DEDENT><DEDENT>", "docstring": "Helper function that actually processes and saves the upload(s).\nSegregated out for readability.", "id": "f4647:m7"}
{"signature": "def add_memory(request, slug):", "body": "event = get_object_or_404(Event, slug=slug)<EOL>form = MemoryForm(request.POST or None, request.FILES or None)<EOL>if form.is_valid():<EOL><INDENT>instance = form.save(commit=False)<EOL>instance.user = request.user<EOL>instance.event = event<EOL>instance.save()<EOL>msg = \"<STR_LIT>\"<EOL>if request.FILES:<EOL><INDENT>photo_list = request.FILES.getlist('<STR_LIT>')<EOL>photo_count = len(photo_list)<EOL>for upload_file in photo_list:<EOL><INDENT>process_upload(upload_file, instance, form, event, request)<EOL><DEDENT>if photo_count > <NUM_LIT:1>:<EOL><INDENT>msg += \"<STR_LIT>\".format(photo_count)<EOL><DEDENT>else:<EOL><INDENT>msg += \"<STR_LIT>\".format(photo_count)<EOL><DEDENT><DEDENT>messages.success(request, msg)<EOL>return HttpResponseRedirect('<STR_LIT>')<EOL><DEDENT>return render(request, '<STR_LIT>', {'<STR_LIT>': form, '<STR_LIT>': event})<EOL>", "docstring": "Adds a memory to an event.", "id": "f4647:m6"}
{"signature": "@cached_property<EOL><INDENT>def comments_open(self):<DEDENT>", "body": "return self.event.comments_open<EOL>", "docstring": "Based on the update's event's comments open status", "id": "f4650:c5:m3"}
{"signature": "def recently_ended(self):", "body": "if self.ended():<EOL><INDENT>end_date = self.end_date if self.end_date else self.start_date<EOL>if end_date >= offset.date():<EOL><INDENT>return True<EOL><DEDENT><DEDENT>", "docstring": "Determines if event ended recently (within 5 days).\nUseful for attending list.", "id": "f4650:c1:m5"}
{"signature": "def event_update_list(request, slug):", "body": "event = get_object_or_404(Event, slug=slug)<EOL>updates = Update.objects.filter(event__slug=slug)<EOL>if event.recently_ended():<EOL><INDENT>updates = updates.order_by('<STR_LIT:id>')<EOL><DEDENT>else:<EOL><INDENT>updates = updates.order_by('<STR_LIT>')<EOL><DEDENT>return render(request, '<STR_LIT>', {<EOL>'<STR_LIT>': event,<EOL>'<STR_LIT>': updates,<EOL>})<EOL>", "docstring": "Returns a list view of updates for a given event.\nIf the event is over, it will be in chronological order.\nIf the event is upcoming or still going,\nit will be in reverse chronological order.", "id": "f4652:m2"}
{"signature": "def event_all_comments_list(request, slug):", "body": "event = get_object_or_404(Event, slug=slug)<EOL>comments = event.all_comments<EOL>page = int(request.GET.get('<STR_LIT>', <NUM_LIT>))  <EOL>is_paginated = False<EOL>if comments:<EOL><INDENT>paginator = Paginator(comments, <NUM_LIT:50>)  <EOL>try:<EOL><INDENT>comments = paginator.page(page)<EOL><DEDENT>except EmptyPage:<EOL><INDENT>comments = paginator.page(paginator.num_pages)<EOL><DEDENT>is_paginated = comments.has_other_pages()<EOL><DEDENT>return render(request, '<STR_LIT>', {<EOL>\"<STR_LIT>\": event,<EOL>\"<STR_LIT>\": comments,<EOL>\"<STR_LIT>\": comments,<EOL>\"<STR_LIT>\": comments,<EOL>\"<STR_LIT>\": page,<EOL>\"<STR_LIT>\": is_paginated,<EOL>\"<STR_LIT:key>\": key<EOL>})<EOL>", "docstring": "Returns a list view of all comments for a given event.\nCombines event comments and update comments in one list.", "id": "f4652:m1"}
{"signature": "def get_all_images_count(self):", "body": "self_imgs = self.image_set.count()<EOL>update_ids = self.update_set.values_list('<STR_LIT:id>', flat=True)<EOL>u_images = UpdateImage.objects.filter(update__id__in=update_ids).count()<EOL>count = self_imgs + u_images<EOL>return count<EOL>", "docstring": "Gets count of all images from both event and updates.", "id": "f4654:c1:m13"}
{"signature": "def delete_past_events(self):", "body": "lapsed = datetime.datetime.now() - datetime.timedelta(days=<NUM_LIT>)<EOL>for event in self.filter(start_date__lte=lapsed, featured=<NUM_LIT:0>, recap='<STR_LIT>'):<EOL><INDENT>event.delete()<EOL><DEDENT>", "docstring": "Removes old events. This is provided largely as a convenience for maintenance\npurposes (daily_cleanup). if an Event has passed by more than X days\nas defined by Lapsed and has no related special events it will be deleted\nto free up the event name and remove clutter.\nFor best results, set this up to run regularly as a cron job.", "id": "f4654:c0:m0"}
{"signature": "def has_started(self):", "body": "if self.start_date <= datetime.date.today():<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Determines if event has started. Duh.", "id": "f4654:c1:m6"}
{"signature": "def comments_open(self):", "body": "return True<EOL>", "docstring": "Determine if comments should be allowed.\nProbably need some more robust logic here :-)\nMaybe something like \"If event has been over for a month, no more comments.\"\nOr something.", "id": "f4654:c1:m7"}
{"signature": "def parse(string):", "body": "<EOL>bib = []<EOL>if not isinstance(string, six.text_type):<EOL><INDENT>string = string.decode('<STR_LIT:utf-8>')<EOL><DEDENT>for key, value in special_chars:<EOL><INDENT>string = string.replace(key, value)<EOL><DEDENT>string = re.sub(r'<STR_LIT>', r'<STR_LIT>', string)<EOL>entries = re.findall(<EOL>r'<STR_LIT>',<EOL>string)<EOL>for entry in entries:<EOL><INDENT>pairs = re.findall(r'<STR_LIT>', entry[<NUM_LIT:2>])<EOL>bib.append({'<STR_LIT:type>': entry[<NUM_LIT:0>].lower(), '<STR_LIT:key>': entry[<NUM_LIT:1>]})<EOL>for key, value in pairs:<EOL><INDENT>key = key.lower()<EOL>if value and value[<NUM_LIT:0>] == '<STR_LIT:\">' and value[-<NUM_LIT:1>] == '<STR_LIT:\">':<EOL><INDENT>value = value[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>if value and value[<NUM_LIT:0>] == '<STR_LIT:{>' and value[-<NUM_LIT:1>] == '<STR_LIT:}>':<EOL><INDENT>value = value[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>if key not in ['<STR_LIT>', '<STR_LIT:title>']:<EOL><INDENT>value = value.replace('<STR_LIT:}>', '<STR_LIT>').replace('<STR_LIT:{>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>if value.startswith('<STR_LIT:{>') and value.endswith('<STR_LIT:}>'):<EOL><INDENT>value = value[<NUM_LIT:1>:]<EOL>value = value[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>value = value.strip()<EOL>value = re.sub(r'<STR_LIT>', '<STR_LIT:U+0020>', value)<EOL>bib[-<NUM_LIT:1>][key] = value<EOL><DEDENT><DEDENT>return bib<EOL>", "docstring": "Takes a string in BibTex format and returns a list of BibTex entries, where\neach entry is a dictionary containing the entries' key-value pairs.\n\n@type  string: string\n@param string: bibliography in BibTex format\n\n@rtype: list\n@return: a list of dictionaries representing a bibliography", "id": "f4667:m0"}
{"signature": "def get_publication_list(context, list, template='<STR_LIT>'):", "body": "list = List.objects.filter(list__iexact=list)<EOL>if not list:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>list = list[<NUM_LIT:0>]<EOL>publications = list.publication_set.all()<EOL>publications = publications.order_by('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>if not publications:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>populate(publications)<EOL>return render_template(<EOL>template, context['<STR_LIT>'], {'<STR_LIT:list>': list, '<STR_LIT>': publications})<EOL>", "docstring": "Get a publication list.", "id": "f4673:m3"}
{"signature": "def tex_parse(string):", "body": "string = string.replace('<STR_LIT:{>', '<STR_LIT>').replace('<STR_LIT:}>', '<STR_LIT>')<EOL>def tex_replace(match):<EOL><INDENT>returnsub(r'<STR_LIT>', r'<STR_LIT>',<EOL>sub(r'<STR_LIT>', r'<STR_LIT>',<EOL>sub(r'<STR_LIT>', r'<STR_LIT>',<EOL>sub(r'<STR_LIT>', r'<STR_LIT>',<EOL>sub(r'<STR_LIT>' + GREEK_LETTERS + '<STR_LIT:)>', r'<STR_LIT>', match.group(<NUM_LIT:1>))))))<EOL><DEDENT>return mark_safe(sub(r'<STR_LIT>', tex_replace, escape(string)))<EOL>", "docstring": "Renders some basic TeX math to HTML.", "id": "f4673:m4"}
{"signature": "def get_publications(context, template='<STR_LIT>'):", "body": "types = Type.objects.filter(hidden=False)<EOL>publications = Publication.objects.select_related()<EOL>publications = publications.filter(external=False, type__in=types)<EOL>publications = publications.order_by('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>if not publications:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>populate(publications)<EOL>return render_template(template, context['<STR_LIT>'], {'<STR_LIT>': publications})<EOL>", "docstring": "Get all publications.", "id": "f4673:m1"}
{"signature": "def get_publication(context, id):", "body": "pbl = Publication.objects.filter(pk=int(id))<EOL>if len(pbl) < <NUM_LIT:1>:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>pbl[<NUM_LIT:0>].links = pbl[<NUM_LIT:0>].customlink_set.all()<EOL>pbl[<NUM_LIT:0>].files = pbl[<NUM_LIT:0>].customfile_set.all()<EOL>return render_template(<EOL>'<STR_LIT>', context['<STR_LIT>'], {'<STR_LIT>': pbl[<NUM_LIT:0>]})<EOL>", "docstring": "Get a single publication.", "id": "f4673:m2"}
{"signature": "def _produce_author_lists(self):", "body": "<EOL>self.authors = self.authors.replace('<STR_LIT>', '<STR_LIT:U+002CU+0020>')<EOL>self.authors = self.authors.replace('<STR_LIT>', '<STR_LIT:U+002CU+0020>')<EOL>self.authors = self.authors.replace('<STR_LIT>', '<STR_LIT:U+002CU+0020>')<EOL>self.authors = self.authors.replace('<STR_LIT:;>', '<STR_LIT:U+002C>')<EOL>self.authors_list = [author.strip() for author in self.authors.split('<STR_LIT:U+002C>')]<EOL>self.authors_list_simple = []<EOL>self.authors_list_split = []<EOL>self.title_ends_with_punct = self.title[-<NUM_LIT:1>] in ['<STR_LIT:.>', '<STR_LIT:!>', '<STR_LIT:?>']if len(self.title) > <NUM_LIT:0> else False<EOL>suffixes = ['<STR_LIT:I>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', \"<STR_LIT>\", \"<STR_LIT>\"]<EOL>prefixes = ['<STR_LIT>']<EOL>prepositions = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>for i, author in enumerate(self.authors_list):<EOL><INDENT>if author == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>names = author.split('<STR_LIT:U+0020>')<EOL>if (len(names[-<NUM_LIT:1>]) <= <NUM_LIT:3>)and names[-<NUM_LIT:1>] not in suffixesand all(c in ascii_uppercase for c in names[-<NUM_LIT:1>]):<EOL><INDENT>names = [c + '<STR_LIT:.>' for c in names[-<NUM_LIT:1>]] + names[:-<NUM_LIT:1>]<EOL><DEDENT>num_suffixes = <NUM_LIT:0><EOL>for name in names[::-<NUM_LIT:1>]:<EOL><INDENT>if name in suffixes:<EOL><INDENT>num_suffixes += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>for j, name in enumerate(names[:-<NUM_LIT:1> - num_suffixes]):<EOL><INDENT>if j == <NUM_LIT:0> and name in prefixes:<EOL><INDENT>continue<EOL><DEDENT>if j > <NUM_LIT:0> and name in prepositions:<EOL><INDENT>continue<EOL><DEDENT>if (len(name) > <NUM_LIT:2>) or (len(name) and (name[-<NUM_LIT:1>] != '<STR_LIT:.>')):<EOL><INDENT>k = name.find('<STR_LIT:->')<EOL>if <NUM_LIT:0> < k + <NUM_LIT:1> < len(name):<EOL><INDENT>names[j] = 
name[<NUM_LIT:0>] + '<STR_LIT>' + name[k + <NUM_LIT:1>] + '<STR_LIT:.>'<EOL><DEDENT>else:<EOL><INDENT>names[j] = name[<NUM_LIT:0>] + '<STR_LIT:.>'<EOL><DEDENT><DEDENT><DEDENT>if len(names):<EOL><INDENT>self.authors_list[i] = '<STR_LIT:U+0020>'.join(names)<EOL>if len(names) > <NUM_LIT:1>:<EOL><INDENT>for name in names[<NUM_LIT:0>].split('<STR_LIT:->'):<EOL><INDENT>name_simple = self.simplify_name('<STR_LIT:U+0020>'.join([name, names[-<NUM_LIT:1>]]))<EOL>self.authors_list_simple.append(name_simple)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.authors_list_simple.append(self.simplify_name(names[<NUM_LIT:0>]))<EOL><DEDENT>num_prepositions = <NUM_LIT:0><EOL>for name in names:<EOL><INDENT>if name in prepositions:<EOL><INDENT>num_prepositions += <NUM_LIT:1><EOL><DEDENT><DEDENT>sp = <NUM_LIT:1> + num_suffixes + num_prepositions<EOL>self.authors_list_split.append(<EOL>('<STR_LIT:U+0020>'.join(names[:-sp]), '<STR_LIT:U+0020>'.join(names[-sp:])))<EOL><DEDENT><DEDENT>self.authors_bibtex = '<STR_LIT>'.join(self.authors_list)<EOL>if len(self.authors_list) > <NUM_LIT:2>:<EOL><INDENT>self.authors = '<STR_LIT>'.join([<EOL>'<STR_LIT:U+002CU+0020>'.join(self.authors_list[:-<NUM_LIT:1>]),<EOL>self.authors_list[-<NUM_LIT:1>]])<EOL><DEDENT>elif len(self.authors_list) > <NUM_LIT:1>:<EOL><INDENT>self.authors = '<STR_LIT>'.join(self.authors_list)<EOL><DEDENT>else:<EOL><INDENT>self.authors = self.authors_list[<NUM_LIT:0>]<EOL><DEDENT>", "docstring": "Parse authors string to create lists of authors.", "id": "f4681:c0:m1"}
{"signature": "def to(self, order):", "body": "if order is None or self.order == order:<EOL><INDENT>return<EOL><DEDENT>qs = self.get_ordering_queryset()<EOL>if self.order > order:<EOL><INDENT>qs.filter(order__lt=self.order, order__gte=order).update(order=F('<STR_LIT>') + <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>qs.filter(order__gt=self.order, order__lte=order).update(order=F('<STR_LIT>') - <NUM_LIT:1>)<EOL><DEDENT>self.order = order<EOL>self.save()<EOL>", "docstring": "Move object to a certain position, updating all affected objects to move accordingly up or down.", "id": "f4682:c0:m11"}
{"signature": "def above(self, ref):", "body": "if not self._valid_ordering_reference(ref):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\" % (<EOL>self, self.__class__, self.order_with_respect_to,<EOL>self._get_order_with_respect_to()<EOL>)<EOL>)<EOL><DEDENT>if self.order == ref.order:<EOL><INDENT>return<EOL><DEDENT>if self.order > ref.order:<EOL><INDENT>o = ref.order<EOL><DEDENT>else:<EOL><INDENT>o = self.get_ordering_queryset().filter(order__lt=ref.order).aggregate(Max('<STR_LIT>')).get('<STR_LIT>') or <NUM_LIT:0><EOL><DEDENT>self.to(o)<EOL>", "docstring": "Move this object above the referenced object.", "id": "f4682:c0:m12"}
{"signature": "def below(self, ref):", "body": "if not self._valid_ordering_reference(ref):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\" % (<EOL>self, self.__class__, self.order_with_respect_to,<EOL>self._get_order_with_respect_to()<EOL>)<EOL>)<EOL><DEDENT>if self.order == ref.order:<EOL><INDENT>return<EOL><DEDENT>if self.order > ref.order:<EOL><INDENT>o = self.get_ordering_queryset().filter(order__gt=ref.order).aggregate(Min('<STR_LIT>')).get('<STR_LIT>') or <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>o = ref.order<EOL><DEDENT>self.to(o)<EOL>", "docstring": "Move this object below the referenced object.", "id": "f4682:c0:m13"}
{"signature": "def swap(self, qs):", "body": "try:<EOL><INDENT>replacement = qs[<NUM_LIT:0>]<EOL><DEDENT>except IndexError:<EOL><INDENT>return<EOL><DEDENT>if not self._valid_ordering_reference(replacement):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\" % (<EOL>self, self.__class__, self.order_with_respect_to,<EOL>self._get_order_with_respect_to()<EOL>)<EOL>)<EOL><DEDENT>self.order, replacement.order = replacement.order, self.order<EOL>self.save()<EOL>replacement.save()<EOL>", "docstring": "Swap the positions of this object with a reference object.", "id": "f4682:c0:m8"}
{"signature": "def bottom(self):", "body": "o = self.get_ordering_queryset().aggregate(Max('<STR_LIT>')).get('<STR_LIT>')<EOL>self.to(o)<EOL>", "docstring": "Move this object to the bottom of the ordered stack.", "id": "f4682:c0:m15"}
{"signature": "def _import_module(name):", "body": "__import__(name)<EOL>return sys.modules[name]<EOL>", "docstring": "Import module, returning the module after the last dot.", "id": "f4689:m1"}
{"signature": "def itervalues(d, **kw):", "body": "return iter(getattr(d, _itervalues)(**kw))<EOL>", "docstring": "Return an iterator over the values of a dictionary.", "id": "f4689:m5"}
{"signature": "def run_suite(suite):", "body": "sys.stdout.write('<STR_LIT>' % tests.countTestCases())<EOL>res = unittest.TestResult()<EOL>suite.run(res)<EOL>if res.wasSuccessful():<EOL><INDENT>sys.stdout.write('<STR_LIT>')<EOL>return<EOL><DEDENT>sys.stdout.write('<STR_LIT:\\n>')<EOL>for problems, kind in ((res.errors, '<STR_LIT:error>'),<EOL>(res.failures, '<STR_LIT>')):<EOL><INDENT>if len(problems):<EOL><INDENT>head = '<STR_LIT>' % (len(problems),<EOL>kind,<EOL>'<STR_LIT:s>' if len(problems) != <NUM_LIT:1> else '<STR_LIT>')<EOL>sys.stdout.write('<STR_LIT>' %<EOL>(head, '<STR_LIT>' * len(head)))<EOL><DEDENT>for problem in problems:<EOL><INDENT>func = problem[<NUM_LIT:0>]._testMethodName[<NUM_LIT:5>:]<EOL>environ = '<STR_LIT>' if isinstance(problem[<NUM_LIT:0>],<EOL>BrokenCtypesTest)else '<STR_LIT>'<EOL>sys.stdout.write(<EOL>'<STR_LIT>' %<EOL>(func, environ, '<STR_LIT:\\n>'.join(map(lambda s: '<STR_LIT:U+0020>' + s,<EOL>problem[<NUM_LIT:1>].splitlines())))<EOL>)<EOL><DEDENT><DEDENT>sys.stdout.write('<STR_LIT>' %<EOL>(res.testsRun - len(res.failures) - len(res.errors)))<EOL>", "docstring": "unittest is basically a disaster, so let's do this ourselves.", "id": "f4702:m0"}
{"signature": "def _uptime_amiga():", "body": "global __boottime<EOL>try:<EOL><INDENT>__boottime = os.stat('<STR_LIT>').st_ctime<EOL>return time.time() - __boottime<EOL><DEDENT>except (NameError, OSError):<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Returns uptime in seconds or None, on AmigaOS.", "id": "f4705:m2"}
{"signature": "def _uptime_riscos():", "body": "try:<EOL><INDENT>up = swi.swi('<STR_LIT>', '<STR_LIT>')<EOL>if up < <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>return up / <NUM_LIT><EOL><DEDENT>except NameError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Returns uptime in seconds or None, on RISC OS.", "id": "f4705:m8"}
{"signature": "def _uptime_linux():", "body": "<EOL>try:<EOL><INDENT>f = open('<STR_LIT>', '<STR_LIT:r>')<EOL>up = float(f.readline().split()[<NUM_LIT:0>])<EOL>f.close()<EOL>return up<EOL><DEDENT>except (IOError, ValueError):<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>libc = ctypes.CDLL('<STR_LIT>')<EOL><DEDENT>except AttributeError:<EOL><INDENT>return None<EOL><DEDENT>except OSError:<EOL><INDENT>try:<EOL><INDENT>libc = ctypes.CDLL('<STR_LIT>')<EOL><DEDENT>except OSError:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>if not hasattr(libc, '<STR_LIT>'):<EOL><INDENT>return None<EOL><DEDENT>buf = ctypes.create_string_buffer(<NUM_LIT>) <EOL>if libc.sysinfo(buf) < <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>up = struct.unpack_from('<STR_LIT>', buf.raw)[<NUM_LIT:0>]<EOL>if up < <NUM_LIT:0>:<EOL><INDENT>up = None<EOL><DEDENT>return up<EOL>", "docstring": "Returns uptime in seconds or None, on Linux.", "id": "f4705:m0"}
{"signature": "def _boottime_linux():", "body": "global __boottime<EOL>try:<EOL><INDENT>f = open('<STR_LIT>', '<STR_LIT:r>')<EOL>for line in f:<EOL><INDENT>if line.startswith('<STR_LIT>'):<EOL><INDENT>__boottime = int(line.split()[<NUM_LIT:1>])<EOL><DEDENT><DEDENT>if datetime is None:<EOL><INDENT>raise NotImplementedError('<STR_LIT>')<EOL><DEDENT>return datetime.fromtimestamp(__boottime)<EOL><DEDENT>except (IOError, IndexError):<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "A way to figure out the boot time directly on Linux.", "id": "f4705:m1"}
{"signature": "def _uptime_plan9():", "body": "<EOL>try:<EOL><INDENT>f = open('<STR_LIT>', '<STR_LIT:r>')<EOL>s, ns, ct, cf = f.read().split()<EOL>f.close()<EOL>return float(ct) / float(cf)<EOL><DEDENT>except (IOError, ValueError):<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Returns uptime in seconds or None, on Plan 9.", "id": "f4705:m7"}
{"signature": "def uptime():", "body": "if __boottime is not None:<EOL><INDENT>return time.time() - __boottime<EOL><DEDENT>return {'<STR_LIT>': _uptime_amiga,<EOL>'<STR_LIT>': _uptime_amiga,<EOL>'<STR_LIT>': _uptime_beos,<EOL>'<STR_LIT>': _uptime_linux,<EOL>'<STR_LIT>': _uptime_osx,<EOL>'<STR_LIT>': _uptime_beos,<EOL>'<STR_LIT>': _uptime_linux,<EOL>'<STR_LIT>': _uptime_linux,<EOL>'<STR_LIT>': _uptime_linux,<EOL>'<STR_LIT>': _uptime_mac,<EOL>'<STR_LIT>': _uptime_minix,<EOL>'<STR_LIT>': _uptime_riscos,<EOL>'<STR_LIT>': _uptime_solaris,<EOL>'<STR_LIT>': _uptime_syllable,<EOL>'<STR_LIT:win32>': _uptime_windows,<EOL>'<STR_LIT>': _uptime_windows}.get(sys.platform, _uptime_bsd)() or_uptime_bsd() or _uptime_plan9() or _uptime_linux() or_uptime_windows() or _uptime_solaris() or _uptime_beos() or_uptime_amiga() or _uptime_riscos() or _uptime_posix() or_uptime_syllable() or _uptime_mac() or _uptime_osx()<EOL>", "docstring": "Returns uptime in seconds if even remotely possible, or None if not.", "id": "f4705:m12"}
{"signature": "def _uptime_syllable():", "body": "global __boottime<EOL>try:<EOL><INDENT>__boottime = os.stat('<STR_LIT>').st_mtime<EOL>return time.time() - __boottime<EOL><DEDENT>except (NameError, OSError):<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Returns uptime in seconds or None, on Syllable.", "id": "f4705:m10"}
{"signature": "def ExtensionReplacer(new_ext):", "body": "return functools.partial(replace_extension, new_ext)<EOL>", "docstring": "A reusable function to replace a file's extension with another\n\n>>> repl = ExtensionReplacer('.pdf')\n>>> repl('myfile.doc')\n'myfile.pdf'\n>>> repl('myfile.txt')\n'myfile.pdf'\n>>> repl('myfile')\n'myfile.pdf'", "id": "f4708:m10"}
{"signature": "def is_hidden(path):", "body": "full_path = os.path.abspath(path)<EOL>name = os.path.basename(full_path)<EOL>def no(path):<EOL><INDENT>return False<EOL><DEDENT>platform_hidden = globals().get('<STR_LIT>' + platform.system(), no)<EOL>return name.startswith('<STR_LIT:.>') or platform_hidden(full_path)<EOL>", "docstring": "Check whether a file is presumed hidden, either because\nthe pathname starts with dot or because the platform\nindicates such.", "id": "f4708:m13"}
{"signature": "def insert_before_extension(filename, content):", "body": "parts = list(os.path.splitext(filename))<EOL>parts[<NUM_LIT:1>:<NUM_LIT:1>] = [content]<EOL>return '<STR_LIT>'.join(parts)<EOL>", "docstring": "Given a filename and some content, insert the content just before\nthe extension.\n\n>>> insert_before_extension('pages.pdf', '-old')\n'pages-old.pdf'", "id": "f4708:m5"}
{"signature": "@contextlib.contextmanager<EOL>def tempfile_context(*args, **kwargs):", "body": "fd, filename = tempfile.mkstemp(*args, **kwargs)<EOL>os.close(fd)<EOL>try:<EOL><INDENT>yield filename<EOL><DEDENT>finally:<EOL><INDENT>os.remove(filename)<EOL><DEDENT>", "docstring": "A wrapper around tempfile.mkstemp to create the file in a context and\ndelete it after.", "id": "f4708:m8"}
{"signature": "def encode(name, system='<STR_LIT>'):", "body": "assert system == '<STR_LIT>', '<STR_LIT>'<EOL>special_characters = r'<STR_LIT>' + '<STR_LIT>'.join(map(chr, range(<NUM_LIT:32>)))<EOL>pattern = '<STR_LIT:|>'.join(map(re.escape, special_characters))<EOL>pattern = re.compile(pattern)<EOL>return pattern.sub('<STR_LIT:_>', name)<EOL>", "docstring": "Encode the name for a suitable name in the given filesystem\n>>> encode('Test :1')\n'Test _1'", "id": "f4708:m7"}
{"signature": "@abstractmethod<EOL><INDENT>def __call__(self, **kwargs):<DEDENT>", "body": "", "docstring": "method called to make the request", "id": "f4730:c1:m3"}
{"signature": "async def _get_twitter_configuration(self):", "body": "api = self['<STR_LIT>', general.twitter_api_version,<EOL>\"<STR_LIT>\", general.twitter_base_api_url]<EOL>return await api.help.configuration.get()<EOL>", "docstring": "create a ``twitter_configuration`` attribute with the response\nof the endpoint\nhttps://api.twitter.com/1.1/help/configuration.json", "id": "f4733:c2:m1"}
{"signature": "async def upload_media(self, file_,<EOL>media_type=None,<EOL>media_category=None,<EOL>chunked=None,<EOL>size_limit=None,<EOL>**params):", "body": "if isinstance(file_, str):<EOL><INDENT>url = urlparse(file_)<EOL>if url.scheme.startswith('<STR_LIT:http>'):<EOL><INDENT>media = await self._session.get(file_)<EOL><DEDENT>else:<EOL><INDENT>path = urlparse(file_).path.strip(\"<STR_LIT>\")<EOL>media = await utils.execute(open(path, '<STR_LIT:rb>'))<EOL><DEDENT><DEDENT>elif hasattr(file_, '<STR_LIT>') or isinstance(file_, bytes):<EOL><INDENT>media = file_<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>media_size = await utils.get_size(media)<EOL>if chunked is not None:<EOL><INDENT>size_test = False<EOL><DEDENT>else:<EOL><INDENT>size_test = await self._size_test(media_size, size_limit)<EOL><DEDENT>if isinstance(media, aiohttp.ClientResponse):<EOL><INDENT>media = media.content<EOL><DEDENT>if chunked or (size_test and chunked is None):<EOL><INDENT>args = media, media_size, file_, media_type, media_category<EOL>response = await self._chunked_upload(*args, **params)<EOL><DEDENT>else:<EOL><INDENT>response = await self.upload.media.upload.post(media=media,<EOL>**params)<EOL><DEDENT>if not hasattr(file_, '<STR_LIT>') and not getattr(media, '<STR_LIT>', True):<EOL><INDENT>media.close()<EOL><DEDENT>return response<EOL>", "docstring": "upload a media on twitter\n\nParameters\n----------\nfile_ : str or pathlib.Path or file\n    Path to the file or file object\nmedia_type : str, optional\n    mime type of the media\nmedia_category : str, optional\n    Twitter's media category of the media, must be used with\n    ``media_type``\nchunked : bool, optional\n    If True, force the use of the chunked upload for the media\nsize_limit : int, optional\n    If set, the media will be sent using a multipart upload if\n    its size is over ``size_limit`` bytes\nparams : dict\n    parameters used when making the 
request\n\nReturns\n-------\n.data_processing.PeonyResponse\n    Response of the request", "id": "f4733:c2:m8"}
{"signature": "@staticmethod<EOL><INDENT>def _get_base_url(base_url, api, version):<DEDENT>", "body": "format_args = {}<EOL>if \"<STR_LIT>\" in base_url:<EOL><INDENT>if api == \"<STR_LIT>\":<EOL><INDENT>base_url = base_url.replace('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>format_args['<STR_LIT>'] = api<EOL><DEDENT><DEDENT>if \"<STR_LIT>\" in base_url:<EOL><INDENT>if version == \"<STR_LIT>\":<EOL><INDENT>base_url = base_url.replace('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>format_args['<STR_LIT:version>'] = version<EOL><DEDENT><DEDENT>return base_url.format(api=api, version=version)<EOL>", "docstring": "create the base url for the api\n\nParameters\n----------\nbase_url : str\n    format of the base_url using {api} and {version}\napi : str\n    name of the api to use\nversion : str\n    version of the api\n\nReturns\n-------\nstr\n    the base url of the api you want to use", "id": "f4733:c1:m2"}
{"signature": "async def run_tasks(self):", "body": "tasks = self.get_tasks()<EOL>self._gathered_tasks = asyncio.gather(*tasks, loop=self.loop)<EOL>try:<EOL><INDENT>await self._gathered_tasks<EOL><DEDENT>except CancelledError:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Run the tasks attached to the instance", "id": "f4733:c1:m10"}
{"signature": "async def _get_user(self):", "body": "api = self['<STR_LIT>', general.twitter_api_version,<EOL>\"<STR_LIT>\", general.twitter_base_api_url]<EOL>return await api.account.verify_credentials.get()<EOL>", "docstring": "create a ``user`` attribute with the response of the endpoint\nhttps://api.twitter.com/1.1/account/verify_credentials.json", "id": "f4733:c2:m4"}
{"signature": "def __new__(cls, name, bases, attrs, **kwargs):", "body": "tasks = {'<STR_LIT>': set()}<EOL>for base in bases:<EOL><INDENT>if hasattr(base, '<STR_LIT>'):<EOL><INDENT>for key, value in base._tasks.items():<EOL><INDENT>tasks[key] |= value<EOL><DEDENT><DEDENT><DEDENT>for attr in attrs.values():<EOL><INDENT>if isinstance(attr, task):<EOL><INDENT>tasks['<STR_LIT>'].add(attr)<EOL><DEDENT><DEDENT>attrs['<STR_LIT>'] = tasks<EOL>attrs['<STR_LIT>'] = EventStreams()<EOL>return super().__new__(cls, name, bases, attrs)<EOL>", "docstring": "put the :class:`~peony.commands.tasks.Task`s in the right place", "id": "f4733:c0:m0"}
{"signature": "def oauth2_dance(consumer_key, consumer_secret, loop=None):", "body": "loop = asyncio.get_event_loop() if loop is None else loop<EOL>client = BasePeonyClient(consumer_key=consumer_key,<EOL>consumer_secret=consumer_secret,<EOL>auth=oauth.OAuth2Headers)<EOL>loop.run_until_complete(client.headers.sign())<EOL>return client.headers.token<EOL>", "docstring": "oauth2 dance\n\nParameters\n----------\nconsumer_key : str\n    Your consumer key\nconsumer_secret : str\n    Your consumer secret\nloop : event loop, optional\n    event loop to use\n\nReturns\n-------\nstr\n    Bearer token", "id": "f4734:m6"}
{"signature": "def oauth_dance(consumer_key, consumer_secret,<EOL>oauth_callback=\"<STR_LIT>\", loop=None):", "body": "loop = asyncio.get_event_loop() if loop is None else loop<EOL>coro = async_oauth_dance(consumer_key, consumer_secret, oauth_callback)<EOL>return loop.run_until_complete(coro)<EOL>", "docstring": "OAuth dance to get the user's access token\n\nIt calls async_oauth_dance and create event loop of not given\n\nParameters\n----------\nconsumer_key : str\n    Your consumer key\nconsumer_secret : str\n    Your consumer secret\noauth_callback : str\n    Callback uri, defaults to 'oob'\nloop : event loop\n    asyncio event loop\n\nReturns\n-------\ndict\n    Access tokens", "id": "f4734:m5"}
{"signature": "async def async_oauth_dance(consumer_key, consumer_secret, callback_uri=\"<STR_LIT>\"):", "body": "token = await get_oauth_token(consumer_key, consumer_secret, callback_uri)<EOL>oauth_verifier = await get_oauth_verifier(token['<STR_LIT>'])<EOL>token = await get_access_token(<EOL>consumer_key,<EOL>consumer_secret,<EOL>oauth_verifier=oauth_verifier,<EOL>**token<EOL>)<EOL>token = dict(<EOL>consumer_key=consumer_key,<EOL>consumer_secret=consumer_secret,<EOL>access_token=token['<STR_LIT>'],<EOL>access_token_secret=token['<STR_LIT>']<EOL>)<EOL>return token<EOL>", "docstring": "OAuth dance to get the user's access token\n\nParameters\n----------\nconsumer_key : str\n    Your consumer key\nconsumer_secret : str\n    Your consumer secret\ncallback_uri : str\n    Callback uri, defaults to 'oob'\n\nReturns\n-------\ndict\n    Access tokens", "id": "f4734:m3"}
{"signature": "def parse_token(response):", "body": "items = response.split(\"<STR_LIT:&>\")<EOL>items = [item.split(\"<STR_LIT:=>\") for item in items]<EOL>return {key: value for key, value in items}<EOL>", "docstring": "parse the responses containing the tokens\n\nParameters\n----------\nresponse : str\n    The response containing the tokens\n\nReturns\n-------\ndict\n    The parsed tokens", "id": "f4734:m4"}
{"signature": "async def get_oauth_token(consumer_key, consumer_secret, callback_uri=\"<STR_LIT>\"):", "body": "client = BasePeonyClient(consumer_key=consumer_key,<EOL>consumer_secret=consumer_secret,<EOL>api_version=\"<STR_LIT>\",<EOL>suffix=\"<STR_LIT>\")<EOL>response = await client.api.oauth.request_token.post(<EOL>_suffix=\"<STR_LIT>\",<EOL>oauth_callback=callback_uri<EOL>)<EOL>return parse_token(response)<EOL>", "docstring": "Get a temporary oauth token\n\nParameters\n----------\nconsumer_key : str\n    Your consumer key\nconsumer_secret : str\n    Your consumer secret\ncallback_uri : str, optional\n    Callback uri, defaults to 'oob'\n\nReturns\n-------\ndict\n    Temporary tokens", "id": "f4734:m0"}
{"signature": "async def get_access_token(consumer_key, consumer_secret,<EOL>oauth_token, oauth_token_secret,<EOL>oauth_verifier, **kwargs):", "body": "client = BasePeonyClient(consumer_key=consumer_key,<EOL>consumer_secret=consumer_secret,<EOL>access_token=oauth_token,<EOL>access_token_secret=oauth_token_secret,<EOL>api_version=\"<STR_LIT>\",<EOL>suffix=\"<STR_LIT>\")<EOL>response = await client.api.oauth.access_token.get(<EOL>_suffix=\"<STR_LIT>\",<EOL>oauth_verifier=oauth_verifier<EOL>)<EOL>return parse_token(response)<EOL>", "docstring": "get the access token of the user\n\nParameters\n----------\nconsumer_key : str\n    Your consumer key\nconsumer_secret : str\n    Your consumer secret\noauth_token : str\n    OAuth token from :func:`get_oauth_token`\noauth_token_secret : str\n    OAuth token secret from :func:`get_oauth_token`\noauth_verifier : str\n    OAuth verifier from :func:`get_oauth_verifier`\n\nReturns\n-------\ndict\n    Access tokens", "id": "f4734:m2"}
{"signature": "def __len__(self):", "body": "return len(self.data)<EOL>", "docstring": "get the length of the data", "id": "f4735:c2:m7"}
{"signature": "def loads(json_data, encoding=\"<STR_LIT:utf-8>\", **kwargs):", "body": "if isinstance(json_data, bytes):<EOL><INDENT>json_data = json_data.decode(encoding)<EOL><DEDENT>return json.loads(json_data, object_hook=JSONData, **kwargs)<EOL>", "docstring": "Custom loads function with an object_hook and automatic decoding\n\nParameters\n----------\njson_data : str\n    The JSON data to decode\n*args\n    Positional arguments, passed to :func:`json.loads`\nencoding : :obj:`str`, optional\n    The encoding of the bytestring\n**kwargs\n    Keyword arguments passed to :func:`json.loads`\n\nReturns\n-------\n:obj:`dict` or :obj:`list`\n    Decoded json data", "id": "f4735:m0"}
{"signature": "def __getitem__(self, key):", "body": "return self.data[key]<EOL>", "docstring": "get items from the data", "id": "f4735:c2:m2"}
{"signature": "def __iter__(self):", "body": "return iter(self.data)<EOL>", "docstring": "iterate over the data", "id": "f4735:c2:m4"}
{"signature": "def process_keys(func):", "body": "@wraps(func)<EOL>def decorated(self, k, *args):<EOL><INDENT>if not isinstance(k, str):<EOL><INDENT>msg = \"<STR_LIT>\" % self.__class__.__name__<EOL>raise ValueError(msg)<EOL><DEDENT>if not k.startswith(self.prefix):<EOL><INDENT>k = self.prefix + k<EOL><DEDENT>return func(self, k, *args)<EOL><DEDENT>return decorated<EOL>", "docstring": "Raise error for keys that are not strings\nand add the prefix if it is missing", "id": "f4737:m0"}
{"signature": "def _get(self, text):", "body": "if self.strict:<EOL><INDENT>match = self.prog.match(text)<EOL>if match:<EOL><INDENT>cmd = match.group()<EOL>if cmd in self:<EOL><INDENT>return cmd<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>words = self.prog.findall(text)<EOL>for word in words:<EOL><INDENT>if word in self:<EOL><INDENT>return word<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Analyze the text to get the right function\n\nParameters\n----------\ntext : str\n    The text that could call a function", "id": "f4737:c0:m4"}
{"signature": "async def run(self, *args, data):", "body": "cmd = self._get(data.text)<EOL>try:<EOL><INDENT>if cmd is not None:<EOL><INDENT>command = self[cmd](*args, data=data)<EOL>return await peony.utils.execute(command)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>fmt = \"<STR_LIT>\"<EOL>peony.utils.log_error(fmt.format(cmd=cmd))<EOL><DEDENT>", "docstring": "run the function you want", "id": "f4737:c0:m5"}
{"signature": "@events.event<EOL>def status_withheld():", "body": "", "docstring": "Event triggered upon receiving a status withheld notice\n\nFor more information:\nhttps://dev.twitter.com/streaming/overview/messages-types#withheld-content-notices-status-withheld-user-withheld", "id": "f4742:m8"}
{"signature": "@events.alias(on)<EOL>def too_many_follows(data):", "body": "<EOL>n (warning(data)<EOL>and data.get('<STR_LIT>').get('<STR_LIT:code>') == \"<STR_LIT>\")<EOL>", "docstring": "Event triggered when receiving a \"too many follows\" warning\n\nFor more information:\nhttps://dev.twitter.com/streaming/overview/messages-types#too-many-follows-warning", "id": "f4742:m13"}
{"signature": "@events.event<EOL>def block():", "body": "", "docstring": "For more information:\nhttps://dev.twitter.com/streaming/overview/messages-types#events-event", "id": "f4742:m17"}
{"signature": "@events.event<EOL>def list_created():", "body": "", "docstring": "For more information:\nhttps://dev.twitter.com/streaming/overview/messages-types#events-event", "id": "f4742:m21"}
{"signature": "@events.alias(on)<EOL>def warning():", "body": "", "docstring": "Event triggered when receiving a warning\n\nFor more information:\n\n* https://dev.twitter.com/streaming/overview/messages-types#stall-warnings-warning\n* https://dev.twitter.com/streaming/overview/messages-types#too-many-follows-warning", "id": "f4742:m11"}
{"signature": "@events.alias(on)<EOL>def stall_warning(data):", "body": "<EOL>n (warning(data)<EOL>and data.get('<STR_LIT>').get('<STR_LIT:code>') == \"<STR_LIT>\")<EOL>", "docstring": "Event triggered when receiving a stall warning\n\nFor more information:\nhttps://dev.twitter.com/streaming/overview/messages-types#stall-warnings-warning", "id": "f4742:m12"}
{"signature": "@events.event<EOL>def user_update():", "body": "", "docstring": "Event triggered when an user updates their profile\n\nFor more information:\nhttps://dev.twitter.com/streaming/overview/messages-types#user_update", "id": "f4742:m29"}
{"signature": "@events.alias(on, '<STR_LIT>')<EOL>def delete():", "body": "", "docstring": "Event triggered when an user deletes a tweet\n\nFor more information:\nhttps://dev.twitter.com/streaming/overview/messages-types#status-deletion-notices-delete", "id": "f4742:m5"}
{"signature": "@events.event<EOL>def user_withheld():", "body": "", "docstring": "Event triggered upon receiving a status withheld notice\n\nFor more information:\nhttps://dev.twitter.com/streaming/overview/messages-types#user-withheld", "id": "f4742:m9"}
{"signature": "@events.event<EOL>def list_user_subscribed():  ", "body": "", "docstring": "For more information:\nhttps://dev.twitter.com/streaming/overview/messages-types#events-event", "id": "f4742:m26"}
{"signature": "@events.event<EOL>def favorite():", "body": "", "docstring": "For more information:\nhttps://dev.twitter.com/streaming/overview/messages-types#events-event", "id": "f4742:m19"}
{"signature": "@events<EOL>def friends(data):", "body": "<EOL>n '<STR_LIT>' in data or '<STR_LIT>' in data<EOL>", "docstring": "Event triggered on connection to an userstream\n\nFor more information:\nhttps://dev.twitter.com/streaming/overview/messages-types#friends-lists-friends", "id": "f4742:m1"}
{"signature": "@events.priority(<NUM_LIT>)<EOL>def default(_):", "body": "return True<EOL>", "docstring": "Event triggered when the data didn't trigger any handled event", "id": "f4742:m35"}
{"signature": "def with_prefix(self, prefix, strict=False):", "body": "def decorated(func):<EOL><INDENT>return EventHandler(func=func, event=self.event,<EOL>prefix=prefix, strict=strict)<EOL><DEDENT>return decorated<EOL>", "docstring": "decorator to handle commands with prefixes\n\nParameters\n----------\nprefix : str\n    the prefix of the command\nstrict : bool, optional\n    If set to True the command must be at the beginning\n    of the message. Defaults to False.\n\nReturns\n-------\nfunction\n    a decorator that returns an :class:`EventHandler` instance", "id": "f4742:c0:m2"}
{"signature": "@events.event<EOL>def list_member_removed():", "body": "", "docstring": "For more information:\nhttps://dev.twitter.com/streaming/overview/messages-types#events-event", "id": "f4742:m25"}
{"signature": "async def __anext__(self):", "body": "if self.response is None:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>await self.connect()<EOL>return {'<STR_LIT>': True}<EOL><DEDENT>line = b'<STR_LIT>'<EOL>try:<EOL><INDENT>if self.state != NORMAL:<EOL><INDENT>if self._reconnecting:<EOL><INDENT>return await self.restart_stream()<EOL><DEDENT>else:<EOL><INDENT>return await self.init_restart()<EOL><DEDENT><DEDENT>while not line:<EOL><INDENT>if self.response.content.at_eof():<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self.state = EOF<EOL>return await self.init_restart()<EOL><DEDENT>with async_timeout.timeout(<NUM_LIT>):<EOL><INDENT>line = await self.response.content.readline()<EOL>line = line.strip(b'<STR_LIT:\\r\\n>')<EOL>logger.debug(\"<STR_LIT>\" % line)<EOL><DEDENT><DEDENT>if line in rate_limit_notices:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>raise StreamLimit(line)<EOL><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>return self.loads(line)<EOL><DEDENT>except HandledErrors as e:<EOL><INDENT>logger.debug(\"<STR_LIT>\" % (e.__class__.__name__, e))<EOL>self.state = ERROR<EOL>return await self.init_restart()<EOL><DEDENT>except ClientConnectionError:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self.state = DISCONNECTION<EOL>return await self.init_restart()<EOL><DEDENT>except CancelledError:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>raise StopAsyncIteration<EOL><DEDENT>except Exception as e:<EOL><INDENT>self.state = ERROR<EOL>return await self.init_restart(error=e)<EOL><DEDENT>", "docstring": "Decode each line using json\n\nReturns\n-------\ndict\n    Decoded JSON data", "id": "f4744:c0:m4"}
{"signature": "async def __aenter__(self):", "body": "await self.client.setup<EOL>return self<EOL>", "docstring": "Prepare the stream\n\nReturns\n-------\nStreamResponse\n    The stream iterator", "id": "f4744:c0:m10"}
{"signature": "async def connect(self):", "body": "with async_timeout.timeout(self.timeout):<EOL><INDENT>self.response = await self._connect()<EOL><DEDENT>if self.response.status in range(<NUM_LIT:200>, <NUM_LIT>):<EOL><INDENT>self._error_timeout = <NUM_LIT:0><EOL>self.state = NORMAL<EOL><DEDENT>elif self.response.status == <NUM_LIT>:<EOL><INDENT>self.state = DISCONNECTION<EOL><DEDENT>elif self.response.status in range(<NUM_LIT>, <NUM_LIT>):<EOL><INDENT>self.state = RECONNECTION<EOL><DEDENT>elif self.response.status in (<NUM_LIT>, <NUM_LIT>):<EOL><INDENT>self.state = ENHANCE_YOUR_CALM<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>raise await exceptions.throw(self.response,<EOL>loads=self.client._loads,<EOL>url=self.kwargs['<STR_LIT:url>'])<EOL><DEDENT>logger.debug(\"<STR_LIT>\" % self.state)<EOL>", "docstring": "Create the connection\n\nReturns\n-------\nself\n\nRaises\n------\nexception.PeonyException\n    On a response status in 4xx that are not status 420 or 429\n    Also on statuses in 1xx or 3xx since this should not be the status\n    received here", "id": "f4744:c0:m2"}
{"signature": "async def execute(coro):", "body": "if asyncio.iscoroutine(coro):<EOL><INDENT>return await coro<EOL><DEDENT>else:<EOL><INDENT>return coro<EOL><DEDENT>", "docstring": "run a function or coroutine\n\nParameters\n----------\ncoro : asyncio.coroutine or function", "id": "f4745:m6"}
{"signature": "def get_args(func, skip=<NUM_LIT:0>):", "body": "code = getattr(func, '<STR_LIT>', None)<EOL>if code is None:<EOL><INDENT>code = func.__call__.__code__<EOL><DEDENT>return code.co_varnames[skip:code.co_argcount]<EOL>", "docstring": "Hackish way to get the arguments of a function\n\nParameters\n----------\nfunc : callable\n    Function to get the arguments from\nskip : int, optional\n    Arguments to skip, defaults to 0 set it to 1 to skip the\n    ``self`` argument of a method.\n\nReturns\n-------\ntuple\n    Function's arguments", "id": "f4745:m0"}
{"signature": "def log_error(msg=None, exc_info=None, logger=None, **kwargs):", "body": "if logger is None:<EOL><INDENT>logger = _logger<EOL><DEDENT>if not exc_info:<EOL><INDENT>exc_info = sys.exc_info()<EOL><DEDENT>if msg is None:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL><DEDENT>exc_class, exc_msg, _ = exc_info<EOL>if all(info is not None for info in exc_info):<EOL><INDENT>logger.error(msg, exc_info=exc_info)<EOL><DEDENT>", "docstring": "log an exception and its traceback on the logger defined\n\nParameters\n----------\nmsg : str, optional\n    A message to add to the error\nexc_info : tuple\n    Information about the current exception\nlogger : logging.Logger\n    logger to use", "id": "f4745:m1"}
{"signature": "def url(self, suffix=None):", "body": "return \"<STR_LIT:/>\".join(self._path) + (suffix or self._suffix)<EOL>", "docstring": "Build the url using the _path attribute\n\nParameters\n----------\nsuffix : str\n    String to be appended to the url\n\nReturns\n-------\nstr\n    Path to the endpoint", "id": "f4746:c0:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def _request(self, method):<DEDENT>", "body": "", "docstring": "Make a request for the endpoint\n\nParameters\n----------\nmethod : str\n    method to use to make the request", "id": "f4746:c0:m4"}
{"signature": "def __getitem__(self, key):", "body": "if isinstance(key, str):<EOL><INDENT>if key.lower() in general.request_methods:<EOL><INDENT>return self._request(key)<EOL><DEDENT>else:<EOL><INDENT>new_path = self._path + [key]<EOL><DEDENT><DEDENT>elif isinstance(key, (tuple, list)):<EOL><INDENT>key = [str(i) for i in key]<EOL>new_path = self._path + key<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (key, type(key)))<EOL><DEDENT>return self.__class__(path=new_path,<EOL>suffix=self._suffix,<EOL>client=self.client)<EOL>", "docstring": "Where the magic happens\n\nIf the key is a request method (eg. get) call the _request\nattribute with the method as argument\n\notherwise append the key to the _path attribute\n\n>>> api = APIPath()  # you would have to add more arguments\n>>> api['client']    # appends 'client' to _path\n\nParameters\n----------\nkey : :obj:`str`, :obj:`tuple` or :obj:`list`\n    Key used to access an API endpoint and appended to the\n    path attribute\n\nReturns\n-------\nBaseAPIPath\n    New APIPath instance with a new ``path`` value", "id": "f4746:c0:m2"}
{"signature": "async def throw(response, loads=None, encoding=None, **kwargs):", "body": "if loads is None:<EOL><INDENT>loads = data_processing.loads<EOL><DEDENT>data = await data_processing.read(response, loads=loads,<EOL>encoding=encoding)<EOL>error = get_error(data)<EOL>if error is not None:<EOL><INDENT>exception = errors[error['<STR_LIT:code>']]<EOL>raise exception(response=response, error=error, data=data, **kwargs)<EOL><DEDENT>if response.status in statuses:<EOL><INDENT>exception = statuses[response.status]<EOL>raise exception(response=response, data=data, **kwargs)<EOL><DEDENT>raise PeonyException(response=response, data=data, **kwargs)<EOL>", "docstring": "Get the response data if possible and raise an exception", "id": "f4747:m1"}
{"signature": "@property<EOL><INDENT>def reset_in(self):<DEDENT>", "body": "return max(self.reset - time(), <NUM_LIT:0>)<EOL>", "docstring": "Time in seconds until the limit will be reset\n\nReturns\n-------\nint\n    Time in seconds until the limit will be reset", "id": "f4747:c10:m1"}
{"signature": "@property<EOL><INDENT>def reset(self):<DEDENT>", "body": "return int(self.response.headers.get('<STR_LIT>', <NUM_LIT:0>))<EOL>", "docstring": "Time when the limit will be reset\n\nReturns\n-------\nint\n    Time when the limit will be reset", "id": "f4747:c10:m0"}
{"signature": "def __init__(self, response=None, error=None, data=None, url=None,<EOL>message=None):", "body": "self.response = response<EOL>self.data = data<EOL>self.error = error<EOL>self.url = url<EOL>if not message:<EOL><INDENT>message = self.get_message()<EOL><DEDENT>if url:<EOL><INDENT>message += \"<STR_LIT>\" + url<EOL><DEDENT>super().__init__(message)<EOL>", "docstring": "Add the response and data attributes\n\nExtract message from the error if not explicitly given", "id": "f4747:c0:m0"}
{"signature": "def convert(img, formats):", "body": "media = None<EOL>min_size = <NUM_LIT:0><EOL>for kwargs in formats:<EOL><INDENT>f = io.BytesIO()<EOL>if img.mode == \"<STR_LIT>\" and kwargs['<STR_LIT>'] != \"<STR_LIT>\":<EOL><INDENT>if min_size < <NUM_LIT:5> * <NUM_LIT>**<NUM_LIT:2>:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>img.convert('<STR_LIT>')<EOL><DEDENT><DEDENT>img.save(f, **kwargs)<EOL>size = f.tell()<EOL>if media is None or size < min_size:<EOL><INDENT>if media is not None:<EOL><INDENT>media.close()<EOL><DEDENT>media = f<EOL>min_size = size<EOL><DEDENT>else:<EOL><INDENT>f.close()<EOL><DEDENT><DEDENT>return media<EOL>", "docstring": "Convert the image to all the formats specified\nParameters\n----------\nimg : PIL.Image.Image\n    The image to convert\nformats : list\n    List of all the formats to use\nReturns\n-------\nio.BytesIO\n    A file object containing the converted image", "id": "f4749:m0"}
{"signature": "def optimize_media(file_, max_size, formats):", "body": "if not PIL:<EOL><INDENT>msg = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>raise RuntimeError(msg)<EOL><DEDENT>img = PIL.Image.open(file_)<EOL>ratio = max(hw / max_hw for hw, max_hw in zip(img.size, max_size))<EOL>if ratio > <NUM_LIT:1>:<EOL><INDENT>size = tuple(int(hw // ratio) for hw in img.size)<EOL>img = img.resize(size, PIL.Image.ANTIALIAS)<EOL><DEDENT>media = convert(img, formats)<EOL>if not hasattr(file_, '<STR_LIT>'):<EOL><INDENT>img.close()<EOL><DEDENT>return media<EOL>", "docstring": "Optimize an image\nResize the picture to the ``max_size``, defaulting to the large\nphoto size of Twitter in :meth:`PeonyClient.upload_media` when\nused with the ``optimize_media`` argument.\nParameters\n----------\nfile_ : file object\n    the file object of an image\nmax_size : :obj:`tuple` or :obj:`list` of :obj:`int`\n    a tuple in the format (width, height) which is maximum size of\n    the picture returned by this function\nformats : :obj`list` or :obj:`tuple` of :obj:`dict`\n    a list of all the formats to convert the picture to\nReturns\n-------\nfile\n    The smallest file created in this function", "id": "f4749:m1"}
{"signature": "async def set_tz(self):", "body": "settings = await self.api.account.settings.get()<EOL>tz = settings.time_zone.tzinfo_name<EOL>os.environ['<STR_LIT>'] = tz<EOL>time.tzset()<EOL>", "docstring": "set the environment timezone to the timezone\nset in your twitter settings", "id": "f4753:c0:m1"}
{"signature": "@contextmanager<EOL>def atomic_write(filename):", "body": "f = _tempfile(os.fsencode(filename))<EOL>try:<EOL><INDENT>yield f<EOL><DEDENT>finally:<EOL><INDENT>f.close()<EOL>os.replace(f.name, filename)<EOL><DEDENT>", "docstring": "Open a NamedTemoraryFile handle in a context manager", "id": "f4765:m1"}
{"signature": "def get_item(filename, uuid):", "body": "with open(os.fsencode(str(filename)), \"<STR_LIT:r>\") as f:<EOL><INDENT>data = json.load(f)<EOL>results = [i for i in data if i[\"<STR_LIT>\"] == str(uuid)]<EOL>if results:<EOL><INDENT>return results<EOL><DEDENT>return None<EOL><DEDENT>", "docstring": "Read entry from JSON file", "id": "f4765:m2"}
{"signature": "def raw(s):", "body": "return text(s, escape=False)<EOL>", "docstring": "Inserts a raw string into the DOM. Unsafe.", "id": "f4772:m6"}
{"signature": "def unescape(data):", "body": "cc = re.compile(r'<STR_LIT>')<EOL>result = []<EOL>m = cc.search(data)<EOL>while m:<EOL><INDENT>result.append(data[<NUM_LIT:0>:m.start()])<EOL>d = m.group(<NUM_LIT:1>)<EOL>if d:<EOL><INDENT>d = int(d)<EOL>result.append(unichr(d))<EOL><DEDENT>else:<EOL><INDENT>d = _unescape.get(m.group(<NUM_LIT:2>), ord('<STR_LIT:?>'))<EOL>result.append(unichr(d))<EOL><DEDENT>data = data[m.end():]<EOL>m = cc.search(data)<EOL><DEDENT>result.append(data)<EOL>return '<STR_LIT>'.join(result)<EOL>", "docstring": "unescapes html entities. the opposite of escape.", "id": "f4772:m3"}
{"signature": "def __new__(_cls, *args, **kwargs):", "body": "if len(args) == <NUM_LIT:1> and isinstance(args[<NUM_LIT:0>], Callable)and not isinstance(args[<NUM_LIT:0>], dom_tag) and not kwargs:<EOL><INDENT>wrapped = args[<NUM_LIT:0>]<EOL>@wraps(wrapped)<EOL>def f(*args, **kwargs):<EOL><INDENT>with _cls() as _tag:<EOL><INDENT>return wrapped(*args, **kwargs) or _tag<EOL><DEDENT><DEDENT>return f<EOL><DEDENT>return object.__new__(_cls)<EOL>", "docstring": "Check if bare tag is being used a a decorator.\ndecorate the function and return", "id": "f4773:c0:m0"}
{"signature": "def get(self, tag=None, **kwargs):", "body": "<EOL>if tag is None: tag = dom_tag<EOL>attrs = [(dom_tag.clean_attribute(attr), value)<EOL>for attr, value in kwargs.items()]<EOL>results = []<EOL>for child in self.children:<EOL><INDENT>if (isinstance(tag, basestring) and type(child).__name__ == tag) or(not isinstance(tag, basestring) and isinstance(child, tag)):<EOL><INDENT>if all(child.attributes.get(attribute) == value<EOL>for attribute, value in attrs):<EOL><INDENT>results.append(child)<EOL><DEDENT><DEDENT>if isinstance(child, dom_tag):<EOL><INDENT>results.extend(child.get(tag, **kwargs))<EOL><DEDENT><DEDENT>return results<EOL>", "docstring": "Recursively searches children for tags of a certain\ntype with matching attributes.", "id": "f4773:c0:m13"}
{"signature": "@classmethod<EOL><INDENT>def clean_pair(cls, attribute, value):<DEDENT>", "body": "attribute = cls.clean_attribute(attribute)<EOL>if value is True:<EOL><INDENT>value = attribute<EOL><DEDENT>if value is False:<EOL><INDENT>value = \"<STR_LIT:false>\"<EOL><DEDENT>return (attribute, value)<EOL>", "docstring": "This will call `clean_attribute` on the attribute and also allows for the\ncreation of boolean attributes.\n\nEx. input(selected=True) is equivalent to input(selected=\"selected\")", "id": "f4773:c0:m26"}
{"signature": "def setdocument(self, doc):", "body": "<EOL>if self.document != doc:<EOL><INDENT>self.document = doc<EOL>for i in self.children:<EOL><INDENT>if not isinstance(i, dom_tag): return<EOL>i.setdocument(doc)<EOL><DEDENT><DEDENT>", "docstring": "Creates a reference to the parent document to allow for partial-tree\nvalidation.", "id": "f4773:c0:m8"}
{"signature": "@staticmethod<EOL><INDENT>def clean_attribute(attribute):<DEDENT>", "body": "<EOL>attribute = {<EOL>'<STR_LIT>': '<STR_LIT:class>',<EOL>'<STR_LIT>': '<STR_LIT:class>',<EOL>'<STR_LIT>': '<STR_LIT:class>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>}.get(attribute, attribute)<EOL>if attribute[<NUM_LIT:0>] == '<STR_LIT:_>':<EOL><INDENT>attribute = attribute[<NUM_LIT:1>:]<EOL><DEDENT>if attribute in set(['<STR_LIT>']) or attribute.startswith('<STR_LIT>'):<EOL><INDENT>attribute = attribute.replace('<STR_LIT:_>', '<STR_LIT:->').lower()<EOL><DEDENT>if attribute.split('<STR_LIT:_>')[<NUM_LIT:0>] in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>attribute = attribute.replace('<STR_LIT:_>', '<STR_LIT::>', <NUM_LIT:1>).lower()<EOL><DEDENT>return attribute<EOL>", "docstring": "Normalize attribute names for shorthand and work arounds for limitations\nin Python's syntax", "id": "f4773:c0:m25"}
{"signature": "def __contains__(self, item):", "body": "return bool(self.get(item))<EOL>", "docstring": "Checks recursively if item is in children tree.\nAccepts both a string and a class.", "id": "f4773:c0:m18"}
{"signature": "def __len__(self):", "body": "return len(self.children)<EOL>", "docstring": "Number of child elements.", "id": "f4773:c0:m15"}
{"signature": "def add(self, *args):", "body": "for obj in args:<EOL><INDENT>if isinstance(obj, numbers.Number):<EOL><INDENT>obj = str(obj)<EOL><DEDENT>if isinstance(obj, basestring):<EOL><INDENT>obj = escape(obj)<EOL>self.children.append(obj)<EOL><DEDENT>elif isinstance(obj, dom_tag):<EOL><INDENT>ctx = dom_tag._with_contexts[_get_thread_context()]<EOL>if ctx and ctx[-<NUM_LIT:1>]:<EOL><INDENT>ctx[-<NUM_LIT:1>].used.add(obj)<EOL><DEDENT>self.children.append(obj)<EOL>obj.parent = self<EOL>obj.setdocument(self.document)<EOL><DEDENT>elif isinstance(obj, dict):<EOL><INDENT>for attr, value in obj.items():<EOL><INDENT>self.set_attribute(*dom_tag.clean_pair(attr, value))<EOL><DEDENT><DEDENT>elif hasattr(obj, '<STR_LIT>'):<EOL><INDENT>for subobj in obj:<EOL><INDENT>self.add(subobj)<EOL><DEDENT><DEDENT>else:  <EOL><INDENT>raise ValueError('<STR_LIT>' % obj)<EOL><DEDENT><DEDENT>if len(args) == <NUM_LIT:1>:<EOL><INDENT>return args[<NUM_LIT:0>]<EOL><DEDENT>return args<EOL>", "docstring": "Add new child tags.", "id": "f4773:c0:m9"}
{"signature": "def attr(*args, **kwargs):", "body": "ctx = dom_tag._with_contexts[_get_thread_context()]<EOL>if ctx and ctx[-<NUM_LIT:1>]:<EOL><INDENT>dicts = args + (kwargs,)<EOL>for d in dicts:<EOL><INDENT>for attr, value in d.items():<EOL><INDENT>ctx[-<NUM_LIT:1>].tag.set_attribute(*dom_tag.clean_pair(attr, value))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Set attributes on the current active tag context", "id": "f4773:m1"}
{"signature": "def __init__(self, title='<STR_LIT>', doctype='<STR_LIT>', request=None):", "body": "super(document, self).__init__()<EOL>self.doctype    = doctype<EOL>self.head       = super(document, self).add(tags.head())<EOL>self.body       = super(document, self).add(tags.body())<EOL>self.title_node = self.head.add(tags.title(title))<EOL>self._entry     = self.body<EOL>", "docstring": "Creates a new document instance. Accepts `title`, `doctype`, and `request` keyword arguments.", "id": "f4774:c0:m0"}
{"signature": "def render(self, *args, **kwargs):", "body": "r = []<EOL>if self.doctype:<EOL><INDENT>r.append(self.doctype)<EOL>r.append('<STR_LIT:\\n>')<EOL><DEDENT>r.append(super(document, self).render(*args, **kwargs))<EOL>return u'<STR_LIT>'.join(r)<EOL>", "docstring": "Creates a <title> tag if not present and renders the DOCTYPE and tag tree.", "id": "f4774:c0:m4"}
{"signature": "def appendChild(self, obj):", "body": "self.add(obj)<EOL>return self<EOL>", "docstring": "DOM API: Add an item to the end of the children list.", "id": "f4775:c0:m3"}
{"signature": "def getElementsByTagName(self, name):", "body": "if isinstance(name, basestring):<EOL><INDENT>return self.get(name.lower())<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "DOM API: Returns all tags that match name.", "id": "f4775:c0:m2"}
{"signature": "def get_user(self, obj):", "body": "return obj.email.user<EOL>", "docstring": "Get the user that owns the password reset token.", "id": "f4780:c2:m0"}
{"signature": "def __init__(self, settings_obj):", "body": "settings_obj.REST_EMAIL_AUTH = copy.deepcopy(<EOL>settings_obj.REST_EMAIL_AUTH<EOL>)<EOL>self.__dict__[\"<STR_LIT>\"] = settings_obj<EOL>self.__dict__[\"<STR_LIT>\"] = copy.deepcopy(settings_obj.REST_EMAIL_AUTH)<EOL>", "docstring": "Create a new mutable settings instance.\n\nThis is done by making a copy of the REST_EMAIL_AUTH\nconfiguration dictionary. Since the settings fixture is rolled\nback at the end of each test, it sees that we changed the\ndictionary reference and points it back to the original.\n\nArgs:\n    settings_obj:\n        The object through which Django's settings are\n        accessible. We rely on the fact that this object is\n        rolled back after each test.", "id": "f4793:c0:m0"}
{"signature": "def validate_key(self, key):", "body": "try:<EOL><INDENT>confirmation = models.EmailConfirmation.objects.select_related(<EOL>\"<STR_LIT>\"<EOL>).get(key=key)<EOL><DEDENT>except models.EmailConfirmation.DoesNotExist:<EOL><INDENT>raise serializers.ValidationError(<EOL>_(\"<STR_LIT>\")<EOL>)<EOL><DEDENT>if confirmation.is_expired:<EOL><INDENT>raise serializers.ValidationError(<EOL>_(\"<STR_LIT>\")<EOL>)<EOL><DEDENT>self._confirmation = confirmation<EOL>return key<EOL>", "docstring": "Validate the provided confirmation key.\n\nReturns:\n    str:\n        The validated confirmation key.\n\nRaises:\n    serializers.ValidationError:\n        If there is no email confirmation with the given key or\n        the confirmation has expired.", "id": "f4805:c1:m3"}
{"signature": "def validate_password(self, password):", "body": "password_validation.validate_password(password)<EOL>return password<EOL>", "docstring": "Validate the provided password by running it through Django's\npassword validation system.\n\nReturns:\n    The validated password.\n\nRaises:\n    ValidationError:\n        If the provided password does not pass the configured\n        password validators.", "id": "f4805:c3:m2"}
{"signature": "def validate_password(self, password):", "body": "password_validation.validate_password(password)<EOL>return password<EOL>", "docstring": "Validate the provided password.\n\nArgs:\n    password (str):\n        The password provided by the user.\n\nReturns:\n    str:\n        The validated password.\n\nRaises:\n    ValidationError:\n        If the provided password doesn't pass Django's provided\n        password validation.", "id": "f4805:c4:m2"}
{"signature": "def save(self):", "body": "token = models.PasswordResetToken.objects.get(<EOL>key=self.validated_data[\"<STR_LIT:key>\"]<EOL>)<EOL>token.email.user.set_password(self.validated_data[\"<STR_LIT:password>\"])<EOL>token.email.user.save()<EOL>logger.info(\"<STR_LIT>\", token.email.user)<EOL>token.delete()<EOL>", "docstring": "Reset the user's password if the provided information is valid.", "id": "f4805:c3:m0"}
{"signature": "def update(self, instance, validated_data):", "body": "is_primary = validated_data.pop(\"<STR_LIT>\", False)<EOL>instance = super(EmailSerializer, self).update(<EOL>instance, validated_data<EOL>)<EOL>if is_primary:<EOL><INDENT>instance.set_primary()<EOL><DEDENT>return instance<EOL>", "docstring": "Update the instance the serializer is bound to.\n\nArgs:\n    instance:\n        The instance the serializer is bound to.\n    validated_data:\n        The data to update the serializer with.\n\nReturns:\n    The updated instance.", "id": "f4805:c0:m1"}
{"signature": "def save(self):", "body": "try:<EOL><INDENT>email = models.EmailAddress.objects.get(<EOL>email=self.validated_data[\"<STR_LIT:email>\"], is_verified=True<EOL>)<EOL><DEDENT>except models.EmailAddress.DoesNotExist:<EOL><INDENT>return None<EOL><DEDENT>token = models.PasswordResetToken.objects.create(email=email)<EOL>token.send()<EOL>return token<EOL>", "docstring": "Send out a password reset if the provided data is valid.\n\nIf the provided email address exists and is verified, a reset\nemail is sent to the address.\n\nReturns:\n    The password reset token if it was returned and ``None``\n    otherwise.", "id": "f4805:c2:m0"}
{"signature": "def validate_key(self, key):", "body": "if not models.PasswordResetToken.valid_tokens.filter(key=key).exists():<EOL><INDENT>raise serializers.ValidationError(<EOL>_(\"<STR_LIT>\")<EOL>)<EOL><DEDENT>return key<EOL>", "docstring": "Validate the provided reset key.\n\nReturns:\n    The validated key.\n\nRaises:\n    serializers.ValidationError:\n        If the provided key does not exist.", "id": "f4805:c3:m1"}
{"signature": "def save(self):", "body": "try:<EOL><INDENT>email = models.EmailAddress.objects.get(<EOL>email=self.validated_data[\"<STR_LIT:email>\"], is_verified=False<EOL>)<EOL>logger.debug(<EOL>\"<STR_LIT>\",<EOL>self.validated_data[\"<STR_LIT:email>\"],<EOL>)<EOL>email.send_confirmation()<EOL><DEDENT>except models.EmailAddress.DoesNotExist:<EOL><INDENT>logger.debug(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>self.validated_data[\"<STR_LIT:email>\"],<EOL>)<EOL><DEDENT>", "docstring": "Resend a verification email to the provided address.\n\nIf the provided email is already verified no action is taken.", "id": "f4805:c5:m0"}
{"signature": "def authenticate(self, request, email=None, password=None, username=None):", "body": "email = email or username<EOL>try:<EOL><INDENT>email_instance = models.EmailAddress.objects.get(<EOL>is_verified=True, email=email<EOL>)<EOL><DEDENT>except models.EmailAddress.DoesNotExist:<EOL><INDENT>return None<EOL><DEDENT>user = email_instance.user<EOL>if user.check_password(password):<EOL><INDENT>return user<EOL><DEDENT>return None<EOL>", "docstring": "Attempt to authenticate a set of credentials.\n\nArgs:\n    request:\n        The request associated with the authentication attempt.\n    email:\n        The user's email address.\n    password:\n        The user's password.\n    username:\n        An alias for the ``email`` field. This is provided for\n        compatability with Django's built in authentication\n        views.\n\nReturns:\n    The user associated with the provided credentials if they\n    are valid. Returns ``None`` otherwise.", "id": "f4812:c1:m0"}
{"signature": "def get_queryset(self):", "body": "return self.request.user.email_addresses.all()<EOL>", "docstring": "Get the email addresses belonging to the requesting user.\n\nReturns:\n    A queryset containing only the email addresses owned by the\n    requesting user.", "id": "f4813:c1:m0"}
{"signature": "def get_queryset(self):", "body": "return self.request.user.email_addresses.all()<EOL>", "docstring": "Get the email addresses belonging to the requesting user.\n\nReturns:\n    A queryset containing only the email addresses owned by the\n    requesting user.", "id": "f4813:c0:m0"}
{"signature": "def get_serializer_class(self):", "body": "return app_settings.REGISTRATION_SERIALIZER<EOL>", "docstring": "Get the serializer class used to register new users.", "id": "f4813:c5:m0"}
{"signature": "@classmethod<EOL><INDENT>def _create(cls, model_class, *args, **kwargs):<DEDENT>", "body": "manager = cls._get_manager(model_class)<EOL>return manager.create_user(*args, **kwargs)<EOL>", "docstring": "Create a new user instance.\n\nArgs:\n    model_class:\n        The type of model to create an instance of.\n    args:\n        Positional arguments to create the instance with.\n    kwargs:\n        Keyword arguments to create the instance with.\n\nReturns:\n    A new user instance of the type specified by\n    ``model_class``.", "id": "f4814:c3:m0"}
{"signature": "def get_queryset(self):", "body": "oldest = timezone.now() - app_settings.PASSWORD_RESET_EXPIRATION<EOL>queryset = super(ValidPasswordResetTokenManager, self).get_queryset()<EOL>return queryset.filter(created_at__gt=oldest)<EOL>", "docstring": "Return all unexpired password reset tokens.", "id": "f4815:c1:m0"}
{"signature": "def _setting(self, name, default):", "body": "from django.conf import settings<EOL>settings_dict = getattr(settings, \"<STR_LIT>\", {})<EOL>return settings_dict.get(name, default)<EOL>", "docstring": "Retrieve a setting from the current Django settings.\n\nSettings are retrieved from the ``REST_EMAIL_AUTH`` dict in the\nsettings file.\n\nArgs:\n    name (str):\n        The name of the setting to retrieve.\n    default:\n        The setting's default value.\n\nReturns:\n    The value provided in the settings dictionary if it exists.\n    The default value is returned otherwise.", "id": "f4817:c0:m1"}
{"signature": "@property<EOL><INDENT>def PASSWORD_RESET_EXPIRATION(self):<DEDENT>", "body": "import datetime<EOL>return self._setting(<EOL>\"<STR_LIT>\", datetime.timedelta(hours=<NUM_LIT:1>)<EOL>)<EOL>", "docstring": "The duration that a password reset token is valid for.\n\nDefaults to 1 hour.", "id": "f4817:c0:m6"}
{"signature": "@property<EOL><INDENT>def PASSWORD_RESET_URL(self):<DEDENT>", "body": "return self._setting(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>", "docstring": "The template to use for the password reset url.", "id": "f4817:c0:m7"}
{"signature": "@property<EOL><INDENT>def CONFIRMATION_EXPIRATION(self):<DEDENT>", "body": "import datetime<EOL>return self._setting(<EOL>\"<STR_LIT>\", datetime.timedelta(days=<NUM_LIT:1>)<EOL>)<EOL>", "docstring": "The duration that an email confirmation is valid for.\n\nDefaults to 1 day.", "id": "f4817:c0:m2"}
{"signature": "def handle(self, *args, **kwargs):", "body": "cutoff = timezone.now()<EOL>cutoff -= app_settings.CONFIRMATION_EXPIRATION<EOL>cutoff -= app_settings.CONFIRMATION_SAVE_PERIOD<EOL>queryset = models.EmailConfirmation.objects.filter(<EOL>created_at__lte=cutoff<EOL>)<EOL>count = queryset.count()<EOL>queryset.delete()<EOL>if count:<EOL><INDENT>self.stdout.write(<EOL>self.style.SUCCESS(<EOL>\"<STR_LIT>\".format(<EOL>count=count<EOL>)<EOL>)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>self.stdout.write(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Handle execution of the command.", "id": "f4818:c0:m0"}
{"signature": "def send_confirmation(self):", "body": "confirmation = EmailConfirmation.objects.create(email=self)<EOL>confirmation.send()<EOL>", "docstring": "Send a verification email for the email address.", "id": "f4819:c0:m1"}
{"signature": "@property<EOL><INDENT>def is_expired(self):<DEDENT>", "body": "expiration_time = self.created_at + datetime.timedelta(days=<NUM_LIT:1>)<EOL>return timezone.now() > expiration_time<EOL>", "docstring": "Determine if the confirmation has expired.\n\nReturns:\n    bool:\n        ``True`` if the confirmation has expired and ``False``\n        otherwise.", "id": "f4819:c1:m1"}
{"signature": "def send(self):", "body": "context = {<EOL>\"<STR_LIT>\": app_settings.PASSWORD_RESET_URL.format(key=self.key)<EOL>}<EOL>email_utils.send_email(<EOL>context=context,<EOL>from_email=settings.DEFAULT_FROM_EMAIL,<EOL>recipient_list=[self.email.email],<EOL>subject=_(\"<STR_LIT>\"),<EOL>template_name=\"<STR_LIT>\",<EOL>)<EOL>logger.info(\"<STR_LIT>\", self.email)<EOL>", "docstring": "Send the password reset token to the user.", "id": "f4819:c2:m1"}
{"signature": "def __str__(self):", "body": "return \"<STR_LIT>\".format(self.email.user)<EOL>", "docstring": "Get a string representation of the instance.\n\nReturns:\n    Information about the token's owner.", "id": "f4819:c2:m0"}
{"signature": "def set_primary(self):", "body": "query = EmailAddress.objects.filter(is_primary=True, user=self.user)<EOL>query = query.exclude(pk=self.pk)<EOL>with transaction.atomic():<EOL><INDENT>query.update(is_primary=False)<EOL>self.is_primary = True<EOL>self.save()<EOL><DEDENT>logger.info(<EOL>\"<STR_LIT>\",<EOL>self.email,<EOL>self.user,<EOL>)<EOL>", "docstring": "Set this email address as the user's primary email.", "id": "f4819:c0:m3"}
{"signature": "def confirm(self):", "body": "self.email.is_verified = True<EOL>self.email.save()<EOL>signals.email_verified.send(email=self.email, sender=self.__class__)<EOL>logger.info(\"<STR_LIT>\", self.email.email)<EOL>", "docstring": "Mark the instance's email as verified.", "id": "f4819:c1:m0"}
{"signature": "def img_src_finder(pipeline_index,<EOL>soup,<EOL>finder_image_urls=[],<EOL>*args, **kwargs):", "body": "now_finder_image_urls = []<EOL>for img in soup.find_all('<STR_LIT>'):<EOL><INDENT>src = img.get('<STR_LIT:src>', None)<EOL>if src:<EOL><INDENT>src = str(src)<EOL>if (src not in finder_image_urls) and(src not in now_finder_image_urls):<EOL><INDENT>now_finder_image_urls.append(src)<EOL><DEDENT><DEDENT><DEDENT>output = {}<EOL>output['<STR_LIT>'] = finder_image_urls + now_finder_image_urls<EOL>return output<EOL>", "docstring": "Find image URL in <img>'s src attribute", "id": "f4831:m0"}
{"signature": "def original_image_extender(pipeline_index,<EOL>finder_image_urls,<EOL>extender_image_urls=[],<EOL>*args, **kwargs):", "body": "now_extender_image_urls = []<EOL>search_re = re.compile(r'<STR_LIT>', re.IGNORECASE)<EOL>for image_url in finder_image_urls:<EOL><INDENT>if '<STR_LIT>' in image_url.lower():<EOL><INDENT>if search_re.search(image_url):<EOL><INDENT>extender_image_url = search_re.sub('<STR_LIT>', image_url, count=<NUM_LIT:1>)<EOL>now_extender_image_urls.append(extender_image_url)<EOL><DEDENT><DEDENT><DEDENT>output = {}<EOL>output['<STR_LIT>'] = extender_image_urls + now_extender_image_urls<EOL>return output<EOL>", "docstring": "Example:\nhttp://media-cache-ec0.pinimg.com/70x/50/9b/bd/509bbd5c6543d473bc2b49befe75f4c6.jpg\nhttp://media-cache-ec0.pinimg.com/236x/50/9b/bd/509bbd5c6543d473bc2b49befe75f4c6.jpg\nhttp://media-cache-ec0.pinimg.com/736x/50/9b/bd/509bbd5c6543d473bc2b49befe75f4c6.jpg\nto\nhttp://media-cache-ec0.pinimg.com/originals/50/9b/bd/509bbd5c6543d473bc2b49befe75f4c6.jpg", "id": "f4833:m0"}
{"signature": "def ggpht_s1600_extender(pipeline_index,<EOL>finder_image_urls,<EOL>extender_image_urls=[],<EOL>*args, **kwargs):", "body": "now_extender_image_urls = []<EOL>search_re = re.compile(r'<STR_LIT>', re.IGNORECASE)<EOL>for image_url in finder_image_urls:<EOL><INDENT>if '<STR_LIT>' in image_url.lower():<EOL><INDENT>if search_re.search(image_url):<EOL><INDENT>extender_image_url = search_re.sub('<STR_LIT>', image_url)<EOL>now_extender_image_urls.append(extender_image_url)<EOL><DEDENT><DEDENT><DEDENT>output = {}<EOL>output['<STR_LIT>'] = extender_image_urls + now_extender_image_urls<EOL>return output<EOL>", "docstring": "Example:\nhttp://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s640/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg\nto\nhttp://lh4.ggpht.com/-fFi-qJRuxeY/UjwHSOTHGOI/AAAAAAAArgE/SWTMT-hXzB4/s1600/Celeber-ru-Emma-Watson-Net-A-Porter-The-Edit-Magazine-Photoshoot-2013-01.jpg", "id": "f4834:m1"}
{"signature": "def original_image_extender(pipeline_index,<EOL>finder_image_urls,<EOL>extender_image_urls=[],<EOL>*args, **kwargs):", "body": "now_extender_image_urls = []<EOL>check_re = re.compile(r'<STR_LIT>', re.IGNORECASE)<EOL>search_re = re.compile(r'<STR_LIT>', re.IGNORECASE)<EOL>for image_url in finder_image_urls:<EOL><INDENT>if check_re.search(image_url):<EOL><INDENT>if search_re.search(image_url):<EOL><INDENT>extender_image_url = search_re.sub('<STR_LIT:.>', image_url)<EOL>now_extender_image_urls.append(extender_image_url)<EOL><DEDENT><DEDENT><DEDENT>output = {}<EOL>output['<STR_LIT>'] = extender_image_urls + now_extender_image_urls<EOL>return output<EOL>", "docstring": "Example:\nhttp://fashion-fever.nl/wp-content/upload/2013/09/DSC_0058-110x110.jpg\nhttp://www.wendyslookbook.com/wp-content/uploads/2013/09/Morning-Coffee-Run-7-433x650.jpg\nto\nhttp://fashion-fever.nl/wp-content/upload/2013/09/DSC_0058.jpg\nhttp://www.wendyslookbook.com/wp-content/uploads/2013/09/Morning-Coffee-Run-7.jpg", "id": "f4835:m0"}
{"signature": "def avatar_128_extender(pipeline_index,<EOL>finder_image_urls,<EOL>extender_image_urls=[],<EOL>*args, **kwargs):", "body": "now_extender_image_urls = []<EOL>search_re = re.compile(r'<STR_LIT>', re.IGNORECASE)<EOL>for image_url in finder_image_urls:<EOL><INDENT>if '<STR_LIT>' in image_url.lower():<EOL><INDENT>if search_re.search(image_url):<EOL><INDENT>extender_image_url = search_re.sub(r'<STR_LIT>', image_url)<EOL>now_extender_image_urls.append(extender_image_url)<EOL><DEDENT><DEDENT><DEDENT>output = {}<EOL>output['<STR_LIT>'] = extender_image_urls + now_extender_image_urls<EOL>return output<EOL>", "docstring": "Example:\nhttp://25.media.tumblr.com/avatar_2909d6610c26_16.png\nto\nhttp://25.media.tumblr.com/avatar_2909d6610c26_128.png", "id": "f4836:m1"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def stream(self, report):<DEDENT>", "body": "with self.ClientSession() as session:<EOL><INDENT>lines = []<EOL>for job in report['<STR_LIT>']:<EOL><INDENT>key = '<STR_LIT>' % (self.name, job)<EOL>for minute in report['<STR_LIT>'][job]:<EOL><INDENT>for k, v in report['<STR_LIT>'][job][minute].items():<EOL><INDENT>lines.append('<STR_LIT>' % (key, k))<EOL>lines.append('<STR_LIT>' % (key, k, v))<EOL><DEDENT><DEDENT><DEDENT>lines.append(\"<STR_LIT>\")<EOL>data = \"<STR_LIT:\\n>\".join(lines)<EOL>logger.info(data)<EOL>yield from session.post(self.url, data=bytes(data.encode('<STR_LIT:utf-8>')))<EOL><DEDENT>", "docstring": "Stream reports to application logs", "id": "f4845:c3:m1"}
{"signature": "def _escape_char(c, escape_char=ESCAPE_CHAR):", "body": "buf = []<EOL>for byte in c.encode('<STR_LIT:utf8>'):<EOL><INDENT>buf.append(escape_char)<EOL>buf.append('<STR_LIT>' % _ord(byte))<EOL><DEDENT>return '<STR_LIT>'.join(buf)<EOL>", "docstring": "Escape a single character", "id": "f4847:m0"}
{"signature": "def unescape(escaped, escape_char=ESCAPE_CHAR):", "body": "if isinstance(escaped, bytes):<EOL><INDENT>escaped = escaped.decode('<STR_LIT:utf8>')<EOL><DEDENT>escape_pat = re.compile(re.escape(escape_char).encode('<STR_LIT:utf8>') + b'<STR_LIT>', re.IGNORECASE)<EOL>buf = escape_pat.subn(_unescape_char, escaped.encode('<STR_LIT:utf8>'))[<NUM_LIT:0>]<EOL>return buf.decode('<STR_LIT:utf8>')<EOL>", "docstring": "Unescape a string escaped with `escape`\n\n    escape_char must be the same as that used in the call to escape.", "id": "f4847:m3"}
{"signature": "def escape(to_escape, safe=SAFE, escape_char=ESCAPE_CHAR, allow_collisions=False):", "body": "if isinstance(to_escape, bytes):<EOL><INDENT>to_escape = to_escape.decode('<STR_LIT:utf8>')<EOL><DEDENT>if not isinstance(safe, set):<EOL><INDENT>safe = set(safe)<EOL><DEDENT>if allow_collisions:<EOL><INDENT>safe.add(escape_char)<EOL><DEDENT>elif escape_char in safe:<EOL><INDENT>safe.remove(escape_char)<EOL><DEDENT>chars = []<EOL>for c in to_escape:<EOL><INDENT>if c in safe:<EOL><INDENT>chars.append(c)<EOL><DEDENT>else:<EOL><INDENT>chars.append(_escape_char(c, escape_char))<EOL><DEDENT><DEDENT>return u'<STR_LIT>'.join(chars)<EOL>", "docstring": "Escape a string so that it only contains characters in a safe set.\n\n    Characters outside the safe list will be escaped with _%x_,\n    where %x is the hex value of the character.\n\n    If `allow_collisions` is True, occurrences of `escape_char`\n    in the input will not be escaped.\n\n    In this case, `unescape` cannot be used to reverse the transform\n    because occurrences of the escape char in the resulting string are ambiguous.\n    Only use this mode when:\n\n    1. collisions cannot occur or do not matter, and\n    2. unescape will never be called.\n\n    .. versionadded: 1.0\n        allow_collisions argument.\n        Prior to 1.0, behavior was the same as allow_collisions=False (default).", "id": "f4847:m1"}
{"signature": "def expand_dims(a, axis):", "body": "if hasattr(a, '<STR_LIT>') and hasattr(type(a), '<STR_LIT>'):<EOL><INDENT>return a.expand_dims(axis)<EOL><DEDENT>else:<EOL><INDENT>return np.expand_dims(a, axis)<EOL><DEDENT>", "docstring": "Insert a new axis, corresponding to a given position in the array shape\n\n    Args:\n      a (array_like): Input array.\n      axis (int): Position (amongst axes) where new axis is to be inserted.", "id": "f4853:m8"}
{"signature": "def __div__(self, other):", "body": "return np.divide(self, other)<EOL>", "docstring": "x.__div__(y) <==> x/y", "id": "f4853:c0:m34"}
{"signature": "def _tosubslices(self, sl):", "body": "N = self.shape[self._distaxis]<EOL>start, stop, step = sl.start, sl.stop, sl.step<EOL>if step is None:<EOL><INDENT>step = <NUM_LIT:1><EOL><DEDENT>ss = []<EOL>ms = []<EOL>if step > <NUM_LIT:0>:<EOL><INDENT>if start is None:<EOL><INDENT>start = <NUM_LIT:0><EOL><DEDENT>if stop is None:<EOL><INDENT>stop = N<EOL><DEDENT>subs = range(<NUM_LIT:0>, self._n)<EOL>for s in subs:<EOL><INDENT>low = self._si[s]<EOL>high = self._si[s + <NUM_LIT:1>]<EOL>first = low + ((low - start) % step)<EOL>last = high + ((high - start) % step)<EOL>if start < high and stop > low and first < high:<EOL><INDENT>ss.append(s)<EOL>substart = max(first, start) - low<EOL>substop = min(last, stop) - low<EOL>ms.append(slice(substart, substop, step))<EOL><DEDENT><DEDENT><DEDENT>elif step < <NUM_LIT:0>:<EOL><INDENT>if start is None:<EOL><INDENT>start = N - <NUM_LIT:1><EOL><DEDENT>if stop is None:<EOL><INDENT>stop = -<NUM_LIT:1><EOL><DEDENT>subs = range(self._n - <NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>)<EOL>for s in subs:<EOL><INDENT>low = self._si[s]<EOL>high = self._si[s + <NUM_LIT:1>]<EOL>first = high + step + ((high - start) % step)<EOL>last = low + step + ((low - start) % step)<EOL>if start >= low and stop < high and first >= low:<EOL><INDENT>ss.append(s)<EOL>substart = min(first, start) - low<EOL>substop = max(last + step, stop) - low<EOL>if substop < <NUM_LIT:0>:<EOL><INDENT>substop = None<EOL><DEDENT>ms.append(slice(substart, substop, step))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return ss, ms<EOL>", "docstring": "Maps a slice object for whole array to slice objects for subarrays.\n        Returns pair (ss, ms) where ss is a list of subarrays and ms is a list\n        giving the slice object that should be applied to each subarray.", "id": "f4853:c1:m16"}
{"signature": "def __pow__(self, other, modulo=None):", "body": "<EOL>return np.power(self, other)<EOL>", "docstring": "x.__pow__(y[, z]) <==> pow(x, y[, z])", "id": "f4853:c1:m38"}
{"signature": "def __iand__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__and__(y) <==> x&=y", "id": "f4853:c0:m46"}
{"signature": "def __ipow__(self, other, modulo=None):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__pow__(y) <==> x**=y", "id": "f4853:c0:m43"}
{"signature": "def dot(self, other):", "body": "return np.dot(self, other)<EOL>", "docstring": "Dot product of two arrays.\n        Refer to `numpy.dot` for full documentation.", "id": "f4853:c1:m71"}
{"signature": "def __rmul__(self, other):", "body": "return np.multiply(other, self)<EOL>", "docstring": "x.__rmul__(y) <==> y*x", "id": "f4853:c1:m31"}
{"signature": "def __abs__(self):", "body": "return np.abs(self)<EOL>", "docstring": "x.__abs__() <==> abs(x)", "id": "f4853:c0:m53"}
{"signature": "def __mod__(self, other):", "body": "return np.mod(self, other)<EOL>", "docstring": "x.__mod__(y) <==> x%y", "id": "f4853:c0:m18"}
{"signature": "def __getitem__(self, index):", "body": "<EOL>distaxis = self._distaxis<EOL>if isinstance(index, np.ndarray) and index.dtype.type is np.bool_:<EOL><INDENT>raise Error('<STR_LIT>')<EOL><DEDENT>if not isinstance(index, Sequence):<EOL><INDENT>index = (index,) + (slice(None),)*(self.ndim - <NUM_LIT:1>)<EOL><DEDENT>ix_types = tuple(type(x) for x in index)<EOL>if (np.ndarray in ix_types or<EOL>(not isinstance(index, _TupleType) and <EOL>_NewaxisType not in ix_types and <EOL>_EllipsisType not in ix_types and<EOL>_SliceType not in ix_types) or<EOL>any(issubclass(T, Sequence) for T in ix_types)):<EOL><INDENT>basic_slicing = False<EOL><DEDENT>else:<EOL><INDENT>basic_slicing = True<EOL><DEDENT>while _EllipsisType in ix_types:<EOL><INDENT>pos = ix_types.index(_EllipsisType)<EOL>m = (self.ndim + ix_types.count(_NewaxisType) - len(index) + <NUM_LIT:1>)<EOL>index = index[:pos] + (slice(None),)*m + index[(pos+<NUM_LIT:1>):]<EOL>ix_types = tuple(type(x) for x in index)<EOL><DEDENT>if _NewaxisType in ix_types:<EOL><INDENT>new_distaxis = distaxis<EOL>subix = [slice(None)] * self.ndim<EOL>while _NewaxisType in ix_types:<EOL><INDENT>pos = ix_types.index(type(np.newaxis))<EOL>index = index[:pos] + (slice(None),) + index[(pos+<NUM_LIT:1>):]<EOL>ix_types = tuple(type(x) for x in index)<EOL>if pos <= distaxis:<EOL><INDENT>subix[pos] = np.newaxis<EOL>new_distaxis += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>subix[pos - <NUM_LIT:1>] = np.newaxis<EOL><DEDENT><DEDENT>new_subarrays = [ra[tuple(subix)] for ra in self._subarrays]<EOL>return DistArray(new_subarrays, new_distaxis)[index]<EOL><DEDENT>index = tuple(index) + (slice(None),)*(self.ndim - len(index))<EOL>if len(index) > self.ndim:<EOL><INDENT>raise IndexError('<STR_LIT>')<EOL><DEDENT>if basic_slicing:<EOL><INDENT>distix = index[distaxis]<EOL>if isinstance(distix, numbers.Number):<EOL><INDENT>s, i = self._tosub(distix)<EOL>subix = index[<NUM_LIT:0>:distaxis] + (i,) + index[(distaxis+<NUM_LIT:1>):]<EOL>return 
self._subarrays[s][subix]<EOL><DEDENT>else:<EOL><INDENT>result_ras = []<EOL>ss, ms = self._tosubslices(distix)<EOL>for s, m in zip(ss, ms):<EOL><INDENT>subix = index[<NUM_LIT:0>:distaxis] + (m,) + index[(distaxis+<NUM_LIT:1>):]<EOL>result_ras.append(self._subarrays[s][subix])<EOL><DEDENT>axes_removed = sum(<NUM_LIT:1> for x in index[:distaxis] if isinstance(<EOL>x, numbers.Integral))<EOL>new_distaxis = distaxis - axes_removed<EOL><DEDENT><DEDENT>else:<EOL><INDENT>is_fancy = tuple(not isinstance(x, _SliceType) for x in index)<EOL>fancy_pos = tuple(i for i in range(len(index)) if is_fancy[i])<EOL>slice_pos = tuple(i for i in range(len(index)) if not is_fancy[i])<EOL>contiguous = (fancy_pos[-<NUM_LIT:1>] - fancy_pos[<NUM_LIT:0>] == len(fancy_pos) - <NUM_LIT:1>)<EOL>index = list(index)<EOL>ix_arrays = [index[j] for j in fancy_pos]<EOL>ix_arrays = np.broadcast_arrays(*ix_arrays)<EOL>for j in range(len(fancy_pos)):<EOL><INDENT>if ix_arrays[j].shape is ():<EOL><INDENT>ix_arrays[j] = np.expand_dims(ix_arrays[j], <NUM_LIT:0>)<EOL><DEDENT>index[fancy_pos[j]] = ix_arrays[j]<EOL><DEDENT>index = tuple(index)<EOL>idim = index[fancy_pos[<NUM_LIT:0>]].ndim <EOL>assert(idim > <NUM_LIT:0>)<EOL>distix = index[distaxis]<EOL>otherix = index[<NUM_LIT:0>:distaxis] + (slice(None),) + index[(distaxis+<NUM_LIT:1>):]<EOL>if not is_fancy[distaxis]:<EOL><INDENT>result_ras = []<EOL>ss, ms = self._tosubslices(distix)<EOL>for s, m in zip(ss, ms):<EOL><INDENT>subix = index[<NUM_LIT:0>:distaxis] + (m,) + index[(distaxis+<NUM_LIT:1>):]<EOL>result_ras.append(self._subarrays[s][subix])<EOL><DEDENT>if contiguous:<EOL><INDENT>if fancy_pos[<NUM_LIT:0>] > distaxis:<EOL><INDENT>new_distaxis = distaxis<EOL><DEDENT>else:<EOL><INDENT>new_distaxis = distaxis - len(fancy_pos) + idim<EOL><DEDENT><DEDENT>else:<EOL><INDENT>earlier_fancy = len([i for i in fancy_pos if i < distaxis])<EOL>new_distaxis = distaxis - earlier_fancy + idim<EOL><DEDENT><DEDENT>else:<EOL><INDENT>nonconstant_ix_axes = []<EOL>for j in 
range(idim):<EOL><INDENT>n = distix.shape[j]<EOL>if n > <NUM_LIT:1>:<EOL><INDENT>partix = np.split(distix, n, axis=j)<EOL>if not all(np.array_equal(<EOL>partix[<NUM_LIT:0>], partix[i]) for i in range(<NUM_LIT:1>, n)):<EOL><INDENT>nonconstant_ix_axes.append(j)<EOL><DEDENT><DEDENT><DEDENT>if len(nonconstant_ix_axes) <= <NUM_LIT:1>:<EOL><INDENT>if len(nonconstant_ix_axes) is <NUM_LIT:0>:<EOL><INDENT>all_same_engine = True<EOL>iax = idim - <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>all_same_engine = False<EOL>iax = nonconstant_ix_axes[<NUM_LIT:0>]<EOL><DEDENT>iix = [<NUM_LIT:0>] * idim<EOL>iix[iax] = slice(None)<EOL>iix = tuple(iix)<EOL>ixlist = self._placeholders[distix[iix]]<EOL>if contiguous:<EOL><INDENT>new_distaxis = fancy_pos[<NUM_LIT:0>] + iax<EOL><DEDENT>else:<EOL><INDENT>new_distaxis = <NUM_LIT:0> + iax<EOL><DEDENT>result_ras = []<EOL>ss, ms, js = self._tosubsj(ixlist)<EOL>shp = [<NUM_LIT:1>] * idim<EOL>for s, m, jlist in zip(ss, ms, js):<EOL><INDENT>shp[iax] = len(m)<EOL>m = np.array(m).reshape(shp) <EOL>sl = [slice(None)] * idim<EOL>sl[iax] = jlist<EOL>sl = tuple(sl)<EOL>subix = list(index)<EOL>for i in range(len(subix)):<EOL><INDENT>if isinstance(subix[i], np.ndarray):<EOL><INDENT>if i == distaxis:<EOL><INDENT>subix[i] = m<EOL><DEDENT>else:<EOL><INDENT>subix[i] = subix[i][sl]<EOL><DEDENT><DEDENT><DEDENT>subix = tuple(subix)<EOL>result_ras.append(self._subarrays[s][subix])<EOL><DEDENT>if all_same_engine:<EOL><INDENT>result_ras = [expand_dims(r, new_distaxis) for<EOL>r in result_ras]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>msg = (u'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>warnings.warn(msg, RuntimeWarning)<EOL>return self._ob[index]<EOL><DEDENT><DEDENT><DEDENT>if len(result_ras) == <NUM_LIT:0>:<EOL><INDENT>subix = index[<NUM_LIT:0>:distaxis] + (slice(<NUM_LIT:0>),) + index[(distaxis+<NUM_LIT:1>):]<EOL>return self._subarrays[<NUM_LIT:0>][subix]<EOL><DEDENT>if all(not isinstance(ra, RemoteArray) or ra._id.engine == -<NUM_LIT:1> for<EOL>ra in result_ras):<EOL><INDENT>return 
gather(concatenate(result_ras, axis=new_distaxis))<EOL><DEDENT>engines = [ra._id.engine if isinstance(ra, RemoteArray) else -<NUM_LIT:1> for<EOL>ra in result_ras]<EOL>if all(e == engines[<NUM_LIT:0>] for e in engines):<EOL><INDENT>return concatenate(result_ras, axis=new_distaxis)<EOL><DEDENT>else:<EOL><INDENT>return DistArray(result_ras, new_distaxis)<EOL><DEDENT>", "docstring": "Slice the distributed array", "id": "f4853:c1:m17"}
{"signature": "def __rrshift__(self, other):", "body": "return np.right_shift(other, self)<EOL>", "docstring": "x.__rshift__(y) <==> y>>x", "id": "f4853:c1:m43"}
{"signature": "def __iand__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__and__(y) <==> x&=y", "id": "f4853:c1:m62"}
{"signature": "def __iadd__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__add__(y) <==> x+=y", "id": "f4853:c0:m38"}
{"signature": "def __ilshift__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__lshift__(y) <==> x<<=y", "id": "f4853:c0:m44"}
{"signature": "def __ior__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__or__(y) <==> x|=y", "id": "f4853:c0:m48"}
{"signature": "def __radd__(self, other):", "body": "return np.add(other, self)<EOL>", "docstring": "x.__radd__(y) <==> y+x", "id": "f4853:c1:m27"}
{"signature": "def __isub__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__sub__(y) <==> x-=y", "id": "f4853:c1:m55"}
{"signature": "def __ror__(self, other):", "body": "return np.bitwise_or(other, self)<EOL>", "docstring": "x.__ror__(y) <==> y|x", "id": "f4853:c1:m49"}
{"signature": "def __rmul__(self, other):", "body": "return np.multiply(other, self)<EOL>", "docstring": "x.__rmul__(y) <==> y*x", "id": "f4853:c0:m15"}
{"signature": "def __rfloordiv__(self, other):", "body": "return np.floor_divide(other, self)<EOL>", "docstring": "x.__rfloordiv__(y) <==> y//x", "id": "f4853:c0:m17"}
{"signature": "def _tosubsj(self, ixlist):", "body": "n = len(ixlist)<EOL>N = self.shape[self._distaxis]<EOL>ss = []<EOL>ms = []<EOL>js = []<EOL>if n == <NUM_LIT:0>:<EOL><INDENT>return ss, ms, js<EOL><DEDENT>j = <NUM_LIT:0> <EOL>ix = ixlist[j]<EOL>if ix >= N or ix < -N:<EOL><INDENT>raise IndexError(<EOL>'<STR_LIT>',<EOL>ix, self._distaxis, N)<EOL><DEDENT>if ix < <NUM_LIT:0>:<EOL><INDENT>ix += N<EOL><DEDENT>while j < n:<EOL><INDENT>for s in range(<NUM_LIT:0>, self._n):<EOL><INDENT>low = self._si[s]<EOL>high = self._si[s + <NUM_LIT:1>]<EOL>if ix >= low and ix < high:<EOL><INDENT>ss.append(s)<EOL>msj = [ix - low]<EOL>jsj = [j]<EOL>j += <NUM_LIT:1><EOL>while j < n:<EOL><INDENT>ix = ixlist[j]<EOL>if ix >= N or ix < -N:<EOL><INDENT>raise IndexError(<EOL>'<STR_LIT>',<EOL>ix, self._distaxis, N)<EOL><DEDENT>if ix < <NUM_LIT:0>:<EOL><INDENT>ix += N<EOL><DEDENT>if ix < low or ix >= high:<EOL><INDENT>break<EOL><DEDENT>msj.append(ix - low)<EOL>jsj.append(j)<EOL>j += <NUM_LIT:1><EOL><DEDENT>ms.append(msj)<EOL>js.append(jsj)<EOL><DEDENT>if ix < low:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return ss, ms, js<EOL>", "docstring": "Like _tosubs(), maps a list of integer indices of the DistArray to\n        subarray indices, but also returns the positions processed in the\n        original index list.\n        ixlist can contain repeated indices and does not need to be sorted.\n        Returns tuple (ss, ms, js) where ss is a list of subarrays, ms is a\n        list of lists of subindices m (one list for each subarray s in ss), and\n        js is a list of lists of positions in ixlist that were processed (one\n        list for each subarray s in ss).", "id": "f4853:c1:m15"}
{"signature": "def __xor__(self, other):", "body": "return np.bitwise_xor(self, other)<EOL>", "docstring": "x.__xor__(y) <==> x^y", "id": "f4853:c1:m46"}
{"signature": "def _engine_affinity(obj):", "body": "from distob import engine<EOL>this_engine = engine.eid<EOL>if isinstance(obj, numbers.Number) or obj is None:<EOL><INDENT>return (this_engine, <NUM_LIT:0>)<EOL><DEDENT>elif hasattr(obj, '<STR_LIT>'):<EOL><INDENT>return obj.__engine_affinity__<EOL><DEDENT>else:<EOL><INDENT>return (this_engine, _rough_size(obj))<EOL><DEDENT>", "docstring": "Which engine or engines are preferred for processing this object\n    Returns: (location, weight)\n      location (integer or tuple): engine id (or in the case of a distributed\n      array a tuple (engine_id_list, distaxis)).\n      weight(integer): Proportional to the cost of moving the object to a\n        different engine. Currently just taken to be the size of data.", "id": "f4853:m3"}
{"signature": "def __pow__(self, other, modulo=None):", "body": "<EOL>return np.power(self, other)<EOL>", "docstring": "x.__pow__(y[, z]) <==> pow(x, y[, z])", "id": "f4853:c0:m22"}
{"signature": "def __mul__(self, other):", "body": "return np.multiply(self, other)<EOL>", "docstring": "x.__mul__(y) <==> x*y", "id": "f4853:c1:m30"}
{"signature": "def __neg__(self):", "body": "return np.negative(self)<EOL>", "docstring": "x.__neg__() <==> -x", "id": "f4853:c1:m67"}
{"signature": "@classmethod<EOL><INDENT>def __distob_vectorize__(cls, f):<DEDENT>", "body": "def vf(self, *args, **kwargs):<EOL><INDENT>kwargs = kwargs.copy()<EOL>kwargs['<STR_LIT>'] = False<EOL>kwargs['<STR_LIT>'] = False<EOL>ars = [call(f, ra, *args, **kwargs) for ra in self._subarrays]<EOL>results = [convert_result(ar) for ar in ars]<EOL>if all(isinstance(r, RemoteArray) and<EOL>r.ndim == results[<NUM_LIT:0>].ndim for r in results):<EOL><INDENT>out_shapes = [ra.shape for ra in results]<EOL>new_distaxis = self._new_distaxis(out_shapes)<EOL>if new_distaxis is None:<EOL><INDENT>return results <EOL><DEDENT>if new_distaxis == results[<NUM_LIT:0>].ndim:<EOL><INDENT>results = [r.expand_dims(new_distaxis) for r in results]<EOL><DEDENT>return DistArray(results, new_distaxis)<EOL><DEDENT>elif all(isinstance(r, numbers.Number) for r in results):<EOL><INDENT>return np.array(results)<EOL><DEDENT>else:<EOL><INDENT>return results  <EOL><DEDENT><DEDENT>if hasattr(f, '<STR_LIT>'):<EOL><INDENT>vf.__name__ = '<STR_LIT:v>' + f.__name__<EOL>f_str = f.__name__ + '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>f_str = '<STR_LIT>'<EOL><DEDENT>doc = u\"\"\"<STR_LIT>\"\"\" % (f_str, f_str)<EOL>if hasattr(f, '<STR_LIT>') and f.__doc__ is not None:<EOL><INDENT>doc = doc.rstrip() + ('<STR_LIT>' + f.__doc__)<EOL><DEDENT>vf.__doc__ = doc<EOL>return vf<EOL>", "docstring": "Upgrades a normal function f to act on a DistArray in parallel\n\n        Args:\n          f (callable): ordinary function which expects as its first \n            argument an array (of the same shape as our subarrays)\n\n        Returns:\n          vf (callable): new function that takes a DistArray as its first\n            argument. ``vf(distarray)`` will do the computation ``f(subarray)``\n            on each subarray in parallel and if possible will return another \n            DistArray. (otherwise will return a list with the result for each \n            subarray).", "id": "f4853:c1:m22"}
{"signature": "def __rxor__(self, other):", "body": "return np.bitwise_xor(other, self)<EOL>", "docstring": "x.__rxor__(y) <==> y^x", "id": "f4853:c0:m31"}
{"signature": "def __isub__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__sub__(y) <==> x-=y", "id": "f4853:c0:m39"}
{"signature": "def __ilshift__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__lshift__(y) <==> x<<=y", "id": "f4853:c1:m60"}
{"signature": "def vstack(tup):", "body": "<EOL>arrays = list(tup)<EOL>for i in range(len(arrays)):<EOL><INDENT>if arrays[i].ndim is <NUM_LIT:1>:<EOL><INDENT>arrays[i] = arrays[i][np.newaxis, :]<EOL><DEDENT><DEDENT>return concatenate(tup, axis=<NUM_LIT:0>)<EOL>", "docstring": "Stack arrays in sequence vertically (row wise), \n    handling ``RemoteArray`` and ``DistArray`` without moving data.\n\n    Args:\n      tup (sequence of array_like)\n\n    Returns: \n      res: `ndarray`, if inputs were all local\n           `RemoteArray`, if inputs were all on the same remote engine\n           `DistArray`, if inputs were already scattered on different engines", "id": "f4853:m11"}
{"signature": "def __imul__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__mul__(y) <==> x*=y", "id": "f4853:c0:m40"}
{"signature": "def __pos__(self):", "body": "return self<EOL>", "docstring": "x.__pos__() <==> +x", "id": "f4853:c0:m52"}
{"signature": "def __divmod__(self, other):", "body": "return (np.floor_divide(self - self % other, other), self % other)<EOL>", "docstring": "x.__divmod__(y) <==> divmod(x, y)", "id": "f4853:c1:m36"}
{"signature": "def _ufunc_dispatch(ufunc, method, i, inputs, **kwargs):", "body": "<EOL>if '<STR_LIT>' in kwargs and kwargs['<STR_LIT>'] is not None:<EOL><INDENT>raise Error('<STR_LIT>')<EOL><DEDENT>nin = <NUM_LIT:2> if ufunc is np.dot else ufunc.nin<EOL>if nin is <NUM_LIT:1> and method == '<STR_LIT>':<EOL><INDENT>return vectorize(ufunc.__call__)(inputs[<NUM_LIT:0>], **kwargs)<EOL><DEDENT>elif nin is <NUM_LIT:2> and method == '<STR_LIT>':<EOL><INDENT>from distob import engine<EOL>here = engine.eid<EOL>locs, weights = zip(*[_engine_affinity(a) for a in inputs])<EOL>bshape = _broadcast_shape(*inputs)<EOL>locs = list(locs)<EOL>for i, loc in enumerate(locs):<EOL><INDENT>if isinstance(loc, _TupleType):<EOL><INDENT>num_new_axes = len(bshape) - inputs[i].ndim<EOL>if num_new_axes > <NUM_LIT:0>:<EOL><INDENT>locs[i] = (locs[i][<NUM_LIT:0>], locs[i][<NUM_LIT:1>] + num_new_axes)<EOL><DEDENT><DEDENT><DEDENT>if ufunc is np.dot:<EOL><INDENT>locs = [here if isinstance(m, _TupleType) else m for m in locs]<EOL><DEDENT>if locs[<NUM_LIT:0>] == locs[<NUM_LIT:1>]:<EOL><INDENT>location = locs[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>smallest = <NUM_LIT:0> if weights[<NUM_LIT:0>] <= weights[<NUM_LIT:1>] else <NUM_LIT:1><EOL>largest = <NUM_LIT:1> - smallest<EOL>if locs[<NUM_LIT:0>] is here or locs[<NUM_LIT:1>] is here:<EOL><INDENT>location = here if weights[<NUM_LIT:0>] == weights[<NUM_LIT:1>] else locs[largest]<EOL><DEDENT>else:<EOL><INDENT>if weights[smallest]*<NUM_LIT:2> < weights[largest] + weights[smallest]:<EOL><INDENT>location = locs[largest]<EOL><DEDENT>else:<EOL><INDENT>location = here<EOL><DEDENT><DEDENT><DEDENT>inputs = [_ufunc_move_input(a, location, bshape) for a in inputs]<EOL>if location is here:<EOL><INDENT>return ufunc.__call__(inputs[<NUM_LIT:0>], inputs[<NUM_LIT:1>], **kwargs)<EOL><DEDENT>else:<EOL><INDENT>if isinstance(location, numbers.Integral):<EOL><INDENT>return call(ufunc.__call__, inputs[<NUM_LIT:0>], inputs[<NUM_LIT:1>], 
**kwargs)<EOL><DEDENT>else:<EOL><INDENT>engine_ids, distaxis = location<EOL>n = len(engine_ids)<EOL>is_dist = tuple(isinstance(a, DistArray) for a in inputs)<EOL>assert(is_dist[<NUM_LIT:0>] or is_dist[<NUM_LIT:1>])<EOL>for i in <NUM_LIT:0>, <NUM_LIT:1>:<EOL><INDENT>if is_dist[i]:<EOL><INDENT>ndim = inputs[i].ndim<EOL>assert(inputs[i]._distaxis == distaxis)<EOL>assert(inputs[i]._n == n)<EOL><DEDENT><DEDENT>def _remote_ucall(inputs, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return ufunc.__call__(inputs[<NUM_LIT:0>], inputs[<NUM_LIT:1>], **kwargs)<EOL><DEDENT>results = []<EOL>kwargs = kwargs.copy()<EOL>kwargs['<STR_LIT>'] = False<EOL>kwargs['<STR_LIT>'] = False<EOL>for j in range(n):<EOL><INDENT>subinputs = tuple(inputs[i]._subarrays[j] if <EOL>is_dist[i] else inputs[i] for i in (<NUM_LIT:0>, <NUM_LIT:1>))<EOL>results.append(call(_remote_ucall, subinputs, **kwargs))<EOL><DEDENT>results = [convert_result(ar) for ar in results]<EOL>return DistArray(results, distaxis)<EOL><DEDENT><DEDENT><DEDENT>elif ufunc.nin > <NUM_LIT:2>:<EOL><INDENT>raise Error(u'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise Error(u'<STR_LIT>' % method)<EOL><DEDENT>", "docstring": "Route ufunc execution intelligently to local host or remote engine(s)\n    depending on where the inputs are, to minimize the need to move data.\n    Args:\n      see numpy documentation for __numpy_ufunc__", "id": "f4853:m5"}
{"signature": "def __truediv__(self, other):", "body": "return np.true_divide(self, other)<EOL>", "docstring": "x.__truediv__(y) <==> x/y", "id": "f4853:c1:m52"}
{"signature": "def __and__(self, other):", "body": "return np.bitwise_and(self, other)<EOL>", "docstring": "x.__and__(y) <==> x&y", "id": "f4853:c1:m44"}
{"signature": "def __array_prepare__(self, out_arr, context=None):", "body": "<EOL>msg = (u'<STR_LIT>' +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT>' % context[<NUM_LIT:0>].__name__)<EOL>_brief_warning(msg, stacklevel=<NUM_LIT:3>)<EOL>return out_arr<EOL>", "docstring": "Fetch underlying data to user's computer and apply ufunc locally.\n        Only used as a fallback, for current numpy versions which lack \n        support for the future __numpy_ufunc__ mechanism.", "id": "f4853:c1:m11"}
{"signature": "def __ror__(self, other):", "body": "return np.bitwise_or(other, self)<EOL>", "docstring": "x.__ror__(y) <==> y|x", "id": "f4853:c0:m33"}
{"signature": "def __rsub__(self, other):", "body": "return np.subtract(other, self)<EOL>", "docstring": "x.__rsub__(y) <==> y-x", "id": "f4853:c0:m13"}
{"signature": "def __xor__(self, other):", "body": "return np.bitwise_xor(self, other)<EOL>", "docstring": "x.__xor__(y) <==> x^y", "id": "f4853:c0:m30"}
{"signature": "def __rdivmod__(self, other):", "body": "return (np.floor_divide(other - other % self, self), other % self)<EOL>", "docstring": "x.__rdivmod__(y) <==> divmod(y, x)", "id": "f4853:c1:m37"}
{"signature": "def __or__(self, other):", "body": "return np.bitwise_or(self, other)<EOL>", "docstring": "x.__or__(y) <==> x|y", "id": "f4853:c0:m32"}
{"signature": "def __invert__(self):", "body": "return np.invert(self)<EOL>", "docstring": "x.__invert__() <==> ~x", "id": "f4853:c0:m54"}
{"signature": "def __mul__(self, other):", "body": "return np.multiply(self, other)<EOL>", "docstring": "x.__mul__(y) <==> x*y", "id": "f4853:c0:m14"}
{"signature": "def __neg__(self):", "body": "return np.negative(self)<EOL>", "docstring": "x.__neg__() <==> -x", "id": "f4853:c0:m51"}
{"signature": "def __add__(self, other):", "body": "return np.add(self, other)<EOL>", "docstring": "x.__add__(y) <==> x+y", "id": "f4853:c1:m26"}
{"signature": "def __divmod__(self, other):", "body": "return (np.floor_divide(self - self % other, other), self % other)<EOL>", "docstring": "x.__divmod__(y) <==> divmod(x, y)", "id": "f4853:c0:m20"}
{"signature": "def __mod__(self, other):", "body": "return np.mod(self, other)<EOL>", "docstring": "x.__mod__(y) <==> x%y", "id": "f4853:c1:m34"}
{"signature": "def __rshift__(self, other):", "body": "return np.right_shift(self, other)<EOL>", "docstring": "x.__rshift__(y) <==> x>>y", "id": "f4853:c0:m26"}
{"signature": "def __lshift__(self, other):", "body": "return np.left_shift(self, other)<EOL>", "docstring": "x.__lshift__(y) <==> x<<y", "id": "f4853:c1:m40"}
{"signature": "def __iadd__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__add__(y) <==> x+=y", "id": "f4853:c1:m54"}
{"signature": "def _tosubs(self, ixlist):", "body": "n = len(ixlist)<EOL>N = self.shape[self._distaxis]<EOL>ss = []<EOL>ms = []<EOL>if n == <NUM_LIT:0>:<EOL><INDENT>return ss, ms<EOL><DEDENT>j = <NUM_LIT:0> <EOL>ix = ixlist[j]<EOL>if ix >= N or ix < -N:<EOL><INDENT>raise IndexError(<EOL>'<STR_LIT>',<EOL>ix, self._distaxis, N)<EOL><DEDENT>if ix < <NUM_LIT:0>:<EOL><INDENT>ix += N<EOL><DEDENT>while j < n:<EOL><INDENT>for s in range(<NUM_LIT:0>, self._n):<EOL><INDENT>low = self._si[s]<EOL>high = self._si[s + <NUM_LIT:1>]<EOL>if ix >= low and ix < high:<EOL><INDENT>ss.append(s)<EOL>msj = [ix - low]<EOL>j += <NUM_LIT:1><EOL>while j < n:<EOL><INDENT>ix = ixlist[j]<EOL>if ix >= N or ix < -N:<EOL><INDENT>raise IndexError(<EOL>'<STR_LIT>',<EOL>ix, self._distaxis, N)<EOL><DEDENT>if ix < <NUM_LIT:0>:<EOL><INDENT>ix += N<EOL><DEDENT>if ix < low or ix >= high:<EOL><INDENT>break<EOL><DEDENT>msj.append(ix - low)<EOL>j += <NUM_LIT:1><EOL><DEDENT>ms.append(msj)<EOL><DEDENT>if ix < low:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return ss, ms<EOL>", "docstring": "Maps a list of integer indices of the DistArray to subarray indices.\n        ixlist can contain repeated indices and does not need to be sorted.\n        Returns pair (ss, ms) where ss is a list of subarrays and ms is a\n        list of lists of subindices m (one list for each subarray s in ss).", "id": "f4853:c1:m14"}
{"signature": "@staticmethod<EOL><INDENT>def _valid_distaxis(shapes, ax):<DEDENT>", "body": "compare_shapes = np.vstack(shapes)<EOL>if ax < compare_shapes.shape[<NUM_LIT:1>]:<EOL><INDENT>compare_shapes[:, ax] = -<NUM_LIT:1><EOL><DEDENT>return np.count_nonzero(compare_shapes - compare_shapes[<NUM_LIT:0>]) == <NUM_LIT:0><EOL>", "docstring": "`ax` is a valid candidate for a distributed axis if the given\n        subarray shapes are all the same when ignoring axis `ax`", "id": "f4853:c1:m20"}
{"signature": "def __imod__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__mod__(y) <==> x%=y", "id": "f4853:c0:m42"}
{"signature": "def __rand__(self, other):", "body": "return np.bitwise_and(other, self)<EOL>", "docstring": "x.__rand__(y) <==> y&x", "id": "f4853:c0:m29"}
{"signature": "def __rfloordiv__(self, other):", "body": "return np.floor_divide(other, self)<EOL>", "docstring": "x.__rfloordiv__(y) <==> y//x", "id": "f4853:c1:m33"}
{"signature": "def __rdiv__(self, other):", "body": "return np.divide(other, self)<EOL>", "docstring": "x.__rdiv__(y) <==> y/x", "id": "f4853:c1:m51"}
{"signature": "def __ipow__(self, other, modulo=None):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__pow__(y) <==> x**=y", "id": "f4853:c1:m59"}
{"signature": "def mean(self, axis=None, dtype=None, out=None, keepdims=False):", "body": "if axis == -<NUM_LIT:1>:<EOL><INDENT>axis = self.ndim<EOL><DEDENT>if axis is None:<EOL><INDENT>results = vectorize(mean)(self, axis, dtype, keepdims=False)<EOL>weights = self._sublengths<EOL>res = np.average(results, axis=None, weights=weights)<EOL>if keepdims:<EOL><INDENT>for i in range(self.ndim):<EOL><INDENT>res = expand_dims(res, res.ndim)<EOL><DEDENT><DEDENT><DEDENT>elif axis == self._distaxis:<EOL><INDENT>results = vectorize(mean)(self, axis, dtype, keepdims=True)<EOL>results = gather(results)<EOL>weights = (np.array(self._sublengths, dtype=np.float64) /<EOL>sum(self._sublengths))<EOL>ix = [slice(None)] * self.ndim<EOL>ix[axis] = <NUM_LIT:0><EOL>res = results[ix] * weights[<NUM_LIT:0>]<EOL>for i in range(<NUM_LIT:1>, self._n):<EOL><INDENT>ix[axis] = i<EOL>res = res + results[ix] * weights[i]<EOL><DEDENT>if keepdims:<EOL><INDENT>res = expand_dims(res, axis)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>res = vectorize(mean)(self, axis, dtype, keepdims=False)<EOL>if keepdims:<EOL><INDENT>res = expand_dims(res, axis)<EOL><DEDENT><DEDENT>if out is not None:<EOL><INDENT>out[:] = res<EOL><DEDENT>return res<EOL>", "docstring": "Compute the arithmetic mean along the specified axis.\n        See np.mean() for details.", "id": "f4853:c1:m25"}
{"signature": "def __imul__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__mul__(y) <==> x*=y", "id": "f4853:c1:m56"}
{"signature": "def _broadcast_shape(*args):", "body": "<EOL>shapes = [a.shape if hasattr(type(a), '<STR_LIT>')<EOL>else () for a in args]<EOL>ndim = max(len(sh) for sh in shapes) <EOL>for i, sh in enumerate(shapes):<EOL><INDENT>if len(sh) < ndim:<EOL><INDENT>shapes[i] = (<NUM_LIT:1>,)*(ndim - len(sh)) + sh<EOL><DEDENT><DEDENT>return tuple(max(sh[ax] for sh in shapes) for ax in range(ndim))<EOL>", "docstring": "Return the shape that would result from broadcasting the inputs", "id": "f4853:m18"}
{"signature": "def __ixor__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__xor__(y) <==> x^=y", "id": "f4853:c0:m47"}
{"signature": "def __irshift__(self, other):", "body": "raise Error('<STR_LIT>')<EOL>", "docstring": "x.__rshift__(y) <==> x>>=y", "id": "f4853:c0:m45"}
{"signature": "def transpose(self, *axes):", "body": "if axes is ():<EOL><INDENT>axes = None<EOL><DEDENT>return transpose(self, axes)<EOL>", "docstring": "Returns a view of the array with axes transposed.\n\n        For a 1-D array, this has no effect.\n        For a 2-D array, this is the usual matrix transpose.\n        For an n-D array, if axes are given, their order indicates how the\n        axes are permuted\n\n        Args:\n          a (array_like): Input array.\n          axes (tuple of int, optional): By default, reverse the dimensions, \n            otherwise permute the axes according to values given.", "id": "f4853:c1:m19"}
{"signature": "def __div__(self, other):", "body": "return np.divide(self, other)<EOL>", "docstring": "x.__div__(y) <==> x/y", "id": "f4853:c1:m50"}
{"signature": "def __setitem__(self, index, value):", "body": "raise Error(u'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>", "docstring": "Assign to the sliced item", "id": "f4853:c1:m18"}
{"signature": "def apply(f, obj, *args, **kwargs):", "body": "return vectorize(f)(obj, *args, **kwargs)<EOL>", "docstring": "Apply a function in parallel to each element of the input", "id": "f4855:m22"}
{"signature": "def _process_args(args, kwargs, prefer_local=True, recurse=True):", "body": "this_engine = distob.engine.eid<EOL>local_args = []<EOL>remote_args = []<EOL>execloc = this_engine  <EOL>for a in args:<EOL><INDENT>id = None<EOL>if isinstance(a, Remote):<EOL><INDENT>id = a._ref.id<EOL><DEDENT>elif isinstance(a, Ref):<EOL><INDENT>id = a.id<EOL><DEDENT>elif isinstance(a, Id):<EOL><INDENT>id = a<EOL><DEDENT>if id is not None:<EOL><INDENT>if id.engine is this_engine:<EOL><INDENT>local_args.append(distob.engine[id])<EOL>remote_args.append(distob.engine[id])<EOL><DEDENT>else:<EOL><INDENT>if (prefer_local and isinstance(a, Remote) and <EOL>a._obcache_current):<EOL><INDENT>local_args.append(a._obcache)<EOL>remote_args.append(id)<EOL><DEDENT>else:<EOL><INDENT>if execloc is not this_engine and id.engine is not execloc:<EOL><INDENT>raise DistobValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>execloc = id.engine<EOL>local_args.append(None)<EOL>remote_args.append(id)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif (isinstance(a, collections.Sequence) and<EOL>not isinstance(a, string_types) and recurse):<EOL><INDENT>eid, ls, _ = _process_args(a, {}, prefer_local, recurse=False)<EOL>if eid is not this_engine:<EOL><INDENT>if execloc is not this_engine and eid is not execloc:<EOL><INDENT>raise DistobValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>execloc = eid<EOL><DEDENT>local_args.append(ls)<EOL>remote_args.append(ls)<EOL><DEDENT>else:<EOL><INDENT>local_args.append(a)<EOL>remote_args.append(a)<EOL><DEDENT><DEDENT>local_kwargs = dict()<EOL>remote_kwargs = dict()<EOL>for k, a in kwargs.items():<EOL><INDENT>id = None<EOL>if isinstance(a, Remote):<EOL><INDENT>id = a._ref.id<EOL><DEDENT>elif isinstance(a, Ref):<EOL><INDENT>id = a.id<EOL><DEDENT>elif isinstance(a, Id):<EOL><INDENT>id = a<EOL><DEDENT>if id is not None:<EOL><INDENT>if id.engine is this_engine:<EOL><INDENT>local_kwargs[k] = distob.engine[id]<EOL>remote_kwargs[k] = distob.engine[id]<EOL><DEDENT>else:<EOL><INDENT>if 
(prefer_local and isinstance(a, Remote) and<EOL>a._obcache_current):<EOL><INDENT>local_kwargs[k] = a._obcache<EOL>remote_kwargs[k] = id<EOL><DEDENT>else:<EOL><INDENT>if execloc is not this_engine and id.engine is not execloc:<EOL><INDENT>raise DistobValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>execloc = id.engine<EOL>local_kwargs[k] = None<EOL>remote_kwargs[k] = id<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif (isinstance(a, collections.Sequence) and<EOL>not isinstance(a, string_types) and recurse):<EOL><INDENT>eid, ls, _ = _process_args(a, {}, prefer_local, recurse=False)<EOL>if eid is not this_engine:<EOL><INDENT>if execloc is not this_engine and eid is not execloc:<EOL><INDENT>raise DistobValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>execloc = eid<EOL><DEDENT>local_kwargs[k] = ls<EOL>remote_kwargs[k] = ls<EOL><DEDENT>else:<EOL><INDENT>local_kwargs[k] = a<EOL>remote_kwargs[k] = a<EOL><DEDENT><DEDENT>if execloc is this_engine:<EOL><INDENT>return execloc, tuple(local_args), local_kwargs<EOL><DEDENT>else:<EOL><INDENT>return execloc, tuple(remote_args), remote_kwargs<EOL><DEDENT>", "docstring": "Select local or remote execution and prepare arguments accordingly.\n    Assumes any remote args have already been moved to a common engine.\n\n    Local execution will be chosen if:\n    - all args are ordinary objects or Remote instances on the local engine; or\n    - the local cache of all remote args is current, and prefer_local is True.\n    Otherwise, remote execution will be chosen. \n\n    For remote execution, replaces any remote arg with its Id.\n    For local execution, replaces any remote arg with its locally cached object\n    Any arguments or kwargs that are Sequences will be recursed one level deep.\n\n    Args:\n      args (list)\n      kwargs (dict)\n      prefer_local (bool, optional): Whether cached local results are prefered\n        if available, instead of returning Remote objects. Default is True.", "id": "f4855:m3"}
{"signature": "def _remote_setup_engine(engine_id, nengines):", "body": "if distob.engine is None:<EOL><INDENT>distob.engine = distob.ObjectEngine(engine_id, nengines)<EOL><DEDENT>import numpy as np<EOL>from scipy import stats<EOL>import __main__<EOL>__main__.__dict__['<STR_LIT>'] = np<EOL>__main__.__dict__['<STR_LIT>'] = stats<EOL>", "docstring": "(Executed on remote engine) creates an ObjectEngine instance", "id": "f4855:m1"}
{"signature": "def _ars_to_proxies(ars):", "body": "if (isinstance(ars, Remote) or<EOL>isinstance(ars, numbers.Number) or<EOL>ars is None):<EOL><INDENT>return ars<EOL><DEDENT>elif isinstance(ars, collections.Sequence):<EOL><INDENT>res = []<EOL>for i in range(len(ars)):<EOL><INDENT>res.append(_ars_to_proxies(ars[i]))<EOL><DEDENT>return res<EOL><DEDENT>elif isinstance(ars, ipyparallel.AsyncResult):<EOL><INDENT>ref = ars.r<EOL>ObClass = ref.type<EOL>if ObClass in distob.engine.proxy_types:<EOL><INDENT>RemoteClass = distob.engine.proxy_types[ObClass]<EOL><DEDENT>else:<EOL><INDENT>RemoteClass = type(<EOL>'<STR_LIT>' + ObClass.__name__, (Remote, ObClass), dict())<EOL>RemoteClass = proxy_methods(ObClass)(RemoteClass)<EOL><DEDENT>proxy_obj = RemoteClass(ref)<EOL>return proxy_obj<EOL><DEDENT>else:<EOL><INDENT>raise DistobTypeError('<STR_LIT>' % type(ars))<EOL><DEDENT>", "docstring": "wait for async results and return proxy objects\n    Args: \n      ars: AsyncResult (or sequence of AsyncResults), each result type ``Ref``.\n    Returns:\n      Remote* proxy object (or list of them)", "id": "f4855:m16"}
{"signature": "def vectorize(f):", "body": "def vf(obj, *args, **kwargs):<EOL><INDENT>if hasattr(obj, '<STR_LIT>'):<EOL><INDENT>return obj.__distob_vectorize__(f)(obj, *args, **kwargs)<EOL><DEDENT>if isinstance(obj, Remote):<EOL><INDENT>return call(f, obj, *args, **kwargs)<EOL><DEDENT>elif distob._have_numpy and (isinstance(obj, np.ndarray) or<EOL>hasattr(type(obj), '<STR_LIT>')):<EOL><INDENT>distarray = scatter(obj, axis=-<NUM_LIT:1>)<EOL>return vf(distarray, *args, **kwargs)<EOL><DEDENT>elif isinstance(obj, collections.Sequence):<EOL><INDENT>inputs = scatter(obj)<EOL>dv = distob.engine._client[:]<EOL>kwargs = kwargs.copy()<EOL>kwargs['<STR_LIT>'] = False<EOL>results = []<EOL>for obj in inputs:<EOL><INDENT>results.append(call(f, obj, *args, **kwargs))<EOL><DEDENT>for i in range(len(results)):<EOL><INDENT>results[i] = convert_result(results[i])<EOL><DEDENT>return results<EOL><DEDENT><DEDENT>if hasattr(f, '<STR_LIT>'):<EOL><INDENT>vf.__name__ = '<STR_LIT:v>' + f.__name__<EOL>f_str = f.__name__ + '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>f_str = '<STR_LIT>'<EOL><DEDENT>doc = u\"\"\"<STR_LIT>\"\"\" % (f_str, f_str)<EOL>if hasattr(f, '<STR_LIT>') and f.__doc__ is not None:<EOL><INDENT>doc = doc.rstrip() + ('<STR_LIT>' + f.__doc__)<EOL><DEDENT>vf.__doc__ = doc<EOL>return vf<EOL>", "docstring": "Upgrade normal function f to act in parallel on distibuted lists/arrays\n\n    Args:\n      f (callable): an ordinary function which expects as its first argument a\n        single object, or a numpy array of N dimensions.\n\n    Returns:\n      vf (callable): new function that takes as its first argument a list of\n        objects, or a array of N+1 dimensions. ``vf()`` will do the\n        computation ``f()`` on each part of the input in parallel and will\n        return a list of results, or a distributed array of results.", "id": "f4855:m21"}
{"signature": "def proxy_methods(base, include_underscore=None, exclude=None, supers=True):", "body": "always_exclude = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>')<EOL>if isinstance(include_underscore, str):<EOL><INDENT>include_underscore = (include_underscore,)<EOL><DEDENT>if isinstance(exclude, str):<EOL><INDENT>exclude = (exclude,)<EOL><DEDENT>if not include_underscore:<EOL><INDENT>include_underscore = ()<EOL><DEDENT>if not exclude:<EOL><INDENT>exclude = ()<EOL><DEDENT>def rebuild_class(cls):<EOL><INDENT>bases_other = list(cls.__bases__)<EOL>if bases_other[-<NUM_LIT:1>] is object:<EOL><INDENT>bases_other.pop()<EOL><DEDENT>if base in bases_other:<EOL><INDENT>bases_other = bases_other[:bases_other.index(base)]<EOL><DEDENT>if not issubclass(cls.__bases__[<NUM_LIT:0>], Remote):<EOL><INDENT>raise DistobTypeError('<STR_LIT>')<EOL><DEDENT>if not issubclass(base, object):<EOL><INDENT>raise DistobTypeError('<STR_LIT>')<EOL><DEDENT>dct = cls.__dict__.copy()<EOL>if cls.__doc__ is None or '<STR_LIT:\\n>' not in cls.__doc__:<EOL><INDENT>base_doc = base.__doc__<EOL>if base_doc is None:<EOL><INDENT>base_doc = '<STR_LIT>'<EOL><DEDENT>dct['<STR_LIT>'] = \"\"\"<STR_LIT>\"\"\" % ((base.__name__,)*<NUM_LIT:3>) + base_doc<EOL><DEDENT>newcls = type(cls.__name__, cls.__bases__, dct)<EOL>newcls._include_underscore = include_underscore<EOL>newcls._exclude = exclude<EOL>if supers:<EOL><INDENT>proxied_classes = base.__mro__[:-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>proxied_classes = (base,)<EOL><DEDENT>for c in proxied_classes:<EOL><INDENT>for name in c.__dict__:<EOL><INDENT>if (name not in newcls.__dict__ and<EOL>all(name not in b.__dict__ <EOL>for c in bases_other for b in c.mro()[:-<NUM_LIT:1>]) and<EOL>name not in newcls._exclude and<EOL>name not in always_exclude and<EOL>(name[<NUM_LIT:0>] != '<STR_LIT:_>' or <EOL>newcls._include_underscore is True or<EOL>name in newcls._include_underscore)):<EOL><INDENT>f = c.__dict__[name]<EOL>if hasattr(f, 
'<STR_LIT>'):<EOL><INDENT>doc = f.__doc__<EOL><DEDENT>else:<EOL><INDENT>doc = None<EOL><DEDENT>if callable(f) and not isinstance(f, type):<EOL><INDENT>setattr(newcls, name, _make_proxy_method(name, doc))<EOL><DEDENT>else:<EOL><INDENT>setattr(newcls, name, _make_proxy_property(name, doc))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>newcls.__module__ = '<STR_LIT:__main__>' <EOL>import __main__<EOL>__main__.__dict__[newcls.__name__] = newcls <EOL>ObjectHub.register_proxy_type(base, newcls)<EOL>return newcls<EOL><DEDENT>return rebuild_class<EOL>", "docstring": "class decorator. Modifies `Remote` subclasses to add proxy methods and\n    attributes that mimic those defined in class `base`.\n\n    Example:\n\n      @proxy_methods(Tree)\n      class RemoteTree(Remote, Tree)\n\n    The decorator registers the new proxy class and specifies which methods\n    and attributes of class `base` should be proxied via a remote call to\n    a real object, and which methods/attributes should not be proxied but\n    instead called directly on the instance of the proxy class.\n\n    By default all methods and attributes of the class `base` will be\n    proxied except those starting with an underscore.\n\n    The MRO of the decorated class is respected:\n    Any methods and attributes defined in the decorated class\n    (or in other bases of the decorated class that do not come after `base`\n     in its MRO) will override those added by this decorator,\n    so that `base` is treated like a base class.\n\n    Args:\n      base (type): The class whose instances should be remotely controlled.\n      include_underscore (bool or sequence of str): Should methods or\n        attributes that start with an underscore be proxied anyway? 
If a\n        sequence of names is provided then methods or attributes starting with\n        an underscore will only be proxied if their names are in the sequence.\n      exclude (sequence of str): Names of any methods or attributes that \n        should not be proxied.\n      supers (bool): Proxy methods and attributes defined in superclasses \n        of ``base``, in addition to those defined directly in class ``base``", "id": "f4855:m14"}
{"signature": "def __ob(self):", "body": "self._fetch()<EOL>return self._obcache<EOL>", "docstring": "return a local copy of the real object", "id": "f4855:c7:m2"}
{"signature": "def call_all(sequence, method_name, *args, **kwargs):", "body": "kwargs = kwargs.copy()<EOL>kwargs['<STR_LIT>'] = False<EOL>results = []<EOL>for obj in sequence:<EOL><INDENT>results.append(methodcall(obj, method_name, *args, **kwargs))<EOL><DEDENT>for i in range(len(results)):<EOL><INDENT>results[i] = convert_result(results[i])<EOL><DEDENT>return results<EOL>", "docstring": "Call a method on each element of a sequence, in parallel.\n    Returns:\n      list of results", "id": "f4855:m23"}
{"signature": "def _scatter_ndarray(ar, axis=-<NUM_LIT:1>, destination=None, blocksize=None):", "body": "from .arrays import DistArray, RemoteArray<EOL>shape = ar.shape<EOL>ndim = len(shape)<EOL>if axis is None:<EOL><INDENT>return _directed_scatter([ar], destination=[destination],<EOL>blocksize=blocksize)[<NUM_LIT:0>]<EOL><DEDENT>if axis < -ndim or axis > ndim - <NUM_LIT:1>:<EOL><INDENT>raise DistobValueError('<STR_LIT>')<EOL><DEDENT>if axis < <NUM_LIT:0>:<EOL><INDENT>axis = ndim + axis<EOL><DEDENT>n = shape[axis]<EOL>if n == <NUM_LIT:1>:<EOL><INDENT>return _directed_scatter([ar], destination=[destination])[<NUM_LIT:0>]<EOL><DEDENT>if isinstance(destination, collections.Sequence):<EOL><INDENT>ne = len(destination) <EOL><DEDENT>else:<EOL><INDENT>if distob.engine is None:<EOL><INDENT>setup_engines()<EOL><DEDENT>ne = distob.engine.nengines <EOL><DEDENT>if blocksize is None:<EOL><INDENT>blocksize = ((n - <NUM_LIT:1>) // ne) + <NUM_LIT:1><EOL><DEDENT>if blocksize > n:<EOL><INDENT>blocksize = n<EOL><DEDENT>if isinstance(ar, DistArray):<EOL><INDENT>if axis == ar._distaxis:<EOL><INDENT>return ar<EOL><DEDENT>else:<EOL><INDENT>raise DistobError('<STR_LIT>')<EOL><DEDENT><DEDENT>if isinstance(ar, RemoteArray) and n > blocksize:<EOL><INDENT>ar = ar._ob<EOL><DEDENT>s = slice(None)<EOL>subarrays = []<EOL>low = <NUM_LIT:0><EOL>for i in range(<NUM_LIT:0>, n // blocksize):<EOL><INDENT>high = low + blocksize<EOL>index = (s,)*axis + (slice(low, high),) + (s,)*(ndim - axis - <NUM_LIT:1>)<EOL>subarrays.append(ar[index])<EOL>low += blocksize<EOL><DEDENT>if n % blocksize != <NUM_LIT:0>:<EOL><INDENT>high = low + (n % blocksize)<EOL>index = (s,)*axis + (slice(low, high),) + (s,)*(ndim - axis - <NUM_LIT:1>)<EOL>subarrays.append(ar[index])<EOL><DEDENT>subarrays = _directed_scatter(subarrays, destination=destination)<EOL>return DistArray(subarrays, axis)<EOL>", "docstring": "Turn a numpy ndarray into a DistArray or RemoteArray\n    Args:\n     ar (array_like)\n     axis (int, optional): 
specifies along which axis to split the array to \n       distribute it. The default is to split along the last axis. `None` means\n       do not distribute.\n     destination (int or list of int, optional): Optionally force the array to\n       go to a specific engine. If an array is to be scattered along an axis, \n       this should be a list of engine ids with the same length as that axis.\n     blocksize (int): Optionally control the size of intervals into which the\n       distributed axis is split (the default splits the distributed axis\n       evenly over all computing engines).", "id": "f4855:m17"}
{"signature": "def convert_result(r):", "body": "if (isinstance(r, collections.Sequence) and<EOL>not isinstance(r, string_types)):<EOL><INDENT>rs = []<EOL>for subresult in r:<EOL><INDENT>rs.append(convert_result(subresult))<EOL><DEDENT>return rs<EOL><DEDENT>if isinstance(r, ipyparallel.AsyncResult):<EOL><INDENT>r = r.r<EOL><DEDENT>if isinstance(r, Ref):<EOL><INDENT>RemoteClass = distob.engine.proxy_types[r.type]<EOL>r = RemoteClass(r)<EOL><DEDENT>return r<EOL>", "docstring": "Waits for and converts any AsyncResults. Converts any Ref into a Remote.\n    Args:\n      r: can be an ordinary object, ipyparallel.AsyncResult, a Ref, or a\n        Sequence of objects, AsyncResults and Refs.\n    Returns: \n      either an ordinary object or a Remote instance", "id": "f4855:m7"}
{"signature": "def _remote_call(f, *args, **kwargs):", "body": "nargs = []<EOL>for a in args:<EOL><INDENT>if isinstance(a, Id):<EOL><INDENT>nargs.append(distob.engine[a])<EOL><DEDENT>elif (isinstance(a, collections.Sequence) and<EOL>not isinstance(a, string_types)):<EOL><INDENT>nargs.append(<EOL>[distob.engine[b] if isinstance(b, Id) else b for b in a])<EOL><DEDENT>else: nargs.append(a)<EOL><DEDENT>for k, a in kwargs.items():<EOL><INDENT>if isinstance(a, Id):<EOL><INDENT>kwargs[k] = distob.engine[a]<EOL><DEDENT>elif (isinstance(a, collections.Sequence) and<EOL>not isinstance(a, string_types)):<EOL><INDENT>kwargs[k] = [<EOL>distob.engine[b] if isinstance(b, Id) else b for b in a]<EOL><DEDENT><DEDENT>result = f(*nargs, **kwargs)<EOL>if (isinstance(result, collections.Sequence) and<EOL>not isinstance(result, string_types)):<EOL><INDENT>results = []<EOL>for subresult in result:<EOL><INDENT>if type(subresult) in distob.engine.proxy_types: <EOL><INDENT>results.append(Ref(subresult))<EOL><DEDENT>else:<EOL><INDENT>results.append(subresult)<EOL><DEDENT><DEDENT>return results<EOL><DEDENT>elif type(result) in distob.engine.proxy_types:<EOL><INDENT>return Ref(result)<EOL><DEDENT>else:<EOL><INDENT>return result<EOL><DEDENT>", "docstring": "(Executed on remote engine) convert Ids to real objects, call f", "id": "f4855:m4"}
{"signature": "def clone(src, dst_path, skip_globals, skip_dimensions, skip_variables):", "body": "if os.path.exists(dst_path):<EOL><INDENT>os.unlink(dst_path)<EOL><DEDENT>dst = netCDF4.Dataset(dst_path, '<STR_LIT:w>')<EOL>for attname in src.ncattrs():<EOL><INDENT>if attname not in skip_globals:<EOL><INDENT>setattr(dst, attname, getattr(src, attname))<EOL><DEDENT><DEDENT>unlimdim     = None<EOL>unlimdimname = False<EOL>for dimname, dim in src.dimensions.items():<EOL><INDENT>if dimname in skip_dimensions:<EOL><INDENT>continue<EOL><DEDENT>if dim.isunlimited():<EOL><INDENT>unlimdim     = dim<EOL>unlimdimname = dimname<EOL>dst.createDimension(dimname, None)<EOL><DEDENT>else:<EOL><INDENT>dst.createDimension(dimname, len(dim))<EOL><DEDENT><DEDENT>for varname, ncvar in src.variables.items():<EOL><INDENT>if varname in skip_variables:<EOL><INDENT>continue<EOL><DEDENT>hasunlimdim = False<EOL>if unlimdimname and unlimdimname in ncvar.dimensions:<EOL><INDENT>hasunlimdim = True<EOL><DEDENT>filler = None<EOL>if hasattr(ncvar, '<STR_LIT>'):<EOL><INDENT>filler = ncvar._FillValue<EOL><DEDENT>if ncvar.chunking == \"<STR_LIT>\":<EOL><INDENT>var = dst.createVariable(varname, ncvar.dtype, ncvar.dimensions, fill_value=filler)<EOL><DEDENT>else:<EOL><INDENT>var = dst.createVariable(varname, ncvar.dtype, ncvar.dimensions, fill_value=filler, chunksizes=ncvar.chunking())<EOL><DEDENT>for attname in ncvar.ncattrs():<EOL><INDENT>if attname == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>setattr(var, attname, getattr(ncvar, attname))<EOL><DEDENT><DEDENT>nchunk = <NUM_LIT:1000><EOL>if hasunlimdim:<EOL><INDENT>if nchunk:<EOL><INDENT>start = <NUM_LIT:0><EOL>stop = len(unlimdim)<EOL>step = nchunk<EOL>if step < <NUM_LIT:1>:<EOL><INDENT>step = <NUM_LIT:1><EOL><DEDENT>for n in range(start, stop, step):<EOL><INDENT>nmax = n + nchunk<EOL>if nmax > len(unlimdim):<EOL><INDENT>nmax = len(unlimdim)<EOL><DEDENT>idata = ncvar[n:nmax]<EOL>var[n:nmax] = 
idata<EOL><DEDENT><DEDENT>else:<EOL><INDENT>idata = ncvar[:]<EOL>var[<NUM_LIT:0>:len(unlimdim)] = idata<EOL><DEDENT><DEDENT>else:<EOL><INDENT>idata = ncvar[:]<EOL>var[:] = idata<EOL><DEDENT>dst.sync()<EOL><DEDENT>src.close()<EOL>dst.close()<EOL>", "docstring": "Mostly ripped from nc3tonc4 in netCDF4-python.\nAdded ability to skip dimension and variables.\nRemoved all of the unpacking logic for shorts.", "id": "f4869:m0"}
{"signature": "@classmethod<EOL><INDENT>def combine(self, members, output_file, dimension=None, start_index=None, stop_index=None, stride=None):<DEDENT>", "body": "nco = None<EOL>try:<EOL><INDENT>nco = Nco()<EOL><DEDENT>except BaseException:<EOL><INDENT>raise ImportError(\"<STR_LIT>\")<EOL><DEDENT>if len(members) > <NUM_LIT:0> and hasattr(members[<NUM_LIT:0>], '<STR_LIT:path>'):<EOL><INDENT>members = [ m.path for m in members ]<EOL><DEDENT>options  = ['<STR_LIT>']  <EOL>options += ['<STR_LIT>', '<STR_LIT:3>']  <EOL>options += ['<STR_LIT>']  <EOL>if dimension is not None:<EOL><INDENT>if start_index is None:<EOL><INDENT>start_index = <NUM_LIT:0><EOL><DEDENT>if stop_index is None:<EOL><INDENT>stop_index = '<STR_LIT>'<EOL><DEDENT>if stride is None:<EOL><INDENT>stride = <NUM_LIT:1><EOL><DEDENT>options += ['<STR_LIT>', '<STR_LIT>'.format(dimension, start_index, stop_index, stride)]<EOL><DEDENT>nco.ncrcat(input=members, output=output_file, options=options)<EOL>", "docstring": "Combine many files into a single file on disk.  Defaults to using the 'time' dimension.", "id": "f4872:c0:m3"}
{"signature": "def get_variables_by_attributes(self, **kwargs):", "body": "vs = []<EOL>has_value_flag  = False<EOL>for vname in self.variables:<EOL><INDENT>var = self.variables[vname]<EOL>for k, v in kwargs.items():<EOL><INDENT>if callable(v):<EOL><INDENT>has_value_flag = v(getattr(var, k, None))<EOL>if has_value_flag is False:<EOL><INDENT>break<EOL><DEDENT><DEDENT>elif hasattr(var, k) and getattr(var, k) == v:<EOL><INDENT>has_value_flag = True<EOL><DEDENT>else:<EOL><INDENT>has_value_flag = False<EOL>break<EOL><DEDENT><DEDENT>if has_value_flag is True:<EOL><INDENT>vs.append(self.variables[vname])<EOL><DEDENT><DEDENT>return vs<EOL>", "docstring": "Returns variables that match specific conditions.\n\n        * Can pass in key=value parameters and variables are returned that\n        contain all of the matches.  For example,\n\n        >>> # Get variables with x-axis attribute.\n        >>> vs = nc.get_variables_by_attributes(axis='X')\n        >>> # Get variables with matching \"standard_name\" attribute.\n        >>> nc.get_variables_by_attributes(standard_name='northward_sea_water_velocity')\n\n        * Can pass in key=callable parameter and variables are returned if the\n        callable returns True.  The callable should accept a single parameter,\n        the attribute value.  None is given as the attribute value when the\n        attribute does not exist on the variable. For example,\n\n        >>> # Get Axis variables.\n        >>> vs = nc.get_variables_by_attributes(axis=lambda v: v in ['X', 'Y', 'Z', 'T'])\n        >>> # Get variables that don't have an \"axis\" attribute.\n        >>> vs = nc.get_variables_by_attributes(axis=lambda v: v is None)\n        >>> # Get variables that have a \"grid_mapping\" attribute.\n        >>> vs = nc.get_variables_by_attributes(grid_mapping=lambda v: v is not None)", "id": "f4874:c0:m0"}
{"signature": "def all_subclasses(cls):", "body": "for subclass in cls.__subclasses__():<EOL><INDENT>yield subclass<EOL>for subc in all_subclasses(subclass):<EOL><INDENT>yield subc<EOL><DEDENT><DEDENT>", "docstring": "Recursively generate of all the subclasses of class cls.", "id": "f4895:m0"}
{"signature": "def generic_masked(arr, attrs=None, minv=None, maxv=None, mask_nan=True):", "body": "attrs = attrs or {}<EOL>if '<STR_LIT>' in attrs:<EOL><INDENT>minv = safe_attribute_typing(arr.dtype, attrs['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in attrs:<EOL><INDENT>maxv = safe_attribute_typing(arr.dtype, attrs['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in attrs:<EOL><INDENT>vr = attrs['<STR_LIT>']<EOL>minv = safe_attribute_typing(arr.dtype, vr[<NUM_LIT:0>])<EOL>maxv = safe_attribute_typing(arr.dtype, vr[<NUM_LIT:1>])<EOL><DEDENT>try:<EOL><INDENT>info = np.iinfo(arr.dtype)<EOL><DEDENT>except ValueError:<EOL><INDENT>info = np.finfo(arr.dtype)<EOL><DEDENT>minv = minv if minv is not None else info.min<EOL>maxv = maxv if maxv is not None else info.max<EOL>if mask_nan is True:<EOL><INDENT>arr = np.ma.fix_invalid(arr)<EOL><DEDENT>return np.ma.masked_outside(<EOL>arr,<EOL>minv,<EOL>maxv<EOL>)<EOL>", "docstring": "Returns a masked array with anything outside of values masked.\nThe minv and maxv parameters take precendence over any dict values.\nThe valid_range attribute takes precendence over the valid_min and\nvalid_max attributes.", "id": "f4895:m4"}
{"signature": "def default(self, obj):", "body": "if isinstance(obj, np.ndarray):<EOL><INDENT>return obj.tolist()<EOL><DEDENT>elif isinstance(obj, np.generic):<EOL><INDENT>return np.asscalar(obj)<EOL><DEDENT>return json.JSONEncoder(self, obj)<EOL>", "docstring": "If input object is an ndarray it will be converted into a list", "id": "f4895:c1:m0"}
{"signature": "def normalize_array(var):", "body": "if np.issubdtype(var.dtype, '<STR_LIT>'):<EOL><INDENT>if var.dtype == str:<EOL><INDENT>return var[:]<EOL><DEDENT>def decoder(x):<EOL><INDENT>return str(x.decode('<STR_LIT:utf-8>'))<EOL><DEDENT>vfunc = np.vectorize(decoder)<EOL>return vfunc(nc4.chartostring(var[:]))<EOL><DEDENT>else:<EOL><INDENT>return var[:]<EOL><DEDENT>", "docstring": "Returns a normalized data array from a NetCDF4 variable. This is mostly\nused to normalize string types between py2 and py3. It has no effect on types\nother than chars/strings", "id": "f4895:m2"}
{"signature": "def unique_justseen(iterable, key=None):", "body": "<EOL>try:<EOL><INDENT>from itertools import imap as map<EOL><DEDENT>except ImportError:<EOL><INDENT>from builtins import map<EOL><DEDENT>return map(next, map(operator.itemgetter(<NUM_LIT:1>), itertools.groupby(iterable, key)))<EOL>", "docstring": "List unique elements, preserving order. Remember only the element just seen.", "id": "f4895:m1"}
{"signature": "def receive(self, length):", "body": "<EOL>slipDriver = sliplib.Driver()<EOL>ret = self._serialPort.read(length)<EOL>temp = slipDriver.receive(ret)<EOL>return iter(temp)<EOL>", "docstring": "Reads in data from a serial port (length bytes), decodes SLIP packets\n\n        A function which reads from the serial port and then uses the SlipLib\n        module to decode the SLIP protocol packets. Each message received\n        is added to a receive buffer in SlipLib which is then returned.\n\n        Args:\n            length (int): Length to receive with serialPort.read(length)\n\n        Returns:\n            bytes: An iterator of the receive buffer", "id": "f4902:c0:m2"}
{"signature": "def __init__(self):", "body": "self._port = \"<STR_LIT>\"<EOL>self._timeout = <NUM_LIT:0><EOL>self._baudrate = <NUM_LIT><EOL>self.serialPort =serial.serial_for_url(url=self._port,<EOL>timeout=self._timeout,<EOL>baudrate=self._baudrate)<EOL>", "docstring": "Creates a mock serial port which is a loopback object", "id": "f4902:c3:m0"}
{"signature": "def isPortAvailable(port='<STR_LIT>'):", "body": "isPortAvailable = serial.tools.list_ports.grep(port)<EOL>try:<EOL><INDENT>next(isPortAvailable)<EOL>available = True<EOL><DEDENT>except StopIteration:<EOL><INDENT>available = False<EOL><DEDENT>return available<EOL>", "docstring": "Checks whether specified port is available.\n\nSource code derived from @lqdev suggestion per #38\n\nArgs:\n    port: Serial port location i.e. 'COM1'. Default is /dev/ttyUSB0\n\nReturns:\n    available: Boolean value indicating presence of port", "id": "f4902:c3:m1"}
{"signature": "def rxSerial(self, length):", "body": "return(self._faraday.receive(length))<EOL>", "docstring": "Checks the serial port for data and returns any that is found.\n\nArgs:\n    length: Number of bytes to read from serial port\n\nReturns:\n    data: Data received from serial port", "id": "f4902:c2:m3"}
{"signature": "def run(self):", "body": "while self.isRunning.is_set():<EOL><INDENT>try:<EOL><INDENT>try:<EOL><INDENT>self.monitorTUN()<EOL><DEDENT>except timeout_decorator.TimeoutError as error:<EOL><INDENT>pass<EOL><DEDENT>self.checkSerial()<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Wrapper function for TUN and serial port monitoring\n\nWraps the necessary functions to loop over until self._isRunning\nthreading.Event() is set(). This checks for data on the TUN/serial\ninterfaces and then sends data over the appropriate interface. This\nfunction is automatically run when Threading.start() is called on the\nMonitor class.", "id": "f4902:c2:m6"}
{"signature": "@timeout_decorator.timeout(<NUM_LIT:1>, use_signals=False)<EOL><INDENT>def checkTUN(self):<DEDENT>", "body": "packet = self._TUN._tun.read(self._TUN._tun.mtu)<EOL>return(packet)<EOL>", "docstring": "Checks the TUN adapter for data and returns any that is found.\n\nReturns:\n    packet: Data read from the TUN adapter", "id": "f4902:c2:m1"}
{"signature": "def send(self, msg):", "body": "<EOL>slipDriver = sliplib.Driver()<EOL>slipData = slipDriver.send(msg)<EOL>res = self._serialPort.write(slipData)<EOL>return res<EOL>", "docstring": "Encodes data to slip protocol and then sends over serial port\n\n        Uses the SlipLib module to convert the message data into SLIP format.\n        The message is then sent over the serial port opened with the instance\n        of the Faraday class used when invoking send().\n\n        Args:\n            msg (bytes): Bytes format message to send over serial port.\n\n        Returns:\n            int: Number of bytes transmitted over the serial port.", "id": "f4902:c0:m1"}
{"signature": "def toposort(data):", "body": "<EOL>if len(data) == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>data = data.copy()<EOL>for k, v in data.items():<EOL><INDENT>v.discard(k)<EOL><DEDENT>extra_items_in_deps = _reduce(set.union, data.values()) - set(data.keys())<EOL>data.update({item:set() for item in extra_items_in_deps})<EOL>while True:<EOL><INDENT>ordered = set(item for item, dep in data.items() if len(dep) == <NUM_LIT:0>)<EOL>if not ordered:<EOL><INDENT>break<EOL><DEDENT>yield ordered<EOL>data = {item: (dep - ordered)<EOL>for item, dep in data.items()<EOL>if item not in ordered}<EOL><DEDENT>if len(data) != <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>'.format('<STR_LIT:U+002CU+0020>'.join(repr(x) for x in data.items())))<EOL><DEDENT>", "docstring": "Dependencies are expressed as a dictionary whose keys are items\nand whose values are a set of dependent items. Output is a list of\nsets in topological order. The first set consists of items with no\ndependences, each subsequent set consists of items that depend upon\nitems in the preceeding sets.", "id": "f4909:m0"}
{"signature": "def _timezone_format(value):", "body": "return timezone.make_aware(value, timezone.get_current_timezone()) if getattr(settings, '<STR_LIT>', False) else value<EOL>", "docstring": "Generates a timezone aware datetime if the 'USE_TZ' setting is enabled\n\n:param value: The datetime value\n:return: A locale aware datetime", "id": "f4910:m0"}
{"signature": "def guess_format(self, field):", "body": "faker = self.faker<EOL>provider = self.provider<EOL>if isinstance(field, DurationField): return lambda x: provider.duration()<EOL>if isinstance(field, UUIDField): return lambda x: provider.uuid()<EOL>if isinstance(field, BooleanField): return lambda x: faker.boolean()<EOL>if isinstance(field, NullBooleanField): return lambda x: faker.null_boolean()<EOL>if isinstance(field, PositiveSmallIntegerField): return lambda x: provider.rand_small_int(pos=True)<EOL>if isinstance(field, SmallIntegerField): return lambda x: provider.rand_small_int()<EOL>if isinstance(field, BigIntegerField): return lambda x: provider.rand_big_int()<EOL>if isinstance(field, PositiveIntegerField): return lambda x: provider.rand_small_int(pos=True)<EOL>if isinstance(field, IntegerField): return lambda x: provider.rand_small_int()<EOL>if isinstance(field, FloatField): return lambda x: provider.rand_float()<EOL>if isinstance(field, DecimalField): return lambda x: random.random()<EOL>if isinstance(field, URLField): return lambda x: faker.uri()<EOL>if isinstance(field, SlugField): return lambda x: faker.uri_page()<EOL>if isinstance(field, IPAddressField) or isinstance(field, GenericIPAddressField):<EOL><INDENT>protocol = random.choice(['<STR_LIT>','<STR_LIT>'])<EOL>return lambda x: getattr(faker, protocol)()<EOL><DEDENT>if isinstance(field, EmailField): return lambda x: faker.email()<EOL>if isinstance(field, CommaSeparatedIntegerField):<EOL><INDENT>return lambda x: provider.comma_sep_ints()<EOL><DEDENT>if isinstance(field, BinaryField): return lambda x: provider.binary()<EOL>if isinstance(field, ImageField): return lambda x: provider.file_name()<EOL>if isinstance(field, FilePathField): return lambda x: provider.file_name()<EOL>if isinstance(field, FileField): return lambda x: provider.file_name()<EOL>if isinstance(field, CharField):<EOL><INDENT>if field.choices:<EOL><INDENT>return lambda x: 
random.choice(field.choices)[<NUM_LIT:0>]<EOL><DEDENT>return lambda x: faker.text(field.max_length) if field.max_length >= <NUM_LIT:5> else faker.word()<EOL><DEDENT>if isinstance(field, TextField): return lambda x: faker.text()<EOL>if isinstance(field, DateTimeField):<EOL><INDENT>return lambda x: _timezone_format(faker.date_time())<EOL><DEDENT>if isinstance(field, DateField): return lambda x: faker.date()<EOL>if isinstance(field, TimeField): return lambda x: faker.time()<EOL>raise AttributeError(field)<EOL>", "docstring": "Returns the correct faker function based on the field type\n:param field:", "id": "f4910:c1:m1"}
{"signature": "@contextmanager<EOL>def django_setting(name, value):", "body": "original_value = getattr(settings, name)<EOL>setattr(settings, name, value)<EOL>try:<EOL><INDENT>yield<EOL><DEDENT>finally:<EOL><INDENT>setattr(settings, name, original_value)<EOL><DEDENT>", "docstring": "Generator that mutates the django.settings object during the context of a test run.\n\n:param name: The setting name to be affected\n:param value: The setting value to be defined during the execution\n:return:", "id": "f4913:m0"}
{"signature": "def execute(self, using, inserted_entities):", "body": "def format_field(format, inserted_entities):<EOL><INDENT>if callable(format):<EOL><INDENT>return format(inserted_entities)<EOL><DEDENT>return format<EOL><DEDENT>def turn_off_auto_add(model):<EOL><INDENT>for field in model._meta.fields:<EOL><INDENT>if getattr(field, '<STR_LIT>', False):<EOL><INDENT>field.auto_now = False<EOL><DEDENT>if getattr(field, '<STR_LIT>', False):<EOL><INDENT>field.auto_now_add = False<EOL><DEDENT><DEDENT><DEDENT>manager = self.model.objects.db_manager(using=using)<EOL>turn_off_auto_add(manager.model)<EOL>faker_data = {<EOL>field: format_field(field_format, inserted_entities)<EOL>for field, field_format in self.field_formatters.items()<EOL>}<EOL>for data_field in faker_data:<EOL><INDENT>field = self.model._meta.get_field(data_field)<EOL>if field.max_length and isinstance(faker_data[data_field], str):<EOL><INDENT>faker_data[data_field] = faker_data[data_field][:field.max_length]<EOL><DEDENT><DEDENT>obj = manager.create(**faker_data)<EOL>return obj.pk<EOL>", "docstring": "Execute the stages entities to insert\n:param using:\n:param inserted_entities:", "id": "f4914:c0:m3"}
{"signature": "def get_all_tags(self):", "body": "verbose = self.options.verbose<EOL>gh = self.github<EOL>user = self.options.user<EOL>repo = self.options.project<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>tags = []<EOL>page = <NUM_LIT:1><EOL>while page > <NUM_LIT:0>:<EOL><INDENT>if verbose > <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT:.>\", end=\"<STR_LIT>\")<EOL><DEDENT>rc, data = gh.repos[user][repo].tags.get(<EOL>page=page, per_page=PER_PAGE_NUMBER)<EOL>if rc == <NUM_LIT:200>:<EOL><INDENT>tags.extend(data)<EOL><DEDENT>else:<EOL><INDENT>self.raise_GitHubError(rc, data, gh.getheaders())<EOL><DEDENT>page = NextPage(gh)<EOL><DEDENT>if verbose > <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT:.>\")<EOL><DEDENT>if len(tags) == <NUM_LIT:0>:<EOL><INDENT>if not self.options.quiet:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>exit()<EOL><DEDENT><DEDENT>if verbose > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\".format(len(tags)))<EOL><DEDENT>return tags<EOL>", "docstring": "Fetch all tags for repository from Github.\n\n:return: tags in repository\n:rtype: list", "id": "f4919:c0:m2"}
{"signature": "def run(self):", "body": "if not self.options.project or not self.options.user:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if not self.options.quiet:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>log = None<EOL>try:<EOL><INDENT>log = self.generator.compound_changelog()<EOL><DEDENT>except ChangelogGeneratorError as err:<EOL><INDENT>print(\"<STR_LIT>\".format(err.args[<NUM_LIT:0>]))<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>if not log:<EOL><INDENT>if not self.options.quiet:<EOL><INDENT>print(\"<STR_LIT>\".format(<EOL>self.options.output)<EOL>)<EOL><DEDENT>return<EOL><DEDENT>if self.options.no_overwrite:<EOL><INDENT>out = checkname(self.options.output)<EOL><DEDENT>else:<EOL><INDENT>out = self.options.output<EOL><DEDENT>with codecs.open(out, \"<STR_LIT:w>\", \"<STR_LIT:utf-8>\") as fh:<EOL><INDENT>fh.write(log)<EOL><DEDENT>if not self.options.quiet:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\".format(out))<EOL><DEDENT>", "docstring": "The entry point of this script to generate change log\n'ChangelogGeneratorError' Is thrown when one\nof the specified tags was not found in list of tags.", "id": "f4922:c0:m1"}
{"signature": "def __init__(self, options=None):", "body": "self.options = OptionsParser(options).options<EOL>self.generator = Generator(self.options)<EOL>", "docstring": ":type options: list\n:param options: command line arguments", "id": "f4922:c0:m0"}
{"signature": "def parse(data):", "body": "sections = re.compile(\"<STR_LIT>\", re.MULTILINE).split(data)<EOL>headings = re.findall(\"<STR_LIT>\", data, re.MULTILINE)<EOL>sections.pop(<NUM_LIT:0>)<EOL>parsed = []<EOL>def func(h, s):<EOL><INDENT>p = parse_heading(h)<EOL>p[\"<STR_LIT:content>\"] = s<EOL>parsed.append(p)<EOL><DEDENT>list(map(func, headings, sections))<EOL>return parsed<EOL>", "docstring": "Parse the given ChangeLog data into a list of Hashes.\n\n@param [String] data File data from the ChangeLog.md\n@return [Array<Hash>] Parsed data, e.g. [{ 'version' => ..., 'url' => ..., 'date' => ..., 'content' => ...}, ...]", "id": "f4924:m1"}
{"signature": "@staticmethod<EOL><INDENT>def find_issues_to_add(all_issues, tag_name):<DEDENT>", "body": "filtered = []<EOL>for issue in all_issues:<EOL><INDENT>if issue.get(\"<STR_LIT>\"):<EOL><INDENT>if issue[\"<STR_LIT>\"][\"<STR_LIT:title>\"] == tag_name:<EOL><INDENT>iss = copy.deepcopy(issue)<EOL>filtered.append(iss)<EOL><DEDENT><DEDENT><DEDENT>return filtered<EOL>", "docstring": "Add all issues, that should be in that tag, according to milestone.\n\n:param list(dict) all_issues: All issues.\n:param str tag_name: Name (title) of tag.\n:rtype: List[dict]\n:return: Issues filtered by milestone.", "id": "f4926:c0:m27"}
{"signature": "def filter_by_labels(self, all_issues, kind):", "body": "filtered_issues = self.include_issues_by_labels(all_issues)<EOL>filtered = self.exclude_issues_by_labels(filtered_issues)<EOL>if self.options.verbose > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\".format(kind, len(filtered)))<EOL><DEDENT>return filtered<EOL>", "docstring": "Filter issues for include/exclude labels.\n\n:param list(dict) all_issues: All issues.\n:param str kind: Either \"issues\" or \"pull requests\".\n:rtype: list(dict)\n:return: Filtered issues.", "id": "f4926:c0:m33"}
{"signature": "def get_time_of_tag(self, tag):", "body": "if not tag:<EOL><INDENT>raise ChangelogGeneratorError(\"<STR_LIT>\")<EOL><DEDENT>name_of_tag = tag[\"<STR_LIT:name>\"]<EOL>time_for_name = self.tag_times_dict.get(name_of_tag, None)<EOL>if time_for_name:<EOL><INDENT>return time_for_name<EOL><DEDENT>else:<EOL><INDENT>time_string = self.fetcher.fetch_date_of_tag(tag)<EOL>try:<EOL><INDENT>self.tag_times_dict[name_of_tag] =timestring_to_datetime(time_string)<EOL><DEDENT>except UnicodeWarning:<EOL><INDENT>print(\"<STR_LIT>\", tag)<EOL>self.tag_times_dict[name_of_tag] =timestring_to_datetime(time_string)<EOL><DEDENT>return self.tag_times_dict[name_of_tag]<EOL><DEDENT>", "docstring": "Get date and time for tag, fetching it if not already cached.\n\n:param dict tag: Tag to get the datetime for.\n:rtype: datetime\n:return: datetime for specified tag.", "id": "f4926:c0:m38"}
{"signature": "def generate_log_between_tags(self, older_tag, newer_tag):", "body": "filtered_issues, filtered_pull_requests =self.filter_issues_for_tags(newer_tag, older_tag)<EOL>older_tag_name = older_tag[\"<STR_LIT:name>\"] if older_tagelse self.detect_since_tag()<EOL>if not filtered_issues and not filtered_pull_requests:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>return self.generate_log_for_tag(<EOL>filtered_pull_requests, filtered_issues,<EOL>newer_tag, older_tag_name)<EOL>", "docstring": "Generate log between 2 specified tags.\n\n:param dict older_tag: All issues before this tag's date will be\n                       excluded. May be special value, if new tag is\n                       the first tag. (Means **older_tag** is when\n                       the repo was created.)\n:param dict newer_tag: All issues after this tag's date  will be\n                       excluded. May be title of unreleased section.\n:rtype: str\n:return: Generated ready-to-add tag section for newer tag.", "id": "f4926:c0:m11"}
{"signature": "def detect_since_tag(self):", "body": "return self.options.since_tag or self.version_of_first_item()<EOL>", "docstring": "Try to find tag name to use as older tag for range of log creation.\n\n:rtype: str\n:return: Tag name to use as 'oldest' tag. May be special value,\n         indicating the creation of the repo.", "id": "f4926:c0:m40"}
{"signature": "def get_filtered_pull_requests(self, pull_requests):", "body": "pull_requests = self.filter_by_labels(pull_requests, \"<STR_LIT>\")<EOL>pull_requests = self.filter_merged_pull_requests(pull_requests)<EOL>if self.options.verbose > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\".format(len(pull_requests)))<EOL><DEDENT>return pull_requests<EOL>", "docstring": "This method fetches missing params for PR and filter them\nby specified options. It include add all PR's with labels\nfrom options.include_labels and exclude all from\noptions.exclude_labels.\n\n:param list(dict) pull_requests: All pull requests.\n:rtype: list(dict)\n:return: Filtered pull requests.", "id": "f4926:c0:m34"}
{"signature": "def timestring_to_datetime(timestring):", "body": "with warnings.catch_warnings():<EOL><INDENT>warnings.filterwarnings(\"<STR_LIT:ignore>\", category=UnicodeWarning)<EOL>result = dateutil_parser(timestring)<EOL><DEDENT>return result<EOL>", "docstring": "Convert an ISO formated date and time string to a datetime object.\n\n:param str timestring: String with date and time in ISO format.\n:rtype: datetime\n:return: datetime object", "id": "f4926:m0"}
{"signature": "def parse_by_sections(self, issues, pull_requests):", "body": "issues_a = []<EOL>sections_a = OrderedDict()<EOL>if not self.options.sections:<EOL><INDENT>return [sections_a, issues]<EOL><DEDENT>for key in self.options.sections:<EOL><INDENT>sections_a.update({key: []})<EOL><DEDENT>self.parse_by_sections_for_issues(issues, sections_a, issues_a)<EOL>self.parse_by_sections_for_pr(pull_requests, sections_a)<EOL>return [sections_a, issues_a]<EOL>", "docstring": "This method sort issues by types (bugs, features, etc. or\njust closed issues) by labels.\n\n:param list(dict) issues: List of issues in this tag section.\n:param list(dict) pull_requests: List of PR's in this tag section.\n:rtype: dict(list(dict)), list(dict)\n:return: Issues and PR's sorted into sections.", "id": "f4926:c0:m22"}
{"signature": "def find_closed_date_by_commit(self, issue):", "body": "if not issue.get('<STR_LIT>'):<EOL><INDENT>return<EOL><DEDENT>compare_string = \"<STR_LIT>\" if '<STR_LIT>' in issue else \"<STR_LIT>\"<EOL>issue['<STR_LIT>'].reverse()<EOL>found_date = False<EOL>for event in issue['<STR_LIT>']:<EOL><INDENT>if event[\"<STR_LIT>\"] == compare_string:<EOL><INDENT>self.set_date_from_event(event, issue)<EOL>found_date = True<EOL>break<EOL><DEDENT><DEDENT>if not found_date:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(issue[\"<STR_LIT>\"], issue[\"<STR_LIT:title>\"]))<EOL><DEDENT>", "docstring": "Fill \"actual_date\" parameter of specified issue by closed date of\nthe commit, if it was closed by commit.\n\n:param dict issue: issue to edit", "id": "f4926:c0:m5"}
{"signature": "def include_issues_by_labels(self, all_issues):", "body": "included_by_labels = self.filter_by_include_labels(all_issues)<EOL>wo_labels = self.filter_wo_labels(all_issues)<EOL>il = set([f[\"<STR_LIT>\"] for f in included_by_labels])<EOL>wl = set([w[\"<STR_LIT>\"] for w in wo_labels])<EOL>filtered_issues = []<EOL>for issue in all_issues:<EOL><INDENT>if issue[\"<STR_LIT>\"] in il or issue[\"<STR_LIT>\"] in wl:<EOL><INDENT>filtered_issues.append(issue)<EOL><DEDENT><DEDENT>return filtered_issues<EOL>", "docstring": "Include issues with labels, specified in self.options.include_labels.\n\n:param list(dict) all_issues: All issues.\n:rtype: list(dict)\n:return: Filtered issues.", "id": "f4926:c0:m30"}
{"signature": "def get_string_for_issue(self, issue):", "body": "encapsulated_title = self.encapsulate_string(issue['<STR_LIT:title>'])<EOL>try:<EOL><INDENT>title_with_number = u\"<STR_LIT>\".format(<EOL>encapsulated_title, issue[\"<STR_LIT>\"], issue[\"<STR_LIT>\"]<EOL>)<EOL><DEDENT>except UnicodeEncodeError:<EOL><INDENT>title_with_number = \"<STR_LIT>\".format(<EOL>issue[\"<STR_LIT>\"], issue['<STR_LIT:title>']<EOL>)<EOL>print(title_with_number, '<STR_LIT:\\n>', issue[\"<STR_LIT>\"])<EOL><DEDENT>return self.issue_line_with_user(title_with_number, issue)<EOL>", "docstring": "Parse issue and generate single line formatted issue line.\n\nExample output:\n    - Add coveralls integration [\\#223](https://github.com/skywinder/github-changelog-generator/pull/223) ([skywinder](https://github.com/skywinder))\n    - Add coveralls integration [\\#223](https://github.com/skywinder/github-changelog-generator/pull/223) (@skywinder)\n\n\n:param dict issue: Fetched issue from GitHub.\n:rtype: str\n:return: Markdown-formatted single issue.", "id": "f4926:c0:m18"}
{"signature": "def filter_between_tags(self, all_tags):", "body": "tag_names = [t[\"<STR_LIT:name>\"] for t in all_tags]<EOL>between_tags = []<EOL>for tag in self.options.between_tags:<EOL><INDENT>try:<EOL><INDENT>idx = tag_names.index(tag)<EOL><DEDENT>except ValueError:<EOL><INDENT>raise ChangelogGeneratorError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(tag))<EOL><DEDENT>between_tags.append(all_tags[idx])<EOL><DEDENT>between_tags = self.sort_tags_by_date(between_tags)<EOL>if len(between_tags) == <NUM_LIT:1>:<EOL><INDENT>between_tags.append(between_tags[<NUM_LIT:0>])<EOL><DEDENT>older = self.get_time_of_tag(between_tags[<NUM_LIT:1>])<EOL>newer = self.get_time_of_tag(between_tags[<NUM_LIT:0>])<EOL>for tag in all_tags:<EOL><INDENT>if older < self.get_time_of_tag(tag) < newer:<EOL><INDENT>between_tags.append(tag)<EOL><DEDENT><DEDENT>if older == newer:<EOL><INDENT>between_tags.pop(<NUM_LIT:0>)<EOL><DEDENT>return between_tags<EOL>", "docstring": "Filter tags according between_tags option.\n\n:param list(dict) all_tags: Pre-filtered tags.\n:rtype: list(dict)\n:return: Filtered tags.", "id": "f4926:c0:m46"}
{"signature": "def apply_exclude_tags_regex(self, all_tags):", "body": "filtered = []<EOL>for tag in all_tags:<EOL><INDENT>if not re.match(self.options.exclude_tags_regex, tag[\"<STR_LIT:name>\"]):<EOL><INDENT>filtered.append(tag)<EOL><DEDENT><DEDENT>if len(all_tags) == len(filtered):<EOL><INDENT>self.warn_if_nonmatching_regex()<EOL><DEDENT>return filtered<EOL>", "docstring": "Filter tags according exclude_tags_regex option.\n\n:param list(dict) all_tags: Pre-filtered tags.\n:rtype: list(dict)\n:return: Filtered tags.", "id": "f4926:c0:m48"}
{"signature": "def get_temp_tag_for_repo_creation(self):", "body": "tag_date = self.tag_times_dict.get(REPO_CREATED_TAG_NAME, None)<EOL>if not tag_date:<EOL><INDENT>tag_name, tag_date = self.fetcher.fetch_repo_creation_date()<EOL>self.tag_times_dict[tag_name] = timestring_to_datetime(tag_date)<EOL><DEDENT>return REPO_CREATED_TAG_NAME<EOL>", "docstring": "If not already cached, fetch the creation date of the repo, cache it\nand return the special value indicating the creation of the repo.\n\n:rtype: str\n:return: value indicating the creation", "id": "f4926:c0:m42"}
{"signature": "def detect_link_tag_time(self, tag):", "body": "<EOL>newer_tag_time = self.get_time_of_tag(tag) if tagelse datetime.datetime.now()<EOL>if tag[\"<STR_LIT:name>\"] == self.options.unreleased_labeland self.options.future_release:<EOL><INDENT>newer_tag_name = self.options.future_release<EOL>newer_tag_link = self.options.future_release<EOL><DEDENT>elif tag[\"<STR_LIT:name>\"] is not self.options.unreleased_label :<EOL><INDENT>newer_tag_name = tag[\"<STR_LIT:name>\"]<EOL>newer_tag_link = newer_tag_name<EOL><DEDENT>else:<EOL><INDENT>newer_tag_name = self.options.unreleased_label<EOL>newer_tag_link = \"<STR_LIT>\"<EOL><DEDENT>return [newer_tag_link, newer_tag_name, newer_tag_time]<EOL>", "docstring": "Detect link, name and time for specified tag.\n\n:param dict tag: Tag data.\n:rtype: str, str, datetime\n:return: Link, name and time of the tag.", "id": "f4926:c0:m39"}
{"signature": "def filter_excluded_tags(self, all_tags):", "body": "filtered_tags = copy.deepcopy(all_tags)<EOL>if self.options.exclude_tags:<EOL><INDENT>filtered_tags = self.apply_exclude_tags(filtered_tags)<EOL><DEDENT>if self.options.exclude_tags_regex:<EOL><INDENT>filtered_tags = self.apply_exclude_tags_regex(filtered_tags)<EOL><DEDENT>return filtered_tags<EOL>", "docstring": "Filter tags according exclude_tags and exclude_tags_regex option.\n\n:param list(dict) all_tags: Pre-filtered tags.\n:rtype: list(dict)\n:return: Filtered tags.", "id": "f4926:c0:m47"}
{"signature": "def generate_sub_section(self, issues, prefix):", "body": "log = \"<STR_LIT>\"<EOL>if issues:<EOL><INDENT>if not self.options.simple_list:<EOL><INDENT>log += u\"<STR_LIT>\".format(prefix)<EOL><DEDENT>for issue in issues:<EOL><INDENT>merge_string = self.get_string_for_issue(issue)<EOL>log += u\"<STR_LIT>\".format(merge_string)<EOL><DEDENT>log += \"<STR_LIT:\\n>\"<EOL><DEDENT>return log<EOL>", "docstring": "Generate formated list of issues for changelog.\n\n:param list issues: Issues to put in sub-section.\n:param str prefix: Title of sub-section.\n:rtype: str\n:return: Generated ready-to-add sub-section.", "id": "f4926:c0:m9"}
{"signature": "@staticmethod<EOL><INDENT>def encapsulate_string(raw_string):<DEDENT>", "body": "raw_string.replace('<STR_LIT:\\\\>', '<STR_LIT>')<EOL>enc_string = re.sub(\"<STR_LIT>\", r\"<STR_LIT>\", raw_string)<EOL>return enc_string<EOL>", "docstring": "Encapsulate characters to make markdown look as expected.\n\n:param str raw_string: string to encapsulate\n:rtype: str\n:return: encapsulated input string", "id": "f4926:c0:m7"}
{"signature": "@staticmethod<EOL><INDENT>def user_project_from_remote(remote):<DEDENT>", "body": "<EOL>regex1 = br\"<STR_LIT>\"br\"<STR_LIT>\"<EOL>match = re.match(regex1, remote)<EOL>if match:<EOL><INDENT>return match.group(\"<STR_LIT:user>\"), match.group(\"<STR_LIT>\")<EOL><DEDENT>regex2 = r\"<STR_LIT>\"<EOL>match = re.match(regex2, remote)<EOL>if match:<EOL><INDENT>return match.group(\"<STR_LIT:user>\"), match.group(\"<STR_LIT>\")<EOL><DEDENT>return None, None<EOL>", "docstring": "Try to find user and project name from git remote output\n\n@param [String] output of git remote command\n@return [Array] user and project", "id": "f4927:c1:m5"}
{"signature": "@staticmethod<EOL><INDENT>def user_project_from_option(options, arg0, arg1):<DEDENT>", "body": "site = options.github_site<EOL>if arg0 and not arg1:<EOL><INDENT>match = re.match(<EOL>\"<STR_LIT>\".format(site=site),<EOL>arg0<EOL>)<EOL>if not match:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(arg0=arg0))<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>return match.groups()<EOL><DEDENT>return None, None<EOL>", "docstring": "Try to find user and project name from git remote output\n\n@param [String] output of git remote command\n@return [Array] user and project", "id": "f4927:c1:m4"}
{"signature": "def user_and_project_from_git(self, options, arg0=None, arg1=None):", "body": "user, project = self.user_project_from_option(options, arg0, arg1)<EOL>if user and project:<EOL><INDENT>return user, project<EOL><DEDENT>try:<EOL><INDENT>remote = subprocess.check_output(<EOL>[<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>'.format(options.git_remote)<EOL>]<EOL>)<EOL><DEDENT>except subprocess.CalledProcessError:<EOL><INDENT>return None, None<EOL><DEDENT>except WindowsError:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>return self.user_project_from_remote(remote)<EOL><DEDENT>", "docstring": "Detects user and project from git.", "id": "f4927:c1:m3"}
{"signature": "def valid_content_type(self, content_type, accept):", "body": "accept_tokens = accept.replace('<STR_LIT:U+0020>', '<STR_LIT>').split('<STR_LIT:;>')<EOL>content_type_tokens = content_type.replace('<STR_LIT:U+0020>', '<STR_LIT>').split('<STR_LIT:;>')<EOL>return (<EOL>all(elem in content_type_tokens for elem in accept_tokens) and<EOL>(content_type_tokens[<NUM_LIT:0>] == '<STR_LIT>' or<EOL>content_type_tokens[<NUM_LIT:0>] == '<STR_LIT>')<EOL>)<EOL>", "docstring": "Check that the server is returning a valid Content-Type\n\n        Args:\n            content_type (str): ``Content-Type:`` header value\n            accept (str): media type to include in the ``Accept:`` header.", "id": "f4931:c10:m1"}
{"signature": "def __init__(self, user=None, password=None, verify=True, proxies=None,<EOL>user_agent=DEFAULT_USER_AGENT):", "body": "self.session = requests.Session()<EOL>self.session.verify = verify<EOL>self.user_agent = user_agent or DEFAULT_USER_AGENT<EOL>if user and password:<EOL><INDENT>self.session.auth = requests.auth.HTTPBasicAuth(user, password)<EOL><DEDENT>if proxies:<EOL><INDENT>self.session.proxies.update(proxies)<EOL><DEDENT>", "docstring": "Create a connection session.\n\n        Args:\n            user (str): username for authentication (optional)\n            password (str): password for authentication (optional)\n            verify (bool): validate the entity credentials. (default: True)\n            proxies (dict): key/value pair for http/https proxy settings.\n                (optional)\n            user_agent (str): A value to use for the User-Agent header in\n                requests.  If not given, use a default value which represents\n                this library.", "id": "f4931:c10:m0"}
{"signature": "def refresh(self):", "body": "response = self.__raw = self._conn.get(self.url)<EOL>self._populate_fields(**response)<EOL>self._loaded = True<EOL>", "docstring": "Update the Server information and list of API Roots", "id": "f4931:c9:m11"}
{"signature": "def _validate_status(self):", "body": "if not self.id:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValidationError(msg.format(self.url))<EOL><DEDENT>if not self.status:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValidationError(msg.format(self.url))<EOL><DEDENT>if self.total_count is None:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValidationError(msg.format(self.url))<EOL><DEDENT>if self.success_count is None:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValidationError(msg.format(self.url))<EOL><DEDENT>if self.failure_count is None:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValidationError(msg.format(self.url))<EOL><DEDENT>if self.pending_count is None:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValidationError(msg.format(self.url))<EOL><DEDENT>if len(self.successes) != self.success_count:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValidationError(msg.format(self.successes,<EOL>self.success_count,<EOL>self.id))<EOL><DEDENT>if len(self.pendings) != self.pending_count:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValidationError(msg.format(self.pendings,<EOL>self.pending_count,<EOL>self.id))<EOL><DEDENT>if len(self.failures) != self.failure_count:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValidationError(msg.format(self.failures,<EOL>self.failure_count,<EOL>self.id))<EOL><DEDENT>if (self.success_count + self.pending_count + self.failure_count !=<EOL>self.total_count):<EOL><INDENT>msg = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>raise ValidationError(msg.format(self.success_count,<EOL>self.pending_count,<EOL>self.failure_count,<EOL>self.total_count,<EOL>self.id))<EOL><DEDENT>", "docstring": "Validates Status information. Raises errors for required\n        properties.", "id": "f4931:c6:m7"}
{"signature": "def __init__(self, url, conn=None, user=None, password=None, verify=True,<EOL>proxies=None):", "body": "super(ApiRoot, self).__init__(url, conn, user, password, verify, proxies)<EOL>self._loaded_collections = False<EOL>self._loaded_information = False<EOL>self.__raw = None<EOL>", "docstring": "Create an API root resource endpoint.\n\n        Args:\n            url (str): URL of a TAXII API root resource endpoint\n            user (str): username for authentication (optional)\n            password (str): password for authentication (optional)\n            conn (_HTTPConnection): reuse connection object, as an alternative\n                to providing username/password\n            verify (bool): validate the entity credentials. (default: True)\n            proxies (dict): key/value pair for http/https proxy settings.\n                (optional)", "id": "f4931:c8:m0"}
{"signature": "def _validate_server(self):", "body": "if not self._title:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValidationError(msg.format(self.url))<EOL><DEDENT>", "docstring": "Validates server information. Raises errors for required properties.", "id": "f4931:c9:m9"}
{"signature": "@property<EOL><INDENT>def _raw(self):<DEDENT>", "body": "self._ensure_loaded()<EOL>return self.__raw<EOL>", "docstring": "Get the \"raw\" collection information response (parsed JSON).", "id": "f4931:c7:m9"}
{"signature": "def __init__(self, url, conn=None, user=None, password=None, verify=True,<EOL>proxies=None, collection_info=None):", "body": "super(Collection, self).__init__(url, conn, user, password, verify, proxies)<EOL>self._loaded = False<EOL>self.__raw = None<EOL>if collection_info:<EOL><INDENT>self._populate_fields(**collection_info)<EOL>self.__raw = collection_info<EOL>self._loaded = True<EOL><DEDENT>", "docstring": "Initialize a new Collection.  Either user/password or conn may be\ngiven, but not both.  The latter is intended for internal use, when\nsharing connection pools with an ApiRoot, mocking a connection for\ntesting, etc.  Users should use user/password (if required) which will\ncreate a new connection.\n\nArgs:\n    url (str): A TAXII endpoint for a collection\n    user (str): User name for authentication (optional)\n    password (str): Password for authentication (optional)\n    verify (bool): Either a boolean, in which case it controls whether\n        we verify the server's TLS certificate, or a string, in which\n        case it must be a path to a CA bundle to use. Defaults to\n        `True` (optional)\n    conn (_HTTPConnection): A connection to reuse (optional)\n    collection_info: Collection metadata, if known in advance (optional)\n    verify (bool): validate the entity credentials. (default: True)\n    proxies (dict): key/value pair for http/https proxy settings.\n        (optional)", "id": "f4931:c7:m0"}
{"signature": "@property<EOL><INDENT>def _raw(self):<DEDENT>", "body": "return self.__raw<EOL>", "docstring": "Get the \"raw\" status response (parsed JSON).", "id": "f4931:c6:m2"}
{"signature": "def close(self):", "body": "self.session.close()<EOL>", "docstring": "Closes connections.  This object is no longer usable.", "id": "f4931:c10:m4"}
{"signature": "def _format_datetime(dttm):", "body": "if dttm.tzinfo is None or dttm.tzinfo.utcoffset(dttm) is None:<EOL><INDENT>zoned = pytz.utc.localize(dttm)<EOL><DEDENT>else:<EOL><INDENT>zoned = dttm.astimezone(pytz.utc)<EOL><DEDENT>ts = zoned.strftime(\"<STR_LIT>\")<EOL>ms = zoned.strftime(\"<STR_LIT>\")<EOL>precision = getattr(dttm, \"<STR_LIT>\", None)<EOL>if precision == \"<STR_LIT>\":<EOL><INDENT>pass  <EOL><DEDENT>elif precision == \"<STR_LIT>\":<EOL><INDENT>ts = ts + \"<STR_LIT:.>\" + ms[:<NUM_LIT:3>]<EOL><DEDENT>elif zoned.microsecond > <NUM_LIT:0>:<EOL><INDENT>ts = ts + \"<STR_LIT:.>\" + ms.rstrip(\"<STR_LIT:0>\")<EOL><DEDENT>return ts + \"<STR_LIT>\"<EOL>", "docstring": "Convert a datetime object into a valid STIX timestamp string.\n\n    1. Convert to timezone-aware\n    2. Convert to UTC\n    3. Format in ISO format\n    4. Ensure correct precision\n       a. Add subsecond value if non-zero and precision not defined\n    5. Add \"Z\"", "id": "f4931:m0"}
{"signature": "def _merge_headers(self, call_specific_headers):", "body": "<EOL>merged_headers = requests.structures.CaseInsensitiveDict({<EOL>\"<STR_LIT>\": self.user_agent<EOL>})<EOL>if call_specific_headers:<EOL><INDENT>merged_headers.update(call_specific_headers)<EOL><DEDENT>if not merged_headers.get(\"<STR_LIT>\"):<EOL><INDENT>merged_headers[\"<STR_LIT>\"] = self.user_agent<EOL><DEDENT>return merged_headers<EOL>", "docstring": "Merge headers from different sources together.  Headers passed to the\npost/get methods have highest priority, then headers associated with\nthe connection object itself have next priority.\n\n:param call_specific_headers: A header dict from the get/post call, or\n    None (the default for those methods).\n:return: A key-case-insensitive MutableMapping object which contains\n    the merged headers.  (This doesn't actually return a dict.)", "id": "f4931:c10:m5"}
{"signature": "def _to_json(resp):", "body": "try:<EOL><INDENT>return resp.json()<EOL><DEDENT>except ValueError as e:<EOL><INDENT>six.raise_from(InvalidJSONError(<EOL>\"<STR_LIT>\" + resp.request.url<EOL>), e)<EOL><DEDENT>", "docstring": "Factors out some JSON parse code with error handling, to hopefully improve\nerror messages.\n\n:param resp: A \"requests\" library response\n:return: Parsed JSON.\n:raises: InvalidJSONError If JSON parsing failed.", "id": "f4931:m3"}
{"signature": "@property<EOL><INDENT>def _raw(self):<DEDENT>", "body": "self._ensure_loaded()<EOL>return self.__raw<EOL>", "docstring": "Get the \"raw\" server discovery response (parsed JSON).", "id": "f4931:c9:m7"}
{"signature": "def wait_until_final(self, poll_interval=<NUM_LIT:1>, timeout=<NUM_LIT>):", "body": "start_time = time.time()<EOL>elapsed = <NUM_LIT:0><EOL>while (self.status != \"<STR_LIT>\" and<EOL>(timeout <= <NUM_LIT:0> or elapsed < timeout)):<EOL><INDENT>time.sleep(poll_interval)<EOL>self.refresh()<EOL>elapsed = time.time() - start_time<EOL><DEDENT>", "docstring": "It will poll the URL to grab the latest status resource in a given\n        timeout and time interval.\n\n        Args:\n            poll_interval (int): how often to poll the status service.\n            timeout (int): how long to poll the URL until giving up. Use <= 0\n                to wait forever", "id": "f4931:c6:m5"}
{"signature": "def add_objects(self, bundle, wait_for_completion=True, poll_interval=<NUM_LIT:1>,<EOL>timeout=<NUM_LIT>, accept=MEDIA_TYPE_TAXII_V20,<EOL>content_type=MEDIA_TYPE_STIX_V20):", "body": "self._verify_can_write()<EOL>headers = {<EOL>\"<STR_LIT>\": accept,<EOL>\"<STR_LIT:Content-Type>\": content_type,<EOL>}<EOL>if isinstance(bundle, dict):<EOL><INDENT>json_text = json.dumps(bundle, ensure_ascii=False)<EOL>data = json_text.encode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>elif isinstance(bundle, six.text_type):<EOL><INDENT>data = bundle.encode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>elif isinstance(bundle, six.binary_type):<EOL><INDENT>data = bundle<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\".format(<EOL>type(bundle).__name__))<EOL><DEDENT>status_json = self._conn.post(self.objects_url, headers=headers,<EOL>data=data)<EOL>status_url = urlparse.urljoin(<EOL>self.url,<EOL>\"<STR_LIT>\".format(status_json[\"<STR_LIT:id>\"])<EOL>)<EOL>status = Status(url=status_url, conn=self._conn,<EOL>status_info=status_json)<EOL>if not wait_for_completion or status.status == \"<STR_LIT>\":<EOL><INDENT>return status<EOL><DEDENT>status.wait_until_final(poll_interval, timeout)<EOL>return status<EOL>", "docstring": "Implement the ``Add Objects`` endpoint (section 5.4)\n\n        Add objects to the collection.  This may be performed either\n        synchronously or asynchronously.  To add asynchronously, set\n        wait_for_completion to False.  If False, the latter two args are\n        unused.  If the caller wishes to monitor the status of the addition,\n        it may do so in its own way.  To add synchronously, set\n        wait_for_completion to True, and optionally set the poll and timeout\n        intervals.  
After initiating the addition, the caller will block,\n        and the TAXII \"status\" service will be polled until the timeout\n        expires, or the operation completes.\n\n        Args:\n            bundle: A STIX bundle with the objects to add (string, dict, binary)\n            wait_for_completion (bool): Whether to wait for the add operation\n                to complete before returning\n            poll_interval (int): If waiting for completion, how often to poll\n                the status service (seconds)\n            timeout (int): If waiting for completion, how long to poll until\n                giving up (seconds).  Use <= 0 to wait forever\n            accept (str): media type to include in the ``Accept:`` header.\n            content_type (str): media type to include in the ``Content-Type:``\n                header.\n\n        Returns:\n            If ``wait_for_completion`` is False, a Status object corresponding\n            to the initial status data returned from the service, is returned.\n            The status may not yet be complete at this point.\n\n            If ``wait_for_completion`` is True, a Status object corresponding\n            to the completed operation is returned if it didn't time out;\n            otherwise a Status object corresponding to the most recent data\n            obtained before the timeout, is returned.", "id": "f4931:c7:m18"}
{"signature": "def refresh_information(self, accept=MEDIA_TYPE_TAXII_V20):", "body": "response = self.__raw = self._conn.get(self.url,<EOL>headers={\"<STR_LIT>\": accept})<EOL>self._populate_fields(**response)<EOL>self._loaded_information = True<EOL>", "docstring": "Update the properties of this API Root.\n\n        This invokes the ``Get API Root Information`` endpoint.", "id": "f4931:c8:m12"}
{"signature": "def _ensure_datetime_to_string(maybe_dttm):", "body": "if isinstance(maybe_dttm, datetime.datetime):<EOL><INDENT>maybe_dttm = _format_datetime(maybe_dttm)<EOL><DEDENT>return maybe_dttm<EOL>", "docstring": "If maybe_dttm is a datetime instance, convert to a STIX-compliant\n    string representation.  Otherwise return the value unchanged.", "id": "f4931:m1"}
{"signature": "def __init__(self, url, conn=None, user=None, password=None, verify=True,<EOL>proxies=None, status_info=None):", "body": "super(Status, self).__init__(url, conn, user, password, verify, proxies)<EOL>self.__raw = None<EOL>if status_info:<EOL><INDENT>self._populate_fields(**status_info)<EOL>self.__raw = status_info<EOL><DEDENT>else:<EOL><INDENT>self.refresh()<EOL><DEDENT>", "docstring": "Create an API root resource endpoint.\n\n        Args:\n            url (str): URL of a TAXII status resource endpoint\n            user (str): username for authentication (optional)\n            password (str): password for authentication (optional)\n            conn (_HTTPConnection): reuse connection object, as an alternative\n                to providing username/password\n            status_info (dict): Parsed JSON representing a response from the\n                status endpoint, if already known.  If not given, the\n                endpoint will be queried. (optional)\n            verify (bool): validate the entity credentials. (default: True)\n            proxies (dict): key/value pair for http/https proxy settings.\n                (optional)", "id": "f4931:c6:m0"}
{"signature": "def __init__(self, url, conn=None, user=None, password=None, verify=True,<EOL>proxies=None):", "body": "if conn and (user or password):<EOL><INDENT>raise InvalidArgumentsError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>elif conn:<EOL><INDENT>self._conn = conn<EOL><DEDENT>else:<EOL><INDENT>self._conn = _HTTPConnection(user, password, verify, proxies)<EOL><DEDENT>if url[-<NUM_LIT:1>] == \"<STR_LIT:/>\":<EOL><INDENT>self.url = url<EOL><DEDENT>else:<EOL><INDENT>self.url = url + \"<STR_LIT:/>\"<EOL><DEDENT>", "docstring": "Create a TAXII endpoint.\n\n        Args:\n            user (str): username for authentication (optional)\n            password (str): password for authentication (optional)\n            verify (bool): validate the entity credentials (default: True)\n            conn (_HTTPConnection): A connection to reuse (optional)\n            proxies (dict): key/value pair for http/https proxy settings.\n                (optional)", "id": "f4931:c5:m0"}
{"signature": "def get_objects(self, accept=MEDIA_TYPE_STIX_V20, **filter_kwargs):", "body": "self._verify_can_read()<EOL>query_params = _filter_kwargs_to_query_params(filter_kwargs)<EOL>return self._conn.get(self.objects_url, headers={\"<STR_LIT>\": accept},<EOL>params=query_params)<EOL>", "docstring": "Implement the ``Get Objects`` endpoint (section 5.3)", "id": "f4931:c7:m16"}
{"signature": "def find_imports(self, pbds):", "body": "<EOL>imports = list(set(self.uses).difference(set(self.defines)))<EOL>for imp in imports:<EOL><INDENT>for p in pbds:<EOL><INDENT>if imp in p.defines:<EOL><INDENT>self.imports.append(p.name)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>self.imports = list(set(self.imports))<EOL>for import_file in self.imports:<EOL><INDENT>self.lines.insert(<NUM_LIT:2>, '<STR_LIT>'.format(import_file))<EOL><DEDENT>", "docstring": "Find all missing imports in list of Pbd instances.", "id": "f4935:c0:m8"}
{"signature": "def _dump_message(self, m, top='<STR_LIT>'):", "body": "self._print()<EOL>self._print('<STR_LIT>'.format(m.name))<EOL>self.defines.append('<STR_LIT>'.format(top, m.name))<EOL>self.tabs+=<NUM_LIT:1><EOL>for f in m.field:<EOL><INDENT>self._dump_field(f)<EOL><DEDENT>for e in m.enum_type:<EOL><INDENT>self._dump_enum(e, top='<STR_LIT>'.format(top, m.name))<EOL><DEDENT>for n in m.nested_type:<EOL><INDENT>self._dump_message(n, top='<STR_LIT>'.format(top, m.name))<EOL><DEDENT>self.tabs-=<NUM_LIT:1><EOL>self._print('<STR_LIT:}>')<EOL>", "docstring": "Dump single message type.\n\n        Keyword arguments:\n        top -- top namespace", "id": "f4935:c0:m4"}
{"signature": "def _walk(self, fd):", "body": "top = '<STR_LIT>'.format(fd.package) if len(fd.package) > <NUM_LIT:0> else '<STR_LIT>'<EOL>for e in fd.enum_type: self._dump_enum(e, top)<EOL>for m in fd.message_type: self. _dump_message(m, top)<EOL>", "docstring": "Walk and dump (disasm) descriptor.", "id": "f4935:c0:m5"}
{"signature": "@requires_firmware(<NUM_LIT>)<EOL><INDENT>def pm(self):<DEDENT>", "body": "resp = []<EOL>data = {}<EOL>self.cnxn.xfer([<NUM_LIT>])<EOL>sleep(<NUM_LIT>)<EOL>for i in range(<NUM_LIT:12>):<EOL><INDENT>r = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]<EOL>resp.append(r)<EOL><DEDENT>data['<STR_LIT>']     = self._calculate_float(resp[<NUM_LIT:0>:<NUM_LIT:4>])<EOL>data['<STR_LIT>']   = self._calculate_float(resp[<NUM_LIT:4>:<NUM_LIT:8>])<EOL>data['<STR_LIT>']    = self._calculate_float(resp[<NUM_LIT:8>:])<EOL>sleep(<NUM_LIT:0.1>)<EOL>return data<EOL>", "docstring": "Read the PM data and reset the histogram\n\n        **NOTE: This method is supported by firmware v18+.**\n\n        :rtype: dictionary\n\n        :Example:\n\n        >>> alpha.pm()\n        {\n            'PM1': 0.12,\n            'PM2.5': 0.24,\n            'PM10': 1.42\n        }", "id": "f4940:c1:m18"}
{"signature": "@requires_firmware(<NUM_LIT>)<EOL><INDENT>def read_pot_status(self):<DEDENT>", "body": "<EOL>a = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]<EOL>sleep(<NUM_LIT>)<EOL>res = []<EOL>for i in range(<NUM_LIT:4>):<EOL><INDENT>res.append(self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>])<EOL><DEDENT>sleep(<NUM_LIT:0.1>)<EOL>return {<EOL>'<STR_LIT>':        res[<NUM_LIT:0>],<EOL>'<STR_LIT>':      res[<NUM_LIT:1>],<EOL>'<STR_LIT>':    res[<NUM_LIT:2>],<EOL>'<STR_LIT>':  res[<NUM_LIT:3>]<EOL>}<EOL>", "docstring": "Read the status of the digital pot. Firmware v18+ only.\n        The return value is a dictionary containing the following as\n        unsigned 8-bit integers: FanON, LaserON, FanDACVal, LaserDACVal.\n\n        :rtype: dict\n\n        :Example:\n\n        >>> alpha.read_pot_status()\n        {\n            'LaserDACVal': 230,\n            'FanDACVal': 255,\n            'FanON': 0,\n            'LaserON': 0\n        }", "id": "f4940:c1:m14"}
{"signature": "def calculate_bin_boundary(self, bb):", "body": "return min(enumerate(OPC_LOOKUP), key = lambda x: abs(x[<NUM_LIT:1>] - bb))[<NUM_LIT:0>]<EOL>", "docstring": "Calculate the adc value that corresponds to a specific bin boundary diameter in microns.\n\n            :param bb: Bin Boundary in microns\n\n            :type bb: float\n\n            :rtype: int", "id": "f4940:c0:m9"}
{"signature": "@requires_firmware(<NUM_LIT>)<EOL><INDENT>def write_config_variables2(self, config_vars):<DEDENT>", "body": "logger.warning(\"<STR_LIT>\")<EOL>return<EOL>", "docstring": "Write configuration variables 2 to non-volatile memory.\n\n        **NOTE: This method is currently a placeholder and is not implemented.**\n        **NOTE: This method is supported by firmware v18+.**\n\n        :param config_vars: dictionary containing the configuration variables\n\n        :type config_vars: dictionary", "id": "f4940:c1:m6"}
{"signature": "def off(self):", "body": "b1 = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]          <EOL>sleep(<NUM_LIT>)                             <EOL>b2 = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]          <EOL>sleep(<NUM_LIT:0.1>)<EOL>return True if b1 == <NUM_LIT> and b2 == <NUM_LIT> else False<EOL>", "docstring": "Turn OFF the OPC (fan and laser)\n\n        :rtype: boolean\n\n        :Example:\n\n        >>> alpha.off()\n        True", "id": "f4940:c1:m2"}
{"signature": "def _calculate_float(self, byte_array):", "body": "if len(byte_array) != <NUM_LIT:4>:<EOL><INDENT>return None<EOL><DEDENT>return struct.unpack('<STR_LIT:f>', struct.pack('<STR_LIT>', *byte_array))[<NUM_LIT:0>]<EOL>", "docstring": "Returns an IEEE 754 float from an array of 4 bytes\n\n        :param byte_array: Expects an array of 4 bytes\n\n        :type byte_array: array\n\n        :rtype: float", "id": "f4940:c0:m2"}
{"signature": "@requires_firmware(<NUM_LIT>)<EOL><INDENT>def sn(self):<DEDENT>", "body": "string = []<EOL>self.cnxn.xfer([<NUM_LIT>])<EOL>sleep(<NUM_LIT>)<EOL>for i in range(<NUM_LIT>):<EOL><INDENT>resp = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]<EOL>string.append(chr(resp))<EOL><DEDENT>sleep(<NUM_LIT:0.1>)<EOL>return '<STR_LIT>'.join(string)<EOL>", "docstring": "Read the Serial Number string. This method is only available on OPC-N2\n        firmware versions 18+.\n\n        :rtype: string\n\n        :Example:\n\n        >>> alpha.sn()\n        'OPC-N2 123456789'", "id": "f4940:c1:m15"}
{"signature": "def write_gsc_sfr(self):", "body": "return<EOL>", "docstring": "Write the gsc and sfr values\n\n        **NOTE**: This method is currently a placeholder.", "id": "f4940:c2:m5"}
{"signature": "def ping(self):", "body": "b = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]           <EOL>sleep(<NUM_LIT:0.1>)<EOL>return True if b == <NUM_LIT> else False<EOL>", "docstring": "Checks the connection between the Raspberry Pi and the OPC\n\n        :rtype: Boolean", "id": "f4940:c0:m11"}
{"signature": "def off(self):", "body": "b1 = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]          <EOL>sleep(<NUM_LIT>)                             <EOL>return True if b1 == <NUM_LIT> else False<EOL>", "docstring": "Turn OFF the OPC (fan and laser)\n\n        :returns: boolean success state", "id": "f4940:c2:m2"}
{"signature": "def lookup_bin_boundary(self, adc_value):", "body": "if adc_value < <NUM_LIT:0>:<EOL><INDENT>adc_value = <NUM_LIT:0><EOL><DEDENT>if adc_value > <NUM_LIT>:<EOL><INDENT>adc_value = <NUM_LIT><EOL><DEDENT>return OPC_LOOKUP[adc_value]<EOL>", "docstring": "Looks up the bin boundary value in microns based on the lookup table provided by Alphasense.\n\n            :param adc_value: ADC Value (0 - 4095)\n\n            :type adc_value: int\n\n            :rtype: float", "id": "f4940:c0:m8"}
{"signature": "def read_info_string(self):", "body": "infostring = []<EOL>self.cnxn.xfer([<NUM_LIT>])<EOL>sleep(<NUM_LIT>)<EOL>for i in range(<NUM_LIT>):<EOL><INDENT>resp = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]<EOL>infostring.append(chr(resp))<EOL><DEDENT>sleep(<NUM_LIT:0.1>)<EOL>return '<STR_LIT>'.join(infostring)<EOL>", "docstring": "Reads the information string for the OPC\n\n        :rtype: string\n\n        :Example:\n\n        >>> alpha.read_info_string()\n        'OPC-N2 FirmwareVer=OPC-018.2....................BD'", "id": "f4940:c0:m10"}
{"signature": "def toggle_laser(self, state):", "body": "<EOL>a = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]<EOL>sleep(<NUM_LIT>)<EOL>if state:<EOL><INDENT>b = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>b = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]<EOL><DEDENT>sleep(<NUM_LIT:0.1>)<EOL>return True if a == <NUM_LIT> and b == <NUM_LIT> else False<EOL>", "docstring": "Toggle the power state of the laser.\n\n        :param state: Boolean state of the laser\n\n        :type state: boolean\n\n        :rtype: boolean\n\n        :Example:\n\n        >>> alpha.toggle_laser(True)\n        True", "id": "f4940:c1:m12"}
{"signature": "def _calculate_mtof(self, mtof):", "body": "return mtof / <NUM_LIT><EOL>", "docstring": "Returns the average amount of time that particles in a bin\n        took to cross the path of the laser [units -> microseconds]\n\n        :param mtof: mass time-of-flight\n\n        :type mtof: float\n\n        :rtype: float", "id": "f4940:c0:m3"}
{"signature": "def read_histogram(self):", "body": "resp = []<EOL>data = {}<EOL>command = <NUM_LIT><EOL>self.cnxn.xfer([command])<EOL>sleep(<NUM_LIT>)<EOL>for i in range(<NUM_LIT>):<EOL><INDENT>r = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]<EOL>resp.append(r)<EOL><DEDENT>data['<STR_LIT>']           = self._16bit_unsigned(resp[<NUM_LIT:0>], resp[<NUM_LIT:1>])<EOL>data['<STR_LIT>']           = self._16bit_unsigned(resp[<NUM_LIT:2>], resp[<NUM_LIT:3>])<EOL>data['<STR_LIT>']           = self._16bit_unsigned(resp[<NUM_LIT:4>], resp[<NUM_LIT:5>])<EOL>data['<STR_LIT>']           = self._16bit_unsigned(resp[<NUM_LIT:6>], resp[<NUM_LIT:7>])<EOL>data['<STR_LIT>']           = self._16bit_unsigned(resp[<NUM_LIT:8>], resp[<NUM_LIT:9>])<EOL>data['<STR_LIT>']           = self._16bit_unsigned(resp[<NUM_LIT:10>], resp[<NUM_LIT:11>])<EOL>data['<STR_LIT>']           = self._16bit_unsigned(resp[<NUM_LIT:12>], resp[<NUM_LIT>])<EOL>data['<STR_LIT>']           = self._16bit_unsigned(resp[<NUM_LIT>], resp[<NUM_LIT:15>])<EOL>data['<STR_LIT>']           = self._16bit_unsigned(resp[<NUM_LIT:16>], resp[<NUM_LIT>])<EOL>data['<STR_LIT>']           = self._16bit_unsigned(resp[<NUM_LIT>], resp[<NUM_LIT>])<EOL>data['<STR_LIT>']          = self._16bit_unsigned(resp[<NUM_LIT:20>], resp[<NUM_LIT>])<EOL>data['<STR_LIT>']          = self._16bit_unsigned(resp[<NUM_LIT>], resp[<NUM_LIT>])<EOL>data['<STR_LIT>']          = self._16bit_unsigned(resp[<NUM_LIT>], resp[<NUM_LIT>])<EOL>data['<STR_LIT>']          = self._16bit_unsigned(resp[<NUM_LIT>], resp[<NUM_LIT>])<EOL>data['<STR_LIT>']          = self._16bit_unsigned(resp[<NUM_LIT>], resp[<NUM_LIT>])<EOL>data['<STR_LIT>']          = self._16bit_unsigned(resp[<NUM_LIT:30>], resp[<NUM_LIT>])<EOL>data['<STR_LIT>']       = self._calculate_mtof(resp[<NUM_LIT:32>])<EOL>data['<STR_LIT>']       = self._calculate_mtof(resp[<NUM_LIT>])<EOL>data['<STR_LIT>']       = self._calculate_mtof(resp[<NUM_LIT>])<EOL>data['<STR_LIT>']       = 
self._calculate_mtof(resp[<NUM_LIT>])<EOL>data['<STR_LIT>']     = self._calculate_temp(resp[<NUM_LIT>:<NUM_LIT>])<EOL>data['<STR_LIT>']        = self._calculate_pressure(resp[<NUM_LIT>:<NUM_LIT>])<EOL>data['<STR_LIT>'] = self._calculate_period(resp[<NUM_LIT>:<NUM_LIT>])<EOL>data['<STR_LIT>']        = self._16bit_unsigned(resp[<NUM_LIT>], resp[<NUM_LIT>])<EOL>data['<STR_LIT>']             = self._calculate_float(resp[<NUM_LIT:50>:<NUM_LIT>])<EOL>data['<STR_LIT>']           = self._calculate_float(resp[<NUM_LIT>:<NUM_LIT>])<EOL>data['<STR_LIT>']            = self._calculate_float(resp[<NUM_LIT>:])<EOL>histogram_sum = data['<STR_LIT>'] + data['<STR_LIT>'] + data['<STR_LIT>']   +data['<STR_LIT>'] + data['<STR_LIT>'] + data['<STR_LIT>'] + data['<STR_LIT>']   +data['<STR_LIT>'] + data['<STR_LIT>'] + data['<STR_LIT>'] + data['<STR_LIT>']  +data['<STR_LIT>'] + data['<STR_LIT>'] + data['<STR_LIT>'] + data['<STR_LIT>'] +data['<STR_LIT>']<EOL>return data<EOL>", "docstring": "Read and reset the histogram. The expected return is a dictionary\n        containing the counts per bin, MToF for bins 1, 3, 5, and 7, temperature,\n        pressure, the sampling period, the checksum, PM1, PM2.5, and PM10.\n\n        **NOTE:** The sampling period for the OPCN1 seems to be incorrect.\n\n        :returns: dictionary", "id": "f4940:c2:m8"}
{"signature": "def _enter_bootloader_mode(self):", "body": "return True if self.cnxn.xfer(<NUM_LIT>)[<NUM_LIT:0>] == <NUM_LIT> else False<EOL>", "docstring": "Enter bootloader mode. Must be issued prior to writing\n        configuration variables to non-volatile memory.\n\n        :rtype: boolean\n\n        :Example:\n\n        >>> alpha._enter_bootloader_mode()\n        True", "id": "f4940:c1:m9"}
{"signature": "def write_bin_particle_density(self):", "body": "return<EOL>", "docstring": "Write the bin particle density values to memory. This method is currently a\n        placeholder.\n\n        :returns: None", "id": "f4940:c2:m7"}
{"signature": "def write_config_variables(self, config_vars):", "body": "logger.warning(\"<STR_LIT>\")<EOL>return<EOL>", "docstring": "Write configuration variables to non-volatile memory.\n\n        **NOTE: This method is currently a placeholder and is not implemented.**\n\n        :param config_vars: dictionary containing the configuration variables\n\n        :type config_vars: dictionary", "id": "f4940:c1:m5"}
{"signature": "def read_bin_particle_density(self):", "body": "config = []<EOL>self.cnxn.xfer([<NUM_LIT>])<EOL>sleep(<NUM_LIT>)<EOL>for i in range(<NUM_LIT:4>):<EOL><INDENT>resp = self.cnxn.xfer([<NUM_LIT>])[<NUM_LIT:0>]<EOL>config.append(resp)<EOL><DEDENT>bpd = self._calculate_float(config)<EOL>return bpd<EOL>", "docstring": "Read the bin particle density\n\n        :returns: float", "id": "f4940:c2:m6"}
{"signature": "def article_date(value):", "body": "return value.strftime('<STR_LIT>')<EOL>", "docstring": "Convert a date to the format we want it displayed on the article template.\n\nFormat looks like --> Friday, November 4, 2016\n\nArgs:\n    value (datetime.datetime): input date\n\nReturns:\n    str: value, formatted nicely for displaying the date.", "id": "f4945:m1"}
{"signature": "def titlecase(value):", "body": "return _titlecase(value)<EOL>", "docstring": "Returns the titlecased version of the supplied text.\n\n    Args:\n        value (str): input value\n\n    Returns:\n        str: value, titlecase formatted", "id": "f4945:m3"}
{"signature": "def datetime(value, format_str='<STR_LIT>'):", "body": "return value.strftime(format_str)<EOL>", "docstring": "Convert a datetime to a different format.\n\nThe default format looks like --> 2016/11/25 12:34\n\nArgs:\n    value (datetime.datetime): input date and time\n    format_str (str): The datetime format string to apply to value\n\nReturns:\n    str: value, after the format_str has been applied", "id": "f4945:m0"}
{"signature": "def register():", "body": "signals.generator_init.connect(add_all_filters)<EOL>", "docstring": "Plugin registration.", "id": "f4946:m1"}
{"signature": "def find_meta(*meta_file_parts, meta_key):", "body": "meta_file = read(*meta_file_parts)<EOL>meta_match = re.search(r\"<STR_LIT>\".format(meta_key),<EOL>meta_file, re.M)<EOL>if meta_match:<EOL><INDENT>return meta_match.group(<NUM_LIT:1>)<EOL><DEDENT>raise RuntimeError(\"<STR_LIT>\".format(meta_key))<EOL>", "docstring": "Extract __*meta*__ from meta_file.", "id": "f4950:m1"}
{"signature": "def add_path_object(self, *args):", "body": "for obj in args:<EOL><INDENT>obj.bundle = self<EOL>self.files.append(obj)<EOL><DEDENT>", "docstring": "Add custom path objects\n\n:type: path_object: static_bundle.paths.AbstractPath", "id": "f4954:c0:m5"}
{"signature": "def add_directory(self, *args, **kwargs):", "body": "exc = kwargs.get('<STR_LIT>', None)<EOL>for path in args:<EOL><INDENT>self.files.append(DirectoryPath(path, self, exclusions=exc))<EOL><DEDENT>", "docstring": "Add directory or directories list to bundle\n\n:param exclusions: List of excluded paths\n\n:type path: str|unicode\n:type exclusions: list", "id": "f4954:c0:m4"}
{"signature": "def init_build(self, asset, builder):", "body": "if not self.abs_path:<EOL><INDENT>rel_path = utils.prepare_path(self.rel_bundle_path)<EOL>self.abs_bundle_path = utils.prepare_path([builder.config.input_dir, rel_path])<EOL>self.abs_path = True<EOL><DEDENT>self.input_dir = builder.config.input_dir<EOL>", "docstring": "Called when builder group collect files\nResolves absolute url if relative passed\n\n:type asset: static_bundle.builders.Asset\n:type builder: static_bundle.builders.StandardBuilder", "id": "f4954:c0:m2"}
{"signature": "@property<EOL><INDENT>def path(self):<DEDENT>", "body": "assert self.abs_path and self.abs_bundle_path, \"<STR_LIT>\"<EOL>return self.abs_bundle_path<EOL>", "docstring": "Check if absolute path is not resolved yet", "id": "f4954:c0:m1"}
{"signature": "def prepare(self):", "body": "result_files = self.collect_files()<EOL>chain = self.prepare_handlers_chain<EOL>if chain is None:<EOL><INDENT>chain = [<EOL>LessCompilerPrepareHandler()<EOL>]<EOL><DEDENT>for prepare_handler in chain:<EOL><INDENT>result_files = prepare_handler.prepare(result_files, self)<EOL><DEDENT>return result_files<EOL>", "docstring": "Called when builder run collect files in builder group\n\n:rtype: list[static_bundle.files.StaticFileResult]", "id": "f4954:c0:m7"}
{"signature": "def render_include_group(self, name):", "body": "return self.render_asset(name)<EOL>", "docstring": "Alias for render_asset method", "id": "f4957:c1:m6"}
{"signature": "def make_build(self):", "body": "for asset in self.assets.values():<EOL><INDENT>if asset.has_bundles():<EOL><INDENT>asset.collect_files()<EOL><DEDENT><DEDENT>if not os.path.exists(self.config.output_dir):<EOL><INDENT>os.makedirs(self.config.output_dir)<EOL><DEDENT>if self.config.copy_only_bundles:<EOL><INDENT>for asset in self.assets.values():<EOL><INDENT>if not asset.minify and asset.files:<EOL><INDENT>for f in asset.files:<EOL><INDENT>copy_file(f.abs_path, self._get_output_path(f.abs_path))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>copy_excludes = {}<EOL>for asset in self.assets.values():<EOL><INDENT>if asset.minify and asset.files:<EOL><INDENT>for f in asset.files:<EOL><INDENT>copy_excludes[f.abs_path] = f<EOL><DEDENT><DEDENT><DEDENT>for root, dirs, files in os.walk(self.config.input_dir):<EOL><INDENT>for fpath in files:<EOL><INDENT>current_file_path = os.path.join(root, fpath)<EOL>if current_file_path not in copy_excludes:<EOL><INDENT>copy_file(current_file_path, self._get_output_path(current_file_path))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>self._minify()<EOL>", "docstring": "Move files / make static build", "id": "f4957:c1:m8"}
{"signature": "def collect_links(self, env=None):", "body": "for asset in self.assets.values():<EOL><INDENT>if asset.has_bundles():<EOL><INDENT>asset.collect_files()<EOL><DEDENT><DEDENT>if env is None:<EOL><INDENT>env = self.config.env<EOL><DEDENT>if env == static_bundle.ENV_PRODUCTION:<EOL><INDENT>self._minify(emulate=True)<EOL><DEDENT>self._add_url_prefix()<EOL>", "docstring": "Return links without build files", "id": "f4957:c1:m7"}
{"signature": "def render_asset(self, name):", "body": "result = \"<STR_LIT>\"<EOL>if self.has_asset(name):<EOL><INDENT>asset = self.get_asset(name)<EOL>if asset.files:<EOL><INDENT>for f in asset.files:<EOL><INDENT>result += f.render_include() + \"<STR_LIT:\\r\\n>\"<EOL><DEDENT><DEDENT><DEDENT>return result<EOL>", "docstring": "Render all includes in asset by names\n\n:type name: str|unicode\n:rtype: str|unicode", "id": "f4957:c1:m5"}
{"signature": "def __init__(self, file_path, bundle=None):", "body": "self.file_path = prepare_path(file_path)<EOL>self.bundle = bundle<EOL>", "docstring": ":type file_path: str|unicode\n:type bundle: static_bundle.bundles.AbstractBundle", "id": "f4958:c1:m0"}
{"signature": "def get_files(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Collect used files\nReturn list with one element for single file\nand list with all files for directory path\n\n:rtype: list", "id": "f4958:c0:m0"}
{"signature": "def get_abs_and_rel_paths(self, root_path, file_name, input_dir):", "body": "<EOL>relative_dir = root_path.replace(input_dir, '<STR_LIT>')<EOL>return os.path.join(root_path, file_name), relative_dir + '<STR_LIT:/>' + file_name<EOL>", "docstring": "Return absolute and relative path for file\n\n:type root_path: str|unicode\n:type file_name: str|unicode\n:type input_dir: str|unicode\n:rtype: tuple", "id": "f4958:c0:m1"}
{"signature": "def read_from_file(file_path, encoding=\"<STR_LIT:utf-8>\"):", "body": "with codecs.open(file_path, \"<STR_LIT:r>\", encoding) as f:<EOL><INDENT>return f.read()<EOL><DEDENT>", "docstring": "Read helper method\n\n:type file_path: str|unicode\n:type encoding: str|unicode\n:rtype: str|unicode", "id": "f4961:m1"}
{"signature": "def after(self, text):", "body": "return text<EOL>", "docstring": "@type text: str|unicode\n@rtype: str|unicode", "id": "f4962:c0:m4"}
{"signature": "def before(self):", "body": "return u'<STR_LIT>'<EOL>", "docstring": "Called before minify\nReturned text will be prepend on head\n\n:rtype: unicode", "id": "f4962:c0:m2"}
{"signature": "def init_asset(self, asset):", "body": "self.asset = asset<EOL>", "docstring": "Called before build\n\n:type asset: static_bundle.builders.Asset", "id": "f4962:c0:m1"}
{"signature": "def find_fann():", "body": "<EOL>if sys.platform == \"<STR_LIT:win32>\":<EOL><INDENT>dirs = sys.path<EOL>for ver in dirs:<EOL><INDENT>if os.path.isdir(ver):<EOL><INDENT>if find_x(ver):<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>dirs = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>for path in dirs:<EOL><INDENT>if os.path.isdir(path):<EOL><INDENT>if find_x(path):<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Find doublefann library", "id": "f4967:m2"}
{"signature": "def find_swig():", "body": "for executable in (\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>if find_executable(executable):<EOL><INDENT>return executable<EOL><DEDENT><DEDENT>raise Exception(\"<STR_LIT>\")<EOL>", "docstring": "Find SWIG executable path", "id": "f4967:m3"}
{"signature": "def find_executable(executable, path=None):", "body": "if path is None:<EOL><INDENT>path = os.environ['<STR_LIT>']<EOL><DEDENT>paths = path.split(os.pathsep)<EOL>extlist = ['<STR_LIT>']<EOL>if os.name == '<STR_LIT>':<EOL><INDENT>ext = os.path.splitext(executable)<EOL>if not ext:<EOL><INDENT>executable = executable + \"<STR_LIT>\"<EOL><DEDENT><DEDENT>elif sys.platform == '<STR_LIT:win32>':<EOL><INDENT>pathext = os.environ['<STR_LIT>'].lower().split(os.pathsep)<EOL>ext = os.path.splitext(executable)<EOL>if ext not in pathext:<EOL><INDENT>extlist = pathext<EOL><DEDENT><DEDENT>for ext in extlist:<EOL><INDENT>execname = executable + ext<EOL>if os.path.isfile(execname):<EOL><INDENT>return execname<EOL><DEDENT>else:<EOL><INDENT>for pth in paths:<EOL><INDENT>fil = os.path.join(pth, execname)<EOL>if os.path.isfile(fil):<EOL><INDENT>return fil<EOL><DEDENT><DEDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Try to find 'executable' in the directories listed in 'path' (a\n    string listing directories separated by 'os.pathsep'; defaults to\n    os.environ['PATH']).", "id": "f4967:m0"}
{"signature": "def find_x(path1):", "body": "libs = os.listdir(path1)<EOL>for lib_dir in libs:<EOL><INDENT>if \"<STR_LIT>\" in lib_dir:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>", "docstring": "Return true if substring is in string for files\n    in specified path", "id": "f4967:m1"}
{"signature": "def build_swig():", "body": "print(\"<STR_LIT>\")<EOL>find_fann()<EOL>print(\"<STR_LIT>\")<EOL>swig_bin = find_swig()<EOL>swig_cmd = [swig_bin, '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>subprocess.Popen(swig_cmd).wait()<EOL>", "docstring": "Run SWIG with specified parameters", "id": "f4967:m4"}
{"signature": "def color_from_hls(hue, light, sat):", "body": "if light > <NUM_LIT>: <EOL><INDENT>return <NUM_LIT><EOL><DEDENT>elif light < <NUM_LIT>: <EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>hue = (-hue + <NUM_LIT:1> + <NUM_LIT>/<NUM_LIT>) % <NUM_LIT:1> <EOL>return int(floor(hue * <NUM_LIT>))<EOL><DEDENT>", "docstring": "Takes a hls color and converts to proper hue \n        Bulbs use a BGR order instead of RGB", "id": "f4970:m0"}
{"signature": "def brightness(level=<NUM_LIT:100>, group=<NUM_LIT:0>):", "body": "if level not in range(<NUM_LIT:0>,<NUM_LIT>):<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>b = int(floor(level / <NUM_LIT>) + <NUM_LIT:2>) <EOL>return (COMMANDS['<STR_LIT>'][group], Command(<NUM_LIT>, b))<EOL>", "docstring": "Assumes level is out of 100", "id": "f4971:m2"}
{"signature": "def list_ingest_points(self):", "body": "return self.protocol.execute('<STR_LIT>')<EOL>", "docstring": "Lists the currently available Ingest Points.\n\n:link: http://docs.evostream.com/ems_api_definition/listingestpoints", "id": "f4976:c0:m30"}
{"signature": "@expected('<STR_LIT>')<EOL><INDENT>def get_group_name_by_alias(self, aliasName):<DEDENT>", "body": "return self.protocol.execute('<STR_LIT>',<EOL>aliasName=aliasName)<EOL>", "docstring": "Returns the group name given the alias name.\n\n:param aliasName: The group name alias\n:type aliasName: str\n\n:link: http://docs.evostream.com/ems_api_definition/getgroupnamebyalias", "id": "f4976:c0:m24"}
{"signature": "@expected('<STR_LIT:id>', '<STR_LIT>', '<STR_LIT>')<EOL><INDENT>def remove_config(self, **kwargs):<DEDENT>", "body": "return self.protocol.execute('<STR_LIT>', **kwargs)<EOL>", "docstring": "This command will both stop the stream and remove the corresponding\nconfiguration entry. This command is the same as performing\nshutdownStream permanently=1.\n\n:param id: The configId of the configuration that needs to be removed.\n    ConfigId's can be obtained from the listConfig interface.\n    Removing an inbound stream will also automatically remove all\n    associated outbound streams.\n:type id: int\n\n:param groupName: The name of the group that needs to be removed\n    (applicable to HLS, HDS and external processes). Mandatory only if\n    the id parameter is not specified.\n:type groupName: str\n\n:param removeHlsHdsFiles: If 1 (true) and the stream is HLS or HDS, the\n    folder associated with it will be removed\n:type removeHlsHdsFiles: int\n\n:link: http://docs.evostream.com/ems_api_definition/removeconfig", "id": "f4976:c0:m15"}
{"signature": "@expected('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL><INDENT>def pull_stream(self, uri, **kwargs):<DEDENT>", "body": "return self.protocol.execute('<STR_LIT>', uri=uri, **kwargs)<EOL>", "docstring": "This will try to pull in a stream from an external source. Once a\nstream has been successfully pulled it is assigned a 'local stream\nname' which can be used to access the stream from the EMS.\n\n:param uri: The URI of the external stream. Can be RTMP, RTSP or\n    unicast/multicast (d) mpegts\n:type uri: str\n\n:param keepAlive: If keepAlive is set to 1, the server will attempt to\n    reestablish connection with a stream source after a connection has\n    been lost. The reconnect will be attempted once every second\n    (default: 1 true)\n:type keepAlive: int\n\n:param localStreamName: If provided, the stream will be given this\n    name. Otherwise, a fallback techniques used to determine the stream\n    name (based on the URI)\n:type localStreamName: str\n\n:param forceTcp: If 1 and if the stream is RTSP, a TCP connection will\n    be forced. Otherwise the transport mechanism will be negotiated\n    (UDP or TCP) (default: 1 true)\n:type forceTcp: int\n\n:param tcUrl: When specified, this value will be used to set the TC URL\n    in the initial RTMP connect invoke\n:type tcUrl: str\n\n:param pageUrl: When specified, this value will be used to set the\n    originating web page address in the initial RTMP connect invoke\n:type pageUrl: str\n\n:param swfUrl: When specified, this value will be used to set the\n    originating swf URL in the initial RTMP connect invoke\n:type swfUrl: str\n\n:param rangeStart: For RTSP and RTMP connections. A value from which\n    the playback should start expressed in seconds. 
There are 2 special\n    values: -2 and -1. For more information, please read about\n    start/len parameters here:\n    http://livedocs.adobe.com/flashmediaserver/3.0/hpdocs/help.html?content=00000185.html\n:type rangeStart: int\n\n:param rangeEnd: The length in seconds for the playback. -1 is a\n    special value. For more information, please read about start/len\n    parameters here:\n    http://livedocs.adobe.com/flashmediaserver/3.0/hpdocs/help.html?content=00000185.html\n:type rangeEnd: int\n\n:param ttl: Sets the IP_TTL (time to live) option on the socket\n:type ttl: int\n\n:param tos: Sets the IP_TOS (Type of Service) option on the socket\n:type tos: int\n\n:param rtcpDetectionInterval: How much time (in seconds) should the\n    server wait for RTCP packets before declaring the RTSP stream as a\n    RTCP-less stream\n:type rtcpDetectionInterval: int\n\n:param emulateUserAgent: When specified, this value will be used as the\n    user agent string. It is meaningful only for RTMP\n:type emulateUserAgent: str\n\n:param isAudio: If 1 and if the stream is RTP, it indicates that the\n    currently pulled stream is an audio source. Otherwise the pulled\n    source is assumed as a video source\n:type isAudio: int\n\n:param audioCodecBytes: The audio codec setup of this RTP stream if it\n    is audio. Represented as hex format without '0x' or 'h'. For\n    example: audioCodecBytes=1190\n:type audioCodecBytes: str\n\n:param spsBytes: The video SPS bytes of this RTP stream if it is video.\n    It should be base 64 encoded.\n:type spsBytes: str\n\n:param ppsBytes: The video PPS bytes of this RTP stream if it is video.\n    It should be base 64 encoded\n:type ppsBytes: str\n\n:param ssmIp: The source IP from source-specific-multicast. 
Only usable\n    when doing UDP based pull\n:type ssmIp: str\n\n:param httpProxy: This parameter has two valid values: IP:Port - This\n    value combination specifies an RTSP HTTP Proxy from which the RTSP\n    stream should be pulled from Self - Specifying \"self\" as the value\n    implies pulling RTSP over HTTP\n:type httpProxy: str\n\n:link: http://docs.evostream.com/ems_api_definition/pullstream", "id": "f4976:c0:m1"}
{"signature": "@expected('<STR_LIT:id>', '<STR_LIT>', '<STR_LIT>')<EOL><INDENT>def shutdown_stream(self, **kwargs):<DEDENT>", "body": "return self.protocol.execute('<STR_LIT>', **kwargs)<EOL>", "docstring": "Terminates a specific stream. When permanently=1 is used, this command\nis analogous to removeConfig.\n\n:param id: The uniqueId of the stream that needs to be terminated. The\n    stream ID's can be obtained using the listStreams command. This\n    parameter is not mandatory but either this or localStreamName\n    should be present to identify the particular stream\n:type id: int\n\n:param localStreamName: The name of the inbound stream which you wish\n    to terminate. This will also terminate any outbound streams that\n    are dependent upon this input stream. This parameter is not\n    mandatory but either this or the id should be present to identify\n    the particular stream\n:type localStreamName: str\n\n:param permanently: If true, the corresponding push/pull configuration\n    will also be terminated. Therefore, the stream will NOT be\n    reconnected when the server restarts\n:type permanently: int\n\n:link: http://docs.evostream.com/ems_api_definition/shutdownstream", "id": "f4976:c0:m13"}
{"signature": "@expected('<STR_LIT>', '<STR_LIT>')<EOL><INDENT>def create_ingest_point(self, privateStreamName, publicStreamName):<DEDENT>", "body": "return self.protocol.execute('<STR_LIT>',<EOL>privateStreamName=privateStreamName,<EOL>publicStreamName=publicStreamName)<EOL>", "docstring": "Creates an RTMP ingest point, which mandates that streams pushed into\nthe EMS have a target stream name which matches one Ingest Point\nprivateStreamName.\n\n:param privateStreamName: The name that RTMP Target Stream Names must\n    match.\n:type privateStreamName: str\n\n:param publicStreamName: The name that is used to access the stream\n    pushed to the privateStreamName. The publicStreamName becomes the\n    streams localStreamName.\n:type publicStreamName: str\n\n:link: http://docs.evostream.com/ems_api_definition/createingestpoint", "id": "f4976:c0:m28"}
{"signature": "@expected('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL><INDENT>def add_stream_alias(self, localStreamName, aliasName, **kwargs):<DEDENT>", "body": "return self.protocol.execute('<STR_LIT>',<EOL>localStreamName=localStreamName,<EOL>aliasName=aliasName, **kwargs)<EOL>", "docstring": "Allows you to create secondary name(s) for internal streams. Once an\nalias is created the localstreamname cannot be used to request\nplayback of that stream. Once an alias is used (requested by a client)\nthe alias is removed. Aliases are designed to be used to protect/hide\nyour source streams.\n\n:param localStreamName: The original stream name\n:type localStreamName: str\n\n:param aliasName: The alias alternative to the localStreamName\n:type aliasName: str\n\n:param expirePeriod: The expiration period for this alias. Negative\n    values will be treated as one-shot but no longer than the absolute\n    positive value in seconds, 0 means it will not expire, positive\n    values mean the alias can be used multiple times but expires after\n    this many seconds. The default is -600 (one-shot, 10 mins)\n:type expirePeriod: int\n\n:link: http://docs.evostream.com/ems_api_definition/addstreamalias", "id": "f4976:c0:m18"}
{"signature": "@expected('<STR_LIT>', '<STR_LIT>')<EOL><INDENT>def add_group_name_alias(self, groupName, aliasName):<DEDENT>", "body": "return self.protocol.execute('<STR_LIT>', groupName=groupName,<EOL>aliasName=aliasName)<EOL>", "docstring": "Creates secondary name(s) for group names. Once an alias is created the\ngroup name cannot be used to request HTTP playback of that stream. Once\nan alias is used (requested by a client) the alias is removed. Aliases\nare designed to be used to protect/hide your source streams.\n\n:param groupName: The original group name\n:type groupName: str\n\n:param aliasName: The alias alternative to the group name\n:type aliasName: str\n\n:link: http://docs.evostream.com/ems_api_definition/addgroupnamealias", "id": "f4976:c0:m22"}
{"signature": "@expected('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>')<EOL><INDENT>def push_stream(self, uri, **kwargs):<DEDENT>", "body": "return self.protocol.execute('<STR_LIT>', uri=uri, **kwargs)<EOL>", "docstring": "Try to push a local stream to an external destination. The pushed\nstream can only use the RTMP, RTSP or MPEG-TS unicast/multicast\nprotocol.\n\n:param uri: The URI of the external stream. Can be RTMP, RTSP or\n    unicast/multicast (d) mpegts\n:type uri: str\n\n:param keepAlive: If keepAlive is set to 1, the server will attempt to\n    reestablish connection with a stream source after a connection has\n    been lost. The reconnect will be attempted once every second\n    (default: 1 true)\n:type keepAlive: int\n\n:param localStreamName: If provided, the stream will be given this\n    name. Otherwise, a fallback techniques used to determine the stream\n    name (based on the URI)\n:type localStreamName: str\n\n:param targetStreamName: The name of the stream at destination. If not\n    provided, the target stream name willbe the same as the local\n    stream name\n:type targetStreamName: str\n\n:param targetStreamType: It can be one of following: **live**,\n    **record**, **append**. 
It is meaningful only for RTMP\n:type targetStreamType: str\n\n:param tcUrl: When specified, this value will be used to set the TC\n    URL in the initial RTMP connect invoke\n:type tcUrl: str\n\n:param pageUrl: When specified, this value will be used to set the\n    originating web page address in the initial RTMP connect invoke\n:type pageUrl: str\n\n:param swfUrl: When specified, this value will be used to set the\n    originating swf URL in the initial RTMP connect invoke\n:type swfUrl: str\n\n:param ttl: Sets the IP_TTL (time to live) option on the socket\n:type ttl: int\n\n:param tos: Sets the IP_TOS (Type of Service) option on the socket\n:type tos: int\n\n:param emulateUserAgent: When specified, this value will be used as the\n    user agent string. It is meaningful only for RTMP\n:type emulateUserAgent: str\n\n:param rtmpAbsoluteTimestamps: Forces the timestamps to be absolute\n    when using RTMP.\n:type rtmpAbsoluteTimestamps: int\n\n:param sendChunkSizeRequest: Sets whether the RTMP stream will or will\n    not send a \"Set Chunk Length\" message. This is significant when\n    pushing to Akamai's new RTMP HD ingest point where this parameter\n    should be set to 0 so that Akamai will not drop the connection.\n:type sendChunkSizeRequest: int\n\n:param useSourcePts: When value is true, timestamps on source inbound\n    RTMP stream are passed directly to the outbound (pushed) RTMP\n    streams. This affects only pushed Outbound Net RTMP with net RTMP\n    source. This parameter overrides the value of the config.lua\n    option of the same name.\n:type useSourcePts: int\n\n:link: http://docs.evostream.com/ems_api_definition/pushstream", "id": "f4976:c0:m2"}
{"signature": "def list_streams_ids(self):", "body": "return self.protocol.execute('<STR_LIT>')<EOL>", "docstring": "Get a list of IDs for every active stream.\n\n:link: http://docs.evostream.com/ems_api_definition/liststreamsids", "id": "f4976:c0:m9"}
{"signature": "def py_hash(key, num_buckets):", "body": "b, j = -<NUM_LIT:1>, <NUM_LIT:0><EOL>if num_buckets < <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>while j < num_buckets:<EOL><INDENT>b = int(j)<EOL>key = ((key * long(<NUM_LIT>)) + <NUM_LIT:1>) & <NUM_LIT><EOL>j = float(b + <NUM_LIT:1>) * (float(<NUM_LIT:1> << <NUM_LIT>) / float((key >> <NUM_LIT>) + <NUM_LIT:1>))<EOL><DEDENT>return int(b)<EOL>", "docstring": "Generate a number in the range [0, num_buckets).\n\n    Args:\n        key (int): The key to hash.\n        num_buckets (int): Number of buckets to use.\n\n    Returns:\n        The bucket number `key` computes to.\n\n    Raises:\n        ValueError: If `num_buckets` is not a positive number.", "id": "f4981:m0"}
{"signature": "def sqnxt23v5_w2(**kwargs):", "body": "return get_squeezenext(version=\"<STR_LIT>\", width_scale=<NUM_LIT>, model_name=\"<STR_LIT>\", **kwargs)<EOL>", "docstring": "0.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\nParameters:\n----------\npretrained : bool, default False\n    Whether to load the pretrained weights for model.\nroot : str, default '~/.torch/models'\n    Location for keeping the model parameters.", "id": "f4989:m6"}
{"signature": "def sqnxt23v5_w1(**kwargs):", "body": "return get_squeezenext(version=\"<STR_LIT>\", width_scale=<NUM_LIT:1.0>, model_name=\"<STR_LIT>\", **kwargs)<EOL>", "docstring": "1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.\n\nParameters:\n----------\npretrained : bool, default False\n    Whether to load the pretrained weights for model.\nroot : str, default '~/.torch/models'\n    Location for keeping the model parameters.", "id": "f4989:m4"}
{"signature": "def se_resnet34(num_classes):", "body": "model = ResNet(SEBasicBlock, [<NUM_LIT:3>, <NUM_LIT:4>, <NUM_LIT:6>, <NUM_LIT:3>], num_classes=num_classes)<EOL>model.avgpool = nn.AdaptiveAvgPool2d(<NUM_LIT:1>)<EOL>return model<EOL>", "docstring": "Constructs a ResNet-34 model.\n\n    Args:\n        pretrained (bool): If True, returns a model pre-trained on ImageNet", "id": "f4994:m2"}
{"signature": "def se_resnet152(num_classes):", "body": "model = ResNet(SEBottleneck, [<NUM_LIT:3>, <NUM_LIT:8>, <NUM_LIT>, <NUM_LIT:3>], num_classes=num_classes)<EOL>model.avgpool = nn.AdaptiveAvgPool2d(<NUM_LIT:1>)<EOL>return model<EOL>", "docstring": "Constructs a ResNet-152 model.\n\n    Args:\n        pretrained (bool): If True, returns a model pre-trained on ImageNet", "id": "f4994:m5"}
{"signature": "def se_resnet50(num_classes):", "body": "model = ResNet(SEBottleneck, [<NUM_LIT:3>, <NUM_LIT:4>, <NUM_LIT:6>, <NUM_LIT:3>], num_classes=num_classes)<EOL>model.avgpool = nn.AdaptiveAvgPool2d(<NUM_LIT:1>)<EOL>return model<EOL>", "docstring": "Constructs a ResNet-50 model.\n\n    Args:\n        pretrained (bool): If True, returns a model pre-trained on ImageNet", "id": "f4994:m3"}
{"signature": "def preres_conv3x3(in_channels,<EOL>out_channels,<EOL>stride):", "body": "return PreResConv(<EOL>in_channels=in_channels,<EOL>out_channels=out_channels,<EOL>kernel_size=<NUM_LIT:3>,<EOL>stride=stride,<EOL>padding=<NUM_LIT:1>)<EOL>", "docstring": "3x3 version of the PreResNet specific convolution block.\n\nParameters:\n----------\nin_channels : int\n    Number of input channels.\nout_channels : int\n    Number of output channels.\nstride : int or tuple/list of 2 int\n    Strides of the convolution.\nbn_use_global_stats : bool\n    Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.", "id": "f4996:m2"}
{"signature": "def preresnet10(**kwargs):", "body": "return get_preresnet(blocks=<NUM_LIT:10>, model_name=\"<STR_LIT>\", **kwargs)<EOL>", "docstring": "PreResNet-10 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.\nIt's an experimental model.\n\nParameters:\n----------\npretrained : bool, default False\n    Whether to load the pretrained weights for model.\nroot : str, default '~/.torch/models'\n    Location for keeping the model parameters.", "id": "f4996:m4"}
{"signature": "def preresnet18(**kwargs):", "body": "return get_preresnet(blocks=<NUM_LIT>, model_name=\"<STR_LIT>\", **kwargs)<EOL>", "docstring": "PreResNet-18 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.\n\nParameters:\n----------\npretrained : bool, default False\n    Whether to load the pretrained weights for model.\nroot : str, default '~/.torch/models'\n    Location for keeping the model parameters.", "id": "f4996:m11"}
{"signature": "def preresnet50b(**kwargs):", "body": "return get_preresnet(blocks=<NUM_LIT:50>, conv1_stride=False, model_name=\"<STR_LIT>\", **kwargs)<EOL>", "docstring": "PreResNet-50 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep\nResidual Networks,' https://arxiv.org/abs/1603.05027.\n\nParameters:\n----------\npretrained : bool, default False\n    Whether to load the pretrained weights for model.\nroot : str, default '~/.torch/models'\n    Location for keeping the model parameters.", "id": "f4996:m14"}
{"signature": "def conv1x1(in_channels,<EOL>out_channels,<EOL>stride):", "body": "return nn.Conv2d(<EOL>in_channels=in_channels,<EOL>out_channels=out_channels,<EOL>kernel_size=<NUM_LIT:1>,<EOL>stride=stride,<EOL>padding=<NUM_LIT:0>,<EOL>bias=False)<EOL>", "docstring": "Convolution 1x1 layer.\n\nParameters:\n----------\nin_channels : int\n    Number of input channels.\nout_channels : int\n    Number of output channels.\nstride : int or tuple/list of 2 int\n    Strides of the convolution.", "id": "f4996:m0"}
{"signature": "def preresnet101(**kwargs):", "body": "return get_preresnet(blocks=<NUM_LIT>, model_name=\"<STR_LIT>\", **kwargs)<EOL>", "docstring": "PreResNet-101 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.\n\nParameters:\n----------\npretrained : bool, default False\n    Whether to load the pretrained weights for model.\nroot : str, default '~/.torch/models'\n    Location for keeping the model parameters.", "id": "f4996:m15"}
{"signature": "def preres_conv1x1(in_channels,<EOL>out_channels,<EOL>stride):", "body": "return PreResConv(<EOL>in_channels=in_channels,<EOL>out_channels=out_channels,<EOL>kernel_size=<NUM_LIT:1>,<EOL>stride=stride,<EOL>padding=<NUM_LIT:0>)<EOL>", "docstring": "1x1 version of the PreResNet specific convolution block.\n\nParameters:\n----------\nin_channels : int\n    Number of input channels.\nout_channels : int\n    Number of output channels.\nstride : int or tuple/list of 2 int\n    Strides of the convolution.", "id": "f4996:m1"}
{"signature": "def preresnet101b(**kwargs):", "body": "return get_preresnet(blocks=<NUM_LIT>, conv1_stride=False, model_name=\"<STR_LIT>\", **kwargs)<EOL>", "docstring": "PreResNet-101 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep\nResidual Networks,' https://arxiv.org/abs/1603.05027.\n\nParameters:\n----------\npretrained : bool, default False\n    Whether to load the pretrained weights for model.\nroot : str, default '~/.torch/models'\n    Location for keeping the model parameters.", "id": "f4996:m16"}
{"signature": "def preresnet16(**kwargs):", "body": "return get_preresnet(blocks=<NUM_LIT:16>, model_name=\"<STR_LIT>\", **kwargs)<EOL>", "docstring": "PreResNet-16 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.\nIt's an experimental model.\n\nParameters:\n----------\npretrained : bool, default False\n    Whether to load the pretrained weights for model.\nroot : str, default '~/.torch/models'\n    Location for keeping the model parameters.", "id": "f4996:m7"}
{"signature": "def convert_dropout(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT>' + random_string(<NUM_LIT:6>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>dropout = keras.layers.Dropout(rate=params['<STR_LIT>'], name=tf_name)<EOL>layers[scope_name] = dropout(layers[inputs[<NUM_LIT:0>]])<EOL>", "docstring": "Convert dropout.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5025:m2"}
{"signature": "def convert_lrelu(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT>' + random_string(<NUM_LIT:3>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>leakyrelu =keras.layers.LeakyReLU(alpha=params['<STR_LIT>'], name=tf_name)<EOL>layers[scope_name] = leakyrelu(layers[inputs[<NUM_LIT:0>]])<EOL>", "docstring": "Convert leaky relu layer.\n\nArgs:\n     params: dictionary with layer parameters\n     w_name: name prefix in state_dict\n     scope_name: pytorch scope name\n     inputs: pytorch node inputs\n     layers: dictionary with keras tensors\n     weights: pytorch state_dict\n     names: use short names for keras layers", "id": "f5027:m1"}
{"signature": "def convert_softmax(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT>' + random_string(<NUM_LIT:4>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>def target_layer(x, dim=params['<STR_LIT>']):<EOL><INDENT>import keras<EOL>return keras.activations.softmax(x, axis=dim)<EOL><DEDENT>lambda_layer = keras.layers.Lambda(target_layer)<EOL>layers[scope_name] = lambda_layer(layers[inputs[<NUM_LIT:0>]])<EOL>", "docstring": "Convert softmax layer.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5027:m3"}
{"signature": "def convert_relu(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT>' + random_string(<NUM_LIT:4>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>relu = keras.layers.Activation('<STR_LIT:relu>', name=tf_name)<EOL>layers[scope_name] = relu(layers[inputs[<NUM_LIT:0>]])<EOL>", "docstring": "Convert relu layer.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5027:m0"}
{"signature": "def convert_padding(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if params['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>if params['<STR_LIT:value>'] != <NUM_LIT:0.0>:<EOL><INDENT>raise AssertionError('<STR_LIT>')<EOL><DEDENT>if names:<EOL><INDENT>tf_name = '<STR_LIT>' + random_string(<NUM_LIT:4>)<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>padding_name = tf_name<EOL>padding_layer = keras.layers.ZeroPadding2D(<EOL>padding=((params['<STR_LIT>'][<NUM_LIT:2>], params['<STR_LIT>'][<NUM_LIT:6>]), (params['<STR_LIT>'][<NUM_LIT:3>], params['<STR_LIT>'][<NUM_LIT:7>])),<EOL>name=padding_name<EOL>)<EOL>layers[scope_name] = padding_layer(layers[inputs[<NUM_LIT:0>]])<EOL><DEDENT>elif params['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>def target_layer(x, pads=params['<STR_LIT>']):<EOL><INDENT>layer = tf.pad(x, [[<NUM_LIT:0>, <NUM_LIT:0>], [<NUM_LIT:0>, <NUM_LIT:0>], [pads[<NUM_LIT:2>], pads[<NUM_LIT:6>]], [pads[<NUM_LIT:3>], pads[<NUM_LIT:7>]]], '<STR_LIT>')<EOL>return layer<EOL><DEDENT>lambda_layer = keras.layers.Lambda(target_layer)<EOL>layers[scope_name] = lambda_layer(layers[inputs[<NUM_LIT:0>]])<EOL><DEDENT>", "docstring": "Convert padding layer.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5033:m0"}
{"signature": "def convert_constant(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>params_list = params['<STR_LIT:value>'].numpy()<EOL>def target_layer(x, value=params_list):<EOL><INDENT>return tf.constant(value.tolist(), shape=value.shape)<EOL><DEDENT>lambda_layer = keras.layers.Lambda(target_layer)<EOL>layers[scope_name + '<STR_LIT>'] = params_list  <EOL>layers[scope_name] = lambda_layer(layers[list(layers.keys())[<NUM_LIT:0>]])<EOL>", "docstring": "Convert constant layer.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5034:m0"}
{"signature": "def convert_convtranspose(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT:C>' + random_string(<NUM_LIT:7>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>bias_name = '<STR_LIT>'.format(w_name)<EOL>weights_name = '<STR_LIT>'.format(w_name)<EOL>if len(weights[weights_name].numpy().shape) == <NUM_LIT:4>:<EOL><INDENT>W = weights[weights_name].numpy().transpose(<NUM_LIT:2>, <NUM_LIT:3>, <NUM_LIT:1>, <NUM_LIT:0>)<EOL>height, width, n_filters, channels = W.shape<EOL>n_groups = params['<STR_LIT>']<EOL>if n_groups > <NUM_LIT:1>:<EOL><INDENT>raise AssertionError('<STR_LIT>')<EOL><DEDENT>if params['<STR_LIT>'][<NUM_LIT:0>] > <NUM_LIT:1>:<EOL><INDENT>raise AssertionError('<STR_LIT>')<EOL><DEDENT>if bias_name in weights:<EOL><INDENT>biases = weights[bias_name].numpy()<EOL>has_bias = True<EOL><DEDENT>else:<EOL><INDENT>biases = None<EOL>has_bias = False<EOL><DEDENT>input_name = inputs[<NUM_LIT:0>]<EOL>if has_bias:<EOL><INDENT>weights = [W, biases]<EOL><DEDENT>else:<EOL><INDENT>weights = [W]<EOL><DEDENT>conv = keras.layers.Conv2DTranspose(<EOL>filters=n_filters,<EOL>kernel_size=(height, width),<EOL>strides=(params['<STR_LIT>'][<NUM_LIT:0>], params['<STR_LIT>'][<NUM_LIT:1>]),<EOL>padding='<STR_LIT>',<EOL>output_padding=<NUM_LIT:0>,<EOL>weights=weights,<EOL>use_bias=has_bias,<EOL>activation=None,<EOL>dilation_rate=params['<STR_LIT>'][<NUM_LIT:0>],<EOL>bias_initializer='<STR_LIT>', kernel_initializer='<STR_LIT>',<EOL>name=tf_name<EOL>)<EOL>layers[scope_name] = conv(layers[input_name])<EOL>layers[scope_name].set_shape(layers[scope_name]._keras_shape)<EOL>pads = params['<STR_LIT>']<EOL>if pads[<NUM_LIT:0>] > <NUM_LIT:0>:<EOL><INDENT>assert(len(pads) == <NUM_LIT:2> or (pads[<NUM_LIT:2>] == pads[<NUM_LIT:0>] and pads[<NUM_LIT:3>] == pads[<NUM_LIT:1>]))<EOL>crop = 
keras.layers.Cropping2D(<EOL>pads[:<NUM_LIT:2>],<EOL>name=tf_name + '<STR_LIT>'<EOL>)<EOL>layers[scope_name] = crop(layers[scope_name])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise AssertionError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Convert transposed convolution layer.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5035:m1"}
{"signature": "def convert_conv(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT:C>' + random_string(<NUM_LIT:7>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>bias_name = '<STR_LIT>'.format(w_name)<EOL>weights_name = '<STR_LIT>'.format(w_name)<EOL>input_name = inputs[<NUM_LIT:0>]<EOL>if len(weights[weights_name].numpy().shape) == <NUM_LIT:5>:  <EOL><INDENT>W = weights[weights_name].numpy().transpose(<NUM_LIT:2>, <NUM_LIT:3>, <NUM_LIT:4>, <NUM_LIT:1>, <NUM_LIT:0>)<EOL>height, width, channels, n_layers, n_filters = W.shape<EOL>if bias_name in weights:<EOL><INDENT>biases = weights[bias_name].numpy()<EOL>has_bias = True<EOL><DEDENT>else:<EOL><INDENT>biases = None<EOL>has_bias = False<EOL><DEDENT>if params['<STR_LIT>'][<NUM_LIT:0>] > <NUM_LIT:0> or params['<STR_LIT>'][<NUM_LIT:1>] > <NUM_LIT:0>:<EOL><INDENT>padding_name = tf_name + '<STR_LIT>'<EOL>padding_layer = keras.layers.ZeroPadding3D(<EOL>padding=(params['<STR_LIT>'][<NUM_LIT:0>],<EOL>params['<STR_LIT>'][<NUM_LIT:1>],<EOL>params['<STR_LIT>'][<NUM_LIT:2>]),<EOL>name=padding_name<EOL>)<EOL>layers[padding_name] = padding_layer(layers[input_name])<EOL>input_name = padding_name<EOL><DEDENT>if has_bias:<EOL><INDENT>weights = [W, biases]<EOL><DEDENT>else:<EOL><INDENT>weights = [W]<EOL><DEDENT>conv = keras.layers.Conv3D(<EOL>filters=n_filters,<EOL>kernel_size=(channels, height, width),<EOL>strides=(params['<STR_LIT>'][<NUM_LIT:0>],<EOL>params['<STR_LIT>'][<NUM_LIT:1>],<EOL>params['<STR_LIT>'][<NUM_LIT:2>]),<EOL>padding='<STR_LIT>',<EOL>weights=weights,<EOL>use_bias=has_bias,<EOL>activation=None,<EOL>dilation_rate=params['<STR_LIT>'][<NUM_LIT:0>],<EOL>bias_initializer='<STR_LIT>', kernel_initializer='<STR_LIT>',<EOL>name=tf_name<EOL>)<EOL>layers[scope_name] = conv(layers[input_name])<EOL><DEDENT>elif 
len(weights[weights_name].numpy().shape) == <NUM_LIT:4>:  <EOL><INDENT>if params['<STR_LIT>'][<NUM_LIT:0>] > <NUM_LIT:0> or params['<STR_LIT>'][<NUM_LIT:1>] > <NUM_LIT:0>:<EOL><INDENT>padding_name = tf_name + '<STR_LIT>'<EOL>padding_layer = keras.layers.ZeroPadding2D(<EOL>padding=(params['<STR_LIT>'][<NUM_LIT:0>], params['<STR_LIT>'][<NUM_LIT:1>]),<EOL>name=padding_name<EOL>)<EOL>layers[padding_name] = padding_layer(layers[input_name])<EOL>input_name = padding_name<EOL><DEDENT>W = weights[weights_name].numpy().transpose(<NUM_LIT:2>, <NUM_LIT:3>, <NUM_LIT:1>, <NUM_LIT:0>)<EOL>height, width, channels_per_group, out_channels = W.shape<EOL>n_groups = params['<STR_LIT>']<EOL>in_channels = channels_per_group * n_groups<EOL>if n_groups == in_channels and n_groups != <NUM_LIT:1>:<EOL><INDENT>if bias_name in weights:<EOL><INDENT>biases = weights[bias_name].numpy()<EOL>has_bias = True<EOL><DEDENT>else:<EOL><INDENT>biases = None<EOL>has_bias = False<EOL><DEDENT>W = W.transpose(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:3>, <NUM_LIT:2>)<EOL>if has_bias:<EOL><INDENT>weights = [W, biases]<EOL><DEDENT>else:<EOL><INDENT>weights = [W]<EOL><DEDENT>conv = keras.layers.DepthwiseConv2D(<EOL>kernel_size=(height, width),<EOL>strides=(params['<STR_LIT>'][<NUM_LIT:0>], params['<STR_LIT>'][<NUM_LIT:1>]),<EOL>padding='<STR_LIT>',<EOL>use_bias=has_bias,<EOL>activation=None,<EOL>depth_multiplier=<NUM_LIT:1>,<EOL>weights = weights,<EOL>dilation_rate=params['<STR_LIT>'][<NUM_LIT:0>],<EOL>bias_initializer='<STR_LIT>', kernel_initializer='<STR_LIT>'<EOL>)<EOL>layers[scope_name] = conv(layers[input_name])<EOL><DEDENT>elif n_groups != <NUM_LIT:1>:<EOL><INDENT>def target_layer(x, groups=params['<STR_LIT>'], stride_y=params['<STR_LIT>'][<NUM_LIT:0>], stride_x=params['<STR_LIT>'][<NUM_LIT:1>]):<EOL><INDENT>x = tf.transpose(x, [<NUM_LIT:0>, <NUM_LIT:2>, <NUM_LIT:3>, <NUM_LIT:1>])<EOL>def convolve_lambda(i, k):<EOL><INDENT>return tf.nn.conv2d(i, k, strides=[<NUM_LIT:1>, stride_y, stride_x, <NUM_LIT:1>], 
padding='<STR_LIT>')<EOL><DEDENT>input_groups = tf.split(axis=<NUM_LIT:3>, num_or_size_splits=groups, value=x)<EOL>weight_groups = tf.split(axis=<NUM_LIT:3>, num_or_size_splits=groups, value=W.transpose(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>))<EOL>output_groups = [convolve_lambda(i, k) for i, k in zip(input_groups, weight_groups)]<EOL>layer = tf.concat(axis=<NUM_LIT:3>, values=output_groups)<EOL>layer = tf.transpose(layer, [<NUM_LIT:0>, <NUM_LIT:3>, <NUM_LIT:1>, <NUM_LIT:2>])<EOL>return layer<EOL><DEDENT>lambda_layer = keras.layers.Lambda(target_layer)<EOL>layers[scope_name] = lambda_layer(layers[input_name])<EOL><DEDENT>else:<EOL><INDENT>if bias_name in weights:<EOL><INDENT>biases = weights[bias_name].numpy()<EOL>has_bias = True<EOL><DEDENT>else:<EOL><INDENT>biases = None<EOL>has_bias = False<EOL><DEDENT>if has_bias:<EOL><INDENT>weights = [W, biases]<EOL><DEDENT>else:<EOL><INDENT>weights = [W]<EOL><DEDENT>conv = keras.layers.Conv2D(<EOL>filters=out_channels,<EOL>kernel_size=(height, width),<EOL>strides=(params['<STR_LIT>'][<NUM_LIT:0>], params['<STR_LIT>'][<NUM_LIT:1>]),<EOL>padding='<STR_LIT>',<EOL>weights=weights,<EOL>use_bias=has_bias,<EOL>activation=None,<EOL>dilation_rate=params['<STR_LIT>'][<NUM_LIT:0>],<EOL>bias_initializer='<STR_LIT>', kernel_initializer='<STR_LIT>',<EOL>name=tf_name<EOL>)<EOL>layers[scope_name] = conv(layers[input_name])<EOL><DEDENT><DEDENT>else:  <EOL><INDENT>W = weights[weights_name].numpy().transpose(<NUM_LIT:2>, <NUM_LIT:1>, <NUM_LIT:0>)<EOL>width, channels, n_filters = W.shape<EOL>n_groups = params['<STR_LIT>']<EOL>if n_groups > <NUM_LIT:1>:<EOL><INDENT>raise AssertionError('<STR_LIT>')<EOL><DEDENT>if bias_name in weights:<EOL><INDENT>biases = weights[bias_name].numpy()<EOL>has_bias = True<EOL><DEDENT>else:<EOL><INDENT>biases = None<EOL>has_bias = False<EOL><DEDENT>padding_name = tf_name + '<STR_LIT>'<EOL>padding_layer = 
keras.layers.ZeroPadding1D(<EOL>padding=params['<STR_LIT>'][<NUM_LIT:0>],<EOL>name=padding_name<EOL>)<EOL>layers[padding_name] = padding_layer(layers[inputs[<NUM_LIT:0>]])<EOL>input_name = padding_name<EOL>if has_bias:<EOL><INDENT>weights = [W, biases]<EOL><DEDENT>else:<EOL><INDENT>weights = [W]<EOL><DEDENT>conv = keras.layers.Conv1D(<EOL>filters=channels,<EOL>kernel_size=width,<EOL>strides=params['<STR_LIT>'],<EOL>padding='<STR_LIT>',<EOL>weights=weights,<EOL>use_bias=has_bias,<EOL>activation=None,<EOL>data_format='<STR_LIT>',<EOL>dilation_rate=params['<STR_LIT>'],<EOL>bias_initializer='<STR_LIT>', kernel_initializer='<STR_LIT>',<EOL>name=tf_name<EOL>)<EOL>layers[scope_name] = conv(layers[input_name])<EOL><DEDENT>", "docstring": "Convert convolution layer.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5035:m0"}
{"signature": "def convert_adaptive_max_pool2d(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT>' + random_string(<NUM_LIT:4>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>global_pool = keras.layers.GlobalMaxPooling2D(data_format='<STR_LIT>', name=tf_name)<EOL>layers[scope_name] = global_pool(layers[inputs[<NUM_LIT:0>]])<EOL>def target_layer(x):<EOL><INDENT>import keras<EOL>return keras.backend.expand_dims(x)<EOL><DEDENT>lambda_layer = keras.layers.Lambda(target_layer, name=tf_name + '<STR_LIT:E>')<EOL>layers[scope_name] = lambda_layer(layers[scope_name])  <EOL>layers[scope_name] = lambda_layer(layers[scope_name])<EOL>", "docstring": "Convert convert_adaptive_max_pool2d layer.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5036:m4"}
{"signature": "def convert_adaptive_avg_pool2d(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT>' + random_string(<NUM_LIT:4>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>global_pool = keras.layers.GlobalAveragePooling2D(data_format='<STR_LIT>', name=tf_name)<EOL>layers[scope_name] = global_pool(layers[inputs[<NUM_LIT:0>]])<EOL>def target_layer(x):<EOL><INDENT>import keras<EOL>return keras.backend.expand_dims(x)<EOL><DEDENT>lambda_layer = keras.layers.Lambda(target_layer, name=tf_name + '<STR_LIT:E>')<EOL>layers[scope_name] = lambda_layer(layers[scope_name])  <EOL>layers[scope_name] = lambda_layer(layers[scope_name])<EOL>", "docstring": "Convert adaptive_avg_pool2d layer.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5036:m3"}
{"signature": "def convert_maxpool3(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT:P>' + random_string(<NUM_LIT:7>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>if '<STR_LIT>' in params:<EOL><INDENT>height, width, depth = params['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>height, width, depth = params['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in params:<EOL><INDENT>stride_height, stride_width, stride_depth = params['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>stride_height, stride_width, stride_depth = params['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in params:<EOL><INDENT>padding_h, padding_w, padding_d, _, _ = params['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>padding_h, padding_w, padding_d = params['<STR_LIT>']<EOL><DEDENT>input_name = inputs[<NUM_LIT:0>]<EOL>if padding_h > <NUM_LIT:0> and padding_w > <NUM_LIT:0> and padding_d > <NUM_LIT:0>:<EOL><INDENT>padding_name = tf_name + '<STR_LIT>'<EOL>padding_layer = keras.layers.ZeroPadding3D(<EOL>padding=(padding_h, padding_w, padding_d),<EOL>name=padding_name<EOL>)<EOL>layers[padding_name] = padding_layer(layers[inputs[<NUM_LIT:0>]])<EOL>input_name = padding_name<EOL><DEDENT>pooling = keras.layers.MaxPooling3D(<EOL>pool_size=(height, width, depth),<EOL>strides=(stride_height, stride_width, stride_depth),<EOL>padding='<STR_LIT>',<EOL>name=tf_name<EOL>)<EOL>layers[scope_name] = pooling(layers[input_name])<EOL>", "docstring": "Convert 3d Max pooling.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5036:m2"}
{"signature": "def convert_avgpool(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT:P>' + random_string(<NUM_LIT:7>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>if '<STR_LIT>' in params:<EOL><INDENT>height, width = params['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>height, width = params['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in params:<EOL><INDENT>stride_height, stride_width = params['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>stride_height, stride_width = params['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in params:<EOL><INDENT>padding_h, padding_w, _, _ = params['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>padding_h, padding_w = params['<STR_LIT>']<EOL><DEDENT>input_name = inputs[<NUM_LIT:0>]<EOL>pad = '<STR_LIT>' <EOL>if height % <NUM_LIT:2> == <NUM_LIT:1> and width % <NUM_LIT:2> == <NUM_LIT:1> andheight // <NUM_LIT:2> == padding_h and width // <NUM_LIT:2> == padding_w andstride_height == <NUM_LIT:1> and stride_width == <NUM_LIT:1>:<EOL><INDENT>pad = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>padding_name = tf_name + '<STR_LIT>'<EOL>padding_layer = keras.layers.ZeroPadding2D(<EOL>padding=(padding_h, padding_w),<EOL>name=padding_name<EOL>)<EOL>layers[padding_name] = padding_layer(layers[inputs[<NUM_LIT:0>]])<EOL>input_name = padding_name<EOL><DEDENT>pooling = keras.layers.AveragePooling2D(<EOL>pool_size=(height, width),<EOL>strides=(stride_height, stride_width),<EOL>padding=pad,<EOL>name=tf_name,<EOL>data_format='<STR_LIT>'<EOL>)<EOL>layers[scope_name] = pooling(layers[input_name])<EOL>", "docstring": "Convert Average pooling.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n   
 names: use short names for keras layers", "id": "f5036:m0"}
{"signature": "def convert_shape(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>def target_layer(x):<EOL><INDENT>import tensorflow as tf<EOL>return tf.shape(x)<EOL><DEDENT>lambda_layer = keras.layers.Lambda(target_layer)<EOL>layers[scope_name] = lambda_layer(layers[inputs[<NUM_LIT:0>]])<EOL>", "docstring": "Convert shape operation.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5037:m5"}
{"signature": "def convert_unsqueeze(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT>' + random_string(<NUM_LIT:4>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>def target_layer(x):<EOL><INDENT>import keras<EOL>return keras.backend.expand_dims(x)<EOL><DEDENT>lambda_layer = keras.layers.Lambda(target_layer, name=tf_name + '<STR_LIT:E>')<EOL>layers[scope_name] = lambda_layer(layers[inputs[<NUM_LIT:0>]])<EOL>", "docstring": "Convert unsqueeze operation.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5037:m4"}
{"signature": "def convert_flatten(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT:R>' + random_string(<NUM_LIT:7>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>reshape = keras.layers.Reshape([-<NUM_LIT:1>], name=tf_name)<EOL>layers[scope_name] = reshape(layers[inputs[<NUM_LIT:0>]])<EOL>", "docstring": "Convert reshape(view).\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5037:m0"}
{"signature": "def convert_reshape(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT>' + random_string(<NUM_LIT:4>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>if len(inputs) > <NUM_LIT:1>:<EOL><INDENT>if layers[inputs[<NUM_LIT:1>]][<NUM_LIT:0>] == -<NUM_LIT:1>:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>reshape = keras.layers.Reshape(layers[inputs[<NUM_LIT:1>] + '<STR_LIT>'], name=tf_name)<EOL>layers[scope_name] = reshape(layers[inputs[<NUM_LIT:0>]])<EOL><DEDENT>else:<EOL><INDENT>if inputs[<NUM_LIT:0>] in layers:<EOL><INDENT>reshape = keras.layers.Reshape(params['<STR_LIT>'][<NUM_LIT:1>:], name=tf_name)<EOL>layers[scope_name] = reshape(layers[inputs[<NUM_LIT:0>]])<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Convert reshape layer.\n\nArgs:\n     params: dictionary with layer parameters\n     w_name: name prefix in state_dict\n     scope_name: pytorch scope name\n     inputs: pytorch node inputs\n     layers: dictionary with keras tensors\n     weights: pytorch state_dict\n     names: use short names for keras layers", "id": "f5037:m2"}
{"signature": "def convert_transpose(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>if params['<STR_LIT>'][<NUM_LIT:0>] != <NUM_LIT:0>:<EOL><INDENT>if inputs[<NUM_LIT:0>] in layers:<EOL><INDENT>print('<STR_LIT>')<EOL>layers[scope_name] = layers[inputs[<NUM_LIT:0>]]<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if names:<EOL><INDENT>tf_name = '<STR_LIT>' + random_string(<NUM_LIT:4>)<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>permute = keras.layers.Permute(params['<STR_LIT>'][<NUM_LIT:1>:], name=tf_name)<EOL>layers[scope_name] = permute(layers[inputs[<NUM_LIT:0>]])<EOL><DEDENT>", "docstring": "Convert transpose layer.\n\nArgs:\n     params: dictionary with layer parameters\n     w_name: name prefix in state_dict\n     scope_name: pytorch scope name\n     inputs: pytorch node inputs\n     layers: dictionary with keras tensors\n     weights: pytorch state_dict\n     names: use short names for keras layers", "id": "f5037:m1"}
{"signature": "def pytorch_to_keras(<EOL>model, args, input_shapes,<EOL>change_ordering=False, training=False, verbose=False, names=False,<EOL>):", "body": "<EOL>if isinstance(args, torch.autograd.Variable):<EOL><INDENT>args = (args, )<EOL><DEDENT>if isinstance(input_shapes, tuple):<EOL><INDENT>input_shapes = [input_shapes]<EOL><DEDENT>orig_state_dict_keys = _unique_state_dict(model).keys()<EOL>with set_training(model, training):<EOL><INDENT>trace, torch_out = torch.jit.get_trace_graph(model, tuple(args))<EOL><DEDENT>if orig_state_dict_keys != _unique_state_dict(model).keys():<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>if version.parse('<STR_LIT>') < version.parse(torch.__version__):<EOL><INDENT>trace.set_graph(_optimize_graph(trace.graph(), OperatorExportTypes.ONNX))<EOL><DEDENT>else:<EOL><INDENT>trace.set_graph(_optimize_graph(trace.graph(), False))<EOL><DEDENT>trace.graph().lint()<EOL>if verbose:<EOL><INDENT>print(trace.graph())<EOL><DEDENT>nodes = list(trace.graph().nodes())<EOL>if six.PY3:<EOL><INDENT>from types import SimpleNamespace<EOL>seq_to_find =['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>k = <NUM_LIT:0><EOL>s = <NUM_LIT:0><EOL>for i, node in enumerate(nodes):<EOL><INDENT>if node.kind() == seq_to_find[k]:<EOL><INDENT>if k == <NUM_LIT:0>:<EOL><INDENT>s = i<EOL><DEDENT>k += <NUM_LIT:1><EOL>if k == len(seq_to_find):<EOL><INDENT>reshape_op = nodes[s + k - <NUM_LIT:1>]<EOL>flatten_op = {<EOL>'<STR_LIT>': (lambda: '<STR_LIT>'),<EOL>'<STR_LIT>': (lambda: {}),<EOL>'<STR_LIT>':  (lambda: list(reshape_op.outputs())),<EOL>'<STR_LIT>': (lambda: reshape_op.scopeName()),<EOL>'<STR_LIT>': (lambda: list(reshape_op.inputs())[:<NUM_LIT:1>]),<EOL>'<STR_LIT>': (lambda: reshape_op.__str__()),<EOL>}<EOL>nodes = nodes[:s] + [SimpleNamespace(**flatten_op)] + nodes[s+k:]<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>k = <NUM_LIT:0><EOL>s = 
-<NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>graph_outputs = [get_leaf_id(n) for n in trace.graph().outputs()]<EOL>graph_inputs = [get_leaf_id(n) for n in trace.graph().inputs()]<EOL>state_dict = _unique_state_dict(model)<EOL>if verbose:<EOL><INDENT>print('<STR_LIT>', graph_inputs)<EOL>print('<STR_LIT>', graph_outputs)<EOL>print('<STR_LIT>', list(state_dict))<EOL><DEDENT>import re<EOL>import keras<EOL>from keras import backend as K<EOL>K.set_image_data_format('<STR_LIT>')<EOL>layers = dict()<EOL>keras_inputs = []<EOL>for i in range(len(args)):<EOL><INDENT>layers[graph_inputs[i]] = keras.layers.InputLayer(<EOL>input_shape=input_shapes[i], name='<STR_LIT>'.format(i)<EOL>).output<EOL>keras_inputs.append(layers[graph_inputs[i]])<EOL><DEDENT>outputs = []<EOL>group_indices = defaultdict(lambda: <NUM_LIT:0>, {})<EOL>for node in nodes:<EOL><INDENT>node_inputs = list(node.inputs())<EOL>node_input_names = []<EOL>for node_input in node_inputs:<EOL><INDENT>node_input_names.append(get_leaf_id(node_input))<EOL><DEDENT>node_type = node.kind()<EOL>node_scope_name = node.scopeName()<EOL>node_id = get_node_id(node)<EOL>node_name_regex = re.findall(r'<STR_LIT>', node_scope_name)<EOL>try: <EOL><INDENT>int(node_name_regex[-<NUM_LIT:1>])<EOL>node_weigth_group_name = '<STR_LIT:.>'.join(<EOL>node_name_regex[:-<NUM_LIT:1>]<EOL>)<EOL>node_weights_name = node_weigth_group_name + '<STR_LIT:.>' + str(group_indices[node_weigth_group_name])<EOL>group_indices[node_weigth_group_name] += <NUM_LIT:1><EOL><DEDENT>except ValueError:<EOL><INDENT>node_weights_name = '<STR_LIT:.>'.join(<EOL>node_name_regex<EOL>)<EOL><DEDENT>except IndexError:<EOL><INDENT>node_weights_name = '<STR_LIT:.>'.join(node_input_names)<EOL><DEDENT>node_attrs = {k: node[k] for k in node.attributeNames()}<EOL>node_outputs = list(node.outputs())<EOL>node_outputs_names = []<EOL>for node_output in node_outputs:<EOL><INDENT>if 
node_output.node().scopeName():<EOL><INDENT>node_outputs_names.append(node_output.node().scopeName())<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print('<STR_LIT>')<EOL>print('<STR_LIT>', node_scope_name)<EOL>print('<STR_LIT>', node_id)<EOL>print('<STR_LIT>', node_type)<EOL>print('<STR_LIT>', node_input_names)<EOL>print('<STR_LIT>', node_outputs_names)<EOL>print('<STR_LIT>', node_weights_name)<EOL>print('<STR_LIT>', node_attrs)<EOL>print('<STR_LIT>', node_id in graph_outputs)<EOL><DEDENT>AVAILABLE_CONVERTERS[node_type](<EOL>node_attrs,<EOL>node_weights_name, node_id,<EOL>node_input_names,<EOL>layers, state_dict,<EOL>names<EOL>)<EOL>if node_id in graph_outputs:<EOL><INDENT>outputs.append(layers[node_id])<EOL><DEDENT><DEDENT>model = keras.models.Model(inputs=keras_inputs, outputs=outputs)<EOL>if change_ordering:<EOL><INDENT>import numpy as np<EOL>conf = model.get_config()<EOL>for layer in conf['<STR_LIT>']:<EOL><INDENT>if layer['<STR_LIT>'] and '<STR_LIT>' in layer['<STR_LIT>']:<EOL><INDENT>layer['<STR_LIT>']['<STR_LIT>'] =tuple(np.reshape(np.array(<EOL>[<EOL>[None] +<EOL>list(layer['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:2>:][:]) +<EOL>[layer['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:1>]]<EOL>]), -<NUM_LIT:1><EOL>))<EOL><DEDENT>if layer['<STR_LIT>'] and '<STR_LIT>' in layer['<STR_LIT>']:<EOL><INDENT>if len(list(layer['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:1>:][:])) > <NUM_LIT:0>:<EOL><INDENT>layer['<STR_LIT>']['<STR_LIT>'] =tuple(np.reshape(np.array(<EOL>[<EOL>list(layer['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:1>:][:]),<EOL>layer['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]<EOL>]), -<NUM_LIT:1><EOL>),)<EOL><DEDENT><DEDENT>if layer['<STR_LIT>'] and '<STR_LIT>' in layer['<STR_LIT>']:<EOL><INDENT>layer['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if layer['<STR_LIT>'] and '<STR_LIT>' in layer['<STR_LIT>']:<EOL><INDENT>layer['<STR_LIT>']['<STR_LIT>'] = <NUM_LIT:3><EOL><DEDENT><DEDENT>K.set_image_data_format('<STR_LIT>')<EOL>model_tf_ordering = keras.models.Model.from_config(conf)<EOL>for 
dst_layer, src_layer in zip(<EOL>model_tf_ordering.layers, model.layers<EOL>):<EOL><INDENT>dst_layer.set_weights(src_layer.get_weights())<EOL><DEDENT>model = model_tf_ordering<EOL><DEDENT>print('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return model<EOL>", "docstring": "By given pytorch model convert layers with specified convertors.\n\nArgs:\n    model: pytorch model\n    args: pytorch model arguments\n    input_shapes: keras input shapes (using for each InputLayer)\n    change_ordering: change CHW to HWC\n    training: switch model to training mode\n    verbose: verbose output\n    names: use short names, use random-suffix or keep original names for keras layers\n\nReturns:\n    model: created keras model.", "id": "f5038:m3"}
{"signature": "@contextlib.contextmanager<EOL>def set_training(model, mode):", "body": "if mode is None:<EOL><INDENT>yield<EOL>return<EOL><DEDENT>old_mode = model.training<EOL>if old_mode != mode:<EOL><INDENT>model.train(mode)<EOL><DEDENT>try:<EOL><INDENT>yield<EOL><DEDENT>finally:<EOL><INDENT>if old_mode != mode:<EOL><INDENT>model.train(old_mode)<EOL><DEDENT><DEDENT>", "docstring": "A context manager to temporarily set the training mode of 'model'\nto 'mode', resetting it when we exit the with-block.  A no-op if\nmode is None.", "id": "f5038:m0"}
{"signature": "def convert_concat(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>concat_nodes = [layers[i] for i in inputs]<EOL>if len(concat_nodes) == <NUM_LIT:1>:<EOL><INDENT>layers[scope_name] = concat_nodes[<NUM_LIT:0>]<EOL>return<EOL><DEDENT>if names == '<STR_LIT>':<EOL><INDENT>tf_name = '<STR_LIT>' + random_string(<NUM_LIT:5>)<EOL><DEDENT>elif names == '<STR_LIT>':<EOL><INDENT>tf_name = w_name<EOL><DEDENT>else:<EOL><INDENT>tf_name = w_name + str(random.random())<EOL><DEDENT>cat = keras.layers.Concatenate(name=tf_name, axis=params['<STR_LIT>'])<EOL>layers[scope_name] = cat(concat_nodes)<EOL>", "docstring": "Convert concatenation.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5039:m2"}
{"signature": "def convert_reduce_sum(params, w_name, scope_name, inputs, layers, weights, names):", "body": "print('<STR_LIT>')<EOL>keepdims = params['<STR_LIT>'] > <NUM_LIT:0><EOL>axis = params['<STR_LIT>']<EOL>def target_layer(x, keepdims=keepdims, axis=axis):<EOL><INDENT>import keras.backend as K<EOL>return K.sum(x, keepdims=keepdims, axis=axis)<EOL><DEDENT>lambda_layer = keras.layers.Lambda(target_layer)<EOL>layers[scope_name] = lambda_layer(layers[inputs[<NUM_LIT:0>]])<EOL>", "docstring": "Convert reduce_sum layer.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5039:m1"}
{"signature": "def convert_sum(<EOL>params, w_name, scope_name, inputs, layers, weights, names<EOL>):", "body": "print('<STR_LIT>')<EOL>def target_layer(x):<EOL><INDENT>import keras.backend as K<EOL>return K.sum(x)<EOL><DEDENT>lambda_layer = keras.layers.Lambda(target_layer)<EOL>layers[scope_name] = lambda_layer(layers[inputs[<NUM_LIT:0>]])<EOL>", "docstring": "Convert sum.\n\nArgs:\n    params: dictionary with layer parameters\n    w_name: name prefix in state_dict\n    scope_name: pytorch scope name\n    inputs: pytorch node inputs\n    layers: dictionary with keras tensors\n    weights: pytorch state_dict\n    names: use short names for keras layers", "id": "f5039:m0"}
{"signature": "def json_get_default(json: JsonValue, path: str,<EOL>default: Any, expected_type: Any = ANY) -> Any:", "body": "try:<EOL><INDENT>return json_get(json, path, expected_type)<EOL><DEDENT>except (ValueError, IndexError):<EOL><INDENT>return default<EOL><DEDENT>", "docstring": "Get a JSON value by path, optionally checking its type.\n\n    This works exactly like json_get(), but instead of raising\n    ValueError or IndexError when a path part is not found, return\n    the provided default value:\n\n    >>> json_get_default({}, \"/foo\", \"I am a default value\")\n    'I am a default value'\n\n    TypeErrors will be raised as in json_get() if an expected_type\n    is provided:\n\n    >>> json_get_default({\"foo\": \"bar\"}, \"/foo\", 123, int)\n    Traceback (most recent call last):\n        ...\n    TypeError: wrong JSON type int != str", "id": "f5042:m20"}
{"signature": "def assert_json_type(value: JsonValue, expected_type: JsonCheckType) -> None:", "body": "def type_name(t: Union[JsonCheckType, Type[None]]) -> str:<EOL><INDENT>if t is None:<EOL><INDENT>return \"<STR_LIT:None>\"<EOL><DEDENT>if isinstance(t, JList):<EOL><INDENT>return \"<STR_LIT:list>\"<EOL><DEDENT>return t.__name__<EOL><DEDENT>if expected_type is None:<EOL><INDENT>if value is None:<EOL><INDENT>return<EOL><DEDENT><DEDENT>elif expected_type == float:<EOL><INDENT>if isinstance(value, float) or isinstance(value, int):<EOL><INDENT>return<EOL><DEDENT><DEDENT>elif expected_type in [str, int, bool, list, dict]:<EOL><INDENT>if isinstance(value, expected_type):  <EOL><INDENT>return<EOL><DEDENT><DEDENT>elif isinstance(expected_type, JList):<EOL><INDENT>if isinstance(value, list):<EOL><INDENT>for v in value:<EOL><INDENT>assert_json_type(v, expected_type.value_type)<EOL><DEDENT>return<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>raise TypeError(\"<STR_LIT>\".format(<EOL>type_name(expected_type), type_name(type(value))))<EOL>", "docstring": "Check that a value has a certain JSON type.\n\n    Raise TypeError if the type does not match.\n\n    Supported types: str, int, float, bool, list, dict, and None.\n    float will match any number, int will only match numbers without\n    fractional part.\n\n    The special type JList(x) will match a list value where each\n    item is of type x:\n\n    >>> assert_json_type([1, 2, 3], JList(int))", "id": "f5042:m0"}
{"signature": "@property<EOL><INDENT>def deadline(self):<DEDENT>", "body": "return self.__deadline<EOL>", "docstring": "Timeout before ping exits.\nYou can specify either a number or a string (e.g. ``\"1sec\"``).\nIf both :py:attr:`~.deadline` and :py:attr:`~.count` are |None|,\nIf a number is specified, the unit will be considered as seconds.\n\n    +------------+----------------------------------------------------------+\n    |    Unit    |                Available specifiers (str)                |\n    +============+==========================================================+\n    |days        |``d``/``day``/``days``                                    |\n    +------------+----------------------------------------------------------+\n    |hours       |``h``/``hour``/``hours``                                  |\n    +------------+----------------------------------------------------------+\n    |minutes     |``m``/``min``/``mins``/``minute``/``minutes``             |\n    +------------+----------------------------------------------------------+\n    |seconds     |``s``/``sec``/``secs``/``second``/``seconds``             |\n    +------------+----------------------------------------------------------+\n    |milliseconds|``ms``/``msec``/``msecs``/``millisecond``/``milliseconds``|\n    +------------+----------------------------------------------------------+\n    |microseconds|``us``/``usec``/``usecs``/``microsecond``/``microseconds``|\n    +------------+----------------------------------------------------------+\n\n:py:attr:`~.deadline` automatically set to the default value (``3 seconds``).\nDefaults to |None|.\n\nReturns:\n    humanreadable.Time: deadline", "id": "f5047:c1:m2"}
{"signature": "@property<EOL><INDENT>def timeout(self):<DEDENT>", "body": "return self.__timeout<EOL>", "docstring": "Time to wait for a response per packet.\nYou can specify either a number or a string (e.g. ``\"1sec\"``).\nIf a number is specified, the unit will be considered as milliseconds.\n\n    +------------+----------------------------------------------------------+\n    |    Unit    |                Available specifiers (str)                |\n    +============+==========================================================+\n    |days        |``d``/``day``/``days``                                    |\n    +------------+----------------------------------------------------------+\n    |hours       |``h``/``hour``/``hours``                                  |\n    +------------+----------------------------------------------------------+\n    |minutes     |``m``/``min``/``mins``/``minute``/``minutes``             |\n    +------------+----------------------------------------------------------+\n    |seconds     |``s``/``sec``/``secs``/``second``/``seconds``             |\n    +------------+----------------------------------------------------------+\n    |milliseconds|``ms``/``msec``/``msecs``/``millisecond``/``milliseconds``|\n    +------------+----------------------------------------------------------+\n    |microseconds|``us``/``usec``/``usecs``/``microsecond``/``microseconds``|\n    +------------+----------------------------------------------------------+\n\nUse system default timeout if the value is |None|.\nDefaults to |None|.\nIf the system does not support timeout in milliseconds, round up as seconds.\n\nReturns:\n    humanreadable.Time: timeout", "id": "f5047:c1:m0"}
{"signature": "@property<EOL><INDENT>def icmp_replies(self):<DEDENT>", "body": "return self.__icmp_replies<EOL>", "docstring": "ICMP packet reply information.\n\nReturns:\n    |list| of |dict|:", "id": "f5050:c0:m12"}
{"signature": "@property<EOL><INDENT>def packet_transmit(self):<DEDENT>", "body": "return self.__packet_transmit<EOL>", "docstring": "Number of packets transmitted.\n\nReturns:\n    |int|:", "id": "f5050:c0:m2"}
{"signature": "@property<EOL><INDENT>def packet_loss_count(self):<DEDENT>", "body": "try:<EOL><INDENT>return self.packet_transmit - self.packet_receive<EOL><DEDENT>except TypeError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Number of packet losses.\n\nReturns:\n    |int|: |None| if the value is not a number.", "id": "f5050:c0:m4"}
{"signature": "@property<EOL><INDENT>def rtt_min(self):<DEDENT>", "body": "return self.__rtt_min<EOL>", "docstring": "Minimum round trip time of transmitted ICMP packets |msec_unit|.\n\nReturns:\n    |float|:", "id": "f5050:c0:m6"}
{"signature": "def set_log_level(log_level):", "body": "<EOL>logbook.get_level_name(log_level)<EOL>if log_level == logger.level:<EOL><INDENT>return<EOL><DEDENT>if log_level == logbook.NOTSET:<EOL><INDENT>set_logger(is_enable=False)<EOL><DEDENT>else:<EOL><INDENT>set_logger(is_enable=True)<EOL><DEDENT>logger.level = log_level<EOL>subprocrunner.set_log_level(log_level)<EOL>", "docstring": "Set logging level of this module. The module using\n`logbook <https://logbook.readthedocs.io/en/stable/>`__ module for logging.\n\n:param int log_level:\n    One of the log level of the\n    `logbook <https://logbook.readthedocs.io/en/stable/api/base.html>`__.\n    Disabled logging if the ``log_level`` is ``logbook.NOTSET``.\n:raises LookupError: If ``log_level`` is an invalid value.", "id": "f5053:m1"}
{"signature": "def parse(self, ping_message):", "body": "try:<EOL><INDENT>if typepy.is_not_null_string(ping_message.stdout):<EOL><INDENT>ping_message = ping_message.stdout<EOL><DEDENT><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>logger.debug(\"<STR_LIT>\".format(ping_message))<EOL>self.__parser = NullPingParser()<EOL>if typepy.is_null_string(ping_message):<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self.__stats = PingStats()<EOL>return self.__stats<EOL><DEDENT>ping_lines = _to_unicode(ping_message).splitlines()<EOL>parser_class_list = (<EOL>LinuxPingParser,<EOL>WindowsPingParser,<EOL>MacOsPingParser,<EOL>AlpineLinuxPingParser,<EOL>)<EOL>for parser_class in parser_class_list:<EOL><INDENT>self.__parser = parser_class()<EOL>try:<EOL><INDENT>self.__stats = self.__parser.parse(ping_lines)<EOL>return self.__stats<EOL><DEDENT>except ParseError as e:<EOL><INDENT>if e.reason != ParseErrorReason.HEADER_NOT_FOUND:<EOL><INDENT>raise e<EOL><DEDENT><DEDENT>except pp.ParseException:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>self.__parser = NullPingParser()<EOL>return self.__stats<EOL>", "docstring": "Parse ping command output.\n\nArgs:\n    ping_message (str or :py:class:`~pingparsing.PingResult`):\n        ``ping`` command output.\n\nReturns:\n    :py:class:`~pingparsing.PingStats`: Parsed result.", "id": "f5054:c0:m17"}
{"signature": "def _clean(zipcode, valid_length=_valid_zipcode_length):", "body": "zipcode = zipcode.split(\"<STR_LIT:->\")[<NUM_LIT:0>]  <EOL>if len(zipcode) != valid_length:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>if _contains_nondigits(zipcode):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return zipcode<EOL>", "docstring": "Assumes zipcode is of type `str`", "id": "f5071:m2"}
{"signature": "@_clean_zipcode<EOL>def is_real(zipcode):", "body": "return bool(matching(zipcode))<EOL>", "docstring": "Determine whether a given zip or zip+4 zipcode is real.", "id": "f5071:m5"}
{"signature": "def filter_by(zips=_zips, **kwargs):", "body": "return [z for z in zips if all([k in z and z[k] == v for k, v in kwargs.items()])]<EOL>", "docstring": "Use `kwargs` to select for desired attributes from list of zipcode dicts", "id": "f5071:m7"}
{"signature": "def __contains__(self, key):", "body": "return key in self.keys() or hasattr(self, key)<EOL>", "docstring": "Context uses 'if key in ...'", "id": "f5083:c3:m1"}
{"signature": "@cached_property<EOL><INDENT>def display(self):<DEDENT>", "body": "return dict(self.choices).get(self.value, '<STR_LIT>')<EOL>", "docstring": "Display value for selected choice.", "id": "f5083:c3:m6"}
{"signature": "def resolve_blocks(template, context):", "body": "try:<EOL><INDENT>blocks = context.render_context[BLOCK_CONTEXT_KEY]<EOL><DEDENT>except KeyError:<EOL><INDENT>blocks = context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()<EOL><DEDENT>if isinstance(template, six.string_types):<EOL><INDENT>template = get_template(template)<EOL><DEDENT>template = getattr(template, '<STR_LIT>', template)<EOL>local_blocks = {<EOL>block.name: block<EOL>for block in template.nodelist.get_nodes_by_type(BlockNode)<EOL>}<EOL>blocks.add_blocks(local_blocks)<EOL>extends = template.nodelist.get_nodes_by_type(ExtendsNode)<EOL>if extends:<EOL><INDENT>extends_node = extends[<NUM_LIT:0>]<EOL>parent_template = extends_node.get_parent(context)<EOL>resolve_blocks(parent_template, context)<EOL><DEDENT>return blocks<EOL>", "docstring": "Return a BlockContext instance of all the {% block %} tags in the template.\n\nIf template is a string, it will be resolved through get_template", "id": "f5083:m0"}
{"signature": "def auto_widget(field):", "body": "<EOL>info = {<EOL>'<STR_LIT>': field.field.widget.__class__.__name__,<EOL>'<STR_LIT>': field.field.__class__.__name__,<EOL>'<STR_LIT:name>': field.name,<EOL>}<EOL>return [<EOL>fmt.format(**info)<EOL>for fmt in (<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>)<EOL>]<EOL>", "docstring": "Return a list of widget names for the provided field.", "id": "f5083:m9"}
{"signature": "def get_md_device(self, line, personalities=[]):", "body": "ret = {}<EOL>splitted = split('<STR_LIT>', line)<EOL>ret['<STR_LIT:status>'] = splitted[<NUM_LIT:1>]<EOL>if splitted[<NUM_LIT:2>] in personalities:<EOL><INDENT>ret['<STR_LIT:type>'] = splitted[<NUM_LIT:2>]<EOL>ret['<STR_LIT>'] = self.get_components(line, with_type=True)<EOL><DEDENT>else:<EOL><INDENT>ret['<STR_LIT:type>'] = None<EOL>ret['<STR_LIT>'] = self.get_components(line, with_type=False)<EOL><DEDENT>return ret<EOL>", "docstring": "Return a dict of md device define in the line.", "id": "f5087:c0:m16"}
{"signature": "def get_md_status(self, line):", "body": "ret = {}<EOL>splitted = split('<STR_LIT>', line)<EOL>if len(splitted) < <NUM_LIT:7>:<EOL><INDENT>ret['<STR_LIT>'] = None<EOL>ret['<STR_LIT>'] = None<EOL>ret['<STR_LIT>'] = None<EOL><DEDENT>else:<EOL><INDENT>ret['<STR_LIT>'] = splitted[-<NUM_LIT:4>]<EOL>ret['<STR_LIT>'] = splitted[-<NUM_LIT:3>]<EOL>ret['<STR_LIT>'] = splitted[-<NUM_LIT:2>]<EOL><DEDENT>return ret<EOL>", "docstring": "Return a dict of md status define in the line.", "id": "f5087:c0:m17"}
{"signature": "def available(self, array):", "body": "return int(self.get_stats()['<STR_LIT>'][array]['<STR_LIT>'])<EOL>", "docstring": "Return the array's available components number.", "id": "f5087:c0:m10"}
{"signature": "def config(self, array):", "body": "return self.get_stats()['<STR_LIT>'][array]['<STR_LIT>']<EOL>", "docstring": "Return the array's config/status.\n\n        U mean OK\n        _ mean Failed", "id": "f5087:c0:m12"}
{"signature": "def get_stats(self):", "body": "return self.stats<EOL>", "docstring": "Return the stats.", "id": "f5087:c0:m4"}
{"signature": "def type(self, array):", "body": "return self.get_stats()['<STR_LIT>'][array]['<STR_LIT:type>']<EOL>", "docstring": "Return the array's type.", "id": "f5087:c0:m7"}
{"signature": "def status(self, array):", "body": "return self.get_stats()['<STR_LIT>'][array]['<STR_LIT:status>']<EOL>", "docstring": "Return the array's status.", "id": "f5087:c0:m8"}
{"signature": "def used(self, array):", "body": "return int(self.get_stats()['<STR_LIT>'][array]['<STR_LIT>'])<EOL>", "docstring": "Return the array's used components number.", "id": "f5087:c0:m11"}
{"signature": "def components(self, array):", "body": "return self.get_stats()['<STR_LIT>'][array]['<STR_LIT>'].keys()<EOL>", "docstring": "Return the components of the arrays (list).", "id": "f5087:c0:m9"}
{"signature": "def arrays(self):", "body": "return self.get_stats()['<STR_LIT>'].keys()<EOL>", "docstring": "Return the arrays (list).", "id": "f5087:c0:m6"}
{"signature": "def get_random_filename(instance, filename):", "body": "folder = settings.UPLOADS_ROOT<EOL>ext = filename.split('<STR_LIT:.>')[-<NUM_LIT:1>]<EOL>filename = '<STR_LIT>'.format(str(uuid4()), ext)<EOL>return os.path.join(folder, filename)<EOL>", "docstring": "Generates random filename for uploading file using uuid4 hashes\nYou need to define UPLOADS_ROOT in your django settings\nsomething like this\nUPLOADS_ROOT = rel(MEDIA_ROOT, 'uploads')", "id": "f5094:m0"}
{"signature": "def uppercase(string):", "body": "return str(string).upper()<EOL>", "docstring": "Convert string into upper case.\n\n    Args:\n        string: String to convert.\n\n    Returns:\n        string: Uppercase case string.", "id": "f5098:m13"}
{"signature": "def titlecase(string):", "body": "return '<STR_LIT:U+0020>'.join(<EOL>[capitalcase(word) for word in snakecase(string).split(\"<STR_LIT:_>\")]<EOL>)<EOL>", "docstring": "Convert string into sentence case.\n    First letter capped while each punctuations is capitalsed\n    and joined with space.\n\n    Args:\n        string: String to convert.\n\n    Returns:\n        string: Title cased string.", "id": "f5098:m11"}
{"signature": "def pascalcase(string):", "body": "return capitalcase(camelcase(string))<EOL>", "docstring": "Convert string into pascal case.\n\n    Args:\n        string: String to convert.\n\n    Returns:\n        string: Pascal case string.", "id": "f5098:m4"}
{"signature": "def backslashcase(string):", "body": "str1 = re.sub(r\"<STR_LIT:_>\", r\"<STR_LIT:\\\\>\", snakecase(string))<EOL>return str1<EOL>", "docstring": "Convert string into spinal case.\n    Join punctuation with backslash.\n\n    Args:\n        string: String to convert.\n\n    Returns:\n        string: Spinal cased string.", "id": "f5098:m6"}
{"signature": "def trimcase(string):", "body": "return str(string).strip()<EOL>", "docstring": "Convert string into trimmed string.\n\n    Args:\n        string: String to convert.\n\n    Returns:\n        string: Trimmed case string", "id": "f5098:m12"}
{"signature": "def parse_version(package):", "body": "from os.path import dirname, join, exists<EOL>import ast<EOL>_candiates = [<EOL>join(dirname(__file__), package + '<STR_LIT>'),<EOL>join(dirname(__file__), package, '<STR_LIT>'),<EOL>]<EOL>_found = [init_fpath for init_fpath in _candiates if exists(init_fpath)]<EOL>if len(_found) > <NUM_LIT:0>:<EOL><INDENT>init_fpath = _found[<NUM_LIT:0>]<EOL><DEDENT>elif len(_found) > <NUM_LIT:1>:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>elif len(_found) == <NUM_LIT:0>:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>with open(init_fpath, '<STR_LIT:r>') as file_:<EOL><INDENT>sourcecode = file_.read()<EOL><DEDENT>pt = ast.parse(sourcecode)<EOL>class VersionVisitor(ast.NodeVisitor):<EOL><INDENT>def visit_Assign(self, node):<EOL><INDENT>for target in node.targets:<EOL><INDENT>if getattr(target, '<STR_LIT:id>', None) == '<STR_LIT>':<EOL><INDENT>self.version = node.value.s<EOL><DEDENT><DEDENT><DEDENT><DEDENT>visitor = VersionVisitor()<EOL>visitor.visit(pt)<EOL>return visitor.version<EOL>", "docstring": "Statically parse the version number from __init__.py\n\nCommandLine:\n    python -c \"import setup; print(setup.parse_version('ubelt'))\"", "id": "f5121:m0"}
{"signature": "def parse_description():", "body": "from os.path import dirname, join, exists<EOL>readme_fpath = join(dirname(__file__), '<STR_LIT>')<EOL>if exists(readme_fpath):<EOL><INDENT>with open(readme_fpath, '<STR_LIT:r>') as f:<EOL><INDENT>text = f.read()<EOL><DEDENT>return text<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Parse the description in the README file\n\nCommandLine:\n    pandoc --from=markdown --to=rst --output=README.rst README.md\n    python -c \"import setup; print(setup.parse_description())\"", "id": "f5121:m1"}
{"signature": "def _register_numpy_extensions(self):", "body": "<EOL>import numpy as np<EOL>numpy_floating_types = (np.float16, np.float32, np.float64)<EOL>if hasattr(np, '<STR_LIT>'):  <EOL><INDENT>numpy_floating_types = numpy_floating_types + (np.float128,)<EOL><DEDENT>@self.add_iterable_check<EOL>def is_object_ndarray(data):<EOL><INDENT>return isinstance(data, np.ndarray) and data.dtype.kind == '<STR_LIT:O>'<EOL><DEDENT>@self.register(np.ndarray)<EOL>def hash_numpy_array(data):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if data.dtype.kind == '<STR_LIT:O>':<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise TypeError(msg)<EOL><DEDENT>else:<EOL><INDENT>header = b'<STR_LIT>'.join(_hashable_sequence((len(data.shape), data.shape)))<EOL>dtype = b'<STR_LIT>'.join(_hashable_sequence(data.dtype.descr))<EOL>hashable = header + dtype + data.tobytes()<EOL><DEDENT>prefix = b'<STR_LIT>'<EOL>return prefix, hashable<EOL><DEDENT>@self.register((np.int64, np.int32, np.int16, np.int8) +<EOL>(np.uint64, np.uint32, np.uint16, np.uint8))<EOL>def _hash_numpy_int(data):<EOL><INDENT>return _convert_to_hashable(int(data))<EOL><DEDENT>@self.register(numpy_floating_types)<EOL>def _hash_numpy_float(data):<EOL><INDENT>return _convert_to_hashable(float(data))<EOL><DEDENT>@self.register(np.random.RandomState)<EOL>def _hash_numpy_random_state(data):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>hashable = b'<STR_LIT>'.join(_hashable_sequence(data.get_state()))<EOL>prefix = b'<STR_LIT>'<EOL>return prefix, hashable<EOL><DEDENT>", "docstring": "Numpy extensions are builtin", "id": "f5122:c0:m4"}
{"signature": "def _update_hasher(hasher, data, types=True):", "body": "<EOL>if isinstance(data, (tuple, list, zip)):<EOL><INDENT>needs_iteration = True<EOL><DEDENT>else:<EOL><INDENT>needs_iteration = any(check(data) for check in<EOL>_HASHABLE_EXTENSIONS.iterable_checks)<EOL><DEDENT>if needs_iteration:<EOL><INDENT>SEP = b'<STR_LIT>'<EOL>ITER_PREFIX = b'<STR_LIT>'<EOL>ITER_SUFFIX = b'<STR_LIT>'<EOL>iter_ = iter(data)<EOL>hasher.update(ITER_PREFIX)<EOL>try:<EOL><INDENT>for item in iter_:<EOL><INDENT>prefix, hashable = _convert_to_hashable(item, types)<EOL>binary_data = prefix + hashable + SEP<EOL>hasher.update(binary_data)<EOL><DEDENT><DEDENT>except TypeError:<EOL><INDENT>_update_hasher(hasher, item, types)<EOL>for item in iter_:<EOL><INDENT>_update_hasher(hasher, item, types)<EOL>hasher.update(SEP)<EOL><DEDENT><DEDENT>hasher.update(ITER_SUFFIX)<EOL><DEDENT>else:<EOL><INDENT>prefix, hashable = _convert_to_hashable(data, types)<EOL>binary_data = prefix + hashable<EOL>hasher.update(binary_data)<EOL><DEDENT>", "docstring": "Converts `data` into a byte representation and calls update on the hasher\n`hashlib.HASH` algorithm.\n\nArgs:\n    hasher (HASH): instance of a hashlib algorithm\n    data (object): ordered data with structure\n    types (bool): include type prefixes in the hash\n\nExample:\n    >>> hasher = hashlib.sha512()\n    >>> data = [1, 2, ['a', 2, 'c']]\n    >>> _update_hasher(hasher, data)\n    >>> print(hasher.hexdigest()[0:8])\n    e2c67675\n\n    2ba8d82b", "id": "f5122:m5"}
{"signature": "def _rectify_hashlen(hashlen):", "body": "if hashlen is NoParam or hashlen == '<STR_LIT:default>':<EOL><INDENT>return DEFAULT_HASHLEN<EOL><DEDENT>else:<EOL><INDENT>return hashlen<EOL><DEDENT>", "docstring": "Example:\n    >>> assert _rectify_hashlen(NoParam) is DEFAULT_HASHLEN\n    >>> assert _rectify_hashlen(8) == 8", "id": "f5122:m2"}
{"signature": "def _register_builtin_class_extensions(self):", "body": "@self.register(uuid.UUID)<EOL>def _hash_uuid(data):<EOL><INDENT>hashable = data.bytes<EOL>prefix = b'<STR_LIT>'<EOL>return prefix, hashable<EOL><DEDENT>@self.register(OrderedDict)<EOL>def _hash_ordered_dict(data):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>hashable = b'<STR_LIT>'.join(_hashable_sequence(list(data.items())))<EOL>prefix = b'<STR_LIT>'<EOL>return prefix, hashable<EOL><DEDENT>", "docstring": "Register hashing extensions for a selection of classes included in\npython stdlib.\n\nExample:\n    >>> data = uuid.UUID('7e9d206b-dc02-4240-8bdb-fffe858121d0')\n    >>> print(hash_data(data, base='abc', hasher='sha512', types=True)[0:8])\n    cryarepd\n    >>> data = OrderedDict([('a', 1), ('b', 2), ('c', [1, 2, 3]),\n    >>>                     (4, OrderedDict())])\n    >>> print(hash_data(data, base='abc', hasher='sha512', types=True)[0:8])\n    qjspicvv\n\n    gpxtclct", "id": "f5122:c0:m5"}
{"signature": "def _convert_hexstr_base(hexstr, base):", "body": "if base is _ALPHABET_16:<EOL><INDENT>return hexstr<EOL><DEDENT>baselen = len(base)<EOL>x = int(hexstr, <NUM_LIT:16>)  <EOL>if x == <NUM_LIT:0>:<EOL><INDENT>return '<STR_LIT:0>'<EOL><DEDENT>sign = <NUM_LIT:1> if x > <NUM_LIT:0> else -<NUM_LIT:1><EOL>x *= sign<EOL>digits = []<EOL>while x:<EOL><INDENT>digits.append(base[x % baselen])<EOL>x //= baselen<EOL><DEDENT>if sign < <NUM_LIT:0>:<EOL><INDENT>digits.append('<STR_LIT:->')<EOL><DEDENT>digits.reverse()<EOL>newbase_str = '<STR_LIT>'.join(digits)<EOL>return newbase_str<EOL>", "docstring": "r\"\"\"\n    Packs a long hexstr into a shorter length string with a larger base.\n\n    Args:\n        hexstr (str): string of hexidecimal symbols to convert\n        base (list): symbols of the conversion base\n\n    Example:\n        >>> print(_convert_hexstr_base('ffffffff', _ALPHABET_26))\n        nxmrlxv\n        >>> print(_convert_hexstr_base('0', _ALPHABET_26))\n        0\n        >>> print(_convert_hexstr_base('-ffffffff', _ALPHABET_26))\n        -nxmrlxv\n        >>> print(_convert_hexstr_base('aafffff1', _ALPHABET_16))\n        aafffff1\n\n    Sympy:\n        >>> import sympy as sy\n        >>> # Determine the length savings with lossless conversion\n        >>> consts = dict(hexbase=16, hexlen=256, baselen=27)\n        >>> symbols = sy.symbols('hexbase, hexlen, baselen, newlen')\n        >>> haexbase, hexlen, baselen, newlen = symbols\n        >>> eqn = sy.Eq(16 ** hexlen,  baselen ** newlen)\n        >>> newlen_ans = sy.solve(eqn, newlen)[0].subs(consts).evalf()\n        >>> print('newlen_ans = %r' % (newlen_ans,))\n        >>> # for a 26 char base we can get 216\n        >>> print('Required length for lossless conversion len2 = %r' % (len2,))\n        >>> def info(base, len):\n        ...     bits = base ** len\n        ...     print('base = %r' % (base,))\n        ...     print('len = %r' % (len,))\n        ...     
print('bits = %r' % (bits,))\n        >>> info(16, 256)\n        >>> info(27, 16)\n        >>> info(27, 64)\n        >>> info(27, 216)", "id": "f5122:m6"}
{"signature": "def hash_file(fpath, blocksize=<NUM_LIT>, stride=<NUM_LIT:1>, hasher=NoParam,<EOL>hashlen=NoParam, base=NoParam):", "body": "base = _rectify_base(base)<EOL>hashlen = _rectify_hashlen(hashlen)<EOL>hasher = _rectify_hasher(hasher)()<EOL>with open(fpath, '<STR_LIT:rb>') as file:<EOL><INDENT>buf = file.read(blocksize)<EOL>if stride > <NUM_LIT:1>:<EOL><INDENT>while len(buf) > <NUM_LIT:0>:<EOL><INDENT>hasher.update(buf)<EOL>file.seek(blocksize * (stride - <NUM_LIT:1>), <NUM_LIT:1>)<EOL>buf = file.read(blocksize)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>while len(buf) > <NUM_LIT:0>:<EOL><INDENT>hasher.update(buf)<EOL>buf = file.read(blocksize)<EOL><DEDENT><DEDENT><DEDENT>text = _digest_hasher(hasher, hashlen, base)<EOL>return text<EOL>", "docstring": "Hashes the data in a file on disk.\n\nArgs:\n    fpath (PathLike):  file path string\n\n    blocksize (int): 2 ** 16. Affects speed of reading file\n\n    stride (int): strides > 1 skip data to hash, useful for faster\n                  hashing, but less accurate, also makes hash dependant on\n                  blocksize.\n\n    hasher (HASH): hash algorithm from hashlib, defaults to `sha512`.\n\n    hashlen (int): maximum number of symbols in the returned hash. If\n        not specified, all are returned.\n\n    base (list, str): list of symbols or shorthand key. Valid keys are\n        'abc', 'hex', and 'dec'. 
Defaults to 'hex'.\n\nNotes:\n    For better hashes keep stride = 1\n    For faster hashes set stride > 1\n    blocksize matters when stride > 1\n\nReferences:\n    http://stackoverflow.com/questions/3431825/md5-checksum-of-a-file\n    http://stackoverflow.com/questions/5001893/when-to-use-sha-1-vs-sha-2\n\nExample:\n    >>> import ubelt as ub\n    >>> from os.path import join\n    >>> fpath = join(ub.ensure_app_cache_dir('ubelt'), 'tmp.txt')\n    >>> ub.writeto(fpath, 'foobar')\n    >>> print(ub.hash_file(fpath, hasher='sha1', base='hex'))\n    8843d7f92416211de9ebb963ff4ce28125932878\n\nExample:\n    >>> import ubelt as ub\n    >>> from os.path import join\n    >>> fpath = ub.touch(join(ub.ensure_app_cache_dir('ubelt'), 'empty_file'))\n    >>> # Test that the output is the same as sha1sum\n    >>> if ub.find_exe('sha1sum'):\n    >>>     want = ub.cmd(['sha1sum', fpath], verbose=2)['out'].split(' ')[0]\n    >>>     got = ub.hash_file(fpath, hasher='sha1')\n    >>>     print('want = {!r}'.format(want))\n    >>>     print('got = {!r}'.format(got))\n    >>>     assert want.endswith(got)\n    >>> # Do the same for sha512 sum and md5sum\n    >>> if ub.find_exe('sha512sum'):\n    >>>     want = ub.cmd(['sha512sum', fpath], verbose=2)['out'].split(' ')[0]\n    >>>     got = ub.hash_file(fpath, hasher='sha512')\n    >>>     print('want = {!r}'.format(want))\n    >>>     print('got = {!r}'.format(got))\n    >>>     assert want.endswith(got)\n    >>> if ub.find_exe('md5sum'):\n    >>>     want = ub.cmd(['md5sum', fpath], verbose=2)['out'].split(' ')[0]\n    >>>     got = ub.hash_file(fpath, hasher='md5')\n    >>>     print('want = {!r}'.format(want))\n    >>>     print('got = {!r}'.format(got))\n    >>>     assert want.endswith(got)", "id": "f5122:m9"}
{"signature": "def _convert_to_hashable(data, types=True):", "body": "<EOL>if data is None:<EOL><INDENT>hashable = b'<STR_LIT>'<EOL>prefix = b'<STR_LIT>'<EOL><DEDENT>elif isinstance(data, six.binary_type):<EOL><INDENT>hashable = data<EOL>prefix = b'<STR_LIT>'<EOL><DEDENT>elif isinstance(data, six.text_type):<EOL><INDENT>hashable = data.encode('<STR_LIT:utf-8>')<EOL>prefix = b'<STR_LIT>'<EOL><DEDENT>elif isinstance(data, _intlike):<EOL><INDENT>hashable = _int_to_bytes(data)<EOL>prefix = b'<STR_LIT>'<EOL><DEDENT>elif isinstance(data, float):<EOL><INDENT>a, b = float(data).as_integer_ratio()<EOL>hashable = _int_to_bytes(a) + b'<STR_LIT:/>' +  _int_to_bytes(b)<EOL>prefix = b'<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>hash_func = _HASHABLE_EXTENSIONS.lookup(data)<EOL>prefix, hashable = hash_func(data)<EOL><DEDENT>if types:<EOL><INDENT>return prefix, hashable<EOL><DEDENT>else:<EOL><INDENT>return b'<STR_LIT>', hashable<EOL><DEDENT>", "docstring": "r\"\"\"\n    Converts `data` into a hashable byte representation if an appropriate\n    hashing function is known.\n\n    Args:\n        data (object): ordered data with structure\n        types (bool): include type prefixes in the hash\n\n    Returns:\n        tuple(bytes, bytes): prefix, hashable:\n            a prefix hinting the original data type and the byte representation\n            of `data`.\n\n    Raises:\n        TypeError : if data has no registered hash methods\n\n    Example:\n        >>> assert _convert_to_hashable(None) == (b'NULL', b'NONE')\n        >>> assert _convert_to_hashable('string') == (b'TXT', b'string')\n        >>> assert _convert_to_hashable(1) == (b'INT', b'\\x01')\n        >>> assert _convert_to_hashable(1.0) == (b'FLT', b'\\x01/\\x01')\n        >>> assert _convert_to_hashable(_intlike[-1](1)) == (b'INT', b'\\x01')", "id": "f5122:m4"}
{"signature": "def codeblock(block_str):", "body": "import textwrap  <EOL>return textwrap.dedent(block_str).strip('<STR_LIT:\\n>')<EOL>", "docstring": "Wraps multiline string blocks and returns unindented code.\nUseful for templated code defined in indented parts of code.\n\nArgs:\n    block_str (str): typically in the form of a multiline string\n\nReturns:\n    str: the unindented string\n\nCommandLine:\n    python -m ubelt.util_str codeblock\n\nExample:\n    >>> from ubelt.util_str import *  # NOQA\n    >>> # Simulate an indented part of code\n    >>> if True:\n    >>>     # notice the indentation on this will be normal\n    >>>     codeblock_version = codeblock(\n    ...             '''\n    ...             def foo():\n    ...                 return 'bar'\n    ...             '''\n    ...         )\n    >>>     # notice the indentation and newlines on this will be odd\n    >>>     normal_version = ('''\n    ...         def foo():\n    ...             return 'bar'\n    ...     ''')\n    >>> assert normal_version != codeblock_version\n    >>> print('Without codeblock')\n    >>> print(normal_version)\n    >>> print('With codeblock')\n    >>> print(codeblock_version)", "id": "f5124:m1"}
{"signature": "def _join_itemstrs(itemstrs, itemsep, newlines, _leaf_info, nobraces,<EOL>trailing_sep, compact_brace, lbr, rbr):", "body": "<EOL>use_newline = newlines > <NUM_LIT:0><EOL>if newlines < <NUM_LIT:0>:<EOL><INDENT>use_newline = (-newlines) < _leaf_info['<STR_LIT>']<EOL><DEDENT>if use_newline:<EOL><INDENT>sep = '<STR_LIT>'<EOL>if nobraces:<EOL><INDENT>body_str = sep.join(itemstrs)<EOL>if trailing_sep and len(itemstrs) > <NUM_LIT:0>:<EOL><INDENT>body_str += '<STR_LIT:U+002C>'<EOL><DEDENT>retstr = body_str<EOL><DEDENT>else:<EOL><INDENT>if compact_brace:<EOL><INDENT>indented = itemstrs<EOL><DEDENT>else:<EOL><INDENT>import ubelt as ub<EOL>prefix = '<STR_LIT:U+0020>' * <NUM_LIT:4><EOL>indented = [ub.indent(s, prefix) for s in itemstrs]<EOL><DEDENT>body_str = sep.join(indented)<EOL>if trailing_sep and len(itemstrs) > <NUM_LIT:0>:<EOL><INDENT>body_str += '<STR_LIT:U+002C>'<EOL><DEDENT>if compact_brace:<EOL><INDENT>braced_body_str = (lbr + body_str.replace('<STR_LIT:\\n>', '<STR_LIT>') + rbr)<EOL><DEDENT>else:<EOL><INDENT>braced_body_str = (lbr + '<STR_LIT:\\n>' + body_str + '<STR_LIT:\\n>' + rbr)<EOL><DEDENT>retstr = braced_body_str<EOL><DEDENT><DEDENT>else:<EOL><INDENT>sep = '<STR_LIT:U+002C>' + itemsep<EOL>body_str = sep.join(itemstrs)<EOL>if trailing_sep and len(itemstrs) > <NUM_LIT:0>:<EOL><INDENT>body_str += '<STR_LIT:U+002C>'<EOL><DEDENT>retstr  = (lbr + body_str +  rbr)<EOL><DEDENT>return retstr<EOL>", "docstring": "Joins string-ified items with separators newlines and container-braces.", "id": "f5126:m7"}
{"signature": "def repr2(data, **kwargs):", "body": "custom_extensions = kwargs.get('<STR_LIT>', None)<EOL>_return_info = kwargs.get('<STR_LIT>', False)<EOL>kwargs['<STR_LIT>'] = _rectify_root_info(kwargs.get('<STR_LIT>', None))<EOL>outstr = None<EOL>_leaf_info = None<EOL>if custom_extensions:<EOL><INDENT>func = custom_extensions.lookup(data)<EOL>if func is not None:<EOL><INDENT>outstr = func(data, **kwargs)<EOL><DEDENT><DEDENT>if outstr is None:<EOL><INDENT>if isinstance(data, dict):<EOL><INDENT>outstr, _leaf_info = _format_dict(data, **kwargs)<EOL><DEDENT>elif isinstance(data, (list, tuple, set, frozenset)):<EOL><INDENT>outstr, _leaf_info = _format_list(data, **kwargs)<EOL><DEDENT><DEDENT>if outstr is None:<EOL><INDENT>func = _FORMATTER_EXTENSIONS.lookup(data)<EOL>if func is not None:<EOL><INDENT>outstr = func(data, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>outstr = _format_object(data, **kwargs)<EOL><DEDENT><DEDENT>if _return_info:<EOL><INDENT>_leaf_info = _rectify_leaf_info(_leaf_info)<EOL>return outstr, _leaf_info<EOL><DEDENT>else:<EOL><INDENT>return outstr<EOL><DEDENT>", "docstring": "Makes a pretty and easy-to-doctest string representation!\n\nThis is an alternative to repr, and `pprint.pformat` that attempts to be\nboth more configurable and generate output that is consistent between\npython versions.\n\nNotes:\n    This function has many keyword arguments that can be used to customize\n    the final representation. For convinience some of the more frequently\n    used kwargs have short aliases. See `Args` for more details.\n\nArgs:\n    data (object): an arbitrary python object\n    **kwargs: see `the Kwargs` section\n\nKwargs:\n    si, stritems, (bool):\n        dict/list items use str instead of repr\n\n    strkeys, sk (bool):\n        dict keys use str instead of repr\n\n    strvals, sv (bool):\n        dict values use str instead of repr\n\n    nl, newlines (int | bool):\n        number of top level nestings to place a newline after. 
If true all\n        items are followed by newlines regardless of nesting level.\n        Defaults to 1 for lists and True for dicts.\n\n    nobr, nobraces (bool, default=False):\n        if True, text will not contain outer braces for containers\n\n    cbr, compact_brace (bool, default=False):\n        if True, braces are compactified (i.e. they will not have newlines\n        placed directly after them, think java / K&R / 1TBS)\n\n    trailsep, trailing_sep (bool):\n        if True, a separator is placed after the last item in a sequence.\n        By default this is True if there are any `nl > 0`.\n\n    explicit (bool, default=False):\n        changes dict representation from `{k1: v1, ...}` to\n        `dict(k1=v1, ...)`.\n\n    precision (int, default=None):\n        if specified floats are formatted with this precision\n\n    kvsep (str, default=': '):\n        separator between keys and values\n\n    itemsep (str, default=' '):\n        separator between items\n\n    sort (bool):\n        if True, attempts to sort all unordered collections in the returned\n        text. NOTE: currently if True this will sort lists, this may not be\n        a correct thing to do, as such the behavior of this arg is subject\n        to change.\n\n    suppress_small (bool):\n        passed to `numpy.array2string` for ndarrays\n\n    max_line_width (int):\n        passed to `numpy.array2string` for ndarrays\n\n    with_dtype (bool):\n        only relevant to ndarrays. if True includes the dtype.\n\nReturns:\n    str: outstr: output string\n\nNotes:\n    There are also internal kwargs, which should not be used:\n        _return_info (bool):  return information about child context\n        _root_info (depth): information about parent context\n\nCommandLine:\n    python -m ubelt.util_format repr2:0\n    python -m ubelt.util_format repr2:1\n\nExample:\n    >>> from ubelt.util_format import *\n    >>> import ubelt as ub\n    >>> dict_ = {\n    ...     
'custom_types': [slice(0, 1, None), 1/3],\n    ...     'nest_dict': {'k1': [1, 2, {3: {4, 5}}],\n    ...                   'key2': [1, 2, {3: {4, 5}}],\n    ...                   'key3': [1, 2, {3: {4, 5}}],\n    ...                   },\n    ...     'nest_dict2': {'k': [1, 2, {3: {4, 5}}]},\n    ...     'nested_tuples': [tuple([1]), tuple([2, 3]), frozenset([4, 5, 6])],\n    ...     'one_tup': tuple([1]),\n    ...     'simple_dict': {'spam': 'eggs', 'ham': 'jam'},\n    ...     'simple_list': [1, 2, 'red', 'blue'],\n    ...     'odict': ub.odict([(1, '1'), (2, '2')]),\n    ... }\n    >>> result = repr2(dict_, nl=3, precision=2); print(result)\n    >>> result = repr2(dict_, nl=2, precision=2); print(result)\n    >>> result = repr2(dict_, nl=1, precision=2); print(result)\n    >>> result = repr2(dict_, nl=1, precision=2, itemsep='', explicit=True); print(result)\n    >>> result = repr2(dict_, nl=1, precision=2, nobr=1, itemsep='', explicit=True); print(result)\n    >>> result = repr2(dict_, nl=3, precision=2, cbr=True); print(result)\n    >>> result = repr2(dict_, nl=3, precision=2, si=True); print(result)\n    >>> result = repr2(dict_, nl=3, sort=True); print(result)\n    >>> result = repr2(dict_, nl=3, sort=False, trailing_sep=False); print(result)\n    >>> result = repr2(dict_, nl=3, sort=False, trailing_sep=False, nobr=True); print(result)\n\nExample:\n    >>> from ubelt.util_format import *\n    >>> def _nest(d, w):\n    ...     if d == 0:\n    ...         return {}\n    ...     else:\n    ...         return {'n{}'.format(d): _nest(d - 1, w + 1), 'm{}'.format(d): _nest(d - 1, w + 1)}\n    >>> dict_ = _nest(d=4, w=1)\n    >>> result = repr2(dict_, nl=6, precision=2, cbr=1)\n    >>> print('---')\n    >>> print(result)\n    >>> result = repr2(dict_, nl=-1, precision=2)\n    >>> print('---')\n    >>> print(result)", "id": "f5126:m0"}
{"signature": "def register(self, type):", "body": "def _decorator(func):<EOL><INDENT>if isinstance(type, tuple):<EOL><INDENT>for t in type:<EOL><INDENT>self.func_registry[t] = func<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.func_registry[type] = func<EOL><DEDENT>return func<EOL><DEDENT>return _decorator<EOL>", "docstring": "Registers a custom formatting function with ub.repr2", "id": "f5126:c0:m1"}
{"signature": "def _register_numpy_extensions(self):", "body": "import numpy as np<EOL>@self.register(np.ndarray)<EOL>def format_ndarray(data, **kwargs):<EOL><INDENT>import re<EOL>strvals = kwargs.get('<STR_LIT>', kwargs.get('<STR_LIT>', False))<EOL>itemsep = kwargs.get('<STR_LIT>', '<STR_LIT:U+0020>')<EOL>precision = kwargs.get('<STR_LIT>', None)<EOL>suppress_small = kwargs.get('<STR_LIT>', None)<EOL>max_line_width = kwargs.get('<STR_LIT>', None)<EOL>with_dtype = kwargs.get('<STR_LIT>', kwargs.get('<STR_LIT>', not strvals))<EOL>newlines = kwargs.pop('<STR_LIT>', kwargs.pop('<STR_LIT>', <NUM_LIT:1>))<EOL>separator = '<STR_LIT:U+002C>' + itemsep<EOL>if strvals:<EOL><INDENT>prefix = '<STR_LIT>'<EOL>suffix = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>modname = type(data).__module__<EOL>np_nice = '<STR_LIT>'<EOL>modname = re.sub('<STR_LIT>', np_nice, modname)<EOL>modname = re.sub('<STR_LIT>', '<STR_LIT>', modname)<EOL>class_name = type(data).__name__<EOL>if class_name == '<STR_LIT>':<EOL><INDENT>class_name = '<STR_LIT>'<EOL><DEDENT>prefix = modname + '<STR_LIT:.>' + class_name + '<STR_LIT:(>'<EOL>if with_dtype:<EOL><INDENT>dtype_repr = data.dtype.name<EOL>suffix = '<STR_LIT>'.format(itemsep, np_nice, dtype_repr)<EOL><DEDENT>else:<EOL><INDENT>suffix = '<STR_LIT:)>'<EOL><DEDENT><DEDENT>if not strvals and data.size == <NUM_LIT:0> and data.shape != (<NUM_LIT:0>,):<EOL><INDENT>prefix = modname + '<STR_LIT>'<EOL>body = repr(tuple(map(int, data.shape)))<EOL><DEDENT>else:<EOL><INDENT>body = np.array2string(data, precision=precision,<EOL>separator=separator,<EOL>suppress_small=suppress_small,<EOL>prefix=prefix,<EOL>max_line_width=max_line_width)<EOL><DEDENT>if not newlines:<EOL><INDENT>body = re.sub('<STR_LIT>', '<STR_LIT>', body)<EOL><DEDENT>formatted = prefix + body + suffix<EOL>return formatted<EOL><DEDENT>self.register(np.float32)(self.func_registry[float])<EOL>", "docstring": "CommandLine:\n    python -m ubelt.util_format 
FormatterExtensions._register_numpy_extensions\n\nExample:\n    >>> import sys\n    >>> import pytest\n    >>> import ubelt as ub\n    >>> if not ub.modname_to_modpath('numpy'):\n    ...     raise pytest.skip()\n    >>> # xdoctest: +IGNORE_WHITESPACE\n    >>> import numpy as np\n    >>> data = np.array([[.2, 42, 5], [21.2, 3, .4]])\n    >>> print(ub.repr2(data))\n    np.array([[ 0.2, 42. ,  5. ],\n              [21.2,  3. ,  0.4]], dtype=np.float64)\n    >>> print(ub.repr2(data, with_dtype=False))\n    np.array([[ 0.2, 42. ,  5. ],\n              [21.2,  3. ,  0.4]])\n    >>> print(ub.repr2(data, strvals=True))\n    [[ 0.2, 42. ,  5. ],\n     [21.2,  3. ,  0.4]]\n    >>> data = np.empty((0, 10), dtype=np.float64)\n    >>> print(ub.repr2(data, strvals=False))\n    np.empty((0, 10), dtype=np.float64)\n    >>> print(ub.repr2(data, strvals=True))\n    []\n    >>> data = np.ma.empty((0, 10), dtype=np.float64)\n    >>> print(ub.repr2(data, strvals=False))\n    np.ma.empty((0, 10), dtype=np.float64)", "id": "f5126:c0:m3"}
{"signature": "def _list_itemstrs(list_, **kwargs):", "body": "items = list(list_)<EOL>kwargs['<STR_LIT>'] = True<EOL>_tups = [repr2(item, **kwargs) for item in items]<EOL>itemstrs = [t[<NUM_LIT:0>] for t in _tups]<EOL>max_height = max([t[<NUM_LIT:1>]['<STR_LIT>'] for t in _tups]) if _tups else <NUM_LIT:0><EOL>_leaf_info = {<EOL>'<STR_LIT>': max_height + <NUM_LIT:1>,<EOL>}<EOL>sort = kwargs.get('<STR_LIT>', None)<EOL>if sort is None:<EOL><INDENT>sort = isinstance(list_, (set, frozenset))<EOL><DEDENT>if sort:<EOL><INDENT>itemstrs = _sort_itemstrs(items, itemstrs)<EOL><DEDENT>return itemstrs, _leaf_info<EOL>", "docstring": "Create a string representation for each item in a list.", "id": "f5126:m9"}
{"signature": "def _dict_itemstrs(dict_, **kwargs):", "body": "import ubelt as ub<EOL>explicit = kwargs.get('<STR_LIT>', False)<EOL>kwargs['<STR_LIT>'] = _rectify_countdown_or_bool(explicit)<EOL>precision = kwargs.get('<STR_LIT>', None)<EOL>kvsep = kwargs.get('<STR_LIT>', '<STR_LIT>')<EOL>if explicit:<EOL><INDENT>kvsep = '<STR_LIT:=>'<EOL><DEDENT>def make_item_str(key, val):<EOL><INDENT>if explicit or kwargs.get('<STR_LIT>', False):<EOL><INDENT>key_str = six.text_type(key)<EOL><DEDENT>else:<EOL><INDENT>key_str = repr2(key, precision=precision, newlines=<NUM_LIT:0>)<EOL><DEDENT>prefix = key_str + kvsep<EOL>kwargs['<STR_LIT>'] = True<EOL>val_str, _leaf_info = repr2(val, **kwargs)<EOL>pos = val_str.find('<STR_LIT:\\n>')<EOL>first_line = val_str if pos == -<NUM_LIT:1> else val_str[:pos]<EOL>compact_brace = kwargs.get('<STR_LIT>', kwargs.get('<STR_LIT>', False))<EOL>if compact_brace or not first_line.rstrip().endswith(tuple('<STR_LIT>')):<EOL><INDENT>rest = '<STR_LIT>' if pos == -<NUM_LIT:1> else val_str[pos:]<EOL>val_str = first_line.lstrip() + rest<EOL>if '<STR_LIT:\\n>' in prefix:<EOL><INDENT>item_str = prefix + val_str<EOL><DEDENT>else:<EOL><INDENT>item_str = ub.hzcat([prefix, val_str])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>item_str = prefix + val_str<EOL><DEDENT>return item_str, _leaf_info<EOL><DEDENT>items = list(six.iteritems(dict_))<EOL>_tups = [make_item_str(key, val) for (key, val) in items]<EOL>itemstrs = [t[<NUM_LIT:0>] for t in _tups]<EOL>max_height = max([t[<NUM_LIT:1>]['<STR_LIT>'] for t in _tups]) if _tups else <NUM_LIT:0><EOL>_leaf_info = {<EOL>'<STR_LIT>': max_height + <NUM_LIT:1>,<EOL>}<EOL>sort = kwargs.get('<STR_LIT>', None)<EOL>if sort is None:<EOL><INDENT>sort = True<EOL><DEDENT>if isinstance(dict_, collections.OrderedDict):<EOL><INDENT>sort = False<EOL><DEDENT>if sort:<EOL><INDENT>itemstrs = _sort_itemstrs(items, itemstrs)<EOL><DEDENT>return itemstrs, _leaf_info<EOL>", "docstring": "Create a string representation for each item in a 
dict.\n\nExample:\n    >>> from ubelt.util_format import *\n    >>> dict_ =  {'b': .1, 'l': 'st', 'g': 1.0, 's': 10, 'm': 0.9, 'w': .5}\n    >>> kwargs = {'strkeys': True}\n    >>> itemstrs, _ = _dict_itemstrs(dict_, **kwargs)\n    >>> char_order = [p[0] for p in itemstrs]\n    >>> assert char_order == ['b', 'g', 'l', 'm', 's', 'w']", "id": "f5126:m8"}
{"signature": "def _make_signature_key(args, kwargs):", "body": "kwitems = kwargs.items()<EOL>if (sys.version_info.major, sys.version_info.minor) < (<NUM_LIT:3>, <NUM_LIT:7>):  <EOL><INDENT>kwitems = sorted(kwitems)<EOL><DEDENT>kwitems = tuple(kwitems)<EOL>try:<EOL><INDENT>key = _hashable(args), _hashable(kwitems)<EOL><DEDENT>except TypeError:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(args, kwargs))<EOL><DEDENT>return key<EOL>", "docstring": "Transforms function args into a key that can be used by the cache\n\nCommandLine:\n    xdoctest -m ubelt.util_memoize _make_signature_key\n\nExample:\n    >>> args = (4, [1, 2])\n    >>> kwargs = {'a': 'b'}\n    >>> key = _make_signature_key(args, kwargs)\n    >>> print('key = {!r}'.format(key))\n    >>> # Some mutable types cannot be handled by ub.hash_data\n    >>> import pytest\n    >>> import six\n    >>> if six.PY2:\n    >>>     import collections as abc\n    >>> else:\n    >>>     from collections import abc\n    >>> with pytest.raises(TypeError):\n    >>>     _make_signature_key((4, [1, 2], {1: 2, 'a': 'b'}), kwargs={})\n    >>> class Dummy(abc.MutableSet):\n    >>>     def __contains__(self, item): return None\n    >>>     def __iter__(self): return iter([])\n    >>>     def __len__(self): return 0\n    >>>     def add(self, item, loc): return None\n    >>>     def discard(self, item): return None\n    >>> with pytest.raises(TypeError):\n    >>>     _make_signature_key((Dummy(),), kwargs={})", "id": "f5129:m1"}
{"signature": "def memoize(func):", "body": "cache = {}<EOL>@functools.wraps(func)<EOL>def memoizer(*args, **kwargs):<EOL><INDENT>key = _make_signature_key(args, kwargs)<EOL>if key not in cache:<EOL><INDENT>cache[key] = func(*args, **kwargs)<EOL><DEDENT>return cache[key]<EOL><DEDENT>memoizer.cache = cache<EOL>return memoizer<EOL>", "docstring": "memoization decorator that respects args and kwargs\n\nReferences:\n    https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize\n\nArgs:\n    func (Callable): live python function\n\nReturns:\n    func: memoized wrapper\n\nCommandLine:\n    xdoctest -m ubelt.util_memoize memoize\n\nExample:\n    >>> import ubelt as ub\n    >>> closure = {'a': 'b', 'c': 'd'}\n    >>> incr = [0]\n    >>> def foo(key):\n    >>>     value = closure[key]\n    >>>     incr[0] += 1\n    >>>     return value\n    >>> foo_memo = ub.memoize(foo)\n    >>> assert foo('a') == 'b' and foo('c') == 'd'\n    >>> assert incr[0] == 2\n    >>> print('Call memoized version')\n    >>> assert foo_memo('a') == 'b' and foo_memo('c') == 'd'\n    >>> assert incr[0] == 4\n    >>> assert foo_memo('a') == 'b' and foo_memo('c') == 'd'\n    >>> print('Counter should no longer increase')\n    >>> assert incr[0] == 4\n    >>> print('Closure changes result without memoization')\n    >>> closure = {'a': 0, 'c': 1}\n    >>> assert foo('a') == 0 and foo('c') == 1\n    >>> assert incr[0] == 6\n    >>> assert foo_memo('a') == 'b' and foo_memo('c') == 'd'", "id": "f5129:m2"}
{"signature": "def _syspath_modname_to_modpath(modname, sys_path=None, exclude=None):", "body": "def _isvalid(modpath, base):<EOL><INDENT>subdir = dirname(modpath)<EOL>while subdir and subdir != base:<EOL><INDENT>if not exists(join(subdir, '<STR_LIT>')):<EOL><INDENT>return False<EOL><DEDENT>subdir = dirname(subdir)<EOL><DEDENT>return True<EOL><DEDENT>_fname_we = modname.replace('<STR_LIT:.>', os.path.sep)<EOL>candidate_fnames = [<EOL>_fname_we + '<STR_LIT>',<EOL>]<EOL>candidate_fnames += [_fname_we + ext for ext in _platform_pylib_exts()]<EOL>if sys_path is None:<EOL><INDENT>sys_path = sys.path<EOL><DEDENT>candidate_dpaths = ['<STR_LIT:.>' if p == '<STR_LIT>' else p for p in sys_path]<EOL>if exclude:<EOL><INDENT>def normalize(p):<EOL><INDENT>if sys.platform.startswith('<STR_LIT:win32>'):  <EOL><INDENT>return realpath(p).lower()<EOL><DEDENT>else:<EOL><INDENT>return realpath(p)<EOL><DEDENT><DEDENT>real_exclude = {normalize(p) for p in exclude}<EOL>candidate_dpaths = [p for p in candidate_dpaths<EOL>if normalize(p) not in real_exclude]<EOL><DEDENT>for dpath in candidate_dpaths:<EOL><INDENT>modpath = join(dpath, _fname_we)<EOL>if exists(modpath):<EOL><INDENT>if isfile(join(modpath, '<STR_LIT>')):<EOL><INDENT>if _isvalid(modpath, dpath):<EOL><INDENT>return modpath<EOL><DEDENT><DEDENT><DEDENT>for fname in candidate_fnames:<EOL><INDENT>modpath = join(dpath, fname)<EOL>if isfile(modpath):<EOL><INDENT>if _isvalid(modpath, dpath):<EOL><INDENT>return modpath<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "syspath version of modname_to_modpath\n\nArgs:\n    modname (str): name of module to find\n    sys_path (List[PathLike], default=None):\n        if specified overrides `sys.path`\n    exclude (List[PathLike], default=None):\n        list of directory paths. 
if specified prevents these directories\n        from being searched.\n\nNotes:\n    This is much slower than the pkgutil mechanisms.\n\nCommandLine:\n    python -m xdoctest.static_analysis _syspath_modname_to_modpath\n\nExample:\n    >>> print(_syspath_modname_to_modpath('xdoctest.static_analysis'))\n    ...static_analysis.py\n    >>> print(_syspath_modname_to_modpath('xdoctest'))\n    ...xdoctest\n    >>> print(_syspath_modname_to_modpath('_ctypes'))\n    ..._ctypes...\n    >>> assert _syspath_modname_to_modpath('xdoctest', sys_path=[]) is None\n    >>> assert _syspath_modname_to_modpath('xdoctest.static_analysis', sys_path=[]) is None\n    >>> assert _syspath_modname_to_modpath('_ctypes', sys_path=[]) is None\n    >>> assert _syspath_modname_to_modpath('this', sys_path=[]) is None\n\nExample:\n    >>> # test what happens when the module is not visible in the path\n    >>> modname = 'xdoctest.static_analysis'\n    >>> modpath = _syspath_modname_to_modpath(modname)\n    >>> exclude = [split_modpath(modpath)[0]]\n    >>> found = _syspath_modname_to_modpath(modname, exclude=exclude)\n    >>> # this only works if installed in dev mode, pypi fails\n    >>> assert found is None, 'should not have found {}'.format(found)", "id": "f5130:m5"}
{"signature": "def import_module_from_name(modname):", "body": "if True:<EOL><INDENT>import importlib<EOL>module = importlib.import_module(modname)<EOL><DEDENT>else:<EOL><INDENT>if '<STR_LIT:.>' in modname:<EOL><INDENT>fromlist = modname.split('<STR_LIT:.>')[-<NUM_LIT:1>]<EOL>fromlist_ = list(map(str, fromlist))  <EOL>module = __import__(modname, {}, {}, fromlist_, <NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>module = __import__(modname, {}, {}, [], <NUM_LIT:0>)<EOL><DEDENT><DEDENT>return module<EOL>", "docstring": "Imports a module from its string name (__name__)\n\nArgs:\n    modname (str):  module name\n\nReturns:\n    module: module\n\nExample:\n    >>> # test with modules that wont be imported in normal circumstances\n    >>> # todo write a test where we gaurentee this\n    >>> modname_list = [\n    >>>     'pickletools',\n    >>>     'lib2to3.fixes.fix_apply',\n    >>> ]\n    >>> #assert not any(m in sys.modules for m in modname_list)\n    >>> modules = [import_module_from_name(modname) for modname in modname_list]\n    >>> assert [m.__name__ for m in modules] == modname_list\n    >>> assert all(m in sys.modules for m in modname_list)", "id": "f5130:m2"}
{"signature": "def split_modpath(modpath, check=True):", "body": "if six.PY2:<EOL><INDENT>if modpath.endswith('<STR_LIT>'):<EOL><INDENT>modpath = modpath[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>modpath_ = abspath(expanduser(modpath))<EOL>if check:<EOL><INDENT>if not exists(modpath_):<EOL><INDENT>if not exists(modpath):<EOL><INDENT>raise ValueError('<STR_LIT>'.format(modpath))<EOL><DEDENT>raise ValueError('<STR_LIT>'.format(modpath))<EOL><DEDENT>if isdir(modpath_) and not exists(join(modpath, '<STR_LIT>')):<EOL><INDENT>raise ValueError('<STR_LIT>'.format(modpath))<EOL><DEDENT><DEDENT>full_dpath, fname_ext = split(modpath_)<EOL>_relmod_parts = [fname_ext]<EOL>dpath = full_dpath<EOL>while exists(join(dpath, '<STR_LIT>')):<EOL><INDENT>dpath, dname = split(dpath)<EOL>_relmod_parts.append(dname)<EOL><DEDENT>relmod_parts = _relmod_parts[::-<NUM_LIT:1>]<EOL>rel_modpath = os.path.sep.join(relmod_parts)<EOL>return dpath, rel_modpath<EOL>", "docstring": "Splits the modpath into the dir that must be in PYTHONPATH for the module\nto be imported and the modulepath relative to this directory.\n\nArgs:\n    modpath (str): module filepath\n    check (bool): if False, does not raise an error if modpath is a\n        directory and does not contain an `__init__.py` file.\n\nReturns:\n    tuple: (directory, rel_modpath)\n\nRaises:\n    ValueError: if modpath does not exist or is not a package\n\nExample:\n    >>> from xdoctest import static_analysis\n    >>> modpath = static_analysis.__file__.replace('.pyc', '.py')\n    >>> modpath = abspath(modpath)\n    >>> dpath, rel_modpath = split_modpath(modpath)\n    >>> recon = join(dpath, rel_modpath)\n    >>> assert recon == modpath\n    >>> assert rel_modpath == join('xdoctest', 'static_analysis.py')", "id": "f5130:m9"}
{"signature": "def modname_to_modpath(modname, hide_init=True, hide_main=False, sys_path=None):", "body": "modpath = _syspath_modname_to_modpath(modname, sys_path)<EOL>if modpath is None:<EOL><INDENT>return None<EOL><DEDENT>modpath = normalize_modpath(modpath, hide_init=hide_init,<EOL>hide_main=hide_main)<EOL>return modpath<EOL>", "docstring": "Finds the path to a python module from its name.\n\nDetermines the path to a python module without directly import it\n\nConverts the name of a module (__name__) to the path (__file__) where it is\nlocated without importing the module. Returns None if the module does not\nexist.\n\nArgs:\n    modname (str): module filepath\n    hide_init (bool): if False, __init__.py will be returned for packages\n    hide_main (bool): if False, and hide_init is True, __main__.py will be\n        returned for packages, if it exists.\n    sys_path (list): if specified overrides `sys.path` (default None)\n\nReturns:\n    str: modpath - path to the module, or None if it doesn't exist\n\nCommandLine:\n    python -m xdoctest.static_analysis modname_to_modpath:0\n    pytest  /home/joncrall/code/xdoctest/xdoctest/static_analysis.py::modname_to_modpath:0\n\nExample:\n    >>> modname = 'xdoctest.__main__'\n    >>> modpath = modname_to_modpath(modname, hide_main=False)\n    >>> assert modpath.endswith('__main__.py')\n    >>> modname = 'xdoctest'\n    >>> modpath = modname_to_modpath(modname, hide_init=False)\n    >>> assert modpath.endswith('__init__.py')\n    >>> modpath = basename(modname_to_modpath('_ctypes'))\n    >>> assert 'ctypes' in modpath", "id": "f5130:m6"}
{"signature": "def import_module_from_path(modpath, index=-<NUM_LIT:1>):", "body": "import os<EOL>if not os.path.exists(modpath):<EOL><INDENT>import re<EOL>import zipimport<EOL>pat = '<STR_LIT>' + re.escape(os.path.sep) + '<STR_LIT>'<EOL>parts = re.split(pat, modpath, flags=re.IGNORECASE)<EOL>if len(parts) > <NUM_LIT:2>:<EOL><INDENT>archivepath = '<STR_LIT>'.join(parts[:-<NUM_LIT:1>])[:-<NUM_LIT:1>]<EOL>internal = parts[-<NUM_LIT:1>]<EOL>modname = os.path.splitext(internal)[<NUM_LIT:0>]<EOL>modname = os.path.normpath(modname)<EOL>if os.path.exists(archivepath):<EOL><INDENT>zimp_file = zipimport.zipimporter(archivepath)<EOL>module = zimp_file.load_module(modname)<EOL>return module<EOL><DEDENT><DEDENT>raise IOError('<STR_LIT>'.format(modpath))<EOL><DEDENT>else:<EOL><INDENT>module = _custom_import_modpath(modpath)<EOL>return module<EOL><DEDENT>", "docstring": "Imports a module via its path\n\nArgs:\n    modpath (PathLike): path to the module on disk or within a zipfile.\n\nReturns:\n    module: the imported module\n\nReferences:\n    https://stackoverflow.com/questions/67631/import-module-given-path\n\nNotes:\n    If the module is part of a package, the package will be imported first.\n    These modules may cause problems when reloading via IPython magic\n\n    This can import a module from within a zipfile. To do this modpath\n    should specify the path to the zipfile and the path to the module\n    within that zipfile separated by a colon or pathsep.\n    E.g. `/path/to/archive.zip:mymodule.py`\n\nWarning:\n    It is best to use this with paths that will not conflict with\n    previously existing modules.\n\n    If the modpath conflicts with a previously existing module name. And\n    the target module does imports of its own relative to this conflicting\n    path. 
In this case, the module that was loaded first will win.\n\n    For example if you try to import '/foo/bar/pkg/mod.py' from the folder\n    structure:\n      - foo/\n        +- bar/\n           +- pkg/\n              +  __init__.py\n              |- mod.py\n              |- helper.py\n\n   If there exists another module named `pkg` already in sys.modules\n   and mod.py does something like `from . import helper`, Python will\n   assume helper belongs to the `pkg` module already in sys.modules.\n   This can cause a NameError or worse --- a incorrect helper module.\n\nExample:\n    >>> import xdoctest\n    >>> modpath = xdoctest.__file__\n    >>> module = import_module_from_path(modpath)\n    >>> assert module is xdoctest\n\nExample:\n    >>> # Test importing a module from within a zipfile\n    >>> import zipfile\n    >>> from xdoctest import utils\n    >>> from os.path import join, expanduser\n    >>> dpath = expanduser('~/.cache/xdoctest')\n    >>> dpath = utils.ensuredir(dpath)\n    >>> #dpath = utils.TempDir().ensure()\n    >>> # Write to an external module named bar\n    >>> external_modpath = join(dpath, 'bar.py')\n    >>> open(external_modpath, 'w').write('testvar = 1')\n    >>> internal = 'folder/bar.py'\n    >>> # Move the external bar module into a zipfile\n    >>> zippath = join(dpath, 'myzip.zip')\n    >>> with zipfile.ZipFile(zippath, 'w') as myzip:\n    >>>     myzip.write(external_modpath, internal)\n    >>> # Import the bar module from within the zipfile\n    >>> modpath = zippath + ':' + internal\n    >>> modpath = zippath + os.path.sep + internal\n    >>> module = import_module_from_path(modpath)\n    >>> assert module.__name__ == os.path.normpath('folder/bar')\n    >>> assert module.testvar == 1\n\nDoctest:\n    >>> import pytest\n    >>> with pytest.raises(IOError):\n    >>>     import_module_from_path('does-not-exist')\n    >>> with pytest.raises(IOError):\n    >>>     import_module_from_path('does-not-exist.zip/')", "id": "f5130:m1"}
{"signature": "def _dirstats(dpath=None):  ", "body": "from ubelt import util_colors<EOL>if dpath is None:<EOL><INDENT>dpath = os.getcwd()<EOL><DEDENT>print('<STR_LIT>')<EOL>print('<STR_LIT>'.format(dpath))<EOL>print('<STR_LIT>')<EOL>print('<STR_LIT>')<EOL>if not os.path.exists(dpath):<EOL><INDENT>print('<STR_LIT>')<EOL>return<EOL><DEDENT>paths = sorted(os.listdir(dpath))<EOL>for path in paths:<EOL><INDENT>full_path = join(dpath, path)<EOL>E = os.path.exists(full_path)<EOL>L = os.path.islink(full_path)<EOL>F = os.path.isfile(full_path)<EOL>D = os.path.isdir(full_path)<EOL>J = util_platform.WIN32 and _win32_links._win32_is_junction(full_path)<EOL>ELFDJ = [E, L, F, D, J]<EOL>if   ELFDJ == [<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>]:<EOL><INDENT>path = util_colors.color_text(path, '<STR_LIT>')<EOL><DEDENT>elif ELFDJ == [<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>]:<EOL><INDENT>path = util_colors.color_text(path, '<STR_LIT>')<EOL><DEDENT>elif ELFDJ == [<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>]:<EOL><INDENT>path = util_colors.color_text(path, '<STR_LIT>')<EOL><DEDENT>elif ELFDJ == [<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>]:<EOL><INDENT>path = util_colors.color_text(path, '<STR_LIT>')<EOL><DEDENT>elif ELFDJ == [<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>]:<EOL><INDENT>path = util_colors.color_text(path, '<STR_LIT>')<EOL><DEDENT>elif ELFDJ == [<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>]:<EOL><INDENT>path = util_colors.color_text(path, '<STR_LIT>')<EOL><DEDENT>elif ELFDJ == [<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>]:<EOL><INDENT>path = util_colors.color_text(path, '<STR_LIT>')<EOL><DEDENT>elif ELFDJ == [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>]:<EOL><INDENT>path = util_colors.color_text(path, '<STR_LIT>')<EOL><DEDENT>elif ELFDJ == [<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, 
<NUM_LIT:1>]:<EOL><INDENT>path = util_colors.color_text(path, '<STR_LIT>')<EOL><DEDENT>elif ELFDJ == [<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>]:<EOL><INDENT>path = util_colors.color_text(path, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>'.format(dpath))<EOL>print('<STR_LIT>'.format(path))<EOL>raise AssertionError(str(ELFDJ) + str(path))<EOL><DEDENT>line = '<STR_LIT>'.format(**locals())<EOL>if os.path.islink(full_path):<EOL><INDENT>line += '<STR_LIT>' + os.readlink(full_path)<EOL><DEDENT>elif _win32_links is not None:<EOL><INDENT>if _win32_links._win32_is_junction(full_path):<EOL><INDENT>line += '<STR_LIT>' + _win32_links._win32_read_junction(full_path)<EOL><DEDENT><DEDENT>print(line)<EOL><DEDENT>", "docstring": "Testing helper for printing directory information\n(mostly for investigating windows weirdness)\n\nCommandLine:\n    python -m ubelt.util_links _dirstats", "id": "f5131:m3"}
{"signature": "def _can_symlink(verbose=<NUM_LIT:0>):  ", "body": "if _win32_links is not None:<EOL><INDENT>return _win32_links._win32_can_symlink(verbose)<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Return true if we have permission to create real symlinks.\nThis check always returns True on non-win32 systems.\nIf this check returns false, then we still may be able to use junctions.\n\nCommandLine:\n    python -m ubelt.util_platform _can_symlink\n\nExample:\n    >>> # Script\n    >>> print(_can_symlink(verbose=1))", "id": "f5131:m2"}
{"signature": "@property<EOL><INDENT>def encoding(self):<DEDENT>", "body": "if self.redirect is not None:<EOL><INDENT>return self.redirect.encoding<EOL><DEDENT>else:<EOL><INDENT>return super(TeeStringIO, self).encoding<EOL><DEDENT>", "docstring": "Gets the encoding of the `redirect` IO object\n\nDoctest:\n    >>> redirect = io.StringIO()\n    >>> assert TeeStringIO(redirect).encoding is None\n    >>> assert TeeStringIO(None).encoding is None\n    >>> assert TeeStringIO(sys.stdout).encoding is sys.stdout.encoding\n    >>> redirect = io.TextIOWrapper(io.StringIO())\n    >>> assert TeeStringIO(redirect).encoding is redirect.encoding", "id": "f5132:c0:m2"}
{"signature": "def write(self, msg):", "body": "if self.redirect is not None:<EOL><INDENT>self.redirect.write(msg)<EOL><DEDENT>if six.PY2:<EOL><INDENT>from xdoctest.utils.util_str import ensure_unicode<EOL>msg = ensure_unicode(msg)<EOL><DEDENT>super(TeeStringIO, self).write(msg)<EOL>", "docstring": "Write to this and the redirected stream", "id": "f5132:c0:m3"}
{"signature": "def isatty(self):  ", "body": "return (self.redirect is not None and<EOL>hasattr(self.redirect, '<STR_LIT>') and self.redirect.isatty())<EOL>", "docstring": "Returns true of the redirect is a terminal.\n\nNotes:\n    Needed for IPython.embed to work properly when this class is used\n    to override stdout / stderr.", "id": "f5132:c0:m1"}
{"signature": "def ensuredir(dpath, mode=<NUM_LIT>, verbose=None):", "body": "if verbose is None:  <EOL><INDENT>verbose = <NUM_LIT:0><EOL><DEDENT>if isinstance(dpath, (list, tuple)):  <EOL><INDENT>dpath = join(*dpath)<EOL><DEDENT>if not exists(dpath):<EOL><INDENT>if verbose:  <EOL><INDENT>print('<STR_LIT>' % dpath)<EOL><DEDENT>if sys.version_info.major == <NUM_LIT:2>:  <EOL><INDENT>os.makedirs(normpath(dpath), mode=mode)<EOL><DEDENT>else:<EOL><INDENT>os.makedirs(normpath(dpath), mode=mode, exist_ok=True)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if verbose:  <EOL><INDENT>print('<STR_LIT>' % dpath)<EOL><DEDENT><DEDENT>return dpath<EOL>", "docstring": "r\"\"\"\n    Ensures that directory will exist. Creates new dir with sticky bits by\n    default\n\n    Args:\n        dpath (PathLike): dir to ensure. Can also be a tuple to send to join\n        mode (int): octal mode of directory (default 0o1777)\n        verbose (int): verbosity (default 0)\n\n    Returns:\n        PathLike: path: the ensured directory\n\n    Notes:\n        This function is not thread-safe in Python2\n\n    Example:\n        >>> from ubelt.util_platform import *  # NOQA\n        >>> import ubelt as ub\n        >>> cache_dpath = ub.ensure_app_cache_dir('ubelt')\n        >>> dpath = join(cache_dpath, 'ensuredir')\n        >>> if exists(dpath):\n        ...     os.rmdir(dpath)\n        >>> assert not exists(dpath)\n        >>> ub.ensuredir(dpath)\n        >>> assert exists(dpath)\n        >>> os.rmdir(dpath)", "id": "f5133:m5"}
{"signature": "def truepath(path, real=False):", "body": "path = expanduser(path)<EOL>path = expandvars(path)<EOL>if real:<EOL><INDENT>path = realpath(path)<EOL><DEDENT>else:<EOL><INDENT>path = abspath(path)<EOL><DEDENT>path = normpath(path)<EOL>return path<EOL>", "docstring": "Normalizes a string representation of a path and does shell-like expansion.\n\nArgs:\n    path (PathLike): string representation of a path\n    real (bool): if True, all symbolic links are followed. (default: False)\n\nReturns:\n    PathLike : normalized path\n\nNote:\n    This function is similar to the composition of expanduser, expandvars,\n    normpath, and (realpath if `real` else abspath). However, on windows\n    backslashes are then replaced with forward slashes to offer a\n    consistent unix-like experience across platforms.\n\n    On windows expanduser will expand environment variables formatted as\n    %name%, whereas on unix, this will not occur.\n\nCommandLine:\n    python -m ubelt.util_path truepath\n\nExample:\n    >>> import ubelt as ub\n    >>> assert ub.truepath('~/foo') == join(ub.userhome(), 'foo')\n    >>> assert ub.truepath('~/foo') == ub.truepath('~/foo/bar/..')\n    >>> assert ub.truepath('~/foo', real=True) == ub.truepath('~/foo')", "id": "f5133:m4"}
{"signature": "def expandpath(path):", "body": "path = expanduser(path)<EOL>path = expandvars(path)<EOL>return path<EOL>", "docstring": "Wrapper around expanduser and expandvars.\n\nLess aggressive than truepath. Only expands environs and tilde. Does not\nchange relative paths to absolute paths.\n\nArgs:\n    path (PathLike): string representation of a path\n\nReturns:\n    PathLike : expanded path\n\nExample:\n    >>> import ubelt as ub\n    >>> assert normpath(ub.expandpath('~/foo')) == join(ub.userhome(), 'foo')\n    >>> assert ub.expandpath('foo') == 'foo'", "id": "f5133:m3"}
{"signature": "def download(url, fpath=None, hash_prefix=None, hasher='<STR_LIT>',<EOL>chunksize=<NUM_LIT>, verbose=<NUM_LIT:1>):", "body": "from progiter import ProgIter as Progress<EOL>from ubelt import util_platform<EOL>import shutil<EOL>import tempfile<EOL>import hashlib<EOL>if six.PY2:  <EOL><INDENT>from urllib2 import urlopen  <EOL><DEDENT>else:<EOL><INDENT>from urllib.request import urlopen  <EOL><DEDENT>if fpath is None:<EOL><INDENT>dpath = util_platform.ensure_app_cache_dir('<STR_LIT>')<EOL>fname = basename(url)<EOL>fpath = join(dpath, fname)<EOL><DEDENT>_dst_is_io_object = hasattr(fpath, '<STR_LIT>')<EOL>if verbose:<EOL><INDENT>if _dst_is_io_object:<EOL><INDENT>print('<STR_LIT>' % (url,))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>' % (url, fpath))<EOL><DEDENT><DEDENT>urldata = urlopen(url)<EOL>meta = urldata.info()<EOL>try:<EOL><INDENT>if hasattr(meta, '<STR_LIT>'):  <EOL><INDENT>file_size = int(meta.getheaders(\"<STR_LIT>\")[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>file_size = int(meta.get_all(\"<STR_LIT>\")[<NUM_LIT:0>])<EOL><DEDENT><DEDENT>except Exception:  <EOL><INDENT>file_size = None<EOL><DEDENT>if hash_prefix:<EOL><INDENT>if isinstance(hasher, six.string_types):<EOL><INDENT>if hasher == '<STR_LIT>':<EOL><INDENT>hasher = hashlib.sha1()<EOL><DEDENT>elif hasher == '<STR_LIT>':<EOL><INDENT>hasher = hashlib.sha512()<EOL><DEDENT>else:<EOL><INDENT>raise KeyError(hasher)<EOL><DEDENT><DEDENT><DEDENT>if _dst_is_io_object:<EOL><INDENT>_file_write = fpath.write<EOL><DEDENT>else:<EOL><INDENT>tmp = tempfile.NamedTemporaryFile(delete=False)<EOL>_file_write = tmp.write<EOL><DEDENT>_urldata_read = urldata.read<EOL>try:<EOL><INDENT>with Progress(total=file_size, disable=not verbose) as pbar:<EOL><INDENT>_pbar_update = pbar.update<EOL>def _critical_loop():<EOL><INDENT>buffer = '<STR_LIT:U+0020>'<EOL>if hash_prefix:<EOL><INDENT>_hasher_update = hasher.update<EOL>while buffer:<EOL><INDENT>buffer = 
_urldata_read(chunksize)<EOL>_file_write(buffer)<EOL>_hasher_update(buffer)<EOL>_pbar_update(len(buffer))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>while buffer:<EOL><INDENT>buffer = _urldata_read(chunksize)<EOL>_file_write(buffer)<EOL>_pbar_update(len(buffer))<EOL><DEDENT><DEDENT><DEDENT>_critical_loop()<EOL><DEDENT>if not _dst_is_io_object:<EOL><INDENT>tmp.close()<EOL>shutil.move(tmp.name, fpath)<EOL><DEDENT>if hash_prefix:<EOL><INDENT>got = hasher.hexdigest()<EOL>if got[:len(hash_prefix)] != hash_prefix:<EOL><INDENT>print('<STR_LIT>'.format(hash_prefix))<EOL>print('<STR_LIT>'.format(got))<EOL>if _dst_is_io_object:<EOL><INDENT>raise RuntimeError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(hash_prefix, got))<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>fpath, hash_prefix, got))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>if not _dst_is_io_object:  <EOL><INDENT>tmp.close()<EOL>if exists(tmp.name):<EOL><INDENT>os.remove(tmp.name)<EOL><DEDENT><DEDENT><DEDENT>return fpath<EOL>", "docstring": "downloads a url to a fpath.\n\nArgs:\n    url (str):\n        The url to download.\n\n    fpath (PathLike | io.BytesIOtringIO):\n        The path to download to. Defaults to basename of url and ubelt's\n        application cache. If this is a io.BytesIO object then information\n        is directly written to this object (note this prevents the use of\n        temporary files).\n\n    hash_prefix (None or str):\n        If specified, download will retry / error if the file hash\n        does not match this value. Defaults to None.\n\n    hasher (str or Hasher):\n        If hash_prefix is specified, this indicates the hashing\n        algorithm to apply to the file. Defaults to sha512.\n\n    chunksize (int):\n        Download chunksize. Defaults to 2 ** 13.\n\n    verbose (int):\n        Verbosity level 0 or 1. 
Defaults to 1.\n\nReturns:\n    PathLike: fpath - file path string\n\nRaises:\n    URLError - if there is problem downloading the url\n    RuntimeError - if the hash does not match the hash_prefix\n\nNotes:\n    Original code taken from pytorch in torch/utils/model_zoo.py and\n    slightly modified.\n\nReferences:\n    http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/\n    http://stackoverflow.com/questions/15644964/python-progress-bar-and-downloads\n    http://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py\n\nCommandLine:\n    python -m xdoctest ubelt.util_download download:1\n\nExample:\n    >>> # xdoctest: +REQUIRES(--network)\n    >>> from ubelt.util_download import *  # NOQA\n    >>> url = 'http://i.imgur.com/rqwaDag.png'\n    >>> fpath = download(url)\n    >>> print(basename(fpath))\n    rqwaDag.png\n\nExample:\n    >>> # xdoctest: +REQUIRES(--network)\n    >>> import ubelt as ub\n    >>> import io\n    >>> url = 'http://i.imgur.com/rqwaDag.png'\n    >>> file = io.BytesIO()\n    >>> fpath = download(url, file)\n    >>> file.seek(0)\n    >>> data = file.read()\n    >>> assert ub.hash_data(data, hasher='sha1').startswith('f79ea24571')\n\nExample:\n    >>> # xdoctest: +REQUIRES(--network)\n    >>> url = 'http://i.imgur.com/rqwaDag.png'\n    >>> fpath = download(url, hasher='sha1', hash_prefix='f79ea24571da6ddd2ba12e3d57b515249ecb8a35')\n    Downloading url='http://i.imgur.com/rqwaDag.png' to fpath=...rqwaDag.png\n    ...\n    ...1233/1233... rate=... 
Hz, eta=..., total=..., wall=...\n\nExample:\n    >>> # xdoctest: +REQUIRES(--network)\n    >>> # test download from girder\n    >>> import pytest\n    >>> import ubelt as ub\n    >>> url = 'https://data.kitware.com/api/v1/item/5b4039308d777f2e6225994c/download'\n    >>> ub.download(url, hasher='sha512', hash_prefix='c98a46cb31205cf')\n    >>> with pytest.raises(RuntimeError):\n    >>>     ub.download(url, hasher='sha512', hash_prefix='BAD_HASH')", "id": "f5135:m0"}
{"signature": "def cmd(command, shell=False, detach=False, verbose=<NUM_LIT:0>, tee=None, cwd=None,<EOL>env=None, tee_backend='<STR_LIT>', verbout=None, **kwargs):", "body": "if kwargs:  <EOL><INDENT>if '<STR_LIT>' in kwargs:<EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>', DeprecationWarning)<EOL>tee = kwargs.pop('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' in kwargs:<EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>', DeprecationWarning)<EOL>detach = kwargs.pop('<STR_LIT>')<EOL><DEDENT>if kwargs:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(list(kwargs.keys)))<EOL><DEDENT><DEDENT>if isinstance(command, six.string_types):<EOL><INDENT>command_text = command<EOL>command_tup = None<EOL><DEDENT>else:<EOL><INDENT>import pipes<EOL>command_tup = command<EOL>command_text = '<STR_LIT:U+0020>'.join(list(map(pipes.quote, command_tup)))<EOL><DEDENT>if shell or sys.platform.startswith('<STR_LIT:win32>'):<EOL><INDENT>args = command_text<EOL><DEDENT>else:<EOL><INDENT>if command_tup is None:<EOL><INDENT>import shlex<EOL>command_tup = shlex.split(command_text)<EOL><DEDENT>args = command_tup<EOL><DEDENT>if tee is None:<EOL><INDENT>tee = verbose > <NUM_LIT:0><EOL><DEDENT>if verbose > <NUM_LIT:1>:<EOL><INDENT>import os<EOL>import platform<EOL>import getpass<EOL>from ubelt import util_path<EOL>if verbose > <NUM_LIT:2>:<EOL><INDENT>try:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>except Exception:  <EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>cwd_ = os.getcwd() if cwd is None else cwd<EOL>compname = platform.node()<EOL>username = getpass.getuser()<EOL>cwd_ = util_path.compressuser(cwd_)<EOL>ps1 = '<STR_LIT>'.format(username, compname, cwd_)<EOL>print(ps1 + command_text)<EOL><DEDENT>def make_proc():<EOL><INDENT>import subprocess<EOL>proc = subprocess.Popen(args, stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE, shell=shell,<EOL>universal_newlines=True, cwd=cwd, env=env)<EOL>return proc<EOL><DEDENT>if detach:<EOL><INDENT>info = {'<STR_LIT>': make_proc(), 
'<STR_LIT>': command_text}<EOL>if verbose > <NUM_LIT:0>:  <EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if tee:<EOL><INDENT>stdout, stderr = sys.stdout, sys.stderr<EOL>proc, logged_out, logged_err = _tee_output(make_proc, stdout, stderr,<EOL>backend=tee_backend)<EOL>try:<EOL><INDENT>out = '<STR_LIT>'.join(logged_out)<EOL><DEDENT>except UnicodeDecodeError:  <EOL><INDENT>out = '<STR_LIT:\\n>'.join(_.decode('<STR_LIT:utf-8>') for _ in logged_out)<EOL><DEDENT>try:<EOL><INDENT>err = '<STR_LIT>'.join(logged_err)<EOL><DEDENT>except UnicodeDecodeError:  <EOL><INDENT>err = '<STR_LIT:\\n>'.join(_.decode('<STR_LIT:utf-8>') for _ in logged_err)<EOL><DEDENT>(out_, err_) = proc.communicate()<EOL><DEDENT>else:<EOL><INDENT>proc = make_proc()<EOL>(out, err) = proc.communicate()<EOL><DEDENT>ret = proc.wait()<EOL>info = {<EOL>'<STR_LIT>': out,<EOL>'<STR_LIT>': err,<EOL>'<STR_LIT>': ret,<EOL>'<STR_LIT>': proc,<EOL>'<STR_LIT>': cwd,<EOL>'<STR_LIT>': command_text<EOL>}<EOL>if verbose > <NUM_LIT:2>:<EOL><INDENT>try:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>except Exception:  <EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>return info<EOL>", "docstring": "Executes a command in a subprocess.\n\nThe advantage of this wrapper around subprocess is that\n(1) you control if the subprocess prints to stdout,\n(2) the text written to stdout and stderr is returned for parsing,\n(3) cross platform behavior that lets you specify the command as a string\nor tuple regardless of whether or not shell=True.\n(4) ability to detach, return the process object and allow the process to\nrun in the background (eventually we may return a Future object instead).\n\nArgs:\n    command (str or Sequence): bash-like command string or tuple of\n        executable and args\n\n    shell (bool): if True, process is run in shell, defaults to False.\n\n    detach (bool): if True, process is detached and run in background,\n        defaults to False.\n\n    verbose (int): verbosity 
mode. Can be 0, 1, 2, or 3. Defaults to 0.\n\n    tee (bool, optional): if True, simultaneously writes to stdout while\n        capturing output from the command. If not specified, defaults to\n        True if verbose > 0.  If detech is True, then this argument is\n        ignored.\n\n    cwd (PathLike, optional): path to run command\n\n    env (str, optional): environment passed to Popen\n\n    tee_backend (str, optional): backend for tee output.\n        Valid choices are: \"auto\", \"select\" (POSIX only), and \"thread\".\n\n    **kwargs: only used to support deprecated arguments\n\nReturns:\n    dict: info - information about command status.\n        if detach is False `info` contains captured standard out,\n        standard error, and the return code\n        if detach is False `info` contains a reference to the process.\n\nNotes:\n    Inputs can either be text or tuple based. On UNIX we ensure conversion\n    to text if shell=True, and to tuple if shell=False. On windows, the\n    input is always text based.  
See [3] for a potential cross-platform\n    shlex solution for windows.\n\nCommandLine:\n    python -m ubelt.util_cmd cmd\n    python -c \"import ubelt as ub; ub.cmd('ping localhost -c 2', verbose=2)\"\n\nReferences:\n    [1] https://stackoverflow.com/questions/11495783/redirect-subprocess-stderr-to-stdout\n    [2] https://stackoverflow.com/questions/7729336/how-can-i-print-and-display-subprocess-stdout-and-stderr-output-without-distorti\n    [3] https://stackoverflow.com/questions/33560364/python-windows-parsing-command-lines-with-shlex\n\nExample:\n    >>> info = cmd(('echo', 'simple cmdline interface'), verbose=1)\n    simple cmdline interface\n    >>> assert info['ret'] == 0\n    >>> assert info['out'].strip() == 'simple cmdline interface'\n    >>> assert info['err'].strip() == ''\n\nDoctest:\n    >>> info = cmd('echo str noshell', verbose=0)\n    >>> assert info['out'].strip() == 'str noshell'\n\nDoctest:\n    >>> # windows echo will output extra single quotes\n    >>> info = cmd(('echo', 'tuple noshell'), verbose=0)\n    >>> assert info['out'].strip().strip(\"'\") == 'tuple noshell'\n\nDoctest:\n    >>> # Note this command is formatted to work on win32 and unix\n    >>> info = cmd('echo str&&echo shell', verbose=0, shell=True)\n    >>> assert info['out'].strip() == 'str' + chr(10) + 'shell'\n\nDoctest:\n    >>> info = cmd(('echo', 'tuple shell'), verbose=0, shell=True)\n    >>> assert info['out'].strip().strip(\"'\") == 'tuple shell'\n\nDoctest:\n    >>> import ubelt as ub\n    >>> from os.path import join, exists\n    >>> fpath1 = join(ub.get_app_cache_dir('ubelt'), 'cmdout1.txt')\n    >>> fpath2 = join(ub.get_app_cache_dir('ubelt'), 'cmdout2.txt')\n    >>> ub.delete(fpath1)\n    >>> ub.delete(fpath2)\n    >>> info1 = ub.cmd(('touch', fpath1), detach=True)\n    >>> info2 = ub.cmd('echo writing2 > ' + fpath2, shell=True, detach=True)\n    >>> while not exists(fpath1):\n    ...     pass\n    >>> while not exists(fpath2):\n    ...     
pass\n    >>> assert ub.readfrom(fpath1) == ''\n    >>> assert ub.readfrom(fpath2).strip() == 'writing2'\n    >>> info1['proc'].wait()\n    >>> info2['proc'].wait()", "id": "f5136:m5"}
{"signature": "def _proc_iteroutput_thread(proc):", "body": "from six.moves import queue<EOL>stdout_queue = _proc_async_iter_stream(proc, proc.stdout)<EOL>stderr_queue = _proc_async_iter_stream(proc, proc.stderr)<EOL>stdout_live = True<EOL>stderr_live = True<EOL>while stdout_live or stderr_live:<EOL><INDENT>if stdout_live:  <EOL><INDENT>try:<EOL><INDENT>oline = stdout_queue.get_nowait()<EOL>stdout_live = oline is not None<EOL><DEDENT>except queue.Empty:<EOL><INDENT>oline = None<EOL><DEDENT><DEDENT>if stderr_live:<EOL><INDENT>try:<EOL><INDENT>eline = stderr_queue.get_nowait()<EOL>stderr_live = eline is not None<EOL><DEDENT>except queue.Empty:<EOL><INDENT>eline = None<EOL><DEDENT><DEDENT>if oline is not None or eline is not None:<EOL><INDENT>yield oline, eline<EOL><DEDENT><DEDENT>", "docstring": "Iterates over output from a process line by line\n\nNote:\n    WARNING. Current implementation might have bugs with other threads.\n    This behavior was seen when using earlier versions of tqdm. I'm not\n    sure if this was our bug or tqdm's. Newer versions of tqdm fix this,\n    but I cannot guarantee that there isn't an issue on our end.\n\nYields:\n    Tuple[str, str]: oline, eline: stdout and stderr line\n\nReferences:\n    https://stackoverflow.com/questions/375427/non-blocking-read-subproc", "id": "f5136:m2"}
{"signature": "def _proc_iteroutput_select(proc):", "body": "from six.moves import zip_longest<EOL>while proc.poll() is None:<EOL><INDENT>reads = [proc.stdout.fileno(), proc.stderr.fileno()]<EOL>ret = select.select(reads, [], [])<EOL>oline = eline = None<EOL>for fd in ret[<NUM_LIT:0>]:<EOL><INDENT>if fd == proc.stdout.fileno():<EOL><INDENT>oline = proc.stdout.readline()<EOL><DEDENT>if fd == proc.stderr.fileno():<EOL><INDENT>eline = proc.stderr.readline()<EOL><DEDENT><DEDENT>yield oline, eline<EOL><DEDENT>oline_iter = _textio_iterlines(proc.stdout)<EOL>eline_iter = _textio_iterlines(proc.stderr)<EOL>for oline, eline in zip_longest(oline_iter, eline_iter):<EOL><INDENT>yield oline, eline<EOL><DEDENT>", "docstring": "Iterates over output from a process line by line\n\nUNIX only. Use `_proc_iteroutput_thread` instead for a cross platform\nsolution based on threads.\n\nYields:\n    Tuple[str, str]: oline, eline: stdout and stderr line", "id": "f5136:m3"}
{"signature": "def _proc_async_iter_stream(proc, stream, buffersize=<NUM_LIT:1>):", "body": "from six.moves import queue<EOL>from threading import Thread<EOL>def enqueue_output(proc, stream, stream_queue):<EOL><INDENT>while proc.poll() is None:<EOL><INDENT>line = stream.readline()<EOL>stream_queue.put(line)<EOL><DEDENT>for line in _textio_iterlines(stream):<EOL><INDENT>stream_queue.put(line)<EOL><DEDENT>stream_queue.put(None)  <EOL><DEDENT>stream_queue = queue.Queue(maxsize=buffersize)<EOL>_thread = Thread(target=enqueue_output, args=(proc, stream, stream_queue))<EOL>_thread.daemon = True  <EOL>_thread.start()<EOL>return stream_queue<EOL>", "docstring": "Reads output from a process in a separate thread", "id": "f5136:m1"}
{"signature": "def timestamp(method='<STR_LIT>'):", "body": "if method == '<STR_LIT>':<EOL><INDENT>tz_hour = time.timezone // <NUM_LIT><EOL>utc_offset = str(tz_hour) if tz_hour < <NUM_LIT:0> else '<STR_LIT:+>' + str(tz_hour)<EOL>stamp = time.strftime('<STR_LIT>') + utc_offset<EOL>return stamp<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "make an iso8601 timestamp\n\nArgs:\n    method (str): type of timestamp\n\nExample:\n    >>> stamp = timestamp()\n    >>> print('stamp = {!r}'.format(stamp))\n    stamp = ...-...-...T...", "id": "f5137:m0"}
{"signature": "def argmin(indexable, key=None):", "body": "if key is None and isinstance(indexable, collections_abc.Mapping):<EOL><INDENT>return min(indexable.items(), key=operator.itemgetter(<NUM_LIT:1>))[<NUM_LIT:0>]<EOL><DEDENT>elif hasattr(indexable, '<STR_LIT:index>'):<EOL><INDENT>if key is None:<EOL><INDENT>return indexable.index(min(indexable))<EOL><DEDENT>else:<EOL><INDENT>return indexable.index(min(indexable, key=key))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return argsort(indexable, key=key)[<NUM_LIT:0>]<EOL><DEDENT>", "docstring": "Returns index / key of the item with the smallest value.\n\nThis is similar to `numpy.argmin`, but it is written in pure python and\nworks on both lists and dictionaries.\n\nArgs:\n    indexable (Iterable or Mapping): indexable to sort by\n\n    key (Callable, optional): customizes the ordering of the indexable\n\nExample:\n    >>> assert argmin({'a': 3, 'b': 2, 'c': 100}) == 'b'\n    >>> assert argmin(['a', 'c', 'b', 'z', 'f']) == 0\n    >>> assert argmin([[0, 1], [2, 3, 4], [5]], key=len) == 2\n    >>> assert argmin({'a': 3, 'b': 2, 3: 100, 4: 4}) == 'b'\n    >>> assert argmin(iter(['a', 'c', 'A', 'z', 'f'])) == 2", "id": "f5139:m12"}
{"signature": "def argunique(items, key=None):", "body": "<EOL>if key is None:<EOL><INDENT>return unique(range(len(items)), key=lambda i: items[i])<EOL><DEDENT>else:<EOL><INDENT>return unique(range(len(items)), key=lambda i: key(items[i]))<EOL><DEDENT>", "docstring": "Returns indices corresponding to the first instance of each unique item.\n\nArgs:\n    items (Sequence): indexable collection of items\n\n    key (Callable, optional): custom normalization function.\n        If specified returns items where `key(item)` is unique.\n\nYields:\n    int : indices of the unique items\n\nExample:\n    >>> items = [0, 2, 5, 1, 1, 0, 2, 4]\n    >>> indices = list(argunique(items))\n    >>> assert indices == [0, 1, 2, 3, 7]\n    >>> indices = list(argunique(items, key=lambda x: x % 2 == 0))\n    >>> assert indices == [0, 2]", "id": "f5139:m5"}
{"signature": "def flatten(nested_list):", "body": "return it.chain.from_iterable(nested_list)<EOL>", "docstring": "Transforms a nested iterable into a flat iterable.\n\nThis is simply an alias for `itertools.chain.from_iterable`\n\nArgs:\n    nested_list (Iterable[Iterable]): list of lists\n\nReturns:\n    Iterable: flattened items\n\nExample:\n    >>> import ubelt as ub\n    >>> nested_list = [['a', 'b'], ['c', 'd']]\n    >>> list(ub.flatten(nested_list))\n    ['a', 'b', 'c', 'd']", "id": "f5139:m3"}
{"signature": "def boolmask(indices, maxval=None):", "body": "if maxval is None:<EOL><INDENT>indices = list(indices)<EOL>maxval = max(indices) + <NUM_LIT:1><EOL><DEDENT>mask = [False] * maxval<EOL>for index in indices:<EOL><INDENT>mask[index] = True<EOL><DEDENT>return mask<EOL>", "docstring": "Constructs a list of booleans where an item is True if its position is in\n`indices` otherwise it is False.\n\nArgs:\n    indices (list): list of integer indices\n\n    maxval (int): length of the returned list. If not specified\n        this is inferred from `indices`\n\nNote:\n    In the future the arg `maxval` may change its name to `shape`\n\nReturns:\n    list: mask: list of booleans. mask[idx] is True if idx in indices\n\nExample:\n    >>> import ubelt as ub\n    >>> indices = [0, 1, 4]\n    >>> mask = ub.boolmask(indices, maxval=6)\n    >>> assert mask == [True, True, False, False, True, False]\n    >>> mask = ub.boolmask(indices)\n    >>> assert mask == [True, True, False, False, True]", "id": "f5139:m7"}
{"signature": "def argmax(indexable, key=None):", "body": "if key is None and isinstance(indexable, collections_abc.Mapping):<EOL><INDENT>return max(indexable.items(), key=operator.itemgetter(<NUM_LIT:1>))[<NUM_LIT:0>]<EOL><DEDENT>elif hasattr(indexable, '<STR_LIT:index>'):<EOL><INDENT>if key is None:<EOL><INDENT>return indexable.index(max(indexable))<EOL><DEDENT>else:<EOL><INDENT>return indexable.index(max(indexable, key=key))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return argsort(indexable, key=key)[-<NUM_LIT:1>]<EOL><DEDENT>", "docstring": "Returns index / key of the item with the largest value.\n\nThis is similar to `numpy.argmax`, but it is written in pure python and\nworks on both lists and dictionaries.\n\nArgs:\n    indexable (Iterable or Mapping): indexable to sort by\n\n    key (Callable, optional): customizes the ordering of the indexable\n\nCommandLine:\n    python -m ubelt.util_list argmax\n\nExample:\n    >>> assert argmax({'a': 3, 'b': 2, 'c': 100}) == 'c'\n    >>> assert argmax(['a', 'c', 'b', 'z', 'f']) == 3\n    >>> assert argmax([[0, 1], [2, 3, 4], [5]], key=len) == 1\n    >>> assert argmax({'a': 3, 'b': 2, 3: 100, 4: 4}) == 3\n    >>> assert argmax(iter(['a', 'c', 'b', 'z', 'f'])) == 3", "id": "f5139:m11"}
{"signature": "def unique_flags(items, key=None):", "body": "len_ = len(items)<EOL>if key is None:<EOL><INDENT>item_to_index = dict(zip(reversed(items), reversed(range(len_))))<EOL>indices = item_to_index.values()<EOL><DEDENT>else:<EOL><INDENT>indices = argunique(items, key=key)<EOL><DEDENT>flags = boolmask(indices, len_)<EOL>return flags<EOL>", "docstring": "Returns a list of booleans corresponding to the first instance of each\nunique item.\n\nArgs:\n    items (Sequence): indexable collection of items\n\n    key (Callable, optional): custom normalization function.\n        If specified returns items where `key(item)` is unique.\n\nReturns:\n    List[bool] : flags the items that are unique\n\nExample:\n    >>> import ubelt as ub\n    >>> items = [0, 2, 1, 1, 0, 9, 2]\n    >>> flags = unique_flags(items)\n    >>> assert flags == [True, True, True, False, False, True, False]\n    >>> flags = unique_flags(items, key=lambda x: x % 2 == 0)\n    >>> assert flags == [True, False, True, False, False, False, False]", "id": "f5139:m6"}
{"signature": "def compress(items, flags):", "body": "return it.compress(items, flags)<EOL>", "docstring": "Selects items where the corresponding value in flags is True\nThis is similar to np.compress and it.compress\n\nArgs:\n    items (Iterable): a sequence to select items from\n\n    flags (Iterable): corresponding sequence of bools\n\nReturns:\n    Iterable: a subset of masked items\n\nExample:\n    >>> import ubelt as ub\n    >>> items = [1, 2, 3, 4, 5]\n    >>> flags = [False, True, True, False, True]\n    >>> list(ub.compress(items, flags))\n    [2, 3, 5]", "id": "f5139:m2"}
{"signature": "def take(items, indices):", "body": "return (items[index] for index in indices)<EOL>", "docstring": "Selects a subset of a list based on a list of indices.\nThis is similar to np.take, but pure python.\n\nArgs:\n    items (Sequence): an indexable object to select items from\n\n    indices (Iterable): sequence of indexing objects\n\nReturns:\n    Iterable or scalar: subset of the list\n\nSeeAlso:\n    ub.dict_subset\n\nExample:\n    >>> import ubelt as ub\n    >>> items = [0, 1, 2, 3]\n    >>> indices = [2, 0]\n    >>> list(ub.take(items, indices))\n    [2, 0]", "id": "f5139:m1"}
{"signature": "def _win32_read_junction(path):", "body": "if not jwfs.is_reparse_point(path):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>handle = jwfs.api.CreateFile(<EOL>path, <NUM_LIT:0>, <NUM_LIT:0>, None, jwfs.api.OPEN_EXISTING,<EOL>jwfs.api.FILE_FLAG_OPEN_REPARSE_POINT |<EOL>jwfs.api.FILE_FLAG_BACKUP_SEMANTICS,<EOL>None)<EOL>if handle == jwfs.api.INVALID_HANDLE_VALUE:<EOL><INDENT>raise WindowsError()<EOL><DEDENT>res = jwfs.reparse.DeviceIoControl(<EOL>handle, jwfs.api.FSCTL_GET_REPARSE_POINT, None, <NUM_LIT>)<EOL>bytes = jwfs.create_string_buffer(res)<EOL>p_rdb = jwfs.cast(bytes, jwfs.POINTER(jwfs.api.REPARSE_DATA_BUFFER))<EOL>rdb = p_rdb.contents<EOL>if rdb.tag not in [<NUM_LIT>, jwfs.api.IO_REPARSE_TAG_SYMLINK]:<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\" % rdb.tag)<EOL><DEDENT>jwfs.handle_nonzero_success(jwfs.api.CloseHandle(handle))<EOL>subname = rdb.get_substitute_name()<EOL>if subname.startswith('<STR_LIT>'):<EOL><INDENT>subname = subname[<NUM_LIT:2>:]<EOL><DEDENT>return subname<EOL>", "docstring": "Returns the location that the junction points, raises ValueError if path is\nnot a junction.\n\nCommandLine:\n    python -m ubelt._win32_links _win32_read_junction\n\nExample:\n    >>> # xdoc: +REQUIRES(WIN32)\n    >>> import ubelt as ub\n    >>> root = ub.ensure_app_cache_dir('ubelt', 'win32_junction')\n    >>> ub.delete(root)\n    >>> ub.ensuredir(root)\n    >>> dpath = join(root, 'dpath')\n    >>> djunc = join(root, 'djunc')\n    >>> ub.ensuredir(dpath)\n    >>> _win32_junction(dpath, djunc)\n    >>> path = djunc\n    >>> pointed = _win32_read_junction(path)\n    >>> print('pointed = {!r}'.format(pointed))", "id": "f5140:m6"}
{"signature": "def _win32_dir(path, star='<STR_LIT>'):", "body": "from ubelt import util_cmd<EOL>import re<EOL>wrapper = '<STR_LIT>'  <EOL>command = '<STR_LIT>'.format(path, star)<EOL>wrapped = wrapper.format(command)<EOL>info = util_cmd.cmd(wrapped, shell=True)<EOL>if info['<STR_LIT>'] != <NUM_LIT:0>:<EOL><INDENT>from ubelt import util_format<EOL>print('<STR_LIT>')<EOL>print(info['<STR_LIT>'])<EOL>print(util_format.repr2(info, nl=<NUM_LIT:1>))<EOL>raise OSError(str(info))<EOL><DEDENT>lines = info['<STR_LIT>'].split('<STR_LIT:\\n>')[<NUM_LIT:5>:-<NUM_LIT:3>]<EOL>splitter = re.compile('<STR_LIT>')<EOL>for line in lines:<EOL><INDENT>parts = splitter.split(line)<EOL>date, sep, time, sep, ampm, sep, type_or_size, sep = parts[:<NUM_LIT:8>]<EOL>name = '<STR_LIT>'.join(parts[<NUM_LIT:8>:])<EOL>if name == '<STR_LIT:.>' or name == '<STR_LIT:..>':<EOL><INDENT>continue<EOL><DEDENT>if type_or_size in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>pos = name.find('<STR_LIT::>')<EOL>bpos = name[:pos].rfind('<STR_LIT:[>')<EOL>name = name[:bpos - <NUM_LIT:1>]<EOL>pointed = name[bpos + <NUM_LIT:1>:-<NUM_LIT:1>]<EOL>yield type_or_size, name, pointed<EOL><DEDENT>else:<EOL><INDENT>yield type_or_size, name, None<EOL><DEDENT><DEDENT>", "docstring": "Using the windows cmd shell to get information about a directory", "id": "f5140:m9"}
{"signature": "def _win32_rmtree(path, verbose=<NUM_LIT:0>):", "body": "<EOL>def _rmjunctions(root):<EOL><INDENT>subdirs = []<EOL>for name in os.listdir(root):<EOL><INDENT>current = join(root, name)<EOL>if os.path.isdir(current):<EOL><INDENT>if _win32_is_junction(current):<EOL><INDENT>os.rmdir(current)<EOL><DEDENT>elif not os.path.islink(current):<EOL><INDENT>subdirs.append(current)<EOL><DEDENT><DEDENT><DEDENT>for subdir in subdirs:<EOL><INDENT>_rmjunctions(subdir)<EOL><DEDENT><DEDENT>if _win32_is_junction(path):<EOL><INDENT>if verbose:<EOL><INDENT>print('<STR_LIT>'.format(path))<EOL><DEDENT>os.rmdir(path)<EOL><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>print('<STR_LIT>'.format(path))<EOL><DEDENT>_rmjunctions(path)<EOL>import shutil<EOL>shutil.rmtree(path)<EOL><DEDENT>", "docstring": "rmtree for win32 that treats junctions like directory symlinks.\nThe junction removal portion may not be safe on race conditions.\n\nThere is a known issue that prevents shutil.rmtree from\ndeleting directories with junctions.\nhttps://bugs.python.org/issue31226", "id": "f5140:m7"}
{"signature": "def argflag(key, argv=None):", "body": "if argv is None:  <EOL><INDENT>argv = sys.argv<EOL><DEDENT>keys = [key] if isinstance(key, six.string_types) else key<EOL>flag = any(k in argv for k in keys)<EOL>return flag<EOL>", "docstring": "Determines if a key is specified on the command line\n\nArgs:\n    key (str or tuple): string or tuple of strings. Each key should be\n        prefixed with two hyphens (i.e. `--`)\n    argv (Optional[list]): overrides `sys.argv` if specified\n\nReturns:\n    bool: flag : True if the key (or any of the keys) was specified\n\nExample:\n    >>> import ubelt as ub\n    >>> argv = ['--spam', '--eggs', 'foo']\n    >>> assert ub.argflag('--eggs', argv=argv) is True\n    >>> assert ub.argflag('--ans', argv=argv) is False\n    >>> assert ub.argflag('foo', argv=argv) is True\n    >>> assert ub.argflag(('bar', '--spam'), argv=argv) is True", "id": "f5141:m1"}
{"signature": "def argval(key, default=util_const.NoParam, argv=None):", "body": "if argv is None:  <EOL><INDENT>argv = sys.argv<EOL><DEDENT>keys = [key] if isinstance(key, six.string_types) else key<EOL>n_max = len(argv) - <NUM_LIT:1><EOL>for argx, item in enumerate(argv):<EOL><INDENT>for key_ in keys:<EOL><INDENT>if item == key_:<EOL><INDENT>if argx < n_max:<EOL><INDENT>value = argv[argx + <NUM_LIT:1>]<EOL>return value<EOL><DEDENT><DEDENT>elif item.startswith(key_ + '<STR_LIT:=>'):<EOL><INDENT>value = '<STR_LIT:=>'.join(item.split('<STR_LIT:=>')[<NUM_LIT:1>:])<EOL>return value<EOL><DEDENT><DEDENT><DEDENT>value = default<EOL>return value<EOL>", "docstring": "Get the value of a keyword argument specified on the command line.\n\nValues can be specified as `<key> <value>` or `<key>=<value>`\n\nArgs:\n    key (str or tuple): string or tuple of strings. Each key should be\n        prefixed with two hyphens (i.e. `--`)\n    default (Optional[object]): value to return if not specified\n    argv (Optional[list]): overrides `sys.argv` if specified\n\nReturns:\n    str: value : the value specified after the key. It they key is\n        specified multiple times, then the first value is returned.\n\nTODO:\n    - [ ] Can we handle the case where the value is a list of long paths?\n    - [ ] Should we default the first or last specified instance of the flag.\n\nExample:\n    >>> import ubelt as ub\n    >>> argv = ['--ans', '42', '--quest=the grail', '--ans=6', '--bad']\n    >>> assert ub.argval('--spam', argv=argv) == ub.NoParam\n    >>> assert ub.argval('--quest', argv=argv) == 'the grail'\n    >>> assert ub.argval('--ans', argv=argv) == '42'\n    >>> assert ub.argval('--bad', argv=argv) == ub.NoParam\n    >>> assert ub.argval(('--bad', '--bar'), argv=argv) == ub.NoParam\n\nExample:\n    >>> # Test fix for GH Issue #41\n    >>> import ubelt as ub\n    >>> argv = ['--path=/path/with/k=3']\n    >>> ub.argval('--path', argv=argv) == '/path/with/k=3'", "id": "f5141:m0"}
{"signature": "def touch(fpath, mode=<NUM_LIT>, dir_fd=None, verbose=<NUM_LIT:0>, **kwargs):", "body": "if verbose:<EOL><INDENT>print('<STR_LIT>'.format(fpath))<EOL><DEDENT>if six.PY2:  <EOL><INDENT>with open(fpath, '<STR_LIT:a>'):<EOL><INDENT>os.utime(fpath, None)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>flags = os.O_CREAT | os.O_APPEND<EOL>with os.fdopen(os.open(fpath, flags=flags, mode=mode, dir_fd=dir_fd)) as f:<EOL><INDENT>os.utime(f.fileno() if os.utime in os.supports_fd else fpath,<EOL>dir_fd=None if os.supports_fd else dir_fd, **kwargs)<EOL><DEDENT><DEDENT>return fpath<EOL>", "docstring": "change file timestamps\n\nWorks like the touch unix utility\n\nArgs:\n    fpath (PathLike): name of the file\n    mode (int): file permissions (python3 and unix only)\n    dir_fd (file): optional directory file descriptor. If specified, fpath\n        is interpreted as relative to this descriptor (python 3 only).\n    verbose (int): verbosity\n    **kwargs : extra args passed to `os.utime` (python 3 only).\n\nReturns:\n    PathLike: path to the file\n\nReferences:\n    https://stackoverflow.com/questions/1158076/implement-touch-using-python\n\nExample:\n    >>> import ubelt as ub\n    >>> dpath = ub.ensure_app_cache_dir('ubelt')\n    >>> fpath = join(dpath, 'touch_file')\n    >>> assert not exists(fpath)\n    >>> ub.touch(fpath)\n    >>> assert exists(fpath)\n    >>> os.unlink(fpath)", "id": "f5143:m2"}
{"signature": "def writeto(fpath, to_write, aslines=False, verbose=None):", "body": "if verbose:<EOL><INDENT>print('<STR_LIT>' % (fpath,))<EOL><DEDENT>with open(fpath, '<STR_LIT:wb>') as file:<EOL><INDENT>if aslines:<EOL><INDENT>to_write = map(_ensure_bytes , to_write)<EOL>file.writelines(to_write)<EOL><DEDENT>else:<EOL><INDENT>bytes = _ensure_bytes(to_write)<EOL>file.write(bytes)<EOL><DEDENT><DEDENT>", "docstring": "r\"\"\"\n    Writes (utf8) text to a file.\n\n    Args:\n        fpath (PathLike): file path\n        to_write (str): text to write (must be unicode text)\n        aslines (bool): if True to_write is assumed to be a list of lines\n        verbose (bool): verbosity flag\n\n    CommandLine:\n        python -m ubelt.util_io writeto --verbose\n\n    Example:\n        >>> import ubelt as ub\n        >>> dpath = ub.ensure_app_cache_dir('ubelt')\n        >>> fpath = dpath + '/' + 'testwrite.txt'\n        >>> if exists(fpath):\n        >>>     os.remove(fpath)\n        >>> to_write = 'utf-8 symbols \u0394, \u0419, \u05e7, \u0645, \u0e57, \u3042, \u53f6, \u8449, and \ub9d0.'\n        >>> writeto(fpath, to_write)\n        >>> read_ = ub.readfrom(fpath)\n        >>> print('read_    = ' + read_)\n        >>> print('to_write = ' + to_write)\n        >>> assert read_ == to_write\n\n    Example:\n        >>> import ubelt as ub\n        >>> dpath = ub.ensure_app_cache_dir('ubelt')\n        >>> fpath = dpath + '/' + 'testwrite2.txt'\n        >>> if exists(fpath):\n        >>>     os.remove(fpath)\n        >>> to_write = ['a\\n', 'b\\n', 'c\\n', 'd\\n']\n        >>> writeto(fpath, to_write, aslines=True)\n        >>> read_ = ub.readfrom(fpath, aslines=True)\n        >>> print('read_    = {}'.format(read_))\n        >>> print('to_write = {}'.format(to_write))\n        >>> assert read_ == to_write", "id": "f5143:m0"}
{"signature": "def readfrom(fpath, aslines=False, errors='<STR_LIT:replace>', verbose=None):", "body": "if verbose:<EOL><INDENT>print('<STR_LIT>' % (fpath,))<EOL><DEDENT>if not exists(fpath):<EOL><INDENT>raise IOError('<STR_LIT>' % (fpath,))<EOL><DEDENT>with open(fpath, '<STR_LIT:rb>') as file:<EOL><INDENT>if aslines:<EOL><INDENT>text = [line.decode('<STR_LIT:utf8>', errors=errors)<EOL>for line in file.readlines()]<EOL>if sys.platform.startswith('<STR_LIT:win32>'):  <EOL><INDENT>text = [<EOL>line[:-<NUM_LIT:2>] + '<STR_LIT:\\n>' if line.endswith('<STR_LIT:\\r\\n>') else line<EOL>for line in text<EOL>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>text = file.read().decode('<STR_LIT:utf8>', errors=errors)<EOL><DEDENT><DEDENT>return text<EOL>", "docstring": "Reads (utf8) text from a file.\n\nArgs:\n    fpath (PathLike): file path\n    aslines (bool): if True returns list of lines\n    verbose (bool): verbosity flag\n\nReturns:\n    str: text from fpath (this is unicode)", "id": "f5143:m1"}
{"signature": "def find_duplicates(items, k=<NUM_LIT:2>, key=None):", "body": "<EOL>duplicates = defaultdict(list)<EOL>if key is None:<EOL><INDENT>for count, item in enumerate(items):<EOL><INDENT>duplicates[item].append(count)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for count, item in enumerate(items):<EOL><INDENT>duplicates[key(item)].append(count)<EOL><DEDENT><DEDENT>for key in list(duplicates.keys()):<EOL><INDENT>if len(duplicates[key]) < k:<EOL><INDENT>del duplicates[key]<EOL><DEDENT><DEDENT>duplicates = dict(duplicates)<EOL>return duplicates<EOL>", "docstring": "Find all duplicate items in a list.\n\nSearch for all items that appear more than `k` times and return a mapping\nfrom each (k)-duplicate item to the positions it appeared in.\n\nArgs:\n    items (Iterable): hashable items possibly containing duplicates\n    k (int): only return items that appear at least `k` times (default=2)\n    key (Callable, optional): Returns indices where `key(items[i])`\n        maps to a particular value at least k times.\n\nReturns:\n    dict: maps each duplicate item to the indices at which it appears\n\nCommandLine:\n    python -m ubelt.util_dict find_duplicates\n\nExample:\n    >>> import ubelt as ub\n    >>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]\n    >>> duplicates = ub.find_duplicates(items)\n    >>> print('items = %r' % (items,))\n    >>> print('duplicates = %r' % (duplicates,))\n    >>> assert duplicates == {0: [0, 1, 6], 2: [3, 8], 3: [4, 5]}\n    >>> assert ub.find_duplicates(items, 3) == {0: [0, 1, 6]}\n\nExample:\n    >>> import ubelt as ub\n    >>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9]\n    >>> # note: k can be 0\n    >>> duplicates = ub.find_duplicates(items, k=0)\n    >>> print(ub.repr2(duplicates, nl=0))\n    {0: [0, 1, 6], 1: [2], 2: [3, 8], 3: [4, 5], 9: [9], 12: [7]}\n\nExample:\n    >>> import ubelt as ub\n    >>> items = [10, 11, 12, 13, 14, 15, 16]\n    >>> duplicates = ub.find_duplicates(items, key=lambda x: x // 2)\n    >>> print(ub.repr2(duplicates, 
nl=0))\n    {5: [0, 1], 6: [2, 3], 7: [4, 5]}", "id": "f5144:m3"}
{"signature": "def invert_dict(dict_, unique_vals=True):", "body": "if unique_vals:<EOL><INDENT>if isinstance(dict_, OrderedDict):<EOL><INDENT>inverted = OrderedDict((val, key) for key, val in dict_.items())<EOL><DEDENT>else:<EOL><INDENT>inverted = {val: key for key, val in dict_.items()}<EOL><DEDENT><DEDENT>else:<EOL><INDENT>inverted = defaultdict(set)<EOL>for key, value in dict_.items():<EOL><INDENT>inverted[value].add(key)<EOL><DEDENT>inverted = dict(inverted)<EOL><DEDENT>return inverted<EOL>", "docstring": "r\"\"\"\n    Swaps the keys and values in a dictionary.\n\n    Args:\n        dict_ (dict): dictionary to invert\n        unique_vals (bool): if False, inverted keys are returned in a set.\n            The default is True.\n\n    Returns:\n        dict: inverted\n\n    Notes:\n        The must values be hashable.\n\n        If the original dictionary contains duplicate values, then only one of\n        the corresponding keys will be returned and the others will be\n        discarded.  This can be prevented by setting `unique_vals=True`,\n        causing the inverted keys to be returned in a set.\n\n    CommandLine:\n        python -m ubelt.util_dict invert_dict\n\n    Example:\n        >>> import ubelt as ub\n        >>> dict_ = {'a': 1, 'b': 2}\n        >>> inverted = ub.invert_dict(dict_)\n        >>> assert inverted == {1: 'a', 2: 'b'}\n\n    Example:\n        >>> import ubelt as ub\n        >>> dict_ = ub.odict([(2, 'a'), (1, 'b'), (0, 'c'), (None, 'd')])\n        >>> inverted = ub.invert_dict(dict_)\n        >>> assert list(inverted.keys())[0] == 'a'\n\n    Example:\n        >>> import ubelt as ub\n        >>> dict_ = {'a': 1, 'b': 0, 'c': 0, 'd': 0, 'f': 2}\n        >>> inverted = ub.invert_dict(dict_, unique_vals=False)\n        >>> assert inverted == {0: {'b', 'c', 'd'}, 1: {'a'}, 2: {'f'}}", "id": "f5144:m10"}
{"signature": "def dzip(items1, items2, cls=dict):", "body": "try:<EOL><INDENT>len(items1)<EOL><DEDENT>except TypeError:<EOL><INDENT>items1 = list(items1)<EOL><DEDENT>try:<EOL><INDENT>len(items2)<EOL><DEDENT>except TypeError:<EOL><INDENT>items2 = list(items2)<EOL><DEDENT>if len(items1) == <NUM_LIT:0> and len(items2) == <NUM_LIT:1>:<EOL><INDENT>items2 = []<EOL><DEDENT>if len(items2) == <NUM_LIT:1> and len(items1) > <NUM_LIT:1>:<EOL><INDENT>items2 = items2 * len(items1)<EOL><DEDENT>if len(items1) != len(items2):<EOL><INDENT>raise ValueError('<STR_LIT>' % (<EOL>len(items1), len(items2)))<EOL><DEDENT>return cls(zip(items1, items2))<EOL>", "docstring": "Zips elementwise pairs between items1 and items2 into a dictionary. Values\nfrom items2 can be broadcast onto items1.\n\nArgs:\n    items1 (Iterable): full sequence\n    items2 (Iterable): can either be a sequence of one item or a sequence\n        of equal length to `items1`\n    cls (Type[dict]): dictionary type to use. Defaults to dict, but could\n        be ordered dict instead.\n\nReturns:\n    dict: similar to dict(zip(items1, items2))\n\nExample:\n    >>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}\n    >>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}\n    >>> assert dzip([], [4]) == {}", "id": "f5144:m0"}
{"signature": "def dict_isect(*args):", "body": "if not args:<EOL><INDENT>return {}<EOL><DEDENT>else:<EOL><INDENT>dictclass = OrderedDict if isinstance(args[<NUM_LIT:0>], OrderedDict) else dict<EOL>common_keys = set.intersection(*map(set, args))<EOL>first_dict = args[<NUM_LIT:0>]<EOL>return dictclass((k, first_dict[k]) for k in common_keys)<EOL><DEDENT>", "docstring": "Constructs a dictionary that contains keys common between all inputs.\nThe returned values will only belong to the first dictionary.\n\nArgs:\n    *args : a sequence of dictionaries (or sets of keys)\n\nReturns:\n    Dict | OrderedDict :\n        OrderedDict if the first argument is an OrderedDict, otherwise dict\n\nNotes:\n    This function can be used as an alternative to `dict_subset` where any\n    key not in the dictionary is ignored. See the following example:\n\n    >>> dict_isect({'a': 1, 'b': 2, 'c': 3}, ['a', 'c', 'd'])\n    {'a': 1, 'c': 3}\n\nExample:\n    >>> dict_isect({'a': 1, 'b': 1}, {'b': 2, 'c': 2})\n    {'b': 1}\n    >>> dict_isect(odict([('a', 1), ('b', 2)]), odict([('c', 3)]))\n    OrderedDict()\n    >>> dict_isect()\n    {}", "id": "f5144:m7"}
{"signature": "def dict_take(dict_, keys, default=util_const.NoParam):", "body": "if default is util_const.NoParam:<EOL><INDENT>for key in keys:<EOL><INDENT>yield dict_[key]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for key in keys:<EOL><INDENT>yield dict_.get(key, default)<EOL><DEDENT><DEDENT>", "docstring": "r\"\"\"\n    Generates values from a dictionary\n\n    Args:\n        dict_ (Mapping): a dictionary to take from\n        keys (Iterable): the keys to take\n        default (object, optional): if specified uses default if keys are missing\n\n    CommandLine:\n        python -m ubelt.util_dict dict_take_gen\n\n    Example:\n        >>> import ubelt as ub\n        >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n        >>> keys = [1, 2, 3, 4, 5]\n        >>> result = list(ub.dict_take(dict_, keys, None))\n        >>> assert result == ['a', 'b', 'c', None, None]\n\n    Example:\n        >>> import ubelt as ub\n        >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n        >>> keys = [1, 2, 3, 4, 5]\n        >>> try:\n        >>>     print(list(ub.dict_take(dict_, keys)))\n        >>>     raise AssertionError('did not get key error')\n        >>> except KeyError:\n        >>>     print('correctly got key error')", "id": "f5144:m5"}
{"signature": "def to_dict(self):", "body": "return self._base(<EOL>(key, (value.to_dict() if isinstance(value, AutoDict) else value))<EOL>for key, value in self.items())<EOL>", "docstring": "Recursively casts a AutoDict into a regular dictionary. All nested\nAutoDict values are also converted.\n\nReturns:\n    dict: a copy of this dict without autovivification\n\nExample:\n    >>> from ubelt.util_dict import AutoDict\n    >>> auto = AutoDict()\n    >>> auto[1] = 1\n    >>> auto['n1'] = AutoDict()\n    >>> static = auto.to_dict()\n    >>> assert not isinstance(static, AutoDict)\n    >>> assert not isinstance(static['n1'], AutoDict)", "id": "f5144:c0:m1"}
{"signature": "def group_items(items, groupids):", "body": "if callable(groupids):<EOL><INDENT>keyfunc = groupids<EOL>pair_list = ((keyfunc(item), item) for item in items)<EOL><DEDENT>else:<EOL><INDENT>pair_list = zip(groupids, items)<EOL><DEDENT>groupid_to_items = defaultdict(list)<EOL>for key, item in pair_list:<EOL><INDENT>groupid_to_items[key].append(item)<EOL><DEDENT>return groupid_to_items<EOL>", "docstring": "r\"\"\"\n    Groups a list of items by group id.\n\n    Args:\n        items (Iterable): a list of items to group\n        groupids (Iterable or Callable): a corresponding list of item groupids\n            or a function mapping an item to a groupid.\n\n    Returns:\n        dict: groupid_to_items: maps a groupid to a list of items\n\n    CommandLine:\n        python -m ubelt.util_dict group_items\n\n    Example:\n        >>> import ubelt as ub\n        >>> items    = ['ham',     'jam',   'spam',     'eggs',    'cheese', 'banana']\n        >>> groupids = ['protein', 'fruit', 'protein',  'protein', 'dairy',  'fruit']\n        >>> groupid_to_items = ub.group_items(items, groupids)\n        >>> print(ub.repr2(groupid_to_items, nl=0))\n        {'dairy': ['cheese'], 'fruit': ['jam', 'banana'], 'protein': ['ham', 'spam', 'eggs']}", "id": "f5144:m1"}
{"signature": "def platform_data_dir():", "body": "if LINUX:  <EOL><INDENT>dpath_ = os.environ.get('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>elif DARWIN:  <EOL><INDENT>dpath_  = '<STR_LIT>'<EOL><DEDENT>elif WIN32:  <EOL><INDENT>dpath_ = os.environ.get('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:  <EOL><INDENT>raise '<STR_LIT>'<EOL><DEDENT>dpath = normpath(expanduser(dpath_))<EOL>return dpath<EOL>", "docstring": "Returns path for user-specific data files\n\nReturns:\n    PathLike : path to the data dir used by the current operating system", "id": "f5145:m0"}
{"signature": "def editfile(fpath, verbose=True):  ", "body": "from six import types<EOL>from ubelt import util_cmd<EOL>import warnings<EOL>warnings.warn('<STR_LIT>', DeprecationWarning)<EOL>if not isinstance(fpath, six.string_types):<EOL><INDENT>if isinstance(fpath, types.ModuleType):<EOL><INDENT>fpath = fpath.__file__<EOL><DEDENT>else:<EOL><INDENT>fpath =  sys.modules[fpath.__module__].__file__<EOL><DEDENT>fpath_py = fpath.replace('<STR_LIT>', '<STR_LIT>')<EOL>if exists(fpath_py):<EOL><INDENT>fpath = fpath_py<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print('<STR_LIT>'.format(fpath))<EOL><DEDENT>editor = os.environ.get('<STR_LIT>', '<STR_LIT>')<EOL>if not exists(fpath):<EOL><INDENT>raise IOError('<STR_LIT>' % fpath)<EOL><DEDENT>util_cmd.cmd([editor, fpath], fpath, detach=True)<EOL>", "docstring": "DEPRICATED: This has been ported to xdev, please use that version.\n\nOpens a file or code corresponding to a live python object in your\npreferred visual editor. This function is mainly useful in an interactive\nIPython session.\n\nThe visual editor is determined by the `VISUAL` environment variable.  If\nthis is not specified it defaults to gvim.\n\nArgs:\n    fpath (PathLike): a file path or python module / function\n    verbose (int): verbosity\n\nDisableExample:\n    >>> # This test interacts with a GUI frontend, not sure how to test.\n    >>> import ubelt as ub\n    >>> ub.editfile(ub.util_platform.__file__)\n    >>> ub.editfile(ub)\n    >>> ub.editfile(ub.editfile)", "id": "f5145:m12"}
{"signature": "def get_app_config_dir(appname, *args):", "body": "dpath = join(platform_config_dir(), appname, *args)<EOL>return dpath<EOL>", "docstring": "r\"\"\"\n    Returns a writable directory for an application\n    This should be used for persistent configuration files.\n\n    Args:\n        appname (str): the name of the application\n        *args: any other subdirectories may be specified\n\n    Returns:\n        PathLike: dpath: writable config directory for this application\n\n    SeeAlso:\n        ensure_app_config_dir", "id": "f5145:m5"}
{"signature": "def ensure_app_config_dir(appname, *args):", "body": "from ubelt import util_path<EOL>dpath = get_app_config_dir(appname, *args)<EOL>util_path.ensuredir(dpath)<EOL>return dpath<EOL>", "docstring": "Calls `get_app_config_dir` but ensures the directory exists.\n\nArgs:\n    appname (str): the name of the application\n    *args: any other subdirectories may be specified\n\nSeeAlso:\n    get_app_config_dir\n\nExample:\n    >>> import ubelt as ub\n    >>> dpath = ub.ensure_app_config_dir('ubelt')\n    >>> assert exists(dpath)", "id": "f5145:m6"}
{"signature": "def get_app_cache_dir(appname, *args):", "body": "dpath = join(platform_cache_dir(), appname, *args)<EOL>return dpath<EOL>", "docstring": "r\"\"\"\n    Returns a writable directory for an application.\n    This should be used for temporary deletable data.\n\n    Args:\n        appname (str): the name of the application\n        *args: any other subdirectories may be specified\n\n    Returns:\n        PathLike: dpath: writable cache directory for this application\n\n    SeeAlso:\n        ensure_app_cache_dir", "id": "f5145:m7"}
{"signature": "def platform_config_dir():", "body": "if LINUX:  <EOL><INDENT>dpath_ = os.environ.get('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>elif DARWIN:  <EOL><INDENT>dpath_  = '<STR_LIT>'<EOL><DEDENT>elif WIN32:  <EOL><INDENT>dpath_ = os.environ.get('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:  <EOL><INDENT>raise NotImplementedError('<STR_LIT>' % (sys.platform,))<EOL><DEDENT>dpath = normpath(expanduser(dpath_))<EOL>return dpath<EOL>", "docstring": "Returns a directory which should be writable for any application\nThis should be used for persistent configuration files.\n\nReturns:\n    PathLike : path to the cahce dir used by the current operating system", "id": "f5145:m1"}
{"signature": "def get_app_data_dir(appname, *args):", "body": "dpath = join(platform_data_dir(), appname, *args)<EOL>return dpath<EOL>", "docstring": "r\"\"\"\n    Returns a writable directory for an application.\n    This should be used for temporary deletable data.\n\n    Args:\n        appname (str): the name of the application\n        *args: any other subdirectories may be specified\n\n    Returns:\n        PathLike: dpath: writable data directory for this application\n\n    SeeAlso:\n        ensure_app_data_dir", "id": "f5145:m3"}
{"signature": "def ensure_app_resource_dir(appname, *args):  ", "body": "return ensure_app_cache_dir(appname, *args)<EOL>", "docstring": "Calls `get_app_resource_dir` but ensures the directory exists.\n\nDEPRICATED in favor of ensure_app_config_dir / ensure_app_data_dir\n\nArgs:\n    appname (str): the name of the application\n    *args: any other subdirectories may be specified\n\nSeeAlso:\n    get_app_resource_dir", "id": "f5145:m15"}
{"signature": "def platform_cache_dir():", "body": "if LINUX:  <EOL><INDENT>dpath_ = os.environ.get('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>elif DARWIN:  <EOL><INDENT>dpath_  = '<STR_LIT>'<EOL><DEDENT>elif WIN32:  <EOL><INDENT>dpath_ = os.environ.get('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:  <EOL><INDENT>raise NotImplementedError('<STR_LIT>' % (sys.platform,))<EOL><DEDENT>dpath = normpath(expanduser(dpath_))<EOL>return dpath<EOL>", "docstring": "Returns a directory which should be writable for any application\nThis should be used for temporary deletable data.\n\nReturns:\n    PathLike : path to the cache dir used by the current operating system", "id": "f5145:m2"}
{"signature": "def startfile(fpath, verbose=True):  ", "body": "from ubelt import util_cmd<EOL>if verbose:<EOL><INDENT>print('<STR_LIT>'.format(fpath))<EOL><DEDENT>fpath = normpath(fpath)<EOL>if not exists(fpath):<EOL><INDENT>raise Exception('<STR_LIT>' % fpath)<EOL><DEDENT>if not WIN32:<EOL><INDENT>import pipes<EOL>fpath = pipes.quote(fpath)<EOL><DEDENT>if LINUX:<EOL><INDENT>info = util_cmd.cmd(('<STR_LIT>', fpath), detach=True, verbose=verbose)<EOL><DEDENT>elif DARWIN:<EOL><INDENT>info = util_cmd.cmd(('<STR_LIT>', fpath), detach=True, verbose=verbose)<EOL><DEDENT>elif WIN32:<EOL><INDENT>os.startfile(fpath)<EOL>info = None<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>if info is not None:<EOL><INDENT>if not info['<STR_LIT>']:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Uses default program defined by the system to open a file.\nThis is done via `os.startfile` on windows, `open` on mac, and `xdg-open`\non linux.\n\nArgs:\n    fpath (PathLike): a file to open using the program associated with the\n        files extension type.\n    verbose (int): verbosity\n\nReferences:\n    http://stackoverflow.com/questions/2692873/quote-posix\n\nDisableExample:\n    >>> # This test interacts with a GUI frontend, not sure how to test.\n    >>> import ubelt as ub\n    >>> base = ub.ensure_app_cache_dir('ubelt')\n    >>> fpath1 = join(base, 'test_open.txt')\n    >>> ub.touch(fpath1)\n    >>> proc = ub.startfile(fpath1)", "id": "f5145:m9"}
{"signature": "def inject_method(self, func, name=None):", "body": "<EOL>new_method = func.__get__(self, self.__class__)<EOL>if name is None:<EOL><INDENT>name = func.__name__<EOL><DEDENT>setattr(self, name, new_method)<EOL>", "docstring": "Injects a function into an object instance as a bound method\n\nThe main use case of this function is for monkey patching. While monkey\npatching is sometimes necessary it should generally be avoided. Thus, we\nsimply remind the developer that there might be a better way.\n\nArgs:\n    self (object): instance to inject a function into\n    func (func): the function to inject (must contain an arg for self)\n    name (str): name of the method. optional. If not specified the name\n        of the function is used.\n\nExample:\n    >>> class Foo(object):\n    >>>     def bar(self):\n    >>>         return 'bar'\n    >>> def baz(self):\n    >>>     return 'baz'\n    >>> self = Foo()\n    >>> assert self.bar() == 'bar'\n    >>> assert not hasattr(self, 'baz')\n    >>> inject_method(self, baz)\n    >>> assert not hasattr(Foo, 'baz'), 'should only change one instance'\n    >>> assert self.baz() == 'baz'\n    >>> inject_method(self, baz, 'bar')\n    >>> assert self.bar() == 'baz'", "id": "f5146:m1"}
{"signature": "def save(self, data, cfgstr=None):", "body": "from six.moves import cPickle as pickle<EOL>if not self.enabled:<EOL><INDENT>return<EOL><DEDENT>if self.verbose > <NUM_LIT:0>:<EOL><INDENT>self.log('<STR_LIT>'.format(self.fname))<EOL><DEDENT>cfgstr = self._rectify_cfgstr(cfgstr)<EOL>condensed = self._condense_cfgstr(cfgstr)<EOL>util_path.ensuredir(self.dpath)<EOL>data_fpath = self.get_fpath(cfgstr=cfgstr)<EOL>meta_fpath = data_fpath + '<STR_LIT>'<EOL>with open(meta_fpath, '<STR_LIT:a>') as file_:<EOL><INDENT>file_.write('<STR_LIT>'.format(util_time.timestamp()))<EOL>file_.write(self.fname + '<STR_LIT:\\n>')<EOL>file_.write(condensed + '<STR_LIT:\\n>')<EOL>file_.write(cfgstr + '<STR_LIT:\\n>')<EOL>file_.write(str(self.meta) + '<STR_LIT:\\n>')<EOL><DEDENT>with open(data_fpath, '<STR_LIT:wb>') as file_:<EOL><INDENT>pickle.dump(data, file_, protocol=self.protocol)<EOL><DEDENT>", "docstring": "Writes data to path specified by `self.fpath(cfgstr)`.\n\nMetadata containing information about the cache will also be appended\nto an adjacent file with the `.meta` suffix.\n\nExample:\n    >>> from ubelt.util_cache import *  # NOQA\n    >>> # Normal functioning\n    >>> cfgstr = 'long-cfg' * 32\n    >>> cacher = Cacher('test_enabled_save', cfgstr)\n    >>> cacher.save('data')\n    >>> assert exists(cacher.get_fpath()), 'should be enabeled'\n    >>> assert exists(cacher.get_fpath() + '.meta'), 'missing metadata'\n    >>> # Setting the cacher as enabled=False turns it off\n    >>> cacher2 = Cacher('test_disabled_save', 'params', enabled=False)\n    >>> cacher2.save('data')\n    >>> assert not exists(cacher2.get_fpath()), 'should be disabled'", "id": "f5147:c0:m9"}
{"signature": "def exists(self, cfgstr=None):", "body": "return exists(self.get_fpath())<EOL>", "docstring": "Check to see if the cache exists", "id": "f5147:c0:m4"}
{"signature": "def get_fpath(self, cfgstr=None):", "body": "condensed = self._condense_cfgstr(cfgstr)<EOL>fname_cfgstr = '<STR_LIT>'.format(self.fname, condensed, self.ext)<EOL>fpath = join(self.dpath, fname_cfgstr)<EOL>fpath = normpath(fpath)<EOL>return fpath<EOL>", "docstring": "Reports the filepath that the cacher will use.\nIt will attempt to use '{fname}_{cfgstr}{ext}' unless that is too long.\nThen cfgstr will be hashed.\n\nExample:\n    >>> from ubelt.util_cache import Cacher\n    >>> import pytest\n    >>> with pytest.warns(UserWarning):\n    >>>     cacher = Cacher('test_cacher1')\n    >>>     cacher.get_fpath()\n    >>> self = Cacher('test_cacher2', cfgstr='cfg1')\n    >>> self.get_fpath()\n    >>> self = Cacher('test_cacher3', cfgstr='cfg1' * 32)\n    >>> self.get_fpath()", "id": "f5147:c0:m3"}
{"signature": "def existing_versions(self):", "body": "import glob<EOL>pattern = join(self.dpath, self.fname + '<STR_LIT>' + self.ext)<EOL>for fname in glob.iglob(pattern):<EOL><INDENT>data_fpath = join(self.dpath, fname)<EOL>yield data_fpath<EOL><DEDENT>", "docstring": "Returns data with different cfgstr values that were previously computed\nwith this cacher.\n\nExample:\n    >>> from ubelt.util_cache import Cacher\n    >>> # Ensure that some data exists\n    >>> known_fnames = set()\n    >>> cacher = Cacher('versioned_data', cfgstr='1')\n    >>> cacher.ensure(lambda: 'data1')\n    >>> known_fnames.add(cacher.get_fpath())\n    >>> cacher = Cacher('versioned_data', cfgstr='2')\n    >>> cacher.ensure(lambda: 'data2')\n    >>> known_fnames.add(cacher.get_fpath())\n    >>> # List previously computed configs for this type\n    >>> from os.path import basename\n    >>> cacher = Cacher('versioned_data', cfgstr='2')\n    >>> exist_fpaths = set(cacher.existing_versions())\n    >>> exist_fnames = list(map(basename, exist_fpaths))\n    >>> print(exist_fnames)\n    >>> assert exist_fpaths == known_fnames\n\n    ['versioned_data_1.pkl', 'versioned_data_2.pkl']", "id": "f5147:c0:m5"}
{"signature": "def tryload(self, cfgstr=None, on_error='<STR_LIT>'):", "body": "cfgstr = self._rectify_cfgstr(cfgstr)<EOL>if self.enabled:<EOL><INDENT>try:<EOL><INDENT>if self.verbose > <NUM_LIT:1>:<EOL><INDENT>self.log('<STR_LIT>'.format(self.fname))<EOL><DEDENT>return self.load(cfgstr)<EOL><DEDENT>except IOError:<EOL><INDENT>if self.verbose > <NUM_LIT:0>:<EOL><INDENT>self.log('<STR_LIT>'.format(self.fname))<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>if self.verbose > <NUM_LIT:0>:<EOL><INDENT>self.log('<STR_LIT>')<EOL><DEDENT>if on_error == '<STR_LIT>':<EOL><INDENT>raise<EOL><DEDENT>elif on_error == '<STR_LIT>':<EOL><INDENT>self.clear(cfgstr)<EOL>return None<EOL><DEDENT>else:<EOL><INDENT>raise KeyError('<STR_LIT>'.format(on_error))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if self.verbose > <NUM_LIT:1>:<EOL><INDENT>self.log('<STR_LIT>'.format(self.fname))<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Like load, but returns None if the load fails due to a cache miss.\n\nArgs:\n    on_error (str): How to handle non-io errors errors. Either raise,\n        which re-raises the exception, or clear which deletes the cache\n        and returns None.", "id": "f5147:c0:m7"}
{"signature": "def ensure(self, func, *args, **kwargs):", "body": "data = self.tryload()<EOL>if data is None:<EOL><INDENT>data = func(*args, **kwargs)<EOL>self.save(data)<EOL><DEDENT>return data<EOL>", "docstring": "r\"\"\"\n        Wraps around a function. A cfgstr must be stored in the base cacher.\n\n        Args:\n            func (callable): function that will compute data on cache miss\n            *args: passed to func\n            **kwargs: passed to func\n\n        Example:\n            >>> from ubelt.util_cache import *  # NOQA\n            >>> def func():\n            >>>     return 'expensive result'\n            >>> fname = 'test_cacher_ensure'\n            >>> cfgstr = 'func params'\n            >>> cacher = Cacher(fname, cfgstr)\n            >>> cacher.clear()\n            >>> data1 = cacher.ensure(func)\n            >>> data2 = cacher.ensure(func)\n            >>> assert data1 == 'expensive result'\n            >>> assert data1 == data2\n            >>> cacher.clear()", "id": "f5147:c0:m10"}
{"signature": "def transduce(self, es):", "body": "for e in es:<EOL><INDENT>ensure_freshness(e)<EOL><DEDENT>for (fb,bb) in self.builder_layers:<EOL><INDENT>fs = fb.initial_state().transduce(es)<EOL>bs = bb.initial_state().transduce(reversed(es))<EOL>es = [concatenate([f,b]) for f,b in zip(fs, reversed(bs))]<EOL><DEDENT>return es<EOL>", "docstring": "returns the list of output Expressions obtained by adding the given inputs\nto the current state, one by one, to both the forward and backward RNNs, \nand concatenating.\n\n@param es: a list of Expression\n\nsee also add_inputs(xs)\n\n.transduce(xs) is different from .add_inputs(xs) in the following way:\n\n    .add_inputs(xs) returns a list of RNNState pairs. RNNState objects can be\n     queried in various ways. In particular, they allow access to the previous\n     state, as well as to the state-vectors (h() and s() )\n\n    .transduce(xs) returns a list of Expression. These are just the output\n     expressions. For many cases, this suffices. \n     transduce is much more memory efficient than add_inputs.", "id": "f5153:c10:m5"}
{"signature": "def make_network_graph(compact, expression_names, lookup_names):", "body": "nodes = set()<EOL>edges = defaultdict(set) <EOL>var_name_dict = dict()<EOL>if expression_names:<EOL><INDENT>for e in graphviz_items: <EOL><INDENT>if e in expression_names:<EOL><INDENT>var_name_dict[e.vindex] = expression_names[e]<EOL><DEDENT><DEDENT><DEDENT>rnn_bldr_name = defaultdict(lambda: chr(len(rnn_bldr_name)+ord('<STR_LIT:A>')))<EOL>def vidx2str(vidx): return '<STR_LIT>' % ('<STR_LIT:N>', vidx)<EOL>for e in graphviz_items: <EOL><INDENT>vidx = e.vindex<EOL>f_name = e.name<EOL>args = e.args<EOL>output_dim = e.dim<EOL>input_dim = None <EOL>children = set()<EOL>node_type = '<STR_LIT>'<EOL>if f_name == '<STR_LIT>':<EOL><INDENT>[_dim] = args<EOL>arg_strs = []<EOL><DEDENT>elif f_name == '<STR_LIT>':<EOL><INDENT>[_v] = args<EOL>arg_strs = []<EOL><DEDENT>elif f_name == '<STR_LIT>':<EOL><INDENT>[_d1, _d2] = args<EOL>arg_strs = []<EOL><DEDENT>elif f_name == '<STR_LIT>':<EOL><INDENT>[_v, _d] = args<EOL>arg_strs = []<EOL><DEDENT>elif f_name == '<STR_LIT>':<EOL><INDENT>[_dim] = args<EOL>arg_strs = []<EOL>if compact:<EOL><INDENT>if vidx in var_name_dict:<EOL><INDENT>f_name = var_name_dict[vidx]<EOL><DEDENT><DEDENT>node_type = '<STR_LIT>'<EOL><DEDENT>elif f_name == '<STR_LIT>':<EOL><INDENT>[_dim] = args<EOL>arg_strs = []<EOL>if compact:<EOL><INDENT>if vidx in var_name_dict:<EOL><INDENT>f_name = var_name_dict[vidx]<EOL><DEDENT><DEDENT>node_type = '<STR_LIT>'<EOL><DEDENT>elif f_name == '<STR_LIT>':<EOL><INDENT>[p, idx, update] = args<EOL>[_dim] = p.args<EOL>if vidx in var_name_dict:<EOL><INDENT>name = var_name_dict[vidx]<EOL><DEDENT>else:<EOL><INDENT>name = None<EOL><DEDENT>item_name = None<EOL>if lookup_names and p in expression_names:<EOL><INDENT>param_name = expression_names[p]<EOL>if param_name in lookup_names:<EOL><INDENT>item_name = '<STR_LIT>' % (lookup_names[param_name][idx],)<EOL><DEDENT><DEDENT>if compact:<EOL><INDENT>if item_name is not None:<EOL><INDENT>f_name = 
item_name<EOL><DEDENT>elif name is not None:<EOL><INDENT>f_name = '<STR_LIT>' % (name, idx)<EOL><DEDENT>else:<EOL><INDENT>f_name = '<STR_LIT>' % (idx)<EOL><DEDENT>arg_strs = []<EOL><DEDENT>else:<EOL><INDENT>arg_strs = [var_name_dict.get(p.vindex, '<STR_LIT>' % (p.vindex))]<EOL>if item_name is not None:<EOL><INDENT>arg_strs.append(item_name)<EOL><DEDENT>vocab_size = _dim[<NUM_LIT:0>]<EOL>arg_strs.extend(['<STR_LIT:%s>' % (idx), '<STR_LIT:%s>' % (vocab_size), '<STR_LIT>' if update else '<STR_LIT>'])<EOL><DEDENT><DEDENT>elif f_name == '<STR_LIT>':<EOL><INDENT>[arg, input_dim, bldr_type, bldr_num, state_idx] = args <EOL>rnn_name = rnn_bldr_name[bldr_num]<EOL>if bldr_type.endswith('<STR_LIT>'):<EOL><INDENT>bldr_type[:-len('<STR_LIT>')]<EOL><DEDENT>f_name = '<STR_LIT>' % (bldr_type, rnn_name, state_idx)<EOL>if not compact:<EOL><INDENT>i = arg.vindex<EOL>s = var_name_dict.get(i, '<STR_LIT>' % (i))<EOL>arg_strs = [s]<EOL><DEDENT>else:<EOL><INDENT>arg_strs = []<EOL><DEDENT>children.add(vidx2str(arg.vindex))<EOL>node_type = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>arg_strs = []<EOL>for arg in args:<EOL><INDENT>if isinstance(arg, Expression):<EOL><INDENT>if not compact:<EOL><INDENT>i = arg.vindex<EOL>s = var_name_dict.get(i, '<STR_LIT>' % (i))<EOL>arg_strs.append(s)<EOL><DEDENT>children.add(vidx2str(arg.vindex))<EOL><DEDENT>elif isinstance(arg, float) and compact:<EOL><INDENT>s = re.sub('<STR_LIT>', '<STR_LIT>', '<STR_LIT>' % (arg))<EOL>if s == '<STR_LIT>':<EOL><INDENT>s = str(arg)<EOL><DEDENT>arg_strs.append(s)<EOL><DEDENT>else:<EOL><INDENT>arg_strs.append(str(arg))<EOL>", "docstring": "Make a network graph, represented as of nodes and a set of edges.  \n  The nodes are represented as tuples: (name: string, input_dim: Dim, label: string, output_dim: Dim, children: set[name], features: string)\n#   The edges are represented as dict of children to sets of parents: (child: string) -> [(parent: string, features: string)]", "id": "f5153:m83"}
{"signature": "def __init__(self, num_layers, input_dim, hidden_dim, model, rnn_builder_factory):", "body": "assert num_layers > <NUM_LIT:0><EOL>assert hidden_dim % <NUM_LIT:2> == <NUM_LIT:0><EOL>self.builder_layers = []<EOL>f = rnn_builder_factory(<NUM_LIT:1>, input_dim, hidden_dim/<NUM_LIT:2>, model)<EOL>b = rnn_builder_factory(<NUM_LIT:1>, input_dim, hidden_dim/<NUM_LIT:2>, model)<EOL>self.builder_layers.append((f,b))<EOL>for _ in xrange(num_layers-<NUM_LIT:1>):<EOL><INDENT>f = rnn_builder_factory(<NUM_LIT:1>, hidden_dim, hidden_dim/<NUM_LIT:2>, model)<EOL>b = rnn_builder_factory(<NUM_LIT:1>, hidden_dim, hidden_dim/<NUM_LIT:2>, model)<EOL>self.builder_layers.append((f,b))<EOL><DEDENT>", "docstring": "@param num_layers: depth of the BiRNN\n@param input_dim: size of the inputs\n@param hidden_dim: size of the outputs (and intermediate layer representations)\n@param model\n@param rnn_builder_factory: RNNBuilder subclass, e.g. LSTMBuilder", "id": "f5153:c10:m0"}
{"signature": "def add_inputs(self, es):", "body": "for e in es:<EOL><INDENT>ensure_freshness(e)<EOL><DEDENT>for (fb,bb) in self.builder_layers[:-<NUM_LIT:1>]:<EOL><INDENT>fs = fb.initial_state().transduce(es)<EOL>bs = bb.initial_state().transduce(reversed(es))<EOL>es = [concatenate([f,b]) for f,b in zip(fs, reversed(bs))]<EOL><DEDENT>(fb,bb) = self.builder_layers[-<NUM_LIT:1>]<EOL>fs = fb.initial_state().add_inputs(es)<EOL>bs = bb.initial_state().add_inputs(reversed(es))<EOL>return [(f,b) for f,b in zip(fs, reversed(bs))]<EOL>", "docstring": "returns the list of state pairs (stateF, stateB) obtained by adding \ninputs to both forward (stateF) and backward (stateB) RNNs.  \n\n@param es: a list of Expression\n\nsee also transduce(xs)\n\n.transduce(xs) is different from .add_inputs(xs) in the following way:\n\n    .add_inputs(xs) returns a list of RNNState pairs. RNNState objects can be\n     queried in various ways. In particular, they allow access to the previous\n     state, as well as to the state-vectors (h() and s() )\n\n    .transduce(xs) returns a list of Expression. These are just the output\n     expressions. For many cases, this suffices. \n     transduce is much more memory efficient than add_inputs.", "id": "f5153:c10:m4"}
{"signature": "def get_indent(indent_str):", "body": "if indent_str is None:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>return indent_str<EOL><DEDENT>", "docstring": "Check if the indent exists", "id": "f5156:m1"}
{"signature": "def pythonize_arguments(arg_str):", "body": "out_args = []<EOL>if arg_str is None:<EOL><INDENT>return out_str<EOL><DEDENT>args = arg_str.split('<STR_LIT:U+002C>')<EOL>for arg in args:<EOL><INDENT>components = arg.split('<STR_LIT:=>')<EOL>name_and_type=components[<NUM_LIT:0>].split('<STR_LIT:U+0020>')<EOL>if name_and_type[-<NUM_LIT:1>]=='<STR_LIT>' and len(name_and_type)><NUM_LIT:1>:<EOL><INDENT>name=name_and_type[-<NUM_LIT:2>]<EOL><DEDENT>else:<EOL><INDENT>name=name_and_type[-<NUM_LIT:1>]<EOL><DEDENT>if len(components)><NUM_LIT:1>:<EOL><INDENT>name+='<STR_LIT:=>'+components[<NUM_LIT:1>]<EOL><DEDENT>out_args.append(name)<EOL><DEDENT>return '<STR_LIT:U+002C>'.join(out_args)<EOL>", "docstring": "Remove types from function arguments in cython", "id": "f5156:m0"}
{"signature": "def load_mnist(dataset, path):", "body": "if dataset is \"<STR_LIT>\":<EOL><INDENT>fname_img = os.path.join(path, \"<STR_LIT>\")<EOL>fname_lbl = os.path.join(path, \"<STR_LIT>\")<EOL><DEDENT>elif dataset is \"<STR_LIT>\":<EOL><INDENT>fname_img = os.path.join(path, \"<STR_LIT>\")<EOL>fname_lbl = os.path.join(path, \"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>with open(fname_lbl, \"<STR_LIT:rb>\") as flbl:<EOL><INDENT>magic, num = struct.unpack(\"<STR_LIT>\", flbl.read(<NUM_LIT:8>))<EOL>labels = np.fromfile(flbl, dtype=np.int8)<EOL><DEDENT>with open(fname_img, \"<STR_LIT:rb>\") as fimg:<EOL><INDENT>magic, num, rows, cols = struct.unpack(\"<STR_LIT>\", fimg.read(<NUM_LIT:16>))<EOL>images = np.multiply(<EOL>np.fromfile(fimg, dtype=np.uint8).reshape(len(labels), rows*cols),<EOL><NUM_LIT:1.0> / <NUM_LIT>)<EOL><DEDENT>get_instance = lambda idx: (labels[idx], images[idx].reshape(<NUM_LIT:1>, <NUM_LIT>, <NUM_LIT>))<EOL>size_reset = lambda x: x.reshape(<NUM_LIT:1>, <NUM_LIT>, <NUM_LIT>)<EOL>return list(map(size_reset, images))<EOL>", "docstring": "wget -O - http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz | gunzip > train-images-idx3-ubyte\nwget -O - http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz | gunzip > train-labels-idx1-ubyte\nwget -O - http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz | gunzip > t10k-images-idx3-ubyte\nwget -O - http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz | gunzip > t10k-labels-idx1-ubyte", "id": "f5187:m0"}
{"signature": "def make_grid(tensor, nrow=<NUM_LIT:8>, padding=<NUM_LIT:2>, pad_value=<NUM_LIT:0>):", "body": "if not (isinstance(tensor, np.ndarray) or<EOL>(isinstance(tensor, list) and all(isinstance(t, np.ndarray) for t in tensor))):<EOL><INDENT>raise TypeError('<STR_LIT>'.format(type(tensor)))<EOL><DEDENT>if isinstance(tensor, list):<EOL><INDENT>tensor = np.stack(tensor, <NUM_LIT:0>)<EOL><DEDENT>if tensor.ndim == <NUM_LIT:2>:  <EOL><INDENT>tensor = tensor.reshape((<NUM_LIT:1>, tensor.shape[<NUM_LIT:0>], tensor.shape[<NUM_LIT:1>]))<EOL><DEDENT>if tensor.ndim == <NUM_LIT:3>:<EOL><INDENT>if tensor.shape[<NUM_LIT:0>] == <NUM_LIT:1>:  <EOL><INDENT>tensor = np.concatenate((tensor, tensor, tensor), <NUM_LIT:0>)<EOL><DEDENT>tensor = tensor.reshape((<NUM_LIT:1>, tensor.shape[<NUM_LIT:0>], tensor.shape[<NUM_LIT:1>], tensor.shape[<NUM_LIT:2>]))<EOL><DEDENT>if tensor.ndim == <NUM_LIT:4> and tensor.shape[<NUM_LIT:1>] == <NUM_LIT:1>:  <EOL><INDENT>tensor = np.concatenate((tensor, tensor, tensor), <NUM_LIT:1>)<EOL><DEDENT>if tensor.shape[<NUM_LIT:0>] == <NUM_LIT:1>:<EOL><INDENT>return np.squeeze(tensor)<EOL><DEDENT>nmaps = tensor.shape[<NUM_LIT:0>]<EOL>xmaps = min(nrow, nmaps)<EOL>ymaps = int(math.ceil(float(nmaps) / xmaps))<EOL>height, width = int(tensor.shape[<NUM_LIT:2>] + padding), int(tensor.shape[<NUM_LIT:3>] + padding)<EOL>grid = np.ones((<NUM_LIT:3>, height * ymaps + padding, width * xmaps + padding)) * pad_value<EOL>k = <NUM_LIT:0><EOL>for y in range(ymaps):<EOL><INDENT>for x in range(xmaps):<EOL><INDENT>if k >= nmaps:<EOL><INDENT>break<EOL><DEDENT>grid[:, y * height + padding:(y+<NUM_LIT:1>) * height,x * width + padding:(x+<NUM_LIT:1>) * width] = tensor[k]<EOL>k = k + <NUM_LIT:1><EOL><DEDENT><DEDENT>return grid<EOL>", "docstring": "Make a grid of images, via numpy.\n\n    Args:\n        tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n            or a list of images all of the same size.\n        nrow (int, optional): Number of images 
displayed in each row of the grid.\n            The Final grid size is (B / nrow, nrow). Default is 8.\n        padding (int, optional): amount of padding. Default is 2.\n        pad_value (float, optional): Value for the padded pixels.", "id": "f5187:m1"}
{"signature": "def save_image(tensor, filename, nrow=<NUM_LIT:8>, padding=<NUM_LIT:2>, pad_value=<NUM_LIT:0>):", "body": "from PIL import Image<EOL>grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value)<EOL>im = Image.fromarray(pre_pillow_float_img_process(grid))<EOL>im.save(filename)<EOL>", "docstring": "Save a given Tensor into an image file.\n\n    Args:\n        tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n            saves the tensor as a grid of images by calling ``make_grid``.\n        **kwargs: Other arguments are documented in ``make_grid``.", "id": "f5187:m3"}
{"signature": "def get_user_config(project_path, use_cache=True):", "body": "if sys.platform == '<STR_LIT:win32>':<EOL><INDENT>user_config = os.path.expanduser(r'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>user_config = os.path.join(<EOL>os.getenv('<STR_LIT>') or os.path.expanduser('<STR_LIT>'),<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>if os.path.exists(user_config):<EOL><INDENT>with open(user_config, '<STR_LIT:r>') as config_file:<EOL><INDENT>config = pytoml.load(config_file).get('<STR_LIT>', {})<EOL><DEDENT>config = merge_dict(get_default_config(), config)<EOL>config = process_extensions(config, project_path, use_cache=use_cache)<EOL>return config<EOL><DEDENT>return None<EOL>", "docstring": "Produces a TidyPy configuration that incorporates the configuration files\nstored in the current user's home directory.\n\n:param project_path: the path to the project that is going to be analyzed\n:type project_path: str\n:param use_cache:\n    whether or not to use cached versions of any remote/referenced TidyPy\n    configurations. If not specified, defaults to ``True``.\n:type use_cache: bool\n:rtype: dict", "id": "f5217:m10"}
{"signature": "def get_reports():", "body": "<EOL>if not hasattr(get_reports, '<STR_LIT>'):<EOL><INDENT>get_reports._CACHE = dict()<EOL>for entry in pkg_resources.iter_entry_points('<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>get_reports._CACHE[entry.name] = entry.load()<EOL><DEDENT>except ImportError as exc:  <EOL><INDENT>output_error(<EOL>'<STR_LIT>' % (<EOL>entry,<EOL>entry.dist,<EOL>exc,<EOL>),<EOL>)<EOL><DEDENT><DEDENT><DEDENT>return get_reports._CACHE<EOL>", "docstring": "Retrieves the TidyPy issue reports that are available in the current Python\nenvironment.\n\nThe returned dictionary has keys are the report names and values are the\nreport classes.\n\n:rtype: dict", "id": "f5217:m1"}
{"signature": "def purge_config_cache(location=None):", "body": "cache_path = get_cache_path(location)<EOL>if location:<EOL><INDENT>os.remove(cache_path)<EOL><DEDENT>else:<EOL><INDENT>shutil.rmtree(cache_path)<EOL><DEDENT>", "docstring": "Clears out the cache of TidyPy configurations that were retrieved from\noutside the normal locations.", "id": "f5217:m4"}
{"signature": "def parse_python_file(filepath):", "body": "with _AST_CACHE_LOCK:<EOL><INDENT>if filepath not in _AST_CACHE:<EOL><INDENT>source = read_file(filepath)<EOL>_AST_CACHE[filepath] = ast.parse(source, filename=filepath)<EOL><DEDENT><DEDENT>return _AST_CACHE[filepath]<EOL>", "docstring": "Retrieves the AST of the specified file.\n\nThis function performs simple caching so that the same file isn't read or\nparsed more than once per process.\n\n:param filepath: the file to parse\n:type filepath: str\n:returns: ast.AST", "id": "f5218:m7"}
{"signature": "def get_stdout(self):", "body": "return self._stdout.getvalue()<EOL>", "docstring": "Retrieves the content that was written to ``stdout``.\n\n:returns: str", "id": "f5218:c0:m3"}
{"signature": "def matches_masks(target, masks):", "body": "for mask in masks:<EOL><INDENT>if mask.search(target):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Determines whether or not the target string matches any of the regular\nexpressions specified.\n\n:param target: the string to check\n:type target: str\n:param masks: the regular expressions to check against\n:type masks: list(regular expression object)\n:returns: bool", "id": "f5218:m5"}
{"signature": "def output_error(msg):", "body": "click.echo(click.style(msg, fg='<STR_LIT>'), err=True)<EOL>", "docstring": "Prints the specified string to ``stderr``.\n\n:param msg: the message to print\n:type msg: str", "id": "f5218:m2"}
{"signature": "def get_requests():", "body": "return _REQUESTS<EOL>", "docstring": "Retrieves a ``requests`` object to use within TidyPy.", "id": "f5218:m8"}
{"signature": "def compile_masks(masks):", "body": "if not masks:<EOL><INDENT>masks = []<EOL><DEDENT>elif not isinstance(masks, (list, tuple)):<EOL><INDENT>masks = [masks]<EOL><DEDENT>return [<EOL>re.compile(mask)<EOL>for mask in masks<EOL>]<EOL>", "docstring": "Compiles a list of regular expressions.\n\n:param masks: the regular expressions to compile\n:type masks: list(str) or str\n:returns: list(regular expression object)", "id": "f5218:m4"}
{"signature": "@classmethod<EOL><INDENT>def get_default_config(cls):<DEDENT>", "body": "return {<EOL>'<STR_LIT>': True,<EOL>'<STR_LIT>': [],<EOL>'<STR_LIT>': [],<EOL>'<STR_LIT>': {},<EOL>}<EOL>", "docstring": "Produces a tool configuration stanza that acts as the base/default for\nthis tool.\n\nrtype: dict", "id": "f5227:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def can_be_used(cls):<DEDENT>", "body": "return True<EOL>", "docstring": "Indicates whether or not this tool can be executed now. Useful when you\nneed to check for certain environmental conditions (e.g., Python\nversion, dependency availability, etc).\n\nUnless overridden, always returns ``True``.\n\n:rtype: bool", "id": "f5227:c0:m0"}
{"signature": "def on_tool_finish(self, tool):", "body": "with self._lock:<EOL><INDENT>if tool in self.current_tools:<EOL><INDENT>self.current_tools.remove(tool)<EOL>self.completed_tools.append(tool)<EOL><DEDENT><DEDENT>", "docstring": "Called when an individual tool completes execution.\n\n:param tool: the name of the tool that completed\n:type tool: str", "id": "f5244:c0:m3"}
{"signature": "def on_finish(self):", "body": "", "docstring": "Called after all tools in the suite have completed.", "id": "f5244:c0:m4"}
{"signature": "@classmethod<EOL><INDENT>def retrieve(cls, location, project_path):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Retrieves a TidyPy configuration from the specified location.\n\n:param location:\n    a URI indicating where to retrieve the TidyPy configuration from\n:type location: str\n:param project_path: the full path to the project's base\n:type project_path: str\n:rtype: dict", "id": "f5251:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def parse(cls, content, is_pyproject=False):<DEDENT>", "body": "parsed = pytoml.loads(content)<EOL>if is_pyproject:<EOL><INDENT>parsed = parsed.get('<STR_LIT>', {})<EOL><DEDENT>parsed = parsed.get('<STR_LIT>', {})<EOL>return parsed<EOL>", "docstring": "A convenience method for parsing a TOML-serialized configuration.\n\n:param content: a TOML string containing a TidyPy configuration\n:type content: str\n:param is_pyproject:\n    whether or not the content is (or resembles) a ``pyproject.toml``\n    file, where the TidyPy configuration is located within a key named\n    ``tool``.\n:type is_pyproject: bool\n:rtype: dict", "id": "f5251:c0:m2"}
{"signature": "def output(self, msg, newline=True):", "body": "click.echo(text_type(msg), nl=newline, file=self.output_file)<EOL>", "docstring": "Writes the specified string to the output target of the report.\n\n:param msg: the message to output.\n:type msg: str\n:param newline:\n    whether or not to append a newline to the end of the message\n:type newline: str", "id": "f5264:c0:m4"}
{"signature": "def relative_filename(self, filename):", "body": "return Path(filename).relative_to(self.base_path).as_posix()<EOL>", "docstring": "Generates a path for the specified filename that is relative to the\nproject path.\n\n:param filename: the filename to generate the path for\n:type filename: str\n:rtype: str", "id": "f5264:c0:m3"}
{"signature": "def read_file(self, filepath):", "body": "return read_file(filepath)<EOL>", "docstring": "Retrieves the contents of the specified file.\n\nThis function performs simple caching so that the same file isn't read\nmore than once per process.\n\n:param filepath: the file to read.\n:type filepath: str\n:rtype: str", "id": "f5268:c0:m13"}
{"signature": "@property<EOL><INDENT>def project_path(self):<DEDENT>", "body": "return text_type(self.base_path)<EOL>", "docstring": "The path to the project that this Finder is operating from.", "id": "f5268:c0:m1"}
{"signature": "def is_excluded_dir(self, path):", "body": "if self.is_excluded(path):<EOL><INDENT>return True<EOL><DEDENT>return matches_masks(path.name, ALWAYS_EXCLUDED_DIRS)<EOL>", "docstring": "Determines whether or not the specified directory is excluded by the\nproject's configuration.\n\n:param path: the path to check\n:type path: pathlib.Path\n:rtype: bool", "id": "f5268:c0:m5"}
{"signature": "def __init__(self, base_path, config):", "body": "self.base_path = Path(base_path).resolve()<EOL>self.excludes = compile_masks(config['<STR_LIT>'])<EOL>self._found = dict()<EOL>self._find(self.base_path)<EOL>self._found = dict([<EOL>(dirname, files)<EOL>for dirname, files in iteritems(self._found)<EOL>if files<EOL>])<EOL>", "docstring": ":param base_path: the path to the base of the project\n:type base_path: str\n:param config: the configuration to use when searching the project\n:type config: dict", "id": "f5268:c0:m0"}
{"signature": "def directories(self, filters=None, containing=None):", "body": "filters = compile_masks(filters or [r'<STR_LIT>'])<EOL>contains = compile_masks(containing)<EOL>for dirname, files in iteritems(self._found):<EOL><INDENT>relpath = text_type(Path(dirname).relative_to(self.base_path))<EOL>if matches_masks(relpath, filters):<EOL><INDENT>if not contains or self._contains(files, contains):<EOL><INDENT>yield dirname<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "A generator that produces a sequence of paths to directories in the\nproject that matches the specified filters.\n\n:param filters:\n    the regular expressions to use when finding directories in the\n    project. If not specified, all directories are returned.\n:type filters: list(str)\n:param containing:\n    if a directory passes through the specified filters, it is checked\n    for the presence of a file that matches one of the regular\n    expressions in this parameter.\n:type containing: list(str)", "id": "f5268:c0:m8"}
{"signature": "@staticmethod<EOL><INDENT>def format_time(seconds):<DEDENT>", "body": "timedelta = datetime.timedelta(seconds=int(seconds))<EOL>mm, ss = divmod(timedelta.seconds, <NUM_LIT>)<EOL>if mm < <NUM_LIT>:<EOL><INDENT>return \"<STR_LIT>\" % (mm, ss)<EOL><DEDENT>hh, mm = divmod(mm, <NUM_LIT>)<EOL>if hh < <NUM_LIT>:<EOL><INDENT>return \"<STR_LIT>\" % (hh, mm, ss)<EOL><DEDENT>dd, hh = divmod(mm, <NUM_LIT>)<EOL>return \"<STR_LIT>\" % (dd, hh, mm, ss)<EOL>", "docstring": "Formats time as the string \"HH:MM:SS\".", "id": "f5273:c1:m1"}
{"signature": "def update(self, pbar):", "body": "if pbar.currval == <NUM_LIT:0>:<EOL><INDENT>return self.eta_format % '<STR_LIT>'<EOL><DEDENT>elif pbar.finished:<EOL><INDENT>return self.complete_format % self.format_time(pbar.seconds_elapsed)<EOL><DEDENT>else:<EOL><INDENT>elapsed = pbar.seconds_elapsed<EOL>eta = elapsed * pbar.maxval / pbar.currval - elapsed<EOL>return self.eta_format % self.format_time(eta)<EOL><DEDENT>", "docstring": "Updates the widget to show the ETA or total time when finished.", "id": "f5273:c1:m2"}
{"signature": "def _loop(self):", "body": "while True:<EOL><INDENT>try:<EOL><INDENT>with uncaught_greenlet_exception_context():<EOL><INDENT>self._loop_callback()<EOL><DEDENT><DEDENT>except gevent.GreenletExit:<EOL><INDENT>break<EOL><DEDENT>if self._stop_event.wait(self._interval):<EOL><INDENT>break<EOL><DEDENT><DEDENT>self._clear()<EOL>", "docstring": "Main loop - used internally.", "id": "f5285:c0:m1"}
{"signature": "def _loop_callback(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Subclasses should implement this function - called every loop iteration.", "id": "f5285:c0:m2"}
{"signature": "def start(self):", "body": "assert not self.has_started(), \"<STR_LIT>\"<EOL>self._stop_event = Event()<EOL>self._greenlet = gevent.spawn(self._loop)<EOL>", "docstring": "Starts the loop. Calling a running loop is an error.", "id": "f5285:c0:m3"}
{"signature": "@contextmanager<EOL>def loop_in_background(interval, callback):", "body": "loop = GeventLoop(interval, callback)<EOL>loop.start()<EOL>try:<EOL><INDENT>yield loop<EOL><DEDENT>finally:<EOL><INDENT>if loop.has_started():<EOL><INDENT>loop.stop()<EOL><DEDENT><DEDENT>", "docstring": "When entering the context, spawns a greenlet that sleeps for `interval` seconds between `callback` executions.\nWhen leaving the context stops the greenlet.\nThe yielded object is the `GeventLoop` object so the loop can be stopped from within the context.\n\nFor example:\n```\nwith loop_in_background(60.0, purge_cache) as purge_cache_job:\n    ...\n    ...\n    if should_stop_cache():\n        purge_cache_job.stop()\n```", "id": "f5285:m0"}
{"signature": "def stop(self, timeout=None):", "body": "assert self.has_started(), \"<STR_LIT>\"<EOL>greenlet = self._greenlet if gevent.getcurrent() != self._greenlet else None<EOL>self._stop_event.set()<EOL>if greenlet is not None and timeout is not None:<EOL><INDENT>greenlet.join(timeout)<EOL>return greenlet.ready<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Stops a running loop and waits for it to end if timeout is set. Calling a non-running loop is an error.\n:param timeout: time (in seconds) to wait for the loop to end after signalling it. ``None`` is to block till it\nends.\n:return: True if the loop stopped, False if still stopping.", "id": "f5285:c0:m4"}
{"signature": "def decode(json_string):", "body": "try:<EOL><INDENT>return json.loads(json_string)<EOL><DEDENT>except:<EOL><INDENT>raise DecodeError()<EOL><DEDENT>", "docstring": ":returns: a Python object", "id": "f5286:m2"}
{"signature": "def create_threadpool_executed_func(original_func):", "body": "def wrapped_func(*args, **kwargs):<EOL><INDENT>try:<EOL><INDENT>result = original_func(*args, **kwargs)<EOL>return True, result<EOL><DEDENT>except:<EOL><INDENT>return False, sys.exc_info()<EOL><DEDENT><DEDENT>def new_func(*args, **kwargs):<EOL><INDENT>status, result = gevent.get_hub().threadpool.apply(wrapped_func, args, kwargs)<EOL>if status:<EOL><INDENT>return result<EOL><DEDENT>else:<EOL><INDENT>six.reraise(*result)<EOL><DEDENT><DEDENT>new_func.__name__ = original_func.__name__<EOL>new_func.__doc__ = \"<STR_LIT>\" + (\"<STR_LIT:U+0020>\" + original_func.__doc__ if original_func.__doc__ is not None else \"<STR_LIT>\")<EOL>return new_func<EOL>", "docstring": "Returns a function wrapper that defers function calls execute inside gevent's threadpool but keeps any exception\nor backtrace in the caller's context.\n:param original_func: function to wrap\n:returns: wrapper function", "id": "f5290:m0"}
{"signature": "def safe_joinall(greenlets, timeout=None, raise_error=False):", "body": "greenlets = list(greenlets)<EOL>try:<EOL><INDENT>gevent.joinall(greenlets, timeout=timeout, raise_error=raise_error)<EOL><DEDENT>except gevent.GreenletExit:<EOL><INDENT>[greenlet.kill() for greenlet in greenlets if not greenlet.ready()]<EOL>raise<EOL><DEDENT>return greenlets<EOL>", "docstring": "Wrapper for gevent.joinall if the greenlet that waits for the joins is killed, it kills all the greenlets it\njoins for.", "id": "f5291:m9"}
{"signature": "def _normalize_instancemethod(instance_method):", "body": "if not hasattr(instance_method, '<STR_LIT>'):<EOL><INDENT>return instance_method<EOL><DEDENT>def _func(*args, **kwargs):<EOL><INDENT>return instance_method(*args, **kwargs)<EOL><DEDENT>_func.__name__ = repr(instance_method)<EOL>return _func<EOL>", "docstring": "wraps(instancemethod) returns a function, not an instancemethod so its repr() is all messed up;\nwe want the original repr to show up in the logs, therefore we do this trick", "id": "f5291:m3"}
{"signature": "def set_greenlet_uncaught_exception_handler(func):", "body": "global _uncaught_exception_handler<EOL>prev_handler, _uncaught_exception_handler = _uncaught_exception_handler, func<EOL>return prev_handler<EOL>", "docstring": "Sets a global greenlet uncaught exception handler that will get called if a greenlet spawned by one of this module's\nwrappers raises an uncaught exception.\n:param func: exception handler function that will receive the exc_info tuple as an argument\n:returns: previous exception handler function or None", "id": "f5291:m0"}
{"signature": "@job<EOL>def worker(wrapped, dkwargs, hash_value=None, *args, **kwargs):", "body": "if \"<STR_LIT>\" not in dkwargs:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise TypeError(msg)<EOL><DEDENT>event = dkwargs['<STR_LIT>']<EOL>if \"<STR_LIT>\" not in kwargs:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise TypeError(msg)<EOL><DEDENT>owner = kwargs['<STR_LIT>']<EOL>if \"<STR_LIT>\" not in kwargs:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise TypeError(msg)<EOL><DEDENT>identifier = kwargs['<STR_LIT>']<EOL>senderobj = DjangoRQSenderable(<EOL>wrapped, dkwargs, hash_value, WEBHOOK_ATTEMPTS, *args, **kwargs<EOL>)<EOL>senderobj.webhook_target = WebhookTarget.objects.get(<EOL>event=event,<EOL>owner=owner,<EOL>identifier=identifier<EOL>)<EOL>senderobj.url = senderobj.webhook_target.target_url<EOL>senderobj.payload = senderobj.get_payload()<EOL>senderobj.payload['<STR_LIT>'] = getattr(kwargs['<STR_LIT>'], WEBHOOK_OWNER_FIELD)<EOL>senderobj.payload['<STR_LIT>'] = dkwargs['<STR_LIT>']<EOL>return senderobj.send()<EOL>", "docstring": "This is an asynchronous sender callable that uses the Django ORM to store\n    webhooks. Redis is used to handle the message queue.\n\ndkwargs argument requires the following key/values:\n\n    :event: A string representing an event.\n\nkwargs argument requires the following key/values\n\n    :owner: The user who created/owns the event", "id": "f5303:m0"}
{"signature": "def notify(self, message):", "body": "data = dict(<EOL>payload=self.payload,<EOL>attempt=self.attempt,<EOL>success=self.success,<EOL>response_message=self.response_content,<EOL>hash_value=self.hash_value,<EOL>response_status=self.response.status_code,<EOL>notification=message,<EOL>created=timezone.now()<EOL>)<EOL>value = json.dumps(data, cls=StandardJSONEncoder)<EOL>key = make_key(self.event, self.owner.username, self.identifier)<EOL>redis.lpush(key, value)<EOL>", "docstring": "TODO: Add code to lpush to redis stack\n        rpop when stack hits size 'X'", "id": "f5305:c0:m1"}
{"signature": "def redislog_callable(wrapped, dkwargs, hash_value=None, *args, **kwargs):", "body": "if \"<STR_LIT>\" not in dkwargs:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise TypeError(msg)<EOL><DEDENT>event = dkwargs['<STR_LIT>']<EOL>if \"<STR_LIT>\" not in kwargs:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise TypeError(msg)<EOL><DEDENT>owner = kwargs['<STR_LIT>']<EOL>if \"<STR_LIT>\" not in kwargs:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise TypeError(msg)<EOL><DEDENT>identifier = kwargs['<STR_LIT>']<EOL>senderobj = RedisLogSenderable(<EOL>wrapped, dkwargs, hash_value, WEBHOOK_ATTEMPTS, *args, **kwargs<EOL>)<EOL>try:<EOL><INDENT>senderobj.webhook_target = WebhookTarget.objects.get(<EOL>event=event,<EOL>owner=owner,<EOL>identifier=identifier<EOL>)<EOL><DEDENT>except WebhookTarget.DoesNotExist:<EOL><INDENT>return {\"<STR_LIT:error>\": \"<STR_LIT>\"}<EOL><DEDENT>senderobj.url = senderobj.webhook_target.target_url<EOL>senderobj.payload = senderobj.get_payload()<EOL>senderobj.payload['<STR_LIT>'] = getattr(kwargs['<STR_LIT>'], WEBHOOK_OWNER_FIELD)<EOL>senderobj.payload['<STR_LIT>'] = dkwargs['<STR_LIT>']<EOL>return senderobj.send()<EOL>", "docstring": "This is a synchronous sender callable that uses the Django ORM to store\n    webhooks and Redis for the delivery log.\n\ndkwargs argument requires the following key/values:\n\n    :event: A string representing an event.\n\nkwargs argument requires the following key/values\n\n    :owner: The user who created/owns the event", "id": "f5305:m1"}
{"signature": "def event_choices(events):", "body": "if events is None:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ImproperlyConfigured(msg)<EOL><DEDENT>try:<EOL><INDENT>choices = [(x, x) for x in events]<EOL><DEDENT>except TypeError:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>msg = \"<STR_LIT>\"<EOL>raise ImproperlyConfigured(msg)<EOL><DEDENT>return choices<EOL>", "docstring": "Get the possible events from settings", "id": "f5309:m0"}
{"signature": "def progressbar(njobs, finished, msg=\"<STR_LIT>\", spacer=\"<STR_LIT:U+0020>\"):", "body": "if njobs:<EOL><INDENT>progress = <NUM_LIT:100>*(finished / float(njobs))<EOL><DEDENT>else:<EOL><INDENT>progress = <NUM_LIT:100><EOL><DEDENT>hashes = '<STR_LIT:#>'*int(progress/<NUM_LIT>)<EOL>nohash = '<STR_LIT:U+0020>'*int(<NUM_LIT:20>-len(hashes))<EOL>if not ipyrad.__interactive__:<EOL><INDENT>msg = msg.rsplit(\"<STR_LIT:|>\", <NUM_LIT:2>)[<NUM_LIT:0>]<EOL><DEDENT>args = [spacer, hashes+nohash, int(progress), msg]<EOL>print(\"<STR_LIT>\".format(*args), end=\"<STR_LIT>\")<EOL>sys.stdout.flush()<EOL>", "docstring": "prints a progress bar", "id": "f5312:m14"}
{"signature": "def comp(seq):", "body": "<EOL>return seq.replace(\"<STR_LIT:A>\", '<STR_LIT:t>').replace('<STR_LIT:T>', '<STR_LIT:a>').replace('<STR_LIT:C>', '<STR_LIT:g>').replace('<STR_LIT>', '<STR_LIT:c>').replace('<STR_LIT:n>', '<STR_LIT>').upper().replace(\"<STR_LIT>\", \"<STR_LIT:n>\")<EOL>", "docstring": "returns a seq with complement. Preserves little n's for splitters.", "id": "f5312:m3"}
{"signature": "def merge_after_pysam(data, clust):", "body": "try:<EOL><INDENT>r1file = tempfile.NamedTemporaryFile(mode='<STR_LIT:wb>', delete=False,<EOL>dir=data.dirs.edits,<EOL>suffix=\"<STR_LIT>\")<EOL>r2file = tempfile.NamedTemporaryFile(mode='<STR_LIT:wb>', delete=False,<EOL>dir=data.dirs.edits,<EOL>suffix=\"<STR_LIT>\")<EOL>r1dat = []<EOL>r2dat = []<EOL>for locus in clust:<EOL><INDENT>sname, seq = locus.split(\"<STR_LIT:\\n>\")<EOL>sname = \"<STR_LIT:@>\" + sname[<NUM_LIT:1>:]<EOL>r1, r2 = seq.split(\"<STR_LIT>\")<EOL>r1dat.append(\"<STR_LIT>\".format(sname, r1, \"<STR_LIT:+>\", \"<STR_LIT:B>\"*(len(r1))))<EOL>r2dat.append(\"<STR_LIT>\".format(sname, r2, \"<STR_LIT:+>\", \"<STR_LIT:B>\"*(len(r2))))<EOL><DEDENT>r1file.write(\"<STR_LIT:\\n>\".join(r1dat))<EOL>r2file.write(\"<STR_LIT:\\n>\".join(r2dat))<EOL>r1file.close()<EOL>r2file.close()<EOL>merged_file = tempfile.NamedTemporaryFile(mode='<STR_LIT:wb>',<EOL>dir=data.dirs.edits,<EOL>suffix=\"<STR_LIT>\").name<EOL>clust = []<EOL>merge_pairs(data, [(r1file.name, r2file.name)], merged_file, <NUM_LIT:0>, <NUM_LIT:1>)<EOL>with open(merged_file) as infile:<EOL><INDENT>quarts = itertools.izip(*[iter(infile)]*<NUM_LIT:4>)<EOL>while <NUM_LIT:1>:<EOL><INDENT>try:<EOL><INDENT>sname, seq, _, _ = quarts.next()<EOL>if not \"<STR_LIT>\" in sname.rsplit(\"<STR_LIT:;>\", <NUM_LIT:1>)[<NUM_LIT:1>]:<EOL><INDENT>try:<EOL><INDENT>R1, R2 = seq.split(\"<STR_LIT>\")<EOL>seq = R1 + \"<STR_LIT>\" + revcomp(R2)<EOL><DEDENT>except ValueError as inst:<EOL><INDENT>LOGGER.error(\"<STR_LIT>\".format(sname, seq))<EOL>raise<EOL><DEDENT><DEDENT><DEDENT>except StopIteration:<EOL><INDENT>break<EOL><DEDENT>sname = \"<STR_LIT:>>\" + sname[<NUM_LIT:1>:]<EOL>clust.extend([sname.strip(), seq.strip()])<EOL><DEDENT><DEDENT><DEDENT>except:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\")<EOL>raise<EOL><DEDENT>finally:<EOL><INDENT>for i in [r1file.name, r2file.name, merged_file]:<EOL><INDENT>if os.path.exists(i):<EOL><INDENT>log_level = 
logging.getLevelName(LOGGER.getEffectiveLevel())<EOL>os.remove(i)<EOL><DEDENT><DEDENT><DEDENT>return clust<EOL>", "docstring": "This is for pysam post-flight merging. The input is a cluster\nfor an individual locus. We have to split the clusters, write\nR1 and R2 to files then call merge_pairs(). This is not ideal,\nit's slow, but it works. This is the absolute worst way to do this,\nit bounces all the files for each locus off the disk. I/O _hog_.", "id": "f5312:m7"}
{"signature": "def unstruct(amb):", "body": "amb = amb.upper()<EOL>return DUCT.get(amb)<EOL>", "docstring": "This is copied from pyrad.alignable, and is referenced in\n    several of the loci2*.py conversion modules. It duplicates some\n    of the effort of unhetero(), but i guess it's fine for now. Probably\n    could merge these two functions if you wanted to.", "id": "f5312:m12"}
{"signature": "def fullcomp(seq):", "body": "<EOL>seq = seq.replace(\"<STR_LIT:A>\", '<STR_LIT:u>').replace('<STR_LIT:T>', '<STR_LIT:v>').replace('<STR_LIT:C>', '<STR_LIT:p>').replace('<STR_LIT>', '<STR_LIT:z>').replace('<STR_LIT:u>', '<STR_LIT:T>').replace('<STR_LIT:v>', '<STR_LIT:A>').replace('<STR_LIT:p>', '<STR_LIT>').replace('<STR_LIT:z>', '<STR_LIT:C>')<EOL>seq = seq.replace('<STR_LIT:R>', '<STR_LIT:u>').replace('<STR_LIT>', '<STR_LIT:v>').replace('<STR_LIT:Y>', '<STR_LIT:b>').replace('<STR_LIT:M>', '<STR_LIT:o>').replace('<STR_LIT:u>', '<STR_LIT:Y>').replace('<STR_LIT:v>', '<STR_LIT:M>').replace('<STR_LIT:b>', '<STR_LIT:R>').replace('<STR_LIT:o>', '<STR_LIT>')<EOL>seq = seq.replace('<STR_LIT:r>', '<STR_LIT:u>').replace('<STR_LIT:k>', '<STR_LIT:v>').replace('<STR_LIT:y>', '<STR_LIT:b>').replace('<STR_LIT:m>', '<STR_LIT:o>').replace('<STR_LIT:u>', '<STR_LIT:y>').replace('<STR_LIT:v>', '<STR_LIT:m>').replace('<STR_LIT:b>', '<STR_LIT:r>').replace('<STR_LIT:o>', '<STR_LIT:k>')<EOL>return seq<EOL>", "docstring": "returns complement of sequence including ambiguity characters,\n    and saves lower case info for multiple hetero sequences", "id": "f5312:m4"}
{"signature": "def fastq_touchup_for_vsearch_merge(read, outfile, reverse=False):", "body": "counts = <NUM_LIT:0><EOL>with open(outfile, '<STR_LIT:w>') as out:<EOL><INDENT>if read.endswith(\"<STR_LIT>\"):<EOL><INDENT>fr1 = gzip.open(read, '<STR_LIT:rb>')<EOL><DEDENT>else:<EOL><INDENT>fr1 = open(read, '<STR_LIT:rb>')<EOL><DEDENT>quarts = itertools.izip(*[iter(fr1)]*<NUM_LIT:4>)<EOL>writing = []<EOL>while <NUM_LIT:1>:<EOL><INDENT>try:<EOL><INDENT>lines = quarts.next()<EOL><DEDENT>except StopIteration:<EOL><INDENT>break<EOL><DEDENT>if reverse:<EOL><INDENT>seq = lines[<NUM_LIT:1>].strip()[::-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>seq = lines[<NUM_LIT:1>].strip()<EOL><DEDENT>writing.append(\"<STR_LIT>\".join([<EOL>lines[<NUM_LIT:0>],<EOL>seq+\"<STR_LIT:\\n>\",<EOL>lines[<NUM_LIT:2>],<EOL>\"<STR_LIT:B>\"*len(seq)<EOL>]))<EOL>counts += <NUM_LIT:1><EOL>if not counts % <NUM_LIT:1000>:<EOL><INDENT>out.write(\"<STR_LIT:\\n>\".join(writing)+\"<STR_LIT:\\n>\")<EOL>writing = []<EOL><DEDENT><DEDENT>if writing:<EOL><INDENT>out.write(\"<STR_LIT:\\n>\".join(writing))<EOL><DEDENT><DEDENT>out.close()<EOL>fr1.close()<EOL>", "docstring": "option to change orientation of reads and sets Qscore to B", "id": "f5312:m5"}
{"signature": "def unhetero(amb):", "body": "amb = amb.upper()<EOL>return AMBIGS.get(amb)<EOL>", "docstring": "returns bases from ambiguity code", "id": "f5312:m11"}
{"signature": "def get_haploid_lik(errors, bfreqs, ustacks, counts):", "body": "hetero = <NUM_LIT:0.><EOL>if errors <= <NUM_LIT:0.>:<EOL><INDENT>score = np.exp(<NUM_LIT:100>)<EOL><DEDENT>else:<EOL><INDENT>lik1 = ((<NUM_LIT:1.>-hetero)*likelihood1(errors, bfreqs, ustacks)) <EOL>liks = lik1<EOL>logliks = np.log(liks[liks > <NUM_LIT:0>])*counts[liks > <NUM_LIT:0>]<EOL>score = -logliks.sum()<EOL><DEDENT>return score<EOL>", "docstring": "Log likelihood score given values [E].", "id": "f5313:m5"}
{"signature": "def stackarray(data, sample):", "body": "<EOL>sample, _, _, nhidepth, maxlen = recal_hidepth(data, sample)<EOL>clusters = gzip.open(sample.files.clusters)<EOL>pairdealer = itertools.izip(*[iter(clusters)]*<NUM_LIT:2>)<EOL>dims = (nhidepth, maxlen, <NUM_LIT:4>)<EOL>stacked = np.zeros(dims, dtype=np.uint64)<EOL>cutlens = [None, None]<EOL>try:<EOL><INDENT>cutlens[<NUM_LIT:0>] = len(data.paramsdict[\"<STR_LIT>\"][<NUM_LIT:0>])<EOL>cutlens[<NUM_LIT:1>] = maxlen - len(data.paramsdict[\"<STR_LIT>\"][<NUM_LIT:1>])<EOL><DEDENT>except TypeError:<EOL><INDENT>pass<EOL><DEDENT>nclust = <NUM_LIT:0><EOL>done = <NUM_LIT:0><EOL>while not done:<EOL><INDENT>try:<EOL><INDENT>done, chunk = clustdealer(pairdealer, <NUM_LIT:1>)<EOL><DEDENT>except IndexError:<EOL><INDENT>raise IPyradError(\"<STR_LIT>\", chunk)<EOL><DEDENT>if chunk:<EOL><INDENT>piece = chunk[<NUM_LIT:0>].strip().split(\"<STR_LIT:\\n>\")<EOL>names = piece[<NUM_LIT:0>::<NUM_LIT:2>]<EOL>seqs = piece[<NUM_LIT:1>::<NUM_LIT:2>]<EOL>reps = [int(sname.split(\"<STR_LIT:;>\")[-<NUM_LIT:2>][<NUM_LIT:5>:]) for sname in names]<EOL>sseqs = [list(seq) for seq in seqs]<EOL>arrayed = np.concatenate(<EOL>[[seq]*rep for seq, rep in zip(sseqs, reps)])<EOL>if arrayed.shape[<NUM_LIT:0>] >= data.paramsdict[\"<STR_LIT>\"]:<EOL><INDENT>arrayed = arrayed[:<NUM_LIT>, cutlens[<NUM_LIT:0>]:cutlens[<NUM_LIT:1>]]<EOL>arrayed = arrayed[:, ~np.any(arrayed == \"<STR_LIT:n>\", axis=<NUM_LIT:0>)]<EOL>arrayed[arrayed == \"<STR_LIT:->\"] = \"<STR_LIT:N>\"<EOL>arrayed = arrayed[:, ~np.all(arrayed == \"<STR_LIT:N>\", axis=<NUM_LIT:0>)]<EOL>catg = np.array([np.sum(arrayed == i, axis=<NUM_LIT:0>) for i in list(\"<STR_LIT>\")], <EOL>dtype=np.uint64).T<EOL>stacked[nclust, :catg.shape[<NUM_LIT:0>], :] = catg<EOL>nclust += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>newstack = stacked[stacked.sum(axis=<NUM_LIT:2>) > <NUM_LIT:0>]<EOL>assert not np.any(newstack.sum(axis=<NUM_LIT:1>) == <NUM_LIT:0>), \"<STR_LIT>\"<EOL>clusters.close()<EOL>return 
newstack<EOL>", "docstring": "Stacks clusters into arrays", "id": "f5313:m7"}
{"signature": "def nget_diploid_lik(pstart, bfreqs, ustacks, counts):", "body": "hetero, errors = pstart<EOL>if (hetero <= <NUM_LIT:0.>) or (errors <= <NUM_LIT:0.>):<EOL><INDENT>score = np.exp(<NUM_LIT:100>)<EOL><DEDENT>else:<EOL><INDENT>lik1 = (<NUM_LIT:1.>-hetero) * likelihood1(errors, bfreqs, ustacks)<EOL>lik2 = (hetero) * nlikelihood2(errors, bfreqs, ustacks)<EOL>liks = lik1 + lik2<EOL>logliks = np.log(liks[liks > <NUM_LIT:0>]) * counts[liks > <NUM_LIT:0>]<EOL>score = -logliks.sum()<EOL><DEDENT>return score<EOL>", "docstring": "Log likelihood score given values [H,E]", "id": "f5313:m4"}
{"signature": "@numba.jit(nopython=True)<EOL>def nblik2_build(ustacks):", "body": "<EOL>tots = np.empty((ustacks.shape[<NUM_LIT:0>], <NUM_LIT:1>))<EOL>twos = np.empty((ustacks.shape[<NUM_LIT:0>], <NUM_LIT:6>))<EOL>thrs = np.empty((ustacks.shape[<NUM_LIT:0>], <NUM_LIT:6>, <NUM_LIT:2>))<EOL>for idx in xrange(ustacks.shape[<NUM_LIT:0>]):<EOL><INDENT>ust = ustacks[idx]<EOL>tot = ust.sum()<EOL>tots[idx] = tot<EOL>i = <NUM_LIT:0><EOL>for jdx in xrange(<NUM_LIT:4>):<EOL><INDENT>for kdx in xrange(<NUM_LIT:4>):<EOL><INDENT>if jdx < kdx:<EOL><INDENT>twos[idx][i] = tot - ust[jdx] - ust[kdx]<EOL>thrs[idx][i] = ust[jdx], ust[jdx] + ust[kdx]<EOL>i += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>return tots, twos, thrs<EOL>", "docstring": "JIT'd function builds large array that can be used to calc binom pmf", "id": "f5313:m1"}
{"signature": "def recal_hidepth(data, sample):", "body": "<EOL>majrdepth = data.paramsdict[\"<STR_LIT>\"]<EOL>statdepth = data.paramsdict[\"<STR_LIT>\"]    <EOL>maxlen = data._hackersonly[\"<STR_LIT>\"]<EOL>if not hasattr(sample.stats_dfs.s3, \"<STR_LIT>\"):<EOL><INDENT>sample.stats_dfs.s3[\"<STR_LIT>\"] = data.paramsdict[\"<STR_LIT>\"]<EOL><DEDENT>if <NUM_LIT:1>: <EOL><INDENT>LOGGER.info(\"<STR_LIT>\")<EOL>maxlens, depths = get_quick_depths(data, sample)<EOL>hidepths = depths >= majrdepth<EOL>stathidepths = depths >= statdepth<EOL>keepmj = depths[hidepths]<EOL>keepst = depths[stathidepths]<EOL>try:<EOL><INDENT>statlens = maxlens[stathidepths]<EOL>statlen = int(statlens.mean() + (<NUM_LIT>*statlens.std()))<EOL><DEDENT>except:<EOL><INDENT>raise IPyradError(\"<STR_LIT>\")<EOL><DEDENT>LOGGER.info(\"<STR_LIT>\", maxlens.shape, maxlens.mean(), maxlens.std())<EOL>maxlens = maxlens[hidepths]<EOL>maxlen = int(maxlens.mean() + (<NUM_LIT>*maxlens.std()))<EOL>sample.stats[\"<STR_LIT>\"] = keepmj.shape[<NUM_LIT:0>]<EOL>sample.stats_dfs.s3[\"<STR_LIT>\"] = keepmj.shape[<NUM_LIT:0>]        <EOL><DEDENT>return sample, keepmj.shape[<NUM_LIT:0>], maxlen, keepst.shape[<NUM_LIT:0>], statlen<EOL>", "docstring": "if mindepth setting were changed then 'clusters_hidepth' needs to be \nrecalculated. Check and recalculate if necessary.", "id": "f5313:m6"}
{"signature": "def assembly_cleanup(data):", "body": "<EOL>data.stats_dfs.s4 = data._build_stat(\"<STR_LIT>\")<EOL>data.stats_files.s4 = os.path.join(data.dirs.clusts, <EOL>\"<STR_LIT>\")<EOL>with io.open(data.stats_files.s4, '<STR_LIT:w>') as outfile:<EOL><INDENT>data.stats_dfs.s4.to_string(outfile)<EOL><DEDENT>fails = data.stats[data.stats[\"<STR_LIT:state>\"] == <NUM_LIT:3>].index.values<EOL>if fails:<EOL><INDENT>msg = \"\"\"<STR_LIT>\"\"\".format(fails)<EOL>print(msg)<EOL><DEDENT>", "docstring": "cleanup assembly stats", "id": "f5313:m12"}
{"signature": "def refmap_stats(data, sample):", "body": "<EOL>mapf = os.path.join(data.dirs.refmapping, sample.name+\"<STR_LIT>\")<EOL>umapf = os.path.join(data.dirs.refmapping, sample.name+\"<STR_LIT>\")<EOL>cmd1 = [ipyrad.bins.samtools, \"<STR_LIT>\", umapf]<EOL>proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)<EOL>result1 = proc1.communicate()[<NUM_LIT:0>]<EOL>cmd2 = [ipyrad.bins.samtools, \"<STR_LIT>\", mapf]<EOL>proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)<EOL>result2 = proc2.communicate()[<NUM_LIT:0>]<EOL>if \"<STR_LIT>\" in data.paramsdict[\"<STR_LIT>\"]:<EOL><INDENT>sample.stats[\"<STR_LIT>\"] = int(result1.split()[<NUM_LIT:0>]) / <NUM_LIT:2><EOL>sample.stats[\"<STR_LIT>\"] = int(result2.split()[<NUM_LIT:0>]) / <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>sample.stats[\"<STR_LIT>\"] = int(result1.split()[<NUM_LIT:0>])<EOL>sample.stats[\"<STR_LIT>\"] = int(result2.split()[<NUM_LIT:0>])<EOL><DEDENT>sample_cleanup(data, sample)<EOL>", "docstring": "Get the number of mapped and unmapped reads for a sample\nand update sample.stats", "id": "f5314:m13"}
{"signature": "def mapreads(data, sample, nthreads, force):", "body": "LOGGER.info(\"<STR_LIT>\", sample.name, nthreads)<EOL>derepfile = os.path.join(data.dirs.edits, sample.name+\"<STR_LIT>\")<EOL>sample.files.dereps = [derepfile]<EOL>mumapfile = sample.files.unmapped_reads<EOL>umap1file = os.path.join(data.dirs.edits, sample.name+\"<STR_LIT>\")<EOL>umap2file = os.path.join(data.dirs.edits, sample.name+\"<STR_LIT>\")        <EOL>if \"<STR_LIT>\" in data.paramsdict[\"<STR_LIT>\"]:<EOL><INDENT>sample.files.split1 = os.path.join(data.dirs.edits, sample.name+\"<STR_LIT>\")<EOL>sample.files.split2 = os.path.join(data.dirs.edits, sample.name+\"<STR_LIT>\")<EOL>sample.files.dereps = [sample.files.split1, sample.files.split2]<EOL>split_merged_reads(sample.files.dereps, derepfile)<EOL><DEDENT>if \"<STR_LIT>\" in data._hackersonly[\"<STR_LIT>\"]:<EOL><INDENT>cmd1 = [ipyrad.bins.smalt, \"<STR_LIT>\", <EOL>\"<STR_LIT>\", \"<STR_LIT>\", <EOL>\"<STR_LIT>\", str(max(<NUM_LIT:1>, nthreads)),<EOL>\"<STR_LIT>\", str(data.paramsdict['<STR_LIT>']), <EOL>\"<STR_LIT>\", os.path.join(data.dirs.refmapping, sample.name+\"<STR_LIT>\"),<EOL>\"<STR_LIT>\",<EOL>data.paramsdict['<STR_LIT>']<EOL>] + sample.files.dereps<EOL>cmd1_stdout = sps.PIPE<EOL>cmd1_stderr = sps.STDOUT<EOL><DEDENT>else:<EOL><INDENT>cmd1 = [ipyrad.bins.bwa, \"<STR_LIT>\",<EOL>\"<STR_LIT>\", str(max(<NUM_LIT:1>, nthreads)),<EOL>\"<STR_LIT>\",<EOL>data.paramsdict['<STR_LIT>']<EOL>] + sample.files.dereps<EOL>try:<EOL><INDENT>bwa_args = data._hackersonly[\"<STR_LIT>\"].split()<EOL>bwa_args.reverse()<EOL>for arg in bwa_args:<EOL><INDENT>cmd1.insert(<NUM_LIT:2>, arg)<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>cmd1_stdout = open(os.path.join(data.dirs.refmapping, sample.name+\"<STR_LIT>\"), '<STR_LIT:w>')<EOL>cmd1_stderr = None<EOL><DEDENT>cmd2 = [ipyrad.bins.samtools, \"<STR_LIT>\", <EOL>\"<STR_LIT>\", <EOL>\"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", os.path.join(data.dirs.refmapping, 
sample.name+\"<STR_LIT>\"), <EOL>os.path.join(data.dirs.refmapping, sample.name+\"<STR_LIT>\")]<EOL>cmd3 = [ipyrad.bins.samtools, \"<STR_LIT>\", <EOL>\"<STR_LIT>\", os.path.join(data.dirs.refmapping, sample.name+\"<STR_LIT>\"),<EOL>\"<STR_LIT>\", \"<STR_LIT>\", <EOL>\"<STR_LIT>\", sample.files.mapped_reads]<EOL>cmd4 = [ipyrad.bins.samtools, \"<STR_LIT:index>\", sample.files.mapped_reads]<EOL>cmd5 = [ipyrad.bins.samtools, \"<STR_LIT>\", \"<STR_LIT>\",<EOL>os.path.join(data.dirs.refmapping, sample.name+\"<STR_LIT>\")]<EOL>if '<STR_LIT>' in data.paramsdict[\"<STR_LIT>\"]:<EOL><INDENT>if \"<STR_LIT>\" in data._hackersonly[\"<STR_LIT>\"]:<EOL><INDENT>cmd1.insert(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>cmd1.insert(<NUM_LIT:2>, \"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>cmd2.insert(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>cmd2.insert(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>cmd5.insert(<NUM_LIT:2>, umap1file)<EOL>cmd5.insert(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>cmd5.insert(<NUM_LIT:2>, umap2file)<EOL>cmd5.insert(<NUM_LIT:2>, \"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>cmd5.insert(<NUM_LIT:2>, mumapfile)<EOL>cmd5.insert(<NUM_LIT:2>, \"<STR_LIT>\")<EOL><DEDENT>LOGGER.debug(\"<STR_LIT:U+0020>\".join(cmd1))<EOL>proc1 = sps.Popen(cmd1, stderr=cmd1_stderr, stdout=cmd1_stdout)<EOL>try:<EOL><INDENT>error1 = proc1.communicate()[<NUM_LIT:0>]<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>proc1.kill()<EOL><DEDENT>if proc1.returncode:<EOL><INDENT>raise IPyradWarningExit(error1)<EOL><DEDENT>LOGGER.debug(\"<STR_LIT:U+0020>\".join(cmd2))<EOL>proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)<EOL>LOGGER.debug(\"<STR_LIT:U+0020>\".join(cmd3))<EOL>proc3 = sps.Popen(cmd3, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc2.stdout)<EOL>error3 = proc3.communicate()[<NUM_LIT:0>]<EOL>if proc3.returncode:<EOL><INDENT>raise IPyradWarningExit(error3)<EOL><DEDENT>proc2.stdout.close()<EOL>LOGGER.debug(\"<STR_LIT:U+0020>\".join(cmd4))<EOL>proc4 = sps.Popen(cmd4, stderr=sps.STDOUT, 
stdout=sps.PIPE)<EOL>error4 = proc4.communicate()[<NUM_LIT:0>]<EOL>if proc4.returncode:<EOL><INDENT>raise IPyradWarningExit(error4)<EOL><DEDENT>LOGGER.debug(\"<STR_LIT:U+0020>\".join(cmd5))<EOL>proc5 = sps.Popen(cmd5, stderr=sps.STDOUT, stdout=sps.PIPE)<EOL>error5 = proc5.communicate()[<NUM_LIT:0>]<EOL>if proc5.returncode:<EOL><INDENT>raise IPyradWarningExit(error5)<EOL><DEDENT>if '<STR_LIT>' in data.paramsdict[\"<STR_LIT>\"]:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\".format(umap1file, umap2file))<EOL>merge_pairs_after_refmapping(data, [(umap1file, umap2file)], mumapfile)<EOL><DEDENT>", "docstring": "Attempt to map reads to reference sequence. This reads in the fasta files\n(samples.files.edits), and maps each read to the reference. Unmapped reads \nare dropped right back in the de novo pipeline. Reads that map successfully\nare processed and pushed downstream and joined with the rest of the data \npost muscle_align. \n\nMapped reads end up in a sam file.", "id": "f5314:m2"}
{"signature": "def fetch_cluster_se(data, samfile, chrom, rstart, rend):", "body": "<EOL>overlap_buffer = data._hackersonly[\"<STR_LIT>\"]<EOL>rstart_buff = rstart + overlap_buffer<EOL>rend_buff = rend - overlap_buffer<EOL>if rstart_buff > rend_buff:<EOL><INDENT>tmp = rstart_buff<EOL>rstart_buff = rend_buff<EOL>rend_buff = tmp<EOL><DEDENT>if rstart_buff == rend_buff:<EOL><INDENT>rend_buff += <NUM_LIT:1><EOL><DEDENT>rdict = {}<EOL>clust = []<EOL>iterreg = []<EOL>iterreg = samfile.fetch(chrom, rstart_buff, rend_buff)<EOL>for read in iterreg:<EOL><INDENT>if read.qname not in rdict:<EOL><INDENT>rdict[read.qname] = read<EOL><DEDENT><DEDENT>sfunc = lambda x: int(x.split(\"<STR_LIT>\")[<NUM_LIT:1>].split(\"<STR_LIT:;>\")[<NUM_LIT:0>])<EOL>rkeys = sorted(rdict.keys(), key=sfunc, reverse=True)<EOL>try:<EOL><INDENT>read1 = rdict[rkeys[<NUM_LIT:0>]]<EOL><DEDENT>except ValueError:<EOL><INDENT>LOGGER.error(\"<STR_LIT>\".format(rkeys[<NUM_LIT:0>], rdict))<EOL>return \"<STR_LIT>\"<EOL><DEDENT>poss = read1.get_reference_positions(full_length=True)<EOL>seed_r1start = min(poss)<EOL>seed_r1end = max(poss)<EOL>if read1.is_reverse:<EOL><INDENT>seq = revcomp(read1.seq)<EOL><DEDENT>else:<EOL><INDENT>seq = read1.seq<EOL><DEDENT>size = sfunc(rkeys[<NUM_LIT:0>])<EOL>clust.append(\"<STR_LIT>\".format(chrom, seed_r1start, seed_r1end, size, seq))<EOL>if len(rkeys) > <NUM_LIT:1>:<EOL><INDENT>for key in rkeys[<NUM_LIT:1>:]:<EOL><INDENT>skip = False<EOL>try:<EOL><INDENT>read1 = rdict[key]<EOL><DEDENT>except ValueError:<EOL><INDENT>read1 = rdict[key][<NUM_LIT:0>]<EOL>skip = True<EOL><DEDENT>if not skip:<EOL><INDENT>poss = read1.get_reference_positions(full_length=True)<EOL>minpos = min(poss)<EOL>maxpos = max(poss)<EOL>if read1.is_reverse:<EOL><INDENT>seq = revcomp(read1.seq)<EOL><DEDENT>else:<EOL><INDENT>seq = read1.seq<EOL><DEDENT>size = sfunc(key)<EOL>clust.append(\"<STR_LIT>\".format(chrom, minpos, maxpos, size, seq))<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>return 
clust<EOL>", "docstring": "Builds a single end cluster from the refmapped data.", "id": "f5314:m3"}
{"signature": "def trim_reference_sequence(fasta):", "body": "LOGGER.debug(\"<STR_LIT>\".format(fasta[<NUM_LIT:0>]))<EOL>if \"<STR_LIT>\" in fasta[<NUM_LIT:1>]:<EOL><INDENT>r1_len = len(fasta[<NUM_LIT:1>].split(\"<STR_LIT:\\n>\")[<NUM_LIT:1>].split(\"<STR_LIT>\")[<NUM_LIT:0>])<EOL>r2_len = len(fasta[<NUM_LIT:1>].split(\"<STR_LIT:\\n>\")[<NUM_LIT:1>].split(\"<STR_LIT>\")[<NUM_LIT:1>])<EOL>new_seq = fasta[<NUM_LIT:0>].split(\"<STR_LIT:\\n>\")[<NUM_LIT:1>][:r1_len]+(\"<STR_LIT>\")+ revcomp(fasta[<NUM_LIT:0>].split(\"<STR_LIT:\\n>\")[<NUM_LIT:1>][-r2_len:])<EOL>fasta[<NUM_LIT:0>] = fasta[<NUM_LIT:0>].split(\"<STR_LIT:\\n>\")[<NUM_LIT:0>]+\"<STR_LIT:\\n>\"+new_seq<EOL><DEDENT>LOGGER.debug(\"<STR_LIT>\".format(fasta[<NUM_LIT:0>]))<EOL>return fasta<EOL>", "docstring": "If doing PE and R1/R2 don't overlap then the reference sequence\nwill be quite long and will cause indel hell during the \nalignment stage. Here trim the reference sequence to the length\nof the merged reads. Input is a list of alternating locus labels\nand sequence data. The first locus label is the reference\nsequence label and the first seq is the reference seq. Returns\nthe same list except with the reference sequence trimmed to\nthe length of the rad tags", "id": "f5314:m11"}
{"signature": "def fetch_cluster_pairs(data, samfile, chrom, rstart, rend):", "body": "<EOL>rdict = {}<EOL>clust = []<EOL>iterreg = samfile.fetch(chrom, rstart, rend)<EOL>for read in iterreg:<EOL><INDENT>if read.qname not in rdict:<EOL><INDENT>rdict[read.qname] = [read]<EOL><DEDENT>else:<EOL><INDENT>rdict[read.qname].append(read)<EOL><DEDENT><DEDENT>sfunc = lambda x: int(x.split(\"<STR_LIT>\")[<NUM_LIT:1>].split(\"<STR_LIT:;>\")[<NUM_LIT:0>])<EOL>rkeys = sorted(rdict.keys(), key=sfunc, reverse=True)<EOL>try:<EOL><INDENT>read1, read2 = rdict[rkeys[<NUM_LIT:0>]]<EOL><DEDENT>except ValueError:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>poss = read1.get_reference_positions() + read2.get_reference_positions()<EOL>seed_r1start = min(poss)<EOL>seed_r2end = max(poss)<EOL>reads_overlap = False<EOL>if read1.is_reverse:<EOL><INDENT>if read2.aend > read1.get_blocks()[<NUM_LIT:0>][<NUM_LIT:0>]:<EOL><INDENT>reads_overlap = True<EOL>seq = read2.seq + \"<STR_LIT>\" + revcomp(read1.seq)<EOL><DEDENT>else:<EOL><INDENT>seq = read2.seq + \"<STR_LIT>\" + read1.seq<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if read1.aend > read2.get_blocks()[<NUM_LIT:0>][<NUM_LIT:0>]:<EOL><INDENT>reads_overlap = True<EOL>seq = read1.seq + \"<STR_LIT>\" + revcomp(read2.seq)<EOL><DEDENT>else:<EOL><INDENT>seq = read1.seq + \"<STR_LIT>\" + read2.seq<EOL><DEDENT><DEDENT>size = sfunc(rkeys[<NUM_LIT:0>])<EOL>clust.append(\"<STR_LIT>\".format(chrom, seed_r1start, seed_r2end, size, seq))<EOL>if len(rkeys) > <NUM_LIT:1>:<EOL><INDENT>for key in rkeys[<NUM_LIT:1>:]:<EOL><INDENT>skip = False<EOL>try:<EOL><INDENT>read1, read2 = rdict[key]<EOL><DEDENT>except ValueError:<EOL><INDENT>read1 = rdict[key][<NUM_LIT:0>]<EOL>read2 = read1<EOL>skip = True<EOL><DEDENT>poss = read1.get_reference_positions() + read2.get_reference_positions()<EOL>minpos = min(poss)<EOL>maxpos = max(poss)<EOL>if read1.has_tag(\"<STR_LIT>\") or read2.has_tag(\"<STR_LIT>\"):<EOL><INDENT>skip = True<EOL><DEDENT>if (abs(minpos - seed_r1start) < <NUM_LIT:50>) 
and(abs(maxpos - seed_r2end) < <NUM_LIT:50>) and(not skip):<EOL><INDENT>if read1.is_reverse:<EOL><INDENT>if read2.aend > read1.get_blocks()[<NUM_LIT:0>][<NUM_LIT:0>]:<EOL><INDENT>reads_overlap = True<EOL>seq = read2.seq + \"<STR_LIT>\" + revcomp(read1.seq)<EOL><DEDENT>else:<EOL><INDENT>seq = read2.seq + \"<STR_LIT>\" + read1.seq<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if read1.aend > read2.get_blocks()[<NUM_LIT:0>][<NUM_LIT:0>]:<EOL><INDENT>reads_overlap = True<EOL>seq = read1.seq + \"<STR_LIT>\" + revcomp(read2.seq)<EOL><DEDENT>else:<EOL><INDENT>seq = read1.seq + \"<STR_LIT>\" + read2.seq<EOL><DEDENT><DEDENT>size = sfunc(key)<EOL>clust.append(\"<STR_LIT>\".format(chrom, minpos, maxpos, size, seq))<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>if reads_overlap:<EOL><INDENT>if data._hackersonly[\"<STR_LIT>\"]:<EOL><INDENT>clust = merge_after_pysam(data, clust)<EOL><DEDENT><DEDENT>return clust<EOL>", "docstring": "Builds a paired cluster from the refmapped data.", "id": "f5314:m4"}
{"signature": "def refmap_init(data, sample, force):", "body": "<EOL>sample.files.unmapped_reads = os.path.join(data.dirs.edits, <EOL>\"<STR_LIT>\".format(sample.name))<EOL>sample.files.mapped_reads = os.path.join(data.dirs.refmapping,<EOL>\"<STR_LIT>\".format(sample.name))<EOL>", "docstring": "create some file handles for refmapping", "id": "f5314:m14"}
{"signature": "def ref_build_and_muscle_chunk(data, sample):", "body": "<EOL>regions = bedtools_merge(data, sample).strip().split(\"<STR_LIT:\\n>\")<EOL>nregions = len(regions)<EOL>chunksize = (nregions / <NUM_LIT:10>) + (nregions % <NUM_LIT:10>)<EOL>LOGGER.debug(\"<STR_LIT>\".format(nregions, chunksize))<EOL>idx = <NUM_LIT:0><EOL>tmpfile = os.path.join(data.tmpdir, sample.name+\"<STR_LIT>\")<EOL>for i in range(<NUM_LIT:11>):<EOL><INDENT>if os.path.exists(tmpfile.format(i)):<EOL><INDENT>os.remove(tmpfile.format(i))<EOL><DEDENT><DEDENT>fopen = open<EOL>if data.paramsdict[\"<STR_LIT>\"] == \"<STR_LIT>\":<EOL><INDENT>tmpfile = os.path.join(data.dirs.clusts, sample.name+\"<STR_LIT>\")<EOL>fopen = gzip.open<EOL><DEDENT>samfile = pysam.AlignmentFile(sample.files.mapped_reads, '<STR_LIT:rb>')<EOL>clusts = []<EOL>nclusts = <NUM_LIT:0><EOL>for region in regions:<EOL><INDENT>chrom, pos1, pos2 = region.split()<EOL>try:<EOL><INDENT>if \"<STR_LIT>\" in data.paramsdict[\"<STR_LIT>\"]:<EOL><INDENT>clust = fetch_cluster_pairs(data, samfile, chrom, int(pos1), int(pos2))<EOL><DEDENT>else:<EOL><INDENT>clust = fetch_cluster_se(data, samfile, chrom, int(pos1), int(pos2))<EOL><DEDENT><DEDENT>except IndexError as inst:<EOL><INDENT>LOGGER.error(\"<STR_LIT>\".format(chrom, pos1, pos2))<EOL>continue<EOL><DEDENT>if clust:<EOL><INDENT>clusts.append(\"<STR_LIT:\\n>\".join(clust))<EOL>nclusts += <NUM_LIT:1><EOL>if nclusts == chunksize:<EOL><INDENT>tmphandle = tmpfile.format(idx)<EOL>with fopen(tmphandle, '<STR_LIT:a>') as tmp:<EOL><INDENT>tmp.write(\"<STR_LIT>\".join(clusts)+\"<STR_LIT>\")<EOL><DEDENT>idx += <NUM_LIT:1><EOL>nclusts = <NUM_LIT:0><EOL>clusts = []<EOL><DEDENT><DEDENT><DEDENT>if clusts:<EOL><INDENT>with fopen(tmpfile.format(idx), '<STR_LIT:a>') as tmp:<EOL><INDENT>tmp.write(\"<STR_LIT>\".join(clusts)+\"<STR_LIT>\")<EOL><DEDENT>clusts = []<EOL><DEDENT>if not data.paramsdict[\"<STR_LIT>\"] == \"<STR_LIT>\":<EOL><INDENT>chunkfiles = glob.glob(os.path.join(data.tmpdir, 
sample.name+\"<STR_LIT>\"))<EOL>LOGGER.info(\"<STR_LIT>\", chunkfiles)<EOL><DEDENT>samfile.close()<EOL>", "docstring": "1. Run bedtools to get all overlapping regions\n2. Parse out reads from regions using pysam and dump into chunk files. \n   We measure it out to create 10 chunk files per sample. \n3. If we really wanted to speed this up, though it is pretty fast already, \n   we could parallelize it since we can easily break the regions into \n   a list of chunks.", "id": "f5314:m5"}
{"signature": "def index_reference_sequence(data, force=False):", "body": "<EOL>refseq_file = data.paramsdict['<STR_LIT>']<EOL>index_files = []<EOL>if \"<STR_LIT>\" in data._hackersonly[\"<STR_LIT>\"]:<EOL><INDENT>index_files.extend([\"<STR_LIT>\", \"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>index_files.extend([\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"])<EOL><DEDENT>index_files.extend([\"<STR_LIT>\"])<EOL>if not force:<EOL><INDENT>if all([os.path.isfile(refseq_file+i) for i in index_files]):<EOL><INDENT>return<EOL><DEDENT><DEDENT>if \"<STR_LIT>\" in data._hackersonly[\"<STR_LIT>\"]:<EOL><INDENT>cmd1 = [ipyrad.bins.smalt, \"<STR_LIT:index>\", <EOL>\"<STR_LIT>\", str(data._hackersonly[\"<STR_LIT>\"]), <EOL>refseq_file, <EOL>refseq_file]<EOL><DEDENT>else:<EOL><INDENT>cmd1 = [ipyrad.bins.bwa, \"<STR_LIT:index>\", refseq_file]<EOL><DEDENT>LOGGER.info(\"<STR_LIT:U+0020>\".join(cmd1))<EOL>proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)<EOL>error1 = proc1.communicate()[<NUM_LIT:0>]<EOL>cmd2 = [ipyrad.bins.samtools, \"<STR_LIT>\", refseq_file]<EOL>LOGGER.info(\"<STR_LIT:U+0020>\".join(cmd2))<EOL>proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)<EOL>error2 = proc2.communicate()[<NUM_LIT:0>]<EOL>if proc1.returncode:<EOL><INDENT>raise IPyradWarningExit(error1)<EOL><DEDENT>if error2:<EOL><INDENT>if \"<STR_LIT>\" in error2:<EOL><INDENT>raise IPyradWarningExit(NO_ZIP_BINS.format(refseq_file))<EOL><DEDENT>else:<EOL><INDENT>raise IPyradWarningExit(error2)<EOL><DEDENT><DEDENT>", "docstring": "Index the reference sequence, unless it already exists. Also make a mapping\nof scaffolds to index numbers for later user in steps 5-6.", "id": "f5314:m1"}
{"signature": "def sample_cleanup(data, sample):", "body": "umap1file = os.path.join(data.dirs.edits, sample.name+\"<STR_LIT>\")<EOL>umap2file = os.path.join(data.dirs.edits, sample.name+\"<STR_LIT>\")<EOL>unmapped = os.path.join(data.dirs.refmapping, sample.name+\"<STR_LIT>\")<EOL>samplesam = os.path.join(data.dirs.refmapping, sample.name+\"<STR_LIT>\")<EOL>split1 = os.path.join(data.dirs.edits, sample.name+\"<STR_LIT>\")<EOL>split2 = os.path.join(data.dirs.edits, sample.name+\"<STR_LIT>\")<EOL>refmap_derep = os.path.join(data.dirs.edits, sample.name+\"<STR_LIT>\")<EOL>for f in [umap1file, umap2file, unmapped, samplesam, split1, split2, refmap_derep]:<EOL><INDENT>try:<EOL><INDENT>os.remove(f)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>", "docstring": "Clean up a bunch of loose files.", "id": "f5314:m0"}
{"signature": "def tree_panel_plot(ttree,<EOL>print_args=False,<EOL>*args, <EOL>**kwargs):", "body": "<EOL>panel = Panel(ttree)          <EOL>if not kwargs.get(\"<STR_LIT:width>\"):<EOL><INDENT>panel.kwargs[\"<STR_LIT:width>\"] = min(<NUM_LIT:1000>, <NUM_LIT>*len(panel.tree))<EOL><DEDENT>if not kwargs.get(\"<STR_LIT>\"):<EOL><INDENT>panel.kwargs[\"<STR_LIT>\"] = panel.kwargs[\"<STR_LIT:width>\"]<EOL><DEDENT>panel.kwargs.update(kwargs)<EOL>if panel.kwargs[\"<STR_LIT>\"]:<EOL><INDENT>nnodes = sum(<NUM_LIT:1> for i in panel.tree.traverse()) - len(panel.tree)<EOL>supps = [int(panel.tree.search_nodes(idx=j)[<NUM_LIT:0>].support)for j in range(nnodes)]<EOL>if not panel.kwargs[\"<STR_LIT>\"]:<EOL><INDENT>panel.kwargs[\"<STR_LIT>\"] = <NUM_LIT:20><EOL><DEDENT>sizes = [panel.kwargs[\"<STR_LIT>\"] for j in range(nnodes)]<EOL>supps += [\"<STR_LIT>\"] * len(panel.tree)<EOL>sizes += [<NUM_LIT:0>] * len(panel.tree)<EOL>panel.kwargs[\"<STR_LIT>\"] = supps<EOL>panel.kwargs[\"<STR_LIT>\"] = sizes<EOL>panel.kwargs[\"<STR_LIT>\"] = True<EOL>if len(panel.tree.children) > <NUM_LIT:2>:<EOL><INDENT>supps[<NUM_LIT:0>] = \"<STR_LIT>\"<EOL>sizes[<NUM_LIT:0>] = <NUM_LIT:0><EOL><DEDENT><DEDENT>elif panel.kwargs.get(\"<STR_LIT>\"):<EOL><INDENT>panel.kwargs[\"<STR_LIT>\"] = panel.kwargs[\"<STR_LIT>\"]<EOL>panel.kwargs[\"<STR_LIT>\"] = True<EOL><DEDENT>else:<EOL><INDENT>panel.kwargs[\"<STR_LIT>\"] = panel.node_labels.keys() <EOL><DEDENT>if print_args:<EOL><INDENT>print(panel.kwargs)<EOL><DEDENT>canvas = toyplot.Canvas(height=panel.kwargs['<STR_LIT>'], width=panel.kwargs['<STR_LIT:width>'])<EOL>axes = canvas.cartesian(bounds=(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"))    <EOL>axes.show = panel.kwargs[\"<STR_LIT>\"]<EOL>panel._panel_tree(axes)<EOL>if panel.kwargs[\"<STR_LIT>\"]:<EOL><INDENT>panel._panel_tip_labels(axes)<EOL><DEDENT>return canvas, axes, panel<EOL>", "docstring": "signature...", "id": "f5316:m0"}
{"signature": "def _countmatrix(lxs):", "body": "<EOL>share = np.zeros((lxs.shape[<NUM_LIT:0>], lxs.shape[<NUM_LIT:0>]), dtype=np.uint64)<EOL>names = range(lxs.shape[<NUM_LIT:0>])<EOL>for row in lxs:<EOL><INDENT>for samp1, samp2 in itertools.combinations(names, <NUM_LIT:2>):<EOL><INDENT>shared = lxs[samp1, lxs[samp2] > <NUM_LIT:0>].sum()<EOL>share[samp1, samp2] = shared<EOL><DEDENT><DEDENT>share += share.T<EOL>for row in xrange(len(names)):<EOL><INDENT>share[row, row] = lxs[row].sum()<EOL><DEDENT>return share<EOL>", "docstring": "fill a matrix with pairwise data sharing", "id": "f5320:m5"}
{"signature": "def _getarray(loci, snames):", "body": "<EOL>lxs = np.zeros((len(snames), len(loci)), dtype=np.uint64)<EOL>for loc in xrange(len(loci)):<EOL><INDENT>for seq in loci[loc].split(\"<STR_LIT:\\n>\"):<EOL><INDENT>if \"<STR_LIT>\" not in seq:<EOL><INDENT>lxs[snames.index(seq.split()[<NUM_LIT:0>][:]), loc] += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return lxs, snames<EOL>", "docstring": "parse loci list and return presence/absence matrix\nordered by the tips on the tree or list of names.", "id": "f5320:m4"}
{"signature": "def _countmatrix(lxs):", "body": "<EOL>share = np.zeros((lxs.shape[<NUM_LIT:0>], lxs.shape[<NUM_LIT:0>]))<EOL>names = range(lxs.shape[<NUM_LIT:0>])<EOL>for row in lxs:<EOL><INDENT>for samp1, samp2 in itertools.combinations(names, <NUM_LIT:2>):<EOL><INDENT>shared = lxs[samp1, lxs[samp2] > <NUM_LIT:0>].sum()<EOL>share[samp1, samp2] = shared<EOL><DEDENT><DEDENT>for row in xrange(len(names)):<EOL><INDENT>share[row, row] = lxs[row].sum()<EOL><DEDENT>return share<EOL>", "docstring": "fill a matrix with pairwise data sharing", "id": "f5322:m3"}
{"signature": "def start_ipcluster(data):", "body": "<EOL>iparg = \"<STR_LIT>\"<EOL>if \"<STR_LIT>\" in data._ipcluster[\"<STR_LIT>\"]:<EOL><INDENT>iparg = \"<STR_LIT>\"<EOL><DEDENT>standard = \"\"\"<STR_LIT>\"\"\".format(data._ipcluster[\"<STR_LIT>\"], <EOL>data._ipcluster[\"<STR_LIT>\"], <EOL>data._ipcluster[\"<STR_LIT>\"],<EOL>data._ipcluster[\"<STR_LIT>\"],<EOL>iparg)<EOL>try: <EOL><INDENT>LOGGER.info(shlex.split(standard))<EOL>subprocess.check_call(shlex.split(standard), <EOL>stderr=subprocess.STDOUT,<EOL>stdout=subprocess.PIPE)<EOL><DEDENT>except subprocess.CalledProcessError as inst:<EOL><INDENT>LOGGER.debug(\"<STR_LIT>\")<EOL>raise<EOL><DEDENT>except Exception as inst:<EOL><INDENT>sys.exit(\"<STR_LIT>\".format(inst))<EOL><DEDENT>", "docstring": "Start ipcluster", "id": "f5323:m0"}
{"signature": "def paramname(param=\"<STR_LIT>\"):", "body": "try: <EOL><INDENT>name = pinfo[str(param)][<NUM_LIT:0>].strip().split(\"<STR_LIT:U+0020>\")[<NUM_LIT:1>]<EOL><DEDENT>except (KeyError, ValueError) as err:<EOL><INDENT>print(\"<STR_LIT>\".format(param), err)<EOL>raise<EOL><DEDENT>return name<EOL>", "docstring": "Get the param name from the dict index value.", "id": "f5324:m0"}
{"signature": "def paraminfo(param=\"<STR_LIT>\", short=False):", "body": "<EOL>if short:<EOL><INDENT>desc = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>desc = <NUM_LIT:0><EOL><DEDENT>try: <EOL><INDENT>description = pinfo[str(param)][desc]<EOL><DEDENT>except (KeyError, ValueError) as err:<EOL><INDENT>print(\"<STR_LIT>\".format(param), err)<EOL>raise<EOL><DEDENT>return description<EOL>", "docstring": "Returns detailed information for the numbered parameter. \n        Further information is available in the tutorial.\n        Unlike params() this function doesn't deal well with *\n        It only takes one parameter at a time and returns the desc", "id": "f5324:m1"}
{"signature": "def _to_fulldict(self):", "body": "<EOL>returndict = OrderedDict([<EOL>(\"<STR_LIT:name>\", self.name),<EOL>(\"<STR_LIT>\", self.barcode),<EOL>(\"<STR_LIT>\", self.files),<EOL>(\"<STR_LIT>\", {<EOL>\"<STR_LIT>\": self.stats_dfs.s1.to_dict(),<EOL>\"<STR_LIT>\": self.stats_dfs.s2.to_dict(),                <EOL>\"<STR_LIT>\": self.stats_dfs.s3.to_dict(),<EOL>\"<STR_LIT>\": self.stats_dfs.s4.to_dict(),<EOL>\"<STR_LIT>\": self.stats_dfs.s5.to_dict(),<EOL>}),<EOL>(\"<STR_LIT>\", self.stats.to_dict()),<EOL>(\"<STR_LIT>\", self.depths)<EOL>])<EOL>return returndict<EOL>", "docstring": "Write to dict including data frames. All sample dicts \nare combined in save() to dump JSON output", "id": "f5326:c0:m2"}
{"signature": "def parse_command_line():", "body": "<EOL>parser = argparse.ArgumentParser(<EOL>formatter_class=argparse.RawDescriptionHelpFormatter,<EOL>epilog=\"\"\"<STR_LIT>\"\"\")<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', action='<STR_LIT:version>', <EOL>version=str(pkg_resources.get_distribution('<STR_LIT>')))<EOL>parser.add_argument('<STR_LIT>', \"<STR_LIT>\", action='<STR_LIT:store_true>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', \"<STR_LIT>\", action='<STR_LIT:store_true>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', \"<STR_LIT>\", action='<STR_LIT:store_true>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', \"<STR_LIT>\", action='<STR_LIT:store_true>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', metavar='<STR_LIT>', dest=\"<STR_LIT>\", type=str, <EOL>default=None, <EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', metavar='<STR_LIT>', dest=\"<STR_LIT>\",<EOL>type=str, default=None,<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', metavar='<STR_LIT>', dest=\"<STR_LIT>\",<EOL>type=str, default=None, nargs=\"<STR_LIT:*>\",<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', metavar='<STR_LIT>', dest=\"<STR_LIT>\",<EOL>default=None, nargs=\"<STR_LIT:*>\",<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', metavar=\"<STR_LIT>\", dest=\"<STR_LIT>\",<EOL>type=str, default=None,<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument(\"<STR_LIT:-c>\", metavar=\"<STR_LIT>\", dest=\"<STR_LIT>\",<EOL>type=int, default=<NUM_LIT:0>,<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument(\"<STR_LIT>\", metavar=\"<STR_LIT>\", dest=\"<STR_LIT>\",<EOL>type=int, default=<NUM_LIT:2>,<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument(\"<STR_LIT>\", action='<STR_LIT:store_true>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument(\"<STR_LIT>\", metavar=\"<STR_LIT>\", dest=\"<STR_LIT>\",<EOL>type=str, nargs=\"<STR_LIT:?>\", 
const=\"<STR_LIT:default>\",<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument(\"<STR_LIT>\", metavar=\"<STR_LIT>\", dest=\"<STR_LIT>\",<EOL>type=str, nargs=\"<STR_LIT:*>\", default=None, <EOL>help=\"<STR_LIT>\")<EOL>if len(sys.argv) == <NUM_LIT:1>:<EOL><INDENT>parser.print_help()<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>args = parser.parse_args()<EOL>if not any(x in [\"<STR_LIT>\", \"<STR_LIT>\"] for x in vars(args).keys()):<EOL><INDENT>print(\"<STR_LIT>\"+\"<STR_LIT>\")<EOL>parser.print_help()<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>return args<EOL>", "docstring": "Parse CLI args.", "id": "f5327:m6"}
{"signature": "def _check_version():", "body": "import urllib2<EOL>from distutils.version import LooseVersion<EOL>header =\"<STR_LIT>\"+\"<STR_LIT>\".format(ip.__version__)+\"<STR_LIT>\"+\"<STR_LIT>\"<EOL>try:<EOL><INDENT>htmldat = urllib2.urlopen(\"<STR_LIT>\").readlines()<EOL>curversion = next((x for x in htmldat if \"<STR_LIT>\" in x), None).split(\"<STR_LIT:>>\")[<NUM_LIT:1>].split(\"<STR_LIT:<>\")[<NUM_LIT:0>]<EOL>if LooseVersion(ip.__version__) < LooseVersion(curversion):<EOL><INDENT>msg = \"\"\"<STR_LIT>\"\"\".format(curversion)<EOL>print(header + \"<STR_LIT:\\n>\" + msg)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>except Exception as inst:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Test if there's a newer version and nag the user to upgrade.", "id": "f5327:m5"}
{"signature": "def cluster_info(ipyclient, spacer=\"<STR_LIT>\"):", "body": "<EOL>hosts = []<EOL>for eid in ipyclient.ids:<EOL><INDENT>engine = ipyclient[eid]<EOL>if not engine.outstanding:<EOL><INDENT>hosts.append(engine.apply(_socket.gethostname))<EOL><DEDENT><DEDENT>hosts = [i.get() for i in hosts]<EOL>result = []<EOL>for hostname in set(hosts):<EOL><INDENT>result.append(\"<STR_LIT>\".format(spacer, hosts.count(hostname), hostname))<EOL><DEDENT>print(\"<STR_LIT:\\n>\".join(result))<EOL>", "docstring": "reports host and engine info for an ipyclient", "id": "f5328:m0"}
{"signature": "def _set_debug_dict(__loglevel__):", "body": "_lconfig.dictConfig({<EOL>'<STR_LIT:version>': <NUM_LIT:1>,<EOL>'<STR_LIT>': False,<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': \"<STR_LIT>\"+\"<STR_LIT>\"+\"<STR_LIT>\"+\"<STR_LIT>\"+\"<STR_LIT>\"<EOL>},<EOL>},<EOL>'<STR_LIT>': {<EOL>__name__: {<EOL>'<STR_LIT>':__loglevel__,<EOL>'<STR_LIT:class>':'<STR_LIT>',<EOL>'<STR_LIT:filename>':__debugfile__,<EOL>'<STR_LIT>':\"<STR_LIT>\",<EOL>'<STR_LIT>':'<STR_LIT>'<EOL>}<EOL>},<EOL>'<STR_LIT>':{<EOL>__name__: {<EOL>'<STR_LIT>': [__name__],<EOL>'<STR_LIT>': __loglevel__,<EOL>'<STR_LIT>': True<EOL>}<EOL>}<EOL>})<EOL>", "docstring": "set the debug dict", "id": "f5328:m2"}
{"signature": "def _debug_off():", "body": "if _os.path.exists(__debugflag__):<EOL><INDENT>_os.remove(__debugflag__)<EOL><DEDENT>__loglevel__ = \"<STR_LIT>\"<EOL>_LOGGER.info(\"<STR_LIT>\")<EOL>_set_debug_dict(__loglevel__)<EOL>", "docstring": "turns off debugging by removing hidden tmp file", "id": "f5328:m3"}
{"signature": "def _tup_and_byte(obj):", "body": "<EOL>if isinstance(obj, unicode):<EOL><INDENT>return obj.encode('<STR_LIT:utf-8>')<EOL><DEDENT>if isinstance(obj, list):<EOL><INDENT>return [_tup_and_byte(item) for item in obj]<EOL><DEDENT>if isinstance(obj, dict):<EOL><INDENT>if \"<STR_LIT>\" in obj:<EOL><INDENT>return tuple(_tup_and_byte(item) for item in obj[\"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>return {<EOL>_tup_and_byte(key): _tup_and_byte(val) forkey, val in obj.iteritems()<EOL>}<EOL><DEDENT><DEDENT>return obj<EOL>", "docstring": "wat", "id": "f5330:m5"}
{"signature": "def save_json2(data):", "body": "<EOL>datadict = OrderedDict([<EOL>(\"<STR_LIT>\", data.__dict__[\"<STR_LIT>\"]),<EOL>(\"<STR_LIT>\", dict(data.__dict__[\"<STR_LIT>\"])),<EOL>(\"<STR_LIT>\", data.__dict__[\"<STR_LIT>\"])<EOL>])<EOL>", "docstring": "save to json.", "id": "f5330:m2"}
{"signature": "def loci2bpp(name, locifile, imap, guidetree,<EOL>minmap=None,<EOL>maxloci=None,<EOL>infer_sptree=<NUM_LIT:0>,<EOL>infer_delimit=<NUM_LIT:0>,<EOL>delimit_alg=(<NUM_LIT:0>, <NUM_LIT:5>),<EOL>seed=<NUM_LIT>,<EOL>burnin=<NUM_LIT:1000>,<EOL>nsample=<NUM_LIT>,<EOL>sampfreq=<NUM_LIT:2>,<EOL>thetaprior=(<NUM_LIT:5>, <NUM_LIT:5>),<EOL>tauprior=(<NUM_LIT:4>, <NUM_LIT:2>, <NUM_LIT:1>),<EOL>traits_df=None,<EOL>nu=<NUM_LIT:0>,<EOL>kappa=<NUM_LIT:0>,<EOL>useseqdata=<NUM_LIT:1>,<EOL>usetraitdata=<NUM_LIT:1>,<EOL>cleandata=<NUM_LIT:0>,<EOL>wdir=None,<EOL>finetune=(<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>),<EOL>verbose=<NUM_LIT:0>):", "body": "<EOL>if not imap:<EOL><INDENT>raise IPyradWarningExit(IMAP_REQUIRED)<EOL><DEDENT>if minmap:<EOL><INDENT>if minmap.keys() != imap.keys():<EOL><INDENT>raise IPyradWarningExit(KEYS_DIFFER)<EOL><DEDENT><DEDENT>if wdir:<EOL><INDENT>wdir = os.path.abspath(wdir)<EOL>if not os.path.exists(wdir):<EOL><INDENT>raise IPyradWarningExit(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>wdir = os.path.curdir<EOL><DEDENT>prog = '<STR_LIT>'<EOL>if isinstance(traits_df, pd.DataFrame):<EOL><INDENT>prog = '<STR_LIT>'<EOL><DEDENT>outfile = OPJ(wdir, \"<STR_LIT>\".format(name, prog))<EOL>mapfile = OPJ(wdir, \"<STR_LIT>\".format(name, prog))<EOL>fout = open(outfile, '<STR_LIT:w>')<EOL>fmap = open(mapfile, '<STR_LIT:w>')<EOL>with open(locifile, '<STR_LIT:r>') as infile:<EOL><INDENT>loci = infile.read().strip().split(\"<STR_LIT>\")<EOL>nloci = len(loci)<EOL><DEDENT>samples = list(itertools.chain(*imap.values()))<EOL>nkept = <NUM_LIT:0><EOL>for iloc in xrange(nloci):<EOL><INDENT>lines = loci[iloc].split(\"<STR_LIT>\")[<NUM_LIT:0>].split()<EOL>names = lines[::<NUM_LIT:2>]<EOL>names = [\"<STR_LIT>\"+i for i in names]<EOL>seqs = [list(i) for i in lines[<NUM_LIT:1>::<NUM_LIT:2>]]<EOL>seqlen = len(seqs[<NUM_LIT:0>])<EOL>skip = <NUM_LIT:0><EOL>if minmap:<EOL><INDENT>covd = {}<EOL>for group, vals in 
imap.items():<EOL><INDENT>covd[group] = sum([\"<STR_LIT>\"+i in names for i in vals])<EOL><DEDENT>if not all([covd[group] >= minmap[group] for group in minmap]):<EOL><INDENT>skip = <NUM_LIT:1><EOL><DEDENT><DEDENT>if maxloci:<EOL><INDENT>if nkept >= maxloci:<EOL><INDENT>skip = <NUM_LIT:1><EOL><DEDENT><DEDENT>if not skip:<EOL><INDENT>data = [\"<STR_LIT>\".format(i, \"<STR_LIT>\".join(k).replace(\"<STR_LIT:->\", \"<STR_LIT:N>\")) for(i, k) in zip(names, seqs) if i[<NUM_LIT:1>:] in samples]<EOL>if data:<EOL><INDENT>fout.write(\"<STR_LIT>\".format(len(data), seqlen, \"<STR_LIT:\\n>\".join(data)))<EOL>nkept += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>fout.close()<EOL>data = [\"<STR_LIT>\".format(val, key) for keyin sorted(imap) for val in imap[key]]<EOL>fmap.write(\"<STR_LIT:\\n>\".join(data))<EOL>fmap.close()<EOL>write_ctl(name, imap, guidetree, nkept,<EOL>infer_sptree, infer_delimit, delimit_alg,<EOL>seed, burnin, nsample, sampfreq,<EOL>thetaprior, tauprior, traits_df, nu, kappa,<EOL>cleandata, useseqdata, usetraitdata, wdir,<EOL>finetune, verbose)<EOL>sys.stderr.write(\"<STR_LIT>\".format(nkept, len(imap.keys()),<EOL>sum([len(i) for i in imap.values()])))<EOL>sys.stderr.write(\"<STR_LIT>\".format(name, prog))<EOL>sys.stderr.write(\"<STR_LIT>\".format(name, prog))<EOL>sys.stderr.write(\"<STR_LIT>\".format(name, prog))<EOL>if isinstance(traits_df, pd.DataFrame):<EOL><INDENT>sys.stderr.write(\"<STR_LIT>\".format(name, prog))<EOL><DEDENT>return os.path.abspath(<EOL>\"<STR_LIT>\".format(OPJ(wdir, name), prog))<EOL>", "docstring": "Converts loci file format to bpp file format, i.e., concatenated phylip-like\nformat, and produces imap and ctl input files for bpp.\n\nParameters:\n-----------\nname:\n    A prefix name for output files that will be produced\nlocifile:\n    A .loci file produced by ipyrad.\nimap:\n    A Python dictionary with 'species' names as keys, and lists of sample\n    names for the values. 
Any sample that is not included in the imap\n    dictionary will be filtered out of the data when converting the .loci\n    file into the bpp formatted sequence file. Each species in the imap\n    dictionary must also be present in the input 'guidetree'.\nguidetree:\n    A newick string species tree hypothesis [e.g., (((a,b),(c,d)),e);]\n    All species in the imap dictionary must also be present in the guidetree\n\nOptional parameters:\n--------------------\ninfer_sptree:\n    Default=0, only infer parameters on a fixed species tree. If 1, then the\n    input tree is treated as a guidetree and tree search is employed to find\n    the best tree. The results will include support values for the inferred\n    topology.\ninfer_delimit:\n    Default=0, no delimitation. If 1 then splits in the tree that separate\n    'species' will be collapsed to test whether fewer species are a better\n    fit to the data than the number in the input guidetree.\ndelimit_alg:\n    Species delimitation algorithm. This is a tuple. The first value\n    is the algorithm (0 or 1) and the following values are arguments\n    for the given algorithm. See other ctl files for examples of what the\n    delimitation line looks like. 
This is where you can enter the params\n    (e.g., alpha, migration) for the two different algorithms.\n    For example, the following args would produce the following ctl lines:\n       alg=0, epsilon=5\n       > delimit_alg = (0, 5)\n       speciesdelimitation = 1 0 5\n\n       alg=1, alpha=2, migration=1\n       > delimit_alg = (1, 2, 1)\n       speciesdelimitation = 1 1 2 1\n\n       alg=1, alpha=2, migration=1, diagnosis=0, ?=1\n       > delimit_alg = (1, 2, 1, 0, 1)\n       speciesdelimitation = 1 1 2 1 0 1\nseed:\n    A random number seed at start of analysis.\nburnin:\n    Number of burnin generations in mcmc\nnsample:\n    Number of mcmc generations to run.\nsampfreq:\n    How often to sample from the mcmc chain.\nthetaprior:\n    Prior on theta (4Neu), gamma distributed. mean = a/b. e.g., (5, 5)\ntauprior\n    Prior on root tau, gamma distributed mean = a/b. Last number is \n    dirichlet prior for other taus. e.g., (4, 2, 1)\ntraits_df:\n    A pandas DataFrame with trait data properly formatted. This means only\n    quantitative traits are included, and missing values are NaN.\n    The first column contains sample names, with \"Indiv\" as the header.\n    The following columns have a header row with trait names. 
This script\n    will write a CSV trait file with trait values mean-standardized, with\n    NaN replaced by \"NA\", and with sample not present in IMAP removed.\nnu:\n    A prior on phenotypic trait variance (0) for iBPP analysis.\nkappa:\n    A prior on phenotypic trait mean (0) for iBPP analysis.\nuseseqdata:\n    If false inference proceeds without sequence data (can be used to test\n    the effect of priors on the tree distributions).\nusetraitdata:\n    If false inference proceeds without trait data (can be used to test\n    the effect of priors on the trait distributions).\ncleandata:\n    If 1 then sites with missing or hetero characters are removed.\nwdir:\n    A working directory to write files to.\nfinetune:\n    See bpp documentation.\nverbose:\n    If verbose=1 the ctl file text will also be written to screen (stderr).", "id": "f5338:m0"}
{"signature": "def write_ctl(name, imap, guidetree, nloci,<EOL>infer_sptree, infer_delimit, delimit_alg,<EOL>seed, burnin, nsample, sampfreq,<EOL>thetaprior, tauprior, traits_df, nu0, kappa0,<EOL>cleandata, useseqdata, usetraitdata, wdir,<EOL>finetune, verbose):", "body": "<EOL>ctl = []<EOL>if not guidetree.endswith(\"<STR_LIT:;>\"):<EOL><INDENT>guidetree += \"<STR_LIT:;>\"<EOL><DEDENT>prog = '<STR_LIT>'<EOL>if isinstance(traits_df, pd.DataFrame):<EOL><INDENT>prog = '<STR_LIT>'<EOL><DEDENT>ctl.append(\"<STR_LIT>\".format(seed))<EOL>ctl.append(\"<STR_LIT>\".format(OPJ(wdir, name), prog))<EOL>ctl.append(\"<STR_LIT>\".format(OPJ(wdir, name), prog))<EOL>ctl.append(\"<STR_LIT>\".format(OPJ(wdir, name), prog))<EOL>ctl.append(\"<STR_LIT>\".format(OPJ(wdir, name), prog))<EOL>if isinstance(traits_df, pd.DataFrame):<EOL><INDENT>ctl.append(\"<STR_LIT>\".format(OPJ(wdir, name), prog))<EOL><DEDENT>ctl.append(\"<STR_LIT>\".format(nloci))<EOL>ctl.append(\"<STR_LIT>\".format(useseqdata))<EOL>ctl.append(\"<STR_LIT>\".format(cleandata))<EOL>if infer_sptree:<EOL><INDENT>ctl.append(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>ctl.append(\"<STR_LIT>\")<EOL><DEDENT>ctl.append(\"<STR_LIT>\".format(infer_delimit, delimit_alg[<NUM_LIT:0>],<EOL>\"<STR_LIT:U+0020>\".join([str(i) for i in delimit_alg[<NUM_LIT:1>:]])))<EOL>if isinstance(traits_df, pd.DataFrame):<EOL><INDENT>try:<EOL><INDENT>traits_df.values.astype(float)<EOL><DEDENT>except Exception:<EOL><INDENT>raise IPyradWarningExit(PDREAD_ERROR)<EOL><DEDENT>samples = sorted(list(itertools.chain(*imap.values())))<EOL>didx = [list(traits_df.index).index(i) for i in traits_df.indexif i not in samples]<EOL>dtraits = traits_df.drop(traits_df.index[didx])<EOL>straits = dtraits.apply(lambda x: (x - x.mean()) / (x.std()))<EOL>ftraits = straits.fillna(\"<STR_LIT>\")<EOL>traitdict = ftraits.T.to_dict(\"<STR_LIT:list>\")<EOL>rev = {val:key for key in sorted(imap) for val in imap[key]}<EOL>traitfile = \"<STR_LIT>\".format(os.path.join(wdir, name), 
prog)<EOL>with open(traitfile, '<STR_LIT:w>') as tout:<EOL><INDENT>tout.write(\"<STR_LIT>\")<EOL>tout.write(\"<STR_LIT:\\t>\".join(<EOL>['<STR_LIT>'] + list(ftraits.columns))+\"<STR_LIT:\\n>\"<EOL>)<EOL>nindT = <NUM_LIT:0><EOL>for ikey in sorted(imap.keys()):<EOL><INDENT>samps = imap[ikey]<EOL>for samp in sorted(samps):<EOL><INDENT>if samp in traitdict:<EOL><INDENT>tout.write(\"<STR_LIT:\\t>\".join([samp, rev[samp]] +[str(i) for i in traitdict[samp]])+\"<STR_LIT:\\n>\"<EOL>)<EOL>nindT += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>ctl.append(\"<STR_LIT>\".format(traits_df.shape[<NUM_LIT:1>]))<EOL>ctl.append(\"<STR_LIT>\".format(nindT))  <EOL>ctl.append(\"<STR_LIT>\".format(usetraitdata))<EOL>ctl.append(\"<STR_LIT>\".format(useseqdata))<EOL>ctl.append(\"<STR_LIT>\".format(nu0))<EOL>ctl.append(\"<STR_LIT>\".format(kappa0))<EOL>ctl.remove(\"<STR_LIT>\".format(useseqdata))<EOL>ctl.remove(\"<STR_LIT>\".format(infer_sptree))<EOL><DEDENT>nspecies = str(len(imap))<EOL>species = \"<STR_LIT:U+0020>\".join(sorted(imap))<EOL>ninds = \"<STR_LIT:U+0020>\".join([str(len(imap[i])) for i in sorted(imap)])<EOL>ctl.append(\"\"\"<STR_LIT>\"\"\".format(nspecies, species, ninds, guidetree))<EOL>ctl.append(\"<STR_LIT>\".format(*thetaprior))<EOL>ctl.append(\"<STR_LIT>\".format(*tauprior))<EOL>ctl.append(\"<STR_LIT>\".format(\"<STR_LIT:U+0020>\".join([str(i) for i in finetune])))<EOL>ctl.append(\"<STR_LIT>\")<EOL>ctl.append(\"<STR_LIT>\".format(burnin))<EOL>ctl.append(\"<STR_LIT>\".format(sampfreq))<EOL>ctl.append(\"<STR_LIT>\".format(nsample))<EOL>with open(\"<STR_LIT>\".format(OPJ(wdir, name), prog), '<STR_LIT:w>') as out:<EOL><INDENT>out.write(\"<STR_LIT:\\n>\".join(ctl))<EOL><DEDENT>if verbose:<EOL><INDENT>sys.stderr.write(\"<STR_LIT>\"+\"<STR_LIT:\\n>\".join(ctl)+\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "write outfile with any args in argdict", "id": "f5338:m1"}
{"signature": "def _count_PIS(seqsamp, N):", "body": "counts = [Counter(col) for col in seqsamp.T if not (\"<STR_LIT:->\" in col or \"<STR_LIT:N>\" in col)]<EOL>pis = [i.most_common(<NUM_LIT:2>)[<NUM_LIT:1>][<NUM_LIT:1>] > <NUM_LIT:1> for i in counts if len(i.most_common(<NUM_LIT:2>))><NUM_LIT:1>]<EOL>if sum(pis) >= N:<EOL><INDENT>return sum(pis)<EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "filters for loci with >= N PIS", "id": "f5343:m3"}
{"signature": "def loci2multinex(name, <EOL>locifile, <EOL>subsamples=None,<EOL>outdir=None,<EOL>maxloci=None,<EOL>minSNPs=<NUM_LIT:1>,<EOL>seed=<NUM_LIT>,<EOL>mcmc_burnin=int(<NUM_LIT>),<EOL>mcmc_ngen=int(<NUM_LIT>),<EOL>mcmc_sample_freq=<NUM_LIT:1000>,<EOL>):", "body": "<EOL>if outdir:<EOL><INDENT>if not os.path.exists(outdir):<EOL><INDENT>os.makedirs(outdir)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>outdir = os.path.curdir<EOL><DEDENT>outdir = os.path.realpath(outdir)<EOL>outdir = os.path.join(outdir, \"<STR_LIT>\".format(name))<EOL>if not os.path.exists(outdir):<EOL><INDENT>os.makedirs(outdir)<EOL><DEDENT>else:<EOL><INDENT>ofiles = glob.glob(os.path.join(outdir, \"<STR_LIT>\"))<EOL>for ofile in ofiles:<EOL><INDENT>os.remove(ofile)<EOL><DEDENT><DEDENT>with open(locifile) as infile:<EOL><INDENT>loci = (i for i in infile.read().strip().split(\"<STR_LIT>\"))<EOL><DEDENT>if not subsamples:<EOL><INDENT>with open(locifile) as infile:<EOL><INDENT>subs = set((i.split()[<NUM_LIT:0>] for i in infile.readlines() if \"<STR_LIT>\" not in i))<EOL><DEDENT><DEDENT>else:   <EOL><INDENT>subs = set(subsamples)<EOL><DEDENT>lens = len(subs)<EOL>nlocus = <NUM_LIT:0><EOL>for loc in loci:<EOL><INDENT>dat = loc.split(\"<STR_LIT:\\n>\")[:-<NUM_LIT:1>]<EOL>names = [i.split()[<NUM_LIT:0>] for i in dat]<EOL>seqs = np.array([list(i.split()[<NUM_LIT:1>]) for i in dat])<EOL>if len(set(names).intersection(set(subs))) == lens:<EOL><INDENT>seqsamp = seqs[[names.index(tax) for tax in subs]]<EOL>seqsamp = _resolveambig(seqsamp)<EOL>pis = _count_PIS(seqsamp, minSNPs)<EOL>if pis:<EOL><INDENT>nlocus += <NUM_LIT:1><EOL>copied = seqsamp.copy()<EOL>copied[copied == \"<STR_LIT:->\"] = \"<STR_LIT:N>\"<EOL>rmcol = np.all(copied == \"<STR_LIT:N>\", axis=<NUM_LIT:0>)<EOL>seqsamp = seqsamp[:, ~rmcol]<EOL>mdict = dict(list(zip(subs, [i.tostring() for i in seqsamp])))<EOL>nexmake(mdict, nlocus, outdir, mcmc_burnin, mcmc_ngen, mcmc_sample_freq)<EOL><DEDENT><DEDENT><DEDENT>print(\"<STR_LIT>\".format(nlocus, 
outdir))<EOL>", "docstring": "Converts loci file format to multiple nexus formatted files, one for \neach locus, and writes a mrbayes block in the nexus information. The \nmrbayes block will be set to run 2 replicate chains, for [mcmc_ngen]\ngenerations, skipping [burnin] steps, and sampling every \n[mcmc_sample_freq] steps. \n\n\nParameters:\n-----------\nname: (str)\n    A prefix name for output files that will be produced\nlocifile: (str)\n    A .loci file produced by ipyrad.\nmaxloci: (int)\n    Limit the number of loci to the first N loci with sufficient sampling\n    to be included in the analysis. \nminSNPs: (int)\n    Only include loci that have at least N parsimony informative SNPs.\nseed: (int)\n    Random seed used for resolving ambiguities.\nburnin: (int)\n    mrbayes nexus block burnin parameter used for 'sump burnin' and 'sumt burnin'.\n    The number of generations to skip before starting parameter and tree sampling. \nmcmc_ngen: (int)\n    mrbayes nexus block 'mcmc ngen' and 'mcmc printfreq' parameters. We don't really\n    to have any info printed to screen, so these values are set equal. This is the \n    length of the chains that will be run. \nmcmc_sample_freq: (int)\n    mrbayes nexus block 'mcmc samplefreq' parameter. The frequency of sampling from\n    the mcmc chain.", "id": "f5343:m0"}
{"signature": "def draw(self, axes):", "body": "<EOL>tre = toytree.tree(newick=self.results.tree)<EOL>tre.draw(<EOL>axes=axes,<EOL>use_edge_lengths=True,<EOL>tree_style='<STR_LIT:c>',<EOL>tip_labels_align=True,<EOL>edge_align_style={\"<STR_LIT>\": <NUM_LIT:1>}<EOL>);<EOL>for admix in self.results.admixture:<EOL><INDENT>pidx, pdist, cidx, cdist, weight = admix<EOL>a = _get_admix_point(tre, pidx, pdist)<EOL>b = _get_admix_point(tre, cidx, cdist)<EOL>mark = axes.plot(<EOL>a = (a[<NUM_LIT:0>], b[<NUM_LIT:0>]),<EOL>b = (a[<NUM_LIT:1>], b[<NUM_LIT:1>]),<EOL>style={\"<STR_LIT>\": <NUM_LIT:10>*weight,<EOL>\"<STR_LIT>\": <NUM_LIT>, <EOL>\"<STR_LIT>\": \"<STR_LIT>\"}<EOL>)<EOL>axes.scatterplot(<EOL>a = (b[<NUM_LIT:0>]),<EOL>b = (b[<NUM_LIT:1>]),<EOL>size=<NUM_LIT:8>,<EOL>title=\"<STR_LIT>\".format(weight),<EOL>)<EOL><DEDENT>axes.y.show=False<EOL>axes.x.ticks.show=True<EOL>axes.x.label.text = \"<STR_LIT>\"<EOL>return axes<EOL>", "docstring": "Returns a treemix plot on a toyplot.axes object.", "id": "f5344:c0:m7"}
{"signature": "def copy(self, name):", "body": "<EOL>subdict = {i:j for i, j in self.__dict__.iteritems() if i != \"<STR_LIT>\"}<EOL>newdict = copy.deepcopy(subdict)<EOL>newobj = Treemix(<EOL>data=newdict[\"<STR_LIT:data>\"],<EOL>name=name,<EOL>workdir=newdict[\"<STR_LIT>\"],<EOL>imap={i:j for i, j in newdict[\"<STR_LIT>\"].items()},<EOL>mapfile=newdict['<STR_LIT>'],<EOL>minmap={i:j for i, j in newdict[\"<STR_LIT>\"].items()},<EOL>seed=np.random.randint(<NUM_LIT:0>, int(<NUM_LIT>)),<EOL>)<EOL>for key, val in newobj.params.__dict__.iteritems():<EOL><INDENT>newobj.params.__setattr__(key, self.params.__getattribute__(key))<EOL><DEDENT>return newobj<EOL>", "docstring": "Returns a copy of the treemix object with the same parameter settings\nbut with the files attributes cleared, and with a new 'name' attribute. \n\nParameters\n----------\nname (str):\n    A name for the new copied treemix bject that will be used for the \n    output files created by the object.", "id": "f5344:c0:m5"}
{"signature": "def _call_treemix(command_list):", "body": "proc = subprocess.Popen(<EOL>command_list,<EOL>stderr=subprocess.STDOUT, <EOL>stdout=subprocess.PIPE<EOL>)<EOL>comm = proc.communicate()<EOL>return comm<EOL>", "docstring": "call the command as sps", "id": "f5344:m3"}
{"signature": "@property<EOL><INDENT>def command(self):<DEDENT>", "body": "return \"<STR_LIT:U+0020>\".join(self._command_list)<EOL>", "docstring": "returns command as a string", "id": "f5344:c0:m2"}
{"signature": "def run(self, <EOL>ipyclient=None,<EOL>):", "body": "self.results_table, self.results_boots = batch(self, ipyclient)<EOL>if not isinstance(self.results_table, list):<EOL><INDENT>self.results_table.nloci = np.nan_to_num(self.results_table.nloci).astype(int)<EOL><DEDENT>", "docstring": "Run a batch of dstat tests on a list of tests, where each test is \na dictionary mapping sample names to {p1 - p4} (and sometimes p5). \nParameters modifying the behavior of the run, such as the number\nof bootstrap replicates (nboots) or the minimum coverage for \nloci (mincov) can be set in {object}.params.\n\nParameters:\n-----------\nipyclient (ipyparallel.Client object):\n    An ipyparallel client object to distribute jobs to a cluster.", "id": "f5345:c0:m2"}
{"signature": "def _loci_to_arr(loci, taxdict, mindict):", "body": "<EOL>nloci = len(loci)<EOL>maxlen = np.max(np.array([len(locus.split(\"<STR_LIT:\\n>\")[<NUM_LIT:0>]) for locus in loci]))<EOL>keep = np.zeros(nloci, dtype=np.bool_)<EOL>arr = np.zeros((nloci, <NUM_LIT:4>, maxlen), dtype=np.float64)<EOL>if len(taxdict) == <NUM_LIT:5>:<EOL><INDENT>arr = np.zeros((nloci, <NUM_LIT:6>, maxlen), dtype=np.float64)<EOL><DEDENT>if isinstance(mindict, int):<EOL><INDENT>mindict = {i: mindict for i in taxdict}<EOL><DEDENT>elif isinstance(mindict, dict):<EOL><INDENT>mindict = {i: mindict[i] for i in taxdict}<EOL><DEDENT>else:<EOL><INDENT>mindict = {i: <NUM_LIT:1> for i in taxdict}<EOL><DEDENT>allowed_names = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>if any([i not in allowed_names for i in taxdict]):<EOL><INDENT>raise IPyradError(\"<STR_LIT>\")<EOL><DEDENT>keys = sorted([i for i in taxdict.keys() if i[<NUM_LIT:0>] == '<STR_LIT:p>'])<EOL>outg = keys[-<NUM_LIT:1>]<EOL>for loc in xrange(nloci):<EOL><INDENT>lines = loci[loc].split(\"<STR_LIT:\\n>\")[:-<NUM_LIT:1>]<EOL>names = [i.split()[<NUM_LIT:0>] for i in lines]<EOL>seqs = np.array([list(i.split()[<NUM_LIT:1>]) for i in lines])<EOL>covs = [sum([j in names for j in taxdict[tax]]) >= mindict[tax]for tax in taxdict]<EOL>if all(covs):<EOL><INDENT>keep[loc] = True<EOL>refidx = np.where([i in taxdict[outg] for i in names])[<NUM_LIT:0>]<EOL>refseq = seqs[refidx].view(np.uint8)<EOL>ancestral = np.array([reftrick(refseq, GETCONS2)[:, <NUM_LIT:0>]])<EOL>iseq = _reffreq2(ancestral, refseq, GETCONS2)<EOL>arr[loc, -<NUM_LIT:1>, :iseq.shape[<NUM_LIT:1>]] = iseq <EOL>if len(taxdict) == <NUM_LIT:4>:<EOL><INDENT>for tidx, key in enumerate(keys[:-<NUM_LIT:1>]):<EOL><INDENT>nidx = np.where([i in taxdict[key] for i in names])[<NUM_LIT:0>]<EOL>sidx = seqs[nidx].view(np.uint8)<EOL>iseq = _reffreq2(ancestral, sidx, GETCONS2)<EOL>arr[loc, tidx, :iseq.shape[<NUM_LIT:1>]] = iseq<EOL><DEDENT><DEDENT>else:<EOL><INDENT>iseq = 
_reffreq2(ancestral, refseq, GETCONS2) <EOL>arr[loc, -<NUM_LIT:1>, :iseq.shape[<NUM_LIT:1>]] = iseq <EOL>nidx = np.where([i in taxdict['<STR_LIT>'] for i in names])[<NUM_LIT:0>]<EOL>sidx = seqs[nidx].view(np.uint8)<EOL>iseq = _reffreq2(ancestral, sidx, GETCONS2)<EOL>arr[loc, <NUM_LIT:0>, :iseq.shape[<NUM_LIT:1>]] = iseq<EOL>nidx = np.where([i in taxdict['<STR_LIT>'] for i in names])[<NUM_LIT:0>]<EOL>sidx = seqs[nidx].view(np.uint8)<EOL>iseq = _reffreq2(ancestral, sidx, GETCONS2)<EOL>arr[loc, <NUM_LIT:1>, :iseq.shape[<NUM_LIT:1>]] = iseq<EOL>nidx = np.where([i in taxdict['<STR_LIT>'] for i in names])[<NUM_LIT:0>]<EOL>nidy = np.where([i in taxdict['<STR_LIT>'] for i in names])[<NUM_LIT:0>]<EOL>sidx = seqs[nidx].view(np.uint8)<EOL>sidy = seqs[nidy].view(np.uint8)<EOL>xseq = _reffreq2(ancestral, sidx, GETCONS2)<EOL>yseq = _reffreq2(ancestral, sidy, GETCONS2)<EOL>mask3 = xseq != <NUM_LIT:0><EOL>mask4 = yseq != <NUM_LIT:0><EOL>xseq[mask4] = <NUM_LIT:0><EOL>yseq[mask3] = <NUM_LIT:0><EOL>arr[loc, <NUM_LIT:2>, :xseq.shape[<NUM_LIT:1>]] = xseq<EOL>arr[loc, <NUM_LIT:3>, :yseq.shape[<NUM_LIT:1>]] = yseq<EOL>nidx = nidx.tolist() + nidy.tolist()<EOL>sidx = seqs[nidx].view(np.uint8)<EOL>iseq = _reffreq2(ancestral, sidx, GETCONS2)<EOL>arr[loc, <NUM_LIT:4>, :iseq.shape[<NUM_LIT:1>]] = iseq<EOL><DEDENT><DEDENT><DEDENT>arr = arr[keep, :, :]<EOL>arr = masknulls(arr)<EOL>return arr, keep<EOL>", "docstring": "return a frequency array from a loci file for all loci with taxa from \ntaxdict and min coverage from mindict.", "id": "f5345:m2"}
{"signature": "def draw(<EOL>self, <EOL>show_tip_labels=True, <EOL>show_node_support=False,<EOL>use_edge_lengths=False, <EOL>orient=\"<STR_LIT:right>\",<EOL>print_args=False,<EOL>*args,<EOL>**kwargs):", "body": "<EOL>self._decompose_tree(orient=orient, use_edge_lengths=use_edge_lengths)<EOL>dwargs = {}<EOL>dwargs[\"<STR_LIT>\"] = show_tip_labels<EOL>dwargs[\"<STR_LIT>\"] = show_node_support<EOL>dwargs.update(kwargs)<EOL>canvas, axes, panel = tree_panel_plot(self, print_args, **dwargs)<EOL>return canvas, axes, panel<EOL>", "docstring": "plot the tree using toyplot.graph. \n\nParameters:\n-----------\n    show_tip_labels: bool\n        Show tip names from tree.\n    use_edge_lengths: bool\n        Use edge lengths from newick tree.\n    show_node_support: bool\n        Show support values at nodes using a set of default \n        options. \n\n    ...", "id": "f5346:c0:m6"}
{"signature": "def refresh(self):", "body": "<EOL>oldfiles = [self.files.qdump] +self.database.__dict__.values() +self.trees.__dict__.values()<EOL>for oldfile in oldfiles:<EOL><INDENT>if oldfile:<EOL><INDENT>if os.path.exists(oldfile):<EOL><INDENT>os.remove(oldfile)<EOL><DEDENT><DEDENT><DEDENT>oldcluster = copy.deepcopy(self._ipcluster)<EOL>self.__init__(<EOL>name=self.name, <EOL>data=self.files.data, <EOL>mapfile=self.files.mapfile,<EOL>workdir=self.dirs,<EOL>method=self.params.method,<EOL>guidetreefile=self.files.guidetreefile,<EOL>resolve=self._resolve, <EOL>nboots=self.params.nboots, <EOL>nquartets=self.params.nquartets, <EOL>initarr=True, <EOL>quiet=True,<EOL>cli=self.kwargs.get(\"<STR_LIT>\")<EOL>)<EOL>self._ipcluster = oldcluster<EOL>", "docstring": "Remove all existing results files and reinit the h5 arrays \nso that the tetrad object is just like fresh from a CLI start.", "id": "f5347:c0:m1"}
{"signature": "def random_combination(iterable, nquartets):", "body": "pool = tuple(iterable)<EOL>size = len(pool)<EOL>indices = random.sample(xrange(size), nquartets)<EOL>return tuple(pool[i] for i in indices)<EOL>", "docstring": "Random selection from itertools.combinations(iterable, r). \nUse this if not sampling all possible quartets.", "id": "f5347:m2"}
{"signature": "def random_product(iter1, iter2):", "body": "pool1 = tuple(iter1)<EOL>pool2 = tuple(iter2)<EOL>ind1 = random.sample(pool1, <NUM_LIT:2>)<EOL>ind2 = random.sample(pool2, <NUM_LIT:2>)<EOL>return tuple(ind1+ind2)<EOL>", "docstring": "random sampler for equal_splits func", "id": "f5347:m3"}
{"signature": "def consensus_tree(trees, names=None, cutoff=<NUM_LIT:0.0>):", "body": "<EOL>namedict, clade_counts = _find_clades(trees, names=names)<EOL>fclade_counts = _filter_clades(clade_counts, cutoff)<EOL>consens_tree, _ = _build_trees(fclade_counts, namedict)<EOL>return consens_tree, clade_counts<EOL>", "docstring": "An extended majority rule consensus function for ete3. \nModelled on the similar function from scikit-bio tree module. If \ncutoff=0.5 then it is a normal majority rule consensus, while if \ncutoff=0.0 then subsequent non-conflicting clades are added to the tree.", "id": "f5347:m17"}
{"signature": "def _renamer(self, tre):", "body": "<EOL>names = tre.get_leaves()<EOL>for name in names:<EOL><INDENT>name.name = self.samples[int(name.name)]<EOL><DEDENT>return tre.write(format=<NUM_LIT:9>)<EOL>", "docstring": "renames newick from numbers to sample names", "id": "f5347:c0:m10"}
{"signature": "def _inference(self, start, lbview, quiet=False):", "body": "<EOL>gen = xrange(self.checkpoint.arr, self.params.nquartets, self._chunksize)<EOL>njobs = sum(<NUM_LIT:1> for _ in gen)<EOL>jobiter = iter(gen)<EOL>LOGGER.info(\"<STR_LIT>\",self._chunksize, self.checkpoint.arr, self.params.nquartets, njobs)<EOL>key = \"<STR_LIT>\".format(self.checkpoint.boots)<EOL>with h5py.File(self.database.output, '<STR_LIT>') as out:<EOL><INDENT>if key not in out[\"<STR_LIT>\"].keys():<EOL><INDENT>out[\"<STR_LIT>\"].create_dataset(key, <EOL>(self.params.nquartets, <NUM_LIT:4>), <EOL>dtype=np.uint32, <EOL>chunks=(self._chunksize, <NUM_LIT:4>))<EOL><DEDENT><DEDENT>elapsed = datetime.timedelta(seconds=int(time.time()-start))<EOL>if not self.checkpoint.boots:<EOL><INDENT>printstr = \"<STR_LIT>\"<EOL>if not quiet:<EOL><INDENT>progressbar(<NUM_LIT:1>, <NUM_LIT:0>, printstr.format(elapsed), spacer=\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>printstr = \"<STR_LIT>\"<EOL>if not quiet:<EOL><INDENT>progressbar(self.params.nboots, self.checkpoint.boots, <EOL>printstr.format(self.checkpoint.boots, elapsed), spacer=\"<STR_LIT>\")<EOL><DEDENT><DEDENT>res = {}<EOL>for _ in xrange(njobs):<EOL><INDENT>qidx = jobiter.next()<EOL>LOGGER.info('<STR_LIT>', qidx)<EOL>with h5py.File(self.database.input, '<STR_LIT:r>') as inh5:<EOL><INDENT>smps = inh5[\"<STR_LIT>\"][qidx:qidx+self._chunksize]<EOL>res[qidx] = lbview.apply(nworker, *[self, smps, TESTS])<EOL><DEDENT><DEDENT>done = <NUM_LIT:0><EOL>while <NUM_LIT:1>:<EOL><INDENT>curkeys = res.keys()<EOL>finished = [i.ready() for i in res.values()]<EOL>if any(finished):<EOL><INDENT>for ikey in curkeys:<EOL><INDENT>if res[ikey].ready():<EOL><INDENT>if res[ikey].successful():<EOL><INDENT>done += <NUM_LIT:1><EOL>results = res[ikey].get(<NUM_LIT:0>)<EOL>LOGGER.info(\"<STR_LIT:%s>\", results[<NUM_LIT:1>])<EOL>self._insert_to_array(ikey, results) <EOL>del res[ikey]<EOL><DEDENT>else:<EOL><INDENT>raise 
IPyradWarningExit(\"\"\"<STR_LIT>\"\"\".format(res[ikey].exception()))<EOL><DEDENT><DEDENT>try:<EOL><INDENT>qidx = jobiter.next()<EOL>with h5py.File(self.database.input, '<STR_LIT:r>') as inh5:<EOL><INDENT>smps = inh5[\"<STR_LIT>\"][qidx:qidx+self._chunksize]<EOL><DEDENT>res[qidx] = lbview.apply(nworker, *[self, smps, TESTS])<EOL><DEDENT>except StopIteration:<EOL><INDENT>continue<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>time.sleep(<NUM_LIT>)<EOL><DEDENT>elapsed = datetime.timedelta(seconds=int(time.time()-start))<EOL>if not self.checkpoint.boots:<EOL><INDENT>if not quiet:<EOL><INDENT>progressbar(njobs, done, printstr.format(elapsed), spacer=\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if not quiet:<EOL><INDENT>progressbar(self.params.nboots, self.checkpoint.boots, <EOL>printstr.format(self.checkpoint.boots, elapsed), <EOL>spacer=\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if njobs == done:<EOL><INDENT>break<EOL><DEDENT><DEDENT>self._dump_qmc()<EOL>if not self.checkpoint.boots:<EOL><INDENT>self._run_qmc(<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>self._run_qmc(<NUM_LIT:1>)            <EOL><DEDENT>self.checkpoint.arr = <NUM_LIT:0><EOL>", "docstring": "Inference sends slices of jobs to the parallel engines for computing\nand collects the results into the output hdf5 array as they finish.", "id": "f5347:c0:m17"}
{"signature": "@numba.jit('<STR_LIT>', nopython=True)<EOL>def subsample_snps_map(seqchunk, nmask, maparr):", "body": "<EOL>rmask = np.zeros(seqchunk.shape[<NUM_LIT:1>], dtype=np.bool_)<EOL>last_loc = -<NUM_LIT:1><EOL>for idx in xrange(maparr.shape[<NUM_LIT:0>]):<EOL><INDENT>if maparr[idx] != last_loc:<EOL><INDENT>if not nmask[idx]:<EOL><INDENT>rmask[idx] = True<EOL><DEDENT>last_loc = maparr[idx]<EOL><DEDENT><DEDENT>return rmask<EOL>", "docstring": "removes ncolumns from snparray prior to matrix calculation, and \nsubsamples 'linked' snps (those from the same RAD locus) such that\nfor these four samples only 1 SNP per locus is kept. This information\ncomes from the 'map' array (map file).", "id": "f5347:m6"}
{"signature": "def _dump_qmc(self):", "body": "<EOL>io5 = h5py.File(self.database.output, '<STR_LIT:r>')<EOL>self.files.qdump = os.path.join(self.dirs, self.name+\"<STR_LIT>\")<EOL>LOGGER.info(\"<STR_LIT>\", self.files.qdump)<EOL>outfile = open(self.files.qdump, '<STR_LIT:w>')<EOL>for idx in xrange(<NUM_LIT:0>, self.params.nquartets, self._chunksize):<EOL><INDENT>masked_quartets = io5[\"<STR_LIT>\"][idx:idx+self._chunksize, :]<EOL>quarts = [list(j) for j in masked_quartets]<EOL>chunk = [\"<STR_LIT>\".format(*i) for i in quarts]<EOL>outfile.write(\"<STR_LIT:\\n>\".join(chunk)+\"<STR_LIT:\\n>\")<EOL><DEDENT>outfile.close()<EOL>io5.close()<EOL>", "docstring": "Makes a reduced array that excludes quartets with no information and \nprints the quartets and weights to a file formatted for wQMC", "id": "f5347:c0:m9"}
{"signature": "@numba.jit(nopython=True)<EOL>def fill_boot(seqarr, newboot, newmap, spans, loci):", "body": "<EOL>cidx = <NUM_LIT:0><EOL>for i in xrange(loci.shape[<NUM_LIT:0>]):<EOL><INDENT>x1 = spans[loci[i]][<NUM_LIT:0>]<EOL>x2 = spans[loci[i]][<NUM_LIT:1>]<EOL>cols = seqarr[:, x1:x2]<EOL>cord = np.random.choice(cols.shape[<NUM_LIT:1>], cols.shape[<NUM_LIT:1>], replace=False)<EOL>rcols = cols[:, cord]<EOL>newboot[:, cidx:cidx+cols.shape[<NUM_LIT:1>]] = rcols<EOL>newmap[cidx: cidx+cols.shape[<NUM_LIT:1>], <NUM_LIT:0>] = i+<NUM_LIT:1><EOL>cidx += cols.shape[<NUM_LIT:1>]<EOL><DEDENT>return newboot, newmap<EOL>", "docstring": "fills the new bootstrap resampled array", "id": "f5347:m15"}
{"signature": "def run(self, force=<NUM_LIT:0>, verbose=<NUM_LIT:2>, ipyclient=None):", "body": "<EOL>if force:<EOL><INDENT>self.refresh()<EOL><DEDENT>inst = None<EOL>try:<EOL><INDENT>if not ipyclient:<EOL><INDENT>args = self._ipcluster.items() + [(\"<STR_LIT>\", \"<STR_LIT>\")] <EOL>ipyclient = ip.core.parallel.get_client(**dict(args)) <EOL><DEDENT>if verbose == <NUM_LIT:2>:<EOL><INDENT>ip.cluster_info(ipyclient)<EOL><DEDENT>targets = get_targets(ipyclient)<EOL>lbview = ipyclient.load_balanced_view(targets=targets)<EOL>self._ipcluster[\"<STR_LIT>\"] = ipyclient[:].apply(os.getpid).get_dict()<EOL>if not self._chunksize:<EOL><INDENT>if self.params.method != '<STR_LIT>':<EOL><INDENT>self._store_N_samples(ncpus=len(lbview))<EOL><DEDENT>else:<EOL><INDENT>self._store_equal_samples(ncpus=len(lbview))<EOL><DEDENT><DEDENT>start = time.time()            <EOL>if not self.trees.tree:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\".format(self.params.nquartets))<EOL><DEDENT>self._inference(start, lbview, quiet=verbose == <NUM_LIT:0>)<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>start = time.time()<EOL>if self.params.nboots:<EOL><INDENT>if self.checkpoint.boots == self.params.nboots:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\".format(self.params.nboots))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>while self.checkpoint.boots < self.params.nboots:<EOL><INDENT>if self.files.mapfile:<EOL><INDENT>self._sample_bootseq_array_map()<EOL><DEDENT>else:<EOL><INDENT>self._sample_bootseq_array() <EOL><DEDENT>self.checkpoint.boots += <NUM_LIT:1><EOL>self._inference(start, lbview, quiet=verbose == <NUM_LIT:0>)<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>self.files.stats = os.path.join(self.dirs, self.name+\"<STR_LIT>\")<EOL>if not 
self.kwargs.get(\"<STR_LIT>\"):<EOL><INDENT>self._compute_tree_stats(ipyclient)<EOL><DEDENT>else:<EOL><INDENT>self._finalize_stats(ipyclient)<EOL><DEDENT><DEDENT>except KeyboardInterrupt as inst:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL><DEDENT>except IPyradWarningExit as inst:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\", inst)<EOL>print(inst)<EOL><DEDENT>except Exception as inst:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\", inst)<EOL>print(\"<STR_LIT>\".format(inst))<EOL><DEDENT>finally:<EOL><INDENT>try:<EOL><INDENT>self._save()                <EOL>if ipyclient:<EOL><INDENT>ipyclient.abort()<EOL>LOGGER.info(\"<STR_LIT>\", self._ipcluster[\"<STR_LIT>\"])<EOL>for engine_id, pid in self._ipcluster[\"<STR_LIT>\"].items():<EOL><INDENT>LOGGER.info(\"<STR_LIT>\", engine_id)<EOL>LOGGER.info(\"<STR_LIT>\", pid)<EOL>LOGGER.info(\"<STR_LIT>\", ipyclient.queue_status()[engine_id][\"<STR_LIT>\"])<EOL>if ipyclient.queue_status()[engine_id][\"<STR_LIT>\"]:<EOL><INDENT>LOGGER.info('<STR_LIT>'.format(engine_id, pid))<EOL>os.kill(pid, <NUM_LIT:2>)<EOL><DEDENT><DEDENT>time.sleep(<NUM_LIT:1>)<EOL>if '<STR_LIT>' in self._ipcluster[\"<STR_LIT>\"]:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\")<EOL>ipyclient.shutdown(hub=True, block=False)<EOL>ipyclient.close()<EOL>LOGGER.info(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>if not ipyclient.outstanding:<EOL><INDENT>ipyclient.purge_everything()<EOL><DEDENT>else:<EOL><INDENT>ipyclient.shutdown(hub=True, block=False)<EOL>ipyclient.close()<EOL>print(\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT>if inst:<EOL><INDENT>raise inst<EOL><DEDENT><DEDENT>except Exception as inst2:<EOL><INDENT>print(\"<STR_LIT>\".format(inst2))<EOL>LOGGER.error(\"<STR_LIT>\", inst2)<EOL><DEDENT><DEDENT>", "docstring": "Run quartet inference on a SNP alignment and distribute work\nacross an ipyparallel cluster (ipyclient). 
Unless passed an \nipyclient explicitly, it looks for a running ipcluster instance\nrunning from the defautl (\"\") profile, and will raise an exception\nif one is not found within a set time limit. If not using the default\nprofile then you can set \"profile\" as an argument to the tetrad object. \nParameter settings influencing the run (e.g., nquartets, method) should\nbe set on the tetrad Class object itself. \n\nParameters\n----------\nforce (bool):\n    Overwrite results for an object with this name if they exist.\nverbose (int):\n    0=print nothing, 1=print progress bars, 2=print progress bars and\n    print cluster info. \nipyclient (ipyparallel.Client object):\n    Default is None (use running Default ipcluster instance). To use\n    a different ipcluster instance start a Client class object \n    and pass it in as an argument here.", "id": "f5347:c0:m16"}
{"signature": "def _byteify(data, ignore_dicts=False):", "body": "if isinstance(data, unicode):<EOL><INDENT>return data.encode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>if isinstance(data, list):<EOL><INDENT>return [_byteify(item, ignore_dicts=True) for item in data]<EOL><DEDENT>if isinstance(data, dict) and not ignore_dicts:<EOL><INDENT>return {<EOL>_byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)<EOL>for key, value in data.iteritems()<EOL>}<EOL><DEDENT>return data<EOL>", "docstring": "converts unicode to utf-8 when reading in json files", "id": "f5347:m16"}
{"signature": "def get_targets(ipyclient):", "body": "<EOL>hosts = []<EOL>for eid in ipyclient.ids:<EOL><INDENT>engine = ipyclient[eid]<EOL>if not engine.outstanding:<EOL><INDENT>hosts.append(engine.apply(socket.gethostname))<EOL><DEDENT><DEDENT>hosts = [i.get() for i in hosts]<EOL>hostset = set(hosts)<EOL>hostzip = zip(hosts, ipyclient.ids)<EOL>hostdict = {host: [i[<NUM_LIT:1>] for i in hostzip if i[<NUM_LIT:0>] == host] for host in hostset}<EOL>targets = list(itertools.chain(*[hostdict[i][:<NUM_LIT:2>] for i in hostdict]))<EOL>return targets<EOL>", "docstring": "A function to find 2 engines per hostname on the ipyclient.\nWe'll assume that the CPUs are hyperthreaded, which is why\nwe grab two. If they are not then no foul. Two multi-threaded\njobs will be run on each of the 2 engines per host.", "id": "f5347:m0"}
{"signature": "def _get_total(tots, node):", "body": "if (node.is_leaf() or node.is_root()):<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>if len(node.children) > <NUM_LIT:2>:<EOL><INDENT>down_r = node.children[<NUM_LIT:0>]<EOL>down_l = node.children[<NUM_LIT:1>]<EOL>for child in node.children[<NUM_LIT:2>:]:<EOL><INDENT>down_l += child<EOL><DEDENT><DEDENT>else:<EOL><INDENT>down_r, down_l = node.children<EOL><DEDENT>lendr = sum(<NUM_LIT:1> for i in down_r.iter_leaves())<EOL>lendl = sum(<NUM_LIT:1> for i in down_l.iter_leaves())<EOL>up_r = node.get_sisters()[<NUM_LIT:0>]<EOL>lenur = sum(<NUM_LIT:1> for i in up_r.iter_leaves())<EOL>lenul = tots - (lendr + lendl + lenur)<EOL>return lendr * lendl * lenur * lenul<EOL><DEDENT>", "docstring": "get total number of quartets possible for a split", "id": "f5347:m21"}
{"signature": "def n_choose_k(n, k):", "body": "return int(reduce(MUL, (Fraction(n-i, i+<NUM_LIT:1>) for i in range(k)), <NUM_LIT:1>))<EOL>", "docstring": "get the number of quartets as n-choose-k. This is used\n    in equal splits to decide whether a split should be exhaustively sampled\n    or randomly sampled. Edges near tips can be exhaustive while highly nested\n    edges probably have too many quartets", "id": "f5347:m4"}
{"signature": "def _compute_tree_stats(self, ipyclient):", "body": "compute_tree_stats(self, ipyclient)<EOL>", "docstring": "writes support values as edge labels on unrooted tree", "id": "f5347:c0:m12"}
{"signature": "def compute_tree_stats(self, ipyclient):", "body": "<EOL>names = self.samples<EOL>if self.params.nboots:<EOL><INDENT>fulltre = ete3.Tree(self.trees.tree, format=<NUM_LIT:0>)<EOL>fulltre.unroot()<EOL>with open(self.trees.boots, '<STR_LIT:r>') as inboots:<EOL><INDENT>bb = [ete3.Tree(i.strip(), format=<NUM_LIT:0>) for i in inboots.readlines()]<EOL>wboots = [fulltre] + bb[-self.params.nboots:]<EOL><DEDENT>wctre, wcounts = consensus_tree(wboots, names=names)<EOL>self.trees.cons = os.path.join(self.dirs, self.name + \"<STR_LIT>\")<EOL>with open(self.trees.cons, '<STR_LIT:w>') as ocons:<EOL><INDENT>ocons.write(wctre.write(format=<NUM_LIT:0>))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>wctre = ete3.Tree(self.trees.tree, format=<NUM_LIT:0>)<EOL>wctre.unroot()<EOL><DEDENT>self.trees.nhx = os.path.join(self.dirs, self.name + \"<STR_LIT>\")<EOL>with open(self.files.stats, '<STR_LIT:w>') as ostats:<EOL><INDENT>if self.params.nboots:<EOL><INDENT>ostats.write(\"<STR_LIT>\".format(len(wboots)))<EOL>for i, j in enumerate(self.samples):<EOL><INDENT>ostats.write(\"<STR_LIT>\".format(i, j))<EOL><DEDENT>ostats.write(\"<STR_LIT:\\n>\")<EOL>for split, freq in wcounts:<EOL><INDENT>if split.count('<STR_LIT:1>') > <NUM_LIT:1>:<EOL><INDENT>ostats.write(\"<STR_LIT>\".format(split, round(freq, <NUM_LIT:2>)))<EOL><DEDENT><DEDENT>ostats.write(\"<STR_LIT:\\n>\")<EOL><DEDENT><DEDENT>lbview = ipyclient.load_balanced_view()<EOL>qtots = {}<EOL>qsamp = {}<EOL>tots = sum(<NUM_LIT:1> for i in wctre.iter_leaves())<EOL>totn = set(wctre.get_leaf_names())<EOL>for node in wctre.traverse():<EOL><INDENT>qtots[node] = lbview.apply(_get_total, *(tots, node))<EOL>qsamp[node] = lbview.apply(_get_sampled, *(self, totn, node))<EOL><DEDENT>ipyclient.wait()<EOL>for node in wctre.traverse():<EOL><INDENT>total = qtots[node].result()<EOL>sampled = qsamp[node].result()<EOL>node.add_feature(\"<STR_LIT>\", total)<EOL>node.add_feature(\"<STR_LIT>\", sampled)<EOL><DEDENT>features = [\"<STR_LIT>\", 
\"<STR_LIT>\"]<EOL>with open(self.trees.nhx, '<STR_LIT:w>') as outtre:<EOL><INDENT>outtre.write(wctre.write(format=<NUM_LIT:0>, features=features))<EOL><DEDENT>", "docstring": "compute stats for stats file and NHX tree features", "id": "f5347:m1"}
{"signature": "def opr(path):", "body": "return os.path.realpath(path)<EOL>", "docstring": "shorthand for realpath", "id": "f5347:m10"}
{"signature": "def _finalize_stats(self, ipyclient):", "body": "<EOL>print(FINALTREES.format(opr(self.trees.tree)))<EOL>if self.params.nboots:<EOL><INDENT>self._compute_tree_stats(ipyclient)<EOL>print(BOOTTREES.format(opr(self.trees.cons), opr(self.trees.boots))) <EOL><DEDENT>if len(self.samples) < <NUM_LIT:20>:<EOL><INDENT>if self.params.nboots:<EOL><INDENT>wctre = ete3.Tree(self.trees.cons, format=<NUM_LIT:0>)<EOL>wctre.ladderize()<EOL>print(wctre.get_ascii(show_internal=True, <EOL>attributes=[\"<STR_LIT>\", \"<STR_LIT:name>\"]))<EOL>print(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>qtre = ete3.Tree(self.trees.tree, format=<NUM_LIT:0>)<EOL>qtre.ladderize()<EOL>print(qtre.get_ascii())<EOL>print(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>docslink = \"<STR_LIT>\"    <EOL>citelink = \"<STR_LIT>\"<EOL>print(LINKS.format(docslink, citelink))<EOL>", "docstring": "write final tree files", "id": "f5347:c0:m11"}
{"signature": "def _parse_names(self):", "body": "self.samples = []<EOL>with iter(open(self.files.data, '<STR_LIT:r>')) as infile:<EOL><INDENT>infile.next().strip().split()<EOL>while <NUM_LIT:1>:<EOL><INDENT>try:<EOL><INDENT>self.samples.append(infile.next().split()[<NUM_LIT:0>])<EOL><DEDENT>except StopIteration:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "parse sample names from the sequence file", "id": "f5347:c0:m2"}
{"signature": "def _insert_to_array(self, start, results):", "body": "qrts, wgts, qsts = results<EOL>with h5py.File(self.database.output, '<STR_LIT>') as out:<EOL><INDENT>chunk = self._chunksize<EOL>out['<STR_LIT>'][start:start+chunk] = qrts<EOL>if self.checkpoint.boots:<EOL><INDENT>key = \"<STR_LIT>\".format(self.checkpoint.boots-<NUM_LIT:1>)<EOL>out[key][start:start+chunk] = qsts<EOL><DEDENT>else:<EOL><INDENT>out[\"<STR_LIT>\"][start:start+chunk] = qsts<EOL><DEDENT><DEDENT>", "docstring": "inputs results from workers into hdf4 array", "id": "f5347:c0:m14"}
{"signature": "def copy(self):", "body": "cp = copy.deepcopy(self)<EOL>cp.genotypes = allel.GenotypeArray(self.genotypes, copy=True)<EOL>return cp<EOL>", "docstring": "returns a copy of the pca analysis object", "id": "f5349:c0:m11"}
{"signature": "def __init__(self, <EOL>data=None, <EOL>pops=None,<EOL>ncomps=<NUM_LIT:10>,<EOL>quiet=True):", "body": "self.quiet = quiet<EOL>self.ncomponents = ncomps<EOL>if isinstance(data, Assembly):<EOL><INDENT>self.assembly = data<EOL>self.pops = data.populations<EOL>try:<EOL><INDENT>self.data = data.outfiles.vcf<EOL><DEDENT>except AttributeError as inst:<EOL><INDENT>raise IPyradError(MISSING_VCF_ERROR)  <EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.assembly = Assembly(\"<STR_LIT>\", quiet=True)<EOL>self.data = os.path.realpath(data)<EOL>self.pops = {}<EOL><DEDENT>if pops:<EOL><INDENT>if isinstance(pops, dict):<EOL><INDENT>self.pops = {x:(<NUM_LIT:0>, y) for x, y in pops.items()}<EOL><DEDENT>else:<EOL><INDENT>if not os.path.isfile(pops):<EOL><INDENT>raise IPyradError(\"<STR_LIT>\".format(pops))<EOL><DEDENT>mindat = [i.lstrip(\"<STR_LIT:#>\").lstrip().rstrip() for i inopen(pops, '<STR_LIT:r>').readlines() if i.startswith(\"<STR_LIT:#>\")]<EOL>if not mindat:<EOL><INDENT>lines = open(pops, '<STR_LIT:r>').readlines()<EOL>p = set([x.split()[<NUM_LIT:1>].strip() for x in lines])<EOL>with open(pops, '<STR_LIT:a>') as outfile:<EOL><INDENT>outfile.write(\"<STR_LIT>\" + \"<STR_LIT:U+0020>\".join([\"<STR_LIT>\".format(x) for x in p]))<EOL><DEDENT><DEDENT>self.assembly.paramsdict[\"<STR_LIT>\"] = os.path.realpath(pops)<EOL>self.assembly._link_populations()<EOL>self.pops = self.assembly.populations<EOL><DEDENT><DEDENT>tmpdict = {}<EOL>for samp in self.pops:<EOL><INDENT>tmpdict[samp] = self.pops[samp][<NUM_LIT:1>]<EOL><DEDENT>self.pops = tmpdict<EOL>self._load_calldata()<EOL>if not self.pops:<EOL><INDENT>self.pops = {\"<STR_LIT>\":self.samples_vcforder}<EOL><DEDENT>if not self.quiet:<EOL><INDENT>print(\"<STR_LIT>\".format(self.pops))<EOL><DEDENT>if not self.pops:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "ipyrad.analysis Baba Class object.\n\nParameters\n----------\ndata : Assembly object or path to file\n    Either an ipyrad assembly or a  string path to a 
.vcf file. If\n    it's a string path then you'll probably want to specify pops as\n    well or else all your dots will be the same color.\n\npops : dict or path to file\n    A dictionary specifying the population assignment of each\n    sample. This is optional, since by default if you used a pops\n    file during your assembly the assembly object will include\n    the pops info internally.\nncomps : int\n    The number of PCs to calculate. Probably most people won't care\n    to mess with this, but it's simple enough to make it flexible. \n\nFunctions\n---------\nrun()\n    ...\nplot()\n    ...", "id": "f5349:c0:m0"}
{"signature": "def main():", "body": "<EOL>args = parse_command_line()<EOL>print(HEADER.format(ip.__version__))<EOL>np.random.seed(args.rseed)<EOL>if os.path.exists(ip.__debugflag__):<EOL><INDENT>os.remove(ip.__debugflag__)<EOL><DEDENT>if args.debug:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>ip._debug_on()<EOL><DEDENT>if args.json:<EOL><INDENT>data = ipa.tetrad(name=args.name, workdir=args.workdir, load=True)<EOL>if args.force:<EOL><INDENT>data._refresh()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>newjson = os.path.join(args.workdir, args.name+'<STR_LIT>')<EOL>print(\"<STR_LIT>\".format(args.name))<EOL>if (not os.path.exists(newjson)) or args.force:<EOL><INDENT>if args.force:<EOL><INDENT>ipa.tetrad(name=args.name, <EOL>workdir=args.workdir, <EOL>data=args.seq, <EOL>initarr=False, <EOL>save_invariants=args.invariants,<EOL>cli=True,<EOL>quiet=True)._refresh()<EOL><DEDENT>data = ipa.tetrad(name=args.name, <EOL>workdir=args.workdir, <EOL>method=args.method, <EOL>data=args.seq, <EOL>resolve=args.resolve,<EOL>mapfile=args.map, <EOL>guidetree=args.tree, <EOL>nboots=args.boots, <EOL>nquartets=args.nquartets, <EOL>cli=True,<EOL>save_invariants=args.invariants,<EOL>)<EOL><DEDENT>else:<EOL><INDENT>raise SystemExit(QUARTET_EXISTS.format(args.name, args.workdir, args.workdir, args.name, args.name))<EOL><DEDENT><DEDENT>if args.boots:<EOL><INDENT>data.params.nboots = int(args.boots)<EOL><DEDENT>if args.ipcluster:<EOL><INDENT>ipyclient = ipp.Client(profile=args.ipcluster)<EOL>data._ipcluster[\"<STR_LIT>\"] = len(ipyclient)<EOL><DEDENT>else:<EOL><INDENT>ipyclient = None<EOL>data._ipcluster[\"<STR_LIT>\"] = args.cores if args.cores else detect_cpus()<EOL>data._ipcluster[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>if args.MPI:<EOL><INDENT>data._ipcluster[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>if not args.cores:<EOL><INDENT>raise IPyradWarningExit(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>data = register_ipcluster(data)<EOL><DEDENT>if data.checkpoint.boots:<EOL><INDENT>print(LOADING_MESSAGE.format(<EOL>data.name, 
data.params.method, data.checkpoint.boots))<EOL><DEDENT>data.run(force=args.force, ipyclient=ipyclient)<EOL>", "docstring": "main function", "id": "f5350:m1"}
{"signature": "def requires():", "body": "with open('<STR_LIT>') as infile:<EOL><INDENT>return infile.read().splitlines()<EOL><DEDENT>", "docstring": "gets packages from requirements.txt", "id": "f5352:m0"}
{"signature": "def query_yes_no(question, default='<STR_LIT:yes>'):", "body": "valid = {'<STR_LIT:yes>': True, '<STR_LIT:y>': True, '<STR_LIT>': True,<EOL>'<STR_LIT>': False, '<STR_LIT:n>': False}<EOL>if default is None:<EOL><INDENT>prompt = '<STR_LIT>'<EOL><DEDENT>elif default == '<STR_LIT:yes>':<EOL><INDENT>prompt = '<STR_LIT>'<EOL><DEDENT>elif default == '<STR_LIT>':<EOL><INDENT>prompt = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % default)<EOL><DEDENT>while True:<EOL><INDENT>sys.stderr.write(question + prompt)<EOL>choice = raw_input().strip().lower()<EOL>if default is not None and choice == '<STR_LIT>':<EOL><INDENT>return valid[default]<EOL><DEDENT>elif choice in valid:<EOL><INDENT>return valid[choice]<EOL><DEDENT>else:<EOL><INDENT>sys.stderr.write('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Ask a yes/no question via raw_input() and return their answer.\n\n    'question' is a string that is presented to the user.\n    'default' is the presumed answer if the user just hits <Enter>.\n        It must be 'yes' (the default), 'no' or None (meaning\n        an answer is required of the user).\n\n    The 'answer' return value is one of 'yes' or 'no'.", "id": "f5356:m0"}
{"signature": "def sequence(self, line_data, child_type=None, reference=None):", "body": "<EOL>reference = reference or self.fasta_external or self.fasta_embedded<EOL>if not reference:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>line_index = line_data['<STR_LIT>']<EOL><DEDENT>except TypeError:<EOL><INDENT>line_index = self.lines[line_data]['<STR_LIT>']<EOL><DEDENT>ld = self.lines[line_index]<EOL>if ld['<STR_LIT:type>'] != '<STR_LIT>':<EOL><INDENT>return None<EOL><DEDENT>seq = reference[ld['<STR_LIT>']][ld['<STR_LIT:start>']-<NUM_LIT:1>:ld['<STR_LIT:end>']]<EOL>if ld['<STR_LIT>'] == '<STR_LIT:->':<EOL><INDENT>seq = complement(seq[::-<NUM_LIT:1>])<EOL><DEDENT>return seq<EOL>", "docstring": "Get the sequence of line_data, according to the columns 'seqid', 'start', 'end', 'strand'.\nRequires fasta reference.\nWhen used on 'mRNA' type line_data, child_type can be used to specify which kind of sequence to return:\n* child_type=None:  pre-mRNA, returns the sequence of line_data from start to end, reverse complement according to strand. (default)\n* child_type='exon':  mature mRNA, concatenates the sequences of children type 'exon'.\n* child_type='CDS':  coding sequence, concatenates the sequences of children type 'CDS'. Use the helper\n                     function translate(seq) on the returned value to obtain the protein sequence.\n\n:param line_data: line_data(dict) with line_data['line_index'] or line_index(int)\n:param child_type: None or feature type(string)\n:param reference: If None, will use self.fasta_external or self.fasta_embedded(dict)\n:return: sequence(string)", "id": "f5359:c0:m16"}
{"signature": "def remove(self, line_data, root_type=None):", "body": "roots = [ld for ld in self.ancestors(line_data) if (root_type and ld['<STR_LIT>'] == root_type) or (not root_type and not ld['<STR_LIT>'])] or [line_data]<EOL>for root in roots:<EOL><INDENT>root['<STR_LIT>'] = '<STR_LIT>'<EOL>root_descendants = self.descendants(root)<EOL>for root_descendant in root_descendants:<EOL><INDENT>root_descendant['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>root_ancestors = self.ancestors(root) <EOL>for root_ancestor in root_ancestors:<EOL><INDENT>if len([ld for ld in root_ancestor['<STR_LIT>'] if ld['<STR_LIT>'] != '<STR_LIT>']) == <NUM_LIT:0>: <EOL><INDENT>root_ancestor['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Marks line_data and all of its associated feature's 'line_status' as 'removed', does not actually remove the line_data from the data structure.\nThe write function checks the 'line_status' when writing the gff file.\nFind the root parent of line_data of type root_type, remove all of its descendants.\nIf the root parent has a parent with no children after the remove, remove the root parent's parent recursively.\n\n:param line_data:\n:param root_type:\n:return:", "id": "f5359:c0:m13"}
{"signature": "def check_parent_boundary(self):", "body": "for line in self.lines:<EOL><INDENT>for parent_feature in line['<STR_LIT>']:<EOL><INDENT>ok = False<EOL>for parent_line in parent_feature:<EOL><INDENT>if parent_line['<STR_LIT:start>'] <= line['<STR_LIT:start>'] and line['<STR_LIT:end>'] <= parent_line['<STR_LIT:end>']:<EOL><INDENT>ok = True<EOL>break<EOL><DEDENT><DEDENT>if not ok:<EOL><INDENT>self.add_line_error(line, {'<STR_LIT:message>': '<STR_LIT>'.format(<EOL>parent_feature[<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>'],<EOL>'<STR_LIT:U+002C>'.join(['<STR_LIT>'.format(line['<STR_LIT>'], line['<STR_LIT:start>'], line['<STR_LIT:end>']) for line in parent_feature])<EOL>), '<STR_LIT>': '<STR_LIT>', '<STR_LIT:location>': '<STR_LIT>'})<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "checks whether child features are within the coordinate boundaries of parent features\n\n:return:", "id": "f5359:c0:m3"}
{"signature": "def adopted(self, old_child, new_child):", "body": "pass<EOL>", "docstring": "Transfer parents from old_child to new_child\n\n:param old_child: line_data(dict) with line_data['line_index'] or line_index(int)\n:param new_child: line_data(dict) with line_data['line_index'] or line_index(int)\n:return: List of parents transferred", "id": "f5359:c0:m11"}
{"signature": "def check_reference(self, sequence_region=False, fasta_embedded=False, fasta_external=False, check_bounds=True, check_n=True, allowed_num_of_n=<NUM_LIT:0>, feature_types=('<STR_LIT>',)):", "body": "<EOL>error_lines = set()<EOL>if not self.lines:<EOL><INDENT>self.logger.debug('<STR_LIT>')<EOL>return error_lines<EOL><DEDENT>check_n_feature_types = set(feature_types)<EOL>if len(check_n_feature_types) == <NUM_LIT:0>:<EOL><INDENT>check_n_feature_types.add('<STR_LIT>')<EOL><DEDENT>n_segments_finditer = re.compile(r'<STR_LIT>').finditer<EOL>check_all_sources = True<EOL>if sequence_region or fasta_embedded or fasta_external:<EOL><INDENT>check_all_sources = False<EOL><DEDENT>start_end_error_locations = set(('<STR_LIT:start>', '<STR_LIT:end>', '<STR_LIT>'))<EOL>valid_line_data_seqid = [(line_data, unquote(line_data['<STR_LIT>'])) for line_data in self.lines if line_data['<STR_LIT>'] == '<STR_LIT>' and line_data['<STR_LIT>'] != '<STR_LIT:.>' and (not line_data['<STR_LIT>'] or not [error_info for error_info in line_data['<STR_LIT>'] if '<STR_LIT:location>' in error_info and error_info['<STR_LIT:location>'] in start_end_error_locations])]<EOL>checked_at_least_one_source = False<EOL>valid_sequence_regions = dict([(unquote(line_data['<STR_LIT>']), line_data) for line_data in self.lines if line_data['<STR_LIT>'] == '<STR_LIT>' and not line_data['<STR_LIT>']])<EOL>unresolved_seqid = set()<EOL>if (check_all_sources or sequence_region) and valid_sequence_regions:<EOL><INDENT>checked_at_least_one_source = True<EOL>for line_data, seqid in valid_line_data_seqid:<EOL><INDENT>if seqid not in valid_sequence_regions and seqid not in unresolved_seqid:<EOL><INDENT>unresolved_seqid.add(seqid)<EOL>error_lines.add(line_data['<STR_LIT>'])<EOL>self.add_line_error(line_data, {'<STR_LIT:message>': u'<STR_LIT>'.format(<EOL>seqid), '<STR_LIT>': '<STR_LIT>', '<STR_LIT:location>': '<STR_LIT>'})<EOL>continue<EOL><DEDENT>if line_data['<STR_LIT:start>'] < 
valid_sequence_regions[seqid]['<STR_LIT:start>']:<EOL><INDENT>error_lines.add(line_data['<STR_LIT>'])<EOL>self.add_line_error(line_data, {'<STR_LIT:message>': '<STR_LIT>' % valid_sequence_regions[seqid]['<STR_LIT:start>'], '<STR_LIT>': '<STR_LIT>', '<STR_LIT:location>': '<STR_LIT>'})<EOL><DEDENT>if line_data['<STR_LIT:end>'] > valid_sequence_regions[seqid]['<STR_LIT:end>']:<EOL><INDENT>error_lines.add(line_data['<STR_LIT>'])<EOL>self.add_line_error(line_data, {'<STR_LIT:message>': '<STR_LIT>' % valid_sequence_regions[seqid]['<STR_LIT:end>'], '<STR_LIT>': '<STR_LIT>', '<STR_LIT:location>': '<STR_LIT>'})<EOL><DEDENT><DEDENT><DEDENT>elif sequence_region:<EOL><INDENT>self.logger.debug('<STR_LIT>')<EOL><DEDENT>unresolved_seqid = set()<EOL>if (check_all_sources or fasta_embedded) and self.fasta_embedded:<EOL><INDENT>checked_at_least_one_source = True<EOL>for line_data, seqid in valid_line_data_seqid:<EOL><INDENT>if seqid not in self.fasta_embedded and seqid not in unresolved_seqid:<EOL><INDENT>unresolved_seqid.add(seqid)<EOL>error_lines.add(line_data['<STR_LIT>'])<EOL>self.add_line_error(line_data, {'<STR_LIT:message>': '<STR_LIT>' % seqid, '<STR_LIT>': '<STR_LIT>', '<STR_LIT:location>': '<STR_LIT>'})<EOL>continue<EOL><DEDENT>if line_data['<STR_LIT:end>'] > len(self.fasta_embedded[seqid]['<STR_LIT>']):<EOL><INDENT>error_lines.add(line_data['<STR_LIT>'])<EOL>self.add_line_error(line_data, {'<STR_LIT:message>': '<STR_LIT>' % len(self.fasta_embedded[seqid]['<STR_LIT>']), '<STR_LIT>': '<STR_LIT>', '<STR_LIT:location>': '<STR_LIT>'})<EOL><DEDENT>if check_n and line_data['<STR_LIT:type>'] in check_n_feature_types:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>n_count = self.fasta_embedded[seqid]['<STR_LIT>'].count('<STR_LIT:N>', line_data['<STR_LIT:start>'] - <NUM_LIT:1>, line_data['<STR_LIT:end>']) + self.fasta_embedded[seqid]['<STR_LIT>'].count('<STR_LIT:n>', line_data['<STR_LIT:start>'] - <NUM_LIT:1>, line_data['<STR_LIT:end>'])<EOL>if n_count > 
allowed_num_of_n:<EOL><INDENT>n_segments = [(m.start(), m.end() - m.start()) for m in n_segments_finditer(self.fasta_embedded[seqid]['<STR_LIT>'], line_data['<STR_LIT:start>'] - <NUM_LIT:1>, line_data['<STR_LIT:end>'])]<EOL>n_segments_str = ['<STR_LIT>' % (m[<NUM_LIT:0>], m[<NUM_LIT:1>]) for m in n_segments]<EOL>error_lines.add(line_data['<STR_LIT>'])<EOL>self.add_line_error(line_data, {'<STR_LIT:message>': '<STR_LIT>' % (n_count, line_data['<STR_LIT:type>'], line_data['<STR_LIT:end>'] - line_data['<STR_LIT:start>'], len(n_segments), '<STR_LIT:U+002CU+0020>'.join(n_segments_str)), '<STR_LIT>': '<STR_LIT>', '<STR_LIT>': n_segments, '<STR_LIT:location>': '<STR_LIT>'})<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif fasta_embedded:<EOL><INDENT>self.logger.debug('<STR_LIT>')<EOL><DEDENT>unresolved_seqid = set()<EOL>if (check_all_sources or fasta_external) and self.fasta_external:<EOL><INDENT>checked_at_least_one_source = True<EOL>for line_data, seqid in valid_line_data_seqid:<EOL><INDENT>if seqid not in self.fasta_external and seqid not in unresolved_seqid:<EOL><INDENT>unresolved_seqid.add(seqid)<EOL>error_lines.add(line_data['<STR_LIT>'])<EOL>self.add_line_error(line_data, {'<STR_LIT:message>': '<STR_LIT>' % seqid, '<STR_LIT>': '<STR_LIT>', '<STR_LIT:location>': '<STR_LIT>'})<EOL>continue<EOL><DEDENT>if line_data['<STR_LIT:end>'] > len(self.fasta_external[seqid]['<STR_LIT>']):<EOL><INDENT>error_lines.add(line_data['<STR_LIT>'])<EOL>self.add_line_error(line_data, {'<STR_LIT:message>': '<STR_LIT>' % len(self.fasta_external[seqid]['<STR_LIT>']), '<STR_LIT>': '<STR_LIT>', '<STR_LIT:location>': '<STR_LIT>'})<EOL><DEDENT>if check_n and line_data['<STR_LIT:type>'] in check_n_feature_types:<EOL><INDENT>n_count = self.fasta_external[seqid]['<STR_LIT>'].count('<STR_LIT:N>', line_data['<STR_LIT:start>'] - <NUM_LIT:1>, line_data['<STR_LIT:end>']) + self.fasta_external[seqid]['<STR_LIT>'].count('<STR_LIT:n>', line_data['<STR_LIT:start>'] - <NUM_LIT:1>, line_data['<STR_LIT:end>'])<EOL>if 
n_count > allowed_num_of_n:<EOL><INDENT>n_segments = [(m.start(), m.end() - m.start()) for m in n_segments_finditer(self.fasta_external[seqid]['<STR_LIT>'], line_data['<STR_LIT:start>'] - <NUM_LIT:1>, line_data['<STR_LIT:end>'])]<EOL>n_segments_str = ['<STR_LIT>' % (m[<NUM_LIT:0>], m[<NUM_LIT:1>]) for m in n_segments]<EOL>error_lines.add(line_data['<STR_LIT>'])<EOL>self.add_line_error(line_data, {'<STR_LIT:message>': '<STR_LIT>' % (n_count, line_data['<STR_LIT:type>'], line_data['<STR_LIT:end>'] - line_data['<STR_LIT:start>'], len(n_segments), '<STR_LIT:U+002CU+0020>'.join(n_segments_str)), '<STR_LIT>': '<STR_LIT>', '<STR_LIT>': n_segments, '<STR_LIT:location>': '<STR_LIT>'})<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif fasta_external:<EOL><INDENT>self.logger.debug('<STR_LIT>')<EOL><DEDENT>if check_all_sources and not checked_at_least_one_source:<EOL><INDENT>self.logger.debug('<STR_LIT>')<EOL><DEDENT>return error_lines<EOL>", "docstring": "Check seqid, bounds and the number of Ns in each feature using one or more reference sources.\n\nSeqid check: check if the seqid can be found in the reference sources.\n\nBounds check: check the start and end fields of each features and log error if the values aren't within the seqid sequence length, requires at least one of these sources: ##sequence-region, embedded #FASTA, or external FASTA file.\n\nNs check: count the number of Ns in each feature with the type specified in *line_types (default: 'CDS') and log an error if the number is greater than allowed_num_of_n (default: 0), requires at least one of these sources: embedded #FASTA, or external FASTA file.\n\nWhen called with all source parameters set as False (default), check all available sources, and log debug message if unable to perform a check due to none of the reference sources being available.\n\nIf any source parameter is set to True, check only those sources marked as True, log error if those sources don't exist.\n\n:param sequence_region: check bounds using the 
##sequence-region directive (default: False)\n:param fasta_embedded: check bounds using the embedded fasta specified by the ##FASTA directive (default: False)\n:param fasta_external: check bounds using the external fasta given by the self.parse_fasta_external (default: False)\n:param check_bounds: If False, don't run the bounds check (default: True)\n:param check_n: If False, don't run the Ns check (default: True)\n:param allowed_num_of_n: only report features with a number of Ns greater than the specified value (default: 0)\n:param feature_types: only check features of these feature_types, multiple types may be specified, if none are specified, check only 'CDS'\n:return: error_lines: a set of line_index(int) with errors detected by check_reference", "id": "f5359:c0:m6"}
{"signature": "@might_need_auth<EOL>def fetch(args):", "body": "storage, remote_path = split_storage(args.remote)<EOL>local_path = args.local<EOL>if local_path is None:<EOL><INDENT>_, local_path = os.path.split(remote_path)<EOL><DEDENT>local_path_exists = os.path.exists(local_path)<EOL>if local_path_exists and not args.force and not args.update:<EOL><INDENT>sys.exit(\"<STR_LIT>\" % local_path)<EOL><DEDENT>directory, _ = os.path.split(local_path)<EOL>if directory:<EOL><INDENT>makedirs(directory, exist_ok=True)<EOL><DEDENT>osf = _setup_osf(args)<EOL>project = osf.project(args.project)<EOL>store = project.storage(storage)<EOL>for file_ in store.files:<EOL><INDENT>if norm_remote_path(file_.path) == remote_path:<EOL><INDENT>if local_path_exists and not args.force and args.update:<EOL><INDENT>if file_.hashes.get('<STR_LIT>') == checksum(local_path):<EOL><INDENT>print(\"<STR_LIT>\" % local_path)<EOL>break<EOL><DEDENT><DEDENT>with open(local_path, '<STR_LIT:wb>') as fp:<EOL><INDENT>file_.write_to(fp)<EOL><DEDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Fetch an individual file from a project.\n\n    The first part of the remote path is interpreted as the name of the\n    storage provider. If there is no match the default (osfstorage) is\n    used.\n\n    The local path defaults to the name of the remote file.\n\n    If the project is private you need to specify a username.\n\n    If args.force is True, write local file even if that file already exists.\n    If args.force is False but args.update is True, overwrite an existing local\n    file only if local and remote files differ.", "id": "f5378:m7"}
{"signature": "@might_need_auth<EOL>def remove(args):", "body": "osf = _setup_osf(args)<EOL>if osf.username is None or osf.password is None:<EOL><INDENT>sys.exit('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>project = osf.project(args.project)<EOL>storage, remote_path = split_storage(args.target)<EOL>store = project.storage(storage)<EOL>for f in store.files:<EOL><INDENT>if norm_remote_path(f.path) == remote_path:<EOL><INDENT>f.remove()<EOL><DEDENT><DEDENT>", "docstring": "Remove a file from the project's storage.\n\n    The first part of the remote path is interpreted as the name of the\n    storage provider. If there is no match the default (osfstorage) is\n    used.", "id": "f5378:m10"}
{"signature": "@might_need_auth<EOL>def upload(args):", "body": "osf = _setup_osf(args)<EOL>if osf.username is None or osf.password is None:<EOL><INDENT>sys.exit('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>project = osf.project(args.project)<EOL>storage, remote_path = split_storage(args.destination)<EOL>store = project.storage(storage)<EOL>if args.recursive:<EOL><INDENT>if not os.path.isdir(args.source):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(args.source))<EOL><DEDENT>_, dir_name = os.path.split(args.source)<EOL>for root, _, files in os.walk(args.source):<EOL><INDENT>subdir_path = os.path.relpath(root, args.source)<EOL>for fname in files:<EOL><INDENT>local_path = os.path.join(root, fname)<EOL>with open(local_path, '<STR_LIT:rb>') as fp:<EOL><INDENT>name = os.path.join(remote_path, dir_name, subdir_path,<EOL>fname)<EOL>store.create_file(name, fp, force=args.force,<EOL>update=args.update)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>with open(args.source, '<STR_LIT:rb>') as fp:<EOL><INDENT>store.create_file(remote_path, fp, force=args.force,<EOL>update=args.update)<EOL><DEDENT><DEDENT>", "docstring": "Upload a new file to an existing project.\n\n    The first part of the remote path is interpreted as the name of the\n    storage provider. If there is no match the default (osfstorage) is\n    used.\n\n    If the project is private you need to specify a username.\n\n    To upload a whole directory (and all its sub-directories) use the `-r`\n    command-line option. If your source directory name ends in a / then\n    files will be created directly in the remote directory. If it does not\n    end in a slash an extra sub-directory with the name of the local directory\n    will be created.\n\n    To place contents of local directory `foo` in remote directory `bar/foo`:\n    $ osf upload -r foo bar\n    To place contents of local directory `foo` in remote directory `bar`:\n    $ osf upload -r foo/ bar", "id": "f5378:m9"}
{"signature": "def create_file(self, path, fp, force=False, update=False):", "body": "if '<STR_LIT:b>' not in fp.mode:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>path = norm_remote_path(path)<EOL>directory, fname = os.path.split(path)<EOL>directories = directory.split(os.path.sep)<EOL>parent = self<EOL>for directory in directories:<EOL><INDENT>if directory:<EOL><INDENT>parent = parent.create_folder(directory, exist_ok=True)<EOL><DEDENT><DEDENT>url = parent._new_file_url<EOL>connection_error = False<EOL>if file_empty(fp):<EOL><INDENT>response = self._put(url, params={'<STR_LIT:name>': fname}, data=b'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>response = self._put(url, params={'<STR_LIT:name>': fname}, data=fp)<EOL><DEDENT>except ConnectionError:<EOL><INDENT>connection_error = True<EOL><DEDENT><DEDENT>if connection_error or response.status_code == <NUM_LIT>:<EOL><INDENT>if not force and not update:<EOL><INDENT>file_size_bytes = get_local_file_size(fp)<EOL>large_file_cutoff = <NUM_LIT:2>**<NUM_LIT:20> <EOL>if connection_error and file_size_bytes < large_file_cutoff:<EOL><INDENT>msg = (<EOL>\"<STR_LIT>\" +<EOL>\"<STR_LIT>\" +<EOL>\"<STR_LIT>\"<EOL>).format(path)<EOL>raise RuntimeError(msg)<EOL><DEDENT>else:<EOL><INDENT>raise FileExistsError(path)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for file_ in self.files:<EOL><INDENT>if norm_remote_path(file_.path) == path:<EOL><INDENT>if not force:<EOL><INDENT>if checksum(path) == file_.hashes.get('<STR_LIT>'):<EOL><INDENT>break<EOL><DEDENT><DEDENT>fp.seek(<NUM_LIT:0>)<EOL>file_.update(fp)<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(path))<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Store a new file at `path` in this storage.\n\n        The contents of the file descriptor `fp` (opened in 'rb' mode)\n        will be uploaded to `path` which is the full path at\n        which to store the file.\n\n        To force overwrite of an existing file, 
set `force=True`.\n        To overwrite an existing file only if the files differ, set `update=True`", "id": "f5379:c0:m3"}
{"signature": "def copyfileobj(fsrc, fdst, total, length=<NUM_LIT:16>*<NUM_LIT>):", "body": "with tqdm(unit='<STR_LIT>', total=total, unit_scale=True) as pbar:<EOL><INDENT>while <NUM_LIT:1>:<EOL><INDENT>buf = fsrc.read(length)<EOL>if not buf:<EOL><INDENT>break<EOL><DEDENT>fdst.write(buf)<EOL>pbar.update(len(buf))<EOL><DEDENT><DEDENT>", "docstring": "Copy data from file-like object fsrc to file-like object fdst\n\n    This is like shutil.copyfileobj but with a progressbar.", "id": "f5381:m0"}
{"signature": "def write_to(self, fp):", "body": "if '<STR_LIT:b>' not in fp.mode:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>response = self._get(self._download_url, stream=True)<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>response.raw.decode_content = True<EOL>copyfileobj(response.raw, fp,<EOL>int(response.headers['<STR_LIT>']))<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(response.status_code))<EOL><DEDENT>", "docstring": "Write contents of this file to a local file.\n\n        Pass in a filepointer `fp` that has been opened for writing in\n        binary mode.", "id": "f5381:c0:m2"}
{"signature": "def _iter_children(self, url, kind, klass, recurse=None):", "body": "children = self._follow_next(url)<EOL>while children:<EOL><INDENT>child = children.pop()<EOL>kind_ = child['<STR_LIT>']['<STR_LIT>']<EOL>if kind_ == kind:<EOL><INDENT>yield klass(child, self.session)<EOL><DEDENT>elif recurse is not None:<EOL><INDENT>url = self._get_attribute(child, *recurse)<EOL>children.extend(self._follow_next(url))<EOL><DEDENT><DEDENT>", "docstring": "Iterate over all children of `kind`\n\n        Yield an instance of `klass` when a child is of type `kind`. Uses\n        `recurse` as the path of attributes in the JSON returned from `url`\n        to find more children.", "id": "f5381:c1:m0"}
{"signature": "def _json(self, response, status_code):", "body": "if isinstance(status_code, numbers.Integral):<EOL><INDENT>status_code = (status_code,)<EOL><DEDENT>if response.status_code in status_code:<EOL><INDENT>return response.json()<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(response.status_code,<EOL>status_code))<EOL><DEDENT>", "docstring": "Extract JSON from response if `status_code` matches.", "id": "f5382:c0:m7"}
{"signature": "def __init__(self):", "body": "super(OSFSession, self).__init__()<EOL>self.headers.update({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:utf-8>',<EOL>'<STR_LIT:Content-Type>': \"<STR_LIT:application/json>\",<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>})<EOL>self.base_url = '<STR_LIT>'<EOL>", "docstring": "Handle HTTP session related work.", "id": "f5384:c0:m0"}
{"signature": "def split_storage(path, default='<STR_LIT>'):", "body": "path = norm_remote_path(path)<EOL>for provider in KNOWN_PROVIDERS:<EOL><INDENT>if path.startswith(provider + '<STR_LIT:/>'):<EOL><INDENT>if six.PY3:<EOL><INDENT>return path.split('<STR_LIT:/>', maxsplit=<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>return path.split('<STR_LIT:/>', <NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>return (default, path)<EOL>", "docstring": "Extract storage name from file path.\n\n    If a path begins with a known storage provider the name is removed\n    from the path. Otherwise the `default` storage provider is returned\n    and the path is not modified.", "id": "f5388:m1"}
{"signature": "def checksum(file_path, hash_type='<STR_LIT>', block_size=<NUM_LIT>):", "body": "if hash_type == '<STR_LIT>':<EOL><INDENT>hash_ = hashlib.md5()<EOL><DEDENT>elif hash_type == '<STR_LIT>':<EOL><INDENT>hash_ = hashlib.sha256()<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>.format(hash_type)<EOL>)<EOL><DEDENT>with open(file_path, '<STR_LIT:rb>') as f:<EOL><INDENT>for block in iter(lambda: f.read(block_size), b'<STR_LIT>'):<EOL><INDENT>hash_.update(block)<EOL><DEDENT><DEDENT>return hash_.hexdigest()<EOL>", "docstring": "Returns either the md5 or sha256 hash of a file at `file_path`.\n\n    md5 is the default hash_type as it is faster than sha256\n\n    The default block size is 64 kb, which appears to be one of a few command\n    choices according to https://stackoverflow.com/a/44873382/2680. The code\n    below is an extension of the example presented in that post.", "id": "f5388:m4"}
{"signature": "def norm_remote_path(path):", "body": "path = os.path.normpath(path)<EOL>if path.startswith(os.path.sep):<EOL><INDENT>return path[<NUM_LIT:1>:]<EOL><DEDENT>else:<EOL><INDENT>return path<EOL><DEDENT>", "docstring": "Normalize `path`.\n\n    All remote paths are absolute.", "id": "f5388:m0"}
{"signature": "def guid(self, guid):", "body": "return self._json(self._get(self._build_url('<STR_LIT>', guid)), <NUM_LIT:200>)['<STR_LIT:data>']['<STR_LIT:type>']<EOL>", "docstring": "Determines JSONAPI type for provided GUID", "id": "f5389:c0:m3"}
{"signature": "def project(self, project_id):", "body": "type_ = self.guid(project_id)<EOL>url = self._build_url(type_, project_id)<EOL>if type_ in Project._types:<EOL><INDENT>return Project(self._json(self._get(url), <NUM_LIT:200>), self.session)<EOL><DEDENT>raise OSFException('<STR_LIT>'.format(project_id, type_))<EOL>", "docstring": "Fetch project `project_id`.", "id": "f5389:c0:m2"}
{"signature": "def login(self, username, password=None, token=None):", "body": "self.session.basic_auth(username, password)<EOL>", "docstring": "Login user for protected API calls.", "id": "f5389:c0:m1"}
{"signature": "def cfg_convert(self, value):", "body": "rest = value<EOL>m = self.WORD_PATTERN.match(rest)<EOL>if m is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % value)<EOL><DEDENT>else:<EOL><INDENT>rest = rest[m.end():]<EOL>d = self.config[m.groups()[<NUM_LIT:0>]]<EOL>while rest:<EOL><INDENT>m = self.DOT_PATTERN.match(rest)<EOL>if m:<EOL><INDENT>d = d[m.groups()[<NUM_LIT:0>]]<EOL><DEDENT>else:<EOL><INDENT>m = self.INDEX_PATTERN.match(rest)<EOL>if m:<EOL><INDENT>idx = m.groups()[<NUM_LIT:0>]<EOL>if not self.DIGIT_PATTERN.match(idx):<EOL><INDENT>d = d[idx]<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>n = int(idx)<EOL>d = d[n]<EOL><DEDENT>except TypeError:<EOL><INDENT>d = d[idx]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if m:<EOL><INDENT>rest = rest[m.end():]<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % (value, rest))<EOL><DEDENT><DEDENT><DEDENT>return d<EOL>", "docstring": "Default converter for the cfg:// protocol.", "id": "f5393:c3:m3"}
{"signature": "def configure_custom(self, config):", "body": "c = config.pop('<STR_LIT>')<EOL>if not hasattr(c, '<STR_LIT>') andhasattr(types, '<STR_LIT>') and isinstance(c, types.ClassType):<EOL><INDENT>c = self.resolve(c)<EOL><DEDENT>props = config.pop('<STR_LIT:.>', None)<EOL>kwargs = dict((k, config[k]) for k in config if valid_ident(k))<EOL>result = c(**kwargs)<EOL>if props:<EOL><INDENT>for name, value in props.items():<EOL><INDENT>setattr(result, name, value)<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Configure an object with a user-supplied factory.", "id": "f5393:c3:m5"}
{"signature": "def configure_root(self, config, incremental=False):", "body": "root = logging.getLogger()<EOL>self.common_logger_config(root, config, incremental)<EOL>", "docstring": "Configure a root logger from a dictionary.", "id": "f5393:c4:m8"}
{"signature": "def configure_filter(self, config):", "body": "if '<STR_LIT>' in config:<EOL><INDENT>result = self.configure_custom(config)<EOL><DEDENT>else:<EOL><INDENT>name = config.get('<STR_LIT:name>', '<STR_LIT>')<EOL>result = logging.Filter(name)<EOL><DEDENT>return result<EOL>", "docstring": "Configure a filter from a dictionary.", "id": "f5393:c4:m2"}
{"signature": "def as_tuple(self, value):", "body": "if isinstance(value, list):<EOL><INDENT>value = tuple(value)<EOL><DEDENT>return value<EOL>", "docstring": "Utility function which converts lists to tuples.", "id": "f5393:c3:m6"}
{"signature": "def configure_logger(self, name, config, incremental=False):", "body": "logger = logging.getLogger(name)<EOL>self.common_logger_config(logger, config, incremental)<EOL>propagate = config.get('<STR_LIT>', None)<EOL>if propagate is not None:<EOL><INDENT>logger.propagate = propagate<EOL><DEDENT>", "docstring": "Configure a non-root logger from a dictionary.", "id": "f5393:c4:m7"}
{"signature": "def convert(self, value):", "body": "if not isinstance(value, ConvertingDict) and isinstance(value, dict):<EOL><INDENT>value = ConvertingDict(value)<EOL>value.configurator = self<EOL><DEDENT>elif not isinstance(value, ConvertingList) and isinstance(value, list):<EOL><INDENT>value = ConvertingList(value)<EOL>value.configurator = self<EOL><DEDENT>elif not isinstance(value, ConvertingTuple) andisinstance(value, tuple):<EOL><INDENT>value = ConvertingTuple(value)<EOL>value.configurator = self<EOL><DEDENT>elif isinstance(value, six.string_types):  <EOL><INDENT>m = self.CONVERT_PATTERN.match(value)<EOL>if m:<EOL><INDENT>d = m.groupdict()<EOL>prefix = d['<STR_LIT>']<EOL>converter = self.value_converters.get(prefix, None)<EOL>if converter:<EOL><INDENT>suffix = d['<STR_LIT>']<EOL>converter = getattr(self, converter)<EOL>value = converter(suffix)<EOL><DEDENT><DEDENT><DEDENT>return value<EOL>", "docstring": "Convert values to an appropriate type. dicts, lists and tuples are\nreplaced by their converting alternatives. Strings are checked to\nsee if they have a conversion format and are converted if they do.", "id": "f5393:c3:m4"}
{"signature": "def configure_handler(self, config):", "body": "formatter = config.pop('<STR_LIT>', None)<EOL>if formatter:<EOL><INDENT>try:<EOL><INDENT>formatter = self.config['<STR_LIT>'][formatter]<EOL><DEDENT>except StandardError as e:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % (formatter, e))<EOL><DEDENT><DEDENT>level = config.pop('<STR_LIT>', None)<EOL>filters = config.pop('<STR_LIT>', None)<EOL>if '<STR_LIT>' in config:<EOL><INDENT>c = config.pop('<STR_LIT>')<EOL>if not hasattr(c, '<STR_LIT>') andhasattr(types, '<STR_LIT>') andisinstance(c, types.ClassType):<EOL><INDENT>c = self.resolve(c)<EOL><DEDENT>factory = c<EOL><DEDENT>else:<EOL><INDENT>klass = self.resolve(config.pop('<STR_LIT:class>'))<EOL>if issubclass(klass, logging.handlers.MemoryHandler) and'<STR_LIT:target>' in config:<EOL><INDENT>try:<EOL><INDENT>config['<STR_LIT:target>'] =self.config['<STR_LIT>'][config['<STR_LIT:target>']]<EOL><DEDENT>except StandardError as e:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % (config['<STR_LIT:target>'], e))<EOL><DEDENT><DEDENT>elif issubclass(klass, logging.handlers.SMTPHandler) and'<STR_LIT>' in config:<EOL><INDENT>config['<STR_LIT>'] = self.as_tuple(config['<STR_LIT>'])<EOL><DEDENT>elif issubclass(klass, logging.handlers.SysLogHandler) and'<STR_LIT:address>' in config:<EOL><INDENT>config['<STR_LIT:address>'] = self.as_tuple(config['<STR_LIT:address>'])<EOL><DEDENT>factory = klass<EOL><DEDENT>kwargs = dict((k, config[k]) for k in config if valid_ident(k))<EOL>try:<EOL><INDENT>result = factory(**kwargs)<EOL><DEDENT>except TypeError as te:<EOL><INDENT>if \"<STR_LIT>\" not in str(te):<EOL><INDENT>raise<EOL><DEDENT>kwargs['<STR_LIT>'] = kwargs.pop('<STR_LIT>')<EOL>result = factory(**kwargs)<EOL><DEDENT>if formatter:<EOL><INDENT>result.setFormatter(formatter)<EOL><DEDENT>if level is not None:<EOL><INDENT>result.setLevel(_checkLevel(level))<EOL><DEDENT>if filters:<EOL><INDENT>self.add_filters(result, filters)<EOL><DEDENT>return 
result<EOL>", "docstring": "Configure a handler from a dictionary.", "id": "f5393:c4:m4"}
{"signature": "def configure_formatter(self, config):", "body": "if '<STR_LIT>' in config:<EOL><INDENT>factory = config['<STR_LIT>']  <EOL>try:<EOL><INDENT>result = self.configure_custom(config)<EOL><DEDENT>except TypeError as te:<EOL><INDENT>if \"<STR_LIT>\" not in str(te):<EOL><INDENT>raise<EOL><DEDENT>config['<STR_LIT>'] = config.pop('<STR_LIT>')<EOL>config['<STR_LIT>'] = factory<EOL>result = self.configure_custom(config)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>fmt = config.get('<STR_LIT>', None)<EOL>dfmt = config.get('<STR_LIT>', None)<EOL>result = logging.Formatter(fmt, dfmt)<EOL><DEDENT>return result<EOL>", "docstring": "Configure a formatter from a dictionary.", "id": "f5393:c4:m1"}
{"signature": "def common_logger_config(self, logger, config, incremental=False):", "body": "level = config.get('<STR_LIT>', None)<EOL>if level is not None:<EOL><INDENT>logger.setLevel(_checkLevel(level))<EOL><DEDENT>if not incremental:<EOL><INDENT>for h in logger.handlers[:]:<EOL><INDENT>logger.removeHandler(h)<EOL><DEDENT>handlers = config.get('<STR_LIT>', None)<EOL>if handlers:<EOL><INDENT>self.add_handlers(logger, handlers)<EOL><DEDENT>filters = config.get('<STR_LIT>', None)<EOL>if filters:<EOL><INDENT>self.add_filters(logger, filters)<EOL><DEDENT><DEDENT>", "docstring": "Perform configuration which is common to root and non-root loggers.", "id": "f5393:c4:m6"}
{"signature": "def add_filters(self, filterer, filters):", "body": "for f in filters:<EOL><INDENT>try:<EOL><INDENT>filterer.addFilter(self.config['<STR_LIT>'][f])<EOL><DEDENT>except StandardError as e:<EOL><INDENT>raise ValueError('<STR_LIT>' % (f, e))<EOL><DEDENT><DEDENT>", "docstring": "Add filters to a filterer from a list of names.", "id": "f5393:c4:m3"}
{"signature": "def itervalues(d, **kw):", "body": "return iter(getattr(d, _itervalues)(**kw))<EOL>", "docstring": "Return an iterator over the values of a dictionary.", "id": "f5398:m5"}
{"signature": "def add_move(move):", "body": "setattr(_MovedItems, move.name, move)<EOL>", "docstring": "Add an item to six.moves.", "id": "f5398:m2"}
{"signature": "def _import_module(name):", "body": "__import__(name)<EOL>return sys.modules[name]<EOL>", "docstring": "Import module, returning the module after the last dot.", "id": "f5398:m1"}
{"signature": "def iterlists(d, **kw):", "body": "return iter(getattr(d, _iterlists)(**kw))<EOL>", "docstring": "Return an iterator over the (key, [values]) pairs of a dictionary.", "id": "f5398:m7"}
{"signature": "def _set_global_verbosity_level(is_verbose_output=False):", "body": "global verbose_output<EOL>verbose_output = is_verbose_output<EOL>if verbose_output:<EOL><INDENT>jocker_lgr.setLevel(logging.DEBUG)<EOL><DEDENT>else:<EOL><INDENT>jocker_lgr.setLevel(logging.INFO)<EOL><DEDENT>", "docstring": "sets the global verbosity level for console and the jocker_lgr logger.\n\n    :param bool is_verbose_output: should be output be verbose", "id": "f5400:m0"}
{"signature": "def _import_config(config_file):", "body": "<EOL>jocker_lgr.debug('<STR_LIT>'.format(config_file))<EOL>try:<EOL><INDENT>jocker_lgr.debug('<STR_LIT>')<EOL>with open(config_file, '<STR_LIT:r>') as c:<EOL><INDENT>return yaml.safe_load(c.read())<EOL><DEDENT><DEDENT>except IOError as ex:<EOL><INDENT>jocker_lgr.error(str(ex))<EOL>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>except yaml.parser.ParserError as ex:<EOL><INDENT>jocker_lgr.error('<STR_LIT>'.format(ex))<EOL>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "returns a configuration object\n\n    :param string config_file: path to config file", "id": "f5400:m1"}
{"signature": "def _parse_dumb_push_output(self, string):", "body": "stack = <NUM_LIT:0><EOL>json_list = []<EOL>tmp_json = '<STR_LIT>'<EOL>for char in string:<EOL><INDENT>if not char == '<STR_LIT:\\r>' and not char == '<STR_LIT:\\n>':<EOL><INDENT>tmp_json += char<EOL><DEDENT>if char == '<STR_LIT:{>':<EOL><INDENT>stack += <NUM_LIT:1><EOL><DEDENT>elif char == '<STR_LIT:}>':<EOL><INDENT>stack -= <NUM_LIT:1><EOL><DEDENT>if stack == <NUM_LIT:0>:<EOL><INDENT>if not len(tmp_json) == <NUM_LIT:0>:<EOL><INDENT>json_list.append(tmp_json)<EOL><DEDENT>tmp_json = '<STR_LIT>'<EOL><DEDENT><DEDENT>return json_list<EOL>", "docstring": "since the push process outputs a single unicode string consisting of\n        multiple JSON formatted \"status\" lines, we need to parse it so that it\n        can be read as multiple strings.\n\n        This will receive the string as an input, count curly braces and ignore\n        any newlines. When the curly braces stack is 0, it will append the\n        entire string it has read up until then to a list and so forth.\n\n        :param string: the string to parse\n        :rtype: list of JSON's", "id": "f5400:c0:m1"}
{"signature": "def execute(varsfile, templatefile, outputfile=None, configfile=None,<EOL>dryrun=False, build=False, push=False, verbose=False):", "body": "if dryrun and (build or push):<EOL><INDENT>jocker_lgr.error('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:100>)<EOL><DEDENT>_set_global_verbosity_level(verbose)<EOL>j = Jocker(varsfile, templatefile, outputfile, configfile, dryrun,<EOL>build, push)<EOL>formatted_text = j.generate()<EOL>if dryrun:<EOL><INDENT>g = j.dryrun(formatted_text)<EOL><DEDENT>if build or push:<EOL><INDENT>j.build_image()<EOL><DEDENT>if push:<EOL><INDENT>j.push_image()<EOL><DEDENT>if dryrun:<EOL><INDENT>return g<EOL><DEDENT>", "docstring": "generates a Dockerfile, builds an image and pushes it to DockerHub\n\n    A `Dockerfile` will be generated by Jinja2 according to the `varsfile`\n    imported. If build is true, an image will be generated from the\n    `outputfile` which is the generated Dockerfile and committed to the\n    image:tag string supplied to `build`.\n    If push is true, a build will be triggered and the produced image\n    will be pushed to DockerHub upon completion.\n\n    :param string varsfile: path to file with variables.\n    :param string templatefile: path to template file to use.\n    :param string outputfile: path to output Dockerfile.\n    :param string configfile: path to yaml file with docker-py config.\n    :param bool dryrun: mock run.\n    :param build: False or the image:tag to build to.\n    :param push: False or the image:tag to build to. (triggers build)\n    :param bool verbose: verbose output.", "id": "f5400:m2"}
{"signature": "def init(base_level=DEFAULT_BASE_LOGGING_LEVEL,<EOL>verbose_level=DEFAULT_VERBOSE_LOGGING_LEVEL,<EOL>logging_config=None):", "body": "if logging_config is None:<EOL><INDENT>logging_config = {}<EOL><DEDENT>logging_config = logging_config or LOGGER<EOL>log_file = LOGGER['<STR_LIT>']['<STR_LIT:file>']['<STR_LIT:filename>']<EOL>log_dir = os.path.dirname(os.path.expanduser(log_file))<EOL>if os.path.isfile(log_dir):<EOL><INDENT>sys.exit('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>.format(log_dir))<EOL><DEDENT>try:<EOL><INDENT>if not os.path.exists(log_dir) and not len(log_dir) == <NUM_LIT:0>:<EOL><INDENT>os.makedirs(log_dir)<EOL><DEDENT>dictconfig.dictConfig(logging_config)<EOL>lgr = logging.getLogger('<STR_LIT:user>')<EOL>lgr.setLevel(base_level)<EOL>return lgr<EOL><DEDENT>except ValueError as e:<EOL><INDENT>sys.exit('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>.format(log_file, e))<EOL><DEDENT>", "docstring": "initializes a base logger\n\n    you can use this to init a logger in any of your files.\n    this will use config.py's LOGGER param and logging.dictConfig to configure\n    the logger for you.\n\n    :param int|logging.LEVEL base_level: desired base logging level\n    :param int|logging.LEVEL verbose_level: desired verbose logging level\n    :param dict logging_dict: dictConfig based configuration.\n     used to override the default configuration from config.py\n    :rtype: `python logger`", "id": "f5401:m0"}
{"signature": "@property<EOL><INDENT>def path(self):<DEDENT>", "body": "path = super(WindowsPath2, self).path<EOL>if path.startswith(\"<STR_LIT>\"):<EOL><INDENT>return path[<NUM_LIT:4>:]<EOL><DEDENT>return path<EOL>", "docstring": "Return the path always without the \\\\?\\ prefix.", "id": "f5406:c1:m9"}
{"signature": "@property<EOL><INDENT>def extended_path(self):<DEDENT>", "body": "if self.is_absolute() and not self.path.startswith(\"<STR_LIT>\"):<EOL><INDENT>return \"<STR_LIT>\" % self.path<EOL><DEDENT>return self.path<EOL>", "docstring": "Add prefix \\\\?\\ to every absolute path, so that it's a \"extended-length\"\npath, that should be longer than 259 characters (called: \"MAX_PATH\")\nsee:\nhttps://msdn.microsoft.com/en-us/library/aa365247.aspx#maxpath", "id": "f5406:c1:m8"}
{"signature": "@classmethod<EOL><INDENT>def _from_parts(cls, args, init=True):<DEDENT>", "body": "if args:<EOL><INDENT>args = list(args)<EOL>if isinstance(args[<NUM_LIT:0>], WindowsPath2):<EOL><INDENT>args[<NUM_LIT:0>] = args[<NUM_LIT:0>].path<EOL><DEDENT>elif args[<NUM_LIT:0>].startswith(\"<STR_LIT>\"):<EOL><INDENT>args[<NUM_LIT:0>] = args[<NUM_LIT:0>][<NUM_LIT:4>:]<EOL><DEDENT>args = tuple(args)<EOL><DEDENT>return super(WindowsPath2, cls)._from_parts(args, init)<EOL>", "docstring": "Strip \\\\?\\ prefix in init phase", "id": "f5406:c1:m7"}
{"signature": "def utime(self, *args, **kwargs):", "body": "os.utime(self.extended_path, *args, **kwargs)<EOL>", "docstring": "Set the access and modified times of the file specified by path.", "id": "f5406:c0:m6"}
{"signature": "def apply_types(use_types, guess_type, line):", "body": "new_line = {}<EOL>for k, v in list(line.items()):<EOL><INDENT>if k in use_types:<EOL><INDENT>new_line[k] = force_type(use_types[k], v)<EOL><DEDENT>elif guess_type:<EOL><INDENT>new_line[k] = determine_type(v)<EOL><DEDENT>else:<EOL><INDENT>new_line[k] = v<EOL><DEDENT><DEDENT>return new_line<EOL>", "docstring": "Apply the types on the elements of the line", "id": "f5411:m4"}
{"signature": "def format_to_csv(filename, skiprows=<NUM_LIT:0>, delimiter=\"<STR_LIT>\"):", "body": "if not delimiter:<EOL><INDENT>delimiter = \"<STR_LIT:\\t>\"<EOL><DEDENT>input_file = open(filename, \"<STR_LIT:r>\")<EOL>if skiprows:<EOL><INDENT>[input_file.readline() for _ in range(skiprows)]<EOL><DEDENT>new_filename = os.path.splitext(filename)[<NUM_LIT:0>] + \"<STR_LIT>\"<EOL>output_file = open(new_filename, \"<STR_LIT:w>\")<EOL>header = input_file.readline().split()<EOL>reader = csv.DictReader(input_file, fieldnames=header, delimiter=delimiter)<EOL>writer = csv.DictWriter(output_file, fieldnames=header, delimiter=\"<STR_LIT:U+002C>\")<EOL>writer.writerow(dict((x, x) for x in header))<EOL>for line in reader:<EOL><INDENT>if None in line: del line[None]<EOL>writer.writerow(line)<EOL><DEDENT>input_file.close()<EOL>output_file.close()<EOL>print(\"<STR_LIT>\" % new_filename)<EOL>", "docstring": "Convert a file to a .csv file", "id": "f5411:m7"}
{"signature": "def determine_type(x):", "body": "types = (int, float, str)<EOL>_type = [a for a in types if is_type(a, x)][<NUM_LIT:0>]<EOL>return _type(x)<EOL>", "docstring": "Determine the type of x", "id": "f5411:m1"}
{"signature": "def read_csv(filename, delimiter=\"<STR_LIT:U+002C>\", skip=<NUM_LIT:0>, guess_type=True, has_header=True, use_types={}):", "body": "with open(filename, '<STR_LIT:r>') as f:<EOL><INDENT>if has_header:<EOL><INDENT>header = f.readline().strip().split(delimiter)<EOL><DEDENT>else:<EOL><INDENT>header = None<EOL><DEDENT>for i in range(skip):<EOL><INDENT>f.readline()<EOL><DEDENT>for line in csv.DictReader(f, delimiter=delimiter, fieldnames=header):<EOL><INDENT>if use_types:<EOL><INDENT>yield apply_types(use_types, guess_type, line)<EOL><DEDENT>elif guess_type:<EOL><INDENT>yield dmap(determine_type, line)<EOL><DEDENT>else:<EOL><INDENT>yield line<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Read a CSV file\n\n    Usage\n    -----\n    >>> data = read_csv(filename, delimiter=delimiter, skip=skip,\n            guess_type=guess_type, has_header=True, use_types={}) \n\n    # Use specific types\n    >>> types = {\"sepal.length\": int, \"petal.width\": float}\n    >>> data = read_csv(filename, guess_type=guess_type, use_types=types) \n\n    keywords\n    :has_header:\n        Determine whether the file has a header or not", "id": "f5411:m5"}
{"signature": "def __init__(self, constant, *args, **kwargs):", "body": "super(ConstKDFChain, self).__init__(*args, **kwargs)<EOL>self.__constant = constant<EOL>", "docstring": "Initialize a ConstKDFChain, which uses constant input data instead of passed data\non chains steps.\n\n:param constant: The constant data to pass to the next method on each step.", "id": "f5415:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def generate(cls):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": ":returns: A new key pair with private and public key set.", "id": "f5418:c0:m1"}
{"signature": "@property<EOL><INDENT>def priv(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": ":returns: A bytes-like object encoding the private key of this key pair instance.", "id": "f5418:c0:m2"}
{"signature": "def __init__(self, hash_function, info_string):", "body": "super(RootKeyKDF, self).__init__()<EOL>if not hash_function in RootKeyKDF.HASH_FUNCTIONS:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(info_string, bytes):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>self.__hash_function = RootKeyKDF.HASH_FUNCTIONS[hash_function]<EOL>self.__info_string   = info_string<EOL>", "docstring": "Prepare a RootKeyKDF, following a recommendation by WhisperSystems.\n\n:param hash_function: One of (the strings) \"SHA-256\" and \"SHA-512\".\n:param info_string: A bytes-like object encoding a string unique to this usage\n    within the application.", "id": "f5421:c0:m0"}
{"signature": "@property<EOL><INDENT>def pn(self):<DEDENT>", "body": "return self.__pn<EOL>", "docstring": ":returns: The length of the senders previous sending chain, as an integer. This\n    enables the receiver to store keys for skipped messages of the previous chain.", "id": "f5424:c0:m3"}
{"signature": "@property<EOL><INDENT>def n(self):<DEDENT>", "body": "return self.__n<EOL>", "docstring": ":returns: The current length of the senders sending chain, as an integer.", "id": "f5424:c0:m2"}
{"signature": "@property<EOL><INDENT>def dh_pub(self):<DEDENT>", "body": "return self.__dh_pub<EOL>", "docstring": ":returns: A bytes-like object encoding the new public key of the senders\n    diffie-hellman ratchet.", "id": "f5424:c0:m1"}
{"signature": "@property<EOL><INDENT>def previous_sending_chain_length(self):<DEDENT>", "body": "return self.__previous_sending_chain_length<EOL>", "docstring": "Get the length of the previous sending chain.\n\n:returns: Either an integer representing the length of the previous sending chain\n    or None, if the current one is the first sending chain.", "id": "f5432:c0:m4"}
{"signature": "def encryptMessage(self, message, ad = None):", "body": "if ad == None:<EOL><INDENT>ad = self.__ad<EOL><DEDENT>header = Header(<EOL>self.pub,<EOL>self.__skr.sending_chain_length,<EOL>self.__skr.previous_sending_chain_length<EOL>)<EOL>ciphertext = self.__aead.encrypt(<EOL>message,<EOL>self.__skr.nextEncryptionKey(),<EOL>self._makeAD(header, ad)<EOL>)<EOL>return {<EOL>\"<STR_LIT>\"     : header,<EOL>\"<STR_LIT>\" : ciphertext<EOL>}<EOL>", "docstring": "Encrypt a message using this double ratchet session.\n\n:param message: A bytes-like object encoding the message to encrypt.\n:param ad: A bytes-like object encoding the associated data to use for message\n    authentication. Pass None to use the associated data set during construction.\n:returns: A dictionary containing the message header and ciphertext. The header is\n    required to synchronize the double ratchet of the receiving party. Send it\n    along with the ciphertext.\n\nThe returned dictionary consists of two keys: \"header\", which includes an instance\nof the Header class and \"ciphertext\", which includes the encrypted message encoded\nas a bytes-like object.\n\n:raises NotInitializedException: If this double ratchet session is not yet\n    initialized with the other parties public key, thus not ready to encrypt a\n    message to that party.", "id": "f5433:c0:m8"}
{"signature": "def _onNewChainKey(self, key, chain):", "body": "self.__skr.step(key, chain)<EOL>", "docstring": "Update the symmetric key ratchet with the new key.", "id": "f5433:c0:m3"}
{"signature": "def __init__(<EOL>self,<EOL>aead,<EOL>message_key_store_max,<EOL>symmetric_key_ratchet,<EOL>ad,<EOL>*args,<EOL>**kwargs<EOL>):", "body": "self.__aead    = aead<EOL>self.__mks_max = message_key_store_max<EOL>self.__skr     = symmetric_key_ratchet<EOL>self.__ad      = ad<EOL>self.__saved_message_keys = {}<EOL>super(DoubleRatchet, self).__init__(*args, **kwargs)<EOL>", "docstring": "Initialize a new DoubleRatchet.\n\n:param aead: An instance of an implementation of the AEAD interface, which is used\n    to provice authenticated message encryption and is fed with the message keys\n    derived using the symmetric key ratchet.\n:param message_key_store_max: An integer defining the maximum amount of message\n    keys to store before raising an exception. This mechanism allows out-of-order\n    messages, by storing message keys of out-of-order messages instead of\n    discarding them.\n:param symmetric_key_ratchet: An instance of the SymmetricKeyRatchet class, which\n    is used to derive en- and decryption keys for message exchange.\n:param ad: Some associated data to use for message authentication, encoded as a\n    bytes-like object.", "id": "f5433:c0:m0"}
{"signature": "def decryptMessage(self, ciphertext, header, ad = None):", "body": "if ad == None:<EOL><INDENT>ad = self.__ad<EOL><DEDENT>plaintext = self.__decryptSavedMessage(ciphertext, header, ad)<EOL>if plaintext:<EOL><INDENT>return plaintext<EOL><DEDENT>if self.triggersStep(header.dh_pub):<EOL><INDENT>self.__saveMessageKeys(header.pn)<EOL>self.step(header.dh_pub)<EOL><DEDENT>self.__saveMessageKeys(header.n)<EOL>return self.__decrypt(<EOL>ciphertext,<EOL>self.__skr.nextDecryptionKey(),<EOL>header,<EOL>ad<EOL>)<EOL>", "docstring": "Decrypt a message using this double ratchet session.\n\n:param ciphertext: A bytes-like object encoding the message to decrypt.\n:param header: An instance of the Header class. This should have been sent\n    together with the ciphertext.\n:param ad: A bytes-like object encoding the associated data to use for message\n    authentication. Pass None to use the associated data set during construction.\n:returns: The plaintext.\n\n:raises AuthenticationFailedException: If checking the authentication for this\n    message failed.\n:raises NotInitializedException: If this double ratchet session is not yet\n    initialized with a key pair, thus not prepared to decrypt an incoming message.\n:raises TooManySavedMessageKeysException: If more than message_key_store_max have\n    to be stored to decrypt this message.", "id": "f5433:c0:m4"}
{"signature": "def _makeAD(self, header, ad):", "body": "raise NotImplementedError<EOL>", "docstring": "Construct specific associated data for this message from the message header and\nthe general associated data.\n\n:param header: An instance of the Header class.\n:param ad: A bytes-like object encoding the general associated data.\n:returns: A bytes-like object encoding the message-specific associated data.", "id": "f5433:c0:m9"}
{"signature": "@property<EOL><INDENT>def other_pub(self):<DEDENT>", "body": "return self.__other.pub<EOL>", "docstring": ":returns: A bytes-like object encoding the public key of the other Diffie-Hellman\n    ratchet to synchronize with.", "id": "f5435:c0:m10"}
{"signature": "def triggersStep(self, other_pub):", "body": "return other_pub != self.__other.pub<EOL>", "docstring": ":returns: A boolean indicating whether calling next with this public key would\n    trigger a ratchet step.", "id": "f5435:c0:m6"}
{"signature": "def step(self, other_pub):", "body": "if self.triggersStep(other_pub):<EOL><INDENT>self.__wrapOtherPub(other_pub)<EOL>self.__newRootKey(\"<STR_LIT>\")<EOL>self.__newRatchetKey()<EOL>self.__newRootKey(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Perform a ratchet step, calculating a new shared secret from the public key and\nderiving new chain keys from this secret.\n\nNew Diffie-Hellman calculations are only performed if the public key is different\nfrom the previous one.\n\n:param other_pub: A bytes-like object encoding the public key of the other\n    Diffie-Hellman ratchet to synchronize with.", "id": "f5435:c0:m3"}
{"signature": "def step(self, *args, **kwargs):", "body": "raise NotImplementedError<EOL>", "docstring": "Perform a ratchet step using provided arguments.", "id": "f5436:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def fromSerialized(cls, serialized, *args, **kwargs):<DEDENT>", "body": "return cls(*args, **kwargs)<EOL>", "docstring": ":param serialized: A serializable Python object.\n:returns: Return a new instance that was set to the state that was saved into the\n    serialized object.\n\nUse together with the serialize method.\nNotice: You have to pass all positional parameters required by the constructor of\nthe class you call fromSerialized on.", "id": "f5437:c0:m1"}
{"signature": "def _get_c_program(self, makefile_target_name, binary_name):", "body": "find_executable(MAKE_CMD)<EOL>if not find_executable(MAKE_CMD):<EOL><INDENT>print(<EOL>'<STR_LIT>'<EOL>% MAKE_CMD<EOL>)<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>SAMPLE_C_CODE_DIR = os.path.join(<EOL>os.path.dirname(os.path.realpath(__file__)), \"<STR_LIT>\"<EOL>)<EOL>binary_path = os.path.join(SAMPLE_C_CODE_DIR, binary_name)<EOL>subprocess.call([\"<STR_LIT>\", \"<STR_LIT>\"], cwd=SAMPLE_C_CODE_DIR)<EOL>subprocess.check_output(<EOL>[MAKE_CMD, makefile_target_name, \"<STR_LIT>\", SAMPLE_C_CODE_DIR, \"<STR_LIT>\"]<EOL>)<EOL>return binary_path<EOL>", "docstring": "build c program and return path to binary", "id": "f5442:c0:m1"}
{"signature": "def get_subprocess_cmd(self):", "body": "return \"<STR_LIT:U+0020>\".join(quote(c) for c in self.cmd)<EOL>", "docstring": "Returns the shell-escaped string used to invoke the gdb subprocess.\n        This is a string that can be executed directly in a shell.", "id": "f5445:c2:m2"}
{"signature": "def send_signal_to_gdb(self, signal_input):", "body": "try:<EOL><INDENT>signal = int(signal_input)<EOL><DEDENT>except Exception:<EOL><INDENT>signal = SIGNAL_NAME_TO_NUM.get(signal_input.upper())<EOL><DEDENT>if not signal:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' % str(signal)<EOL>)<EOL><DEDENT>if self.gdb_process:<EOL><INDENT>os.kill(self.gdb_process.pid, signal)<EOL><DEDENT>else:<EOL><INDENT>raise NoGdbProcessError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>", "docstring": "Send signal name (case insensitive) or number to gdb subprocess\n        gdbmi.send_signal_to_gdb(2)  # valid\n        gdbmi.send_signal_to_gdb('sigint')  # also valid\n        gdbmi.send_signal_to_gdb('SIGINT')  # also valid\n\n        raises ValueError if signal_input is invalid\n        raises NoGdbProcessError if there is no gdb process to send a signal to", "id": "f5445:c2:m10"}
{"signature": "def _buffer_incomplete_responses(raw_output, buf):", "body": "if raw_output:<EOL><INDENT>if buf:<EOL><INDENT>raw_output = b\"<STR_LIT>\".join([buf, raw_output])<EOL>buf = None<EOL><DEDENT>if b\"<STR_LIT:\\n>\" not in raw_output:<EOL><INDENT>buf = raw_output<EOL>raw_output = None<EOL><DEDENT>elif not raw_output.endswith(b\"<STR_LIT:\\n>\"):<EOL><INDENT>remainder_offset = raw_output.rindex(b\"<STR_LIT:\\n>\") + <NUM_LIT:1><EOL>buf = raw_output[remainder_offset:]<EOL>raw_output = raw_output[:remainder_offset]<EOL><DEDENT><DEDENT>return (raw_output, buf)<EOL>", "docstring": "It is possible for some of gdb's output to be read before it completely finished its response.\n    In that case, a partial mi response was read, which cannot be parsed into structured data.\n    We want to ALWAYS parse complete mi records. To do this, we store a buffer of gdb's\n    output if the output did not end in a newline.\n\n    Args:\n        raw_output: Contents of the gdb mi output\n        buf (str): Buffered gdb response from the past. This is incomplete and needs to be prepended to\n        gdb's next output.\n\n    Returns:\n        (raw_output, buf)", "id": "f5445:m0"}
{"signature": "def verify_valid_gdb_subprocess(self):", "body": "if not self.gdb_process:<EOL><INDENT>raise NoGdbProcessError(\"<STR_LIT>\")<EOL><DEDENT>elif self.gdb_process.poll() is not None:<EOL><INDENT>raise NoGdbProcessError(<EOL>\"<STR_LIT>\"<EOL>% str(self.gdb_process.poll())<EOL>)<EOL><DEDENT>", "docstring": "Verify there is a process object, and that it is still running.\n        Raise NoGdbProcessError if either of the above are not true.", "id": "f5445:c2:m4"}
{"signature": "def spawn_new_gdb_subprocess(self):", "body": "if self.gdb_process:<EOL><INDENT>self.logger.debug(<EOL>\"<STR_LIT>\" % self.gdb_process.pid<EOL>)<EOL>self.exit()<EOL><DEDENT>self.logger.debug('<STR_LIT>' % \"<STR_LIT:U+0020>\".join(self.cmd))<EOL>self.gdb_process = subprocess.Popen(<EOL>self.cmd,<EOL>shell=False,<EOL>stdout=subprocess.PIPE,<EOL>stdin=subprocess.PIPE,<EOL>stderr=subprocess.PIPE,<EOL>bufsize=<NUM_LIT:0>,<EOL>)<EOL>_make_non_blocking(self.gdb_process.stdout)<EOL>_make_non_blocking(self.gdb_process.stderr)<EOL>self.stdout_fileno = self.gdb_process.stdout.fileno()<EOL>self.stderr_fileno = self.gdb_process.stderr.fileno()<EOL>self.stdin_fileno = self.gdb_process.stdin.fileno()<EOL>self.read_list = [self.stdout_fileno, self.stderr_fileno]<EOL>self.write_list = [self.stdin_fileno]<EOL>self._incomplete_output = {\"<STR_LIT>\": None, \"<STR_LIT>\": None}<EOL>return self.gdb_process.pid<EOL>", "docstring": "Spawn a new gdb subprocess with the arguments supplied to the object\n        during initialization. If gdb subprocess already exists, terminate it before\n        spawning a new one.\n        Return int: gdb process id", "id": "f5445:c2:m3"}
{"signature": "def parse_response(gdb_mi_text):", "body": "stream = StringStream(gdb_mi_text, debug=_DEBUG)<EOL>if _GDB_MI_NOTIFY_RE.match(gdb_mi_text):<EOL><INDENT>token, message, payload = _get_notify_msg_and_payload(gdb_mi_text, stream)<EOL>return {<EOL>\"<STR_LIT:type>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:message>\": message,<EOL>\"<STR_LIT>\": payload,<EOL>\"<STR_LIT>\": token,<EOL>}<EOL><DEDENT>elif _GDB_MI_RESULT_RE.match(gdb_mi_text):<EOL><INDENT>token, message, payload = _get_result_msg_and_payload(gdb_mi_text, stream)<EOL>return {<EOL>\"<STR_LIT:type>\": \"<STR_LIT:result>\",<EOL>\"<STR_LIT:message>\": message,<EOL>\"<STR_LIT>\": payload,<EOL>\"<STR_LIT>\": token,<EOL>}<EOL><DEDENT>elif _GDB_MI_CONSOLE_RE.match(gdb_mi_text):<EOL><INDENT>return {<EOL>\"<STR_LIT:type>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:message>\": None,<EOL>\"<STR_LIT>\": _GDB_MI_CONSOLE_RE.match(gdb_mi_text).groups()[<NUM_LIT:0>],<EOL>}<EOL><DEDENT>elif _GDB_MI_LOG_RE.match(gdb_mi_text):<EOL><INDENT>return {<EOL>\"<STR_LIT:type>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:message>\": None,<EOL>\"<STR_LIT>\": _GDB_MI_LOG_RE.match(gdb_mi_text).groups()[<NUM_LIT:0>],<EOL>}<EOL><DEDENT>elif _GDB_MI_TARGET_OUTPUT_RE.match(gdb_mi_text):<EOL><INDENT>return {<EOL>\"<STR_LIT:type>\": \"<STR_LIT:target>\",<EOL>\"<STR_LIT:message>\": None,<EOL>\"<STR_LIT>\": _GDB_MI_TARGET_OUTPUT_RE.match(gdb_mi_text).groups()[<NUM_LIT:0>],<EOL>}<EOL><DEDENT>elif response_is_finished(gdb_mi_text):<EOL><INDENT>return {\"<STR_LIT:type>\": \"<STR_LIT>\", \"<STR_LIT:message>\": None, \"<STR_LIT>\": None}<EOL><DEDENT>else:<EOL><INDENT>return {\"<STR_LIT:type>\": \"<STR_LIT>\", \"<STR_LIT:message>\": None, \"<STR_LIT>\": gdb_mi_text}<EOL><DEDENT>", "docstring": "Parse gdb mi text and turn it into a dictionary.\n\n    See https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI-Stream-Records.html#GDB_002fMI-Stream-Records\n    for details on types of gdb mi output.\n\n    Args:\n        gdb_mi_text (str): String output from gdb\n\n    
Returns:\n        dict with the following keys:\n        type (either 'notify', 'result', 'console', 'log', 'target', 'done'),\n        message (str or None),\n        payload (str, list, dict, or None)", "id": "f5447:m1"}
{"signature": "def _parse_key_val(stream):", "body": "logger.debug(\"<STR_LIT>\")<EOL>key = _parse_key(stream)<EOL>val = _parse_val(stream)<EOL>logger.debug(\"<STR_LIT>\")<EOL>logger.debug(\"<STR_LIT:%s>\", fmt_green(key))<EOL>logger.debug(\"<STR_LIT:%s>\", fmt_green(val))<EOL>return key, val<EOL>", "docstring": "Parse key, value combination\n    return (tuple):\n        Parsed key (string)\n        Parsed value (either a string, array, or dict)", "id": "f5447:m7"}
{"signature": "def _parse_key(stream):", "body": "logger.debug(\"<STR_LIT>\")<EOL>key = stream.advance_past_chars([\"<STR_LIT:=>\"])<EOL>logger.debug(\"<STR_LIT>\")<EOL>logger.debug(\"<STR_LIT:%s>\", fmt_green(key))<EOL>return key<EOL>", "docstring": "Parse key, value combination\n    returns :\n        Parsed key (string)", "id": "f5447:m8"}
{"signature": "def _get_result_msg_and_payload(result, stream):", "body": "groups = _GDB_MI_RESULT_RE.match(result).groups()<EOL>token = int(groups[<NUM_LIT:0>]) if groups[<NUM_LIT:0>] != \"<STR_LIT>\" else None<EOL>message = groups[<NUM_LIT:1>]<EOL>if groups[<NUM_LIT:2>] is None:<EOL><INDENT>payload = None<EOL><DEDENT>else:<EOL><INDENT>stream.advance_past_chars([\"<STR_LIT:U+002C>\"])<EOL>payload = _parse_dict(stream)<EOL><DEDENT>return token, message, payload<EOL>", "docstring": "Get result message and payload dict", "id": "f5447:m5"}
{"signature": "def _parse_val(stream):", "body": "logger.debug(\"<STR_LIT>\")<EOL>while True:<EOL><INDENT>c = stream.read(<NUM_LIT:1>)<EOL>if c == \"<STR_LIT:{>\":<EOL><INDENT>val = _parse_dict(stream)<EOL>break<EOL><DEDENT>elif c == \"<STR_LIT:[>\":<EOL><INDENT>val = _parse_array(stream)<EOL>break<EOL><DEDENT>elif c == '<STR_LIT:\">':<EOL><INDENT>val = stream.advance_past_string_with_gdb_escapes()<EOL>break<EOL><DEDENT>elif _DEBUG:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % c)<EOL><DEDENT>else:<EOL><INDENT>print(<EOL>'<STR_LIT>'<EOL>% c<EOL>)<EOL>val = \"<STR_LIT>\"  <EOL><DEDENT><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>logger.debug(\"<STR_LIT:%s>\", fmt_green(val))<EOL>return val<EOL>", "docstring": "Parse value from string\n    returns:\n        Parsed value (either a string, array, or dict)", "id": "f5447:m9"}
{"signature": "def assert_match(actual_char_or_str, expected_char_or_str):", "body": "if expected_char_or_str != actual_char_or_str:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>pprint(expected_char_or_str)<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>pprint(actual_char_or_str)<EOL>raise ValueError()<EOL><DEDENT>", "docstring": "If values don't match, print them and raise a ValueError, otherwise,\n    continue\n    Raises: ValueError if arguments do not match", "id": "f5447:m3"}
{"signature": "def parse(self, argv):", "body": "if not argv and self.optional:<EOL><INDENT>self.value = [] if self.recurring else None<EOL>return<EOL><DEDENT>try:<EOL><INDENT>value = self.format.parse(argv)<EOL>if not self.recurring:<EOL><INDENT>self.value = value<EOL>return<EOL><DEDENT>self.value = [value]<EOL>while argv:<EOL><INDENT>self.value.append(self.format.parse(argv))<EOL><DEDENT><DEDENT>except formats.BadNumberOfArguments as e:<EOL><INDENT>raise BadNumberOfArguments(self.displayname, e.required, e.given)<EOL><DEDENT>except formats.BadArgument as e:<EOL><INDENT>raise BadArgument(self.displayname, e.argument, e.details)<EOL><DEDENT>", "docstring": "Consume and process arguments and store the result.\n\n        argv is the list of arguments to parse (will be modified).\n\n        Recurring PositionalArguments get a list as .value.\n\n        Optional PositionalArguments that do not get any arguments to parse get\n        None as .value, or [] if recurring.", "id": "f5450:c12:m2"}
{"signature": "def __init__(self,<EOL>parameters=None,<EOL>version=None,<EOL>install_dir=None,<EOL>argv=None,<EOL>launch=True,<EOL>progname=None,<EOL>author=None,<EOL>title='<STR_LIT>',<EOL>description=None,<EOL>contact=None,<EOL>website=None,<EOL>download=None,<EOL>git=None,<EOL>subversion=None,<EOL>license=None,<EOL>copyright=None,<EOL>command=None,<EOL>usage=None,<EOL>general=None,<EOL>additional=None,<EOL>note=None,<EOL>filedocs=None,<EOL>docsfiles=None,<EOL>docsfilenames=None,<EOL>width=None,<EOL>configfiles=None,<EOL>configdirs=None,<EOL>configfilenames=None,<EOL>sections=None,<EOL>ignore=None,<EOL>helpoption=help_option,<EOL>longhelpoption=longhelp_option,<EOL>versionoption=version_option,<EOL>settingsoption=settings_option):", "body": "params = locals()<EOL>self.options = dict()<EOL>self.option_order = []<EOL>self.abbreviations = dict()<EOL>self.positional_args = []<EOL>for p in parameters:<EOL><INDENT>if isinstance(p, Option):<EOL><INDENT>self._add_option(p)<EOL><DEDENT>elif isinstance(p, PositionalArgument):<EOL><INDENT>self._add_positional_argument(p)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT><DEDENT>self.basic_option_names = dict()<EOL>for optiontype in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:version>']:<EOL><INDENT>option = params[optiontype + '<STR_LIT>']<EOL>if not option:<EOL><INDENT>continue<EOL><DEDENT>if not isinstance(option, Option):<EOL><INDENT>option = option()<EOL><DEDENT>self._add_option(option)<EOL>self.basic_option_names[optiontype] = option.name<EOL><DEDENT>if isinstance(install_dir, type(sys)):<EOL><INDENT>install_dir = install_dir.__file__<EOL><DEDENT>if isinstance(install_dir, str) and os.path.isfile(install_dir):<EOL><INDENT>install_dir = os.path.dirname(install_dir)<EOL><DEDENT>self.docvars = dict((s, params[s]) for s in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT:version>'])<EOL>if command is None:<EOL><INDENT>command = os.path.basename(sys.argv[<NUM_LIT:0>])<EOL><DEDENT>self.docvars['<STR_LIT>'] = 
command<EOL>command_base = command<EOL>if command.endswith('<STR_LIT>'):<EOL><INDENT>command_base = command[:-<NUM_LIT:3>]<EOL><DEDENT>self.docs = dict(title=_docs(title, self.docvars),<EOL>usage=_docs(usage, self.docvars),<EOL>files=_docs(filedocs, self.docvars))<EOL>if filedocs is None:<EOL><INDENT>filedocs = dict()<EOL><DEDENT>self.docs['<STR_LIT>'] = filedocs<EOL>self.docs.update((name, _list(params[name])) for name in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:description>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>self.ignore = _list(ignore)<EOL>if docsfiles is None:<EOL><INDENT>if install_dir:<EOL><INDENT>if docsfilenames is None:<EOL><INDENT>docsfilenames = [os.path.basename(install_dir) + '<STR_LIT>']<EOL>if command_base:<EOL><INDENT>docsfilenames.append(command_base + '<STR_LIT>')<EOL><DEDENT><DEDENT>docsfiles = [os.path.join(install_dir, f) for f in docsfilenames]<EOL><DEDENT><DEDENT>elif docsfilenames:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.read_docs(docsfiles)<EOL>self.sections = _list(sections, [command_base])<EOL>if configfiles is None:<EOL><INDENT>dirname = command_base<EOL>if isinstance(configdirs, str):<EOL><INDENT>dirname = configdirs<EOL>configdirs = None<EOL><DEDENT>if configdirs is None:<EOL><INDENT>configdirs = []<EOL>if install_dir is not None:<EOL><INDENT>configdirs.append(install_dir)<EOL><DEDENT>configdirs += [os.path.join('<STR_LIT>', dirname), <EOL>os.path.expanduser(os.path.join('<STR_LIT>', '<STR_LIT:.>' + dirname))]<EOL><DEDENT>configfilenames = _list(configfilenames, [command_base + '<STR_LIT>'])<EOL>configfiles = [os.path.join(d, f) for d in configdirs for f in configfilenames]<EOL><DEDENT>elif configdirs or configfilenames:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>elif isinstance(configfiles, str):<EOL><INDENT>configfiles = [configfiles]<EOL><DEDENT>self.configfiles = configfiles<EOL>if 
self.configfiles:<EOL><INDENT>self.addconfigfiledocs()<EOL><DEDENT>if not width:<EOL><INDENT>width = get_terminal_size()[<NUM_LIT:0>]<EOL><DEDENT>self.width = width<EOL>if launch:<EOL><INDENT>self.launch(argv)<EOL><DEDENT>", "docstring": "Many of the metainfo parameters (author, progname...) should already\nbe present in the program docstring if you're coding by the book. You \ncan avoid repeating them by doing something like: \nui = tui(..., **get_metainfo(__file__)).\nThe parameters that will be available using the above incantation are\nmarked with asterixes* below.\n\n.readdocs() can read most user documentaion from external docsfiles, \nso there is typically no need to supply that on instantiation. See the\ndocsfile parameter. Parameters that can be set using .readdocs() are \nmarked with percent% below.\n\nAll parameters that take a list of indented paragraphs can use docvars.\nThey also accept just a string which, is then taken as a list of one \nparagraph. None means there is no such documentation.\n\nAll parameters that take an Option also accept factory functions for\nOption objects (like a class, or a function like help_option). None\nmeans don't add such an option. Note however that just adding the \nhelpoption, longhelpoption, versionoption and settingsoption at \ninstantiation will not activate their documented functionality. Use \ne.g. .start() for this.\n\nparameters is a list of Options and PositionalArguments to be added\nto the user interface.\n\nversion* is the current program version string. Although you *can* set\nthis using get_metainfo, you typically want to pass __version__ here\ninstead. Available as a docvar.\n\ninstall_dir (if given) will be searched for configfiles and docsfiles.\nUse containing dir if given a python package or a path to a file (e.g. \nmypackage or __file__).\n\nargv is the list of command line arguments to use (see launch just \nbelow). 
None means use a copy of sys.argv.\n\nIf launch is True (default) then .launch(argv) will be called after \ninitiallization.\n\n*progname is the user friendly name of the program. Available as a \ndocvar. \n\n*author is who made the program. Available as a docvar. \n\ntitle is what to put in the title line on help pages. Can use docvars.\n\n*%description is a oneliner describing the program. Can use docvars.\n\n*%contact is contact information as a list of indented paragraphs.\n\n*%website is the url to the website of the program. Can be given as a\nlist of indented paragraphs if needed.\n\n*%download is where the program can be downloaded. Can be given as a\nlist of indented paragraphs if needed.\n\n*%git is where the source can be accessed by git. Can be given as a\nlist of indented paragraphs if needed.\n\n*%subversion is where the source can be accessed by svn. Can be given \nas a list of indented paragraphs if needed.\n\n*%copyright is copyright information as a list of indented paragraphs. \n\n*%license is brief information on the licensing conditions of the \nprogram. Can be given as a list of indented paragraphs if needed.\n\n*command* is how to execute the program. Available as a docvar. None \nmeans use the basename of sys.argv[0].\n\nusage is user friendly help on how to execute the program. Can \nuse docvars. None means auto generate from options and positional \narguments.\n\n%general is general program documentation as a list of indented\nparagraphs. Typically displayed on the normal help page. \n\n%additional is additional program documentation as a list of indented\nparagraphs. Typically only displayed in verbose help (longhelp). \n\n%note is the same as additional, only with a different label.\n\n%filedocs is documentation on files used by the program, as a dict of\nfile names and indented paragraphs. Typically only displayed in \nverbose help (longhelp). Keys and values may use docvars. 
None means \nno such documentation.\n\ndocsfilenames is a list of basenames of docsfiles and is only useful \ntogether with install_dir. A str means a list of one item. None means \n[basename(install_dir) + 'docs' (if given), command + '.docs']. \nDo not use together with the docsfiles parameter.\n\ndocsfiles is a list of paths to potential DocParser docsfiles; parse if\npresent, in the given order. None means look for each docsfilename in\ninstall_dir (if given). A str means a list of one item. Note that no \ndocs will be read on instantation. Use .readdocs() (or .launch()) for \nthat.\n\nwidth is the maximum allowed width for help text. 0 means try to guess\nthe terminal width, and use 79 if that fails.\n\nconfigdirs is a list of paths to directories to search for configfiles.\nNone means [install_dir (if given), '/etc/' + command, '~/.' + command].\nA str means the same, but with this string replacing command. Do not \nuse together with the configfiles parameter.\n\nconfigfilenames is a list of basenames of configfiles. A str means a \nlist of one item. None means [command + '.conf']. Do not use together \nwith the configfiles parameter.\n\nconfigfiles is a list of config files to parse. None means try each \nconfigfilename in the given order, in each of the configdirs in the \ngiven order. configfiles are parsed incrementally by .parsefiles() (or\n.launch()). A str means a list of one item. [] means no configfiles.\n\nsections is a list of sections to read in config files. A str means a \nlist of one item. None means [command]. The DEFAULT section (note \nuppercase) is always read, and it is read as if its contents were \ncopied to the beginning of all other sections.\n\nignore is a list of option and positional argument names that should be\nignored if encountered in configfiles or docsfiles. Any other unknown \nnames will raise an error. This is primarily to help users catch \nspelling mistakes. 
This feature is useful for example if you are using\nthe same files for several programs in a suite, where some parameters \nare shared and you want to ignore the others. Set to None to ignore all\nall unknown (not recommended). \n\noptions can be used to supply a preconfigured option dictionary, if you\nfor some reason prefer this to .makeoption(). tui will not check this \nfor you. See also option_order and abbreviations.\n\noption_order determines the order in which the options will be shown in\nhelp texts.\n\nabbreviations is a abbreviation:option dict, similar to options.\n\npositional_args can be used to supply a list of preconfigured positional\narguments, if you for some reason prefer this to .makeposarg(). tui will \nnot check this for you. See also positional_arg_names.\n\npositional_arg_names are the display names for positional arguments in\nhelp texts.\n\nhelpoption adds an option that lets the user request help in the usual\nway. Default is '--help' or '-h', option reserved for command line use.\nNone means don't add such an option.\n\nlonghelpoption adds an option that lets the user request verbose help.\nDefault is '--HELP' or '-H', option reserved for command line use. None\nmeans don't add such an option. \n\nversionoption adds an option that lets the user print the version \nstring. Default is '--version' or '-V', option reserved for command\nline use). None means don't add such an option.\n\nsettingsoption adds an option that lets the user print a brief summary\nof program settings. Default is '--settings' or '-S', option reserved \nfor command line use). None means don't add such an option.", "id": "f5450:c13:m0"}
{"signature": "def _add_option(self, option):", "body": "if option.name in self.options:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if option.abbreviation in self.abbreviations:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if option.name in [arg.name for arg in self.positional_args]:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.options[option.name] = option<EOL>if option.abbreviation:<EOL><INDENT>self.abbreviations[option.abbreviation] = option<EOL><DEDENT>self.option_order.append(option.name)<EOL>", "docstring": "Add an Option object to the user interface.", "id": "f5450:c13:m10"}
{"signature": "def _wrapusage(self, usage=None, width=<NUM_LIT:0>):", "body": "if not width:<EOL><INDENT>width = self.width<EOL><DEDENT>return textwrap.fill('<STR_LIT>' + self.format_usage(usage), width=width, subsequent_indent='<STR_LIT>')<EOL>", "docstring": "Textwrap usage instructions.\n        ARGS:\n        width = 0 <int>:\n            Maximum allowed page width. 0 means use default from\n            self.iMaxHelpWidth.", "id": "f5450:c13:m24"}
{"signature": "def __setitem__(self, key, value):", "body": "self.getparam(key).value = value<EOL>", "docstring": "Shorthand for .getparam(key).value = value.\n\n        Useful when doing additional modifications after tui is done parsing.", "id": "f5450:c13:m4"}
{"signature": "def longhelp(self, width=<NUM_LIT:0>):", "body": "return self.customhelp(self.longhelp_sections, width)<EOL>", "docstring": "Return the standard formatted help text for the prog. \n\n        This should have approximately the amount of information as you'd \n        expect in a man page.\n\n        width is maximum allowed page width, use self.width if 0.", "id": "f5450:c13:m29"}
{"signature": "def __contains__(self, key):", "body": "return key in list(self.keys())<EOL>", "docstring": "Shorthand for key in .keys().", "id": "f5450:c13:m2"}
{"signature": "def _parse_positional_arguments(self, argv):", "body": "for posarg in self.positional_args:<EOL><INDENT>posarg.parse(argv)<EOL><DEDENT>if argv:<EOL><INDENT>if None in [p.nargs for p in self.positional_args]:<EOL><INDENT>msg = '<STR_LIT>'<EOL>plural_s = len(argv) > <NUM_LIT:1> and '<STR_LIT:s>' or '<STR_LIT>'<EOL>raise BadNumberOfArguments(message=msg % (len(argv), plural_s))<EOL><DEDENT>msg = '<STR_LIT>'<EOL>required = len([p.nargs for p in self.positional_args])<EOL>raise BadNumberOfArguments(message=msg % (required, required + len(argv)))<EOL><DEDENT>", "docstring": "Parse the positional arguments part of an argument list.\n        argv <list str>:\n            List of arguments. Will be altered.", "id": "f5450:c13:m16"}
{"signature": "def _parse_options(self, argv, location):", "body": "observed = []<EOL>while argv:<EOL><INDENT>if argv[<NUM_LIT:0>].startswith('<STR_LIT>'):<EOL><INDENT>name = argv.pop(<NUM_LIT:0>)[<NUM_LIT:2>:]<EOL>if not name:<EOL><INDENT>break<EOL><DEDENT>if name not in self.options:<EOL><INDENT>raise InvalidOption(name)<EOL><DEDENT>option = self.options[name]<EOL>if not option.recurring:<EOL><INDENT>if option in observed:<EOL><INDENT>raise OptionRecurrenceError(name)<EOL><DEDENT>observed.append(option)<EOL><DEDENT>option.parse(argv, name, location)<EOL><DEDENT>elif argv[<NUM_LIT:0>].startswith('<STR_LIT:->'):<EOL><INDENT>if argv[<NUM_LIT:0>] == '<STR_LIT:->':<EOL><INDENT>break<EOL><DEDENT>block = argv.pop(<NUM_LIT:0>)[<NUM_LIT:1>:]<EOL>for abbreviation in block[:-<NUM_LIT:1>]:<EOL><INDENT>if self.abbreviations[abbreviation].nargs != <NUM_LIT:0>:<EOL><INDENT>raise BadAbbreviationBlock(abbreviation, block, \"<STR_LIT>\")<EOL><DEDENT><DEDENT>for abbreviation in block:<EOL><INDENT>option = self.abbreviations[abbreviation]<EOL>if not option.recurring:<EOL><INDENT>if option in observed:<EOL><INDENT>raise OptionRecurrenceError(option.name)<EOL><DEDENT>observed.append(option)<EOL><DEDENT>option.parse(argv, '<STR_LIT:->' + abbreviation, location)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Parse the options part of an argument list.\n        IN:\n        lsArgs <list str>:\n            List of arguments. Will be altered.\n        location <str>:\n            A user friendly string describing where this data came from.", "id": "f5450:c13:m15"}
{"signature": "def _wrap(self, text, indent=<NUM_LIT:0>, width=<NUM_LIT:0>):", "body": "text = _list(text)<EOL>if not width:<EOL><INDENT>width = self.width<EOL><DEDENT>paragraph = text[<NUM_LIT:0>].lstrip()<EOL>s = '<STR_LIT:U+0020>' * (len(text[<NUM_LIT:0>]) - len(paragraph) + indent)<EOL>wrapped = textwrap.wrap(paragraph.strip(), width, initial_indent=s, subsequent_indent=s)<EOL>return '<STR_LIT:\\n>'.join(wrapped)<EOL>", "docstring": "Textwrap an indented paragraph.\n        ARGS:\n        width = 0 <int>:\n            Maximum allowed page width. 0 means use default from\n            self.iMaxHelpWidth.", "id": "f5450:c13:m21"}
{"signature": "def parse(self, argv, usedname, location):", "body": "try:<EOL><INDENT>value = self.format.parse(argv)<EOL><DEDENT>except formats.BadNumberOfArguments as e:<EOL><INDENT>raise BadNumberOfArguments(usedname, e.required, e.supplied)<EOL><DEDENT>except formats.BadArgument as e:<EOL><INDENT>raise BadArgument(usedname, e.argument, e.message)<EOL><DEDENT>if self.recurring:<EOL><INDENT>self.value.append(value)<EOL><DEDENT>else:<EOL><INDENT>self.value = value<EOL><DEDENT>self.location = location<EOL>", "docstring": "Consume and process arguments and store the result.\n        ARGS:\n        argv <list str>:\n            The argument list to parse.\n        usedname <str>:\n            The string used by the user to invoke the option.\n        location <str>:\n            A user friendly string describing where the parser got this\n            data from.", "id": "f5450:c11:m1"}
{"signature": "def keys(self):", "body": "return list(self.options.keys()) + [p.name for p in self.positional_args]<EOL>", "docstring": "List names of options and positional arguments.", "id": "f5450:c13:m5"}
{"signature": "def optionhelp(self, indent=<NUM_LIT:0>, maxindent=<NUM_LIT>, width=<NUM_LIT>):", "body": "def makelabels(option):<EOL><INDENT>labels = '<STR_LIT>' % (indent, '<STR_LIT:U+0020>', option.name)<EOL>if option.abbreviation:<EOL><INDENT>labels += '<STR_LIT>' + option.abbreviation<EOL><DEDENT>return labels + '<STR_LIT>'<EOL><DEDENT>docs = []<EOL>helpindent = _autoindent([makelabels(o) for o in list(self.options.values())], indent, maxindent)<EOL>for name in self.option_order:<EOL><INDENT>option = self.options[name]<EOL>labels = makelabels(option)<EOL>helpstring = \"<STR_LIT>\" % (option.formatname, option.strvalue, option.docs)<EOL>wrapped = self._wrap_labelled(labels, helpstring, helpindent, width)<EOL>docs.extend(wrapped)<EOL><DEDENT>return '<STR_LIT:\\n>'.join(docs)<EOL>", "docstring": "Return user friendly help on program options.", "id": "f5450:c13:m18"}
{"signature": "def parsestr(self, argsstr, usedname, location):", "body": "", "docstring": "Parse a string lexically and store the result.\n\n        Override in subclasses to do actual work. Subclasses may take other\n        params.", "id": "f5450:c10:m3"}
{"signature": "def parse(self, argv):", "body": "", "docstring": "Consume and process arguments and store the result.\n\n        Override in subclasses to do actual work. Subclasses may take other\n        params.", "id": "f5450:c10:m2"}
{"signature": "def text(self):", "body": "return self.lines<EOL>", "docstring": "Return the text in the block as a list of lines.", "id": "f5451:c1:m4"}
{"signature": "def addline(self, line):", "body": "self.lines.append(line)<EOL>", "docstring": "Add a line (no trailing newlines) to the text block.", "id": "f5451:c1:m3"}
{"signature": "def decomment(self, line):", "body": "", "docstring": "Remove the comment parts from a line of text.\n\n        Return None to indicate that the line is completely commented out and\n        that it should not be fed to TextBlock storage.", "id": "f5451:c4:m0"}
{"signature": "def text(self):", "body": "return self.lines<EOL>", "docstring": "Return the indented paragraphs as a list of strings.", "id": "f5451:c3:m2"}
{"signature": "def parsestr(self, argstr):", "body": "argv = shlex.split(argstr, comments=True)<EOL>if len(argv) != self.nargs:<EOL><INDENT>raise BadNumberOfArguments(self.nargs, len(argv))<EOL><DEDENT>return self.parse(argv)<EOL>", "docstring": "Parse arguments found in settings files.\n\n        argstr is the string that should be parsed. Use e.g. '\"\"' to pass an\n        empty string.\n\n        if self.nargs > 1 a list of parsed values will be returned.\n\n        NOTE: formats with nargs == 0 or None probably want to override this \n        method.", "id": "f5452:c3:m2"}
{"signature": "def present(self, value):", "body": "for k, v in list(self.special.items()):<EOL><INDENT>if v == value:<EOL><INDENT>return k<EOL><DEDENT><DEDENT>return self.to_literal(value, *self.args, **self.kw)<EOL>", "docstring": "Return a user-friendly representation of a value.\n\n        Lookup value in self.specials, or call .to_literal() if absent.", "id": "f5452:c5:m5"}
{"signature": "def to_literal(self, value, *args, **kw):", "body": "return str(value)<EOL>", "docstring": "Convert a value to a user-friendly representation.", "id": "f5452:c5:m2"}
{"signature": "def __init__(self,<EOL>lower=None,<EOL>upper=None,<EOL>inclusive=None,<EOL>lowerinclusive=True,<EOL>upperinclusive=True,<EOL>**kw):", "body": "super(Int, self).__init__(**kw)<EOL>if inclusive is not None:<EOL><INDENT>lowerinclusive = upperinclusive = inclusive<EOL><DEDENT>self.lower = lower<EOL>self.upper = upper<EOL>self.lowerinclusive = lowerinclusive<EOL>self.upperinclusive = upperinclusive<EOL>docs = [self.docs[:-<NUM_LIT:1>]]<EOL>if lower is not None:<EOL><INDENT>if lowerinclusive:<EOL><INDENT>docs.append('<STR_LIT>' + self.to_literal(lower))<EOL><DEDENT>else:<EOL><INDENT>docs.append('<STR_LIT>' + self.to_literal(lower))<EOL><DEDENT>if upper is not None:<EOL><INDENT>docs.append('<STR_LIT>')<EOL><DEDENT><DEDENT>if upper is not None:<EOL><INDENT>if upperinclusive:<EOL><INDENT>docs.append('<STR_LIT>' + self.to_literal(upper))<EOL><DEDENT>else:<EOL><INDENT>docs.append('<STR_LIT>' + self.to_literal(upper))<EOL><DEDENT><DEDENT>self._docs = '<STR_LIT:U+0020>'.join(docs) + '<STR_LIT:.>'<EOL>", "docstring": "lower and upper (if not None) give the lower and upper bounds of \npermissible values. \n\nlowerinclusive and upperinclusive state whether the bound values \nthemselves are within the permissible range.\n\ninclusive (if not None) overrides lowerinclusive and upperinclusive.", "id": "f5452:c8:m0"}
{"signature": "def parse(self, argv):", "body": "", "docstring": "Pop, parse and return the first self.nargs items from args.\n\n        Subclasses that desire other behavior can override this (must be \n        overridden if self.nargs is None). \n\n        if self.nargs > 1 a list of parsed values will be returned.\n\n        Raise BadNumberOfArguments or BadArgument on errors.\n\n        NOTE: args may be modified in place by this method.", "id": "f5452:c3:m1"}
{"signature": "def parse(self, argv):", "body": "if len(argv) < self.nargs:<EOL><INDENT>raise BadNumberOfArguments(self.nargs, len(argv))<EOL><DEDENT>if self.nargs == <NUM_LIT:1>:<EOL><INDENT>return self.parse_argument(argv.pop(<NUM_LIT:0>))<EOL><DEDENT>return [self.parse_argument(argv.pop(<NUM_LIT:0>)) for tmp in range(self.nargs)]<EOL>", "docstring": "Pop, parse and return the first self.nargs items from args.\n\n        if self.nargs > 1 a list of parsed values will be returned.\n\n        Raise BadNumberOfArguments or BadArgument on errors.\n\n        NOTE: argv may be modified in place by this method.", "id": "f5452:c5:m4"}
{"signature": "def __init__(self, flags=<NUM_LIT:0>, **kw):", "body": "if isinstance(flags, str):<EOL><INDENT>flags = flags.upper()<EOL>if flags.translate(None, '<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>' + flags.translate(None, '<STR_LIT>'))<EOL><DEDENT>flags = reduce(lambda i, flag: i | getattr(re, flag), flags, <NUM_LIT:0>)<EOL><DEDENT>super(RegEx, self).__init__(**kw)<EOL>self.kw['<STR_LIT>'] = flags<EOL>", "docstring": "flags should be an integer and is passed to re.compile. It can \n        also be a string of one or more of the letters 'iLmsux' (the short \n        names of the re flags).", "id": "f5452:c16:m0"}
{"signature": "def parse(self, argv):", "body": "if not argv:<EOL><INDENT>raise BadNumberOfArguments(<NUM_LIT:1>, <NUM_LIT:0>)<EOL><DEDENT>argument = argv.pop(<NUM_LIT:0>)<EOL>lookup = self.casesensitive and argument or argument.lower()<EOL>if lookup in self.special:<EOL><INDENT>return self.special[lookup]<EOL><DEDENT>argv = [(self.strip and s.strip() or s) for s in argument.split(self.separator)]<EOL>values = []<EOL>while argv:<EOL><INDENT>values.append(self.format.parse(argv))<EOL><DEDENT>return values<EOL>", "docstring": "Pop, parse and return the first arg from argv.\n\n        The arg will be .split() based on self.separator and the (optionally \n        stripped) items will be parsed by self.format and returned as a list. \n\n        Raise BadNumberOfArguments or BadArgument on errors.\n\n        NOTE: args will be modified.", "id": "f5452:c17:m3"}
{"signature": "def get_format(format):", "body": "if isinstance(format, BaseFormat):<EOL><INDENT>return format<EOL><DEDENT>if isinstance(format, str):<EOL><INDENT>for name, formatclass in list(globals().items()):<EOL><INDENT>if name.lower() == format.lower():<EOL><INDENT>if not issubclass(formatclass, BaseFormat):<EOL><INDENT>raise ValueError('<STR_LIT>' % format)<EOL><DEDENT>return formatclass()<EOL><DEDENT><DEDENT><DEDENT>try:<EOL><INDENT>return format()<EOL><DEDENT>except:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Get a format object.\n\n    If format is a format object, return unchanged. If it is a string \n    matching one of the BaseFormat subclasses in the tui.formats module\n    (case insensitive), return an instance of that class. Otherwise assume \n    it'a factory function for Formats (such as a class) so call and return,\n    and raise ValueError on error.", "id": "f5452:m1"}
{"signature": "def parsestr(self, argstr):", "body": "argv = shlex.split(argstr, comments=True)<EOL>if len(argv) != <NUM_LIT:1>:<EOL><INDENT>raise BadNumberOfArguments(<NUM_LIT:1>, len(argv))<EOL><DEDENT>arg = argv[<NUM_LIT:0>]<EOL>lower = arg.lower()<EOL>if lower in self.true:<EOL><INDENT>return True<EOL><DEDENT>if lower in self.false:<EOL><INDENT>return False<EOL><DEDENT>raise BadArgument(arg, \"<STR_LIT>\" + self.allowed + '<STR_LIT:.>')<EOL>", "docstring": "Parse arguments found in settings files.\n\n        Use the values in self.true for True in settings files, or those in \n        self.false for False, case insensitive.", "id": "f5452:c6:m1"}
{"signature": "def main():", "body": "options = _parse_args()<EOL>archive = download_setuptools(<EOL>version=options.version,<EOL>download_base=options.download_base,<EOL>downloader_factory=options.downloader_factory,<EOL>)<EOL>return _install(archive, _build_install_args(options))<EOL>", "docstring": "Install or upgrade setuptools and EasyInstall", "id": "f5454:m19"}
{"signature": "def _parse_args():", "body": "parser = optparse.OptionParser()<EOL>parser.add_option(<EOL>'<STR_LIT>', dest='<STR_LIT>', action='<STR_LIT:store_true>', default=False,<EOL>help='<STR_LIT>')<EOL>parser.add_option(<EOL>'<STR_LIT>', dest='<STR_LIT>', metavar=\"<STR_LIT>\",<EOL>default=DEFAULT_URL,<EOL>help='<STR_LIT>')<EOL>parser.add_option(<EOL>'<STR_LIT>', dest='<STR_LIT>', action='<STR_LIT>',<EOL>const=lambda: download_file_insecure, default=get_best_downloader,<EOL>help='<STR_LIT>'<EOL>)<EOL>parser.add_option(<EOL>'<STR_LIT>', help=\"<STR_LIT>\",<EOL>default=DEFAULT_VERSION,<EOL>)<EOL>options, args = parser.parse_args()<EOL>return options<EOL>", "docstring": "Parse the command line for options", "id": "f5454:m18"}
{"signature": "def get_zip_class():", "body": "class ContextualZipFile(zipfile.ZipFile):<EOL><INDENT>def __enter__(self):<EOL><INDENT>return self<EOL><DEDENT>def __exit__(self, type, value, traceback):<EOL><INDENT>self.close<EOL><DEDENT><DEDENT>return zipfile.ZipFile if hasattr(zipfile.ZipFile, '<STR_LIT>') elseContextualZipFile<EOL>", "docstring": "Supplement ZipFile class to support context manager for Python 2.6", "id": "f5454:m3"}
{"signature": "def download_file_powershell(url, target):", "body": "target = os.path.abspath(target)<EOL>cmd = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>\"<STR_LIT>\" % vars(),<EOL>]<EOL>_clean_check(cmd, target)<EOL>", "docstring": "Download the file at url to target using Powershell (which will validate\ntrust). Raise an exception if the command cannot complete.", "id": "f5454:m8"}
{"signature": "def _build_install_args(options):", "body": "return ['<STR_LIT>'] if options.user_install else []<EOL>", "docstring": "Build the arguments to 'python setup.py install' on the setuptools package", "id": "f5454:m17"}
{"signature": "def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,<EOL>to_dir=os.curdir, delay=<NUM_LIT:15>, downloader_factory=get_best_downloader):", "body": "<EOL>to_dir = os.path.abspath(to_dir)<EOL>zip_name = \"<STR_LIT>\" % version<EOL>url = download_base + zip_name<EOL>saveto = os.path.join(to_dir, zip_name)<EOL>if not os.path.exists(saveto):  <EOL><INDENT>log.warn(\"<STR_LIT>\", url)<EOL>downloader = downloader_factory()<EOL>downloader(url, saveto)<EOL><DEDENT>return os.path.realpath(saveto)<EOL>", "docstring": "Download setuptools from a specified location and return its filename\n\n`version` should be a valid setuptools version number that is available\nas an egg for download under the `download_base` URL (which should end\nwith a '/'). `to_dir` is the directory where the egg will be downloaded.\n`delay` is the number of seconds to pause before an actual download\nattempt.\n\n``downloader_factory`` should be a function taking no arguments and\nreturning a function for downloading a URL to a target.", "id": "f5454:m16"}
{"signature": "def stop_adc(self):", "body": "<EOL>config = <NUM_LIT><EOL>self._device.writeList(ADS1x15_POINTER_CONFIG, [(config >> <NUM_LIT:8>) & <NUM_LIT>, config & <NUM_LIT>])<EOL>", "docstring": "Stop all continuous ADC conversions (either normal or difference mode).", "id": "f5460:c0:m12"}
{"signature": "def get_last_result(self):", "body": "<EOL>result = self._device.readList(ADS1x15_POINTER_CONVERSION, <NUM_LIT:2>)<EOL>return self._conversion_value(result[<NUM_LIT:1>], result[<NUM_LIT:0>])<EOL>", "docstring": "Read the last conversion result when in continuous conversion mode.\n        Will return a signed integer value.", "id": "f5460:c0:m13"}
{"signature": "def start_adc_difference(self, differential, gain=<NUM_LIT:1>, data_rate=None):", "body": "assert <NUM_LIT:0> <= differential <= <NUM_LIT:3>, '<STR_LIT>'<EOL>return self._read(differential, gain, data_rate, ADS1x15_CONFIG_MODE_CONTINUOUS)<EOL>", "docstring": "Start continuous ADC conversions between two ADC channels. Differential\n        must be one of:\n          - 0 = Channel 0 minus channel 1\n          - 1 = Channel 0 minus channel 3\n          - 2 = Channel 1 minus channel 3\n          - 3 = Channel 2 minus channel 3\n        Will return an initial conversion result, then call the get_last_result()\n        function continuously to read the most recent conversion result.  Call\n        stop_adc() to stop conversions.", "id": "f5460:c0:m9"}
{"signature": "def start_adc_difference_comparator(self, differential, high_threshold, low_threshold,<EOL>gain=<NUM_LIT:1>, data_rate=None, active_low=True,<EOL>traditional=True, latching=False, num_readings=<NUM_LIT:1>):", "body": "assert <NUM_LIT:0> <= differential <= <NUM_LIT:3>, '<STR_LIT>'<EOL>return self._read_comparator(differential, gain, data_rate,<EOL>ADS1x15_CONFIG_MODE_CONTINUOUS,<EOL>high_threshold, low_threshold, active_low,<EOL>traditional, latching, num_readings)<EOL>", "docstring": "Start continuous ADC conversions between two channels with\n        the comparator enabled.  See start_adc_difference for valid differential\n        parameter values and their meaning.  When enabled the comparator to will\n        check if the ADC value is within the high_threshold & low_threshold value\n        (both should be signed 16-bit integers) and trigger the ALERT pin.  The\n        behavior can be controlled by the following parameters:\n          - active_low: Boolean that indicates if ALERT is pulled low or high\n                        when active/triggered.  Default is true, active low.\n          - traditional: Boolean that indicates if the comparator is in traditional\n                         mode where it fires when the value is within the threshold,\n                         or in window mode where it fires when the value is _outside_\n                         the threshold range.  Default is true, traditional mode.\n          - latching: Boolean that indicates if the alert should be held until\n                      get_last_result() is called to read the value and clear\n                      the alert.  Default is false, non-latching.\n          - num_readings: The number of readings that match the comparator before\n                          triggering the alert.  Can be 1, 2, or 4.  
Default is 1.\n        Will return an initial conversion result, then call the get_last_result()\n        function continuously to read the most recent conversion result.  Call\n        stop_adc() to stop conversions.", "id": "f5460:c0:m11"}
{"signature": "def _conversion_value(self, low, high):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Subclasses should override this function that takes the low and high\n        byte of a conversion result and returns a signed integer value.", "id": "f5460:c0:m3"}
{"signature": "def _data_rate_default(self):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Retrieve the default data rate for this ADC (in samples per second).\n        Should be implemented by subclasses.", "id": "f5460:c0:m1"}
{"signature": "def get_urls(self):", "body": "urls = patterns(<EOL>'<STR_LIT>',<EOL>url(r'<STR_LIT>', self.admin_site.admin_view(self.handle_upload), name='<STR_LIT>'),<EOL>)<EOL>return urls + super(QuillAdmin, self).get_urls()<EOL>", "docstring": "Add URLs needed to handle image uploads.", "id": "f5472:c0:m0"}
{"signature": "@register.filter()<EOL>def quill_conf_json(name):", "body": "return json.dumps(getattr(quill_app, name))<EOL>", "docstring": "Get a value from the configuration app as JSON.", "id": "f5473:m1"}
{"signature": "@register.simple_tag(takes_context=True)<EOL>def render_editor(context, config):", "body": "quill_config = getattr(quill_app, config)<EOL>t = template.loader.get_template(quill_config['<STR_LIT>'])<EOL>return t.render(context)<EOL>", "docstring": "Render the editor for the given config.", "id": "f5473:m3"}
{"signature": "def formfield(self, **kwargs):", "body": "defaults = {<EOL>'<STR_LIT>': RichTextFormField,<EOL>'<STR_LIT>': self.config,<EOL>}<EOL>defaults.update(kwargs)<EOL>return super(RichTextField, self).formfield(**defaults)<EOL>", "docstring": "Get the form for field.", "id": "f5475:c0:m1"}
{"signature": "def __init__(self, config='<STR_LIT:default>', *args, **kwargs):", "body": "self.config = config<EOL>super(RichTextField, self).__init__(*args, **kwargs)<EOL>", "docstring": "Create a new WYSIWYG field.\n\n        :param str config: The QuillJS config to use (from :py:class:`quill.apps.QuillConfig`)", "id": "f5475:c0:m0"}
{"signature": "def render(self, name, value, attrs={}):", "body": "if value is None:<EOL><INDENT>value = '<STR_LIT>'<EOL><DEDENT>final_attrs = self.build_attrs(attrs, name=name)<EOL>quill_app = apps.get_app_config('<STR_LIT>')<EOL>quill_config = getattr(quill_app, self.config)<EOL>return mark_safe(render_to_string(quill_config['<STR_LIT>'], {<EOL>'<STR_LIT>': flatatt(final_attrs),<EOL>'<STR_LIT:value>': value,<EOL>'<STR_LIT:id>': final_attrs['<STR_LIT:id>'],<EOL>'<STR_LIT>': self.config,<EOL>}))<EOL>", "docstring": "Render the Quill WYSIWYG.", "id": "f5478:c0:m1"}
{"signature": "def get_ve_dir(self):", "body": "return self._ve_dir<EOL>", "docstring": "Returns the path to the virtualenv", "id": "f5485:c0:m4"}
{"signature": "def get_project_name(self):", "body": "return self._project_name<EOL>", "docstring": "Returns the name of the project", "id": "f5485:c0:m1"}
{"signature": "def create_manage_scripts(self):", "body": "<EOL>start = '<STR_LIT>'.format(self._project_name)<EOL>start += '<STR_LIT>'<EOL>start += '<STR_LIT>'.format(os.path.join(self._conf_dir, self._project_name))<EOL>start += '<STR_LIT>'<EOL>start += '<STR_LIT>'<EOL>start += '<STR_LIT>'.format(os.path.join(self._conf_dir, self._project_name))<EOL>start += '<STR_LIT>'<EOL>start += '<STR_LIT>'.format(self._project_name)<EOL>stop = '<STR_LIT>'.format(self._project_name)<EOL>stop += '<STR_LIT>'.format(os.path.join(self._var_dir, self._project_name), os.path.join(self._conf_dir, self._project_name))<EOL>stop += '<STR_LIT>'.format(os.path.join(self._var_dir, self._project_name))<EOL>stop += '<STR_LIT>'.format(self._project_name)<EOL>start_file = '<STR_LIT>'.format(os.path.join(self._script_dir, self._project_name))<EOL>stop_file = '<STR_LIT>'.format(os.path.join(self._script_dir, self._project_name))<EOL>f = open(start_file, '<STR_LIT:w>')<EOL>f.write(start)<EOL>f.close()<EOL>f = open(stop_file, '<STR_LIT:w>')<EOL>f.write(stop)<EOL>f.close()<EOL>os.chmod(start_file, <NUM_LIT>)<EOL>os.chmod(stop_file, <NUM_LIT>)<EOL>", "docstring": "Creates scripts to start and stop the application", "id": "f5485:c0:m11"}
{"signature": "def get_nginx_config(self):", "body": "if os.path.exists(self._nginx_config):<EOL><INDENT>return open(self._nginx_config, '<STR_LIT:r>').read()<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Gets the Nginx config for the project", "id": "f5485:c0:m2"}
{"signature": "def create_nginx_config(self):", "body": "cfg = '<STR_LIT>'.format(self._project_name)<EOL>if not self._shared_hosting:<EOL><INDENT>if self._user:<EOL><INDENT>cfg += '<STR_LIT>'.format(self._user)<EOL><DEDENT>cfg += '<STR_LIT>'.format(os.path.join(self._log_dir,self._project_name), os.path.join(self._var_dir, self._project_name))<EOL>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'<EOL>if self._include_mimetypes:<EOL><INDENT>cfg += '<STR_LIT>'<EOL><DEDENT>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'.format(os.path.join(self._log_dir, self._project_name))<EOL>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'<EOL><DEDENT>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'.format(self._port)<EOL>if self._server_name:<EOL><INDENT>cfg += '<STR_LIT>'.format(self._server_name)<EOL><DEDENT>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'.format(os.path.join(self._var_dir, self._project_name))<EOL>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'<EOL>cfg += '<STR_LIT>'<EOL>if not self._shared_hosting:<EOL><INDENT>cfg += '<STR_LIT>'<EOL><DEDENT>f = open(self._nginx_config, '<STR_LIT:w>')<EOL>f.write(cfg)<EOL>f.close()<EOL>", "docstring": "Creates the Nginx configuration for the project", "id": "f5485:c0:m10"}
{"signature": "def create(self):", "body": "<EOL>self.create_virtualenv()<EOL>self.create_project()<EOL>self.create_uwsgi_script()<EOL>self.create_nginx_config()<EOL>self.create_manage_scripts()<EOL>logging.info('<STR_LIT>')<EOL>", "docstring": "Creates the full project", "id": "f5485:c0:m12"}
{"signature": "def __init__(self, project_name=None, root_dir=os.getcwd(), modules=[], **kwargs):", "body": "ProjectCreator.__init__(self, project_name, root_dir, modules, **kwargs)<EOL>self.log = logging.getLogger('<STR_LIT>')<EOL>flask_found = False<EOL>for m in self._modules:<EOL><INDENT>if m.find('<STR_LIT>') > -<NUM_LIT:1>:<EOL><INDENT>flask_found = True<EOL><DEDENT><DEDENT>if not flask_found:<EOL><INDENT>self._modules.append('<STR_LIT>')<EOL><DEDENT>", "docstring": "Handles creating Flask projects\n\n:keyword project_name: Name of project to create or edit\n:keyword root_dir: Base directory where projects are stored\n:keyword modules: List of Python modules to install into virtualenv (uses PIP)", "id": "f5486:c0:m0"}
{"signature": "def create_project(self):", "body": "if os.path.exists(self._py):<EOL><INDENT>prj_dir = os.path.join(self._app_dir, self._project_name)<EOL>if os.path.exists(prj_dir):<EOL><INDENT>if self._force:<EOL><INDENT>logging.warn('<STR_LIT>')<EOL>shutil.rmtree(prj_dir)<EOL><DEDENT>else:<EOL><INDENT>logging.warn('<STR_LIT>')<EOL>return<EOL><DEDENT><DEDENT>logging.info('<STR_LIT>')<EOL>os.makedirs(prj_dir)<EOL>app = \"\"\"<STR_LIT>\"\"\"\"\"\"<STR_LIT>\"\"\"\"\"\"<STR_LIT>\"\"\"\"\"\"<STR_LIT>\"\"\"\"\"\"<STR_LIT>\"\"\"\"\"\"<STR_LIT>\"\"\"\"\"\"<STR_LIT>\"\"\"\"\"\"<STR_LIT>\"\"\"<EOL>with open(os.path.join(prj_dir, '<STR_LIT>'), '<STR_LIT:w>') as f:<EOL><INDENT>f.write(app)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logging.error('<STR_LIT>')<EOL>return<EOL><DEDENT>", "docstring": "Creates a base Flask project", "id": "f5486:c0:m1"}
{"signature": "def run(self, params, exercise, silent=False, env=os.environ):", "body": "out, err, code = \"<STR_LIT>\", \"<STR_LIT>\", -<NUM_LIT:1><EOL>@Spinner.decorate(\"<STR_LIT>\" if not silent else \"<STR_LIT>\",<EOL>waitmsg=\"<STR_LIT>\" + self.name)<EOL>def inner():<EOL><INDENT>ret = Popen(params, stdout=PIPE, stderr=PIPE, cwd=exercise.path(),<EOL>env=env)<EOL>out, err = ret.communicate()<EOL>return (out.decode(\"<STR_LIT:utf-8>\", \"<STR_LIT>\"),<EOL>err.decode(\"<STR_LIT:utf-8>\", \"<STR_LIT>\"),<EOL>ret.returncode)<EOL><DEDENT>try:<EOL><INDENT>out, err, code = inner()<EOL><DEDENT>except OSError as e:<EOL><INDENT>if e.errno in [os.errno.ENOENT, os.errno.EACCES]:<EOL><INDENT>raise MissingProgram(params[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>raise e<EOL><DEDENT><DEDENT>return code, out, err<EOL>", "docstring": "Run a program with Popen and handle common errors\n:param params: Parameter list to Popen\n:param exercise:\n:return: returncode, stdout, stderr", "id": "f5494:c1:m1"}
{"signature": "def selected_exercise(func):", "body": "@wraps(func)<EOL>def inner(*args, **kwargs):<EOL><INDENT>exercise = Exercise.get_selected()<EOL>return func(exercise, *args, **kwargs)<EOL><DEDENT>return inner<EOL>", "docstring": "Passes the selected exercise as the first argument to func.", "id": "f5498:m1"}
{"signature": "@aliases(\"<STR_LIT>\")<EOL>@selected_course<EOL>@false_exit<EOL>def skip(course, num=<NUM_LIT:1>):", "body": "sel = None<EOL>try:<EOL><INDENT>sel = Exercise.get_selected()<EOL>if sel.course.tid != course.tid:<EOL><INDENT>sel = None<EOL><DEDENT><DEDENT>except NoExerciseSelected:<EOL><INDENT>pass<EOL><DEDENT>if sel is None:<EOL><INDENT>sel = course.exercises.first()<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>sel = Exercise.get(Exercise.id == sel.id + num)<EOL><DEDENT>except peewee.DoesNotExist:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT><DEDENT>sel.set_select()<EOL>list_all(single=sel)<EOL>", "docstring": "Go to the next exercise.", "id": "f5498:m7"}
{"signature": "@aliases(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>@selected_course<EOL>def list_all(course, single=None):", "body": "def bs(val):<EOL><INDENT>return \"<STR_LIT>\" if val else \"<STR_LIT:U+0020>\"<EOL><DEDENT>def bc(val):<EOL><INDENT>return as_success(\"<STR_LIT>\") if val else as_error(\"<STR_LIT>\")<EOL><DEDENT>def format_line(exercise):<EOL><INDENT>return \"<STR_LIT>\".format(exercise.tid,<EOL>bs(exercise.is_selected),<EOL>bc(exercise.is_downloaded),<EOL>bc(exercise.is_completed),<EOL>exercise.menuname())<EOL><DEDENT>print(\"<STR_LIT>\".format(<EOL>(len(str(course.exercises[<NUM_LIT:0>].tid)) - <NUM_LIT:1>) * \"<STR_LIT:U+0020>\"<EOL>))<EOL>if single:<EOL><INDENT>print(format_line(single))<EOL>return<EOL><DEDENT>for exercise in course.exercises:<EOL><INDENT>print(format_line(exercise))<EOL><DEDENT>", "docstring": "Lists all of the exercises in the current course.", "id": "f5498:m15"}
{"signature": "@aliases(\"<STR_LIT>\")<EOL>@arg(\"<STR_LIT>\", \"<STR_LIT>\", dest=\"<STR_LIT>\", help=\"<STR_LIT>\")<EOL>@arg(\"<STR_LIT>\", \"<STR_LIT>\", default=False, action=\"<STR_LIT:store_true>\",<EOL>help=\"<STR_LIT>\")<EOL>def paste(tid=None, review=False):", "body": "submit(pastebin=True, tid=tid, review=False)<EOL>", "docstring": "Sends the selected exercise to the TMC pastebin.", "id": "f5498:m13"}
{"signature": "@aliases(\"<STR_LIT>\")<EOL>def reset():", "body": "print(\"<STR_LIT>\",<EOL>\"<STR_LIT>\")<EOL>if yn_prompt(\"<STR_LIT>\", False):<EOL><INDENT>reset_db()<EOL>print(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Resets the local database.", "id": "f5498:m9"}
{"signature": "@aliases(\"<STR_LIT>\")<EOL>@arg(\"<STR_LIT>\", \"<STR_LIT>\", dest=\"<STR_LIT>\", help=\"<STR_LIT>\")<EOL>@arg(\"<STR_LIT>\", \"<STR_LIT>\", default=False, action=\"<STR_LIT:store_true>\",<EOL>help=\"<STR_LIT>\")<EOL>@arg(\"<STR_LIT>\", \"<STR_LIT>\", default=False, action=\"<STR_LIT:store_true>\",<EOL>help=\"<STR_LIT>\")<EOL>@selected_course<EOL>@false_exit<EOL>def submit(course, tid=None, pastebin=False, review=False):", "body": "if tid is not None:<EOL><INDENT>return submit_exercise(Exercise.byid(tid),<EOL>pastebin=pastebin,<EOL>request_review=review)<EOL><DEDENT>else:<EOL><INDENT>sel = Exercise.get_selected()<EOL>if not sel:<EOL><INDENT>raise NoExerciseSelected()<EOL><DEDENT>return submit_exercise(sel, pastebin=pastebin, request_review=review)<EOL><DEDENT>", "docstring": "Submit the selected exercise to the server.", "id": "f5498:m12"}
{"signature": "@aliases(\"<STR_LIT>\")<EOL>@arg(\"<STR_LIT:-c>\", \"<STR_LIT>\", action=\"<STR_LIT:store_true>\", help=\"<STR_LIT>\")<EOL>@arg(\"<STR_LIT>\", \"<STR_LIT>\", dest=\"<STR_LIT>\",<EOL>help=\"<STR_LIT>\")<EOL>def select(course=False, tid=None, auto=False):", "body": "if course:<EOL><INDENT>update(course=True)<EOL>course = None<EOL>try:<EOL><INDENT>course = Course.get_selected()<EOL><DEDENT>except NoCourseSelected:<EOL><INDENT>pass<EOL><DEDENT>ret = {}<EOL>if not tid:<EOL><INDENT>ret = Menu.launch(\"<STR_LIT>\",<EOL>Course.select().execute(),<EOL>course)<EOL><DEDENT>else:<EOL><INDENT>ret[\"<STR_LIT>\"] = Course.get(Course.tid == tid)<EOL><DEDENT>if \"<STR_LIT>\" in ret:<EOL><INDENT>ret[\"<STR_LIT>\"].set_select()<EOL>update()<EOL>if ret[\"<STR_LIT>\"].path == \"<STR_LIT>\":<EOL><INDENT>select_a_path(auto=auto)<EOL><DEDENT>skip()<EOL>return<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return<EOL><DEDENT><DEDENT>else:<EOL><INDENT>selected = None<EOL>try:<EOL><INDENT>selected = Exercise.get_selected()<EOL><DEDENT>except NoExerciseSelected:<EOL><INDENT>pass<EOL><DEDENT>ret = {}<EOL>if not tid:<EOL><INDENT>ret = Menu.launch(\"<STR_LIT>\",<EOL>Course.get_selected().exercises,<EOL>selected)<EOL><DEDENT>else:<EOL><INDENT>ret[\"<STR_LIT>\"] = Exercise.byid(tid)<EOL><DEDENT>if \"<STR_LIT>\" in ret:<EOL><INDENT>ret[\"<STR_LIT>\"].set_select()<EOL>print(\"<STR_LIT>\".format(ret[\"<STR_LIT>\"]))<EOL><DEDENT><DEDENT>", "docstring": "Select a course or an exercise.", "id": "f5498:m11"}
{"signature": "def custom_prompt(msg, options, default):", "body": "formatted_options = [<EOL>x.upper() if x == default else x.lower() for x in options<EOL>]<EOL>sure = input(\"<STR_LIT>\".format(msg, \"<STR_LIT:/>\".join(formatted_options)))<EOL>if len(sure) == <NUM_LIT:0>:<EOL><INDENT>return default<EOL><DEDENT>for option in options:<EOL><INDENT>if sure.upper() == option.upper():<EOL><INDENT>return option<EOL><DEDENT><DEDENT>return default<EOL>", "docstring": "Prompts the user with custom options.", "id": "f5500:m1"}
{"signature": "@staticmethod<EOL><INDENT>def launch(title, items, selected=None):<DEDENT>", "body": "resp = {\"<STR_LIT:code>\": -<NUM_LIT:1>, \"<STR_LIT>\": False}<EOL>curses.wrapper(Menu, title, items, selected, resp)<EOL>return resp<EOL>", "docstring": "Launches a new menu. Wraps curses nicely so exceptions won't screw with\nthe terminal too much.", "id": "f5501:c0:m5"}
{"signature": "def _to_json(self, resp):", "body": "try:<EOL><INDENT>json = resp.json()<EOL><DEDENT>except ValueError as e:<EOL><INDENT>reason = \"<STR_LIT>\"<EOL>raise APIError(reason.format(repr(e)))<EOL><DEDENT>return json<EOL>", "docstring": "Extract json from a response.\nAssumes response is valid otherwise.\nInternal use only.", "id": "f5505:c0:m11"}
{"signature": "def prepend_line(filepath, line):", "body": "with open(filepath) as f:<EOL><INDENT>lines = f.readlines()<EOL><DEDENT>lines.insert(<NUM_LIT:0>, line)<EOL>with open(filepath, '<STR_LIT:w>') as f:<EOL><INDENT>f.writelines(lines)<EOL><DEDENT>", "docstring": "Rewrite a file adding a line to its beginning.", "id": "f5510:m3"}
{"signature": "def copy_attributes(source, destination, ignore_patterns=[]):", "body": "for attr in _wildcard_filter(dir(source), *ignore_patterns):<EOL><INDENT>setattr(destination, attr, getattr(source, attr))<EOL><DEDENT>", "docstring": "Copy the attributes from a source object to a destination object.", "id": "f5512:m1"}
{"signature": "def if_(*args):", "body": "for i in range(<NUM_LIT:0>, len(args) - <NUM_LIT:1>, <NUM_LIT:2>):<EOL><INDENT>if args[i]:<EOL><INDENT>return args[i + <NUM_LIT:1>]<EOL><DEDENT><DEDENT>if len(args) % <NUM_LIT:2>:<EOL><INDENT>return args[-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Implements the 'if' operator with support for multiple elseif-s.", "id": "f5516:m0"}
{"signature": "def plus(*args):", "body": "return sum(to_numeric(arg) for arg in args)<EOL>", "docstring": "Sum converts either to ints or to floats.", "id": "f5516:m6"}
{"signature": "def to_numeric(arg):", "body": "if isinstance(arg, str):<EOL><INDENT>if '<STR_LIT:.>' in arg:<EOL><INDENT>return float(arg)<EOL><DEDENT>else:<EOL><INDENT>return int(arg)<EOL><DEDENT><DEDENT>return arg<EOL>", "docstring": "Converts a string either to int or to float.\nThis is important, because e.g. {\"!==\": [{\"+\": \"0\"}, 0.0]}", "id": "f5516:m5"}
{"signature": "@error_handler<EOL>def main(*args):", "body": "<EOL>with disable_error_handler():<EOL><INDENT>args = parse_args(args or sys.argv[<NUM_LIT:1>:])<EOL><DEDENT>config = read_config(args.config, args)<EOL>if config is None:<EOL><INDENT>return True<EOL><DEDENT>bootstrap = config[__script__]<EOL>if not check_pre_requirements(bootstrap['<STR_LIT>']):<EOL><INDENT>return True<EOL><DEDENT>env_args = prepare_args(config['<STR_LIT>'], bootstrap)<EOL>if not create_env(<EOL>bootstrap['<STR_LIT>'],<EOL>env_args,<EOL>bootstrap['<STR_LIT>'],<EOL>bootstrap['<STR_LIT>'],<EOL>bootstrap['<STR_LIT>']<EOL>):<EOL><INDENT>return True<EOL><DEDENT>pip_args = prepare_args(config['<STR_LIT>'], bootstrap)<EOL>if not install(<EOL>bootstrap['<STR_LIT>'],<EOL>bootstrap['<STR_LIT>'],<EOL>pip_args,<EOL>bootstrap['<STR_LIT>'],<EOL>bootstrap['<STR_LIT>'],<EOL>bootstrap['<STR_LIT>']<EOL>):<EOL><INDENT>return True<EOL><DEDENT>run_hook(bootstrap['<STR_LIT>'], bootstrap, bootstrap['<STR_LIT>'])<EOL>if not bootstrap['<STR_LIT>']:<EOL><INDENT>print_message('<STR_LIT>')<EOL><DEDENT>return False<EOL>", "docstring": "r\"\"\"Bootstrap Python projects and libraries with virtualenv and pip.\n\n    Also check system requirements before bootstrap and run post bootstrap\n    hook if any.\n\n    :param \\*args: Command line arguments list.", "id": "f5521:m9"}
{"signature": "def config_to_args(config):", "body": "result = []<EOL>for key, value in iteritems(config):<EOL><INDENT>if value is False:<EOL><INDENT>continue<EOL><DEDENT>key = '<STR_LIT>'.format(key.replace('<STR_LIT:_>', '<STR_LIT:->'))<EOL>if isinstance(value, (list, set, tuple)):<EOL><INDENT>for item in value:<EOL><INDENT>result.extend((key, smart_str(item)))<EOL><DEDENT><DEDENT>elif value is not True:<EOL><INDENT>result.extend((key, smart_str(value)))<EOL><DEDENT>else:<EOL><INDENT>result.append(key)<EOL><DEDENT><DEDENT>return tuple(result)<EOL>", "docstring": "Convert config dict to arguments list.\n\n    :param config: Configuration dict.", "id": "f5521:m1"}
{"signature": "def run_hook(hook, config, quiet=False):", "body": "if not hook:<EOL><INDENT>return True<EOL><DEDENT>if not quiet:<EOL><INDENT>print_message('<STR_LIT>')<EOL><DEDENT>result = not run_cmd(prepare_args(hook, config),<EOL>echo=not quiet,<EOL>fail_silently=True,<EOL>shell=True)<EOL>if not quiet:<EOL><INDENT>print_message()<EOL><DEDENT>return result<EOL>", "docstring": "Run post-bootstrap hook if any.\n\n    :param hook: Hook to run.\n    :param config: Configuration dict.\n    :param quiet: Do not output messages to STDOUT/STDERR. By default: False", "id": "f5521:m17"}
{"signature": "def pip_cmd(env, cmd, ignore_activated=False, **kwargs):", "body": "cmd = tuple(cmd)<EOL>dirname = safe_path(env)<EOL>if not ignore_activated:<EOL><INDENT>activated_env = os.environ.get('<STR_LIT>')<EOL>if hasattr(sys, '<STR_LIT>'):<EOL><INDENT>dirname = sys.prefix<EOL><DEDENT>elif activated_env:<EOL><INDENT>dirname = activated_env<EOL><DEDENT><DEDENT>pip_path = os.path.join(dirname, '<STR_LIT>' if IS_WINDOWS else '<STR_LIT>', '<STR_LIT>')<EOL>if kwargs.pop('<STR_LIT>', False):<EOL><INDENT>return pip_path<EOL><DEDENT>if not os.path.isfile(pip_path):<EOL><INDENT>raise OSError('<STR_LIT>'.format(pip_path))<EOL><DEDENT>if BOOTSTRAPPER_TEST_KEY in os.environ and cmd[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>cmd = list(cmd)<EOL>cmd.insert(<NUM_LIT:1>, '<STR_LIT>')<EOL>cmd = tuple(cmd)<EOL><DEDENT>with disable_error_handler():<EOL><INDENT>return run_cmd((pip_path, ) + cmd, **kwargs)<EOL><DEDENT>", "docstring": "r\"\"\"Run pip command in given or activated virtual environment.\n\n    :param env: Virtual environment name.\n    :param cmd: Pip subcommand to run.\n    :param ignore_activated:\n        Ignore activated virtual environment and use given venv instead. By\n        default: False\n    :param \\*\\*kwargs:\n        Additional keyword arguments to be passed to :func:`~run_cmd`", "id": "f5521:m11"}
{"signature": "def parse_args(args):", "body": "from argparse import ArgumentParser<EOL>description = ('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>parser = ArgumentParser(description=description)<EOL>parser.add_argument('<STR_LIT>', action='<STR_LIT:version>', version=__version__)<EOL>parser.add_argument(<EOL>'<STR_LIT:-c>', '<STR_LIT>', default=DEFAULT_CONFIG,<EOL>help='<STR_LIT>'.format(DEFAULT_CONFIG)<EOL>)<EOL>parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>', default=[], nargs='<STR_LIT:+>',<EOL>help='<STR_LIT>'<EOL>)<EOL>parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>help='<STR_LIT>'.<EOL>format(CONFIG[__script__]['<STR_LIT>'])<EOL>)<EOL>parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>help='<STR_LIT>'.<EOL>format(CONFIG[__script__]['<STR_LIT>'])<EOL>)<EOL>parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>', default=None,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL>parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>', help='<STR_LIT>'<EOL>)<EOL>parser.add_argument(<EOL>'<STR_LIT>', action='<STR_LIT:store_true>', default=None,<EOL>help='<STR_LIT>'<EOL>)<EOL>parser.add_argument(<EOL>'<STR_LIT>', action='<STR_LIT:store_true>', default=None,<EOL>help='<STR_LIT>'<EOL>)<EOL>parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>', default=None,<EOL>help='<STR_LIT>'<EOL>)<EOL>return parser.parse_args(args)<EOL>", "docstring": "Parse args from command line by creating argument parser instance and\nprocess it.\n\n:param args: Command line arguments list.", "id": "f5521:m10"}
{"signature": "def get_temp_streams():", "body": "kwargs = {'<STR_LIT>': '<STR_LIT:utf-8>'} if IS_PY3 else {}<EOL>return (tempfile.TemporaryFile('<STR_LIT>', **kwargs),<EOL>tempfile.TemporaryFile('<STR_LIT>', **kwargs))<EOL>", "docstring": "Return two temporary file handlers for STDOUT and STDERR.", "id": "f5521:m5"}
{"signature": "def read_config(filename, args):", "body": "<EOL>config = defaultdict(dict)<EOL>splitter = operator.methodcaller('<STR_LIT>', '<STR_LIT:U+0020>')<EOL>converters = {<EOL>__script__: {<EOL>'<STR_LIT>': safe_path,<EOL>'<STR_LIT>': splitter,<EOL>},<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': splitter,<EOL>'<STR_LIT>': splitter,<EOL>}<EOL>}<EOL>default = copy.deepcopy(CONFIG)<EOL>sections = set(iterkeys(default))<EOL>if int(getattr(pip, '<STR_LIT>', '<STR_LIT>').split('<STR_LIT:.>')[<NUM_LIT:0>]) < <NUM_LIT:6>:<EOL><INDENT>default['<STR_LIT>']['<STR_LIT>'] = safe_path(os.path.expanduser(<EOL>os.path.join('<STR_LIT>', '<STR_LIT>'.format(__script__), '<STR_LIT>')<EOL>))<EOL><DEDENT>is_default = filename == DEFAULT_CONFIG<EOL>filename = os.path.expandvars(os.path.expanduser(filename))<EOL>if not is_default and not os.path.isfile(filename):<EOL><INDENT>print_error('<STR_LIT>'.format(filename))<EOL>return None<EOL><DEDENT>parser = ConfigParser()<EOL>try:<EOL><INDENT>parser.read(filename)<EOL><DEDENT>except ConfigParserError:<EOL><INDENT>print_error('<STR_LIT>'.format(filename))<EOL>return None<EOL><DEDENT>for section in sections:<EOL><INDENT>if not parser.has_section(section):<EOL><INDENT>continue<EOL><DEDENT>items = parser.items(section)<EOL>for key, value in items:<EOL><INDENT>try:<EOL><INDENT>value = int(value)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>try:<EOL><INDENT>value = bool(strtobool(value))<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if section in converters and key in converters[section]:<EOL><INDENT>value = converters[section][key](value)<EOL><DEDENT>config[section][key] = value<EOL><DEDENT><DEDENT>for section, data in iteritems(default):<EOL><INDENT>if section not in config:<EOL><INDENT>config[section] = data<EOL><DEDENT>else:<EOL><INDENT>for key, value in iteritems(data):<EOL><INDENT>config[section].setdefault(key, value)<EOL><DEDENT><DEDENT><DEDENT>keys = set((<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', 
'<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'<EOL>))<EOL>for key in keys:<EOL><INDENT>value = getattr(args, key)<EOL>config[__script__].setdefault(key, value)<EOL>if key == '<STR_LIT>' and not value:<EOL><INDENT>continue<EOL><DEDENT>if value is not None:<EOL><INDENT>config[__script__][key] = value<EOL><DEDENT><DEDENT>return config<EOL>", "docstring": "Read and parse configuration file. By default, ``filename`` is relative\npath to current work directory.\n\nIf no config file found, default ``CONFIG`` would be used.\n\n:param filename: Read config from given filename.\n:param args: Parsed command line arguments.", "id": "f5521:m15"}
{"signature": "def iteritems(data, **kwargs):", "body": "return iter(data.items(**kwargs)) if IS_PY3 else data.iteritems(**kwargs)<EOL>", "docstring": "Iterate over dict items.", "id": "f5521:m7"}
{"signature": "def deref(self, data):", "body": "<EOL>deref = copy.deepcopy(jsonref.JsonRef.replace_refs(data))<EOL>self.write_template(deref, filename='<STR_LIT>')<EOL>return deref<EOL>", "docstring": "AWS doesn't quite have Swagger 2.0 validation right and will fail\n        on some refs. So, we need to convert to deref before\n        upload.", "id": "f5525:c0:m17"}
{"signature": "def deny_method(self, verb, resource):", "body": "self._add_method('<STR_LIT>', verb, resource, [])<EOL>", "docstring": "Adds an API Gateway method (Http verb + Resource path)\nto the list of denied methods for the policy", "id": "f5530:c1:m7"}
{"signature": "def allow_method_with_conditions(self, verb, resource, conditions):", "body": "self._add_method('<STR_LIT>', verb, resource, conditions)<EOL>", "docstring": "Adds an API Gateway method (Http verb + Resource path) to the\nlist of allowed methods and includes a condition for the policy\nstatement. More on AWS policy conditions here:\nhttp://docs.aws.amazon.com/IAM/latest/UserGuide/\nreference_policies_elements.html#Condition", "id": "f5530:c1:m8"}
{"signature": "def deny_method_with_conditions(self, verb, resource, conditions):", "body": "self._add_method('<STR_LIT>', verb, resource, conditions)<EOL>", "docstring": "Adds an API Gateway method (Http verb + Resource path) to the\nlist of denied methods and includes a condition for the policy\nstatement. More on AWS policy conditions here:\nhttp://docs.aws.amazon.com/IAM/latest/UserGuide/\nreference_policies_elements.html#Condition", "id": "f5530:c1:m9"}
{"signature": "def deny_all_methods(self):", "body": "self._add_method('<STR_LIT>', HttpVerb.ALL, '<STR_LIT:*>', [])<EOL>", "docstring": "Adds a '*' allow to deny access to all methods of an API", "id": "f5530:c1:m5"}
{"signature": "def allow_method(self, verb, resource):", "body": "self._add_method('<STR_LIT>', verb, resource, [])<EOL>", "docstring": "Adds an API Gateway method (Http verb + Resource path)\nto the list of allowed methods for the policy", "id": "f5530:c1:m6"}
{"signature": "def allow_all_methods(self):", "body": "self._add_method('<STR_LIT>', HttpVerb.ALL, '<STR_LIT:*>', [])<EOL>", "docstring": "Adds a '*' allow to authorize access to all methods of an API", "id": "f5530:c1:m4"}
{"signature": "def tell(self, message, sender=no_sender):", "body": "if sender is not no_sender and not isinstance(sender, ActorRef):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self._cell.send_message(message, sender)<EOL>", "docstring": "Send a message to this actor. Asynchronous fire-and-forget.\n\n        :param message: The message to send.\n        :type message: Any\n\n        :param sender: The sender of the message. If provided it will be made\n            available to the receiving actor via the :attr:`Actor.sender` attribute.\n        :type sender: :class:`Actor`", "id": "f5549:c0:m1"}
{"signature": "def suspend(self):", "body": "self._primary_status = _Status.SUSPENDED<EOL>", "docstring": "Stop processing of user message.", "id": "f5550:c2:m4"}
{"signature": "def set_scheduled(self):", "body": "with self._idle_lock:<EOL><INDENT>if self._idle:<EOL><INDENT>self._idle = False<EOL>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Returns True if state was successfully changed from idle to scheduled.", "id": "f5550:c2:m11"}
{"signature": "def __init__(self, system, supervisor):", "body": "self._system = system<EOL>self._supervisor = supervisor<EOL>", "docstring": ":param system:\n:type system: :class:`ActorSystem`\n\n:param supervisor:\n:type supervisor: :class:`InternalRef`", "id": "f5555:c0:m0"}
{"signature": "def __init__(self, system_dispatcher=None):", "body": "self._system_dispatcher = Dispatcher(Executor())if system_dispatcher is None else system_dispatcher<EOL>self._dead_letters = _DeadLetterRef()<EOL>self._terminate_promise = Promise()<EOL>class Guardian(Actor):<EOL><INDENT>def __init__(me):<EOL><INDENT>me._logger = logging.getLogger(__name__)<EOL><DEDENT>def receive(me, message):<EOL><INDENT>me._logger.warning(\"<STR_LIT>\")<EOL><DEDENT>def post_stop(me):<EOL><INDENT>self._terminate_promise.complete(None)<EOL><DEDENT><DEDENT>self._guardian = InternalRef(actors.internal.cell.Cell(Guardian,<EOL>dispatcher=self._system_dispatcher, system=self, parent=None))<EOL>self._guardian.send_system_message(Start)<EOL>actors.internal.factory.ActorFactory.__init__(self, self, self._guardian)<EOL>", "docstring": "The actor system is responsible for creating, configuring and stopping actors and\ndispatchers.\n\nNormally, only one system per application should be created.\n\n:param system_dispatcher: Override the dispatcher used by the system. This also acts as the\n    default dispatcher for new actors.\n:type system_dispatcher: :class:`Dispatcher`", "id": "f5556:c0:m0"}
{"signature": "def pre_restart(self):", "body": "", "docstring": "Called on the failed actor before it's disposed of.", "id": "f5561:c0:m3"}
{"signature": "def receive(self, message):", "body": "", "docstring": "Override to provide the actor behaviour.\n\n:param message: The current message.", "id": "f5561:c0:m0"}
{"signature": "def get(self, timeout=None):", "body": "result = None<EOL>try:<EOL><INDENT>result = self._result.get(True, timeout=timeout)<EOL><DEDENT>except Empty:<EOL><INDENT>raise Timeout()<EOL><DEDENT>if isinstance(result, Failure):<EOL><INDENT>six.reraise(*result.exc_info)<EOL><DEDENT>else:<EOL><INDENT>return result<EOL><DEDENT>", "docstring": "Return value on success, or raise exception on failure.", "id": "f5563:c1:m1"}
{"signature": "@property<EOL><INDENT>def mem_size(self):<DEDENT>", "body": "data_len = self._data_mem_size<EOL>node_count = len(list(self.xml_doc.iter(tag=etree.Element)))<EOL>if self.compressed:<EOL><INDENT>size = <NUM_LIT> * node_count + data_len + <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>tags_len = <NUM_LIT:0><EOL>for e in self.xml_doc.iter(tag=etree.Element):<EOL><INDENT>e_len = max(len(e.tag), <NUM_LIT:8>)<EOL>e_len = (e_len + <NUM_LIT:3>) & ~<NUM_LIT:3><EOL>tags_len += e_len<EOL><DEDENT>size = <NUM_LIT> * node_count + data_len + <NUM_LIT> + tags_len<EOL><DEDENT>return (size + <NUM_LIT:8>) & ~<NUM_LIT:7><EOL>", "docstring": "used when allocating memory ingame", "id": "f5570:c0:m5"}
{"signature": "def bdib(self, ticker, start_datetime, end_datetime, event_type, interval,<EOL>elms=None):", "body": "elms = [] if not elms else elms<EOL>logger = _get_logger(self.debug)<EOL>while(self._session.tryNextEvent()):<EOL><INDENT>pass<EOL><DEDENT>request = self.refDataService.createRequest('<STR_LIT>')<EOL>request.set('<STR_LIT>', ticker)<EOL>request.set('<STR_LIT>', event_type)<EOL>request.set('<STR_LIT>', interval)  <EOL>request.set('<STR_LIT>', start_datetime)<EOL>request.set('<STR_LIT>', end_datetime)<EOL>for name, val in elms:<EOL><INDENT>request.set(name, val)<EOL><DEDENT>logger.info('<STR_LIT>'.format(request))<EOL>self._session.sendRequest(request, identity=self._identity)<EOL>data = []<EOL>flds = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>for msg in self._receive_events():<EOL><INDENT>d = msg['<STR_LIT>']['<STR_LIT>']<EOL>for bar in d['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>data.append(bar['<STR_LIT>'])<EOL><DEDENT><DEDENT>data = pd.DataFrame(data).set_index('<STR_LIT:time>').sort_index().loc[:, flds]<EOL>return data<EOL>", "docstring": "Get Open, High, Low, Close, Volume, and numEvents for a ticker.\nReturn pandas DataFrame\n\nParameters\n----------\nticker: string\n    String corresponding to ticker\nstart_datetime: string\n    UTC datetime in format YYYY-mm-ddTHH:MM:SS\nend_datetime: string\n    UTC datetime in format YYYY-mm-ddTHH:MM:SS\nevent_type: string {TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID,\n                   BEST_ASK}\n    Requested data event type\ninterval: int {1... 1440}\n    Length of time bars\nelms: list of tuples\n    List of tuples where each tuple corresponds to the other elements\n    to be set. Refer to the IntradayBarRequest section in the\n    'Services & schemas reference guide' for more info on these values", "id": "f5580:c0:m17"}
{"signature": "def ref(self, tickers, flds, ovrds=None):", "body": "ovrds = [] if not ovrds else ovrds<EOL>logger = _get_logger(self.debug)<EOL>if type(tickers) is not list:<EOL><INDENT>tickers = [tickers]<EOL><DEDENT>if type(flds) is not list:<EOL><INDENT>flds = [flds]<EOL><DEDENT>request = self._create_req('<STR_LIT>', tickers, flds,<EOL>ovrds, [])<EOL>logger.info('<STR_LIT>'.format(request))<EOL>self._session.sendRequest(request, identity=self._identity)<EOL>data = self._parse_ref(flds)<EOL>data = pd.DataFrame(data)<EOL>data.columns = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT:value>']<EOL>return data<EOL>", "docstring": "Make a reference data request, get tickers and fields, return long\npandas DataFrame with columns [ticker, field, value]\n\nParameters\n----------\ntickers: {list, string}\n    String or list of strings corresponding to tickers\nflds: {list, string}\n    String or list of strings corresponding to FLDS\novrds: list of tuples\n    List of tuples where each tuple corresponds to the override\n    field and value\n\nExample\n-------\n>>> import pdblp\n>>> con = pdblp.BCon()\n>>> con.start()\n>>> con.ref(\"CL1 Comdty\", [\"FUT_GEN_MONTH\"])\n\nNotes\n-----\nThis returns reference data which has singleton values. In raw format\nthe messages passed back contain data of the form\n\nfieldData = {\n        FUT_GEN_MONTH = \"FGHJKMNQUVXZ\"\n}", "id": "f5580:c0:m9"}
{"signature": "def start(self):", "body": "<EOL>logger = _get_logger(self.debug)<EOL>started = self._session.start()<EOL>if started:<EOL><INDENT>ev = self._session.nextEvent()<EOL>ev_name = _EVENT_DICT[ev.eventType()]<EOL>logger.info('<STR_LIT>'.format(ev_name))<EOL>for msg in ev:<EOL><INDENT>logger.info('<STR_LIT>'.format(msg))<EOL><DEDENT>if ev.eventType() != blpapi.Event.SESSION_STATUS:<EOL><INDENT>raise RuntimeError('<STR_LIT>'<EOL>'<STR_LIT>'.format(ev_name))<EOL><DEDENT>ev = self._session.nextEvent()<EOL>ev_name = _EVENT_DICT[ev.eventType()]<EOL>logger.info('<STR_LIT>'.format(ev_name))<EOL>for msg in ev:<EOL><INDENT>logger.info('<STR_LIT>'.format(msg))<EOL><DEDENT>if ev.eventType() != blpapi.Event.SESSION_STATUS:<EOL><INDENT>raise RuntimeError('<STR_LIT>'<EOL>'<STR_LIT>'.format(ev_name))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>ev = self._session.nextEvent(self.timeout)<EOL>if ev.eventType() == blpapi.Event.SESSION_STATUS:<EOL><INDENT>for msg in ev:<EOL><INDENT>logger.warning('<STR_LIT>'.format(msg))<EOL><DEDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT><DEDENT>self._init_services()<EOL>return self<EOL>", "docstring": "Start connection and initialize session services", "id": "f5580:c0:m3"}
{"signature": "def bulkref(self, tickers, flds, ovrds=None):", "body": "ovrds = [] if not ovrds else ovrds<EOL>logger = _get_logger(self.debug)<EOL>if type(tickers) is not list:<EOL><INDENT>tickers = [tickers]<EOL><DEDENT>if type(flds) is not list:<EOL><INDENT>flds = [flds]<EOL><DEDENT>setvals = []<EOL>request = self._create_req('<STR_LIT>', tickers, flds,<EOL>ovrds, setvals)<EOL>logger.info('<STR_LIT>'.format(request))<EOL>self._session.sendRequest(request, identity=self._identity)<EOL>data = self._parse_bulkref(flds)<EOL>data = pd.DataFrame(data)<EOL>data.columns = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT:name>', '<STR_LIT:value>', '<STR_LIT>']<EOL>return data<EOL>", "docstring": "Make a bulk reference data request, get tickers and fields, return long\npandas DataFrame with columns [ticker, field, name, value, position].\nName refers to the element name and position is the position in the\ncorresponding array returned.\n\nParameters\n----------\ntickers: {list, string}\n    String or list of strings corresponding to tickers\nflds: {list, string}\n    String or list of strings corresponding to FLDS\novrds: list of tuples\n    List of tuples where each tuple corresponds to the override\n    field and value\n\nExample\n-------\n>>> import pdblp\n>>> con = pdblp.BCon()\n>>> con.start()\n>>> con.bulkref('BCOM Index', 'INDX_MWEIGHT')\n\nNotes\n-----\nThis returns bulk reference data which has array values. In raw format\nthe messages passed back contain data of the form\n\nfieldData = {\n    INDX_MWEIGHT[] = {\n        INDX_MWEIGHT = {\n            Member Ticker and Exchange Code = \"BON8\"\n            Percentage Weight = 2.410000\n        }\n        INDX_MWEIGHT = {\n            Member Ticker and Exchange Code = \"C N8\"\n            Percentage Weight = 6.560000\n        }\n        INDX_MWEIGHT = {\n            Member Ticker and Exchange Code = \"CLN8\"\n            Percentage Weight = 7.620000\n        }\n    }\n}", "id": "f5580:c0:m11"}
{"signature": "def ref_hist(self, tickers, flds, dates, ovrds=None,<EOL>date_field='<STR_LIT>'):", "body": "ovrds = [] if not ovrds else ovrds<EOL>if type(tickers) is not list:<EOL><INDENT>tickers = [tickers]<EOL><DEDENT>if type(flds) is not list:<EOL><INDENT>flds = [flds]<EOL><DEDENT>self._send_hist(tickers, flds, dates, date_field, ovrds)<EOL>data = self._parse_ref(flds, keep_corrId=True, sent_events=len(dates))<EOL>data = pd.DataFrame(data)<EOL>data.columns = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT:value>', '<STR_LIT:date>']<EOL>data = data.sort_values(by='<STR_LIT:date>').reset_index(drop=True)<EOL>data = data.loc[:, ['<STR_LIT:date>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:value>']]<EOL>return data<EOL>", "docstring": "Make iterative calls to ref() and create a long DataFrame with columns\n[date, ticker, field, value] where each date corresponds to overriding\na historical data override field.\n\nParameters\n----------\ntickers: {list, string}\n    String or list of strings corresponding to tickers\nflds: {list, string}\n    String or list of strings corresponding to FLDS\ndates: list\n    list of date strings in the format YYYYmmdd\novrds: list of tuples\n    List of tuples where each tuple corresponds to the override\n    field and value. This should not include the date_field which will\n    be iteratively overridden\ndate_field: str\n    Field to iteratively override for requesting historical data,\n    e.g. REFERENCE_DATE, CURVE_DATE, etc.\n\nExample\n-------\n>>> import pdblp\n>>> con = pdblp.BCon()\n>>> con.start()\n>>> dates = [\"20160625\", \"20160626\"]\n>>> con.ref_hist(\"AUD1M CMPN Curncy\", \"SETTLE_DT\", dates)", "id": "f5580:c0:m14"}
{"signature": "@debug.setter<EOL><INDENT>def debug(self, value):<DEDENT>", "body": "self._debug = value<EOL>", "docstring": "Set whether logging is True or False", "id": "f5580:c0:m2"}
{"signature": "@property<EOL><INDENT>def debug(self):<DEDENT>", "body": "return self._debug<EOL>", "docstring": "When True, print all Bloomberg Open API request and response messages\nto stdout", "id": "f5580:c0:m1"}
{"signature": "def _init_services(self):", "body": "logger = _get_logger(self.debug)<EOL>opened = self._session.openService('<STR_LIT>')<EOL>ev = self._session.nextEvent()<EOL>ev_name = _EVENT_DICT[ev.eventType()]<EOL>logger.info('<STR_LIT>'.format(ev_name))<EOL>for msg in ev:<EOL><INDENT>logger.info('<STR_LIT>'.format(msg))<EOL><DEDENT>if ev.eventType() != blpapi.Event.SERVICE_STATUS:<EOL><INDENT>raise RuntimeError('<STR_LIT>'<EOL>'<STR_LIT>'.format(ev_name))<EOL><DEDENT>if not opened:<EOL><INDENT>logger.warning('<STR_LIT>')<EOL>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>self.refDataService = self._session.getService('<STR_LIT>')<EOL>opened = self._session.openService('<STR_LIT>')<EOL>ev = self._session.nextEvent()<EOL>ev_name = _EVENT_DICT[ev.eventType()]<EOL>logger.info('<STR_LIT>'.format(ev_name))<EOL>for msg in ev:<EOL><INDENT>logger.info('<STR_LIT>'.format(msg))<EOL><DEDENT>if ev.eventType() != blpapi.Event.SERVICE_STATUS:<EOL><INDENT>raise RuntimeError('<STR_LIT>'<EOL>'<STR_LIT>'.format(ev_name))<EOL><DEDENT>if not opened:<EOL><INDENT>logger.warning('<STR_LIT>')<EOL>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>self.exrService = self._session.getService('<STR_LIT>')<EOL>return self<EOL>", "docstring": "Initialize blpapi.Session services", "id": "f5580:c0:m4"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def control_disable_by_handle(hwnd, h_ctrl):", "body": "ret = AUTO_IT.AU3_ControlDisableByHandle(HWND(hwnd), HWND(h_ctrl))<EOL>return ret<EOL>", "docstring": ":param hwnd:\n:param h_ctrl:\n:return:", "id": "f5585:m7"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def control_move(title, control, x, y, width=-<NUM_LIT:1>, height=-<NUM_LIT:1>, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>ret = AUTO_IT.AU3_ControlMove(<EOL>LPCWSTR(title), LPCWSTR(text), LPCWSTR(control),<EOL>INT(x), INT(y), INT(width), INT(height)<EOL>)<EOL>return ret<EOL>", "docstring": ":param title:\n:param control:\n:param x:\n:param y:\n:param kwargs:\n:return:", "id": "f5585:m22"}
{"signature": "@api.check(<NUM_LIT:1>, \"<STR_LIT>\")<EOL>def control_list_view(title, control, command, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>buf_size = kwargs.get(\"<STR_LIT>\", <NUM_LIT>)<EOL>result = ctypes.create_unicode_buffer(buf_size)<EOL>extra1 = kwargs.get(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>extra2 = kwargs.get(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>AUTO_IT.AU3_ControlListView(<EOL>LPCWSTR(title), LPCWSTR(text), LPCWSTR(control), LPCWSTR(command),<EOL>LPCWSTR(extra1), LPCWSTR(extra2), result, INT(buf_size)<EOL>)<EOL>return result.value.rstrip()<EOL>", "docstring": ":param title:\n:param control:\n:param command:\n:param args:\n:param kwargs:\n:return:", "id": "f5585:m4"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def control_focus_by_handle(hwnd, h_ctrl):", "body": "ret = AUTO_IT.AU3_ControlFocusByHandle(HWND(hwnd), HWND(h_ctrl))<EOL>return ret<EOL>", "docstring": ":param hwnd:\n:param h_ctrl:\n:return:", "id": "f5585:m11"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def control_show_by_handle(hwnd, h_ctrl):", "body": "ret = AUTO_IT.AU3_ControlShowByHandle(HWND(hwnd), HWND(h_ctrl))<EOL>return ret<EOL>", "docstring": ":param hwnd:\n:param h_ctrl:\n:return:", "id": "f5585:m29"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def control_focus(title, control, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>ret = AUTO_IT.AU3_ControlFocus(<EOL>LPCWSTR(title), LPCWSTR(text), LPCWSTR(control))<EOL>return ret<EOL>", "docstring": ":param title:\n:param control:\n:param kwargs:\n:return:", "id": "f5585:m10"}
{"signature": "@api.check(<NUM_LIT:1>, \"<STR_LIT>\")<EOL>def control_get_focus_by_handle(hwnd, buf_size=<NUM_LIT>):", "body": "ctrl_with_focus = ctypes.create_unicode_buffer(buf_size)<EOL>AUTO_IT.AU3_ControlGetFocusByHandle(HWND(hwnd), ctrl_with_focus,<EOL>INT(buf_size))<EOL>return ctrl_with_focus.value.rstrip()<EOL>", "docstring": ":param hwnd:\n:param buf_size:\n:return:", "id": "f5585:m13"}
{"signature": "@api.check(<NUM_LIT:1>, \"<STR_LIT>\")<EOL>def control_get_handle_as_text(title, control, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>buf_size = kwargs.get(\"<STR_LIT>\", <NUM_LIT:32>)<EOL>ret_text = ctypes.create_unicode_buffer(buf_size)<EOL>AUTO_IT.AU3_ControlGetHandleAsText(<EOL>LPCWSTR(title), LPCWSTR(text), LPCWSTR(control),<EOL>ret_text, INT(buf_size)<EOL>)<EOL>return ret_text.value.rstrip()<EOL>", "docstring": ":param title:\n:param control:\n:param kwargs:\n:return:", "id": "f5585:m15"}
{"signature": "@api.check(<NUM_LIT:1>, \"<STR_LIT>\")<EOL>def control_list_view_by_handle(hwnd, h_ctrl, command, **kwargs):", "body": "extra1 = kwargs.get(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>extra2 = kwargs.get(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>buf_size = kwargs.get(\"<STR_LIT>\", <NUM_LIT>)<EOL>result = ctypes.create_unicode_buffer(buf_size)<EOL>AUTO_IT.AU3_ControlListViewByHandle(<EOL>HWND(hwnd), HWND(h_ctrl), LPCWSTR(command),<EOL>LPCWSTR(extra1), LPCWSTR(extra2), result, INT(buf_size)<EOL>)<EOL>return result.value.rstrip()<EOL>", "docstring": ":param hwnd:\n:param h_ctrl:\n:param command:\n:param kwargs:\n:return:", "id": "f5585:m5"}
{"signature": "def check(self, mark=<NUM_LIT:0>, err_msg=\"<STR_LIT>\", **kwds):", "body": "unexpected_ret = kwds.get(\"<STR_LIT>\", (<NUM_LIT:0>,))<EOL>def _check(fn):<EOL><INDENT>@wraps(fn)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>ret = fn(*args, **kwargs)<EOL>flags = reduce(<EOL>self._parser, [dict(num=mark, flags=[]), <NUM_LIT:2>, <NUM_LIT:1>])[\"<STR_LIT>\"]<EOL>if <NUM_LIT:1> in flags:<EOL><INDENT>if self._has_error():<EOL><INDENT>raise AutoItError(err_msg)<EOL><DEDENT><DEDENT>if <NUM_LIT:2> in flags:<EOL><INDENT>if self._has_unexpected_ret(ret, unexpected_ret):<EOL><INDENT>raise AutoItError(err_msg)<EOL><DEDENT><DEDENT>return ret<EOL><DEDENT>return wrapper<EOL><DEDENT>return _check<EOL>", "docstring": ":param mark:\n    0 - do not need check return value or error()\n    1 - check error()\n    2 - check return value", "id": "f5586:c1:m4"}
{"signature": "def is_admin():", "body": "ret = AUTO_IT.AU3_IsAdmin()<EOL>return ret<EOL>", "docstring": ":return:", "id": "f5586:m4"}
{"signature": "@api.check(<NUM_LIT:2>, err_msg=\"<STR_LIT>\")<EOL>def drive_map_del(device):", "body": "ret = AUTO_IT.AU3_DriveMapDel(LPCWSTR(device))<EOL>return ret<EOL>", "docstring": ":param device:\n:return:", "id": "f5586:m6"}
{"signature": "@api.check(<NUM_LIT:2>, err_msg=\"<STR_LIT>\")<EOL>def clip_put(value):", "body": "ret = AUTO_IT.AU3_ClipPut(LPCWSTR(value))<EOL>return ret<EOL>", "docstring": ":param value:\n:return:", "id": "f5586:m3"}
{"signature": "@api.check(<NUM_LIT:1>, err_msg=\"<STR_LIT>\")<EOL>def pixel_search(left, top, right, bottom, col, var=<NUM_LIT:1>, step=<NUM_LIT:1>):", "body": "p = POINT()<EOL>rect = RECT(left, top, right, bottom)<EOL>AUTO_IT.AU3_PixelSearch(<EOL>ctypes.byref(rect), INT(col), INT(var), INT(step), ctypes.byref(p)<EOL>)<EOL>return p.x, p.y<EOL>", "docstring": ":param left:\n:param top:\n:param right:\n:param bottom:\n:param col:\n:param var:\n:param step:\n:return:", "id": "f5586:m19"}
{"signature": "def mouse_move(x, y, speed=-<NUM_LIT:1>):", "body": "ret = AUTO_IT.AU3_MouseMove(INT(x), INT(y), INT(speed))<EOL>return ret<EOL>", "docstring": ":param x:\n:param y:\n:param speed:\n:return:", "id": "f5586:m13"}
{"signature": "def tooltip(tip, x=INTDEFAULT, y=INTDEFAULT):", "body": "AUTO_IT.AU3_ToolTip(LPCWSTR(tip), INT(x), INT(y))<EOL>", "docstring": ":param tip:\n:param x:\n:param y:\n:return:", "id": "f5586:m21"}
{"signature": "def win_get_title(title, buf_size=<NUM_LIT>, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>ret_text = create_unicode_buffer(buf_size)<EOL>AUTO_IT.AU3_WinGetTitle(LPCWSTR(title), LPCWSTR(text), ret_text,<EOL>INT(buf_size))<EOL>val = ret_text.value.rstrip()<EOL>return val<EOL>", "docstring": ":param title:\n:param text:\n:return:", "id": "f5587:m23"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_close_by_handle(handle):", "body": "ret = AUTO_IT.AU3_WinCloseByHandle(HWND(handle))<EOL>return ret<EOL>", "docstring": ":param handle:\n:return:", "id": "f5587:m5"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_close(title, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>ret = AUTO_IT.AU3_WinClose(LPCWSTR(title), LPCWSTR(text))<EOL>return ret<EOL>", "docstring": ":param title:\n:param text:\n:return:", "id": "f5587:m4"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_menu_select_item(title, *items, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>if not (<NUM_LIT:0> < len(items) < <NUM_LIT:8>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>f_items = [LPCWSTR(item) for item in items]<EOL>for i in xrange(<NUM_LIT:8> - len(f_items)):<EOL><INDENT>f_items.append(LPCWSTR(\"<STR_LIT>\"))<EOL><DEDENT>ret = AUTO_IT.AU3_WinMenuSelectItem(LPCWSTR(title), LPCWSTR(text),<EOL>*f_items)<EOL>return ret<EOL>", "docstring": "Usage:\n    win_menu_select_item(\"[CLASS:Notepad]\", \"\", u\"\u6587\u4ef6(&F)\", u\"\u9000\u51fa(&X)\")\n:param title:\n:param text:\n:param items:\n:return:", "id": "f5587:m27"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_move(title, x, y, width=-<NUM_LIT:1>, height=-<NUM_LIT:1>, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>ret = AUTO_IT.AU3_WinMove(LPCWSTR(title), LPCWSTR(text), INT(x), INT(y),<EOL>INT(width), INT(height))<EOL>return ret<EOL>", "docstring": ":param title:\n:param text:\n:param x:\n:param y:\n:param width:\n:param height:\n:return:", "id": "f5587:m31"}
{"signature": "def win_exists(title, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>ret = AUTO_IT.AU3_WinExists(LPCWSTR(title), LPCWSTR(text))<EOL>return ret<EOL>", "docstring": "Checks to see if a specified window exists.\n:param title: The title of the window to check.\n:param text: The text of the window to check.\n:return: Returns 1 if the window exists, otherwise returns 0.", "id": "f5587:m6"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_wait_by_handle(handle, timeout):", "body": "ret = AUTO_IT.AU3_WinWaitByHandle(HWND(handle), INT(timeout))<EOL>return ret<EOL>", "docstring": ":param handle:\n:param timeout:\n:return:", "id": "f5587:m42"}
{"signature": "def win_get_class_list(title, buf_size=<NUM_LIT:200>, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>rec_text = create_unicode_buffer(buf_size)  <EOL>AUTO_IT.AU3_WinGetClassList(LPCWSTR(title), LPCWSTR(text),<EOL>rec_text, INT(buf_size))<EOL>msg = rec_text.value.rstrip()<EOL>return msg<EOL>", "docstring": ":param title:\n:param text:\n:param buf_size:\n:return:", "id": "f5587:m9"}
{"signature": "def win_get_text_by_handle(handle, buf_size=<NUM_LIT>):", "body": "ret_text = create_unicode_buffer(buf_size)<EOL>AUTO_IT.AU3_WinGetTextByHandle(HWND(handle), ret_text, INT(buf_size))<EOL>return ret_text.value.rstrip()<EOL>", "docstring": ":param handle:\n:return:", "id": "f5587:m22"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_wait_active_by_handle(handle, timeout):", "body": "ret = AUTO_IT.AU3_WinWaitActiveByHandle(HWND(handle), INT(timeout))<EOL>return ret<EOL>", "docstring": ":param handle:\n:param timeout:\n:return:", "id": "f5587:m44"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_menu_select_item_by_handle(handle, *items):", "body": "if not (<NUM_LIT:0> < len(items) < <NUM_LIT:8>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>f_items = [LPCWSTR(item) for item in items]<EOL>for i in xrange(<NUM_LIT:8> - len(f_items)):<EOL><INDENT>f_items.append(LPCWSTR(\"<STR_LIT>\"))<EOL><DEDENT>ret = AUTO_IT.AU3_WinMenuSelectItemByHandle(HWND(handle), *f_items)<EOL>return ret<EOL>", "docstring": ":param handle:\n:param items:\n:return:", "id": "f5587:m28"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_wait_active(title, timeout=<NUM_LIT:0>, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>ret = AUTO_IT.AU3_WinWaitActive(LPCWSTR(title), LPCWSTR(text),<EOL>INT(timeout))<EOL>return ret<EOL>", "docstring": ":param title:\n:param timeout:\n:param kwargs:\n:return:", "id": "f5587:m43"}
{"signature": "def win_get_text(title, buf_size=<NUM_LIT>, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>ret_text = create_unicode_buffer(buf_size)<EOL>AUTO_IT.AU3_WinGetText(LPCWSTR(title), LPCWSTR(text), ret_text,<EOL>INT(buf_size))<EOL>val = ret_text.value.rstrip()<EOL>return val<EOL>", "docstring": ":param title:\n:param text:\n:param buf_size:\n:return:", "id": "f5587:m21"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_set_trans_by_handle(handle, trans):", "body": "ret = AUTO_IT.AU3_WinSetTransByHandle(HWND(handle), INT(trans))<EOL>return ret<EOL>", "docstring": ":param handle:\n:param trans:\n:return:", "id": "f5587:m40"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_set_title(title, new_title, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>ret = AUTO_IT.AU3_WinSetTitle(LPCWSTR(title), LPCWSTR(text),<EOL>LPCWSTR(new_title))<EOL>return ret<EOL>", "docstring": ":param title:\n:param new_title:\n:param kwargs:\n:return:", "id": "f5587:m37"}
{"signature": "def win_minimize_all():", "body": "AUTO_IT.AU3_WinMinimizeAll()<EOL>", "docstring": ":return:", "id": "f5587:m29"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_wait_not_active(title, timeout=<NUM_LIT:0>, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>ret = AUTO_IT.AU3_WinWaitNotActive(LPCWSTR(title), LPCWSTR(text),<EOL>INT(timeout))<EOL>return ret<EOL>", "docstring": ":param title:\n:param timeout:\n:param kwargs:\n:return:", "id": "f5587:m47"}
{"signature": "@api.check(<NUM_LIT:1>, \"<STR_LIT>\")<EOL>def win_get_caret_pos():", "body": "p = POINT()<EOL>AUTO_IT.AU3_WinGetCaretPos(byref(p))<EOL>return p.x, p.y<EOL>", "docstring": "Returns the coordinates of the caret in the foreground window\n:return:", "id": "f5587:m8"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_set_state(title, flag=properties.SW_SHOW, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>ret = AUTO_IT.AU3_WinSetState(LPCWSTR(title), LPCWSTR(text), INT(flag))<EOL>return ret<EOL>", "docstring": ":param title:\n:param flag: The \"show\" flag of the executed program:\n    SW_HIDE = Hide window\n    SW_SHOW = Shows a previously hidden window\n    SW_MINIMIZE = Minimize window\n    SW_MAXIMIZE = Maximize window\n    SW_RESTORE = Undoes a window minimization or maximization\n:param kwargs:\n:return:", "id": "f5587:m35"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def win_get_state(title, **kwargs):", "body": "text = kwargs.get(\"<STR_LIT:text>\", \"<STR_LIT>\")<EOL>res = AUTO_IT.AU3_WinGetState(LPCWSTR(title), LPCWSTR(text))<EOL>return res<EOL>", "docstring": "Retrieves the state of a given window.\n:param title:\n:param text:\n:return:\n1 = Window exists\n2 = Window is visible\n4 = Windows is enabled\n8 = Window is active\n16 = Window is minimized\n32 = Windows is maximized", "id": "f5587:m19"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def process_wait(process, timeout=<NUM_LIT:0>):", "body": "ret = AUTO_IT.AU3_ProcessWait(LPCWSTR(process), INT(timeout))<EOL>return ret<EOL>", "docstring": "Pauses script execution until a given process exists.\n:param process:\n:param timeout:\n:return:", "id": "f5589:m5"}
{"signature": "@api.check(<NUM_LIT:2>, \"<STR_LIT>\")<EOL>def shutdown(code):", "body": "ret = AUTO_IT.AU3_Shutdown(INT(code))<EOL>return ret<EOL>", "docstring": ":param code: The shutdown code is a combination of the following values:\n    0 = Logoff\n    1 = Shutdown\n    2 = Reboot\n    4 = Force\n    8 = Power down\n:return:", "id": "f5589:m9"}
{"signature": "@api.check(<NUM_LIT:1>, \"<STR_LIT>\")<EOL>def run_wait(filename, work_dir=\"<STR_LIT>\", show_flag=Properties.SW_SHOWNORMAL):", "body": "ret = AUTO_IT.AU3_RunWait(LPCWSTR(filename), LPCWSTR(work_dir),<EOL>INT(show_flag))<EOL>return ret<EOL>", "docstring": ":param filename:\n:param work_dir:\n:param show_flag:\n:return:", "id": "f5589:m1"}
{"signature": "def process_exists(process):", "body": "ret = AUTO_IT.AU3_ProcessExists(LPCWSTR(process))<EOL>return ret<EOL>", "docstring": ":param process:\n:return:", "id": "f5589:m3"}
{"signature": "def process_set_priority(process, priority):", "body": "ret = AUTO_IT.AU3_ProcessSetPriority(LPCWSTR(process), INT(priority))<EOL>if ret == <NUM_LIT:0>:<EOL><INDENT>if error() == <NUM_LIT:1>:<EOL><INDENT>raise AutoItError(\"<STR_LIT>\")<EOL><DEDENT>elif error() == <NUM_LIT:2>:<EOL><INDENT>raise AutoItError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "Changes the priority of a process\n:param process: The name or PID of the process to check.\n:param priority:A flag which determines what priority to set\n    0 - Idle/Low\n    1 - Below Normal (Not supported on Windows 95/98/ME)\n    2 - Normal\n    3 - Above Normal (Not supported on Windows 95/98/ME)\n    4 - High\n    5 - Realtime (Use with caution, may make the system unstable)\n:return:", "id": "f5589:m4"}
{"signature": "def mod2md(module, title, title_api_section, toc=True, maxdepth=<NUM_LIT:0>):", "body": "docstr = module.__doc__<EOL>text = doctrim(docstr)<EOL>lines = text.split('<STR_LIT:\\n>')<EOL>sections = find_sections(lines)<EOL>if sections:<EOL><INDENT>level = min(n for n,t in sections) - <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>level = <NUM_LIT:1><EOL><DEDENT>api_md = []<EOL>api_sec = []<EOL>if title_api_section and module.__all__:<EOL><INDENT>sections.append((level+<NUM_LIT:1>, title_api_section))<EOL>for name in module.__all__:<EOL><INDENT>api_sec.append((level+<NUM_LIT:2>, \"<STR_LIT>\" + name + \"<STR_LIT>\"))<EOL>api_md += ['<STR_LIT>', '<STR_LIT>']<EOL>entry = module.__dict__[name]<EOL>if entry.__doc__:<EOL><INDENT>md, sec = doc2md(entry.__doc__, \"<STR_LIT>\" + name + \"<STR_LIT>\",<EOL>min_level=level+<NUM_LIT:2>, more_info=True, toc=False)<EOL>api_sec += sec<EOL>api_md += md<EOL><DEDENT><DEDENT><DEDENT>sections += api_sec<EOL>head = next((i for i, l in enumerate(lines) if is_heading(l)), <NUM_LIT:0>)<EOL>md = [<EOL>make_heading(level, title),<EOL>\"<STR_LIT>\",<EOL>] + lines[:head]<EOL>if toc:<EOL><INDENT>md += make_toc(sections, maxdepth)<EOL>md += ['<STR_LIT>']<EOL><DEDENT>md += _doc2md(lines[head:])<EOL>md += [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>make_heading(level+<NUM_LIT:1>, title_api_section),<EOL>]<EOL>if toc:<EOL><INDENT>md += ['<STR_LIT>']<EOL>md += make_toc(api_sec, <NUM_LIT:1>)<EOL><DEDENT>md += api_md<EOL>return \"<STR_LIT:\\n>\".join(md)<EOL>", "docstring": "Generate markdown document from module, including API section.", "id": "f5592:m11"}
{"signature": "def long_description():", "body": "import argparse<EOL>parser = argparse.ArgumentParser()<EOL>parser.add_argument('<STR_LIT>', dest=\"<STR_LIT>\",<EOL>action=\"<STR_LIT:store_true>\", default=False)<EOL>args, sys.argv = parser.parse_known_args(sys.argv)<EOL>if args.doc:<EOL><INDENT>import doc2md, pypandoc<EOL>md = doc2md.doc2md(doc2md.__doc__, \"<STR_LIT>\", toc=False)<EOL>long_description = pypandoc.convert(md, '<STR_LIT>', format='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Generate .rst document for PyPi.", "id": "f5593:m0"}
{"signature": "@yaz.task<EOL>def say(message: str = \"<STR_LIT>\"):", "body": "return message<EOL>", "docstring": "Print MESSAGE to the console", "id": "f5595:m0"}
{"signature": "def get_parameters(self):", "body": "if self.plugin_class is None:<EOL><INDENT>sig = inspect.signature(self.func)<EOL>for index, parameter in enumerate(sig.parameters.values()):<EOL><INDENT>if not parameter.kind in [parameter.POSITIONAL_ONLY, parameter.KEYWORD_ONLY, parameter.POSITIONAL_OR_KEYWORD]:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\".format(parameter, parameter.kind))<EOL><DEDENT>yield parameter<EOL><DEDENT><DEDENT>else:<EOL><INDENT>var_keyword_seen = set()<EOL>for cls in inspect.getmro(self.plugin_class):<EOL><INDENT>if issubclass(cls, BasePlugin) and hasattr(cls, self.func.__name__):<EOL><INDENT>func = getattr(cls, self.func.__name__)<EOL>logger.debug(\"<STR_LIT>\", func, cls)<EOL>var_keyword_found = False<EOL>sig = inspect.signature(func)<EOL>for index, parameter in enumerate(sig.parameters.values()):<EOL><INDENT>if index == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>if parameter.kind == inspect.Parameter.VAR_KEYWORD:<EOL><INDENT>var_keyword_found = True<EOL>continue<EOL><DEDENT>if parameter.kind in [parameter.POSITIONAL_ONLY, parameter.VAR_POSITIONAL]:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\".format(func, parameter))<EOL><DEDENT>if not parameter.name in var_keyword_seen:<EOL><INDENT>var_keyword_seen.add(parameter.name)<EOL>logger.debug(\"<STR_LIT>\", parameter, parameter.kind)<EOL>yield parameter<EOL><DEDENT><DEDENT>if not var_keyword_found:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Returns a list of parameters", "id": "f5601:c0:m5"}
{"signature": "@property<EOL><INDENT>def qualified_name(self):<DEDENT>", "body": "return self.func.__qualname__<EOL>", "docstring": "Returns the __qualname__ of this Task", "id": "f5601:c0:m3"}
{"signature": "def get_task_tree(white_list=None):", "body": "assert white_list is None or isinstance(white_list, list), type(white_list)<EOL>if white_list is not None:<EOL><INDENT>white_list = set(item if isinstance(item, str) else item.__qualname__ for item in white_list)<EOL><DEDENT>tree = dict((task.qualified_name, task)<EOL>for task<EOL>in _task_list.values()<EOL>if white_list is None or task.qualified_name in white_list)<EOL>plugins = get_plugin_list()<EOL>for plugin in [plugin for plugin in plugins.values() if white_list is None or plugin.__qualname__ in white_list]:<EOL><INDENT>tasks = [func<EOL>for _, func<EOL>in inspect.getmembers(plugin)<EOL>if inspect.isfunction(func) and hasattr(func, \"<STR_LIT>\")]<EOL>if len(tasks) == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>node = tree<EOL>for name in plugin.__qualname__.split(\"<STR_LIT:.>\"):<EOL><INDENT>if not name in node:<EOL><INDENT>node[name] = {}<EOL><DEDENT>node = node[name]<EOL><DEDENT>for func in tasks:<EOL><INDENT>logger.debug(\"<STR_LIT>\", func)<EOL>node[func.__name__] = Task(plugin_class=plugin, func=func, config=func.yaz_task_config)<EOL><DEDENT><DEDENT>return tree<EOL>", "docstring": "Returns a tree of Task instances\n\n    The tree is comprised of dictionaries containing strings for\n    keys and either dictionaries or Task instances for values.\n\n    When WHITE_LIST is given, only the tasks and plugins in this\n    list will become part of the task tree.  The WHITE_LIST may\n    contain either strings, corresponding to the task of plugin\n    __qualname__, or, preferable, the WHITE_LIST contains\n    links to the task function or plugin class instead.", "id": "f5601:m0"}
{"signature": "@decorator<EOL>def task(func, **config):", "body": "if func.__name__ == func.__qualname__:<EOL><INDENT>assert not func.__qualname__ in _task_list, \"<STR_LIT>\".format(func.__qualname__)<EOL>logger.debug(\"<STR_LIT>\", func)<EOL>_task_list[func.__qualname__] = Task(plugin_class=None, func=func, config=config)<EOL><DEDENT>else:<EOL><INDENT>func.yaz_task_config = config<EOL><DEDENT>return func<EOL>", "docstring": "Declare a function or method to be a Yaz task\n\n    @yaz.task\n    def talk(message: str = \"Hello World!\"):\n        return message\n\n    Or... group multiple tasks together\n\n    class Tools(yaz.Plugin):\n        @yaz.task\n        def say(self, message: str = \"Hello World!\"):\n            return message\n\n        @yaz.task(option__choices=[\"A\", \"B\", \"C\"])\n        def choose(self, option: str = \"A\"):\n            return option", "id": "f5601:m1"}
{"signature": "@yaz.task<EOL><INDENT>def main(self, pos1: str, pos_or_key1: str = \"<STR_LIT>\", *, key1: str = \"<STR_LIT>\", extra1: str = \"<STR_LIT>\", **kwargs):<DEDENT>", "body": "return super().main(pos1, pos_or_key1=pos_or_key1, key1=key1, **kwargs) + [extra1]<EOL>", "docstring": "required from water-custom-plugin", "id": "f5608:c6:m0"}
{"signature": "@yaz.task<EOL>def multiple_functions_foo():", "body": "return \"<STR_LIT>\"<EOL>", "docstring": "Well... why not \"foo\"?", "id": "f5609:m0"}
{"signature": "@yaz.task<EOL>def multiple_functions_bar():", "body": "return \"<STR_LIT>\"<EOL>", "docstring": "I disagree and subsequently prefer \"bar", "id": "f5609:m1"}
{"signature": "@yaz.task(choice__choices=[\"<STR_LIT:yes>\", \"<STR_LIT>\", \"<STR_LIT>\"])<EOL><INDENT>def required_choice(self, choice):<DEDENT>", "body": "return self.choices[choice]<EOL>", "docstring": "This is the documentation for the required_choice task", "id": "f5613:c0:m0"}
{"signature": "@yaz.task<EOL><INDENT>def multi_line_doc_string(self):<DEDENT>", "body": "pass<EOL>", "docstring": "This is the documentation for the multi_line_doc_string task\n\nThis is the long description, for example:\nbla bla,\netc...", "id": "f5613:c0:m2"}
{"signature": "def load(directory_name, module_name):", "body": "directory_name = os.path.expanduser(directory_name)<EOL>if os.path.isdir(directory_name) and directory_name not in sys.path:<EOL><INDENT>sys.path.append(directory_name)<EOL><DEDENT>try:<EOL><INDENT>return importlib.import_module(module_name)<EOL><DEDENT>except ImportError:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Try to load and return a module\n\n    Will add DIRECTORY_NAME to sys.path and tries to import MODULE_NAME.\n\n    For example:\n    load(\"~/.yaz\", \"yaz_extension\")", "id": "f5619:m0"}
{"signature": "def get_plugin_list():", "body": "global _yaz_plugin_classes<EOL>def get_recursively(cls, plugin_list):<EOL><INDENT>for plugin in cls.__subclasses__():<EOL><INDENT>if not (plugin.yaz_is_final() or plugin.__qualname__ in _yaz_plugin_classes):<EOL><INDENT>plugin_list[plugin.__qualname__].append(plugin)<EOL><DEDENT>get_recursively(plugin, plugin_list)<EOL><DEDENT>return plugin_list<EOL><DEDENT>def include_class(candidate, classes):<EOL><INDENT>for cls in classes:<EOL><INDENT>if candidate is cls:<EOL><INDENT>continue<EOL><DEDENT>if issubclass(cls, candidate):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL><DEDENT>def get_plugin_type(qualname, plugins):<EOL><INDENT>classes = sorted(plugins, key=lambda plugin: plugin.yaz_get_ordinal())<EOL>classes = [cls for cls in classes if include_class(cls, classes)]<EOL>logger.debug(\"<STR_LIT>\", qualname, [cls for cls in classes])<EOL>return type(qualname, tuple(classes) + (Final,), {})<EOL><DEDENT>logger.debug(\"<STR_LIT>\" % _yaz_plugin_classes)<EOL>plugin_list = get_recursively(BasePlugin, collections.defaultdict(list))<EOL>_yaz_plugin_classes.update((qualname, get_plugin_type(qualname, plugins))<EOL>for qualname, plugins<EOL>in plugin_list.items())<EOL>assert isinstance(_yaz_plugin_classes, dict), type(_yaz_plugin_classes)<EOL>assert all(isinstance(qualname, str) for qualname in _yaz_plugin_classes.keys()), \"<STR_LIT>\"<EOL>assert all(issubclass(plugin_class, Final) for plugin_class in _yaz_plugin_classes.values()), \"<STR_LIT>\"<EOL>return _yaz_plugin_classes<EOL>", "docstring": "Finds all yaz plugins and returns them in a __qualname__: plugin_class dictionary", "id": "f5622:m1"}
{"signature": "def tearDown(self):", "body": "HTTPretty.disable()<EOL>", "docstring": "Tear stuff down", "id": "f5624:c0:m38"}
{"signature": "def get_doc(self, doc_name, cwd=cwd):", "body": "with open(os.path.join(cwd, \"<STR_LIT>\", \"<STR_LIT:%s>\" % doc_name), \"<STR_LIT:r>\") as f:<EOL><INDENT>return f.read()<EOL><DEDENT>", "docstring": "return the requested test document", "id": "f5624:c0:m0"}
{"signature": "def setUp(self):", "body": "self.item_doc = self.get_doc(\"<STR_LIT>\")<EOL>self.items_doc = self.get_doc(\"<STR_LIT>\")<EOL>self.item_versions = self.get_doc(\"<STR_LIT>\")<EOL>self.collection_versions = self.get_doc(\"<STR_LIT>\")<EOL>self.collections_doc = self.get_doc(\"<STR_LIT>\")<EOL>self.collection_doc = self.get_doc(\"<STR_LIT>\")<EOL>self.collection_tags = self.get_doc(\"<STR_LIT>\")<EOL>self.citation_doc = self.get_doc(\"<STR_LIT>\")<EOL>self.attachments_doc = self.get_doc(\"<STR_LIT>\")<EOL>self.tags_doc = self.get_doc(\"<STR_LIT>\")<EOL>self.groups_doc = self.get_doc(\"<STR_LIT>\")<EOL>self.item_templt = self.get_doc(\"<STR_LIT>\")<EOL>self.item_types = self.get_doc(\"<STR_LIT>\")<EOL>self.item_fields = self.get_doc(\"<STR_LIT>\")<EOL>self.keys_response = self.get_doc(\"<STR_LIT>\")<EOL>self.creation_doc = self.get_doc(\"<STR_LIT>\")<EOL>self.item_file = self.get_doc(\"<STR_LIT>\")<EOL>HTTPretty.enable()<EOL>HTTPretty.register_uri(<EOL>HTTPretty.GET,<EOL>\"<STR_LIT>\",<EOL>content_type=\"<STR_LIT:application/json>\",<EOL>body=self.items_doc,<EOL>)<EOL>", "docstring": "Set stuff up", "id": "f5624:c0:m1"}
{"signature": "@retrieve<EOL><INDENT>def collections(self, **kwargs):<DEDENT>", "body": "query_string = \"<STR_LIT>\"<EOL>return self._build_query(query_string)<EOL>", "docstring": "Get user collections", "id": "f5629:c0:m35"}
{"signature": "def add_tags(self, item, *tags):", "body": "<EOL>try:<EOL><INDENT>assert item[\"<STR_LIT:data>\"][\"<STR_LIT>\"]<EOL><DEDENT>except AssertionError:<EOL><INDENT>item[\"<STR_LIT:data>\"][\"<STR_LIT>\"] = list()<EOL><DEDENT>for tag in tags:<EOL><INDENT>item[\"<STR_LIT:data>\"][\"<STR_LIT>\"].append({\"<STR_LIT>\": \"<STR_LIT:%s>\" % tag})<EOL><DEDENT>assert self.check_items([item])<EOL>return self.update_item(item)<EOL>", "docstring": "Add one or more tags to a retrieved item,\nthen update it on the server\nAccepts a dict, and one or more tags to add to it\nReturns the updated item from the server", "id": "f5629:c0:m62"}
{"signature": "def _citation_processor(self, retrieved):", "body": "items = []<EOL>for cit in retrieved.entries:<EOL><INDENT>items.append(cit[\"<STR_LIT:content>\"][<NUM_LIT:0>][\"<STR_LIT:value>\"])<EOL><DEDENT>self.url_params = None<EOL>return items<EOL>", "docstring": "Return a list of strings formatted as HTML citation entries", "id": "f5629:c0:m51"}
{"signature": "def upload_attachments(self, attachments, parentid=None, basedir=None):", "body": "return Zupload(self, attachments, parentid, basedir=basedir).upload()<EOL>", "docstring": "Upload files to the already created (but never uploaded) attachments", "id": "f5629:c0:m61"}
{"signature": "def _validate(self, conditions):", "body": "allowed_keys = set(self.searchkeys)<EOL>operators_set = set(self.operators.keys())<EOL>for condition in conditions:<EOL><INDENT>if set(condition.keys()) != allowed_keys:<EOL><INDENT>raise ze.ParamNotPassed(<EOL>\"<STR_LIT>\" % \"<STR_LIT:U+002CU+0020>\".join(self.searchkeys)<EOL>)<EOL><DEDENT>if condition.get(\"<STR_LIT>\") not in operators_set:<EOL><INDENT>raise ze.ParamNotPassed(<EOL>\"<STR_LIT>\"<EOL>% condition.get(\"<STR_LIT>\")<EOL>)<EOL><DEDENT>permitted_operators = self.conditions_operators.get(<EOL>condition.get(\"<STR_LIT>\")<EOL>)<EOL>permitted_operators_list = set(<EOL>[self.operators.get(op) for op in permitted_operators]<EOL>)<EOL>if condition.get(\"<STR_LIT>\") not in permitted_operators_list:<EOL><INDENT>raise ze.ParamNotPassed(<EOL>\"<STR_LIT>\"<EOL>% (<EOL>condition.get(\"<STR_LIT>\"),<EOL>condition.get(\"<STR_LIT>\"),<EOL>\"<STR_LIT:U+002CU+0020>\".join(list(permitted_operators_list)),<EOL>)<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "Validate saved search conditions, raising an error if any contain invalid operators", "id": "f5629:c2:m1"}
{"signature": "def set_fulltext(self, itemkey, payload):", "body": "headers = self.default_headers()<EOL>headers.update({\"<STR_LIT:Content-Type>\": \"<STR_LIT:application/json>\"})<EOL>req = requests.put(<EOL>url=self.endpoint<EOL>+ \"<STR_LIT>\".format(<EOL>t=self.library_type, u=self.library_id, k=itemkey<EOL>),<EOL>headers=headers,<EOL>data=json.dumps(payload),<EOL>)<EOL>try:<EOL><INDENT>req.raise_for_status()<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>error_handler(req)<EOL><DEDENT>return True<EOL>", "docstring": "Set full-text data for an item\n<itemkey> should correspond to an existing attachment item.\npayload should be a dict containing three keys:\n'content': the full-text content and either\nFor text documents, 'indexedChars' and 'totalChars' OR\nFor PDFs, 'indexedPages' and 'totalPages'.", "id": "f5629:c0:m18"}
{"signature": "def create_collection(self, payload, last_modified=None):", "body": "return self.create_collections(payload, last_modified)<EOL>", "docstring": "Alias for create_collections to preserve backward compatibility", "id": "f5629:c0:m71"}
{"signature": "def item_versions(self, **kwargs):", "body": "if \"<STR_LIT>\" not in kwargs:<EOL><INDENT>kwargs[\"<STR_LIT>\"] = None<EOL><DEDENT>kwargs[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>return self.items(**kwargs)<EOL>", "docstring": "Returns dict associating items keys (all no limit by default) to versions.\nAccepts a since= parameter in kwargs to limit the data to those updated since since=", "id": "f5629:c0:m20"}
{"signature": "def fields_types(self, tname, qstring, itemtype):", "body": "<EOL>template_name = tname + itemtype<EOL>query_string = qstring.format(i=itemtype)<EOL>if self.templates.get(template_name) and not self._updated(<EOL>query_string, self.templates[template_name], template_name<EOL>):<EOL><INDENT>return self.templates[template_name][\"<STR_LIT>\"]<EOL><DEDENT>retrieved = self._retrieve_data(query_string)<EOL>return self._cache(retrieved, template_name)<EOL>", "docstring": "Retrieve item fields or creator types", "id": "f5629:c0:m66"}
{"signature": "def everything(self, query):", "body": "try:<EOL><INDENT>items = []<EOL>items.extend(query)<EOL>while self.links.get(\"<STR_LIT>\"):<EOL><INDENT>items.extend(self.follow())<EOL><DEDENT><DEDENT>except TypeError:<EOL><INDENT>items = copy.deepcopy(query)<EOL>while self.links.get(\"<STR_LIT>\"):<EOL><INDENT>items.entries.extend(self.follow().entries)<EOL><DEDENT><DEDENT>return items<EOL>", "docstring": "Retrieve all items in the library for a particular query\nThis method will override the 'limit' parameter if it's been set", "id": "f5629:c0:m46"}
{"signature": "def item_type_fields(self, itemtype):", "body": "return self.fields_types(<EOL>\"<STR_LIT>\", \"<STR_LIT>\", itemtype<EOL>)<EOL>", "docstring": "Get all valid fields for an item", "id": "f5629:c0:m67"}
{"signature": "def update_collection(self, payload, last_modified=None):", "body": "modified = payload[\"<STR_LIT:version>\"]<EOL>if last_modified is not None:<EOL><INDENT>modified = last_modified<EOL><DEDENT>key = payload[\"<STR_LIT:key>\"]<EOL>headers = {\"<STR_LIT>\": str(modified)}<EOL>headers.update(self.default_headers())<EOL>headers.update({\"<STR_LIT:Content-Type>\": \"<STR_LIT:application/json>\"})<EOL>req = requests.put(<EOL>url=self.endpoint<EOL>+ \"<STR_LIT>\".format(<EOL>t=self.library_type, u=self.library_id, c=key<EOL>),<EOL>headers=headers,<EOL>data=json.dumps(payload),<EOL>)<EOL>self.request = req<EOL>try:<EOL><INDENT>req.raise_for_status()<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>error_handler(req)<EOL><DEDENT>return True<EOL>", "docstring": "Update a Zotero collection property such as 'name'\nAccepts one argument, a dict containing collection data retrieved\nusing e.g. 'collections()'", "id": "f5629:c0:m73"}
{"signature": "def count_items(self):", "body": "query = \"<STR_LIT>\"<EOL>return self._totals(query)<EOL>", "docstring": "Return the count of all items in a group / library", "id": "f5629:c0:m11"}
{"signature": "def iterfollow(self):", "body": "<EOL>if self.links is None:<EOL><INDENT>return<EOL><DEDENT>if self.links.get(\"<STR_LIT>\"):<EOL><INDENT>yield self.follow()<EOL><DEDENT>else:<EOL><INDENT>raise StopIteration<EOL><DEDENT>", "docstring": "Generator for self.follow()", "id": "f5629:c0:m44"}
{"signature": "def deletefrom_collection(self, collection, payload):", "body": "ident = payload[\"<STR_LIT:key>\"]<EOL>modified = payload[\"<STR_LIT:version>\"]<EOL>modified_collections = [<EOL>c for c in payload[\"<STR_LIT:data>\"][\"<STR_LIT>\"] if c != collection<EOL>]<EOL>headers = {\"<STR_LIT>\": str(modified)}<EOL>headers.update(self.default_headers())<EOL>req = requests.patch(<EOL>url=self.endpoint<EOL>+ \"<STR_LIT>\".format(<EOL>t=self.library_type, u=self.library_id, i=ident<EOL>),<EOL>data=json.dumps({\"<STR_LIT>\": modified_collections}),<EOL>headers=headers,<EOL>)<EOL>self.request = req<EOL>try:<EOL><INDENT>req.raise_for_status()<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>error_handler(req)<EOL><DEDENT>return True<EOL>", "docstring": "Delete an item from a collection\nAccepts two arguments:\nThe collection ID, and and an item dict", "id": "f5629:c0:m80"}
{"signature": "@retrieve<EOL><INDENT>def collection_items_top(self, collection, **kwargs):<DEDENT>", "body": "query_string = \"<STR_LIT>\".format(<EOL>u=self.library_id, t=self.library_type, c=collection.upper()<EOL>)<EOL>return self._build_query(query_string)<EOL>", "docstring": "Get a specific collection's top-level items", "id": "f5629:c0:m32"}
{"signature": "@ss_wrap<EOL><INDENT>def show_condition_operators(self, condition):<DEDENT>", "body": "<EOL>permitted_operators = self.savedsearch.conditions_operators.get(condition)<EOL>permitted_operators_list = set(<EOL>[self.savedsearch.operators.get(op) for op in permitted_operators]<EOL>)<EOL>return permitted_operators_list<EOL>", "docstring": "Show available operators for a given saved search condition", "id": "f5629:c0:m58"}
{"signature": "def item_template(self, itemtype):", "body": "<EOL>template_name = \"<STR_LIT>\" + itemtype<EOL>query_string = \"<STR_LIT>\".format(i=itemtype)<EOL>if self.templates.get(template_name) and not self._updated(<EOL>query_string, self.templates[template_name], template_name<EOL>):<EOL><INDENT>return copy.deepcopy(self.templates[template_name][\"<STR_LIT>\"])<EOL><DEDENT>retrieved = self._retrieve_data(query_string)<EOL>return self._cache(retrieved, template_name)<EOL>", "docstring": "Get a template for a new item", "id": "f5629:c0:m53"}
{"signature": "def last_modified_version(self, **kwargs):", "body": "self.items(**kwargs)<EOL>return int(self.request.headers.get(\"<STR_LIT>\", <NUM_LIT:0>))<EOL>", "docstring": "Get the last modified version", "id": "f5629:c0:m22"}
{"signature": "def _retrieve_data(self, request=None):", "body": "full_url = \"<STR_LIT>\" % (self.endpoint, request)<EOL>self.self_link = request<EOL>self.request = requests.get(url=full_url, headers=self.default_headers())<EOL>self.request.encoding = \"<STR_LIT:utf-8>\"<EOL>try:<EOL><INDENT>self.request.raise_for_status()<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>error_handler(self.request)<EOL><DEDENT>return self.request<EOL>", "docstring": "Retrieve Zotero items via the API\nCombine endpoint and request to access the specific resource\nReturns a JSON document", "id": "f5629:c0:m4"}
{"signature": "def _extract_links(self):", "body": "extracted = dict()<EOL>try:<EOL><INDENT>for key, value in self.request.links.items():<EOL><INDENT>parsed = urlparse(value[\"<STR_LIT:url>\"])<EOL>fragment = \"<STR_LIT>\".format(path=parsed[<NUM_LIT:2>], query=parsed[<NUM_LIT:4>])<EOL>extracted[key] = fragment<EOL><DEDENT>parsed = list(urlparse(self.self_link))<EOL>stripped = \"<STR_LIT:&>\".join(<EOL>[<EOL>\"<STR_LIT>\" % (p[<NUM_LIT:0>], p[<NUM_LIT:1>])<EOL>for p in parse_qsl(parsed[<NUM_LIT:4>])<EOL>if p[<NUM_LIT:0>] != \"<STR_LIT>\"<EOL>]<EOL>)<EOL>extracted[\"<STR_LIT>\"] = urlunparse(<EOL>[parsed[<NUM_LIT:0>], parsed[<NUM_LIT:1>], parsed[<NUM_LIT:2>], parsed[<NUM_LIT:3>], stripped, parsed[<NUM_LIT:5>]]<EOL>)<EOL>return extracted<EOL><DEDENT>except KeyError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Extract self, first, next, last links from a request response", "id": "f5629:c0:m5"}
{"signature": "@ss_wrap<EOL><INDENT>def saved_search(self, name, conditions):<DEDENT>", "body": "self.savedsearch._validate(conditions)<EOL>payload = [{\"<STR_LIT:name>\": name, \"<STR_LIT>\": conditions}]<EOL>headers = {\"<STR_LIT>\": token()}<EOL>headers.update(self.default_headers())<EOL>req = requests.post(<EOL>url=self.endpoint<EOL>+ \"<STR_LIT>\".format(t=self.library_type, u=self.library_id),<EOL>headers=headers,<EOL>data=json.dumps(payload),<EOL>)<EOL>self.request = req<EOL>try:<EOL><INDENT>req.raise_for_status()<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>error_handler(req)<EOL><DEDENT>return req.json()<EOL>", "docstring": "Create a saved search. conditions is a list of dicts\n        containing search conditions, and must contain the following str keys:\n        condition, operator, value", "id": "f5629:c0:m59"}
{"signature": "def _attachment_template(self, attachment_type):", "body": "return self.item_template(\"<STR_LIT>\" + attachment_type)<EOL>", "docstring": "Return a new attachment template of the required type:\nimported_file\nimported_url\nlinked_file\nlinked_url", "id": "f5629:c0:m54"}
{"signature": "def _json_processor(self, retrieved):", "body": "json_kwargs = {}<EOL>if self.preserve_json_order:<EOL><INDENT>json_kwargs[\"<STR_LIT>\"] = OrderedDict<EOL><DEDENT>try:<EOL><INDENT>items = [<EOL>json.loads(e[\"<STR_LIT:content>\"][<NUM_LIT:0>][\"<STR_LIT:value>\"], **json_kwargs)<EOL>for e in retrieved.entries<EOL>]<EOL><DEDENT>except KeyError:<EOL><INDENT>return self._tags_data(retrieved)<EOL><DEDENT>return items<EOL>", "docstring": "Format and return data from API calls which return Items", "id": "f5629:c0:m48"}
{"signature": "def cleanwrap(func):", "body": "def enc(self, *args, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return (func(self, item, **kwargs) for item in args)<EOL><DEDENT>return enc<EOL>", "docstring": "Wrapper for Zotero._cleanup", "id": "f5629:m2"}
{"signature": "def _upload_file(self, authdata, attachment, reg_key):", "body": "upload_dict = authdata[<EOL>\"<STR_LIT>\"<EOL>]  <EOL>upload_list = [(\"<STR_LIT:key>\", upload_dict[\"<STR_LIT:key>\"])]<EOL>for k in upload_dict:<EOL><INDENT>if k != \"<STR_LIT:key>\":<EOL><INDENT>upload_list.append((k, upload_dict[k]))<EOL><DEDENT><DEDENT>upload_list.append((\"<STR_LIT:file>\", open(attachment, \"<STR_LIT:rb>\").read()))<EOL>upload_pairs = tuple(upload_list)<EOL>try:<EOL><INDENT>upload = requests.post(<EOL>url=authdata[\"<STR_LIT:url>\"],<EOL>files=upload_pairs,<EOL>headers={<EOL>\"<STR_LIT>\": \"<STR_LIT>\"<EOL>% __version__<EOL>},<EOL>)<EOL><DEDENT>except (requests.exceptions.ConnectionError):<EOL><INDENT>raise ze.UploadError(\"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>upload.raise_for_status()<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>error_handler(upload)<EOL><DEDENT>return self._register_upload(authdata, reg_key)<EOL>", "docstring": "Step 2: auth successful, and file not on server\nzotero.org/support/dev/server_api/file_upload#a_full_upload\n\nreg_key isn't used, but we need to pass it through to Step 3", "id": "f5629:c3:m4"}
{"signature": "@property<EOL><INDENT>def delay(self):<DEDENT>", "body": "self.wait = self.wait * <NUM_LIT:2><EOL>return self.wait<EOL>", "docstring": "return increasing delays", "id": "f5629:c1:m1"}
{"signature": "def creator_fields(self):", "body": "<EOL>if self.templates.get(\"<STR_LIT>\") and not self._updated(<EOL>\"<STR_LIT>\", self.templates[\"<STR_LIT>\"], \"<STR_LIT>\"<EOL>):<EOL><INDENT>return self.templates[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL><DEDENT>query_string = \"<STR_LIT>\"<EOL>retrieved = self._retrieve_data(query_string)<EOL>return self._cache(retrieved, \"<STR_LIT>\")<EOL>", "docstring": "Get localised creator fields", "id": "f5629:c0:m65"}
{"signature": "def _create_prelim(self):", "body": "self._verify(self.payload)<EOL>if \"<STR_LIT:key>\" in self.payload[<NUM_LIT:0>] and self.payload[<NUM_LIT:0>][\"<STR_LIT:key>\"]:<EOL><INDENT>if next((i for i in self.payload if \"<STR_LIT:key>\" not in i), False):<EOL><INDENT>raise ze.UnsupportedParams(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>return None  <EOL><DEDENT>liblevel = \"<STR_LIT>\"<EOL>headers = {\"<STR_LIT>\": token(), \"<STR_LIT:Content-Type>\": \"<STR_LIT:application/json>\"}<EOL>headers.update(self.zinstance.default_headers())<EOL>if self.parentid:<EOL><INDENT>for child in self.payload:<EOL><INDENT>child[\"<STR_LIT>\"] = self.parentid<EOL><DEDENT><DEDENT>to_send = json.dumps(self.payload)<EOL>req = requests.post(<EOL>url=self.zinstance.endpoint<EOL>+ liblevel.format(<EOL>t=self.zinstance.library_type, u=self.zinstance.library_id<EOL>),<EOL>data=to_send,<EOL>headers=headers,<EOL>)<EOL>try:<EOL><INDENT>req.raise_for_status()<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>error_handler(req)<EOL><DEDENT>data = req.json()<EOL>for k in data[\"<STR_LIT:success>\"]:<EOL><INDENT>self.payload[int(k)][\"<STR_LIT:key>\"] = data[\"<STR_LIT:success>\"][k]<EOL><DEDENT>return data<EOL>", "docstring": "Step 0: Register intent to upload files", "id": "f5629:c3:m2"}
{"signature": "def new_fulltext(self, version):", "body": "query_string = \"<STR_LIT>\".format(<EOL>t=self.library_type, u=self.library_id<EOL>)<EOL>headers = {\"<STR_LIT>\": str(version)}<EOL>headers.update(self.default_headers())<EOL>req = requests.get(self.endpoint + query_string, headers=headers)<EOL>try:<EOL><INDENT>req.raise_for_status()<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>error_handler(req)<EOL><DEDENT>return req.json()<EOL>", "docstring": "Retrieve list of full-text content items and versions which are newer\nthan <version>", "id": "f5629:c0:m19"}
{"signature": "def item_fields(self):", "body": "<EOL>if self.templates.get(\"<STR_LIT>\") and not self._updated(<EOL>\"<STR_LIT>\", self.templates[\"<STR_LIT>\"], \"<STR_LIT>\"<EOL>):<EOL><INDENT>return self.templates[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL><DEDENT>query_string = \"<STR_LIT>\"<EOL>retrieved = self._retrieve_data(query_string)<EOL>return self._cache(retrieved, \"<STR_LIT>\")<EOL>", "docstring": "Get all available item fields", "id": "f5629:c0:m69"}
{"signature": "def check_items(self, items):", "body": "<EOL>if self.templates.get(\"<STR_LIT>\") and not self._updated(<EOL>\"<STR_LIT>\", self.templates[\"<STR_LIT>\"], \"<STR_LIT>\"<EOL>):<EOL><INDENT>template = set(t[\"<STR_LIT>\"] for t in self.templates[\"<STR_LIT>\"][\"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>template = set(t[\"<STR_LIT>\"] for t in self.item_fields())<EOL><DEDENT>template = template | set(<EOL>[<EOL>\"<STR_LIT:path>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT:version>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT:filename>\",<EOL>]<EOL>)<EOL>template = template | set(self.temp_keys)<EOL>for pos, item in enumerate(items):<EOL><INDENT>if set(item) == set([\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:version>\", \"<STR_LIT>\", \"<STR_LIT:key>\", \"<STR_LIT:data>\"]):<EOL><INDENT>item = item[\"<STR_LIT:data>\"]<EOL><DEDENT>to_check = set(i for i in list(item.keys()))<EOL>difference = to_check.difference(template)<EOL>if difference:<EOL><INDENT>raise ze.InvalidItemFields(<EOL>\"<STR_LIT>\"<EOL>% (pos + <NUM_LIT:1>, \"<STR_LIT:U+0020>\".join(i for i in difference))<EOL>)<EOL><DEDENT><DEDENT>return items<EOL>", "docstring": "Check that items to be created contain no invalid dict keys\nAccepts a single argument: a list of one or more dicts\nThe retrieved fields are cached and re-used until a 304 call fails", "id": "f5629:c0:m63"}
{"signature": "@retrieve<EOL><INDENT>def top(self, **kwargs):<DEDENT>", "body": "query_string = \"<STR_LIT>\"<EOL>return self._build_query(query_string)<EOL>", "docstring": "Get user top-level items", "id": "f5629:c0:m23"}
{"signature": "@retrieve<EOL><INDENT>def publications(self):<DEDENT>", "body": "if self.library_type != \"<STR_LIT>\":<EOL><INDENT>raise ze.CallDoesNotExist(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>query_string = \"<STR_LIT>\"<EOL>return self._build_query(query_string)<EOL>", "docstring": "Return the contents of My Publications", "id": "f5629:c0:m9"}
{"signature": "@retrieve<EOL><INDENT>def collection_tags(self, collection, **kwargs):<DEDENT>", "body": "query_string = \"<STR_LIT>\".format(<EOL>u=self.library_id, t=self.library_type, c=collection.upper()<EOL>)<EOL>return self._build_query(query_string)<EOL>", "docstring": "Get a specific collection's tags", "id": "f5629:c0:m33"}
{"signature": "def reset(self):", "body": "self.wait = <NUM_LIT:1><EOL>", "docstring": "reset delay", "id": "f5629:c1:m2"}
{"signature": "def get_subset(self, subset):", "body": "if len(subset) > <NUM_LIT:50>:<EOL><INDENT>raise ze.TooManyItems(\"<STR_LIT>\")<EOL><DEDENT>params = self.url_params<EOL>retr = []<EOL>for itm in subset:<EOL><INDENT>retr.extend(self.item(itm))<EOL>self.url_params = params<EOL><DEDENT>self.url_params = None<EOL>return retr<EOL>", "docstring": "Retrieve a subset of items\nAccepts a single argument: a list of item IDs", "id": "f5629:c0:m47"}
{"signature": "@retrieve<EOL><INDENT>def collection_items(self, collection, **kwargs):<DEDENT>", "body": "query_string = \"<STR_LIT>\".format(<EOL>u=self.library_id, t=self.library_type, c=collection.upper()<EOL>)<EOL>return self._build_query(query_string)<EOL>", "docstring": "Get a specific collection's items", "id": "f5629:c0:m31"}
{"signature": "@retrieve<EOL><INDENT>def file(self, item, **kwargs):<DEDENT>", "body": "query_string = \"<STR_LIT>\".format(<EOL>u=self.library_id, t=self.library_type, i=item.upper()<EOL>)<EOL>return self._build_query(query_string, no_params=True)<EOL>", "docstring": "Get the file from an specific item", "id": "f5629:c0:m28"}
{"signature": "def num_tagitems(self, tag):", "body": "query = \"<STR_LIT>\".format(<EOL>u=self.library_id, t=self.library_type, ta=tag<EOL>)<EOL>return self._totals(query)<EOL>", "docstring": "Return the total number of items for the specified tag", "id": "f5629:c0:m13"}
{"signature": "@ss_wrap<EOL><INDENT>def delete_saved_search(self, keys):<DEDENT>", "body": "headers = {\"<STR_LIT>\": token()}<EOL>headers.update(self.default_headers())<EOL>req = requests.delete(<EOL>url=self.endpoint<EOL>+ \"<STR_LIT>\".format(t=self.library_type, u=self.library_id),<EOL>headers=headers,<EOL>params={\"<STR_LIT>\": \"<STR_LIT:U+002C>\".join(keys)},<EOL>)<EOL>self.request = req<EOL>try:<EOL><INDENT>req.raise_for_status()<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>error_handler(req)<EOL><DEDENT>return req.status_code<EOL>", "docstring": "Delete one or more saved searches by passing a list of one or more\n        unique search keys", "id": "f5629:c0:m60"}
{"signature": "def addto_collection(self, collection, payload):", "body": "ident = payload[\"<STR_LIT:key>\"]<EOL>modified = payload[\"<STR_LIT:version>\"]<EOL>modified_collections = payload[\"<STR_LIT:data>\"][\"<STR_LIT>\"] + [collection]<EOL>headers = {\"<STR_LIT>\": str(modified)}<EOL>headers.update(self.default_headers())<EOL>req = requests.patch(<EOL>url=self.endpoint<EOL>+ \"<STR_LIT>\".format(<EOL>t=self.library_type, u=self.library_id, i=ident<EOL>),<EOL>data=json.dumps({\"<STR_LIT>\": modified_collections}),<EOL>headers=headers,<EOL>)<EOL>self.request = req<EOL>try:<EOL><INDENT>req.raise_for_status()<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>error_handler(req)<EOL><DEDENT>return True<EOL>", "docstring": "Add one or more items to a collection\nAccepts two arguments:\nThe collection ID, and an item dict", "id": "f5629:c0:m79"}
{"signature": "@retrieve<EOL><INDENT>def collections_top(self, **kwargs):<DEDENT>", "body": "query_string = \"<STR_LIT>\"<EOL>return self._build_query(query_string)<EOL>", "docstring": "Get top-level user collections", "id": "f5629:c0:m37"}
{"signature": "def _verify(self, payload):", "body": "if not payload:  <EOL><INDENT>raise ze.ParamNotPassed<EOL><DEDENT>for templt in payload:<EOL><INDENT>if os.path.isfile(str(self.basedir.joinpath(templt[\"<STR_LIT:filename>\"]))):<EOL><INDENT>try:<EOL><INDENT>with open(str(self.basedir.joinpath(templt[\"<STR_LIT:filename>\"]))):<EOL><INDENT>pass<EOL><DEDENT><DEDENT>except IOError:<EOL><INDENT>raise ze.FileDoesNotExist(<EOL>\"<STR_LIT>\"<EOL>% str(self.basedir.joinpath(templt[\"<STR_LIT:filename>\"]))<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ze.FileDoesNotExist(<EOL>\"<STR_LIT>\"<EOL>% str(self.basedir.joinpath(templt[\"<STR_LIT:filename>\"]))<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "ensure that all files to be attached exist\nopen()'s better than exists(), cos it avoids a race condition", "id": "f5629:c3:m1"}
{"signature": "@ss_wrap<EOL><INDENT>def show_conditions(self):<DEDENT>", "body": "return self.savedsearch.conditions_operators.keys()<EOL>", "docstring": "Show available saved search conditions", "id": "f5629:c0:m57"}
{"signature": "def item_types(self):", "body": "<EOL>if self.templates.get(\"<STR_LIT>\") and not self._updated(<EOL>\"<STR_LIT>\", self.templates[\"<STR_LIT>\"], \"<STR_LIT>\"<EOL>):<EOL><INDENT>return self.templates[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL><DEDENT>query_string = \"<STR_LIT>\"<EOL>retrieved = self._retrieve_data(query_string)<EOL>return self._cache(retrieved, \"<STR_LIT>\")<EOL>", "docstring": "Get all available item types", "id": "f5629:c0:m64"}
{"signature": "def _cache(self, response, key):", "body": "<EOL>thetime = datetime.datetime.utcnow().replace(tzinfo=pytz.timezone(\"<STR_LIT>\"))<EOL>self.templates[key] = {\"<STR_LIT>\": response.json(), \"<STR_LIT>\": thetime}<EOL>return copy.deepcopy(response.json())<EOL>", "docstring": "Add a retrieved template to the cache for 304 checking\naccepts a dict and key name, adds the retrieval time, and adds both\nto self.templates as a new dict using the specified key", "id": "f5629:c0:m2"}
{"signature": "@retrieve<EOL><INDENT>def items(self, **kwargs):<DEDENT>", "body": "query_string = \"<STR_LIT>\"<EOL>return self._build_query(query_string)<EOL>", "docstring": "Get user items", "id": "f5629:c0:m16"}
{"signature": "def default_headers(self):", "body": "_headers = {<EOL>\"<STR_LIT>\": \"<STR_LIT>\" % __version__,<EOL>\"<STR_LIT>\": \"<STR_LIT:%s>\" % __api_version__,<EOL>}<EOL>if self.api_key:<EOL><INDENT>_headers[\"<STR_LIT>\"] = \"<STR_LIT>\" % self.api_key<EOL><DEDENT>return _headers<EOL>", "docstring": "It's always OK to include these headers", "id": "f5629:c0:m1"}
{"signature": "def num_collectionitems(self, collection):", "body": "query = \"<STR_LIT>\".format(<EOL>u=self.library_id, t=self.library_type, c=collection.upper()<EOL>)<EOL>return self._totals(query)<EOL>", "docstring": "Return the total number of items in the specified collection", "id": "f5629:c0:m12"}
{"signature": "def __init__(self, parser, codegen, writer):", "body": "self._parser = parser<EOL>self._codegen = codegen<EOL>self._symbolgen = SymtableCodeGen()<EOL>self._writer = writer<EOL>self._sources = []<EOL>self._searchers = []<EOL>self._borrowers = []<EOL>", "docstring": "Creates an instance of *MibCompiler* class.\n\n           Args:\n               parser: ASN.1 MIB parser object\n               codegen: MIB transformation object\n               writer: transformed MIB storing object", "id": "f5634:c1:m0"}
{"signature": "def addSearchers(self, *searchers):", "body": "self._searchers.extend(searchers)<EOL>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % '<STR_LIT:U+002CU+0020>'.join([str(x) for x in self._searchers]))<EOL>return self<EOL>", "docstring": "Add more transformed MIBs repositories.\n\n        MibCompiler.compile will invoke each of configured searcher objects\n        in order of their addition asking each if already transformed MIB\n        module already exists and is more recent than specified.\n\n        Args:\n            searchers: searcher object(s)\n\n        Returns:\n            reference to itself (can be used for call chaining)", "id": "f5634:c1:m2"}
{"signature": "def compile(self, *mibnames, **options):", "body": "processed = {}<EOL>parsedMibs = {}<EOL>failedMibs = {}<EOL>borrowedMibs = {}<EOL>builtMibs = {}<EOL>symbolTableMap = {}<EOL>mibsToParse = [x for x in mibnames]<EOL>canonicalMibNames = {}<EOL>while mibsToParse:<EOL><INDENT>mibname = mibsToParse.pop(<NUM_LIT:0>)<EOL>if mibname in parsedMibs:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % mibname)<EOL>continue<EOL><DEDENT>if mibname in failedMibs:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % mibname)<EOL>continue<EOL><DEDENT>for source in self._sources:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % source)<EOL>try:<EOL><INDENT>fileInfo, fileData = source.getData(mibname)<EOL>for mibTree in self._parser.parse(fileData):<EOL><INDENT>mibInfo, symbolTable = self._symbolgen.genCode(<EOL>mibTree, symbolTableMap<EOL>)<EOL>symbolTableMap[mibInfo.name] = symbolTable<EOL>parsedMibs[mibInfo.name] = fileInfo, mibInfo, mibTree<EOL>if mibname in failedMibs:<EOL><INDENT>del failedMibs[mibname]<EOL><DEDENT>mibsToParse.extend(mibInfo.imported)<EOL>if fileInfo.name in mibnames:<EOL><INDENT>if mibInfo.name not in canonicalMibNames:<EOL><INDENT>canonicalMibNames[mibInfo.name] = []<EOL><DEDENT>canonicalMibNames[mibInfo.name].append(fileInfo.name)<EOL><DEDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % (<EOL>mibInfo.name, mibname, fileInfo.path, '<STR_LIT:U+002CU+0020>'.join(mibInfo.imported) or '<STR_LIT>'))<EOL><DEDENT>break<EOL><DEDENT>except error.PySmiReaderFileNotFoundError:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % (mibname, source))<EOL>continue<EOL><DEDENT>except error.PySmiError:<EOL><INDENT>exc_class, exc, tb = sys.exc_info()<EOL>exc.source = source<EOL>exc.mibname = mibname<EOL>exc.msg += '<STR_LIT>' % mibname<EOL>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % (<EOL>options.get('<STR_LIT>') 
and '<STR_LIT>' or '<STR_LIT>', exc, source))<EOL>failedMibs[mibname] = exc<EOL>processed[mibname] = statusFailed.setOptions(error=exc)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>exc = error.PySmiError('<STR_LIT>' % mibname)<EOL>exc.mibname = mibname<EOL>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % mibname)<EOL>if mibname not in failedMibs:<EOL><INDENT>failedMibs[mibname] = exc<EOL><DEDENT>if mibname not in processed:<EOL><INDENT>processed[mibname] = statusMissing<EOL><DEDENT><DEDENT><DEDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % (len(parsedMibs), len(failedMibs)))<EOL>for mibname in tuple(parsedMibs):<EOL><INDENT>fileInfo, mibInfo, mibTree = parsedMibs[mibname]<EOL>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % mibname)<EOL>for searcher in self._searchers:<EOL><INDENT>try:<EOL><INDENT>searcher.fileExists(mibname, fileInfo.mtime, rebuild=options.get('<STR_LIT>'))<EOL><DEDENT>except error.PySmiFileNotFoundError:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % (mibname, searcher))<EOL>continue<EOL><DEDENT>except error.PySmiFileNotModifiedError:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % (mibname, searcher))<EOL>del parsedMibs[mibname]<EOL>processed[mibname] = statusUntouched<EOL>break<EOL><DEDENT>except error.PySmiError:<EOL><INDENT>exc_class, exc, tb = sys.exc_info()<EOL>exc.searcher = searcher<EOL>exc.mibname = mibname<EOL>exc.msg += '<STR_LIT>' % mibname<EOL>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % (searcher, exc))<EOL>continue<EOL><DEDENT><DEDENT>else:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % mibname)<EOL>if options.get('<STR_LIT>') and mibname not in canonicalMibNames:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % mibname)<EOL>del parsedMibs[mibname]<EOL>processed[mibname] = 
statusUntouched<EOL>continue<EOL><DEDENT><DEDENT><DEDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % (len(parsedMibs), len(failedMibs)))<EOL>for mibname in parsedMibs.copy():<EOL><INDENT>fileInfo, mibInfo, mibTree = parsedMibs[mibname]<EOL>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % (mibname, fileInfo.path))<EOL>platform_info, user_info = self._get_system_info()<EOL>comments = [<EOL>'<STR_LIT>' % fileInfo.path,<EOL>'<STR_LIT>' % (packageName, packageVersion, time.asctime()),<EOL>'<STR_LIT>' % (platform_info[<NUM_LIT:1>], platform_info[<NUM_LIT:0>],<EOL>platform_info[<NUM_LIT:2>], user_info[<NUM_LIT:0>]),<EOL>'<STR_LIT>' % sys.version.split('<STR_LIT:\\n>')[<NUM_LIT:0>]<EOL>]<EOL>try:<EOL><INDENT>mibInfo, mibData = self._codegen.genCode(<EOL>mibTree,<EOL>symbolTableMap,<EOL>comments=comments,<EOL>dstTemplate=options.get('<STR_LIT>'),<EOL>genTexts=options.get('<STR_LIT>'),<EOL>textFilter=options.get('<STR_LIT>')<EOL>)<EOL>builtMibs[mibname] = fileInfo, mibInfo, mibData<EOL>del parsedMibs[mibname]<EOL>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % (mibname, fileInfo.path, self._writer))<EOL><DEDENT>except error.PySmiError:<EOL><INDENT>exc_class, exc, tb = sys.exc_info()<EOL>exc.handler = self._codegen<EOL>exc.mibname = mibname<EOL>exc.msg += '<STR_LIT>' % mibname<EOL>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % (self._codegen, exc))<EOL>processed[mibname] = statusFailed.setOptions(error=exc)<EOL>failedMibs[mibname] = exc<EOL>del parsedMibs[mibname]<EOL><DEDENT><DEDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % (len(parsedMibs), len(failedMibs)))<EOL>for mibname in failedMibs.copy():<EOL><INDENT>if options.get('<STR_LIT>') and mibname not in canonicalMibNames:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % mibname)<EOL>continue<EOL><DEDENT>for borrower in self._borrowers:<EOL><INDENT>debug.logger & debug.flagCompiler and 
debug.logger('<STR_LIT>' % (mibname, borrower))<EOL>try:<EOL><INDENT>fileInfo, fileData = borrower.getData(<EOL>mibname,<EOL>genTexts=options.get('<STR_LIT>')<EOL>)<EOL>borrowedMibs[mibname] = fileInfo, MibInfo(name=mibname, imported=[]), fileData<EOL>del failedMibs[mibname]<EOL>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % (mibname, borrower))<EOL>break<EOL><DEDENT>except error.PySmiError:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % (borrower, sys.exc_info()[<NUM_LIT:1>]))<EOL><DEDENT><DEDENT><DEDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % (len(borrowedMibs), len(failedMibs)))<EOL>for mibname in borrowedMibs.copy():<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % mibname)<EOL>fileInfo, mibInfo, mibData = borrowedMibs[mibname]<EOL>for searcher in self._searchers:<EOL><INDENT>try:<EOL><INDENT>searcher.fileExists(mibname, fileInfo.mtime, rebuild=options.get('<STR_LIT>'))<EOL><DEDENT>except error.PySmiFileNotFoundError:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % (mibname, searcher))<EOL>continue<EOL><DEDENT>except error.PySmiFileNotModifiedError:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % (mibname, searcher))<EOL>del borrowedMibs[mibname]<EOL>processed[mibname] = statusUntouched<EOL>break<EOL><DEDENT>except error.PySmiError:<EOL><INDENT>exc_class, exc, tb = sys.exc_info()<EOL>exc.searcher = searcher<EOL>exc.mibname = mibname<EOL>exc.msg += '<STR_LIT>' % mibname<EOL>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % (searcher, exc))<EOL>continue<EOL><DEDENT><DEDENT>else:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % mibname)<EOL>if options.get('<STR_LIT>') and mibname not in canonicalMibNames:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % mibname)<EOL>processed[mibname] = 
statusUntouched<EOL><DEDENT>else:<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % mibname)<EOL>builtMibs[mibname] = borrowedMibs[mibname]<EOL>processed[mibname] = statusBorrowed.setOptions(<EOL>path=fileInfo.path, file=fileInfo.file,<EOL>alias=fileInfo.name<EOL>)<EOL><DEDENT>del borrowedMibs[mibname]<EOL><DEDENT><DEDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % (len(builtMibs), len(failedMibs)))<EOL>if failedMibs and not options.get('<STR_LIT>'):<EOL><INDENT>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % '<STR_LIT:U+002CU+0020>'.join(failedMibs))<EOL>for mibname in builtMibs:<EOL><INDENT>processed[mibname] = statusUnprocessed<EOL><DEDENT>return processed<EOL><DEDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % ('<STR_LIT:U+002CU+0020>'.join(builtMibs), '<STR_LIT:U+002CU+0020>'.join(failedMibs)))<EOL>for mibname in builtMibs.copy():<EOL><INDENT>fileInfo, mibInfo, mibData = builtMibs[mibname]<EOL>try:<EOL><INDENT>if options.get('<STR_LIT>', True):<EOL><INDENT>self._writer.putData(<EOL>mibname, mibData, dryRun=options.get('<STR_LIT>')<EOL>)<EOL><DEDENT>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % (mibname, self._writer))<EOL>del builtMibs[mibname]<EOL>if mibname not in processed:<EOL><INDENT>processed[mibname] = statusCompiled.setOptions(<EOL>path=fileInfo.path,<EOL>file=fileInfo.file,<EOL>alias=fileInfo.name,<EOL>oid=mibInfo.oid,<EOL>oids=mibInfo.oids,<EOL>identity=mibInfo.identity,<EOL>revision=mibInfo.revision,<EOL>enterprise=mibInfo.enterprise,<EOL>compliance=mibInfo.compliance,<EOL>)<EOL><DEDENT><DEDENT>except error.PySmiError:<EOL><INDENT>exc_class, exc, tb = sys.exc_info()<EOL>exc.handler = self._codegen<EOL>exc.mibname = mibname<EOL>exc.msg += '<STR_LIT>' % mibname<EOL>debug.logger & debug.flagCompiler and debug.logger('<STR_LIT>' % (exc, self._writer))<EOL>processed[mibname] = statusFailed.setOptions(error=exc)<EOL>failedMibs[mibname] = 
exc<EOL>del builtMibs[mibname]<EOL><DEDENT><DEDENT>debug.logger & debug.flagCompiler and debug.logger(<EOL>'<STR_LIT>' % '<STR_LIT:U+002CU+0020>'.join([x for x in processed if processed[x] in ('<STR_LIT>', '<STR_LIT>')]))<EOL>return processed<EOL>", "docstring": "Transform requested and possibly referred MIBs.\n\n        The *compile* method should be invoked when *MibCompiler* object\n        is operational meaning at least *sources* are specified.\n\n        Once called with a MIB module name, *compile* will:\n\n        * fetch ASN.1 MIB module with given name by calling *sources*\n        * make sure no such transformed MIB already exists (with *searchers*)\n        * parse ASN.1 MIB text with *parser*\n        * perform actual MIB transformation into target format with *code generator*\n        * may attempt to borrow pre-transformed MIB through *borrowers*\n        * write transformed MIB through *writer*\n\n        The above sequence will be performed for each MIB name given in\n        *mibnames* and may be performed for all MIBs referred to from\n        MIBs being processed.\n\n        Args:\n            mibnames: list of ASN.1 MIBs names\n            options: options that affect the way PySMI components work\n\n        Returns:\n            A dictionary of MIB module names processed (keys) and *MibStatus*\n            class instances (values)", "id": "f5634:c1:m5"}
{"signature": "def __init__(self, cbFun, cbCtx=None):", "body": "self._cbFun = cbFun<EOL>self._cbCtx = cbCtx<EOL>", "docstring": "Creates an instance of *CallbackWriter* class.\n\n        Args:\n            cbFun (callable): user-supplied callable\n        Keyword Args:\n            cbCtx: user-supplied object passed intact to user callback", "id": "f5635:c0:m0"}
{"signature": "def __init__(self, path):", "body": "self._path = decode(os.path.normpath(path))<EOL>", "docstring": "Creates an instance of *FileReader* class.\n\n           Args:\n               path: writable directory to store created files", "id": "f5638:c0:m0"}
{"signature": "def __init__(self, package):", "body": "self._package = package<EOL>self.__loader = None<EOL>", "docstring": "Create an instance of *PyPackageSearcher* bound to specific Python\n           package.\n\n           Args:\n               package (str): name of the Python package to look up Python\n                              modules at.", "id": "f5642:c0:m0"}
{"signature": "def __init__(self, host, locationTemplate, timeout=<NUM_LIT:5>, ssl=False, port=<NUM_LIT>,<EOL>user='<STR_LIT>', password='<STR_LIT>'):", "body": "self._host = host<EOL>self._locationTemplate = locationTemplate<EOL>self._timeout = timeout<EOL>self._ssl = ssl<EOL>self._port = port<EOL>self._user = user<EOL>self._password = password<EOL>if '<STR_LIT>' not in locationTemplate:<EOL><INDENT>raise error.PySmiError('<STR_LIT>' % self)<EOL><DEDENT>", "docstring": "Create an instance of *FtpReader* bound to specific FTP server\n           directory.\n\n           Args:\n               host (str): domain name or IP address of web server\n               locationTemplate (str): location part of the directory containing @mib@ magic placeholder to be\n                   replaced with MIB name fetch.\n\n           Keyword Args:\n               timeout (int): response timeout\n               ssl (bool): access HTTPS web site\n               port (int): TCP port web server is listening\n               user (str): username at FTP server\n               password (str): password for *username* at FTP server", "id": "f5649:c0:m0"}
{"signature": "def __init__(self, path, recursive=True, ignoreErrors=True):", "body": "self._path = os.path.normpath(path)<EOL>self._recursive = recursive<EOL>self._ignoreErrors = ignoreErrors<EOL>self._indexLoaded = False<EOL>self._mibIndex = None<EOL>", "docstring": "Create an instance of *FileReader* serving a directory.\n\n           Args:\n               path (str): directory to search MIB files\n\n           Keyword Args:\n               recursive (bool): whether to include subdirectories\n               ignoreErrors (bool): ignore filesystem access errors", "id": "f5652:c0:m0"}
{"signature": "def t_macro_END(self, t):", "body": "t.lexer.begin('<STR_LIT>')<EOL>return t<EOL>", "docstring": "r'END", "id": "f5667:c0:m5"}
{"signature": "def t_HEX_STRING(self, t):", "body": "value = t.value[<NUM_LIT:1>:-<NUM_LIT:2>]<EOL>while value and value[<NUM_LIT:0>] == '<STR_LIT:0>' and len(value) % <NUM_LIT:2>:<EOL><INDENT>value = value[<NUM_LIT:1>:]<EOL><DEDENT>return t<EOL>", "docstring": "r'\\'[0-9a-fA-F]*\\'[hH]", "id": "f5667:c0:m22"}
{"signature": "def t_choice_body(self, t):", "body": "pass<EOL>", "docstring": "r'[^\\}]+", "id": "f5667:c0:m14"}
{"signature": "def t_choice_end(self, t):", "body": "t.lexer.begin('<STR_LIT>')<EOL>", "docstring": "r'\\}", "id": "f5667:c0:m13"}
{"signature": "def t_macro_body(self, t):", "body": "pass<EOL>", "docstring": "r'.+?(?=END)", "id": "f5667:c0:m6"}
{"signature": "def t_exports_body(self, t):", "body": "pass<EOL>", "docstring": "r'[^;]+", "id": "f5667:c0:m10"}
{"signature": "def t_NUMBER(self, t):", "body": "t.value = int(t.value)<EOL>neg = <NUM_LIT:0><EOL>if t.value < <NUM_LIT:0>:<EOL><INDENT>neg = <NUM_LIT:1><EOL><DEDENT>val = abs(t.value)<EOL>if val <= UNSIGNED32_MAX:<EOL><INDENT>if neg:<EOL><INDENT>t.type = '<STR_LIT>'<EOL><DEDENT><DEDENT>elif val <= UNSIGNED64_MAX:<EOL><INDENT>if neg:<EOL><INDENT>t.type = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>t.type = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise error.PySmiLexerError(\"<STR_LIT>\" % t.value, lineno=t.lineno)<EOL><DEDENT>return t<EOL>", "docstring": "r'-?[0-9]+", "id": "f5667:c0:m20"}
{"signature": "def t_BIN_STRING(self, t):", "body": "value = t.value[<NUM_LIT:1>:-<NUM_LIT:2>]<EOL>while value and value[<NUM_LIT:0>] == '<STR_LIT:0>' and len(value) % <NUM_LIT:8>:<EOL><INDENT>value = value[<NUM_LIT:1>:]<EOL><DEDENT>return t<EOL>", "docstring": "r'\\'[01]*\\'[bB]", "id": "f5667:c0:m21"}
{"signature": "def t_LOWERCASE_IDENTIFIER(self, t):", "body": "if t.value[-<NUM_LIT:1>] == '<STR_LIT:->':<EOL><INDENT>raise error.PySmiLexerError(\"<STR_LIT>\" % t.value, lineno=t.lineno)<EOL><DEDENT>return t<EOL>", "docstring": "r'[0-9]*[a-z][-a-zA-z0-9]*", "id": "f5667:c0:m19"}
{"signature": "def p_VarPart(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>] and p[<NUM_LIT:3>] or []<EOL>", "docstring": "VarPart : VARIABLES '{' VarTypes '}'\n                   | empty", "id": "f5672:c0:m45"}
{"signature": "def p_anySubType(self, p):", "body": "if p[<NUM_LIT:1>]:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL><DEDENT>", "docstring": "anySubType : integerSubType\n                      | octetStringSubType\n                      | enumSpec\n                      | empty", "id": "f5672:c0:m66"}
{"signature": "def p_conceptualTable(self, p):", "body": "p[<NUM_LIT:0>] = ('<STR_LIT>', p[<NUM_LIT:3>])<EOL>", "docstring": "conceptualTable : SEQUENCE OF row", "id": "f5672:c0:m32"}
{"signature": "def p_CompliancePart(self, p):", "body": "if p[<NUM_LIT:1>]:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL><DEDENT>", "docstring": "CompliancePart : Compliances\n                          | empty", "id": "f5672:c0:m121"}
{"signature": "def p_importedKeyword(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "importedKeyword : importedSMIKeyword\n                           | BITS\n                           | INTEGER32\n                           | IPADDRESS\n                           | MANDATORY_GROUPS\n                           | MODULE_COMPLIANCE\n                           | MODULE_IDENTITY\n                           | OBJECT_GROUP\n                           | OBJECT_IDENTITY\n                           | OBJECT_TYPE\n                           | OPAQUE\n                           | TEXTUAL_CONVENTION\n                           | TIMETICKS\n                           | UNSIGNED32", "id": "f5672:c0:m15"}
{"signature": "def p_Text(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>][<NUM_LIT:1>:-<NUM_LIT:1>]<EOL>", "docstring": "Text : QUOTED_STRING", "id": "f5672:c0:m103"}
{"signature": "def p_moduleName(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "moduleName : UPPERCASE_IDENTIFIER", "id": "f5672:c0:m17"}
{"signature": "def p_Index(self, p):", "body": "<EOL>p[<NUM_LIT:0>] = p[<NUM_LIT:1>][<NUM_LIT:1>][<NUM_LIT:0>]<EOL>", "docstring": "Index : ObjectName", "id": "f5672:c0:m84"}
{"signature": "def p_typeSMI(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "typeSMI : typeSMIandSPPI\n                   | typeSMIonly", "id": "f5672:c0:m28"}
{"signature": "def p_IndexType(self, p):", "body": "n = len(p)<EOL>if n == <NUM_LIT:2>:<EOL><INDENT>p[<NUM_LIT:0>] = (<NUM_LIT:0>, p[<NUM_LIT:1>])<EOL><DEDENT>elif n == <NUM_LIT:3>:<EOL><INDENT>p[<NUM_LIT:0>] = (<NUM_LIT:1>, p[<NUM_LIT:2>])<EOL><DEDENT>", "docstring": "IndexType : IMPLIED Index\n                     | Index", "id": "f5672:c0:m83"}
{"signature": "def p_typeTag(self, p):", "body": "", "docstring": "typeTag : '[' APPLICATION NUMBER ']' IMPLICIT\n                   | '[' UNIVERSAL NUMBER ']' IMPLICIT", "id": "f5672:c0:m58"}
{"signature": "def p_DefValPart(self, p):", "body": "if p[<NUM_LIT:1>] and p[<NUM_LIT:3>]:<EOL><INDENT>p[<NUM_LIT:0>] = (p[<NUM_LIT:1>], p[<NUM_LIT:3>])<EOL><DEDENT>", "docstring": "DefValPart : DEFVAL '{' Value '}'\n                      | empty", "id": "f5672:c0:m86"}
{"signature": "def p_sequenceItem(self, p):", "body": "p[<NUM_LIT:0>] = (p[<NUM_LIT:1>], p[<NUM_LIT:2>])<EOL>", "docstring": "sequenceItem : LOWERCASE_IDENTIFIER sequenceSyntax", "id": "f5672:c0:m36"}
{"signature": "@staticmethod<EOL><INDENT>def p_typeSMIv1(self, p):<DEDENT>", "body": "n = len(p)<EOL>indextype = n == <NUM_LIT:3> and p[<NUM_LIT:1>] + '<STR_LIT:U+0020>' + p[<NUM_LIT:2>] or p[<NUM_LIT:1>]<EOL>p[<NUM_LIT:0>] = indextype<EOL>", "docstring": "typeSMIv1 : INTEGER\n                     | OCTET STRING\n                     | IPADDRESS\n                     | NETWORKADDRESS", "id": "f5672:c2:m1"}
{"signature": "def p_NamedBit(self, p):", "body": "p[<NUM_LIT:0>] = (p[<NUM_LIT:1>], p[<NUM_LIT:3>])<EOL>", "docstring": "NamedBit : LOWERCASE_IDENTIFIER '(' NUMBER ')", "id": "f5672:c0:m40"}
{"signature": "def p_MandatoryGroup(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>][<NUM_LIT:1>][<NUM_LIT:0>]<EOL>", "docstring": "MandatoryGroup : objectIdentifier", "id": "f5672:c0:m120"}
{"signature": "def p_MaxOrPIBAccessPart(self, p):", "body": "if p[<NUM_LIT:1>]:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL><DEDENT>", "docstring": "MaxOrPIBAccessPart : MaxAccessPart\n                              | empty", "id": "f5672:c0:m49"}
{"signature": "def p_empty(self, p):", "body": "", "docstring": "empty :", "id": "f5672:c0:m145"}
{"signature": "def p_ComplianceModulePart(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "ComplianceModulePart : ComplianceModules", "id": "f5672:c0:m114"}
{"signature": "def p_ComplianceGroup(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:2>][<NUM_LIT:1>][<NUM_LIT:0>]<EOL>", "docstring": "ComplianceGroup : GROUP objectIdentifier DESCRIPTION Text", "id": "f5672:c0:m124"}
{"signature": "def p_typeName(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "typeName : UPPERCASE_IDENTIFIER\n                    | typeSMI", "id": "f5672:c0:m27"}
{"signature": "def p_Revisions(self, p):", "body": "n = len(p)<EOL>if n == <NUM_LIT:3>:<EOL><INDENT>p[<NUM_LIT:0>] = ('<STR_LIT>', p[<NUM_LIT:1>][<NUM_LIT:1>] + [p[<NUM_LIT:2>]])<EOL><DEDENT>elif n == <NUM_LIT:2>:<EOL><INDENT>p[<NUM_LIT:0>] = ('<STR_LIT>', [p[<NUM_LIT:1>]])<EOL><DEDENT>", "docstring": "Revisions : Revisions Revision\n                     | Revision", "id": "f5672:c0:m94"}
{"signature": "def p_Revision(self, p):", "body": "p[<NUM_LIT:0>] = (p[<NUM_LIT:2>],  <EOL>(p[<NUM_LIT:3>], p[<NUM_LIT:4>]))<EOL>", "docstring": "Revision : REVISION ExtUTCTime DESCRIPTION Text", "id": "f5672:c0:m95"}
{"signature": "@staticmethod<EOL><INDENT>def p_sequenceItems(self, p):<DEDENT>", "body": "<EOL>n = len(p)<EOL>if n == <NUM_LIT:4>:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>] + [p[<NUM_LIT:3>]]<EOL><DEDENT>elif n == <NUM_LIT:2>:<EOL><INDENT>p[<NUM_LIT:0>] = [p[<NUM_LIT:1>]]<EOL><DEDENT>elif n == <NUM_LIT:3>:  <EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL><DEDENT>", "docstring": "sequenceItems : sequenceItems ',' sequenceItem\n                         | sequenceItem\n                         | sequenceItems ',", "id": "f5672:c4:m0"}
{"signature": "def p_MibIndex(self, p):", "body": "if p[<NUM_LIT:1>]:<EOL><INDENT>p[<NUM_LIT:0>] = (p[<NUM_LIT:1>], p[<NUM_LIT:3>])<EOL><DEDENT>", "docstring": "MibIndex : INDEX '{' IndexTypes '}'\n                    | empty", "id": "f5672:c0:m81"}
{"signature": "def p_ComplianceObject(self, p):", "body": "<EOL>", "docstring": "ComplianceObject : OBJECT ObjectName SyntaxPart WriteSyntaxPart AccessPart DESCRIPTION Text", "id": "f5672:c0:m125"}
{"signature": "def p_SubjectCategories(self, p):", "body": "<EOL>", "docstring": "SubjectCategories : CategoryIDs", "id": "f5672:c0:m54"}
{"signature": "def p_NamedBits(self, p):", "body": "n = len(p)<EOL>if n == <NUM_LIT:4>:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>] + [p[<NUM_LIT:3>]]<EOL><DEDENT>elif n == <NUM_LIT:2>:<EOL><INDENT>p[<NUM_LIT:0>] = [p[<NUM_LIT:1>]]<EOL><DEDENT>", "docstring": "NamedBits : NamedBits ',' NamedBit\n                     | NamedBit", "id": "f5672:c0:m39"}
{"signature": "def p_Variations(self, p):", "body": "<EOL>", "docstring": "Variations : Variations Variation\n                      | Variation", "id": "f5672:c0:m138"}
{"signature": "def p_BitsValue(self, p):", "body": "if p[<NUM_LIT:1>]:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL><DEDENT>", "docstring": "BitsValue : BitNames\n                     | empty", "id": "f5672:c0:m88"}
{"signature": "def p_fuzzy_lowercase_identifier(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "fuzzy_lowercase_identifier : LOWERCASE_IDENTIFIER\n                                      | UPPERCASE_IDENTIFIER", "id": "f5672:c0:m24"}
{"signature": "def p_VariationPart(self, p):", "body": "<EOL>", "docstring": "VariationPart : Variations\n                         | empty", "id": "f5672:c0:m137"}
{"signature": "def p_trapTypeClause(self, p):", "body": "<EOL>p[<NUM_LIT:0>] = ('<STR_LIT>', p[<NUM_LIT:1>],  <EOL>p[<NUM_LIT:4>],  <EOL>p[<NUM_LIT:5>],  <EOL>p[<NUM_LIT:6>],  <EOL>p[<NUM_LIT:7>],  <EOL>p[<NUM_LIT:9>])<EOL>", "docstring": "trapTypeClause : fuzzy_lowercase_identifier TRAP_TYPE ENTERPRISE objectIdentifier VarPart DescrPart ReferPart COLON_COLON_EQUAL NUMBER", "id": "f5672:c0:m44"}
{"signature": "def p_DisplayPart(self, p):", "body": "if p[<NUM_LIT:1>]:<EOL><INDENT>p[<NUM_LIT:0>] = (p[<NUM_LIT:1>], p[<NUM_LIT:2>])<EOL><DEDENT>", "docstring": "DisplayPart : DISPLAY_HINT Text\n                       | empty", "id": "f5672:c0:m77"}
{"signature": "def p_sequenceItems(self, p):", "body": "<EOL>n = len(p)<EOL>if n == <NUM_LIT:4>:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>] + [p[<NUM_LIT:3>]]<EOL><DEDENT>elif n == <NUM_LIT:2>:<EOL><INDENT>p[<NUM_LIT:0>] = [p[<NUM_LIT:1>]]<EOL><DEDENT>", "docstring": "sequenceItems : sequenceItems ',' sequenceItem\n                         | sequenceItem", "id": "f5672:c0:m35"}
{"signature": "def p_Notification(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>][<NUM_LIT:1>][<NUM_LIT:0>]<EOL>", "docstring": "Notification : NotificationName", "id": "f5672:c0:m102"}
{"signature": "def p_CategoryID(self, p):", "body": "<EOL>", "docstring": "CategoryID : LOWERCASE_IDENTIFIER '(' NUMBER ')'\n                      | LOWERCASE_IDENTIFIER", "id": "f5672:c0:m56"}
{"signature": "def p_typeSMIandSPPI(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "typeSMIandSPPI : IPADDRESS\n                          | TIMETICKS\n                          | OPAQUE\n                          | INTEGER32\n                          | UNSIGNED32", "id": "f5672:c0:m29"}
{"signature": "def p_BitNames(self, p):", "body": "n = len(p)<EOL>if n == <NUM_LIT:4>:<EOL><INDENT>p[<NUM_LIT:0>] = ('<STR_LIT>', p[<NUM_LIT:1>][<NUM_LIT:1>] + [p[<NUM_LIT:3>]])<EOL><DEDENT>elif n == <NUM_LIT:2>:<EOL><INDENT>p[<NUM_LIT:0>] = ('<STR_LIT>', [p[<NUM_LIT:1>]])<EOL><DEDENT>", "docstring": "BitNames : BitNames ',' LOWERCASE_IDENTIFIER\n                    | LOWERCASE_IDENTIFIER", "id": "f5672:c0:m89"}
{"signature": "def p_SubjectCategoriesPart(self, p):", "body": "<EOL>", "docstring": "SubjectCategoriesPart : SUBJECT_CATEGORIES '{' SubjectCategories '}'\n                                 | empty", "id": "f5672:c0:m53"}
{"signature": "def p_importIdentifier(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "importIdentifier : LOWERCASE_IDENTIFIER\n                            | UPPERCASE_IDENTIFIER\n                            | importedKeyword", "id": "f5672:c0:m14"}
{"signature": "def p_Object(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>][<NUM_LIT:1>][<NUM_LIT:0>]<EOL>", "docstring": "Object : ObjectName", "id": "f5672:c0:m99"}
{"signature": "def p_importedSMIKeyword(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "importedSMIKeyword : AGENT_CAPABILITIES\n                              | COUNTER32\n                              | COUNTER64\n                              | GAUGE32\n                              | NOTIFICATION_GROUP\n                              | NOTIFICATION_TYPE\n                              | TRAP_TYPE", "id": "f5672:c0:m16"}
{"signature": "def p_ComplianceModules(self, p):", "body": "n = len(p)<EOL>if n == <NUM_LIT:3>:<EOL><INDENT>p[<NUM_LIT:0>] = ('<STR_LIT>', p[<NUM_LIT:1>][<NUM_LIT:1>] + [p[<NUM_LIT:2>]])<EOL><DEDENT>elif n == <NUM_LIT:2>:<EOL><INDENT>p[<NUM_LIT:0>] = ('<STR_LIT>', [p[<NUM_LIT:1>]])<EOL><DEDENT>", "docstring": "ComplianceModules : ComplianceModules ComplianceModule\n                             | ComplianceModule", "id": "f5672:c0:m115"}
{"signature": "def p_agentCapabilitiesClause(self, p):", "body": "p[<NUM_LIT:0>] = ('<STR_LIT>', p[<NUM_LIT:1>],  <EOL>(p[<NUM_LIT:3>], p[<NUM_LIT:4>]),  <EOL>p[<NUM_LIT:6>],  <EOL>(p[<NUM_LIT:7>], p[<NUM_LIT:8>]),  <EOL>p[<NUM_LIT:9>],  <EOL>p[<NUM_LIT>])<EOL>", "docstring": "agentCapabilitiesClause : LOWERCASE_IDENTIFIER AGENT_CAPABILITIES PRODUCT_RELEASE Text STATUS Status DESCRIPTION Text ReferPart ModulePart_Capabilities COLON_COLON_EQUAL '{' objectIdentifier '}", "id": "f5672:c0:m130"}
{"signature": "def p_sequenceSyntax(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "sequenceSyntax : BITS\n                          | UPPERCASE_IDENTIFIER anySubType\n                          | sequenceObjectSyntax", "id": "f5672:c0:m38"}
{"signature": "def p_Cells(self, p):", "body": "n = len(p)<EOL>if n == <NUM_LIT:4>:<EOL><INDENT>p[<NUM_LIT:0>] = ('<STR_LIT>', p[<NUM_LIT:1>][<NUM_LIT:1>] + [p[<NUM_LIT:3>]])<EOL><DEDENT>elif n == <NUM_LIT:2>:<EOL><INDENT>p[<NUM_LIT:0>] = ('<STR_LIT>', [p[<NUM_LIT:1>]])<EOL><DEDENT>", "docstring": "Cells : Cells ',' Cell\n                 | Cell", "id": "f5672:c0:m143"}
{"signature": "def p_sequenceSimpleSyntax(self, p):", "body": "n = len(p)<EOL>if n == <NUM_LIT:3>:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]  <EOL><DEDENT>elif n == <NUM_LIT:4>:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>] + '<STR_LIT:U+0020>' + p[<NUM_LIT:2>]<EOL><DEDENT>", "docstring": "sequenceSimpleSyntax : INTEGER anySubType\n                                | INTEGER32 anySubType\n                                | OCTET STRING anySubType\n                                | OBJECT IDENTIFIER anySubType", "id": "f5672:c0:m63"}
{"signature": "@staticmethod<EOL><INDENT>def p_enumItem(self, p):<DEDENT>", "body": "p[<NUM_LIT:0>] = (p[<NUM_LIT:1>], p[<NUM_LIT:3>])<EOL>", "docstring": "enumItem : LOWERCASE_IDENTIFIER '(' enumNumber ')'\n                    | UPPERCASE_IDENTIFIER '(' enumNumber ')", "id": "f5672:c6:m0"}
{"signature": "def p_exportsClause(self, p):", "body": "", "docstring": "exportsClause : EXPORTS\n                         | empty", "id": "f5672:c0:m9"}
{"signature": "@staticmethod<EOL><INDENT>def p_Index(self, p):<DEDENT>", "body": "<EOL>p[<NUM_LIT:0>] = isinstance(p[<NUM_LIT:1>], tuple) and p[<NUM_LIT:1>][<NUM_LIT:1>][<NUM_LIT:0>] or p[<NUM_LIT:1>]<EOL>", "docstring": "Index : ObjectName\n                 | typeSMIv1", "id": "f5672:c2:m0"}
{"signature": "def p_VariationAccess(self, p):", "body": "<EOL>", "docstring": "VariationAccess : LOWERCASE_IDENTIFIER", "id": "f5672:c0:m141"}
{"signature": "def p_MandatoryPart(self, p):", "body": "if p[<NUM_LIT:1>]:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:3>]<EOL><DEDENT>", "docstring": "MandatoryPart : MANDATORY_GROUPS '{' MandatoryGroups '}'\n                         | empty", "id": "f5672:c0:m118"}
{"signature": "def p_VariationAccessPart(self, p):", "body": "<EOL>", "docstring": "VariationAccessPart : ACCESS VariationAccess\n                               | empty", "id": "f5672:c0:m140"}
{"signature": "def p_Access(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "Access : LOWERCASE_IDENTIFIER", "id": "f5672:c0:m79"}
{"signature": "def p_ObjectName(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "ObjectName : objectIdentifier", "id": "f5672:c0:m90"}
{"signature": "def p_ModulePart_Capabilities(self, p):", "body": "<EOL>", "docstring": "ModulePart_Capabilities : Modules_Capabilities\n                                   | empty", "id": "f5672:c0:m131"}
{"signature": "def parserFactory(**grammarOptions):", "body": "classAttr = {}<EOL>for option in grammarOptions:<EOL><INDENT>if grammarOptions[option]:<EOL><INDENT>if option not in relaxedGrammar:<EOL><INDENT>raise error.PySmiError('<STR_LIT>' % option)<EOL><DEDENT>for func in relaxedGrammar[option]:<EOL><INDENT>if sys.version_info[<NUM_LIT:0>] > <NUM_LIT:2>:<EOL><INDENT>classAttr[func.__name__] = func<EOL><DEDENT>else:<EOL><INDENT>classAttr[func.func_name] = func<EOL><DEDENT><DEDENT><DEDENT><DEDENT>classAttr['<STR_LIT>'] = lexerFactory(**grammarOptions)<EOL>return type('<STR_LIT>', (SmiV2Parser,), classAttr)<EOL>", "docstring": "Factory function producing custom specializations of base *SmiV2Parser*\n       class.\n\n       Keyword Args:\n           grammarOptions: a list of (bool) typed optional keyword parameters\n                           enabling particular set of SMIv2 grammar relaxations.\n\n       Returns:\n           Specialized copy of *SmiV2Parser* class.\n\n       Notes:\n           The following SMIv2 grammar relaxation parameters are defined:\n\n           * supportSmiV1Keywords - parses SMIv1 grammar\n           * supportIndex - tolerates ASN.1 types in INDEX clause\n           * commaAtTheEndOfImport - tolerates stray comma at the end of IMPORT section\n           * commaAtTheEndOfSequence - tolerates stray comma at the end of sequence of elements in MIB\n           * mixOfCommasAndSpaces - tolerate a mix of comma and spaces in MIB enumerations\n           * uppercaseIdentifier - tolerate uppercased MIB identifiers\n           * lowcaseIdentifier - tolerate lowercase MIB identifiers\n           * curlyBracesAroundEnterpriseInTrap - tolerate curly braces around enterprise ID in TRAP MACRO\n           * noCells - tolerate missing cells (XXX)\n\n       Examples:\n\n       >>> from pysmi.parser import smi\n       >>> SmiV1Parser = smi.parserFactory(supportSmiV1Keywords=True, supportIndex=True)", "id": "f5672:m0"}
{"signature": "def p_DescrPart(self, p):", "body": "if p[<NUM_LIT:1>]:<EOL><INDENT>p[<NUM_LIT:0>] = (p[<NUM_LIT:1>], p[<NUM_LIT:2>])<EOL><DEDENT>", "docstring": "DescrPart : DESCRIPTION Text\n                     | empty", "id": "f5672:c0:m48"}
{"signature": "def p_subidentifiers(self, p):", "body": "n = len(p)<EOL>if n == <NUM_LIT:3>:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>] + [p[<NUM_LIT:2>]]<EOL><DEDENT>elif n == <NUM_LIT:2>:<EOL><INDENT>p[<NUM_LIT:0>] = [p[<NUM_LIT:1>]]<EOL><DEDENT>", "docstring": "subidentifiers : subidentifiers subidentifier\n                          | subidentifier", "id": "f5672:c0:m106"}
{"signature": "def p_subidentifier(self, p):", "body": "n = len(p)<EOL>if n == <NUM_LIT:2>:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL><DEDENT>elif n == <NUM_LIT:5>:<EOL><INDENT>p[<NUM_LIT:0>] = (p[<NUM_LIT:1>], p[<NUM_LIT:3>])<EOL><DEDENT>", "docstring": "subidentifier : fuzzy_lowercase_identifier\n                         | NUMBER\n                         | LOWERCASE_IDENTIFIER '(' NUMBER ')", "id": "f5672:c0:m107"}
{"signature": "def p_declaration(self, p):", "body": "if p[<NUM_LIT:1>]:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL><DEDENT>", "docstring": "declaration : typeDeclaration\n                       | valueDeclaration\n                       | objectIdentityClause\n                       | objectTypeClause\n                       | trapTypeClause\n                       | notificationTypeClause\n                       | moduleIdentityClause\n                       | moduleComplianceClause\n                       | objectGroupClause\n                       | notificationGroupClause\n                       | agentCapabilitiesClause\n                       | macroClause", "id": "f5672:c0:m20"}
{"signature": "def p_declarations(self, p):", "body": "n = len(p)<EOL>if n == <NUM_LIT:3>:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>] + [p[<NUM_LIT:2>]]<EOL><DEDENT>elif n == <NUM_LIT:2>:<EOL><INDENT>p[<NUM_LIT:0>] = [p[<NUM_LIT:1>]]<EOL><DEDENT>", "docstring": "declarations : declarations declaration\n                        | declaration", "id": "f5672:c0:m19"}
{"signature": "def p_Module_Capabilities(self, p):", "body": "<EOL>", "docstring": "Module_Capabilities : SUPPORTS ModuleName_Capabilities INCLUDES '{' CapabilitiesGroups '}' VariationPart", "id": "f5672:c0:m133"}
{"signature": "def p_moduleIdentityClause(self, p):", "body": "p[<NUM_LIT:0>] = ('<STR_LIT>', p[<NUM_LIT:1>],  <EOL>(p[<NUM_LIT:4>], p[<NUM_LIT:5>]),  <EOL>(p[<NUM_LIT:6>], p[<NUM_LIT:7>]),  <EOL>(p[<NUM_LIT:8>], p[<NUM_LIT:9>]),  <EOL>(p[<NUM_LIT:10>], p[<NUM_LIT:11>]),  <EOL>p[<NUM_LIT:12>],  <EOL>p[<NUM_LIT:15>])<EOL>", "docstring": "moduleIdentityClause : LOWERCASE_IDENTIFIER MODULE_IDENTITY SubjectCategoriesPart LAST_UPDATED ExtUTCTime ORGANIZATION Text CONTACT_INFO Text DESCRIPTION Text RevisionPart COLON_COLON_EQUAL '{' objectIdentifier '}", "id": "f5672:c0:m52"}
{"signature": "def coerce(value1, value2, default=None):", "body": "if value1 is not NoSet:<EOL><INDENT>return value1<EOL><DEDENT>elif value2 is not NoSet:<EOL><INDENT>return value2<EOL><DEDENT>else:<EOL><INDENT>return default<EOL><DEDENT>", "docstring": "Exclude NoSet objec\n\n    .. code-block::\n\n        >>> coerce(NoSet, 'value')\n        'value'", "id": "f5714:m0"}
{"signature": "@property<EOL><INDENT>def gamepad(self):<DEDENT>", "body": "state = _xinput_state()<EOL>_xinput.XInputGetState(self.ControllerID - <NUM_LIT:1>, pointer(state))<EOL>self.dwPacketNumber = state.dwPacketNumber<EOL>return state.XINPUT_GAMEPAD<EOL>", "docstring": "Returns the current gamepad state. Buttons pressed is shown as a raw integer value.\n        Use rController.buttons for a list of buttons pressed.", "id": "f5720:c2:m1"}
{"signature": "def main():", "body": "import time<EOL>print('<STR_LIT>')<EOL>print('<STR_LIT>')<EOL>con = rController(<NUM_LIT:1>)<EOL>for i in range(<NUM_LIT:3>):<EOL><INDENT>print('<STR_LIT>')<EOL>time.sleep(<NUM_LIT>)<EOL>print('<STR_LIT>', con.gamepad)<EOL>print('<STR_LIT>', con.buttons)<EOL>time.sleep(<NUM_LIT:0.5>)<EOL><DEDENT>print('<STR_LIT>')<EOL>", "docstring": "Test the functionality of the rController object", "id": "f5720:m0"}
{"signature": "def __init__(self, ControllerID):", "body": "self.ControllerID = ControllerID<EOL>self.dwPacketNumber = c_uint()<EOL>", "docstring": "Initialise Controller object\n        ControllerID    Int     Position number of desired controller (order of connection)", "id": "f5720:c2:m0"}
{"signature": "@register.filter()<EOL>def attachments(value, obj, width = WIDTH):", "body": "match = ATTACHMENT_REGEX.search(value)<EOL>safe = isinstance(value, (SafeString, SafeUnicode))<EOL>while not match is None and match.end() <= len(value):<EOL><INDENT>start = match.start()<EOL>end = match.end()<EOL>groups = match.groups()<EOL>if len(groups) > <NUM_LIT:0>:<EOL><INDENT>index = groups[<NUM_LIT:0>]<EOL>options = None<EOL>if len(groups) > <NUM_LIT:1>:<EOL><INDENT>options = groups[<NUM_LIT:1>]<EOL>if options:<EOL><INDENT>options = options.strip()<EOL>if options:<EOL><INDENT>try:<EOL><INDENT>options = split(smart_str(options))<EOL><DEDENT>except:<EOL><INDENT>options = None<EOL><DEDENT><DEDENT><DEDENT><DEDENT>args = []<EOL>kwargs = {<EOL>'<STR_LIT:width>': width<EOL>}<EOL>if options:<EOL><INDENT>for option in options:<EOL><INDENT>key, equals, val = option.partition('<STR_LIT:=>')<EOL>if equals != '<STR_LIT:=>':<EOL><INDENT>if key and not val:<EOL><INDENT>args.append(key)<EOL><DEDENT>continue<EOL><DEDENT>kwargs[key] = val<EOL><DEDENT><DEDENT>try:<EOL><INDENT>if isinstance(obj, dict):<EOL><INDENT>inner = Attachment(<EOL>**obj['<STR_LIT>'][int(index) - <NUM_LIT:1>]<EOL>).render(*args, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>inner = obj.attachments.all()[int(index) - <NUM_LIT:1>].render(*args, **kwargs)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>inner = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>inner = '<STR_LIT>'<EOL><DEDENT>value = value[:start] + inner + value[end:]<EOL>match = ATTACHMENT_REGEX.search(value, start + len(inner))<EOL><DEDENT>if safe:<EOL><INDENT>return mark_safe(value)<EOL><DEDENT>else:<EOL><INDENT>return value<EOL><DEDENT>", "docstring": "Parse the copy inside ``value`` and look for shortcodes in this format::\n\n    <p>Here's an attachment</p>\n    <p>[attachment 1]</p>\n\nReplace the shortcode with a full image, video or audio element, or download link\n\n:param obj: The object against which attachments are saved\n:param width: The width of 
images or audio/video tags (defaults to the ``ATTACHMENT_WIDTH`` SETTING)", "id": "f5725:m0"}
{"signature": "def aN(a, dim=<NUM_LIT:3>, dtype='<STR_LIT:int>'):", "body": "if not hasattr(a, '<STR_LIT>'):<EOL><INDENT>return np.array([a]*dim, dtype=dtype)<EOL><DEDENT>return np.array(a).astype(dtype)<EOL>", "docstring": "Convert an integer or iterable list to numpy array of length dim. This func\nis used to allow other methods to take both scalars non-numpy arrays with\nflexibility.\n\nParameters\n----------\na : number, iterable, array-like\n    The object to convert to numpy array\n\ndim : integer\n    The length of the resulting array\n\ndtype : string or np.dtype\n    Type which the resulting array should be, e.g. 'float', np.int8\n\nReturns\n-------\narr : numpy array\n    Resulting numpy array of length ``dim`` and type ``dtype``\n\nExamples\n--------\n>>> aN(1, dim=2, dtype='float')\narray([ 1.,  1.])\n\n>>> aN(1, dtype='int')\narray([1, 1, 1])\n\n>>> aN(np.array([1,2,3]), dtype='float')\narray([ 1.,  2.,  3.])", "id": "f5741:m5"}
{"signature": "def contains(self, items, pad=<NUM_LIT:0>):", "body": "o = ((items >= self.l-pad) & (items < self.r+pad))<EOL>if len(o.shape) == <NUM_LIT:2>:<EOL><INDENT>o = o.all(axis=-<NUM_LIT:1>)<EOL><DEDENT>elif len(o.shape) == <NUM_LIT:1>:<EOL><INDENT>o = o.all()<EOL><DEDENT>return o<EOL>", "docstring": "Test whether coordinates are contained within this tile.\n\nParameters\n----------\nitems : ndarray [3] or [N, 3]\n    N coordinates to check are within the bounds of the tile\n\npad : integer or ndarray [3]\n    anisotropic padding to apply in the contain test\n\nExamples\n--------\n>>> Tile(5, dim=2).contains([[-1, 0], [2, 3], [2, 6]])\narray([False,  True, False], dtype=bool)", "id": "f5741:c1:m15"}
{"signature": "@staticmethod<EOL><INDENT>def boundingtile(tiles, *args):<DEDENT>", "body": "tiles = listify(tiles) + listify(args)<EOL>if len(tiles) < <NUM_LIT:2>:<EOL><INDENT>return tiles[<NUM_LIT:0>]<EOL><DEDENT>tile = tiles[<NUM_LIT:0>]<EOL>l, r = tile.l.copy(), tile.r.copy()<EOL>for tile in tiles[<NUM_LIT:1>:]:<EOL><INDENT>l = amin(l, tile.l)<EOL>r = amax(r, tile.r)<EOL><DEDENT>return Tile(l, r, dtype=l.dtype)<EOL>", "docstring": "Convex bounding box of a group of tiles\n\n>>> Tile.boundingtile(Tile([0, 1], [5, 4]), Tile([1, 0], [4, 5]))\nTile [0, 0] -> [5, 5] ([5, 5])", "id": "f5741:c1:m17"}
{"signature": "def delistify(a, b=None):", "body": "if isinstance(b, (tuple, list, np.ndarray)):<EOL><INDENT>if isinstance(a, (tuple, list, np.ndarray)):<EOL><INDENT>return type(b)(a)<EOL><DEDENT>return type(b)([a])<EOL><DEDENT>else:<EOL><INDENT>if isinstance(a, (tuple, list, np.ndarray)) and len(a) == <NUM_LIT:1>:<EOL><INDENT>return a[<NUM_LIT:0>]<EOL><DEDENT>return a<EOL><DEDENT>return a<EOL>", "docstring": "If a single element list, extract the element as an object, otherwise\nleave as it is.\n\nExamples\n--------\n>>> delistify('string')\n'string'\n\n>>> delistify(['string'])\n'string'\n\n>>> delistify(['string', 'other'])\n['string', 'other']\n\n>>> delistify(np.array([1.0]))\n1.0\n\n>>> delistify([1, 2, 3])\n[1, 2, 3]", "id": "f5741:m2"}
{"signature": "def __init__(self, left, right=None, mins=None, maxs=None,<EOL>size=None, centered=False, dim=None, dtype='<STR_LIT:int>'):", "body": "self.dtype = dtype<EOL>dims = set([getdim(i) for i in [left, right, size]] + [dim])<EOL>dims = dims.difference(set([None]))<EOL>if len(dims) == <NUM_LIT:0>:<EOL><INDENT>dim = <NUM_LIT:3><EOL><DEDENT>elif len(dims) == <NUM_LIT:1>:<EOL><INDENT>dim = dims.pop()<EOL><DEDENT>elif len(dims) > <NUM_LIT:1>:<EOL><INDENT>raise AttributeError(\"<STR_LIT>\")<EOL><DEDENT>nkw = {'<STR_LIT>': dim, '<STR_LIT>': self.dtype}<EOL>if right is None:<EOL><INDENT>if size is None:<EOL><INDENT>right = left<EOL>left = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>if not centered:<EOL><INDENT>right = aN(left, **nkw) + aN(size, **nkw)<EOL><DEDENT>else:<EOL><INDENT>l = aN(left, **nkw)<EOL>s = aN(size, **nkw)<EOL>if isint(self.dtype):<EOL><INDENT>left = l - s//<NUM_LIT:2><EOL>right = left + s<EOL><DEDENT>else:<EOL><INDENT>left, right = l - s/<NUM_LIT>, l + s/<NUM_LIT><EOL><DEDENT><DEDENT>assert np.all((right - left) == size)<EOL><DEDENT><DEDENT>left = aN(left, **nkw)<EOL>right = aN(right, **nkw)<EOL>if dim is not None:<EOL><INDENT>self.dim = dim<EOL>assert(left.shape[<NUM_LIT:0>] == dim)<EOL>assert(right.shape[<NUM_LIT:0>] == dim)<EOL><DEDENT>else:<EOL><INDENT>self.dim = left.shape[<NUM_LIT:0>]<EOL><DEDENT>if mins is not None:<EOL><INDENT>left = amax(left, aN(mins, **nkw))<EOL><DEDENT>if maxs is not None:<EOL><INDENT>right = amin(right, aN(maxs, **nkw))<EOL><DEDENT>self.l = np.array(left)<EOL>self.r = np.array(right)<EOL>self._build_caches()<EOL>", "docstring": "Creates a tile element which represents a hyperrectangle in D\ndimensions. 
These hyperrectangles may be operated upon to find\nintersections, bounding tiles, calculate interior coordinates\nand other common operations.\n\n\nParameters\n----------\nleft : number or array-like\n    Left side of the tile\n\nright : (optional) number or array-like\n    If provided along with left, gives the right side of the tile\n\nmins : (optional) number or array-like\n    Can be provided to clip the sides of the Tile to certain minimum\n\nmaxs : (optional) number or array-like\n    Can be provided to clip the sides of the Tile to certain maximum\n\nsize : (optional) number or array-like\n    If provided along with left gives the size of the tile\n\ncentered : boolean\n    * If true:   ``[left] - [size]/2 -> [left] + [size]/2``\n    * If false:  ``[left] -> [left] + [size]``\n\ndim : integer\n    Number of dimensions for the Tile\n\ndtype : string, np.dtype\n    Resulting type of number for the Tile coordinates\n\nNotes\n-----\n\nThese parameters can be combined into many different combinations\n(where [] indicates an array created from either a single number or any\niterable):\n\n    * left : ``[0,0,0] -> [left]``\n    * left, right : ``[left] -> [right]``\n    * left, size (not centered) : ``[left] -> [left] + [size]``\n    * left, size (yes centered) : ``[left] - [size]/2 -> [left] + [size]/2``\n\nEach of these can be limited by using (mins, maxs) which are applied\nafter calculating left, right for each element:\n\n    * ``left = max(left, [mins])``\n    * ``right = min(right, [maxs])``\n\nSince tiles are used for array slicing, they only allow integer values,\nwhich can truncated without warning from float.\n\nNotes on dimension. The dimensionality is determined first by the shape\nof left, right, size if provided not as an integer. If it not provided there\nthen it is assumed to be 3D. This can be overridden by setting dim in the\narguments. 
For example:\n\n    * Tile(3)         : ``[0,0,0] -> [3,3,3]``\n    * Tile(3, dim=2)  : ``[0,0] -> [3,3]``\n    * Tile([3])       : ``[0] -> [3]``\n\nExamples\n--------\n>>> Tile(10)\nTile [0, 0, 0] -> [10, 10, 10] ([10, 10, 10])\n\n>>> Tile([1,2])\nTile [0, 0] -> [1, 2] ([1, 2])\n\n>>> Tile(0, size=4, centered=True)\nTile [-2, -2, -2] -> [2, 2, 2] ([4, 4, 4])\n\n>>> Tile([-1, 0, 1], right=10, mins=0)\nTile [0, 0, 1] -> [10, 10, 10] ([10, 10, 9])\n\n>>> Tile(10, dtype='float')\nTile [0.0, 0.0, 0.0] -> [10.0, 10.0, 10.0] ([10.0, 10.0, 10.0])", "id": "f5741:c1:m0"}
{"signature": "def overhang(self, tile):", "body": "ll = np.abs(amin(self.l - tile.l, aN(<NUM_LIT:0>, dim=self.dim)))<EOL>rr = np.abs(amax(self.r - tile.r, aN(<NUM_LIT:0>, dim=self.dim)))<EOL>return ll, rr<EOL>", "docstring": "Get the left and right absolute overflow -- the amount of box\noverhanging `tile`, can be viewed as self \\\\ tile (set theory relative\ncomplement, but in a bounding sense)", "id": "f5741:c1:m25"}
{"signature": "def kvectors(self, norm=False, form='<STR_LIT>', real=False, shift=False):", "body": "if norm is False:<EOL><INDENT>norm = <NUM_LIT:1><EOL><DEDENT>if norm is True:<EOL><INDENT>norm = np.array(self.shape)<EOL><DEDENT>norm = aN(norm, self.dim, dtype='<STR_LIT:float>')<EOL>v = list(np.fft.fftfreq(self.shape[i])/norm[i] for i in range(self.dim))<EOL>if shift:<EOL><INDENT>v = list(np.fft.fftshift(t) for t in v)<EOL><DEDENT>if real:<EOL><INDENT>v[-<NUM_LIT:1>] = v[-<NUM_LIT:1>][:(self.shape[-<NUM_LIT:1>]+<NUM_LIT:1>)//<NUM_LIT:2>]<EOL><DEDENT>return self._format_vector(v, form=form)<EOL>", "docstring": "Return the kvectors associated with this tile, given the standard form\nof -0.5 to 0.5. `norm` and `form` arguments arethe same as that passed to\n`Tile.coords`.\n\nParameters\n-----------\nreal : boolean\n    whether to return kvectors associated with the real fft instead", "id": "f5741:c1:m12"}
{"signature": "def cdd(d, k):", "body": "if not isinstance(k, list):<EOL><INDENT>k = [k]<EOL><DEDENT>for i in k:<EOL><INDENT>if i in d:<EOL><INDENT>d.pop(i)<EOL><DEDENT><DEDENT>", "docstring": "Conditionally delete key (or list of keys) 'k' from dict 'd", "id": "f5741:m9"}
{"signature": "def oslicer(self, tile):", "body": "mask = None<EOL>vecs = tile.coords(form='<STR_LIT>')<EOL>for v in vecs:<EOL><INDENT>v[self.slicer] = -<NUM_LIT:1><EOL>mask = mask & (v > <NUM_LIT:0>) if mask is not None else (v><NUM_LIT:0>)<EOL><DEDENT>return tuple(np.array(i).astype('<STR_LIT:int>') for i in zip(*[v[mask] for v in vecs]))<EOL>", "docstring": "Opposite slicer, the outer part wrt to a field", "id": "f5741:c1:m3"}
{"signature": "def _format_vector(self, vecs, form='<STR_LIT>'):", "body": "if form == '<STR_LIT>':<EOL><INDENT>return np.meshgrid(*vecs, indexing='<STR_LIT>')<EOL><DEDENT>elif form == '<STR_LIT>':<EOL><INDENT>vecs = np.meshgrid(*vecs, indexing='<STR_LIT>')<EOL>return np.rollaxis(np.array(np.broadcast_arrays(*vecs)),<NUM_LIT:0>,self.dim+<NUM_LIT:1>)<EOL><DEDENT>elif form == '<STR_LIT>':<EOL><INDENT>return vecs<EOL><DEDENT>else:<EOL><INDENT>return [v[self._coord_slicers[i]] for i,v in enumerate(vecs)]<EOL><DEDENT>", "docstring": "Format a 3d vector field in certain ways, see `coords` for a description\nof each formatting method.", "id": "f5741:c1:m10"}
{"signature": "def filtered_image(self, im):", "body": "q = np.fft.fftn(im)<EOL>for k,v in self.filters:<EOL><INDENT>q[k] -= v<EOL><DEDENT>return np.real(np.fft.ifftn(q))<EOL>", "docstring": "Returns a filtered image after applying the Fourier-space filters", "id": "f5741:c2:m3"}
{"signature": "def patch_docs(subclass, superclass):", "body": "funcs0 = inspect.getmembers(subclass, predicate=inspect.ismethod)<EOL>funcs1 = inspect.getmembers(superclass, predicate=inspect.ismethod)<EOL>funcs1 = [f[<NUM_LIT:0>] for f in funcs1]<EOL>for name, func in funcs0:<EOL><INDENT>if name.startswith('<STR_LIT:_>'):<EOL><INDENT>continue<EOL><DEDENT>if name not in funcs1:<EOL><INDENT>continue<EOL><DEDENT>if func.__doc__ is None:<EOL><INDENT>func = getattr(subclass, name)<EOL>func.__func__.__doc__ = getattr(superclass, name).__func__.__doc__<EOL><DEDENT><DEDENT>", "docstring": "Apply the documentation from ``superclass`` to ``subclass`` by filling\nin all overridden member function docstrings with those from the\nparent class", "id": "f5741:m12"}
{"signature": "@property<EOL><INDENT>def slicer(self):<DEDENT>", "body": "return tuple(np.s_[l:r] for l,r in zip(*self.bounds))<EOL>", "docstring": "Array slicer object for this tile\n\n>>> Tile((2,3)).slicer\n(slice(0, 2, None), slice(0, 3, None))\n\n>>> np.arange(10)[Tile((4,)).slicer]\narray([0, 1, 2, 3])", "id": "f5741:c1:m2"}
{"signature": "def translate(self, dr):", "body": "tile = self.copy()<EOL>tile.l += dr<EOL>tile.r += dr<EOL>return tile<EOL>", "docstring": "Translate a tile by an amount dr\n\n>>> Tile(5).translate(1)\nTile [1, 1, 1] -> [6, 6, 6] ([5, 5, 5])", "id": "f5741:c1:m23"}
{"signature": "def update(self, value=<NUM_LIT:0>):", "body": "self._deltas.append(time.time())<EOL>self.value = value<EOL>self._percent = <NUM_LIT> * self.value / self.num<EOL>if self.bar:<EOL><INDENT>self._bars = self._bar_symbol*int(np.round(self._percent / <NUM_LIT> * self._barsize))<EOL><DEDENT>if (len(self._deltas) < <NUM_LIT:2>) or (self._deltas[-<NUM_LIT:1>] - self._deltas[-<NUM_LIT:2>]) > <NUM_LIT>:<EOL><INDENT>self._estimate_time()<EOL>self._draw()<EOL><DEDENT>if self.value == self.num:<EOL><INDENT>self.end()<EOL><DEDENT>", "docstring": "Update the value of the progress and update progress bar.\n\nParameters\n-----------\nvalue : integer\n    The current iteration of the progress", "id": "f5741:c5:m4"}
{"signature": "@property<EOL><INDENT>def volume(self):<DEDENT>", "body": "return np.prod(self.shape)<EOL>", "docstring": "Volume of the tile\n\n>>> Tile(10).volume\n1000\n\n>>> Tile(np.sqrt(2), dim=2, dtype='float').volume #doctest: +ELLIPSIS\n2.0000000000...", "id": "f5741:c1:m7"}
{"signature": "def __init__(self, filename, tile=None, invert=False, exposure=None,<EOL>float_precision=np.float64):", "body": "self.filename = filename<EOL>self.invert = invert<EOL>self.filters = None<EOL>self.exposure = exposure<EOL>if float_precision not in (np.float64, np.float32, np.float16):<EOL><INDENT>raise ValueError('<STR_LIT>' +<EOL>'<STR_LIT>')<EOL><DEDENT>self.float_precision = float_precision<EOL>image = self.load_image()<EOL>super(RawImage, self).__init__(image, tile=tile)<EOL>", "docstring": "An image object which stores information about desired region, exposure\ncompensation, color inversion, and filters to remove certain fourier\npeaks.\n\nParameters\n----------\nfilename : str\n    Path of the image file. Recommended that you supply a relative path\n    so that transfer between computers is possible, i.e. if the file is located\n    at ``/home/user/data/1.tif`` then work in the directory ``/home/user/data``\n    and supply the filename ``1.tif``.\n\ntile : :class:`peri.util.Tile`\n    the region of the image to crop out to use for the actual featuring, etc\n\ninvert : boolean\n    Whether to invert the image.\n\nexposure : tuple of numbers (min, max) | None\n    If set, it is the values used to normalize the image. It is the\n    values which map to 0 and 1 in the loaded version of the image, the\n    default being for 8-bit images, mapping raw values (0, 255) to\n    loaded values (0, 1). This functionality is provided since the\n    noise and exposure may change between images where a common scaling\n    is desired for proper comparison.  Setting this values allows a\n    series of images to be initialized with the same ILM, PSF etc.\n    Should be the bit value of the camera.\n\nfloat_precision : numpy float datatype\n    One of numpy.float16, numpy.float32, numpy.float64; precision\n    for precomputed arrays. Default is np.float64; make it 16 or 32\n    to save memory.", "id": "f5741:c4:m0"}
{"signature": "def __init__(self, num, label='<STR_LIT>', value=<NUM_LIT:0>, screen=<NUM_LIT>,<EOL>time_remaining=True, bar=True, bar_symbol='<STR_LIT:=>', bar_caps='<STR_LIT>',<EOL>bar_decimals=<NUM_LIT:2>, display=True):", "body": "<EOL>self.num = num<EOL>self.value = value<EOL>self._percent = <NUM_LIT:0><EOL>self.time_remaining = time_remaining<EOL>self._deltas = []<EOL>self.display = display<EOL>self.label = label<EOL>self.bar = bar<EOL>self._bar_symbol = bar_symbol<EOL>self._bar_caps = bar_caps<EOL>self._decimals = bar_decimals<EOL>self.screen = screen<EOL>if len(self._bar_caps) % <NUM_LIT:2> != <NUM_LIT:0>:<EOL><INDENT>raise AttributeError(\"<STR_LIT>\")<EOL><DEDENT>if self.bar:<EOL><INDENT>self._numsize = <NUM_LIT:3> + self._decimals + <NUM_LIT:1><EOL>self._cap_len = len(self._bar_caps)//<NUM_LIT:2><EOL>self._capl = self._bar_caps[:self._cap_len]<EOL>self._capr = self._bar_caps[self._cap_len:]<EOL>self._time_space = <NUM_LIT:11> if self.time_remaining else <NUM_LIT:0><EOL>self._barsize = (<EOL>self.screen - len(self.label) - self._numsize -<EOL>len(self._bar_caps) - <NUM_LIT:2> - <NUM_LIT:1> - self._time_space<EOL>)<EOL>self._formatstr = '<STR_LIT>'<EOL>self._percent = <NUM_LIT:0><EOL>self._dt = '<STR_LIT>'<EOL>self._bars = '<STR_LIT>'<EOL>if self.time_remaining:<EOL><INDENT>self._formatstr += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._digits = str(int(np.ceil(np.log10(self.num))))<EOL>self._formatstr = '<STR_LIT>'<EOL>self._dt = '<STR_LIT>'<EOL>if self.time_remaining:<EOL><INDENT>self._formatstr += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>self.update()<EOL>", "docstring": "ProgressBar class which creates a dynamic ASCII progress bar of two\ndifferent varieties:\n\n    1) A bar chart that looks like the following:\n        ``Progress [================      ]  63.00%``\n\n    2) A simple number completed look:\n        ``Progress :   17 / 289``\n\nParameters\n-----------\nnum : integer\n    The number of tasks that need to be completed\n\nlabel : 
string [default: 'Progress']\n    The label for this particular progress indicator,\n\nvalue : integer [default: 0]\n    Starting value\n\nscreen : integer [default: 79]\n    Size the screen to use for the progress bar\n\ntime_remaining : boolean [default: True]\n    Display estimated time remaining\n\nbar : boolean [default: True]\n    Whether or not to display the bar chart\n\nbar_symbol : char [default: '=']\n    The character to use to fill in the bar chart\n\nbar_caps : string [default: '[]']\n    Characters to use as the end caps of the.  The string will be split in\n    half and each half put on a side of the chart\n\nbar_decimals : integer [default: 2]\n    Number of decimal places to include in the percentage\n\ndisplay : boolean [default: True]\n    a crutch so that we don't have a lot of ``if``s later.  display\n    or don't display the progress bar", "id": "f5741:c5:m0"}
{"signature": "def set_tile(self, tile):", "body": "self.tile = tile<EOL>", "docstring": "Sets the current tile of the image to a `peri.util.Tile`", "id": "f5741:c2:m4"}
{"signature": "def set_scale(self, exposure):", "body": "self.exposure = exposure<EOL>", "docstring": "Set the exposure parameter for this image, which determines the\nvalues which get mapped to (0,1) in the output image.\n\nSee also\n--------\n:class:`peri.util.RawImage`", "id": "f5741:c4:m2"}
{"signature": "@staticmethod<EOL><INDENT>def intersection(tiles, *args):<DEDENT>", "body": "tiles = listify(tiles) + listify(args)<EOL>if len(tiles) < <NUM_LIT:2>:<EOL><INDENT>return tiles[<NUM_LIT:0>]<EOL><DEDENT>tile = tiles[<NUM_LIT:0>]<EOL>l, r = tile.l.copy(), tile.r.copy()<EOL>for tile in tiles[<NUM_LIT:1>:]:<EOL><INDENT>l = amax(l, tile.l)<EOL>r = amin(r, tile.r)<EOL><DEDENT>return Tile(l, r, dtype=l.dtype)<EOL>", "docstring": "Intersection of tiles, returned as a tile\n\n>>> Tile.intersection(Tile([0, 1], [5, 4]), Tile([1, 0], [4, 5]))\nTile [1, 1] -> [4, 4] ([3, 3])", "id": "f5741:c1:m16"}
{"signature": "@staticmethod<EOL><INDENT>def get_scale_from_raw(raw, scaled):<DEDENT>", "body": "t0, t1 = scaled.min(), scaled.max()<EOL>r0, r1 = float(raw.min()), float(raw.max())<EOL>rmin = (t1*r0 - t0*r1) / (t1 - t0)<EOL>rmax = (r1 - r0) / (t1 - t0) + rmin<EOL>return (rmin, rmax)<EOL>", "docstring": "When given a raw image and the scaled version of the same image, it\nextracts the ``exposure`` parameters associated with those images.\nThis is useful when\n\nParameters\n----------\nraw : array_like\n    The image loaded fresh from a file\n\nscaled : array_like\n    Image scaled using :func:`peri.initializers.normalize`\n\nReturns\n-------\nexposure : tuple of numbers\n    Returns the exposure parameters (emin, emax) which get mapped to\n    (0, 1) in the scaled image. Can be passed to\n    :func:`~peri.util.RawImage.__init__`", "id": "f5741:c4:m4"}
{"signature": "def load_image(self):", "body": "try:<EOL><INDENT>image = initializers.load_tiff(self.filename)<EOL>image = initializers.normalize(<EOL>image, invert=self.invert, scale=self.exposure,<EOL>dtype=self.float_precision<EOL>)<EOL><DEDENT>except IOError as e:<EOL><INDENT>log.error(\"<STR_LIT>\" % self.filename)<EOL>raise e<EOL><DEDENT>return image<EOL>", "docstring": "Read the file and perform any transforms to get a loaded image", "id": "f5741:c4:m1"}
{"signature": "def listify(a):", "body": "if a is None:<EOL><INDENT>return []<EOL><DEDENT>elif not isinstance(a, (tuple, list, np.ndarray)):<EOL><INDENT>return [a]<EOL><DEDENT>return list(a)<EOL>", "docstring": "Convert a scalar ``a`` to a list and all iterables to list as well.\n\nExamples\n--------\n>>> listify(0)\n[0]\n\n>>> listify([1,2,3])\n[1, 2, 3]\n\n>>> listify('a')\n['a']\n\n>>> listify(np.array([1,2,3]))\n[1, 2, 3]\n\n>>> listify('string')\n['string']", "id": "f5741:m1"}
{"signature": "def _jtj(self, funct, params=None, dl=<NUM_LIT>, rts=False, **kwargs):", "body": "grad = self._grad(funct=funct, params=params, dl=dl, rts=rts, **kwargs)<EOL>return np.dot(grad, grad.T)<EOL>", "docstring": "jTj of a `func` wrt to parmaeters `params`. (see _graddoc)", "id": "f5742:c1:m18"}
{"signature": "@property<EOL><INDENT>def model(self):<DEDENT>", "body": "return self._model<EOL>", "docstring": "Get the current model fit to the data", "id": "f5742:c2:m3"}
{"signature": "def update(self, params, values):", "body": "<EOL>comps = self.affected_components(params)<EOL>if len(comps) == <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>otile, itile, iotile = self.get_update_io_tiles(params, values)<EOL>if otile is None:<EOL><INDENT>return False<EOL><DEDENT>self.set_tile(otile)<EOL>oldmodel = self._model[itile.slicer].copy()<EOL>if len(comps) == <NUM_LIT:1> and self.mdl.get_difference_model(comps[<NUM_LIT:0>].category):<EOL><INDENT>comp = comps[<NUM_LIT:0>]<EOL>model0 = copy.deepcopy(comp.get())<EOL>super(ImageState, self).update(params, values)<EOL>model1 = copy.deepcopy(comp.get())<EOL>diff = model1 - model0<EOL>diff = self.mdl.evaluate(<EOL>self.comps, '<STR_LIT>', diffmap={comp.category: diff}<EOL>)<EOL>if isinstance(model0, (float, int)):<EOL><INDENT>self._model[itile.slicer] += diff<EOL><DEDENT>else:<EOL><INDENT>self._model[itile.slicer] += diff[iotile.slicer]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>super(ImageState, self).update(params, values)<EOL>diff = self.mdl.evaluate(self.comps, '<STR_LIT>')<EOL>self._model[itile.slicer] = diff[iotile.slicer]<EOL><DEDENT>newmodel = self._model[itile.slicer].copy()<EOL>self.update_from_model_change(oldmodel, newmodel, itile)<EOL>return True<EOL>", "docstring": "Actually perform an image (etc) update based on a set of params and\nvalues. These parameter can be any present in the components in any\nnumber. If there is only one component affected then difference image\nupdates will be employed.", "id": "f5742:c3:m12"}
{"signature": "def _calc_loglikelihood(self, model=None, tile=None):", "body": "if model is None:<EOL><INDENT>res = self.residuals<EOL><DEDENT>else:<EOL><INDENT>res = model - self._data[tile.slicer]<EOL><DEDENT>sig, isig = self.sigma, <NUM_LIT:1.0>/self.sigma<EOL>nlogs = -np.log(np.sqrt(<NUM_LIT:2>*np.pi)*sig)*res.size<EOL>return -<NUM_LIT:0.5>*isig*isig*np.dot(res.flat, res.flat) + nlogs<EOL>", "docstring": "Allows for fast local updates of log-likelihood", "id": "f5742:c3:m18"}
{"signature": "def __init__(self, image, comps, mdl=models.ConfocalImageModel(), sigma=<NUM_LIT>,<EOL>priors=None, pad=<NUM_LIT>, model_as_data=False):", "body": "self.dim = image.get_image().ndim<EOL>self.sigma = sigma<EOL>self.priors = priors<EOL>self.pad = util.aN(pad, dim=self.dim)<EOL>self.model_as_data = model_as_data<EOL>comp.ComponentCollection.__init__(self, comps=comps)<EOL>self.set_model(mdl=mdl)<EOL>self.set_image(image)<EOL>self.build_funcs()<EOL>if self.model_as_data:<EOL><INDENT>self.model_to_data(self.sigma)<EOL><DEDENT>", "docstring": "The state object to create a confocal image.  The model is that of\na spatially varying illumination field, from which platonic particle\nshapes are subtracted.  This is then spread with a point spread function\n(PSF).\n\nParameters\n-----------\nimage : :class:`peri.util.Image` object\n    The raw image with which to compare the model image from this\n    class.  This image should have been prepared through\n    prepare_for_state, which does things such as padding necessary for\n    this class. In the case of the RawImage, paths are used to keep\n    track of the image object to save on pickle size.\n\ncomp : list of :class:`peri.comp.comp.Component` or :class:`peri.comp.comp.ComponentCollection`\n    Components used to make up the model image. Each separate component\n    must be of a different category, otherwise combining them would be\n    ambiguous. 
If you desire multiple Components of one category,\n    combine them using a ComponentCollection (which has functions for\n    combining) and supply that to the comps list.\n\n    The component types must match the list of categories in the\n    ``ImageState.catmap`` which tells how components are matched to\n    parts of the model equation.\n\nmdl : :class:`peri.models.Model` object\n    Model defining how to combine different Components into a single\n    model.\n\npriors: list of ``peri.priors`` [default: ()]\n    Whether or not to turn on overlap priors using neighborlists\n\npad : integer or tuple of integers (optional)\n    No recommended to set by hand.  The padding level of the raw image\n    needed by the PSF support.\n\nmodel_as_data : boolean\n    Whether to use the model image as the true image after initializing", "id": "f5742:c3:m0"}
{"signature": "def pop_update(self):", "body": "params, values = self.stack.pop()<EOL>self.update(params, values)<EOL>", "docstring": "Pop the last update from the stack push by\n:func:`peri.states.States.push_update` by undoing the chnage last\nperformed.", "id": "f5742:c1:m12"}
{"signature": "def set_image(self, image):", "body": "if isinstance(image, np.ndarray):<EOL><INDENT>image = util.Image(image)<EOL><DEDENT>if isinstance(image, util.NullImage):<EOL><INDENT>self.model_as_data = True<EOL><DEDENT>else:<EOL><INDENT>self.model_as_data = False<EOL><DEDENT>self.image = image<EOL>self._data = self.image.get_padded_image(self.pad)<EOL>self.oshape = util.Tile(self._data.shape)<EOL>self.ishape = self.oshape.pad(-self.pad)<EOL>self.inner = self.ishape.slicer<EOL>for c in self.comps:<EOL><INDENT>c.set_shape(self.oshape, self.ishape)<EOL><DEDENT>self._model = np.zeros(self._data.shape, dtype=np.float64)<EOL>self._residuals = np.zeros(self._data.shape, dtype=np.float64)<EOL>self.calculate_model()<EOL>", "docstring": "Update the current comparison (real) image", "id": "f5742:c3:m2"}
{"signature": "def save(state, filename=None, desc='<STR_LIT>', extra=None):", "body": "if isinstance(state.image, util.RawImage):<EOL><INDENT>desc = desc or '<STR_LIT>'<EOL>filename = filename or state.image.filename + '<STR_LIT>' + desc + '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>if not filename:<EOL><INDENT>raise AttributeError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if extra is None:<EOL><INDENT>save = state<EOL><DEDENT>else:<EOL><INDENT>save = [state] + extra<EOL><DEDENT>if os.path.exists(filename):<EOL><INDENT>ff = \"<STR_LIT>\".format(filename)<EOL>if os.path.exists(ff):<EOL><INDENT>os.remove(ff)<EOL><DEDENT>os.rename(filename, ff)<EOL><DEDENT>pickle.dump(save, open(filename, '<STR_LIT:wb>'), protocol=<NUM_LIT:2>)<EOL>", "docstring": "Save the current state with extra information (for example samples and LL\nfrom the optimization procedure).\n\nParameters\n----------\nstate : peri.states.ImageState\n    the state object which to save\n\nfilename : string\n    if provided, will override the default that is constructed based on\n    the state's raw image file.  If there is no filename and the state has\n    a RawImage, the it is saved to RawImage.filename + \"-peri-save.pkl\"\n\ndesc : string\n    if provided, will augment the default filename to be\n    RawImage.filename + '-peri-' + desc + '.pkl'\n\nextra : list of pickleable objects\n    if provided, will be saved with the state", "id": "f5742:m1"}
{"signature": "def update_hyper(self, params, values):", "body": "self.hyper_parameters.update(params, values)<EOL>", "docstring": "Update any single hyper parameter or group of parameters ``params``\nwith ``values``.\n\nParameters\n----------\nparams : string or list of strings\n    Parameter names which to update\n\nvalue : number or list of numbers\n    Values of those parameters which to update", "id": "f5742:c1:m9"}
{"signature": "def crb(self, params=None, *args, **kwargs):", "body": "fish = self.fisherinformation(params=params, *args, **kwargs)<EOL>return np.sqrt(np.diag(np.linalg.inv(fish))) * self.sigma<EOL>", "docstring": "Calculate the diagonal elements of the minimum covariance of the model\nwith respect to parameters params. ``*args`` and ``**kwargs`` go to\n``fisherinformation``.", "id": "f5742:c1:m22"}
{"signature": "def get(self, name):", "body": "for c in self.comps:<EOL><INDENT>if c.category == name:<EOL><INDENT>return c<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Return component by category name", "id": "f5742:c3:m13"}
{"signature": "@property<EOL><INDENT>def model(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Class property: the current model fit to the data. Should return a\nnumber or ndarray. Ideally this object should be an object updated by\nthe :func:`peri.states.State.update` function and simply returned in\nthis property", "id": "f5742:c1:m2"}
{"signature": "@property<EOL><INDENT>def model(self):<DEDENT>", "body": "return self._model[self.inner]<EOL>", "docstring": "Get the current model fit to the data", "id": "f5742:c3:m8"}
{"signature": "def _grad_one_param(self, funct, p, dl=<NUM_LIT>, rts=False, nout=<NUM_LIT:1>, **kwargs):", "body": "vals = self.get_values(p)<EOL>f0 = funct(**kwargs)<EOL>self.update(p, vals+dl)<EOL>f1 = funct(**kwargs)<EOL>if rts:<EOL><INDENT>self.update(p, vals)<EOL><DEDENT>if nout == <NUM_LIT:1>:<EOL><INDENT>return (f1 - f0) / dl<EOL><DEDENT>else:<EOL><INDENT>return [(f1[i] - f0[i]) / dl for i in range(nout)]<EOL><DEDENT>", "docstring": "Gradient of `func` wrt a single parameter `p`. (see _graddoc)", "id": "f5742:c1:m15"}
{"signature": "@property<EOL><INDENT>def error(self):<DEDENT>", "body": "r = self.residuals.ravel()<EOL>return np.dot(r,r)<EOL>", "docstring": "Class property: Sum of the squared errors,\n:math:`E = \\sum_i (D_i - M_i(\\\\theta))^2`", "id": "f5742:c1:m4"}
{"signature": "def _hess_two_param(self, funct, p0, p1, dl=<NUM_LIT>, rts=False, **kwargs):", "body": "vals0 = self.get_values(p0)<EOL>vals1 = self.get_values(p1)<EOL>f00 = funct(**kwargs)<EOL>self.update(p0, vals0+dl)<EOL>f10 = funct(**kwargs)<EOL>self.update(p1, vals1+dl)<EOL>f11 = funct(**kwargs)<EOL>self.update(p0, vals0)<EOL>f01 = funct(**kwargs)<EOL>if rts:<EOL><INDENT>self.update(p0, vals0)<EOL>self.update(p1, vals1)<EOL><DEDENT>return (f11 - f10 - f01 + f00) / (dl**<NUM_LIT:2>)<EOL>", "docstring": "Hessian of `func` wrt two parameters `p0` and `p1`. (see _graddoc)", "id": "f5742:c1:m16"}
{"signature": "def _grad(self, funct, params=None, dl=<NUM_LIT>, rts=False, nout=<NUM_LIT:1>, out=None,<EOL>**kwargs):", "body": "if params is None:<EOL><INDENT>params = self.param_all()<EOL><DEDENT>ps = util.listify(params)<EOL>f0 = funct(**kwargs)<EOL>calc_shape = (<EOL>lambda ar: (len(ps),) + (ar.shape if isinstance(<EOL>ar, np.ndarray) else (<NUM_LIT:1>,)))<EOL>if out is not None:<EOL><INDENT>grad = out  <EOL><DEDENT>elif nout == <NUM_LIT:1>:<EOL><INDENT>shape = calc_shape(f0)<EOL>grad = np.zeros(shape)  <EOL><DEDENT>else:<EOL><INDENT>shape = [calc_shape(f0[i]) for i in range(nout)]<EOL>grad = [np.zeros(shp) for shp in shape]<EOL><DEDENT>for i, p in enumerate(ps):<EOL><INDENT>if nout == <NUM_LIT:1>:<EOL><INDENT>grad[i] = self._grad_one_param(funct, p, dl=dl, rts=rts,<EOL>nout=nout, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>stuff = self._grad_one_param(funct, p, dl=dl, rts=rts,<EOL>nout=nout, **kwargs)<EOL>for a in range(nout): grad[a][i] = stuff[a]<EOL><DEDENT><DEDENT>return grad<EOL>", "docstring": "Gradient of `func` wrt a set of parameters params. (see _graddoc)", "id": "f5742:c1:m17"}
{"signature": "def model_to_data(self, sigma=<NUM_LIT:0.0>):", "body": "im = self.model.copy()<EOL>im += sigma*np.random.randn(*im.shape)<EOL>self.set_image(util.NullImage(image=im))<EOL>", "docstring": "Switch out the data for the model's recreation of the data.", "id": "f5742:c3:m4"}
{"signature": "def volume_render(field, outfile, maxopacity=<NUM_LIT:1.0>, cmap='<STR_LIT>',<EOL>size=<NUM_LIT>, elevation=<NUM_LIT>, azimuth=<NUM_LIT>, bkg=(<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>opacitycut=<NUM_LIT>, offscreen=False, rayfunction='<STR_LIT>'):", "body": "sh = field.shape<EOL>dataImporter = vtk.vtkImageImport()<EOL>dataImporter.SetDataScalarTypeToUnsignedChar()<EOL>data_string = field.tostring()<EOL>dataImporter.SetNumberOfScalarComponents(<NUM_LIT:1>)<EOL>dataImporter.CopyImportVoidPointer(data_string, len(data_string))<EOL>dataImporter.SetDataExtent(<NUM_LIT:0>, sh[<NUM_LIT:2>]-<NUM_LIT:1>, <NUM_LIT:0>, sh[<NUM_LIT:1>]-<NUM_LIT:1>, <NUM_LIT:0>, sh[<NUM_LIT:0>]-<NUM_LIT:1>)<EOL>dataImporter.SetWholeExtent(<NUM_LIT:0>, sh[<NUM_LIT:2>]-<NUM_LIT:1>, <NUM_LIT:0>, sh[<NUM_LIT:1>]-<NUM_LIT:1>, <NUM_LIT:0>, sh[<NUM_LIT:0>]-<NUM_LIT:1>)<EOL>alphaChannelFunc = vtk.vtkPiecewiseFunction()<EOL>alphaChannelFunc.AddPoint(<NUM_LIT:0>, <NUM_LIT:0.0>)<EOL>alphaChannelFunc.AddPoint(int(<NUM_LIT:255>*opacitycut), maxopacity)<EOL>volumeProperty = vtk.vtkVolumeProperty()<EOL>colorFunc = cmap2colorfunc(cmap)<EOL>volumeProperty.SetColor(colorFunc)<EOL>volumeProperty.SetScalarOpacity(alphaChannelFunc)<EOL>volumeMapper = vtk.vtkVolumeRayCastMapper()<EOL>if rayfunction == '<STR_LIT>':<EOL><INDENT>comp = vtk.vtkVolumeRayCastMIPFunction()<EOL>comp.SetMaximizeMethodToOpacity()<EOL><DEDENT>elif rayfunction == '<STR_LIT>':<EOL><INDENT>comp = vtk.vtkVolumeRayCastCompositeFunction()<EOL><DEDENT>elif rayfunction == '<STR_LIT>':<EOL><INDENT>comp = vtk.vtkVolumeRayCastIsosurfaceFunction()<EOL>comp.SetIsoValue(maxopacity/<NUM_LIT:2>)<EOL><DEDENT>else:<EOL><INDENT>comp = vtk.vtkVolumeRayCastIsosurfaceFunction()<EOL><DEDENT>volumeMapper.SetSampleDistance(<NUM_LIT:0.1>)<EOL>volumeMapper.SetVolumeRayCastFunction(comp)<EOL>if rayfunction == '<STR_LIT>':<EOL><INDENT>volumeMapper = 
vtk.vtkSmartVolumeMapper()<EOL><DEDENT>volumeMapper.SetInputConnection(dataImporter.GetOutputPort())<EOL>volume = vtk.vtkVolume()<EOL>volume.SetMapper(volumeMapper)<EOL>volume.SetProperty(volumeProperty)<EOL>light = vtk.vtkLight()<EOL>light.SetLightType(vtk.VTK_LIGHT_TYPE_HEADLIGHT)<EOL>light.SetIntensity(<NUM_LIT>)<EOL>light.SwitchOn()<EOL>renderer = vtk.vtkRenderer()<EOL>renderWin = vtk.vtkRenderWindow()<EOL>renderWin.AddRenderer(renderer)<EOL>renderWin.SetOffScreenRendering(<NUM_LIT:1>);<EOL>if not hasattr(size, '<STR_LIT>'):<EOL><INDENT>size = (size, size)<EOL><DEDENT>renderer.AddVolume(volume)<EOL>renderer.AddLight(light)<EOL>renderer.SetBackground(*bkg)<EOL>renderWin.SetSize(*size)<EOL>if offscreen:<EOL><INDENT>renderWin.SetOffScreenRendering(<NUM_LIT:1>)<EOL><DEDENT>def exitCheck(obj, event):<EOL><INDENT>if obj.GetEventPending() != <NUM_LIT:0>:<EOL><INDENT>obj.SetAbortRender(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>renderWin.AddObserver(\"<STR_LIT>\", exitCheck)<EOL>renderInteractor = vtk.vtkRenderWindowInteractor()<EOL>renderInteractor.Initialize()<EOL>renderWin.Render()<EOL>renderInteractor.Start()<EOL>writer = vtk.vtkPNGWriter()<EOL>w2i = vtk.vtkWindowToImageFilter()<EOL>w2i.SetInput(renderWin)<EOL>writer.SetInputConnection(w2i.GetOutputPort())<EOL>renderWin.Render()<EOL>ac = renderer.GetActiveCamera()<EOL>ac.Elevation(elevation)<EOL>ac.Azimuth(azimuth)<EOL>renderer.ResetCameraClippingRange()<EOL>renderWin.Render()<EOL>w2i.Modified()<EOL>writer.SetFileName(outfile)<EOL>writer.Write()<EOL>", "docstring": "Uses vtk to make render an image of a field, with control over the\ncamera angle and colormap.\n\nInput Parameters\n----------------\n    field : np.ndarray\n        3D array of the field to render.\n    outfile : string\n        The save name of the image.\n    maxopacity : Float\n        Default is 1.0\n    cmap : matplotlib colormap string\n        Passed to cmap2colorfunc. 
Default is bone.\n    size : 2-element list-like of ints or Int\n        The size of the final rendered image.\n    elevation : Numeric\n        The elevation of the camera angle, in degrees. Default is 45\n    azimuth : Numeric\n        The azimuth of the camera angle, in degrees. Default is 45\n    bkg : Tuple of floats\n        3-element tuple of floats on [0,1] of the background image color.\n        Default is (0., 0., 0.).", "id": "f5743:m5"}
{"signature": "def norm(field, vmin=<NUM_LIT:0>, vmax=<NUM_LIT:255>):", "body": "field = <NUM_LIT:255>*np.clip(field, <NUM_LIT:0>, <NUM_LIT:1>)<EOL>field = field.astype('<STR_LIT>')<EOL>return field<EOL>", "docstring": "Truncates field to 0,1; then normalizes to a uin8 on [0,255]", "id": "f5743:m0"}
{"signature": "def extract_field(state, field='<STR_LIT>'):", "body": "es, pp = field.split('<STR_LIT:->')  <EOL>if pp == '<STR_LIT>':<EOL><INDENT>o = state.get('<STR_LIT>')<EOL>if isinstance(o, peri.comp.comp.ComponentCollection):<EOL><INDENT>wts = <NUM_LIT:0>*o.get()[state.inner]<EOL>for c in o.comps:<EOL><INDENT>if isinstance(c, peri.comp.objs.PlatonicSpheresCollection):<EOL><INDENT>wts += c.get()[state.inner]<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>wts = o.get()[state.inner]<EOL><DEDENT><DEDENT>elif pp == '<STR_LIT>':<EOL><INDENT>wts = state.get('<STR_LIT>').get()[state.inner]<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if es == '<STR_LIT>':<EOL><INDENT>out = (<NUM_LIT:1>-state.data) * (wts > <NUM_LIT>)<EOL><DEDENT>elif es == '<STR_LIT>':<EOL><INDENT>out = wts<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return norm(clip(roll(out)))<EOL>", "docstring": "Given a state, extracts a field. Extracted value depends on the value\nof field:\n    'exp-particles' : The inverted data in the regions of the particles,\n            zeros otherwise -- i.e. particles + noise.\n    'exp-platonic'  : Same as above, but nonzero in the region of the\n            entire platonic image -- i.e. platonic + noise.\n    'sim-particles' : Just the particles image; no noise from the data.\n    'sim-platonic'  : Just the platonic image; no noise from the data.", "id": "f5743:m3"}
{"signature": "def make_clean_figure(figsize, remove_tooltips=False, remove_keybindings=False):", "body": "tooltip = mpl.rcParams['<STR_LIT>']<EOL>if remove_tooltips:<EOL><INDENT>mpl.rcParams['<STR_LIT>'] = '<STR_LIT:None>'<EOL><DEDENT>fig = pl.figure(figsize=figsize)<EOL>mpl.rcParams['<STR_LIT>'] = tooltip<EOL>if remove_keybindings:<EOL><INDENT>fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)<EOL><DEDENT>return fig<EOL>", "docstring": "Makes a `matplotlib.pyplot.Figure` without tooltips or keybindings\n\nParameters\n----------\nfigsize : tuple\n    Figsize as passed to `matplotlib.pyplot.figure`\nremove_tooltips, remove_keybindings : bool\n    Set to True to remove the tooltips bar or any key bindings,\n    respectively. Default is False\n\nReturns\n-------\nfig : `matplotlib.pyplot.Figure`", "id": "f5744:m0"}
{"signature": "def __init__(self, field, onesided=True, vmin=None, vmax=None, cmap=None,<EOL>dohist=False, fourier=False, tooltips=False, size=<NUM_LIT:8>):", "body": "self.vmin = vmin<EOL>self.vmax = vmax<EOL>self.field = field<EOL>self.onesided = onesided<EOL>self.dohist = dohist<EOL>self.fourier = fourier<EOL>if cmap is not None:<EOL><INDENT>self.cmap = cmap<EOL><DEDENT>else:<EOL><INDENT>self.cmap = '<STR_LIT>' if self.onesided else '<STR_LIT>'<EOL><DEDENT>z,y,x = [float(i) for i in self.field.shape]<EOL>w = float(x + z)<EOL>h = float(y + z)<EOL>self.fig = make_clean_figure(<EOL>figsize=(size * w/h, size), remove_tooltips=not tooltips,<EOL>remove_keybindings=True)<EOL>self.g = {}<EOL>self.g['<STR_LIT>'] = self.fig.add_axes((<NUM_LIT:0.0>, <NUM_LIT:1>-y/h, x/w,   y/h))<EOL>self.g['<STR_LIT>'] = self.fig.add_axes((<NUM_LIT:0.0>, <NUM_LIT:0.0>,   x/w,   <NUM_LIT:1>-y/h))<EOL>self.g['<STR_LIT>'] = self.fig.add_axes((x/w, <NUM_LIT:1>-y/h, <NUM_LIT:1>-x/w, y/h))<EOL>self.g['<STR_LIT>'] = self.fig.add_axes((x/w, <NUM_LIT:0.0>,   <NUM_LIT:1>-x/w, <NUM_LIT:1>-y/h))<EOL>self.slices = (np.array(self.field.shape)/<NUM_LIT:2>).astype('<STR_LIT:int>')<EOL>if self.fourier:<EOL><INDENT>self.field = np.fft.fftshift(np.fft.fftn(self.field))<EOL><DEDENT>self.draw()<EOL>self.register_events()<EOL>", "docstring": "Easy interactive viewing of 3D ndarray with view selection.\n\nNavigate in 3D by clicking on the three panels which are slices\nthrough the array at a given position.\n\nParameters\n----------\nfield : np.ndarray\n    The field to view\nonesided : bool, optional\n    Whether to use the default one-sided or two-sided colormap.\n    Over-ridden by cmap. Default is True\nvmin, vmax : numeric, optional\n    The min, max colorbar range, as passed to the matplotlib\n    colormap. Default is the (min, max) of the data.\ncmap : {None, valid matplotlib colormap}, optional\n    Use to directly a specific colormap, e.g. `'bone'`. If None\n    selects `'bone' if onesided else 'RdBu'`. 
Default is None\ndohist : bool, optional\n    Set to True to include a histogram of `field` in an\n    additional panel. Default is False\nfourier : bool, optional\n    Set to True to view the Fourier transform of field. Default\n    is False\ntooltips : bool, optional\n    Whether to include the tooltips bar on the figure. Default\n    is False\nsize : numeric, optional\n    The rough figure size of the viewer; the actual size is re-\n    scaled based on the field's size. Default is 8", "id": "f5744:c1:m0"}
{"signature": "def _particle_func(self, coords, pos, wid):", "body": "dx, dy, dz = [c - p for c,p in zip(coords, pos)]<EOL>dr2 = dx*dx + dy*dy + dz*dz<EOL>return np.exp(-dr2/(<NUM_LIT:2>*wid*wid))<EOL>", "docstring": "Draws a gaussian, range is (0,1]. Coords = [3,n]", "id": "f5744:c2:m2"}
{"signature": "def _remove_closest_particle(self, p):", "body": "<EOL>dp = self.pos - p<EOL>dist2 = (dp*dp).sum(axis=<NUM_LIT:1>)<EOL>ind = dist2.argmin()<EOL>rp = self.pos[ind].copy()<EOL>self.pos = np.delete(self.pos, ind, axis=<NUM_LIT:0>)<EOL>return rp<EOL>", "docstring": "removes the closest particle in self.pos to ``p``", "id": "f5744:c2:m10"}
{"signature": "def update_field(self, poses=None):", "body": "m = np.clip(self.particle_field, <NUM_LIT:0>, <NUM_LIT:1>)<EOL>part_color = np.zeros(self._image.shape)<EOL>for a in range(<NUM_LIT:4>): part_color[:,:,:,a] = self.part_col[a]<EOL>self.field = np.zeros(self._image.shape)<EOL>for a in range(<NUM_LIT:4>):<EOL><INDENT>self.field[:,:,:,a] = m*part_color[:,:,:,a] + (<NUM_LIT:1>-m) * self._image[:,:,:,a]<EOL><DEDENT>", "docstring": "updates self.field", "id": "f5744:c2:m4"}
{"signature": "def center_data(data, vmin, vmax):", "body": "ans = data - vmin<EOL>ans /= (vmax - vmin)<EOL>return np.clip(ans, <NUM_LIT:0>, <NUM_LIT:1>)<EOL>", "docstring": "Clips data on [vmin, vmax]; then rescales to [0,1]", "id": "f5746:m10"}
{"signature": "def crb_compare(state0, samples0, state1, samples1, crb0=None, crb1=None,<EOL>zlayer=None, xlayer=None):", "body": "s0 = state0<EOL>s1 = state1<EOL>h0 = np.array(samples0)<EOL>h1 = np.array(samples1)<EOL>slicez = zlayer or s0.image.shape[<NUM_LIT:0>]//<NUM_LIT:2><EOL>slicex = xlayer or s0.image.shape[<NUM_LIT:2>]//<NUM_LIT:2><EOL>slicer1 = np.s_[slicez,s0.pad:-s0.pad,s0.pad:-s0.pad]<EOL>slicer2 = np.s_[s0.pad:-s0.pad,s0.pad:-s0.pad,slicex]<EOL>center = (slicez, s0.image.shape[<NUM_LIT:1>]//<NUM_LIT:2>, slicex)<EOL>mu0 = h0.mean(axis=<NUM_LIT:0>)<EOL>mu1 = h1.mean(axis=<NUM_LIT:0>)<EOL>std0 = h0.std(axis=<NUM_LIT:0>)<EOL>std1 = h1.std(axis=<NUM_LIT:0>)<EOL>mask0 = (s0.state[s0.b_typ]==<NUM_LIT:1.>) & (<EOL>analyze.trim_box(s0, mu0[s0.b_pos].reshape(-<NUM_LIT:1>,<NUM_LIT:3>)))<EOL>mask1 = (s1.state[s1.b_typ]==<NUM_LIT:1.>) & (<EOL>analyze.trim_box(s1, mu1[s1.b_pos].reshape(-<NUM_LIT:1>,<NUM_LIT:3>)))<EOL>active0 = np.arange(s0.N)[mask0]<EOL>active1 = np.arange(s1.N)[mask1]<EOL>pos0 = mu0[s0.b_pos].reshape(-<NUM_LIT:1>,<NUM_LIT:3>)[active0]<EOL>pos1 = mu1[s1.b_pos].reshape(-<NUM_LIT:1>,<NUM_LIT:3>)[active1]<EOL>rad0 = mu0[s0.b_rad][active0]<EOL>rad1 = mu1[s1.b_rad][active1]<EOL>link = analyze.nearest(pos0, pos1)<EOL>dpos = pos0 - pos1[link]<EOL>drad = rad0 - rad1[link]<EOL>drift = dpos.mean(axis=<NUM_LIT:0>)<EOL>log.info('<STR_LIT>'.format(drift))<EOL>dpos -= drift<EOL>fig = pl.figure(figsize=(<NUM_LIT>,<NUM_LIT:10>))<EOL>gs0 = ImageGrid(fig, rect=[<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>], nrows_ncols=(<NUM_LIT:2>,<NUM_LIT:3>), axes_pad=<NUM_LIT:0.1>)<EOL>lbl(gs0[<NUM_LIT:0>], '<STR_LIT:A>')<EOL>for i,slicer in enumerate([slicer1, slicer2]):<EOL><INDENT>ax_real = gs0[<NUM_LIT:3>*i+<NUM_LIT:0>]<EOL>ax_fake = gs0[<NUM_LIT:3>*i+<NUM_LIT:1>]<EOL>ax_diff = gs0[<NUM_LIT:3>*i+<NUM_LIT:2>]<EOL>diff0 = s0.get_model_image() - s0.image<EOL>diff1 = s1.get_model_image() - s1.image<EOL>a = (s0.image - s1.image)<EOL>b = (s0.get_model_image() - 
s1.get_model_image())<EOL>c = (diff0 - diff1)<EOL>ptp = <NUM_LIT>*max([np.abs(a).max(), np.abs(b).max(), np.abs(c).max()])<EOL>cmap = pl.cm.RdBu_r<EOL>ax_real.imshow(a[slicer], cmap=cmap, vmin=-ptp, vmax=ptp)<EOL>ax_real.set_xticks([])<EOL>ax_real.set_yticks([])<EOL>ax_fake.imshow(b[slicer], cmap=cmap, vmin=-ptp, vmax=ptp)<EOL>ax_fake.set_xticks([])<EOL>ax_fake.set_yticks([])<EOL>ax_diff.imshow(c[slicer], cmap=cmap, vmin=-ptp, vmax=ptp)<EOL>ax_diff.set_xticks([])<EOL>ax_diff.set_yticks([])<EOL>if i == <NUM_LIT:0>:<EOL><INDENT>ax_real.set_title(r\"<STR_LIT>\", fontsize=<NUM_LIT>)<EOL>ax_fake.set_title(r\"<STR_LIT>\", fontsize=<NUM_LIT>)<EOL>ax_diff.set_title(r\"<STR_LIT>\", fontsize=<NUM_LIT>)<EOL>ax_real.set_ylabel('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>ax_real.set_ylabel('<STR_LIT>')<EOL><DEDENT><DEDENT>gs1 = GridSpec(<NUM_LIT:1>,<NUM_LIT:3>, left=<NUM_LIT>, bottom=<NUM_LIT>, right=<NUM_LIT>, top=<NUM_LIT>,<EOL>wspace=<NUM_LIT>, hspace=<NUM_LIT>)<EOL>spos0 = std0[s0.b_pos].reshape(-<NUM_LIT:1>,<NUM_LIT:3>)[active0]<EOL>spos1 = std1[s1.b_pos].reshape(-<NUM_LIT:1>,<NUM_LIT:3>)[active1]<EOL>srad0 = std0[s0.b_rad][active0]<EOL>srad1 = std1[s1.b_rad][active1]<EOL>def hist(ax, vals, bins, *args, **kwargs):<EOL><INDENT>y,x = np.histogram(vals, bins=bins)<EOL>x = (x[<NUM_LIT:1>:] + x[:-<NUM_LIT:1>])/<NUM_LIT:2><EOL>y /= len(vals)<EOL>ax.plot(x,y, *args, **kwargs)<EOL><DEDENT>def pp(ind, tarr, tsim, tcrb, var='<STR_LIT:x>'):<EOL><INDENT>bins = <NUM_LIT:10>**np.linspace(-<NUM_LIT:3>, <NUM_LIT:0.0>, <NUM_LIT:30>)<EOL>bin2 = <NUM_LIT:10>**np.linspace(-<NUM_LIT:3>, <NUM_LIT:0.0>, <NUM_LIT:100>)<EOL>bins = np.linspace(<NUM_LIT:0.0>, <NUM_LIT>, <NUM_LIT:30>)<EOL>bin2 = np.linspace(<NUM_LIT:0.0>, <NUM_LIT>, <NUM_LIT:100>)<EOL>xlim = (<NUM_LIT:0.0>, <NUM_LIT>)<EOL>ylim = (<NUM_LIT>, <NUM_LIT:30>)<EOL>ticks = ticker.FuncFormatter(lambda x, pos: '<STR_LIT>'.format(np.log10(x)))<EOL>scaler = lambda x: x <EOL>ax_crb = 
pl.subplot(gs1[<NUM_LIT:0>,ind])<EOL>ax_crb.hist(scaler(np.abs(tarr)), bins=bins,<EOL>normed=True, alpha=<NUM_LIT>, histtype='<STR_LIT>', lw=<NUM_LIT:1>)<EOL>ax_crb.hist(scaler(np.abs(tcrb)).ravel(), bins=bin2,<EOL>normed=True, alpha=<NUM_LIT:1.0>, histtype='<STR_LIT>', ls='<STR_LIT>', lw=<NUM_LIT>, color='<STR_LIT:k>')<EOL>ax_crb.hist(scaler(np.abs(tsim).ravel()), bins=bin2,<EOL>normed=True, alpha=<NUM_LIT:1.0>, histtype='<STR_LIT>', lw=<NUM_LIT:3>)<EOL>ax_crb.set_xlabel(r\"<STR_LIT>\" % (var,var), fontsize=<NUM_LIT>)<EOL>ax_crb.set_xlim(xlim)<EOL>ax_crb.grid(b=False, which='<STR_LIT>', axis='<STR_LIT>')<EOL>if ind == <NUM_LIT:0>:<EOL><INDENT>lbl(ax_crb, '<STR_LIT:B>')<EOL>ax_crb.set_ylabel(r\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>ax_crb.set_yticks([])<EOL><DEDENT>ax_crb.locator_params(axis='<STR_LIT:x>', nbins=<NUM_LIT:3>)<EOL><DEDENT>f,g = <NUM_LIT>, <NUM_LIT><EOL>sim = f*sim_crb_diff(spos0[:,<NUM_LIT:1>], spos1[:,<NUM_LIT:1>][link])<EOL>crb = g*sim_crb_diff(crb0[<NUM_LIT:0>][:,<NUM_LIT:1>][active0], crb1[<NUM_LIT:0>][:,<NUM_LIT:1>][active1][link])<EOL>pp(<NUM_LIT:0>, dpos[:,<NUM_LIT:1>], sim, crb, '<STR_LIT:x>')<EOL>sim = f*sim_crb_diff(spos0[:,<NUM_LIT:0>], spos1[:,<NUM_LIT:0>][link])<EOL>crb = g*sim_crb_diff(crb0[<NUM_LIT:0>][:,<NUM_LIT:0>][active0], crb1[<NUM_LIT:0>][:,<NUM_LIT:0>][active1][link])<EOL>pp(<NUM_LIT:1>, dpos[:,<NUM_LIT:0>], sim, crb, '<STR_LIT:z>')<EOL>sim = f*sim_crb_diff(srad0, srad1[link])<EOL>crb = g*sim_crb_diff(crb0[<NUM_LIT:1>][active0], crb1[<NUM_LIT:1>][active1][link])<EOL>pp(<NUM_LIT:2>, drad, sim, crb, '<STR_LIT:a>')<EOL>gs2 = GridSpec(<NUM_LIT:2>,<NUM_LIT:2>, left=<NUM_LIT>, bottom=<NUM_LIT>, right=<NUM_LIT>, top=<NUM_LIT>,<EOL>wspace=<NUM_LIT>, hspace=<NUM_LIT>)<EOL>ax_hist = pl.subplot(gs2[<NUM_LIT:0>,<NUM_LIT:0>])<EOL>ax_hist.hist(std0[s0.b_pos], bins=np.logspace(-<NUM_LIT>, <NUM_LIT:0>, <NUM_LIT:50>), alpha=<NUM_LIT>, label='<STR_LIT>', histtype='<STR_LIT>')<EOL>ax_hist.hist(std0[s0.b_rad], bins=np.logspace(-<NUM_LIT>, 
<NUM_LIT:0>, <NUM_LIT:50>), alpha=<NUM_LIT>, label='<STR_LIT>', histtype='<STR_LIT>')<EOL>ax_hist.set_xlim((<NUM_LIT:10>**-<NUM_LIT>, <NUM_LIT:1>))<EOL>ax_hist.semilogx()<EOL>ax_hist.set_xlabel(r\"<STR_LIT>\")<EOL>ax_hist.set_ylabel(r\"<STR_LIT>\")<EOL>ax_hist.legend(loc='<STR_LIT>')<EOL>lbl(ax_hist, '<STR_LIT:C>')<EOL>imdiff = ((s0.get_model_image() - s0.image)/s0._sigma_field)[s0.image_mask==<NUM_LIT:1.>].ravel()<EOL>mu = imdiff.mean()<EOL>x = np.linspace(-<NUM_LIT:5>,<NUM_LIT:5>,<NUM_LIT>)<EOL>ax_diff = pl.subplot(gs2[<NUM_LIT:0>,<NUM_LIT:1>])<EOL>ax_diff.plot(x, <NUM_LIT:1.0>/np.sqrt(<NUM_LIT:2>*np.pi) * np.exp(-(x-mu)**<NUM_LIT:2> / <NUM_LIT:2>), '<STR_LIT:->', alpha=<NUM_LIT>, color='<STR_LIT:k>', lw=<NUM_LIT:2>)<EOL>ax_diff.hist(imdiff, bins=<NUM_LIT:1000>, histtype='<STR_LIT>', alpha=<NUM_LIT>, normed=True)<EOL>ax_diff.semilogy()<EOL>ax_diff.set_ylabel(r\"<STR_LIT>\")<EOL>ax_diff.set_xlabel(r\"<STR_LIT>\")<EOL>ax_diff.locator_params(axis='<STR_LIT:x>', nbins=<NUM_LIT:5>)<EOL>ax_diff.grid(b=False, which='<STR_LIT>', axis='<STR_LIT:y>')<EOL>ax_diff.set_xlim(-<NUM_LIT:5>, <NUM_LIT:5>)<EOL>ax_diff.set_ylim(<NUM_LIT>, <NUM_LIT>)<EOL>lbl(ax_diff, '<STR_LIT:D>')<EOL>pos = mu0[s0.b_pos].reshape(-<NUM_LIT:1>,<NUM_LIT:3>)<EOL>rad = mu0[s0.b_rad]<EOL>mask = analyze.trim_box(s0, pos)<EOL>pos = pos[mask]<EOL>rad = rad[mask]<EOL>gx, gy = analyze.gofr(pos, rad, mu0[s0.b_zscale][<NUM_LIT:0>], resolution=<NUM_LIT>,mask_start=<NUM_LIT:0.5>)<EOL>mask = gx < <NUM_LIT:5><EOL>gx = gx[mask]<EOL>gy = gy[mask]<EOL>ax_gofr = pl.subplot(gs2[<NUM_LIT:1>,<NUM_LIT:0>])<EOL>ax_gofr.plot(gx, gy, '<STR_LIT:->', lw=<NUM_LIT:1>)<EOL>ax_gofr.set_xlabel(r\"<STR_LIT>\")<EOL>ax_gofr.set_ylabel(r\"<STR_LIT>\")<EOL>ax_gofr.locator_params(axis='<STR_LIT>', nbins=<NUM_LIT:5>)<EOL>lbl(ax_gofr, '<STR_LIT:E>')<EOL>gx, gy = analyze.gofr(pos, rad, mu0[s0.b_zscale][<NUM_LIT:0>], method='<STR_LIT>')<EOL>mask = gx < <NUM_LIT:5><EOL>gx = gx[mask]<EOL>gy = gy[mask]<EOL>gy[gy <= <NUM_LIT:0.>] = 
gy[gy><NUM_LIT:0>].min()<EOL>ax_gofrs = pl.subplot(gs2[<NUM_LIT:1>,<NUM_LIT:1>])<EOL>ax_gofrs.plot(gx, gy, '<STR_LIT:->', lw=<NUM_LIT:1>)<EOL>ax_gofrs.set_xlabel(r\"<STR_LIT>\")<EOL>ax_gofrs.set_ylabel(r\"<STR_LIT>\")<EOL>ax_gofrs.locator_params(axis='<STR_LIT>', nbins=<NUM_LIT:5>)<EOL>ax_gofrs.grid(b=False, which='<STR_LIT>', axis='<STR_LIT:y>')<EOL>lbl(ax_gofrs, '<STR_LIT:F>')<EOL>ylim = ax_gofrs.get_ylim()<EOL>ax_gofrs.set_ylim(gy.min(), ylim[<NUM_LIT:1>])<EOL>", "docstring": "To run, do:\n\ns,h = pickle...\ns1,h1 = pickle...\n    i.e. /media/scratch/bamf/vacancy/vacancy_zoom-1.tif_t002.tif-featured-v2.pkl\n    i.e. /media/scratch/bamf/frozen-particles/0.tif-featured-full.pkl\ncrb0 = diag_crb_particles(s); crb1 = diag_crb_particles(s1)\ncrb_compare(s,h[-25:],s1,h1[-25:], crb0, crb1)", "id": "f5746:m13"}
{"signature": "def generative_model(s,x,y,z,r, factor=<NUM_LIT>):", "body": "pl.close('<STR_LIT:all>')<EOL>slicez = int(round(z.mean()))<EOL>slicex = s.image.shape[<NUM_LIT:2>]//<NUM_LIT:2><EOL>slicer1 = np.s_[slicez,s.pad:-s.pad,s.pad:-s.pad]<EOL>slicer2 = np.s_[s.pad:-s.pad,s.pad:-s.pad,slicex]<EOL>center = (slicez, s.image.shape[<NUM_LIT:1>]//<NUM_LIT:2>, slicex)<EOL>fig = pl.figure(figsize=(factor*<NUM_LIT>,factor*<NUM_LIT:10>))<EOL>gs1 = ImageGrid(fig, rect=[<NUM_LIT:0.0>, <NUM_LIT>, <NUM_LIT:1.0>, <NUM_LIT>], nrows_ncols=(<NUM_LIT:1>,<NUM_LIT:3>),<EOL>axes_pad=<NUM_LIT:0.1>)<EOL>ax_real = gs1[<NUM_LIT:0>]<EOL>ax_fake = gs1[<NUM_LIT:1>]<EOL>ax_diff = gs1[<NUM_LIT:2>]<EOL>diff = s.get_model_image() - s.image<EOL>ax_real.imshow(s.image[slicer1], cmap=pl.cm.bone_r)<EOL>ax_real.set_xticks([])<EOL>ax_real.set_yticks([])<EOL>ax_real.set_title(\"<STR_LIT>\", fontsize=<NUM_LIT>)<EOL>ax_fake.imshow(s.get_model_image()[slicer1], cmap=pl.cm.bone_r)<EOL>ax_fake.set_xticks([])<EOL>ax_fake.set_yticks([])<EOL>ax_fake.set_title(\"<STR_LIT>\", fontsize=<NUM_LIT>)<EOL>ax_diff.imshow(diff[slicer1], cmap=pl.cm.RdBu, vmin=-<NUM_LIT:0.1>, vmax=<NUM_LIT:0.1>)<EOL>ax_diff.set_xticks([])<EOL>ax_diff.set_yticks([])<EOL>ax_diff.set_title(\"<STR_LIT>\", fontsize=<NUM_LIT>)<EOL>gs2 = ImageGrid(fig, rect=[<NUM_LIT:0.1>, <NUM_LIT:0.0>, <NUM_LIT>, <NUM_LIT>], nrows_ncols=(<NUM_LIT:3>,<NUM_LIT:2>),<EOL>axes_pad=<NUM_LIT:0.1>)<EOL>ax_plt1 = fig.add_subplot(gs2[<NUM_LIT:0>])<EOL>ax_plt2 = fig.add_subplot(gs2[<NUM_LIT:1>])<EOL>ax_ilm1 = fig.add_subplot(gs2[<NUM_LIT:2>])<EOL>ax_ilm2 = fig.add_subplot(gs2[<NUM_LIT:3>])<EOL>ax_psf1 = fig.add_subplot(gs2[<NUM_LIT:4>])<EOL>ax_psf2 = fig.add_subplot(gs2[<NUM_LIT:5>])<EOL>c = int(z.mean()), int(y.mean())+s.pad, int(x.mean())+s.pad<EOL>if s.image.shape[<NUM_LIT:0>] > <NUM_LIT:2>*s.image.shape[<NUM_LIT:1>]//<NUM_LIT:3>:<EOL><INDENT>w = s.image.shape[<NUM_LIT:2>] - <NUM_LIT:2>*s.pad<EOL>h = <NUM_LIT:2>*w//<NUM_LIT:3><EOL><DEDENT>else:<EOL><INDENT>h = 
s.image.shape[<NUM_LIT:0>] - <NUM_LIT:2>*s.pad<EOL>w = <NUM_LIT:3>*h//<NUM_LIT:2><EOL><DEDENT>w,h = w//<NUM_LIT:2>, h//<NUM_LIT:2><EOL>xyslice = np.s_[slicez, c[<NUM_LIT:1>]-h:c[<NUM_LIT:1>]+h, c[<NUM_LIT:2>]-w:c[<NUM_LIT:2>]+w]<EOL>yzslice = np.s_[c[<NUM_LIT:0>]-h:c[<NUM_LIT:0>]+h, c[<NUM_LIT:1>]-w:c[<NUM_LIT:1>]+w, slicex]<EOL>ax_plt1.imshow(<NUM_LIT:1>-s.obj.get_field()[xyslice], cmap=pl.cm.bone_r, vmin=<NUM_LIT:0>, vmax=<NUM_LIT:1>)<EOL>ax_plt1.set_xticks([])<EOL>ax_plt1.set_yticks([])<EOL>ax_plt1.set_ylabel(\"<STR_LIT>\", fontsize=<NUM_LIT>)<EOL>ax_plt1.set_title(\"<STR_LIT>\", fontsize=<NUM_LIT>)<EOL>ax_plt2.imshow(<NUM_LIT:1>-s._platonic_image()[yzslice], cmap=pl.cm.bone_r, vmin=<NUM_LIT:0>, vmax=<NUM_LIT:1>)<EOL>ax_plt2.set_xticks([])<EOL>ax_plt2.set_yticks([])<EOL>ax_plt2.set_title(\"<STR_LIT>\", fontsize=<NUM_LIT>)<EOL>ax_ilm1.imshow(s.ilm.get_field()[xyslice], cmap=pl.cm.bone_r)<EOL>ax_ilm1.set_xticks([])<EOL>ax_ilm1.set_yticks([])<EOL>ax_ilm1.set_ylabel(\"<STR_LIT>\", fontsize=<NUM_LIT>)<EOL>ax_ilm2.imshow(s.ilm.get_field()[yzslice], cmap=pl.cm.bone_r)<EOL>ax_ilm2.set_xticks([])<EOL>ax_ilm2.set_yticks([])<EOL>t = s.ilm.get_field().copy()<EOL>t *= <NUM_LIT:0><EOL>t[c] = <NUM_LIT:1><EOL>s.psf.set_tile(util.Tile(t.shape))<EOL>psf = (s.psf.execute(t)+<NUM_LIT>)**<NUM_LIT:0.1><EOL>ax_psf1.imshow(psf[xyslice], cmap=pl.cm.bone)<EOL>ax_psf1.set_xticks([])<EOL>ax_psf1.set_yticks([])<EOL>ax_psf1.set_ylabel(\"<STR_LIT>\", fontsize=<NUM_LIT>)<EOL>ax_psf2.imshow(psf[yzslice], cmap=pl.cm.bone)<EOL>ax_psf2.set_xticks([])<EOL>ax_psf2.set_yticks([])<EOL>ax_zoom = fig.add_axes([<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>])<EOL>im = s.image[slicer1]<EOL>sh = np.array(im.shape)<EOL>cx = x.mean()<EOL>cy = y.mean()<EOL>extent = [<NUM_LIT:0>,sh[<NUM_LIT:0>],<NUM_LIT:0>,sh[<NUM_LIT:1>]]<EOL>ax_zoom.set_xticks([])<EOL>ax_zoom.set_yticks([])<EOL>ax_zoom.imshow(im, extent=extent, cmap=pl.cm.bone_r)<EOL>ax_zoom.set_xlim(cx-<NUM_LIT:12>, 
cx+<NUM_LIT:12>)<EOL>ax_zoom.set_ylim(cy-<NUM_LIT:12>, cy+<NUM_LIT:12>)<EOL>ax_zoom.set_title(\"<STR_LIT>\", fontsize=<NUM_LIT>)<EOL>ax_zoom.hexbin(x,y, gridsize=<NUM_LIT:32>, mincnt=<NUM_LIT:0>, cmap=pl.cm.hot)<EOL>zoom1 = zoomed_inset_axes(ax_zoom, <NUM_LIT:30>, loc=<NUM_LIT:3>)<EOL>zoom1.imshow(im, extent=extent, cmap=pl.cm.bone_r)<EOL>zoom1.set_xlim(cx-<NUM_LIT:1.0>/<NUM_LIT:6>, cx+<NUM_LIT:1.0>/<NUM_LIT:6>)<EOL>zoom1.set_ylim(cy-<NUM_LIT:1.0>/<NUM_LIT:6>, cy+<NUM_LIT:1.0>/<NUM_LIT:6>)<EOL>zoom1.hexbin(x,y,gridsize=<NUM_LIT:32>, mincnt=<NUM_LIT:5>, cmap=pl.cm.hot)<EOL>zoom1.set_xticks([])<EOL>zoom1.set_yticks([])<EOL>zoom1.hlines(cy-<NUM_LIT:1.0>/<NUM_LIT:6> + <NUM_LIT:1.0>/<NUM_LIT:32>, cx-<NUM_LIT:1.0>/<NUM_LIT:6>+<NUM_LIT>, cx-<NUM_LIT:1.0>/<NUM_LIT:6>+<NUM_LIT>+<NUM_LIT>, lw=<NUM_LIT:3>)<EOL>zoom1.text(cx-<NUM_LIT:1.0>/<NUM_LIT:6> + <NUM_LIT:1.0>/<NUM_LIT>, cy-<NUM_LIT:1.0>/<NUM_LIT:6>+<NUM_LIT>, '<STR_LIT>')<EOL>mark_inset(ax_zoom, zoom1, loc1=<NUM_LIT:2>, loc2=<NUM_LIT:4>, fc=\"<STR_LIT:none>\", ec=\"<STR_LIT>\")<EOL>", "docstring": "Samples x,y,z,r are created by:\nb = s.blocks_particle(#)\nh = runner.sample_state(s, b, stepout=0.05, N=2000, doprint=True)\nz,y,x,r = h.get_histogram().T", "id": "f5746:m6"}
{"signature": "def compare_data_model_residuals(s, tile, data_vmin='<STR_LIT>', data_vmax='<STR_LIT>',<EOL>res_vmin=-<NUM_LIT:0.1>, res_vmax=<NUM_LIT:0.1>, edgepts='<STR_LIT>', do_imshow=True,<EOL>data_cmap=plt.cm.bone, res_cmap=plt.cm.RdBu):", "body": "<EOL>residuals = s.residuals[tile.slicer].squeeze()<EOL>data = s.data[tile.slicer].squeeze()<EOL>model = s.model[tile.slicer].squeeze()<EOL>if data.ndim != <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>im = np.zeros([data.shape[<NUM_LIT:0>], data.shape[<NUM_LIT:1>], <NUM_LIT:4>])<EOL>if data_vmin == '<STR_LIT>':<EOL><INDENT>data_vmin = <NUM_LIT:0.5>*(data.min() + model.min())<EOL><DEDENT>if data_vmax == '<STR_LIT>':<EOL><INDENT>data_vmax = <NUM_LIT:0.5>*(data.max() + model.max())<EOL><DEDENT>upper_mask, center_mask, lower_mask = trisect_image(im.shape, edgepts)<EOL>gm = data_cmap(center_data(model, data_vmin, data_vmax))<EOL>dt = data_cmap(center_data(data, data_vmin, data_vmax))<EOL>rs = res_cmap(center_data(residuals, res_vmin, res_vmax))<EOL>for a in range(<NUM_LIT:4>):<EOL><INDENT>im[:,:,a][upper_mask] = rs[:,:,a][upper_mask]<EOL>im[:,:,a][center_mask] = gm[:,:,a][center_mask]<EOL>im[:,:,a][lower_mask] = dt[:,:,a][lower_mask]<EOL><DEDENT>if do_imshow:<EOL><INDENT>return plt.imshow(im)<EOL><DEDENT>else:<EOL><INDENT>return im<EOL><DEDENT>", "docstring": "Compare the data, model, and residuals of a state.\n\nMakes an image of any 2D slice of a state that compares the data,\nmodel, and residuals. The upper left portion of the image is the raw\ndata, the central portion the model, and the lower right portion the\nimage. Either plots the image using plt.imshow() or returns a\nnp.ndarray of the image pixels for later use.\n\nParameters\n----------\n    st : peri.ImageState object\n        The state to plot.\n    tile : peri.util.Tile object\n        The slice of the image to plot. 
Can be any xy, xz, or yz\n        projection, but it must return a valid 2D slice (the slice is\n        squeezed internally).\n\n    data_vmin : {Float, `calc`}, optional\n        vmin for the imshow for the data and generative model (shared).\n        Default is 'calc' = 0.5(data.min() + model.min())\n    data_vmax : {Float, `calc`}, optional\n        vmax for the imshow for the data and generative model (shared).\n        Default is 'calc' = 0.5(data.max() + model.max())\n    res_vmin : Float, optional\n        vmin for the imshow for the residuals. Default is -0.1\n        Default is 'calc' = 0.5(data.min() + model.min())\n    res_vmax : Float, optional\n        vmax for the imshow for the residuals. Default is +0.1\n    edgepts : {Nested list-like, Float, 'calc'}, optional.\n        The vertices of the triangles which determine the splitting of\n        the image. The vertices are at (image corner, (edge, y), and\n        (x,edge), where edge is the appropriate edge of the image.\n            edgepts[0] : (x,y) points for the upper edge\n            edgepts[1] : (x,y) points for the lower edge\n        where `x` is the coordinate along the image's 0th axis and `y`\n        along the images 1st axis. Default is 'calc,' which calculates\n        edge points by splitting the image into 3 regions of equal\n        area. 
If edgepts is a float scalar, calculates the edge points\n        based on a constant fraction of distance from the edge.\n    do_imshow : Bool\n        If True, imshow's and returns the returned handle.\n        If False, returns the array as a [M,N,4] array.\n    data_cmap : matplotlib colormap instance\n        The colormap to use for the data and model.\n    res_cmap : matplotlib colormap instance\n        The colormap to use for the residuals.\n\nReturns\n-------\n    image : {matplotlib.pyplot.AxesImage, numpy.ndarray}\n        If `do_imshow` == True, the returned handle from imshow.\n        If `do_imshow` == False, an [M,N,4] np.ndarray of the image\n        pixels.", "id": "f5746:m8"}
{"signature": "def sim_crb_diff(std0, std1, N=<NUM_LIT>):", "body": "a = std0*np.random.randn(N, len(std0))<EOL>b = std1*np.random.randn(N, len(std1))<EOL>return a - b<EOL>", "docstring": "each element of std0 should correspond with the element of std1", "id": "f5746:m11"}
{"signature": "def trisect_image(imshape, edgepts='<STR_LIT>'):", "body": "im_x, im_y = np.meshgrid(np.arange(imshape[<NUM_LIT:0>]), np.arange(imshape[<NUM_LIT:1>]),<EOL>indexing='<STR_LIT>')<EOL>if np.size(edgepts) == <NUM_LIT:1>:<EOL><INDENT>f = np.sqrt(<NUM_LIT>/<NUM_LIT>) if edgepts == '<STR_LIT>' else edgepts<EOL>lower_edge = (imshape[<NUM_LIT:0>] * (<NUM_LIT:1>-f),  imshape[<NUM_LIT:1>] * f)<EOL>upper_edge = (imshape[<NUM_LIT:0>] * f,      imshape[<NUM_LIT:1>] * (<NUM_LIT:1>-f))<EOL><DEDENT>else:<EOL><INDENT>upper_edge, lower_edge = edgepts<EOL><DEDENT>lower_slope = lower_edge[<NUM_LIT:1>] / max(float(imshape[<NUM_LIT:0>] - lower_edge[<NUM_LIT:0>]), <NUM_LIT>)<EOL>upper_slope = (imshape[<NUM_LIT:1>] - upper_edge[<NUM_LIT:1>]) / float(upper_edge[<NUM_LIT:0>])<EOL>lower_intercept = -lower_slope * lower_edge[<NUM_LIT:0>]<EOL>upper_intercept = upper_edge[<NUM_LIT:1>]<EOL>lower_mask = im_y < (im_x * lower_slope + lower_intercept)<EOL>upper_mask = im_y > (im_x * upper_slope + upper_intercept)<EOL>center_mask= -(lower_mask | upper_mask)<EOL>return upper_mask, center_mask, lower_mask<EOL>", "docstring": "Returns 3 masks that trisect an image into 3 triangular portions.\n\nParameters\n----------\n    imshape : 2-element list-like of ints\n        The shape of the image. Elements after the first 2 are ignored.\n\n    edgepts : Nested list-like, float, or `calc`, optional.\n        The vertices of the triangles which determine the splitting of\n        the image. The vertices are at (image corner, (edge, y), and\n        (x,edge), where edge is the appropriate edge of the image.\n            edgepts[0] : (x,y) points for the upper edge\n            edgepts[1] : (x,y) points for the lower edge\n        where `x` is the coordinate along the image's 0th axis and `y`\n        along the images 1st axis. Default is 'calc,' which calculates\n        edge points by splitting the image into 3 regions of equal\n        area. 
If edgepts is a float scalar, calculates the edge points\n        based on a constant fraction of distance from the edge.\n\nReturns\n-------\n    upper_mask : numpy.ndarray\n        Boolean array; True in the image's upper  region.\n    center_mask : numpy.ndarray\n        Boolean array; True in the image's center region.\n    lower_mask : numpy.ndarray\n        Boolean array; True in the image's lower  region.", "id": "f5746:m9"}
{"signature": "def local_max_featuring(im, radius=<NUM_LIT>, noise_size=<NUM_LIT:1.>, bkg_size=None,<EOL>minmass=<NUM_LIT:1.>, trim_edge=False):", "body": "if radius <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>filtered = nd.gaussian_filter(im, noise_size, mode='<STR_LIT>')<EOL>if bkg_size is None:<EOL><INDENT>bkg_size = <NUM_LIT:2>*radius<EOL><DEDENT>filtered -= nd.gaussian_filter(filtered, bkg_size, mode='<STR_LIT>')<EOL>footprint = generate_sphere(radius)<EOL>e = nd.maximum_filter(filtered, footprint=footprint)<EOL>mass_im = nd.convolve(filtered, footprint, mode='<STR_LIT>')<EOL>good_im = (e==filtered) * (mass_im > minmass)<EOL>pos = np.transpose(np.nonzero(good_im))<EOL>if trim_edge:<EOL><INDENT>good = np.all(pos > <NUM_LIT:0>, axis=<NUM_LIT:1>) & np.all(pos+<NUM_LIT:1> < im.shape, axis=<NUM_LIT:1>)<EOL>pos = pos[good, :].copy()<EOL><DEDENT>masses = mass_im[pos[:,<NUM_LIT:0>], pos[:,<NUM_LIT:1>], pos[:,<NUM_LIT:2>]].copy()<EOL>return pos, masses<EOL>", "docstring": "Local max featuring to identify bright spherical particles on a\n    dark background.\n\n    Parameters\n    ----------\n        im : numpy.ndarray\n            The image to identify particles in.\n        radius : Float > 0, optional\n            Featuring radius of the particles. Default is 2.5\n        noise_size : Float, optional\n            Size of Gaussian kernel for smoothing out noise. Default is 1.\n        bkg_size : Float or None, optional\n            Size of the Gaussian kernel for removing long-wavelength\n            background. Default is None, which gives `2 * radius`\n        minmass : Float, optional\n            Return only particles with a ``mass > minmass``. Default is 1.\n        trim_edge : Bool, optional\n            Set to True to omit particles identified exactly at the edge\n            of the image. False-positive features frequently occur here\n            because of the reflected bandpass featuring. Default is\n            False, i.e. 
find particles at the edge of the image.\n\n    Returns\n    -------\n        pos, mass : numpy.ndarray\n            Particle positions and masses", "id": "f5748:m8"}
{"signature": "def identify_slab(im, sigma=<NUM_LIT>, region_size=<NUM_LIT:10>, masscut=<NUM_LIT>, asdict=False):", "body": "<EOL>fim = nd.filters.gaussian_filter(im, sigma)<EOL>trc, det = harris_feature(fim, region_size, to_return='<STR_LIT>')<EOL>dnrm = det / (trc*trc)<EOL>trc_cut = otsu_threshold(trc)<EOL>det_cut = otsu_threshold(dnrm)<EOL>slabs = (trc > trc_cut) & (dnrm < det_cut)<EOL>labeled, nslabs = nd.label(slabs)<EOL>masses = [(labeled == i).sum() for i in range(<NUM_LIT:1>, nslabs+<NUM_LIT:1>)]<EOL>good = np.array([m > masscut for m in masses])<EOL>inds = np.nonzero(good)[<NUM_LIT:0>] + <NUM_LIT:1>  <EOL>poses = np.array(nd.measurements.center_of_mass(trc, labeled, inds))<EOL>normals = []<EOL>z = np.arange(im.shape[<NUM_LIT:0>]).reshape(-<NUM_LIT:1>,<NUM_LIT:1>,<NUM_LIT:1>).astype('<STR_LIT:float>')<EOL>y = np.arange(im.shape[<NUM_LIT:1>]).reshape(<NUM_LIT:1>,-<NUM_LIT:1>,<NUM_LIT:1>).astype('<STR_LIT:float>')<EOL>x = np.arange(im.shape[<NUM_LIT:2>]).reshape(<NUM_LIT:1>,<NUM_LIT:1>,-<NUM_LIT:1>).astype('<STR_LIT:float>')<EOL>gim = [nd.sobel(fim, axis=i) for i in range(fim.ndim)]<EOL>for i, p in zip(range(<NUM_LIT:1>, nslabs+<NUM_LIT:1>), poses):<EOL><INDENT>wts = trc * (labeled == i)<EOL>wts /= wts.sum()<EOL>zc, yc, xc = [xi-pi for xi, pi in zip([z,y,x],p.squeeze())]<EOL>cov = [[np.sum(xi*xj*wts) for xi in [zc,yc,xc]] for xj in [zc,yc,xc]]<EOL>vl, vc = np.linalg.eigh(cov)<EOL>normal = vc[:,<NUM_LIT:0>]<EOL>gn = np.sum([n*g[tuple(p.astype('<STR_LIT:int>'))] for g,n in zip(gim, normal)])<EOL>normal *= np.sign(gn)<EOL>normals.append(normal)<EOL><DEDENT>if asdict:<EOL><INDENT>get_theta = lambda n: -np.arctan2(n[<NUM_LIT:1>], -n[<NUM_LIT:0>])<EOL>get_phi = lambda n: np.arcsin(n[<NUM_LIT:2>])<EOL>return [{'<STR_LIT>':p[<NUM_LIT:0>], '<STR_LIT>':(get_theta(n), get_phi(n))}<EOL>for p, n in zip(poses, normals)]<EOL><DEDENT>else:<EOL><INDENT>return poses, np.array(normals)<EOL><DEDENT>", "docstring": "Identifies slabs in an image.\n\nFunctions by running a 
Harris-inspired edge detection on the image,\nthresholding the edge, then clustering.\n\nParameters\n----------\n    im : numpy.ndarray\n        3D array of the image to analyze.\n    sigma : Float, optional\n        Gaussian blurring kernel to remove non-slab features such as\n        noise and particles. Default is 5.\n    region_size : Int, optional\n        The size of region for Harris corner featuring. Default is 10\n    masscut : Float, optional\n        The minimum number of pixels for a feature to be identified as\n        a slab. Default is 1e4; should be smaller for smaller images.\n    asdict : Bool, optional\n        Set to True to return a list of dicts, with keys of ``'theta'``\n        and ``'phi'`` as rotation angles about the x- and z- axes, and\n        of ``'zpos'`` for the z-position, i.e. a list of dicts which\n        can be unpacked into a :class:``peri.comp.objs.Slab``\n\nReturns\n-------\n    [poses, normals] : numpy.ndarray\n        The positions and normals of each slab in the image; ``poses[i]``\n        and ``normals[i]`` are the ``i``th slab. Returned if ``asdict``\n        is False\n    [list]\n        A list of dictionaries. Returned if ``asdict`` is True", "id": "f5748:m14"}
{"signature": "def normalize(im, invert=False, scale=None, dtype=np.float64):", "body": "if dtype not in {np.float16, np.float32, np.float64}:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>out = im.astype('<STR_LIT:float>').copy()<EOL>scale = scale or (<NUM_LIT:0.0>, <NUM_LIT>)<EOL>l, u = (float(i) for i in scale)<EOL>out = (out - l) / (u - l)<EOL>if invert:<EOL><INDENT>out = -out + (out.max() + out.min())<EOL><DEDENT>return out.astype(dtype)<EOL>", "docstring": "Normalize a field to a (min, max) exposure range, default is (0, 255).\n(min, max) exposure values. Invert the image if requested.", "id": "f5748:m6"}
{"signature": "def harris_feature(im, region_size=<NUM_LIT:5>, to_return='<STR_LIT>', scale=<NUM_LIT>):", "body": "ndim = im.ndim<EOL>grads = [nd.sobel(im, axis=i) for i in range(ndim)]<EOL>matrix = np.zeros((ndim, ndim) + im.shape)<EOL>for a in range(ndim):<EOL><INDENT>for b in range(ndim):<EOL><INDENT>matrix[a,b] = nd.filters.gaussian_filter(grads[a]*grads[b],<EOL>region_size)<EOL><DEDENT><DEDENT>if to_return == '<STR_LIT>':<EOL><INDENT>return matrix<EOL><DEDENT>trc = np.trace(matrix, axis1=<NUM_LIT:0>, axis2=<NUM_LIT:1>)<EOL>det = np.linalg.det(matrix.T).T<EOL>if to_return == '<STR_LIT>':<EOL><INDENT>return trc, det<EOL><DEDENT>else:<EOL><INDENT>harris = det - scale*trc*trc<EOL>return harris<EOL><DEDENT>", "docstring": "Harris-motivated feature detection on a d-dimensional image.\n\nParameters\n---------\n    im\n    region_size\n    to_return : {'harris','matrix','trace-determinant'}", "id": "f5748:m13"}
{"signature": "def otsu_threshold(data, bins=<NUM_LIT:255>):", "body": "h0, x0 = np.histogram(data.ravel(), bins=bins)<EOL>h = h0.astype('<STR_LIT:float>') / h0.sum()  <EOL>x = <NUM_LIT:0.5>*(x0[<NUM_LIT:1>:] + x0[:-<NUM_LIT:1>])  <EOL>wk = np.array([h[:i+<NUM_LIT:1>].sum() for i in range(h.size)])  <EOL>mk = np.array([sum(x[:i+<NUM_LIT:1>]*h[:i+<NUM_LIT:1>]) for i in range(h.size)])  <EOL>mt = mk[-<NUM_LIT:1>]  <EOL>sb = (mt*wk - mk)**<NUM_LIT:2> / (wk*(<NUM_LIT:1>-wk) + <NUM_LIT>)  <EOL>ind = sb.argmax()<EOL>return <NUM_LIT:0.5>*(x0[ind] + x0[ind+<NUM_LIT:1>])<EOL>", "docstring": "Otsu threshold on data.\n\nOtsu thresholding [1]_is a method for selecting an intensity value\nfor thresholding an image into foreground and background. The sel-\nected intensity threshold maximizes the inter-class variance.\n\nParameters\n----------\n    data : numpy.ndarray\n        The data to threshold\n    bins : Int or numpy.ndarray, optional\n        Bin edges, as passed to numpy.histogram\n\nReturns\n-------\n    numpy.float\n        The value of the threshold which maximizes the inter-class\n        variance.\n\nNotes\n-----\n    This could be generalized to more than 2 classes.\nReferences\n----------\n    ..[1] N. Otsu, \"A Threshold Selection Method from Gray-level\n        Histograms,\" IEEE Trans. Syst., Man, Cybern., Syst., 9, 1,\n        62-66 (1979)", "id": "f5748:m12"}
{"signature": "def calculate_polychrome_pinhole_psf(x, y, z, normalize=False, kfki=<NUM_LIT>,<EOL>sigkf=<NUM_LIT:0.1>, zint=<NUM_LIT>, nkpts=<NUM_LIT:3>, dist_type='<STR_LIT>', **kwargs):", "body": "<EOL>kfkipts, wts = get_polydisp_pts_wts(kfki, sigkf, dist_type=dist_type,<EOL>nkpts=nkpts)<EOL>rho = np.sqrt(x**<NUM_LIT:2> + y**<NUM_LIT:2>)<EOL>phi = np.arctan2(y, x)<EOL>hsym, hasym = get_hsym_asym(rho, z, zint=zint, get_hdet=False, **kwargs)<EOL>hilm = (hsym + np.cos(<NUM_LIT:2>*phi)*hasym)<EOL>hdet_func = lambda kfki: get_hsym_asym(rho*kfki, z*kfki,<EOL>zint=kfki*zint, get_hdet=True, **kwargs)[<NUM_LIT:0>]<EOL>inner = [wts[a] * hdet_func(kfkipts[a]) for a in range(nkpts)]<EOL>hdet = np.sum(inner, axis=<NUM_LIT:0>)<EOL>if normalize:<EOL><INDENT>hilm /= hilm.sum()<EOL>hdet /= hdet.sum()<EOL><DEDENT>psf = hdet * hilm<EOL>return psf if normalize else psf / psf.sum()<EOL>", "docstring": "Calculates the perfect-pinhole PSF, for a set of points (x,y,z).\n\nParameters\n-----------\n    x : numpy.ndarray\n        The x-coordinate of the PSF in units of 1/ the wavevector of\n        the incoming light.\n    y : numpy.ndarray\n        The y-coordinate.\n    z : numpy.ndarray\n        The z-coordinate.\n    kfki : Float\n        The mean ratio of the outgoing light's wavevector to the incoming\n        light's. Default is 0.89.\n    sigkf : Float\n        Standard deviation of kfki; the distribution of the light values\n        will be approximately kfki +- sigkf.\n    zint : Float\n        The (scalar) distance from the interface, in units of\n        1/k_incoming. Default is 100.0\n    dist_type: The distribution type of the polychromatic light.\n        Can be one of 'laguerre'/'gamma' or 'gaussian.' If 'gaussian'\n        the resulting k-values are taken in absolute value. Default\n        is 'gaussian.'\n    normalize : Bool\n        Set to True to normalize the psf correctly, accounting for\n        intensity variations with depth. 
This will give a psf that does\n        not sum to 1. Default is False.\n\nOther Parameters\n----------------\n    alpha : Float\n        The opening angle of the lens. Default is 1.\n    n2n1 : Float\n        The ratio of the index in the 2nd medium to that in the first.\n        Default is 0.95\n\nReturns\n-------\n    psf : numpy.ndarray, of shape x.shape\n\nComments\n--------\n    (1) The PSF is not necessarily centered on the z=0 pixel, since the\n        calculation includes the shift.\n\n    (2) If you want z-varying illumination of the psf then set\n        normalize=True. This does the normalization by doing:\n            hsym, hasym /= hsym.sum()\n            hdet /= hdet.sum()\n        and then calculating the psf that way. So if you want the\n        intensity to be correct you need to use a large-ish array of\n        roughly equally spaced points. Or do it manually by calling\n        get_hsym_asym()", "id": "f5753:m11"}
{"signature": "def calc_pts_hg(npts=<NUM_LIT:20>):", "body": "pts_hg, wts_hg = np.polynomial.hermite.hermgauss(npts*<NUM_LIT:2>)<EOL>pts_hg = pts_hg[npts:]<EOL>wts_hg = wts_hg[npts:] * np.exp(pts_hg*pts_hg)<EOL>return pts_hg, wts_hg<EOL>", "docstring": "Returns Hermite-Gauss quadrature points for even functions", "id": "f5753:m1"}
{"signature": "def calculate_linescan_ilm_psf(y,z, polar_angle=<NUM_LIT:0.>, nlpts=<NUM_LIT:1>,<EOL>pinhole_width=<NUM_LIT:1>, use_laggauss=False, **kwargs):", "body": "if use_laggauss:<EOL><INDENT>x_vals, wts = calc_pts_lag()<EOL><DEDENT>else:<EOL><INDENT>x_vals, wts = calc_pts_hg()<EOL><DEDENT>xg, yg, zg = [np.zeros( list(y.shape) + [x_vals.size] ) for a in range(<NUM_LIT:3>)]<EOL>hilm = np.zeros(xg.shape)<EOL>for a in range(x_vals.size):<EOL><INDENT>xg[...,a] = x_vals[a]<EOL>yg[...,a] = y.copy()<EOL>zg[...,a] = z.copy()<EOL><DEDENT>y_pinhole, wts_pinhole = np.polynomial.hermite.hermgauss(nlpts)<EOL>y_pinhole *= np.sqrt(<NUM_LIT:2>)*pinhole_width<EOL>wts_pinhole /= np.sqrt(np.pi)<EOL>for yp, wp in zip(y_pinhole, wts_pinhole):<EOL><INDENT>rho = np.sqrt(xg*xg + (yg-yp)*(yg-yp))<EOL>phi = np.arctan2(yg,xg)<EOL>hsym, hasym = get_hsym_asym(rho,zg,get_hdet = False, **kwargs)<EOL>hilm += wp*(hsym + np.cos(<NUM_LIT:2>*(phi-polar_angle))*hasym)<EOL><DEDENT>for a in range(x_vals.size):<EOL><INDENT>hilm[...,a] *= wts[a]<EOL><DEDENT>return hilm.sum(axis=-<NUM_LIT:1>)*<NUM_LIT><EOL>", "docstring": "Calculates the illumination PSF for a line-scanning confocal with the\nconfocal line oriented along the x direction.\n\nParameters\n----------\n    y : numpy.ndarray\n        The y points (in-plane, perpendicular to the line direction)\n        at which to evaluate the illumination PSF, in units of 1/k.\n        Arbitrary shape.\n    z : numpy.ndarray\n        The z points (optical axis) at which to evaluate the illum-\n        ination PSF, in units of 1/k. Must be the same shape as `y`\n    polar_angle : Float, optional\n        The angle of the illuminating light's polarization with\n        respect to the line's orientation along x. Default is 0.\n    pinhole_width : Float, optional\n        The width of the geometric image of the line projected onto\n        the sample, in units of 1/k. Default is 1. The perfect line\n        image is assumed to be a Gaussian. 
If `nlpts` is set to 1,\n        the line will always be of zero width.\n    nlpts : Int, optional\n        The number of points to use for Hermite-gauss quadrature over\n        the line's width. Default is 1, corresponding to a zero-width\n        line.\n    use_laggauss : Bool, optional\n        Set to True to use a more-accurate sinh'd Laguerre-Gauss\n        quadrature for integration over the line's length (more accurate\n        in the same amount of time). Default is False for backwards\n        compatibility.  FIXME what did we do here?\n\nOther Parameters\n----------------\n    alpha : Float, optional\n        The acceptance angle of the lens, on (0,pi/2). Default is 1.\n    zint : Float, optional\n        The distance of the len's unaberrated focal point from the\n        optical interface, in units of 1/k. Default is 100.\n    n2n1 : Float, optional\n        The ratio n2/n1 of the index mismatch between the sample\n        (index n2) and the optical train (index n1). Must be on\n        [0,inf) but should be near 1. Default is 0.95\n\nReturns\n-------\n    hilm : numpy.ndarray\n        The line illumination, of the same shape as y and z.", "id": "f5753:m13"}
{"signature": "def get_K(rho, z, alpha=<NUM_LIT:1.0>, zint=<NUM_LIT>, n2n1=<NUM_LIT>, get_hdet=False, K=<NUM_LIT:1>,<EOL>Kprefactor=None, return_Kprefactor=False, npts=<NUM_LIT:20>, **kwargs):", "body": "<EOL>if type(rho) != np.ndarray or type(z) != np.ndarray or (rho.shape != z.shape):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>pts, wts = np.polynomial.legendre.leggauss(npts)<EOL>n1n2 = <NUM_LIT:1.0>/n2n1<EOL>rr = np.ravel(rho)<EOL>zr = np.ravel(z)<EOL>cos_theta = <NUM_LIT:0.5>*(<NUM_LIT:1>-np.cos(alpha))*pts+<NUM_LIT:0.5>*(<NUM_LIT:1>+np.cos(alpha))<EOL>if Kprefactor is None:<EOL><INDENT>Kprefactor = get_Kprefactor(z, cos_theta, zint=zint,n2n1=n2n1,get_hdet=get_hdet, **kwargs)<EOL><DEDENT>if K==<NUM_LIT:1>:<EOL><INDENT>part_1 = j0(np.outer(rr,np.sqrt(<NUM_LIT:1>-cos_theta**<NUM_LIT:2>)))*np.outer(np.ones_like(rr), <NUM_LIT:0.5>*(get_taus(cos_theta,n2n1=n2n1)+get_taup(cos_theta,n2n1=n2n1)*csqrt(<NUM_LIT:1>-n1n2**<NUM_LIT:2>*(<NUM_LIT:1>-cos_theta**<NUM_LIT:2>))))<EOL>integrand = Kprefactor * part_1<EOL><DEDENT>elif K==<NUM_LIT:2>:<EOL><INDENT>part_2=j2(np.outer(rr,np.sqrt(<NUM_LIT:1>-cos_theta**<NUM_LIT:2>)))*np.outer(np.ones_like(rr),<NUM_LIT:0.5>*(get_taus(cos_theta,n2n1=n2n1)-get_taup(cos_theta,n2n1=n2n1)*csqrt(<NUM_LIT:1>-n1n2**<NUM_LIT:2>*(<NUM_LIT:1>-cos_theta**<NUM_LIT:2>))))<EOL>integrand = Kprefactor * part_2<EOL><DEDENT>elif K==<NUM_LIT:3>:<EOL><INDENT>part_3=j1(np.outer(rho,np.sqrt(<NUM_LIT:1>-cos_theta**<NUM_LIT:2>)))*np.outer(np.ones_like(rr), n1n2*get_taup(cos_theta,n2n1=n2n1)*np.sqrt(<NUM_LIT:1>-cos_theta**<NUM_LIT:2>))<EOL>integrand = Kprefactor * part_3<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>big_wts=np.outer(np.ones_like(rr), wts)<EOL>kint = (big_wts*integrand).sum(axis=<NUM_LIT:1>) * <NUM_LIT:0.5>*(<NUM_LIT:1>-np.cos(alpha))<EOL>if return_Kprefactor:<EOL><INDENT>return kint.reshape(rho.shape), Kprefactor<EOL><DEDENT>else:<EOL><INDENT>return kint.reshape(rho.shape)<EOL><DEDENT>", "docstring": "Calculates 
one of three electric field integrals.\n\nInternal function for calculating point spread functions. Returns\none of three electric field integrals that describe the electric\nfield near the focus of a lens; these integrals appear in Hell's psf\ncalculation.\n\nParameters\n----------\n    rho : numpy.ndarray\n        Rho in cylindrical coordinates, in units of 1/k.\n    z : numpy.ndarray\n        Z in cylindrical coordinates, in units of 1/k. `rho` and\n        `z` must be the same shape\n\n    alpha : Float, optional\n        The acceptance angle of the lens, on (0,pi/2). Default is 1.\n    zint : Float, optional\n        The distance of the len's unaberrated focal point from the\n        optical interface, in units of 1/k. Default is 100.\n    n2n1 : Float, optional\n        The ratio n2/n1 of the index mismatch between the sample\n        (index n2) and the optical train (index n1). Must be on\n        [0,inf) but should be near 1. Default is 0.95\n    get_hdet : Bool, optional\n        Set to True to get the detection portion of the psf; False\n        to get the illumination portion of the psf. Default is True\n    K : {1, 2, 3}, optional\n        Which of the 3 integrals to evaluate. Default is 1\n    Kprefactor : numpy.ndarray or None\n        This array is calculated internally and optionally returned;\n        pass it back to avoid recalculation and increase speed. Default\n        is None, i.e. calculate it internally.\n    return_Kprefactor : Bool, optional\n        Set to True to also return the Kprefactor (parameter above)\n        to speed up the calculation for the next values of K. Default\n        is False\n    npts : Int, optional\n        The number of points to use for Gauss-Legendre quadrature of\n        the integral. 
Default is 20, which is a good number for x,y,z\n        less than 100 or so.\n\nReturns\n-------\n    kint : numpy.ndarray\n        The integral K_i; rho.shape numpy.array\n    [, Kprefactor] : numpy.ndarray\n        The prefactor that is independent of which integral is being\n        calculated but does depend on the parameters; can be passed\n        back to the function for speed.\n\nNotes\n-----\n    npts=20 gives double precision (no difference between 20, 30, and\n    doing all the integrals with scipy.quad). The integrals are only\n    over the acceptance angle of the lens, so for moderate x,y,z they\n    don't vary too rapidly. For x,y,z, zint large compared to 100, a\n    higher npts might be necessary.", "id": "f5753:m7"}
{"signature": "def j2(x):", "body": "to_return = <NUM_LIT>/(x+<NUM_LIT>)*j1(x) - j0(x)<EOL>to_return[x==<NUM_LIT:0>] = <NUM_LIT:0><EOL>return to_return<EOL>", "docstring": "A fast j2 defined in terms of other special functions", "id": "f5753:m0"}
{"signature": "def get_Kprefactor(z, cos_theta, zint=<NUM_LIT>, n2n1=<NUM_LIT>, get_hdet=False,<EOL>**kwargs):", "body": "phase = f_theta(cos_theta, zint, z, n2n1=n2n1, **kwargs)<EOL>to_return = np.exp(-<NUM_LIT>*phase)<EOL>if not get_hdet:<EOL><INDENT>to_return *= np.outer(np.ones_like(z),np.sqrt(cos_theta))<EOL><DEDENT>return to_return<EOL>", "docstring": "Returns a prefactor in the electric field integral.\n\nThis is an internal function called by get_K. The returned prefactor\nin the integrand is independent of which integral is being called;\nit is a combination of the exp(1j*phase) and apodization.\n\nParameters\n----------\n    z : numpy.ndarray\n        The values of z (distance along optical axis) at which to\n        calculate the prefactor. Size is unrelated to the size of\n        `cos_theta`\n    cos_theta : numpy.ndarray\n        The values of cos(theta) (i.e. position on the incoming\n        focal spherical wavefront) at which to calculate the\n        prefactor. Size is unrelated to the size of `z`\n    zint : Float, optional\n        The position of the optical interface, in units of 1/k.\n        Default is 100.\n    n2n1 : Float, optional\n        The ratio of the index mismatch between the optics (n1) and\n        the sample (n2). Default is 0.95\n    get_hdet : Bool, optional\n        Set to True to calculate the detection prefactor vs the\n        illumination prefactor (i.e. False to include apodization).\n        Default is False\n\nReturns\n-------\n    numpy.ndarray\n        The prefactor, of size [`z.size`, `cos_theta.size`], sampled\n        at the values [`z`, `cos_theta`]", "id": "f5753:m6"}
{"signature": "def get_taus(cos_theta, n2n1=<NUM_LIT>):", "body": "return <NUM_LIT>/(<NUM_LIT:1>+csqrt(<NUM_LIT:1>+(n2n1**<NUM_LIT:2>-<NUM_LIT:1>)*cos_theta**-<NUM_LIT:2>))<EOL>", "docstring": "Calculates the Fresnel reflectivity for s-polarized light incident on an\ninterface with index ration n2n1.\n\nParameters\n----------\n    cos_theta : Float or numpy.ndarray\n        The _cosine_ of the angle of the incoming light. Float.\n    n2n1 : Float, optional\n        The ratio n2/n1 of the 2nd material's index n2 to the first's n1\n        Default is 0.95\n\nReturns\n-------\n    Float or numpy.ndarray\n        The reflectivity, in the same type (ndarray or Float) and\n        shape as cos_theta", "id": "f5753:m4"}
{"signature": "def f_theta(cos_theta, zint, z, n2n1=<NUM_LIT>, sph6_ab=None, **kwargs):", "body": "wvfront = (np.outer(np.ones_like(z)*zint, cos_theta) -<EOL>np.outer(zint+z, csqrt(n2n1**<NUM_LIT:2>-<NUM_LIT:1>+cos_theta**<NUM_LIT:2>)))<EOL>if (sph6_ab is not None) and (not np.isnan(sph6_ab)):<EOL><INDENT>sec2_theta = <NUM_LIT:1.0>/(cos_theta*cos_theta)<EOL>wvfront += sph6_ab * (sec2_theta-<NUM_LIT:1>)*(sec2_theta-<NUM_LIT:2>)*cos_theta<EOL><DEDENT>if wvfront.dtype == np.dtype('<STR_LIT>'):<EOL><INDENT>wvfront.imag = -np.abs(wvfront.imag)<EOL><DEDENT>return wvfront<EOL>", "docstring": "Returns the wavefront aberration for an aberrated, defocused lens.\n\nCalculates the portions of the wavefront distortion due to z, theta\nonly, for a lens with defocus and spherical aberration induced by\ncoverslip mismatch. (The rho portion can be analytically integrated\nto Bessels.)\n\nParameters\n----------\n    cos_theta : numpy.ndarray.\n        The N values of cos(theta) at which to compute f_theta.\n    zint : Float\n        The position of the lens relative to the interface.\n    z : numpy.ndarray\n        The M z-values to compute f_theta at. `z.size` is unrelated\n        to `cos_theta.size`\n    n2n1: Float, optional\n        The ratio of the index of the immersed medium to the optics.\n        Default is 0.95\n    sph6_ab : Float or None, optional\n        Set sph6_ab to a nonzero value to add residual 6th-order\n        spherical aberration that is proportional to sph6_ab. Default\n        is None (i.e. doesn't calculate).\n\nReturns\n-------\n    wvfront : numpy.ndarray\n        The aberrated wavefront, as a function of theta and z.\n        Shape is [z.size, cos_theta.size]", "id": "f5753:m3"}
{"signature": "def calculate_linescan_psf(x, y, z, normalize=False, kfki=<NUM_LIT>, zint=<NUM_LIT>,<EOL>polar_angle=<NUM_LIT:0.>, wrap=True, **kwargs):", "body": "<EOL>if wrap:<EOL><INDENT>xpts = vec_to_halfvec(x)<EOL>ypts = vec_to_halfvec(y)<EOL>x3, y3, z3 = np.meshgrid(xpts, ypts, z, indexing='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>x3,y3,z3 = np.meshgrid(x, y, z, indexing='<STR_LIT>')<EOL><DEDENT>rho3 = np.sqrt(x3*x3 + y3*y3)<EOL>if wrap:<EOL><INDENT>y2,z2 = np.meshgrid(ypts, z, indexing='<STR_LIT>')<EOL>hilm0 = calculate_linescan_ilm_psf(y2, z2, zint=zint,<EOL>polar_angle=polar_angle, **kwargs)<EOL>if ypts[<NUM_LIT:0>] == <NUM_LIT:0>:<EOL><INDENT>hilm = np.append(hilm0[-<NUM_LIT:1>:<NUM_LIT:0>:-<NUM_LIT:1>], hilm0, axis=<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>hilm = np.append(hilm0[::-<NUM_LIT:1>], hilm0, axis=<NUM_LIT:0>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>y2,z2 = np.meshgrid(y, z, indexing='<STR_LIT>')<EOL>hilm = calculate_linescan_ilm_psf(y2, z2, zint=zint,<EOL>polar_angle=polar_angle, **kwargs)<EOL><DEDENT>if wrap:<EOL><INDENT>func = lambda *args: get_hsym_asym(rho3*kfki, z3*kfki, zint=kfki*zint,<EOL>get_hdet=True, **kwargs)[<NUM_LIT:0>]<EOL>hdet = wrap_and_calc_psf(xpts, ypts, z, func)<EOL><DEDENT>else:<EOL><INDENT>hdet, toss = get_hsym_asym(rho3*kfki, z3*kfki, zint=kfki*zint,<EOL>get_hdet=True, **kwargs)<EOL><DEDENT>if normalize:<EOL><INDENT>hilm /= hilm.sum()<EOL>hdet /= hdet.sum()<EOL><DEDENT>for a in range(x.size):<EOL><INDENT>hdet[a] *= hilm<EOL><DEDENT>return hdet if normalize else hdet / hdet.sum()<EOL>", "docstring": "Calculates the point spread function of a line-scanning confocal.\n\nMake x,y,z  __1D__ numpy.arrays, with x the direction along the\nscan line. (to make the calculation faster since I dont' need the line\nilm for each x).\n\nParameters\n----------\n    x : numpy.ndarray\n        _One_dimensional_ array of the x grid points (along the line\n        illumination) at which to evaluate the psf. 
In units of\n        1/k_incoming.\n    y : numpy.ndarray\n        _One_dimensional_ array of the y grid points (in plane,\n        perpendicular to the line illumination) at which to evaluate\n        the psf. In units of 1/k_incoming.\n    z : numpy.ndarray\n        _One_dimensional_ array of the z grid points (along the\n        optical axis) at which to evaluate the psf. In units of\n        1/k_incoming.\n    normalize : Bool, optional\n        Set to True to include the effects of PSF normalization on\n        the image intensity. Default is False.\n    kfki : Float, optional\n        The ratio of the final light's wavevector to the incoming.\n        Default is 0.889\n    zint : Float, optional\n        The position of the optical interface, in units of 1/k_incoming\n        Default is 100.\n    wrap : Bool, optional\n        If True, wraps the psf calculation for speed, assuming that\n        the input x, y are regularly-spaced points. If x,y are not\n        regularly spaced then `wrap` must be set to False. Default is True.\n    polar_angle : Float, optional\n        The polarization angle of the light (radians) with respect to\n        the line direction (x). Default is 0.\n\nOther Parameters\n----------------\n    alpha : Float\n        The opening angle of the lens. Default is 1.\n    n2n1 : Float\n        The ratio of the index in the 2nd medium to that in the first.\n        Default is 0.95\n\nReturns\n-------\n    numpy.ndarray\n        A 3D- numpy.array of the point-spread function. Indexing is\n        psf[x,y,z]; shape is [x.size, y,size, z.size]", "id": "f5753:m14"}
{"signature": "def calc_pts_lag(npts=<NUM_LIT:20>):", "body": "scl = { <NUM_LIT:15>:<NUM_LIT>,<EOL><NUM_LIT:20>:<NUM_LIT>,<EOL><NUM_LIT>:<NUM_LIT>}[npts]<EOL>pts0, wts0 = np.polynomial.laguerre.laggauss(npts)<EOL>pts = np.sinh(pts0*scl)<EOL>wts = scl*wts0*np.cosh(pts0*scl)*np.exp(pts0)<EOL>return pts, wts<EOL>", "docstring": "Returns Gauss-Laguerre quadrature points rescaled for line scan integration\n\nParameters\n----------\n    npts : {15, 20, 25}, optional\n        The number of points to\n\nNotes\n-----\n    The scale is set internally as the best rescaling for a line scan\n    integral; it was checked numerically for the allowed npts.\n    Acceptable pts/scls/approximate line integral scan error:\n    (pts,   scl  )      :         ERR\n    ------------------------------------\n    (15, 0.072144)      :       0.002193\n    (20, 0.051532)      :       0.001498\n    (25, 0.043266)      :       0.001209\n\n    The previous HG(20) error was ~0.13ish", "id": "f5753:m2"}
{"signature": "def wrap_and_calc_psf(xpts, ypts, zpts, func, **kwargs):", "body": "<EOL>for t in [xpts,ypts,zpts]:<EOL><INDENT>if len(t.shape) != <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>dx = <NUM_LIT:1> if xpts[<NUM_LIT:0>]==<NUM_LIT:0> else <NUM_LIT:0><EOL>dy = <NUM_LIT:1> if ypts[<NUM_LIT:0>]==<NUM_LIT:0> else <NUM_LIT:0><EOL>xg,yg,zg = np.meshgrid(xpts,ypts,zpts, indexing='<STR_LIT>')<EOL>xs, ys, zs = [ pts.size for pts in [xpts,ypts,zpts] ]<EOL>to_return = np.zeros([<NUM_LIT:2>*xs-dx, <NUM_LIT:2>*ys-dy, zs])<EOL>up_corner_psf = func(xg,yg,zg, **kwargs)<EOL>to_return[xs-dx:,ys-dy:,:] = up_corner_psf.copy()                     <EOL>if dx == <NUM_LIT:0>:<EOL><INDENT>to_return[:xs-dx,ys-dy:,:] = up_corner_psf[::-<NUM_LIT:1>,:,:].copy()       <EOL><DEDENT>else:<EOL><INDENT>to_return[:xs-dx,ys-dy:,:] = up_corner_psf[-<NUM_LIT:1>:<NUM_LIT:0>:-<NUM_LIT:1>,:,:].copy()    <EOL><DEDENT>if dy == <NUM_LIT:0>:<EOL><INDENT>to_return[xs-dx:,:ys-dy,:] = up_corner_psf[:,::-<NUM_LIT:1>,:].copy()       <EOL><DEDENT>else:<EOL><INDENT>to_return[xs-dx:,:ys-dy,:] = up_corner_psf[:,-<NUM_LIT:1>:<NUM_LIT:0>:-<NUM_LIT:1>,:].copy()    <EOL><DEDENT>if (dx == <NUM_LIT:0>) and (dy == <NUM_LIT:0>):<EOL><INDENT>to_return[:xs-dx,:ys-dy,:] = up_corner_psf[::-<NUM_LIT:1>,::-<NUM_LIT:1>,:].copy()    <EOL><DEDENT>elif (dx == <NUM_LIT:0>) and (dy != <NUM_LIT:0>):<EOL><INDENT>to_return[:xs-dx,:ys-dy,:] = up_corner_psf[::-<NUM_LIT:1>,-<NUM_LIT:1>:<NUM_LIT:0>:-<NUM_LIT:1>,:].copy() <EOL><DEDENT>elif (dy == <NUM_LIT:0>) and (dx != <NUM_LIT:0>):<EOL><INDENT>to_return[:xs-dx,:ys-dy,:] = up_corner_psf[-<NUM_LIT:1>:<NUM_LIT:0>:-<NUM_LIT:1>,::-<NUM_LIT:1>,:].copy() <EOL><DEDENT>else: <EOL><INDENT>to_return[:xs-dx,:ys-dy,:] = up_corner_psf[-<NUM_LIT:1>:<NUM_LIT:0>:-<NUM_LIT:1>,-<NUM_LIT:1>:<NUM_LIT:0>:-<NUM_LIT:1>,:].copy()<EOL><DEDENT>return to_return<EOL>", "docstring": "Wraps a point-spread function in x and y.\n\nSpeeds up psf calculations by a factor of 4 for free / 
some broadcasting\nby exploiting the x->-x, y->-y symmetry of a psf function. Pass x and y\nas the positive (say) values of the coordinates at which to evaluate func,\nand it will return the function sampled at [x[::-1]] + x. Note it is not\nwrapped in z.\n\nParameters\n----------\n    xpts : numpy.ndarray\n        1D N-element numpy.array of the x-points to evaluate func at.\n    ypts : numpy.ndarray\n        y-points to evaluate func at.\n    zpts : numpy.ndarray\n        z-points to evaluate func at.\n    func : function\n        The function to evaluate and wrap around. Syntax must be\n        func(x,y,z, **kwargs)\n    **kwargs : Any parameters passed to the function.\n\nOutputs\n-------\n    to_return : numpy.ndarray\n        The wrapped and calculated psf, of shape\n        [2*x.size - x0, 2*y.size - y0, z.size], where x0=1 if x[0]=0, etc.\n\nNotes\n-----\nThe coordinates should be something like numpy.arange(start, stop, diff),\nwith start near 0. If x[0]==0, all of x is calcualted but only x[1:]\nis wrapped (i.e. it works whether or not x[0]=0).\n\nThis doesn't work directly for a linescan psf because the illumination\nportion is not like a grid. However, the illumination and detection\nare already combined with wrap_and_calc in calculate_linescan_psf etc.", "id": "f5753:m16"}
{"signature": "@memoize()<EOL><INDENT>def _skew(self, x, z, d=<NUM_LIT:0>):<DEDENT>", "body": "<EOL>kval = (np.tanh(self._poly(z, self._kurtosis_coeffs(d)))+<NUM_LIT:1>)/<NUM_LIT><EOL>bdpoly = np.array([<EOL>-<NUM_LIT>,  <NUM_LIT>, -<NUM_LIT>,<EOL>-<NUM_LIT>, <NUM_LIT>, <NUM_LIT><EOL>])<EOL>top = np.polyval(bdpoly, kval)<EOL>skew = self._poly(z, self._skew_coeffs(d))<EOL>skewval = top*(np.tanh(skew) + <NUM_LIT:1>) - top<EOL>return skewval*(<NUM_LIT:3>*x - x**<NUM_LIT:3>)<EOL>", "docstring": "returns the kurtosis parameter for direction d, d=0 is rho, d=1 is z", "id": "f5755:c8:m7"}
{"signature": "@memoize()<EOL><INDENT>def _kurtosis(self, x, z, d=<NUM_LIT:0>):<DEDENT>", "body": "val = self._poly(z, self._kurtosis_coeffs(d))<EOL>return (np.tanh(val)+<NUM_LIT:1>)/<NUM_LIT>*(<NUM_LIT:3> - <NUM_LIT:6>*x**<NUM_LIT:2> + x**<NUM_LIT:4>)<EOL>", "docstring": "returns the kurtosis parameter for direction d, d=0 is rho, d=1 is z", "id": "f5755:c8:m8"}
{"signature": "def randomize_parameters(self, ptp=<NUM_LIT>, fourier=False, vmin=None, vmax=None):", "body": "if vmin is not None and vmax is not None:<EOL><INDENT>ptp = vmax - vmin<EOL><DEDENT>elif vmax is not None and vmin is None:<EOL><INDENT>vmin = vmax - ptp<EOL><DEDENT>elif vmin is not None and vmax is None:<EOL><INDENT>vmax = vmin + ptp<EOL><DEDENT>else:<EOL><INDENT>vmax = <NUM_LIT:1.0><EOL>vmin = vmax - ptp<EOL><DEDENT>self.set_values(self.category+'<STR_LIT>', <NUM_LIT:1.0>)<EOL>self.set_values(self.category+'<STR_LIT>', <NUM_LIT:0.0>)<EOL>for k, v in iteritems(self.poly_params):<EOL><INDENT>norm = (self.zorder + <NUM_LIT:1.0>)*<NUM_LIT:2><EOL>self.set_values(k, ptp*(np.random.rand() - <NUM_LIT:0.5>) / norm)<EOL><DEDENT>for i, p in enumerate(self.barnes_params):<EOL><INDENT>N = len(p)<EOL>if fourier:<EOL><INDENT>t = ((np.random.rand(N)-<NUM_LIT:0.5>) + <NUM_LIT>*(np.random.rand(N)-<NUM_LIT:0.5>))/(np.arange(N)+<NUM_LIT:1>)<EOL>q = np.real(np.fft.ifftn(t)) / (i+<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>t = ptp*np.sqrt(N)*(np.random.rand(N)-<NUM_LIT:0.5>)<EOL>q = np.cumsum(t) / (i+<NUM_LIT:1>)<EOL><DEDENT>q = ptp * q / q.ptp() / len(self.barnes_params)<EOL>q -= q.mean()<EOL>self.set_values(p, q)<EOL><DEDENT>self._norm_stat = [ptp, vmin]<EOL>if self.shape:<EOL><INDENT>self.initialize()<EOL><DEDENT>if self._parent:<EOL><INDENT>param = self.category+'<STR_LIT>'<EOL>self.trigger_update(param, self.get_values(param))<EOL><DEDENT>", "docstring": "Create random parameters for this ILM that mimic experiments\nas closely as possible without real assumptions.", "id": "f5756:c6:m7"}
{"signature": "def __init__(self, npts=(<NUM_LIT>,<NUM_LIT:20>), zorder=<NUM_LIT:7>, op='<STR_LIT:*>', barnes_dist=<NUM_LIT>,<EOL>barnes_clip_size=<NUM_LIT:3>, category='<STR_LIT>', shape=None,<EOL>float_precision=np.float64):", "body": "super(BarnesXYLegPolyZ, self).__init__(npts=npts, zorder=zorder,<EOL>op=op, barnes_dist=barnes_dist, barnes_clip_size=barnes_clip_size,<EOL>local_updates=True, category=category, shape=shape,<EOL>float_precision=float_precision)<EOL>", "docstring": "A Barnes interpolant. This one is of the form\n\n.. math::\n    I = \\\\left[1 + B(x,y) \\\\right]  (1 + z q(z)) + c\n\nwhere B is a Barnes interpolants and q is a polynomial strictly in z.\nAdditionally.... operation multiply or x for poly?.\nAlways uses local updates, since evaluating a 2D barnes for an entire\nimage can be slow.\n\nParameters\n----------\nshape : iterable\n    size of the field in pixels, needs to be padded shape\n\nnpts : 2-element tuple of ints, optional\n    Number of control points used for the Barnes interpolant\n    in x & y. Default is (40,20)\n\nzorder : integer\n    Number of orders for the z-polynomial.\n\nop : string\n    The operation to perform between Barnes and LegPoly, '*' or '+'.\n\nbarnes_dist : float\n    Fractional distance to use for the barnes interpolator\n\nlocal_updates : boolean\n    Whether to perform local updates on the ILM\n\nfloat_precision : numpy float datatype\n    One of numpy.float16, numpy.float32, numpy.float64; precision\n    for precomputed arrays. Default is np.float64; make it 16 or 32\n    to save memory.", "id": "f5756:c7:m0"}
{"signature": "def __init__(self, npts=(<NUM_LIT>,<NUM_LIT:20>), zorder=<NUM_LIT:7>, op='<STR_LIT:*>', barnes_dist=<NUM_LIT>,<EOL>barnes_clip_size=<NUM_LIT:3>, local_updates=True, category='<STR_LIT>', shape=None,<EOL>float_precision=np.float64, donorm=True):", "body": "self.donorm = donorm<EOL>super(BarnesStreakLegPoly2P1D, self).__init__(npts=npts, zorder=zorder,<EOL>op=op, barnes_dist=barnes_dist, barnes_clip_size=barnes_clip_size,<EOL>local_updates=local_updates, category=category, shape=shape,<EOL>float_precision=float_precision)<EOL>", "docstring": "A Barnes interpolant. This one is of the form\n\n.. math::\n    I = \\\\left[1 + \\\\left(\\\\sum b_k(x) (o) L_k(y)\\\\right)\\\\right]  (1 + z q(z)) + c\n\nwhere b_k are independent barnes interpolants and L_k are legendre\npolynomials. q is a polynomial strictly in z. Additionally, the\noperation (o) is settable.\n\nParameters\n----------\nshape : iterable\n    size of the field in pixels, needs to be padded shape\n\nnpts : tuple of ints, optional\n    Number of control points used for the Barnes interpolant b_k\n    in the x-y sum. Default is (40,20)\n\nzorder : integer\n    Number of orders for the z-polynomial.\n\nop : string\n    The operation to perform between Barnes and LegPoly, '*' or '+'.\n\nbarnes_dist : float\n    Fractional distance to use for the barnes interpolator\n\nlocal_updates : boolean\n    Whether to perform local updates on the ILM\n\nfloat_precision : numpy float datatype\n    One of numpy.float16, numpy.float32, numpy.float64; precision\n    for precomputed arrays. Default is np.float64; make it 16 or 32\n    to save memory.\n\ndonorm : Bool\n    Whether or not to normalize the Barnes interpolation\n    (compatibility patch). Use True, i.e. normalize the Barnes\n    interpolant. Old version is False. Default is True.", "id": "f5756:c6:m0"}
{"signature": "def _barnes_val(self):", "body": "return self._barnes(self.b_out)<EOL>", "docstring": "Returns the raveled values of the barnes on the field", "id": "f5756:c7:m3"}
{"signature": "def _kpad(self, field, finalshape, zpad=False, norm=True):", "body": "currshape = np.array(field.shape)<EOL>if any(finalshape < currshape):<EOL><INDENT>raise IndexError(\"<STR_LIT>\")<EOL><DEDENT>d = finalshape - currshape<EOL>o = d % <NUM_LIT:2><EOL>d = np.floor_divide(d, <NUM_LIT:2>)<EOL>if not zpad:<EOL><INDENT>o[<NUM_LIT:0>] = <NUM_LIT:0><EOL><DEDENT>axes = None<EOL>pad = tuple((d[i]+o[i],d[i]) for i in [<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:2>])<EOL>rpsf = np.pad(field, pad, mode='<STR_LIT>', constant_values=<NUM_LIT:0>)<EOL>rpsf = np.fft.ifftshift(rpsf, axes=axes)<EOL>kpsf = fft.rfftn(rpsf, **fftkwargs)<EOL>if norm:<EOL><INDENT>kpsf /= kpsf[<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:0>]<EOL><DEDENT>return kpsf<EOL>", "docstring": "fftshift and pad the field with zeros until it has size finalshape.\nif zpad is off, then no padding is put on the z direction. returns\nthe fourier transform of the field", "id": "f5757:c0:m16"}
{"signature": "def pack_args(self):", "body": "mapper = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>}<EOL>bads = [self.zscale, '<STR_LIT>']<EOL>d = {}<EOL>for k,v in iteritems(mapper):<EOL><INDENT>if k in self.param_dict:<EOL><INDENT>d[v] = self.param_dict[k]<EOL><DEDENT><DEDENT>d.update({<EOL>'<STR_LIT>': self.polar_angle,<EOL>'<STR_LIT>': self.normalize,<EOL>'<STR_LIT>':self.use_J1<EOL>})<EOL>if self.polychromatic:<EOL><INDENT>d.update({'<STR_LIT>': self.nkpts})<EOL>d.update({'<STR_LIT>': self.k_dist})<EOL><DEDENT>if self.do_pinhole:<EOL><INDENT>d.update({'<STR_LIT>': self.num_line_pts})<EOL><DEDENT>return d<EOL>", "docstring": "Pack the parameters into the form necessary for the integration\nroutines above.  For example, packs for calculate_linescan_psf", "id": "f5757:c2:m1"}
{"signature": "def characterize_psf(self):", "body": "l,u = max(self.zrange[<NUM_LIT:0>], self.param_dict['<STR_LIT>']), self.zrange[<NUM_LIT:1>]<EOL>size_l, drift_l = self.measure_size_drift(l, size=self.support)<EOL>size_u, drift_u = self.measure_size_drift(u, size=self.support)<EOL>self.drift_poly = np.polyfit([l, u], [drift_l, drift_u], <NUM_LIT:1>)<EOL>", "docstring": "Get support size and drift polynomial for current set of params", "id": "f5757:c4:m1"}
{"signature": "def measure_size_drift(self, z, size=<NUM_LIT>, zoffset=<NUM_LIT:0.>):", "body": "drift = <NUM_LIT:0.0><EOL>for i in range(self.measurement_iterations):<EOL><INDENT>psf, vec = self.psf_slice(z, size=size, zoffset=zoffset+drift)<EOL>psf = psf / psf.sum()<EOL>drift += moment(psf, vec[<NUM_LIT:0>], order=<NUM_LIT:1>)<EOL>psize = [moment(psf, j, order=<NUM_LIT:2>) for j in vec]<EOL><DEDENT>return np.array(psize), drift<EOL>", "docstring": "Returns the 'size' of the psf in each direction a particular z (px)", "id": "f5757:c0:m10"}
{"signature": "def __init__(self, shape=None, zrange=None, laser_wavelength=<NUM_LIT>,<EOL>zslab=<NUM_LIT:0.>, zscale=<NUM_LIT:1.0>, kfki=<NUM_LIT>, n2n1=<NUM_LIT>/<NUM_LIT>, alpha=<NUM_LIT>,<EOL>polar_angle=<NUM_LIT:0.>, pxsize=<NUM_LIT>, support_factor=<NUM_LIT:2>, normalize=False,<EOL>sigkf=<NUM_LIT:0.0>, nkpts=None, cutoffval=None, measurement_iterations=None,<EOL>k_dist='<STR_LIT>', use_J1=True, sph6_ab=None, global_zscale=False,<EOL>cutbyval=False, cutfallrate=<NUM_LIT>, cutedgeval=<NUM_LIT>,<EOL>pinhole_width=None, do_pinhole=False, *args, **kwargs):", "body": "self.pxsize = pxsize<EOL>self.polar_angle = polar_angle<EOL>self.support_factor = support_factor<EOL>self.normalize = normalize<EOL>self.measurement_iterations = measurement_iterations or <NUM_LIT:11><EOL>self.global_zscale = global_zscale<EOL>self.polychromatic = False<EOL>self.sigkf = sigkf<EOL>self.nkpts = nkpts<EOL>self.cutoffval = cutoffval<EOL>self.cutbyval = cutbyval<EOL>self.cutfallrate = cutfallrate<EOL>self.cutedgeval = cutedgeval<EOL>self.k_dist = k_dist<EOL>self.use_J1 = use_J1<EOL>self.do_pinhole = do_pinhole<EOL>if self.sigkf is not None:<EOL><INDENT>self.nkpts = self.nkpts or <NUM_LIT:3><EOL>self.polychromatic = True<EOL><DEDENT>elif self.nkpts is not None:<EOL><INDENT>self.sigkf = <NUM_LIT:0.0><EOL>self.polychromatic = True<EOL><DEDENT>else:<EOL><INDENT>self.sigkf = sigkf = <NUM_LIT:0.0><EOL>self.polychromatic = False<EOL><DEDENT>if (sph6_ab is not None) and (not np.isnan(sph6_ab)):<EOL><INDENT>self.use_sph6_ab = True <EOL><DEDENT>else:<EOL><INDENT>self.use_sph6_ab = False<EOL><DEDENT>if shape and zrange is None:<EOL><INDENT>zrange = (<NUM_LIT:0>, shape.shape[<NUM_LIT:0>])<EOL><DEDENT>self.zrange = zrange<EOL>params = [<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'<EOL>]<EOL>values = np.array([<EOL>kfki,   zslab,   zscale,   alpha,   n2n1,   laser_wavelength,<EOL>sigkf,    sph6_ab<EOL>])<EOL>if not 
self.polychromatic:<EOL><INDENT>ind = params.index('<STR_LIT>')<EOL>params.pop(ind)<EOL>values = np.delete(values, ind)<EOL><DEDENT>if not self.use_sph6_ab:<EOL><INDENT>ind = params.index('<STR_LIT>')<EOL>params.pop(ind)<EOL>values = np.delete(values, ind)<EOL><DEDENT>for i in range(len(params)):<EOL><INDENT>if params[i] is '<STR_LIT>' and self.global_zscale:<EOL><INDENT>continue<EOL><DEDENT>params[i] = '<STR_LIT>' + params[i]<EOL><DEDENT>super(ExactPSF, self).__init__(<EOL>*args, params=params, values=values, shape=shape, **kwargs<EOL>)<EOL>", "docstring": "Superclass for all the exact PSFs, i.e. any PSF that is based on\nphysical properties of the imaging system such as the laser\nwavelength.\n\nThis PSF functions by calculating the local PSF for every z layer\nin the image, and convolving each layer independently (numerically\nthe exact model of image formation).\n\nParameters\n----------\nshape : tuple\n    Shape of the image in (z,y,x) pixel numbers (to be deprecated)\n\nzrange : tuple\n    range of z pixels over which we should calculate the psf, good pixels\n    being zrange[0] <= z <= zrange[1]. currently must be set to the interior\n    of the image, so [state.pad, state.image.shape[0] - state.pad]\n\nlaser_wavelength : float\n    wavelength of light in um of the incoming laser light\n\nzslab : float\n    Pixel position of the optical interface where 0 is the edge of the\n    image in the z direction\n\nzscale : float\n    Scaling of the z pixel size, so that each pixel is located at\n    zscale*(z - zint), such that the interface does not move with zscale\n\nkfki : float\n    Ratio of outgoing to incoming light wavevectors, 2\\pi/\\lambda\n\nn2n1 : float\n    Ratio of the index mismatch across the optical interface. 
For typical\n    glass with glycerol/water 80/20 mixture this is 1.4/1.518\n\nalpha : float\n    Aperture of the lens in radians, set by arcsin(n2n1)?\n\npolar_angle : float\n    the angle of the light polarization with respect to the scanning axis\n\npxsize : float\n    the size of a xy pixel in um, defaults to cohen group size 0.125 um\n\nsupport_factor : integer\n    size of the support\n\nnormalize : boolean\n    if True, use the normalization as calculated by the PSF instead of\n    unit normalization\n\nsigkf : float\n    Width of wavelengths to use in the polychromatic psf, None is\n    monochromatic. Values for kfki = kfki +- sigkf, unitless\n\nnkpts : integer\n    number of integration points to use for the polychromatic psf\n\ncutoffval : float\n    Percentage of peak psf value to cutoff using a 3-axis\n    exp(-(r-r0)**4) function where r0 is determined by cutoffval. A\n    good value for this is the bit depth of the camera, or possibly the\n    SNR, so 1/2**8 or 1/50.\n\nmeasurement_iterations : int\n    number of interations used when trying to find the center of mass\n    of the psf in a certain slice\n\nk_dist : str\n    Eithe ['gaussian', 'gamma'] which control the wavevector\n    distribution for the polychromatic detection psf. Default\n    is Gaussian.\n\nuse_J1 : boolean\n    Which hdet confocal model to use. Set to True to include the\n    J1 term corresponding to a large-NA focusing lens, False to\n    exclude it. Default is True\n\ncutbyval : boolean\n    If True, cuts the PSF based on the actual value instead of the\n    position associated with the nearest value.\n\ncutfallrate : float\n    The relative value of the cutoffval over which to damp the\n    remaining values of the psf. 0.3 seems a good fit now.\n\ncutedgeval : float\n    The value with which to determine the edge of the psf, typically\n    taken around floating point, 1e-12\n\npinhole_width : Float\n    The width of the line illumination, in 1/k units. 
Default is 1.0.\n\ndo_pinhole : Bool\n    Whether or not to include pinhole line width in the sampling.\n    Default is False.", "id": "f5757:c0:m0"}
{"signature": "def drift(self, z):", "body": "return np.polyval(self.drift_poly, z)<EOL>", "docstring": "Give the pixel offset at a given z value for the current parameters", "id": "f5757:c0:m9"}
{"signature": "def __init__(self, shape=None, zrange=None, laser_wavelength=<NUM_LIT>, zslab=<NUM_LIT:0.>,<EOL>zscale=<NUM_LIT:1.0>, kfki=<NUM_LIT>, n2n1=<NUM_LIT>/<NUM_LIT>, alpha=<NUM_LIT>, polar_angle=<NUM_LIT:0.>,<EOL>pxsize=<NUM_LIT>, support_factor=<NUM_LIT:2>, normalize=False, sigkf=<NUM_LIT:0.0>,<EOL>nkpts=None, cutoffval=None, measurement_iterations=None,<EOL>k_dist='<STR_LIT>', use_J1=True, sph6_ab=None, global_zscale=False,<EOL>cutbyval=False, cutfallrate=<NUM_LIT>, cutedgeval=<NUM_LIT>,<EOL>*args, **kwargs):", "body": "super(ExactPinholeConfocalPSF, self).__init__(shape=shape,<EOL>zrange=zrange, laser_wavelength=laser_wavelength, zslab=zslab,<EOL>zscale=zscale, kfki=kfki, n2n1=n2n1, alpha=alpha, pxsize=<EOL>pxsize, polar_angle=polar_angle, support_factor=support_factor,<EOL>normalize=normalize, sigkf=sigkf, nkpts=nkpts, cutoffval=<EOL>cutoffval, measurement_iterations= measurement_iterations,<EOL>k_dist=k_dist, use_J1=use_J1, sph6_ab=sph6_ab, global_zscale=<EOL>global_zscale, cutbyval=cutbyval, cutfallrate=cutfallrate,<EOL>cutedgeval=cutedgeval, *args, **kwargs)<EOL>", "docstring": "PSF for a pinhole confocal microscopes that can be used with the\nperi framework.  Calculates the spatially varying point spread\nfunction for confocal microscopes and allows them to be applied to\nimages as a convolution.\n\nThis PSF assumes that the z extent is large compared to the image size\nand so calculates the local PSF for every z layer in the image.\n\nParameters\n----------\nshape : tuple\n    Shape of the image in (z,y,x) pixel numbers (to be deprecated)\n\nzrange : tuple\n    range of z pixels over which we should calculate the psf, good pixels\n    being zrange[0] <= z <= zrange[1]. 
currently must be set to the interior\n    of the image, so [state.pad, state.image.shape[0] - state.pad]\n\nlaser_wavelength : float\n    wavelength of light in um of the incoming laser light\n\nzslab : float\n    Pixel position of the optical interface where 0 is the edge of the\n    image in the z direction\n\nzscale : float\n    Scaling of the z pixel size, so that each pixel is located at\n    zscale*(z - zint), such that the interface does not move with zscale\n\nkfki : float\n    Ratio of outgoing to incoming light wavevectors, 2\\pi/\\lambda\n\nn2n1 : float\n    Ratio of the index mismatch across the optical interface. For typical\n    glass with glycerol/water 80/20 mixture this is 1.4/1.518\n\nalpha : float\n    Aperture of the lens in radians, set by arcsin(n2n1)?\n\npolar_angle : float\n    the angle of the light polarization with respect to the scanning axis\n\npxsize : float\n    the size of a xy pixel in um, defaults to cohen group size 0.125 um\n\nsupport_factor : integer\n    size of the support\n\nnormalize : boolean\n    if True, use the normalization as calculated by the PSF instead of\n    unit normalization\n\nsigkf : float\n    Width of wavelengths to use in the polychromatic psf, None is\n    monochromatic. Values for kfki = kfki +- sigkf, unitless\n\nnkpts : integer\n    number of integration points to use for the polychromatic psf\n\ncutoffval : float\n    Percentage of peak psf value to cutoff using a 3-axis\n    exp(-(r-r0)**4) function where r0 is determined by cutoffval. A\n    good value for this is the bit depth of the camera, or possibly the\n    SNR, so 1/2**8 or 1/50.\n\nmeasurement_iterations : int\n    number of interations used when trying to find the center of mass\n    of the psf in a certain slice\n\nk_dist : str\n    Eithe ['gaussian', 'gamma'] which control the wavevector\n    distribution for the polychromatic detection psf. Default\n    is Gaussian.\n\nuse_J1 : boolean\n    Which hdet confocal model to use. 
Set to True to include the\n    J1 term corresponding to a large-NA focusing lens, False to\n    exclude it. Default is True\n\ncutbyval : boolean\n    If True, cuts the PSF based on the actual value instead of the\n    position associated with the nearest value.\n\ncutfallrate : float\n    The relative value of the cutoffval over which to damp the\n    remaining values of the psf. 0.3 seems a good fit now.\n\ncutedgeval : float\n    The value with which to determine the edge of the psf, typically\n    taken around floating point, 1e-12\n\nNotes:\n    a = ExactLineScanConfocalPSF((64,)*3)\n    psf, (z,y,x) = a.psf_slice(1., size=51)\n    imshow((psf*r**4)[:,:,25], cmap='bone')", "id": "f5757:c2:m0"}
{"signature": "def pack_args(self):", "body": "mapper = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>}<EOL>bads = [self.zscale, '<STR_LIT>']<EOL>d = {}<EOL>for k,v in iteritems(mapper):<EOL><INDENT>if k in self.param_dict:<EOL><INDENT>d[v] = self.param_dict[k]<EOL><DEDENT><DEDENT>d.update({<EOL>'<STR_LIT>': self.polar_angle,<EOL>'<STR_LIT>': self.normalize,<EOL>'<STR_LIT>':self.use_J1<EOL>})<EOL>if self.polychromatic:<EOL><INDENT>d.update({'<STR_LIT>': self.nkpts})<EOL>d.update({'<STR_LIT>': self.k_dist})<EOL><DEDENT>if self.do_pinhole:<EOL><INDENT>d.update({'<STR_LIT>': self.num_line_pts})<EOL><DEDENT>d.update({'<STR_LIT>': True})<EOL>return d<EOL>", "docstring": "Pack the parameters into the form necessary for the integration\nroutines above.  For example, packs for calculate_linescan_psf", "id": "f5757:c1:m1"}
{"signature": "def set_shape(self, shape, inner):", "body": "for c in self.comps:<EOL><INDENT>c.set_shape(shape, inner)<EOL><DEDENT>", "docstring": "Set the shape for all components", "id": "f5758:c4:m16"}
{"signature": "def initargs(self):", "body": "return {<EOL>'<STR_LIT>': self.comps,<EOL>'<STR_LIT>': self.category,<EOL>'<STR_LIT>': self.field_reduce_func,<EOL>}<EOL>", "docstring": "Return arguments that are passed to init to setup the class again", "id": "f5758:c4:m1"}
{"signature": "def initialize(self):", "body": "pass<EOL>", "docstring": "Begin anew and initialize the component", "id": "f5758:c2:m1"}
{"signature": "def initargs(self):", "body": "return {\"<STR_LIT>\": self.params, \"<STR_LIT>\": self.values, \"<STR_LIT>\": self.ordered}<EOL>", "docstring": "Pickling helper method which returns a dictionary of function\nparameters which get passed to pickle via `__getinitargs__\n<https://docs.python.org/2/library/pickle.html#object.__getinitargs__>`_\n\nReturns\n-------\narg_dict : dictionary\n    ``**kwargs`` to be passed to the __init__ func after unpickling", "id": "f5758:c1:m7"}
{"signature": "def sync_params(self):", "body": "def _normalize(comps, param):<EOL><INDENT>vals = [c.get_values(param) for c in comps]<EOL>diff = any([vals[i] != vals[i+<NUM_LIT:1>] for i in range(len(vals)-<NUM_LIT:1>)])<EOL>if diff:<EOL><INDENT>for c in comps:<EOL><INDENT>c.set_values(param, vals[<NUM_LIT:0>])<EOL><DEDENT><DEDENT><DEDENT>for param, comps in iteritems(self.lmap):<EOL><INDENT>if isinstance(comps, list) and len(comps) > <NUM_LIT:1>:<EOL><INDENT>_normalize(comps, param)<EOL><DEDENT><DEDENT>", "docstring": "Ensure that shared parameters are the same value everywhere", "id": "f5758:c4:m17"}
{"signature": "def set_tile(self, tile):", "body": "for c in self.comps:<EOL><INDENT>c.set_tile(tile)<EOL><DEDENT>", "docstring": "Set the current working tile for components", "id": "f5758:c4:m15"}
{"signature": "def split_params(self, params, values=None):", "body": "pc, vc = [], []<EOL>returnvalues = values is not None<EOL>if values is None:<EOL><INDENT>values = [<NUM_LIT:0>]*len(util.listify(params))<EOL><DEDENT>for c in self.comps:<EOL><INDENT>tp, tv = [], []<EOL>for p,v in zip(util.listify(params), util.listify(values)):<EOL><INDENT>if not p in self.lmap:<EOL><INDENT>raise NotAParameterError(\"<STR_LIT>\" % (p, self))<EOL><DEDENT>if c in self.pmap[p]:<EOL><INDENT>tp.append(p)<EOL>tv.append(v)<EOL><DEDENT><DEDENT>pc.append(tp)<EOL>vc.append(tv)<EOL><DEDENT>if returnvalues:<EOL><INDENT>return pc, vc<EOL><DEDENT>return pc<EOL>", "docstring": "Split params, values into groups that correspond to the ordering in\nself.comps. For example, given a sphere collection and slab::\n\n    [\n        (spheres) [pos rad etc] [pos val, rad val, etc]\n        (slab) [slab params] [slab vals]\n    ]", "id": "f5758:c4:m4"}
{"signature": "def get_update_tile(self, params, values):", "body": "pass<EOL>", "docstring": "This method returns a :class:`~peri.util.Tile` object defining the\nregion of a field that has to be modified by the update of (params,\nvalues). For example, if this Component is the point-spread-function,\nit might return a tile of entire image since every parameter affects\nthe entire image::\n\n    return self.shape\n\n\n\nParameters\n-----------\nparams : single param, list of params\n    A single parameter or list of parameters to be updated\n\nvalues : single value, list of values\n    The values corresponding to the params\n\nReturns\n-------\ntile : :class:`~peri.util.Tile`\n    A tile corresponding to the image region", "id": "f5758:c2:m2"}
{"signature": "def register(self, obj):", "body": "self._parent = obj<EOL>", "docstring": "Registery a parent object so that communication maybe happen upwards", "id": "f5758:c2:m10"}
{"signature": "def get(self):", "body": "fields = [c.get() for c in self.comps]<EOL>return self.field_reduce_func(fields)<EOL>", "docstring": "Combine the fields from all components", "id": "f5758:c4:m14"}
{"signature": "def get(self):", "body": "pass<EOL>", "docstring": "Return the `natural` part of the model. In the case of most elements it\nis the calculated field, for others it is the object itself.", "id": "f5758:c2:m7"}
{"signature": "def trigger_update(self, params, values):", "body": "if self._parent:<EOL><INDENT>self._parent.trigger_update(params, values)<EOL><DEDENT>else:<EOL><INDENT>self.update(params, values)<EOL><DEDENT>", "docstring": "Notify parent of a parameter change", "id": "f5758:c2:m12"}
{"signature": "def __init__(self, params, values, ordered=True, category='<STR_LIT>'):", "body": "for attr in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if not hasattr(self, attr):<EOL><INDENT>setattr(self, attr, None)  <EOL><DEDENT><DEDENT>super(Component, self).__init__(<EOL>params, values, ordered=ordered, category=category<EOL>)<EOL>", "docstring": "A :class:`~peri.comp.comp.ParameterGroup` which specifically computes\nover sections of an image for an :class:`~peri.states.ImageState`. To\nthis end, we require the implementation of several new member functions:\n\n    * :func:`~peri.comp.comp.Component.initialize`\n    * :func:`~peri.comp.comp.Component.get_update_tile`\n    * :func:`~peri.comp.comp.Component.get_padding_size`\n    * :func:`~peri.comp.comp.Component.set_shape`\n    * :func:`~peri.comp.comp.Component.set_tile`\n    * :func:`~peri.comp.comp.Component.get`\n\nIn order to facilitate optimizations such as caching and local updates,\nwe must incorporate tiling in this object. \n\nParameters\n----------\nparams : string, list of strings\n    The names of the parameters, in the proper order\n\nvalues : number, list of numbers\n    The values corresponding to the parameter names\n\nordered : boolean (default: True)\n    If True, uses an OrderedDict so that parameter order is\n    deterministic independent of number of parameters\n\ncategory : string (default: 'param')\n    Name of the category associated with this ParameterGroup.", "id": "f5758:c2:m0"}
{"signature": "def nopickle(self):", "body": "return []<EOL>", "docstring": "Elements of the class that should not be included in pickled objects.\nIf inheriting a new class, should be::\n\n    super(Class, self).nopickle() + ['other1', 'other2', ...]\n\nReturns\n-------\nelements : list of strings\n    The name of class member variables which should not be pickled", "id": "f5758:c1:m6"}
{"signature": "def execute(self, *args, **kwargs):", "body": "pass<EOL>", "docstring": "Perform its routine, whatever that may be", "id": "f5758:c2:m6"}
{"signature": "def trigger_parameter_change(self):", "body": "if self._parent:<EOL><INDENT>self._parent.trigger_parameter_change()<EOL><DEDENT>", "docstring": "Notify parents of a parameter change", "id": "f5758:c2:m11"}
{"signature": "def __init__(self, pos, shape=None, param_prefix='<STR_LIT>', category='<STR_LIT>',<EOL>support_pad=<NUM_LIT:4>, float_precision=np.float64):", "body": "if pos.ndim != <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.category = category<EOL>self.support_pad = support_pad<EOL>self.pos = pos.astype('<STR_LIT:float>')<EOL>self.param_prefix = param_prefix<EOL>if float_precision not in (np.float64, np.float32, np.float16):<EOL><INDENT>raise ValueError('<STR_LIT>' +<EOL>'<STR_LIT>')<EOL><DEDENT>self.float_precision = float_precision<EOL>self.shape = shape<EOL>self.setup_variables()<EOL>if self.shape:<EOL><INDENT>self.inner = self.shape.copy()<EOL>self.tile = self.inner.copy()<EOL>self.initialize()<EOL><DEDENT>", "docstring": "Parent class for a large collection of particles, such as spheres or\npoints or ellipsoids or rods.\n\nThis class is good for a collection of objects which each have a\nposition as well as (possibly) some other parameters, like particle\nradius, aspect ratio, or brightness. Its .get() method returns a\nfield of the drawn particles, selected on the current tile. Any\ndaughter classes need the following methods:\n\n* _draw_particle\n* _update_type\n* setup_variables\n* get_values\n* set_values\n* add_particle\n* remove_particle\n\nIn addition, the following methods should be modified for particles\nwith more parameters than just positions:\n\n* _drawargs\n* _tile\n* param_particle\n* exports\n* _p2i\n\nIf you have a few objects to group, like 2 or 3 slabs, group them\nwith a `peri.comp.ComponentCollection` instead.\n\nParameters\n----------\npos : ndarray [N,d]\n    Initial positions of the particles. Re-cast as float internally\n\nshape : ``peri.util.Tile``, optional\n    Shape of the field over which to draw the platonic spheres.\n    Default is None.\n\nparam_prefix : string, optional\n    Prefix for the particle parameter names. 
Default is `'sph'`\n\ncategory : string, optional\n    Category, as in comp.Component. Default is `'obj'`.\n\nsupport_pad : Int, optional\n    How much to pad the boundary of particles when calculating the\n    support so that particles do not leak out the edges. Default is 4\n\nfloat_precision : numpy float datatype, optional\n    One of numpy.float16, numpy.float32, numpy.float64; precision\n    for precomputed arrays. Default is np.float64; make it 16 or 32\n    to save memory.", "id": "f5759:c0:m0"}
{"signature": "def _update_type(self, params):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Given a list of parameters, returns a bool of whether or not any of\nthe parameters require a global update, and a list of particle indices\nwhich are included in ``params``, e.g.\n``return doglobal, particles``", "id": "f5759:c0:m2"}
{"signature": "def _i2p(self, ind, coord):", "body": "return '<STR_LIT:->'.join([self.param_prefix, str(ind), coord])<EOL>", "docstring": "Translate index info to parameter name", "id": "f5759:c0:m24"}
{"signature": "def __init__(self, zpos=<NUM_LIT:0>, angles=(<NUM_LIT:0>,<NUM_LIT:0>), param_prefix='<STR_LIT>', shape=None,<EOL>float_precision=np.float64, category='<STR_LIT>'):", "body": "self.lbl_zpos = param_prefix+'<STR_LIT>'<EOL>self.lbl_theta = param_prefix+'<STR_LIT>'<EOL>self.lbl_phi = param_prefix+'<STR_LIT>'<EOL>if float_precision not in (np.float64, np.float32, np.float16):<EOL><INDENT>raise ValueError('<STR_LIT>' +<EOL>'<STR_LIT>')<EOL><DEDENT>self.float_precision = float_precision<EOL>params = [self.lbl_zpos, self.lbl_theta, self.lbl_phi]<EOL>values = [float(i) for i in [zpos, angles[<NUM_LIT:0>], angles[<NUM_LIT:1>]]]<EOL>super(Slab, self).__init__(params, values, ordered=False,<EOL>category=category)<EOL>if shape:<EOL><INDENT>inner = shape.copy()  <EOL>self.set_shape(shape, inner)<EOL><DEDENT>self.set_tile(self.shape)<EOL>if self.shape:<EOL><INDENT>self.initialize()<EOL><DEDENT>", "docstring": "A half plane corresponding to a cover-slip.\n\nParameters\n----------\nshape : tuple\n    field shape over which to calculate\n\nzpos : float\n    position of the center of the slab in pixels\n\nangles : tuple of float (2,), optional\n    Euler-like Angles of rotation of the normal with respect to the\n    z-axis, i.e. ``angles=(0., 0.)`` gives a slab with a normal\n    along z. The first angle theta is the rotation about the x-axis;\n    the second angle phi is the rotation about the y-axis. Default\n    is (0,0).\n\nfloat_precision : numpy float datatype\n    One of numpy.float16, numpy.float32, numpy.float64; precision\n    for precomputed arrays. Default is np.float64; make it 16 or 32\n    to save memory.", "id": "f5759:c2:m0"}
{"signature": "def sphere_constrained_cubic(dr, a, alpha):", "body": "sqrt3 = np.sqrt(<NUM_LIT:3>)<EOL>b_coeff = a*<NUM_LIT:0.5>/sqrt3*(<NUM_LIT:1> - <NUM_LIT>*sqrt3*alpha)/(<NUM_LIT> + a*a)<EOL>rscl = np.clip(dr, -<NUM_LIT:0.5>*sqrt3, <NUM_LIT:0.5>*sqrt3)<EOL>a, d = rscl + <NUM_LIT:0.5>*sqrt3, rscl - <NUM_LIT:0.5>*sqrt3<EOL>return alpha*d*a*rscl + b_coeff*d*a - d/sqrt3<EOL>", "docstring": "Sphere generated by a cubic interpolant constrained to be (1,0) on\n(r0-sqrt(3)/2, r0+sqrt(3)/2), the size of the cube in the (111) direction.", "id": "f5759:m9"}
{"signature": "def param_radii(self):", "body": "return [self._i2p(i, '<STR_LIT:a>') for i in range(self.N)]<EOL>", "docstring": "Return params of all radii", "id": "f5759:c1:m7"}
{"signature": "def sphere_lerp(dr, a, alpha):", "body": "return (<NUM_LIT:1>-np.clip((dr+alpha) / (<NUM_LIT:2>*alpha), <NUM_LIT:0>, <NUM_LIT:1>))<EOL>", "docstring": "Linearly interpolate the pixels for the platonic object", "id": "f5759:m3"}
{"signature": "def param_particle_pos(self, ind):", "body": "ind = self._vps(listify(ind))<EOL>return [self._i2p(i, j) for i in ind for j in ['<STR_LIT:z>', '<STR_LIT:y>', '<STR_LIT:x>']]<EOL>", "docstring": "Get position of one or more particles", "id": "f5759:c1:m9"}
{"signature": "def update(self, params, values):", "body": "<EOL>global_update, particles = self._update_type(params)<EOL>if global_update:<EOL><INDENT>self.set_values(params, values)<EOL>self.initialize()<EOL>return<EOL><DEDENT>oldargs = self._drawargs()<EOL>for n in particles:<EOL><INDENT>self._draw_particle(self.pos[n], *listify(oldargs[n]), sign=-<NUM_LIT:1>)<EOL><DEDENT>self.set_values(params, values)<EOL>newargs = self._drawargs()<EOL>for n in particles:<EOL><INDENT>self._draw_particle(self.pos[n], *listify(newargs[n]), sign=+<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Update the particles field given new parameter values", "id": "f5759:c0:m26"}
{"signature": "def update(self, params, values):", "body": "<EOL>params = listify(params)<EOL>values = listify(values)<EOL>for i, p in enumerate(params):<EOL><INDENT>if (p[-<NUM_LIT:2>:] == '<STR_LIT>') and (values[i] < <NUM_LIT:0>):<EOL><INDENT>values[i] = <NUM_LIT:0.0><EOL><DEDENT><DEDENT>super(PlatonicSpheresCollection, self).update(params, values)<EOL>", "docstring": "Calls an update, but clips radii to be > 0", "id": "f5759:c1:m18"}
{"signature": "def _draw_particle(self, pos, sign=<NUM_LIT:1>):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Updates ``self.particles`` by drawing a particle at position ``pos``,\nwith possible additional unnamed arguments between ``pos`` and\n``sign``. If ``sign`` is -1, un-draws the particle instead.\n\nTo be able to fit this component in a model, _draw_particle must\ncreate an image that is numerically continuous as pos changes --\ni.e. the edge of the particle must alias smoothly to 0.", "id": "f5759:c0:m1"}
{"signature": "def _update_type(self, params):", "body": "dozscale = False<EOL>particles = []<EOL>for p in listify(params):<EOL><INDENT>typ, ind = self._p2i(p)<EOL>particles.append(ind)<EOL>dozscale = dozscale or typ == '<STR_LIT>'<EOL><DEDENT>particles = set(particles)<EOL>return dozscale, particles<EOL>", "docstring": "Returns dozscale and particle list of update", "id": "f5759:c1:m16"}
{"signature": "def _tile(self, n):", "body": "pos = self._trans(self.pos[n])<EOL>return Tile(pos, pos).pad(self.support_pad)<EOL>", "docstring": "Get the update tile surrounding particle `n`", "id": "f5759:c0:m9"}
{"signature": "def param_particle_rad(self, ind):", "body": "ind = self._vps(listify(ind))<EOL>return [self._i2p(i, '<STR_LIT:a>') for i in ind]<EOL>", "docstring": "Get radius of one or more particles", "id": "f5759:c1:m10"}
{"signature": "def _vps(self, inds):", "body": "return [j for j in inds if j >= <NUM_LIT:0> and j < self.N]<EOL>", "docstring": "Clips a list of inds to be on [0, self.N]", "id": "f5759:c0:m16"}
{"signature": "def get_values(self, params):", "body": "<EOL>raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Returns a util.delisty-d assortment of values and parameters, e.g.\n(get values for the parameters, both particle positions and globals)\n(return delistify(values, params))", "id": "f5759:c0:m4"}
{"signature": "def param_positions(self):", "body": "return self.param_particle_pos(list(range(self.N)))<EOL>", "docstring": "Return params of all positions", "id": "f5759:c0:m17"}
{"signature": "def param_particle_pos(self, ind):", "body": "<EOL>ind = self._vps(listify(ind))<EOL>return [self._i2p(i, j) for i in ind for j in ['<STR_LIT:z>', '<STR_LIT:y>', '<STR_LIT:x>']]<EOL>", "docstring": "Get position of one or more particles", "id": "f5759:c0:m18"}
{"signature": "def exact_volume_sphere(rvec, pos, radius, zscale=<NUM_LIT:1.0>, volume_error=<NUM_LIT>,<EOL>function=sphere_analytical_gaussian, max_radius_change=<NUM_LIT>, args=()):", "body": "vol_goal = <NUM_LIT>/<NUM_LIT:3>*np.pi*radius**<NUM_LIT:3> / zscale<EOL>rprime = radius<EOL>dr = inner(rvec, pos, rprime, zscale=zscale)<EOL>t = function(dr, rprime, *args)<EOL>for i in range(MAX_VOLUME_ITERATIONS):<EOL><INDENT>vol_curr = np.abs(t.sum())<EOL>if np.abs(vol_goal - vol_curr)/vol_goal < volume_error:<EOL><INDENT>break<EOL><DEDENT>rprime = rprime + <NUM_LIT:1.0>*(vol_goal - vol_curr) / (<NUM_LIT:4>*np.pi*rprime**<NUM_LIT:2>)<EOL>if np.abs(rprime - radius)/radius > max_radius_change:<EOL><INDENT>break<EOL><DEDENT>dr = inner(rvec, pos, rprime, zscale=zscale)<EOL>t = function(dr, rprime, *args)<EOL><DEDENT>return t<EOL>", "docstring": "Perform an iterative method to calculate the effective sphere that perfectly\n(up to the volume_error) conserves volume.  Return the resulting image", "id": "f5759:m10"}
{"signature": "def set_values(self, params, values):", "body": "<EOL>raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Sets the parameters and values", "id": "f5759:c0:m5"}
{"signature": "def _drawargs(self):", "body": "return [[] for p in self.pos]<EOL>", "docstring": "Returns a list of arguments for self._draw_particle, of the same\nlength as `self.pos`. For example, if drawing a sphere, _drawargs\ncould return a list of radii.", "id": "f5759:c0:m8"}
{"signature": "def closest_particle(self, x):", "body": "return (((self.pos - x)**<NUM_LIT:2>).sum(axis=-<NUM_LIT:1>)).argmin()<EOL>", "docstring": "Get the index of the particle closest to vector `x`", "id": "f5759:c0:m21"}
{"signature": "def step(self, steps=<NUM_LIT:100>, mask=None):", "body": "if mask is None:<EOL><INDENT>mask = np.ones_like(self.rad).astype('<STR_LIT:bool>')<EOL><DEDENT>for step in range(steps):<EOL><INDENT>self.forces = self.force_hardsphere(mask) + self.force_noise()<EOL>self.forces[~mask, :] = <NUM_LIT:0.><EOL>self.integrate(self.forces)<EOL>self.boundary_condition()<EOL><DEDENT>", "docstring": "Perform a set of integration / BC steps and update plot\n\nParameters:\n-----------\nsteps : int\n    number of time steps of size self.dt to perform\n\nmask : boolean ndarray[N]\n    if provided, only simulate the motion of certain particles given\n    by the boolean array mask", "id": "f5760:c0:m4"}
{"signature": "def create_configuration(N, tile, radius=<NUM_LIT>, **kwargs):", "body": "pos, rad, tile = initialize_particles(N, tile, radius=radius, **kwargs)<EOL>sim = BrownianHardSphereSimulation(pos, rad, tile)<EOL>sim.relax(<NUM_LIT:1000>)<EOL>sim.step(<NUM_LIT:1000>)<EOL>sim.relax(<NUM_LIT:1000>)<EOL>return sim.pos, sim.rad, tile<EOL>", "docstring": "Quick configuration creation with `N` particles in a tile of size `tile`.", "id": "f5760:m0"}
{"signature": "def __init__(self, pos, rad, tile, D=<NUM_LIT>, epsilon=<NUM_LIT>, dt=<NUM_LIT>):", "body": "self.pos = pos<EOL>self.rad = rad<EOL>self.tile = tile<EOL>self.N = self.pos.shape[<NUM_LIT:0>]<EOL>self.dim = self.pos.shape[<NUM_LIT:1>]<EOL>self.forces = <NUM_LIT:0>*self.pos<EOL>self.D = D<EOL>self.dt = dt<EOL>self.epsilon = epsilon<EOL>", "docstring": "Creates a hard sphere brownian dynamics simulation in ND as specified\nby the pos, rad and tile supplied to initializer.\n\nParameters:\n-----------\npos : ndarray [N, dim]\n    Positions of particles in the simulation.\n\nrad : ndarray [N]\n    Radii of particles in the simulation.\n\ntile : `peri.util.Tile`\n    The simulation box defined by a tile with left and right bounds.\n\nD : float\n    diffusion constant in proper units\n\nepsilon : float\n    Force constant for the soft-sphere potential f = \\epsilon (1-d/d_0)^{3/2}.\n    This parameter is really a ratio of epsilon / eta (the damping parameter)\n\ndt : float\n    timestep for the integrator", "id": "f5760:c0:m0"}
{"signature": "def force_noise(self):", "body": "coeff = np.sqrt(<NUM_LIT:2>*self.D)<EOL>return coeff * np.random.randn(*self.pos.shape)<EOL>", "docstring": "Calculate the effective force of the Langevin dynamics", "id": "f5760:c0:m1"}
{"signature": "def get_xyzr_t(df, particle_ind):", "body": "ind = df['<STR_LIT>'] == particle_ind<EOL>rads = np.array(df['<STR_LIT>'][ind].copy())<EOL>z = np.array(df['<STR_LIT:z>'][ind].copy())<EOL>x = np.array(df['<STR_LIT:x>'][ind].copy())<EOL>y = np.array(df['<STR_LIT:y>'][ind].copy())<EOL>return x,y,z,rads<EOL>", "docstring": "Returns the x,y,z, radii values of a dataframe for a given particle index.\n\nParameters\n----------\n    df : DataFrame\n    particle_ind : Int\nReturns\n-------\n    x, y, z, rads : numpy.ndarray", "id": "f5761:m1"}
{"signature": "def calculate_state_radii_fluctuations(state_list, inbox=True, fullinbox=True,<EOL>inboxrad=False, maxdisp=<NUM_LIT>, threshold=None, return_all=False):", "body": "pos, rad = [], []<EOL>for s in state_list:<EOL><INDENT>m = analyze.good_particles(s, inbox=inbox, inboxrad=inboxrad,<EOL>fullinbox=fullinbox)<EOL>pos.append(s.obj_get_positions()[m])<EOL>rad.append(s.obj_get_radii()[m])<EOL><DEDENT>df = track(pos, rad, maxdisp=maxdisp, threshold=threshold)<EOL>zyxr_t = []<EOL>for i in np.unique(df['<STR_LIT>']):<EOL><INDENT>x,y,z,r = get_xyzr_t(df, <NUM_LIT:1>*i)<EOL>if z.size > <NUM_LIT:1>:<EOL><INDENT>zyxr_t.append([x.mean(), y.mean(), z.mean(), r.mean(), r.std()])<EOL><DEDENT><DEDENT>z, y, x, rm, rs = np.transpose(zyxr_t)<EOL>if return_all:<EOL><INDENT>return z, y, x, rm, rs<EOL><DEDENT>else:<EOL><INDENT>return rs<EOL><DEDENT>", "docstring": "Calculates the radii fluctuations throughout a series of images.\n\nParameters\n----------\n    state_list : iterable\n        List or any iterator/generator of peri.states objects to analyze.\n\n    inbox : Bool\n        Set to False to include all particles, not just the ones in the\n        image. Default is True (only particles in image).\n    fullinbox : Bool\n        Set to True to only include particles which are\n    inboxrad : Bool\n        Set to True to include all particles which overlap at all with the\n        image. Default False\n\n    maxdisp : Float\n        The maximum displacement a particle can do between frames.\n    threshold : Int\n        Threshold for trackpy.filter_stubs\n\n    return_all : Bool\n        Set to True to return the mean z,y,x,r for all the particles\n        as well as r.std(). Default is False (just returns r.std()\n\nReturns\n-------\n    [z,] : np.ndarray\n        The mean z-positions of each particle. Returned if `return_all`\n        is True.\n    [y,] : np.ndarray\n        The mean y-positions of each particle. 
Returned if `return_all`\n        is True.\n    [x,] : np.ndarray\n        The mean x-positions of each particle. Returned if `return_all`\n        is True.\n    [rm,] : np.ndarray\n        The mean radii of each particle. Returned if `return_all` is\n        True.\n    rs : np.ndarray\n        The standard deviation of each particle's radius across all\n        states.", "id": "f5761:m3"}
{"signature": "def listen(func):", "body": "import argparse<EOL>import beanstalkc as bean<EOL>parser = argparse.ArgumentParser(description=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', type=int, help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', type=int, help=\"<STR_LIT>\")<EOL>args = vars(parser.parse_args())<EOL>port = int(args.get('<STR_LIT:port>') or DEFAULT_BEANSTALKD)<EOL>proc = int(args.get('<STR_LIT>') or <NUM_LIT:1>)<EOL>def _listen(func, index):<EOL><INDENT>sys.stdout = open('<STR_LIT>'.format(index), \"<STR_LIT:w>\", buffering=<NUM_LIT:0>)<EOL>sys.stderr = open('<STR_LIT>'.format(index), \"<STR_LIT:w>\", buffering=<NUM_LIT:0>)<EOL>bsd = bean.Connection(host=LOCALHOST, port=port)<EOL>while True:<EOL><INDENT>job = bsd.reserve()<EOL>func(json.loads(job.body))<EOL><DEDENT><DEDENT>log.info('<STR_LIT>')<EOL>for i in range(proc):<EOL><INDENT>log.info('<STR_LIT:{}>'.format(i))<EOL>Process(target=_listen, args=(func,i)).start()<EOL><DEDENT>", "docstring": "This function is for use in external scripts in order to listen on the\nqueue and perform tasks. In particular, create a function which takes the\narguments you would pass to `launch_all`s jobs parameter, then at the\nbottom of the script add:\n\nif __name__ == '__main__':\n    peri.test.beanstalk.listen(my_function_name)", "id": "f5762:m3"}
{"signature": "def force_damp(self):", "body": "return - self.beta * self.vel<EOL>", "docstring": "Calculate the damping force -beta v", "id": "f5763:c0:m2"}
{"signature": "def boundary_condition(self):", "body": "for i in range(self.dim):<EOL><INDENT>mask = (self.pos[:,i] < <NUM_LIT:0>)<EOL>self.pos[mask,i] = <NUM_LIT:2>*<NUM_LIT:0>-self.pos[mask,i]<EOL>self.vel[mask,i] *= -<NUM_LIT:1><EOL>mask = (self.pos[:,i] > self.box[i])<EOL>self.pos[mask,i] = <NUM_LIT:2>*self.box[i]-self.pos[mask,i]<EOL>self.vel[mask,i] *= -<NUM_LIT:1><EOL><DEDENT>", "docstring": "Apply hard reflective boundary conditions to particles", "id": "f5763:c0:m4"}
{"signature": "def step(self, steps=<NUM_LIT:1>):", "body": "for step in range(steps):<EOL><INDENT>self.forces = self.force_hertzian() + self.force_damp() + self.force_noise()<EOL>self.integrate(self.forces)<EOL>self.boundary_condition()<EOL><DEDENT>", "docstring": "Perform a set of integration / BC steps including finite temperature\n\nParameters:\n-----------\nsteps : int\n    number of time steps of size self.dt to perform", "id": "f5763:c0:m6"}
{"signature": "def relax(self, steps=<NUM_LIT:1000>):", "body": "for step in range(steps):<EOL><INDENT>self.forces = self.force_hertzian() + self.force_damp()<EOL>self.integrate(self.forces)<EOL>self.boundary_condition()<EOL><DEDENT>", "docstring": "Relax the current configuration using just pair wise forces (no noise)", "id": "f5763:c0:m7"}
{"signature": "def __init__(self, N=<NUM_LIT>, phi=<NUM_LIT>, radius=<NUM_LIT>, polydispersity=<NUM_LIT:0.0>, beta=<NUM_LIT:1>,<EOL>epsilon=<NUM_LIT>, T=<NUM_LIT:1>, dt=<NUM_LIT>, dim=<NUM_LIT:3>, box_side_ratio=<NUM_LIT:1.0>):", "body": "self.N = N<EOL>self.phi = phi<EOL>self.beta = beta<EOL>self.epsilon = epsilon<EOL>self.T = T<EOL>self.dt = dt<EOL>self.radius = radius<EOL>self.polydispersity = polydispersity<EOL>self.box_side_ratio = box_side_ratio<EOL>self.dim = int(dim)<EOL>if self.dim == <NUM_LIT:2>:<EOL><INDENT>self.box_side = (self.N*np.pi*self.radius**<NUM_LIT:2> / self.phi)**(<NUM_LIT:1.>/<NUM_LIT:2>)<EOL>self.box = np.array([self.box_side]*self.dim)<EOL><DEDENT>if self.dim == <NUM_LIT:3>:<EOL><INDENT>self.box_side = (self.N*<NUM_LIT>/<NUM_LIT:3>*np.pi*self.radius**<NUM_LIT:3> / self.phi)**(<NUM_LIT:1.>/<NUM_LIT:3>)<EOL>sxy = self.box_side/np.sqrt(self.box_side_ratio)<EOL>sz  = self.box_side*self.box_side_ratio<EOL>self.box = np.array([sz, sxy, sxy])<EOL><DEDENT>self.init_random()<EOL>", "docstring": "Creates a simulation of soft sphere particles. 
By default, creates\nparticles in a random uniform (perhaps overlapping) distribution of\nparticles inside a box dimension `dim` given a packing fraction\n\nParameters:\n-----------\nN : integer [default: 500]\n    the number of particles in the simulation\n\nphi : float \n    packing fraction to use while initializing the particles\n\nradius : float [default: 5]\n    mean radius of the particles, see polydispersity\n\npolydispersity : float [default: 0]\n    relative polydispersity goal sqrt(<(a-<a>)^2>)/<a>\n\nbeta : float\n    damping parameter f = -beta v\n\nepsilon : float\n    force constant for the soft-sphere potential f = \\epsilon (1-d/d_0)^{3/2}\n\nT : float\n    temperature of the system\n\ndt : float\n    timestep for the integrator\n\ndim : integer [default: 3]\n    number of dimensions for the simulation\n\nbox_side_ratio : float [default: 1]\n    ratio of the box's z height to original z height before scaling.\n    therefore, you can squish the box by half (while elongating the\n    sides) by setting box_side_ratio=0.5", "id": "f5763:c0:m0"}
{"signature": "def integrate(self, forces):", "body": "self.vel += forces*self.dt<EOL>self.pos += self.vel*self.dt<EOL>", "docstring": "Integrate the equations of motion. For this simple integrator, we are\nusing the simplest sympletic integrator, NSV where\n\n    v_{n+1} = v_n + f*dt\n    x_{n+1} = x_n + v_{n+1}*dt\n\nParameters:\n-----------\nforces : ndarray[N,dim]\n    the forces on each particle", "id": "f5763:c0:m5"}
{"signature": "def create_single_particle_state(imsize, radius=<NUM_LIT>, seed=None, **kwargs):", "body": "_seed_or_not(seed)<EOL>imsize = _toarr(imsize)<EOL>pos = imsize.reshape(-<NUM_LIT:1>,<NUM_LIT:3>)/<NUM_LIT><EOL>rad = radius<EOL>return create_state(util.NullImage(shape=imsize), pos, rad, **kwargs)<EOL>", "docstring": "Creates a single particle state\n\nParameters:\n-----------\nimsize : tuple, array_like, or integer\n    the unpadded image size to fill with particles\n\nradius : float\n    radius of particles to add\n\nseed : integer\n    set the seed if desired\n\n*args, **kwargs : see create_state", "id": "f5764:m4"}
{"signature": "def create_two_particle_state(imsize, radius=<NUM_LIT>, delta=<NUM_LIT:1.0>, seed=None, axis='<STR_LIT:x>', **kwargs):", "body": "_seed_or_not(seed)<EOL>imsize = _toarr(imsize)<EOL>comp = {'<STR_LIT:x>': <NUM_LIT:2>, '<STR_LIT:y>': <NUM_LIT:1>, '<STR_LIT:z>': <NUM_LIT:0>}<EOL>t = float(radius)+float(delta)/<NUM_LIT:2><EOL>d = np.array([<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>])<EOL>d[comp[axis]] = t<EOL>pos = np.array([imsize/<NUM_LIT> - d, imsize/<NUM_LIT> + d]).reshape(-<NUM_LIT:1>,<NUM_LIT:3>)<EOL>rad = np.array([radius, radius])<EOL>return create_state(util.NullImage(shape=imsize), pos, rad, **kwargs)<EOL>", "docstring": "Creates a two particle state\n\nParameters:\n-----------\nimsize : tuple, array_like, or integer\n    the unpadded image size to fill with particles\n\nradius : float\n    radius of particles to add\n\ndelta : float\n    separation between the two particles\n\nseed : integer\n    set the seed if desired\n\n*args, **kwargs : see create_state", "id": "f5764:m5"}
{"signature": "def create_state(image, pos, rad, slab=None, sigma=<NUM_LIT>, conf=conf_simple):", "body": "<EOL>if not hasattr(rad, '<STR_LIT>'):<EOL><INDENT>rad = rad*np.ones(pos.shape[<NUM_LIT:0>])<EOL><DEDENT>model = models.models[conf.get('<STR_LIT>')]()<EOL>components = []<EOL>for k,v in iteritems(conf.get('<STR_LIT>', {})):<EOL><INDENT>args = conf.get('<STR_LIT:args>').get(k, {})<EOL>comp = model.registry[k][v](**args)<EOL>components.append(comp)<EOL><DEDENT>sphs = objs.PlatonicSpheresCollection(pos, rad)<EOL>if slab is not None:<EOL><INDENT>sphs = ComponentCollection([sphs, objs.Slab(zpos=slab+pad)], category='<STR_LIT>')<EOL><DEDENT>components.append(sphs)<EOL>s = states.ImageState(image, components, sigma=sigma)<EOL>if isinstance(image, util.NullImage):<EOL><INDENT>s.model_to_data()<EOL><DEDENT>return s<EOL>", "docstring": "Create a state from a blank image, set of pos and radii\n\nParameters:\n-----------\nimage : `peri.util.Image` object\n    raw confocal image with which to compare.\n\npos : initial conditions for positions (in raw image coordinates)\nrad : initial conditions for radii array (can be scalar)\nsigma : float, noise level\n\nslab : float\n    z-position of the microscope slide in the image (pixel units)", "id": "f5764:m2"}
{"signature": "def dict_to_pos_rad(d):", "body": "p, r = [], []<EOL>for i in itertools.count():<EOL><INDENT>try:<EOL><INDENT>p.append([d['<STR_LIT>'.format(i, c)] for c in '<STR_LIT>'])<EOL>r.append(d['<STR_LIT>'.format(i)])<EOL><DEDENT>except KeyError:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return np.array(p), np.array(r)<EOL>", "docstring": "Given a dictionary of a states params:values, returns the pos & rad.", "id": "f5765:m1"}
{"signature": "def state_to_ordereddict(st, include_iminfo=True):", "body": "od = OrderedDict()<EOL>for p in st.params:<EOL><INDENT>od.update({p:st.state[p]})<EOL><DEDENT>if include_iminfo:<EOL><INDENT>od.update({ '<STR_LIT>':st.image.filename,<EOL>'<STR_LIT>':str(st.image.tile)})<EOL><DEDENT>return od<EOL>", "docstring": "Represents a state as an OrderedDict\n\n    Parameters\n    ----------\n        st : :class:``peri.states.State``\n            The state to represent.\n        include_iminfo : Bool, optional\n            If set, includes two additional keys, ``'image.filename'`` and\n            ``'image.tile'`` with corresponding info about the image.\n            Default is True.\n\n    Returns\n    -------\n        ``collections.OrderedDict``", "id": "f5765:m2"}
{"signature": "def nearest(p0, p1, cutoff=None):", "body": "ind0, ind1 = [], []<EOL>for i in range(len(p0)):<EOL><INDENT>dist = np.sqrt(((p0[i] - p1)**<NUM_LIT:2>).sum(axis=-<NUM_LIT:1>))<EOL>if cutoff is None:<EOL><INDENT>ind1.append(dist.argmin())<EOL><DEDENT>elif dist.min() < cutoff:<EOL><INDENT>ind0.append(i)<EOL>ind1.append(dist.argmin())<EOL><DEDENT><DEDENT>if cutoff is None:<EOL><INDENT>return ind1<EOL><DEDENT>return ind0, ind1<EOL>", "docstring": "Correlate closest particles with each other (within cutoff).\n\nReturns ind0, ind1 so that p0[ind0] is close to p1[ind1].\n\nParameters\n----------\n    p0, p1 : numpy.ndarray\n        The particle positions.\n    cutoff : Float or None, optional\n        If not None, only returns particle indices with distance less\n        than `cutoff`. Default is None.\n\nReturns\n-------\n    ind0, ind1 : List\n        The lists of particle indices, p0[ind0] is close to p1[ind1].", "id": "f5765:m8"}
{"signature": "def sorted_files(globber, num_sort=True, num_indices=None, return_num=False):", "body": "files = glob.glob(globber)<EOL>files.sort()<EOL>if not num_sort:<EOL><INDENT>return files<EOL><DEDENT>num_indices = num_indices or np.s_[:]<EOL>allfiles = []<EOL>for fn in files:<EOL><INDENT>nums = re.findall(r'<STR_LIT>', fn)<EOL>data = [int(n) for n in nums[num_indices]] + [fn]<EOL>allfiles.append(data)<EOL><DEDENT>allfiles = sorted(allfiles)<EOL>if return_num:<EOL><INDENT>return allfiles<EOL><DEDENT>return [f[-<NUM_LIT:1>] for f in allfiles]<EOL>", "docstring": "Give a globbing expression of files to find. They will be sorted upon return.\nThis function is most useful when sorting does not provide numerical order,\ne.g.:\n    9 -> 12 returned as 10 11 12 9 by string sort\n\nIn this case use num_sort=True, and it will be sorted by numbers whose index\nis given by num_indices (possibly None for all numbers) then by string.", "id": "f5765:m0"}
{"signature": "def packing_fraction_state(state):", "body": "return state.get('<STR_LIT>').get()[state.inner].mean()<EOL>", "docstring": "Calculates the packing fraction of a state.\n\nParameters\n----------\n    state : :class:`peri.states.ImageState`\n\nReturns\n-------\n    Float\n        The volume fraction", "id": "f5765:m13"}
{"signature": "def good_particles(state, inbox=True, inboxrad=False, fullinbox=False,<EOL>pos=None, rad=None, ishape=None):", "body": "if pos is None:<EOL><INDENT>pos = state.obj_get_positions()<EOL><DEDENT>if rad is None:<EOL><INDENT>rad = state.obj_get_radii()<EOL><DEDENT>mask = rad > <NUM_LIT:0><EOL>if (inbox | inboxrad | fullinbox):<EOL><INDENT>if fullinbox:<EOL><INDENT>mask &= trim_box(state, pos, rad=-rad, ishape=ishape)<EOL><DEDENT>elif inboxrad:<EOL><INDENT>mask &= trim_box(state, pos, rad=rad, ishape=ishape)<EOL><DEDENT>else:<EOL><INDENT>mask &= trim_box(state, pos, rad=None, ishape=ishape)<EOL><DEDENT><DEDENT>return mask<EOL>", "docstring": "Returns a mask of `good' particles as defined by\n    * radius > 0\n    * position inside box\n\nParameters\n----------\n    state : :class:`peri.states.ImageState`\n        The state to identify the good particles. If pos, rad, and ishape\n        are provided, then this does not need to be passed.\n    inbox : Bool\n        Whether to only count particle centers within the image. Default\n        is True.\n    inboxrad : Bool\n        Whether to only count particles that overlap the image at all.\n        Default is False.\n    fullinbox : Bool\n        Whether to only include particles which are entirely in the\n        image. Default is False\n    pos : [3,N] np.ndarray or None\n        If not None, the particles' positions.\n    rad : [N] element numpy.ndarray or None\n        If not None, the particles' radii.\n    ishape : 3-element list-like or None\n        If not None, the inner region of the state.\n\nReturns\n-------\n    mask : np.ndarray of bools\n        A boolean mask of which particles are good (True) or bad.\nSee Also\n--------\n    trim_box", "id": "f5765:m6"}
{"signature": "def _eval_firstorder(self, rvecs, data, sigma):", "body": "if not self.blocksize:<EOL><INDENT>dist_between_points = self._distance_matrix(rvecs, self.x)<EOL>gaussian_weights = self._weight(dist_between_points, sigma=sigma)<EOL>return gaussian_weights.dot(data) / gaussian_weights.sum(axis=<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>ans = np.zeros(rvecs.shape[<NUM_LIT:0>], dtype='<STR_LIT:float>')<EOL>bs = self.blocksize<EOL>for a in range(<NUM_LIT:0>, rvecs.shape[<NUM_LIT:0>], bs):<EOL><INDENT>dist = self._distance_matrix(rvecs[a:a+bs], self.x)<EOL>weights = self._weight(dist, sigma=sigma)<EOL>ans[a:a+bs] += weights.dot(data) / weights.sum(axis=<NUM_LIT:1>)<EOL><DEDENT>return ans<EOL><DEDENT>", "docstring": "The first-order Barnes approximation", "id": "f5767:c0:m5"}
{"signature": "def _distance_matrix(self, a, b):", "body": "return (a[:, None] - b[None, :])**<NUM_LIT:2><EOL>", "docstring": "Pairwise distance between each point in `a` and each point in `b`", "id": "f5767:c0:m2"}
{"signature": "def _weight(self, rsq, sigma=None):", "body": "sigma = sigma or self.filter_size<EOL>if not self.clip:<EOL><INDENT>o = np.exp(-rsq / (<NUM_LIT:2>*sigma**<NUM_LIT:2>))<EOL><DEDENT>else:<EOL><INDENT>o = np.zeros(rsq.shape, dtype='<STR_LIT:float>')<EOL>m = (rsq < self.clipsize**<NUM_LIT:2>)<EOL>o[m] = np.exp(-rsq[m] / (<NUM_LIT:2>*sigma**<NUM_LIT:2>))<EOL><DEDENT>return o<EOL>", "docstring": "weighting function for Barnes", "id": "f5767:c0:m3"}
{"signature": "def _oldcall(self, rvecs):", "body": "g = self.filter_size<EOL>dist0 = self._distance_matrix(self.x, self.x)<EOL>dist1 = self._distance_matrix(rvecs, self.x)<EOL>tmp = self._weight(dist0, g).dot(self.d)<EOL>out = self._weight(dist1, g).dot(self.d)<EOL>for i in range(self.iterations):<EOL><INDENT>out = out + self._weight(dist1, g).dot(self.d - tmp)<EOL>tmp = tmp + self._weight(dist0, g).dot(self.d - tmp)<EOL>g *= self.damp<EOL><DEDENT>return out<EOL>", "docstring": "Barnes w/o normalizing the weights", "id": "f5767:c0:m7"}
{"signature": "def _distance_matrix(self, a, b):", "body": "def sq(x): return (x * x)<EOL>matrix = sq(a[:, <NUM_LIT:0>][:, None] - b[:, <NUM_LIT:0>][None, :])<EOL>for x, y in zip(a.T[<NUM_LIT:1>:], b.T[<NUM_LIT:1>:]):<EOL><INDENT>matrix += sq(x[:, None] - y[None, :])<EOL><DEDENT>return matrix<EOL>", "docstring": "Pairwise distance between each point in `a` and each point in `b`", "id": "f5767:c1:m1"}
{"signature": "def _c2x(self, c):", "body": "return <NUM_LIT:0.5> * (self.window[<NUM_LIT:0>] + self.window[<NUM_LIT:1>] +<EOL>c * (self.window[<NUM_LIT:1>] - self.window[<NUM_LIT:0>]))<EOL>", "docstring": "Convert cheb coordinates to windowdow coordinates", "id": "f5767:c2:m2"}
{"signature": "def get_base_model(self):", "body": "return self.modelstr['<STR_LIT>']<EOL>", "docstring": "The complete model, no derivatives", "id": "f5772:c1:m4"}
{"signature": "def check_consistency(self):", "body": "error = False<EOL>regex = re.compile('<STR_LIT>')<EOL>if '<STR_LIT>' not in self.modelstr:<EOL><INDENT>raise ModelError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>for name, eq in iteritems(self.modelstr):<EOL><INDENT>var = regex.findall(eq)<EOL>for v in var:<EOL><INDENT>v = re.sub(r\"<STR_LIT>\", '<STR_LIT>', v)<EOL>if v not in self.varmap:<EOL><INDENT>log.error(<EOL>\"<STR_LIT>\" %<EOL>(v, name, eq, self.varmap)<EOL>)<EOL>error = True<EOL><DEDENT><DEDENT><DEDENT>if error:<EOL><INDENT>raise ModelError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Make sure that the required comps are included in the list of\ncomponents supplied by the user. Also check that the parameters are\nconsistent across the many components.", "id": "f5772:c1:m1"}
{"signature": "def check_inputs(self, comps):", "body": "error = False<EOL>compcats = [c.category for c in comps]<EOL>for k, v in iteritems(self.varmap):<EOL><INDENT>if k not in self.modelstr['<STR_LIT>']:<EOL><INDENT>log.warn('<STR_LIT>' % (k,v))<EOL><DEDENT>if v not in compcats:<EOL><INDENT>log.error('<STR_LIT>' % (k,v))<EOL>error = True<EOL><DEDENT><DEDENT>if error:<EOL><INDENT>raise ModelError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Check that the list of components `comp` is compatible with both the\nvarmap and modelstr for this Model", "id": "f5772:c1:m2"}
{"signature": "def diffname(self, name):", "body": "return '<STR_LIT:d>'+name<EOL>", "docstring": "Transform a variable name into a derivative", "id": "f5772:c1:m3"}
{"signature": "def do_internal_run(self, initial_count=<NUM_LIT:0>, subblock=None, update_derr=True):", "body": "self._inner_run_counter = initial_count; good_step = True<EOL>n_good_steps = <NUM_LIT:0><EOL>CLOG.debug('<STR_LIT>')<EOL>_last_residuals = self.calc_residuals().copy()<EOL>while ((self._inner_run_counter < self.run_length) & good_step &<EOL>(not self.check_terminate())):<EOL><INDENT>if self.check_Broyden_J() and self._inner_run_counter != <NUM_LIT:0>:<EOL><INDENT>self.update_Broyden_J()<EOL><DEDENT>if self.check_update_eig_J() and self._inner_run_counter != <NUM_LIT:0>:<EOL><INDENT>self.update_eig_J()<EOL><DEDENT>er0 = <NUM_LIT:1>*self.error<EOL>delta_vals = self.find_LM_updates(self.calc_grad(),<EOL>do_correct_damping=False, subblock=subblock)<EOL>er1 = self.update_function(self.param_vals + delta_vals)<EOL>good_step = er1 < er0<EOL>if good_step:<EOL><INDENT>n_good_steps += <NUM_LIT:1><EOL>CLOG.debug('<STR_LIT>' % (er0, er1))<EOL>self.update_param_vals(delta_vals, incremental=True)<EOL>self._last_residuals = _last_residuals.copy()<EOL>if update_derr:<EOL><INDENT>self._last_error = er0<EOL><DEDENT>self.error = er1<EOL>_last_residuals = self.calc_residuals().copy()<EOL><DEDENT>else:<EOL><INDENT>er0_0 = self.update_function(self.param_vals)<EOL>CLOG.debug('<STR_LIT>')<EOL>if np.abs(er0 - er0_0) > <NUM_LIT>:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT><DEDENT>self._inner_run_counter += <NUM_LIT:1><EOL><DEDENT>return n_good_steps<EOL>", "docstring": "Takes more steps without calculating J again.\n\nGiven a fixed damping, J, JTJ, iterates calculating steps, with\noptional Broyden or eigendirection updates. Iterates either until\na bad step is taken or for self.run_length times.\nCalled internally by do_run_2() but is also useful on its own.\n\nParameters\n----------\n    initial_count : Int, optional\n        The initial count of the run. Default is 0. 
Increasing from\n        0 effectively temporarily decreases run_length.\n    subblock : None or np.ndarray of bools, optional\n        If not None, a boolean mask which determines which sub-\n        block of parameters to run over. Default is None, i.e.\n        all the parameters.\n    update_derr : Bool, optional\n        Set to False to not update the variable that determines\n        delta_err, preventing premature termination through errtol.\n\nNotes\n-----\nIt might be good to do something similar to update_derr with the\nparameter values, but this is trickier because of Broyden updates\nand _fresh_J.", "id": "f5773:c0:m10"}
{"signature": "def check_terminate(self):", "body": "if not self._has_run:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>terminate = self.check_completion()<EOL>terminate |= (self._num_iter >= self.max_iter)<EOL>return terminate<EOL><DEDENT>", "docstring": "Returns a Bool of whether to terminate.\n\nChecks whether a satisfactory minimum has been found or whether\ntoo many iterations have occurred.", "id": "f5773:c0:m21"}
{"signature": "def find_particles_in_tile(positions, tile):", "body": "bools = tile.contains(positions)<EOL>return np.arange(bools.size)[bools]<EOL>", "docstring": "Finds the particles in a tile, as numpy.ndarray of ints.\n\nParameters\n----------\n    positions : `numpy.ndarray`\n        [N,3] array of the particle positions to check in the tile\n    tile : :class:`peri.util.Tile` instance\n        Tile of the region inside which to check for particles.\n\nReturns\n-------\n    numpy.ndarray, int\n        The indices of the particles in the tile.", "id": "f5773:m5"}
{"signature": "def calc_J(self):", "body": "del self.J<EOL>self.J = np.zeros([self.param_vals.size, self.data.size])<EOL>dp = np.zeros_like(self.param_vals)<EOL>f0 = self.model.copy()<EOL>for a in range(self.param_vals.size):<EOL><INDENT>dp *= <NUM_LIT:0><EOL>dp[a] = self.dl[a]<EOL>f1 = self.func(self.param_vals + dp, *self.func_args, **self.func_kwargs)<EOL>grad_func = (f1 - f0) / dp[a]<EOL>self.J[a] = -grad_func<EOL><DEDENT>", "docstring": "Updates self.J, returns nothing", "id": "f5773:c1:m2"}
{"signature": "def calc_residuals(self):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "returns residuals = data - model.", "id": "f5773:c0:m4"}
{"signature": "def separate_particles_into_groups(s, region_size=<NUM_LIT>, bounds=None,<EOL>doshift=False):", "body": "imtile = s.oshape.translate(-s.pad)<EOL>bounding_tile = (imtile if bounds is None else Tile(bounds[<NUM_LIT:0>], bounds[<NUM_LIT:1>]))<EOL>rs = (np.ones(bounding_tile.dim, dtype='<STR_LIT:int>')*region_size if<EOL>np.size(region_size) == <NUM_LIT:1> else np.array(region_size))<EOL>n_translate = np.ceil(bounding_tile.shape.astype('<STR_LIT:float>')/rs).astype('<STR_LIT:int>')<EOL>particle_groups = []<EOL>tile = Tile(left=bounding_tile.l, right=bounding_tile.l + rs)<EOL>if doshift == '<STR_LIT>':<EOL><INDENT>doshift = np.random.choice([True, False])<EOL><DEDENT>if doshift:<EOL><INDENT>shift = rs // <NUM_LIT:2><EOL>n_translate += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>shift = <NUM_LIT:0><EOL><DEDENT>deltas = np.meshgrid(*[np.arange(i) for i in n_translate])<EOL>positions = s.obj_get_positions()<EOL>if bounds is None:<EOL><INDENT>positions = np.clip(positions, imtile.l+<NUM_LIT>, imtile.r-<NUM_LIT>)<EOL><DEDENT>groups = list(map(lambda *args: find_particles_in_tile(positions,<EOL>tile.translate( np.array(args) * rs - shift)), *[d.ravel()<EOL>for d in deltas]))<EOL>for i in range(len(groups)-<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>):<EOL><INDENT>if groups[i].size == <NUM_LIT:0>:<EOL><INDENT>groups.pop(i)<EOL><DEDENT><DEDENT>assert _check_groups(s, groups)<EOL>return groups<EOL>", "docstring": "Separates particles into convenient groups for optimization.\n\nGiven a state, returns a list of groups of particles. Each group of\nparticles are located near each other in the image. Every particle\nlocated in the desired region is contained in exactly 1 group.\n\nParameters\n----------\ns : :class:`peri.states.ImageState`\n    The peri state to find particles in.\nregion_size : Int or 3-element list-like of ints, optional\n    The size of the box. Groups particles into boxes of shape\n    (region_size[0], region_size[1], region_size[2]). 
If region_size\n    is a scalar, the box is a cube of length region_size.\n    Default is 40.\nbounds : 2-element list-like of 3-element lists, optional\n    The sub-region of the image over which to look for particles.\n        bounds[0]: The lower-left  corner of the image region.\n        bounds[1]: The upper-right corner of the image region.\n    Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire\n    image size, i.e. the default places every particle in the image\n    somewhere in the groups.\ndoshift : {True, False, `'rand'`}, optional\n    Whether or not to shift the tile boxes by half a region size, to\n    prevent the same particles to be chosen every time. If `'rand'`,\n    randomly chooses either True or False. Default is False\n\nReturns\n-------\nparticle_groups : List\n    Each element of particle_groups is an int numpy.ndarray of the\n    group of nearby particles. Only contains groups with a nonzero\n    number of particles, so the elements don't necessarily correspond\n    to a given image region.", "id": "f5773:m6"}
{"signature": "def reset(self, new_region_size=None, do_calc_size=True, new_damping=None,<EOL>new_max_mem=None):", "body": "if new_region_size is not None:<EOL><INDENT>self.region_size = new_region_size<EOL><DEDENT>if new_max_mem != None:<EOL><INDENT>self.max_mem = new_max_mem<EOL><DEDENT>if do_calc_size:<EOL><INDENT>self.region_size = calc_particle_group_region_size(self.state,<EOL>region_size=self.region_size, max_mem=self.max_mem)<EOL><DEDENT>self.stats = []<EOL>self.particle_groups = separate_particles_into_groups(self.state,<EOL>self.region_size, doshift='<STR_LIT>')<EOL>if new_damping is not None:<EOL><INDENT>self._kwargs.update({'<STR_LIT>':new_damping})<EOL><DEDENT>if self.save_J:<EOL><INDENT>if len(self.particle_groups) > <NUM_LIT>:<EOL><INDENT>CLOG.warn('<STR_LIT>')<EOL><DEDENT>self._tempfiles = []<EOL>self._has_saved_J = []<EOL>for a in range(len(self.particle_groups)):<EOL><INDENT>for _ in ['<STR_LIT>','<STR_LIT>']:<EOL><INDENT>self._tempfiles.append(tempfile.TemporaryFile(dir=os.getcwd()))<EOL><DEDENT>self._has_saved_J.append(False)<EOL><DEDENT><DEDENT>", "docstring": "Resets the particle groups and optionally the region size and damping.\n\nParameters\n----------\n    new_region_size : : Int or 3-element list-like of ints, optional\n        The region size for sub-blocking particles. Default is 40\n    do_calc_size : Bool, optional\n        If True, calculates the region size internally based on\n        the maximum allowed memory. Default is True\n    new_damping : Float or None, optional\n        The new damping of the optimizer. Set to None to leave\n        as the default for LMParticles. Default is None.\n    new_max_mem : Numeric, optional\n        The maximum allowed memory for J to occupy. Default is 1e9", "id": "f5773:c7:m1"}
{"signature": "def update_Broyden_J(self):", "body": "CLOG.debug('<STR_LIT>')<EOL>delta_vals = self.param_vals - self._last_vals<EOL>delta_residuals = self.calc_residuals() - self._last_residuals<EOL>nrm = np.sqrt(np.dot(delta_vals, delta_vals))<EOL>direction = delta_vals / nrm<EOL>vals = delta_residuals / nrm<EOL>self._rank_1_J_update(direction, vals)<EOL>self.JTJ = np.dot(self.J, self.J.T)<EOL>", "docstring": "Execute a Broyden update of J", "id": "f5773:c0:m27"}
{"signature": "def update_param_vals(self, new_vals, incremental=False):", "body": "self._last_vals = self.param_vals.copy()<EOL>if incremental:<EOL><INDENT>self.param_vals += new_vals<EOL><DEDENT>else:<EOL><INDENT>self.param_vals = new_vals.copy()<EOL><DEDENT>self._fresh_JTJ = False<EOL>", "docstring": "Updates the current set of parameter values and previous values,\nsets a flag to re-calculate J.\n\nParameters\n----------\n    new_vals : numpy.ndarray\n        The new values to update to\n    incremental : Bool, optional\n        Set to True to make it an incremental update relative\n        to the old parameters. Default is False", "id": "f5773:c0:m16"}
{"signature": "def update_J(self):", "body": "self.calc_J()<EOL>step = np.ceil(<NUM_LIT> * self.J.shape[<NUM_LIT:1>]).astype('<STR_LIT:int>')  <EOL>self.JTJ = low_mem_sq(self.J, step=step)<EOL>self._fresh_JTJ = True<EOL>self._J_update_counter = <NUM_LIT:0><EOL>if np.any(np.isnan(self.JTJ)):<EOL><INDENT>raise FloatingPointError('<STR_LIT>')<EOL><DEDENT>self._exp_err = self.error - self.find_expected_error(delta_params='<STR_LIT>')<EOL>", "docstring": "Updates J, JTJ, and internal counters.", "id": "f5773:c0:m23"}
{"signature": "def update_function(self, param_vals):", "body": "self.model = self.func(param_vals, *self.func_args, **self.func_kwargs)<EOL>d = self.calc_residuals()<EOL>return np.dot(d.flat, d.flat)<EOL>", "docstring": "Takes an array param_vals, updates function, returns the new error", "id": "f5773:c1:m4"}
{"signature": "def calc_accel_correction(self, damped_JTJ, delta0):", "body": "<EOL>_ = self.update_function(self.param_vals)<EOL>rm0 = self.calc_residuals().copy()<EOL>_ = self.update_function(self.param_vals + delta0)<EOL>rm1 = self.calc_residuals().copy()<EOL>_ = self.update_function(self.param_vals - delta0)<EOL>rm2 = self.calc_residuals().copy()<EOL>der2 = (rm2 + rm1 - <NUM_LIT:2>*rm0)<EOL>corr, res, rank, s = np.linalg.lstsq(damped_JTJ, np.dot(self.J, der2),<EOL>rcond=self.min_eigval)<EOL>corr *= -<NUM_LIT:0.5><EOL>return corr<EOL>", "docstring": "Geodesic acceleration correction to the LM step.\n\nParameters\n----------\n    damped_JTJ : numpy.ndarray\n        The damped JTJ used to calculate the initial step.\n    delta0 : numpy.ndarray\n        The initial LM step.\n\nReturns\n-------\n    corr : numpy.ndarray\n        The correction to the original LM step.", "id": "f5773:c0:m30"}
{"signature": "def check_completion(self):", "body": "terminate = False<EOL>term_dict = self.get_termination_stats(get_cos=self.costol is not None)<EOL>terminate |= np.all(np.abs(term_dict['<STR_LIT>']) < self.paramtol)<EOL>terminate |= (term_dict['<STR_LIT>'] < self.errtol)<EOL>terminate |= (term_dict['<STR_LIT>'] < self.exptol)<EOL>terminate |= (term_dict['<STR_LIT>'] < self.fractol)<EOL>if self.costol is not None:<EOL><INDENT>terminate |= (curcos < term_dict['<STR_LIT>'])<EOL><DEDENT>return terminate<EOL>", "docstring": "Returns a Bool of whether the algorithm has found a satisfactory minimum", "id": "f5773:c0:m20"}
{"signature": "def find_best_step(err_vals):", "body": "if np.all(np.isnan(err_vals)):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return np.nanargmin(err_vals)<EOL>", "docstring": "Returns the index of the lowest of the passed values. Catches nans etc.", "id": "f5773:m10"}
{"signature": "def calc_model_cosine(self, decimate=None, mode='<STR_LIT>'):", "body": "if mode == '<STR_LIT>':<EOL><INDENT>slicer = slice(<NUM_LIT:0>, None, decimate)<EOL>u, sig, v = np.linalg.svd(self.J[:,slicer], full_matrices=False) <EOL>r = self.calc_residuals()[slicer]<EOL>abs_r = np.sqrt((r*r).sum())<EOL>v_r = np.dot(v,r/abs_r)<EOL>projected = np.dot(v.T, v_r)<EOL>abs_cos = np.sqrt((projected*projected).sum())<EOL><DEDENT>elif mode == '<STR_LIT>':<EOL><INDENT>expected_error = self.find_expected_error(delta_params='<STR_LIT>')<EOL>model_sine_2 = expected_error / self.error  <EOL>abs_cos = np.sqrt(<NUM_LIT:1> - model_sine_2)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return abs_cos<EOL>", "docstring": "Calculates the cosine of the residuals with the model.\n\nParameters\n----------\n    decimate : Int or None, optional\n        Decimate the residuals by `decimate` pixels. If None, no\n        decimation is used. Valid only with mode='svd'. Default\n        is None\n    mode : {'svd', 'err'}\n        Which mode to use; see Notes section. Default is 'err'.\n\nReturns\n-------\n    abs_cos : numpy.float64\n        The absolute value of the model cosine.\n\nNotes\n-----\nThe model cosine is defined in terms of the geometric view of\ncurve-fitting, as a model manifold embedded in a high-dimensional\nspace. The model cosine is the cosine of the residuals vector\nwith its projection on the tangent space: :math:`cos(phi) = |P^T r|/|r|`\nwhere :math:`P^T` is the projection operator onto the model manifold\nand :math:`r` the residuals. This can be calculated two ways: By\ncalculating the projection operator P directly with SVD (mode=`svd`),\nor by using the expected error if the model were linear to calculate\na model sine first (mode=`err`). Since the SVD of a large matrix is\nslow, mode=`err` is faster.\n\n`decimate` allows for every nth pixel only to be counted in the\nSVD matrix of J for speed. 
While this is n x faster, it is\nconsiderably less accurate, so the default is no decimation.", "id": "f5773:c0:m18"}
{"signature": "def update_function(self, param_vals):", "body": "self.opt_obj.update_function(param_vals)<EOL>return self.opt_obj.get_error()<EOL>", "docstring": "Updates the opt_obj, returns new error.", "id": "f5773:c2:m4"}
{"signature": "def do_run_2(self):", "body": "while not self.check_terminate():<EOL><INDENT>self._has_run = True<EOL>self._run2()<EOL>self._num_iter += <NUM_LIT:1><EOL><DEDENT>", "docstring": "LM run evaluating 2 steps (damped and not) and choosing the best.\n\nAfter finding the best of 2 steps, runs with that damping + Broyden\nor eigendirection updates, until deciding to do a full-J update.\nOnly changes damping after full-J updates.", "id": "f5773:c0:m8"}
{"signature": "def calc_grad(self):", "body": "residuals = self.calc_residuals()<EOL>return <NUM_LIT:2>*np.dot(self.J, residuals)<EOL>", "docstring": "The gradient of the cost w.r.t. the parameters.", "id": "f5773:c0:m24"}
{"signature": "def do_levmarq_particles(s, particles, damping=<NUM_LIT:1.0>, decrease_damp_factor=<NUM_LIT>,<EOL>run_length=<NUM_LIT:4>, collect_stats=False, max_iter=<NUM_LIT:2>, **kwargs):", "body": "lp = LMParticles(s, particles, damping=damping, run_length=run_length,<EOL>decrease_damp_factor=decrease_damp_factor, max_iter=max_iter,<EOL>**kwargs)<EOL>lp.do_run_2()<EOL>if collect_stats:<EOL><INDENT>return lp.get_termination_stats()<EOL><DEDENT>", "docstring": "Levenberg-Marquardt optimization on a set of particles.\n\nConvenience wrapper for LMParticles. Same keyword args, but the\ndefaults have been set to useful values for optimizing particles.\nSee LMParticles and LMEngine for documentation.\n\nSee Also\n--------\n    do_levmarq_all_particle_groups : Levenberg-Marquardt optimization\n        of all the particles in the state.\n\n    do_levmarq : Levenberg-Marquardt optimization of the entire state;\n        useful for optimizing global parameters.\n\n    LMParticles : Optimizer object; the workhorse of do_levmarq_particles.\n\n    LMEngine : Engine superclass for all the optimizers.", "id": "f5773:m12"}
{"signature": "def calc_particle_group_region_size(s, region_size=<NUM_LIT>, max_mem=<NUM_LIT>, **kwargs):", "body": "region_size = np.array(region_size).astype('<STR_LIT:int>')<EOL>def calc_mem_usage(region_size):<EOL><INDENT>rs = np.array(region_size)<EOL>particle_groups = separate_particles_into_groups(s, region_size=<EOL>rs.tolist(), **kwargs)<EOL>numpart = [np.size(g) for g in particle_groups]<EOL>biggroups = [particle_groups[i] for i in np.argsort(numpart)[-<NUM_LIT:5>:]]<EOL>def get_tile_jsize(group):<EOL><INDENT>nms = s.param_particle(group)<EOL>tile = s.get_update_io_tiles(nms, s.get_values(nms))[<NUM_LIT:2>]<EOL>return tile.shape.prod() * len(nms)<EOL><DEDENT>mems = [<NUM_LIT:8>*get_tile_jsize(g) for g in biggroups]  <EOL>return np.max(mems)<EOL><DEDENT>im_shape = s.oshape.shape<EOL>if calc_mem_usage(region_size) > max_mem:<EOL><INDENT>while ((calc_mem_usage(region_size) > max_mem) and<EOL>np.any(region_size > <NUM_LIT:2>)):<EOL><INDENT>region_size = np.clip(region_size-<NUM_LIT:1>, <NUM_LIT:2>, im_shape)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>while ((calc_mem_usage(region_size) < max_mem) and<EOL>np.any(region_size < im_shape)):<EOL><INDENT>region_size = np.clip(region_size+<NUM_LIT:1>, <NUM_LIT:2>, im_shape)<EOL><DEDENT>region_size -= <NUM_LIT:1> <EOL><DEDENT>return region_size<EOL>", "docstring": "Finds the biggest region size for LM particle optimization with a\ngiven memory constraint.\n\nInput Parameters\n----------------\n    s : :class:`peri.states.ImageState`\n        The state with the particles\n    region_size : Int or 3-element list-like of ints, optional.\n        The initial guess for the region size. Default is 40\n    max_mem : Numeric, optional\n        The maximum memory for the optimizer to take. 
Default is 1e9\n\nOther Parameters\n----------------\n    bounds: 2-element list-like of 3-element lists.\n        The sub-region of the image over which to look for particles.\n            bounds[0]: The lower-left  corner of the image region.\n            bounds[1]: The upper-right corner of the image region.\n        Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire\n        image size, i.e. the default places every particle in the image\n        somewhere in the groups.\nReturns\n-------\n    region_size : numpy.ndarray of ints of the region size.", "id": "f5773:m8"}
{"signature": "def calc_residuals(self):", "body": "return self.state.residuals.ravel().copy()<EOL>", "docstring": "See also\n--------\n:func:`peri.state.States.residuals`", "id": "f5773:c4:m3"}
{"signature": "def get_termination_stats(self, get_cos=True):", "body": "delta_vals = self._last_vals - self.param_vals<EOL>delta_err = self._last_error - self.error<EOL>frac_err = delta_err / self.error<EOL>to_return = {'<STR_LIT>':delta_vals, '<STR_LIT>':delta_err,<EOL>'<STR_LIT>':<NUM_LIT:1>*self._num_iter, '<STR_LIT>':frac_err,<EOL>'<STR_LIT:error>':self.error, '<STR_LIT>':self._exp_err}<EOL>if get_cos:<EOL><INDENT>model_cosine = self.calc_model_cosine()<EOL>to_return.update({'<STR_LIT>':model_cosine})<EOL><DEDENT>return to_return<EOL>", "docstring": "Returns a dict of termination statistics\n\nParameters\n----------\n    get_cos : Bool, optional\n        Whether or not to calcualte the cosine of the residuals\n        with the tangent plane of the model using the current J.\n        The calculation may take some time. Default is True\n\nReturns\n-------\n    dict\n        Has keys\n            delta_vals  : The last change in parameter values.\n            delta_err   : The last change in the error.\n            exp_err     : The expected (last) change in the error.\n            frac_err    : The fractional change in the error.\n            num_iter    : The number of iterations completed.\n            error       : The current error.", "id": "f5773:c0:m19"}
{"signature": "def update_select_J(self, blk):", "body": "self.update_function(self.param_vals)<EOL>params = np.array(self.param_names)[blk].tolist()<EOL>blk_J = -self.state.gradmodel(params=params, inds=self._inds, flat=False)<EOL>self.J[blk] = blk_J<EOL>self.JTJ = np.dot(self.J, self.J.T)<EOL>if np.any(np.isnan(self.J)) or np.any(np.isnan(self.JTJ)):<EOL><INDENT>raise FloatingPointError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Updates J only for certain parameters, described by the boolean\nmask blk.", "id": "f5773:c5:m6"}
{"signature": "def _check_groups(s, groups):", "body": "ans = []<EOL>for g in groups:<EOL><INDENT>ans.extend(g)<EOL><DEDENT>if np.unique(ans).size != np.size(ans):<EOL><INDENT>return False<EOL><DEDENT>elif np.unique(ans).size != s.obj_get_positions().shape[<NUM_LIT:0>]:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return (np.arange(s.obj_get_radii().size) == np.sort(ans)).all()<EOL><DEDENT>", "docstring": "Ensures that all particles are included in exactly 1 group", "id": "f5773:m7"}
{"signature": "def reset(self, **kwargs):", "body": "self.aug_state.reset()<EOL>super(LMAugmentedState, self).reset(**kwargs)<EOL>", "docstring": "Resets the aug_state and the LMEngine", "id": "f5773:c9:m4"}
{"signature": "def do_run_2(self):", "body": "self._do_run(mode='<STR_LIT:2>')<EOL>", "docstring": "Calls LMParticles.do_run_2 for each group of particles.", "id": "f5773:c7:m7"}
{"signature": "def _run1(self):", "body": "if self.check_update_J():<EOL><INDENT>self.update_J()<EOL><DEDENT>else:<EOL><INDENT>if self.check_Broyden_J():<EOL><INDENT>self.update_Broyden_J()<EOL><DEDENT>if self.check_update_eig_J():<EOL><INDENT>self.update_eig_J()<EOL><DEDENT><DEDENT>delta_vals = self.find_LM_updates(self.calc_grad())<EOL>er1 = self.update_function(self.param_vals + delta_vals)<EOL>good_step = (find_best_step([self.error, er1]) == <NUM_LIT:1>)<EOL>if not good_step:<EOL><INDENT>er0 = self.update_function(self.param_vals)<EOL>if np.abs(er0 -self.error)/er0 > <NUM_LIT>:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>CLOG.debug('<STR_LIT>')<EOL>CLOG.debug('<STR_LIT>' % (self.error, er1))<EOL>grad = self.calc_grad()<EOL>for _try in range(self._max_inner_loop):<EOL><INDENT>self.increase_damping()<EOL>delta_vals = self.find_LM_updates(grad)<EOL>er1 = self.update_function(self.param_vals + delta_vals)<EOL>good_step = (find_best_step([self.error, er1]) == <NUM_LIT:1>)<EOL>if good_step:<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>er0 = self.update_function(self.param_vals)<EOL>CLOG.warn('<STR_LIT>')<EOL>if np.abs(er0 -self.error)/er0 > <NUM_LIT>:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>if good_step:<EOL><INDENT>self._last_error = self.error<EOL>self.error = er1<EOL>CLOG.debug('<STR_LIT>' % (self._last_error, self.error))<EOL>self.update_param_vals(delta_vals, incremental=True)<EOL>self.decrease_damping()<EOL><DEDENT>", "docstring": "workhorse for do_run_1", "id": "f5773:c0:m7"}
{"signature": "def update_function(self, param_vals):", "body": "dp = np.zeros(self.p0.size)<EOL>for a in range(param_vals.size):<EOL><INDENT>dp += param_vals[a] * self.directions[a]<EOL><DEDENT>self.state.update(self.state.params, self.p0 + dp)<EOL>self.param_vals[:] = param_vals<EOL>return None<EOL>", "docstring": "Updates with param_vals[i] = distance from self.p0 along self.direction[i].", "id": "f5773:c4:m1"}
{"signature": "def do_internal_run(self):", "body": "if not self.save_J:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>if not np.all(self._has_saved_J):<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>self._do_run(mode='<STR_LIT>')<EOL>", "docstring": "Calls LMParticles.do_internal_run for each group of particles.", "id": "f5773:c7:m8"}
{"signature": "def check_update_J(self):", "body": "self._J_update_counter += <NUM_LIT:1><EOL>update = self._J_update_counter >= self.update_J_frequency<EOL>return update & (not self._fresh_JTJ)<EOL>", "docstring": "Checks if the full J should be updated.\n\nRight now, just updates after update_J_frequency loops", "id": "f5773:c0:m22"}
{"signature": "def reset(self):", "body": "inds = list(range(self.state.obj_get_positions().shape[<NUM_LIT:0>]))<EOL>self._rad_nms = self.state.param_particle_rad(inds)<EOL>self._pos_nms = self.state.param_particle_pos(inds)<EOL>self._initial_rad = np.copy(self.state.state[self._rad_nms])<EOL>self._initial_pos = np.copy(self.state.state[self._pos_nms]).reshape((-<NUM_LIT:1>,<NUM_LIT:3>))<EOL>self.param_vals[self.rscale_mask] = <NUM_LIT:0><EOL>", "docstring": "Resets the initial radii used for updating the particles. Call\nif any of the particle radii or positions have been changed\nexternal to the augmented state.", "id": "f5773:c8:m1"}
{"signature": "def update_eig_J(self):", "body": "CLOG.debug('<STR_LIT>')<EOL>vls, vcs = np.linalg.eigh(self.JTJ)<EOL>res0 = self.calc_residuals()<EOL>for a in range(min([self.num_eig_dirs, vls.size])):<EOL><INDENT>stif_dir = vcs[-(a+<NUM_LIT:1>)] <EOL>dl = self.eig_dl <EOL>_ = self.update_function(self.param_vals + dl*stif_dir)<EOL>res1 = self.calc_residuals()<EOL>grad_stif = (res1-res0)/dl<EOL>self._rank_1_J_update(stif_dir, grad_stif)<EOL><DEDENT>self.JTJ = np.dot(self.J, self.J.T)<EOL>_ = self.update_function(self.param_vals)<EOL>", "docstring": "Execute an eigen update of J", "id": "f5773:c0:m29"}
{"signature": "def get_rand_Japprox(s, params, num_inds=<NUM_LIT:1000>, include_cost=False, **kwargs):", "body": "start_time = time.time()<EOL>tot_pix = s.residuals.size<EOL>if num_inds < tot_pix:<EOL><INDENT>inds = np.random.choice(tot_pix, size=num_inds, replace=False)<EOL>slicer = None<EOL>return_inds = np.sort(inds)<EOL><DEDENT>else:<EOL><INDENT>inds = None<EOL>return_inds = slice(<NUM_LIT:0>, None)<EOL>slicer = [slice(<NUM_LIT:0>, None)]*len(s.residuals.shape)<EOL><DEDENT>if include_cost:<EOL><INDENT>Jact, ge = s.gradmodel_e(params=params, inds=inds, slicer=slicer,flat=False,<EOL>**kwargs)<EOL>Jact *= -<NUM_LIT:1><EOL>J = [Jact, ge]<EOL><DEDENT>else:<EOL><INDENT>J = -s.gradmodel(params=params, inds=inds, slicer=slicer, flat=False,<EOL>**kwargs)<EOL><DEDENT>CLOG.debug('<STR_LIT>' % (time.time()-start_time))<EOL>return J, return_inds<EOL>", "docstring": "Calculates a random approximation to J by returning J only at a\nset of random pixel/voxel locations.\n\nParameters\n----------\n    s : :class:`peri.states.State`\n        The state to calculate J for.\n    params : List\n        The list of parameter names to calculate the gradient of.\n    num_inds : Int, optional.\n        The number of pix/voxels at which to calculate the random\n        approximation to J. Default is 1000.\n    include_cost : Bool, optional\n        Set to True to append a finite-difference measure of the full\n        cost gradient onto the returned J.\n\nOther Parameters\n----------------\n    All kwargs parameters get passed to s.gradmodel only.\n\nReturns\n-------\n    J : numpy.ndarray\n        [d, num_inds] array of J, at the given indices.\n\n    return_inds : numpy.ndarray or slice\n        [num_inds] element array or slice(0, None) of the model\n        indices at which J was evaluated.", "id": "f5773:m0"}
{"signature": "def fit_comp(new_comp, old_comp, **kwargs):", "body": "<EOL>new_cat = new_comp.category<EOL>new_comp.category = '<STR_LIT>'<EOL>fake_s = states.ImageState(Image(old_comp.get().copy()), [new_comp], pad=<NUM_LIT:0>,<EOL>mdl=mdl.SmoothFieldModel())<EOL>do_levmarq(fake_s, new_comp.params, **kwargs)<EOL>new_comp.category = new_cat<EOL>", "docstring": "Fits a new component to an old component\n\nCalls do_levmarq to match the .get() fields of the two objects. The\nparameters of new_comp are modified in place.\n\nParameters\n----------\nnew_comp : :class:`peri.comps.comp`\n    The new object, whose parameters to update to fit the field of\n    `old_comp`. Must have a .get() attribute which returns an ndarray\nold_comp : peri.comp\n    The old ilm to match to.\n\nOther Parameters\n----------------\n    Any keyword arguments to be passed to the optimizer LMGlobals\n    through do_levmarq.\n\nSee Also\n--------\ndo_levmarq : Levenberg-Marquardt minimization using a random subset\n    of the image pixels.", "id": "f5773:m17"}
{"signature": "def get_residuals_update_tile(st, padded_tile):", "body": "inner_tile = st.ishape.intersection([st.ishape, padded_tile])<EOL>return inner_tile.translate(-st.pad)<EOL>", "docstring": "Translates a tile in the padded image to the unpadded image.\n\nGiven a state and a tile that corresponds to the padded image, returns\na tile that corresponds to the the corresponding pixels of the difference\nimage\n\nParameters\n----------\n    st : :class:`peri.states.State`\n        The state\n    padded_tile : :class:`peri.util.Tile`\n        The tile in the padded image.\n\nReturns\n-------\n    :class:`peri.util.Tile`\n        The tile corresponding to padded_tile in the unpadded image.", "id": "f5773:m9"}
{"signature": "def remove_bad_particles(st, min_rad='<STR_LIT>', max_rad='<STR_LIT>', min_edge_dist=<NUM_LIT>,<EOL>check_rad_cutoff=[<NUM_LIT>, <NUM_LIT:15>], check_outside_im=True,<EOL>tries=<NUM_LIT:50>, im_change_frac=<NUM_LIT>, **kwargs):", "body": "is_near_im_edge = lambda pos, pad: (((pos + st.pad) < pad) | (pos ><EOL>np.array(st.ishape.shape) + st.pad - pad)).any(axis=<NUM_LIT:1>)<EOL>removed = <NUM_LIT:0><EOL>attempts = <NUM_LIT:0><EOL>n_tot_part = st.obj_get_positions().shape[<NUM_LIT:0>]<EOL>q10 = int(<NUM_LIT:0.1> * n_tot_part)  <EOL>r_sig = np.sort(st.obj_get_radii())[q10:-q10].std()<EOL>r_med = np.median(st.obj_get_radii())<EOL>if max_rad == '<STR_LIT>':<EOL><INDENT>max_rad = r_med + <NUM_LIT:15>*r_sig<EOL><DEDENT>if min_rad == '<STR_LIT>':<EOL><INDENT>min_rad = r_med - <NUM_LIT>*r_sig<EOL><DEDENT>if check_rad_cutoff == '<STR_LIT>':<EOL><INDENT>check_rad_cutoff = [r_med - <NUM_LIT>*r_sig, r_med + <NUM_LIT>*r_sig]<EOL><DEDENT>rad_wrong_size = np.nonzero(<EOL>(st.obj_get_radii() < min_rad) | (st.obj_get_radii() > max_rad))[<NUM_LIT:0>]<EOL>near_im_edge = np.nonzero(is_near_im_edge(st.obj_get_positions(),<EOL>min_edge_dist - st.pad))[<NUM_LIT:0>]<EOL>delete_inds = np.unique(np.append(rad_wrong_size, near_im_edge)).tolist()<EOL>delete_poses = st.obj_get_positions()[delete_inds].tolist()<EOL>message = ('<STR_LIT:->'*<NUM_LIT> + '<STR_LIT>' + '<STR_LIT:->'*<NUM_LIT> +<EOL>'<STR_LIT>')<EOL>with log.noformat():<EOL><INDENT>CLOG.info(message)<EOL><DEDENT>for pos in delete_poses:<EOL><INDENT>ind = st.obj_closest_particle(pos)<EOL>old_err = st.error<EOL>p, r = st.obj_remove_particle(ind)<EOL>p = p[<NUM_LIT:0>]<EOL>r = r[<NUM_LIT:0>]<EOL>part_msg = '<STR_LIT>' % (<EOL>tuple(p) + (r,) + (old_err, st.error))<EOL>with log.noformat():<EOL><INDENT>CLOG.info(part_msg)<EOL><DEDENT>removed += <NUM_LIT:1><EOL><DEDENT>check_rad_inds = np.nonzero((st.obj_get_radii() < check_rad_cutoff[<NUM_LIT:0>]) |<EOL>(st.obj_get_radii() > 
check_rad_cutoff[<NUM_LIT:1>]))[<NUM_LIT:0>]<EOL>if check_outside_im:<EOL><INDENT>check_edge_inds = np.nonzero(<EOL>is_near_im_edge(st.obj_get_positions(), st.pad))[<NUM_LIT:0>]<EOL>check_inds = np.unique(np.append(check_rad_inds, check_edge_inds))<EOL><DEDENT>else:<EOL><INDENT>check_inds = check_rad_inds<EOL><DEDENT>check_inds = check_inds[np.argsort(st.obj_get_radii()[check_inds])]<EOL>tries = np.min([tries, check_inds.size])<EOL>check_poses = st.obj_get_positions()[check_inds[:tries]].copy()<EOL>for pos in check_poses:<EOL><INDENT>old_err = st.error<EOL>ind = st.obj_closest_particle(pos)<EOL>killed, p, r = check_remove_particle(<EOL>st, ind, im_change_frac=im_change_frac)<EOL>if killed:<EOL><INDENT>removed += <NUM_LIT:1><EOL>check_inds[check_inds > ind] -= <NUM_LIT:1>  <EOL>delete_poses.append(pos)<EOL>part_msg = '<STR_LIT>' % (<EOL>p + r + (old_err, st.error))<EOL>with log.noformat():<EOL><INDENT>CLOG.info(part_msg)<EOL><DEDENT><DEDENT><DEDENT>return removed, delete_poses<EOL>", "docstring": "Removes improperly-featured particles from the state, based on a\ncombination of particle size and the change in error on removal.\n\nParameters\n-----------\nst : :class:`peri.states.State`\n    The state to remove bad particles from.\nmin_rad : Float, optional\n    All particles with radius below min_rad are automatically deleted.\n    Set to 'calc' to make it the median rad - 25* radius std.\n    Default is 'calc'.\n\nmax_rad : Float, optional\n    All particles with radius above max_rad are automatically deleted.\n    Set to 'calc' to make it the median rad + 15* radius std.\n    Default is 'calc'.\n\nmin_edge_dist : Float, optional\n    All particles within min_edge_dist of the (padded) image\n    edges are automatically deleted. Default is 2.0\n\ncheck_rad_cutoff : 2-element list of floats, optional\n    Particles with radii < check_rad_cutoff[0] or > check_rad_cutoff[1]\n    are checked if they should be deleted. 
Set to 'calc' to make it the\n    median rad +- 3.5 * radius std. Default is [3.5, 15].\n\ncheck_outside_im : Bool, optional\n    If True, checks if particles located outside the unpadded image\n    should be deleted. Default is True.\n\ntries : Int, optional\n    The maximum number of particles with radii < check_rad_cutoff\n    to try to remove. Checks in increasing order of radius size.\n    Default is 50.\n\nim_change_frac : Float, , optional\n    Number between 0 and 1. If removing a particle decreases the\n    error by less than im_change_frac*the change in the image, then\n    the particle is deleted. Default is 0.2\n\nReturns\n-------\nremoved: Int\n    The cumulative number of particles removed.", "id": "f5774:m6"}
{"signature": "def identify_misfeatured_regions(st, filter_size=<NUM_LIT:5>, sigma_cutoff=<NUM_LIT>):", "body": "<EOL>r = st.residuals<EOL>weights = np.ones([filter_size]*len(r.shape), dtype='<STR_LIT:float>')<EOL>weights /= weights.sum()<EOL>f = np.sqrt(nd.filters.convolve(r*r, weights, mode='<STR_LIT>'))<EOL>if sigma_cutoff == '<STR_LIT>':<EOL><INDENT>max_ok = initializers.otsu_threshold(f)<EOL><DEDENT>else:<EOL><INDENT>max_ok = f.mean() + sigma_cutoff * f.std()<EOL><DEDENT>bad = f > max_ok<EOL>labels, n = nd.measurements.label(bad)<EOL>inds = []<EOL>for i in range(<NUM_LIT:1>, n+<NUM_LIT:1>):<EOL><INDENT>inds.append(np.nonzero(labels == i))<EOL><DEDENT>tiles = [Tile(np.min(ind, axis=<NUM_LIT:1>), np.max(ind, axis=<NUM_LIT:1>)+<NUM_LIT:1>) for ind in inds]<EOL>volumes = [t.volume for t in tiles]<EOL>return [tiles[i] for i in np.argsort(volumes)[::-<NUM_LIT:1>]]<EOL>", "docstring": "Identifies regions of missing/misfeatured particles based on the\nresiduals' local deviation from uniform Gaussian noise.\n\nParameters\n----------\nst : :class:`peri.states.State`\n    The state in which to identify mis-featured regions.\n\nfilter_size : Int, best if odd.\n    The size of the filter for calculating the local standard deviation;\n    should approximately be the size of a poorly featured region in\n    each dimension. Default is 5.\n\nsigma_cutoff : Float or `otsu`, optional\n    The max allowed deviation of the residuals from what is expected,\n    in units of the residuals' standard deviation. Lower means more\n    sensitive, higher = less sensitive. Default is 8.0, i.e. one pixel\n    out of every 7*10^11 is mis-identified randomly. In practice the\n    noise is not Gaussian so there are still some regions mis-identified\n    as improperly featured. 
Set to ```otsu``` to calculate this number\n    based on an automatic Otsu threshold.\n\nReturns\n-------\ntiles : List of :class:`peri.util.Tile`\n    Each tile is the smallest bounding tile that contains an improperly\n    featured region. The list is sorted by the tile's volume.\n\nNotes\n-----\nAlgorithm is\n1.  Create a field of the local standard deviation, as measured over\n    a hypercube of size filter_size.\n2.  Find the maximum reasonable value of the field. [The field should\n    be a random variable with mean of r.std() and standard deviation\n    of ~r.std() / sqrt(N), where r is the residuals and N is the\n    number of pixels in the hypercube.]\n3.  Label & Identify the misfeatured regions as portions where\n    the local error is too large.\n4.  Parse the misfeatured regions into tiles.\n5.  Return the sorted tiles.\nThe Otsu option to calculate the sigma cutoff works well for images\nthat actually contain missing particles, returning a number similar\nto one calculated with a sigma cutoff. However, if the image is\nwell-featured with Gaussian residuals, then the Otsu threshold\nsplits the Gaussian down the middle instead of at the tails, which\nis very bad. So use with caution.", "id": "f5774:m8"}
{"signature": "def add_missing_particles(st, rad='<STR_LIT>', tries=<NUM_LIT:50>, **kwargs):", "body": "if rad == '<STR_LIT>':<EOL><INDENT>rad = guess_add_radii(st)<EOL><DEDENT>guess, npart = feature_guess(st, rad, **kwargs)<EOL>tries = np.min([tries, npart])<EOL>accepts, new_poses = check_add_particles(<EOL>st, guess[:tries], rad=rad, **kwargs)<EOL>return accepts, new_poses<EOL>", "docstring": "Attempts to add missing particles to the state.\n\nOperates by:\n(1) featuring the difference image using feature_guess,\n(2) attempting to add the featured positions using check_add_particles.\n\nParameters\n----------\nst : :class:`peri.states.State`\n    The state to check adding particles to.\nrad : Float or 'calc', optional\n    The radius of the newly-added particles and of the feature size for\n    featuring. Default is 'calc', which uses the median of the state's\n    current radii.\ntries : Int, optional\n    How many particles to attempt to add. Only tries to add the first\n    ``tries`` particles, in order of mass. Default is 50.\n\nOther Parameters\n----------------\ninvert : Bool, optional\n    Whether to invert the image. Default is ``True``, i.e. dark particles\nminmass : Float or None, optionals\n    The minimum mass/masscut of a particle. Default is ``None``=calcualted\n    by ``feature_guess``.\nuse_tp : Bool, optional\n    Whether to use trackpy in feature_guess. Default is False, since\n    trackpy cuts out particles at the edge.\n\ndo_opt : Bool, optional\n    Whether to optimize the particle position before checking if it\n    should be kept. Default is True (optimizes position).\nim_change_frac : Float, optional\n    How good the change in error needs to be relative to the change\n    in the difference image. Default is 0.2; i.e. if the error does\n    not decrease by 20% of the change in the difference image, do\n    not add the particle.\n\nmin_derr : Float or '3sig', optional\n    The minimal improvement in error to add a particle. 
Default\n    is ``'3sig' = 3*st.sigma``.\n\nReturns\n-------\naccepts : Int\n    The number of added particles\nnew_poses : [N,3] list\n    List of the positions of the added particles. If ``do_opt==True``,\n    then these positions will differ from the input 'guess'.", "id": "f5774:m5"}
{"signature": "def add_subtract(st, max_iter=<NUM_LIT:7>, max_npart='<STR_LIT>', max_mem=<NUM_LIT>,<EOL>always_check_remove=False, **kwargs):", "body": "if max_npart == '<STR_LIT>':<EOL><INDENT>max_npart = <NUM_LIT> * st.obj_get_positions().shape[<NUM_LIT:0>]<EOL><DEDENT>total_changed = <NUM_LIT:0><EOL>_change_since_opt = <NUM_LIT:0><EOL>removed_poses = []<EOL>added_poses0 = []<EOL>added_poses = []<EOL>nr = <NUM_LIT:1>  <EOL>for _ in range(max_iter):<EOL><INDENT>if (nr != <NUM_LIT:0>) or (always_check_remove):<EOL><INDENT>nr, rposes = remove_bad_particles(st, **kwargs)<EOL><DEDENT>na, aposes = add_missing_particles(st, **kwargs)<EOL>current_changed = na + nr<EOL>removed_poses.extend(rposes)<EOL>added_poses0.extend(aposes)<EOL>total_changed += current_changed<EOL>_change_since_opt += current_changed<EOL>if current_changed == <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>elif _change_since_opt > max_npart:<EOL><INDENT>_change_since_opt *= <NUM_LIT:0><EOL>CLOG.info('<STR_LIT>')<EOL>opt.do_levmarq(st, opt.name_globals(st, remove_params=st.get(<EOL>'<STR_LIT>').params), max_iter=<NUM_LIT:1>, run_length=<NUM_LIT:4>, num_eig_dirs=<NUM_LIT:3>,<EOL>max_mem=max_mem, eig_update_frequency=<NUM_LIT:2>, rz_order=<NUM_LIT:0>,<EOL>use_accel=True)<EOL>CLOG.info('<STR_LIT>'.format(st.error))<EOL><DEDENT><DEDENT>for p in added_poses0:<EOL><INDENT>i = st.obj_closest_particle(p)<EOL>opt.do_levmarq_particles(st, np.array([i]), max_iter=<NUM_LIT:2>, damping=<NUM_LIT>)<EOL>added_poses.append(st.obj_get_positions()[i])<EOL><DEDENT>return total_changed, np.array(removed_poses), np.array(added_poses)<EOL>", "docstring": "Automatically adds and subtracts missing & extra particles.\n\nOperates by removing bad particles then adding missing particles on\nrepeat, until either no particles are added/removed or after `max_iter`\nattempts.\n\nParameters\n----------\nst: :class:`peri.states.State`\n    The state to add and subtract particles to.\nmax_iter : Int, optional\n    The maximum number of 
add-subtract loops to use. Default is 7.\n    Terminates after either max_iter loops or when nothing has changed.\nmax_npart : Int or 'calc', optional\n    The maximum number of particles to add before optimizing the non-psf\n    globals. Default is ``'calc'``, which uses 5% of the initial number\n    of particles.\nmax_mem : Int, optional\n    The maximum memory to use for optimization after adding max_npart\n    particles. Default is 2e8.\nalways_check_remove : Bool, optional\n    Set to True to always check whether to remove particles. If ``False``,\n    only checks for removal while particles were removed on the previous\n    attempt. Default is False.\n\nOther Parameters\n----------------\ninvert : Bool, optional\n    ``True`` if the particles are dark on a bright background, ``False``\n    if they are bright on a dark background. Default is ``True``.\nmin_rad : Float, optional\n    Particles with radius below ``min_rad`` are automatically deleted.\n    Default is ``'calc'`` = median rad - 25* radius std.\nmax_rad : Float, optional\n    Particles with radius above ``max_rad`` are automatically deleted.\n    Default is ``'calc'`` = median rad + 15* radius std, but you should\n    change this for your particle sizes.\n\nmin_edge_dist : Float, optional\n    Particles closer to the edge of the padded image than this are\n    automatically deleted. Default is 2.0.\ncheck_rad_cutoff : 2-element float list.\n    Particles with ``radii < check_rad_cutoff[0]`` or ``> check...[1]``\n    are checked if they should be deleted (not automatic). Default is\n    ``[3.5, 15]``.\ncheck_outside_im : Bool, optional\n    Set to True to check whether to delete particles whose positions are\n    outside the un-padded image.\n\nrad : Float, optional\n    The initial radius for added particles; added particles radii are\n    not fit until the end of ``add_subtract``. 
Default is ``'calc'``,\n    which uses the median radii of active particles.\n\ntries : Int, optional\n    The number of particles to attempt to remove or add, per iteration.\n    Default is 50.\n\nim_change_frac : Float, optional\n    How good the change in error needs to be relative to the change in\n    the difference image. Default is 0.2; i.e. if the error does not\n    decrease by 20% of the change in the difference image, do not add\n    the particle.\n\nmin_derr : Float, optional\n    The minimum change in the state's error to keep a particle in the\n    image. Default is ``'3sig'`` which uses ``3*st.sigma``.\n\ndo_opt : Bool, optional\n    Set to False to avoid optimizing particle positions after adding.\nminmass : Float, optional\n    The minimum mass for a particle to be identified as a feature,\n    as used by trackpy. Defaults to a decent guess.\n\nuse_tp : Bool, optional\n    Set to True to use trackpy to find missing particles inside the\n    image. Not recommended since trackpy deliberately cuts out particles\n    at the edge of the image. Default is ``False``.\n\nReturns\n-------\ntotal_changed : Int\n    The total number of adds and subtracts done on the data. Not the\n    same as ``changed_inds.size`` since the same particle or particle\n    index can be added/subtracted multiple times.\nadded_positions : [N_added,3] numpy.ndarray\n    The positions of particles that have been added at any point in the\n    add-subtract cycle.\nremoved_positions : [N_added,3] numpy.ndarray\n    The positions of particles that have been removed at any point in\n    the add-subtract cycle.\n\nNotes\n------\nOccasionally after the intial featuring a cluster of particles is\nfeatured as 1 big particle. To fix these mistakes, it helps to set\nmax_rad to a physical value. This removes the big particle and allows\nit to be re-featured by (several passes of) the adds.\n\nThe added/removed positions returned are whether or not the position\nhas been added or removed ever. 
It's possible that a position is\nadded, then removed during a later iteration.", "id": "f5774:m7"}
{"signature": "def add_subtract_locally(st, region_depth=<NUM_LIT:3>, filter_size=<NUM_LIT:5>, sigma_cutoff=<NUM_LIT:8>,<EOL>**kwargs):", "body": "<EOL>tiles = identify_misfeatured_regions(<EOL>st, filter_size=filter_size, sigma_cutoff=sigma_cutoff)<EOL>n_empty = <NUM_LIT:0><EOL>n_added = <NUM_LIT:0><EOL>new_poses = []<EOL>for t in tiles:<EOL><INDENT>curn, curinds = add_subtract_misfeatured_tile(st, t, **kwargs)<EOL>if curn == <NUM_LIT:0>:<EOL><INDENT>n_empty += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>n_added += curn<EOL>new_poses.extend(st.obj_get_positions()[curinds])<EOL><DEDENT>if n_empty > region_depth:<EOL><INDENT>break  <EOL><DEDENT><DEDENT>else:  <EOL><INDENT>pass<EOL><DEDENT>return n_added, new_poses<EOL>", "docstring": "Automatically adds and subtracts missing particles based on local\nregions of poor fit.\n\nCalls identify_misfeatured_regions to identify regions, then\nadd_subtract_misfeatured_tile on the tiles in order of size until\nregion_depth tiles have been checked without adding any particles.\n\nParameters\n----------\nst: :class:`peri.states.State`\n    The state to add and subtract particles to.\nregion_depth : Int\n    The minimum amount of regions to try; the algorithm terminates if\n    region_depth regions have been tried without adding particles.\n\nOther Parameters\n----------------\nfilter_size : Int, optional\n    The size of the filter for calculating the local standard deviation;\n    should approximately be the size of a poorly featured region in each\n    dimension. Best if odd. Default is 5.\nsigma_cutoff : Float, optional\n    The max allowed deviation of the residuals from what is expected,\n    in units of the residuals' standard deviation. Lower means more\n    sensitive, higher = less sensitive. Default is 8.0, i.e. one pixel\n    out of every ``7*10^11`` is mis-identified randomly. 
In practice the\n    noise is not Gaussian so there are still some regions mis-\n    identified as improperly featured.\nrad : Float or 'calc', optional\n    The initial radius for added particles; added particles radii are\n    not fit until the end of add_subtract. Default is ``'calc'``, which\n    uses the median radii of active particles.\nmax_iter : Int, optional\n    The maximum number of loops for attempted adds at one tile location.\n    Default is 3.\ninvert : Bool, optional\n    Whether to invert the image for feature_guess. Default is ``True``,\n    i.e. dark particles on bright background.\nmax_allowed_remove : Int, optional\n    The maximum number of particles to remove. If the misfeatured tile\n    contains more than this many particles, raises an error. If it\n    contains more than half as many particles, throws a warning. If more\n    than this many particles are added, they are optimized in blocks of\n    ``max_allowed_remove``. Default is 20.\nim_change_frac : Float, between 0 and 1.\n    If adding or removing a particle decreases the error less than\n    ``im_change_frac *`` the change in the image, the particle is deleted.\n    Default is 0.2.\nmin_derr : Float\n    The minimum change in the state's error to keep a particle in the\n    image. Default is ``'3sig'`` which uses ``3*st.sigma``.\ndo_opt : Bool, optional\n    Set to False to avoid optimizing particle positions after adding\n    them. Default is True\nminmass : Float, optional\n    The minimum mass for a particle to be identified as a feature, as\n    used by trackpy. Defaults to a decent guess.\nuse_tp : Bool, optional\n    Set to True to use trackpy to find missing particles inside the\n    image. Not recommended since trackpy deliberately cuts out\n    particles at the edge of the image. Default is False.\nmax_allowed_remove : Int, optional\n    The maximum number of particles to remove. If the misfeatured tile\n    contains more than this many particles, raises an error. 
If it\n    contains more than half as many particles, throws a warning. If more\n    than this many particles are added, they are optimized in blocks of\n    ``max_allowed_remove``. Default is 20.\n\nReturns\n-------\nn_added : Int\n    The change in the number of particles; i.e the number added - number\n    removed.\nnew_poses : List\n    [N,3] element list of the added particle positions.\n\nNotes\n-----\nAlgorithm Description\n\n1. Identify mis-featured regions by how much the local residuals\n   deviate from the global residuals, as measured by the standard\n   deviation of both.\n2. Loop over each of those regions, and:\n\n   a. Remove every particle in the current region.\n   b. Try to add particles in the current region until no more\n      can be added while adequately decreasing the error.\n   c. Terminate if at least region_depth regions have been\n      checked without successfully adding a particle.\n\nBecause this algorithm is more judicious about chooosing regions to\ncheck, and more aggressive about removing particles in those regions,\nit runs faster and does a better job than the (global) add_subtract.\nHowever, this function usually does not work better as an initial add-\nsubtract on an image, since (1) it doesn't check for removing small/big\nparticles per se, and (2) when the poorly-featured regions of the image\nare large or when the fit is bad, it will remove essentially all of the\nparticles, taking a long time. As a result, it's usually best to do a\nnormal add_subtract first and using this function for tough missing or\ndouble-featured particles.", "id": "f5774:m10"}
{"signature": "def optimize_particle(state, index, method='<STR_LIT>', doradius=True):", "body": "blocks = state.param_particle(index)<EOL>if not doradius:<EOL><INDENT>blocks = blocks[:-<NUM_LIT:1>]<EOL><DEDENT>g = state.gradloglikelihood(blocks=blocks)<EOL>if method == '<STR_LIT>':<EOL><INDENT>h = state.jtj(blocks=blocks)<EOL><DEDENT>if method == '<STR_LIT>':<EOL><INDENT>h = state.hessloglikelihood(blocks=blocks)<EOL><DEDENT>step = np.linalg.solve(h, g)<EOL>h = np.zeros_like(g)<EOL>for i in range(len(g)):<EOL><INDENT>state.update(blocks[i], state.state[blocks[i]] - step[i])<EOL><DEDENT>return g,h<EOL>", "docstring": "Methods available are\n    gn : Gauss-Newton with JTJ (recommended)\n    nr : Newton-Rhaphson with hessian\n\nif doradius, also optimize the radius.", "id": "f5775:m0"}
{"signature": "def rosenbrock_gendd(xd, A=<NUM_LIT:10>, order=<NUM_LIT:3>):", "body": "xp = xd[<NUM_LIT:1>:]<EOL>xi = xd[:-<NUM_LIT:1>]<EOL>r1 = A*(xp - xi**order)<EOL>r2 = <NUM_LIT:1> - xi<EOL>return np.append(r1, r2)<EOL>", "docstring": "A higher-dimensional modification of a generalized rosenbrock\nfunction, as a set of residuals (cost = sum(residuals**2))\n\nThe standard modified function is with data = zeros(N), with a\nglobal minimum at xd=ones(d). This function is a coupled model,\npolynomial in the first d-1 parameters. Its data-space dimension\n(2d-2) is greater than its model-space dimension (d) for d > 2.\n\nParameters\n----------\n    - xd : d-element list-like\n        The x,y parameters of the model.\n    - order : Int, optional\n        The order of the model nonlinearity. Default is 3\n\nReturns\n-------\n    (2d-2)-element list-like\n        The residuals of the model.\n\nNotes\n-----\n    Based on the multidimensional variation\n        f(x) = \\sum_{i=1}^{N-1} 100(x_{i+1} - x_i&2) + (1-x_i)^2\nThis gives a data space of dimension 2*(N-1) =2N-2 and a parameter\nspace of dimension N. According to wikipedia, the quadratic version\nhas 1 minimia for N=3 at (1,1,1), 2 minima for 4<=N<=7. See:\nhttps://en.wikipedia.org/wiki/Rosenbrock_function#Multidimensional_generalisations", "id": "f5776:m5"}
{"signature": "def himmelblau(xy):", "body": "x, y = xy<EOL>r1 = x*x + y<EOL>r2 = y*y + x<EOL>return np.array([r1, r2])<EOL>", "docstring": "Himmelblau's function, as a set of residuals (cost = sum(residuals**2))\n\nThe standard Himmelbau's function is with data as [11, 7], and four\nminimum at (3.0, 2.0), ~(-2.8, 3.1), ~(-3.8, -3.3), ~(3.6, -1.8).\nHimmelblau's function is a quadratic model in both x and y. Its data-\nspace dimension (2) is equal to its model-space dimension (2), so\nthere is only parameter-effect curvature.\n\nParameters\n----------\n    - xy : 2-element list-like\n        The x,y parameters of the model.\n\nReturns\n-------\n    2-element list-like\n        The residuals of the model.\nNotes\n------\nhttps://en.wikipedia.org/wiki/Himmelblau%27s_function", "id": "f5776:m0"}
{"signature": "def beale(xy):", "body": "x, y = xy<EOL>r1 = x - x*y<EOL>r2 = x - x*y*y<EOL>r3 = x - x*y*y*y<EOL>return np.array([r1, r2, r3])<EOL>", "docstring": "The Beale function, as a set of residuals (cost = sum(residuals**2))\n\nThe standard Beale's function is with data as [1.5, 2.25, 2.625],\nand has a global minima at (3, 0.5). Beale's function is a coupled\nmodel, linear in x and quartic in y. Its data-space dimension (3) is\ngreater than its model-space dimension (2).\n\nParameters\n----------\n    - xy : 2-element list-like\n        The x,y parameters of the model.\n\nReturns\n-------\n    3-element list-like\n        The residuals of the model.", "id": "f5776:m6"}
{"signature": "def booth(xy):", "body": "x, y = xy<EOL>r1 = x + <NUM_LIT:2>*y<EOL>r2 = y + <NUM_LIT:2>*x<EOL>return np.array([r1, r2])<EOL>", "docstring": "The Booth's function, as a set of residuals (cost = sum(residuals**2))\n\nThe standard Booth function is with data as [7, 5], and has a single\nglobal minimum at (1,3). It is a coupled linear model, with the\nparameter and data space both 2-dimensional.\n\nParameters\n----------\n    - xy : 2-element list-like\n        The x,y parameters of the model.\n\nReturns\n-------\n    2-element list-like\n        The residuals of the model.", "id": "f5776:m7"}
{"signature": "def simple_sphere(xy):", "body": "return xy<EOL>", "docstring": "A simple sphere, as a set of residuals (cost = sum(residuals**2))\n\nSimply returns the input parameters. A linear, uncoupled model.\n\nParameters\n----------\n    - xy : 2-element list-like\n        The x,y parameters of the model.\n\nReturns\n-------\n    - xy : 2-element list-like\n        The residuals, which are just xy", "id": "f5776:m1"}
{"signature": "def rosenbrock_gen(xy, A=<NUM_LIT:10>, order=<NUM_LIT:3>):", "body": "x, y = xy<EOL>r1 = x<EOL>r2 = A*(y-x**order / order)<EOL>return np.array([r1, r2])<EOL>", "docstring": "A generalized rosenbrock banana function, as a set of residuals\n(cost = sum(residuals**2))\n\nThe Rosenbrock function, generalized from a quadratic model to a\nhigher-order nonlinearity. The residuals are\n    r1 = xy[0]\n    r2 = xy[1] - xy[0]^n/n\nThe original function is with data = [1,0] and order=2, and has a\nsingle minimum at (x,y) = (1, 1/order). The model is coupled,\npolynomial in x and linear in y. Its data-space dimension (2) is\nequal to its model-space dimension (2), so there is only parameter-\neffect curvature. (See M Transtrum et al, PRE 2011)\n\nParameters\n----------\n    - xy : 2-element list-like\n        The x,y parameters of the model.\n    - order : Int, optional\n        The order of the model nonlinearity. Default is 3\n\nReturns\n-------\n    2-element list-like\n        The residuals of the model.", "id": "f5776:m3"}
{"signature": "def tile_overlap(inner, outer, norm=False):", "body": "div = <NUM_LIT:1.0>/inner.volume if norm else <NUM_LIT:1.0><EOL>return div*(inner.volume - util.Tile.intersection(inner, outer).volume)<EOL>", "docstring": "How much of inner is in outer by volume", "id": "f5777:m2"}
{"signature": "def separate_particles_into_groups(s, region_size=<NUM_LIT>, bounds=None):", "body": "imtile = (<EOL>s.oshape.translate(-s.pad) if bounds is None else<EOL>util.Tile(bounds[<NUM_LIT:0>], bounds[<NUM_LIT:1>])<EOL>)<EOL>region = util.Tile(region_size, dim=s.dim)<EOL>trange = np.ceil(imtile.shape.astype('<STR_LIT:float>') / region.shape)<EOL>translations = util.Tile(trange).coords(form='<STR_LIT>')<EOL>translations = translations.reshape(-<NUM_LIT:1>, translations.shape[-<NUM_LIT:1>])<EOL>groups = []<EOL>positions = s.obj_get_positions()<EOL>for v in translations:<EOL><INDENT>tmptile = region.copy().translate(region.shape * v - s.pad)<EOL>groups.append(find_particles_in_tile(positions, tmptile))<EOL><DEDENT>return [g for g in groups if len(g) > <NUM_LIT:0>]<EOL>", "docstring": "Given a state, returns a list of groups of particles. Each group of\nparticles are located near each other in the image. Every particle\nlocated in the desired region is contained in exactly 1 group.\n\nParameters:\n-----------\ns : state\n    The PERI state to find particles in.\n\nregion_size: int or list of ints\n    The size of the box. Groups particles into boxes of shape region_size.\n    If region_size is a scalar, the box is a cube of length region_size.\n    Default is 40.\n\nbounds: 2-element list-like of 3-element lists.\n    The sub-region of the image over which to look for particles.\n        bounds[0]: The lower-left  corner of the image region.\n        bounds[1]: The upper-right corner of the image region.\n    Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire\n    image size, i.e. the default places every particle in the image\n    somewhere in the groups.\n\nReturns:\n-----------\nparticle_groups: list\n    Each element of particle_groups is an int numpy.ndarray of the\n    group of nearby particles. 
Only contains groups with a nonzero\n    number of particles, so the elements don't necessarily correspond\n    to a given image region.", "id": "f5777:m11"}
{"signature": "def get_conf_filename():", "body": "default = os.path.join(os.path.expanduser(\"<STR_LIT>\"), \"<STR_LIT>\")<EOL>return os.environ.get('<STR_LIT>', default)<EOL>", "docstring": "The configuration file either lives in ~/.peri.json or is specified on the\ncommand line via the environment variables PERI_CONF_FILE", "id": "f5779:m0"}
{"signature": "def read_environment():", "body": "out = {}<EOL>for k,v in iteritems(os.environ):<EOL><INDENT>if transform(k) in default_conf:<EOL><INDENT>out[transform(k)] = v<EOL><DEDENT><DEDENT>return out<EOL>", "docstring": "Read all environment variables to see if they contain PERI", "id": "f5779:m2"}
{"signature": "def feature_from_pos_rad(statemaker, pos, rad, im_name=None, tile=None,<EOL>desc='<STR_LIT>', use_full_path=False, statemaker_kwargs={}, **kwargs):", "body": "if np.size(pos) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>elif np.shape(pos)[<NUM_LIT:1>] != <NUM_LIT:3>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>_,  im_name = _pick_state_im_name('<STR_LIT>', im_name, use_full_path=use_full_path)<EOL>im = util.RawImage(im_name, tile=tile)<EOL>s = statemaker(im, pos, rad, **statemaker_kwargs)<EOL>RLOG.info('<STR_LIT>')<EOL>if desc is not None:<EOL><INDENT>states.save(s, desc=desc+'<STR_LIT>')<EOL><DEDENT>optimize_from_initial(s, desc=desc, **kwargs)<EOL>return s<EOL>", "docstring": "Gets a completely-optimized state from an image and an initial guess of\nparticle positions and radii.\n\nThe state is periodically saved during optimization, with different\nfilename for different stages of the optimization. The user can select\nthe image.\n\nParameters\n----------\n    statemaker : Function\n        A statemaker function. Given arguments `im` (a\n        :class:`~peri.util.Image`), `pos` (numpy.ndarray), `rad` (ndarray),\n        and any additional `statemaker_kwargs`, must return a\n        :class:`~peri.states.ImageState`.  There is an example function in\n        scripts/statemaker_example.py\n    pos : [N,3] element numpy.ndarray.\n        The initial guess for the N particle positions.\n    rad : N element numpy.ndarray.\n        The initial guess for the N particle radii.\n    im_name : string or None, optional\n        The filename of the image to feature. Default is None, in which\n        the user selects the image.\n    tile : :class:`peri.util.Tile`, optional\n        A tile of the sub-region of the image to feature. Default is\n        None, i.e. entire image.\n    desc : String, optional\n        A description to be inserted in saved state. 
The save name will\n        be, e.g., '0.tif-peri-' + desc + 'initial-burn.pkl'. Default is ''\n    use_full_path : Bool, optional\n        Set to True to use the full path name for the image. Default\n        is False.\n    statemaker_kwargs : Dict, optional\n        kwargs-like dict of any additional keyword arguments to pass to\n        the statemaker function. Default is ``{}``.\n\nOther Parameters\n----------------\n    max_mem : Numeric\n        The maximum additional memory to use for the optimizers, as\n        passed to optimize.burn. Default is 1e9.\n    min_rad : Float, optional\n        The minimum particle radius, as passed to addsubtract.add_subtract.\n        Particles with a fitted radius smaller than this are identified\n        as fake and removed. Default is 0.5 * actual_rad.\n    max_rad : Float, optional\n        The maximum particle radius, as passed to addsubtract.add_subtract.\n        Particles with a fitted radius larger than this are identified\n        as fake and removed. Default is 1.5 * actual_rad, however you\n        may find better results if you make this more stringent.\n    invert : {'guess', True, False}\n        Whether to invert the image for featuring, as passed to\n        addsubtract.add_subtract. Default is to guess from the\n        current state's particle positions.\n    rz_order : int, optional\n        If nonzero, the order of an additional augmented rscl(z)\n        parameter for optimization. Default is 0; i.e. no rscl(z)\n        optimization.\n    zscale : Float, optional\n        The zscale of the image. 
Default is 1.0\n\nReturns\n-------\n    s : :class:`peri.states.ImageState`\n        The optimized state.\n\nSee Also\n--------\n    get_initial_featuring   : Features an image from scratch, using\n        centroid methods as initial particle locations.\n\n    get_particle_featuring  : Using a previous state's globals and\n        positions as an initial guess, completely optimizes a state.\n\n    translate_featuring     : Use a previous state's globals and\n        centroids methods for an initial particle guess, completely\n        optimizes a state.\n\nNotes\n-----\nThe ``Other Parameters`` are passed to _optimize_from_centroid.\nProceeds by centroid-featuring the image for an initial guess of\nparticle positions, then optimizing the globals + positions until\ntermination as called in _optimize_from_centroid.", "id": "f5780:m2"}
{"signature": "def translate_featuring(state_name=None, im_name=None, use_full_path=False,<EOL>**kwargs):", "body": "state_name, im_name = _pick_state_im_name(<EOL>state_name, im_name, use_full_path=use_full_path)<EOL>s = states.load(state_name)<EOL>im = util.RawImage(im_name, tile=s.image.tile)<EOL>s.set_image(im)<EOL>_translate_particles(s, **kwargs)<EOL>return s<EOL>", "docstring": "Translates one optimized state into another image where the particles\nhave moved by a small amount (~1 particle radius).\n\nReturns a completely-optimized state. The user can interactively\nselects the initial state and the second raw image. The state is\nperiodically saved during optimization, with different filename for\ndifferent stages of the optimization.\n\nParameters\n----------\n    state_name : String or None, optional\n        The name of the initially-optimized state. Default is None,\n        which prompts the user to select the name interactively\n        through a Tk window.\n    im_name : String or None, optional\n        The name of the new image to optimize. Default is None,\n        which prompts the user to select the name interactively\n        through a Tk window.\n    use_full_path : Bool, optional\n        Set to True to use the full path of the state instead of\n        partial path names (e.g. /full/path/name/state.pkl vs\n        state.pkl). Default is False\n\nOther Parameters\n----------------\n    max_mem : Numeric\n        The maximum additional memory to use for the optimizers, as\n        passed to optimize.burn. Default is 1e9.\n    desc : String, optional\n        A description to be inserted in saved state. The save name will\n        be, e.g., '0.tif-peri-' + desc + 'initial-burn.pkl'. Default is ''\n    min_rad : Float, optional\n        The minimum particle radius, as passed to addsubtract.add_subtract.\n        Particles with a fitted radius smaller than this are identified\n        as fake and removed. 
Default is 0.5 * actual_rad.\n    max_rad : Float, optional\n        The maximum particle radius, as passed to addsubtract.add_subtract.\n        Particles with a fitted radius larger than this are identified\n        as fake and removed. Default is 1.5 * actual_rad, however you\n        may find better results if you make this more stringent.\n    invert : {True, False, 'guess'}\n        Whether to invert the image for featuring, as passed to\n        addsubtract.add_subtract. Default is to guess from the\n        state's current particles.\n    rz_order : int, optional\n        If nonzero, the order of an additional augmented rscl(z)\n        parameter for optimization. Default is 0; i.e. no rscl(z)\n        optimization.\n    do_polish : Bool, optional\n        Set to False to only optimize the particles and add-subtract.\n        Default is True, which then runs a polish afterwards.\n\nReturns\n-------\n    s : :class:`peri.states.ImageState`\n        The optimized state.\n\nSee Also\n--------\n    get_initial_featuring   : Features an image from scratch, using\n        centroid methods as initial particle locations.\n\n    feature_from_pos_rad    : Using a previous state's globals and\n        user-provided positions and radii as an initial guess,\n        completely optimizes a state.\n\n    get_particle_featuring  : Using a previous state's globals and\n        positions as an initial guess, completely optimizes a state.\n\nNotes\n-----\nThe ``Other Parameters`` are passed to _translate_particles.\nProceeds by:\n    1. Optimize particle positions only.\n    2. Optimize particle positions and radii only.\n    3. Add-subtract missing and bad particles.\n    4. If polish, optimize the illumination, background, and particles.\n    5. If polish, optimize everything.", "id": "f5780:m4"}
{"signature": "def optimize_from_initial(s, max_mem=<NUM_LIT>, invert='<STR_LIT>', desc='<STR_LIT>', rz_order=<NUM_LIT:3>,<EOL>min_rad=None, max_rad=None):", "body": "RLOG.info('<STR_LIT>')<EOL>if desc is not None:<EOL><INDENT>desc_burn = desc + '<STR_LIT>'<EOL>desc_polish = desc + '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>desc_burn, desc_polish = [None] * <NUM_LIT:2><EOL><DEDENT>opt.burn(s, mode='<STR_LIT>', n_loop=<NUM_LIT:3>, fractol=<NUM_LIT:0.1>, desc=desc_burn,<EOL>max_mem=max_mem, include_rad=False, dowarn=False)<EOL>opt.burn(s, mode='<STR_LIT>', n_loop=<NUM_LIT:3>, fractol=<NUM_LIT:0.1>, desc=desc_burn,<EOL>max_mem=max_mem, include_rad=True, dowarn=False)<EOL>RLOG.info('<STR_LIT>')<EOL>rad = s.obj_get_radii()<EOL>if min_rad is None:<EOL><INDENT>min_rad = <NUM_LIT:0.5> * np.median(rad)<EOL><DEDENT>if max_rad is None:<EOL><INDENT>max_rad = <NUM_LIT> * np.median(rad)<EOL><DEDENT>addsub.add_subtract(s, tries=<NUM_LIT:30>, min_rad=min_rad, max_rad=max_rad,<EOL>invert=invert)<EOL>if desc is not None:<EOL><INDENT>states.save(s, desc=desc + '<STR_LIT>')<EOL><DEDENT>RLOG.info('<STR_LIT>')<EOL>d = opt.burn(s, mode='<STR_LIT>', n_loop=<NUM_LIT:8>, fractol=<NUM_LIT>, desc=desc_polish,<EOL>max_mem=max_mem, rz_order=rz_order, dowarn=False)<EOL>if not d['<STR_LIT>']:<EOL><INDENT>RLOG.warn('<STR_LIT>')<EOL><DEDENT>return s<EOL>", "docstring": "Optimizes a state from an initial set of positions and radii, without\nany known microscope parameters.\n\nParameters\n----------\n    s : :class:`peri.states.ImageState`\n        The state to optimize. It is modified internally and returned.\n    max_mem : Numeric, optional\n        The maximum memory for the optimizer to use. Default is 1e9 (bytes)\n    invert : Bool or `'guess'`, optional\n        Set to True if the image is dark particles on a bright\n        background, False otherwise. Used for add-subtract. 
The\n        default is to guess from the state's current particles.\n    desc : String, optional\n        An additional description to infix for periodic saving along the\n        way. Default is the null string ``''``.\n    rz_order : int, optional\n        ``rz_order`` as passed to opt.burn. Default is 3\n    min_rad : Float or None, optional\n        The minimum radius to identify a particles as bad, as passed to\n        add-subtract. Default is None, which picks half the median radii.\n        If your sample is not monodisperse you should pick a different\n        value.\n    max_rad : Float or None, optional\n        The maximum radius to identify a particles as bad, as passed to\n        add-subtract. Default is None, which picks 1.5x the median radii.\n        If your sample is not monodisperse you should pick a different\n        value.\n\nReturns\n-------\n    s : :class:`peri.states.ImageState`\n        The optimized state, which is the same as the input ``s`` but\n        modified in-place.", "id": "f5780:m3"}
{"signature": "def emit(self,record):", "body": "err_message = self.format(record)<EOL>pygmentize(err_message)<EOL>", "docstring": "Send the message", "id": "f5781:c2:m1"}
{"signature": "def save_wisdom(wisdomfile):", "body": "if wisdomfile is None:<EOL><INDENT>return<EOL><DEDENT>if wisdomfile:<EOL><INDENT>pickle.dump(<EOL>pyfftw.export_wisdom(), open(wisdomfile, '<STR_LIT:wb>'),<EOL>protocol=<NUM_LIT:2><EOL>)<EOL><DEDENT>", "docstring": "Save the acquired 'wisdom' generated by FFTW to file so that future\ninitializations of FFTW will be faster.", "id": "f5782:m1"}
{"signature": "def load_wisdom(wisdomfile):", "body": "if wisdomfile is None:<EOL><INDENT>return<EOL><DEDENT>try:<EOL><INDENT>pyfftw.import_wisdom(pickle.load(open(wisdomfile, '<STR_LIT:rb>')))<EOL><DEDENT>except (IOError, TypeError) as e:<EOL><INDENT>log.warn(\"<STR_LIT>\" % wisdomfile)<EOL>save_wisdom(wisdomfile)<EOL><DEDENT>", "docstring": "Prime FFTW with knowledge of which FFTs are best on this machine by\nloading 'wisdom' from the file ``wisdomfile``", "id": "f5782:m0"}
{"signature": "def add_handler(self, name='<STR_LIT>', level='<STR_LIT:info>', formatter='<STR_LIT>', **kwargs):", "body": "<EOL>if name == '<STR_LIT>' and '<STR_LIT:filename>' not in kwargs:<EOL><INDENT>kwargs.update({'<STR_LIT:filename>': self.logfilename})<EOL><DEDENT>if name == '<STR_LIT>' and '<STR_LIT>' not in kwargs:<EOL><INDENT>kwargs.update({'<STR_LIT>': StringIO.StringIO()})<EOL><DEDENT>handler = types[name](**kwargs)<EOL>self.add_handler_raw(handler, name, level=level, formatter=formatter)<EOL>", "docstring": "Add another handler to the logging system if not present already.\nAvailable handlers are currently: ['console-bw', 'console-color', 'rotating-log']", "id": "f5783:c0:m5"}
{"signature": "def remove_handler(self, name):", "body": "if name in self.handlers:<EOL><INDENT>self.log.removeHandler(self.handlers[name])<EOL><DEDENT>", "docstring": "Remove handler from the logging system if present already.\nAvailable handlers are currently: ['console-bw', 'console-color', 'rotating-log']", "id": "f5783:c0:m7"}
{"signature": "def set_formatter(self, formatter='<STR_LIT>', handlers=None):", "body": "for h in self.get_handlers(handlers):<EOL><INDENT>h.setFormatter(logging.Formatter(formatters[formatter]))<EOL><DEDENT>", "docstring": "Set the text format of messages to one of the pre-determined forms,\none of ['quiet', 'minimal', 'standard', 'verbose']", "id": "f5783:c0:m4"}
{"signature": "def __init__(self, verbosity='<STR_LIT>', colorlogs=False, logtofile=False,<EOL>logfilename='<STR_LIT>'):", "body": "self.log = logging.getLogger('<STR_LIT>')<EOL>self.log.setLevel(<NUM_LIT:1>)<EOL>self.handlers = {}<EOL>self.verbosity = sanitize(verbosity)<EOL>self.logfilename = logfilename<EOL>level = v2l.get(verbosity, '<STR_LIT:info>')<EOL>form  = v2f.get(verbosity, '<STR_LIT>')<EOL>color = '<STR_LIT>' if colorlogs else '<STR_LIT>'<EOL>if logtofile:<EOL><INDENT>self.add_handler(<EOL>name='<STR_LIT>', level=level, formatter=form,<EOL>filename=self.logfilename<EOL>)<EOL><DEDENT>self.add_handler(name=color, level=level, formatter=form)<EOL>", "docstring": "Create a new logger class. Since the logging interface is actually global,\nany new logs will create even more clutter on the screen. Therefore,\nonly create one! (as is created at the bottom of this file)", "id": "f5783:c0:m0"}
{"signature": "def create_img():", "body": "<EOL>rad = <NUM_LIT:0.5> * np.random.randn(POS.shape[<NUM_LIT:0>]) + <NUM_LIT>  <EOL>part = objs.PlatonicSpheresCollection(POS, rad, zscale=<NUM_LIT>)<EOL>slab = objs.Slab(zpos=<NUM_LIT>, angles=(-<NUM_LIT>, -<NUM_LIT>))<EOL>objects = comp.ComponentCollection([part, slab], category='<STR_LIT>')<EOL>p = exactpsf.FixedSSChebLinePSF(kfki=<NUM_LIT>, zslab=-<NUM_LIT>, alpha=<NUM_LIT>,<EOL>n2n1=<NUM_LIT>, sigkf=-<NUM_LIT>, zscale=<NUM_LIT>, laser_wavelength=<NUM_LIT>)<EOL>i = ilms.BarnesStreakLegPoly2P1D(npts=(<NUM_LIT:16>,<NUM_LIT:10>,<NUM_LIT:8>,<NUM_LIT:4>), zorder=<NUM_LIT:8>)<EOL>b = ilms.LegendrePoly2P1D(order=(<NUM_LIT:7>,<NUM_LIT:2>,<NUM_LIT:2>), category='<STR_LIT>')<EOL>off = comp.GlobalScalar(name='<STR_LIT>', value=-<NUM_LIT>)<EOL>mdl = models.ConfocalImageModel()<EOL>st = states.ImageState(util.NullImage(shape=[<NUM_LIT>,<NUM_LIT:64>,<NUM_LIT:64>]),<EOL>[objects, p, i, b, off], mdl=mdl, model_as_data=True)<EOL>b.update(b.params, BKGVALS)<EOL>i.update(i.params, ILMVALS)<EOL>im = st.model + np.random.randn(*st.model.shape) * <NUM_LIT><EOL>return util.Image(im)<EOL>", "docstring": "Creates an image, as a `peri.util.Image`, which is similar\n    to the image in the tutorial", "id": "f5790:m1"}
{"signature": "def dorun(SNR=<NUM_LIT:20>, ntimes=<NUM_LIT:20>, samples=<NUM_LIT:10>, noise_samples=<NUM_LIT:10>, sweeps=<NUM_LIT:20>, burn=<NUM_LIT:10>,<EOL>correlated=False):", "body": "if not correlated:<EOL><INDENT>times = np.logspace(-<NUM_LIT:3>, <NUM_LIT:0>, ntimes)<EOL><DEDENT>else:<EOL><INDENT>times = np.logspace(np.log10(<NUM_LIT>), np.log10(<NUM_LIT:30>), ntimes)<EOL><DEDENT>crbs, vals, errs, poss = [], [], [], []<EOL>for i,t in enumerate(times):<EOL><INDENT>print('<STR_LIT>', i, t)<EOL>for j in range(samples):<EOL><INDENT>print('<STR_LIT:image>', j, '<STR_LIT:|>', end='<STR_LIT:U+0020>') <EOL>if not correlated:<EOL><INDENT>s,im,pos = diffusion(diffusion_constant=<NUM_LIT>, exposure_time=t)<EOL><DEDENT>else:<EOL><INDENT>s,im,pos = diffusion_correlated(diffusion_constant=<NUM_LIT>, exposure_time=t)<EOL><DEDENT>common.set_image(s, im, <NUM_LIT:1.0>/SNR)<EOL>crbs.append(common.crb(s))<EOL>val, err = common.sample(s, im, <NUM_LIT:1.0>/SNR, N=noise_samples, sweeps=sweeps, burn=burn)<EOL>poss.append(pos)<EOL>vals.append(val)<EOL>errs.append(err)<EOL><DEDENT><DEDENT>shape0 = (ntimes, samples, -<NUM_LIT:1>)<EOL>shape1 = (ntimes, samples, noise_samples, -<NUM_LIT:1>)<EOL>crbs = np.array(crbs).reshape(shape0)<EOL>vals = np.array(vals).reshape(shape1)<EOL>errs = np.array(errs).reshape(shape1)<EOL>poss = np.array(poss).reshape(shape0)<EOL>return  [crbs, vals, errs, poss, times]<EOL>", "docstring": "we want to display the errors introduced by pixelation so we plot:\n    * CRB, sampled error vs exposure time\n\na = dorun(ntimes=10, samples=5, noise_samples=5, sweeps=20, burn=8)", "id": "f5794:m2"}
{"signature": "def diffusion(diffusion_constant=<NUM_LIT>, exposure_time=<NUM_LIT>, samples=<NUM_LIT:200>):", "body": "radius = <NUM_LIT:5><EOL>psfsize = np.array([<NUM_LIT>, <NUM_LIT:1.0>, <NUM_LIT>])<EOL>s0 = init.create_single_particle_state(imsize=<NUM_LIT:4>*radius, <EOL>radius=radius, psfargs={'<STR_LIT>': psfsize, '<STR_LIT:error>': <NUM_LIT>})<EOL>finalimage = <NUM_LIT:0>*s0.get_model_image()[s0.inner]<EOL>position = <NUM_LIT:0>*s0.obj.pos[<NUM_LIT:0>]<EOL>for i in range(samples):<EOL><INDENT>offset = np.sqrt(<NUM_LIT:6>*diffusion_constant*exposure_time)*np.random.randn(<NUM_LIT:3>)<EOL>s0.obj.pos[<NUM_LIT:0>] = np.array(s0.image.shape)/<NUM_LIT:2> + offset<EOL>s0.reset()<EOL>finalimage += s0.get_model_image()[s0.inner]<EOL>position += s0.obj.pos[<NUM_LIT:0>]<EOL><DEDENT>finalimage /= float(samples)<EOL>position /= float(samples)<EOL>s = init.create_single_particle_state(imsize=<NUM_LIT:4>*radius, sigma=<NUM_LIT>,<EOL>radius=radius, psfargs={'<STR_LIT>': psfsize, '<STR_LIT:error>': <NUM_LIT>})<EOL>s.reset()<EOL>return s, finalimage, position<EOL>", "docstring": "See `diffusion_correlated` for information related to units, etc", "id": "f5794:m0"}
{"signature": "def translate_fourier(image, dx):", "body": "N = image.shape[<NUM_LIT:0>]<EOL>f = <NUM_LIT:2>*np.pi*np.fft.fftfreq(N)<EOL>kx,ky,kz = np.meshgrid(*(f,)*<NUM_LIT:3>, indexing='<STR_LIT>')<EOL>kv = np.array([kx,ky,kz]).T<EOL>q = np.fft.fftn(image)*np.exp(-<NUM_LIT>*(kv*dx).sum(axis=-<NUM_LIT:1>)).T<EOL>return np.real(np.fft.ifftn(q))<EOL>", "docstring": "Translate an image in fourier-space with plane waves", "id": "f5798:m6"}
{"signature": "def create_comparison_state(image, position, radius=<NUM_LIT>, snr=<NUM_LIT:20>,<EOL>method='<STR_LIT>', extrapad=<NUM_LIT:2>, zscale=<NUM_LIT:1.0>):", "body": "<EOL>image = common.pad(image, extrapad, <NUM_LIT:0>)<EOL>s = init.create_single_particle_state(imsize=np.array(image.shape), sigma=<NUM_LIT:1.0>/snr,<EOL>radius=radius, psfargs={'<STR_LIT>': np.array([<NUM_LIT>, <NUM_LIT:1.0>, <NUM_LIT>]), '<STR_LIT:error>': <NUM_LIT>, '<STR_LIT>': <NUM_LIT:2>},<EOL>objargs={'<STR_LIT>': method}, stateargs={'<STR_LIT>': False, '<STR_LIT>': <NUM_LIT:4>, '<STR_LIT>': zscale})<EOL>s.obj.pos[<NUM_LIT:0>] = position + s.pad + extrapad<EOL>s.reset()<EOL>s.model_to_true_image()<EOL>timage = <NUM_LIT:1>-np.pad(image, s.pad, mode='<STR_LIT>', constant_values=<NUM_LIT:0>)<EOL>timage = s.psf.execute(timage)<EOL>return s, timage[s.inner]<EOL>", "docstring": "Take a platonic image and position and create a state which we can\nuse to sample the error for peri. Also return the blurred platonic\nimage so we can vary the noise on it later", "id": "f5799:m0"}
{"signature": "def dorun(SNR=<NUM_LIT:20>, njitters=<NUM_LIT:20>, samples=<NUM_LIT:10>, noise_samples=<NUM_LIT:10>, sweeps=<NUM_LIT:20>, burn=<NUM_LIT:10>):", "body": "jitters = np.logspace(-<NUM_LIT:6>, np.log10(<NUM_LIT:0.5>), njitters)<EOL>crbs, vals, errs, poss = [], [], [], []<EOL>for i,t in enumerate(jitters):<EOL><INDENT>print('<STR_LIT>', i, t)<EOL>for j in range(samples):<EOL><INDENT>print('<STR_LIT:image>', j, '<STR_LIT:|>', end='<STR_LIT:U+0020>') <EOL>s,im,pos = zjitter(jitter=t)<EOL>common.set_image(s, im, <NUM_LIT:1.0>/SNR)<EOL>crbs.append(crb(s))<EOL>val, err = sample(s, im, <NUM_LIT:1.0>/SNR, N=noise_samples, sweeps=sweeps, burn=burn)<EOL>poss.append(pos)<EOL>vals.append(val)<EOL>errs.append(err)<EOL><DEDENT><DEDENT>shape0 = (njitters, samples, -<NUM_LIT:1>)<EOL>shape1 = (njitters, samples, noise_samples, -<NUM_LIT:1>)<EOL>crbs = np.array(crbs).reshape(shape0)<EOL>vals = np.array(vals).reshape(shape1)<EOL>errs = np.array(errs).reshape(shape1)<EOL>poss = np.array(poss).reshape(shape0)<EOL>return  [crbs, vals, errs, poss, jitters]<EOL>", "docstring": "we want to display the errors introduced by pixelation so we plot:\n    * CRB, sampled error vs exposure time\n\na = dorun(ntimes=10, samples=5, noise_samples=5, sweeps=20, burn=8)", "id": "f5800:m1"}
{"signature": "def fit_edge(separation, radius=<NUM_LIT>, samples=<NUM_LIT:100>, imsize=<NUM_LIT:64>, sigma=<NUM_LIT>, axis='<STR_LIT:z>'):", "body": "terrors = []<EOL>berrors = []<EOL>crbs = []<EOL>for sep in separation:<EOL><INDENT>print('<STR_LIT:=>'*<NUM_LIT>)<EOL>print('<STR_LIT>', sep, end='<STR_LIT:U+0020>')<EOL>s = init.create_two_particle_state(imsize, radius=radius, delta=sep, sigma=<NUM_LIT>,<EOL>axis='<STR_LIT:z>', psfargs={'<STR_LIT>': (<NUM_LIT>,<NUM_LIT:1.0>,<NUM_LIT>), '<STR_LIT:error>': <NUM_LIT>},<EOL>stateargs={'<STR_LIT>': True, '<STR_LIT>': const.PAD})<EOL>d = np.array([<NUM_LIT:0>,<NUM_LIT:0.5>,<NUM_LIT:0.5>])<EOL>s.obj.pos -= d<EOL>s.reset()<EOL>bl = s.blocks_particle(<NUM_LIT:0>)<EOL>s.update(bl[<NUM_LIT:0>], np.array([s.pad+radius]))<EOL>bl = s.blocks_particle(<NUM_LIT:1>)<EOL>s.update(bl[<NUM_LIT:0>], np.array([s.pad-radius]))<EOL>if axis == '<STR_LIT:z>':<EOL><INDENT>bl = s.blocks_particle(<NUM_LIT:1>)<EOL>s.update(bl[<NUM_LIT:0>], s.state[bl[<NUM_LIT:0>]]-sep)<EOL>s.model_to_true_image()<EOL><DEDENT>if axis == '<STR_LIT>':<EOL><INDENT>bl = s.blocks_particle(<NUM_LIT:1>)<EOL>s.update(bl[<NUM_LIT:2>], s.state[bl[<NUM_LIT:2>]]+sep)<EOL>s.model_to_true_image()<EOL><DEDENT>p = s.state[s.b_pos].reshape(-<NUM_LIT:1>,<NUM_LIT:3>).copy()<EOL>print(p[<NUM_LIT:0>], p[<NUM_LIT:1>])<EOL>bl = s.explode(s.b_pos)<EOL>crbs.append(np.sqrt(np.diag(np.linalg.inv(s.fisher_information(blocks=bl)))).reshape(-<NUM_LIT:1>,<NUM_LIT:3>))<EOL>tmp_tp, tmp_bf = [],[]<EOL>for i in range(samples):<EOL><INDENT>print(i)<EOL>bench.jiggle_particles(s, pos=p, sig=<NUM_LIT>, mask=np.array([<NUM_LIT:1>,<NUM_LIT:1>,<NUM_LIT:1>]))<EOL>t = bench.trackpy(s)<EOL>b = bench.bamfpy_positions(s, sweeps=<NUM_LIT:15>)<EOL>tmp_tp.append(bench.error(s, t))<EOL>tmp_bf.append(bench.error(s, b))<EOL><DEDENT>terrors.append(tmp_tp)<EOL>berrors.append(tmp_bf)<EOL><DEDENT>return np.array(crbs), np.array(terrors), np.array(berrors)<EOL>", "docstring": "axis is 'z' or 'xy'\nseps = 
np.linspace(0,2,20) 'z'\nseps = np.linspace(-2,2,20) 'xy'", "id": "f5803:m0"}
{"signature": "def __call__(self, field):", "body": "<EOL>tile = peri.util.Tile(field.shape)<EOL>rx, ry = tile.kvectors(norm=<NUM_LIT:1.0>/tile.shape)<EOL>sx, sy = self.values<EOL>psf = np.exp(-((rx/sx)**<NUM_LIT:2> + (ry/sy)**<NUM_LIT:2>)/<NUM_LIT:2>)<EOL>psf = psf / psf.sum()<EOL>self.psf = psf<EOL>out = fft.fftn(fft.ifftn(field)*fft.ifftn(psf))<EOL>return fftnorm(np.real(out))<EOL>", "docstring": "Accept a field, apply the point-spread-function, and return\nthe resulting image of a blurred field", "id": "f5809:c1:m2"}
{"signature": "def get(self):", "body": "return self<EOL>", "docstring": "Since we wish to use the GaussianPSF in the model by calling P(D), the\nget function will simply return this object and we will override\n__call__ so that we can use P(...).", "id": "f5809:c1:m1"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def create_chunker(self, chunk_size):<DEDENT>", "body": "", "docstring": "Abstract interface for chunker creation.", "id": "f5812:c0:m1"}
{"signature": "def create_multilevel_chunker(self, chunk_sizes):", "body": "rolling_hash = _rabinkarprh.RabinKarpMultiThresholdHash(self.window_size, self._seed,<EOL>[<NUM_LIT:1.0> / chunk_size for chunk_size in chunk_sizes])<EOL>return RabinKarpCDC._MultiLevelChunker(rolling_hash)<EOL>", "docstring": "Create a multi-level chunker performing content-defined chunking (CDC) using Rabin Karp's rolling hash scheme\n        with different specific, expected chunk sizes.\n\n        Args:\n            chunk_sizes (list): List of (expected) target chunk sizes.\n\n                Warning:\n                    For performance reasons, behavior is only defined if chunk sizes are passed in order, i.e., from\n                    lowest to highest value.\n\n        Returns:\n            BaseMultiLevelChunker: A multi-level chunker object.", "id": "f5812:c5:m2"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def next_chunk_boundaries_levels(self, buf, prepend_bytes=<NUM_LIT:0>):<DEDENT>", "body": "", "docstring": "Computes the next chunk boundaries within `buf`.\n\n        Similar to :meth:`.next_chunk_boundaries`, but information about which chunker led to a respective boundary is\n        included in the returned value.\n\n        Args:\n            buf (bytes): The message that is to be chunked.\n            prepend_bytes (Optional[int]): Optional number of zero bytes that should be input to the chunking algorithm\n                before `buf`.\n\n        Returns:\n            iterable: An iterable yielding tuples (boundary, level), where boundary is a boundary position relative to\n                `buf` and level is the index of the chunker (i.e., the index of its chunk size specified during\n                instantiation) that yielded the boundary.\n\n            If multiple chunkers yield the same boundary, it is returned only once, along with the highest matching\n            chunker index.", "id": "f5812:c2:m1"}
{"signature": "def create_multilevel_chunker(self, chunk_sizes):", "body": "return DefaultMultiLevelChunker(chunk_sizes, self.create_chunker)<EOL>", "docstring": "Create a multi-level chunker performing chunking with different chunk sizes.\n\n        Args:\n            chunk_sizes (list): List of target chunk sizes.\n\n                Warning:\n                    For performance reasons, behavior is only defined if chunk sizes are passed in order, i.e., from\n                    lowest to highest value.\n\n        Returns:\n            BaseMultiLevelChunker: A multi-level chunker object.", "id": "f5812:c0:m2"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def next_chunk_boundaries(self, buf, prepend_bytes=<NUM_LIT:0>):<DEDENT>", "body": "", "docstring": "Computes the next chunk boundaries within `buf`.\n\n        Note:\n            If called more than once, output depends on `all` previous calls of this function: The chunking algorithm is\n            applied to the concatenation of all `buf` values.\n\n        Args:\n            buf (bytes): The message that is to be chunked.\n            prepend_bytes (Optional[int]): Optional number of zero bytes that should be input to the chunking algorithm\n                before `buf`.\n\n        Returns:\n            iterable: An iterable yielding chunk boundary positions relative to `buf`.", "id": "f5812:c1:m0"}
{"signature": "def build(<EOL>src, requirements=None, local_package=None,<EOL>config_file='<STR_LIT>', profile_name=None,<EOL>):", "body": "<EOL>path_to_config_file = os.path.join(src, config_file)<EOL>cfg = read_cfg(path_to_config_file, profile_name)<EOL>dist_directory = cfg.get('<STR_LIT>', '<STR_LIT>')<EOL>path_to_dist = os.path.join(src, dist_directory)<EOL>mkdir(path_to_dist)<EOL>function_name = cfg.get('<STR_LIT>')<EOL>output_filename = '<STR_LIT>'.format(timestamp(), function_name)<EOL>path_to_temp = mkdtemp(prefix='<STR_LIT>')<EOL>pip_install_to_target(<EOL>path_to_temp,<EOL>requirements=requirements,<EOL>local_package=local_package,<EOL>)<EOL>if '<STR_LIT>' in os.listdir(path_to_temp):<EOL><INDENT>print(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>)<EOL>with open(os.path.join(path_to_temp, '<STR_LIT>'), '<STR_LIT:wb>'):<EOL><INDENT>pass<EOL><DEDENT><DEDENT>output_filename = (<EOL>'<STR_LIT>'.format(output_filename)<EOL>if not output_filename.endswith('<STR_LIT>')<EOL>else output_filename<EOL>)<EOL>build_config = defaultdict(**cfg.get('<STR_LIT>', {}))<EOL>build_source_directories = build_config.get('<STR_LIT>', '<STR_LIT>')<EOL>build_source_directories = (<EOL>build_source_directories<EOL>if build_source_directories is not None<EOL>else '<STR_LIT>'<EOL>)<EOL>source_directories = [<EOL>d.strip() for d in build_source_directories.split('<STR_LIT:U+002C>')<EOL>]<EOL>files = []<EOL>for filename in os.listdir(src):<EOL><INDENT>if os.path.isfile(filename):<EOL><INDENT>if filename == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>if filename == config_file:<EOL><INDENT>continue<EOL><DEDENT>print('<STR_LIT>' % filename)<EOL>files.append(os.path.join(src, filename))<EOL><DEDENT>elif os.path.isdir(filename) and filename in source_directories:<EOL><INDENT>print('<STR_LIT>' % filename)<EOL>files.append(os.path.join(src, filename))<EOL><DEDENT><DEDENT>os.chdir(path_to_temp)<EOL>for f in files:<EOL><INDENT>if os.path.isfile(f):<EOL><INDENT>_, filename = 
os.path.split(f)<EOL>copyfile(f, os.path.join(path_to_temp, filename))<EOL>copystat(f, os.path.join(path_to_temp, filename))<EOL><DEDENT>elif os.path.isdir(f):<EOL><INDENT>destination_folder = os.path.join(path_to_temp, f[len(src) + <NUM_LIT:1>:])<EOL>copytree(f, destination_folder)<EOL><DEDENT><DEDENT>path_to_zip_file = archive('<STR_LIT>', path_to_dist, output_filename)<EOL>return path_to_zip_file<EOL>", "docstring": "Builds the file bundle.\n\n    :param str src:\n       The path to your Lambda ready project (folder must contain a valid\n        config.yaml and handler module (e.g.: service.py).\n    :param str local_package:\n        The path to a local package with should be included in the deploy as\n        well (and/or is not available on PyPi)", "id": "f5821:m6"}
{"signature": "def upload(<EOL>src, requirements=None, local_package=None,<EOL>config_file='<STR_LIT>', profile_name=None,<EOL>):", "body": "<EOL>path_to_config_file = os.path.join(src, config_file)<EOL>cfg = read_cfg(path_to_config_file, profile_name)<EOL>path_to_zip_file = build(<EOL>src, config_file=config_file, requirements=requirements,<EOL>local_package=local_package,<EOL>)<EOL>upload_s3(cfg, path_to_zip_file)<EOL>", "docstring": "Uploads a new function to AWS S3.\n\n    :param str src:\n        The path to your Lambda ready project (folder must contain a valid\n        config.yaml and handler module (e.g.: service.py).\n    :param str local_package:\n        The path to a local package with should be included in the deploy as\n        well (and/or is not available on PyPi)", "id": "f5821:m3"}
{"signature": "def get_handler_filename(handler):", "body": "module_name, _ = handler.split('<STR_LIT:.>')<EOL>return '<STR_LIT>'.format(module_name)<EOL>", "docstring": "Shortcut to get the filename from the handler string.\n\n    :param str handler:\n      A dot delimited string representing the `<module>.<function name>`.", "id": "f5821:m8"}
{"signature": "def pip_install_to_target(path, requirements=None, local_package=None):", "body": "packages = []<EOL>if not requirements:<EOL><INDENT>print('<STR_LIT>')<EOL>pkgStr = subprocess.check_output([sys.executable, '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>packages.extend(pkgStr.decode('<STR_LIT:utf-8>').splitlines())<EOL><DEDENT>else:<EOL><INDENT>if os.path.exists(requirements):<EOL><INDENT>print('<STR_LIT>')<EOL>data = read(requirements)<EOL>packages.extend(data.splitlines())<EOL><DEDENT><DEDENT>if not packages:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>if local_package is not None:<EOL><INDENT>if not isinstance(local_package, (list, tuple)):<EOL><INDENT>local_package = [local_package]<EOL><DEDENT>for l_package in local_package:<EOL><INDENT>packages.append(l_package)<EOL><DEDENT><DEDENT>_install_packages(path, packages)<EOL>", "docstring": "For a given active virtualenv, gather all installed pip packages then\n    copy (re-install) them to the path provided.\n\n    :param str path:\n        Path to copy installed pip packages to.\n    :param str requirements:\n        If set, only the packages in the supplied requirements file are\n        installed.\n        If not set then installs all packages found via pip freeze.\n    :param str local_package:\n        The path to a local package with should be included in the deploy as\n        well (and/or is not available on PyPi)", "id": "f5821:m10"}
{"signature": "def deploy_s3(<EOL>src, requirements=None, local_package=None,<EOL>config_file='<STR_LIT>', profile_name=None,<EOL>preserve_vpc=False<EOL>):", "body": "<EOL>path_to_config_file = os.path.join(src, config_file)<EOL>cfg = read_cfg(path_to_config_file, profile_name)<EOL>path_to_zip_file = build(<EOL>src, config_file=config_file, requirements=requirements,<EOL>local_package=local_package,<EOL>)<EOL>use_s3 = True<EOL>s3_file = upload_s3(cfg, path_to_zip_file, use_s3)<EOL>existing_config = get_function_config(cfg)<EOL>if existing_config:<EOL><INDENT>update_function(cfg, path_to_zip_file, existing_config, use_s3=use_s3,<EOL>s3_file=s3_file, preserve_vpc=preserve_vpc)<EOL><DEDENT>else:<EOL><INDENT>create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file)<EOL><DEDENT>", "docstring": "Deploys a new function via AWS S3.\n\n    :param str src:\n        The path to your Lambda ready project (folder must contain a valid\n        config.yaml and handler module (e.g.: service.py).\n    :param str local_package:\n        The path to a local package with should be included in the deploy as\n        well (and/or is not available on PyPi)", "id": "f5821:m2"}
{"signature": "def deploy(<EOL>src, requirements=None, local_package=None,<EOL>config_file='<STR_LIT>', profile_name=None,<EOL>preserve_vpc=False<EOL>):", "body": "<EOL>path_to_config_file = os.path.join(src, config_file)<EOL>cfg = read_cfg(path_to_config_file, profile_name)<EOL>path_to_zip_file = build(<EOL>src, config_file=config_file,<EOL>requirements=requirements,<EOL>local_package=local_package,<EOL>)<EOL>existing_config = get_function_config(cfg)<EOL>if existing_config:<EOL><INDENT>update_function(cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc)<EOL><DEDENT>else:<EOL><INDENT>create_function(cfg, path_to_zip_file)<EOL><DEDENT>", "docstring": "Deploys a new function to AWS Lambda.\n\n    :param str src:\n        The path to your Lambda ready project (folder must contain a valid\n        config.yaml and handler module (e.g.: service.py).\n    :param str local_package:\n        The path to a local package with should be included in the deploy as\n        well (and/or is not available on PyPi)", "id": "f5821:m1"}
{"signature": "def get_function_config(cfg):", "body": "function_name = cfg.get('<STR_LIT>')<EOL>profile_name = cfg.get('<STR_LIT>')<EOL>aws_access_key_id = cfg.get('<STR_LIT>')<EOL>aws_secret_access_key = cfg.get('<STR_LIT>')<EOL>client = get_client(<EOL>'<STR_LIT>', profile_name, aws_access_key_id, aws_secret_access_key,<EOL>cfg.get('<STR_LIT>'),<EOL>)<EOL>try:<EOL><INDENT>return client.get_function(FunctionName=function_name)<EOL><DEDENT>except client.exceptions.ResourceNotFoundException as e:<EOL><INDENT>if '<STR_LIT>' in str(e):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>", "docstring": "Check whether a function exists or not and return its config", "id": "f5821:m17"}
{"signature": "def setup_paths(source, destination, name, add_to_global, force):", "body": "if source[-<NUM_LIT:1>] == \"<STR_LIT:/>\":<EOL><INDENT>source = source[:-<NUM_LIT:1>]<EOL><DEDENT>if not name:<EOL><INDENT>name = os.path.split(source)[-<NUM_LIT:1>]<EOL><DEDENT>elif name.endswith(\"<STR_LIT>\"):<EOL><INDENT>name = name.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL><DEDENT>if add_to_global:<EOL><INDENT>destination = DEFAULT_DOCSET_PATH<EOL><DEDENT>dest = os.path.join(destination or \"<STR_LIT>\", name + \"<STR_LIT>\")<EOL>dst_exists = os.path.lexists(dest)<EOL>if dst_exists and force:<EOL><INDENT>shutil.rmtree(dest)<EOL><DEDENT>elif dst_exists:<EOL><INDENT>log.error(<EOL>'<STR_LIT>'.format(<EOL>click.format_filename(dest)<EOL>)<EOL>)<EOL>raise SystemExit(errno.EEXIST)<EOL><DEDENT>return source, dest, name<EOL>", "docstring": "Determine source and destination using the options.", "id": "f5828:m2"}
{"signature": "def parse(self):", "body": "soup = BeautifulSoup(<EOL>codecs.open(<EOL>os.path.join(self.doc_path, \"<STR_LIT>\"),<EOL>mode=\"<STR_LIT:r>\",<EOL>encoding=\"<STR_LIT:utf-8>\",<EOL>),<EOL>\"<STR_LIT>\",<EOL>)<EOL>for tag in soup.body.find_all(\"<STR_LIT:a>\"):<EOL><INDENT>path = tag.get(\"<STR_LIT>\")<EOL>data_type = tag.get(\"<STR_LIT>\")<EOL>if path and data_type and not path.startswith(\"<STR_LIT:#>\"):<EOL><INDENT>name = tag.string<EOL>yield ParserEntry(<EOL>name=name,<EOL>type=data_type.replace(\"<STR_LIT>\", \"<STR_LIT>\"),<EOL>path=str(path),<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "Parse pydoctor docs at *doc_path*.\n\nyield `ParserEntry`s", "id": "f5830:c0:m1"}
{"signature": "def parse(self):", "body": "with open(os.path.join(self.doc_path, \"<STR_LIT>\"), \"<STR_LIT:rb>\") as inv_f:<EOL><INDENT>yield from self._inv_to_entries(<EOL>InventoryFile.load(inv_f, \"<STR_LIT>\", os.path.join)<EOL>)<EOL><DEDENT>", "docstring": "Parse sphinx docs at self.doc_path.\n\nyield `ParserEntry`s.", "id": "f5831:c0:m1"}
{"signature": "def inv_entry_to_path(data):", "body": "path_tuple = data[<NUM_LIT:2>].split(\"<STR_LIT:#>\")<EOL>if len(path_tuple) > <NUM_LIT:1>:<EOL><INDENT>path_str = \"<STR_LIT:#>\".join((path_tuple[<NUM_LIT:0>], path_tuple[-<NUM_LIT:1>]))<EOL><DEDENT>else:<EOL><INDENT>path_str = data[<NUM_LIT:2>]<EOL><DEDENT>return path_str<EOL>", "docstring": "Determine the path from the intersphinx inventory entry\n\nDiscard the anchors between head and tail to make it\ncompatible with situations where extra meta information is encoded.", "id": "f5831:m1"}
{"signature": "def find_and_patch_entry(soup, entry):", "body": "link = soup.find(\"<STR_LIT:a>\", {\"<STR_LIT:class>\": \"<STR_LIT>\"}, href=\"<STR_LIT:#>\" + entry.anchor)<EOL>tag = soup.new_tag(\"<STR_LIT:a>\")<EOL>tag[\"<STR_LIT:name>\"] = APPLE_REF_TEMPLATE.format(entry.type, entry.name)<EOL>if link:<EOL><INDENT>link.parent.insert(<NUM_LIT:0>, tag)<EOL>return True<EOL><DEDENT>elif entry.anchor.startswith(\"<STR_LIT>\"):<EOL><INDENT>soup.h1.parent.insert(<NUM_LIT:0>, tag)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Modify soup so Dash.app can generate TOCs on the fly.", "id": "f5831:m0"}
{"signature": "def detect(path: str) -> bool:", "body": "", "docstring": "A static method that returns whether *path* can be parsed by us.", "id": "f5834:c1:m0"}
{"signature": "def parse() -> None:", "body": "", "docstring": "Parse `self.doc_path`, yield a :class:`ParserEntry` for each found\nentry.", "id": "f5834:c1:m1"}
{"signature": "@coroutine<EOL>def patch_anchors(parser, show_progressbar):", "body": "files = defaultdict(list)<EOL>try:<EOL><INDENT>while True:<EOL><INDENT>pentry = (yield)<EOL>try:<EOL><INDENT>fname, anchor = pentry.path.split(\"<STR_LIT:#>\")<EOL>files[fname].append(<EOL>TOCEntry(name=pentry.name, type=pentry.type, anchor=anchor)<EOL>)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>except GeneratorExit:<EOL><INDENT>pass<EOL><DEDENT>def patch_files(files):<EOL><INDENT>for fname, entries in files:<EOL><INDENT>full_path = os.path.join(parser.doc_path, fname)<EOL>with codecs.open(full_path, mode=\"<STR_LIT:r>\", encoding=\"<STR_LIT:utf-8>\") as fp:<EOL><INDENT>soup = BeautifulSoup(fp, \"<STR_LIT>\")<EOL>for entry in entries:<EOL><INDENT>if not parser.find_and_patch_entry(soup, entry):<EOL><INDENT>log.debug(<EOL>\"<STR_LIT>\".format(<EOL>entry.anchor, click.format_filename(fname)<EOL>)<EOL>)<EOL><DEDENT><DEDENT><DEDENT>with open(full_path, mode=\"<STR_LIT:wb>\") as fp:<EOL><INDENT>fp.write(soup.encode(\"<STR_LIT:utf-8>\"))<EOL><DEDENT><DEDENT><DEDENT>if show_progressbar is True:<EOL><INDENT>with click.progressbar(<EOL>files.items(),<EOL>width=<NUM_LIT:0>,<EOL>length=len(files),<EOL>label=\"<STR_LIT>\",<EOL>) as pbar:<EOL><INDENT>patch_files(pbar)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>patch_files(files.items())<EOL><DEDENT>", "docstring": "Consume ``ParseEntry``s then patch docs for TOCs by calling\n*parser*'s ``find_and_patch_entry``.", "id": "f5834:m1"}
{"signature": "def read(*parts):", "body": "here = os.path.abspath(os.path.dirname(__file__))<EOL>with codecs.open(os.path.join(here, *parts), \"<STR_LIT:rb>\", \"<STR_LIT:utf-8>\") as f:<EOL><INDENT>return f.read()<EOL><DEDENT>", "docstring": "Build an absolute path from *parts* and and return the contents of the\nresulting file.  Assume UTF-8 encoding.", "id": "f5836:m0"}
{"signature": "def pause(self):", "body": "self.running = False<EOL>", "docstring": "Pause the consumer.", "id": "f5838:c0:m2"}
{"signature": "def run(self):", "body": "self.log.debug('<STR_LIT>')<EOL>self.running = True<EOL>while self.running:<EOL><INDENT>self.upload()<EOL><DEDENT>self.log.debug('<STR_LIT>')<EOL>", "docstring": "Runs the consumer.", "id": "f5838:c0:m1"}
{"signature": "def __init__(self, user, token, queue, upload_size=<NUM_LIT:100>, on_error=None):", "body": "Thread.__init__(self)<EOL>self.api = librato.connect(user, token)<EOL>self.daemon = True  <EOL>self.upload_size = upload_size<EOL>self.on_error = on_error<EOL>self.queue = queue<EOL>self.retries = <NUM_LIT:3><EOL>", "docstring": "Create a consumer thread.", "id": "f5838:c0:m0"}
{"signature": "def join(self):", "body": "self.consumer.pause()<EOL>self.consumer.join()<EOL>", "docstring": "Ends the consumer thread once the queue is empty. Blocks execution until finished", "id": "f5839:c0:m4"}
{"signature": "def _is_requirement(line):", "body": "line = line.strip()<EOL>return line and not (line.startswith(\"<STR_LIT>\") or line.startswith(\"<STR_LIT:#>\"))<EOL>", "docstring": "Returns whether the line is a valid package requirement.", "id": "f5841:m0"}
{"signature": "def _read_requirements(filename):", "body": "requirements_file = open(filename).read()<EOL>return [line.strip() for line in requirements_file.splitlines()<EOL>if _is_requirement(line)]<EOL>", "docstring": "Returns a list of package requirements read from the file.", "id": "f5841:m1"}
{"signature": "def get_params(self):", "body": "out = {}<EOL>out['<STR_LIT>'] = self.__class__<EOL>out['<STR_LIT>'] = dict(steps=[])<EOL>for name, step in self.steps:<EOL><INDENT>out['<STR_LIT>']['<STR_LIT>'].append([name, step.get_params(deep=True)])<EOL><DEDENT>return out<EOL>", "docstring": "Get the parameters for this object.  Returns as a dict.", "id": "f5846:c1:m1"}
{"signature": "def transform(self, jam):", "body": "for state in self.states(jam):<EOL><INDENT>yield self._transform(jam, state)<EOL><DEDENT>", "docstring": "Iterative transformation generator\n\n        Applies the deformation to an input jams object.\n\n        This generates a sequence of deformed output JAMS.\n\n        Parameters\n        ----------\n        jam : jams.JAMS\n            The jam to transform\n\n        Examples\n        --------\n        >>> for jam_out in deformer.transform(jam_in):\n        ...     process(jam_out)", "id": "f5846:c0:m7"}
{"signature": "def transform(self, jam):", "body": "for output in self.__recursive_transform(jam, self.steps):<EOL><INDENT>yield output<EOL><DEDENT>", "docstring": "Apply the sequence of transformations to a single jam object.\n\n        Parameters\n        ----------\n        jam : jams.JAMS\n            The jam object to transform\n\n        Yields\n        ------\n        jam_out : jams.JAMS\n            The jam objects produced by the transformation sequence", "id": "f5846:c1:m4"}
{"signature": "def __recursive_transform(self, jam, steps):", "body": "if len(steps) > <NUM_LIT:0>:<EOL><INDENT>head_transformer = steps[<NUM_LIT:0>][<NUM_LIT:1>]<EOL>for t_jam in head_transformer.transform(jam):<EOL><INDENT>for q in self.__recursive_transform(t_jam, steps[<NUM_LIT:1>:]):<EOL><INDENT>yield q<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>yield jam<EOL><DEDENT>", "docstring": "A recursive transformation pipeline", "id": "f5846:c1:m3"}
{"signature": "def __repr__(self):", "body": "class_name = self.__class__.__name__<EOL>return '<STR_LIT>'.format(class_name,<EOL>_pprint(self.get_params(),<EOL>offset=len(class_name),),)<EOL>", "docstring": "Pretty-print the object", "id": "f5846:c1:m2"}
{"signature": "def __repr__(self):", "body": "class_name = self.__class__.__name__<EOL>return '<STR_LIT>'.format(class_name,<EOL>_pprint(self.get_params(),<EOL>offset=len(class_name),),)<EOL>", "docstring": "Pretty-print the object", "id": "f5846:c2:m2"}
{"signature": "@property<EOL><INDENT>def __serialize__(self):<DEDENT>", "body": "data = self.get_params()<EOL>data['<STR_LIT>'] = data['<STR_LIT>'].__name__<EOL>return data<EOL>", "docstring": "Serializer", "id": "f5846:c0:m8"}
{"signature": "def __repr__(self):", "body": "class_name = self.__class__.__name__<EOL>return '<STR_LIT>'.format(class_name,<EOL>_pprint(self.get_params(deep=False)['<STR_LIT>'],<EOL>offset=len(class_name),),)<EOL>", "docstring": "Pretty-print this object", "id": "f5846:c0:m2"}
{"signature": "def get_params(self):", "body": "out = {}<EOL>out['<STR_LIT>'] = self.__class__<EOL>out['<STR_LIT>'] = dict(steps=[])<EOL>for name, step in self.steps:<EOL><INDENT>out['<STR_LIT>']['<STR_LIT>'].append([name, step.get_params(deep=True)])<EOL><DEDENT>return out<EOL>", "docstring": "Get the parameters for this object.  Returns as a dict.", "id": "f5846:c2:m1"}
{"signature": "def __init__(self, rate=<NUM_LIT>):", "body": "AbstractTimeStretch.__init__(self)<EOL>self.rate = np.atleast_1d(rate).flatten()<EOL>if np.any(self.rate <= <NUM_LIT:0>):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.rate = self.rate.tolist()<EOL>", "docstring": "Time stretching", "id": "f5850:c1:m0"}
{"signature": "def transpose(label, n_semitones):", "body": "<EOL>match = re.match(six.text_type('<STR_LIT>'),<EOL>six.text_type(label))<EOL>if not match:<EOL><INDENT>return label<EOL><DEDENT>note = match.group('<STR_LIT>')<EOL>new_note = librosa.midi_to_note(librosa.note_to_midi(note) + n_semitones,<EOL>octave=False)<EOL>return new_note + match.group('<STR_LIT>')<EOL>", "docstring": "Transpose a chord label by some number of semitones\n\n    Parameters\n    ----------\n    label : str\n        A chord string\n\n    n_semitones : float\n        The number of semitones to move `label`\n\n    Returns\n    -------\n    label_transpose : str\n        The transposed chord label", "id": "f5853:m0"}
{"signature": "def serialize(transform, **kwargs):", "body": "params = transform.get_params()<EOL>return jsonpickle.encode(params, **kwargs)<EOL>", "docstring": "Serialize a transformation object or pipeline.\n\n    Parameters\n    ----------\n    transform : BaseTransform or Pipeline\n        The transformation object to be serialized\n\n    kwargs\n        Additional keyword arguments to `jsonpickle.encode()`\n\n    Returns\n    -------\n    json_str : str\n        A JSON encoding of the transformation\n\n    See Also\n    --------\n    deserialize\n\n    Examples\n    --------\n    >>> D = muda.deformers.TimeStretch(rate=1.5)\n    >>> muda.serialize(D)\n    '{\"params\": {\"rate\": 1.5},\n      \"__class__\": {\"py/type\": \"muda.deformers.time.TimeStretch\"}}'", "id": "f5854:m4"}
{"signature": "def __reconstruct(params):", "body": "if isinstance(params, dict):<EOL><INDENT>if '<STR_LIT>' in params:<EOL><INDENT>cls = params['<STR_LIT>']<EOL>data = __reconstruct(params['<STR_LIT>'])<EOL>return cls(**data)<EOL><DEDENT>else:<EOL><INDENT>data = dict()<EOL>for key, value in six.iteritems(params):<EOL><INDENT>data[key] = __reconstruct(value)<EOL><DEDENT>return data<EOL><DEDENT><DEDENT>elif isinstance(params, (list, tuple)):<EOL><INDENT>return [__reconstruct(v) for v in params]<EOL><DEDENT>else:<EOL><INDENT>return params<EOL><DEDENT>", "docstring": "Reconstruct a transformation or pipeline given a parameter dump.", "id": "f5854:m3"}
{"signature": "def jam_pack(jam, **kwargs):", "body": "if not hasattr(jam.sandbox, '<STR_LIT>'):<EOL><INDENT>jam.sandbox.muda = jams.Sandbox(history=[],<EOL>state=[],<EOL>version=dict(muda=version,<EOL>librosa=librosa.__version__,<EOL>jams=jams.__version__,<EOL>pysoundfile=psf.__version__))<EOL><DEDENT>elif not isinstance(jam.sandbox.muda, jams.Sandbox):<EOL><INDENT>jam.sandbox.muda = jams.Sandbox(**jam.sandbox.muda)<EOL><DEDENT>jam.sandbox.muda.update(**kwargs)<EOL>return jam<EOL>", "docstring": "Pack data into a jams sandbox.\n\n    If not already present, this creates a `muda` field within `jam.sandbox`,\n    along with `history`, `state`, and version arrays which are populated by\n    deformation objects.\n\n    Any additional fields can be added to the `muda` sandbox by supplying\n    keyword arguments.\n\n    Parameters\n    ----------\n    jam : jams.JAMS\n        A JAMS object\n\n    Returns\n    -------\n    jam : jams.JAMS\n        The updated JAMS object\n\n    Examples\n    --------\n    >>> jam = jams.JAMS()\n    >>> muda.jam_pack(jam, my_data=dict(foo=5, bar=None))\n    >>> jam.sandbox\n    <Sandbox: muda>\n    >>> jam.sandbox.muda\n    <Sandbox: state, version, my_data, history>\n    >>> jam.sandbox.muda.my_data\n    {'foo': 5, 'bar': None}", "id": "f5854:m0"}
{"signature": "def bresenham(x0, y0, x1, y1):", "body": "dx = x1 - x0<EOL>dy = y1 - y0<EOL>xsign = <NUM_LIT:1> if dx > <NUM_LIT:0> else -<NUM_LIT:1><EOL>ysign = <NUM_LIT:1> if dy > <NUM_LIT:0> else -<NUM_LIT:1><EOL>dx = abs(dx)<EOL>dy = abs(dy)<EOL>if dx > dy:<EOL><INDENT>xx, xy, yx, yy = xsign, <NUM_LIT:0>, <NUM_LIT:0>, ysign<EOL><DEDENT>else:<EOL><INDENT>dx, dy = dy, dx<EOL>xx, xy, yx, yy = <NUM_LIT:0>, ysign, xsign, <NUM_LIT:0><EOL><DEDENT>D = <NUM_LIT:2>*dy - dx<EOL>y = <NUM_LIT:0><EOL>for x in range(dx + <NUM_LIT:1>):<EOL><INDENT>yield x0 + x*xx + y*yx, y0 + x*xy + y*yy<EOL>if D >= <NUM_LIT:0>:<EOL><INDENT>y += <NUM_LIT:1><EOL>D -= <NUM_LIT:2>*dx<EOL><DEDENT>D += <NUM_LIT:2>*dy<EOL><DEDENT>", "docstring": "Yield integer coordinates on the line from (x0, y0) to (x1, y1).\n\n    Input coordinates should be integers.\n\n    The result will contain both the start and the end point.", "id": "f5857:m0"}
{"signature": "def _build_settings(config_data):", "body": "spacer = '<STR_LIT:U+0020>'<EOL>text = []<EOL>vars = get_settings()<EOL>vars.MIDDLEWARE_CLASSES.insert(<NUM_LIT:0>, vars.APPHOOK_RELOAD_MIDDLEWARE_CLASS)<EOL>processors = vars.TEMPLATE_CONTEXT_PROCESSORS + vars.TEMPLATE_CONTEXT_PROCESSORS_3<EOL>text.append(data.TEMPLATES_1_8.format(<EOL>loaders=('<STR_LIT>' + spacer * <NUM_LIT:4>).join([<EOL>\"<STR_LIT>\".format(var) for var in vars.TEMPLATE_LOADERS<EOL>if (<EOL>LooseVersion(config_data.django_version) < LooseVersion('<STR_LIT>') or<EOL>'<STR_LIT>' not in var<EOL>)<EOL>]),<EOL>processors=('<STR_LIT>' + spacer * <NUM_LIT:4>).join([\"<STR_LIT>\".format(var) for var in processors]),<EOL>dirs=\"<STR_LIT>\".format(config_data.project_name)<EOL>))<EOL>if LooseVersion(config_data.django_version) >= LooseVersion('<STR_LIT>'):<EOL><INDENT>text.append('<STR_LIT>'.format(<EOL>spacer, ('<STR_LIT>' + spacer).join(['<STR_LIT>'.format(var)<EOL>for var in vars.MIDDLEWARE_CLASSES])<EOL>))<EOL><DEDENT>else:<EOL><INDENT>text.append('<STR_LIT>'.format(<EOL>spacer, ('<STR_LIT>' + spacer).join([\"<STR_LIT>\".format(var)<EOL>for var in vars.MIDDLEWARE_CLASSES])<EOL>))<EOL><DEDENT>apps = list(vars.INSTALLED_APPS)<EOL>apps = list(vars.CMS_3_HEAD) + apps<EOL>apps.extend(vars.TREEBEARD_APPS)<EOL>apps.extend(vars.CMS_3_APPLICATIONS)<EOL>if not config_data.no_plugins:<EOL><INDENT>apps.extend(vars.FILER_PLUGINS_3)<EOL><DEDENT>if config_data.aldryn:  <EOL><INDENT>apps.extend(vars.ALDRYN_APPLICATIONS)<EOL><DEDENT>if config_data.reversion and LooseVersion(config_data.cms_version) < LooseVersion('<STR_LIT>'):<EOL><INDENT>apps.extend(vars.REVERSION_APPLICATIONS)<EOL><DEDENT>text.append('<STR_LIT>'.format(<EOL>spacer, ('<STR_LIT>' + spacer).join(['<STR_LIT>'.format(var) for var in apps] +<EOL>['<STR_LIT>'.format(config_data.project_name)])<EOL>))<EOL>text.append('<STR_LIT>'.format(<EOL>spacer, '<STR_LIT>',<EOL>('<STR_LIT:\\n>' + spacer).join(['<STR_LIT>'.format(item) for item in 
config_data.languages])  <EOL>))<EOL>cms_langs = deepcopy(vars.CMS_LANGUAGES)<EOL>for lang in config_data.languages:<EOL><INDENT>lang_dict = {'<STR_LIT:code>': lang, '<STR_LIT:name>': lang}<EOL>lang_dict.update(copy(cms_langs['<STR_LIT:default>']))<EOL>cms_langs[<NUM_LIT:1>].append(lang_dict)<EOL><DEDENT>cms_text = ['<STR_LIT>']<EOL>cms_text.append('<STR_LIT>'.format(spacer, '<STR_LIT>'))<EOL>for key, value in iteritems(cms_langs):<EOL><INDENT>if key == '<STR_LIT:default>':<EOL><INDENT>cms_text.append('<STR_LIT>'.format(spacer, key))<EOL>for config_name, config_value in iteritems(value):<EOL><INDENT>cms_text.append('<STR_LIT>'.format(spacer * <NUM_LIT:2>, config_name, config_value))<EOL><DEDENT>cms_text.append('<STR_LIT>'.format(spacer))<EOL><DEDENT>else:<EOL><INDENT>cms_text.append('<STR_LIT>'.format(spacer, key))<EOL>for lang in value:<EOL><INDENT>cms_text.append('<STR_LIT>'.format(spacer * <NUM_LIT:2>))<EOL>for config_name, config_value in iteritems(lang):<EOL><INDENT>if config_name == '<STR_LIT:code>':<EOL><INDENT>cms_text.append('<STR_LIT>'.format(spacer * <NUM_LIT:3>, config_name, config_value))  <EOL><DEDENT>elif config_name == '<STR_LIT:name>':<EOL><INDENT>cms_text.append('<STR_LIT>'.format(spacer * <NUM_LIT:3>, config_name, config_value))  <EOL><DEDENT>else:<EOL><INDENT>cms_text.append('<STR_LIT>'.format(<EOL>spacer * <NUM_LIT:3>, config_name, config_value<EOL>))<EOL><DEDENT><DEDENT>cms_text.append('<STR_LIT>'.format(spacer * <NUM_LIT:2>))<EOL><DEDENT>cms_text.append('<STR_LIT>'.format(spacer))<EOL><DEDENT><DEDENT>cms_text.append('<STR_LIT:}>')<EOL>text.append('<STR_LIT:\\n>'.join(cms_text))<EOL>if config_data.bootstrap:<EOL><INDENT>cms_templates = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>cms_templates = '<STR_LIT>'<EOL><DEDENT>text.append('<STR_LIT>'.format(<EOL>spacer, '<STR_LIT>',<EOL>('<STR_LIT>' + spacer).join(<EOL>['<STR_LIT>'.format(*item) for item in getattr(vars, 
cms_templates)]<EOL>)<EOL>))<EOL>text.append('<STR_LIT>'.format(vars.CMS_PERMISSION))<EOL>text.append('<STR_LIT>'.format(vars.CMS_PLACEHOLDER_CONF))<EOL>database = ['<STR_LIT>'.format(key, format_val(val)) for key, val in sorted(config_data.db_parsed.items(), key=lambda x: x[<NUM_LIT:0>])]  <EOL>text.append(textwrap.dedent(\"\"\"<STR_LIT>\"\"\").strip().format(('<STR_LIT>' + spacer * <NUM_LIT:2>).join(database)))  <EOL>DJANGO_MIGRATION_MODULES = _detect_migration_layout(vars, apps)<EOL>text.append('<STR_LIT>'.format(<EOL>spacer, ('<STR_LIT>' + spacer).join(<EOL>['<STR_LIT>'.format(*item) for item in DJANGO_MIGRATION_MODULES.items()]<EOL>)<EOL>))<EOL>if config_data.filer:<EOL><INDENT>text.append('<STR_LIT>'.format(<EOL>spacer, ('<STR_LIT>' + spacer).join(<EOL>['<STR_LIT>'.format(var) for var in vars.THUMBNAIL_PROCESSORS]<EOL>)<EOL>))<EOL><DEDENT>return '<STR_LIT>'.join(text)<EOL>", "docstring": "Build the django CMS settings dictionary\n\n:param config_data: configuration data", "id": "f5883:m5"}
{"signature": "def patch_settings(config_data):", "body": "import django<EOL>current_django_version = LooseVersion(django.__version__)<EOL>declared_django_version = LooseVersion(config_data.django_version)<EOL>if not os.path.exists(config_data.settings_path):<EOL><INDENT>sys.stderr.write(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(config_data.settings_path)<EOL>)<EOL>return sys.exit(<NUM_LIT:5>)<EOL><DEDENT>if current_django_version.version[:<NUM_LIT:2>] != declared_django_version.version[:<NUM_LIT:2>]:<EOL><INDENT>sys.stderr.write(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT:\\n>'.format(<EOL>current_django_version, declared_django_version<EOL>)<EOL>)<EOL>return sys.exit(<NUM_LIT:9>)<EOL><DEDENT>overridden_settings = (<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'<EOL>)<EOL>extra_settings = '<STR_LIT>'<EOL>with open(config_data.settings_path, '<STR_LIT:r>') as fd_original:<EOL><INDENT>original = fd_original.read()<EOL><DEDENT>if config_data.extra_settings and os.path.exists(config_data.extra_settings):<EOL><INDENT>with open(config_data.extra_settings, '<STR_LIT:r>') as fd_extra:<EOL><INDENT>extra_settings = fd_extra.read()<EOL><DEDENT><DEDENT>original = original.replace('<STR_LIT>', '<STR_LIT>')<EOL>if config_data.aldryn:  <EOL><INDENT>DATA_DIR = (<EOL>'<STR_LIT>'<EOL>)<EOL>STATICFILES_DIR = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>DATA_DIR = '<STR_LIT>'<EOL>STATICFILES_DIR = '<STR_LIT>'.format(<EOL>config_data.project_name<EOL>)<EOL><DEDENT>original = data.DEFAULT_PROJECT_HEADER + DATA_DIR + original<EOL>original += '<STR_LIT>'<EOL>original += '<STR_LIT>'<EOL>original += '<STR_LIT>'<EOL>original +=", "docstring": "Modify the settings file created by Django injecting the django CMS\nconfiguration\n\n:param config_data: configuration data", "id": "f5883:m4"}
{"signature": "def _detect_migration_layout(vars, apps):", "body": "DJANGO_MODULES = {}<EOL>for module in vars.MIGRATIONS_CHECK_MODULES:<EOL><INDENT>if module in apps:<EOL><INDENT>try:<EOL><INDENT>mod = __import__('<STR_LIT>'.format(module))  <EOL>DJANGO_MODULES[module] = '<STR_LIT>'.format(module)<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>return DJANGO_MODULES<EOL>", "docstring": "Detect migrations layout for plugins\n:param vars: installer settings\n:param apps: installed applications", "id": "f5883:m1"}
{"signature": "def create_user(config_data):", "body": "with chdir(os.path.abspath(config_data.project_directory)):<EOL><INDENT>env = deepcopy(dict(os.environ))<EOL>env[str('<STR_LIT>')] = str('<STR_LIT>'.format(config_data.project_name))<EOL>env[str('<STR_LIT>')] = str(os.pathsep.join(map(shlex_quote, sys.path)))<EOL>subprocess.check_call(<EOL>[sys.executable, '<STR_LIT>'], env=env, stderr=subprocess.STDOUT<EOL>)<EOL>for ext in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>try:<EOL><INDENT>os.remove('<STR_LIT>'.format(ext))<EOL><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Create admin user without user input\n\n:param config_data: configuration data", "id": "f5883:m7"}
{"signature": "def create_project(config_data):", "body": "env = deepcopy(dict(os.environ))<EOL>env[str('<STR_LIT>')] = str('<STR_LIT>'.format(config_data.project_name))<EOL>env[str('<STR_LIT>')] = str(os.pathsep.join(map(shlex_quote, sys.path)))<EOL>kwargs = {}<EOL>args = []<EOL>if config_data.template:<EOL><INDENT>kwargs['<STR_LIT>'] = config_data.template<EOL><DEDENT>args.append(config_data.project_name)<EOL>if config_data.project_directory:<EOL><INDENT>args.append(config_data.project_directory)<EOL>if not os.path.exists(config_data.project_directory):<EOL><INDENT>os.makedirs(config_data.project_directory)<EOL><DEDENT><DEDENT>base_cmd = '<STR_LIT>'<EOL>start_cmds = [os.path.join(os.path.dirname(sys.executable), base_cmd)]<EOL>start_cmd_pnodes = ['<STR_LIT>']<EOL>start_cmds.extend([<EOL>os.path.join(os.path.dirname(sys.executable), pnode, base_cmd)<EOL>for pnode in start_cmd_pnodes<EOL>])<EOL>start_cmd = [base_cmd]<EOL>for p in start_cmds:<EOL><INDENT>if os.path.exists(p):<EOL><INDENT>start_cmd = [sys.executable, p]<EOL>break<EOL><DEDENT><DEDENT>cmd_args = start_cmd + ['<STR_LIT>'] + args<EOL>if config_data.verbose:<EOL><INDENT>sys.stdout.write('<STR_LIT>'.format('<STR_LIT:U+0020>'.join(cmd_args)))<EOL><DEDENT>try:<EOL><INDENT>output = subprocess.check_output(cmd_args, stderr=subprocess.STDOUT)<EOL>sys.stdout.write(output.decode('<STR_LIT:utf-8>'))<EOL><DEDENT>except subprocess.CalledProcessError as e:  <EOL><INDENT>if config_data.verbose:<EOL><INDENT>sys.stdout.write(e.output.decode('<STR_LIT:utf-8>'))<EOL><DEDENT>raise<EOL><DEDENT>", "docstring": "Call django-admin to create the project structure\n\n:param config_data: configuration data", "id": "f5883:m0"}
{"signature": "def _install_aldryn(config_data):  ", "body": "import requests<EOL>media_project = os.path.join(config_data.project_directory, '<STR_LIT>', '<STR_LIT>')<EOL>static_main = False<EOL>static_project = os.path.join(config_data.project_directory, '<STR_LIT>', '<STR_LIT>')<EOL>template_target = os.path.join(config_data.project_directory, '<STR_LIT>')<EOL>tmpdir = tempfile.mkdtemp()<EOL>aldrynzip = requests.get(data.ALDRYN_BOILERPLATE)<EOL>zip_open = zipfile.ZipFile(BytesIO(aldrynzip.content))<EOL>zip_open.extractall(path=tmpdir)<EOL>for component in os.listdir(os.path.join(tmpdir, '<STR_LIT>')):<EOL><INDENT>src = os.path.join(tmpdir, '<STR_LIT>', component)<EOL>dst = os.path.join(config_data.project_directory, component)<EOL>if os.path.isfile(src):<EOL><INDENT>shutil.copy(src, dst)<EOL><DEDENT>else:<EOL><INDENT>shutil.copytree(src, dst)<EOL><DEDENT><DEDENT>shutil.rmtree(tmpdir)<EOL>return media_project, static_main, static_project, template_target<EOL>", "docstring": "Install aldryn boilerplate\n\n:param config_data: configuration data", "id": "f5883:m2"}
{"signature": "def _convert_config_to_stdin(config, parser):", "body": "keys_empty_values_not_pass = (<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>args = []<EOL>for key, val in config.items(SECTION):<EOL><INDENT>keyp = '<STR_LIT>'.format(key)<EOL>action = parser._option_string_actions[keyp]<EOL>if action.const:<EOL><INDENT>try:<EOL><INDENT>if config.getboolean(SECTION, key):<EOL><INDENT>args.append(keyp)<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>args.extend([keyp, val])  <EOL><DEDENT><DEDENT>elif any([i for i in keys_empty_values_not_pass if i in action.option_strings]):<EOL><INDENT>if val != '<STR_LIT>':<EOL><INDENT>args.extend([keyp, val])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>args.extend([keyp, val])<EOL><DEDENT><DEDENT>return args<EOL>", "docstring": "Convert config options to stdin args.\n\n    Especially boolean values, for more information\n    @see https://docs.python.org/3.4/library/configparser.html#supported-datatypes", "id": "f5887:m2"}
{"signature": "def dump_config_file(filename, args, parser=None):", "body": "config = ConfigParser()<EOL>config.add_section(SECTION)<EOL>if parser is None:<EOL><INDENT>for attr in args:<EOL><INDENT>config.set(SECTION, attr, args.attr)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>keys_empty_values_not_pass = (<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>for action in parser._actions:<EOL><INDENT>if action.dest in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>continue<EOL><DEDENT>keyp = action.option_strings[<NUM_LIT:0>]<EOL>option_name = keyp.lstrip('<STR_LIT:->')<EOL>option_value = getattr(args, action.dest)<EOL>if any([i for i in keys_empty_values_not_pass if i in action.option_strings]):<EOL><INDENT>if action.dest == '<STR_LIT>':<EOL><INDENT>if len(option_value) == <NUM_LIT:1> and option_value[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>config.set(SECTION, option_name, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>config.set(SECTION, option_name, '<STR_LIT:U+002C>'.join(option_value))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>config.set(SECTION, option_name, option_value if option_value else '<STR_LIT>')<EOL><DEDENT><DEDENT>elif action.choices == ('<STR_LIT:yes>', '<STR_LIT>'):<EOL><INDENT>config.set(SECTION, option_name, '<STR_LIT:yes>' if option_value else '<STR_LIT>')<EOL><DEDENT>elif action.dest == '<STR_LIT>':<EOL><INDENT>config.set(SECTION, option_name, option_value if option_value else '<STR_LIT>')<EOL><DEDENT>elif action.dest == '<STR_LIT>':<EOL><INDENT>version = ('<STR_LIT>' if option_value == CMS_VERSION_MATRIX['<STR_LIT>']<EOL>else option_value)<EOL>config.set(SECTION, option_name, version)<EOL><DEDENT>elif action.dest == '<STR_LIT>':<EOL><INDENT>version = ('<STR_LIT>' if option_value == DJANGO_VERSION_MATRIX['<STR_LIT>']<EOL>else option_value)<EOL>config.set(SECTION, option_name, version)<EOL><DEDENT>elif action.const:<EOL><INDENT>config.set(SECTION, option_name, '<STR_LIT:true>' if option_value else 
'<STR_LIT:false>')<EOL><DEDENT>else:<EOL><INDENT>config.set(SECTION, option_name, str(option_value))<EOL><DEDENT><DEDENT><DEDENT>with open(filename, '<STR_LIT:w>') as fp:<EOL><INDENT>config.write(fp)<EOL><DEDENT>", "docstring": "Dump args to config file.", "id": "f5887:m1"}
{"signature": "def validate_project(project_name):", "body": "if '<STR_LIT:->' in project_name:<EOL><INDENT>return None<EOL><DEDENT>if keyword.iskeyword(project_name):<EOL><INDENT>return None<EOL><DEDENT>if project_name in dir(__builtins__):<EOL><INDENT>return None<EOL><DEDENT>try:<EOL><INDENT>__import__(project_name)<EOL>return None<EOL><DEDENT>except ImportError:<EOL><INDENT>return project_name<EOL><DEDENT>", "docstring": "Check the defined project name against keywords, builtins and existing\nmodules to avoid name clashing", "id": "f5890:m0"}
{"signature": "def format_val(val):", "body": "val = text_type(val)<EOL>if val.isdigit():<EOL><INDENT>return int(val)<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>'.format(val)<EOL><DEDENT>", "docstring": "Returns val as integer or as escaped string according to its value\n:param val: any value\n:return: formatted string", "id": "f5897:m3"}
{"signature": "def supported_versions(django, cms):", "body": "cms_version = None<EOL>django_version = None<EOL>try:<EOL><INDENT>cms_version = Decimal(cms)<EOL><DEDENT>except (ValueError, InvalidOperation):<EOL><INDENT>try:<EOL><INDENT>cms_version = CMS_VERSION_MATRIX[str(cms)]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>try:<EOL><INDENT>django_version = Decimal(django)<EOL><DEDENT>except (ValueError, InvalidOperation):<EOL><INDENT>try:<EOL><INDENT>django_version = DJANGO_VERSION_MATRIX[str(django)]<EOL><DEDENT>except KeyError:  <EOL><INDENT>pass<EOL><DEDENT><DEDENT>try:<EOL><INDENT>if (<EOL>cms_version and django_version and<EOL>not (LooseVersion(VERSION_MATRIX[compat.unicode(cms_version)][<NUM_LIT:0>]) <=<EOL>LooseVersion(compat.unicode(django_version)) <=<EOL>LooseVersion(VERSION_MATRIX[compat.unicode(cms_version)][<NUM_LIT:1>]))<EOL>):<EOL><INDENT>raise RuntimeError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(django_version, cms_version)<EOL>)<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>raise RuntimeError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(django_version, cms_version)<EOL>)<EOL><DEDENT>return (<EOL>compat.unicode(django_version) if django_version else django_version,<EOL>compat.unicode(cms_version) if cms_version else cms_version<EOL>)<EOL>", "docstring": "Convert numeric and literal version information to numeric format", "id": "f5897:m1"}
{"signature": "def less_than_version(value):", "body": "items = list(map(int, str(value).split('<STR_LIT:.>')))<EOL>if len(items) == <NUM_LIT:1>:<EOL><INDENT>items.append(<NUM_LIT:0>)<EOL><DEDENT>items[<NUM_LIT:1>] += <NUM_LIT:1><EOL>if value == '<STR_LIT>':<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT:.>'.join(map(str, items))<EOL><DEDENT>", "docstring": "Converts the current version to the next one for inserting into requirements\nin the ' < version' format", "id": "f5897:m2"}
{"signature": "def ipfn_np(self, m, aggregates, dimensions, weight_col='<STR_LIT>'):", "body": "steps = len(aggregates)<EOL>dim = len(m.shape)<EOL>product_elem = []<EOL>tables = [m]<EOL>for inc in range(steps - <NUM_LIT:1>):<EOL><INDENT>tables.append(np.array(np.zeros(m.shape)))<EOL><DEDENT>original = copy.copy(m)<EOL>for inc in range(steps):<EOL><INDENT>if inc == (steps - <NUM_LIT:1>):<EOL><INDENT>table_update = m<EOL>table_current = tables[inc]<EOL><DEDENT>else:<EOL><INDENT>table_update = tables[inc + <NUM_LIT:1>]<EOL>table_current = tables[inc]<EOL><DEDENT>for dimension in dimensions[inc]:<EOL><INDENT>product_elem.append(range(m.shape[dimension]))<EOL><DEDENT>for item in product(*product_elem):<EOL><INDENT>idx = self.index_axis_elem(dim, dimensions[inc], item)<EOL>table_current_slice = table_current[idx]<EOL>mijk = table_current_slice.sum()<EOL>xijk = aggregates[inc]<EOL>xijk = xijk[item]<EOL>if mijk == <NUM_LIT:0>:<EOL><INDENT>table_update[idx] = table_current_slice<EOL><DEDENT>else:<EOL><INDENT>table_update[idx] = table_current_slice * <NUM_LIT:1.0> * xijk / mijk<EOL><DEDENT><DEDENT>product_elem = []<EOL><DEDENT>max_conv = <NUM_LIT:0><EOL>for inc in range(steps):<EOL><INDENT>for dimension in dimensions[inc]:<EOL><INDENT>product_elem.append(range(m.shape[dimension]))<EOL><DEDENT>for item in product(*product_elem):<EOL><INDENT>idx = self.index_axis_elem(dim, dimensions[inc], item)<EOL>ori_ijk = aggregates[inc][item]<EOL>m_slice = m[idx]<EOL>m_ijk = m_slice.sum()<EOL>if abs(m_ijk / ori_ijk - <NUM_LIT:1>) > max_conv:<EOL><INDENT>max_conv = abs(m_ijk / ori_ijk - <NUM_LIT:1>)<EOL><DEDENT><DEDENT>product_elem = []<EOL><DEDENT>return m, max_conv<EOL>", "docstring": "Runs the ipfn method from a matrix m, aggregates/marginals and the dimension(s) preserved.\nFor example:\nfrom ipfn import ipfn\nimport numpy as np\nm = np.array([[8., 4., 6., 7.], [3., 6., 5., 2.], [9., 11., 3., 1.]], )\nxip = np.array([20., 18., 22.])\nxpj = np.array([18., 16., 12., 14.])\naggregates = 
[xip, xpj]\ndimensions = [[0], [1]]\n\nIPF = ipfn(m, aggregates, dimensions)\nm = IPF.iteration()", "id": "f5901:c0:m2"}
{"signature": "def ipfn_df(self, df, aggregates, dimensions, weight_col='<STR_LIT>'):", "body": "steps = len(aggregates)<EOL>tables = [df]<EOL>for inc in range(steps - <NUM_LIT:1>):<EOL><INDENT>tables.append(df.copy())<EOL><DEDENT>original = df.copy()<EOL>inc = <NUM_LIT:0><EOL>for features in dimensions:<EOL><INDENT>if inc == (steps - <NUM_LIT:1>):<EOL><INDENT>table_update = df<EOL>table_current = tables[inc]<EOL><DEDENT>else:<EOL><INDENT>table_update = tables[inc + <NUM_LIT:1>]<EOL>table_current = tables[inc]<EOL><DEDENT>tmp = table_current.groupby(features)[weight_col].sum()<EOL>xijk = aggregates[inc]<EOL>feat_l = []<EOL>for feature in features:<EOL><INDENT>feat_l.append(np.unique(table_current[feature]))<EOL><DEDENT>table_update.set_index(features, inplace=True)<EOL>table_current.set_index(features, inplace=True)<EOL>for feature in product(*feat_l):<EOL><INDENT>den = tmp.loc[feature]<EOL>if den == <NUM_LIT:0>:<EOL><INDENT>table_update.loc[feature, weight_col] =table_current.loc[feature, weight_col] *xijk.loc[feature]<EOL><DEDENT>else:<EOL><INDENT>table_update.loc[feature, weight_col] =table_current.loc[feature, weight_col].astype(float) *xijk.loc[feature] / den<EOL><DEDENT><DEDENT>table_update.reset_index(inplace=True)<EOL>table_current.reset_index(inplace=True)<EOL>inc += <NUM_LIT:1><EOL>feat_l = []<EOL><DEDENT>max_conv = <NUM_LIT:0><EOL>inc = <NUM_LIT:0><EOL>for features in dimensions:<EOL><INDENT>tmp = df.groupby(features)[weight_col].sum()<EOL>ori_ijk = aggregates[inc]<EOL>temp_conv = max(abs(tmp / ori_ijk - <NUM_LIT:1>))<EOL>if temp_conv > max_conv:<EOL><INDENT>max_conv = temp_conv<EOL><DEDENT>inc += <NUM_LIT:1><EOL><DEDENT>return df, max_conv<EOL>", "docstring": "Runs the ipfn method from a dataframe df, aggregates/marginals and the dimension(s) preserved.\nFor example:\nfrom ipfn import ipfn\nimport pandas as pd\nage = [30, 30, 30, 30, 40, 40, 40, 40, 50, 50, 50, 50]\ndistance = [10,20,30,40,10,20,30,40,10,20,30,40]\nm = [8., 4., 6., 7., 3., 6., 5., 2., 
9., 11., 3., 1.]\ndf = pd.DataFrame()\ndf['age'] = age\ndf['distance'] = distance\ndf['total'] = m\n\nxip = df.groupby('age')['total'].sum()\nxip.loc[30] = 20\nxip.loc[40] = 18\nxip.loc[50] = 22\nxpj = df.groupby('distance')['total'].sum()\nxpj.loc[10] = 18\nxpj.loc[20] = 16\nxpj.loc[30] = 12\nxpj.loc[40] = 14\ndimensions = [['age'], ['distance']]\naggregates = [xip, xpj]\n\nIPF = ipfn(df, aggregates, dimensions)\ndf = IPF.iteration()\n\nprint(df)\nprint(df.groupby('age')['total'].sum(), xip)", "id": "f5901:c0:m3"}
{"signature": "def account_is_suspended(self):", "body": "return self._suspended<EOL>", "docstring": "Return if account is suspended.", "id": "f5903:c0:m2"}
{"signature": "def login_data_valid(self):", "body": "login_working = False<EOL>try:<EOL><INDENT>with self._login(requests.Session()) as sess:<EOL><INDENT>sess.get(self._logout_url)<EOL><DEDENT><DEDENT>except self.LoginError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>login_working = True<EOL><DEDENT>return login_working<EOL>", "docstring": "Check for working login data.", "id": "f5903:c0:m3"}
{"signature": "def _login(self, session, get_request=False):", "body": "req = session.post(self._login_url, data=self._logindata)<EOL>if _LOGIN_ERROR_STRING in req.text orreq.status_code == <NUM_LIT> orreq.url == _LOGIN_URL:<EOL><INDENT>err_mess = \"<STR_LIT>\"<EOL>if _LOGIN_LOCKED_MESS in req.text:<EOL><INDENT>err_mess += \"<STR_LIT>\" + _LOGIN_LOCKED_MESS_ENG<EOL>self._suspended = True<EOL>raise self.AccountSuspendedError(err_mess)<EOL><DEDENT>raise self.LoginError(err_mess)<EOL><DEDENT>self._suspended = False  <EOL>return (session, req) if get_request else session<EOL>", "docstring": "Return a session for yesss.at.", "id": "f5903:c0:m1"}
{"signature": "def version(self):", "body": "return self._version<EOL>", "docstring": "Get version of YesssSMS package.", "id": "f5903:c0:m5"}
{"signature": "def __init__(self, yesss_login=YESSS_LOGIN, yesss_pw=YESSS_PASSWD):", "body": "self._version = VERSION<EOL>self._login_url = _LOGIN_URL<EOL>self._logout_url = _LOGOUT_URL<EOL>self._kontomanager = _KONTOMANAGER_URL<EOL>self._websms_url = _WEBSMS_URL<EOL>self._suspended = False<EOL>self._logindata = {'<STR_LIT>': yesss_login,<EOL>'<STR_LIT>': yesss_pw}<EOL>", "docstring": "Initialize YesssSMS member variables.", "id": "f5903:c0:m0"}
{"signature": "def forwards(self, orm):", "body": "for translation in orm['<STR_LIT>'].objects.all():<EOL><INDENT>translation.person.roman_first_name = translation.roman_first_name<EOL>translation.person.roman_last_name = translation.roman_last_name<EOL>translation.person.non_roman_first_name = translation.non_roman_first_name<EOL>translation.person.non_roman_last_name = translation.non_roman_last_name<EOL>translation.person.save()<EOL><DEDENT>", "docstring": "Write your forwards methods here.", "id": "f5932:c0:m0"}
{"signature": "def get_gender(self):", "body": "if self.gender == '<STR_LIT>':<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif self.gender == '<STR_LIT>':<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Returns either 'Mr.' or 'Ms.' depending on the gender.", "id": "f5938:c3:m1"}
{"signature": "def get_non_romanized_last_name(self):", "body": "return self.non_roman_last_name<EOL>", "docstring": "Returns the non roman version of the first name.", "id": "f5938:c3:m6"}
{"signature": "def neighbors_from_pixelization(self, pixels, ridge_points):", "body": "return pixelization_util.voronoi_neighbors_from_pixels_and_ridge_points(pixels=pixels,<EOL>ridge_points=np.asarray(ridge_points))<EOL>", "docstring": "Compute the neighbors of every Voronoi pixel as an ndarray of the pixel index's each pixel shares a \\\n        vertex with.\n\n        The ridge points of the Voronoi grid are used to derive this.\n\n        Parameters\n        ----------\n        ridge_points : scipy.spatial.Voronoi.ridge_points\n            Each Voronoi-ridge (two indexes representing a pixel mapping_matrix).", "id": "f5940:c3:m3"}
{"signature": "def mapper_from_grid_stack_and_border(self, grid_stack, border):", "body": "if border is not None:<EOL><INDENT>relocated_grid_stack = border.relocated_grid_stack_from_grid_stack(grid_stack)<EOL><DEDENT>else:<EOL><INDENT>relocated_grid_stack = grid_stack<EOL><DEDENT>geometry = self.geometry_from_grid(grid=relocated_grid_stack.sub)<EOL>return mappers.RectangularMapper(pixels=self.pixels, grid_stack=relocated_grid_stack, border=border,<EOL>shape=self.shape, geometry=geometry)<EOL>", "docstring": "Setup a rectangular mapper from a rectangular pixelization, as follows:\n\n        1) If a border is supplied, relocate all of the grid-stack's regular and sub grid pixels beyond the border.\n        2) Determine the rectangular pixelization's geometry, by laying the pixelization over the sub-grid.\n        3) Setup the rectangular mapper from the relocated grid-stack and rectangular pixelization.\n\n        Parameters\n        ----------\n        grid_stack : grids.GridStack\n            A stack of grid describing the observed image's pixel coordinates (e.g. an image-grid, sub-grid, etc.).\n        border : grids.RegularGridBorder | None\n            The border of the grid-stack's regular-grid.", "id": "f5940:c2:m3"}
{"signature": "def setup_image_plane_pixelization_grid_from_galaxies_and_grid_stack(galaxies, grid_stack):", "body": "if not isinstance(grid_stack.regular, grids.PaddedRegularGrid):<EOL><INDENT>for galaxy in galaxies:<EOL><INDENT>if hasattr(galaxy, '<STR_LIT>'):<EOL><INDENT>if isinstance(galaxy.pixelization, ImagePlanePixelization):<EOL><INDENT>image_plane_pix_grid = galaxy.pixelization.image_plane_pix_grid_from_regular_grid(<EOL>regular_grid=grid_stack.regular)<EOL>return grid_stack.new_grid_stack_with_pix_grid_added(pix_grid=image_plane_pix_grid.sparse_grid,<EOL>regular_to_nearest_pix=image_plane_pix_grid.regular_to_sparse)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return grid_stack<EOL>", "docstring": "An image-plane pixelization is one where its pixel centres are computed by tracing a sparse grid of pixels from \\\n    the image's regular grid to other planes (e.g. the source-plane).\n\n    Provided a galaxy has an image-plane pixelization, this function returns a new *GridStack* instance where the \\\n    image-plane pixelization's sparse grid is added to it as an attibute.\n\n    Thus, when the *GridStack* are are passed to the *ray_tracing* module this sparse grid is also traced and the \\\n    traced coordinates represent the centre of each pixelization pixel.\n\n    Parameters\n    -----------\n    galaxies : [model.galaxy.galaxy.Galaxy]\n        A list of galaxies, which may contain pixelizations and an *ImagePlanePixelization*.\n    grid_stacks : image.array.grid_stacks.GridStack\n        The collection of grid_stacks (regular, sub, etc.) which the image-plane pixelization grid (referred to as pix) \\\n        may be added to.", "id": "f5940:m0"}
{"signature": "def mapper_from_grid_stack_and_border(self, grid_stack, border):", "body": "if border is not None:<EOL><INDENT>relocated_grids = border.relocated_grid_stack_from_grid_stack(grid_stack)<EOL><DEDENT>else:<EOL><INDENT>relocated_grids = grid_stack<EOL><DEDENT>pixel_centres = relocated_grids.pix<EOL>pixels = pixel_centres.shape[<NUM_LIT:0>]<EOL>voronoi = self.voronoi_from_pixel_centers(pixel_centres)<EOL>pixel_neighbors, pixel_neighbors_size = self.neighbors_from_pixelization(pixels=pixels,<EOL>ridge_points=voronoi.ridge_points)<EOL>geometry = self.geometry_from_grid(grid=relocated_grids.sub, pixel_centres=pixel_centres,<EOL>pixel_neighbors=pixel_neighbors,<EOL>pixel_neighbors_size=pixel_neighbors_size)<EOL>return mappers.VoronoiMapper(pixels=pixels, grid_stack=relocated_grids, border=border,<EOL>voronoi=voronoi, geometry=geometry)<EOL>", "docstring": "Setup a Voronoi mapper from an adaptive-magnification pixelization, as follows:\n\n        1) (before this routine is called), setup the 'pix' grid as part of the grid-stack, which corresponds to a \\\n           sparse set of pixels in the image-plane which are traced to form the pixel centres.\n        2) If a border is supplied, relocate all of the grid-stack's regular, sub and pix grid pixels beyond the border.\n        3) Determine the adaptive-magnification pixelization's pixel centres, by extracting them from the relocated \\\n           pix grid.\n        4) Use these pixelization centres to setup the Voronoi pixelization.\n        5) Determine the neighbors of every Voronoi cell in the Voronoi pixelization.\n        6) Setup the geometry of the pixelizatioon using the relocated sub-grid and Voronoi pixelization.\n        7) Setup a Voronoi mapper from all of the above quantities.\n\n        Parameters\n        ----------\n        grid_stack : grids.GridStack\n            A collection of grid describing the observed image's pixel coordinates (includes an image and sub grid).\n        border : 
grids.RegularGridBorder\n            The borders of the grid_stacks (defined by their image-plane masks).", "id": "f5940:c4:m1"}
{"signature": "def __init__(self):", "body": "", "docstring": "Abstract base class for a pixelization, which discretizes grid_stack of (y,x) coordinates into pixels.", "id": "f5940:c1:m0"}
{"signature": "def __init__(self, shape=(<NUM_LIT:3>, <NUM_LIT:3>)):", "body": "super(AdaptiveMagnification, self).__init__()<EOL>ImagePlanePixelization.__init__(self=self, shape=shape)<EOL>", "docstring": "A pixelization which adapts to the magnification pattern of a lens's mass model and uses a Voronoi \\\n        pixelization to discretize the grid into pixels.\n\n        Parameters\n        ----------\n        shape : (int, int)\n            The shape of the unmasked sparse-grid which is laid over the masked image, in order to derive the \\\n            adaptive-magnification pixelization (see *ImagePlanePixelization*)", "id": "f5940:c4:m0"}
{"signature": "def __init__(self, image_1d, noise_map_1d, convolver, mapper, regularization):", "body": "self.mapper = mapper<EOL>self.regularization = regularization<EOL>self.blurred_mapping_matrix = convolver.convolve_mapping_matrix(mapping_matrix=mapper.mapping_matrix)<EOL>self.data_vector = inversion_util.data_vector_from_blurred_mapping_matrix_and_data(<EOL>blurred_mapping_matrix=self.blurred_mapping_matrix, image_1d=image_1d, noise_map_1d=noise_map_1d)<EOL>self.curvature_matrix = inversion_util.curvature_matrix_from_blurred_mapping_matrix(<EOL>blurred_mapping_matrix=self.blurred_mapping_matrix, noise_map_1d=noise_map_1d)<EOL>self.regularization_matrix =regularization.regularization_matrix_from_pixel_neighbors(pixel_neighbors=mapper.geometry.pixel_neighbors,<EOL>pixel_neighbors_size=mapper.geometry.pixel_neighbors_size)<EOL>self.curvature_reg_matrix = np.add(self.curvature_matrix, self.regularization_matrix)<EOL>self.solution_vector = np.linalg.solve(self.curvature_reg_matrix, self.data_vector)<EOL>", "docstring": "An inversion, which given an input image and noise-map reconstructs the image using a linear inversion, \\\n        including a convolution that accounts for blurring.\n\n        The inversion uses a 2D pixelization to perform the reconstruction by mapping each pixelization pixel to a \\\n        set of image pixels via a mapper. The reconstructed pixelization is smoothed via a regularization scheme to \\\n        prevent over-fitting noise.\n\n        Parameters\n        -----------\n        image_1d : ndarray\n            Flattened 1D array of the observed image the inversion is fitting.\n        noise_map_1d : ndarray\n            Flattened 1D array of the noise-map used by the inversion during the fit.   
\n        convolver : ccd.convolution.Convolver\n            The convolver used to blur the mapping matrix with the PSF.\n        mapper : inversion.mappers.Mapper\n            The mapping between the image-pixels (via its regular / sub-grid) and pixelization pixels.\n        regularization : inversion.regularization.Regularization\n            The regularization scheme applied to smooth the pixelization used to reconstruct the image for the \\\n            inversion\n\n        Attributes\n        -----------\n        blurred_mapping_matrix : ndarray\n            The matrix representing the blurred mappings between the image's sub-grid of pixels and the pixelization \\\n            pixels.\n        regularization_matrix : ndarray\n            The matrix defining how the pixelization's pixels are regularized with one another for smoothing (H).\n        curvature_matrix : ndarray\n            The curvature_matrix between each pixelization pixel and all other pixelization pixels (F).\n        curvature_reg_matrix : ndarray\n            The curvature_matrix + regularization matrix.\n        solution_vector : ndarray\n            The vector containing the reconstructed fit to the hyper.", "id": "f5941:c0:m0"}
{"signature": "@decorator_util.jit()<EOL>def voronoi_regular_to_pix_from_grids_and_geometry(regular_grid, regular_to_nearest_pix, pixel_centres,<EOL>pixel_neighbors, pixel_neighbors_size):", "body": "regular_to_pix = np.zeros((regular_grid.shape[<NUM_LIT:0>]))<EOL>for regular_index in range(regular_grid.shape[<NUM_LIT:0>]):<EOL><INDENT>nearest_pix_pixel_index = regular_to_nearest_pix[regular_index]<EOL>while True:<EOL><INDENT>nearest_pix_pixel_center = pixel_centres[nearest_pix_pixel_index]<EOL>sub_to_nearest_pix_distance = (regular_grid[regular_index, <NUM_LIT:0>] - nearest_pix_pixel_center[<NUM_LIT:0>]) ** <NUM_LIT:2> +(regular_grid[regular_index, <NUM_LIT:1>] - nearest_pix_pixel_center[<NUM_LIT:1>]) ** <NUM_LIT:2><EOL>closest_separation_from_pix_neighbor = <NUM_LIT><EOL>for neighbor_index in range(pixel_neighbors_size[nearest_pix_pixel_index]):<EOL><INDENT>neighbor = pixel_neighbors[nearest_pix_pixel_index, neighbor_index]<EOL>separation_from_neighbor = (regular_grid[regular_index, <NUM_LIT:0>] - pixel_centres[neighbor, <NUM_LIT:0>]) ** <NUM_LIT:2> +(regular_grid[regular_index, <NUM_LIT:1>] - pixel_centres[neighbor, <NUM_LIT:1>]) ** <NUM_LIT:2><EOL>if separation_from_neighbor < closest_separation_from_pix_neighbor:<EOL><INDENT>closest_separation_from_pix_neighbor = separation_from_neighbor<EOL>closest_neighbor_index = neighbor_index<EOL><DEDENT><DEDENT>neighboring_pix_pixel_index = pixel_neighbors[nearest_pix_pixel_index, closest_neighbor_index]<EOL>sub_to_neighboring_pix_distance = closest_separation_from_pix_neighbor<EOL>if sub_to_nearest_pix_distance <= sub_to_neighboring_pix_distance:<EOL><INDENT>regular_to_pix[regular_index] = nearest_pix_pixel_index<EOL>break<EOL><DEDENT>else:<EOL><INDENT>nearest_pix_pixel_index = neighboring_pix_pixel_index<EOL><DEDENT><DEDENT><DEDENT>return regular_to_pix<EOL>", "docstring": "Compute the mappings between a set of regular-grid pixels and pixelization pixels, using information on \\\n    how regular pixels map to their 
closest pixelization pixel on the image-plane pix-grid and the pixelization's \\\n    pixel centres.\n\n    To determine the complete set of regular-pixel to pixelization pixel mappings, we must pair every regular-pixel to \\\n    its nearest pixel. Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via \\\n    the Voronoi grid) are used to localize each nearest neighbor search via a graph search.\n\n    Parameters\n    ----------\n    regular_grid : RegularGrid\n        The grid of (y,x) arc-second coordinates at the centre of every unmasked pixel, which has been traced to \\\n        to an irregular grid via lens.\n    regular_to_nearest_pix : ndarray\n        A 1D array that maps every regular-grid pixel to its nearest pix-grid pixel (as determined on the unlensed \\\n        2D array).\n    pixel_centres : ndarray\n        The (y,x) centre of every Voronoi pixel in arc-seconds.\n    pixel_neighbors : ndarray\n        An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \\\n        the Voronoi grid (entries of -1 correspond to no neighbor).\n    pixel_neighbors_size : ndarray\n        An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \\\n        Voronoi grid.", "id": "f5942:m1"}
{"signature": "@decorator_util.jit()<EOL>def mapping_matrix_from_sub_to_pix(sub_to_pix, pixels, regular_pixels, sub_to_regular, sub_grid_fraction):", "body": "mapping_matrix = np.zeros((regular_pixels, pixels))<EOL>for sub_index in range(sub_to_regular.shape[<NUM_LIT:0>]):<EOL><INDENT>mapping_matrix[sub_to_regular[sub_index], sub_to_pix[sub_index]] += sub_grid_fraction<EOL><DEDENT>return mapping_matrix<EOL>", "docstring": "Computes the mapping matrix, by iterating over the known mappings between the sub-grid and pixelization.\n\n    Parameters\n    -----------\n    sub_to_pix : ndarray\n        The mappings between the observed regular's sub-pixels and pixelization's pixels.\n    pixels : int\n        The number of pixels in the pixelization.\n    regular_pixels : int\n        The number of datas pixels in the observed datas and thus on the regular grid.\n    sub_to_regular : ndarray\n        The mappings between the observed regular's sub-pixels and observed regular's pixels.\n    sub_grid_fraction : float\n        The fractional area each sub-pixel takes up in an regular-pixel.", "id": "f5942:m0"}
{"signature": "@decorator_util.jit()<EOL>def voronoi_neighbors_from_pixels_and_ridge_points(pixels, ridge_points):", "body": "pixel_neighbors_size = np.zeros(shape=(pixels))<EOL>for ridge_index in range(ridge_points.shape[<NUM_LIT:0>]):<EOL><INDENT>pair0 = ridge_points[ridge_index, <NUM_LIT:0>]<EOL>pair1 = ridge_points[ridge_index, <NUM_LIT:1>]<EOL>pixel_neighbors_size[pair0] += <NUM_LIT:1><EOL>pixel_neighbors_size[pair1] += <NUM_LIT:1><EOL><DEDENT>pixel_neighbors_index = np.zeros(shape=(pixels))<EOL>pixel_neighbors = -<NUM_LIT:1> * np.ones(shape=(pixels, int(np.max(pixel_neighbors_size))))<EOL>for ridge_index in range(ridge_points.shape[<NUM_LIT:0>]):<EOL><INDENT>pair0 = ridge_points[ridge_index, <NUM_LIT:0>]<EOL>pair1 = ridge_points[ridge_index, <NUM_LIT:1>]<EOL>pixel_neighbors[pair0, int(pixel_neighbors_index[pair0])] = pair1<EOL>pixel_neighbors[pair1, int(pixel_neighbors_index[pair1])] = pair0<EOL>pixel_neighbors_index[pair0] += <NUM_LIT:1><EOL>pixel_neighbors_index[pair1] += <NUM_LIT:1><EOL><DEDENT>return pixel_neighbors, pixel_neighbors_size<EOL>", "docstring": "Compute the neighbors of every pixel as a list of the pixel index's each pixel shares a vertex with.\n\n    The ridge points of the Voronoi grid are used to derive this.\n\n    Parameters\n    ----------\n    ridge_points : scipy.spatial.Voronoi.ridge_points\n        Each Voronoi-ridge (two indexes representing a pixel mapping_matrix).", "id": "f5944:m7"}
{"signature": "@decorator_util.jit()<EOL>def curvature_matrix_from_blurred_mapping_matrix_jit(blurred_mapping_matrix, noise_map_1d, flist, iflist):", "body": "curvature_matrix = np.zeros((blurred_mapping_matrix.shape[<NUM_LIT:1>], blurred_mapping_matrix.shape[<NUM_LIT:1>]))<EOL>for image_index in range(blurred_mapping_matrix.shape[<NUM_LIT:0>]):<EOL><INDENT>index = <NUM_LIT:0><EOL>for pixel_index in range(blurred_mapping_matrix.shape[<NUM_LIT:1>]):<EOL><INDENT>if blurred_mapping_matrix[image_index, pixel_index] > <NUM_LIT:0.0>:<EOL><INDENT>flist[index] = blurred_mapping_matrix[image_index, pixel_index] / noise_map_1d[image_index]<EOL>iflist[index] = pixel_index<EOL>index += <NUM_LIT:1><EOL><DEDENT><DEDENT>if index > <NUM_LIT:0>:<EOL><INDENT>for i1 in range(index):<EOL><INDENT>for j1 in range(index):<EOL><INDENT>ix = iflist[i1]<EOL>iy = iflist[j1]<EOL>curvature_matrix[ix, iy] += flist[i1] * flist[j1]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>for i in range(blurred_mapping_matrix.shape[<NUM_LIT:1>]):<EOL><INDENT>for j in range(blurred_mapping_matrix.shape[<NUM_LIT:1>]):<EOL><INDENT>curvature_matrix[i, j] = curvature_matrix[j, i]<EOL><DEDENT><DEDENT>return curvature_matrix<EOL>", "docstring": "Compute the curvature matrix *F* from a blurred mapping matrix *f* and the 1D noise-map *\\sigma* \\\n    (see Warren & Dye 2003).\n\n    Parameters\n    -----------\n    blurred_mapping_matrix : ndarray\n        The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.\n    noise_map_1d : ndarray\n        Flattened 1D array of the noise-map used by the inversion during the fit.\n    flist : ndarray\n        NumPy array of floats used to store mappings for efficienctly calculation.\n    iflist : ndarray\n        NumPy array of integers used to store mappings for efficienctly calculation.", "id": "f5945:m2"}
{"signature": "def __init__(self, coefficients=(<NUM_LIT:1.0>,)):", "body": "self.coefficients = coefficients<EOL>", "docstring": "Abstract base class for a regularization-scheme, which is applied to a pixelization to enforce a \\\n        smooth-source solution and prevent over-fitting noise_map in the hyper. This is achieved by computing a \\\n        'regularization term' - which is the sum of differences in reconstructed flux between every set of neighboring \\\n        pixels. This regularization term is added to the solution's chi-squared as a penalty term. This effects \\\n        a pixelization in the following ways:\n\n        1) The regularization matrix (see below) is added to the curvature matrix used by the inversion to \\\n           linearly invert and fit the hyper. Thus, it changes the pixelization in a linear manner, ensuring that \\\n           the minimum chi-squared solution is achieved accounting for the penalty term.\n\n        2) The likelihood of the pixelization's fit to the hyper changes from L = -0.5 *(chi^2 + noise_normalization) \\\n           to L = -0.5 (chi^2 + coefficients * regularization_term + noise_normalization). The regularization \\\n           coefficient is a 'hyper-parameter' which determines how strongly we smooth the pixelization's reconstruction.\n\n        The value of the coefficients(s) is set using the Bayesian framework of (Suyu 2006) and this \\\n        is described further in the (*inversion.Inversion* class).\n\n        The regularization matrix, H, is calculated by defining a set of B matrices which describe how the \\\n        pixels neighbor one another. For example, lets take a 3x3 square grid:\n        ______\n        |0|1|2|\n        |3|4|5|\n        |6|7|8|\n        ^^^^^^^\n\n        We want to regularize this grid such that each pixel is regularized with the pixel to its right and below it \\\n        (provided there are pixels in that direction). 
This means that:\n\n        - pixel 0 is regularized with pixel 1 (to the right) and pixel 3 (below).\n        - pixel 1 is regularized with pixel 2 (to the right) and pixel 4 (below),\n        - Pixel 2 is only regularized with pixel 5, as there is no pixel to its right.\n        - and so on.\n\n        We make two 9 x 9 B matrices, which describe regularization in each direction (i.e. rightwards and downwards). \\\n        We simply put a -1 and 1 in each row of a pixel index where it has a neighbor, where the value 1 goes in the \\\n        column of its neighbor's index. Thus, the B matrix describing neighboring pixels to their right looks like:\n\n        B_x = [-1,  1,  0,  0,  0,  0,  0,  0,  0] # [0->1]\n              [ 0, -1,  1,  0,  0,  0,  0,  0,  0] # [1->2]\n              [ 0,  0, -1,  0,  0,  0,  0,  0,  0] # [] NOTE - no pixel neighbor.\n              [ 0,  0,  0, -1,  1,  0,  0,  0,  0] # [3->4]\n              [ 0,  0,  0,  0, -1,  1,  0,  0,  0] # [4->5]\n              [ 0,  0,  0,  0,  0, -1,  0,  0,  0] # [] NOTE - no pixel neighbor.\n              [ 0,  0,  0,  0,  0,  0, -1,  1,  0] # [6->7]\n              [ 0,  0,  0,  0,  0,  0,  0, -1,  1] # [7->8]\n              [ 0,  0,  0,  0,  0,  0,  0,  0, -1] # [] NOTE - no pixel neighbor.\n\n        We now make another B matrix for the regularization downwards:\n\n        B_y = [-1,  0,  0,  1,  0,  0,  0,  0,  0] # [0->3]\n              [ 0, -1,  0,  0,  1,  0,  0,  0,  0] # [1->4]\n              [ 0,  0, -1,  0,  0,  1,  0,  0,  0] # [2->5]\n              [ 0,  0,  0, -1,  0,  0,  1,  0,  0] # [3->6]\n              [ 0,  0,  0,  0, -1,  0,  0,  1,  0] # [4->7]\n              [ 0,  0,  0,  0,  0, -1,  0,  0,  1] # [5->8]\n              [ 0,  0,  0,  0,  0,  0, -1,  0,  0] # [] NOTE - no pixel neighbor.\n              [ 0,  0,  0,  0,  0,  0,  0, -1,  0] # [] NOTE - no pixel neighbor.\n              [ 0,  0,  0,  0,  0,  0,  0,  0, -1] # [] NOTE - no pixel neighbor.\n\n        After making the B 
matrices that represent our pixel neighbors, we can compute the regularization matrix, H, \\\n        of each direction as H = B * B.T (matrix multiplication).\n\n        E.g.\n\n        H_x = B_x.T, * B_x\n        H_y = B_y.T * B_y\n        H = H_x + H_y\n\n        Whilst the example above used a square-grid with regularization to the right and downwards, this matrix \\\n        formalism can be extended to describe regularization in more directions (e.g. upwards, to the left).\n\n        It can also describe irregular pixelizations, e.g. an irregular Voronoi pixelization, where a B matrix is \\\n        computed for every shared Voronoi vertex of each Voronoi pixel. The number of B matrices is now equal to the \\\n        number of Voronoi vertices in the pixel with the most Voronoi vertices. However, we describe below a scheme to \\\n        compute this solution more efficiently.\n\n        ### COMBINING B MATRICES ###\n\n        The B matrices above each had the -1's going down the diagonal. This is not necessary, and it is valid to put \\\n        each pixel pairing anywhere. So, if we had a 4x4 B matrix, where:\n\n        - pixel 0 regularizes with pixel 1\n        - pixel 2 regularizes with pixel 3\n        - pixel 3 regularizes with pixel 0\n\n        We can still set this up as one matrix (even though the pixel 0 comes up twice):\n\n        B = [-1, 1,  0 , 0] # [0->1]\n            [ 0, 0,  0 , 0] # We can skip rows by making them all zeros.\n            [ 0, 0, -1 , 1] # [2->3]\n            [ 1, 0,  0 ,-1] # [3->0] This is valid!\n\n        So, for a Voronoi pixelzation, we don't have to make the same number of B matrices as Voronoi vertices,  \\\n        we can combine them into fewer B matrices as above.\n\n        # SKIPPING THE B MATRIX CALCULATION #\n\n        Infact, going through the rigmarole of computing and multiplying B matrices like this is uncessary. It is \\\n        more computationally efficiently to directly compute H. 
This is possible, provided you know know all of the \\\n        neighboring pixel pairs (which, by definition, you need to know to set up the B matrices anyway). Thus, the \\\n       'regularization_matrix_from_pixel_neighbors' functions in this module directly compute H from the pixel \\\n        neighbors.\n\n        # POSITIVE DEFINITE MATRIX #\n\n        The regularization matrix must be positive-definite, as the Bayesian framework of Suyu 2006 requires that we \\\n        use its determinant in the calculation.\n\n        Parameters\n        -----------\n        shape : (int, int)\n            The dimensions of the rectangular grid of pixels (x_pixels, y_pixel)\n        coefficients : (float,)\n            The regularization_matrix coefficients used to smooth the pix reconstructed_inversion_image.", "id": "f5946:c0:m0"}
{"signature": "def __init__(self, coefficients=(<NUM_LIT:1.0>, <NUM_LIT:1.0>), signal_scale=<NUM_LIT:1.0>):", "body": "super(Weighted, self).__init__(coefficients)<EOL>self.signal_scale = signal_scale<EOL>", "docstring": "A constant-regularization scheme (regularization is described in the *Regularization* class above).\n\n        For the weighted regularization scheme, each pixel is given an 'effective regularization weight', which is \\\n        applied when each set of pixel neighbors are regularized with one another. The motivation of this is that \\\n        different regions of a pixelization require different levels of regularization (e.g., high smoothing where the \\\n        no signal is present and less smoothing where it is, see (Nightingale, Dye and Massey 2018)).\n\n        Unlike the constant regularization_matrix scheme, neighboring pixels must now be regularized with one another \\\n        in both directions (e.g. if pixel 0 regularizes pixel 1, pixel 1 must also regularize pixel 0). For example:\n\n        B = [-1, 1]  [0->1]\n            [-1, -1]  1 now also regularizes 0\n\n        For a constant regularization coefficient this would NOT produce a positive-definite matrix. 
However, for\n        the weighted scheme, it does!\n\n        The regularize weights change the B matrix as shown below - we simply multiply each pixel's effective \\\n        regularization weight by each row of B it has a -1 in, so:\n\n        regularization_weights = [1, 2, 3, 4]\n\n        B = [-1, 1, 0 ,0] # [0->1]\n            [0, -2, 2 ,0] # [1->2]\n            [0, 0, -3 ,3] # [2->3]\n            [4, 0, 0 ,-4] # [3->0]\n\n        If our -1's werent down the diagonal this would look like:\n\n        B = [4, 0, 0 ,-4] # [3->0]\n            [0, -2, 2 ,0] # [1->2]\n            [-1, 1, 0 ,0] # [0->1]\n            [0, 0, -3 ,3] # [2->3] This is valid!\n\n        Parameters\n        -----------\n        coefficients : (float, float)\n            The regularization coefficients which controls the degree of smoothing of the inversion reconstruction in \\\n            high and low signal regions of the reconstruction.\n        signal_scale : float\n            A factor which controls how rapidly the smoothness of regularization varies from high signal regions to \\\n            low signal regions.", "id": "f5946:c2:m0"}
{"signature": "def voronoi_finite_polygons_2d(vor, radius=None):", "body": "if vor.points.shape[<NUM_LIT:1>] != <NUM_LIT:2>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>new_regions = []<EOL>new_vertices = vor.vertices.tolist()<EOL>center = vor.points.mean(axis=<NUM_LIT:0>)<EOL>if radius is None:<EOL><INDENT>radius = vor.points.ptp().max()*<NUM_LIT:2><EOL><DEDENT>all_ridges = {}<EOL>for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):<EOL><INDENT>all_ridges.setdefault(p1, []).append((p2, v1, v2))<EOL>all_ridges.setdefault(p2, []).append((p1, v1, v2))<EOL><DEDENT>for p1, region in enumerate(vor.point_region):<EOL><INDENT>vertices = vor.regions[region]<EOL>if all(v >= <NUM_LIT:0> for v in vertices):<EOL><INDENT>new_regions.append(vertices)<EOL>continue<EOL><DEDENT>ridges = all_ridges[p1]<EOL>new_region = [v for v in vertices if v >= <NUM_LIT:0>]<EOL>for p2, v1, v2 in ridges:<EOL><INDENT>if v2 < <NUM_LIT:0>:<EOL><INDENT>v1, v2 = v2, v1<EOL><DEDENT>if v1 >= <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>t = vor.points[p2] - vor.points[p1] <EOL>t /= np.linalg.norm(t)<EOL>n = np.array([-t[<NUM_LIT:1>], t[<NUM_LIT:0>]])  <EOL>midpoint = vor.points[[p1, p2]].mean(axis=<NUM_LIT:0>)<EOL>direction = np.sign(np.dot(midpoint - center, n)) * n<EOL>far_point = vor.vertices[v2] + direction * radius<EOL>new_region.append(len(new_vertices))<EOL>new_vertices.append(far_point.tolist())<EOL><DEDENT>vs = np.asarray([new_vertices[v] for v in new_region])<EOL>c = vs.mean(axis=<NUM_LIT:0>)<EOL>angles = np.arctan2(vs[:,<NUM_LIT:1>] - c[<NUM_LIT:1>], vs[:,<NUM_LIT:0>] - c[<NUM_LIT:0>])<EOL>new_region = np.array(new_region)[np.argsort(angles)]<EOL>new_regions.append(new_region.tolist())<EOL><DEDENT>return new_regions, np.asarray(new_vertices)<EOL>", "docstring": "Reconstruct infinite voronoi regions in a 2D diagram to finite\nregions.\nParameters\n----------\nvor : Voronoi\n    Input diagram\nradius : float, optional\n    Distance to 'points at 
infinity'.\nReturns\n-------\nregions : list of tuples\n    Indices of vertices in each revised Voronoi regions.\nvertices : list of tuples\n    Coordinates for revised Voronoi vertices. Same as coordinates\n    of input vertices, with 'points at infinity' appended to the\n    end.", "id": "f5949:m4"}
{"signature": "def reconstructed_pixelization_from_solution_vector(self, solution_vector):", "body": "recon = mapping_util.map_unmasked_1d_array_to_2d_array_from_array_1d_and_shape(array_1d=solution_vector,<EOL>shape=self.shape)<EOL>return scaled_array.ScaledRectangularPixelArray(array=recon, pixel_scales=self.geometry.pixel_scales,<EOL>origin=self.geometry.origin)<EOL>", "docstring": "Given the solution vector of an inversion (see *inversions.Inversion*), determine the reconstructed \\\n        pixelization of the rectangular pixelization by using the mapper.", "id": "f5950:c1:m4"}
{"signature": "@property<EOL><INDENT>def regular_to_pix(self):<DEDENT>", "body": "return mapper_util.voronoi_regular_to_pix_from_grids_and_geometry(regular_grid=self.grid_stack.regular,<EOL>regular_to_nearest_pix=self.grid_stack.pix.regular_to_nearest_pix,<EOL>pixel_centres=self.geometry.pixel_centres, pixel_neighbors=self.geometry.pixel_neighbors,<EOL>pixel_neighbors_size=self.geometry.pixel_neighbors_size).astype('<STR_LIT:int>')<EOL>", "docstring": "The 1D index mappings between the regular pixels and Voronoi pixelization pixels.", "id": "f5950:c2:m2"}
{"signature": "@property<EOL><INDENT>def mapping_matrix(self):<DEDENT>", "body": "return mapper_util.mapping_matrix_from_sub_to_pix(sub_to_pix=self.sub_to_pix, pixels=self.pixels,<EOL>regular_pixels=self.grid_stack.regular.shape[<NUM_LIT:0>],<EOL>sub_to_regular=self.grid_stack.sub.sub_to_regular,<EOL>sub_grid_fraction=self.grid_stack.sub.sub_grid_fraction)<EOL>", "docstring": "The mapping matrix is a matrix representing the mapping between every unmasked pixel of a grid and \\\n        the pixels of a pixelization. Non-zero entries signify a mapping, whereas zeros signify no mapping.\n\n        For example, if the regular grid has 5 pixels and the pixelization 3 pixels, with the following mappings:\n\n        regular pixel 0 -> pixelization pixel 0\n        regular pixel 1 -> pixelization pixel 0\n        regular pixel 2 -> pixelization pixel 1\n        regular pixel 3 -> pixelization pixel 1\n        regular pixel 4 -> pixelization pixel 2\n\n        The mapping matrix (which is of dimensions regular_pixels x pixelization_pixels) would appear as follows:\n\n        [1, 0, 0] [0->0]\n        [1, 0, 0] [1->0]\n        [0, 1, 0] [2->1]\n        [0, 1, 0] [3->1]\n        [0, 0, 1] [4->2]\n\n        The mapping matrix is in fact built using the sub-grid of the grid-stack, whereby each regular-pixel is \\\n        divided into a regular grid of sub-pixels which are all paired to pixels in the pixelization. The entires \\\n        in the mapping matrix now become fractional values dependent on the sub-grid size. 
For example, for a 2x2 \\\n        sub-grid in each pixel (which means the fraction value is 1.0/(2.0^2) = 0.25, if we have the following mappings:\n\n        regular pixel 0 -> sub pixel 0 -> pixelization pixel 0\n        regular pixel 0 -> sub pixel 1 -> pixelization pixel 1\n        regular pixel 0 -> sub pixel 2 -> pixelization pixel 1\n        regular pixel 0 -> sub pixel 3 -> pixelization pixel 1\n        regular pixel 1 -> sub pixel 0 -> pixelization pixel 1\n        regular pixel 1 -> sub pixel 1 -> pixelization pixel 1\n        regular pixel 1 -> sub pixel 2 -> pixelization pixel 1\n        regular pixel 1 -> sub pixel 3 -> pixelization pixel 1\n        regular pixel 2 -> sub pixel 0 -> pixelization pixel 2\n        regular pixel 2 -> sub pixel 1 -> pixelization pixel 2\n        regular pixel 2 -> sub pixel 2 -> pixelization pixel 3\n        regular pixel 2 -> sub pixel 3 -> pixelization pixel 3\n\n        The mapping matrix (which is still of dimensions regular_pixels x source_pixels) would appear as follows:\n\n        [0.25, 0.75, 0.0, 0.0] [1 sub-pixel maps to pixel 0, 3 map to pixel 1]\n        [ 0.0,  1.0, 0.0, 0.0] [All sub-pixels map to pixel 1]\n        [ 0.0,  0.0, 0.5, 0.5] [2 sub-pixels map to pixel 2, 2 map to pixel 3]", "id": "f5950:c0:m1"}
{"signature": "def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>,<EOL>sersic_index: float = <NUM_LIT>,<EOL>mass_to_light_ratio: dim.MassOverLuminosity = <NUM_LIT:1.0>,<EOL>mass_to_light_gradient: float = <NUM_LIT:0.0>):", "body": "EllipticalSersicRadialGradient.__init__(self, centre=centre, axis_ratio=<NUM_LIT:1.0>, phi=<NUM_LIT:0.0>, intensity=intensity,<EOL>effective_radius=effective_radius, sersic_index=sersic_index,<EOL>mass_to_light_ratio=mass_to_light_ratio,<EOL>mass_to_light_gradient=mass_to_light_gradient)<EOL>", "docstring": "Setup a Sersic mass and light profiles.\n\nParameters\n----------\ncentre: (float, float)\n    The origin of the profiles\nintensity : float\n    Overall flux intensity normalisation in the light profiles (electrons per second)\neffective_radius : float\n    The radius containing half the light of this model_mapper\nsersic_index : Int\n    The concentration of the light profiles\nmass_to_light_ratio : float\n    The mass-to-light ratio of the light profiles\nmass_to_light_gradient : float\n    The mass-to-light radial gradient.", "id": "f5951:c7:m0"}
{"signature": "@property<EOL><INDENT>def elliptical_effective_radius(self):<DEDENT>", "body": "return self.effective_radius / np.sqrt(self.axis_ratio)<EOL>", "docstring": "The effective_radius of a Sersic light profile is defined as the circular effective radius. This is the \\\n        radius within which a circular aperture contains half the profiles's total integrated light. For elliptical \\\n        systems, this won't robustly capture the light profile's elliptical shape.\n\n        The elliptical effective radius instead describes the major-axis radius of the ellipse containing \\\n        half the light, and may be more appropriate for highly flattened systems like disk galaxies.", "id": "f5952:c18:m6"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>einstein_radius: dim.Length = <NUM_LIT:1.0>,<EOL>core_radius: dim.Length = <NUM_LIT>):<DEDENT>", "body": "super(SphericalCoredIsothermal, self).__init__(centre=centre, einstein_radius=einstein_radius, slope=<NUM_LIT>,<EOL>core_radius=core_radius)<EOL>", "docstring": "Represents a cored spherical isothermal density distribution, which is equivalent to the elliptical power-law\ndensity distribution for the value slope: float = 2.0\n\nParameters\n----------\ncentre: (float, float)\n    The (y,x) arc-second coordinates of the profile centre.\neinstein_radius : float\n    The arc-second Einstein radius.\ncore_radius : float\n    The arc-second radius of the inner core.", "id": "f5952:c8:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>axis_ratio: float = <NUM_LIT:1.0>,<EOL>phi: float = <NUM_LIT:0.0>,<EOL>einstein_radius: dim.Length = <NUM_LIT:1.0>,<EOL>core_radius: dim.Length = <NUM_LIT>):<DEDENT>", "body": "super(EllipticalCoredIsothermal, self).__init__(centre=centre, axis_ratio=axis_ratio, phi=phi,<EOL>einstein_radius=einstein_radius, slope=<NUM_LIT>,<EOL>core_radius=core_radius)<EOL>", "docstring": "Represents a cored elliptical isothermal density distribution, which is equivalent to the elliptical power-law\ndensity distribution for the value slope: float = 2.0\n\nParameters\n----------\ncentre: (float, float)\n    The (y,x) arc-second coordinates of the profile centre.\naxis_ratio : float\n    The elliptical mass profile's minor-to-major axis ratio (b/a).\nphi : float\n    Rotation angle of mass profile's ellipse counter-clockwise from positive x-axis.\neinstein_radius : float\n    The arc-second Einstein radius.\ncore_radius : float\n    The arc-second radius of the inner core.", "id": "f5952:c7:m0"}
{"signature": "@geometry_profiles.transform_grid<EOL><INDENT>@geometry_profiles.move_grid_to_radial_minimum<EOL>def deflections_from_grid(self, grid):<DEDENT>", "body": "eta = np.multiply(<NUM_LIT:1.> / self.scale_radius, self.grid_to_grid_radii(grid=grid))<EOL>deflection_r = np.multiply(<NUM_LIT> * self.kappa_s * self.scale_radius, self.deflection_func_sph(eta))<EOL>return self.grid_to_grid_cartesian(grid, deflection_r)<EOL>", "docstring": "Calculate the deflection angles at a given set of arc-second gridded coordinates.\n\nParameters\n----------\ngrid : grids.RegularGrid\n    The grid of (y,x) arc-second coordinates the deflection angles are computed on.", "id": "f5952:c17:m2"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>einstein_radius: dim.Length = <NUM_LIT:1.0>,<EOL>slope: float = <NUM_LIT>,<EOL>core_radius: dim.Length = <NUM_LIT>):<DEDENT>", "body": "super(SphericalCoredPowerLaw, self).__init__(centre=centre, axis_ratio=<NUM_LIT:1.0>, phi=<NUM_LIT:0.0>,<EOL>einstein_radius=einstein_radius, slope=slope,<EOL>core_radius=core_radius)<EOL>", "docstring": "Represents a cored spherical power-law density distribution\n\nParameters\n----------\ncentre: (float, float)\n    The (y,x) arc-second coordinates of the profile centre.\neinstein_radius : float\n    The arc-second Einstein radius.\nslope : float\n    The density slope of the power-law (lower value -> shallower profile, higher value -> steeper profile).\ncore_radius : float\n    The arc-second radius of the inner core.", "id": "f5952:c4:m0"}
{"signature": "@geometry_profiles.transform_grid<EOL><INDENT>@geometry_profiles.move_grid_to_radial_minimum<EOL>def convergence_from_grid(self, grid):<DEDENT>", "body": "return self.convergence_func(self.grid_to_eccentric_radii(grid))<EOL>", "docstring": "Calculate the projected convergence at a given set of arc-second gridded coordinates.\n\n        Parameters\n        ----------\n        grid : grids.RegularGrid\n            The grid of (y,x) arc-second coordinates the surface density is computed on.", "id": "f5952:c25:m1"}
{"signature": "@geometry_profiles.transform_grid<EOL><INDENT>@geometry_profiles.move_grid_to_radial_minimum<EOL>def deflections_from_grid(self, grid):<DEDENT>", "body": "factor = <NUM_LIT> * self.einstein_radius_rescaled * self.axis_ratio / np.sqrt(<NUM_LIT:1> - self.axis_ratio ** <NUM_LIT:2>)<EOL>psi = np.sqrt(np.add(np.multiply(self.axis_ratio ** <NUM_LIT:2>, np.square(grid[:, <NUM_LIT:1>])), np.square(grid[:, <NUM_LIT:0>])))<EOL>deflection_y = np.arctanh(np.divide(np.multiply(np.sqrt(<NUM_LIT:1> - self.axis_ratio ** <NUM_LIT:2>), grid[:, <NUM_LIT:0>]), psi))<EOL>deflection_x = np.arctan(np.divide(np.multiply(np.sqrt(<NUM_LIT:1> - self.axis_ratio ** <NUM_LIT:2>), grid[:, <NUM_LIT:1>]), psi))<EOL>return self.rotate_grid_from_profile(np.multiply(factor, np.vstack((deflection_y, deflection_x)).T))<EOL>", "docstring": "Calculate the deflection angles at a given set of arc-second gridded coordinates.\n\nFor coordinates (0.0, 0.0) the analytic calculation of the deflection angle gives a NaN. Therefore, \\\ncoordinates at (0.0, 0.0) are shifted slightly to (1.0e-8, 1.0e-8).\n\nParameters\n----------\ngrid : grids.RegularGrid\n    The grid of (y,x) arc-second coordinates the deflection angles are computed on.", "id": "f5952:c9:m1"}
{"signature": "def intensity_at_radius(self, radius):", "body": "return self.intensity * np.exp(<EOL>-self.sersic_constant * (((radius / self.effective_radius) ** (<NUM_LIT:1.> / self.sersic_index)) - <NUM_LIT:1>))<EOL>", "docstring": "Compute the intensity of the profile at a given radius.\n\n        Parameters\n        ----------\n        radius : float\n            The distance from the centre of the profile.", "id": "f5952:c18:m4"}
{"signature": "@geometry_profiles.transform_grid<EOL><INDENT>@geometry_profiles.move_grid_to_radial_minimum<EOL>def potential_from_grid(self, grid, tabulate_bins=<NUM_LIT:1000>):<DEDENT>", "body": "@jit_integrand<EOL>def deflection_integrand(x, kappa_radius, scale_radius, inner_slope):<EOL><INDENT>return (x + kappa_radius / scale_radius) ** (inner_slope - <NUM_LIT:3>) * ((<NUM_LIT:1> - np.sqrt(<NUM_LIT:1> - x ** <NUM_LIT:2>)) / x)<EOL><DEDENT>eta_min, eta_max, minimum_log_eta, maximum_log_eta, bin_size = self.tabulate_integral(grid, tabulate_bins)<EOL>potential_grid = np.zeros(grid.shape[<NUM_LIT:0>])<EOL>deflection_integral = np.zeros((tabulate_bins,))<EOL>for i in range(tabulate_bins):<EOL><INDENT>eta = <NUM_LIT> ** (minimum_log_eta + (i - <NUM_LIT:1>) * bin_size)<EOL>integral =quad(deflection_integrand, a=<NUM_LIT:0.0>, b=<NUM_LIT:1.0>, args=(eta, self.scale_radius, self.inner_slope),<EOL>epsrel=EllipticalGeneralizedNFW.epsrel)[<NUM_LIT:0>]<EOL>deflection_integral[i] = ((eta / self.scale_radius) ** (<NUM_LIT:2> - self.inner_slope)) * (<EOL>(<NUM_LIT:1.0> / (<NUM_LIT:3> - self.inner_slope)) *<EOL>special.hyp2f1(<NUM_LIT:3> - self.inner_slope, <NUM_LIT:3> - self.inner_slope, <NUM_LIT:4> - self.inner_slope,<EOL>- (eta / self.scale_radius)) + integral)<EOL><DEDENT>for i in range(grid.shape[<NUM_LIT:0>]):<EOL><INDENT>potential_grid[i] = (<NUM_LIT> * self.kappa_s * self.axis_ratio) *quad(self.potential_func, a=<NUM_LIT:0.0>, b=<NUM_LIT:1.0>, args=(grid[i, <NUM_LIT:0>], grid[i, <NUM_LIT:1>],<EOL>self.axis_ratio, minimum_log_eta,<EOL>maximum_log_eta, tabulate_bins,<EOL>deflection_integral),<EOL>epsrel=EllipticalGeneralizedNFW.epsrel)[<NUM_LIT:0>]<EOL><DEDENT>return potential_grid<EOL>", "docstring": "Calculate the potential at a given set of arc-second gridded coordinates.\n\nParameters\n----------\ngrid : grids.RegularGrid\n    The grid of (y,x) arc-second coordinates the deflection angles are computed on.\ntabulate_bins : int\n    The number of bins to tabulate the 
inner integral of this profile.", "id": "f5952:c12:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>magnitude: float = <NUM_LIT>,<EOL>phi: float = <NUM_LIT:0.0>):<DEDENT>", "body": "super(ExternalShear, self).__init__(centre=(<NUM_LIT:0.0>, <NUM_LIT:0.0>), phi=phi, axis_ratio=<NUM_LIT:1.0>)<EOL>self.magnitude = magnitude<EOL>", "docstring": "An external shear term, to model the line-of-sight contribution of other galaxies / satellites.\n\nParameters\n----------\nmagnitude : float\n    The overall magnitude of the shear (gamma).\nphi : float\n    The rotation axis of the shear.", "id": "f5952:c28:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>axis_ratio: float = <NUM_LIT:1.0>,<EOL>phi: float = <NUM_LIT:0.0>,<EOL>einstein_radius: dim.Length = <NUM_LIT:1.0>):<DEDENT>", "body": "super(EllipticalIsothermal, self).__init__(centre=centre, axis_ratio=axis_ratio, phi=phi,<EOL>einstein_radius=einstein_radius, slope=<NUM_LIT>)<EOL>", "docstring": "Represents an elliptical isothermal density distribution, which is equivalent to the elliptical power-law\ndensity distribution for the value slope: float = 2.0\n\nParameters\n----------\ncentre: (float, float)\n    The (y,x) arc-second coordinates of the profile centre.\naxis_ratio : float\n    The elliptical mass profile's minor-to-major axis ratio (b/a).\nphi : float\n    Rotation angle of mass profile's ellipse counter-clockwise from positive x-axis.\neinstein_radius : float\n    The arc-second Einstein radius.", "id": "f5952:c9:m0"}
{"signature": "@geometry_profiles.transform_grid<EOL><INDENT>@geometry_profiles.move_grid_to_radial_minimum<EOL>def potential_from_grid(self, grid):<DEDENT>", "body": "eta = (<NUM_LIT:1.0> / self.scale_radius) * self.grid_to_grid_radii(grid) + <NUM_LIT><EOL>return np.real(<NUM_LIT> * self.scale_radius * self.kappa_s * self.potential_func_sph(eta))<EOL>", "docstring": "Calculate the potential at a given set of arc-second gridded coordinates.\n\nParameters\n----------\ngrid : grids.RegularGrid\n    The grid of (y,x) arc-second coordinates the deflection angles are computed on.", "id": "f5952:c17:m1"}
{"signature": "@geometry_profiles.transform_grid<EOL><INDENT>@geometry_profiles.move_grid_to_radial_minimum<EOL>def convergence_from_grid(self, grid):<DEDENT>", "body": "return self.convergence_func(self.grid_to_eccentric_radii(grid))<EOL>", "docstring": "Calculate the projected convergence at a given set of arc-second gridded coordinates.\n\n        Parameters\n        ----------\n        grid : grids.RegularGrid\n            The grid of (y,x) arc-second coordinates the surface density is computed on.", "id": "f5952:c18:m1"}
{"signature": "@grids.grid_interpolate<EOL><INDENT>@geometry_profiles.cache<EOL>@geometry_profiles.transform_grid<EOL>@geometry_profiles.move_grid_to_radial_minimum<EOL>def deflections_from_grid(self, grid):<DEDENT>", "body": "def calculate_deflection_component(npow, index):<EOL><INDENT>einstein_radius_rescaled = self.einstein_radius_rescaled<EOL>deflection_grid = self.axis_ratio * grid[:, index]<EOL>deflection_grid *= quad_grid(self.deflection_func, <NUM_LIT:0.0>, <NUM_LIT:1.0>,<EOL>grid, args=(npow, self.axis_ratio,<EOL>einstein_radius_rescaled, self.slope,<EOL>self.core_radius))[<NUM_LIT:0>]<EOL>return deflection_grid<EOL><DEDENT>deflection_y = calculate_deflection_component(<NUM_LIT:1.0>, <NUM_LIT:0>)<EOL>deflection_x = calculate_deflection_component(<NUM_LIT:0.0>, <NUM_LIT:1>)<EOL>return self.rotate_grid_from_profile(np.multiply(<NUM_LIT:1.0>, np.vstack((deflection_y, deflection_x)).T))<EOL>", "docstring": "Calculate the deflection angles at a given set of arc-second gridded coordinates.\n\nParameters\n----------\ngrid : grids.RegularGrid\n    The grid of (y,x) arc-second coordinates the deflection angles are computed on.", "id": "f5952:c3:m4"}
{"signature": "@geometry_profiles.transform_grid<EOL><INDENT>@geometry_profiles.move_grid_to_radial_minimum<EOL>def potential_from_grid(self, grid):<DEDENT>", "body": "potential_grid = quad_grid(self.potential_func, <NUM_LIT:0.0>, <NUM_LIT:1.0>, grid,<EOL>args=(self.axis_ratio, self.kappa_s, self.scale_radius),<EOL>epsrel=<NUM_LIT>)[<NUM_LIT:0>]<EOL>return potential_grid<EOL>", "docstring": "Calculate the potential at a given set of arc-second gridded coordinates.\n\nParameters\n----------\ngrid : grids.RegularGrid\n    The grid of (y,x) arc-second coordinates the deflection angles are computed on.", "id": "f5952:c16:m2"}
{"signature": "@geometry_profiles.transform_grid<EOL><INDENT>@geometry_profiles.move_grid_to_radial_minimum<EOL>def deflections_from_grid(self, grid):<DEDENT>", "body": "deflection_y = -np.multiply(self.magnitude, grid[:, <NUM_LIT:0>])<EOL>deflection_x = np.multiply(self.magnitude, grid[:, <NUM_LIT:1>])<EOL>return self.rotate_grid_from_profile(np.vstack((deflection_y, deflection_x)).T)<EOL>", "docstring": "Calculate the deflection angles at a given set of arc-second gridded coordinates.\n\nParameters\n----------\ngrid : grids.RegularGrid\n    The grid of (y,x) arc-second coordinates the deflection angles are computed on.", "id": "f5952:c28:m7"}
{"signature": "@geometry_profiles.transform_grid<EOL><INDENT>@geometry_profiles.move_grid_to_radial_minimum<EOL>def deflections_from_grid(self, grid, **kwargs):<DEDENT>", "body": "eta = np.multiply(<NUM_LIT:1.> / self.scale_radius, self.grid_to_grid_radii(grid))<EOL>deflection_grid = np.multiply((<NUM_LIT> * self.kappa_s * self.scale_radius / eta), self.deflection_func_sph(eta))<EOL>return self.grid_to_grid_cartesian(grid, deflection_grid)<EOL>", "docstring": "Calculate the deflection angles at a given set of arc-second gridded coordinates.\n\nParameters\n----------\ngrid : grids.RegularGrid\n    The grid of (y,x) arc-second coordinates the deflection angles are computed on.", "id": "f5952:c14:m7"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>axis_ratio: float = <NUM_LIT:1.0>,<EOL>phi: float = <NUM_LIT:0.0>,<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>,<EOL>sersic_index: float = <NUM_LIT>,<EOL>mass_to_light_ratio: dim.MassOverLuminosity = <NUM_LIT:1.0>):<DEDENT>", "body": "super(AbstractEllipticalSersic, self).__init__(centre=centre, axis_ratio=axis_ratio, phi=phi)<EOL>super(EllipticalMassProfile, self).__init__(centre=centre, axis_ratio=axis_ratio, phi=phi)<EOL>self.mass_to_light_ratio = mass_to_light_ratio<EOL>self.intensity = intensity<EOL>self.effective_radius = effective_radius<EOL>self.sersic_index = sersic_index<EOL>", "docstring": "The Sersic mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens \\\nmodel_galaxy's light.\n\nParameters\n----------\ncentre: (float, float)\n    The (y,x) arc-second coordinates of the profile centre.\naxis_ratio : float\n    Ratio of profiles ellipse's minor and major axes (b/a).\nphi : float\n    Rotational angle of profiles ellipse counter-clockwise from positive x-axis.\nintensity : float\n    Overall flux intensity normalisation in the light profiles (electrons per second).\neffective_radius : float\n    The radius containing half the light of this profile.\nsersic_index : float\n    Controls the concentration of the of the profile (lower value -> less concentrated, \\\n    higher value -> more concentrated).\nmass_to_light_ratio : float\n    The mass-to-light ratio of the light profiles", "id": "f5952:c18:m0"}
{"signature": "def average_convergence_of_1_radius_in_units(self, unit_length='<STR_LIT>', kpc_per_arcsec=None):", "body": "def func(radius):<EOL><INDENT>radius = dim.Length(radius, unit_length=unit_length)<EOL>return self.mass_within_circle_in_units(radius=radius) - np.pi * radius ** <NUM_LIT><EOL><DEDENT>radius = self.ellipticity_rescale * root_scalar(func, bracket=[<NUM_LIT>, <NUM_LIT>]).root<EOL>radius = dim.Length(radius, unit_length)<EOL>return radius.convert(unit_length=unit_length, kpc_per_arcsec=kpc_per_arcsec)<EOL>", "docstring": "The radius a critical curve forms for this mass profile, e.g. where the mean convergence is equal to 1.0.\n\n         In case of ellipitical mass profiles, the 'average' critical curve is used, whereby the convergence is \\\n         rescaled into a circle using the axis ratio.\n\n         This radius corresponds to the Einstein radius of the mass profile, and is a property of a number of \\\n         mass profiles below.", "id": "f5952:c2:m6"}
{"signature": "@property<EOL><INDENT>def einstein_radius_rescaled(self):<DEDENT>", "body": "return ((<NUM_LIT:3> - self.slope) / (<NUM_LIT:1> + self.axis_ratio)) * self.einstein_radius ** (self.slope - <NUM_LIT:1>)<EOL>", "docstring": "Rescale the einstein radius by slope and axis_ratio, to reduce its degeneracy with other mass-profiles\n        parameters", "id": "f5952:c3:m1"}
{"signature": "@geometry_profiles.transform_grid<EOL><INDENT>@geometry_profiles.move_grid_to_radial_minimum<EOL>def deflections_from_grid(self, grid):<DEDENT>", "body": "eta = self.grid_to_grid_radii(grid=grid)<EOL>deflection = np.multiply(<NUM_LIT> * self.einstein_radius_rescaled, np.divide(<EOL>np.add(np.power(np.add(self.core_radius ** <NUM_LIT:2>, np.square(eta)), (<NUM_LIT> - self.slope) / <NUM_LIT>),<EOL>-self.core_radius ** (<NUM_LIT:3> - self.slope)), np.multiply((<NUM_LIT> - self.slope), eta)))<EOL>return self.grid_to_grid_cartesian(grid=grid, radius=deflection)<EOL>", "docstring": "Calculate the deflection angles at a given set of arc-second gridded coordinates.\n\nParameters\n----------\ngrid : grids.RegularGrid\n    The grid of (y,x) arc-second coordinates the deflection angles are computed on.", "id": "f5952:c4:m1"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>axis_ratio: float = <NUM_LIT:1.0>,<EOL>phi: float = <NUM_LIT:0.0>,<EOL>einstein_radius: dim.Length = <NUM_LIT:1.0>,<EOL>slope: float = <NUM_LIT>,<EOL>core_radius: dim.Length = <NUM_LIT>):<DEDENT>", "body": "super(EllipticalCoredPowerLaw, self).__init__(centre=centre, axis_ratio=axis_ratio, phi=phi)<EOL>self.einstein_radius = einstein_radius<EOL>self.slope = slope<EOL>self.core_radius = core_radius<EOL>", "docstring": "Represents a cored elliptical power-law density distribution\n\nParameters\n----------\ncentre: (float, float)\n    The (y,x) arc-second coordinates of the profile centre.\naxis_ratio : float\n    The elliptical mass profile's minor-to-major axis ratio (b/a).\nphi : float\n    Rotation angle of mass profile's ellipse counter-clockwise from positive x-axis.\neinstein_radius : float\n    The arc-second Einstein radius.\nslope : float\n    The density slope of the power-law (lower value -> shallower profile, higher value -> steeper profile).\ncore_radius : float\n    The arc-second radius of the inner core.", "id": "f5952:c3:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>axis_ratio: float = <NUM_LIT:1.0>,<EOL>phi: float = <NUM_LIT:0.0>,<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>,<EOL>sersic_index: float = <NUM_LIT>,<EOL>mass_to_light_ratio: dim.MassOverLuminosity = <NUM_LIT:1.0>,<EOL>mass_to_light_gradient: float = <NUM_LIT:0.0>):<DEDENT>", "body": "super(EllipticalSersicRadialGradient, self).__init__(centre=centre, axis_ratio=axis_ratio, phi=phi,<EOL>intensity=intensity, effective_radius=effective_radius,<EOL>sersic_index=sersic_index,<EOL>mass_to_light_ratio=mass_to_light_ratio)<EOL>self.mass_to_light_gradient = mass_to_light_gradient<EOL>", "docstring": "Setup a Sersic mass and light profiles.\n\nParameters\n----------\ncentre: (float, float)\n    The (y,x) arc-second coordinates of the profile centre.\naxis_ratio : float\n    Ratio of profiles ellipse's minor and major axes (b/a).\nphi : float\n    Rotational angle of profiles ellipse counter-clockwise from positive x-axis.\nintensity : float\n    Overall flux intensity normalisation in the light profiles (electrons per second).\neffective_radius : float\n    The circular radius containing half the light of this profile.\nsersic_index : float\n    Controls the concentration of the of the profile (lower value -> less concentrated, \\\n    higher value -> more concentrated).\nmass_to_light_ratio : float\n    The mass-to-light ratio of the light profile.\nmass_to_light_gradient : float\n    The mass-to-light radial gradient.", "id": "f5952:c25:m0"}
{"signature": "@grids.grid_interpolate<EOL><INDENT>@geometry_profiles.cache<EOL>@geometry_profiles.transform_grid<EOL>@geometry_profiles.move_grid_to_radial_minimum<EOL>def deflections_from_grid(self, grid):<DEDENT>", "body": "def calculate_deflection_component(npow, index):<EOL><INDENT>sersic_constant = self.sersic_constant<EOL>deflection_grid = self.axis_ratio * grid[:, index]<EOL>deflection_grid *= quad_grid(self.deflection_func, <NUM_LIT:0.0>, <NUM_LIT:1.0>, grid,<EOL>args=(npow, self.axis_ratio, self.intensity,<EOL>self.sersic_index, self.effective_radius,<EOL>self.mass_to_light_ratio, sersic_constant))[<NUM_LIT:0>]<EOL>return deflection_grid<EOL><DEDENT>deflection_y = calculate_deflection_component(<NUM_LIT:1.0>, <NUM_LIT:0>)<EOL>deflection_x = calculate_deflection_component(<NUM_LIT:0.0>, <NUM_LIT:1>)<EOL>return self.rotate_grid_from_profile(np.multiply(<NUM_LIT:1.0>, np.vstack((deflection_y, deflection_x)).T))<EOL>", "docstring": "Calculate the deflection angles at a given set of arc-second gridded coordinates.\n\nParameters\n----------\ngrid : grids.RegularGrid\n    The grid of (y,x) arc-second coordinates the deflection angles are computed on.", "id": "f5952:c19:m1"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>,<EOL>mass_to_light_ratio: dim.MassOverLuminosity = <NUM_LIT:1.0>):<DEDENT>", "body": "super(SphericalExponential, self).__init__(centre=centre, axis_ratio=<NUM_LIT:1.0>, phi=<NUM_LIT:0.0>, intensity=intensity,<EOL>effective_radius=effective_radius,<EOL>mass_to_light_ratio=mass_to_light_ratio)<EOL>", "docstring": "The Exponential mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens\nmodel_galaxy's light.\n\nParameters\n----------\ncentre: (float, float)\n    The (y,x) arc-second coordinates of the profile centre.\nintensity : float\n    Overall flux intensity normalisation in the light profiles (electrons per second).\neffective_radius : float\n    The circular radius containing half the light of this profile.\nmass_to_light_ratio : float\n    The mass-to-light ratio of the light profiles.", "id": "f5952:c22:m0"}
{"signature": "def mass_within_ellipse_in_units(self, major_axis, unit_mass='<STR_LIT>', kpc_per_arcsec=None,<EOL>critical_surface_density=None):", "body": "self.check_units_of_radius_and_critical_surface_density(<EOL>radius=major_axis, critical_surface_density=critical_surface_density)<EOL>profile = self.new_profile_with_units_converted(<EOL>unit_length=major_axis.unit_length, unit_mass='<STR_LIT>',<EOL>kpc_per_arcsec=kpc_per_arcsec, critical_surface_density=critical_surface_density)<EOL>mass_angular = dim.Mass(value=quad(profile.mass_integral, a=<NUM_LIT:0.0>, b=major_axis, args=(self.axis_ratio,))[<NUM_LIT:0>],<EOL>unit_mass='<STR_LIT>')<EOL>return mass_angular.convert(unit_mass=unit_mass, critical_surface_density=critical_surface_density)<EOL>", "docstring": "Integrate the mass profiles's convergence profile to compute the total angular mass within an ellipse of \\\n        specified major axis. This is centred on the mass profile.\n\n        The following units for mass can be specified and output:\n\n        - Dimensionless angular units (default) - 'angular'.\n        - Solar masses - 'angular' (multiplies the angular mass by the critical surface mass density)\n\n        Parameters\n        ----------\n        major_axis : float\n            The major-axis radius of the ellipse.\n        unit_mass : str\n            The units the mass is returned in (angular | angular).\n        critical_surface_density : float or None\n            The critical surface mass density of the strong lens configuration, which converts mass from angular \\\n            units to phsical units (e.g. solar masses).", "id": "f5952:c2:m3"}
{"signature": "@grids.grid_interpolate<EOL><INDENT>@geometry_profiles.cache<EOL>@geometry_profiles.transform_grid<EOL>@geometry_profiles.move_grid_to_radial_minimum<EOL>def deflections_from_grid(self, grid):<DEDENT>", "body": "def calculate_deflection_component(npow, index):<EOL><INDENT>sersic_constant = self.sersic_constant<EOL>deflection_grid = self.axis_ratio * grid[:, index]<EOL>deflection_grid *= quad_grid(self.deflection_func, <NUM_LIT:0.0>, <NUM_LIT:1.0>, grid,<EOL>args=(npow, self.axis_ratio, self.intensity,<EOL>self.sersic_index, self.effective_radius,<EOL>self.mass_to_light_ratio, self.mass_to_light_gradient,<EOL>sersic_constant))[<NUM_LIT:0>]<EOL>return deflection_grid<EOL><DEDENT>deflection_y = calculate_deflection_component(<NUM_LIT:1.0>, <NUM_LIT:0>)<EOL>deflection_x = calculate_deflection_component(<NUM_LIT:0.0>, <NUM_LIT:1>)<EOL>return self.rotate_grid_from_profile(np.multiply(<NUM_LIT:1.0>, np.vstack((deflection_y, deflection_x)).T))<EOL>", "docstring": "Calculate the deflection angles at a given set of arc-second gridded coordinates.\n\nParameters\n----------\ngrid : grids.RegularGrid\n    The grid of (y,x) arc-second coordinates the deflection angles are computed on.", "id": "f5952:c25:m2"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>axis_ratio: float = <NUM_LIT:1.0>,<EOL>phi: float = <NUM_LIT:0.0>,<EOL>kappa_s: float = <NUM_LIT>,<EOL>inner_slope: float = <NUM_LIT:1.0>,<EOL>scale_radius: dim.Length = <NUM_LIT:1.0>):<DEDENT>", "body": "super(AbstractEllipticalGeneralizedNFW, self).__init__(centre=centre, axis_ratio=axis_ratio, phi=phi)<EOL>super(MassProfile, self).__init__()<EOL>self.kappa_s = kappa_s<EOL>self.scale_radius = scale_radius<EOL>self.inner_slope = inner_slope<EOL>", "docstring": "The elliptical NFW profiles, used to fit the dark matter halo of the lens.\n\nParameters\n----------\ncentre: (float, float)\n    The (y,x) arc-second coordinates of the profile centre.\naxis_ratio : float\n    Ratio of profiles ellipse's minor and major axes (b/a).\nphi : float\n    Rotational angle of profiles ellipse counter-clockwise from positive x-axis.\nkappa_s : float\n    The overall normalization of the dark matter halo \\\n    (kappa_s = (rho_s * scale_radius)/lensing_critical_density)\ninner_slope : float\n    The inner slope of the dark matter halo\nscale_radius : float\n    The arc-second radius where the average density within this radius is 200 times the critical density of \\\n    the Universe..", "id": "f5952:c11:m0"}
{"signature": "def density_between_circular_annuli_in_angular_units(self, inner_annuli_radius, outer_annuli_radius):", "body": "annuli_area = (np.pi * outer_annuli_radius ** <NUM_LIT>) - (np.pi * inner_annuli_radius ** <NUM_LIT>)<EOL>return (self.mass_within_circle_in_units(radius=outer_annuli_radius) -<EOL>self.mass_within_circle_in_units(radius=inner_annuli_radius))/ annuli_area<EOL>", "docstring": "Calculate the mass between two circular annuli and compute the density by dividing by the annuli surface\n        area.\n\n        The value returned by the mass integral is dimensionless, therefore the density between annuli is returned in \\\n        units of inverse radius squared. A conversion factor can be specified to convert this to a physical value \\\n        (e.g. the critical surface mass density).\n\n        Parameters\n        -----------\n        inner_annuli_radius : float\n            The radius of the inner annulus outside of which the density are estimated.\n        outer_annuli_radius : float\n            The radius of the outer annulus inside of which the density is estimated.", "id": "f5952:c2:m5"}
{"signature": "def mass_integral(self, x, axis_ratio):", "body": "r = x * axis_ratio<EOL>return <NUM_LIT:2> * np.pi * r * self.convergence_func(x)<EOL>", "docstring": "Routine to integrate an elliptical light profiles - set axis ratio to 1 to compute the luminosity within a \\\n        circle", "id": "f5952:c2:m4"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>kappa_s: float = <NUM_LIT>,<EOL>inner_slope: float = <NUM_LIT:1.0>,<EOL>scale_radius: dim.Length = <NUM_LIT:1.0>):<DEDENT>", "body": "super(SphericalGeneralizedNFW, self).__init__(centre=centre, axis_ratio=<NUM_LIT:1.0>, phi=<NUM_LIT:0.0>, kappa_s=kappa_s,<EOL>inner_slope=inner_slope, scale_radius=scale_radius)<EOL>", "docstring": "The spherical NFW profiles, used to fit the dark matter halo of the lens.\n\nParameters\n----------\ncentre: (float, float)\n    The (y,x) arc-second coordinates of the profile centre.\nkappa_s : float\n    The overall normalization of the dark matter halo \\\n    (kappa_s = (rho_s * scale_radius)/lensing_critical_density)\ninner_slope : float\n    The inner slope of the dark matter halo.\nscale_radius : float\n    The arc-second radius where the average density within this radius is 200 times the critical density of \\\n    the Universe..", "id": "f5952:c13:m0"}
{"signature": "@geometry_profiles.transform_grid<EOL><INDENT>@geometry_profiles.move_grid_to_radial_minimum<EOL>def potential_from_grid(self, grid):<DEDENT>", "body": "potential_grid = quad_grid(self.potential_func, <NUM_LIT:0.0>, <NUM_LIT:1.0>, grid,<EOL>args=(self.axis_ratio, self.slope, self.core_radius))[<NUM_LIT:0>]<EOL>return self.einstein_radius_rescaled * self.axis_ratio * potential_grid<EOL>", "docstring": "Calculate the potential at a given set of arc-second gridded coordinates.\n\nParameters\n----------\ngrid : grids.RegularGrid\n    The grid of (y,x) arc-second coordinates the deflection angles are computed on.", "id": "f5952:c3:m3"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>,<EOL>mass_to_light_ratio: dim.MassOverLuminosity = <NUM_LIT:1.0>):<DEDENT>", "body": "super(SphericalDevVaucouleurs, self).__init__(centre=centre, axis_ratio=<NUM_LIT:1.0>, phi=<NUM_LIT:0.0>, intensity=intensity,<EOL>effective_radius=effective_radius,<EOL>mass_to_light_ratio=mass_to_light_ratio)<EOL>", "docstring": "The DevVaucouleurs mass profile, the mass profiles of the light profiles that are used to fit and subtract the\nlens model_galaxy's light.\n\nParameters\n----------\ncentre: (float, float)\n    The (y,x) arc-second coordinates of the profile centre.\nintensity : float\n    Overall flux intensity normalisation in the light profiles (electrons per second).\neffective_radius : float\n    The circular radius containing half the light of this profile.\nmass_to_light_ratio : float\n    The mass-to-light ratio of the light profiles.", "id": "f5952:c24:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>einstein_radius: dim.Length = <NUM_LIT:1.0>):<DEDENT>", "body": "super(SphericalIsothermal, self).__init__(centre=centre, axis_ratio=<NUM_LIT:1.0>, phi=<NUM_LIT:0.0>,<EOL>einstein_radius=einstein_radius)<EOL>", "docstring": "Represents a spherical isothermal density distribution, which is equivalent to the spherical power-law\ndensity distribution for the value slope: float = 2.0\n\nParameters\n----------\ncentre: (float, float)\n    The (y,x) arc-second coordinates of the profile centre.\neinstein_radius : float\n    The arc-second Einstein radius.", "id": "f5952:c10:m0"}
{"signature": "def tabulate_integral(self, grid, tabulate_bins):", "body": "eta_min = <NUM_LIT><EOL>eta_max = <NUM_LIT> * np.max(self.grid_to_elliptical_radii(grid))<EOL>minimum_log_eta = np.log10(eta_min)<EOL>maximum_log_eta = np.log10(eta_max)<EOL>bin_size = (maximum_log_eta - minimum_log_eta) / (tabulate_bins - <NUM_LIT:1>)<EOL>return eta_min, eta_max, minimum_log_eta, maximum_log_eta, bin_size<EOL>", "docstring": "Tabulate an integral over the surface density of deflection potential of a mass profile. This is used in \\\n        the GeneralizedNFW profile classes to speed up the integration procedure.\n\n        Parameters\n        -----------\n        grid : grids.RegularGrid\n            The grid of (y,x) arc-second coordinates the potential / deflection_stacks are computed on.\n        tabulate_bins : int\n            The number of bins to tabulate the inner integral of this profile.", "id": "f5952:c11:m1"}
{"signature": "def plot_deflections_x(<EOL>mass_profile, grid, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, <EOL>as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT>, grid_pointsize=<NUM_LIT:1>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "deflections = mass_profile.deflections_from_grid(grid)<EOL>deflections_x = grid.scaled_array_2d_from_array_1d(deflections[:, <NUM_LIT:1>])<EOL>array_plotters.plot_array(<EOL>array=deflections_x, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize, grid_pointsize=grid_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the x component of the deflection angles of a mass profile, on a regular grid of (y,x) coordinates.\n\n     Set *autolens.hyper.array.plotters.array_plotters* for a description of all innput parameters not described 
below.\n\n     Parameters\n     -----------\n     mass_profile : model.profiles.mass_profiles.MassProfile\n         The mass profile whose x deflecton angles are plotted.\n     grid : ndarray or hyper.array.grid_stacks.RegularGrid\n         The (y,x) coordinates of the grid, in an array of shape (total_coordinates, 2)", "id": "f5953:m4"}
{"signature": "def plot_convergence(<EOL>mass_profile, grid, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, <EOL>as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT>, grid_pointsize=<NUM_LIT:1>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "convergence = mass_profile.convergence_from_grid(grid=grid)<EOL>convergence = grid.scaled_array_2d_from_array_1d(convergence)<EOL>array_plotters.plot_array(<EOL>array=convergence, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize, grid_pointsize=grid_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the convergence of a mass profile, on a regular grid of (y,x) coordinates.\n\n    Set *autolens.hyper.array.plotters.array_plotters* for a description of all innput parameters not described below.\n\n    Parameters\n    -----------\n  
  mass_profile : model.profiles.mass_profiles.MassProfile\n        The mass profile whose convergence is plotted.\n    grid : ndarray or hyper.array.grid_stacks.RegularGrid\n        The (y,x) coordinates of the grid, in an array of shape (total_coordinates, 2)", "id": "f5953:m1"}
{"signature": "def plot_potential(<EOL>mass_profile, grid, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, <EOL>as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT>, grid_pointsize=<NUM_LIT:1>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "potential = mass_profile.potential_from_grid(grid=grid)<EOL>potential = grid.scaled_array_2d_from_array_1d(potential)<EOL>array_plotters.plot_array(<EOL>array=potential, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize, grid_pointsize=grid_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the potential of a mass profile, on a regular grid of (y,x) coordinates.\n\n    Set *autolens.hyper.array.plotters.array_plotters* for a description of all innput parameters not described below.\n\n    Parameters\n    -----------\n    
mass_profile : model.profiles.mass_profiles.MassProfile\n        The mass profile whose potential is plotted.\n    grid : ndarray or hyper.array.grid_stacks.RegularGrid\n        The (y,x) coordinates of the grid, in an array of shape (total_coordinates, 2)", "id": "f5953:m2"}
{"signature": "def plot_intensities(<EOL>light_profile, grid, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, <EOL>as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT>, grid_pointsize=<NUM_LIT:1>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "intensities = light_profile.intensities_from_grid(grid=grid)<EOL>intensities = grid.scaled_array_2d_from_array_1d(intensities)<EOL>array_plotters.plot_array(<EOL>array=intensities, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize, grid_pointsize=grid_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the intensities (e.g. 
the image) of a light profile, on a regular grid of (y,x) coordinates.\n\n    Set *autolens.hyper.array.plotters.array_plotters* for a description of all innput parameters not described below.\n\n    Parameters\n    -----------\n    light_profile : model.profiles.light_profiles.LightProfile\n        The light profile whose intensities are plotted.\n    grid : ndarray or hyper.array.grid_stacks.RegularGrid\n        The (y,x) coordinates of the grid, in an array of shape (total_coordinates, 2)", "id": "f5953:m0"}
{"signature": "def cos_and_sin_from_x_axis(self):", "body": "phi_radians = np.radians(self.phi)<EOL>return np.cos(phi_radians), np.sin(phi_radians)<EOL>", "docstring": "Determine the sin and cosine of the angle between the profile's ellipse and the positive x-axis, \\\n        counter-clockwise.", "id": "f5954:c3:m4"}
{"signature": "def transform_grid_to_reference_frame(self, grid):", "body": "if self.__class__.__name__.startswith(\"<STR_LIT>\"):<EOL><INDENT>return super().transform_grid_to_reference_frame(grid)<EOL><DEDENT>shifted_coordinates = np.subtract(grid, self.centre)<EOL>radius = np.sqrt(np.sum(shifted_coordinates ** <NUM_LIT>, <NUM_LIT:1>))<EOL>theta_coordinate_to_profile = np.arctan2(shifted_coordinates[:, <NUM_LIT:0>],<EOL>shifted_coordinates[:, <NUM_LIT:1>]) - self.phi_radians<EOL>transformed = np.vstack(<EOL>(radius * np.sin(theta_coordinate_to_profile), radius * np.cos(theta_coordinate_to_profile))).T<EOL>return transformed.view(TransformedGrid)<EOL>", "docstring": "Transform a grid of (y,x) coordinates to the reference frame of the profile, including a translation to \\\n        its centre and a rotation to it orientation.\n\n        Parameters\n        ----------\n        grid : ndarray\n            The (y, x) coordinates in the original reference frame of the grid.", "id": "f5954:c3:m9"}
{"signature": "def rotate_grid_from_profile(self, grid_elliptical):", "body": "y = np.add(np.multiply(grid_elliptical[:, <NUM_LIT:1>], self.sin_phi), np.multiply(grid_elliptical[:, <NUM_LIT:0>], self.cos_phi))<EOL>x = np.add(np.multiply(grid_elliptical[:, <NUM_LIT:1>], self.cos_phi), - np.multiply(grid_elliptical[:, <NUM_LIT:0>], self.sin_phi))<EOL>return np.vstack((y, x)).T<EOL>", "docstring": "Rotate a grid of elliptical (y,x) coordinates from the reference frame of the profile back to the \\\n        unrotated coordinate grid reference frame (coordinates are not shifted back to their original centre).\n\n        This routine is used after computing deflection angles in the reference frame of the profile, so that the \\\n        deflection angles can be re-rotated to the frame of the original coordinates before performing ray-tracing.\n\n        Parameters\n        ----------\n        grid_elliptical : TransformedGrid(ndarray)\n            The (y, x) coordinates in the reference frame of an elliptical profile.", "id": "f5954:c3:m6"}
{"signature": "@transform_grid<EOL><INDENT>@move_grid_to_radial_minimum<EOL>def grid_to_eccentric_radii(self, grid):<DEDENT>", "body": "return np.multiply(np.sqrt(self.axis_ratio), self.grid_to_elliptical_radii(grid)).view(np.ndarray)<EOL>", "docstring": "Convert a grid of (y,x) coordinates to an eccentric radius, which is (1.0/axis_ratio) * elliptical radius \\\n        and used to define light profile half-light radii using circular radii.\n\n        If the coordinates have not been transformed to the profile's geometry, this is performed automatically.\n\n        Parameters\n        ----------\n        grid : TransformedGrid(ndarray)\n            The (y, x) coordinates in the reference frame of the elliptical profile.", "id": "f5954:c3:m8"}
{"signature": "@transform_grid<EOL><INDENT>@move_grid_to_radial_minimum<EOL>def grid_to_elliptical_radii(self, grid):<DEDENT>", "body": "return np.sqrt(np.add(np.square(grid[:, <NUM_LIT:1>]), np.square(np.divide(grid[:, <NUM_LIT:0>], self.axis_ratio))))<EOL>", "docstring": "Convert a grid of (y,x) coordinates to an elliptical radius.\n\n        If the coordinates have not been transformed to the profile's geometry, this is performed automatically.\n\n        Parameters\n        ----------\n        grid : TransformedGrid(ndarray)\n            The (y, x) coordinates in the reference frame of the elliptical profile.", "id": "f5954:c3:m7"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>)):<DEDENT>", "body": "super(SphericalProfile, self).__init__(centre=centre)<EOL>", "docstring": "A spherical profile, which describes profiles with y and x centre Cartesian coordinates.\n\n        Parameters\n        ----------\n        centre: (float, float)\n            The (y,x) arc-second coordinates of the profile centre.", "id": "f5954:c2:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>axis_ratio: float = <NUM_LIT:1.0>,<EOL>phi: float = <NUM_LIT:0.0>):<DEDENT>", "body": "super(EllipticalProfile, self).__init__(centre=centre)<EOL>self.axis_ratio = axis_ratio<EOL>self.phi = phi<EOL>", "docstring": "An elliptical profile, which describes profiles with y and x centre Cartesian coordinates, an axis-ratio \\\n        and rotational angle phi.\n\n        Parameters\n        ----------\n        centre: (float, float)\n            The (y,x) arc-second coordinates of the profile centre.\n        axis_ratio : float\n            Ratio of profiles ellipse's minor and major axes (b/a)\n        phi : float\n            Rotation angle of profiles ellipse counter-clockwise from positive x-axis", "id": "f5954:c3:m0"}
{"signature": "@transform_grid<EOL><INDENT>def grid_to_grid_radii(self, grid):<DEDENT>", "body": "return np.sqrt(np.add(np.square(grid[:, <NUM_LIT:0>]), np.square(grid[:, <NUM_LIT:1>])))<EOL>", "docstring": "Convert a grid of (y, x) coordinates to a grid of their circular radii.\n\n        If the coordinates have not been transformed to the profile's centre, this is performed automatically.\n\n        Parameters\n        ----------\n        grid : TransformedGrid(ndarray)\n            The (y, x) coordinates in the reference frame of the profile.", "id": "f5954:c2:m1"}
{"signature": "def transform_grid_from_reference_frame(self, grid):", "body": "transformed = np.add(grid, self.centre)<EOL>return transformed.view(TransformedGrid)<EOL>", "docstring": "Transform a grid of (y,x) coordinates from the reference frame of the profile to the original observer \\\n        reference frame, including a translation from the profile's centre.\n\n        Parameters\n        ----------\n        grid : TransformedGrid(ndarray)\n            The (y, x) coordinates in the reference frame of the profile.", "id": "f5954:c2:m5"}
{"signature": "def grid_angle_to_profile(self, grid_thetas):", "body": "return np.cos(grid_thetas), np.sin(grid_thetas)<EOL>", "docstring": "The angle between each (y,x) coordinate on the grid and the profile, in radians.\n\n        Parameters\n        -----------\n        grid_thetas : ndarray\n            The angle theta counter-clockwise from the positive x-axis to each coordinate in radians.", "id": "f5954:c2:m2"}
{"signature": "def grid_angle_to_profile(self, grid_thetas):", "body": "theta_coordinate_to_profile = np.add(grid_thetas, - self.phi_radians)<EOL>return np.cos(theta_coordinate_to_profile), np.sin(theta_coordinate_to_profile)<EOL>", "docstring": "The angle between each angle theta on the grid and the profile, in radians.\n\n        Parameters\n        -----------\n        grid_thetas : ndarray\n            The angle theta counter-clockwise from the positive x-axis to each coordinate in radians.", "id": "f5954:c3:m5"}
{"signature": "def transform_grid(func):", "body": "@wraps(func)<EOL>def wrapper(profile, grid, *args, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if not isinstance(grid, TransformedGrid):<EOL><INDENT>return func(profile, profile.transform_grid_to_reference_frame(grid), *args, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>return func(profile, grid, *args, **kwargs)<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Wrap the function in a function that checks whether the coordinates have been transformed. If they have not \\ \n    been transformed then they are transformed.\n\n    Parameters\n    ----------\n    func : (profiles, *args, **kwargs) -> Object\n        A function that requires transformed coordinates\n\n    Returns\n    -------\n        A function that can accept Cartesian or transformed coordinates", "id": "f5954:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>axis_ratio: float = <NUM_LIT:1.0>,<EOL>phi: float = <NUM_LIT:0.0>,<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>,<EOL>sersic_index: float = <NUM_LIT>,<EOL>radius_break: dim.Length = <NUM_LIT>,<EOL>intensity_break: dim.Luminosity = <NUM_LIT>,<EOL>gamma: float = <NUM_LIT>,<EOL>alpha: float = <NUM_LIT>):<DEDENT>", "body": "super(EllipticalCoreSersic, self).__init__(centre=centre, axis_ratio=axis_ratio, phi=phi, intensity=intensity,<EOL>effective_radius=effective_radius, sersic_index=sersic_index)<EOL>self.radius_break = radius_break<EOL>self.intensity_break = intensity_break<EOL>self.alpha = alpha<EOL>self.gamma = gamma<EOL>", "docstring": "The elliptical cored-Sersic light profile.\n\n        Parameters\n        ----------\n        centre : (float, float)\n            The (y,x) arc-second coordinates of the profile centre.\n        axis_ratio : float\n            Ratio of light profiles ellipse's minor and major axes (b/a).\n        phi : float\n            Rotation angle of light profile counter-clockwise from positive x-axis.\n        intensity : float\n            Overall intensity normalisation of the light profiles (electrons per second).\n        effective_radius : float\n            The circular radius containing half the light of this profile.\n        sersic_index : Int\n            Controls the concentration of the of the profile (lower value -> less concentrated, \\\n            higher value -> more concentrated).\n        radius_break : Float\n            The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.\n        intensity_break : Float\n            The intensity at the break radius.\n        gamma : Float\n            The logarithmic power-law slope of the inner core profiles\n        alpha :\n            Controls the sharpness of the 
transition between the inner core / outer Sersic profiles.", "id": "f5955:c11:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>,<EOL>sersic_index: float = <NUM_LIT>,<EOL>radius_break: dim.Length = <NUM_LIT>,<EOL>intensity_break: dim.Luminosity = <NUM_LIT>,<EOL>gamma: float = <NUM_LIT>,<EOL>alpha: float = <NUM_LIT>):<DEDENT>", "body": "super(SphericalCoreSersic, self).__init__(centre=centre, axis_ratio=<NUM_LIT:1.0>, phi=<NUM_LIT:0.0>, intensity=intensity,<EOL>effective_radius=effective_radius, sersic_index=sersic_index,<EOL>radius_break=radius_break, intensity_break=intensity_break,<EOL>gamma=gamma, alpha=alpha)<EOL>self.radius_break = radius_break<EOL>self.intensity_break = intensity_break<EOL>self.alpha = alpha<EOL>self.gamma = gamma<EOL>", "docstring": "The spherical cored-Sersic light profile.\n\n        Parameters\n        ----------\n        centre : (float, float)\n            The (y,x) arc-second coordinates of the profile centre.\n        intensity : float\n            Overall intensity normalisation of the light profiles (electrons per second).\n        effective_radius : float\n            The circular radius containing half the light of this profile.\n        sersic_index : Int\n            Controls the concentration of the profile (lower value -> less concentrated, \\\n            higher value -> more concentrated).\n        radius_break : Float\n            The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.\n        intensity_break : Float\n            The intensity at the break radius.\n        gamma : Float\n            The logarithmic power-law slope of the inner core profiles\n        alpha :\n            Controls the sharpness of the transition between the inner core / outer Sersic profiles.", "id": "f5955:c12:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>axis_ratio: float = <NUM_LIT:1.0>,<EOL>phi: float = <NUM_LIT:0.0>,<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>,<EOL>sersic_index: float = <NUM_LIT>):<DEDENT>", "body": "super(AbstractEllipticalSersic, self).__init__(centre=centre, axis_ratio=axis_ratio, phi=phi)<EOL>self.intensity = intensity<EOL>self.effective_radius = effective_radius<EOL>self.sersic_index = sersic_index<EOL>", "docstring": "Abstract base class for an elliptical Sersic light profile, used for computing its effective radius and\n        Sersic constant.\n\n        Parameters\n        ----------\n        centre : (float, float)\n            The (y,x) arc-second coordinates of the profile centre.\n        axis_ratio : float\n            Ratio of light profiles ellipse's minor and major axes (b/a)\n        phi : float\n            Rotational angle of profiles ellipse counter-clockwise from positive x-axis\n        intensity : float\n            Overall intensity normalisation in the light profiles (electrons per second)\n        effective_radius : float\n            The circular radius containing half the light of this model_mapper\n        sersic_index : Int\n            Controls the concentration of the of the profile (lower value -> less concentrated, \\\n            higher value -> more concentrated).", "id": "f5955:c4:m0"}
{"signature": "def luminosity_integral(self, x, axis_ratio):", "body": "r = x * axis_ratio<EOL>return <NUM_LIT:2> * np.pi * r * self.intensities_from_grid_radii(x)<EOL>", "docstring": "Routine to integrate the luminosity of an elliptical light profile.\n\n        The axis ratio is set to 1.0 for computing the luminosity within a circle", "id": "f5955:c1:m3"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>sigma: dim.Length = <NUM_LIT>):<DEDENT>", "body": "super(SphericalGaussian, self).__init__(centre=centre, axis_ratio=<NUM_LIT:1.0>, phi=<NUM_LIT:0.0>, intensity=intensity,<EOL>sigma=sigma)<EOL>", "docstring": "The spherical Gaussian light profile.\n\n        Parameters\n        ----------\n        centre : (float, float)\n            The (y,x) arc-second coordinates of the profile centre.\n        intensity : float\n            Overall intensity normalisation of the light profiles (electrons per second).\n        sigma : float\n            The full-width half-maximum of the Gaussian.", "id": "f5955:c3:m0"}
{"signature": "@property<EOL><INDENT>def elliptical_effective_radius(self):<DEDENT>", "body": "return self.effective_radius / np.sqrt(self.axis_ratio)<EOL>", "docstring": "The effective_radius of a Sersic light profile is defined as the circular effective radius. This is the \\\n        radius within which a circular aperture contains half the profile's total integrated light. For elliptical \\\n        systems, this won't robustly capture the light profile's elliptical shape.\n\n        The elliptical effective radius instead describes the major-axis radius of the ellipse containing \\\n        half the light, and may be more appropriate for highly flattened systems like disk galaxies.", "id": "f5955:c4:m2"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>):<DEDENT>", "body": "super(SphericalDevVaucouleurs, self).__init__(centre=centre, axis_ratio=<NUM_LIT:1.0>, phi=<NUM_LIT:0.0>, intensity=intensity,<EOL>effective_radius=effective_radius)<EOL>", "docstring": "The spherical Dev Vaucouleurs light profile.\n\n        This is a subset of the elliptical Sersic profile, specific to the case that sersic_index = 4.0.\n\n        Parameters\n        ----------\n        centre : (float, float)\n            The (y,x) arc-second coordinates of the profile centre.\n        intensity : float\n            Overall intensity normalisation of the light profiles (electrons per second).\n        effective_radius : float\n            The circular radius containing half the light of this profile.", "id": "f5955:c10:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>):<DEDENT>", "body": "super(SphericalExponential, self).__init__(centre=centre, axis_ratio=<NUM_LIT:1.0>, phi=<NUM_LIT:0.0>, intensity=intensity,<EOL>effective_radius=effective_radius)<EOL>", "docstring": "The spherical exponential profile.\n\n        This is a subset of the elliptical Sersic profile, specific to the case that sersic_index = 1.0.\n\n        Parameters\n        ----------\n        centre : (float, float)\n            The (y,x) arc-second coordinates of the profile centre.\n        intensity : float\n            Overall intensity normalisation of the light profiles (electrons per second).\n        effective_radius : float\n            The circular radius containing half the light of this profile.", "id": "f5955:c8:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>axis_ratio: float = <NUM_LIT:1.0>,<EOL>phi: float = <NUM_LIT:0.0>,<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>,<EOL>sersic_index: float = <NUM_LIT>):<DEDENT>", "body": "super(EllipticalSersic, self).__init__(centre=centre, axis_ratio=axis_ratio, phi=phi, intensity=intensity,<EOL>effective_radius=effective_radius, sersic_index=sersic_index)<EOL>", "docstring": "The elliptical Sersic light profile.\n\n        Parameters\n        ----------\n        centre : (float, float)\n            The (y,x) arc-second coordinates of the profile centre.\n        axis_ratio : float\n            Ratio of light profiles ellipse's minor and major axes (b/a).\n        phi : float\n            Rotation angle of light profile counter-clockwise from positive x-axis.\n        intensity : float\n            Overall intensity normalisation of the light profiles (electrons per second).\n        effective_radius : float\n            The circular radius containing half the light of this profile.\n        sersic_index : Int\n            Controls the concentration of the of the profile (lower value -> less concentrated, \\\n            higher value -> more concentrated).", "id": "f5955:c5:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>axis_ratio: float = <NUM_LIT:1.0>,<EOL>phi: float = <NUM_LIT:0.0>,<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>):<DEDENT>", "body": "super(EllipticalExponential, self).__init__(centre=centre, axis_ratio=axis_ratio, phi=phi, intensity=intensity,<EOL>effective_radius=effective_radius, sersic_index=<NUM_LIT:1.0>)<EOL>", "docstring": "The elliptical exponential profile.\n\n        This is a subset of the elliptical Sersic profile, specific to the case that sersic_index = 1.0.\n\n        Parameters\n        ----------\n        centre : (float, float)\n            The (y,x) arc-second centre of the light profile.\n        axis_ratio : float\n            Ratio of light profiles ellipse's minor and major axes (b/a).\n        phi : float\n            Rotation angle of light profile counter-clockwise from positive x-axis.\n        intensity : float\n            Overall intensity normalisation of the light profiles (electrons per second).\n        effective_radius : float\n            The circular radius containing half the light of this profile.", "id": "f5955:c7:m0"}
{"signature": "@map_types<EOL><INDENT>def __init__(self,<EOL>centre: dim.Position = (<NUM_LIT:0.0>, <NUM_LIT:0.0>),<EOL>axis_ratio: float = <NUM_LIT:1.0>,<EOL>phi: float = <NUM_LIT:0.0>,<EOL>intensity: dim.Luminosity = <NUM_LIT:0.1>,<EOL>effective_radius: dim.Length = <NUM_LIT>):<DEDENT>", "body": "super(EllipticalDevVaucouleurs, self).__init__(centre=centre, axis_ratio=axis_ratio, phi=phi,<EOL>intensity=intensity, effective_radius=effective_radius,<EOL>sersic_index=<NUM_LIT>)<EOL>", "docstring": "The elliptical Dev Vaucouleurs light profile.\n\n        This is a subset of the elliptical Sersic profile, specific to the case that sersic_index = 4.0.\n\n        Parameters\n        ----------\n        centre : (float, float)\n            The (y,x) arc-second coordinates of the profile centre.\n        axis_ratio : float\n            Ratio of light profiles ellipse's minor and major axes (b/a).\n        phi : float\n            Rotation angle of light profile counter-clockwise from positive x-axis.\n        intensity : float\n            Overall intensity normalisation of the light profiles (electrons per second).\n        effective_radius : float\n            The circular radius containing half the light of this profile.", "id": "f5955:c9:m0"}
{"signature": "def luminosity_within_ellipse_in_units(self, major_axis, unit_luminosity='<STR_LIT>', kpc_per_arcsec=None,<EOL>exposure_time=None):", "body": "if not isinstance(major_axis, dim.Length):<EOL><INDENT>major_axis = dim.Length(major_axis, '<STR_LIT>')<EOL><DEDENT>profile = self.new_profile_with_units_converted(unit_length=major_axis.unit_length,<EOL>unit_luminosity=unit_luminosity,<EOL>kpc_per_arcsec=kpc_per_arcsec, exposure_time=exposure_time)<EOL>luminosity = quad(profile.luminosity_integral, a=<NUM_LIT:0.0>, b=major_axis, args=(self.axis_ratio,))[<NUM_LIT:0>]<EOL>return dim.Luminosity(luminosity, unit_luminosity)<EOL>", "docstring": "Integrate the light profiles to compute the total luminosity within an ellipse of specified major axis. \\\n        This is centred on the light profile's centre.\n\n        The following units for mass can be specified and output:\n\n        - Electrons per second (default) - 'eps'.\n        - Counts - 'counts' (multiplies the luminosity in electrons per second by the exposure time).\n\n        Parameters\n        ----------\n        major_axis : float\n            The major-axis radius of the ellipse.\n        unit_luminosity : str\n            The units the luminosity is returned in (eps | counts).\n        exposure_time : float or None\n            The exposure time of the observation, which converts luminosity from electrons per second units to counts.", "id": "f5955:c1:m2"}
{"signature": "def intensity_at_radius(self, radius):", "body": "return self.intensity * np.exp(<EOL>-self.sersic_constant * (((radius / self.effective_radius) ** (<NUM_LIT:1.> / self.sersic_index)) - <NUM_LIT:1>))<EOL>", "docstring": "Compute the intensity of the profile at a given radius.\n\n        Parameters\n        ----------\n        radius : float\n            The distance from the centre of the profile.", "id": "f5955:c4:m4"}
{"signature": "def luminosity_within_circle_in_units(self, radius: dim.Length, unit_luminosity='<STR_LIT>', kpc_per_arcsec=None,<EOL>exposure_time=None):", "body": "if not isinstance(radius, dim.Length):<EOL><INDENT>radius = dim.Length(value=radius, unit_length='<STR_LIT>')<EOL><DEDENT>profile = self.new_profile_with_units_converted(unit_length=radius.unit_length, unit_luminosity=unit_luminosity,<EOL>kpc_per_arcsec=kpc_per_arcsec, exposure_time=exposure_time)<EOL>luminosity = quad(profile.luminosity_integral, a=<NUM_LIT:0.0>, b=radius, args=(<NUM_LIT:1.0>,))[<NUM_LIT:0>]<EOL>return dim.Luminosity(luminosity, unit_luminosity)<EOL>", "docstring": "Integrate the light profile to compute the total luminosity within a circle of specified radius. This is \\\n        centred on the light profile's centre.\n\n        The following units for mass can be specified and output:\n\n        - Electrons per second (default) - 'eps'.\n        - Counts - 'counts' (multiplies the luminosity in electrons per second by the exposure time).\n\n        Parameters\n        ----------\n        radius : float\n            The radius of the circle to compute the dimensionless mass within.\n        unit_luminosity : str\n            The units the luminosity is returned in (eps | counts).\n        exposure_time : float or None\n            The exposure time of the observation, which converts luminosity from electrons per second units to counts.", "id": "f5955:c1:m1"}
{"signature": "def __init__(self, galaxy_data, mask, sub_grid_size=<NUM_LIT:2>, use_intensities=False, use_convergence=False,<EOL>use_potential=False, use_deflections_y=False, use_deflections_x=False):", "body": "self.image = galaxy_data.image<EOL>self.pixel_scale = galaxy_data.pixel_scale<EOL>self.noise_map = galaxy_data.noise_map<EOL>self.mask = mask<EOL>self.image_1d = mask.map_2d_array_to_masked_1d_array(array_2d=self.image)<EOL>self.noise_map_1d = mask.map_2d_array_to_masked_1d_array(array_2d=self.noise_map)<EOL>self.mask_1d = mask.map_2d_array_to_masked_1d_array(array_2d=mask)<EOL>self.sub_grid_size = sub_grid_size<EOL>self.grid_stack = grids.GridStack.grid_stack_from_mask_sub_grid_size_and_psf_shape(mask=mask,<EOL>sub_grid_size=sub_grid_size,<EOL>psf_shape=(<NUM_LIT:1>, <NUM_LIT:1>))<EOL>self.padded_grid_stack = grids.GridStack.padded_grid_stack_from_mask_sub_grid_size_and_psf_shape(<EOL>mask=mask, sub_grid_size=sub_grid_size, psf_shape=(<NUM_LIT:1>, <NUM_LIT:1>))<EOL>if all(not element for element in [use_intensities, use_convergence, use_potential,<EOL>use_deflections_y, use_deflections_x]):<EOL><INDENT>raise exc.GalaxyException('<STR_LIT>')<EOL><DEDENT>if sum([use_intensities, use_convergence, use_potential, use_deflections_y, use_deflections_x]) > <NUM_LIT:1>:<EOL><INDENT>raise exc.GalaxyException('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>self.use_intensities = use_intensities<EOL>self.use_convergence = use_convergence<EOL>self.use_potential = use_potential<EOL>self.use_deflections_y = use_deflections_y<EOL>self.use_deflections_x = use_deflections_x<EOL>", "docstring": "A galaxy-fit data is a collection of fit data components which are used to fit a galaxy to another galaxy. \\\n        This is where a component of a galaxy's light profiles (e.g. intensities) or mass profiles (e.g. 
surface \\\n        density, potential or deflection angles) are fitted to one another.\n\n        This is primarily performed for automatic prior linking, as a means to efficiently link the priors of a galaxy \\\n        using one inferred parametrization of light or mass profiles to a new galaxy with a different parametrization \\\n        of light or mass profiles.\n\n        This omits a number of the fit data components typically used when fitting an image (e.g. the observed image, PSF, \\\n        exposure time map), but still has a number of the other components (e.g. an effective noise_map-map, grid_stacks).\n\n        Parameters\n        ----------\n        galaxy_data : GalaxyData\n            The collection of data about the galaxy (image of its profile map, noise-map, etc.) that is fitted.\n        mask: msk.Mask\n            The 2D masks that is applied to image fit data.\n        sub_grid_size : int\n            The size of the sub-grid used for computing the SubGrid (see ccd.masks.SubGrid).\n\n        Attributes\n        ----------\n        noise_map_1d : ndarray\n            The masked 1D array of the noise_map-map\n        grid_stacks : ccd.masks.GridStack\n            Grids of (y,x) Cartesian coordinates which map over the masked 1D fit data array's pixels (includes an \\\n            regular-grid, sub-grid, etc.)\n        padded_grid_stack : ccd.masks.GridStack\n            Grids of padded (y,x) Cartesian coordinates which map over the every fit data array's pixel in 1D and a \\\n            padded regioon to include edge's for accurate PSF convolution (includes an regular-grid, sub-grid, etc.)", "id": "f5957:c1:m0"}
{"signature": "def deflections_from_grid(self, grid):", "body": "if self.has_mass_profile:<EOL><INDENT>return sum(map(lambda p: p.deflections_from_grid(grid), self.mass_profiles))<EOL><DEDENT>else:<EOL><INDENT>return np.full((grid.shape[<NUM_LIT:0>], <NUM_LIT:2>), <NUM_LIT:0.0>)<EOL><DEDENT>", "docstring": "Compute the summed (y,x) deflection angles of the galaxy's mass profiles using a grid of Cartesian (y,x) \\\n        coordinates.\n\n        If the galaxy has no mass profiles, two grid of zeros are returned.\n\n        See *profiles.mass_profiles* module for details of how this is performed.\n\n        Parameters\n        ----------\n        grid : ndarray\n            The (y, x) coordinates in the original reference frame of the grid.", "id": "f5958:c0:m17"}
{"signature": "def hyper_noise_from_contributions(self, noise_map, contributions):", "body": "return self.noise_factor * (noise_map * contributions) ** self.noise_power<EOL>", "docstring": "Compute a scaled galaxy hyper noise-map from a baseline noise-map.\n\n        This uses the galaxy contribution map and the *noise_factor* and *noise_power* hyper-parameters.\n\n        Parameters\n        -----------\n        noise_map : ndarray\n            The observed noise-map (before scaling).\n        contributions : ndarray\n            The galaxy contribution map.", "id": "f5958:c1:m3"}
{"signature": "def convergence_from_grid(self, grid):", "body": "if self.has_mass_profile:<EOL><INDENT>return sum(map(lambda p: p.convergence_from_grid(grid), self.mass_profiles))<EOL><DEDENT>else:<EOL><INDENT>return np.zeros((grid.shape[<NUM_LIT:0>],))<EOL><DEDENT>", "docstring": "Compute the summed convergence of the galaxy's mass profiles using a grid of Cartesian (y,x) \\\n        coordinates.\n\n        If the galaxy has no mass profiles, a grid of zeros is returned.\n\n        See *profiles.mass_profiles* module for details of how this is performed.\n\n        Parameters\n        ----------\n        grid : ndarray\n            The (y, x) coordinates in the original reference frame of the grid.", "id": "f5958:c0:m15"}
{"signature": "def __init__(self, contribution_factor=<NUM_LIT:0.0>, noise_factor=<NUM_LIT:0.0>, noise_power=<NUM_LIT:1.0>):", "body": "self.contribution_factor = contribution_factor<EOL>self.noise_factor = noise_factor<EOL>self.noise_power = noise_power<EOL>self.component_number = next(self._ids)<EOL>", "docstring": "If a *Galaxy* is given a *HyperGalaxy* as an attribute, the noise-map in the regions of the image that the \\\n        galaxy is located will be scaled, to prevent over-fitting of the galaxy. \n\n        This is performed by first computing the hyper-galaxy's 'contribution-map', which determines the fraction of \\\n        flux in every pixel of the image that can be associated with this particular hyper-galaxy. This is computed \\\n        using  hyper-hyper set (e.g. fitting.fit_data.FitDataHyper), which includes  best-fit unblurred_image_1d of \\\n        the galaxy's light from a previous analysis phase.\n\n        The *HyperGalaxy* class contains the hyper-parameters which are associated with this galaxy for scaling the \\\n        noise-map.\n\n        Parameters\n        -----------\n        contribution_factor : float\n            Factor that adjusts how much of the galaxy's light is attributed to the contribution map.\n        noise_factor : float\n            Factor by which the noise-map is increased in the regions of the galaxy's contribution map.\n        noise_power : float\n            The power to which the contribution map is raised when scaling the noise-map.", "id": "f5958:c1:m0"}
{"signature": "@grids.sub_to_image_grid<EOL>def convergence_of_galaxies_from_grid(grid, galaxies):", "body": "if galaxies:<EOL><INDENT>return sum(map(lambda g: g.convergence_from_grid(grid), galaxies))<EOL><DEDENT>else:<EOL><INDENT>return np.full((grid.shape[<NUM_LIT:0>]), <NUM_LIT:0.0>)<EOL><DEDENT>", "docstring": "Compute the convergence of a list of galaxies from an input grid, by summing the individual convergence \\\n    of each galaxy's mass profile.\n\n    If the input grid is a *grids.SubGrid*, the convergence is calculated on the sub-grid and binned-up to the \\\n    original regular grid by taking the mean value of every set of sub-pixels.\n\n    If no galaxies are entered into the function, an array of all zeros is returned.\n\n    Parameters\n    -----------\n    grid : RegularGrid\n        The grid (regular or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \\\n        convergence is calculated on.\n    galaxies : [galaxy.Galaxy]\n        The galaxies whose mass profiles are used to compute the convergence.", "id": "f5959:m1"}
{"signature": "def deflections_of_galaxies_from_sub_grid(sub_grid, galaxies):", "body": "if galaxies:<EOL><INDENT>return sum(map(lambda galaxy: galaxy.deflections_from_grid(sub_grid), galaxies))<EOL><DEDENT>else:<EOL><INDENT>return np.full((sub_grid.shape[<NUM_LIT:0>], <NUM_LIT:2>), <NUM_LIT:0.0>)<EOL><DEDENT>", "docstring": "Compute the deflections of a list of galaxies from an input sub-grid, by summing the individual deflections \\\n    of each galaxy's mass profile.\n\n    The deflections are calculated on the sub-grid and binned-up to the original regular grid by taking the mean value \\\n    of every set of sub-pixels.\n\n    If no galaxies are entered into the function, an array of all zeros is returned.\n\n    Parameters\n    -----------\n    sub_grid : RegularGrid\n        The grid (regular or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \\\n        deflections is calculated on.\n    galaxies : [galaxy.Galaxy]\n        The galaxies whose mass profiles are used to compute the surface densities.", "id": "f5959:m4"}
{"signature": "def deflections_of_galaxies_from_grid(grid, galaxies):", "body": "if len(galaxies) > <NUM_LIT:0>:<EOL><INDENT>deflections = sum(map(lambda galaxy: galaxy.deflections_from_grid(grid), galaxies))<EOL><DEDENT>else:<EOL><INDENT>deflections = np.full((grid.shape[<NUM_LIT:0>], <NUM_LIT:2>), <NUM_LIT:0.0>)<EOL><DEDENT>if isinstance(grid, grids.SubGrid):<EOL><INDENT>return np.asarray([grid.regular_data_1d_from_sub_data_1d(deflections[:, <NUM_LIT:0>]),<EOL>grid.regular_data_1d_from_sub_data_1d(deflections[:, <NUM_LIT:1>])]).T<EOL><DEDENT>return deflections<EOL>", "docstring": "Compute the deflections of a list of galaxies from an input grid, by summing the individual deflections \\\n    of each galaxy's mass profile.\n\n    If the input grid is a *grids.SubGrid*, the potential is calculated on the sub-grid and binned-up to the \\\n    original regular grid by taking the mean value of every set of sub-pixels.\n\n    If no galaxies are entered into the function, an array of all zeros is returned.\n\n    Parameters\n    -----------\n    grid : RegularGrid\n        The grid (regular or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \\\n        deflections is calculated on.\n    galaxies : [galaxy.Galaxy]\n        The galaxies whose mass profiles are used to compute the surface densities.", "id": "f5959:m3"}
{"signature": "def plot_potential(<EOL>galaxy, grid, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT>, grid_pointsize=<NUM_LIT:1>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "potential = galaxy.potential_from_grid(grid=grid)<EOL>potential = grid.scaled_array_2d_from_array_1d(potential)<EOL>array_plotters.plot_array(<EOL>array=potential, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize, grid_pointsize=grid_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the potential of a galaxy, on a regular grid of (y,x) coordinates.\n\n     Set *autolens.datas.array.plotters.array_plotters* for a description of all innput parameters not described below.\n\n     Parameters\n     -----------\n    galaxy : 
model.galaxy.galaxy.Galaxy\n         The galaxy whose potential is plotted.\n    grid : ndarray or datas.array.grid_stacks.RegularGrid\n         The (y,x) coordinates of the grid, in an array of shape (total_coordinates, 2)", "id": "f5960:m2"}
{"signature": "def plot_convergence(<EOL>galaxy, grid, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT>, grid_pointsize=<NUM_LIT:1>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "convergence = galaxy.convergence_from_grid(grid=grid)<EOL>convergence = grid.scaled_array_2d_from_array_1d(convergence)<EOL>array_plotters.plot_array(<EOL>array=convergence, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize, grid_pointsize=grid_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the convergence of a galaxy, on a regular grid of (y,x) coordinates.\n\n    Set *autolens.datas.array.plotters.array_plotters* for a description of all innput parameters not described below.\n\n    Parameters\n    -----------\n    galaxy : 
model.galaxy.galaxy.Galaxy\n        The galaxy whose convergence is plotted.\n    grid : ndarray or datas.array.grid_stacks.RegularGrid\n        The (y,x) coordinates of the grid, in an array of shape (total_coordinates, 2)", "id": "f5960:m1"}
{"signature": "@property<EOL><INDENT>@cast_collection(PriorNameValue)<EOL>def prior_tuples(self):<DEDENT>", "body": "return [prior for prior_model in self.prior_models for prior in prior_model.prior_tuples]<EOL>", "docstring": "Returns\n-------\npriors: [PriorTuple]\n    A list of priors associated with prior models in this galaxy prior.", "id": "f5963:c0:m9"}
{"signature": "@property<EOL><INDENT>def profile_prior_model_dict(self):<DEDENT>", "body": "return {key: value for key, value in<EOL>filter(lambda t: isinstance(t[<NUM_LIT:1>], pm.PriorModel) and is_profile_class(t[<NUM_LIT:1>].cls),<EOL>self.__dict__.items())}<EOL>", "docstring": "Returns\n-------\nprofile_prior_model_dict: {str: PriorModel}\n    A dictionary mapping_matrix instance variable names to variable profiles.", "id": "f5963:c0:m5"}
{"signature": "def instance_for_arguments(self, arguments):", "body": "profiles = {**{key: value.instance_for_arguments(arguments)<EOL>for key, value<EOL>in self.profile_prior_model_dict.items()}, **self.constant_profile_dict}<EOL>try:<EOL><INDENT>redshift = self.redshift.instance_for_arguments(arguments)<EOL><DEDENT>except AttributeError:<EOL><INDENT>redshift = self.redshift<EOL><DEDENT>pixelization = self.pixelization.instance_for_arguments(arguments)if isinstance(self.pixelization, pm.PriorModel)else self.pixelization<EOL>regularization = self.regularization.instance_for_arguments(arguments)if isinstance(self.regularization, pm.PriorModel)else self.regularization<EOL>hyper_galaxy = self.hyper_galaxy.instance_for_arguments(arguments)if isinstance(self.hyper_galaxy, pm.PriorModel)else self.hyper_galaxy<EOL>return galaxy.Galaxy(redshift=redshift, pixelization=pixelization, regularization=regularization,<EOL>hyper_galaxy=hyper_galaxy, **profiles)<EOL>", "docstring": "Create an instance of the associated class for a set of arguments\n\nParameters\n----------\narguments: {Prior: value}\n    Dictionary mapping_matrix priors to attribute analysis_path and value pairs\n\nReturns\n-------\n    An instance of the class", "id": "f5963:c0:m12"}
{"signature": "def bulge_disk_tag_from_align_bulge_disks(align_bulge_disk_centre, align_bulge_disk_axis_ratio, align_bulge_disk_phi):", "body": "align_bulge_disk_centre_tag = align_bulge_disk_centre_tag_from_align_bulge_disk_centre(<EOL>align_bulge_disk_centre=align_bulge_disk_centre)<EOL>align_bulge_disk_axis_ratio_tag = align_bulge_disk_axis_ratio_tag_from_align_bulge_disk_axis_ratio(<EOL>align_bulge_disk_axis_ratio=align_bulge_disk_axis_ratio)<EOL>align_bulge_disk_phi_tag = align_bulge_disk_phi_tag_from_align_bulge_disk_phi(<EOL>align_bulge_disk_phi=align_bulge_disk_phi)<EOL>return align_bulge_disk_centre_tag + align_bulge_disk_axis_ratio_tag + align_bulge_disk_phi_tag<EOL>", "docstring": "Generate a tag for the alignment of the geometry of the bulge and disk of a bulge-disk system, to customize \\ \n    phase names based on the bulge-disk model. This adds together the bulge_disk tags generated in the 3 functions\n    above", "id": "f5966:m12"}
{"signature": "def align_bulge_disk_centre_tag_from_align_bulge_disk_centre(align_bulge_disk_centre):", "body": "if not align_bulge_disk_centre:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif align_bulge_disk_centre:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>", "docstring": "Generate a tag for if the bulge and disk of a bulge-disk system are aligned or not, to customize phase names \\\n    based on the bulge-disk model. This changee the phase name 'phase_name' as follows:\n\n    bd_align_centres = False -> phase_name\n    bd_align_centres = True -> phase_name_bd_align_centres", "id": "f5966:m9"}
{"signature": "def image_psf_shape_tag_from_image_psf_shape(image_psf_shape):", "body": "if image_psf_shape is None:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>y = str(image_psf_shape[<NUM_LIT:0>])<EOL>x = str(image_psf_shape[<NUM_LIT:1>])<EOL>return ('<STR_LIT>' + y + '<STR_LIT:x>' + x)<EOL><DEDENT>", "docstring": "Generate an image psf shape tag, to customize phase names based on size of the image PSF that the original PSF \\\n    is trimmed to for faster run times.\n\n    This changes the phase name 'phase_name' as follows:\n\n    image_psf_shape = 1 -> phase_name\n    image_psf_shape = 2 -> phase_name_image_psf_shape_2\n    image_psf_shape = 2 -> phase_name_image_psf_shape_2", "id": "f5966:m4"}
{"signature": "def sub_grid_size_tag_from_sub_grid_size(sub_grid_size):", "body": "return '<STR_LIT>' + str(sub_grid_size)<EOL>", "docstring": "Generate a sub-grid tag, to customize phase names based on the sub-grid size used.\n\n    This changes the phase name 'phase_name' as follows:\n\n    sub_grid_size = None -> phase_name\n    sub_grid_size = 1 -> phase_name_sub_grid_size_2\n    sub_grid_size = 4 -> phase_name_sub_grid_size_4", "id": "f5966:m2"}
{"signature": "def inner_mask_radii_tag_from_inner_circular_mask_radii(inner_mask_radii):", "body": "if inner_mask_radii == None:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>'.format(inner_mask_radii)<EOL><DEDENT>", "docstring": "Generate an inner mask radii tag, to customize phase names based on the size of the circular masked area in the \\\n    centre of an image.\n\n    This changes the phase name 'phase_name' as follows:\n\n    inner_circular_mask_radii = 1 -> phase_name\n    inner_circular_mask_radii = 2 -> phase_name_inner_circular_mask_radii_2\n    inner_circular_mask_radii = 2 -> phase_name_inner_circular_mask_radii_2", "id": "f5966:m3"}
{"signature": "def align_bulge_disk_phi_tag_from_align_bulge_disk_phi(align_bulge_disk_phi):", "body": "if not align_bulge_disk_phi:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif align_bulge_disk_phi:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>", "docstring": "Generate a tag for if the bulge and disk of a bulge-disk system are aligned or not, to customize phase names \\\n    based on the bulge-disk model. This changes the phase name 'phase_name' as follows:\n\n    bd_align_phi = False -> phase_name\n    bd_align_phi = True -> phase_name_bd_align_phi", "id": "f5966:m11"}
{"signature": "def fix_lens_light_tag_from_fix_lens_light(fix_lens_light):", "body": "if not fix_lens_light:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif fix_lens_light:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>", "docstring": "Generate a tag for if the lens light of the pipeline and / or phase are fixed to a previous estimate, or varied \\\n     during he analysis, to customize phase names.\n\n    This changes the phase name 'phase_name' as follows:\n\n    fix_lens_light = False -> phase_name\n    fix_lens_light = True -> phase_name_fix_lens_light", "id": "f5966:m8"}
{"signature": "def run(self, positions, pixel_scale, results=None):", "body": "analysis = self.make_analysis(positions=positions, pixel_scale=pixel_scale, results=results)<EOL>result = self.run_analysis(analysis)<EOL>return self.make_result(result, analysis)<EOL>", "docstring": "Run this phase.\n\nParameters\n----------\npixel_scale\npositions\nresults: autofit.tools.pipeline.ResultsCollection\n    An object describing the results of the last phase or None if no phase has been executed\n\nReturns\n-------\nresult: AbstractPhase.Result\n    A result object comprising the best fit model and other hyper.", "id": "f5967:c2:m2"}
{"signature": "def set_defaults(key):", "body": "def decorator(func):<EOL><INDENT>@functools.wraps(func)<EOL>def wrapper(phase, new_value):<EOL><INDENT>new_value = new_value or []<EOL>for item in new_value:<EOL><INDENT>galaxy = new_value[item] if isinstance(item, str) else item<EOL>galaxy.redshift = galaxy.redshift or conf.instance.general.get(\"<STR_LIT>\", key, float)<EOL><DEDENT>return func(phase, new_value)<EOL><DEDENT>return wrapper<EOL><DEDENT>return decorator<EOL>", "docstring": "Load a default value for redshift from config and set it as the redshift for source or lens galaxies that have\nfalsey redshifts\n\nParameters\n----------\nkey: str\n\nReturns\n-------\ndecorator\n    A decorator that wraps the setter function to set defaults", "id": "f5967:m2"}
{"signature": "def __init__(self, phase_name, tag_phases=True, phase_folders=None, galaxies=None,<EOL>optimizer_class=non_linear.MultiNest,<EOL>sub_grid_size=<NUM_LIT:2>, bin_up_factor=None, image_psf_shape=None, positions_threshold=None,<EOL>mask_function=None,<EOL>inner_mask_radii=None, cosmology=cosmo.Planck15, auto_link_priors=False):", "body": "super(MultiPlanePhase, self).__init__(phase_name=phase_name,<EOL>tag_phases=tag_phases,<EOL>phase_folders=phase_folders,<EOL>optimizer_class=optimizer_class,<EOL>sub_grid_size=sub_grid_size,<EOL>bin_up_factor=bin_up_factor,<EOL>image_psf_shape=image_psf_shape,<EOL>positions_threshold=positions_threshold,<EOL>mask_function=mask_function,<EOL>inner_mask_radii=inner_mask_radii,<EOL>cosmology=cosmology,<EOL>auto_link_priors=auto_link_priors)<EOL>self.galaxies = galaxies<EOL>", "docstring": "A phase with a simple source/lens model\n\nParameters\n----------\ngalaxies : [g.Galaxy] | [gm.GalaxyModel]\n    A galaxy that acts as a gravitational lens or is being lensed\noptimizer_class: class\n    The class of a non-linear optimizer\nsub_grid_size: int\n    The side length of the subgrid", "id": "f5967:c6:m1"}
{"signature": "def make_analysis(self, galaxy_data, results=None, mask=None):", "body": "mask = setup_phase_mask(data=galaxy_data[<NUM_LIT:0>], mask=mask, mask_function=self.mask_function,<EOL>inner_mask_radii=None)<EOL>self.pass_priors(results)<EOL>if self.use_intensities or self.use_convergence or self.use_potential:<EOL><INDENT>galaxy_data = gd.GalaxyFitData(galaxy_data=galaxy_data[<NUM_LIT:0>], mask=mask, sub_grid_size=self.sub_grid_size,<EOL>use_intensities=self.use_intensities,<EOL>use_convergence=self.use_convergence,<EOL>use_potential=self.use_potential,<EOL>use_deflections_y=self.use_deflections,<EOL>use_deflections_x=self.use_deflections)<EOL>return self.__class__.AnalysisSingle(galaxy_data=galaxy_data,<EOL>cosmology=self.cosmology,<EOL>results=results)<EOL><DEDENT>elif self.use_deflections:<EOL><INDENT>galaxy_data_y = gd.GalaxyFitData(galaxy_data=galaxy_data[<NUM_LIT:0>], mask=mask, sub_grid_size=self.sub_grid_size,<EOL>use_intensities=self.use_intensities,<EOL>use_convergence=self.use_convergence,<EOL>use_potential=self.use_potential,<EOL>use_deflections_y=self.use_deflections, use_deflections_x=False)<EOL>galaxy_data_x = gd.GalaxyFitData(galaxy_data=galaxy_data[<NUM_LIT:1>], mask=mask, sub_grid_size=self.sub_grid_size,<EOL>use_intensities=self.use_intensities,<EOL>use_convergence=self.use_convergence,<EOL>use_potential=self.use_potential,<EOL>use_deflections_y=False, use_deflections_x=self.use_deflections)<EOL>return self.__class__.AnalysisDeflections(galaxy_data_y=galaxy_data_y, galaxy_data_x=galaxy_data_x,<EOL>cosmology=self.cosmology,<EOL>results=results)<EOL><DEDENT>", "docstring": "Create an lens object. 
Also calls the prior passing and lens_data modifying functions to allow child\nclasses to change the behaviour of the phase.\n\nParameters\n----------\ngalaxy_data\nmask: Mask\n    The default masks passed in by the pipeline\nresults: autofit.tools.pipeline.ResultsCollection\n    The result from the previous phase\n\nReturns\n-------\nlens: Analysis\n    An lens object that the non-linear optimizer calls to determine the fit of a set of values", "id": "f5967:c7:m2"}
{"signature": "def run(self, data, results=None, mask=None, positions=None):", "body": "model_image = results.last.unmasked_model_image<EOL>galaxy_tuples = results.last.constant.name_instance_tuples_for_class(g.Galaxy)<EOL>results_copy = copy.copy(results.last)<EOL>for name, galaxy in galaxy_tuples:<EOL><INDENT>optimizer = self.optimizer.copy_with_name_extension(name)<EOL>optimizer.variable.hyper_galaxy = g.HyperGalaxy<EOL>galaxy_image = results.last.unmasked_image_for_galaxy(galaxy)<EOL>optimizer.fit(self.__class__.Analysis(data, model_image, galaxy_image))<EOL>getattr(results_copy.variable, name).hyper_galaxy = optimizer.variable.hyper_galaxy<EOL>getattr(results_copy.constant, name).hyper_galaxy = optimizer.constant.hyper_galaxy<EOL><DEDENT>return results_copy<EOL>", "docstring": "Run a fit for each galaxy from the previous phase.\n\nParameters\n----------\ndata: LensData\nresults: ResultsCollection\n    Results from all previous phases\nmask: Mask\n    The mask\npositions\n\nReturns\n-------\nresults: HyperGalaxyResults\n    A collection of results, with one item per a galaxy", "id": "f5967:c9:m0"}
{"signature": "def __init__(self, phase_name, tag_phases=True, phase_folders=None, optimizer_class=non_linear.MultiNest,<EOL>sub_grid_size=<NUM_LIT:2>, bin_up_factor=None, image_psf_shape=None,<EOL>inversion_psf_shape=None, positions_threshold=None, mask_function=None, inner_mask_radii=None,<EOL>interp_pixel_scale=None, cosmology=cosmo.Planck15, auto_link_priors=False):", "body": "if tag_phases:<EOL><INDENT>phase_tag = tag.phase_tag_from_phase_settings(sub_grid_size=sub_grid_size,<EOL>bin_up_factor=bin_up_factor,<EOL>image_psf_shape=image_psf_shape,<EOL>inversion_psf_shape=inversion_psf_shape,<EOL>positions_threshold=positions_threshold,<EOL>inner_mask_radii=inner_mask_radii,<EOL>interp_pixel_scale=interp_pixel_scale)<EOL><DEDENT>else:<EOL><INDENT>phase_tag = None<EOL><DEDENT>super(PhaseImaging, self).__init__(phase_name=phase_name, phase_tag=phase_tag, phase_folders=phase_folders,<EOL>tag_phases=tag_phases,<EOL>optimizer_class=optimizer_class, cosmology=cosmology,<EOL>auto_link_priors=auto_link_priors)<EOL>self.sub_grid_size = sub_grid_size<EOL>self.bin_up_factor = bin_up_factor<EOL>self.image_psf_shape = image_psf_shape<EOL>self.inversion_psf_shape = inversion_psf_shape<EOL>self.positions_threshold = positions_threshold<EOL>self.mask_function = mask_function<EOL>self.inner_mask_radii = inner_mask_radii<EOL>self.interp_pixel_scale = interp_pixel_scale<EOL>", "docstring": "A phase in an lens pipeline. Uses the set non_linear optimizer to try to fit models and hyper\npassed to it.\n\nParameters\n----------\noptimizer_class: class\n    The class of a non_linear optimizer\nsub_grid_size: int\n    The side length of the subgrid", "id": "f5967:c3:m0"}
{"signature": "def modify_image(self, image, results):", "body": "return image<EOL>", "docstring": "Customize an lens_data. e.g. removing lens light.\n\nParameters\n----------\nimage: scaled_array.ScaledSquarePixelArray\n    An lens_data that has been masked\nresults: autofit.tools.pipeline.ResultsCollection\n    The result of the previous lens\n\nReturns\n-------\nlens_data: scaled_array.ScaledSquarePixelArray\n    The modified image (not changed by default)", "id": "f5967:c3:m1"}
{"signature": "@property<EOL><INDENT>def variable(self):<DEDENT>", "body": "return self.optimizer.variable<EOL>", "docstring": "Convenience method\n\nReturns\n-------\nModelMapper\n    A model mapper comprising all the variable (prior) objects in this lens", "id": "f5967:c0:m2"}
{"signature": "def __init__(self, phase_name, phase_tag=None, phase_folders=None, tag_phases=True,<EOL>optimizer_class=non_linear.MultiNest,<EOL>cosmology=cosmo.Planck15, auto_link_priors=False):", "body": "super().__init__(phase_name=phase_name, phase_tag=phase_tag, phase_folders=phase_folders, tag_phases=tag_phases,<EOL>optimizer_class=optimizer_class, auto_link_priors=auto_link_priors)<EOL>self.cosmology = cosmology<EOL>", "docstring": "A phase in an lens pipeline. Uses the set non_linear optimizer to try to fit models and hyper\npassed to it.\n\nParameters\n----------\noptimizer_class: class\n    The class of a non_linear optimizer\nphase_name: str\n    The name of this phase", "id": "f5967:c0:m0"}
{"signature": "def __init__(self, phase_name, phase_tagging=True, phase_folders=None, galaxies=None, use_intensities=False,<EOL>use_convergence=False,<EOL>use_potential=False,<EOL>use_deflections=False, optimizer_class=non_linear.MultiNest, sub_grid_size=<NUM_LIT:2>,<EOL>mask_function=None, cosmology=cosmo.Planck15):", "body": "super(GalaxyFitPhase, self).__init__(phase_name=phase_name, phase_tagging=phase_tagging,<EOL>phase_folders=phase_folders,<EOL>optimizer_class=optimizer_class, cosmology=cosmology)<EOL>self.use_intensities = use_intensities<EOL>self.use_convergence = use_convergence<EOL>self.use_potential = use_potential<EOL>self.use_deflections = use_deflections<EOL>self.galaxies = galaxies<EOL>self.sub_grid_size = sub_grid_size<EOL>self.mask_function = mask_function<EOL>", "docstring": "A phase in an lens pipeline. Uses the set non_linear optimizer to try to fit models and hyper\npassed to it.\n\nParameters\n----------\noptimizer_class: class\n    The class of a non_linear optimizer\nsub_grid_size: int\n    The side length of the subgrid", "id": "f5967:c7:m0"}
{"signature": "def make_analysis(self, data, results=None, mask=None, positions=None):", "body": "mask = setup_phase_mask(data=data, mask=mask, mask_function=self.mask_function,<EOL>inner_mask_radii=self.inner_mask_radii)<EOL>if self.positions_threshold is not None and positions is not None:<EOL><INDENT>positions = list(map(lambda position_set: np.asarray(position_set), positions))<EOL><DEDENT>elif self.positions_threshold is None:<EOL><INDENT>positions = None<EOL><DEDENT>elif self.positions_threshold is not None and positions is None:<EOL><INDENT>raise exc.PhaseException('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>lens_data = li.LensData(ccd_data=data, mask=mask, sub_grid_size=self.sub_grid_size,<EOL>image_psf_shape=self.image_psf_shape, positions=positions,<EOL>interp_pixel_scale=self.interp_pixel_scale)<EOL>modified_image = self.modify_image(image=lens_data.image, results=results)<EOL>lens_data = lens_data.new_lens_data_with_modified_image(modified_image=modified_image)<EOL>if self.bin_up_factor is not None:<EOL><INDENT>lens_data = lens_data.new_lens_data_with_binned_up_ccd_data_and_mask(bin_up_factor=self.bin_up_factor)<EOL><DEDENT>self.pass_priors(results)<EOL>self.output_phase_info()<EOL>analysis = self.__class__.Analysis(lens_data=lens_data, cosmology=self.cosmology,<EOL>positions_threshold=self.positions_threshold, results=results)<EOL>return analysis<EOL>", "docstring": "Create an lens object. Also calls the prior passing and lens_data modifying functions to allow child\nclasses to change the behaviour of the phase.\n\nParameters\n----------\npositions\nmask: Mask\n    The default masks passed in by the pipeline\ndata: im.CCD\n    An lens_data that has been masked\nresults: autofit.tools.pipeline.ResultsCollection\n    The result from the previous phase\n\nReturns\n-------\nlens : Analysis\n    An lens object that the non-linear optimizer calls to determine the fit of a set of values", "id": "f5967:c3:m3"}
{"signature": "def run(self, galaxy_data, results=None, mask=None):", "body": "analysis = self.make_analysis(galaxy_data=galaxy_data, results=results, mask=mask)<EOL>result = self.run_analysis(analysis)<EOL>return self.make_result(result, analysis)<EOL>", "docstring": "Run this phase.\n\nParameters\n----------\ngalaxy_data\nmask: Mask\n    The default masks passed in by the pipeline\nresults: autofit.tools.pipeline.ResultsCollection\n    An object describing the results of the last phase or None if no phase has been executed\n\nReturns\n-------\nresult: AbstractPhase.Result\n    A result object comprising the best fit model and other hyper.", "id": "f5967:c7:m1"}
{"signature": "def __init__(self, phase_name, tag_phases=None, phase_folders=None, lens_galaxies=None, source_galaxies=None,<EOL>sensitive_galaxies=None,<EOL>optimizer_class=non_linear.MultiNest, sub_grid_size=<NUM_LIT:2>, bin_up_factor=None, mask_function=None,<EOL>cosmology=cosmo.Planck15):", "body": "super(SensitivityPhase, self).__init__(phase_name=phase_name, tag_phases=tag_phases,<EOL>phase_folders=phase_folders,<EOL>optimizer_class=optimizer_class, sub_grid_size=sub_grid_size,<EOL>bin_up_factor=bin_up_factor, mask_function=mask_function,<EOL>cosmology=cosmology)<EOL>self.lens_galaxies = lens_galaxies or []<EOL>self.source_galaxies = source_galaxies or []<EOL>self.sensitive_galaxies = sensitive_galaxies or []<EOL>", "docstring": "A phase in an lens pipeline. Uses the set non_linear optimizer to try to fit models and hyper\npassed to it.\n\nParameters\n----------\noptimizer_class: class\n    The class of a non_linear optimizer\nsub_grid_size: int\n    The side length of the subgrid", "id": "f5967:c8:m0"}
{"signature": "def __init__(self, ccd_data, mask, hyper_model_image, hyper_galaxy_images, hyper_minimum_values, sub_grid_size=<NUM_LIT:2>,<EOL>image_psf_shape=None, inversion_psf_shape=None, positions=None, interp_pixel_scale=None):", "body": "super().__init__(ccd_data=ccd_data, mask=mask, sub_grid_size=sub_grid_size, image_psf_shape=image_psf_shape,<EOL>inversion_psf_shape=inversion_psf_shape, positions=positions,<EOL>interp_pixel_scale=interp_pixel_scale)<EOL>self.hyper_model_image = hyper_model_image<EOL>self.hyper_galaxy_images = hyper_galaxy_images<EOL>self.hyper_minimum_values = hyper_minimum_values<EOL>self.hyper_model_image_1d = mask.map_2d_array_to_masked_1d_array(array_2d=hyper_model_image)<EOL>self.hyper_galaxy_images_1d = list(map(lambda hyper_galaxy_image :<EOL>mask.map_2d_array_to_masked_1d_array(hyper_galaxy_image),<EOL>hyper_galaxy_images))<EOL>", "docstring": "The lens data is the collection of data (image, noise-map, PSF), a mask, grid_stack, convolver \\\nand other utilities that are used for modeling and fitting an image of a strong lens.\n\nWhilst the image, noise-map, etc. are loaded in 2D, the lens data creates reduced 1D arrays of each \\\nfor lensing calculations.\n\nLens hyper-data includes the hyper-images necessary for changing different aspects of the data that is fitted \\\nin a Bayesian framework, for example the background-sky subtraction and noise-map.\n\nParameters\n----------\nccd_data: im.CCD\n    The ccd data all in 2D (the image, noise-map, PSF, etc.)\nmask: msk.Mask\n    The 2D mask that is applied to the image.\nsub_grid_size : int\n    The size of the sub-grid used for each lens SubGrid. E.g. a value of 2 grid_stack each image-pixel on a 2x2 \\\n    sub-grid.\nimage_psf_shape : (int, int)\n    The shape of the PSF used for convolving model image generated using analytic light profiles. 
A smaller \\\n    shape will trim the PSF relative to the input image PSF, giving a faster analysis run-time.\ninversion_psf_shape : (int, int)\n    The shape of the PSF used for convolving the inversion mapping matrix. A smaller \\\n    shape will trim the PSF relative to the input image PSF, giving a faster analysis run-time.\npositions : [[]]\n    Lists of image-pixel coordinates (arc-seconds) that mappers close to one another in the source-plane(s), used \\\n    to speed up the non-linear sampling.\ninterp_pixel_scale : float\n    If *True*, expensive to compute mass profile deflection angles will be computed on a sparse grid and \\\n    interpolated to the regular, sub and blurring grids.", "id": "f5970:c1:m0"}
{"signature": "def masses_of_galaxies_within_circles_in_units(self, radius : dim.Length, unit_mass='<STR_LIT>',<EOL>critical_surface_density=None):", "body": "return list(map(lambda galaxy: galaxy.mass_within_circle_in_units(<EOL>radius=radius, unit_mass=unit_mass, kpc_per_arcsec=self.kpc_per_arcsec,<EOL>critical_surface_density=critical_surface_density),<EOL>self.galaxies))<EOL>", "docstring": "Compute the total mass of all galaxies in this plane within a circle of specified radius.\n\n        See *galaxy.angular_mass_within_circle* and *mass_profiles.angular_mass_within_circle* for details\n        of how this is performed.\n\n        Parameters\n        ----------\n        radius : float\n            The radius of the circle to compute the dimensionless mass within.\n        units_mass : str\n            The units the mass is returned in (angular | solMass).\n        critical_surface_density : float\n            The critical surface mass density of the strong lens configuration, which converts mass from angulalr \\\n            units to physical units (e.g. solar masses).", "id": "f5971:c0:m16"}
{"signature": "@property<EOL><INDENT>def yticks(self):<DEDENT>", "body": "return np.linspace(np.amin(self.grid_stack.regular[:, <NUM_LIT:0>]), np.amax(self.grid_stack.regular[:, <NUM_LIT:0>]), <NUM_LIT:4>)<EOL>", "docstring": "Compute the yticks labels of this grid_stack, used for plotting the y-axis ticks when visualizing an image \\", "id": "f5971:c1:m16"}
{"signature": "def __init__(self, redshift, galaxies, positions, compute_deflections=True, cosmology=None):", "body": "self.redshift = redshift<EOL>self.galaxies = galaxies<EOL>self.positions = positions<EOL>if compute_deflections:<EOL><INDENT>def calculate_deflections(pos):<EOL><INDENT>return sum(map(lambda galaxy: galaxy.deflections_from_grid(pos), galaxies))<EOL><DEDENT>self.deflections = list(map(lambda pos: calculate_deflections(pos), self.positions))<EOL><DEDENT>self.cosmology = cosmology<EOL>", "docstring": "A plane represents a set of galaxies at a given redshift in a ray-tracer_normal and the positions of image-plane \\\n        coordinates which mappers close to one another in the source-plane.\n\n        Parameters\n        -----------\n        galaxies : [Galaxy]\n            The list of lens galaxies in this plane.\n        positions : [[[]]]\n            The (y,x) arc-second coordinates of image-plane pixels which (are expected to) mappers to the same\n            location(s) in the final source-plane.\n        compute_deflections : bool\n            If true, the deflection-angles of this plane's coordinates are calculated use its galaxy's mass-profiles.", "id": "f5971:c4:m0"}
{"signature": "def trace_to_next_plane(self):", "body": "return list(map(lambda positions, deflections: np.subtract(positions, deflections),<EOL>self.positions, self.deflections))<EOL>", "docstring": "Trace the positions to the next plane.", "id": "f5971:c4:m1"}
{"signature": "@property<EOL><INDENT>def xticks(self):<DEDENT>", "body": "return np.linspace(np.amin(self.grid_stack.regular[:, <NUM_LIT:1>]), np.amax(self.grid_stack.regular[:, <NUM_LIT:1>]), <NUM_LIT:4>)<EOL>", "docstring": "Compute the xticks labels of this grid_stack, used for plotting the x-axis ticks when visualizing an \\\n        image", "id": "f5971:c1:m17"}
{"signature": "def masses_of_galaxies_within_ellipses_in_units(self, major_axis : dim.Length, unit_mass='<STR_LIT>',<EOL>critical_surface_density=None):", "body": "return list(map(lambda galaxy: galaxy.mass_within_ellipse_in_units(<EOL>major_axis=major_axis, unit_mass=unit_mass, kpc_per_arcsec=self.kpc_per_arcsec,<EOL>critical_surface_density=critical_surface_density),<EOL>self.galaxies))<EOL>", "docstring": "Compute the total mass of all galaxies in this plane within a ellipse of specified major-axis.\n\n        See *galaxy.angular_mass_within_ellipse* and *mass_profiles.angular_mass_within_ellipse* for details \\\n        of how this is performed.\n\n        Parameters\n        ----------\n        major_axis : float\n            The major-axis radius of the ellipse.\n        units_luminosity : str\n            The units the luminosity is returned in (eps | counts).\n        exposure_time : float\n            The exposure time of the observation, which converts luminosity from electrons per second units to counts.", "id": "f5971:c0:m17"}
{"signature": "def __init__(self, plane_redshifts, cosmology):", "body": "self.plane_redshifts = plane_redshifts<EOL>self.cosmology = cosmology<EOL>", "docstring": "Abstract Ray tracer for lens systems with any number of planes.\n\n        From the galaxies of the tracer's planes, their grid-stack(s) and the cosmology physically derived quantities \\\n        (e.g. surface density, angular diameter distances, critical surface densities) can be computed.\n\n        Parameters\n        ----------\n        plane_redshifts : [pl.Plane] or [pl.PlaneStack]\n            The list of the tracer's planes in ascending redshift order.\n        cosmology : astropy.cosmology\n            The cosmology of the ray-tracing calculation.", "id": "f5972:c0:m0"}
{"signature": "def check_tracer_for_mass_profile(func):", "body": "@wraps(func)<EOL>def wrapper(self):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if self.has_mass_profile is True:<EOL><INDENT>return func(self)<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "If none of the tracer's galaxies have a mass profile, it surface density, potential and deflections cannot \\\n    be computed. This wrapper makes these properties return *None*.\n\n    Parameters\n    ----------\n    func : (self) -> Object\n        A property function that requires galaxies to have a mass profile.", "id": "f5972:m1"}
{"signature": "def __init__(self, galaxies, image_plane_grid_stack, border=None, cosmology=cosmo.Planck15):", "body": "plane_redshifts = lens_util.ordered_plane_redshifts_from_galaxies(galaxies=galaxies)<EOL>galaxies_in_planes =lens_util.galaxies_in_redshift_ordered_planes_from_galaxies(galaxies=galaxies,<EOL>plane_redshifts=plane_redshifts)<EOL>image_plane_grid_stack = pix.setup_image_plane_pixelization_grid_from_galaxies_and_grid_stack(<EOL>galaxies=galaxies, grid_stack=image_plane_grid_stack)<EOL>planes = []<EOL>for plane_index in range(<NUM_LIT:0>, len(plane_redshifts)):<EOL><INDENT>compute_deflections = lens_util.compute_deflections_at_next_plane(plane_index=plane_index,<EOL>total_planes=len(plane_redshifts))<EOL>new_grid_stack = image_plane_grid_stack<EOL>if plane_index > <NUM_LIT:0>:<EOL><INDENT>for previous_plane_index in range(plane_index):<EOL><INDENT>scaling_factor = cosmology_util.scaling_factor_between_redshifts_from_redshifts_and_cosmology(<EOL>redshift_0=plane_redshifts[previous_plane_index], redshift_1=plane_redshifts[plane_index],<EOL>redshift_final=plane_redshifts[-<NUM_LIT:1>], cosmology=cosmology)<EOL>scaled_deflection_stack = lens_util.scaled_deflection_stack_from_plane_and_scaling_factor(<EOL>plane=planes[previous_plane_index], scaling_factor=scaling_factor)<EOL>new_grid_stack =lens_util.grid_stack_from_deflection_stack(grid_stack=new_grid_stack,<EOL>deflection_stack=scaled_deflection_stack)<EOL><DEDENT><DEDENT>planes.append(pl.Plane(galaxies=galaxies_in_planes[plane_index], grid_stack=new_grid_stack,<EOL>border=border, compute_deflections=compute_deflections, cosmology=cosmology))<EOL><DEDENT>super(TracerMultiPlanes, self).__init__(planes=planes, cosmology=cosmology)<EOL>", "docstring": "Ray-tracer for a lens system with any number of planes.\n\n        The redshift of these planes are specified by the redshits of the galaxies; there is a unique plane redshift \\\n        for every unique galaxy redshift (galaxies with identical redshifts are 
put in the same plane).\n\n        To perform multi-plane ray-tracing, a cosmology must be supplied so that deflection-angles can be rescaled \\\n        according to the lens-geometry of the multi-plane system. All galaxies input to the tracer must therefore \\\n        have redshifts.\n\n        This tracer has only one grid-stack (see grid_stack.GridStack) which is used for ray-tracing.\n\n        Parameters\n        ----------\n        galaxies : [Galaxy]\n            The list of galaxies in the ray-tracing calculation.\n        image_plane_grid_stack : grid_stacks.GridStack\n            The image-plane grid stack which is traced. (includes the regular-grid, sub-grid, blurring-grid, etc.).\n        border : masks.RegularGridBorder\n            The border of the regular-grid, which is used to relocate demagnified traced pixels to the \\\n            source-plane borders.\n        cosmology : astropy.cosmology\n            The cosmology of the ray-tracing calculation.", "id": "f5972:c4:m0"}
{"signature": "def __init__(self, lens_galaxies, image_plane_positions, cosmology=cosmo.Planck15):", "body": "image_plane = pl.PlanePositions(redshift=lens_galaxies[<NUM_LIT:0>].redshift, galaxies=lens_galaxies,<EOL>positions=image_plane_positions, compute_deflections=True, cosmology=cosmology)<EOL>source_plane_positions = image_plane.trace_to_next_plane()<EOL>source_plane = pl.PlanePositions(redshift=None, galaxies=None, positions=source_plane_positions,<EOL>compute_deflections=False, cosmology=cosmology)<EOL>super(TracerImageSourcePlanesPositions, self).__init__(planes=[image_plane, source_plane], cosmology=cosmology)<EOL>", "docstring": "Positional ray-tracer for a lens system with two planes, an image-plane and source-plane (source-plane \\\n        galaxies are not input for the positional ray-tracer, as it is only the proximity that image_plane_positions \\\n        trace to within one another that needs to be computed).\n\n        By default, this has no associated cosmology, thus all calculations are performed in arc seconds and galaxies \\\n        do not need input redshifts. If a cosmology is supplied, the plane's angular diameter distances, \\\n        conversion factors, etc. are used to provide quantities in kpc.\n\n        Parameters\n        ----------\n        lens_galaxies : [Galaxy]\n            The list of lens galaxies in the image-plane.\n        image_plane_positions : [[[]]]\n            The (y,x) arc-second coordinates of image-plane pixels which (are expected to) mappers to the same \\\n            location(s) in the source-plane.\n        cosmology : astropy.cosmology.Planck15\n            The cosmology of the ray-tracing calculation.", "id": "f5972:c6:m0"}
{"signature": "def __init__(self, lens_galaxies, image_plane_grid_stack, border=None, cosmology=cosmo.Planck15,<EOL>units_distance='<STR_LIT>', units_luminosity='<STR_LIT>', units_mass='<STR_LIT>'):", "body": "if not lens_galaxies:<EOL><INDENT>raise exc.RayTracingException('<STR_LIT>')<EOL><DEDENT>image_plane = pl.Plane(galaxies=lens_galaxies, grid_stack=image_plane_grid_stack, border=border,<EOL>compute_deflections=True, cosmology=cosmology)<EOL>super(TracerImagePlane, self).__init__(planes=[image_plane], cosmology=cosmology)<EOL>", "docstring": "Ray tracer for a lens system with just an image-plane. \n\n        As there is only 1 plane, there are no ray-tracing calculations. This class is therefore only used for fitting \\ \n        image-plane galaxies with light profiles.\n\n        This tracer has only one grid-stack (see grid_stack.GridStack) which is used for ray-tracing.\n\n        Parameters\n        ----------\n        lens_galaxies : [Galaxy]\n            The list of lens galaxies in the image-plane.\n        image_plane_grid_stack : grid_stacks.GridStack\n            The image-plane grid stack which is traced. (includes the regular-grid, sub-grid, blurring-grid, etc.).\n        border : masks.RegularGridBorder\n            The border of the regular-grid, which is used to relocate demagnified traced pixels to the \\\n            source-plane borders.\n        cosmology : astropy.cosmology\n            The cosmology of the ray-tracing calculation.", "id": "f5972:c2:m0"}
{"signature": "def __init__(self, galaxies, image_plane_positions, cosmology=cosmo.Planck15):", "body": "plane_redshifts = lens_util.ordered_plane_redshifts_from_galaxies(galaxies=galaxies)<EOL>galaxies_in_redshift_ordered_lists =lens_util.galaxies_in_redshift_ordered_planes_from_galaxies(galaxies=galaxies,<EOL>plane_redshifts=plane_redshifts)<EOL>if not galaxies:<EOL><INDENT>raise exc.RayTracingException('<STR_LIT>')<EOL><DEDENT>planes = []<EOL>for plane_index in range(<NUM_LIT:0>, len(plane_redshifts)):<EOL><INDENT>if plane_index < len(plane_redshifts) - <NUM_LIT:1>:<EOL><INDENT>compute_deflections = True<EOL><DEDENT>elif plane_index == len(plane_redshifts) - <NUM_LIT:1>:<EOL><INDENT>compute_deflections = False<EOL><DEDENT>else:<EOL><INDENT>raise exc.RayTracingException('<STR_LIT>')<EOL><DEDENT>new_positions = image_plane_positions<EOL>if plane_index > <NUM_LIT:0>:<EOL><INDENT>for previous_plane_index in range(plane_index):<EOL><INDENT>scaling_factor = cosmology_util.scaling_factor_between_redshifts_from_redshifts_and_cosmology(<EOL>redshift_0=plane_redshifts[previous_plane_index], redshift_1=plane_redshifts[plane_index],<EOL>redshift_final=plane_redshifts[-<NUM_LIT:1>], cosmology=cosmology)<EOL>scaled_deflections = list(map(lambda deflections:<EOL>np.multiply(scaling_factor, deflections),<EOL>planes[previous_plane_index].deflections))<EOL>new_positions = list(map(lambda positions, deflections:<EOL>np.subtract(positions, deflections), new_positions, scaled_deflections))<EOL><DEDENT><DEDENT>planes.append(pl.PlanePositions(redshift=plane_redshifts[plane_index],<EOL>galaxies=galaxies_in_redshift_ordered_lists[plane_index],<EOL>positions=new_positions, compute_deflections=compute_deflections))<EOL><DEDENT>super(TracerMultiPlanesPositions, self).__init__(planes=planes, cosmology=cosmology)<EOL>", "docstring": "Positional ray-tracer for a lens system with any number of planes.\n\n        To perform multi-plane ray-tracing, a cosmology must be supplied so that 
deflection-angles can be rescaled \\\n        according to the lens-geometry of the multi-plane system.\n\n        Parameters\n        ----------\n        galaxies : [Galaxy]\n            The list of galaxies in the ray-tracing calculation.\n        image_plane_positions : [[[]]]\n            The (y,x) arc-second coordinates of image-plane pixels which (are expected to) mappers to the same \\\n            location(s) in the final source-plane.\n        cosmology : astropy.cosmology\n            The cosmology of the ray-tracing calculation.", "id": "f5972:c7:m0"}
{"signature": "def ordered_plane_redshifts_from_galaxies(galaxies):", "body": "ordered_galaxies = sorted(galaxies, key=lambda galaxy: galaxy.redshift, reverse=False)<EOL>galaxy_redshifts = list(map(lambda galaxy: galaxy.redshift, ordered_galaxies))<EOL>return [redshift for i, redshift in enumerate(galaxy_redshifts) if redshift not in galaxy_redshifts[:i]]<EOL>", "docstring": "Given a list of galaxies (with redshifts), return a list of the redshifts in ascending order.\n\n    If two or more galaxies have the same redshift that redshift is not double counted.\n\n    Parameters\n    -----------\n    galaxies : [Galaxy]\n        The list of galaxies in the ray-tracing calculation.", "id": "f5973:m1"}
{"signature": "def galaxies_in_redshift_ordered_planes_from_galaxies(galaxies, plane_redshifts):", "body": "galaxies_in_redshift_ordered_planes =  [[] for i in range(len(plane_redshifts))]<EOL>for galaxy in galaxies:<EOL><INDENT>index = (np.abs(np.asarray(plane_redshifts) - galaxy.redshift)).argmin()<EOL>galaxies_in_redshift_ordered_planes[index].append(galaxy)<EOL><DEDENT>return galaxies_in_redshift_ordered_planes<EOL>", "docstring": "Given a list of galaxies (with redshifts), return a list of the galaxies where each entry contains a list \\\n    of galaxies at the same redshift in ascending redshift order.\n\n    Parameters\n    -----------\n    galaxies : [Galaxy]\n        The list of galaxies in the ray-tracing calculation.", "id": "f5973:m3"}
{"signature": "def plot_residual_map(<EOL>fit, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT:10>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "array_plotters.plot_array(<EOL>array=fit.residual_map, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the residual-map of a lens fit.\n\n    Set *autolens.datas.array.plotters.array_plotters* for a description of all input parameters not described below.\n\n    Parameters\n    -----------\n    fit : datas.fitting.fitting.AbstractFitter\n        The fit to the datas, which includes a list of every model image, residual_map, chi-squareds, etc.\n    image_index : int\n        The index of the datas in the datas-set of which the 
residual_map are plotted.", "id": "f5975:m6"}
{"signature": "def plot_contribution_maps(<EOL>fit, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT:10>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "if len(fit.contribution_maps) > <NUM_LIT:1>:<EOL><INDENT>contribution_map = sum(fit.contribution_maps)<EOL><DEDENT>else:<EOL><INDENT>contribution_map = fit.contribution_maps[<NUM_LIT:0>]<EOL><DEDENT>array_plotters.plot_array(<EOL>array=contribution_map, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the summed contribution maps of a hyper-fit.\n\n    Set *autolens.datas.array.plotters.array_plotters* for a description of all input parameters not described below.\n\n    Parameters\n    -----------\n    fit : 
datas.fitting.fitting.AbstractLensHyperFit\n        The hyper-fit to the datas, which includes a list of every model image, residual_map, chi-squareds, etc.\n    image_index : int\n        The index of the datas in the datas-set of which the contribution_maps are plotted.", "id": "f5975:m8"}
{"signature": "def plot_lens_subtracted_image(<EOL>fit, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT:10>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "if fit.tracer.total_planes == <NUM_LIT:2>:<EOL><INDENT>if fit.tracer.image_plane.has_light_profile:<EOL><INDENT>lens_subtracted_image = fit.image - fit.model_image_of_planes[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>lens_subtracted_image = fit.image<EOL><DEDENT><DEDENT>else:<EOL><INDENT>lens_subtracted_image = fit.image - sum(fit.model_image_of_planes[<NUM_LIT:0>:-<NUM_LIT:2>])<EOL><DEDENT>array_plotters.plot_array(<EOL>array=lens_subtracted_image, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the model image of a 
specific plane of a lens fit.\n\n    Set *autolens.datas.array.plotters.array_plotters* for a description of all input parameters not described below.\n\n    Parameters\n    -----------\n    fit : datas.fitting.fitting.AbstractFitter\n        The fit to the datas, which includes a list of every model image, residual_map, chi-squareds, etc.\n    image_index : int\n        The index of the datas in the datas-set of which the model image is plotted.\n    plane_indexes : int\n        The plane from which the model image is generated.", "id": "f5975:m4"}
{"signature": "def plot_image(<EOL>fit, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None,<EOL>image_plane_pix_grid=None, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>grid_pointsize=<NUM_LIT:1>, mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT:10>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "array_plotters.plot_array(<EOL>array=fit.image, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, grid=image_plane_pix_grid,<EOL>positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>grid_pointsize=grid_pointsize, mask_pointsize=mask_pointsize, position_pointsize=position_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the image of a lens fit.\n\n    Set *autolens.datas.array.plotters.array_plotters* for a description of all input parameters not described below.\n\n    Parameters\n    -----------\n    image : datas.ccd.datas.CCD\n        The datas-datas, which includes the observed datas, noise_map-map, PSF, signal-to-noise_map-map, etc.\n  
  plot_origin : True\n        If true, the origin of the datas's coordinate system is plotted as a 'x'.", "id": "f5975:m0"}
{"signature": "def plot_signal_to_noise_map(<EOL>fit, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT:10>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "array_plotters.plot_array(<EOL>array=fit.signal_to_noise_map, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max,<EOL>linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the noise-map of a lens fit.\n\n    Set *autolens.datas.array.plotters.array_plotters* for a description of all input parameters not described below.\n\n    Parameters\n    -----------\n    image : datas.ccd.datas.CCD\n    The datas-datas, which includes the observed datas, signal_to_noise_map-map, PSF, signal-to-signal_to_noise_map-map, etc.\n    plot_origin : True\n    If true, the origin of the datas's 
coordinate system is plotted as a 'x'.", "id": "f5975:m2"}
{"signature": "def plot_model_data(<EOL>fit, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None,<EOL>plot_mass_profile_centres=True, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT:10>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "centres = get_mass_profile_centes(plot_mass_profile_centres=plot_mass_profile_centres, fit=fit)<EOL>array_plotters.plot_array(<EOL>array=fit.model_data, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, positions=positions, centres=centres, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the model image of a fit.\n\n    Set *autolens.datas.array.plotters.array_plotters* for a description of all input parameters not described below.\n\n    Parameters\n    -----------\n    fit : datas.fitting.fitting.AbstractFitter\n        The fit to the datas, which includes a list of 
every model image, residual_map, chi-squareds, etc.\n    image_index : int\n        The index of the datas in the datas-set of which the model image is plotted.", "id": "f5975:m3"}
{"signature": "def plot_noise_map(<EOL>fit, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT:10>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "array_plotters.plot_array(<EOL>array=fit.noise_map, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask,<EOL>positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the noise-map of a lens fit.\n\n    Set *autolens.datas.array.plotters.array_plotters* for a description of all input parameters not described below.\n\n    Parameters\n    -----------\n    image : datas.ccd.datas.CCD\n        The datas-datas, which includes the observed datas, noise_map-map, PSF, signal-to-noise_map-map, etc.\n    plot_origin : True\n        If true, the origin of the datas's coordinate system is plotted as a 
'x'.", "id": "f5975:m1"}
{"signature": "def plot_fit_individuals_lens_and_source_planes(<EOL>fit, should_plot_mask=True, extract_array_from_mask=False, zoom_around_mask=False, positions=None,<EOL>should_plot_image_plane_pix=False,<EOL>should_plot_image=False,<EOL>should_plot_noise_map=False,<EOL>should_plot_signal_to_noise_map=False,<EOL>should_plot_lens_subtracted_image=False,<EOL>should_plot_model_image=False,<EOL>should_plot_lens_model_image=False,<EOL>should_plot_source_model_image=False,<EOL>should_plot_source_plane_image=False,<EOL>should_plot_residual_map=False,<EOL>should_plot_chi_squared_map=False,<EOL>units='<STR_LIT>',<EOL>output_path=None, output_format='<STR_LIT>'):", "body": "mask = lens_plotter_util.get_mask(fit=fit, should_plot_mask=should_plot_mask)<EOL>kpc_per_arcsec = fit.tracer.image_plane.kpc_per_arcsec<EOL>if should_plot_image:<EOL><INDENT>image_plane_pix_grid = lens_plotter_util.get_image_plane_pix_grid(should_plot_image_plane_pix, fit)<EOL>lens_plotter_util.plot_image(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,<EOL>image_plane_pix_grid=image_plane_pix_grid,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>output_path=output_path, output_format=output_format)<EOL><DEDENT>if should_plot_noise_map:<EOL><INDENT>lens_plotter_util.plot_noise_map(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>output_path=output_path, output_format=output_format)<EOL><DEDENT>if should_plot_signal_to_noise_map:<EOL><INDENT>lens_plotter_util.plot_signal_to_noise_map(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>output_path=output_path, output_format=output_format)<EOL><DEDENT>if should_plot_lens_subtracted_image:<EOL><INDENT>lens_plotter_util.plot_lens_subtracted_image(<EOL>fit=fit, mask=mask, 
positions=positions, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>output_path=output_path, output_format=output_format)<EOL><DEDENT>if should_plot_model_image:<EOL><INDENT>lens_plotter_util.plot_model_data(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>output_path=output_path, output_format=output_format)<EOL><DEDENT>if should_plot_lens_model_image:<EOL><INDENT>lens_plotter_util.plot_model_image_of_planes(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,<EOL>plot_foreground=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL><DEDENT>if should_plot_source_model_image:<EOL><INDENT>lens_plotter_util.plot_model_image_of_planes(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,<EOL>plot_source=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL><DEDENT>if should_plot_source_plane_image:<EOL><INDENT>if fit.total_inversions == <NUM_LIT:0>:<EOL><INDENT>plane_plotters.plot_plane_image(<EOL>plane=fit.tracer.source_plane, plot_grid=True,<EOL>units=units, figsize=(<NUM_LIT:20>, <NUM_LIT:20>),<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL><DEDENT>elif fit.total_inversions == <NUM_LIT:1>:<EOL><INDENT>inversion_plotters.plot_reconstructed_pixelization(<EOL>inversion=fit.inversion, should_plot_grid=True,<EOL>units=units, figsize=(<NUM_LIT:20>, <NUM_LIT:20>),<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL><DEDENT><DEDENT>if 
should_plot_residual_map:<EOL><INDENT>lens_plotter_util.plot_residual_map(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>output_path=output_path, output_format=output_format)<EOL><DEDENT>if should_plot_chi_squared_map:<EOL><INDENT>lens_plotter_util.plot_chi_squared_map(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>output_path=output_path, output_format=output_format)<EOL><DEDENT>", "docstring": "Plot the model datas_ of an analysis, using the *Fitter* class object.\n\n    The visualization and output type can be fully customized.\n\n    Parameters\n    -----------\n    fit : autolens.lens.fitting.Fitter\n        Class containing fit between the model datas_ and observed lens datas_ (including residual_map, chi_squared_map etc.)\n    output_path : str\n        The path where the datas_ is output if the output_type is a file format (e.g. png, fits)\n    output_format : str\n        How the datas_ is output. File formats (e.g. png, fits) output the datas_ to harddisk. 'show' displays the datas_ \\\n        in the python interpreter window.", "id": "f5976:m5"}
{"signature": "def plot_fit_subplot_lens_and_source_planes(<EOL>fit, should_plot_mask=True, extract_array_from_mask=False, zoom_around_mask=False,<EOL>should_plot_source_grid=False, positions=None, should_plot_image_plane_pix=False, plot_mass_profile_centres=True,<EOL>units='<STR_LIT>', figsize=None, aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>titlesize=<NUM_LIT:10>, xlabelsize=<NUM_LIT:10>, ylabelsize=<NUM_LIT:10>, xyticksize=<NUM_LIT:10>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT:10>, grid_pointsize=<NUM_LIT:1>,<EOL>output_path=None, output_filename='<STR_LIT>', output_format='<STR_LIT>'):", "body": "rows, columns, figsize_tool = plotter_util.get_subplot_rows_columns_figsize(number_subplots=<NUM_LIT:9>)<EOL>mask = lens_plotter_util.get_mask(fit=fit, should_plot_mask=should_plot_mask)<EOL>if figsize is None:<EOL><INDENT>figsize = figsize_tool<EOL><DEDENT>plt.figure(figsize=figsize)<EOL>kpc_per_arcsec = fit.tracer.image_plane.kpc_per_arcsec<EOL>image_plane_pix_grid = lens_plotter_util.get_image_plane_pix_grid(should_plot_image_plane_pix, fit)<EOL>plt.subplot(rows, columns, <NUM_LIT:1>)<EOL>lens_plotter_util.plot_image(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, positions=positions,<EOL>image_plane_pix_grid=image_plane_pix_grid, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, 
xyticksize=xyticksize,<EOL>grid_pointsize=grid_pointsize, position_pointsize=position_pointsize, mask_pointsize=mask_pointsize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>plt.subplot(rows, columns, <NUM_LIT:2>)<EOL>lens_plotter_util.plot_noise_map(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, positions=None, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>position_pointsize=position_pointsize, mask_pointsize=mask_pointsize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>plt.subplot(rows, columns, <NUM_LIT:3>)<EOL>lens_plotter_util.plot_signal_to_noise_map(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, positions=None, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>position_pointsize=position_pointsize, mask_pointsize=mask_pointsize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>plt.subplot(rows, columns, <NUM_LIT:4>)<EOL>lens_plotter_util.plot_lens_subtracted_image(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, 
zoom_around_mask=zoom_around_mask, positions=positions, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title='<STR_LIT>', titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize,<EOL>position_pointsize=position_pointsize, xyticksize=xyticksize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>if fit.tracer.image_plane.has_light_profile:<EOL><INDENT>plt.subplot(rows, columns, <NUM_LIT:5>)<EOL>lens_plotter_util.plot_model_image_of_planes(<EOL>fit=fit, plot_foreground=True, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, plot_mass_profile_centres=plot_mass_profile_centres, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title='<STR_LIT>', titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize,<EOL>xyticksize=xyticksize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL><DEDENT>plt.subplot(rows, columns, <NUM_LIT:6>)<EOL>lens_plotter_util.plot_model_image_of_planes(<EOL>fit=fit, plot_source=True, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, plot_mass_profile_centres=plot_mass_profile_centres, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, 
linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title='<STR_LIT>', titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize,<EOL>xyticksize=xyticksize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>if fit.total_inversions == <NUM_LIT:0>:<EOL><INDENT>plt.subplot(rows, columns, <NUM_LIT:7>)<EOL>plane_plotters.plot_plane_image(<EOL>plane=fit.tracer.source_plane, positions=None, plot_grid=should_plot_source_grid, as_subplot=True,<EOL>units=units, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad,<EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>grid_pointsize=grid_pointsize, position_pointsize=position_pointsize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL><DEDENT>elif fit.total_inversions == <NUM_LIT:1>:<EOL><INDENT>ratio = float((fit.inversion.mapper.geometry.arc_second_maxima[<NUM_LIT:1>] - fit.inversion.mapper.geometry.arc_second_minima[<NUM_LIT:1>]) /(fit.inversion.mapper.geometry.arc_second_maxima[<NUM_LIT:0>] - fit.inversion.mapper.geometry.arc_second_minima[<NUM_LIT:0>]))<EOL>if aspect is '<STR_LIT>':<EOL><INDENT>aspect_inv = ratio<EOL><DEDENT>elif aspect is '<STR_LIT>':<EOL><INDENT>aspect_inv = <NUM_LIT:1.0> / ratio<EOL><DEDENT>elif aspect is '<STR_LIT>':<EOL><INDENT>aspect_inv = <NUM_LIT:1.0><EOL><DEDENT>plt.subplot(rows, columns, <NUM_LIT:7>, aspect=float(aspect_inv))<EOL>inversion_plotters.plot_reconstructed_pixelization(<EOL>inversion=fit.inversion, positions=None, should_plot_grid=False, should_plot_centres=False, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, 
figsize=figsize, aspect=None,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad,<EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>output_path=output_path, output_filename=None, output_format=output_format)<EOL><DEDENT>plt.subplot(rows, columns, <NUM_LIT:8>)<EOL>lens_plotter_util.plot_residual_map(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad,<EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>plt.subplot(rows, columns, <NUM_LIT:9>)<EOL>lens_plotter_util.plot_chi_squared_map(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad,<EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>plotter_util.output_subplot_array(output_path=output_path, 
output_filename=output_filename,<EOL>output_format=output_format)<EOL>plt.close()<EOL>", "docstring": "Plot the model datas_ of an analysis, using the *Fitter* class object.\n\n    The visualization and output type can be fully customized.\n\n    Parameters\n    -----------\n    fit : autolens.lens.fitting.Fitter\n        Class containing fit between the model datas_ and observed lens datas_ (including residual_map, chi_squared_map etc.)\n    output_path : str\n        The path where the datas_ is output if the output_type is a file format (e.g. png, fits)\n    output_filename : str\n        The name of the file that is output, if the output_type is a file format (e.g. png, fits)\n    output_format : str\n        How the datas_ is output. File formats (e.g. png, fits) output the datas_ to harddisk. 'show' displays the datas_ \\\n        in the python interpreter window.", "id": "f5976:m2"}
{"signature": "def plot_fit_subplot_lens_plane_only(<EOL>fit, should_plot_mask=True, extract_array_from_mask=False, zoom_around_mask=False, positions=None,<EOL>should_plot_image_plane_pix=False,<EOL>units='<STR_LIT>', figsize=None, aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>titlesize=<NUM_LIT:10>, xlabelsize=<NUM_LIT:10>, ylabelsize=<NUM_LIT:10>, xyticksize=<NUM_LIT:10>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT:10>, grid_pointsize=<NUM_LIT:1>,<EOL>output_path=None, output_filename='<STR_LIT>', output_format='<STR_LIT>'):", "body": "rows, columns, figsize_tool = plotter_util.get_subplot_rows_columns_figsize(number_subplots=<NUM_LIT:6>)<EOL>mask = lens_plotter_util.get_mask(fit=fit, should_plot_mask=should_plot_mask)<EOL>if figsize is None:<EOL><INDENT>figsize = figsize_tool<EOL><DEDENT>plt.figure(figsize=figsize)<EOL>plt.subplot(rows, columns, <NUM_LIT:1>)<EOL>kpc_per_arcsec = fit.tracer.image_plane.kpc_per_arcsec<EOL>image_plane_pix_grid = lens_plotter_util.get_image_plane_pix_grid(should_plot_image_plane_pix, fit)<EOL>lens_plotter_util.plot_image(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,<EOL>positions=positions, image_plane_pix_grid=image_plane_pix_grid, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>grid_pointsize=grid_pointsize, position_pointsize=position_pointsize, 
mask_pointsize=mask_pointsize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>plt.subplot(rows, columns, <NUM_LIT:2>)<EOL>lens_plotter_util.plot_noise_map(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,<EOL>positions=positions, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>position_pointsize=position_pointsize, mask_pointsize=mask_pointsize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>plt.subplot(rows, columns, <NUM_LIT:3>)<EOL>lens_plotter_util.plot_signal_to_noise_map(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,<EOL>positions=positions, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>position_pointsize=position_pointsize, mask_pointsize=mask_pointsize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>plt.subplot(rows, columns, <NUM_LIT:4>)<EOL>lens_plotter_util.plot_model_data(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, 
figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>plt.subplot(rows, columns, <NUM_LIT:5>)<EOL>lens_plotter_util.plot_residual_map(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>plt.subplot(rows, columns, <NUM_LIT:6>)<EOL>lens_plotter_util.plot_chi_squared_map(<EOL>fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>output_path=output_path, output_filename='<STR_LIT>', output_format=output_format)<EOL>plotter_util.output_subplot_array(output_path=output_path, 
output_filename=output_filename,<EOL>output_format=output_format)<EOL>plt.close()<EOL>", "docstring": "Plot the model datas_ of an analysis, using the *Fitter* class object.\n\n    The visualization and output type can be fully customized.\n\n    Parameters\n    -----------\n    fit : autolens.lens.fitting.Fitter\n        Class containing fit between the model datas_ and observed lens datas_ (including residual_map, chi_squared_map etc.)\n    output_path : str\n        The path where the datas_ is output if the output_type is a file format (e.g. png, fits)\n    output_filename : str\n        The name of the file that is output, if the output_type is a file format (e.g. png, fits)\n    output_format : str\n        How the datas_ is output. File formats (e.g. png, fits) output the datas_ to harddisk. 'show' displays the datas_ \\\n        in the python interpreter window.", "id": "f5976:m1"}
{"signature": "def __init__(self, lens_data, tracer_normal, tracer_sensitive):", "body": "AbstractSensitivityFit.__init__(self=self, tracer_normal=tracer_normal, tracer_sensitive=tracer_sensitive)<EOL>self.fit_normal = lens_fit.LensProfileFit(lens_data=lens_data, tracer=tracer_normal)<EOL>self.fit_sensitive = lens_fit.LensProfileFit(lens_data=lens_data, tracer=tracer_sensitive)<EOL>", "docstring": "Evaluate the sensitivity of a profile fit to a specific component of a lens model and tracer. This is \\\n        performed by evaluating the likelihood of a fit to an image using two tracers:\n\n        1) A 'normal tracer', which uses the same lens model as a the simulated lens data. This gives a baseline \\\n           value of the likelihood we can expect when we fit the model to itself.\n\n        2) A 'sensitive tracer', which uses the same lens model as the simulated lens data, but also includes the \\\n           additional model components (e.g. a mass clump 'subhalo') which we are testing our sensitivity to.\n\n        The difference in likelihood of these two fits informs us of how sensitive we are to the component in the \\\n        second tracer. For example, if the difference in likelihood is neglible, it means the model component had no \\\n        impact on our fit, meaning we are not sensitive to its properties.\n\n        Parameters\n        ----------\n        lens_data: lens_data.LensData\n            A simulated lens data which is used to determine our sensitiivity to specific model components.\n        tracer_normal : ray_tracing.AbstractTracer\n            A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \\\n            lens data that we are fitting.\n       tracer_sensitive : ray_tracing.AbstractTracerNonStack\n            A tracer whose galaxies have the same model components (e.g. 
light profiles, mass profiles) as the \\\n            lens data that we are fitting, but also addition components (e.g. mass clumps) which we measure \\\n            how sensitive we are too.", "id": "f5980:c1:m0"}
{"signature": "def fit_lens_data_with_sensitivity_tracers(lens_data, tracer_normal, tracer_sensitive):", "body": "if (tracer_normal.has_light_profile and tracer_sensitive.has_light_profile) and(not tracer_normal.has_pixelization and not tracer_sensitive.has_pixelization):<EOL><INDENT>return SensitivityProfileFit(lens_data=lens_data, tracer_normal=tracer_normal,<EOL>tracer_sensitive=tracer_sensitive)<EOL><DEDENT>elif (not tracer_normal.has_light_profile and not tracer_sensitive.has_light_profile) and(tracer_normal.has_pixelization and tracer_sensitive.has_pixelization):<EOL><INDENT>return SensitivityInversionFit(lens_data=lens_data, tracer_normal=tracer_normal,<EOL>tracer_sensitive=tracer_sensitive)<EOL><DEDENT>else:<EOL><INDENT>raise exc.FittingException('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Fit lens data with a normal tracer and sensitivity tracer, to determine our sensitivity to a selection of \\ \n    galaxy components. This factory automatically determines the type of fit based on the properties of the galaxies \\\n    in the tracers.\n\n    Parameters\n    -----------\n    lens_data : lens_data.LensData or lens_data.LensDataHyper\n        The lens-images that is fitted.\n    tracer_normal : ray_tracing.AbstractTracer\n        A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \\\n        lens data that we are fitting.\n    tracer_sensitive : ray_tracing.AbstractTracerNonStack\n        A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \\\n        lens data that we are fitting, but also addition components (e.g. mass clumps) which we measure \\\n        how sensitive we are too.", "id": "f5980:m0"}
{"signature": "def __init__(self, positions, noise_map):", "body": "self.positions = positions<EOL>self.noise_map = noise_map<EOL>", "docstring": "A lens position fitter, which takes a set of positions (e.g. from a plane in the tracer) and computes \\\n        their maximum separation, such that points which tracer closer to one another have a higher likelihood.\n\n        Parameters\n        -----------\n        positions : [[]]\n            The (y,x) arc-second coordinates of positions which the maximum distance and likelihood is computed using.\n        noise_map : ndarray | float\n            The noise-value assumed when computing the likelihood.", "id": "f5981:c6:m0"}
{"signature": "def __init__(self, lens_data, tracer, padded_tracer=None):", "body": "blurred_profile_image_1d = util.blurred_image_1d_from_1d_unblurred_and_blurring_images(<EOL>unblurred_image_1d=tracer.image_plane_image_1d, blurring_image_1d=tracer.image_plane_blurring_image_1d,<EOL>convolver=lens_data.convolver_image)<EOL>blurred_profile_image = lens_data.map_to_scaled_array(array_1d=blurred_profile_image_1d)<EOL>profile_subtracted_image_1d = lens_data.image_1d - blurred_profile_image_1d<EOL>inversion = inversions.inversion_from_image_mapper_and_regularization(<EOL>image_1d=profile_subtracted_image_1d, noise_map_1d=lens_data.noise_map_1d,<EOL>convolver=lens_data.convolver_mapping_matrix, mapper=tracer.mappers_of_planes[-<NUM_LIT:1>],<EOL>regularization=tracer.regularizations_of_planes[-<NUM_LIT:1>])<EOL>model_image = blurred_profile_image + inversion.reconstructed_data<EOL>super(LensProfileInversionFit, self).__init__(tracer=tracer,<EOL>padded_tracer=padded_tracer,<EOL>lens_data=lens_data,<EOL>model_image=model_image,<EOL>inversion=inversion)<EOL>self.convolver_image = lens_data.convolver_image<EOL>self.blurred_profile_image = blurred_profile_image<EOL>self.profile_subtracted_image = lens_data.image - self.blurred_profile_image<EOL>", "docstring": "An  lens profile and inversion fitter, which first generates and subtracts the image-plane \\\n        image of all galaxies (with light profiles) in the tracer, blurs it with the PSF and fits the residual image \\\n        with an inversion using the mapper(s) and regularization(s) in the galaxy's of the tracer.\n\n        This inversion use's the lens-image, its PSF and an input noise-map.\n\n        If a padded tracer is supplied, the blurred profile image's can be generated over the entire image and thus \\\n        without the mask.\n\n        Parameters\n        -----------\n        lens_data : lens_data.LensData\n            The lens-image that is fitted.\n        tracer : ray_tracing.Tracer\n            The 
tracer, which describes the ray-tracing and strong lens configuration.\n        padded_tracer : ray_tracing.TracerNonStack or None\n            A tracer with an identical strong lens configuration to the tracer above, but using the lens data's \\\n            padded grid_stack such that unmasked model-images can be computed.", "id": "f5981:c5:m0"}
{"signature": "def __init__(self, lens_data, tracer, padded_tracer=None):", "body": "inversion = inversions.inversion_from_image_mapper_and_regularization(<EOL>image_1d=lens_data.image_1d, noise_map_1d=lens_data.noise_map_1d,<EOL>convolver=lens_data.convolver_mapping_matrix,<EOL>mapper=tracer.mappers_of_planes[-<NUM_LIT:1>], regularization=tracer.regularizations_of_planes[-<NUM_LIT:1>])<EOL>super().__init__(lens_data=lens_data,<EOL>model_image=inversion.reconstructed_data,<EOL>tracer=tracer,<EOL>inversion=inversion,<EOL>padded_tracer=padded_tracer)<EOL>", "docstring": "An  lens inversion fitter, which fits the lens data an inversion using the mapper(s) and \\\n        regularization(s) in the galaxies of the tracer.\n\n        This inversion use's the lens-image, its PSF and an input noise-map.\n\n        Parameters\n        -----------\n        lens_data : lens_data.LensData\n            The lens-image that is fitted.\n        tracer : ray_tracing.Tracer\n            The tracer, which describes the ray-tracing and strong lens configuration.", "id": "f5981:c4:m0"}
{"signature": "def setup_figure(figsize, as_subplot):", "body": "if not as_subplot:<EOL><INDENT>fig = plt.figure(figsize=figsize)<EOL>return fig<EOL><DEDENT>", "docstring": "Setup a figure for plotting an image.\n\n    Parameters\n    -----------\n    figsize : (int, int)\n        The size of the figure in (rows, columns).\n    as_subplot : bool\n        If the figure is a subplot, the setup_figure function is omitted to ensure that each subplot does not create a \\\n        new figure and so that it can be output using the *output_subplot_array* function.", "id": "f5984:m1"}
{"signature": "def get_subplot_rows_columns_figsize(number_subplots):", "body": "if number_subplots <= <NUM_LIT:2>:<EOL><INDENT>return <NUM_LIT:1>, <NUM_LIT:2>, (<NUM_LIT>, <NUM_LIT:8>)<EOL><DEDENT>elif number_subplots <= <NUM_LIT:4>:<EOL><INDENT>return <NUM_LIT:2>, <NUM_LIT:2>, (<NUM_LIT>, <NUM_LIT:10>)<EOL><DEDENT>elif number_subplots <= <NUM_LIT:6>:<EOL><INDENT>return <NUM_LIT:2>, <NUM_LIT:3>, (<NUM_LIT>, <NUM_LIT:12>)<EOL><DEDENT>elif number_subplots <= <NUM_LIT:9>:<EOL><INDENT>return <NUM_LIT:3>, <NUM_LIT:3>, (<NUM_LIT>, <NUM_LIT:20>)<EOL><DEDENT>elif number_subplots <= <NUM_LIT:12>:<EOL><INDENT>return <NUM_LIT:3>, <NUM_LIT:4>, (<NUM_LIT>, <NUM_LIT:20>)<EOL><DEDENT>elif number_subplots <= <NUM_LIT:16>:<EOL><INDENT>return <NUM_LIT:4>, <NUM_LIT:4>, (<NUM_LIT>, <NUM_LIT:20>)<EOL><DEDENT>elif number_subplots <= <NUM_LIT:20>:<EOL><INDENT>return <NUM_LIT:4>, <NUM_LIT:5>, (<NUM_LIT>, <NUM_LIT:20>)<EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:6>, <NUM_LIT:6>, (<NUM_LIT>, <NUM_LIT:20>)<EOL><DEDENT>", "docstring": "Get the size of a sub plot in (rows, columns), based on the number of subplots that are going to be plotted.\n\n    Parameters\n    -----------\n    number_subplots : int\n        The number of subplots that are to be plotted in the figure.", "id": "f5984:m0"}
{"signature": "def set_title(title, titlesize):", "body": "plt.title(title, fontsize=titlesize)<EOL>", "docstring": "Set the title and title size of the figure.\n\n    Parameters\n    -----------\n    title : str\n        The text of the title.\n    titlesize : int\n        The size of of the title of the figure.", "id": "f5984:m2"}
{"signature": "def close_figure(as_subplot):", "body": "if not as_subplot:<EOL><INDENT>plt.close()<EOL><DEDENT>", "docstring": "After plotting and outputting a figure, close the matplotlib figure instance (omit if a subplot).\n\n    Parameters\n    -----------\n    as_subplot : bool\n        Whether the figure is part of subplot, in which case the figure is not closed so that the entire figure can \\\n        be closed later after output.", "id": "f5984:m5"}
{"signature": "def get_extent(array, units, kpc_per_arcsec, xticks_manual, yticks_manual):", "body": "if xticks_manual is not None and yticks_manual is not None:<EOL><INDENT>return np.asarray([xticks_manual[<NUM_LIT:0>], xticks_manual[<NUM_LIT:3>], yticks_manual[<NUM_LIT:0>], yticks_manual[<NUM_LIT:3>]])<EOL><DEDENT>if units in '<STR_LIT>':<EOL><INDENT>return np.asarray([<NUM_LIT:0>, array.shape[<NUM_LIT:1>], <NUM_LIT:0>, array.shape[<NUM_LIT:0>]])<EOL><DEDENT>elif units in '<STR_LIT>' or kpc_per_arcsec is None:<EOL><INDENT>return np.asarray([array.arc_second_minima[<NUM_LIT:1>], array.arc_second_maxima[<NUM_LIT:1>],<EOL>array.arc_second_minima[<NUM_LIT:0>], array.arc_second_maxima[<NUM_LIT:0>]])<EOL><DEDENT>elif units in '<STR_LIT>':<EOL><INDENT>return list(map(lambda tick : tick*kpc_per_arcsec,<EOL>np.asarray([array.arc_second_minima[<NUM_LIT:1>], array.arc_second_maxima[<NUM_LIT:1>],<EOL>array.arc_second_minima[<NUM_LIT:0>], array.arc_second_maxima[<NUM_LIT:0>]])))<EOL><DEDENT>else:<EOL><INDENT>raise exc.PlottingException('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Get the extent of the dimensions of the array in the units of the figure (e.g. arc-seconds or kpc).\n\n    This is used to set the extent of the array and thus the y / x axis limits.\n\n    Parameters\n    -----------\n    array : data.array.scaled_array.ScaledArray\n        The 2D array of data which is plotted.\n    units : str\n        The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').\n    kpc_per_arcsec : float\n        The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.\n    xticks_manual :  [] or None\n        If input, the xticks do not use the array's default xticks but instead overwrite them as these values.\n    yticks_manual :  [] or None\n        If input, the yticks do not use the array's default yticks but instead overwrite them as these values.", "id": "f5985:m2"}
{"signature": "def get_normalization_scale(norm, norm_min, norm_max, linthresh, linscale):", "body": "if norm is '<STR_LIT>':<EOL><INDENT>return colors.Normalize(vmin=norm_min, vmax=norm_max)<EOL><DEDENT>elif norm is '<STR_LIT>':<EOL><INDENT>if norm_min == <NUM_LIT:0.0>:<EOL><INDENT>norm_min = <NUM_LIT><EOL><DEDENT>return colors.LogNorm(vmin=norm_min, vmax=norm_max)<EOL><DEDENT>elif norm is '<STR_LIT>':<EOL><INDENT>return colors.SymLogNorm(linthresh=linthresh, linscale=linscale, vmin=norm_min, vmax=norm_max)<EOL><DEDENT>else:<EOL><INDENT>raise exc.PlottingException('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Get the normalization scale of the colormap. This will be scaled based on the input min / max normalization \\\n    values.\n\n    For a 'symmetric_log' colormap, linthesh and linscale also change the colormap.\n\n    If norm_min / norm_max are not supplied, the minimum / maximum values of the array of data are used.\n\n    Parameters\n    -----------\n    array : data.array.scaled_array.ScaledArray\n        The 2D array of data which is plotted.\n    norm_min : float or None\n        The minimum array value the colormap map spans (all values below this value are plotted the same color).\n    norm_max : float or None\n        The maximum array value the colormap map spans (all values above this value are plotted the same color).\n    linthresh : float\n        For the 'symmetric_log' colormap normalization ,this specifies the range of values within which the colormap \\\n        is linear.\n    linscale : float\n        For the 'symmetric_log' colormap normalization, this allowws the linear range set by linthresh to be stretched \\\n        relative to the logarithmic range.", "id": "f5985:m4"}
{"signature": "def plot_origin(array, origin, units, kpc_per_arcsec, zoom_offset_arcsec):", "body": "if origin is not None:<EOL><INDENT>origin_grid = np.asarray(origin)<EOL>if zoom_offset_arcsec is not None:<EOL><INDENT>origin_grid -= zoom_offset_arcsec<EOL><DEDENT>origin_units = convert_grid_units(array=array, grid_arcsec=origin_grid, units=units,<EOL>kpc_per_arcsec=kpc_per_arcsec)<EOL>plt.scatter(y=origin_units[<NUM_LIT:0>], x=origin_units[<NUM_LIT:1>], s=<NUM_LIT>, c='<STR_LIT:k>', marker='<STR_LIT:x>')<EOL><DEDENT>", "docstring": "Plot the (y,x) origin ofo the array's coordinates as a 'x'.\n\n    Parameters\n    -----------\n    array : data.array.scaled_array.ScaledArray\n        The 2D array of data which is plotted.\n    origin : (float, float).\n        The origin of the coordinate system of the array, which is plotted as an 'x' on the image if input.\n    units : str\n        The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').\n    kpc_per_arcsec : float or None\n        The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.", "id": "f5985:m8"}
{"signature": "def set_colorbar(cb_ticksize, cb_fraction, cb_pad, cb_tick_values, cb_tick_labels):", "body": "if cb_tick_values is None and cb_tick_labels is None:<EOL><INDENT>cb = plt.colorbar(fraction=cb_fraction, pad=cb_pad)<EOL><DEDENT>elif cb_tick_values is not None and cb_tick_labels is not None:<EOL><INDENT>cb = plt.colorbar(fraction=cb_fraction, pad=cb_pad, ticks=cb_tick_values)<EOL>cb.ax.set_yticklabels(cb_tick_labels)<EOL><DEDENT>else:<EOL><INDENT>raise exc.PlottingException('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>cb.ax.tick_params(labelsize=cb_ticksize)<EOL>", "docstring": "Setup the colorbar of the figure, specifically its ticksize and the size is appears relative to the figure.\n\n    Parameters\n    -----------\n    cb_ticksize : int\n        The size of the tick labels on the colorbar.\n    cb_fraction : float\n        The fraction of the figure that the colorbar takes up, which resizes the colorbar relative to the figure.\n    cb_pad : float\n        Pads the color bar in the figure, which resizes the colorbar relative to the figure.\n    cb_tick_values : [float]\n        Manually specified values of where the colorbar tick labels appear on the colorbar.\n    cb_tick_labels : [float]\n        Manually specified labels of the color bar tick labels, which appear where specified by cb_tick_values.", "id": "f5985:m6"}
{"signature": "def plot_grid(grid_arcsec, array, units, kpc_per_arcsec, pointsize, zoom_offset_arcsec):", "body": "if grid_arcsec is not None:<EOL><INDENT>if zoom_offset_arcsec is not None:<EOL><INDENT>grid_arcsec -= zoom_offset_arcsec<EOL><DEDENT>grid_units = convert_grid_units(grid_arcsec=grid_arcsec, array=array, units=units,<EOL>kpc_per_arcsec=kpc_per_arcsec)<EOL>plt.scatter(y=np.asarray(grid_units[:, <NUM_LIT:0>]), x=np.asarray(grid_units[:, <NUM_LIT:1>]), s=pointsize, c='<STR_LIT:k>')<EOL><DEDENT>", "docstring": "Plot a grid of points over the array of data on the figure.\n\n     Parameters\n     -----------.\n     grid_arcsec : ndarray or data.array.grids.RegularGrid\n         A grid of (y,x) coordinates in arc-seconds which may be plotted over the array.\n     array : data.array.scaled_array.ScaledArray\n        The 2D array of data which is plotted.\n     units : str\n         The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').\n     kpc_per_arcsec : float or None\n         The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.\n     grid_pointsize : int\n         The size of the points plotted to show the grid.", "id": "f5985:m14"}
{"signature": "def plot_figure(array, as_subplot, units, kpc_per_arcsec, figsize, aspect, cmap, norm, norm_min, norm_max,<EOL>linthresh, linscale, xticks_manual, yticks_manual):", "body": "fig = plotter_util.setup_figure(figsize=figsize, as_subplot=as_subplot)<EOL>norm_min, norm_max = get_normalization_min_max(array=array, norm_min=norm_min, norm_max=norm_max)<EOL>norm_scale = get_normalization_scale(norm=norm, norm_min=norm_min, norm_max=norm_max,<EOL>linthresh=linthresh, linscale=linscale)<EOL>extent = get_extent(array=array, units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>xticks_manual=xticks_manual, yticks_manual=yticks_manual)<EOL>plt.imshow(array, aspect=aspect, cmap=cmap, norm=norm_scale, extent=extent)<EOL>return fig<EOL>", "docstring": "Open a matplotlib figure and plot the array of data on it.\n\n    Parameters\n    -----------\n    array : data.array.scaled_array.ScaledArray\n        The 2D array of data which is plotted.\n    as_subplot : bool\n        Whether the array is plotted as part of a subplot, in which case the grid figure is not opened / closed.\n    units : str\n        The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').\n    kpc_per_arcsec : float or None\n        The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.\n    figsize : (int, int)\n        The size of the figure in (rows, columns).\n    aspect : str\n        The aspect ratio of the array, specifically whether it is forced to be square ('equal') or adapts its size to \\\n        the figure size ('auto').\n    cmap : str\n        The colormap the array is plotted using, which may be chosen from the standard matplotlib colormaps.\n    norm : str\n        The normalization of the colormap used to plot the image, specifically whether it is linear ('linear'), log \\\n        ('log') or a symmetric log normalization ('symmetric_log').\n    norm_min : float or None\n        The minimum array value the 
colormap map spans (all values below this value are plotted the same color).\n    norm_max : float or None\n        The maximum array value the colormap map spans (all values above this value are plotted the same color).\n    linthresh : float\n        For the 'symmetric_log' colormap normalization ,this specifies the range of values within which the colormap \\\n        is linear.\n    linscale : float\n        For the 'symmetric_log' colormap normalization, this allowws the linear range set by linthresh to be stretched \\\n        relative to the logarithmic range.\n    xticks_manual :  [] or None\n        If input, the xticks do not use the array's default xticks but instead overwrite them as these values.\n    yticks_manual :  [] or None\n        If input, the yticks do not use the array's default yticks but instead overwrite them as these values.", "id": "f5985:m1"}
{"signature": "def plot_array(array, origin=None, mask=None, extract_array_from_mask=False, zoom_around_mask=False,<EOL>should_plot_border=False, positions=None, centres=None, axis_ratios=None, phis=None, grid=None,<EOL>as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, border_pointsize=<NUM_LIT:2>, position_pointsize=<NUM_LIT:30>, grid_pointsize=<NUM_LIT:1>,<EOL>xticks_manual=None, yticks_manual=None,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "if array is None:<EOL><INDENT>return<EOL><DEDENT>if extract_array_from_mask and mask is not None:<EOL><INDENT>array = np.add(array, <NUM_LIT:0.0>, out=np.zeros_like(array), where=np.asarray(mask) == <NUM_LIT:0>)<EOL><DEDENT>if zoom_around_mask and mask is not None:<EOL><INDENT>array = array.zoomed_scaled_array_around_mask(mask=mask, buffer=<NUM_LIT:2>)<EOL>zoom_offset_pixels = np.asarray(mask.zoom_offset_pixels)<EOL>zoom_offset_arcsec = np.asarray(mask.zoom_offset_arcsec)<EOL><DEDENT>else:<EOL><INDENT>zoom_offset_pixels = None<EOL>zoom_offset_arcsec = None<EOL><DEDENT>if aspect is '<STR_LIT>':<EOL><INDENT>aspect = float(array.shape_arcsec[<NUM_LIT:1>]) / float(array.shape_arcsec[<NUM_LIT:0>])<EOL><DEDENT>fig = plot_figure(array=array, as_subplot=as_subplot, units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>figsize=figsize, aspect=aspect, cmap=cmap, norm=norm,<EOL>norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>xticks_manual=xticks_manual, yticks_manual=yticks_manual)<EOL>plotter_util.set_title(title=title, 
titlesize=titlesize)<EOL>set_xy_labels_and_ticksize(units=units, kpc_per_arcsec=kpc_per_arcsec, xlabelsize=xlabelsize, ylabelsize=ylabelsize,<EOL>xyticksize=xyticksize)<EOL>set_colorbar(cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad,<EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels)<EOL>plot_origin(array=array, origin=origin, units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>zoom_offset_arcsec=zoom_offset_arcsec)<EOL>plot_mask(mask=mask, units=units, kpc_per_arcsec=kpc_per_arcsec, pointsize=mask_pointsize,<EOL>zoom_offset_pixels=zoom_offset_pixels)<EOL>plot_border(mask=mask, should_plot_border=should_plot_border, units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>pointsize=border_pointsize, zoom_offset_pixels=zoom_offset_pixels)<EOL>plot_points(points_arcsec=positions, array=array, units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>pointsize=position_pointsize, zoom_offset_arcsec=zoom_offset_arcsec)<EOL>plot_grid(grid_arcsec=grid, array=array, units=units, kpc_per_arcsec=kpc_per_arcsec, pointsize=grid_pointsize,<EOL>zoom_offset_arcsec=zoom_offset_arcsec)<EOL>plot_centres(array=array, centres=centres, units=units, kpc_per_arcsec=kpc_per_arcsec,<EOL>zoom_offset_arcsec=zoom_offset_arcsec)<EOL>plot_ellipses(fig=fig, array=array, centres=centres, axis_ratios=axis_ratios, phis=phis, units=units,<EOL>kpc_per_arcsec=kpc_per_arcsec, zoom_offset_arcsec=zoom_offset_arcsec)<EOL>plotter_util.output_figure(array, as_subplot=as_subplot, output_path=output_path, output_filename=output_filename,<EOL>output_format=output_format)<EOL>plotter_util.close_figure(as_subplot=as_subplot)<EOL>", "docstring": "Plot an array of data as a figure.\n\n    Parameters\n    -----------\n    array : data.array.scaled_array.ScaledArray\n        The 2D array of data which is plotted.\n    origin : (float, float).\n        The origin of the coordinate system of the array, which is plotted as an 'x' on the image if input.\n    mask : data.array.mask.Mask\n        The mask 
applied to the array, the edge of which is plotted as a set of points over the plotted array.\n    extract_array_from_mask : bool\n        The plotter array is extracted using the mask, such that masked values are plotted as zeros. This ensures \\\n        bright features outside the mask do not impact the color map of the plot.\n    zoom_around_mask : bool\n        If True, the 2D region of the array corresponding to the rectangle encompassing all unmasked values is \\\n        plotted, thereby zooming into the region of interest.\n    should_plot_border : bool\n        If a mask is supplied, its borders pixels (e.g. the exterior edge) is plotted if this is *True*.\n    positions : [[]]\n        Lists of (y,x) coordinates on the image which are plotted as colored dots, to highlight specific pixels.\n    grid : data.array.grids.RegularGrid\n        A grid of (y,x) coordinates which may be plotted over the plotted array.\n    as_subplot : bool\n        Whether the array is plotted as part of a subplot, in which case the grid figure is not opened / closed.\n    units : str\n        The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').\n    kpc_per_arcsec : float or None\n        The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.\n    figsize : (int, int)\n        The size of the figure in (rows, columns).\n    aspect : str\n        The aspect ratio of the array, specifically whether it is forced to be square ('equal') or adapts its size to \\\n        the figure size ('auto').\n    cmap : str\n        The colormap the array is plotted using, which may be chosen from the standard matplotlib colormaps.\n    norm : str\n        The normalization of the colormap used to plot the image, specifically whether it is linear ('linear'), log \\\n        ('log') or a symmetric log normalization ('symmetric_log').\n    norm_min : float or None\n        The minimum array value the colormap map spans 
(all values below this value are plotted the same color).\n    norm_max : float or None\n        The maximum array value the colormap map spans (all values above this value are plotted the same color).\n    linthresh : float\n        For the 'symmetric_log' colormap normalization ,this specifies the range of values within which the colormap \\\n        is linear.\n    linscale : float\n        For the 'symmetric_log' colormap normalization, this allowws the linear range set by linthresh to be stretched \\\n        relative to the logarithmic range.\n    cb_ticksize : int\n        The size of the tick labels on the colorbar.\n    cb_fraction : float\n        The fraction of the figure that the colorbar takes up, which resizes the colorbar relative to the figure.\n    cb_pad : float\n        Pads the color bar in the figure, which resizes the colorbar relative to the figure.\n    xlabelsize : int\n        The fontsize of the x axes label.\n    ylabelsize : int\n        The fontsize of the y axes label.\n    xyticksize : int\n        The font size of the x and y ticks on the figure axes.\n    mask_pointsize : int\n        The size of the points plotted to show the mask.\n    border_pointsize : int\n        The size of the points plotted to show the borders.\n    positions_pointsize : int\n        The size of the points plotted to show the input positions.\n    grid_pointsize : int\n        The size of the points plotted to show the grid.\n    xticks_manual :  [] or None\n        If input, the xticks do not use the array's default xticks but instead overwrite them as these values.\n    yticks_manual :  [] or None\n        If input, the yticks do not use the array's default yticks but instead overwrite them as these values.\n    output_path : str\n        The path on the hard-disk where the figure is output.\n    output_filename : str\n        The filename of the figure that is output.\n    output_format : str\n        The format the figue is output:\n        'show' - 
display on computer screen.\n        'png' - output to hard-disk as a png.\n        'fits' - output to hard-disk as a fits file.'\n\n    Returns\n    --------\n    None\n\n    Examples\n    --------\n        array_plotters.plot_array(\n        array=image, origin=(0.0, 0.0), mask=circular_mask, extract_array_from_mask=True, zoom_around_mask=True,\n        should_plot_border=False, positions=[[1.0, 1.0], [2.0, 2.0]], grid=None, as_subplot=False,\n        units='arcsec', kpc_per_arcsec=None, figsize=(7,7), aspect='auto',\n        cmap='jet', norm='linear, norm_min=None, norm_max=None, linthresh=None, linscale=None,\n        cb_ticksize=10, cb_fraction=0.047, cb_pad=0.01, cb_tick_values=None, cb_tick_labels=None,\n        title='Image', titlesize=16, xlabelsize=16, ylabelsize=16, xyticksize=16,\n        mask_pointsize=10, border_pointsize=2, position_pointsize=10, grid_pointsize=10,\n        xticks_manual=None, yticks_manual=None,\n        output_path='/path/to/output', output_format='png', output_filename='image')", "id": "f5985:m0"}
{"signature": "def set_xy_labels_and_ticksize(units, kpc_per_arcsec, ylabel, xlabelsize, ylabelsize, xyticksize):", "body": "plt.ylabel(ylabel=ylabel, fontsize=ylabelsize)<EOL>if units in '<STR_LIT>' or kpc_per_arcsec is None:<EOL><INDENT>plt.xlabel('<STR_LIT>', fontsize=xlabelsize)<EOL><DEDENT>elif units in '<STR_LIT>':<EOL><INDENT>plt.xlabel('<STR_LIT>', fontsize=xlabelsize)<EOL><DEDENT>else:<EOL><INDENT>raise exc.PlottingException('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>plt.tick_params(labelsize=xyticksize)<EOL>", "docstring": "Set the x and y labels of the figure, and set the fontsize of those labels.\n\n    The x label is always the distance scale / radius, thus the x-label is either arc-seconds or kpc and depending \\\n    on the units the figure is plotted in.\n\n    The ylabel is the physical quantity being plotted and is passed as an input parameter.\n\n    Parameters\n    -----------\n    units : str\n        The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').\n    kpc_per_arcsec : float\n        The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.\n    ylabel : str\n        The y-label of the figure, which is the physical quantitiy being plotted.\n    xlabelsize : int\n        The fontsize of the x axes label.\n    ylabelsize : int\n        The fontsize of the y axes label.\n    xyticksize : int\n        The font size of the x and y ticks on the figure axes.", "id": "f5986:m3"}
{"signature": "def plot_grid(grid, axis_limits=None, points=None, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None,<EOL>figsize=(<NUM_LIT:12>, <NUM_LIT:8>), pointsize=<NUM_LIT:5>, pointcolor='<STR_LIT:k>', xyticksize=<NUM_LIT:16>,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "plotter_util.setup_figure(figsize=figsize, as_subplot=as_subplot)<EOL>grid = convert_grid_units(grid_arcsec=grid, units=units, kpc_per_arcsec=kpc_per_arcsec)<EOL>plt.scatter(y=np.asarray(grid[:, <NUM_LIT:0>]), x=np.asarray(grid[:, <NUM_LIT:1>]), s=pointsize, marker='<STR_LIT:.>')<EOL>plotter_util.set_title(title=title, titlesize=titlesize)<EOL>set_xy_labels(units, kpc_per_arcsec, xlabelsize, ylabelsize, xyticksize)<EOL>set_axis_limits(axis_limits)<EOL>plot_points(grid, points, pointcolor)<EOL>plt.tick_params(labelsize=xyticksize)<EOL>plotter_util.output_figure(None, as_subplot, output_path, output_filename, output_format)<EOL>plotter_util.close_figure(as_subplot=as_subplot)<EOL>", "docstring": "Plot a grid of (y,x) Cartesian coordinates as a scatter plot of points.\n\n    Parameters\n    -----------\n    grid : data.array.grids.RegularGrid\n        The (y,x) coordinates of the grid, in an array of shape (total_coordinates, 2).\n    axis_limits : []\n        The axis limits of the figure on which the grid is plotted, following [xmin, xmax, ymin, ymax].\n    points : []\n        A set of points that are plotted in a different colour for emphasis (e.g. 
to show the mappings between \\\n        different planes).\n    as_subplot : bool\n        Whether the grid is plotted as part of a subplot, in which case the grid figure is not opened / closed.\n    units : str\n        The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').\n    kpc_per_arcsec : float\n        The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.\n    figsize : (int, int)\n        The size of the figure in (rows, columns).\n    pointsize : int\n        The size of the points plotted on the grid.\n    xyticksize : int\n        The font size of the x and y ticks on the figure axes.\n    title : str\n        The text of the title.\n    titlesize : int\n        The size of of the title of the figure.\n    xlabelsize : int\n        The fontsize of the x axes label.\n    ylabelsize : int\n        The fontsize of the y axes label.\n    output_path : str\n        The path on the hard-disk where the figure is output.\n    output_filename : str\n        The filename of the figure that is output.\n    output_format : str\n        The format the figue is output:\n        'show' - display on computer screen.\n        'png' - output to hard-disk as a png.", "id": "f5987:m0"}
{"signature": "def convert_grid_units(grid_arcsec, units, kpc_per_arcsec):", "body": "if units in '<STR_LIT>' or kpc_per_arcsec is None:<EOL><INDENT>return grid_arcsec<EOL><DEDENT>elif units in '<STR_LIT>':<EOL><INDENT>return grid_arcsec * kpc_per_arcsec<EOL><DEDENT>", "docstring": "Convert the grid from its input units (arc-seconds) to the input unit (e.g. retain arc-seconds) or convert to \\\n    another set of units (kiloparsecs).\n\n    Parameters\n    -----------\n    grid_arcsec : ndarray or data.array.grids.RegularGrid\n        The (y,x) coordinates of the grid in arc-seconds, in an array of shape (total_coordinates, 2).\n    units : str\n        The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').\n    kpc_per_arcsec : float\n        The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.", "id": "f5987:m1"}
{"signature": "def set_axis_limits(axis_limits):", "body": "if axis_limits is not None:<EOL><INDENT>plt.axis(axis_limits)<EOL><DEDENT>", "docstring": "Set the axis limits of the figure the grid is plotted on.\n\n    Parameters\n    -----------\n    axis_limits : []\n        The axis limits of the figure on which the grid is plotted, following [xmin, xmax, ymin, ymax].", "id": "f5987:m3"}
{"signature": "def load_poisson_noise_map(poisson_noise_map_path, poisson_noise_map_hdu, pixel_scale,<EOL>convert_poisson_noise_map_from_weight_map,<EOL>convert_poisson_noise_map_from_inverse_noise_map,<EOL>poisson_noise_map_from_image,<EOL>image, exposure_time_map, convert_from_electrons, gain, convert_from_adus):", "body": "poisson_noise_map_options = sum([convert_poisson_noise_map_from_weight_map,<EOL>convert_poisson_noise_map_from_inverse_noise_map,<EOL>poisson_noise_map_from_image])<EOL>if poisson_noise_map_options == <NUM_LIT:0> and poisson_noise_map_path is not None:<EOL><INDENT>return PoissonNoiseMap.from_fits_with_pixel_scale(file_path=poisson_noise_map_path, hdu=poisson_noise_map_hdu,<EOL>pixel_scale=pixel_scale)<EOL><DEDENT>elif poisson_noise_map_from_image:<EOL><INDENT>if not (convert_from_electrons or convert_from_adus) and exposure_time_map is None:<EOL><INDENT>raise exc.DataException('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if convert_from_adus and gain is None:<EOL><INDENT>raise exc.DataException('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>return PoissonNoiseMap.from_image_and_exposure_time_map(pixel_scale=pixel_scale, image=image,<EOL>exposure_time_map=exposure_time_map,<EOL>convert_from_electrons=convert_from_electrons,<EOL>gain=gain,<EOL>convert_from_adus=convert_from_adus)<EOL><DEDENT>elif convert_poisson_noise_map_from_weight_map and poisson_noise_map_path is not None:<EOL><INDENT>weight_map = Array.from_fits(file_path=poisson_noise_map_path, hdu=poisson_noise_map_hdu)<EOL>return PoissonNoiseMap.from_weight_map(weight_map=weight_map, pixel_scale=pixel_scale)<EOL><DEDENT>elif convert_poisson_noise_map_from_inverse_noise_map and poisson_noise_map_path is not None:<EOL><INDENT>inverse_noise_map = Array.from_fits(file_path=poisson_noise_map_path, hdu=poisson_noise_map_hdu)<EOL>return PoissonNoiseMap.from_inverse_noise_map(inverse_noise_map=inverse_noise_map, pixel_scale=pixel_scale)<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": 
"Factory for loading the Poisson noise-map from a .fits file.\n\n    This factory also includes a number of routines for converting the Poisson noise-map from from other units (e.g. \\\n    a weight map) or computing the Poisson noise_map from other unblurred_image_1d (e.g. the ccd image).\n\n    Parameters\n    ----------\n    poisson_noise_map_path : str\n        The path to the poisson_noise_map .fits file containing the Poisson noise-map \\\n         (e.g. '/path/to/poisson_noise_map.fits')\n    poisson_noise_map_hdu : int\n        The hdu the poisson_noise_map is contained in the .fits file specified by *poisson_noise_map_path*.\n    pixel_scale : float\n        The size of each pixel in arc seconds.\n    convert_poisson_noise_map_from_weight_map : bool\n        If True, the Poisson noise-map loaded from the .fits file is converted from a weight-map to a noise-map (see \\\n        *NoiseMap.from_weight_map).\n    convert_poisson_noise_map_from_inverse_noise_map : bool\n        If True, the Poisson noise-map loaded from the .fits file is converted from an inverse noise-map to a \\\n        noise-map (see *NoiseMap.from_inverse_noise_map).\n    poisson_noise_map_from_image : bool\n        If True, the Poisson noise-map is estimated using the image.\n    image : ndarray\n        The image, which the Poisson noise-map can be calculated using.\n    background_noise_map : ndarray\n        The background noise-map, which the Poisson noise-map can be calculated using.\n    exposure_time_map : ndarray\n        The exposure-time map, which the Poisson noise-map can be calculated using.\n    convert_from_electrons : bool\n        If True, the input unblurred_image_1d are in units of electrons and all converted to electrons / second using the exposure \\\n        time map.\n    gain : float\n        The image gain, used for convert from ADUs.\n    convert_from_adus : bool\n        If True, the input unblurred_image_1d are in units of adus and all converted to electrons / 
second using the exposure \\\n        time map and gain.", "id": "f5989:m6"}
{"signature": "def generate_poisson_noise(image, exposure_time_map, seed=-<NUM_LIT:1>):", "body": "setup_random_seed(seed)<EOL>image_counts = np.multiply(image, exposure_time_map)<EOL>return image - np.divide(np.random.poisson(image_counts, image.shape), exposure_time_map)<EOL>", "docstring": "Generate a two-dimensional poisson noise_maps-mappers from an image.\n\nValues are computed from a Poisson distribution using the image's input values in units of counts.\n\nParameters\n----------\nimage : ndarray\n    The 2D image, whose values in counts are used to draw Poisson noise_maps values.\nexposure_time_map : Union(ndarray, int)\n    2D array of the exposure time in each pixel used to convert to / from counts and electrons per second.\nseed : int\n    The seed of the random number generator, used for the random noise_maps maps.\n\nReturns\n-------\npoisson_noise_map: ndarray\n    An array describing simulated poisson noise_maps", "id": "f5989:m1"}
{"signature": "def array_from_electrons_per_second_to_counts(self, array):", "body": "return np.multiply(array, self.exposure_time_map)<EOL>", "docstring": "For an array (in electrons per second) and an exposure time mappers, return an array in units counts.\n\nParameters\n----------\narray : ndarray\n    The array the values are to be converted from electrons per seconds to counts.", "id": "f5989:c0:m21"}
{"signature": "@classmethod<EOL><INDENT>def from_fits_with_scale(cls, file_path, hdu, pixel_scale):<DEDENT>", "body": "return cls(array=array_util.numpy_array_2d_from_fits(file_path, hdu), pixel_scale=pixel_scale)<EOL>", "docstring": "Loads the PSF from a .fits file.\n\nParameters\n----------\npixel_scale\nfile_path: String\n    The path to the file containing the PSF\nhdu : int\n    The HDU the PSF is stored in the .fits file.", "id": "f5989:c3:m4"}
{"signature": "@property<EOL><INDENT>def estimated_noise_map(self):<DEDENT>", "body": "return self.array_from_counts_to_electrons_per_second(self.estimated_noise_map_counts)<EOL>", "docstring": "The estimated noise_maps mappers of the image (using its background noise_maps mappers and image values\n        in counts) in electrons per second.", "id": "f5989:c0:m27"}
{"signature": "@classmethod<EOL><INDENT>def from_weight_map(cls, pixel_scale, weight_map):<DEDENT>", "body": "np.seterr(divide='<STR_LIT:ignore>')<EOL>noise_map = <NUM_LIT:1.0> / np.sqrt(weight_map)<EOL>noise_map[noise_map == np.inf] = <NUM_LIT><EOL>return NoiseMap(array=noise_map, pixel_scale=pixel_scale)<EOL>", "docstring": "Setup the noise-map from a weight map, which is a form of noise-map that comes via HST image-reduction and \\\n        the software package MultiDrizzle.\n\n        The variance in each pixel is computed as:\n\n        Variance = 1.0 / sqrt(weight_map).\n\n        The weight map may contain zeros, in which cause the variances are converted to large values to omit them from \\\n        the analysis.\n\n        Parameters\n        -----------\n        pixel_scale : float\n            The size of each pixel in arc seconds.\n        weight_map : ndarray\n            The weight-value of each pixel which is converted to a variance.", "id": "f5989:c1:m0"}
{"signature": "@classmethod<EOL><INDENT>def simulate_variable_arrays(cls, array, pixel_scale, exposure_time_map, psf=None, background_sky_map=None,<EOL>add_noise=True, noise_if_add_noise_false=<NUM_LIT:0.1>, noise_seed=-<NUM_LIT:1>, name=None):<DEDENT>", "body": "if background_sky_map is not None:<EOL><INDENT>array += background_sky_map<EOL><DEDENT>if psf is not None:<EOL><INDENT>array = psf.convolve(array)<EOL>array = cls.trim_psf_edges(array, psf)<EOL>exposure_time_map = cls.trim_psf_edges(exposure_time_map, psf)<EOL>if background_sky_map is not None:<EOL><INDENT>background_sky_map = cls.trim_psf_edges(background_sky_map, psf)<EOL><DEDENT><DEDENT>if add_noise is True:<EOL><INDENT>array += generate_poisson_noise(array, exposure_time_map, noise_seed)<EOL>array_counts = np.multiply(array, exposure_time_map)<EOL>noise_map = np.divide(np.sqrt(array_counts), exposure_time_map)<EOL><DEDENT>else:<EOL><INDENT>noise_map = noise_if_add_noise_false * np.ones(array.shape)<EOL><DEDENT>if np.isnan(noise_map).any():<EOL><INDENT>raise exc.DataException('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if background_sky_map is not None:<EOL><INDENT>array -= background_sky_map<EOL><DEDENT>if background_sky_map is not None:<EOL><INDENT>background_noise_map_counts = np.sqrt(np.multiply(background_sky_map, exposure_time_map))<EOL>background_noise_map = np.divide(background_noise_map_counts, exposure_time_map)<EOL><DEDENT>else:<EOL><INDENT>background_noise_map = None<EOL><DEDENT>array_counts = np.multiply(array, exposure_time_map)<EOL>poisson_noise_map = np.divide(np.sqrt(np.abs(array_counts)), exposure_time_map)<EOL>array = ScaledSquarePixelArray(array=array, pixel_scale=pixel_scale)<EOL>noise_map = NoiseMap(array=noise_map, pixel_scale=pixel_scale)<EOL>if background_noise_map is not None:<EOL><INDENT>background_noise_map = NoiseMap(array=background_noise_map, pixel_scale=pixel_scale)<EOL><DEDENT>if poisson_noise_map is not None:<EOL><INDENT>poisson_noise_map = 
PoissonNoiseMap(array=poisson_noise_map, pixel_scale=pixel_scale)<EOL><DEDENT>return CCDData(array, pixel_scale=pixel_scale, psf=psf, noise_map=noise_map,<EOL>background_noise_map=background_noise_map, poisson_noise_map=poisson_noise_map,<EOL>exposure_time_map=exposure_time_map, background_sky_map=background_sky_map, name=name)<EOL>", "docstring": "Create a realistic simulated image by applying effects to a plain simulated image.\n\nParameters\n----------\nname\narray : ndarray\n    The image before simulating (e.g. the lens and source galaxies before optics blurring and CCD read-out).\npixel_scale: float\n    The scale of each pixel in arc seconds\nexposure_time_map : ndarray\n    An array representing the effective exposure time of each pixel.\npsf: PSF\n    An array describing the PSF the simulated image is blurred with.\nbackground_sky_map : ndarray\n    The value of background sky in every image pixel (electrons per second).\nadd_noise: Bool\n    If True poisson noise_maps is simulated and added to the image, based on the total counts in each image\n    pixel\nnoise_seed: int\n    A seed for random noise_maps generation", "id": "f5989:c0:m3"}
{"signature": "@property<EOL><INDENT>def background_noise_map_counts(self):<DEDENT>", "body": "return self.array_from_electrons_per_second_to_counts(self.background_noise_map)<EOL>", "docstring": "The background noise_maps mappers in units of counts.", "id": "f5989:c0:m25"}
{"signature": "def __init__(self, image, pixel_scale, psf, noise_map=None, background_noise_map=None, poisson_noise_map=None,<EOL>exposure_time_map=None, background_sky_map=None, name=None, **kwargs):", "body": "self.name = name<EOL>self.image = image<EOL>self.pixel_scale = pixel_scale<EOL>self.psf = psf<EOL>self.noise_map = noise_map<EOL>self.background_noise_map = background_noise_map<EOL>self.poisson_noise_map = poisson_noise_map<EOL>self.exposure_time_map = exposure_time_map<EOL>self.background_sky_map = background_sky_map<EOL>self.origin = (<NUM_LIT:0.0>, <NUM_LIT:0.0>)<EOL>", "docstring": "A collection of 2D CCD data (an image, noise-map, psf, etc.)\n\n        Parameters\n        ----------\n        image : scaled_array.ScaledArraySquarePixels\n            The array of the image data, in units of electrons per second.\n        pixel_scale : float\n            The size of each pixel in arc seconds.\n        psf : PSF\n            An array describing the PSF kernel of the image.\n        noise_map : NoiseMap | float | ndarray\n            An array describing the RMS standard deviation error in each pixel, preferably in units of electrons per\n            second.\n        background_noise_map : NoiseMap\n            An array describing the RMS standard deviation error in each pixel due to the background sky noise_map,\n            preferably in units of electrons per second.\n        poisson_noise_map : NoiseMap\n            An array describing the RMS standard deviation error in each pixel due to the Poisson counts of the source,\n            preferably in units of electrons per second.\n        exposure_time_map : scaled_array.ScaledSquarePixelArray\n            An array describing the effective exposure time in each ccd pixel.\n        background_sky_map : scaled_array.ScaledSquarePixelArray\n            An array describing the background sky.", "id": "f5989:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def from_fits_renormalized(cls, file_path, hdu, pixel_scale):<DEDENT>", "body": "psf = PSF.from_fits_with_scale(file_path, hdu, pixel_scale)<EOL>psf[:, :] = np.divide(psf, np.sum(psf))<EOL>return psf<EOL>", "docstring": "Loads a PSF from fits and renormalizes it\n\n        Parameters\n        ----------\n        pixel_scale\n        file_path: String\n            The path to the file containing the PSF\n        hdu : int\n            The HDU the PSF is stored in the .fits file.\n\n        Returns\n        -------\n        psf: PSF\n            A renormalized PSF instance", "id": "f5989:c3:m3"}
{"signature": "def __init__(self, array, pixel_scale, renormalize=False, **kwargs):", "body": "<EOL>super().__init__(array=array, pixel_scale=pixel_scale)<EOL>if renormalize:<EOL><INDENT>self[:, :] = np.divide(self, np.sum(self))<EOL><DEDENT>", "docstring": "Class storing a 2D Point Spread Function (PSF), including its blurring kernel.\n\nParameters\n----------\narray : ndarray\n    The 2d PSF blurring kernel.\nrenormalize : bool\n    Renormalize the PSF such that he sum of kernel values total 1.0?", "id": "f5989:c3:m0"}
{"signature": "def load_positions(positions_path):", "body": "with open(positions_path) as f:<EOL><INDENT>position_string = f.readlines()<EOL><DEDENT>positions = []<EOL>for line in position_string:<EOL><INDENT>position_list = ast.literal_eval(line)<EOL>positions.append(position_list)<EOL><DEDENT>return positions<EOL>", "docstring": "Load the positions of an image.\n\n    Positions correspond to a set of pixels in the lensed source galaxy that are anticipated to come from the same \\\n    multiply-imaged region of the source-plane. Mass models which do not trace the pixels within a threshold value of \\\n    one another are resampled during the non-linear search.\n\n    Positions are stored in a .dat file, where each line of the file gives a list of list of (y,x) positions which \\\n    correspond to the same region of the source-plane. Thus, multiple source-plane regions can be input over multiple \\\n    lines of the same positions file.\n\n    Parameters\n    ----------\n    positions_path : str\n        The path to the positions .dat file containing the positions (e.g. '/path/to/positions.dat')", "id": "f5989:m11"}
{"signature": "def setup_random_seed(seed):", "body": "if seed == -<NUM_LIT:1>:<EOL><INDENT>seed = np.random.randint(<NUM_LIT:0>,<EOL>int(<NUM_LIT>))  <EOL><DEDENT>np.random.seed(seed)<EOL>", "docstring": "Setup the random seed. If the input seed is -1, the code will use a random seed for every run. If it is \\\n    positive, that seed is used for all runs, thereby giving reproducible results.\n\n    Parameters\n    ----------\n    seed : int\n        The seed of the random number generator.", "id": "f5989:m0"}
{"signature": "@classmethod<EOL><INDENT>def simulate_to_target_signal_to_noise(cls, array, pixel_scale, target_signal_to_noise, exposure_time_map,<EOL>psf=None, background_sky_map=None, seed=-<NUM_LIT:1>):<DEDENT>", "body": "max_index = np.unravel_index(array.argmax(), array.shape)<EOL>max_image = array[max_index]<EOL>max_effective_exposure_time = exposure_time_map[max_index]<EOL>max_array_counts = np.multiply(max_image, max_effective_exposure_time)<EOL>if background_sky_map is not None:<EOL><INDENT>max_background_sky_map = background_sky_map[max_index]<EOL>max_background_sky_map_counts = np.multiply(max_background_sky_map, max_effective_exposure_time)<EOL><DEDENT>else:<EOL><INDENT>max_background_sky_map_counts = None<EOL><DEDENT>scale_factor = <NUM_LIT:1.><EOL>if background_sky_map is None:<EOL><INDENT>scale_factor = target_signal_to_noise ** <NUM_LIT> / max_array_counts<EOL><DEDENT>elif background_sky_map is not None:<EOL><INDENT>scale_factor = (max_array_counts + max_background_sky_map_counts) * target_signal_to_noise ** <NUM_LIT>/ max_array_counts ** <NUM_LIT><EOL><DEDENT>scaled_effective_exposure_time = np.multiply(scale_factor, exposure_time_map)<EOL>return cls.simulate_variable_arrays(array=array, pixel_scale=pixel_scale,<EOL>exposure_time_map=scaled_effective_exposure_time,<EOL>psf=psf, background_sky_map=background_sky_map,<EOL>add_noise=True, noise_seed=seed)<EOL>", "docstring": "Create a realistic simulated image by applying effects to a plain simulated image.\n\nParameters\n----------\ntarget_signal_to_noise\narray : ndarray\n    The image before simulating (e.g. 
the lens and source galaxies before optics blurring and CCD read-out).\npixel_scale: float\n    The scale of each pixel in arc seconds\nexposure_time_map : ndarray\n    An array representing the effective exposure time of each pixel.\npsf: PSF\n    An array describing the PSF the simulated image is blurred with.\nbackground_sky_map : ndarray\n    The value of background sky in every image pixel (electrons per second).\nseed: int\n    A seed for random noise_maps generation", "id": "f5989:c0:m4"}
{"signature": "def load_exposure_time_map(exposure_time_map_path, exposure_time_map_hdu, pixel_scale, shape, exposure_time,<EOL>exposure_time_map_from_inverse_noise_map, inverse_noise_map):", "body": "exposure_time_map_options = sum([exposure_time_map_from_inverse_noise_map])<EOL>if exposure_time is not None and exposure_time_map_path is not None:<EOL><INDENT>raise exc.DataException(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if exposure_time_map_options == <NUM_LIT:0>:<EOL><INDENT>if exposure_time is not None and exposure_time_map_path is None:<EOL><INDENT>return ExposureTimeMap.single_value(value=exposure_time, pixel_scale=pixel_scale, shape=shape)<EOL><DEDENT>elif exposure_time is None and exposure_time_map_path is not None:<EOL><INDENT>return ExposureTimeMap.from_fits_with_pixel_scale(file_path=exposure_time_map_path,<EOL>hdu=exposure_time_map_hdu, pixel_scale=pixel_scale)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if exposure_time_map_from_inverse_noise_map:<EOL><INDENT>return ExposureTimeMap.from_exposure_time_and_inverse_noise_map(pixel_scale=pixel_scale,<EOL>exposure_time=exposure_time,<EOL>inverse_noise_map=inverse_noise_map)<EOL><DEDENT><DEDENT>", "docstring": "Factory for loading the exposure time map from a .fits file.\n\n    This factory also includes a number of routines for computing the exposure-time map from other unblurred_image_1d \\\n    (e.g. the background noise-map).\n\n    Parameters\n    ----------\n    exposure_time_map_path : str\n        The path to the exposure_time_map .fits file containing the exposure time map \\\n        (e.g. 
'/path/to/exposure_time_map.fits')\n    exposure_time_map_hdu : int\n        The hdu the exposure_time_map is contained in the .fits file specified by *exposure_time_map_path*.\n    pixel_scale : float\n        The size of each pixel in arc seconds.\n    shape : (int, int)\n        The shape of the image, required if a single value is used to calculate the exposure time map.\n    exposure_time : float\n        The exposure-time used to compute the expsure-time map if only a single value is used.\n    exposure_time_map_from_inverse_noise_map : bool\n        If True, the exposure-time map is computed from the background noise_map map \\\n        (see *ExposureTimeMap.from_background_noise_map*)\n    inverse_noise_map : ndarray\n        The background noise-map, which the Poisson noise-map can be calculated using.", "id": "f5989:m8"}
{"signature": "@property<EOL><INDENT>def image_counts(self):<DEDENT>", "body": "return self.array_from_electrons_per_second_to_counts(self.image)<EOL>", "docstring": "The image in units of counts.", "id": "f5989:c0:m24"}
{"signature": "@classmethod<EOL><INDENT>def from_mask_and_sub_grid_size(cls, mask, sub_grid_size=<NUM_LIT:1>):<DEDENT>", "body": "sub_grid_masked = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(<EOL>mask=mask,<EOL>pixel_scales=mask.pixel_scales,<EOL>sub_grid_size=sub_grid_size)<EOL>return SubGrid(sub_grid_masked, mask, sub_grid_size)<EOL>", "docstring": "Setup a sub-grid of the unmasked pixels, using a mask and a specified sub-grid size. The center of \\\n        every unmasked pixel's sub-pixels give the grid's (y,x) arc-second coordinates.\n\n        Parameters\n        -----------\n        mask : Mask\n            The mask whose masked pixels are used to setup the sub-pixel grid_stack.\n        sub_grid_size : int\n            The size (sub_grid_size x sub_grid_size) of each unmasked pixels sub-grid.", "id": "f5990:c2:m3"}
{"signature": "def padded_blurred_image_2d_from_padded_image_1d_and_psf(self, padded_image_1d, psf):", "body": "padded_model_image_1d = self.convolve_array_1d_with_psf(padded_array_1d=padded_image_1d, psf=psf)<EOL>return self.scaled_array_2d_from_array_1d(array_1d=padded_model_image_1d)<EOL>", "docstring": "Compute a 2D padded blurred image from a 1D padded image.\n\n        Parameters\n        ----------\n        padded_image_1d : ndarray\n            A 1D unmasked image which is blurred with the PSF.\n        psf : ndarray\n            An array describing the PSF kernel of the image.", "id": "f5990:c5:m3"}
{"signature": "def __new__(cls, arr, regular_to_nearest_pix, *args, **kwargs):", "body": "obj = arr.view(cls)<EOL>obj.regular_to_nearest_pix = regular_to_nearest_pix<EOL>obj.interpolator = None<EOL>return obj<EOL>", "docstring": "A pix-grid of (y,x) coordinates which are used to form the pixel centres of adaptive pixelizations in the \\\n        *pixelizations* module.\n\n        A *PixGrid* is ordered such that pixels begin from the top-row of the mask and go rightwards and then \\\n        downwards. Therefore, it is a ndarray of shape [total_pix_pixels, 2]. The first element of the ndarray \\\n        thus corresponds to the pix pixel index and second element the y or x arc-second coordinates. For example:\n\n        - pix_grid[3,0] = the 4th unmasked pixel's y-coordinate.\n        - pix_grid[6,1] = the 7th unmasked pixel's x-coordinate.\n\n        Parameters\n        -----------\n        pix_grid : ndarray\n            The grid of (y,x) arc-second coordinates of every image-plane pixelization grid used for adaptive source \\\n            -plane pixelizations.\n        regular_to_nearest_pix : ndarray\n            A 1D array that maps every regular-grid pixel to its nearest pix-grid pixel.", "id": "f5990:c3:m0"}
{"signature": "@classmethod<EOL><INDENT>def padded_grid_from_shape_psf_shape_and_pixel_scale(cls, shape, psf_shape, pixel_scale):<DEDENT>", "body": "padded_shape = (shape[<NUM_LIT:0>] + psf_shape[<NUM_LIT:0>] - <NUM_LIT:1>, shape[<NUM_LIT:1>] + psf_shape[<NUM_LIT:1>] - <NUM_LIT:1>)<EOL>padded_regular_grid = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin(<EOL>mask=np.full(padded_shape, False), pixel_scales=(pixel_scale, pixel_scale))<EOL>padded_mask = msk.Mask.unmasked_for_shape_and_pixel_scale(shape=padded_shape, pixel_scale=pixel_scale)<EOL>return PaddedRegularGrid(arr=padded_regular_grid, mask=padded_mask, image_shape=shape)<EOL>", "docstring": "Setup a regular padded grid from a 2D array shape, psf-shape and pixel-scale.\n\n        The center of every pixel is used to setup the grid's (y,x) arc-second coordinates, including padded pixels \\\n        which are beyond the input shape but will blurred light into the 2D array's shape due to the psf.\n\n        Parameters\n        ----------\n        shape : (int, int)\n            The (y,x) shape of the masked-grid's 2D image in units of pixels.\n        psf_shape : (int, int)\n           The shape of the psf which defines the blurring region and therefore size of padding.\n        pixel_scale : float\n            The scale of each pixel in arc seconds", "id": "f5990:c5:m2"}
{"signature": "@classmethod<EOL><INDENT>def blurring_grid_from_mask_and_psf_shape(cls, mask, psf_shape):<DEDENT>", "body": "blurring_mask = mask.blurring_mask_for_psf_shape(psf_shape)<EOL>return RegularGrid.from_mask(blurring_mask)<EOL>", "docstring": "Setup a blurring-grid from a mask, where a blurring grid consists of all pixels that are masked, but they \\\n        are close enough to the unmasked pixels that a fraction of their light will be blurred into those pixels \\\n        via PSF convolution. For example, if our mask is as follows:\n\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|     This is an ccd.Mask, where:\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|     x = True (Pixel is masked and excluded from lens)\n        |x|x|x|o|o|o|x|x|x|x|     o = False (Pixel is not masked and included in lens)\n        |x|x|x|o|o|o|x|x|x|x|\n        |x|x|x|o|o|o|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|\n\n        For a PSF of shape (3,3), the following blurring mask is computed (noting that only pixels that are direct \\\n        neighbors of the unmasked pixels above will blur light into an unmasked pixel):\n\n        |x|x|x|x|x|x|x|x|x|     This is an example regular.Mask, where:\n        |x|x|x|x|x|x|x|x|x|\n        |x|x|o|o|o|o|o|x|x|     x = True (Pixel is masked and excluded from lens)\n        |x|x|o|x|x|x|o|x|x|     o = False (Pixel is not masked and included in lens)\n        |x|x|o|x|x|x|o|x|x|\n        |x|x|o|x|x|x|o|x|x|\n        |x|x|o|o|o|o|o|x|x|\n        |x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|\n\n        Thus, the blurring grid coordinates and indexes will be as follows:\n\n        pixel_scale = 1.0\"\n\n        <--- -ve  x  +ve -->\n                                                            y     x\n        |x|x|x |x |x |x |x |x|x|  |   blurring_grid[0] = [2.0, -2.0]  blurring_grid[9] =  [-1.0, -2.0]\n        |x|x|x |x |x |x |x |x|x|  |   
blurring_grid[1] = [2.0, -1.0]  blurring_grid[10] = [-1.0,  2.0]\n        |x|x|0 |1 |2 |3 |4 |x|x| +ve  blurring_grid[2] = [2.0,  0.0]  blurring_grid[11] = [-2.0, -2.0]\n        |x|x|5 |x |x |x |6 |x|x|  y   blurring_grid[3] = [2.0,  1.0]  blurring_grid[12] = [-2.0, -1.0]\n        |x|x|7 |x |x |x |8 |x|x| -ve  blurring_grid[4] = [2.0,  2.0]  blurring_grid[13] = [-2.0,  0.0]\n        |x|x|9 |x |x |x |10|x|x|  |   blurring_grid[5] = [1.0, -2.0]  blurring_grid[14] = [-2.0,  1.0]\n        |x|x|11|12|13|14|15|x|x|  |   blurring_grid[6] = [1.0,  2.0]  blurring_grid[15] = [-2.0,  2.0]\n        |x|x|x |x |x |x |x |x|x| \\/   blurring_grid[7] = [0.0, -2.0]\n        |x|x|x |x |x |x |x |x|x|      blurring_grid[8] = [0.0,  2.0]\n\n        For a PSF of shape (5,5), the following blurring mask is computed (noting that pixels that are 2 pixels from an\n        direct unmasked pixels now blur light into an unmasked pixel):\n\n        |x|x|x|x|x|x|x|x|x|     This is an example regular.Mask, where:\n        |x|o|o|o|o|o|o|o|x|\n        |x|o|o|o|o|o|o|o|x|     x = True (Pixel is masked and excluded from lens)\n        |x|o|o|x|x|x|o|o|x|     o = False (Pixel is not masked and included in lens)\n        |x|o|o|x|x|x|o|o|x|\n        |x|o|o|x|x|x|o|o|x|\n        |x|o|o|o|o|o|o|o|x|\n        |x|o|o|o|o|o|o|o|x|\n        |x|x|x|x|x|x|x|x|x|", "id": "f5990:c1:m8"}
{"signature": "@classmethod<EOL><INDENT>def from_mask(cls, mask):<DEDENT>", "body": "array = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask,<EOL>pixel_scales=mask.pixel_scales)<EOL>return cls(array, mask)<EOL>", "docstring": "Setup a regular-grid from a mask, where the center of every unmasked pixel gives the grid's (y,x) \\\n        arc-second coordinates.\n\n        Parameters\n        -----------\n        mask : Mask\n            The mask whose unmasked pixels are used to setup the regular-pixel grid.", "id": "f5990:c1:m6"}
{"signature": "@classmethod<EOL><INDENT>def grid_stack_for_simulation(cls, shape, pixel_scale, psf_shape, sub_grid_size=<NUM_LIT:2>):<DEDENT>", "body": "return cls.padded_grid_stack_from_mask_sub_grid_size_and_psf_shape(mask=msk.Mask(array=np.full(shape, False),<EOL>pixel_scale=pixel_scale),<EOL>sub_grid_size=sub_grid_size,<EOL>psf_shape=psf_shape)<EOL>", "docstring": "Setup a grid-stack of grid_stack for simulating an image of a strong lens, whereby the grid's use \\\n        padded-grid_stack to ensure that the PSF blurring in the simulation routine (*ccd.PrepatoryImage.simulate*) \\\n        is not degraded due to edge effects.\n\n        Parameters\n        -----------\n        shape : (int, int)\n            The 2D shape of the array, where all pixels are used to generate the grid-stack's grid_stack.\n        pixel_scale : float\n            The size of each pixel in arc seconds.            \n        psf_shape : (int, int)\n            The shape of the PSF used in the analysis, which defines how much the grid's must be masked to mitigate \\\n            edge effects.\n        sub_grid_size : int\n            The size of a sub-pixel's sub-grid (sub_grid_size x sub_grid_size).", "id": "f5990:c0:m5"}
{"signature": "def __init__(self, array, mask, sub_grid_size=<NUM_LIT:1>):", "body": "<EOL>super(SubGrid, self).__init__()<EOL>self.mask = mask<EOL>self.sub_grid_size = sub_grid_size<EOL>self.sub_grid_length = int(sub_grid_size ** <NUM_LIT>)<EOL>self.sub_grid_fraction = <NUM_LIT:1.0> / self.sub_grid_length<EOL>", "docstring": "A sub-grid of coordinates, where each entry corresponds to the (y,x) coordinates at the centre of each \\\n        sub-pixel of an unmasked pixel (e.g. the pixels of a regular-grid). The positive y-axis is upwards and poitive \\\n        x-axis to the right, and this convention is followed for the sub-pixels in each unmasked pixel.\n\n        A *SubGrid* is ordered such that pixels begin from the first (top-left) sub-pixel in the first unmasked pixel. \\\n        Indexes then go over the sub-pixels in each unmasked pixel, for every unmasked pixel. Therefore, \\\n        the sub-grid is an ndarray of shape [total_unmasked_pixels*(sub_grid_shape)**2, 2]. For example:\n\n        - sub_grid[9, 1] - using a 2x2 sub-grid, gives the 3rd unmasked pixel's 2nd sub-pixel x-coordinate.\n        - sub_grid[9, 1] - using a 3x3 sub-grid, gives the 2nd unmasked pixel's 1st sub-pixel x-coordinate.\n        - sub_grid[27, 0] - using a 3x3 sub-grid, gives the 4th unmasked pixel's 1st sub-pixel y-coordinate.\n\n        Below is a visual illustration of a sub grid. Like the regular grid, the indexing of each sub-pixel goes from \\\n        the top-left corner. 
In contrast to the regular grid above, our illustration below restricts the mask to just \\\n        2 pixels, to keep the illustration brief.\n\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|     This is an example mask.Mask, where:\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|     x = True (Pixel is masked and excluded from lens)\n        |x|x|x|x|o|o|x|x|x|x|     o = False (Pixel is not masked and included in lens)\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|\n\n        Our regular-grid looks like it did before:\n\n        pixel_scale = 1.0\"\n\n        <--- -ve  x  +ve -->\n\n        |x|x|x|x|x|x|x|x|x|x|  ^\n        |x|x|x|x|x|x|x|x|x|x|  |\n        |x|x|x|x|x|x|x|x|x|x|  |                        y     x\n        |x|x|x|x|x|x|x|x|x|x| +ve  regular_grid[0] = [0.5,  -1.5]\n        |x|x|x|0|1|x|x|x|x|x|  y   regular_grid[1] = [0.5,  -0.5]\n        |x|x|x|x|x|x|x|x|x|x| -ve\n        |x|x|x|x|x|x|x|x|x|x|  |\n        |x|x|x|x|x|x|x|x|x|x|  |\n        |x|x|x|x|x|x|x|x|x|x| \\/\n        |x|x|x|x|x|x|x|x|x|x|\n\n        However, we now go to each unmasked pixel and derive a sub-pixel grid for it. 
For example, for pixel 0,\n        if *sub_grid_size=2*, we use a 2x2 sub-grid:\n\n        Pixel 0 - (2x2):\n                                y      x\n               sub_grid[0] = [0.66, -1.66]\n        |0|1|  sub_grid[1] = [0.66, -1.33]\n        |2|3|  sub_grid[2] = [0.33, -1.66]\n               sub_grid[3] = [0.33, -1.33]\n\n        Now, we'd normally sub-grid all pixels using the same *sub_grid_size*, but for this illustration lets\n        pretend we used a sub_grid_size of 3x3 for pixel 1:\n\n                                  y      x\n                 sub_grid[0] = [0.75, -0.75]\n                 sub_grid[1] = [0.75, -0.5]\n                 sub_grid[2] = [0.75, -0.25]\n        |0|1|2|  sub_grid[3] = [0.5,  -0.75]\n        |3|4|5|  sub_grid[4] = [0.5,  -0.5]\n        |6|7|8|  sub_grid[5] = [0.5,  -0.25]\n                 sub_grid[6] = [0.25, -0.75]\n                 sub_grid[7] = [0.25, -0.5]\n                 sub_grid[8] = [0.25, -0.25]", "id": "f5990:c2:m0"}
{"signature": "def __new__(cls, arr, *args, **kwargs):", "body": "border = arr.view(cls)<EOL>return border<EOL>", "docstring": "The borders of a regular grid, containing the pixel-index's of all masked pixels that are on the \\\n        mask's border (e.g. they are next to a *True* value in at least one of the surrounding 8 pixels and at one of \\\n        the exterior edge's of the mask).\n\n        This is used to relocate demagnified pixel's in a grid to its border, so that they do not disrupt an \\\n        adaptive pixelization's inversion.\n\n        Parameters\n        -----------\n        arr : ndarray\n            A 1D array of the integer indexes of an *RegularGrid*'s borders pixels.", "id": "f5990:c7:m0"}
{"signature": "def sub_to_image_grid(func):", "body": "@wraps(func)<EOL>def wrapper(grid, galaxies, *args, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>result = func(grid, galaxies, *args, *kwargs)<EOL>if isinstance(grid, SubGrid):<EOL><INDENT>return grid.regular_data_1d_from_sub_data_1d(result)<EOL><DEDENT>else:<EOL><INDENT>return result<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Wrap the function in a function that, if the grid is a sub-grid (grids.SubGrid), rebins the computed \\\nvalues to the sub-grids corresponding regular-grid by taking the mean of each set of sub-gridded values.\n\nParameters\n----------\nfunc : (profiles, *args, **kwargs) -> Object\n    A function that requires the sub-grid and galaxies.", "id": "f5990:m0"}
{"signature": "def grid_interpolate(func):", "body": "@wraps(func)<EOL>def wrapper(profile, grid, grid_radial_minimum=None, *args, **kwargs):<EOL><INDENT>if hasattr(grid, \"<STR_LIT>\"):<EOL><INDENT>interpolator = grid.interpolator<EOL>if grid.interpolator is not None:<EOL><INDENT>values = func(profile, interpolator.interp_grid, grid_radial_minimum, *args, **kwargs)<EOL>if values.ndim == <NUM_LIT:1>:<EOL><INDENT>return interpolator.interpolated_values_from_values(values=values)<EOL><DEDENT>elif values.ndim == <NUM_LIT:2>:<EOL><INDENT>y_values = interpolator.interpolated_values_from_values(values=values[:, <NUM_LIT:0>])<EOL>x_values = interpolator.interpolated_values_from_values(values=values[:, <NUM_LIT:1>])<EOL>return np.asarray([y_values, x_values]).T<EOL><DEDENT><DEDENT><DEDENT>return func(profile, grid, grid_radial_minimum, *args, **kwargs)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorate a profile method that accepts a coordinate grid and returns a data grid.\n\nIf an interpolator attribute is associated with the input grid then that interpolator is used to down sample the\ncoordinate grid prior to calling the function and up sample the result of the function.\n\nIf no interpolator attribute is associated with the input grid then the function is called as normal.\n\nParameters\n----------\nfunc\n    Some method that accepts a grid\n\nReturns\n-------\ndecorated_function\n    The function with optional interpolation", "id": "f5990:m1"}
{"signature": "@classmethod<EOL><INDENT>def from_shape_pixel_scale_and_sub_grid_size(cls, shape, pixel_scale, sub_grid_size=<NUM_LIT:2>):<DEDENT>", "body": "regular_grid = RegularGrid.from_shape_and_pixel_scale(shape=shape, pixel_scale=pixel_scale)<EOL>sub_grid = SubGrid.from_shape_pixel_scale_and_sub_grid_size(shape=shape, pixel_scale=pixel_scale,<EOL>sub_grid_size=sub_grid_size)<EOL>blurring_grid = np.array([[<NUM_LIT:0.0>, <NUM_LIT:0.0>]])<EOL>return GridStack(regular_grid, sub_grid, blurring_grid)<EOL>", "docstring": "Setup a grid-stack of grid_stack from a 2D array shape, a pixel scale and a sub-grid size.\n\n        This grid corresponds to a fully unmasked 2D array.\n\n        Parameters\n        -----------\n        shape : (int, int)\n            The 2D shape of the array, where all pixels are used to generate the grid-stack's grid_stack.\n        pixel_scale : float\n            The size of each pixel in arc seconds.            \n        sub_grid_size : int\n            The size of a sub-pixel's sub-grid (sub_grid_size x sub_grid_size).", "id": "f5990:c0:m3"}
{"signature": "def __new__(cls, arr, mask, *args, **kwargs):", "body": "obj = arr.view(cls)<EOL>obj.mask = mask<EOL>obj.interpolator = None<EOL>return obj<EOL>", "docstring": "A regular grid of coordinates, where each entry corresponds to the (y,x) coordinates at the centre of an \\\n        unmasked pixel. The positive y-axis is upwards and poitive x-axis to the right. \n\n        A *RegularGrid* is ordered such pixels begin from the top-row of the mask and go rightwards and then \\ \n        downwards. Therefore, it is a ndarray of shape [total_unmasked_pixels, 2]. The first element of the ndarray \\\n        thus corresponds to the regular pixel index and second element the y or x arc -econd coordinates. For example:\n\n        - regular_grid[3,0] = the 4th unmasked pixel's y-coordinate.\n        - regular_grid[6,1] = the 7th unmasked pixel's x-coordinate.\n\n        Below is a visual illustration of a regular-grid, where a total of 10 pixels are unmasked and are included in \\\n        the grid.\n\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|     This is an example mask.Mask, where:\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|o|o|x|x|x|x|     x = True (Pixel is masked and excluded from the regular grid)\n        |x|x|x|o|o|o|o|x|x|x|     o = False (Pixel is not masked and included in the regular grid)\n        |x|x|x|o|o|o|o|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|\n        |x|x|x|x|x|x|x|x|x|x|\n\n        The mask pixel index's will come out like this (and the direction of arc-second coordinates is highlighted\n        around the mask.\n\n        pixel_scale = 1.0\"\n\n        <--- -ve  x  +ve -->\n                                                        y      x\n        |x|x|x|x|x|x|x|x|x|x|  ^   regular_grid[0] = [ 1.5, -0.5]\n        |x|x|x|x|x|x|x|x|x|x|  |   regular_grid[1] = [ 1.5,  0.5]\n        |x|x|x|x|x|x|x|x|x|x|  |   regular_grid[2] = [ 0.5, -1.5]\n        
|x|x|x|x|0|1|x|x|x|x| +ve  regular_grid[3] = [ 0.5, -0.5]\n        |x|x|x|2|3|4|5|x|x|x|  y   regular_grid[4] = [ 0.5,  0.5]\n        |x|x|x|6|7|8|9|x|x|x| -ve  regular_grid[5] = [ 0.5,  1.5]\n        |x|x|x|x|x|x|x|x|x|x|  |   regular_grid[6] = [-0.5, -1.5]\n        |x|x|x|x|x|x|x|x|x|x|  |   regular_grid[7] = [-0.5, -0.5]\n        |x|x|x|x|x|x|x|x|x|x| \\/   regular_grid[8] = [-0.5,  0.5]\n        |x|x|x|x|x|x|x|x|x|x|      regular_grid[9] = [-0.5,  1.5]", "id": "f5990:c1:m0"}
{"signature": "def map_function(self, func, *arg_lists):", "body": "return GridStack(*[func(*args) for args in zip(self, *arg_lists)])<EOL>", "docstring": "Map a function to all grid_stack in a grid-stack", "id": "f5990:c0:m9"}
{"signature": "def new_grid_stack_with_pix_grid_added(self, pix_grid, regular_to_nearest_pix):", "body": "pix = PixGrid(arr=pix_grid, regular_to_nearest_pix=regular_to_nearest_pix)<EOL>return GridStack(regular=self.regular, sub=self.sub, blurring=self.blurring, pix=pix)<EOL>", "docstring": "Setup a grid-stack of grid_stack using an existing grid-stack.\n\n        The new grid-stack has the same grid_stack (regular, sub, blurring, etc.) as before, but adds a pix-grid as a \\\n        new attribute.\n\n        Parameters\n        -----------\n        pix_grid : ndarray\n            The grid of (y,x) arc-second coordinates of every image-plane pixelization grid used for adaptive \\\n             pixelizations.\n        regular_to_nearest_pix : ndarray\n            A 1D array that maps every regular-grid pixel to its nearest pix-grid pixel.", "id": "f5990:c0:m7"}
{"signature": "@property<EOL><INDENT>def yticks(self):<DEDENT>", "body": "return np.linspace(np.min(self[:, <NUM_LIT:0>]), np.max(self[:, <NUM_LIT:0>]), <NUM_LIT:4>)<EOL>", "docstring": "Compute the yticks labels of this grid, used for plotting the y-axis ticks when visualizing a regular", "id": "f5990:c1:m16"}
{"signature": "def scaled_array_2d_with_regular_dimensions_from_binned_up_sub_array_1d(self, sub_array_1d):", "body": "array_1d = self.regular_data_1d_from_sub_data_1d(sub_array_1d=sub_array_1d)<EOL>return scaled_array.ScaledSquarePixelArray(array=self.array_2d_from_array_1d(array_1d=array_1d),<EOL>pixel_scale=self.mask.pixel_scale,<EOL>origin=self.mask.origin)<EOL>", "docstring": "Map a 1D sub-array the same dimension as the sub-grid to its original masked 2D sub-array and return it as\n        a scaled array.\n\n        Parameters\n        -----------\n        sub_array_1d : ndarray\n            The 1D sub-array of which is mapped to a 2D scaled sub-array the dimensions.", "id": "f5990:c2:m7"}
{"signature": "def array_2d_from_array_1d(self, array_1d):", "body": "return mapping_util.map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(<EOL>array_1d=array_1d, shape=self.mask.shape, one_to_two=self.mask.masked_grid_index_to_pixel)<EOL>", "docstring": "Map a 1D array the same dimension as the grid to its original masked 2D array.\n\n        Parameters\n        -----------\n        array_1d : ndarray\n            The 1D array which is mapped to its masked 2D array.", "id": "f5990:c1:m10"}
{"signature": "@property<EOL><INDENT>@array_util.Memoizer()<EOL>def sub_to_regular(self):<DEDENT>", "body": "return mapping_util.sub_to_regular_from_mask(self.mask, self.sub_grid_size).astype('<STR_LIT:int>')<EOL>", "docstring": "The mapping between every sub-pixel and its host regular-pixel.\n\n        For example:\n\n        - sub_to_pixel[8] = 2 -  The ninth sub-pixel is within the 3rd regular pixel.\n        - sub_to_pixel[20] = 4 -  The twenty first sub-pixel is within the 5th regular pixel.", "id": "f5990:c2:m10"}
{"signature": "@classmethod<EOL><INDENT>def from_shape_and_pixel_scale(cls, shape, pixel_scale):<DEDENT>", "body": "mask = msk.Mask.unmasked_for_shape_and_pixel_scale(shape=shape, pixel_scale=pixel_scale)<EOL>array = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask,<EOL>pixel_scales=mask.pixel_scales)<EOL>return cls(array, mask)<EOL>", "docstring": "Setup a regular-grid from a 2D array shape and pixel scale. Here, the center of every pixel on the 2D \\\n        array gives the grid's (y,x) arc-second coordinates. \n\n        This is equivalent to using a 2D mask consisting entirely of unmasked pixels.\n\n        Parameters\n        -----------\n        shape : (int, int)\n            The 2D shape of the array, where all pixels are used to generate the grid-stack's grid_stack.\n        pixel_scale : float\n            The size of each pixel in arc seconds.", "id": "f5990:c1:m7"}
{"signature": "def sub_array_2d_from_sub_array_1d(self, sub_array_1d):", "body": "sub_shape = (self.mask.shape[<NUM_LIT:0>] * self.sub_grid_size, self.mask.shape[<NUM_LIT:1>] * self.sub_grid_size)<EOL>sub_one_to_two = self.mask.masked_sub_grid_index_to_sub_pixel(sub_grid_size=self.sub_grid_size)<EOL>return mapping_util.map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(<EOL>array_1d=sub_array_1d, shape=sub_shape, one_to_two=sub_one_to_two)<EOL>", "docstring": "Map a 1D sub-array the same dimension as the sub-grid (e.g. including sub-pixels) to its original masked\n        2D sub array.\n\n        Parameters\n        -----------\n        sub_array_1d : ndarray\n            The 1D sub_array which is mapped to its masked 2D sub-array.", "id": "f5990:c2:m5"}
{"signature": "@classmethod<EOL><INDENT>def padded_grid_stack_from_mask_sub_grid_size_and_psf_shape(cls, mask, sub_grid_size, psf_shape):<DEDENT>", "body": "regular_padded_grid = PaddedRegularGrid.padded_grid_from_shape_psf_shape_and_pixel_scale(<EOL>shape=mask.shape,<EOL>psf_shape=psf_shape,<EOL>pixel_scale=mask.pixel_scale)<EOL>sub_padded_grid = PaddedSubGrid.padded_grid_from_mask_sub_grid_size_and_psf_shape(mask=mask,<EOL>sub_grid_size=sub_grid_size,<EOL>psf_shape=psf_shape)<EOL>return GridStack(regular=regular_padded_grid, sub=sub_padded_grid, blurring=np.array([[<NUM_LIT:0.0>, <NUM_LIT:0.0>]]))<EOL>", "docstring": "Setup a grid-stack of masked grid_stack from a mask,  sub-grid size and psf-shape.\n\n        Parameters\n        -----------\n        mask : Mask\n            The mask whose masked pixels the grid-stack are setup using.\n        sub_grid_size : int\n            The size of a sub-pixels sub-grid (sub_grid_size x sub_grid_size).\n        psf_shape : (int, int)\n            The shape of the PSF used in the analysis, which defines the mask's blurring-region.", "id": "f5990:c0:m4"}
{"signature": "def map_to_2d_keep_padded(self, padded_array_1d):", "body": "return mapping_util.map_unmasked_1d_array_to_2d_array_from_array_1d_and_shape(array_1d=padded_array_1d,<EOL>shape=self.mask.shape)<EOL>", "docstring": "Map a padded 1D array of values to its padded 2D array.\n\n        Parameters\n        -----------\n        padded_array_1d : ndarray\n            A 1D array of values which were computed using the *PaddedRegularGrid*.", "id": "f5990:c5:m5"}
{"signature": "def __init__(self, array, pixel_scale, origin=(<NUM_LIT:0.0>, <NUM_LIT:0.0>)):", "body": "<EOL>super(Mask, self).__init__(array=array, pixel_scale=pixel_scale, origin=origin)<EOL>", "docstring": "A mask, which is applied to a 2D array of hyper to extract a set of unmasked image pixels (i.e. mask entry \\\n        is *False* or 0) which are then fitted in an analysis.\n\n        The mask retains the pixel scale of the array and has a centre and origin.\n\n        Parameters\n        ----------\n        array: ndarray\n            An array of bools representing the mask.\n        pixel_scale: float\n            The arc-second to pixel conversion factor of each pixel.\n        origin : (float, float)\n            The (y,x) arc-second origin of the mask's coordinate system.\n        centre : (float, float)\n            The (y,x) arc-second centre of the mask provided it is a standard geometric shape (e.g. a circle).", "id": "f5991:c0:m0"}
{"signature": "@property<EOL><INDENT>def masked_grid_index_to_pixel(self):<DEDENT>", "body": "return mask_util.masked_grid_1d_index_to_2d_pixel_index_from_mask(self).astype('<STR_LIT:int>')<EOL>", "docstring": "A 1D array of mappings between every unmasked pixel and its 2D pixel coordinates.", "id": "f5991:c0:m12"}
{"signature": "def map_2d_array_to_masked_1d_array(self, array_2d):", "body": "if array_2d is None or isinstance(array_2d, float):<EOL><INDENT>return array_2d<EOL><DEDENT>return mapping_util.map_2d_array_to_masked_1d_array_from_array_2d_and_mask(self, array_2d)<EOL>", "docstring": "For a 2D array (e.g. an image, noise_map, etc.) map it to a masked 1D array of values using this mask.\n\n        Parameters\n        ----------\n        array_2d : ndarray | None | float\n            The 2D array to be mapped to a masked 1D array.", "id": "f5991:c0:m14"}
{"signature": "@classmethod<EOL><INDENT>def circular(cls, shape, pixel_scale, radius_arcsec, centre=(<NUM_LIT:0.>, <NUM_LIT:0.>), invert=False):<DEDENT>", "body": "mask = mask_util.mask_circular_from_shape_pixel_scale_and_radius(shape, pixel_scale, radius_arcsec,<EOL>centre)<EOL>if invert: mask = np.invert(mask)<EOL>return cls(array=mask.astype('<STR_LIT:bool>'), pixel_scale=pixel_scale)<EOL>", "docstring": "Setup a mask where unmasked pixels are within a circle of an input arc second radius and centre.\n\n        Parameters\n        ----------\n        shape: (int, int)\n            The (y,x) shape of the mask in units of pixels.\n        pixel_scale: float\n            The arc-second to pixel conversion factor of each pixel.\n        radius_arcsec : float\n            The radius (in arc seconds) of the circle within which pixels are unmasked.\n        centre: (float, float)\n            The centre of the circle used to mask pixels.", "id": "f5991:c0:m4"}
{"signature": "@property<EOL><INDENT>def border_pixels(self):<DEDENT>", "body": "return mask_util.border_pixels_from_mask(self).astype('<STR_LIT:int>')<EOL>", "docstring": "The indices of the mask's border pixels, where a border pixel is any unmasked pixel on an\n        exterior edge (e.g. next to at least one pixel with a *True* value but not central pixels like those within \\\n        an annulus mask).", "id": "f5991:c0:m17"}
{"signature": "@classmethod<EOL><INDENT>def circular_annular(cls, shape, pixel_scale, inner_radius_arcsec, outer_radius_arcsec, centre=(<NUM_LIT:0.>, <NUM_LIT:0.>),<EOL>invert=False):<DEDENT>", "body": "mask = mask_util.mask_circular_annular_from_shape_pixel_scale_and_radii(shape, pixel_scale, inner_radius_arcsec,<EOL>outer_radius_arcsec, centre)<EOL>if invert: mask = np.invert(mask)<EOL>return cls(array=mask.astype('<STR_LIT:bool>'), pixel_scale=pixel_scale)<EOL>", "docstring": "Setup a mask where unmasked pixels are within an annulus of input inner and outer arc second radii and \\\n         centre.\n\n        Parameters\n        ----------\n        shape : (int, int)\n            The (y,x) shape of the mask in units of pixels.\n        pixel_scale: float\n            The arc-second to pixel conversion factor of each pixel.\n        inner_radius_arcsec : float\n            The radius (in arc seconds) of the inner circle outside of which pixels are unmasked.\n        outer_radius_arcsec : float\n            The radius (in arc seconds) of the outer circle within which pixels are unmasked.\n        centre: (float, float)\n            The centre of the annulus used to mask pixels.", "id": "f5991:c0:m5"}
{"signature": "@property<EOL><INDENT>def zoom_region(self):<DEDENT>", "body": "<EOL>where = np.array(np.where(np.invert(self.astype('<STR_LIT:bool>'))))<EOL>y0, x0 = np.amin(where, axis=<NUM_LIT:1>)<EOL>y1, x1 = np.amax(where, axis=<NUM_LIT:1>)<EOL>return [y0, y1+<NUM_LIT:1>, x0, x1+<NUM_LIT:1>]<EOL>", "docstring": "The zoomed rectangular region corresponding to the square encompassing all unmasked values.\n\n        This is used to zoom in on the region of an image that is used in an analysis for visualization.", "id": "f5991:c0:m22"}
{"signature": "@array_util.Memoizer()<EOL><INDENT>def blurring_mask_for_psf_shape(self, psf_shape):<DEDENT>", "body": "if psf_shape[<NUM_LIT:0>] % <NUM_LIT:2> == <NUM_LIT:0> or psf_shape[<NUM_LIT:1>] % <NUM_LIT:2> == <NUM_LIT:0>:<EOL><INDENT>raise exc.MaskException(\"<STR_LIT>\")<EOL><DEDENT>blurring_mask = mask_util.mask_blurring_from_mask_and_psf_shape(self, psf_shape)<EOL>return Mask(blurring_mask, self.pixel_scale)<EOL>", "docstring": "Compute a blurring mask, which represents all masked pixels whose light will be blurred into unmasked \\\n        pixels via PSF convolution (see grid_stack.RegularGrid.blurring_grid_from_mask_and_psf_shape).\n\n        Parameters\n        ----------\n        psf_shape : (int, int)\n           The shape of the psf which defines the blurring region (e.g. the shape of the PSF)", "id": "f5991:c0:m15"}
{"signature": "@property<EOL><INDENT>def edge_pixels(self):<DEDENT>", "body": "return mask_util.edge_pixels_from_mask(self).astype('<STR_LIT:int>')<EOL>", "docstring": "The indicies of the mask's edge pixels, where an edge pixel is any unmasked pixel on its edge \\\n        (next to at least one pixel with a *True* value).", "id": "f5991:c0:m16"}
{"signature": "@classmethod<EOL><INDENT>def circular_anti_annular(cls, shape, pixel_scale, inner_radius_arcsec, outer_radius_arcsec, outer_radius_2_arcsec,<EOL>centre=(<NUM_LIT:0.>, <NUM_LIT:0.>), invert=False):<DEDENT>", "body": "mask = mask_util.mask_circular_anti_annular_from_shape_pixel_scale_and_radii(shape, pixel_scale, inner_radius_arcsec,<EOL>outer_radius_arcsec,<EOL>outer_radius_2_arcsec, centre)<EOL>if invert: mask = np.invert(mask)<EOL>return cls(array=mask.astype('<STR_LIT:bool>'), pixel_scale=pixel_scale)<EOL>", "docstring": "Setup a mask where unmasked pixels are outside an annulus of input inner and outer arc second radii, but \\\n        within a second outer radius, and at a given centre.\n\n        This mask there has two distinct unmasked regions (an inner circle and outer annulus), with an inner annulus \\\n        of masked pixels.\n\n        Parameters\n        ----------\n        shape : (int, int)\n            The (y,x) shape of the mask in units of pixels.\n        pixel_scale: float\n            The arc-second to pixel conversion factor of each pixel.\n        inner_radius_arcsec : float\n            The radius (in arc seconds) of the inner circle inside of which pixels are unmasked.\n        outer_radius_arcsec : float\n            The radius (in arc seconds) of the outer circle within which pixels are masked and outside of which they \\\n            are unmasked.\n        outer_radius_2_arcsec : float\n            The radius (in arc seconds) of the second outer circle within which pixels are unmasked and outside of \\\n            which they masked.\n        centre: (float, float)\n            The centre of the anti-annulus used to mask pixels.", "id": "f5991:c0:m6"}
{"signature": "@decorator_util.jit()<EOL>def centres_from_shape_pixel_scales_and_origin(shape, pixel_scales, origin):", "body": "y_centre_arcsec = float(shape[<NUM_LIT:0>] - <NUM_LIT:1>) / <NUM_LIT:2> + (origin[<NUM_LIT:0>] / pixel_scales[<NUM_LIT:0>])<EOL>x_centre_arcsec = float(shape[<NUM_LIT:1>] - <NUM_LIT:1>) / <NUM_LIT:2> - (origin[<NUM_LIT:1>] / pixel_scales[<NUM_LIT:1>])<EOL>return (y_centre_arcsec, x_centre_arcsec)<EOL>", "docstring": "Determine the (y,x) arc-second central coordinates of an array from its shape, pixel-scales and origin.\n\n     The coordinate system is defined such that the positive y axis is up and positive x axis is right.\n\n    Parameters\n     ----------\n    shape : (int, int)\n        The (y,x) shape of the 2D array the arc-second centre is computed for.\n    pixel_scales : (float, float)\n        The (y,x) arc-second to pixel scales of the 2D array.\n    origin : (float, flloat)\n        The (y,x) origin of the 2D array, which the centre is shifted to.\n\n    Returns\n    --------\n    tuple (float, float)\n        The (y,x) arc-second central coordinates of the input array.\n\n    Examples\n    --------\n    centres_arcsec = centres_from_shape_pixel_scales_and_origin(shape=(5,5), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))", "id": "f5992:m0"}
{"signature": "@decorator_util.jit()<EOL>def grid_arcsec_1d_to_grid_pixel_centres_1d(grid_arcsec_1d, shape, pixel_scales, origin=(<NUM_LIT:0.0>, <NUM_LIT:0.0>)):", "body": "grid_pixels_1d = np.zeros((grid_arcsec_1d.shape[<NUM_LIT:0>], <NUM_LIT:2>))<EOL>centres_arcsec = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales, origin=origin)<EOL>for i in range(grid_arcsec_1d.shape[<NUM_LIT:0>]):<EOL><INDENT>grid_pixels_1d[i, <NUM_LIT:0>] = int((-grid_arcsec_1d[i, <NUM_LIT:0>] / pixel_scales[<NUM_LIT:0>]) + centres_arcsec[<NUM_LIT:0>] + <NUM_LIT:0.5>)<EOL>grid_pixels_1d[i, <NUM_LIT:1>] = int((grid_arcsec_1d[i, <NUM_LIT:1>] / pixel_scales[<NUM_LIT:1>]) + centres_arcsec[<NUM_LIT:1>] + <NUM_LIT:0.5>)<EOL><DEDENT>return grid_pixels_1d<EOL>", "docstring": "Convert a grid of (y,x) arc second coordinates to a grid of (y,x) pixel values. Pixel coordinates are \\\n    returned as integers such that they map directly to the pixel they are contained within.\n\n    The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to \\\n    higher y arc-second coordinate value and lowest x arc-second coordinate.\n\n    The arc-second coordinate grid is defined by the class attribute origin, and coordinates are shifted to this \\\n    origin before computing their 1D grid pixel indexes.\n\n    The input and output grids are both of shape (total_pixels, 2).\n\n    Parameters\n    ----------\n    grid_arcsec_1d: ndarray\n        The grid of (y,x) coordinates in arc seconds which is converted to pixel indexes.\n    shape : (int, int)\n        The (y,x) shape of the original 2D array the arc-second coordinates were computed on.\n    pixel_scales : (float, float)\n        The (y,x) arc-second to pixel scales of the original 2D array.\n    origin : (float, flloat)\n        The (y,x) origin of the grid, which the arc-second grid is shifted\n\n    Returns\n    --------\n    ndarray\n        A grid of (y,x) pixel indexes with 
dimensions (total_pixels, 2).\n\n    Examples\n    --------\n    grid_arcsec_1d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])\n    grid_pixels_1d = grid_arcsec_1d_to_grid_pixel_centres_1d(grid_arcsec_1d=grid_arcsec_1d, shape=(2,2),\n                                                           pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))", "id": "f5992:m6"}
{"signature": "@decorator_util.jit()<EOL>def grid_arcsec_1d_to_grid_pixel_indexes_1d(grid_arcsec_1d, shape, pixel_scales, origin=(<NUM_LIT:0.0>, <NUM_LIT:0.0>)):", "body": "grid_pixels_1d = grid_arcsec_1d_to_grid_pixel_centres_1d(grid_arcsec_1d=grid_arcsec_1d, shape=shape,<EOL>pixel_scales=pixel_scales, origin=origin)<EOL>grid_pixel_indexes_1d = np.zeros(grid_pixels_1d.shape[<NUM_LIT:0>])<EOL>for i in range(grid_pixels_1d.shape[<NUM_LIT:0>]):<EOL><INDENT>grid_pixel_indexes_1d[i] = int(grid_pixels_1d[i,<NUM_LIT:0>] * shape[<NUM_LIT:1>] + grid_pixels_1d[i,<NUM_LIT:1>])<EOL><DEDENT>return grid_pixel_indexes_1d<EOL>", "docstring": "Convert a grid of (y,x) arc second coordinates to a grid of (y,x) pixel 1D indexes. Pixel coordinates are \\\n    returned as integers such that they are the pixel from the top-left of the 2D grid going rights and then \\\n    downwards.\n\n    For example:\n\n    The pixel at the top-left, whose 2D index is [0,0], corresponds to 1D index 0.\n    The fifth pixel on the top row, whose 2D index is [0,5], corresponds to 1D index 4.\n    The first pixel on the second row, whose 2D index is [0,1], has 1D index 10 if a row has 10 pixels.\n\n    The arc-second coordinate grid is defined by the class attribute origin, and coordinates are shifted to this \\\n    origin before computing their 1D grid pixel indexes.\n\n    The input and output grids are both of shape (total_pixels, 2).\n\n    Parameters\n    ----------\n    grid_arcsec_1d: ndarray\n        The grid of (y,x) coordinates in arc seconds which is converted to 1D pixel indexes.\n    shape : (int, int)\n        The (y,x) shape of the original 2D array the arc-second coordinates were computed on.\n    pixel_scales : (float, float)\n        The (y,x) arc-second to pixel scales of the original 2D array.\n    origin : (float, flloat)\n        The (y,x) origin of the grid, which the arc-second grid is shifted.\n\n    Returns\n    --------\n    ndarray\n        A grid of 1d pixel indexes with 
dimensions (total_pixels, 2).\n\n    Examples\n    --------\n    grid_arcsec_1d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])\n    grid_pixels_1d = grid_arcsec_1d_to_grid_pixel_indexes_1d(grid_arcsec_1d=grid_arcsec_1d, shape=(2,2),\n                                                           pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))", "id": "f5992:m7"}
{"signature": "@decorator_util.jit()<EOL>def regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask, pixel_scales, origin=(<NUM_LIT:0.0>, <NUM_LIT:0.0>)):", "body": "grid_2d = regular_grid_2d_from_shape_pixel_scales_and_origin(mask.shape, pixel_scales, origin)<EOL>total_regular_pixels = mask_util.total_regular_pixels_from_mask(mask)<EOL>regular_grid_1d = np.zeros(shape=(total_regular_pixels, <NUM_LIT:2>))<EOL>pixel_count = <NUM_LIT:0><EOL>for y in range(mask.shape[<NUM_LIT:0>]):<EOL><INDENT>for x in range(mask.shape[<NUM_LIT:1>]):<EOL><INDENT>if not mask[y, x]:<EOL><INDENT>regular_grid_1d[pixel_count, :] = grid_2d[y, x]<EOL>pixel_count += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return regular_grid_1d<EOL>", "docstring": "Compute the (y,x) arc second coordinates at the centre of every pixel of a 2D mask array of shape (rows, columns).\n\n    Coordinates are defined from the top-left corner, where the first unmasked pixel corresponds to index 0. The pixel \\\n    at the top-left of the array has negative x and y values in arc seconds.\n\n    The regular grid is returned on an array of shape (total_unmasked_pixels, 2). y coordinates are stored in the 0 \\\n    index of the second dimension, x coordinates in the 1 index.\n\n    Parameters\n     ----------\n    mask : ndarray\n        A 2D array of bools, where *False* values mean unmasked and are therefore included as part of the calculated \\\n        regular grid.\n    pixel_scales : (float, float)\n        The (y,x) arc-second to pixel scales of the 2D mask array.\n    origin : (float, flloat)\n        The (y,x) origin of the 2D array, which the regular grid is shifted around.\n\n    Returns\n    --------\n    ndarray\n        A regular grid of (y,x) arc-second coordinates at the centre of every pixel unmasked pixel on the 2D mask \\\n        array. 
The regular grid array has dimensions (total_unmasked_pixels, 2).\n\n    Examples\n    --------\n    mask = np.array([[True, False, True],\n                     [False, False, False]\n                     [True, False, True]])\n    regular_grid_1d = regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(0.5, 0.5),\n                                                                            origin=(0.0, 0.0))", "id": "f5992:m3"}
{"signature": "@decorator_util.jit()<EOL>def grid_arcsec_2d_to_grid_pixel_centres_2d(grid_arcsec_2d, shape, pixel_scales, origin=(<NUM_LIT:0.0>, <NUM_LIT:0.0>)):", "body": "grid_pixels_2d = np.zeros((grid_arcsec_2d.shape[<NUM_LIT:0>], grid_arcsec_2d.shape[<NUM_LIT:1>], <NUM_LIT:2>))<EOL>centres_arcsec = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales, origin=origin)<EOL>for y in range(grid_arcsec_2d.shape[<NUM_LIT:0>]):<EOL><INDENT>for x in range(grid_arcsec_2d.shape[<NUM_LIT:1>]):<EOL><INDENT>grid_pixels_2d[y, x, <NUM_LIT:0>] = int((-grid_arcsec_2d[y, x, <NUM_LIT:0>] / pixel_scales[<NUM_LIT:0>]) + centres_arcsec[<NUM_LIT:0>] + <NUM_LIT:0.5>)<EOL>grid_pixels_2d[y, x, <NUM_LIT:1>] = int((grid_arcsec_2d[y, x, <NUM_LIT:1>] / pixel_scales[<NUM_LIT:1>]) + centres_arcsec[<NUM_LIT:1>] + <NUM_LIT:0.5>)<EOL><DEDENT><DEDENT>return grid_pixels_2d<EOL>", "docstring": "Convert a grid of (y,x) arc second coordinates to a grid of (y,x) pixel values. Pixel coordinates are \\\n    returned as integers such that they map directly to the pixel they are contained within.\n\n    The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to \\\n    higher y arc-second coordinate value and lowest x arc-second coordinate.\n\n    The arc-second coordinate grid is defined by the class attribute origin, and coordinates are shifted to this \\\n    origin before computing their 1D grid pixel indexes.\n\n    The input and output grids are both of shape (total_pixels, 2).\n\n    Parameters\n    ----------\n    grid_arcsec_1d: ndarray\n        The grid of (y,x) coordinates in arc seconds which is converted to pixel indexes.\n    shape : (int, int)\n        The (y,x) shape of the original 2D array the arc-second coordinates were computed on.\n    pixel_scales : (float, float)\n        The (y,x) arc-second to pixel scales of the original 2D array.\n    origin : (float, flloat)\n        The (y,x) origin of the grid, 
which the arc-second grid is shifted\n\n    Returns\n    --------\n    ndarray\n        A grid of (y,x) pixel indexes with dimensions (total_pixels, 2).\n\n    Examples\n    --------\n    grid_arcsec_1d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])\n    grid_pixels_1d = grid_arcsec_1d_to_grid_pixel_centres_1d(grid_arcsec_1d=grid_arcsec_1d, shape=(2,2),\n                                                           pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))", "id": "f5992:m9"}
{"signature": "@decorator_util.jit()<EOL>def grid_arcsec_1d_to_grid_pixels_1d(grid_arcsec_1d, shape, pixel_scales, origin=(<NUM_LIT:0.0>, <NUM_LIT:0.0>)):", "body": "grid_pixels_1d = np.zeros((grid_arcsec_1d.shape[<NUM_LIT:0>], <NUM_LIT:2>))<EOL>centres_arcsec = centres_from_shape_pixel_scales_and_origin(shape=shape, pixel_scales=pixel_scales, origin=origin)<EOL>for i in range(grid_arcsec_1d.shape[<NUM_LIT:0>]):<EOL><INDENT>grid_pixels_1d[i, <NUM_LIT:0>] = (-grid_arcsec_1d[i, <NUM_LIT:0>] / pixel_scales[<NUM_LIT:0>]) + centres_arcsec[<NUM_LIT:0>] + <NUM_LIT:0.5><EOL>grid_pixels_1d[i, <NUM_LIT:1>] = (grid_arcsec_1d[i, <NUM_LIT:1>] / pixel_scales[<NUM_LIT:1>]) + centres_arcsec[<NUM_LIT:1>] + <NUM_LIT:0.5><EOL><DEDENT>return grid_pixels_1d<EOL>", "docstring": "Convert a grid of (y,x) arc second coordinates to a grid of (y,x) pixel coordinate values. Pixel coordinates \\\n    are returned as floats such that they include the decimal offset from each pixel's top-left corner relative to \\\n    the input arc-second coordinate.\n\n    The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to \\\n    the highest y arc-second coordinate and lowest x arc-second coordinate on the gird.\n\n    The arc-second grid is defined by an origin and coordinates are shifted to this origin before computing their \\\n    1D grid pixel coordinate values.\n\n    The input and output grids are both of shape (total_pixels, 2).\n\n    Parameters\n    ----------\n    grid_arcsec_1d: ndarray\n        The grid of (y,x) coordinates in arc seconds which is converted to pixel value coordinates.\n    shape : (int, int)\n        The (y,x) shape of the original 2D array the arc-second coordinates were computed on.\n    pixel_scales : (float, float)\n        The (y,x) arc-second to pixel scales of the original 2D array.\n    origin : (float, flloat)\n        The (y,x) origin of the grid, which the arc-second grid is shifted to.\n\n    Returns\n    
--------\n    ndarray\n        A grid of (y,x) pixel-value coordinates with dimensions (total_pixels, 2).\n\n    Examples\n    --------\n    grid_arcsec_1d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])\n    grid_pixels_1d = grid_arcsec_1d_to_grid_pixels_1d(grid_arcsec_1d=grid_arcsec_1d, shape=(2,2),\n                                                           pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))", "id": "f5992:m5"}
{"signature": "@decorator_util.jit()<EOL>def extracted_array_2d_from_array_2d_and_coordinates(array_2d, y0, y1, x0, x1):", "body": "new_shape = (y1-y0, x1-x0)<EOL>resized_array = np.zeros(shape=new_shape)<EOL>for y_resized, y in enumerate(range(y0, y1)):<EOL><INDENT>for x_resized, x in enumerate(range(x0, x1)):<EOL><INDENT>resized_array[y_resized, x_resized] = array_2d[y, x]<EOL><DEDENT><DEDENT>return resized_array<EOL>", "docstring": "Resize an array to a new size by extracting a sub-set of the array.\n\n    The extracted input coordinates use NumPy convention, such that the upper values should be specified as +1 the \\\n    dimensions of the extracted array.\n\n    In the example below, an array of size (5,5) is extracted using the coordinates y0=1, y1=4, x0=1, x1=4. This\n    extracts an array of dimensions (2,2) and is equivalent to array_2d[1:4, 1:4]\n\n    Parameters\n    ----------\n    array_2d : ndarray\n        The 2D array that is an array is extracted from.\n    y0 : int\n        The lower row number (e.g. the higher y-coodinate) of the array that is extracted for the resize.\n    y1 : int\n        The upper row number (e.g. the lower y-coodinate) of the array that is extracted for the resize.\n    x0 : int\n        The lower column number (e.g. the lower x-coodinate) of the array that is extracted for the resize.\n    x1 : int\n        The upper column number (e.g. the higher x-coodinate) of the array that is extracted for the resize.\n\n    Returns\n    -------\n    ndarray\n        The extracted 2D array from the input 2D array.\n\n    Examples\n    --------\n    array_2d = np.ones((5,5))\n    extracted_array = extract_array_2d(array_2d=array_2d, y0=1, y1=4, x0=1, x1=4)", "id": "f5993:m0"}
{"signature": "@decorator_util.jit()<EOL>def resized_array_2d_from_array_2d_and_resized_shape(array_2d, resized_shape, origin=(-<NUM_LIT:1>, -<NUM_LIT:1>), pad_value=<NUM_LIT:0.0>):", "body": "y_is_even = int(array_2d.shape[<NUM_LIT:0>]) % <NUM_LIT:2> == <NUM_LIT:0><EOL>x_is_even = int(array_2d.shape[<NUM_LIT:1>]) % <NUM_LIT:2> == <NUM_LIT:0><EOL>if origin is (-<NUM_LIT:1>, -<NUM_LIT:1>):<EOL><INDENT>if y_is_even:<EOL><INDENT>y_centre = int(array_2d.shape[<NUM_LIT:0>] / <NUM_LIT:2>)<EOL><DEDENT>elif not y_is_even:<EOL><INDENT>y_centre = int(array_2d.shape[<NUM_LIT:0>] / <NUM_LIT:2>)<EOL><DEDENT>if x_is_even:<EOL><INDENT>x_centre = int(array_2d.shape[<NUM_LIT:1>] / <NUM_LIT:2>)<EOL><DEDENT>elif not x_is_even:<EOL><INDENT>x_centre = int(array_2d.shape[<NUM_LIT:1>] / <NUM_LIT:2>)<EOL><DEDENT>origin = (y_centre, x_centre)<EOL><DEDENT>resized_array = np.zeros(shape=resized_shape)<EOL>if y_is_even:<EOL><INDENT>y_min = origin[<NUM_LIT:0>] - int(resized_shape[<NUM_LIT:0>] / <NUM_LIT:2>)<EOL>y_max = origin[<NUM_LIT:0>] + int((resized_shape[<NUM_LIT:0>] / <NUM_LIT:2>)) + <NUM_LIT:1><EOL><DEDENT>elif not y_is_even:<EOL><INDENT>y_min = origin[<NUM_LIT:0>] - int(resized_shape[<NUM_LIT:0>] / <NUM_LIT:2>)<EOL>y_max = origin[<NUM_LIT:0>] + int((resized_shape[<NUM_LIT:0>] / <NUM_LIT:2>)) + <NUM_LIT:1><EOL><DEDENT>if x_is_even:<EOL><INDENT>x_min = origin[<NUM_LIT:1>] - int(resized_shape[<NUM_LIT:1>] / <NUM_LIT:2>)<EOL>x_max = origin[<NUM_LIT:1>] + int((resized_shape[<NUM_LIT:1>] / <NUM_LIT:2>)) + <NUM_LIT:1><EOL><DEDENT>elif not x_is_even:<EOL><INDENT>x_min = origin[<NUM_LIT:1>] - int(resized_shape[<NUM_LIT:1>] / <NUM_LIT:2>)<EOL>x_max = origin[<NUM_LIT:1>] + int((resized_shape[<NUM_LIT:1>] / <NUM_LIT:2>)) + <NUM_LIT:1><EOL><DEDENT>for y_resized, y in enumerate(range(y_min, y_max)):<EOL><INDENT>for x_resized, x in enumerate(range(x_min, x_max)):<EOL><INDENT>if y >= <NUM_LIT:0> and y < array_2d.shape[<NUM_LIT:0>] and x >= <NUM_LIT:0> and x < 
array_2d.shape[<NUM_LIT:1>]:<EOL><INDENT>if y_resized >= <NUM_LIT:0> and y_resized < resized_shape[<NUM_LIT:0>] and x_resized >= <NUM_LIT:0> and x_resized < resized_shape[<NUM_LIT:1>]:<EOL><INDENT>resized_array[y_resized, x_resized] = array_2d[y, x]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if y_resized >= <NUM_LIT:0> and y_resized < resized_shape[<NUM_LIT:0>] and x_resized >= <NUM_LIT:0> and x_resized < resized_shape[<NUM_LIT:1>]:<EOL><INDENT>resized_array[y_resized, x_resized] = pad_value<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return resized_array<EOL>", "docstring": "Resize an array to a new size around a central pixel.\n\n    If the origin (e.g. the central pixel) of the resized array is not specified, the central pixel of the array is \\\n    calculated automatically. For example, a (5,5) array's central pixel is (2,2). For even dimensions the central \\\n    pixel is assumed to be the lower indexed value, e.g. a (6,4) array's central pixel is calculated as (2,1).\n\n    The default origin is (-1, -1) because numba requires that the function input is the same type throughout the \\\n    function, thus a default 'None' value cannot be used.\n\n    Parameters\n    ----------\n    array_2d : ndarray\n        The 2D array that is resized.\n    resized_shape : (int, int)\n        The (y,x) new pixel dimension of the trimmed array.\n    origin : (int, int)\n        The oigin of the resized array, e.g. the central pixel around which the array is extracted.\n\n    Returns\n    -------\n    ndarray\n        The resized 2D array from the input 2D array.\n\n    Examples\n    --------\n    array_2d = np.ones((5,5))\n    resize_array = resize_array_2d(array_2d=array_2d, new_shape=(2,2), origin=(2, 2))", "id": "f5993:m1"}
{"signature": "@decorator_util.jit()<EOL>def bin_up_array_2d_using_mean(array_2d, bin_up_factor):", "body": "padded_array_2d = pad_2d_array_for_binning_up_with_bin_up_factor(array_2d=array_2d, bin_up_factor=bin_up_factor)<EOL>binned_array_2d = np.zeros(shape=(padded_array_2d.shape[<NUM_LIT:0>] // bin_up_factor,<EOL>padded_array_2d.shape[<NUM_LIT:1>] // bin_up_factor))<EOL>for y in range(binned_array_2d.shape[<NUM_LIT:0>]):<EOL><INDENT>for x in range(binned_array_2d.shape[<NUM_LIT:1>]):<EOL><INDENT>value = <NUM_LIT:0.0><EOL>for y1 in range(bin_up_factor):<EOL><INDENT>for x1 in range(bin_up_factor):<EOL><INDENT>padded_y = y*bin_up_factor + y1<EOL>padded_x = x*bin_up_factor + x1<EOL>value += padded_array_2d[padded_y, padded_x]<EOL><DEDENT><DEDENT>binned_array_2d[y,x] = value / (bin_up_factor ** <NUM_LIT>)<EOL><DEDENT><DEDENT>return binned_array_2d<EOL>", "docstring": "Bin up an array to coarser resolution, by binning up groups of pixels and using their mean value to determine \\\n     the value of the new pixel.\n\n    If an array of shape (8,8) is input and the bin up size is 2, this would return a new array of size (4,4) where \\\n    every pixel was the mean of each collection of 2x2 pixels on the (8,8) array.\n\n    If binning up the array leads to an edge being cut (e.g. a (9,9) array binned up by 2), an array is first \\\n    extracted around the centre of that array.\n\n\n    Parameters\n    ----------\n    array_2d : ndarray\n        The 2D array that is resized.\n    new_shape : (int, int)\n        The (y,x) new pixel dimension of the trimmed array.\n    origin : (int, int)\n        The oigin of the resized array, e.g. the central pixel around which the array is extracted.\n\n    Returns\n    -------\n    ndarray\n        The resized 2D array from the input 2D array.\n\n    Examples\n    --------\n    array_2d = np.ones((5,5))\n    resize_array = resize_array_2d(array_2d=array_2d, new_shape=(2,2), origin=(2, 2))", "id": "f5993:m4"}
{"signature": "def __call__(self, func):", "body": "if self.arg_names is not None:<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>self.arg_names = inspect.getfullargspec(func).args<EOL>@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>key = \"<STR_LIT:U+002CU+0020>\".join(<EOL>[\"<STR_LIT>\".format(arg_name, arg) for arg_name, arg in<EOL>list(zip(self.arg_names, args)) + [(k, v) for k, v in kwargs.items()]])<EOL>if key not in self.results:<EOL><INDENT>self.calls += <NUM_LIT:1><EOL><DEDENT>self.results[key] = func(*args, **kwargs)<EOL>return self.results[key]<EOL><DEDENT>return wrapper<EOL>", "docstring": "Memoize decorator. Any time a function is called that a memoizer has been attached to its results are stored in\nthe results dictionary or retrieved from the dictionary if the function has already been called with those\narguments.\n\nNote that the same memoizer persists over all instances of a class. Any state for a given instance that is not\ngiven in the representation of that instance will be ignored. That is, it is possible that the memoizer will\ngive incorrect results if instance state does not affect __str__ but does affect the value returned by the\nmemoized method.\n\nParameters\n----------\nfunc: function\n    A function for which results should be memoized\n\nReturns\n-------\ndecorated : function\n    A function that memoizes results", "id": "f5993:c0:m1"}
{"signature": "def __init__(self):", "body": "self.results = {}<EOL>self.calls = <NUM_LIT:0><EOL>self.arg_names = None<EOL>", "docstring": "Class to store the results of a function given a set of inputs.", "id": "f5993:c0:m0"}
{"signature": "@decorator_util.jit()<EOL>def bin_up_array_2d_using_quadrature(array_2d, bin_up_factor):", "body": "padded_array_2d = pad_2d_array_for_binning_up_with_bin_up_factor(array_2d=array_2d, bin_up_factor=bin_up_factor)<EOL>binned_array_2d = np.zeros(shape=(padded_array_2d.shape[<NUM_LIT:0>] // bin_up_factor,<EOL>padded_array_2d.shape[<NUM_LIT:1>] // bin_up_factor))<EOL>for y in range(binned_array_2d.shape[<NUM_LIT:0>]):<EOL><INDENT>for x in range(binned_array_2d.shape[<NUM_LIT:1>]):<EOL><INDENT>value = <NUM_LIT:0.0><EOL>for y1 in range(bin_up_factor):<EOL><INDENT>for x1 in range(bin_up_factor):<EOL><INDENT>padded_y = y*bin_up_factor + y1<EOL>padded_x = x*bin_up_factor + x1<EOL>value += padded_array_2d[padded_y, padded_x] ** <NUM_LIT><EOL><DEDENT><DEDENT>binned_array_2d[y,x] = np.sqrt(value) / (bin_up_factor ** <NUM_LIT>)<EOL><DEDENT><DEDENT>return binned_array_2d<EOL>", "docstring": "Bin up an array to coarser resolution, by binning up groups of pixels and using their quadrature value to determine \\\n     the value of the new pixel.\n\n    If an array of shape (8,8) is input and the bin up size is 2, this would return a new array of size (4,4) where \\\n    every pixel was the quadrature of each collection of 2x2 pixels on the (8,8) array.\n\n    If binning up the array leads to an edge being cut (e.g. a (9,9) array binned up by 2), an array is first \\\n    extracted around the centre of that array.\n\n\n    Parameters\n    ----------\n    array_2d : ndarray\n        The 2D array that is resized.\n    new_shape : (int, int)\n        The (y,x) new pixel dimension of the trimmed array.\n    origin : (int, int)\n        The oigin of the resized array, e.g. 
the central pixel around which the array is extracted.\n\n    Returns\n    -------\n    ndarray\n        The resized 2D array from the input 2D array.\n\n    Examples\n    --------\n    array_2d = np.ones((5,5))\n    resize_array = resize_array_2d(array_2d=array_2d, new_shape=(2,2), origin=(2, 2))", "id": "f5993:m5"}
{"signature": "@decorator_util.jit()<EOL>def masked_sub_grid_1d_index_to_2d_sub_pixel_index_from_mask(mask, sub_grid_size):", "body": "total_sub_pixels = total_sub_pixels_from_mask_and_sub_grid_size(mask=mask, sub_grid_size=sub_grid_size)<EOL>sub_grid_to_sub_pixel = np.zeros(shape=(total_sub_pixels, <NUM_LIT:2>))<EOL>sub_pixel_count = <NUM_LIT:0><EOL>for y in range(mask.shape[<NUM_LIT:0>]):<EOL><INDENT>for x in range(mask.shape[<NUM_LIT:1>]):<EOL><INDENT>if not mask[y, x]:<EOL><INDENT>for y1 in range(sub_grid_size):<EOL><INDENT>for x1 in range(sub_grid_size):<EOL><INDENT>sub_grid_to_sub_pixel[sub_pixel_count, :] = (y*sub_grid_size)+y1, (x*sub_grid_size)+x1<EOL>sub_pixel_count += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return sub_grid_to_sub_pixel<EOL>", "docstring": "Compute a 1D array that maps every unmasked pixel to its corresponding 2d pixel using its (y,x) pixel indexes.\n\n    For howtolens if pixel [2,5] corresponds to the second pixel on the 1D array, grid_to_pixel[1] = [2,5]", "id": "f5994:m12"}
{"signature": "@decorator_util.jit()<EOL>def total_sparse_pixels_from_mask(mask, unmasked_sparse_grid_pixel_centres):", "body": "total_sparse_pixels = <NUM_LIT:0><EOL>for unmasked_sparse_pixel_index in range(unmasked_sparse_grid_pixel_centres.shape[<NUM_LIT:0>]):<EOL><INDENT>y = unmasked_sparse_grid_pixel_centres[unmasked_sparse_pixel_index, <NUM_LIT:0>]<EOL>x = unmasked_sparse_grid_pixel_centres[unmasked_sparse_pixel_index, <NUM_LIT:1>]<EOL>if not mask[y,x]:<EOL><INDENT>total_sparse_pixels += <NUM_LIT:1><EOL><DEDENT><DEDENT>return total_sparse_pixels<EOL>", "docstring": "Given the full (i.e. without removing pixels which are outside the regular-masks) pixelization grid's pixel centers\n    and the regular-masks, compute the total number of pixels which are within the regular-masks and thus used by the\n    pixelization grid.\n\n    Parameters\n    -----------\n    mask : ccd.masks.Mask\n        The regular-masks within which pixelization pixels must be inside\n    unmasked_sparse_grid_pixel_centres : ndarray\n        The centres of the unmasked pixelization grid pixels.", "id": "f5994:m3"}
{"signature": "@decorator_util.jit()<EOL>def border_pixels_from_mask(mask):", "body": "edge_pixels = edge_pixels_from_mask(mask)<EOL>masked_grid_index_to_pixel = masked_grid_1d_index_to_2d_pixel_index_from_mask(mask)<EOL>border_pixel_total = total_border_pixels_from_mask_and_edge_pixels(mask, edge_pixels, masked_grid_index_to_pixel)<EOL>border_pixels = np.zeros(border_pixel_total)<EOL>border_pixel_index = <NUM_LIT:0><EOL>for edge_pixel_index in range(edge_pixels.shape[<NUM_LIT:0>]):<EOL><INDENT>if check_if_border_pixel(mask, edge_pixels[edge_pixel_index], masked_grid_index_to_pixel):<EOL><INDENT>border_pixels[border_pixel_index] = edge_pixels[edge_pixel_index]<EOL>border_pixel_index += <NUM_LIT:1><EOL><DEDENT><DEDENT>return border_pixels<EOL>", "docstring": "Compute a 1D array listing all borders pixel indexes in the masks. A borders pixel is a pixel which:\n\n     1) is not fully surrounding by False masks values.\n     2) Can reach the edge of the array without hitting a masked pixel in one of four directions (upwards, downwards,\n     left, right).\n\n     The borders pixels are thus pixels which are on the exterior edge of the masks. For example, the inner ring of edge \\\n     pixels in an annular masks are edge pixels but not borders pixels.", "id": "f5994:m17"}
{"signature": "@decorator_util.jit()<EOL>def mask_circular_annular_from_shape_pixel_scale_and_radii(shape, pixel_scale, inner_radius_arcsec, outer_radius_arcsec,<EOL>centre=(<NUM_LIT:0.0>, <NUM_LIT:0.0>)):", "body": "mask = np.full(shape, True)<EOL>centres_arcsec = mask_centres_from_shape_pixel_scale_and_centre(shape=mask.shape, pixel_scale=pixel_scale, centre=centre)<EOL>for y in range(mask.shape[<NUM_LIT:0>]):<EOL><INDENT>for x in range(mask.shape[<NUM_LIT:1>]):<EOL><INDENT>y_arcsec = (y - centres_arcsec[<NUM_LIT:0>]) * pixel_scale<EOL>x_arcsec = (x - centres_arcsec[<NUM_LIT:1>]) * pixel_scale<EOL>r_arcsec = np.sqrt(x_arcsec ** <NUM_LIT:2> + y_arcsec ** <NUM_LIT:2>)<EOL>if outer_radius_arcsec >= r_arcsec >= inner_radius_arcsec:<EOL><INDENT>mask[y, x] = False<EOL><DEDENT><DEDENT><DEDENT>return mask<EOL>", "docstring": "Compute an annular masks from an input inner and outer masks radius and regular shape.", "id": "f5994:m5"}
{"signature": "@decorator_util.jit()<EOL>def mask_circular_from_shape_pixel_scale_and_radius(shape, pixel_scale, radius_arcsec, centre=(<NUM_LIT:0.0>, <NUM_LIT:0.0>)):", "body": "mask = np.full(shape, True)<EOL>centres_arcsec = mask_centres_from_shape_pixel_scale_and_centre(shape=mask.shape, pixel_scale=pixel_scale, centre=centre)<EOL>for y in range(mask.shape[<NUM_LIT:0>]):<EOL><INDENT>for x in range(mask.shape[<NUM_LIT:1>]):<EOL><INDENT>y_arcsec = (y - centres_arcsec[<NUM_LIT:0>]) * pixel_scale<EOL>x_arcsec = (x - centres_arcsec[<NUM_LIT:1>]) * pixel_scale<EOL>r_arcsec = np.sqrt(x_arcsec ** <NUM_LIT:2> + y_arcsec ** <NUM_LIT:2>)<EOL>if r_arcsec <= radius_arcsec:<EOL><INDENT>mask[y, x] = False<EOL><DEDENT><DEDENT><DEDENT>return mask<EOL>", "docstring": "Compute a circular masks from an input masks radius and regular shape.", "id": "f5994:m4"}
{"signature": "@decorator_util.jit()<EOL>def total_border_pixels_from_mask_and_edge_pixels(mask, edge_pixels, masked_grid_index_to_pixel):", "body": "border_pixel_total = <NUM_LIT:0><EOL>for i in range(edge_pixels.shape[<NUM_LIT:0>]):<EOL><INDENT>if check_if_border_pixel(mask, edge_pixels[i], masked_grid_index_to_pixel):<EOL><INDENT>border_pixel_total += <NUM_LIT:1><EOL><DEDENT><DEDENT>return border_pixel_total<EOL>", "docstring": "Compute the total number of borders-pixels in a masks.", "id": "f5994:m16"}
{"signature": "@decorator_util.jit()<EOL>def total_regular_pixels_from_mask(mask):", "body": "total_regular_pixels = <NUM_LIT:0><EOL>for y in range(mask.shape[<NUM_LIT:0>]):<EOL><INDENT>for x in range(mask.shape[<NUM_LIT:1>]):<EOL><INDENT>if not mask[y, x]:<EOL><INDENT>total_regular_pixels += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return total_regular_pixels<EOL>", "docstring": "Compute the total number of unmasked regular pixels in a masks.", "id": "f5994:m1"}
{"signature": "@decorator_util.jit()<EOL>def edge_pixels_from_mask(mask):", "body": "edge_pixel_total = total_edge_pixels_from_mask(mask)<EOL>edge_pixels = np.zeros(edge_pixel_total)<EOL>edge_index = <NUM_LIT:0><EOL>regular_index = <NUM_LIT:0><EOL>for y in range(mask.shape[<NUM_LIT:0>]):<EOL><INDENT>for x in range(mask.shape[<NUM_LIT:1>]):<EOL><INDENT>if not mask[y, x]:<EOL><INDENT>if mask[y + <NUM_LIT:1>, x] or mask[y - <NUM_LIT:1>, x] or mask[y, x + <NUM_LIT:1>] or mask[y, x - <NUM_LIT:1>] or mask[y + <NUM_LIT:1>, x + <NUM_LIT:1>] or mask[y + <NUM_LIT:1>, x - <NUM_LIT:1>] or mask[y - <NUM_LIT:1>, x + <NUM_LIT:1>] or mask[y - <NUM_LIT:1>, x - <NUM_LIT:1>]:<EOL><INDENT>edge_pixels[edge_index] = regular_index<EOL>edge_index += <NUM_LIT:1><EOL><DEDENT>regular_index += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return edge_pixels<EOL>", "docstring": "Compute a 1D array listing all edge pixel indexes in the masks. An edge pixel is a pixel which is not fully \\\n    surrounded by False masks values i.e. it is on an edge.", "id": "f5994:m14"}
{"signature": "@decorator_util.jit()<EOL>def map_unmasked_1d_array_to_2d_array_from_array_1d_and_shape(array_1d, shape):", "body": "array_2d = np.zeros(shape)<EOL>index = <NUM_LIT:0><EOL>for y in range(shape[<NUM_LIT:0>]):<EOL><INDENT>for x in range(shape[<NUM_LIT:1>]):<EOL><INDENT>array_2d[y, x] = array_1d[index]<EOL>index += <NUM_LIT:1><EOL><DEDENT><DEDENT>return array_2d<EOL>", "docstring": "For a 1D array that was flattened from a 2D array of shape (rows, columns), map its values back to the \\\n    original 2D array.\n\n    The pixel coordinate origin is at the top left corner of the 2D array and goes right-wards and downwards, such\n    that for an array of shape (3,3):\n\n    - pixel 0 of the 1D array will correspond to index [0,0] of the 2D array.\n    - pixel 1 of the 1D array will correspond to index [0,1] of the 2D array.\n    - pixel 4 of the 1D array will correspond to index [1,0] of the 2D array.\n\n    Parameters\n     ----------\n    array_1d : ndarray\n        The 1D array of values which are mapped to a 2D array.\n    shape : (int, int)\n        The shape of the 2D array which the pixels are defined on.\n\n    Returns\n    --------\n    ndarray\n        A 2D array of values mapped from the 1D array with dimensions (shape).\n\n    Examples\n    --------\n    one_to_two = np.array([[0,1], [1,0], [1,1], [1,2], [2,1]])\n\n    array_1d = np.array([[2.0, 4.0, 5.0, 6.0, 8.0])\n\n    array_2d = map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d=array_1d, shape=(3,3),\n                                                                                  one_to_two=one_to_two)", "id": "f5995:m5"}
{"signature": "@decorator_util.jit()<EOL>def sparse_grid_from_unmasked_sparse_grid(unmasked_sparse_grid, sparse_to_unmasked_sparse):", "body": "total_pix_pixels = sparse_to_unmasked_sparse.shape[<NUM_LIT:0>]<EOL>pix_grid = np.zeros((total_pix_pixels, <NUM_LIT:2>))<EOL>for pixel_index in range(total_pix_pixels):<EOL><INDENT>pix_grid[pixel_index, <NUM_LIT:0>] = unmasked_sparse_grid[sparse_to_unmasked_sparse[pixel_index], <NUM_LIT:0>]<EOL>pix_grid[pixel_index, <NUM_LIT:1>] = unmasked_sparse_grid[sparse_to_unmasked_sparse[pixel_index], <NUM_LIT:1>]<EOL><DEDENT>return pix_grid<EOL>", "docstring": "Use the central arc-second coordinate of every unmasked pixelization grid's pixels and mapping between each\n    pixelization pixel and unmasked pixelization pixel to compute the central arc-second coordinate of every masked\n    pixelization grid pixel.\n\n    Parameters\n    -----------\n    unmasked_sparse_grid : ndarray\n        The (y,x) arc-second centre of every unmasked pixelization grid pixel.\n    sparse_to_unmasked_sparse : ndarray\n        The index mapping between every pixelization pixel and masked pixelization pixel.", "id": "f5995:m9"}
{"signature": "@decorator_util.jit()<EOL>def map_2d_indexes_to_1d_indexes_for_shape(indexes_2d, shape):", "body": "indexes_1d = np.zeros(indexes_2d.shape[<NUM_LIT:0>])<EOL>for i in range(indexes_2d.shape[<NUM_LIT:0>]):<EOL><INDENT>indexes_1d[i] = int((indexes_2d[i, <NUM_LIT:0>]) * shape[<NUM_LIT:1>] + indexes_2d[i, <NUM_LIT:1>])<EOL><DEDENT>return indexes_1d<EOL>", "docstring": "For pixels on a 2D array of shape (rows, columns), map an array of 2D pixel indexes to 1D pixel indexes.\n\n    Indexing is defined from the top-left corner rightwards and downwards, whereby the top-left pixel on the 2D array\n    corresponds to index 0, the pixel to its right pixel 1, and so on.\n\n    For a 2D array of shape (3,3), 2D pixel indexes are converted as follows:\n\n    - 2D Pixel index [0,0] maps -> 1D pixel index 0.\n    - 2D Pixel index [0,1] maps -> 1D pixel index 1.\n    - 2D Pixel index [1,0] maps -> 1D pixel index 3.\n    - 2D Pixel index [2,2] maps -> 1D pixel index 8.\n\n    Parameters\n     ----------\n    indexes_2d : ndarray\n        The 2D pixel indexes which are mapped to 1D indexes.\n    shape : (int, int)\n        The shape of the 2D array which the pixels are defined on.\n\n    Returns\n    --------\n    ndarray\n        An array of 1d pixel indexes with dimensions (total_indexes).\n\n    Examples\n    --------\n    indexes_2d = np.array([[0,0], [1,0], [2,0], [2,2]])\n    indexes_1d = map_2d_indexes_to_1d_indexes_for_shape(indexes_2d=indexes_2d, shape=(3,3))", "id": "f5995:m1"}
{"signature": "@decorator_util.jit()<EOL>def map_2d_array_to_masked_1d_array_from_array_2d_and_mask(mask, array_2d):", "body": "total_image_pixels = mask_util.total_regular_pixels_from_mask(mask)<EOL>array_1d = np.zeros(shape=total_image_pixels)<EOL>index = <NUM_LIT:0><EOL>for y in range(mask.shape[<NUM_LIT:0>]):<EOL><INDENT>for x in range(mask.shape[<NUM_LIT:1>]):<EOL><INDENT>if not mask[y, x]:<EOL><INDENT>array_1d[index] = array_2d[y, x]<EOL>index += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return array_1d<EOL>", "docstring": "For a 2D array and mask, map the values of all unmasked pixels to a 1D array.\n\n    The pixel coordinate origin is at the top left corner of the 2D array and goes right-wards and downwards, such\n    that for an array of shape (3,3) where all pixels are unmasked:\n\n    - pixel [0,0] of the 2D array will correspond to index 0 of the 1D array.\n    - pixel [0,1] of the 2D array will correspond to index 1 of the 1D array.\n    - pixel [1,0] of the 2D array will correspond to index 4 of the 1D array.\n\n    Parameters\n     ----------\n    mask : ndarray\n        A 2D array of bools, where *False* values mean unmasked and are included in the mapping.\n    array_2d : ndarray\n        The 2D array of values which are mapped to a 1D array.\n\n    Returns\n    --------\n    ndarray\n        A 1D array of values mapped from the 2D array with dimensions (total_unmasked_pixels).\n\n    Examples\n    --------\n    mask = np.array([[True, False, True],\n                     [False, False, False]\n                     [True, False, True]])\n\n    array_2d = np.array([[1.0, 2.0, 3.0],\n                          [4.0, 5.0, 6.0],\n                          [7.0, 8.0, 9.0]])\n\n    array_1d = map_2d_array_to_masked_1d_array_from_array_2d_and_mask(mask=mask, array_2d=array_2d)", "id": "f5995:m3"}
{"signature": "@decorator_util.jit()<EOL>def unmasked_sparse_to_sparse_from_mask_and_pixel_centres(mask, unmasked_sparse_grid_pixel_centres,<EOL>total_sparse_pixels):", "body": "total_unmasked_sparse_pixels = unmasked_sparse_grid_pixel_centres.shape[<NUM_LIT:0>]<EOL>unmasked_sparse_to_sparse = np.zeros(total_unmasked_sparse_pixels)<EOL>pixel_index = <NUM_LIT:0><EOL>for unmasked_sparse_pixel_index in range(total_unmasked_sparse_pixels):<EOL><INDENT>y = unmasked_sparse_grid_pixel_centres[unmasked_sparse_pixel_index, <NUM_LIT:0>]<EOL>x = unmasked_sparse_grid_pixel_centres[unmasked_sparse_pixel_index, <NUM_LIT:1>]<EOL>unmasked_sparse_to_sparse[unmasked_sparse_pixel_index] = pixel_index<EOL>if not mask[y, x]:<EOL><INDENT>if pixel_index < total_sparse_pixels - <NUM_LIT:1>:<EOL><INDENT>pixel_index += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return unmasked_sparse_to_sparse<EOL>", "docstring": "Determine the mapping between every pixelization-grid pixel and masked pixelization-grid pixel. This is\n    performed by checking whether each pixelization-grid pixel is within the regular-masks, and mapping the indexes.\n\n    Pixelization pixels are paired with the next masked pixel index. This may mean that a pixel is not paired with a\n    pixel near it, if the next pixel is on the next row of the grid. This is not a problem, as it is only\n    unmasked pixels that are referened when computing image_to_pix, which is what this array is used for.\n\n    Parameters\n    -----------\n    total_sparse_pixels : int\n        The total number of pixels in the pixelization grid which fall within the regular-masks.\n    mask : ccd.masks.Mask\n        The regular-masks within which pixelization pixels must be inside\n    unmasked_sparse_grid_pixel_centres : ndarray\n        The centres of the unmasked pixelization grid pixels.", "id": "f5995:m7"}
{"signature": "def resized_scaled_array_from_array(self, new_shape, new_centre_pixels=None, new_centre_arcsec=None):", "body": "if new_centre_pixels is None and new_centre_arcsec is None:<EOL><INDENT>new_centre = (-<NUM_LIT:1>, -<NUM_LIT:1>)  <EOL><DEDENT>elif new_centre_pixels is not None and new_centre_arcsec is None:<EOL><INDENT>new_centre = new_centre_pixels<EOL><DEDENT>elif new_centre_pixels is None and new_centre_arcsec is not None:<EOL><INDENT>new_centre = self.arc_second_coordinates_to_pixel_coordinates(arc_second_coordinates=new_centre_arcsec)<EOL><DEDENT>else:<EOL><INDENT>raise exc.DataException('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>return self.new_with_array(array=array_util.resized_array_2d_from_array_2d_and_resized_shape(<EOL>array_2d=self, resized_shape=new_shape, origin=new_centre))<EOL>", "docstring": "resized the array to a new shape and at a new origin.\n\n        Parameters\n        -----------\n        new_shape : (int, int)\n            The new two-dimensional shape of the array.", "id": "f5996:c4:m8"}
{"signature": "@property<EOL><INDENT>def grid_2d(self):<DEDENT>", "body": "return grid_util.regular_grid_2d_from_shape_pixel_scales_and_origin(shape=self.shape,<EOL>pixel_scales=self.pixel_scales,<EOL>origin=self.origin)<EOL>", "docstring": "The arc second-grid of (y,x) coordinates of every pixel.\n\n        This is defined from the top-left corner, such that the first pixel at location [0, 0] will have a negative x \\\n        value y value in arc seconds.", "id": "f5996:c1:m8"}
{"signature": "def grid_arcsec_to_grid_pixel_indexes(self, grid_arcsec):", "body": "return grid_util.grid_arcsec_1d_to_grid_pixel_indexes_1d(grid_arcsec_1d=grid_arcsec,<EOL>shape=self.shape,<EOL>pixel_scales=self.pixel_scales,<EOL>origin=self.origin).astype('<STR_LIT:int>')<EOL>", "docstring": "Convert a grid of (y,x) arc second coordinates to a grid of (y,x) pixel 1D indexes. Pixel coordinates are \\\n        returned as integers such that they are the pixel from the top-left of the 2D grid going rights and then \\\n        downwards.\n\n        For example:\n\n        The pixel at the top-left, whose 2D index is [0,0], corresponds to 1D index 0.\n        The fifth pixel on the top row, whose 2D index is [0,5], corresponds to 1D index 4.\n        The first pixel on the second row, whose 2D index is [0,1], has 1D index 10 if a row has 10 pixels.\n\n        The arc-second coordinate origin is defined by the class attribute origin, and coordinates are shifted to this \\\n        origin before computing their 1D grid pixel indexes.\n\n        Parameters\n        ----------\n        grid_arcsec: ndarray\n            The grid of (y,x) coordinates in arc seconds.", "id": "f5996:c1:m5"}
{"signature": "def new_with_array(self, array):", "body": "arguments = vars(self)<EOL>arguments.update({\"<STR_LIT>\": array})<EOL>if '<STR_LIT>' in arguments:<EOL><INDENT>arguments.pop(\"<STR_LIT>\")<EOL><DEDENT>return self.__class__(**arguments)<EOL>", "docstring": "Parameters\n----------\narray: ndarray\n    An ndarray\n\nReturns\n-------\nnew_array: ScaledSquarePixelArray\n    A new instance of this class that shares all of this instances attributes with a new ndarray.", "id": "f5996:c2:m4"}
{"signature": "def zoomed_scaled_array_around_mask(self, mask, buffer=<NUM_LIT:1>):", "body": "return self.new_with_array(array=array_util.extracted_array_2d_from_array_2d_and_coordinates(<EOL>array_2d=self,  y0=mask.zoom_region[<NUM_LIT:0>]-buffer, y1=mask.zoom_region[<NUM_LIT:1>]+buffer,<EOL>x0=mask.zoom_region[<NUM_LIT:2>]-buffer, x1=mask.zoom_region[<NUM_LIT:3>]+buffer))<EOL>", "docstring": "Extract the 2D region of an array corresponding to the rectangle encompassing all unmasked values.\n\n        This is used to extract and visualize only the region of an image that is used in an analysis.\n\n        Parameters\n        ----------\n        mask : mask.Mask\n            The mask around which the scaled array is extracted.\n        buffer : int\n            The buffer of pixels around the extraction.", "id": "f5996:c4:m7"}
{"signature": "@classmethod<EOL><INDENT>def single_value(cls, value, shape, pixel_scale, origin=(<NUM_LIT:0.0>, <NUM_LIT:0.0>)):<DEDENT>", "body": "array = np.ones(shape) * value<EOL>return cls(array, pixel_scale, origin)<EOL>", "docstring": "Creates an instance of Array and fills it with a single value\n\nParameters\n----------\nvalue: float\n    The value with which the array should be filled\nshape: (int, int)\n    The shape of the array\npixel_scale: float\n    The scale of a pixel in arc seconds\n\nReturns\n-------\narray: ScaledSquarePixelArray\n    An array filled with a single value", "id": "f5996:c4:m4"}
{"signature": "def __init__(self, array, pixel_scale, origin=(<NUM_LIT:0.0>, <NUM_LIT:0.0>)):", "body": "pixel_scale_sanity_checks(pixel_scales=(pixel_scale, pixel_scale))<EOL>self.pixel_scale = pixel_scale<EOL>self.origin = origin<EOL>super(ScaledSquarePixelArray, self).__init__(array=array)<EOL>", "docstring": "A scaled array with square-pixels.\n\n        Parameters\n        ----------\n        array: ndarray\n            An array representing image (e.g. an image, noise-map, etc.)\n        pixel_scale: float\n            The arc-second to pixel conversion factor of each pixel.\n        origin : (float, float)\n            The arc-second origin of the scaled array's coordinate system.", "id": "f5996:c4:m0"}
{"signature": "def __init__(self, mask, psf):", "body": "if psf.shape[<NUM_LIT:0>] % <NUM_LIT:2> == <NUM_LIT:0> or psf.shape[<NUM_LIT:1>] % <NUM_LIT:2> == <NUM_LIT:0>:<EOL><INDENT>raise exc.KernelException(\"<STR_LIT>\")<EOL><DEDENT>self.mask_index_array = np.full(mask.shape, -<NUM_LIT:1>)<EOL>self.pixels_in_mask = int(np.size(mask) - np.sum(mask))<EOL>count = <NUM_LIT:0><EOL>for x in range(mask.shape[<NUM_LIT:0>]):<EOL><INDENT>for y in range(mask.shape[<NUM_LIT:1>]):<EOL><INDENT>if not mask[x, y]:<EOL><INDENT>self.mask_index_array[x, y] = count<EOL>count += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>self.psf = psf<EOL>self.psf_shape = psf.shape<EOL>self.psf_max_size = self.psf_shape[<NUM_LIT:0>] * self.psf_shape[<NUM_LIT:1>]<EOL>image_index = <NUM_LIT:0><EOL>self.image_frame_indexes = np.zeros((self.pixels_in_mask, self.psf_max_size), dtype='<STR_LIT:int>')<EOL>self.image_frame_psfs = np.zeros((self.pixels_in_mask, self.psf_max_size))<EOL>self.image_frame_lengths = np.zeros((self.pixels_in_mask), dtype='<STR_LIT:int>')<EOL>for x in range(self.mask_index_array.shape[<NUM_LIT:0>]):<EOL><INDENT>for y in range(self.mask_index_array.shape[<NUM_LIT:1>]):<EOL><INDENT>if not mask[x][y]:<EOL><INDENT>image_frame_indexes, image_frame_psfs = self.frame_at_coordinates_jit((x, y), mask,<EOL>self.mask_index_array,<EOL>self.psf[:, :])<EOL>self.image_frame_indexes[image_index, :] = image_frame_indexes<EOL>self.image_frame_psfs[image_index, :] = image_frame_psfs<EOL>self.image_frame_lengths[image_index] = image_frame_indexes[image_frame_indexes >= <NUM_LIT:0>].shape[<NUM_LIT:0>]<EOL>image_index += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Class to create regular frames and blurring frames used to convolve a psf with a 1D regular of non-masked \\\nvalues.\n\nParameters\n----------\nmask : Mask\n    A masks where True eliminates datas.\npsf : regular.PSF or ndarray\n    An array representing a PSF.", "id": "f5998:c0:m0"}
{"signature": "def convolve_image(self, image_array, blurring_array):", "body": "return self.convolve_jit(image_array, self.image_frame_indexes, self.image_frame_psfs, self.image_frame_lengths,<EOL>blurring_array, self.blurring_frame_indexes, self.blurring_frame_psfs,<EOL>self.blurring_frame_lengths)<EOL>", "docstring": "For a given 1D regular array and blurring array, convolve the two using this convolver.\n\n        Parameters\n        -----------\n        image_array : ndarray\n            1D array of the regular values which are to be blurred with the convolver's PSF.\n        blurring_array : ndarray\n            1D array of the blurring regular values which blur into the regular-array after PSF convolution.", "id": "f5998:c1:m1"}
{"signature": "def plot_image(<EOL>image, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False,<EOL>should_plot_border=False, positions=None, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT:30>, grid_pointsize=<NUM_LIT:1>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT:image>'):", "body": "origin = get_origin(array=image, plot_origin=plot_origin)<EOL>array_plotters.plot_array(<EOL>array=image, origin=origin, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask,<EOL>should_plot_border=should_plot_border, positions=positions, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize, grid_pointsize=grid_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the observed image of the ccd data.\n\n    Set *autolens.data.array.plotters.array_plotters* for a description of all input parameters not described below.\n\n    Parameters\n    -----------\n    image : 
ScaledSquarePixelArray\n        The image of the data.\n    plot_origin : True\n        If true, the origin of the data's coordinate system is plotted as a 'x'.\n    image_plane_pix_grid : ndarray or data.array.grid_stacks.PixGrid\n        If an adaptive pixelization whose pixels are formed by tracing pixels from the data, this plots those pixels \\\n        over the immage.", "id": "f5999:m0"}
{"signature": "def plot_potential_chi_squared_map(<EOL>potential_chi_squared_map, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False,<EOL>as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "origin = get_origin(array=potential_chi_squared_map, plot_origin=plot_origin)<EOL>array_plotters.plot_array(<EOL>array=potential_chi_squared_map, origin=origin, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask,<EOL>as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the signal-to-noise_map of the ccd data.\n\n    Set *autolens.data.array.plotters.array_plotters* for a description of all input parameters not described below.\n\n    Parameters\n    -----------\n    potential_chi_squared_map : ScaledSquarePixelArray\n        The signal-to-noise map of the data.\n    plot_origin : True\n        If true, the origin of the data's coordinate 
system is plotted as a 'x'.", "id": "f5999:m5"}
{"signature": "def get_origin(array, plot_origin):", "body": "if plot_origin:<EOL><INDENT>return array.origin<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Get the (y,x) origin of the ccd data if it going to be plotted.\n\n    Parameters\n    -----------\n    array : data.array.scaled_array.ScaledArray\n        The array from which the origin is extracted.\n    plot_origin : True\n        If true, the origin of the data's coordinate system is returned.", "id": "f5999:m6"}
{"signature": "def plot_absolute_signal_to_noise_map(<EOL>absolute_signal_to_noise_map, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False,<EOL>as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "origin = get_origin(array=absolute_signal_to_noise_map, plot_origin=plot_origin)<EOL>array_plotters.plot_array(<EOL>array=absolute_signal_to_noise_map, origin=origin, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask,<EOL>as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the absolute signal-to-noise map of the ccd data.\n\n    Set *autolens.data.array.plotters.array_plotters* for a description of all input parameters not described below.\n\n    Parameters\n    -----------\n    absolute_signal_to_noise_map : ScaledSquarePixelArray\n        The absolute signal-to-noise map of the data.\n    plot_origin : True\n        If true, the 
origin of the data's coordinate system is plotted as a 'x'.", "id": "f5999:m4"}
{"signature": "def plot_noise_map(<EOL>noise_map, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "origin = get_origin(array=noise_map, plot_origin=plot_origin)<EOL>array_plotters.plot_array(<EOL>array=noise_map, origin=origin, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the noise_map of the ccd data.\n\n    Set *autolens.data.array.plotters.array_plotters* for a description of all input parameters not described below.\n\n    Parameters\n    -----------\n    noise_map : ScaledSquarePixelArray\n        The noise map of the data.\n    plot_origin : True\n        If true, the origin of the data's coordinate system is plotted as a 'x'.", "id": "f5999:m1"}
{"signature": "def plot_image(<EOL>ccd_data, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False,<EOL>should_plot_border=False, positions=None,<EOL>as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>, position_pointsize=<NUM_LIT:30>, grid_pointsize=<NUM_LIT:1>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "data_plotters.plot_image(<EOL>image=ccd_data.image, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, should_plot_border=should_plot_border, positions=positions,<EOL>as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize, position_pointsize=position_pointsize, grid_pointsize=grid_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the observed data of the ccd data.\n\n    Set *autolens.data.array.plotters.array_plotters* for a description of all innput parameters not described below.\n\n    Parameters\n    -----------\n    image : data.CCDData\n        The ccd data, which includes the 
observed data, noise_map-map, PSF, signal-to-noise_map-map, etc.\n    plot_origin : True\n        If true, the origin of the data's coordinate system is plotted as a 'x'.\n    image_plane_pix_grid : ndarray or data.array.grid_stacks.PixGrid\n        If an adaptive pixelization whose pixels are formed by tracing pixels from the data, this plots those pixels \\\n        over the immage.", "id": "f6000:m2"}
{"signature": "def plot_signal_to_noise_map(<EOL>ccd_data, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>mask_pointsize=<NUM_LIT:10>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "data_plotters.plot_signal_to_noise_map(<EOL>signal_to_noise_map=ccd_data.signal_to_noise_map, plot_origin=plot_origin, mask=mask,<EOL>extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the signal-to-noise_map-map of the ccd data.\n\n    Set *autolens.data.array.plotters.array_plotters* for a description of all innput parameters not described below.\n\n    Parameters\n    -----------\n    image : data.CCDData\n        The ccd data, which includes the observed image, noise_map-map, PSF, signal-to-noise_map-map, etc.\n    plot_origin : True\n        If true, the origin of the data's coordinate system is plotted as a 'x'.", "id": 
"f6000:m5"}
{"signature": "def plot_psf(<EOL>ccd_data, plot_origin=True, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "data_plotters.plot_psf(<EOL>psf=ccd_data.psf, plot_origin=plot_origin, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the PSF of the ccd data.\n\n    Set *autolens.data.array.plotters.array_plotters* for a description of all innput parameters not described below.\n\n    Parameters\n    -----------\n    image : data.CCDData\n        The ccd data, which includes the observed data, noise_map-map, PSF, signal-to-noise_map-map, etc.\n    plot_origin : True\n        If true, the origin of the data's coordinate system is plotted as a 'x'.", "id": "f6000:m4"}
{"signature": "def plot_noise_map(<EOL>ccd_data, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False, as_subplot=False,<EOL>units='<STR_LIT>', kpc_per_arcsec=None, figsize=(<NUM_LIT:7>, <NUM_LIT:7>), aspect='<STR_LIT>',<EOL>cmap='<STR_LIT>', norm='<STR_LIT>', norm_min=None, norm_max=None, linthresh=<NUM_LIT>, linscale=<NUM_LIT>,<EOL>cb_ticksize=<NUM_LIT:10>, cb_fraction=<NUM_LIT>, cb_pad=<NUM_LIT>, cb_tick_values=None, cb_tick_labels=None,<EOL>title='<STR_LIT>', titlesize=<NUM_LIT:16>, xlabelsize=<NUM_LIT:16>, ylabelsize=<NUM_LIT:16>, xyticksize=<NUM_LIT:16>, mask_pointsize=<NUM_LIT:10>,<EOL>output_path=None, output_format='<STR_LIT>', output_filename='<STR_LIT>'):", "body": "data_plotters.plot_noise_map(<EOL>noise_map=ccd_data.noise_map, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask,<EOL>zoom_around_mask=zoom_around_mask, as_subplot=as_subplot,<EOL>units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect,<EOL>cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale,<EOL>cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, <EOL>cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels,<EOL>title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize,<EOL>mask_pointsize=mask_pointsize,<EOL>output_path=output_path, output_format=output_format, output_filename=output_filename)<EOL>", "docstring": "Plot the noise_map-map of the ccd data.\n\n    Set *autolens.data.array.plotters.array_plotters* for a description of all innput parameters not described below.\n\n    Parameters\n    -----------\n    image : data.CCDData\n        The ccd data, which includes the observed data, noise_map-map, PSF, signal-to-noise_map-map, etc.\n    plot_origin : True\n        If true, the origin of the data's coordinate system is plotted as a 'x'.", "id": "f6000:m3"}
{"signature": "def pixel_scale_from_data_resolution(data_resolution):", "body": "if data_resolution == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT><EOL><DEDENT>elif data_resolution == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT:0.1><EOL><DEDENT>elif data_resolution == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT><EOL><DEDENT>elif data_resolution == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT><EOL><DEDENT>elif data_resolution == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>', data_resolution)<EOL><DEDENT>", "docstring": "Determine the pixel scale from a data resolution type based on real observations.\n\n    These options are representative of LSST, Euclid, HST, over-sampled HST and Adaptive Optics image.\n\n    Parameters\n    ----------\n    data_resolution : str\n        A string giving the resolution of the desired data type (LSST | Euclid | HST | HST_Up | AO).", "id": "f6145:m0"}
{"signature": "def send_payload(self, params):", "body": "data = json.dumps({<EOL>'<STR_LIT>': self.version,<EOL>'<STR_LIT>': self.service_name,<EOL>'<STR_LIT>': params,<EOL>'<STR_LIT:id>': text_type(uuid.uuid4())<EOL>})<EOL>data_binary = data.encode('<STR_LIT:utf-8>')<EOL>url_request = Request(self.service_url, data_binary, headers=self.headers)<EOL>return urlopen(url_request).read()<EOL>", "docstring": "Performs the actual sending action and returns the result", "id": "f6175:c0:m3"}
{"signature": "def make_response(self, rv):", "body": "status_or_headers = headers = None<EOL>if isinstance(rv, tuple):<EOL><INDENT>rv, status_or_headers, headers = rv + (None,) * (<NUM_LIT:3> - len(rv))<EOL><DEDENT>if rv is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if isinstance(status_or_headers, (dict, list)):<EOL><INDENT>headers, status_or_headers = status_or_headers, None<EOL><DEDENT>D = json.loads(extract_raw_data_request(request))<EOL>if type(D) is list:<EOL><INDENT>raise InvalidRequestError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>response_obj = self.empty_response(version=D['<STR_LIT>'])<EOL>response_obj['<STR_LIT:id>'] = D['<STR_LIT:id>']<EOL>response_obj['<STR_LIT:result>'] = rv<EOL>response_obj.pop('<STR_LIT:error>', None)<EOL>rv = jsonify(response_obj)<EOL><DEDENT>if status_or_headers is not None:<EOL><INDENT>if isinstance(status_or_headers, string_types):<EOL><INDENT>rv.status = status_or_headers<EOL><DEDENT>else:<EOL><INDENT>rv.status_code = status_or_headers<EOL><DEDENT><DEDENT>if headers:<EOL><INDENT>rv.headers.extend(headers)<EOL><DEDENT>return rv<EOL>", "docstring": "Converts the return value from a view function to a real\n        response object that is an instance of :attr:`response_class`.", "id": "f6176:c0:m10"}
{"signature": "def _inject_args(sig, types):", "body": "if '<STR_LIT:(>' in sig:<EOL><INDENT>parts = sig.split('<STR_LIT:(>')<EOL>sig = '<STR_LIT>'.format(<EOL>parts[<NUM_LIT:0>], '<STR_LIT:U+002CU+0020>'.join(types),<EOL>('<STR_LIT:U+002CU+0020>' if parts[<NUM_LIT:1>].index('<STR_LIT:)>') > <NUM_LIT:0> else '<STR_LIT>'), parts[<NUM_LIT:1>]<EOL>)<EOL><DEDENT>else:<EOL><INDENT>sig = '<STR_LIT>'.format(sig, '<STR_LIT:U+002CU+0020>'.join(types))<EOL><DEDENT>return sig<EOL>", "docstring": "A function to inject arguments manually into a method signature before\n    it's been parsed. If using keyword arguments use 'kw=type' instead in\n    the types array.\n\n        sig         the string signature\n        types       a list of types to be inserted\n\n    Returns the altered signature.", "id": "f6179:m4"}
{"signature": "def jsonify_status_code(status_code, *args, **kw):", "body": "is_batch = kw.pop('<STR_LIT>', False)<EOL>if is_batch:<EOL><INDENT>response = flask_make_response(json.dumps(*args, **kw))<EOL>response.mimetype = '<STR_LIT:application/json>'<EOL>response.status_code = status_code<EOL>return response<EOL><DEDENT>response = jsonify(*args, **kw)<EOL>response.status_code = status_code<EOL>return response<EOL>", "docstring": "Returns a jsonified response with the specified HTTP status code.\n\n    The positional and keyword arguments are passed directly to the\n    :func:`flask.jsonify` function which creates the response.", "id": "f6180:m1"}
{"signature": "def __init__(self, message=None, code=None, data=None):", "body": "super(Error, self).__init__()<EOL>if message is not None:<EOL><INDENT>self.message = message<EOL><DEDENT>if code is not None:<EOL><INDENT>self.code = code<EOL><DEDENT>if data is not None:<EOL><INDENT>self.data = data<EOL><DEDENT>", "docstring": "Setup the Exception and overwrite the default message", "id": "f6181:c0:m0"}
{"signature": "@property<EOL><INDENT>def json_rpc_format(self):<DEDENT>", "body": "error = {<EOL>'<STR_LIT:name>': text_type(self.__class__.__name__),<EOL>'<STR_LIT:code>': self.code,<EOL>'<STR_LIT:message>': '<STR_LIT>'.format(text_type(self.message)),<EOL>'<STR_LIT:data>': self.data<EOL>}<EOL>if current_app.config['<STR_LIT>']:<EOL><INDENT>import sys, traceback<EOL>error['<STR_LIT>'] = traceback.format_exc()<EOL>error['<STR_LIT>'] = sys.executable<EOL><DEDENT>return error<EOL>", "docstring": "Return the Exception data in a format for JSON-RPC", "id": "f6181:c0:m1"}
{"signature": "def register(self, app, options, first_registration=False):", "body": "self.jsonrpc_site = options.get('<STR_LIT>')<EOL>self._got_registered_once = True<EOL>state = self.make_setup_state(app, options, first_registration)<EOL>if self.has_static_folder andnot self.name + '<STR_LIT>' in state.app.view_functions.keys():<EOL><INDENT>state.add_url_rule(self.static_url_path + '<STR_LIT>',<EOL>view_func=self.send_static_file,<EOL>endpoint='<STR_LIT>')<EOL><DEDENT>for deferred in self.deferred_functions:<EOL><INDENT>deferred(state)<EOL><DEDENT>", "docstring": "Called by :meth:`Flask.register_blueprint` to register a blueprint\n        on the application. This can be overridden to customize the register\n        behavior. Keyword arguments from\n        :func:`~flask.Flask.register_blueprint` are directly forwarded to this\n        method in the `options` dictionary.", "id": "f6182:c0:m0"}
{"signature": "def _parse_arguments(prog, argv):", "body": "<EOL>param_util.handle_version_flag()<EOL>parser = provider_base.create_parser(prog)<EOL>parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>', default=False, help='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>default=False,<EOL>action='<STR_LIT:store_true>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:*>',<EOL>action=TaskParamAction,<EOL>help=\"\"\"<STR_LIT>\"\"\",<EOL>metavar='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>default='<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>default=False,<EOL>action='<STR_LIT:store_true>',<EOL>help='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\",<EOL>metavar='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\",<EOL>metavar='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:*>',<EOL>action=param_util.ListParamAction,<EOL>default=[],<EOL>help='<STR_LIT>',<EOL>metavar='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:*>',<EOL>action=param_util.ListParamAction,<EOL>default=[],<EOL>help='<STR_LIT>',<EOL>metavar='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:*>',<EOL>action=param_util.ListParamAction,<EOL>default=[],<EOL>help=\"\"\"<STR_LIT>\"\"\",<EOL>metavar='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:*>',<EOL>action=param_util.ListParamAction,<EOL>default=[],<EOL>help=\"\"\"<STR_LIT>\"\"\",<EOL>metavar='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:*>',<EOL>action=param_util.ListParamAction,<EOL>default=[],<EOL>help=\"\"\"<STR_LIT>\"\"\",<EOL>metavar='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:*>',<EOL>action=param_util.ListParamAction,<EOL>default=[],<E
OL>help=\"\"\"<STR_LIT>\"\"\",<EOL>metavar='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:*>',<EOL>action=param_util.ListParamAction,<EOL>default=[],<EOL>help=\"\"\"<STR_LIT>\"\"\",<EOL>metavar='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>action='<STR_LIT:store_true>',<EOL>help='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>default=<NUM_LIT:0>,<EOL>type=int,<EOL>help='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>default=<NUM_LIT:10>,<EOL>type=int,<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:+>',<EOL>default=[],<EOL>help='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>default=False,<EOL>action='<STR_LIT:store_true>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>type=int,<EOL>help='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>type=float,<EOL>help='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>default=job_model.DEFAULT_DISK_SIZE,<EOL>type=int,<EOL>help='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>google_common = parser.add_argument_group(<EOL>title='<STR_LIT>',<EOL>description='<STR_LIT>')<EOL>google_common.add_argument(<EOL>'<STR_LIT>', help='<STR_LIT>')<EOL>google_common.add_argument(<EOL>'<STR_LIT>',<EOL>default=job_model.DEFAULT_BOOT_DISK_SIZE,<EOL>type=int,<EOL>help='<STR_LIT>')<EOL>google_common.add_argument(<EOL>'<STR_LIT>',<EOL>default=False,<EOL>action='<STR_LIT:store_true>',<EOL>help='<STR_LIT>')<EOL>google_common.add_argument(<EOL>'<STR_LIT>', nargs='<STR_LIT:+>', help='<STR_LIT>')<EOL>google_common.add_argument(<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:+>',<EOL>help=\"\"\"<STR_LIT>\"\"\" % 
'<STR_LIT:U+002C>'.join(<EOL>google_base.DEFAULT_SCOPES))<EOL>google_common.add_argument(<EOL>'<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>google_common.add_argument(<EOL>'<STR_LIT>',<EOL>type=int,<EOL>default=<NUM_LIT:0>,<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>google = parser.add_argument_group(<EOL>title='<STR_LIT>',<EOL>description='<STR_LIT>')<EOL>google.add_argument(<EOL>'<STR_LIT>',<EOL>type=int,<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>google_v2 = parser.add_argument_group(<EOL>title='<STR_LIT>',<EOL>description='<STR_LIT>')<EOL>google_v2.add_argument(<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:+>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>google_v2.add_argument(<EOL>'<STR_LIT>', help='<STR_LIT>')<EOL>google_v2.add_argument(<EOL>'<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\"<EOL>)<EOL>google_v2.add_argument(<EOL>'<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>google_v2.add_argument(<EOL>'<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>google_v2.add_argument(<EOL>'<STR_LIT>',<EOL>default=False,<EOL>action='<STR_LIT:store_true>',<EOL>help='<STR_LIT>')<EOL>google_v2.add_argument(<EOL>'<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>google_v2.add_argument(<EOL>'<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>google_v2.add_argument(<EOL>'<STR_LIT>',<EOL>default=False,<EOL>action='<STR_LIT:store_true>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>google_v2.add_argument(<EOL>'<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>google_v2.add_argument(<EOL>'<STR_LIT>',<EOL>type=str,<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>google_v2.add_argument(<EOL>'<STR_LIT>',<EOL>help=\"\"\"<STR_LIT>\"\"\")<EOL>args = provider_base.parse_args(<EOL>parser, {<EOL>'<STR_LIT>': ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'],<EOL>'<STR_LIT>': ['<STR_LIT>', '<STR_LIT>'],<EOL>'<STR_LIT>': [],<EOL>'<STR_LIT>': ['<STR_LIT>'],<EOL>}, argv)<EOL>if args.provider == '<STR_LIT>':<EOL><INDENT>_google_parse_arguments(args)<EOL><DEDENT>if args.provider == '<STR_LIT>':<EOL><INDENT>_google_v2_parse_arguments(args)<EOL><DEDENT>return 
args<EOL>", "docstring": "Parses command line arguments.\n\n    Args:\n      prog: The path of the program (dsub.py) or an alternate program name to\n      display in usage.\n      argv: The list of program arguments to parse.\n\n    Returns:\n      A Namespace of parsed arguments.", "id": "f6185:m2"}
{"signature": "def _validate_job_and_task_arguments(job_params, task_descriptors):", "body": "if not task_descriptors:<EOL><INDENT>return<EOL><DEDENT>task_params = task_descriptors[<NUM_LIT:0>].task_params<EOL>from_jobs = {label.name for label in job_params['<STR_LIT>']}<EOL>from_tasks = {label.name for label in task_params['<STR_LIT>']}<EOL>intersect = from_jobs & from_tasks<EOL>if intersect:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format('<STR_LIT:U+002C>'.join(intersect)))<EOL><DEDENT>from_jobs = {<EOL>item.name<EOL>for item in job_params['<STR_LIT>'] | job_params['<STR_LIT>']<EOL>| job_params['<STR_LIT>']<EOL>}<EOL>from_tasks = {<EOL>item.name<EOL>for item in task_params['<STR_LIT>'] | task_params['<STR_LIT>']<EOL>| task_params['<STR_LIT>']<EOL>}<EOL>intersect = from_jobs & from_tasks<EOL>if intersect:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format('<STR_LIT:U+002C>'.join(intersect)))<EOL><DEDENT>", "docstring": "Validates that job and task argument names do not overlap.", "id": "f6185:m14"}
{"signature": "def _retry_task(provider, job_descriptor, task_id, task_attempt):", "body": "td_orig = job_descriptor.find_task_descriptor(task_id)<EOL>new_task_descriptors = [<EOL>job_model.TaskDescriptor({<EOL>'<STR_LIT>': task_id,<EOL>'<STR_LIT>': task_attempt<EOL>}, td_orig.task_params, td_orig.task_resources)<EOL>]<EOL>_resolve_task_resources(job_descriptor.job_metadata,<EOL>job_descriptor.job_resources, new_task_descriptors)<EOL>provider.submit_job(<EOL>job_model.JobDescriptor(<EOL>job_descriptor.job_metadata, job_descriptor.job_params,<EOL>job_descriptor.job_resources, new_task_descriptors), False)<EOL>", "docstring": "Retry task_id (numeric id) assigning it task_attempt.", "id": "f6185:m9"}
{"signature": "def _resolve_task_resources(job_metadata, job_resources, task_descriptors):", "body": "_resolve_task_logging(job_metadata, job_resources, task_descriptors)<EOL>", "docstring": "Resolve task properties (such as the logging path) from job properties.\n\n    Args:\n      job_metadata: Job metadata, such as job-id, job-name, and user-id.\n      job_resources: Resources specified such as ram, cpu, and logging path.\n      task_descriptors: Task metadata, parameters, and resources.\n\n    This function exists to be called at the point that all job properties have\n    been validated and resolved. It is also called prior to re-trying a task.\n\n    The only property to be resolved right now is the logging path,\n    which may have substitution parameters such as\n    job-id, task-id, task-attempt, user-id, and job-name.", "id": "f6185:m6"}
{"signature": "def _dominant_task_for_jobs(tasks):", "body": "per_job = _group_tasks_by_jobid(tasks)<EOL>ret = []<EOL>for job_id in per_job.keys():<EOL><INDENT>tasks_in_salience_order = sorted(per_job[job_id], key=_importance_of_task)<EOL>ret.append(tasks_in_salience_order[<NUM_LIT:0>])<EOL><DEDENT>return ret<EOL>", "docstring": "A list with, for each job, its dominant task.\n\n    The dominant task is the one that exemplifies its job's\n    status. It is either:\n    - the first (FAILURE or CANCELED) task, or if none\n    - the first RUNNING task, or if none\n    - the first SUCCESS task.\n\n    Args:\n      tasks: a list of tasks to consider\n\n    Returns:\n      A list with, for each job, its dominant task.", "id": "f6185:m10"}
{"signature": "def _google_parse_arguments(args):", "body": "if args.machine_type:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if args.mount:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Validated google arguments.", "id": "f6185:m0"}
{"signature": "def prepare_output(self, row):", "body": "date_fields = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>int_fields = ['<STR_LIT>']<EOL>for col in date_fields:<EOL><INDENT>if col in row:<EOL><INDENT>row[col] = self.default_format_date(row[col])<EOL><DEDENT><DEDENT>for col in int_fields:<EOL><INDENT>if col in row and row[col] is not None:<EOL><INDENT>row[col] = int(row[col])<EOL><DEDENT><DEDENT>return row<EOL>", "docstring": "Convert types of task fields.", "id": "f6187:c0:m5"}
{"signature": "def string_presenter(self, dumper, data):", "body": "if '<STR_LIT:\\n>' in data:<EOL><INDENT>return dumper.represent_scalar('<STR_LIT>', data, style='<STR_LIT:|>')<EOL><DEDENT>else:<EOL><INDENT>return dumper.represent_scalar('<STR_LIT>', data)<EOL><DEDENT>", "docstring": "Presenter to force yaml.dump to use multi-line string style.", "id": "f6187:c2:m1"}
{"signature": "def directory_fmt(directory):", "body": "return directory.rstrip('<STR_LIT:/>') + '<STR_LIT:/>'<EOL>", "docstring": "In ensure that directories end with '/'.\n\n    Frequently we need to ensure that directory paths end with a forward slash.\n    Pythons dirname and split functions in the path library treat this\n    inconsistently creating this requirement. This function is simple but was\n    written to centralize documentation of an often used (and often explained)\n    requirement in this codebase.\n\n    >>> os.path.dirname('gs://bucket/folder/file.txt')\n    'gs://bucket/folder'\n    >>> directory_fmt(os.path.dirname('gs://bucket/folder/file.txt'))\n    'gs://bucket/folder/'\n    >>> os.path.dirname('/newfile')\n    '/'\n    >>> directory_fmt(os.path.dirname('/newfile'))\n    '/'\n\n    Specifically we need this since copy commands must know whether the\n    destination is a directory to function properly. See the following shell\n    interaction for an example of the inconsistency. 
Notice that text files are\n    copied as expected but the bam is copied over the directory name.\n\n    Multiple files copy, works as intended in all cases:\n        $ touch a.txt b.txt\n        $ gsutil cp ./*.txt gs://mybucket/text_dest\n        $ gsutil ls gs://mybucket/text_dest/\n              0  2017-07-19T21:44:36Z  gs://mybucket/text_dest/a.txt\n              0  2017-07-19T21:44:36Z  gs://mybucket/text_dest/b.txt\n        TOTAL: 2 objects, 0 bytes (0 B)\n\n    Single file copy fails to copy into a directory:\n        $ touch 1.bam\n        $ gsutil cp ./*.bam gs://mybucket/bad_dest\n        $ gsutil ls gs://mybucket/bad_dest\n                 0  2017-07-19T21:46:16Z  gs://mybucket/bad_dest\n        TOTAL: 1 objects, 0 bytes (0 B)\n\n    Adding a trailing forward slash fixes this:\n        $ touch my.sam\n        $ gsutil cp ./*.sam gs://mybucket/good_folder\n        $ gsutil ls gs://mybucket/good_folder\n                 0  2017-07-19T21:46:16Z  gs://mybucket/good_folder/my.sam\n        TOTAL: 1 objects, 0 bytes (0 B)\n\n    Args:\n      directory (str): a uri without an blob or file basename.\n\n    Returns:\n      the directory with a trailing slash.", "id": "f6189:m14"}
{"signature": "def get_gcs_mounts(mounts):", "body": "return _get_filtered_mounts(mounts, job_model.GCSMountParam)<EOL>", "docstring": "Returns the GCS mounts from mounts.", "id": "f6189:m2"}
{"signature": "def age_to_create_time(age, from_time=None):", "body": "if not age:<EOL><INDENT>return None<EOL><DEDENT>if not from_time:<EOL><INDENT>from_time = dsub_util.replace_timezone(datetime.datetime.now(), tzlocal())<EOL><DEDENT>try:<EOL><INDENT>last_char = age[-<NUM_LIT:1>]<EOL>if last_char == '<STR_LIT:s>':<EOL><INDENT>return from_time - datetime.timedelta(seconds=int(age[:-<NUM_LIT:1>]))<EOL><DEDENT>elif last_char == '<STR_LIT:m>':<EOL><INDENT>return from_time - datetime.timedelta(minutes=int(age[:-<NUM_LIT:1>]))<EOL><DEDENT>elif last_char == '<STR_LIT:h>':<EOL><INDENT>return from_time - datetime.timedelta(hours=int(age[:-<NUM_LIT:1>]))<EOL><DEDENT>elif last_char == '<STR_LIT:d>':<EOL><INDENT>return from_time - datetime.timedelta(days=int(age[:-<NUM_LIT:1>]))<EOL><DEDENT>elif last_char == '<STR_LIT:w>':<EOL><INDENT>return from_time - datetime.timedelta(weeks=int(age[:-<NUM_LIT:1>]))<EOL><DEDENT>else:<EOL><INDENT>return dsub_util.replace_timezone(<EOL>datetime.datetime.utcfromtimestamp(int(age)), pytz.utc)<EOL><DEDENT><DEDENT>except (ValueError, OverflowError) as e:<EOL><INDENT>raise ValueError('<STR_LIT>' % (age, e))<EOL><DEDENT>", "docstring": "Compute the create time (UTC) for the list filter.\n\n    If the age is an integer value it is treated as a UTC date.\n    Otherwise the value must be of the form \"<integer><unit>\" where supported\n    units are s, m, h, d, w (seconds, minutes, hours, days, weeks).\n\n    Args:\n      age: A \"<integer><unit>\" string or integer value.\n      from_time:\n\n    Returns:\n      A timezone-aware datetime or None if age parameter is empty.", "id": "f6189:m16"}
{"signature": "def args_to_job_params(envs, labels, inputs, inputs_recursive, outputs,<EOL>outputs_recursive, mounts, input_file_param_util,<EOL>output_file_param_util, mount_param_util):", "body": "<EOL>env_data = parse_pair_args(envs, job_model.EnvParam)<EOL>label_data = parse_pair_args(labels, job_model.LabelParam)<EOL>input_data = set()<EOL>for (recursive, args) in ((False, inputs), (True, inputs_recursive)):<EOL><INDENT>for arg in args:<EOL><INDENT>name, value = split_pair(arg, '<STR_LIT:=>', nullable_idx=<NUM_LIT:0>)<EOL>name = input_file_param_util.get_variable_name(name)<EOL>input_data.add(input_file_param_util.make_param(name, value, recursive))<EOL><DEDENT><DEDENT>output_data = set()<EOL>for (recursive, args) in ((False, outputs), (True, outputs_recursive)):<EOL><INDENT>for arg in args:<EOL><INDENT>name, value = split_pair(arg, '<STR_LIT:=>', <NUM_LIT:0>)<EOL>name = output_file_param_util.get_variable_name(name)<EOL>output_data.add(output_file_param_util.make_param(name, value, recursive))<EOL><DEDENT><DEDENT>mount_data = set()<EOL>for arg in mounts:<EOL><INDENT>if '<STR_LIT:U+0020>' in arg:<EOL><INDENT>key_value_pair, disk_size = arg.split('<STR_LIT:U+0020>')<EOL>name, value = split_pair(key_value_pair, '<STR_LIT:=>', <NUM_LIT:1>)<EOL>mount_data.add(mount_param_util.make_param(name, value, disk_size))<EOL><DEDENT>else:<EOL><INDENT>name, value = split_pair(arg, '<STR_LIT:=>', <NUM_LIT:1>)<EOL>mount_data.add(mount_param_util.make_param(name, value, disk_size=None))<EOL><DEDENT><DEDENT>return {<EOL>'<STR_LIT>': env_data,<EOL>'<STR_LIT>': input_data,<EOL>'<STR_LIT>': output_data,<EOL>'<STR_LIT>': label_data,<EOL>'<STR_LIT>': mount_data,<EOL>}<EOL>", "docstring": "Parse env, input, and output parameters into a job parameters and data.\n\n    Passing arguments on the command-line allows for launching a single job.\n    The env, input, and output arguments encode both the definition of the\n    job as well as the single job's values.\n\n    Env arguments are 
simple name=value pairs.\n    Input and output file arguments can contain name=value pairs or just values.\n    Either of the following is valid:\n\n      uri\n      myfile=uri\n\n    Args:\n      envs: list of environment variable job parameters\n      labels: list of labels to attach to the tasks\n      inputs: list of file input parameters\n      inputs_recursive: list of recursive directory input parameters\n      outputs: list of file output parameters\n      outputs_recursive: list of recursive directory output parameters\n      mounts: list of gcs buckets to mount\n      input_file_param_util: Utility for producing InputFileParam objects.\n      output_file_param_util: Utility for producing OutputFileParam objects.\n      mount_param_util: Utility for producing MountParam objects.\n\n    Returns:\n      job_params: a dictionary of 'envs', 'inputs', and 'outputs' that defines the\n      set of parameters and data for a job.", "id": "f6189:m11"}
{"signature": "def parse_pair_args(labels, argclass):", "body": "label_data = set()<EOL>for arg in labels:<EOL><INDENT>name, value = split_pair(arg, '<STR_LIT:=>', nullable_idx=<NUM_LIT:1>)<EOL>label_data.add(argclass(name, value))<EOL><DEDENT>return label_data<EOL>", "docstring": "Parse flags of key=value pairs and return a list of argclass.\n\n    For pair variables, we need to:\n       * split the input into name=value pairs (value optional)\n       * Create the EnvParam object\n\n    Args:\n      labels: list of 'key' or 'key=value' strings.\n      argclass: Container class for args, must instantiate with argclass(k, v).\n\n    Returns:\n      list of argclass objects.", "id": "f6189:m10"}
{"signature": "def get_local_mounts(mounts):", "body": "return _get_filtered_mounts(mounts, job_model.LocalMountParam)<EOL>", "docstring": "Returns the local mounts from mounts.", "id": "f6189:m4"}
{"signature": "def _parse_local_mount_uri(self, raw_uri):", "body": "raw_uri = directory_fmt(raw_uri)<EOL>_, docker_path = _local_uri_rewriter(raw_uri)<EOL>local_path = docker_path[len('<STR_LIT:file>'):]<EOL>docker_uri = os.path.join(self._relative_path, docker_path)<EOL>return local_path, docker_uri<EOL>", "docstring": "Return a valid docker_path for a local file path.", "id": "f6189:c4:m2"}
{"signature": "def _parse_image_uri(self, raw_uri):", "body": "<EOL>docker_uri = os.path.join(self._relative_path,<EOL>raw_uri.replace('<STR_LIT>', '<STR_LIT>', <NUM_LIT:1>))<EOL>return docker_uri<EOL>", "docstring": "Return a valid docker_path from a Google Persistent Disk url.", "id": "f6189:c4:m1"}
{"signature": "@staticmethod<EOL><INDENT>def parse_file_provider(uri):<DEDENT>", "body": "providers = {'<STR_LIT>': job_model.P_GCS, '<STR_LIT:file>': job_model.P_LOCAL}<EOL>provider_found = re.match(r'<STR_LIT>', uri)<EOL>if provider_found:<EOL><INDENT>prefix = provider_found.group(<NUM_LIT:1>).lower()<EOL><DEDENT>else:<EOL><INDENT>prefix = '<STR_LIT:file>'<EOL><DEDENT>if prefix in providers:<EOL><INDENT>return providers[prefix]<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % prefix)<EOL><DEDENT>", "docstring": "Find the file provider for a URI.", "id": "f6189:c1:m3"}
{"signature": "def split_pair(pair_string, separator, nullable_idx=<NUM_LIT:1>):", "body": "pair = pair_string.split(separator, <NUM_LIT:1>)<EOL>if len(pair) == <NUM_LIT:1>:<EOL><INDENT>if nullable_idx == <NUM_LIT:0>:<EOL><INDENT>return [None, pair[<NUM_LIT:0>]]<EOL><DEDENT>elif nullable_idx == <NUM_LIT:1>:<EOL><INDENT>return [pair[<NUM_LIT:0>], None]<EOL><DEDENT>else:<EOL><INDENT>raise IndexError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return pair<EOL><DEDENT>", "docstring": "Split a string into a pair, which can have one empty value.\n\n    Args:\n      pair_string: The string to be split.\n      separator: The separator to be used for splitting.\n      nullable_idx: The location to be set to null if the separator is not in the\n                    input string. Should be either 0 or 1.\n\n    Returns:\n      A list containing the pair.\n\n    Raises:\n      IndexError: If nullable_idx is not 0 or 1.", "id": "f6189:m7"}
{"signature": "def validate_submit_args_or_fail(job_descriptor, provider_name, input_providers,<EOL>output_providers, logging_providers):", "body": "job_resources = job_descriptor.job_resources<EOL>job_params = job_descriptor.job_params<EOL>task_descriptors = job_descriptor.task_descriptors<EOL>_validate_providers([job_resources.logging], '<STR_LIT>', logging_providers,<EOL>provider_name)<EOL>_validate_providers(job_params['<STR_LIT>'], '<STR_LIT:input>', input_providers,<EOL>provider_name)<EOL>_validate_providers(job_params['<STR_LIT>'], '<STR_LIT>', output_providers,<EOL>provider_name)<EOL>for task_descriptor in task_descriptors:<EOL><INDENT>_validate_providers(task_descriptor.task_params['<STR_LIT>'], '<STR_LIT:input>',<EOL>input_providers, provider_name)<EOL>_validate_providers(task_descriptor.task_params['<STR_LIT>'], '<STR_LIT>',<EOL>output_providers, provider_name)<EOL><DEDENT>", "docstring": "Validate that arguments passed to submit_job have valid file providers.\n\n    This utility function takes resources and task data args from `submit_job`\n    in the base provider. This function will fail with a value error if any of the\n    parameters are not valid. See the following example;\n\n    >>> job_resources = type('', (object,),\n    ...    {\"logging\": job_model.LoggingParam('gs://logtemp', job_model.P_GCS)})()\n    >>> job_params={'inputs': set(), 'outputs': set(), 'mounts': set()}\n    >>> task_descriptors = [\n    ...     job_model.TaskDescriptor(None, {\n    ...       'inputs': {\n    ...           job_model.FileParam('IN', uri='gs://in/*',\n    ...                               file_provider=job_model.P_GCS)},\n    ...       'outputs': set()}, None),\n    ...     job_model.TaskDescriptor(None, {\n    ...       'inputs': set(),\n    ...       'outputs': {\n    ...           job_model.FileParam('OUT', uri='gs://out/*',\n    ...                               
file_provider=job_model.P_GCS)}}, None)]\n    ...\n    >>> validate_submit_args_or_fail(job_model.JobDescriptor(None, job_params,\n    ...                              job_resources, task_descriptors),\n    ...                              provider_name='MYPROVIDER',\n    ...                              input_providers=[job_model.P_GCS],\n    ...                              output_providers=[job_model.P_GCS],\n    ...                              logging_providers=[job_model.P_GCS])\n    ...\n    >>> validate_submit_args_or_fail(job_model.JobDescriptor(None, job_params,\n    ...                              job_resources, task_descriptors),\n    ...                              provider_name='MYPROVIDER',\n    ...                              input_providers=[job_model.P_GCS],\n    ...                              output_providers=[job_model.P_LOCAL],\n    ...                              logging_providers=[job_model.P_GCS])\n    Traceback (most recent call last):\n         ...\n    ValueError: Unsupported output path (gs://out/*) for provider 'MYPROVIDER'.\n\n    Args:\n      job_descriptor: instance of job_model.JobDescriptor.\n      provider_name: (str) the name of the execution provider.\n      input_providers: (string collection) whitelist of file providers for input.\n      output_providers: (string collection) whitelist of providers for output.\n      logging_providers: (string collection) whitelist of providers for logging.\n\n    Raises:\n      ValueError: if any file providers do not match the whitelists.", "id": "f6189:m13"}
{"signature": "def build_logging_param(logging_uri, util_class=OutputFileParamUtil):", "body": "if not logging_uri:<EOL><INDENT>return job_model.LoggingParam(None, None)<EOL><DEDENT>recursive = not logging_uri.endswith('<STR_LIT>')<EOL>oututil = util_class('<STR_LIT>')<EOL>_, uri, provider = oututil.parse_uri(logging_uri, recursive)<EOL>if '<STR_LIT:*>' in uri.basename:<EOL><INDENT>raise ValueError('<STR_LIT>' % uri)<EOL><DEDENT>return job_model.LoggingParam(uri, provider)<EOL>", "docstring": "Convenience function simplifies construction of the logging uri.", "id": "f6189:m6"}
{"signature": "def _parse_gcs_uri(self, raw_uri):", "body": "<EOL>raw_uri = directory_fmt(raw_uri)<EOL>_, docker_path = _gcs_uri_rewriter(raw_uri)<EOL>docker_uri = os.path.join(self._relative_path, docker_path)<EOL>return docker_uri<EOL>", "docstring": "Return a valid docker_path for a GCS bucket.", "id": "f6189:c4:m3"}
{"signature": "def _get_storage_service(credentials):", "body": "if credentials is None:<EOL><INDENT>credentials = oauth2client.client.GoogleCredentials.get_application_default(<EOL>)<EOL><DEDENT>return discovery.build('<STR_LIT>', '<STR_LIT>', credentials=credentials)<EOL>", "docstring": "Get a storage client using the provided credentials or defaults.", "id": "f6191:m6"}
{"signature": "def load_file(file_path, credentials=None):", "body": "if file_path.startswith('<STR_LIT>'):<EOL><INDENT>return _load_file_from_gcs(file_path, credentials)<EOL><DEDENT>else:<EOL><INDENT>return open(file_path, '<STR_LIT:r>')<EOL><DEDENT>", "docstring": "Load a file from either local or gcs.\n\n    Args:\n      file_path: The target file path, which should have the prefix 'gs://' if\n                 to be loaded from gcs.\n      credentials: Optional credential to be used to load the file from gcs.\n\n    Returns:\n      A python File object if loading file from local or a StringIO object if\n      loading from gcs.", "id": "f6191:m10"}
{"signature": "@contextmanager<EOL>def replace_print(fileobj=sys.stderr):", "body": "printer = _Printer(fileobj)<EOL>previous_stdout = sys.stdout<EOL>sys.stdout = printer<EOL>try:<EOL><INDENT>yield printer<EOL><DEDENT>finally:<EOL><INDENT>sys.stdout = previous_stdout<EOL><DEDENT>", "docstring": "Sys.out replacer, by default with stderr.\n\n    Use it like this:\n    with replace_print_with(fileobj):\n      print \"hello\"  # writes to the file\n    print \"done\"  # prints to stdout\n\n    Args:\n      fileobj: a file object to replace stdout.\n\n    Yields:\n      The printer.", "id": "f6191:m1"}
{"signature": "def _load_file_from_gcs(gcs_file_path, credentials=None):", "body": "gcs_service = _get_storage_service(credentials)<EOL>bucket_name, object_name = gcs_file_path[len('<STR_LIT>'):].split('<STR_LIT:/>', <NUM_LIT:1>)<EOL>request = gcs_service.objects().get_media(<EOL>bucket=bucket_name, object=object_name)<EOL>file_handle = io.BytesIO()<EOL>downloader = MediaIoBaseDownload(file_handle, request, chunksize=<NUM_LIT> * <NUM_LIT>)<EOL>done = False<EOL>while not done:<EOL><INDENT>_, done = _downloader_next_chunk(downloader)<EOL><DEDENT>filevalue = file_handle.getvalue()<EOL>if not isinstance(filevalue, six.string_types):<EOL><INDENT>filevalue = filevalue.decode()<EOL><DEDENT>return six.StringIO(filevalue)<EOL>", "docstring": "Load context from a text file in gcs.\n\n    Args:\n      gcs_file_path: The target file path; should have the 'gs://' prefix.\n      credentials: Optional credential to be used to load the file from gcs.\n\n    Returns:\n      The content of the text file as a string.", "id": "f6191:m9"}
{"signature": "def __lt__(self, other):", "body": "<EOL>if self.priority == other.priority:<EOL><INDENT>localkeys = self.next_value.keys()<EOL>otherkeys = other.next_value.keys()<EOL>shared_keys = sorted(set(localkeys).intersection(otherkeys))<EOL>for key in shared_keys:<EOL><INDENT>if self.next_value[key] != other.next_value[key]:<EOL><INDENT>return self.next_value[key] < other.next_value[key]<EOL><DEDENT><DEDENT><DEDENT>return self.priority < other.priority<EOL>", "docstring": "Allow order testing via '<' operator, needed by priority queue.", "id": "f6192:c0:m1"}
{"signature": "def ensure_task_params_are_complete(task_descriptors):", "body": "for task_desc in task_descriptors:<EOL><INDENT>for param in [<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>'<EOL>]:<EOL><INDENT>if not task_desc.task_params.get(param):<EOL><INDENT>task_desc.task_params[param] = set()<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "For each task, ensure that each task param entry is not None.", "id": "f6194:m4"}
{"signature": "@classmethod<EOL><INDENT>def from_yaml(cls, yaml_string):<DEDENT>", "body": "try:<EOL><INDENT>job = yaml.full_load(yaml_string)<EOL><DEDENT>except AttributeError:<EOL><INDENT>job = yaml.load(yaml_string)<EOL><DEDENT>dsub_version = job.get('<STR_LIT>')<EOL>if not dsub_version:<EOL><INDENT>return cls._from_yaml_v0(job)<EOL><DEDENT>job_metadata = {}<EOL>for key in [<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'<EOL>]:<EOL><INDENT>if job.get(key) is not None:<EOL><INDENT>job_metadata[key] = job.get(key)<EOL><DEDENT><DEDENT>job_metadata['<STR_LIT>'] = dsub_util.replace_timezone(<EOL>job.get('<STR_LIT>'), pytz.utc)<EOL>job_resources = Resources(logging=job.get('<STR_LIT>'))<EOL>job_params = {}<EOL>job_params['<STR_LIT>'] = cls._label_params_from_dict(job.get('<STR_LIT>', {}))<EOL>job_params['<STR_LIT>'] = cls._env_params_from_dict(job.get('<STR_LIT>', {}))<EOL>job_params['<STR_LIT>'] = cls._input_file_params_from_dict(<EOL>job.get('<STR_LIT>', {}), False)<EOL>job_params['<STR_LIT>'] = cls._input_file_params_from_dict(<EOL>job.get('<STR_LIT>', {}), True)<EOL>job_params['<STR_LIT>'] = cls._output_file_params_from_dict(<EOL>job.get('<STR_LIT>', {}), False)<EOL>job_params['<STR_LIT>'] = cls._output_file_params_from_dict(<EOL>job.get('<STR_LIT>', {}), True)<EOL>job_params['<STR_LIT>'] = cls._mount_params_from_dict(job.get('<STR_LIT>', {}))<EOL>task_descriptors = []<EOL>for task in job.get('<STR_LIT>', []):<EOL><INDENT>task_metadata = {'<STR_LIT>': task.get('<STR_LIT>')}<EOL>create_time = task.get('<STR_LIT>')<EOL>if create_time:<EOL><INDENT>task_metadata['<STR_LIT>'] = dsub_util.replace_timezone(<EOL>create_time, pytz.utc)<EOL><DEDENT>if task.get('<STR_LIT>') is not None:<EOL><INDENT>task_metadata['<STR_LIT>'] = task.get('<STR_LIT>')<EOL><DEDENT>task_params = {}<EOL>task_params['<STR_LIT>'] = cls._label_params_from_dict(<EOL>task.get('<STR_LIT>', {}))<EOL>task_params['<STR_LIT>'] = 
cls._env_params_from_dict(task.get('<STR_LIT>', {}))<EOL>task_params['<STR_LIT>'] = cls._input_file_params_from_dict(<EOL>task.get('<STR_LIT>', {}), False)<EOL>task_params['<STR_LIT>'] = cls._input_file_params_from_dict(<EOL>task.get('<STR_LIT>', {}), True)<EOL>task_params['<STR_LIT>'] = cls._output_file_params_from_dict(<EOL>task.get('<STR_LIT>', {}), False)<EOL>task_params['<STR_LIT>'] = cls._output_file_params_from_dict(<EOL>task.get('<STR_LIT>', {}), True)<EOL>task_resources = Resources(logging_path=task.get('<STR_LIT>'))<EOL>task_descriptors.append(<EOL>TaskDescriptor(task_metadata, task_params, task_resources))<EOL><DEDENT>return JobDescriptor(job_metadata, job_params, job_resources,<EOL>task_descriptors)<EOL>", "docstring": "Populate and return a JobDescriptor from a YAML string.", "id": "f6194:c14:m12"}
{"signature": "def numeric_task_id(task_id):", "body": "<EOL>if task_id is not None:<EOL><INDENT>if task_id.startswith('<STR_LIT>'):<EOL><INDENT>return int(task_id[len('<STR_LIT>'):])<EOL><DEDENT>else:<EOL><INDENT>return int(task_id)<EOL><DEDENT><DEDENT>", "docstring": "Converts a task-id to the numeric task-id.\n\n    Args:\n      task_id: task-id in either task-n or n format\n\n    Returns:\n      n", "id": "f6194:m7"}
{"signature": "def to_yaml(self):", "body": "return yaml.dump(self.to_serializable(), default_flow_style=False)<EOL>", "docstring": "Return a YAML string representing the job and task data.\n\n        A provider's internal representation of a dsub task typically does not map\n        1-1 to the dsub representation. For example, the Google Genomics Pipeline\n        does not natively support \"input-recursive\" or \"output-recursive\", so the\n        google provider cannot easily reconstruct the user inputs from the\n        pipeline's associated Operation object.\n\n        All providers are likely to need a way to reliably serialize job and task-\n        related information, either for dstat or for any type of \"retry\" mechanism\n        we might want to build.\n\n        Returns:\n          YAML string", "id": "f6194:c14:m5"}
{"signature": "def to_serializable(self):", "body": "task_metadata = self.task_metadata<EOL>task_params = self.task_params<EOL>task_resources = self.task_resources<EOL>task_id = None<EOL>if task_metadata.get('<STR_LIT>') is not None:<EOL><INDENT>task_id = str(task_metadata.get('<STR_LIT>'))<EOL><DEDENT>task = {'<STR_LIT>': task_id}<EOL>task['<STR_LIT>'] = task_metadata.get('<STR_LIT>')<EOL>task['<STR_LIT>'] = task_metadata.get('<STR_LIT>')<EOL>if task_resources.logging_path:<EOL><INDENT>task['<STR_LIT>'] = str(task_resources.logging_path.uri)<EOL><DEDENT>task['<STR_LIT>'] = {var.name: var.value for var in task_params['<STR_LIT>']}<EOL>task['<STR_LIT>'] = {var.name: var.value for var in task_params['<STR_LIT>']}<EOL>task['<STR_LIT>'] = {<EOL>var.name: var.value<EOL>for var in task_params['<STR_LIT>']<EOL>if not var.recursive<EOL>}<EOL>task['<STR_LIT>'] = {<EOL>var.name: var.value<EOL>for var in task_params['<STR_LIT>']<EOL>if var.recursive<EOL>}<EOL>task['<STR_LIT>'] = {<EOL>var.name: var.value<EOL>for var in task_params['<STR_LIT>']<EOL>if not var.recursive<EOL>}<EOL>task['<STR_LIT>'] = {<EOL>var.name: var.value<EOL>for var in task_params['<STR_LIT>']<EOL>if var.recursive<EOL>}<EOL>return _remove_empty_items(task, ['<STR_LIT>'])<EOL>", "docstring": "Return a dict populated for serialization (as YAML/JSON).", "id": "f6194:c13:m4"}
{"signature": "@classmethod<EOL><INDENT>def _validate_label(cls, name, value):<DEDENT>", "body": "<EOL>cls._check_label_name(name)<EOL>cls._check_label_value(value)<EOL>if not cls._allow_reserved_keys and name in RESERVED_LABELS:<EOL><INDENT>raise ValueError('<STR_LIT>' %<EOL>(name, list(RESERVED_LABELS)))<EOL><DEDENT>", "docstring": "Raise ValueError if the label is invalid.", "id": "f6194:c4:m1"}
{"signature": "def build_recursive_gcs_delocalize_env(source, outputs):", "body": "filtered_outs = [<EOL>var for var in outputs<EOL>if var.recursive and var.file_provider == job_model.P_GCS<EOL>]<EOL>return '<STR_LIT:\\n>'.join([<EOL>'<STR_LIT>'.format(var.name,<EOL>source.rstrip('<STR_LIT:/>'),<EOL>var.docker_path.rstrip('<STR_LIT:/>'))<EOL>for var in filtered_outs<EOL>])<EOL>", "docstring": "Return a multi-line string with export statements for the variables.\n\n    Arguments:\n      source: Folder with the data.\n              For example /mnt/data\n      outputs: a list of OutputFileParam\n\n    Returns:\n      a multi-line string with a shell script that sets environment variables\n      corresponding to the outputs.", "id": "f6195:m3"}
{"signature": "def build_recursive_localize_env(destination, inputs):", "body": "export_input_dirs = '<STR_LIT:\\n>'.join([<EOL>'<STR_LIT>'.format(var.name, destination.rstrip('<STR_LIT:/>'),<EOL>var.docker_path.rstrip('<STR_LIT:/>'))<EOL>for var in inputs<EOL>if var.recursive and var.docker_path<EOL>])<EOL>return export_input_dirs<EOL>", "docstring": "Return a multi-line string with export statements for the variables.\n\n    Arguments:\n      destination: Folder where the data will be put.\n                   For example /mnt/data\n      inputs: a list of InputFileParam\n\n    Returns:\n      a multi-line string with a shell script that sets environment variables\n      corresponding to the inputs.", "id": "f6195:m1"}
{"signature": "def build_mount_env(source, mounts):", "body": "return '<STR_LIT:\\n>'.join([<EOL>'<STR_LIT>'.format(var.name, source.rstrip('<STR_LIT:/>'),<EOL>var.docker_path.rstrip('<STR_LIT:/>')) for var in mounts<EOL>])<EOL>", "docstring": "Return a multi-line string with export statements for the variables.\n\n    Arguments:\n      source: Folder with the data. For example /mnt/data\n      mounts: a list of MountParam\n\n    Returns:\n      a multi-line string with a shell script that sets environment variables\n      corresponding to the mounts.", "id": "f6195:m6"}
{"signature": "def delete_jobs(self,<EOL>user_ids,<EOL>job_ids,<EOL>task_ids,<EOL>labels,<EOL>create_time_min=None,<EOL>create_time_max=None):", "body": "<EOL>tasks = list(<EOL>self.lookup_job_tasks(<EOL>{'<STR_LIT>'},<EOL>user_ids=user_ids,<EOL>job_ids=job_ids,<EOL>task_ids=task_ids,<EOL>labels=labels,<EOL>create_time_min=create_time_min,<EOL>create_time_max=create_time_max))<EOL>print('<STR_LIT>' % len(tasks))<EOL>return google_base.cancel(self._service.new_batch_http_request,<EOL>self._service.operations().cancel, tasks)<EOL>", "docstring": "Kills the operations associated with the specified job or job.task.\n\n        Args:\n          user_ids: List of user ids who \"own\" the job(s) to cancel.\n          job_ids: List of job_ids to cancel.\n          task_ids: List of task-ids to cancel.\n          labels: List of LabelParam, each must match the job(s) to be canceled.\n          create_time_min: a timezone-aware datetime value for the earliest create\n                           time of a task, inclusive.\n          create_time_max: a timezone-aware datetime value for the most recent\n                           create time of a task, inclusive.\n\n        Returns:\n          A list of tasks canceled and a list of error messages.", "id": "f6197:c2:m6"}
{"signature": "@staticmethod<EOL><INDENT>def _datetime_to_utc_int(date):<DEDENT>", "body": "if date is None:<EOL><INDENT>return None<EOL><DEDENT>epoch = dsub_util.replace_timezone(datetime.utcfromtimestamp(<NUM_LIT:0>), pytz.utc)<EOL>return (date - epoch).total_seconds()<EOL>", "docstring": "Convert the integer UTC time value into a local datetime.", "id": "f6197:c1:m0"}
{"signature": "@classmethod<EOL><INDENT>def build_pipeline_args(cls, project, script, job_params, task_params,<EOL>reserved_labels, preemptible, logging_uri, scopes,<EOL>keep_alive):<DEDENT>", "body": "<EOL>inputs = {}<EOL>inputs.update({SCRIPT_VARNAME: script})<EOL>inputs.update({<EOL>var.name: var.value<EOL>for var in job_params['<STR_LIT>'] | task_params['<STR_LIT>']<EOL>if var.value<EOL>})<EOL>inputs.update({<EOL>var.name: var.uri<EOL>for var in job_params['<STR_LIT>'] | task_params['<STR_LIT>']<EOL>if not var.recursive and var.value<EOL>})<EOL>outputs = {}<EOL>for var in job_params['<STR_LIT>'] | task_params['<STR_LIT>']:<EOL><INDENT>if var.recursive or not var.value:<EOL><INDENT>continue<EOL><DEDENT>if '<STR_LIT:*>' in var.uri.basename:<EOL><INDENT>outputs[var.name] = var.uri.path<EOL><DEDENT>else:<EOL><INDENT>outputs[var.name] = var.uri<EOL><DEDENT><DEDENT>labels = {}<EOL>labels.update({<EOL>label.name: label.value if label.value else '<STR_LIT>'<EOL>for label in (reserved_labels | job_params['<STR_LIT>']<EOL>| task_params['<STR_LIT>'])<EOL>})<EOL>args = {<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': project,<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': preemptible,<EOL>},<EOL>'<STR_LIT>': inputs,<EOL>'<STR_LIT>': outputs,<EOL>'<STR_LIT>': labels,<EOL>'<STR_LIT>': {<EOL>'<STR_LIT:email>': '<STR_LIT:default>',<EOL>'<STR_LIT>': scopes,<EOL>},<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': logging_uri<EOL>},<EOL>}<EOL>}<EOL>if keep_alive:<EOL><INDENT>args['<STR_LIT>'][<EOL>'<STR_LIT>'] = '<STR_LIT>' % keep_alive<EOL><DEDENT>return args<EOL>", "docstring": "Builds pipeline args for execution.\n\n        Args:\n          project: string name of project.\n          script: Body of the script to execute.\n          job_params: dictionary of values for labels, envs, inputs, and outputs\n              for this job.\n          task_params: dictionary of values for labels, envs, inputs, and outputs\n              for this task.\n          reserved_labels: dictionary of reserved labels (e.g. 
task-id,\n              task-attempt)\n          preemptible: use a preemptible VM for the job\n          logging_uri: path for job logging output.\n          scopes: list of scope.\n          keep_alive: Seconds to keep VM alive on failure\n\n        Returns:\n          A nested dictionary with one entry under the key pipelineArgs containing\n          the pipeline arguments.", "id": "f6197:c0:m4"}
{"signature": "@classmethod<EOL><INDENT>def list(cls, service, ops_filter, page_size=<NUM_LIT:0>):<DEDENT>", "body": "page_token = None<EOL>more_operations = True<EOL>documented_default_page_size = <NUM_LIT><EOL>documented_max_page_size = <NUM_LIT><EOL>if not page_size:<EOL><INDENT>page_size = documented_default_page_size<EOL><DEDENT>page_size = min(page_size, documented_max_page_size)<EOL>while more_operations:<EOL><INDENT>api = service.operations().list(<EOL>name='<STR_LIT>',<EOL>filter=ops_filter,<EOL>pageToken=page_token,<EOL>pageSize=page_size)<EOL>response = google_base.Api.execute(api)<EOL>ops = response.get('<STR_LIT>', [])<EOL>for op in ops:<EOL><INDENT>if cls.is_dsub_operation(op):<EOL><INDENT>yield GoogleOperation(op)<EOL><DEDENT><DEDENT>page_token = response.get('<STR_LIT>')<EOL>more_operations = bool(page_token)<EOL><DEDENT>", "docstring": "Gets the list of operations for the specified filter.\n\n        Args:\n          service: Google Genomics API service object\n          ops_filter: string filter of operations to return\n          page_size: the number of operations to requested on each list operation to\n            the pipelines API (if 0 or None, the API default is used)\n\n        Yields:\n          Operations matching the filter criteria.", "id": "f6197:c1:m6"}
{"signature": "@classmethod<EOL><INDENT>def build_pipeline(cls, project, zones, min_cores, min_ram, disk_size,<EOL>boot_disk_size, preemptible, accelerator_type,<EOL>accelerator_count, image, script_name, envs, inputs,<EOL>outputs, pipeline_name):<DEDENT>", "body": "if min_cores is None:<EOL><INDENT>min_cores = job_model.DEFAULT_MIN_CORES<EOL><DEDENT>if min_ram is None:<EOL><INDENT>min_ram = job_model.DEFAULT_MIN_RAM<EOL><DEDENT>if disk_size is None:<EOL><INDENT>disk_size = job_model.DEFAULT_DISK_SIZE<EOL><DEDENT>if boot_disk_size is None:<EOL><INDENT>boot_disk_size = job_model.DEFAULT_BOOT_DISK_SIZE<EOL><DEDENT>if preemptible is None:<EOL><INDENT>preemptible = job_model.DEFAULT_PREEMPTIBLE<EOL><DEDENT>docker_command = cls._build_pipeline_docker_command(script_name, inputs,<EOL>outputs, envs)<EOL>input_envs = [{<EOL>'<STR_LIT:name>': SCRIPT_VARNAME<EOL>}] + [{<EOL>'<STR_LIT:name>': env.name<EOL>} for env in envs if env.value]<EOL>input_files = [<EOL>cls._build_pipeline_input_file_param(var.name, var.docker_path)<EOL>for var in inputs<EOL>if not var.recursive and var.value<EOL>]<EOL>output_files = [<EOL>cls._build_pipeline_file_param(var.name, var.docker_path)<EOL>for var in outputs<EOL>if not var.recursive and var.value<EOL>]<EOL>return {<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': project,<EOL>'<STR_LIT:name>': pipeline_name,<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': min_cores,<EOL>'<STR_LIT>': min_ram,<EOL>'<STR_LIT>': boot_disk_size,<EOL>'<STR_LIT>': preemptible,<EOL>'<STR_LIT>': google_base.get_zones(zones),<EOL>'<STR_LIT>': accelerator_type,<EOL>'<STR_LIT>': accelerator_count,<EOL>'<STR_LIT>': [{<EOL>'<STR_LIT:name>': '<STR_LIT>',<EOL>'<STR_LIT>': True,<EOL>'<STR_LIT>': disk_size,<EOL>'<STR_LIT>': providers_util.DATA_MOUNT_POINT,<EOL>}],<EOL>},<EOL>'<STR_LIT>': input_envs + input_files,<EOL>'<STR_LIT>': output_files,<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': image,<EOL>'<STR_LIT>': docker_command,<EOL>}<EOL>}<EOL>}<EOL>", "docstring": "Builds a pipeline configuration for 
execution.\n\n        Args:\n          project: string name of project.\n          zones: list of zone names for jobs to be run at.\n          min_cores: int number of CPU cores required per job.\n          min_ram: int GB of RAM required per job.\n          disk_size: int GB of disk to attach under /mnt/data.\n          boot_disk_size: int GB of disk for boot.\n          preemptible: use a preemptible VM for the job\n          accelerator_type: string GCE defined accelerator type.\n          accelerator_count: int number of accelerators of the specified type to\n            attach.\n          image: string Docker image name in which to run.\n          script_name: file name of the script to run.\n          envs: list of EnvParam objects specifying environment variables to set\n            within each job.\n          inputs: list of FileParam objects specifying input variables to set\n            within each job.\n          outputs: list of FileParam objects specifying output variables to set\n            within each job.\n          pipeline_name: string name of pipeline.\n\n        Returns:\n          A nested dictionary with one entry under the key ephemeralPipeline\n          containing the pipeline configuration.", "id": "f6197:c0:m3"}
{"signature": "def _operation_status(self):", "body": "if not self._op['<STR_LIT>']:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT:error>' not in self._op:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if self._op['<STR_LIT:error>'].get('<STR_LIT:code>', <NUM_LIT:0>) == <NUM_LIT:1>:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Returns the status of this operation.\n\n        ie. RUNNING, SUCCESS, CANCELED or FAILURE.\n\n        Returns:\n          A printable status string", "id": "f6197:c3:m3"}
{"signature": "def error_message(self):", "body": "if '<STR_LIT:error>' in self._op:<EOL><INDENT>if '<STR_LIT>' in self._op['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>job_id = self._op['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>job_id = self._op['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>return '<STR_LIT>' % (job_id,<EOL>self._op['<STR_LIT:error>']['<STR_LIT:code>'],<EOL>self._op['<STR_LIT:error>']['<STR_LIT:message>'])<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>", "docstring": "Returns an error message if the operation failed for any reason.\n\n        Failure as defined here means; ended for any reason other than 'success'.\n        This means that a successful cancelation will also create an error message\n        here.\n\n        Returns:\n          string, string will be empty if job did not error.", "id": "f6197:c3:m7"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def delete_jobs(self,<EOL>user_ids,<EOL>job_ids,<EOL>task_ids,<EOL>labels,<EOL>create_time_min=None,<EOL>create_time_max=None):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Kills the operations associated with the specified job or job.task.\n\n        Some providers may provide only a \"cancel\" operation, which terminates the\n        task but does not truly \"delete\" it from the \"task list\".\n\n        Args:\n          user_ids: a set of user ids who \"own\" the job(s) to delete.\n          job_ids: a set of job ids to delete.\n          task_ids: a set of task ids to delete.\n          labels: a set of LabelParam, each must match the job(s) to be cancelled.\n          create_time_min: a timezone-aware datetime value for the earliest create\n                           time of a task, inclusive.\n          create_time_max: a timezone-aware datetime value for the most recent\n                           create time of a task, inclusive.\n\n        Returns:\n          (list of tasks canceled,\n           for each task that couldn't be canceled, the error message).\n\n          Only tasks that were running are included in the return value.", "id": "f6198:c0:m2"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def submit_job(self, job_descriptor, skip_if_output_present):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Submit the job to be executed.\n\n        Args:\n          job_descriptor (job_model.JobDescriptor): parameters needed to launch all\n          job tasks\n          skip_if_output_present: (boolean) if true, skip tasks whose output\n            is present (see --skip flag for more explanation).\n\n\n        Returns:\n          A dictionary containing the 'user-id', 'job-id', and 'task-id' list.\n          For jobs that are not task array jobs, the task-id list should be empty.\n          If all tasks were skipped, then the job-id is dsub_lib.NO_JOB.\n\n\n        Raises:\n          ValueError: submit_job may validate any of the parameters and raise\n            a value error if any parameter (or specific combination of parameters)\n            is not supported by the provider.", "id": "f6198:c0:m1"}
{"signature": "def parse_rfc3339_utc_string(rfc3339_utc_string):", "body": "<EOL>m = re.match(r'<STR_LIT>',<EOL>rfc3339_utc_string)<EOL>if not m:<EOL><INDENT>return None<EOL><DEDENT>groups = m.groups()<EOL>if len(groups[<NUM_LIT:6>]) not in (<NUM_LIT:0>, <NUM_LIT:3>, <NUM_LIT:6>, <NUM_LIT:9>):<EOL><INDENT>return None<EOL><DEDENT>g = [int(val) for val in groups[:<NUM_LIT:6>]]<EOL>fraction = groups[<NUM_LIT:6>]<EOL>if not fraction:<EOL><INDENT>micros = <NUM_LIT:0><EOL><DEDENT>elif len(fraction) == <NUM_LIT:3>:<EOL><INDENT>micros = int(fraction) * <NUM_LIT:1000><EOL><DEDENT>elif len(fraction) == <NUM_LIT:6>:<EOL><INDENT>micros = int(fraction)<EOL><DEDENT>elif len(fraction) == <NUM_LIT:9>:<EOL><INDENT>micros = int(round(int(fraction) / <NUM_LIT:1000>))<EOL><DEDENT>else:<EOL><INDENT>assert False, '<STR_LIT>'.len(fraction)<EOL><DEDENT>try:<EOL><INDENT>return datetime(g[<NUM_LIT:0>], g[<NUM_LIT:1>], g[<NUM_LIT:2>], g[<NUM_LIT:3>], g[<NUM_LIT:4>], g[<NUM_LIT:5>], micros, tzinfo=pytz.utc)<EOL><DEDENT>except ValueError as e:<EOL><INDENT>assert False, '<STR_LIT>'.format(<EOL>rfc3339_utc_string, e)<EOL><DEDENT>", "docstring": "Converts a datestamp from RFC3339 UTC to a datetime.\n\n    Args:\n      rfc3339_utc_string: a datetime string in RFC3339 UTC \"Zulu\" format\n\n    Returns:\n      A datetime.", "id": "f6199:m5"}
{"signature": "def _cancel_batch(batch_fn, cancel_fn, ops):", "body": "<EOL>canceled = []<EOL>failed = []<EOL>def handle_cancel_response(request_id, response, exception):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>del response  <EOL>if exception:<EOL><INDENT>msg = '<STR_LIT>' % (exception.resp.status, exception.resp.reason)<EOL>if exception.resp.status == FAILED_PRECONDITION_CODE:<EOL><INDENT>detail = json.loads(exception.content)<EOL>status = detail.get('<STR_LIT:error>', {}).get('<STR_LIT:status>')<EOL>if status == FAILED_PRECONDITION_STATUS:<EOL><INDENT>msg = '<STR_LIT>'<EOL><DEDENT><DEDENT>failed.append({'<STR_LIT:name>': request_id, '<STR_LIT>': msg})<EOL><DEDENT>else:<EOL><INDENT>canceled.append({'<STR_LIT:name>': request_id})<EOL><DEDENT>return<EOL><DEDENT>batch = batch_fn(callback=handle_cancel_response)<EOL>ops_by_name = {}<EOL>for op in ops:<EOL><INDENT>op_name = op.get_field('<STR_LIT>')<EOL>ops_by_name[op_name] = op<EOL>batch.add(cancel_fn(name=op_name, body={}), request_id=op_name)<EOL><DEDENT>batch.execute()<EOL>canceled_ops = [ops_by_name[op['<STR_LIT:name>']] for op in canceled]<EOL>error_messages = []<EOL>for fail in failed:<EOL><INDENT>op = ops_by_name[fail['<STR_LIT:name>']]<EOL>error_messages.append(\"<STR_LIT>\" %<EOL>(get_operation_full_job_id(op), fail['<STR_LIT>']))<EOL><DEDENT>return canceled_ops, error_messages<EOL>", "docstring": "Cancel a batch of operations.\n\n    Args:\n      batch_fn: API-specific batch function.\n      cancel_fn: API-specific cancel function.\n      ops: A list of operations to cancel.\n\n    Returns:\n      A list of operations canceled and a list of error messages.", "id": "f6199:m7"}
{"signature": "@retrying.retry(<EOL>stop_max_attempt_number=<NUM_LIT>,<EOL>retry_on_exception=retry_api_check,<EOL>wait_exponential_multiplier=<NUM_LIT>,<EOL>wait_exponential_max=<NUM_LIT>)<EOL>@retrying.retry(<EOL>stop_max_attempt_number=<NUM_LIT:5>,<EOL>retry_on_exception=retry_auth_check,<EOL>wait_exponential_multiplier=<NUM_LIT>,<EOL>wait_exponential_max=<NUM_LIT>)<EOL>def setup_service(api_name, api_version, credentials=None):", "body": "if not credentials:<EOL><INDENT>credentials = oauth2client.client.GoogleCredentials.get_application_default(<EOL>)<EOL><DEDENT>return apiclient.discovery.build(<EOL>api_name, api_version, credentials=credentials)<EOL>", "docstring": "Configures genomics API client.\n\n    Args:\n      api_name: Name of the Google API (for example: \"genomics\")\n      api_version: Version of the API (for example: \"v2alpha1\")\n      credentials: Credentials to be used for the gcloud API calls.\n\n    Returns:\n      A configured Google Genomics API client with appropriate credentials.", "id": "f6199:m11"}
{"signature": "def _print_error(msg):", "body": "print(msg, file=sys.stderr)<EOL>", "docstring": "Utility routine to emit messages to stderr.", "id": "f6199:m0"}
{"signature": "def cancel(batch_fn, cancel_fn, ops):", "body": "<EOL>canceled_ops = []<EOL>error_messages = []<EOL>max_batch = <NUM_LIT><EOL>total_ops = len(ops)<EOL>for first_op in range(<NUM_LIT:0>, total_ops, max_batch):<EOL><INDENT>batch_canceled, batch_messages = _cancel_batch(<EOL>batch_fn, cancel_fn, ops[first_op:first_op + max_batch])<EOL>canceled_ops.extend(batch_canceled)<EOL>error_messages.extend(batch_messages)<EOL><DEDENT>return canceled_ops, error_messages<EOL>", "docstring": "Cancel operations.\n\n    Args:\n      batch_fn: API-specific batch function.\n      cancel_fn: API-specific cancel function.\n      ops: A list of operations to cancel.\n\n    Returns:\n      A list of operations canceled and a list of error messages.", "id": "f6199:m8"}
{"signature": "def _get_mount_actions(self, mounts, mnt_datadisk):", "body": "actions_to_add = []<EOL>for mount in mounts:<EOL><INDENT>bucket = mount.value[len('<STR_LIT>'):]<EOL>mount_path = mount.docker_path<EOL>actions_to_add.extend([<EOL>google_v2_pipelines.build_action(<EOL>name='<STR_LIT>'.format(bucket),<EOL>flags=['<STR_LIT>', '<STR_LIT>'],<EOL>image_uri=_GCSFUSE_IMAGE,<EOL>mounts=[mnt_datadisk],<EOL>commands=[<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', bucket,<EOL>os.path.join(providers_util.DATA_MOUNT_POINT, mount_path)<EOL>]),<EOL>google_v2_pipelines.build_action(<EOL>name='<STR_LIT>'.format(bucket),<EOL>flags=['<STR_LIT>'],<EOL>image_uri=_GCSFUSE_IMAGE,<EOL>mounts=[mnt_datadisk],<EOL>commands=[<EOL>'<STR_LIT>',<EOL>os.path.join(providers_util.DATA_MOUNT_POINT, mount_path)<EOL>])<EOL>])<EOL><DEDENT>return actions_to_add<EOL>", "docstring": "Returns a list of two actions per gcs bucket to mount.", "id": "f6201:c2:m7"}
{"signature": "def delete_jobs(self,<EOL>user_ids,<EOL>job_ids,<EOL>task_ids,<EOL>labels,<EOL>create_time_min=None,<EOL>create_time_max=None):", "body": "<EOL>tasks = list(<EOL>self.lookup_job_tasks(<EOL>{'<STR_LIT>'},<EOL>user_ids=user_ids,<EOL>job_ids=job_ids,<EOL>task_ids=task_ids,<EOL>labels=labels,<EOL>create_time_min=create_time_min,<EOL>create_time_max=create_time_max))<EOL>print('<STR_LIT>' % len(tasks))<EOL>return google_base.cancel(GoogleV2BatchHandler,<EOL>self._service.projects().operations().cancel,<EOL>tasks)<EOL>", "docstring": "Kills the operations associated with the specified job or job.task.\n\n        Args:\n          user_ids: List of user ids who \"own\" the job(s) to cancel.\n          job_ids: List of job_ids to cancel.\n          task_ids: List of task-ids to cancel.\n          labels: List of LabelParam, each must match the job(s) to be canceled.\n          create_time_min: a timezone-aware datetime value for the earliest create\n                           time of a task, inclusive.\n          create_time_max: a timezone-aware datetime value for the most recent\n                           create time of a task, inclusive.\n\n        Returns:\n          A list of tasks canceled and a list of error messages.", "id": "f6201:c2:m19"}
{"signature": "def _build_user_environment(self, envs, inputs, outputs, mounts):", "body": "envs = {env.name: env.value for env in envs}<EOL>envs.update(providers_util.get_file_environment_variables(inputs))<EOL>envs.update(providers_util.get_file_environment_variables(outputs))<EOL>envs.update(providers_util.get_file_environment_variables(mounts))<EOL>return envs<EOL>", "docstring": "Returns a dictionary of for the user container environment.", "id": "f6201:c2:m6"}
{"signature": "def prepare_job_metadata(self, script, job_name, user_id, create_time):", "body": "return google_base.prepare_job_metadata(script, job_name, user_id,<EOL>create_time)<EOL>", "docstring": "Returns a dictionary of metadata fields for the job.", "id": "f6201:c2:m1"}
{"signature": "def _build_pipeline_request(self, task_view):", "body": "job_metadata = task_view.job_metadata<EOL>job_params = task_view.job_params<EOL>job_resources = task_view.job_resources<EOL>task_metadata = task_view.task_descriptors[<NUM_LIT:0>].task_metadata<EOL>task_params = task_view.task_descriptors[<NUM_LIT:0>].task_params<EOL>task_resources = task_view.task_descriptors[<NUM_LIT:0>].task_resources<EOL>mnt_datadisk = google_v2_pipelines.build_mount(<EOL>disk=_DATA_DISK_NAME,<EOL>path=providers_util.DATA_MOUNT_POINT,<EOL>read_only=False)<EOL>scopes = job_resources.scopes or google_base.DEFAULT_SCOPES<EOL>labels = {<EOL>label.name: label.value if label.value else '<STR_LIT>' for label in<EOL>google_base.build_pipeline_labels(job_metadata, task_metadata)<EOL>| job_params['<STR_LIT>'] | task_params['<STR_LIT>']<EOL>}<EOL>script = task_view.job_metadata['<STR_LIT>']<EOL>user_project = task_view.job_metadata['<STR_LIT>'] or '<STR_LIT>'<EOL>envs = job_params['<STR_LIT>'] | task_params['<STR_LIT>']<EOL>inputs = job_params['<STR_LIT>'] | task_params['<STR_LIT>']<EOL>outputs = job_params['<STR_LIT>'] | task_params['<STR_LIT>']<EOL>mounts = job_params['<STR_LIT>']<EOL>gcs_mounts = param_util.get_gcs_mounts(mounts)<EOL>persistent_disk_mount_params = param_util.get_persistent_disk_mounts(mounts)<EOL>persistent_disks = [<EOL>google_v2_pipelines.build_disk(<EOL>name=disk.name.replace('<STR_LIT:_>', '<STR_LIT:->'),  <EOL>size_gb=disk.disk_size or job_model.DEFAULT_MOUNTED_DISK_SIZE,<EOL>source_image=disk.value,<EOL>disk_type=disk.disk_type or job_model.DEFAULT_DISK_TYPE)<EOL>for disk in persistent_disk_mount_params<EOL>]<EOL>persistent_disk_mounts = [<EOL>google_v2_pipelines.build_mount(<EOL>disk=persistent_disk.get('<STR_LIT:name>'),<EOL>path=os.path.join(providers_util.DATA_MOUNT_POINT,<EOL>persistent_disk_mount_param.docker_path),<EOL>read_only=True)<EOL>for persistent_disk, persistent_disk_mount_param in zip(<EOL>persistent_disks, 
persistent_disk_mount_params)<EOL>]<EOL>optional_actions = <NUM_LIT:0><EOL>if job_resources.ssh:<EOL><INDENT>optional_actions += <NUM_LIT:1><EOL><DEDENT>mount_actions = self._get_mount_actions(gcs_mounts, mnt_datadisk)<EOL>optional_actions += len(mount_actions)<EOL>user_action = <NUM_LIT:4> + optional_actions<EOL>final_logging_action = <NUM_LIT:6> + optional_actions<EOL>logging_cmd = _LOGGING_CMD.format(<EOL>log_cp_fn=_GSUTIL_CP_FN,<EOL>log_cp_cmd=_LOG_CP_CMD.format(<EOL>user_action=user_action, logging_action='<STR_LIT>'))<EOL>continuous_logging_cmd = _CONTINUOUS_LOGGING_CMD.format(<EOL>log_msg_fn=_LOG_MSG_FN,<EOL>log_cp_fn=_GSUTIL_CP_FN,<EOL>log_cp_cmd=_LOG_CP_CMD.format(<EOL>user_action=user_action,<EOL>logging_action='<STR_LIT>'),<EOL>final_logging_action=final_logging_action,<EOL>log_interval=job_resources.log_interval or '<STR_LIT>')<EOL>logging_env = self._get_logging_env(task_resources.logging_path.uri,<EOL>user_project)<EOL>script_path = os.path.join(providers_util.SCRIPT_DIR, script.name)<EOL>prepare_command = _PREPARE_CMD.format(<EOL>log_msg_fn=_LOG_MSG_FN,<EOL>mk_runtime_dirs=_MK_RUNTIME_DIRS_CMD,<EOL>script_var=_SCRIPT_VARNAME,<EOL>python_decode_script=_PYTHON_DECODE_SCRIPT,<EOL>script_path=script_path,<EOL>mk_io_dirs=_MK_IO_DIRS)<EOL>prepare_env = self._get_prepare_env(script, task_view, inputs, outputs,<EOL>mounts)<EOL>localization_env = self._get_localization_env(inputs, user_project)<EOL>user_environment = self._build_user_environment(envs, inputs, outputs,<EOL>mounts)<EOL>delocalization_env = self._get_delocalization_env(outputs, user_project)<EOL>actions = []<EOL>actions.append(<EOL>google_v2_pipelines.build_action(<EOL>name='<STR_LIT>',<EOL>flags='<STR_LIT>',<EOL>image_uri=_CLOUD_SDK_IMAGE,<EOL>environment=logging_env,<EOL>entrypoint='<STR_LIT>',<EOL>commands=['<STR_LIT:-c>', continuous_logging_cmd]))<EOL>if 
job_resources.ssh:<EOL><INDENT>actions.append(<EOL>google_v2_pipelines.build_action(<EOL>name='<STR_LIT>',<EOL>image_uri=_SSH_IMAGE,<EOL>mounts=[mnt_datadisk],<EOL>entrypoint='<STR_LIT>',<EOL>port_mappings={_DEFAULT_SSH_PORT: _DEFAULT_SSH_PORT},<EOL>flags='<STR_LIT>'))<EOL><DEDENT>actions.append(<EOL>google_v2_pipelines.build_action(<EOL>name='<STR_LIT>',<EOL>image_uri=_PYTHON_IMAGE,<EOL>mounts=[mnt_datadisk],<EOL>environment=prepare_env,<EOL>entrypoint='<STR_LIT>',<EOL>commands=['<STR_LIT:-c>', prepare_command]),)<EOL>actions.extend(mount_actions)<EOL>actions.extend([<EOL>google_v2_pipelines.build_action(<EOL>name='<STR_LIT>',<EOL>image_uri=_CLOUD_SDK_IMAGE,<EOL>mounts=[mnt_datadisk],<EOL>environment=localization_env,<EOL>entrypoint='<STR_LIT>',<EOL>commands=[<EOL>'<STR_LIT:-c>',<EOL>_LOCALIZATION_CMD.format(<EOL>log_msg_fn=_LOG_MSG_FN,<EOL>recursive_cp_fn=_GSUTIL_RSYNC_FN,<EOL>cp_fn=_GSUTIL_CP_FN,<EOL>cp_loop=_LOCALIZATION_LOOP)<EOL>]),<EOL>google_v2_pipelines.build_action(<EOL>name='<STR_LIT>',<EOL>image_uri=job_resources.image,<EOL>mounts=[mnt_datadisk] + persistent_disk_mounts,<EOL>environment=user_environment,<EOL>entrypoint='<STR_LIT>',<EOL>commands=[<EOL>'<STR_LIT>', '<STR_LIT:-c>',<EOL>_USER_CMD.format(<EOL>tmp_dir=providers_util.TMP_DIR,<EOL>working_dir=providers_util.WORKING_DIR,<EOL>user_script=script_path)<EOL>]),<EOL>google_v2_pipelines.build_action(<EOL>name='<STR_LIT>',<EOL>image_uri=_CLOUD_SDK_IMAGE,<EOL>mounts=[mnt_datadisk],<EOL>environment=delocalization_env,<EOL>entrypoint='<STR_LIT>',<EOL>commands=[<EOL>'<STR_LIT:-c>',<EOL>_LOCALIZATION_CMD.format(<EOL>log_msg_fn=_LOG_MSG_FN,<EOL>recursive_cp_fn=_GSUTIL_RSYNC_FN,<EOL>cp_fn=_GSUTIL_CP_FN,<EOL>cp_loop=_DELOCALIZATION_LOOP)<EOL>]),<EOL>google_v2_pipelines.build_action(<EOL>name='<STR_LIT>',<EOL>flags='<STR_LIT>',<EOL>image_uri=_CLOUD_SDK_IMAGE,<EOL>environment=logging_env,<EOL>entrypoint='<STR_LIT>',<EOL>commands=['<STR_LIT:-c>', logging_cmd]),<EOL>])<EOL>assert len(actions) - <NUM_LIT:2> == 
user_action<EOL>assert len(actions) == final_logging_action<EOL>disks = [<EOL>google_v2_pipelines.build_disk(<EOL>_DATA_DISK_NAME,<EOL>job_resources.disk_size,<EOL>source_image=None,<EOL>disk_type=job_resources.disk_type or job_model.DEFAULT_DISK_TYPE)<EOL>]<EOL>disks.extend(persistent_disks)<EOL>network = google_v2_pipelines.build_network(<EOL>job_resources.network, job_resources.subnetwork,<EOL>job_resources.use_private_address)<EOL>if job_resources.machine_type:<EOL><INDENT>machine_type = job_resources.machine_type<EOL><DEDENT>elif job_resources.min_cores or job_resources.min_ram:<EOL><INDENT>machine_type = GoogleV2CustomMachine.build_machine_type(<EOL>job_resources.min_cores, job_resources.min_ram)<EOL><DEDENT>else:<EOL><INDENT>machine_type = job_model.DEFAULT_MACHINE_TYPE<EOL><DEDENT>accelerators = None<EOL>if job_resources.accelerator_type:<EOL><INDENT>accelerators = [<EOL>google_v2_pipelines.build_accelerator(job_resources.accelerator_type,<EOL>job_resources.accelerator_count)<EOL>]<EOL><DEDENT>service_account = google_v2_pipelines.build_service_account(<EOL>job_resources.service_account or '<STR_LIT:default>', scopes)<EOL>resources = google_v2_pipelines.build_resources(<EOL>self._project,<EOL>job_resources.regions,<EOL>google_base.get_zones(job_resources.zones),<EOL>google_v2_pipelines.build_machine(<EOL>network=network,<EOL>machine_type=machine_type,<EOL>preemptible=job_resources.preemptible,<EOL>service_account=service_account,<EOL>boot_disk_size_gb=job_resources.boot_disk_size,<EOL>disks=disks,<EOL>accelerators=accelerators,<EOL>nvidia_driver_version=job_resources.nvidia_driver_version,<EOL>labels=labels,<EOL>cpu_platform=job_resources.cpu_platform),<EOL>)<EOL>pipeline = google_v2_pipelines.build_pipeline(actions, resources, None,<EOL>job_resources.timeout)<EOL>return {'<STR_LIT>': pipeline, '<STR_LIT>': labels}<EOL>", "docstring": "Returns a Pipeline objects for the task.", "id": "f6201:c2:m8"}
{"signature": "@staticmethod<EOL><INDENT>def _validate_cores(cores):<DEDENT>", "body": "if cores == <NUM_LIT:1> or cores % <NUM_LIT:2> == <NUM_LIT:0>:<EOL><INDENT>return cores<EOL><DEDENT>else:<EOL><INDENT>return cores + <NUM_LIT:1><EOL><DEDENT>", "docstring": "Make sure cores is either one or even.", "id": "f6201:c4:m0"}
{"signature": "def lookup_job_tasks(self,<EOL>statuses,<EOL>user_ids=None,<EOL>job_ids=None,<EOL>job_names=None,<EOL>task_ids=None,<EOL>task_attempts=None,<EOL>labels=None,<EOL>create_time_min=None,<EOL>create_time_max=None,<EOL>max_tasks=<NUM_LIT:0>,<EOL>page_size=<NUM_LIT:0>):", "body": "<EOL>ops_filter = self._build_query_filter(<EOL>statuses, user_ids, job_ids, job_names, task_ids, task_attempts, labels,<EOL>create_time_min, create_time_max)<EOL>page_token = None<EOL>tasks_yielded = <NUM_LIT:0><EOL>while True:<EOL><INDENT>max_to_fetch = None<EOL>if max_tasks:<EOL><INDENT>max_to_fetch = max_tasks - tasks_yielded<EOL><DEDENT>ops, page_token = self._operations_list(ops_filter, max_to_fetch,<EOL>page_size, page_token)<EOL>for op in ops:<EOL><INDENT>yield op<EOL>tasks_yielded += <NUM_LIT:1><EOL><DEDENT>assert (max_tasks >= tasks_yielded or not max_tasks)<EOL>if not page_token or <NUM_LIT:0> < max_tasks <= tasks_yielded:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Yields operations based on the input criteria.\n\n        If any of the filters are empty or {'*'}, then no filtering is performed on\n        that field. Filtering by both a job id list and job name list is\n        unsupported.\n\n        Args:\n          statuses: {'*'}, or a list of job status strings to return. Valid\n            status strings are 'RUNNING', 'SUCCESS', 'FAILURE', or 'CANCELED'.\n          user_ids: a list of ids for the user(s) who launched the job.\n          job_ids: a list of job ids to return.\n          job_names: a list of job names to return.\n          task_ids: a list of specific tasks within the specified job(s) to return.\n          task_attempts: a list of specific attempts within the specified tasks(s)\n            to return.\n          labels: a list of LabelParam with user-added labels. 
All labels must\n                  match the task being fetched.\n          create_time_min: a timezone-aware datetime value for the earliest create\n                           time of a task, inclusive.\n          create_time_max: a timezone-aware datetime value for the most recent\n                           create time of a task, inclusive.\n          max_tasks: the maximum number of job tasks to return or 0 for no limit.\n          page_size: the page size to use for each query to the pipelins API.\n\n        Raises:\n          ValueError: if both a job id list and a job name list are provided\n\n        Yeilds:\n          Genomics API Operations objects.", "id": "f6201:c2:m18"}
{"signature": "@classmethod<EOL><INDENT>def build_machine_type(cls, min_cores, min_ram):<DEDENT>", "body": "min_cores = min_cores or job_model.DEFAULT_MIN_CORES<EOL>min_ram = min_ram or job_model.DEFAULT_MIN_RAM<EOL>min_ram *= GoogleV2CustomMachine._MB_PER_GB<EOL>cores = cls._validate_cores(min_cores)<EOL>ram = cls._validate_ram(min_ram)<EOL>memory_to_cpu_ratio = ram / cores<EOL>if memory_to_cpu_ratio < GoogleV2CustomMachine._MIN_MEMORY_PER_CPU:<EOL><INDENT>adjusted_ram = GoogleV2CustomMachine._MIN_MEMORY_PER_CPU * cores<EOL>ram = cls._validate_ram(adjusted_ram)<EOL><DEDENT>elif memory_to_cpu_ratio > GoogleV2CustomMachine._MAX_MEMORY_PER_CPU:<EOL><INDENT>adjusted_cores = math.ceil(<EOL>ram / GoogleV2CustomMachine._MAX_MEMORY_PER_CPU)<EOL>cores = cls._validate_cores(adjusted_cores)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>return '<STR_LIT>'.format(int(cores), int(ram))<EOL>", "docstring": "Returns a custom machine type string.", "id": "f6201:c4:m2"}
{"signature": "def _get_localization_env(self, inputs, user_project):", "body": "<EOL>non_empty_inputs = [var for var in inputs if var.value]<EOL>env = {'<STR_LIT>': str(len(non_empty_inputs))}<EOL>for idx, var in enumerate(non_empty_inputs):<EOL><INDENT>env['<STR_LIT>'.format(idx)] = var.name<EOL>env['<STR_LIT>'.format(idx)] = str(int(var.recursive))<EOL>env['<STR_LIT>'.format(idx)] = var.value<EOL>dst = os.path.join(providers_util.DATA_MOUNT_POINT, var.docker_path)<EOL>path, filename = os.path.split(dst)<EOL>if '<STR_LIT:*>' in filename:<EOL><INDENT>dst = '<STR_LIT>'.format(path)<EOL><DEDENT>env['<STR_LIT>'.format(idx)] = dst<EOL><DEDENT>env['<STR_LIT>'] = user_project<EOL>return env<EOL>", "docstring": "Return a dict with variables for the 'localization' action.", "id": "f6201:c2:m4"}
{"signature": "def get_filtered_normalized_events(self):", "body": "<EOL>user_image = google_v2_operations.get_action_image(self._op,<EOL>_ACTION_USER_COMMAND)<EOL>need_ok = google_v2_operations.is_success(self._op)<EOL>events = {}<EOL>for event in google_v2_operations.get_events(self._op):<EOL><INDENT>if self._filter(event):<EOL><INDENT>continue<EOL><DEDENT>mapped, match = self._map(event)<EOL>name = mapped['<STR_LIT:name>']<EOL>if name == '<STR_LIT>':<EOL><INDENT>if not need_ok or '<STR_LIT>' in events:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>if name == '<STR_LIT>':<EOL><INDENT>if match.group(<NUM_LIT:1>) != user_image:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>events[name] = mapped<EOL><DEDENT>return sorted(events.values(), key=operator.itemgetter('<STR_LIT>'))<EOL>", "docstring": "Filter the granular v2 events down to events of interest.\n\n        Filter through the large number of granular events returned by the\n        pipelines API, and extract only those that are interesting to a user. This\n        is implemented by filtering out events which are known to be uninteresting\n        (i.e. the default actions run for every job) and by explicitly matching\n        specific events which are interesting and mapping those to v1 style naming.\n\n        Events which are not whitelisted or blacklisted will still be output,\n        meaning any events which are added in the future won't be masked.\n        We don't want to suppress display of events that we don't recognize.\n        They may be important.\n\n        Returns:\n          A list of maps containing the normalized, filtered events.", "id": "f6201:c0:m1"}
{"signature": "def _get_logging_env(self, logging_uri, user_project):", "body": "if not logging_uri.endswith('<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>'.format(logging_uri))<EOL><DEDENT>logging_prefix = logging_uri[:-len('<STR_LIT>')]<EOL>return {<EOL>'<STR_LIT>': '<STR_LIT>'.format(logging_prefix),<EOL>'<STR_LIT>': '<STR_LIT>'.format(logging_prefix),<EOL>'<STR_LIT>': '<STR_LIT>'.format(logging_prefix),<EOL>'<STR_LIT>': user_project,<EOL>}<EOL>", "docstring": "Returns the environment for actions that copy logging files.", "id": "f6201:c2:m2"}
{"signature": "def _operation_status_message(self):", "body": "msg = None<EOL>action = None<EOL>if not google_v2_operations.is_done(self._op):<EOL><INDENT>last_event = google_v2_operations.get_last_event(self._op)<EOL>if last_event:<EOL><INDENT>msg = last_event['<STR_LIT:description>']<EOL>action_id = last_event.get('<STR_LIT>', {}).get('<STR_LIT>')<EOL>if action_id:<EOL><INDENT>action = google_v2_operations.get_action_by_id(self._op, action_id)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>msg = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>failed_events = google_v2_operations.get_failed_events(self._op)<EOL>if failed_events:<EOL><INDENT>failed_event = failed_events[-<NUM_LIT:1>]<EOL>msg = failed_event.get('<STR_LIT>', {}).get('<STR_LIT>')<EOL>action_id = failed_event.get('<STR_LIT>', {}).get('<STR_LIT>')<EOL>if action_id:<EOL><INDENT>action = google_v2_operations.get_action_by_id(self._op, action_id)<EOL><DEDENT><DEDENT>if not msg:<EOL><INDENT>error = google_v2_operations.get_error(self._op)<EOL>if error:<EOL><INDENT>msg = error['<STR_LIT:message>']<EOL><DEDENT>else:<EOL><INDENT>msg = '<STR_LIT>'<EOL><DEDENT><DEDENT><DEDENT>return msg, action<EOL>", "docstring": "Returns the most relevant status string and failed action.\n\n        This string is meant for display only.\n\n        Returns:\n          A printable status string and name of failed action (if any).", "id": "f6201:c3:m5"}
{"signature": "def raw_task_data(self):", "body": "return self._raw._asdict()<EOL>", "docstring": "Return a provider-specific representation of task data.\n\n        Returns:\n          string of task data from the provider.", "id": "f6202:c1:m1"}
{"signature": "def _task_directory(self, job_id, task_id, task_attempt):", "body": "dir_name = '<STR_LIT>' if task_id is None else str(task_id)<EOL>if task_attempt:<EOL><INDENT>dir_name = '<STR_LIT>' % (dir_name, task_attempt)<EOL><DEDENT>return self._provider_root() + '<STR_LIT:/>' + job_id + '<STR_LIT:/>' + dir_name<EOL>", "docstring": "The local dir for staging files for that particular task.", "id": "f6202:c0:m20"}
{"signature": "def _split_task_directory(self, task_dir):", "body": "if '<STR_LIT:.>' in task_dir:<EOL><INDENT>return task_dir.split('<STR_LIT:.>')<EOL><DEDENT>else:<EOL><INDENT>return task_dir, None<EOL><DEDENT>", "docstring": "Return task_id and task_attempt from dir_name.", "id": "f6202:c0:m21"}
{"signature": "def _localize_inputs_recursive_command(self, task_dir, inputs):", "body": "data_dir = os.path.join(task_dir, _DATA_SUBDIR)<EOL>provider_commands = [<EOL>providers_util.build_recursive_localize_command(data_dir, inputs,<EOL>file_provider)<EOL>for file_provider in _SUPPORTED_INPUT_PROVIDERS<EOL>]<EOL>return '<STR_LIT:\\n>'.join(provider_commands)<EOL>", "docstring": "Returns a command that will stage recursive inputs.", "id": "f6202:c0:m23"}
{"signature": "def _datetime_in_range(self, dt, dt_min=None, dt_max=None):", "body": "<EOL>dt = dt.replace(microsecond=<NUM_LIT:0>)<EOL>if dt_min:<EOL><INDENT>dt_min = dt_min.replace(microsecond=<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>dt_min = dsub_util.replace_timezone(datetime.datetime.min, pytz.utc)<EOL><DEDENT>if dt_max:<EOL><INDENT>dt_max = dt_max.replace(microsecond=<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>dt_max = dsub_util.replace_timezone(datetime.datetime.max, pytz.utc)<EOL><DEDENT>return dt_min <= dt <= dt_max<EOL>", "docstring": "Determine if the provided time is within the range, inclusive.", "id": "f6202:c0:m8"}
{"signature": "def _delocalize_outputs_commands(self, task_dir, outputs, user_project):", "body": "commands = []<EOL>for o in outputs:<EOL><INDENT>if o.recursive or not o.value:<EOL><INDENT>continue<EOL><DEDENT>dest_path = o.uri.path<EOL>local_path = task_dir + '<STR_LIT:/>' + _DATA_SUBDIR + '<STR_LIT:/>' + o.docker_path<EOL>if o.file_provider == job_model.P_LOCAL:<EOL><INDENT>commands.append('<STR_LIT>' % dest_path)<EOL><DEDENT>if o.file_provider in [job_model.P_LOCAL, job_model.P_GCS]:<EOL><INDENT>if user_project:<EOL><INDENT>command = '<STR_LIT>' % (user_project, local_path,<EOL>dest_path)<EOL><DEDENT>else:<EOL><INDENT>command = '<STR_LIT>' % (local_path, dest_path)<EOL><DEDENT>commands.append(command)<EOL><DEDENT><DEDENT>return '<STR_LIT:\\n>'.join(commands)<EOL>", "docstring": "Copy outputs from local disk to GCS.", "id": "f6202:c0:m28"}
{"signature": "def _sort_tasks(tasks):", "body": "<EOL>tasks.sort(key=_task_sort_function, reverse=True)<EOL>", "docstring": "Sort tasks by 'most recent first'.", "id": "f6202:m3"}
{"signature": "def build_resources(project=None,<EOL>regions=None,<EOL>zones=None,<EOL>virtual_machine=None):", "body": "return {<EOL>'<STR_LIT>': project,<EOL>'<STR_LIT>': regions,<EOL>'<STR_LIT>': zones,<EOL>'<STR_LIT>': virtual_machine,<EOL>}<EOL>", "docstring": "Build a Resources object for a Pipeline request.\n\n    Args:\n      project (str): Cloud project for the Pipeline to run in.\n      regions (List[str]): List of regions for the pipeline to run in.\n      zones (List[str]): List of zones for the pipeline to run in.\n      virtual_machine(str): Virtual machine type string.\n\n    Returns:\n      An object representing a Resource.", "id": "f6203:m5"}
{"signature": "def build_pipeline(actions, resources, environment, timeout):", "body": "return {<EOL>'<STR_LIT>': actions,<EOL>'<STR_LIT>': resources,<EOL>'<STR_LIT>': environment,<EOL>'<STR_LIT>': timeout,<EOL>}<EOL>", "docstring": "Build an Pipeline argument for a Pipeline request.\n\n    Args:\n      actions (List): A list of actions to execute.\n      resources (dict): An object indicating pipeline resources.\n      environment (dict[str,str]): The environment to pass into the container.\n      timeout (str): A duration in seconds with up to nine fractional digits,\n        terminated by 's'.\n\n    Returns:\n      An object representing a Pipelines Resource.", "id": "f6203:m8"}
{"signature": "def parse_args(parser, provider_required_args, argv):", "body": "<EOL>epilog = '<STR_LIT>'<EOL>for provider in provider_required_args:<EOL><INDENT>epilog += '<STR_LIT>' % (provider, provider_required_args[provider])<EOL><DEDENT>parser.epilog = epilog<EOL>args = parser.parse_args(argv)<EOL>for arg in provider_required_args[args.provider]:<EOL><INDENT>if not args.__getattribute__(arg):<EOL><INDENT>parser.error('<STR_LIT>' % arg)<EOL><DEDENT><DEDENT>return args<EOL>", "docstring": "Add provider required arguments epilog message, parse, and validate.", "id": "f6204:m3"}
{"signature": "def get_provider(args, resources):", "body": "provider = getattr(args, '<STR_LIT>', '<STR_LIT>')<EOL>if provider == '<STR_LIT>':<EOL><INDENT>return google.GoogleJobProvider(<EOL>getattr(args, '<STR_LIT>', False),<EOL>getattr(args, '<STR_LIT>', False), args.project)<EOL><DEDENT>elif provider == '<STR_LIT>':<EOL><INDENT>return google_v2.GoogleV2JobProvider(<EOL>getattr(args, '<STR_LIT>', False), getattr(args, '<STR_LIT>', False),<EOL>args.project)<EOL><DEDENT>elif provider == '<STR_LIT>':<EOL><INDENT>return local.LocalJobProvider(resources)<EOL><DEDENT>elif provider == '<STR_LIT>':<EOL><INDENT>return test_fails.FailsJobProvider()<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' + provider)<EOL><DEDENT>", "docstring": "Returns a provider for job submission requests.", "id": "f6204:m0"}
{"signature": "def create_parser(prog):", "body": "parser = argparse.ArgumentParser(prog=prog, formatter_class=DsubHelpFormatter)<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>default='<STR_LIT>',<EOL>choices=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'],<EOL>help=\"\"\"<STR_LIT>\"\"\",<EOL>metavar='<STR_LIT>')<EOL>return parser<EOL>", "docstring": "Create an argument parser, adding in the list of providers.", "id": "f6204:m2"}
{"signature": "def get_ddel_provider_args(provider_type, project):", "body": "<EOL>return get_dstat_provider_args(provider_type, project)<EOL>", "docstring": "A string with the arguments to point ddel to the same provider+project.", "id": "f6204:m5"}
{"signature": "def lookup_job_tasks(self,<EOL>statuses,<EOL>user_ids=None,<EOL>job_ids=None,<EOL>job_names=None,<EOL>task_ids=None,<EOL>task_attempts=None,<EOL>labels=None,<EOL>create_time_min=None,<EOL>create_time_max=None,<EOL>max_tasks=<NUM_LIT:0>):", "body": "statuses = None if statuses == {'<STR_LIT:*>'} else statuses<EOL>user_ids = None if user_ids == {'<STR_LIT:*>'} else user_ids<EOL>job_ids = None if job_ids == {'<STR_LIT:*>'} else job_ids<EOL>job_names = None if job_names == {'<STR_LIT:*>'} else job_names<EOL>task_ids = None if task_ids == {'<STR_LIT:*>'} else task_ids<EOL>task_attempts = None if task_attempts == {'<STR_LIT:*>'} else task_attempts<EOL>if labels or create_time_min or create_time_max:<EOL><INDENT>raise NotImplementedError(<EOL>'<STR_LIT>')<EOL><DEDENT>operations = [<EOL>x for x in self._operations<EOL>if ((not statuses or x.get_field('<STR_LIT:status>', (None, None))[<NUM_LIT:0>] in statuses<EOL>) and (not user_ids or x.get_field('<STR_LIT:user>', None) in user_ids) and<EOL>(not job_ids or x.get_field('<STR_LIT>', None) in job_ids) and<EOL>(not job_names or x.get_field('<STR_LIT>', None) in job_names) and<EOL>(not task_ids or x.get_field('<STR_LIT>', None) in task_ids) and<EOL>(not task_attempts or<EOL>x.get_field('<STR_LIT>', None) in task_attempts))<EOL>]<EOL>if max_tasks > <NUM_LIT:0>:<EOL><INDENT>operations = operations[:max_tasks]<EOL><DEDENT>return operations<EOL>", "docstring": "Return a list of operations. See base.py for additional detail.", "id": "f6205:c0:m6"}
{"signature": "def set_operations(self, ops):", "body": "self._operations = [StubTask(o) for o in ops]<EOL>", "docstring": "Set the state of the fictional world.\n\n        Args:\n         ops: a list of dict, each representing an operation.\n\n        Operations can have the following fields:\n           - status: tuple (string,date)\n           - user: string\n           - job-id: string\n           - job-name: string\n           - task-id: string\n           - task-attempt: integer\n           - labels: list<dict>\n           - status-message: string\n           - error-messages : list of string", "id": "f6205:c0:m3"}
{"signature": "def is_success(op):", "body": "return is_done(op) and ('<STR_LIT:error>' not in op)<EOL>", "docstring": "Return whether the operation has completed successfully.", "id": "f6206:m8"}
{"signature": "def get_action_image(op, name):", "body": "action = _get_action_by_name(op, name)<EOL>if action:<EOL><INDENT>return action.get('<STR_LIT>')<EOL><DEDENT>", "docstring": "Return the image for the operation.", "id": "f6206:m17"}
{"signature": "def get_last_update(op):", "body": "last_update = get_end_time(op)<EOL>if not last_update:<EOL><INDENT>last_event = get_last_event(op)<EOL>if last_event:<EOL><INDENT>last_update = last_event['<STR_LIT>']<EOL><DEDENT><DEDENT>if not last_update:<EOL><INDENT>last_update = get_create_time(op)<EOL><DEDENT>return last_update<EOL>", "docstring": "Return the most recent timestamp in the operation.", "id": "f6206:m23"}
{"signature": "def create_time_filter(create_time, comparator):", "body": "return '<STR_LIT>'.format(comparator, create_time)<EOL>", "docstring": "Return a valid createTime filter for operations.list().", "id": "f6206:m1"}
{"signature": "def is_pipeline(op):", "body": "return get_metadata_type(<EOL>op) == '<STR_LIT>'<EOL>", "docstring": "Check that an operation is a genomics pipeline run.\n\n    An operation is a Genomics Pipeline run if the request metadata's @type\n    is \"type.googleapis.com/google.genomics.v2alpha1.Metadata\".\n\n    Args:\n      op: a pipelines operation.\n\n    Returns:\n      Boolean, true if the operation is a RunPipelineRequest.", "id": "f6206:m25"}
{"signature": "def label_filter(label_key, label_value):", "body": "return '<STR_LIT>'.format(label_key, label_value)<EOL>", "docstring": "Return a valid label filter for operations.list().", "id": "f6206:m0"}
{"signature": "def get_labels(op):", "body": "return op.get('<STR_LIT>', {}).get('<STR_LIT>', {})<EOL>", "docstring": "Return the operation's array of labels.", "id": "f6206:m11"}
{"signature": "def _get_action_by_name(op, name):", "body": "actions = get_actions(op)<EOL>for action in actions:<EOL><INDENT>if action.get('<STR_LIT:name>') == name:<EOL><INDENT>return action<EOL><DEDENT><DEDENT>", "docstring": "Return the value for the specified action.", "id": "f6206:m15"}
{"signature": "def get_start_time(op):", "body": "return op.get('<STR_LIT>', {}).get('<STR_LIT>')<EOL>", "docstring": "Return the start time string of the operation.", "id": "f6206:m3"}
{"signature": "def get_label(op, name):", "body": "return get_labels(op).get(name)<EOL>", "docstring": "Return the value for the specified label.", "id": "f6206:m12"}
{"signature": "def get_end_time(op):", "body": "return op.get('<STR_LIT>', {}).get('<STR_LIT>')<EOL>", "docstring": "Return the end time string of the operation.", "id": "f6206:m4"}
{"signature": "def is_dsub_operation(op):", "body": "if not is_pipeline(op):<EOL><INDENT>return False<EOL><DEDENT>for name in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if not get_label(op, name):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Determine if a pipelines operation is a dsub request.\n\n    We don't have a rigorous way to identify an operation as being submitted\n    by dsub. Our best option is to check for certain fields that have always\n    been part of dsub operations.\n\n    - labels: job-id, job-name, and user-id have always existed. The dsub-version\n              label has always existed for the google-v2 provider.\n\n    Args:\n      op: a pipelines operation.\n\n    Returns:\n      Boolean, true if the pipeline run was generated by dsub.", "id": "f6206:m26"}
{"signature": "def get_failed_events(op):", "body": "events = get_events(op)<EOL>if events:<EOL><INDENT>return [<EOL>e for e in events if int(e.get('<STR_LIT>', {}).get('<STR_LIT>', <NUM_LIT:0>)) != <NUM_LIT:0><EOL>]<EOL><DEDENT>return None<EOL>", "docstring": "Return the events (if any) with a non-zero exitStatus.", "id": "f6206:m20"}
{"signature": "def raw_ops(tasklist):", "body": "return [t.raw_task_data() for t in tasklist]<EOL>", "docstring": "Convert returned operations to raw operations.", "id": "f6218:m0"}
{"signature": "def sleep(self, seconds):", "body": "if seconds < <NUM_LIT:0>:<EOL><INDENT>raise IOError(\"<STR_LIT>\")<EOL><DEDENT>target = self._now + seconds<EOL>if target < self._next:<EOL><INDENT>self._now = target<EOL>return<EOL><DEDENT>self._now = target<EOL>for step in self._chronology:<EOL><INDENT>self._next += step<EOL>if self._next > target:<EOL><INDENT>return<EOL><DEDENT><DEDENT>raise BaseException(\"<STR_LIT>\")<EOL>", "docstring": "Sleep for this many fictitious seconds.\n\n        The world will update accordingly.\n\n        Args:\n          seconds: how many seconds to pretend sleep for.\n\n        Raises:\n          IOError: for negative delays (matches time.sleep).\n          BaseException: at the end of times, to detect infinite loop bugs\n        and the like.", "id": "f6219:c0:m1"}
{"signature": "def __init__(self, chronology):", "body": "self._chronology = chronology<EOL>self._now = <NUM_LIT:0><EOL>self._next = six.advance_iterator(self._chronology)<EOL>", "docstring": "The chronology is a generator that updates the world.\n\n        Yield how many seconds of fake time elapse. Think of each yield as a\n        \"sleep\". We read the very first value immediately, so that the world can be\n        initialized in the chronology.\n\n        Args:\n          chronology: a generator that updates the state of the world and then\n          sleeps via yielding.", "id": "f6219:c0:m0"}
{"signature": "def expand_tsv_fields(newenv, tmpl_file, tsv_file):", "body": "with open(tmpl_file, '<STR_LIT:r>') as f:<EOL><INDENT>input_lines = f.readlines()<EOL><DEDENT>with open(tsv_file, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(input_lines[<NUM_LIT:0>])<EOL>for line in input_lines[<NUM_LIT:1>:]:<EOL><INDENT>curr = []<EOL>for field in line.split('<STR_LIT:\\t>'):<EOL><INDENT>cell = subprocess.check_output(<EOL>'<STR_LIT>' % field, env=newenv, shell=True)<EOL>curr.append(cell)<EOL><DEDENT>f.write('<STR_LIT:\\t>'.join(curr) + '<STR_LIT:\\n>')<EOL><DEDENT><DEDENT>", "docstring": "Write a TSV file, expanding environment variables in the template.", "id": "f6226:m4"}
{"signature": "def to_string(stdoutbytes):", "body": "if sys.version_info[<NUM_LIT:0>] < <NUM_LIT:3>:<EOL><INDENT>return stdoutbytes<EOL><DEDENT>encoding = sys.stdout.encoding if sys.stdout.encoding else '<STR_LIT:utf-8>'<EOL>return stdoutbytes.decode(encoding)<EOL>", "docstring": "Convert stdout to a string in python 2 or 3.", "id": "f6226:m0"}
{"signature": "def unquote(value):", "body": "if value.startswith('<STR_LIT:\">') and value.endswith('<STR_LIT:\">'):<EOL><INDENT>return value[<NUM_LIT:1>:-<NUM_LIT:1>].replace('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>return value<EOL>", "docstring": "Remove surrounding single or double quotes.", "id": "f6229:m0"}
{"signature": "def inject_quiet(levels):", "body": "loggers = list(Logger.manager.loggerDict.items())<EOL>loggers.append((\"<STR_LIT:root>\", getLogger()))<EOL>level_filter = LevelFilter(levels)<EOL>for logger_name, logger in loggers:<EOL><INDENT>for handler in getattr(logger, \"<STR_LIT>\", []):<EOL><INDENT>handler.addFilter(level_filter)<EOL><DEDENT><DEDENT>", "docstring": "see --quiet flag help for what this does", "id": "f6245:m0"}
{"signature": "def flush(self, line):", "body": "<EOL>sys.stdout.write(line)<EOL>sys.stdout.flush()<EOL>", "docstring": "flush the line to stdout", "id": "f6247:c0:m5"}
{"signature": "def execute(self, arg_str='<STR_LIT>', **kwargs):", "body": "cmd = \"<STR_LIT>\".format(self.cmd_prefix, self.script, arg_str)<EOL>expected_ret_code = kwargs.pop('<STR_LIT:code>', <NUM_LIT:0>)<EOL>environ = self.environ<EOL>for k in list(kwargs.keys()):<EOL><INDENT>if k.isupper():<EOL><INDENT>environ[k] = kwargs.pop(k)<EOL><DEDENT><DEDENT>kwargs.setdefault(\"<STR_LIT>\", subprocess.STDOUT)<EOL>kwargs[\"<STR_LIT>\"] = True<EOL>kwargs[\"<STR_LIT>\"] = subprocess.PIPE<EOL>kwargs[\"<STR_LIT>\"] = self.cwd<EOL>kwargs[\"<STR_LIT>\"] = environ<EOL>process = None<EOL>self.buf = deque(maxlen=self.bufsize)<EOL>try:<EOL><INDENT>process = subprocess.Popen(<EOL>cmd,<EOL>**kwargs<EOL>)<EOL>for line in iter(process.stdout.readline, b\"<STR_LIT>\"):<EOL><INDENT>line = line.decode(self.encoding)<EOL>self.buf.append(line.rstrip())<EOL>yield line<EOL><DEDENT>process.wait()<EOL>if process.returncode != expected_ret_code:<EOL><INDENT>if process.returncode > <NUM_LIT:0>:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\".format(<EOL>cmd,<EOL>process.returncode,<EOL>self.output<EOL>))<EOL><DEDENT><DEDENT><DEDENT>except subprocess.CalledProcessError as e:<EOL><INDENT>if e.returncode != expected_ret_code:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\".format(<EOL>cmd,<EOL>e.returncode,<EOL>self.output<EOL>))<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>if process:<EOL><INDENT>process.stdout.close()<EOL><DEDENT><DEDENT>", "docstring": "runs the passed in arguments and returns an iterator on the output of\n        running command", "id": "f6247:c0:m9"}
{"signature": "def err(format_msg, *args, **kwargs):", "body": "exc_info = kwargs.pop(\"<STR_LIT>\", False)<EOL>stderr.warning(str(format_msg).format(*args, **kwargs), exc_info=exc_info)<EOL>", "docstring": "print format_msg to stderr", "id": "f6248:m5"}
{"signature": "def table(*columns, **kwargs):", "body": "ret = []<EOL>prefix = kwargs.get('<STR_LIT>', '<STR_LIT>')<EOL>buf_count = kwargs.get('<STR_LIT>', <NUM_LIT:2>)<EOL>if len(columns) == <NUM_LIT:1>:<EOL><INDENT>columns = list(columns[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>columns = list(zip(*columns))<EOL><DEDENT>headers = kwargs.get(\"<STR_LIT>\", [])<EOL>if headers:<EOL><INDENT>columns.insert(<NUM_LIT:0>, headers)<EOL><DEDENT>widths = kwargs.get(\"<STR_LIT>\", [])<EOL>row_counts = Counter()<EOL>for i in range(len(widths)):<EOL><INDENT>row_counts[i] = int(widths[i])<EOL><DEDENT>width = int(kwargs.get(\"<STR_LIT:width>\", <NUM_LIT:0>))<EOL>for row in columns:<EOL><INDENT>for i, c in enumerate(row):<EOL><INDENT>if isinstance(c, basestring):<EOL><INDENT>cl = len(c)<EOL><DEDENT>else:<EOL><INDENT>cl = len(str(c))<EOL><DEDENT>if cl > row_counts[i]:<EOL><INDENT>row_counts[i] = cl<EOL><DEDENT><DEDENT><DEDENT>width = int(kwargs.get(\"<STR_LIT:width>\", <NUM_LIT:0>))<EOL>if width:<EOL><INDENT>for i in row_counts:<EOL><INDENT>if row_counts[i] < width:<EOL><INDENT>row_counts[i] = width<EOL><DEDENT><DEDENT><DEDENT>def colstr(c):<EOL><INDENT>if isinstance(c, basestring): return c<EOL>return str(c)<EOL><DEDENT>def rowstr(row, prefix, row_counts):<EOL><INDENT>row_format = prefix<EOL>cols = list(map(colstr, row))<EOL>for i in range(len(row_counts)):<EOL><INDENT>c = cols[i]<EOL>if re.match(r\"<STR_LIT>\", c):<EOL><INDENT>if i == <NUM_LIT:0>:<EOL><INDENT>row_format += \"<STR_LIT>\" + str(row_counts[i]) + \"<STR_LIT:}>\"<EOL><DEDENT>else:<EOL><INDENT>row_format += \"<STR_LIT>\" + str(row_counts[i] + buf_count) + \"<STR_LIT:}>\"<EOL><DEDENT><DEDENT>else:<EOL><INDENT>row_format += \"<STR_LIT>\" + str(row_counts[i] + buf_count) + \"<STR_LIT:}>\"<EOL><DEDENT><DEDENT>return row_format.format(*cols)<EOL><DEDENT>for row in columns:<EOL><INDENT>ret.append(rowstr(row, prefix, row_counts))<EOL><DEDENT>out(os.linesep.join(ret))<EOL>", "docstring": "format columned data so we can 
easily print it out on a console, this just takes\ncolumns of data and it will format it into properly aligned columns, it's not\nfancy, but it works for most type of strings that I need it for, like server name\nlists.\n\nother formatting options:\n    http://stackoverflow.com/a/8234511/5006\n\nother packages that probably do this way better:\n    https://stackoverflow.com/a/26937531/5006\n\n:Example:\n    >>> echo.table([(1, 2), (3, 4), (5, 6), (7, 8), (9, 0)])\n    1  2\n    3  4\n    5  6\n    7  8\n    9  0\n    >>> echo.table([1, 3, 5, 7, 9], [2, 4, 6, 8, 0])\n    1  2\n    3  4\n    5  6\n    7  8\n    9  0\n\n:param *columns: can either be a list of rows or multiple lists representing each\n    column in the table\n:param **kwargs: dict\n    prefix -- string -- what you want before each row (eg, a tab)\n    buf_count -- integer -- how many spaces between longest col value and its neighbor\n    headers -- list -- the headers you want, must match column count\n    widths -- list -- the widths of each column you want to use, this doesn't have\n        to match column count, so you can do something like [0, 5] to set the\n        width of the second column\n    width -- int -- similar to widths except it will set this value for all columns", "id": "f6248:m22"}
{"signature": "def ol(*lines):", "body": "bullets(*lines, numbers=False)<EOL>", "docstring": "ordered list", "id": "f6248:m15"}
{"signature": "def ch(c):", "body": "<EOL>istdout.info(c)<EOL>", "docstring": "print one or more characters without a newline at the end\n\n    example --\n        for x in range(1000):\n            echo.ch(\".\")\n\n    c -- string -- the chars that will be output", "id": "f6248:m6"}
{"signature": "def banner(*lines, **kwargs):", "body": "sep = kwargs.get(\"<STR_LIT>\", \"<STR_LIT:*>\")<EOL>count = kwargs.get(\"<STR_LIT:width>\", globals()[\"<STR_LIT>\"])<EOL>out(sep * count)<EOL>if lines:<EOL><INDENT>out(sep)<EOL>for line in lines:<EOL><INDENT>out(\"<STR_LIT>\".format(sep, line))<EOL><DEDENT>out(sep)<EOL>out(sep * count)<EOL><DEDENT>", "docstring": "prints a banner\n\n    sep -- string -- the character that will be on the line on the top and bottom\n        and before any of the lines, defaults to *\n    count -- integer -- the line width, defaults to 80", "id": "f6248:m20"}
{"signature": "def increment(itr, n=<NUM_LIT:1>, format_msg=\"<STR_LIT>\"):", "body": "for i, v in enumerate(itr, n):<EOL><INDENT>with prefix(format_msg, i):<EOL><INDENT>yield v<EOL><DEDENT><DEDENT>", "docstring": "Similar to enumerate but will set format_msg.format(n) into the prefix on\n    each iteration\n\n    :Example:\n        for v in increment([\"foo\", \"bar\"]):\n            echo.out(v) # 1. foo\\n2. bar\n\n    :param itr: iterator, any iterator you want to set a numeric prefix on on every\n        iteration\n    :param n: integer, the starting integer for the numeric prefix\n    :param format_msg: string, this will basically do: format_msg.format(n) so there\n        should only be one set of curly brackets\n    :returns: yield generator", "id": "f6248:m2"}
{"signature": "def hr(width=<NUM_LIT:0>):", "body": "if not width: width = globals()[\"<STR_LIT>\"]<EOL>bar(\"<STR_LIT:_>\", width=width)<EOL>blank()<EOL>", "docstring": "similar to the html horizontal rule in html", "id": "f6248:m9"}
{"signature": "def exception(e):", "body": "stderr.exception(e)<EOL>", "docstring": "print an exception message to stderr (this does not honor quiet)", "id": "f6248:m4"}
{"signature": "def br(count=<NUM_LIT:1>):", "body": "for x in range(count):<EOL><INDENT>out(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "print out a blank newline", "id": "f6248:m13"}
{"signature": "def ul(*lines):", "body": "bullets(*lines, numbers=True)<EOL>", "docstring": "unordered list", "id": "f6248:m14"}
{"signature": "def out(format_msg=\"<STR_LIT>\", *args, **kwargs):", "body": "logmethod = kwargs.get(\"<STR_LIT>\", stdout.info)<EOL>if format_msg != \"<STR_LIT>\":<EOL><INDENT>if Prefix.has():<EOL><INDENT>if isinstance(format_msg, basestring):<EOL><INDENT>format_msg = Prefix.get() + format_msg<EOL><DEDENT>else:<EOL><INDENT>format_msg = Prefix.get() + str(format_msg)<EOL><DEDENT><DEDENT>if isinstance(format_msg, basestring):<EOL><INDENT>if args or kwargs:<EOL><INDENT>s = format_msg.format(*args, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>s = format_msg<EOL><DEDENT>logmethod(s)<EOL><INDENT>width = globals()[\"<STR_LIT:width>\"]<EOL>s = textwrap.fill(s, width=width)<EOL>stdout.info(s)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logmethod(str(format_msg))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logmethod(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "print format_msg to stdout, taking into account --quiet setting", "id": "f6248:m7"}
{"signature": "@property<EOL><INDENT>def body(self):<DEDENT>", "body": "if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._body = inspect.getsource(self.module)<EOL><DEDENT>return self._body<EOL>", "docstring": "get the contents of the script", "id": "f6250:c0:m6"}
{"signature": "@property<EOL><INDENT>def module(self):<DEDENT>", "body": "<EOL>if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>if \"<STR_LIT:__main__>\" in sys.modules:<EOL><INDENT>mod = sys.modules[\"<STR_LIT:__main__>\"]<EOL>path = self.normalize_path(mod.__file__)<EOL>if os.path.splitext(path) == os.path.splitext(self.path):<EOL><INDENT>self._module = mod<EOL><DEDENT>else:<EOL><INDENT>self._module = imp.load_source('<STR_LIT>', self.path)<EOL><DEDENT><DEDENT><DEDENT>return self._module<EOL>", "docstring": "load the module so we can actually run the script's function", "id": "f6250:c0:m5"}
{"signature": "def can_run_from_cli(self):", "body": "ret = False<EOL>ast_tree = ast.parse(self.body, self.path)<EOL>calls = self._find_calls(ast_tree, __name__, \"<STR_LIT>\")<EOL>for call in calls:<EOL><INDENT>if re.search(\"<STR_LIT>\".format(re.escape(call)), self.body):<EOL><INDENT>ret = True<EOL>break<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "return True if this script can be run from the command line", "id": "f6250:c0:m12"}
{"signature": "def run(self, raw_args):", "body": "parser = self.parser<EOL>args, kwargs = parser.parse_callback_args(raw_args)<EOL>callback = kwargs.pop(\"<STR_LIT>\")<EOL>if parser.has_injected_quiet():<EOL><INDENT>levels = kwargs.pop(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>logging.inject_quiet(levels)<EOL><DEDENT>try:<EOL><INDENT>ret_code = callback(*args, **kwargs)<EOL>ret_code = int(ret_code) if ret_code else <NUM_LIT:0><EOL><DEDENT>except ArgError as e:<EOL><INDENT>echo.err(\"<STR_LIT>\", parser.prog, str(e))<EOL>ret_code = <NUM_LIT:2><EOL><DEDENT>return ret_code<EOL>", "docstring": "parse and import the script, and then run the script's main function", "id": "f6250:c0:m9"}
{"signature": "def _find_calls(self, ast_tree, called_module, called_func):", "body": "s = set()<EOL>s.add(\"<STR_LIT>\".format(called_module, called_func))<EOL>if hasattr(ast_tree, '<STR_LIT:body>'):<EOL><INDENT>if isinstance(ast_tree.body, collections.Iterable):<EOL><INDENT>for ast_body in ast_tree.body:<EOL><INDENT>s.update(self._find_calls(ast_body, called_module, called_func))<EOL><DEDENT><DEDENT><DEDENT>elif hasattr(ast_tree, '<STR_LIT>'):<EOL><INDENT>if hasattr(ast_tree, '<STR_LIT>'):<EOL><INDENT>if ast_tree.module == called_module:<EOL><INDENT>for ast_name in ast_tree.names:<EOL><INDENT>if ast_name.name == called_func:<EOL><INDENT>s.add(unicode(ast_name.asname if ast_name.asname is not None else ast_name.name))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for ast_name in ast_tree.names:<EOL><INDENT>if hasattr(ast_name, '<STR_LIT:name>') and (ast_name.name == called_module):<EOL><INDENT>call = \"<STR_LIT>\".format(<EOL>ast_name.asname if ast_name.asname is not None else ast_name.name,<EOL>called_func<EOL>)<EOL>s.add(call)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return s<EOL>", "docstring": "scan the abstract source tree looking for possible ways to call the called_module\nand called_func\n\nborrowed from pout\n\nast_tree -- _ast.* instance -- the internal ast object that is being checked, returned from compile()\n    with ast.PyCF_ONLY_AST flag\ncalled_module -- string -- we are checking the ast for imports of this module\ncalled_func -- string -- we are checking the ast for aliases of this function\nreturn -- set -- the list of possible calls the ast_tree could make to call the called_func", "id": "f6250:c0:m13"}
{"signature": "def set_default(self, na):", "body": "kwargs = {}<EOL>if isinstance(na, (type, types.FunctionType)):<EOL><INDENT>kwargs['<STR_LIT:type>'] = na<EOL>kwargs['<STR_LIT>'] = True<EOL>kwargs[\"<STR_LIT:default>\"] = argparse.SUPPRESS<EOL><DEDENT>elif isinstance(na, bool):<EOL><INDENT>kwargs['<STR_LIT:action>'] = '<STR_LIT>' if na else '<STR_LIT:store_true>'<EOL>kwargs['<STR_LIT>'] = False<EOL><DEDENT>elif isinstance(na, (int, float, str)):<EOL><INDENT>kwargs['<STR_LIT:type>'] = type(na)<EOL>kwargs['<STR_LIT:default>'] = na<EOL>kwargs['<STR_LIT>'] = False<EOL><DEDENT>elif isinstance(na, (list, set)):<EOL><INDENT>na = list(na)<EOL>kwargs['<STR_LIT:action>'] = '<STR_LIT>'<EOL>kwargs['<STR_LIT>'] = True<EOL>if len(na) > <NUM_LIT:0>:<EOL><INDENT>if isinstance(na[<NUM_LIT:0>], type):<EOL><INDENT>kwargs['<STR_LIT:type>'] = na[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>kwargs['<STR_LIT:action>'] = '<STR_LIT:store>'<EOL>l = set()<EOL>ltype = None<EOL>for elt in na:<EOL><INDENT>vtype = type(elt)<EOL>l.add(elt)<EOL>if ltype is None:<EOL><INDENT>ltype = vtype<EOL><DEDENT>else:<EOL><INDENT>if ltype is not vtype:<EOL><INDENT>ltype = str<EOL><DEDENT><DEDENT><DEDENT>kwargs['<STR_LIT>'] = l<EOL>kwargs['<STR_LIT:type>'] = ltype<EOL><DEDENT><DEDENT><DEDENT>self.parser_kwargs.update(kwargs)<EOL>", "docstring": "this is used for introspection from the main() method when there is an\n        argument with a default value, this figures out how to set up the ArgParse\n        arguments", "id": "f6254:c1:m6"}
{"signature": "def _fill_text(self, text, width, indent):", "body": "lines = []<EOL>for line in text.splitlines(False):<EOL><INDENT>if line:<EOL><INDENT>lines.extend(textwrap.wrap(<EOL>line.strip(),<EOL>width,<EOL>initial_indent=indent,<EOL>subsequent_indent=indent<EOL>))<EOL><DEDENT>else:<EOL><INDENT>lines.append(line)<EOL><DEDENT><DEDENT>text = \"<STR_LIT:\\n>\".join(lines)<EOL>return text<EOL>", "docstring": "Overridden to not get rid of newlines\n\n        https://github.com/python/cpython/blob/2.7/Lib/argparse.py#L620", "id": "f6254:c4:m0"}
{"signature": "def make_buffer_get_request(self, expected_status=<NUM_LIT:200>, expected_count=None, **get_params):", "body": "response = self.client.get(<EOL>reverse('<STR_LIT>'),<EOL>get_params,<EOL>format='<STR_LIT>'<EOL>)<EOL>self.assertEqual(response.status_code, expected_status)<EOL>if expected_status == <NUM_LIT:200>:<EOL><INDENT>t_id = get_params.get(\"<STR_LIT>\")<EOL>if expected_count is None:<EOL><INDENT>expected_count = Buffer.objects.filter(transfer_session_id=t_id).count()<EOL><DEDENT>data = json.loads(response.content.decode())<EOL>if isinstance(data, dict) and \"<STR_LIT>\" in data:<EOL><INDENT>data = data[\"<STR_LIT>\"]<EOL><DEDENT>model_uuids = [d[\"<STR_LIT>\"] for d in data]<EOL>Buffer.objects.filter(transfer_session_id=t_id, model_uuid__in=model_uuids).delete()<EOL>errors = validate_and_create_buffer_data(data, TransferSession.objects.get(id=t_id))<EOL>self.assertFalse(errors)<EOL>self.assertEqual(expected_count, Buffer.objects.filter(transfer_session_id=t_id, model_uuid__in=model_uuids).count())<EOL>self.assertEqual(expected_count, len(data))<EOL>for record in data:<EOL><INDENT>self.assertEqual(<NUM_LIT:3>, len(record[\"<STR_LIT>\"]))<EOL><DEDENT>return data<EOL><DEDENT>", "docstring": "Make a GET request to the buffer endpoint. Warning: Deletes the local buffer instances before validating.", "id": "f6255:c6:m8"}
{"signature": "def max_parameter_substitution():", "body": "if os.path.isfile(SQLITE_VARIABLE_FILE_CACHE):<EOL><INDENT>return<EOL><DEDENT>conn = sqlite3.connect('<STR_LIT>')<EOL>low = <NUM_LIT:1><EOL>high = <NUM_LIT:1000>  <EOL>conn.execute('<STR_LIT>')<EOL>while low < high - <NUM_LIT:1>:<EOL><INDENT>guess = (low + high) // <NUM_LIT:2><EOL>try:<EOL><INDENT>statement = '<STR_LIT>' % '<STR_LIT:U+002C>'.join(['<STR_LIT:?>' for _ in range(guess)])<EOL>values = [i for i in range(guess)]<EOL>conn.execute(statement, values)<EOL><DEDENT>except sqlite3.DatabaseError as ex:<EOL><INDENT>if '<STR_LIT>' in str(ex):<EOL><INDENT>high = guess<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>else:<EOL><INDENT>low = guess<EOL><DEDENT><DEDENT>conn.close()<EOL>with open(SQLITE_VARIABLE_FILE_CACHE, '<STR_LIT:w>') as file:<EOL><INDENT>file.write(str(low))<EOL><DEDENT>", "docstring": "SQLite has a limit on the max number of variables allowed for parameter substitution. This limit is usually 999, but\ncan be compiled to a different number. This function calculates what the max is for the sqlite version running on the device.\nWe use the calculated value to chunk our SQL bulk insert statements when deserializing from the store to the app layer.", "id": "f6276:m0"}
{"signature": "def _self_referential_fk(klass_model):", "body": "for f in klass_model._meta.concrete_fields:<EOL><INDENT>if f.related_model:<EOL><INDENT>if issubclass(klass_model, f.related_model):<EOL><INDENT>return f.attname<EOL><DEDENT><DEDENT><DEDENT>return None<EOL>", "docstring": "Return whether this model has a self ref FK, and the name for the field", "id": "f6277:m0"}
{"signature": "def serialize_into_store(self, filter=None):", "body": "_serialize_into_store(self.profile, filter=filter)<EOL>", "docstring": "Takes data from app layer and serializes the models into the store.", "id": "f6277:c0:m1"}
{"signature": "@receiver(post_delete)<EOL>def add_to_deleted_models(sender, instance=None, *args, **kwargs):", "body": "if issubclass(sender, SyncableModel):<EOL><INDENT>instance._update_deleted_models()<EOL><DEDENT>", "docstring": "Whenever a model is deleted, we record its ID in a separate model for tracking purposes. During serialization, we will mark\nthe model as deleted in the store.", "id": "f6280:m0"}
{"signature": "@transaction.atomic()<EOL>def _dequeue_into_store(transfersession):", "body": "with connection.cursor() as cursor:<EOL><INDENT>DBBackend._dequeuing_delete_rmcb_records(cursor, transfersession.id)<EOL>DBBackend._dequeuing_delete_buffered_records(cursor, transfersession.id)<EOL>current_id = InstanceIDModel.get_current_instance_and_increment_counter()<EOL>DBBackend._dequeuing_merge_conflict_buffer(cursor, current_id, transfersession.id)<EOL>DBBackend._dequeuing_merge_conflict_rmcb(cursor, transfersession.id)<EOL>DBBackend._dequeuing_update_rmcs_last_saved_by(cursor, current_id, transfersession.id)<EOL>DBBackend._dequeuing_delete_mc_rmcb(cursor, transfersession.id)<EOL>DBBackend._dequeuing_delete_mc_buffer(cursor, transfersession.id)<EOL>DBBackend._dequeuing_insert_remaining_buffer(cursor, transfersession.id)<EOL>DBBackend._dequeuing_insert_remaining_rmcb(cursor, transfersession.id)<EOL>DBBackend._dequeuing_delete_remaining_rmcb(cursor, transfersession.id)<EOL>DBBackend._dequeuing_delete_remaining_buffer(cursor, transfersession.id)<EOL><DEDENT>if getattr(settings, '<STR_LIT>', True):<EOL><INDENT>_deserialize_from_store(transfersession.sync_session.profile)<EOL><DEDENT>", "docstring": "Takes data from the buffers and merges into the store and record max counters.", "id": "f6298:m6"}
{"signature": "def from_int(data):", "body": "if not isinstance(data, int) and not isinstance(data, long):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>res = []<EOL>while data > <NUM_LIT:0> or not res:<EOL><INDENT>for j in range(<NUM_LIT:5>):<EOL><INDENT>if not j % <NUM_LIT:2>:<EOL><INDENT>res += CONSONANTS[(data & <NUM_LIT>)]<EOL>data >>= <NUM_LIT:4><EOL><DEDENT>else:<EOL><INDENT>res += VOWELS[(data & <NUM_LIT>)]<EOL>data >>= <NUM_LIT:2><EOL><DEDENT><DEDENT>if data > <NUM_LIT:0>:<EOL><INDENT>res += '<STR_LIT:->'<EOL><DEDENT><DEDENT>res.reverse()<EOL>return '<STR_LIT>'.join(res)<EOL>", "docstring": ":params data: integer\n:returns: proquint made from input data\n:type data: int\n:rtype: string", "id": "f6299:m0"}
{"signature": "def to_int(data):", "body": "if not isinstance(data, basestring):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>res = <NUM_LIT:0><EOL>for part in data.split('<STR_LIT:->'):<EOL><INDENT>if len(part) != <NUM_LIT:5>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>for j in range(<NUM_LIT:5>):<EOL><INDENT>try:<EOL><INDENT>if not j % <NUM_LIT:2>:<EOL><INDENT>res <<= <NUM_LIT:4><EOL>res |= CONSONANTS.index(part[j])<EOL><DEDENT>else:<EOL><INDENT>res <<= <NUM_LIT:2><EOL>res |= VOWELS.index(part[j])<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(part[j]))<EOL><DEDENT><DEDENT><DEDENT>return res<EOL>", "docstring": ":params data: proquint\n:returns: proquint decoded into an integer\n:type data: string\n:rtype: int", "id": "f6299:m1"}
{"signature": "def generate():", "body": "return from_int(int(uuid.uuid4().hex[:<NUM_LIT:8>], <NUM_LIT:16>)).replace('<STR_LIT:->', '<STR_LIT>')<EOL>", "docstring": ":returns: proquint\n:rtype: int", "id": "f6299:m2"}
{"signature": "def add_syncable_models():", "body": "import django.apps<EOL>from morango.models import SyncableModel<EOL>from morango.manager import SyncableModelManager<EOL>from morango.query import SyncableModelQuerySet<EOL>model_list = []<EOL>for model_class in django.apps.apps.get_models():<EOL><INDENT>if issubclass(model_class, SyncableModel):<EOL><INDENT>name = model_class.__name__<EOL>if _multiple_self_ref_fk_check(model_class):<EOL><INDENT>raise InvalidMorangoModelConfiguration(\"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>from mptt import models<EOL>from morango.utils.morango_mptt import MorangoMPTTModel, MorangoMPTTTreeManager, MorangoTreeQuerySet<EOL>if issubclass(model_class, models.MPTTModel):<EOL><INDENT>if not issubclass(model_class, MorangoMPTTModel):<EOL><INDENT>raise InvalidMorangoModelConfiguration(\"<STR_LIT>\".format(name))<EOL><DEDENT>if not isinstance(model_class.objects, MorangoMPTTTreeManager):<EOL><INDENT>raise InvalidMPTTManager(\"<STR_LIT>\".format(name))<EOL><DEDENT>if not isinstance(model_class.objects.none(), MorangoTreeQuerySet):<EOL><INDENT>raise InvalidMPTTQuerySet(\"<STR_LIT>\".format(name))<EOL><DEDENT><DEDENT><DEDENT>except ImportError:<EOL><INDENT>pass<EOL><DEDENT>if not isinstance(model_class.objects, SyncableModelManager):<EOL><INDENT>raise InvalidSyncableManager(\"<STR_LIT>\".format(name))<EOL><DEDENT>if not isinstance(model_class.objects.none(), SyncableModelQuerySet):<EOL><INDENT>raise InvalidSyncableQueryset(\"<STR_LIT>\".format(name))<EOL><DEDENT>if model_class._meta.many_to_many:<EOL><INDENT>raise UnsupportedFieldType(\"<STR_LIT>\")<EOL><DEDENT>if not hasattr(model_class, '<STR_LIT>'):<EOL><INDENT>raise InvalidMorangoModelConfiguration(\"<STR_LIT>\".format(name))<EOL><DEDENT>if not hasattr(model_class, '<STR_LIT>'):<EOL><INDENT>raise InvalidMorangoModelConfiguration(\"<STR_LIT>\".format(name))<EOL><DEDENT>profile = model_class.morango_profile<EOL>_profile_models[profile] = _profile_models.get(profile, [])<EOL>if 
model_class.morango_model_name is not None:<EOL><INDENT>_insert_model_into_profile_dict(model_class, profile)<EOL><DEDENT><DEDENT><DEDENT>for profile, model_list in iteritems(_profile_models):<EOL><INDENT>syncable_models_dict = OrderedDict()<EOL>for model_class in model_list:<EOL><INDENT>syncable_models_dict[model_class.morango_model_name] = model_class<EOL><DEDENT>_profile_models[profile] = syncable_models_dict<EOL><DEDENT>", "docstring": "Per profile, adds each model to a dictionary mapping the morango model name to its model class.\nWe sort by ForeignKey dependencies to safely sync data.", "id": "f6300:m3"}
{"signature": "def calculate_uuid(self):", "body": "<EOL>if self.uuid_input_fields is None:<EOL><INDENT>raise NotImplementedError(\"\"\"<STR_LIT>\"\"\")<EOL><DEDENT>if self.uuid_input_fields == \"<STR_LIT>\":<EOL><INDENT>return uuid.uuid4().hex<EOL><DEDENT>assert isinstance(self.uuid_input_fields, tuple), \"<STR_LIT>\"<EOL>hashable_input_vals = []<EOL>for field in self.uuid_input_fields:<EOL><INDENT>new_value = getattr(self, field)<EOL>if new_value:<EOL><INDENT>hashable_input_vals.append(str(new_value))<EOL><DEDENT><DEDENT>hashable_input = \"<STR_LIT::>\".join(hashable_input_vals)<EOL>if not hashable_input:<EOL><INDENT>return uuid.uuid4().hex<EOL><DEDENT>return sha2_uuid(hashable_input)<EOL>", "docstring": "Should return a 32-digit hex string for a UUID that is calculated as a function of a set of fields from the model.", "id": "f6301:c1:m0"}
{"signature": "def _bulk_insert_into_app_models(self, cursor, app_model, fields, db_values, placeholder_list):", "body": "<EOL>num_of_rows_able_to_insert = self.SQLITE_MAX_VARIABLE_NUMBER // len(fields)<EOL>num_of_values_able_to_insert = num_of_rows_able_to_insert * len(fields)<EOL>value_chunks = [db_values[x:x + num_of_values_able_to_insert] for x in range(<NUM_LIT:0>, len(db_values), num_of_values_able_to_insert)]<EOL>placeholder_chunks = [placeholder_list[x: x + num_of_rows_able_to_insert] for x in range(<NUM_LIT:0>, len(placeholder_list), num_of_rows_able_to_insert)]<EOL>fields = str(tuple(str(f.attname) for f in fields)).replace(\"<STR_LIT:'>\", '<STR_LIT>')<EOL>for values, params in zip(value_chunks, placeholder_chunks):<EOL><INDENT>placeholder_str = '<STR_LIT:U+002CU+0020>'.join(params).replace(\"<STR_LIT:'>\", '<STR_LIT>')<EOL>insert = \"\"\"<STR_LIT>\"\"\".format(app_model=app_model, fields=fields, placeholder_str=placeholder_str)<EOL>cursor.execute(insert, values)<EOL><DEDENT>", "docstring": "Example query:\n`REPLACE INTO model (F1,F2,F3) VALUES (%s, %s, %s), (%s, %s, %s), (%s, %s, %s)`\nwhere values=[1,2,3,4,5,6,7,8,9]", "id": "f6302:c0:m1"}
{"signature": "def authenticate_credentials(self, userargs, password, request=None):", "body": "credentials = {<EOL>'<STR_LIT:password>': password<EOL>}<EOL>if \"<STR_LIT:=>\" not in userargs:<EOL><INDENT>credentials[get_user_model().USERNAME_FIELD] = userargs<EOL><DEDENT>else:<EOL><INDENT>for arg in userargs.split(\"<STR_LIT:&>\"):<EOL><INDENT>key, val = arg.split(\"<STR_LIT:=>\")<EOL>credentials[key] = val<EOL><DEDENT><DEDENT>user = authenticate(**credentials)<EOL>if user is None:<EOL><INDENT>raise exceptions.AuthenticationFailed('<STR_LIT>')<EOL><DEDENT>if not user.is_active:<EOL><INDENT>raise exceptions.AuthenticationFailed('<STR_LIT>')<EOL><DEDENT>return (user, None)<EOL>", "docstring": "Authenticate the userargs and password against Django auth backends.\nThe \"userargs\" string may be just the username, or a querystring-encoded set of params.", "id": "f6314:c0:m0"}
{"signature": "def calculate_source_id(self):", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Should return a string that uniquely defines the model instance or `None` for a random uuid.", "id": "f6317:c16:m8"}
{"signature": "def calculate_partition(self):", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Should return a string specifying this model instance's partition, using `self.ID_PLACEHOLDER` in place of its own ID, if needed.", "id": "f6317:c16:m9"}
{"signature": "def _deserialize_store_model(self, fk_cache):", "body": "klass_model = _profile_models[self.profile][self.model_name]<EOL>if self.deleted:<EOL><INDENT>if self.hard_deleted:<EOL><INDENT>try:<EOL><INDENT>klass_model.objects.get(id=self.id).delete(hard_delete=True)<EOL><DEDENT>except klass_model.DoesNotExist:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>klass_model.objects.filter(id=self.id).delete()<EOL><DEDENT>return None<EOL><DEDENT>else:<EOL><INDENT>app_model = klass_model.deserialize(json.loads(self.serialized))<EOL>app_model._morango_source_id = self.source_id<EOL>app_model._morango_partition = self.partition<EOL>app_model._morango_dirty_bit = False<EOL>try:<EOL><INDENT>app_model.cached_clean_fields(fk_cache)<EOL>return app_model<EOL><DEDENT>except exceptions.ValidationError as e:<EOL><INDENT>logger.warn(\"<STR_LIT>\".format(model=klass_model.__name__, id=app_model.id, error=e))<EOL>fk_ids = [getattr(app_model, field.attname) for field in app_model._meta.fields if isinstance(field, ForeignKey)]<EOL>for fk_id in fk_ids:<EOL><INDENT>try:<EOL><INDENT>st_model = Store.objects.get(id=fk_id)<EOL>if st_model.deleted:<EOL><INDENT>if st_model.hard_deleted:<EOL><INDENT>app_model._update_hard_deleted_models()<EOL><DEDENT>app_model._update_deleted_models()<EOL>return None<EOL><DEDENT><DEDENT>except Store.DoesNotExist:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>raise e<EOL><DEDENT><DEDENT>", "docstring": "When deserializing a store model, we look at the deleted flags to know if we should delete the app model.\nUpon loading the app model in memory we validate the app models fields, if any errors occurs we follow\nforeign key relationships to see if the related model has been deleted to propagate that deletion to the target app model.\nWe return:\nNone => if the model was deleted successfully\nmodel => if the model validates successfully", "id": "f6317:c10:m0"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def _to(self, handle, fs):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Save the given FS to the given stream handle.\n\n        Parameters:\n            handle (`io.IOBase`): a writable stream in which to write\n                the filesystem in an archive.\n            fs (`fs.base.FS`): the filesystem to save.", "id": "f6330:c0:m4"}
{"signature": "def to_stream(self, fs):", "body": "if self.overwrite: <EOL><INDENT>fd, temp = tempfile.mkstemp()<EOL>os.close(fd)<EOL>self._to(temp, fs)<EOL>self.output.seek(self.initial_position)<EOL>with open(temp, '<STR_LIT:rb>') as f:<EOL><INDENT>shutil.copyfileobj(f, self.output)<EOL><DEDENT>os.remove(temp)<EOL><DEDENT>else:<EOL><INDENT>self._to(self.output, fs)<EOL><DEDENT>", "docstring": "Save the given FS, considering ``self.output`` as a stream.\n\n        Parameters:\n            fs (`fs.base.FS`): the filesystem to save in the archive.", "id": "f6330:c0:m3"}
{"signature": "def to_file(self, fs):", "body": "if self.overwrite: <EOL><INDENT>tmp = '<STR_LIT:.>'.join([self.output, '<STR_LIT>'])<EOL>self._to(tmp, fs)<EOL>shutil.move(tmp, self.output)<EOL><DEDENT>else:<EOL><INDENT>self._to(self.output, fs)<EOL><DEDENT>", "docstring": "Save the given FS, considering ``self.output`` as a filename.\n\n        Parameters:\n            fs (`fs.base.FS`): the filesystem to save in the archive.", "id": "f6330:c0:m2"}
{"signature": "def __init__(self, handle, **options):", "body": "super(ArchiveReadFS, self).__init__()<EOL>if isinstance(handle, six.binary_type):<EOL><INDENT>handle = fsdecode(fspath(handle))<EOL><DEDENT>if isinstance(handle, six.text_type):<EOL><INDENT>_path = os.path.expanduser(os.path.expandvars(handle))<EOL>_path = os.path.normpath(os.path.abspath(_path))<EOL>if os.path.exists(_path) and os.access(_path, os.R_OK):<EOL><INDENT>self._close_handle = True<EOL>self._handle = open(_path, '<STR_LIT:rb>')<EOL><DEDENT><DEDENT>elif hasattr(handle, '<STR_LIT>'):<EOL><INDENT>if handle.readable() and handle.seekable():<EOL><INDENT>self._close_handle = options.get('<STR_LIT>', True)<EOL>self._handle = handle<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise errors.CreateFailed(\"<STR_LIT>\".format(handle))<EOL><DEDENT>", "docstring": "Create a new archive reader filesystem.\n\n        Parameters:\n            handle (`io.IOBase` or `str`): a filename or a readable\n                file-like object storing the archive to read\n\n        Keyword Arguments:\n            close_handle (`boolean`): if ``True``, close the handle\n                when the filesystem is closed. **[default: True]**", "id": "f6330:c1:m0"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def compress(self, handle, fs):<DEDENT>", "body": "pass<EOL>", "docstring": "Compress ``fs`` and write the resulting archive to ``handle``.", "id": "f6331:c0:m1"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def iter_dirs(self, handle):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Return an iterator over the directories in the archive in ``handle``.", "id": "f6331:c1:m5"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def remove_archive(self, handle):<DEDENT>", "body": "pass<EOL>", "docstring": "Remove the archive in ``handle``.", "id": "f6331:c0:m2"}
{"signature": "def make_source_fs(self):", "body": "return open_fs('<STR_LIT>')<EOL>", "docstring": "Create the source filesystem.", "id": "f6331:c1:m9"}
{"signature": "def open_archive(fs_url, archive):", "body": "it = pkg_resources.iter_entry_points('<STR_LIT>')<EOL>entry_point = next((ep for ep in it if archive.endswith(ep.name)), None)<EOL>if entry_point is None:<EOL><INDENT>raise UnsupportedProtocol(<EOL>'<STR_LIT>'.format(archive))<EOL><DEDENT>try:<EOL><INDENT>archive_opener = entry_point.load()<EOL><DEDENT>except pkg_resources.DistributionNotFound as df: <EOL><INDENT>six.raise_from(UnsupportedProtocol(<EOL>'<STR_LIT>'.format(entry_point.name, df.req)), None)<EOL><DEDENT>try:<EOL><INDENT>binfile = None<EOL>archive_fs = None<EOL>fs = open_fs(fs_url)<EOL>if issubclass(archive_opener, base.ArchiveFS):<EOL><INDENT>try:<EOL><INDENT>binfile = fs.openbin(archive, '<STR_LIT>')<EOL><DEDENT>except errors.ResourceNotFound:<EOL><INDENT>binfile = fs.openbin(archive, '<STR_LIT:w>')<EOL><DEDENT>except errors.ResourceReadOnly:<EOL><INDENT>binfile = fs.openbin(archive, '<STR_LIT:r>')<EOL>archive_opener = archive_opener._read_fs_cls<EOL><DEDENT><DEDENT>elif issubclass(archive_opener, base.ArchiveReadFS):<EOL><INDENT>binfile = fs.openbin(archive, '<STR_LIT:r>')<EOL><DEDENT>if not hasattr(binfile, '<STR_LIT:name>'):<EOL><INDENT>binfile.name = basename(archive)<EOL><DEDENT>archive_fs = archive_opener(binfile)<EOL><DEDENT>except Exception:<EOL><INDENT>getattr(archive_fs, '<STR_LIT>', lambda: None)()<EOL>getattr(binfile, '<STR_LIT>', lambda: None)()<EOL>raise<EOL><DEDENT>else:<EOL><INDENT>return archive_fs<EOL><DEDENT>", "docstring": "Open an archive on a filesystem.\n\n    This function tries to mimick the behaviour of `fs.open_fs` as closely\n    as possible: it accepts either a FS URL or a filesystem instance, and\n    will close all resources it had to open.\n\n    Arguments:\n        fs_url (FS or text_type): a FS URL, or a filesystem\n            instance, where the archive file is located.\n        archive (text_type): the path to the archive file on the\n            given filesystem.\n\n    Raises:\n        
`fs.opener._errors.Unsupported`: when the archive type is not supported\n            (either the file extension is unknown or the opener requires unmet\n            dependencies).\n\n    Example:\n        >>> from fs.archive import open_archive\n        >>> with open_archive('mem://', 'test.tar.gz') as archive_fs:\n        ...     type(archive_fs)\n        <class 'fs.archive.tarfs.TarFS'>\n\n    Hint:\n        This function finds the entry points defined in group\n        ``fs.archive.open_archive``, using the names of the entry point\n        as the registered extension.", "id": "f6333:m0"}
{"signature": "def iso_name_increment(name, is_dir=False, max_length=<NUM_LIT:8>):", "body": "<EOL>if not is_dir and '<STR_LIT:.>' in name:<EOL><INDENT>name, ext = name.rsplit('<STR_LIT:.>')<EOL>ext = '<STR_LIT>'.format(ext)<EOL><DEDENT>else:<EOL><INDENT>ext = '<STR_LIT>'<EOL><DEDENT>for position, char in reversed(list(enumerate(name))):<EOL><INDENT>if char not in string.digits:<EOL><INDENT>break<EOL><DEDENT><DEDENT>base, tag = name[:position+<NUM_LIT:1>], name[position+<NUM_LIT:1>:]<EOL>tag = str(int(tag or <NUM_LIT:0>) + <NUM_LIT:1>)<EOL>if len(tag) + len(base) > max_length:<EOL><INDENT>base = base[:max_length - len(tag)]<EOL><DEDENT>return '<STR_LIT>'.join([base, tag, ext])<EOL>", "docstring": "Increment an ISO name to avoid name collision.\n\n    Example:\n        >>> iso_name_increment('foo.txt')\n        'foo1.txt'\n        >>> iso_name_increment('bar10')\n        'bar11'\n        >>> iso_name_increment('bar99', max_length=5)\n        'ba100'", "id": "f6339:m1"}
{"signature": "def iso_path_slugify(path, path_table, is_dir=False, strict=True):", "body": "<EOL>parent, base = split(path)<EOL>slug_parent = path_table[parent]<EOL>if is_dir:<EOL><INDENT>slug_base = iso_name_slugify(base)[:<NUM_LIT:8>]<EOL><DEDENT>else:<EOL><INDENT>name, ext = base.rsplit('<STR_LIT:.>', <NUM_LIT:1>) if '<STR_LIT:.>' in base else (base, '<STR_LIT>')<EOL>slug_base = '<STR_LIT:.>'.join([iso_name_slugify(name)[:<NUM_LIT:8>], ext])<EOL><DEDENT>if strict:<EOL><INDENT>slug_base = slug_base.upper()<EOL><DEDENT>slugs = set(path_table.values())<EOL>path_table[path] = slug = join(slug_parent, slug_base)<EOL>while slug in slugs:<EOL><INDENT>slug_base = iso_name_increment(slug_base, is_dir)<EOL>path_table[path] = slug = join(slug_parent, slug_base)<EOL><DEDENT>return slug<EOL>", "docstring": "Slugify a path, maintaining a map with the previously slugified paths.\n\n    The path table is used to prevent slugified names from collisioning,\n    using the `iso_name_increment` function to deduplicate slugs.\n\n    Example:\n        >>> path_table = {'/': '/'}\n        >>> iso_path_slugify('/\u00e9bc.txt', path_table)\n        '/_BC.TXT'\n        >>> iso_path_slugify('/\u00e0bc.txt', path_table)\n        '/_BC2.TXT'", "id": "f6339:m2"}
{"signature": "def unique(iterable, key=None):", "body": "seen = set()<EOL>seen_add = seen.add<EOL>if key is None:<EOL><INDENT>for element in filterfalse(seen.__contains__, iterable):<EOL><INDENT>seen_add(element)<EOL>yield element<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for element in iterable:<EOL><INDENT>k = key(element)<EOL>if k not in seen:<EOL><INDENT>seen_add(k)<EOL>yield element<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Yield unique elements, preserving order.", "id": "f6340:m0"}
{"signature": "async def skip(self):", "body": "await self.play()<EOL>", "docstring": "Plays the next track in the queue, if any.", "id": "f6342:c2:m17"}
{"signature": "async def disconnect(self):", "body": "if not self.is_connected:<EOL><INDENT>return<EOL><DEDENT>await self.stop()<EOL>ws = self._lavalink.bot._connection._get_websocket(int(self.guild_id))<EOL>await ws.voice_state(self.guild_id, None)<EOL>", "docstring": "Disconnects from the voice channel, if any.", "id": "f6342:c2:m5"}
{"signature": "async def set_pause(self, pause: bool):", "body": "await self._lavalink.ws.send(op='<STR_LIT>', guildId=self.guild_id, pause=pause)<EOL>self.paused = pause<EOL>", "docstring": "Sets the player's paused state.", "id": "f6342:c2:m18"}
{"signature": "def clear(self):", "body": "self._players.clear()<EOL>", "docstring": "Removes all of the players from the cache.", "id": "f6342:c3:m9"}
{"signature": "async def stop(self):", "body": "await self._lavalink.ws.send(op='<STR_LIT>', guildId=self.guild_id)<EOL>self.current = None<EOL>", "docstring": "Stops the player, if playing.", "id": "f6342:c2:m16"}
{"signature": "async def play(self, track_index: int = <NUM_LIT:0>, ignore_shuffle: bool = False):", "body": "if self.repeat and self.current:<EOL><INDENT>self.queue.append(self.current)<EOL><DEDENT>self.previous = self.current<EOL>self.current = None<EOL>self.position = <NUM_LIT:0><EOL>self.paused = False<EOL>if not self.queue:<EOL><INDENT>await self.stop()<EOL>await self._lavalink.dispatch_event(QueueEndEvent(self))<EOL><DEDENT>else:<EOL><INDENT>if self.shuffle and not ignore_shuffle:<EOL><INDENT>track = self.queue.pop(randrange(len(self.queue)))<EOL><DEDENT>else:<EOL><INDENT>track = self.queue.pop(min(track_index, len(self.queue) - <NUM_LIT:1>))<EOL><DEDENT>self.current = track<EOL>await self._lavalink.ws.send(op='<STR_LIT>', guildId=self.guild_id, track=track.track)<EOL>await self._lavalink.dispatch_event(TrackStartEvent(self, track))<EOL><DEDENT>", "docstring": "Plays the first track in the queue, if any or plays a track from the specified index in the queue.", "id": "f6342:c2:m12"}
{"signature": "def find(self, predicate):", "body": "found = self.find_all(predicate)<EOL>return found[<NUM_LIT:0>] if found else None<EOL>", "docstring": "Returns the first player in the list based on the given filter predicate. Could be None.", "id": "f6342:c3:m5"}
{"signature": "def __init__(self, lavalink, player):", "body": "if not issubclass(player, BasePlayer):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.lavalink = lavalink<EOL>self._player = player<EOL>self._players = {}<EOL>", "docstring": "Instantiates a Player Manager.\n\n:param lavalink:\n    Must be a lavalink.Client object.\n:param player:\n    Must implement lavalink.BasePlayer.", "id": "f6342:c3:m0"}
{"signature": "def add_at(self, index: int, requester: int, track: dict):", "body": "self.queue.insert(min(index, len(self.queue) - <NUM_LIT:1>), AudioTrack().build(track, requester))<EOL>", "docstring": "Adds a track at a specific index in the queue.", "id": "f6342:c2:m11"}
{"signature": "def remove(self, guild_id):", "body": "if guild_id in self._players:<EOL><INDENT>self._players[guild_id].cleanup()<EOL>del self._players[guild_id]<EOL><DEDENT>", "docstring": "Removes a player from the current players.", "id": "f6342:c3:m8"}
{"signature": "def __contains__(self, item):", "body": "return item in self._players<EOL>", "docstring": "Returns the presence of a player in the cache.", "id": "f6342:c3:m4"}
{"signature": "@property<EOL><INDENT>def connected_channel(self):<DEDENT>", "body": "if not self.channel_id:<EOL><INDENT>return None<EOL><DEDENT>return self._lavalink.bot.get_channel(int(self.channel_id))<EOL>", "docstring": "Returns the voice channel the player is connected to.", "id": "f6342:c2:m3"}
{"signature": "@property<EOL><INDENT>def is_playing(self):<DEDENT>", "body": "return self.connected_channel is not None and self.current is not None<EOL>", "docstring": "Returns the player's track state.", "id": "f6342:c2:m1"}
{"signature": "def add(self, requester: int, track: dict):", "body": "self.queue.append(AudioTrack().build(track, requester))<EOL>", "docstring": "Adds a track to the queue.", "id": "f6342:c2:m9"}
{"signature": "def delete(self, key: object):", "body": "try:<EOL><INDENT>del self._user_data[key]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Removes an item from the the stored user data.", "id": "f6342:c2:m8"}
{"signature": "async def set_volume(self, vol: int):", "body": "if self._lavalink._server_version <= <NUM_LIT:2>:<EOL><INDENT>self.volume = max(min(vol, <NUM_LIT>), <NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>self.volume = max(min(vol, <NUM_LIT:1000>), <NUM_LIT:0>)<EOL><DEDENT>await self._lavalink.ws.send(op='<STR_LIT>', guildId=self.guild_id, volume=self.volume)<EOL>", "docstring": "Sets the player's volume (150% or 1000% limit imposed by lavalink depending on the version).", "id": "f6342:c2:m19"}
{"signature": "def get(self, guild_id):", "body": "if guild_id not in self._players:<EOL><INDENT>p = self._player(lavalink=self.lavalink, guild_id=guild_id)<EOL>self._players[guild_id] = p<EOL><DEDENT>return self._players[guild_id]<EOL>", "docstring": "Returns a player from the cache, or creates one if it does not exist.", "id": "f6342:c3:m7"}
{"signature": "async def send(self, **data):", "body": "if self._ws and self._ws.open:<EOL><INDENT>log.debug('<STR_LIT>'.format(str(data)))<EOL>await self._ws.send(json.dumps(data))<EOL><DEDENT>else:<EOL><INDENT>self._queue.append(data)<EOL>log.debug('<STR_LIT>'.format(str(data)))<EOL><DEDENT>", "docstring": "Sends data to the Lavalink server.", "id": "f6343:c0:m5"}
{"signature": "async def listen(self):", "body": "while not self._shutdown:<EOL><INDENT>try:<EOL><INDENT>data = json.loads(await self._ws.recv())<EOL><DEDENT>except websockets.ConnectionClosed as error:<EOL><INDENT>log.warning('<STR_LIT>'.format(str(error)))<EOL>for g in self._lavalink.players._players.copy().keys():<EOL><INDENT>ws = self._lavalink.bot._connection._get_websocket(int(g))<EOL>await ws.voice_state(int(g), None)<EOL><DEDENT>self._lavalink.players.clear()<EOL>if self._shutdown:<EOL><INDENT>break<EOL><DEDENT>if await self._attempt_reconnect():<EOL><INDENT>return<EOL><DEDENT>log.warning('<STR_LIT>')<EOL>break<EOL><DEDENT>op = data.get('<STR_LIT>', None)<EOL>log.debug('<STR_LIT>'.format(str(data)))<EOL>if not op:<EOL><INDENT>return log.debug('<STR_LIT>'.format(str(data)))<EOL><DEDENT>if op == '<STR_LIT>':<EOL><INDENT>log.debug('<STR_LIT>'.format(data['<STR_LIT:type>']))<EOL>player = self._lavalink.players[int(data['<STR_LIT>'])]<EOL>event = None<EOL>if data['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>event = TrackEndEvent(player, data['<STR_LIT>'], data['<STR_LIT>'])<EOL><DEDENT>elif data['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>event = TrackExceptionEvent(player, data['<STR_LIT>'], data['<STR_LIT:error>'])<EOL><DEDENT>elif data['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>event = TrackStuckEvent(player, data['<STR_LIT>'], data['<STR_LIT>'])<EOL><DEDENT>if event:<EOL><INDENT>await self._lavalink.dispatch_event(event)<EOL><DEDENT><DEDENT>elif op == '<STR_LIT>':<EOL><INDENT>await self._lavalink.update_state(data)<EOL><DEDENT>elif op == '<STR_LIT>':<EOL><INDENT>self._lavalink.stats._update(data)<EOL>await self._lavalink.dispatch_event(StatsUpdateEvent(self._lavalink.stats))<EOL><DEDENT><DEDENT>log.debug('<STR_LIT>')<EOL>await self._ws.close()<EOL>", "docstring": "Waits to receive a payload from the Lavalink server and processes it.", "id": "f6343:c0:m4"}
{"signature": "def format_time(time):", "body": "hours, remainder = divmod(time / <NUM_LIT:1000>, <NUM_LIT>)<EOL>minutes, seconds = divmod(remainder, <NUM_LIT>)<EOL>return '<STR_LIT>' % (hours, minutes, seconds)<EOL>", "docstring": "Formats the given time into HH:MM:SS.", "id": "f6344:m0"}
{"signature": "@property<EOL><INDENT>def thumbnail(self):<DEDENT>", "body": "if '<STR_LIT>' in self.uri:<EOL><INDENT>return \"<STR_LIT>\".format(self.identifier)<EOL><DEDENT>return \"<STR_LIT>\"<EOL>", "docstring": "Returns the video thumbnail. Could be an empty string.", "id": "f6345:c1:m1"}
{"signature": "def unregister_hook(self, func):", "body": "if func in self.hooks:<EOL><INDENT>self.hooks.remove(func)<EOL><DEDENT>", "docstring": "Unregisters a hook. For further explanation, please have a look at ``register_hook``.", "id": "f6348:c0:m2"}
{"signature": "async def get_tracks(self, query):", "body": "log.debug('<STR_LIT>'.format(query))<EOL>async with self.http.get(self.rest_uri + quote(query), headers={'<STR_LIT>': self.password}) as res:<EOL><INDENT>return await res.json(content_type=None)<EOL><DEDENT>", "docstring": "Returns a Dictionary containing search results for a given query.", "id": "f6348:c0:m5"}
{"signature": "async def update_state(self, data):", "body": "guild_id = int(data['<STR_LIT>'])<EOL>if guild_id in self.players:<EOL><INDENT>player = self.players.get(guild_id)<EOL>player.position = data['<STR_LIT:state>'].get('<STR_LIT>', <NUM_LIT:0>)<EOL>player.position_timestamp = data['<STR_LIT:state>']['<STR_LIT:time>']<EOL><DEDENT>", "docstring": "Updates a player's state when a payload with opcode ``playerUpdate`` is received.", "id": "f6348:c0:m4"}
{"signature": "async def dispatch_event(self, event):", "body": "log.debug('<STR_LIT>'.format(event.__class__.__name__, len(self.hooks)))<EOL>for hook in self.hooks:<EOL><INDENT>try:<EOL><INDENT>if asyncio.iscoroutinefunction(hook):<EOL><INDENT>await hook(event)<EOL><DEDENT>else:<EOL><INDENT>hook(event)<EOL><DEDENT><DEDENT>except Exception as e:  <EOL><INDENT>log.warning(<EOL>'<STR_LIT>'.format(hook.__name__, str(e)))<EOL><DEDENT><DEDENT>if isinstance(event, (TrackEndEvent, TrackExceptionEvent, TrackStuckEvent)) and event.player:<EOL><INDENT>await event.player.handle_event(event)<EOL><DEDENT>", "docstring": "Dispatches an event to all registered hooks.", "id": "f6348:c0:m3"}
{"signature": "@commands.command(name='<STR_LIT>')<EOL><INDENT>@commands.guild_only()<EOL>async def _find(self, ctx, *, query):<DEDENT>", "body": "if not query.startswith('<STR_LIT>') and not query.startswith('<STR_LIT>'):<EOL><INDENT>query = '<STR_LIT>' + query<EOL><DEDENT>tracks = await self.bot.lavalink.get_tracks(query)<EOL>if not tracks:<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT>tracks = tracks[:<NUM_LIT:10>]  <EOL>o = '<STR_LIT>'<EOL>for index, track in enumerate(tracks, start=<NUM_LIT:1>):<EOL><INDENT>track_title = track[\"<STR_LIT:info>\"][\"<STR_LIT:title>\"]<EOL>track_uri = track[\"<STR_LIT:info>\"][\"<STR_LIT>\"]<EOL>o += f'<STR_LIT>'<EOL><DEDENT>embed = discord.Embed(color=discord.Color.blurple(), description=o)<EOL>await ctx.send(embed=embed)<EOL>", "docstring": "Lists the first 10 search results from a given query.", "id": "f6349:c0:m14"}
{"signature": "@commands.command(name='<STR_LIT>')<EOL><INDENT>@commands.guild_only()<EOL>async def _stop(self, ctx):<DEDENT>", "body": "player = self.bot.lavalink.players.get(ctx.guild.id)<EOL>if not player.is_playing:<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT>player.queue.clear()<EOL>await player.stop()<EOL>await ctx.send('<STR_LIT>')<EOL>", "docstring": "Stops the player and clears its queue.", "id": "f6349:c0:m6"}
{"signature": "@commands.command(name='<STR_LIT>', aliases=['<STR_LIT>'])<EOL><INDENT>@commands.guild_only()<EOL>async def _pause(self, ctx):<DEDENT>", "body": "player = self.bot.lavalink.players.get(ctx.guild.id)<EOL>if not player.is_playing:<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT>if player.paused:<EOL><INDENT>await player.set_pause(False)<EOL>await ctx.send('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>await player.set_pause(True)<EOL>await ctx.send('<STR_LIT>')<EOL><DEDENT>", "docstring": "Pauses/Resumes the current track.", "id": "f6349:c0:m9"}
{"signature": "@commands.command(name='<STR_LIT>')<EOL><INDENT>@commands.guild_only()<EOL>async def _seek(self, ctx, *, time: str):<DEDENT>", "body": "player = self.bot.lavalink.players.get(ctx.guild.id)<EOL>if not player.is_playing:<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT>seconds = time_rx.search(time)<EOL>if not seconds:<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT>seconds = int(seconds.group()) * <NUM_LIT:1000><EOL>if time.startswith('<STR_LIT:->'):<EOL><INDENT>seconds *= -<NUM_LIT:1><EOL><DEDENT>track_time = player.position + seconds<EOL>await player.seek(track_time)<EOL>await ctx.send(f'<STR_LIT>')<EOL>", "docstring": "Seeks to a given position in a track.", "id": "f6349:c0:m4"}
{"signature": "@commands.command(name='<STR_LIT>', aliases=['<STR_LIT>', '<STR_LIT:n>', '<STR_LIT>'])<EOL><INDENT>@commands.guild_only()<EOL>async def _now(self, ctx):<DEDENT>", "body": "player = self.bot.lavalink.players.get(ctx.guild.id)<EOL>song = '<STR_LIT>'<EOL>if player.current:<EOL><INDENT>position = lavalink.Utils.format_time(player.position)<EOL>if player.current.stream:<EOL><INDENT>duration = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>duration = lavalink.Utils.format_time(player.current.duration)<EOL><DEDENT>song = f'<STR_LIT>'<EOL><DEDENT>embed = discord.Embed(color=discord.Color.blurple(), title='<STR_LIT>', description=song)<EOL>await ctx.send(embed=embed)<EOL>", "docstring": "Shows some stats about the currently playing song.", "id": "f6349:c0:m7"}
{"signature": "@commands.command(name='<STR_LIT>', aliases=['<STR_LIT:q>'])<EOL><INDENT>@commands.guild_only()<EOL>async def _queue(self, ctx, page: int = <NUM_LIT:1>):<DEDENT>", "body": "player = self.bot.lavalink.players.get(ctx.guild.id)<EOL>if not player.queue:<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT>items_per_page = <NUM_LIT:10><EOL>pages = math.ceil(len(player.queue) / items_per_page)<EOL>start = (page - <NUM_LIT:1>) * items_per_page<EOL>end = start + items_per_page<EOL>queue_list = '<STR_LIT>'<EOL>for index, track in enumerate(player.queue[start:end], start=start):<EOL><INDENT>queue_list += f'<STR_LIT>'<EOL><DEDENT>embed = discord.Embed(colour=discord.Color.blurple(),<EOL>description=f'<STR_LIT>')<EOL>embed.set_footer(text=f'<STR_LIT>')<EOL>await ctx.send(embed=embed)<EOL>", "docstring": "Shows the player's queue.", "id": "f6349:c0:m8"}
{"signature": "@commands.command(name='<STR_LIT>', aliases=['<STR_LIT>'])<EOL><INDENT>@commands.guild_only()<EOL>async def _previous(self, ctx):<DEDENT>", "body": "player = self.bot.lavalink.players.get(ctx.guild.id)<EOL>try:<EOL><INDENT>await player.play_previous()<EOL><DEDENT>except lavalink.NoPreviousTrack:<EOL><INDENT>await ctx.send('<STR_LIT>')<EOL><DEDENT>", "docstring": "Plays the previous song.", "id": "f6350:c0:m4"}
{"signature": "@commands.command(name='<STR_LIT>')<EOL><INDENT>@commands.guild_only()<EOL>async def _seek(self, ctx, *, time: str):<DEDENT>", "body": "player = self.bot.lavalink.players.get(ctx.guild.id)<EOL>if not player.is_playing:<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT>seconds = time_rx.search(time)<EOL>if not seconds:<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT>seconds = int(seconds.group()) * <NUM_LIT:1000><EOL>if time.startswith('<STR_LIT:->'):<EOL><INDENT>seconds *= -<NUM_LIT:1><EOL><DEDENT>track_time = player.position + seconds<EOL>await player.seek(track_time)<EOL>await ctx.send(f'<STR_LIT>')<EOL>", "docstring": "Seeks to a given position in a track.", "id": "f6350:c0:m7"}
{"signature": "@commands.command(name='<STR_LIT>', aliases=['<STR_LIT>'])<EOL><INDENT>@commands.guild_only()<EOL>async def _disconnect(self, ctx):<DEDENT>", "body": "player = self.bot.lavalink.players.get(ctx.guild.id)<EOL>if not player.is_connected:<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT>if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT>player.queue.clear()<EOL>await player.disconnect()<EOL>await ctx.send('<STR_LIT>')<EOL>", "docstring": "Disconnects the player from the voice channel and clears its queue.", "id": "f6350:c0:m18"}
{"signature": "@commands.command(name='<STR_LIT>', aliases=['<STR_LIT:q>'])<EOL><INDENT>@commands.guild_only()<EOL>async def _queue(self, ctx, page: int = <NUM_LIT:1>):<DEDENT>", "body": "player = self.bot.lavalink.players.get(ctx.guild.id)<EOL>if not player.queue:<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT>items_per_page = <NUM_LIT:10><EOL>pages = math.ceil(len(player.queue) / items_per_page)<EOL>start = (page - <NUM_LIT:1>) * items_per_page<EOL>end = start + items_per_page<EOL>queue_list = '<STR_LIT>'<EOL>for index, track in enumerate(player.queue[start:end], start=start):<EOL><INDENT>queue_list += f'<STR_LIT>'<EOL><DEDENT>embed = discord.Embed(colour=discord.Color.blurple(),<EOL>description=f'<STR_LIT>')<EOL>embed.set_footer(text=f'<STR_LIT>')<EOL>await ctx.send(embed=embed)<EOL>", "docstring": "Shows the player's queue.", "id": "f6350:c0:m11"}
{"signature": "@commands.command(name='<STR_LIT>', aliases=['<STR_LIT>', '<STR_LIT:n>', '<STR_LIT>'])<EOL><INDENT>@commands.guild_only()<EOL>async def _now(self, ctx):<DEDENT>", "body": "player = self.bot.lavalink.players.get(ctx.guild.id)<EOL>song = '<STR_LIT>'<EOL>if player.current:<EOL><INDENT>position = lavalink.Utils.format_time(player.position)<EOL>if player.current.stream:<EOL><INDENT>duration = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>duration = lavalink.Utils.format_time(player.current.duration)<EOL><DEDENT>song = f'<STR_LIT>'<EOL><DEDENT>embed = discord.Embed(color=discord.Color.blurple(),<EOL>title='<STR_LIT>', description=song)<EOL>await ctx.send(embed=embed)<EOL>", "docstring": "Shows some stats about the currently playing song.", "id": "f6350:c0:m10"}
{"signature": "@_playnow.before_invoke<EOL><INDENT>@_previous.before_invoke<EOL>@_play.before_invoke<EOL>async def ensure_voice(self, ctx):<DEDENT>", "body": "player = self.bot.lavalink.players.get(ctx.guild.id)<EOL>if not player.is_connected:<EOL><INDENT>if not ctx.author.voice or not ctx.author.voice.channel:<EOL><INDENT>await ctx.send('<STR_LIT>')<EOL>raise commands.CommandInvokeError(<EOL>'<STR_LIT>')<EOL><DEDENT>permissions = ctx.author.voice.channel.permissions_for(ctx.me)<EOL>if not permissions.connect or not permissions.speak:<EOL><INDENT>await ctx.send('<STR_LIT>')<EOL>raise commands.CommandInvokeError(<EOL>'<STR_LIT>')<EOL><DEDENT>player.store('<STR_LIT>', ctx.channel.id)<EOL>await player.connect(ctx.author.voice.channel.id)<EOL><DEDENT>else:<EOL><INDENT>if player.connected_channel.id != ctx.author.voice.channel.id:<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "A few checks to make sure the bot can join a voice channel.", "id": "f6350:c0:m19"}
{"signature": "@commands.command(name='<STR_LIT>', aliases=['<STR_LIT>'])<EOL><INDENT>@commands.guild_only()<EOL>async def _pause(self, ctx):<DEDENT>", "body": "player = self.bot.lavalink.players.get(ctx.guild.id)<EOL>if not player.is_playing:<EOL><INDENT>return await ctx.send('<STR_LIT>')<EOL><DEDENT>if player.paused:<EOL><INDENT>await player.set_pause(False)<EOL>await ctx.send('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>await player.set_pause(True)<EOL>await ctx.send('<STR_LIT>')<EOL><DEDENT>", "docstring": "Pauses/Resumes the current track.", "id": "f6350:c0:m12"}
{"signature": "def is_main_alive():", "body": "for t in threading.enumerate():<EOL><INDENT>if t.name == '<STR_LIT>':<EOL><INDENT>return t.is_alive()<EOL><DEDENT><DEDENT>print('<STR_LIT>')<EOL>return False<EOL>", "docstring": "is \uba54\uc778 \uc4f0\ub808\ub4dc alive?\n:rtype: bool", "id": "f6354:m0"}
{"signature": "def batchstack(size, iterable=None, rest=False):", "body": "def stack(data):<EOL><INDENT>import numpy as np<EOL>return map(np.vstack, data)<EOL><DEDENT>fn = batchzip(size, rest=rest) >> flow(stack)<EOL>return fn if iterable is None else fn(iterable)<EOL>", "docstring": "todo : add example\n:param size:\n:param iterable:\n:param rest:\n:return:", "id": "f6355:m15"}
{"signature": "def batchzip(size, iterable=None, rest=False):", "body": "fn = ibatch(size, rest=rest) >> zipflow<EOL>return fn if iterable is None else fn(iterable)<EOL>", "docstring": "todo : add example\n:param size:\n:param iterable:\n:param rest:\n:return:", "id": "f6355:m14"}
{"signature": "def ibatch(size, iterable=None, rest=False):", "body": "@iterflow<EOL>def exact_size(it):<EOL><INDENT>it = iter(it)<EOL>while True:<EOL><INDENT>yield [it.next() for _ in xrange(size)]<EOL><DEDENT><DEDENT>@iterflow<EOL>def at_most(it):<EOL><INDENT>it = iter(it)<EOL>while True:<EOL><INDENT>data = []<EOL>for _ in xrange(size):<EOL><INDENT>try:<EOL><INDENT>data.append(it.next())<EOL><DEDENT>except StopIteration:<EOL><INDENT>if data:<EOL><INDENT>yield data<EOL><DEDENT>raise StopIteration<EOL><DEDENT><DEDENT>yield data<EOL><DEDENT><DEDENT>ibatchit = at_most if rest else exact_size<EOL>return ibatchit if iterable is None else ibatchit(iterable)<EOL>", "docstring": "add example\n:param size:\n:param iterable:\n:param rest:\n:return:", "id": "f6355:m13"}
{"signature": "@iterflow<EOL>def forever(it):", "body": "while True:<EOL><INDENT>i = iter(it)<EOL>try:<EOL><INDENT>yield i.next()<EOL><DEDENT>except StopIteration:<EOL><INDENT>raise StopIteration<EOL><DEDENT>while True:<EOL><INDENT>try:<EOL><INDENT>yield i.next()<EOL><DEDENT>except StopIteration:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "forever\n    todo : add example", "id": "f6355:m7"}
{"signature": "def push_job(self, fun, *args, **kwargs):", "body": "assert callable(fun)<EOL>return self.put((fun, args, kwargs), block=True)<EOL>", "docstring": "put job if possible, non-blocking\n:param fun:\n:param args:\n:param kwargs:\n:return:", "id": "f6356:c0:m4"}
{"signature": "def put_job(self, fun, *args, **kwargs):", "body": "if not args and not kwargs and isinstance(fun, (tuple, list)):<EOL><INDENT>fun, args, kwargs = fun<EOL><DEDENT>assert callable(fun)<EOL>return self.put((fun, args, kwargs), block=False)<EOL>", "docstring": "put job if possible, non-blocking\n:param fun:\n:param args:\n:param kwargs:\n:return:", "id": "f6356:c0:m5"}
{"signature": "def optional_str(deco):", "body": "@functools.wraps(deco)<EOL>def dispatcher(*args, **kwargs):<EOL><INDENT>if not kwargs and len(args) == <NUM_LIT:1>and not isinstance(args[<NUM_LIT:0>], str)and args[<NUM_LIT:0>] is not None:<EOL><INDENT>decorator = deco()<EOL>return decorator(args[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>decorator = deco(*args, **kwargs)<EOL>return decorator<EOL><DEDENT><DEDENT>return dispatcher<EOL>", "docstring": "string 1\uac1c\ub9cc deco \uc778\uc790\ub85c \uc62c \uc218 \uc788\ub2e4.\n:param deco:\n:return:", "id": "f6357:m2"}
{"signature": "def optional(deco):", "body": "@functools.wraps(deco)<EOL>def dispatcher(*args, **kwargs):<EOL><INDENT>decorator = deco(**kwargs)<EOL>if args:<EOL><INDENT>assert len(args) == <NUM_LIT:1><EOL>return decorator(args[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>return decorator<EOL><DEDENT><DEDENT>return dispatcher<EOL>", "docstring": "decorator option\uc740 kwargs\ub9cc \ud5c8\uc6a9\n:param deco:\n:return:", "id": "f6357:m1"}
{"signature": "def default(fun, **kwdefault):", "body": "@functools.wraps(fun)<EOL>def wrapped(*args, **kwargs):<EOL><INDENT>merge = wrapped.default.copy()<EOL>merge.update(kwargs)<EOL>return fun(*args, **merge)<EOL><DEDENT>wrapped.default = kwdefault<EOL>return wrapped<EOL>", "docstring": "change default value for function\nex)\ndef sample(a, b=1, c=1):\n    print 'from sample:', a, b, c\n    return a, b, c\nfun = default(sample, b=4,c=5)\nprint fun.default  # get default value dictionary\nfun(1)  # print 1, 5, 5 and return\n\n:param fun:\n:param kwdefault:\n:return:", "id": "f6357:m5"}
{"signature": "def setup_once(initfn):", "body": "def wrap(method):<EOL><INDENT>finit = initfn.__name__<EOL>fnname = method.__name__<EOL>@functools.wraps(method)<EOL>def wrapped(self, *args, **kwargs):<EOL><INDENT>@functools.wraps(method)<EOL>def aftersetup(*a, **kw):<EOL><INDENT>return method(self, *a, **kw)<EOL><DEDENT>setupfn = getattr(self, finit)<EOL>setupfn(*args, **kwargs)<EOL>res = method(self, *args, **kwargs)<EOL>setattr(self, fnname, aftersetup)<EOL>return res<EOL><DEDENT>return wrapped<EOL><DEDENT>return wrap<EOL>", "docstring": "call class instance method for initial setup ::\n\n    class B(object):\n\n        def init(self, a):\n            print 'init call:', a\n\n        @setup_once(init)\n        def mycall(self, a):\n            print 'real call:', a\n\n    b = B()\n    b.mycall(222)\n    b.mycall(333)\n\n:param function initfn:\n:return: decorated method", "id": "f6357:m6"}
{"signature": "def spawn(f, *args, **kwargs):", "body": "if args or kwargs:<EOL><INDENT>return Spawn(f, *args, **kwargs)<EOL><DEDENT>@wraps(f)<EOL>def wrapped(*args, **kwargs):<EOL><INDENT>return Spawn(f, *args, **kwargs)<EOL><DEDENT>return wrapped<EOL>", "docstring": "decorator", "id": "f6358:m1"}
{"signature": "@staticmethod<EOL><INDENT>def from_dict(dic):<DEDENT>", "body": "return DictObj({k: DictObj.convert_ifdic(v) for k, v in dic.items()})<EOL>", "docstring": "recursive dict to dictobj \ucee8\ubc84\ud2b8\n:param dic:\n:return:", "id": "f6359:c0:m10"}
{"signature": "def __getnewargs__(self):", "body": "return tuple()<EOL>", "docstring": "pickling related", "id": "f6359:c0:m4"}
{"signature": "def __sub__(self, other):", "body": "return DictObj({k: v for k, v in self.items() if k not in other})<EOL>", "docstring": ":type other: dict\n:rtype: dictobj: other dict\uc5d0 \uc5c6\ub294 items\ub9cc \ub9ac\ud134", "id": "f6359:c0:m2"}
{"signature": "def intersect(self, other):", "body": "return DictObj({k: self[k] for k in self if k in other})<EOL>", "docstring": "self\uc640 other \ud0a4\uac00 \ub3d9\uc77c\ud55c \uc544\uc774\ud15c\uc758 dictobj\n:type other: dict\n:rtype: dictobj:", "id": "f6359:c0:m8"}
{"signature": "def append_this_package_path(depth=<NUM_LIT:1>):", "body": "from .caller import caller<EOL>logg.debug('<STR_LIT>', caller.modulename(depth + <NUM_LIT:1>))<EOL>c = caller.abspath(depth + <NUM_LIT:1>)<EOL>logg.debug('<STR_LIT>', c)<EOL>p = guess_package_path(dirname(c))<EOL>if p:<EOL><INDENT>logg.debug('<STR_LIT>', p)<EOL>append_sys_path(p)<EOL><DEDENT>else:<EOL><INDENT>logg.debug('<STR_LIT>', c)<EOL><DEDENT>", "docstring": "this_package.py \uc5d0\uc11c \uc0ac\uc6a9\nimport snipy.this_package", "id": "f6360:m3"}
{"signature": "def append_sys_path(p):", "body": "if p not in sys.path:<EOL><INDENT>sys.path.insert(<NUM_LIT:0>, p)<EOL><DEDENT>", "docstring": "append system path", "id": "f6360:m2"}
{"signature": "def guess_package_path(searchfrom):", "body": "from snipy.io import fileutil<EOL>current = searchfrom + '<STR_LIT:/>'<EOL>init_found = False<EOL>pack_found = False<EOL>while not init_found and current != '<STR_LIT:/>':<EOL><INDENT>current = os.path.dirname(current)<EOL>initfile = os.path.join(current, '<STR_LIT>')<EOL>init_found = os.path.exists(initfile)<EOL><DEDENT>if not init_found:<EOL><INDENT>searchfrom = dirname(searchfrom)<EOL>for folder in fileutil.listfolder(searchfrom):<EOL><INDENT>current = os.path.join(searchfrom, folder)<EOL>initfile = os.path.join(current, '<STR_LIT>')<EOL>init_found = os.path.exists(initfile)<EOL>if init_found:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>while init_found:<EOL><INDENT>current = os.path.dirname(current)<EOL>initfile = os.path.join(current, '<STR_LIT>')<EOL>init_found = os.path.exists(initfile)<EOL>pack_found = not init_found<EOL><DEDENT>return current if pack_found else None<EOL>", "docstring": "package path. return None if failed to guess", "id": "f6360:m0"}
{"signature": "def find_package_path(searchfrom):", "body": "current = searchfrom + '<STR_LIT:/>'<EOL>init_found = False<EOL>pack_found = False<EOL>while not init_found and current != '<STR_LIT:/>':<EOL><INDENT>current = os.path.dirname(current)<EOL>initfile = os.path.join(current, '<STR_LIT>')<EOL>init_found = os.path.exists(initfile)<EOL><DEDENT>while init_found:<EOL><INDENT>current = os.path.dirname(current)<EOL>initfile = os.path.join(current, '<STR_LIT>')<EOL>init_found = os.path.exists(initfile)<EOL>pack_found = not init_found<EOL><DEDENT>return current if pack_found else None<EOL>", "docstring": "package path. return None if failed to guess", "id": "f6360:m1"}
{"signature": "@classmethod<EOL><INDENT>def path(cls, depth=<NUM_LIT:1>):<DEDENT>", "body": "depth += cls.extra_depth<EOL>return inspect.stack()[depth][<NUM_LIT:1>]<EOL>", "docstring": "caller path (*.py)", "id": "f6362:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def abspath(cls, depth=<NUM_LIT:1>):<DEDENT>", "body": "return os.path.abspath(cls.path(depth + <NUM_LIT:1>))<EOL>", "docstring": "caller path (*.py)", "id": "f6362:c0:m1"}
{"signature": "def imbox(xy, w, h, angle=<NUM_LIT:0.0>, **kwargs):", "body": "from matplotlib.patches import Rectangle<EOL>return imbound(Rectangle, xy, w, h, angle, **kwargs)<EOL>", "docstring": "draw boundary box\n:param xy: start index xy (ji)\n:param w: width\n:param h: height\n:param angle:\n:param kwargs:\n:return:", "id": "f6367:m15"}
{"signature": "def imshow_grid(images, grid=None, showfun=None, **opt):", "body": "<EOL>showfun = showfun or plt.imshow<EOL>count = len(images)<EOL>grid = grid or grid_recommend(count, sorted(images[<NUM_LIT:0>].shape[:<NUM_LIT:2>]))<EOL>res = []<EOL>for i, img in enumerate(images):<EOL><INDENT>plt.subplot2grid(grid, (i % grid[<NUM_LIT:0>], i // grid[<NUM_LIT:0>]))<EOL>res.append(showfun(img.squeeze(), **opt))<EOL><DEDENT>return res<EOL>", "docstring": ":param images: nhwc\n:return:", "id": "f6367:m2"}
{"signature": "def plt_range(*args, **kwargs):", "body": "wait = kwargs.pop('<STR_LIT>', True)<EOL>if not wait:<EOL><INDENT>for i in progress(range(*args)):<EOL><INDENT>yield i<EOL><DEDENT>return<EOL><DEDENT>class _holder(object):<EOL><INDENT>pass<EOL><DEDENT>hold = _holder()<EOL>hold.i = <NUM_LIT:0><EOL>hold.done = False<EOL>def press(event):<EOL><INDENT>hold.i += -<NUM_LIT:1> if event.key == '<STR_LIT:left>' else <NUM_LIT:1><EOL>hold.i = <NUM_LIT:0> if hold.i < <NUM_LIT:0> else hold.i<EOL><DEDENT>def onclose(event):<EOL><INDENT>hold.done = True<EOL><DEDENT>fig = kwargs.pop('<STR_LIT>', None)<EOL>figsize = kwargs.pop('<STR_LIT>', None)<EOL>if fig is None:<EOL><INDENT>fig = plt.gcf()<EOL>if figsize:<EOL><INDENT>fig.set_size_inches(figsize)<EOL><DEDENT><DEDENT>elif isinstance(fig, (int, str)):<EOL><INDENT>if figsize:<EOL><INDENT>fig = plt.figure(fig, figsize=figsize)<EOL><DEDENT>else:<EOL><INDENT>fig = plt.figure(fig)<EOL><DEDENT><DEDENT>elif isinstance(fig, plt.Figure):<EOL><INDENT>if figsize:<EOL><INDENT>fig.set_size_inches(figsize)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError<EOL><DEDENT>onkey_fig(press, fig)<EOL>onclose_fig(onclose, fig)<EOL>ranges = range(*args)<EOL>l = len(ranges)<EOL>while hold.i < l:<EOL><INDENT>print('<STR_LIT>', ranges[hold.i])<EOL>yield ranges[hold.i]  <EOL>before = hold.i<EOL>while before == hold.i:<EOL><INDENT>while not fig.waitforbuttonpress(<NUM_LIT>):<EOL><INDENT>if hold.done:<EOL><INDENT>return<EOL><DEDENT><DEDENT>while fig.waitforbuttonpress(<NUM_LIT:0.1>):<EOL><INDENT>if hold.done:<EOL><INDENT>return<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "for i in plot_range(n):\n    plt.imshow(imgs[i])\n\nleft arrow yield prev value\nother key yield next value\n:param args:\n:return:", "id": "f6367:m3"}
{"signature": "def imslic(img, n_segments=<NUM_LIT:100>, aspect=None):", "body": "from skimage.segmentation import (slic, mark_boundaries)<EOL>from skimage.morphology import (dilation)<EOL>if img.ndim == <NUM_LIT:2> or img.ndim == <NUM_LIT:3> and img.shape[-<NUM_LIT:1>] == <NUM_LIT:1>:<EOL><INDENT>imz = np.stack([img, img, img], <NUM_LIT:2>)<EOL><DEDENT>else:<EOL><INDENT>imz = img<EOL><DEDENT>slics = slic(imz, n_segments=n_segments)<EOL>boundaries = mark_boundaries(imz, slics)<EOL>return plt.imshow(boundaries, aspect=aspect)<EOL>", "docstring": "slic args :\nn_segments=100, compactness=10., max_iter=10,\nsigma=0, spacing=None,\nmultichannel=True, convert2lab=None, enforce_connectivity=True,\nmin_size_factor=0.5, max_size_factor=3, slic_zero=False\n\nmark_boundaries args:\nlabel_img, color=(1, 1, 0), outline_color=None, mode='outer', background_label=0\n\nimshow args:\ncmap=None, norm=None, aspect=None, interpolation=None,\nalpha=None, vmin=None, vmax=None, origin=None,\nextent=None, shape=None, filternorm=1, filterrad=4.0,\nimlim=None, resample=None, url=None, hold=None, data=None,\n\n:param img:\n:param slicarg:\n:param slickw:\n:return:", "id": "f6367:m20"}
{"signature": "def plot_pause(timeout=None, msg='<STR_LIT>'):", "body": "if timeout is not None:<EOL><INDENT>print(msg or '<STR_LIT>'.format(timeout))<EOL>plt.waitforbuttonpress(timeout=timeout)<EOL>return True<EOL><DEDENT>print(msg or '<STR_LIT>')<EOL>while not plt.waitforbuttonpress(timeout=<NUM_LIT>):<EOL><INDENT>if not plt.get_fignums():<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return len(plt.get_fignums()) != <NUM_LIT:0><EOL>", "docstring": "todo : add some example\n:param timeout: wait time. if None, blocking\n:param msg:\n:return:", "id": "f6367:m5"}
{"signature": "def imshow_flat(images, grid=None, showfun=None, bfill=<NUM_LIT:1.0>, bsz=(<NUM_LIT:1>,<NUM_LIT:1>), **opt):", "body": "showfun = showfun or plt.imshow<EOL>count = len(images)<EOL>grid = grid or grid_recommend(count, ratio=sorted(images[<NUM_LIT:0>].shape[:<NUM_LIT:2>]))<EOL>flatted = flat_images(images, grid, bfill=bfill, bsz=bsz)<EOL>res = showfun(flatted, **opt)<EOL>plt.draw()<EOL>", "docstring": "imshow after applying flat_images\n:param images: [bhwc]\n:param grid: None for auto grid\n:param showfun: plt.imshow\n:param bfill: color for board fill\n:param bsz: size of board\n:param opt: option for showfun\n:return:", "id": "f6367:m8"}
{"signature": "def matshow(*args, **kwargs):", "body": "kwargs['<STR_LIT>'] = kwargs.pop('<STR_LIT>', '<STR_LIT:none>')<EOL>return plt.imshow(*args, **kwargs)<EOL>", "docstring": "imshow without interpolation like as matshow\n:param args:\n:param kwargs:\n:return:", "id": "f6367:m13"}
{"signature": "def imbound(clspatch, *args, **kwargs):", "body": "<EOL>c = kwargs.pop('<STR_LIT>', kwargs.get('<STR_LIT>', None))<EOL>kwargs.update(facecolor='<STR_LIT:none>', edgecolor=c)<EOL>return impatch(clspatch, *args, **kwargs)<EOL>", "docstring": ":param clspatch:\n:param args:\n:param kwargs:\n:return:", "id": "f6367:m17"}
{"signature": "def enum(name, *members, **withvalue):", "body": "if len(members) == <NUM_LIT:1>:<EOL><INDENT>if isinstance(members[<NUM_LIT:0>], str):<EOL><INDENT>members = members[<NUM_LIT:0>].split()<EOL><DEDENT>elif isinstance(members[<NUM_LIT:0>], (list, tuple)):<EOL><INDENT>members = members[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>dic = {v: v for v in members}<EOL>dic.update(withvalue)<EOL>return type(name, (Enum,), dic)<EOL>", "docstring": "class buider", "id": "f6368:m0"}
{"signature": "def imsize(fname):", "body": "from PIL import Image<EOL>im = Image.open(fname)<EOL>return im.size[<NUM_LIT:1>], im.size[<NUM_LIT:0>]<EOL>", "docstring": "return image size (height, width)\n:param fname:\n:return:", "id": "f6369:m30"}
{"signature": "def listfile(p):", "body": "try:<EOL><INDENT>for entry in scandir.scandir(p):<EOL><INDENT>if entry.is_file():<EOL><INDENT>yield entry.name<EOL><DEDENT><DEDENT><DEDENT>except OSError:<EOL><INDENT>return<EOL><DEDENT>", "docstring": "generator of list files in the path.\nfilenames only", "id": "f6369:m16"}
{"signature": "def listfolder(p):", "body": "for entry in scandir.scandir(p):<EOL><INDENT>if entry.is_dir():<EOL><INDENT>yield entry.name<EOL><DEDENT><DEDENT>", "docstring": "generator of list folder in the path.\nfolders only", "id": "f6369:m18"}
{"signature": "def tempfolder(prefix='<STR_LIT>'):", "body": "import uuid<EOL>p = prefix + str(uuid.uuid4())<EOL>d = tempdir()<EOL>tmpd = os.path.join(d, p)<EOL>return mkdir_if_not(tmpd, ispath=True)<EOL>", "docstring": "\uc784\uc2dc \ud3f4\ub354\ub97c \ub9cc\ub4e4\uc5b4\uc11c \ub9ac\ud134", "id": "f6369:m26"}
{"signature": "def any_match(fname, patterns, matchfun=None):", "body": "return any(fnmatches(fname, patterns, matchfun))<EOL>", "docstring": "ANY matches?\n:param str fname: file name\n:param list[str] patterns: list of filename pattern. see fnmatch.fnamtch\n:rtype: bool", "id": "f6369:m12"}
{"signature": "def findfolder(toppath, match='<STR_LIT:*>', exclude='<STR_LIT>'):", "body": "pred = _pred_pattern(match, exclude)<EOL>return (p for p in walkfolder(toppath, pred))<EOL>", "docstring": "recursively find folder path from toppath.\npatterns to decide to walk folder path or not\n:type toppath: str\n:type match: str or list(str)\n:type exclude: str or list(str)\n:rtype: generator for path str", "id": "f6369:m23"}
{"signature": "def _pred_pattern(match='<STR_LIT:*>', exclude='<STR_LIT>', patterntype='<STR_LIT>'):", "body": "m, x = match, exclude<EOL>if m == '<STR_LIT:*>':<EOL><INDENT>if not x:<EOL><INDENT>pred = lambda n: True<EOL><DEDENT>else:<EOL><INDENT>x = [x] if _is_str(x) else x<EOL>matcher = get_match_fun(x, patterntype)<EOL>pred = lambda n: not matcher(n)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>m = [m] if _is_str(m) else m<EOL>if not x:<EOL><INDENT>matcher = get_match_fun(m, patterntype)<EOL>pred = lambda n: matcher(n)<EOL><DEDENT>else:<EOL><INDENT>x = [x] if _is_str(x) else x<EOL>matcher_m = get_match_fun(m, patterntype)<EOL>matcher_x = get_match_fun(x, patterntype)<EOL>pred = lambda n: matcher_m(n) and not matcher_x(n)<EOL><DEDENT><DEDENT>return pred<EOL>", "docstring": "internal use", "id": "f6369:m22"}
{"signature": "def savefile(obj, filepath, compress=True):", "body": "try:<EOL><INDENT>import cPickle as pickle<EOL><DEDENT>except Exception:<EOL><INDENT>import pickle<EOL><DEDENT>import joblib<EOL>tmpfile = filepath + '<STR_LIT>'<EOL>mkdir_if_not(tmpfile)<EOL>if compress:<EOL><INDENT>joblib.dump(obj, tmpfile, compress=<NUM_LIT:3>, cache_size=<NUM_LIT:100>, protocol=pickle.HIGHEST_PROTOCOL)<EOL><DEDENT>else:<EOL><INDENT>joblib.dump(obj, tmpfile, compress=<NUM_LIT:0>)<EOL><DEDENT>os.rename(tmpfile, filepath)<EOL>return obj<EOL>", "docstring": "\ud30c\uc77c \uc788\uc73c\uba74 \ub36e\uc5b4\uc500\n:param obj:\n:param str filepath:\n:param compress:\n:return:", "id": "f6369:m5"}
{"signature": "def walkfolder(toppath, pred):", "body": "for entry in scandir.scandir(toppath):<EOL><INDENT>if not entry.is_dir() or not pred(entry.name):<EOL><INDENT>continue<EOL><DEDENT>yield entry.path<EOL>for p in walkfolder(entry.path, pred):<EOL><INDENT>yield p<EOL><DEDENT><DEDENT>", "docstring": "walk folder if pred(foldername) is True\n:type toppath: str\n:type pred: function(str) => bool", "id": "f6369:m24"}
{"signature": "def listfolderpath(p):", "body": "for entry in scandir.scandir(p):<EOL><INDENT>if entry.is_dir():<EOL><INDENT>yield entry.path<EOL><DEDENT><DEDENT>", "docstring": "generator of list folder in the path.\nfolders only", "id": "f6369:m19"}
{"signature": "def getch():", "body": "try:<EOL><INDENT>termios.tcsetattr(_fd, termios.TCSANOW, _new_settings)<EOL>ch = sys.stdin.read(<NUM_LIT:1>)<EOL><DEDENT>finally:<EOL><INDENT>termios.tcsetattr(_fd, termios.TCSADRAIN, _old_settings)<EOL><DEDENT>return ch<EOL>", "docstring": "get character. waiting for key", "id": "f6371:m9"}
{"signature": "def put(xy, *args):", "body": "cmd = [TermCursor.save, TermCursor.move(*xy), '<STR_LIT>'.join(args), TermCursor.restore]<EOL>write('<STR_LIT>'.join(cmd))<EOL>", "docstring": "put text on on screen\na tuple as first argument tells absolute position for the text\ndoes not change TermCursor position\nargs = list of optional position, formatting tokens and strings", "id": "f6371:m7"}
{"signature": "def writexy(xy, *args):", "body": "write(TermCursor.moveby(*xy) + '<STR_LIT>'.join(args))<EOL>", "docstring": "writes text on on screen\na tuple as first argument gives the relative position to current TermCursor position\ndoes change TermCursor position\nargs = list of optional position, formatting tokens and strings", "id": "f6371:m6"}
{"signature": "def getpassword(prompt=\"<STR_LIT>\"):", "body": "fd = sys.stdin.fileno()<EOL>old = termios.tcgetattr(fd)<EOL>new = termios.tcgetattr(fd)<EOL>new[<NUM_LIT:3>] &= ~termios.ECHO          <EOL>try:<EOL><INDENT>termios.tcsetattr(fd, termios.TCSADRAIN, new)<EOL>passwd = raw_input(prompt)<EOL><DEDENT>finally:<EOL><INDENT>termios.tcsetattr(fd, termios.TCSADRAIN, old)<EOL><DEDENT>return passwd<EOL>", "docstring": "get user input without echo", "id": "f6371:m8"}
{"signature": "@contextmanager<EOL>def tictoc(name='<STR_LIT>'):", "body": "t = time.time()<EOL>yield<EOL>logg.info('<STR_LIT>' % (name, time.time() - t))<EOL>", "docstring": "with tictoc('any string or not'):\n    print 'cool~~~'\ncool~~~\n2015-12-30 14:39:28,458 [INFO] tictoc Elapsed: 7.10487365723e-05 secs\n:param name: str", "id": "f6374:m2"}
{"signature": "def toc(t=None, name='<STR_LIT>'):", "body": "try:<EOL><INDENT>t = t or tic.last_tic_time<EOL><DEDENT>except AttributeError:<EOL><INDENT>logg.warn('<STR_LIT>')<EOL>return<EOL><DEDENT>elapsed = time.time() - t<EOL>logg.info('<STR_LIT>' % (name, elapsed))<EOL>return elapsed<EOL>", "docstring": "ex1)\ntic()  # save start time - time1\ntoc()  # print elapsed time from last calling tic()\ntoc()  # print elapsed time from last calling tic()\n\nex2)\nt0 = tic()  # simple\nt1 = tic()\ntoc(t1)  # print time from t1\ntoc(t0)  # print time from t0\n\n:param t: time: \uc2dc\uc791 \uc2dc\uac04 (tic()\uc758 \ub9ac\ud134 \uac12)\n:param name: str: \ucd9c\ub825\uc2dc \ud3ec\ud568\ud560 \ubb38\uc790 ['tictoc']", "id": "f6374:m1"}
{"signature": "def __call__(self, s):", "body": "return ansi_str_restore(s, self.fmt)<EOL>", "docstring": "make ansi string", "id": "f6377:c0:m1"}
{"signature": "def __add__(self, other):", "body": "return ansi_str(other, self.fmt)<EOL>", "docstring": "make ansi string (simple ver.)", "id": "f6377:c0:m2"}
{"signature": "def tuple_arg(fn):", "body": "@wraps(fn)<EOL>def wrapped(*args, **kwargs):<EOL><INDENT>args = map(tuplefy, args)<EOL>return fn(*args, **kwargs)<EOL><DEDENT>return wrapped<EOL>", "docstring": "fun(1,2) -> fun((1,), (2,))\ub85c\nf(1,2,3) =>  f((1,), (2,), (3,))\n:param fn:\n:return:", "id": "f6378:m4"}
{"signature": "def tuple_args(fn):", "body": "@wraps(fn)<EOL>def wrapped(*args, **kwargs):<EOL><INDENT>if len(args) == <NUM_LIT:1>:<EOL><INDENT>if isinstance(args[<NUM_LIT:0>], tuple):<EOL><INDENT>return fn(args[<NUM_LIT:0>], **kwargs)<EOL><DEDENT>elif isinstance(args[<NUM_LIT:0>], list):<EOL><INDENT>return fn(tuple(args[<NUM_LIT:0>]), **kwargs)<EOL><DEDENT><DEDENT>return fn(args, **kwargs)<EOL><DEDENT>return wrapped<EOL>", "docstring": "args \ud30c\uc2f1 \uc720\ud2f8 function\nfun(p1, p2, ...pn, **kwargs) or fun([p1, p2, ..], **kwargs)\nex) \uc0d8\ud50c::\n\n    @tuple_arg\n    def f(args, **kwargs):\n        for d in args:\n            print d\n    f(1,2,3) =>  f([1,2,3])\n:param function fn:\n:return:", "id": "f6378:m5"}
{"signature": "def functional(ifunctional):", "body": "@wraps(ifunctional)<EOL>def wrapper(fn, *args, **kw):<EOL><INDENT>fn = ifunctional(fn)<EOL>if args or kw:<EOL><INDENT>return fn(*args, **kw)<EOL><DEDENT>else:<EOL><INDENT>return fn<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "fun(fn) -> function or\nfun(fn, args...) -> call of fn(args...)\n:param ifunctional: f\n:return: decorated function", "id": "f6378:m0"}
{"signature": "def interrupt_guard(msg='<STR_LIT>', reraise=True):", "body": "def echo():<EOL><INDENT>print(msg)<EOL><DEDENT>return on_interrupt(echo, reraise=reraise)<EOL>", "docstring": "context for guard keyboardinterrupt\nex)\nwith interrupt_guard('need long time'):\n    critical_work_to_prevent()\n\n:param str msg: message to print when interrupted\n:param reraise: re-raise or not when exit\n:return: context", "id": "f6378:m11"}
{"signature": "def optional_str(deco):", "body": "@wraps(deco)<EOL>def dispatcher(*args, **kwargs):<EOL><INDENT>if not kwargs and len(args) == <NUM_LIT:1> and not isinstance(args[<NUM_LIT:0>], str)and args[<NUM_LIT:0>] is not None:<EOL><INDENT>decorator = deco()<EOL>return decorator(args[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>decorator = deco(*args, **kwargs)<EOL>return decorator<EOL><DEDENT><DEDENT>return dispatcher<EOL>", "docstring": "string 1\uac1c\ub9cc deco \uc778\uc790\ub85c \uc624\uac70\ub098 \uc5c6\uac70\ub098.\n:param deco:\n:return:", "id": "f6378:m7"}
{"signature": "def patchproperty(*cls, **kwargs):", "body": "def _patch(fun):<EOL><INDENT>m = kwargs.pop('<STR_LIT>', None) or fun.__name__<EOL>p = property(fun)<EOL>for c in cls:<EOL><INDENT>setattr(c, m, p)<EOL><DEDENT><DEDENT>def wrap(fun):<EOL><INDENT>_patch(fun)<EOL>return fun<EOL><DEDENT>return wrap<EOL>", "docstring": "class getter \ud568\uc218 \ud328\uce58 decorator\nEX)\nclass B(A):\n    pass\n\n@patchproperty(B)\ndef prop(self):\n    return 'hello'\n\n:param cls:\n:param kwargs:", "id": "f6378:m9"}
{"signature": "def run(main=None, argv=None, **flags):", "body": "\"\"\"<STR_LIT>\"\"\"<EOL>import sys as _sys<EOL>import inspect<EOL>main = main or _sys.modules['<STR_LIT:__main__>'].main<EOL>if main.__doc__:<EOL><INDENT>docstring = main.__doc__.split('<STR_LIT>')[<NUM_LIT:0>]<EOL>_parser.usage = '<STR_LIT>'.format(docstring)  <EOL><DEDENT>try:<EOL><INDENT>a = inspect.getfullargspec(main)<EOL><DEDENT>except AttributeError:<EOL><INDENT>a = inspect.getargspec(main)  <EOL><DEDENT>if a.defaults:<EOL><INDENT>kwargs = dict(zip(reversed(a.args), reversed(a.defaults)))<EOL>add_flag(**kwargs)<EOL><DEDENT>else:<EOL><INDENT>kwargs = dict()<EOL><DEDENT>if a.defaults is None:<EOL><INDENT>nargs = len(a.args)<EOL><DEDENT>else:<EOL><INDENT>nargs = len(a.args) - len(a.defaults)<EOL><DEDENT>posargs = a.args[:nargs]<EOL>flag.add_args(posargs)<EOL>add_flag(**flags)<EOL>args = argv[<NUM_LIT:1>:] if argv else None<EOL>unparsed, kw = flag._parse_flags_kw(args=args)<EOL>d = flag.__dict__['<STR_LIT>']<EOL>args = [d[k] for k in posargs]<EOL>args += unparsed<EOL>kwargs.update({k: d[k] for k in kwargs.keys()})<EOL>kwargs.update(kw)<EOL>_sys.exit(main(*args, **kwargs))<EOL>", "docstring": ":param main: main or sys.modules['__main__'].main\n:param argv: argument list used in argument parse\n:param flags: flags to define with defaults\n:return:", "id": "f6385:m4"}
{"signature": "def __getattr__(self, name):", "body": "if not self.__dict__['<STR_LIT>']:<EOL><INDENT>self._parse_flags()<EOL><DEDENT>if name not in self.__dict__['<STR_LIT>']:<EOL><INDENT>return None<EOL><DEDENT>return self.__dict__['<STR_LIT>'][name]<EOL>", "docstring": "Retrieves the 'value' attribute of the flag --name.", "id": "f6385:c0:m10"}
{"signature": "def fetch(query, args=None, **kwargs):", "body": "cur = execute(kwargs.pop('<STR_LIT>', '<STR_LIT>'), query, args, **kwargs)<EOL>for r in cur:<EOL><INDENT>yield r<EOL><DEDENT>cur.connection.close()<EOL>", "docstring": "for record in fetch(query, args, **configs):\n    print record\n:param args:\n:param db: str: db \uc2a4\ud0a4\ub9c8\n:param query: \ucffc\ub9ac \uc2a4\ud2b8\ub9c1\n:param kwargs: db connection \ucd94\uac00 \uc778\uc790. \ubcf4\ud1b5 \uc0dd\ub7b5\n:return: iterator", "id": "f6387:m6"}
{"signature": "def insert(cursor, table, *args, **field_values):", "body": "commit = field_values.pop('<STR_LIT>', True)<EOL>q, a = None, None<EOL>if args is not None and len(args) > <NUM_LIT:0>:<EOL><INDENT>q = get_insert_query(table, field_count=len(args))<EOL>a = args<EOL><DEDENT>elif len(field_values) > <NUM_LIT:0>:<EOL><INDENT>q = get_insert_query(table, fields=field_values.keys())<EOL>a = field_values.values()<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>cursor.execute(q, args=a)<EOL>if commit:<EOL><INDENT>cursor.connection.commit()<EOL><DEDENT>", "docstring": "db\uc5d0 \ub808\ucf54\ub4dc \uc9d1\uc5b4\ub123\uae30\nex)\ncursor.insert(table, v1, v2,...)\nex)\ncursor.insert(table, id=v1, word=v2, commit=True)\n:param commit:\n:param cursor:\n:param table:\n:param args:\n:param field_values:\n:return:", "id": "f6387:m11"}
{"signature": "def _cursor_exit(cursor, exc_type, exc_value, traceback):", "body": "if exc_type is not None:<EOL><INDENT>print(exc_value, traceback)<EOL><DEDENT>cursor.connection.close()<EOL>", "docstring": "cursor with\ubb38\uacfc \uc4f8\uc218 \uc788\uac8c __exit__\uc5d0 \ubc14\uc778\ub529\n:param cursor:\n:param exc_type:\n:param exc_value:\n:param traceback:\n:return:", "id": "f6387:m3"}
{"signature": "def db_config(db):", "body": "return {<EOL>'<STR_LIT:host>': '<STR_LIT:127.0.0.1>',<EOL>'<STR_LIT:user>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': '<STR_LIT:utf8>',<EOL>'<STR_LIT>': db<EOL>}<EOL>", "docstring": ":param db: str: \ub514\ube44 \uc2a4\ud0a4\ub9c8\n:return: mysql connection \uacf5\ud1b5 \uc635\uc158", "id": "f6387:m0"}
{"signature": "def insert_or_update(cursor, table, commit=True, **field_values):", "body": "q = \"\"\"<STR_LIT>\"\"\" % table<EOL>l = len(field_values)<EOL>fields = field_values.keys()<EOL>field = '<STR_LIT:U+002C>'.join(fields)<EOL>value = '<STR_LIT:U+002C>'.join(['<STR_LIT:%s>'] * l)<EOL>kv = '<STR_LIT:U+002C>'.join(['<STR_LIT>'.format(f) for f in fields])<EOL>q = q.format(field, value, kv)<EOL>args = field_values.values() * <NUM_LIT:2><EOL>cursor.execute(q, args=args)<EOL>if commit:<EOL><INDENT>cursor.connection.commit()<EOL><DEDENT>", "docstring": "db update \ucffc\ub9ac \ube4c\ub529 \ubc0f \uc2e4\ud589, \ub2e8, commit\uc740\n:param cursor: \ucee4\uc11c\n:type cursor: Cursor\n:param table: \ud14c\uc774\ube14\uc774\ub984\n:type table: str\n:param commit: \ucee4\ubc0b \uc5ec\ubd80\n:type commit: bool\n:param field_values: insert \ub610\ub294 \uc5c5\ub370\uc774\ud2b8 \ud560 \ud544\ub4dc \ubc0f \uac12 dict pairs\n:type field_values:dict\n:return:", "id": "f6387:m13"}
{"signature": "def update(cursor, table, where_kv, commit=True, **field_values):", "body": "q = \"\"\"<STR_LIT>\"\"\" % table<EOL>fields = field_values.keys()<EOL>kv = '<STR_LIT:U+002C>'.join(['<STR_LIT>'.format(f) for f in fields])<EOL>where = '<STR_LIT>'.join(['<STR_LIT>'.format(f) for f in where_kv.keys()])<EOL>q = q.format(kv, where)<EOL>args = field_values.values() + where_kv.values()<EOL>cursor.execute(q, args=args)<EOL>if commit:<EOL><INDENT>cursor.connection.commit()<EOL><DEDENT>", "docstring": "db update \ucffc\ub9ac \ube4c\ub529 \ubc0f \uc2e4\ud589, \ub2e8, commit\uc740\n:param cursor: \ucee4\uc11c\n:type cursor: Cursor\n:param table: \ud14c\uc774\ube14 \uc774\ub984\n:type table: str\n:param where_kv: \uc5c5\ub370\uc774\ud2b8 where \uc870\uac74 dictionary, key:field, value:equal condition only\n:type where_kv: dict\n:param field_values: kwarg \uc5c5\ub370\uc774\ud2b8\uc6a9\n:type field_values: dict\n:param commit: \ucee4\ubc0b \uc5ec\ubd80\n:type commit: bool\n:return:", "id": "f6387:m12"}
{"signature": "def _create_index_file(<EOL>root_dir, location, image_files, dirs, force_no_processing=False):", "body": "<EOL>header_text ='<STR_LIT>' + location + '<STR_LIT>' + str(len(image_files)) + '<STR_LIT>'<EOL>html = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' + str(<NUM_LIT> / IMAGES_PER_ROW) + '<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' + header_text + '<STR_LIT>'<EOL>]<EOL>directories = []<EOL>if root_dir != location:<EOL><INDENT>directories = ['<STR_LIT:..>']<EOL><DEDENT>directories += dirs<EOL>if len(directories) > <NUM_LIT:0>:<EOL><INDENT>html.append('<STR_LIT>')<EOL><DEDENT>for directory in directories:<EOL><INDENT>link = directory + '<STR_LIT:/>' + INDEX_FILE_NAME<EOL>html += [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' + link + '<STR_LIT>' + directory + '<STR_LIT>',<EOL>'<STR_LIT>'<EOL>]<EOL><DEDENT>table_row_count = <NUM_LIT:1><EOL>html += ['<STR_LIT>', '<STR_LIT>']<EOL>for image_file in image_files:<EOL><INDENT>if table_row_count == <NUM_LIT:1>:<EOL><INDENT>html.append('<STR_LIT>')<EOL><DEDENT>img_src = _get_thumbnail_src_from_file(<EOL>location, image_file, force_no_processing<EOL>)<EOL>link_target = _get_image_link_target_from_file(<EOL>location, image_file, force_no_processing<EOL>)<EOL>html += [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' + link_target + '<STR_LIT>',<EOL>'<STR_LIT>' + img_src + '<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>]<EOL>if table_row_count == IMAGES_PER_ROW:<EOL><INDENT>table_row_count = <NUM_LIT:0><EOL>html.append('<STR_LIT>')<EOL><DEDENT>table_row_count += <NUM_LIT:1><EOL><DEDENT>html += ['<STR_LIT>', '<STR_LIT>']<EOL>html += [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>]<EOL>index_file_path = _get_index_file_path(location)<EOL>print('<STR_LIT>' % index_file_path)<EOL>index_file 
= open(index_file_path, '<STR_LIT:w>')<EOL>index_file.write('<STR_LIT:\\n>'.join(html))<EOL>index_file.close()<EOL>return index_file_path<EOL>", "docstring": "Create an index file in the given location, supplying known lists of\npresent image files and subdirectories.\n@param {String} root_dir - The root directory of the entire crawl. Used to\n    ascertain whether the given location is the top level.\n@param {String} location - The current directory of the crawl. The index\n    file will be created here.\n@param {[String]} image_files - A list of image file names in the location.\n    These will be displayed in the index file's gallery.\n@param {[String]} dirs - The subdirectories of the location directory.\n    These will be displayed as links further down the file structure.\n@param {Boolean=False} force_no_processing - If True, do not attempt to\n    actually process thumbnails, PIL images or anything. Simply index\n    <img> tags with original file src attributes.\n@return {String} The full path (location plus filename) of the newly\n    created index file. Intended for usage cleaning up created files.", "id": "f6389:m1"}
{"signature": "def serve_dir(dir_path):", "body": "<EOL>print('<STR_LIT>')<EOL>created_files = _create_index_files(dir_path, True)<EOL>if (PIL_ENABLED):<EOL><INDENT>print('<STR_LIT>')<EOL>background_indexer = BackgroundIndexFileGenerator(dir_path)<EOL>background_indexer.run()<EOL><DEDENT>_run_server()<EOL>_clean_up(created_files)<EOL>", "docstring": "Generate indexes and run server from the given directory downwards.\n@param {String} dir_path - The directory path (absolute, or relative to CWD)\n@return {None}", "id": "f6389:m12"}
{"signature": "def _get_image_link_target_from_file(dir_path, image_file, force_no_processing=False):", "body": "<EOL>if force_no_processing:<EOL><INDENT>return image_file<EOL><DEDENT>img = _get_image_from_file(dir_path, image_file)<EOL>if img.format.lower() in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>return _get_image_src_from_file(<EOL>dir_path, image_file, force_no_processing<EOL>)<EOL><DEDENT>return image_file<EOL>", "docstring": "Get the value to be used as the href for links from thumbnail images. For\nmost image formats this will simply be the image file name itself. However,\nsome image formats (tif) are not natively displayable by many browsers and\ntherefore we must link to image data in another format.\n@param {String} dir_path - The directory containing the image file\n@param {String} image_file - The filename of the image file within dir_path\n@param {Boolean=False} force_no_processing - If True, do not attempt to\n    actually process a thumbnail, PIL image or anything. Simply return the\n    image filename as src.\n@return {String} The href to use.", "id": "f6389:m4"}
{"signature": "def rand_blend_mask(shape, rand=rand.uniform(-<NUM_LIT:10>, <NUM_LIT:10>), **kwargs):", "body": "<EOL>z = rand(shape[<NUM_LIT:0>])  <EOL>noise = snoise2dz((shape[<NUM_LIT:1>], shape[<NUM_LIT:2>]), z, **kwargs)<EOL>return noise<EOL>", "docstring": "random blending masks", "id": "f6390:m12"}
{"signature": "def cropcenter(sz, img=None):", "body": "l = len(sz)<EOL>sz = np.array(sz)<EOL>def wrapped(im):<EOL><INDENT>imsz = np.array(im.shape)<EOL>s = (imsz[:l] - sz) / <NUM_LIT:2>  <EOL>to = s + sz  <EOL>slices = [slice(s, e) for s, e in zip(s, to)]<EOL>return im[slices]<EOL><DEDENT>if img is not None:<EOL><INDENT>return wrapped(img)<EOL><DEDENT>return wrapped<EOL>", "docstring": "if no img, then return crop function\n:param sz:\n:param img:\n:return:", "id": "f6390:m22"}
{"signature": "def snoise2dz(size, z, scale=<NUM_LIT>, octaves=<NUM_LIT:1>, persistence=<NUM_LIT>, lacunarity=<NUM_LIT>):", "body": "import noise<EOL>z_l = len(z)<EOL>data = np.empty((z_l, size[<NUM_LIT:0>], size[<NUM_LIT:1>]), dtype='<STR_LIT>')<EOL>for iz in range(z_l):<EOL><INDENT>zvalue = z[iz]<EOL>for y in range(size[<NUM_LIT:0>]):<EOL><INDENT>for x in range(size[<NUM_LIT:1>]):<EOL><INDENT>v = noise.snoise3(x * scale, y * scale, zvalue,<EOL>octaves=octaves, persistence=persistence, lacunarity=lacunarity)<EOL>data[iz, y, x] = v<EOL><DEDENT><DEDENT><DEDENT>data = data * <NUM_LIT:0.5> + <NUM_LIT:0.5><EOL>if __debug__:<EOL><INDENT>assert data.min() >= <NUM_LIT:0.> and data.max() <= <NUM_LIT:1.0><EOL><DEDENT>return data<EOL>", "docstring": "z as seeds\nscale\uc774 \uc791\uc744 \uc218\ub85d \ud328\ud134\uc774 \ucee4\uc9c0\ub294 \ud6a8\uacfc", "id": "f6390:m15"}
{"signature": "def pad_if_need(sz_atleast, img, mode='<STR_LIT>'):<EOL>", "body": "<EOL>imsz = img.shape[:<NUM_LIT:2>]  <EOL>padneed = np.asarray((sz_atleast[<NUM_LIT:0>] - imsz[<NUM_LIT:0>], sz_atleast[<NUM_LIT:1>] - imsz[<NUM_LIT:1>]))<EOL>if np.any(padneed > <NUM_LIT:0>):<EOL><INDENT>padding = np.zeros((img.ndim, <NUM_LIT:2>), dtype='<STR_LIT>')<EOL>padneed = np.maximum(padneed, <NUM_LIT:0>)<EOL>padding[:<NUM_LIT:2>, <NUM_LIT:0>] = padneed/<NUM_LIT:2><EOL>padding[:<NUM_LIT:2>, <NUM_LIT:1>] = padneed - padneed/<NUM_LIT:2><EOL>img = np.pad(img, padding, mode=mode)<EOL><DEDENT>return img<EOL>", "docstring": "pad img if need to guarantee minumum size\n:param sz_atleast: [H,W] at least\n:param img: image np.array [H,W, ...]\n:param mode: str, padding mode\n:return: padded image or asis if enought size", "id": "f6390:m25"}
{"signature": "def rand_rot90(*imagez):", "body": "return rand_apply_onebatch(np.rot90, imagez)<EOL>", "docstring": "rotate together", "id": "f6390:m5"}
{"signature": "def rand_crop(sz, *imagez):", "body": "def _rand_crop(*imgz):<EOL><INDENT>imsz = imgz[<NUM_LIT:0>].shape[:<NUM_LIT:2>]<EOL>assert imsz[<NUM_LIT:0>] >= sz[<NUM_LIT:0>] and imsz[<NUM_LIT:1>] >= sz[<NUM_LIT:1>]<EOL>si = np.random.randint(imsz[<NUM_LIT:0>] - sz[<NUM_LIT:0>]) if imsz[<NUM_LIT:0>] > sz[<NUM_LIT:0>] else <NUM_LIT:0><EOL>sj = np.random.randint(imsz[<NUM_LIT:1>] - sz[<NUM_LIT:1>]) if imsz[<NUM_LIT:1>] > sz[<NUM_LIT:1>] else <NUM_LIT:0><EOL>slicei = slice(si, si+sz[<NUM_LIT:0>])<EOL>slicej = slice(sj, sj+sz[<NUM_LIT:1>])<EOL>outs = tuple(img[slicei, slicej] for img in imgz)<EOL>return tuple_or_not(*outs)<EOL><DEDENT>return _rand_crop(*imagez) if imagez else _rand_crop<EOL>", "docstring": "random crop\n# assume imagez has same size (H, W)\n# assume sz is less or equal than size of image\n:param sz: cropped image sz\n:param imagez: imagez\n:return: rand cropped image pairs or function bound to sz", "id": "f6390:m6"}
{"signature": "@tuple_args<EOL>def rand_brightness(imagez, scale=<NUM_LIT:1.0>, randfun=rand.normal(<NUM_LIT:0.>, <NUM_LIT>), clamp=(<NUM_LIT:0.>, <NUM_LIT:1.>)):", "body": "l, h = clamp<EOL>r = randfun((imagez[<NUM_LIT:0>].shape[<NUM_LIT:0>], <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>)) * scale<EOL>def apply(im):<EOL><INDENT>im += r<EOL>im[im < l] = l<EOL>im[im > h] = h<EOL>return im<EOL><DEDENT>return tuple(map(apply, imagez))<EOL>", "docstring": ":param images:\n:param scale: scale for random value\n:param randfun: any randfun binding except shape\n:param clamp: clamping range\n:return:", "id": "f6390:m16"}
{"signature": "def rand_flipud(*imagez):", "body": "return rand_apply_onebatch(np.flipud, imagez)<EOL>", "docstring": "flip together", "id": "f6390:m4"}
{"signature": "def rotate_crop(centerij, sz, angle, img=None, mode='<STR_LIT>', **kwargs):", "body": "<EOL>from skimage import transform<EOL>sz = np.array(sz)<EOL>crop_half = int(np.ceil(np.sqrt(np.square(sz).sum())))<EOL>if centerij[<NUM_LIT:0>] >= crop_half or centerij[<NUM_LIT:1>] >= crop_half:<EOL><INDENT>raise NotImplementedError<EOL><DEDENT>slicei = slice(centerij[<NUM_LIT:0>] - crop_half, centerij[<NUM_LIT:0>] + crop_half)<EOL>slicej = slice(centerij[<NUM_LIT:1>] - crop_half, centerij[<NUM_LIT:1>] + crop_half)<EOL>def _rotate_cropcenter(im):<EOL><INDENT>enoughcrop = im[slicei, slicej]<EOL>rotated = transform.rotate(enoughcrop, angle, resize=False, preserve_range=True, mode=mode, **kwargs)<EOL>return cropcenter(sz, rotated)<EOL><DEDENT>if img is not None:<EOL><INDENT>return _rotate_cropcenter(img)<EOL><DEDENT>return _rotate_cropcenter<EOL>", "docstring": "rotate and crop\nif no img, then return crop function\n:param centerij:\n:param sz:\n:param angle:\n:param img: [h,w,d]\n:param mode: padding option\n:return: cropped image or function", "id": "f6390:m19"}
{"signature": "def haslogger(name):", "body": "return name in logging.Logger.manager.loggerDict or not name<EOL>", "docstring": "name\uc758 \ub85c\uac70\uac00 \uc788\ub294\uc9c0 \uc5c6\ub294\uc9c0 \uccb4\ud06c. root logger\ub294 \ubb34\uc870\uac74 \ub9cc\ub4e4\uc5b4\uc9c4\ub2e4.", "id": "f6392:m1"}
{"signature": "def getlogger(pkg='<STR_LIT>', handler=None):", "body": "from .caller import caller<EOL>if not pkg:<EOL><INDENT>m = caller.modulename()<EOL>s = m.split('<STR_LIT:.>', <NUM_LIT:1>)<EOL>if len(s) > <NUM_LIT:1>:<EOL><INDENT>pkg = s[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>if haslogger(pkg):<EOL><INDENT>return logging.getLogger(pkg)<EOL><DEDENT>else:<EOL><INDENT>logger = logging.getLogger(pkg)<EOL>logger.addHandler(handler or default_handler)<EOL>logger.setLevel(logging.DEBUG)<EOL>return logger<EOL><DEDENT>", "docstring": "\ud328\ud0a4\uc9c0 \ud639\uc740 \ucc44\ub110 \ub85c\uac70\nlogging.getLogger(package_name) or logg.getLogger()\n:param pkg: str", "id": "f6392:m0"}
{"signature": "def __getnewargs__(self):", "body": "return tuple()<EOL>", "docstring": "pickling related", "id": "f6393:c0:m17"}
{"signature": "def name(self):", "body": "i = self.__url.rfind('<STR_LIT:/>')<EOL>if self.__url[:i] == '<STR_LIT>':<EOL><INDENT>return \"<STR_LIT:/>\"<EOL><DEDENT>return self.__url[i+<NUM_LIT:1>:]<EOL>", "docstring": "Returns the name of the Firebase. If a Firebase instance points to\n'https://my_firebase.firebaseio.com/users' its name would be 'users'", "id": "f6396:c0:m3"}
{"signature": "def put_sync(self, **kwargs):", "body": "self.amust((\"<STR_LIT>\", \"<STR_LIT:data>\"), kwargs)<EOL>response = requests.put(self.url_correct(kwargs[\"<STR_LIT>\"],<EOL>kwargs.get(\"<STR_LIT>\", self.__auth)),<EOL>data=json.dumps(kwargs[\"<STR_LIT:data>\"]))<EOL>self.catch_error(response)<EOL>return response.content<EOL>", "docstring": "PUT:  puts data into the Firebase.\nRequires the 'point' parameter as a keyworded argument.", "id": "f6396:c0:m11"}
{"signature": "def get_sync(self, **kwargs):", "body": "self.amust((\"<STR_LIT>\",), kwargs)<EOL>response = requests.get(self.url_correct(kwargs[\"<STR_LIT>\"],<EOL>kwargs.get(\"<STR_LIT>\", self.__auth)))<EOL>self.catch_error(response)<EOL>return response.content<EOL>", "docstring": "GET:  gets data from the Firebase.\nRequires the 'point' parameter as a keyworded argument.", "id": "f6396:c0:m10"}
{"signature": "def attr(self):", "body": "return self.__attr['<STR_LIT:time>'], self.__attr['<STR_LIT:url>'],  self.__attr['<STR_LIT>']<EOL>", "docstring": "Returns a Tuple with the Attributes of the Firebase", "id": "f6396:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def __write(path, data, mode=\"<STR_LIT:w>\"):<DEDENT>", "body": "with open(path, mode) as data_file:<EOL><INDENT>data = json.dumps(data, indent=<NUM_LIT:4>)<EOL>data_file.write(data)<EOL>return data<EOL><DEDENT>", "docstring": "Writes to a File. Returns the data written.\npath - (string) path to the file to write to.\ndata - (json) data from a request.\nmode - (string) mode to open the file in. Default to 'w'. Overwrites.", "id": "f6396:c0:m7"}
{"signature": "@staticmethod<EOL><INDENT>def __read(path):<DEDENT>", "body": "try:<EOL><INDENT>with open(path, '<STR_LIT:r>') as data_file:<EOL><INDENT>data = data_file.read()<EOL>data = json.loads(data)<EOL>return data<EOL><DEDENT><DEDENT>except IOError as err:<EOL><INDENT>pass<EOL><DEDENT>except Exception as err:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Reads a File with contents in correct JSON format.\nReturns the data as Python objects.\npath - (string) path to the file", "id": "f6396:c0:m6"}
{"signature": "def __call__(self, r):", "body": "<EOL>log.debug(\"<STR_LIT>\", r, self.client)<EOL>content_type = r.headers.get(\"<STR_LIT:Content-Type>\", \"<STR_LIT>\")<EOL>if (<EOL>not content_type<EOL>and extract_params(r.body)<EOL>or self.client.signature_type == SIGNATURE_TYPE_BODY<EOL>):<EOL><INDENT>content_type = CONTENT_TYPE_FORM_URLENCODED<EOL><DEDENT>if not isinstance(content_type, unicode):<EOL><INDENT>content_type = content_type.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>is_form_encoded = CONTENT_TYPE_FORM_URLENCODED in content_type<EOL>log.debug(<EOL>\"<STR_LIT>\",<EOL>is_form_encoded or self.force_include_body,<EOL>)<EOL>if is_form_encoded:<EOL><INDENT>r.headers[\"<STR_LIT:Content-Type>\"] = CONTENT_TYPE_FORM_URLENCODED<EOL>r.url, headers, r.body = self.client.sign(<EOL>unicode(r.url), unicode(r.method), r.body or \"<STR_LIT>\", r.headers<EOL>)<EOL><DEDENT>elif self.force_include_body:<EOL><INDENT>r.url, headers, r.body = self.client.sign(<EOL>unicode(r.url), unicode(r.method), r.body or \"<STR_LIT>\", r.headers<EOL>)<EOL><DEDENT>else:<EOL><INDENT>r.url, headers, _ = self.client.sign(<EOL>unicode(r.url), unicode(r.method), None, r.headers<EOL>)<EOL><DEDENT>r.prepare_headers(headers)<EOL>r.url = to_native_string(r.url)<EOL>log.debug(\"<STR_LIT>\", r.url)<EOL>log.debug(\"<STR_LIT>\", headers)<EOL>log.debug(\"<STR_LIT>\", r.body)<EOL>return r<EOL>", "docstring": "Add OAuth parameters to the request.\n\n        Parameters may be included from the body if the content-type is\n        urlencoded, if no content type is set a guess is made.", "id": "f6399:c0:m1"}
{"signature": "def __init__(<EOL>self,<EOL>client_key,<EOL>client_secret=None,<EOL>resource_owner_key=None,<EOL>resource_owner_secret=None,<EOL>callback_uri=None,<EOL>signature_method=SIGNATURE_HMAC,<EOL>signature_type=SIGNATURE_TYPE_AUTH_HEADER,<EOL>rsa_key=None,<EOL>verifier=None,<EOL>client_class=None,<EOL>force_include_body=False,<EOL>**kwargs<EOL>):", "body": "super(OAuth1Session, self).__init__()<EOL>self._client = OAuth1(<EOL>client_key,<EOL>client_secret=client_secret,<EOL>resource_owner_key=resource_owner_key,<EOL>resource_owner_secret=resource_owner_secret,<EOL>callback_uri=callback_uri,<EOL>signature_method=signature_method,<EOL>signature_type=signature_type,<EOL>rsa_key=rsa_key,<EOL>verifier=verifier,<EOL>client_class=client_class,<EOL>force_include_body=force_include_body,<EOL>**kwargs<EOL>)<EOL>self.auth = self._client<EOL>", "docstring": "Construct the OAuth 1 session.\n\n        :param client_key: A client specific identifier.\n        :param client_secret: A client specific secret used to create HMAC and\n                              plaintext signatures.\n        :param resource_owner_key: A resource owner key, also referred to as\n                                   request token or access token depending on\n                                   when in the workflow it is used.\n        :param resource_owner_secret: A resource owner secret obtained with\n                                      either a request or access token. Often\n                                      referred to as token secret.\n        :param callback_uri: The URL the user is redirect back to after\n                             authorization.\n        :param signature_method: Signature methods determine how the OAuth\n                                 signature is created. 
The three options are\n                                 oauthlib.oauth1.SIGNATURE_HMAC (default),\n                                 oauthlib.oauth1.SIGNATURE_RSA and\n                                 oauthlib.oauth1.SIGNATURE_PLAIN.\n        :param signature_type: Signature type decides where the OAuth\n                               parameters are added. Either in the\n                               Authorization header (default) or to the URL\n                               query parameters or the request body. Defined as\n                               oauthlib.oauth1.SIGNATURE_TYPE_AUTH_HEADER,\n                               oauthlib.oauth1.SIGNATURE_TYPE_QUERY and\n                               oauthlib.oauth1.SIGNATURE_TYPE_BODY\n                               respectively.\n        :param rsa_key: The private RSA key as a string. Can only be used with\n                        signature_method=oauthlib.oauth1.SIGNATURE_RSA.\n        :param verifier: A verifier string to prove authorization was granted.\n        :param client_class: A subclass of `oauthlib.oauth1.Client` to use with\n                             `requests_oauthlib.OAuth1` instead of the default\n        :param force_include_body: Always include the request body in the\n                                   signature creation.\n        :param **kwargs: Additional keyword arguments passed to `OAuth1`", "id": "f6400:c3:m0"}
{"signature": "@property<EOL><INDENT>def authorized(self):<DEDENT>", "body": "if self._client.client.signature_method == SIGNATURE_RSA:<EOL><INDENT>return bool(self._client.client.resource_owner_key)<EOL><DEDENT>else:<EOL><INDENT>return (<EOL>bool(self._client.client.client_secret)<EOL>and bool(self._client.client.resource_owner_key)<EOL>and bool(self._client.client.resource_owner_secret)<EOL>)<EOL><DEDENT>", "docstring": "Boolean that indicates whether this session has an OAuth token\n        or not. If `self.authorized` is True, you can reasonably expect\n        OAuth-protected requests to the resource to succeed. If\n        `self.authorized` is False, you need the user to go through the OAuth\n        authentication dance before OAuth-protected requests to the resource\n        will succeed.", "id": "f6400:c3:m3"}
{"signature": "def rebuild_auth(self, prepared_request, response):", "body": "if \"<STR_LIT>\" in prepared_request.headers:<EOL><INDENT>prepared_request.headers.pop(\"<STR_LIT>\", True)<EOL>prepared_request.prepare_auth(self.auth)<EOL><DEDENT>return<EOL>", "docstring": "When being redirected we should always strip Authorization\nheader, since nonce may not be reused as per OAuth spec.", "id": "f6400:c3:m10"}
{"signature": "def authorization_url(self, url, request_token=None, **kwargs):", "body": "kwargs[\"<STR_LIT>\"] = request_token or self._client.client.resource_owner_key<EOL>log.debug(\"<STR_LIT>\", kwargs, url)<EOL>return add_params_to_uri(url, kwargs.items())<EOL>", "docstring": "Create an authorization URL by appending request_token and optional\n        kwargs to url.\n\n        This is the second step in the OAuth 1 workflow. The user should be\n        redirected to this authorization URL, grant access to you, and then\n        be redirected back to you. The redirection back can either be specified\n        during client registration or by supplying a callback URI per request.\n\n        :param url: The authorization endpoint URL.\n        :param request_token: The previously obtained request token.\n        :param kwargs: Optional parameters to append to the URL.\n        :returns: The authorization URL with new parameters embedded.\n\n        An example using a registered default callback URI.\n\n        >>> request_token_url = 'https://api.twitter.com/oauth/request_token'\n        >>> authorization_url = 'https://api.twitter.com/oauth/authorize'\n        >>> oauth_session = OAuth1Session('client-key', client_secret='secret')\n        >>> oauth_session.fetch_request_token(request_token_url)\n        {\n            'oauth_token': 'sdf0o9823sjdfsdf',\n            'oauth_token_secret': '2kjshdfp92i34asdasd',\n        }\n        >>> oauth_session.authorization_url(authorization_url)\n        'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf'\n        >>> oauth_session.authorization_url(authorization_url, foo='bar')\n        'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&foo=bar'\n\n        An example using an explicit callback URI.\n\n        >>> request_token_url = 'https://api.twitter.com/oauth/request_token'\n        >>> authorization_url = 'https://api.twitter.com/oauth/authorize'\n        >>> oauth_session = 
OAuth1Session('client-key', client_secret='secret', callback_uri='https://127.0.0.1/callback')\n        >>> oauth_session.fetch_request_token(request_token_url)\n        {\n            'oauth_token': 'sdf0o9823sjdfsdf',\n            'oauth_token_secret': '2kjshdfp92i34asdasd',\n        }\n        >>> oauth_session.authorization_url(authorization_url)\n        'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback'", "id": "f6400:c3:m4"}
{"signature": "@property<EOL><INDENT>def status_code(self):<DEDENT>", "body": "return self.response.status_code<EOL>", "docstring": "For backwards-compatibility purposes", "id": "f6400:c0:m1"}
{"signature": "def __call__(self, r):", "body": "if not is_secure_transport(r.url):<EOL><INDENT>raise InsecureTransportError()<EOL><DEDENT>r.url, r.headers, r.body = self._client.add_token(<EOL>r.url, http_method=r.method, body=r.body, headers=r.headers<EOL>)<EOL>return r<EOL>", "docstring": "Append an OAuth 2 token to the request.\n\n        Note that currently HTTPS is required for all requests. There may be\n        a token type that allows for plain HTTP in the future and then this\n        should be updated to allow plain HTTP on a white list basis.", "id": "f6401:c0:m1"}
{"signature": "def __init__(self, client_id=None, client=None, token=None):", "body": "self._client = client or WebApplicationClient(client_id, token=token)<EOL>if token:<EOL><INDENT>for k, v in token.items():<EOL><INDENT>setattr(self._client, k, v)<EOL><DEDENT><DEDENT>", "docstring": "Construct a new OAuth 2 authorization object.\n\n        :param client_id: Client id obtained during registration\n        :param client: :class:`oauthlib.oauth2.Client` to be used. Default is\n                       WebApplicationClient which is useful for any\n                       hosted application but not mobile or desktop.\n        :param token: Token dictionary, must include access_token\n                      and token_type.", "id": "f6401:c0:m0"}
{"signature": "def token_from_fragment(self, authorization_response):", "body": "self._client.parse_request_uri_response(<EOL>authorization_response, state=self._state<EOL>)<EOL>self.token = self._client.token<EOL>return self.token<EOL>", "docstring": "Parse token from the URI fragment, used by MobileApplicationClients.\n\n        :param authorization_response: The full URL of the redirect back to you\n        :return: A token dict", "id": "f6413:c1:m13"}
{"signature": "def authorization_url(self, url, state=None, **kwargs):", "body": "state = state or self.new_state()<EOL>return (<EOL>self._client.prepare_request_uri(<EOL>url,<EOL>redirect_uri=self.redirect_uri,<EOL>scope=self.scope,<EOL>state=state,<EOL>**kwargs<EOL>),<EOL>state,<EOL>)<EOL>", "docstring": "Form an authorization URL.\n\n        :param url: Authorization endpoint url, must be HTTPS.\n        :param state: An optional state string for CSRF protection. If not\n                      given it will be generated for you.\n        :param kwargs: Extra parameters to include.\n        :return: authorization_url, state", "id": "f6413:c1:m11"}
{"signature": "def request(<EOL>self,<EOL>method,<EOL>url,<EOL>data=None,<EOL>headers=None,<EOL>withhold_token=False,<EOL>client_id=None,<EOL>client_secret=None,<EOL>**kwargs<EOL>):", "body": "if not is_secure_transport(url):<EOL><INDENT>raise InsecureTransportError()<EOL><DEDENT>if self.token and not withhold_token:<EOL><INDENT>log.debug(<EOL>\"<STR_LIT>\",<EOL>len(self.compliance_hook[\"<STR_LIT>\"]),<EOL>)<EOL>for hook in self.compliance_hook[\"<STR_LIT>\"]:<EOL><INDENT>log.debug(\"<STR_LIT>\", hook)<EOL>url, headers, data = hook(url, headers, data)<EOL><DEDENT>log.debug(\"<STR_LIT>\", self.token)<EOL>try:<EOL><INDENT>url, headers, data = self._client.add_token(<EOL>url, http_method=method, body=data, headers=headers<EOL>)<EOL><DEDENT>except TokenExpiredError:<EOL><INDENT>if self.auto_refresh_url:<EOL><INDENT>log.debug(<EOL>\"<STR_LIT>\",<EOL>self.auto_refresh_url,<EOL>)<EOL>auth = kwargs.pop(\"<STR_LIT>\", None)<EOL>if client_id and client_secret and (auth is None):<EOL><INDENT>log.debug(<EOL>'<STR_LIT>',<EOL>client_id,<EOL>)<EOL>auth = requests.auth.HTTPBasicAuth(client_id, client_secret)<EOL><DEDENT>token = self.refresh_token(<EOL>self.auto_refresh_url, auth=auth, **kwargs<EOL>)<EOL>if self.token_updater:<EOL><INDENT>log.debug(<EOL>\"<STR_LIT>\", token, self.token_updater<EOL>)<EOL>self.token_updater(token)<EOL>url, headers, data = self._client.add_token(<EOL>url, http_method=method, body=data, headers=headers<EOL>)<EOL><DEDENT>else:<EOL><INDENT>raise TokenUpdated(token)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT>log.debug(\"<STR_LIT>\", url, method)<EOL>log.debug(\"<STR_LIT>\", headers, data)<EOL>log.debug(\"<STR_LIT>\", kwargs)<EOL>return super(OAuth2Session, self).request(<EOL>method, url, headers=headers, data=data, **kwargs<EOL>)<EOL>", "docstring": "Intercept all requests and add the OAuth 2 token if present.", "id": "f6413:c1:m15"}
{"signature": "@property<EOL><INDENT>def authorized(self):<DEDENT>", "body": "return bool(self.access_token)<EOL>", "docstring": "Boolean that indicates whether this session has an OAuth token\n        or not. If `self.authorized` is True, you can reasonably expect\n        OAuth-protected requests to the resource to succeed. If\n        `self.authorized` is False, you need the user to go through the OAuth\n        authentication dance before OAuth-protected requests to the resource\n        will succeed.", "id": "f6413:c1:m10"}
{"signature": "def refresh_token(<EOL>self,<EOL>token_url,<EOL>refresh_token=None,<EOL>body=\"<STR_LIT>\",<EOL>auth=None,<EOL>timeout=None,<EOL>headers=None,<EOL>verify=True,<EOL>proxies=None,<EOL>**kwargs<EOL>):", "body": "if not token_url:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_secure_transport(token_url):<EOL><INDENT>raise InsecureTransportError()<EOL><DEDENT>refresh_token = refresh_token or self.token.get(\"<STR_LIT>\")<EOL>log.debug(<EOL>\"<STR_LIT>\", self.auto_refresh_kwargs<EOL>)<EOL>kwargs.update(self.auto_refresh_kwargs)<EOL>body = self._client.prepare_refresh_body(<EOL>body=body, refresh_token=refresh_token, scope=self.scope, **kwargs<EOL>)<EOL>log.debug(\"<STR_LIT>\", body)<EOL>if headers is None:<EOL><INDENT>headers = {<EOL>\"<STR_LIT>\": \"<STR_LIT:application/json>\",<EOL>\"<STR_LIT:Content-Type>\": (\"<STR_LIT>\"),<EOL>}<EOL><DEDENT>r = self.post(<EOL>token_url,<EOL>data=dict(urldecode(body)),<EOL>auth=auth,<EOL>timeout=timeout,<EOL>headers=headers,<EOL>verify=verify,<EOL>withhold_token=True,<EOL>proxies=proxies,<EOL>)<EOL>log.debug(\"<STR_LIT>\", r.status_code)<EOL>log.debug(\"<STR_LIT>\", r.headers, r.text)<EOL>log.debug(<EOL>\"<STR_LIT>\",<EOL>len(self.compliance_hook[\"<STR_LIT>\"]),<EOL>)<EOL>for hook in self.compliance_hook[\"<STR_LIT>\"]:<EOL><INDENT>log.debug(\"<STR_LIT>\", hook)<EOL>r = hook(r)<EOL><DEDENT>self.token = self._client.parse_request_body_response(r.text, scope=self.scope)<EOL>if not \"<STR_LIT>\" in self.token:<EOL><INDENT>log.debug(\"<STR_LIT>\")<EOL>self.token[\"<STR_LIT>\"] = refresh_token<EOL><DEDENT>return self.token<EOL>", "docstring": "Fetch a new access token using a refresh token.\n\n        :param token_url: The token endpoint, must be HTTPS.\n        :param refresh_token: The refresh_token to use.\n        :param body: Optional application/x-www-form-urlencoded body to add the\n                     include in the token request. 
Prefer kwargs over body.\n        :param auth: An auth tuple or method as accepted by `requests`.\n        :param timeout: Timeout of the request in seconds.\n        :param headers: A dict of headers to be used by `requests`.\n        :param verify: Verify SSL certificate.\n        :param proxies: The `proxies` argument will be passed to `requests`.\n        :param kwargs: Extra parameters to include in the token request.\n        :return: A token dict", "id": "f6413:c1:m14"}
{"signature": "def fetch_token(<EOL>self,<EOL>token_url,<EOL>code=None,<EOL>authorization_response=None,<EOL>body=\"<STR_LIT>\",<EOL>auth=None,<EOL>username=None,<EOL>password=None,<EOL>method=\"<STR_LIT:POST>\",<EOL>force_querystring=False,<EOL>timeout=None,<EOL>headers=None,<EOL>verify=True,<EOL>proxies=None,<EOL>include_client_id=None,<EOL>client_secret=None,<EOL>**kwargs<EOL>):", "body": "if not is_secure_transport(token_url):<EOL><INDENT>raise InsecureTransportError()<EOL><DEDENT>if not code and authorization_response:<EOL><INDENT>self._client.parse_request_uri_response(<EOL>authorization_response, state=self._state<EOL>)<EOL>code = self._client.code<EOL><DEDENT>elif not code and isinstance(self._client, WebApplicationClient):<EOL><INDENT>code = self._client.code<EOL>if not code:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\" \"<STR_LIT>\"<EOL>)<EOL><DEDENT><DEDENT>if isinstance(self._client, LegacyApplicationClient):<EOL><INDENT>if username is None:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if password is None:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT><DEDENT>if username is not None:<EOL><INDENT>kwargs[\"<STR_LIT:username>\"] = username<EOL><DEDENT>if password is not None:<EOL><INDENT>kwargs[\"<STR_LIT:password>\"] = password<EOL><DEDENT>if auth is not None:<EOL><INDENT>if include_client_id is None:<EOL><INDENT>include_client_id = False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if include_client_id is not True:<EOL><INDENT>client_id = self.client_id<EOL>if client_id:<EOL><INDENT>log.debug(<EOL>'<STR_LIT>'<EOL>\"<STR_LIT>\",<EOL>client_id,<EOL>)<EOL>client_secret = client_secret if client_secret is not None else \"<STR_LIT>\"<EOL>auth = requests.auth.HTTPBasicAuth(client_id, client_secret)<EOL><DEDENT><DEDENT><DEDENT>if include_client_id:<EOL><INDENT>if client_secret is not None:<EOL><INDENT>kwargs[\"<STR_LIT>\"] = client_secret<EOL><DEDENT><DEDENT>body = 
self._client.prepare_request_body(<EOL>code=code,<EOL>body=body,<EOL>redirect_uri=self.redirect_uri,<EOL>include_client_id=include_client_id,<EOL>**kwargs<EOL>)<EOL>headers = headers or {<EOL>\"<STR_LIT>\": \"<STR_LIT:application/json>\",<EOL>\"<STR_LIT:Content-Type>\": \"<STR_LIT>\",<EOL>}<EOL>self.token = {}<EOL>request_kwargs = {}<EOL>if method.upper() == \"<STR_LIT:POST>\":<EOL><INDENT>request_kwargs[\"<STR_LIT>\" if force_querystring else \"<STR_LIT:data>\"] = dict(<EOL>urldecode(body)<EOL>)<EOL><DEDENT>elif method.upper() == \"<STR_LIT:GET>\":<EOL><INDENT>request_kwargs[\"<STR_LIT>\"] = dict(urldecode(body))<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>r = self.request(<EOL>method=method,<EOL>url=token_url,<EOL>timeout=timeout,<EOL>headers=headers,<EOL>auth=auth,<EOL>verify=verify,<EOL>proxies=proxies,<EOL>**request_kwargs<EOL>)<EOL>log.debug(\"<STR_LIT>\", r.status_code)<EOL>log.debug(\"<STR_LIT>\", r.request.url)<EOL>log.debug(\"<STR_LIT>\", r.request.headers)<EOL>log.debug(\"<STR_LIT>\", r.request.body)<EOL>log.debug(\"<STR_LIT>\", r.headers, r.text)<EOL>log.debug(<EOL>\"<STR_LIT>\",<EOL>len(self.compliance_hook[\"<STR_LIT>\"]),<EOL>)<EOL>for hook in self.compliance_hook[\"<STR_LIT>\"]:<EOL><INDENT>log.debug(\"<STR_LIT>\", hook)<EOL>r = hook(r)<EOL><DEDENT>self._client.parse_request_body_response(r.text, scope=self.scope)<EOL>self.token = self._client.token<EOL>log.debug(\"<STR_LIT>\", self.token)<EOL>return self.token<EOL>", "docstring": "Generic method for fetching an access token from the token endpoint.\n\n        If you are using the MobileApplicationClient you will want to use\n        `token_from_fragment` instead of `fetch_token`.\n\n        The current implementation enforces the RFC guidelines.\n\n        :param token_url: Token endpoint URL, must use HTTPS.\n        :param code: Authorization code (used by WebApplicationClients).\n        :param authorization_response: Authorization response URL, the callback\n       
                                URL of the request back to you. Used by\n                                       WebApplicationClients instead of code.\n        :param body: Optional application/x-www-form-urlencoded body to add the\n                     include in the token request. Prefer kwargs over body.\n        :param auth: An auth tuple or method as accepted by `requests`.\n        :param username: Username required by LegacyApplicationClients to appear\n                         in the request body.\n        :param password: Password required by LegacyApplicationClients to appear\n                         in the request body.\n        :param method: The HTTP method used to make the request. Defaults\n                       to POST, but may also be GET. Other methods should\n                       be added as needed.\n        :param force_querystring: If True, force the request body to be sent\n            in the querystring instead.\n        :param timeout: Timeout of the request in seconds.\n        :param headers: Dict to default request headers with.\n        :param verify: Verify SSL certificate.\n        :param proxies: The `proxies` argument is passed onto `requests`.\n        :param include_client_id: Should the request body include the\n                                  `client_id` parameter. Default is `None`,\n                                  which will attempt to autodetect. This can be\n                                  forced to always include (True) or never\n                                  include (False).\n        :param client_secret: The `client_secret` paired to the `client_id`.\n                              This is generally required unless provided in the\n                              `auth` tuple. 
If the value is `None`, it will be\n                              omitted from the request, however if the value is\n                              an empty string, an empty string will be sent.\n        :param kwargs: Extra parameters to include in the token request.\n        :return: A token dict", "id": "f6413:c1:m12"}
{"signature": "def profil_annuel(df, func='<STR_LIT>'):", "body": "func = _get_funky(func)<EOL>res = df.groupby(lambda x: x.month).aggregate(func)<EOL>res.index = [cal.month_name[i] for i in range(<NUM_LIT:1>,<NUM_LIT>)]<EOL>return res<EOL>", "docstring": "Calcul du profil annuel\n\nParam\u00e8tres:\ndf: DataFrame de donn\u00e9es dont l'index est une s\u00e9rie temporelle\n    (cf module xair par exemple)\nfunc: function permettant le calcul. Soit un nom de fonction numpy ('mean', 'max', ...)\n    soit la fonction elle-m\u00eame (np.mean, np.max, ...)\nRetourne:\nUn DataFrame de moyennes par mois", "id": "f6424:m3"}
{"signature": "def profil_hebdo(df, func='<STR_LIT>'):", "body": "func = _get_funky(func)<EOL>res = df.groupby(lambda x: x.weekday).aggregate(func)<EOL>res.index = [cal.day_name[i] for i in range(<NUM_LIT:0>,<NUM_LIT:7>)]<EOL>return res<EOL>", "docstring": "Calcul du profil journalier\n\nParam\u00e8tres:\ndf: DataFrame de donn\u00e9es dont l'index est une s\u00e9rie temporelle\n    (cf module xair par exemple)\nfunc: function permettant le calcul. Soit un nom de fonction numpy ('mean', 'max', ...)\n    soit la fonction elle-m\u00eame (np.mean, np.max, ...)\nRetourne:\nUn DataFrame de moyennes par journ\u00e9e sur la semaine", "id": "f6424:m2"}
{"signature": "def nmse(a, b):", "body": "return np.square(a - b).mean() / (a.mean() * b.mean())<EOL>", "docstring": "Returns the normalized mean square error of a and b", "id": "f6425:m10"}
{"signature": "def mae(a, b):", "body": "return np.absolute(a - b).mean()<EOL>", "docstring": "Returns the mean absolute error of a and b", "id": "f6425:m8"}
{"signature": "def determination(a, b):", "body": "return np.square(correlation(a, b))<EOL>", "docstring": "Returns the coefficient of determination between a and b", "id": "f6425:m16"}
{"signature": "def mean(a, rep=<NUM_LIT>, **kwargs):", "body": "return rfunc(a, ma.mean, rep, **kwargs)<EOL>", "docstring": "Compute the average along a 1D array like ma.mean,\n    but with a representativity coefficient : if ma.count(a)/ma.size(a)>=rep,\n    then the result is a masked value", "id": "f6425:m2"}
{"signature": "def min(a, rep=<NUM_LIT>, **kwargs):", "body": "return rfunc(a, ma.min, rep, **kwargs)<EOL>", "docstring": "Compute the min along a 1D array like ma.mean,\n    but with a representativity coefficient : if ma.count(a)/ma.size(a)>=rep,\n    then the result is a masked value", "id": "f6425:m4"}
{"signature": "def fmt(a, b):", "body": "return <NUM_LIT:100> * np.min([a, b], axis=<NUM_LIT:0>).sum() / np.max([a, b], axis=<NUM_LIT:0>).sum()<EOL>", "docstring": "Figure of merit in time", "id": "f6425:m19"}
{"signature": "def stderr(a, b):", "body": "return np.std(a - b)<EOL>", "docstring": "Returns the standard deviation of the errors between a and b", "id": "f6425:m7"}
{"signature": "def rmse(a, b):", "body": "return np.sqrt(np.square(a - b).mean())<EOL>", "docstring": "Returns the root mean square error betwwen a and b", "id": "f6425:m9"}
{"signature": "def correlation(a, b):", "body": "diff1 = a - a.mean()<EOL>diff2 = b - b.mean()<EOL>return (diff1 * diff2).mean() / (np.sqrt(np.square(diff1).mean() * np.square(diff2).mean()))<EOL>", "docstring": "Computes the correlation between a and b, says the Pearson's correlation\n    coefficient R", "id": "f6425:m15"}
{"signature": "def pb(df):", "body": "polluant = '<STR_LIT>'<EOL>if not isinstance(df.index.freq, pdoffset.Hour):<EOL><INDENT>raise FreqException(\"<STR_LIT>\")<EOL><DEDENT>res = {\"<STR_LIT>\": depassement(df.resample('<STR_LIT:A>', how='<STR_LIT>'), valeur=<NUM_LIT>),<EOL>\"<STR_LIT>\": depassement(df.resample('<STR_LIT:A>', how='<STR_LIT>'),<EOL>valeur=<NUM_LIT:0.5>),<EOL>}<EOL>return polluant, res<EOL>", "docstring": "Calculs r\u00e9glementaires pour le plomb\n\nParam\u00e8tres:\ndf: DataFrame contenant les mesures, avec un index temporel\n(voir xair.get_mesure)\n\nRetourne:\nUne s\u00e9rie de r\u00e9sultats dans un DataFrame :\n******\nunit\u00e9 (u): \u00b5g/m3 (microgramme par m\u00e8tre cube)\n\nObjectif de qualit\u00e9 en moyenne A: 0.25u\nValeur limite pour la sant\u00e9 humaine en moyenne A: 0.5u\n\nLes r\u00e9sultats sont donn\u00e9s en terme d'heure de d\u00e9passement", "id": "f6427:m14"}
{"signature": "def arsenic(df):", "body": "polluant = '<STR_LIT>'<EOL>if not isinstance(df.index.freq, pdoffset.Hour):<EOL><INDENT>raise FreqException(\"<STR_LIT>\")<EOL><DEDENT>res = {\"<STR_LIT>\": depassement(df.resample('<STR_LIT:A>', how='<STR_LIT>'), valeur=<NUM_LIT:6>),<EOL>}<EOL>return polluant, res<EOL>", "docstring": "Calculs r\u00e9glementaires pour l'arsenic\n\nParam\u00e8tres:\ndf: DataFrame contenant les mesures, avec un index temporel\n(voir xair.get_mesure)\n\nRetourne:\nUne s\u00e9rie de r\u00e9sultats dans un DataFrame :\n******\nunit\u00e9 (u): ng/m3 (nanogramme par m\u00e8tre cube)\n\nValeur cible en moyenne A: 6u\n\nLes r\u00e9sultats sont donn\u00e9s en terme d'heure de d\u00e9passement", "id": "f6427:m15"}
{"signature": "def bap(df):", "body": "polluant = '<STR_LIT>'<EOL>if not isinstance(df.index.freq, pdoffset.Hour):<EOL><INDENT>raise FreqException(\"<STR_LIT>\")<EOL><DEDENT>res = {\"<STR_LIT>\": depassement(df.resample('<STR_LIT:A>', how='<STR_LIT>'), valeur=<NUM_LIT:1>),<EOL>}<EOL>return polluant, res<EOL>", "docstring": "Calculs r\u00e9glementaires pour le benzo(a)pyr\u00e8ne\n\nParam\u00e8tres:\ndf: DataFrame contenant les mesures, avec un index temporel\n(voir xair.get_mesure)\n\nRetourne:\nUne s\u00e9rie de r\u00e9sultats dans un DataFrame :\n******\nunit\u00e9 (u): ng/m3 (nanogramme par m\u00e8tre cube)\n\nValeur cible en moyenne A: 1u\n\nLes r\u00e9sultats sont donn\u00e9s en terme d'heure de d\u00e9passement", "id": "f6427:m18"}
{"signature": "def nickel(df):", "body": "polluant = '<STR_LIT>'<EOL>if not isinstance(df.index.freq, pdoffset.Hour):<EOL><INDENT>raise FreqException(\"<STR_LIT>\")<EOL><DEDENT>res = {\"<STR_LIT>\": depassement(df.resample('<STR_LIT:A>', how='<STR_LIT>'), valeur=<NUM_LIT:20>),<EOL>}<EOL>return polluant, res<EOL>", "docstring": "Calculs r\u00e9glementaires pour le nickel\n\nParam\u00e8tres:\ndf: DataFrame contenant les mesures, avec un index temporel\n(voir xair.get_mesure)\n\nRetourne:\nUne s\u00e9rie de r\u00e9sultats dans un DataFrame :\n******\nunit\u00e9 (u): ng/m3 (nanogramme par m\u00e8tre cube)\n\nValeur cible en moyenne A: 20u\n\nLes r\u00e9sultats sont donn\u00e9s en terme d'heure de d\u00e9passement", "id": "f6427:m17"}
{"signature": "def aot40_vegetation(df, nb_an):", "body": "return _aot(df.tshift(<NUM_LIT:1>), nb_an=nb_an, limite=<NUM_LIT>, mois_debut=<NUM_LIT:5>, mois_fin=<NUM_LIT:7>,<EOL>heure_debut=<NUM_LIT:8>, heure_fin=<NUM_LIT>)<EOL>", "docstring": "Calcul de l'AOT40 du 1er mai au 31 juillet\n\n*AOT40 : AOT 40 ( exprim\u00e9 en micro g/m\u00b3 par heure ) signifie la somme des\ndiff\u00e9rences entre les concentrations horaires sup\u00e9rieures \u00e0 40 parties par\nmilliard ( 40 ppb soit 80 micro g/m\u00b3 ), durant une p\u00e9riode donn\u00e9e en\nutilisant uniquement les valeurs sur 1 heure mesur\u00e9es quotidiennement\nentre 8 heures (d\u00e9but de la mesure) et 20 heures (pile, fin de la mesure) CET,\nce qui correspond \u00e0 de 8h \u00e0 19h TU (donnant bien 12h de mesures, 8h donnant\nla moyenne horaire de 7h01 \u00e0 8h00)\n\nParam\u00e8tres:\ndf: DataFrame de mesures sur lequel appliqu\u00e9 le calcul\nnb_an: (int) Nombre d'ann\u00e9es contenu dans le df, et servant \u00e0 diviser le\nr\u00e9sultat retourn\u00e9\n\nRetourne:\nUn DataFrame de r\u00e9sultat de calcul", "id": "f6427:m4"}
{"signature": "def co(df):", "body": "polluant = '<STR_LIT>'<EOL>if not isinstance(df.index.freq, pdoffset.Hour):<EOL><INDENT>raise FreqException(\"<STR_LIT>\")<EOL><DEDENT>res = {\"<STR_LIT>\": depassement(moyennes_glissantes(df, sur=<NUM_LIT:8>),<EOL>valeur=<NUM_LIT:10>),<EOL>}<EOL>return polluant, res<EOL>", "docstring": "Calculs r\u00e9glementaires pour le monoxyde de carbone\n\nParam\u00e8tres:\ndf: DataFrame contenant les mesures, avec un index temporel\n(voir xair.get_mesure)\n\nRetourne:\nUne s\u00e9rie de r\u00e9sultats dans un DataFrame :\n******\nunit\u00e9 (u): \u00b5g/m3 (microgramme par m\u00e8tre cube)\n\nValeur limite pour la sant\u00e9 humaine max J 8H glissantes: 10000u\n\nLes r\u00e9sultats sont donn\u00e9s en terme d'heure de d\u00e9passement", "id": "f6427:m11"}
{"signature": "def show_max(df):", "body": "df = df.astype(pd.np.float)<EOL>res = list()<EOL>for c in df.columns:<EOL><INDENT>serie = df[c]<EOL>res.append(serie.where(cond=serie == serie.max(), other=pd.np.nan).dropna())<EOL><DEDENT>return pd.DataFrame(res).T<EOL>", "docstring": "Pour chaque serie (colonne) d'un DataFrame, va rechercher la (les) valeur(s)\n    et la (les) date(s) du (des) max.\n\n    Param\u00e8tres:\n    df: DataFrame de valeurs \u00e0 calculer\n\n    Retourne:\n    Un DataFrame montrant pour chaque serie (colonne), les valeurs maxs aux dates\n    d'apparition.", "id": "f6427:m22"}
{"signature": "def c6h6(df):", "body": "polluant = '<STR_LIT>'<EOL>if not isinstance(df.index.freq, pdoffset.Hour):<EOL><INDENT>raise FreqException(\"<STR_LIT>\")<EOL><DEDENT>res = {\"<STR_LIT>\": depassement(df.resample('<STR_LIT:A>', how='<STR_LIT>'), valeur=<NUM_LIT:2>),<EOL>\"<STR_LIT>\": depassement(df.resample('<STR_LIT:A>', how='<STR_LIT>'), valeur=<NUM_LIT:5>),<EOL>}<EOL>return polluant, res<EOL>", "docstring": "Calculs r\u00e9glementaires pour le benz\u00e8ne\n\nParam\u00e8tres:\ndf: DataFrame contenant les mesures, avec un index temporel\n(voir xair.get_mesure)\n\nRetourne:\nUne s\u00e9rie de r\u00e9sultats dans un DataFrame :\n******\nunit\u00e9 (u): \u00b5g/m3 (microgramme par m\u00e8tre cube)\n\nObjectif de qualit\u00e9 en moyenne A: 2u\nValeur limite pour la sant\u00e9 humaine en moyenne A: 5u\n\nLes r\u00e9sultats sont donn\u00e9s en terme d'heure de d\u00e9passement", "id": "f6427:m13"}
{"signature": "def print_synthese(fct, df):", "body": "res_count = dict()<EOL>polluant, res = fct(df)<EOL>print(\"<STR_LIT>\" % polluant)<EOL>print(\"<STR_LIT>\")<EOL>for k, v in res.items():<EOL><INDENT>comp = compresse(v)<EOL>if not comp.empty:<EOL><INDENT>comp.index.name = k<EOL>print(comp.to_string(na_rep='<STR_LIT>', float_format=lambda x: \"<STR_LIT>\" % x))<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\" % k)<EOL><DEDENT>res_count[k] = v.count()<EOL><DEDENT>res_count = pd.DataFrame(res_count).T<EOL>print(\"<STR_LIT>\")<EOL>print(res_count)<EOL>", "docstring": "Pr\u00e9sente une synth\u00e8se des calculs r\u00e9glementaires en fournissant les valeurs\ncalcul\u00e9es suivant les r\u00e9glementations d\u00e9finies dans chaque fonction de calcul\net un tableau de nombre de d\u00e9passement.\n\nParam\u00e8tres:\nfct: fonction renvoyant les \u00e9l\u00e9ments calcul\u00e9es\ndf: DataFrame de valeurs d'entr\u00e9e \u00e0 fournir \u00e0 la fonction\n\nRetourne:\nImprime sur l'\u00e9cran les valeurs synth\u00e9tis\u00e9es", "id": "f6427:m19"}
{"signature": "def nombre_depassement(df, valeur, freq=None):", "body": "dep = depassement(df, valeur)<EOL>if freq is not None:<EOL><INDENT>dep = dep.resample(freq, how='<STR_LIT>')<EOL><DEDENT>return dep.count()<EOL>", "docstring": "Calcule le nombre de d\u00e9passement d'une valeur sur l'int\u00e9gralit\u00e9 du temps,\nou suivant un regroupement temporel.\n\nParam\u00e8tres:\ndf: DataFrame de mesures sur lequel appliqu\u00e9 le calcul\nvaleur: (float) valeur \u00e0 chercher le d\u00e9passement (strictement sup\u00e9rieur \u00e0)\nfreq: (str ou None): Fr\u00e9quence de temps sur lequel effectu\u00e9 un regroupement.\nfreq peut prendre les valeurs 'H' pour heure, 'D' pour jour, 'W' pour semaine,\n'M' pour mois et 'A' pour ann\u00e9e, ou None pour ne pas faire de regroupement.\nLe nombre de d\u00e9passement sera alors regroup\u00e9 suivant cette fr\u00e9quence de temps.\n\nRetourne:\nUne Series du nombre de d\u00e9passement, total suivant la fr\u00e9quence intrins\u00e8que\ndu DataFrame d'entr\u00e9e, ou agglom\u00e9r\u00e9 suivant la fr\u00e9quence de temps choisie.", "id": "f6427:m3"}
{"signature": "def html_synthese(fct, df):", "body": "html = str()<EOL>res_count = dict()<EOL>buf = StringIO()<EOL>polluant, res = fct(df)<EOL>html += '<STR_LIT>'.format(polluant)<EOL>for k, v in res.items():<EOL><INDENT>buf.write(\"<STR_LIT>\")<EOL>comp = compresse(v)<EOL>if not comp.empty:<EOL><INDENT>comp.index.name = k<EOL>comp.to_html(buf=buf,<EOL>sparsify=True,<EOL>na_rep=\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>buf.write(<EOL>'<STR_LIT>'.format(<EOL>k))<EOL><DEDENT>buf.write(\"<STR_LIT>\")<EOL>res_count[k] = v.count()<EOL><DEDENT>res_count = pd.DataFrame(res_count).T<EOL>res_count.index.name = \"<STR_LIT>\"<EOL>html += \"<STR_LIT>\"<EOL>html += res_count.to_html(sparsify=True)<EOL>html += \"<STR_LIT>\"<EOL>html += buf.getvalue()<EOL>return html<EOL>", "docstring": "Retourne au format html une synth\u00e8se des calculs r\u00e9glementaires en\nfournissant les valeurs calcul\u00e9es suivant les r\u00e9glementations d\u00e9finies dans\nchaque fonction de calcul et un tableau de nombre de d\u00e9passement.\n\nParam\u00e8tres:\nfct: fonction renvoyant les \u00e9l\u00e9ments calcul\u00e9es\ndf: DataFrame de valeurs d'entr\u00e9e \u00e0 fournir \u00e0 la fonction\n\nRetourne:\nUne chaine de caract\u00e8re pr\u00eate \u00e0 \u00eatre utilis\u00e9 dans une page html", "id": "f6427:m21"}
{"signature": "def depassement(df, valeur):", "body": "dep = df.where(df > valeur)<EOL>return dep<EOL>", "docstring": "Calcule les d\u00e9passements d'une valeur.\n\nParam\u00e8tres:\ndf: DataFrame de mesures sur lequel appliqu\u00e9 le calcul\nvaleur: (float) valeur \u00e0 chercher le d\u00e9passement (strictement sup\u00e9rieur \u00e0)\n\nRetourne:\nUn DataFrame de valeurs, de m\u00eame taille (shape) que le df d'entr\u00e9e, dont toutes\nles valeurs sont supprim\u00e9es, sauf celles sup\u00e9rieures \u00e0 la valeur de r\u00e9f\u00e9rence", "id": "f6427:m2"}
{"signature": "def o3(df):", "body": "polluant = '<STR_LIT>'<EOL>if not isinstance(df.index.freq, pdoffset.Hour):<EOL><INDENT>raise FreqException(\"<STR_LIT>\")<EOL><DEDENT>res = {\"<STR_LIT>\": depassement(df, valeur=<NUM_LIT>),<EOL>\"<STR_LIT>\": depassement(df, valeur=<NUM_LIT>),<EOL>\"<STR_LIT>\": depassement(df, valeur=<NUM_LIT>),<EOL>\"<STR_LIT>\": consecutive(df, valeur=<NUM_LIT>, sur=<NUM_LIT:3>),<EOL>\"<STR_LIT>\": consecutive(df, valeur=<NUM_LIT>, sur=<NUM_LIT:3>),<EOL>\"<STR_LIT>\": depassement(<EOL>moyennes_glissantes(df, sur=<NUM_LIT:8>), valeur=<NUM_LIT>),<EOL>}<EOL>return polluant, res<EOL>", "docstring": "Calculs r\u00e9glementaires pour l'ozone\n\nParam\u00e8tres:\ndf: DataFrame contenant les mesures, avec un index temporel\n(voir xair.get_mesure)\n\nRetourne:\nUne s\u00e9rie de r\u00e9sultats dans un DataFrame :\n******\nunit\u00e9 (u): \u00b5g/m3 (microgramme par m\u00e8tre cube)\n\nSeuil de RI sur 1H: 180u\nSeuil d'Alerte sur 1H: 240u\nSeuil d'Alerte sur 3H cons\u00e9cutives: 240u\nSeuil d'Alerte sur 3H cons\u00e9cutives: 300u\nSeuil d'Alerte sur 1H: 360u\nObjectif de qualit\u00e9 pour la sant\u00e9 humaine sur 8H glissantes: 120u\n\nLes r\u00e9sultats sont donn\u00e9s en terme d'heure de d\u00e9passement", "id": "f6427:m12"}
{"signature": "def _aot(df, nb_an=<NUM_LIT:1>, limite=<NUM_LIT>, mois_debut=<NUM_LIT:5>, mois_fin=<NUM_LIT:7>,<EOL>heure_debut=<NUM_LIT:7>, heure_fin=<NUM_LIT>):", "body": "res = df[(df.index.month >= mois_debut) & (df.index.month <= mois_fin) &<EOL>(df.index.hour >= heure_debut) & (df.index.hour <= heure_fin)]<EOL>nb_valid = res.count()<EOL>nb_total = res.shape[<NUM_LIT:0>]<EOL>pcent = nb_valid.astype(pd.np.float) / nb_total * <NUM_LIT:100><EOL>brut = (res[res > limite] - limite) / nb_an<EOL>brut = brut.sum()<EOL>net = brut / nb_valid * nb_total<EOL>print(\"\"\"<STR_LIT>\"\"\".format(total=nb_total,<EOL>m_d=mois_debut, m_f=mois_fin,<EOL>h_d=heure_debut, h_f=heure_fin<EOL>)<EOL>)<EOL>aot = pd.DataFrame([brut.round(), nb_valid.round(), pcent.round(), net.round()],<EOL>index=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>return aot<EOL>", "docstring": "Calcul de l'AOT de mani\u00e8re param\u00e8trable. Voir AOT40_vegetation ou\nAOT40_foret pour des param\u00e8tres pr\u00e9alablement fix\u00e9s.\n\nParam\u00e8tres:\ndf: DataFrame de mesures sur lequel appliqu\u00e9 le calcul\nnb_an: (int) Nombre d'ann\u00e9es contenu dans le df, et servant \u00e0 diviser le\nr\u00e9sultat retourn\u00e9\nlimite: (float) valeur limite au del\u00e0 de laquelle les diff\u00e9rences seront\n    additionn\u00e9es pour calculer l'AOT\nmois_debut: (int) mois de d\u00e9but de calcul\nmois_fin: (int) mois de fin de calcul\nheure_debut: (int) premi\u00e8re heure de chaque jour apr\u00e8s laquelle les valeurs\n    sont retenues\nheure_fin: (int) derni\u00e8re heure de chaque jour avant laquelle les valeurs\n    sont retenues\n\nRetourne:\nUn DataFrame de r\u00e9sultat de calcul", "id": "f6427:m6"}
{"signature": "def liste_sites_prelevement(self):", "body": "_sql = \"\"\"<STR_LIT>\"\"\"<EOL>return psql.read_sql(_sql, self.conn)<EOL>", "docstring": "Liste les sites de pr\u00e9l\u00e8vements manuels", "id": "f6428:c0:m12"}
{"signature": "def disconnect(self):", "body": "self._close()<EOL>", "docstring": "Fermeture de la connexion \u00e0 la base", "id": "f6428:c0:m3"}
{"signature": "def is_invalid(e):", "body": "return e in INVALID_CODES<EOL>", "docstring": "Renvoie Vrai ou Faux suivant que e est dans la liste des codes invalides\n\n    Param\u00e8tres:\n    e: une lettre en majuscules", "id": "f6428:m0"}
{"signature": "def liste_reseaux_indices(self):", "body": "_sql = \"\"\"<STR_LIT>\"\"\"<EOL>return psql.read_sql(_sql, self.conn)<EOL>", "docstring": "Liste des r\u00e9seaux d'indices ATMO", "id": "f6428:c0:m11"}
{"signature": "def date_range(debut, fin, freq):", "body": "debut_dt = debut.replace(hour=<NUM_LIT:0>, minute=<NUM_LIT:0>, second=<NUM_LIT:0>, microsecond=<NUM_LIT:0>)<EOL>fin_dt = fin.replace(hour=<NUM_LIT>, minute=<NUM_LIT>, second=<NUM_LIT:0>, microsecond=<NUM_LIT:0>)<EOL>if freq in ('<STR_LIT:M>', '<STR_LIT:A>'):  <EOL><INDENT>freq += '<STR_LIT:S>'<EOL>debut_dt = debut_dt.replace(day=<NUM_LIT:1>, minute=<NUM_LIT:0>, second=<NUM_LIT:0>, microsecond=<NUM_LIT:0>)<EOL>fin_dt = fin_dt.replace(day=<NUM_LIT:1>, minute=<NUM_LIT:0>, second=<NUM_LIT:0>, microsecond=<NUM_LIT:0>)<EOL><DEDENT>dates_completes = pd.date_range(start=debut_dt, end=fin_dt, freq=freq)<EOL>return dates_completes<EOL>", "docstring": "G\u00e9n\u00e8re une liste de date en tenant compte des heures de d\u00e9but et fin d'une journ\u00e9e.\nLa date de d\u00e9but sera toujours cal\u00e9e \u00e0 0h, et celle de fin \u00e0 23h\n\nParam\u00e8tres:\ndebut: datetime repr\u00e9sentant la date de d\u00e9but\nfin: datetime repr\u00e9sentant la date de fin\nfreq: freq de temps. Valeurs possibles : T (minute), H (heure), D (jour),\nM (mois), Y (ann\u00e9e). Peux prendre des cycles, comme 15T pour 15 minutes", "id": "f6428:m4"}
{"signature": "def get_indices_et_ssi(self, reseau, debut, fin, complet=True):", "body": "if complet:<EOL><INDENT>i_str = \"<STR_LIT>\"<EOL>ssi_str = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>i_str = \"<STR_LIT>\"<EOL>ssi_str = \"<STR_LIT>\"<EOL><DEDENT>_sql = \"\"\"<STR_LIT>\"\"\".format(i_str, ssi_str, reseau, debut, fin)<EOL>df = psql.read_sql(_sql, self.conn)<EOL>df = df.set_index(['<STR_LIT>', '<STR_LIT:date>'])<EOL>return df<EOL>", "docstring": "Renvoie l'indice et les sous_indices\n        complet: renvoyer les complets ou les pr\u00e9vus\n        reseau: nom du r\u00e9seau \u00e0 renvoyer\n        debut: date de d\u00e9but \u00e0 renvoyer\n        fin: date de fin \u00e0 renvoyer\n\n        Renvoi : reseau, date, Indice, sous_ind NO2,PM10,O3,SO2", "id": "f6428:c0:m16"}
{"signature": "def etats_to_invalid(etats):", "body": "return etats.applymap(is_invalid)<EOL>", "docstring": "Transforme un dataframe de codes d'\u00e9tat en une grille d'invalidation.\n\nParam\u00e8tres:\netats: dataframe de codes d'\u00e9tat, tel que retourn\u00e9 par get_mesure avec le\nparam\u00e8tre brut=True\n\nRetourne:\nUn dataframe de bool\u00e9en. Chaque valeur est soit \u00e0 False, et la mesure\ncorrespondante (\u00e0 la m\u00eame position dans le dataframe de mesure) n'est pas\ninvalide, soit \u00e0 True et la mesure est invalide.", "id": "f6428:m1"}
{"signature": "def liste_mesures(self, reseau=None, station=None, parametre=None, mesure=None):", "body": "tbreseau = \"<STR_LIT>\"<EOL>conditions = []<EOL>if reseau:<EOL><INDENT>reseau = _format(reseau)<EOL>tbreseau = \"\"\"<STR_LIT>\"\"\"<EOL>conditions.append(\"\"\"<STR_LIT>\"\"\" % reseau)<EOL><DEDENT>if parametre:<EOL><INDENT>parametre = _format(parametre)<EOL>conditions.append(\"\"\"<STR_LIT>\"\"\" % parametre)<EOL><DEDENT>if station:<EOL><INDENT>station = _format(station)<EOL>conditions.append(\"\"\"<STR_LIT>\"\"\" % station)<EOL><DEDENT>if mesure:<EOL><INDENT>mesure = _format(mesure)<EOL>conditions.append(\"\"\"<STR_LIT>\"\"\" % mesure)<EOL><DEDENT>condition = \"<STR_LIT>\" % \"<STR_LIT>\".join(conditions) if conditions else \"<STR_LIT>\"<EOL>_sql = \"\"\"<STR_LIT>\"\"\" % (tbreseau, condition)<EOL>return psql.read_sql(_sql, self.conn)<EOL>", "docstring": "D\u00e9crit les mesures:\n- d'un ou des reseaux,\n- d'une ou des stations,\n- d'un ou des parametres\nou d\u00e9crit une (des) mesures suivant son (leur) identifiant(s)\nChaque attribut peut \u00eatre \u00e9tendu en rajoutant des noms s\u00e9par\u00e9s par des\nvirgules ou en les mettant dans une liste/tuple/pandas.Series.\nAinsi pour avoir la liste des mesures en vitesse et direction de vent:\nparametre=\"VV,DV\" ou = [\"VV\", \"DV\"]\nLes arguments sont combin\u00e9s ensemble pour la s\u00e9lection des mesures.\n\nParam\u00e8tres:\nreseau : nom du reseau dans lequel lister les mesures\nstation: nom de la station o\u00f9 lister les mesures\nparametre: Code chimique du parametre \u00e0 lister\nmesure: nom de la mesure \u00e0 d\u00e9crire", "id": "f6428:c0:m6"}
{"signature": "def get_indices(self, res, debut, fin):", "body": "res = _format(res)<EOL>_sql = \"\"\"<STR_LIT>\"\"\" % (res, debut, fin)<EOL>rep = psql.read_sql(_sql, self.conn)<EOL>df = rep.set_index(['<STR_LIT>', '<STR_LIT:date>'])<EOL>df = df['<STR_LIT>']<EOL>df = df.unstack('<STR_LIT>')<EOL>dates_completes = date_range(to_date(debut), to_date(fin), freq='<STR_LIT:D>')<EOL>df = df.reindex(dates_completes)<EOL>return df<EOL>", "docstring": "R\u00e9cup\u00e9ration des indices ATMO pour un r\u00e9seau donn\u00e9.\n\nParam\u00e8tres:\nres : Nom du ou des r\u00e9seaux \u00e0 chercher (str, list, pandas.Series)\ndebut: date de d\u00e9but, format YYYY-MM-JJ (str)\nfin: Date de fin, format YYYY-MM-JJ (str)", "id": "f6428:c0:m15"}
{"signature": "def get_sqltext(self, format_=<NUM_LIT:1>):", "body": "if format_ == <NUM_LIT:1>:<EOL><INDENT>_sql = \"\"\"<STR_LIT>\"\"\"<EOL><DEDENT>if format_ == <NUM_LIT:2>:<EOL><INDENT>_sql = \"\"\"<STR_LIT>\"\"\"<EOL><DEDENT>return psql.read_sql(_sql, self.conn)<EOL>", "docstring": "retourne les requ\u00eates actuellement lanc\u00e9es sur le serveur", "id": "f6428:c0:m17"}
{"signature": "def _connect(self):", "body": "try:<EOL><INDENT>self.conn = cx_Oracle.connect(self._ORA_FULL)<EOL>self.cursor = self.conn.cursor()<EOL>print('<STR_LIT>')<EOL><DEDENT>except cx_Oracle.Error as e:<EOL><INDENT>print(\"<STR_LIT>\" % (e))<EOL>raise cx_Oracle.Error('<STR_LIT>')<EOL><DEDENT>", "docstring": "Connexion \u00e0 la base XAIR", "id": "f6428:c0:m1"}
{"signature": "def _format(noms):", "body": "if isinstance(noms, (list, tuple, pd.Series)):<EOL><INDENT>noms = '<STR_LIT:U+002C>'.join(noms)<EOL><DEDENT>noms = noms.replace(\"<STR_LIT:U+002C>\", \"<STR_LIT>\")<EOL>return noms<EOL>", "docstring": "Formate une donn\u00e9e d'entr\u00e9e pour \u00eatre exploitable dans les fonctions liste_*\net get_*.\n\nParam\u00e8tres:\nnoms: cha\u00eene de caract\u00e8re, liste ou tuples de cha\u00eenes de caract\u00e8res ou\npandas.Series de cha\u00eenes de caract\u00e8res.\n\nRetourne:\nUne cha\u00eenes de caract\u00e8res dont chaque \u00e9l\u00e9ment est s\u00e9par\u00e9 du suivant par les\ncaract\u00e8res ',' (simples quotes comprises)", "id": "f6428:m3"}
{"signature": "def get_mesures(self, mes, debut=None, fin=None, freq='<STR_LIT:H>', format=None,<EOL>dayfirst=False, brut=False):", "body": "def create_index(index, freq):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>decalage = <NUM_LIT:1>  <EOL>if freq == '<STR_LIT:T>' or freq == '<STR_LIT>':<EOL><INDENT>f = pd.tseries.offsets.Minute<EOL>decalage = <NUM_LIT:15><EOL><DEDENT>if freq == '<STR_LIT:H>':<EOL><INDENT>f = pd.tseries.offsets.Hour<EOL><DEDENT>if freq == '<STR_LIT:D>':<EOL><INDENT>f = pd.tseries.offsets.Day<EOL><DEDENT>if freq == '<STR_LIT:M>':<EOL><INDENT>f = pd.tseries.offsets.MonthBegin<EOL><DEDENT>if freq == '<STR_LIT:A>':<EOL><INDENT>f = pd.tseries.offsets.YearBegin<EOL><DEDENT>else:<EOL><INDENT>f = pd.tseries.offsets.Hour<EOL><DEDENT>new_index = [date + f(int(delta) - decalage) for date, delta in index]<EOL>return new_index<EOL><DEDENT>mes = _format(mes)<EOL>debut = to_date(debut, dayfirst, format)<EOL>if not fin:<EOL><INDENT>fin = debut<EOL><DEDENT>else:<EOL><INDENT>fin = to_date(fin, dayfirst, format)<EOL><DEDENT>if freq in ('<STR_LIT>', '<STR_LIT:T>'):<EOL><INDENT>freq = '<STR_LIT>'<EOL><DEDENT>if freq == '<STR_LIT>':<EOL><INDENT>diviseur = <NUM_LIT><EOL>champ_val = '<STR_LIT:U+002C>'.join(['<STR_LIT>' % (x, x * <NUM_LIT:15>) for x in range(<NUM_LIT:1>, diviseur + <NUM_LIT:1>)])<EOL>champ_code = '<STR_LIT>'<EOL>table = '<STR_LIT>'<EOL><DEDENT>elif freq == '<STR_LIT:H>':<EOL><INDENT>diviseur = <NUM_LIT><EOL>champ_val = '<STR_LIT:U+002C>'.join(['<STR_LIT>' % (x, x) for x in range(<NUM_LIT:1>, diviseur + <NUM_LIT:1>)])<EOL>champ_code = '<STR_LIT>'<EOL>table = '<STR_LIT>'<EOL><DEDENT>elif freq == '<STR_LIT:D>':<EOL><INDENT>diviseur = <NUM_LIT:1><EOL>champ_val = '<STR_LIT>'<EOL>champ_code = '<STR_LIT>'<EOL>table = '<STR_LIT>'<EOL><DEDENT>elif freq == '<STR_LIT:M>':<EOL><INDENT>diviseur = <NUM_LIT:12><EOL>champ_val = '<STR_LIT:U+002C>'.join(['<STR_LIT>' % (x, x) for x in range(<NUM_LIT:1>, diviseur + <NUM_LIT:1>)])<EOL>champ_code = '<STR_LIT>'<EOL>table = 
'<STR_LIT>'<EOL><DEDENT>elif freq == '<STR_LIT:A>':<EOL><INDENT>diviseur = <NUM_LIT:1><EOL>champ_val = '<STR_LIT>'<EOL>champ_code = '<STR_LIT>'<EOL>table = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if table == '<STR_LIT>':<EOL><INDENT>champ_date = '<STR_LIT>'<EOL>debut_db = debut<EOL>fin_db = fin<EOL><DEDENT>else:<EOL><INDENT>champ_date = '<STR_LIT>'<EOL>debut_db = debut.replace(month=<NUM_LIT:1>, day=<NUM_LIT:1>, hour=<NUM_LIT:0>, minute=<NUM_LIT:0>)<EOL>fin_db = fin.replace(month=<NUM_LIT:12>, day=<NUM_LIT>, hour=<NUM_LIT>, minute=<NUM_LIT:0>)<EOL><DEDENT>debut_db = debut_db.strftime(\"<STR_LIT>\")<EOL>fin_db = fin_db.strftime(\"<STR_LIT>\")<EOL>_sql = \"\"\"<STR_LIT>\"\"\".format(champ_date=champ_date,<EOL>table=table,<EOL>champ_code=champ_code,<EOL>mes=mes,<EOL>champ_val=champ_val,<EOL>debut=debut_db,<EOL>fin=fin_db)<EOL>rep = psql.read_sql(_sql, self.conn)<EOL>df = rep.set_index(['<STR_LIT:id>', '<STR_LIT:date>'])<EOL>etats = df['<STR_LIT>']<EOL>df = df.drop('<STR_LIT>', axis=<NUM_LIT:1>)<EOL>df_stack = df.stack(dropna=False)<EOL>df = df_stack.unstack('<STR_LIT:id>')<EOL>index = create_index(df.index, freq)<EOL>df.reset_index(inplace=True, drop=True)<EOL>df['<STR_LIT:date>'] = index<EOL>df = df.set_index(['<STR_LIT:date>'])<EOL>etats = etats.unstack('<STR_LIT:id>')<EOL>etats.fillna(value=MISSING_CODE * diviseur, inplace=True)<EOL>etats = etats.sum(axis=<NUM_LIT:0>)<EOL>etats = pd.DataFrame(list(zip(*etats.apply(list))))<EOL>etats.index = df.index<EOL>etats.columns = df.columns<EOL>dates_completes = date_range(debut, fin, freq)<EOL>df = df.reindex(dates_completes)<EOL>etats = etats.reindex(dates_completes)<EOL>invalid = etats_to_invalid(etats)<EOL>if not brut:<EOL><INDENT>dfn = df.mask(invalid)  <EOL>return dfn<EOL><DEDENT>else:<EOL><INDENT>return df, etats<EOL><DEDENT>", "docstring": "R\u00e9cup\u00e9ration des donn\u00e9es de mesure.\n\nParam\u00e8tres:\nmes: Un nom de mesure ou plusieurs s\u00e9par\u00e9es par des 
virgules, une liste\n    (list, tuple, pandas.Series) de noms\ndebut: Chaine de caract\u00e8re ou objet datetime d\u00e9crivant la date de d\u00e9but.\n    D\u00e9faut=date du jour\nfin: Chaine de caract\u00e8re ou objet datetime d\u00e9crivant la date de fin.\n    D\u00e9faut=date de d\u00e9but\nfreq: fr\u00e9quence de temps. '15T' | 'H' | 'D' | 'M' | 'A' (15T pour quart-horaire)\nformat: chaine de caract\u00e8re d\u00e9crivant le format des dates (ex:\"%Y-%m-%d\"\n    pour debut/fin=\"2013-01-28\"). Appeler pyair.date.strtime_help() pour\n    obtenir la liste des codes possibles.\n    Defaut=\"%Y-%m-%d\"\ndayfirst: Si aucun format n'est fourni et que les dates sont des chaines\n    de caract\u00e8res, aide le d\u00e9crypteur \u00e0 transformer la date en objet datetime\n    en sp\u00e9cifiant que les dates commencent par le jour (ex:11/09/2012\n    pourrait \u00eatre interpret\u00e9 comme le 09 novembre si dayfirst=False)\nbrut: si oui ou non renvoyer le dataframe brut, non invalid\u00e9, et les\n    codes d'\u00e9tat des mesures\n    Defaut=False\n\nRetourne:\nUn dataframe contenant toutes les mesures demand\u00e9es.\nSi brut=True, renvoie le  dataframe des mesures brutes non invalid\u00e9es et\nle dataframe des codes d'\u00e9tats.\nLe dataframe valide (net) peut \u00eatre alors recalcul\u00e9 en faisant:\nbrut, etats = xr.get_mesure(..., brut=True)\ninvalides = etats_to_invalid(etats)\nnet = brut.mask(invalides)", "id": "f6428:c0:m13"}
{"signature": "def get_manuelles(self, site, code_parametre, debut, fin, court=False):", "body": "condition = \"<STR_LIT>\" % code_parametre<EOL>condition += \"<STR_LIT>\" % site<EOL>condition += \"<STR_LIT>\" % debut<EOL>condition += \"<STR_LIT>\" % fin<EOL>if court == False:<EOL><INDENT>select = \"\"\"<STR_LIT>\"\"\"<EOL><DEDENT>else:<EOL><INDENT>select = \"\"\"<STR_LIT>\"\"\"<EOL><DEDENT>_sql = \"\"\"<STR_LIT>\"\"\" % (select, condition)<EOL>return psql.read_sql(_sql, self.conn)<EOL>", "docstring": "Recup\u00e9ration des mesures manuelles (labo) pour un site\n\nsite: num\u00e9ro du site (voir fonction liste_sites_prelevement)\ncode_parametre: code ISO du param\u00e8tre \u00e0 rechercher (C6H6=V4)\ndebut: date de d\u00e9but du premier pr\u00e9l\u00e8vement\nfin: date de fin du dernier pr\u00e9l\u00e8vement\ncourt: Renvoie un tableau au format court ou long (colonnes)", "id": "f6428:c0:m14"}
{"signature": "def validate(method):", "body": "<EOL>name_error = '<STR_LIT>'<EOL>@functools.wraps(method)<EOL>def validator(self, name, *args):<EOL><INDENT>if name not in self.allowed_opts:<EOL><INDENT>raise ValueError(name_error.format(name))<EOL><DEDENT>return method(self, name, *args)<EOL><DEDENT>return validator<EOL>", "docstring": "Config option name value validator decorator.", "id": "f6461:m0"}
{"signature": "def isoperator(x):", "body": "return all(<EOL>hasattr(x, name) for name in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>)<EOL>", "docstring": "Returns `True` if the given object implements the required attributes for\nan operator.\n\nReturns:\n    bool", "id": "f6464:m0"}
{"signature": "def register_operators(*operators):", "body": "def validate(operator):<EOL><INDENT>if isoperator(operator):<EOL><INDENT>return True<EOL><DEDENT>raise NotImplementedError('<STR_LIT>'.format(operator))<EOL><DEDENT>def register(operator):<EOL><INDENT>for name in operator.operators:<EOL><INDENT>if name in Engine.operators:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>name,<EOL>operator.__name__<EOL>))<EOL><DEDENT>Engine.operators[name] = operator<EOL><DEDENT><DEDENT>[register(operator) for operator in operators if validate(operator)]<EOL>", "docstring": "Registers one or multiple operators in the test engine.", "id": "f6464:m1"}
{"signature": "def __getattr__(self, name):", "body": "return self.flags.get(name)<EOL>", "docstring": "Overloads class attribute accessor method in order to proxy access to\nthe store.", "id": "f6467:c0:m2"}
{"signature": "def __getattr__(self, name):", "body": "<EOL>if self._match_alias(name):<EOL><INDENT>self._test._engine.add_keyword(name)<EOL>return self<EOL><DEDENT>if self.is_on_access():<EOL><INDENT>self._on_access()<EOL><DEDENT>suboperator = self._match_suboperator(name)<EOL>if suboperator:<EOL><INDENT>self._test._engine.add_keyword(name)<EOL>if suboperator.is_on_access():<EOL><INDENT>suboperator._on_access()<EOL><DEDENT>return suboperator<EOL><DEDENT>return getattr(self._test, name)<EOL>", "docstring": "Overloads attribute accessor in order to load the assertion\noperator dynamically.", "id": "f6480:c0:m6"}
{"signature": "def __ror__(self, value):", "body": "return self._fn(value)._trigger()<EOL>", "docstring": "Overloads ``|`` operator.", "id": "f6480:c0:m7"}
{"signature": "def __gt__(self, value):", "body": "return self.__ror_(value)<EOL>", "docstring": "Overloads ``>`` operator.", "id": "f6480:c0:m8"}
{"signature": "@property<EOL><INDENT>def should(self):<DEDENT>", "body": "return self<EOL>", "docstring": "Alias name to self reference the current instance.\nRequired for DSL API.", "id": "f6482:c0:m1"}
{"signature": "def __getattr__(self, name):", "body": "<EOL>if self._global:<EOL><INDENT>subject = self._context_subject if self._context else empty<EOL>return Test(subject).__getattr__(name)<EOL><DEDENT>return OperatorResolver(self).resolve(name)<EOL>", "docstring": "Overloads class attribute accessor proxying calls dynamically\ninto assertion operators calls.\n\nThis method is invoked by Python runtime engine, not by developers.", "id": "f6482:c0:m5"}
{"signature": "def __ror__(self, value):", "body": "return self.__overload__(value)<EOL>", "docstring": "Overloads ``|`` operator.", "id": "f6482:c0:m13"}
{"signature": "def _clone(self):", "body": "test = Test(self._ctx.subject)<EOL>test._ctx = self._ctx.clone()<EOL>test._engine = self._engine.clone()<EOL>return test<EOL>", "docstring": "Clones the current `Test` instance.\n\nReturns:\n    grappa.Test", "id": "f6482:c0:m7"}
{"signature": "def __overload__(self, subject):", "body": "if isinstance(subject, Test):<EOL><INDENT>fork = subject._clone()<EOL>fork._ctx.chained = True<EOL>fork._ctx.subject = self._ctx.subject<EOL>return fork._trigger()<EOL><DEDENT>return self.__call__(subject, overload=True)<EOL>", "docstring": "Method triggered by magic methods executed via operator overloading.", "id": "f6482:c0:m11"}
{"signature": "def _trigger(self):", "body": "log.debug('<STR_LIT>'.format(self._ctx))<EOL>try:<EOL><INDENT>err = self._engine.run(self._ctx)<EOL><DEDENT>except Exception as _err:<EOL><INDENT>err = _err<EOL><DEDENT>finally:<EOL><INDENT>self._engine.reset()<EOL>self._root._engine.reset()<EOL><DEDENT>if err:<EOL><INDENT>raise err<EOL><DEDENT>return self<EOL>", "docstring": "Trigger assertions in the current test engine.\n\nRaises:\n    AssertionError: in case of assertion error.\n    Exception: in case of any other assertion error.", "id": "f6482:c0:m6"}
{"signature": "def __getattr__(self, name):", "body": "_test = getattr(test, name)<EOL>if inspect.ismethod(_test):<EOL><INDENT>return _test<EOL><DEDENT>_test._ctx.style = self._style<EOL>return _test<EOL>", "docstring": "Overloads object attribute accessory proxy to the currently used\noperator and parent test instance.", "id": "f6486:c0:m1"}
{"signature": "def run(self, ctx):", "body": "<EOL>if ctx.reverse:<EOL><INDENT>self.engine.reverse()<EOL><DEDENT>if self.engine.empty:<EOL><INDENT>raise AssertionError('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>return self.run_assertions(ctx)<EOL><DEDENT>except Exception as _err:<EOL><INDENT>if getattr(_err, '<STR_LIT>', False):<EOL><INDENT>raise _err<EOL><DEDENT>return self.render_error(ctx, _err)<EOL><DEDENT>", "docstring": "Runs the current phase.", "id": "f6488:c0:m3"}
{"signature": "def attribute(*args, **kw):", "body": "return operator(kind=Operator.Type.ATTRIBUTE, *args, **kw)<EOL>", "docstring": "Registers a new attribute only operator function in the test engine.\n\nArguments:\n    *args: variadic arguments.\n    **kw: variadic keyword arguments.\n\nReturns:\n    function", "id": "f6490:m1"}
{"signature": "def operator(name=None, operators=None, aliases=None, kind=None):", "body": "def delegator(assertion, subject, expected, *args, **kw):<EOL><INDENT>return assertion.test(subject, expected, *args, **kw)<EOL><DEDENT>def decorator(fn):<EOL><INDENT>operator = Operator(fn=fn, aliases=aliases, kind=kind)<EOL>_name = name if isinstance(name, six.string_types) else fn.__name__<EOL>operator.operators = (_name,)<EOL>_operators = operators<EOL>if isinstance(_operators, list):<EOL><INDENT>_operators = tuple(_operators)<EOL><DEDENT>if isinstance(_operators, tuple):<EOL><INDENT>operator.operators += _operators<EOL><DEDENT>Engine.register(operator)<EOL>return functools.partial(delegator, operator)<EOL><DEDENT>return decorator(name) if inspect.isfunction(name) else decorator<EOL>", "docstring": "Registers a new operator function in the test engine.\n\nArguments:\n    *args: variadic arguments.\n    **kw: variadic keyword arguments.\n\nReturns:\n    function", "id": "f6490:m0"}
{"signature": "def run(self, *args, **kw):", "body": "log.debug('<STR_LIT>'.format(<EOL>self.__class__.__name__, args<EOL>))<EOL>if self.kind == OperatorTypes.ATTRIBUTE:<EOL><INDENT>return self.match(self.ctx)<EOL><DEDENT>else:<EOL><INDENT>return self.run_matcher(*args, **kw)<EOL><DEDENT>", "docstring": "Runs the current operator with the subject arguments to test.\n\nThis method is implemented by matchers only.", "id": "f6491:c1:m6"}
{"signature": "def observe(matcher):", "body": "@functools.wraps(matcher)<EOL>def observer(self, subject, *expected, **kw):<EOL><INDENT>if hasattr(self, '<STR_LIT>'):<EOL><INDENT>self.before(subject, *expected, **kw)<EOL><DEDENT>result = matcher(self, subject, *expected, **kw)<EOL>if result is not True and hasattr(self, '<STR_LIT>'):<EOL><INDENT>self.after_error(result, subject, *expected, **kw)<EOL><DEDENT>if result is True and hasattr(self, '<STR_LIT>'):<EOL><INDENT>self.after_success(subject, *expected, **kw)<EOL><DEDENT>if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self.show_diff = all([<EOL>isinstance(subject, six.string_types),<EOL>all([isinstance(x, six.string_types) for x in expected]),<EOL>])<EOL><DEDENT>return result<EOL><DEDENT>return observer<EOL>", "docstring": "Internal decorator to trigger operator hooks before/after\nmatcher execution.", "id": "f6491:c1:m3"}
{"signature": "@observe<EOL><INDENT>def run_matcher(self, subject, *expected, **kw):<DEDENT>", "body": "<EOL>self.expected = expected<EOL>_args = (subject,)<EOL>if self.kind == OperatorTypes.MATCHER:<EOL><INDENT>_args += expected<EOL><DEDENT>try:<EOL><INDENT>result = self.match(*_args, **kw)<EOL><DEDENT>except Exception as error:<EOL><INDENT>return self._make_error(error=error)<EOL><DEDENT>reasons = []<EOL>if isinstance(result, tuple):<EOL><INDENT>result, reasons = result<EOL><DEDENT>if result is False and self.ctx.negate:<EOL><INDENT>return True<EOL><DEDENT>if result is True and not self.ctx.negate:<EOL><INDENT>return True<EOL><DEDENT>return self._make_error(reasons=reasons)<EOL>", "docstring": "Runs the operator matcher test function.", "id": "f6491:c1:m5"}
{"signature": "@attribute(operators=(<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'<EOL>))<EOL>def not_be(ctx):", "body": "ctx.negate = True<EOL>", "docstring": "Semantic negation attributes providing chainable declarative DSL\nfor assertions.", "id": "f6509:m1"}
{"signature": "def get_product(self, code):", "body": "candidates = list(filter(lambda c: c.code == code, self.list_products()))<EOL>if not len(candidates):<EOL><INDENT>return None<EOL><DEDENT>return candidates[<NUM_LIT:0>]<EOL>", "docstring": "Gets product with given code\n\n        NOTE: Filters the list of products in this end due to API limitations", "id": "f6518:c5:m2"}
{"signature": "def list_products(self):", "body": "url = self.base_url<EOL>obdata = self.connection.make_get(url)<EOL>return ProductList(obdata, self)<EOL>", "docstring": "Lists all products in the system, returns ProductList you can iterate over.\n\n        Holvi API does not currently support server-side filtering so you will have to use Pythons filter() function as usual.", "id": "f6518:c5:m1"}
{"signature": "def _make_ppp(self, method, url, payload):", "body": "self._init_session()<EOL>self.session.cache.clear()<EOL>m = getattr(self.session, method)<EOL>r = m(url, json=payload)<EOL>try:<EOL><INDENT>r.raise_for_status()<EOL><DEDENT>except Timeout as e:<EOL><INDENT>raise ApiTimeout(e.__str__(), response=e.response)  <EOL><DEDENT>except HTTPError as e:<EOL><INDENT>if e.response.status_code in (<NUM_LIT>, <NUM_LIT>):<EOL><INDENT>raise AuthenticationError(e.__str__(), response=e.response)  <EOL><DEDENT>else:<EOL><INDENT>raise ApiError(e.__str__(), response=e.response)  <EOL><DEDENT><DEDENT>return r.json()<EOL>", "docstring": "Internal helper to make POST/PUT/PATCH requests (or whatever the underlying library supports)", "id": "f6519:c0:m7"}
{"signature": "@classmethod<EOL><INDENT>def singleton(self, poolname, authkey):<DEDENT>", "body": "global CONNECTION_MAP<EOL>mapkey = \"<STR_LIT>\" % (poolname, authkey)<EOL>if not mapkey in CONNECTION_MAP:<EOL><INDENT>CONNECTION_MAP[mapkey] = Connection(poolname, authkey)<EOL><DEDENT>return CONNECTION_MAP[mapkey]<EOL>", "docstring": "Get a singleton of a connection", "id": "f6519:c0:m0"}
{"signature": "def make_post(self, url, payload):", "body": "return self._make_ppp('<STR_LIT>', url, payload)<EOL>", "docstring": "Make a POST request", "id": "f6519:c0:m4"}
{"signature": "def _init_session(self):", "body": "if not self.session:<EOL><INDENT>self.session = requests.Session()<EOL>self.session.headers.update({<EOL>'<STR_LIT:Content-Type>': '<STR_LIT:application/json>',<EOL>'<STR_LIT>': '<STR_LIT>' % self.key<EOL>})<EOL><DEDENT>self.session.remove_expired_responses()<EOL>", "docstring": "Iniitializes a requests.Session for us if not already initialized", "id": "f6519:c0:m2"}
{"signature": "def make_put(self, url, payload):", "body": "return self._make_ppp('<STR_LIT>', url, payload)<EOL>", "docstring": "Make a PUT request", "id": "f6519:c0:m5"}
{"signature": "def save(self):", "body": "if self.code:<EOL><INDENT>raise HolviError(\"<STR_LIT>\")<EOL><DEDENT>send_json = self.to_holvi_dict()<EOL>send_json.update({<EOL>'<STR_LIT>': self.api.connection.pool<EOL>})<EOL>url = six.u(self.api.base_url + \"<STR_LIT>\")<EOL>stat = self.api.connection.make_post(url, send_json)<EOL>code = stat[\"<STR_LIT>\"].split(\"<STR_LIT:/>\")[-<NUM_LIT:2>]  <EOL>return (stat[\"<STR_LIT>\"], self.api.get_order(code))<EOL>", "docstring": "Saves this order to Holvi, returns a tuple with the order itself and checkout_uri", "id": "f6526:c0:m5"}
{"signature": "def _init_empty(self):", "body": "self._jsondata = {<EOL>\"<STR_LIT:code>\": None,<EOL>\"<STR_LIT>\": [],<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:email>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>}<EOL>", "docstring": "Creates the base set of attributes order has/needs", "id": "f6526:c0:m2"}
{"signature": "def get_category(self, code):", "body": "candidates = list(filter(lambda c: c.code == code, itertools.chain(self.list_income_categories(), self.list_expense_categories())))<EOL>if not len(candidates):<EOL><INDENT>return None<EOL><DEDENT>return candidates[<NUM_LIT:0>]<EOL>", "docstring": "Gets category with given code\n\n        NOTE: Filters the list of income and expense categories in this end due to API limitations", "id": "f6527:c6:m3"}
{"signature": "def int2fin_reference(n):", "body": "checksum = <NUM_LIT:10> - (sum([int(c) * i for c, i in zip(str(n)[::-<NUM_LIT:1>], it.cycle((<NUM_LIT:7>, <NUM_LIT:3>, <NUM_LIT:1>)))]) % <NUM_LIT:10>)<EOL>if checksum == <NUM_LIT:10>:<EOL><INDENT>checksum = <NUM_LIT:0><EOL><DEDENT>return \"<STR_LIT>\" % (n, checksum)<EOL>", "docstring": "Calculates a checksum for a Finnish national reference number", "id": "f6530:m0"}
{"signature": "def _map_holvi_json_properties(self):", "body": "pass<EOL>", "docstring": "For mapping properties from _jsondata to something more Pythonic\n\n        For really simple objects there is no need to implement this", "id": "f6530:c1:m2"}
{"signature": "def _init_empty(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Creates the base set of attributes object has/needs", "id": "f6530:c1:m5"}
{"signature": "def _get_iter(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Must set self._iter", "id": "f6530:c2:m1"}
{"signature": "def iso_reference_valid_char(c, raise_error=True):", "body": "if c in ISO_REFERENCE_VALID:<EOL><INDENT>return True<EOL><DEDENT>if raise_error:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (c, ISO_REFERENCE_VALID))<EOL><DEDENT>return False<EOL>", "docstring": "Helper to make sure the given character is valid for a reference number", "id": "f6530:m2"}
{"signature": "def iso_reference_isvalid(ref):", "body": "ref = str(ref)<EOL>cs_source = ref[<NUM_LIT:4>:] + ref[:<NUM_LIT:4>]<EOL>return (iso_reference_str2int(cs_source) % <NUM_LIT>) == <NUM_LIT:1><EOL>", "docstring": "Validates ISO reference number", "id": "f6530:m7"}
{"signature": "def fin_reference_isvalid(n):", "body": "return int2fin_reference(str(n)[:-<NUM_LIT:1>]) == str(n)<EOL>", "docstring": "Check that the given Finnish national reference number is valid (ie the checksum is valid)", "id": "f6530:m1"}
{"signature": "def str2iso_reference(n):", "body": "cs_source = n + '<STR_LIT>'<EOL>cs = <NUM_LIT> - (iso_reference_str2int(cs_source) % <NUM_LIT>)<EOL>return \"<STR_LIT>\" % (cs, n)<EOL>", "docstring": "Calculates checksum (and adds the RF prefix) for an international reference number from a string (can contain any characters valid for the reference)", "id": "f6530:m6"}
{"signature": "def _init_empty(self):", "body": "self._jsondata = {<EOL>\"<STR_LIT:code>\": None,<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": (datetime.datetime.now().date() + datetime.timedelta(days=<NUM_LIT>)).isoformat(),<EOL>\"<STR_LIT>\": datetime.datetime.now().date().isoformat(),<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT:type>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT:name>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:email>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": [],<EOL>}<EOL>", "docstring": "Creates the base set of attributes invoice has/needs", "id": "f6531:c0:m1"}
{"signature": "def get_invoice(self, invoice_code):", "body": "url = self.base_url + '<STR_LIT>'.format(code=invoice_code)<EOL>ijson = self.connection.make_get(url)<EOL>return Invoice(self, ijson)<EOL>", "docstring": "Retrieve given Invoice", "id": "f6531:c3:m2"}
{"signature": "def void(self):", "body": "return self.delete()<EOL>", "docstring": "Mark invoice as void in Holvi", "id": "f6531:c0:m5"}
{"signature": "def delete(self):", "body": "url = str(self.api.base_url + '<STR_LIT>').format(code=self.code)  <EOL>payload = {<EOL>'<STR_LIT>': True,<EOL>}<EOL>stat = self.api.connection.make_put(url, payload)<EOL>", "docstring": "Mark invoice as void in Holvi", "id": "f6531:c0:m6"}
{"signature": "def convertGribToTiff(listeFile,listParam,listLevel,liststep,grid,startDate,endDate,outFolder):", "body": "dicoValues={}<EOL>for l in listeFile:<EOL><INDENT>grbs = pygrib.open(l)<EOL>grbs.seek(<NUM_LIT:0>)<EOL>index=<NUM_LIT:1><EOL>for j in range(len(listLevel),<NUM_LIT:0>,-<NUM_LIT:1>):<EOL><INDENT>for i in range(len(listParam)-<NUM_LIT:1>,-<NUM_LIT:1>,-<NUM_LIT:1>):<EOL><INDENT>grb = grbs[index]<EOL>p=grb.name.replace('<STR_LIT:U+0020>','<STR_LIT:_>')<EOL>if grb.level != <NUM_LIT:0>:<EOL><INDENT>l=str(grb.level)+'<STR_LIT:_>'+grb.typeOfLevel<EOL><DEDENT>else:<EOL><INDENT>l=grb.typeOfLevel<EOL><DEDENT>if p+'<STR_LIT:_>'+l not in dicoValues.keys():<EOL><INDENT>dicoValues[p+'<STR_LIT:_>'+l]=[]<EOL><DEDENT>dicoValues[p+'<STR_LIT:_>'+l].append(grb.values)<EOL>shape=grb.values.shape<EOL>lat,lon=grb.latlons()<EOL>geoparam=(lon.min(),lat.max(),grid,grid)<EOL>index+= <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>nbJour=(endDate-startDate).days+<NUM_LIT:1><EOL>for s in range(<NUM_LIT:0>, (len(liststep)*nbJour-len(listeFile))):<EOL><INDENT>for k in dicoValues.keys():<EOL><INDENT>dicoValues[k].append(np.full(shape, np.nan))<EOL><DEDENT><DEDENT>for i in range(len(dicoValues.keys())-<NUM_LIT:1>,-<NUM_LIT:1>,-<NUM_LIT:1>):<EOL><INDENT>dictParam=dict((k,dicoValues[dicoValues.keys()[i]][k]) for k in range(<NUM_LIT:0>,len(dicoValues[dicoValues.keys()[i]])))<EOL>sorted(dictParam.items(), key=lambda x: x[<NUM_LIT:0>])<EOL>outputImg=outFolder+'<STR_LIT:/>'+dicoValues.keys()[i]+'<STR_LIT:_>'+startDate.strftime('<STR_LIT>')+'<STR_LIT:_>'+endDate.strftime('<STR_LIT>')+'<STR_LIT>'<EOL>writeTiffFromDicoArray(dictParam,outputImg,shape,geoparam)<EOL><DEDENT>for f in listeFile:<EOL><INDENT>os.remove(f)<EOL><DEDENT>", "docstring": "Convert GRIB to Tif", "id": "f6533:m18"}
{"signature": "@click.command()<EOL>@click.argument('<STR_LIT:filename>')<EOL>@click.option('<STR_LIT>', default=<NUM_LIT>, type=int)<EOL>@click.option('<STR_LIT>', default='<STR_LIT>')<EOL>@click.option('<STR_LIT>')<EOL>@click.option('<STR_LIT>')<EOL>def slinky(filename, seconds_available, bucket_name, aws_key, aws_secret):", "body": "if not os.environ.get('<STR_LIT>') and os.environ.get('<STR_LIT>'):<EOL><INDENT>print('<STR_LIT>')<EOL>exit()<EOL><DEDENT>print(create_temp_s3_link(filename, seconds_available, bucket_name))<EOL>", "docstring": "Simple program that creates an temp S3 link.", "id": "f6538:m0"}
{"signature": "def decode(self, code, terminator='<STR_LIT>'):", "body": "if code:<EOL><INDENT>if terminator not in code:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'.format(<EOL>terminator if terminator != '<STR_LIT>' else '<STR_LIT>'<EOL>)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>wordlist = ['<STR_LIT>'] * len(code)<EOL>for i in range(len(code)):<EOL><INDENT>wordlist = sorted(<EOL>code[i] + wordlist[i] for i in range(len(code))<EOL>)<EOL><DEDENT>rows = [w for w in wordlist if w[-<NUM_LIT:1>] == terminator][<NUM_LIT:0>]<EOL>return rows.rstrip(terminator)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>", "docstring": "r\"\"\"Return a word decoded from BWT form.\n\n        Parameters\n        ----------\n        code : str\n            The word to transform from BWT form\n        terminator : str\n            A character added to signal the end of the string\n\n        Returns\n        -------\n        str\n            Word decoded by BWT\n\n        Raises\n        ------\n        ValueError\n            Specified terminator absent from code.\n\n        Examples\n        --------\n        >>> bwt = BWT()\n        >>> bwt.decode('n\\x00ilag')\n        'align'\n        >>> bwt.decode('annb\\x00aa')\n        'banana'\n        >>> bwt.decode('annb@aa', '@')\n        'banana'", "id": "f6541:c0:m1"}
{"signature": "def bwt_encode(word, terminator='<STR_LIT>'):", "body": "return BWT().encode(word, terminator)<EOL>", "docstring": "r\"\"\"Return the Burrows-Wheeler transformed form of a word.\n\n    This is a wrapper for :py:meth:`BWT.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform using BWT\n    terminator : str\n        A character added to signal the end of the string\n\n    Returns\n    -------\n    str\n        Word encoded by BWT\n\n    Examples\n    --------\n    >>> bwt_encode('align')\n    'n\\x00ilag'\n    >>> bwt_encode('banana')\n    'annb\\x00aa'\n    >>> bwt_encode('banana', '@')\n    'annb@aa'", "id": "f6541:m0"}
{"signature": "def rle_decode(text, use_bwt=True):", "body": "text = RLE().decode(text)<EOL>if use_bwt:<EOL><INDENT>text = BWT().decode(text)<EOL><DEDENT>return text<EOL>", "docstring": "r\"\"\"Perform decoding of run-length-encoding (RLE).\n\n    This is a wrapper for :py:meth:`RLE.decode`.\n\n    Parameters\n    ----------\n    text : str\n        A text string to decode\n    use_bwt : bool\n        Indicates whether to perform BWT decoding after RLE decoding\n\n    Returns\n    -------\n    str\n        Word decoded by RLE\n\n    Examples\n    --------\n    >>> rle_decode('n\\x00ilag')\n    'align'\n    >>> rle_decode('align', use_bwt=False)\n    'align'\n\n    >>> rle_decode('annb\\x00aa')\n    'banana'\n    >>> rle_decode('banana', use_bwt=False)\n    'banana'\n\n    >>> rle_decode('ab\\x00abbab5a')\n    'aaabaabababa'\n    >>> rle_decode('3abaabababa', False)\n    'aaabaabababa'", "id": "f6542:m1"}
{"signature": "def decode(self, longval, nbits):", "body": "val = Fraction(longval, long(<NUM_LIT:1>) << nbits)<EOL>letters = []<EOL>probs_items = [<EOL>(char, minval, maxval)<EOL>for (char, (minval, maxval)) in self._probs.items()<EOL>]<EOL>char = '<STR_LIT:\\x00>'<EOL>while True:<EOL><INDENT>for (char, minval, maxval) in probs_items:  <EOL><INDENT>if minval <= val < maxval:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if char == '<STR_LIT:\\x00>':<EOL><INDENT>break<EOL><DEDENT>letters.append(char)<EOL>delta = maxval - minval<EOL>val = (val - minval) / delta<EOL><DEDENT>return '<STR_LIT>'.join(letters)<EOL>", "docstring": "Decode the number to a string using the given statistics.\n\n        Parameters\n        ----------\n        longval : int\n            The first part of an encoded tuple from encode\n        nbits : int\n            The second part of an encoded tuple from encode\n\n        Returns\n        -------\n        str\n            The arithmetically decoded text\n\n        Example\n        -------\n        >>> ac = Arithmetic('the quick brown fox jumped over the lazy dog')\n        >>> ac.decode(16720586181, 34)\n        'align'", "id": "f6544:c0:m5"}
{"signature": "def encode(self, text):", "body": "text = text_type(text)<EOL>if '<STR_LIT:\\x00>' in text:<EOL><INDENT>text = text.replace('<STR_LIT:\\x00>', '<STR_LIT:U+0020>')<EOL><DEDENT>minval = Fraction(<NUM_LIT:0>)<EOL>maxval = Fraction(<NUM_LIT:1>)<EOL>for char in text + '<STR_LIT:\\x00>':<EOL><INDENT>prob_range = self._probs[char]<EOL>delta = maxval - minval<EOL>maxval = minval + prob_range[<NUM_LIT:1>] * delta<EOL>minval = minval + prob_range[<NUM_LIT:0>] * delta<EOL><DEDENT>delta = (maxval - minval) / <NUM_LIT:2><EOL>nbits = long(<NUM_LIT:0>)<EOL>while delta < <NUM_LIT:1>:<EOL><INDENT>nbits += <NUM_LIT:1><EOL>delta *= <NUM_LIT:2><EOL><DEDENT>if nbits == <NUM_LIT:0>:  <EOL><INDENT>return <NUM_LIT:0>, <NUM_LIT:0><EOL><DEDENT>avg = (maxval + minval) * <NUM_LIT:2> ** (nbits - <NUM_LIT:1>)<EOL>return avg.numerator // avg.denominator, nbits<EOL>", "docstring": "Encode a text using arithmetic coding.\n\n        Text and the 0-order probability statistics -> longval, nbits\n\n        The encoded number is Fraction(longval, 2**nbits)\n\n        Parameters\n        ----------\n        text : str\n            A string to encode\n\n        Returns\n        -------\n        tuple\n            The arithmetically coded text\n\n        Example\n        -------\n        >>> ac = Arithmetic('the quick brown fox jumped over the lazy dog')\n        >>> ac.encode('align')\n        (16720586181, 34)", "id": "f6544:c0:m4"}
{"signature": "def ac_encode(text, probs):", "body": "coder = Arithmetic()<EOL>coder.set_probs(probs)<EOL>return coder.encode(text)<EOL>", "docstring": "Encode a text using arithmetic coding with the provided probabilities.\n\n    This is a wrapper for :py:meth:`Arithmetic.encode`.\n\n    Parameters\n    ----------\n    text : str\n        A string to encode\n    probs : dict\n        A probability statistics dictionary generated by\n        :py:meth:`Arithmetic.train`\n\n    Returns\n    -------\n    tuple\n        The arithmetically coded text\n\n    Example\n    -------\n    >>> pr = ac_train('the quick brown fox jumped over the lazy dog')\n    >>> ac_encode('align', pr)\n    (16720586181, 34)", "id": "f6544:m1"}
{"signature": "def train(self, text):", "body": "text = text_type(text)<EOL>if '<STR_LIT:\\x00>' in text:<EOL><INDENT>text = text.replace('<STR_LIT:\\x00>', '<STR_LIT:U+0020>')<EOL><DEDENT>counts = Counter(text)<EOL>counts['<STR_LIT:\\x00>'] = <NUM_LIT:1><EOL>tot_letters = sum(counts.values())<EOL>tot = <NUM_LIT:0><EOL>self._probs = {}<EOL>prev = Fraction(<NUM_LIT:0>)<EOL>for char, count in sorted(<EOL>counts.items(), key=lambda x: (x[<NUM_LIT:1>], x[<NUM_LIT:0>]), reverse=True<EOL>):<EOL><INDENT>follow = Fraction(tot + count, tot_letters)<EOL>self._probs[char] = (prev, follow)<EOL>prev = follow<EOL>tot = tot + count<EOL><DEDENT>", "docstring": "r\"\"\"Generate a probability dict from the provided text.\n\n        Text to 0-order probability statistics as a dict\n\n        Parameters\n        ----------\n        text : str\n            The text data over which to calculate probability statistics. This\n            must not contain the NUL (0x00) character because that is used to\n            indicate the end of data.\n\n        Example\n        -------\n        >>> ac = Arithmetic()\n        >>> ac.train('the quick brown fox jumped over the lazy dog')\n        >>> ac.get_probs()\n        {' ': (Fraction(0, 1), Fraction(8, 45)),\n         'o': (Fraction(8, 45), Fraction(4, 15)),\n         'e': (Fraction(4, 15), Fraction(16, 45)),\n         'u': (Fraction(16, 45), Fraction(2, 5)),\n         't': (Fraction(2, 5), Fraction(4, 9)),\n         'r': (Fraction(4, 9), Fraction(22, 45)),\n         'h': (Fraction(22, 45), Fraction(8, 15)),\n         'd': (Fraction(8, 15), Fraction(26, 45)),\n         'z': (Fraction(26, 45), Fraction(3, 5)),\n         'y': (Fraction(3, 5), Fraction(28, 45)),\n         'x': (Fraction(28, 45), Fraction(29, 45)),\n         'w': (Fraction(29, 45), Fraction(2, 3)),\n         'v': (Fraction(2, 3), Fraction(31, 45)),\n         'q': (Fraction(31, 45), Fraction(32, 45)),\n         'p': (Fraction(32, 45), Fraction(11, 15)),\n         'n': (Fraction(11, 
15), Fraction(34, 45)),\n         'm': (Fraction(34, 45), Fraction(7, 9)),\n         'l': (Fraction(7, 9), Fraction(4, 5)),\n         'k': (Fraction(4, 5), Fraction(37, 45)),\n         'j': (Fraction(37, 45), Fraction(38, 45)),\n         'i': (Fraction(38, 45), Fraction(13, 15)),\n         'g': (Fraction(13, 15), Fraction(8, 9)),\n         'f': (Fraction(8, 9), Fraction(41, 45)),\n         'c': (Fraction(41, 45), Fraction(14, 15)),\n         'b': (Fraction(14, 15), Fraction(43, 45)),\n         'a': (Fraction(43, 45), Fraction(44, 45)),\n         '\\x00': (Fraction(44, 45), Fraction(1, 1))}", "id": "f6544:c0:m3"}
{"signature": "def get_feature(vector, feature):", "body": "<EOL>if feature not in _FEATURE_MASK:<EOL><INDENT>raise AttributeError(<EOL>\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\".join(<EOL>(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>)<EOL>)<EOL>+ \"<STR_LIT:'>\"<EOL>)<EOL><DEDENT>mask = _FEATURE_MASK[feature]<EOL>pos_mask = mask >> <NUM_LIT:1><EOL>retvec = []<EOL>for char in vector:<EOL><INDENT>if char < <NUM_LIT:0>:<EOL><INDENT>retvec.append(float('<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>masked = char & mask<EOL>if masked == <NUM_LIT:0>:<EOL><INDENT>retvec.append(<NUM_LIT:0>)  <EOL><DEDENT>elif masked == mask:<EOL><INDENT>retvec.append(<NUM_LIT:2>)  <EOL><DEDENT>elif masked & pos_mask:<EOL><INDENT>retvec.append(<NUM_LIT:1>)  <EOL><DEDENT>else:<EOL><INDENT>retvec.append(-<NUM_LIT:1>)  <EOL><DEDENT><DEDENT><DEDENT>return retvec<EOL>", "docstring": "Get a feature vector.\n\n    This returns a list of ints, equal in length to the vector input,\n        representing presence/absence/neutrality with respect to a particular\n        phonetic feature.\n\n    Parameters\n    ----------\n    vector : list\n        A tuple or list of ints representing the phonetic features of a phone\n        or series of phones (such as is returned by the ipa_to_features\n        function)\n    feature : str\n        A feature name from the set:\n\n            - ``consonantal``\n            - ``sonorant``\n            - ``syllabic``\n            - ``labial``\n            - ``round``\n            - ``coronal``\n            - ``anterior``\n            - ``distributed``\n            - ``dorsal``\n            - ``high``\n            - ``low``\n           
 - ``back``\n            - ``tense``\n            - ``pharyngeal``\n            - ``ATR``\n            - ``voice``\n            - ``spread_glottis``\n            - ``constricted_glottis``\n            - ``continuant``\n            - ``strident``\n            - ``lateral``\n            - ``delayed_release``\n            - ``nasal``\n\n    Returns\n    -------\n    list of ints\n        A list indicating presence/absence/neutrality with respect to the\n        feature\n\n    Raises\n    ------\n    AttributeError\n        feature must be one of ...\n\n    Examples\n    --------\n    >>> tails = ipa_to_features('telz')\n    >>> get_feature(tails, 'consonantal')\n    [1, -1, 1, 1]\n    >>> get_feature(tails, 'sonorant')\n    [-1, 1, 1, -1]\n    >>> get_feature(tails, 'nasal')\n    [-1, -1, -1, -1]\n    >>> get_feature(tails, 'coronal')\n    [1, -1, 1, 1]", "id": "f6546:m1"}
{"signature": "def corpus_importer(self, corpus, n_val=<NUM_LIT:1>, bos='<STR_LIT>', eos='<STR_LIT>'):", "body": "if not corpus or not isinstance(corpus, Corpus):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>sentences = corpus.sents()<EOL>for sent in sentences:<EOL><INDENT>ngs = Counter(sent)<EOL>for key in ngs.keys():<EOL><INDENT>self._add_to_ngcorpus(self.ngcorpus, [key], ngs[key])<EOL><DEDENT>if n_val > <NUM_LIT:1>:<EOL><INDENT>if bos and bos != '<STR_LIT>':<EOL><INDENT>sent = [bos] + sent<EOL><DEDENT>if eos and eos != '<STR_LIT>':<EOL><INDENT>sent += [eos]<EOL><DEDENT>for i in range(<NUM_LIT:2>, n_val + <NUM_LIT:1>):<EOL><INDENT>for j in range(len(sent) - i + <NUM_LIT:1>):<EOL><INDENT>self._add_to_ngcorpus(<EOL>self.ngcorpus, sent[j : j + i], <NUM_LIT:1><EOL>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "r\"\"\"Fill in self.ngcorpus from a Corpus argument.\n\n        Parameters\n        ----------\n        corpus :Corpus\n            The Corpus from which to initialize the n-gram corpus\n        n_val : int\n            Maximum n value for n-grams\n        bos : str\n            String to insert as an indicator of beginning of sentence\n        eos : str\n            String to insert as an indicator of end of sentence\n\n        Raises\n        ------\n        TypeError\n            Corpus argument of the Corpus class required.\n\n        Example\n        -------\n        >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n'\n        >>> tqbf += 'And then it slept.\\n And the dog ran off.'\n        >>> ngcorp = NGramCorpus()\n        >>> ngcorp.corpus_importer(Corpus(tqbf))", "id": "f6547:c0:m1"}
{"signature": "def tf(self, term):", "body": "if '<STR_LIT:U+0020>' in term:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>tcount = self.get_count(term)<EOL>if tcount == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>return <NUM_LIT:1> + log10(tcount)<EOL>", "docstring": "r\"\"\"Return term frequency.\n\n        Parameters\n        ----------\n        term : str\n            The term for which to calculate tf\n\n        Returns\n        -------\n        float\n            The term frequency (tf)\n\n        Raises\n        ------\n        ValueError\n            tf can only calculate the frequency of individual words\n\n        Examples\n        --------\n        >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n'\n        >>> tqbf += 'And then it slept.\\n And the dog ran off.'\n        >>> ngcorp = NGramCorpus(Corpus(tqbf))\n        >>> NGramCorpus(Corpus(tqbf)).tf('the')\n        1.3010299956639813\n        >>> NGramCorpus(Corpus(tqbf)).tf('fox')\n        1.0", "id": "f6547:c0:m5"}
{"signature": "def get_count(self, ngram, corpus=None):", "body": "if not corpus:<EOL><INDENT>corpus = self.ngcorpus<EOL><DEDENT>if not ngram:<EOL><INDENT>return corpus[None]<EOL><DEDENT>if isinstance(ngram, (text_type, str)):<EOL><INDENT>ngram = text_type(ngram).split()<EOL><DEDENT>if ngram[<NUM_LIT:0>] in corpus:<EOL><INDENT>return self.get_count(ngram[<NUM_LIT:1>:], corpus[ngram[<NUM_LIT:0>]])<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "r\"\"\"Get the count of an n-gram in the corpus.\n\n        Parameters\n        ----------\n        ngram : str\n            The n-gram to retrieve the count of from the n-gram corpus\n        corpus : Corpus\n            The corpus\n\n        Returns\n        -------\n        int\n            The n-gram count\n\n        Examples\n        --------\n        >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n'\n        >>> tqbf += 'And then it slept.\\n And the dog ran off.'\n        >>> ngcorp = NGramCorpus(Corpus(tqbf))\n        >>> NGramCorpus(Corpus(tqbf)).get_count('the')\n        2\n        >>> NGramCorpus(Corpus(tqbf)).get_count('fox')\n        1", "id": "f6547:c0:m2"}
{"signature": "def docs_of_words(self):", "body": "return [<EOL>[words for sents in doc for words in sents] for doc in self.corpus<EOL>]<EOL>", "docstring": "r\"\"\"Return the docs in the corpus, with sentences flattened.\n\n        Each list within the corpus represents all the words of that document.\n        Thus the sentence level of lists has been flattened.\n\n        Returns\n        -------\n        [[str]]\n            The docs in the corpus as a list of list of strs\n\n        Example\n        -------\n        >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n'\n        >>> tqbf += 'And then it slept.\\n And the dog ran off.'\n        >>> corp = Corpus(tqbf)\n        >>> corp.docs_of_words()\n        [['The', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy',\n        'dog.', 'And', 'then', 'it', 'slept.', 'And', 'the', 'dog', 'ran',\n        'off.']]\n        >>> len(corp.docs_of_words())\n        1", "id": "f6549:c0:m5"}
{"signature": "def idf(self, term, transform=None):", "body": "docs_with_term = <NUM_LIT:0><EOL>docs = self.docs_of_words()<EOL>for doc in docs:<EOL><INDENT>doc_set = set(doc)<EOL>if transform:<EOL><INDENT>transformed_doc = []<EOL>for word in doc_set:<EOL><INDENT>transformed_doc.append(transform(word))<EOL><DEDENT>doc_set = set(transformed_doc)<EOL><DEDENT>if term in doc_set:<EOL><INDENT>docs_with_term += <NUM_LIT:1><EOL><DEDENT><DEDENT>if docs_with_term == <NUM_LIT:0>:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>return log10(len(docs) / docs_with_term)<EOL>", "docstring": "r\"\"\"Calculate the Inverse Document Frequency of a term in the corpus.\n\n        Parameters\n        ----------\n        term : str\n            The term to calculate the IDF of\n        transform : function\n            A function to apply to each document term before checking for the\n            presence of term\n\n        Returns\n        -------\n        float\n            The IDF\n\n        Examples\n        --------\n        >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n\\n'\n        >>> tqbf += 'And then it slept.\\n\\n And the dog ran off.'\n        >>> corp = Corpus(tqbf)\n        >>> print(corp.docs())\n        [[['The', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy',\n        'dog.']],\n        [['And', 'then', 'it', 'slept.']],\n        [['And', 'the', 'dog', 'ran', 'off.']]]\n        >>> round(corp.idf('dog'), 10)\n        0.4771212547\n        >>> round(corp.idf('the'), 10)\n        0.1760912591", "id": "f6549:c0:m7"}
{"signature": "def sents(self):", "body": "return [words for sents in self.corpus for words in sents]<EOL>", "docstring": "r\"\"\"Return the sentences in the corpus.\n\n        Each list within a sentence represents the words within that sentence.\n\n        Returns\n        -------\n        [[str]]\n            The sentences in the corpus as a list of lists of strs\n\n        Example\n        -------\n        >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n'\n        >>> tqbf += 'And then it slept.\\n And the dog ran off.'\n        >>> corp = Corpus(tqbf)\n        >>> corp.sents()\n        [['The', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy',\n        'dog.'], ['And', 'then', 'it', 'slept.'], ['And', 'the', 'dog',\n        'ran', 'off.']]\n        >>> len(corp.sents())\n        3", "id": "f6549:c0:m3"}
{"signature": "def paras(self):", "body": "return self.docs()<EOL>", "docstring": "r\"\"\"Return the paragraphs in the corpus.\n\n        Each list within a paragraph represents the sentences in that doc, each\n        of which is in turn a list of words within that sentence.\n        This is identical to the docs() member function and exists only to\n        mirror part of NLTK's API for corpora.\n\n        Returns\n        -------\n        [[[str]]]\n            The paragraphs in the corpus as a list of lists of lists of strs\n\n        Example\n        -------\n        >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n'\n        >>> tqbf += 'And then it slept.\\n And the dog ran off.'\n        >>> corp = Corpus(tqbf)\n        >>> corp.paras()\n        [[['The', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy',\n        'dog.'], ['And', 'then', 'it', 'slept.'], ['And', 'the', 'dog',\n        'ran', 'off.']]]\n        >>> len(corp.paras())\n        1", "id": "f6549:c0:m2"}
{"signature": "def words(self):", "body": "return [words for sents in self.sents() for words in sents]<EOL>", "docstring": "r\"\"\"Return the words in the corpus as a single list.\n\n        Returns\n        -------\n        [str]\n            The words in the corpus as a list of strs\n\n        Example\n        -------\n        >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n'\n        >>> tqbf += 'And then it slept.\\n And the dog ran off.'\n        >>> corp = Corpus(tqbf)\n        >>> corp.words()\n        ['The', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy',\n        'dog.', 'And', 'then', 'it', 'slept.', 'And', 'the', 'dog', 'ran',\n        'off.']\n        >>> len(corp.words())\n        18", "id": "f6549:c0:m4"}
{"signature": "def __init__(<EOL>self,<EOL>corpus_text='<STR_LIT>',<EOL>doc_split='<STR_LIT>',<EOL>sent_split='<STR_LIT:\\n>',<EOL>filter_chars='<STR_LIT>',<EOL>stop_words=None,<EOL>):", "body": "self.corpus = []<EOL>self.doc_split = doc_split<EOL>self.sent_split = sent_split<EOL>for document in corpus_text.split(doc_split):<EOL><INDENT>doc = []<EOL>for sentence in (s.split() for s in document.split(sent_split)):<EOL><INDENT>if stop_words:<EOL><INDENT>for word in set(stop_words):<EOL><INDENT>while word in sentence:<EOL><INDENT>sentence.remove(word)<EOL><DEDENT><DEDENT><DEDENT>for char in set(filter_chars):<EOL><INDENT>sentence = [word.replace(char, '<STR_LIT>') for word in sentence]<EOL><DEDENT>if sentence:<EOL><INDENT>doc.append(sentence)<EOL><DEDENT><DEDENT>if doc:<EOL><INDENT>self.corpus.append(doc)<EOL><DEDENT><DEDENT>", "docstring": "r\"\"\"Initialize Corpus.\n\n        By default, when importing a corpus:\n            - two consecutive newlines divide documents\n            - single newlines divide sentences\n            - other whitespace divides words\n\n        Parameters\n        ----------\n        corpus_text : str\n            The corpus text as a single string\n        doc_split : str\n            A character or string used to split corpus_text into documents\n        sent_split : str\n            A character or string used to split documents into sentences\n        filter_chars : list\n            A list of characters (as a string, tuple, set, or list) to filter\n            out of the corpus text\n        stop_words : list\n            A list of words (as a tuple, set, or list) to filter out of the\n            corpus text\n\n        Example\n        -------\n        >>> tqbf = 'The quick brown fox jumped over the lazy dog.\\n'\n        >>> tqbf += 'And then it slept.\\n And the dog ran off.'\n        >>> corp = Corpus(tqbf)", "id": "f6549:c0:m0"}
{"signature": "def stem(self, word):", "body": "lowered = word.lower()<EOL>if lowered[-<NUM_LIT:3>:] == '<STR_LIT>' and lowered[-<NUM_LIT:4>:-<NUM_LIT:3>] not in {'<STR_LIT:e>', '<STR_LIT:a>'}:<EOL><INDENT>return word[:-<NUM_LIT:3>] + ('<STR_LIT:Y>' if word[-<NUM_LIT:1>:].isupper() else '<STR_LIT:y>')<EOL><DEDENT>if lowered[-<NUM_LIT:2>:] == '<STR_LIT>' and lowered[-<NUM_LIT:3>:-<NUM_LIT:2>] not in {'<STR_LIT:a>', '<STR_LIT:e>', '<STR_LIT:o>'}:<EOL><INDENT>return word[:-<NUM_LIT:1>]<EOL><DEDENT>if lowered[-<NUM_LIT:1>:] == '<STR_LIT:s>' and lowered[-<NUM_LIT:2>:-<NUM_LIT:1>] not in {'<STR_LIT:u>', '<STR_LIT:s>'}:<EOL><INDENT>return word[:-<NUM_LIT:1>]<EOL><DEDENT>return word<EOL>", "docstring": "Return the S-stemmed form of a word.\n\n        Parameters\n        ----------\n        word : str\n            The word to stem\n\n        Returns\n        -------\n        str\n            Word stem\n\n        Examples\n        --------\n        >>> stmr = SStemmer()\n        >>> stmr.stem('summaries')\n        'summary'\n        >>> stmr.stem('summary')\n        'summary'\n        >>> stmr.stem('towers')\n        'tower'\n        >>> stmr.stem('reading')\n        'reading'\n        >>> stmr.stem('census')\n        'census'", "id": "f6550:c0:m0"}
{"signature": "def uealite(<EOL>word,<EOL>max_word_length=<NUM_LIT:20>,<EOL>max_acro_length=<NUM_LIT:8>,<EOL>return_rule_no=False,<EOL>var='<STR_LIT>',<EOL>):", "body": "return UEALite().stem(<EOL>word, max_word_length, max_acro_length, return_rule_no, var<EOL>)<EOL>", "docstring": "Return UEA-Lite stem.\n\n    This is a wrapper for :py:meth:`UEALite.stem`.\n\n    Parameters\n    ----------\n    word : str\n        The word to stem\n    max_word_length : int\n        The maximum word length allowed\n    max_acro_length : int\n        The maximum acronym length allowed\n    return_rule_no : bool\n        If True, returns the stem along with rule number\n    var : str\n        Variant rules to use:\n\n            - ``Adams`` to use Jason Adams' rules\n            - ``Perl`` to use the original Perl rules\n\n    Returns\n    -------\n    str or (str, int)\n        Word stem\n\n    Examples\n    --------\n    >>> uealite('readings')\n    'read'\n    >>> uealite('insulted')\n    'insult'\n    >>> uealite('cussed')\n    'cuss'\n    >>> uealite('fancies')\n    'fancy'\n    >>> uealite('eroded')\n    'erode'", "id": "f6551:m0"}
{"signature": "def stem(self, word, alternate_vowels=False):", "body": "<EOL>word = normalize('<STR_LIT>', word.lower())<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>if len(word) > <NUM_LIT:2>:<EOL><INDENT>for i in range(<NUM_LIT:2>, len(word)):<EOL><INDENT>if word[i] in self._vowels and word[i - <NUM_LIT:2>] in self._vowels:<EOL><INDENT>if word[i - <NUM_LIT:1>] == '<STR_LIT:u>':<EOL><INDENT>word = word[: i - <NUM_LIT:1>] + '<STR_LIT>' + word[i:]<EOL><DEDENT>elif word[i - <NUM_LIT:1>] == '<STR_LIT:y>':<EOL><INDENT>word = word[: i - <NUM_LIT:1>] + '<STR_LIT:Y>' + word[i:]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if alternate_vowels:<EOL><INDENT>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>r1_start = max(<NUM_LIT:3>, self._sb_r1(word))<EOL>r2_start = self._sb_r2(word)<EOL>niss_flag = False<EOL>if word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:3>:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:2>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:2>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:2>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL>niss_flag = True<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:2>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL>niss_flag = True<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:1>:] == '<STR_LIT:e>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL>niss_flag = True<EOL><DEDENT><DEDENT>elif 
word[-<NUM_LIT:1>:] == '<STR_LIT:s>':<EOL><INDENT>if (<EOL>len(word[r1_start:]) >= <NUM_LIT:1><EOL>and len(word) >= <NUM_LIT:2><EOL>and word[-<NUM_LIT:2>] in self._s_endings<EOL>):<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>if niss_flag and word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT>if word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:3>:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:2>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:2>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if (<EOL>len(word[r1_start:]) >= <NUM_LIT:2><EOL>and len(word) >= <NUM_LIT:6><EOL>and word[-<NUM_LIT:3>] in self._st_endings<EOL>):<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>if word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r2_start:]) >= <NUM_LIT:4> and word[-<NUM_LIT:5>] != '<STR_LIT:e>':<EOL><INDENT>word = word[:-<NUM_LIT:4>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:4>:] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>if len(word[r2_start:]) >= <NUM_LIT:4>:<EOL><INDENT>word = word[:-<NUM_LIT:4>]<EOL>if word[-<NUM_LIT:2>:] in {'<STR_LIT>', '<STR_LIT>'} and len(word[r1_start:]) >= <NUM_LIT:2>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT><DEDENT>elif word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r2_start:]) >= <NUM_LIT:4>:<EOL><INDENT>word = word[:-<NUM_LIT:4>]<EOL>if word[-<NUM_LIT:4>:] == '<STR_LIT>' and len(word[r2_start:]) >= <NUM_LIT:4>:<EOL><INDENT>word = word[:-<NUM_LIT:4>]<EOL><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>' and len(word[r2_start:]) >= <NUM_LIT:2>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT><DEDENT>elif word[-<NUM_LIT:3>:] in 
{'<STR_LIT:end>', '<STR_LIT>'}:<EOL><INDENT>if len(word[r2_start:]) >= <NUM_LIT:3>:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL>if (<EOL>word[-<NUM_LIT:2>:] == '<STR_LIT>'<EOL>and len(word[r2_start:]) >= <NUM_LIT:2><EOL>and word[-<NUM_LIT:3>] != '<STR_LIT:e>'<EOL>):<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>if len(word[r2_start:]) >= <NUM_LIT:2> and word[-<NUM_LIT:3>] != '<STR_LIT:e>':<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>for i in range(<NUM_LIT:0>, len(word)):<EOL><INDENT>if word[i] == '<STR_LIT:Y>':<EOL><INDENT>word = word[:i] + '<STR_LIT:y>' + word[i + <NUM_LIT:1> :]<EOL><DEDENT>elif word[i] == '<STR_LIT>':<EOL><INDENT>word = word[:i] + '<STR_LIT:u>' + word[i + <NUM_LIT:1> :]<EOL><DEDENT><DEDENT>_umlauts = dict(zip((ord(_) for _ in '<STR_LIT>'), '<STR_LIT>'))<EOL>word = word.translate(_umlauts)<EOL>return word<EOL>", "docstring": "Return Snowball German stem.\n\n        Parameters\n        ----------\n        word : str\n            The word to stem\n        alternate_vowels : bool\n            Composes ae as \u00e4, oe as \u00f6, and ue as \u00fc before running the algorithm\n\n        Returns\n        -------\n        str\n            Word stem\n\n        Examples\n        --------\n        >>> stmr = SnowballGerman()\n        >>> stmr.stem('lesen')\n        'les'\n        >>> stmr.stem('graues')\n        'grau'\n        >>> stmr.stem('buchstabieren')\n        'buchstabi'", "id": "f6557:c0:m0"}
{"signature": "def _undouble(self, word):", "body": "if (<EOL>len(word) > <NUM_LIT:1><EOL>and word[-<NUM_LIT:1>] == word[-<NUM_LIT:2>]<EOL>and word[-<NUM_LIT:1>] in {'<STR_LIT:d>', '<STR_LIT:k>', '<STR_LIT:t>'}<EOL>):<EOL><INDENT>return word[:-<NUM_LIT:1>]<EOL><DEDENT>return word<EOL>", "docstring": "Undouble endings -kk, -dd, and -tt.\n\n        Parameters\n        ----------\n        word : str\n          The word to stem\n\n        Returns\n        -------\n        str\n            The word with doubled endings undoubled", "id": "f6558:c0:m0"}
{"signature": "def stem(self, word):", "body": "<EOL>word = normalize('<STR_LIT>', text_type(word.lower()))<EOL>word = word.translate(self._accented)<EOL>for i in range(len(word)):<EOL><INDENT>if i == <NUM_LIT:0> and word[<NUM_LIT:0>] == '<STR_LIT:y>':<EOL><INDENT>word = '<STR_LIT:Y>' + word[<NUM_LIT:1>:]<EOL><DEDENT>elif word[i] == '<STR_LIT:y>' and word[i - <NUM_LIT:1>] in self._vowels:<EOL><INDENT>word = word[:i] + '<STR_LIT:Y>' + word[i + <NUM_LIT:1> :]<EOL><DEDENT>elif (<EOL>word[i] == '<STR_LIT:i>'<EOL>and word[i - <NUM_LIT:1>] in self._vowels<EOL>and i + <NUM_LIT:1> < len(word)<EOL>and word[i + <NUM_LIT:1>] in self._vowels<EOL>):<EOL><INDENT>word = word[:i] + '<STR_LIT:I>' + word[i + <NUM_LIT:1> :]<EOL><DEDENT><DEDENT>r1_start = max(<NUM_LIT:3>, self._sb_r1(word))<EOL>r2_start = self._sb_r2(word)<EOL>if word[-<NUM_LIT:5>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:5>:<EOL><INDENT>word = word[:-<NUM_LIT:3>] + '<STR_LIT:id>'<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:3> and (<EOL>word[-<NUM_LIT:4>] not in self._vowels and word[-<NUM_LIT:6>:-<NUM_LIT:3>] != '<STR_LIT>'<EOL>):<EOL><INDENT>word = self._undouble(word[:-<NUM_LIT:3>])<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:2> and (<EOL>word[-<NUM_LIT:3>] not in self._vowels and word[-<NUM_LIT:5>:-<NUM_LIT:2>] != '<STR_LIT>'<EOL>):<EOL><INDENT>word = self._undouble(word[:-<NUM_LIT:2>])<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if (<EOL>len(word[r1_start:]) >= <NUM_LIT:2><EOL>and word[-<NUM_LIT:3>] not in self._not_s_endings<EOL>):<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:1>:] == '<STR_LIT:s>':<EOL><INDENT>if (<EOL>len(word[r1_start:]) >= <NUM_LIT:1><EOL>and word[-<NUM_LIT:2>] not in self._not_s_endings<EOL>):<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>e_removed = False<EOL>if 
word[-<NUM_LIT:1>:] == '<STR_LIT:e>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:1> and word[-<NUM_LIT:2>] not in self._vowels:<EOL><INDENT>word = self._undouble(word[:-<NUM_LIT:1>])<EOL>e_removed = True<EOL><DEDENT><DEDENT>if word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r2_start:]) >= <NUM_LIT:4> and word[-<NUM_LIT:5>] != '<STR_LIT:c>':<EOL><INDENT>word = word[:-<NUM_LIT:4>]<EOL>if word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r1_start:]) >= <NUM_LIT:2> and (<EOL>word[-<NUM_LIT:3>] not in self._vowels and word[-<NUM_LIT:5>:-<NUM_LIT:2>] != '<STR_LIT>'<EOL>):<EOL><INDENT>word = self._undouble(word[:-<NUM_LIT:2>])<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r2_start:]) >= <NUM_LIT:4>:<EOL><INDENT>word = word[:-<NUM_LIT:4>]<EOL>if word[-<NUM_LIT:1>:] == '<STR_LIT:e>':<EOL><INDENT>if (<EOL>len(word[r1_start:]) >= <NUM_LIT:1><EOL>and word[-<NUM_LIT:2>] not in self._vowels<EOL>):<EOL><INDENT>word = self._undouble(word[:-<NUM_LIT:1>])<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r2_start:]) >= <NUM_LIT:4>:<EOL><INDENT>word = word[:-<NUM_LIT:4>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:3>:] in ('<STR_LIT:end>', '<STR_LIT>'):<EOL><INDENT>if len(word[r2_start:]) >= <NUM_LIT:3>:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL>if (<EOL>word[-<NUM_LIT:2>:] == '<STR_LIT>'<EOL>and len(word[r2_start:]) >= <NUM_LIT:2><EOL>and word[-<NUM_LIT:3>] != '<STR_LIT:e>'<EOL>):<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT>else:<EOL><INDENT>word = self._undouble(word)<EOL><DEDENT><DEDENT><DEDENT>elif word[-<NUM_LIT:3>:] == '<STR_LIT:bar>':<EOL><INDENT>if len(word[r2_start:]) >= <NUM_LIT:3> and e_removed:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if len(word[r2_start:]) >= <NUM_LIT:2> and word[-<NUM_LIT:3>] != '<STR_LIT:e>':<EOL><INDENT>word = 
word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>if (<EOL>len(word) >= <NUM_LIT:4><EOL>and word[-<NUM_LIT:3>] == word[-<NUM_LIT:2>]<EOL>and word[-<NUM_LIT:2>] in {'<STR_LIT:a>', '<STR_LIT:e>', '<STR_LIT:o>', '<STR_LIT:u>'}<EOL>and word[-<NUM_LIT:4>] not in self._vowels<EOL>and word[-<NUM_LIT:1>] not in self._vowels<EOL>and word[-<NUM_LIT:1>] != '<STR_LIT:I>'<EOL>):<EOL><INDENT>word = word[:-<NUM_LIT:2>] + word[-<NUM_LIT:1>]<EOL><DEDENT>for i in range(<NUM_LIT:0>, len(word)):<EOL><INDENT>if word[i] == '<STR_LIT:Y>':<EOL><INDENT>word = word[:i] + '<STR_LIT:y>' + word[i + <NUM_LIT:1> :]<EOL><DEDENT>elif word[i] == '<STR_LIT:I>':<EOL><INDENT>word = word[:i] + '<STR_LIT:i>' + word[i + <NUM_LIT:1> :]<EOL><DEDENT><DEDENT>return word<EOL>", "docstring": "Return Snowball Dutch stem.\n\n        Parameters\n        ----------\n        word : str\n            The word to stem\n\n        Returns\n        -------\n        str\n            Word stem\n\n        Examples\n        --------\n        >>> stmr = SnowballDutch()\n        >>> stmr.stem('lezen')\n        'lez'\n        >>> stmr.stem('opschorting')\n        'opschort'\n        >>> stmr.stem('ongrijpbaarheid')\n        'ongrijp'", "id": "f6558:c0:m1"}
{"signature": "def stem(self, word):", "body": "terminate = False<EOL>intact = True<EOL>while not terminate:<EOL><INDENT>for n in range(<NUM_LIT:6>, <NUM_LIT:0>, -<NUM_LIT:1>):<EOL><INDENT>if word[-n:] in self._rule_table[n]:<EOL><INDENT>accept = False<EOL>if len(self._rule_table[n][word[-n:]]) < <NUM_LIT:4>:<EOL><INDENT>for rule in self._rule_table[n][word[-n:]]:<EOL><INDENT>(<EOL>word,<EOL>accept,<EOL>intact,<EOL>terminate,<EOL>) = self._apply_rule(word, rule, intact, terminate)<EOL>if accept:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rule = self._rule_table[n][word[-n:]]<EOL>(word, accept, intact, terminate) = self._apply_rule(<EOL>word, rule, intact, terminate<EOL>)<EOL><DEDENT>if accept:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return word<EOL>", "docstring": "Return Paice-Husk stem.\n\n        Parameters\n        ----------\n        word : str\n            The word to stem\n\n        Returns\n        -------\n        str\n            Word stem\n\n        Examples\n        --------\n        >>> stmr = PaiceHusk()\n        >>> stmr.stem('assumption')\n        'assum'\n        >>> stmr.stem('verifiable')\n        'ver'\n        >>> stmr.stem('fancies')\n        'fant'\n        >>> stmr.stem('fanciful')\n        'fancy'\n        >>> stmr.stem('torment')\n        'tor'", "id": "f6559:c0:m3"}
{"signature": "def _sb_r2(self, term, r1_prefixes=None):", "body": "r1_start = self._sb_r1(term, r1_prefixes)<EOL>return r1_start + self._sb_r1(term[r1_start:])<EOL>", "docstring": "Return the R2 region, as defined in the Porter2 specification.\n\n        Parameters\n        ----------\n        term : str\n            The term to examine\n        r1_prefixes : set\n            Prefixes to consider\n\n        Returns\n        -------\n        int\n            Length of the R1 region", "id": "f6561:c0:m1"}
{"signature": "def _sb_r1(self, term, r1_prefixes=None):", "body": "vowel_found = False<EOL>if hasattr(r1_prefixes, '<STR_LIT>'):<EOL><INDENT>for prefix in r1_prefixes:<EOL><INDENT>if term[: len(prefix)] == prefix:<EOL><INDENT>return len(prefix)<EOL><DEDENT><DEDENT><DEDENT>for i in range(len(term)):<EOL><INDENT>if not vowel_found and term[i] in self._vowels:<EOL><INDENT>vowel_found = True<EOL><DEDENT>elif vowel_found and term[i] not in self._vowels:<EOL><INDENT>return i + <NUM_LIT:1><EOL><DEDENT><DEDENT>return len(term)<EOL>", "docstring": "Return the R1 region, as defined in the Porter2 specification.\n\n        Parameters\n        ----------\n        term : str\n            The term to examine\n        r1_prefixes : set\n            Prefixes to consider\n\n        Returns\n        -------\n        int\n            Length of the R1 region", "id": "f6561:c0:m0"}
{"signature": "def _sb_short_word(self, term, r1_prefixes=None):", "body": "if self._sb_r1(term, r1_prefixes) == len(<EOL>term<EOL>) and self._sb_ends_in_short_syllable(term):<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Return True iff term is a short word.\n\n        (...according to the Porter2 specification.)\n\n        Parameters\n        ----------\n        term : str\n            The term to examine\n        r1_prefixes : set\n            Prefixes to consider\n\n        Returns\n        -------\n        bool\n            True iff term is a short word", "id": "f6561:c0:m3"}
{"signature": "def _cond_d(self, word, suffix_len):", "body": "return len(word) - suffix_len >= <NUM_LIT:5><EOL>", "docstring": "Return Lovins' condition D.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m2"}
{"signature": "def _recode32(self, stem):", "body": "if stem[-<NUM_LIT:3>:-<NUM_LIT:2>] == '<STR_LIT:n>':<EOL><INDENT>return stem<EOL><DEDENT>return stem[:-<NUM_LIT:1>] + '<STR_LIT:s>'<EOL>", "docstring": "Return Lovins' conditional recode rule 32.\n\n        Parameters\n        ----------\n        stem : str\n            Word to stem\n\n        Returns\n        -------\n        str\n            Word stripped of suffix", "id": "f6562:c0:m32"}
{"signature": "def __init__(self):", "body": "self._suffix = {<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_c,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_g,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_f,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT:action>': 
self._cond_g,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_z,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_c,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_d,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_bb,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': self._cond_c,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_i,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': self._cond_y,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_c,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': 
None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_i,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': self._cond_j,<EOL>'<STR_LIT>': self._cond_cc,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_f,<EOL>'<STR_LIT>': self._cond_f,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_k,<EOL>'<STR_LIT>': self._cond_i,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_y,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_l,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_m,<EOL>'<STR_LIT>': self._cond_n,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_h,<EOL>'<STR_LIT>': self._cond_f,<EOL>'<STR_LIT>': self._cond_f,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': 
self._cond_bb,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': self._cond_o,<EOL>'<STR_LIT>': self._cond_f,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_y,<EOL>'<STR_LIT>': self._cond_y,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': self._cond_c,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_l,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_p,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_m,<EOL>'<STR_LIT>': self._cond_n,<EOL>'<STR_LIT>': self._cond_q,<EOL>'<STR_LIT>': self._cond_c,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_aa,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_f,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_r,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_bb,<EOL>'<STR_LIT>': self._cond_x,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': self._cond_f,<EOL>'<STR_LIT>': self._cond_e,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': self._cond_b,<EOL>'<STR_LIT>': self._cond_s,<EOL>'<STR_LIT>': self._cond_t,<EOL>'<STR_LIT>': self._cond_u,<EOL>'<STR_LIT>': self._cond_v,<EOL>'<STR_LIT>': self._cond_r,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT:a>': None,<EOL>'<STR_LIT:e>': None,<EOL>'<STR_LIT:i>': None,<EOL>'<STR_LIT:o>': None,<EOL>'<STR_LIT:s>': self._cond_w,<EOL>'<STR_LIT:y>': self._cond_b,<EOL>}<EOL>self._recode = (<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT:rb>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', self._recode9),<EOL>('<STR_LIT>', 
'<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT:end>', self._recode24),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', self._recode28),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', self._recode30),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', self._recode32),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>)<EOL>", "docstring": "Initialize the stemmer.", "id": "f6562:c0:m33"}
{"signature": "def _cond_cc(self, word, suffix_len):", "body": "return word[-suffix_len - <NUM_LIT:1>] == '<STR_LIT:l>'<EOL>", "docstring": "Return Lovins' condition CC.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m27"}
{"signature": "def _recode24(self, stem):", "body": "if stem[-<NUM_LIT:4>:-<NUM_LIT:3>] == '<STR_LIT:s>':<EOL><INDENT>return stem<EOL><DEDENT>return stem[:-<NUM_LIT:1>] + '<STR_LIT:s>'<EOL>", "docstring": "Return Lovins' conditional recode rule 24.\n\n        Parameters\n        ----------\n        stem : str\n            Word to stem\n\n        Returns\n        -------\n        str\n            Word stripped of suffix", "id": "f6562:c0:m29"}
{"signature": "def _cond_i(self, word, suffix_len):", "body": "return word[-suffix_len - <NUM_LIT:1>] not in {'<STR_LIT:e>', '<STR_LIT:o>'}<EOL>", "docstring": "Return Lovins' condition I.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m7"}
{"signature": "def _cond_y(self, word, suffix_len):", "body": "return word[-suffix_len - <NUM_LIT:2> : -suffix_len] == '<STR_LIT>'<EOL>", "docstring": "Return Lovins' condition Y.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m23"}
{"signature": "def lovins(word):", "body": "return Lovins().stem(word)<EOL>", "docstring": "Return Lovins stem.\n\n    This is a wrapper for :py:meth:`Lovins.stem`.\n\n    Parameters\n    ----------\n    word : str\n        The word to stem\n\n    Returns\n    -------\n    str: Word stem\n\n    Examples\n    --------\n    >>> lovins('reading')\n    'read'\n    >>> lovins('suspension')\n    'suspens'\n    >>> lovins('elusiveness')\n    'elus'", "id": "f6562:m0"}
{"signature": "def _cond_n(self, word, suffix_len):", "body": "if len(word) - suffix_len >= <NUM_LIT:3>:<EOL><INDENT>if word[-suffix_len - <NUM_LIT:3>] == '<STR_LIT:s>':<EOL><INDENT>if len(word) - suffix_len >= <NUM_LIT:4>:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Return Lovins' condition N.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m12"}
{"signature": "def _cond_bb(self, word, suffix_len):", "body": "return (<EOL>len(word) - suffix_len >= <NUM_LIT:3><EOL>and word[-suffix_len - <NUM_LIT:3> : -suffix_len] != '<STR_LIT>'<EOL>and word[-suffix_len - <NUM_LIT:4> : -suffix_len] != '<STR_LIT>'<EOL>)<EOL>", "docstring": "Return Lovins' condition BB.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m26"}
{"signature": "def _cond_h(self, word, suffix_len):", "body": "return (<EOL>word[-suffix_len - <NUM_LIT:1>] == '<STR_LIT:t>'<EOL>or word[-suffix_len - <NUM_LIT:2> : -suffix_len] == '<STR_LIT>'<EOL>)<EOL>", "docstring": "Return Lovins' condition H.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m6"}
{"signature": "def _cond_aa(self, word, suffix_len):", "body": "return word[-suffix_len - <NUM_LIT:1>] in {'<STR_LIT:d>', '<STR_LIT:f>', '<STR_LIT:l>', '<STR_LIT:t>'} or word[<EOL>-suffix_len - <NUM_LIT:2> : -suffix_len<EOL>] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}<EOL>", "docstring": "Return Lovins' condition AA.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m25"}
{"signature": "def _cond_e(self, word, suffix_len):", "body": "return word[-suffix_len - <NUM_LIT:1>] != '<STR_LIT:e>'<EOL>", "docstring": "Return Lovins' condition E.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m3"}
{"signature": "def _cond_g(self, word, suffix_len):", "body": "return len(word) - suffix_len >= <NUM_LIT:3> and word[-suffix_len - <NUM_LIT:1>] == '<STR_LIT:f>'<EOL>", "docstring": "Return Lovins' condition G.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m5"}
{"signature": "def _cond_o(self, word, suffix_len):", "body": "return word[-suffix_len - <NUM_LIT:1>] in {'<STR_LIT:i>', '<STR_LIT:l>'}<EOL>", "docstring": "Return Lovins' condition O.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m13"}
{"signature": "def _cond_l(self, word, suffix_len):", "body": "return (<EOL>word[-suffix_len - <NUM_LIT:1>] not in {'<STR_LIT:s>', '<STR_LIT:u>', '<STR_LIT:x>'}<EOL>or word[-suffix_len - <NUM_LIT:1>] == '<STR_LIT>'<EOL>)<EOL>", "docstring": "Return Lovins' condition L.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m10"}
{"signature": "def _cond_u(self, word, suffix_len):", "body": "return word[-suffix_len - <NUM_LIT:1>] in {'<STR_LIT:l>', '<STR_LIT:m>', '<STR_LIT:n>', '<STR_LIT:r>'}<EOL>", "docstring": "Return Lovins' condition U.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m19"}
{"signature": "def _recode30(self, stem):", "body": "if stem[-<NUM_LIT:4>:-<NUM_LIT:3>] == '<STR_LIT:m>':<EOL><INDENT>return stem<EOL><DEDENT>return stem[:-<NUM_LIT:1>] + '<STR_LIT:s>'<EOL>", "docstring": "Return Lovins' conditional recode rule 30.\n\n        Parameters\n        ----------\n        stem : str\n            Word to stem\n\n        Returns\n        -------\n        str\n            Word stripped of suffix", "id": "f6562:c0:m31"}
{"signature": "def _cond_c(self, word, suffix_len):", "body": "return len(word) - suffix_len >= <NUM_LIT:4><EOL>", "docstring": "Return Lovins' condition C.\n\n        Parameters\n        ----------\n        word : str\n            Word to check\n        suffix_len : int\n            Suffix length\n\n        Returns\n        -------\n        bool\n            True if condition is met", "id": "f6562:c0:m1"}
{"signature": "def stem(self, word):", "body": "word = normalize('<STR_LIT>', text_type(word.lower()))<EOL>word = '<STR_LIT>'.join(<EOL>c<EOL>for c in word<EOL>if c<EOL>in {<EOL>'<STR_LIT:a>',<EOL>'<STR_LIT:b>',<EOL>'<STR_LIT:c>',<EOL>'<STR_LIT:d>',<EOL>'<STR_LIT:e>',<EOL>'<STR_LIT:f>',<EOL>'<STR_LIT:g>',<EOL>'<STR_LIT:h>',<EOL>'<STR_LIT:i>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT:k>',<EOL>'<STR_LIT:l>',<EOL>'<STR_LIT:m>',<EOL>'<STR_LIT:n>',<EOL>'<STR_LIT:o>',<EOL>'<STR_LIT:p>',<EOL>'<STR_LIT:q>',<EOL>'<STR_LIT:r>',<EOL>'<STR_LIT:s>',<EOL>'<STR_LIT:t>',<EOL>'<STR_LIT:u>',<EOL>'<STR_LIT:v>',<EOL>'<STR_LIT:w>',<EOL>'<STR_LIT:x>',<EOL>'<STR_LIT:y>',<EOL>'<STR_LIT:z>',<EOL>}<EOL>)<EOL>word = word.replace('<STR_LIT>', '<STR_LIT:i>').replace('<STR_LIT:v>', '<STR_LIT:u>')<EOL>if word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if word[:-<NUM_LIT:3>] in self._keep_que or word == '<STR_LIT>':<EOL><INDENT>return {'<STR_LIT:n>': word, '<STR_LIT:v>': word}<EOL><DEDENT>else:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>noun = word<EOL>verb = word<EOL>for endlen in range(<NUM_LIT:4>, <NUM_LIT:0>, -<NUM_LIT:1>):<EOL><INDENT>if word[-endlen:] in self._n_endings[endlen]:<EOL><INDENT>if len(word) - <NUM_LIT:2> >= endlen:<EOL><INDENT>noun = word[:-endlen]<EOL><DEDENT>else:<EOL><INDENT>noun = word<EOL><DEDENT>break<EOL><DEDENT><DEDENT>for endlen in range(<NUM_LIT:6>, <NUM_LIT:0>, -<NUM_LIT:1>):<EOL><INDENT>if word[-endlen:] in self._v_endings_strip[endlen]:<EOL><INDENT>if len(word) - <NUM_LIT:2> >= endlen:<EOL><INDENT>verb = word[:-endlen]<EOL><DEDENT>else:<EOL><INDENT>verb = word<EOL><DEDENT>break<EOL><DEDENT>if word[-endlen:] in self._v_endings_alter[endlen]:<EOL><INDENT>if word[-endlen:] in {<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>}:<EOL><INDENT>new_word = word[:-endlen] + '<STR_LIT:i>'<EOL>addlen = <NUM_LIT:1><EOL><DEDENT>elif word[-endlen:] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>new_word = word[:-endlen] + 
'<STR_LIT>'<EOL>addlen = <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>new_word = word[:-endlen] + '<STR_LIT>'<EOL>addlen = <NUM_LIT:3><EOL><DEDENT>if len(new_word) >= <NUM_LIT:2> + addlen:<EOL><INDENT>verb = new_word<EOL><DEDENT>else:<EOL><INDENT>verb = word<EOL><DEDENT>break<EOL><DEDENT><DEDENT>return {'<STR_LIT:n>': noun, '<STR_LIT:v>': verb}<EOL>", "docstring": "Return the stem of a word according to the Schinke stemmer.\n\n        Parameters\n        ----------\n        word : str\n            The word to stem\n\n        Returns\n        -------\n        str\n            Word stem\n\n        Examples\n        --------\n        >>> stmr = Schinke()\n        >>> stmr.stem('atque')\n        {'n': 'atque', 'v': 'atque'}\n        >>> stmr.stem('census')\n        {'n': 'cens', 'v': 'censu'}\n        >>> stmr.stem('virum')\n        {'n': 'uir', 'v': 'uiru'}\n        >>> stmr.stem('populusque')\n        {'n': 'popul', 'v': 'populu'}\n        >>> stmr.stem('senatus')\n        {'n': 'senat', 'v': 'senatu'}", "id": "f6563:c0:m0"}
{"signature": "def schinke(word):", "body": "return Schinke().stem(word)<EOL>", "docstring": "Return the stem of a word according to the Schinke stemmer.\n\n    This is a wrapper for :py:meth:`Schinke.stem`.\n\n    Parameters\n    ----------\n    word : str\n        The word to stem\n\n    Returns\n    -------\n    str\n        Word stem\n\n    Examples\n    --------\n    >>> schinke('atque')\n    {'n': 'atque', 'v': 'atque'}\n    >>> schinke('census')\n    {'n': 'cens', 'v': 'censu'}\n    >>> schinke('virum')\n    {'n': 'uir', 'v': 'uiru'}\n    >>> schinke('populusque')\n    {'n': 'popul', 'v': 'populu'}\n    >>> schinke('senatus')\n    {'n': 'senat', 'v': 'senatu'}", "id": "f6563:m0"}
{"signature": "def sb_norwegian(word):", "body": "return SnowballNorwegian().stem(word)<EOL>", "docstring": "Return Snowball Norwegian stem.\n\n    This is a wrapper for :py:meth:`SnowballNorwegian.stem`.\n\n    Parameters\n    ----------\n    word : str\n        The word to stem\n\n    Returns\n    -------\n    str\n        Word stem\n\n    Examples\n    --------\n    >>> sb_norwegian('lese')\n    'les'\n    >>> sb_norwegian('suspensjon')\n    'suspensjon'\n    >>> sb_norwegian('sikkerhet')\n    'sikker'", "id": "f6564:m0"}
{"signature": "def _has_vowel(self, term):", "body": "for letter in term:<EOL><INDENT>if letter in self._vowels:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Return Porter helper function _has_vowel value.\n\n        Parameters\n        ----------\n        term : str\n            The word to scan for vowels\n\n        Returns\n        -------\n        bool\n            True iff a vowel exists in the term (as defined in the Porter\n            stemmer definition)", "id": "f6566:c0:m1"}
{"signature": "def _ends_in_doubled_cons(self, term):", "body": "return (<EOL>len(term) > <NUM_LIT:1><EOL>and term[-<NUM_LIT:1>] not in self._vowels<EOL>and term[-<NUM_LIT:2>] == term[-<NUM_LIT:1>]<EOL>)<EOL>", "docstring": "Return Porter helper function _ends_in_doubled_cons value.\n\n        Parameters\n        ----------\n        term : str\n            The word to check for a final doubled consonant\n\n        Returns\n        -------\n        bool\n            True iff the stem ends in a doubled consonant (as defined in the\n            Porter stemmer definition)", "id": "f6566:c0:m2"}
{"signature": "def stem(self, word, early_english=False):", "body": "<EOL>word = normalize('<STR_LIT>', text_type(word.lower()))<EOL>if len(word) < <NUM_LIT:3>:<EOL><INDENT>return word<EOL><DEDENT>if word[<NUM_LIT:0>] == '<STR_LIT:y>':<EOL><INDENT>word = '<STR_LIT:Y>' + word[<NUM_LIT:1>:]<EOL><DEDENT>for i in range(<NUM_LIT:1>, len(word)):<EOL><INDENT>if word[i] == '<STR_LIT:y>' and word[i - <NUM_LIT:1>] in self._vowels:<EOL><INDENT>word = word[:i] + '<STR_LIT:Y>' + word[i + <NUM_LIT:1> :]<EOL><DEDENT><DEDENT>if word[-<NUM_LIT:1>] == '<STR_LIT:s>':<EOL><INDENT>if word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT>elif word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>step1b_flag = False<EOL>if word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:3>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if self._has_vowel(word[:-<NUM_LIT:2>]):<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL>step1b_flag = True<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if self._has_vowel(word[:-<NUM_LIT:3>]):<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL>step1b_flag = True<EOL><DEDENT><DEDENT>elif early_english:<EOL><INDENT>if word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if self._has_vowel(word[:-<NUM_LIT:3>]):<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL>step1b_flag = True<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if self._has_vowel(word[:-<NUM_LIT:3>]):<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL>step1b_flag = True<EOL><DEDENT><DEDENT><DEDENT>if step1b_flag:<EOL><INDENT>if word[-<NUM_LIT:2>:] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>word += '<STR_LIT:e>'<EOL><DEDENT>elif self._ends_in_doubled_cons(word) and 
word[-<NUM_LIT:1>] not in {<EOL>'<STR_LIT:l>',<EOL>'<STR_LIT:s>',<EOL>'<STR_LIT:z>',<EOL>}:<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT>elif self._m_degree(word) == <NUM_LIT:1> and self._ends_in_cvc(word):<EOL><INDENT>word += '<STR_LIT:e>'<EOL><DEDENT><DEDENT>if word[-<NUM_LIT:1>] in {'<STR_LIT:Y>', '<STR_LIT:y>'} and self._has_vowel(word[:-<NUM_LIT:1>]):<EOL><INDENT>word = word[:-<NUM_LIT:1>] + '<STR_LIT:i>'<EOL><DEDENT>if len(word) > <NUM_LIT:1>:<EOL><INDENT>if word[-<NUM_LIT:2>] == '<STR_LIT:a>':<EOL><INDENT>if word[-<NUM_LIT:7>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:7>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:5>] + '<STR_LIT:e>'<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:6>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:6>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT><DEDENT>elif word[-<NUM_LIT:2>] == '<STR_LIT:c>':<EOL><INDENT>if word[-<NUM_LIT:4>:] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:4>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:1>] + '<STR_LIT:e>'<EOL><DEDENT><DEDENT><DEDENT>elif word[-<NUM_LIT:2>] == '<STR_LIT:e>':<EOL><INDENT>if word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:4>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT><DEDENT>elif word[-<NUM_LIT:2>] == '<STR_LIT:g>':<EOL><INDENT>if word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:4>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT><DEDENT>elif word[-<NUM_LIT:2>] == '<STR_LIT:l>':<EOL><INDENT>if word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:3>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:1>] + '<STR_LIT:e>'<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:4>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>elif 
word[-<NUM_LIT:5>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:5>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:3>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:5>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:5>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT><DEDENT>elif word[-<NUM_LIT:2>] == '<STR_LIT:o>':<EOL><INDENT>if word[-<NUM_LIT:7>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:7>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:5>] + '<STR_LIT:e>'<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:5>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:5>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:3>] + '<STR_LIT:e>'<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:4>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:2>] + '<STR_LIT:e>'<EOL><DEDENT><DEDENT><DEDENT>elif word[-<NUM_LIT:2>] == '<STR_LIT:s>':<EOL><INDENT>if word[-<NUM_LIT:5>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:5>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:7>:] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:7>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:4>]<EOL><DEDENT><DEDENT><DEDENT>elif word[-<NUM_LIT:2>] == '<STR_LIT:t>':<EOL><INDENT>if word[-<NUM_LIT:5>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:5>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:5>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:5>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:3>] + '<STR_LIT:e>'<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:6>:] == '<STR_LIT>':<EOL><INDENT>if 
self._m_degree(word[:-<NUM_LIT:6>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:5>] + '<STR_LIT>'<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if word[-<NUM_LIT:5>:] in '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:5>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:5>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:5>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:5>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:5>:] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:5>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:4>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:3>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:4>]) > <NUM_LIT:0>:<EOL><INDENT>word = word[:-<NUM_LIT:4>]<EOL><DEDENT><DEDENT>if word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:2>]) > <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:4>:] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:4>]) > <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:4>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:2>]) > <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:4>:] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:4>]) > <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:4>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:3>]) > <NUM_LIT:1>:<EOL><INDENT>word = 
word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:5>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:5>]) > <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:5>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:4>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:4>]) > <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:4>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:3>]) > <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:4>:] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:3>]) > <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:2>]) > <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>elif word[-<NUM_LIT:3>:] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:3>]) > <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:3>]<EOL><DEDENT><DEDENT>if word[-<NUM_LIT:1>] == '<STR_LIT:e>':<EOL><INDENT>if self._m_degree(word[:-<NUM_LIT:1>]) > <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT>elif self._m_degree(word[:-<NUM_LIT:1>]) == <NUM_LIT:1> and not self._ends_in_cvc(<EOL>word[:-<NUM_LIT:1>]<EOL>):<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>if word[-<NUM_LIT:2>:] == '<STR_LIT>' and self._m_degree(word) > <NUM_LIT:1>:<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT>for i in range(len(word)):<EOL><INDENT>if word[i] == '<STR_LIT:Y>':<EOL><INDENT>word = word[:i] + '<STR_LIT:y>' + word[i + <NUM_LIT:1> :]<EOL><DEDENT><DEDENT>return word<EOL>", "docstring": "Return Porter stem.\n\n        Parameters\n        ----------\n        word : str\n            The word to stem\n        early_english : bool\n            Set to True in order to remove -eth & -est (2nd & 3rd person\n            
singular verbal agreement suffixes)\n\n        Returns\n        -------\n        str\n            Word stem\n\n        Examples\n        --------\n        >>> stmr = Porter()\n        >>> stmr.stem('reading')\n        'read'\n        >>> stmr.stem('suspension')\n        'suspens'\n        >>> stmr.stem('elusiveness')\n        'elus'\n\n        >>> stmr.stem('eateth', early_english=True)\n        'eat'", "id": "f6566:c0:m4"}
{"signature": "def count(self):", "body": "return sum(self.values())<EOL>", "docstring": "Return q-grams count.\n\n        Returns\n        -------\n        int\n            The total count of q-grams in a QGrams object\n\n        Examples\n        --------\n        >>> qg = QGrams('AATTATAT')\n        >>> qg.count()\n        9\n\n        >>> qg = QGrams('AATTATAT', qval=1, start_stop='')\n        >>> qg.count()\n        8\n\n        >>> qg = QGrams('AATTATAT', qval=3, start_stop='')\n        >>> qg.count()\n        6", "id": "f6569:c0:m1"}
{"signature": "def dm_soundex(word, max_length=<NUM_LIT:6>, zero_pad=True):", "body": "return DaitchMokotoff().encode(word, max_length, zero_pad)<EOL>", "docstring": "Return the Daitch-Mokotoff Soundex code for a word.\n\n    This is a wrapper for :py:meth:`DaitchMokotoff.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n    max_length : int\n        The length of the code returned (defaults to 6; must be between 6 and\n        64)\n    zero_pad : bool\n        Pad the end of the return value with 0s to achieve a max_length string\n\n    Returns\n    -------\n    str\n        The Daitch-Mokotoff Soundex value\n\n    Examples\n    --------\n    >>> sorted(dm_soundex('Christopher'))\n    ['494379', '594379']\n    >>> dm_soundex('Niall')\n    {'680000'}\n    >>> dm_soundex('Smith')\n    {'463000'}\n    >>> dm_soundex('Schmidt')\n    {'463000'}\n\n    >>> sorted(dm_soundex('The quick brown fox', max_length=20,\n    ... zero_pad=False))\n    ['35457976754', '3557976754']", "id": "f6574:m0"}
{"signature": "def encode(self, word, max_length=-<NUM_LIT:1>, keep_vowels=False, vowel_char='<STR_LIT:*>'):", "body": "<EOL>word = unicode_normalize('<STR_LIT>', text_type(word.upper()))<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = '<STR_LIT>'.join(c for c in word if c in self._uc_set)<EOL>if word[:<NUM_LIT:3>] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>word = '<STR_LIT>' + word[<NUM_LIT:3>:]<EOL><DEDENT>elif word[:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>word = '<STR_LIT>' + word[<NUM_LIT:2>:]<EOL><DEDENT>pos = len(word) - <NUM_LIT:2><EOL>while pos > -<NUM_LIT:1>:<EOL><INDENT>if word[pos : pos + <NUM_LIT:2>] in {<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>}:<EOL><INDENT>word = word[: pos + <NUM_LIT:1>] + word[pos + <NUM_LIT:2> :]<EOL>pos += <NUM_LIT:1><EOL><DEDENT>pos -= <NUM_LIT:1><EOL><DEDENT>word = word.replace('<STR_LIT:X>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>pos = word.find('<STR_LIT>', <NUM_LIT:1>)<EOL>while pos != -<NUM_LIT:1>:<EOL><INDENT>if word[pos - <NUM_LIT:1> : pos] not in self._uc_vy_set:<EOL><INDENT>word = word[:pos] + '<STR_LIT:S>' + word[pos + <NUM_LIT:1> :]<EOL><DEDENT>pos = word.find('<STR_LIT>', pos + <NUM_LIT:1>)<EOL><DEDENT>word = word.replace('<STR_LIT:C>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT:S>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT:R>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT:T>', '<STR_LIT:D>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT:F>')<EOL>pos = word.find('<STR_LIT>', <NUM_LIT:0>)<EOL>while pos != -<NUM_LIT:1>:<EOL><INDENT>if pos > <NUM_LIT:1> and word[pos - 
<NUM_LIT:1> : pos] not in self._uc_vy_set | {<EOL>'<STR_LIT:L>',<EOL>'<STR_LIT:N>',<EOL>'<STR_LIT:R>',<EOL>}:<EOL><INDENT>word = word[: pos - <NUM_LIT:1>] + word[pos:]<EOL>pos -= <NUM_LIT:1><EOL><DEDENT>pos = word.find('<STR_LIT>', pos + <NUM_LIT:1>)<EOL><DEDENT>if max_length > <NUM_LIT:0> and word[-<NUM_LIT:1>:] == '<STR_LIT:E>':<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT>word = self._delete_consecutive_repeats(word)<EOL>if word[:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>word = word[<NUM_LIT:1>:]<EOL><DEDENT>if word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>word = word[:-<NUM_LIT:1>]<EOL><DEDENT>elif word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>if word[-<NUM_LIT:3>:-<NUM_LIT:2>] in self._uc_vy_set:<EOL><INDENT>word = word[:-<NUM_LIT:2>] + '<STR_LIT:F>'<EOL><DEDENT>else:<EOL><INDENT>word = word[:-<NUM_LIT:2>] + '<STR_LIT>'<EOL><DEDENT><DEDENT>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>if max_length > <NUM_LIT:0>:<EOL><INDENT>word = word.replace('<STR_LIT>', '<STR_LIT:F>')<EOL><DEDENT>first = <NUM_LIT:1> + (<NUM_LIT:1> if max_length > <NUM_LIT:0> else <NUM_LIT:0>)<EOL>code = '<STR_LIT>'<EOL>for pos, char in enumerate(word):<EOL><INDENT>if char in self._uc_vy_set:<EOL><INDENT>if first or keep_vowels:<EOL><INDENT>code += vowel_char<EOL>first -= <NUM_LIT:1><EOL><DEDENT><DEDENT>elif pos > <NUM_LIT:0> and char in {'<STR_LIT>', '<STR_LIT:H>'}:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>code += char<EOL><DEDENT><DEDENT>if max_length > <NUM_LIT:0>:<EOL><INDENT>if len(code) > max_length and code[-<NUM_LIT:1>:] == '<STR_LIT:S>':<EOL><INDENT>code = code[:-<NUM_LIT:1>]<EOL><DEDENT>if keep_vowels:<EOL><INDENT>code = code[:max_length]<EOL><DEDENT>else:<EOL><INDENT>code = code[: max_length + <NUM_LIT:2>]<EOL>while len(code) > max_length:<EOL><INDENT>vowels = len(code) - max_length<EOL>excess = vowels - <NUM_LIT:1><EOL>word = code<EOL>code = '<STR_LIT>'<EOL>for char in word:<EOL><INDENT>if char == vowel_char:<EOL><INDENT>if vowels:<EOL><INDENT>code += 
char<EOL>vowels -= <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>code += char<EOL><DEDENT><DEDENT>code = code[: max_length + excess]<EOL><DEDENT><DEDENT>code += '<STR_LIT:U+0020>' * (max_length - len(code))<EOL><DEDENT>return code<EOL>", "docstring": "r\"\"\"Return the Dolby Code of a name.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform\n        max_length : int\n            Maximum length of the returned Dolby code -- this also activates\n            the fixed-length code mode if it is greater than 0\n        keep_vowels : bool\n            If True, retains all vowel markers\n        vowel_char : str\n            The vowel marker character (default to \\*)\n\n        Returns\n        -------\n        str\n            The Dolby Code\n\n        Examples\n        --------\n        >>> pe = Dolby()\n        >>> pe.encode('Hansen')\n        'H*NSN'\n        >>> pe.encode('Larsen')\n        'L*RSN'\n        >>> pe.encode('Aagaard')\n        '*GR'\n        >>> pe.encode('Braaten')\n        'BR*DN'\n        >>> pe.encode('Sandvik')\n        'S*NVK'\n        >>> pe.encode('Hansen', max_length=6)\n        'H*NS*N'\n        >>> pe.encode('Larsen', max_length=6)\n        'L*RS*N'\n        >>> pe.encode('Aagaard', max_length=6)\n        '*G*R  '\n        >>> pe.encode('Braaten', max_length=6)\n        'BR*D*N'\n        >>> pe.encode('Sandvik', max_length=6)\n        'S*NF*K'\n\n        >>> pe.encode('Smith')\n        'SM*D'\n        >>> pe.encode('Waters')\n        'W*DRS'\n        >>> pe.encode('James')\n        'J*MS'\n        >>> pe.encode('Schmidt')\n        'SM*D'\n        >>> pe.encode('Ashcroft')\n        '*SKRFD'\n        >>> pe.encode('Smith', max_length=6)\n        'SM*D  '\n        >>> pe.encode('Waters', max_length=6)\n        'W*D*RS'\n        >>> pe.encode('James', max_length=6)\n        'J*M*S '\n        >>> pe.encode('Schmidt', max_length=6)\n        'SM*D  '\n        >>> pe.encode('Ashcroft', 
max_length=6)\n        '*SKRFD'", "id": "f6577:c0:m0"}
{"signature": "def dolby(word, max_length=-<NUM_LIT:1>, keep_vowels=False, vowel_char='<STR_LIT:*>'):", "body": "return Dolby().encode(word, max_length, keep_vowels, vowel_char)<EOL>", "docstring": "r\"\"\"Return the Dolby Code of a name.\n\n    This is a wrapper for :py:meth:`Dolby.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n    max_length : int\n        Maximum length of the returned Dolby code -- this also activates the\n        fixed-length code mode if it is greater than 0\n    keep_vowels : bool\n        If True, retains all vowel markers\n    vowel_char : str\n        The vowel marker character (default to \\*)\n\n    Returns\n    -------\n    str\n        The Dolby Code\n\n    Examples\n    --------\n    >>> dolby('Hansen')\n    'H*NSN'\n    >>> dolby('Larsen')\n    'L*RSN'\n    >>> dolby('Aagaard')\n    '*GR'\n    >>> dolby('Braaten')\n    'BR*DN'\n    >>> dolby('Sandvik')\n    'S*NVK'\n    >>> dolby('Hansen', max_length=6)\n    'H*NS*N'\n    >>> dolby('Larsen', max_length=6)\n    'L*RS*N'\n    >>> dolby('Aagaard', max_length=6)\n    '*G*R  '\n    >>> dolby('Braaten', max_length=6)\n    'BR*D*N'\n    >>> dolby('Sandvik', max_length=6)\n    'S*NF*K'\n\n    >>> dolby('Smith')\n    'SM*D'\n    >>> dolby('Waters')\n    'W*DRS'\n    >>> dolby('James')\n    'J*MS'\n    >>> dolby('Schmidt')\n    'SM*D'\n    >>> dolby('Ashcroft')\n    '*SKRFD'\n    >>> dolby('Smith', max_length=6)\n    'SM*D  '\n    >>> dolby('Waters', max_length=6)\n    'W*D*RS'\n    >>> dolby('James', max_length=6)\n    'J*M*S '\n    >>> dolby('Schmidt', max_length=6)\n    'SM*D  '\n    >>> dolby('Ashcroft', max_length=6)\n    '*SKRFD'", "id": "f6577:m0"}
{"signature": "def encode(self, word):", "body": "word = unicode_normalize('<STR_LIT>', text_type(word.upper()))<EOL>for i, j in self._substitutions:<EOL><INDENT>word = word.replace(i, j)<EOL><DEDENT>word = word.translate(self._trans)<EOL>return '<STR_LIT>'.join(<EOL>c<EOL>for c in self._delete_consecutive_repeats(word)<EOL>if c in self._uc_set<EOL>)<EOL>", "docstring": "Return the Phonem code for a word.\n\n        Parameters\n        ----------\n        word : str\n        The word to transform\n\n        Returns\n        -------\n        str\n            The Phonem value\n\n        Examples\n        --------\n        >>> pe = Phonem()\n        >>> pe.encode('Christopher')\n        'CRYSDOVR'\n        >>> pe.encode('Niall')\n        'NYAL'\n        >>> pe.encode('Smith')\n        'SMYD'\n        >>> pe.encode('Schmidt')\n        'CMYD'", "id": "f6578:c0:m0"}
{"signature": "def statistics_canada(word, max_length=<NUM_LIT:4>):", "body": "return StatisticsCanada().encode(word, max_length)<EOL>", "docstring": "Return the Statistics Canada code for a word.\n\n    This is a wrapper for :py:meth:`StatisticsCanada.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n    max_length : int\n        The maximum length (default 4) of the code to return\n\n    Returns\n    -------\n    str\n        The Statistics Canada name code value\n\n    Examples\n    --------\n    >>> statistics_canada('Christopher')\n    'CHRS'\n    >>> statistics_canada('Niall')\n    'NL'\n    >>> statistics_canada('Smith')\n    'SMTH'\n    >>> statistics_canada('Schmidt')\n    'SCHM'", "id": "f6579:m0"}
{"signature": "def caverphone(word, version=<NUM_LIT:2>):", "body": "return Caverphone().encode(word, version)<EOL>", "docstring": "Return the Caverphone code for a word.\n\n    This is a wrapper for :py:meth:`Caverphone.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n    version : int\n        The version of Caverphone to employ for encoding (defaults to 2)\n\n    Returns\n    -------\n    str\n        The Caverphone value\n\n    Examples\n    --------\n    >>> caverphone('Christopher')\n    'KRSTFA1111'\n    >>> caverphone('Niall')\n    'NA11111111'\n    >>> caverphone('Smith')\n    'SMT1111111'\n    >>> caverphone('Schmidt')\n    'SKMT111111'\n\n    >>> caverphone('Christopher', 1)\n    'KRSTF1'\n    >>> caverphone('Niall', 1)\n    'N11111'\n    >>> caverphone('Smith', 1)\n    'SMT111'\n    >>> caverphone('Schmidt', 1)\n    'SKMT11'", "id": "f6581:m0"}
{"signature": "def pshp_soundex_last(lname, max_length=<NUM_LIT:4>, german=False):", "body": "return PSHPSoundexLast().encode(lname, max_length, german)<EOL>", "docstring": "Calculate the PSHP Soundex/Viewex Coding of a last name.\n\n    This is a wrapper for :py:meth:`PSHPSoundexLast.encode`.\n\n    Parameters\n    ----------\n    lname : str\n        The last name to encode\n    max_length : int\n        The length of the code returned (defaults to 4)\n    german : bool\n        Set to True if the name is German (different rules apply)\n\n    Returns\n    -------\n    str\n        The PSHP Soundex/Viewex Coding\n\n    Examples\n    --------\n    >>> pshp_soundex_last('Smith')\n    'S530'\n    >>> pshp_soundex_last('Waters')\n    'W350'\n    >>> pshp_soundex_last('James')\n    'J500'\n    >>> pshp_soundex_last('Schmidt')\n    'S530'\n    >>> pshp_soundex_last('Ashcroft')\n    'A225'", "id": "f6582:m0"}
{"signature": "def encode(self, lname, max_length=<NUM_LIT:4>, german=False):", "body": "lname = unicode_normalize('<STR_LIT>', text_type(lname.upper()))<EOL>lname = lname.replace('<STR_LIT>', '<STR_LIT>')<EOL>lname = '<STR_LIT>'.join(c for c in lname if c in self._uc_set)<EOL>if lname[:<NUM_LIT:3>] == '<STR_LIT>' or lname[:<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>lname = lname[<NUM_LIT:3>:].strip()<EOL><DEDENT>if not german:<EOL><INDENT>if lname[:<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>lname = '<STR_LIT:M>' + lname[<NUM_LIT:3>:]<EOL><DEDENT>elif lname[:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>lname = '<STR_LIT:M>' + lname[<NUM_LIT:2>:]<EOL><DEDENT><DEDENT>if lname[:<NUM_LIT:1>] in {'<STR_LIT:E>', '<STR_LIT:I>', '<STR_LIT:O>', '<STR_LIT>'}:<EOL><INDENT>lname = '<STR_LIT:A>' + lname[<NUM_LIT:1>:]<EOL><DEDENT>elif lname[:<NUM_LIT:2>] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>lname = '<STR_LIT>' + lname[<NUM_LIT:1>:]<EOL><DEDENT>elif lname[:<NUM_LIT:2>] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>lname = '<STR_LIT:S>' + lname[<NUM_LIT:1>:]<EOL><DEDENT>elif lname[:<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>lname = '<STR_LIT>' + lname[<NUM_LIT:1>:]<EOL><DEDENT>elif lname[:<NUM_LIT:1>] == '<STR_LIT:C>' and lname[:<NUM_LIT:2>] != '<STR_LIT>':<EOL><INDENT>lname = '<STR_LIT>' + lname[<NUM_LIT:1>:]<EOL><DEDENT>if lname[:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>lname = '<STR_LIT:N>' + lname[<NUM_LIT:1>:]<EOL><DEDENT>elif lname[:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>lname = '<STR_LIT:F>' + lname[<NUM_LIT:1>:]<EOL><DEDENT>elif lname[:<NUM_LIT:3>] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>lname = '<STR_LIT>' + lname[<NUM_LIT:1>:]<EOL><DEDENT>if german and lname[:<NUM_LIT:1>] in {'<STR_LIT>', '<STR_LIT:M>', '<STR_LIT:Y>', '<STR_LIT>'}:<EOL><INDENT>lname = {'<STR_LIT>': '<STR_LIT>', '<STR_LIT:M>': '<STR_LIT:N>', '<STR_LIT:Y>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT:S>'}[lname[<NUM_LIT:0>]] + lname[<EOL><NUM_LIT:1>:<EOL>]<EOL><DEDENT>code = 
lname[:<NUM_LIT:1>]<EOL>if german:  <EOL><INDENT>if lname[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>lname = lname[:-<NUM_LIT:3>]<EOL><DEDENT>elif lname[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>lname = lname[:-<NUM_LIT:2>]<EOL><DEDENT>if lname[-<NUM_LIT:3>:] == '<STR_LIT>':<EOL><INDENT>lname = lname[:-<NUM_LIT:3>]<EOL><DEDENT>elif lname[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>lname = lname[:-<NUM_LIT:2>]<EOL><DEDENT>if lname[-<NUM_LIT:1>:] == '<STR_LIT>':<EOL><INDENT>lname = lname[:-<NUM_LIT:1>]<EOL><DEDENT>elif lname[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>lname = lname[:-<NUM_LIT:2>]<EOL><DEDENT><DEDENT>if lname[-<NUM_LIT:1>:] == '<STR_LIT:R>':<EOL><INDENT>lname = lname[:-<NUM_LIT:1>] + '<STR_LIT:N>'<EOL><DEDENT>elif lname[-<NUM_LIT:2>:] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>lname = lname[:-<NUM_LIT:2>]<EOL><DEDENT>if lname[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>lname = lname[:-<NUM_LIT:2>]<EOL><DEDENT>elif lname[-<NUM_LIT:1>:] == '<STR_LIT:S>':<EOL><INDENT>lname = lname[:-<NUM_LIT:1>]<EOL><DEDENT>if not german:<EOL><INDENT>l5_repl = {'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>'}<EOL>l4_repl = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>}<EOL>if lname[-<NUM_LIT:5>:] in l5_repl:<EOL><INDENT>lname = lname[:-<NUM_LIT:5>] + l5_repl[lname[-<NUM_LIT:5>:]]<EOL><DEDENT>elif lname[-<NUM_LIT:4>:] in l4_repl:<EOL><INDENT>lname = lname[:-<NUM_LIT:4>] + l4_repl[lname[-<NUM_LIT:4>:]]<EOL><DEDENT><DEDENT>if lname[-<NUM_LIT:2>:] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>lname = lname[:-<NUM_LIT:1>]<EOL><DEDENT>if not german and lname[-<NUM_LIT:3>:] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>lname = lname[:-<NUM_LIT:3>] + '<STR_LIT:A>' + lname[-<NUM_LIT:2>:]<EOL><DEDENT>lname = lname.replace('<STR_LIT>', '<STR_LIT:C>')<EOL>lname = lname.replace('<STR_LIT>', '<STR_LIT:S>')<EOL>lname = lname.replace('<STR_LIT>', '<STR_LIT:T>')<EOL>lname = lname.replace('<STR_LIT>', 
'<STR_LIT:N>')<EOL>lname = lname.replace('<STR_LIT>', '<STR_LIT:N>')<EOL>lname = lname.replace('<STR_LIT>', '<STR_LIT:M>')<EOL>lname = lname.replace('<STR_LIT>', '<STR_LIT:M>')<EOL>lname = lname.replace('<STR_LIT>', '<STR_LIT>')<EOL>lname = lname.replace('<STR_LIT>', '<STR_LIT>')<EOL>lname = lname.translate(self._trans)<EOL>lname = self._delete_consecutive_repeats(lname)<EOL>code += lname[<NUM_LIT:1>:]<EOL>code = code.replace('<STR_LIT:0>', '<STR_LIT>')  <EOL>if max_length != -<NUM_LIT:1>:<EOL><INDENT>if len(code) < max_length:<EOL><INDENT>code += '<STR_LIT:0>' * (max_length - len(code))<EOL><DEDENT>else:<EOL><INDENT>code = code[:max_length]<EOL><DEDENT><DEDENT>return code<EOL>", "docstring": "Calculate the PSHP Soundex/Viewex Coding of a last name.\n\n        Parameters\n        ----------\n        lname : str\n            The last name to encode\n        max_length : int\n            The length of the code returned (defaults to 4)\n        german : bool\n            Set to True if the name is German (different rules apply)\n\n        Returns\n        -------\n        str\n            The PSHP Soundex/Viewex Coding\n\n        Examples\n        --------\n        >>> pe = PSHPSoundexLast()\n        >>> pe.encode('Smith')\n        'S530'\n        >>> pe.encode('Waters')\n        'W350'\n        >>> pe.encode('James')\n        'J500'\n        >>> pe.encode('Schmidt')\n        'S530'\n        >>> pe.encode('Ashcroft')\n        'A225'", "id": "f6582:c0:m0"}
{"signature": "def phonet(word, mode=<NUM_LIT:1>, lang='<STR_LIT>'):", "body": "return Phonet().encode(word, mode, lang)<EOL>", "docstring": "Return the phonet code for a word.\n\n    This is a wrapper for :py:meth:`Phonet.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n    mode : int\n        The ponet variant to employ (1 or 2)\n    lang : str\n        ``de`` (default) for German, ``none`` for no language\n\n    Returns\n    -------\n    str\n        The phonet value\n\n    Examples\n    --------\n    >>> phonet('Christopher')\n    'KRISTOFA'\n    >>> phonet('Niall')\n    'NIAL'\n    >>> phonet('Smith')\n    'SMIT'\n    >>> phonet('Schmidt')\n    'SHMIT'\n\n    >>> phonet('Christopher', mode=2)\n    'KRIZTUFA'\n    >>> phonet('Niall', mode=2)\n    'NIAL'\n    >>> phonet('Smith', mode=2)\n    'ZNIT'\n    >>> phonet('Schmidt', mode=2)\n    'ZNIT'\n\n    >>> phonet('Christopher', lang='none')\n    'CHRISTOPHER'\n    >>> phonet('Niall', lang='none')\n    'NIAL'\n    >>> phonet('Smith', lang='none')\n    'SMITH'\n    >>> phonet('Schmidt', lang='none')\n    'SCHMIDT'", "id": "f6583:m0"}
{"signature": "def encode(self, word, mode=<NUM_LIT:1>, lang='<STR_LIT>'):", "body": "phonet_hash = Counter()<EOL>alpha_pos = Counter()<EOL>phonet_hash_1 = Counter()<EOL>phonet_hash_2 = Counter()<EOL>def _initialize_phonet(lang):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if lang == '<STR_LIT:none>':<EOL><INDENT>_phonet_rules = self._rules_no_lang<EOL><DEDENT>else:<EOL><INDENT>_phonet_rules = self._rules_german<EOL><DEDENT>phonet_hash['<STR_LIT>'] = -<NUM_LIT:1><EOL>for j in {<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>}:<EOL><INDENT>alpha_pos[j] = <NUM_LIT:1><EOL>phonet_hash[j] = -<NUM_LIT:1><EOL><DEDENT>for i, j in enumerate('<STR_LIT>'):<EOL><INDENT>alpha_pos[j] = i + <NUM_LIT:2><EOL>phonet_hash[j] = -<NUM_LIT:1><EOL><DEDENT>for i in range(<NUM_LIT>):<EOL><INDENT>for j in range(<NUM_LIT>):<EOL><INDENT>phonet_hash_1[i, j] = -<NUM_LIT:1><EOL>phonet_hash_2[i, j] = -<NUM_LIT:1><EOL><DEDENT><DEDENT>for i in range(len(_phonet_rules)):<EOL><INDENT>rule = _phonet_rules[i]<EOL>if rule and i % <NUM_LIT:3> == <NUM_LIT:0>:<EOL><INDENT>k = _phonet_rules[i][<NUM_LIT:0>]<EOL>if phonet_hash[k] < <NUM_LIT:0> and (<EOL>_phonet_rules[i + <NUM_LIT:1>] or _phonet_rules[i + <NUM_LIT:2>]<EOL>):<EOL><INDENT>phonet_hash[k] = i<EOL><DEDENT>if k and alpha_pos[k] >= <NUM_LIT:2>:<EOL><INDENT>k = alpha_pos[k]<EOL>j = k - <NUM_LIT:2><EOL>rule = rule[<NUM_LIT:1>:]<EOL>if not rule:<EOL><INDENT>rule = '<STR_LIT:U+0020>'<EOL><DEDENT>elif rule[<NUM_LIT:0>] == 
'<STR_LIT:(>':<EOL><INDENT>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>else:<EOL><INDENT>rule = rule[<NUM_LIT:0>]<EOL><DEDENT>while rule and (rule[<NUM_LIT:0>] != '<STR_LIT:)>'):<EOL><INDENT>k = alpha_pos[rule[<NUM_LIT:0>]]<EOL>if k > <NUM_LIT:0>:<EOL><INDENT>if phonet_hash_1[j, k] < <NUM_LIT:0>:<EOL><INDENT>phonet_hash_1[j, k] = i<EOL>phonet_hash_2[j, k] = i<EOL><DEDENT>if phonet_hash_2[j, k] >= (i - <NUM_LIT:30>):<EOL><INDENT>phonet_hash_2[j, k] = i<EOL><DEDENT>else:<EOL><INDENT>k = -<NUM_LIT:1><EOL><DEDENT><DEDENT>if k <= <NUM_LIT:0>:<EOL><INDENT>if phonet_hash_1[j, <NUM_LIT:0>] < <NUM_LIT:0>:<EOL><INDENT>phonet_hash_1[j, <NUM_LIT:0>] = i<EOL><DEDENT>phonet_hash_2[j, <NUM_LIT:0>] = i<EOL><DEDENT>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>def _phonet(term, mode, lang):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if lang == '<STR_LIT:none>':<EOL><INDENT>_phonet_rules = self._rules_no_lang<EOL><DEDENT>else:<EOL><INDENT>_phonet_rules = self._rules_german<EOL><DEDENT>char0 = '<STR_LIT>'<EOL>dest = term<EOL>if not term:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>term_length = len(term)<EOL>src = term.translate(self._upper_trans)<EOL>i = <NUM_LIT:0><EOL>j = <NUM_LIT:0><EOL>zeta = <NUM_LIT:0><EOL>while i < len(src):<EOL><INDENT>char = src[i]<EOL>pos = alpha_pos[char]<EOL>if pos >= <NUM_LIT:2>:<EOL><INDENT>xpos = pos - <NUM_LIT:2><EOL>if i + <NUM_LIT:1> == len(src):<EOL><INDENT>pos = alpha_pos['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>pos = alpha_pos[src[i + <NUM_LIT:1>]]<EOL><DEDENT>start1 = phonet_hash_1[xpos, pos]<EOL>start2 = phonet_hash_1[xpos, <NUM_LIT:0>]<EOL>end1 = phonet_hash_2[xpos, pos]<EOL>end2 = phonet_hash_2[xpos, <NUM_LIT:0>]<EOL>if (start2 >= <NUM_LIT:0>) and ((start1 < <NUM_LIT:0>) or (start2 < start1)):<EOL><INDENT>pos = start1<EOL>start1 = start2<EOL>start2 = pos<EOL>pos = end1<EOL>end1 = end2<EOL>end2 = pos<EOL><DEDENT>if (end1 >= start2) and (start2 >= <NUM_LIT:0>):<EOL><INDENT>if end2 > end1:<EOL><INDENT>end1 = 
end2<EOL><DEDENT>start2 = -<NUM_LIT:1><EOL>end2 = -<NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>pos = phonet_hash[char]<EOL>start1 = pos<EOL>end1 = <NUM_LIT><EOL>start2 = -<NUM_LIT:1><EOL>end2 = -<NUM_LIT:1><EOL><DEDENT>pos = start1<EOL>zeta0 = <NUM_LIT:0><EOL>if pos >= <NUM_LIT:0>:<EOL><INDENT>while (_phonet_rules[pos] is None) or (<EOL>_phonet_rules[pos][<NUM_LIT:0>] == char<EOL>):<EOL><INDENT>if pos > end1:<EOL><INDENT>if start2 > <NUM_LIT:0>:<EOL><INDENT>pos = start2<EOL>start1 = start2<EOL>start2 = -<NUM_LIT:1><EOL>end1 = end2<EOL>end2 = -<NUM_LIT:1><EOL>continue<EOL><DEDENT>break<EOL><DEDENT>if (_phonet_rules[pos] is None) or (<EOL>_phonet_rules[pos + mode] is None<EOL>):<EOL><INDENT>pos += <NUM_LIT:3><EOL>continue<EOL><DEDENT>matches = <NUM_LIT:1>  <EOL>priority = <NUM_LIT:5>  <EOL>rule = _phonet_rules[pos]<EOL>rule = rule[<NUM_LIT:1>:]<EOL>while (<EOL>rule<EOL>and (len(src) > (i + matches))<EOL>and (src[i + matches] == rule[<NUM_LIT:0>])<EOL>and not rule[<NUM_LIT:0>].isdigit()<EOL>and (rule not in '<STR_LIT>')<EOL>):<EOL><INDENT>matches += <NUM_LIT:1><EOL>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>if rule and (rule[<NUM_LIT:0>] == '<STR_LIT:(>'):<EOL><INDENT>if (<EOL>(len(src) > (i + matches))<EOL>and src[i + matches].isalpha()<EOL>and (src[i + matches] in rule[<NUM_LIT:1>:])<EOL>):<EOL><INDENT>matches += <NUM_LIT:1><EOL>while rule and rule[<NUM_LIT:0>] != '<STR_LIT:)>':<EOL><INDENT>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT><DEDENT>if rule:<EOL><INDENT>priority0 = ord(rule[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>priority0 = <NUM_LIT:0><EOL><DEDENT>matches0 = matches<EOL>while rule and rule[<NUM_LIT:0>] == '<STR_LIT:->' and matches > <NUM_LIT:1>:<EOL><INDENT>matches -= <NUM_LIT:1><EOL>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>if rule and rule[<NUM_LIT:0>] == '<STR_LIT:<>':<EOL><INDENT>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>if rule and rule[<NUM_LIT:0>].isdigit():<EOL><INDENT>priority = int(rule[<NUM_LIT:0>])<EOL>rule = 
rule[<NUM_LIT:1>:]<EOL><DEDENT>if rule and rule[<NUM_LIT:0>:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>if (<EOL>not rule<EOL>or (<EOL>(rule[<NUM_LIT:0>] == '<STR_LIT>')<EOL>and ((i == <NUM_LIT:0>) or not src[i - <NUM_LIT:1>].isalpha())<EOL>and (<EOL>(rule[<NUM_LIT:1>:<NUM_LIT:2>] != '<STR_LIT:$>')<EOL>or (<EOL>not (<EOL>src[<EOL>i + matches0 : i + matches0 + <NUM_LIT:1><EOL>].isalpha()<EOL>)<EOL>and (<EOL>src[<EOL>i + matches0 : i + matches0 + <NUM_LIT:1><EOL>]<EOL>!= '<STR_LIT:.>'<EOL>)<EOL>)<EOL>)<EOL>)<EOL>or (<EOL>(rule[<NUM_LIT:0>] == '<STR_LIT:$>')<EOL>and (i > <NUM_LIT:0>)<EOL>and src[i - <NUM_LIT:1>].isalpha()<EOL>and (<EOL>(<EOL>not src[<EOL>i + matches0 : i + matches0 + <NUM_LIT:1><EOL>].isalpha()<EOL>)<EOL>and (<EOL>src[i + matches0 : i + matches0 + <NUM_LIT:1>]<EOL>!= '<STR_LIT:.>'<EOL>)<EOL>)<EOL>)<EOL>):<EOL><INDENT>pos0 = -<NUM_LIT:1><EOL>start3 = <NUM_LIT:0><EOL>start4 = <NUM_LIT:0><EOL>end3 = <NUM_LIT:0><EOL>end4 = <NUM_LIT:0><EOL>if (<EOL>(matches > <NUM_LIT:1>)<EOL>and src[i + matches : i + matches + <NUM_LIT:1>]<EOL>and (priority0 != ord('<STR_LIT:->'))<EOL>):<EOL><INDENT>char0 = src[i + matches - <NUM_LIT:1>]<EOL>pos0 = alpha_pos[char0]<EOL>if pos0 >= <NUM_LIT:2> and src[i + matches]:<EOL><INDENT>xpos = pos0 - <NUM_LIT:2><EOL>pos0 = alpha_pos[src[i + matches]]<EOL>start3 = phonet_hash_1[xpos, pos0]<EOL>start4 = phonet_hash_1[xpos, <NUM_LIT:0>]<EOL>end3 = phonet_hash_2[xpos, pos0]<EOL>end4 = phonet_hash_2[xpos, <NUM_LIT:0>]<EOL>if (start4 >= <NUM_LIT:0>) and (<EOL>(start3 < <NUM_LIT:0>) or (start4 < start3)<EOL>):<EOL><INDENT>pos0 = start3<EOL>start3 = start4<EOL>start4 = pos0<EOL>pos0 = end3<EOL>end3 = end4<EOL>end4 = pos0<EOL><DEDENT>if (end3 >= start4) and (start4 >= <NUM_LIT:0>):<EOL><INDENT>if end4 > end3:<EOL><INDENT>end3 = end4<EOL><DEDENT>start4 = -<NUM_LIT:1><EOL>end4 = -<NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>pos0 = phonet_hash[char0]<EOL>start3 = pos0<EOL>end3 = <NUM_LIT><EOL>start4 = 
-<NUM_LIT:1><EOL>end4 = -<NUM_LIT:1><EOL><DEDENT>pos0 = start3<EOL><DEDENT>if pos0 >= <NUM_LIT:0>:<EOL><INDENT>while (_phonet_rules[pos0] is None) or (<EOL>_phonet_rules[pos0][<NUM_LIT:0>] == char0<EOL>):<EOL><INDENT>if pos0 > end3:<EOL><INDENT>if start4 > <NUM_LIT:0>:<EOL><INDENT>pos0 = start4<EOL>start3 = start4<EOL>start4 = -<NUM_LIT:1><EOL>end3 = end4<EOL>end4 = -<NUM_LIT:1><EOL>continue<EOL><DEDENT>priority0 = -<NUM_LIT:1><EOL>break<EOL><DEDENT>if (_phonet_rules[pos0] is None) or (<EOL>_phonet_rules[pos0 + mode] is None<EOL>):<EOL><INDENT>pos0 += <NUM_LIT:3><EOL>continue<EOL><DEDENT>matches0 = matches<EOL>priority0 = <NUM_LIT:5><EOL>rule = _phonet_rules[pos0]<EOL>rule = rule[<NUM_LIT:1>:]<EOL>while (<EOL>rule<EOL>and (<EOL>src[<EOL>i + matches0 : i + matches0 + <NUM_LIT:1><EOL>]<EOL>== rule[<NUM_LIT:0>]<EOL>)<EOL>and (<EOL>not rule[<NUM_LIT:0>].isdigit()<EOL>or (rule in '<STR_LIT>')<EOL>)<EOL>):<EOL><INDENT>matches0 += <NUM_LIT:1><EOL>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>if rule and rule[<NUM_LIT:0>] == '<STR_LIT:(>':<EOL><INDENT>if src[<EOL>i + matches0 : i + matches0 + <NUM_LIT:1><EOL>].isalpha() and (<EOL>src[i + matches0] in rule[<NUM_LIT:1>:]<EOL>):<EOL><INDENT>matches0 += <NUM_LIT:1><EOL>while rule and rule[<NUM_LIT:0>] != '<STR_LIT:)>':<EOL><INDENT>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT><DEDENT>while rule and rule[<NUM_LIT:0>] == '<STR_LIT:->':<EOL><INDENT>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>if rule and rule[<NUM_LIT:0>] == '<STR_LIT:<>':<EOL><INDENT>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>if rule and rule[<NUM_LIT:0>].isdigit():<EOL><INDENT>priority0 = int(rule[<NUM_LIT:0>])<EOL>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>if (<EOL>not rule<EOL>or<EOL>(<EOL>(rule[<NUM_LIT:0>] == '<STR_LIT:$>')<EOL>and not src[<EOL>i + matches0 : i + matches0 + <NUM_LIT:1><EOL>].isalpha()<EOL>and (<EOL>src[<EOL>i<EOL>+ matches0 : i<EOL>+ matches0<EOL>+ <NUM_LIT:1><EOL>]<EOL>!= '<STR_LIT:.>'<EOL>)<EOL>)<EOL>):<EOL><INDENT>if matches0 == 
matches:<EOL><INDENT>pos0 += <NUM_LIT:3><EOL>continue<EOL><DEDENT>if priority0 < priority:<EOL><INDENT>pos0 += <NUM_LIT:3><EOL>continue<EOL><DEDENT>break<EOL><DEDENT>pos0 += <NUM_LIT:3><EOL><DEDENT>if (priority0 >= priority) and (<EOL>(_phonet_rules[pos0] is not None)<EOL>and (_phonet_rules[pos0][<NUM_LIT:0>] == char0)<EOL>):<EOL><INDENT>pos += <NUM_LIT:3><EOL>continue<EOL><DEDENT><DEDENT>if _phonet_rules[pos] and (<EOL>'<STR_LIT:<>' in _phonet_rules[pos][<NUM_LIT:1>:]<EOL>):<EOL><INDENT>priority0 = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>priority0 = <NUM_LIT:0><EOL><DEDENT>rule = _phonet_rules[pos + mode]<EOL>if (priority0 == <NUM_LIT:1>) and (zeta == <NUM_LIT:0>):<EOL><INDENT>if (<EOL>(j > <NUM_LIT:0>)<EOL>and rule<EOL>and (<EOL>(dest[j - <NUM_LIT:1>] == char)<EOL>or (dest[j - <NUM_LIT:1>] == rule[<NUM_LIT:0>])<EOL>)<EOL>):<EOL><INDENT>j -= <NUM_LIT:1><EOL><DEDENT>zeta0 = <NUM_LIT:1><EOL>zeta += <NUM_LIT:1><EOL>matches0 = <NUM_LIT:0><EOL>while rule and src[i + matches0]:<EOL><INDENT>src = (<EOL>src[<NUM_LIT:0> : i + matches0]<EOL>+ rule[<NUM_LIT:0>]<EOL>+ src[i + matches0 + <NUM_LIT:1> :]<EOL>)<EOL>matches0 += <NUM_LIT:1><EOL>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>if matches0 < matches:<EOL><INDENT>src = (<EOL>src[<NUM_LIT:0> : i + matches0]<EOL>+ src[i + matches :]<EOL>)<EOL><DEDENT>char = src[i]<EOL><DEDENT>else:<EOL><INDENT>i = i + matches - <NUM_LIT:1><EOL>zeta = <NUM_LIT:0><EOL>while len(rule) > <NUM_LIT:1>:<EOL><INDENT>if (j == <NUM_LIT:0>) or (dest[j - <NUM_LIT:1>] != rule[<NUM_LIT:0>]):<EOL><INDENT>dest = (<EOL>dest[<NUM_LIT:0>:j]<EOL>+ rule[<NUM_LIT:0>]<EOL>+ dest[min(len(dest), j + <NUM_LIT:1>) :]<EOL>)<EOL>j += <NUM_LIT:1><EOL><DEDENT>rule = rule[<NUM_LIT:1>:]<EOL><DEDENT>if not rule:<EOL><INDENT>rule = '<STR_LIT>'<EOL>char = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>char = rule[<NUM_LIT:0>]<EOL><DEDENT>if (<EOL>_phonet_rules[pos]<EOL>and '<STR_LIT>' in _phonet_rules[pos][<NUM_LIT:1>:]<EOL>):<EOL><INDENT>if char:<EOL><INDENT>dest = 
(<EOL>dest[<NUM_LIT:0>:j]<EOL>+ char<EOL>+ dest[min(len(dest), j + <NUM_LIT:1>) :]<EOL>)<EOL>j += <NUM_LIT:1><EOL><DEDENT>src = src[i + <NUM_LIT:1> :]<EOL>i = <NUM_LIT:0><EOL>zeta0 = <NUM_LIT:1><EOL><DEDENT><DEDENT>break<EOL><DEDENT>pos += <NUM_LIT:3><EOL>if pos > end1 and start2 > <NUM_LIT:0>:<EOL><INDENT>pos = start2<EOL>start1 = start2<EOL>end1 = end2<EOL>start2 = -<NUM_LIT:1><EOL>end2 = -<NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>if zeta0 == <NUM_LIT:0>:<EOL><INDENT>if char and ((j == <NUM_LIT:0>) or (dest[j - <NUM_LIT:1>] != char)):<EOL><INDENT>dest = (<EOL>dest[<NUM_LIT:0>:j] + char + dest[min(j + <NUM_LIT:1>, term_length) :]<EOL>)<EOL>j += <NUM_LIT:1><EOL><DEDENT>i += <NUM_LIT:1><EOL>zeta = <NUM_LIT:0><EOL><DEDENT><DEDENT>dest = dest[<NUM_LIT:0>:j]<EOL>return dest<EOL><DEDENT>_initialize_phonet(lang)<EOL>word = unicode_normalize('<STR_LIT>', text_type(word))<EOL>return _phonet(word, mode, lang)<EOL>", "docstring": "Return the phonet code for a word.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform\n        mode : int\n            The ponet variant to employ (1 or 2)\n        lang : str\n            ``de`` (default) for German, ``none`` for no language\n\n        Returns\n        -------\n        str\n            The phonet value\n\n        Examples\n        --------\n        >>> pe = Phonet()\n        >>> pe.encode('Christopher')\n        'KRISTOFA'\n        >>> pe.encode('Niall')\n        'NIAL'\n        >>> pe.encode('Smith')\n        'SMIT'\n        >>> pe.encode('Schmidt')\n        'SHMIT'\n\n        >>> pe.encode('Christopher', mode=2)\n        'KRIZTUFA'\n        >>> pe.encode('Niall', mode=2)\n        'NIAL'\n        >>> pe.encode('Smith', mode=2)\n        'ZNIT'\n        >>> pe.encode('Schmidt', mode=2)\n        'ZNIT'\n\n        >>> pe.encode('Christopher', lang='none')\n        'CHRISTOPHER'\n        >>> pe.encode('Niall', lang='none')\n        'NIAL'\n        >>> pe.encode('Smith', lang='none')\n        
'SMITH'\n        >>> pe.encode('Schmidt', lang='none')\n        'SCHMIDT'", "id": "f6583:c0:m0"}
{"signature": "def encode(self, word, max_length=<NUM_LIT:4>):", "body": "word = unicode_normalize('<STR_LIT>', text_type(word.upper()))<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = '<STR_LIT>'.join(c for c in word if c in self._uc_set)<EOL>if word[:<NUM_LIT:2>] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>word = word[<NUM_LIT:1>:]<EOL><DEDENT>elif word[:<NUM_LIT:1>] == '<STR_LIT:X>':<EOL><INDENT>word = '<STR_LIT:S>' + word[<NUM_LIT:1>:]<EOL><DEDENT>elif word[:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>word = '<STR_LIT>' + word[<NUM_LIT:2>:]<EOL><DEDENT>word = (<EOL>word.replace('<STR_LIT>', '<STR_LIT>').replace('<STR_LIT>', '<STR_LIT>').replace('<STR_LIT>', '<STR_LIT:0>')<EOL>)<EOL>word = word.translate(self._trans)<EOL>word = self._delete_consecutive_repeats(word)<EOL>word = word.replace('<STR_LIT:0>', '<STR_LIT>')<EOL>if max_length != -<NUM_LIT:1>:<EOL><INDENT>if len(word) < max_length:<EOL><INDENT>word += '<STR_LIT:0>' * (max_length - len(word))<EOL><DEDENT>else:<EOL><INDENT>word = word[:max_length]<EOL><DEDENT><DEDENT>return word<EOL>", "docstring": "Return the SoundD code.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform\n        max_length : int\n            The length of the code returned (defaults to 4)\n\n        Returns\n        -------\n        str\n            The SoundD code\n\n        Examples\n        --------\n        >>> sound_d('Gough')\n        '2000'\n        >>> sound_d('pneuma')\n        '5500'\n        >>> sound_d('knight')\n        '5300'\n        >>> sound_d('trice')\n        '3620'\n        >>> sound_d('judge')\n        '2200'", "id": "f6584:c0:m0"}
{"signature": "def _pnums_with_leading_space(self, phonetic):", "body": "alt_start = phonetic.find('<STR_LIT:(>')<EOL>if alt_start == -<NUM_LIT:1>:<EOL><INDENT>return '<STR_LIT:U+0020>' + self._phonetic_number(phonetic)<EOL><DEDENT>prefix = phonetic[:alt_start]<EOL>alt_start += <NUM_LIT:1>  <EOL>alt_end = phonetic.find('<STR_LIT:)>', alt_start)<EOL>alt_string = phonetic[alt_start:alt_end]<EOL>alt_end += <NUM_LIT:1>  <EOL>suffix = phonetic[alt_end:]<EOL>alt_array = alt_string.split('<STR_LIT:|>')<EOL>result = '<STR_LIT>'<EOL>for alt in alt_array:<EOL><INDENT>result += self._pnums_with_leading_space(prefix + alt + suffix)<EOL><DEDENT>return result<EOL>", "docstring": "Join prefixes & suffixes in cases of alternate phonetic values.\n\n        Parameters\n        ----------\n        phonetic : str\n            A Beider-Morse phonetic encoding\n\n        Returns\n        -------\n        str\n            A Beider-Morse phonetic code", "id": "f6587:c0:m6"}
{"signature": "def _apply_rule_if_compat(self, phonetic, target, language_arg):", "body": "candidate = phonetic + target<EOL>if '<STR_LIT:[>' not in candidate:  <EOL><INDENT>return candidate<EOL><DEDENT>candidate = self._expand_alternates(candidate)<EOL>candidate_array = candidate.split('<STR_LIT:|>')<EOL>candidate = '<STR_LIT>'<EOL>found = False<EOL>for i in range(len(candidate_array)):<EOL><INDENT>this_candidate = candidate_array[i]<EOL>if language_arg != <NUM_LIT:1>:<EOL><INDENT>this_candidate = self._normalize_lang_attrs(<EOL>this_candidate + '<STR_LIT:[>' + str(language_arg) + '<STR_LIT:]>', False<EOL>)<EOL><DEDENT>if this_candidate != '<STR_LIT>':<EOL><INDENT>found = True<EOL>if candidate:<EOL><INDENT>candidate += '<STR_LIT:|>'<EOL><DEDENT>candidate += this_candidate<EOL><DEDENT><DEDENT>if not found:<EOL><INDENT>return None<EOL><DEDENT>if '<STR_LIT:|>' in candidate:<EOL><INDENT>candidate = '<STR_LIT:(>' + candidate + '<STR_LIT:)>'<EOL><DEDENT>return candidate<EOL>", "docstring": "Apply a phonetic regex if compatible.\n\n        tests for compatible language rules\n\n        to do so, apply the rule, expand the results, and detect alternatives\n            with incompatible attributes\n\n        then drop each alternative that has incompatible attributes and keep\n            those that are compatible\n\n        if there are no compatible alternatives left, return false\n\n        otherwise return the compatible alternatives\n\n        apply the rule\n\n        Parameters\n        ----------\n        phonetic : str\n            The Beider-Morse phonetic encoding (so far)\n        target : str\n            A proposed addition to the phonetic encoding\n        language_arg : int\n            An integer representing the target language of the phonetic\n            encoding\n\n        Returns\n        -------\n        str\n            A candidate encoding", "id": "f6587:c0:m10"}
{"signature": "def _expand_alternates(self, phonetic):", "body": "alt_start = phonetic.find('<STR_LIT:(>')<EOL>if alt_start == -<NUM_LIT:1>:<EOL><INDENT>return self._normalize_lang_attrs(phonetic, False)<EOL><DEDENT>prefix = phonetic[:alt_start]<EOL>alt_start += <NUM_LIT:1>  <EOL>alt_end = phonetic.find('<STR_LIT:)>', alt_start)<EOL>alt_string = phonetic[alt_start:alt_end]<EOL>alt_end += <NUM_LIT:1>  <EOL>suffix = phonetic[alt_end:]<EOL>alt_array = alt_string.split('<STR_LIT:|>')<EOL>result = '<STR_LIT>'<EOL>for i in range(len(alt_array)):<EOL><INDENT>alt = alt_array[i]<EOL>alternate = self._expand_alternates(prefix + alt + suffix)<EOL>if alternate != '<STR_LIT>' and alternate != '<STR_LIT>':<EOL><INDENT>if result != '<STR_LIT>':<EOL><INDENT>result += '<STR_LIT:|>'<EOL><DEDENT>result += alternate<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Expand phonetic alternates separated by |s.\n\n        Parameters\n        ----------\n        phonetic : str\n            A Beider-Morse phonetic encoding\n\n        Returns\n        -------\n        str\n            A Beider-Morse phonetic code", "id": "f6587:c0:m5"}
{"signature": "def encode(<EOL>self,<EOL>word,<EOL>language_arg=<NUM_LIT:0>,<EOL>name_mode='<STR_LIT>',<EOL>match_mode='<STR_LIT>',<EOL>concat=False,<EOL>filter_langs=False,<EOL>):", "body": "word = normalize('<STR_LIT>', text_type(word.strip().lower()))<EOL>name_mode = name_mode.strip().lower()[:<NUM_LIT:3>]<EOL>if name_mode not in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>name_mode = '<STR_LIT>'<EOL><DEDENT>if match_mode != '<STR_LIT>':<EOL><INDENT>match_mode = '<STR_LIT>'<EOL><DEDENT>all_langs = (<EOL>sum(_LANG_DICT[_] for _ in BMDATA[name_mode]['<STR_LIT>']) - <NUM_LIT:1><EOL>)<EOL>lang_choices = <NUM_LIT:0><EOL>if isinstance(language_arg, (int, float, long)):<EOL><INDENT>lang_choices = int(language_arg)<EOL><DEDENT>elif language_arg != '<STR_LIT>' and isinstance(language_arg, (text_type, str)):<EOL><INDENT>for lang in text_type(language_arg).lower().split('<STR_LIT:U+002C>'):<EOL><INDENT>if lang in _LANG_DICT and (_LANG_DICT[lang] & all_langs):<EOL><INDENT>lang_choices += _LANG_DICT[lang]<EOL><DEDENT>elif not filter_langs:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>+ name_mode<EOL>+ '<STR_LIT>'<EOL>+ lang<EOL>+ '<STR_LIT>'<EOL>)<EOL><DEDENT><DEDENT><DEDENT>if lang_choices == <NUM_LIT:0>:<EOL><INDENT>language_arg = self._language(word, name_mode)<EOL><DEDENT>else:<EOL><INDENT>language_arg = lang_choices<EOL><DEDENT>language_arg2 = self._language_index_from_code(language_arg, name_mode)<EOL>rules = BMDATA[name_mode]['<STR_LIT>'][language_arg2]<EOL>final_rules1 = BMDATA[name_mode][match_mode]['<STR_LIT>']<EOL>final_rules2 = BMDATA[name_mode][match_mode][language_arg2]<EOL>result = self._phonetic(<EOL>word,<EOL>name_mode,<EOL>rules,<EOL>final_rules1,<EOL>final_rules2,<EOL>language_arg,<EOL>concat,<EOL>)<EOL>result = self._phonetic_numbers(result)<EOL>return result<EOL>", "docstring": "Return the Beider-Morse Phonetic Matching encoding(s) of a term.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform\n  
      language_arg : int\n            The language of the term; supported values include:\n\n                - ``any``\n                - ``arabic``\n                - ``cyrillic``\n                - ``czech``\n                - ``dutch``\n                - ``english``\n                - ``french``\n                - ``german``\n                - ``greek``\n                - ``greeklatin``\n                - ``hebrew``\n                - ``hungarian``\n                - ``italian``\n                - ``latvian``\n                - ``polish``\n                - ``portuguese``\n                - ``romanian``\n                - ``russian``\n                - ``spanish``\n                - ``turkish``\n\n        name_mode : str\n            The name mode of the algorithm:\n\n                - ``gen`` -- general (default)\n                - ``ash`` -- Ashkenazi\n                - ``sep`` -- Sephardic\n\n        match_mode : str\n            Matching mode: ``approx`` or ``exact``\n        concat : bool\n            Concatenation mode\n        filter_langs : bool\n            Filter out incompatible languages\n\n        Returns\n        -------\n        tuple\n            The Beider-Morse phonetic value(s)\n\n        Raises\n        ------\n        ValueError\n            Unknown language\n\n        Examples\n        --------\n        >>> pe = BeiderMorse()\n        >>> pe.encode('Christopher')\n        'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir\n        xristofir xristYfir xristopi xritopir xritopi xristofi xritofir\n        xritofi tzristopir tzristofir zristopir zristopi zritopir zritopi\n        zristofir zristofi zritofir zritofi'\n        >>> pe.encode('Niall')\n        'nial niol'\n        >>> pe.encode('Smith')\n        'zmit'\n        >>> pe.encode('Schmidt')\n        'zmit stzmit'\n\n        >>> pe.encode('Christopher', language_arg='German')\n        'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir\n        xristofir xristYfir'\n   
     >>> pe.encode('Christopher', language_arg='English')\n        'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir\n        xristafir xrQstafir'\n        >>> pe.encode('Christopher', language_arg='German', name_mode='ash')\n        'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir\n        xristofir xristYfir'\n\n        >>> pe.encode('Christopher', language_arg='German', match_mode='exact')\n        'xriStopher xriStofer xristopher xristofer'", "id": "f6587:c0:m12"}
{"signature": "def _remove_dupes(self, phonetic):", "body": "alt_string = phonetic<EOL>alt_array = alt_string.split('<STR_LIT:|>')<EOL>result = '<STR_LIT:|>'<EOL>for i in range(len(alt_array)):<EOL><INDENT>alt = alt_array[i]<EOL>if alt and '<STR_LIT:|>' + alt + '<STR_LIT:|>' not in result:<EOL><INDENT>result += alt + '<STR_LIT:|>'<EOL><DEDENT><DEDENT>return result[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL>", "docstring": "Remove duplicates from a phonetic encoding list.\n\n        Parameters\n        ----------\n        phonetic : str\n            A Beider-Morse phonetic encoding\n\n        Returns\n        -------\n        str\n            A Beider-Morse phonetic code", "id": "f6587:c0:m8"}
{"signature": "def _phonetic_number(self, phonetic):", "body": "if '<STR_LIT:[>' in phonetic:<EOL><INDENT>return phonetic[: phonetic.find('<STR_LIT:[>')]<EOL><DEDENT>return phonetic<EOL>", "docstring": "Remove bracketed text from the end of a string.\n\n        Parameters\n        ----------\n        phonetic : str\n            A Beider-Morse phonetic encoding\n\n        Returns\n        -------\n        str\n            A Beider-Morse phonetic code", "id": "f6587:c0:m4"}
{"signature": "def bmpm(<EOL>word,<EOL>language_arg=<NUM_LIT:0>,<EOL>name_mode='<STR_LIT>',<EOL>match_mode='<STR_LIT>',<EOL>concat=False,<EOL>filter_langs=False,<EOL>):", "body": "return BeiderMorse().encode(<EOL>word, language_arg, name_mode, match_mode, concat, filter_langs<EOL>)<EOL>", "docstring": "Return the Beider-Morse Phonetic Matching encoding(s) of a term.\n\n    This is a wrapper for :py:meth:`BeiderMorse.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n    language_arg : str\n        The language of the term; supported values include:\n\n            - ``any``\n            - ``arabic``\n            - ``cyrillic``\n            - ``czech``\n            - ``dutch``\n            - ``english``\n            - ``french``\n            - ``german``\n            - ``greek``\n            - ``greeklatin``\n            - ``hebrew``\n            - ``hungarian``\n            - ``italian``\n            - ``latvian``\n            - ``polish``\n            - ``portuguese``\n            - ``romanian``\n            - ``russian``\n            - ``spanish``\n            - ``turkish``\n\n    name_mode : str\n        The name mode of the algorithm:\n\n            - ``gen`` -- general (default)\n            - ``ash`` -- Ashkenazi\n            - ``sep`` -- Sephardic\n\n    match_mode : str\n        Matching mode: ``approx`` or ``exact``\n    concat : bool\n        Concatenation mode\n    filter_langs : bool\n        Filter out incompatible languages\n\n    Returns\n    -------\n    tuple\n        The Beider-Morse phonetic value(s)\n\n    Examples\n    --------\n    >>> bmpm('Christopher')\n    'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir\n    xristYfir xristopi xritopir xritopi xristofi xritofir xritofi\n    tzristopir tzristofir zristopir zristopi zritopir zritopi zristofir\n    zristofi zritofir zritofi'\n    >>> bmpm('Niall')\n    'nial niol'\n    >>> bmpm('Smith')\n    'zmit'\n    >>> bmpm('Schmidt')\n    
'zmit stzmit'\n\n    >>> bmpm('Christopher', language_arg='German')\n    'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir\n    xristYfir'\n    >>> bmpm('Christopher', language_arg='English')\n    'tzristofir tzrQstofir tzristafir tzrQstafir xristofir xrQstofir\n    xristafir xrQstafir'\n    >>> bmpm('Christopher', language_arg='German', name_mode='ash')\n    'xrQstopir xrQstYpir xristopir xristYpir xrQstofir xrQstYfir xristofir\n    xristYfir'\n\n    >>> bmpm('Christopher', language_arg='German', match_mode='exact')\n    'xriStopher xriStofer xristopher xristofer'", "id": "f6587:m0"}
{"signature": "def onca(word, max_length=<NUM_LIT:4>, zero_pad=True):", "body": "return ONCA().encode(word, max_length, zero_pad)<EOL>", "docstring": "Return the Oxford Name Compression Algorithm (ONCA) code for a word.\n\n    This is a wrapper for :py:meth:`ONCA.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n    max_length : int\n        The maximum length (default 5) of the code to return\n    zero_pad : bool\n        Pad the end of the return value with 0s to achieve a max_length string\n\n    Returns\n    -------\n    str\n        The ONCA code\n\n    Examples\n    --------\n    >>> onca('Christopher')\n    'C623'\n    >>> onca('Niall')\n    'N400'\n    >>> onca('Smith')\n    'S530'\n    >>> onca('Schmidt')\n    'S530'", "id": "f6589:m0"}
{"signature": "def encode(self, word, max_length=-<NUM_LIT:1>):", "body": "def _foersvensker(lokal_ordet):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>lokal_ordet = lokal_ordet.replace('<STR_LIT>', '<STR_LIT>')<EOL>lokal_ordet = lokal_ordet.replace('<STR_LIT>', '<STR_LIT>')<EOL>lokal_ordet = lokal_ordet.replace('<STR_LIT>', '<STR_LIT>')<EOL>lokal_ordet = lokal_ordet.replace('<STR_LIT>', '<STR_LIT>')<EOL>lokal_ordet = lokal_ordet.replace('<STR_LIT>', '<STR_LIT>')<EOL>lokal_ordet = lokal_ordet.replace('<STR_LIT>', '<STR_LIT>')<EOL>lokal_ordet = lokal_ordet.replace('<STR_LIT>', '<STR_LIT:F>')<EOL>for i in self._harde_vokaler:<EOL><INDENT>lokal_ordet = lokal_ordet.replace(i + '<STR_LIT>', i + '<STR_LIT>')<EOL>lokal_ordet = lokal_ordet.replace(i + '<STR_LIT:Y>', i + '<STR_LIT>')<EOL>lokal_ordet = lokal_ordet.replace(i + '<STR_LIT:I>', i + '<STR_LIT>')<EOL><DEDENT>for i in self._mjuka_vokaler:<EOL><INDENT>lokal_ordet = lokal_ordet.replace(i + '<STR_LIT>', i + '<STR_LIT>')<EOL>lokal_ordet = lokal_ordet.replace(i + '<STR_LIT:Y>', i + '<STR_LIT>')<EOL>lokal_ordet = lokal_ordet.replace(i + '<STR_LIT:I>', i + '<STR_LIT>')<EOL><DEDENT>if '<STR_LIT:H>' in lokal_ordet:<EOL><INDENT>for i in self._uc_c_set:<EOL><INDENT>lokal_ordet = lokal_ordet.replace('<STR_LIT:H>' + i, i)<EOL><DEDENT><DEDENT>lokal_ordet = lokal_ordet.translate(self._substitutions)<EOL>lokal_ordet = lokal_ordet.replace('<STR_LIT>', '<STR_LIT>')<EOL>lokal_ordet = lokal_ordet.replace('<STR_LIT>', '<STR_LIT>')<EOL>lokal_ordet = lokal_ordet.replace('<STR_LIT>', '<STR_LIT>')<EOL>return lokal_ordet<EOL><DEDENT>def _koda_foersta_ljudet(lokal_ordet):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if (<EOL>lokal_ordet[<NUM_LIT:0>:<NUM_LIT:1>] in self._mjuka_vokaler<EOL>or lokal_ordet[<NUM_LIT:0>:<NUM_LIT:1>] in self._harde_vokaler<EOL>):<EOL><INDENT>lokal_ordet = '<STR_LIT:$>' + lokal_ordet[<NUM_LIT:1>:]<EOL><DEDENT>elif lokal_ordet[<NUM_LIT:0>:<NUM_LIT:2>] in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>lokal_ordet 
= '<STR_LIT>' + lokal_ordet[<NUM_LIT:2>:]<EOL><DEDENT>elif (<EOL>lokal_ordet[<NUM_LIT:0>:<NUM_LIT:1>] == '<STR_LIT>'<EOL>and lokal_ordet[<NUM_LIT:1>:<NUM_LIT:2>] in self._mjuka_vokaler<EOL>):<EOL><INDENT>lokal_ordet = '<STR_LIT>' + lokal_ordet[<NUM_LIT:1>:]<EOL><DEDENT>elif lokal_ordet[<NUM_LIT:0>:<NUM_LIT:1>] == '<STR_LIT>':<EOL><INDENT>lokal_ordet = '<STR_LIT>' + lokal_ordet[<NUM_LIT:1>:]<EOL><DEDENT>elif lokal_ordet[<NUM_LIT:0>:<NUM_LIT:2>] == '<STR_LIT>' and lokal_ordet[<NUM_LIT:2>:<NUM_LIT:3>] in frozenset(<EOL>self._mjuka_vokaler | self._harde_vokaler<EOL>):<EOL><INDENT>lokal_ordet = '<STR_LIT:#>' + lokal_ordet[<NUM_LIT:2>:]<EOL><DEDENT>elif (<EOL>lokal_ordet[<NUM_LIT:0>:<NUM_LIT:1>] == '<STR_LIT:C>'<EOL>and lokal_ordet[<NUM_LIT:1>:<NUM_LIT:2>] in self._harde_vokaler<EOL>):<EOL><INDENT>lokal_ordet = '<STR_LIT>' + lokal_ordet[<NUM_LIT:1>:]<EOL><DEDENT>elif (<EOL>lokal_ordet[<NUM_LIT:0>:<NUM_LIT:1>] == '<STR_LIT:C>' and lokal_ordet[<NUM_LIT:1>:<NUM_LIT:2>] in self._uc_c_set<EOL>):<EOL><INDENT>lokal_ordet = '<STR_LIT>' + lokal_ordet[<NUM_LIT:1>:]<EOL><DEDENT>elif lokal_ordet[<NUM_LIT:0>:<NUM_LIT:1>] == '<STR_LIT:X>':<EOL><INDENT>lokal_ordet = '<STR_LIT:S>' + lokal_ordet[<NUM_LIT:1>:]<EOL><DEDENT>elif (<EOL>lokal_ordet[<NUM_LIT:0>:<NUM_LIT:1>] == '<STR_LIT:C>'<EOL>and lokal_ordet[<NUM_LIT:1>:<NUM_LIT:2>] in self._mjuka_vokaler<EOL>):<EOL><INDENT>lokal_ordet = '<STR_LIT:S>' + lokal_ordet[<NUM_LIT:1>:]<EOL><DEDENT>elif lokal_ordet[<NUM_LIT:0>:<NUM_LIT:3>] in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>lokal_ordet = '<STR_LIT:#>' + lokal_ordet[<NUM_LIT:3>:]<EOL><DEDENT>elif lokal_ordet[<NUM_LIT:0>:<NUM_LIT:2>] in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>lokal_ordet = '<STR_LIT:#>' + lokal_ordet[<NUM_LIT:2>:]<EOL><DEDENT>elif (<EOL>lokal_ordet[<NUM_LIT:0>:<NUM_LIT:2>] == '<STR_LIT>'<EOL>and lokal_ordet[<NUM_LIT:2>:<NUM_LIT:3>] in self._mjuka_vokaler<EOL>):<EOL><INDENT>lokal_ordet = '<STR_LIT:#>' + 
lokal_ordet[<NUM_LIT:2>:]<EOL><DEDENT>elif (<EOL>lokal_ordet[<NUM_LIT:0>:<NUM_LIT:1>] == '<STR_LIT>'<EOL>and lokal_ordet[<NUM_LIT:1>:<NUM_LIT:2>] in self._mjuka_vokaler<EOL>):<EOL><INDENT>lokal_ordet = '<STR_LIT:#>' + lokal_ordet[<NUM_LIT:1>:]<EOL><DEDENT>return lokal_ordet<EOL><DEDENT>word = unicode_normalize('<STR_LIT>', text_type(word.upper()))<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT:->', '<STR_LIT:U+0020>')<EOL>for adelstitel in self._adelstitler:<EOL><INDENT>while adelstitel in word:<EOL><INDENT>word = word.replace(adelstitel, '<STR_LIT:U+0020>')<EOL><DEDENT>if word.startswith(adelstitel[<NUM_LIT:1>:]):<EOL><INDENT>word = word[len(adelstitel) - <NUM_LIT:1> :]<EOL><DEDENT><DEDENT>ordlista = word.split()<EOL>ordlista = [<EOL>self._delete_consecutive_repeats(ordet) for ordet in ordlista<EOL>]<EOL>if not ordlista:<EOL><INDENT>return ('<STR_LIT>',)<EOL><DEDENT>ordlista = [_foersvensker(ordet) for ordet in ordlista]<EOL>ordlista = [<EOL>'<STR_LIT>'.join(c for c in ordet if c in self._uc_set)<EOL>for ordet in ordlista<EOL>]<EOL>ordlista = [_koda_foersta_ljudet(ordet) for ordet in ordlista]<EOL>rest = [ordet[<NUM_LIT:1>:] for ordet in ordlista]<EOL>rest = [ordet.replace('<STR_LIT>', '<STR_LIT:T>') for ordet in rest]<EOL>rest = [ordet.replace('<STR_LIT:X>', '<STR_LIT>') for ordet in rest]<EOL>for vokal in self._mjuka_vokaler:<EOL><INDENT>rest = [ordet.replace('<STR_LIT:C>' + vokal, '<STR_LIT>' + vokal) for ordet in rest]<EOL><DEDENT>rest = [ordet.translate(self._trans) for ordet in rest]<EOL>rest = [self._delete_consecutive_repeats(ordet) for ordet in rest]<EOL>rest = [ordet.replace('<STR_LIT>', '<STR_LIT>') for ordet in rest]<EOL>ordlista = [<EOL>'<STR_LIT>'.join(ordet) for ordet in zip((_[<NUM_LIT:0>:<NUM_LIT:1>] for _ in ordlista), rest)<EOL>]<EOL>if max_length > <NUM_LIT:0>:<EOL><INDENT>ordlista = [ordet[:max_length] for ordet in ordlista]<EOL><DEDENT>return tuple(ordlista)<EOL>", "docstring": "Return the SfinxBis code 
for a word.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform\n        max_length : int\n            The length of the code returned (defaults to unlimited)\n\n        Returns\n        -------\n        tuple\n            The SfinxBis value\n\n        Examples\n        --------\n        >>> pe = SfinxBis()\n        >>> pe.encode('Christopher')\n        ('K68376',)\n        >>> pe.encode('Niall')\n        ('N4',)\n        >>> pe.encode('Smith')\n        ('S53',)\n        >>> pe.encode('Schmidt')\n        ('S53',)\n\n        >>> pe.encode('Johansson')\n        ('J585',)\n        >>> pe.encode('Sj\u00f6berg')\n        ('#162',)", "id": "f6590:c0:m0"}
{"signature": "def encode(self, word, max_length=-<NUM_LIT:1>, zero_pad=False, retain_vowels=False):", "body": "<EOL>word = unicode_normalize('<STR_LIT>', text_type(word.upper()))<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = '<STR_LIT>'.join(c for c in word if c in self._uc_set)<EOL>sdx = word[:<NUM_LIT:1>] + word.translate(self._trans)<EOL>sdx = self._delete_consecutive_repeats(sdx)<EOL>if not retain_vowels:<EOL><INDENT>sdx = sdx.replace('<STR_LIT:0>', '<STR_LIT>')  <EOL><DEDENT>if max_length > <NUM_LIT:0>:<EOL><INDENT>if zero_pad:<EOL><INDENT>sdx += '<STR_LIT:0>' * max_length<EOL><DEDENT>sdx = sdx[:max_length]<EOL><DEDENT>return sdx<EOL>", "docstring": "Return the Refined Soundex code for a word.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform\n        max_length : int\n            The length of the code returned (defaults to unlimited)\n        zero_pad : bool\n            Pad the end of the return value with 0s to achieve a max_length\n            string\n        retain_vowels : bool\n            Retain vowels (as 0) in the resulting code\n\n        Returns\n        -------\n        str\n            The Refined Soundex value\n\n        Examples\n        --------\n        >>> pe = RefinedSoundex()\n        >>> pe.encode('Christopher')\n        'C393619'\n        >>> pe.encode('Niall')\n        'N87'\n        >>> pe.encode('Smith')\n        'S386'\n        >>> pe.encode('Schmidt')\n        'S386'", "id": "f6591:c0:m0"}
{"signature": "def encode(self, fname, max_length=<NUM_LIT:4>, german=False):", "body": "fname = unicode_normalize('<STR_LIT>', text_type(fname.upper()))<EOL>fname = fname.replace('<STR_LIT>', '<STR_LIT>')<EOL>fname = '<STR_LIT>'.join(c for c in fname if c in self._uc_set)<EOL>if fname == '<STR_LIT>':<EOL><INDENT>code = '<STR_LIT>'<EOL><DEDENT>elif fname == '<STR_LIT>':<EOL><INDENT>code = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>if fname[:<NUM_LIT:2>] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>fname = '<STR_LIT>' + fname[<NUM_LIT:1>:]<EOL><DEDENT>elif fname[:<NUM_LIT:2>] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>fname = '<STR_LIT:S>' + fname[<NUM_LIT:1>:]<EOL><DEDENT>elif fname[:<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>fname = '<STR_LIT>' + fname[<NUM_LIT:1>:]<EOL><DEDENT>elif fname[:<NUM_LIT:1>] == '<STR_LIT:C>' and fname[:<NUM_LIT:2>] != '<STR_LIT>':<EOL><INDENT>fname = '<STR_LIT>' + fname[<NUM_LIT:1>:]<EOL><DEDENT>if fname[:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>fname = '<STR_LIT:N>' + fname[<NUM_LIT:1>:]<EOL><DEDENT>elif fname[:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>fname = '<STR_LIT:F>' + fname[<NUM_LIT:1>:]<EOL><DEDENT>elif fname[:<NUM_LIT:3>] in {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>fname = '<STR_LIT>' + fname[<NUM_LIT:1>:]<EOL><DEDENT>if german and fname[:<NUM_LIT:1>] in {'<STR_LIT>', '<STR_LIT:M>', '<STR_LIT:Y>', '<STR_LIT>'}:<EOL><INDENT>fname = {'<STR_LIT>': '<STR_LIT>', '<STR_LIT:M>': '<STR_LIT:N>', '<STR_LIT:Y>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT:S>'}[<EOL>fname[<NUM_LIT:0>]<EOL>] + fname[<NUM_LIT:1>:]<EOL><DEDENT>code = fname[:<NUM_LIT:1>]<EOL>fname = fname.translate(self._trans)<EOL>fname = self._delete_consecutive_repeats(fname)<EOL>code += fname[<NUM_LIT:1>:]<EOL>syl_ptr = code.find('<STR_LIT:0>')<EOL>syl2_ptr = code[syl_ptr + <NUM_LIT:1> :].find('<STR_LIT:0>')<EOL>if syl_ptr != -<NUM_LIT:1> and syl2_ptr != -<NUM_LIT:1> and syl2_ptr - syl_ptr > -<NUM_LIT:1>:<EOL><INDENT>code = code[: syl_ptr + 
<NUM_LIT:2>]<EOL><DEDENT>code = code.replace('<STR_LIT:0>', '<STR_LIT>')  <EOL><DEDENT>if max_length != -<NUM_LIT:1>:<EOL><INDENT>if len(code) < max_length:<EOL><INDENT>code += '<STR_LIT:0>' * (max_length - len(code))<EOL><DEDENT>else:<EOL><INDENT>code = code[:max_length]<EOL><DEDENT><DEDENT>return code<EOL>", "docstring": "Calculate the PSHP Soundex/Viewex Coding of a first name.\n\n        Parameters\n        ----------\n        fname : str\n            The first name to encode\n        max_length : int\n            The length of the code returned (defaults to 4)\n        german : bool\n            Set to True if the name is German (different rules apply)\n\n        Returns\n        -------\n        str\n            The PSHP Soundex/Viewex Coding\n\n        Examples\n        --------\n        >>> pe = PSHPSoundexFirst()\n        >>> pe.encode('Smith')\n        'S530'\n        >>> pe.encode('Waters')\n        'W352'\n        >>> pe.encode('James')\n        'J700'\n        >>> pe.encode('Schmidt')\n        'S500'\n        >>> pe.encode('Ashcroft')\n        'A220'\n        >>> pe.encode('John')\n        'J500'\n        >>> pe.encode('Colin')\n        'K400'\n        >>> pe.encode('Niall')\n        'N400'\n        >>> pe.encode('Sally')\n        'S400'\n        >>> pe.encode('Jane')\n        'J500'", "id": "f6592:c0:m0"}
{"signature": "def russell_index_alpha(word):", "body": "return RussellIndex().encode_alpha(word)<EOL>", "docstring": "Return the Russell Index (alphabetic output) for the word.\n\n    This is a wrapper for :py:meth:`RussellIndex.encode_alpha`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n\n    Returns\n    -------\n    str\n        The Russell Index value as an alphabetic string\n\n    Examples\n    --------\n    >>> russell_index_alpha('Christopher')\n    'CRACDBR'\n    >>> russell_index_alpha('Niall')\n    'NAL'\n    >>> russell_index_alpha('Smith')\n    'CMAD'\n    >>> russell_index_alpha('Schmidt')\n    'CMAD'", "id": "f6593:m2"}
{"signature": "def _to_alpha(self, num):", "body": "num = '<STR_LIT>'.join(c for c in text_type(num) if c in self._num_set)<EOL>if num:<EOL><INDENT>return num.translate(self._num_trans)<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Convert the Russell Index integer to an alphabetic string.\n\n        This follows Robert C. Russell's Index algorithm, as described in\n        :cite:`Russell:1917`.\n\n        Parameters\n        ----------\n        num : int\n            A Russell Index integer value\n\n        Returns\n        -------\n        str\n            The Russell Index as an alphabetic string\n\n        Examples\n        --------\n        >>> pe = RussellIndex()\n        >>> pe._to_alpha(3813428)\n        'CRACDBR'\n        >>> pe._to_alpha(715)\n        'NAL'\n        >>> pe._to_alpha(3614)\n        'CMAD'", "id": "f6593:c0:m1"}
{"signature": "def russell_index_num_to_alpha(num):", "body": "return RussellIndex()._to_alpha(num)<EOL>", "docstring": "Convert the Russell Index integer to an alphabetic string.\n\n    This is a wrapper for :py:meth:`RussellIndex._to_alpha`.\n\n    Parameters\n    ----------\n    num : int\n        A Russell Index integer value\n\n    Returns\n    -------\n    str\n        The Russell Index as an alphabetic string\n\n    Examples\n    --------\n    >>> russell_index_num_to_alpha(3813428)\n    'CRACDBR'\n    >>> russell_index_num_to_alpha(715)\n    'NAL'\n    >>> russell_index_num_to_alpha(3614)\n    'CMAD'", "id": "f6593:m1"}
{"signature": "def soundex(word, max_length=<NUM_LIT:4>, var='<STR_LIT>', reverse=False, zero_pad=True):", "body": "return Soundex().encode(word, max_length, var, reverse, zero_pad)<EOL>", "docstring": "Return the Soundex code for a word.\n\n    This is a wrapper for :py:meth:`Soundex.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n    max_length : int\n        The length of the code returned (defaults to 4)\n    var : str\n        The variant of the algorithm to employ (defaults to ``American``):\n\n            - ``American`` follows the American Soundex algorithm, as described\n              at :cite:`US:2007` and in :cite:`Knuth:1998`; this is also called\n              Miracode\n            - ``special`` follows the rules from the 1880-1910 US Census\n              retrospective re-analysis, in which h & w are not treated as\n              blocking consonants but as vowels. Cf. :cite:`Repici:2013`.\n            - ``Census`` follows the rules laid out in GIL 55 :cite:`US:1997`\n              by the US Census, including coding prefixed and unprefixed\n              versions of some names\n\n    reverse : bool\n        Reverse the word before computing the selected Soundex (defaults to\n        False); This results in \"Reverse Soundex\", which is useful for blocking\n        in cases where the initial elements may be in error.\n    zero_pad : bool\n        Pad the end of the return value with 0s to achieve a max_length string\n\n    Returns\n    -------\n    str\n        The Soundex value\n\n    Examples\n    --------\n    >>> soundex(\"Christopher\")\n    'C623'\n    >>> soundex(\"Niall\")\n    'N400'\n    >>> soundex('Smith')\n    'S530'\n    >>> soundex('Schmidt')\n    'S530'\n\n    >>> soundex('Christopher', max_length=-1)\n    'C623160000000000000000000000000000000000000000000000000000000000'\n    >>> soundex('Christopher', max_length=-1, zero_pad=False)\n    'C62316'\n\n    >>> soundex('Christopher', 
reverse=True)\n    'R132'\n\n    >>> soundex('Ashcroft')\n    'A261'\n    >>> soundex('Asicroft')\n    'A226'\n    >>> soundex('Ashcroft', var='special')\n    'A226'\n    >>> soundex('Asicroft', var='special')\n    'A226'", "id": "f6594:m0"}
{"signature": "def lein(word, max_length=<NUM_LIT:4>, zero_pad=True):", "body": "return Lein().encode(word, max_length, zero_pad)<EOL>", "docstring": "Return the Lein code for a word.\n\n    This is a wrapper for :py:meth:`Lein.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n    max_length : int\n        The length of the code returned (defaults to 4)\n    zero_pad : bool\n        Pad the end of the return value with 0s to achieve a max_length string\n\n    Returns\n    -------\n    str\n        The Lein code\n\n    Examples\n    --------\n    >>> lein('Christopher')\n    'C351'\n    >>> lein('Niall')\n    'N300'\n    >>> lein('Smith')\n    'S210'\n    >>> lein('Schmidt')\n    'S521'", "id": "f6595:m0"}
{"signature": "def nrl(word):", "body": "return NRL().encode(word)<EOL>", "docstring": "Return the Naval Research Laboratory phonetic encoding of a word.\n\n    This is a wrapper for :py:meth:`NRL.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n\n    Returns\n    -------\n    str\n        The NRL phonetic encoding\n\n    Examples\n    --------\n    >>> nrl('the')\n    'DHAX'\n    >>> nrl('round')\n    'rAWnd'\n    >>> nrl('quick')\n    'kwIHk'\n    >>> nrl('eaten')\n    'IYtEHn'\n    >>> nrl('Smith')\n    'smIHTH'\n    >>> nrl('Larsen')\n    'lAArsEHn'", "id": "f6598:m0"}
{"signature": "def phonetic_spanish(word, max_length=-<NUM_LIT:1>):", "body": "return PhoneticSpanish().encode(word, max_length)<EOL>", "docstring": "Return the PhoneticSpanish coding of word.\n\n    This is a wrapper for :py:meth:`PhoneticSpanish.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n    max_length : int\n        The length of the code returned (defaults to unlimited)\n\n    Returns\n    -------\n    str\n        The PhoneticSpanish code\n\n    Examples\n    --------\n    >>> phonetic_spanish('Perez')\n    '094'\n    >>> phonetic_spanish('Martinez')\n    '69364'\n    >>> phonetic_spanish('Gutierrez')\n    '83994'\n    >>> phonetic_spanish('Santiago')\n    '4638'\n    >>> phonetic_spanish('Nicol\u00e1s')\n    '6454'", "id": "f6599:m0"}
{"signature": "def encode(self, word):", "body": "pass<EOL>", "docstring": "Encode phonetically.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform", "id": "f6601:c0:m1"}
{"signature": "def encode_alpha(self, word):", "body": "return self.encode(word)<EOL>", "docstring": "Encode phonetically using alphabetic characters.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform\n\n        Returns\n        -------\n        str\n            The word transformed", "id": "f6601:c0:m2"}
{"signature": "def encode(self, word):", "body": "def _after(word, pos, letters):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return pos > <NUM_LIT:0> and word[pos - <NUM_LIT:1>] in letters<EOL><DEDENT>def _before(word, pos, letters):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return pos + <NUM_LIT:1> < len(word) and word[pos + <NUM_LIT:1>] in letters<EOL><DEDENT>sdx = '<STR_LIT>'<EOL>word = unicode_normalize('<STR_LIT>', text_type(word.upper()))<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = '<STR_LIT>'.join(c for c in word if c in self._uc_set)<EOL>if not word:<EOL><INDENT>return sdx<EOL><DEDENT>for i in range(len(word)):<EOL><INDENT>if word[i] in self._uc_v_set:<EOL><INDENT>sdx += '<STR_LIT:0>'<EOL><DEDENT>elif word[i] == '<STR_LIT:B>':<EOL><INDENT>sdx += '<STR_LIT:1>'<EOL><DEDENT>elif word[i] == '<STR_LIT:P>':<EOL><INDENT>if _before(word, i, {'<STR_LIT:H>'}):<EOL><INDENT>sdx += '<STR_LIT:3>'<EOL><DEDENT>else:<EOL><INDENT>sdx += '<STR_LIT:1>'<EOL><DEDENT><DEDENT>elif word[i] in {'<STR_LIT:D>', '<STR_LIT:T>'}:<EOL><INDENT>if _before(word, i, {'<STR_LIT:C>', '<STR_LIT:S>', '<STR_LIT>'}):<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>sdx += '<STR_LIT:2>'<EOL><DEDENT><DEDENT>elif word[i] in {'<STR_LIT:F>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>sdx += '<STR_LIT:3>'<EOL><DEDENT>elif word[i] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>sdx += '<STR_LIT:4>'<EOL><DEDENT>elif word[i] == '<STR_LIT:C>':<EOL><INDENT>if _after(word, i, {'<STR_LIT:S>', '<STR_LIT>'}):<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT>elif i == <NUM_LIT:0>:<EOL><INDENT>if _before(<EOL>word, i, {'<STR_LIT:A>', '<STR_LIT:H>', '<STR_LIT>', '<STR_LIT:L>', '<STR_LIT:O>', '<STR_LIT>', '<STR_LIT:R>', '<STR_LIT>', '<STR_LIT:X>'}<EOL>):<EOL><INDENT>sdx += '<STR_LIT:4>'<EOL><DEDENT>else:<EOL><INDENT>sdx += 
'<STR_LIT>'<EOL><DEDENT><DEDENT>elif _before(word, i, {'<STR_LIT:A>', '<STR_LIT:H>', '<STR_LIT>', '<STR_LIT:O>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:X>'}):<EOL><INDENT>sdx += '<STR_LIT:4>'<EOL><DEDENT>else:<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT><DEDENT>elif word[i] == '<STR_LIT:X>':<EOL><INDENT>if _after(word, i, {'<STR_LIT:C>', '<STR_LIT>', '<STR_LIT>'}):<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT><DEDENT>elif word[i] == '<STR_LIT:L>':<EOL><INDENT>sdx += '<STR_LIT:5>'<EOL><DEDENT>elif word[i] in {'<STR_LIT:M>', '<STR_LIT:N>'}:<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT>elif word[i] == '<STR_LIT:R>':<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT>elif word[i] in {'<STR_LIT:S>', '<STR_LIT>'}:<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT><DEDENT>sdx = self._delete_consecutive_repeats(sdx)<EOL>if sdx:<EOL><INDENT>sdx = sdx[:<NUM_LIT:1>] + sdx[<NUM_LIT:1>:].replace('<STR_LIT:0>', '<STR_LIT>')<EOL><DEDENT>return sdx<EOL>", "docstring": "Return the K\u00f6lner Phonetik (numeric output) code for a word.\n\n        While the output code is numeric, it is still a str because 0s can lead\n        the code.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform\n\n        Returns\n        -------\n        str\n            The K\u00f6lner Phonetik value as a numeric string\n\n        Example\n        -------\n        >>> pe = Koelner()\n        >>> pe.encode('Christopher')\n        '478237'\n        >>> pe.encode('Niall')\n        '65'\n        >>> pe.encode('Smith')\n        '862'\n        >>> pe.encode('Schmidt')\n        '862'\n        >>> pe.encode('M\u00fcller')\n        '657'\n        >>> pe.encode('Zimmermann')\n        '86766'", "id": "f6603:c0:m0"}
{"signature": "def koelner_phonetik_num_to_alpha(num):", "body": "return Koelner()._to_alpha(num)<EOL>", "docstring": "Convert a K\u00f6lner Phonetik code from numeric to alphabetic.\n\n    This is a wrapper for :py:meth:`Koelner._to_alpha`.\n\n    Parameters\n    ----------\n    num : str or int\n        A numeric K\u00f6lner Phonetik representation\n\n    Returns\n    -------\n    str\n        An alphabetic representation of the same word\n\n    Examples\n    --------\n    >>> koelner_phonetik_num_to_alpha('862')\n    'SNT'\n    >>> koelner_phonetik_num_to_alpha('657')\n    'NLR'\n    >>> koelner_phonetik_num_to_alpha('86766')\n    'SNRNN'", "id": "f6603:m1"}
{"signature": "def _to_alpha(self, num):", "body": "num = '<STR_LIT>'.join(c for c in text_type(num) if c in self._num_set)<EOL>return num.translate(self._num_trans)<EOL>", "docstring": "Convert a K\u00f6lner Phonetik code from numeric to alphabetic.\n\n        Parameters\n        ----------\n        num : str or int\n            A numeric K\u00f6lner Phonetik representation\n\n        Returns\n        -------\n        str\n            An alphabetic representation of the same word\n\n        Examples\n        --------\n        >>> pe = Koelner()\n        >>> pe._to_alpha('862')\n        'SNT'\n        >>> pe._to_alpha('657')\n        'NLR'\n        >>> pe._to_alpha('86766')\n        'SNRNN'", "id": "f6603:c0:m1"}
{"signature": "def encode(self, word, primary_only=False):", "body": "def _after(word, pos, letters):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if pos > <NUM_LIT:0> and word[pos - <NUM_LIT:1>] in letters:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL><DEDENT>def _before(word, pos, letters):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if pos + <NUM_LIT:1> < len(word) and word[pos + <NUM_LIT:1>] in letters:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL><DEDENT>word = unicode_normalize('<STR_LIT>', text_type(word.upper()))<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = word.replace('<STR_LIT>', '<STR_LIT>')<EOL>word = '<STR_LIT>'.join(c for c in word if c in self._uc_set)<EOL>variants = []<EOL>if primary_only:<EOL><INDENT>variants = [word]<EOL><DEDENT>else:<EOL><INDENT>pos = <NUM_LIT:0><EOL>if word[:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>variants.append(('<STR_LIT>', '<STR_LIT>'))<EOL>pos += <NUM_LIT:2><EOL><DEDENT>len_3_vars = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:O>',<EOL>'<STR_LIT>': '<STR_LIT:O>',<EOL>}<EOL>while pos < len(word):<EOL><INDENT>if word[pos : pos + <NUM_LIT:4>] == '<STR_LIT>':<EOL><INDENT>variants.append(('<STR_LIT>', '<STR_LIT:I>'))<EOL>pos += <NUM_LIT:4><EOL><DEDENT>elif word[pos : pos + <NUM_LIT:3>] in len_3_vars:<EOL><INDENT>variants.append(<EOL>(word[pos : pos + <NUM_LIT:3>], len_3_vars[word[pos : pos + <NUM_LIT:3>]])<EOL>)<EOL>pos += <NUM_LIT:3><EOL><DEDENT>elif word[pos : pos + <NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>variants.append(('<STR_LIT>', '<STR_LIT>'))<EOL>pos += <NUM_LIT:2><EOL><DEDENT>elif len(word[pos:]) == <NUM_LIT:3> and word[pos:] == '<STR_LIT>':<EOL><INDENT>variants.append(('<STR_LIT>', '<STR_LIT:O>'))<EOL>pos += <NUM_LIT:3><EOL><DEDENT>elif len(word[pos:]) == <NUM_LIT:1> and word[pos:] in 
{'<STR_LIT:A>', '<STR_LIT:O>'}:<EOL><INDENT>if word[pos:] == '<STR_LIT:O>':<EOL><INDENT>variants.append(('<STR_LIT:O>', '<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>variants.append(('<STR_LIT:A>', '<STR_LIT>'))<EOL><DEDENT>pos += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>variants.append((word[pos],))<EOL>pos += <NUM_LIT:1><EOL><DEDENT><DEDENT>variants = ['<STR_LIT>'.join(letters) for letters in product(*variants)]<EOL><DEDENT>def _haase_code(word):<EOL><INDENT>sdx = '<STR_LIT>'<EOL>for i in range(len(word)):<EOL><INDENT>if word[i] in self._uc_v_set:<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT>elif word[i] == '<STR_LIT:B>':<EOL><INDENT>sdx += '<STR_LIT:1>'<EOL><DEDENT>elif word[i] == '<STR_LIT:P>':<EOL><INDENT>if _before(word, i, {'<STR_LIT:H>'}):<EOL><INDENT>sdx += '<STR_LIT:3>'<EOL><DEDENT>else:<EOL><INDENT>sdx += '<STR_LIT:1>'<EOL><DEDENT><DEDENT>elif word[i] in {'<STR_LIT:D>', '<STR_LIT:T>'}:<EOL><INDENT>if _before(word, i, {'<STR_LIT:C>', '<STR_LIT:S>', '<STR_LIT>'}):<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>sdx += '<STR_LIT:2>'<EOL><DEDENT><DEDENT>elif word[i] in {'<STR_LIT:F>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>sdx += '<STR_LIT:3>'<EOL><DEDENT>elif word[i] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>sdx += '<STR_LIT:4>'<EOL><DEDENT>elif word[i] == '<STR_LIT:C>':<EOL><INDENT>if _after(word, i, {'<STR_LIT:S>', '<STR_LIT>'}):<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT>elif i == <NUM_LIT:0>:<EOL><INDENT>if _before(<EOL>word,<EOL>i,<EOL>{'<STR_LIT:A>', '<STR_LIT:H>', '<STR_LIT>', '<STR_LIT:L>', '<STR_LIT:O>', '<STR_LIT>', '<STR_LIT:R>', '<STR_LIT>', '<STR_LIT:X>'},<EOL>):<EOL><INDENT>sdx += '<STR_LIT:4>'<EOL><DEDENT>else:<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT><DEDENT>elif _before(word, i, {'<STR_LIT:A>', '<STR_LIT:H>', '<STR_LIT>', '<STR_LIT:O>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:X>'}):<EOL><INDENT>sdx += '<STR_LIT:4>'<EOL><DEDENT>else:<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT><DEDENT>elif word[i] == 
'<STR_LIT:X>':<EOL><INDENT>if _after(word, i, {'<STR_LIT:C>', '<STR_LIT>', '<STR_LIT>'}):<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT><DEDENT>elif word[i] == '<STR_LIT:L>':<EOL><INDENT>sdx += '<STR_LIT:5>'<EOL><DEDENT>elif word[i] in {'<STR_LIT:M>', '<STR_LIT:N>'}:<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT>elif word[i] == '<STR_LIT:R>':<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT>elif word[i] in {'<STR_LIT:S>', '<STR_LIT>'}:<EOL><INDENT>sdx += '<STR_LIT>'<EOL><DEDENT><DEDENT>sdx = self._delete_consecutive_repeats(sdx)<EOL>return sdx<EOL><DEDENT>encoded = tuple(_haase_code(word) for word in variants)<EOL>if len(encoded) > <NUM_LIT:1>:<EOL><INDENT>encoded_set = set()<EOL>encoded_single = []<EOL>for code in encoded:<EOL><INDENT>if code not in encoded_set:<EOL><INDENT>encoded_set.add(code)<EOL>encoded_single.append(code)<EOL><DEDENT><DEDENT>return tuple(encoded_single)<EOL><DEDENT>return encoded<EOL>", "docstring": "Return the Haase Phonetik (numeric output) code for a word.\n\n        While the output code is numeric, it is nevertheless a str.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform\n        primary_only : bool\n            If True, only the primary code is returned\n\n        Returns\n        -------\n        tuple\n            The Haase Phonetik value as a numeric string\n\n        Examples\n        --------\n        >>> pe = Haase()\n        >>> pe.encode('Joachim')\n        ('9496',)\n        >>> pe.encode('Christoph')\n        ('4798293', '8798293')\n        >>> pe.encode('J\u00f6rg')\n        ('974',)\n        >>> pe.encode('Smith')\n        ('8692',)\n        >>> pe.encode('Schmidt')\n        ('8692', '4692')", "id": "f6604:c0:m0"}
{"signature": "def encode(self, word, max_length=-<NUM_LIT:1>):", "body": "<EOL>if max_length != -<NUM_LIT:1>:<EOL><INDENT>max_length = max(<NUM_LIT:4>, max_length)<EOL><DEDENT>else:<EOL><INDENT>max_length = <NUM_LIT:64><EOL><DEDENT>ename = '<STR_LIT>'.join(c for c in word.upper() if c.isalnum())<EOL>ename = ename.replace('<STR_LIT>', '<STR_LIT>')<EOL>if not ename:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if ename[<NUM_LIT:0>:<NUM_LIT:2>] in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>ename = ename[<NUM_LIT:1>:]<EOL><DEDENT>elif ename[<NUM_LIT:0>] == '<STR_LIT:X>':<EOL><INDENT>ename = '<STR_LIT:S>' + ename[<NUM_LIT:1>:]<EOL><DEDENT>elif ename[<NUM_LIT:0>:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>ename = '<STR_LIT>' + ename[<NUM_LIT:2>:]<EOL><DEDENT>elen = len(ename) - <NUM_LIT:1><EOL>metaph = '<STR_LIT>'<EOL>for i in range(len(ename)):<EOL><INDENT>if len(metaph) >= max_length:<EOL><INDENT>break<EOL><DEDENT>if (<EOL>ename[i] not in {'<STR_LIT>', '<STR_LIT:T>'}<EOL>and i > <NUM_LIT:0><EOL>and ename[i - <NUM_LIT:1>] == ename[i]<EOL>):<EOL><INDENT>continue<EOL><DEDENT>if ename[i] in self._uc_v_set and i == <NUM_LIT:0>:<EOL><INDENT>metaph = ename[i]<EOL><DEDENT>elif ename[i] == '<STR_LIT:B>':<EOL><INDENT>if i != elen or ename[i - <NUM_LIT:1>] != '<STR_LIT:M>':<EOL><INDENT>metaph += ename[i]<EOL><DEDENT><DEDENT>elif ename[i] == '<STR_LIT:C>':<EOL><INDENT>if not (<EOL>i > <NUM_LIT:0><EOL>and ename[i - <NUM_LIT:1>] == '<STR_LIT:S>'<EOL>and ename[i + <NUM_LIT:1> : i + <NUM_LIT:2>] in self._frontv<EOL>):<EOL><INDENT>if ename[i + <NUM_LIT:1> : i + <NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>metaph += '<STR_LIT:X>'<EOL><DEDENT>elif ename[i + <NUM_LIT:1> : i + <NUM_LIT:2>] in self._frontv:<EOL><INDENT>metaph += '<STR_LIT:S>'<EOL><DEDENT>elif i > <NUM_LIT:0> and ename[i - <NUM_LIT:1> : i + <NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>metaph += '<STR_LIT>'<EOL><DEDENT>elif ename[i + <NUM_LIT:1> : i + <NUM_LIT:2>] == '<STR_LIT:H>':<EOL><INDENT>if 
(<EOL>i == <NUM_LIT:0><EOL>and i + <NUM_LIT:1> < elen<EOL>and ename[i + <NUM_LIT:2> : i + <NUM_LIT:3>] not in self._uc_v_set<EOL>):<EOL><INDENT>metaph += '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>metaph += '<STR_LIT:X>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>metaph += '<STR_LIT>'<EOL><DEDENT><DEDENT><DEDENT>elif ename[i] == '<STR_LIT:D>':<EOL><INDENT>if (<EOL>ename[i + <NUM_LIT:1> : i + <NUM_LIT:2>] == '<STR_LIT>'<EOL>and ename[i + <NUM_LIT:2> : i + <NUM_LIT:3>] in self._frontv<EOL>):<EOL><INDENT>metaph += '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>metaph += '<STR_LIT:T>'<EOL><DEDENT><DEDENT>elif ename[i] == '<STR_LIT>':<EOL><INDENT>if ename[i + <NUM_LIT:1> : i + <NUM_LIT:2>] == '<STR_LIT:H>' and not (<EOL>i + <NUM_LIT:1> == elen or ename[i + <NUM_LIT:2> : i + <NUM_LIT:3>] not in self._uc_v_set<EOL>):<EOL><INDENT>continue<EOL><DEDENT>elif i > <NUM_LIT:0> and (<EOL>(i + <NUM_LIT:1> == elen and ename[i + <NUM_LIT:1>] == '<STR_LIT:N>')<EOL>or (i + <NUM_LIT:3> == elen and ename[i + <NUM_LIT:1> : i + <NUM_LIT:4>] == '<STR_LIT>')<EOL>):<EOL><INDENT>continue<EOL><DEDENT>elif (<EOL>i - <NUM_LIT:1> > <NUM_LIT:0><EOL>and i + <NUM_LIT:1> <= elen<EOL>and ename[i - <NUM_LIT:1>] == '<STR_LIT:D>'<EOL>and ename[i + <NUM_LIT:1>] in self._frontv<EOL>):<EOL><INDENT>continue<EOL><DEDENT>elif ename[i + <NUM_LIT:1> : i + <NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>elif ename[i + <NUM_LIT:1> : i + <NUM_LIT:2>] in self._frontv:<EOL><INDENT>if i == <NUM_LIT:0> or ename[i - <NUM_LIT:1>] != '<STR_LIT>':<EOL><INDENT>metaph += '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>metaph += '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>metaph += '<STR_LIT>'<EOL><DEDENT><DEDENT>elif ename[i] == '<STR_LIT:H>':<EOL><INDENT>if (<EOL>i > <NUM_LIT:0><EOL>and ename[i - <NUM_LIT:1>] in self._uc_v_set<EOL>and ename[i + <NUM_LIT:1> : i + <NUM_LIT:2>] not in self._uc_v_set<EOL>):<EOL><INDENT>continue<EOL><DEDENT>elif i > <NUM_LIT:0> and ename[i - <NUM_LIT:1>] in 
self._varson:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>metaph += '<STR_LIT:H>'<EOL><DEDENT><DEDENT>elif ename[i] in {'<STR_LIT:F>', '<STR_LIT>', '<STR_LIT:L>', '<STR_LIT:M>', '<STR_LIT:N>', '<STR_LIT:R>'}:<EOL><INDENT>metaph += ename[i]<EOL><DEDENT>elif ename[i] == '<STR_LIT>':<EOL><INDENT>if i > <NUM_LIT:0> and ename[i - <NUM_LIT:1>] == '<STR_LIT:C>':<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>metaph += '<STR_LIT>'<EOL><DEDENT><DEDENT>elif ename[i] == '<STR_LIT:P>':<EOL><INDENT>if ename[i + <NUM_LIT:1> : i + <NUM_LIT:2>] == '<STR_LIT:H>':<EOL><INDENT>metaph += '<STR_LIT:F>'<EOL><DEDENT>else:<EOL><INDENT>metaph += '<STR_LIT:P>'<EOL><DEDENT><DEDENT>elif ename[i] == '<STR_LIT>':<EOL><INDENT>metaph += '<STR_LIT>'<EOL><DEDENT>elif ename[i] == '<STR_LIT:S>':<EOL><INDENT>if (<EOL>i > <NUM_LIT:0><EOL>and i + <NUM_LIT:2> <= elen<EOL>and ename[i + <NUM_LIT:1>] == '<STR_LIT:I>'<EOL>and ename[i + <NUM_LIT:2>] in '<STR_LIT>'<EOL>):<EOL><INDENT>metaph += '<STR_LIT:X>'<EOL><DEDENT>elif ename[i + <NUM_LIT:1> : i + <NUM_LIT:2>] == '<STR_LIT:H>':<EOL><INDENT>metaph += '<STR_LIT:X>'<EOL><DEDENT>else:<EOL><INDENT>metaph += '<STR_LIT:S>'<EOL><DEDENT><DEDENT>elif ename[i] == '<STR_LIT:T>':<EOL><INDENT>if (<EOL>i > <NUM_LIT:0><EOL>and i + <NUM_LIT:2> <= elen<EOL>and ename[i + <NUM_LIT:1>] == '<STR_LIT:I>'<EOL>and ename[i + <NUM_LIT:2>] in {'<STR_LIT:A>', '<STR_LIT:O>'}<EOL>):<EOL><INDENT>metaph += '<STR_LIT:X>'<EOL><DEDENT>elif ename[i + <NUM_LIT:1> : i + <NUM_LIT:2>] == '<STR_LIT:H>':<EOL><INDENT>metaph += '<STR_LIT:0>'<EOL><DEDENT>elif ename[i + <NUM_LIT:1> : i + <NUM_LIT:3>] != '<STR_LIT>':<EOL><INDENT>if ename[i - <NUM_LIT:1> : i] != '<STR_LIT:T>':<EOL><INDENT>metaph += '<STR_LIT:T>'<EOL><DEDENT><DEDENT><DEDENT>elif ename[i] == '<STR_LIT>':<EOL><INDENT>metaph += '<STR_LIT:F>'<EOL><DEDENT>elif ename[i] in '<STR_LIT>':<EOL><INDENT>if ename[i + <NUM_LIT:1> : i + <NUM_LIT:2>] in self._uc_v_set:<EOL><INDENT>metaph += ename[i]<EOL><DEDENT><DEDENT>elif ename[i] == 
'<STR_LIT:X>':<EOL><INDENT>metaph += '<STR_LIT>'<EOL><DEDENT>elif ename[i] == '<STR_LIT>':<EOL><INDENT>metaph += '<STR_LIT:S>'<EOL><DEDENT><DEDENT>return metaph<EOL>", "docstring": "Return the Metaphone code for a word.\n\n        Based on Lawrence Philips' Pick BASIC code from 1990\n        :cite:`Philips:1990`, as described in :cite:`Philips:1990b`.\n        This incorporates some corrections to the above code, particularly\n        some of those suggested by Michael Kuhn in :cite:`Kuhn:1995`.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform\n        max_length : int\n            The maximum length of the returned Metaphone code (defaults to 64,\n            but in Philips' original implementation this was 4)\n\n        Returns\n        -------\n        str\n            The Metaphone value\n\n        Examples\n        --------\n        >>> pe = Metaphone()\n        >>> pe.encode('Christopher')\n        'KRSTFR'\n        >>> pe.encode('Niall')\n        'NL'\n        >>> pe.encode('Smith')\n        'SM0'\n        >>> pe.encode('Schmidt')\n        'SKMTT'", "id": "f6606:c0:m0"}
{"signature": "def encode(self, word):", "body": "<EOL>word = unicode_normalize('<STR_LIT>', text_type(word.upper()))<EOL>word = word.translate({<NUM_LIT>: '<STR_LIT>', <NUM_LIT>: '<STR_LIT>'})<EOL>word = '<STR_LIT>'.join(c for c in word if c in self._uc_set)<EOL>for rule in self._rule_order:<EOL><INDENT>regex, repl = self._rule_table[rule]<EOL>if isinstance(regex, text_type):<EOL><INDENT>word = word.replace(regex, repl)<EOL><DEDENT>else:<EOL><INDENT>word = regex.sub(repl, word)<EOL><DEDENT><DEDENT>return word<EOL>", "docstring": "Return the FONEM code of a word.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform\n\n        Returns\n        -------\n        str\n            The FONEM code\n\n        Examples\n        --------\n        >>> pe = FONEM()\n        >>> pe.encode('Marchand')\n        'MARCHEN'\n        >>> pe.encode('Beaulieu')\n        'BOLIEU'\n        >>> pe.encode('Beaumont')\n        'BOMON'\n        >>> pe.encode('Legrand')\n        'LEGREN'\n        >>> pe.encode('Pelletier')\n        'PELETIER'", "id": "f6607:c0:m0"}
{"signature": "def fonem(word):", "body": "return FONEM().encode(word)<EOL>", "docstring": "Return the FONEM code of a word.\n\n    This is a wrapper for :py:meth:`FONEM.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n\n    Returns\n    -------\n    str\n        The FONEM code\n\n    Examples\n    --------\n    >>> fonem('Marchand')\n    'MARCHEN'\n    >>> fonem('Beaulieu')\n    'BOLIEU'\n    >>> fonem('Beaumont')\n    'BOMON'\n    >>> fonem('Legrand')\n    'LEGREN'\n    >>> fonem('Pelletier')\n    'PELETIER'", "id": "f6607:m0"}
{"signature": "def double_metaphone(word, max_length=-<NUM_LIT:1>):", "body": "return DoubleMetaphone().encode(word, max_length)<EOL>", "docstring": "Return the Double Metaphone code for a word.\n\n    This is a wrapper for :py:meth:`DoubleMetaphone.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n    max_length : int\n        The maximum length of the returned Double Metaphone codes (defaults to\n        unlimited, but in Philips' original implementation this was 4)\n\n    Returns\n    -------\n    tuple\n        The Double Metaphone value(s)\n\n    Examples\n    --------\n    >>> double_metaphone('Christopher')\n    ('KRSTFR', '')\n    >>> double_metaphone('Niall')\n    ('NL', '')\n    >>> double_metaphone('Smith')\n    ('SM0', 'XMT')\n    >>> double_metaphone('Schmidt')\n    ('XMT', 'SMT')", "id": "f6611:m0"}
{"signature": "def reth_schek_phonetik(word):", "body": "return RethSchek().encode(word)<EOL>", "docstring": "Return Reth-Schek Phonetik code for a word.\n\n    This is a wrapper for :py:meth:`RethSchek.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n\n    Returns\n    -------\n    str\n        The Reth-Schek Phonetik code\n\n    Examples\n    --------\n    >>> reth_schek_phonetik('Joachim')\n    'JOAGHIM'\n    >>> reth_schek_phonetik('Christoph')\n    'GHRISDOF'\n    >>> reth_schek_phonetik('J\u00f6rg')\n    'JOERG'\n    >>> reth_schek_phonetik('Smith')\n    'SMID'\n    >>> reth_schek_phonetik('Schmidt')\n    'SCHMID'", "id": "f6612:m0"}
{"signature": "def eudex(word, max_length=<NUM_LIT:8>):", "body": "return Eudex().encode(word, max_length)<EOL>", "docstring": "Return the eudex phonetic hash of a word.\n\n    This is a wrapper for :py:meth:`Eudex.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n    max_length : int\n        The length in bits of the code returned (default 8)\n\n    Returns\n    -------\n    int\n        The eudex hash\n\n    Examples\n    --------\n    >>> eudex('Colin')\n    432345564238053650\n    >>> eudex('Christopher')\n    433648490138894409\n    >>> eudex('Niall')\n    648518346341351840\n    >>> eudex('Smith')\n    720575940412906756\n    >>> eudex('Schmidt')\n    720589151732307997", "id": "f6614:m0"}
{"signature": "def encode(self, word, max_length=<NUM_LIT:8>):", "body": "<EOL>word = '<STR_LIT>'.join(<EOL>char for char in word.lower() if char in self._initial_phones<EOL>)<EOL>if not word:<EOL><INDENT>word = '<STR_LIT>'<EOL><DEDENT>values = [self._initial_phones[word[<NUM_LIT:0>]]]<EOL>values += [self._trailing_phones[char] for char in word[<NUM_LIT:1>:]]<EOL>shifted_values = [_ >> <NUM_LIT:1> for _ in values]<EOL>condensed_values = [values[<NUM_LIT:0>]]<EOL>for n in range(<NUM_LIT:1>, len(shifted_values)):<EOL><INDENT>if shifted_values[n] != shifted_values[n - <NUM_LIT:1>]:<EOL><INDENT>condensed_values.append(values[n])<EOL><DEDENT><DEDENT>values = (<EOL>[condensed_values[<NUM_LIT:0>]]<EOL>+ [<NUM_LIT:0>] * max(<NUM_LIT:0>, max_length - len(condensed_values))<EOL>+ condensed_values[<NUM_LIT:1>:max_length]<EOL>)<EOL>hash_value = <NUM_LIT:0><EOL>for val in values:<EOL><INDENT>hash_value = (hash_value << <NUM_LIT:8>) | val<EOL><DEDENT>return hash_value<EOL>", "docstring": "Return the eudex phonetic hash of a word.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform\n        max_length : int\n            The length in bits of the code returned (default 8)\n\n        Returns\n        -------\n        int\n            The eudex hash\n\n        Examples\n        --------\n        >>> pe = Eudex()\n        >>> pe.encode('Colin')\n        432345564238053650\n        >>> pe.encode('Christopher')\n        433648490138894409\n        >>> pe.encode('Niall')\n        648518346341351840\n        >>> pe.encode('Smith')\n        720575940412906756\n        >>> pe.encode('Schmidt')\n        720589151732307997", "id": "f6614:c0:m0"}
{"signature": "def encode(self, word):", "body": "word = word.upper()<EOL>code = '<STR_LIT>'<EOL>skip = <NUM_LIT:0><EOL>if word[<NUM_LIT:0>:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>code = '<STR_LIT>'<EOL>skip = <NUM_LIT:2><EOL><DEDENT>elif word[<NUM_LIT:0>:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>code = '<STR_LIT>'<EOL>skip = <NUM_LIT:2><EOL><DEDENT>elif word[<NUM_LIT:0>:<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>code = '<STR_LIT:X>'<EOL>skip = <NUM_LIT:3><EOL><DEDENT>elif word[<NUM_LIT:0>:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>code = '<STR_LIT>'<EOL>skip = <NUM_LIT:2><EOL><DEDENT>elif word[<NUM_LIT:0>:<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>code = '<STR_LIT:X>'<EOL>skip = <NUM_LIT:2><EOL><DEDENT>elif word[:<NUM_LIT:1>] == '<STR_LIT:C>':<EOL><INDENT>code = '<STR_LIT>'<EOL>skip = <NUM_LIT:1><EOL><DEDENT>elif word[:<NUM_LIT:1>] == '<STR_LIT>':<EOL><INDENT>code = '<STR_LIT>'<EOL>skip = <NUM_LIT:1><EOL><DEDENT>elif word[:<NUM_LIT:1>] == '<STR_LIT>':<EOL><INDENT>code = '<STR_LIT>'<EOL>skip = <NUM_LIT:1><EOL><DEDENT>if word[-<NUM_LIT:2>:] == '<STR_LIT>':<EOL><INDENT>word = word[:-<NUM_LIT:2>] + '<STR_LIT:T>'<EOL><DEDENT>elif word[-<NUM_LIT:2>:-<NUM_LIT:1>] in self._uc_v_set and word[-<NUM_LIT:1>:] == '<STR_LIT:D>':<EOL><INDENT>word = word[:-<NUM_LIT:2>]<EOL><DEDENT>for pos, char in enumerate(word):<EOL><INDENT>if skip:<EOL><INDENT>skip -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>for length in sorted(self._replacements, reverse=True):<EOL><INDENT>if word[pos : pos + length] in self._replacements[length]:<EOL><INDENT>code += self._replacements[length][<EOL>word[pos : pos + length]<EOL>]<EOL>skip = length - <NUM_LIT:1><EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if not pos or char not in self._uc_v_set:<EOL><INDENT>code += char<EOL><DEDENT><DEDENT><DEDENT><DEDENT>code = self._delete_consecutive_repeats(code)<EOL>return code<EOL>", "docstring": "Return the Norphone code.\n\n        Parameters\n        ----------\n        word : str\n            The word to 
transform\n\n        Returns\n        -------\n        str\n            The Norphone code\n\n        Examples\n        --------\n        >>> pe = Norphone()\n        >>> pe.encode('Hansen')\n        'HNSN'\n        >>> pe.encode('Larsen')\n        'LRSN'\n        >>> pe.encode('Aagaard')\n        '\u00c5KRT'\n        >>> pe.encode('Braaten')\n        'BRTN'\n        >>> pe.encode('Sandvik')\n        'SNVK'", "id": "f6615:c0:m0"}
{"signature": "def norphone(word):", "body": "return Norphone().encode(word)<EOL>", "docstring": "Return the Norphone code.\n\n    This is a wrapper for :py:meth:`Norphone.encode`.\n\n    Parameters\n    ----------\n    word : str\n        The word to transform\n\n    Returns\n    -------\n    str\n        The Norphone code\n\n    Examples\n    --------\n    >>> norphone('Hansen')\n    'HNSN'\n    >>> norphone('Larsen')\n    'LRSN'\n    >>> norphone('Aagaard')\n    '\u00c5KRT'\n    >>> norphone('Braaten')\n    'BRTN'\n    >>> norphone('Sandvik')\n    'SNVK'", "id": "f6615:m0"}
{"signature": "def dist(self, src, tar, cost=(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>), local=False):", "body": "if src == tar:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>mismatch_cost = cost[<NUM_LIT:2>]<EOL>return self.dist_abs(src, tar, cost, local) / (<EOL>max(len(src) * mismatch_cost, len(tar) * mismatch_cost)<EOL>)<EOL>", "docstring": "Return the normalized Editex distance between two strings.\n\n        The Editex distance is normalized by dividing the Editex distance\n        (calculated by any of the three supported methods) by the greater of\n        the number of characters in src times the cost of a delete and\n        the number of characters in tar times the cost of an insert.\n        For the case in which all operations have :math:`cost = 1`, this is\n        equivalent to the greater of the length of the two strings src & tar.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        cost : tuple\n            A 3-tuple representing the cost of the four possible edits: match,\n            same-group, and mismatch respectively (by default: (0, 1, 2))\n        local : bool\n            If True, the local variant of Editex is used\n\n        Returns\n        -------\n        int\n            Normalized Editex distance\n\n        Examples\n        --------\n        >>> cmp = Editex()\n        >>> round(cmp.dist('cat', 'hat'), 12)\n        0.333333333333\n        >>> round(cmp.dist('Niall', 'Neil'), 12)\n        0.2\n        >>> cmp.dist('aluminum', 'Catalan')\n        0.75\n        >>> cmp.dist('ATCG', 'TAGC')\n        0.75", "id": "f6617:c0:m1"}
{"signature": "def dist_editex(src, tar, cost=(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>), local=False):", "body": "return Editex().dist(src, tar, cost, local)<EOL>", "docstring": "Return the normalized Editex distance between two strings.\n\n    This is a wrapper for :py:meth:`Editex.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    cost : tuple\n        A 3-tuple representing the cost of the four possible edits: match,\n        same-group, and mismatch respectively (by default: (0, 1, 2))\n    local : bool\n        If True, the local variant of Editex is used\n\n    Returns\n    -------\n    int\n        Normalized Editex distance\n\n    Examples\n    --------\n    >>> round(dist_editex('cat', 'hat'), 12)\n    0.333333333333\n    >>> round(dist_editex('Niall', 'Neil'), 12)\n    0.2\n    >>> dist_editex('aluminum', 'Catalan')\n    0.75\n    >>> dist_editex('ATCG', 'TAGC')\n    0.75", "id": "f6617:m1"}
{"signature": "def sim_editex(src, tar, cost=(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>), local=False):", "body": "return Editex().sim(src, tar, cost, local)<EOL>", "docstring": "Return the normalized Editex similarity of two strings.\n\n    This is a wrapper for :py:meth:`Editex.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    cost : tuple\n        A 3-tuple representing the cost of the four possible edits: match,\n        same-group, and mismatch respectively (by default: (0, 1, 2))\n    local : bool\n        If True, the local variant of Editex is used\n\n    Returns\n    -------\n    int\n        Normalized Editex similarity\n\n    Examples\n    --------\n    >>> round(sim_editex('cat', 'hat'), 12)\n    0.666666666667\n    >>> round(sim_editex('Niall', 'Neil'), 12)\n    0.8\n    >>> sim_editex('aluminum', 'Catalan')\n    0.25\n    >>> sim_editex('ATCG', 'TAGC')\n    0.25", "id": "f6617:m2"}
{"signature": "def dist(self, *args, **kwargs):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Raise exception when called.\n\n        Parameters\n        ----------\n        *args\n            Variable length argument list\n        **kwargs\n            Arbitrary keyword arguments\n\n        Raises\n        ------\n        NotImplementedError\n            Method disabled for Chebyshev distance", "id": "f6618:c0:m2"}
{"signature": "def dist(self, src, tar):", "body": "if src == tar:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>src_comp = self._rle.encode(src)<EOL>tar_comp = self._rle.encode(tar)<EOL>concat_comp = self._rle.encode(src + tar)<EOL>concat_comp2 = self._rle.encode(tar + src)<EOL>return (<EOL>min(len(concat_comp), len(concat_comp2))<EOL>- min(len(src_comp), len(tar_comp))<EOL>) / max(len(src_comp), len(tar_comp))<EOL>", "docstring": "Return the NCD between two strings using RLE.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n\n        Returns\n        -------\n        float\n            Compression distance\n\n        Examples\n        --------\n        >>> cmp = NCDrle()\n        >>> cmp.dist('cat', 'hat')\n        1.0\n        >>> cmp.dist('Niall', 'Neil')\n        1.0\n        >>> cmp.dist('aluminum', 'Catalan')\n        1.0\n        >>> cmp.dist('ATCG', 'TAGC')\n        1.0", "id": "f6620:c0:m0"}
{"signature": "def dist_ncd_rle(src, tar):", "body": "return NCDrle().dist(src, tar)<EOL>", "docstring": "Return the NCD between two strings using RLE.\n\n    This is a wrapper for :py:meth:`NCDrle.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Compression distance\n\n    Examples\n    --------\n    >>> dist_ncd_rle('cat', 'hat')\n    1.0\n    >>> dist_ncd_rle('Niall', 'Neil')\n    1.0\n    >>> dist_ncd_rle('aluminum', 'Catalan')\n    1.0\n    >>> dist_ncd_rle('ATCG', 'TAGC')\n    1.0", "id": "f6620:m0"}
{"signature": "def sim_ncd_rle(src, tar):", "body": "return NCDrle().sim(src, tar)<EOL>", "docstring": "Return the NCD similarity between two strings using RLE.\n\n    This is a wrapper for :py:meth:`NCDrle.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Compression similarity\n\n    Examples\n    --------\n    >>> sim_ncd_rle('cat', 'hat')\n    0.0\n    >>> sim_ncd_rle('Niall', 'Neil')\n    0.0\n    >>> sim_ncd_rle('aluminum', 'Catalan')\n    0.0\n    >>> sim_ncd_rle('ATCG', 'TAGC')\n    0.0", "id": "f6620:m1"}
{"signature": "def __init__(self, level=zlib.Z_DEFAULT_COMPRESSION):", "body": "self._compressor = zlib.compressobj(level)<EOL>", "docstring": "Initialize zlib compressor.\n\n        Parameters\n        ----------\n        level : int\n            The compression level (0 to 9)", "id": "f6621:c0:m0"}
{"signature": "def dist(self, src, tar):", "body": "if src == tar:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>src = src.encode('<STR_LIT:utf-8>')<EOL>tar = tar.encode('<STR_LIT:utf-8>')<EOL>self._compressor.compress(src)<EOL>src_comp = self._compressor.flush(zlib.Z_FULL_FLUSH)<EOL>self._compressor.compress(tar)<EOL>tar_comp = self._compressor.flush(zlib.Z_FULL_FLUSH)<EOL>self._compressor.compress(src + tar)<EOL>concat_comp = self._compressor.flush(zlib.Z_FULL_FLUSH)<EOL>self._compressor.compress(tar + src)<EOL>concat_comp2 = self._compressor.flush(zlib.Z_FULL_FLUSH)<EOL>return (<EOL>min(len(concat_comp), len(concat_comp2))<EOL>- min(len(src_comp), len(tar_comp))<EOL>) / max(len(src_comp), len(tar_comp))<EOL>", "docstring": "Return the NCD between two strings using zlib compression.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n\n        Returns\n        -------\n        float\n            Compression distance\n\n        Examples\n        --------\n        >>> cmp = NCDzlib()\n        >>> cmp.dist('cat', 'hat')\n        0.3333333333333333\n        >>> cmp.dist('Niall', 'Neil')\n        0.45454545454545453\n        >>> cmp.dist('aluminum', 'Catalan')\n        0.5714285714285714\n        >>> cmp.dist('ATCG', 'TAGC')\n        0.4", "id": "f6621:c0:m1"}
{"signature": "def dist_euclidean(src, tar, qval=<NUM_LIT:2>, alphabet=None):", "body": "return Euclidean().dist(src, tar, qval, alphabet)<EOL>", "docstring": "Return the normalized Euclidean distance between two strings.\n\n    This is a wrapper for :py:meth:`Euclidean.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string (or QGrams/Counter objects) for comparison\n    tar : str\n        Target string (or QGrams/Counter objects) for comparison\n    qval : int\n        The length of each q-gram; 0 for non-q-gram version\n    alphabet : collection or int\n        The values or size of the alphabet\n\n    Returns\n    -------\n    float\n        The normalized Euclidean distance\n\n    Examples\n    --------\n    >>> round(dist_euclidean('cat', 'hat'), 12)\n    0.57735026919\n    >>> round(dist_euclidean('Niall', 'Neil'), 12)\n    0.683130051064\n    >>> round(dist_euclidean('Colin', 'Cuilen'), 12)\n    0.727606875109\n    >>> dist_euclidean('ATCG', 'TAGC')\n    1.0", "id": "f6622:m1"}
{"signature": "def dist_abs(self, src, tar, qval=<NUM_LIT:2>, normalized=False, alphabet=None):", "body": "return super(self.__class__, self).dist_abs(<EOL>src, tar, qval, <NUM_LIT:2>, normalized, alphabet<EOL>)<EOL>", "docstring": "Return the Euclidean distance between two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string (or QGrams/Counter objects) for comparison\n        tar : str\n            Target string (or QGrams/Counter objects) for comparison\n        qval : int\n            The length of each q-gram; 0 for non-q-gram version\n        normalized : bool\n            Normalizes to [0, 1] if True\n        alphabet : collection or int\n            The values or size of the alphabet\n\n        Returns\n        -------\n        float\n            The Euclidean distance\n\n        Examples\n        --------\n        >>> cmp = Euclidean()\n        >>> cmp.dist_abs('cat', 'hat')\n        2.0\n        >>> round(cmp.dist_abs('Niall', 'Neil'), 12)\n        2.645751311065\n        >>> cmp.dist_abs('Colin', 'Cuilen')\n        3.0\n        >>> round(cmp.dist_abs('ATCG', 'TAGC'), 12)\n        3.162277660168", "id": "f6622:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def sim_matrix(<EOL>src,<EOL>tar,<EOL>mat=None,<EOL>mismatch_cost=<NUM_LIT:0>,<EOL>match_cost=<NUM_LIT:1>,<EOL>symmetric=True,<EOL>alphabet=None,<EOL>):<DEDENT>", "body": "if alphabet:<EOL><INDENT>alphabet = tuple(alphabet)<EOL>for i in src:<EOL><INDENT>if i not in alphabet:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>for i in tar:<EOL><INDENT>if i not in alphabet:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>if src == tar:<EOL><INDENT>if mat and (src, src) in mat:<EOL><INDENT>return mat[(src, src)]<EOL><DEDENT>return match_cost<EOL><DEDENT>if mat and (src, tar) in mat:<EOL><INDENT>return mat[(src, tar)]<EOL><DEDENT>elif symmetric and mat and (tar, src) in mat:<EOL><INDENT>return mat[(tar, src)]<EOL><DEDENT>return mismatch_cost<EOL>", "docstring": "Return the matrix similarity of two strings.\n\n        With the default parameters, this is identical to sim_ident.\n        It is possible for sim_matrix to return values outside of the range\n        :math:`[0, 1]`, if values outside that range are present in mat,\n        mismatch_cost, or match_cost.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        mat : dict\n            A dict mapping tuples to costs; the tuples are (src, tar) pairs of\n            symbols from the alphabet parameter\n        mismatch_cost : float\n            The value returned if (src, tar) is absent from mat when src does\n            not equal tar\n        match_cost : float\n            The value returned if (src, tar) is absent from mat when src equals\n            tar\n        symmetric : bool\n            True if the cost of src not matching tar is identical to the cost\n            of tar not matching src; in this case, the values in mat need only\n            contain (src, tar) or (tar, src), not both\n        alphabet : str\n        
    A collection of tokens from which src and tar are drawn; if this is\n            defined a ValueError is raised if either tar or src is not found in\n            alphabet\n\n        Returns\n        -------\n        float\n            Matrix similarity\n\n        Raises\n        ------\n        ValueError\n            src value not in alphabet\n        ValueError\n            tar value not in alphabet\n\n        Examples\n        --------\n        >>> NeedlemanWunsch.sim_matrix('cat', 'hat')\n        0\n        >>> NeedlemanWunsch.sim_matrix('hat', 'hat')\n        1", "id": "f6623:c0:m0"}
{"signature": "def needleman_wunsch(src, tar, gap_cost=<NUM_LIT:1>, sim_func=sim_ident):", "body": "return NeedlemanWunsch().dist_abs(src, tar, gap_cost, sim_func)<EOL>", "docstring": "Return the Needleman-Wunsch score of two strings.\n\n    This is a wrapper for :py:meth:`NeedlemanWunsch.dist_abs`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    gap_cost : float\n        The cost of an alignment gap (1 by default)\n    sim_func : function\n        A function that returns the similarity of two characters (identity\n        similarity by default)\n\n    Returns\n    -------\n    float\n        Needleman-Wunsch score\n\n    Examples\n    --------\n    >>> needleman_wunsch('cat', 'hat')\n    2.0\n    >>> needleman_wunsch('Niall', 'Neil')\n    1.0\n    >>> needleman_wunsch('aluminum', 'Catalan')\n    -1.0\n    >>> needleman_wunsch('ATCG', 'TAGC')\n    0.0", "id": "f6623:m0"}
{"signature": "def dist_abs(self, src, tar, gap_cost=<NUM_LIT:1>, sim_func=sim_ident):", "body": "d_mat = np_zeros((len(src) + <NUM_LIT:1>, len(tar) + <NUM_LIT:1>), dtype=np_float32)<EOL>for i in range(len(src) + <NUM_LIT:1>):<EOL><INDENT>d_mat[i, <NUM_LIT:0>] = -(i * gap_cost)<EOL><DEDENT>for j in range(len(tar) + <NUM_LIT:1>):<EOL><INDENT>d_mat[<NUM_LIT:0>, j] = -(j * gap_cost)<EOL><DEDENT>for i in range(<NUM_LIT:1>, len(src) + <NUM_LIT:1>):<EOL><INDENT>for j in range(<NUM_LIT:1>, len(tar) + <NUM_LIT:1>):<EOL><INDENT>match = d_mat[i - <NUM_LIT:1>, j - <NUM_LIT:1>] + sim_func(src[i - <NUM_LIT:1>], tar[j - <NUM_LIT:1>])<EOL>delete = d_mat[i - <NUM_LIT:1>, j] - gap_cost<EOL>insert = d_mat[i, j - <NUM_LIT:1>] - gap_cost<EOL>d_mat[i, j] = max(match, delete, insert)<EOL><DEDENT><DEDENT>return d_mat[d_mat.shape[<NUM_LIT:0>] - <NUM_LIT:1>, d_mat.shape[<NUM_LIT:1>] - <NUM_LIT:1>]<EOL>", "docstring": "Return the Needleman-Wunsch score of two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        gap_cost : float\n            The cost of an alignment gap (1 by default)\n        sim_func : function\n            A function that returns the similarity of two characters (identity\n            similarity by default)\n\n        Returns\n        -------\n        float\n            Needleman-Wunsch score\n\n        Examples\n        --------\n        >>> cmp = NeedlemanWunsch()\n        >>> cmp.dist_abs('cat', 'hat')\n        2.0\n        >>> cmp.dist_abs('Niall', 'Neil')\n        1.0\n        >>> cmp.dist_abs('aluminum', 'Catalan')\n        -1.0\n        >>> cmp.dist_abs('ATCG', 'TAGC')\n        0.0", "id": "f6623:c0:m1"}
{"signature": "def dist_abs(self, src, tar, mode='<STR_LIT>', cost=(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>)):", "body": "ins_cost, del_cost, sub_cost, trans_cost = cost<EOL>if src == tar:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>if not src:<EOL><INDENT>return len(tar) * ins_cost<EOL><DEDENT>if not tar:<EOL><INDENT>return len(src) * del_cost<EOL><DEDENT>d_mat = np_zeros((len(src) + <NUM_LIT:1>, len(tar) + <NUM_LIT:1>), dtype=np_int)<EOL>for i in range(len(src) + <NUM_LIT:1>):<EOL><INDENT>d_mat[i, <NUM_LIT:0>] = i * del_cost<EOL><DEDENT>for j in range(len(tar) + <NUM_LIT:1>):<EOL><INDENT>d_mat[<NUM_LIT:0>, j] = j * ins_cost<EOL><DEDENT>for i in range(len(src)):<EOL><INDENT>for j in range(len(tar)):<EOL><INDENT>d_mat[i + <NUM_LIT:1>, j + <NUM_LIT:1>] = min(<EOL>d_mat[i + <NUM_LIT:1>, j] + ins_cost,  <EOL>d_mat[i, j + <NUM_LIT:1>] + del_cost,  <EOL>d_mat[i, j]<EOL>+ (sub_cost if src[i] != tar[j] else <NUM_LIT:0>),  <EOL>)<EOL>if mode == '<STR_LIT>':<EOL><INDENT>if (<EOL>i + <NUM_LIT:1> > <NUM_LIT:1><EOL>and j + <NUM_LIT:1> > <NUM_LIT:1><EOL>and src[i] == tar[j - <NUM_LIT:1>]<EOL>and src[i - <NUM_LIT:1>] == tar[j]<EOL>):<EOL><INDENT>d_mat[i + <NUM_LIT:1>, j + <NUM_LIT:1>] = min(<EOL>d_mat[i + <NUM_LIT:1>, j + <NUM_LIT:1>],<EOL>d_mat[i - <NUM_LIT:1>, j - <NUM_LIT:1>] + trans_cost,<EOL>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return d_mat[len(src), len(tar)]<EOL>", "docstring": "Return the Levenshtein distance between two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        mode : str\n            Specifies a mode for computing the Levenshtein distance:\n\n                - ``lev`` (default) computes the ordinary Levenshtein distance,\n                  in which edits may include inserts, deletes, and\n                  substitutions\n                - ``osa`` computes the Optimal String Alignment distance, in\n                  which 
edits may include inserts, deletes, substitutions, and\n                  transpositions but substrings may only be edited once\n\n        cost : tuple\n            A 4-tuple representing the cost of the four possible edits:\n            inserts, deletes, substitutions, and transpositions, respectively\n            (by default: (1, 1, 1, 1))\n\n        Returns\n        -------\n        int (may return a float if cost has float values)\n            The Levenshtein distance between src & tar\n\n        Examples\n        --------\n        >>> cmp = Levenshtein()\n        >>> cmp.dist_abs('cat', 'hat')\n        1\n        >>> cmp.dist_abs('Niall', 'Neil')\n        3\n        >>> cmp.dist_abs('aluminum', 'Catalan')\n        7\n        >>> cmp.dist_abs('ATCG', 'TAGC')\n        3\n\n        >>> cmp.dist_abs('ATCG', 'TAGC', mode='osa')\n        2\n        >>> cmp.dist_abs('ACTG', 'TAGC', mode='osa')\n        4", "id": "f6624:c0:m0"}
{"signature": "def dist(self, src, tar, mode='<STR_LIT>', cost=(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>)):", "body": "if src == tar:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>ins_cost, del_cost = cost[:<NUM_LIT:2>]<EOL>return levenshtein(src, tar, mode, cost) / (<EOL>max(len(src) * del_cost, len(tar) * ins_cost)<EOL>)<EOL>", "docstring": "Return the normalized Levenshtein distance between two strings.\n\n        The Levenshtein distance is normalized by dividing the Levenshtein\n        distance (calculated by any of the three supported methods) by the\n        greater of the number of characters in src times the cost of a delete\n        and the number of characters in tar times the cost of an insert.\n        For the case in which all operations have :math:`cost = 1`, this is\n        equivalent to the greater of the length of the two strings src & tar.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        mode : str\n            Specifies a mode for computing the Levenshtein distance:\n\n                - ``lev`` (default) computes the ordinary Levenshtein distance,\n                  in which edits may include inserts, deletes, and\n                  substitutions\n                - ``osa`` computes the Optimal String Alignment distance, in\n                  which edits may include inserts, deletes, substitutions, and\n                  transpositions but substrings may only be edited once\n\n        cost : tuple\n            A 4-tuple representing the cost of the four possible edits:\n            inserts, deletes, substitutions, and transpositions, respectively\n            (by default: (1, 1, 1, 1))\n\n        Returns\n        -------\n        float\n            The normalized Levenshtein distance between src & tar\n\n        Examples\n        --------\n        >>> cmp = Levenshtein()\n        >>> round(cmp.dist('cat', 'hat'), 
12)\n        0.333333333333\n        >>> round(cmp.dist('Niall', 'Neil'), 12)\n        0.6\n        >>> cmp.dist('aluminum', 'Catalan')\n        0.875\n        >>> cmp.dist('ATCG', 'TAGC')\n        0.75", "id": "f6624:c0:m1"}
{"signature": "def dist_abs(self, src, tar, gap_open=<NUM_LIT:1>, gap_ext=<NUM_LIT>, sim_func=sim_ident):", "body": "d_mat = np_zeros((len(src) + <NUM_LIT:1>, len(tar) + <NUM_LIT:1>), dtype=np_float32)<EOL>p_mat = np_zeros((len(src) + <NUM_LIT:1>, len(tar) + <NUM_LIT:1>), dtype=np_float32)<EOL>q_mat = np_zeros((len(src) + <NUM_LIT:1>, len(tar) + <NUM_LIT:1>), dtype=np_float32)<EOL>d_mat[<NUM_LIT:0>, <NUM_LIT:0>] = <NUM_LIT:0><EOL>p_mat[<NUM_LIT:0>, <NUM_LIT:0>] = float('<STR_LIT>')<EOL>q_mat[<NUM_LIT:0>, <NUM_LIT:0>] = float('<STR_LIT>')<EOL>for i in range(<NUM_LIT:1>, len(src) + <NUM_LIT:1>):<EOL><INDENT>d_mat[i, <NUM_LIT:0>] = float('<STR_LIT>')<EOL>p_mat[i, <NUM_LIT:0>] = -gap_open - gap_ext * (i - <NUM_LIT:1>)<EOL>q_mat[i, <NUM_LIT:0>] = float('<STR_LIT>')<EOL>q_mat[i, <NUM_LIT:1>] = -gap_open<EOL><DEDENT>for j in range(<NUM_LIT:1>, len(tar) + <NUM_LIT:1>):<EOL><INDENT>d_mat[<NUM_LIT:0>, j] = float('<STR_LIT>')<EOL>p_mat[<NUM_LIT:0>, j] = float('<STR_LIT>')<EOL>p_mat[<NUM_LIT:1>, j] = -gap_open<EOL>q_mat[<NUM_LIT:0>, j] = -gap_open - gap_ext * (j - <NUM_LIT:1>)<EOL><DEDENT>for i in range(<NUM_LIT:1>, len(src) + <NUM_LIT:1>):<EOL><INDENT>for j in range(<NUM_LIT:1>, len(tar) + <NUM_LIT:1>):<EOL><INDENT>sim_val = sim_func(src[i - <NUM_LIT:1>], tar[j - <NUM_LIT:1>])<EOL>d_mat[i, j] = max(<EOL>d_mat[i - <NUM_LIT:1>, j - <NUM_LIT:1>] + sim_val,<EOL>p_mat[i - <NUM_LIT:1>, j - <NUM_LIT:1>] + sim_val,<EOL>q_mat[i - <NUM_LIT:1>, j - <NUM_LIT:1>] + sim_val,<EOL>)<EOL>p_mat[i, j] = max(<EOL>d_mat[i - <NUM_LIT:1>, j] - gap_open, p_mat[i - <NUM_LIT:1>, j] - gap_ext<EOL>)<EOL>q_mat[i, j] = max(<EOL>d_mat[i, j - <NUM_LIT:1>] - gap_open, q_mat[i, j - <NUM_LIT:1>] - gap_ext<EOL>)<EOL><DEDENT><DEDENT>i, j = (n - <NUM_LIT:1> for n in d_mat.shape)<EOL>return max(d_mat[i, j], p_mat[i, j], q_mat[i, j])<EOL>", "docstring": "Return the Gotoh score of two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n  
          Target string for comparison\n        gap_open : float\n            The cost of an open alignment gap (1 by default)\n        gap_ext : float\n            The cost of an alignment gap extension (0.4 by default)\n        sim_func : function\n            A function that returns the similarity of two characters (identity\n            similarity by default)\n\n        Returns\n        -------\n        float\n            Gotoh score\n\n        Examples\n        --------\n        >>> cmp = Gotoh()\n        >>> cmp.dist_abs('cat', 'hat')\n        2.0\n        >>> cmp.dist_abs('Niall', 'Neil')\n        1.0\n        >>> round(cmp.dist_abs('aluminum', 'Catalan'), 12)\n        -0.4\n        >>> cmp.dist_abs('cat', 'hat')\n        2.0", "id": "f6625:c0:m0"}
{"signature": "def sim_strcmp95(src, tar, long_strings=False):", "body": "return Strcmp95().sim(src, tar, long_strings)<EOL>", "docstring": "Return the strcmp95 similarity of two strings.\n\n    This is a wrapper for :py:meth:`Strcmp95.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    long_strings : bool\n        Set to True to increase the probability of a match when the number of\n        matched characters is large. This option allows for a little more\n        tolerance when the strings are large. It is not an appropriate test\n        when comparing fixed length fields such as phone and social security\n        numbers.\n\n    Returns\n    -------\n    float\n        Strcmp95 similarity\n\n    Examples\n    --------\n    >>> sim_strcmp95('cat', 'hat')\n    0.7777777777777777\n    >>> sim_strcmp95('Niall', 'Neil')\n    0.8454999999999999\n    >>> sim_strcmp95('aluminum', 'Catalan')\n    0.6547619047619048\n    >>> sim_strcmp95('ATCG', 'TAGC')\n    0.8333333333333334", "id": "f6626:m0"}
{"signature": "def dist_strcmp95(src, tar, long_strings=False):", "body": "return Strcmp95().dist(src, tar, long_strings)<EOL>", "docstring": "Return the strcmp95 distance between two strings.\n\n    This is a wrapper for :py:meth:`Strcmp95.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    long_strings : bool\n        Set to True to increase the probability of a match when the number of\n        matched characters is large. This option allows for a little more\n        tolerance when the strings are large. It is not an appropriate test\n        when comparing fixed length fields such as phone and social security\n        numbers.\n\n    Returns\n    -------\n    float\n        Strcmp95 distance\n\n    Examples\n    --------\n    >>> round(dist_strcmp95('cat', 'hat'), 12)\n    0.222222222222\n    >>> round(dist_strcmp95('Niall', 'Neil'), 12)\n    0.1545\n    >>> round(dist_strcmp95('aluminum', 'Catalan'), 12)\n    0.345238095238\n    >>> round(dist_strcmp95('ATCG', 'TAGC'), 12)\n    0.166666666667", "id": "f6626:m1"}
{"signature": "def sim(self, src, tar, long_strings=False):", "body": "def _in_range(char):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return <NUM_LIT> > ord(char) > <NUM_LIT:0><EOL><DEDENT>ying = src.strip().upper()<EOL>yang = tar.strip().upper()<EOL>if ying == yang:<EOL><INDENT>return <NUM_LIT:1.0><EOL><DEDENT>if not ying or not yang:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>adjwt = defaultdict(int)<EOL>for i in self._sp_mx:<EOL><INDENT>adjwt[(i[<NUM_LIT:0>], i[<NUM_LIT:1>])] = <NUM_LIT:3><EOL>adjwt[(i[<NUM_LIT:1>], i[<NUM_LIT:0>])] = <NUM_LIT:3><EOL><DEDENT>if len(ying) > len(yang):<EOL><INDENT>search_range = len(ying)<EOL>minv = len(yang)<EOL><DEDENT>else:<EOL><INDENT>search_range = len(yang)<EOL>minv = len(ying)<EOL><DEDENT>ying_flag = [<NUM_LIT:0>] * search_range<EOL>yang_flag = [<NUM_LIT:0>] * search_range<EOL>search_range = max(<NUM_LIT:0>, search_range // <NUM_LIT:2> - <NUM_LIT:1>)<EOL>num_com = <NUM_LIT:0><EOL>yl1 = len(yang) - <NUM_LIT:1><EOL>for i in range(len(ying)):<EOL><INDENT>low_lim = (i - search_range) if (i >= search_range) else <NUM_LIT:0><EOL>hi_lim = (i + search_range) if ((i + search_range) <= yl1) else yl1<EOL>for j in range(low_lim, hi_lim + <NUM_LIT:1>):<EOL><INDENT>if (yang_flag[j] == <NUM_LIT:0>) and (yang[j] == ying[i]):<EOL><INDENT>yang_flag[j] = <NUM_LIT:1><EOL>ying_flag[i] = <NUM_LIT:1><EOL>num_com += <NUM_LIT:1><EOL>break<EOL><DEDENT><DEDENT><DEDENT>if num_com == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>k = n_trans = <NUM_LIT:0><EOL>for i in range(len(ying)):<EOL><INDENT>if ying_flag[i] != <NUM_LIT:0>:<EOL><INDENT>j = <NUM_LIT:0><EOL>for j in range(k, len(yang)):  <EOL><INDENT>if yang_flag[j] != <NUM_LIT:0>:<EOL><INDENT>k = j + <NUM_LIT:1><EOL>break<EOL><DEDENT><DEDENT>if ying[i] != yang[j]:<EOL><INDENT>n_trans += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>n_trans //= <NUM_LIT:2><EOL>n_simi = <NUM_LIT:0><EOL>if minv > num_com:<EOL><INDENT>for i in range(len(ying)):<EOL><INDENT>if ying_flag[i] == <NUM_LIT:0> and 
_in_range(ying[i]):<EOL><INDENT>for j in range(len(yang)):<EOL><INDENT>if yang_flag[j] == <NUM_LIT:0> and _in_range(yang[j]):<EOL><INDENT>if (ying[i], yang[j]) in adjwt:<EOL><INDENT>n_simi += adjwt[(ying[i], yang[j])]<EOL>yang_flag[j] = <NUM_LIT:2><EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>num_sim = n_simi / <NUM_LIT> + num_com<EOL>weight = (<EOL>num_sim / len(ying)<EOL>+ num_sim / len(yang)<EOL>+ (num_com - n_trans) / num_com<EOL>)<EOL>weight /= <NUM_LIT><EOL>if weight > <NUM_LIT>:<EOL><INDENT>j = <NUM_LIT:4> if (minv >= <NUM_LIT:4>) else minv<EOL>i = <NUM_LIT:0><EOL>while (i < j) and (ying[i] == yang[i]) and (not ying[i].isdigit()):<EOL><INDENT>i += <NUM_LIT:1><EOL><DEDENT>if i:<EOL><INDENT>weight += i * <NUM_LIT:0.1> * (<NUM_LIT:1.0> - weight)<EOL><DEDENT>if (<EOL>long_strings<EOL>and (minv > <NUM_LIT:4>)<EOL>and (num_com > i + <NUM_LIT:1>)<EOL>and (<NUM_LIT:2> * num_com >= minv + i)<EOL>):<EOL><INDENT>if not ying[<NUM_LIT:0>].isdigit():<EOL><INDENT>weight += (<NUM_LIT:1.0> - weight) * (<EOL>(num_com - i - <NUM_LIT:1>) / (len(ying) + len(yang) - i * <NUM_LIT:2> + <NUM_LIT:2>)<EOL>)<EOL><DEDENT><DEDENT><DEDENT>return weight<EOL>", "docstring": "Return the strcmp95 similarity of two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        long_strings : bool\n            Set to True to increase the probability of a match when the number\n            of matched characters is large. This option allows for a little\n            more tolerance when the strings are large. 
It is not an appropriate\n            test when comparing fixed length fields such as phone and social\n            security numbers.\n\n        Returns\n        -------\n        float\n            Strcmp95 similarity\n\n        Examples\n        --------\n        >>> cmp = Strcmp95()\n        >>> cmp.sim('cat', 'hat')\n        0.7777777777777777\n        >>> cmp.sim('Niall', 'Neil')\n        0.8454999999999999\n        >>> cmp.sim('aluminum', 'Catalan')\n        0.6547619047619048\n        >>> cmp.sim('ATCG', 'TAGC')\n        0.8333333333333334", "id": "f6626:c0:m0"}
{"signature": "def lcsseq(src, tar):", "body": "return LCSseq().lcsseq(src, tar)<EOL>", "docstring": "Return the longest common subsequence of two strings.\n\n    This is a wrapper for :py:meth:`LCSseq.lcsseq`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    str\n        The longest common subsequence\n\n    Examples\n    --------\n    >>> lcsseq('cat', 'hat')\n    'at'\n    >>> lcsseq('Niall', 'Neil')\n    'Nil'\n    >>> lcsseq('aluminum', 'Catalan')\n    'aln'\n    >>> lcsseq('ATCG', 'TAGC')\n    'AC'", "id": "f6627:m0"}
{"signature": "def lcsseq(self, src, tar):", "body": "lengths = np_zeros((len(src) + <NUM_LIT:1>, len(tar) + <NUM_LIT:1>), dtype=np_int)<EOL>for i, src_char in enumerate(src):<EOL><INDENT>for j, tar_char in enumerate(tar):<EOL><INDENT>if src_char == tar_char:<EOL><INDENT>lengths[i + <NUM_LIT:1>, j + <NUM_LIT:1>] = lengths[i, j] + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>lengths[i + <NUM_LIT:1>, j + <NUM_LIT:1>] = max(<EOL>lengths[i + <NUM_LIT:1>, j], lengths[i, j + <NUM_LIT:1>]<EOL>)<EOL><DEDENT><DEDENT><DEDENT>result = '<STR_LIT>'<EOL>i, j = len(src), len(tar)<EOL>while i != <NUM_LIT:0> and j != <NUM_LIT:0>:<EOL><INDENT>if lengths[i, j] == lengths[i - <NUM_LIT:1>, j]:<EOL><INDENT>i -= <NUM_LIT:1><EOL><DEDENT>elif lengths[i, j] == lengths[i, j - <NUM_LIT:1>]:<EOL><INDENT>j -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>result = src[i - <NUM_LIT:1>] + result<EOL>i -= <NUM_LIT:1><EOL>j -= <NUM_LIT:1><EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Return the longest common subsequence of two strings.\n\n        Based on the dynamic programming algorithm from\n        http://rosettacode.org/wiki/Longest_common_subsequence\n        :cite:`rosettacode:2018b`. This is licensed GFDL 1.2.\n\n        Modifications include:\n            conversion to a numpy array in place of a list of lists\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n\n        Returns\n        -------\n        str\n            The longest common subsequence\n\n        Examples\n        --------\n        >>> sseq = LCSseq()\n        >>> sseq.lcsseq('cat', 'hat')\n        'at'\n        >>> sseq.lcsseq('Niall', 'Neil')\n        'Nil'\n        >>> sseq.lcsseq('aluminum', 'Catalan')\n        'aln'\n        >>> sseq.lcsseq('ATCG', 'TAGC')\n        'AC'", "id": "f6627:c0:m0"}
{"signature": "def sim_lcsseq(src, tar):", "body": "return LCSseq().sim(src, tar)<EOL>", "docstring": "r\"\"\"Return the longest common subsequence similarity of two strings.\n\n    This is a wrapper for :py:meth:`LCSseq.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        LCSseq similarity\n\n    Examples\n    --------\n    >>> sim_lcsseq('cat', 'hat')\n    0.6666666666666666\n    >>> sim_lcsseq('Niall', 'Neil')\n    0.6\n    >>> sim_lcsseq('aluminum', 'Catalan')\n    0.375\n    >>> sim_lcsseq('ATCG', 'TAGC')\n    0.5", "id": "f6627:m1"}
{"signature": "def sim_hamming(src, tar, diff_lens=True):", "body": "return Hamming().sim(src, tar, diff_lens)<EOL>", "docstring": "Return the normalized Hamming similarity of two strings.\n\n    This is a wrapper for :py:meth:`Hamming.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    diff_lens : bool\n        If True (default), this returns the Hamming distance for those\n        characters that have a matching character in both strings plus the\n        difference in the strings' lengths. This is equivalent to extending the\n        shorter string with obligatorily non-matching characters. If False, an\n        exception is raised in the case of strings of unequal lengths.\n\n    Returns\n    -------\n    float\n        The normalized Hamming similarity\n\n    Examples\n    --------\n    >>> round(sim_hamming('cat', 'hat'), 12)\n    0.666666666667\n    >>> sim_hamming('Niall', 'Neil')\n    0.4\n    >>> sim_hamming('aluminum', 'Catalan')\n    0.0\n    >>> sim_hamming('ATCG', 'TAGC')\n    0.0", "id": "f6628:m2"}
{"signature": "def dist(self, src, tar, diff_lens=True):", "body": "if src == tar:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>return self.dist_abs(src, tar, diff_lens) / max(len(src), len(tar))<EOL>", "docstring": "Return the normalized Hamming distance between two strings.\n\n        Hamming distance normalized to the interval [0, 1].\n\n        The Hamming distance is normalized by dividing it\n        by the greater of the number of characters in src & tar (unless\n        diff_lens is set to False, in which case an exception is raised).\n\n        The arguments are identical to those of the hamming() function.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        diff_lens : bool\n            If True (default), this returns the Hamming distance for those\n            characters that have a matching character in both strings plus the\n            difference in the strings' lengths. This is equivalent to extending\n            the shorter string with obligatorily non-matching characters. If\n            False, an exception is raised in the case of strings of unequal\n            lengths.\n\n        Returns\n        -------\n        float\n            Normalized Hamming distance\n\n        Examples\n        --------\n        >>> cmp = Hamming()\n        >>> round(cmp.dist('cat', 'hat'), 12)\n        0.333333333333\n        >>> cmp.dist('Niall', 'Neil')\n        0.6\n        >>> cmp.dist('aluminum', 'Catalan')\n        1.0\n        >>> cmp.dist('ATCG', 'TAGC')\n        1.0", "id": "f6628:c0:m1"}
{"signature": "def dist_hamming(src, tar, diff_lens=True):", "body": "return Hamming().dist(src, tar, diff_lens)<EOL>", "docstring": "Return the normalized Hamming distance between two strings.\n\n    This is a wrapper for :py:meth:`Hamming.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    diff_lens : bool\n        If True (default), this returns the Hamming distance for those\n        characters that have a matching character in both strings plus the\n        difference in the strings' lengths. This is equivalent to extending the\n        shorter string with obligatorily non-matching characters. If False, an\n        exception is raised in the case of strings of unequal lengths.\n\n    Returns\n    -------\n    float\n        The normalized Hamming distance\n\n    Examples\n    --------\n    >>> round(dist_hamming('cat', 'hat'), 12)\n    0.333333333333\n    >>> dist_hamming('Niall', 'Neil')\n    0.6\n    >>> dist_hamming('aluminum', 'Catalan')\n    1.0\n    >>> dist_hamming('ATCG', 'TAGC')\n    1.0", "id": "f6628:m1"}
{"signature": "def sim_damerau(src, tar, cost=(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>)):", "body": "return DamerauLevenshtein().sim(src, tar, cost)<EOL>", "docstring": "Return the Damerau-Levenshtein similarity of two strings.\n\n    This is a wrapper of :py:meth:`DamerauLevenshtein.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    cost : tuple\n        A 4-tuple representing the cost of the four possible edits: inserts,\n        deletes, substitutions, and transpositions, respectively (by default:\n        (1, 1, 1, 1))\n\n    Returns\n    -------\n    float\n        The normalized Damerau-Levenshtein similarity\n\n    Examples\n    --------\n    >>> round(sim_damerau('cat', 'hat'), 12)\n    0.666666666667\n    >>> round(sim_damerau('Niall', 'Neil'), 12)\n    0.4\n    >>> sim_damerau('aluminum', 'Catalan')\n    0.125\n    >>> sim_damerau('ATCG', 'TAGC')\n    0.5", "id": "f6629:m2"}
{"signature": "def damerau_levenshtein(src, tar, cost=(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>)):", "body": "return DamerauLevenshtein().dist_abs(src, tar, cost)<EOL>", "docstring": "Return the Damerau-Levenshtein distance between two strings.\n\n    This is a wrapper of :py:meth:`DamerauLevenshtein.dist_abs`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    cost : tuple\n        A 4-tuple representing the cost of the four possible edits: inserts,\n        deletes, substitutions, and transpositions, respectively (by default:\n        (1, 1, 1, 1))\n\n    Returns\n    -------\n    int (may return a float if cost has float values)\n        The Damerau-Levenshtein distance between src & tar\n\n    Examples\n    --------\n    >>> damerau_levenshtein('cat', 'hat')\n    1\n    >>> damerau_levenshtein('Niall', 'Neil')\n    3\n    >>> damerau_levenshtein('aluminum', 'Catalan')\n    7\n    >>> damerau_levenshtein('ATCG', 'TAGC')\n    2", "id": "f6629:m0"}
{"signature": "def sim_mlipns(src, tar, threshold=<NUM_LIT>, max_mismatches=<NUM_LIT:2>):", "body": "return MLIPNS().sim(src, tar, threshold, max_mismatches)<EOL>", "docstring": "Return the MLIPNS similarity of two strings.\n\n    This is a wrapper for :py:meth:`MLIPNS.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    threshold : float\n        A number [0, 1] indicating the maximum similarity score, below which\n        the strings are considered 'similar' (0.25 by default)\n    max_mismatches : int\n        A number indicating the allowable number of mismatches to remove before\n        declaring two strings not similar (2 by default)\n\n    Returns\n    -------\n    float\n        MLIPNS similarity\n\n    Examples\n    --------\n    >>> sim_mlipns('cat', 'hat')\n    1.0\n    >>> sim_mlipns('Niall', 'Neil')\n    0.0\n    >>> sim_mlipns('aluminum', 'Catalan')\n    0.0\n    >>> sim_mlipns('ATCG', 'TAGC')\n    0.0", "id": "f6630:m0"}
{"signature": "def dist_mlipns(src, tar, threshold=<NUM_LIT>, max_mismatches=<NUM_LIT:2>):", "body": "return MLIPNS().dist(src, tar, threshold, max_mismatches)<EOL>", "docstring": "Return the MLIPNS distance between two strings.\n\n    This is a wrapper for :py:meth:`MLIPNS.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    threshold : float\n        A number [0, 1] indicating the maximum similarity score, below which\n        the strings are considered 'similar' (0.25 by default)\n    max_mismatches : int\n        A number indicating the allowable number of mismatches to remove before\n        declaring two strings not similar (2 by default)\n\n    Returns\n    -------\n    float\n        MLIPNS distance\n\n    Examples\n    --------\n    >>> dist_mlipns('cat', 'hat')\n    0.0\n    >>> dist_mlipns('Niall', 'Neil')\n    1.0\n    >>> dist_mlipns('aluminum', 'Catalan')\n    1.0\n    >>> dist_mlipns('ATCG', 'TAGC')\n    1.0", "id": "f6630:m1"}
{"signature": "def dist_lcsstr(src, tar):", "body": "return LCSstr().dist(src, tar)<EOL>", "docstring": "Return the longest common substring distance between two strings.\n\n    This is a wrapper for :py:meth:`LCSstr.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        LCSstr distance\n\n    Examples\n    --------\n    >>> dist_lcsstr('cat', 'hat')\n    0.33333333333333337\n    >>> dist_lcsstr('Niall', 'Neil')\n    0.8\n    >>> dist_lcsstr('aluminum', 'Catalan')\n    0.75\n    >>> dist_lcsstr('ATCG', 'TAGC')\n    0.75", "id": "f6631:m2"}
{"signature": "def sim_monge_elkan(src, tar, sim_func=sim_levenshtein, symmetric=False):", "body": "return MongeElkan().sim(src, tar, sim_func, symmetric)<EOL>", "docstring": "Return the Monge-Elkan similarity of two strings.\n\n    This is a wrapper for :py:meth:`MongeElkan.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    sim_func : function\n        Rhe internal similarity metric to employ\n    symmetric : bool\n        Return a symmetric similarity measure\n\n    Returns\n    -------\n    float\n        Monge-Elkan similarity\n\n    Examples\n    --------\n    >>> sim_monge_elkan('cat', 'hat')\n    0.75\n    >>> round(sim_monge_elkan('Niall', 'Neil'), 12)\n    0.666666666667\n    >>> round(sim_monge_elkan('aluminum', 'Catalan'), 12)\n    0.388888888889\n    >>> sim_monge_elkan('ATCG', 'TAGC')\n    0.5", "id": "f6632:m0"}
{"signature": "def dist_abs(self, src, tar):", "body": "if tar == src:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>elif not src:<EOL><INDENT>return len(tar)<EOL><DEDENT>elif not tar:<EOL><INDENT>return len(src)<EOL><DEDENT>src_bag = Counter(src)<EOL>tar_bag = Counter(tar)<EOL>return max(<EOL>sum((src_bag - tar_bag).values()),<EOL>sum((tar_bag - src_bag).values()),<EOL>)<EOL>", "docstring": "Return the bag distance between two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n\n        Returns\n        -------\n        int\n            Bag distance\n\n        Examples\n        --------\n        >>> cmp = Bag()\n        >>> cmp.dist_abs('cat', 'hat')\n        1\n        >>> cmp.dist_abs('Niall', 'Neil')\n        2\n        >>> cmp.dist_abs('aluminum', 'Catalan')\n        5\n        >>> cmp.dist_abs('ATCG', 'TAGC')\n        0\n        >>> cmp.dist_abs('abcdefg', 'hijklm')\n        7\n        >>> cmp.dist_abs('abcdefg', 'hijklmno')\n        8", "id": "f6633:c0:m0"}
{"signature": "def sim_bag(src, tar):", "body": "return Bag().sim(src, tar)<EOL>", "docstring": "Return the normalized bag similarity of two strings.\n\n    This is a wrapper for :py:meth:`Bag.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Normalized bag similarity\n\n    Examples\n    --------\n    >>> round(sim_bag('cat', 'hat'), 12)\n    0.666666666667\n    >>> sim_bag('Niall', 'Neil')\n    0.6\n    >>> sim_bag('aluminum', 'Catalan')\n    0.375\n    >>> sim_bag('ATCG', 'TAGC')\n    1.0", "id": "f6633:m2"}
{"signature": "def dist_ncd_arith(src, tar, probs=None):", "body": "return NCDarith().dist(src, tar, probs)<EOL>", "docstring": "Return the NCD between two strings using arithmetic coding.\n\n    This is a wrapper for :py:meth:`NCDarith.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    probs : dict\n        A dictionary trained with :py:meth:`Arithmetic.train`\n\n    Returns\n    -------\n    float\n        Compression distance\n\n    Examples\n    --------\n    >>> dist_ncd_arith('cat', 'hat')\n    0.5454545454545454\n    >>> dist_ncd_arith('Niall', 'Neil')\n    0.6875\n    >>> dist_ncd_arith('aluminum', 'Catalan')\n    0.8275862068965517\n    >>> dist_ncd_arith('ATCG', 'TAGC')\n    0.6923076923076923", "id": "f6634:m0"}
{"signature": "def dist(self, src, tar, probs=None):", "body": "if src == tar:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>if probs is None:<EOL><INDENT>self._coder.train(src + tar)<EOL><DEDENT>else:<EOL><INDENT>self._coder.set_probs(probs)<EOL><DEDENT>src_comp = self._coder.encode(src)[<NUM_LIT:1>]<EOL>tar_comp = self._coder.encode(tar)[<NUM_LIT:1>]<EOL>concat_comp = self._coder.encode(src + tar)[<NUM_LIT:1>]<EOL>concat_comp2 = self._coder.encode(tar + src)[<NUM_LIT:1>]<EOL>return (<EOL>min(concat_comp, concat_comp2) - min(src_comp, tar_comp)<EOL>) / max(src_comp, tar_comp)<EOL>", "docstring": "Return the NCD between two strings using arithmetic coding.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        probs : dict\n            A dictionary trained with :py:meth:`Arithmetic.train`\n\n        Returns\n        -------\n        float\n            Compression distance\n\n        Examples\n        --------\n        >>> cmp = NCDarith()\n        >>> cmp.dist('cat', 'hat')\n        0.5454545454545454\n        >>> cmp.dist('Niall', 'Neil')\n        0.6875\n        >>> cmp.dist('aluminum', 'Catalan')\n        0.8275862068965517\n        >>> cmp.dist('ATCG', 'TAGC')\n        0.6923076923076923", "id": "f6634:c0:m1"}
{"signature": "def sim(self, src, tar, min_ss_len=None, left_ext=None, right_ext=None):", "body": "if src == tar:<EOL><INDENT>return <NUM_LIT:1.0><EOL><DEDENT>if not src or not tar:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>max_len = max(len(src), len(tar))<EOL>if not (min_ss_len and left_ext and right_ext):<EOL><INDENT>if max_len >= <NUM_LIT:7>:<EOL><INDENT>min_ss_len = <NUM_LIT:2><EOL>left_ext = <NUM_LIT:2><EOL>right_ext = <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>min_ss_len = <NUM_LIT:1><EOL>left_ext = <NUM_LIT:0><EOL>right_ext = <NUM_LIT:0><EOL><DEDENT><DEDENT>pos = <NUM_LIT:0><EOL>match_len = <NUM_LIT:0><EOL>while True:<EOL><INDENT>if pos + min_ss_len > len(src):<EOL><INDENT>return match_len / max_len<EOL><DEDENT>hit_len = <NUM_LIT:0><EOL>ix = <NUM_LIT:1><EOL>substring = src[pos : pos + min_ss_len]<EOL>search_begin = pos - left_ext<EOL>if search_begin < <NUM_LIT:0>:<EOL><INDENT>search_begin = <NUM_LIT:0><EOL>left_ext_len = pos<EOL><DEDENT>else:<EOL><INDENT>left_ext_len = left_ext<EOL><DEDENT>if pos + min_ss_len + right_ext >= len(tar):<EOL><INDENT>right_ext_len = len(tar) - pos - min_ss_len<EOL><DEDENT>else:<EOL><INDENT>right_ext_len = right_ext<EOL><DEDENT>if (<EOL>search_begin + left_ext_len + min_ss_len + right_ext_len<EOL>> search_begin<EOL>):<EOL><INDENT>search_val = tar[<EOL>search_begin : (<EOL>search_begin<EOL>+ left_ext_len<EOL>+ min_ss_len<EOL>+ right_ext_len<EOL>)<EOL>]<EOL><DEDENT>else:<EOL><INDENT>search_val = '<STR_LIT>'<EOL><DEDENT>flagged_tar = '<STR_LIT>'<EOL>while substring in search_val and pos + ix <= len(src):<EOL><INDENT>hit_len = len(substring)<EOL>flagged_tar = tar.replace(substring, '<STR_LIT:#>' * hit_len)<EOL>if pos + min_ss_len + ix <= len(src):<EOL><INDENT>substring = src[pos : pos + min_ss_len + ix]<EOL><DEDENT>if pos + min_ss_len + right_ext_len + <NUM_LIT:1> <= len(tar):<EOL><INDENT>right_ext_len += <NUM_LIT:1><EOL><DEDENT>search_val = tar[<EOL>search_begin : (<EOL>search_begin<EOL>+ left_ext_len<EOL>+ min_ss_len<EOL>+ 
right_ext_len<EOL>)<EOL>]<EOL>ix += <NUM_LIT:1><EOL><DEDENT>if hit_len > <NUM_LIT:0>:<EOL><INDENT>tar = flagged_tar<EOL><DEDENT>match_len += hit_len<EOL>pos += ix<EOL><DEDENT>", "docstring": "Return the Baystat similarity.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        min_ss_len : int\n            Minimum substring length to be considered\n        left_ext :int\n            Left-side extension length\n        right_ext :int\n            Right-side extension length\n\n        Returns\n        -------\n        float\n            The Baystat similarity\n\n        Examples\n        --------\n        >>> cmp = Baystat()\n        >>> round(cmp.sim('cat', 'hat'), 12)\n        0.666666666667\n        >>> cmp.sim('Niall', 'Neil')\n        0.4\n        >>> round(cmp.sim('Colin', 'Cuilen'), 12)\n        0.166666666667\n        >>> cmp.sim('ATCG', 'TAGC')\n        0.0", "id": "f6636:c0:m0"}
{"signature": "def sim_baystat(src, tar, min_ss_len=None, left_ext=None, right_ext=None):", "body": "return Baystat().sim(src, tar, min_ss_len, left_ext, right_ext)<EOL>", "docstring": "Return the Baystat similarity.\n\n    This is a wrapper for :py:meth:`Baystat.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    min_ss_len : int\n        Minimum substring length to be considered\n    left_ext :int\n        Left-side extension length\n    right_ext :int\n        Right-side extension length\n\n    Returns\n    -------\n    float\n        The Baystat similarity\n\n    Examples\n    --------\n    >>> round(sim_baystat('cat', 'hat'), 12)\n    0.666666666667\n    >>> sim_baystat('Niall', 'Neil')\n    0.4\n    >>> round(sim_baystat('Colin', 'Cuilen'), 12)\n    0.166666666667\n    >>> sim_baystat('ATCG', 'TAGC')\n    0.0", "id": "f6636:m0"}
{"signature": "def dist_baystat(src, tar, min_ss_len=None, left_ext=None, right_ext=None):", "body": "return Baystat().dist(src, tar, min_ss_len, left_ext, right_ext)<EOL>", "docstring": "Return the Baystat distance.\n\n    This is a wrapper for :py:meth:`Baystat.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    min_ss_len : int\n        Minimum substring length to be considered\n    left_ext : int\n        Left-side extension length\n    right_ext : int\n        Right-side extension length\n\n    Returns\n    -------\n    float\n        The Baystat distance\n\n    Examples\n    --------\n    >>> round(dist_baystat('cat', 'hat'), 12)\n    0.333333333333\n    >>> dist_baystat('Niall', 'Neil')\n    0.6\n    >>> round(dist_baystat('Colin', 'Cuilen'), 12)\n    0.833333333333\n    >>> dist_baystat('ATCG', 'TAGC')\n    1.0", "id": "f6636:m1"}
{"signature": "def sift4_simplest(src, tar, max_offset=<NUM_LIT:5>):", "body": "return Sift4Simplest().dist_abs(src, tar, max_offset)<EOL>", "docstring": "Return the \"simplest\" Sift4 distance between two terms.\n\n    This is a wrapper for :py:meth:`Sift4Simplest.dist_abs`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    max_offset : int\n        The number of characters to search for matching letters\n\n    Returns\n    -------\n    int\n        The Sift4 distance according to the simplest formula\n\n    Examples\n    --------\n    >>> sift4_simplest('cat', 'hat')\n    1\n    >>> sift4_simplest('Niall', 'Neil')\n    2\n    >>> sift4_simplest('Colin', 'Cuilen')\n    3\n    >>> sift4_simplest('ATCG', 'TAGC')\n    2", "id": "f6637:m0"}
{"signature": "def dist_indel(src, tar):", "body": "return Indel().dist(src, tar)<EOL>", "docstring": "Return the normalized indel distance between two strings.\n\n    This is equivalent to normalized Levenshtein distance, when only inserts\n    and deletes are possible.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Normalized indel distance\n\n    Examples\n    --------\n    >>> round(dist_indel('cat', 'hat'), 12)\n    0.333333333333\n    >>> round(dist_indel('Niall', 'Neil'), 12)\n    0.333333333333\n    >>> round(dist_indel('Colin', 'Cuilen'), 12)\n    0.454545454545\n    >>> dist_indel('ATCG', 'TAGC')\n    0.5", "id": "f6638:m1"}
{"signature": "def dist_abs(self, src, tar):", "body": "return self._lev.dist_abs(<EOL>src, tar, mode='<STR_LIT>', cost=(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT>, <NUM_LIT>)<EOL>)<EOL>", "docstring": "Return the indel distance between two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n\n        Returns\n        -------\n        int\n            Indel distance\n\n        Examples\n        --------\n        >>> cmp = Indel()\n        >>> cmp.dist_abs('cat', 'hat')\n        2\n        >>> cmp.dist_abs('Niall', 'Neil')\n        3\n        >>> cmp.dist_abs('Colin', 'Cuilen')\n        5\n        >>> cmp.dist_abs('ATCG', 'TAGC')\n        4", "id": "f6638:c0:m0"}
{"signature": "def indel(src, tar):", "body": "return Indel().dist_abs(src, tar)<EOL>", "docstring": "Return the indel distance between two strings.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    int\n        Indel distance\n\n    Examples\n    --------\n    >>> indel('cat', 'hat')\n    2\n    >>> indel('Niall', 'Neil')\n    3\n    >>> indel('Colin', 'Cuilen')\n    5\n    >>> indel('ATCG', 'TAGC')\n    4", "id": "f6638:m0"}
{"signature": "def dist(self, src, tar):", "body": "if src == tar:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>return self.dist_abs(src, tar) / (len(src) + len(tar))<EOL>", "docstring": "Return the normalized indel distance between two strings.\n\n        This is equivalent to normalized Levenshtein distance, when only\n        inserts and deletes are possible.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n\n        Returns\n        -------\n        float\n            Normalized indel distance\n\n        Examples\n        --------\n        >>> cmp = Indel()\n        >>> round(cmp.dist('cat', 'hat'), 12)\n        0.333333333333\n        >>> round(cmp.dist('Niall', 'Neil'), 12)\n        0.333333333333\n        >>> round(cmp.dist('Colin', 'Cuilen'), 12)\n        0.454545454545\n        >>> cmp.dist('ATCG', 'TAGC')\n        0.5", "id": "f6638:c0:m1"}
{"signature": "def sim_ratcliff_obershelp(src, tar):", "body": "return RatcliffObershelp().sim(src, tar)<EOL>", "docstring": "Return the Ratcliff-Obershelp similarity of two strings.\n\n    This is a wrapper for :py:meth:`RatcliffObershelp.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Ratcliff-Obershelp similarity\n\n    Examples\n    --------\n    >>> round(sim_ratcliff_obershelp('cat', 'hat'), 12)\n    0.666666666667\n    >>> round(sim_ratcliff_obershelp('Niall', 'Neil'), 12)\n    0.666666666667\n    >>> round(sim_ratcliff_obershelp('aluminum', 'Catalan'), 12)\n    0.4\n    >>> sim_ratcliff_obershelp('ATCG', 'TAGC')\n    0.5", "id": "f6639:m0"}
{"signature": "def sim(self, src, tar):", "body": "def _lcsstr_stl(src, tar):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>lengths = np_zeros((len(src) + <NUM_LIT:1>, len(tar) + <NUM_LIT:1>), dtype=np_int)<EOL>longest, src_longest, tar_longest = <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0><EOL>for i in range(<NUM_LIT:1>, len(src) + <NUM_LIT:1>):<EOL><INDENT>for j in range(<NUM_LIT:1>, len(tar) + <NUM_LIT:1>):<EOL><INDENT>if src[i - <NUM_LIT:1>] == tar[j - <NUM_LIT:1>]:<EOL><INDENT>lengths[i, j] = lengths[i - <NUM_LIT:1>, j - <NUM_LIT:1>] + <NUM_LIT:1><EOL>if lengths[i, j] > longest:<EOL><INDENT>longest = lengths[i, j]<EOL>src_longest = i<EOL>tar_longest = j<EOL><DEDENT><DEDENT>else:<EOL><INDENT>lengths[i, j] = <NUM_LIT:0><EOL><DEDENT><DEDENT><DEDENT>return src_longest - longest, tar_longest - longest, longest<EOL><DEDENT>def _sstr_matches(src, tar):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>src_start, tar_start, length = _lcsstr_stl(src, tar)<EOL>if length == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>return (<EOL>_sstr_matches(src[:src_start], tar[:tar_start])<EOL>+ length<EOL>+ _sstr_matches(<EOL>src[src_start + length :], tar[tar_start + length :]<EOL>)<EOL>)<EOL><DEDENT>if src == tar:<EOL><INDENT>return <NUM_LIT:1.0><EOL><DEDENT>elif not src or not tar:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>return <NUM_LIT:2> * _sstr_matches(src, tar) / (len(src) + len(tar))<EOL>", "docstring": "Return the Ratcliff-Obershelp similarity of two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n\n        Returns\n        -------\n        float\n            Ratcliff-Obershelp similarity\n\n        Examples\n        --------\n        >>> cmp = RatcliffObershelp()\n        >>> round(cmp.sim('cat', 'hat'), 12)\n        0.666666666667\n        >>> round(cmp.sim('Niall', 'Neil'), 12)\n        0.666666666667\n        >>> round(cmp.sim('aluminum', 'Catalan'), 12)\n 
       0.4\n        >>> cmp.sim('ATCG', 'TAGC')\n        0.5", "id": "f6639:c0:m0"}
{"signature": "def dist(src, tar, method=sim_levenshtein):", "body": "if callable(method):<EOL><INDENT>return <NUM_LIT:1> - method(src, tar)<EOL><DEDENT>else:<EOL><INDENT>raise AttributeError('<STR_LIT>' + str(method))<EOL><DEDENT>", "docstring": "Return a distance between two strings.\n\n    This is a generalized function for calling other distance functions.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    method : function\n        Specifies the similarity metric (:py:func:`sim_levenshtein` by default)\n        -- Note that this takes a similarity metric function, not a distance\n        metric function.\n\n    Returns\n    -------\n    float\n        Distance according to the specified function\n\n    Raises\n    ------\n    AttributeError\n        Unknown distance function\n\n    Examples\n    --------\n    >>> round(dist('cat', 'hat'), 12)\n    0.333333333333\n    >>> round(dist('Niall', 'Neil'), 12)\n    0.6\n    >>> dist('aluminum', 'Catalan')\n    0.875\n    >>> dist('ATCG', 'TAGC')\n    0.75", "id": "f6640:m1"}
{"signature": "def dist_abs(self, src, tar, qval=<NUM_LIT:2>, normalized=False, alphabet=None):", "body": "return super(self.__class__, self).dist_abs(<EOL>src, tar, qval, <NUM_LIT:1>, normalized, alphabet<EOL>)<EOL>", "docstring": "Return the Manhattan distance between two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string (or QGrams/Counter objects) for comparison\n        tar : str\n            Target string (or QGrams/Counter objects) for comparison\n        qval : int\n            The length of each q-gram; 0 for non-q-gram version\n        normalized : bool\n            Normalizes to [0, 1] if True\n        alphabet : collection or int\n            The values or size of the alphabet\n\n        Returns\n        -------\n        float\n            The Manhattan distance\n\n        Examples\n        --------\n        >>> cmp = Manhattan()\n        >>> cmp.dist_abs('cat', 'hat')\n        4.0\n        >>> cmp.dist_abs('Niall', 'Neil')\n        7.0\n        >>> cmp.dist_abs('Colin', 'Cuilen')\n        9.0\n        >>> cmp.dist_abs('ATCG', 'TAGC')\n        10.0", "id": "f6641:c0:m0"}
{"signature": "def dist(self, src, tar, qval=<NUM_LIT:2>, alphabet=None):", "body": "return self.dist_abs(src, tar, qval, True, alphabet)<EOL>", "docstring": "Return the normalized Manhattan distance between two strings.\n\n        The normalized Manhattan distance is a distance metric in\n        :math:`L^1`-space, normalized to [0, 1].\n\n        This is identical to Canberra distance.\n\n        Parameters\n        ----------\n        src : str\n            Source string (or QGrams/Counter objects) for comparison\n        tar : str\n            Target string (or QGrams/Counter objects) for comparison\n        qval : int\n            The length of each q-gram; 0 for non-q-gram version\n        alphabet : collection or int\n            The values or size of the alphabet\n\n        Returns\n        -------\n        float\n            The normalized Manhattan distance\n\n        Examples\n        --------\n        >>> cmp = Manhattan()\n        >>> cmp.dist('cat', 'hat')\n        0.5\n        >>> round(cmp.dist('Niall', 'Neil'), 12)\n        0.636363636364\n        >>> round(cmp.dist('Colin', 'Cuilen'), 12)\n        0.692307692308\n        >>> cmp.dist('ATCG', 'TAGC')\n        1.0", "id": "f6641:c0:m1"}
{"signature": "def sim_mra(src, tar):", "body": "return MRA().sim(src, tar)<EOL>", "docstring": "Return the normalized MRA similarity of two strings.\n\n    This is a wrapper for :py:meth:`MRA.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Normalized MRA similarity\n\n    Examples\n    --------\n    >>> sim_mra('cat', 'hat')\n    0.8333333333333334\n    >>> sim_mra('Niall', 'Neil')\n    1.0\n    >>> sim_mra('aluminum', 'Catalan')\n    0.0\n    >>> sim_mra('ATCG', 'TAGC')\n    0.8333333333333334", "id": "f6642:m1"}
{"signature": "def sim(self, src, tar):", "body": "return mra_compare(src, tar) / <NUM_LIT:6><EOL>", "docstring": "Return the normalized MRA similarity of two strings.\n\n        This is the MRA normalized to :math:`[0, 1]`, given that MRA itself is\n        constrained to the range :math:`[0, 6]`.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n\n        Returns\n        -------\n        float\n            Normalized MRA similarity\n\n        Examples\n        --------\n        >>> cmp = MRA()\n        >>> cmp.sim('cat', 'hat')\n        0.8333333333333334\n        >>> cmp.sim('Niall', 'Neil')\n        1.0\n        >>> cmp.sim('aluminum', 'Catalan')\n        0.0\n        >>> cmp.sim('ATCG', 'TAGC')\n        0.8333333333333334", "id": "f6642:c0:m1"}
{"signature": "def dist_prefix(src, tar):", "body": "return Prefix().dist(src, tar)<EOL>", "docstring": "Return the prefix distance between two strings.\n\n    This is a wrapper for :py:meth:`Prefix.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Prefix distance\n\n    Examples\n    --------\n    >>> dist_prefix('cat', 'hat')\n    1.0\n    >>> dist_prefix('Niall', 'Neil')\n    0.75\n    >>> dist_prefix('aluminum', 'Catalan')\n    1.0\n    >>> dist_prefix('ATCG', 'TAGC')\n    1.0", "id": "f6643:m1"}
{"signature": "def dist_ident(src, tar):", "body": "return Ident().dist(src, tar)<EOL>", "docstring": "Return the identity distance between two strings.\n\n    This is a wrapper for :py:meth:`Ident.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Identity distance\n\n    Examples\n    --------\n    >>> dist_ident('cat', 'hat')\n    1.0\n    >>> dist_ident('cat', 'cat')\n    0.0", "id": "f6644:m1"}
{"signature": "def sim_ident(src, tar):", "body": "return Ident().sim(src, tar)<EOL>", "docstring": "Return the identity similarity of two strings.\n\n    This is a wrapper for :py:meth:`Ident.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Identity similarity\n\n\n    Examples\n    --------\n    >>> sim_ident('cat', 'hat')\n    0.0\n    >>> sim_ident('cat', 'cat')\n    1.0", "id": "f6644:m0"}
{"signature": "def sim_ncd_bwtrle(src, tar):", "body": "return NCDbwtrle().sim(src, tar)<EOL>", "docstring": "Return the NCD similarity between two strings using BWT plus RLE.\n\n    This is a wrapper for :py:meth:`NCDbwtrle.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Compression similarity\n\n    Examples\n    --------\n    >>> sim_ncd_bwtrle('cat', 'hat')\n    0.25\n    >>> sim_ncd_bwtrle('Niall', 'Neil')\n    0.16666666666666663\n    >>> sim_ncd_bwtrle('aluminum', 'Catalan')\n    0.0\n    >>> sim_ncd_bwtrle('ATCG', 'TAGC')\n    0.19999999999999996", "id": "f6645:m1"}
{"signature": "def sim_length(src, tar):", "body": "return Length().sim(src, tar)<EOL>", "docstring": "Return the length similarity of two strings.\n\n    This is a wrapper for :py:meth:`Length.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Length similarity\n\n    Examples\n    --------\n    >>> sim_length('cat', 'hat')\n    1.0\n    >>> sim_length('Niall', 'Neil')\n    0.8\n    >>> sim_length('aluminum', 'Catalan')\n    0.875\n    >>> sim_length('ATCG', 'TAGC')\n    1.0", "id": "f6646:m0"}
{"signature": "def sim(self, src, tar):", "body": "if src == tar:<EOL><INDENT>return <NUM_LIT:1.0><EOL><DEDENT>if not src or not tar:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>return (<EOL>len(src) / len(tar) if len(src) < len(tar) else len(tar) / len(src)<EOL>)<EOL>", "docstring": "Return the length similarity of two strings.\n\n        Length similarity is the ratio of the length of the shorter string to\n        the longer.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n\n        Returns\n        -------\n        float\n            Length similarity\n\n        Examples\n        --------\n        >>> cmp = Length()\n        >>> cmp.sim('cat', 'hat')\n        1.0\n        >>> cmp.sim('Niall', 'Neil')\n        0.8\n        >>> cmp.sim('aluminum', 'Catalan')\n        0.875\n        >>> cmp.sim('ATCG', 'TAGC')\n        1.0", "id": "f6646:c0:m0"}
{"signature": "def __init__(self, level=<NUM_LIT:9>):", "body": "self._level = level<EOL>", "docstring": "Initialize bzip2 compressor.\n\n        Parameters\n        ----------\n        level : int\n            The compression level (0 to 9)", "id": "f6647:c0:m0"}
{"signature": "def dist(self, src, tar):", "body": "if src == tar:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>src = src.encode('<STR_LIT:utf-8>')<EOL>tar = tar.encode('<STR_LIT:utf-8>')<EOL>src_comp = bz2.compress(src, self._level)[<NUM_LIT:10>:]<EOL>tar_comp = bz2.compress(tar, self._level)[<NUM_LIT:10>:]<EOL>concat_comp = bz2.compress(src + tar, self._level)[<NUM_LIT:10>:]<EOL>concat_comp2 = bz2.compress(tar + src, self._level)[<NUM_LIT:10>:]<EOL>return (<EOL>min(len(concat_comp), len(concat_comp2))<EOL>- min(len(src_comp), len(tar_comp))<EOL>) / max(len(src_comp), len(tar_comp))<EOL>", "docstring": "Return the NCD between two strings using bzip2 compression.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n\n        Returns\n        -------\n        float\n            Compression distance\n\n        Examples\n        --------\n        >>> cmp = NCDbz2()\n        >>> cmp.dist('cat', 'hat')\n        0.06666666666666667\n        >>> cmp.dist('Niall', 'Neil')\n        0.03125\n        >>> cmp.dist('aluminum', 'Catalan')\n        0.17647058823529413\n        >>> cmp.dist('ATCG', 'TAGC')\n        0.03125", "id": "f6647:c0:m1"}
{"signature": "def sim(<EOL>self,<EOL>src,<EOL>tar,<EOL>qval=<NUM_LIT:1>,<EOL>mode='<STR_LIT>',<EOL>long_strings=False,<EOL>boost_threshold=<NUM_LIT>,<EOL>scaling_factor=<NUM_LIT:0.1>,<EOL>):", "body": "if mode == '<STR_LIT>':<EOL><INDENT>if boost_threshold > <NUM_LIT:1> or boost_threshold < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>+ '<STR_LIT>'<EOL>)<EOL><DEDENT>if scaling_factor > <NUM_LIT> or scaling_factor < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>+ '<STR_LIT>'<EOL>)<EOL><DEDENT><DEDENT>if src == tar:<EOL><INDENT>return <NUM_LIT:1.0><EOL><DEDENT>src = QGrams(src.strip(), qval)._ordered_list<EOL>tar = QGrams(tar.strip(), qval)._ordered_list<EOL>lens = len(src)<EOL>lent = len(tar)<EOL>if lens == <NUM_LIT:0> or lent == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>if lens > lent:<EOL><INDENT>search_range = lens<EOL>minv = lent<EOL><DEDENT>else:<EOL><INDENT>search_range = lent<EOL>minv = lens<EOL><DEDENT>src_flag = [<NUM_LIT:0>] * search_range<EOL>tar_flag = [<NUM_LIT:0>] * search_range<EOL>search_range = max(<NUM_LIT:0>, search_range // <NUM_LIT:2> - <NUM_LIT:1>)<EOL>num_com = <NUM_LIT:0><EOL>yl1 = lent - <NUM_LIT:1><EOL>for i in range(lens):<EOL><INDENT>low_lim = (i - search_range) if (i >= search_range) else <NUM_LIT:0><EOL>hi_lim = (i + search_range) if ((i + search_range) <= yl1) else yl1<EOL>for j in range(low_lim, hi_lim + <NUM_LIT:1>):<EOL><INDENT>if (tar_flag[j] == <NUM_LIT:0>) and (tar[j] == src[i]):<EOL><INDENT>tar_flag[j] = <NUM_LIT:1><EOL>src_flag[i] = <NUM_LIT:1><EOL>num_com += <NUM_LIT:1><EOL>break<EOL><DEDENT><DEDENT><DEDENT>if num_com == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>k = n_trans = <NUM_LIT:0><EOL>for i in range(lens):<EOL><INDENT>if src_flag[i] != <NUM_LIT:0>:<EOL><INDENT>j = <NUM_LIT:0><EOL>for j in range(k, lent):  <EOL><INDENT>if tar_flag[j] != <NUM_LIT:0>:<EOL><INDENT>k = j + <NUM_LIT:1><EOL>break<EOL><DEDENT><DEDENT>if src[i] != tar[j]:<EOL><INDENT>n_trans += 
<NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>n_trans //= <NUM_LIT:2><EOL>weight = (<EOL>num_com / lens + num_com / lent + (num_com - n_trans) / num_com<EOL>)<EOL>weight /= <NUM_LIT><EOL>if mode == '<STR_LIT>' and weight > boost_threshold:<EOL><INDENT>j = <NUM_LIT:4> if (minv >= <NUM_LIT:4>) else minv<EOL>i = <NUM_LIT:0><EOL>while (i < j) and (src[i] == tar[i]):<EOL><INDENT>i += <NUM_LIT:1><EOL><DEDENT>weight += i * scaling_factor * (<NUM_LIT:1.0> - weight)<EOL>if (<EOL>long_strings<EOL>and (minv > <NUM_LIT:4>)<EOL>and (num_com > i + <NUM_LIT:1>)<EOL>and (<NUM_LIT:2> * num_com >= minv + i)<EOL>):<EOL><INDENT>weight += (<NUM_LIT:1.0> - weight) * (<EOL>(num_com - i - <NUM_LIT:1>) / (lens + lent - i * <NUM_LIT:2> + <NUM_LIT:2>)<EOL>)<EOL><DEDENT><DEDENT>return weight<EOL>", "docstring": "Return the Jaro or Jaro-Winkler similarity of two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        qval : int\n            The length of each q-gram (defaults to 1: character-wise matching)\n        mode : str\n            Indicates which variant of this distance metric to compute:\n\n                - ``winkler`` -- computes the Jaro-Winkler distance (default)\n                  which increases the score for matches near the start of the\n                  word\n                - ``jaro`` -- computes the Jaro distance\n\n        long_strings : bool\n            Set to True to \"Increase the probability of a match when the number\n            of matched characters is large. This option allows for a little\n            more tolerance when the strings are large. 
It is not an appropriate\n            test when comparing fixed length fields such as phone and social\n            security numbers.\" (Used in 'winkler' mode only.)\n        boost_threshold : float\n            A value between 0 and 1, below which the Winkler boost is not\n            applied (defaults to 0.7). (Used in 'winkler' mode only.)\n        scaling_factor : float\n            A value between 0 and 0.25, indicating by how much to boost scores\n            for matching prefixes (defaults to 0.1). (Used in 'winkler' mode\n            only.)\n\n        Returns\n        -------\n        float\n            Jaro or Jaro-Winkler similarity\n\n        Raises\n        ------\n        ValueError\n            Unsupported boost_threshold assignment; boost_threshold must be\n            between 0 and 1.\n        ValueError\n            Unsupported scaling_factor assignment; scaling_factor must be\n            between 0 and 0.25.'\n\n        Examples\n        --------\n        >>> round(sim_jaro_winkler('cat', 'hat'), 12)\n        0.777777777778\n        >>> round(sim_jaro_winkler('Niall', 'Neil'), 12)\n        0.805\n        >>> round(sim_jaro_winkler('aluminum', 'Catalan'), 12)\n        0.60119047619\n        >>> round(sim_jaro_winkler('ATCG', 'TAGC'), 12)\n        0.833333333333\n\n        >>> round(sim_jaro_winkler('cat', 'hat', mode='jaro'), 12)\n        0.777777777778\n        >>> round(sim_jaro_winkler('Niall', 'Neil', mode='jaro'), 12)\n        0.783333333333\n        >>> round(sim_jaro_winkler('aluminum', 'Catalan', mode='jaro'), 12)\n        0.60119047619\n        >>> round(sim_jaro_winkler('ATCG', 'TAGC', mode='jaro'), 12)\n        0.833333333333", "id": "f6648:c0:m0"}
{"signature": "def dist_jaro_winkler(<EOL>src,<EOL>tar,<EOL>qval=<NUM_LIT:1>,<EOL>mode='<STR_LIT>',<EOL>long_strings=False,<EOL>boost_threshold=<NUM_LIT>,<EOL>scaling_factor=<NUM_LIT:0.1>,<EOL>):", "body": "return JaroWinkler().dist(<EOL>src, tar, qval, mode, long_strings, boost_threshold, scaling_factor<EOL>)<EOL>", "docstring": "Return the Jaro or Jaro-Winkler distance between two strings.\n\n    This is a wrapper for :py:meth:`JaroWinkler.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    qval : int\n        The length of each q-gram (defaults to 1: character-wise matching)\n    mode : str\n        Indicates which variant of this distance metric to compute:\n\n            - ``winkler`` -- computes the Jaro-Winkler distance (default) which\n              increases the score for matches near the start of the word\n            - ``jaro`` -- computes the Jaro distance\n\n    long_strings : bool\n        Set to True to \"Increase the probability of a match when the number of\n        matched characters is large. This option allows for a little more\n        tolerance when the strings are large. It is not an appropriate test\n        when comparing fixedlength fields such as phone and social security\n        numbers.\" (Used in 'winkler' mode only.)\n    boost_threshold : float\n        A value between 0 and 1, below which the Winkler boost is not applied\n        (defaults to 0.7). (Used in 'winkler' mode only.)\n    scaling_factor : float\n        A value between 0 and 0.25, indicating by how much to boost scores for\n        matching prefixes (defaults to 0.1). 
(Used in 'winkler' mode only.)\n\n    Returns\n    -------\n    float\n        Jaro or Jaro-Winkler distance\n\n    Examples\n    --------\n    >>> round(dist_jaro_winkler('cat', 'hat'), 12)\n    0.222222222222\n    >>> round(dist_jaro_winkler('Niall', 'Neil'), 12)\n    0.195\n    >>> round(dist_jaro_winkler('aluminum', 'Catalan'), 12)\n    0.39880952381\n    >>> round(dist_jaro_winkler('ATCG', 'TAGC'), 12)\n    0.166666666667\n\n    >>> round(dist_jaro_winkler('cat', 'hat', mode='jaro'), 12)\n    0.222222222222\n    >>> round(dist_jaro_winkler('Niall', 'Neil', mode='jaro'), 12)\n    0.216666666667\n    >>> round(dist_jaro_winkler('aluminum', 'Catalan', mode='jaro'), 12)\n    0.39880952381\n    >>> round(dist_jaro_winkler('ATCG', 'TAGC', mode='jaro'), 12)\n    0.166666666667", "id": "f6648:m1"}
{"signature": "def dist_overlap(src, tar, qval=<NUM_LIT:2>):", "body": "return Overlap().dist(src, tar, qval)<EOL>", "docstring": "Return the overlap distance between two strings.\n\n    This is a wrapper for :py:meth:`Overlap.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string (or QGrams/Counter objects) for comparison\n    tar : str\n        Target string (or QGrams/Counter objects) for comparison\n    qval : int\n        The length of each q-gram; 0 for non-q-gram version\n\n    Returns\n    -------\n    float\n        Overlap distance\n\n    Examples\n    --------\n    >>> dist_overlap('cat', 'hat')\n    0.5\n    >>> dist_overlap('Niall', 'Neil')\n    0.6\n    >>> dist_overlap('aluminum', 'Catalan')\n    0.875\n    >>> dist_overlap('ATCG', 'TAGC')\n    1.0", "id": "f6649:m1"}
{"signature": "def dist_abs(<EOL>self,<EOL>src,<EOL>tar,<EOL>metric='<STR_LIT>',<EOL>cost=(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0.5>, <NUM_LIT:0.5>),<EOL>layout='<STR_LIT>',<EOL>):", "body": "ins_cost, del_cost, sub_cost, shift_cost = cost<EOL>if src == tar:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>if not src:<EOL><INDENT>return len(tar) * ins_cost<EOL><DEDENT>if not tar:<EOL><INDENT>return len(src) * del_cost<EOL><DEDENT>keyboard = self._keyboard[layout]<EOL>lowercase = {item for sublist in keyboard[<NUM_LIT:0>] for item in sublist}<EOL>uppercase = {item for sublist in keyboard[<NUM_LIT:1>] for item in sublist}<EOL>def _kb_array_for_char(char):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if char in lowercase:<EOL><INDENT>return keyboard[<NUM_LIT:0>]<EOL><DEDENT>elif char in uppercase:<EOL><INDENT>return keyboard[<NUM_LIT:1>]<EOL><DEDENT>raise ValueError(char + '<STR_LIT>')<EOL><DEDENT>def _substitution_cost(char1, char2):<EOL><INDENT>cost = sub_cost<EOL>cost *= metric_dict[metric](char1, char2) + shift_cost * (<EOL>_kb_array_for_char(char1) != _kb_array_for_char(char2)<EOL>)<EOL>return cost<EOL><DEDENT>def _get_char_coord(char, kb_array):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>for row in kb_array:  <EOL><INDENT>if char in row:<EOL><INDENT>return kb_array.index(row), row.index(char)<EOL><DEDENT><DEDENT><DEDENT>def _euclidean_keyboard_distance(char1, char2):<EOL><INDENT>row1, col1 = _get_char_coord(char1, _kb_array_for_char(char1))<EOL>row2, col2 = _get_char_coord(char2, _kb_array_for_char(char2))<EOL>return ((row1 - row2) ** <NUM_LIT:2> + (col1 - col2) ** <NUM_LIT:2>) ** <NUM_LIT:0.5><EOL><DEDENT>def _manhattan_keyboard_distance(char1, char2):<EOL><INDENT>row1, col1 = _get_char_coord(char1, _kb_array_for_char(char1))<EOL>row2, col2 = _get_char_coord(char2, _kb_array_for_char(char2))<EOL>return abs(row1 - row2) + abs(col1 - col2)<EOL><DEDENT>def _log_euclidean_keyboard_distance(char1, char2):<EOL><INDENT>return log(<NUM_LIT:1> + _euclidean_keyboard_distance(char1, 
char2))<EOL><DEDENT>def _log_manhattan_keyboard_distance(char1, char2):<EOL><INDENT>return log(<NUM_LIT:1> + _manhattan_keyboard_distance(char1, char2))<EOL><DEDENT>metric_dict = {<EOL>'<STR_LIT>': _euclidean_keyboard_distance,<EOL>'<STR_LIT>': _manhattan_keyboard_distance,<EOL>'<STR_LIT>': _log_euclidean_keyboard_distance,<EOL>'<STR_LIT>': _log_manhattan_keyboard_distance,<EOL>}<EOL>d_mat = np_zeros((len(src) + <NUM_LIT:1>, len(tar) + <NUM_LIT:1>), dtype=np_float32)<EOL>for i in range(len(src) + <NUM_LIT:1>):<EOL><INDENT>d_mat[i, <NUM_LIT:0>] = i * del_cost<EOL><DEDENT>for j in range(len(tar) + <NUM_LIT:1>):<EOL><INDENT>d_mat[<NUM_LIT:0>, j] = j * ins_cost<EOL><DEDENT>for i in range(len(src)):<EOL><INDENT>for j in range(len(tar)):<EOL><INDENT>d_mat[i + <NUM_LIT:1>, j + <NUM_LIT:1>] = min(<EOL>d_mat[i + <NUM_LIT:1>, j] + ins_cost,  <EOL>d_mat[i, j + <NUM_LIT:1>] + del_cost,  <EOL>d_mat[i, j]<EOL>+ (<EOL>_substitution_cost(src[i], tar[j])<EOL>if src[i] != tar[j]<EOL>else <NUM_LIT:0><EOL>),  <EOL>)<EOL><DEDENT><DEDENT>return d_mat[len(src), len(tar)]<EOL>", "docstring": "Return the typo distance between two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        metric : str\n            Supported values include: ``euclidean``, ``manhattan``,\n            ``log-euclidean``, and ``log-manhattan``\n        cost : tuple\n            A 4-tuple representing the cost of the four possible edits:\n            inserts, deletes, substitutions, and shift, respectively (by\n            default: (1, 1, 0.5, 0.5)) The substitution & shift costs should be\n            significantly less than the cost of an insertion & deletion unless\n            a log metric is used.\n        layout : str\n            Name of the keyboard layout to use (Currently supported:\n            ``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)\n\n        Returns\n        -------\n    
    float\n            Typo distance\n\n        Raises\n        ------\n        ValueError\n            char not found in any keyboard layouts\n\n        Examples\n        --------\n        >>> cmp = Typo()\n        >>> cmp.dist_abs('cat', 'hat')\n        1.5811388\n        >>> cmp.dist_abs('Niall', 'Neil')\n        2.8251407\n        >>> cmp.dist_abs('Colin', 'Cuilen')\n        3.4142137\n        >>> cmp.dist_abs('ATCG', 'TAGC')\n        2.5\n\n        >>> cmp.dist_abs('cat', 'hat', metric='manhattan')\n        2.0\n        >>> cmp.dist_abs('Niall', 'Neil', metric='manhattan')\n        3.0\n        >>> cmp.dist_abs('Colin', 'Cuilen', metric='manhattan')\n        3.5\n        >>> cmp.dist_abs('ATCG', 'TAGC', metric='manhattan')\n        2.5\n\n        >>> cmp.dist_abs('cat', 'hat', metric='log-manhattan')\n        0.804719\n        >>> cmp.dist_abs('Niall', 'Neil', metric='log-manhattan')\n        2.2424533\n        >>> cmp.dist_abs('Colin', 'Cuilen', metric='log-manhattan')\n        2.2424533\n        >>> cmp.dist_abs('ATCG', 'TAGC', metric='log-manhattan')\n        2.3465736", "id": "f6650:c0:m0"}
{"signature": "def typo(src, tar, metric='<STR_LIT>', cost=(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0.5>, <NUM_LIT:0.5>), layout='<STR_LIT>'):", "body": "return Typo().dist_abs(src, tar, metric, cost, layout)<EOL>", "docstring": "Return the typo distance between two strings.\n\n    This is a wrapper for :py:meth:`Typo.typo`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    metric : str\n        Supported values include: ``euclidean``, ``manhattan``,\n        ``log-euclidean``, and ``log-manhattan``\n    cost : tuple\n        A 4-tuple representing the cost of the four possible edits: inserts,\n        deletes, substitutions, and shift, respectively (by default:\n        (1, 1, 0.5, 0.5)) The substitution & shift costs should be\n        significantly less than the cost of an insertion & deletion unless a\n        log metric is used.\n    layout : str\n        Name of the keyboard layout to use (Currently supported:\n        ``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)\n\n    Returns\n    -------\n    float\n        Typo distance\n\n    Examples\n    --------\n    >>> typo('cat', 'hat')\n    1.5811388\n    >>> typo('Niall', 'Neil')\n    2.8251407\n    >>> typo('Colin', 'Cuilen')\n    3.4142137\n    >>> typo('ATCG', 'TAGC')\n    2.5\n\n    >>> typo('cat', 'hat', metric='manhattan')\n    2.0\n    >>> typo('Niall', 'Neil', metric='manhattan')\n    3.0\n    >>> typo('Colin', 'Cuilen', metric='manhattan')\n    3.5\n    >>> typo('ATCG', 'TAGC', metric='manhattan')\n    2.5\n\n    >>> typo('cat', 'hat', metric='log-manhattan')\n    0.804719\n    >>> typo('Niall', 'Neil', metric='log-manhattan')\n    2.2424533\n    >>> typo('Colin', 'Cuilen', metric='log-manhattan')\n    2.2424533\n    >>> typo('ATCG', 'TAGC', metric='log-manhattan')\n    2.3465736", "id": "f6650:m0"}
{"signature": "def dist_minkowski(src, tar, qval=<NUM_LIT:2>, pval=<NUM_LIT:1>, alphabet=None):", "body": "return Minkowski().dist(src, tar, qval, pval, alphabet)<EOL>", "docstring": "Return normalized Minkowski distance of two strings.\n\n    This is a wrapper for :py:meth:`Minkowski.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string (or QGrams/Counter objects) for comparison\n    tar : str\n        Target string (or QGrams/Counter objects) for comparison\n    qval : int\n        The length of each q-gram; 0 for non-q-gram version\n    pval : int or float\n        The :math:`p`-value of the :math:`L^p`-space\n    alphabet : collection or int\n        The values or size of the alphabet\n\n    Returns\n    -------\n    float\n        The normalized Minkowski distance\n\n    Examples\n    --------\n    >>> dist_minkowski('cat', 'hat')\n    0.5\n    >>> round(dist_minkowski('Niall', 'Neil'), 12)\n    0.636363636364\n    >>> round(dist_minkowski('Colin', 'Cuilen'), 12)\n    0.692307692308\n    >>> dist_minkowski('ATCG', 'TAGC')\n    1.0", "id": "f6652:m1"}
{"signature": "def dist_abs(<EOL>self, src, tar, qval=<NUM_LIT:2>, pval=<NUM_LIT:1>, normalized=False, alphabet=None<EOL>):", "body": "q_src, q_tar = self._get_qgrams(src, tar, qval)<EOL>diffs = ((q_src - q_tar) + (q_tar - q_src)).values()<EOL>normalizer = <NUM_LIT:1><EOL>if normalized:<EOL><INDENT>totals = (q_src + q_tar).values()<EOL>if alphabet is not None:<EOL><INDENT>normalizer = (<EOL>alphabet if isinstance(alphabet, Number) else len(alphabet)<EOL>)<EOL><DEDENT>elif pval == <NUM_LIT:0>:<EOL><INDENT>normalizer = len(totals)<EOL><DEDENT>else:<EOL><INDENT>normalizer = sum(_ ** pval for _ in totals) ** (<NUM_LIT:1> / pval)<EOL><DEDENT><DEDENT>if len(diffs) == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>if pval == float('<STR_LIT>'):<EOL><INDENT>return max(diffs) / normalizer<EOL><DEDENT>if pval == <NUM_LIT:0>:<EOL><INDENT>return len(diffs) / normalizer<EOL><DEDENT>return sum(_ ** pval for _ in diffs) ** (<NUM_LIT:1> / pval) / normalizer<EOL>", "docstring": "Return the Minkowski distance (:math:`L^p`-norm) of two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string (or QGrams/Counter objects) for comparison\n        tar : str\n            Target string (or QGrams/Counter objects) for comparison\n        qval : int\n            The length of each q-gram; 0 for non-q-gram version\n        pval : int or float\n            The :math:`p`-value of the :math:`L^p`-space\n        normalized : bool\n            Normalizes to [0, 1] if True\n        alphabet : collection or int\n            The values or size of the alphabet\n\n        Returns\n        -------\n        float\n            The Minkowski distance\n\n        Examples\n        --------\n        >>> cmp = Minkowski()\n        >>> cmp.dist_abs('cat', 'hat')\n        4.0\n        >>> cmp.dist_abs('Niall', 'Neil')\n        7.0\n        >>> cmp.dist_abs('Colin', 'Cuilen')\n        9.0\n        >>> cmp.dist_abs('ATCG', 'TAGC')\n        10.0", "id": 
"f6652:c0:m0"}
{"signature": "def dist(self, src, tar, qval=<NUM_LIT:2>, pval=<NUM_LIT:1>, alphabet=None):", "body": "return self.dist_abs(src, tar, qval, pval, True, alphabet)<EOL>", "docstring": "Return normalized Minkowski distance of two strings.\n\n        The normalized Minkowski distance :cite:`Minkowski:1910` is a distance\n        metric in :math:`L^p`-space, normalized to [0, 1].\n\n        Parameters\n        ----------\n        src : str\n            Source string (or QGrams/Counter objects) for comparison\n        tar : str\n            Target string (or QGrams/Counter objects) for comparison\n        qval : int\n            The length of each q-gram; 0 for non-q-gram version\n        pval : int or float\n            The :math:`p`-value of the :math:`L^p`-space\n        alphabet : collection or int\n            The values or size of the alphabet\n\n        Returns\n        -------\n        float\n            The normalized Minkowski distance\n\n        Examples\n        --------\n        >>> cmp = Minkowski()\n        >>> cmp.dist('cat', 'hat')\n        0.5\n        >>> round(cmp.dist('Niall', 'Neil'), 12)\n        0.636363636364\n        >>> round(cmp.dist('Colin', 'Cuilen'), 12)\n        0.692307692308\n        >>> cmp.dist('ATCG', 'TAGC')\n        1.0", "id": "f6652:c0:m1"}
{"signature": "def minkowski(src, tar, qval=<NUM_LIT:2>, pval=<NUM_LIT:1>, normalized=False, alphabet=None):", "body": "return Minkowski().dist_abs(src, tar, qval, pval, normalized, alphabet)<EOL>", "docstring": "Return the Minkowski distance (:math:`L^p`-norm) of two strings.\n\n    This is a wrapper for :py:meth:`Minkowski.dist_abs`.\n\n    Parameters\n    ----------\n    src : str\n        Source string (or QGrams/Counter objects) for comparison\n    tar : str\n        Target string (or QGrams/Counter objects) for comparison\n    qval : int\n        The length of each q-gram; 0 for non-q-gram version\n    pval : int or float\n        The :math:`p`-value of the :math:`L^p`-space\n    normalized : bool\n        Normalizes to [0, 1] if True\n    alphabet : collection or int\n        The values or size of the alphabet\n\n    Returns\n    -------\n    float\n        The Minkowski distance\n\n    Examples\n    --------\n    >>> minkowski('cat', 'hat')\n    4.0\n    >>> minkowski('Niall', 'Neil')\n    7.0\n    >>> minkowski('Colin', 'Cuilen')\n    9.0\n    >>> minkowski('ATCG', 'TAGC')\n    10.0", "id": "f6652:m0"}
{"signature": "def sim_minkowski(src, tar, qval=<NUM_LIT:2>, pval=<NUM_LIT:1>, alphabet=None):", "body": "return Minkowski().sim(src, tar, qval, pval, alphabet)<EOL>", "docstring": "Return normalized Minkowski similarity of two strings.\n\n    This is a wrapper for :py:meth:`Minkowski.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string (or QGrams/Counter objects) for comparison\n    tar : str\n        Target string (or QGrams/Counter objects) for comparison\n    qval : int\n        The length of each q-gram; 0 for non-q-gram version\n    pval : int or float\n        The :math:`p`-value of the :math:`L^p`-space\n    alphabet : collection or int\n        The values or size of the alphabet\n\n    Returns\n    -------\n    float\n        The normalized Minkowski similarity\n\n    Examples\n    --------\n    >>> sim_minkowski('cat', 'hat')\n    0.5\n    >>> round(sim_minkowski('Niall', 'Neil'), 12)\n    0.363636363636\n    >>> round(sim_minkowski('Colin', 'Cuilen'), 12)\n    0.307692307692\n    >>> sim_minkowski('ATCG', 'TAGC')\n    0.0", "id": "f6652:m2"}
{"signature": "def sim(self, src, tar):", "body": "if src == tar:<EOL><INDENT>return <NUM_LIT:1.0><EOL><DEDENT>if not src or not tar:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>min_word, max_word = (src, tar) if len(src) < len(tar) else (tar, src)<EOL>min_len = len(min_word)<EOL>for i in range(min_len, <NUM_LIT:0>, -<NUM_LIT:1>):<EOL><INDENT>if min_word[-i:] == max_word[-i:]:<EOL><INDENT>return i / min_len<EOL><DEDENT><DEDENT>return <NUM_LIT:0.0><EOL>", "docstring": "Return the suffix similarity of two strings.\n\n        Suffix similarity is the ratio of the length of the shorter term that\n        exactly matches the longer term to the length of the shorter term,\n        beginning at the end of both terms.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n\n        Returns\n        -------\n        float\n            Suffix similarity\n\n        Examples\n        --------\n        >>> cmp = Suffix()\n        >>> cmp.sim('cat', 'hat')\n        0.6666666666666666\n        >>> cmp.sim('Niall', 'Neil')\n        0.25\n        >>> cmp.sim('aluminum', 'Catalan')\n        0.0\n        >>> cmp.sim('ATCG', 'TAGC')\n        0.0", "id": "f6653:c0:m0"}
{"signature": "def sim_suffix(src, tar):", "body": "return Suffix().sim(src, tar)<EOL>", "docstring": "Return the suffix similarity of two strings.\n\n    This is a wrapper for :py:meth:`Suffix.sim`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Suffix similarity\n\n    Examples\n    --------\n    >>> sim_suffix('cat', 'hat')\n    0.6666666666666666\n    >>> sim_suffix('Niall', 'Neil')\n    0.25\n    >>> sim_suffix('aluminum', 'Catalan')\n    0.0\n    >>> sim_suffix('ATCG', 'TAGC')\n    0.0", "id": "f6653:m0"}
{"signature": "def dist_suffix(src, tar):", "body": "return Suffix().dist(src, tar)<EOL>", "docstring": "Return the suffix distance between two strings.\n\n    This is a wrapper for :py:meth:`Suffix.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Suffix distance\n\n    Examples\n    --------\n    >>> dist_suffix('cat', 'hat')\n    0.33333333333333337\n    >>> dist_suffix('Niall', 'Neil')\n    0.75\n    >>> dist_suffix('aluminum', 'Catalan')\n    1.0\n    >>> dist_suffix('ATCG', 'TAGC')\n    1.0", "id": "f6653:m1"}
{"signature": "def dist_abs(self, src, tar, *args, **kwargs):", "body": "return self.dist(src, tar, *args, **kwargs)<EOL>", "docstring": "Return absolute distance.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        *args\n            Variable length argument list.\n        **kwargs\n            Arbitrary keyword arguments.\n\n        Returns\n        -------\n        int\n            Absolute distance", "id": "f6654:c0:m2"}
{"signature": "def dist_ncd_lzma(src, tar):", "body": "return NCDlzma().dist(src, tar)<EOL>", "docstring": "Return the NCD between two strings using LZMA compression.\n\n    This is a wrapper for :py:meth:`NCDlzma.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n\n    Returns\n    -------\n    float\n        Compression distance\n\n    Examples\n    --------\n    >>> dist_ncd_lzma('cat', 'hat')\n    0.08695652173913043\n    >>> dist_ncd_lzma('Niall', 'Neil')\n    0.16\n    >>> dist_ncd_lzma('aluminum', 'Catalan')\n    0.16\n    >>> dist_ncd_lzma('ATCG', 'TAGC')\n    0.08695652173913043", "id": "f6655:m0"}
{"signature": "def dist(self, src, tar, weights='<STR_LIT>', max_length=<NUM_LIT:8>):", "body": "return self.dist_abs(src, tar, weights, max_length, True)<EOL>", "docstring": "Return normalized distance between the Eudex hashes of two terms.\n\n        This is Eudex distance normalized to [0, 1].\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        weights : str, iterable, or generator function\n            The weights or weights generator function\n        max_length : int\n            The number of characters to encode as a eudex hash\n\n        Returns\n        -------\n        int\n            The normalized Eudex Hamming distance\n\n        Examples\n        --------\n        >>> cmp = Eudex()\n        >>> round(cmp.dist('cat', 'hat'), 12)\n        0.062745098039\n        >>> round(cmp.dist('Niall', 'Neil'), 12)\n        0.000980392157\n        >>> round(cmp.dist('Colin', 'Cuilen'), 12)\n        0.004901960784\n        >>> round(cmp.dist('ATCG', 'TAGC'), 12)\n        0.197549019608", "id": "f6656:c0:m3"}
{"signature": "def eudex_hamming(<EOL>src, tar, weights='<STR_LIT>', max_length=<NUM_LIT:8>, normalized=False<EOL>):", "body": "return Eudex().dist_abs(src, tar, weights, max_length, normalized)<EOL>", "docstring": "Calculate the Hamming distance between the Eudex hashes of two terms.\n\n    This is a wrapper for :py:meth:`Eudex.eudex_hamming`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    weights : str, iterable, or generator function\n        The weights or weights generator function\n    max_length : int\n        The number of characters to encode as a eudex hash\n    normalized : bool\n        Normalizes to [0, 1] if True\n\n    Returns\n    -------\n    int\n        The Eudex Hamming distance\n\n    Examples\n    --------\n    >>> eudex_hamming('cat', 'hat')\n    128\n    >>> eudex_hamming('Niall', 'Neil')\n    2\n    >>> eudex_hamming('Colin', 'Cuilen')\n    10\n    >>> eudex_hamming('ATCG', 'TAGC')\n    403\n\n    >>> eudex_hamming('cat', 'hat', weights='fibonacci')\n    34\n    >>> eudex_hamming('Niall', 'Neil', weights='fibonacci')\n    2\n    >>> eudex_hamming('Colin', 'Cuilen', weights='fibonacci')\n    7\n    >>> eudex_hamming('ATCG', 'TAGC', weights='fibonacci')\n    117\n\n    >>> eudex_hamming('cat', 'hat', weights=None)\n    1\n    >>> eudex_hamming('Niall', 'Neil', weights=None)\n    1\n    >>> eudex_hamming('Colin', 'Cuilen', weights=None)\n    2\n    >>> eudex_hamming('ATCG', 'TAGC', weights=None)\n    9\n\n    >>> # Using the OEIS A000142:\n    >>> eudex_hamming('cat', 'hat', [1, 1, 2, 6, 24, 120, 720, 5040])\n    1\n    >>> eudex_hamming('Niall', 'Neil', [1, 1, 2, 6, 24, 120, 720, 5040])\n    720\n    >>> eudex_hamming('Colin', 'Cuilen', [1, 1, 2, 6, 24, 120, 720, 5040])\n    744\n    >>> eudex_hamming('ATCG', 'TAGC', [1, 1, 2, 6, 24, 120, 720, 5040])\n    6243", "id": "f6656:m0"}
{"signature": "def dist_abs(<EOL>self, src, tar, weights='<STR_LIT>', max_length=<NUM_LIT:8>, normalized=False<EOL>):", "body": "<EOL>xored = eudex(src, max_length=max_length) ^ eudex(<EOL>tar, max_length=max_length<EOL>)<EOL>if not weights:<EOL><INDENT>binary = bin(xored)<EOL>distance = binary.count('<STR_LIT:1>')<EOL>if normalized:<EOL><INDENT>return distance / (len(binary) - <NUM_LIT:2>)<EOL><DEDENT>return distance<EOL><DEDENT>if callable(weights):<EOL><INDENT>weights = weights()<EOL><DEDENT>elif weights == '<STR_LIT>':<EOL><INDENT>weights = Eudex.gen_exponential()<EOL><DEDENT>elif weights == '<STR_LIT>':<EOL><INDENT>weights = Eudex.gen_fibonacci()<EOL><DEDENT>if isinstance(weights, GeneratorType):<EOL><INDENT>weights = [next(weights) for _ in range(max_length)][::-<NUM_LIT:1>]<EOL><DEDENT>distance = <NUM_LIT:0><EOL>max_distance = <NUM_LIT:0><EOL>while (xored or normalized) and weights:<EOL><INDENT>max_distance += <NUM_LIT:8> * weights[-<NUM_LIT:1>]<EOL>distance += bin(xored & <NUM_LIT>).count('<STR_LIT:1>') * weights.pop()<EOL>xored >>= <NUM_LIT:8><EOL><DEDENT>if normalized:<EOL><INDENT>distance /= max_distance<EOL><DEDENT>return distance<EOL>", "docstring": "Calculate the distance between the Eudex hashes of two terms.\n\n        Parameters\n        ----------\n        src : str\n            Source string for comparison\n        tar : str\n            Target string for comparison\n        weights : str, iterable, or generator function\n            The weights or weights generator function\n\n                - If set to ``None``, a simple Hamming distance is calculated.\n                - If set to ``exponential``, weight decays by powers of 2, as\n                  proposed in the eudex specification:\n                  https://github.com/ticki/eudex.\n                - If set to ``fibonacci``, weight decays through the Fibonacci\n                  series, as in the eudex reference implementation.\n                - If set to a callable function, this assumes 
it creates a\n                  generator and the generator is used to populate a series of\n                  weights.\n                - If set to an iterable, the iterable's values should be\n                  integers and will be used as the weights.\n\n        max_length : int\n            The number of characters to encode as a eudex hash\n        normalized : bool\n            Normalizes to [0, 1] if True\n\n        Returns\n        -------\n        int\n            The Eudex Hamming distance\n\n        Examples\n        --------\n        >>> cmp = Eudex()\n        >>> cmp.dist_abs('cat', 'hat')\n        128\n        >>> cmp.dist_abs('Niall', 'Neil')\n        2\n        >>> cmp.dist_abs('Colin', 'Cuilen')\n        10\n        >>> cmp.dist_abs('ATCG', 'TAGC')\n        403\n\n        >>> cmp.dist_abs('cat', 'hat', weights='fibonacci')\n        34\n        >>> cmp.dist_abs('Niall', 'Neil', weights='fibonacci')\n        2\n        >>> cmp.dist_abs('Colin', 'Cuilen', weights='fibonacci')\n        7\n        >>> cmp.dist_abs('ATCG', 'TAGC', weights='fibonacci')\n        117\n\n        >>> cmp.dist_abs('cat', 'hat', weights=None)\n        1\n        >>> cmp.dist_abs('Niall', 'Neil', weights=None)\n        1\n        >>> cmp.dist_abs('Colin', 'Cuilen', weights=None)\n        2\n        >>> cmp.dist_abs('ATCG', 'TAGC', weights=None)\n        9\n\n        >>> # Using the OEIS A000142:\n        >>> cmp.dist_abs('cat', 'hat', [1, 1, 2, 6, 24, 120, 720, 5040])\n        1\n        >>> cmp.dist_abs('Niall', 'Neil', [1, 1, 2, 6, 24, 120, 720, 5040])\n        720\n        >>> cmp.dist_abs('Colin', 'Cuilen',\n        ... [1, 1, 2, 6, 24, 120, 720, 5040])\n        744\n        >>> cmp.dist_abs('ATCG', 'TAGC', [1, 1, 2, 6, 24, 120, 720, 5040])\n        6243", "id": "f6656:c0:m2"}
{"signature": "def tanimoto_coeff(self, src, tar, qval=<NUM_LIT:2>):", "body": "coeff = self.sim(src, tar, qval)<EOL>if coeff != <NUM_LIT:0>:<EOL><INDENT>return log(coeff, <NUM_LIT:2>)<EOL><DEDENT>return float('<STR_LIT>')<EOL>", "docstring": "Return the Tanimoto distance between two strings.\n\n        Tanimoto distance :cite:`Tanimoto:1958` is\n        :math:`-log_{2} sim_{Tanimoto}(X, Y)`.\n\n        Parameters\n        ----------\n        src : str\n            Source string (or QGrams/Counter objects) for comparison\n        tar : str\n            Target string (or QGrams/Counter objects) for comparison\n        qval : int\n            The length of each q-gram; 0 for non-q-gram version\n\n        Returns\n        -------\n        float\n            Tanimoto distance\n\n        Examples\n        --------\n        >>> cmp = Jaccard()\n        >>> cmp.tanimoto_coeff('cat', 'hat')\n        -1.5849625007211563\n        >>> cmp.tanimoto_coeff('Niall', 'Neil')\n        -2.1699250014423126\n        >>> cmp.tanimoto_coeff('aluminum', 'Catalan')\n        -4.0\n        >>> cmp.tanimoto_coeff('ATCG', 'TAGC')\n        -inf", "id": "f6658:c0:m1"}
{"signature": "def sim(self, src, tar, qval=<NUM_LIT:2>):", "body": "return super(self.__class__, self).sim(src, tar, qval, <NUM_LIT:1>, <NUM_LIT:1>)<EOL>", "docstring": "r\"\"\"Return the Jaccard similarity of two strings.\n\n        Parameters\n        ----------\n        src : str\n            Source string (or QGrams/Counter objects) for comparison\n        tar : str\n            Target string (or QGrams/Counter objects) for comparison\n        qval : int\n            The length of each q-gram; 0 for non-q-gram version\n\n        Returns\n        -------\n        float\n            Jaccard similarity\n\n        Examples\n        --------\n        >>> cmp = Jaccard()\n        >>> cmp.sim('cat', 'hat')\n        0.3333333333333333\n        >>> cmp.sim('Niall', 'Neil')\n        0.2222222222222222\n        >>> cmp.sim('aluminum', 'Catalan')\n        0.0625\n        >>> cmp.sim('ATCG', 'TAGC')\n        0.0", "id": "f6658:c0:m0"}
{"signature": "def tanimoto(src, tar, qval=<NUM_LIT:2>):", "body": "return Jaccard().tanimoto_coeff(src, tar, qval)<EOL>", "docstring": "Return the Tanimoto coefficient of two strings.\n\n    This is a wrapper for :py:meth:`Jaccard.tanimoto_coeff`.\n\n    Parameters\n    ----------\n    src : str\n        Source string (or QGrams/Counter objects) for comparison\n    tar : str\n        Target string (or QGrams/Counter objects) for comparison\n    qval : int\n        The length of each q-gram; 0 for non-q-gram version\n\n    Returns\n    -------\n    float\n        Tanimoto distance\n\n    Examples\n    --------\n    >>> tanimoto('cat', 'hat')\n    -1.5849625007211563\n    >>> tanimoto('Niall', 'Neil')\n    -2.1699250014423126\n    >>> tanimoto('aluminum', 'Catalan')\n    -4.0\n    >>> tanimoto('ATCG', 'TAGC')\n    -inf", "id": "f6658:m2"}
{"signature": "def dist_sift4(src, tar, max_offset=<NUM_LIT:5>, max_distance=<NUM_LIT:0>):", "body": "return Sift4().dist(src, tar, max_offset, max_distance)<EOL>", "docstring": "Return the normalized \"common\" Sift4 distance between two terms.\n\n    This is a wrapper for :py:meth:`Sift4.dist`.\n\n    Parameters\n    ----------\n    src : str\n        Source string for comparison\n    tar : str\n        Target string for comparison\n    max_offset : int\n        The number of characters to search for matching letters\n    max_distance : int\n        The distance at which to stop and exit\n\n    Returns\n    -------\n    float\n        The normalized Sift4 distance\n\n    Examples\n    --------\n    >>> round(dist_sift4('cat', 'hat'), 12)\n    0.333333333333\n    >>> dist_sift4('Niall', 'Neil')\n    0.4\n    >>> dist_sift4('Colin', 'Cuilen')\n    0.5\n    >>> dist_sift4('ATCG', 'TAGC')\n    0.5", "id": "f6659:m1"}
{"signature": "def _synoname_word_approximation(<EOL>self, src_ln, tar_ln, src_fn='<STR_LIT>', tar_fn='<STR_LIT>', features=None<EOL>):", "body": "if features is None:<EOL><INDENT>features = {}<EOL><DEDENT>if '<STR_LIT>' not in features:<EOL><INDENT>features['<STR_LIT>'] = []<EOL><DEDENT>if '<STR_LIT>' not in features:<EOL><INDENT>features['<STR_LIT>'] = []<EOL><DEDENT>src_len_specials = len(features['<STR_LIT>'])<EOL>tar_len_specials = len(features['<STR_LIT>'])<EOL>if ('<STR_LIT>' in features and features['<STR_LIT>']) or (<EOL>'<STR_LIT>' in features and features['<STR_LIT>']<EOL>):<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>full_tar1 = '<STR_LIT:U+0020>'.join((tar_ln, tar_fn)).replace('<STR_LIT:->', '<STR_LIT:U+0020>').strip()<EOL>for s_pos, s_type in features['<STR_LIT>']:<EOL><INDENT>if s_type == '<STR_LIT:a>':<EOL><INDENT>full_tar1 = full_tar1[<EOL>: -(<EOL><NUM_LIT:1><EOL>+ len(<EOL>self._stc._synoname_special_table[  <EOL>s_pos<EOL>][<NUM_LIT:1>]<EOL>)<EOL>)<EOL>]<EOL><DEDENT>elif s_type == '<STR_LIT:b>':<EOL><INDENT>loc = (<EOL>full_tar1.find(<EOL>'<STR_LIT:U+0020>'<EOL>+ self._stc._synoname_special_table[  <EOL>s_pos<EOL>][<NUM_LIT:1>]<EOL>+ '<STR_LIT:U+0020>'<EOL>)<EOL>+ <NUM_LIT:1><EOL>)<EOL>full_tar1 = (<EOL>full_tar1[:loc]<EOL>+ full_tar1[<EOL>loc<EOL>+ len(<EOL>self._stc._synoname_special_table[  <EOL>s_pos<EOL>][<NUM_LIT:1>]<EOL>) :<EOL>]<EOL>)<EOL><DEDENT>elif s_type == '<STR_LIT:c>':<EOL><INDENT>full_tar1 = full_tar1[<EOL><NUM_LIT:1><EOL>+ len(<EOL>self._stc._synoname_special_table[s_pos][  <EOL><NUM_LIT:1><EOL>]<EOL>) :<EOL>]<EOL><DEDENT><DEDENT>full_src1 = '<STR_LIT:U+0020>'.join((src_ln, src_fn)).replace('<STR_LIT:->', '<STR_LIT:U+0020>').strip()<EOL>for s_pos, s_type in features['<STR_LIT>']:<EOL><INDENT>if s_type == '<STR_LIT:a>':<EOL><INDENT>full_src1 = full_src1[<EOL>: -(<EOL><NUM_LIT:1><EOL>+ len(<EOL>self._stc._synoname_special_table[  <EOL>s_pos<EOL>][<NUM_LIT:1>]<EOL>)<EOL>)<EOL>]<EOL><DEDENT>elif s_type == 
'<STR_LIT:b>':<EOL><INDENT>loc = (<EOL>full_src1.find(<EOL>'<STR_LIT:U+0020>'<EOL>+ self._stc._synoname_special_table[  <EOL>s_pos<EOL>][<NUM_LIT:1>]<EOL>+ '<STR_LIT:U+0020>'<EOL>)<EOL>+ <NUM_LIT:1><EOL>)<EOL>full_src1 = (<EOL>full_src1[:loc]<EOL>+ full_src1[<EOL>loc<EOL>+ len(<EOL>self._stc._synoname_special_table[  <EOL>s_pos<EOL>][<NUM_LIT:1>]<EOL>) :<EOL>]<EOL>)<EOL><DEDENT>elif s_type == '<STR_LIT:c>':<EOL><INDENT>full_src1 = full_src1[<EOL><NUM_LIT:1><EOL>+ len(<EOL>self._stc._synoname_special_table[s_pos][  <EOL><NUM_LIT:1><EOL>]<EOL>) :<EOL>]<EOL><DEDENT><DEDENT>full_tar2 = full_tar1<EOL>for s_pos, s_type in features['<STR_LIT>']:<EOL><INDENT>if s_type == '<STR_LIT:d>':<EOL><INDENT>full_tar2 = full_tar2[<EOL>len(<EOL>self._stc._synoname_special_table[s_pos][  <EOL><NUM_LIT:1><EOL>]<EOL>) :<EOL>]<EOL><DEDENT>elif (<EOL>s_type == '<STR_LIT:X>'<EOL>and self._stc._synoname_special_table[s_pos][<NUM_LIT:1>]  <EOL>in full_tar2<EOL>):<EOL><INDENT>loc = full_tar2.find(<EOL>'<STR_LIT:U+0020>'<EOL>+ self._stc._synoname_special_table[s_pos][<NUM_LIT:1>]  <EOL>)<EOL>full_tar2 = (<EOL>full_tar2[:loc]<EOL>+ full_tar2[<EOL>loc<EOL>+ len(<EOL>self._stc._synoname_special_table[  <EOL>s_pos<EOL>][<NUM_LIT:1>]<EOL>) :<EOL>]<EOL>)<EOL><DEDENT><DEDENT>full_src2 = full_src1<EOL>for s_pos, s_type in features['<STR_LIT>']:<EOL><INDENT>if s_type == '<STR_LIT:d>':<EOL><INDENT>full_src2 = full_src2[<EOL>len(<EOL>self._stc._synoname_special_table[s_pos][  <EOL><NUM_LIT:1><EOL>]<EOL>) :<EOL>]<EOL><DEDENT>elif (<EOL>s_type == '<STR_LIT:X>'<EOL>and self._stc._synoname_special_table[s_pos][<NUM_LIT:1>]  <EOL>in full_src2<EOL>):<EOL><INDENT>loc = full_src2.find(<EOL>'<STR_LIT:U+0020>'<EOL>+ self._stc._synoname_special_table[s_pos][<NUM_LIT:1>]  <EOL>)<EOL>full_src2 = (<EOL>full_src2[:loc]<EOL>+ full_src2[<EOL>loc<EOL>+ len(<EOL>self._stc._synoname_special_table[  <EOL>s_pos<EOL>][<NUM_LIT:1>]<EOL>) :<EOL>]<EOL>)<EOL><DEDENT><DEDENT>full_tar1 = 
self._synoname_strip_punct(full_tar1)<EOL>tar1_words = full_tar1.split()<EOL>tar1_num_words = len(tar1_words)<EOL>full_src1 = self._synoname_strip_punct(full_src1)<EOL>src1_words = full_src1.split()<EOL>src1_num_words = len(src1_words)<EOL>full_tar2 = self._synoname_strip_punct(full_tar2)<EOL>tar2_words = full_tar2.split()<EOL>tar2_num_words = len(tar2_words)<EOL>full_src2 = self._synoname_strip_punct(full_src2)<EOL>src2_words = full_src2.split()<EOL>src2_num_words = len(src2_words)<EOL>if (<EOL>src1_num_words < <NUM_LIT:2><EOL>and src_len_specials == <NUM_LIT:0><EOL>and src2_num_words < <NUM_LIT:2><EOL>and tar_len_specials == <NUM_LIT:0><EOL>):<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>if (<EOL>tar1_num_words == <NUM_LIT:1><EOL>and src1_num_words == <NUM_LIT:1><EOL>and tar1_words[<NUM_LIT:0>] == src1_words[<NUM_LIT:0>]<EOL>):<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>if tar1_num_words < <NUM_LIT:2> and tar_len_specials == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>last_found = False<EOL>for word in tar1_words:<EOL><INDENT>if src_ln.endswith(word) or word + '<STR_LIT:U+0020>' in src_ln:<EOL><INDENT>last_found = True<EOL><DEDENT><DEDENT>if not last_found:<EOL><INDENT>for word in src1_words:<EOL><INDENT>if tar_ln.endswith(word) or word + '<STR_LIT:U+0020>' in tar_ln:<EOL><INDENT>last_found = True<EOL><DEDENT><DEDENT><DEDENT>matches = <NUM_LIT:0><EOL>if last_found:<EOL><INDENT>for i, s_word in enumerate(src1_words):<EOL><INDENT>for j, t_word in enumerate(tar1_words):<EOL><INDENT>if s_word == t_word:<EOL><INDENT>src1_words[i] = '<STR_LIT:@>'<EOL>tar1_words[j] = '<STR_LIT:@>'<EOL>matches += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>w_ratio = matches / max(tar1_num_words, src1_num_words)<EOL>if matches > <NUM_LIT:1> or (<EOL>matches == <NUM_LIT:1><EOL>and src1_num_words == <NUM_LIT:1><EOL>and tar1_num_words == <NUM_LIT:1><EOL>and (tar_len_specials > <NUM_LIT:0> or src_len_specials > <NUM_LIT:0>)<EOL>):<EOL><INDENT>return w_ratio<EOL><DEDENT>if 
(<EOL>tar2_num_words == <NUM_LIT:1><EOL>and src2_num_words == <NUM_LIT:1><EOL>and tar2_words[<NUM_LIT:0>] == src2_words[<NUM_LIT:0>]<EOL>):<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>if tar2_num_words < <NUM_LIT:2> and tar_len_specials == <NUM_LIT:0>:  <EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>last_found = False<EOL>for word in tar2_words:<EOL><INDENT>if src_ln.endswith(word) or word + '<STR_LIT:U+0020>' in src_ln:<EOL><INDENT>last_found = True<EOL><DEDENT><DEDENT>if not last_found:<EOL><INDENT>for word in src2_words:<EOL><INDENT>if tar_ln.endswith(word) or word + '<STR_LIT:U+0020>' in tar_ln:<EOL><INDENT>last_found = True<EOL><DEDENT><DEDENT><DEDENT>if not last_found:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>matches = <NUM_LIT:0><EOL>if last_found:<EOL><INDENT>for i, s_word in enumerate(src2_words):<EOL><INDENT>for j, t_word in enumerate(tar2_words):<EOL><INDENT>if s_word == t_word:<EOL><INDENT>src2_words[i] = '<STR_LIT:@>'<EOL>tar2_words[j] = '<STR_LIT:@>'<EOL>matches += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>w_ratio = matches / max(tar2_num_words, src2_num_words)<EOL>if matches > <NUM_LIT:1> or (<EOL>matches == <NUM_LIT:1><EOL>and src2_num_words == <NUM_LIT:1><EOL>and tar2_num_words == <NUM_LIT:1><EOL>and (tar_len_specials > <NUM_LIT:0> or src_len_specials > <NUM_LIT:0>)<EOL>):<EOL><INDENT>return w_ratio<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Return the Synoname word approximation score for two names.\n\n        Parameters\n        ----------\n        src_ln : str\n            Last name of the source\n        tar_ln : str\n            Last name of the target\n        src_fn : str\n            First name of the source (optional)\n        tar_fn : str\n            First name of the target (optional)\n        features : dict\n            A dict containing special features calculated using\n            :py:class:`fingerprint.SynonameToolcode` (optional)\n\n        Returns\n        -------\n        float\n            The word approximation 
score\n\n        Examples\n        --------\n        >>> pe = Synoname()\n        >>> pe._synoname_word_approximation('Smith Waterman', 'Waterman',\n        ... 'Tom Joe Bob', 'Tom Joe')\n        0.6", "id": "f6660:c0:m1"}
{"signature": "def precision_gain(self):", "body": "if self.population() == <NUM_LIT:0>:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>random_precision = self.cond_pos_pop() / self.population()<EOL>return self.precision() / random_precision<EOL>", "docstring": "r\"\"\"Return gain in precision.\n\n        The gain in precision is defined as:\n        :math:`G(precision) = \\frac{precision}{random~ precision}`\n\n        Cf. https://en.wikipedia.org/wiki/Gain_(information_retrieval)\n\n        Returns\n        -------\n        float\n            The gain in precision of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.precision_gain()\n        1.3142857142857143", "id": "f6662:c0:m17"}
{"signature": "def kappa_statistic(self):", "body": "if self.population() == <NUM_LIT:0>:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>random_accuracy = (<EOL>(self._tn + self._fp) * (self._tn + self._fn)<EOL>+ (self._fn + self._tp) * (self._fp + self._tp)<EOL>) / self.population() ** <NUM_LIT:2><EOL>return (self.accuracy() - random_accuracy) / (<NUM_LIT:1> - random_accuracy)<EOL>", "docstring": "r\"\"\"Return \u03ba statistic.\n\n        The \u03ba statistic is defined as:\n        :math:`\\kappa = \\frac{accuracy - random~ accuracy}\n        {1 - random~ accuracy}`\n\n        The \u03ba statistic compares the performance of the classifier relative to\n        the performance of a random classifier. :math:`\\kappa` = 0 indicates\n        performance identical to random. :math:`\\kappa` = 1 indicates perfect\n        predictive success. :math:`\\kappa` = -1 indicates perfect predictive\n        failure.\n\n        Returns\n        -------\n        float\n            The \u03ba statistic of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.kappa_statistic()\n        0.5344129554655871", "id": "f6662:c0:m51"}
{"signature": "def __str__(self):", "body": "return '<STR_LIT>'.format(<EOL>self._tp, self._tn, self._fp, self._fn<EOL>)<EOL>", "docstring": "Cast to str.\n\n        Returns\n        -------\n        str\n            A human-readable version of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> str(ct)\n        'tp:120, tn:60, fp:20, fn:30'", "id": "f6662:c0:m2"}
{"signature": "def recall(self):", "body": "if self._tp + self._fn == <NUM_LIT:0>:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>return self._tp / (self._tp + self._fn)<EOL>", "docstring": "r\"\"\"Return recall.\n\n        Recall is defined as :math:`\\frac{tp}{tp + fn}`\n\n        AKA sensitivity\n\n        AKA true positive rate (TPR)\n\n        Cf. https://en.wikipedia.org/wiki/Precision_and_recall\n\n        Cf. https://en.wikipedia.org/wiki/Sensitivity_(test)\n\n        Cf. https://en.wikipedia.org/wiki/Information_retrieval#Recall\n\n        Returns\n        -------\n        float\n            The recall of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.recall()\n        0.8", "id": "f6662:c0:m18"}
{"signature": "def informedness(self):", "body": "return self.recall() + self.specificity() - <NUM_LIT:1><EOL>", "docstring": "Return informedness.\n\n        Informedness is defined as :math:`sensitivity + specificity - 1`.\n\n        AKA Youden's J statistic (:cite:`Youden:1950`)\n\n        AKA DeltaP'\n\n        Cf. https://en.wikipedia.org/wiki/Youden%27s_J_statistic\n\n        Returns\n        -------\n        float\n            The informedness of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.informedness()\n        0.55", "id": "f6662:c0:m26"}
{"signature": "def pr_qmean(self):", "body": "return qmean((self.precision(), self.recall()))<EOL>", "docstring": "r\"\"\"Return quadratic mean of precision & recall.\n\n        The quadratic mean of precision and recall is defined as:\n        :math:`\\sqrt{\\frac{precision^{2} + recall^{2}}{2}}`\n\n        Cf. https://en.wikipedia.org/wiki/Quadratic_mean\n\n        Returns\n        -------\n        float\n            The quadratic mean of the confusion table's precision & recall\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.pr_qmean()\n        0.8290638930598233", "id": "f6662:c0:m31"}
{"signature": "def f2_score(self):", "body": "return self.fbeta_score(<NUM_LIT>)<EOL>", "docstring": "Return :math:`F_{2}`.\n\n        The :math:`F_{2}` score emphasizes recall over precision in comparison\n        to the :math:`F_{1}` score\n\n        Cf. https://en.wikipedia.org/wiki/F1_score\n\n        Returns\n        -------\n        float\n            The :math:`F_{2}` of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.f2_score()\n        0.8108108108108109", "id": "f6662:c0:m43"}
{"signature": "def npv(self):", "body": "if self._tn + self._fn == <NUM_LIT:0>:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>return self._tn / (self._tn + self._fn)<EOL>", "docstring": "r\"\"\"Return negative predictive value (NPV).\n\n        NPV is defined as :math:`\\frac{tn}{tn + fn}`\n\n        Cf. https://en.wikipedia.org/wiki/Negative_predictive_value\n\n        Returns\n        -------\n        float\n            The negative predictive value of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.npv()\n        0.6666666666666666", "id": "f6662:c0:m20"}
{"signature": "def correct_pop(self):", "body": "return self._tp + self._tn<EOL>", "docstring": "Return correct population.\n\n        Returns\n        -------\n        int\n            The correct population of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.correct_pop()\n        180", "id": "f6662:c0:m9"}
{"signature": "def pr_hoelder_mean(self, exp=<NUM_LIT:2>):", "body": "return hoelder_mean((self.precision(), self.recall()), exp)<EOL>", "docstring": "r\"\"\"Return H\u00f6lder (power/generalized) mean of precision & recall.\n\n        The power mean of precision and recall is defined as:\n        :math:`\\frac{1}{2} \\cdot\n        \\sqrt[exp]{precision^{exp} + recall^{exp}}`\n        for :math:`exp \\ne 0`, and the geometric mean for :math:`exp = 0`\n\n        Cf. https://en.wikipedia.org/wiki/Generalized_mean\n\n        Parameters\n        ----------\n        exp : float\n            The exponent of the H\u00f6lder mean\n\n        Returns\n        -------\n        float\n            The H\u00f6lder mean for the given exponent of the confusion table's\n            precision & recall\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.pr_hoelder_mean()\n        0.8290638930598233", "id": "f6662:c0:m38"}
{"signature": "def pr_amean(self):", "body": "return amean((self.precision(), self.recall()))<EOL>", "docstring": "r\"\"\"Return arithmetic mean of precision & recall.\n\n        The arithmetic mean of precision and recall is defined as:\n        :math:`\\frac{precision \\cdot recall}{2}`\n\n        Cf. https://en.wikipedia.org/wiki/Arithmetic_mean\n\n        Returns\n        -------\n        float\n            The arithmetic mean of the confusion table's precision & recall\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.pr_amean()\n        0.8285714285714285", "id": "f6662:c0:m28"}
{"signature": "def __eq__(self, other):", "body": "if isinstance(other, ConfusionTable):<EOL><INDENT>if id(self) == id(other):<EOL><INDENT>return True<EOL><DEDENT>if (<EOL>self._tp == other.true_pos()<EOL>and self._tn == other.true_neg()<EOL>and self._fp == other.false_pos()<EOL>and self._fn == other.false_neg()<EOL>):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>elif isinstance(other, (tuple, list)):<EOL><INDENT>if (<EOL>self._tp == other[<NUM_LIT:0>]<EOL>and self._tn == other[<NUM_LIT:1>]<EOL>and self._fp == other[<NUM_LIT:2>]<EOL>and self._fn == other[<NUM_LIT:3>]<EOL>):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>elif isinstance(other, dict):<EOL><INDENT>if (<EOL>self._tp == other['<STR_LIT>']<EOL>and self._tn == other['<STR_LIT>']<EOL>and self._fp == other['<STR_LIT>']<EOL>and self._fn == other['<STR_LIT>']<EOL>):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Perform eqality (==) comparison.\n\n        Compares a ConfusionTable to another ConfusionTable or its equivalent\n        in the form of a tuple, list, or dict.\n\n        Parameters\n        ----------\n        other : ConfusionTable\n            Another ConfusionTable object to compare to\n\n        Returns\n        -------\n        bool\n            True if two ConfusionTables are the same object or all four of\n            their attributes are equal\n\n        Examples\n        --------\n        >>> ct1 = ConfusionTable(120, 60, 20, 30)\n        >>> ct2 = ConfusionTable(120, 60, 20, 30)\n        >>> ct3 = ConfusionTable(60, 30, 10, 15)\n\n        >>> ct1 == ct2\n        True\n        >>> ct1 == ct3\n        False\n\n        >>> ct1 != ct2\n        False\n        >>> ct1 != ct3\n        True", "id": "f6662:c0:m1"}
{"signature": "def pr_heronian_mean(self):", "body": "return heronian_mean((self.precision(), self.recall()))<EOL>", "docstring": "r\"\"\"Return Heronian mean of precision & recall.\n\n        The Heronian mean of precision and recall is defined as:\n        :math:`\\frac{precision + \\sqrt{precision \\cdot recall} + recall}{3}`\n\n        Cf. https://en.wikipedia.org/wiki/Heronian_mean\n\n        Returns\n        -------\n        float\n            The Heronian mean of the confusion table's precision & recall\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.pr_heronian_mean()\n        0.8284071761178939", "id": "f6662:c0:m37"}
{"signature": "def __init__(self, tp=<NUM_LIT:0>, tn=<NUM_LIT:0>, fp=<NUM_LIT:0>, fn=<NUM_LIT:0>):", "body": "if isinstance(tp, (tuple, list)):<EOL><INDENT>if len(tp) == <NUM_LIT:4>:<EOL><INDENT>self._tp = tp[<NUM_LIT:0>]<EOL>self._tn = tp[<NUM_LIT:1>]<EOL>self._fp = tp[<NUM_LIT:2>]<EOL>self._fn = tp[<NUM_LIT:3>]<EOL><DEDENT>else:<EOL><INDENT>raise AttributeError(<EOL>'<STR_LIT>'<EOL>+ '<STR_LIT>'<EOL>)<EOL><DEDENT><DEDENT>elif isinstance(tp, dict):<EOL><INDENT>if '<STR_LIT>' in tp:<EOL><INDENT>self._tp = tp['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in tp:<EOL><INDENT>self._tn = tp['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in tp:<EOL><INDENT>self._fp = tp['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in tp:<EOL><INDENT>self._fn = tp['<STR_LIT>']<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._tp = tp<EOL>self._tn = tn<EOL>self._fp = fp<EOL>self._fn = fn<EOL><DEDENT>", "docstring": "Initialize ConfusionTable.\n\n        Parameters\n        ----------\n        tp : int or a tuple, list, or dict\n            True positives; If a tuple or list is supplied, it must include 4\n            values in the order [tp, tn, fp, fn]. If a dict is supplied, it\n            must have 4 keys, namely 'tp', 'tn', 'fp', & 'fn'.\n        tn : int\n            True negatives\n        fp : int\n            False positives\n        fn : int\n            False negatives\n\n        Raises\n        ------\n        AttributeError\n            ConfusionTable requires a 4-tuple when being created from a tuple.\n\n        Examples\n        --------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct == ConfusionTable((120, 60, 20, 30))\n        True\n        >>> ct == ConfusionTable([120, 60, 20, 30])\n        True\n        >>> ct == ConfusionTable({'tp': 120, 'tn': 60, 'fp': 20, 'fn': 30})\n        True", "id": "f6662:c0:m0"}
{"signature": "def population(self):", "body": "return self._tp + self._tn + self._fp + self._fn<EOL>", "docstring": "Return population, N.\n\n        Returns\n        -------\n        int\n            The population (N) of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.population()\n        230", "id": "f6662:c0:m15"}
{"signature": "def g_measure(self):", "body": "return self.pr_gmean()<EOL>", "docstring": "r\"\"\"Return G-measure.\n\n        :math:`G`-measure is the geometric mean of precision and recall:\n        :math:`\\sqrt{precision \\cdot recall}`\n\n        This is identical to the Fowlkes\u2013Mallows (FM) index for two\n        clusters.\n\n        Cf. https://en.wikipedia.org/wiki/F1_score#G-measure\n\n        Cf. https://en.wikipedia.org/wiki/Fowlkes%E2%80%93Mallows_index\n\n        Returns\n        -------\n        float\n            The :math:`G`-measure of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.g_measure()\n        0.828078671210825", "id": "f6662:c0:m48"}
{"signature": "def fhalf_score(self):", "body": "return self.fbeta_score(<NUM_LIT:0.5>)<EOL>", "docstring": "Return :math:`F_{0.5}` score.\n\n        The :math:`F_{0.5}` score emphasizes precision over recall in\n        comparison to the :math:`F_{1}` score\n\n        Cf. https://en.wikipedia.org/wiki/F1_score\n\n        Returns\n        -------\n        float\n            The :math:`F_{0.5}` score of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.fhalf_score()\n        0.8450704225352114", "id": "f6662:c0:m44"}
{"signature": "def markedness(self):", "body": "return self.precision() + self.npv() - <NUM_LIT:1><EOL>", "docstring": "Return markedness.\n\n        Markedness is defined as :math:`precision + npv - 1`\n\n        Returns\n        -------\n        float\n            The markedness of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.markedness()\n        0.5238095238095237", "id": "f6662:c0:m27"}
{"signature": "def fallout(self):", "body": "if self._fp + self._tn == <NUM_LIT:0>:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>return self._fp / (self._fp + self._tn)<EOL>", "docstring": "r\"\"\"Return fall-out.\n\n        Fall-out is defined as :math:`\\frac{fp}{fp + tn}`\n\n        AKA false positive rate (FPR)\n\n        Cf. https://en.wikipedia.org/wiki/Information_retrieval#Fall-out\n\n        Returns\n        -------\n        float\n            The fall-out of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.fallout()\n        0.25", "id": "f6662:c0:m21"}
{"signature": "def pr_ghmean(self):", "body": "return ghmean((self.precision(), self.recall()))<EOL>", "docstring": "Return geometric-harmonic mean of precision & recall.\n\n        Iterates between geometric & harmonic means until they converge to\n        a single value (rounded to 12 digits)\n\n        Cf. https://en.wikipedia.org/wiki/Geometric-harmonic_mean\n\n        Returns\n        -------\n        float\n            The geometric-harmonic mean of the confusion table's precision &\n            recall\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.pr_ghmean()\n        0.8278323841238441", "id": "f6662:c0:m40"}
{"signature": "def to_tuple(self):", "body": "return self._tp, self._tn, self._fp, self._fn<EOL>", "docstring": "Cast to tuple.\n\n        Returns\n        -------\n        tuple\n            The confusion table as a 4-tuple (tp, tn, fp, fn)\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.to_tuple()\n        (120, 60, 20, 30)", "id": "f6662:c0:m3"}
{"signature": "def e_score(self, beta=<NUM_LIT:1>):", "body": "return <NUM_LIT:1> - self.fbeta_score(beta)<EOL>", "docstring": "r\"\"\"Return :math:`E`-score.\n\n        This is Van Rijsbergen's effectiveness measure:\n        :math:`E=1-F_{\\beta}`.\n\n        Cf. https://en.wikipedia.org/wiki/Information_retrieval#F-measure\n\n        Parameters\n        ----------\n        beta : float\n            The :math:`\\beta` parameter in the above formula\n\n        Returns\n        -------\n        float\n            The :math:`E`-score of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.e_score()\n        0.17241379310344818", "id": "f6662:c0:m45"}
{"signature": "def accuracy_gain(self):", "body": "if self.population() == <NUM_LIT:0>:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>random_accuracy = (self.cond_pos_pop() / self.population()) ** <NUM_LIT:2> + (<EOL>self.cond_neg_pop() / self.population()<EOL>) ** <NUM_LIT:2><EOL>return self.accuracy() / random_accuracy<EOL>", "docstring": "r\"\"\"Return gain in accuracy.\n\n        The gain in accuracy is defined as:\n        :math:`G(accuracy) = \\frac{accuracy}{random~ accuracy}`\n\n        Cf. https://en.wikipedia.org/wiki/Gain_(information_retrieval)\n\n        Returns\n        -------\n        float\n            The gain in accuracy of the confusion table\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.accuracy_gain()\n        1.4325259515570934", "id": "f6662:c0:m24"}
{"signature": "def pr_cmean(self):", "body": "return cmean((self.precision(), self.recall()))<EOL>", "docstring": "r\"\"\"Return contraharmonic mean of precision & recall.\n\n        The contraharmonic mean is:\n        :math:`\\frac{precision^{2} + recall^{2}}{precision + recall}`\n\n        Cf. https://en.wikipedia.org/wiki/Contraharmonic_mean\n\n        Returns\n        -------\n        float\n            The contraharmonic mean of the confusion table's precision & recall\n\n        Example\n        -------\n        >>> ct = ConfusionTable(120, 60, 20, 30)\n        >>> ct.pr_cmean()\n        0.8295566502463055", "id": "f6662:c0:m32"}
{"signature": "def seiffert_mean(nums):", "body": "if len(nums) == <NUM_LIT:1>:<EOL><INDENT>return nums[<NUM_LIT:0>]<EOL><DEDENT>if len(nums) > <NUM_LIT:2>:<EOL><INDENT>raise AttributeError('<STR_LIT>')<EOL><DEDENT>if nums[<NUM_LIT:0>] + nums[<NUM_LIT:1>] == <NUM_LIT:0> or nums[<NUM_LIT:0>] - nums[<NUM_LIT:1>] == <NUM_LIT:0>:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>return (nums[<NUM_LIT:0>] - nums[<NUM_LIT:1>]) / (<EOL><NUM_LIT:2> * math.asin((nums[<NUM_LIT:0>] - nums[<NUM_LIT:1>]) / (nums[<NUM_LIT:0>] + nums[<NUM_LIT:1>]))<EOL>)<EOL>", "docstring": "r\"\"\"Return Seiffert's mean.\n\n    Seiffert's mean of two numbers x and y is:\n    :math:`\\frac{x - y}{4 \\cdot arctan \\sqrt{\\frac{x}{y}} - \\pi}`\n\n    It is defined in :cite:`Seiffert:1993`.\n\n    Parameters\n    ----------\n    nums : list\n        A series of numbers\n\n    Returns\n    -------\n    float\n        Sieffert's mean of nums\n\n    Raises\n    ------\n    AttributeError\n        seiffert_mean supports no more than two values\n\n    Examples\n    --------\n    >>> seiffert_mean([1, 2])\n    1.4712939827611637\n    >>> seiffert_mean([1, 0])\n    0.3183098861837907\n    >>> seiffert_mean([2, 4])\n    2.9425879655223275\n    >>> seiffert_mean([2, 1000])\n    336.84053300118825", "id": "f6663:m7"}
{"signature": "def midrange(nums):", "body": "return <NUM_LIT:0.5> * (max(nums) + min(nums))<EOL>", "docstring": "Return midrange.\n\n    The midrange is the arithmetic mean of the maximum & minimum of a series.\n\n    Cf. https://en.wikipedia.org/wiki/Midrange\n\n    Parameters\n    ----------\n    nums : list\n        A series of numbers\n\n    Returns\n    -------\n    float\n        The midrange of nums\n\n    Examples\n    --------\n    >>> midrange([1, 2, 3])\n    2.0\n    >>> midrange([1, 2, 2, 3])\n    2.0\n    >>> midrange([1, 2, 1000, 3])\n    500.5", "id": "f6663:m14"}
{"signature": "def imean(nums):", "body": "if len(nums) == <NUM_LIT:1>:<EOL><INDENT>return nums[<NUM_LIT:0>]<EOL><DEDENT>if len(nums) > <NUM_LIT:2>:<EOL><INDENT>raise AttributeError('<STR_LIT>')<EOL><DEDENT>if nums[<NUM_LIT:0>] <= <NUM_LIT:0> or nums[<NUM_LIT:1>] <= <NUM_LIT:0>:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>elif nums[<NUM_LIT:0>] == nums[<NUM_LIT:1>]:<EOL><INDENT>return nums[<NUM_LIT:0>]<EOL><DEDENT>return (<NUM_LIT:1> / math.e) * (nums[<NUM_LIT:0>] ** nums[<NUM_LIT:0>] / nums[<NUM_LIT:1>] ** nums[<NUM_LIT:1>]) ** (<EOL><NUM_LIT:1> / (nums[<NUM_LIT:0>] - nums[<NUM_LIT:1>])<EOL>)<EOL>", "docstring": "r\"\"\"Return identric (exponential) mean.\n\n    The identric mean of two numbers x and y is:\n    x if x = y\n    otherwise :math:`\\frac{1}{e} \\sqrt[x-y]{\\frac{x^x}{y^y}}`\n\n    Cf. https://en.wikipedia.org/wiki/Identric_mean\n\n    Parameters\n    ----------\n    nums : list\n        A series of numbers\n\n    Returns\n    -------\n    float\n        The identric mean of nums\n\n    Raises\n    ------\n    AttributeError\n        imean supports no more than two values\n\n    Examples\n    --------\n    >>> imean([1, 2])\n    1.4715177646857693\n    >>> imean([1, 0])\n    nan\n    >>> imean([2, 4])\n    2.9430355293715387", "id": "f6663:m6"}
{"signature": "def median(nums):", "body": "nums = sorted(nums)<EOL>mag = len(nums)<EOL>if mag % <NUM_LIT:2>:<EOL><INDENT>mag = int((mag - <NUM_LIT:1>) / <NUM_LIT:2>)<EOL>return nums[mag]<EOL><DEDENT>mag = int(mag / <NUM_LIT:2>)<EOL>med = (nums[mag - <NUM_LIT:1>] + nums[mag]) / <NUM_LIT:2><EOL>return med if not med.is_integer() else int(med)<EOL>", "docstring": "Return median.\n\n    With numbers sorted by value, the median is the middle value (if there is\n    an odd number of values) or the arithmetic mean of the two middle values\n    (if there is an even number of values).\n\n    Cf. https://en.wikipedia.org/wiki/Median\n\n    Parameters\n    ----------\n    nums : list\n        A series of numbers\n\n    Returns\n    -------\n    int or float\n        The median of nums\n\n    Examples\n    --------\n    >>> median([1, 2, 3])\n    2\n    >>> median([1, 2, 3, 4])\n    2.5\n    >>> median([1, 2, 2, 4])\n    2", "id": "f6663:m15"}
{"signature": "def hoelder_mean(nums, exp=<NUM_LIT:2>):", "body": "if exp == <NUM_LIT:0>:<EOL><INDENT>return gmean(nums)<EOL><DEDENT>return ((<NUM_LIT:1> / len(nums)) * sum(i ** exp for i in nums)) ** (<NUM_LIT:1> / exp)<EOL>", "docstring": "r\"\"\"Return H\u00f6lder (power/generalized) mean.\n\n    The H\u00f6lder mean is defined as:\n    :math:`\\sqrt[p]{\\frac{1}{|nums|} \\cdot \\sum\\limits_i{x_i^p}}`\n    for :math:`p \\ne 0`, and the geometric mean for :math:`p = 0`\n\n    Cf. https://en.wikipedia.org/wiki/Generalized_mean\n\n    Parameters\n    ----------\n    nums : list\n        A series of numbers\n    exp : numeric\n        The exponent of the H\u00f6lder mean\n\n    Returns\n    -------\n    float\n        The H\u00f6lder mean of nums for the given exponent\n\n    Examples\n    --------\n    >>> hoelder_mean([1, 2, 3, 4])\n    2.7386127875258306\n    >>> hoelder_mean([1, 2])\n    1.5811388300841898\n    >>> hoelder_mean([0, 5, 1000])\n    577.3574860228857", "id": "f6663:m10"}
{"signature": "def qmean(nums):", "body": "return (sum(i ** <NUM_LIT:2> for i in nums) / len(nums)) ** <NUM_LIT:0.5><EOL>", "docstring": "r\"\"\"Return quadratic mean.\n\n    The quadratic mean of precision and recall is defined as:\n    :math:`\\sqrt{\\sum\\limits_{i} \\frac{num_i^2}{|nums|}}`\n\n    Cf. https://en.wikipedia.org/wiki/Quadratic_mean\n\n    Parameters\n    ----------\n    nums : list\n        A series of numbers\n\n    Returns\n    -------\n    float\n        The quadratic mean of nums\n\n    Examples\n    --------\n    >>> qmean([1, 2, 3, 4])\n    2.7386127875258306\n    >>> qmean([1, 2])\n    1.5811388300841898\n    >>> qmean([0, 5, 1000])\n    577.3574860228857", "id": "f6663:m3"}
{"signature": "def mean_pairwise_similarity(<EOL>collection, metric=sim, mean_func=hmean, symmetric=False<EOL>):", "body": "if not callable(mean_func):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not callable(metric):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if hasattr(collection, '<STR_LIT>'):<EOL><INDENT>collection = collection.split()<EOL><DEDENT>if not hasattr(collection, '<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>elif len(collection) < <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>collection = list(collection)<EOL>pairwise_values = []<EOL>for i in range(len(collection)):<EOL><INDENT>for j in range(i + <NUM_LIT:1>, len(collection)):<EOL><INDENT>pairwise_values.append(metric(collection[i], collection[j]))<EOL>if symmetric:<EOL><INDENT>pairwise_values.append(metric(collection[j], collection[i]))<EOL><DEDENT><DEDENT><DEDENT>return mean_func(pairwise_values)<EOL>", "docstring": "Calculate the mean pairwise similarity of a collection of strings.\n\n    Takes the mean of the pairwise similarity between each member of a\n    collection, optionally in both directions (for asymmetric similarity\n    metrics.\n\n    Parameters\n    ----------\n    collection : list\n        A collection of terms or a string that can be split\n    metric : function\n        A similarity metric function\n    mean_func : function\n        A mean function that takes a list of values and returns a float\n    symmetric : bool\n        Set to True if all pairwise similarities should be calculated in both\n        directions\n\n    Returns\n    -------\n    float\n        The mean pairwise similarity of a collection of strings\n\n    Raises\n    ------\n    ValueError\n        mean_func must be a function\n    ValueError\n        metric must be a function\n    ValueError\n        collection is neither a string nor iterable type\n    ValueError\n        collection has fewer than two members\n\n    Examples\n    --------\n    
>>> round(mean_pairwise_similarity(['Christopher', 'Kristof',\n    ... 'Christobal']), 12)\n    0.519801980198\n    >>> round(mean_pairwise_similarity(['Niall', 'Neal', 'Neil']), 12)\n    0.545454545455", "id": "f6665:m0"}
{"signature": "def pairwise_similarity_statistics(<EOL>src_collection,<EOL>tar_collection,<EOL>metric=sim,<EOL>mean_func=amean,<EOL>symmetric=False,<EOL>):", "body": "if not callable(mean_func):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not callable(metric):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if hasattr(src_collection, '<STR_LIT>'):<EOL><INDENT>src_collection = src_collection.split()<EOL><DEDENT>if not hasattr(src_collection, '<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if hasattr(tar_collection, '<STR_LIT>'):<EOL><INDENT>tar_collection = tar_collection.split()<EOL><DEDENT>if not hasattr(tar_collection, '<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>src_collection = list(src_collection)<EOL>tar_collection = list(tar_collection)<EOL>pairwise_values = []<EOL>for src in src_collection:<EOL><INDENT>for tar in tar_collection:<EOL><INDENT>pairwise_values.append(metric(src, tar))<EOL>if symmetric:<EOL><INDENT>pairwise_values.append(metric(tar, src))<EOL><DEDENT><DEDENT><DEDENT>return (<EOL>max(pairwise_values),<EOL>min(pairwise_values),<EOL>mean_func(pairwise_values),<EOL>std(pairwise_values, mean_func, <NUM_LIT:0>),<EOL>)<EOL>", "docstring": "Calculate the pairwise similarity statistics a collection of strings.\n\n    Calculate pairwise similarities among members of two collections,\n    returning the maximum, minimum, mean (according to a supplied function,\n    arithmetic mean, by default), and (population) standard deviation\n    of those similarities.\n\n    Parameters\n    ----------\n    src_collection : list\n        A collection of terms or a string that can be split\n    tar_collection : list\n        A collection of terms or a string that can be split\n    metric : function\n        A similarity metric function\n    mean_func : function\n        A mean function that takes a list of values and returns a float\n    symmetric : bool\n        Set to True if all pairwise similarities 
should be calculated in both\n        directions\n\n    Returns\n    -------\n    tuple\n        The max, min, mean, and standard deviation of similarities\n\n    Raises\n    ------\n    ValueError\n        mean_func must be a function\n    ValueError\n        metric must be a function\n    ValueError\n        src_collection is neither a string nor iterable\n    ValueError\n        tar_collection is neither a string nor iterable\n\n    Example\n    -------\n    >>> tuple(round(_, 12) for _ in pairwise_similarity_statistics(\n    ... ['Christopher', 'Kristof', 'Christobal'], ['Niall', 'Neal', 'Neil']))\n    (0.2, 0.0, 0.118614718615, 0.075070477184)", "id": "f6665:m1"}
{"signature": "def count_fingerprint(word, n_bits=<NUM_LIT:16>, most_common=MOST_COMMON_LETTERS_CG):", "body": "return Count().fingerprint(word, n_bits, most_common)<EOL>", "docstring": "Return the count fingerprint.\n\n    This is a wrapper for :py:meth:`Count.fingerprint`.\n\n    Parameters\n    ----------\n    word : str\n        The word to fingerprint\n    n_bits : int\n        Number of bits in the fingerprint returned\n    most_common : list\n        The most common tokens in the target language, ordered by frequency\n\n    Returns\n    -------\n    int\n        The count fingerprint\n\n    Examples\n    --------\n    >>> bin(count_fingerprint('hat'))\n    '0b1010000000001'\n    >>> bin(count_fingerprint('niall'))\n    '0b10001010000'\n    >>> bin(count_fingerprint('colin'))\n    '0b101010000'\n    >>> bin(count_fingerprint('atcg'))\n    '0b1010000000000'\n    >>> bin(count_fingerprint('entreatment'))\n    '0b1111010000100000'", "id": "f6667:m0"}
{"signature": "def str_fingerprint(phrase, joiner='<STR_LIT:U+0020>'):", "body": "return String().fingerprint(phrase, joiner)<EOL>", "docstring": "Return string fingerprint.\n\n    This is a wrapper for :py:meth:`String.fingerprint`.\n\n    Parameters\n    ----------\n    phrase : str\n        The string from which to calculate the fingerprint\n    joiner : str\n        The string that will be placed between each word\n\n    Returns\n    -------\n    str\n        The fingerprint of the phrase\n\n    Example\n    -------\n    >>> str_fingerprint('The quick brown fox jumped over the lazy dog.')\n    'brown dog fox jumped lazy over quick the'", "id": "f6669:m0"}
{"signature": "def occurrence_halved_fingerprint(<EOL>word, n_bits=<NUM_LIT:16>, most_common=MOST_COMMON_LETTERS_CG<EOL>):", "body": "return OccurrenceHalved().fingerprint(word, n_bits, most_common)<EOL>", "docstring": "Return the occurrence halved fingerprint.\n\n    This is a wrapper for :py:meth:`OccurrenceHalved.fingerprint`.\n\n    Parameters\n    ----------\n    word : str\n        The word to fingerprint\n    n_bits : int\n        Number of bits in the fingerprint returned\n    most_common : list\n        The most common tokens in the target language, ordered by frequency\n\n    Returns\n    -------\n    int\n        The occurrence halved fingerprint\n\n    Examples\n    --------\n    >>> bin(occurrence_halved_fingerprint('hat'))\n    '0b1010000000010'\n    >>> bin(occurrence_halved_fingerprint('niall'))\n    '0b10010100000'\n    >>> bin(occurrence_halved_fingerprint('colin'))\n    '0b1001010000'\n    >>> bin(occurrence_halved_fingerprint('atcg'))\n    '0b10100000000000'\n    >>> bin(occurrence_halved_fingerprint('entreatment'))\n    '0b1111010000110000'", "id": "f6671:m0"}
{"signature": "def fingerprint(self, word, n_bits=<NUM_LIT:16>, most_common=MOST_COMMON_LETTERS_CG):", "body": "if n_bits % <NUM_LIT:2>:<EOL><INDENT>n_bits += <NUM_LIT:1><EOL><DEDENT>w_len = len(word) // <NUM_LIT:2><EOL>w_1 = set(word[:w_len])<EOL>w_2 = set(word[w_len:])<EOL>fingerprint = <NUM_LIT:0><EOL>for letter in most_common:<EOL><INDENT>if n_bits:<EOL><INDENT>fingerprint <<= <NUM_LIT:1><EOL>if letter in w_1:<EOL><INDENT>fingerprint += <NUM_LIT:1><EOL><DEDENT>fingerprint <<= <NUM_LIT:1><EOL>if letter in w_2:<EOL><INDENT>fingerprint += <NUM_LIT:1><EOL><DEDENT>n_bits -= <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if n_bits > <NUM_LIT:0>:<EOL><INDENT>fingerprint <<= n_bits<EOL><DEDENT>return fingerprint<EOL>", "docstring": "Return the occurrence halved fingerprint.\n\n        Based on the occurrence halved fingerprint from :cite:`Cislak:2017`.\n\n        Parameters\n        ----------\n        word : str\n            The word to fingerprint\n        n_bits : int\n            Number of bits in the fingerprint returned\n        most_common : list\n            The most common tokens in the target language, ordered by frequency\n\n        Returns\n        -------\n        int\n            The occurrence halved fingerprint\n\n        Examples\n        --------\n        >>> ohf = OccurrenceHalved()\n        >>> bin(ohf.fingerprint('hat'))\n        '0b1010000000010'\n        >>> bin(ohf.fingerprint('niall'))\n        '0b10010100000'\n        >>> bin(ohf.fingerprint('colin'))\n        '0b1001010000'\n        >>> bin(ohf.fingerprint('atcg'))\n        '0b10100000000000'\n        >>> bin(ohf.fingerprint('entreatment'))\n        '0b1111010000110000'", "id": "f6671:c0:m0"}
{"signature": "def fingerprint(<EOL>self,<EOL>phrase,<EOL>phonetic_algorithm=double_metaphone,<EOL>joiner='<STR_LIT:U+0020>',<EOL>*args,<EOL>**kwargs<EOL>):", "body": "phonetic = '<STR_LIT>'<EOL>for word in phrase.split():<EOL><INDENT>word = phonetic_algorithm(word, *args, **kwargs)<EOL>if not isinstance(word, text_type) and hasattr(word, '<STR_LIT>'):<EOL><INDENT>word = word[<NUM_LIT:0>]<EOL><DEDENT>phonetic += word + joiner<EOL><DEDENT>phonetic = phonetic[: -len(joiner)]<EOL>return super(self.__class__, self).fingerprint(phonetic)<EOL>", "docstring": "Return the phonetic fingerprint of a phrase.\n\n        Parameters\n        ----------\n        phrase : str\n            The string from which to calculate the phonetic fingerprint\n        phonetic_algorithm : function\n            A phonetic algorithm that takes a string and returns a string\n            (presumably a phonetic representation of the original string). By\n            default, this function uses :py:func:`.double_metaphone`.\n        joiner : str\n            The string that will be placed between each word\n        *args\n            Variable length argument list\n        **kwargs\n            Arbitrary keyword arguments\n\n        Returns\n        -------\n        str\n            The phonetic fingerprint of the phrase\n\n        Examples\n        --------\n        >>> pf = Phonetic()\n        >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.')\n        '0 afr fks jmpt kk ls prn tk'\n        >>> from abydos.phonetic import soundex\n        >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.',\n        ... phonetic_algorithm=soundex)\n        'b650 d200 f200 j513 l200 o160 q200 t000'", "id": "f6673:c0:m0"}
{"signature": "def phonetic_fingerprint(<EOL>phrase, phonetic_algorithm=double_metaphone, joiner='<STR_LIT:U+0020>', *args, **kwargs<EOL>):", "body": "return Phonetic().fingerprint(<EOL>phrase, phonetic_algorithm, joiner, *args, **kwargs<EOL>)<EOL>", "docstring": "Return the phonetic fingerprint of a phrase.\n\n    This is a wrapper for :py:meth:`Phonetic.fingerprint`.\n\n    Parameters\n    ----------\n    phrase : str\n        The string from which to calculate the phonetic fingerprint\n    phonetic_algorithm : function\n        A phonetic algorithm that takes a string and returns a string\n        (presumably a phonetic representation of the original string). By\n        default, this function uses :py:func:`.double_metaphone`.\n    joiner : str\n        The string that will be placed between each word\n    *args\n        Variable length argument list\n    **kwargs\n        Arbitrary keyword arguments\n\n    Returns\n    -------\n    str\n        The phonetic fingerprint of the phrase\n\n    Examples\n    --------\n    >>> phonetic_fingerprint('The quick brown fox jumped over the lazy dog.')\n    '0 afr fks jmpt kk ls prn tk'\n    >>> from abydos.phonetic import soundex\n    >>> phonetic_fingerprint('The quick brown fox jumped over the lazy dog.',\n    ... phonetic_algorithm=soundex)\n    'b650 d200 f200 j513 l200 o160 q200 t000'", "id": "f6673:m0"}
{"signature": "def qgram_fingerprint(phrase, qval=<NUM_LIT:2>, start_stop='<STR_LIT>', joiner='<STR_LIT>'):", "body": "return QGram().fingerprint(phrase, qval, start_stop, joiner)<EOL>", "docstring": "Return Q-Gram fingerprint.\n\n    This is a wrapper for :py:meth:`QGram.fingerprint`.\n\n    Parameters\n    ----------\n    phrase : str\n        The string from which to calculate the q-gram fingerprint\n    qval : int\n        The length of each q-gram (by default 2)\n    start_stop : str\n        The start & stop symbol(s) to concatenate on either end of the phrase,\n        as defined in :py:class:`tokenizer.QGrams`\n    joiner : str\n        The string that will be placed between each word\n\n    Returns\n    -------\n    str\n        The q-gram fingerprint of the phrase\n\n    Examples\n    --------\n    >>> qgram_fingerprint('The quick brown fox jumped over the lazy dog.')\n    'azbrckdoedeleqerfoheicjukblampnfogovowoxpequrortthuiumvewnxjydzy'\n    >>> qgram_fingerprint('Christopher')\n    'cherhehrisopphristto'\n    >>> qgram_fingerprint('Niall')\n    'aliallni'", "id": "f6674:m0"}
{"signature": "def fingerprint(self, phrase, qval=<NUM_LIT:2>, start_stop='<STR_LIT>', joiner='<STR_LIT>'):", "body": "phrase = unicode_normalize('<STR_LIT>', text_type(phrase.strip().lower()))<EOL>phrase = '<STR_LIT>'.join(c for c in phrase if c.isalnum())<EOL>phrase = QGrams(phrase, qval, start_stop)<EOL>phrase = joiner.join(sorted(phrase))<EOL>return phrase<EOL>", "docstring": "Return Q-Gram fingerprint.\n\n        Parameters\n        ----------\n        phrase : str\n            The string from which to calculate the q-gram fingerprint\n        qval : int\n            The length of each q-gram (by default 2)\n        start_stop : str\n            The start & stop symbol(s) to concatenate on either end of the\n            phrase, as defined in :py:class:`tokenizer.QGrams`\n        joiner : str\n            The string that will be placed between each word\n\n        Returns\n        -------\n        str\n            The q-gram fingerprint of the phrase\n\n        Examples\n        --------\n        >>> qf = QGram()\n        >>> qf.fingerprint('The quick brown fox jumped over the lazy dog.')\n        'azbrckdoedeleqerfoheicjukblampnfogovowoxpequrortthuiumvewnxjydzy'\n        >>> qf.fingerprint('Christopher')\n        'cherhehrisopphristto'\n        >>> qf.fingerprint('Niall')\n        'aliallni'", "id": "f6674:c0:m0"}
{"signature": "def fingerprint(self, word):", "body": "word = unicode_normalize('<STR_LIT>', text_type(word.upper()))<EOL>word = '<STR_LIT>'.join(c for c in word if c in self._letters)<EOL>start = word[<NUM_LIT:0>:<NUM_LIT:1>]<EOL>consonant_part = '<STR_LIT>'<EOL>vowel_part = '<STR_LIT>'<EOL>for char in word[<NUM_LIT:1>:]:<EOL><INDENT>if char != start:<EOL><INDENT>if char in self._vowels:<EOL><INDENT>if char not in vowel_part:<EOL><INDENT>vowel_part += char<EOL><DEDENT><DEDENT>elif char not in consonant_part:<EOL><INDENT>consonant_part += char<EOL><DEDENT><DEDENT><DEDENT>return start + consonant_part + vowel_part<EOL>", "docstring": "Return the skeleton key.\n\n        Parameters\n        ----------\n        word : str\n            The word to transform into its skeleton key\n\n        Returns\n        -------\n        str\n            The skeleton key\n\n        Examples\n        --------\n        >>> sk = SkeletonKey()\n        >>> sk.fingerprint('The quick brown fox jumped over the lazy dog.')\n        'THQCKBRWNFXJMPDVLZYGEUIOA'\n        >>> sk.fingerprint('Christopher')\n        'CHRSTPIOE'\n        >>> sk.fingerprint('Niall')\n        'NLIA'", "id": "f6675:c0:m0"}
{"signature": "def synoname_toolcode(lname, fname='<STR_LIT>', qual='<STR_LIT>', normalize=<NUM_LIT:0>):", "body": "return SynonameToolcode().fingerprint(lname, fname, qual, normalize)<EOL>", "docstring": "Build the Synoname toolcode.\n\n    This is a wrapper for :py:meth:`SynonameToolcode.fingerprint`.\n\n    Parameters\n    ----------\n    lname : str\n        Last name\n    fname : str\n        First name (can be blank)\n    qual : str\n        Qualifier\n    normalize : int\n        Normalization mode (0, 1, or 2)\n\n    Returns\n    -------\n    tuple\n        The transformed names and the synoname toolcode\n\n    Examples\n    --------\n    >>> synoname_toolcode('hat')\n    ('hat', '', '0000000003$$h')\n    >>> synoname_toolcode('niall')\n    ('niall', '', '0000000005$$n')\n    >>> synoname_toolcode('colin')\n    ('colin', '', '0000000005$$c')\n    >>> synoname_toolcode('atcg')\n    ('atcg', '', '0000000004$$a')\n    >>> synoname_toolcode('entreatment')\n    ('entreatment', '', '0000000011$$e')\n\n    >>> synoname_toolcode('Ste.-Marie', 'Count John II', normalize=2)\n    ('ste.-marie ii', 'count john', '0200491310$015b049a127c$smcji')\n    >>> synoname_toolcode('Michelangelo IV', '', 'Workshop of')\n    ('michelangelo iv', '', '3000550015$055b$mi')", "id": "f6677:m0"}
{"signature": "def _corpus_file(name, corpora_dir=CORPORA):", "body": "return _super_corpus_file(name, corpora_dir)<EOL>", "docstring": "Return the path to a corpus file.\n\n    Parameters\n    ----------\n    name : str\n        Corpus file\n    corpora_dir : str\n        The directory containing the corpora\n\n    Returns\n    -------\n    str\n        The path to the corpus file", "id": "f6711:m0"}
{"signature": "def pydocstyle_color(score):", "body": "<EOL>score_cutoffs = (<NUM_LIT:0>, <NUM_LIT:10>, <NUM_LIT>, <NUM_LIT:50>, <NUM_LIT:100>)<EOL>for i in range(len(score_cutoffs)):<EOL><INDENT>if score <= score_cutoffs[i]:<EOL><INDENT>return BADGE_COLORS[i]<EOL><DEDENT><DEDENT>return BADGE_COLORS[-<NUM_LIT:1>]<EOL>", "docstring": "Return pydocstyle badge color.\n\n    Parameters\n    ----------\n    score : float\n        A pydocstyle score\n\n    Returns\n    -------\n    str\n        Badge color", "id": "f6819:m1"}
{"signature": "def pylint_color(score):", "body": "<EOL>score_cutoffs = (<NUM_LIT:10>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT:5>)<EOL>for i in range(len(score_cutoffs)):<EOL><INDENT>if score >= score_cutoffs[i]:<EOL><INDENT>return BADGE_COLORS[i]<EOL><DEDENT><DEDENT>return BADGE_COLORS[-<NUM_LIT:1>]<EOL>", "docstring": "Return Pylint badge color.\n\n    Parameters\n    ----------\n    score : float\n        A Pylint score\n\n    Returns\n    -------\n    str\n        Badge color", "id": "f6819:m0"}
{"signature": "def main(argv):", "body": "first_col = <NUM_LIT:3><EOL>last_col = -<NUM_LIT:1><EOL>def print_usage():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>sys.stdout.write(<EOL>'<STR_LIT>' + '<STR_LIT>'<EOL>)<EOL>sys.exit(<NUM_LIT:2>)<EOL><DEDENT>def binarize(num):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if num == '<STR_LIT:0>':  <EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif num == '<STR_LIT>':  <EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif num == '<STR_LIT:1>':  <EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif num == '<STR_LIT:2>':  <EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT><DEDENT>def init_termdicts():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>ifile = codecs.open('<STR_LIT>', '<STR_LIT:r>', '<STR_LIT:utf-8>')<EOL>feature_mask = {}<EOL>keyline = ifile.readline().strip().split('<STR_LIT:U+002C>')[first_col:last_col]<EOL>mag = len(keyline)<EOL>for i in range(len(keyline)):<EOL><INDENT>features = '<STR_LIT>' + ('<STR_LIT>' * i) + '<STR_LIT>' + ('<STR_LIT>' * (mag - i - <NUM_LIT:1>))<EOL>feature_mask[keyline[i]] = int(features, <NUM_LIT:2>)<EOL><DEDENT>termdict = {}<EOL>for line in ifile:<EOL><INDENT>line = line.strip().rstrip('<STR_LIT:U+002C>')<EOL>if '<STR_LIT:#>' in line:<EOL><INDENT>line = line[: line.find('<STR_LIT:#>')].strip()<EOL><DEDENT>if line:<EOL><INDENT>line = line.split('<STR_LIT:U+002C>')<EOL>term = line[last_col]<EOL>features = '<STR_LIT>' + '<STR_LIT>'.join(<EOL>[binarize(val) for val in line[first_col:last_col]]<EOL>)<EOL>termdict[term] = int(features, <NUM_LIT:2>)<EOL><DEDENT><DEDENT>return termdict, feature_mask<EOL><DEDENT>def check_terms(sym, features, name, termdict):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if '<STR_LIT:#>' in name:<EOL><INDENT>name = name[: name.find('<STR_LIT:#>')].strip()<EOL><DEDENT>for term in name.split():<EOL><INDENT>if term in termdict:<EOL><INDENT>if termdict[term] & features != termdict[term]:<EOL><INDENT>sys.stdout.write(<EOL>'<STR_LIT>'<EOL>+ term<EOL>+ '<STR_LIT>'<EOL>+ sym<EOL>+ 
'<STR_LIT:\\n>'<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>sys.stdout.write(<EOL>'<STR_LIT>'<EOL>+ term<EOL>+ '<STR_LIT>'<EOL>+ name<EOL>+ '<STR_LIT>'<EOL>+ sym<EOL>+ '<STR_LIT:\\n>'<EOL>)<EOL><DEDENT><DEDENT><DEDENT>def check_entailments(sym, features, feature_mask):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>entailments = {<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>',),<EOL>'<STR_LIT>': ('<STR_LIT>',),<EOL>}<EOL>for feature in entailments:<EOL><INDENT>fname = feature[<NUM_LIT:1>:]<EOL>if feature[<NUM_LIT:0>] == '<STR_LIT:+>':<EOL><INDENT>fm = (feature_mask[fname] >> <NUM_LIT:1>) & feature_mask[fname]<EOL><DEDENT>else:<EOL><INDENT>fm = (feature_mask[fname] << <NUM_LIT:1>) & feature_mask[fname]<EOL><DEDENT>if (features & fm) == fm:<EOL><INDENT>for ent in entailments[feature]:<EOL><INDENT>ename = ent[<NUM_LIT:1>:]<EOL>if ent[<NUM_LIT:0>] == '<STR_LIT:+>':<EOL><INDENT>efm = (feature_mask[ename] >> <NUM_LIT:1>) & feature_mask[ename]<EOL><DEDENT>elif ent[<NUM_LIT:0>] == '<STR_LIT:->':<EOL><INDENT>efm = (feature_mask[ename] << <NUM_LIT:1>) & feature_mask[ename]<EOL><DEDENT>elif ent[<NUM_LIT:0>] == '<STR_LIT:0>':<EOL><INDENT>efm = <NUM_LIT:0><EOL><DEDENT>elif ent[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>efm = feature_mask[ename]<EOL><DEDENT>if ent[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>if (features & efm) == <NUM_LIT:0>:<EOL><INDENT>sys.stdout.write(<EOL>'<STR_LIT>'<EOL>+ sym<EOL>+ 
'<STR_LIT>'<EOL>+ fname<EOL>+ '<STR_LIT>'<EOL>+ ename<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if (features & efm) != efm:<EOL><INDENT>sys.stdout.write(<EOL>'<STR_LIT>'<EOL>+ sym<EOL>+ '<STR_LIT>'<EOL>+ fname<EOL>+ '<STR_LIT>'<EOL>+ ename<EOL>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>checkdict = {}  <EOL>checkset_s = set()  <EOL>checkset_f = set()  <EOL>termdict, feature_mask = init_termdicts()<EOL>ifile = '<STR_LIT>'<EOL>ofile = '<STR_LIT>'<EOL>try:<EOL><INDENT>opts = getopt.getopt(argv, '<STR_LIT>', ['<STR_LIT>', '<STR_LIT>'])[<NUM_LIT:0>]<EOL><DEDENT>except getopt.GetoptError:<EOL><INDENT>print_usage()<EOL><DEDENT>for opt, arg in opts:<EOL><INDENT>if opt == '<STR_LIT>':<EOL><INDENT>print_usage()<EOL><DEDENT>elif opt in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>ifile = codecs.open(arg, '<STR_LIT:r>', '<STR_LIT:utf-8>')<EOL><DEDENT>elif opt in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>ofile = codecs.open(arg, '<STR_LIT:w>', '<STR_LIT:utf-8>')<EOL><DEDENT><DEDENT>if not ifile:<EOL><INDENT>print_usage()<EOL><DEDENT>oline = '<STR_LIT>'<EOL>if not ofile:<EOL><INDENT>ofile = sys.stdout<EOL><DEDENT>ofile.write(oline + '<STR_LIT:\\n>')<EOL>keyline = ifile.readline().strip().split('<STR_LIT:U+002C>')[first_col:last_col]<EOL>for line in ifile:<EOL><INDENT>line = line.strip().rstrip('<STR_LIT:U+002C>')<EOL>if line.startswith('<STR_LIT>'):<EOL><INDENT>break<EOL><DEDENT>line = unicodedata.normalize('<STR_LIT>', line)<EOL>if not line or line.startswith('<STR_LIT:#>'):<EOL><INDENT>oline = '<STR_LIT:U+0020>' + line<EOL><DEDENT>else:<EOL><INDENT>line = line.strip().split('<STR_LIT:U+002C>')<EOL>if '<STR_LIT:#>' in line:<EOL><INDENT>line = line[: line.find('<STR_LIT:#>')]<EOL><DEDENT>symbol = line[<NUM_LIT:0>]<EOL>variant = int(line[<NUM_LIT:1>])<EOL>segmental = bool(line[<NUM_LIT:2>])<EOL>features = '<STR_LIT>' + '<STR_LIT>'.join(<EOL>[binarize(val) for val in line[first_col:last_col]]<EOL>)<EOL>name = line[-<NUM_LIT:1>].strip()<EOL>if not 
segmental:<EOL><INDENT>features = '<STR_LIT:->' + features<EOL><DEDENT>featint = int(features, <NUM_LIT:2>)<EOL>check_terms(symbol, featint, name, termdict)<EOL>check_entailments(symbol, featint, feature_mask)<EOL>if symbol in checkset_s:<EOL><INDENT>sys.stdout.write(<EOL>'<STR_LIT>' + symbol + '<STR_LIT>'<EOL>)<EOL><DEDENT>else:<EOL><INDENT>checkset_s.add(symbol)<EOL><DEDENT>if variant < <NUM_LIT:2>:<EOL><INDENT>if featint in checkset_f:<EOL><INDENT>sys.stdout.write(<EOL>'<STR_LIT>'<EOL>+ str(featint)<EOL>+ '<STR_LIT>'<EOL>+ '<STR_LIT>'<EOL>+ symbol<EOL>+ '<STR_LIT>'<EOL>+ checkdict[featint]<EOL>)<EOL><DEDENT>else:<EOL><INDENT>checkdict[featint] = symbol<EOL>checkset_f.add(featint)<EOL><DEDENT><DEDENT>if variant < <NUM_LIT:5>:<EOL><INDENT>oline = '<STR_LIT>'.format(<EOL>symbol, featint<EOL>)<EOL><DEDENT>else:<EOL><INDENT>oline = '<STR_LIT>'<EOL><DEDENT><DEDENT>if oline:<EOL><INDENT>ofile.write(oline + '<STR_LIT:\\n>')<EOL><DEDENT><DEDENT>ofile.write('<STR_LIT>')<EOL>mag = len(keyline)<EOL>for i in range(len(keyline)):<EOL><INDENT>features = int('<STR_LIT>' + ('<STR_LIT>' * i) + '<STR_LIT>' + ('<STR_LIT>' * (mag - i - <NUM_LIT:1>)), <NUM_LIT:2>)<EOL>oline = '<STR_LIT>'.format(keyline[i], features)<EOL>ofile.write(oline + '<STR_LIT:\\n>')<EOL><DEDENT>ofile.write('<STR_LIT>')<EOL>", "docstring": "Read input file and write to output.\n\n    Parameters\n    ----------\n    argv : list\n        Arguments to the script", "id": "f6821:m0"}
{"signature": "def get_templates(t):", "body": "return [_get_template(s) for s in utilities.asiterable(t)]<EOL>", "docstring": "Find template file(s) *t* and return their real paths.\n\n    *t* can be a single string or a list of strings. A string should\n    be one of\n\n    1. a relative or absolute path,\n    2. a file in one of the directories listed in :data:`gromacs.config.path`,\n    3. a filename in the package template directory (defined in the template dictionary\n       :data:`gromacs.config.templates`) or\n    4. a key into :data:`~gromacs.config.templates`.\n\n    The first match (in this order) is returned for each input argument.\n\n    :Arguments: *t* : template file or key (string or list of strings)\n    :Returns:   list of os.path.realpath(*t*)\n    :Raises:    :exc:`ValueError` if no file can be located.", "id": "f6847:m3"}
{"signature": "def set_gmxrc_environment(gmxrc):", "body": "<EOL>envvars = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>']<EOL>cmdargs = ['<STR_LIT>', '<STR_LIT:-c>', \"<STR_LIT>\".format(gmxrc,<EOL>'<STR_LIT:U+0020>'.join(['<STR_LIT>'.format(v) for v in envvars]))]<EOL>if not gmxrc:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>try:<EOL><INDENT>out = subprocess.check_output(cmdargs)<EOL>out = out.strip().split()<EOL>for key, value in zip(envvars, out):<EOL><INDENT>value = str(value.decode('<STR_LIT:ascii>').replace('<STR_LIT>', '<STR_LIT>'))  <EOL>os.environ[key] = value<EOL>logger.debug(\"<STR_LIT>\", key, value)<EOL><DEDENT><DEDENT>except (subprocess.CalledProcessError, OSError):<EOL><INDENT>logger.warning(\"<STR_LIT>\"<EOL>\"<STR_LIT>\", gmxrc)<EOL><DEDENT>", "docstring": "Set the environment from ``GMXRC`` provided in *gmxrc*.\n\n    Runs ``GMXRC`` in a subprocess and puts environment variables loaded by it\n    into this Python environment.\n\n    If *gmxrc* evaluates to ``False`` then nothing is done. If errors occur\n    then only a warning will be logged. Thus, it should be safe to just call\n    this function.", "id": "f6847:m8"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "self.filename = kwargs.pop('<STR_LIT:filename>', CONFIGNAME)<EOL>super(GMXConfigParser, self).__init__(*args, **kwargs)<EOL>self.set('<STR_LIT>', '<STR_LIT>',<EOL>os.path.join(\"<STR_LIT>\", os.path.basename(defaults['<STR_LIT>'])))<EOL>self.set('<STR_LIT>', '<STR_LIT>',<EOL>os.path.join(\"<STR_LIT>\", os.path.basename(defaults['<STR_LIT>'])))<EOL>self.add_section('<STR_LIT>')<EOL>self.set(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>self.set(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>self.set(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>self.set(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>self.set(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>self.set(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:yes>\")<EOL>self.add_section('<STR_LIT>')<EOL>self.set('<STR_LIT>', '<STR_LIT>', defaults['<STR_LIT>'])<EOL>self.set('<STR_LIT>', '<STR_LIT>', defaults['<STR_LIT>'])<EOL>self.set('<STR_LIT>', '<STR_LIT>', defaults['<STR_LIT>'])<EOL>default_cfg = get_template(self.cfg_template)<EOL>self.read_file(open(default_cfg))<EOL>self.read([self.filename])<EOL>", "docstring": "Reads and parses the configuration file.\n\n        Default values are loaded and then replaced with the values from\n        ``~/.gromacswrapper.cfg`` if that file exists. The global\n        configuration instance :data:`gromacswrapper.config.cfg` is updated\n        as are a number of global variables such as :data:`configdir`,\n        :data:`qscriptdir`, :data:`templatesdir`, :data:`logfilename`, ...\n\n        Normally, the configuration is only loaded when the :mod:`gromacswrapper`\n        package is imported but a re-reading of the configuration can be forced\n        anytime by calling :func:`get_configuration`.", "id": "f6847:c0:m0"}
{"signature": "def get_configuration(filename=CONFIGNAME):", "body": "global cfg, configuration    <EOL>cfg = GMXConfigParser(filename=filename)   <EOL>globals().update(cfg.configuration)        <EOL>configuration = cfg.configuration          <EOL>return cfg<EOL>", "docstring": "Reads and parses the configuration file.\n\n    Default values are loaded and then replaced with the values from\n    ``~/.gromacswrapper.cfg`` if that file exists. The global\n    configuration instance :data:`gromacswrapper.config.cfg` is updated\n    as are a number of global variables such as :data:`configdir`,\n    :data:`qscriptdir`, :data:`templatesdir`, :data:`logfilename`, ...\n\n    Normally, the configuration is only loaded when the :mod:`gromacs`\n    package is imported but a re-reading of the configuration can be forced\n    anytime by calling :func:`get_configuration`.\n\n    :Returns: a dict with all updated global configuration variables", "id": "f6847:m5"}
{"signature": "def get_template(t):", "body": "templates = [_get_template(s) for s in utilities.asiterable(t)]<EOL>if len(templates) == <NUM_LIT:1>:<EOL><INDENT>return templates[<NUM_LIT:0>]<EOL><DEDENT>return templates<EOL>", "docstring": "Find template file *t* and return its real path.\n\n    *t* can be a single string or a list of strings. A string\n    should be one of\n\n    1. a relative or absolute path,\n    2. a file in one of the directories listed in :data:`gromacs.config.path`,\n    3. a filename in the package template directory (defined in the template dictionary\n       :data:`gromacs.config.templates`) or\n    4. a key into :data:`~gromacs.config.templates`.\n\n    The first match (in this order) is returned. If the argument is a\n    single string then a single string is returned, otherwise a list\n    of strings.\n\n    :Arguments: *t* : template file or key (string or list of strings)\n    :Returns:   os.path.realpath(*t*) (or a list thereof)\n    :Raises:    :exc:`ValueError` if no file can be located.", "id": "f6847:m2"}
{"signature": "def generate_submit_scripts(templates, prefix=None, deffnm='<STR_LIT>', jobname='<STR_LIT>', budget=None,<EOL>mdrun_opts=None, walltime=<NUM_LIT:1.0>, jobarray_string=None, startdir=None,<EOL>npme=None, **kwargs):", "body": "if not jobname[<NUM_LIT:0>].isalpha():<EOL><INDENT>jobname = '<STR_LIT>'+jobname<EOL>wmsg = \"<STR_LIT>\".format(jobname)<EOL>logger.warn(wmsg)<EOL>warnings.warn(wmsg, category=AutoCorrectionWarning)<EOL><DEDENT>if prefix is None:<EOL><INDENT>prefix = \"<STR_LIT>\"<EOL><DEDENT>if mdrun_opts is not None:<EOL><INDENT>mdrun_opts = '<STR_LIT:\">'+str(mdrun_opts)+'<STR_LIT:\">'  <EOL><DEDENT>dirname = kwargs.pop('<STR_LIT>', os.path.curdir)<EOL>wt = Timedelta(hours=walltime)<EOL>walltime = wt.strftime(\"<STR_LIT>\")<EOL>wall_hours = wt.ashours<EOL>def write_script(template):<EOL><INDENT>submitscript = os.path.join(dirname, prefix + os.path.basename(template))<EOL>logger.info(\"<STR_LIT>\".format(**vars()))<EOL>qsystem = detect_queuing_system(template)<EOL>if qsystem is not None and (qsystem.name == '<STR_LIT>'):<EOL><INDENT>cbook.edit_txt(template,<EOL>[('<STR_LIT>','<STR_LIT>', deffnm),<EOL>('<STR_LIT>', '<STR_LIT>', jobname),<EOL>('<STR_LIT>', '<STR_LIT>', budget),<EOL>('<STR_LIT>', '<STR_LIT>', walltime),<EOL>('<STR_LIT>', '<STR_LIT>', wall_hours),<EOL>('<STR_LIT>', '<STR_LIT>', startdir),<EOL>('<STR_LIT>', '<STR_LIT>', npme),<EOL>('<STR_LIT>', '<STR_LIT>', mdrun_opts),  <EOL>('<STR_LIT>', '<STR_LIT>', jobarray_string),<EOL>],<EOL>newname=submitscript)<EOL>ext = os.path.splitext(submitscript)[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>cbook.edit_txt(template,<EOL>[('<STR_LIT>','<STR_LIT>', deffnm),<EOL>('<STR_LIT>', '<STR_LIT>', jobname),<EOL>('<STR_LIT>', '<STR_LIT>', budget),<EOL>('<STR_LIT>', '<STR_LIT>', walltime),<EOL>('<STR_LIT>', '<STR_LIT>', wall_hours),<EOL>('<STR_LIT>', '<STR_LIT>', startdir),<EOL>('<STR_LIT>', '<STR_LIT>', npme),<EOL>('<STR_LIT>', '<STR_LIT>', mdrun_opts),  <EOL>('<STR_LIT>', '<STR_LIT>', 
jobarray_string),<EOL>],<EOL>newname=submitscript)<EOL>ext = os.path.splitext(submitscript)[<NUM_LIT:1>]<EOL><DEDENT>if ext in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>os.chmod(submitscript, <NUM_LIT>)<EOL><DEDENT>return submitscript<EOL><DEDENT>return [write_script(template) for template in config.get_templates(templates)]<EOL>", "docstring": "Write scripts for queuing systems.\n\n\n    This sets up queuing system run scripts with a simple search and replace in\n    templates. See :func:`gromacs.cbook.edit_txt` for details. Shell scripts\n    are made executable.\n\n    :Arguments:\n      *templates*\n          Template file or list of template files. The \"files\" can also be names\n          or symbolic names for templates in the templates directory. See\n          :mod:`gromacs.config` for details and rules for writing templates.\n      *prefix*\n          Prefix for the final run script filename; by default the filename will be\n          the same as the template. [None]\n      *dirname*\n          Directory in which to place the submit scripts. [.]\n      *deffnm*\n          Default filename prefix for :program:`mdrun` ``-deffnm`` [md]\n      *jobname*\n          Name of the job in the queuing system. [MD]\n      *budget*\n          Which budget to book the runtime on [None]\n      *startdir*\n          Explicit path on the remote system (for run scripts that need to `cd`\n          into this directory at the beginning of execution) [None]\n      *mdrun_opts*\n          String of additional options for :program:`mdrun`.\n      *walltime*\n          Maximum runtime of the job in hours. [1]\n      *npme*\n          number of PME nodes\n      *jobarray_string*\n          Multi-line string that is spliced in for job array functionality\n          (see :func:`gromacs.qsub.generate_submit_array`; do not use manually)\n      *kwargs*\n          all other kwargs are ignored\n\n    :Returns: list of generated run scripts", "id": "f6848:m1"}
{"signature": "def has_arrays(self):", "body": "return self.array_variable is not None<EOL>", "docstring": "True if known how to do job arrays.", "id": "f6848:c0:m2"}
{"signature": "def isMine(self, scriptname):", "body": "suffix = os.path.splitext(scriptname)[<NUM_LIT:1>].lower()<EOL>if suffix.startswith('<STR_LIT:.>'):<EOL><INDENT>suffix = suffix[<NUM_LIT:1>:]<EOL><DEDENT>return self.suffix == suffix<EOL>", "docstring": "Primitive queuing system detection; only looks at suffix at the moment.", "id": "f6848:c0:m5"}
{"signature": "def array(self, directories):", "body": "if not self.has_arrays():<EOL><INDENT>raise NotImplementedError('<STR_LIT>'<EOL>'<STR_LIT>' % vars(self))<EOL><DEDENT>hrule = '<STR_LIT:#>'+<NUM_LIT>*'<STR_LIT:->'<EOL>lines = [<EOL>'<STR_LIT>',<EOL>hrule,<EOL>'<STR_LIT>',<EOL>self.array_flag(directories),<EOL>hrule,<EOL>'<STR_LIT>',<EOL>'<STR_LIT>']<EOL>for i,dirname in enumerate(asiterable(directories)):<EOL><INDENT>idx = i+<NUM_LIT:1>   <EOL>lines.append('<STR_LIT>'.format(**vars()))<EOL><DEDENT>lines.extend([<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'.format(**vars(self)),<EOL>'<STR_LIT>',<EOL>hrule,<EOL>'<STR_LIT>'<EOL>])<EOL>return \"<STR_LIT:\\n>\".join(lines)<EOL>", "docstring": "Return multiline string for simple array jobs over *directories*.\n\n        .. Warning:: The string is in ``bash`` and hence the template must also\n                     be ``bash`` (and *not* ``csh`` or ``sh``).", "id": "f6848:c0:m4"}
{"signature": "def detect_queuing_system(scriptfile):", "body": "for qs in queuing_systems:<EOL><INDENT>if qs.isMine(scriptfile):<EOL><INDENT>return qs<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Return the queuing system for which *scriptfile* was written.", "id": "f6848:m0"}
{"signature": "def array_flag(self, directories):", "body": "return self.flag(self.array_option % (<NUM_LIT:1>,len(directories)))<EOL>", "docstring": "Return string to embed the array launching option in the script.", "id": "f6848:c0:m3"}
{"signature": "def tolist(self):", "body": "return self[:]<EOL>", "docstring": "Return contents as a simple list.", "id": "f6849:c0:m2"}
{"signature": "def _canonicalize(self, filename):", "body": "path, ext = os.path.splitext(filename)<EOL>if not ext:<EOL><INDENT>ext = \"<STR_LIT>\"<EOL><DEDENT>return path + ext<EOL>", "docstring": "Use .collection as extension unless provided", "id": "f6849:c0:m3"}
{"signature": "def __getnewargs__(self, *args, **kwargs):", "body": "return (self.tolist(),)<EOL>", "docstring": "Provide proper initialization to make pickling with protocol 2 work", "id": "f6849:c0:m4"}
{"signature": "def load(self, filename, append=False):", "body": "tmp = cPickle.load(open(self._canonicalize(filename), '<STR_LIT:rb>'))<EOL>if append:<EOL><INDENT>self.extend(tmp)<EOL><DEDENT>else:<EOL><INDENT>self[:] = tmp[:]<EOL><DEDENT>del tmp<EOL>", "docstring": "Load collection from pickled file *filename*.\n\n        *append* determines if the saved collection is added to the current one\n        or if it replaces the current content.\n\n        If no extension is provided, \".collection\" is appended.", "id": "f6849:c0:m1"}
{"signature": "def clear_handlers(logger):", "body": "for h in logger.handlers:<EOL><INDENT>logger.removeHandler(h)<EOL><DEDENT>", "docstring": "clean out handlers in the library top level logger \n\n    (only important for reload/debug cycles...)", "id": "f6850:m1"}
{"signature": "def create(logger_name, logfile='<STR_LIT>'):", "body": "logger = logging.getLogger(logger_name)<EOL>logger.setLevel(logging.DEBUG)<EOL>logfile = logging.FileHandler(logfile)<EOL>logfile_formatter = logging.Formatter('<STR_LIT>')<EOL>logfile.setFormatter(logfile_formatter)<EOL>logger.addHandler(logfile)<EOL>console = logging.StreamHandler()<EOL>console.setLevel(logging.INFO)<EOL>formatter = logging.Formatter('<STR_LIT>')<EOL>console.setFormatter(formatter)<EOL>logger.addHandler(console)<EOL>return logger<EOL>", "docstring": "Create a top level logger.\n\n    - The file logger logs everything (including DEBUG).\n    - The console logger only logs INFO and above.\n\n    Logging to a file and the console.\n\n    See http://docs.python.org/library/logging.html?#logging-to-multiple-destinations\n\n    The top level logger of the library is named 'gromacs'.  Note that\n    we are configuring this logger with console output. If the root\n    logger also does this then we will get two output lines to the\n    console. We'll live with this because this is a simple\n    convenience library...", "id": "f6850:m0"}
{"signature": "def render_pep440(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"],<EOL>pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>return rendered<EOL>", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n    Exceptions:\n    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f6851:m9"}
{"signature": "def get_config():", "body": "<EOL>cfg = VersioneerConfig()<EOL>cfg.VCS = \"<STR_LIT>\"<EOL>cfg.style = \"<STR_LIT>\"<EOL>cfg.tag_prefix = \"<STR_LIT>\"<EOL>cfg.parentdir_prefix = \"<STR_LIT>\"<EOL>cfg.versionfile_source = \"<STR_LIT>\"<EOL>cfg.verbose = False<EOL>return cfg<EOL>", "docstring": "Create, populate and return the VersioneerConfig() object.", "id": "f6851:m1"}
{"signature": "def render_git_describe(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL><DEDENT>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[-DISTANCE-gHEX][-dirty].\n\n    Like 'git describe --tags --dirty --always'.\n\n    Exceptions:\n    1: no tags. HEX[-dirty]  (note: no 'g' prefix)", "id": "f6851:m13"}
{"signature": "def render(pieces, style):", "body": "if pieces[\"<STR_LIT:error>\"]:<EOL><INDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": pieces.get(\"<STR_LIT>\"),<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT:error>\": pieces[\"<STR_LIT:error>\"],<EOL>\"<STR_LIT:date>\": None}<EOL><DEDENT>if not style or style == \"<STR_LIT:default>\":<EOL><INDENT>style = \"<STR_LIT>\"  <EOL><DEDENT>if style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_pre(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_post(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_old(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_git_describe(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_git_describe_long(pieces)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % style)<EOL><DEDENT>return {\"<STR_LIT:version>\": rendered, \"<STR_LIT>\": pieces[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": pieces[\"<STR_LIT>\"], \"<STR_LIT:error>\": None,<EOL>\"<STR_LIT:date>\": pieces.get(\"<STR_LIT:date>\")}<EOL>", "docstring": "Render the given version pieces into the requested style.", "id": "f6851:m15"}
{"signature": "def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,<EOL>env=None):", "body": "assert isinstance(commands, list)<EOL>p = None<EOL>for c in commands:<EOL><INDENT>try:<EOL><INDENT>dispcmd = str([c] + args)<EOL>p = subprocess.Popen([c] + args, cwd=cwd, env=env,<EOL>stdout=subprocess.PIPE,<EOL>stderr=(subprocess.PIPE if hide_stderr<EOL>else None))<EOL>break<EOL><DEDENT>except EnvironmentError:<EOL><INDENT>e = sys.exc_info()[<NUM_LIT:1>]<EOL>if e.errno == errno.ENOENT:<EOL><INDENT>continue<EOL><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % dispcmd)<EOL>print(e)<EOL><DEDENT>return None, None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % (commands,))<EOL><DEDENT>return None, None<EOL><DEDENT>stdout = p.communicate()[<NUM_LIT:0>].strip()<EOL>if sys.version_info[<NUM_LIT:0>] >= <NUM_LIT:3>:<EOL><INDENT>stdout = stdout.decode()<EOL><DEDENT>if p.returncode != <NUM_LIT:0>:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % dispcmd)<EOL>print(\"<STR_LIT>\" % stdout)<EOL><DEDENT>return None, p.returncode<EOL><DEDENT>return stdout, p.returncode<EOL>", "docstring": "Call the given command(s).", "id": "f6851:m3"}
{"signature": "def render_pep440_old(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>return rendered<EOL>", "docstring": "TAG[.postDISTANCE[.dev0]] .\n\n    The \".dev0\" means dirty.\n\n    Eexceptions:\n    1: no tags. 0.postDISTANCE[.dev0]", "id": "f6851:m12"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_versions_from_keywords(keywords, tag_prefix, verbose):", "body": "if not keywords:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>date = keywords.get(\"<STR_LIT:date>\")<EOL>if date is not None:<EOL><INDENT>date = date.strip().replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:T>\", <NUM_LIT:1>).replace(\"<STR_LIT:U+0020>\", \"<STR_LIT>\", <NUM_LIT:1>)<EOL><DEDENT>refnames = keywords[\"<STR_LIT>\"].strip()<EOL>if refnames.startswith(\"<STR_LIT>\"):<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>refs = set([r.strip() for r in refnames.strip(\"<STR_LIT>\").split(\"<STR_LIT:U+002C>\")])<EOL>TAG = \"<STR_LIT>\"<EOL>tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])<EOL>if not tags:<EOL><INDENT>tags = set([r for r in refs if re.search(r'<STR_LIT>', r)])<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % \"<STR_LIT:U+002C>\".join(refs - tags))<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % \"<STR_LIT:U+002C>\".join(sorted(tags)))<EOL><DEDENT>for ref in sorted(tags):<EOL><INDENT>if ref.startswith(tag_prefix):<EOL><INDENT>r = ref[len(tag_prefix):]<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % r)<EOL><DEDENT>return {\"<STR_LIT:version>\": r,<EOL>\"<STR_LIT>\": keywords[\"<STR_LIT>\"].strip(),<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": None,<EOL>\"<STR_LIT:date>\": date}<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": keywords[\"<STR_LIT>\"].strip(),<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": \"<STR_LIT>\", \"<STR_LIT:date>\": None}<EOL>", "docstring": "Get version information from git keywords.", "id": "f6851:m6"}
{"signature": "def render_pep440_post(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] .\n\n    The \".dev0\" means dirty. Note that .dev0 sorts backwards\n    (a dirty tree will appear \"older\" than the corresponding clean one),\n    but you shouldn't be releasing software with -dirty anyways.\n\n    Exceptions:\n    1: no tags. 0.postDISTANCE[.dev0]", "id": "f6851:m11"}
{"signature": "def register(self,flag):", "body": "super(Flags,self).__setitem__(flag.name,flag)<EOL>", "docstring": "Register a new :class:`Flag` instance with the Flags registry.", "id": "f6852:c0:m3"}
{"signature": "def enable_gromacs_warnings(categories=None):", "body": "filter_gromacs_warnings('<STR_LIT>', categories=categories)<EOL>", "docstring": "Enable (\"always\") specified warnings from the gromacs package.\n\n    *categories* must be a list of warning classes or strings.\n    ``None`` selects the defaults, :data:`gromacs._less_important_warnings`.", "id": "f6853:m4"}
{"signature": "def start_logging(logfile=\"<STR_LIT>\"):", "body": "from . import log<EOL>log.create(\"<STR_LIT>\", logfile=logfile)<EOL>logging.getLogger(\"<STR_LIT>\").info(\"<STR_LIT>\",<EOL>__version__, logfile)<EOL>", "docstring": "Start logging of messages to file and console.\n\n    The default logfile is named ``gromacs.log`` and messages are\n    logged with the tag *gromacs*.", "id": "f6853:m0"}
{"signature": "def col(self, c):", "body": "m = self.COLOUR.search(c)<EOL>if not m:<EOL><INDENT>self.logger.fatal(\"<STR_LIT>\", c)<EOL>raise ParseError(\"<STR_LIT>\".format(c))<EOL><DEDENT>value = m.group('<STR_LIT:value>')<EOL>color = m.group('<STR_LIT>')<EOL>self.logger.debug(\"<STR_LIT>\", c.strip(), color, value)<EOL>return color, value<EOL>", "docstring": "Parse colour specification", "id": "f6854:c0:m7"}
{"signature": "def read(self, filename=None):", "body": "self._init_filename(filename)<EOL>self.parse()<EOL>", "docstring": "Read and parse mdp file *filename*.", "id": "f6854:c0:m3"}
{"signature": "def renumber_atoms(self):", "body": "if self.atoms:<EOL><INDENT>self._anumb_to_atom = {}<EOL>for i,atom in enumerate(self.atoms):<EOL><INDENT>atom.number = i+<NUM_LIT:1>   <EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.logger(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Reset the molecule's atoms :attr:`number` to be 1-indexed", "id": "f6855:c1:m2"}
{"signature": "def anumb_to_atom(self, anumb):", "body": "assert isinstance(anumb, int), \"<STR_LIT>\"<EOL>if not self._anumb_to_atom:   <EOL><INDENT>if self.atoms:<EOL><INDENT>for atom in self.atoms:<EOL><INDENT>self._anumb_to_atom[atom.number] = atom<EOL><DEDENT>return self._anumb_to_atom[anumb]<EOL><DEDENT>else:<EOL><INDENT>self.logger(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if anumb in self._anumb_to_atom:<EOL><INDENT>return self._anumb_to_atom[anumb]<EOL><DEDENT>else:<EOL><INDENT>self.logger(\"<STR_LIT>\".format(anumb))<EOL>return False<EOL><DEDENT><DEDENT>", "docstring": "Returns the atom object corresponding to an atom number", "id": "f6855:c1:m1"}
{"signature": "def write(self, filename):", "body": "SystemToGroTop(self, filename)<EOL>", "docstring": "Write the TOP object to a file", "id": "f6856:c0:m1"}
{"signature": "def assemble_topology(self):", "body": "self.logger.debug(\"<STR_LIT>\")<EOL>top = '<STR_LIT>'<EOL>self.logger.debug(\"<STR_LIT>\")<EOL>top += self.toptemplate<EOL>top = top.replace('<STR_LIT>',       '<STR_LIT>'.join( self._make_defaults(self.system)) )<EOL>top = top.replace('<STR_LIT>',      '<STR_LIT>'.join( self._make_atomtypes(self.system)) )<EOL>top = top.replace('<STR_LIT>',  '<STR_LIT>'.join( self._make_nonbond_param(self.system)) )<EOL>top = top.replace('<STR_LIT>',      '<STR_LIT>'.join( self._make_pairtypes(self.system)) )<EOL>top = top.replace('<STR_LIT>',      '<STR_LIT>'.join( self._make_bondtypes(self.system)) )<EOL>top = top.replace('<STR_LIT>','<STR_LIT>'.join( self._make_constrainttypes(self.system)))<EOL>top = top.replace('<STR_LIT>',     '<STR_LIT>'.join( self._make_angletypes(self.system)))<EOL>top = top.replace('<STR_LIT>',  '<STR_LIT>'.join( self._make_dihedraltypes(self.system)) )<EOL>top = top.replace('<STR_LIT>',  '<STR_LIT>'.join( self._make_impropertypes(self.system)) )<EOL>top = top.replace('<STR_LIT>',      '<STR_LIT>'.join( self._make_cmaptypes(self.system)) )<EOL>for i,(molname,m) in enumerate(self.system.dict_molname_mol.items()):<EOL><INDENT>itp = self.itptemplate<EOL>itp = itp.replace('<STR_LIT>',  '<STR_LIT>'.join( self._make_moleculetype(m, molname, m.exclusion_numb))  )<EOL>itp = itp.replace('<STR_LIT>',         '<STR_LIT>'.join( self._make_atoms(m))  )<EOL>itp = itp.replace('<STR_LIT>',         '<STR_LIT>'.join( self._make_bonds(m))  )<EOL>itp = itp.replace('<STR_LIT>',         '<STR_LIT>'.join( self._make_pairs(m))  )<EOL>itp = itp.replace('<STR_LIT>',       '<STR_LIT>'.join( self._make_settles(m))  )<EOL>itp = itp.replace('<STR_LIT>','<STR_LIT>'.join( self._make_virtual_sites3(m))  )<EOL>itp = itp.replace('<STR_LIT>',    '<STR_LIT>'.join( self._make_exclusions(m))  )<EOL>itp = itp.replace('<STR_LIT>',        '<STR_LIT>'.join( self._make_angles(m)) )<EOL>itp = itp.replace('<STR_LIT>',     '<STR_LIT>'.join( 
self._make_dihedrals(m)) )<EOL>itp = itp.replace('<STR_LIT>',     '<STR_LIT>'.join( self._make_impropers(m)) )<EOL>itp = itp.replace('<STR_LIT>',         '<STR_LIT>'.join( self._make_cmaps(m)) )<EOL>if not self.multiple_output:<EOL><INDENT>top += itp<EOL><DEDENT>else:<EOL><INDENT>outfile = \"<STR_LIT>\".format(molname)<EOL>top += '<STR_LIT>'.format( molname )<EOL>with open(outfile, \"<STR_LIT:w>\") as f:<EOL><INDENT>f.writelines([itp])<EOL><DEDENT><DEDENT><DEDENT>top += '<STR_LIT>'<EOL>top += '<STR_LIT>'<EOL>molecules = [(\"<STR_LIT>\", <NUM_LIT:0>)]<EOL>for m in self.system.molecules:<EOL><INDENT>if (molecules[-<NUM_LIT:1>][<NUM_LIT:0>] != m.name):<EOL><INDENT>molecules.append([m.name, <NUM_LIT:0>])<EOL><DEDENT>if molecules[-<NUM_LIT:1>][<NUM_LIT:0>] == m.name:<EOL><INDENT>molecules[-<NUM_LIT:1>][<NUM_LIT:1>] += <NUM_LIT:1><EOL><DEDENT><DEDENT>for molname, n in molecules[<NUM_LIT:1>:]:<EOL><INDENT>top += '<STR_LIT>'.format(molname, n)<EOL><DEDENT>top += '<STR_LIT:\\n>'<EOL>with open(self.outfile, '<STR_LIT:w>') as f:<EOL><INDENT>f.writelines([top])<EOL><DEDENT>", "docstring": "Call the various member self._make_* functions to convert the topology object into a string", "id": "f6856:c1:m2"}
{"signature": "def __init__(self, system, outfile=\"<STR_LIT>\", multiple_output=False):", "body": "self.logger = logging.getLogger('<STR_LIT>')<EOL>self.logger.debug(\"<STR_LIT>\")<EOL>self.system   = system<EOL>self.outfile = outfile<EOL>self.multiple_output = multiple_output<EOL>self.assemble_topology()<EOL>self.logger.debug(\"<STR_LIT>\")<EOL>", "docstring": "Initialize GROMACS topology writer.\n\n        :Arguments:\n          *system*\n              :class:`blocks.System` object, containing the topology\n          *outfile*\n              name of the file to write to\n          *multiple_output*\n              if True, write moleculetypes to separate files, named mol_MOLNAME.itp (default: False)", "id": "f6856:c1:m0"}
{"signature": "def to_unicode(obj):", "body": "if not isinstance(obj, six.string_types):<EOL><INDENT>return obj<EOL><DEDENT>try:<EOL><INDENT>obj = six.text_type(obj)<EOL><DEDENT>except TypeError:<EOL><INDENT>pass<EOL><DEDENT>return obj<EOL>", "docstring": "Convert obj to unicode (if it can be be converted).\n\n    Conversion is only attempted if `obj` is a string type (as\n    determined by :data:`six.string_types`).\n\n    .. versionchanged:: 0.7.0\n       removed `encoding keyword argument", "id": "f6857:m0"}
{"signature": "def _convert_fancy(self, field):", "body": "if self.sep is False:<EOL><INDENT>x = self._convert_singlet(field)<EOL><DEDENT>else:<EOL><INDENT>x = tuple([self._convert_singlet(s) for s in field.split(self.sep)])<EOL>if len(x) == <NUM_LIT:0>:<EOL><INDENT>x = '<STR_LIT>'<EOL><DEDENT>elif len(x) == <NUM_LIT:1>:<EOL><INDENT>x = x[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>return x<EOL>", "docstring": "Convert to a list (sep != None) and convert list elements.", "id": "f6857:c0:m3"}
{"signature": "def to_int64(a):", "body": "<EOL>def promote_i4(typestr):<EOL><INDENT>if typestr[<NUM_LIT:1>:] == '<STR_LIT>':<EOL><INDENT>typestr = typestr[<NUM_LIT:0>]+'<STR_LIT>'<EOL><DEDENT>return typestr<EOL><DEDENT>dtype = [(name, promote_i4(typestr)) for name,typestr in a.dtype.descr]<EOL>return a.astype(dtype)<EOL>", "docstring": "Return view of the recarray with all int32 cast to int64.", "id": "f6857:m2"}
{"signature": "def write(self, filename=None):", "body": "self._init_filename(filename)<EOL>with utilities.openany(self.real_filename, '<STR_LIT:w>') as xvg:<EOL><INDENT>xvg.write(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>xvg.write(\"<STR_LIT>\".format(self.names))<EOL>for xyy in self.array.T:<EOL><INDENT>xyy.tofile(xvg, sep=\"<STR_LIT:U+0020>\", format=\"<STR_LIT>\")  <EOL>xvg.write('<STR_LIT:\\n>')<EOL><DEDENT><DEDENT>", "docstring": "Write array to xvg file *filename* in NXY format.\n\n        .. Note:: Only plain files working at the moment, not compressed.", "id": "f6858:c0:m2"}
{"signature": "@property<EOL><INDENT>def max(self):<DEDENT>", "body": "return self.array[<NUM_LIT:1>:].max(axis=<NUM_LIT:1>)<EOL>", "docstring": "Maximum of the data columns.", "id": "f6858:c0:m8"}
{"signature": "def decimate_rms(self, a, maxpoints, **kwargs):", "body": "return self._decimate(numkit.timeseries.rms_histogrammed_function, a, maxpoints, **kwargs)<EOL>", "docstring": "Return data *a* rms-decimated on *maxpoints*.\n\n        Histograms each column into *maxpoints* bins and calculates\n        the root mean square sum in each bin as the decimated data,\n        using :func:`numkit.timeseries.rms_histogrammed_function`. The coarse\n        grained time in the first column contains the centers of the\n        histogram time.\n\n        If *a* contains <= *maxpoints* then *a* is simply returned;\n        otherwise a new array of the same dimensions but with a\n        reduced number of  *maxpoints* points is returned.\n\n        .. Note::\n\n           Assumes that the first column is time.", "id": "f6858:c0:m25"}
{"signature": "def set(self, a):", "body": "self.__array = numpy.asarray(a)<EOL>", "docstring": "Set the *array* data from *a* (i.e. completely replace).\n\n        No sanity checks at the moment...", "id": "f6858:c0:m16"}
{"signature": "def decimate_max(self, a, maxpoints, **kwargs):", "body": "return self._decimate(numkit.timeseries.max_histogrammed_function, a, maxpoints, **kwargs)<EOL>", "docstring": "Return data *a* max-decimated on *maxpoints*.\n\n        Histograms each column into *maxpoints* bins and calculates\n        the maximum in each bin as the decimated data, using\n        :func:`numkit.timeseries.max_histogrammed_function`. The coarse grained\n        time in the first column contains the centers of the histogram\n        time.\n\n        If *a* contains <= *maxpoints* then *a* is simply returned;\n        otherwise a new array of the same dimensions but with a\n        reduced number of  *maxpoints* points is returned.\n\n        .. Note::\n\n           Assumes that the first column is time.", "id": "f6858:c0:m24"}
{"signature": "@property<EOL><INDENT>def min(self):<DEDENT>", "body": "return self.array[<NUM_LIT:1>:].min(axis=<NUM_LIT:1>)<EOL>", "docstring": "Minimum of the data columns.", "id": "f6858:c0:m7"}
{"signature": "@property<EOL><INDENT>def array(self):<DEDENT>", "body": "if self.__array is None:<EOL><INDENT>self.parse()<EOL><DEDENT>return self.__array<EOL>", "docstring": "Represent xvg data as a (cached) numpy array.\n\n        The array is returned with column-first indexing, i.e. for a data file with\n        columns X Y1 Y2 Y3 ... the array a will be a[0] = X, a[1] = Y1, ... .", "id": "f6858:c0:m3"}
{"signature": "def plot_coarsened(self, **kwargs):", "body": "ax = kwargs.pop('<STR_LIT>', None)<EOL>columns = kwargs.pop('<STR_LIT>', Ellipsis)         <EOL>if columns is Ellipsis or columns is None:<EOL><INDENT>columns = numpy.arange(self.array.shape[<NUM_LIT:0>])<EOL><DEDENT>if len(columns) < <NUM_LIT:2>:<EOL><INDENT>raise MissingDataError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>color = kwargs.pop('<STR_LIT>', self.default_color_cycle)<EOL>try:<EOL><INDENT>cmap = matplotlib.cm.get_cmap(color)<EOL>colors = cmap(matplotlib.colors.Normalize()(numpy.arange(len(columns[<NUM_LIT:1>:]), dtype=float)))<EOL><DEDENT>except TypeError:<EOL><INDENT>colors = cycle(utilities.asiterable(color))<EOL><DEDENT>if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>t = columns[<NUM_LIT:0>]<EOL>kwargs['<STR_LIT>'] = True<EOL>kwargs['<STR_LIT>'] = ax<EOL>for column, color in zip(columns[<NUM_LIT:1>:], colors):<EOL><INDENT>kwargs['<STR_LIT>'] = color<EOL>self.errorbar(columns=[t, column, column], **kwargs)<EOL><DEDENT>return ax<EOL>", "docstring": "Plot data like :meth:`XVG.plot` with the range of **all** data shown.\n\n        Data are reduced to *maxpoints* (good results are obtained\n        with low values such as 100) and the actual range of observed\n        data is plotted as a translucent error band around the mean.\n\n        Each column in *columns* (except the abscissa, i.e. the first\n        column) is decimated (with :meth:`XVG.decimate`) and the range\n        of data is plotted alongside the mean using\n        :meth:`XVG.errorbar` (see for arguments). Additional\n        arguments:\n\n        :Kewords:\n           *maxpoints*\n                number of points (bins) to coarsen over\n           *color*\n                single color (used for all plots); sequence of colors\n                (will be repeated as necessary); or a matplotlib\n                colormap (e.g. \"jet\", see :mod:`matplotlib.cm`). 
The\n                default is to use the :attr:`XVG.default_color_cycle`.\n           *method*\n                Method to coarsen the data. See :meth:`XVG.decimate`\n\n        The *demean* keyword has no effect as it is required to be ``True``.\n\n        .. SeeAlso:: :meth:`XVG.plot`, :meth:`XVG.errorbar` and :meth:`XVG.decimate`", "id": "f6858:c0:m18"}
{"signature": "def __init__(self, filename=None, names=None, array=None, permissive=False, **kwargs):", "body": "self.__array = None           <EOL>self.__cache = {}             <EOL>self.savedata = kwargs.pop('<STR_LIT>', False)<EOL>if filename is not None:<EOL><INDENT>self._init_filename(filename)  <EOL><DEDENT>if names is None:<EOL><INDENT>self.names = []<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>self.names = names.split('<STR_LIT:U+002C>')<EOL><DEDENT>except AttributeError:<EOL><INDENT>self.names = names<EOL><DEDENT><DEDENT>self.metadata = kwargs.pop('<STR_LIT>', {})  <EOL>self.permissive = permissive<EOL>self.stride = kwargs.pop('<STR_LIT>', <NUM_LIT:1>)<EOL>self.corrupted_lineno = None      <EOL>self.ncorrel = kwargs.pop('<STR_LIT>', <NUM_LIT>)<EOL>self.__correlkwargs = {}          <EOL>if array is not None:<EOL><INDENT>self.set(array)<EOL><DEDENT>", "docstring": "Initialize the class from a xvg file.\n\n        :Arguments:\n              *filename*\n                    is the xvg file; it can only be of type XY or\n                    NXY. 
If it is supplied then it is read and parsed\n                    when :attr:`XVG.array` is accessed.\n              *names*\n                    optional labels for the columns (currently only\n                    written as comments to file); string with columns\n                    separated by commas or a list of strings\n              *array*\n                    read data from *array* (see :meth:`XVG.set`)\n              *permissive*\n                    ``False`` raises a :exc:`ValueError` and logs and errior\n                    when encountering data lines that it cannot parse.\n                    ``True`` ignores those lines and logs a warning---this is\n                    a risk because it might read a corrupted input file [``False``]\n              *stride*\n                    Only read every *stride* line of data [1].\n              *savedata*\n                    ``True`` includes the data (:attr:`XVG.array`` and\n                    associated caches) when the instance is pickled (see\n                    :mod:`pickle`); this is oftens not desirable because the\n                    data are already on disk (the xvg file *filename*) and the\n                    resulting pickle file can become very big. ``False`` omits\n                    those data from a pickle. [``False``]\n              *metadata*\n                    dictionary of metadata, which is not touched by the class", "id": "f6858:c0:m0"}
{"signature": "def _tcorrel(self, nstep=<NUM_LIT:100>, **kwargs):", "body": "t = self.array[<NUM_LIT:0>,::nstep]<EOL>r = gromacs.collections.Collection([numkit.timeseries.tcorrel(t, Y, nstep=<NUM_LIT:1>, **kwargs) for Y in self.array[<NUM_LIT:1>:,::nstep]])<EOL>return r<EOL>", "docstring": "Correlation \"time\" of data.\n\n        The 0-th column of the data is interpreted as a time and the\n        decay of the data is computed from the autocorrelation\n        function (using FFT).\n\n        .. SeeAlso:: :func:`numkit.timeseries.tcorrel`", "id": "f6858:c0:m9"}
{"signature": "def errorbar(self, **kwargs):", "body": "ax = kwargs.pop('<STR_LIT>', None)<EOL>color = kwargs.pop('<STR_LIT>', '<STR_LIT>')<EOL>filled = kwargs.pop('<STR_LIT>', True)<EOL>fill_alpha = kwargs.pop('<STR_LIT>', <NUM_LIT>)<EOL>kwargs.setdefault('<STR_LIT>', <NUM_LIT:0>)<EOL>kwargs.setdefault('<STR_LIT>', <NUM_LIT:1>)<EOL>kwargs.setdefault('<STR_LIT>', color)<EOL>kwargs.setdefault('<STR_LIT>', <NUM_LIT>)<EOL>kwargs.setdefault('<STR_LIT>', None)<EOL>columns = kwargs.pop('<STR_LIT>', Ellipsis)         <EOL>maxpoints = kwargs.pop('<STR_LIT>', self.maxpoints_default)<EOL>transform = kwargs.pop('<STR_LIT>', lambda x: x)  <EOL>method = kwargs.pop('<STR_LIT>', \"<STR_LIT>\")<EOL>if method != \"<STR_LIT>\":<EOL><INDENT>raise NotImplementedError(\"<STR_LIT>\")<EOL><DEDENT>error_method = kwargs.pop('<STR_LIT>', \"<STR_LIT>\")  <EOL>percentile = numpy.abs(kwargs.pop('<STR_LIT>', <NUM_LIT>))<EOL>demean = kwargs.pop('<STR_LIT>', False)<EOL>try:<EOL><INDENT>data = numpy.asarray(transform(self.array))[columns]<EOL><DEDENT>except IndexError:<EOL><INDENT>raise MissingDataError(\"<STR_LIT>\".format(columns))<EOL><DEDENT>if data.shape[-<NUM_LIT:1>] == <NUM_LIT:0>:<EOL><INDENT>raise MissingDataError(\"<STR_LIT>\")<EOL><DEDENT>a = numpy.zeros((data.shape[<NUM_LIT:0>], maxpoints), dtype=numpy.float64)<EOL>a[<NUM_LIT:0>:<NUM_LIT:2>] = self.decimate(\"<STR_LIT>\", data[<NUM_LIT:0>:<NUM_LIT:2>], maxpoints=maxpoints)<EOL>error_data = numpy.vstack((data[<NUM_LIT:0>], data[<NUM_LIT:2>:]))<EOL>if error_method == \"<STR_LIT>\":<EOL><INDENT>if percentile > <NUM_LIT:50>:<EOL><INDENT>upper_per = percentile<EOL>lower_per = <NUM_LIT:100> - percentile<EOL><DEDENT>else:<EOL><INDENT>upper_per = <NUM_LIT:100> - percentile<EOL>lower_per = percentile<EOL><DEDENT>upper = a[<NUM_LIT:2>:] = self.decimate(\"<STR_LIT>\", error_data, maxpoints=maxpoints,<EOL>per=upper_per, demean=False)[<NUM_LIT:1>:]<EOL>lower = self.decimate(\"<STR_LIT>\", error_data, maxpoints=maxpoints,<EOL>per=lower_per, 
demean=False)[<NUM_LIT:1>:]<EOL><DEDENT>else:<EOL><INDENT>a[<NUM_LIT:2>:] = self.decimate(error_method, error_data, maxpoints=maxpoints, demean=demean)[<NUM_LIT:1>:]<EOL>lower = None<EOL><DEDENT>ma = numpy.ma.MaskedArray(a, mask=numpy.logical_not(numpy.isfinite(a)))<EOL>if lower is not None:<EOL><INDENT>mlower = numpy.ma.MaskedArray(lower, mask=numpy.logical_not(numpy.isfinite(lower)))<EOL><DEDENT>X = ma[<NUM_LIT:0>]          <EOL>Y = ma[<NUM_LIT:1>]<EOL>try:<EOL><INDENT>kwargs['<STR_LIT>'] = ma[<NUM_LIT:3>]<EOL>kwargs['<STR_LIT>'] = ma[<NUM_LIT:2>]<EOL><DEDENT>except IndexError:<EOL><INDENT>try:<EOL><INDENT>kwargs['<STR_LIT>'] = ma[<NUM_LIT:2>]<EOL><DEDENT>except IndexError:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>if filled:<EOL><INDENT>if error_method == \"<STR_LIT>\":<EOL><INDENT>if demean:<EOL><INDENT>y1 = mlower[-<NUM_LIT:1>]<EOL>y2 = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>y1 = Y - mlower[-<NUM_LIT:1>]<EOL>y2 = Y + kwargs['<STR_LIT>']<EOL><DEDENT><DEDENT>else:<EOL><INDENT>y1 = Y - kwargs['<STR_LIT>']<EOL>y2 = Y + kwargs['<STR_LIT>']<EOL><DEDENT>ax.fill_between(X, y1, y2, color=color, alpha=fill_alpha)<EOL><DEDENT>else:<EOL><INDENT>if error_method == \"<STR_LIT>\":<EOL><INDENT>if demean:<EOL><INDENT>kwargs['<STR_LIT>'] = numpy.vstack((mlower[-<NUM_LIT:1>], kwargs['<STR_LIT>']))<EOL><DEDENT>else:<EOL><INDENT>kwargs['<STR_LIT>'] = numpy.vstack((Y - mlower[-<NUM_LIT:1>], Y + kwargs['<STR_LIT>']))<EOL><DEDENT>try:<EOL><INDENT>kwargs['<STR_LIT>'] = numpy.vstack((X - mlower[<NUM_LIT:0>], X + kwargs['<STR_LIT>']))<EOL><DEDENT>except (KeyError, IndexError):<EOL><INDENT>pass<EOL><DEDENT><DEDENT>ax.errorbar(X, Y, **kwargs)<EOL><DEDENT>for kw in \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\":<EOL><INDENT>kwargs.pop(kw, None)<EOL><DEDENT>kwargs['<STR_LIT>'] = <NUM_LIT:1.0><EOL>ax.plot(X, Y, color=color, **kwargs)<EOL>return ax<EOL>", 
"docstring": "errorbar plot for a single time series with errors.\n\n        Set *columns* keyword to select [x, y, dy] or [x, y, dx, dy],\n        e.g. ``columns=[0,1,2]``. See :meth:`XVG.plot` for\n        details. Only a single timeseries can be plotted and the user\n        needs to select the appropriate columns with the *columns*\n        keyword.\n\n        By default, the data are decimated (see :meth:`XVG.plot`) for\n        the default of *maxpoints* = 10000 by averaging data in\n        *maxpoints* bins.\n\n        x,y,dx,dy data can plotted with error bars in the x- and\n        y-dimension (use *filled* = ``False``).\n\n        For x,y,dy use *filled* = ``True`` to fill the region between\n        y\u00b1dy. *fill_alpha* determines the transparency of the fill\n        color. *filled* = ``False`` will draw lines for the error\n        bars. Additional keywords are passed to\n        :func:`pylab.errorbar`.\n\n        By default, the errors are decimated by plotting the 5% and\n        95% percentile of the data in each bin. The percentile can be\n        changed with the *percentile* keyword; e.g. *percentile* = 1\n        will plot the 1% and 99% perentile (as will *percentile* =\n        99).\n\n        The *error_method* keyword can be used to compute errors as\n        the root mean square sum (*error_method* = \"rms\") across each\n        bin instead of percentiles (\"percentile\"). The value of the\n        keyword *demean* is applied to the decimation of error data\n        alone.\n\n        .. SeeAlso::\n\n           :meth:`XVG.plot` lists keywords common to both methods.", "id": "f6858:c0:m19"}
{"signature": "def __init__(self, filename=None, autoconvert=True, **kwargs):", "body": "super(MDP, self).__init__(**kwargs)  <EOL>self.autoconvert = autoconvert<EOL>if filename is not None:<EOL><INDENT>self._init_filename(filename)<EOL>self.read(filename)<EOL><DEDENT>", "docstring": "Initialize mdp structure.\n\n        :Arguments:\n          *filename*\n              read from mdp file\n          *autoconvert* : boolean\n              ``True`` converts numerical values to python numerical types;\n              ``False`` keeps everything as strings [``True``]\n          *kwargs*\n              Populate the MDP with key=value pairs. (NO SANITY CHECKS; and also\n              does not work for keys that are not legal python variable names such\n              as anything that includes a minus '-' sign or starts with a number).", "id": "f6859:c0:m0"}
{"signature": "def size(self, name):", "body": "return len(self[name])<EOL>", "docstring": "Return number of entries for group *name*.", "id": "f6861:c0:m5"}
{"signature": "def get(self, name):", "body": "return self[name]<EOL>", "docstring": "Return index array for index group *name*.", "id": "f6861:c0:m3"}
{"signature": "def _transform(self, v):", "body": "return numpy.ravel(v).astype(int)<EOL>", "docstring": "Transform input to the stored representation.\n\n        Override eg with ``return set(v)`` for index lists as sets.", "id": "f6861:c0:m10"}
{"signature": "def join(self, *groupnames):", "body": "return self._sum([self[k] for k in groupnames if k in self])<EOL>", "docstring": "Return an index group that contains atoms from all  *groupnames*.\n\n        The method will silently ignore any groups that are not in the\n        index.\n\n        **Example**\n\n        Always make a solvent group from water and ions, even if not\n        all ions are present in all simulations::\n\n           I['SOLVENT'] = I.join('SOL', 'NA+', 'K+', 'CL-')", "id": "f6861:c2:m0"}
{"signature": "@property<EOL><INDENT>def ndxlist(self):<DEDENT>", "body": "return [{'<STR_LIT:name>': name, '<STR_LIT>': len(atomnumbers), '<STR_LIT>': nr+<NUM_LIT:1>} for<EOL>nr,(name,atomnumbers) in enumerate(self.items())]<EOL>", "docstring": "Return a list of groups in the same format as  :func:`gromacs.cbook.get_ndx_groups`.\n\n        Format:\n           [ {'name': group_name, 'natoms': number_atoms, 'nr':  # group_number}, ....]", "id": "f6861:c0:m8"}
{"signature": "def write(self, filename=None, ncol=ncol, format=format):", "body": "with open(self.filename(filename, ext='<STR_LIT>'), '<STR_LIT:w>') as ndx:<EOL><INDENT>for name in self:<EOL><INDENT>atomnumbers = self._getarray(name)  <EOL>ndx.write('<STR_LIT>'.format(name))<EOL>for k in range(<NUM_LIT:0>, len(atomnumbers), ncol):<EOL><INDENT>line = atomnumbers[k:k+ncol].astype(int)   <EOL>n = len(line)<EOL>ndx.write((\"<STR_LIT:U+0020>\".join(n*[format])+'<STR_LIT:\\n>') % tuple(line))<EOL><DEDENT>ndx.write('<STR_LIT:\\n>')<EOL><DEDENT><DEDENT>", "docstring": "Write index file to *filename* (or overwrite the file that the index was read from)", "id": "f6861:c0:m2"}
{"signature": "def read(self, filename=None):", "body": "self._init_filename(filename)<EOL>data = odict()<EOL>with open(self.real_filename) as ndx:<EOL><INDENT>current_section = None<EOL>for line in ndx:<EOL><INDENT>line = line.strip()<EOL>if len(line) == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>m = self.SECTION.match(line)<EOL>if m:<EOL><INDENT>current_section = m.group('<STR_LIT:name>')<EOL>data[current_section] = []  <EOL>continue<EOL><DEDENT>if current_section is not None:<EOL><INDENT>data[current_section].extend(map(int, line.split()))<EOL><DEDENT><DEDENT><DEDENT>super(NDX,self).update(odict([(name, self._transform(atomnumbers))<EOL>for name, atomnumbers in data.items()]))<EOL>", "docstring": "Read and parse index file *filename*.", "id": "f6861:c0:m1"}
{"signature": "def commandline(self, **mpiargs):", "body": "cmd = self.MDRUN.commandline()<EOL>if self.mpiexec:<EOL><INDENT>cmd = self.mpicommand(**mpiargs) + cmd<EOL><DEDENT>return cmd<EOL>", "docstring": "Returns simple command line to invoke mdrun.\n\n        If :attr:`mpiexec` is set then :meth:`mpicommand` provides the mpi\n        launcher command that prefixes the actual ``mdrun`` invocation:\n\n           :attr:`mpiexec` [*mpiargs*]  :attr:`mdrun` [*mdrun-args*]\n\n        The *mdrun-args* are set on initializing the class. Override\n        :meth:`mpicommand` to fit your system if the simple default\n        OpenMP launcher is not appropriate.", "id": "f6862:c0:m1"}
{"signature": "def get_double_or_single_prec_mdrun():", "body": "try:<EOL><INDENT>gromacs.mdrun_d(h=True, stdout=False, stderr=False)<EOL>logger.debug(\"<STR_LIT>\")<EOL>return gromacs.mdrun_d<EOL><DEDENT>except (AttributeError, GromacsError, OSError):<EOL><INDENT>wmsg = \"<STR_LIT>\"\"<STR_LIT>\"<EOL>logger.warn(wmsg)<EOL>warnings.warn(wmsg, category=AutoCorrectionWarning)<EOL>return gromacs.mdrun<EOL><DEDENT>", "docstring": "Return double precision ``mdrun`` or fall back to single precision.\n\n    This convenience function tries :func:`gromacs.mdrun_d` first and\n    if it cannot run it, falls back to :func:`gromacs.mdrun` (without\n    further checking).\n\n    .. versionadded:: 0.5.1", "id": "f6862:m2"}
{"signature": "def prehook(self, **kwargs):", "body": "cmd = ['<STR_LIT>', '<STR_LIT>']<EOL>logger.info(\"<STR_LIT>\"+\"<STR_LIT:U+0020>\".join(cmd))<EOL>rc = subprocess.call(cmd)<EOL>return rc<EOL>", "docstring": "Launch local smpd.", "id": "f6862:c3:m0"}
{"signature": "def check_mdrun_success(logfile):", "body": "if not os.path.exists(logfile):<EOL><INDENT>return None<EOL><DEDENT>with open(logfile, '<STR_LIT:rb>') as log:<EOL><INDENT>log.seek(-<NUM_LIT>, <NUM_LIT:2>)<EOL>for line in log:<EOL><INDENT>line = line.decode('<STR_LIT>')<EOL>if line.startswith(\"<STR_LIT>\"):<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT>return False<EOL>", "docstring": "Check if ``mdrun`` finished successfully.\n\n    Analyses the output from ``mdrun`` in *logfile*. Right now we are\n    simply looking for the line \"Finished mdrun on node\" in the last 1kb of\n    the file. (The file must be seeakable.)\n\n    :Arguments:\n      *logfile* : filename\n         Logfile produced by ``mdrun``.\n\n    :Returns: ``True`` if all ok, ``False`` if not finished, and\n              ``None`` if the *logfile* cannot be opened", "id": "f6862:m1"}
{"signature": "def posthook(self, **kwargs):", "body": "cmd = ['<STR_LIT>', '<STR_LIT>']<EOL>logger.info(\"<STR_LIT>\"+\"<STR_LIT:U+0020>\".join(cmd))<EOL>rc = subprocess.call(cmd)<EOL>return rc<EOL>", "docstring": "Shut down smpd", "id": "f6862:c3:m1"}
{"signature": "def prehook(self, **kwargs):", "body": "return<EOL>", "docstring": "Called directly before launching the process.", "id": "f6862:c0:m3"}
{"signature": "def mpicommand(self, *args, **kwargs):", "body": "if self.mpiexec is None:<EOL><INDENT>raise NotImplementedError(\"<STR_LIT>\")<EOL><DEDENT>ncores = kwargs.pop('<STR_LIT>', <NUM_LIT:8>)<EOL>return [self.mpiexec, '<STR_LIT>', str(ncores)]<EOL>", "docstring": "Return a list of the mpi command portion of the commandline.\n\n        Only allows primitive mpi at the moment:\n           *mpiexec* -n *ncores* *mdrun* *mdrun-args*\n\n        (This is a primitive example for OpenMP. Override it for more\n        complicated cases.)", "id": "f6862:c0:m2"}
{"signature": "def __iter__(self):", "body": "frames = self.all_frames<EOL>if len(frames) == <NUM_LIT:0>:<EOL><INDENT>self.extract()<EOL>frames = self.all_frames<EOL><DEDENT>for i in xrange(len(frames)):<EOL><INDENT>self.framenumber = i<EOL>yield self.current_framename<EOL><DEDENT>self.totalframes += len(frames)<EOL>", "docstring": "Primitive iterator.", "id": "f6863:c0:m4"}
{"signature": "def cleanup(self):", "body": "shutil.rmtree(self.framedir)<EOL>self.framedir = None<EOL>", "docstring": "Clean up all temporary frames (which can be HUGE).", "id": "f6863:c0:m6"}
{"signature": "def strip_fit(self, **kwargs):", "body": "kwargs.setdefault('<STR_LIT>', '<STR_LIT>')<EOL>kw_fit = {}<EOL>for k in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:input>'):<EOL><INDENT>if k in kwargs:<EOL><INDENT>kw_fit[k] = kwargs.pop(k)<EOL><DEDENT><DEDENT>kwargs['<STR_LIT:input>'] = kwargs.pop('<STR_LIT>', ['<STR_LIT>'])<EOL>kwargs['<STR_LIT>'] = kw_fit['<STR_LIT>'] = kwargs.pop('<STR_LIT>', self.force)<EOL>paths = self.strip_water(**kwargs)    <EOL>transformer_nowater = self.nowater[paths['<STR_LIT>']]  <EOL>return transformer_nowater.fit(**kw_fit)          <EOL>", "docstring": "Strip water and fit to the remaining system.\n\n        First runs :meth:`strip_water` and then :meth:`fit`; see there\n        for arguments.\n\n        - *strip_input* is used for :meth:`strip_water` (but is only useful in\n          special cases, e.g. when there is no Protein group defined. Then set\n          *strip_input* = ``['Other']``.\n\n        - *input* is passed on to :meth:`fit` and can contain the\n          ``[center_group, fit_group, output_group]``\n\n        - *fitgroup* is only passed to :meth:`fit` and just contains\n          the group to fit to (\"backbone\" by default)\n\n          .. warning:: *fitgroup* can only be a Gromacs default group and not\n                       a custom group (because the indices change after stripping)\n\n        - By default *fit* = \"rot+trans\" (and *fit* is passed to :meth:`fit`,\n          together with the *xy* = ``False`` keyword)\n\n        .. Note:: The call signature of :meth:`strip_water` is somewhat different from this one.", "id": "f6863:c2:m8"}
{"signature": "def _process_residue(self, selection, name=None):", "body": "if name is None:<EOL><INDENT>name = selection.replace('<STR_LIT::>', '<STR_LIT:_>')<EOL><DEDENT>m = self.RESIDUE.match(selection)<EOL>if not m:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(**vars()))<EOL><DEDENT>gmx_resid = self.gmx_resid(int(m.group('<STR_LIT>')))<EOL>residue = m.group('<STR_LIT>')<EOL>if len(residue) == <NUM_LIT:1>:<EOL><INDENT>gmx_resname = utilities.convert_aa_code(residue) <EOL><DEDENT>else:<EOL><INDENT>gmx_resname = residue                            <EOL><DEDENT>gmx_atomname = m.group('<STR_LIT>')<EOL>if gmx_atomname is None:<EOL><INDENT>gmx_atomname = '<STR_LIT>'<EOL><DEDENT>_selection = '<STR_LIT>'.format(**vars())<EOL>cmd = ['<STR_LIT>', '<STR_LIT>',<EOL>_selection,<EOL>'<STR_LIT>'.format(**vars()),<EOL>'<STR_LIT:q>']<EOL>fd, ndx = tempfile.mkstemp(suffix='<STR_LIT>', prefix=name+'<STR_LIT>')<EOL>rc,out,err = self.make_ndx(n=self.ndx, o=ndx, input=cmd)<EOL>self.check_output(out, \"<STR_LIT>\"<EOL>\"<STR_LIT>\" % vars())<EOL>return name, ndx<EOL>", "docstring": "Process residue/atom selection and return name and temp index file.", "id": "f6863:c1:m8"}
{"signature": "def keep_protein_only(self, os=None, o=None, on=None, compact=False,<EOL>groupname=\"<STR_LIT>\", **kwargs):", "body": "force = kwargs.pop('<STR_LIT>', self.force)<EOL>suffix = '<STR_LIT>'<EOL>newtpr = self.outfile(self.infix_filename(os, self.tpr, '<STR_LIT:_>'+suffix))<EOL>newxtc = self.outfile(self.infix_filename(o, self.xtc, '<STR_LIT:_>'+suffix))<EOL>newndx = self.outfile(self.infix_filename(on, self.tpr, '<STR_LIT:_>'+suffix, '<STR_LIT>'))<EOL>selection_ndx = suffix+\"<STR_LIT>\"    <EOL>if compact:<EOL><INDENT>TRJCONV = trj_compact<EOL>_input = kwargs.get('<STR_LIT:input>', ['<STR_LIT>'])<EOL>kwargs['<STR_LIT:input>'] = [_input[<NUM_LIT:0>], groupname]  <EOL>del _input<EOL><DEDENT>else:<EOL><INDENT>TRJCONV = gromacs.trjconv<EOL>kwargs['<STR_LIT:input>'] = [groupname]<EOL><DEDENT>selections = ['<STR_LIT:@>'+sel for sel in ['<STR_LIT>'] + kwargs.pop('<STR_LIT>',[])]<EOL>with utilities.in_dir(self.dirname):<EOL><INDENT>if not self.check_file_exists(newxtc, resolve=\"<STR_LIT>\", force=force):<EOL><INDENT>B = IndexBuilder(struct=self.tpr, selections=selections,<EOL>ndx=self.ndx, out_ndx=selection_ndx)<EOL>B.combine(name_all=groupname, operation=\"<STR_LIT:|>\", defaultgroups=True)<EOL>logger.info(\"<STR_LIT>\".format(**vars()))<EOL>gromacs.tpbconv(s=self.tpr, o=newtpr, n=selection_ndx, input=[groupname])<EOL>logger.info(\"<STR_LIT>\".format(**vars()))<EOL>gromacs.make_ndx(f=newtpr, o=newndx, input=['<STR_LIT:q>'], stderr=False, stdout=False)<EOL>logger.info(\"<STR_LIT>\".format(**vars()))<EOL>kwargs['<STR_LIT:s>'] = self.tpr<EOL>kwargs['<STR_LIT:f>'] = self.xtc<EOL>kwargs['<STR_LIT:n>'] = selection_ndx<EOL>kwargs['<STR_LIT:o>'] = newxtc<EOL>TRJCONV(**kwargs)<EOL>logger.info(\"<STR_LIT>\")<EOL>for ext in '<STR_LIT>', '<STR_LIT>':<EOL><INDENT>try:<EOL><INDENT>kwargs['<STR_LIT:o>'] = self.filename(newtpr, ext=ext)<EOL>TRJCONV(dump=<NUM_LIT:0>, stdout=False, stderr=False, **kwargs)  
<EOL><DEDENT>except:<EOL><INDENT>logger.exception(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % vars())<EOL><DEDENT><DEDENT><DEDENT>logger.info(\"<STR_LIT>\")<EOL><DEDENT>self.proteinonly[self.rp(newxtc)] = Transformer(dirname=self.dirname, s=newtpr,<EOL>f=newxtc, n=newndx, force=force)<EOL>return {'<STR_LIT>':self.rp(newtpr), '<STR_LIT>':self.rp(newxtc), '<STR_LIT>':self.rp(newndx)}<EOL>", "docstring": "Write xtc and tpr only containing the protein.\n\n        :Keywords:\n           *os*\n              Name of the output tpr file; by default use the original but\n              insert \"proteinonly\" before suffix.\n           *o*\n              Name of the output trajectory; by default use the original name but\n              insert \"proteinonly\" before suffix.\n           *on*\n              Name of a new index file.\n           *compact*\n              ``True``: write a compact and centered trajectory\n              ``False``: use trajectory as it is [``False``]\n           *groupname*\n              Name of the protein-only group.\n           *keepalso*\n              List of literal make_ndx selections of additional groups that should\n              be kept, e.g. ['resname DRUG', 'atom 6789'].\n           *force* : Boolean\n             - ``True``: overwrite existing trajectories\n             - ``False``: throw a IOError exception\n             - ``None``: skip existing and log a warning [default]\n           *kwargs*\n              are passed on to :func:`gromacs.cbook.trj_compact` (unless the\n              values have to be set to certain values such as s, f, n, o\n              keywords). The *input* keyword is always mangled: Only the first\n              entry (the group to centre the trajectory on) is kept, and as a\n              second group (the output group) *groupname* is used.\n\n        :Returns:\n              dictionary with keys *tpr*, *xtc*, *ndx* which are the names of the\n              the new files\n\n        .. 
warning:: The input tpr file should *not* have *any position restraints*;\n                     otherwise Gromacs will throw a hissy-fit and say\n\n                     *Software inconsistency error: Position restraint coordinates are\n                     missing*\n\n                     (This appears to be a bug in Gromacs 4.x.)", "id": "f6863:c2:m7"}
{"signature": "def cat(self, out_ndx=None):", "body": "if out_ndx is None:<EOL><INDENT>out_ndx = self.output<EOL><DEDENT>self.make_ndx(o=out_ndx, input=['<STR_LIT:q>'])<EOL>return out_ndx<EOL>", "docstring": "Concatenate input index files.\n\n        Generate a new index file that contains the default Gromacs index\n        groups (if a structure file was defined) and all index groups from the\n        input index files.\n\n        :Arguments:\n           out_ndx : filename\n              Name of the output index file; if ``None`` then use the default\n              provided to the constructore. [``None``].", "id": "f6863:c1:m5"}
{"signature": "def filter_grompp_options(**kwargs):", "body": "grompp_options = ('<STR_LIT:f>','<STR_LIT>','<STR_LIT:c>','<STR_LIT:r>','<STR_LIT:rb>','<STR_LIT:n>','<STR_LIT:p>','<STR_LIT>','<STR_LIT:o>','<STR_LIT:t>','<STR_LIT:e>',  <EOL>'<STR_LIT:h>', '<STR_LIT>', '<STR_LIT:version>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:v>', '<STR_LIT>',<EOL>'<STR_LIT:time>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>')<EOL>grompp = dict((k,v) for k,v in kwargs.items() if k in grompp_options)<EOL>other =  dict((k,v) for k,v in kwargs.items() if k not in grompp_options)<EOL>return grompp, other<EOL>", "docstring": "Returns one dictionary only containing valid :program:`grompp` options and everything else.\n\n    Option list is hard coded and nased on :class:`~gromacs.tools.grompp` 4.5.3.\n\n    :Returns: ``(grompp_dict, other_dict)``\n\n    .. versionadded:: 0.2.4", "id": "f6863:m7"}
{"signature": "def edit_mdp(mdp, new_mdp=None, extend_parameters=None, **substitutions):", "body": "if new_mdp is None:<EOL><INDENT>new_mdp = mdp<EOL><DEDENT>if extend_parameters is None:<EOL><INDENT>extend_parameters = ['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>extend_parameters = list(asiterable(extend_parameters))<EOL><DEDENT>substitutions = {k: v for k,v in substitutions.items() if v is not None}<EOL>params = list(substitutions.keys())   <EOL>def demangled(p):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return p.replace('<STR_LIT:_>', '<STR_LIT>')  <EOL><DEDENT>patterns = {parameter:<EOL>re.compile(\"\"\"<STR_LIT>\"\"\".format(demangled(parameter)), re.VERBOSE)<EOL>for parameter in substitutions}<EOL>with tempfile.TemporaryFile() as target:<EOL><INDENT>with open(mdp, '<STR_LIT:rb>') as src:<EOL><INDENT>logger.info(\"<STR_LIT>\".format(mdp, substitutions.keys()))<EOL>for line in src:<EOL><INDENT>line = line.decode('<STR_LIT:utf-8>')<EOL>new_line = line.strip()  <EOL>for p in params[:]:<EOL><INDENT>m = patterns[p].match(new_line)<EOL>if m:<EOL><INDENT>if m.group('<STR_LIT>') is None:<EOL><INDENT>comment = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>comment = \"<STR_LIT:U+0020>\"+m.group('<STR_LIT>')<EOL><DEDENT>assignment = m.group('<STR_LIT>')<EOL>if not assignment.endswith('<STR_LIT:U+0020>'):<EOL><INDENT>assignment += '<STR_LIT:U+0020>'<EOL><DEDENT>new_line = assignment<EOL>if p in extend_parameters:<EOL><INDENT>new_line += str(m.group('<STR_LIT:value>')) + '<STR_LIT:U+0020>'<EOL><DEDENT>value = \"<STR_LIT:U+0020>\".join(map(str, asiterable(substitutions[p])))<EOL>new_line += value + comment<EOL>params.remove(p)<EOL>break<EOL><DEDENT><DEDENT>target.write((new_line+'<STR_LIT:\\n>').encode('<STR_LIT:utf-8>'))<EOL><DEDENT><DEDENT>target.seek(<NUM_LIT:0>)<EOL>with open(new_mdp, '<STR_LIT:wb>') as final:<EOL><INDENT>shutil.copyfileobj(target, final)<EOL><DEDENT><DEDENT>if len(params) > <NUM_LIT:0>:<EOL><INDENT>logger.warn(\"<STR_LIT>\".format(**vars()))<EOL><DEDENT>return 
{p: substitutions[p] for p in params}<EOL>", "docstring": "Change values in a Gromacs mdp file.\n\n    Parameters and values are supplied as substitutions, eg ``nsteps=1000``.\n\n    By default the template mdp file is **overwritten in place**.\n\n    If a parameter does not exist in the template then it cannot be substituted\n    and the parameter/value pair is returned. The user has to check the\n    returned list in order to make sure that everything worked as expected. At\n    the moment it is not possible to automatically append the new values to the\n    mdp file because of ambiguities when having to replace dashes in parameter\n    names with underscores (see the notes below on dashes/underscores).\n\n    If a parameter is set to the value ``None`` then it will be ignored.\n\n    :Arguments:\n        *mdp* : filename\n            filename of input (and output filename of ``new_mdp=None``)\n        *new_mdp* : filename\n            filename of alternative output mdp file [None]\n        *extend_parameters* : string or list of strings\n            single parameter or list of parameters for which the new values\n            should be appended to the existing value in the mdp file. This\n            makes mostly sense for a single parameter, namely 'include', which\n            is set as the default. Set to ``[]`` to disable. ['include']\n        *substitutions*\n            parameter=value pairs, where parameter is defined by the Gromacs\n            mdp file; dashes in parameter names have to be replaced by\n            underscores. If a value is a list-like object then the items are\n            written as a sequence, joined with spaces, e.g. ::\n\n               ref_t=[310,310,310] --->  ref_t = 310 310 310\n\n    :Returns:\n        Dict of parameters that have *not* been substituted.\n\n    **Example** ::\n\n       edit_mdp('md.mdp', new_mdp='long_md.mdp', nsteps=100000, nstxtcout=1000, lincs_iter=2)\n\n    .. 
Note::\n\n       * Dashes in Gromacs mdp parameters have to be replaced by an underscore\n         when supplied as python keyword arguments (a limitation of python). For example\n         the MDP syntax is  ``lincs-iter = 4`` but the corresponding  keyword would be\n         ``lincs_iter = 4``.\n       * If the keyword is set as a dict key, eg ``mdp_params['lincs-iter']=4`` then one\n         does not have to substitute.\n       * Parameters *aa_bb* and *aa-bb* are considered the same (although this should\n         not be a problem in practice because there are no mdp parameters that only\n         differ by a underscore).\n       * This code is more compact in ``Perl`` as one can use ``s///`` operators:\n         ``s/^(\\s*${key}\\s*=\\s*).*/$1${val}/``\n\n    .. SeeAlso:: One can also load the mdp file with\n                :class:`gromacs.formats.MDP`, edit the object (a dict), and save it again.", "id": "f6863:m10"}
{"signature": "def combine(self, name_all=None, out_ndx=None, operation='<STR_LIT:|>', defaultgroups=False):", "body": "if not operation in ('<STR_LIT:|>', '<STR_LIT:&>', False):<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(<EOL>operation))<EOL><DEDENT>if name_all is None and operation:<EOL><INDENT>name_all = self.name_all or operation.join(self.indexfiles)<EOL><DEDENT>if out_ndx is None:<EOL><INDENT>out_ndx = self.output<EOL><DEDENT>if defaultgroups:<EOL><INDENT>fd, default_ndx = tempfile.mkstemp(suffix='<STR_LIT>', prefix='<STR_LIT>')<EOL>try:<EOL><INDENT>self.make_ndx(o=default_ndx, input=['<STR_LIT:q>'])<EOL><DEDENT>except:<EOL><INDENT>utilities.unlink_gmx(default_ndx)<EOL>raise<EOL><DEDENT>ndxfiles = [default_ndx]<EOL><DEDENT>else:<EOL><INDENT>ndxfiles = []<EOL><DEDENT>ndxfiles.extend(self.indexfiles.values())<EOL>if operation:<EOL><INDENT>try:<EOL><INDENT>fd, tmp_ndx = tempfile.mkstemp(suffix='<STR_LIT>', prefix='<STR_LIT>')<EOL>operation = '<STR_LIT:U+0020>'+operation.strip()+'<STR_LIT:U+0020>'<EOL>cmd = [operation.join(['<STR_LIT>'.format(gname) for gname in self.indexfiles]),<EOL>'<STR_LIT>', '<STR_LIT:q>']<EOL>rc,out,err = self.make_ndx(n=ndxfiles, o=tmp_ndx, input=cmd)<EOL>if self._is_empty_group(out):<EOL><INDENT>warnings.warn(\"<STR_LIT>\".format(**vars()),<EOL>category=BadParameterWarning)<EOL><DEDENT>groups = parse_ndxlist(out)<EOL>last = groups[-<NUM_LIT:1>]<EOL>name_cmd = [\"<STR_LIT>\".format(last['<STR_LIT>'], name_all), '<STR_LIT:q>']<EOL>rc,out,err = self.make_ndx(n=tmp_ndx, o=out_ndx, input=name_cmd)<EOL><DEDENT>finally:<EOL><INDENT>utilities.unlink_gmx(tmp_ndx)<EOL>if defaultgroups:<EOL><INDENT>utilities.unlink_gmx(default_ndx)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rc,out,err = self.make_ndx(n=ndxfiles, o=out_ndx, input=['<STR_LIT>','<STR_LIT:q>'])<EOL><DEDENT>return name_all, out_ndx<EOL>", "docstring": "Combine individual groups into a single one and write output.\n\n        :Keywords:\n           name_all : string\n            
  Name of the combined group, ``None`` generates a name.  [``None``]\n           out_ndx : filename\n              Name of the output file that will contain the individual groups\n              and the combined group. If ``None`` then default from the class\n              constructor is used. [``None``]\n           operation : character\n              Logical operation that is used to generate the combined group from\n              the individual groups: \"|\" (OR) or \"&\" (AND); if set to ``False``\n              then no combined group is created and only the individual groups\n              are written. [\"|\"]\n           defaultgroups : bool\n              ``True``: append everything to the default groups produced by\n              :program:`make_ndx` (or rather, the groups provided in the ndx file on\n              initialization --- if this was ``None`` then these are truly default groups);\n              ``False``: only use the generated groups\n\n        :Returns:\n           ``(combinedgroup_name, output_ndx)``, a tuple showing the\n           actual group name and the name of the file; useful when all names are autogenerated.\n\n        .. Warning:: The order of the atom numbers in the combined group is\n                     *not* guaranteed to be the same as the selections on input because\n                     ``make_ndx`` sorts them ascending. Thus you should be careful when\n                     using these index files for calculations of angles and dihedrals.\n                     Use :class:`gromacs.formats.NDX` in these cases.\n\n        .. SeeAlso:: :meth:`IndexBuilder.write`.", "id": "f6863:c1:m3"}
{"signature": "def parse_ndxlist(output):", "body": "m = NDXLIST.search(output)    <EOL>grouplist = m.group('<STR_LIT>')<EOL>return parse_groups(grouplist)<EOL>", "docstring": "Parse output from make_ndx to build list of index groups::\n\n      groups = parse_ndxlist(output)\n\n    output should be the standard output from ``make_ndx``, e.g.::\n\n       rc,output,junk = gromacs.make_ndx(..., input=('', 'q'), stdout=False, stderr=True)\n\n    (or simply use\n\n       rc,output,junk = cbook.make_ndx_captured(...)\n\n    which presets input, stdout and stderr; of course input can be overriden.)\n\n    :Returns:\n       The function returns a list of dicts (``groups``) with fields\n\n       name\n           name of the groups\n       nr\n           number of the group (starts at 0)\n       natoms\n           number of atoms in the group", "id": "f6863:m15"}
{"signature": "def get_ndx_groups(ndx, **kwargs):", "body": "fd, tmp_ndx = tempfile.mkstemp(suffix='<STR_LIT>')<EOL>kwargs['<STR_LIT:o>'] = tmp_ndx<EOL>try:<EOL><INDENT>g = parse_ndxlist(make_ndx_captured(n=ndx, **kwargs)[<NUM_LIT:1>])<EOL><DEDENT>finally:<EOL><INDENT>utilities.unlink_gmx(tmp_ndx)<EOL><DEDENT>return g<EOL>", "docstring": "Return a list of index groups in the index file *ndx*.\n\n    :Arguments:\n        - *ndx*  is a Gromacs index file.\n        - kwargs are passed to :func:`make_ndx_captured`.\n\n    :Returns:\n        list of groups as supplied by :func:`parse_ndxlist`\n\n    Alternatively, load the index file with\n    :class:`gromacs.formats.NDX` for full control.", "id": "f6863:m14"}
{"signature": "def _join_dirname(self, *args):", "body": "<EOL>return os.path.join(os.path.dirname(args[<NUM_LIT:0>]), *args[<NUM_LIT:1>:])<EOL>", "docstring": "return os.path.join(os.path.dirname(args[0]), *args[1:])", "id": "f6863:c2:m9"}
{"signature": "def make_ndx_captured(**kwargs):", "body": "kwargs['<STR_LIT>']=False   <EOL>user_input = kwargs.pop('<STR_LIT:input>',[])<EOL>user_input = [cmd for cmd in user_input if cmd != '<STR_LIT:q>']  <EOL>kwargs['<STR_LIT:input>'] = user_input + ['<STR_LIT>', '<STR_LIT:q>']                <EOL>return gromacs.make_ndx(**kwargs)<EOL>", "docstring": "make_ndx that captures all output\n\n    Standard :func:`~gromacs.make_ndx` command with the input and\n    output pre-set in such a way that it can be conveniently used for\n    :func:`parse_ndxlist`.\n\n    Example::\n      ndx_groups = parse_ndxlist(make_ndx_captured(n=ndx)[0])\n\n    Note that the convenient :func:`get_ndx_groups` function does exactly\n    that and can probably used in most cases.\n\n    :Arguments:\n        keywords are passed on to :func:`~gromacs.make_ndx`\n    :Returns:\n        (*returncode*, *output*, ``None``)", "id": "f6863:m13"}
{"signature": "def __init__(self, struct=None, selections=None, names=None, name_all=None,<EOL>ndx=None, out_ndx=\"<STR_LIT>\", offset=<NUM_LIT:0>):", "body": "self.structure = struct<EOL>self.ndx = ndx<EOL>self.output = out_ndx<EOL>self.name_all = name_all<EOL>self.offset = offset<EOL>self._command_counter = <NUM_LIT:0><EOL>if selections is None:<EOL><INDENT>selections = []<EOL><DEDENT>if not utilities.iterable(selections):<EOL><INDENT>selections = [selections]<EOL><DEDENT>self.selections = selections<EOL>if names is None:<EOL><INDENT>names = [None] * len(selections)<EOL><DEDENT>self.make_ndx = tools.Make_ndx(f=self.structure, n=self.ndx,<EOL>stdout=False, stderr=False)<EOL>self.indexfiles = dict([self.parse_selection(selection, name)<EOL>for selection, name in zip(selections, names)])<EOL>", "docstring": "Build a index group from the selection arguments.\n\n        If selections and a structure file are supplied then the individual\n        selections are constructed with separate calls to\n        :func:`gromacs.make_ndx`. Use :meth:`IndexBuilder.combine` to combine\n        them into a joint selection or :meth:`IndexBuilder.write` to simply write\n        out the individual named selections (useful with *names*).\n\n        :Arguments:\n\n           *struct* : filename\n              Structure file (tpr, pdb, ...)\n\n           *selections* : list\n              The list must contain strings or tuples, which must be be one of\n              the following constructs:\n\n                 \"<1-letter aa code><resid>[:<atom name]\"\n\n                     Selects the CA of the residue or the specified atom\n                     name.\n\n                     example: ``\"S312:OA\"`` or ``\"A22\"`` (equivalent to ``\"A22:CA\"``)\n\n                 (\"<1-letter aa code><resid>\", \"<1-letter aa code><resid>, [\"<atom name>\"])\n\n                    Selects a *range* of residues. 
If only two residue\n                    identifiers are provided then all atoms are\n                    selected. With an optional third atom identifier,\n                    only this atom anme is selected for each residue\n                    in the range. [EXPERIMENTAL]\n\n                 \"@<make_ndx selection>\"\n\n                     The ``@`` letter introduces a verbatim ``make_ndx``\n                     command. It will apply the given selection without any\n                     further processing or checks.\n\n                     example: ``\"@a 6234 - 6238\"`` or ``'@\"SOL\"'`` (note the quoting)\n                     or ``\"@r SER & r 312 & t OA\"``.\n\n           *names* : list\n              Strings to name the selections; if not supplied or if individuals\n              are ``None`` then a default name is created. When simply using\n              :meth:`IndexBuilder.write` then these should be supplied.\n\n           *name_all* : string\n              Name of the group that is generated by :meth:`IndexBuilder.combine`.\n\n           *offset* : int, dict\n              This number is added to the resids in the first selection scheme; this\n              allows names to be the same as in a crystal structure. If offset is a\n              dict then it is used to directly look up the resids.\n\n           *ndx* : filename or list of filenames\n              Optional input index file(s).\n\n           *out_ndx* : filename\n              Output index file.", "id": "f6863:c1:m0"}
{"signature": "def _translate_residue(self, selection, default_atomname='<STR_LIT>'):", "body": "m = self.RESIDUE.match(selection)<EOL>if not m:<EOL><INDENT>errmsg = \"<STR_LIT>\".format(**vars())<EOL>logger.error(errmsg)<EOL>raise ValueError(errmsg)<EOL><DEDENT>gmx_resid = self.gmx_resid(int(m.group('<STR_LIT>')))    <EOL>residue = m.group('<STR_LIT>')<EOL>if len(residue) == <NUM_LIT:1>:<EOL><INDENT>gmx_resname = utilities.convert_aa_code(residue) <EOL><DEDENT>else:<EOL><INDENT>gmx_resname = residue                            <EOL><DEDENT>gmx_atomname = m.group('<STR_LIT>')<EOL>if gmx_atomname is None:<EOL><INDENT>gmx_atomname = default_atomname<EOL><DEDENT>return {'<STR_LIT>':gmx_resname, '<STR_LIT>':gmx_resid, '<STR_LIT>':gmx_atomname}<EOL>", "docstring": "Translate selection for a single res to make_ndx syntax.", "id": "f6863:c1:m10"}
{"signature": "def create_portable_topology(topol, struct, **kwargs):", "body": "_topoldir, _topol = os.path.split(topol)<EOL>processed = kwargs.pop('<STR_LIT>', os.path.join(_topoldir, '<STR_LIT>'+_topol))<EOL>grompp_kwargs, mdp_kwargs = filter_grompp_options(**kwargs)<EOL>mdp_kwargs = add_mdp_includes(topol, mdp_kwargs)<EOL>with tempfile.NamedTemporaryFile(suffix='<STR_LIT>') as mdp:<EOL><INDENT>mdp.write('<STR_LIT>'.format(**mdp_kwargs))<EOL>mdp.flush()<EOL>grompp_kwargs['<STR_LIT:p>'] = topol<EOL>grompp_kwargs['<STR_LIT>'] = processed<EOL>grompp_kwargs['<STR_LIT:f>'] =  mdp.name<EOL>grompp_kwargs['<STR_LIT:c>'] = struct<EOL>grompp_kwargs['<STR_LIT:v>'] = False<EOL>try:<EOL><INDENT>gromacs.grompp(**grompp_kwargs)<EOL><DEDENT>finally:<EOL><INDENT>utilities.unlink_gmx('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT><DEDENT>return utilities.realpath(processed)<EOL>", "docstring": "Create a processed topology.\n\n    The processed (or portable) topology file does not contain any\n    ``#include`` statements and hence can be easily copied around. It\n    also makes it possible to re-grompp without having any special itp\n    files available.\n\n    :Arguments:\n      *topol*\n          topology file\n      *struct*\n          coordinat (structure) file\n\n    :Keywords:\n      *processed*\n          name of the new topology file; if not set then it is named like\n          *topol* but with ``pp_`` prepended\n      *includes*\n          path or list of paths of directories in which itp files are\n          searched for\n      *grompp_kwargs**\n          other options for :program:`grompp` such as ``maxwarn=2`` can\n          also be supplied\n\n    :Returns: full path to the processed topology", "id": "f6863:m8"}
{"signature": "def parse_selection(self, selection, name=None):", "body": "if type(selection) is tuple:<EOL><INDENT>process = self._process_range<EOL><DEDENT>elif selection.startswith('<STR_LIT:@>'):<EOL><INDENT>process = self._process_command<EOL>selection = selection[<NUM_LIT:1>:]<EOL><DEDENT>else:<EOL><INDENT>process = self._process_residue<EOL><DEDENT>return process(selection, name)<EOL>", "docstring": "Retuns (groupname, filename) with index group.", "id": "f6863:c1:m6"}
{"signature": "def extract(self):", "body": "<EOL>self.extractor.run()<EOL>", "docstring": "Extract frames from the trajectory to the temporary directory.", "id": "f6863:c0:m1"}
{"signature": "@property<EOL><INDENT>def all_frames(self):<DEDENT>", "body": "return glob.glob(self.frameglob)<EOL>", "docstring": "Unordered list of all frames currently held on disk.", "id": "f6863:c0:m2"}
{"signature": "def rp(self, *args):", "body": "try:<EOL><INDENT>p = os.path.join(*args)<EOL>if os.path.isabs(p):<EOL><INDENT>return p<EOL><DEDENT><DEDENT>except TypeError:<EOL><INDENT>pass<EOL><DEDENT>return utilities.realpath(self.dirname, *args)<EOL>", "docstring": "Return canonical path to file under *dirname* with components *args*\n\n         If *args* form an absolute path then just return it as the absolute path.", "id": "f6863:c2:m3"}
{"signature": "def __init__(self, s=\"<STR_LIT>\", f=\"<STR_LIT>\", n=None, force=None,<EOL>dirname=os.path.curdir, outdir=None):", "body": "self.tpr = self.filename(s, ext=\"<STR_LIT>\", use_my_ext=True)<EOL>self.xtc = self.filename(f, ext=\"<STR_LIT>\", use_my_ext=True)<EOL>self.ndx = n<EOL>self.dirname = dirname<EOL>self.outdir = utilities.realpath(outdir) if outdir is not None else None<EOL>self.force = force<EOL>self.nowater = {}     <EOL>self.proteinonly = {} <EOL>with utilities.in_dir(self.dirname, create=False):<EOL><INDENT>for f in (self.tpr, self.xtc, self.ndx):<EOL><INDENT>if f is None:<EOL><INDENT>continue<EOL><DEDENT>if not os.path.exists(f):<EOL><INDENT>msg = \"<STR_LIT>\".format(**vars())<EOL>warnings.warn(msg, category=MissingDataWarning)<EOL>logger.warn(msg)<EOL><DEDENT><DEDENT><DEDENT>logger.info(\"<STR_LIT>\", self)<EOL>", "docstring": "Set up Transformer with structure and trajectory.\n\n        Supply *n* = tpr, *f* = xtc (and *n* = ndx) relative to dirname.\n\n        :Keywords:\n           *s*\n              tpr file (or similar); note that this should not contain\n              position restraints if it is to be used with a reduced\n              system (see :meth:`~Transformer.strip_water`)\n           *f*\n              trajectory (xtc, trr, ...)\n           *n*\n              index file (it is typically safe to leave this as ``None``; in\n              cases where a trajectory needs to be centered on non-standard\n              groups this should contain those groups)\n           *force*\n              Set the default behaviour for handling existing files:\n                - ``True``: overwrite existing trajectories\n                - ``False``: throw a IOError exception\n                - ``None``: skip existing and log a warning [default]\n           *dirname*\n              directory in which all operations are performed, relative paths\n              are interpreted relative to *dirname* [.]\n           *outdir*\n              
directory under which output files are placed; by default\n              the same directory where the input files live", "id": "f6863:c2:m0"}
{"signature": "def transform_args(self,*args,**kwargs):", "body": "newargs = self._combineargs(*args, **kwargs)<EOL>return self._build_arg_list(**newargs)<EOL>", "docstring": "Combine arguments and turn them into gromacs tool arguments.", "id": "f6864:c1:m8"}
{"signature": "def _commandline(self, *args, **kwargs):", "body": "<EOL>return [self.command_name] + self.transform_args(*args, **kwargs)<EOL>", "docstring": "Returns the command line (without pipes) as a list.", "id": "f6864:c0:m4"}
{"signature": "def _run_command(self, *args, **kwargs):", "body": "<EOL>use_input = kwargs.pop('<STR_LIT>', True)<EOL>capturefile = None<EOL>if environment.flags['<STR_LIT>'] is True:<EOL><INDENT>kwargs.setdefault('<STR_LIT>', PIPE)<EOL>kwargs.setdefault('<STR_LIT>', PIPE)<EOL><DEDENT>elif environment.flags['<STR_LIT>'] == \"<STR_LIT:file>\":<EOL><INDENT>if '<STR_LIT>' in kwargs and '<STR_LIT>' in kwargs:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>fn = environment.flags['<STR_LIT>']<EOL>capturefile = file(fn, \"<STR_LIT:w>\")   <EOL>if '<STR_LIT>' in kwargs and '<STR_LIT>' not in kwargs:<EOL><INDENT>kwargs.setdefault('<STR_LIT>', capturefile)<EOL><DEDENT>else:<EOL><INDENT>kwargs.setdefault('<STR_LIT>', STDOUT)<EOL>kwargs.setdefault('<STR_LIT>', capturefile)<EOL><DEDENT><DEDENT><DEDENT>try:<EOL><INDENT>p = self.Popen(*args, **kwargs)<EOL>out, err = p.communicate(use_input=use_input) <EOL><DEDENT>except:<EOL><INDENT>if capturefile is not None:<EOL><INDENT>logger.error(\"<STR_LIT>\", capturefile)<EOL><DEDENT>raise<EOL><DEDENT>finally:<EOL><INDENT>if capturefile is not None:<EOL><INDENT>capturefile.close()<EOL><DEDENT><DEDENT>rc = p.returncode<EOL>return (rc, out, err), p<EOL>", "docstring": "Execute the command; see the docs for __call__.\n\n        :Returns: a tuple of the *results* tuple ``(rc, stdout, stderr)`` and\n                  the :class:`Popen` instance.", "id": "f6864:c0:m3"}
{"signature": "def communicate(self, use_input=True):", "body": "if use_input:<EOL><INDENT>return super(PopenWithInput, self).communicate(self.input)<EOL><DEDENT>else:<EOL><INDENT>return super(PopenWithInput, self).communicate()<EOL><DEDENT>", "docstring": "Run the command, using the input that was set up on __init__ (for *use_input* = ``True``)", "id": "f6864:c2:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "self.args = args<EOL>self.kwargs = kwargs<EOL>", "docstring": "Set up the command class.\n\n        The arguments can always be provided as standard positional\n        arguments such as\n\n          ``\"-c\", \"config.conf\", \"-o\", \"output.dat\", \"--repeats=3\", \"-v\", \"input.dat\"``\n\n        In addition one can also use keyword arguments such as\n\n          ``c=\"config.conf\", o=\"output.dat\", repeats=3, v=True``\n\n        These are automatically transformed appropriately according to\n        simple rules:\n\n        * Any single-character keywords are assumed to be POSIX-style\n          options and will be prefixed with a single dash and the value\n          separated by a space.\n\n        * Any other keyword is assumed to be a GNU-style long option\n          and thus will be prefixed with two dashes and the value will\n          be joined directly with an equals sign and no space.\n\n        If this does not work (as for instance for the options of the\n        UNIX ``find`` command) then provide options and values in the\n        sequence of positional arguments.\n\n\n        *Example*\n\n        Create a ``Ls`` class whose instances execute the ``ls`` command::\n\n          LS = type(\"LS\", (gromacs.core.Command,), {'command_name': 'ls'})\n          ls = LS()\n          ls()        # lists directory like ls\n          ls(l=True)  # lists directory like ls -l\n\n        Now create an instance that performs a long directory listing by\n        default::\n\n          lslong = LS(l=True)\n          lslong()    # like ls -l", "id": "f6864:c0:m0"}
{"signature": "def merge_ndx(*args):", "body": "ndxs = []<EOL>struct = None<EOL>for fname in args:<EOL><INDENT>if fname.endswith('<STR_LIT>'):<EOL><INDENT>ndxs.append(fname)<EOL><DEDENT>else:<EOL><INDENT>if struct is not None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>struct = fname<EOL><DEDENT><DEDENT>fd, multi_ndx = tempfile.mkstemp(suffix='<STR_LIT>', prefix='<STR_LIT>')<EOL>os.close(fd)<EOL>atexit.register(os.unlink, multi_ndx)<EOL>if struct:<EOL><INDENT>make_ndx = registry['<STR_LIT>'](f=struct, n=ndxs, o=multi_ndx)<EOL><DEDENT>else:<EOL><INDENT>make_ndx = registry['<STR_LIT>'](n=ndxs, o=multi_ndx)<EOL><DEDENT>_, _, _ = make_ndx(input=['<STR_LIT:q>'], stdout=False, stderr=False)<EOL>return multi_ndx<EOL>", "docstring": "Takes one or more index files and optionally one structure file and\n    returns a path for a new merged index file.\n\n    :param args: index files and zero or one structure file\n    :return: path for the new merged index file", "id": "f6865:m5"}
{"signature": "def make_valid_identifier(name):", "body": "return name.replace('<STR_LIT:->', '<STR_LIT:_>').capitalize()<EOL>", "docstring": "Turns tool names into valid identifiers.\n\n    :param name: tool name\n    :return: valid identifier", "id": "f6865:m1"}
{"signature": "def unlink_gmx_backups(*args):", "body": "for path in args:<EOL><INDENT>dirname, filename = os.path.split(path)<EOL>fbaks = glob.glob(os.path.join(dirname, '<STR_LIT:#>'+filename+'<STR_LIT>'))<EOL>for bak in fbaks:<EOL><INDENT>unlink_f(bak)<EOL><DEDENT><DEDENT>", "docstring": "Unlink (rm) all backup files corresponding to the listed files.", "id": "f6866:m18"}
{"signature": "def isstream(obj):", "body": "signature_methods = (\"<STR_LIT>\",)<EOL>alternative_methods = (<EOL>(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"),<EOL>(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"))<EOL>for m in signature_methods:<EOL><INDENT>if not hasmethod(obj, m):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>alternative_results = [<EOL>numpy.all([hasmethod(obj, m) for m in alternatives])<EOL>for alternatives in alternative_methods]<EOL>return numpy.any(alternative_results)<EOL>", "docstring": "Detect if `obj` is a stream.\n\n    We consider anything a stream that has the methods\n\n    - ``close()``\n\n    and either set of the following\n\n    - ``read()``, ``readline()``, ``readlines()``\n    - ``write()``, ``writeline()``, ``writelines()``\n\n    :Arguments:\n      *obj*\n          stream or str\n\n    :Returns:\n      *bool*, ``True`` if `obj` is a stream, ``False`` otherwise\n\n    .. SeeAlso::\n       :mod:`io`\n\n\n    .. versionadded:: 0.7.1", "id": "f6866:m5"}
{"signature": "def _init_filename(self, filename=None, ext=None):", "body": "extension = ext or self.default_extension<EOL>filename = self.filename(filename, ext=extension, use_my_ext=True, set_default=True)<EOL>self.real_filename = os.path.realpath(filename)<EOL>", "docstring": "Initialize the current filename :attr:`FileUtils.real_filename` of the object.\n\n        Bit of a hack.\n\n        - The first invocation must have ``filename != None``; this will set a\n          default filename with suffix :attr:`FileUtils.default_extension`\n          unless another one was supplied.\n\n        - Subsequent invocations either change the filename accordingly or\n          ensure that the default filename is set with the proper suffix.", "id": "f6866:c1:m0"}
{"signature": "def activate_subplot(numPlot):", "body": "<EOL>from pylab import gcf, axes<EOL>numPlot -= <NUM_LIT:1>  <EOL>return axes(gcf().get_axes()[numPlot])<EOL>", "docstring": "Make subplot *numPlot* active on the canvas.\n\n    Use this if a simple ``subplot(numRows, numCols, numPlot)``\n    overwrites the subplot instead of activating it.", "id": "f6866:m21"}
{"signature": "def realpath(*args):", "body": "if None in args:<EOL><INDENT>return None<EOL><DEDENT>return os.path.realpath(<EOL>os.path.expandvars(os.path.expanduser(os.path.join(*args))))<EOL>", "docstring": "Join all args and return the real path, rooted at /.\n\n    Expands ``~`` and environment variables such as :envvar:`$HOME`.\n\n    Returns ``None`` if any of the args is none.", "id": "f6866:m8"}
{"signature": "def unlink_gmx(*args):", "body": "for path in args:<EOL><INDENT>unlink_f(path)<EOL><DEDENT>unlink_gmx_backups(*args)<EOL>", "docstring": "Unlink (remove) Gromacs file(s) and all corresponding backups.", "id": "f6866:m17"}
{"signature": "def convert_aa_code(x):", "body": "if len(x) == <NUM_LIT:1>:<EOL><INDENT>return amino_acid_codes[x.upper()]<EOL><DEDENT>elif len(x) == <NUM_LIT:3>:<EOL><INDENT>return inverse_aa_codes[x.upper()]<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % x)<EOL><DEDENT>", "docstring": "Converts between 3-letter and 1-letter amino acid codes.", "id": "f6866:m6"}
{"signature": "def mkdir_p(path):", "body": "try:<EOL><INDENT>os.makedirs(path)<EOL><DEDENT>except OSError as err:<EOL><INDENT>if err.errno != errno.EEXIST:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>", "docstring": "Create a directory *path* with subdirs but do not complain if it exists.\n\n    This is like GNU ``mkdir -p path``.", "id": "f6866:m19"}
{"signature": "def remove_legend(ax=None):", "body": "from pylab import gca, draw<EOL>if ax is None:<EOL><INDENT>ax = gca()<EOL><DEDENT>ax.legend_ = None<EOL>draw()<EOL>", "docstring": "Remove legend for axes or gca.\n\n    See http://osdir.com/ml/python.matplotlib.general/2005-07/msg00285.html", "id": "f6866:m22"}
{"signature": "def find_files(directory, pattern):", "body": "for root, dirs, files in os.walk(directory):<EOL><INDENT>for basename in files:<EOL><INDENT>if fnmatch.fnmatch(basename, pattern):<EOL><INDENT>filename = os.path.join(root, basename)<EOL>yield filename<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Find files recursively under *directory*, matching *pattern* (generator).\n\n    *pattern* is a UNIX-style glob pattern as used ny :func:`fnmatch.fnmatch`.\n\n    Recipe by Bruno Oliveira from\n    http://stackoverflow.com/questions/2186525/use-a-glob-to-find-files-recursively-in-python", "id": "f6866:m11"}
{"signature": "def check_file_exists(self, filename, resolve='<STR_LIT>', force=None):", "body": "def _warn(x):<EOL><INDENT>msg = \"<STR_LIT>\".format(x)<EOL>logger.warn(msg)<EOL>warnings.warn(msg)<EOL>return True<EOL><DEDENT>def _raise(x):<EOL><INDENT>msg = \"<STR_LIT>\".format(x)<EOL>logger.error(msg)<EOL>raise IOError(errno.EEXIST, x, msg)<EOL><DEDENT>solutions = {'<STR_LIT:ignore>': lambda x: False,      <EOL>'<STR_LIT>': lambda x: True,     <EOL>'<STR_LIT>': _warn,<EOL>'<STR_LIT>': _warn,<EOL>'<STR_LIT>': _raise,<EOL>'<STR_LIT>': _raise,<EOL>}<EOL>if force is True:<EOL><INDENT>resolve = '<STR_LIT:ignore>'<EOL><DEDENT>elif force is False:<EOL><INDENT>resolve = '<STR_LIT>'<EOL><DEDENT>if not os.path.isfile(filename):<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return solutions[resolve](filename)<EOL><DEDENT>", "docstring": "If a file exists then continue with the action specified in ``resolve``.\n\n        ``resolve`` must be one of\n\n        \"ignore\"\n              always return ``False``\n        \"indicate\"\n              return ``True`` if it exists\n        \"warn\"\n              indicate and issue a :exc:`UserWarning`\n        \"exception\"\n              raise :exc:`IOError` if it exists\n\n        Alternatively, set *force* for the following behaviour (which\n        ignores *resolve*):\n\n        ``True``\n              same as *resolve* = \"ignore\" (will allow overwriting of files)\n        ``False``\n              same as *resolve* = \"exception\" (will prevent overwriting of files)\n        ``None``\n              ignored, do whatever *resolve* says", "id": "f6866:c1:m2"}
{"signature": "@contextmanager<EOL>def openany(datasource, mode='<STR_LIT>', reset=True):", "body": "stream = anyopen(datasource, mode=mode, reset=reset)<EOL>try:<EOL><INDENT>yield stream<EOL><DEDENT>finally:<EOL><INDENT>stream.close()<EOL><DEDENT>", "docstring": "Context manager for :func:`anyopen`.\n\n    Open the `datasource` and close it when the context of the :keyword:`with`\n    statement exits.\n\n    `datasource` can be a filename or a stream (see :func:`isstream`). A stream\n    is reset to its start if possible (via :meth:`~io.IOBase.seek` or\n    :meth:`~cString.StringIO.reset`).\n\n    The advantage of this function is that very different input sources\n    (\"streams\") can be used for a \"file\", ranging from files on disk (including\n    compressed files) to open file objects to sockets and strings---as long as\n    they have a file-like interface.\n\n    :Arguments:\n      *datasource*\n           a file or a stream\n      *mode*\n           {'r', 'w'} (optional), open in r(ead) or w(rite) mode\n      *reset*\n           bool (optional) try to read (`mode` 'r') the stream from the\n           start [``True``]\n\n\n    **Example**\n\n    Open a gzipped file and process it line by line::\n\n        with openany(\"input.pdb.gz\") as pdb:\n            for line in pdb:\n                if line.startswith('ATOM'):\n                    print(line)\n\n    Open a URL and read it::\n\n       import urllib2\n       with openany(urllib2.urlopen(\"https://www.mdanalysis.org/\")) as html:\n           print(html.read())\n\n\n    .. SeeAlso::\n       :func:`anyopen`", "id": "f6866:m1"}
{"signature": "def filename(self,filename=None,ext=None,set_default=False,use_my_ext=False):", "body": "if filename is None:<EOL><INDENT>if not hasattr(self,'<STR_LIT>'):<EOL><INDENT>self._filename = None        <EOL><DEDENT>if self._filename:<EOL><INDENT>filename = self._filename<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>my_ext = None<EOL><DEDENT>else:<EOL><INDENT>filename, my_ext = os.path.splitext(filename)<EOL>if set_default:                  <EOL><INDENT>self._filename = filename<EOL><DEDENT><DEDENT>if my_ext and use_my_ext:<EOL><INDENT>ext = my_ext<EOL><DEDENT>if ext is not None:<EOL><INDENT>if ext.startswith(os.extsep):<EOL><INDENT>ext = ext[<NUM_LIT:1>:]  <EOL><DEDENT>if ext != \"<STR_LIT>\":<EOL><INDENT>filename = filename + os.extsep + ext<EOL><DEDENT><DEDENT>return filename<EOL>", "docstring": "Supply a file name for the class object.\n\n        Typical uses::\n\n           fn = filename()             ---> <default_filename>\n           fn = filename('name.ext')   ---> 'name'\n           fn = filename(ext='pickle') ---> <default_filename>'.pickle'\n           fn = filename('name.inp','pdf') --> 'name.pdf'\n           fn = filename('foo.pdf',ext='png',use_my_ext=True) --> 'foo.pdf'\n\n        The returned filename is stripped of the extension\n        (``use_my_ext=False``) and if provided, another extension is\n        appended. Chooses a default if no filename is given.\n\n        Raises a ``ValueError`` exception if no default file name is known.\n\n        If ``set_default=True`` then the default filename is also set.\n\n        ``use_my_ext=True`` lets the suffix of a provided filename take\n        priority over a default ``ext`` tension.\n\n        .. versionchanged:: 0.3.1\n           An empty string as *ext* = \"\" will suppress appending an extension.", "id": "f6866:c1:m1"}
{"signature": "def check_mdpargs(d):", "body": "if len(d) > <NUM_LIT:0>:<EOL><INDENT>wmsg = \"<STR_LIT>\"+str(d)<EOL>logger.warn(wmsg)<EOL>warnings.warn(wmsg, category=UsageWarning)<EOL><DEDENT>return len(d) == <NUM_LIT:0><EOL>", "docstring": "Check if any arguments remain in dict *d*.", "id": "f6867:m6"}
{"signature": "def get_lipid_vdwradii(outdir=os.path.curdir, libdir=None):", "body": "vdwradii_dat = os.path.join(outdir, \"<STR_LIT>\")<EOL>if libdir is not None:<EOL><INDENT>filename = os.path.join(libdir, '<STR_LIT>')  <EOL>if not os.path.exists(filename):<EOL><INDENT>msg = '<STR_LIT>'.format(**vars())<EOL>logger.exception(msg)<EOL>raise OSError(msg, errno.ENOENT)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>filename = os.path.join(os.environ['<STR_LIT>'], '<STR_LIT>')<EOL><DEDENT>except KeyError:<EOL><INDENT>try:<EOL><INDENT>filename = os.path.join(os.environ['<STR_LIT>'], '<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>except KeyError:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>logger.exception(msg)<EOL>raise OSError(msg, errno.ENOENT)<EOL><DEDENT><DEDENT><DEDENT>if not os.path.exists(filename):<EOL><INDENT>msg = \"<STR_LIT>\".format(**vars())<EOL>logger.exception(msg, errno.ENOENT)<EOL>raise OSError(msg)<EOL><DEDENT>patterns = vdw_lipid_resnames + list({x[:<NUM_LIT:3>] for x in vdw_lipid_resnames})<EOL>with open(vdwradii_dat, '<STR_LIT:w>') as outfile:<EOL><INDENT>outfile.write('<STR_LIT>')<EOL>for resname in patterns:<EOL><INDENT>for atom,radius in vdw_lipid_atom_radii.items():<EOL><INDENT>outfile.write('<STR_LIT>'.format(**vars()))<EOL><DEDENT><DEDENT>with open(filename, '<STR_LIT:r>') as infile:<EOL><INDENT>for line in infile:<EOL><INDENT>outfile.write(line)<EOL><DEDENT><DEDENT><DEDENT>logger.debug('<STR_LIT>'.format(**vars()))<EOL>return realpath(vdwradii_dat)<EOL>", "docstring": "Find vdwradii.dat and add special entries for lipids.\n\n    See :data:`gromacs.setup.vdw_lipid_resnames` for lipid\n    resnames. Add more if necessary.", "id": "f6867:m2"}
{"signature": "def delay(self):", "body": "self.tx.send = False<EOL>self.__save()<EOL>", "docstring": "save a transaction, but don't let it be picked up by the broadcaster", "id": "f6883:c0:m5"}
{"signature": "def queue(self):", "body": "self.tx.send = True<EOL>self.__save()<EOL>", "docstring": "queue a transaction to be picked up by the background transaction broadcaster", "id": "f6883:c0:m4"}
{"signature": "def db_for_write(self, model, **hints):", "body": "if model._meta.app_label == '<STR_LIT>':<EOL><INDENT>return ark_db['<STR_LIT>']<EOL><DEDENT>return None<EOL>", "docstring": "Attempts to write ark_app models go to ARK_DB_NAME.", "id": "f6886:c0:m1"}
{"signature": "def allow_relation(self, obj1, obj2, **hints):", "body": "if obj1._meta.app_label == '<STR_LIT>' orobj2._meta.app_label == '<STR_LIT>':<EOL><INDENT>return True<EOL><DEDENT>return None<EOL>", "docstring": "Allow relations if a model in the ark is involved.", "id": "f6886:c0:m2"}
{"signature": "def configure_ci_job(<EOL>config_url, rosdistro_name, ci_build_name,<EOL>os_name, os_code_name, arch,<EOL>config=None, build_file=None,<EOL>index=None, dist_file=None,<EOL>jenkins=None, views=None,<EOL>is_disabled=False,<EOL>groovy_script=None,<EOL>build_targets=None,<EOL>dry_run=False,<EOL>underlay_source_paths=None,<EOL>trigger_timer=None):", "body": "if config is None:<EOL><INDENT>config = get_config_index(config_url)<EOL><DEDENT>if build_file is None:<EOL><INDENT>build_files = get_ci_build_files(config, rosdistro_name)<EOL>build_file = build_files[ci_build_name]<EOL><DEDENT>if build_targets is not None:<EOL><INDENT>build_file.targets = build_targets<EOL><DEDENT>if index is None:<EOL><INDENT>index = get_index(config.rosdistro_index_url)<EOL><DEDENT>if dist_file is None:<EOL><INDENT>dist_file = get_distribution_file(index, rosdistro_name, build_file)<EOL>if not dist_file:<EOL><INDENT>raise JobValidationError(<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>if os_name not in build_file.targets.keys():<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % os_name +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT:U+002CU+0020>'.join(sorted(build_file.targets.keys())))<EOL><DEDENT>if os_code_name not in build_file.targets[os_name].keys():<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % os_code_name +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT:U+002CU+0020>'.join(sorted(build_file.targets[os_name].keys())))<EOL><DEDENT>if arch not in build_file.targets[os_name][os_code_name]:<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % arch +<EOL>'<STR_LIT>' % '<STR_LIT:U+002CU+0020>'.join(sorted(<EOL>build_file.targets[os_name][os_code_name])))<EOL><DEDENT>if len(build_file.underlay_from_ci_jobs) > <NUM_LIT:1>:<EOL><INDENT>raise JobValidationError(<EOL>'<STR_LIT>' +<EOL>'<STR_LIT>' % len(build_file.underlay_from_ci_jobs))<EOL><DEDENT>underlay_source_job = None<EOL>if build_file.underlay_from_ci_jobs:<EOL><INDENT>underlay_source_job = get_ci_job_name(<EOL>rosdistro_name, os_name, 
os_code_name, arch,<EOL>build_file.underlay_from_ci_jobs[<NUM_LIT:0>])<EOL>underlay_source_paths = (underlay_source_paths or []) +['<STR_LIT>']<EOL><DEDENT>if jenkins is None:<EOL><INDENT>from ros_buildfarm.jenkins import connect<EOL>jenkins = connect(config.jenkins_url)<EOL><DEDENT>if views is None:<EOL><INDENT>view_name = get_ci_view_name(rosdistro_name)<EOL>configure_ci_view(jenkins, view_name, dry_run=dry_run)<EOL><DEDENT>job_name = get_ci_job_name(<EOL>rosdistro_name, os_name, os_code_name, arch, ci_build_name)<EOL>job_config = _get_ci_job_config(<EOL>index, rosdistro_name, build_file, os_name,<EOL>os_code_name, arch,<EOL>build_file.repos_files,<EOL>underlay_source_job,<EOL>underlay_source_paths,<EOL>trigger_timer,<EOL>is_disabled=is_disabled)<EOL>if isinstance(jenkins, object) and jenkins is not False:<EOL><INDENT>from ros_buildfarm.jenkins import configure_job<EOL>configure_job(jenkins, job_name, job_config, dry_run=dry_run)<EOL><DEDENT>return job_name, job_config<EOL>", "docstring": "Configure a single Jenkins CI job.\n\nThis includes the following steps:\n- clone the ros_buildfarm repository\n- write the distribution repository keys into files\n- invoke the ci/run_ci_job.py script", "id": "f6902:m1"}
{"signature": "def configure_ci_jobs(<EOL>config_url, rosdistro_name, ci_build_name,<EOL>groovy_script=None, dry_run=False):", "body": "config = get_config_index(config_url)<EOL>build_files = get_ci_build_files(config, rosdistro_name)<EOL>build_file = build_files[ci_build_name]<EOL>index = get_index(config.rosdistro_index_url)<EOL>targets = []<EOL>for os_name in build_file.targets.keys():<EOL><INDENT>for os_code_name in build_file.targets[os_name].keys():<EOL><INDENT>for arch in build_file.targets[os_name][os_code_name]:<EOL><INDENT>targets.append((os_name, os_code_name, arch))<EOL><DEDENT><DEDENT><DEDENT>print('<STR_LIT>')<EOL>for os_name, os_code_name, arch in targets:<EOL><INDENT>print('<STR_LIT>', os_name, os_code_name, arch)<EOL><DEDENT>dist_file = get_distribution_file(index, rosdistro_name, build_file)<EOL>if not dist_file:<EOL><INDENT>print('<STR_LIT>')<EOL>return<EOL><DEDENT>ci_view_name = get_ci_view_name(rosdistro_name)<EOL>from ros_buildfarm.jenkins import connect<EOL>jenkins = connect(config.jenkins_url) if groovy_script is None else False<EOL>view_configs = {}<EOL>views = {<EOL>ci_view_name: configure_ci_view(<EOL>jenkins, ci_view_name, dry_run=dry_run)<EOL>}<EOL>if not jenkins:<EOL><INDENT>view_configs.update(views)<EOL><DEDENT>groovy_data = {<EOL>'<STR_LIT>': dry_run,<EOL>'<STR_LIT>': len(view_configs),<EOL>}<EOL>ci_job_names = []<EOL>job_configs = OrderedDict()<EOL>is_disabled = False<EOL>for os_name, os_code_name, arch in targets:<EOL><INDENT>try:<EOL><INDENT>job_name, job_config = configure_ci_job(<EOL>config_url, rosdistro_name, ci_build_name,<EOL>os_name, os_code_name, arch,<EOL>config=config, build_file=build_file,<EOL>index=index, dist_file=dist_file,<EOL>jenkins=jenkins, views=views,<EOL>is_disabled=is_disabled,<EOL>groovy_script=groovy_script,<EOL>dry_run=dry_run,<EOL>trigger_timer=build_file.jenkins_job_schedule)<EOL>ci_job_names.append(job_name)<EOL>if groovy_script is not None:<EOL><INDENT>print(\"<STR_LIT>\" % 
job_name)<EOL>job_configs[job_name] = job_config<EOL><DEDENT><DEDENT>except JobValidationError as e:<EOL><INDENT>print(e.message, file=sys.stderr)<EOL><DEDENT><DEDENT>groovy_data['<STR_LIT>'] = len(job_configs)<EOL>groovy_data['<STR_LIT>'] = {}<EOL>if groovy_script is not None:<EOL><INDENT>print(<EOL>\"<STR_LIT>\" %<EOL>(groovy_script, len(job_configs)))<EOL>content = expand_template(<EOL>'<STR_LIT>', groovy_data)<EOL>write_groovy_script_and_configs(<EOL>groovy_script, content, job_configs, view_configs)<EOL><DEDENT>", "docstring": "Configure all Jenkins CI jobs.", "id": "f6902:m0"}
{"signature": "def configure_doc_job(<EOL>config_url, rosdistro_name, doc_build_name,<EOL>repo_name, os_name, os_code_name, arch,<EOL>config=None, build_file=None,<EOL>index=None, dist_file=None, dist_cache=None,<EOL>jenkins=None, views=None,<EOL>is_disabled=False,<EOL>groovy_script=None,<EOL>doc_repository=None,<EOL>dry_run=False):", "body": "if config is None:<EOL><INDENT>config = get_config_index(config_url)<EOL><DEDENT>if build_file is None:<EOL><INDENT>build_files = get_doc_build_files(config, rosdistro_name)<EOL>build_file = build_files[doc_build_name]<EOL><DEDENT>if index is None:<EOL><INDENT>index = get_index(config.rosdistro_index_url)<EOL><DEDENT>if dist_file is None:<EOL><INDENT>dist_file = get_distribution_file(index, rosdistro_name, build_file)<EOL>if not dist_file:<EOL><INDENT>raise JobValidationError(<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>repo_names = dist_file.repositories.keys()<EOL>if repo_name is not None:<EOL><INDENT>if repo_name not in repo_names:<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % repo_name +<EOL>'<STR_LIT>' %<EOL>'<STR_LIT:U+002CU+0020>'.join(sorted(repo_names)))<EOL><DEDENT>repo = dist_file.repositories[repo_name]<EOL>if not repo.doc_repository:<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % repo_name)<EOL><DEDENT>if not repo.doc_repository.version:<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % repo_name)<EOL><DEDENT>doc_repository = repo.doc_repository<EOL><DEDENT>if os_name not in build_file.targets.keys():<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % os_name +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT:U+002CU+0020>'.join(sorted(build_file.targets.keys())))<EOL><DEDENT>if os_code_name not in build_file.targets[os_name].keys():<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % os_code_name +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT:U+002CU+0020>'.join(sorted(build_file.targets[os_name].keys())))<EOL><DEDENT>if arch not in build_file.targets[os_name][os_code_name]:<EOL><INDENT>raise 
JobValidationError(<EOL>\"<STR_LIT>\" % arch +<EOL>'<STR_LIT>' % '<STR_LIT:U+002CU+0020>'.join(sorted(<EOL>build_file.targets[os_name][os_code_name])))<EOL><DEDENT>if dist_cache is None and build_file.notify_maintainers:<EOL><INDENT>dist_cache = get_distribution_cache(index, rosdistro_name)<EOL><DEDENT>if jenkins is None:<EOL><INDENT>from ros_buildfarm.jenkins import connect<EOL>jenkins = connect(config.jenkins_url)<EOL><DEDENT>if views is None:<EOL><INDENT>view_name = get_doc_view_name(<EOL>rosdistro_name, doc_build_name)<EOL>configure_doc_view(jenkins, view_name, dry_run=dry_run)<EOL><DEDENT>job_name = get_doc_job_name(<EOL>rosdistro_name, doc_build_name,<EOL>repo_name, os_name, os_code_name, arch)<EOL>job_config = _get_doc_job_config(<EOL>config, config_url, rosdistro_name, doc_build_name,<EOL>build_file, os_name, os_code_name, arch, doc_repository,<EOL>repo_name, dist_cache=dist_cache, is_disabled=is_disabled)<EOL>if isinstance(jenkins, object) and jenkins is not False:<EOL><INDENT>from ros_buildfarm.jenkins import configure_job<EOL>configure_job(jenkins, job_name, job_config, dry_run=dry_run)<EOL><DEDENT>return job_name, job_config<EOL>", "docstring": "Configure a single Jenkins doc job.\n\nThis includes the following steps:\n- clone the doc repository to use\n- clone the ros_buildfarm repository\n- write the distribution repository keys into files\n- invoke the run_doc_job.py script", "id": "f6905:m1"}
{"signature": "def configure_doc_jobs(<EOL>config_url, rosdistro_name, doc_build_name, groovy_script=None,<EOL>dry_run=False, whitelist_repository_names=None):", "body": "config = get_config_index(config_url)<EOL>build_files = get_doc_build_files(config, rosdistro_name)<EOL>build_file = build_files[doc_build_name]<EOL>index = get_index(config.rosdistro_index_url)<EOL>dist_cache = None<EOL>if build_file.notify_maintainers:<EOL><INDENT>dist_cache = get_distribution_cache(index, rosdistro_name)<EOL><DEDENT>targets = []<EOL>for os_name in build_file.targets.keys():<EOL><INDENT>for os_code_name in build_file.targets[os_name].keys():<EOL><INDENT>for arch in build_file.targets[os_name][os_code_name]:<EOL><INDENT>targets.append((os_name, os_code_name, arch))<EOL><DEDENT><DEDENT><DEDENT>print('<STR_LIT>')<EOL>for os_name, os_code_name, arch in targets:<EOL><INDENT>print('<STR_LIT>', os_name, os_code_name, arch)<EOL><DEDENT>dist_file = get_distribution_file(index, rosdistro_name, build_file)<EOL>if not dist_file:<EOL><INDENT>print('<STR_LIT>')<EOL>return<EOL><DEDENT>doc_view_name = get_doc_view_name(rosdistro_name, doc_build_name)<EOL>from ros_buildfarm.jenkins import connect<EOL>jenkins = connect(config.jenkins_url) if groovy_script is None else False<EOL>view_configs = {}<EOL>views = {}<EOL>views[doc_view_name] = configure_doc_view(<EOL>jenkins, doc_view_name, dry_run=dry_run)<EOL>if not jenkins:<EOL><INDENT>view_configs.update(views)<EOL><DEDENT>groovy_data = {<EOL>'<STR_LIT>': dry_run,<EOL>'<STR_LIT>': len(view_configs),<EOL>}<EOL>repo_names = dist_file.repositories.keys()<EOL>filtered_repo_names = build_file.filter_repositories(repo_names)<EOL>job_names = []<EOL>job_configs = OrderedDict()<EOL>for repo_name in sorted(repo_names):<EOL><INDENT>if whitelist_repository_names:<EOL><INDENT>if repo_name not in whitelist_repository_names:<EOL><INDENT>print(<EOL>\"<STR_LIT>\" %<EOL>repo_name, file=sys.stderr)<EOL>continue<EOL><DEDENT><DEDENT>is_disabled = repo_name not in 
filtered_repo_names<EOL>if is_disabled and build_file.skip_ignored_repositories:<EOL><INDENT>print(\"<STR_LIT>\" % repo_name,<EOL>file=sys.stderr)<EOL>continue<EOL><DEDENT>repo = dist_file.repositories[repo_name]<EOL>if not repo.doc_repository:<EOL><INDENT>print(\"<STR_LIT>\" % repo_name)<EOL>continue<EOL><DEDENT>if not repo.doc_repository.version:<EOL><INDENT>print(\"<STR_LIT>\" % repo_name)<EOL>continue<EOL><DEDENT>for os_name, os_code_name, arch in targets:<EOL><INDENT>try:<EOL><INDENT>job_name, job_config = configure_doc_job(<EOL>config_url, rosdistro_name, doc_build_name,<EOL>repo_name, os_name, os_code_name, arch,<EOL>config=config, build_file=build_file,<EOL>index=index, dist_file=dist_file,<EOL>dist_cache=dist_cache, jenkins=jenkins, views=views,<EOL>is_disabled=is_disabled,<EOL>groovy_script=groovy_script,<EOL>dry_run=dry_run)<EOL>job_names.append(job_name)<EOL>if groovy_script is not None:<EOL><INDENT>print(\"<STR_LIT>\" % job_name)<EOL>job_configs[job_name] = job_config<EOL><DEDENT><DEDENT>except JobValidationError as e:<EOL><INDENT>print(e.message, file=sys.stderr)<EOL><DEDENT><DEDENT><DEDENT>groovy_data['<STR_LIT>'] = len(job_configs)<EOL>groovy_data['<STR_LIT>'] = {}<EOL>job_prefix = '<STR_LIT>' % doc_view_name<EOL>if not whitelist_repository_names:<EOL><INDENT>groovy_data['<STR_LIT>']['<STR_LIT>'] = (job_prefix, job_names)<EOL>if groovy_script is None:<EOL><INDENT>from ros_buildfarm.jenkins import remove_jobs<EOL>print('<STR_LIT>')<EOL>remove_jobs(jenkins, job_prefix, job_names, dry_run=dry_run)<EOL><DEDENT><DEDENT>if groovy_script is not None:<EOL><INDENT>print(<EOL>\"<STR_LIT>\" %<EOL>(groovy_script, len(view_configs), len(job_configs)))<EOL>content = expand_template(<EOL>'<STR_LIT>', groovy_data)<EOL>write_groovy_script_and_configs(<EOL>groovy_script, content, job_configs, view_configs=view_configs)<EOL><DEDENT>", "docstring": "Configure all Jenkins doc jobs.\n\nL{configure_doc_job} will be invoked for doc repository and target\nwhich matches the 
build file criteria.", "id": "f6905:m0"}
{"signature": "def get_version_status(<EOL>package_descriptors, targets, repos_data,<EOL>strip_version=False, strip_os_code_name=False):", "body": "status = {}<EOL>for package_descriptor in package_descriptors.values():<EOL><INDENT>pkg_name = package_descriptor.pkg_name<EOL>debian_pkg_name = package_descriptor.debian_pkg_name<EOL>ref_version = package_descriptor.version<EOL>if strip_version:<EOL><INDENT>ref_version = _strip_version_suffix(ref_version)<EOL><DEDENT>status[pkg_name] = {}<EOL>for target in targets:<EOL><INDENT>statuses = []<EOL>for repo_data in repos_data:<EOL><INDENT>version = repo_data.get(target, {}).get(debian_pkg_name, None)<EOL>if strip_version:<EOL><INDENT>version = _strip_version_suffix(version)<EOL><DEDENT>if strip_os_code_name:<EOL><INDENT>version = _strip_os_code_name_suffix(<EOL>version, target.os_code_name)<EOL><DEDENT>if ref_version:<EOL><INDENT>if not version:<EOL><INDENT>statuses.append('<STR_LIT>')<EOL><DEDENT>elif version.startswith(ref_version):  <EOL><INDENT>statuses.append('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>if _version_is_gt_other(version, ref_version):<EOL><INDENT>statuses.append('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>statuses.append('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if not version:<EOL><INDENT>statuses.append('<STR_LIT:ignore>')<EOL><DEDENT>else:<EOL><INDENT>statuses.append('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>status[pkg_name][target] = statuses<EOL><DEDENT><DEDENT>return status<EOL>", "docstring": "For each package and target check if it is affected by a sync.\n\nThis is the case when the package version in the testing repo is different\nfrom the version in the main repo.\n\n:return: a dict indexed by package names containing\n  dicts indexed by targets containing\n  a list of status strings (one for each repo)", "id": "f6929:m7"}
{"signature": "def get_package_counts(package_descriptors, targets, repos_data):", "body": "counts = {}<EOL>for target in targets:<EOL><INDENT>counts[target] = [<NUM_LIT:0>] * len(repos_data)<EOL><DEDENT>for package_descriptor in package_descriptors.values():<EOL><INDENT>debian_pkg_name = package_descriptor.debian_pkg_name<EOL>for target in targets:<EOL><INDENT>for i, repo_data in enumerate(repos_data):<EOL><INDENT>version = repo_data.get(target, {}).get(debian_pkg_name, None)<EOL>if version:<EOL><INDENT>counts[target][i] += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>return counts<EOL>", "docstring": "Get the number of packages per target and repository.\n\n:return: a dict indexed by targets containing\n  a list of integer values (one for each repo)", "id": "f6929:m11"}
{"signature": "def configure_devel_job(<EOL>config_url, rosdistro_name, source_build_name,<EOL>repo_name, os_name, os_code_name, arch,<EOL>pull_request=False,<EOL>config=None, build_file=None,<EOL>index=None, dist_file=None, dist_cache=None,<EOL>jenkins=None, views=None,<EOL>is_disabled=False,<EOL>groovy_script=None,<EOL>source_repository=None,<EOL>build_targets=None,<EOL>dry_run=False):", "body": "if config is None:<EOL><INDENT>config = get_config_index(config_url)<EOL><DEDENT>if build_file is None:<EOL><INDENT>build_files = get_source_build_files(config, rosdistro_name)<EOL>build_file = build_files[source_build_name]<EOL><DEDENT>if build_targets is not None:<EOL><INDENT>build_file.targets = build_targets<EOL><DEDENT>if index is None:<EOL><INDENT>index = get_index(config.rosdistro_index_url)<EOL><DEDENT>if dist_file is None:<EOL><INDENT>dist_file = get_distribution_file(index, rosdistro_name, build_file)<EOL>if not dist_file:<EOL><INDENT>raise JobValidationError(<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>repo_names = dist_file.repositories.keys()<EOL>if repo_name is not None:<EOL><INDENT>if repo_name not in repo_names:<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % repo_name +<EOL>'<STR_LIT>' %<EOL>'<STR_LIT:U+002CU+0020>'.join(sorted(repo_names)))<EOL><DEDENT>repo = dist_file.repositories[repo_name]<EOL>if not repo.source_repository:<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % repo_name)<EOL><DEDENT>if not repo.source_repository.version:<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % repo_name)<EOL><DEDENT>source_repository = repo.source_repository<EOL><DEDENT>if os_name not in build_file.targets.keys():<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % os_name +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT:U+002CU+0020>'.join(sorted(build_file.targets.keys())))<EOL><DEDENT>if os_code_name not in build_file.targets[os_name].keys():<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % os_code_name +<EOL>'<STR_LIT>' 
+<EOL>'<STR_LIT:U+002CU+0020>'.join(sorted(build_file.targets[os_name].keys())))<EOL><DEDENT>if arch not in build_file.targets[os_name][os_code_name]:<EOL><INDENT>raise JobValidationError(<EOL>\"<STR_LIT>\" % arch +<EOL>'<STR_LIT>' % '<STR_LIT:U+002CU+0020>'.join(sorted(<EOL>build_file.targets[os_name][os_code_name])))<EOL><DEDENT>if dist_cache is None and build_file.notify_maintainers:<EOL><INDENT>dist_cache = get_distribution_cache(index, rosdistro_name)<EOL><DEDENT>if jenkins is None:<EOL><INDENT>from ros_buildfarm.jenkins import connect<EOL>jenkins = connect(config.jenkins_url)<EOL><DEDENT>if views is None:<EOL><INDENT>view_name = get_devel_view_name(<EOL>rosdistro_name, source_build_name, pull_request=pull_request)<EOL>configure_devel_view(jenkins, view_name, dry_run=dry_run)<EOL><DEDENT>job_name = get_devel_job_name(<EOL>rosdistro_name, source_build_name,<EOL>repo_name, os_name, os_code_name, arch, pull_request)<EOL>job_config = _get_devel_job_config(<EOL>index, config, rosdistro_name, source_build_name,<EOL>build_file, os_name, os_code_name, arch, source_repository,<EOL>repo_name, pull_request, job_name, dist_cache=dist_cache,<EOL>is_disabled=is_disabled)<EOL>if isinstance(jenkins, object) and jenkins is not False:<EOL><INDENT>from ros_buildfarm.jenkins import configure_job<EOL>configure_job(jenkins, job_name, job_config, dry_run=dry_run)<EOL><DEDENT>return job_name, job_config<EOL>", "docstring": "Configure a single Jenkins devel job.\n\nThis includes the following steps:\n- clone the source repository to use\n- clone the ros_buildfarm repository\n- write the distribution repository keys into files\n- invoke the release/run_devel_job.py script", "id": "f6933:m1"}
{"signature": "def attach_bundle(self, bundle):", "body": "if not isinstance(bundle, BlueprintBundle):<EOL><INDENT>raise IncompatibleBundle('<STR_LIT>'<EOL>.format(BlueprintBundle))<EOL><DEDENT>elif len(bundle.blueprints) == <NUM_LIT:0>:<EOL><INDENT>raise MissingBlueprints(\"<STR_LIT>\")<EOL><DEDENT>elif self._bundle_exists(bundle.path):<EOL><INDENT>raise ConflictingPath(\"<STR_LIT>\".format(bundle.path))<EOL><DEDENT>elif self._journey_path == bundle.path == '<STR_LIT:/>':<EOL><INDENT>raise ConflictingPath(\"<STR_LIT>\".format(bundle.path))<EOL><DEDENT>self._attached_bundles.append(bundle)<EOL>", "docstring": "Attaches a bundle object\n\n        :param bundle: :class:`flask_journey.BlueprintBundle` object\n        :raises:\n            - IncompatibleBundle if the bundle is not of type `BlueprintBundle`\n            - ConflictingPath if a bundle already exists at bundle.path\n            - MissingBlueprints if the bundle doesn't contain any blueprints", "id": "f7016:c0:m5"}
{"signature": "def init_app(self, app):", "body": "if len(self._attached_bundles) == <NUM_LIT:0>:<EOL><INDENT>raise NoBundlesAttached(\"<STR_LIT>\")<EOL><DEDENT>for bundle in self._attached_bundles:<EOL><INDENT>processed_bundle = {<EOL>'<STR_LIT:path>': bundle.path,<EOL>'<STR_LIT:description>': bundle.description,<EOL>'<STR_LIT>': []<EOL>}<EOL>for (bp, description) in bundle.blueprints:<EOL><INDENT>blueprint = self._register_blueprint(app, bp, bundle.path,<EOL>self.get_bp_path(bp), description)<EOL>processed_bundle['<STR_LIT>'].append(blueprint)<EOL><DEDENT>self._registered_bundles.append(processed_bundle)<EOL><DEDENT>", "docstring": "Initializes Journey extension\n\n        :param app: App passed from constructor or directly to init_app\n        :raises:\n            - NoBundlesAttached if no bundles have been attached", "id": "f7016:c0:m1"}
{"signature": "@property<EOL><INDENT>def routes_simple(self):<DEDENT>", "body": "routes = []<EOL>for bundle in self._registered_bundles:<EOL><INDENT>bundle_path = bundle['<STR_LIT:path>']<EOL>for blueprint in bundle['<STR_LIT>']:<EOL><INDENT>bp_path = blueprint['<STR_LIT:path>']<EOL>for child in blueprint['<STR_LIT>']:<EOL><INDENT>routes.append(<EOL>(<EOL>child['<STR_LIT>'],<EOL>bundle_path + bp_path + child['<STR_LIT:path>'],<EOL>child['<STR_LIT>']<EOL>)<EOL>)<EOL><DEDENT><DEDENT><DEDENT>return routes<EOL>", "docstring": "Returns simple info about registered blueprints\n\n        :return: Tuple containing endpoint, path and allowed methods for each route", "id": "f7016:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def get_blueprint_routes(app, base_path):<DEDENT>", "body": "routes = []<EOL>for child in app.url_map.iter_rules():<EOL><INDENT>if child.rule.startswith(base_path):<EOL><INDENT>relative_path = child.rule[len(base_path):]<EOL>routes.append({<EOL>'<STR_LIT:path>': relative_path,<EOL>'<STR_LIT>': child.endpoint,<EOL>'<STR_LIT>': list(child.methods)<EOL>})<EOL><DEDENT><DEDENT>return routes<EOL>", "docstring": "Returns detailed information about registered blueprint routes matching the `BlueprintBundle` path\n\n        :param app: App instance to obtain rules from\n        :param base_path: Base path to return detailed route info for\n        :return: List of route detail dicts", "id": "f7016:c0:m8"}
{"signature": "def _validate_schema(obj):", "body": "if obj is not None and not isinstance(obj, Schema):<EOL><INDENT>raise IncompatibleSchema('<STR_LIT>'.format(Schema))<EOL><DEDENT>return obj<EOL>", "docstring": "Ensures the passed schema instance is compatible\n\n    :param obj: object to validate\n    :return: obj\n    :raises:\n        - IncompatibleSchema if the passed schema is of an incompatible type", "id": "f7019:m1"}
{"signature": "def _compress(self, input_str):", "body": "compressed_bits = cStringIO.StringIO()<EOL>f = gzip.GzipFile(fileobj=compressed_bits, mode='<STR_LIT:wb>')<EOL>f.write(input_str)<EOL>f.close()<EOL>return compressed_bits.getvalue()<EOL>", "docstring": "Compress the log message in order to send fewer bytes over the wire.", "id": "f7041:c0:m4"}
{"signature": "def _parse_config_file_impl(filename):", "body": "try:<EOL><INDENT>doc = yaml.load(file(filename).read())<EOL>project_id = doc[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>access_token = doc[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>api_domain = doc[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>return project_id, access_token, api_domain<EOL><DEDENT>except:<EOL><INDENT>return None, None, None<EOL><DEDENT>", "docstring": "Format for the file is:\n\n     credentials:\n         project_id: ...\n         access_token: ...\n         api_domain: ...\n\n:param filename: The filename to parse\n:return: A tuple with:\n            - project_id\n            - access_token\n            - api_domain", "id": "f7043:m1"}
{"signature": "def _encode_params(**kwargs):", "body": "args = []<EOL>for k, v in kwargs.iteritems():<EOL><INDENT>if isinstance(v, basestring):<EOL><INDENT>qv = v.encode('<STR_LIT:utf-8>') if isinstance(v, unicode) else v<EOL>args.append('<STR_LIT>' % (k, urllib.quote(qv, SAFE_URL_CARS)))<EOL><DEDENT>elif isinstance(v, collections.Iterable):<EOL><INDENT>for i in v:<EOL><INDENT>qv = i.encode('<STR_LIT:utf-8>') if isinstance(i, unicode) else str(i)<EOL>args.append('<STR_LIT>' % (k, urllib.quote(qv, SAFE_URL_CARS)))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>qv = str(v)<EOL>args.append('<STR_LIT>' % (k, urllib.quote(qv, SAFE_URL_CARS)))<EOL><DEDENT><DEDENT>return '<STR_LIT:&>'.join(args)<EOL>", "docstring": "Do url-encode parameters\n\n>>> _encode_params(a=1, b='R&D')\n'a=1&b=R%26D'\n>>> _encode_params(a=u'\\u4e2d\\u6587', b=['A', 'B', 123])\n'a=%E4%B8%AD%E6%96%87&b=A&b=B&b=123'", "id": "f7049:m0"}
{"signature": "def __init__(self, **kwargs):", "body": "interface = Auth(**kwargs)<EOL>self.base_url = '<STR_LIT>'<EOL>self.gbdx_connection = interface.gbdx_connection<EOL>self.logger = interface.logger<EOL>", "docstring": "Construct an instance of an AnswerFactory Project\n\nArgs:\n    **kwargs\n\nReturns:\n    An instance of a Project.", "id": "f7051:c1:m0"}
{"signature": "def list(self):", "body": "self.logger.debug('<STR_LIT>')<EOL>url = '<STR_LIT>' % {<EOL>'<STR_LIT>': self.base_url<EOL>}<EOL>r = self.gbdx_connection.get(url)<EOL>r.raise_for_status()<EOL>return r.json()<EOL>", "docstring": "Retrieves a list of AnswerFactory Recipes\n\nArgs:\n    None\n\nReturns:\n    A list of JSON representations of recipes", "id": "f7051:c0:m2"}
{"signature": "def delete(self, recipe_id):", "body": "self.logger.debug('<STR_LIT>' + recipe_id)<EOL>url = '<STR_LIT>' % {<EOL>'<STR_LIT>': self.base_url, '<STR_LIT>': recipe_id<EOL>}<EOL>r = self.gbdx_connection.delete(url)<EOL>r.raise_for_status()<EOL>", "docstring": "Deletes an AnswerFactory Recipe by id\n\nArgs:\n     recipe_id: The id of the recipe to delete\n\nReturns:\n     Nothing", "id": "f7051:c0:m4"}
{"signature": "def __init__(self, **kwargs):", "body": "interface = Auth(**kwargs)<EOL>self.base_url = '<STR_LIT>'<EOL>self.gbdx_connection = interface.gbdx_connection<EOL>self.logger = interface.logger<EOL>", "docstring": "Construct an instance of an AnswerFactory Recipe\n\nArgs:\n    **kwargs\n\nReturns:\n    An instance of a Recipe.", "id": "f7051:c0:m0"}
{"signature": "def delete(self, project_id):", "body": "self.logger.debug('<STR_LIT>' + project_id)<EOL>url = '<STR_LIT>' % {<EOL>'<STR_LIT>': self.base_url, '<STR_LIT>': project_id<EOL>}<EOL>r = self.gbdx_connection.delete(url)<EOL>r.raise_for_status()<EOL>", "docstring": "Deletes a project by id\n\nArgs:\n     project_id: The project id to delete\n\nReturns:\n     Nothing", "id": "f7051:c1:m3"}
{"signature": "def save(self, recipe):", "body": "<EOL>if '<STR_LIT:id>' in recipe and recipe['<STR_LIT:id>'] is not None:<EOL><INDENT>self.logger.debug(\"<STR_LIT>\" + json.dumps(recipe))<EOL>url = '<STR_LIT>' % {<EOL>'<STR_LIT>': self.base_url, '<STR_LIT>': recipe['<STR_LIT:id>']<EOL>}<EOL>r = self.gbdx_connection.put(url, json=recipe)<EOL>try:<EOL><INDENT>r.raise_for_status()<EOL><DEDENT>except:<EOL><INDENT>print(r.text)<EOL>raise<EOL><DEDENT>return recipe['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>self.logger.debug(\"<STR_LIT>\" + json.dumps(recipe))<EOL>url = '<STR_LIT>' % {<EOL>'<STR_LIT>': self.base_url<EOL>}<EOL>r = self.gbdx_connection.post(url, json=recipe)<EOL>try:<EOL><INDENT>r.raise_for_status()<EOL><DEDENT>except:<EOL><INDENT>print(r.text)<EOL>raise<EOL><DEDENT>recipe_json = r.json()<EOL>return recipe_json['<STR_LIT:id>']<EOL><DEDENT>", "docstring": "Saves an AnswerFactory Recipe\n\nArgs:\n    recipe (dict): Dictionary specifying a recipe\n\nReturns:\n    AnswerFactory Recipe id", "id": "f7051:c0:m3"}
{"signature": "def get(self, project_id):", "body": "self.logger.debug('<STR_LIT>' + project_id)<EOL>url = '<STR_LIT>' % {<EOL>'<STR_LIT>': self.base_url, '<STR_LIT>': project_id<EOL>}<EOL>r = self.gbdx_connection.get(url)<EOL>r.raise_for_status()<EOL>return r.json()<EOL>", "docstring": "Retrieves an AnswerFactory Project by id\n\nArgs:\n    project_id\n\nReturns:\n    A JSON representation of the project", "id": "f7051:c1:m1"}
{"signature": "def status(self, workflow_id):", "body": "self.logger.debug('<STR_LIT>' + workflow_id)<EOL>url = '<STR_LIT>' % {<EOL>'<STR_LIT>': self.workflows_url, '<STR_LIT>': workflow_id<EOL>}<EOL>r = self.gbdx_connection.get(url)<EOL>r.raise_for_status()<EOL>return r.json()['<STR_LIT:state>']<EOL>", "docstring": "Checks workflow status.\n\n         Args:\n             workflow_id (str): Workflow id.\n\n         Returns:\n             Workflow status (str).", "id": "f7052:c0:m2"}
{"signature": "def search(self, lookback_h=<NUM_LIT:12>, owner=None, state=\"<STR_LIT:all>\"):", "body": "postdata = {<EOL>\"<STR_LIT>\": lookback_h,<EOL>\"<STR_LIT:state>\": state<EOL>}<EOL>if owner is not None:<EOL><INDENT>postdata['<STR_LIT>'] = owner<EOL><DEDENT>url = \"<STR_LIT>\".format(self.base_url)<EOL>headers = {'<STR_LIT:Content-Type>':'<STR_LIT:application/json>'}<EOL>r = self.gbdx_connection.post(url, headers=headers, data=json.dumps(postdata))<EOL>return r.json()<EOL>", "docstring": "Searches GBDX workflows.\n\n         Params:\n            lookback_h (int): Look back time in hours.\n            owner (str): Workflow owner to search by\n            state (str): State to filter by, eg:\n                \"submitted\",\n                \"scheduled\",\n                \"started\",\n                \"canceled\",\n                \"cancelling\",\n                \"failed\",\n                \"succeeded\",\n                \"timedout\",\n                \"pending\",\n                \"running\",\n                \"complete\",\n                \"waiting\",\n                \"all\"\n\n         Returns:\n             Workflow search results (json).", "id": "f7052:c0:m11"}
{"signature": "def get_stdout(self, workflow_id, task_id):", "body": "url = '<STR_LIT>' % {<EOL>'<STR_LIT>': self.workflows_url, '<STR_LIT>': workflow_id, '<STR_LIT>': task_id<EOL>}<EOL>r = self.gbdx_connection.get(url)<EOL>r.raise_for_status()<EOL>return r.text<EOL>", "docstring": "Get stdout for a particular task.\n\n         Args:\n             workflow_id (str): Workflow id.\n             task_id (str): Task id.\n\n         Returns:\n             Stdout of the task (string).", "id": "f7052:c0:m4"}
{"signature": "def heartbeat(self):", "body": "url = '<STR_LIT>' % self.base_url<EOL>r = requests.get(url) <EOL>try:<EOL><INDENT>return r.json() == \"<STR_LIT>\"<EOL><DEDENT>except:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Check the heartbeat of the ordering API\n\nArgs: None\n\nReturns:  True or False", "id": "f7053:c0:m3"}
{"signature": "def status(self, order_id):", "body": "self.logger.debug('<STR_LIT>' + order_id)<EOL>url = '<STR_LIT>' % {<EOL>'<STR_LIT>': self.base_url, '<STR_LIT>': order_id<EOL>}<EOL>r = self.gbdx_connection.get(url)<EOL>r.raise_for_status()<EOL>return r.json().get(\"<STR_LIT>\", {})<EOL>", "docstring": "Checks imagery order status. There can be more than one image per\n           order and this function returns the status of all images\n           within the order.\n\n           Args:\n               order_id (str): The id of the order placed.\n\n           Returns:\n               List of dictionaries, one per image. Each dictionary consists\n               of the keys 'acquisition_id', 'location' and 'state'.", "id": "f7053:c0:m2"}
{"signature": "def __init__(self, **kwargs):", "body": "interface = Auth(**kwargs)<EOL>self.base_url = '<STR_LIT>' % interface.root_url<EOL>self.gbdx_connection = interface.gbdx_connection<EOL>self.logger = interface.logger<EOL>", "docstring": "Instantiate the GBDX Ordering Interface\n\n           Returns:\n               An instance of the Ordering interface.", "id": "f7053:c0:m0"}
{"signature": "def calc_toa_gain_offset(meta):", "body": "<EOL>sat_index = meta['<STR_LIT>'].upper() + \"<STR_LIT:_>\" + meta['<STR_LIT>'].upper()<EOL>acf = np.asarray(meta['<STR_LIT>'])  <EOL>ebw = np.asarray(meta['<STR_LIT>'])  <EOL>gain = np.asarray(constants.DG_ABSCAL_GAIN[sat_index])<EOL>scale = (acf / ebw) * gain<EOL>offset = np.asarray(constants.DG_ABSCAL_OFFSET[sat_index])<EOL>e_sun_index = meta['<STR_LIT>'].upper() + \"<STR_LIT:_>\" + meta['<STR_LIT>'].upper()<EOL>e_sun = np.asarray(constants.DG_ESUN[e_sun_index])<EOL>sun = ephem.Sun()<EOL>img_obs = ephem.Observer()<EOL>img_obs.lon = meta['<STR_LIT>'][<NUM_LIT:1>]<EOL>img_obs.lat = meta['<STR_LIT>'][<NUM_LIT:0>]<EOL>img_obs.elevation = meta['<STR_LIT>'][<NUM_LIT:2>]<EOL>img_obs.date = datetime.datetime.fromtimestamp(meta['<STR_LIT>']['<STR_LIT>'] / <NUM_LIT>).strftime(<EOL>'<STR_LIT>')<EOL>sun.compute(img_obs)<EOL>d_es = sun.earth_distance<EOL>theta_s = <NUM_LIT> - float(meta['<STR_LIT>'])<EOL>scale2 = (d_es ** <NUM_LIT:2> * np.pi) / (e_sun * np.cos(np.deg2rad(theta_s)))<EOL>return zip(scale, scale2, offset)<EOL>", "docstring": "Compute (gain, offset) tuples for each band of the specified image metadata", "id": "f7055:m4"}
{"signature": "def get_proj(prj_code):", "body": "if prj_code in CUSTOM_PRJ:<EOL><INDENT>proj = pyproj.Proj(CUSTOM_PRJ[prj_code])<EOL><DEDENT>else:<EOL><INDENT>proj = pyproj.Proj(init=prj_code)<EOL><DEDENT>return proj<EOL>", "docstring": "Helper method for handling projection codes that are unknown to pyproj\n\nArgs:\n    prj_code (str): an epsg proj code\n\nReturns:\n    projection: a pyproj projection", "id": "f7055:m0"}
{"signature": "def preview(image, **kwargs):", "body": "try:<EOL><INDENT>from IPython.display import Javascript, HTML, display<EOL>from gbdxtools.rda.interface import RDA<EOL>from gbdxtools import Interface<EOL>gbdx = Interface()<EOL><DEDENT>except:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>zoom = kwargs.get(\"<STR_LIT>\", <NUM_LIT:16>)<EOL>bands = kwargs.get(\"<STR_LIT>\")<EOL>if bands is None:<EOL><INDENT>bands = image._rgb_bands<EOL><DEDENT>wgs84_bounds = kwargs.get(\"<STR_LIT>\", list(loads(image.metadata[\"<STR_LIT:image>\"][\"<STR_LIT>\"]).bounds))<EOL>center = kwargs.get(\"<STR_LIT>\", list(shape(image).centroid.bounds[<NUM_LIT:0>:<NUM_LIT:2>]))<EOL>if image.proj != '<STR_LIT>':<EOL><INDENT>code = image.proj.split('<STR_LIT::>')[<NUM_LIT:1>]<EOL>conn = gbdx.gbdx_connection<EOL>proj_info = conn.get('<STR_LIT>'.format(code)).json()<EOL>tfm = partial(pyproj.transform, pyproj.Proj(init='<STR_LIT>'), pyproj.Proj(init=image.proj))<EOL>bounds = list(ops.transform(tfm, box(*wgs84_bounds)).bounds)<EOL><DEDENT>else:<EOL><INDENT>proj_info = {}<EOL>bounds = wgs84_bounds<EOL><DEDENT>if not image.options.get('<STR_LIT>'):<EOL><INDENT>rda = RDA()<EOL>dra = rda.HistogramDRA(image)<EOL>image = dra.aoi(bbox=image.bounds)<EOL><DEDENT>graph_id = image.rda_id<EOL>node_id = image.rda.graph()['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT:id>']<EOL>map_id = \"<STR_LIT>\".format(str(int(time.time())))<EOL>scales = '<STR_LIT:U+002C>'.join(['<STR_LIT:1>'] * len(bands))<EOL>offsets = '<STR_LIT:U+002C>'.join(['<STR_LIT:0>'] * len(bands))<EOL>display(HTML(Template('''<STR_LIT>''').substitute({\"<STR_LIT>\": map_id})))<EOL>js = Template(\"\"\"<STR_LIT>\"\"\").substitute({<EOL>\"<STR_LIT>\": map_id,<EOL>\"<STR_LIT>\": image.proj,<EOL>\"<STR_LIT>\": json.dumps(proj_info),<EOL>\"<STR_LIT>\": graph_id,<EOL>\"<STR_LIT>\": bounds,<EOL>\"<STR_LIT>\": \"<STR_LIT:U+002C>\".join(map(str, bands)),<EOL>\"<STR_LIT>\": node_id,<EOL>\"<STR_LIT>\": 
json.dumps(image.metadata[\"<STR_LIT:image>\"]),<EOL>\"<STR_LIT>\": json.dumps(image.metadata[\"<STR_LIT>\"]),<EOL>\"<STR_LIT>\": center,<EOL>\"<STR_LIT>\": zoom,<EOL>\"<STR_LIT>\": gbdx.gbdx_connection.access_token,<EOL>\"<STR_LIT>\": scales,<EOL>\"<STR_LIT>\": offsets,<EOL>\"<STR_LIT:url>\": VIRTUAL_RDA_URL<EOL>})<EOL>display(Javascript(js))<EOL>", "docstring": "Show a slippy map preview of the image. Requires iPython.\n\n    Args:\n        image (image): image object to display\n        zoom (int): zoom level to intialize the map, default is 16\n        center (list): center coordinates to initialize the map, defaults to center of image\n        bands (list): bands of image to display, defaults to the image's default RGB bands", "id": "f7055:m1"}
{"signature": "@lru_cache(maxsize=<NUM_LIT>)<EOL>def load_url(url, token, shape=(<NUM_LIT:8>, <NUM_LIT>, <NUM_LIT>)):", "body": "_, ext = os.path.splitext(urlparse(url).path)<EOL>success = False<EOL>for i in xrange(MAX_RETRIES):<EOL><INDENT>thread_id = threading.current_thread().ident<EOL>_curl = _curl_pool[thread_id]<EOL>_curl.setopt(_curl.URL, url)<EOL>_curl.setopt(pycurl.NOSIGNAL, <NUM_LIT:1>)<EOL>_curl.setopt(pycurl.HTTPHEADER, ['<STR_LIT>'.format(token)])<EOL>with NamedTemporaryFile(prefix=\"<STR_LIT>\", suffix=ext, delete=False) as temp: <EOL><INDENT>_curl.setopt(_curl.WRITEDATA, temp.file)<EOL>_curl.perform()<EOL>code = _curl.getinfo(pycurl.HTTP_CODE)<EOL>try:<EOL><INDENT>if(code != <NUM_LIT:200>):<EOL><INDENT>raise TypeError(\"<STR_LIT>\".format(url, code))<EOL><DEDENT>temp.file.flush()<EOL>temp.close()<EOL>arr = imread(temp.name)<EOL>if len(arr.shape) == <NUM_LIT:3>:<EOL><INDENT>arr = np.rollaxis(arr, <NUM_LIT:2>, <NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>arr = np.expand_dims(arr, axis=<NUM_LIT:0>)<EOL><DEDENT>success = True<EOL>return arr<EOL><DEDENT>except Exception as e:<EOL><INDENT>_curl.close()<EOL>del _curl_pool[thread_id]<EOL><DEDENT>finally:<EOL><INDENT>temp.close()<EOL>os.remove(temp.name)<EOL><DEDENT><DEDENT><DEDENT>if success is False:<EOL><INDENT>raise TypeError(\"<STR_LIT>\".format(url, code))<EOL><DEDENT>return arr<EOL>", "docstring": "Loads a geotiff url inside a thread and returns as an ndarray", "id": "f7060:m0"}
{"signature": "def to_geotiff(arr, path='<STR_LIT>', proj=None, spec=None, bands=None, **kwargs):", "body": "assert has_rasterio, \"<STR_LIT>\" <EOL>try:<EOL><INDENT>img_md = arr.rda.metadata[\"<STR_LIT:image>\"]<EOL>x_size = img_md[\"<STR_LIT>\"]<EOL>y_size = img_md[\"<STR_LIT>\"]<EOL><DEDENT>except (AttributeError, KeyError):<EOL><INDENT>x_size = kwargs.get(\"<STR_LIT>\", <NUM_LIT>)<EOL>y_size = kwargs.get(\"<STR_LIT>\", <NUM_LIT>)<EOL><DEDENT>try:<EOL><INDENT>tfm = kwargs['<STR_LIT>'] if '<STR_LIT>' in kwargs else arr.affine<EOL><DEDENT>except:<EOL><INDENT>tfm = None<EOL><DEDENT>dtype = arr.dtype.name if arr.dtype.name != '<STR_LIT>' else '<STR_LIT>' <EOL>if spec is not None and spec.lower() == '<STR_LIT>':<EOL><INDENT>if bands is None:<EOL><INDENT>bands = arr._rgb_bands<EOL><DEDENT>if not arr.options.get('<STR_LIT>'):<EOL><INDENT>from gbdxtools.rda.interface import RDA<EOL>rda = RDA()<EOL>dra = rda.HistogramDRA(arr)<EOL>arr = dra.aoi(bbox=arr.bounds)<EOL><DEDENT>arr = arr[bands,...].astype(np.uint8)<EOL>dtype = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>if bands is not None:<EOL><INDENT>arr = arr[bands,...]<EOL><DEDENT><DEDENT>meta = {<EOL>'<STR_LIT:width>': arr.shape[<NUM_LIT:2>],<EOL>'<STR_LIT>': arr.shape[<NUM_LIT:1>],<EOL>'<STR_LIT:count>': arr.shape[<NUM_LIT:0>],<EOL>'<STR_LIT>': dtype,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': tfm<EOL>}<EOL>if proj is not None:<EOL><INDENT>meta[\"<STR_LIT>\"] = {'<STR_LIT>': proj}<EOL><DEDENT>if \"<STR_LIT>\" in kwargs and kwargs[\"<STR_LIT>\"]:<EOL><INDENT>meta.update(blockxsize=x_size, blockysize=y_size, tiled=\"<STR_LIT:yes>\")<EOL><DEDENT>with rasterio.open(path, \"<STR_LIT:w>\", **meta) as dst:<EOL><INDENT>writer = rio_writer(dst)<EOL>result = store(arr, writer, compute=False)<EOL>result.compute(scheduler=threaded_get)<EOL><DEDENT>return path<EOL>", "docstring": "Write out a geotiff file of the image\n\n    Args:\n        path (str): path to write the geotiff file to, default is ./output.tif\n        proj (str): 
EPSG string of projection to reproject to\n        spec (str): if set to 'rgb', write out color-balanced 8-bit RGB tif\n        bands (list): list of bands to export. If spec='rgb' will default to RGB bands\n\n    Returns:\n        str: path the geotiff was written to", "id": "f7065:m0"}
{"signature": "def _histogram_stretch(self, data, **kwargs):", "body": "limits = {}<EOL>for x in range(<NUM_LIT:3>):<EOL><INDENT>band = data[:,:,x]<EOL>try:<EOL><INDENT>limits[x] = np.percentile(band, kwargs.get(\"<STR_LIT>\", [<NUM_LIT:0>,<NUM_LIT:100>]))<EOL><DEDENT>except IndexError:<EOL><INDENT>return data<EOL><DEDENT><DEDENT>for x in range(<NUM_LIT:3>):<EOL><INDENT>band = data[:,:,x]<EOL>if <NUM_LIT:0> in band:<EOL><INDENT>band = np.ma.masked_values(band, <NUM_LIT:0>).compressed()<EOL><DEDENT>top = limits[x][<NUM_LIT:1>]<EOL>bottom = limits[x][<NUM_LIT:0>]<EOL>if top != bottom: <EOL><INDENT>data[:,:,x] = (data[:,:,x] - bottom) / float(top - bottom) * <NUM_LIT><EOL><DEDENT><DEDENT>data = np.clip(data, <NUM_LIT:0>, <NUM_LIT:255>).astype(\"<STR_LIT>\")<EOL>if \"<STR_LIT>\" in kwargs:<EOL><INDENT>invGamma = <NUM_LIT:1.0> / kwargs['<STR_LIT>']<EOL>lut = np.array([((i / <NUM_LIT>) ** invGamma) * <NUM_LIT:255><EOL>for i in np.arange(<NUM_LIT:0>, <NUM_LIT>)]).astype(\"<STR_LIT>\")<EOL>data = np.take(lut, data)<EOL><DEDENT>return data<EOL>", "docstring": "perform a contrast stretch and/or gamma adjustment", "id": "f7074:c0:m5"}
{"signature": "def rgb(self, **kwargs):", "body": "if \"<STR_LIT>\" in kwargs:<EOL><INDENT>use_bands = kwargs[\"<STR_LIT>\"]<EOL>assert len(use_bands) == <NUM_LIT:3>, '<STR_LIT>'<EOL>del kwargs[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>use_bands = self._rgb_bands<EOL><DEDENT>if kwargs.get('<STR_LIT>') == True:<EOL><INDENT>return self.histogram_match(use_bands, **kwargs)<EOL><DEDENT>if \"<STR_LIT>\" not in kwargs:<EOL><INDENT>if \"<STR_LIT>\" not in kwargs:<EOL><INDENT>if not self.options.get('<STR_LIT>'):<EOL><INDENT>kwargs['<STR_LIT>'] = [<NUM_LIT:2>,<NUM_LIT>]<EOL><DEDENT><DEDENT>return self.histogram_stretch(use_bands, **kwargs)<EOL><DEDENT>elif kwargs[\"<STR_LIT>\"] == \"<STR_LIT>\":<EOL><INDENT>return self.histogram_equalize(use_bands, **kwargs)<EOL><DEDENT>elif kwargs[\"<STR_LIT>\"] == \"<STR_LIT>\":<EOL><INDENT>return self.histogram_match(use_bands, **kwargs)<EOL><DEDENT>elif kwargs[\"<STR_LIT>\"] == \"<STR_LIT>\":<EOL><INDENT>return self.histogram_stretch(use_bands, stretch=[<NUM_LIT:0>, <NUM_LIT:100>], **kwargs)<EOL><DEDENT>elif kwargs[\"<STR_LIT>\"] == \"<STR_LIT:ignore>\" or self.options.get('<STR_LIT>'):<EOL><INDENT>data = self._read(self[use_bands,...], **kwargs)<EOL>return np.rollaxis(data, <NUM_LIT:0>, <NUM_LIT:3>)<EOL><DEDENT>else:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Convert the image to a 3 band RGB for plotting\n\n        This method shares the same arguments as plot(). It will perform visual adjustment on the\n        image and prepare the data for plotting in MatplotLib. Values are converted to an\n        appropriate precision and the axis order is changed to put the band axis last.", "id": "f7074:c0:m0"}
{"signature": "def ndwi(self):", "body": "data = self._read(self[self._ndwi_bands,...]).astype(np.float32)<EOL>return (data[<NUM_LIT:1>,:,:] - data[<NUM_LIT:0>,:,:]) / (data[<NUM_LIT:0>,:,:] + data[<NUM_LIT:1>,:,:])<EOL>", "docstring": "Calculates Normalized Difference Water Index using Coastal and NIR2 bands for WV02, WV03.\nFor Landsat8 and sentinel2 calculated by using Green and NIR bands.\n\nReturns: numpy array of ndwi values", "id": "f7074:c0:m7"}
{"signature": "def histogram_equalize(self, use_bands, **kwargs):", "body": "data = self._read(self[use_bands,...], **kwargs)<EOL>data = np.rollaxis(data.astype(np.float32), <NUM_LIT:0>, <NUM_LIT:3>)<EOL>flattened = data.flatten()<EOL>if <NUM_LIT:0> in data:<EOL><INDENT>masked = np.ma.masked_values(data, <NUM_LIT:0>).compressed()<EOL>image_histogram, bin_edges = np.histogram(masked, <NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>image_histogram, bin_edges = np.histogram(flattened, <NUM_LIT>)<EOL><DEDENT>bins = (bin_edges[:-<NUM_LIT:1>] + bin_edges[<NUM_LIT:1>:]) / <NUM_LIT><EOL>cdf = image_histogram.cumsum() <EOL>cdf = cdf / float(cdf[-<NUM_LIT:1>])<EOL>image_equalized = np.interp(flattened, bins, cdf).reshape(data.shape)<EOL>if '<STR_LIT>' in kwargs or '<STR_LIT>' in kwargs:<EOL><INDENT>return self._histogram_stretch(image_equalized, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>return image_equalized<EOL><DEDENT>", "docstring": "Equalize and the histogram and normalize value range\n            Equalization is on all three bands, not per-band", "id": "f7074:c0:m2"}
{"signature": "def histogram_stretch(self, use_bands, **kwargs):", "body": "data = self._read(self[use_bands,...], **kwargs)<EOL>data = np.rollaxis(data.astype(np.float32), <NUM_LIT:0>, <NUM_LIT:3>)<EOL>return self._histogram_stretch(data, **kwargs)<EOL>", "docstring": "entry point for contrast stretching", "id": "f7074:c0:m4"}
{"signature": "def histogram_match(self, use_bands, blm_source=None, **kwargs):", "body": "assert has_rio, \"<STR_LIT>\"<EOL>data = self._read(self[use_bands,...], **kwargs)<EOL>data = np.rollaxis(data.astype(np.float32), <NUM_LIT:0>, <NUM_LIT:3>)<EOL>if <NUM_LIT:0> in data:<EOL><INDENT>data = np.ma.masked_values(data, <NUM_LIT:0>)<EOL><DEDENT>bounds = self._reproject(box(*self.bounds), from_proj=self.proj, to_proj=\"<STR_LIT>\").bounds<EOL>if blm_source == '<STR_LIT>':<EOL><INDENT>from gbdxtools.images.browse_image import BrowseImage<EOL>ref = BrowseImage(self.cat_id, bbox=bounds).read()<EOL><DEDENT>else:<EOL><INDENT>from gbdxtools.images.tms_image import TmsImage<EOL>tms = TmsImage(zoom=self._calc_tms_zoom(self.affine[<NUM_LIT:0>]), bbox=bounds, **kwargs)<EOL>ref = np.rollaxis(tms.read(), <NUM_LIT:0>, <NUM_LIT:3>)<EOL><DEDENT>out = np.dstack([rio_match(data[:,:,idx], ref[:,:,idx].astype(np.double)/<NUM_LIT>)<EOL>for idx in range(data.shape[-<NUM_LIT:1>])])<EOL>if '<STR_LIT>' in kwargs or '<STR_LIT>' in kwargs:<EOL><INDENT>return self._histogram_stretch(out, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>return out<EOL><DEDENT>", "docstring": "Match the histogram to existing imagery", "id": "f7074:c0:m3"}
{"signature": "def materialize(self, node=None, bounds=None, callback=None, out_format='<STR_LIT>', **kwargs):", "body": "kwargs.update({<EOL>\"<STR_LIT>\": node,<EOL>\"<STR_LIT>\": bounds,<EOL>\"<STR_LIT>\": callback,<EOL>\"<STR_LIT>\": out_format<EOL>})<EOL>return self.rda._materialize(**kwargs)<EOL>", "docstring": "Materializes images into gbdx user buckets in s3.\nNote: This method is only available to RDA based image classes. \n\nArgs:\n  node (str): the node in the graph to materialize\n  bounds (list): optional bbox for cropping what gets materialized in s3\n  out_format (str): VECTOR_TILE, VECTOR, TIF, TILE_STREAM\n  callback (str): a callback url like an `sns://`\nReturns:\n  job_id (str): the job_id of the materialization", "id": "f7077:c2:m10"}
{"signature": "def warp(self, dem=None, proj=\"<STR_LIT>\", **kwargs):", "body": "try:<EOL><INDENT>img_md = self.rda.metadata[\"<STR_LIT:image>\"]<EOL>x_size = img_md[\"<STR_LIT>\"]<EOL>y_size = img_md[\"<STR_LIT>\"]<EOL><DEDENT>except (AttributeError, KeyError):<EOL><INDENT>x_size = kwargs.get(\"<STR_LIT>\", <NUM_LIT>)<EOL>y_size = kwargs.get(\"<STR_LIT>\", <NUM_LIT>)<EOL><DEDENT>if self.proj is None:<EOL><INDENT>from_proj = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>from_proj = self.proj<EOL><DEDENT>try:<EOL><INDENT>center = wkt.loads(self.rda.metadata[\"<STR_LIT:image>\"][\"<STR_LIT>\"]).centroid<EOL>g = box(*(center.buffer(self.rda.metadata[\"<STR_LIT>\"][\"<STR_LIT>\"] / <NUM_LIT:2>).bounds))<EOL>tfm = partial(pyproj.transform, pyproj.Proj(init=\"<STR_LIT>\"), pyproj.Proj(init=proj))<EOL>gsd = kwargs.get(\"<STR_LIT>\", ops.transform(tfm, g).area ** <NUM_LIT:0.5>)<EOL>current_bounds = wkt.loads(self.rda.metadata[\"<STR_LIT:image>\"][\"<STR_LIT>\"]).bounds<EOL><DEDENT>except (AttributeError, KeyError, TypeError):<EOL><INDENT>tfm = partial(pyproj.transform, pyproj.Proj(init=self.proj), pyproj.Proj(init=proj))<EOL>gsd = kwargs.get(\"<STR_LIT>\", (ops.transform(tfm, shape(self)).area / (self.shape[<NUM_LIT:1>] * self.shape[<NUM_LIT:2>])) ** <NUM_LIT:0.5> )<EOL>current_bounds = self.bounds<EOL><DEDENT>tfm = partial(pyproj.transform, pyproj.Proj(init=from_proj), pyproj.Proj(init=proj))<EOL>itfm = partial(pyproj.transform, pyproj.Proj(init=proj), pyproj.Proj(init=from_proj))<EOL>output_bounds = ops.transform(tfm, box(*current_bounds)).bounds<EOL>gtf = Affine.from_gdal(output_bounds[<NUM_LIT:0>], gsd, <NUM_LIT:0.0>, output_bounds[<NUM_LIT:3>], <NUM_LIT:0.0>, -<NUM_LIT:1> * gsd)<EOL>ll = ~gtf * (output_bounds[:<NUM_LIT:2>])<EOL>ur = ~gtf * (output_bounds[<NUM_LIT:2>:])<EOL>x_chunks = int((ur[<NUM_LIT:0>] - ll[<NUM_LIT:0>]) / x_size) + <NUM_LIT:1><EOL>y_chunks = int((ll[<NUM_LIT:1>] - ur[<NUM_LIT:1>]) / y_size) + <NUM_LIT:1><EOL>num_bands = 
self.shape[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>dtype = RDA_TO_DTYPE[img_md[\"<STR_LIT>\"]]<EOL><DEDENT>except:<EOL><INDENT>dtype = '<STR_LIT>'<EOL><DEDENT>daskmeta = {<EOL>\"<STR_LIT>\": {},<EOL>\"<STR_LIT>\": (num_bands, y_size, x_size),<EOL>\"<STR_LIT>\": dtype,<EOL>\"<STR_LIT:name>\": \"<STR_LIT>\".format(self.name),<EOL>\"<STR_LIT>\": (num_bands, y_chunks * y_size, x_chunks * x_size)<EOL>}<EOL>def px_to_geom(xmin, ymin):<EOL><INDENT>xmax = int(xmin + x_size)<EOL>ymax = int(ymin + y_size)<EOL>bounds = list((gtf * (xmin, ymax)) + (gtf * (xmax, ymin)))<EOL>return box(*bounds)<EOL><DEDENT>full_bounds = box(*output_bounds)<EOL>dasks = []<EOL>if isinstance(dem, GeoDaskImage):<EOL><INDENT>if dem.proj != proj:<EOL><INDENT>dem = dem.warp(proj=proj, dem=dem)<EOL><DEDENT>dasks.append(dem.dask)<EOL><DEDENT>for y in xrange(y_chunks):<EOL><INDENT>for x in xrange(x_chunks):<EOL><INDENT>xmin = x * x_size<EOL>ymin = y * y_size<EOL>geometry = px_to_geom(xmin, ymin)<EOL>daskmeta[\"<STR_LIT>\"][(daskmeta[\"<STR_LIT:name>\"], <NUM_LIT:0>, y, x)] = (self._warp, geometry, gsd, dem, proj, dtype, <NUM_LIT:5>)<EOL><DEDENT><DEDENT>daskmeta[\"<STR_LIT>\"], _ = optimization.cull(HighLevelGraph.merge(daskmeta[\"<STR_LIT>\"], *dasks), list(daskmeta[\"<STR_LIT>\"].keys()))<EOL>gi = mapping(full_bounds)<EOL>gt = AffineTransform(gtf, proj)<EOL>image = GeoDaskImage(daskmeta, __geo_interface__ = gi, __geo_transform__ = gt)<EOL>return image[box(*output_bounds)]<EOL>", "docstring": "Delayed warp across an entire AOI or Image\n\n        Creates a new dask image by deferring calls to the warp_geometry on chunks\n\n        Args:\n            dem (ndarray): optional. A DEM for warping to specific elevation planes\n            proj (str): optional. An EPSG proj string to project the image data into (\"EPSG:32612\")\n\n        Returns:\n            daskarray: a warped image as deferred image array", "id": "f7081:c2:m10"}
{"signature": "def read(self, bands=None, **kwargs):", "body": "arr = self<EOL>if bands is not None:<EOL><INDENT>arr = self[bands, ...]<EOL><DEDENT>return arr.compute(scheduler=threaded_get)<EOL>", "docstring": "Reads data from a dask array and returns the computed ndarray matching the given bands\n\n        Args:\n            bands (list): band indices to read from the image. Returns bands in the order specified in the list of bands.\n\n        Returns:\n            ndarray: a numpy array of image data", "id": "f7081:c1:m2"}
{"signature": "def map_blocks(self, *args, **kwargs):", "body": "darr = super(GeoDaskImage, self).map_blocks(*args, **kwargs)<EOL>return GeoDaskImage(darr, __geo_interface__ = self.__geo_interface__,<EOL>__geo_transform__ = self.__geo_transform__)<EOL>", "docstring": "Queue a deferred function to run on each block of image\n\n        This is identical to Dask's map_block functinos, but returns a GeoDaskImage to preserve\n        the geospatial information.\n\n        Args: see dask.Array.map_blocks\n\n        Returns:\n            GeoDaskImage: a dask array with the function queued up to run when the image is read", "id": "f7081:c2:m0"}
{"signature": "def iterwindows(self, count=<NUM_LIT:64>, window_shape=(<NUM_LIT>, <NUM_LIT>)):", "body": "if count is None:<EOL><INDENT>while True:<EOL><INDENT>yield self.randwindow(window_shape)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for i in xrange(count):<EOL><INDENT>yield self.randwindow(window_shape)<EOL><DEDENT><DEDENT>", "docstring": "Iterate over random windows of an image\n\n        Args:\n            count (int): the number of the windows to generate. Defaults to 64, if `None` will continue to iterate over random windows until stopped.\n            window_shape (tuple): The desired shape of each image as (height, width) in pixels.\n\n        Yields:\n            image: an image of the given shape and same type.", "id": "f7081:c1:m4"}
{"signature": "def pxbounds(self, geom, clip=False):", "body": "try:<EOL><INDENT>if isinstance(geom, dict):<EOL><INDENT>if '<STR_LIT>' in geom:<EOL><INDENT>geom = shape(geom['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>geom = shape(geom)<EOL><DEDENT><DEDENT>elif isinstance(geom, BaseGeometry):<EOL><INDENT>geom = shape(geom)<EOL><DEDENT>else:<EOL><INDENT>geom = wkt.loads(geom)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>raise TypeError (\"<STR_LIT>\")<EOL><DEDENT>if geom.disjoint(shape(self)):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>(xmin, ymin, xmax, ymax) = ops.transform(self.__geo_transform__.rev, geom).bounds<EOL>_nbands, ysize, xsize = self.shape<EOL>if clip:<EOL><INDENT>xmin = max(xmin, <NUM_LIT:0>)<EOL>ymin = max(ymin, <NUM_LIT:0>)<EOL>xmax = min(xmax, xsize)<EOL>ymax = min(ymax, ysize)<EOL><DEDENT>return (xmin, ymin, xmax, ymax)<EOL>", "docstring": "Returns the bounds of a geometry object in pixel coordinates\n\n        Args:\n            geom: Shapely geometry object or GeoJSON as Python dictionary or WKT string\n            clip (bool): Clip the bounds to the min/max extent of the image\n\n        Returns:\n            list: bounds in pixels [min x, min y, max x, max y] clipped to image bounds", "id": "f7081:c2:m7"}
{"signature": "def _parse_geoms(self, **kwargs):", "body": "bbox = kwargs.get('<STR_LIT>', None)<EOL>wkt_geom = kwargs.get('<STR_LIT>', None)<EOL>geojson = kwargs.get('<STR_LIT>', None)<EOL>if bbox is not None:<EOL><INDENT>g = box(*bbox)<EOL><DEDENT>elif wkt_geom is not None:<EOL><INDENT>g = wkt.loads(wkt_geom)<EOL><DEDENT>elif geojson is not None:<EOL><INDENT>g = shape(geojson)<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>if self.proj is None:<EOL><INDENT>return g<EOL><DEDENT>else:<EOL><INDENT>return self._reproject(g, from_proj=kwargs.get('<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>", "docstring": "Finds supported geometry types, parses them and returns the bbox", "id": "f7081:c2:m13"}
{"signature": "def geotiff(self, **kwargs):", "body": "if '<STR_LIT>' not in kwargs:<EOL><INDENT>kwargs['<STR_LIT>'] = self.proj<EOL><DEDENT>return to_geotiff(self, **kwargs)<EOL>", "docstring": "Creates a geotiff on the filesystem\n\n        Args:\n            path (str): optional, path to write the geotiff file to, default is ./output.tif\n            proj (str): optional, EPSG string of projection to reproject to\n            spec (str): optional, if set to 'rgb', write out color-balanced 8-bit RGB tif\n            bands (list): optional, list of bands to export. If spec='rgb' will default to RGB bands,\n                otherwise will export all bands\n\n        Returns:\n            str: path the geotiff was written to", "id": "f7081:c2:m8"}
{"signature": "def aoi(self, **kwargs):", "body": "g = self._parse_geoms(**kwargs)<EOL>if g is None:<EOL><INDENT>return self<EOL><DEDENT>else:<EOL><INDENT>return self[g]<EOL><DEDENT>", "docstring": "Subsets the Image by the given bounds\n\n        Args:\n            bbox (list): optional. A bounding box array [minx, miny, maxx, maxy]\n            wkt (str): optional. A WKT geometry string\n            geojson (str): optional. A GeoJSON geometry dictionary\n\n        Returns:\n            image: an image instance of the same type", "id": "f7081:c2:m6"}
{"signature": "def can_acomp(cat_id):", "body": "url = '<STR_LIT>'.format(cat_id)<EOL>auth = Auth()<EOL>r = _req_with_retries(auth.gbdx_connection, url)<EOL>try: <EOL><INDENT>data = r.json()<EOL>return data['<STR_LIT>'] is not None<EOL><DEDENT>except:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Checks to see if a CatalogID can be atmos. compensated or not.\n\nArgs:\n  catalogID (str): The catalog ID from the platform catalog.\nReturns:\n  available (bool): Whether or not the image can be acomp'd", "id": "f7083:m5"}
{"signature": "def is_ordered(cat_id):", "body": "url = '<STR_LIT>'.format(cat_id)<EOL>auth = Auth()<EOL>r = _req_with_retries(auth.gbdx_connection, url)<EOL>if r is not None:<EOL><INDENT>return r.status_code == <NUM_LIT:200><EOL><DEDENT>return False<EOL>", "docstring": "Checks to see if a CatalogID has been ordered or not.\n\nArgs:\n  catalogID (str): The catalog ID from the platform catalog.\nReturns:\n  ordered (bool): Whether or not the image has been ordered", "id": "f7083:m4"}
{"signature": "def _tile_coords(self, bounds):", "body": "tfm = partial(pyproj.transform,<EOL>pyproj.Proj(init=\"<STR_LIT>\"),<EOL>pyproj.Proj(init=\"<STR_LIT>\"))<EOL>bounds = ops.transform(tfm, box(*bounds)).bounds<EOL>west, south, east, north = bounds<EOL>epsilon = <NUM_LIT><EOL>if east != west and north != south:<EOL><INDENT>west += epsilon<EOL>south += epsilon<EOL>east -= epsilon<EOL>north -= epsilon<EOL><DEDENT>params = [west, south, east, north, [self.zoom_level]]<EOL>tile_coords = [(tile.x, tile.y) for tile in mercantile.tiles(*params)]<EOL>xtiles, ytiles = zip(*tile_coords)<EOL>minx = min(xtiles)<EOL>miny = min(ytiles)<EOL>maxx = max(xtiles) <EOL>maxy = max(ytiles)<EOL>return minx, miny, maxx, maxy<EOL>", "docstring": "convert mercator bbox to tile index limits", "id": "f7086:c1:m11"}
{"signature": "def update_options(*dict_args):", "body": "result = {}<EOL>for dictionary in dict_args:<EOL><INDENT>result.update(dictionary)<EOL><DEDENT>return result<EOL>", "docstring": "Given any number of dicts, shallow copy and merge into a new dict,\nprecedence goes to key value pairs in latter dicts.", "id": "f7091:m0"}
{"signature": "@classmethod<EOL><INDENT>def acomp_available(cls, cat_id):<DEDENT>", "body": "return can_acomp(cat_id)<EOL>", "docstring": "Checks to see if a CatalogID can be atmos. compensated or not.\n\nArgs:\n  catalogID (str): The catalog ID from the platform catalog.\nReturns:\n  available (bool): Whether or not the image can be acomp'd", "id": "f7094:c0:m2"}
{"signature": "def get(self, ID, index='<STR_LIT>'):", "body": "url = self.get_url % index<EOL>r = self.gbdx_connection.get(url + ID)<EOL>r.raise_for_status()<EOL>return r.json()<EOL>", "docstring": "Retrieves a vector.  Not usually necessary because searching is the best way to find & get stuff.\n\n        Args:\n            ID (str): ID of the vector object\n            index (str): Optional.  Index the object lives in.  defaults to 'vector-web-s'\n\n        Returns:\n            record (dict): A dict object identical to the json representation of the catalog record", "id": "f7095:c0:m3"}
{"signature": "def __repr__(self):", "body": "if self.value:<EOL><INDENT>base = '<STR_LIT>' % (self.agg_type, self.value)<EOL><DEDENT>else:<EOL><INDENT>base = '<STR_LIT:%s>' % self.agg_type<EOL><DEDENT>if self.children:<EOL><INDENT>if isinstance(self.children, six.string_types):<EOL><INDENT>return '<STR_LIT>' % (base, self.children)<EOL><DEDENT>elif isinstance(self.children, AggregationDef):<EOL><INDENT>return '<STR_LIT>' % (base, self.children.__repr__())<EOL><DEDENT>else: <EOL><INDENT>kids = []<EOL>for child in self.children:<EOL><INDENT>kids.append(child.__repr__())<EOL><DEDENT>kids_str = '<STR_LIT>' % '<STR_LIT:U+002C>'.join(kids)<EOL>return '<STR_LIT>' % (base, kids_str)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return base<EOL><DEDENT>", "docstring": "Creates a string representation of an aggregation definition suitable for use in VectorServices calls\n\n        Returns:\n            A string representation of an aggregation definition suitable for use in VectorServices calls", "id": "f7095:c1:m1"}
{"signature": "def query_iteratively(self, searchAreaWkt, query, count=<NUM_LIT:100>, ttl='<STR_LIT>', index=default_index):", "body": "search_area_polygon = from_wkt(searchAreaWkt)<EOL>left, lower, right, upper = search_area_polygon.bounds<EOL>params = {<EOL>\"<STR_LIT:q>\": query,<EOL>\"<STR_LIT:count>\": min(count,<NUM_LIT:1000>),<EOL>\"<STR_LIT>\": ttl,<EOL>\"<STR_LIT:left>\": left,<EOL>\"<STR_LIT:right>\": right,<EOL>\"<STR_LIT>\": lower,<EOL>\"<STR_LIT>\": upper<EOL>}<EOL>url = self.query_index_page_url % index if index else self.query_page_url<EOL>r = self.gbdx_connection.get(url, params=params)<EOL>r.raise_for_status()<EOL>page = r.json()<EOL>paging_id = page['<STR_LIT>']<EOL>item_count = int(page['<STR_LIT>'])<EOL>data = page['<STR_LIT:data>']<EOL>num_results = <NUM_LIT:0><EOL>for vector in data:<EOL><INDENT>num_results += <NUM_LIT:1><EOL>if num_results > count: break<EOL>yield vector<EOL><DEDENT>if num_results == count:<EOL><INDENT>return<EOL><DEDENT>while paging_id and item_count > <NUM_LIT:0> and num_results < count:<EOL><INDENT>headers = {'<STR_LIT:Content-Type>':'<STR_LIT>'}<EOL>data = {<EOL>\"<STR_LIT>\": paging_id,<EOL>\"<STR_LIT>\": ttl<EOL>}<EOL>r = self.gbdx_connection.post(self.page_url, headers=headers, data=data)<EOL>r.raise_for_status()<EOL>page = r.json()<EOL>paging_id = page['<STR_LIT>']<EOL>item_count = int(page['<STR_LIT>'])<EOL>data = page['<STR_LIT:data>']<EOL>for vector in data:<EOL><INDENT>num_results += <NUM_LIT:1><EOL>if num_results > count: break<EOL>yield vector<EOL><DEDENT><DEDENT>", "docstring": "Perform a vector services query using the QUERY API\n(https://gbdxdocs.digitalglobe.com/docs/vs-query-list-vector-items-returns-default-fields)\n\nArgs:\n    searchAreaWkt: WKT Polygon of area to search\n    query: Elastic Search query\n    count: Maximum number of results to return\n    ttl: Amount of time for each temporary vector page to exist\n\nReturns:\n    generator of vector results", "id": "f7095:c0:m5"}
{"signature": "def create_from_wkt(self, wkt, item_type, ingest_source, **attributes):", "body": "<EOL>geojson = load_wkt(wkt).__geo_interface__<EOL>vector = {<EOL>'<STR_LIT:type>': \"<STR_LIT>\",<EOL>'<STR_LIT>': geojson,<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': item_type,<EOL>'<STR_LIT>': ingest_source,<EOL>'<STR_LIT>': attributes<EOL>}<EOL>}<EOL>return self.create(vector)[<NUM_LIT:0>]<EOL>", "docstring": "Create a single vector in the vector service\n\nArgs:\n    wkt (str): wkt representation of the geometry\n    item_type (str): item_type of the vector\n    ingest_source (str): source of the vector\n    attributes: a set of key-value pairs of attributes\n\nReturns:\n    id (str): string identifier of the vector created", "id": "f7095:c0:m2"}
{"signature": "def __init__(self, **kwargs):", "body": "interface = Auth(**kwargs)<EOL>self.gbdx_connection = interface.gbdx_connection<EOL>self.logger = interface.logger<EOL>self.query_url = '<STR_LIT>'<EOL>self.query_index_url = '<STR_LIT>'<EOL>self.query_page_url = '<STR_LIT>'<EOL>self.query_index_page_url = '<STR_LIT>'<EOL>self.page_url = '<STR_LIT>'<EOL>self.get_url = '<STR_LIT>'<EOL>self.create_url = '<STR_LIT>'<EOL>self.aggregations_url = '<STR_LIT>'<EOL>self.aggregations_by_index_url = '<STR_LIT>'<EOL>", "docstring": "Construct the Vectors interface class\n\n        Returns:\n            An instance of the Vectors interface class.", "id": "f7095:c0:m0"}
{"signature": "def map(self, features=None, query=None, styles=None,<EOL>bbox=[-<NUM_LIT>,-<NUM_LIT>,<NUM_LIT>,<NUM_LIT>], zoom=<NUM_LIT:10>, center=None, <EOL>image=None, image_bounds=None, cmap='<STR_LIT>',<EOL>api_key=os.environ.get('<STR_LIT>', None), **kwargs):", "body": "try:<EOL><INDENT>from IPython.display import display<EOL><DEDENT>except:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>assert api_key is not None, \"<STR_LIT>\"<EOL>if features is None and query is not None:<EOL><INDENT>wkt = box(*bbox).wkt<EOL>features = self.query(wkt, query, index=None)<EOL><DEDENT>elif features is None and query is None and image is None:<EOL><INDENT>print('<STR_LIT>')<EOL>return<EOL><DEDENT>if styles is not None and not isinstance(styles, list):<EOL><INDENT>styles = [styles]<EOL><DEDENT>geojson = {\"<STR_LIT:type>\":\"<STR_LIT>\", \"<STR_LIT>\": features}<EOL>if center is None and features is not None:<EOL><INDENT>union = cascaded_union([shape(f['<STR_LIT>']) for f in features])<EOL>lon, lat = union.centroid.coords[<NUM_LIT:0>]<EOL><DEDENT>elif center is None and image is not None:<EOL><INDENT>try:<EOL><INDENT>lon, lat = shape(image).centroid.coords[<NUM_LIT:0>]<EOL><DEDENT>except:<EOL><INDENT>lon, lat = box(*image_bounds).centroid.coords[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>lat, lon = center<EOL><DEDENT>map_id = \"<STR_LIT>\".format(str(int(time.time())))<EOL>map_data = VectorGeojsonLayer(geojson, styles=styles, **kwargs)<EOL>image_layer = self._build_image_layer(image, image_bounds, cmap)<EOL>template = BaseTemplate(map_id, **{<EOL>\"<STR_LIT>\": lat, <EOL>\"<STR_LIT>\": lon, <EOL>\"<STR_LIT>\": zoom,<EOL>\"<STR_LIT>\": json.dumps(map_data.datasource),<EOL>\"<STR_LIT>\": json.dumps(map_data.layers),<EOL>\"<STR_LIT>\": image_layer,<EOL>\"<STR_LIT>\": api_key,<EOL>\"<STR_LIT>\": '<STR_LIT>'<EOL>})<EOL>template.inject()<EOL>", "docstring": "Renders a mapbox gl map from a vector service query or a list of geojson features\n\nArgs:\n  features 
(list): a list of geojson features\n  query (str): a VectorServices query \n  styles (list): a list of VectorStyles to apply to the features  \n  bbox (list): a bounding box to query for features ([minx, miny, maxx, maxy])\n  zoom (int): the initial zoom level of the map\n  center (list): a list of [lat, lon] used to center the map\n  api_key (str): a valid Mapbox API key\n  image (dict): a CatalogImage or a ndarray\n  image_bounds (list): a list of bounds for image positioning \n  Use outside of GBDX Notebooks requires a MapBox API key, sign up for free at https://www.mapbox.com/pricing/\n  Pass the key using the `api_key` keyword or set an environmental variable called `MAPBOX API KEY`\n  cmap (str): MatPlotLib colormap to use for rendering single band images (default: viridis)", "id": "f7095:c0:m8"}
{"signature": "def get_chip(self, coordinates, catid, chip_type='<STR_LIT>', chip_format='<STR_LIT>', filename='<STR_LIT>'):", "body": "def t2s1(t):<EOL><INDENT>return str(t).strip('<STR_LIT>').replace('<STR_LIT:U+002C>', '<STR_LIT>')<EOL><DEDENT>def t2s2(t):<EOL><INDENT>return str(t).strip('<STR_LIT>').replace('<STR_LIT:U+0020>', '<STR_LIT>')<EOL><DEDENT>if len(coordinates) != <NUM_LIT:4>:<EOL><INDENT>print('<STR_LIT>')<EOL>return False<EOL><DEDENT>W, S, E, N = coordinates<EOL>box = ((W, S), (W, N), (E, N), (E, S), (W, S))<EOL>box_wkt = '<STR_LIT>' + '<STR_LIT:U+002C>'.join([t2s1(corner) for corner in box]) + '<STR_LIT>'<EOL>results = self.get_images_by_catid_and_aoi(catid=catid, aoi_wkt=box_wkt)<EOL>description = self.describe_images(results)<EOL>pan_id, ms_id, num_bands = None, None, <NUM_LIT:0><EOL>for catid, images in description.items():<EOL><INDENT>for partnum, part in images['<STR_LIT>'].items():<EOL><INDENT>if '<STR_LIT>' in part.keys():<EOL><INDENT>pan_id = part['<STR_LIT>']['<STR_LIT:id>']<EOL>bucket = part['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in part.keys():<EOL><INDENT>ms_id = part['<STR_LIT>']['<STR_LIT:id>']<EOL>num_bands = <NUM_LIT:8><EOL>bucket = part['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>elif '<STR_LIT>' in part.keys():<EOL><INDENT>ms_id = part['<STR_LIT>']['<STR_LIT:id>']<EOL>num_bands = <NUM_LIT:4><EOL>bucket = part['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT><DEDENT><DEDENT>band_str = '<STR_LIT>'<EOL>if chip_type == '<STR_LIT>':<EOL><INDENT>band_str = pan_id + '<STR_LIT>'<EOL><DEDENT>elif chip_type == '<STR_LIT>':<EOL><INDENT>band_str = ms_id + '<STR_LIT:?>'<EOL><DEDENT>elif chip_type == '<STR_LIT>':<EOL><INDENT>if num_bands == <NUM_LIT:8>:<EOL><INDENT>band_str = ms_id + '<STR_LIT>' + pan_id<EOL><DEDENT>elif num_bands == <NUM_LIT:4>:<EOL><INDENT>band_str = ms_id + '<STR_LIT>' + pan_id<EOL><DEDENT><DEDENT>location_str = '<STR_LIT>'.format(t2s2((W, N)), t2s2((E, S)))<EOL>service_url = '<STR_LIT>' + bucket + '<STR_LIT:/>'<EOL>url = 
service_url + band_str + location_str<EOL>url += '<STR_LIT>' + chip_format + '<STR_LIT>' + self.gbdx_connection.access_token<EOL>r = requests.get(url)<EOL>if r.status_code == <NUM_LIT:200>:<EOL><INDENT>with open(filename, '<STR_LIT:wb>') as f:<EOL><INDENT>f.write(r.content)<EOL>return True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL>return False<EOL><DEDENT>", "docstring": "Downloads a native resolution, orthorectified chip in tif format\n        from a user-specified catalog id.\n\n        Args:\n            coordinates (list): Rectangle coordinates in order West, South, East, North.\n                                West and East are longitudes, North and South are latitudes.\n                                The maximum chip size is (2048 pix)x(2048 pix)\n            catid (str): The image catalog id.\n            chip_type (str): 'PAN' (panchromatic), 'MS' (multispectral), 'PS' (pansharpened).\n                             'MS' is 4 or 8 bands depending on sensor.\n            chip_format (str): 'TIF' or 'PNG'\n            filename (str): Where to save chip.\n\n        Returns:\n            True if chip is successfully downloaded; else False.", "id": "f7096:c0:m4"}
{"signature": "def get_images_by_catid_and_aoi(self, catid, aoi_wkt):", "body": "self.logger.debug('<STR_LIT>')<EOL>url = '<STR_LIT>' % self.base_url<EOL>body = {\"<STR_LIT>\": [\"<STR_LIT>\" % catid],<EOL>\"<STR_LIT>\": [\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": aoi_wkt}<EOL>r = self.gbdx_connection.post(url, data=json.dumps(body))<EOL>r.raise_for_status()<EOL>if r.status_code == <NUM_LIT:200>:<EOL><INDENT>results = r.json()<EOL>numresults = len(results['<STR_LIT>'])<EOL>self.logger.debug('<STR_LIT>'<EOL>% (numresults, catid))<EOL>return results<EOL><DEDENT>", "docstring": "Retrieves the IDAHO image records associated with a given catid.\n        Args:\n            catid (str): The source catalog ID from the platform catalog.\n            aoi_wkt (str): The well known text of the area of interest.\n        Returns:\n            results (json): The full catalog-search response for IDAHO images\n                            within the catID.", "id": "f7096:c0:m1"}
{"signature": "def create_leaflet_viewer(self, idaho_image_results, filename):", "body": "description = self.describe_images(idaho_image_results)<EOL>if len(description) > <NUM_LIT:0>:<EOL><INDENT>functionstring = '<STR_LIT>'<EOL>for catid, images in description.items():<EOL><INDENT>for partnum, part in images['<STR_LIT>'].items():<EOL><INDENT>num_images = len(list(part.keys()))<EOL>partname = None<EOL>if num_images == <NUM_LIT:1>:<EOL><INDENT>partname = [p for p in list(part.keys())][<NUM_LIT:0>]<EOL>pan_image_id = '<STR_LIT>'<EOL><DEDENT>elif num_images == <NUM_LIT:2>:<EOL><INDENT>partname = [p for p in list(part.keys()) if p is not '<STR_LIT>'][<NUM_LIT:0>]<EOL>pan_image_id = part['<STR_LIT>']['<STR_LIT:id>']<EOL><DEDENT>if not partname:<EOL><INDENT>self.logger.debug(\"<STR_LIT>\")<EOL>continue<EOL><DEDENT>bandstr = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:0>'<EOL>}.get(partname, '<STR_LIT>')<EOL>part_boundstr_wkt = part[partname]['<STR_LIT>']<EOL>part_polygon = from_wkt(part_boundstr_wkt)<EOL>bucketname = part[partname]['<STR_LIT>']<EOL>image_id = part[partname]['<STR_LIT:id>']<EOL>W, S, E, N = part_polygon.bounds<EOL>functionstring += \"<STR_LIT>\" % (<EOL>bucketname, image_id, W, S, E, N, pan_image_id)<EOL><DEDENT><DEDENT>__location__ = os.path.realpath(<EOL>os.path.join(os.getcwd(), os.path.dirname(__file__)))<EOL>try:<EOL><INDENT>with open(os.path.join(__location__, '<STR_LIT>'), '<STR_LIT:r>') as htmlfile:<EOL><INDENT>data = htmlfile.read().decode(\"<STR_LIT:utf8>\")<EOL><DEDENT><DEDENT>except AttributeError:<EOL><INDENT>with open(os.path.join(__location__, '<STR_LIT>'), '<STR_LIT:r>') as htmlfile:<EOL><INDENT>data = htmlfile.read()<EOL><DEDENT><DEDENT>data = data.replace('<STR_LIT>', functionstring)<EOL>data = data.replace('<STR_LIT>', str(S))<EOL>data = data.replace('<STR_LIT>', str(W))<EOL>data = data.replace('<STR_LIT>', bandstr)<EOL>data = data.replace('<STR_LIT>', 
self.gbdx_connection.access_token)<EOL>with codecs.open(filename, '<STR_LIT:w>', '<STR_LIT:utf8>') as outputfile:<EOL><INDENT>self.logger.debug(\"<STR_LIT>\" % filename)<EOL>outputfile.write(data)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>", "docstring": "Create a leaflet viewer html file for viewing idaho images.\n\n        Args:\n            idaho_image_results (dict): IDAHO image result set as returned from\n                                        the catalog.\n            filename (str): Where to save output html file.", "id": "f7096:c0:m6"}
{"signature": "def paint(self):", "body": "<EOL>snippet = {<EOL>'<STR_LIT>': VectorStyle.get_style_value(self.opacity),<EOL>'<STR_LIT>': VectorStyle.get_style_value(self.color),<EOL>'<STR_LIT>': VectorStyle.get_style_value(self.width),<EOL>}<EOL>if self.translate:<EOL><INDENT>snippet['<STR_LIT>'] = self.translate<EOL><DEDENT>if self.dasharray:<EOL><INDENT>snippet['<STR_LIT>'] = VectorStyle.get_style_value(self.dasharray)<EOL><DEDENT>return snippet<EOL>", "docstring": "Renders a javascript snippet suitable for use as a mapbox-gl line paint entry\n\nReturns:\n    A dict that can be converted to a mapbox-gl javascript paint snippet", "id": "f7098:c2:m1"}
{"signature": "def _load_info(self):", "body": "url = '<STR_LIT>' % self.base_url<EOL>r = self.gbdx_connection.get(url)<EOL>r.raise_for_status()<EOL>return r.json()<EOL>", "docstring": "Get user info for GBDX S3, put into instance vars for convenience.\n\n        Args:\n            None.\n\n        Returns:\n            Dictionary with S3 access key, S3 secret key, S3 session token,\n            user bucket and user prefix (dict).", "id": "f7101:c0:m4"}
{"signature": "def delete(self, location):", "body": "bucket = self.info['<STR_LIT>']<EOL>prefix = self.info['<STR_LIT>']<EOL>self.logger.debug('<STR_LIT>')<EOL>s3conn = self.client <EOL>if location[<NUM_LIT:0>] == '<STR_LIT:/>':<EOL><INDENT>location = location[<NUM_LIT:1>:]<EOL><DEDENT>if location[-<NUM_LIT:1>] == '<STR_LIT:/>':<EOL><INDENT>location = location[:-<NUM_LIT:2>]<EOL><DEDENT>self.logger.debug('<STR_LIT>')<EOL>for s3key in s3conn.list_objects(Bucket=bucket, Prefix=(prefix+'<STR_LIT:/>'+location))['<STR_LIT>']:<EOL><INDENT>s3conn.delete_object(Bucket=bucket, Key=s3key['<STR_LIT>'])<EOL><DEDENT>self.logger.debug('<STR_LIT>')<EOL>", "docstring": "Delete content in bucket/prefix/location.\n           Location can be a directory or a file (e.g., my_dir or my_dir/my_image.tif)\n           If location is a directory, all files in the directory are deleted.\n           If it is a file, then that file is deleted.\n\n           Args:\n               location (str): S3 location within prefix. Can be a directory or\n                               a file (e.g., my_dir or my_dir/my_image.tif).", "id": "f7101:c0:m6"}
{"signature": "def upload(self, local_file, s3_path=None):", "body": "if not os.path.exists(local_file):<EOL><INDENT>raise Exception(local_file + \"<STR_LIT>\")<EOL><DEDENT>if s3_path is None:<EOL><INDENT>s3_path = os.path.basename(local_file)<EOL><DEDENT>bucket = self.info['<STR_LIT>']<EOL>prefix = self.info['<STR_LIT>']<EOL>self.logger.debug('<STR_LIT>')<EOL>s3conn = self.client <EOL>self.logger.debug('<STR_LIT>'.format(local_file))<EOL>s3conn.upload_file(local_file, bucket, prefix+'<STR_LIT:/>'+s3_path)<EOL>self.logger.debug('<STR_LIT>')<EOL>return '<STR_LIT>'.format(bucket, prefix, s3_path)<EOL>", "docstring": "Upload files to your DG S3 bucket/prefix.\n\nArgs:\n    local_file (str): a path to a local file to upload, directory structures are not mirrored\n    s3_path: a key (location) on s3 to upload the file to\n\nReturns:\n    str: s3 path file was saved to\n\nExamples:\n    >>> upload('path/to/image.tif')\n    'mybucket/myprefix/image.tif'\n\n    >>> upload('./images/image.tif')\n    'mybucket/myprefix/image.tif'\n\n    >>> upload('./images/image.tif', s3_path='images/image.tif')\n    'mybucket/myprefix/images/image.tif'", "id": "f7101:c0:m7"}
{"signature": "def execute(self):", "body": "<EOL>self.generate_workflow_description()<EOL>if self.batch_values:<EOL><INDENT>self.id = self.workflow.launch_batch_workflow(self.definition)<EOL><DEDENT>else:<EOL><INDENT>self.id = self.workflow.launch(self.definition)<EOL><DEDENT>return self.id<EOL>", "docstring": "Execute the workflow.\n\nArgs:\n    None\n\nReturns:\n    Workflow_id", "id": "f7102:c8:m5"}
{"signature": "def cancel(self):", "body": "if not self.id:<EOL><INDENT>raise WorkflowError('<STR_LIT>')<EOL><DEDENT>if self.batch_values:<EOL><INDENT>self.workflow.batch_workflow_cancel(self.id)<EOL><DEDENT>else:<EOL><INDENT>self.workflow.cancel(self.id)<EOL><DEDENT>", "docstring": "Cancel a running workflow.\n\nArgs:\n    None\n\nReturns:\n    None", "id": "f7102:c8:m8"}
{"signature": "def __init__(self, __task_type, **kwargs):", "body": "self.name = __task_type + '<STR_LIT:_>' + str(uuid.uuid4())[:<NUM_LIT:8>]<EOL>self.name = self.name.replace('<STR_LIT::>','<STR_LIT:_>')  <EOL>self.type = __task_type<EOL>task_registry = TaskRegistry()<EOL>self.definition = task_registry.get_definition(__task_type)<EOL>self.domain = self.definition.get('<STR_LIT>', [{'<STR_LIT>': {}}])[<NUM_LIT:0>]['<STR_LIT>'].get('<STR_LIT>', '<STR_LIT:default>')<EOL>self._timeout = self.definition['<STR_LIT>'].get('<STR_LIT>')<EOL>self.inputs = Inputs(self.input_ports, task=self)<EOL>self.outputs = Outputs(self.output_ports, self.name)<EOL>self.batch_values = None<EOL>self._impersonation_allowed = None<EOL>self.set(**kwargs)<EOL>", "docstring": "Construct an instance of GBDX Task\n\nArgs:\n    __task_type: name of the task\n    **kwargs: key=value pairs for inputs to set on the task\n\nReturns:\n    An instance of Task.", "id": "f7102:c7:m0"}
{"signature": "def get_matching_multiplex_port(self,name):", "body": "<EOL>matching_multiplex_ports = [self.__getattribute__(p) for p in self._portnames<EOL>if name.startswith(p)<EOL>and name != p<EOL>and hasattr(self, p)<EOL>and self.__getattribute__(p).is_multiplex<EOL>]<EOL>for port in matching_multiplex_ports:<EOL><INDENT>return port<EOL><DEDENT>return None<EOL>", "docstring": "Given a name, figure out if a multiplex port prefixes this name and return it.  Otherwise return none.", "id": "f7102:c4:m3"}
{"signature": "def get_most_recent_images(self, results, types=[], sensors=[], N=<NUM_LIT:1>):", "body": "if not len(results):<EOL><INDENT>return None<EOL><DEDENT>if types:<EOL><INDENT>results = [r for r in results if r['<STR_LIT:type>'] in types]<EOL><DEDENT>if sensors:<EOL><INDENT>results = [r for r in results if r['<STR_LIT>'].get('<STR_LIT>') in sensors]<EOL><DEDENT>newlist = sorted(results, key=lambda k: k['<STR_LIT>'].get('<STR_LIT>'), reverse=True)<EOL>return newlist[:N]<EOL>", "docstring": "Return the most recent image\n\n        Args:\n            results: a catalog resultset, as returned from a search\n            types: array of types you want. optional.\n            sensors: array of sensornames. optional.\n            N: number of recent images to return.  defaults to 1.\n\n        Returns:\n            single catalog item, or none if not found", "id": "f7104:c0:m9"}
{"signature": "def search(self, searchAreaWkt=None, filters=None, startDate=None, endDate=None, types=None):", "body": "<EOL>if not types:<EOL><INDENT>types = ['<STR_LIT>']<EOL><DEDENT>if startDate:<EOL><INDENT>startDateTime = datetime.datetime.strptime(startDate, '<STR_LIT>')<EOL><DEDENT>if endDate:<EOL><INDENT>endDateTime = datetime.datetime.strptime(endDate, '<STR_LIT>')<EOL><DEDENT>if startDate and endDate:<EOL><INDENT>diff = endDateTime - startDateTime<EOL>if diff.days < <NUM_LIT:0>:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>postdata = {<EOL>\"<STR_LIT>\": searchAreaWkt,<EOL>\"<STR_LIT>\": types,<EOL>\"<STR_LIT>\": startDate,<EOL>\"<STR_LIT>\": endDate,<EOL>}<EOL>if filters:<EOL><INDENT>postdata['<STR_LIT>'] = filters<EOL><DEDENT>if searchAreaWkt:<EOL><INDENT>postdata['<STR_LIT>'] = searchAreaWkt<EOL><DEDENT>url = '<STR_LIT>' % {<EOL>'<STR_LIT>': self.base_url<EOL>}<EOL>headers = {'<STR_LIT:Content-Type>':'<STR_LIT:application/json>'}<EOL>r = self.gbdx_connection.post(url, headers=headers, data=json.dumps(postdata))<EOL>r.raise_for_status()<EOL>results = r.json()['<STR_LIT>']<EOL>return results<EOL>", "docstring": "Perform a catalog search\n\n        Args:\n            searchAreaWkt: WKT Polygon of area to search.  Optional.\n            filters: Array of filters.  Optional.  Example:\n            [\n                \"(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')\",\n                \"cloudCover < 10\",\n                \"offNadirAngle < 10\"\n            ]\n            startDate: string.  Optional.  Example: \"2004-01-01T00:00:00.000Z\"\n            endDate: string.  Optional.  Example: \"2004-01-01T00:00:00.000Z\"\n            types: Array of types to search for.  Optional.  Example (and default):  [\"Acquisition\"]\n\n        Returns:\n            catalog search resultset", "id": "f7104:c0:m8"}
{"signature": "def __init__(self, **kwargs):", "body": "interface = Auth(**kwargs)<EOL>self.base_url = '<STR_LIT>' % interface.root_url<EOL>self.gbdx_connection = interface.gbdx_connection<EOL>self.logger = interface.logger<EOL>", "docstring": "Construct the Catalog interface class\n\n        Returns:\n            An instance of the Catalog interface class.", "id": "f7104:c0:m0"}
{"signature": "def get_strip_metadata(self, catID):", "body": "self.logger.debug('<STR_LIT>')<EOL>url = '<STR_LIT>' % {<EOL>'<STR_LIT>': self.base_url, '<STR_LIT>': catID<EOL>}<EOL>r = self.gbdx_connection.get(url)<EOL>if r.status_code == <NUM_LIT:200>:<EOL><INDENT>return r.json()['<STR_LIT>']<EOL><DEDENT>elif r.status_code == <NUM_LIT>:<EOL><INDENT>self.logger.debug('<STR_LIT>' % catID)<EOL>r.raise_for_status()<EOL><DEDENT>else:<EOL><INDENT>self.logger.debug('<STR_LIT>' % catID)<EOL>r.raise_for_status()<EOL><DEDENT>", "docstring": "Retrieves the strip catalog metadata given a cat ID.\n\n        Args:\n            catID (str): The source catalog ID from the platform catalog.\n\n        Returns:\n            metadata (dict): A metadata dictionary .\n\n            TODO: have this return a class object with interesting information exposed.", "id": "f7104:c0:m3"}
{"signature": "def search_address(self, address, filters=None, startDate=None, endDate=None, types=None):", "body": "lat, lng = self.get_address_coords(address)<EOL>return self.search_point(lat,lng, filters=filters, startDate=startDate, endDate=endDate, types=types)<EOL>", "docstring": "Perform a catalog search over an address string\n\n        Args:\n            address: any address string\n            filters: Array of filters.  Optional.  Example:\n            [\n                \"(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')\",\n                \"cloudCover < 10\",\n                \"offNadirAngle < 10\"\n            ]\n            startDate: string.  Optional.  Example: \"2004-01-01T00:00:00.000Z\"\n            endDate: string.  Optional.  Example: \"2004-01-01T00:00:00.000Z\"\n            types: Array of types to search for.  Optional.  Example (and default):  [\"Acquisition\"]\n\n        Returns:\n            catalog search resultset", "id": "f7104:c0:m5"}
{"signature": "def get(self, catID, includeRelationships=False):", "body": "url = '<STR_LIT>' % {<EOL>'<STR_LIT>': self.base_url, '<STR_LIT>': catID<EOL>}<EOL>r = self.gbdx_connection.get(url)<EOL>r.raise_for_status()<EOL>return r.json()<EOL>", "docstring": "Retrieves the strip footprint WKT string given a cat ID.\n\n        Args:\n            catID (str): The source catalog ID from the platform catalog.\n            includeRelationships (bool): whether to include graph links to related objects.  Default False.\n\n        Returns:\n            record (dict): A dict object identical to the json representation of the catalog record", "id": "f7104:c0:m2"}
{"signature": "def deprecate_module_attr(mod, deprecated):", "body": "deprecated = set(deprecated)<EOL>class Wrapper(object):<EOL><INDENT>def __getattr__(self, attr):<EOL><INDENT>if attr in deprecated:<EOL><INDENT>warnings.warn(\"<STR_LIT>\".format(attr), GBDXDeprecation)<EOL><DEDENT>return getattr(mod, attr)<EOL><DEDENT>def __setattr__(self, attr, value):<EOL><INDENT>if attr in deprecated:<EOL><INDENT>warnings.warn(\"<STR_LIT>\".format(attr), GBDXDeprecation)<EOL><DEDENT>return setattr(mod, attr, value)<EOL><DEDENT><DEDENT>return Wrapper()<EOL>", "docstring": "Return a wrapped object that warns about deprecated accesses", "id": "f7105:m1"}
{"signature": "def get_definition(self, task_name):", "body": "r = self.gbdx_connection.get(self._base_url + '<STR_LIT:/>' + task_name)<EOL>raise_for_status(r)<EOL>return r.json()<EOL>", "docstring": "Gets definition of a registered GBDX task.\n\n        Args:\n            task_name (str): Task name.\n\n        Returns:\n            Dictionary representing the task definition.", "id": "f7106:c0:m3"}
{"signature": "def register(self, task_json=None, json_filename=None):", "body": "if not task_json and not json_filename:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>if task_json and json_filename:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>if json_filename:<EOL><INDENT>task_json = json.load(open(json_filename, '<STR_LIT:r>'))<EOL><DEDENT>r = self.gbdx_connection.post(self._base_url, json=task_json)<EOL>raise_for_status(r)<EOL>return r.text<EOL>", "docstring": "Registers a new GBDX task.\n\n        Args:\n            task_json (dict): Dictionary representing task definition.\n            json_filename (str): A full path of a file with json representing the task definition.\n            Only one out of task_json and json_filename should be provided.\n        Returns:\n            Response (str).", "id": "f7106:c0:m2"}
{"signature": "def update(self, task_name, task_json):", "body": "r = self.gbdx_connection.put(self._base_url + '<STR_LIT:/>' + task_name, json=task_json)<EOL>raise_for_status(r)<EOL>return r.json()<EOL>", "docstring": "Updates a GBDX task.\n\n        Args:\n            task_name (str): Task name.\n            task_json (dict): Dictionary representing updated task definition.\n\n        Returns:\n            Dictionary representing the updated task definition.", "id": "f7106:c0:m5"}
{"signature": "def _layer_def(self, style):", "body": "raise NotImplementedError()<EOL>", "docstring": "Constructs a layer def with the proper fields\n            - implemented in subclasses\n\n            Returns:\n                layer (dict): a layer json dict used for adding to maps", "id": "f7107:c0:m1"}
{"signature": "def _datasource_def(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Constructs a datasource def appropriate for the layer type\n            - implemented in subclasses\n\n            Returns\n                datasource (dict): a datasource json dict used for adding data to maps", "id": "f7107:c0:m2"}
{"signature": "@property<EOL><INDENT>def layers(self):<DEDENT>", "body": "layers = [self._layer_def(style) for style in self.styles]<EOL>return layers<EOL>", "docstring": "Renders the list of layers to add to the map.\n\n            Returns:\n                layers (list): list of layer entries suitable for use in mapbox-gl 'map.addLayer()' call", "id": "f7107:c0:m4"}
{"signature": "@contextlib.contextmanager<EOL><INDENT>def assertWarns(self, warning, *args, **kwargs):<DEDENT>", "body": "original_filters = warnings.filters[:]<EOL>warnings.simplefilter('<STR_LIT:error>')<EOL>if len(args) == <NUM_LIT:0> and len(kwargs) == <NUM_LIT:0>:<EOL><INDENT>with self.assertRaises(warning):<EOL><INDENT>yield<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.assertRaises(warning, *args, **kwargs)<EOL><DEDENT>warnings.filters = original_filters<EOL>", "docstring": "A test that checks if a specified warning was raised", "id": "f7142:c0:m1"}
{"signature": "def line(self, line):", "body": "return line.rstrip()<EOL>", "docstring": "Returns string with trailing whitespace characters removed\n\n        Argument:\n            line - Input line to cut", "id": "f7163:c0:m1"}
{"signature": "def line(self, line):", "body": "return line<EOL>", "docstring": "Returns line untouched, expected to be byte array.\n\n        Argument:\n            line - Input line to cut", "id": "f7164:c0:m1"}
{"signature": "def lst(comma_list):", "body": "return comma_list.split(\"<STR_LIT:U+002C>\")<EOL>", "docstring": "Takes a string l and returns list split by comma\n\n    Used to take comma delimited list from command line argument, and store\n    python list when parsing via argparser.\n\n    Example:\n    >>>lst(\"1,2,3,4\")\n    [1,2,3,4]", "id": "f7165:m1"}
{"signature": "def _parse_args(args):", "body": "<EOL>parser = argparse.ArgumentParser(description=\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\",<EOL>usage=_usage()[len('<STR_LIT>'):])<EOL>parser.add_argument('<STR_LIT>', \"<STR_LIT>\", action='<STR_LIT:store>', type=lst, default=[],<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT:-c>', \"<STR_LIT>\", action='<STR_LIT:store>', type=lst, default=[],<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', \"<STR_LIT>\", action='<STR_LIT:store>', type=lst, default=[],<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', \"<STR_LIT>\", action='<STR_LIT:store>', default=\"<STR_LIT:\\t>\",<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', \"<STR_LIT>\", action='<STR_LIT:store_true>',<EOL>help='<STR_LIT>'+<EOL>'<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT>', \"<STR_LIT>\", action='<STR_LIT:store>', default=\"<STR_LIT:\\t>\",<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument('<STR_LIT:file>', nargs='<STR_LIT:*>', default=\"<STR_LIT:->\",<EOL>help=\"<STR_LIT>\")<EOL>return parser.parse_args(args)<EOL>", "docstring": "Setup argparser to process arguments and generate help", "id": "f7165:m2"}
{"signature": "def group_val(group):", "body": "if group:<EOL><INDENT>return int(group)<EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "Returns value of regular expression group, if valid. 0 if not\n\n    Argument:\n        group - group to get value of", "id": "f7168:m0"}
{"signature": "def cut(self, line):", "body": "result = []<EOL>line = self.line(line)<EOL>for i, field in enumerate(self.positions):<EOL><INDENT>try:<EOL><INDENT>index = _setup_index(field)<EOL>try:<EOL><INDENT>result += line[index]<EOL><DEDENT>except IndexError:<EOL><INDENT>result.append(self.invalid_pos)<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>result.append(str(field))<EOL><DEDENT>except TypeError:<EOL><INDENT>result.extend(self._cut_range(line, int(field[<NUM_LIT:0>]), i))<EOL><DEDENT><DEDENT>return '<STR_LIT>'.join(result)<EOL>", "docstring": "Returns selected positions from cut input source in desired\n        arrangement.\n\n        Argument:\n            line -      input to cut", "id": "f7168:c0:m2"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def line(self, line):<DEDENT>", "body": "", "docstring": "Returns prepared line for cutting", "id": "f7168:c0:m1"}
{"signature": "def update_remote(self, remote_name=None):", "body": "raise NotImplemented()<EOL>", "docstring": "Update the (default) remote by pushing the tags and removing (deleted) tags. TODO: is this last thing possible?\n:return:", "id": "f7171:c0:m3"}
{"signature": "def __get_files_to_be_added(self, repository):", "body": "for root, dirs, files in os.walk(repository.working_dir):<EOL><INDENT>for f in files:<EOL><INDENT>relative_path = os.path.join(root, f)[len(repository.working_dir) + <NUM_LIT:1>:]<EOL>try:<EOL><INDENT>repository.head.commit.tree[relative_path] <EOL>yield relative_path<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>", "docstring": ":return: the files that have been modified and can be added", "id": "f7172:c0:m7"}
{"signature": "def record_results_and_push(self, results, remote_name='<STR_LIT>'):", "body": "<EOL>raise NotImplemented()<EOL>", "docstring": "Record the results of this experiment, by updating the tag and push to remote\n:param results:  A dictionary containing the results of the experiment.\n:type results: dict\n:param remote_name: the name of the remote to push the tags (or None for the default)\n:type remote_name: str", "id": "f7172:c0:m4"}
{"signature": "def __init__(self, name, parameters, directory=\"<STR_LIT:.>\", tag_prefix=\"<STR_LIT>\", description=None):", "body": "<EOL>self.__experiment_name = name + \"<STR_LIT:@>\" + time.strftime(\"<STR_LIT>\") + '<STR_LIT:.>' + str(random.randint(<NUM_LIT:0>, <NUM_LIT:100>))<EOL>self.__results_recorded = False<EOL>self.__repository_directory = directory<EOL>if tag_prefix[-<NUM_LIT:1>] != '<STR_LIT:/>':<EOL><INDENT>tag_prefix += '<STR_LIT:/>'<EOL><DEDENT>self.__tag_name = tag_prefix + self.__experiment_name<EOL>self.__parameters = parameters<EOL>self.__description = description<EOL>", "docstring": "Start logging a new experiment.\n:param name: the name of the experiment\n:type name: str\n:param parameters: a dictionary with all the parameters of the experiment.\n:type parameters: dict\n:param directory: a string of the directory of the git repository, where the experiment will be logged.\n:type directory: str\n:param tag_prefix: the prefix of the \"folder\" where the experiment-related tags will be placed\n:type tag_prefix: str", "id": "f7172:c0:m0"}
{"signature": "@abstractmethod<EOL><INDENT>def setup(self, context):<DEDENT>", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Called when a task for this operator is initialized within a worker on the cluster\n\n        It provides the operator with the environment in which the bolt executes.\n        Note that ``__init__()`` should not be overriden for initialization of a bolt, as it is used\n        internally by BaseBolt; instead, ``initialize()`` should be used to initialize any custom\n        variables or connection to databases.\n\n        :type context: :class:`Context`\n        :param context: This object can be used to get information about this task's place within the\n                        topology, including the task id and component id of this task, input and output\n                        information, etc.\n        **Must be implemented by a subclass, otherwise NotImplementedError is raised.**", "id": "f7187:c0:m0"}
{"signature": "@abstractmethod<EOL><INDENT>def transform(self, tup):<DEDENT>", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Process a single tuple of input\n\n        The Tuple object contains metadata on it about which component/stream/task it came from.\n        To emit a tuple, call ``context.emit(tuple)``.\n\n        **Must be implemented by a subclass, otherwise NotImplementedError is raised.**\n\n        :type tup: :class:`Tuple`\n        :param tup: Tuple to process", "id": "f7187:c0:m1"}
{"signature": "def build(self, bldr):", "body": "stage_names = sets.Set()<EOL>for source in self._sources:<EOL><INDENT>source._build(bldr, stage_names)<EOL><DEDENT>for source in self._sources:<EOL><INDENT>if not source._all_built():<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "Builds the topology and returns the builder", "id": "f7190:c0:m2"}
{"signature": "def new_source(self, source):", "body": "source_streamlet = None<EOL>if callable(source):<EOL><INDENT>source_streamlet = SupplierStreamlet(source)<EOL><DEDENT>elif isinstance(source, Generator):<EOL><INDENT>source_streamlet = GeneratorStreamlet(source)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>self._sources.append(source_streamlet)<EOL>return source_streamlet<EOL>", "docstring": "Adds a new source to the computation DAG", "id": "f7190:c0:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def get_stream_name(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Fetches the stream name that we are operating on", "id": "f7192:c0:m2"}
{"signature": "@abstractmethod<EOL><INDENT>def get_task_id(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Fetches the task id of the current instance of the operator", "id": "f7192:c0:m0"}
{"signature": "@abstractmethod<EOL><INDENT>def get_state(self):<DEDENT>", "body": "pass<EOL>", "docstring": "The state where components can store any of their local state", "id": "f7192:c0:m5"}
{"signature": "def set_name(self, name):", "body": "self._name = name<EOL>return self<EOL>", "docstring": "Sets the name of the Streamlet", "id": "f7194:c0:m1"}
{"signature": "def outer_right_join(self, join_streamlet, window_config, join_function):", "body": "from heronpy.streamlet.impl.joinbolt import JoinStreamlet, JoinBolt<EOL>join_streamlet_result = JoinStreamlet(JoinBolt.OUTER_RIGHT, window_config,<EOL>join_function, self, join_streamlet)<EOL>self._add_child(join_streamlet_result)<EOL>join_streamlet._add_child(join_streamlet_result)<EOL>return join_streamlet_result<EOL>", "docstring": "Return a new Streamlet by outer right join_streamlet with this streamlet", "id": "f7194:c0:m16"}
{"signature": "@abstractmethod<EOL><INDENT>def _build_this(self, builder, stage_names):<DEDENT>", "body": "raise RuntimeError(\"<STR_LIT>\")<EOL>", "docstring": "This is the method that's implemented by the operators.\n        :type builder: TopologyBuilder\n        :param builder: The operator adds in the current streamlet as a spout/bolt", "id": "f7194:c0:m21"}
{"signature": "def reduce_by_window(self, window_config, reduce_function):", "body": "from heronpy.streamlet.impl.reducebywindowbolt import ReduceByWindowStreamlet<EOL>reduce_streamlet = ReduceByWindowStreamlet(window_config, reduce_function, self)<EOL>self._add_child(reduce_streamlet)<EOL>return reduce_streamlet<EOL>", "docstring": "Return a new Streamlet in which each element of this Streamlet are collected\n          over a window defined by window_config and then reduced using the reduce_function\n          reduce_function takes two element at one time and reduces them to one element that\n          is used in the subsequent operations.", "id": "f7194:c0:m10"}
{"signature": "def set_num_partitions(self, num_partitions):", "body": "self._num_partitions = num_partitions<EOL>return self<EOL>", "docstring": "Sets the number of partitions", "id": "f7194:c0:m3"}
{"signature": "def consume(self, consume_function):", "body": "from heronpy.streamlet.impl.consumebolt import ConsumeStreamlet<EOL>consume_streamlet = ConsumeStreamlet(consume_function, self)<EOL>self._add_child(consume_streamlet)<EOL>return<EOL>", "docstring": "Calls consume_function for each element of this streamlet. This function returns nothing", "id": "f7194:c0:m14"}
{"signature": "def flat_map(self, flatmap_function):", "body": "from heronpy.streamlet.impl.flatmapbolt import FlatMapStreamlet<EOL>fm_streamlet = FlatMapStreamlet(flatmap_function, self)<EOL>self._add_child(fm_streamlet)<EOL>return fm_streamlet<EOL>", "docstring": "Return a new Streamlet by applying map_function to each element of this Streamlet\n           and flattening the result", "id": "f7194:c0:m6"}
{"signature": "def outer_join(self, join_streamlet, window_config, join_function):", "body": "from heronpy.streamlet.impl.joinbolt import JoinStreamlet, JoinBolt<EOL>join_streamlet_result = JoinStreamlet(JoinBolt.OUTER, window_config,<EOL>join_function, self, join_streamlet)<EOL>self._add_child(join_streamlet_result)<EOL>join_streamlet._add_child(join_streamlet_result)<EOL>return join_streamlet_result<EOL>", "docstring": "Return a new Streamlet by outer join_streamlet with this streamlet", "id": "f7194:c0:m18"}
{"signature": "def clone(self, num_clones):", "body": "retval = []<EOL>for i in range(num_clones):<EOL><INDENT>retval.append(self.repartition(self.get_num_partitions()))<EOL><DEDENT>return retval<EOL>", "docstring": "Return num_clones number of streamlets each containing all elements\n        of the current streamlet", "id": "f7194:c0:m9"}
{"signature": "def filter(self, filter_function):", "body": "from heronpy.streamlet.impl.filterbolt import FilterStreamlet<EOL>filter_streamlet = FilterStreamlet(filter_function, self)<EOL>self._add_child(filter_streamlet)<EOL>return filter_streamlet<EOL>", "docstring": "Return a new Streamlet containing only the elements that satisfy filter_function", "id": "f7194:c0:m7"}
{"signature": "@abstractmethod<EOL><INDENT>def get(self):<DEDENT>", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Generate the next element\n        If there is nothing at the moment to generate, return None\n\n        **Must be implemented by a subclass, otherwise NotImplementedError is raised.**", "id": "f7196:c0:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def setup(self, context):<DEDENT>", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Called when a task for this operator is initialized within a worker on the cluster\n\n        It provides the operator with the environment in which the operator executes.\n        This should be used to initialize any custom variables or connection to databases.\n\n        :type context: :class:`Context`\n        :param context: This object can be used to get information about this task's place within the\n                        topology, including the task id and component id of this task, input and output\n                        information, etc.\n        **Must be implemented by a subclass, otherwise NotImplementedError is raised.**", "id": "f7196:c0:m0"}
{"signature": "def run(self, name, config, builder):", "body": "if not isinstance(name, str):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(config, Config):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(builder, Builder):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>bldr = TopologyBuilder(name=name)<EOL>builder.build(bldr)<EOL>bldr.set_config(config._api_config)<EOL>bldr.build_and_submit()<EOL>", "docstring": "Builds the topology and submits it", "id": "f7197:c0:m1"}
{"signature": "def __init__(self):", "body": "pass<EOL>", "docstring": "Nothing really", "id": "f7197:c0:m0"}
{"signature": "@abstractmethod<EOL><INDENT>def choose_tasks(self, values):<DEDENT>", "body": "pass<EOL>", "docstring": "Implements a custom stream grouping\n\n        :param values: the values to group on\n        :rtype: list of int\n        :return: list of task ids to which these values are emitted", "id": "f7215:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def get_heron_options_from_env():<DEDENT>", "body": "heron_options_raw = os.environ.get(\"<STR_LIT>\")<EOL>if heron_options_raw is None:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>options = {}<EOL>for option_line in heron_options_raw.replace(\"<STR_LIT>\", \"<STR_LIT:U+0020>\").split('<STR_LIT:U+002C>'):<EOL><INDENT>key, sep, value = option_line.partition(\"<STR_LIT:=>\")<EOL>if sep:<EOL><INDENT>options[key] = value<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % option_line)<EOL><DEDENT><DEDENT>return options<EOL>", "docstring": "Retrieves heron options from the `HERON_OPTIONS` environment variable.\n\n        Heron options have the following format:\n\n            cmdline.topologydefn.tmpdirectory=/var/folders/tmpdir\n            cmdline.topology.initial.state=PAUSED\n\n        In this case, the returned map will contain:\n\n            #!json\n            {\n              \"cmdline.topologydefn.tmpdirectory\": \"/var/folders/tmpdir\",\n              \"cmdline.topology.initial.state\": \"PAUSED\"\n            }\n\n        Currently supports the following options natively:\n\n        - `cmdline.topologydefn.tmpdirectory`: (required) the directory to which this\n        topology's defn file is written\n        - `cmdline.topology.initial.state`: (default: \"RUNNING\") the initial state of the topology\n        - `cmdline.topology.name`: (default: class name) topology name on deployment\n\n        Returns: map mapping from key to value", "id": "f7217:c0:m7"}
{"signature": "def add_spout(self, name, spout_cls, par, config=None, optional_outputs=None):", "body": "spout_spec = spout_cls.spec(name=name, par=par, config=config,<EOL>optional_outputs=optional_outputs)<EOL>self.add_spec(spout_spec)<EOL>return spout_spec<EOL>", "docstring": "Add a spout to the topology", "id": "f7217:c2:m2"}
{"signature": "def add_spec(self, *specs):", "body": "for spec in specs:<EOL><INDENT>if not isinstance(spec, HeronComponentSpec):<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>% str(spec))<EOL><DEDENT>if spec.name is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if spec.name == \"<STR_LIT>\":<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if spec.name in self._specs:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (spec.name, spec))<EOL><DEDENT>self._specs[spec.name] = spec<EOL><DEDENT>", "docstring": "Add specs to the topology\n\n        :type specs: HeronComponentSpec\n        :param specs: specs to add to the topology", "id": "f7217:c2:m1"}
{"signature": "def set_config(self, config):", "body": "if not isinstance(config, dict):<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % str(config))<EOL><DEDENT>self._topology_config = config<EOL>", "docstring": "Set topology-wide configuration to the topology\n\n        :type config: dict\n        :param config: topology-wide config", "id": "f7217:c2:m4"}
{"signature": "@classmethod<EOL><INDENT>def init_topology(mcs, classname, class_dict):<DEDENT>", "body": "if classname == '<STR_LIT>':<EOL><INDENT>return<EOL><DEDENT>heron_options = TopologyType.get_heron_options_from_env()<EOL>initial_state = heron_options.get(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>tmp_directory = heron_options.get(\"<STR_LIT>\")<EOL>if tmp_directory is None:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>topology_name = heron_options.get(\"<STR_LIT>\", classname)<EOL>topology_id = topology_name + str(uuid.uuid4())<EOL>topology = topology_pb2.Topology()<EOL>topology.id = topology_id<EOL>topology.name = topology_name<EOL>topology.state = topology_pb2.TopologyState.Value(initial_state)<EOL>topology.topology_config.CopyFrom(TopologyType.get_topology_config_protobuf(class_dict))<EOL>TopologyType.add_bolts_and_spouts(topology, class_dict)<EOL>class_dict['<STR_LIT>'] = topology_name<EOL>class_dict['<STR_LIT>'] = topology_id<EOL>class_dict['<STR_LIT>'] = topology<EOL>class_dict['<STR_LIT>'] = tmp_directory<EOL>class_dict['<STR_LIT>'] = heron_options<EOL>", "docstring": "Initializes a topology protobuf", "id": "f7217:c0:m6"}
{"signature": "@classmethod<EOL><INDENT>def write(cls):<DEDENT>", "body": "if cls.__name__ == '<STR_LIT>':<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>filename = \"<STR_LIT>\" % cls.topology_name<EOL>path = os.path.join(cls.topologydefn_tmpdir, filename)<EOL>with open(path, '<STR_LIT:wb>') as f:<EOL><INDENT>f.write(cls.protobuf_topology.SerializeToString())<EOL><DEDENT>", "docstring": "Writes the Topology .defn file to ``dest``\n\n        This classmethod is meant to be used by heron-cli when submitting a topology.", "id": "f7217:c1:m0"}
{"signature": "def build_and_submit(self):", "body": "class_dict = self._construct_topo_class_dict()<EOL>topo_cls = TopologyType(self.topology_name, (Topology,), class_dict)<EOL>topo_cls.write()<EOL>", "docstring": "Builds the topology and submits to the destination", "id": "f7217:c2:m6"}
{"signature": "def update(self, value):", "body": "self.reducer.reduce(value)<EOL>", "docstring": "Updates a value and apply reduction", "id": "f7218:c5:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def init(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Called when this reducer is initialized/reinitialized", "id": "f7218:c3:m0"}
{"signature": "def __init__(self, reducer_cls):", "body": "self.reducer = reducer_cls()<EOL>self.reducer.init()<EOL>", "docstring": "Initializes ReducedMetric object\n\n        :param reducer_cls: IReducer class to use", "id": "f7218:c5:m0"}
{"signature": "def __init__(self, reducer):", "body": "self.value = {}<EOL>self.reducer = reducer<EOL>", "docstring": "Initializes MultiReducedMetric object\n\n        :param reducer: IReducer class to use", "id": "f7218:c6:m0"}
{"signature": "@abstractmethod<EOL><INDENT>def extract(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Called to extract the current value", "id": "f7218:c3:m2"}
{"signature": "def incr(self, key, to_add=<NUM_LIT:1>):", "body": "if key not in self.value:<EOL><INDENT>self.value[key] = CountMetric()<EOL><DEDENT>self.value[key].incr(to_add)<EOL>", "docstring": "Increments the value of a given key by ``to_add``", "id": "f7218:c2:m2"}
{"signature": "@abstractmethod<EOL><INDENT>def get(self, key):<DEDENT>", "body": "pass<EOL>", "docstring": "Gets the value corresponding to a key\n        :param key: The key whose value we want back\n        :return: The value associated with the key", "id": "f7221:c0:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def put(self, key, value):<DEDENT>", "body": "pass<EOL>", "docstring": "Puts {key, value} pair into the state\n        :param key: The key to get back the value\n        :param value: The value associated with the key", "id": "f7221:c0:m0"}
{"signature": "def get_this_sources(self):", "body": "return self.get_sources(self.get_component_id())<EOL>", "docstring": "Returns the declared inputs to this component\n        :return: map <streamId namedtuple (same structure as protobuf msg) -> gtype>, or\n                 None if not found", "id": "f7228:c0:m6"}
{"signature": "@abstractmethod<EOL><INDENT>def get_topology_name(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Returns the name of the topology\n        :return: the name of the topology", "id": "f7228:c0:m3"}
{"signature": "@abstractmethod<EOL><INDENT>def process(self, tup):<DEDENT>", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Process a single tuple of input\n\n        The Tuple object contains metadata on it about which component/stream/task it came from.\n        To emit a tuple, call ``self.emit(tuple)``.\n        Note that tick tuples are not passed to this method, as the ``process_tick()`` method is\n        responsible for processing them.\n\n        **Must be implemented by a subclass, otherwise NotImplementedError is raised.**\n\n        :type tup: :class:`Tuple`\n        :param tup: Tuple to process", "id": "f7229:c0:m1"}
{"signature": "def process(self, tup):", "body": "curtime = int(time.time())<EOL>self.current_tuples.append((tup, curtime))<EOL>self._expire(curtime)<EOL>", "docstring": "Process a single tuple of input\n\n        We add the (time, tuple) pair into our current_tuples. And then look for expiring\n        elemnents", "id": "f7231:c0:m4"}
{"signature": "def process_tick(self, tup):", "body": "curtime = int(time.time())<EOL>window_info = WindowContext(curtime - self.window_duration, curtime)<EOL>self.processWindow(window_info, list(self.current_tuples))<EOL>for tup in self.current_tuples:<EOL><INDENT>self.ack(tup)<EOL><DEDENT>self.current_tuples.clear()<EOL>", "docstring": "Called every window_duration", "id": "f7231:c1:m5"}
{"signature": "@abstractmethod<EOL><INDENT>def processWindow(self, window_info, tuples):<DEDENT>", "body": "pass<EOL>", "docstring": "The main interface that needs to be implemented.\n\n        This function is called every WINDOW_DURATION_SECS seconds\n        and contains the data in the last WINDOW_DURATION_SECS seconds\n        in a list tuples\n\n        :type window_info: :class:`WindowContext`\n        :param window_info: The information about the window\n\n        :type tuples: :class:`list of Tuples`\n        :param tuples: The list of tuples in this window", "id": "f7231:c1:m2"}
{"signature": "def fail(self, tup):", "body": "self.delegate.fail(tup)<EOL>", "docstring": "Indicate that processing of a Tuple has failed\n\n        It is compatible with StreamParse API.", "id": "f7232:c0:m4"}
{"signature": "@classmethod<EOL><INDENT>def spec(cls, name=None, inputs=None, par=<NUM_LIT:1>, config=None, optional_outputs=None):<DEDENT>", "body": "python_class_path = \"<STR_LIT>\" % (cls.__module__, cls.__name__)<EOL>if hasattr(cls, '<STR_LIT>'):<EOL><INDENT>_outputs = copy.copy(cls.outputs)<EOL><DEDENT>else:<EOL><INDENT>_outputs = []<EOL><DEDENT>if optional_outputs is not None:<EOL><INDENT>assert isinstance(optional_outputs, (list, tuple))<EOL>for out in optional_outputs:<EOL><INDENT>assert isinstance(out, (str, Stream))<EOL>_outputs.append(out)<EOL><DEDENT><DEDENT>return HeronComponentSpec(name, python_class_path, is_spout=False, par=par,<EOL>inputs=inputs, outputs=_outputs, config=config)<EOL>", "docstring": "Register this bolt to the topology and create ``HeronComponentSpec``\n\n        This method takes an optional ``outputs`` argument for supporting dynamic output fields\n        declaration. However, it is recommended that ``outputs`` should be declared as\n        an attribute of your ``Bolt`` subclass. Also, some ways of declaring inputs is not supported\n        in this implementation; please read the documentation below.\n\n        :type name: str\n        :param name: Name of this bolt.\n        :type inputs: dict or list\n        :param inputs: Streams that feed into this Bolt.\n\n                       Two forms of this are acceptable:\n\n                       1. A `dict` mapping from ``HeronComponentSpec`` to ``Grouping``.\n                          In this case, default stream is used.\n                       2. A `dict` mapping from ``GlobalStreamId`` to ``Grouping``.\n                          This ``GlobalStreamId`` object itself is different from StreamParse, because\n                          Heron does not use thrift, although its constructor method is compatible.\n                       3. A `list` of ``HeronComponentSpec``. In this case, default stream with\n                          SHUFFLE grouping is used.\n                       4. 
A `list` of ``GlobalStreamId``. In this case, SHUFFLE grouping is used.\n        :type par: int\n        :param par: Parallelism hint for this spout.\n        :type config: dict\n        :param config: Component-specific config settings.\n        :type optional_outputs: list of (str or Stream) or tuple of (str or Stream)\n        :param optional_outputs: Additional output fields for this bolt. These fields are added to\n                                 existing ``outputs`` class attributes of your bolt. This is an optional\n                                 argument, and exists only for supporting dynamic output field\n                                 declaration.", "id": "f7232:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def is_tick(tup):<DEDENT>", "body": "return tup.stream == TupleHelper.TICK_TUPLE_ID<EOL>", "docstring": "Returns whether or not a given HeronTuple is a tick Tuple\n\n        It is compatible with StreamParse API.", "id": "f7232:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def _sanitize_config(custom_config):<DEDENT>", "body": "if not isinstance(custom_config, dict):<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>% str(type(custom_config)))<EOL><DEDENT>sanitized = {}<EOL>for key, value in custom_config.items():<EOL><INDENT>if not isinstance(key, str):<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>% (str(type(key)), str(key)))<EOL><DEDENT>if isinstance(value, bool):<EOL><INDENT>sanitized[key] = \"<STR_LIT:true>\" if value else \"<STR_LIT:false>\"<EOL><DEDENT>elif isinstance(value, (str, int, float)):<EOL><INDENT>sanitized[key] = str(value)<EOL><DEDENT>else:<EOL><INDENT>sanitized[key] = value<EOL><DEDENT><DEDENT>return sanitized<EOL>", "docstring": "Checks whether ``custom_config`` is sane and returns a sanitized dict <str -> (str|object)>\n\n        It checks if keys are all strings and sanitizes values of a given dictionary as follows:\n\n        - If string, number or boolean is given as a value, it is converted to string.\n          For string and number (int, float), it is converted to string by a built-in ``str()`` method.\n          For a boolean value, ``True`` is converted to \"true\" instead of \"True\", and ``False`` is\n          converted to \"false\" instead of \"False\", in order to keep the consistency with\n          Java configuration.\n\n        - If neither of the above is given as a value, it is inserted into the sanitized dict as it is.\n          These values will need to be serialized before adding to a protobuf message.", "id": "f7234:c0:m7"}
{"signature": "def __getitem__(self, stream_id):", "body": "if stream_id not in self.get_out_streamids():<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % stream_id)<EOL><DEDENT>component_id = self.name or self<EOL>return GlobalStreamId(componentId=component_id, streamId=stream_id)<EOL>", "docstring": "Get GlobalStreamId for a given stream_id", "id": "f7234:c0:m13"}
{"signature": "def get_out_streamids(self):", "body": "if self.outputs is None:<EOL><INDENT>return set()<EOL><DEDENT>if not isinstance(self.outputs, (list, tuple)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>% str(type(self.outputs)))<EOL><DEDENT>ret_lst = []<EOL>for output in self.outputs:<EOL><INDENT>if not isinstance(output, (str, Stream)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % str(output))<EOL><DEDENT>ret_lst.append(Stream.DEFAULT_STREAM_ID if isinstance(output, str) else output.stream_id)<EOL><DEDENT>return set(ret_lst)<EOL>", "docstring": "Returns a set of output stream ids registered for this component", "id": "f7234:c0:m12"}
{"signature": "def _sanitize_inputs(self):", "body": "ret = {}<EOL>if self.inputs is None:<EOL><INDENT>return<EOL><DEDENT>if isinstance(self.inputs, dict):<EOL><INDENT>for key, grouping in self.inputs.items():<EOL><INDENT>if not Grouping.is_grouping_sane(grouping):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if isinstance(key, HeronComponentSpec):<EOL><INDENT>if key.name is None:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>global_streamid = GlobalStreamId(key.name, Stream.DEFAULT_STREAM_ID)<EOL>ret[global_streamid] = grouping<EOL><DEDENT>elif isinstance(key, GlobalStreamId):<EOL><INDENT>ret[key] = grouping<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % str(key))<EOL><DEDENT><DEDENT><DEDENT>elif isinstance(self.inputs, (list, tuple)):<EOL><INDENT>for input_obj in self.inputs:<EOL><INDENT>if isinstance(input_obj, HeronComponentSpec):<EOL><INDENT>if input_obj.name is None:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>global_streamid = GlobalStreamId(input_obj.name, Stream.DEFAULT_STREAM_ID)<EOL>ret[global_streamid] = Grouping.SHUFFLE<EOL><DEDENT>elif isinstance(input_obj, GlobalStreamId):<EOL><INDENT>ret[input_obj] = Grouping.SHUFFLE<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % str(input_obj))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % str(self.inputs))<EOL><DEDENT>return ret<EOL>", "docstring": "Sanitizes input fields and returns a map <GlobalStreamId -> Grouping>", "id": "f7234:c0:m9"}
{"signature": "def __init__(self, componentId, streamId):", "body": "if not isinstance(componentId, (str, HeronComponentSpec)):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if not isinstance(streamId, str):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>self._component_id = componentId<EOL>self.stream_id = streamId<EOL>", "docstring": ":type componentId: str or HeronComponentSpec\n:param componentId: component id from which the tuple is emitted, or HeronComponentSpec object.\n:type streamId: str\n:param streamId: stream id through which the tuple is transmitted", "id": "f7234:c1:m0"}
{"signature": "def _get_comp_config(self):", "body": "proto_config = topology_pb2.Config()<EOL>key = proto_config.kvs.add()<EOL>key.key = TOPOLOGY_COMPONENT_PARALLELISM<EOL>key.value = str(self.parallelism)<EOL>key.type = topology_pb2.ConfigValueType.Value(\"<STR_LIT>\")<EOL>if self.custom_config is not None:<EOL><INDENT>sanitized = self._sanitize_config(self.custom_config)<EOL>for key, value in sanitized.items():<EOL><INDENT>if isinstance(value, str):<EOL><INDENT>kvs = proto_config.kvs.add()<EOL>kvs.key = key<EOL>kvs.value = value<EOL>kvs.type = topology_pb2.ConfigValueType.Value(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>kvs = proto_config.kvs.add()<EOL>kvs.key = key<EOL>kvs.serialized_value = default_serializer.serialize(value)<EOL>kvs.type = topology_pb2.ConfigValueType.Value(\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT>return proto_config<EOL>", "docstring": "Returns component-specific Config protobuf message\n\n        It first adds ``topology.component.parallelism``, and is overriden by\n        a user-defined component-specific configuration, specified by spec().", "id": "f7234:c0:m6"}
{"signature": "def _add_out_streams(self, spbl):", "body": "if self.outputs is None:<EOL><INDENT>return<EOL><DEDENT>output_map = self._sanitize_outputs()<EOL>for stream_id, out_fields in output_map.items():<EOL><INDENT>out_stream = spbl.outputs.add()<EOL>out_stream.stream.CopyFrom(self._get_stream_id(self.name, stream_id))<EOL>out_stream.schema.CopyFrom(self._get_stream_schema(out_fields))<EOL><DEDENT>", "docstring": "Adds outputs to a given protobuf Bolt or Spout message", "id": "f7234:c0:m10"}
{"signature": "def _get_bolt(self):", "body": "bolt = topology_pb2.Bolt()<EOL>bolt.comp.CopyFrom(self._get_base_component())<EOL>self._add_in_streams(bolt)<EOL>self._add_out_streams(bolt)<EOL>return bolt<EOL>", "docstring": "Returns Bolt protobuf message", "id": "f7234:c0:m4"}
{"signature": "@staticmethod<EOL><INDENT>def _get_stream_schema(fields):<DEDENT>", "body": "stream_schema = topology_pb2.StreamSchema()<EOL>for field in fields:<EOL><INDENT>key = stream_schema.keys.add()<EOL>key.key = field<EOL>key.type = topology_pb2.Type.Value(\"<STR_LIT>\")<EOL><DEDENT>return stream_schema<EOL>", "docstring": "Returns a StreamSchema protobuf message", "id": "f7234:c0:m15"}
{"signature": "def get_protobuf(self):", "body": "if self.is_spout:<EOL><INDENT>return self._get_spout()<EOL><DEDENT>else:<EOL><INDENT>return self._get_bolt()<EOL><DEDENT>", "docstring": "Returns protobuf message (Spout or Bolt) of this component", "id": "f7234:c0:m2"}
{"signature": "def __init__(self, delegate):", "body": "self.delegate = delegate<EOL>self.logger = self.delegate.logger<EOL>", "docstring": "Initializes BaseComponent\n\n        :param delegate: SpoutInstance or BoltInstance", "id": "f7235:c0:m0"}
{"signature": "@abstractmethod<EOL><INDENT>def spout_ack(self, spout_ack_info):<DEDENT>", "body": "pass<EOL>", "docstring": "Called in spout every time a tuple gets acked\n\n        :param spout_ack_info: SpoutAckInfo object", "id": "f7237:c0:m3"}
{"signature": "@abstractmethod<EOL><INDENT>def bolt_fail(self, bolt_fail_info):<DEDENT>", "body": "pass<EOL>", "docstring": "Called in bolt every time a tuple gets failed\n\n        :param bolt_fail_info: BoltFailInfo object", "id": "f7237:c0:m7"}
{"signature": "@abstractmethod<EOL><INDENT>def prepare(self, conf, context):<DEDENT>", "body": "pass<EOL>", "docstring": "Called after the spout/bolt's initialize() method is called\n\n        :param conf: component-specific configuration passed to the topology\n        :param context: topology context", "id": "f7237:c0:m0"}
{"signature": "@abstractmethod<EOL><INDENT>def bolt_execute(self, bolt_execute_info):<DEDENT>", "body": "pass<EOL>", "docstring": "Called in bolt every time a tuple gets executed\n\n        :param bolt_execute_info: BoltExecuteInfo object", "id": "f7237:c0:m5"}
{"signature": "@classmethod<EOL><INDENT>def spec(cls, name=None, par=<NUM_LIT:1>, config=None, optional_outputs=None):<DEDENT>", "body": "python_class_path = \"<STR_LIT>\" % (cls.__module__, cls.__name__)<EOL>if hasattr(cls, '<STR_LIT>'):<EOL><INDENT>_outputs = copy.copy(cls.outputs)<EOL><DEDENT>else:<EOL><INDENT>_outputs = []<EOL><DEDENT>if optional_outputs is not None:<EOL><INDENT>assert isinstance(optional_outputs, (list, tuple))<EOL>for out in optional_outputs:<EOL><INDENT>assert isinstance(out, (str, Stream))<EOL>_outputs.append(out)<EOL><DEDENT><DEDENT>return HeronComponentSpec(name, python_class_path, is_spout=True, par=par,<EOL>inputs=None, outputs=_outputs, config=config)<EOL>", "docstring": "Register this spout to the topology and create ``HeronComponentSpec``\n\n        The usage of this method is compatible with StreamParse API, although it does not create\n        ``ShellBoltSpec`` but instead directly registers to a ``Topology`` class.\n\n        This method takes an optional ``outputs`` argument for supporting dynamic output fields\n        declaration. However, it is recommended that ``outputs`` should be declared as\n        an attribute of your ``Spout`` subclass. Also, some ways of declaring inputs is not supported\n        in this implementation; please read the documentation below.\n\n        :type name: str\n        :param name: Name of this spout.\n        :type par: int\n        :param par: Parallelism hint for this spout.\n        :type config: dict\n        :param config: Component-specific config settings.\n        :type optional_outputs: list of (str or Stream) or tuple of (str or Stream)\n        :param optional_outputs: Additional output fields for this spout. 
These fields are added to\n                                 existing ``outputs`` class attributes of your spout.\n                                 This is an optional argument, and exists only for supporting dynamic\n                                 output field declaration.", "id": "f7239:c0:m0"}
{"signature": "@abstractmethod<EOL><INDENT>def deactivate(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Called when a spout has been deactivated\n\n        next_tuple() will not be called while a spout is deactivated.\n        The spout may or may not be reactivated in the future.", "id": "f7240:c0:m6"}
{"signature": "@abstractmethod<EOL><INDENT>def fail(self, tup_id):<DEDENT>", "body": "pass<EOL>", "docstring": "Determine that the tuple emitted by this spout with the tup_id has failed to be processed\n\n        It is compatible with StreamParse API.\n\n        The tuple emitted by this spout with the tup_id identifier has failed to be\n        fully processed. Typically, an implementation of this method will put that\n        message back on the queue to be replayed at a later time.\n\n        *Should be implemented by a subclass.*\n\n        :param tup_id: the ID of the HeronTuple that has failed either due to a bolt calling ``fail()``\n                       or timeout", "id": "f7240:c0:m4"}
{"signature": "def __init__(self, fields=None, name=DEFAULT_STREAM_ID, direct=False):", "body": "if fields is None:<EOL><INDENT>fields = []<EOL><DEDENT>elif isinstance(fields, (list, tuple)):<EOL><INDENT>fields = list(fields)<EOL>for field in fields:<EOL><INDENT>if not isinstance(field, str):<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % str(field))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % str(fields))<EOL><DEDENT>self.fields = fields<EOL>if name is None:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>elif isinstance(name, str):<EOL><INDENT>self.stream_id = name<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % str(name))<EOL><DEDENT>if isinstance(direct, bool):<EOL><INDENT>self.direct = direct<EOL>if self.direct:<EOL><INDENT>raise NotImplementedError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % str(direct))<EOL><DEDENT>", "docstring": ":type fields: `list` or `tuple` of `str`\n:param fields: field names for this stream\n:type name: str\n:param name: name of stream. Defaults to ``default``\n:type direct: bool\n:param direct: whether or not this stream is direct. Default is ``False``", "id": "f7242:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def custom(cls, customgrouper):<DEDENT>", "body": "if customgrouper is None:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(customgrouper, ICustomGrouping) and not isinstance(customgrouper, str):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>serialized = default_serializer.serialize(customgrouper)<EOL>return cls.custom_serialized(serialized, is_java=False)<EOL>", "docstring": "Custom grouping from a given implementation of ICustomGrouping\n\n        :param customgrouper: The ICustomGrouping implemention to use", "id": "f7242:c1:m2"}
{"signature": "@abstractmethod<EOL><INDENT>def initialize(self, config):<DEDENT>", "body": "pass<EOL>", "docstring": "Initializes the serializer", "id": "f7243:c0:m0"}
{"signature": "def _make_skel_func(code, closures, base_globals=None):", "body": "closure = _reconstruct_closure(closures) if closures else None<EOL>if base_globals is None:<EOL><INDENT>base_globals = {}<EOL><DEDENT>base_globals['<STR_LIT>'] = __builtins__<EOL>return types.FunctionType(code, base_globals, None, None, closure)<EOL>", "docstring": "Creates a skeleton function object that contains just the provided\n      code and the correct number of cells in func_closure.  All other\n      func attributes (e.g. func_globals) are empty.", "id": "f7244:m13"}
{"signature": "def save_memoryview(self, obj):", "body": "Pickler.save_string(self, str(obj))<EOL>", "docstring": "Fallback to save_string", "id": "f7244:c0:m2"}
{"signature": "def save_file(self, obj): ", "body": "try:<EOL><INDENT>import StringIO as pystringIO <EOL><DEDENT>except ImportError:<EOL><INDENT>import io as pystringIO <EOL><DEDENT>if not hasattr(obj, '<STR_LIT:name>') or  not hasattr(obj, '<STR_LIT>'):<EOL><INDENT>raise pickle.PicklingError(\"<STR_LIT>\")<EOL><DEDENT>if obj is sys.stdout:<EOL><INDENT>return self.save_reduce(getattr, (sys, '<STR_LIT>'), obj=obj)<EOL><DEDENT>if obj is sys.stderr:<EOL><INDENT>return self.save_reduce(getattr, (sys, '<STR_LIT>'), obj=obj)<EOL><DEDENT>if obj is sys.stdin:<EOL><INDENT>raise pickle.PicklingError(\"<STR_LIT>\")<EOL><DEDENT>if  hasattr(obj, '<STR_LIT>') and obj.isatty():<EOL><INDENT>raise pickle.PicklingError(\"<STR_LIT>\")<EOL><DEDENT>if '<STR_LIT:r>' not in obj.mode:<EOL><INDENT>raise pickle.PicklingError(\"<STR_LIT>\")<EOL><DEDENT>name = obj.name<EOL>try:<EOL><INDENT>fsize = os.stat(name).st_size<EOL><DEDENT>except OSError:<EOL><INDENT>raise pickle.PicklingError(\"<STR_LIT>\" % name)<EOL><DEDENT>if obj.closed:<EOL><INDENT>retval = pystringIO.StringIO(\"<STR_LIT>\")<EOL>retval.close()<EOL><DEDENT>elif not fsize: <EOL><INDENT>retval = pystringIO.StringIO(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>tmpfile = file(name)<EOL>tst = tmpfile.read(<NUM_LIT:1>)<EOL><DEDENT>except IOError:<EOL><INDENT>raise pickle.PicklingError(\"<STR_LIT>\" % name)<EOL><DEDENT>tmpfile.close()<EOL>if tst != '<STR_LIT>':<EOL><INDENT>raise pickle.PicklingError(<EOL>\"<STR_LIT>\" % name)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>tmpfile = file(name)<EOL>contents = tmpfile.read()<EOL>tmpfile.close()<EOL><DEDENT>except IOError:<EOL><INDENT>raise pickle.PicklingError(\"<STR_LIT>\" % name)<EOL><DEDENT>retval = pystringIO.StringIO(contents)<EOL>curloc = obj.tell()<EOL>retval.seek(curloc)<EOL><DEDENT>retval.name = name<EOL>self.save(retval)<EOL>self.memoize(obj)<EOL>", "docstring": "Save a file", "id": "f7244:c0:m20"}
{"signature": "@classmethod<EOL><INDENT>def extract_code_globals(cls, co):<DEDENT>", "body": "out_names = cls._extract_code_globals_cache.get(co)<EOL>if out_names is None:<EOL><INDENT>try:<EOL><INDENT>names = co.co_names<EOL><DEDENT>except AttributeError:<EOL><INDENT>out_names = set()<EOL><DEDENT>else:<EOL><INDENT>out_names = set(names[oparg]<EOL>for op, oparg in _walk_global_ops(co))<EOL>if co.co_consts:<EOL><INDENT>for const in co.co_consts:<EOL><INDENT>if type(const) is types.CodeType: <EOL><INDENT>out_names |= cls.extract_code_globals(const)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>cls._extract_code_globals_cache[co] = out_names<EOL><DEDENT>return out_names<EOL>", "docstring": "Find all globals names read or written to by codeblock co", "id": "f7244:c0:m9"}
{"signature": "def save_reduce(self, func, args, state=None, <EOL>listitems=None, dictitems=None, obj=None):", "body": "<EOL>if not isinstance(args, tuple):<EOL><INDENT>raise pickle.PicklingError(\"<STR_LIT>\")<EOL><DEDENT>if not hasattr(func, '<STR_LIT>'):<EOL><INDENT>raise pickle.PicklingError(\"<STR_LIT>\")<EOL><DEDENT>save = self.save<EOL>write = self.write<EOL>if self.proto >= <NUM_LIT:2> and getattr(func, \"<STR_LIT>\", \"<STR_LIT>\") == \"<STR_LIT>\":<EOL><INDENT>cls = args[<NUM_LIT:0>]<EOL>if not hasattr(cls, \"<STR_LIT>\"):<EOL><INDENT>raise pickle.PicklingError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>if obj is not None and cls is not obj.__class__:<EOL><INDENT>raise pickle.PicklingError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>args = args[<NUM_LIT:1>:]<EOL>save(cls)<EOL>if hasattr(obj, '<STR_LIT>'):<EOL><INDENT>transient = obj.__transient__<EOL>state = state.copy()<EOL>for k in list(state.keys()):<EOL><INDENT>if k in transient:<EOL><INDENT>del state[k]<EOL><DEDENT><DEDENT><DEDENT>save(args)<EOL>write(pickle.NEWOBJ)<EOL><DEDENT>else:<EOL><INDENT>save(func)<EOL>save(args)<EOL>write(pickle.REDUCE)<EOL><DEDENT>if obj is not None:<EOL><INDENT>self.memoize(obj)<EOL><DEDENT>if listitems is not None:<EOL><INDENT>self._batch_appends(listitems)<EOL><DEDENT>if dictitems is not None:<EOL><INDENT>self._batch_setitems(dictitems)<EOL><DEDENT>if state is not None:<EOL><INDENT>save(state)<EOL>write(pickle.BUILD)<EOL><DEDENT>", "docstring": "Modified to support __transient__ on new objects\n        Change only affects protocol level 2 (which is always used by PiCloud", "id": "f7244:c0:m18"}
{"signature": "def FindCheckMacro(line):", "body": "for macro in _CHECK_MACROS:<EOL><INDENT>i = line.find(macro)<EOL>if i >= <NUM_LIT:0>:<EOL><INDENT>matched = Match(r'<STR_LIT>' + macro + r'<STR_LIT>', line)<EOL>if not matched:<EOL><INDENT>continue<EOL><DEDENT>return (macro, len(matched.group(<NUM_LIT:1>)))<EOL><DEDENT><DEDENT>return (None, -<NUM_LIT:1>)<EOL>", "docstring": "Find a replaceable CHECK-like macro.\n\n    Args:\n      line: line to search on.\n    Returns:\n      (macro name, start position), or (None, -1) if no replaceable\n      macro is found.", "id": "f7245:m67"}
{"signature": "def InExternC(self):", "body": "return self.stack and isinstance(self.stack[-<NUM_LIT:1>], _ExternCInfo)<EOL>", "docstring": "Check if we are currently one level inside an 'extern \"C\"' block.\n\n        Returns:\n          True if top of the stack is an extern block, False otherwise.", "id": "f7245:c11:m3"}
{"signature": "def CheckForNewlineAtEOF(filename, lines, error):", "body": "<EOL>if len(lines) < <NUM_LIT:3> or lines[-<NUM_LIT:2>]:<EOL><INDENT>error(filename, len(lines) - <NUM_LIT:2>, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Logs an error if there is no newline char at the end of the file.\n\n    Args:\n      filename: The name of the current file.\n      lines: An array of strings, each representing a line of the file.\n      error: The function to call with any errors found.", "id": "f7245:m41"}
{"signature": "def PrintErrorCounts(self):", "body": "for category, count in sorted(iteritems(self.errors_by_category)):<EOL><INDENT>self.PrintInfo('<STR_LIT>' %<EOL>(category, count))<EOL><DEDENT>if self.error_count > <NUM_LIT:0>:<EOL><INDENT>self.PrintInfo('<STR_LIT>' % self.error_count)<EOL><DEDENT>", "docstring": "Print a summary of errors by category, and the total.", "id": "f7245:c1:m10"}
{"signature": "def FilesBelongToSameModule(filename_cc, filename_h):", "body": "fileinfo_cc = FileInfo(filename_cc)<EOL>if not fileinfo_cc.Extension().lstrip('<STR_LIT:.>') in GetNonHeaderExtensions():<EOL><INDENT>return (False, '<STR_LIT>')<EOL><DEDENT>fileinfo_h = FileInfo(filename_h)<EOL>if not fileinfo_h.Extension().lstrip('<STR_LIT:.>') in GetHeaderExtensions():<EOL><INDENT>return (False, '<STR_LIT>')<EOL><DEDENT>filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]<EOL>matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())<EOL>if matched_test_suffix:<EOL><INDENT>filename_cc = filename_cc[:-len(matched_test_suffix.group(<NUM_LIT:1>))]<EOL><DEDENT>filename_cc = filename_cc.replace('<STR_LIT>', '<STR_LIT:/>')<EOL>filename_cc = filename_cc.replace('<STR_LIT>', '<STR_LIT:/>')<EOL>filename_h = filename_h[:-(len(fileinfo_h.Extension()))]<EOL>if filename_h.endswith('<STR_LIT>'):<EOL><INDENT>filename_h = filename_h[:-len('<STR_LIT>')]<EOL><DEDENT>filename_h = filename_h.replace('<STR_LIT>', '<STR_LIT:/>')<EOL>filename_h = filename_h.replace('<STR_LIT>', '<STR_LIT:/>')<EOL>files_belong_to_same_module = filename_cc.endswith(filename_h)<EOL>common_path = '<STR_LIT>'<EOL>if files_belong_to_same_module:<EOL><INDENT>common_path = filename_cc[:-len(filename_h)]<EOL><DEDENT>return files_belong_to_same_module, common_path<EOL>", "docstring": "Check if these two filenames belong to the same module.\n\n    The concept of a 'module' here is a as follows:\n    foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the\n    same 'module' if they are in the same directory.\n    some/path/public/xyzzy and some/path/internal/xyzzy are also considered\n    to belong to the same module here.\n\n    If the filename_cc contains a longer path than the filename_h, for example,\n    '/absolute/path/to/base/sysinfo.cc', and this file would include\n    'base/sysinfo.h', this function also produces the prefix needed to open the\n    header. 
This is used by the caller of this function to more robustly open the\n    header file. We don't have access to the real include paths in this context,\n    so we need this guesswork here.\n\n    Known bugs: tools/base/bar.cc and base/bar.h belong to the same module\n    according to this implementation. Because of this, this function gives\n    some false positives. This should be sufficiently rare in practice.\n\n    Args:\n      filename_cc: is the path for the source (e.g. .cc) file\n      filename_h: is the path for the header path\n\n    Returns:\n      Tuple with a bool and a string:\n      bool: True if filename_cc and filename_h belong to the same module.\n      string: the additional prefix needed to open the header file.", "id": "f7245:m86"}
{"signature": "def InnermostClass(self):", "body": "for i in range(len(self.stack), <NUM_LIT:0>, -<NUM_LIT:1>):<EOL><INDENT>classinfo = self.stack[i - <NUM_LIT:1>]<EOL>if isinstance(classinfo, _ClassInfo):<EOL><INDENT>return classinfo<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Get class info on the top of the stack.\n\n        Returns:\n          A _ClassInfo object if we are inside a class, or None otherwise.", "id": "f7245:c11:m9"}
{"signature": "def IncrementErrorCount(self, category):", "body": "self.error_count += <NUM_LIT:1><EOL>if self.counting in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if self.counting != '<STR_LIT>':<EOL><INDENT>category = category.split('<STR_LIT:/>')[<NUM_LIT:0>]<EOL><DEDENT>if category not in self.errors_by_category:<EOL><INDENT>self.errors_by_category[category] = <NUM_LIT:0><EOL><DEDENT>self.errors_by_category[category] += <NUM_LIT:1><EOL><DEDENT>", "docstring": "Bumps the module's error statistic.", "id": "f7245:c1:m9"}
{"signature": "def _AddFilters(filters):", "body": "_cpplint_state.AddFilters(filters)<EOL>", "docstring": "Adds more filter overrides.\n\n    Unlike _SetFilters, this function does not reset the current list of filters\n    available.\n\n    Args:\n      filters: A string of comma-separated filters (eg \"whitespace/indent\").\n               Each filter should start with + or -; else we die.", "id": "f7245:m19"}
{"signature": "def CheckForNonConstReference(filename, clean_lines, linenum,<EOL>nesting_state, error):", "body": "<EOL>line = clean_lines.elided[linenum]<EOL>if '<STR_LIT:&>' not in line:<EOL><INDENT>return<EOL><DEDENT>if IsDerivedFunction(clean_lines, linenum):<EOL><INDENT>return<EOL><DEDENT>if IsOutOfLineMethodDefinition(clean_lines, linenum):<EOL><INDENT>return<EOL><DEDENT>if linenum > <NUM_LIT:1>:<EOL><INDENT>previous = None<EOL>if Match(r'<STR_LIT>', line):<EOL><INDENT>previous = Search(r'<STR_LIT>',<EOL>clean_lines.elided[linenum - <NUM_LIT:1>])<EOL><DEDENT>elif Match(r'<STR_LIT>', line):<EOL><INDENT>previous = Search(r'<STR_LIT>',<EOL>clean_lines.elided[linenum - <NUM_LIT:1>])<EOL><DEDENT>if previous:<EOL><INDENT>line = previous.group(<NUM_LIT:1>) + line.lstrip()<EOL><DEDENT>else:<EOL><INDENT>endpos = line.rfind('<STR_LIT:>>')<EOL>if endpos > -<NUM_LIT:1>:<EOL><INDENT>(_, startline, startpos) = ReverseCloseExpression(<EOL>clean_lines, linenum, endpos)<EOL>if startpos > -<NUM_LIT:1> and startline < linenum:<EOL><INDENT>line = '<STR_LIT>'<EOL>for i in xrange(startline, linenum + <NUM_LIT:1>):<EOL><INDENT>line += clean_lines.elided[i].strip()<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if (nesting_state.previous_stack_top and<EOL>not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or<EOL>isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):<EOL><INDENT>return<EOL><DEDENT>if linenum > <NUM_LIT:0>:<EOL><INDENT>for i in xrange(linenum - <NUM_LIT:1>, max(<NUM_LIT:0>, linenum - <NUM_LIT:10>), -<NUM_LIT:1>):<EOL><INDENT>previous_line = clean_lines.elided[i]<EOL>if not Search(r'<STR_LIT>', previous_line):<EOL><INDENT>break<EOL><DEDENT>if Match(r'<STR_LIT>', previous_line):<EOL><INDENT>return<EOL><DEDENT><DEDENT><DEDENT>if Search(r'<STR_LIT>', line):<EOL><INDENT>return<EOL><DEDENT>if IsInitializerList(clean_lines, linenum):<EOL><INDENT>return<EOL><DEDENT>whitelisted_functions = 
(r'<STR_LIT>'<EOL>r'<STR_LIT>'<EOL>r'<STR_LIT>'<EOL>r'<STR_LIT>')<EOL>if Search(whitelisted_functions, line):<EOL><INDENT>return<EOL><DEDENT>elif not Search(r'<STR_LIT>', line):<EOL><INDENT>for i in xrange(<NUM_LIT:2>):<EOL><INDENT>if (linenum > i and<EOL>Search(whitelisted_functions, clean_lines.elided[linenum - i - <NUM_LIT:1>])):<EOL><INDENT>return<EOL><DEDENT><DEDENT><DEDENT>decls = ReplaceAll(r'<STR_LIT>', '<STR_LIT:U+0020>', line)  <EOL>for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):<EOL><INDENT>if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and<EOL>not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:2>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' +<EOL>ReplaceAll('<STR_LIT>', '<STR_LIT:<>', parameter))<EOL><DEDENT><DEDENT>", "docstring": "Check for non-const references.\n\n    Separate from CheckLanguage since it scans backwards from current\n    line, instead of scanning forward.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      nesting_state: A NestingState instance which maintains information about\n                     the current stack of nested blocks being parsed.\n      error: The function to call with any errors found.", "id": "f7245:m82"}
{"signature": "def ProcessConfigOverrides(filename):", "body": "abs_filename = os.path.abspath(filename)<EOL>cfg_filters = []<EOL>keep_looking = True<EOL>while keep_looking:<EOL><INDENT>abs_path, base_name = os.path.split(abs_filename)<EOL>if not base_name:<EOL><INDENT>break  <EOL><DEDENT>cfg_file = os.path.join(abs_path, \"<STR_LIT>\")<EOL>abs_filename = abs_path<EOL>if not os.path.isfile(cfg_file):<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>with open(cfg_file) as file_handle:<EOL><INDENT>for line in file_handle:<EOL><INDENT>line, _, _ = line.partition('<STR_LIT:#>')  <EOL>if not line.strip():<EOL><INDENT>continue<EOL><DEDENT>name, _, val = line.partition('<STR_LIT:=>')<EOL>name = name.strip()<EOL>val = val.strip()<EOL>if name == '<STR_LIT>':<EOL><INDENT>keep_looking = False<EOL><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>cfg_filters.append(val)<EOL><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>if base_name:<EOL><INDENT>pattern = re.compile(val)<EOL>if pattern.match(base_name):<EOL><INDENT>_cpplint_state.PrintInfo('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(filename, cfg_file, base_name, val))<EOL>return False<EOL><DEDENT><DEDENT><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>global _line_length<EOL>try:<EOL><INDENT>_line_length = int(val)<EOL><DEDENT>except ValueError:<EOL><INDENT>_cpplint_state.PrintError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>global _valid_extensions<EOL>try:<EOL><INDENT>extensions = [ext.strip() for ext in val.split('<STR_LIT:U+002C>')]<EOL>_valid_extensions = set(extensions)<EOL><DEDENT>except ValueError:<EOL><INDENT>sys.stderr.write('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % (val,))<EOL><DEDENT><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>global _header_extensions<EOL>try:<EOL><INDENT>extensions = [ext.strip() for ext in val.split('<STR_LIT:U+002C>')]<EOL>_header_extensions = set(extensions)<EOL><DEDENT>except ValueError:<EOL><INDENT>sys.stderr.write('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % 
(val,))<EOL><DEDENT><DEDENT>elif name == '<STR_LIT:root>':<EOL><INDENT>global _root<EOL>_root = val<EOL><DEDENT>else:<EOL><INDENT>_cpplint_state.PrintError(<EOL>'<STR_LIT>' %<EOL>(name, cfg_file))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>except IOError:<EOL><INDENT>_cpplint_state.PrintError(<EOL>\"<STR_LIT>\" % cfg_file)<EOL>keep_looking = False<EOL><DEDENT><DEDENT>for cfg_filter in reversed(cfg_filters):<EOL><INDENT>_AddFilters(cfg_filter)<EOL><DEDENT>return True<EOL>", "docstring": "Loads the configuration files and processes the config overrides.\n\n    Args:\n      filename: The name of the file being processed by the linter.\n\n    Returns:\n      False if the current |filename| should not be processed further.", "id": "f7245:m99"}
{"signature": "def End(self):", "body": "self.in_a_function = False<EOL>", "docstring": "Stop analyzing function body.", "id": "f7245:c2:m4"}
{"signature": "def IsDerivedFunction(clean_lines, linenum):", "body": "<EOL>for i in xrange(linenum, max(-<NUM_LIT:1>, linenum - <NUM_LIT:10>), -<NUM_LIT:1>):<EOL><INDENT>match = Match(r'<STR_LIT>', clean_lines.elided[i])<EOL>if match:<EOL><INDENT>line, _, closing_paren = CloseExpression(<EOL>clean_lines, i, len(match.group(<NUM_LIT:1>)))<EOL>return (closing_paren >= <NUM_LIT:0> and<EOL>Search(r'<STR_LIT>', line[closing_paren:]))<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Check if current line contains an inherited function.\n\n    Args:\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n    Returns:\n      True if current line contains a function with \"override\"\n      virt-specifier.", "id": "f7245:m79"}
{"signature": "def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):", "body": "line = clean_lines.elided[linenum]<EOL>line = line.replace('<STR_LIT>', '<STR_LIT>')<EOL>if line.count('<STR_LIT>') > line.count('<STR_LIT>'):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if (line.count('<STR_LIT:\">') - line.count('<STR_LIT>')) % <NUM_LIT:2>:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Logs an error if we see /* ... */ or \"...\" that extend past one line.\n\n    /* ... */ comments are legit inside macros, for one line.\n    Otherwise, we prefer // comments, so it's ok to warn about the\n    other.  Likewise, it's ok for strings to extend across multiple\n    lines, as long as a line continuation character (backslash)\n    terminates each line. Although not currently prohibited by the C++\n    style guide, it's ugly and unnecessary. We don't do well with either\n    in this lint program, so we warn about both.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      error: The function to call with any errors found.", "id": "f7245:m42"}
{"signature": "def IsCppString(line):", "body": "line = line.replace(r'<STR_LIT:\\\\>', '<STR_LIT>')  <EOL>return ((line.count('<STR_LIT:\">') - line.count(r'<STR_LIT>') - line.count(\"<STR_LIT>\")) & <NUM_LIT:1>) == <NUM_LIT:1><EOL>", "docstring": "Does line terminate so, that the next symbol is in string constant.\n\n    This function does not consider single-line nor multi-line comments.\n\n    Args:\n      line: is a partial line of code starting from the 0..n.\n\n    Returns:\n      True, if next character appended to 'line' is inside a\n      string constant.", "id": "f7245:m24"}
{"signature": "def CheckGlobalStatic(filename, clean_lines, linenum, error):", "body": "line = clean_lines.elided[linenum]<EOL>if linenum + <NUM_LIT:1> < clean_lines.NumLines() and not Search(r'<STR_LIT>', line):<EOL><INDENT>line += clean_lines.elided[linenum + <NUM_LIT:1>].strip()<EOL><DEDENT>match = Match(<EOL>r'<STR_LIT>'<EOL>r'<STR_LIT>',<EOL>line)<EOL>if (match and<EOL>not Search(r'<STR_LIT>', line) and<EOL>not Search(r'<STR_LIT>', line) and<EOL>not Match(r'<STR_LIT>', match.group(<NUM_LIT:4>))):<EOL><INDENT>if Search(r'<STR_LIT>', line):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:4>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(match.group(<NUM_LIT:1>), match.group(<NUM_LIT:2>) or '<STR_LIT>', match.group(<NUM_LIT:3>)))<EOL><DEDENT>else:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:4>,<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>if (Search(r'<STR_LIT>', line) or<EOL>Search(r'<STR_LIT>', line)):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:4>,<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Check for unsafe global or static objects.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      error: The function to call with any errors found.", "id": "f7245:m77"}
{"signature": "def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):", "body": "line = clean_lines.elided[linenum]<EOL>fncall = line    <EOL>for pattern in (r'<STR_LIT>',<EOL>r'<STR_LIT>',<EOL>r'<STR_LIT>',<EOL>r'<STR_LIT>'):<EOL><INDENT>match = Search(pattern, line)<EOL>if match:<EOL><INDENT>fncall = match.group(<NUM_LIT:1>)    <EOL>break<EOL><DEDENT><DEDENT>if (  <EOL>not Search(r'<STR_LIT>',<EOL>fncall) and<EOL>not Search(r'<STR_LIT>', fncall) and<EOL>not Search(r'<STR_LIT>', fncall)):<EOL><INDENT>if Search(r'<STR_LIT>', fncall):      <EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:4>,<EOL>'<STR_LIT>')<EOL><DEDENT>elif Search(r'<STR_LIT>', fncall):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:2>,<EOL>'<STR_LIT>')<EOL><DEDENT>if (Search(r'<STR_LIT>', fncall) and<EOL>not Search(r'<STR_LIT>', fncall) and<EOL>not Search(r'<STR_LIT>', fncall) and<EOL>not Search(r'<STR_LIT>', fncall) and<EOL>not Search(r'<STR_LIT>', fncall)):<EOL><INDENT>if Search(r'<STR_LIT>', line):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:0>,<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:4>,<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>if Search(r'<STR_LIT>', fncall):<EOL><INDENT>if Search(r'<STR_LIT>', fncall):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:2>,<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:2>,<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Checks for the correctness of various spacing around function calls.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      error: The function to call with any errors found.", "id": "f7245:m49"}
{"signature": "def Split(self):", "body": "googlename = self.RepositoryName()<EOL>project, rest = os.path.split(googlename)<EOL>return (project,) + os.path.splitext(rest)<EOL>", "docstring": "Splits the file into the directory, basename, and extension.\n\n        For 'chrome/browser/browser.cc', Split() would\n        return ('chrome/browser', 'browser', '.cc')\n\n        Returns:\n          A tuple of (directory, basename, extension).", "id": "f7245:c4:m3"}
{"signature": "def IsOutOfLineMethodDefinition(clean_lines, linenum):", "body": "<EOL>for i in xrange(linenum, max(-<NUM_LIT:1>, linenum - <NUM_LIT:10>), -<NUM_LIT:1>):<EOL><INDENT>if Match(r'<STR_LIT>', clean_lines.elided[i]):<EOL><INDENT>return Match(r'<STR_LIT>', clean_lines.elided[i]) is not None<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Check if current line contains an out-of-line method definition.\n\n    Args:\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n    Returns:\n      True if current line contains an out-of-line method definition.", "id": "f7245:m80"}
{"signature": "def ReplaceAll(pattern, rep, s):", "body": "if pattern not in _regexp_compile_cache:<EOL><INDENT>_regexp_compile_cache[pattern] = sre_compile.compile(pattern)<EOL><DEDENT>return _regexp_compile_cache[pattern].sub(rep, s)<EOL>", "docstring": "Replaces instances of pattern in a string with a replacement.\n\n    The compiled regex is kept in a cache shared by Match and Search.\n\n    Args:\n      pattern: regex pattern\n      rep: replacement text\n      s: search string\n\n    Returns:\n      string with replacements made (or original string if no replacements)", "id": "f7245:m9"}
{"signature": "def CheckOperatorSpacing(filename, clean_lines, linenum, error):", "body": "line = clean_lines.elided[linenum]<EOL>while True:<EOL><INDENT>match = Match(r'<STR_LIT>', line)<EOL>if match:<EOL><INDENT>line = match.group(<NUM_LIT:1>) + ('<STR_LIT:_>' * len(match.group(<NUM_LIT:2>))) + match.group(<NUM_LIT:3>)<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if ((Search(r'<STR_LIT>', line) or<EOL>Search(r'<STR_LIT>', line))<EOL>and not Search(r'<STR_LIT>', line)<EOL>and not Search(r'<STR_LIT>', line)<EOL>and not Search(r'<STR_LIT>', line)):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:4>,<EOL>'<STR_LIT>')<EOL><DEDENT>match = Search(r'<STR_LIT>', line)<EOL>if match:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:3>,<EOL>'<STR_LIT>' % match.group(<NUM_LIT:1>))<EOL><DEDENT>elif not Match(r'<STR_LIT>', line):<EOL><INDENT>match = Match(r'<STR_LIT>', line)<EOL>if match:<EOL><INDENT>(_, _, end_pos) = CloseExpression(<EOL>clean_lines, linenum, len(match.group(<NUM_LIT:1>)))<EOL>if end_pos <= -<NUM_LIT:1>:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:3>,<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>match = Match(r'<STR_LIT>', line)<EOL>if match:<EOL><INDENT>(_, _, start_pos) = ReverseCloseExpression(<EOL>clean_lines, linenum, len(match.group(<NUM_LIT:1>)))<EOL>if start_pos <= -<NUM_LIT:1>:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:3>,<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>match = Search(r'<STR_LIT>', line)<EOL>if (match and not (match.group(<NUM_LIT:1>).isdigit() and match.group(<NUM_LIT:2>).isdigit()) and<EOL>not (match.group(<NUM_LIT:1>) == '<STR_LIT>' and match.group(<NUM_LIT:2>) == '<STR_LIT:;>')):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:3>,<EOL>'<STR_LIT>')<EOL><DEDENT>match = Search(r'<STR_LIT>', line)<EOL>if match:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:3>,<EOL>'<STR_LIT>')<EOL><DEDENT>match = Search(r'<STR_LIT>', line)<EOL>if 
match:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:4>,<EOL>'<STR_LIT>' % match.group(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "Checks for horizontal spacing around operators.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      error: The function to call with any errors found.", "id": "f7245:m56"}
{"signature": "def _ExpandDirectories(filenames):", "body": "expanded = set()<EOL>for filename in filenames:<EOL><INDENT>if not os.path.isdir(filename):<EOL><INDENT>expanded.add(filename)<EOL>continue<EOL><DEDENT>for root, _, files in os.walk(filename):<EOL><INDENT>for loopfile in files:<EOL><INDENT>fullname = os.path.join(root, loopfile)<EOL>if fullname.startswith('<STR_LIT:.>' + os.path.sep):<EOL><INDENT>fullname = fullname[len('<STR_LIT:.>' + os.path.sep):]<EOL><DEDENT>expanded.add(fullname)<EOL><DEDENT><DEDENT><DEDENT>filtered = []<EOL>for filename in expanded:<EOL><INDENT>if os.path.splitext(filename)[<NUM_LIT:1>][<NUM_LIT:1>:] in GetAllExtensions():<EOL><INDENT>filtered.append(filename)<EOL><DEDENT><DEDENT>return filtered<EOL>", "docstring": "Searches a list of filenames and replaces directories in the list with\n    all files descending from those directories. Files with extensions not in\n    the valid extensions list are excluded.\n\n    Args:\n      filenames: A list of files or directories\n\n    Returns:\n      A list of all files that are members of filenames or descended from a\n      directory in filenames", "id": "f7245:m104"}
{"signature": "def CheckCompletedBlocks(self, filename, error):", "body": "<EOL>for obj in self.stack:<EOL><INDENT>if isinstance(obj, _ClassInfo):<EOL><INDENT>error(filename, obj.starting_linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>' %<EOL>obj.name)<EOL><DEDENT>elif isinstance(obj, _NamespaceInfo):<EOL><INDENT>error(filename, obj.starting_linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>' %<EOL>obj.name)<EOL><DEDENT><DEDENT>", "docstring": "Checks that all classes and namespaces have been completely parsed.\n\n        Call this when all lines in a file have been processed.\n        Args:\n          filename: The name of the current file.\n          error: The function to call with any errors found.", "id": "f7245:c11:m10"}
{"signature": "def CheckInvalidIncrement(filename, clean_lines, linenum, error):", "body": "line = clean_lines.elided[linenum]<EOL>if _RE_PATTERN_INVALID_INCREMENT.match(line):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Checks for invalid increment *count++.\n\n    For example following function:\n    void increment_counter(int* count) {\n      *count++;\n    }\n    is invalid, because it effectively does count++, moving pointer, and should\n    be replaced with ++*count, (*count)++ or *count += 1.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      error: The function to call with any errors found.", "id": "f7245:m45"}
{"signature": "def InAsmBlock(self):", "body": "return self.stack and self.stack[-<NUM_LIT:1>].inline_asm != _NO_ASM<EOL>", "docstring": "Check if we are currently one level inside an inline ASM block.\n\n        Returns:\n          True if the top of the stack is a block containing inline ASM.", "id": "f7245:c11:m5"}
{"signature": "def _SetCountingStyle(level):", "body": "_cpplint_state.SetCountingStyle(level)<EOL>", "docstring": "Sets the module's counting options.", "id": "f7245:m16"}
{"signature": "def Search(pattern, s):", "body": "if pattern not in _regexp_compile_cache:<EOL><INDENT>_regexp_compile_cache[pattern] = sre_compile.compile(pattern)<EOL><DEDENT>return _regexp_compile_cache[pattern].search(s)<EOL>", "docstring": "Searches the string for the pattern, caching the compiled regexp.", "id": "f7245:m10"}
{"signature": "def CheckForBadCharacters(filename, lines, error):", "body": "for linenum, line in enumerate(lines):<EOL><INDENT>if unicode_escape_decode('<STR_LIT>') in line:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' in line:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>, '<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Logs an error for each line containing bad characters.\n\n    Two kinds of bad characters:\n\n    1. Unicode replacement characters: These indicate that either the file\n    contained invalid UTF-8 (likely) or Unicode replacement characters (which\n    it shouldn't).  Note that it's possible for this to throw off line\n    numbering if the invalid UTF-8 occurred adjacent to a newline.\n\n    2. NUL bytes.  These are problematic for some tools.\n\n    Args:\n      filename: The name of the current file.\n      lines: An array of strings, each representing a line of the file.\n      error: The function to call with any errors found.", "id": "f7245:m40"}
{"signature": "def BaseName(self):", "body": "return self.Split()[<NUM_LIT:1>]<EOL>", "docstring": "File base name - text after the final slash, before the final period.", "id": "f7245:c4:m4"}
{"signature": "def CheckForCopyright(filename, lines, error):", "body": "<EOL>for line in range(<NUM_LIT:1>, min(len(lines), <NUM_LIT:11>)):<EOL><INDENT>if re.search(r'<STR_LIT>', lines[line], re.I): break<EOL><DEDENT>else:                       <EOL><INDENT>error(filename, <NUM_LIT:0>, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Logs an error if no Copyright message appears at the top of the file.", "id": "f7245:m35"}
{"signature": "def _FilterExcludedFiles(filenames):", "body": "exclude_paths = [os.path.abspath(f) for f in _excludes]<EOL>return [f for f in filenames if os.path.abspath(f) not in exclude_paths]<EOL>", "docstring": "Filters out files listed in the --exclude command line switch. File paths\n    in the switch are evaluated relative to the current working directory", "id": "f7245:m105"}
{"signature": "def IsBlockInNameSpace(nesting_state, is_forward_declaration):", "body": "if is_forward_declaration:<EOL><INDENT>return len(nesting_state.stack) >= <NUM_LIT:1> and (<EOL>isinstance(nesting_state.stack[-<NUM_LIT:1>], _NamespaceInfo))<EOL><DEDENT>return (len(nesting_state.stack) > <NUM_LIT:1> and<EOL>nesting_state.stack[-<NUM_LIT:1>].check_namespace_indentation and<EOL>isinstance(nesting_state.stack[-<NUM_LIT:2>], _NamespaceInfo))<EOL>", "docstring": "Checks that the new block is directly in a namespace.\n\n    Args:\n      nesting_state: The _NestingState object that contains info about our state.\n      is_forward_declaration: If the class is a forward declared class.\n    Returns:\n      Whether or not the new block is directly in a namespace.", "id": "f7245:m92"}
{"signature": "def UpdatePreprocessor(self, line):", "body": "if Match(r'<STR_LIT>', line):<EOL><INDENT>self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))<EOL><DEDENT>elif Match(r'<STR_LIT>', line):<EOL><INDENT>if self.pp_stack:<EOL><INDENT>if not self.pp_stack[-<NUM_LIT:1>].seen_else:<EOL><INDENT>self.pp_stack[-<NUM_LIT:1>].seen_else = True<EOL>self.pp_stack[-<NUM_LIT:1>].stack_before_else = copy.deepcopy(self.stack)<EOL><DEDENT>self.stack = copy.deepcopy(self.pp_stack[-<NUM_LIT:1>].stack_before_if)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>elif Match(r'<STR_LIT>', line):<EOL><INDENT>if self.pp_stack:<EOL><INDENT>if self.pp_stack[-<NUM_LIT:1>].seen_else:<EOL><INDENT>self.stack = self.pp_stack[-<NUM_LIT:1>].stack_before_else<EOL><DEDENT>self.pp_stack.pop()<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>", "docstring": "Update preprocessor stack.\n\n        We need to handle preprocessors due to classes like this:\n          #ifdef SWIG\n          struct ResultDetailsPageElementExtensionPoint {\n          #else\n          struct ResultDetailsPageElementExtensionPoint : public Extension {\n          #endif\n\n        We make the following assumptions (good enough for most files):\n        - Preprocessor condition evaluates to true from #if up to first\n          #else/#elif/#endif.\n\n        - Preprocessor condition evaluates to false from #else/#elif up\n          to #endif.  We still perform lint checks on these lines, but\n          these do not affect nesting stack.\n\n        Args:\n          line: current line to check.", "id": "f7245:c11:m7"}
{"signature": "def _Filters():", "body": "return _cpplint_state.filters<EOL>", "docstring": "Returns the module's list of output filters, as a list.", "id": "f7245:m17"}
{"signature": "def CheckEnd(self, filename, clean_lines, linenum, error):", "body": "pass<EOL>", "docstring": "Run checks that applies to text after the closing brace.\n\n        This is mostly used for checking end of namespace comments.\n\n        Args:\n          filename: The name of the current file.\n          clean_lines: A CleansedLines instance containing the file.\n          linenum: The number of the line to check.\n          error: The function to call with any errors found.", "id": "f7245:c6:m2"}
{"signature": "def IsSource(self):", "body": "return _IsSourceExtension(self.Extension()[<NUM_LIT:1>:])<EOL>", "docstring": "File has a source file extension.", "id": "f7245:c4:m7"}
{"signature": "def Begin(self, function_name):", "body": "self.in_a_function = True<EOL>self.lines_in_function = <NUM_LIT:0><EOL>self.current_function = function_name<EOL>", "docstring": "Start analyzing function body.\n\n        Args:\n          function_name: The name of the function being tracked.", "id": "f7245:c2:m1"}
{"signature": "def ResetNolintSuppressions():", "body": "_error_suppressions.clear()<EOL>_global_error_suppressions.clear()<EOL>", "docstring": "Resets the set of NOLINT suppressions to empty.", "id": "f7245:m6"}
{"signature": "def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,<EOL>error):", "body": "<EOL>raw_lines = clean_lines.lines_without_raw_strings<EOL>line = raw_lines[linenum]<EOL>prev = raw_lines[linenum - <NUM_LIT:1>] if linenum > <NUM_LIT:0> else '<STR_LIT>'<EOL>if line.find('<STR_LIT:\\t>') != -<NUM_LIT:1>:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:1>,<EOL>'<STR_LIT>')<EOL><DEDENT>scope_or_label_pattern = r'<STR_LIT>'<EOL>classinfo = nesting_state.InnermostClass()<EOL>initial_spaces = <NUM_LIT:0><EOL>cleansed_line = clean_lines.elided[linenum]<EOL>while initial_spaces < len(line) and line[initial_spaces] == '<STR_LIT:U+0020>':<EOL><INDENT>initial_spaces += <NUM_LIT:1><EOL><DEDENT>if (not Search(r'<STR_LIT>', prev) and<EOL>(initial_spaces == <NUM_LIT:1> or initial_spaces == <NUM_LIT:3>) and<EOL>not Match(scope_or_label_pattern, cleansed_line) and<EOL>not (clean_lines.raw_lines[linenum] != line and<EOL>Match(r'<STR_LIT>', line))):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:3>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if line and line[-<NUM_LIT:1>].isspace():<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:4>,<EOL>'<STR_LIT>')<EOL><DEDENT>is_header_guard = False<EOL>if file_extension in GetHeaderExtensions():<EOL><INDENT>cppvar = GetHeaderGuardCPPVariable(filename)<EOL>if (line.startswith('<STR_LIT>' % cppvar) or<EOL>line.startswith('<STR_LIT>' % cppvar) or<EOL>line.startswith('<STR_LIT>' % cppvar)):<EOL><INDENT>is_header_guard = True<EOL><DEDENT><DEDENT>if (not line.startswith('<STR_LIT>') and not is_header_guard and<EOL>not Match(r'<STR_LIT>', line) and<EOL>not Match(r'<STR_LIT>', line) and<EOL>not Match(r'<STR_LIT>', line) and<EOL>not Match(r'<STR_LIT>', line)):<EOL><INDENT>line_width = GetLineWidth(line)<EOL>if line_width > _line_length:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:2>,<EOL>'<STR_LIT>' % _line_length)<EOL><DEDENT><DEDENT>if (cleansed_line.count('<STR_LIT:;>') 
> <NUM_LIT:1> and<EOL>not Match(r'<STR_LIT>',<EOL>line) and<EOL>cleansed_line.find('<STR_LIT>') == -<NUM_LIT:1> and<EOL>(GetPreviousNonBlankLine(clean_lines, linenum)[<NUM_LIT:0>].find('<STR_LIT>') == -<NUM_LIT:1> or<EOL>GetPreviousNonBlankLine(clean_lines, linenum)[<NUM_LIT:0>].find('<STR_LIT:;>') != -<NUM_LIT:1>) and<EOL>not ((cleansed_line.find('<STR_LIT>') != -<NUM_LIT:1> or<EOL>cleansed_line.find('<STR_LIT>') != -<NUM_LIT:1>) and<EOL>cleansed_line.find('<STR_LIT>') != -<NUM_LIT:1>)):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:0>,<EOL>'<STR_LIT>')<EOL><DEDENT>CheckBraces(filename, clean_lines, linenum, error)<EOL>CheckTrailingSemicolon(filename, clean_lines, linenum, error)<EOL>CheckEmptyBlockBody(filename, clean_lines, linenum, error)<EOL>CheckAccess(filename, clean_lines, linenum, nesting_state, error)<EOL>CheckSpacing(filename, clean_lines, linenum, nesting_state, error)<EOL>CheckOperatorSpacing(filename, clean_lines, linenum, error)<EOL>CheckParenthesisSpacing(filename, clean_lines, linenum, error)<EOL>CheckCommaSpacing(filename, clean_lines, linenum, error)<EOL>CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)<EOL>CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)<EOL>CheckCheck(filename, clean_lines, linenum, error)<EOL>CheckAltTokens(filename, clean_lines, linenum, error)<EOL>classinfo = nesting_state.InnermostClass()<EOL>if classinfo:<EOL><INDENT>CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)<EOL><DEDENT>", "docstring": "Checks rules from the 'C++ style rules' section of cppguide.html.\n\n    Most of these rules are hard to test (naming, comment style), but we\n    do what we can.  
In particular we check for 2-space indents, line lengths,\n    tab usage, spaces inside code, etc.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      file_extension: The extension (without the dot) of the filename.\n      nesting_state: A NestingState instance which maintains information about\n                     the current stack of nested blocks being parsed.\n      error: The function to call with any errors found.", "id": "f7245:m71"}
{"signature": "def IsBlankLine(line):", "body": "return not line or line.isspace()<EOL>", "docstring": "Returns true if the given line is blank.\n\n    We consider a line to be blank if the line is empty or consists of\n    only white spaces.\n\n    Args:\n      line: A line of a string.\n\n    Returns:\n      True, if the given line is blank.", "id": "f7245:m50"}
{"signature": "def CheckParenthesisSpacing(filename, clean_lines, linenum, error):", "body": "line = clean_lines.elided[linenum]<EOL>match = Search(r'<STR_LIT>', line)<EOL>if match:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>' % match.group(<NUM_LIT:1>))<EOL><DEDENT>match = Search(r'<STR_LIT>'<EOL>r'<STR_LIT>',<EOL>line)<EOL>if match:<EOL><INDENT>if len(match.group(<NUM_LIT:2>)) != len(match.group(<NUM_LIT:4>)):<EOL><INDENT>if not (match.group(<NUM_LIT:3>) == '<STR_LIT:;>' and<EOL>len(match.group(<NUM_LIT:2>)) == <NUM_LIT:1> + len(match.group(<NUM_LIT:4>)) or<EOL>not match.group(<NUM_LIT:2>) and Search(r'<STR_LIT>', line)):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>' % match.group(<NUM_LIT:1>))<EOL><DEDENT><DEDENT>if len(match.group(<NUM_LIT:2>)) not in [<NUM_LIT:0>, <NUM_LIT:1>]:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>' %<EOL>match.group(<NUM_LIT:1>))<EOL><DEDENT><DEDENT>", "docstring": "Checks for horizontal spacing around parentheses.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      error: The function to call with any errors found.", "id": "f7245:m57"}
{"signature": "def RepositoryName(self):", "body": "fullname = self.FullName()<EOL>if os.path.exists(fullname):<EOL><INDENT>project_dir = os.path.dirname(fullname)<EOL>if _repository:<EOL><INDENT>repo = FileInfo(_repository).FullName()<EOL>root_dir = project_dir<EOL>while os.path.exists(root_dir):<EOL><INDENT>if os.path.normcase(root_dir) == os.path.normcase(repo):<EOL><INDENT>return os.path.relpath(fullname, root_dir).replace('<STR_LIT:\\\\>', '<STR_LIT:/>')<EOL><DEDENT>one_up_dir = os.path.dirname(root_dir)<EOL>if one_up_dir == root_dir:<EOL><INDENT>break<EOL><DEDENT>root_dir = one_up_dir<EOL><DEDENT><DEDENT>if os.path.exists(os.path.join(project_dir, \"<STR_LIT>\")):<EOL><INDENT>root_dir = project_dir<EOL>one_up_dir = os.path.dirname(root_dir)<EOL>while os.path.exists(os.path.join(one_up_dir, \"<STR_LIT>\")):<EOL><INDENT>root_dir = os.path.dirname(root_dir)<EOL>one_up_dir = os.path.dirname(one_up_dir)<EOL><DEDENT>prefix = os.path.commonprefix([root_dir, project_dir])<EOL>return fullname[len(prefix) + <NUM_LIT:1>:]<EOL><DEDENT>root_dir = current_dir = os.path.dirname(fullname)<EOL>while current_dir != os.path.dirname(current_dir):<EOL><INDENT>if (os.path.exists(os.path.join(current_dir, \"<STR_LIT>\")) or<EOL>os.path.exists(os.path.join(current_dir, \"<STR_LIT>\")) or<EOL>os.path.exists(os.path.join(current_dir, \"<STR_LIT>\"))):<EOL><INDENT>root_dir = current_dir<EOL><DEDENT>current_dir = os.path.dirname(current_dir)<EOL><DEDENT>if (os.path.exists(os.path.join(root_dir, \"<STR_LIT>\")) or<EOL>os.path.exists(os.path.join(root_dir, \"<STR_LIT>\")) or<EOL>os.path.exists(os.path.join(root_dir, \"<STR_LIT>\"))):<EOL><INDENT>prefix = os.path.commonprefix([root_dir, project_dir])<EOL>return fullname[len(prefix) + <NUM_LIT:1>:]<EOL><DEDENT><DEDENT>return fullname<EOL>", "docstring": "r\"\"\"FullName after removing the local path to the repository.\n\n        If we have a real absolute path name here we can try to do something smart:\n        detecting the root of the 
checkout and truncating /path/to/checkout from\n        the name so that we get header guards that don't include things like\n        \"C:\\Documents and Settings\\...\" or \"/home/username/...\" in them and thus\n        people on different computers who have checked the source out to different\n        locations won't see bogus errors.", "id": "f7245:c4:m2"}
{"signature": "def FlagCxx14Features(filename, clean_lines, linenum, error):", "body": "line = clean_lines.elided[linenum]<EOL>include = Match(r'<STR_LIT>', line)<EOL>if include and include.group(<NUM_LIT:1>) in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>('<STR_LIT>') % include.group(<NUM_LIT:1>))<EOL><DEDENT>", "docstring": "Flag those C++14 features that we restrict.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      error: The function to call with any errors found.", "id": "f7245:m97"}
{"signature": "def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):", "body": "<EOL>raw = clean_lines.lines_without_raw_strings<EOL>line = raw[linenum]<EOL>if (IsBlankLine(line) and<EOL>not nesting_state.InNamespaceBody() and<EOL>not nesting_state.InExternC()):<EOL><INDENT>elided = clean_lines.elided<EOL>prev_line = elided[linenum - <NUM_LIT:1>]<EOL>prevbrace = prev_line.rfind('<STR_LIT:{>')<EOL>if prevbrace != -<NUM_LIT:1> and prev_line[prevbrace:].find('<STR_LIT:}>') == -<NUM_LIT:1>:<EOL><INDENT>exception = False<EOL>if Match(r'<STR_LIT>', prev_line):  <EOL><INDENT>search_position = linenum-<NUM_LIT:2><EOL>while (search_position >= <NUM_LIT:0><EOL>and Match(r'<STR_LIT>', elided[search_position])):<EOL><INDENT>search_position -= <NUM_LIT:1><EOL><DEDENT>exception = (search_position >= <NUM_LIT:0><EOL>and elided[search_position][:<NUM_LIT:5>] == '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>exception = (Match(r'<STR_LIT>',<EOL>prev_line)<EOL>or Match(r'<STR_LIT>', prev_line))<EOL><DEDENT>if not exception:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:2>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>if linenum + <NUM_LIT:1> < clean_lines.NumLines():<EOL><INDENT>next_line = raw[linenum + <NUM_LIT:1>]<EOL>if (next_line<EOL>and Match(r'<STR_LIT>', next_line)<EOL>and next_line.find('<STR_LIT>') == -<NUM_LIT:1>):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:3>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>matched = Match(r'<STR_LIT>', prev_line)<EOL>if matched:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:3>,<EOL>'<STR_LIT>' % matched.group(<NUM_LIT:1>))<EOL><DEDENT><DEDENT>next_line_start = <NUM_LIT:0><EOL>if linenum + <NUM_LIT:1> < clean_lines.NumLines():<EOL><INDENT>next_line = raw[linenum + <NUM_LIT:1>]<EOL>next_line_start = len(next_line) - len(next_line.lstrip())<EOL><DEDENT>CheckComment(line, filename, linenum, next_line_start, error)<EOL>line = clean_lines.elided[linenum]<EOL>if 
Search(r'<STR_LIT>', line) and not Search(r'<STR_LIT>', line):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>')<EOL><DEDENT>if (Search(r'<STR_LIT>', line) or<EOL>Search(r'<STR_LIT>', line)):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:2>,<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Checks for the correctness of various spacing issues in the code.\n\n    Things we check for: spaces around operators, spaces after\n    if/for/while/switch, no spaces around parens in function calls, two\n    spaces between code and comment, don't start a block with a blank\n    line, don't end a function with a blank line, don't add a blank line\n    after public/protected/private, don't have too many blank lines in a row.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      nesting_state: A NestingState instance which maintains information about\n                     the current stack of nested blocks being parsed.\n      error: The function to call with any errors found.", "id": "f7245:m55"}
{"signature": "def NoExtension(self):", "body": "return '<STR_LIT:/>'.join(self.Split()[<NUM_LIT:0>:<NUM_LIT:2>])<EOL>", "docstring": "File has no source file extension.", "id": "f7245:c4:m6"}
{"signature": "def CheckEnd(self, filename, clean_lines, linenum, error):", "body": "line = clean_lines.raw_lines[linenum]<EOL>if (linenum - self.starting_linenum < <NUM_LIT:10><EOL>and not Match(r'<STR_LIT>', line)):<EOL><INDENT>return<EOL><DEDENT>if self.name:<EOL><INDENT>if not Match((r'<STR_LIT>' +<EOL>re.escape(self.name) + r'<STR_LIT>'),<EOL>line):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>' %<EOL>self.name)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if not Match(r'<STR_LIT>', line):<EOL><INDENT>if Match(r'<STR_LIT>', line):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Check end of namespace comments.", "id": "f7245:c9:m1"}
{"signature": "def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):", "body": "line = clean_lines.elided[linenum]<EOL>match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)<EOL>if match:<EOL><INDENT>error(filename, linenum, '<STR_LIT>',<EOL><NUM_LIT:4>,  <EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Check that make_pair's template arguments are deduced.\n\n    G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are\n    specified explicitly, and such use isn't intended in any case.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      error: The function to call with any errors found.", "id": "f7245:m89"}
{"signature": "def _VerboseLevel():", "body": "return _cpplint_state.verbose_level<EOL>", "docstring": "Returns the module's verbosity setting.", "id": "f7245:m14"}
{"signature": "def GetIndentLevel(line):", "body": "indent = Match(r'<STR_LIT>', line)<EOL>if indent:<EOL><INDENT>return len(indent.group(<NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "Return the number of leading spaces in line.\n\n    Args:\n      line: A string to check.\n\n    Returns:\n      An integer count of leading spaces, possibly zero.", "id": "f7245:m36"}
{"signature": "def CheckVlogArguments(filename, clean_lines, linenum, error):", "body": "line = clean_lines.elided[linenum]<EOL>if Search(r'<STR_LIT>', line):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Checks that VLOG() is only used for defining a logging level.\n\n    For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and\n    VLOG(FATAL) are not.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      error: The function to call with any errors found.", "id": "f7245:m44"}
{"signature": "def CheckPosixThreading(filename, clean_lines, linenum, error):", "body": "line = clean_lines.elided[linenum]<EOL>for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:<EOL><INDENT>if Search(pattern, line):<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:2>,<EOL>'<STR_LIT>' + multithread_safe_func +<EOL>'<STR_LIT>' + single_thread_func +<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Checks for calls to thread-unsafe functions.\n\n    Much code has been originally written without consideration of\n    multi-threading. Also, engineers are relying on their old experience;\n    they have learned posix before threading extensions were added. These\n    tests guide the engineers to use thread-safe functions (when using\n    posix directly).\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      error: The function to call with any errors found.", "id": "f7245:m43"}
{"signature": "def GetHeaderGuardCPPVariable(filename):", "body": "<EOL>filename = re.sub(r'<STR_LIT>', '<STR_LIT>', filename)<EOL>filename = re.sub(r'<STR_LIT>', r'<STR_LIT>', filename)<EOL>filename = filename.replace('<STR_LIT>', '<STR_LIT>').replace('<STR_LIT>', '<STR_LIT>')<EOL>fileinfo = FileInfo(filename)<EOL>file_path_from_root = fileinfo.RepositoryName()<EOL>if _root:<EOL><INDENT>suffix = os.sep<EOL>if suffix == '<STR_LIT:\\\\>':<EOL><INDENT>suffix += '<STR_LIT:\\\\>'<EOL><DEDENT>file_path_from_root = re.sub('<STR_LIT>' + _root + suffix, '<STR_LIT>', file_path_from_root)<EOL><DEDENT>return re.sub(r'<STR_LIT>', '<STR_LIT:_>', file_path_from_root).upper() + '<STR_LIT:_>'<EOL>", "docstring": "Returns the CPP variable that should be used as a header guard.\n\n    Args:\n      filename: The name of a C++ header file.\n\n    Returns:\n      The CPP variable that should be used as a header guard in the\n      named file.", "id": "f7245:m37"}
{"signature": "def InTemplateArgumentList(self, clean_lines, linenum, pos):", "body": "while linenum < clean_lines.NumLines():<EOL><INDENT>line = clean_lines.elided[linenum]<EOL>match = Match(r'<STR_LIT>', line[pos:])<EOL>if not match:<EOL><INDENT>linenum += <NUM_LIT:1><EOL>pos = <NUM_LIT:0><EOL>continue<EOL><DEDENT>token = match.group(<NUM_LIT:1>)<EOL>pos += len(match.group(<NUM_LIT:0>))<EOL>if token in ('<STR_LIT:{>', '<STR_LIT:}>', '<STR_LIT:;>'): return False<EOL>if token in ('<STR_LIT:>>', '<STR_LIT:=>', '<STR_LIT:[>', '<STR_LIT:]>', '<STR_LIT:.>'): return True<EOL>if token != '<STR_LIT:<>':<EOL><INDENT>pos += <NUM_LIT:1><EOL>if pos >= len(line):<EOL><INDENT>linenum += <NUM_LIT:1><EOL>pos = <NUM_LIT:0><EOL><DEDENT>continue<EOL><DEDENT>(_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - <NUM_LIT:1>)<EOL>if end_pos < <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>linenum = end_line<EOL>pos = end_pos<EOL><DEDENT>return False<EOL>", "docstring": "Check if current position is inside template argument list.\n\n        Args:\n          clean_lines: A CleansedLines instance containing the file.\n          linenum: The number of the line to check.\n          pos: position just after the suspected template argument.\n        Returns:\n          True if (linenum, pos) is inside template arguments.", "id": "f7245:c11:m6"}
{"signature": "def CheckForFunctionLengths(filename, clean_lines, linenum,<EOL>function_state, error):", "body": "lines = clean_lines.lines<EOL>line = lines[linenum]<EOL>joined_line = '<STR_LIT>'<EOL>starting_func = False<EOL>regexp = r'<STR_LIT>'  <EOL>match_result = Match(regexp, line)<EOL>if match_result:<EOL><INDENT>function_name = match_result.group(<NUM_LIT:1>).split()[-<NUM_LIT:1>]<EOL>if function_name == '<STR_LIT>' or function_name == '<STR_LIT>' or (<EOL>not Match(r'<STR_LIT>', function_name)):<EOL><INDENT>starting_func = True<EOL><DEDENT><DEDENT>if starting_func:<EOL><INDENT>body_found = False<EOL>for start_linenum in range(linenum, clean_lines.NumLines()):<EOL><INDENT>start_line = lines[start_linenum]<EOL>joined_line += '<STR_LIT:U+0020>' + start_line.lstrip()<EOL>if Search(r'<STR_LIT>', start_line):  <EOL><INDENT>body_found = True<EOL>break                              <EOL><DEDENT>elif Search(r'<STR_LIT:{>', start_line):<EOL><INDENT>body_found = True<EOL>function = Search(r'<STR_LIT>', line).group(<NUM_LIT:1>)<EOL>if Match(r'<STR_LIT>', function):    <EOL><INDENT>parameter_regexp = Search(r'<STR_LIT>', joined_line)<EOL>if parameter_regexp:             <EOL><INDENT>function += parameter_regexp.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>function += '<STR_LIT>'<EOL><DEDENT>function_state.Begin(function)<EOL>break<EOL><DEDENT><DEDENT>if not body_found:<EOL><INDENT>error(filename, linenum, '<STR_LIT>', <NUM_LIT:5>,<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>elif Match(r'<STR_LIT>', line):  <EOL><INDENT>function_state.Check(error, filename, linenum)<EOL>function_state.End()<EOL><DEDENT>elif not Match(r'<STR_LIT>', line):<EOL><INDENT>function_state.Count()<EOL><DEDENT>", "docstring": "Reports for long function bodies.\n\n    For an overview why this is done, see:\n    https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions\n\n    Uses a simplistic algorithm assuming other style guidelines\n    (especially spacing) are 
followed.\n    Only checks unindented functions, so class members are unchecked.\n    Trivial bodies are unchecked, so constructors with huge initializer lists\n    may be missed.\n    Blank/comment lines are not counted so as to avoid encouraging the removal\n    of vertical space and comments just to get through a lint check.\n    NOLINT *on the last line of a function* disables this check.\n\n    Args:\n      filename: The name of the current file.\n      clean_lines: A CleansedLines instance containing the file.\n      linenum: The number of the line to check.\n      function_state: Current function name and lines in body so far.\n      error: The function to call with any errors found.", "id": "f7245:m52"}
{"signature": "def Error(filename, linenum, category, confidence, message):", "body": "if _ShouldPrintError(category, confidence, linenum):<EOL><INDENT>_cpplint_state.IncrementErrorCount(category)<EOL>if _cpplint_state.output_format == '<STR_LIT>':<EOL><INDENT>_cpplint_state.PrintError('<STR_LIT>' % (<EOL>filename, linenum, message, category, confidence))<EOL><DEDENT>elif _cpplint_state.output_format == '<STR_LIT>':<EOL><INDENT>sys.stderr.write('<STR_LIT>' % (<EOL>filename, linenum, message, category, confidence))<EOL><DEDENT>elif _cpplint_state.output_format == '<STR_LIT>':<EOL><INDENT>_cpplint_state.AddJUnitFailure(filename, linenum, message, category,<EOL>confidence)<EOL><DEDENT>else:<EOL><INDENT>final_message = '<STR_LIT>' % (<EOL>filename, linenum, message, category, confidence)<EOL>sys.stderr.write(final_message)<EOL><DEDENT><DEDENT>", "docstring": "Logs the fact we've found a lint error.\n\n    We log where the error was found, and also our confidence in the error,\n    that is, how certain we are this is a legitimate style regression, and\n    not a misidentification or a use that's sometimes justified.\n\n    False positives can be suppressed by the use of\n    \"cpplint(category)\"  comments on the offending line.  These are\n    parsed into _error_suppressions.\n\n    Args:\n      filename: The name of the file containing the error.\n      linenum: The number of the line containing the error.\n      category: A string used to describe the \"category\" this bug\n        falls under: \"whitespace\", say, or \"runtime\".  Categories\n        may have a hierarchy separated by slashes: \"whitespace/indent\".\n      confidence: A number from 1-5 representing a confidence score for\n        the error, with 5 meaning that we are certain of the problem,\n        and 1 meaning that it could be a legitimate construct.\n      message: The error message.", "id": "f7245:m23"}
{"signature": "def CleanseComments(line):", "body": "commentpos = line.find('<STR_LIT>')<EOL>if commentpos != -<NUM_LIT:1> and not IsCppString(line[:commentpos]):<EOL><INDENT>line = line[:commentpos].rstrip()<EOL><DEDENT>return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('<STR_LIT>', line)<EOL>", "docstring": "Removes //-comments and single-line C-style /* */ comments.\n\n    Args:\n      line: A line of C++ source.\n\n    Returns:\n      The line with single-line comments removed.", "id": "f7245:m30"}
{"signature": "def Check(self, error, filename, linenum):", "body": "if not self.in_a_function:<EOL><INDENT>return<EOL><DEDENT>if Match(r'<STR_LIT>', self.current_function):<EOL><INDENT>base_trigger = self._TEST_TRIGGER<EOL><DEDENT>else:<EOL><INDENT>base_trigger = self._NORMAL_TRIGGER<EOL><DEDENT>trigger = base_trigger * <NUM_LIT:2>**_VerboseLevel()<EOL>if self.lines_in_function > trigger:<EOL><INDENT>error_level = int(math.log(self.lines_in_function / base_trigger, <NUM_LIT:2>))<EOL>if error_level > <NUM_LIT:5>:<EOL><INDENT>error_level = <NUM_LIT:5><EOL><DEDENT>error(filename, linenum, '<STR_LIT>', error_level,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'  % (<EOL>self.current_function, self.lines_in_function, trigger))<EOL><DEDENT>", "docstring": "Report if too many lines in function body.\n\n        Args:\n          error: The function to call with any errors found.\n          filename: The name of the current file.\n          linenum: The number of the line to check.", "id": "f7245:c2:m3"}
{"signature": "def InClassDeclaration(self):", "body": "return self.stack and isinstance(self.stack[-<NUM_LIT:1>], _ClassInfo)<EOL>", "docstring": "Check if we are currently one level inside a class or struct declaration.\n\n        Returns:\n          True if top of the stack is a class/struct, False otherwise.", "id": "f7245:c11:m4"}
{"signature": "def FindNextMultiLineCommentStart(lines, lineix):", "body": "while lineix < len(lines):<EOL><INDENT>if lines[lineix].strip().startswith('<STR_LIT>'):<EOL><INDENT>if lines[lineix].strip().find('<STR_LIT>', <NUM_LIT:2>) < <NUM_LIT:0>:<EOL><INDENT>return lineix<EOL><DEDENT><DEDENT>lineix += <NUM_LIT:1><EOL><DEDENT>return len(lines)<EOL>", "docstring": "Find the beginning marker for a multiline comment.", "id": "f7245:m26"}
{"signature": "def ResetErrorCounts(self):", "body": "self.error_count = <NUM_LIT:0><EOL>self.errors_by_category = {}<EOL>", "docstring": "Sets the module's error statistic back to zero.", "id": "f7245:c1:m8"}
{"signature": "def _IsType(clean_lines, nesting_state, expr):", "body": "<EOL>last_word = Match(r'<STR_LIT>', expr)<EOL>if last_word:<EOL><INDENT>token = last_word.group(<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>token = expr<EOL><DEDENT>if _TYPES.match(token):<EOL><INDENT>return True<EOL><DEDENT>typename_pattern = (r'<STR_LIT>' + re.escape(token) +<EOL>r'<STR_LIT>')<EOL>block_index = len(nesting_state.stack) - <NUM_LIT:1><EOL>while block_index >= <NUM_LIT:0>:<EOL><INDENT>if isinstance(nesting_state.stack[block_index], _NamespaceInfo):<EOL><INDENT>return False<EOL><DEDENT>last_line = nesting_state.stack[block_index].starting_linenum<EOL>next_block_start = <NUM_LIT:0><EOL>if block_index > <NUM_LIT:0>:<EOL><INDENT>next_block_start = nesting_state.stack[block_index - <NUM_LIT:1>].starting_linenum<EOL><DEDENT>first_line = last_line<EOL>while first_line >= next_block_start:<EOL><INDENT>if clean_lines.elided[first_line].find('<STR_LIT>') >= <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>first_line -= <NUM_LIT:1><EOL><DEDENT>if first_line < next_block_start:<EOL><INDENT>block_index -= <NUM_LIT:1><EOL>continue<EOL><DEDENT>for i in xrange(first_line, last_line + <NUM_LIT:1>, <NUM_LIT:1>):<EOL><INDENT>if Search(typename_pattern, clean_lines.elided[i]):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>block_index -= <NUM_LIT:1><EOL><DEDENT>return False<EOL>", "docstring": "Check if expression looks like a type name, returns true if so.\n\n    Args:\n      clean_lines: A CleansedLines instance containing the file.\n      nesting_state: A NestingState instance which maintains information about\n                     the current stack of nested blocks being parsed.\n      expr: The expression to check.\n    Returns:\n      True, if token looks like a type.", "id": "f7245:m59"}
{"signature": "def SeenOpenBrace(self):", "body": "return (not self.stack) or self.stack[-<NUM_LIT:1>].seen_open_brace<EOL>", "docstring": "Check if we have seen the opening brace for the innermost block.\n\n        Returns:\n          True if we have seen the opening brace, False if the innermost\n          block is still expecting an opening brace.", "id": "f7245:c11:m1"}
{"signature": "def CheckBegin(self, filename, clean_lines, linenum, error):", "body": "pass<EOL>", "docstring": "Run checks that applies to text up to the opening brace.\n\n        This is mostly for checking the text after the class identifier\n        and the \"{\", usually where the base class is specified.  For other\n        blocks, there isn't much to check, so we always pass.\n\n        Args:\n          filename: The name of the current file.\n          clean_lines: A CleansedLines instance containing the file.\n          linenum: The number of the line to check.\n          error: The function to call with any errors found.", "id": "f7245:c6:m1"}
{"signature": "def get_http_response(self, server_host_port, path):", "body": "for _ in range(<NUM_LIT:0>, RETRY_ATTEMPTS):<EOL><INDENT>try:<EOL><INDENT>connection = HTTPConnection(server_host_port)<EOL>connection.request('<STR_LIT:GET>', path)<EOL>response = connection.getresponse()<EOL>return response<EOL><DEDENT>except Exception:<EOL><INDENT>time.sleep(RETRY_INTERVAL)<EOL>continue<EOL><DEDENT><DEDENT>raise status.TestFailure(\"<STR_LIT>\" % RETRY_ATTEMPTS)<EOL>", "docstring": "get HTTP response", "id": "f7273:c4:m3"}
{"signature": "def fetch_results(self):", "body": "try:<EOL><INDENT>if not os.path.exists(self.file_path):<EOL><INDENT>raise status.TestFailure(\"<STR_LIT>\" % self.file_path)<EOL><DEDENT>else:<EOL><INDENT>with open(self.file_path, \"<STR_LIT:r>\") as expected_result_file:<EOL><INDENT>return expected_result_file.read().rstrip()<EOL><DEDENT><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>raise status.TestFailure(\"<STR_LIT>\" % self.file_path, e)<EOL><DEDENT>", "docstring": "Read expected result from the expected result file", "id": "f7273:c2:m1"}
{"signature": "def _parse_actual_results(self, actual_results):", "body": "actual_nodes = dict()<EOL>actual_links = dict()<EOL>for bolt in actual_results.topology.bolts:<EOL><INDENT>name = bolt.comp.name<EOL>if name not in actual_links:<EOL><INDENT>actual_links[name] = set()<EOL><DEDENT>for input in bolt.inputs:<EOL><INDENT>actual_links[name].add(input.stream.component_name)<EOL><DEDENT><DEDENT>for instance in actual_results.instances:<EOL><INDENT>name = instance.info.component_name<EOL>if name not in actual_nodes:<EOL><INDENT>actual_nodes[name] = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>actual_nodes[name] += <NUM_LIT:1><EOL><DEDENT><DEDENT>return actual_nodes, actual_links<EOL>", "docstring": "Parse protobuf messege and generate actual_nodes and actual_links", "id": "f7273:c0:m4"}
{"signature": "def check_results(self):", "body": "expected_result = self.topology_structure_expected_results_handler.fetch_results()<EOL>actual_result = self.topology_structure_actual_results_handler.fetch_cur_pplan()<EOL>self.topology_structure_actual_results_handler.stop_state_mgr()<EOL>decoder = json.JSONDecoder(strict=False)<EOL>expected_results = decoder.decode(expected_result)<EOL>return self._compare(expected_results, actual_result)<EOL>", "docstring": "Checks the topology graph structure from zk with the expected results from local file", "id": "f7273:c0:m1"}
{"signature": "def _parse_expected_results(self, expected_results):", "body": "expected_nodes = dict()<EOL>expected_links = dict()<EOL>for bolt in expected_results[\"<STR_LIT>\"][\"<STR_LIT>\"]:<EOL><INDENT>name = bolt[\"<STR_LIT>\"][\"<STR_LIT:name>\"]<EOL>if name not in expected_links:<EOL><INDENT>expected_links[name] = set()<EOL><DEDENT>for input in bolt[\"<STR_LIT>\"]:<EOL><INDENT>expected_links[name].add(input[\"<STR_LIT>\"][\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>for instance in expected_results[\"<STR_LIT>\"]:<EOL><INDENT>name = instance[\"<STR_LIT:info>\"][\"<STR_LIT>\"]<EOL>if name not in expected_nodes:<EOL><INDENT>expected_nodes[name] = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>expected_nodes[name] += <NUM_LIT:1><EOL><DEDENT><DEDENT>return  expected_nodes, expected_links<EOL>", "docstring": "Parse JSON file and generate expected_nodes and expected_links", "id": "f7273:c0:m3"}
{"signature": "def main():", "body": "log.configure(level=logging.DEBUG)<EOL>conf_file = DEFAULT_TEST_CONF_FILE<EOL>conf_string = pkgutil.get_data(__name__, conf_file)<EOL>decoder = json.JSONDecoder(strict=False)<EOL>conf = decoder.decode(conf_string)<EOL>parser = argparse.ArgumentParser(description='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>',<EOL>default=conf['<STR_LIT>'])<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>', default=conf['<STR_LIT>'])<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>', default=conf['<STR_LIT>'])<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>', default=conf['<STR_LIT>'])<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>', type=int,<EOL>default=conf['<STR_LIT>'])<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>', default=None)<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>', default=None)<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>',<EOL>default=conf['<STR_LIT>'])<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>', default=<NUM_LIT:1>)<EOL>args, unknown_args = parser.parse_known_args()<EOL>if unknown_args:<EOL><INDENT>logging.error('<STR_LIT>', sys.argv[<NUM_LIT:0>], unknown_args[<NUM_LIT:0>])<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>tests_start_time = int(time.time())<EOL>run_tests(conf, args)<EOL>total = len(failures) + len(successes)<EOL>logging.info(\"<STR_LIT>\" % (int(time.time()) - tests_start_time))<EOL>if not failures:<EOL><INDENT>logging.info(\"<STR_LIT>\", len(successes))<EOL>for test in successes:<EOL><INDENT>logging.info(\"<STR_LIT>\", (\"<STR_LIT>\" % test[<NUM_LIT:1>]).ljust(<NUM_LIT:8>), 
test[<NUM_LIT:0>])<EOL><DEDENT>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>logging.error(\"<STR_LIT>\", len(failures), total)<EOL>for test in failures:<EOL><INDENT>logging.error(\"<STR_LIT>\", (\"<STR_LIT>\" % test[<NUM_LIT:1>]).ljust(<NUM_LIT:8>), test[<NUM_LIT:0>])<EOL><DEDENT>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "main", "id": "f7274:m14"}
{"signature": "def main():", "body": "log.configure(level=logging.DEBUG)<EOL>conf_file = DEFAULT_TEST_CONF_FILE<EOL>conf_string = pkgutil.get_data(__name__, conf_file)<EOL>decoder = json.JSONDecoder(strict=False)<EOL>conf = decoder.decode(conf_string)<EOL>args = dict()<EOL>home_directory = os.path.expanduser(\"<STR_LIT>\")<EOL>args['<STR_LIT>'] = conf['<STR_LIT>']<EOL>args['<STR_LIT>'] = conf['<STR_LIT>']['<STR_LIT>']<EOL>args['<STR_LIT>'] = conf['<STR_LIT>']['<STR_LIT>']<EOL>args['<STR_LIT>'] = os.path.join(<EOL>home_directory,<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>conf['<STR_LIT>'],<EOL>getpass.getuser(),<EOL>args['<STR_LIT>']<EOL>)<EOL>args['<STR_LIT>'] = os.path.expanduser(conf['<STR_LIT>'])<EOL>args['<STR_LIT>'] = os.path.expanduser(conf['<STR_LIT>'])<EOL>args['<STR_LIT>'] = _random_port()<EOL>args['<STR_LIT>'] = os.path.join(args['<STR_LIT>'], conf['<STR_LIT>']['<STR_LIT>'])<EOL>args['<STR_LIT>'] = os.path.join(args['<STR_LIT>'], conf['<STR_LIT>']['<STR_LIT>'])<EOL>args['<STR_LIT>'] = conf['<STR_LIT>']<EOL>test_classes = TEST_CLASSES<EOL>if len(sys.argv) > <NUM_LIT:1>:<EOL><INDENT>first_arg = sys.argv[<NUM_LIT:1>]<EOL>class_tokens = first_arg.split(\"<STR_LIT:.>\")<EOL>if first_arg == \"<STR_LIT>\" or len(class_tokens) < <NUM_LIT:2>:<EOL><INDENT>usage()<EOL><DEDENT>import importlib<EOL>package_tokens = class_tokens[:-<NUM_LIT:1>]<EOL>test_class = class_tokens[-<NUM_LIT:1>]<EOL>if len(package_tokens) == <NUM_LIT:1>: <EOL><INDENT>test_module = \"<STR_LIT>\" + package_tokens[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>test_module = '<STR_LIT:.>'.join(package_tokens)<EOL><DEDENT>logging.info(\"<STR_LIT>\", test_module)<EOL>logging.info(\"<STR_LIT>\", test_class)<EOL>test_classes = [getattr(importlib.import_module(test_module), test_class)]<EOL><DEDENT>start_time = time.time()<EOL>(successes, failures) = run_tests(test_classes, args)<EOL>elapsed_time = time.time() - start_time<EOL>total = len(failures) + len(successes)<EOL>if not 
failures:<EOL><INDENT>logging.info(\"<STR_LIT>\", len(successes))<EOL>logging.info(\"<STR_LIT>\", elapsed_time)<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>logging.error(\"<STR_LIT>\", len(failures), total)<EOL>for test in failures:<EOL><INDENT>logging.error(\"<STR_LIT>\", test)<EOL><DEDENT>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "main", "id": "f7278:m3"}
{"signature": "def _get_processes():", "body": "<EOL>processes = subprocess.check_output(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>processes = processes.split('<STR_LIT:\\n>')<EOL>processes = processes[<NUM_LIT:1>:] <EOL>process_list = []<EOL>for process in processes:<EOL><INDENT>if process == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>pretuple = process.split('<STR_LIT:U+0020>', <NUM_LIT:1>)<EOL>process_list.append(ProcessTuple(pretuple[<NUM_LIT:0>], pretuple[<NUM_LIT:1>]))<EOL><DEDENT>return process_list<EOL>", "docstring": "returns a list of process tuples (pid, cmd)\nThis only applies only for local scheduler as it uses the ps command\nand assumes the topology will be running on different processes on same machine", "id": "f7281:m3"}
{"signature": "def _check_results(self):", "body": "expected_result = \"<STR_LIT>\"<EOL>actual_result = \"<STR_LIT>\"<EOL>retries_left = RETRY_COUNT<EOL>_sleep(\"<STR_LIT>\" % self.testname, RETRY_INTERVAL)<EOL>while retries_left > <NUM_LIT:0>:<EOL><INDENT>retries_left -= <NUM_LIT:1><EOL>try:<EOL><INDENT>with open(self.params['<STR_LIT>'], '<STR_LIT:r>') as f:<EOL><INDENT>expected_result = f.read()<EOL><DEDENT>with open(self.params['<STR_LIT>'], '<STR_LIT:r>') as g:<EOL><INDENT>actual_result = g.read()<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>message =\"<STR_LIT>\" % self.testname<EOL>if retries_left == <NUM_LIT:0>:<EOL><INDENT>raise status.TestFailure(message, e)<EOL><DEDENT>logging.error(message, e)<EOL><DEDENT>expected_sorted = sorted(expected_result.split('<STR_LIT:\\n>'))<EOL>actual_sorted = sorted(actual_result.split('<STR_LIT:\\n>'))<EOL>if expected_sorted == actual_sorted:<EOL><INDENT>break<EOL><DEDENT>if retries_left > <NUM_LIT:0>:<EOL><INDENT>expected_result = \"<STR_LIT>\"<EOL>actual_result = \"<STR_LIT>\"<EOL>expected_sorted = []<EOL>actual_sorted = []<EOL>logging.info(\"<STR_LIT>\"+ \"<STR_LIT>\",<EOL>self.testname, RETRY_COUNT - retries_left, RETRY_COUNT, RETRY_INTERVAL)<EOL>time.sleep(RETRY_INTERVAL)<EOL><DEDENT><DEDENT>if actual_sorted == expected_sorted:<EOL><INDENT>success = status.TestSuccess(<EOL>\"<STR_LIT>\" % self.testname)<EOL>logging.info(\"<STR_LIT>\", actual_sorted)<EOL>logging.info(\"<STR_LIT>\", expected_sorted)<EOL>return success<EOL><DEDENT>else:<EOL><INDENT>failure = status.TestFailure(<EOL>\"<STR_LIT>\" % self.testname)<EOL>logging.info(\"<STR_LIT>\", actual_sorted)<EOL>logging.info(\"<STR_LIT>\", expected_sorted)<EOL>raise failure<EOL><DEDENT>", "docstring": "get actual and expected result.\n        retry if results are not equal a predesignated amount of times", "id": "f7281:c0:m11"}
{"signature": "def emit(self, tup, tup_id=None, stream=Stream.DEFAULT_STREAM_ID,<EOL>direct_task=None, need_task_ids=None):", "body": "<EOL>self.tuples_to_complete += <NUM_LIT:1><EOL>if tup_id is None:<EOL><INDENT>Log.info(\"<STR_LIT>\" % str(tup))<EOL>_tup_id = integ_const.INTEGRATION_TEST_MOCK_MESSAGE_ID<EOL><DEDENT>else:<EOL><INDENT>_tup_id = tup_id<EOL><DEDENT>super(IntegrationTestSpout, self).emit(tup, _tup_id, stream, direct_task, need_task_ids)<EOL>", "docstring": "Emits from this integration test spout\n\n        Overriden method which will be called when user's spout calls emit()", "id": "f7289:c0:m7"}
{"signature": "def add_bolt(self, name, bolt_cls, par, inputs, config=None, optional_outputs=None):", "body": "assert isinstance(inputs, dict)<EOL>user_spec = bolt_cls.spec(name)<EOL>bolt_classpath = user_spec.python_class_path<EOL>if hasattr(bolt_cls, '<STR_LIT>'):<EOL><INDENT>user_outputs = bolt_cls.outputs<EOL><DEDENT>else:<EOL><INDENT>user_outputs = []<EOL><DEDENT>if optional_outputs is not None:<EOL><INDENT>user_outputs.extend(optional_outputs)<EOL><DEDENT>if config is None:<EOL><INDENT>_config = {}<EOL><DEDENT>else:<EOL><INDENT>_config = config<EOL><DEDENT>test_spec = IntegrationTestBolt.spec(name, par, inputs, _config,<EOL>user_bolt_classpath=bolt_classpath,<EOL>user_output_fields=user_outputs)<EOL>self.add_spec(test_spec)<EOL>self.bolts[name] = test_spec<EOL>return test_spec<EOL>", "docstring": "Add an integration_test bolt\n\n        Only dict based inputs is supported", "id": "f7292:c0:m2"}
{"signature": "def _add_all_grouping(self, child, parent, stream_id):", "body": "<EOL>child_component_spec = self.bolts[child]<EOL>child_inputs = child_component_spec.inputs<EOL>if parent in self.bolts:<EOL><INDENT>parent_component_spec = self.bolts[parent]<EOL><DEDENT>else:<EOL><INDENT>parent_component_spec = self.spouts[parent]<EOL><DEDENT>if stream_id == Stream.DEFAULT_STREAM_ID:<EOL><INDENT>child_inputs[parent_component_spec] = Grouping.ALL<EOL><DEDENT>else:<EOL><INDENT>child_inputs[parent_component_spec[stream_id]] = Grouping.ALL<EOL><DEDENT>", "docstring": "Adds all grouping between child component and parent component with a given stream id\n\n        :type child: str\n        :param child: child's component name\n        :type parent: str\n        :param parent: parent's component name\n        :type stream_id: str\n        :param stream_id: stream id", "id": "f7292:c0:m4"}
{"signature": "def create_mock_medium_topology(<EOL>self,<EOL>spout_parallelism=<NUM_LIT:1>,<EOL>bolt1_parallelism=<NUM_LIT:1>,<EOL>bolt2_parallelism=<NUM_LIT:1>,<EOL>bolt3_parallelism=<NUM_LIT:1>):", "body": "topology = protoTopology.Topology()<EOL>topology.id = \"<STR_LIT>\"<EOL>topology.name = \"<STR_LIT>\"<EOL>stream1 = protoTopology.StreamId()<EOL>stream1.id = \"<STR_LIT>\"<EOL>stream1.component_name = \"<STR_LIT>\"<EOL>stream2 = protoTopology.StreamId()<EOL>stream2.id = \"<STR_LIT>\"<EOL>stream2.component_name = \"<STR_LIT>\"<EOL>stream3 = protoTopology.StreamId()<EOL>stream3.id = \"<STR_LIT>\"<EOL>stream3.component_name = \"<STR_LIT>\"<EOL>stream4 = protoTopology.StreamId()<EOL>stream4.id = \"<STR_LIT>\"<EOL>stream4.component_name = \"<STR_LIT>\"<EOL>spout1 = self.create_mock_spout(\"<STR_LIT>\",<EOL>[stream1, stream2],<EOL>spout_parallelism)<EOL>topology.spouts.extend([spout1])<EOL>bolt1 = self.create_mock_bolt(\"<STR_LIT>\",<EOL>[stream1],<EOL>[stream3],<EOL>bolt1_parallelism)<EOL>bolt2 = self.create_mock_bolt(\"<STR_LIT>\",<EOL>[stream2],<EOL>[stream4],<EOL>bolt2_parallelism)<EOL>bolt3 = self.create_mock_bolt(\"<STR_LIT>\",<EOL>[stream3, stream4],<EOL>[],<EOL>bolt3_parallelism)<EOL>topology.bolts.extend([bolt1, bolt2, bolt3])<EOL>return topology<EOL>", "docstring": "Medium topology is a three stage topology\nwith one spout, two mid stage bolts, and one\nlast stage bolt.\nS -str1-> B1 -str3-> B3\nS -str2-> B2 -str4-> B3", "id": "f7321:c0:m7"}
{"signature": "def load_configs(self):", "body": "self.statemgr_config.set_state_locations(self.configs[STATEMGRS_KEY])<EOL>if EXTRA_LINKS_KEY in self.configs:<EOL><INDENT>for extra_link in self.configs[EXTRA_LINKS_KEY]:<EOL><INDENT>self.extra_links.append(self.validate_extra_link(extra_link))<EOL><DEDENT><DEDENT>", "docstring": "load config files", "id": "f7323:c0:m1"}
{"signature": "def validated_formatter(self, url_format):", "body": "<EOL>valid_parameters = {<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT:user>\",<EOL>}<EOL>dummy_formatted_url = url_format<EOL>for key, value in valid_parameters.items():<EOL><INDENT>dummy_formatted_url = dummy_formatted_url.replace(key, value)<EOL><DEDENT>if '<STR_LIT:$>' in dummy_formatted_url:<EOL><INDENT>raise Exception(\"<STR_LIT>\" % (url_format))<EOL><DEDENT>return url_format<EOL>", "docstring": "validate visualization url format", "id": "f7323:c0:m3"}
{"signature": "def spouts(self):", "body": "if self.physical_plan:<EOL><INDENT>return list(self.physical_plan.topology.spouts)<EOL><DEDENT>return []<EOL>", "docstring": "Returns a list of Spout (proto) messages", "id": "f7324:c0:m11"}
{"signature": "def set_physical_plan(self, physical_plan):", "body": "if not physical_plan:<EOL><INDENT>self.physical_plan = None<EOL>self.id = None<EOL><DEDENT>else:<EOL><INDENT>self.physical_plan = physical_plan<EOL>self.id = physical_plan.topology.id<EOL><DEDENT>self.trigger_watches()<EOL>", "docstring": "set physical plan", "id": "f7324:c0:m4"}
{"signature": "def get_status(self):", "body": "status = None<EOL>if self.physical_plan and self.physical_plan.topology:<EOL><INDENT>status = self.physical_plan.topology.state<EOL><DEDENT>if status == <NUM_LIT:1>:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>elif status == <NUM_LIT:2>:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>elif status == <NUM_LIT:3>:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>", "docstring": "Get the current state of this topology.\nThe state values are from the topology.proto\nRUNNING = 1, PAUSED = 2, KILLED = 3\nif the state is None \"Unknown\" is returned.", "id": "f7324:c0:m16"}
{"signature": "def unregister_watch(self, uid):", "body": "<EOL>Log.info(\"<STR_LIT>\" + str(uid))<EOL>self.watches.pop(uid, None)<EOL>", "docstring": "Unregister the watch with the given UUID.", "id": "f7324:c0:m2"}
{"signature": "def num_instances(self):", "body": "num = <NUM_LIT:0><EOL>components = self.spouts() + self.bolts()<EOL>for component in components:<EOL><INDENT>config = component.comp.config<EOL>for kvs in config.kvs:<EOL><INDENT>if kvs.key == api_constants.TOPOLOGY_COMPONENT_PARALLELISM:<EOL><INDENT>num += int(kvs.value)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>return num<EOL>", "docstring": "Number of spouts + bolts", "id": "f7324:c0:m10"}
{"signature": "def set_execution_state(self, execution_state):", "body": "if not execution_state:<EOL><INDENT>self.execution_state = None<EOL>self.cluster = None<EOL>self.environ = None<EOL><DEDENT>else:<EOL><INDENT>self.execution_state = execution_state<EOL>cluster, environ = self.get_execution_state_dc_environ(execution_state)<EOL>self.cluster = cluster<EOL>self.environ = environ<EOL>self.zone = cluster<EOL><DEDENT>self.trigger_watches()<EOL>", "docstring": "set exectuion state", "id": "f7324:c0:m7"}
{"signature": "def trigger_watches(self):", "body": "to_remove = []<EOL>for uid, callback in self.watches.items():<EOL><INDENT>try:<EOL><INDENT>callback(self)<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.error(\"<STR_LIT>\" + str(e))<EOL>Log.debug(traceback.format_exc())<EOL>to_remove.append(uid)<EOL><DEDENT><DEDENT>for uid in to_remove:<EOL><INDENT>self.unregister_watch(uid)<EOL><DEDENT>", "docstring": "Call all the callbacks.\nIf any callback raises an Exception,\nunregister the corresponding watch.", "id": "f7324:c0:m3"}
{"signature": "def readObject(self):", "body": "try:<EOL><INDENT>_, res = self._read_and_exec_opcode(ident=<NUM_LIT:0>)<EOL>position_bak = self.object_stream.tell()<EOL>the_rest = self.object_stream.read()<EOL>if len(the_rest):<EOL><INDENT>log_error(\"<STR_LIT>\" % len(the_rest))<EOL>log_debug(self._create_hexdump(the_rest))<EOL><DEDENT>else:<EOL><INDENT>log_debug(\"<STR_LIT>\")<EOL><DEDENT>self.object_stream.seek(position_bak)<EOL>return res<EOL><DEDENT>except Exception:<EOL><INDENT>self._oops_dump_state()<EOL>raise<EOL><DEDENT>", "docstring": "read object", "id": "f7326:c3:m1"}
{"signature": "def get_class(self):", "body": "return self.classdesc<EOL>", "docstring": "get class", "id": "f7326:c1:m1"}
{"signature": "def log_error(message, ident=<NUM_LIT:0>):", "body": "Log.error(\"<STR_LIT:U+0020>\" * (ident * <NUM_LIT:2>) + str(message))<EOL>", "docstring": "log error info", "id": "f7326:m1"}
{"signature": "def do_classdesc(self, parent=None, ident=<NUM_LIT:0>):", "body": "<EOL>clazz = JavaClass()<EOL>log_debug(\"<STR_LIT>\", ident)<EOL>ba = self._readString()<EOL>clazz.name = ba<EOL>log_debug(\"<STR_LIT>\" % ba, ident)<EOL>(serialVersionUID, newHandle, classDescFlags) = self._readStruct(\"<STR_LIT>\")<EOL>clazz.serialVersionUID = serialVersionUID<EOL>clazz.flags = classDescFlags<EOL>self._add_reference(clazz)<EOL>log_debug(\"<STR_LIT>\" % (serialVersionUID, newHandle, classDescFlags), ident)<EOL>(length, ) = self._readStruct(\"<STR_LIT>\")<EOL>log_debug(\"<STR_LIT>\" % length, ident)<EOL>clazz.fields_names = []<EOL>clazz.fields_types = []<EOL>for _ in range(length):<EOL><INDENT>(typecode, ) = self._readStruct(\"<STR_LIT>\")<EOL>field_name = self._readString()<EOL>field_type = None<EOL>field_type = self._convert_char_to_type(typecode)<EOL>if field_type == self.TYPE_ARRAY:<EOL><INDENT>_, field_type = self._read_and_exec_opcode(<EOL>ident=ident+<NUM_LIT:1>, expect=[self.TC_STRING, self.TC_REFERENCE])<EOL>assert isinstance(field_type, str)<EOL>", "docstring": "do_classdesc", "id": "f7326:c3:m7"}
{"signature": "def initialize(self, tracker):", "body": "self.tracker = tracker<EOL>", "docstring": "initialize", "id": "f7327:c0:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "try:<EOL><INDENT>cluster = self.get_argument_cluster()<EOL>role = self.get_argument_role()<EOL>environ = self.get_argument_environ()<EOL>topology_name = self.get_argument_topology()<EOL>instance = self.get_argument_instance()<EOL>topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)<EOL>ret = yield self.getInstanceJstack(topology_info, instance)<EOL>self.write_success_response(ret)<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.debug(traceback.format_exc())<EOL>self.write_error_response(e)<EOL><DEDENT>", "docstring": "get method", "id": "f7328:c0:m1"}
{"signature": "def initialize(self, tracker):", "body": "self.tracker = tracker<EOL>", "docstring": "initialize", "id": "f7328:c0:m0"}
{"signature": "def get_argument_topology(self):", "body": "try:<EOL><INDENT>topology = self.get_argument(constants.PARAM_TOPOLOGY)<EOL>return topology<EOL><DEDENT>except tornado.web.MissingArgumentError as e:<EOL><INDENT>raise Exception(e.log_message)<EOL><DEDENT>", "docstring": "Helper function to get topology argument.\nRaises exception if argument is missing.\nReturns the topology argument.", "id": "f7329:c0:m11"}
{"signature": "def get_argument_starttime(self):", "body": "try:<EOL><INDENT>starttime = self.get_argument(constants.PARAM_STARTTIME)<EOL>return starttime<EOL><DEDENT>except tornado.web.MissingArgumentError as e:<EOL><INDENT>raise Exception(e.log_message)<EOL><DEDENT>", "docstring": "Helper function to get starttime argument.\nRaises exception if argument is missing.\nReturns the starttime argument.", "id": "f7329:c0:m14"}
{"signature": "def get_argument_component(self):", "body": "try:<EOL><INDENT>component = self.get_argument(constants.PARAM_COMPONENT)<EOL>return component<EOL><DEDENT>except tornado.web.MissingArgumentError as e:<EOL><INDENT>raise Exception(e.log_message)<EOL><DEDENT>", "docstring": "Helper function to get component argument.\nRaises exception if argument is missing.\nReturns the component argument.", "id": "f7329:c0:m12"}
{"signature": "def get_argument_endtime(self):", "body": "try:<EOL><INDENT>endtime = self.get_argument(constants.PARAM_ENDTIME)<EOL>return endtime<EOL><DEDENT>except tornado.web.MissingArgumentError as e:<EOL><INDENT>raise Exception(e.log_message)<EOL><DEDENT>", "docstring": "Helper function to get endtime argument.\nRaises exception if argument is missing.\nReturns the endtime argument.", "id": "f7329:c0:m15"}
{"signature": "def prepare(self):", "body": "self.basehandler_starttime = time.time()<EOL>", "docstring": "Used for timing. Sets the basehandler_starttime to current time, and\nis used when writing the response back.\nSubclasses of BaseHandler must never use self.write, but instead use\nself.write_error or self.write_result methods to correctly include\nthe timing.", "id": "f7329:c0:m1"}
{"signature": "def validateInterval(self, startTime, endTime):", "body": "start = int(startTime)<EOL>end = int(endTime)<EOL>if start > end:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Helper function to validate interval.\nAn interval is valid if starttime and endtime are integrals,\nand starttime is less than the endtime.\nRaises exception if interval is not valid.", "id": "f7329:c0:m20"}
{"signature": "def get_argument_cluster(self):", "body": "try:<EOL><INDENT>return self.get_argument(constants.PARAM_CLUSTER)<EOL><DEDENT>except tornado.web.MissingArgumentError as e:<EOL><INDENT>raise Exception(e.log_message)<EOL><DEDENT>", "docstring": "Helper function to get request argument.\nRaises exception if argument is missing.\nReturns the cluster argument.", "id": "f7329:c0:m8"}
{"signature": "def set_default_headers(self):", "body": "self.set_header(\"<STR_LIT>\", \"<STR_LIT:*>\")<EOL>", "docstring": "Allow any domain to make queries to tracker.", "id": "f7329:c0:m0"}
{"signature": "def get_argument_role(self):", "body": "try:<EOL><INDENT>return self.get_argument(constants.PARAM_ROLE, default=None)<EOL><DEDENT>except tornado.web.MissingArgumentError as e:<EOL><INDENT>raise Exception(e.log_message)<EOL><DEDENT>", "docstring": "Helper function to get request argument.\nRaises exception if argument is missing.\nReturns the role argument.", "id": "f7329:c0:m9"}
{"signature": "def get_required_arguments_metricnames(self):", "body": "try:<EOL><INDENT>metricnames = self.get_arguments(constants.PARAM_METRICNAME)<EOL>if not metricnames:<EOL><INDENT>raise tornado.web.MissingArgumentError(constants.PARAM_METRICNAME)<EOL><DEDENT>return metricnames<EOL><DEDENT>except tornado.web.MissingArgumentError as e:<EOL><INDENT>raise Exception(e.log_message)<EOL><DEDENT>", "docstring": "Helper function to get metricname arguments.\nNotice that it is get_argument\"s\" variation, which means that this can be repeated.\nRaises exception if argument is missing.\nReturns a list of metricname arguments", "id": "f7329:c0:m19"}
{"signature": "def write_json_response(self, response):", "body": "self.write(tornado.escape.json_encode(response))<EOL>self.set_header(\"<STR_LIT:Content-Type>\", \"<STR_LIT:application/json>\")<EOL>", "docstring": "write back json response", "id": "f7329:c0:m4"}
{"signature": "def get_argument_instance(self):", "body": "try:<EOL><INDENT>instance = self.get_argument(constants.PARAM_INSTANCE)<EOL>return instance<EOL><DEDENT>except tornado.web.MissingArgumentError as e:<EOL><INDENT>raise Exception(e.log_message)<EOL><DEDENT>", "docstring": "Helper function to get instance argument.\nRaises exception if argument is missing.\nReturns the instance argument.", "id": "f7329:c0:m13"}
{"signature": "def initialize(self, tracker):", "body": "self.tracker = tracker<EOL>", "docstring": "initialize", "id": "f7330:c2:m0"}
{"signature": "def initialize(self, tracker):", "body": "self.tracker = tracker<EOL>", "docstring": "initialize", "id": "f7330:c1:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "try:<EOL><INDENT>cluster = self.get_argument_cluster()<EOL>role = self.get_argument_role()<EOL>environ = self.get_argument_environ()<EOL>topology_name = self.get_argument_topology()<EOL>container = self.get_argument(constants.PARAM_CONTAINER)<EOL>path = self.get_argument(constants.PARAM_PATH)<EOL>offset = self.get_argument_offset()<EOL>length = self.get_argument_length()<EOL>topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)<EOL>stmgr_id = \"<STR_LIT>\" + container<EOL>stmgr = topology_info[\"<STR_LIT>\"][\"<STR_LIT>\"][stmgr_id]<EOL>host = stmgr[\"<STR_LIT:host>\"]<EOL>shell_port = stmgr[\"<STR_LIT>\"]<EOL>file_data_url = \"<STR_LIT>\" %(host, shell_port, path, offset, length)<EOL>http_client = tornado.httpclient.AsyncHTTPClient()<EOL>response = yield http_client.fetch(file_data_url)<EOL>self.write_success_response(json.loads(response.body))<EOL>self.finish()<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.debug(traceback.format_exc())<EOL>self.write_error_response(e)<EOL><DEDENT>", "docstring": "get method", "id": "f7330:c0:m1"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "try:<EOL><INDENT>cluster = self.get_argument_cluster()<EOL>role = self.get_argument_role()<EOL>environ = self.get_argument_environ()<EOL>topology_name = self.get_argument_topology()<EOL>topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)<EOL>runtime_state = topology_info[\"<STR_LIT>\"]<EOL>runtime_state[\"<STR_LIT>\"] = topology_info[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>topology = self.tracker.getTopologyByClusterRoleEnvironAndName(<EOL>cluster, role, environ, topology_name)<EOL>reg_summary = yield tornado.gen.Task(self.getStmgrsRegSummary, topology.tmaster)<EOL>for stmgr, reg in reg_summary.items():<EOL><INDENT>runtime_state[\"<STR_LIT>\"].setdefault(stmgr, {})[\"<STR_LIT>\"] = reg<EOL><DEDENT>self.write_success_response(runtime_state)<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.debug(traceback.format_exc())<EOL>self.write_error_response(e)<EOL><DEDENT>", "docstring": "get method", "id": "f7332:c0:m2"}
{"signature": "def initialize(self, tracker):", "body": "self.tracker = tracker<EOL>", "docstring": "initialize", "id": "f7332:c0:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def getComponentMetrics(self,<EOL>tmaster,<EOL>componentName,<EOL>metricNames,<EOL>instances,<EOL>interval,<EOL>callback=None):<DEDENT>", "body": "if not tmaster or not tmaster.host or not tmaster.stats_port:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>host = tmaster.host<EOL>port = tmaster.stats_port<EOL>metricRequest = tmaster_pb2.MetricRequest()<EOL>metricRequest.component_name = componentName<EOL>if len(instances) > <NUM_LIT:0>:<EOL><INDENT>for instance in instances:<EOL><INDENT>metricRequest.instance_id.append(instance)<EOL><DEDENT><DEDENT>for metricName in metricNames:<EOL><INDENT>metricRequest.metric.append(metricName)<EOL><DEDENT>metricRequest.interval = interval<EOL>metricRequestString = metricRequest.SerializeToString()<EOL>url = \"<STR_LIT>\".format(host, port)<EOL>request = tornado.httpclient.HTTPRequest(url,<EOL>body=metricRequestString,<EOL>method='<STR_LIT:POST>',<EOL>request_timeout=<NUM_LIT:5>)<EOL>Log.debug(\"<STR_LIT>\")<EOL>Log.debug(\"<STR_LIT>\" + url)<EOL>try:<EOL><INDENT>client = tornado.httpclient.AsyncHTTPClient()<EOL>result = yield client.fetch(request)<EOL>Log.debug(\"<STR_LIT>\")<EOL><DEDENT>except tornado.httpclient.HTTPError as e:<EOL><INDENT>raise Exception(str(e))<EOL><DEDENT>responseCode = result.code<EOL>if responseCode >= <NUM_LIT>:<EOL><INDENT>message = \"<STR_LIT>\" + responseCode<EOL>Log.error(message)<EOL>raise Exception(message)<EOL><DEDENT>metricResponse = tmaster_pb2.MetricResponse()<EOL>metricResponse.ParseFromString(result.body)<EOL>if metricResponse.status.status == common_pb2.NOTOK:<EOL><INDENT>if metricResponse.status.HasField(\"<STR_LIT:message>\"):<EOL><INDENT>Log.warn(\"<STR_LIT>\", metricResponse.status.message)<EOL><DEDENT><DEDENT>ret = {}<EOL>ret[\"<STR_LIT>\"] = metricResponse.interval<EOL>ret[\"<STR_LIT>\"] = componentName<EOL>ret[\"<STR_LIT>\"] = {}<EOL>for metric in metricResponse.metric:<EOL><INDENT>instance = metric.instance_id<EOL>for im in 
metric.metric:<EOL><INDENT>metricname = im.name<EOL>value = im.value<EOL>if metricname not in ret[\"<STR_LIT>\"]:<EOL><INDENT>ret[\"<STR_LIT>\"][metricname] = {}<EOL><DEDENT>ret[\"<STR_LIT>\"][metricname][instance] = value<EOL><DEDENT><DEDENT>raise tornado.gen.Return(ret)<EOL>", "docstring": "Get the specified metrics for the given component name of this topology.\nReturns the following dict on success:\n{\n  \"metrics\": {\n    <metricname>: {\n      <instance>: <numeric value>,\n      <instance>: <numeric value>,\n      ...\n    }, ...\n  },\n  \"interval\": <numeric value>,\n  \"component\": \"...\"\n}\n\nRaises exception on failure.", "id": "f7333:c0:m2"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "clusters = [statemgr.name for statemgr in self.tracker.state_managers]<EOL>self.write_success_response(clusters)<EOL>", "docstring": "get method", "id": "f7334:c0:m1"}
{"signature": "def initialize(self, tracker):", "body": "self.tracker = tracker<EOL>", "docstring": "initialize", "id": "f7334:c0:m0"}
{"signature": "def initialize(self, tracker):", "body": "self.tracker = tracker<EOL>", "docstring": "initialize", "id": "f7337:c0:m0"}
{"signature": "def initialize(self, tracker):", "body": "self.tracker = tracker<EOL>", "docstring": "initialize", "id": "f7338:c0:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "try:<EOL><INDENT>cluster = self.get_argument_cluster()<EOL>role = self.get_argument_role()<EOL>environ = self.get_argument_environ()<EOL>topology_name = self.get_argument_topology()<EOL>topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)<EOL>execution_state = topology_info[\"<STR_LIT>\"]<EOL>self.write_success_response(execution_state)<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.debug(traceback.format_exc())<EOL>self.write_error_response(e)<EOL><DEDENT>", "docstring": "get method", "id": "f7338:c0:m1"}
{"signature": "def get(self):", "body": "self.redirect(\"<STR_LIT>\")<EOL>", "docstring": "get method", "id": "f7339:c0:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "try:<EOL><INDENT>cluster = self.get_argument_cluster()<EOL>role = self.get_argument_role()<EOL>environ = self.get_argument_environ()<EOL>topology_name = self.get_argument_topology()<EOL>topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)<EOL>packing_plan = topology_info[\"<STR_LIT>\"]<EOL>self.write_success_response(packing_plan)<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.debug(traceback.format_exc())<EOL>self.write_error_response(e)<EOL><DEDENT>", "docstring": "get method", "id": "f7342:c0:m1"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "try:<EOL><INDENT>cluster = self.get_argument_cluster()<EOL>role = self.get_argument_role()<EOL>environ = self.get_argument_environ()<EOL>topology_name = self.get_argument_topology()<EOL>instance = self.get_argument_instance()<EOL>topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)<EOL>ret = yield self.runInstanceJmap(topology_info, instance)<EOL>self.write_success_response(ret)<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.debug(traceback.format_exc())<EOL>self.write_error_response(e)<EOL><DEDENT>", "docstring": "get method", "id": "f7343:c0:m1"}
{"signature": "def initialize(self, tracker):", "body": "self.tracker = tracker<EOL>", "docstring": "initialize", "id": "f7343:c0:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def getComponentException(self, tmaster, component_name, instances=[], callback=None):<DEDENT>", "body": "if not tmaster or not tmaster.host or not tmaster.stats_port:<EOL><INDENT>return<EOL><DEDENT>exception_request = tmaster_pb2.ExceptionLogRequest()<EOL>exception_request.component_name = component_name<EOL>if len(instances) > <NUM_LIT:0>:<EOL><INDENT>exception_request.instances.extend(instances)<EOL><DEDENT>request_str = exception_request.SerializeToString()<EOL>port = str(tmaster.stats_port)<EOL>host = tmaster.host<EOL>url = \"<STR_LIT>\".format(host, port)<EOL>request = tornado.httpclient.HTTPRequest(url,<EOL>body=request_str,<EOL>method='<STR_LIT:POST>',<EOL>request_timeout=<NUM_LIT:5>)<EOL>Log.debug('<STR_LIT>', url)<EOL>try:<EOL><INDENT>client = tornado.httpclient.AsyncHTTPClient()<EOL>result = yield client.fetch(request)<EOL>Log.debug(\"<STR_LIT>\")<EOL><DEDENT>except tornado.httpclient.HTTPError as e:<EOL><INDENT>raise Exception(str(e))<EOL><DEDENT>responseCode = result.code<EOL>if responseCode >= <NUM_LIT>:<EOL><INDENT>message = \"<STR_LIT>\" + responseCode<EOL>Log.error(message)<EOL>raise tornado.gen.Return({<EOL>\"<STR_LIT:message>\": message<EOL>})<EOL><DEDENT>exception_response = tmaster_pb2.ExceptionLogResponse()<EOL>exception_response.ParseFromString(result.body)<EOL>if exception_response.status.status == common_pb2.NOTOK:<EOL><INDENT>if exception_response.status.HasField(\"<STR_LIT:message>\"):<EOL><INDENT>raise tornado.gen.Return({<EOL>\"<STR_LIT:message>\": exception_response.status.message<EOL>})<EOL><DEDENT><DEDENT>ret = []<EOL>for exception_log in exception_response.exceptions:<EOL><INDENT>ret.append({'<STR_LIT>': exception_log.hostname,<EOL>'<STR_LIT>': exception_log.instance_id,<EOL>'<STR_LIT>': exception_log.stacktrace,<EOL>'<STR_LIT>': exception_log.lasttime,<EOL>'<STR_LIT>': exception_log.firsttime,<EOL>'<STR_LIT:count>': str(exception_log.count),<EOL>'<STR_LIT>': 
exception_log.logging})<EOL><DEDENT>raise tornado.gen.Return(ret)<EOL>", "docstring": "Get all (last 1000) exceptions for 'component_name' of the topology.\nReturns an Array of exception logs on success.\nReturns json with message on failure.", "id": "f7344:c0:m2"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "<EOL>clusters = self.get_arguments(constants.PARAM_CLUSTER)<EOL>environs = self.get_arguments(constants.PARAM_ENVIRON)<EOL>role = self.get_argument_role()<EOL>ret = {}<EOL>topologies = self.tracker.topologies<EOL>for topology in topologies:<EOL><INDENT>cluster = topology.cluster<EOL>environ = topology.environ<EOL>if not cluster or not environ:<EOL><INDENT>continue<EOL><DEDENT>if clusters and cluster not in clusters:<EOL><INDENT>continue<EOL><DEDENT>if environs and environ not in environs:<EOL><INDENT>continue<EOL><DEDENT>if cluster not in ret:<EOL><INDENT>ret[cluster] = {}<EOL><DEDENT>if environ not in ret[cluster]:<EOL><INDENT>ret[cluster][environ] = {}<EOL><DEDENT>try:<EOL><INDENT>topology_info = self.tracker.getTopologyInfo(topology.name, cluster, role, environ)<EOL>if topology_info and \"<STR_LIT>\" in topology_info:<EOL><INDENT>ret[cluster][environ][topology.name] = topology_info[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>self.write_success_response(ret)<EOL>", "docstring": "get method", "id": "f7345:c0:m1"}
{"signature": "def initialize(self, tracker):", "body": "self.tracker = tracker<EOL>", "docstring": "initialize", "id": "f7348:c0:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "try:<EOL><INDENT>cluster = self.get_argument_cluster()<EOL>role = self.get_argument_role()<EOL>environ = self.get_argument_environ()<EOL>topology_name = self.get_argument_topology()<EOL>topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)<EOL>metadata = topology_info[\"<STR_LIT>\"]<EOL>self.write_success_response(metadata)<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.error(\"<STR_LIT>\")<EOL>Log.debug(traceback.format_exc())<EOL>self.write_error_response(e)<EOL><DEDENT>", "docstring": "get method", "id": "f7348:c0:m1"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "try:<EOL><INDENT>cluster = self.get_argument_cluster()<EOL>role = self.get_argument_role()<EOL>environ = self.get_argument_environ()<EOL>topology_name = self.get_argument_topology()<EOL>topology = self.tracker.getTopologyByClusterRoleEnvironAndName(<EOL>cluster, role, environ, topology_name)<EOL>start_time = self.get_argument_starttime()<EOL>end_time = self.get_argument_endtime()<EOL>self.validateInterval(start_time, end_time)<EOL>query = self.get_argument_query()<EOL>metrics = yield tornado.gen.Task(self.executeMetricsQuery,<EOL>topology.tmaster, query, int(start_time), int(end_time))<EOL>self.write_success_response(metrics)<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.debug(traceback.format_exc())<EOL>self.write_error_response(e)<EOL><DEDENT>", "docstring": "get method", "id": "f7350:c0:m1"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def executeMetricsQuery(self, tmaster, queryString, start_time, end_time, callback=None):<DEDENT>", "body": "query = Query(self.tracker)<EOL>metrics = yield query.execute_query(tmaster, queryString, start_time, end_time)<EOL>ret = {}<EOL>ret[\"<STR_LIT>\"] = start_time<EOL>ret[\"<STR_LIT>\"] = end_time<EOL>ret[\"<STR_LIT>\"] = []<EOL>for metric in metrics:<EOL><INDENT>tl = {<EOL>\"<STR_LIT:data>\": metric.timeline<EOL>}<EOL>if metric.instance:<EOL><INDENT>tl[\"<STR_LIT>\"] = metric.instance<EOL><DEDENT>ret[\"<STR_LIT>\"].append(tl)<EOL><DEDENT>raise tornado.gen.Return(ret)<EOL>", "docstring": "Get the specified metrics for the given query in this topology.\nReturns the following dict on success:\n{\n  \"timeline\": [{\n    \"instance\": <instance>,\n    \"data\": {\n      <start_time> : <numeric value>,\n      <start_time> : <numeric value>,\n      ...\n    }\n  }, {\n    ...\n  }, ...\n  \"starttime\": <numeric value>,\n  \"endtime\": <numeric value>,\n},\n\nReturns the following dict on failure:\n{\n  \"message\": \"...\"\n}", "id": "f7350:c0:m2"}
{"signature": "def initialize(self, tracker):", "body": "self.tracker = tracker<EOL>", "docstring": "initialize", "id": "f7352:c0:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "try:<EOL><INDENT>cluster = self.get_argument_cluster()<EOL>role = self.get_argument_role()<EOL>environ = self.get_argument_environ()<EOL>topology_name = self.get_argument_topology()<EOL>instance = self.get_argument_instance()<EOL>topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)<EOL>result = yield getInstancePid(topology_info, instance)<EOL>self.write_success_response(result)<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.debug(traceback.format_exc())<EOL>self.write_error_response(e)<EOL><DEDENT>", "docstring": "get method", "id": "f7352:c0:m1"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "try:<EOL><INDENT>cluster = self.get_argument_cluster()<EOL>role = self.get_argument_role()<EOL>environ = self.get_argument_environ()<EOL>topology_name = self.get_argument_topology()<EOL>topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)<EOL>physical_plan = topology_info[\"<STR_LIT>\"]<EOL>self.write_success_response(physical_plan)<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.debug(traceback.format_exc())<EOL>self.write_error_response(e)<EOL><DEDENT>", "docstring": "get method", "id": "f7353:c0:m1"}
{"signature": "def create_parsers():", "body": "parser = argparse.ArgumentParser(<EOL>epilog='<STR_LIT>',<EOL>usage=\"<STR_LIT>\",<EOL>add_help=False)<EOL>parser = add_titles(parser)<EOL>parser = add_arguments(parser)<EOL>ya_parser = argparse.ArgumentParser(<EOL>parents=[parser],<EOL>formatter_class=SubcommandHelpFormatter,<EOL>add_help=False)<EOL>subparsers = ya_parser.add_subparsers(<EOL>title=\"<STR_LIT>\")<EOL>help_parser = subparsers.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>add_help=False)<EOL>help_parser.set_defaults(help=True)<EOL>subparsers.add_parser(<EOL>'<STR_LIT:version>',<EOL>help='<STR_LIT>',<EOL>add_help=True)<EOL>return parser, ya_parser<EOL>", "docstring": "create argument parser", "id": "f7356:m2"}
{"signature": "def parse_query_string(self, query):", "body": "if not query:<EOL><INDENT>return None<EOL><DEDENT>if query[<NUM_LIT:0>] == '<STR_LIT:(>':<EOL><INDENT>index = self.find_closing_braces(query)<EOL>if index != len(query) - <NUM_LIT:1>:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>return self.parse_query_string(query[<NUM_LIT:1>:-<NUM_LIT:1>])<EOL><DEDENT><DEDENT>start_index = query.find(\"<STR_LIT:(>\")<EOL>if start_index < <NUM_LIT:0>:<EOL><INDENT>try:<EOL><INDENT>constant = float(query)<EOL>return constant<EOL><DEDENT>except ValueError:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>token = query[:start_index]<EOL>if token not in self.operators:<EOL><INDENT>raise Exception(\"<STR_LIT>\" + token)<EOL><DEDENT>rest_of_the_query = query[start_index:]<EOL>braces_end_index = self.find_closing_braces(rest_of_the_query)<EOL>if braces_end_index != len(rest_of_the_query) - <NUM_LIT:1>:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>parts = self.get_sub_parts(rest_of_the_query[<NUM_LIT:1>:-<NUM_LIT:1>])<EOL>if token == \"<STR_LIT>\":<EOL><INDENT>return self.operators[token](parts)<EOL><DEDENT>children = []<EOL>for part in parts:<EOL><INDENT>children.append(self.parse_query_string(part))<EOL><DEDENT>node = self.operators[token](children)<EOL>return node<EOL>", "docstring": "Returns a parse tree for the query, each of the node is a\n        subclass of Operator. This is both a lexical as well as syntax analyzer step.", "id": "f7357:c0:m4"}
{"signature": "def synch_topologies(self):", "body": "self.state_managers = statemanagerfactory.get_all_state_managers(self.config.statemgr_config)<EOL>try:<EOL><INDENT>for state_manager in self.state_managers:<EOL><INDENT>state_manager.start()<EOL><DEDENT><DEDENT>except Exception as ex:<EOL><INDENT>Log.error(\"<STR_LIT>\" % ex)<EOL>traceback.print_exc()<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>def on_topologies_watch(state_manager, topologies):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>Log.info(\"<STR_LIT>\")<EOL>Log.debug(\"<STR_LIT>\" + str(topologies))<EOL>existingTopologies = self.getTopologiesForStateLocation(state_manager.name)<EOL>existingTopNames = map(lambda t: t.name, existingTopologies)<EOL>Log.debug(\"<STR_LIT>\" + str(existingTopNames))<EOL>for name in existingTopNames:<EOL><INDENT>if name not in topologies:<EOL><INDENT>Log.info(\"<STR_LIT>\",<EOL>name, state_manager.rootpath)<EOL>self.removeTopology(name, state_manager.name)<EOL><DEDENT><DEDENT>for name in topologies:<EOL><INDENT>if name not in existingTopNames:<EOL><INDENT>self.addNewTopology(state_manager, name)<EOL><DEDENT><DEDENT><DEDENT>for state_manager in self.state_managers:<EOL><INDENT>onTopologiesWatch = partial(on_topologies_watch, state_manager)<EOL>state_manager.get_topologies(onTopologiesWatch)<EOL><DEDENT>", "docstring": "Sync the topologies with the statemgrs.", "id": "f7358:c0:m1"}
{"signature": "def extract_packing_plan(self, topology):", "body": "packingPlan = {<EOL>\"<STR_LIT:id>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": []<EOL>}<EOL>if not topology.packing_plan:<EOL><INDENT>return packingPlan<EOL><DEDENT>container_plans = topology.packing_plan.container_plans<EOL>containers = []<EOL>for container_plan in container_plans:<EOL><INDENT>instances = []<EOL>for instance_plan in container_plan.instance_plans:<EOL><INDENT>instance_resources = {\"<STR_LIT>\": instance_plan.resource.cpu,<EOL>\"<STR_LIT>\": instance_plan.resource.ram,<EOL>\"<STR_LIT>\": instance_plan.resource.disk}<EOL>instance = {\"<STR_LIT>\" : instance_plan.component_name,<EOL>\"<STR_LIT>\" : instance_plan.task_id,<EOL>\"<STR_LIT>\": instance_plan.component_index,<EOL>\"<STR_LIT>\": instance_resources}<EOL>instances.append(instance)<EOL><DEDENT>required_resource = {\"<STR_LIT>\": container_plan.requiredResource.cpu,<EOL>\"<STR_LIT>\": container_plan.requiredResource.ram,<EOL>\"<STR_LIT>\": container_plan.requiredResource.disk}<EOL>scheduled_resource = {}<EOL>if container_plan.scheduledResource:<EOL><INDENT>scheduled_resource = {\"<STR_LIT>\": container_plan.scheduledResource.cpu,<EOL>\"<STR_LIT>\": container_plan.scheduledResource.ram,<EOL>\"<STR_LIT>\": container_plan.scheduledResource.disk}<EOL><DEDENT>container = {\"<STR_LIT:id>\": container_plan.id,<EOL>\"<STR_LIT>\": instances,<EOL>\"<STR_LIT>\": required_resource,<EOL>\"<STR_LIT>\": scheduled_resource}<EOL>containers.append(container)<EOL><DEDENT>packingPlan[\"<STR_LIT:id>\"] = topology.packing_plan.id<EOL>packingPlan[\"<STR_LIT>\"] = containers<EOL>return json.dumps(packingPlan)<EOL>", "docstring": "Returns the representation of packing plan that will\nbe returned from Tracker.", "id": "f7358:c0:m14"}
{"signature": "def extract_execution_state(self, topology):", "body": "execution_state = topology.execution_state<EOL>executionState = {<EOL>\"<STR_LIT>\": execution_state.cluster,<EOL>\"<STR_LIT>\": execution_state.environ,<EOL>\"<STR_LIT>\": execution_state.role,<EOL>\"<STR_LIT>\": topology.name,<EOL>\"<STR_LIT>\": execution_state.submission_time,<EOL>\"<STR_LIT>\": execution_state.submission_user,<EOL>\"<STR_LIT>\": execution_state.release_state.release_username,<EOL>\"<STR_LIT>\": execution_state.release_state.release_tag,<EOL>\"<STR_LIT>\": execution_state.release_state.release_version,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": [],<EOL>}<EOL>for extra_link in self.config.extra_links:<EOL><INDENT>link = extra_link.copy()<EOL>link[\"<STR_LIT:url>\"] = self.config.get_formatted_url(executionState,<EOL>link[EXTRA_LINK_FORMATTER_KEY])<EOL>executionState[\"<STR_LIT>\"].append(link)<EOL><DEDENT>return executionState<EOL>", "docstring": "Returns the repesentation of execution state that will\nbe returned from Tracker.", "id": "f7358:c0:m7"}
{"signature": "def getTopologiesForStateLocation(self, name):", "body": "return filter(lambda t: t.state_manager_name == name, self.topologies)<EOL>", "docstring": "Returns all the topologies for a given state manager.", "id": "f7358:c0:m4"}
{"signature": "def extract_scheduler_location(self, topology):", "body": "schedulerLocation = {<EOL>\"<STR_LIT:name>\": None,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>}<EOL>if topology.scheduler_location:<EOL><INDENT>schedulerLocation[\"<STR_LIT:name>\"] = topology.scheduler_location.topology_name<EOL>schedulerLocation[\"<STR_LIT>\"] = topology.scheduler_location.http_endpoint<EOL>schedulerLocation[\"<STR_LIT>\"] =topology.scheduler_location.job_page_link[<NUM_LIT:0>]if len(topology.scheduler_location.job_page_link) > <NUM_LIT:0> else \"<STR_LIT>\"<EOL><DEDENT>return schedulerLocation<EOL>", "docstring": "Returns the representation of scheduler location that will\nbe returned from Tracker.", "id": "f7358:c0:m10"}
{"signature": "def bfs_depth(self, U):", "body": "bfs_queue = [[U, <NUM_LIT:0>]]  <EOL>visited = set()<EOL>max_depth = <NUM_LIT:0><EOL>while bfs_queue:<EOL><INDENT>[V, depth] = bfs_queue.pop()<EOL>if max_depth < depth:<EOL><INDENT>max_depth = depth<EOL><DEDENT>visited.add(V)<EOL>adj_set = self.edges[V]<EOL>for W in adj_set:<EOL><INDENT>if W not in visited:<EOL><INDENT>bfs_queue.append([W, depth + <NUM_LIT:1>])<EOL><DEDENT><DEDENT><DEDENT>return max_depth<EOL>", "docstring": "Returns the maximum distance between any vertex and U in the connected\ncomponent containing U\n:param U:\n:return:", "id": "f7359:c0:m3"}
{"signature": "def isOperator(self):", "body": "return True<EOL>", "docstring": "Returns True. This is just usefule for checking that an object is an operator or not.", "id": "f7360:c1:m2"}
{"signature": "def __init__(self, componentName, metricName, instance, start, end, timeline):", "body": "self.componentName = componentName<EOL>self.metricName = metricName<EOL>self.instance = instance<EOL>self.start = start<EOL>self.end = end<EOL>self.timeline = self.floorTimestamps(start, end, timeline)<EOL>", "docstring": "Takes (componentName, metricname, instance, timeline)", "id": "f7360:c0:m0"}
{"signature": "def floorTimestamps(self, start, end, timeline):", "body": "ret = {}<EOL>for timestamp, value in timeline.items():<EOL><INDENT>ts = timestamp / <NUM_LIT> * <NUM_LIT><EOL>if start <= ts <= end:<EOL><INDENT>ret[ts] = value<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "floor timestamp", "id": "f7360:c0:m1"}
{"signature": "def setDefault(self, constant, start, end):", "body": "starttime = start / <NUM_LIT> * <NUM_LIT><EOL>if starttime < start:<EOL><INDENT>starttime += <NUM_LIT><EOL><DEDENT>endtime = end / <NUM_LIT> * <NUM_LIT><EOL>while starttime <= endtime:<EOL><INDENT>if starttime not in self.timeline or self.timeline[starttime] == <NUM_LIT:0>:<EOL><INDENT>self.timeline[starttime] = constant<EOL><DEDENT>starttime += <NUM_LIT><EOL><DEDENT>", "docstring": "set default time", "id": "f7360:c0:m2"}
{"signature": "def get_heron_tracker_bin_dir():", "body": "bin_path = os.path.join(get_heron_tracker_dir(), BIN_DIR)<EOL>return bin_path<EOL>", "docstring": "This will provide heron tracker bin directory from .pex file.\n:return: absolute path of heron lib directory", "id": "f7361:m11"}
{"signature": "def make_shell_endpoint(topologyInfo, instance_id):", "body": "<EOL>pplan = topologyInfo[\"<STR_LIT>\"]<EOL>stmgrId = pplan[\"<STR_LIT>\"][instance_id][\"<STR_LIT>\"]<EOL>host = pplan[\"<STR_LIT>\"][stmgrId][\"<STR_LIT:host>\"]<EOL>shell_port = pplan[\"<STR_LIT>\"][stmgrId][\"<STR_LIT>\"]<EOL>return \"<STR_LIT>\" % (host, shell_port)<EOL>", "docstring": "Makes the http endpoint for the heron shell\nif shell port is present, otherwise returns None.", "id": "f7361:m1"}
{"signature": "def hex_escape(bin_str):", "body": "printable = string.ascii_letters + string.digits + string.punctuation + '<STR_LIT:U+0020>'<EOL>return '<STR_LIT>'.join(ch if ch in printable else r'<STR_LIT>'.format(ord(ch)) for ch in bin_str)<EOL>", "docstring": "Hex encode a binary string", "id": "f7361:m0"}
{"signature": "def make_shell_filestats_url(host, shell_port, path):", "body": "return \"<STR_LIT>\" % (host, shell_port, path)<EOL>", "docstring": "Make the url for filestats data in heron-shell\nfrom the info stored in stmgr.", "id": "f7361:m5"}
{"signature": "def cygpath(x):", "body": "command = ['<STR_LIT>', '<STR_LIT>', x]<EOL>p = subprocess.Popen(command, stdout=subprocess.PIPE)<EOL>output, _ = p.communicate()<EOL>lines = output.split(\"<STR_LIT:\\n>\")<EOL>return lines[<NUM_LIT:0>]<EOL>", "docstring": "This will return the path of input arg for windows\n:return: the path in windows", "id": "f7361:m8"}
{"signature": "def get_heron_tracker_conf_dir():", "body": "conf_path = os.path.join(get_heron_tracker_dir(), CONF_DIR)<EOL>return conf_path<EOL>", "docstring": "This will provide heron tracker conf directory from .pex file.\n:return: absolute path of heron conf directory", "id": "f7361:m12"}
{"signature": "def make_shell_logfile_data_url(host, shell_port, instance_id, offset, length):", "body": "return \"<STR_LIT>\" %(host, shell_port, instance_id, offset, length)<EOL>", "docstring": "Make the url for log-file data in heron-shell\nfrom the info stored in stmgr.", "id": "f7361:m4"}
{"signature": "def normalized_class_path(x):", "body": "if sys.platform == '<STR_LIT>':<EOL><INDENT>return cygpath(x)<EOL><DEDENT>return identity(x)<EOL>", "docstring": "This will return the class path depending on the platform\n:return: the class path", "id": "f7361:m9"}
{"signature": "def parse_config_file(config_file):", "body": "expanded_config_file_path = os.path.expanduser(config_file)<EOL>if not os.path.lexists(expanded_config_file_path):<EOL><INDENT>return None<EOL><DEDENT>configs = {}<EOL>with open(expanded_config_file_path, '<STR_LIT:r>') as f:<EOL><INDENT>configs = yaml.load(f)<EOL><DEDENT>return configs<EOL>", "docstring": "This will parse the config file for the tracker\n:return: the config or None if the file is not found", "id": "f7361:m13"}
{"signature": "def get_self_hostname():", "body": "return socket.gethostname()<EOL>", "docstring": "get hostname of self", "id": "f7362:m39"}
{"signature": "def run(command, parser, cl_args, unknown_args):", "body": "action = cl_args[\"<STR_LIT:action>\"]<EOL>if action == Action.SET:<EOL><INDENT>call_editor(get_inventory_file(cl_args))<EOL>update_config_files(cl_args)<EOL><DEDENT>elif action == Action.CLUSTER:<EOL><INDENT>action_type = cl_args[\"<STR_LIT:type>\"]<EOL>if action_type == Cluster.START:<EOL><INDENT>start_cluster(cl_args)<EOL><DEDENT>elif action_type == Cluster.STOP:<EOL><INDENT>if check_sure(cl_args, \"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"):<EOL><INDENT>stop_cluster(cl_args)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % action_type)<EOL><DEDENT><DEDENT>elif action == Action.TEMPLATE:<EOL><INDENT>update_config_files(cl_args)<EOL><DEDENT>elif action == Action.GET:<EOL><INDENT>action_type = cl_args[\"<STR_LIT:type>\"]<EOL>if action_type == Get.SERVICE_URL:<EOL><INDENT>print(get_service_url(cl_args))<EOL><DEDENT>elif action_type == Get.HERON_UI_URL:<EOL><INDENT>print(get_heron_ui_url(cl_args))<EOL><DEDENT>elif action_type == Get.HERON_TRACKER_URL:<EOL><INDENT>print(get_heron_tracker_url(cl_args))<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % action_type)<EOL><DEDENT><DEDENT>elif action == Action.INFO:<EOL><INDENT>print_cluster_info(cl_args)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % action)<EOL><DEDENT>return SimpleResult(Status.Ok)<EOL>", "docstring": "runs parser", "id": "f7362:m1"}
{"signature": "def scp_cmd(src, dest, cl_args):", "body": "scp = '<STR_LIT>' % (src, dest)<EOL>return scp<EOL>", "docstring": "get scp command", "id": "f7362:m31"}
{"signature": "def get_remote_home(host, cl_args):", "body": "cmd = \"<STR_LIT>\"<EOL>if not is_self(host):<EOL><INDENT>cmd = ssh_remote_execute(cmd, host, cl_args)<EOL><DEDENT>pid = subprocess.Popen(cmd,<EOL>shell=True,<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE)<EOL>return_code = pid.wait()<EOL>output = pid.communicate()<EOL>if return_code != <NUM_LIT:0>:<EOL><INDENT>Log.error(\"<STR_LIT>\" % (host, output))<EOL>sys.exit(-<NUM_LIT:1>)<EOL><DEDENT>return output[<NUM_LIT:0>].strip(\"<STR_LIT:\\n>\")<EOL>", "docstring": "get home directory of remote host", "id": "f7362:m37"}
{"signature": "def get_self_ip():", "body": "return socket.gethostbyname(socket.gethostname())<EOL>", "docstring": "get IP address of self", "id": "f7362:m38"}
{"signature": "def distribute_package(roles, cl_args):", "body": "Log.info(\"<STR_LIT>\")<EOL>masters = roles[Role.MASTERS]<EOL>slaves = roles[Role.SLAVES]<EOL>tar_file = tempfile.NamedTemporaryFile(suffix=\"<STR_LIT>\").name<EOL>Log.debug(\"<STR_LIT>\" % (cl_args[\"<STR_LIT>\"], tar_file))<EOL>make_tarfile(tar_file, cl_args[\"<STR_LIT>\"])<EOL>dist_nodes = masters.union(slaves)<EOL>scp_package(tar_file, dist_nodes, cl_args)<EOL>", "docstring": "distribute Heron packages to all nodes", "id": "f7362:m19"}
{"signature": "def create_parser(subparsers):", "body": "parser = subparsers.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>add_help=True<EOL>)<EOL>cli_args.add_titles(parser)<EOL>parser_action = parser.add_subparsers()<EOL>parser_cluster = parser_action.add_parser(<EOL>Action.CLUSTER,<EOL>help='<STR_LIT>',<EOL>add_help=True,<EOL>formatter_class=argparse.RawTextHelpFormatter,<EOL>)<EOL>parser_cluster.set_defaults(action=Action.CLUSTER)<EOL>parser_set = parser_action.add_parser(<EOL>Action.SET,<EOL>help='<STR_LIT>',<EOL>add_help=True,<EOL>formatter_class=argparse.RawTextHelpFormatter<EOL>)<EOL>parser_set.set_defaults(action=Action.SET)<EOL>parser_template = parser_action.add_parser(<EOL>Action.TEMPLATE,<EOL>help='<STR_LIT>',<EOL>add_help=True,<EOL>formatter_class=argparse.RawTextHelpFormatter<EOL>)<EOL>parser_template.set_defaults(action=Action.TEMPLATE)<EOL>parser_cluster.add_argument(<EOL>TYPE,<EOL>type=str,<EOL>choices={Cluster.START, Cluster.STOP},<EOL>help=\"<EOL>ces supports the following:<EOL>art     - Start standalone Heron cluster<EOL>op      - Stop standalone Heron cluster<EOL>)<EOL>parser_template.add_argument(<EOL>TYPE,<EOL>type=str,<EOL>choices={\"<STR_LIT>\"},<EOL>)<EOL>parser_get = parser_action.add_parser(<EOL>Action.GET,<EOL>help='<STR_LIT>',<EOL>add_help=True,<EOL>formatter_class=argparse.RawTextHelpFormatter<EOL>)<EOL>parser_get.set_defaults(action=Action.GET)<EOL>parser_get.add_argument(<EOL>TYPE,<EOL>type=str,<EOL>choices={Get.SERVICE_URL, Get.HERON_TRACKER_URL, Get.HERON_UI_URL},<EOL>help=\"\"\"<STR_LIT>\"\"\"<EOL>)<EOL>parser_info = parser_action.add_parser(<EOL>Action.INFO,<EOL>help='<STR_LIT>',<EOL>add_help=True,<EOL>formatter_class=argparse.RawTextHelpFormatter<EOL>)<EOL>parser_info.set_defaults(action=Action.INFO)<EOL>add_additional_args([parser_set, parser_cluster, parser_template, parser_get, parser_info])<EOL>parser.set_defaults(subcommand='<STR_LIT>')<EOL>return parser<EOL>", "docstring": "Create a subparser for the standalone 
command\n:param subparsers:\n:return:", "id": "f7362:m0"}
{"signature": "def template_scheduler_yaml(cl_args, masters):", "body": "single_master = masters[<NUM_LIT:0>]<EOL>scheduler_config_actual = \"<STR_LIT>\" % cl_args[\"<STR_LIT>\"]<EOL>scheduler_config_template = \"<STR_LIT>\"% cl_args[\"<STR_LIT>\"]<EOL>template_file(scheduler_config_template, scheduler_config_actual,<EOL>{\"<STR_LIT>\": \"<STR_LIT>\" % single_master})<EOL>", "docstring": "Template scheduler.yaml", "id": "f7362:m4"}
{"signature": "def start_cluster(cl_args):", "body": "roles = read_and_parse_roles(cl_args)<EOL>masters = roles[Role.MASTERS]<EOL>slaves = roles[Role.SLAVES]<EOL>zookeepers = roles[Role.ZOOKEEPERS]<EOL>Log.info(\"<STR_LIT>\")<EOL>Log.info(\"<STR_LIT>\" % list(masters))<EOL>Log.info(\"<STR_LIT>\" % list(slaves))<EOL>Log.info(\"<STR_LIT>\" % list(zookeepers))<EOL>if not masters:<EOL><INDENT>Log.error(\"<STR_LIT>\")<EOL>sys.exit(-<NUM_LIT:1>)<EOL><DEDENT>if not slaves:<EOL><INDENT>Log.error(\"<STR_LIT>\")<EOL>sys.exit(-<NUM_LIT:1>)<EOL><DEDENT>if not zookeepers:<EOL><INDENT>Log.error(\"<STR_LIT>\")<EOL>sys.exit(-<NUM_LIT:1>)<EOL><DEDENT>update_config_files(cl_args)<EOL>dist_nodes = list(masters.union(slaves))<EOL>if not (len(dist_nodes) == <NUM_LIT:1> and is_self(dist_nodes[<NUM_LIT:0>])):<EOL><INDENT>distribute_package(roles, cl_args)<EOL><DEDENT>start_master_nodes(masters, cl_args)<EOL>start_slave_nodes(slaves, cl_args)<EOL>start_api_server(masters, cl_args)<EOL>start_heron_tools(masters, cl_args)<EOL>Log.info(\"<STR_LIT>\")<EOL>", "docstring": "Start a Heron standalone cluster", "id": "f7362:m16"}
{"signature": "def get_heron_ui_url(cl_args):", "body": "roles = read_and_parse_roles(cl_args)<EOL>return \"<STR_LIT>\" % list(roles[Role.MASTERS])[<NUM_LIT:0>]<EOL>", "docstring": "get service url for standalone cluster", "id": "f7362:m12"}
{"signature": "def start_heron_tools(masters, cl_args):", "body": "single_master = list(masters)[<NUM_LIT:0>]<EOL>wait_for_master_to_start(single_master)<EOL>cmd = \"<STR_LIT>\"% (get_nomad_path(cl_args), get_heron_tools_job_file(cl_args))<EOL>Log.info(\"<STR_LIT>\" % single_master)<EOL>if not is_self(single_master):<EOL><INDENT>cmd = ssh_remote_execute(cmd, single_master, cl_args)<EOL><DEDENT>Log.debug(cmd)<EOL>pid = subprocess.Popen(cmd,<EOL>shell=True,<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE)<EOL>return_code = pid.wait()<EOL>output = pid.communicate()<EOL>Log.debug(\"<STR_LIT>\" % (return_code, output))<EOL>if return_code != <NUM_LIT:0>:<EOL><INDENT>Log.error(\"<STR_LIT>\" % (single_master, output[<NUM_LIT:1>]))<EOL>sys.exit(-<NUM_LIT:1>)<EOL><DEDENT>wait_for_job_to_start(single_master, \"<STR_LIT>\")<EOL>Log.info(\"<STR_LIT>\")<EOL>", "docstring": "Start Heron tracker and UI", "id": "f7362:m18"}
{"signature": "def add_additional_args(parsers):", "body": "for parser in parsers:<EOL><INDENT>cli_args.add_verbose(parser)<EOL>cli_args.add_config(parser)<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>default=config.get_heron_dir(),<EOL>help='<STR_LIT>')<EOL><DEDENT>", "docstring": "add additional parameters to parser", "id": "f7362:m14"}
{"signature": "def start_master_nodes(masters, cl_args):", "body": "pids = []<EOL>for master in masters:<EOL><INDENT>Log.info(\"<STR_LIT>\" % master)<EOL>cmd = \"<STR_LIT>\"% (get_nomad_path(cl_args), get_nomad_master_config_file(cl_args))<EOL>if not is_self(master):<EOL><INDENT>cmd = ssh_remote_execute(cmd, master, cl_args)<EOL><DEDENT>Log.debug(cmd)<EOL>pid = subprocess.Popen(cmd,<EOL>shell=True,<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE)<EOL>pids.append({\"<STR_LIT>\": pid, \"<STR_LIT>\": master})<EOL><DEDENT>errors = []<EOL>for entry in pids:<EOL><INDENT>pid = entry[\"<STR_LIT>\"]<EOL>return_code = pid.wait()<EOL>output = pid.communicate()<EOL>Log.debug(\"<STR_LIT>\" % (return_code, output))<EOL>if return_code != <NUM_LIT:0>:<EOL><INDENT>errors.append(\"<STR_LIT>\" % (entry[\"<STR_LIT>\"], output[<NUM_LIT:1>]))<EOL><DEDENT><DEDENT>if errors:<EOL><INDENT>for error in errors:<EOL><INDENT>Log.error(error)<EOL><DEDENT>sys.exit(-<NUM_LIT:1>)<EOL><DEDENT>Log.info(\"<STR_LIT>\")<EOL>", "docstring": "Start master nodes", "id": "f7362:m24"}
{"signature": "def get_hostname(ip_addr, cl_args):", "body": "if is_self(ip_addr):<EOL><INDENT>return get_self_hostname()<EOL><DEDENT>cmd = \"<STR_LIT>\"<EOL>ssh_cmd = ssh_remote_execute(cmd, ip_addr, cl_args)<EOL>pid = subprocess.Popen(ssh_cmd,<EOL>shell=True,<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE)<EOL>return_code = pid.wait()<EOL>output = pid.communicate()<EOL>if return_code != <NUM_LIT:0>:<EOL><INDENT>Log.error(\"<STR_LIT>\" % (ip_addr, output))<EOL>sys.exit(-<NUM_LIT:1>)<EOL><DEDENT>return output[<NUM_LIT:0>].strip(\"<STR_LIT:\\n>\")<EOL>", "docstring": "get host name of remote host", "id": "f7362:m40"}
{"signature": "def make_tarfile(output_filename, source_dir):", "body": "with tarfile.open(output_filename, \"<STR_LIT>\") as tar:<EOL><INDENT>tar.add(source_dir, arcname=os.path.basename(source_dir))<EOL><DEDENT>", "docstring": "Tar a directory", "id": "f7362:m23"}
{"signature": "def scp_package(package_file, destinations, cl_args):", "body": "pids = []<EOL>for dest in destinations:<EOL><INDENT>if is_self(dest):<EOL><INDENT>continue<EOL><DEDENT>Log.info(\"<STR_LIT>\" % dest)<EOL>file_path = \"<STR_LIT>\"<EOL>dest_file_path = \"<STR_LIT>\" % (dest, file_path)<EOL>remote_cmd = \"<STR_LIT>\"\"<STR_LIT>\" % (file_path)<EOL>cmd = '<STR_LIT>'% (scp_cmd(package_file, dest_file_path, cl_args),<EOL>ssh_remote_execute(remote_cmd, dest, cl_args))<EOL>Log.debug(cmd)<EOL>pid = subprocess.Popen(cmd,<EOL>shell=True,<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE)<EOL>pids.append({\"<STR_LIT>\": pid, \"<STR_LIT>\": dest})<EOL><DEDENT>errors = []<EOL>for entry in pids:<EOL><INDENT>pid = entry[\"<STR_LIT>\"]<EOL>return_code = pid.wait()<EOL>output = pid.communicate()<EOL>Log.debug(\"<STR_LIT>\" % (return_code, output))<EOL>if return_code != <NUM_LIT:0>:<EOL><INDENT>errors.append(\"<STR_LIT>\" % (entry[\"<STR_LIT>\"], output[<NUM_LIT:1>]))<EOL><DEDENT><DEDENT>if errors:<EOL><INDENT>for error in errors:<EOL><INDENT>Log.error(error)<EOL><DEDENT>sys.exit(-<NUM_LIT:1>)<EOL><DEDENT>Log.info(\"<STR_LIT>\")<EOL>", "docstring": "scp and extract package", "id": "f7362:m22"}
{"signature": "def stop_cluster(cl_args):", "body": "Log.info(\"<STR_LIT>\")<EOL>roles = read_and_parse_roles(cl_args)<EOL>masters = roles[Role.MASTERS]<EOL>slaves = roles[Role.SLAVES]<EOL>dist_nodes = masters.union(slaves)<EOL>if masters:<EOL><INDENT>try:<EOL><INDENT>single_master = list(masters)[<NUM_LIT:0>]<EOL>jobs = get_jobs(cl_args, single_master)<EOL>for job in jobs:<EOL><INDENT>job_id = job[\"<STR_LIT>\"]<EOL>Log.info(\"<STR_LIT>\" % job_id)<EOL>delete_job(cl_args, job_id, single_master)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>Log.debug(\"<STR_LIT>\")<EOL>Log.debug(sys.exc_info()[<NUM_LIT:0>])<EOL><DEDENT><DEDENT>for node in dist_nodes:<EOL><INDENT>Log.info(\"<STR_LIT>\" % node)<EOL>if not is_self(node):<EOL><INDENT>cmd = \"<STR_LIT>\"\"<STR_LIT>\"<EOL>cmd = ssh_remote_execute(cmd, node, cl_args)<EOL><DEDENT>else:<EOL><INDENT>cmd = \"<STR_LIT>\"\"<STR_LIT>\"<EOL><DEDENT>Log.debug(cmd)<EOL>pid = subprocess.Popen(cmd,<EOL>shell=True,<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE)<EOL>return_code = pid.wait()<EOL>output = pid.communicate()<EOL>Log.debug(\"<STR_LIT>\" % (return_code, output))<EOL>Log.info(\"<STR_LIT>\" % node)<EOL>cmd = \"<STR_LIT>\"<EOL>if not is_self(node):<EOL><INDENT>cmd = ssh_remote_execute(cmd, node, cl_args)<EOL><DEDENT>Log.debug(cmd)<EOL>pid = subprocess.Popen(cmd,<EOL>shell=True,<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE)<EOL>return_code = pid.wait()<EOL>output = pid.communicate()<EOL>Log.debug(\"<STR_LIT>\" % (return_code, output))<EOL><DEDENT>", "docstring": "teardown the cluster", "id": "f7362:m15"}
{"signature": "def run(handlers, command, parser, command_args, unknown_args):", "body": "if command in handlers:<EOL><INDENT>return handlers[command].run(command, parser, command_args, unknown_args)<EOL><DEDENT>else:<EOL><INDENT>err_context = '<STR_LIT>' % command<EOL>return result.SimpleResult(result.Status.InvocationError, err_context)<EOL><DEDENT>", "docstring": "Run the command\n:param command:\n:param parser:\n:param command_args:\n:param unknown_args:\n:return:", "id": "f7363:m2"}
{"signature": "def get_command_handlers():", "body": "return {<EOL>'<STR_LIT>': standalone,<EOL>}<EOL>", "docstring": "Create a map of command names and handlers", "id": "f7363:m0"}
{"signature": "def create_parser(command_handlers):", "body": "parser = argparse.ArgumentParser(<EOL>prog='<STR_LIT>',<EOL>epilog=HELP_EPILOG,<EOL>formatter_class=config.SubcommandHelpFormatter,<EOL>add_help=True)<EOL>subparsers = parser.add_subparsers(<EOL>title=\"<STR_LIT>\",<EOL>metavar='<STR_LIT>')<EOL>command_list = sorted(command_handlers.items())<EOL>for command in command_list:<EOL><INDENT>command[<NUM_LIT:1>].create_parser(subparsers)<EOL><DEDENT>return parser<EOL>", "docstring": "Main parser\n:return:", "id": "f7363:m1"}
{"signature": "def execute(handlers):", "body": "<EOL>check_environment()<EOL>parser = create_parser(handlers)<EOL>if len(sys.argv[<NUM_LIT:1>:]) == <NUM_LIT:0>:<EOL><INDENT>parser.print_help()<EOL>return <NUM_LIT:0><EOL><DEDENT>sys.argv = config.insert_bool_values(sys.argv)<EOL>try:<EOL><INDENT>args, unknown_args = parser.parse_known_args()<EOL><DEDENT>except ValueError as ex:<EOL><INDENT>Log.error(\"<STR_LIT>\", str(ex))<EOL>Log.debug(traceback.format_exc())<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>command_line_args = vars(args)<EOL>log.set_logging_level(command_line_args)<EOL>Log.debug(\"<STR_LIT>\", command_line_args)<EOL>command = command_line_args['<STR_LIT>']<EOL>Log.debug(\"<STR_LIT>\", command_line_args)<EOL>results = run(handlers, command, parser, command_line_args, unknown_args)<EOL>return <NUM_LIT:0> if result.is_successful(results) else <NUM_LIT:1><EOL>", "docstring": "Run the command\n:return:", "id": "f7363:m5"}
{"signature": "def cleanup(files):", "body": "for cur_file in files:<EOL><INDENT>if os.path.isdir(cur_file):<EOL><INDENT>shutil.rmtree(cur_file)<EOL><DEDENT>else:<EOL><INDENT>shutil.rmtree(os.path.dirname(cur_file))<EOL><DEDENT><DEDENT>", "docstring": ":param files:\n:return:", "id": "f7363:m3"}
{"signature": "def run(command, parser, cl_args, unknown_args):", "body": "configcommand = cl_args.get('<STR_LIT>', None)<EOL>if configcommand == '<STR_LIT>':<EOL><INDENT>return _set(cl_args)<EOL><DEDENT>elif configcommand == '<STR_LIT>':<EOL><INDENT>return _unset(cl_args)<EOL><DEDENT>else:<EOL><INDENT>return _list(cl_args)<EOL><DEDENT>", "docstring": ":param command:\n:param parser:\n:param args:\n:param unknown_args:\n:return:", "id": "f7366:m4"}
{"signature": "def check_direct_mode_cluster_definition(cluster, config_path):", "body": "config_path = config.get_heron_cluster_conf_dir(cluster, config_path)<EOL>if not os.path.isdir(config_path):<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Check the cluster definition for direct mode\n:param cluster:\n:param config_path:\n:return:", "id": "f7367:m1"}
{"signature": "def read_server_mode_cluster_definition(cluster, cl_args):", "body": "client_confs = dict()<EOL>client_confs[cluster] = cliconfig.cluster_config(cluster)<EOL>if cl_args.get('<STR_LIT>', None):<EOL><INDENT>client_confs[cluster]['<STR_LIT>'] = cl_args['<STR_LIT>']<EOL><DEDENT>return client_confs<EOL>", "docstring": "Read the cluster definition for server mode\n:param cluster:\n:param cl_args:\n:param config_file:\n:return:", "id": "f7367:m0"}
{"signature": "def create_parser(subparsers):", "body": "parser = subparsers.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>usage=\"<STR_LIT>\" +\"<STR_LIT>\",<EOL>add_help=True<EOL>)<EOL>cli_args.add_titles(parser)<EOL>cli_args.add_cluster_role_env(parser)<EOL>cli_args.add_topology_file(parser)<EOL>cli_args.add_topology_class(parser)<EOL>cli_args.add_config(parser)<EOL>cli_args.add_deactive_deploy(parser)<EOL>cli_args.add_dry_run(parser)<EOL>cli_args.add_extra_launch_classpath(parser)<EOL>cli_args.add_release_yaml_file(parser)<EOL>cli_args.add_service_url(parser)<EOL>cli_args.add_system_property(parser)<EOL>cli_args.add_verbose(parser)<EOL>parser.set_defaults(subcommand='<STR_LIT>')<EOL>return parser<EOL>", "docstring": "Create a subparser for the submit command\n:param subparsers:\n:return:", "id": "f7369:m1"}
{"signature": "def launch_topology_server(cl_args, topology_file, topology_defn_file, topology_name):", "body": "service_apiurl = cl_args['<STR_LIT>'] + rest.ROUTE_SIGNATURES['<STR_LIT>'][<NUM_LIT:1>]<EOL>service_method = rest.ROUTE_SIGNATURES['<STR_LIT>'][<NUM_LIT:0>]<EOL>data = dict(<EOL>name=topology_name,<EOL>cluster=cl_args['<STR_LIT>'],<EOL>role=cl_args['<STR_LIT>'],<EOL>environment=cl_args['<STR_LIT>'],<EOL>user=cl_args['<STR_LIT>'],<EOL>)<EOL>Log.info(\"<STR_LIT>\" + str(cl_args))<EOL>overrides = dict()<EOL>if '<STR_LIT>' in cl_args:<EOL><INDENT>overrides = config.parse_override_config(cl_args['<STR_LIT>'])<EOL><DEDENT>if overrides:<EOL><INDENT>data.update(overrides)<EOL><DEDENT>if cl_args['<STR_LIT>']:<EOL><INDENT>data[\"<STR_LIT>\"] = True<EOL><DEDENT>files = dict(<EOL>definition=open(topology_defn_file, '<STR_LIT:rb>'),<EOL>topology=open(topology_file, '<STR_LIT:rb>'),<EOL>)<EOL>err_ctxt = \"<STR_LIT>\" % (topology_name, launch_mode_msg(cl_args))<EOL>succ_ctxt = \"<STR_LIT>\" % (topology_name, launch_mode_msg(cl_args))<EOL>try:<EOL><INDENT>r = service_method(service_apiurl, data=data, files=files)<EOL>ok = r.status_code is requests.codes.ok<EOL>created = r.status_code is requests.codes.created<EOL>s = Status.Ok if created or ok else Status.HeronError<EOL>if s is Status.HeronError:<EOL><INDENT>Log.error(r.json().get('<STR_LIT:message>', \"<STR_LIT>\" % r.status_code))<EOL><DEDENT>elif ok:<EOL><INDENT>print(r.json().get(\"<STR_LIT>\"))<EOL><DEDENT><DEDENT>except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as err:<EOL><INDENT>Log.error(err)<EOL>return SimpleResult(Status.HeronError, err_ctxt, succ_ctxt)<EOL><DEDENT>return SimpleResult(s, err_ctxt, succ_ctxt)<EOL>", "docstring": "Launch a topology given topology jar, its definition file and configurations\n:param cl_args:\n:param topology_file:\n:param topology_defn_file:\n:param topology_name:\n:return:", "id": "f7369:m3"}
{"signature": "def launch_topologies(cl_args, topology_file, tmp_dir):", "body": "<EOL>defn_files = glob.glob(tmp_dir + '<STR_LIT>')<EOL>if len(defn_files) == <NUM_LIT:0>:<EOL><INDENT>return SimpleResult(Status.HeronError, \"<STR_LIT>\" % tmp_dir)<EOL><DEDENT>results = []<EOL>for defn_file in defn_files:<EOL><INDENT>topology_defn = topology_pb2.Topology()<EOL>try:<EOL><INDENT>handle = open(defn_file, \"<STR_LIT:rb>\")<EOL>topology_defn.ParseFromString(handle.read())<EOL>handle.close()<EOL><DEDENT>except Exception as e:<EOL><INDENT>err_context = \"<STR_LIT>\" % (defn_file, e)<EOL>return SimpleResult(Status.HeronError, err_context)<EOL><DEDENT>Log.info(\"<STR_LIT>\", topology_defn.name, launch_mode_msg(cl_args))<EOL>if cl_args['<STR_LIT>'] == config.SERVER_MODE:<EOL><INDENT>res = launch_topology_server(<EOL>cl_args, topology_file, defn_file, topology_defn.name)<EOL><DEDENT>else:<EOL><INDENT>res = launch_a_topology(<EOL>cl_args, tmp_dir, topology_file, defn_file, topology_defn.name)<EOL><DEDENT>results.append(res)<EOL><DEDENT>return results<EOL>", "docstring": "Launch topologies\n:param cl_args:\n:param topology_file:\n:param tmp_dir:\n:return: list(Responses)", "id": "f7369:m4"}
{"signature": "def launch_mode_msg(cl_args):", "body": "if cl_args['<STR_LIT>']:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>return \"<STR_LIT>\"<EOL>", "docstring": "Depending on the mode of launching a topology provide a message\n:param cl_args:\n:return:", "id": "f7369:m0"}
{"signature": "def build_extra_args_dict(cl_args):", "body": "<EOL>component_parallelism = cl_args['<STR_LIT>']<EOL>runtime_configs = cl_args['<STR_LIT>']<EOL>container_number = cl_args['<STR_LIT>']<EOL>if (component_parallelism and runtime_configs) or (container_number and runtime_configs):<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\" +<EOL>\"<STR_LIT>\")<EOL><DEDENT>dict_extra_args = {}<EOL>nothing_set = True<EOL>if component_parallelism:<EOL><INDENT>dict_extra_args.update({'<STR_LIT>': component_parallelism})<EOL>nothing_set = False<EOL><DEDENT>if container_number:<EOL><INDENT>dict_extra_args.update({'<STR_LIT>': container_number})<EOL>nothing_set = False<EOL><DEDENT>if runtime_configs:<EOL><INDENT>dict_extra_args.update({'<STR_LIT>': runtime_configs})<EOL>nothing_set = False<EOL><DEDENT>if nothing_set:<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\")<EOL><DEDENT>if cl_args['<STR_LIT>']:<EOL><INDENT>dict_extra_args.update({'<STR_LIT>': True})<EOL>if '<STR_LIT>' in cl_args:<EOL><INDENT>dict_extra_args.update({'<STR_LIT>': cl_args[\"<STR_LIT>\"]})<EOL><DEDENT><DEDENT>return dict_extra_args<EOL>", "docstring": "Build extra args map", "id": "f7371:m1"}
{"signature": "def get_heron_config():", "body": "opt_list = []<EOL>for (key, value) in config_opts.items():<EOL><INDENT>opt_list.append('<STR_LIT>' % (key, value))<EOL><DEDENT>all_opts = ('<STR_LIT:U+002C>'.join(opt_list)).replace('<STR_LIT:U+0020>', '<STR_LIT>')<EOL>return all_opts<EOL>", "docstring": "Get config opts from the global variable\n:return:", "id": "f7372:m0"}
{"signature": "def get_config(k):", "body": "global config_opts<EOL>if k in config_opts:<EOL><INDENT>return config_opts[k]<EOL><DEDENT>return None<EOL>", "docstring": "Get config opts from the config map\n:param k:\n:return:", "id": "f7372:m1"}
{"signature": "def run_direct(command, cl_args, action, extra_args=[], extra_lib_jars=[]):", "body": "topology_name = cl_args['<STR_LIT>']<EOL>new_args = [<EOL>\"<STR_LIT>\", cl_args['<STR_LIT>'],<EOL>\"<STR_LIT>\", cl_args['<STR_LIT>'],<EOL>\"<STR_LIT>\", cl_args['<STR_LIT>'],<EOL>\"<STR_LIT>\", cl_args['<STR_LIT>'],<EOL>\"<STR_LIT>\", config.get_heron_dir(),<EOL>\"<STR_LIT>\", cl_args['<STR_LIT>'],<EOL>\"<STR_LIT>\", cl_args['<STR_LIT>'],<EOL>\"<STR_LIT>\", config.get_heron_release_file(),<EOL>\"<STR_LIT>\", topology_name,<EOL>\"<STR_LIT>\", command,<EOL>]<EOL>new_args += extra_args<EOL>lib_jars = config.get_heron_libs(jars.scheduler_jars() + jars.statemgr_jars())<EOL>lib_jars += extra_lib_jars<EOL>if Log.getEffectiveLevel() == logging.DEBUG:<EOL><INDENT>new_args.append(\"<STR_LIT>\")<EOL><DEDENT>result = execute.heron_class(<EOL>'<STR_LIT>',<EOL>lib_jars,<EOL>extra_jars=[],<EOL>args=new_args<EOL>)<EOL>err_msg = \"<STR_LIT>\" % (action, topology_name)<EOL>succ_msg = \"<STR_LIT>\" % (action, topology_name)<EOL>result.add_context(err_msg, succ_msg)<EOL>return result<EOL>", "docstring": "helper function to take action on topologies\n:param command:\n:param cl_args:\n:param action:        description of action taken\n:return:", "id": "f7373:m3"}
{"signature": "def run(command, parser, args, unknown_args):", "body": "<EOL>command_help = args['<STR_LIT>']<EOL>if command_help == '<STR_LIT>':<EOL><INDENT>parser.print_help()<EOL>return SimpleResult(Status.Ok)<EOL><DEDENT>subparser = config.get_subparser(parser, command_help)<EOL>if subparser:<EOL><INDENT>print(subparser.format_help())<EOL>return SimpleResult(Status.Ok)<EOL><DEDENT>else:<EOL><INDENT>Log.error(\"<STR_LIT>\", command_help)<EOL>return SimpleResult(Status.InvocationError)<EOL><DEDENT>", "docstring": ":param command:\n:param parser:\n:param args:\n:param unknown_args:\n:return:", "id": "f7374:m1"}
{"signature": "def create_parser(command_handlers):", "body": "parser = argparse.ArgumentParser(<EOL>prog='<STR_LIT>',<EOL>epilog=HELP_EPILOG,<EOL>formatter_class=config.SubcommandHelpFormatter,<EOL>add_help=True)<EOL>subparsers = parser.add_subparsers(<EOL>title=\"<STR_LIT>\",<EOL>metavar='<STR_LIT>')<EOL>command_list = sorted(command_handlers.items())<EOL>for command in command_list:<EOL><INDENT>command[<NUM_LIT:1>].create_parser(subparsers)<EOL><DEDENT>return parser<EOL>", "docstring": "Main parser\n:return:", "id": "f7375:m1"}
{"signature": "def execute(handlers, local_commands):", "body": "<EOL>check_environment()<EOL>parser = create_parser(handlers)<EOL>if len(sys.argv[<NUM_LIT:1>:]) == <NUM_LIT:0>:<EOL><INDENT>parser.print_help()<EOL>return <NUM_LIT:0><EOL><DEDENT>sys.argv = config.insert_bool_values(sys.argv)<EOL>try:<EOL><INDENT>args, unknown_args = parser.parse_known_args()<EOL><DEDENT>except ValueError as ex:<EOL><INDENT>Log.error(\"<STR_LIT>\", str(ex))<EOL>Log.debug(traceback.format_exc())<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>command_line_args = vars(args)<EOL>log.set_logging_level(command_line_args)<EOL>Log.debug(\"<STR_LIT>\", command_line_args)<EOL>mmand to be execute<EOL>command = command_line_args['<STR_LIT>']<EOL>is_local_command = command in local_commands<EOL>if command == '<STR_LIT:version>':<EOL><INDENT>results = run(handlers, command, parser, command_line_args, unknown_args)<EOL>return <NUM_LIT:0> if result.is_successful(results) else <NUM_LIT:1><EOL><DEDENT>if not is_local_command:<EOL><INDENT>log.set_logging_level(command_line_args)<EOL>Log.debug(\"<STR_LIT>\", command_line_args)<EOL>command_line_args = extract_common_args(command, parser, command_line_args)<EOL>command_line_args = deployment_mode(command, parser, command_line_args)<EOL>if not command_line_args:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>if command_line_args['<STR_LIT>'] == config.DIRECT_MODE and command != \"<STR_LIT:version>\":<EOL><INDENT>cleaned_up_files.append(command_line_args['<STR_LIT>'])<EOL>atexit.register(cleanup, cleaned_up_files)<EOL><DEDENT><DEDENT>Log.debug(\"<STR_LIT>\", command_line_args)<EOL>start = time.time()<EOL>results = run(handlers, command, parser, command_line_args, unknown_args)<EOL>if not is_local_command:<EOL><INDENT>result.render(results)<EOL><DEDENT>end = time.time()<EOL>if not is_local_command:<EOL><INDENT>sys.stdout.flush()<EOL>Log.debug('<STR_LIT>', (end - start))<EOL><DEDENT>return <NUM_LIT:0> if result.is_successful(results) else <NUM_LIT:1><EOL>", "docstring": 
"Run the command\n:return:", "id": "f7375:m9"}
{"signature": "def run(command, parser, cl_args, unknown_args):", "body": "Log.debug(\"<STR_LIT>\", cl_args)<EOL>return cli_helper.run(command, cl_args, \"<STR_LIT>\")<EOL>", "docstring": ":param command:\n:param parser:\n:param cl_args:\n:param unknown_args:\n:return:", "id": "f7378:m1"}
{"signature": "def create_parser(subparsers):", "body": "return cli_helper.create_parser(subparsers, '<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": ":param subparsers:\n:return:", "id": "f7378:m0"}
{"signature": "def renderProcessStdOut(self, stdout):", "body": "<EOL>assert self.status is not None<EOL>if self.status == Status.Ok:<EOL><INDENT>self._do_log(Log.info, stdout)<EOL><DEDENT>elif self.status == Status.HeronError:<EOL><INDENT>self._do_log(Log.error, stdout)<EOL><DEDENT>elif self.status == Status.DryRun:<EOL><INDENT>self._do_print(sys.stdout, stdout)<EOL><DEDENT>elif self.status == Status.InvocationError:<EOL><INDENT>self._do_print(sys.stdout, stdout)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\" %(self.status.value, list(Status)))<EOL><DEDENT>", "docstring": "render stdout of shelled-out process\n            stdout always contains information Java process wants to\n            propagate back to cli, so we do special rendering here\n        :param stdout: all lines from shelled-out process\n        :return:", "id": "f7379:c3:m2"}
{"signature": "def renderProcessStdErr(self, stderr_line):", "body": "retcode = self.process.poll()<EOL>if retcode is not None and status_type(retcode) == Status.InvocationError:<EOL><INDENT>self._do_log(Log.error, stderr_line)<EOL><DEDENT>else:<EOL><INDENT>self._do_print(sys.stderr, stderr_line)<EOL><DEDENT>", "docstring": "render stderr of shelled-out process\n            stderr could be error message of failure of invoking process or\n            normal stderr output from successfully shelled-out process.\n            In the first case, ``Popen'' should fail fast and we should be able to\n            get return code immediately. We then render the failure message.\n            In the second case, we simply print stderr line in stderr.\n            The way to handle the first case is shaky but should be the best we can\n            do since we have conflicts of design goals here.\n        :param stderr_line: one line from shelled-out process\n        :return:", "id": "f7379:c3:m1"}
{"signature": "def add_context(self, err_context, succ_context=None):", "body": "self.err_context = err_context<EOL>self.succ_context = succ_context<EOL>", "docstring": "Prepend msg to add some context information\n\n        :param pmsg: context info\n        :return: None", "id": "f7379:c1:m4"}
{"signature": "def add_topology(parser):", "body": "parser.add_argument(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>return parser<EOL>", "docstring": ":param parser:\n:return:", "id": "f7380:m2"}
{"signature": "def add_deactive_deploy(parser):", "body": "parser.add_argument(<EOL>'<STR_LIT>',<EOL>default=False,<EOL>help='<STR_LIT>')<EOL>return parser<EOL>", "docstring": ":param parser:\n:return:", "id": "f7380:m9"}
{"signature": "def run(command, parser, cl_args, unknown_args):", "body": "Log.debug(\"<STR_LIT>\", cl_args)<EOL>return cli_helper.run(command, cl_args, \"<STR_LIT>\")<EOL>", "docstring": ":param command:\n:param parser:\n:param cl_args:\n:param unknown_args:\n:return:", "id": "f7381:m1"}
{"signature": "def create_parser(subparsers):", "body": "return cli_helper.create_parser(subparsers, '<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": ":param subparsers:\n:return:", "id": "f7381:m0"}
{"signature": "def uploader_jars():", "body": "jars = [<EOL>os.path.join(config.get_heron_lib_dir(), \"<STR_LIT>\", \"<STR_LIT:*>\")<EOL>]<EOL>return jars<EOL>", "docstring": "Get the uploader jars\n:return:", "id": "f7382:m3"}
{"signature": "def statemgr_jars():", "body": "jars = [<EOL>os.path.join(config.get_heron_lib_dir(), \"<STR_LIT>\", \"<STR_LIT:*>\")<EOL>]<EOL>return jars<EOL>", "docstring": "Get the statemgr jars\n:return:", "id": "f7382:m4"}
{"signature": "def scheduler_jars():", "body": "jars = [<EOL>os.path.join(config.get_heron_lib_dir(), \"<STR_LIT>\", \"<STR_LIT:*>\")<EOL>]<EOL>return jars<EOL>", "docstring": "Get the scheduler jars\n:return:", "id": "f7382:m2"}
{"signature": "def run(command, parser, cl_args, unknown_args):", "body": "Log.debug(\"<STR_LIT>\", cl_args)<EOL>return cli_helper.run(command, cl_args, \"<STR_LIT>\")<EOL>", "docstring": ":param command:\n:param parser:\n:param cl_args:\n:param unknown_args:\n:return:", "id": "f7383:m1"}
{"signature": "def run(command, parser, cl_args, unknown_args):", "body": "Log.debug(\"<STR_LIT>\", cl_args)<EOL>container_id = cl_args['<STR_LIT>']<EOL>if cl_args['<STR_LIT>'] == config.SERVER_MODE:<EOL><INDENT>dict_extra_args = {\"<STR_LIT>\": str(container_id)}<EOL>return cli_helper.run_server(command, cl_args, \"<STR_LIT>\", extra_args=dict_extra_args)<EOL><DEDENT>else:<EOL><INDENT>list_extra_args = [\"<STR_LIT>\", str(container_id)]<EOL>return cli_helper.run_direct(command, cl_args, \"<STR_LIT>\", extra_args=list_extra_args)<EOL><DEDENT>", "docstring": ":param command:\n:param parser:\n:param cl_args:\n:param unknown_args:\n:return:", "id": "f7384:m1"}
{"signature": "def create_parser(subparsers):", "body": "parser = subparsers.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>usage=\"<STR_LIT>\",<EOL>add_help=True)<EOL>args.add_titles(parser)<EOL>args.add_cluster_role_env(parser)<EOL>args.add_topology(parser)<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:?>',<EOL>type=int,<EOL>default=-<NUM_LIT:1>,<EOL>help='<STR_LIT>')<EOL>args.add_config(parser)<EOL>args.add_service_url(parser)<EOL>args.add_verbose(parser)<EOL>parser.set_defaults(subcommand='<STR_LIT>')<EOL>return parser<EOL>", "docstring": ":param subparsers:\n:return:", "id": "f7384:m0"}
{"signature": "def run(command, parser, known_args, unknown_args):", "body": "config.print_build_info()<EOL>return True<EOL>", "docstring": "run command", "id": "f7386:m1"}
{"signature": "def create_parser(subparsers):", "body": "parser = subparsers.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>usage=\"<STR_LIT>\",<EOL>add_help=True)<EOL>args.add_cluster_role_env(parser)<EOL>args.add_verbose(parser)<EOL>args.add_tracker_url(parser)<EOL>args.add_config(parser)<EOL>parser.set_defaults(subcommand='<STR_LIT>')<EOL>return subparsers<EOL>", "docstring": "create parser", "id": "f7387:m0"}
{"signature": "def show_cluster(cl_args, cluster):", "body": "try:<EOL><INDENT>result = tracker_access.get_cluster_topologies(cluster)<EOL>if not result:<EOL><INDENT>Log.error('<STR_LIT>' % cluster)<EOL>return False<EOL><DEDENT>result = result[cluster]<EOL><DEDENT>except Exception:<EOL><INDENT>Log.error(\"<STR_LIT>\", cl_args[\"<STR_LIT>\"])<EOL>return False<EOL><DEDENT>table, header, rest_count = to_table(result)<EOL>print('<STR_LIT>' % cluster)<EOL>if rest_count:<EOL><INDENT>print('<STR_LIT>' % rest_count)<EOL><DEDENT>print(tabulate(table, headers=header))<EOL>return True<EOL>", "docstring": "print topologies information to stdout", "id": "f7387:m2"}
{"signature": "def show_cluster_role_env(cl_args, cluster, role, env):", "body": "try:<EOL><INDENT>result = tracker_access.get_cluster_role_env_topologies(cluster, role, env)<EOL>if not result:<EOL><INDENT>Log.error('<STR_LIT>' % '<STR_LIT:/>'.join([cluster, role, env]))<EOL>return False<EOL><DEDENT>result = result[cluster]<EOL><DEDENT>except Exception:<EOL><INDENT>Log.error(\"<STR_LIT>\", cl_args[\"<STR_LIT>\"])<EOL>return False<EOL><DEDENT>table, header, rest_count = to_table(result)<EOL>print('<STR_LIT>' % (cluster, role, env))<EOL>if rest_count:<EOL><INDENT>print('<STR_LIT>' % rest_count)<EOL><DEDENT>print(tabulate(table, headers=header))<EOL>return True<EOL>", "docstring": "print topologies information to stdout", "id": "f7387:m4"}
{"signature": "def to_table(result):", "body": "max_count = <NUM_LIT:20><EOL>table, count = [], <NUM_LIT:0><EOL>for role, envs_topos in result.items():<EOL><INDENT>for env, topos in envs_topos.items():<EOL><INDENT>for topo in topos:<EOL><INDENT>count += <NUM_LIT:1><EOL>if count > max_count:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>table.append([role, env, topo])<EOL><DEDENT><DEDENT><DEDENT><DEDENT>header = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>rest_count = <NUM_LIT:0> if count <= max_count else count - max_count<EOL>return table, header, rest_count<EOL>", "docstring": "normalize raw result to table", "id": "f7387:m1"}
{"signature": "def show_cluster_role(cl_args, cluster, role):", "body": "try:<EOL><INDENT>result = tracker_access.get_cluster_role_topologies(cluster, role)<EOL>if not result:<EOL><INDENT>Log.error('<STR_LIT>' % '<STR_LIT:/>'.join([cluster, role]))<EOL>return False<EOL><DEDENT>result = result[cluster]<EOL><DEDENT>except Exception:<EOL><INDENT>Log.error(\"<STR_LIT>\", cl_args[\"<STR_LIT>\"])<EOL>return False<EOL><DEDENT>table, header, rest_count = to_table(result)<EOL>print('<STR_LIT>' % (cluster, role))<EOL>if rest_count:<EOL><INDENT>print('<STR_LIT>' % rest_count)<EOL><DEDENT>print(tabulate(table, headers=header))<EOL>return True<EOL>", "docstring": "print topologies information to stdout", "id": "f7387:m3"}
{"signature": "def run(command, parser, cl_args, unknown_args):", "body": "location = cl_args['<STR_LIT>'].split('<STR_LIT:/>')<EOL>if len(location) == <NUM_LIT:1>:<EOL><INDENT>return show_cluster(cl_args, *location)<EOL><DEDENT>elif len(location) == <NUM_LIT:2>:<EOL><INDENT>return show_cluster_role(cl_args, *location)<EOL><DEDENT>elif len(location) == <NUM_LIT:3>:<EOL><INDENT>return show_cluster_role_env(cl_args, *location)<EOL><DEDENT>else:<EOL><INDENT>Log.error('<STR_LIT>')<EOL>return False<EOL><DEDENT>", "docstring": "run command", "id": "f7387:m5"}
{"signature": "def parse_topo_loc(cl_args):", "body": "try:<EOL><INDENT>topo_loc = cl_args['<STR_LIT>'].split('<STR_LIT:/>')<EOL>topo_loc.append(cl_args['<STR_LIT>'])<EOL>if len(topo_loc) != <NUM_LIT:4>:<EOL><INDENT>raise<EOL><DEDENT>return topo_loc<EOL><DEDENT>except Exception:<EOL><INDENT>Log.error('<STR_LIT>')<EOL>raise<EOL><DEDENT>", "docstring": "parse topology location", "id": "f7388:m1"}
{"signature": "def run_components(command, parser, cl_args, unknown_args):", "body": "return run(cl_args, '<STR_LIT:all>')<EOL>", "docstring": "run components command", "id": "f7388:m6"}
{"signature": "def to_table(components, topo_info):", "body": "inputs, outputs = defaultdict(list), defaultdict(list)<EOL>for ctype, component in components.items():<EOL><INDENT>if ctype == '<STR_LIT>':<EOL><INDENT>for component_name, component_info in component.items():<EOL><INDENT>for input_stream in component_info['<STR_LIT>']:<EOL><INDENT>input_name = input_stream['<STR_LIT>']<EOL>inputs[component_name].append(input_name)<EOL>outputs[input_name].append(component_name)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>info = []<EOL>spouts_instance = topo_info['<STR_LIT>']['<STR_LIT>']<EOL>bolts_instance = topo_info['<STR_LIT>']['<STR_LIT>']<EOL>for ctype, component in components.items():<EOL><INDENT>if ctype == \"<STR_LIT>\":<EOL><INDENT>continue<EOL><DEDENT>for component_name, component_info in component.items():<EOL><INDENT>row = [ctype[:-<NUM_LIT:1>], component_name]<EOL>if ctype == '<STR_LIT>':<EOL><INDENT>row.append(len(spouts_instance[component_name]))<EOL><DEDENT>else:<EOL><INDENT>row.append(len(bolts_instance[component_name]))<EOL><DEDENT>row.append('<STR_LIT:U+002C>'.join(inputs.get(component_name, ['<STR_LIT:->'])))<EOL>row.append('<STR_LIT:U+002C>'.join(outputs.get(component_name, ['<STR_LIT:->'])))<EOL>info.append(row)<EOL><DEDENT><DEDENT>header = ['<STR_LIT:type>', '<STR_LIT:name>', '<STR_LIT>', '<STR_LIT:input>', '<STR_LIT>']<EOL>return info, header<EOL>", "docstring": "normalize raw logical plan info to table", "id": "f7388:m2"}
{"signature": "def create_parser(subparsers):", "body": "metrics_parser = subparsers.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>usage=\"<STR_LIT>\",<EOL>add_help=False)<EOL>args.add_cluster_role_env(metrics_parser)<EOL>args.add_topology_name(metrics_parser)<EOL>args.add_verbose(metrics_parser)<EOL>args.add_tracker_url(metrics_parser)<EOL>args.add_config(metrics_parser)<EOL>args.add_component_name(metrics_parser)<EOL>metrics_parser.set_defaults(subcommand='<STR_LIT>')<EOL>containers_parser = subparsers.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>usage=\"<STR_LIT>\",<EOL>add_help=False)<EOL>args.add_cluster_role_env(containers_parser)<EOL>args.add_topology_name(containers_parser)<EOL>args.add_verbose(containers_parser)<EOL>args.add_tracker_url(containers_parser)<EOL>args.add_config(containers_parser)<EOL>args.add_container_id(containers_parser)<EOL>containers_parser.set_defaults(subcommand='<STR_LIT>')<EOL>return subparsers<EOL>", "docstring": "create parser", "id": "f7390:m0"}
{"signature": "def run_containers(command, parser, cl_args, unknown_args):", "body": "cluster, role, env = cl_args['<STR_LIT>'], cl_args['<STR_LIT>'], cl_args['<STR_LIT>']<EOL>topology = cl_args['<STR_LIT>']<EOL>container_id = cl_args['<STR_LIT:id>']<EOL>try:<EOL><INDENT>result = tracker_access.get_topology_info(cluster, env, topology, role)<EOL><DEDENT>except:<EOL><INDENT>Log.error(\"<STR_LIT>\", cl_args[\"<STR_LIT>\"])<EOL>return False<EOL><DEDENT>containers = result['<STR_LIT>']['<STR_LIT>']<EOL>all_bolts, all_spouts = set(), set()<EOL>for _, bolts in result['<STR_LIT>']['<STR_LIT>'].items():<EOL><INDENT>all_bolts = all_bolts | set(bolts)<EOL><DEDENT>for _, spouts in result['<STR_LIT>']['<STR_LIT>'].items():<EOL><INDENT>all_spouts = all_spouts | set(spouts)<EOL><DEDENT>stmgrs = containers.keys()<EOL>stmgrs.sort()<EOL>if container_id is not None:<EOL><INDENT>try:<EOL><INDENT>normalized_cid = container_id - <NUM_LIT:1><EOL>if normalized_cid < <NUM_LIT:0>:<EOL><INDENT>raise<EOL><DEDENT>stmgrs = [stmgrs[normalized_cid]]<EOL><DEDENT>except:<EOL><INDENT>Log.error('<STR_LIT>' % container_id)<EOL>return False<EOL><DEDENT><DEDENT>table = []<EOL>for sid, name in enumerate(stmgrs):<EOL><INDENT>cid = sid + <NUM_LIT:1><EOL>host = containers[name][\"<STR_LIT:host>\"]<EOL>port = containers[name][\"<STR_LIT:port>\"]<EOL>pid = containers[name][\"<STR_LIT>\"]<EOL>instances = containers[name][\"<STR_LIT>\"]<EOL>bolt_nums = len([instance for instance in instances if instance in all_bolts])<EOL>spout_nums = len([instance for instance in instances if instance in all_spouts])<EOL>table.append([cid, host, port, pid, bolt_nums, spout_nums, len(instances)])<EOL><DEDENT>headers = [\"<STR_LIT>\", \"<STR_LIT:host>\", \"<STR_LIT:port>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]<EOL>sys.stdout.flush()<EOL>print(tabulate(table, headers=headers))<EOL>return True<EOL>", "docstring": "run containers subcommand", "id": "f7390:m5"}
{"signature": "def parse_topo_loc(cl_args):", "body": "try:<EOL><INDENT>topo_loc = cl_args['<STR_LIT>'].split('<STR_LIT:/>')<EOL>topo_name = cl_args['<STR_LIT>']<EOL>topo_loc.append(topo_name)<EOL>if len(topo_loc) != <NUM_LIT:4>:<EOL><INDENT>raise<EOL><DEDENT>return topo_loc<EOL><DEDENT>except Exception:<EOL><INDENT>Log.error('<STR_LIT>')<EOL>raise<EOL><DEDENT>", "docstring": "parse topology location", "id": "f7390:m1"}
{"signature": "def create_parser():", "body": "help_epilog = '''<STR_LIT>'''<EOL>parser = argparse.ArgumentParser(<EOL>prog='<STR_LIT>',<EOL>epilog=help_epilog,<EOL>formatter_class=SubcommandHelpFormatter,<EOL>add_help=False)<EOL>subparsers = parser.add_subparsers(<EOL>title=\"<STR_LIT>\",<EOL>metavar='<STR_LIT>')<EOL>clusters.create_parser(subparsers)<EOL>logicalplan.create_parser(subparsers)<EOL>physicalplan.create_parser(subparsers)<EOL>topologies.create_parser(subparsers)<EOL>help.create_parser(subparsers)<EOL>version.create_parser(subparsers)<EOL>return parser<EOL>", "docstring": "create parser", "id": "f7392:m0"}
{"signature": "def run(command, *args):", "body": "<EOL>if command == '<STR_LIT>':<EOL><INDENT>return clusters.run(command, *args)<EOL><DEDENT>elif command == '<STR_LIT>':<EOL><INDENT>return topologies.run(command, *args)<EOL><DEDENT>elif command == '<STR_LIT>':<EOL><INDENT>return physicalplan.run_containers(command, *args)<EOL><DEDENT>elif command == '<STR_LIT>':<EOL><INDENT>return physicalplan.run_metrics(command, *args)<EOL><DEDENT>elif command == '<STR_LIT>':<EOL><INDENT>return logicalplan.run_components(command, *args)<EOL><DEDENT>elif command == '<STR_LIT>':<EOL><INDENT>return logicalplan.run_spouts(command, *args)<EOL><DEDENT>elif command == '<STR_LIT>':<EOL><INDENT>return logicalplan.run_bolts(command, *args)<EOL><DEDENT>elif command == '<STR_LIT>':<EOL><INDENT>return help.run(command, *args)<EOL><DEDENT>elif command == '<STR_LIT:version>':<EOL><INDENT>return version.run(command, *args)<EOL><DEDENT>return <NUM_LIT:1><EOL>", "docstring": "run command", "id": "f7392:m1"}
{"signature": "def extract_common_args(command, parser, cl_args):", "body": "try:<EOL><INDENT>cluster_role_env = cl_args['<STR_LIT>']<EOL>config_path = cl_args['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>subparser = config.get_subparser(parser, command)<EOL>print(subparser.format_help())<EOL>return dict()<EOL><DEDENT>cluster = config.get_heron_cluster(cluster_role_env)<EOL>config_path = config.get_heron_cluster_conf_dir(cluster, config_path)<EOL>new_cl_args = dict()<EOL>try:<EOL><INDENT>cluster_tuple = config.parse_cluster_role_env(cluster_role_env, config_path)<EOL>new_cl_args['<STR_LIT>'] = cluster_tuple[<NUM_LIT:0>]<EOL>new_cl_args['<STR_LIT>'] = cluster_tuple[<NUM_LIT:1>]<EOL>new_cl_args['<STR_LIT>'] = cluster_tuple[<NUM_LIT:2>]<EOL>new_cl_args['<STR_LIT>'] = config_path<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.error(\"<STR_LIT>\", str(e))<EOL>return dict()<EOL><DEDENT>cl_args.update(new_cl_args)<EOL>return cl_args<EOL>", "docstring": "extract common args", "id": "f7392:m2"}
{"signature": "def run(command, parser, cl_args, unknown_args):", "body": "try:<EOL><INDENT>clusters = tracker_access.get_clusters()<EOL><DEDENT>except:<EOL><INDENT>Log.error(\"<STR_LIT>\", cl_args[\"<STR_LIT>\"])<EOL>return False<EOL><DEDENT>print('<STR_LIT>')<EOL>for cluster in clusters:<EOL><INDENT>print('<STR_LIT>' % cluster)<EOL><DEDENT>return True<EOL>", "docstring": "run command", "id": "f7393:m1"}
{"signature": "def add_topology_name(parser):", "body": "parser.add_argument(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>'<EOL>)<EOL>return parser<EOL>", "docstring": "add argument that specifies topology name", "id": "f7394:m11"}
{"signature": "def add_component_name(parser):", "body": "parser.add_argument(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>metavar='<STR_LIT>',<EOL>type=str)<EOL>return parser<EOL>", "docstring": "add optional argument that specifies component name", "id": "f7394:m7"}
{"signature": "def add_spouts(parser):", "body": "parser.add_argument(<EOL>'<STR_LIT>', help='<STR_LIT>', action='<STR_LIT:store_true>')<EOL>return parser<EOL>", "docstring": "add optional argument that displays spout only", "id": "f7394:m8"}
{"signature": "def add_cluster_role_env(parser):", "body": "parser.add_argument(<EOL>'<STR_LIT>', help='<STR_LIT>', type=str,<EOL>metavar='<STR_LIT>')<EOL>return parser<EOL>", "docstring": "add argument that specifies topologies location", "id": "f7394:m10"}
{"signature": "def add_titles(parser):", "body": "parser._positionals.title = \"<STR_LIT>\"<EOL>parser._optionals.title = \"<STR_LIT>\"<EOL>return parser<EOL>", "docstring": "add titles", "id": "f7394:m1"}
{"signature": "def add_tracker_url(parser):", "body": "parser.add_argument(<EOL>'<STR_LIT>',<EOL>metavar='<STR_LIT>' + DEFAULT_TRACKER_URL + '<STR_LIT>',<EOL>type=str, default=DEFAULT_TRACKER_URL)<EOL>return parser<EOL>", "docstring": "add optional tracker_url argument", "id": "f7394:m5"}
{"signature": "def add_verbose(parser):", "body": "parser.add_argument(<EOL>'<STR_LIT>',<EOL>metavar='<STR_LIT>',<EOL>type=bool,<EOL>default=False)<EOL>return parser<EOL>", "docstring": "add optional verbose argument", "id": "f7394:m4"}
{"signature": "def insert_bool_values(command_line_args):", "body": "args1 = insert_bool('<STR_LIT>', command_line_args)<EOL>return args1<EOL>", "docstring": "insert boolean values", "id": "f7394:m3"}
{"signature": "def on_connection_close(self):", "body": "<EOL>self.connection_closed = True<EOL>", "docstring": ":return:", "id": "f7395:c7:m2"}
{"signature": "def get(self, cluster, environ, topology, comp_name, instance):", "body": "<EOL>options = dict(<EOL>cluster=cluster,<EOL>environ=environ,<EOL>topology=topology,<EOL>comp_name=comp_name,<EOL>instance=instance,<EOL>active=\"<STR_LIT>\",<EOL>function=common.className,<EOL>baseUrl=self.baseUrl)<EOL>self.render(\"<STR_LIT>\", **options)<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:param comp_name:\n:param instance:\n:return:", "id": "f7395:c1:m1"}
{"signature": "def get(self, cluster, environ, topology):", "body": "<EOL>options = dict(<EOL>cluster=cluster,<EOL>environ=environ,<EOL>topology=topology,<EOL>active=\"<STR_LIT>\",<EOL>function=common.className,<EOL>baseUrl=self.baseUrl)<EOL>self.render(\"<STR_LIT>\", **options)<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:return:", "id": "f7395:c0:m1"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self, cluster, environ, topology):<DEDENT>", "body": "<EOL>execution_state = yield access.get_execution_state(cluster, environ, topology)<EOL>scheduler_location = yield access.get_scheduler_location(cluster, environ, topology)<EOL>job_page_link = scheduler_location[\"<STR_LIT>\"]<EOL>launched_at = datetime.utcfromtimestamp(execution_state['<STR_LIT>'])<EOL>launched_time = launched_at.strftime('<STR_LIT>')<EOL>options = dict(<EOL>cluster=cluster,<EOL>environ=environ,<EOL>topology=topology,<EOL>execution_state=execution_state,<EOL>launched=launched_time,<EOL>status=\"<STR_LIT>\" if random.randint(<NUM_LIT:0>, <NUM_LIT:1>) else \"<STR_LIT>\",<EOL>active=\"<STR_LIT>\",<EOL>job_page_link=job_page_link,<EOL>function=common.className,<EOL>baseUrl=self.baseUrl<EOL>)<EOL>self.render(\"<STR_LIT>\", **options)<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:return:", "id": "f7395:c3:m1"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self, cluster, environ, topology, container):<DEDENT>", "body": "offset = self.get_argument(\"<STR_LIT>\")<EOL>length = self.get_argument(\"<STR_LIT>\")<EOL>path = self.get_argument(\"<STR_LIT:path>\")<EOL>data = yield access.get_container_file_data(cluster, environ, topology, container, path,<EOL>offset, length)<EOL>self.write(data)<EOL>self.finish()<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:param container:\n:return:", "id": "f7395:c5:m1"}
{"signature": "def get(self):", "body": "self.redirect(u\"<STR_LIT>\")<EOL>", "docstring": ":return:", "id": "f7398:c0:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self, cluster, environ, topology, instance):<DEDENT>", "body": "pplan = yield access.get_physical_plan(cluster, environ, topology)<EOL>host = pplan['<STR_LIT>'][pplan['<STR_LIT>'][instance]['<STR_LIT>']]['<STR_LIT:host>']<EOL>result = json.loads((yield access.get_instance_pid(<EOL>cluster, environ, topology, instance)))<EOL>self.write('<STR_LIT>' % (<EOL>host,<EOL>tornado.escape.xhtml_escape(result['<STR_LIT>']),<EOL>tornado.escape.xhtml_escape(result['<STR_LIT>'])))<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:param instance:\n:return:", "id": "f7399:c7:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "<EOL>topologies = yield access.get_topologies_states()<EOL>result = dict()<EOL>for cluster, cluster_value in topologies.items():<EOL><INDENT>result[cluster] = dict()<EOL>for environ, environ_value in cluster_value.items():<EOL><INDENT>result[cluster][environ] = dict()<EOL>for topology, topology_value in environ_value.items():<EOL><INDENT>if \"<STR_LIT>\" not in topology_value or topology_value[\"<STR_LIT>\"] is None:<EOL><INDENT>continue<EOL><DEDENT>if \"<STR_LIT>\" in topology_value:<EOL><INDENT>topology_value[\"<STR_LIT>\"] = topology_value[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>topology_value[\"<STR_LIT>\"] = '<STR_LIT:->'<EOL><DEDENT>result[cluster][environ][topology] = topology_value<EOL><DEDENT><DEDENT><DEDENT>self.write(result)<EOL>", "docstring": ":return:", "id": "f7399:c1:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self, cluster, environ, topology, instance):<DEDENT>", "body": "pplan = yield access.get_physical_plan(cluster, environ, topology)<EOL>host = pplan['<STR_LIT>'][pplan['<STR_LIT>'][instance]['<STR_LIT>']]['<STR_LIT:host>']<EOL>result = json.loads((yield access.run_instance_jmap(<EOL>cluster, environ, topology, instance)))<EOL>notes = \"<STR_LIT>\".join([<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % host<EOL>])<EOL>self.write('<STR_LIT>' % (<EOL>notes,<EOL>host,<EOL>tornado.escape.xhtml_escape(result['<STR_LIT>']),<EOL>tornado.escape.xhtml_escape(result['<STR_LIT>'])))<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:param instance:\n:return:", "id": "f7399:c10:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self, cluster, environ, topology):<DEDENT>", "body": "start_time = time.time()<EOL>estate = yield access.get_execution_state(cluster, environ, topology)<EOL>result_map = dict(<EOL>status=\"<STR_LIT:success>\",<EOL>message=\"<STR_LIT>\",<EOL>version=common.VERSION,<EOL>executiontime=time.time() - start_time,<EOL>result=estate<EOL>)<EOL>self.write(result_map)<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:return:", "id": "f7399:c5:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self, cluster, environ, topology, instance):<DEDENT>", "body": "pplan = yield access.get_physical_plan(cluster, environ, topology)<EOL>host = pplan['<STR_LIT>'][pplan['<STR_LIT>'][instance]['<STR_LIT>']]['<STR_LIT:host>']<EOL>result = json.loads((yield access.get_instance_jstack(<EOL>cluster, environ, topology, instance)))<EOL>self.write('<STR_LIT>' % (<EOL>host,<EOL>tornado.escape.xhtml_escape(result['<STR_LIT>']),<EOL>tornado.escape.xhtml_escape(result['<STR_LIT>'])))<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:param instance:\n:return:", "id": "f7399:c8:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self, cluster, environ, topology):<DEDENT>", "body": "start_time = time.time()<EOL>lplan = yield access.get_logical_plan(cluster, environ, topology)<EOL>result = dict(<EOL>status=\"<STR_LIT:success>\",<EOL>message=\"<STR_LIT>\",<EOL>version=common.VERSION,<EOL>executiontime=time.time() - start_time,<EOL>result=lplan<EOL>)<EOL>self.write(result)<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:return:", "id": "f7399:c2:m0"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "cluster = self.get_argument(\"<STR_LIT>\")<EOL>environ = self.get_argument(\"<STR_LIT>\")<EOL>topology = self.get_argument(\"<STR_LIT>\")<EOL>component = self.get_argument(\"<STR_LIT>\", default=None)<EOL>metricnames = self.get_arguments(\"<STR_LIT>\")<EOL>instances = self.get_arguments(\"<STR_LIT>\")<EOL>interval = self.get_argument(\"<STR_LIT>\", default=-<NUM_LIT:1>)<EOL>time_range = (<NUM_LIT:0>, interval)<EOL>compnames = [component] if component else (yield access.get_comps(cluster, environ, topology))<EOL>futures = {}<EOL>for comp in compnames:<EOL><INDENT>future = access.get_comp_metrics(<EOL>cluster, environ, topology, comp, instances,<EOL>metricnames, time_range)<EOL>futures[comp] = future<EOL><DEDENT>results = yield futures<EOL>self.write(results[component] if component else results)<EOL>", "docstring": ":return:", "id": "f7400:c0:m0"}
{"signature": "def get_time_ranges(ranges):", "body": "<EOL>now = int(time.time())<EOL>time_slots = dict()<EOL>for key, value in ranges.items():<EOL><INDENT>time_slots[key] = (now - value[<NUM_LIT:0>], now - value[<NUM_LIT:1>], value[<NUM_LIT:2>])<EOL><DEDENT>return (now, time_slots)<EOL>", "docstring": ":param ranges:\n:return:", "id": "f7403:m0"}
{"signature": "def create_parsers():", "body": "parser = argparse.ArgumentParser(<EOL>epilog='<STR_LIT>',<EOL>usage=\"<STR_LIT>\",<EOL>add_help=False)<EOL>parser = add_titles(parser)<EOL>parser = add_arguments(parser)<EOL>child_parser = argparse.ArgumentParser(<EOL>parents=[parser],<EOL>formatter_class=SubcommandHelpFormatter,<EOL>add_help=False)<EOL>subparsers = child_parser.add_subparsers(<EOL>title=\"<STR_LIT>\")<EOL>help_parser = subparsers.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>add_help=False)<EOL>version_parser = subparsers.add_parser(<EOL>'<STR_LIT:version>',<EOL>help='<STR_LIT>',<EOL>add_help=True)<EOL>help_parser.set_defaults(help=True)<EOL>version_parser.set_defaults(version=True)<EOL>return (parser, child_parser)<EOL>", "docstring": ":return:", "id": "f7408:m2"}
{"signature": "def add_arguments(parser):", "body": "parser.add_argument(<EOL>'<STR_LIT>',<EOL>metavar='<STR_LIT>' + consts.DEFAULT_TRACKER_URL + '<STR_LIT>',<EOL>default=consts.DEFAULT_TRACKER_URL)<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>metavar='<STR_LIT>' + consts.DEFAULT_ADDRESS + '<STR_LIT>',<EOL>default=consts.DEFAULT_ADDRESS)<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>metavar='<STR_LIT>' + str(consts.DEFAULT_PORT) + '<STR_LIT:)>',<EOL>type=int,<EOL>default=consts.DEFAULT_PORT)<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>metavar='<STR_LIT>'<EOL>+ str(consts.DEFAULT_BASE_URL) + '<STR_LIT:)>',<EOL>default=consts.DEFAULT_BASE_URL)<EOL>return parser<EOL>", "docstring": ":param parser:\n:return:", "id": "f7408:m1"}
{"signature": "def get_cluster_role_topologies(cluster, role):", "body": "instance = tornado.ioloop.IOLoop.instance()<EOL>try:<EOL><INDENT>return instance.run_sync(lambda: API.get_cluster_role_topologies(cluster, role))<EOL><DEDENT>except Exception:<EOL><INDENT>Log.debug(traceback.format_exc())<EOL>raise<EOL><DEDENT>", "docstring": "Synced API call to get topologies under a cluster submitted by a role", "id": "f7410:m9"}
{"signature": "def queries_map():", "body": "qs = _all_metric_queries()<EOL>return dict(zip(qs[<NUM_LIT:0>], qs[<NUM_LIT:1>]) + zip(qs[<NUM_LIT:2>], qs[<NUM_LIT:3>]))<EOL>", "docstring": "map from query parameter to query name", "id": "f7410:m2"}
{"signature": "def get_cluster_topologies(cluster):", "body": "instance = tornado.ioloop.IOLoop.instance()<EOL>try:<EOL><INDENT>return instance.run_sync(lambda: API.get_cluster_topologies(cluster))<EOL><DEDENT>except Exception:<EOL><INDENT>Log.debug(traceback.format_exc())<EOL>raise<EOL><DEDENT>", "docstring": "Synced API call to get topologies under a cluster", "id": "f7410:m8"}
{"signature": "def get_cluster_role_env_topologies(cluster, role, env):", "body": "instance = tornado.ioloop.IOLoop.instance()<EOL>try:<EOL><INDENT>return instance.run_sync(lambda: API.get_cluster_role_env_topologies(cluster, role, env))<EOL><DEDENT>except Exception:<EOL><INDENT>Log.debug(traceback.format_exc())<EOL>raise<EOL><DEDENT>", "docstring": "Synced API call to get topologies under a cluster submitted by a role under env", "id": "f7410:m10"}
{"signature": "def fetch(self, cluster, metric, topology, component, instance, timerange, envirn=None):", "body": "pass<EOL>", "docstring": ":param cluster:\n:param metric:\n:param topology:\n:param component:\n:param instance:\n:param timerange:\n:param envirn:\n:return:", "id": "f7411:c0:m0"}
{"signature": "def fetch_max(self, cluster, metric, topology, component, instance, timerange, envirn=None):", "body": "pass<EOL>", "docstring": ":param cluster:\n:param metric:\n:param topology:\n:param component:\n:param instance:\n:param timerange:\n:param envirn:\n:return:", "id": "f7411:c0:m1"}
{"signature": "@tornado.gen.coroutine<EOL>def get_metrics(cluster, environment, topology, timerange, query, role=None):", "body": "params = dict(<EOL>cluster=cluster,<EOL>environ=environment,<EOL>topology=topology,<EOL>starttime=timerange[<NUM_LIT:0>],<EOL>endtime=timerange[<NUM_LIT:1>],<EOL>query=query)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(<EOL>create_url(METRICS_QUERY_URL_FMT), params<EOL>)<EOL>logging.info(\"<STR_LIT>\", request_url)<EOL>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": "Get the metrics for a topology from tracker\n:param cluster:\n:param environment:\n:param topology:\n:param timerange:\n:param query:\n:param role:\n:return:", "id": "f7414:m19"}
{"signature": "@tornado.gen.coroutine<EOL>def get_component_exceptionsummary(cluster, environ, topology, component, role=None):", "body": "params = dict(<EOL>cluster=cluster,<EOL>environ=environ,<EOL>topology=topology,<EOL>component=component)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(<EOL>create_url(EXCEPTION_SUMMARY_URL_FMT), params)<EOL>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": "Get summary of exception for a component\n:param cluster:\n:param environ:\n:param topology:\n:param component:\n:param role:\n:return:", "id": "f7414:m15"}
{"signature": "def get_container_file_download_url(cluster, environ, topology, container,<EOL>path, role=None):", "body": "params = dict(<EOL>cluster=cluster,<EOL>environ=environ,<EOL>topology=topology,<EOL>container=container,<EOL>path=path)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(<EOL>create_url(FILE_DOWNLOAD_URL_FMT), params)<EOL>if role is not None:<EOL><INDENT>request_url = tornado.httputil.url_concat(request_url, dict(role=role))<EOL><DEDENT>return request_url<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:param container:\n:param path:\n:param role:\n:return:", "id": "f7414:m26"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def fetch_backpressure(self, cluster, metric, topology, component, instance,timerange, is_max, environ=None):<DEDENT>", "body": "instances = yield get_instances(cluster, environ, topology)<EOL>if component != \"<STR_LIT:*>\":<EOL><INDENT>filtered_inst = [instance for instance in instances if instance.split(\"<STR_LIT:_>\")[<NUM_LIT:2>] == component]<EOL><DEDENT>else:<EOL><INDENT>filtered_inst = instances<EOL><DEDENT>futures_dict = {}<EOL>for inst in filtered_inst:<EOL><INDENT>query = queries.get(metric).format(inst)<EOL>futures_dict[inst] = get_metrics(cluster, environ, topology, timerange, query)<EOL><DEDENT>res = yield futures_dict<EOL>if not is_max:<EOL><INDENT>timelines = []<EOL>for key in res:<EOL><INDENT>result = res[key]<EOL>if len(result[\"<STR_LIT>\"]) > <NUM_LIT:0>:<EOL><INDENT>result[\"<STR_LIT>\"][<NUM_LIT:0>][\"<STR_LIT>\"] = key<EOL><DEDENT>timelines.extend(result[\"<STR_LIT>\"])<EOL><DEDENT>result = self.get_metric_response(timerange, timelines, is_max)<EOL><DEDENT>else:<EOL><INDENT>data = self.compute_max(res.values())<EOL>result = self.get_metric_response(timerange, data, is_max)<EOL><DEDENT>raise tornado.gen.Return(result)<EOL>", "docstring": ":param cluster:\n:param metric:\n:param topology:\n:param component:\n:param instance:\n:param timerange:\n:param isMax:\n:param environ:\n:return:", "id": "f7414:c0:m2"}
{"signature": "@tornado.gen.coroutine<EOL>def get_comps(cluster, environ, topology, role=None):", "body": "params = dict(cluster=cluster, environ=environ, topology=topology)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(<EOL>create_url(LOGICALPLAN_URL_FMT), params)<EOL>lplan = yield fetch_url_as_json(request_url)<EOL>comps = lplan['<STR_LIT>'].keys() + lplan['<STR_LIT>'].keys()<EOL>raise tornado.gen.Return(comps)<EOL>", "docstring": "Get the list of component names for the topology from Heron Nest\n:param cluster:\n:param environ:\n:param topology:\n:param role:\n:return:", "id": "f7414:m11"}
{"signature": "@tornado.gen.coroutine<EOL>def get_comp_metrics_timeline(cluster, environ, topology, component,<EOL>instances, metricnames, time_range, role=None):", "body": "params = dict(<EOL>cluster=cluster,<EOL>environ=environ,<EOL>topology=topology,<EOL>component=component)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(create_url(METRICS_TIMELINE_URL_FMT), params)<EOL>if role is not None:<EOL><INDENT>request_url = tornado.httputil.url_concat(request_url, dict(role=role))<EOL><DEDENT>for metric_name in metricnames:<EOL><INDENT>request_url = tornado.httputil.url_concat(request_url, dict(metricname=metric_name))<EOL><DEDENT>for instance in instances:<EOL><INDENT>request_url = tornado.httputil.url_concat(request_url, dict(instance=instance))<EOL><DEDENT>request_url = tornado.httputil.url_concat(<EOL>request_url, dict(starttime=time_range[<NUM_LIT:0>], endtime=time_range[<NUM_LIT:1>]))<EOL>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": "Get the minute-by-minute metrics for all instances of a topology from tracker\n:param cluster:\n:param environ:\n:param topology:\n:param component:\n:param instances:\n:param metricnames:   dict of display name to cuckoo name\n:param time_range:    2-tuple consisting of start and end of range\n:param role:\n:return:", "id": "f7414:m20"}
{"signature": "@tornado.gen.coroutine<EOL>def get_comp_metrics(cluster, environ, topology, component,<EOL>instances, metricnames, time_range, role=None):", "body": "params = dict(<EOL>cluster=cluster,<EOL>environ=environ,<EOL>topology=topology,<EOL>component=component)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(<EOL>create_url(METRICS_URL_FMT), params)<EOL>for metric_name in metricnames:<EOL><INDENT>request_url = tornado.httputil.url_concat(request_url, dict(metricname=metric_name))<EOL><DEDENT>for instance in instances:<EOL><INDENT>request_url = tornado.httputil.url_concat(request_url, dict(instance=instance))<EOL><DEDENT>request_url = tornado.httputil.url_concat(request_url, dict(interval=time_range[<NUM_LIT:1>]))<EOL>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": "Get the metrics for all the instances of a topology from Heron Nest\n:param cluster:\n:param environ:\n:param topology:\n:param component:\n:param instances:\n:param metricnames:   dict of display name to cuckoo name\n:param time_range:    2-tuple consisting of start and end of range\n:param role:\n:return:", "id": "f7414:m18"}
{"signature": "@tornado.gen.coroutine<EOL>def get_filestats(cluster, environ, topology, container, path, role=None):", "body": "params = dict(<EOL>cluster=cluster,<EOL>environ=environ,<EOL>topology=topology,<EOL>container=container,<EOL>path=path)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(create_url(FILESTATS_URL_FMT), params)<EOL>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:param container:\n:param path:\n:param role:\n:return:", "id": "f7414:m28"}
{"signature": "@tornado.gen.coroutine<EOL>def run_instance_jmap(cluster, environ, topology, instance, role=None):", "body": "params = dict(<EOL>cluster=cluster,<EOL>environ=environ,<EOL>topology=topology,<EOL>instance=instance)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(<EOL>create_url(JMAP_URL_FMT), params)<EOL>if role is not None:<EOL><INDENT>request_url = tornado.httputil.url_concat(request_url, dict(role=role))<EOL><DEDENT>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:param instance:\n:param role:\n:return:", "id": "f7414:m25"}
{"signature": "def get_cluster_role_env_topologies(cluster, role, env):", "body": "return _get_topologies(cluster, role=role, env=env)<EOL>", "docstring": "Get the list of topologies given a cluster submitted by a given role under a given environment\n:param cluster:\n:param role:\n:param env:\n:return:", "id": "f7414:m8"}
{"signature": "@tornado.gen.coroutine<EOL>def get_container_file_data(cluster, environ, topology, container,<EOL>path, offset, length, role=None):", "body": "params = dict(<EOL>cluster=cluster,<EOL>environ=environ,<EOL>topology=topology,<EOL>container=container,<EOL>path=path,<EOL>offset=offset,<EOL>length=length)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(<EOL>create_url(FILE_DATA_URL_FMT), params)<EOL>if role is not None:<EOL><INDENT>request_url = tornado.httputil.url_concat(request_url, dict(role=role))<EOL><DEDENT>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:param container:\n:param path:\n:param offset:\n:param length:\n:param role:\n:return:", "id": "f7414:m27"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def fetch(self, cluster, metric, topology, component, instance, timerange, environ=None):<DEDENT>", "body": "components = [component] if component != \"<STR_LIT:*>\" else (yield get_comps(cluster, environ, topology))<EOL>futures = []<EOL>for comp in components:<EOL><INDENT>query = self.get_query(metric, comp, instance)<EOL>future = get_metrics(cluster, environ, topology, timerange, query)<EOL>futures.append(future)<EOL><DEDENT>results = yield futures<EOL>timelines = []<EOL>for result in results:<EOL><INDENT>timelines.extend(result[\"<STR_LIT>\"])<EOL><DEDENT>result = self.get_metric_response(timerange, timelines, False)<EOL>raise tornado.gen.Return(result)<EOL>", "docstring": ":param cluster:\n:param metric:\n:param topology:\n:param component:\n:param instance:\n:param timerange:\n:param environ:\n:return:", "id": "f7414:c0:m0"}
{"signature": "@tornado.gen.coroutine<EOL>def get_execution_state(cluster, environ, topology, role=None):", "body": "params = dict(cluster=cluster, environ=environ, topology=topology)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(create_url(EXECUTION_STATE_URL_FMT), params)<EOL>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": "Get the execution state of a topology in a cluster\n:param cluster:\n:param environ:\n:param topology:\n:param role:\n:return:", "id": "f7414:m9"}
{"signature": "def get_tracker_endpoint():", "body": "return options.tracker_url<EOL>", "docstring": "Get the endpoint for heron tracker\n:return:", "id": "f7414:m0"}
{"signature": "@tornado.gen.coroutine<EOL>def get_topologies():", "body": "request_url = create_url(TOPOLOGIES_URL_FMT)<EOL>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": "Get the list of topologies given a data center from heron tracker\n:return:", "id": "f7414:m3"}
{"signature": "@tornado.gen.coroutine<EOL><INDENT>def fetch_max(self, cluster, metric, topology, component, instance, timerange, environ=None):<DEDENT>", "body": "components = [component] if component != \"<STR_LIT:*>\" else (yield get_comps(cluster, environ, topology))<EOL>result = {}<EOL>futures = []<EOL>for comp in components:<EOL><INDENT>query = self.get_query(metric, comp, instance)<EOL>max_query = \"<STR_LIT>\" % query<EOL>future = get_metrics(cluster, environ, topology, timerange, max_query)<EOL>futures.append(future)<EOL><DEDENT>results = yield futures<EOL>data = self.compute_max(results)<EOL>result = self.get_metric_response(timerange, data, True)<EOL>raise tornado.gen.Return(result)<EOL>", "docstring": ":param cluster:\n:param metric:\n:param topology:\n:param component:\n:param instance:\n:param timerange:\n:param environ:\n:return:", "id": "f7414:c0:m1"}
{"signature": "@tornado.gen.coroutine<EOL>def get_scheduler_location(cluster, environ, topology, role=None):", "body": "params = dict(cluster=cluster, environ=environ, topology=topology)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(<EOL>create_url(SCHEDULER_LOCATION_URL_FMT), params)<EOL>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": "Get the scheduler location of a topology in a cluster from tracker\n:param cluster:\n:param environ:\n:param topology:\n:param role:\n:return:", "id": "f7414:m14"}
{"signature": "def get_metric_response(self, timerange, data, isMax):", "body": "if isMax:<EOL><INDENT>return dict(<EOL>status=\"<STR_LIT:success>\",<EOL>starttime=timerange[<NUM_LIT:0>],<EOL>endtime=timerange[<NUM_LIT:1>],<EOL>result=dict(timeline=[dict(data=data)])<EOL>)<EOL><DEDENT>return dict(<EOL>status=\"<STR_LIT:success>\",<EOL>starttime=timerange[<NUM_LIT:0>],<EOL>endtime=timerange[<NUM_LIT:1>],<EOL>result=dict(timeline=data)<EOL>)<EOL>", "docstring": ":param timerange:\n:param data:\n:param isMax:\n:return:", "id": "f7414:c0:m4"}
{"signature": "def create_url(fmt):", "body": "return fmt % get_tracker_endpoint()<EOL>", "docstring": "Given an URL format, substitute with tracker service endpoint\n:param fmt:\n:return:", "id": "f7414:m1"}
{"signature": "@tornado.gen.coroutine<EOL>def get_instance_mem_histogram(cluster, environ, topology, instance, role=None):", "body": "params = dict(<EOL>cluster=cluster,<EOL>environ=environ,<EOL>topology=topology,<EOL>instance=instance)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(<EOL>create_url(HISTOGRAM_URL_FMT), params)<EOL>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:param instance:\n:param role:\n:return:", "id": "f7414:m24"}
{"signature": "@tornado.gen.coroutine<EOL>def get_topology_info(cluster, environ, topology, role=None):", "body": "params = dict(<EOL>cluster=cluster,<EOL>environ=environ,<EOL>topology=topology)<EOL>if role is not None:<EOL><INDENT>params['<STR_LIT>'] = role<EOL><DEDENT>request_url = tornado.httputil.url_concat(create_url(INFO_URL_FMT), params)<EOL>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": ":param cluster:\n:param environ:\n:param topology:\n:param role:\n:return:", "id": "f7414:m21"}
{"signature": "@tornado.gen.coroutine<EOL>def get_topologies_states():", "body": "request_url = create_url(TOPOLOGIES_STATS_URL_FMT)<EOL>raise tornado.gen.Return((yield fetch_url_as_json(request_url)))<EOL>", "docstring": "Get the list of topologies and their states\n:return:", "id": "f7414:m4"}
{"signature": "def compute_max(self, multi_ts):", "body": "if len(multi_ts) > <NUM_LIT:0> and len(multi_ts[<NUM_LIT:0>][\"<STR_LIT>\"]) > <NUM_LIT:0>:<EOL><INDENT>keys = multi_ts[<NUM_LIT:0>][\"<STR_LIT>\"][<NUM_LIT:0>][\"<STR_LIT:data>\"].keys()<EOL>timelines = ([res[\"<STR_LIT>\"][<NUM_LIT:0>][\"<STR_LIT:data>\"][key] for key in keys] for res in multi_ts)<EOL>values = (max(v) for v in zip(*timelines))<EOL>return dict(zip(keys, values))<EOL><DEDENT>return {}<EOL>", "docstring": ":param multi_ts:\n:return:", "id": "f7414:c0:m3"}
{"signature": "def get_cluster_topologies(cluster):", "body": "return _get_topologies(cluster)<EOL>", "docstring": "Get the list of topologies given a cluster\n:param cluster:\n:return:", "id": "f7414:m6"}
{"signature": "def direct_mode_cluster_role_env(cluster_role_env, config_path):", "body": "<EOL>cli_conf_file = os.path.join(config_path, CLIENT_YAML)<EOL>if not os.path.isfile(cli_conf_file):<EOL><INDENT>return True<EOL><DEDENT>client_confs = {}<EOL>with open(cli_conf_file, '<STR_LIT:r>') as conf_file:<EOL><INDENT>client_confs = yaml.load(conf_file)<EOL>if not client_confs:<EOL><INDENT>return True<EOL><DEDENT>role_present = True if len(cluster_role_env[<NUM_LIT:1>]) > <NUM_LIT:0> else False<EOL>if ROLE_REQUIRED in client_confs and client_confs[ROLE_REQUIRED] and not role_present:<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>% (cluster_role_env, ROLE_REQUIRED, cli_conf_file))<EOL><DEDENT>environ_present = True if len(cluster_role_env[<NUM_LIT:2>]) > <NUM_LIT:0> else False<EOL>if ENV_REQUIRED in client_confs and client_confs[ENV_REQUIRED] and not environ_present:<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>% (cluster_role_env, ENV_REQUIRED, cli_conf_file))<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Check cluster/[role]/[environ], if they are required", "id": "f7415:m19"}
{"signature": "def get_classpath(jars):", "body": "return '<STR_LIT::>'.join(map(normalized_class_path, jars))<EOL>", "docstring": "Get the normalized class path of all jars", "id": "f7415:m5"}
{"signature": "def get_heron_conf_dir():", "body": "conf_path = os.path.join(get_heron_dir(), CONF_DIR)<EOL>return conf_path<EOL>", "docstring": "This will provide heron conf directory from .pex file.\n:return: absolute path of heron conf directory", "id": "f7415:m9"}
{"signature": "def get_java_path():", "body": "java_home = os.environ.get(\"<STR_LIT>\")<EOL>return os.path.join(java_home, BIN_DIR, \"<STR_LIT>\")<EOL>", "docstring": "Get the path of java executable", "id": "f7415:m24"}
{"signature": "def get_cluster_role_env(cluster_role_env):", "body": "parts = cluster_role_env.split('<STR_LIT:/>')[:<NUM_LIT:3>]<EOL>if len(parts) == <NUM_LIT:3>:<EOL><INDENT>return (parts[<NUM_LIT:0>], parts[<NUM_LIT:1>], parts[<NUM_LIT:2>])<EOL><DEDENT>if len(parts) == <NUM_LIT:2>:<EOL><INDENT>return (parts[<NUM_LIT:0>], parts[<NUM_LIT:1>], \"<STR_LIT>\")<EOL><DEDENT>if len(parts) == <NUM_LIT:1>:<EOL><INDENT>return (parts[<NUM_LIT:0>], \"<STR_LIT>\", \"<STR_LIT>\")<EOL><DEDENT>return (\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>", "docstring": "Parse cluster/[role]/[environ], supply empty string, if not provided", "id": "f7415:m18"}
{"signature": "def print_build_info(zipped_pex=False):", "body": "if zipped_pex:<EOL><INDENT>release_file = get_zipped_heron_release_file()<EOL><DEDENT>else:<EOL><INDENT>release_file = get_heron_release_file()<EOL><DEDENT>with open(release_file) as release_info:<EOL><INDENT>release_map = yaml.load(release_info)<EOL>release_items = sorted(release_map.items(), key=lambda tup: tup[<NUM_LIT:0>])<EOL>for key, value in release_items:<EOL><INDENT>print(\"<STR_LIT>\" % (key, value))<EOL><DEDENT><DEDENT>", "docstring": "Print build_info from release.yaml\n\n    :param zipped_pex: True if the PEX file is built with flag `zip_safe=False'.", "id": "f7415:m27"}
{"signature": "def get_heron_bin_dir():", "body": "bin_path = os.path.join(get_heron_dir(), BIN_DIR)<EOL>return bin_path<EOL>", "docstring": "This will provide heron bin directory from .pex file.\n:return: absolute path of heron lib directory", "id": "f7415:m8"}
{"signature": "def check_release_file_exists():", "body": "release_file = get_heron_release_file()<EOL>if not os.path.isfile(release_file):<EOL><INDENT>Log.error(\"<STR_LIT>\" % release_file)<EOL>return False<EOL><DEDENT>return True<EOL>", "docstring": "Check if the release.yaml file exists", "id": "f7415:m26"}
{"signature": "def get_heron_dir():", "body": "go_above_dirs = <NUM_LIT:9><EOL>path = \"<STR_LIT:/>\".join(os.path.realpath(__file__).split('<STR_LIT:/>')[:-go_above_dirs])<EOL>return normalized_class_path(path)<EOL>", "docstring": "This will extract heron directory from .pex file.\n\nFor example,\nwhen __file__ is '/Users/heron-user/bin/heron/heron/tools/common/src/python/utils/config.pyc', and\nits real path is '/Users/heron-user/.heron/bin/heron/tools/common/src/python/utils/config.pyc',\nthe internal variable ``path`` would be '/Users/heron-user/.heron', which is the heron directory\n\nThis means the variable `go_above_dirs` below is 9.\n\n:return: root location of the .pex file", "id": "f7415:m6"}
{"signature": "def get_subparser(parser, command):", "body": "<EOL>subparsers_actions = [action for action in parser._actions<EOL>if isinstance(action, argparse._SubParsersAction)]<EOL>for subparsers_action in subparsers_actions:<EOL><INDENT>for choice, subparser in subparsers_action.choices.items():<EOL><INDENT>if choice == command:<EOL><INDENT>return subparser<EOL><DEDENT><DEDENT><DEDENT>return None<EOL>", "docstring": "Retrieve the given subparser from parser", "id": "f7415:m1"}
{"signature": "def insert_bool(param, command_args):", "body": "index = <NUM_LIT:0><EOL>found = False<EOL>for lelem in command_args:<EOL><INDENT>if lelem == '<STR_LIT>' and not found:<EOL><INDENT>break<EOL><DEDENT>if lelem == param:<EOL><INDENT>found = True<EOL>break<EOL><DEDENT>index = index + <NUM_LIT:1><EOL><DEDENT>if found:<EOL><INDENT>command_args.insert(index + <NUM_LIT:1>, '<STR_LIT:True>')<EOL><DEDENT>return command_args<EOL>", "docstring": ":param param:\n:param command_args:\n:return:", "id": "f7415:m29"}
{"signature": "def get_heron_cluster_conf_dir(cluster, default_config_path):", "body": "return os.path.join(default_config_path, cluster)<EOL>", "docstring": "This will provide heron cluster config directory, if config path is default\n:return: absolute path of heron cluster conf directory", "id": "f7415:m13"}
{"signature": "def get_heron_libs(local_jars):", "body": "heron_lib_dir = get_heron_lib_dir()<EOL>heron_libs = [os.path.join(heron_lib_dir, f) for f in local_jars]<EOL>return heron_libs<EOL>", "docstring": "Get all the heron lib jars with the absolute paths", "id": "f7415:m15"}
{"signature": "def valid_path(path):", "body": "<EOL>if path.endswith('<STR_LIT:*>'):<EOL><INDENT>Log.debug('<STR_LIT>', path[:-<NUM_LIT:1>])<EOL>if os.path.isdir(path[:-<NUM_LIT:1>]):<EOL><INDENT>return True<EOL><DEDENT>return False<EOL><DEDENT>Log.debug('<STR_LIT>', path)<EOL>if os.path.isdir(path):<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>Log.debug('<STR_LIT>', path)<EOL>if os.path.isfile(path):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Check if an entry in the class path exists as either a directory or a file", "id": "f7417:m0"}
{"signature": "def set_state_locations(self, state_locations):", "body": "self.locations = state_locations<EOL>self.validate_state_locations()<EOL>", "docstring": "set state locations", "id": "f7421:c0:m1"}
{"signature": "def get_pplan(self, topologyName, callback=None):", "body": "if callback:<EOL><INDENT>self.pplan_watchers[topologyName].append(callback)<EOL><DEDENT>else:<EOL><INDENT>pplan_path = self.get_pplan_path(topologyName)<EOL>with open(pplan_path) as f:<EOL><INDENT>data = f.read()<EOL>pplan = PhysicalPlan()<EOL>pplan.ParseFromString(data)<EOL>return pplan<EOL><DEDENT><DEDENT>", "docstring": "Get physical plan of a topology", "id": "f7422:c0:m9"}
{"signature": "def get_scheduler_location(self, topologyName, callback=None):", "body": "if callback:<EOL><INDENT>self.scheduler_location_watchers[topologyName].append(callback)<EOL><DEDENT>else:<EOL><INDENT>scheduler_location_path = self.get_scheduler_location_path(topologyName)<EOL>with open(scheduler_location_path) as f:<EOL><INDENT>data = f.read()<EOL>scheduler_location = SchedulerLocation()<EOL>scheduler_location.ParseFromString(data)<EOL>return scheduler_location<EOL><DEDENT><DEDENT>", "docstring": "Get scheduler location", "id": "f7422:c0:m16"}
{"signature": "def start(self):", "body": "self.monitoring_thread_stop_signal = False<EOL>self.monitoring_thread.start()<EOL>", "docstring": "start monitoring thread", "id": "f7422:c0:m1"}
{"signature": "def create_topology(self, topologyName, topology):", "body": "pass<EOL>", "docstring": "Create path is currently not supported in file based state manager.", "id": "f7422:c0:m6"}
{"signature": "def get_packing_plan(self, topologyName, callback=None):", "body": "if callback:<EOL><INDENT>self.packing_plan_watchers[topologyName].append(callback)<EOL><DEDENT>else:<EOL><INDENT>packing_plan_path = self.get_packing_plan_path(topologyName)<EOL>with open(packing_plan_path) as f:<EOL><INDENT>data = f.read()<EOL>packing_plan = PackingPlan()<EOL>packing_plan.ParseFromString(data)<EOL><DEDENT><DEDENT>", "docstring": "get packing plan", "id": "f7422:c0:m8"}
{"signature": "def create_pplan(self, topologyName, pplan):", "body": "pass<EOL>", "docstring": "Create path is currently not supported in file based state manager.", "id": "f7422:c0:m10"}
{"signature": "def get_tmaster(self, topologyName, callback=None):", "body": "if callback:<EOL><INDENT>self.tmaster_watchers[topologyName].append(callback)<EOL><DEDENT>else:<EOL><INDENT>tmaster_path = self.get_tmaster_path(topologyName)<EOL>with open(tmaster_path) as f:<EOL><INDENT>data = f.read()<EOL>tmaster = TMasterLocation()<EOL>tmaster.ParseFromString(data)<EOL>return tmaster<EOL><DEDENT><DEDENT>", "docstring": "Get tmaster", "id": "f7422:c0:m15"}
{"signature": "def delete_topology(self, topologyName):", "body": "pass<EOL>", "docstring": "Delete path is currently not supported in file based state manager.", "id": "f7422:c0:m7"}
{"signature": "def get_execution_state(self, topologyName, callback=None):", "body": "if callback:<EOL><INDENT>self.execution_state_watchers[topologyName].append(callback)<EOL><DEDENT>else:<EOL><INDENT>execution_state_path = self.get_execution_state_path(topologyName)<EOL>with open(execution_state_path) as f:<EOL><INDENT>data = f.read()<EOL>executionState = ExecutionState()<EOL>executionState.ParseFromString(data)<EOL>return executionState<EOL><DEDENT><DEDENT>", "docstring": "Get execution state", "id": "f7422:c0:m12"}
{"signature": "def create_execution_state(self, topologyName, executionState):", "body": "pass<EOL>", "docstring": "Create path is currently not supported in file based state manager.", "id": "f7422:c0:m13"}
{"signature": "def delete_execution_state(self, topologyName):", "body": "pass<EOL>", "docstring": "Delete path is currently not supported in file based state manager.", "id": "f7422:c0:m14"}
{"signature": "def configure(level, logfile=None):", "body": "log_format = \"<STR_LIT>\"<EOL>date_format = '<STR_LIT>'<EOL>logging.basicConfig(format=log_format, datefmt=date_format)<EOL>Log.setLevel(level)<EOL>if logfile is not None:<EOL><INDENT>fh = logging.FileHandler(logfile)<EOL>fh.setFormatter(logging.Formatter(log_format))<EOL>Log.addHandler(fh)<EOL><DEDENT>", "docstring": "configure logging", "id": "f7424:m0"}
{"signature": "def get_all_state_managers(conf):", "body": "state_managers = []<EOL>try:<EOL><INDENT>state_managers.extend(get_all_zk_state_managers(conf))<EOL>state_managers.extend(get_all_file_state_managers(conf))<EOL>return state_managers<EOL><DEDENT>except Exception as ex:<EOL><INDENT>LOG.error(\"<STR_LIT>\")<EOL>raise ex<EOL><DEDENT>", "docstring": "@param conf - An instance of Config class\nReads the config for requested state managers.\nInstantiates them, start and then return them.", "id": "f7425:m0"}
{"signature": "def get_pplan(self, topologyName, callback=None):", "body": "isWatching = False<EOL>ret = {<EOL>\"<STR_LIT:result>\": None<EOL>}<EOL>if callback:<EOL><INDENT>isWatching = True<EOL><DEDENT>else:<EOL><INDENT>def callback(data):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>ret[\"<STR_LIT:result>\"] = data<EOL><DEDENT><DEDENT>self._get_pplan_with_watch(topologyName, callback, isWatching)<EOL>return ret[\"<STR_LIT:result>\"]<EOL>", "docstring": "get physical plan", "id": "f7426:c0:m12"}
{"signature": "def delete_pplan(self, topologyName):", "body": "path = self.get_pplan_path(topologyName)<EOL>LOG.info(\"<STR_LIT>\".format(<EOL>topologyName, path))<EOL>try:<EOL><INDENT>self.client.delete(path)<EOL>return True<EOL><DEDENT>except NoNodeError:<EOL><INDENT>raise_(StateException(\"<STR_LIT>\",<EOL>StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[<NUM_LIT:2>])<EOL><DEDENT>except NotEmptyError:<EOL><INDENT>raise_(StateException(\"<STR_LIT>\",<EOL>StateException.EX_TYPE_NOT_EMPTY_ERROR), sys.exc_info()[<NUM_LIT:2>])<EOL><DEDENT>except ZookeeperError:<EOL><INDENT>raise_(StateException(\"<STR_LIT>\",<EOL>StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[<NUM_LIT:2>])<EOL><DEDENT>except Exception:<EOL><INDENT>raise<EOL><DEDENT>", "docstring": "delete physical plan info", "id": "f7426:c0:m15"}
{"signature": "def _kazoo_client(self, hostportlist):", "body": "return KazooClient(hostportlist)<EOL>", "docstring": "For Unit testing, replace this method to not\nActually return a client", "id": "f7426:c0:m1"}
{"signature": "def create_pplan(self, topologyName, pplan):", "body": "if not pplan or not pplan.IsInitialized():<EOL><INDENT>raise_(StateException(\"<STR_LIT>\",<EOL>StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()[<NUM_LIT:2>])<EOL><DEDENT>path = self.get_pplan_path(topologyName)<EOL>LOG.info(\"<STR_LIT>\".format(<EOL>topologyName, path))<EOL>pplanString = pplan.SerializeToString()<EOL>try:<EOL><INDENT>self.client.create(path, value=pplanString, makepath=True)<EOL>return True<EOL><DEDENT>except NoNodeError:<EOL><INDENT>raise_(StateException(\"<STR_LIT>\",<EOL>StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[<NUM_LIT:2>])<EOL><DEDENT>except NodeExistsError:<EOL><INDENT>raise_(StateException(\"<STR_LIT>\",<EOL>StateException.EX_TYPE_NODE_EXISTS_ERROR), sys.exc_info()[<NUM_LIT:2>])<EOL><DEDENT>except ZookeeperError:<EOL><INDENT>raise_(StateException(\"<STR_LIT>\",<EOL>StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[<NUM_LIT:2>])<EOL><DEDENT>except Exception:<EOL><INDENT>raise<EOL><DEDENT>", "docstring": "create physical plan", "id": "f7426:c0:m14"}
{"signature": "def _get_topology_with_watch(self, topologyName, callback, isWatching):", "body": "path = self.get_topology_path(topologyName)<EOL>if isWatching:<EOL><INDENT>LOG.info(\"<STR_LIT>\" + path)<EOL><DEDENT>@self.client.DataWatch(path)<EOL>def watch_topology(data, stats):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if data:<EOL><INDENT>topology = Topology()<EOL>topology.ParseFromString(data)<EOL>callback(topology)<EOL><DEDENT>else:<EOL><INDENT>callback(None)<EOL><DEDENT>return isWatching<EOL><DEDENT>", "docstring": "Helper function to get pplan with\na callback. The future watch is placed\nonly if isWatching is True.", "id": "f7426:c0:m7"}
{"signature": "def _get_execution_state_with_watch(self, topologyName, callback, isWatching):", "body": "path = self.get_execution_state_path(topologyName)<EOL>if isWatching:<EOL><INDENT>LOG.info(\"<STR_LIT>\" + path)<EOL><DEDENT>@self.client.DataWatch(path)<EOL>def watch_execution_state(data, stats):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if data:<EOL><INDENT>executionState = ExecutionState()<EOL>executionState.ParseFromString(data)<EOL>callback(executionState)<EOL><DEDENT>else:<EOL><INDENT>callback(None)<EOL><DEDENT>return isWatching<EOL><DEDENT>", "docstring": "Helper function to get execution state with\na callback. The future watch is placed\nonly if isWatching is True.", "id": "f7426:c0:m17"}
{"signature": "def create_topology(self, topologyName, topology):", "body": "if not topology or not topology.IsInitialized():<EOL><INDENT>raise_(StateException(\"<STR_LIT>\",<EOL>StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()[<NUM_LIT:2>])<EOL><DEDENT>path = self.get_topology_path(topologyName)<EOL>LOG.info(\"<STR_LIT>\".format(<EOL>topologyName, path))<EOL>topologyString = topology.SerializeToString()<EOL>try:<EOL><INDENT>self.client.create(path, value=topologyString, makepath=True)<EOL>return True<EOL><DEDENT>except NoNodeError:<EOL><INDENT>raise_(StateException(\"<STR_LIT>\",<EOL>StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[<NUM_LIT:2>])<EOL><DEDENT>except NodeExistsError:<EOL><INDENT>raise_(StateException(\"<STR_LIT>\",<EOL>StateException.EX_TYPE_NODE_EXISTS_ERROR), sys.exc_info()[<NUM_LIT:2>])<EOL><DEDENT>except ZookeeperError:<EOL><INDENT>raise_(StateException(\"<STR_LIT>\",<EOL>StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[<NUM_LIT:2>])<EOL><DEDENT>except Exception:<EOL><INDENT>raise<EOL><DEDENT>", "docstring": "crate topology", "id": "f7426:c0:m8"}
{"signature": "def start(self):", "body": "if self.is_host_port_reachable():<EOL><INDENT>self.client = self._kazoo_client(_makehostportlist(self.hostportlist))<EOL><DEDENT>else:<EOL><INDENT>localhostports = self.establish_ssh_tunnel()<EOL>self.client = self._kazoo_client(_makehostportlist(localhostports))<EOL><DEDENT>self.client.start()<EOL>def on_connection_change(state):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>LOG.info(\"<STR_LIT>\" + state)<EOL><DEDENT>self.client.add_listener(on_connection_change)<EOL>", "docstring": "state Zookeeper", "id": "f7426:c0:m2"}
{"signature": "def get_topology(self, topologyName, callback=None):", "body": "isWatching = False<EOL>ret = {<EOL>\"<STR_LIT:result>\": None<EOL>}<EOL>if callback:<EOL><INDENT>isWatching = True<EOL><DEDENT>else:<EOL><INDENT>def callback(data):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>ret[\"<STR_LIT:result>\"] = data<EOL><DEDENT><DEDENT>self._get_topology_with_watch(topologyName, callback, isWatching)<EOL>return ret[\"<STR_LIT:result>\"]<EOL>", "docstring": "get topologies", "id": "f7426:c0:m6"}
{"signature": "def __replace(config, wildcards, config_file):", "body": "for config_key in config:<EOL><INDENT>config_value = config[config_key]<EOL>original_value = config_value<EOL>if isinstance(config_value, str):<EOL><INDENT>for token in wildcards:<EOL><INDENT>if wildcards[token]:<EOL><INDENT>config_value = config_value.replace(token, wildcards[token])<EOL><DEDENT><DEDENT>found = re.findall(r'<STR_LIT>', config_value)<EOL>if found:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" %<EOL>(config_key, original_value, config_file, \"<STR_LIT:U+002CU+0020>\".join(found)))<EOL><DEDENT>config[config_key] = config_value<EOL><DEDENT><DEDENT>return config<EOL>", "docstring": "For each kvp in config, do wildcard substitution on the values", "id": "f7427:m1"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def get_packing_plan(self, topologyName, callback=None):<DEDENT>", "body": "pass<EOL>", "docstring": "Gets the packing_plan for the topology.\nIf the callback is provided,\nsets watch on the path and calls the callback\nwith the new packing_plan.", "id": "f7428:c0:m26"}
{"signature": "def establish_ssh_tunnel(self):", "body": "localportlist = []<EOL>for (host, port) in self.hostportlist:<EOL><INDENT>localport = self.pick_unused_port()<EOL>self.tunnel.append(subprocess.Popen(<EOL>('<STR_LIT>', self.tunnelhost, '<STR_LIT>' % (localport, host, port))))<EOL>localportlist.append(('<STR_LIT:127.0.0.1>', localport))<EOL><DEDENT>return localportlist<EOL>", "docstring": "Establish an ssh tunnel for each local host and port\nthat can be used to communicate with the state host.", "id": "f7428:c0:m11"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def stop(self):<DEDENT>", "body": "pass<EOL>", "docstring": "If the state manager had connected to a remote server, it would need to stop as well.", "id": "f7428:c0:m14"}
{"signature": "def delete_topology_from_zk(self, topologyName):", "body": "self.delete_pplan(topologyName)<EOL>self.delete_execution_state(topologyName)<EOL>self.delete_topology(topologyName)<EOL>", "docstring": "Removes the topology entry from:\n1. topologies list,\n2. pplan,\n3. execution_state, and", "id": "f7428:c0:m35"}
{"signature": "@tunnelhost.setter<EOL><INDENT>def tunnelhost(self, newTunnelHost):<DEDENT>", "body": "self.__tunnelhost = newTunnelHost<EOL>", "docstring": "Setter for the tunnelhost to create the tunnel if host is not accessible", "id": "f7428:c0:m7"}
{"signature": "@rootpath.setter<EOL><INDENT>def rootpath(self, newRootPath):<DEDENT>", "body": "self.__hostport = newRootPath<EOL>", "docstring": "Setter for the path where the heron states are stored.", "id": "f7428:c0:m5"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def start(self):<DEDENT>", "body": "pass<EOL>", "docstring": "If the state manager needs to connect to a remote host.", "id": "f7428:c0:m13"}
{"signature": "@tornado.web.asynchronous<EOL><INDENT>def post(self):<DEDENT>", "body": "def status_finish(ret):<EOL><INDENT>self.set_status(ret)<EOL>self.finish()<EOL><DEDENT>def kill_parent():<EOL><INDENT>status_finish(<NUM_LIT:200>)<EOL>logger.info(\"<STR_LIT>\")<EOL>os.killpg(os.getppid(), signal.SIGTERM)<EOL><DEDENT>logger = logging.getLogger(__file__)<EOL>logger.info(\"<STR_LIT>\")<EOL>data = dict(urlparse.parse_qsl(self.request.body))<EOL>sharedSecret = data.get('<STR_LIT>')<EOL>if sharedSecret != options.secret:<EOL><INDENT>status_finish(<NUM_LIT>)<EOL>return<EOL><DEDENT>instanceId = data.get('<STR_LIT>')<EOL>if instanceId:<EOL><INDENT>filepath = instanceId + '<STR_LIT>'<EOL>if os.path.isfile(filepath): <EOL><INDENT>if instanceId.startswith('<STR_LIT>'): <EOL><INDENT>kill_parent()<EOL><DEDENT>else: <EOL><INDENT>fh = open(filepath)<EOL>firstLine = int(fh.readline())<EOL>fh.close()<EOL>logger.info(\"<STR_LIT>\" + instanceId + \"<STR_LIT:U+0020>\" + str(firstLine))<EOL>os.kill(firstLine, signal.SIGTERM)<EOL>status_finish(<NUM_LIT:200>)<EOL><DEDENT><DEDENT>else: <EOL><INDENT>logger.info(filepath + \"<STR_LIT>\")<EOL>status_finish(<NUM_LIT>)<EOL><DEDENT><DEDENT>else: <EOL><INDENT>kill_parent()<EOL><DEDENT>", "docstring": "post method", "id": "f7429:c0:m0"}
{"signature": "@tornado.web.asynchronous<EOL><INDENT>def get(self, path):<DEDENT>", "body": "logging.debug(\"<STR_LIT>\", path)<EOL>self.connection_closed = False<EOL>self.set_header(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>if not utils.check_path(path):<EOL><INDENT>self.write(\"<STR_LIT>\")<EOL>self.set_status(<NUM_LIT>)<EOL>self.finish()<EOL>return<EOL><DEDENT>if path is None or not os.path.isfile(path):<EOL><INDENT>self.write(\"<STR_LIT>\" % path)<EOL>self.set_status(<NUM_LIT>)<EOL>self.finish()<EOL>return<EOL><DEDENT>length = int(<NUM_LIT:4> * <NUM_LIT> * <NUM_LIT>)<EOL>offset = int(<NUM_LIT:0>)<EOL>while True:<EOL><INDENT>data = utils.read_chunk(path, offset=offset, length=length, escape_data=False)<EOL>if self.connection_closed or '<STR_LIT:data>' not in data or len(data['<STR_LIT:data>']) < length:<EOL><INDENT>break<EOL><DEDENT>offset += length<EOL>self.write(data['<STR_LIT:data>'])<EOL>self.flush()<EOL><DEDENT>if '<STR_LIT:data>' in data:<EOL><INDENT>self.write(data['<STR_LIT:data>'])<EOL><DEDENT>self.finish()<EOL>", "docstring": "get method", "id": "f7431:c0:m0"}
{"signature": "def on_connection_close(self):", "body": "<EOL>self.connection_closed = True<EOL>", "docstring": ":return:", "id": "f7431:m0"}
{"signature": "@tornado.web.asynchronous<EOL><INDENT>def get(self, pid):<DEDENT>", "body": "body = utils.str_cmd(['<STR_LIT>', pid], None, None)<EOL>self.content_type = '<STR_LIT:application/json>'<EOL>self.write(json.dumps(body))<EOL>self.finish()<EOL>", "docstring": "get method", "id": "f7433:c0:m0"}
{"signature": "@tornado.web.asynchronous<EOL><INDENT>def get(self, path):<DEDENT>", "body": "t = Template(utils.get_asset(\"<STR_LIT>\"))<EOL>if path is None:<EOL><INDENT>self.set_status(<NUM_LIT>)<EOL>self.write(\"<STR_LIT>\")<EOL>self.finish()<EOL>return<EOL><DEDENT>if not utils.check_path(path):<EOL><INDENT>self.write(\"<STR_LIT>\")<EOL>self.set_status(<NUM_LIT>)<EOL>self.finish()<EOL>return<EOL><DEDENT>args = dict(<EOL>filename=path,<EOL>jquery=utils.get_asset(\"<STR_LIT>\"),<EOL>pailer=utils.get_asset(\"<STR_LIT>\"),<EOL>css=utils.get_asset(\"<STR_LIT>\"),<EOL>)<EOL>self.write(t.generate(**args))<EOL>self.finish()<EOL>", "docstring": "get method", "id": "f7435:c0:m0"}
{"signature": "@tornado.web.asynchronous<EOL><INDENT>def get(self, pid):<DEDENT>", "body": "body = utils.str_cmd(['<STR_LIT>', '<STR_LIT>', pid], None, None)<EOL>self.content_type = '<STR_LIT:application/json>'<EOL>self.write(json.dumps(body))<EOL>self.finish()<EOL>", "docstring": "get method", "id": "f7436:c0:m0"}
{"signature": "@tornado.web.asynchronous<EOL><INDENT>def get(self, pid):<DEDENT>", "body": "utils.str_cmd(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'], None, None)<EOL>body = utils.str_cmd(['<STR_LIT>', '<STR_LIT>',<EOL>str(pid)], None, None)<EOL>utils.str_cmd(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'], None, None)<EOL>self.content_type = '<STR_LIT:application/json>'<EOL>self.write(json.dumps(body))<EOL>self.finish()<EOL>", "docstring": "get method", "id": "f7439:c0:m0"}
{"signature": "def get_listing(path):", "body": "if path != \"<STR_LIT:.>\":<EOL><INDENT>listing = sorted(['<STR_LIT:..>'] + os.listdir(path))<EOL><DEDENT>else:<EOL><INDENT>listing = sorted(os.listdir(path))<EOL><DEDENT>return listing<EOL>", "docstring": "Returns the list of files and directories in a path.\nPrepents a \"..\" (parent directory link) if path is not current dir.", "id": "f7443:m3"}
{"signature": "def chain(cmd_list):", "body": "command = '<STR_LIT>'.join(map(lambda x: '<STR_LIT:U+0020>'.join(x), cmd_list))<EOL>chained_proc = functools.reduce(pipe, [None] + cmd_list)<EOL>stdout_builder = proc.async_stdout_builder(chained_proc)<EOL>chained_proc.wait()<EOL>return {<EOL>'<STR_LIT>': command,<EOL>'<STR_LIT>': stdout_builder.result()<EOL>}<EOL>", "docstring": "Feed output of one command to the next and return final output\nReturns string output of chained application of commands.", "id": "f7443:m9"}
{"signature": "def get_asset(asset_name):", "body": "return pkgutil.get_data(\"<STR_LIT>\", os.path.join(\"<STR_LIT>\", asset_name))<EOL>", "docstring": "get assset", "id": "f7443:m11"}
{"signature": "def read_chunk(filename, offset=-<NUM_LIT:1>, length=-<NUM_LIT:1>, escape_data=False):", "body": "try:<EOL><INDENT>length = int(length)<EOL>offset = int(offset)<EOL><DEDENT>except ValueError:<EOL><INDENT>return {}<EOL><DEDENT>if not os.path.isfile(filename):<EOL><INDENT>return {}<EOL><DEDENT>try:<EOL><INDENT>fstat = os.stat(filename)<EOL><DEDENT>except Exception:<EOL><INDENT>return {}<EOL><DEDENT>if offset == -<NUM_LIT:1>:<EOL><INDENT>offset = fstat.st_size<EOL><DEDENT>if length == -<NUM_LIT:1>:<EOL><INDENT>length = fstat.st_size - offset<EOL><DEDENT>with open(filename, \"<STR_LIT:r>\") as fp:<EOL><INDENT>fp.seek(offset)<EOL>try:<EOL><INDENT>data = fp.read(length)<EOL><DEDENT>except IOError:<EOL><INDENT>return {}<EOL><DEDENT><DEDENT>if data:<EOL><INDENT>data = _escape_data(data) if escape_data else data<EOL>return dict(offset=offset, length=len(data), data=data)<EOL><DEDENT>return dict(offset=offset, length=<NUM_LIT:0>)<EOL>", "docstring": "Read a chunk of a file from an offset upto the length.", "id": "f7443:m5"}
{"signature": "def format_prefix(filename, sres):", "body": "try:<EOL><INDENT>pwent = pwd.getpwuid(sres.st_uid)<EOL>user = pwent.pw_name<EOL><DEDENT>except KeyError:<EOL><INDENT>user = sres.st_uid<EOL><DEDENT>try:<EOL><INDENT>grent = grp.getgrgid(sres.st_gid)<EOL>group = grent.gr_name<EOL><DEDENT>except KeyError:<EOL><INDENT>group = sres.st_gid<EOL><DEDENT>return '<STR_LIT>' % (<EOL>format_mode(sres),<EOL>sres.st_nlink,<EOL>user,<EOL>group,<EOL>sres.st_size,<EOL>format_mtime(sres.st_mtime),<EOL>)<EOL>", "docstring": "Prefix to a filename in the directory listing. This is to make the\nlisting similar to an output of \"ls -alh\".", "id": "f7443:m2"}
{"signature": "def check_path(path):", "body": "return not path.startswith(\"<STR_LIT:/>\") and \"<STR_LIT:..>\" not in path<EOL>", "docstring": "file path should be a relative path without \"..\" in it\n:param path: file path\n:return: true if the path is relative and doesn't contain \"..\"", "id": "f7443:m12"}
{"signature": "def get_a_mock_request_packet_and_raw():", "body": "reqid = REQID.generate()<EOL>message = mock_protobuf.get_mock_register_response()<EOL>pkt = convert_to_incoming_packet(reqid, message)<EOL>return pkt, reqid, message<EOL>", "docstring": "Returns a tuple of mock (IncomingPacket, REQID, RegisterResponse message)", "id": "f7448:m2"}
{"signature": "def convert_to_incoming_packet(reqid, message):", "body": "raw = OutgoingPacket.create_packet(reqid, message).raw<EOL>dispatcher = MockDispatcher()<EOL>dispatcher.prepare_with_raw(raw)<EOL>packet = IncomingPacket()<EOL>packet.read(dispatcher)<EOL>packet.data = packet.data<EOL>return packet<EOL>", "docstring": "Convert (reqid, message) pair to IncomingPacket object", "id": "f7448:m0"}
{"signature": "def prepare_eagain(self):", "body": "self.eagain_test = True<EOL>", "docstring": "prepare so that EAGAIN error is raised when recv() is called", "id": "f7448:c0:m5"}
{"signature": "def send(self, buf):", "body": "return len(buf)<EOL>", "docstring": "mock sends the content of a given buffer", "id": "f7448:c0:m8"}
{"signature": "def prepare_fatal(self):", "body": "self.fatal_error_test = True<EOL>", "docstring": "prepare so that RuntimeError is raised when recv() is called", "id": "f7448:c0:m6"}
{"signature": "def prepare_header_only(self):", "body": "pkt = get_mock_requst_packets(is_message=False)[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>self.to_be_received = pkt.header<EOL>", "docstring": "a packet with just a header (incomplete packet) will be prepared in the recv buffer", "id": "f7448:c0:m3"}
{"signature": "def recv(self, numbytes):", "body": "if self.fatal_error_test:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>elif self.eagain_test:<EOL><INDENT>raise socket.error(socket.errno.EAGAIN, \"<STR_LIT>\")<EOL><DEDENT>ret = self.to_be_received[:numbytes]<EOL>self.to_be_received = self.to_be_received[numbytes:]<EOL>return ret<EOL>", "docstring": "reads ``numbytes`` from the recv buffer", "id": "f7448:c0:m7"}
{"signature": "def get_a_sample_pplan():", "body": "spout_1 = mock_protobuf.get_mock_spout(component=mock_protobuf.get_mock_component(name=\"<STR_LIT>\"))<EOL>bolt_1 = mock_protobuf.get_mock_bolt(component=mock_protobuf.get_mock_component(name=\"<STR_LIT>\"))<EOL>bolt_2 = mock_protobuf.get_mock_bolt(component=mock_protobuf.get_mock_component(name=\"<STR_LIT>\"))<EOL>topology = mock_protobuf.get_mock_topology(spouts=[spout_1], bolts=[bolt_1, bolt_2])<EOL>instance_ids = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]<EOL>task_ids = [<NUM_LIT:100>, <NUM_LIT:200>, <NUM_LIT>]<EOL>comp_indexes = [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>]<EOL>comp_names = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]<EOL>instances = []<EOL>for i_id, t_id, c_i, c_name in zip(instance_ids, task_ids, comp_indexes, comp_names):<EOL><INDENT>info = mock_protobuf.get_mock_instance_info(task_id=t_id,<EOL>component_index=c_i,<EOL>component_name=c_name)<EOL>instance = mock_protobuf.get_mock_instance(instance_id=i_id, info=info)<EOL>instances.append(instance)<EOL><DEDENT>pplan = mock_protobuf.get_mock_pplan(topology=topology, instances=instances)<EOL>keys = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]<EOL>zipped = zip(instance_ids, task_ids, comp_indexes, comp_names)<EOL>return pplan, [dict(zip(keys, z)) for z in zipped]<EOL>", "docstring": "Returns a legitimate looking physical plan\n\n    This topology has 1 spout and 2 bolts. 
Currently no input/output streams.\n    There is only one stream manager.\n\n    [Instance 1: spout1]\n      - instance_id = \"instance1\"\n      - task_id = 100\n      - component_index = 0\n      - component_name = \"spout1\"\n\n    [Instance 2: bolt1]\n      - instance_id = \"instance2\"\n      - task_id = 200\n      - component_index = 0\n      - component_name = \"bolt1\"\n\n    [instance 3: bolt2]\n      - instance_id = \"instance3\"\n      - task_id = 300\n      - component_index = 0\n      - component_name = \"bolt2\"\n\n    :returns: PhysicalPlan message and a list of dictionaries for each instance containing\n              (instance_id, task_id, comp_index, comp_name)", "id": "f7453:m0"}
{"signature": "def make_data_tuple_from_list(lst, serializer=PythonSerializer()):", "body": "data_tuple = tuple_pb2.HeronDataTuple()<EOL>data_tuple.key = <NUM_LIT:0><EOL>tuple_size_in_bytes = <NUM_LIT:0><EOL>for obj in lst:<EOL><INDENT>serialized = serializer.serialize(obj)<EOL>data_tuple.values.append(serialized)<EOL>tuple_size_in_bytes += len(serialized)<EOL><DEDENT>return data_tuple, tuple_size_in_bytes<EOL>", "docstring": "Make HeronDataTuple from a list of objects", "id": "f7453:m1"}
{"signature": "def get_mock_bolt(component=get_mock_component(), inputs=[], outputs=[]):", "body": "bolt = topology_pb2.Bolt()<EOL>bolt.comp.CopyFrom(component)<EOL>for i in inputs:<EOL><INDENT>added = bolt.inputs.add()<EOL>added.CopyFrom(i)<EOL><DEDENT>for o in outputs:<EOL><INDENT>added = bolt.outputs.add()<EOL>added.CopyFrom(o)<EOL><DEDENT>return bolt<EOL>", "docstring": "Returns a mock protobuf Bolt object from topology_pb2", "id": "f7463:m3"}
{"signature": "def get_mock_config(config_dict=None):", "body": "if config_dict is None:<EOL><INDENT>return topology_pb2.Config()<EOL><DEDENT>proto_config = topology_pb2.Config()<EOL>config_serializer = PythonSerializer()<EOL>assert isinstance(config_dict, dict)<EOL>for key, value in config_dict.items():<EOL><INDENT>if isinstance(value, bool):<EOL><INDENT>kvs = proto_config.kvs.add()<EOL>kvs.key = key<EOL>kvs.value = \"<STR_LIT:true>\" if value else \"<STR_LIT:false>\"<EOL>kvs.type = topology_pb2.ConfigValueType.Value(\"<STR_LIT>\")<EOL><DEDENT>elif isinstance(value, (str, int, float)):<EOL><INDENT>kvs = proto_config.kvs.add()<EOL>kvs.key = key<EOL>kvs.value = str(value)<EOL>kvs.type = topology_pb2.ConfigValueType.Value(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>kvs = proto_config.kvs.add()<EOL>kvs.key = key<EOL>kvs.serialized_value = config_serializer.serialize(value)<EOL>kvs.type = topology_pb2.ConfigValueType.Value(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return proto_config<EOL>", "docstring": "Returns a protobuf Config object from topology_pb2", "id": "f7463:m0"}
{"signature": "def get_pplan_builder_and_typename():", "body": "<EOL>builder = lambda: physical_plan_pb2.PhysicalPlan()<EOL>typename = builder().DESCRIPTOR.full_name<EOL>return builder, typename<EOL>", "docstring": "Returns a PhysicalPlan builder callable and typename 'PhysicalPlan", "id": "f7463:m13"}
{"signature": "def get_mock_assignment_message(pplan=get_mock_pplan()):", "body": "<EOL>mock_message = stmgr_pb2.NewInstanceAssignmentMessage()<EOL>mock_message.pplan.MergeFrom(pplan)<EOL>return mock_message<EOL>", "docstring": "Returns a mock protobuf NewInstanceAssignmentMessage object from stmgr_pb2", "id": "f7463:m11"}
{"signature": "def get_mock_component(name=\"<STR_LIT>\",<EOL>config=get_mock_config(),<EOL>python_cls=\"<STR_LIT>\"):", "body": "component = topology_pb2.Component()<EOL>component.name = name<EOL>component.spec = topology_pb2.ComponentObjectSpec.Value(\"<STR_LIT>\")<EOL>component.class_name = python_cls<EOL>component.config.CopyFrom(config)<EOL>return component<EOL>", "docstring": "Returns a mock protobuf Component object from topology_pb2", "id": "f7463:m1"}
{"signature": "def get_mock_stream_id(id=\"<STR_LIT>\", component_name=\"<STR_LIT>\"):", "body": "stream_id = topology_pb2.StreamId()<EOL>stream_id.id = id<EOL>stream_id.component_name = component_name<EOL>return stream_id<EOL>", "docstring": "Returns a mock protobuf StreamId from topology_pb2", "id": "f7463:m2"}
{"signature": "def get_many_mock_pplans():", "body": "pplans_lst = []<EOL>for i in range(<NUM_LIT:10>):<EOL><INDENT>_id = \"<STR_LIT>\" + str(i)<EOL>pplan = get_mock_pplan(stmgrs=[get_mock_stmgr(id=_id)])<EOL>pplans_lst.append(pplan)<EOL><DEDENT>return pplans_lst<EOL>", "docstring": "Returns a list of 10 PhysicalPlan objects, differing just by stream manager id", "id": "f7463:m14"}
{"signature": "def get_mock_spout(component=get_mock_component(), outputs=[]):", "body": "spout = topology_pb2.Spout()<EOL>spout.comp.CopyFrom(component)<EOL>for out in outputs:<EOL><INDENT>added = spout.outputs.add()<EOL>added.CopyFrom(out)<EOL><DEDENT>return spout<EOL>", "docstring": "Returns a mock protobuf Spout object from topology_pb2", "id": "f7463:m4"}
{"signature": "def log(self, message, level=None):", "body": "if level is None:<EOL><INDENT>_log_level = logging.INFO<EOL><DEDENT>else:<EOL><INDENT>if level == \"<STR_LIT>\" or level == \"<STR_LIT>\":<EOL><INDENT>_log_level = logging.DEBUG<EOL><DEDENT>elif level == \"<STR_LIT:info>\":<EOL><INDENT>_log_level = logging.INFO<EOL><DEDENT>elif level == \"<STR_LIT>\":<EOL><INDENT>_log_level = logging.WARNING<EOL><DEDENT>elif level == \"<STR_LIT:error>\":<EOL><INDENT>_log_level = logging.ERROR<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % str(level))<EOL><DEDENT><DEDENT>self.logger.log(_log_level, message)<EOL>", "docstring": "Log message, optionally providing a logging level\n\n        It is compatible with StreamParse API.\n\n        :type message: str\n        :param message: the log message to send\n        :type level: str\n        :param level: the logging level,\n                      one of: trace (=debug), debug, info, warn or error (default: info)", "id": "f7466:c0:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def invoke_deactivate(self):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Deactivate the instance", "id": "f7466:c0:m15"}
{"signature": "@abstractmethod<EOL><INDENT>def process_incoming_tuples(self):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Should be called when a tuple was buffered into in_stream", "id": "f7466:c0:m13"}
{"signature": "@abstractmethod<EOL><INDENT>def stop_component(self):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Do the basic clean for Heron Instance\n\n        Note that this method is not guaranteed to be invoked", "id": "f7466:c0:m12"}
{"signature": "def emit(self, tup, stream=Stream.DEFAULT_STREAM_ID,<EOL>anchors=None, direct_task=None, need_task_ids=False):", "body": "<EOL>self.pplan_helper.check_output_schema(stream, tup)<EOL>custom_target_task_ids = self.pplan_helper.choose_tasks_for_custom_grouping(stream, tup)<EOL>self.pplan_helper.context.invoke_hook_emit(tup, stream, None)<EOL>data_tuple = tuple_pb2.HeronDataTuple()<EOL>data_tuple.key = <NUM_LIT:0><EOL>if direct_task is not None:<EOL><INDENT>if not isinstance(direct_task, int):<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>% str(type(direct_task)))<EOL><DEDENT>data_tuple.dest_task_ids.append(direct_task)<EOL><DEDENT>elif custom_target_task_ids is not None:<EOL><INDENT>for task_id in custom_target_task_ids:<EOL><INDENT>data_tuple.dest_task_ids.append(task_id)<EOL><DEDENT><DEDENT>if anchors is not None:<EOL><INDENT>merged_roots = set()<EOL>for tup in [t for t in anchors if isinstance(t, HeronTuple) and t.roots is not None]:<EOL><INDENT>merged_roots.update(tup.roots)<EOL><DEDENT>for rt in merged_roots:<EOL><INDENT>to_add = data_tuple.roots.add()<EOL>to_add.CopyFrom(rt)<EOL><DEDENT><DEDENT>tuple_size_in_bytes = <NUM_LIT:0><EOL>start_time = time.time()<EOL>for obj in tup:<EOL><INDENT>serialized = self.serializer.serialize(obj)<EOL>data_tuple.values.append(serialized)<EOL>tuple_size_in_bytes += len(serialized)<EOL><DEDENT>serialize_latency_ns = (time.time() - start_time) * system_constants.SEC_TO_NS<EOL>self.bolt_metrics.serialize_data_tuple(stream, serialize_latency_ns)<EOL>super(BoltInstance, self).admit_data_tuple(stream_id=stream, data_tuple=data_tuple,<EOL>tuple_size_in_bytes=tuple_size_in_bytes)<EOL>self.bolt_metrics.update_emit_count(stream)<EOL>if need_task_ids:<EOL><INDENT>sent_task_ids = custom_target_task_ids or []<EOL>if direct_task is not None:<EOL><INDENT>sent_task_ids.append(direct_task)<EOL><DEDENT>return sent_task_ids<EOL><DEDENT>", "docstring": "Emits a new tuple from this Bolt\n\n        It is compatible with StreamParse 
API.\n\n        :type tup: list or tuple\n        :param tup: the new output Tuple to send from this bolt,\n                    should only contain only serializable data.\n        :type stream: str\n        :param stream: the ID of the stream to emit this Tuple to.\n                       Leave empty to emit to the default stream.\n        :type anchors: list\n        :param anchors: a list of HeronTuples to which the emitted Tuples should be anchored.\n        :type direct_task: int\n        :param direct_task: the task to send the Tupel to if performing a direct emit.\n        :type need_task_ids: bool\n        :param need_task_ids: indicate whether or not you would like the task IDs the Tuple was emitted.", "id": "f7467:c0:m5"}
{"signature": "def ack(self, tup):", "body": "if not isinstance(tup, HeronTuple):<EOL><INDENT>Log.error(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if self.acking_enabled:<EOL><INDENT>ack_tuple = tuple_pb2.AckTuple()<EOL>ack_tuple.ackedtuple = int(tup.id)<EOL>tuple_size_in_bytes = <NUM_LIT:0><EOL>for rt in tup.roots:<EOL><INDENT>to_add = ack_tuple.roots.add()<EOL>to_add.CopyFrom(rt)<EOL>tuple_size_in_bytes += rt.ByteSize()<EOL><DEDENT>super(BoltInstance, self).admit_control_tuple(ack_tuple, tuple_size_in_bytes, True)<EOL><DEDENT>process_latency_ns = (time.time() - tup.creation_time) * system_constants.SEC_TO_NS<EOL>self.pplan_helper.context.invoke_hook_bolt_ack(tup, process_latency_ns)<EOL>self.bolt_metrics.acked_tuple(tup.stream, tup.component, process_latency_ns)<EOL>", "docstring": "Indicate that processing of a Tuple has succeeded\n\n        It is compatible with StreamParse API.", "id": "f7467:c0:m10"}
{"signature": "def process_incoming_tuples(self):", "body": "<EOL>if self.output_helper.is_out_queue_available():<EOL><INDENT>self._read_tuples_and_execute()<EOL>self.output_helper.send_out_tuples()<EOL><DEDENT>else:<EOL><INDENT>self.bolt_metrics.update_out_queue_full_count()<EOL><DEDENT>", "docstring": "Should be called when tuple was buffered into in_stream\n\n        This method is equivalent to ``addBoltTasks()`` but\n        is designed for event-driven single-thread bolt.", "id": "f7467:c0:m6"}
{"signature": "def fail(self, tup):", "body": "if not isinstance(tup, HeronTuple):<EOL><INDENT>Log.error(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if self.acking_enabled:<EOL><INDENT>fail_tuple = tuple_pb2.AckTuple()<EOL>fail_tuple.ackedtuple = int(tup.id)<EOL>tuple_size_in_bytes = <NUM_LIT:0><EOL>for rt in tup.roots:<EOL><INDENT>to_add = fail_tuple.roots.add()<EOL>to_add.CopyFrom(rt)<EOL>tuple_size_in_bytes += rt.ByteSize()<EOL><DEDENT>super(BoltInstance, self).admit_control_tuple(fail_tuple, tuple_size_in_bytes, False)<EOL><DEDENT>fail_latency_ns = (time.time() - tup.creation_time) * system_constants.SEC_TO_NS<EOL>self.pplan_helper.context.invoke_hook_bolt_fail(tup, fail_latency_ns)<EOL>self.bolt_metrics.failed_tuple(tup.stream, tup.component, fail_latency_ns)<EOL>", "docstring": "Indicate that processing of a Tuple has failed\n\n        It is compatible with StreamParse API.", "id": "f7467:c0:m11"}
{"signature": "def send_request(self, request, context, response_type, timeout_sec):", "body": "<EOL>reqid = REQID.generate()<EOL>Log.debug(\"<STR_LIT>\" % (self._get_classname(), str(reqid)))<EOL>self.response_message_map[reqid] = response_type<EOL>self.context_map[reqid] = context<EOL>if timeout_sec > <NUM_LIT:0>:<EOL><INDENT>def timeout_task():<EOL><INDENT>self.handle_timeout(reqid)<EOL><DEDENT>self.looper.register_timer_task_in_sec(timeout_task, timeout_sec)<EOL><DEDENT>outgoing_pkt = OutgoingPacket.create_packet(reqid, request)<EOL>self._send_packet(outgoing_pkt)<EOL>", "docstring": "Sends a request message (REQID is non-zero)", "id": "f7471:c0:m12"}
{"signature": "def register_on_message(self, msg_builder):", "body": "message = msg_builder()<EOL>Log.debug(\"<STR_LIT>\" % message.DESCRIPTOR.full_name)<EOL>self.registered_message_map[message.DESCRIPTOR.full_name] = msg_builder<EOL>", "docstring": "Registers protobuf message builders that this client wants to receive\n\n        :param msg_builder: callable to create a protobuf message that this client wants to receive", "id": "f7471:c0:m11"}
{"signature": "@abstractmethod<EOL><INDENT>def on_incoming_message(self, message):<DEDENT>", "body": "pass<EOL>", "docstring": "Called when the client receives a message\n\n        Should be implemented by a subclass.", "id": "f7471:c0:m21"}
{"signature": "def send_message(self, message):", "body": "Log.debug(\"<STR_LIT>\" % self._get_classname())<EOL>outgoing_pkt = OutgoingPacket.create_packet(REQID.generate_zero(), message)<EOL>self._send_packet(outgoing_pkt)<EOL>", "docstring": "Sends a message (REQID is zero)", "id": "f7471:c0:m13"}
{"signature": "def _run_once(self):", "body": "try:<EOL><INDENT>self.do_wait()<EOL>self._execute_wakeup_tasks()<EOL>self._trigger_timers()<EOL><DEDENT>except Exception as e:<EOL><INDENT>Log.error(\"<STR_LIT>\" + str(e))<EOL>Log.error(traceback.format_exc())<EOL>self.should_exit = True<EOL><DEDENT>", "docstring": "Run once, should be called only from loop()", "id": "f7472:c0:m2"}
{"signature": "@abstractmethod<EOL><INDENT>def wake_up(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Wakes up do_wait() operation, should be implemented by a subclass\n\n        Note that this method should be implemented in a thread-safe way.", "id": "f7472:c0:m5"}
{"signature": "def register_timer_task_in_sec(self, task, second):", "body": "<EOL>second_in_float = float(second)<EOL>expiration = time.time() + second_in_float<EOL>heappush(self.timer_tasks, (expiration, task))<EOL>", "docstring": "Registers a new timer task\n\n        :param task: function to be run at a specified second from now\n        :param second: how many seconds to wait before the timer is triggered", "id": "f7472:c0:m8"}
{"signature": "def add_wakeup_task(self, task):", "body": "self.wakeup_tasks.append(task)<EOL>self.wake_up()<EOL>", "docstring": "Add a wakeup task\n\n        :param task: function to be run as a wakeup task", "id": "f7472:c0:m6"}
{"signature": "def _trigger_timers(self):", "body": "current = time.time()<EOL>while len(self.timer_tasks) > <NUM_LIT:0> and (self.timer_tasks[<NUM_LIT:0>][<NUM_LIT:0>] - current <= <NUM_LIT:0>):<EOL><INDENT>task = heappop(self.timer_tasks)[<NUM_LIT:1>]<EOL>task()<EOL><DEDENT>", "docstring": "Triggers expired timers", "id": "f7472:c0:m12"}
{"signature": "@abstractmethod<EOL><INDENT>def do_wait(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Blocking operation, should be implemented by a subclass", "id": "f7472:c0:m4"}
{"signature": "def on_exit(self):", "body": "Log.info(\"<STR_LIT>\")<EOL>for task in self.exit_tasks:<EOL><INDENT>task()<EOL><DEDENT>", "docstring": "Called when exiting", "id": "f7472:c0:m3"}
{"signature": "def exit_loop(self):", "body": "self.should_exit = True<EOL>self.wake_up()<EOL>", "docstring": "Exits the loop", "id": "f7472:c0:m9"}
{"signature": "def _handle_initiate_stateful_checkpoint(self, ckptmsg):", "body": "self.heron_instance_cls.handle_initiate_stateful_checkpoint(ckptmsg)<EOL>", "docstring": "Called when new InitiateStatefulCheckpoint arrives", "id": "f7473:c0:m9"}
{"signature": "def _handle_new_tuples_2(self, hts2):", "body": "self.heron_instance_cls.handle_new_tuple_set_2(hts2)<EOL>", "docstring": "Called when new HeronTupleSet2 arrives", "id": "f7473:c0:m8"}
{"signature": "def _handle_register_response(self, response):", "body": "if response.status.status != common_pb2.StatusCode.Value(\"<STR_LIT:OK>\"):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>Log.info(\"<STR_LIT>\")<EOL>self.is_registered = True<EOL>if response.HasField(\"<STR_LIT>\"):<EOL><INDENT>Log.info(\"<STR_LIT>\")<EOL>self._handle_assignment_message(response.pplan)<EOL><DEDENT>else:<EOL><INDENT>Log.debug(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Called when a register response (RegisterInstanceResponse) arrives", "id": "f7473:c0:m7"}
{"signature": "@staticmethod<EOL><INDENT>def generate_zero():<DEDENT>", "body": "data_bytes = bytearray(<NUM_LIT:0> for i in range(REQID.REQID_SIZE))<EOL>return REQID(data_bytes)<EOL>", "docstring": "Generates a zero REQID for message", "id": "f7474:c3:m2"}
{"signature": "def get_datasize(self):", "body": "if not self.is_header_read:<EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT>return HeronProtocol.unpack_int(self.header)<EOL>", "docstring": "Returns the datasize of the packet\n\n        :returns: (int) datasize of the packet, or -1 if header is incomplete", "id": "f7474:c2:m3"}
{"signature": "def pack(self):", "body": "return self.bytes<EOL>", "docstring": "Packs this REQID to bytestring", "id": "f7474:c3:m3"}
{"signature": "@staticmethod<EOL><INDENT>def pack_int(i):<DEDENT>", "body": "return struct.pack(HeronProtocol.INT_PACK_FMT, i)<EOL>", "docstring": "Packs int to bytestring", "id": "f7474:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def get_size_to_pack_string(string):<DEDENT>", "body": "return <NUM_LIT:4> + len(string)<EOL>", "docstring": "Get size to pack string, four byte used for specifying length of the string", "id": "f7474:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def create_packet(header, data):<DEDENT>", "body": "packet = IncomingPacket()<EOL>packet.header = header<EOL>packet.data = data<EOL>if len(header) == HeronProtocol.HEADER_SIZE:<EOL><INDENT>packet.is_header_read = True<EOL>if len(data) == packet.get_datasize():<EOL><INDENT>packet.is_complete = True<EOL><DEDENT><DEDENT>return packet<EOL>", "docstring": "Creates an IncomingPacket object from header and data\n\n        This method is for testing purposes", "id": "f7474:c2:m1"}
{"signature": "@staticmethod<EOL><INDENT>def generate():<DEDENT>", "body": "data_bytes = bytearray(random.getrandbits(<NUM_LIT:8>) for i in range(REQID.REQID_SIZE))<EOL>return REQID(data_bytes)<EOL>", "docstring": "Generates a random REQID for request", "id": "f7474:c3:m1"}
{"signature": "def send(self, dispatcher):", "body": "if self.sent_complete:<EOL><INDENT>return<EOL><DEDENT>sent = dispatcher.send(self.to_send)<EOL>self.to_send = self.to_send[sent:]<EOL>", "docstring": "Sends this outgoing packet to dispatcher's socket", "id": "f7474:c1:m4"}
{"signature": "@staticmethod<EOL><INDENT>def create_packet(reqid, message):<DEDENT>", "body": "assert message.IsInitialized()<EOL>packet = '<STR_LIT>'<EOL>typename = message.DESCRIPTOR.full_name<EOL>datasize = HeronProtocol.get_size_to_pack_string(typename) + REQID.REQID_SIZE + HeronProtocol.get_size_to_pack_message(message)<EOL>packet += HeronProtocol.pack_int(datasize)<EOL>packet += HeronProtocol.pack_int(len(typename))<EOL>packet += typename<EOL>packet += reqid.pack()<EOL>packet += HeronProtocol.pack_int(message.ByteSize())<EOL>packet += message.SerializeToString()<EOL>return OutgoingPacket(packet)<EOL>", "docstring": "Creates Outgoing Packet from a given reqid and message\n\n        :param reqid: REQID object\n        :param message: protocol buffer object", "id": "f7474:c1:m2"}
{"signature": "@property<EOL><INDENT>def sent_complete(self):<DEDENT>", "body": "return len(self.to_send) == <NUM_LIT:0><EOL>", "docstring": "Indicates whether this packet is successfully sent", "id": "f7474:c1:m3"}
{"signature": "def is_zero(self):", "body": "return self == REQID.generate_zero()<EOL>", "docstring": "Checks if this REQID is zero", "id": "f7474:c3:m4"}
{"signature": "def __init__(self, socket_map):", "body": "super(GatewayLooper, self).__init__()<EOL>self.sock_map = socket_map<EOL>self.pipe_r, self.pipe_w = os.pipe()<EOL>self.started = time.time()<EOL>Log.debug(\"<STR_LIT>\" + str(time.asctime()))<EOL>", "docstring": "Initializes a GatewayLooper instance\n\n        :param socket_map: socket map used for asyncore.dispatcher", "id": "f7475:c0:m0"}
{"signature": "def poll(self, timeout=<NUM_LIT:0.0>):", "body": "if self.sock_map is None:<EOL><INDENT>Log.warning(\"<STR_LIT>\")<EOL><DEDENT>readable_lst = []<EOL>writable_lst = []<EOL>error_lst = []<EOL>if self.sock_map is not None:<EOL><INDENT>for fd, obj in self.sock_map.items():<EOL><INDENT>is_r = obj.readable()<EOL>is_w = obj.writable()<EOL>if is_r:<EOL><INDENT>readable_lst.append(fd)<EOL><DEDENT>if is_w and not obj.accepting:<EOL><INDENT>writable_lst.append(fd)<EOL><DEDENT>if is_r or is_w:<EOL><INDENT>error_lst.append(fd)<EOL><DEDENT><DEDENT><DEDENT>readable_lst.append(self.pipe_r)<EOL>Log.debug(\"<STR_LIT>\" + str(timeout) + \"<STR_LIT>\" + str(self.sock_map))<EOL>try:<EOL><INDENT>readable_lst, writable_lst, error_lst = select.select(readable_lst, writable_lst, error_lst, timeout)<EOL><DEDENT>except select.error as err:<EOL><INDENT>Log.debug(\"<STR_LIT>\" + str(err))<EOL>if err.args[<NUM_LIT:0>] != errno.EINTR:<EOL><INDENT>raise<EOL><DEDENT>else:<EOL><INDENT>return<EOL><DEDENT><DEDENT>Log.debug(\"<STR_LIT>\" + str(readable_lst) +<EOL>\"<STR_LIT>\" + str(writable_lst) + \"<STR_LIT>\" + str(error_lst))<EOL>if self.pipe_r in readable_lst:<EOL><INDENT>Log.debug(\"<STR_LIT>\")<EOL>os.read(self.pipe_r, <NUM_LIT>)<EOL>readable_lst.remove(self.pipe_r)<EOL><DEDENT>if self.sock_map is not None:<EOL><INDENT>for fd in readable_lst:<EOL><INDENT>obj = self.sock_map.get(fd)<EOL>if obj is None:<EOL><INDENT>continue<EOL><DEDENT>asyncore.read(obj)<EOL><DEDENT>for fd in writable_lst:<EOL><INDENT>obj = self.sock_map.get(fd)<EOL>if obj is None:<EOL><INDENT>continue<EOL><DEDENT>asyncore.write(obj)<EOL><DEDENT>for fd in error_lst:<EOL><INDENT>obj = self.sock_map.get(fd)<EOL>if obj is None:<EOL><INDENT>continue<EOL><DEDENT>asyncore._exception(obj)<EOL><DEDENT><DEDENT>", "docstring": "Modified version of poll() from asyncore module", "id": "f7475:c0:m4"}
{"signature": "@staticmethod<EOL><INDENT>def make_tuple(stream, tuple_key, values, roots=None):<DEDENT>", "body": "component_name = stream.component_name<EOL>stream_id = stream.id<EOL>gen_task = roots[<NUM_LIT:0>].taskid if roots is not None and len(roots) > <NUM_LIT:0> else None<EOL>return HeronTuple(id=str(tuple_key), component=component_name, stream=stream_id,<EOL>task=gen_task, values=values, creation_time=time.time(), roots=roots)<EOL>", "docstring": "Creates a HeronTuple\n\n        :param stream: protobuf message ``StreamId``\n        :param tuple_key: tuple id\n        :param values: a list of values\n        :param roots: a list of protobuf message ``RootId``", "id": "f7476:c1:m0"}
{"signature": "def prepare(self, context):", "body": "for stream_id, targets in self.targets.items():<EOL><INDENT>for target in targets:<EOL><INDENT>target.prepare(context, stream_id)<EOL><DEDENT><DEDENT>", "docstring": "Prepares the custom grouping for this component", "id": "f7478:c0:m2"}
{"signature": "def choose_tasks(self, stream_id, values):", "body": "if stream_id not in self.targets:<EOL><INDENT>return []<EOL><DEDENT>ret = []<EOL>for target in self.targets[stream_id]:<EOL><INDENT>ret.extend(target.choose_tasks(values))<EOL><DEDENT>return ret<EOL>", "docstring": "Choose tasks for a given stream_id and values and Returns a list of target tasks", "id": "f7478:c0:m3"}
{"signature": "def add_ckpt_state(self, ckpt_id, ckpt_state):", "body": "<EOL>self._flush_remaining()<EOL>msg = ckptmgr_pb2.StoreInstanceStateCheckpoint()<EOL>istate = ckptmgr_pb2.InstanceStateCheckpoint()<EOL>istate.checkpoint_id = ckpt_id<EOL>istate.state = ckpt_state<EOL>msg.state.CopyFrom(istate)<EOL>self._push_tuple_to_stream(msg)<EOL>", "docstring": "Add the checkpoint state message to be sent back the stmgr\n\n        :param ckpt_id: The id of the checkpoint\n        :ckpt_state: The checkpoint state", "id": "f7479:c0:m4"}
{"signature": "def send_out_tuples(self):", "body": "self._flush_remaining()<EOL>", "docstring": "Sends out currently buffered tuples into the Out-Stream", "id": "f7479:c0:m1"}
{"signature": "def register_capacity(self, capacity):", "body": "self.capacity = capacity<EOL>", "docstring": "Registers the capacity of this communicator\n\n        By default, the capacity of HeronCommunicator is set to be ``sys.maxsize``", "id": "f7481:c0:m1"}
{"signature": "def get_size(self):", "body": "return self._buffer.qsize()<EOL>", "docstring": "Returns the size of the buffer", "id": "f7481:c0:m3"}
{"signature": "def is_empty(self):", "body": "return self._buffer.empty()<EOL>", "docstring": "Returns whether the buffer is empty", "id": "f7481:c0:m4"}
{"signature": "def prepare_custom_grouping(self, context):", "body": "self.custom_grouper.prepare(context)<EOL>", "docstring": "Prepares for custom grouping for this component\n\n        :param context: Topology context", "id": "f7482:c0:m17"}
{"signature": "def is_topology_killed(self):", "body": "return self.pplan.topology.state == topology_pb2.TopologyState.Value(\"<STR_LIT>\")<EOL>", "docstring": "Checks whether topology is already killed", "id": "f7482:c0:m8"}
{"signature": "def get_my_spout(self):", "body": "if self.is_spout:<EOL><INDENT>return self._my_spbl<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Returns spout instance, or ``None`` if bolt is assigned", "id": "f7482:c0:m3"}
{"signature": "def get_topology_state(self):", "body": "return self.pplan.topology.state<EOL>", "docstring": "Returns the current topology state", "id": "f7482:c0:m5"}
{"signature": "def get_my_bolt(self):", "body": "if self.is_spout:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return self._my_spbl<EOL><DEDENT>", "docstring": "Returns bolt instance, or ``None`` if spout is assigned", "id": "f7482:c0:m4"}
{"signature": "def get_topology_config(self):", "body": "if self.pplan.topology.HasField(\"<STR_LIT>\"):<EOL><INDENT>return self._get_dict_from_config(self.pplan.topology.topology_config)<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT>", "docstring": "Returns the topology config", "id": "f7482:c0:m9"}
{"signature": "def update_received_packet(self, received_pkt_size_bytes):", "body": "self.update_count(self.RECEIVED_PKT_COUNT)<EOL>self.update_count(self.RECEIVED_PKT_SIZE, incr_by=received_pkt_size_bytes)<EOL>", "docstring": "Update received packet metrics", "id": "f7483:c1:m1"}
{"signature": "def update_pending_tuples_count(self, count):", "body": "self.update_reduced_metric(self.PENDING_ACKED_COUNT, count)<EOL>", "docstring": "Apply updates to the pending tuples count", "id": "f7483:c3:m5"}
{"signature": "def failed_tuple(self, stream_id, fail_latency_ns):", "body": "self.update_count(self.FAIL_COUNT, key=stream_id)<EOL>self.update_reduced_metric(self.FAIL_LATENCY, fail_latency_ns, key=stream_id)<EOL>", "docstring": "Apply updates to the fail metrics", "id": "f7483:c3:m4"}
{"signature": "def update_emit_count(self, stream_id):", "body": "self.update_count(self.EMIT_COUNT, key=stream_id)<EOL>", "docstring": "Apply update to emit count", "id": "f7483:c2:m3"}
{"signature": "def serialize_data_tuple(self, stream_id, latency_in_ns):", "body": "self.update_count(self.TUPLE_SERIALIZATION_TIME_NS, incr_by=latency_in_ns, key=stream_id)<EOL>", "docstring": "Apply update to serialization metrics", "id": "f7483:c2:m4"}
{"signature": "def acked_tuple(self, stream_id, source_component, latency_in_ns):", "body": "self.update_count(self.ACK_COUNT, key=stream_id)<EOL>self.update_reduced_metric(self.PROCESS_LATENCY, latency_in_ns, stream_id)<EOL>global_stream_id = source_component + '<STR_LIT:/>' + stream_id<EOL>self.update_count(self.ACK_COUNT, key=global_stream_id)<EOL>self.update_reduced_metric(self.PROCESS_LATENCY, latency_in_ns, global_stream_id)<EOL>", "docstring": "Apply updates to the ack metrics", "id": "f7483:c4:m4"}
{"signature": "def register_metrics(self, context):", "body": "sys_config = system_config.get_sys_config()<EOL>interval = float(sys_config[constants.HERON_METRICS_EXPORT_INTERVAL_SEC])<EOL>collector = context.get_metrics_collector()<EOL>super(ComponentMetrics, self).register_metrics(collector, interval)<EOL>", "docstring": "Registers metrics to context\n\n        :param context: Topology Context", "id": "f7483:c2:m1"}
{"signature": "def failed_tuple(self, stream_id, source_component, latency_in_ns):", "body": "self.update_count(self.FAIL_COUNT, key=stream_id)<EOL>self.update_reduced_metric(self.FAIL_LATENCY, latency_in_ns, stream_id)<EOL>global_stream_id = source_component + '<STR_LIT:/>' + stream_id<EOL>self.update_count(self.FAIL_COUNT, key=global_stream_id)<EOL>self.update_reduced_metric(self.FAIL_LATENCY, latency_in_ns, global_stream_id)<EOL>", "docstring": "Apply updates to the fail metrics", "id": "f7483:c4:m5"}
{"signature": "def register_metric(self, name, metric, time_bucket_in_sec):", "body": "if name in self.metrics_map:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % name)<EOL><DEDENT>Log.debug(\"<STR_LIT>\", name, str(time_bucket_in_sec))<EOL>self.metrics_map[name] = metric<EOL>if time_bucket_in_sec in self.time_bucket_in_sec_to_metrics_name:<EOL><INDENT>self.time_bucket_in_sec_to_metrics_name[time_bucket_in_sec].append(name)<EOL><DEDENT>else:<EOL><INDENT>self.time_bucket_in_sec_to_metrics_name[time_bucket_in_sec] = [name]<EOL>self._register_timer_task(time_bucket_in_sec)<EOL><DEDENT>", "docstring": "Registers a given metric\n\n        :param name: name of the metric\n        :param metric: IMetric object to be registered\n        :param time_bucket_in_sec: time interval for update to the metrics manager", "id": "f7483:c5:m1"}
{"signature": "def update_reduced_metric(self, name, value, key=None):", "body": "if name not in self.metrics:<EOL><INDENT>Log.error(\"<STR_LIT>\", name)<EOL><DEDENT>if key is None and isinstance(self.metrics[name], ReducedMetric):<EOL><INDENT>self.metrics[name].update(value)<EOL><DEDENT>elif key is not None and isinstance(self.metrics[name], MultiReducedMetric):<EOL><INDENT>self.metrics[name].update(key, value)<EOL><DEDENT>else:<EOL><INDENT>Log.error(\"<STR_LIT>\", name)<EOL><DEDENT>", "docstring": "Update the value of ReducedMetric or MultiReducedMetric\n\n        :type name: str\n        :param name: name of the registered metric to be updated.\n        :param value: specifies a value to be reduced.\n        :type key: str or None\n        :param key: specifies a key for MultiReducedMetric. Needs to be `None` for updating\n                    ReducedMetric.", "id": "f7483:c0:m3"}
{"signature": "def _init_multi_count_metrics(self, pplan_helper):", "body": "to_init = [self.metrics[i] for i in self.to_multi_init<EOL>if i in self.metrics and isinstance(self.metrics[i], MultiCountMetric)]<EOL>for out_stream in pplan_helper.get_my_spout().outputs:<EOL><INDENT>stream_id = out_stream.stream.id<EOL>for metric in to_init:<EOL><INDENT>metric.add_key(stream_id)<EOL><DEDENT><DEDENT>", "docstring": "Initializes the default values for a necessary set of MultiCountMetrics", "id": "f7483:c3:m1"}
{"signature": "def get_component_id(self):", "body": "return self.task_to_component_map.get(self.get_task_id())<EOL>", "docstring": "Property to get the component id of this component", "id": "f7486:c0:m2"}
{"signature": "def invoke_hook_emit(self, values, stream_id, out_tasks):", "body": "if len(self.task_hooks) > <NUM_LIT:0>:<EOL><INDENT>emit_info = EmitInfo(values=values, stream_id=stream_id,<EOL>task_id=self.get_task_id(), out_tasks=out_tasks)<EOL>for task_hook in self.task_hooks:<EOL><INDENT>task_hook.emit(emit_info)<EOL><DEDENT><DEDENT>", "docstring": "invoke task hooks for every time a tuple is emitted in spout/bolt\n\n        :type values: list\n        :param values: values emitted\n        :type stream_id: str\n        :param stream_id: stream id into which tuple is emitted\n        :type out_tasks: list\n        :param out_tasks: list of custom grouping target task id", "id": "f7486:c0:m17"}
{"signature": "def get_sources(self, component_id):", "body": "<EOL>StreamId = namedtuple('<STR_LIT>', '<STR_LIT>')<EOL>if component_id in self.inputs:<EOL><INDENT>ret = {}<EOL>for istream in self.inputs.get(component_id):<EOL><INDENT>key = StreamId(id=istream.stream.id, component_name=istream.stream.component_name)<EOL>ret[key] = istream.gtype<EOL><DEDENT>return ret<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Returns the declared inputs to specified component\n\n        :return: map <streamId namedtuple (same structure as protobuf msg) -> gtype>, or\n                 None if not found", "id": "f7486:c0:m6"}
{"signature": "def invoke_hook_cleanup(self):", "body": "for task_hook in self.task_hooks:<EOL><INDENT>task_hook.clean_up()<EOL><DEDENT>", "docstring": "invoke task hooks for just before the spout/bolt's cleanup method", "id": "f7486:c0:m16"}
{"signature": "def get_component_tasks(self, component_id):", "body": "ret = []<EOL>for task_id, comp_id in self.task_to_component_map.items():<EOL><INDENT>if comp_id == component_id:<EOL><INDENT>ret.append(task_id)<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "Returns the task ids allocated for the given component id", "id": "f7486:c0:m8"}
{"signature": "def register_metric(self, name, metric, time_bucket_in_sec):", "body": "collector = self.get_metrics_collector()<EOL>collector.register_metric(name, metric, time_bucket_in_sec)<EOL>", "docstring": "Registers a new metric to this context", "id": "f7486:c0:m5"}
{"signature": "def invoke_hook_prepare(self):", "body": "for task_hook in self.task_hooks:<EOL><INDENT>task_hook.prepare(self.get_cluster_config(), self)<EOL><DEDENT>", "docstring": "invoke task hooks for after the spout/bolt's initialize() method", "id": "f7486:c0:m15"}
{"signature": "def get_task_id(self):", "body": "return self.task_id<EOL>", "docstring": "Property to get the task id of this component", "id": "f7486:c0:m1"}
{"signature": "def invoke_hook_bolt_fail(self, heron_tuple, fail_latency_ns):", "body": "if len(self.task_hooks) > <NUM_LIT:0>:<EOL><INDENT>bolt_fail_info = BoltFailInfo(heron_tuple=heron_tuple,<EOL>failing_task_id=self.get_task_id(),<EOL>fail_latency_ms=fail_latency_ns * system_constants.NS_TO_MS)<EOL>for task_hook in self.task_hooks:<EOL><INDENT>task_hook.bolt_fail(bolt_fail_info)<EOL><DEDENT><DEDENT>", "docstring": "invoke task hooks for every time bolt fails a tuple\n\n        :type heron_tuple: HeronTuple\n        :param heron_tuple: tuple that is failed\n        :type fail_latency_ns: float\n        :param fail_latency_ns: fail latency in nano seconds", "id": "f7486:c0:m22"}
{"signature": "def handle_new_tuple_set_2(self, hts2):", "body": "if self.my_pplan_helper is None or self.my_instance is None:<EOL><INDENT>Log.error(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>hts = tuple_pb2.HeronTupleSet()<EOL>if hts2.HasField('<STR_LIT>'):<EOL><INDENT>hts.control.CopyFrom(hts2.control)<EOL><DEDENT>else:<EOL><INDENT>hdts = tuple_pb2.HeronDataTupleSet()<EOL>hdts.stream.CopyFrom(hts2.data.stream)<EOL>try:<EOL><INDENT>for trunk in hts2.data.tuples:<EOL><INDENT>added_tuple = hdts.tuples.add()<EOL>added_tuple.ParseFromString(trunk)<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>Log.exception('<STR_LIT>')<EOL><DEDENT>hts.data.CopyFrom(hdts)<EOL><DEDENT>self.in_stream.offer(hts)<EOL>if self.my_pplan_helper.is_topology_running():<EOL><INDENT>self.my_instance.py_class.process_incoming_tuples()<EOL><DEDENT><DEDENT>", "docstring": "Called when new HeronTupleSet2 arrives\n           Convert(Assemble) HeronTupleSet2(raw byte array) to HeronTupleSet\n           See more at GitHub PR #1421\n        :param tuple_msg_set: HeronTupleSet2 type", "id": "f7491:c0:m2"}
{"signature": "def yaml_config_reader(config_path):", "body": "if not config_path.endswith(\"<STR_LIT>\"):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>with open(config_path, '<STR_LIT:r>') as f:<EOL><INDENT>config = yaml.load(f)<EOL><DEDENT>return config<EOL>", "docstring": "Reads yaml config file and returns auto-typed config_dict", "id": "f7491:m1"}
{"signature": "def handle_assignment_msg(self, pplan):", "body": "new_helper = PhysicalPlanHelper(pplan, self.instance.instance_id,<EOL>self.topo_pex_file_abs_path)<EOL>if self.my_pplan_helper is not None and (self.my_pplan_helper.my_component_name != new_helper.my_component_name or<EOL>self.my_pplan_helper.my_task_id != new_helper.my_task_id):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>new_helper.set_topology_context(self.metrics_collector)<EOL>if self.my_pplan_helper is None:<EOL><INDENT>Log.info(\"<STR_LIT>\")<EOL>Log.info(\"<STR_LIT>\")<EOL>self._handle_assignment_msg(new_helper)<EOL><DEDENT>else:<EOL><INDENT>Log.info(\"<STR_LIT>\")<EOL>Log.info(\"<STR_LIT>\",<EOL>self.my_pplan_helper.get_topology_state(), new_helper.get_topology_state())<EOL>self._handle_state_change_msg(new_helper)<EOL><DEDENT>", "docstring": "Called when new NewInstanceAssignmentMessage arrives\n\n        Tells this instance to become either spout/bolt.\n\n        :param pplan: PhysicalPlan proto", "id": "f7491:c0:m8"}
{"signature": "def handle_start_stateful_processing(self, start_msg):", "body": "Log.info(\"<STR_LIT>\" % start_msg.checkpoint_id)<EOL>self.is_stateful_started = True<EOL>self.start_instance_if_possible()<EOL>", "docstring": "Called when we receive StartInstanceStatefulProcessing message\n        :param start_msg: StartInstanceStatefulProcessing type", "id": "f7491:c0:m4"}
{"signature": "def get_command_changes(self, current_commands, updated_commands):", "body": "commands_to_kill = {}<EOL>commands_to_keep = {}<EOL>commands_to_start = {}<EOL>for current_name, current_command in list(current_commands.items()):<EOL><INDENT>if current_name in list(updated_commands.keys()) and current_command == updated_commands[current_name] and not current_name.startswith('<STR_LIT>'):<EOL><INDENT>commands_to_keep[current_name] = current_command<EOL><DEDENT>else:<EOL><INDENT>commands_to_kill[current_name] = current_command<EOL><DEDENT><DEDENT>for updated_name, updated_command in list(updated_commands.items()):<EOL><INDENT>if updated_name not in list(commands_to_keep.keys()):<EOL><INDENT>commands_to_start[updated_name] = updated_command<EOL><DEDENT><DEDENT>return commands_to_kill, commands_to_keep, commands_to_start<EOL>", "docstring": "Compares the current command with updated command to return a 3-tuple of dicts,\nkeyed by command name: commands_to_kill, commands_to_keep and commands_to_start.", "id": "f7494:c2:m30"}
{"signature": "def start_process_monitor(self):", "body": "<EOL>Log.info(\"<STR_LIT>\")<EOL>while True:<EOL><INDENT>if len(self.processes_to_monitor) > <NUM_LIT:0>:<EOL><INDENT>(pid, status) = os.wait()<EOL>with self.process_lock:<EOL><INDENT>if pid in list(self.processes_to_monitor.keys()):<EOL><INDENT>old_process_info = self.processes_to_monitor[pid]<EOL>name = old_process_info.name<EOL>command = old_process_info.command<EOL>Log.info(\"<STR_LIT>\" % (name, pid, status, command))<EOL>self._wait_process_std_out_err(name, old_process_info.process)<EOL>if os.path.isfile(\"<STR_LIT>\" % pid):<EOL><INDENT>os.system(\"<STR_LIT>\" % pid)<EOL><DEDENT>if old_process_info.attempts >= self.max_runs:<EOL><INDENT>Log.info(\"<STR_LIT>\" % name)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>time.sleep(self.interval_between_runs)<EOL>p = self._run_process(name, command)<EOL>del self.processes_to_monitor[pid]<EOL>self.processes_to_monitor[p.pid] =ProcessInfo(p, name, command, old_process_info.attempts + <NUM_LIT:1>)<EOL>log_pid_for_process(name, p.pid)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Monitor all processes in processes_to_monitor dict,\n        restarting any if they fail, up to max_runs times.", "id": "f7494:c2:m28"}
{"signature": "def get_commands_to_run(self):", "body": "<EOL>if len(self.packing_plan.container_plans) == <NUM_LIT:0>:<EOL><INDENT>return {}<EOL><DEDENT>if self._get_instance_plans(self.packing_plan, self.shard) is None and self.shard != <NUM_LIT:0>:<EOL><INDENT>retval = {}<EOL>retval['<STR_LIT>'] = Command([<EOL>'<STR_LIT:%s>' % self.heron_shell_binary,<EOL>'<STR_LIT>' % self.shell_port,<EOL>'<STR_LIT>' % (self.log_dir, self.shard),<EOL>'<STR_LIT>' % self.topology_id], self.shell_env)<EOL>return retval<EOL><DEDENT>if self.shard == <NUM_LIT:0>:<EOL><INDENT>commands = self._get_tmaster_processes()<EOL><DEDENT>else:<EOL><INDENT>self._untar_if_needed()<EOL>commands = self._get_streaming_processes()<EOL><DEDENT>commands.update(self._get_heron_support_processes())<EOL>return commands<EOL>", "docstring": "Prepare either TMaster or Streaming commands according to shard.\nThe Shell command is attached to all containers. The empty container plan and non-exist\ncontainer plan are bypassed.", "id": "f7494:c2:m29"}
{"signature": "def setup(executor):", "body": "<EOL>def signal_handler(signal_to_handle, frame):<EOL><INDENT>Log.info('<STR_LIT>', signal_to_handle)<EOL>executor.stop_state_manager_watches()<EOL>sys.exit(signal_to_handle)<EOL><DEDENT>def cleanup():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>Log.info('<STR_LIT>')<EOL>for pid in list(executor.processes_to_monitor.keys()):<EOL><INDENT>os.kill(pid, signal.SIGTERM)<EOL><DEDENT>time.sleep(<NUM_LIT:5>)<EOL>os.killpg(<NUM_LIT:0>, signal.SIGTERM)<EOL><DEDENT>shardid = executor.shard<EOL>log.configure(logfile='<STR_LIT>' % shardid)<EOL>pid = os.getpid()<EOL>sid = os.getsid(pid)<EOL>if pid != sid:<EOL><INDENT>Log.info('<STR_LIT>')<EOL>os.setpgrp() <EOL><DEDENT>Log.info('<STR_LIT>')<EOL>signal.signal(signal.SIGTERM, signal_handler)<EOL>Log.info('<STR_LIT>')<EOL>atexit.register(cleanup)<EOL>", "docstring": "Set up log, process and signal handlers", "id": "f7494:m13"}
{"signature": "def _get_instance_plans(self, packing_plan, container_id):", "body": "this_container_plan = None<EOL>for container_plan in packing_plan.container_plans:<EOL><INDENT>if container_plan.id == container_id:<EOL><INDENT>this_container_plan = container_plan<EOL><DEDENT><DEDENT>if this_container_plan is None:<EOL><INDENT>return None<EOL><DEDENT>return this_container_plan.instance_plans<EOL>", "docstring": "For the given packing_plan, return the container plan with the given container_id. If protobufs\nsupported maps, we could just get the plan by id, but it doesn't so we have a collection of\ncontainers to iterate over.", "id": "f7494:c2:m20"}
{"signature": "def _get_metrics_cache_cmd(self):", "body": "metricscachemgr_main_class = '<STR_LIT>'<EOL>metricscachemgr_cmd = [os.path.join(self.heron_java_home, '<STR_LIT>'),<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>self.metricscache_manager_classpath,<EOL>metricscachemgr_main_class,<EOL>\"<STR_LIT>\", '<STR_LIT>',<EOL>\"<STR_LIT>\", self.metricscache_manager_master_port,<EOL>\"<STR_LIT>\", self.metricscache_manager_stats_port,<EOL>\"<STR_LIT>\", self.topology_name,<EOL>\"<STR_LIT>\", self.topology_id,<EOL>\"<STR_LIT>\", self.heron_internals_config_file,<EOL>\"<STR_LIT>\", self.override_config_file,<EOL>\"<STR_LIT>\", self.metrics_sinks_config_file,<EOL>\"<STR_LIT>\", self.cluster,<EOL>\"<STR_LIT>\", self.role,<EOL>\"<STR_LIT>\", self.environment]<EOL>return Command(metricscachemgr_cmd, self.shell_env)<EOL>", "docstring": "get the command to start the metrics manager processes", "id": "f7494:c2:m8"}
{"signature": "def _get_tmaster_processes(self):", "body": "retval = {}<EOL>tmaster_cmd_lst = [<EOL>self.tmaster_binary,<EOL>'<STR_LIT>' % self.topology_name,<EOL>'<STR_LIT>' % self.topology_id,<EOL>'<STR_LIT>' % self.state_manager_connection,<EOL>'<STR_LIT>' % self.state_manager_root,<EOL>'<STR_LIT>' % self.master_host,<EOL>'<STR_LIT>' % str(self.master_port),<EOL>'<STR_LIT>' % str(self.tmaster_controller_port),<EOL>'<STR_LIT>' % str(self.tmaster_stats_port),<EOL>'<STR_LIT>' % self.heron_internals_config_file,<EOL>'<STR_LIT>' % self.override_config_file,<EOL>'<STR_LIT>' % self.metrics_sinks_config_file,<EOL>'<STR_LIT>' % str(self.metrics_manager_port),<EOL>'<STR_LIT>' % str(self.checkpoint_manager_port)]<EOL>tmaster_env = self.shell_env.copy() if self.shell_env is not None else {}<EOL>tmaster_cmd = Command(tmaster_cmd_lst, tmaster_env)<EOL>if os.environ.get('<STR_LIT>') is not None:<EOL><INDENT>tmaster_cmd.env.update({<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\"<EOL>})<EOL><DEDENT>retval[\"<STR_LIT>\"] = tmaster_cmd<EOL>if self.metricscache_manager_mode.lower() != \"<STR_LIT>\":<EOL><INDENT>retval[\"<STR_LIT>\"] = self._get_metrics_cache_cmd()<EOL><DEDENT>if self.health_manager_mode.lower() != \"<STR_LIT>\":<EOL><INDENT>retval[\"<STR_LIT>\"] = self._get_healthmgr_cmd()<EOL><DEDENT>retval[self.metricsmgr_ids[<NUM_LIT:0>]] = self._get_metricsmgr_cmd(<EOL>self.metricsmgr_ids[<NUM_LIT:0>],<EOL>self.metrics_sinks_config_file,<EOL>self.metrics_manager_port)<EOL>if self.is_stateful_topology:<EOL><INDENT>retval.update(self._get_ckptmgr_process())<EOL><DEDENT>return retval<EOL>", "docstring": "get the command to start the tmaster processes", "id": "f7494:c2:m10"}
{"signature": "def _get_streaming_processes(self):", "body": "retval = {}<EOL>instance_plans = self._get_instance_plans(self.packing_plan, self.shard)<EOL>instance_info = []<EOL>for instance_plan in instance_plans:<EOL><INDENT>global_task_id = instance_plan.task_id<EOL>component_index = instance_plan.component_index<EOL>component_name = instance_plan.component_name<EOL>instance_id = \"<STR_LIT>\" % (str(self.shard), component_name, global_task_id)<EOL>instance_info.append((instance_id, component_name, global_task_id, component_index))<EOL><DEDENT>stmgr_cmd_lst = [<EOL>self.stmgr_binary,<EOL>'<STR_LIT>' % self.topology_name,<EOL>'<STR_LIT>' % self.topology_id,<EOL>'<STR_LIT>' % self.topology_defn_file,<EOL>'<STR_LIT>' % self.state_manager_connection,<EOL>'<STR_LIT>' % self.state_manager_root,<EOL>'<STR_LIT>' % self.stmgr_ids[self.shard],<EOL>'<STR_LIT>' % '<STR_LIT:U+002C>'.join([x[<NUM_LIT:0>] for x in instance_info]),<EOL>'<STR_LIT>' % self.master_host,<EOL>'<STR_LIT>' % str(self.master_port),<EOL>'<STR_LIT>' % str(self.tmaster_controller_port),<EOL>'<STR_LIT>' % str(self.metrics_manager_port),<EOL>'<STR_LIT>' % str(self.shell_port),<EOL>'<STR_LIT>' % self.heron_internals_config_file,<EOL>'<STR_LIT>' % self.override_config_file,<EOL>'<STR_LIT>' % str(self.checkpoint_manager_port),<EOL>'<STR_LIT>' % self.ckptmgr_ids[self.shard],<EOL>'<STR_LIT>' % self.metricscache_manager_mode.lower()]<EOL>stmgr_env = self.shell_env.copy() if self.shell_env is not None else {}<EOL>stmgr_cmd = Command(stmgr_cmd_lst, stmgr_env)<EOL>if os.environ.get('<STR_LIT>') is not None:<EOL><INDENT>stmgr_cmd.env.update({<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\"<EOL>})<EOL><DEDENT>retval[self.stmgr_ids[self.shard]] = stmgr_cmd<EOL>retval[self.metricsmgr_ids[self.shard]] = self._get_metricsmgr_cmd(<EOL>self.metricsmgr_ids[self.shard],<EOL>self.metrics_sinks_config_file,<EOL>self.metrics_manager_port<EOL>)<EOL>if self.is_stateful_topology:<EOL><INDENT>retval.update(self._get_ckptmgr_process())<EOL><DEDENT>if self.pkg_type == '<STR_LIT>' or self.pkg_type == '<STR_LIT>':<EOL><INDENT>retval.update(self._get_java_instance_cmd(instance_info))<EOL><DEDENT>elif self.pkg_type == '<STR_LIT>':<EOL><INDENT>retval.update(self._get_python_instance_cmd(instance_info))<EOL><DEDENT>elif self.pkg_type == '<STR_LIT>':<EOL><INDENT>retval.update(self._get_cpp_instance_cmd(instance_info))<EOL><DEDENT>elif self.pkg_type == '<STR_LIT>':<EOL><INDENT>retval.update(self._get_cpp_instance_cmd(instance_info))<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % self.pkg_type)<EOL><DEDENT>return retval<EOL>", "docstring": "Returns the processes to handle streams, including the stream-mgr and the user code containing\nthe stream logic of the topology", "id": "f7494:c2:m18"}
{"signature": "def initialize(self):", "body": "create_folders = Command('<STR_LIT>' % self.log_dir, self.shell_env)<EOL>self.run_command_or_exit(create_folders)<EOL>chmod_logs_dir = Command('<STR_LIT>' % self.log_dir, self.shell_env)<EOL>self.run_command_or_exit(chmod_logs_dir)<EOL>chmod_x_binaries = [self.tmaster_binary, self.stmgr_binary, self.heron_shell_binary]<EOL>for binary in chmod_x_binaries:<EOL><INDENT>stat_result = os.stat(binary)[stat.ST_MODE]<EOL>if not stat_result & stat.S_IXOTH:<EOL><INDENT>chmod_binary = Command('<STR_LIT>' % binary, self.shell_env)<EOL>self.run_command_or_exit(chmod_binary)<EOL><DEDENT><DEDENT>log_pid_for_process(get_heron_executor_process_name(self.shard), os.getpid())<EOL>", "docstring": "Initialize the environment. Done with a method call outside of the constructor for 2 reasons:\n1. Unit tests probably won't want/need to do this\n2. We don't initialize the logger (also something unit tests don't want) until after the\nconstructor", "id": "f7494:c2:m4"}
{"signature": "def _get_healthmgr_cmd(self):", "body": "healthmgr_main_class = '<STR_LIT>'<EOL>healthmgr_cmd = [os.path.join(self.heron_java_home, '<STR_LIT>'),<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>', self.health_manager_classpath,<EOL>healthmgr_main_class,<EOL>\"<STR_LIT>\", self.cluster,<EOL>\"<STR_LIT>\", self.role,<EOL>\"<STR_LIT>\", self.environment,<EOL>\"<STR_LIT>\", self.topology_name,<EOL>\"<STR_LIT>\", self.metrics_manager_port]<EOL>return Command(healthmgr_cmd, self.shell_env)<EOL>", "docstring": "get the command to start the topology health manager processes", "id": "f7494:c2:m9"}
{"signature": "def _start_processes(self, commands):", "body": "Log.info(\"<STR_LIT>\")<EOL>processes_to_monitor = {}<EOL>for (name, command) in list(commands.items()):<EOL><INDENT>p = self._run_process(name, command)<EOL>processes_to_monitor[p.pid] = ProcessInfo(p, name, command)<EOL>log_pid_for_process(name, p.pid)<EOL><DEDENT>with self.process_lock:<EOL><INDENT>self.processes_to_monitor.update(processes_to_monitor)<EOL><DEDENT>", "docstring": "Start all commands and add them to the dict of processes to be monitored", "id": "f7494:c2:m27"}
{"signature": "def launch(self):", "body": "with self.process_lock:<EOL><INDENT>current_commands = dict(list(map((lambda process: (process.name, process.command)),<EOL>list(self.processes_to_monitor.values()))))<EOL>updated_commands = self.get_commands_to_run()<EOL>commands_to_kill, commands_to_keep, commands_to_start =self.get_command_changes(current_commands, updated_commands)<EOL>Log.info(\"<STR_LIT>\" % sorted(current_commands.keys()))<EOL>Log.info(\"<STR_LIT>\" % sorted(updated_commands.keys()))<EOL>Log.info(\"<STR_LIT>\" % sorted(commands_to_kill.keys()))<EOL>Log.info(\"<STR_LIT>\" % sorted(commands_to_keep.keys()))<EOL>Log.info(\"<STR_LIT>\" % sorted(commands_to_start.keys()))<EOL>self._kill_processes(commands_to_kill)<EOL>self._start_processes(commands_to_start)<EOL>Log.info(\"<STR_LIT>\" %<EOL>(len(commands_to_kill), len(commands_to_keep),<EOL>len(commands_to_start), len(self.processes_to_monitor)))<EOL><DEDENT>", "docstring": "Determines the commands to be run and compares them with the existing running commands.\n        Then starts new ones required and kills old ones no longer required.", "id": "f7494:c2:m31"}
{"signature": "def load_pex(path_to_pex, include_deps=True):", "body": "abs_path_to_pex = os.path.abspath(path_to_pex)<EOL>Log.debug(\"<STR_LIT>\" % abs_path_to_pex)<EOL>if abs_path_to_pex not in sys.path:<EOL><INDENT>sys.path.insert(<NUM_LIT:0>, os.path.dirname(abs_path_to_pex))<EOL><DEDENT>if include_deps:<EOL><INDENT>for dep in _get_deps_list(abs_path_to_pex):<EOL><INDENT>to_join = os.path.join(os.path.dirname(abs_path_to_pex), dep)<EOL>if to_join not in sys.path:<EOL><INDENT>Log.debug(\"<STR_LIT>\" % dep)<EOL>sys.path.insert(<NUM_LIT:0>, to_join)<EOL><DEDENT><DEDENT><DEDENT>Log.debug(\"<STR_LIT>\" % str(sys.path))<EOL>", "docstring": "Loads pex file and its dependencies to the current python path", "id": "f7498:m1"}
{"signature": "def import_and_get_class(path_to_pex, python_class_name):", "body": "abs_path_to_pex = os.path.abspath(path_to_pex)<EOL>Log.debug(\"<STR_LIT>\" % abs_path_to_pex)<EOL>Log.debug(\"<STR_LIT>\" % python_class_name)<EOL>split = python_class_name.split('<STR_LIT:.>')<EOL>from_path = '<STR_LIT:.>'.join(split[:-<NUM_LIT:1>])<EOL>import_name = python_class_name.split('<STR_LIT:.>')[-<NUM_LIT:1>]<EOL>Log.debug(\"<STR_LIT>\" % (from_path, import_name))<EOL>if python_class_name.startswith(\"<STR_LIT>\"):<EOL><INDENT>try:<EOL><INDENT>mod = resolve_heron_suffix_issue(abs_path_to_pex, python_class_name)<EOL>return getattr(mod, import_name)<EOL><DEDENT>except:<EOL><INDENT>Log.error(\"<STR_LIT>\" % python_class_name)<EOL><DEDENT><DEDENT>mod = __import__(from_path, fromlist=[import_name], level=-<NUM_LIT:1>)<EOL>Log.debug(\"<STR_LIT>\" % str(mod))<EOL>return getattr(mod, import_name)<EOL>", "docstring": "Imports and load a class from a given pex file path and python class name\n\n    For example, if you want to get a class called `Sample` in\n    /some-path/sample.pex/heron/examples/src/python/sample.py,\n    ``path_to_pex`` needs to be ``/some-path/sample.pex``, and\n    ``python_class_name`` needs to be ``heron.examples.src.python.sample.Sample``", "id": "f7498:m3"}
{"signature": "def _get_deps_list(abs_path_to_pex):", "body": "pex = zipfile.ZipFile(abs_path_to_pex, mode='<STR_LIT:r>')<EOL>deps = list(set([re.match(egg_regex, i).group(<NUM_LIT:1>) for i in pex.namelist()<EOL>if re.match(egg_regex, i) is not None]))<EOL>return deps<EOL>", "docstring": "Get a list of paths to included dependencies in the specified pex file\n\n    Note that dependencies are located under `.deps` directory", "id": "f7498:m0"}
{"signature": "def init_rotating_logger(level, logfile, max_files, max_bytes):", "body": "logging.basicConfig()<EOL>root_logger = logging.getLogger()<EOL>log_format = \"<STR_LIT>\"<EOL>root_logger.setLevel(level)<EOL>handler = RotatingFileHandler(logfile, maxBytes=max_bytes, backupCount=max_files)<EOL>handler.setFormatter(logging.Formatter(fmt=log_format, datefmt=date_format))<EOL>root_logger.addHandler(handler)<EOL>for handler in root_logger.handlers:<EOL><INDENT>root_logger.debug(\"<STR_LIT>\" + str(handler))<EOL>if isinstance(handler, logging.StreamHandler):<EOL><INDENT>root_logger.debug(\"<STR_LIT>\" + str(handler))<EOL>root_logger.handlers.remove(handler)<EOL><DEDENT><DEDENT>", "docstring": "Initializes a rotating logger\n\n    It also makes sure that any StreamHandler is removed, so as to avoid stdout/stderr\n    constipation issues", "id": "f7499:m1"}
{"signature": "def async_stdout_builder(proc):", "body": "stdout_builder = StringBuilder()<EOL>async_stream_process_stdout(proc, stdout_builder.add)<EOL>return stdout_builder<EOL>", "docstring": "Save stdout into string builder\n    :param proc: the process to save stdout for\n    :return StringBuilder", "id": "f7501:m6"}
{"signature": "def async_stdout_stderr_builder(proc):", "body": "return async_stdout_builder(proc), async_stderr_builder(proc)<EOL>", "docstring": "Save stdout and stderr into string builders\n    :param proc: the process to save stdout and stderr for\n    :return (StringBuilder, StringBuilder)", "id": "f7501:m8"}
{"signature": "def async_stderr_builder(proc):", "body": "stderr_builder = StringBuilder()<EOL>async_stream_process_stderr(proc, stderr_builder.add)<EOL>return stderr_builder<EOL>", "docstring": "Save stderr into string builder\n    :param proc: the process to save stderr for\n    :return StringBuilder", "id": "f7501:m7"}
{"signature": "def stream_process_stdout(process, handler):", "body": "_stream_process_fileno(process.stdout, handler)<EOL>", "docstring": "Stream the stdout for a process out to display\n    :param process: the process to stream the stdout for\n    :param handler: a function that will be called for each stdout line\n    :return: None", "id": "f7501:m1"}
{"signature": "def tail(filename, n):", "body": "size = os.path.getsize(filename)<EOL>with open(filename, \"<STR_LIT:rb>\") as f:<EOL><INDENT>fm = mmap.mmap(f.fileno(), <NUM_LIT:0>, mmap.MAP_SHARED, mmap.PROT_READ)<EOL>try:<EOL><INDENT>for i in range(size - <NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>):<EOL><INDENT>if fm[i] == '<STR_LIT:\\n>':<EOL><INDENT>n -= <NUM_LIT:1><EOL>if n == -<NUM_LIT:1>:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>return fm[i + <NUM_LIT:1> if i else <NUM_LIT:0>:].splitlines()<EOL><DEDENT>finally:<EOL><INDENT>fm.close()<EOL><DEDENT><DEDENT>", "docstring": "Returns last n lines from the filename. No exception handling", "id": "f7502:m0"}
{"signature": "def iterparse(filelike, encoding=None, handler_class=DrillHandler, xpath=None):", "body": "parser = expat.ParserCreate(encoding)<EOL>elem_iter = DrillElementIterator(filelike, parser)<EOL>handler = handler_class(elem_iter, xpath)<EOL>parser.buffer_text = <NUM_LIT:1><EOL>parser.StartElementHandler = handler.start_element<EOL>parser.EndElementHandler = handler.end_element<EOL>parser.CharacterDataHandler = handler.characters<EOL>return elem_iter<EOL>", "docstring": ":param filelike: A file-like object with a ``read`` method\n:returns: An iterator yielding :class:`XmlElement` objects", "id": "f7505:m5"}
{"signature": "def siblings(self, name=None):", "body": "if self.parent and self.index:<EOL><INDENT>for c in self.parent._children:<EOL><INDENT>if c.index != self.index and (name is None or name == c.tagname):<EOL><INDENT>yield c<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Yields all siblings of this node (not including the node itself).\n\n:param name: If specified, only consider elements with this tag name", "id": "f7505:c2:m23"}
{"signature": "def traverse(element, query, deep=False):", "body": "<EOL>part = query[<NUM_LIT:0>]<EOL>if not part:<EOL><INDENT>query = query[<NUM_LIT:1>:]<EOL>part = query[<NUM_LIT:0>]<EOL>deep = True<EOL><DEDENT>part, predicate = xpath_re.match(query[<NUM_LIT:0>]).groups()<EOL>for c in element._children:<EOL><INDENT>if part in ('<STR_LIT:*>', c.tagname) and c._match(predicate):<EOL><INDENT>if len(query) == <NUM_LIT:1>:<EOL><INDENT>yield c<EOL><DEDENT>else:<EOL><INDENT>for e in traverse(c, query[<NUM_LIT:1>:]):<EOL><INDENT>yield e<EOL><DEDENT><DEDENT><DEDENT>if deep:<EOL><INDENT>for e in traverse(c, query, deep=True):<EOL><INDENT>yield e<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Helper function to traverse an element tree rooted at element, yielding nodes matching the query.", "id": "f7505:m0"}
{"signature": "def append(self, name, attrs=None, data=None):", "body": "elem = self.__class__(name, attrs, data, parent=self, index=len(self._children))<EOL>self._children.append(elem)<EOL>return elem<EOL>", "docstring": "Called when the parser detects a start tag (child element) while in this node. Internally creates an\n:class:`XmlElement` and adds it to the end of this node's children.\n\n:param name: The tag name to add\n:param attrs: Attributes for the new tag\n:param data: CDATA for the new tag\n:returns: The newly-created element\n:rtype: :class:`XmlElement`", "id": "f7505:c2:m11"}
{"signature": "def items(self):", "body": "for key in sorted(self.attrs):<EOL><INDENT>yield key, self.attrs[key]<EOL><DEDENT>", "docstring": "A generator yielding ``(key, value)`` attribute pairs, sorted by key name.", "id": "f7505:c2:m14"}
{"signature": "def next(self, name=None):", "body": "if self.parent is None or self.index is None:<EOL><INDENT>return None<EOL><DEDENT>for idx in xrange(self.index + <NUM_LIT:1>, len(self.parent)):<EOL><INDENT>if name is None or self.parent[idx].tagname == name:<EOL><INDENT>return self.parent[idx]<EOL><DEDENT><DEDENT>", "docstring": "Returns the next sibling of this node.\n\n:param name: If specified, only consider elements with this tag name\n:rtype: :class:`XmlElement`", "id": "f7505:c2:m24"}
{"signature": "def first(self):", "body": "for e in self:<EOL><INDENT>return e<EOL><DEDENT>", "docstring": "Returns the first matching element of this query, or None if there was no match.", "id": "f7505:c1:m3"}
{"signature": "def __bool__(self):", "body": "return True<EOL>", "docstring": "If we exist, we should evaluate to True, even if __len__ returns 0.", "id": "f7505:c2:m4"}
{"signature": "def write(self, writer):", "body": "multiline = bool(self._children)<EOL>newline_start = multiline and not bool(self.data)<EOL>writer.start(self.tagname, self.attrs, newline=newline_start)<EOL>if self.data:<EOL><INDENT>writer.data(self.data, newline=bool(self._children))<EOL><DEDENT>for c in self._children:<EOL><INDENT>c.write(writer)<EOL><DEDENT>writer.end(self.tagname, indent=multiline)<EOL>", "docstring": "Writes an XML representation of this node (including descendants) to the specified file-like object.\n\n:param writer: An :class:`XmlWriter` instance to write this node to", "id": "f7505:c2:m8"}
{"signature": "def last(self, name=None):", "body": "for c in self.children(name, reverse=True):<EOL><INDENT>return c<EOL><DEDENT>", "docstring": "Returns the last child of this node.\n\n:param name: If specified, only consider elements with this tag name\n:rtype: :class:`XmlElement`", "id": "f7505:c2:m21"}
{"signature": "def path(self, include_root=False):", "body": "path = '<STR_LIT>' % (self.tagname, self.index or <NUM_LIT:0>)<EOL>p = self.parent<EOL>while p is not None:<EOL><INDENT>if p.parent or include_root:<EOL><INDENT>path = '<STR_LIT>' % (p.tagname, p.index or <NUM_LIT:0>, path)<EOL><DEDENT>p = p.parent<EOL><DEDENT>return path<EOL>", "docstring": "Returns a canonical path to this element, relative to the root node.\n\n:param include_root: If ``True``, include the root node in the path. Defaults to ``False``.", "id": "f7505:c2:m17"}
{"signature": "def _match(self, pred):", "body": "if not pred:<EOL><INDENT>return True<EOL><DEDENT>pred = pred[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL>if pred.startswith('<STR_LIT:@>'):<EOL><INDENT>pred = pred[<NUM_LIT:1>:]<EOL>if '<STR_LIT:=>' in pred:<EOL><INDENT>attr, value = pred.split('<STR_LIT:=>', <NUM_LIT:1>)<EOL>if value[<NUM_LIT:0>] in ('<STR_LIT:\">', \"<STR_LIT:'>\"):<EOL><INDENT>value = value[<NUM_LIT:1>:]<EOL><DEDENT>if value[-<NUM_LIT:1>] in ('<STR_LIT:\">', \"<STR_LIT:'>\"):<EOL><INDENT>value = value[:-<NUM_LIT:1>]<EOL><DEDENT>return self.attrs.get(attr) == value<EOL><DEDENT>else:<EOL><INDENT>return pred in self.attrs<EOL><DEDENT><DEDENT>elif num_re.match(pred):<EOL><INDENT>index = int(pred)<EOL>if index < <NUM_LIT:0>:<EOL><INDENT>if self.parent:<EOL><INDENT>return self.index == (len(self.parent._children) + index)<EOL><DEDENT>else:<EOL><INDENT>return index == <NUM_LIT:0><EOL><DEDENT><DEDENT>else:<EOL><INDENT>return index == self.index<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if '<STR_LIT:=>' in pred:<EOL><INDENT>tag, value = pred.split('<STR_LIT:=>', <NUM_LIT:1>)<EOL>if value[<NUM_LIT:0>] in ('<STR_LIT:\">', \"<STR_LIT:'>\"):<EOL><INDENT>value = value[<NUM_LIT:1>:]<EOL><DEDENT>if value[-<NUM_LIT:1>] in ('<STR_LIT:\">', \"<STR_LIT:'>\"):<EOL><INDENT>value = value[:-<NUM_LIT:1>]<EOL><DEDENT>for c in self._children:<EOL><INDENT>if c.tagname == tag and c.data == value:<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for c in self._children:<EOL><INDENT>if c.tagname == pred:<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return False<EOL>", "docstring": "Helper function to determine if this node matches the given predicate.", "id": "f7505:c2:m16"}
{"signature": "def first(self, name=None):", "body": "for c in self.children(name):<EOL><INDENT>return c<EOL><DEDENT>", "docstring": "Returns the first child of this node.\n\n:param name: If specified, only consider elements with this tag name\n:rtype: :class:`XmlElement`", "id": "f7505:c2:m20"}
{"signature": "def __getattr__(self, name):", "body": "return self.first(name)<EOL>", "docstring": "Allows access to any attribute or child node directly.", "id": "f7505:c2:m7"}
{"signature": "def set(self, section, key, value):", "body": "if not section in self.config:<EOL><INDENT>self.config.add_section(section)<EOL><DEDENT>self.config.set(section, key, value)<EOL>", "docstring": "Creates the section value if it does not exists and sets the value.\nUse write_config to actually set the value.", "id": "f7508:c0:m2"}
{"signature": "def input_with_default(question, default):", "body": "return input(question + '<STR_LIT>'.format(default)) or default<EOL>", "docstring": "Helper function to return default value if string is empty.", "id": "f7508:m0"}
{"signature": "def write_config(self, initialize_indices=False):", "body": "if not os.path.exists(self.config_dir):<EOL><INDENT>os.mkdir(self.config_dir)<EOL><DEDENT>with open(self.config_file, '<STR_LIT:w>') as configfile:<EOL><INDENT>self.config.write(configfile)<EOL><DEDENT>if initialize_indices:<EOL><INDENT>index = self.get('<STR_LIT>', '<STR_LIT:index>')<EOL>from jackal import Host, Range, Service, User, Credential, Log<EOL>from jackal.core import create_connection<EOL>create_connection(self)<EOL>Host.init(index=\"<STR_LIT>\".format(index))<EOL>Range.init(index=\"<STR_LIT>\".format(index))<EOL>Service.init(index=\"<STR_LIT>\".format(index))<EOL>User.init(index=\"<STR_LIT>\".format(index))<EOL>Credential.init(index=\"<STR_LIT>\".format(index))<EOL>Log.init(index=\"<STR_LIT>\".format(index))<EOL><DEDENT>", "docstring": "Write the current config to disk to store them.", "id": "f7508:c0:m6"}
{"signature": "@property<EOL><INDENT>def config_dir(self):<DEDENT>", "body": "home = expanduser('<STR_LIT>')<EOL>config_dir = os.path.join(home, '<STR_LIT>')<EOL>return config_dir<EOL>", "docstring": "Returns the configuration directory", "id": "f7508:c0:m5"}
{"signature": "def object_to_id(self, obj):", "body": "try:<EOL><INDENT>return obj.username<EOL><DEDENT>except AttributeError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Returns the 'username' value of the given object if it exists, else returns None", "id": "f7510:c4:m3"}
{"signature": "def object_to_id(self, obj):", "body": "<EOL>search = Credential.search()<EOL>search = search.filter(\"<STR_LIT>\", username=obj.username)<EOL>search = search.filter(\"<STR_LIT>\", secret=obj.secret)<EOL>if obj.domain:<EOL><INDENT>search = search.filter(\"<STR_LIT>\", domain=obj.domain)<EOL><DEDENT>else:<EOL><INDENT>search = search.exclude(\"<STR_LIT>\", field=\"<STR_LIT>\")<EOL><DEDENT>if obj.host_ip:<EOL><INDENT>search = search.filter(\"<STR_LIT>\", host_ip=obj.host_ip)<EOL><DEDENT>else:<EOL><INDENT>search = search.exclude(\"<STR_LIT>\", field=\"<STR_LIT>\")<EOL><DEDENT>if obj.service_id:<EOL><INDENT>search = search.filter(\"<STR_LIT>\", service_id=obj.service_id)<EOL><DEDENT>else:<EOL><INDENT>search = search.exclude(\"<STR_LIT>\", field=\"<STR_LIT>\")<EOL><DEDENT>if search.count():<EOL><INDENT>result = search[<NUM_LIT:0>].execute()[<NUM_LIT:0>]<EOL>return result.meta.id<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id.", "id": "f7510:c5:m4"}
{"signature": "def object_to_id(self, obj):", "body": "try:<EOL><INDENT>return obj.address<EOL><DEDENT>except AttributeError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Returns the 'Address' value of the given object if it exists, else returns None", "id": "f7510:c2:m3"}
{"signature": "def find_object(self, username, secret, domain=None, host_ip=None, service_id=None):", "body": "<EOL>search = Credential.search()<EOL>search = search.filter(\"<STR_LIT>\", username=username)<EOL>search = search.filter(\"<STR_LIT>\", secret=secret)<EOL>if domain:<EOL><INDENT>search = search.filter(\"<STR_LIT>\", domain=domain)<EOL><DEDENT>else:<EOL><INDENT>search = search.exclude(\"<STR_LIT>\", field=\"<STR_LIT>\")<EOL><DEDENT>if host_ip:<EOL><INDENT>search = search.filter(\"<STR_LIT>\", host_ip=host_ip)<EOL><DEDENT>else:<EOL><INDENT>search = search.exclude(\"<STR_LIT>\", field=\"<STR_LIT>\")<EOL><DEDENT>if service_id:<EOL><INDENT>search = search.filter(\"<STR_LIT>\", service_id=service_id)<EOL><DEDENT>else:<EOL><INDENT>search = search.exclude(\"<STR_LIT>\", field=\"<STR_LIT>\")<EOL><DEDENT>if search.count():<EOL><INDENT>result = search[<NUM_LIT:0>].execute()[<NUM_LIT:0>]<EOL>return result<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Searches elasticsearch for objects with the same username, password, optional domain, host_ip and service_id.", "id": "f7510:c5:m3"}
{"signature": "def get_credentials(self, *args, **kwargs):", "body": "arguments, _ = self.argparser.parse_known_args()<EOL>if self.is_pipe and self.use_pipe:<EOL><INDENT>return self.get_pipe(self.object_type)<EOL><DEDENT>elif arguments.tags or arguments.type or arguments.search or arguments.password or arguments.cracked or arguments.range or arguments.domain:<EOL><INDENT>return self.argument_search()<EOL><DEDENT>else:<EOL><INDENT>return self.search(*args, **kwargs)<EOL><DEDENT>", "docstring": "Retrieves the users from elastic.", "id": "f7510:c5:m5"}
{"signature": "@property<EOL><INDENT>def argparser(self):<DEDENT>", "body": "core_parser = self.core_parser<EOL>core_parser.add_argument('<STR_LIT>', '<STR_LIT>', type=str, help=\"<STR_LIT>\", nargs=\"<STR_LIT:+>\", default=[])<EOL>core_parser.add_argument('<STR_LIT>', '<STR_LIT>', type=str, help=\"<STR_LIT>\", nargs=\"<STR_LIT:+>\", default=[])<EOL>core_parser.add_argument('<STR_LIT>', '<STR_LIT>', help=\"<STR_LIT>\", action=\"<STR_LIT:store_true>\")<EOL>core_parser.add_argument('<STR_LIT>', '<STR_LIT>', type=str, help=\"<STR_LIT>\")<EOL>core_parser.add_argument('<STR_LIT>', '<STR_LIT>', type=str, help=\"<STR_LIT>\")<EOL>return core_parser<EOL>", "docstring": "Argparser option with search functionality specific for hosts.", "id": "f7510:c2:m5"}
{"signature": "def get_domains(self):", "body": "search = User.search()<EOL>search.aggs.bucket('<STR_LIT>', '<STR_LIT>', field='<STR_LIT>', order={'<STR_LIT>': '<STR_LIT>'}, size=<NUM_LIT:100>)<EOL>response = search.execute()<EOL>return [entry.key for entry in response.aggregations.domains.buckets]<EOL>", "docstring": "Retrieves the domains of the users from elastic.", "id": "f7510:c4:m6"}
{"signature": "def id_to_object(self, line):", "body": "cred = Credential.get(line)<EOL>return cred<EOL>", "docstring": "Resolves the given id to a credential object, if it doesn't exists it will be created.", "id": "f7510:c5:m2"}
{"signature": "def count(self, *args, **kwargs):", "body": "search = self.create_search(*args, **kwargs)<EOL>try:<EOL><INDENT>return search.count()<EOL><DEDENT>except NotFoundError:<EOL><INDENT>print_error(\"<STR_LIT>\")<EOL><DEDENT>except (ConnectionError, TransportError):<EOL><INDENT>print_error(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Returns the number of results after filtering with the given arguments.", "id": "f7510:c0:m3"}
{"signature": "def create_search(self, *args, **kwargs):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Creates an search object from the given arguments.", "id": "f7510:c0:m5"}
{"signature": "def print_error(string):", "body": "print_line('<STR_LIT>'.format(string))<EOL>", "docstring": "Prints a red [!] before the message", "id": "f7511:m5"}
{"signature": "def print_success(string):", "body": "print_line('<STR_LIT>'.format(string))<EOL>", "docstring": "Prints a green [+] before the message", "id": "f7511:m4"}
{"signature": "def get_own_ip():", "body": "own_ip = None<EOL>interfaces = psutil.net_if_addrs()<EOL>for _, details in interfaces.items():<EOL><INDENT>for detail in details:<EOL><INDENT>if detail.family == socket.AF_INET:<EOL><INDENT>ip_address = ipaddress.ip_address(detail.address)<EOL>if not (ip_address.is_link_local or ip_address.is_loopback):<EOL><INDENT>own_ip = str(ip_address)<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return own_ip<EOL>", "docstring": "Gets the IP from the inet interfaces.", "id": "f7511:m7"}
{"signature": "def print_json(data):", "body": "print_line(json.dumps(data, default=datetime_handler))<EOL>", "docstring": "Print the given data to stdout.", "id": "f7511:m2"}
{"signature": "def print_line(text):", "body": "try:<EOL><INDENT>signal.signal(signal.SIGPIPE, signal.SIG_DFL)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>sys.stdout.write(text)<EOL>if not text.endswith('<STR_LIT:\\n>'):<EOL><INDENT>sys.stdout.write('<STR_LIT:\\n>')<EOL><DEDENT>sys.stdout.flush()<EOL><DEDENT>except IOError:<EOL><INDENT>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>", "docstring": "Print the given line to stdout", "id": "f7511:m1"}
{"signature": "def create_query(section):", "body": "query = {}<EOL>if '<STR_LIT>' in section:<EOL><INDENT>query['<STR_LIT>'] = [section['<STR_LIT>']]<EOL><DEDENT>if '<STR_LIT>' in section:<EOL><INDENT>query['<STR_LIT>'] = bool(section['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in section:<EOL><INDENT>query['<STR_LIT>'] = [section['<STR_LIT>']]<EOL><DEDENT>if '<STR_LIT>' in section:<EOL><INDENT>query['<STR_LIT>'] = [section['<STR_LIT>']]<EOL><DEDENT>if '<STR_LIT>' in section:<EOL><INDENT>query['<STR_LIT>'] = [section['<STR_LIT>']]<EOL><DEDENT>return query<EOL>", "docstring": "Creates a search query based on the section of the config file.", "id": "f7512:m1"}
{"signature": "def pipe_worker(pipename, filename, object_type, query, format_string, unique=False):", "body": "print_notification(\"<STR_LIT>\".format(pipename))<EOL>object_type = object_type()<EOL>try:<EOL><INDENT>while True:<EOL><INDENT>uniq = set()<EOL>if os.path.exists(filename):<EOL><INDENT>os.remove(filename)<EOL><DEDENT>os.mkfifo(filename)<EOL>with open(filename, '<STR_LIT:w>') as pipe:<EOL><INDENT>print_success(\"<STR_LIT>\".format(pipename))<EOL>objects = object_type.search(**query)<EOL>for obj in objects:<EOL><INDENT>data = fmt.format(format_string, **obj.to_dict())<EOL>if unique:<EOL><INDENT>if not data in uniq:<EOL><INDENT>uniq.add(data)<EOL>pipe.write(data + '<STR_LIT:\\n>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pipe.write(data + '<STR_LIT:\\n>')<EOL><DEDENT><DEDENT><DEDENT>os.unlink(filename)<EOL><DEDENT><DEDENT>except KeyboardInterrupt:<EOL><INDENT>print_notification(\"<STR_LIT>\".format(pipename))<EOL><DEDENT>except Exception as e:<EOL><INDENT>print_error(\"<STR_LIT>\".format(e, pipename))<EOL><DEDENT>finally:<EOL><INDENT>os.remove(filename)<EOL><DEDENT>", "docstring": "Starts the loop to provide the data from jackal.", "id": "f7512:m0"}
{"signature": "def main():", "body": "cred_search = CredentialSearch()<EOL>arg = argparse.ArgumentParser(parents=[cred_search.argparser], conflict_handler='<STR_LIT>')<EOL>arg.add_argument('<STR_LIT:-c>', '<STR_LIT>', help=\"<STR_LIT>\", action=\"<STR_LIT:store_true>\")<EOL>arguments = arg.parse_args()<EOL>if arguments.count:<EOL><INDENT>print_line(\"<STR_LIT>\".format(cred_search.argument_count()))<EOL><DEDENT>else:<EOL><INDENT>response = cred_search.get_credentials()<EOL>for hit in response:<EOL><INDENT>print_json(hit.to_dict(include_meta=True))<EOL><DEDENT><DEDENT>", "docstring": "Main credentials tool", "id": "f7513:m0"}
{"signature": "def overview():", "body": "search = Credential.search()<EOL>search.aggs.bucket('<STR_LIT>', '<STR_LIT>', field='<STR_LIT>', order={'<STR_LIT>': '<STR_LIT>'}, size=<NUM_LIT:20>).metric('<STR_LIT>', '<STR_LIT>', field='<STR_LIT:username>').metric('<STR_LIT>', '<STR_LIT>', field='<STR_LIT>').metric('<STR_LIT>', '<STR_LIT>', docvalue_fields=['<STR_LIT:username>'], size=<NUM_LIT:100>)<EOL>response = search.execute()<EOL>print_line(\"<STR_LIT>\".format(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"))<EOL>print_line(\"<STR_LIT:->\"*<NUM_LIT:100>)<EOL>for entry in response.aggregations.password_count.buckets:<EOL><INDENT>usernames = []<EOL>for creds in entry.top_hits:<EOL><INDENT>usernames.append(creds.username[<NUM_LIT:0>])<EOL><DEDENT>usernames = list(set(usernames))<EOL>print_line(\"<STR_LIT>\".format(entry.key, entry.doc_count, entry.host_count.value, entry.username_count.value, usernames))<EOL><DEDENT>", "docstring": "Provides an overview of the duplicate credentials.", "id": "f7513:m1"}
{"signature": "def parse_ips(ips, netmask, include_public):", "body": "hs = HostSearch()<EOL>rs = RangeSearch()<EOL>ranges = []<EOL>ips = list(set(ips))<EOL>included_ips = []<EOL>print_success(\"<STR_LIT>\".format(len(ips)))<EOL>for ip in ips:<EOL><INDENT>ip_address = ipaddress.ip_address(ip)<EOL>if include_public or ip_address.is_private:<EOL><INDENT>if len(ips) < <NUM_LIT:15>:<EOL><INDENT>print_success(\"<STR_LIT>\".format(ip))<EOL><DEDENT>host = hs.id_to_object(ip)<EOL>host.add_tag('<STR_LIT>')<EOL>host.save()<EOL>r = str(ipaddress.IPv4Network(\"<STR_LIT>\".format(ip, netmask), strict=False))<EOL>ranges.append(r)<EOL>included_ips.append(ip)<EOL><DEDENT>else:<EOL><INDENT>print_notification(\"<STR_LIT>\".format(ip))<EOL><DEDENT><DEDENT>ranges = list(set(ranges))<EOL>print_success(\"<STR_LIT>\".format(len(ranges)))<EOL>for rng in ranges:<EOL><INDENT>if len(ranges) < <NUM_LIT:15>:<EOL><INDENT>print_success(\"<STR_LIT>\".format(rng))<EOL><DEDENT>r = rs.id_to_object(rng)<EOL>r.add_tag('<STR_LIT>')<EOL>r.save()<EOL><DEDENT>stats = {}<EOL>stats['<STR_LIT>'] = included_ips<EOL>stats['<STR_LIT>'] = ranges<EOL>return stats<EOL>", "docstring": "Parses the list of ips, turns these into ranges based on the netmask given.\nSet include_public to True to include public IP adresses.", "id": "f7514:m4"}
{"signature": "def resolve_domains(domains, disable_zone=False):", "body": "dnsresolver = dns.resolver.Resolver()<EOL>ips = []<EOL>for domain in domains:<EOL><INDENT>print_notification(\"<STR_LIT>\".format(domain))<EOL>try:<EOL><INDENT>result = dnsresolver.query(domain, '<STR_LIT:A>')<EOL>for a in result.response.answer[<NUM_LIT:0>]:<EOL><INDENT>ips.append(str(a))<EOL>if not disable_zone:<EOL><INDENT>ips.extend(zone_transfer(str(a), domain))<EOL><DEDENT><DEDENT><DEDENT>except dns.resolver.NXDOMAIN as e:<EOL><INDENT>print_error(e)<EOL><DEDENT><DEDENT>return ips<EOL>", "docstring": "Resolves the list of domains and returns the ips.", "id": "f7514:m3"}
{"signature": "def get_resolv_dns():", "body": "result = []<EOL>try:<EOL><INDENT>for line in open('<STR_LIT>', '<STR_LIT:r>'):<EOL><INDENT>if line.startswith('<STR_LIT>'):<EOL><INDENT>result.append(line.strip().split('<STR_LIT:U+0020>')[<NUM_LIT:1>])<EOL><DEDENT><DEDENT><DEDENT>except FileNotFoundError:<EOL><INDENT>pass<EOL><DEDENT>return result<EOL>", "docstring": "Returns the dns servers configured in /etc/resolv.conf", "id": "f7514:m1"}
{"signature": "def modify_input():", "body": "doc_mapper = DocMapper()<EOL>if doc_mapper.is_pipe:<EOL><INDENT>objects = [obj for obj in doc_mapper.get_pipe()]<EOL>modified = modify_data(objects)<EOL>for line in modified:<EOL><INDENT>obj = doc_mapper.line_to_object(line)<EOL>obj.save()<EOL><DEDENT>print_success(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>print_error(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "This functions gives the user a way to change the data that is given as input.", "id": "f7516:m1"}
{"signature": "def bruteforce(users, domain, password, host):", "body": "cs = CredentialSearch(use_pipe=False)<EOL>print_notification(\"<STR_LIT>\".format(host))<EOL>s = Server(host)<EOL>c = Connection(s)<EOL>for user in users:<EOL><INDENT>if c.rebind(user=\"<STR_LIT>\".format(domain, user.username), password=password, authentication=NTLM):<EOL><INDENT>print_success('<STR_LIT>'.format(user.username, password))<EOL>credential = cs.find_object(<EOL>user.username, password, domain=domain, host_ip=host)<EOL>if not credential:<EOL><INDENT>credential = Credential(username=user.username, secret=password,<EOL>domain=domain, host_ip=host, type=\"<STR_LIT>\", port=<NUM_LIT>)<EOL><DEDENT>credential.add_tag(tag)<EOL>credential.save()<EOL>user.add_tag(tag)<EOL>user.save()<EOL><DEDENT>else:<EOL><INDENT>print_error(\"<STR_LIT>\".format(user.username, password))<EOL><DEDENT><DEDENT>", "docstring": "Performs a bruteforce for the given users, password, domain on the given host.", "id": "f7517:m0"}
{"signature": "def start_scan(self, scan_id):", "body": "requests.post(self.url + '<STR_LIT>'.format(scan_id), verify=False, headers=self.headers)<EOL>", "docstring": "Starts the scan identified by the scan_id.s", "id": "f7518:c0:m3"}
{"signature": "def get_template_uuid(self):", "body": "response = requests.get(self.url + '<STR_LIT>', headers=self.headers, verify=False)<EOL>templates = json.loads(response.text)<EOL>for template in templates['<STR_LIT>']:<EOL><INDENT>if template['<STR_LIT:name>'] == self.template_name:<EOL><INDENT>return template['<STR_LIT>']<EOL><DEDENT><DEDENT>", "docstring": "Retrieves the uuid of the given template name.", "id": "f7518:c0:m1"}
{"signature": "def main():", "body": "config = Config()<EOL>core = HostSearch()<EOL>hosts = core.get_hosts(tags=['<STR_LIT>'], up=True)<EOL>hosts = [host for host in hosts]<EOL>host_ips = \"<STR_LIT:U+002C>\".join([str(host.address) for host in hosts])<EOL>url = config.get('<STR_LIT>', '<STR_LIT:host>')<EOL>access = config.get('<STR_LIT>', '<STR_LIT>')<EOL>secret = config.get('<STR_LIT>', '<STR_LIT>')<EOL>template_name = config.get('<STR_LIT>', '<STR_LIT>')<EOL>nessus = Nessus(access, secret, url, template_name)<EOL>scan_id = nessus.create_scan(host_ips)<EOL>nessus.start_scan(scan_id)<EOL>for host in hosts:<EOL><INDENT>host.add_tag('<STR_LIT>')<EOL>host.save()<EOL><DEDENT>Logger().log(\"<STR_LIT>\", \"<STR_LIT>\".format(len(hosts)), {'<STR_LIT>': len(hosts)})<EOL>", "docstring": "This function obtains hosts from core and starts a nessus scan on these hosts.\nThe nessus tag is appended to the host tags.", "id": "f7518:m0"}
{"signature": "def parse_domain_computers(filename):", "body": "with open(filename) as f:<EOL><INDENT>data = json.loads(f.read())<EOL><DEDENT>hs = HostSearch()<EOL>count = <NUM_LIT:0><EOL>entry_count = <NUM_LIT:0><EOL>print_notification(\"<STR_LIT>\".format(len(data)))<EOL>for system in data:<EOL><INDENT>entry_count += <NUM_LIT:1><EOL>parsed = parse_single_computer(system)<EOL>if parsed.ip:<EOL><INDENT>try:<EOL><INDENT>host = hs.id_to_object(parsed.ip)<EOL>host.description.append(parsed.description)<EOL>host.hostname.append(parsed.dns_hostname)<EOL>if parsed.os:<EOL><INDENT>host.os = parsed.os<EOL><DEDENT>host.domain_controller = parsed.dc<EOL>host.add_tag('<STR_LIT>')<EOL>host.save()<EOL>count += <NUM_LIT:1><EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>sys.stdout.write('<STR_LIT:\\r>')<EOL>sys.stdout.write(<EOL>\"<STR_LIT>\".format(entry_count, len(data), count))<EOL>sys.stdout.flush()<EOL><DEDENT>sys.stdout.write('<STR_LIT:\\r>')<EOL>return count<EOL>", "docstring": "Parse the file and extract the computers, import the computers that resolve into jackal.", "id": "f7519:m3"}
{"signature": "def parse_domain_users(domain_users_file, domain_groups_file):", "body": "with open(domain_users_file) as f:<EOL><INDENT>users = json.loads(f.read())<EOL><DEDENT>domain_groups = {}<EOL>if domain_groups_file:<EOL><INDENT>with open(domain_groups_file) as f:<EOL><INDENT>groups = json.loads(f.read())<EOL>for group in groups:<EOL><INDENT>sid = get_field(group, '<STR_LIT>')<EOL>domain_groups[int(sid.split('<STR_LIT:->')[-<NUM_LIT:1>])] = get_field(group, '<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>user_search = UserSearch()<EOL>count = <NUM_LIT:0><EOL>total = len(users)<EOL>print_notification(\"<STR_LIT>\".format(total))<EOL>for entry in users:<EOL><INDENT>result = parse_user(entry, domain_groups)<EOL>user = user_search.id_to_object(result['<STR_LIT:username>'])<EOL>user.name = result['<STR_LIT:name>']<EOL>user.domain.append(result['<STR_LIT>'])<EOL>user.description = result['<STR_LIT:description>']<EOL>user.groups.extend(result['<STR_LIT>'])<EOL>user.flags.extend(result['<STR_LIT>'])<EOL>user.sid = result['<STR_LIT>']<EOL>user.add_tag(\"<STR_LIT>\")<EOL>user.save()<EOL>count += <NUM_LIT:1><EOL>sys.stdout.write('<STR_LIT:\\r>')<EOL>sys.stdout.write(\"<STR_LIT>\".format(count, total))<EOL>sys.stdout.flush()<EOL><DEDENT>sys.stdout.write('<STR_LIT:\\r>')<EOL>return count<EOL>", "docstring": "Parses the domain users and groups files.", "id": "f7519:m5"}
{"signature": "def format():", "body": "argparser = argparse.ArgumentParser(description='<STR_LIT>')<EOL>argparser.add_argument('<STR_LIT>', metavar='<STR_LIT>', help='<STR_LIT>', nargs='<STR_LIT:?>')<EOL>arguments = argparser.parse_args()<EOL>service_style = \"<STR_LIT>\"<EOL>host_style = \"<STR_LIT>\"<EOL>ranges_style = \"<STR_LIT>\"<EOL>users_style = \"<STR_LIT>\"<EOL>if arguments.format:<EOL><INDENT>format_input(arguments.format)<EOL><DEDENT>else:<EOL><INDENT>doc_mapper = DocMapper()<EOL>if doc_mapper.is_pipe:<EOL><INDENT>for obj in doc_mapper.get_pipe():<EOL><INDENT>style = '<STR_LIT>'<EOL>if isinstance(obj, Range):<EOL><INDENT>style = ranges_style<EOL><DEDENT>elif isinstance(obj, Host):<EOL><INDENT>style = host_style<EOL><DEDENT>elif isinstance(obj, Service):<EOL><INDENT>style = service_style<EOL><DEDENT>elif isinstance(obj, User):<EOL><INDENT>style = users_style<EOL><DEDENT>print_line(fmt.format(style, **obj.to_dict(include_meta=True)))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print_error(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "Formats the output of another tool in the given way.\nHas default styles for ranges, hosts and services.", "id": "f7520:m2"}
{"signature": "def main():", "body": "search = ServiceSearch()<EOL>services = search.get_services(up=True, tags=['<STR_LIT>'])<EOL>print_notification(\"<STR_LIT>\".format(len(services)))<EOL>urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)<EOL>pool = Pool(<NUM_LIT:100>)<EOL>count = <NUM_LIT:0><EOL>for service in services:<EOL><INDENT>count += <NUM_LIT:1><EOL>if count % <NUM_LIT:50> == <NUM_LIT:0>:<EOL><INDENT>print_notification(\"<STR_LIT>\".format(count, len(services)))<EOL><DEDENT>pool.spawn(check_service, service)<EOL><DEDENT>pool.join()<EOL>print_notification(\"<STR_LIT>\")<EOL>", "docstring": "Retrieves services starts check_service in a gevent pool of 100.", "id": "f7521:m1"}
{"signature": "def overview():", "body": "range_search = RangeSearch()<EOL>ranges = range_search.get_ranges()<EOL>if ranges:<EOL><INDENT>formatted_ranges = []<EOL>tags_lookup = {}<EOL>for r in ranges:<EOL><INDENT>formatted_ranges.append({'<STR_LIT>': r.range})<EOL>tags_lookup[r.range] = r.tags<EOL><DEDENT>search = Host.search()<EOL>search = search.filter('<STR_LIT>', status='<STR_LIT>')<EOL>search.aggs.bucket('<STR_LIT>', '<STR_LIT>', field='<STR_LIT:address>', ranges=formatted_ranges)<EOL>response = search.execute()<EOL>print_line(\"<STR_LIT>\".format(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"))<EOL>print_line(\"<STR_LIT:->\" * <NUM_LIT>)<EOL>for entry in response.aggregations.hosts.buckets:<EOL><INDENT>print_line(\"<STR_LIT>\".format(entry.key, entry.doc_count, tags_lookup[entry.key]))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print_error(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Creates a overview of the hosts per range.", "id": "f7523:m1"}
{"signature": "def overview():", "body": "search = Service.search()<EOL>search = search.filter(\"<STR_LIT>\", state='<STR_LIT>')<EOL>search.aggs.bucket('<STR_LIT>', '<STR_LIT>', field='<STR_LIT:port>', order={'<STR_LIT>': '<STR_LIT>'}, size=<NUM_LIT:100>).metric('<STR_LIT>', '<STR_LIT>', field='<STR_LIT:address>')<EOL>response = search.execute()<EOL>print_line(\"<STR_LIT>\")<EOL>print_line(\"<STR_LIT>\")<EOL>for entry in response.aggregations.port_count.buckets:<EOL><INDENT>print_line(\"<STR_LIT>\".format(entry.key, entry.unique_count.value))<EOL><DEDENT>", "docstring": "Function to create an overview of the services.\nWill print a list of ports found an the number of times the port was seen.", "id": "f7526:m1"}
{"signature": "def combine_files(self, f1, f2, f3):", "body": "with open(os.path.join(self.datadir, f3), '<STR_LIT:wb>') as new_file:<EOL><INDENT>with open(os.path.join(self.datadir, f1), '<STR_LIT:rb>') as file_1:<EOL><INDENT>new_file.write(file_1.read())<EOL><DEDENT>with open(os.path.join(self.datadir, f2), '<STR_LIT:rb>') as file_2:<EOL><INDENT>new_file.write(file_2.read())<EOL><DEDENT><DEDENT>", "docstring": "Combines the files 1 and 2 into 3.", "id": "f7529:c0:m3"}
{"signature": "def exploit(self):", "body": "search = ServiceSearch()<EOL>host_search = HostSearch()<EOL>services = search.get_services(tags=['<STR_LIT>'])<EOL>services = [service for service in services]<EOL>if len(services) == <NUM_LIT:0>:<EOL><INDENT>print_error(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if self.auto:<EOL><INDENT>print_success(\"<STR_LIT>\".format(len(services)))<EOL>for service in services:<EOL><INDENT>print_success(\"<STR_LIT>\" + str(service.address))<EOL>host = host_search.id_to_object(str(service.address))<EOL>system_os = '<STR_LIT>'<EOL>if host.os:<EOL><INDENT>system_os = host.os<EOL><DEDENT>else:<EOL><INDENT>system_os = self.detect_os(str(service.address))<EOL>host.os = system_os<EOL>host.save()<EOL><DEDENT>text = self.exploit_single(str(service.address), system_os)<EOL>print_notification(text)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>service_list = []<EOL>for service in services:<EOL><INDENT>host = host_search.id_to_object(str(service.address))<EOL>system_os = '<STR_LIT>'<EOL>if host.os:<EOL><INDENT>system_os = host.os<EOL><DEDENT>else:<EOL><INDENT>system_os = self.detect_os(str(service.address))<EOL>host.os = system_os<EOL>host.save()<EOL><DEDENT>service_list.append({'<STR_LIT>': service.address, '<STR_LIT>': system_os, '<STR_LIT:string>': \"<STR_LIT>\".format(ip=service.address, os=system_os, hostname=host.hostname)})<EOL><DEDENT>draw_interface(service_list, self.callback, \"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Starts the exploiting phase, you should run setup before running this function.\nif auto is set, this function will fire the exploit to all systems. Otherwise a curses interface is shown.", "id": "f7529:c0:m5"}
{"signature": "def callback(self, service):", "body": "return self.exploit_single(service['<STR_LIT>'], service['<STR_LIT>'])<EOL>", "docstring": "Callback for curses, will call exploit_single with the right arguments.", "id": "f7529:c0:m6"}
{"signature": "def include_hostnames(nmap_host):", "body": "if nmap_host.hostnames:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Function to filter out hosts with hostnames", "id": "f7530:m3"}
{"signature": "def os_discovery():", "body": "hs = HostSearch()<EOL>hosts = hs.get_hosts(ports=[<NUM_LIT>], tags=['<STR_LIT>'])<EOL>hosts = [host for host in hosts if not host.os]<EOL>host_dict = {}<EOL>for host in hosts:<EOL><INDENT>host_dict[str(host.address)] = host<EOL><DEDENT>arguments = \"<STR_LIT>\".split('<STR_LIT:U+0020>')<EOL>if len(hosts):<EOL><INDENT>count = <NUM_LIT:0><EOL>print_notification(\"<STR_LIT>\".format(len(hosts)))<EOL>result = nmap(arguments, [str(h.address) for h in hosts])<EOL>parser = NmapParser()<EOL>report = parser.parse_fromstring(result)<EOL>for nmap_host in report.hosts:<EOL><INDENT>for script_result in nmap_host.scripts_results:<EOL><INDENT>script_result = script_result.get('<STR_LIT>', {})<EOL>host = host_dict[str(nmap_host.address)]<EOL>if '<STR_LIT>' in script_result:<EOL><INDENT>host.hostname.append(script_result['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in script_result:<EOL><INDENT>count += <NUM_LIT:1><EOL>host.os = script_result['<STR_LIT>']<EOL><DEDENT>host_dict[str(nmap_host.address)] = host<EOL><DEDENT><DEDENT>for host in hosts:<EOL><INDENT>host.add_tag('<STR_LIT>')<EOL>host.save()<EOL><DEDENT>print_notification(\"<STR_LIT>\".format(count))<EOL><DEDENT>else:<EOL><INDENT>print_notification(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Performs os (and domain) discovery of smb hosts.", "id": "f7530:m9"}
{"signature": "def nmap_scan():", "body": "<EOL>hs = HostSearch()<EOL>config = Config()<EOL>nmap_types = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:all>']<EOL>options = {'<STR_LIT>':'<STR_LIT>', '<STR_LIT>':'<STR_LIT>', '<STR_LIT>': config.get('<STR_LIT>', '<STR_LIT>'), '<STR_LIT>': '<STR_LIT>', '<STR_LIT:all>': '<STR_LIT>'}<EOL>hs_parser = hs.argparser<EOL>argparser = argparse.ArgumentParser(parents=[hs_parser], conflict_handler='<STR_LIT>',description=\"<STR_LIT>\")<EOL>argparser.add_argument('<STR_LIT:type>', metavar='<STR_LIT:type>',help='<STR_LIT>',type=str, choices=nmap_types, default='<STR_LIT>', const='<STR_LIT>', nargs='<STR_LIT:?>')<EOL>arguments, extra_nmap_args = argparser.parse_known_args()<EOL>tags = nmap_types[nmap_types.index(arguments.type):]<EOL>tags = [\"<STR_LIT>\" + tag  for tag in tags]<EOL>hosts = hs.get_hosts(tags=tags)<EOL>hosts = [host for host in hosts]<EOL>nmap_args = []<EOL>nmap_args.extend(extra_nmap_args)<EOL>nmap_args.extend(options[arguments.type].split('<STR_LIT:U+0020>'))<EOL>print_notification(\"<STR_LIT>\".format(nmap_args, len(hosts)))<EOL>if len(hosts):<EOL><INDENT>result = nmap(nmap_args, [str(h.address) for h in hosts])<EOL>for host in hosts:<EOL><INDENT>host.add_tag(\"<STR_LIT>\".format(arguments.type))<EOL>host.save()<EOL><DEDENT>print_notification(\"<STR_LIT>\")<EOL>stats = import_nmap(result, \"<STR_LIT>\".format(arguments.type), check_function=all_hosts, import_services=True)<EOL>stats['<STR_LIT>'] = len(hosts)<EOL>stats['<STR_LIT:type>'] = arguments.type<EOL>Logger().log('<STR_LIT>', \"<STR_LIT>\".format(arguments.type, len(hosts)), stats)<EOL><DEDENT>else:<EOL><INDENT>print_notification(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Scans the given hosts with nmap.", "id": "f7530:m7"}
{"signature": "def start_processes(self):", "body": "self.relay = subprocess.Popen(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', self.targets_file, '<STR_LIT>', '<STR_LIT>', self.directory, '<STR_LIT>', self.output_file], cwd=self.directory)<EOL>self.responder = subprocess.Popen(['<STR_LIT>', '<STR_LIT>', self.interface_name])<EOL>", "docstring": "Starts the ntlmrelayx.py and responder processes.\nAssumes you have these programs in your path.", "id": "f7531:c0:m3"}
{"signature": "def callback(self, event):", "body": "<EOL>if event.mask == <NUM_LIT>:<EOL><INDENT>if event.name.endswith('<STR_LIT>'):<EOL><INDENT>print_success(\"<STR_LIT>\")<EOL>if event.name in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if event.name == '<STR_LIT>':<EOL><INDENT>self.domain_groups_file = event.pathname<EOL><DEDENT>if event.name == '<STR_LIT>':<EOL><INDENT>self.domain_users_file = event.pathname<EOL><DEDENT>if self.domain_groups_file and self.domain_users_file:<EOL><INDENT>print_success(\"<STR_LIT>\")<EOL>subprocess.Popen(['<STR_LIT>', self.domain_groups_file, self.domain_users_file])<EOL><DEDENT><DEDENT>elif event.name == '<STR_LIT>':<EOL><INDENT>print_success(\"<STR_LIT>\")<EOL>subprocess.Popen(['<STR_LIT>', event.pathname])<EOL><DEDENT>self.ldap_strings = []<EOL>self.write_targets()<EOL><DEDENT>if event.name.endswith('<STR_LIT>'):<EOL><INDENT>host = event.name.replace('<STR_LIT>', '<STR_LIT>')<EOL>print_success(\"<STR_LIT>\".format(host))<EOL>subprocess.Popen(['<STR_LIT>', event.pathname])<EOL>self.ips.remove(host)<EOL>self.write_targets()<EOL><DEDENT><DEDENT>", "docstring": "Function that gets called on each event from pyinotify.", "id": "f7531:c0:m4"}
{"signature": "def terminate_processes(self):", "body": "if self.relay:<EOL><INDENT>self.relay.terminate()<EOL><DEDENT>if self.responder:<EOL><INDENT>self.responder.terminate()<EOL><DEDENT>", "docstring": "Terminate the processes.", "id": "f7531:c0:m6"}
{"signature": "def wait(self):", "body": "try:<EOL><INDENT>self.relay.wait()<EOL>self.responder.wait()<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>print_notification(\"<STR_LIT>\")<EOL><DEDENT>finally:<EOL><INDENT>self.terminate_processes()<EOL><DEDENT>", "docstring": "This function waits for the relay and responding processes to exit.\nCaptures KeyboardInterrupt to shutdown these processes.", "id": "f7531:c0:m7"}
{"signature": "def brutefore_passwords(ip, url, credentials, service):", "body": "auth_requests = []<EOL>for credential in credentials:<EOL><INDENT>split = credential.strip().split('<STR_LIT::>')<EOL>username = split[<NUM_LIT:0>]<EOL>password = '<STR_LIT>'<EOL>if len(split) > <NUM_LIT:1>:<EOL><INDENT>password = split[<NUM_LIT:1>]<EOL><DEDENT>auth_requests.append(grequests.get(url, auth=(username, password)))<EOL><DEDENT>results = grequests.map(auth_requests)<EOL>for result in results:<EOL><INDENT>if result and result.status_code == <NUM_LIT:200>:<EOL><INDENT>creds = result.request.headers['<STR_LIT>'].split('<STR_LIT:U+0020>')[<NUM_LIT:1>]<EOL>creds = base64.b64decode(creds).decode('<STR_LIT:utf-8>')<EOL>creds = creds.split('<STR_LIT::>')<EOL>print_success(\"<STR_LIT>\".format(<EOL>creds[<NUM_LIT:0>], creds[<NUM_LIT:1>], url))<EOL>credential = Credential(secret=creds[<NUM_LIT:1>], username=creds[<NUM_LIT:0>], type='<STR_LIT>', access_level='<STR_LIT>', service_id=service.id, host_ip=ip, description='<STR_LIT>')<EOL>credential.save()<EOL><DEDENT><DEDENT>", "docstring": "Bruteforce function, will try all the credentials at the same time, splits the given credentials at a ':'.", "id": "f7532:m0"}
{"signature": "def main():", "body": "services = ServiceSearch()<EOL>argparse = services.argparser<EOL>argparse.add_argument('<STR_LIT>', '<STR_LIT>', type=str, help=\"<STR_LIT>\")<EOL>arguments = argparse.parse_args()<EOL>if not arguments.file:<EOL><INDENT>print_error(\"<STR_LIT>\")<EOL>sys.exit()<EOL><DEDENT>services = services.get_services(search=[\"<STR_LIT>\"], up=True, tags=['<STR_LIT>'])<EOL>credentials = []<EOL>with open(arguments.file, '<STR_LIT:r>') as f:<EOL><INDENT>credentials = f.readlines()<EOL><DEDENT>for service in services:<EOL><INDENT>print_notification(\"<STR_LIT>\".format(service.address, service.port))<EOL>url = '<STR_LIT>'<EOL>gevent.spawn(brutefore_passwords, service.address, url.format(service.address, service.port), credentials, service)<EOL>service.add_tag('<STR_LIT>')<EOL>service.update(tags=service.tags)<EOL><DEDENT>gevent.wait()<EOL>Logger().log(\"<STR_LIT>\", \"<STR_LIT>\", {'<STR_LIT>': len(services)})<EOL>", "docstring": "Checks the arguments to brutefore and spawns greenlets to perform the bruteforcing.", "id": "f7532:m1"}
{"signature": "def initialize_indices():", "body": "Host.init()<EOL>Range.init()<EOL>Service.init()<EOL>User.init()<EOL>Credential.init()<EOL>Log.init()<EOL>", "docstring": "Initializes the indices", "id": "f7533:m1"}
{"signature": "def lesspager(lines):", "body": "cmd = \"<STR_LIT>\"<EOL>if sys.version_info[<NUM_LIT:0>] >= <NUM_LIT:3>:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>import subprocess<EOL>proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)<EOL>try:<EOL><INDENT>with io.TextIOWrapper(proc.stdin, errors='<STR_LIT>') as pipe:<EOL><INDENT>try:<EOL><INDENT>for l in lines:<EOL><INDENT>pipe.write(l)<EOL><DEDENT><DEDENT>except KeyboardInterrupt:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>except OSError:<EOL><INDENT>pass <EOL><DEDENT>while True:<EOL><INDENT>try:<EOL><INDENT>proc.wait()<EOL>break<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>proc = os.popen(cmd, '<STR_LIT:w>')<EOL>try:<EOL><INDENT>for l in lines:<EOL><INDENT>proc.write(l)<EOL><DEDENT><DEDENT>except IOError:<EOL><INDENT>proc.close()<EOL>sys.exit()<EOL><DEDENT><DEDENT>", "docstring": "Use for streaming writes to a less process\nTaken from pydoc.pipepager:\n/usr/lib/python2.7/pydoc.py\nand\n/usr/lib/python3.5/pydoc.py", "id": "f7536:m20"}
{"signature": "def pairwise(iterable):", "body": "a, b = itertools.tee(iterable)<EOL>next(b, None)<EOL>return six.moves.zip(a,b)<EOL>", "docstring": "s -> (s0,s1), (s1,s2), (s2, s3), ...", "id": "f7536:m16"}
{"signature": "def argmax(l,f=None):", "body": "if f:<EOL><INDENT>l = [f(i) for i in l]<EOL><DEDENT>return max(enumerate(l), key=lambda x:x[<NUM_LIT:1>])[<NUM_LIT:0>]<EOL>", "docstring": "http://stackoverflow.com/questions/5098580/implementing-argmax-in-python", "id": "f7536:m21"}
{"signature": "def __init__(self, zipURI):", "body": "self.filesize = None<EOL>self.zipURI = zipURI<EOL>self.tableOfContents = None<EOL>self.request = None<EOL>self.start = None<EOL>self.end = None<EOL>self.directory_end = None<EOL>self.raw_bytes = None<EOL>self.directory_size = None<EOL>", "docstring": "zipURI should be an HTTP URL hosted on a server that supports ranged requests.\nThe init function will determine if the file exists and raise a urllib2 exception if not.", "id": "f7538:c0:m0"}
{"signature": "def extractFile(self, filename):", "body": "files = [x for x in self.tableOfContents if x['<STR_LIT:filename>'] == filename]<EOL>if len(files) == <NUM_LIT:0>:<EOL><INDENT>raise FileNotFoundException()<EOL><DEDENT>fileRecord = files[<NUM_LIT:0>]<EOL>metaheadroom = <NUM_LIT>  <EOL>request = urllib.request.Request(self.zipURI)<EOL>start = fileRecord['<STR_LIT>']<EOL>end = fileRecord['<STR_LIT>'] + fileRecord['<STR_LIT>'] + metaheadroom<EOL>request.headers['<STR_LIT>'] = \"<STR_LIT>\" % (start, end)<EOL>handle = urllib.request.urlopen(request)<EOL>return_range = handle.headers.get('<STR_LIT>')<EOL>if return_range != \"<STR_LIT>\" % (start, end, self.filesize):<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>filedata = handle.read()<EOL>zip_n = unpack(\"<STR_LIT:H>\", filedata[<NUM_LIT>:<NUM_LIT>])[<NUM_LIT:0>]<EOL>zip_m = unpack(\"<STR_LIT:H>\", filedata[<NUM_LIT>:<NUM_LIT:30>])[<NUM_LIT:0>]<EOL>has_data_descriptor = bool(unpack(\"<STR_LIT:H>\", filedata[<NUM_LIT:6>:<NUM_LIT:8>])[<NUM_LIT:0>] & <NUM_LIT:8>)<EOL>comp_size = unpack(\"<STR_LIT:I>\", filedata[<NUM_LIT>:<NUM_LIT>])[<NUM_LIT:0>]<EOL>if comp_size == <NUM_LIT:0> and has_data_descriptor:<EOL><INDENT>comp_size = fileRecord['<STR_LIT>']<EOL><DEDENT>elif comp_size != fileRecord['<STR_LIT>']:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>raw_zip_data = filedata[<NUM_LIT:30> + zip_n + zip_m: <NUM_LIT:30> + zip_n + zip_m + comp_size]<EOL>uncompressed_data = \"<STR_LIT>\"<EOL>compression_method = unpack(\"<STR_LIT:H>\", filedata[<NUM_LIT:8>:<NUM_LIT:10>])[<NUM_LIT:0>]<EOL>if compression_method == <NUM_LIT:0>:<EOL><INDENT>return raw_zip_data<EOL><DEDENT>dec = zlib.decompressobj(-zlib.MAX_WBITS)<EOL>for chunk in raw_zip_data:<EOL><INDENT>rv = dec.decompress(chunk)<EOL>if rv:<EOL><INDENT>uncompressed_data = uncompressed_data + rv<EOL><DEDENT><DEDENT>return uncompressed_data<EOL>", "docstring": "This function will extract a single file from the remote zip without downloading\nthe entire zip 
file. The filename argument should match whatever is in the 'filename'\nkey of the tableOfContents.", "id": "f7538:c0:m5"}
{"signature": "@property<EOL><INDENT>def errors(self):<DEDENT>", "body": "return self.__errors<EOL>", "docstring": "https://github.com/frictionlessdata/datapackage-py#resource", "id": "f7552:c0:m2"}
{"signature": "def iter(self, relations=False, **options):", "body": "<EOL>if not self.tabular:<EOL><INDENT>message = '<STR_LIT>'<EOL>raise exceptions.DataPackageException(message)<EOL><DEDENT>if relations:<EOL><INDENT>relations = self.__get_relations()<EOL><DEDENT>return self.__get_table().iter(relations=relations, **options)<EOL>", "docstring": "https://github.com/frictionlessdata/datapackage-py#resource", "id": "f7552:c0:m14"}
{"signature": "@property<EOL><INDENT>def local(self):<DEDENT>", "body": "return self.__source_inspection.get('<STR_LIT>', False)<EOL>", "docstring": "https://github.com/frictionlessdata/datapackage-py#resource", "id": "f7552:c0:m7"}
{"signature": "@property<EOL><INDENT>def data(self):<DEDENT>", "body": "<EOL>warnings.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>UserWarning)<EOL>return self.read(keyed=True)<EOL>", "docstring": "Return resource data", "id": "f7552:c0:m26"}
{"signature": "@property<EOL><INDENT>def descriptor(self):<DEDENT>", "body": "<EOL>return self.__next_descriptor<EOL>", "docstring": "https://github.com/frictionlessdata/datapackage-py#resource", "id": "f7552:c0:m4"}
{"signature": "@property<EOL><INDENT>def source(self):<DEDENT>", "body": "return self.__source_inspection.get('<STR_LIT:source>')<EOL>", "docstring": "https://github.com/frictionlessdata/datapackage-py#resource", "id": "f7552:c0:m11"}
{"signature": "def __init__(self, descriptor={}, base_path=None, strict=False, storage=None,<EOL>package=None, **options):", "body": "<EOL>if base_path is None:<EOL><INDENT>base_path = helpers.get_descriptor_base_path(descriptor)<EOL><DEDENT>if storage and not isinstance(storage, Storage):<EOL><INDENT>storage = Storage.connect(storage, **options)<EOL><DEDENT>descriptor = helpers.retrieve_descriptor(descriptor)<EOL>descriptor = helpers.dereference_resource_descriptor(descriptor, base_path)<EOL>if descriptor.get('<STR_LIT:url>'):<EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>UserWarning)<EOL>descriptor['<STR_LIT:path>'] = descriptor['<STR_LIT:url>']<EOL>del descriptor['<STR_LIT:url>']<EOL><DEDENT>self.__current_descriptor = deepcopy(descriptor)<EOL>self.__next_descriptor = deepcopy(descriptor)<EOL>self.__base_path = base_path<EOL>self.__package = package<EOL>self.__storage = storage<EOL>self.__relations = None<EOL>self.__strict = strict<EOL>self.__table = None<EOL>self.__errors = []<EOL>self.__table_options = options<EOL>self.__build()<EOL>", "docstring": "https://github.com/frictionlessdata/datapackage-py#resource", "id": "f7552:c0:m0"}
{"signature": "@property<EOL><INDENT>def multipart(self):<DEDENT>", "body": "return self.__source_inspection.get('<STR_LIT>', False)<EOL>", "docstring": "https://github.com/frictionlessdata/datapackage-py#resource", "id": "f7552:c0:m9"}
{"signature": "def _convert_path(path, name):", "body": "table = os.path.splitext(path)[<NUM_LIT:0>]<EOL>table = table.replace(os.path.sep, '<STR_LIT>')<EOL>if name is not None:<EOL><INDENT>table = '<STR_LIT>'.join([table, name])<EOL><DEDENT>table = re.sub('<STR_LIT>', '<STR_LIT:_>', table)<EOL>table = table.lower()<EOL>return table<EOL>", "docstring": "Convert resource's path and name to storage's table name.\n\n    Args:\n        path (str): resource path\n        name (str): resource name\n\n    Returns:\n        str: table name", "id": "f7553:m2"}
{"signature": "def push_datapackage(descriptor, backend, **backend_options):", "body": "<EOL>warnings.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>UserWarning)<EOL>tables = []<EOL>schemas = []<EOL>datamap = {}<EOL>mapping = {}<EOL>model = Package(descriptor)<EOL>plugin = import_module('<STR_LIT>' % backend)<EOL>storage = plugin.Storage(**backend_options)<EOL>for resource in model.resources:<EOL><INDENT>if not resource.tabular:<EOL><INDENT>continue<EOL><DEDENT>name = resource.descriptor.get('<STR_LIT:name>', None)<EOL>table = _convert_path(resource.descriptor['<STR_LIT:path>'], name)<EOL>schema = resource.descriptor['<STR_LIT>']<EOL>data = resource.table.iter(keyed=True)<EOL>def values(schema, data):<EOL><INDENT>for item in data:<EOL><INDENT>row = []<EOL>for field in schema['<STR_LIT>']:<EOL><INDENT>row.append(item.get(field['<STR_LIT:name>'], None))<EOL><DEDENT>yield tuple(row)<EOL><DEDENT><DEDENT>tables.append(table)<EOL>schemas.append(schema)<EOL>datamap[table] = values(schema, data)<EOL>if name is not None:<EOL><INDENT>mapping[name] = table<EOL><DEDENT><DEDENT>schemas = _convert_schemas(mapping, schemas)<EOL>for table in tables:<EOL><INDENT>if table in storage.buckets:<EOL><INDENT>storage.delete(table)<EOL><DEDENT><DEDENT>storage.create(tables, schemas)<EOL>for table in storage.buckets:<EOL><INDENT>if table in datamap:<EOL><INDENT>storage.write(table, datamap[table])<EOL><DEDENT><DEDENT>return storage<EOL>", "docstring": "Push Data Package to storage.\n\n    All parameters should be used as keyword arguments.\n\n    Args:\n        descriptor (str): path to descriptor\n        backend (str): backend name like `sql` or `bigquery`\n        backend_options (dict): backend options mentioned in backend docs", "id": "f7553:m0"}
{"signature": "def _restore_resources(resources):", "body": "resources = deepcopy(resources)<EOL>for resource in resources:<EOL><INDENT>schema = resource['<STR_LIT>']<EOL>for fk in schema.get('<STR_LIT>', []):<EOL><INDENT>_, name = _restore_path(fk['<STR_LIT>']['<STR_LIT>'])<EOL>fk['<STR_LIT>']['<STR_LIT>'] = name<EOL><DEDENT><DEDENT>return resources<EOL>", "docstring": "Restore schemas from being compatible with storage schemas.\n\n    Foreign keys related operations.\n\n    Args:\n        list: resources from storage\n\n    Returns:\n        list: restored resources", "id": "f7553:m5"}
{"signature": "def _convert_schemas(mapping, schemas):", "body": "schemas = deepcopy(schemas)<EOL>for schema in schemas:<EOL><INDENT>for fk in schema.get('<STR_LIT>', []):<EOL><INDENT>resource = fk['<STR_LIT>']['<STR_LIT>']<EOL>if resource != '<STR_LIT>':<EOL><INDENT>if resource not in mapping:<EOL><INDENT>message = '<STR_LIT>'<EOL>message = message % (resource, fk)<EOL>raise ValueError(message)<EOL><DEDENT>fk['<STR_LIT>']['<STR_LIT>'] = mapping[resource]<EOL><DEDENT><DEDENT><DEDENT>return schemas<EOL>", "docstring": "Convert schemas to be compatible with storage schemas.\n\n    Foreign keys related operations.\n\n    Args:\n        mapping (dict): mapping between resource name and table name\n        schemas (list): schemas\n\n    Raises:\n        ValueError: if there is no resource\n            for some foreign key in given mapping\n\n    Returns:\n        list: converted schemas", "id": "f7553:m4"}
{"signature": "def _restore_path(table):", "body": "name = None<EOL>splited = table.split('<STR_LIT>')<EOL>path = splited[<NUM_LIT:0>]<EOL>if len(splited) == <NUM_LIT:2>:<EOL><INDENT>name = splited[<NUM_LIT:1>]<EOL><DEDENT>path = path.replace('<STR_LIT>', os.path.sep)<EOL>path += '<STR_LIT>'<EOL>return path, name<EOL>", "docstring": "Restore resource's path and name from storage's table.\n\n    Args:\n        table (str): table name\n\n    Returns:\n        (str, str): resource path and name", "id": "f7553:m3"}
{"signature": "def __init__(self, profile):", "body": "self._name = profile<EOL>self._registry = self._load_registry()<EOL>self._schema = self._load_schema(profile, self._registry)<EOL>self._validator = self._load_validator(self._schema, self._registry)<EOL>self._check_schema()<EOL>", "docstring": "https://github.com/frictionlessdata/datapackage-py#schema", "id": "f7555:c0:m0"}
{"signature": "def infer(pattern, base_path=None):", "body": "package = Package({}, base_path=base_path)<EOL>descriptor = package.infer(pattern)<EOL>return descriptor<EOL>", "docstring": "https://github.com/frictionlessdata/datapackage-py#infer", "id": "f7556:m0"}
{"signature": "def get_descriptor_base_path(descriptor):", "body": "<EOL>if isinstance(descriptor, six.string_types):<EOL><INDENT>if os.path.exists(descriptor):<EOL><INDENT>base_path = os.path.dirname(os.path.abspath(descriptor))<EOL><DEDENT>else:<EOL><INDENT>base_path = os.path.dirname(descriptor)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>base_path = '<STR_LIT:.>'<EOL><DEDENT>return base_path<EOL>", "docstring": "Get descriptor base path if string or return None.", "id": "f7558:m0"}
{"signature": "def expand_package_descriptor(descriptor):", "body": "descriptor.setdefault('<STR_LIT>', config.DEFAULT_DATA_PACKAGE_PROFILE)<EOL>for resource in descriptor.get('<STR_LIT>', []):<EOL><INDENT>expand_resource_descriptor(resource)<EOL><DEDENT>return descriptor<EOL>", "docstring": "Apply defaults to data package descriptor (IN-PLACE FOR NOW).", "id": "f7558:m4"}
{"signature": "def expand_resource_descriptor(descriptor):", "body": "descriptor.setdefault('<STR_LIT>', config.DEFAULT_RESOURCE_PROFILE)<EOL>if descriptor['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>schema = descriptor.get('<STR_LIT>')<EOL>if schema is not None:<EOL><INDENT>for field in schema.get('<STR_LIT>', []):<EOL><INDENT>field.setdefault('<STR_LIT:type>', config.DEFAULT_FIELD_TYPE)<EOL>field.setdefault('<STR_LIT>', config.DEFAULT_FIELD_FORMAT)<EOL><DEDENT>schema.setdefault('<STR_LIT>', config.DEFAULT_MISSING_VALUES)<EOL><DEDENT>dialect = descriptor.get('<STR_LIT>')<EOL>if dialect is not None:<EOL><INDENT>for key, value in config.DEFAULT_DIALECT.items():<EOL><INDENT>dialect.setdefault(key, value)<EOL><DEDENT><DEDENT><DEDENT>return descriptor<EOL>", "docstring": "Apply defaults to resource descriptor (IN-PLACE FOR NOW).", "id": "f7558:m5"}
{"signature": "def retrieve_descriptor(descriptor):", "body": "the_descriptor = descriptor<EOL>if the_descriptor is None:<EOL><INDENT>the_descriptor = {}<EOL><DEDENT>if isinstance(the_descriptor, six.string_types):<EOL><INDENT>try:<EOL><INDENT>if os.path.isfile(the_descriptor):<EOL><INDENT>with open(the_descriptor, '<STR_LIT:r>') as f:<EOL><INDENT>the_descriptor = json.load(f)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>req = requests.get(the_descriptor)<EOL>req.raise_for_status()<EOL>req.encoding = '<STR_LIT:utf8>'<EOL>the_descriptor = req.json()<EOL><DEDENT><DEDENT>except (IOError, requests.exceptions.RequestException) as error:<EOL><INDENT>message = '<STR_LIT>' % descriptor<EOL>six.raise_from(exceptions.DataPackageException(message), error)<EOL><DEDENT>except ValueError as error:<EOL><INDENT>message = '<STR_LIT>' % (descriptor, error)<EOL>six.raise_from(exceptions.DataPackageException(message), error)<EOL><DEDENT><DEDENT>if hasattr(the_descriptor, '<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>the_descriptor = json.load(the_descriptor)<EOL><DEDENT>except ValueError as e:<EOL><INDENT>six.raise_from(exceptions.DataPackageException(str(e)), e)<EOL><DEDENT><DEDENT>if not isinstance(the_descriptor, dict):<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise exceptions.DataPackageException(msg.format(type(the_descriptor).__name__))<EOL><DEDENT>return the_descriptor<EOL>", "docstring": "Retrieve descriptor.", "id": "f7558:m1"}
{"signature": "def dereference_resource_descriptor(descriptor, base_path, base_descriptor=None):", "body": "PROPERTIES = ['<STR_LIT>', '<STR_LIT>']<EOL>if base_descriptor is None:<EOL><INDENT>base_descriptor = descriptor<EOL><DEDENT>for property in PROPERTIES:<EOL><INDENT>value = descriptor.get(property)<EOL>if not isinstance(value, six.string_types):<EOL><INDENT>continue<EOL><DEDENT>if value.startswith('<STR_LIT:#>'):<EOL><INDENT>try:<EOL><INDENT>pointer = jsonpointer.JsonPointer(value[<NUM_LIT:1>:])<EOL>descriptor[property] = pointer.resolve(base_descriptor)<EOL><DEDENT>except Exception as error:<EOL><INDENT>message = '<STR_LIT>' % (value, property)<EOL>six.raise_from(<EOL>exceptions.DataPackageException(message),<EOL>error<EOL>)<EOL><DEDENT><DEDENT>elif value.startswith('<STR_LIT:http>'):<EOL><INDENT>try:<EOL><INDENT>response = requests.get(value)<EOL>response.raise_for_status()<EOL>descriptor[property] = response.json()<EOL><DEDENT>except Exception as error:<EOL><INDENT>message = '<STR_LIT>' % (value, property)<EOL>six.raise_from(<EOL>exceptions.DataPackageException(message),<EOL>error<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if not is_safe_path(value):<EOL><INDENT>raise exceptions.DataPackageException(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % (value, property))<EOL><DEDENT>if not base_path:<EOL><INDENT>raise exceptions.DataPackageException(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % (value, property))<EOL><DEDENT>fullpath = os.path.join(base_path, value)<EOL>try:<EOL><INDENT>with io.open(fullpath, encoding='<STR_LIT:utf-8>') as file:<EOL><INDENT>descriptor[property] = json.load(file)<EOL><DEDENT><DEDENT>except Exception as error:<EOL><INDENT>message = '<STR_LIT>' % (value, property)<EOL>six.raise_from(<EOL>exceptions.DataPackageException(message),<EOL>error<EOL>)<EOL><DEDENT><DEDENT><DEDENT>return descriptor<EOL>", "docstring": "Dereference resource descriptor (IN-PLACE FOR NOW).", "id": "f7558:m3"}
{"signature": "def is_safe_path(path):", "body": "contains_windows_var = lambda val: re.match(r'<STR_LIT>', val)<EOL>contains_posix_var = lambda val: re.match(r'<STR_LIT>', val)<EOL>unsafeness_conditions = [<EOL>os.path.isabs(path),<EOL>('<STR_LIT>' % os.path.sep) in path,<EOL>path.startswith('<STR_LIT>'),<EOL>os.path.expandvars(path) != path,<EOL>contains_windows_var(path),<EOL>contains_posix_var(path),<EOL>]<EOL>return not any(unsafeness_conditions)<EOL>", "docstring": "Check if path is safe and allowed.", "id": "f7558:m7"}
{"signature": "def __init__(self, descriptor=None, base_path=None, strict=False, storage=None,<EOL>schema=None, default_base_path=None, **options):", "body": "<EOL>if schema is not None:<EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>UserWarning)<EOL>if isinstance(schema, six.string_types):<EOL><INDENT>if schema in ['<STR_LIT>', '<STR_LIT:default>']:<EOL><INDENT>schema = '<STR_LIT>'<EOL><DEDENT>elif schema == '<STR_LIT>':<EOL><INDENT>schema = '<STR_LIT>'<EOL><DEDENT>elif schema == '<STR_LIT>':<EOL><INDENT>schema = '<STR_LIT>'<EOL><DEDENT>descriptor['<STR_LIT>'] = schema<EOL><DEDENT><DEDENT>if default_base_path is not None:<EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>UserWarning)<EOL>base_path = default_base_path<EOL><DEDENT>tempdir, descriptor = _extract_zip_if_possible(descriptor)<EOL>if tempdir:<EOL><INDENT>self.__tempdir = tempdir<EOL><DEDENT>if base_path is None:<EOL><INDENT>base_path = helpers.get_descriptor_base_path(descriptor)<EOL><DEDENT>if storage and not isinstance(storage, Storage):<EOL><INDENT>storage = Storage.connect(storage, **options)<EOL><DEDENT>if storage and not descriptor:<EOL><INDENT>descriptor = {'<STR_LIT>': []}<EOL>for bucket in storage.buckets:<EOL><INDENT>descriptor['<STR_LIT>'].append({'<STR_LIT:path>': bucket})<EOL><DEDENT><DEDENT>descriptor = helpers.retrieve_descriptor(descriptor)<EOL>descriptor = helpers.dereference_package_descriptor(descriptor, base_path)<EOL>for resource in descriptor.get('<STR_LIT>', []):<EOL><INDENT>url = resource.pop('<STR_LIT:url>', None)<EOL>if url is not None:<EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>UserWarning)<EOL>resource['<STR_LIT:path>'] = [url]<EOL><DEDENT><DEDENT>self.__current_descriptor = deepcopy(descriptor)<EOL>self.__next_descriptor = deepcopy(descriptor)<EOL>self.__base_path = base_path<EOL>self.__storage = storage<EOL>self.__strict = strict<EOL>self.__resources = []<EOL>self.__errors = []<EOL>self.__build()<EOL>", "docstring": 
"https://github.com/frictionlessdata/datapackage-py#package", "id": "f7560:c0:m0"}
{"signature": "def _extract_zip_if_possible(descriptor):", "body": "tempdir = None<EOL>result = descriptor<EOL>try:<EOL><INDENT>if isinstance(descriptor, six.string_types):<EOL><INDENT>res = requests.get(descriptor)<EOL>res.raise_for_status()<EOL>result = res.content<EOL><DEDENT><DEDENT>except (IOError,<EOL>ValueError,<EOL>requests.exceptions.RequestException):<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>the_zip = result<EOL>if isinstance(the_zip, bytes):<EOL><INDENT>try:<EOL><INDENT>os.path.isfile(the_zip)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>the_zip = io.BytesIO(the_zip)<EOL><DEDENT><DEDENT>if zipfile.is_zipfile(the_zip):<EOL><INDENT>with zipfile.ZipFile(the_zip, '<STR_LIT:r>') as z:<EOL><INDENT>_validate_zip(z)<EOL>descriptor_path = [<EOL>f for f in z.namelist() if f.endswith('<STR_LIT>')][<NUM_LIT:0>]<EOL>tempdir = tempfile.mkdtemp('<STR_LIT>')<EOL>z.extractall(tempdir)<EOL>result = os.path.join(tempdir, descriptor_path)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>result = descriptor<EOL><DEDENT><DEDENT>except (TypeError,<EOL>zipfile.BadZipfile):<EOL><INDENT>pass<EOL><DEDENT>if hasattr(descriptor, '<STR_LIT>'):<EOL><INDENT>descriptor.seek(<NUM_LIT:0>)<EOL><DEDENT>return (tempdir, result)<EOL>", "docstring": "If descriptor is a path to zip file extract and return (tempdir, descriptor)", "id": "f7560:m0"}
{"signature": "@property<EOL><INDENT>def profile(self):<DEDENT>", "body": "return self.__profile<EOL>", "docstring": "https://github.com/frictionlessdata/datapackage-py#package", "id": "f7560:c0:m4"}
{"signature": "def validate(self):", "body": "<EOL>warnings.warn(<EOL>'<STR_LIT>',<EOL>UserWarning)<EOL>descriptor = self.to_dict()<EOL>self.profile.validate(descriptor)<EOL>", "docstring": "Validate this Data Package.", "id": "f7560:c0:m20"}
{"signature": "def _slugify_foreign_key(schema):", "body": "for foreign_key in schema.get('<STR_LIT>', []):<EOL><INDENT>foreign_key['<STR_LIT>']['<STR_LIT>'] = _slugify_resource_name(<EOL>foreign_key['<STR_LIT>'].get('<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>return schema<EOL>", "docstring": "Slugify foreign key", "id": "f7560:m3"}
{"signature": "@property<EOL><INDENT>def required_attributes(self):<DEDENT>", "body": "<EOL>warnings.warn(<EOL>'<STR_LIT>',<EOL>UserWarning)<EOL>required = ()<EOL>try:<EOL><INDENT>if self.profile.required is not None:<EOL><INDENT>required = tuple(self.profile.required)<EOL><DEDENT><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>return required<EOL>", "docstring": "tuple: The schema's required attributed.", "id": "f7560:c0:m19"}
{"signature": "@property<EOL><INDENT>def valid(self):<DEDENT>", "body": "return not bool(self.__errors)<EOL>", "docstring": "https://github.com/frictionlessdata/tableschema-py#schema", "id": "f7560:c0:m2"}
{"signature": "def infer(self, pattern=False):", "body": "<EOL>if pattern:<EOL><INDENT>if not self.__base_path:<EOL><INDENT>message = '<STR_LIT>'<EOL>raise exceptions.DataPackageException(message)<EOL><DEDENT>options = {'<STR_LIT>': True} if '<STR_LIT>' in pattern else {}<EOL>for path in glob.glob(os.path.join(self.__base_path, pattern), **options):<EOL><INDENT>self.add_resource({'<STR_LIT:path>': os.path.relpath(path, self.__base_path)})<EOL><DEDENT><DEDENT>for index, resource in enumerate(self.resources):<EOL><INDENT>descriptor = resource.infer()<EOL>self.__current_descriptor['<STR_LIT>'][index] = descriptor<EOL>self.__build()<EOL><DEDENT>if self.__next_descriptor['<STR_LIT>'] == config.DEFAULT_DATA_PACKAGE_PROFILE:<EOL><INDENT>if self.resources and all(map(lambda resource: resource.tabular, self.resources)):<EOL><INDENT>self.__current_descriptor['<STR_LIT>'] = '<STR_LIT>'<EOL>self.__build()<EOL><DEDENT><DEDENT>return self.__current_descriptor<EOL>", "docstring": "https://github.com/frictionlessdata/datapackage-py#package", "id": "f7560:c0:m12"}
{"signature": "def safe(self):", "body": "<EOL>warnings.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>UserWarning)<EOL>return True<EOL>", "docstring": "True: datapackage is always safe.", "id": "f7560:c0:m16"}
{"signature": "def to_json(self):", "body": "<EOL>warnings.warn(<EOL>'<STR_LIT>',<EOL>UserWarning)<EOL>return json.dumps(self.descriptor)<EOL>", "docstring": "str: Convert this Data Package to a JSON string.", "id": "f7560:c0:m23"}
{"signature": "@property<EOL><INDENT>def base_path(self):<DEDENT>", "body": "try:<EOL><INDENT>return self._BASE_PATH<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "str: The base path of this Registry (None if it's remote).", "id": "f7561:c0:m2"}
{"signature": "def _get_registry(self, registry_path_or_url):", "body": "if registry_path_or_url.startswith('<STR_LIT:http>'):<EOL><INDENT>profiles = self._load_json_url(registry_path_or_url)<EOL><DEDENT>else:<EOL><INDENT>profiles = self._load_json_file(registry_path_or_url)<EOL><DEDENT>try:<EOL><INDENT>registry = {}<EOL>for profile in profiles:<EOL><INDENT>registry[profile['<STR_LIT:id>']] = profile<EOL><DEDENT>return registry<EOL><DEDENT>except KeyError as e:<EOL><INDENT>msg = (<EOL>'<STR_LIT>'<EOL>).format(path=registry_path_or_url)<EOL>six.raise_from(ValueError(msg), e)<EOL><DEDENT>", "docstring": "dict: Return the registry as dict with profiles keyed by id.", "id": "f7561:c0:m5"}
{"signature": "def read(*paths):", "body": "basedir = os.path.dirname(__file__)<EOL>fullpath = os.path.join(basedir, *paths)<EOL>contents = io.open(fullpath, encoding='<STR_LIT:utf-8>').read().strip()<EOL>return contents<EOL>", "docstring": "Read a text file.", "id": "f7565:m0"}
{"signature": "def dump_xml(self, output_dir):", "body": "self.buffer = False<EOL>if not os.path.exists(output_dir):<EOL><INDENT>os.makedirs(output_dir)<EOL><DEDENT>with open(os.path.join(output_dir, '<STR_LIT>'), '<STR_LIT:w>') as output:<EOL><INDENT>document = XMLGenerator(output, '<STR_LIT:utf-8>')<EOL>document.startDocument()<EOL>document.startElement('<STR_LIT>', AttributesImpl({}))<EOL>suites = groupby(<EOL>self.testInfos, key=lambda test_info: self.test_case_name(test_info.test_method)<EOL>)<EOL>for suite_name, suite in suites:<EOL><INDENT>document.startElement('<STR_LIT>',<EOL>AttributesImpl({'<STR_LIT:name>': suite_name}))<EOL>for test_info in suite:<EOL><INDENT>document.startElement('<STR_LIT>', AttributesImpl({<EOL>'<STR_LIT>': suite_name,<EOL>'<STR_LIT:name>': self.test_method_name(test_info.test_method),<EOL>'<STR_LIT:time>': '<STR_LIT>' % (<EOL>test_info.end_time - test_info.start_time<EOL>).total_seconds()<EOL>}))<EOL>if test_info.result == TestInfo.RESULT.ERROR:<EOL><INDENT>document.startElement('<STR_LIT:error>', AttributesImpl({<EOL>'<STR_LIT:message>': smart_text(test_info.err[<NUM_LIT:1>])<EOL>}))<EOL>document.characters(self._exc_info_to_string(test_info.err, test_info.test_method))<EOL>document.endElement('<STR_LIT:error>')<EOL><DEDENT>elif test_info.result == TestInfo.RESULT.FAILURE:<EOL><INDENT>document.startElement('<STR_LIT>', AttributesImpl({<EOL>'<STR_LIT:message>': smart_text(test_info.err[<NUM_LIT:1>])<EOL>}))<EOL>document.characters(<EOL>self._exc_info_to_string(test_info.err, test_info.test_method).decode('<STR_LIT:utf-8>')<EOL>)<EOL>document.endElement('<STR_LIT>')<EOL><DEDENT>elif test_info.result == TestInfo.RESULT.UNEXPECTED_SUCCESS:<EOL><INDENT>document.startElement('<STR_LIT:error>', AttributesImpl({<EOL>'<STR_LIT:message>': '<STR_LIT>'<EOL>}))<EOL>document.endElement('<STR_LIT:error>')<EOL><DEDENT>elif test_info.result == TestInfo.RESULT.SKIPPED:<EOL><INDENT>document.startElement('<STR_LIT>', 
AttributesImpl({}))<EOL>document.characters(test_info.reason)<EOL>document.endElement('<STR_LIT>')<EOL><DEDENT>if test_info.stdout:<EOL><INDENT>document.startElement('<STR_LIT>', AttributesImpl({}))<EOL>document.characters(test_info.stdout)<EOL>document.endElement('<STR_LIT>')<EOL><DEDENT>if test_info.stderr:<EOL><INDENT>document.startElement('<STR_LIT>', AttributesImpl({}))<EOL>document.characters(test_info.stderr)<EOL>document.endElement('<STR_LIT>')<EOL><DEDENT>document.endElement('<STR_LIT>')<EOL><DEDENT>document.endElement('<STR_LIT>')<EOL><DEDENT>document.endElement('<STR_LIT>')<EOL>document.endDocument()<EOL><DEDENT>", "docstring": "Dumps test result to xml", "id": "f7571:c1:m13"}
{"signature": "def startTestRun(self):", "body": "super(XMLTestResult, self).startTestRun()<EOL>", "docstring": "Called once before any tests are executed.", "id": "f7571:c1:m1"}
{"signature": "def get_app_locations():", "body": "return [os.path.dirname(os.path.normpath(import_module(app_name).__file__))<EOL>for app_name in PROJECT_APPS]<EOL>", "docstring": "Returns list of paths to tested apps", "id": "f7580:m1"}
{"signature": "def get_task_options():", "body": "options = ()<EOL>task_classes = get_tasks()<EOL>for cls in task_classes:<EOL><INDENT>options += cls.option_list<EOL><DEDENT>return options<EOL>", "docstring": "Get the options for each task that will be run", "id": "f7581:m1"}
{"signature": "def debug():", "body": "png('<STR_LIT>', device=\"<STR_LIT>\")<EOL>", "docstring": "Debug function - runs if heimdall called directly.", "id": "f7587:m6"}
{"signature": "def screenshot(url, *args, **kwargs):", "body": "phantomscript = os.path.join(os.path.dirname(__file__),<EOL>'<STR_LIT>')<EOL>directory = kwargs.get('<STR_LIT>', '<STR_LIT>')<EOL>image_name = kwargs.get('<STR_LIT>', None) or _image_name_from_url(url)<EOL>ext = kwargs.get('<STR_LIT>', '<STR_LIT>').lower()<EOL>save_path = os.path.join(directory, image_name) + '<STR_LIT:.>' + ext<EOL>crop_to_visible = kwargs.get('<STR_LIT>', False)<EOL>cmd_args = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>phantomscript,<EOL>url,<EOL>'<STR_LIT>',<EOL>str(kwargs['<STR_LIT:width>']),<EOL>'<STR_LIT>',<EOL>str(kwargs['<STR_LIT>']),<EOL>'<STR_LIT>',<EOL>str(kwargs['<STR_LIT>']),<EOL>'<STR_LIT>',<EOL>directory,<EOL>'<STR_LIT>',<EOL>ext,<EOL>'<STR_LIT>',<EOL>str(image_name),<EOL>]<EOL>if crop_to_visible:<EOL><INDENT>cmd_args.append('<STR_LIT>')<EOL><DEDENT>output = subprocess.Popen(cmd_args,<EOL>stdout=subprocess.PIPE).communicate()[<NUM_LIT:0>]<EOL>return Screenshot(save_path, directory, image_name + '<STR_LIT:.>' + ext, ext)<EOL>", "docstring": "Call PhantomJS with the specified flags and options.", "id": "f7587:m4"}
{"signature": "def _image_name_from_url(url):", "body": "find = r'<STR_LIT>'<EOL>replace = '<STR_LIT:_>'<EOL>return re.sub(find, replace, url).strip('<STR_LIT:_>')<EOL>", "docstring": "Create a nice image name from the url.", "id": "f7587:m5"}
{"signature": "def save(url, *args, **kwargs):", "body": "device = heimdallDevice(kwargs.get('<STR_LIT>', None))<EOL>kwargs['<STR_LIT:width>'] = kwargs.get('<STR_LIT:width>', None) or device.width<EOL>kwargs['<STR_LIT>'] = kwargs.get('<STR_LIT>', None) or device.height<EOL>kwargs['<STR_LIT>'] = kwargs.get('<STR_LIT>', None) or device.user_agent<EOL>screenshot_image = screenshot(url, **kwargs)<EOL>if kwargs.get('<STR_LIT>'):<EOL><INDENT>image = Image.open(screenshot_image.path)<EOL>image.save(screenshot_image.path, optimize=True)<EOL><DEDENT>return screenshot_image<EOL>", "docstring": "Parse the options, set defaults and then fire up PhantomJS.", "id": "f7587:m0"}
{"signature": "def height(self):", "body": "return self._backend.height()<EOL>", "docstring": "Returns the height of the wallet.\n\n:rtype: int", "id": "f7609:c0:m2"}
{"signature": "def transfer(self, address, amount,<EOL>priority=prio.NORMAL, payment_id=None, unlock_time=<NUM_LIT:0>,<EOL>relay=True):", "body": "return self.accounts[<NUM_LIT:0>].transfer(<EOL>address,<EOL>amount,<EOL>priority=priority,<EOL>payment_id=payment_id,<EOL>unlock_time=unlock_time,<EOL>relay=relay)<EOL>", "docstring": "Sends a transfer from the default account. Returns a list of resulting transactions.\n\n:param address: destination :class:`Address <monero.address.Address>` or subtype\n:param amount: amount to send\n:param priority: transaction priority, implies fee. The priority can be a number\n            from 1 to 4 (unimportant, normal, elevated, priority) or a constant\n            from `monero.prio`.\n:param payment_id: ID for the payment (must be None if\n                :class:`IntegratedAddress <monero.address.IntegratedAddress>`\n                is used as the destination)\n:param unlock_time: the extra unlock delay\n:param relay: if `True`, the wallet will relay the transaction(s) to the network\n                immediately; when `False`, it will only return the transaction(s)\n                so they might be broadcasted later\n:rtype: list of :class:`Transaction <monero.transaction.Transaction>`", "id": "f7609:c0:m18"}
{"signature": "def export_key_images(self):", "body": "return self._backend.export_key_images()<EOL>", "docstring": "Exports signed key images as a list of dicts.\n\n:rtype: [dict, dict, ...]", "id": "f7609:c0:m10"}
{"signature": "def new_address(self, label=None):", "body": "return self.accounts[<NUM_LIT:0>].new_address(label=label)<EOL>", "docstring": "Creates a new address in the default account.\n\n:rtype: :class:`SubAddress <monero.address.SubAddress>`", "id": "f7609:c0:m16"}
{"signature": "def address(self):", "body": "return self.accounts[<NUM_LIT:0>].addresses()[<NUM_LIT:0>]<EOL>", "docstring": "Returns wallet's master address.\n\n:rtype: :class:`Address <monero.address.Address>`", "id": "f7609:c0:m14"}
{"signature": "def new_account(self, label=None):", "body": "acc, addr = self._backend.new_account(label=label)<EOL>assert acc.index == len(self.accounts)<EOL>self.accounts.append(acc)<EOL>return acc<EOL>", "docstring": "Creates new account, appends it to the :class:`Wallet`'s account list and returns it.\n\n:param label: account label as `str`\n:rtype: :class:`Account`", "id": "f7609:c0:m6"}
{"signature": "def seed(self):", "body": "return self._backend.seed()<EOL>", "docstring": "Returns word seed.\n\n:rtype: str", "id": "f7609:c0:m5"}
{"signature": "def addresses(self):", "body": "return self.accounts[<NUM_LIT:0>].addresses()<EOL>", "docstring": "Returns all addresses of the default account.\n\n:rtype: list of :class:`Address <monero.address.Address>` and\n        :class:`SubAddress <monero.address.SubAddress>`", "id": "f7609:c0:m15"}
{"signature": "def balances(self):", "body": "return self._backend.balances(account=self.index)<EOL>", "docstring": "Returns a tuple of balance and unlocked balance.\n\n:rtype: (Decimal, Decimal)", "id": "f7612:c0:m1"}
{"signature": "def transfer_multiple(self, destinations,<EOL>priority=prio.NORMAL, payment_id=None, unlock_time=<NUM_LIT:0>,<EOL>relay=True):", "body": "return self._backend.transfer(<EOL>destinations,<EOL>priority,<EOL>payment_id,<EOL>unlock_time,<EOL>account=self.index,<EOL>relay=relay)<EOL>", "docstring": "Sends a batch of transfers. Returns a list of resulting transactions.\n\n:param destinations: a list of destination and amount pairs:\n            [(:class:`Address <monero.address.Address>`, `Decimal`), ...]\n:param priority: transaction priority, implies fee. The priority can be a number\n            from 1 to 4 (unimportant, normal, elevated, priority) or a constant\n            from `monero.prio`.\n:param payment_id: ID for the payment (must be None if\n                :class:`IntegratedAddress <monero.address.IntegratedAddress>`\n                is used as the destination)\n:param unlock_time: the extra unlock delay\n:param relay: if `True`, the wallet will relay the transaction(s) to the network\n                immediately; when `False`, it will only return the transaction(s)\n                so they might be broadcasted later\n:rtype: list of :class:`Transaction <monero.transaction.Transaction>`", "id": "f7612:c0:m7"}
{"signature": "def addresses(self):", "body": "return self._backend.addresses(account=self.index)<EOL>", "docstring": "Returns all addresses of the account.\n\n:rtype: list", "id": "f7612:c0:m4"}
{"signature": "def new_address(self, label=None):", "body": "return self._backend.new_address(account=self.index, label=label)<EOL>", "docstring": "Creates a new address.\n\n:param label: address label as `str`\n:rtype: :class:`SubAddress <monero.address.SubAddress>`", "id": "f7612:c0:m5"}
{"signature": "def send_transaction(self, tx, relay=True):", "body": "return self._backend.send_transaction(tx.blob, relay=relay)<EOL>", "docstring": "Sends a transaction generated by a :class:`Wallet <monero.wallet.Wallet>`.\n\n:param tx: :class:`Transaction <monero.transaction.Transaction>`\n:param relay: whether to relay the transaction to peers. If `False`, the daemon will have\n        to mine the transaction itself in order to have it included in the blockchain.", "id": "f7629:c0:m3"}
{"signature": "def height(self):", "body": "return self._backend.info()['<STR_LIT>']<EOL>", "docstring": "Return daemon's chain height.\n\n:rtype: int", "id": "f7629:c0:m2"}
{"signature": "def spend_key(self):", "body": "return hexlify(self._decoded[<NUM_LIT:1>:<NUM_LIT>]).decode()<EOL>", "docstring": "Returns public spend key.\n\n        :rtype: str", "id": "f7630:c1:m1"}
{"signature": "def check_private_view_key(self, key):", "body": "return ed25519.public_from_secret_hex(key) == self.view_key()<EOL>", "docstring": "Checks if private view key matches this address.\n\n        :rtype: bool", "id": "f7630:c1:m2"}
{"signature": "def base_address(self):", "body": "prefix = <NUM_LIT> if self.is_testnet() else <NUM_LIT> if self.is_stagenet() else <NUM_LIT><EOL>data = bytearray([prefix]) + self._decoded[<NUM_LIT:1>:<NUM_LIT>]<EOL>checksum = keccak_256(data).digest()[:<NUM_LIT:4>]<EOL>return Address(base58.encode(hexlify(data + checksum)))<EOL>", "docstring": "Returns the base address without payment id.\n        :rtype: :class:`Address`", "id": "f7630:c3:m2"}
{"signature": "def check_private_spend_key(self, key):", "body": "return ed25519.public_from_secret_hex(key) == self.spend_key()<EOL>", "docstring": "Checks if private spend key matches this address.\n\n        :rtype: bool", "id": "f7630:c1:m3"}
{"signature": "def is_mainnet(self):", "body": "return self._decoded[<NUM_LIT:0>] == self._valid_netbytes[<NUM_LIT:0>]<EOL>", "docstring": "Returns `True` if the address belongs to mainnet.\n\n        :rtype: bool", "id": "f7630:c0:m1"}
{"signature": "def public_address(self, net='<STR_LIT>'):", "body": "if net not in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>netbyte = <NUM_LIT> if net == '<STR_LIT>' else <NUM_LIT> if net == '<STR_LIT>' else <NUM_LIT><EOL>data = \"<STR_LIT>\".format(netbyte, self.public_spend_key(), self.public_view_key())<EOL>h = keccak_256()<EOL>h.update(unhexlify(data))<EOL>checksum = h.hexdigest()<EOL>return address(base58.encode(data + checksum[<NUM_LIT:0>:<NUM_LIT:8>]))<EOL>", "docstring": "Returns the master :class:`Address <monero.address.Address>` represented by the seed.\n\n        :param net: the network, one of 'mainnet', 'testnet', 'stagenet'. Default is 'mainnet'.\n\n        :rtype: :class:`Address <monero.address.Address>`", "id": "f7631:c0:m12"}
{"signature": "def _encode_seed(self):", "body": "self.phrase = self.word_list.encode(self.hex)<EOL>", "docstring": "Convert hexadecimal string to mnemonic word representation with checksum.", "id": "f7631:c0:m2"}
{"signature": "def __init__(self, phrase_or_hex=\"<STR_LIT>\", wordlist=\"<STR_LIT>\"):", "body": "self.phrase = \"<STR_LIT>\" <EOL>self.hex = \"<STR_LIT>\" <EOL>self.word_list = wordlists.get_wordlist(wordlist)<EOL>self._ed_pub_spend_key = None<EOL>self._ed_pub_view_key = None<EOL>if phrase_or_hex:<EOL><INDENT>seed_split = phrase_or_hex.split(\"<STR_LIT:U+0020>\")<EOL>if len(seed_split) >= <NUM_LIT>:<EOL><INDENT>self.phrase = phrase_or_hex<EOL>if len(seed_split) == <NUM_LIT>:<EOL><INDENT>self._validate_checksum()<EOL><DEDENT>self._decode_seed()<EOL><DEDENT>elif len(seed_split) >= <NUM_LIT:12>:<EOL><INDENT>self.phrase = phrase_or_hex<EOL>if len(seed_split) == <NUM_LIT>:<EOL><INDENT>self._validate_checksum()<EOL><DEDENT>self._decode_seed()<EOL><DEDENT>elif len(seed_split) == <NUM_LIT:1>:<EOL><INDENT>if not len(phrase_or_hex) % <NUM_LIT:8> == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(hex=phrase_or_hex))<EOL><DEDENT>self.hex = phrase_or_hex<EOL>self._encode_seed()<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(arg=phrase_or_hex))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.hex = generate_hex()<EOL>self._encode_seed()<EOL><DEDENT>", "docstring": "If user supplied a seed string to the class, break it down and determine\n        if it's hexadecimal or mnemonic word string. Gather the values and store them.\n        If no seed is passed, automatically generate a new one from local system randomness.\n\n        :rtype: :class:`Seed <monero.seed.Seed>`", "id": "f7631:c0:m0"}
{"signature": "def verify(xml, stream):", "body": "<EOL>import xmlsec<EOL>signature_node = xmlsec.tree.find_node(xml, xmlsec.Node.SIGNATURE)<EOL>if signature_node is None:<EOL><INDENT>return False<EOL><DEDENT>ctx = xmlsec.SignatureContext()<EOL>ctx.register_id(xml)<EOL>for assertion in xml.xpath(\"<STR_LIT>\"):<EOL><INDENT>ctx.register_id(assertion)<EOL><DEDENT>key = None<EOL>for fmt in [<EOL>xmlsec.KeyFormat.PEM,<EOL>xmlsec.KeyFormat.CERT_PEM]:<EOL><INDENT>stream.seek(<NUM_LIT:0>)<EOL>try:<EOL><INDENT>key = xmlsec.Key.from_memory(stream, fmt)<EOL>break<EOL><DEDENT>except ValueError:  <EOL><INDENT>pass<EOL><DEDENT><DEDENT>ctx.key = key<EOL>try:<EOL><INDENT>ctx.verify(signature_node)<EOL>return True<EOL><DEDENT>except Exception:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Verify the signaure of an XML document with the given certificate.\nReturns `True` if the document is signed with a valid signature.\nReturns `False` if the document is not signed or if the signature is\ninvalid.\n\n:param lxml.etree._Element xml: The document to sign\n:param file stream: The private key to sign the document with\n\n:rtype: Boolean", "id": "f7640:m1"}
{"signature": "def sign(xml, stream, password=None):", "body": "<EOL>import xmlsec<EOL>from saml.schema.base import _element_registry<EOL>element = _element_registry.get(xml.tag)<EOL>signature_node = xmlsec.template.create(<EOL>xml,<EOL>xmlsec.Transform.EXCL_C14N,<EOL>xmlsec.Transform.RSA_SHA1)<EOL>xml.insert(element.meta.signature_index, signature_node)<EOL>ref = xmlsec.template.add_reference(<EOL>signature_node, xmlsec.Transform.SHA1)<EOL>xmlsec.template.add_transform(ref, xmlsec.Transform.ENVELOPED)<EOL>ctx = xmlsec.SignatureContext()<EOL>key = xmlsec.Key.from_memory(stream, xmlsec.KeyFormat.PEM, password)<EOL>ctx.key = key<EOL>ctx.sign(signature_node)<EOL>", "docstring": "Sign an XML document with the given private key file. This will add a\n<Signature> element to the document.\n\n:param lxml.etree._Element xml: The document to sign\n:param file stream: The private key to sign the document with\n:param str password: The password used to access the private key\n\n:rtype: None\n\nExample usage:\n::\n    from saml import schema\n    from lxml import etree\n\n    document = schema.AuthenticationRequest()\n    xml_document = document.serialize()\n    with open('my_key_file.pem', 'r+') as stream:\n        sign(xml_document, stream)\n\n    print etree.tostring(xml_document)\n\nProduces the following XML document:\n\n.. 
code-block:: xml\n\n    <samlp:AuthnRequest\n        xmlns:samlp=\"urn:oasis:names:tc:SAML:2.0:protocol\"\n        xmlns:saml=\"urn:oasis:names:tc:SAML:2.0:assertion\"\n        Version=\"2.0\" ID=\"_6087de0b111b44349a70ff40191a4c0c\"\n        IssueInstant=\"2015-03-16T21:06:39Z\">\n        <Signature xmlns=\"http://www.w3.org/2000/09/xmldsig#\">\n            <SignedInfo>\n                <CanonicalizationMethod\n                    Algorithm=\"http://www.w3.org/2001/10/xml-exc-c14n#\"/>\n                    <SignatureMethod\n                        Algorithm=\"http://www.w3.org/2000/\n                        09/xmldsig#rsa-sha1\"/>\n                        <Reference>\n                            <Transforms>\n                                <Transform\n                                    Algorithm=\"http://www.w3.org/2000/\n                                    09/xmldsig#enveloped-signature\"/>\n                            </Transforms>\n                            <DigestMethod\n                                Algorithm=\"http://www.w3.org/2000/\n                                09/xmldsig#sha1\"/>\n                                <DigestValue>\n                                    94O1FOjRE4JQYVDqStkYzne9StQ=\n                                </DigestValue>\n                        </Reference>\n            </SignedInfo>\n            <SignatureValue>\n                aFYRRjtB3bDyLLJzLZmsn0K4SXmOpFYJ+8R8D31VojgiF37FOElbE56UFbm8BAjn\n                l2AixrUGXP4djxoxxnfBD/reYw5yVuIVXlMxKec784nF2V4GyrfwJOKaNmlVPkq5\n                c8SI+EkKJ02mwiail0Zvjb9FzwvlYD+osMSXvJXVqnGHQDVFlhwbBRRVB6t44/M3\n                TzC4mLSVhuvcpsm4GTQSpGkHP7HvweKN/OTc0aTy8Kh/YUrImwnUCii+J0EW4nGg\n                71eZyq/IiSPnTD09WDHsWe3g29kpicZXqrQCWeLE2zfVKtyxxs7PyEmodH19jXyz\n                wh9hQ8t6PFO47Ros5aV0bw==\n            </SignatureValue>\n        </Signature>\n    </samlp:AuthnRequest>", "id": "f7640:m0"}
{"signature": "def __init__(self, meta, name, data, bases):", "body": "<EOL>self.name = meta.get('<STR_LIT:name>')<EOL>if self.name is None:<EOL><INDENT>self.name = pascalize(name)<EOL><DEDENT>self.namespace = meta.get('<STR_LIT>')<EOL>self.signature_index = meta.get('<STR_LIT>', <NUM_LIT:1>)<EOL>", "docstring": "Initializes the options object and defaults configuration not\nspecified.", "id": "f7644:c0:m0"}
{"signature": "def serialize(self):", "body": "<EOL>return self._serialize_item(self)<EOL>", "docstring": "Serializes the data in the instance state as an\nXML representation.", "id": "f7644:c5:m5"}
{"signature": "def prepare(self):", "body": "<EOL>attributes, elements = OrderedDict(), []<EOL>nsmap = dict([self.meta.namespace])<EOL>for name, item in self._items.items():<EOL><INDENT>if isinstance(item, Attribute):<EOL><INDENT>attributes[name] = item.prepare(self)<EOL><DEDENT>elif isinstance(item, Element):<EOL><INDENT>nsmap.update([item.namespace])<EOL>elements.append(item)<EOL><DEDENT><DEDENT>return attributes, elements, nsmap<EOL>", "docstring": "Prepare the date in the instance state for serialization.", "id": "f7644:c5:m2"}
{"signature": "def norm_encoding(name):", "body": "return codecs.lookup(name).name<EOL>", "docstring": "Normalizes an encoding name.", "id": "f7653:m1"}
{"signature": "def fspath(p):", "body": "try:<EOL><INDENT>return os.fspath(p)<EOL><DEDENT>except AttributeError:<EOL><INDENT>return p<EOL><DEDENT>", "docstring": "Like os.fspath but for Python <= 3.6.", "id": "f7653:m3"}
{"signature": "def single_byte_full_encoding(encoding):", "body": "if PY3:<EOL><INDENT>bytes_ = map(lambda i: bytes([i]), range(<NUM_LIT:0>, <NUM_LIT>))<EOL><DEDENT>else:<EOL><INDENT>bytes_ = map(chr, range(<NUM_LIT:0>, <NUM_LIT>))<EOL><DEDENT>for byte in bytes_:<EOL><INDENT>try:<EOL><INDENT>byte.decode(encoding)<EOL><DEDENT>except UnicodeDecodeError:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Whether the encoding can decode all byte values (e.g. latin1)", "id": "f7653:m2"}
{"signature": "@composite<EOL>def _path_root(draw, result_type):", "body": "<EOL>def tp(s='<STR_LIT>'):<EOL><INDENT>return _str_to_path(s, result_type)<EOL><DEDENT>if os.name != '<STR_LIT>':<EOL><INDENT>return tp(os.sep)<EOL><DEDENT>sep = sampled_from([os.sep, os.altsep or os.sep]).map(tp)<EOL>name = _filename(result_type)<EOL>char = characters(min_codepoint=ord(\"<STR_LIT:A>\"), max_codepoint=ord(\"<STR_LIT:z>\")).map(<EOL>lambda c: tp(str(c)))<EOL>relative = sep<EOL>drive = builds(lambda *x: tp().join(x), char, just(tp('<STR_LIT::>')), sep)<EOL>extended = builds(<EOL>lambda *x: tp().join(x), sep, sep, just(tp('<STR_LIT:?>')), sep, drive)<EOL>network = one_of([<EOL>builds(lambda *x: tp().join(x), sep, sep, name, sep, name, sep),<EOL>builds(lambda *x: tp().join(x),<EOL>sep, sep, just(tp('<STR_LIT:?>')), sep, name, sep, name, sep),<EOL>builds(lambda *x: tp().join(x),<EOL>sep, sep, just(tp('<STR_LIT:?>')), sep, just(tp('<STR_LIT>')), sep, name, sep,<EOL>name, sep),<EOL>builds(lambda *x: tp().join(x),<EOL>sep, sep, just(tp('<STR_LIT:.>')), sep, name, sep),<EOL>])<EOL>final = one_of(relative, drive, extended, network)<EOL>return draw(final)<EOL>", "docstring": "Generates a root component for a path.", "id": "f7654:m2"}
{"signature": "def _str_to_path(s, result_type):", "body": "assert isinstance(s, str)<EOL>if isinstance(s, bytes) and result_type is text_type:<EOL><INDENT>return s.decode('<STR_LIT:ascii>')<EOL><DEDENT>elif isinstance(s, text_type) and result_type is bytes:<EOL><INDENT>return s.encode('<STR_LIT:ascii>')<EOL><DEDENT>return s<EOL>", "docstring": "Given an ASCII str, returns a path of the given type.", "id": "f7654:m1"}
{"signature": "@classmethod<EOL><INDENT>def deserialize(cls, serializer, s_state, **kwargs):<DEDENT>", "body": "return serializer.deserialize_workflow(s_state, **kwargs)<EOL>", "docstring": "Deserializes a Workflow instance using the provided serializer.\n\n:type  serializer: :class:`SpiffWorkflow.serializer.base.Serializer`\n:param serializer: The serializer to use.\n:type  s_state: object\n:param s_state: The serialized workflow.\n:type  kwargs: dict\n:param kwargs: Passed to the serializer.\n:rtype:  Workflow\n:returns: The workflow instance.", "id": "f7705:c0:m17"}
{"signature": "def is_completed(self):", "body": "mask = Task.NOT_FINISHED_MASK<EOL>iter = Task.Iterator(self.task_tree, mask)<EOL>try:<EOL><INDENT>next(iter)<EOL><DEDENT>except StopIteration:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Returns True if the entire Workflow is completed, False otherwise.\n\n:rtype: bool\n:return: Whether the workflow is completed.", "id": "f7705:c0:m1"}
{"signature": "def __init__(self, workflow_spec, deserializing=False, **kwargs):", "body": "assert workflow_spec is not None<EOL>LOG.debug(\"<STR_LIT>\" % self.__str__())<EOL>self.spec = workflow_spec<EOL>self.data = {}<EOL>self.outer_workflow = kwargs.get('<STR_LIT>', self)<EOL>self.locks = {}<EOL>self.last_task = None<EOL>if deserializing:<EOL><INDENT>assert '<STR_LIT>' in workflow_spec.task_specs<EOL>root = workflow_spec.task_specs['<STR_LIT>']  <EOL><DEDENT>else:<EOL><INDENT>if '<STR_LIT>' in workflow_spec.task_specs:<EOL><INDENT>root = workflow_spec.task_specs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>root = specs.Simple(workflow_spec, '<STR_LIT>')<EOL><DEDENT><DEDENT>self.task_tree = Task(self, root)<EOL>self.success = True<EOL>self.debug = False<EOL>self.completed_event = Event()<EOL>self.task_tree.state = Task.COMPLETED<EOL>start = self.task_tree._add_child(self.spec.start, state=Task.FUTURE)<EOL>self.spec.start._predict(start)<EOL>if '<STR_LIT>' not in kwargs:<EOL><INDENT>start.task_spec._update(start)<EOL><DEDENT>", "docstring": "Constructor.\n\n:type workflow_spec: specs.WorkflowSpec\n:param workflow_spec: The workflow specification.\n:type deserializing: bool\n:param deserializing: set to true when deserializing to avoid\n  generating tasks twice (and associated problems with multiple\n  hierarchies of tasks)", "id": "f7705:c0:m0"}
{"signature": "def dump(self):", "body": "print(self.task_tree.dump())<EOL>", "docstring": "Like :meth:`get_dump`, but prints the output to the terminal instead\nof returning it.", "id": "f7705:c0:m15"}
{"signature": "def get_tasks_from_spec_name(self, name):", "body": "return [task for task in self.get_tasks()<EOL>if task.task_spec.name == name]<EOL>", "docstring": "Returns all tasks whose spec has the given name.\n\n:type name: str\n:param name: The name of a task spec.\n:rtype: Task\n:return: The task that relates to the spec with the given name.", "id": "f7705:c0:m9"}
{"signature": "def complete_task_from_id(self, task_id):", "body": "if task_id is None:<EOL><INDENT>raise WorkflowException(self.spec, '<STR_LIT>')<EOL><DEDENT>for task in self.task_tree:<EOL><INDENT>if task.id == task_id:<EOL><INDENT>return task.complete()<EOL><DEDENT><DEDENT>msg = '<STR_LIT>' % task_id<EOL>raise WorkflowException(self.spec, msg)<EOL>", "docstring": "Runs the task with the given id.\n\n:type  task_id: integer\n:param task_id: The id of the Task object.", "id": "f7705:c0:m11"}
{"signature": "def cancel(self, success=False):", "body": "self.success = success<EOL>cancel = []<EOL>mask = Task.NOT_FINISHED_MASK<EOL>for task in Task.Iterator(self.task_tree, mask):<EOL><INDENT>cancel.append(task)<EOL><DEDENT>for task in cancel:<EOL><INDENT>task.cancel()<EOL><DEDENT>", "docstring": "Cancels all open tasks in the workflow.\n\n:type  success: bool\n:param success: Whether the Workflow should be marked as successfully\n                completed.", "id": "f7705:c0:m6"}
{"signature": "def get_dump(self):", "body": "return self.task_tree.get_dump()<EOL>", "docstring": "Returns a complete dump of the current internal task tree for\ndebugging.\n\n:rtype:  str\n:returns: The debug information.", "id": "f7705:c0:m14"}
{"signature": "def do_engine_steps(self):", "body": "assert not self.read_only<EOL>engine_steps = list(<EOL>[t for t in self.get_tasks(Task.READY)<EOL>if self._is_engine_task(t.task_spec)])<EOL>while engine_steps:<EOL><INDENT>for task in engine_steps:<EOL><INDENT>task.complete()<EOL><DEDENT>engine_steps = list(<EOL>[t for t in self.get_tasks(Task.READY)<EOL>if self._is_engine_task(t.task_spec)])<EOL><DEDENT>", "docstring": "Execute any READY tasks that are engine specific (for example, gateways\nor script tasks). This is done in a loop, so it will keep completing\nthose tasks until there are only READY User tasks, or WAITING tasks\nleft.", "id": "f7706:c0:m2"}
{"signature": "def get_waiting_tasks(self):", "body": "return self.get_tasks(Task.WAITING)<EOL>", "docstring": "Returns a list of all WAITING tasks", "id": "f7706:c0:m5"}
{"signature": "def deserialize_workflow_spec(self, s_state, filename=None):", "body": "if isinstance(s_state, (str, bytes)):<EOL><INDENT>s_state = BytesIO(s_state)<EOL><DEDENT>package_zip = zipfile.ZipFile(<EOL>s_state, \"<STR_LIT:r>\", compression=zipfile.ZIP_DEFLATED)<EOL>config = configparser.ConfigParser()<EOL>ini_fp = TextIOWrapper(<EOL>package_zip.open(Packager.METADATA_FILE), encoding=\"<STR_LIT>\")<EOL>try:<EOL><INDENT>config.read_file(ini_fp)<EOL><DEDENT>finally:<EOL><INDENT>ini_fp.close()<EOL><DEDENT>parser_class = BpmnParser<EOL>try:<EOL><INDENT>parser_class_module = config.get(<EOL>'<STR_LIT>', '<STR_LIT>', fallback=None)<EOL><DEDENT>except TypeError:<EOL><INDENT>parser_class_module = config.get(<EOL>'<STR_LIT>', '<STR_LIT>', None)<EOL><DEDENT>if parser_class_module:<EOL><INDENT>mod = __import__(parser_class_module, fromlist=[<EOL>config.get('<STR_LIT>', '<STR_LIT>')])<EOL>parser_class = getattr(mod, config.get('<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>parser = parser_class()<EOL>for info in package_zip.infolist():<EOL><INDENT>parts = os.path.split(info.filename)<EOL>if (len(parts) == <NUM_LIT:2> and<EOL>not parts[<NUM_LIT:0>] and parts[<NUM_LIT:1>].lower().endswith('<STR_LIT>')):<EOL><INDENT>try:<EOL><INDENT>svg = package_zip.read(info.filename[:-<NUM_LIT:5>] + '<STR_LIT>')<EOL><DEDENT>except KeyError:<EOL><INDENT>svg = None<EOL><DEDENT>bpmn_fp = package_zip.open(info)<EOL>try:<EOL><INDENT>bpmn = ET.parse(bpmn_fp)<EOL><DEDENT>finally:<EOL><INDENT>bpmn_fp.close()<EOL><DEDENT>parser.add_bpmn_xml(<EOL>bpmn, svg=svg,<EOL>filename='<STR_LIT>' % (filename, info.filename))<EOL><DEDENT><DEDENT>return parser.get_spec(config.get('<STR_LIT>', '<STR_LIT>'))<EOL>", "docstring": ":param s_state: a byte-string with the contents of the packaged\nworkflow archive, or a file-like object.\n\n:param filename: the name of the package file.", "id": "f7708:c0:m3"}
{"signature": "@classmethod<EOL><INDENT>def add_additional_options(cls, parser):<DEDENT>", "body": "group = OptionGroup(parser, \"<STR_LIT>\",<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>group.add_option(\"<STR_LIT>\", \"<STR_LIT>\", dest=\"<STR_LIT>\",<EOL>help=\"<STR_LIT>\")<EOL>group.add_option(<EOL>\"<STR_LIT>\", \"<STR_LIT>\", dest=\"<STR_LIT>\",<EOL>help=\"<STR_LIT>\")<EOL>parser.add_option_group(group)<EOL>", "docstring": "Override in subclass if required.", "id": "f7709:c0:m19"}
{"signature": "@classmethod<EOL><INDENT>def add_main_options(cls, parser):<DEDENT>", "body": "parser.add_option(\"<STR_LIT>\", \"<STR_LIT>\", dest=\"<STR_LIT>\",<EOL>help=\"<STR_LIT>\")<EOL>parser.add_option(\"<STR_LIT>\", \"<STR_LIT>\", dest=\"<STR_LIT>\",<EOL>help=\"<STR_LIT>\")<EOL>parser.add_option(\"<STR_LIT:-c>\", \"<STR_LIT>\", dest=\"<STR_LIT>\",<EOL>help=\"<STR_LIT>\")<EOL>parser.add_option(<EOL>\"<STR_LIT>\", \"<STR_LIT>\", action=\"<STR_LIT:store_true>\",<EOL>dest=\"<STR_LIT>\", default=False,<EOL>help=\"<STR_LIT>\")<EOL>group = OptionGroup(parser, \"<STR_LIT>\",<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>group.add_option(\"<STR_LIT>\", dest=\"<STR_LIT>\",<EOL>help=\"<STR_LIT>\")<EOL>parser.add_option_group(group)<EOL>", "docstring": "Override in subclass if required.", "id": "f7709:c0:m18"}
{"signature": "def package_for_editor_signavio(self, spec, filename):", "body": "signavio_file = filename[:-len('<STR_LIT>')] + '<STR_LIT>'<EOL>if os.path.exists(signavio_file):<EOL><INDENT>self.write_file_to_package_zip(<EOL>\"<STR_LIT>\" + self._get_zip_path(signavio_file), signavio_file)<EOL>f = open(signavio_file, '<STR_LIT:r>')<EOL>try:<EOL><INDENT>signavio_tree = ET.parse(f)<EOL><DEDENT>finally:<EOL><INDENT>f.close()<EOL><DEDENT>svg_node = one(signavio_tree.findall('<STR_LIT>'))<EOL>self.write_to_package_zip(\"<STR_LIT>\" % spec.name, svg_node.text)<EOL><DEDENT>", "docstring": "Adds the SVG files to the archive for this BPMN file.", "id": "f7709:c0:m13"}
{"signature": "def __init__(self, package_file, entry_point_process, meta_data=None,<EOL>editor=None):", "body": "self.package_file = package_file<EOL>self.entry_point_process = entry_point_process<EOL>self.parser = self.PARSER_CLASS()<EOL>self.meta_data = meta_data or []<EOL>self.input_files = []<EOL>self.input_path_prefix = None<EOL>self.editor = editor<EOL>self.manifest = {}<EOL>", "docstring": "Constructor.\n\n:param package_file: a file-like object where the contents of the\npackage must be written to\n\n:param entry_point_process: the name or ID of the entry point process\n\n:param meta_data: A list of meta-data tuples to include in the\nmetadata.ini file (in addition to the standard ones)\n\n:param editor: The name of the editor used to create the source BPMN /\nSVG files. This activates additional hook method calls. (optional)", "id": "f7709:c0:m0"}
{"signature": "def add_bpmn_files(self, filenames):", "body": "self.input_files += filenames<EOL>", "docstring": "Add all filenames in the given list to the packager's set.", "id": "f7709:c0:m3"}
{"signature": "def _fix_call_activities_signavio(self, bpmn, filename):", "body": "for node in xpath_eval(bpmn)(\"<STR_LIT>\"):<EOL><INDENT>calledElement = node.get('<STR_LIT>', None)<EOL>if not calledElement:<EOL><INDENT>signavioMetaData = xpath_eval(node, extra_ns={<EOL>'<STR_LIT>': SIGNAVIO_NS})(<EOL>'<STR_LIT>')<EOL>if not signavioMetaData:<EOL><INDENT>raise ValidationException(<EOL>'<STR_LIT>',<EOL>node=node, filename=filename)<EOL><DEDENT>subprocess_reference = one(signavioMetaData).get('<STR_LIT>')<EOL>matches = []<EOL>for b in list(self.bpmn.values()):<EOL><INDENT>for p in xpath_eval(b)(\"<STR_LIT>\"):<EOL><INDENT>if (p.get('<STR_LIT:name>', p.get('<STR_LIT:id>', None)) ==<EOL>subprocess_reference):<EOL><INDENT>matches.append(p)<EOL><DEDENT><DEDENT><DEDENT>if not matches:<EOL><INDENT>raise ValidationException(<EOL>\"<STR_LIT>\" %<EOL>subprocess_reference, node=node, filename=filename)<EOL><DEDENT>if len(matches) != <NUM_LIT:1>:<EOL><INDENT>raise ValidationException(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % subprocess_reference, node=node,<EOL>filename=filename)<EOL><DEDENT>node.set('<STR_LIT>', matches[<NUM_LIT:0>].get('<STR_LIT:id>'))<EOL><DEDENT><DEDENT>", "docstring": "Signavio produces slightly invalid BPMN for call activity nodes... It\nis supposed to put a reference to the id of the called process in to\nthe calledElement attribute. Instead it stores a string (which is the\nname of the process - not its ID, in our interpretation) in an\nextension tag.\n\nThis code gets the name of the 'subprocess reference', finds a process\nwith a matching name, and sets the calledElement attribute to the id of\nthe process.", "id": "f7709:c0:m11"}
{"signature": "@classmethod<EOL><INDENT>def create_option_parser(cls):<DEDENT>", "body": "return OptionParser(<EOL>usage=(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"),<EOL>version=\"<STR_LIT>\" % (cls.get_version()))<EOL>", "docstring": "Override in subclass if required.", "id": "f7709:c0:m17"}
{"signature": "def entering_cancelled_state(self, my_task):", "body": "pass<EOL>", "docstring": "Called when a task enters the CANCELLED state.\n\nA subclass may override this method to do work when this happens.", "id": "f7711:c2:m12"}
{"signature": "def get_outgoing_sequences(self):", "body": "return iter(list(self.outgoing_sequence_flows_by_id.values()))<EOL>", "docstring": "Returns a list of outgoing sequences. Some may be None.", "id": "f7711:c2:m7"}
{"signature": "def __init__(self, wf_spec, name, lane=None, **kwargs):", "body": "super(BpmnSpecMixin, self).__init__(wf_spec, name, **kwargs)<EOL>self.outgoing_sequence_flows = {}<EOL>self.outgoing_sequence_flows_by_id = {}<EOL>self.lane = lane<EOL>self.documentation = None<EOL>", "docstring": "Constructor.\n\n:param lane: Indicates the name of the lane that this task belongs to\n(optional).", "id": "f7711:c2:m0"}
{"signature": "def connect_outgoing(self, taskspec, sequence_flow_id, sequence_flow_name,<EOL>documentation):", "body": "self.connect(taskspec)<EOL>s = SequenceFlow(<EOL>sequence_flow_id, sequence_flow_name, documentation, taskspec)<EOL>self.outgoing_sequence_flows[taskspec.name] = s<EOL>self.outgoing_sequence_flows_by_id[sequence_flow_id] = s<EOL>", "docstring": "Connect this task spec to the indicated child.\n\n:param sequence_flow_id: The ID of the connecting sequenceFlow node.\n\n:param sequence_flow_name: The name of the connecting sequenceFlow\nnode.", "id": "f7711:c2:m1"}
{"signature": "def get_outgoing_sequence_flow_by_id(self, id):", "body": "return self.outgoing_sequence_flows_by_id[id]<EOL>", "docstring": "Returns the outgoing SequenceFlow with the specified ID.", "id": "f7711:c2:m4"}
{"signature": "def entering_waiting_state(self, my_task):", "body": "pass<EOL>", "docstring": "Called when a task enters the WAITING state.\n\nA subclass may override this method to do work when this happens.", "id": "f7711:c2:m9"}
{"signature": "def connect_outgoing_if(self, condition, taskspec, sequence_flow_id,<EOL>sequence_flow_name, documentation):", "body": "self.connect_if(_BpmnCondition(condition), taskspec)<EOL>s = SequenceFlow(<EOL>sequence_flow_id, sequence_flow_name, documentation, taskspec)<EOL>self.outgoing_sequence_flows[taskspec.name] = s<EOL>self.outgoing_sequence_flows_by_id[sequence_flow_id] = s<EOL>", "docstring": "Connect this task spec to the indicated child, if the condition\nevaluates to true. This should only be called if the task has a\nconnect_if method (e.g. ExclusiveGateway).\n\n:param sequence_flow_id: The ID of the connecting sequenceFlow node.\n\n:param sequence_flow_name: The name of the connecting sequenceFlow\nnode.", "id": "f7711:c2:m2"}
{"signature": "def __init__(self, wf_spec, name, event_definition=None, **kwargs):", "body": "super(IntermediateCatchEvent, self).__init__(wf_spec, name, **kwargs)<EOL>self.event_definition = event_definition<EOL>", "docstring": "Constructor.\n\n:param event_definition: the EventDefinition that we must wait for.", "id": "f7717:c0:m0"}
{"signature": "def has_fired(self, my_task):", "body": "dt = my_task.workflow.script_engine.evaluate(my_task, self.dateTime)<EOL>if dt is None:<EOL><INDENT>return False<EOL><DEDENT>if dt.tzinfo:<EOL><INDENT>tz = dt.tzinfo<EOL>now = tz.fromutc(datetime.datetime.utcnow().replace(tzinfo=tz))<EOL><DEDENT>else:<EOL><INDENT>now = datetime.datetime.now()<EOL><DEDENT>return now > dt<EOL>", "docstring": "The Timer is considered to have fired if the evaluated dateTime\nexpression is before datetime.datetime.now()", "id": "f7720:c3:m1"}
{"signature": "def get_workflow_class(self):", "body": "return self.wf_class<EOL>", "docstring": "Returns the workflow class to instantiate for the sub workflow", "id": "f7723:c0:m3"}
{"signature": "def get_all_lanes(self):", "body": "done = set()<EOL>lanes = set()<EOL>def recursive_find(task_spec):<EOL><INDENT>if task_spec in done:<EOL><INDENT>return<EOL><DEDENT>done.add(task_spec)<EOL>if hasattr(task_spec, '<STR_LIT>') and task_spec.lane:<EOL><INDENT>lanes.add(task_spec.lane)<EOL><DEDENT>if hasattr(task_spec, '<STR_LIT>'):<EOL><INDENT>recursive_find(task_spec.spec.start)<EOL><DEDENT>for t in task_spec.outputs:<EOL><INDENT>recursive_find(t)<EOL><DEDENT><DEDENT>recursive_find(self.start)<EOL>return lanes<EOL>", "docstring": "Returns a set of the distinct lane names used in the process (including\ncalled activities)", "id": "f7725:c1:m1"}
{"signature": "def get_specs_depth_first(self):", "body": "done = set()<EOL>specs = [self]<EOL>def recursive_find(task_spec):<EOL><INDENT>if task_spec in done:<EOL><INDENT>return<EOL><DEDENT>done.add(task_spec)<EOL>if hasattr(task_spec, '<STR_LIT>'):<EOL><INDENT>specs.append(task_spec.spec)<EOL>recursive_find(task_spec.spec.start)<EOL><DEDENT>for t in task_spec.outputs:<EOL><INDENT>recursive_find(t)<EOL><DEDENT><DEDENT>recursive_find(self.start)<EOL>return specs<EOL>", "docstring": "Get the specs for all processes (including called ones), in depth first\norder.", "id": "f7725:c1:m2"}
{"signature": "def evaluate(self, task, expression):", "body": "if isinstance(expression, Operator):<EOL><INDENT>return expression._matches(task)<EOL><DEDENT>else:<EOL><INDENT>return self._eval(task, expression, **task.data)<EOL><DEDENT>", "docstring": "Evaluate the given expression, within the context of the given task and\nreturn the result.", "id": "f7727:c0:m0"}
{"signature": "def get_script(self):", "body": "return one(self.xpath('<STR_LIT>')).text<EOL>", "docstring": "Gets the script content from the node. A subclass can override this\nmethod, if the script needs to be pre-parsed. The result of this call\nwill be passed to the Script Engine for execution.", "id": "f7728:c9:m1"}
{"signature": "def get_timer_event_definition(self, timerEventDefinition):", "body": "timeDate = first(self.xpath('<STR_LIT>'))<EOL>return TimerEventDefinition(<EOL>self.node.get('<STR_LIT:name>', timeDate.text),<EOL>self.parser.parse_condition(<EOL>timeDate.text, None, None, None, None, self))<EOL>", "docstring": "Parse the timerEventDefinition node and return an instance of\nTimerEventDefinition\n\nThis currently only supports the timeDate node for specifying an expiry\ntime for the timer.", "id": "f7728:c10:m3"}
{"signature": "def get_message_event_definition(self, messageEventDefinition):", "body": "messageRef = first(self.xpath('<STR_LIT>'))<EOL>message = messageRef.get(<EOL>'<STR_LIT:name>') if messageRef is not None else self.node.get('<STR_LIT:name>')<EOL>return MessageEventDefinition(message)<EOL>", "docstring": "Parse the messageEventDefinition node and return an instance of\nMessageEventDefinition", "id": "f7728:c10:m2"}
{"signature": "def add_bpmn_xml(self, bpmn, svg=None, filename=None):", "body": "xpath = xpath_eval(bpmn)<EOL>processes = xpath('<STR_LIT>')<EOL>for process in processes:<EOL><INDENT>process_parser = self.PROCESS_PARSER_CLASS(<EOL>self, process, svg, filename=filename, doc_xpath=xpath)<EOL>if process_parser.get_id() in self.process_parsers:<EOL><INDENT>raise ValidationException(<EOL>'<STR_LIT>', node=process, filename=filename)<EOL><DEDENT>if process_parser.get_name() in self.process_parsers_by_name:<EOL><INDENT>raise ValidationException(<EOL>'<STR_LIT>', node=process, filename=filename)<EOL><DEDENT>self.process_parsers[process_parser.get_id()] = process_parser<EOL>self.process_parsers_by_name[<EOL>process_parser.get_name()] = process_parser<EOL><DEDENT>", "docstring": "Add the given lxml representation of the BPMN file to the parser's set.\n\n:param svg: Optionally, provide the text data for the SVG of the BPMN\n  file\n:param filename: Optionally, provide the source filename.", "id": "f7729:c0:m6"}
{"signature": "def get_process_parser(self, process_id_or_name):", "body": "if process_id_or_name in self.process_parsers_by_name:<EOL><INDENT>return self.process_parsers_by_name[process_id_or_name]<EOL><DEDENT>else:<EOL><INDENT>return self.process_parsers[process_id_or_name]<EOL><DEDENT>", "docstring": "Returns the ProcessParser for the given process ID or name. It matches\nby name first.", "id": "f7729:c0:m2"}
{"signature": "def add_bpmn_files_by_glob(self, g):", "body": "self.add_bpmn_files(glob.glob(g))<EOL>", "docstring": "Add all filenames matching the provided pattern (e.g. *.bpmn) to the\nparser's set.", "id": "f7729:c0:m4"}
{"signature": "def __init__(self):", "body": "self.process_parsers = {}<EOL>self.process_parsers_by_name = {}<EOL>", "docstring": "Constructor.", "id": "f7729:c0:m0"}
{"signature": "def first(nodes):", "body": "if len(nodes) >= <NUM_LIT:1>:<EOL><INDENT>return nodes[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Return the first node in the given list, or None, if the list is empty.", "id": "f7730:m1"}
{"signature": "def full_tag(tag):", "body": "return '<STR_LIT>' % (BPMN_MODEL_NS, tag)<EOL>", "docstring": "Return the full tag name including namespace for the given BPMN tag. In\nother words, the name with namespace\nhttp://www.omg.org/spec/BPMN/20100524/MODEL", "id": "f7730:m3"}
{"signature": "def parse_node(self, node):", "body": "if node.get('<STR_LIT:id>') in self.parsed_nodes:<EOL><INDENT>return self.parsed_nodes[node.get('<STR_LIT:id>')]<EOL><DEDENT>(node_parser, spec_class) = self.parser._get_parser_class(node.tag)<EOL>if not node_parser or not spec_class:<EOL><INDENT>raise ValidationException(<EOL>\"<STR_LIT>\",<EOL>node=node, filename=self.filename)<EOL><DEDENT>np = node_parser(self, spec_class, node)<EOL>task_spec = np.parse_node()<EOL>return task_spec<EOL>", "docstring": "Parses the specified child task node, and returns the task spec. This\ncan be called by a TaskParser instance, that is owned by this\nProcessParser.", "id": "f7731:c0:m3"}
{"signature": "def connect_outgoing(self, outgoing_task, outgoing_task_node,<EOL>sequence_flow_node, is_default):", "body": "self.task.connect_outgoing(<EOL>outgoing_task, sequence_flow_node.get('<STR_LIT:id>'),<EOL>sequence_flow_node.get(<EOL>'<STR_LIT:name>', None),<EOL>self.parser._parse_documentation(sequence_flow_node,<EOL>task_parser=self))<EOL>", "docstring": "Connects this task to the indicating outgoing task, with the details in\nthe sequence flow. A subclass can override this method to get extra\ninformation from the node.", "id": "f7732:c0:m6"}
{"signature": "def get_lane(self):", "body": "return self.process_parser.get_lane(self.get_id())<EOL>", "docstring": "Return the name of the lane that contains this task", "id": "f7732:c0:m2"}
{"signature": "def _is_descendant_of(self, parent):", "body": "if self.parent is None:<EOL><INDENT>return False<EOL><DEDENT>if self.parent == parent:<EOL><INDENT>return True<EOL><DEDENT>return self.parent._is_descendant_of(parent)<EOL>", "docstring": "Returns True if parent is in the list of ancestors, returns False\notherwise.\n\n:type  parent: Task\n:param parent: The parent that is searched in the ancestors.\n:rtype:  bool\n:returns: Whether the parent was found.", "id": "f7734:c0:m20"}
{"signature": "def _find_any(self, task_spec):", "body": "tasks = []<EOL>if self.task_spec == task_spec:<EOL><INDENT>tasks.append(self)<EOL><DEDENT>for child in self:<EOL><INDENT>if child.task_spec != task_spec:<EOL><INDENT>continue<EOL><DEDENT>tasks.append(child)<EOL><DEDENT>return tasks<EOL>", "docstring": "Returns any descendants that have the given task spec assigned.\n\n:type  task_spec: TaskSpec\n:param task_spec: The wanted task spec.\n:rtype:  list(Task)\n:returns: The tasks objects that are attached to the given task spec.", "id": "f7734:c0:m22"}
{"signature": "def _find_ancestor(self, task_spec):", "body": "if self.parent is None:<EOL><INDENT>return self<EOL><DEDENT>if self.parent.task_spec == task_spec:<EOL><INDENT>return self.parent<EOL><DEDENT>return self.parent._find_ancestor(task_spec)<EOL>", "docstring": "Returns the ancestor that has the given task spec assigned.\nIf no such ancestor was found, the root task is returned.\n\n:type  task_spec: TaskSpec\n:param task_spec: The wanted task spec.\n:rtype:  Task\n:returns: The ancestor.", "id": "f7734:c0:m23"}
{"signature": "def get_state(self):", "body": "return self.state<EOL>", "docstring": "Returns this Task's state.", "id": "f7734:c0:m28"}
{"signature": "def _sync_children(self, task_specs, state=MAYBE):", "body": "LOG.debug(\"<STR_LIT>\" % self.get_name())<EOL>if task_specs is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>add = task_specs[:]<EOL>remove = []<EOL>for child in self.children:<EOL><INDENT>if child.triggered:<EOL><INDENT>continue<EOL><DEDENT>if child.task_spec in add:<EOL><INDENT>add.remove(child.task_spec)<EOL>continue<EOL><DEDENT>if child._is_definite():<EOL><INDENT>raise WorkflowException(self.task_spec,<EOL>'<STR_LIT>' %<EOL>repr(child))<EOL><DEDENT>remove.append(child)<EOL><DEDENT>for child in remove:<EOL><INDENT>self.children.remove(child)<EOL><DEDENT>for task_spec in add:<EOL><INDENT>self._add_child(task_spec, state)<EOL><DEDENT>", "docstring": "This method syncs up the task's children with the given list of task\nspecs. In other words::\n\n    - Add one child for each given TaskSpec, unless that child already\n      exists.\n    - Remove all children for which there is no spec in the given list,\n      unless it is a \"triggered\" task.\n\n.. note::\n\n   It is an error if the task has a non-predicted child that is\n   not given in the TaskSpecs.\n\n:type  task_specs: list(TaskSpec)\n:param task_specs: The list of task specs that may become children.\n:type  state: integer\n:param state: The bitmask of states for the new children.", "id": "f7734:c0:m18"}
{"signature": "def _find_child_of(self, parent_task_spec):", "body": "if self.parent is None:<EOL><INDENT>return self<EOL><DEDENT>if self.parent.task_spec == parent_task_spec:<EOL><INDENT>return self<EOL><DEDENT>return self.parent._find_child_of(parent_task_spec)<EOL>", "docstring": "Returns the ancestor that has a task with the given task spec\nas a parent.\nIf no such ancestor was found, the root task is returned.\n\n:type  parent_task_spec: TaskSpec\n:param parent_task_spec: The wanted ancestor.\n:rtype:  Task\n:returns: The child of the given ancestor.", "id": "f7734:c0:m21"}
{"signature": "def trigger(self, *args):", "body": "self.task_spec._on_trigger(self, *args)<EOL>", "docstring": "If recursive is True, the state is applied to the tree recursively.", "id": "f7734:c0:m38"}
{"signature": "def get_data(self, name, default=None):", "body": "return self.data.get(name, default)<EOL>", "docstring": "Returns the value of the data field with the given name, or the given\ndefault value if the data field does not exist.\n\n:type  name: str\n:param name: A data field name.\n:type  default: obj\n:param default: Return this value if the data field does not exist.\n:rtype:  obj\n:returns: The value of the data field", "id": "f7734:c0:m35"}
{"signature": "def _child_added_notify(self, child):", "body": "assert child is not None<EOL>self.children.append(child)<EOL>", "docstring": "Called by another Task to let us know that a child was added.", "id": "f7734:c0:m9"}
{"signature": "def _assign_new_thread_id(self, recursive=True):", "body": "self.__class__.thread_id_pool += <NUM_LIT:1><EOL>self.thread_id = self.__class__.thread_id_pool<EOL>if not recursive:<EOL><INDENT>return self.thread_id<EOL><DEDENT>for child in self:<EOL><INDENT>child.thread_id = self.thread_id<EOL><DEDENT>return self.thread_id<EOL>", "docstring": "Assigns a new thread id to the task.\n\n:type  recursive: bool\n:param recursive: Whether to assign the id to children recursively.\n:rtype:  bool\n:returns: The new thread id.", "id": "f7734:c0:m17"}
{"signature": "def get_state_name(self):", "body": "state_name = []<EOL>for state, name in list(self.state_names.items()):<EOL><INDENT>if self._has_state(state):<EOL><INDENT>state_name.append(name)<EOL><DEDENT><DEDENT>return '<STR_LIT:|>'.join(state_name)<EOL>", "docstring": "Returns a textual representation of this Task's state.", "id": "f7734:c0:m29"}
{"signature": "def _get_root(self):", "body": "if self.parent is None:<EOL><INDENT>return self<EOL><DEDENT>return self.parent._get_root()<EOL>", "docstring": "Returns the top level parent.", "id": "f7734:c0:m7"}
{"signature": "def __init__(self,<EOL>left_attribute,<EOL>right_attribute=None,<EOL>right=None,<EOL>**kwargs):", "body": "if not right_attribute and not right:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>assert left_attribute is not None<EOL>self.left_attribute = left_attribute<EOL>self.right_attribute = right_attribute<EOL>self.right = right<EOL>", "docstring": "Constructor.\n\n:type  left_attribute: str\n:param left_attribute: The name of the attribute to which the value\n                       is assigned.\n:type  right: object\n:param right: A static value that, when given, is assigned to\n              left_attribute.\n:type  right_attribute: str\n:param right_attribute: When given, the attribute with the given\n                        name is used as the source (instead of the\n                        static value).\n:type  kwargs: dict\n:param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.", "id": "f7735:c3:m0"}
{"signature": "@classmethod<EOL><INDENT>def deserialize(cls, serializer, s_state):<DEDENT>", "body": "return serializer.deserialize_pathattrib(s_state)<EOL>", "docstring": "Serializes the instance using the provided serializer.\n\n:type  serializer: :class:`SpiffWorkflow.serializer.base.Serializer`\n:param serializer: The serializer to use.\n:rtype:  object\n:returns: The serialized object.", "id": "f7735:c2:m2"}
{"signature": "def __init__(self, left, right):", "body": "Operator.__init__(self, left, right)<EOL>", "docstring": "Constructor.", "id": "f7735:c7:m0"}
{"signature": "@classmethod<EOL><INDENT>def deserialize(cls, serializer, s_state):<DEDENT>", "body": "return serializer.deserialize_assign(s_state)<EOL>", "docstring": "Serializes the instance using the provided serializer.\n\n:type  serializer: :class:`SpiffWorkflow.serializer.base.Serializer`\n:param serializer: The serializer to use.\n:rtype:  object\n:returns: The serialized object.", "id": "f7735:c3:m3"}
{"signature": "def serialize(self, serializer):", "body": "return serializer.serialize_pathattrib(self)<EOL>", "docstring": "Serializes the instance using the provided serializer.\n\n:type  serializer: :class:`SpiffWorkflow.serializer.base.Serializer`\n:param serializer: The serializer to use.\n:rtype:  object\n:returns: The serialized object.", "id": "f7735:c2:m1"}
{"signature": "def __call__(self, *args, **kwargs):", "body": "method = self.get_function()<EOL>if method is None:<EOL><INDENT>raise DeadMethodCalled('<STR_LIT>' + self.name)<EOL><DEDENT>method(*args, **kwargs)<EOL>", "docstring": "Proxied to the underlying function or method. Raises\n:class:`DeadMethodCalled` if the referenced function is dead.\n\n:rtype:  object :returns: Whatever the referenced function returned.", "id": "f7739:c1:m4"}
{"signature": "def ref(function, callback=None):", "body": "try:<EOL><INDENT>function.__func__<EOL><DEDENT>except AttributeError:<EOL><INDENT>return _WeakMethodFree(function, callback)<EOL><DEDENT>return _WeakMethodBound(function, callback)<EOL>", "docstring": "Returns a weak reference to the given method or function.\nIf the callback argument is not None, it is called as soon\nas the referenced function is garbage deleted.\n\n:type  function: callable\n:param function: The function to reference.\n:type  callback: callable\n:param callback: Called when the function dies.", "id": "f7739:m0"}
{"signature": "def __init__(self, name, callback):", "body": "self.name = name<EOL>self.callback = callback<EOL>", "docstring": "Constructor. Do not use directly, use :class:`ref()` instead.", "id": "f7739:c1:m0"}
{"signature": "def get_function(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Returns the referenced method/function if it is still alive.\nReturns None otherwise.\n\n:rtype:  callable|None\n:returns: The referenced function if it is still alive.", "id": "f7739:c1:m2"}
{"signature": "def connect(self, callback, *args, **kwargs):", "body": "if self.is_connected(callback):<EOL><INDENT>raise AttributeError('<STR_LIT>')<EOL><DEDENT>if self.hard_subscribers is None:<EOL><INDENT>self.hard_subscribers = []<EOL><DEDENT>self.hard_subscribers.append((callback, args, kwargs))<EOL>", "docstring": "Connects the event with the given callback.\nWhen the signal is emitted, the callback is invoked.\n\n.. note::\n\n    The signal handler is stored with a hard reference, so you\n    need to make sure to call :class:`disconnect()` if you want the\n    handler\n    to be garbage collected.\n\n:type  callback: object\n:param callback: The callback function.\n:type  args: tuple\n:param args: Optional arguments passed to the callback.\n:type  kwargs: dict\n:param kwargs: Optional keyword arguments passed to the callback.", "id": "f7740:c0:m2"}
{"signature": "def listen(self, callback, *args, **kwargs):", "body": "if self.lock is None:<EOL><INDENT>self.lock = Lock()<EOL><DEDENT>with self.lock:<EOL><INDENT>if self.is_connected(callback):<EOL><INDENT>raise AttributeError('<STR_LIT>')<EOL><DEDENT>if self.weak_subscribers is None:<EOL><INDENT>self.weak_subscribers = []<EOL><DEDENT>ref = weakmethod.ref(callback, self._try_disconnect)<EOL>self.weak_subscribers.append((ref, args, kwargs))<EOL><DEDENT>return ref<EOL>", "docstring": "Like :class:`connect()`, but uses a weak reference instead of a\nnormal reference.\nThe signal is automatically disconnected as soon as the handler\nis garbage collected.\n\n.. note::\n\n    Storing signal handlers as weak references means that if\n    your handler is a local function, it may be garbage collected. To\n    prevent this, use :class:`connect()` instead.\n\n:type  callback: object\n:param callback: The callback function.\n:type  args: tuple\n:param args: Optional arguments passed to the callback.\n:type  kwargs: dict\n:param kwargs: Optional keyword arguments passed to the callback.\n:rtype:  :class:`Exscript.util.weakmethod.WeakMethod`\n:returns: The newly created weak reference to the callback.", "id": "f7740:c0:m3"}
{"signature": "def __call__(self, *args, **kwargs):", "body": "return self.emit(*args, **kwargs)<EOL>", "docstring": "Like emit().", "id": "f7740:c0:m1"}
{"signature": "def serialize_operator_not_equal(self, op):", "body": "elem = etree.Element('<STR_LIT>')<EOL>return self.serialize_value_list(elem, op.args)<EOL>", "docstring": "Serializer for :meth:`SpiffWorkflow.operators.NotEqual`.\n\nExample::\n\n    <not-equals>\n        <value>text</value>\n        <value><attribute>foobar</attribute></value>\n        <value><path>foobar</path></value>\n    </not-equals>", "id": "f7744:c0:m14"}
{"signature": "def serialize_value_map(self, map_elem, thedict):", "body": "for key, value in sorted((str(k), v) for (k, v) in thedict.items()):<EOL><INDENT>var_elem = SubElement(map_elem, '<STR_LIT>')<EOL>SubElement(var_elem, '<STR_LIT:name>').text = str(key)<EOL>value_elem = SubElement(var_elem, '<STR_LIT:value>')<EOL>self.serialize_value(value_elem, value)<EOL><DEDENT>return map_elem<EOL>", "docstring": "Serializes a dictionary of key/value pairs, where the values are\neither strings, or Attrib, or PathAttrib objects.\n\nExample::\n\n    <variable>\n        <name>foo</name>\n        <value>text</value>\n    </variable>\n    <variable>\n        <name>foo2</name>\n        <value><attribute>foobar</attribute></value>\n    </variable>", "id": "f7744:c0:m8"}
{"signature": "def serialize_task_spec(self, spec, elem):", "body": "if spec.id is not None:<EOL><INDENT>SubElement(elem, '<STR_LIT:id>').text = str(spec.id)<EOL><DEDENT>SubElement(elem, '<STR_LIT:name>').text = spec.name<EOL>if spec.description:<EOL><INDENT>SubElement(elem, '<STR_LIT:description>').text = spec.description<EOL><DEDENT>if spec.manual:<EOL><INDENT>SubElement(elem, '<STR_LIT>')<EOL><DEDENT>if spec.internal:<EOL><INDENT>SubElement(elem, '<STR_LIT>')<EOL><DEDENT>SubElement(elem, '<STR_LIT>').text = str(spec.lookahead)<EOL>inputs = [t.name for t in spec.inputs]<EOL>outputs = [t.name for t in spec.outputs]<EOL>self.serialize_value_list(SubElement(elem, '<STR_LIT>'), inputs)<EOL>self.serialize_value_list(SubElement(elem, '<STR_LIT>'), outputs)<EOL>self.serialize_value_map(SubElement(elem, '<STR_LIT:data>'), spec.data)<EOL>self.serialize_value_map(SubElement(elem, '<STR_LIT>'), spec.defines)<EOL>self.serialize_value_list(SubElement(elem, '<STR_LIT>'),<EOL>spec.pre_assign)<EOL>self.serialize_value_list(SubElement(elem, '<STR_LIT>'),<EOL>spec.post_assign)<EOL>return elem<EOL>", "docstring": "Serializes common attributes of :meth:`SpiffWorkflow.specs.TaskSpec`.", "id": "f7744:c0:m23"}
{"signature": "def serialize_operator_match(self, op):", "body": "elem = etree.Element('<STR_LIT>')<EOL>return self.serialize_value_list(elem, op.args)<EOL>", "docstring": "Serializer for :meth:`SpiffWorkflow.operators.Match`.\n\nExample::\n\n    <matches>\n        <value>text</value>\n        <value><attribute>foobar</attribute></value>\n    </matches>", "id": "f7744:c0:m20"}
{"signature": "def serialize_value(self, parent_elem, value):", "body": "if isinstance(value, (str, int)) or type(value).__name__ == '<STR_LIT:str>':<EOL><INDENT>parent_elem.text = str(value)<EOL><DEDENT>elif value is None:<EOL><INDENT>parent_elem.text = None<EOL><DEDENT>else:<EOL><INDENT>parent_elem.append(value.serialize(self))<EOL><DEDENT>", "docstring": "Serializes str, Attrib, or PathAttrib objects.\n\nExample::\n\n    <attribute>foobar</attribute>", "id": "f7744:c0:m6"}
{"signature": "def serialize_operator_equal(self, op):", "body": "elem = etree.Element('<STR_LIT>')<EOL>return self.serialize_value_list(elem, op.args)<EOL>", "docstring": "Serializer for :meth:`SpiffWorkflow.operators.Equal`.\n\nExample::\n\n    <equals>\n        <value>text</value>\n        <value><attribute>foobar</attribute></value>\n        <value><path>foobar</path></value>\n    </equals>", "id": "f7744:c0:m12"}
{"signature": "def serialize_pathattrib(self, op):", "body": "elem = etree.Element('<STR_LIT:path>')<EOL>elem.text = op.path<EOL>return elem<EOL>", "docstring": "Serializer for :meth:`SpiffWorkflow.operators.PathAttrib`.\n\nExample::\n\n    <path>foobar</path>", "id": "f7744:c0:m2"}
{"signature": "def deserialize_condition(self, workflow, start_node):", "body": "<EOL>condition = None<EOL>spec_name = None<EOL>for node in start_node.childNodes:<EOL><INDENT>if node.nodeType != minidom.Node.ELEMENT_NODE:<EOL><INDENT>continue<EOL><DEDENT>if node.nodeName.lower() == '<STR_LIT>':<EOL><INDENT>if spec_name is not None:<EOL><INDENT>_exc('<STR_LIT>' % spec_name)<EOL><DEDENT>if node.firstChild is None:<EOL><INDENT>_exc('<STR_LIT>')<EOL><DEDENT>spec_name = node.firstChild.nodeValue<EOL><DEDENT>elif node.nodeName.lower() in _op_map:<EOL><INDENT>if condition is not None:<EOL><INDENT>_exc('<STR_LIT>')<EOL><DEDENT>condition = self.deserialize_logical(node)<EOL><DEDENT>else:<EOL><INDENT>_exc('<STR_LIT>' % node.nodeName)<EOL><DEDENT><DEDENT>if condition is None:<EOL><INDENT>_exc('<STR_LIT>')<EOL><DEDENT>if spec_name is None:<EOL><INDENT>_exc('<STR_LIT>' % start_node.nodeName)<EOL><DEDENT>return condition, spec_name<EOL>", "docstring": "Reads the conditional statement from the given node.\n\nworkflow -- the workflow with which the concurrence is associated\nstart_node -- the xml structure (xml.dom.minidom.Node)", "id": "f7748:c0:m4"}
{"signature": "def _on_complete_hook(self, my_task):", "body": "times = int(valueof(my_task, self.times, <NUM_LIT:1>)) + self.queued<EOL>for i in range(times):<EOL><INDENT>for task_name in self.context:<EOL><INDENT>task = my_task.workflow.get_task_spec_from_name(task_name)<EOL>task._on_trigger(my_task)<EOL><DEDENT><DEDENT>self.queued = <NUM_LIT:0><EOL>TaskSpec._on_complete_hook(self, my_task)<EOL>", "docstring": "A hook into _on_complete() that does the task specific work.\n\n:type  my_task: Task\n:param my_task: A task in which this method is executed.\n:rtype:  bool\n:returns: True on success, False otherwise.", "id": "f7749:c0:m2"}
{"signature": "@classmethod<EOL><INDENT>def deserialize(cls, serializer, wf_spec, s_state, **kwargs):<DEDENT>", "body": "return serializer.deserialize_trigger(wf_spec,<EOL>s_state,<EOL>**kwargs)<EOL>", "docstring": "Deserializes the trigger using the provided serializer.", "id": "f7749:c0:m4"}
{"signature": "def __init__(self, wf_spec, name, context, times=<NUM_LIT:1>, **kwargs):", "body": "assert wf_spec is not None<EOL>assert name is not None<EOL>assert context is not None<EOL>assert isinstance(context, list)<EOL>TaskSpec.__init__(self, wf_spec, name, **kwargs)<EOL>self.context = context<EOL>self.times = times<EOL>self.queued = <NUM_LIT:0><EOL>", "docstring": "Constructor.\n\n:type  wf_spec: WorkflowSpec\n:param wf_spec: A reference to the workflow specification.\n:type  name: str\n:param name: The name of the task spec.\n:type  context: list(str)\n:param context: A list of the names of tasks that are to be triggered.\n:type  times: int or :class:`SpiffWorkflow.operators.Term`\n:param times: The number of signals before the trigger fires.\n:type  kwargs: dict\n:param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.", "id": "f7749:c0:m0"}
{"signature": "def _on_trigger(self, my_task):", "body": "for task in my_task.workflow.task_tree._find_any(self):<EOL><INDENT>if task.thread_id != my_task.thread_id:<EOL><INDENT>continue<EOL><DEDENT>self._do_join(task)<EOL><DEDENT>", "docstring": "May be called to fire the Join before the incoming branches are\ncompleted.", "id": "f7750:c0:m8"}
{"signature": "def _start(self, my_task, force=False):", "body": "<EOL>if my_task._has_state(Task.COMPLETED):<EOL><INDENT>return True, None<EOL><DEDENT>if my_task._has_state(Task.READY):<EOL><INDENT>return True, None<EOL><DEDENT>if self.split_task is None:<EOL><INDENT>return self._check_threshold_unstructured(my_task, force)<EOL><DEDENT>return self._check_threshold_structured(my_task, force)<EOL>", "docstring": "Checks whether the preconditions for going to READY state are met.\nReturns True if the threshold was reached, False otherwise.\nAlso returns the list of tasks that yet need to be completed.", "id": "f7750:c0:m5"}
{"signature": "def _clear_celery_task_data(self, my_task):", "body": "<EOL>if '<STR_LIT>' in my_task.internal_data:<EOL><INDENT>history = my_task._get_internal_data('<STR_LIT>', [])<EOL>history.append(my_task._get_internal_data('<STR_LIT>'))<EOL>del my_task.internal_data['<STR_LIT>']<EOL>my_task._set_internal_data(task_history=history)<EOL><DEDENT>if '<STR_LIT>' in my_task.internal_data:<EOL><INDENT>del my_task.internal_data['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT:error>' in my_task.internal_data:<EOL><INDENT>del my_task.internal_data['<STR_LIT:error>']<EOL><DEDENT>if hasattr(my_task, '<STR_LIT>'):<EOL><INDENT>delattr(my_task, '<STR_LIT>')<EOL><DEDENT>if hasattr(my_task, '<STR_LIT>'):<EOL><INDENT>delattr(my_task, '<STR_LIT>')<EOL><DEDENT>", "docstring": "Clear celery task data", "id": "f7751:c0:m3"}
{"signature": "def _restart(self, my_task):", "body": "if not my_task._has_state(Task.WAITING):<EOL><INDENT>raise WorkflowException(my_task, \"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>if my_task._get_internal_data('<STR_LIT>') is not None:<EOL><INDENT>if not hasattr(my_task, '<STR_LIT>'):<EOL><INDENT>task_id = my_task._get_internal_data('<STR_LIT>')<EOL>my_task.async_call = default_app.AsyncResult(task_id)<EOL>my_task.deserialized = True<EOL>my_task.async_call.state  <EOL><DEDENT>async_call = my_task.async_call<EOL>if async_call.state == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>elif async_call.state in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>async_call.revoke()<EOL>LOG.info(\"<STR_LIT>\" % (<EOL>async_call.state, async_call))<EOL><DEDENT>elif async_call.state == '<STR_LIT>':<EOL><INDENT>LOG.warning(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % async_call)<EOL><DEDENT>self._clear_celery_task_data(my_task)<EOL><DEDENT>return self._start(my_task)<EOL>", "docstring": "Abort celery task and retry it", "id": "f7751:c0:m2"}
{"signature": "def _start(self, my_task, force=False):", "body": "if (not hasattr(my_task, '<STR_LIT>')) or my_task.subprocess is None:<EOL><INDENT>my_task.subprocess = subprocess.Popen(self.args,<EOL>stderr=subprocess.STDOUT,<EOL>stdout=subprocess.PIPE)<EOL><DEDENT>if my_task.subprocess:<EOL><INDENT>my_task.subprocess.poll()<EOL>if my_task.subprocess.returncode is None:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>results = my_task.subprocess.communicate()<EOL>my_task.results = results<EOL>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Returns False when successfully fired, True otherwise", "id": "f7752:c0:m1"}
{"signature": "def __init__(self, wf_spec, name, args=None, **kwargs):", "body": "assert wf_spec is not None<EOL>assert name is not None<EOL>TaskSpec.__init__(self, wf_spec, name, **kwargs)<EOL>self.args = args<EOL>", "docstring": "Constructor.\n\n:type  wf_spec: WorkflowSpec\n:param wf_spec: A reference to the workflow specification.\n:type  name: str\n:param name: The name of the task spec.\n:type  args: list\n:param args: args to pass to process (first arg is the command).\n:type  kwargs: dict\n:param kwargs: kwargs to pass-through to TaskSpec initializer.", "id": "f7752:c0:m0"}
{"signature": "def _on_trigger(self, task_spec):", "body": "<EOL>my_task = self._find_my_task(task_spec)<EOL>if my_task._has_state(Task.COMPLETED):<EOL><INDENT>state = Task.READY<EOL><DEDENT>else:<EOL><INDENT>state = Task.FUTURE<EOL><DEDENT>for output in self.outputs:<EOL><INDENT>new_task = my_task._add_child(output, state)<EOL>new_task.triggered = True<EOL>output._predict(new_task)<EOL><DEDENT>", "docstring": "May be called after execute() was already completed to create an\nadditional outbound task.", "id": "f7753:c0:m2"}
{"signature": "def __init__(self, wf_spec, name, success=False, **kwargs):", "body": "TaskSpec.__init__(self, wf_spec, name, **kwargs)<EOL>self.cancel_successfully = success<EOL>", "docstring": "Constructor.\n\n:type  wf_spec: WorkflowSpec\n:param wf_spec: A reference to the workflow specification.\n:type  name: str\n:param name: The name of the task spec.\n:type  success: bool\n:param success: Whether to cancel successfully or unsuccessfully.\n:type  kwargs: dict\n:param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.", "id": "f7754:c0:m0"}
{"signature": "def _connect_notify(self, task_spec):", "body": "raise WorkflowException(self, '<STR_LIT>')<EOL>", "docstring": "Called by the previous task to let us know that it exists.", "id": "f7755:c0:m1"}
{"signature": "def __init__(self,<EOL>wf_spec,<EOL>name,<EOL>split_task,<EOL>**kwargs):", "body": "assert split_task is not None<EOL>Join.__init__(self, wf_spec, name, split_task, **kwargs)<EOL>", "docstring": "Constructor.\n\n:type  wf_spec: :class:`SpiffWorkflow.specs.WorkflowSpec`\n:param wf_spec: A reference to the parent (usually a workflow).\n:type  name: string\n:param name: A name for the task.\n:type  split_task: str\n:param split_task: The name of the task spec that was previously\n                   used to split the branch.\n:type  kwargs: dict\n:param kwargs: See :class:`SpiffWorkflow.specs.Join`.", "id": "f7757:c0:m0"}
{"signature": "def __init__(self, wf_spec, name, mutex, **kwargs):", "body": "assert mutex is not None<EOL>TaskSpec.__init__(self, wf_spec, name, **kwargs)<EOL>self.mutex = mutex<EOL>", "docstring": "Constructor.\n\n:type  wf_spec: WorkflowSpec\n:param wf_spec: A reference to the workflow specification.\n:type  name: str\n:param name: The name of the task spec.\n:type  mutex: str\n:param mutex: The name of the mutex that should be acquired.\n:type  kwargs: dict\n:param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.", "id": "f7759:c0:m0"}
{"signature": "def _on_complete_hook(self, my_task):", "body": "<EOL>for child in my_task.children:<EOL><INDENT>child.task_spec._update(child)<EOL><DEDENT>", "docstring": "A hook into _on_complete() that does the task specific work.\n\n:type  my_task: Task\n:param my_task: The associated task in the task tree.\n:rtype:  bool\n:returns: True on success, False otherwise.", "id": "f7760:c0:m20"}
{"signature": "def __init__(self, wf_spec, name, **kwargs):", "body": "assert wf_spec is not None<EOL>assert name is not None<EOL>self._wf_spec = wf_spec<EOL>self.id = None<EOL>self.name = str(name)<EOL>self.description = kwargs.get('<STR_LIT:description>', '<STR_LIT>')<EOL>self.inputs = []<EOL>self.outputs = []<EOL>self.manual = kwargs.get('<STR_LIT>', False)<EOL>self.internal = False  <EOL>self.data = kwargs.get('<STR_LIT:data>',        {})<EOL>self.defines = kwargs.get('<STR_LIT>',     {})<EOL>self.pre_assign = kwargs.get('<STR_LIT>',  [])<EOL>self.post_assign = kwargs.get('<STR_LIT>', [])<EOL>self.locks = kwargs.get('<STR_LIT>',        [])<EOL>self.lookahead = <NUM_LIT:2>  <EOL>self.entered_event = Event()<EOL>self.reached_event = Event()<EOL>self.ready_event = Event()<EOL>self.completed_event = Event()<EOL>self.cancelled_event = Event()<EOL>self.finished_event = Event()<EOL>self._wf_spec._add_notify(self)<EOL>self.data.update(self.defines)<EOL>assert self.id is not None<EOL>", "docstring": "Constructor.\n\nThe difference between the assignment of a data value using\nthe data argument versus pre_assign and post_assign is that\nchanges made using data are task-local, i.e. they are\nnot visible to other tasks.\nSimilarly, \"defines\" are spec data fields that, once defined, can\nno longer be modified.\n\n:type  wf_spec: WorkflowSpec\n:param wf_spec: A reference to the workflow specification that owns it.\n:type  name: string\n:param name: A name for the task.\n:type  lock: list(str)\n:param lock: A list of mutex names. The mutex is acquired\n             on entry of execute() and released on leave of\n             execute().\n:type  manual: bool\n:param manual: Whether this task requires a manual action to complete.\n:type  data: dict((str, object))\n:param data: name/value pairs\n:type  defines: dict((str, object))\n:param defines: name/value pairs\n:type  pre_assign: list((str, object))\n:param pre_assign: a list of name/value pairs\n:type  post_assign: list((str, object))\n:param post_assign: a list of name/value pairs", "id": "f7760:c0:m0"}
{"signature": "def _on_cancel(self, my_task):", "body": "self.cancelled_event.emit(my_task.workflow, my_task)<EOL>", "docstring": "May be called by another task to cancel the operation before it was\ncompleted.\n\n:type  my_task: Task\n:param my_task: The associated task in the task tree.", "id": "f7760:c0:m17"}
{"signature": "def _on_trigger(self, my_task):", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "May be called by another task to trigger a task-specific\nevent.\n\n:type  my_task: Task\n:param my_task: The associated task in the task tree.\n:rtype:  boolean\n:returns: True on success, False otherwise.", "id": "f7760:c0:m18"}
{"signature": "def _connect_notify(self, taskspec):", "body": "self.inputs.append(taskspec)<EOL>", "docstring": "Called by the previous task to let us know that it exists.\n\n:type  taskspec: TaskSpec\n:param taskspec: The task by which this method is executed.", "id": "f7760:c0:m1"}
{"signature": "def _update(self, my_task):", "body": "my_task._inherit_data()<EOL>self._update_hook(my_task)<EOL>", "docstring": "Called whenever any event happens that may affect the\nstate of this task in the workflow. For example, if a predecessor\ncompletes it makes sure to call this method so we can react.", "id": "f7760:c0:m12"}
{"signature": "def _get_activated_tasks(self, my_task, destination):", "body": "return my_task.children<EOL>", "docstring": "Returns the list of tasks that were activated in the previous\ncall of execute(). Only returns tasks that point towards the\ndestination task, i.e. those which have destination as a\ndescendant.\n\n:type  my_task: Task\n:param my_task: The associated task in the task tree.\n:type  destination: Task\n:param destination: The destination task.", "id": "f7760:c0:m3"}
{"signature": "def connect(self, taskspec):", "body": "self.outputs.append(taskspec)<EOL>taskspec._connect_notify(self)<EOL>", "docstring": "Connect the *following* task to this one. In other words, the\ngiven task is added as an output task.\n\n:type  taskspec: TaskSpec\n:param taskspec: The new output task.", "id": "f7760:c0:m7"}
{"signature": "def _predict(self, my_task, seen=None, looked_ahead=<NUM_LIT:0>):", "body": "if my_task._is_finished():<EOL><INDENT>return<EOL><DEDENT>if seen is None:<EOL><INDENT>seen = []<EOL><DEDENT>elif self in seen:<EOL><INDENT>return<EOL><DEDENT>if not my_task._is_finished():<EOL><INDENT>self._predict_hook(my_task)<EOL><DEDENT>if not my_task._is_definite():<EOL><INDENT>if looked_ahead + <NUM_LIT:1> >= self.lookahead:<EOL><INDENT>return<EOL><DEDENT>seen.append(self)<EOL><DEDENT>for child in my_task.children:<EOL><INDENT>child.task_spec._predict(child, seen[:], looked_ahead + <NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Updates the branch such that all possible future routes are added.\n\nShould NOT be overwritten! Instead, overwrite _predict_hook().\n\n:type  my_task: Task\n:param my_task: The associated task in the task tree.\n:type  seen: list[taskspec]\n:param seen: A list of already visited tasks.\n:type  looked_ahead: integer\n:param looked_ahead: The depth of the predicted path so far.", "id": "f7760:c0:m10"}
{"signature": "def _on_ready_before_hook(self, my_task):", "body": "pass<EOL>", "docstring": "A hook into _on_ready() that does the task specific work.\n\n:type  my_task: Task\n:param my_task: The associated task in the task tree.", "id": "f7760:c0:m15"}
{"signature": "def follow(self, taskspec):", "body": "taskspec.connect(self)<EOL>", "docstring": "Make this task follow the provided one. In other words, this task is\nadded to the given task outputs.\n\nThis is an alias to connect, just easier to understand when reading\ncode - ex: my_task.follow(the_other_task)\nAdding it after being confused by .connect one times too many!\n\n:type  taskspec: TaskSpec\n:param taskspec: The task to follow.", "id": "f7760:c0:m8"}
{"signature": "def _on_ready_hook(self, my_task):", "body": "pass<EOL>", "docstring": "A hook into _on_ready() that does the task specific work.\n\n:type  my_task: Task\n:param my_task: The associated task in the task tree.", "id": "f7760:c0:m16"}
{"signature": "def _on_complete_hook(self, my_task):", "body": "<EOL>outputs = []<EOL>for condition, output in self.cond_task_specs:<EOL><INDENT>if self.choice is not None and output not in self.choice:<EOL><INDENT>continue<EOL><DEDENT>if condition is None:<EOL><INDENT>outputs.append(self._wf_spec.get_task_spec_from_name(output))<EOL>continue<EOL><DEDENT>if not condition._matches(my_task):<EOL><INDENT>continue<EOL><DEDENT>outputs.append(self._wf_spec.get_task_spec_from_name(output))<EOL><DEDENT>my_task._sync_children(outputs, Task.FUTURE)<EOL>for child in my_task.children:<EOL><INDENT>child.task_spec._update(child)<EOL><DEDENT>", "docstring": "Runs the task. Should not be called directly.\nReturns True if completed, False otherwise.", "id": "f7761:c0:m6"}
{"signature": "def _on_trigger(self, my_task, choice):", "body": "self.choice = choice<EOL>", "docstring": "Lets a caller narrow down the choice by using a Choose trigger.", "id": "f7761:c0:m4"}
{"signature": "def connect_if(self, condition, task_spec):", "body": "assert task_spec is not None<EOL>self.outputs.append(task_spec)<EOL>self.cond_task_specs.append((condition, task_spec.name))<EOL>task_spec._connect_notify(self)<EOL>", "docstring": "Connects a taskspec that is executed if the condition DOES match.\n\ncondition -- a condition (Condition)\ntaskspec -- the conditional task spec", "id": "f7761:c0:m2"}
{"signature": "def connect(self, task_spec):", "body": "return self.connect_if(None, task_spec)<EOL>", "docstring": "Convenience wrapper around connect_if() where condition is set to None.", "id": "f7761:c0:m1"}
{"signature": "def __init__(self, wf_spec, name, **kwargs):", "body": "super(MultiChoice, self).__init__(wf_spec, name, **kwargs)<EOL>self.cond_task_specs = []<EOL>self.choice = None<EOL>", "docstring": "Constructor.\n\n:type  wf_spec: WorkflowSpec\n:param wf_spec: A reference to the workflow specification.\n:type  name: str\n:param name: The name of the task spec.\n:type  kwargs: dict\n:param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.", "id": "f7761:c0:m0"}
{"signature": "def connect(self, task_spec):", "body": "self.thread_starter.outputs.append(task_spec)<EOL>task_spec._connect_notify(self.thread_starter)<EOL>", "docstring": "Connect the *following* task to this one. In other words, the\ngiven task is added as an output task.\n\ntask -- the task to connect to.", "id": "f7767:c0:m1"}
{"signature": "def _get_activated_threads(self, my_task):", "body": "return my_task.children<EOL>", "docstring": "Returns the list of threads that were activated in the previous\ncall of execute().\n\nmy_task -- the task of this TaskSpec", "id": "f7767:c0:m3"}
{"signature": "def connect(self, task_spec):", "body": "assert self.default_task_spec is None<EOL>self.outputs.append(task_spec)<EOL>self.default_task_spec = task_spec.name<EOL>task_spec._connect_notify(self)<EOL>", "docstring": "Connects the task spec that is executed if no other condition\nmatches.\n\n:type  task_spec: TaskSpec\n:param task_spec: The following task spec.", "id": "f7768:c0:m1"}
{"signature": "def __init__(self, name=None, filename=None, nostart=False):", "body": "self.name = name or '<STR_LIT>'<EOL>self.description = '<STR_LIT>'<EOL>self.file = filename<EOL>self.task_specs = dict()<EOL>self.start = None<EOL>if not nostart:<EOL><INDENT>self.start = StartTask(self)<EOL><DEDENT>", "docstring": "Constructor.", "id": "f7772:c0:m0"}
{"signature": "def __init__(self, task, error):", "body": "WorkflowException.__init__(self, task.task_spec, error)<EOL>self.task = task<EOL>", "docstring": "Exception initialization.\n\n:param sender: the task that threw the exception\n:type sender: Task\n:param error: a human readable error message\n:type error: string", "id": "f7773:c1:m0"}
{"signature": "def __init__(self, sender, error):", "body": "Exception.__init__(self, '<STR_LIT>' % (sender.name, error))<EOL>self.sender = sender<EOL>", "docstring": "Standard exception class.\n\n:param sender: the task spec that threw the exception\n:type sender: TaskSpec\n:param error: a human readable error message\n:type error: string", "id": "f7773:c0:m0"}
{"signature": "def find_package_data(where='<STR_LIT:.>', package='<STR_LIT>',<EOL>exclude=standard_exclude,<EOL>exclude_directories=standard_exclude_directories,<EOL>only_in_packages=True,<EOL>show_ignored=False):", "body": "out = {}<EOL>stack = [(convert_path(where), '<STR_LIT>', package, only_in_packages)]<EOL>while stack:<EOL><INDENT>where, prefix, package, only_in_packages = stack.pop(<NUM_LIT:0>)<EOL>for name in os.listdir(where):<EOL><INDENT>fn = os.path.join(where, name)<EOL>if os.path.isdir(fn):<EOL><INDENT>bad_name = False<EOL>for pattern in exclude_directories:<EOL><INDENT>if (fnmatchcase(name, pattern)<EOL>or fn.lower() == pattern.lower()):<EOL><INDENT>bad_name = True<EOL>if show_ignored:<EOL><INDENT>sys.stderr.write(<EOL>\"<STR_LIT>\"<EOL>% (fn, pattern))<EOL><DEDENT>break<EOL><DEDENT><DEDENT>if bad_name:<EOL><INDENT>continue<EOL><DEDENT>if os.path.isfile(os.path.join(fn, '<STR_LIT>')):<EOL><INDENT>if not package:<EOL><INDENT>new_package = name<EOL><DEDENT>else:<EOL><INDENT>new_package = package + '<STR_LIT:.>' + name<EOL><DEDENT>stack.append((fn, '<STR_LIT>', new_package, False))<EOL><DEDENT>else:<EOL><INDENT>stack.append(<EOL>(fn, prefix + name + '<STR_LIT:/>', package, only_in_packages)<EOL>)<EOL><DEDENT><DEDENT>elif package or not only_in_packages:<EOL><INDENT>bad_name = False<EOL>for pattern in exclude:<EOL><INDENT>if (fnmatchcase(name, pattern)<EOL>or fn.lower() == pattern.lower()):<EOL><INDENT>bad_name = True<EOL>if show_ignored:<EOL><INDENT>sys.stderr.write(<EOL>\"<STR_LIT>\"<EOL>% (fn, pattern))<EOL><DEDENT>break<EOL><DEDENT><DEDENT>if bad_name:<EOL><INDENT>continue<EOL><DEDENT>out.setdefault(package, []).append(prefix + name)<EOL><DEDENT><DEDENT><DEDENT>return out<EOL>", "docstring": "Return a dictionary suitable for use in ``package_data``\nin a distutils ``setup.py`` file.\n\nThe dictionary looks like::\n\n    {'package': [files]}\n\nWhere ``files`` is a list of all the files in that package that\ndon't match anything in ``exclude``.\n\nIf ``only_in_packages`` is true, then top-level directories that\nare not packages won't be included (but directories under packages\nwill).\n\nDirectories matching any pattern in ``exclude_directories`` will\nbe ignored; by default directories with leading ``.``, ``CVS``,\nand ``_darcs`` will be ignored.\n\nIf ``show_ignored`` is true, then all the files that aren't\nincluded in package data are shown on stderr (for debugging\npurposes).\n\nNote patterns use wildcards, or can be exact paths (including\nleading ``./``), and all searching is case-insensitive.\n\nThis function is by Ian Bicking.", "id": "f7802:m2"}
{"signature": "def _after(self, response):", "body": "<EOL>if getattr(request, '<STR_LIT>', False):<EOL><INDENT>return response<EOL><DEDENT>duration = None<EOL>if getattr(request, '<STR_LIT>', None):<EOL><INDENT>duration = monotonic() - request._tracy_start_time<EOL><DEDENT>trace_id = None<EOL>if getattr(request, '<STR_LIT>', None):<EOL><INDENT>trace_id = request._tracy_id<EOL>response.headers[trace_header_id] = trace_id<EOL><DEDENT>trace_client = None<EOL>if getattr(request, '<STR_LIT>', None):<EOL><INDENT>trace_client = request._tracy_client<EOL><DEDENT>d = {'<STR_LIT>': response.status_code,<EOL>'<STR_LIT:url>': request.base_url,<EOL>'<STR_LIT>': request.remote_addr,<EOL>'<STR_LIT>': trace_client,<EOL>'<STR_LIT>': trace_id,<EOL>'<STR_LIT>': duration}<EOL>logger.info(None, extra=d)<EOL>return response<EOL>", "docstring": "Calculates the request duration, and adds a transaction\n        ID to the header.", "id": "f7803:c0:m3"}
{"signature": "@blueprint.route('<STR_LIT>', methods=['<STR_LIT:GET>', '<STR_LIT:POST>'])<EOL>@register_breadcrumb(blueprint, '<STR_LIT>', _('<STR_LIT>'))<EOL>@login_required<EOL>def new():", "body": "form = GroupForm(request.form)<EOL>if form.validate_on_submit():<EOL><INDENT>try:<EOL><INDENT>group = Group.create(admins=[current_user], **form.data)<EOL>flash(_('<STR_LIT>', name=group.name), '<STR_LIT:success>')<EOL>return redirect(url_for(\"<STR_LIT>\"))<EOL><DEDENT>except IntegrityError:<EOL><INDENT>flash(_('<STR_LIT>'), '<STR_LIT:error>')<EOL><DEDENT><DEDENT>return render_template(<EOL>\"<STR_LIT>\",<EOL>form=form,<EOL>)<EOL>", "docstring": "Create new group.", "id": "f7809:m4"}
{"signature": "@blueprint.route('<STR_LIT>', methods=['<STR_LIT:GET>', '<STR_LIT:POST>'])<EOL>@login_required<EOL>@register_breadcrumb(blueprint, '<STR_LIT>', _('<STR_LIT>'))<EOL>def new_member(group_id):", "body": "group = Group.query.get_or_404(group_id)<EOL>if group.can_invite_others(current_user):<EOL><INDENT>form = NewMemberForm()<EOL>if form.validate_on_submit():<EOL><INDENT>emails = filter(None, form.data['<STR_LIT>'].splitlines())<EOL>group.invite_by_emails(emails)<EOL>flash(_('<STR_LIT>'), '<STR_LIT:success>')<EOL>return redirect(url_for('<STR_LIT>', group_id=group.id))<EOL><DEDENT>return render_template(<EOL>\"<STR_LIT>\",<EOL>group=group,<EOL>form=form<EOL>)<EOL><DEDENT>flash(<EOL>_(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>group_name=group.name<EOL>),<EOL>'<STR_LIT:error>'<EOL>)<EOL>return redirect(url_for('<STR_LIT>'))<EOL>", "docstring": "Add (invite) new member.", "id": "f7809:m13"}
{"signature": "@blueprint.route('<STR_LIT>', methods=['<STR_LIT:POST>'])<EOL>@login_required<EOL>def delete(group_id):", "body": "group = Group.query.get_or_404(group_id)<EOL>if group.can_edit(current_user):<EOL><INDENT>try:<EOL><INDENT>group.delete()<EOL><DEDENT>except Exception as e:<EOL><INDENT>flash(str(e), \"<STR_LIT:error>\")<EOL>return redirect(url_for(\"<STR_LIT>\"))<EOL><DEDENT>flash(_('<STR_LIT>',<EOL>group_name=group.name), '<STR_LIT:success>')<EOL>return redirect(url_for(\"<STR_LIT>\"))<EOL><DEDENT>flash(<EOL>_(<EOL>'<STR_LIT>',<EOL>group_name=group.name<EOL>),<EOL>'<STR_LIT:error>'<EOL>)<EOL>return redirect(url_for(\"<STR_LIT>\"))<EOL>", "docstring": "Delete group.", "id": "f7809:m6"}
{"signature": "@blueprint.route('<STR_LIT>',<EOL>methods=['<STR_LIT:POST>'])<EOL>@login_required<EOL>def approve(group_id, user_id):", "body": "membership = Membership.query.get_or_404((user_id, group_id))<EOL>group = membership.group<EOL>if group.can_edit(current_user):<EOL><INDENT>try:<EOL><INDENT>membership.accept()<EOL><DEDENT>except Exception as e:<EOL><INDENT>flash(str(e), '<STR_LIT:error>')<EOL>return redirect(url_for('<STR_LIT>', group_id=membership.group.id))<EOL><DEDENT>flash(_('<STR_LIT>',<EOL>user=membership.user.email,<EOL>name=membership.group.name), '<STR_LIT:success>')<EOL>return redirect(url_for('<STR_LIT>', group_id=membership.group.id))<EOL><DEDENT>flash(<EOL>_(<EOL>'<STR_LIT>',<EOL>group_name=group.name<EOL>),<EOL>'<STR_LIT:error>'<EOL>)<EOL>return redirect(url_for('<STR_LIT>'))<EOL>", "docstring": "Approve a user.", "id": "f7809:m9"}
{"signature": "@blueprint.route('<STR_LIT>', methods=['<STR_LIT:GET>', '<STR_LIT:POST>'])<EOL>@blueprint.route('<STR_LIT>', methods=['<STR_LIT:GET>', '<STR_LIT:POST>'])<EOL>@register_breadcrumb(<EOL>blueprint, '<STR_LIT>', _('<STR_LIT>'),<EOL>dynamic_list_constructor=lambda:<EOL>[{'<STR_LIT:text>': get_group_name(request.view_args['<STR_LIT>'])},<EOL>{'<STR_LIT:text>': _('<STR_LIT>')}]<EOL>)<EOL>@login_required<EOL>def manage(group_id):", "body": "group = Group.query.get_or_404(group_id)<EOL>form = GroupForm(request.form, obj=group)<EOL>if form.validate_on_submit():<EOL><INDENT>if group.can_edit(current_user):<EOL><INDENT>try:<EOL><INDENT>group.update(**form.data)<EOL>flash(_('<STR_LIT>', name=group.name),<EOL>'<STR_LIT:success>')<EOL><DEDENT>except Exception as e:<EOL><INDENT>flash(str(e), '<STR_LIT:error>')<EOL>return render_template(<EOL>\"<STR_LIT>\",<EOL>form=form,<EOL>group=group,<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>flash(<EOL>_(<EOL>'<STR_LIT>',<EOL>group_name=group.name<EOL>),<EOL>'<STR_LIT:error>'<EOL>)<EOL><DEDENT><DEDENT>return render_template(<EOL>\"<STR_LIT>\",<EOL>form=form,<EOL>group=group,<EOL>)<EOL>", "docstring": "Manage your group.", "id": "f7809:m5"}
{"signature": "def init_config(self, app):", "body": "app.config.setdefault(<EOL>\"<STR_LIT>\",<EOL>app.config.get(\"<STR_LIT>\",<EOL>\"<STR_LIT>\"))<EOL>", "docstring": "Initialize configuration.", "id": "f7810:c0:m2"}
{"signature": "def init_app(self, app):", "body": "self.init_config(app)<EOL>app.register_blueprint(blueprint)<EOL>app.extensions['<STR_LIT>'] = self<EOL>", "docstring": "Flask application initialization.", "id": "f7810:c0:m1"}
{"signature": "def __init__(self, app=None):", "body": "if app:<EOL><INDENT>self.init_app(app)<EOL><DEDENT>", "docstring": "Extension initialization.", "id": "f7810:c0:m0"}
{"signature": "def __call__(self, form, field):", "body": "self.validate_data(form, field)<EOL>emails_org = field.data<EOL>emails = filter(None, emails_org.splitlines())<EOL>for email in emails:<EOL><INDENT>try:<EOL><INDENT>field.data = email<EOL>self.validate_email(form, field)<EOL><DEDENT>except (ValidationError, StopValidation):<EOL><INDENT>raise ValidationError('<STR_LIT>' + email)<EOL><DEDENT>finally:<EOL><INDENT>field.data = emails_org<EOL><DEDENT><DEDENT>", "docstring": "Parse emails and run validators.", "id": "f7811:c0:m1"}
{"signature": "def __call__(self, field, **kwargs):", "body": "html = \"<STR_LIT>\"<EOL>for subfield in field:<EOL><INDENT>label = subfield.label.text<EOL>if (field.default == subfield.data):<EOL><INDENT>subfield.checked = True<EOL><DEDENT>else:<EOL><INDENT>subfield.checked = False<EOL><DEDENT>description = self.descriptions.get(subfield.data, \"<STR_LIT>\")<EOL>html += ('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>') % (subfield(), label, description)<EOL><DEDENT>return HTMLString(html)<EOL>", "docstring": "Render radio group.", "id": "f7813:c0:m1"}
{"signature": "def remove_admin(self, admin):", "body": "return GroupAdmin.delete(self, admin)<EOL>", "docstring": "Remove an admin from group (independent of membership state).\n\n        :param admin: Admin to be removed from group.", "id": "f7814:c3:m9"}
{"signature": "@classmethod<EOL><INDENT>def get_by_name(cls, name):<DEDENT>", "body": "try:<EOL><INDENT>return cls.query.filter_by(name=name).one()<EOL><DEDENT>except NoResultFound:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Query group by a group name.\n\n        :param name: Name of a group to search for.\n        :returns: Group object or None.", "id": "f7814:c3:m4"}
{"signature": "@classmethod<EOL><INDENT>def delete(cls, group, user):<DEDENT>", "body": "with db.session.begin_nested():<EOL><INDENT>cls.query.filter_by(group=group, user_id=user.get_id()).delete()<EOL><DEDENT>", "docstring": "Delete membership.", "id": "f7814:c4:m9"}
{"signature": "@classmethod<EOL><INDENT>def get(cls, group, user):<DEDENT>", "body": "try:<EOL><INDENT>m = cls.query.filter_by(user_id=user.get_id(), group=group).one()<EOL>return m<EOL><DEDENT>except Exception:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Get membership for given user and group.\n\n        :param group: Group object.\n        :param user: User object.\n        :returns: Membership or None.", "id": "f7814:c4:m0"}
{"signature": "def members_count(self):", "body": "return Membership.query_by_group(self).count()<EOL>", "docstring": "Determine members count.\n\n        :returns: Number of memberships.", "id": "f7814:c3:m21"}
{"signature": "def update(self, name=None, description=None, privacy_policy=None,<EOL>subscription_policy=None, is_managed=None):", "body": "with db.session.begin_nested():<EOL><INDENT>if name is not None:<EOL><INDENT>self.name = name<EOL><DEDENT>if description is not None:<EOL><INDENT>self.description = description<EOL><DEDENT>if (<EOL>privacy_policy is not None and<EOL>PrivacyPolicy.validate(privacy_policy)<EOL>):<EOL><INDENT>self.privacy_policy = privacy_policy<EOL><DEDENT>if (<EOL>subscription_policy is not None and<EOL>SubscriptionPolicy.validate(subscription_policy)<EOL>):<EOL><INDENT>self.subscription_policy = subscription_policy<EOL><DEDENT>if is_managed is not None:<EOL><INDENT>self.is_managed = is_managed<EOL><DEDENT>db.session.merge(self)<EOL><DEDENT>return self<EOL>", "docstring": "Update group.\n\n        :param name: Name of group.\n        :param description: Description of group.\n        :param privacy_policy: PrivacyPolicy\n        :param subscription_policy: SubscriptionPolicy\n        :returns: Updated group", "id": "f7814:c3:m3"}
{"signature": "@classmethod<EOL><INDENT>def create(cls, name=None, description='<STR_LIT>', privacy_policy=None,<EOL>subscription_policy=None, is_managed=False, admins=None):<DEDENT>", "body": "assert name<EOL>assert privacy_policy is None or PrivacyPolicy.validate(privacy_policy)<EOL>assert subscription_policy is None or SubscriptionPolicy.validate(subscription_policy)<EOL>assert admins is None or isinstance(admins, list)<EOL>with db.session.begin_nested():<EOL><INDENT>obj = cls(<EOL>name=name,<EOL>description=description,<EOL>privacy_policy=privacy_policy,<EOL>subscription_policy=subscription_policy,<EOL>is_managed=is_managed,<EOL>)<EOL>db.session.add(obj)<EOL>for a in admins or []:<EOL><INDENT>db.session.add(GroupAdmin(<EOL>group=obj, admin_id=a.get_id(),<EOL>admin_type=resolve_admin_type(a)))<EOL><DEDENT><DEDENT>return obj<EOL>", "docstring": "Create a new group.\n\n        :param name: Name of group. Required and must be unique.\n        :param description: Description of group. Default: ``''``\n        :param privacy_policy: PrivacyPolicy\n        :param subscription_policy: SubscriptionPolicy\n        :param admins: list of user and/or group objects. Default: ``[]``\n        :returns: Newly created group\n        :raises: IntegrityError: if group with given name already exists", "id": "f7814:c3:m1"}
{"signature": "def delete(self):", "body": "with db.session.begin_nested():<EOL><INDENT>Membership.query_by_group(self).delete()<EOL>GroupAdmin.query_by_group(self).delete()<EOL>GroupAdmin.query_by_admin(self).delete()<EOL>db.session.delete(self)<EOL><DEDENT>", "docstring": "Delete a group and all associated memberships.", "id": "f7814:c3:m2"}
{"signature": "@classmethod<EOL><INDENT>def query_requests(cls, admin, eager=False):<DEDENT>", "body": "<EOL>if hasattr(admin, '<STR_LIT>') and admin.is_superadmin:<EOL><INDENT>q1 = GroupAdmin.query.with_entities(<EOL>GroupAdmin.group_id)<EOL><DEDENT>else:<EOL><INDENT>q1 = GroupAdmin.query_by_admin(admin).with_entities(<EOL>GroupAdmin.group_id)<EOL><DEDENT>q2 = Membership.query.filter(<EOL>Membership.state == MembershipState.PENDING_ADMIN,<EOL>Membership.id_group.in_(q1),<EOL>)<EOL>q3 = Membership.query_by_user(<EOL>user=admin, state=MembershipState.ACTIVE<EOL>).with_entities(Membership.id_group)<EOL>q4 = GroupAdmin.query.filter(<EOL>GroupAdmin.admin_type == '<STR_LIT>', GroupAdmin.admin_id.in_(q3)<EOL>).with_entities(GroupAdmin.group_id)<EOL>q5 = Membership.query.filter(<EOL>Membership.state == MembershipState.PENDING_ADMIN,<EOL>Membership.id_group.in_(q4))<EOL>query = q2.union(q5)<EOL>return query<EOL>", "docstring": "Get all pending group requests.", "id": "f7814:c4:m4"}
{"signature": "def subscribe(self, user):", "body": "if self.subscription_policy == SubscriptionPolicy.OPEN:<EOL><INDENT>return self.add_member(user)<EOL><DEDENT>elif self.subscription_policy == SubscriptionPolicy.APPROVAL:<EOL><INDENT>return self.add_member(user, state=MembershipState.PENDING_ADMIN)<EOL><DEDENT>elif self.subscription_policy == SubscriptionPolicy.CLOSED:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Subscribe a user to a group (done by users).\n\n        Wrapper around ``add_member()`` which checks subscription policy.\n\n        :param user: User to subscribe.\n        :returns: Newly created Membership or None.", "id": "f7814:c3:m14"}
{"signature": "@classmethod<EOL><INDENT>def search(cls, query, q):<DEDENT>", "body": "query = query.join(User).filter(<EOL>User.email.like('<STR_LIT>'.format(q)),<EOL>)<EOL>return query<EOL>", "docstring": "Modify query so as to include only specific members.\n\n        :param query: Query object.\n        :param str q: Search string.\n        :returns: Query object.", "id": "f7814:c4:m6"}
{"signature": "@classmethod<EOL><INDENT>def query_by_admin(cls, admin):<DEDENT>", "body": "return cls.query.filter_by(<EOL>admin_type=resolve_admin_type(admin), admin_id=admin.get_id())<EOL>", "docstring": "Get all groups for for a specific admin.", "id": "f7814:c5:m4"}
{"signature": "def can_invite_others(self, user):", "body": "if self.is_managed:<EOL><INDENT>return False<EOL><DEDENT>elif self.is_admin(user):<EOL><INDENT>return True<EOL><DEDENT>elif self.subscription_policy != SubscriptionPolicy.CLOSED:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Determine if user can invite people to a group.\n\n        Be aware that this check is independent from the people (users) which\n        are going to be invited. The checked user is the one who invites\n        someone, NOT who is going to be invited.\n\n        :param user: User to be checked.\n        :returns: True or False.", "id": "f7814:c3:m19"}
{"signature": "@classmethod<EOL><INDENT>def query_by_names(cls, names):<DEDENT>", "body": "assert isinstance(names, list)<EOL>return cls.query.filter(cls.name.in_(names))<EOL>", "docstring": "Query group by a list of group names.\n\n        :param list names: List of the group names.\n        :returns: Query object.", "id": "f7814:c3:m5"}
{"signature": "@classmethod<EOL><INDENT>def get(cls, group, admin):<DEDENT>", "body": "try:<EOL><INDENT>ga = cls.query.filter_by(<EOL>group=group, admin_id=admin.get_id(),<EOL>admin_type=resolve_admin_type(admin)).one()<EOL>return ga<EOL><DEDENT>except Exception:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Get specific GroupAdmin object.", "id": "f7814:c5:m1"}
{"signature": "@classmethod<EOL><INDENT>def validate(cls, policy):<DEDENT>", "body": "return policy in [cls.OPEN, cls.APPROVAL, cls.CLOSED]<EOL>", "docstring": "Validate subscription policy value.", "id": "f7814:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def describe(cls, policy):<DEDENT>", "body": "if cls.validate(policy):<EOL><INDENT>return cls.descriptions[policy]<EOL><DEDENT>", "docstring": "Policy description.", "id": "f7814:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def order(cls, query, field, s):<DEDENT>", "body": "if s == '<STR_LIT>':<EOL><INDENT>query = query.order_by(asc(field))<EOL><DEDENT>elif s == '<STR_LIT>':<EOL><INDENT>query = query.order_by(desc(field))<EOL><DEDENT>return query<EOL>", "docstring": "Modify query so as to order the results.\n\n        :param query: Query object.\n        :param str s: Ordering: ``asc`` or ``desc``.\n        :returns: Query object.", "id": "f7814:c4:m7"}
{"signature": "def invite_by_emails(self, emails):", "body": "assert emails is None or isinstance(emails, list)<EOL>results = []<EOL>for email in emails:<EOL><INDENT>try:<EOL><INDENT>user = User.query.filter_by(email=email).one()<EOL>results.append(self.invite(user))<EOL><DEDENT>except NoResultFound:<EOL><INDENT>results.append(None)<EOL><DEDENT><DEDENT>return results<EOL>", "docstring": "Invite users to a group by emails.\n\n        :param list emails: Emails of users that shall be invited.\n        :returns list: Newly created Memberships or Nones.", "id": "f7814:c3:m13"}
{"signature": "def add_admin(self, admin):", "body": "return GroupAdmin.create(self, admin)<EOL>", "docstring": "Invite an admin to a group.\n\n        :param admin: Object to be added as an admin.\n        :returns: GroupAdmin object.", "id": "f7814:c3:m8"}
{"signature": "@classmethod<EOL><INDENT>def query_by_user(cls, user, **kwargs):<DEDENT>", "body": "return cls._filter(<EOL>cls.query.filter_by(user_id=user.get_id()),<EOL>**kwargs<EOL>)<EOL>", "docstring": "Get a user's memberships.", "id": "f7814:c4:m2"}
{"signature": "def create_tmp_dir():", "body": "'''<STR_LIT>'''<EOL>'''<STR_LIT>'''<EOL>global tmp_dir<EOL>tmp_dir = os.path.join('<STR_LIT>', str(uuid.uuid4()))<EOL>if not os.path.exists(tmp_dir):<EOL><INDENT>os.makedirs(tmp_dir)<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\" % tmp_dir)<EOL>return<EOL><DEDENT>", "docstring": "create a unique tmp dir to hold the downloaded local files", "id": "f7827:m4"}
{"signature": "def start_http_server():", "body": "global port_test<EOL>command = '<STR_LIT>' % port_test<EOL>os.system(command)<EOL>time.sleep(<NUM_LIT:1>)<EOL>", "docstring": "start a local http server for testing", "id": "f7827:m2"}
{"signature": "def delete_tmp_dir():", "body": "global tmp_dir<EOL>shutil.rmtree(tmp_dir)<EOL>", "docstring": "delete the tmp directory", "id": "f7830:m3"}
{"signature": "def create_tmp_dir():", "body": "global tmp_dir<EOL>tmp_dir = os.path.join('<STR_LIT>', '<STR_LIT>' + '<STR_LIT:.>' + str(uuid.uuid4()))   <EOL>if not os.path.exists(tmp_dir):<EOL><INDENT>os.makedirs(tmp_dir)<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\" % tmp_dir)<EOL>return<EOL><DEDENT>", "docstring": "create a unique tmp dir to hold the downloaded local files\nif the tmp_dir generated already exists, then simply return\nthe user can simply try again to generate another unique tmp dir\n:return:", "id": "f7831:m3"}
{"signature": "def delete_tmp_dir():", "body": "shutil.rmtree(tmp_dir)<EOL>", "docstring": "delete the tmp directory\n:return:", "id": "f7831:m4"}
{"signature": "def _load(self, metrix):", "body": "if isinstance(metrix, TimeSeries):<EOL><INDENT>return metrix<EOL><DEDENT>if isinstance(metrix, dict):<EOL><INDENT>return TimeSeries(metrix)<EOL><DEDENT>return TimeSeries(utils.read_csv(metrix))<EOL>", "docstring": "Load time series.\n:param timeseries: a TimeSeries, a dictionary or a path to a csv file(str).\n:return TimeSeries: a TimeSeries object.", "id": "f7832:c0:m1"}
{"signature": "def _analyze(self):", "body": "output = defaultdict(list)<EOL>output_by_name = defaultdict(list)<EOL>scores = self.anomaly_detector.get_all_scores()<EOL>if self.anomalies:<EOL><INDENT>for anomaly in self.anomalies:<EOL><INDENT>metrix_scores = scores<EOL>start_t, end_t = anomaly.get_time_window()<EOL>t = anomaly.exact_timestamp<EOL>room = (end_t - start_t) / <NUM_LIT:2><EOL>if not room:<EOL><INDENT>room = <NUM_LIT:30><EOL><DEDENT>extended_start_t = start_t - room<EOL>extended_end_t = end_t + room<EOL>metrix_scores_cropped = metrix_scores.crop(extended_start_t, extended_end_t)<EOL>while len(metrix_scores_cropped) < <NUM_LIT:2>:<EOL><INDENT>extended_start_t = extended_start_t - room<EOL>extended_end_t = extended_end_t + room<EOL>metrix_scores_cropped = metrix_scores.crop(extended_start_t, extended_end_t)<EOL><DEDENT>for entry in self.related_metrices:<EOL><INDENT>try:<EOL><INDENT>entry_correlation_result = Correlator(self.metrix, entry, time_period=(extended_start_t, extended_end_t),<EOL>use_anomaly_score=True).get_correlation_result()<EOL>record = extended_start_t, extended_end_t, entry_correlation_result.__dict__, entry<EOL>record_by_name = extended_start_t, extended_end_t, entry_correlation_result.__dict__<EOL>output[t].append(record)<EOL>output_by_name[entry].append(record_by_name)<EOL><DEDENT>except exceptions.NotEnoughDataPoints:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT><DEDENT>self.output = output<EOL>self.output_by_name = output_by_name<EOL>", "docstring": "Analyzes if a matrix has anomalies.\nIf any anomaly is found, determine if the matrix correlates with any other matrixes.\nTo be implemented.", "id": "f7832:c0:m2"}
{"signature": "def __init__(self, metrix, related_metrices):", "body": "self.metrix = self._load(metrix)<EOL>self.anomaly_detector = AnomalyDetector(metrix)<EOL>self.related_metrices = related_metrices<EOL>self.anomalies = self.anomaly_detector.get_anomalies()<EOL>self._analyze()<EOL>", "docstring": "Initializer\n:param metrix: a TimeSeries, a dictionary or a path to a csv file(str)\n:param list related_metrices: a list of time series.", "id": "f7832:c0:m0"}
{"signature": "def _get_anomaly_scores(self, time_series):", "body": "return AnomalyDetector(time_series, score_only=True).get_all_scores()<EOL>", "docstring": "Get anomaly scores of a time series.\n:param TimeSeries time_series: a time_series.", "id": "f7837:c0:m1"}
{"signature": "def __init__(self, time_series_a, time_series_b, time_period=None, use_anomaly_score=False, algorithm_name=None, algorithm_params=None):", "body": "self.time_series_a = self._load(time_series_a)<EOL>self.time_series_b = self._load(time_series_b)<EOL>if use_anomaly_score:<EOL><INDENT>self.time_series_a = self._get_anomaly_scores(self.time_series_a)<EOL>self.time_series_b = self._get_anomaly_scores(self.time_series_b)<EOL><DEDENT>if time_period:<EOL><INDENT>start_p, end_p = time_period<EOL>try:<EOL><INDENT>self.time_series_a = self.time_series_a.crop(start_p, end_p)<EOL>self.time_series_b = self.time_series_b.crop(start_p, end_p)<EOL><DEDENT>except ValueError:<EOL><INDENT>raise exceptions.NotEnoughDataPoints<EOL><DEDENT><DEDENT>self._sanity_check()<EOL>self.algorithm_params = {'<STR_LIT>': self.time_series_a, '<STR_LIT>': self.time_series_b}<EOL>self._get_algorithm_and_params(algorithm_name, algorithm_params)<EOL>self._correlate()<EOL>", "docstring": "Initializer\n:param time_series_a: a TimeSeries, a dictionary or a path to a csv file(str).\n:param time_series_b: a TimeSeries, a dictionary or a path to a csv file(str).\n:param time_period: a tuple (start, end) representing a data period for considering correlation.\n:param str algorithm_name: name of the algorithm to use.\n:param dict algorithm_params: additional params for the specific algorithm.", "id": "f7837:c0:m0"}
{"signature": "def _get_algorithm_and_params(self, algorithm_name, algorithm_params):", "body": "algorithm_name = algorithm_name or CORRELATOR_ALGORITHM<EOL>try:<EOL><INDENT>self.algorithm = correlator_algorithms[algorithm_name]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise exceptions.AlgorithmNotFound('<STR_LIT>' + str(algorithm_name) + '<STR_LIT>')<EOL><DEDENT>if algorithm_params:<EOL><INDENT>if not isinstance(algorithm_params, dict):<EOL><INDENT>raise exceptions.InvalidDataFormat('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.algorithm_params = dict(algorithm_params.items() + self.algorithm_params.items())<EOL><DEDENT><DEDENT>", "docstring": "Get the specific algorithm and merge the algorithm params.\n:param str algorithm: name of the algorithm to use.\n:param dict algorithm_params: additional params for the specific algorithm.", "id": "f7837:c0:m3"}
{"signature": "def is_correlated(self, threshold=None):", "body": "return self.correlation_result if self.correlation_result.coefficient >= threshold else False<EOL>", "docstring": "Compare with a threshold to determine whether two timeseries correlate to each other.\n:return: a CorrelationResult object if two time series correlate otherwise false.", "id": "f7837:c0:m7"}
{"signature": "def _load(self, time_series):", "body": "if isinstance(time_series, TimeSeries):<EOL><INDENT>return time_series<EOL><DEDENT>if isinstance(time_series, dict):<EOL><INDENT>return TimeSeries(time_series)<EOL><DEDENT>return TimeSeries(utils.read_csv(time_series))<EOL>", "docstring": "Load time series into a TimeSeries object.\n:param timeseries: a TimeSeries, a dictionary or a path to a csv file(str).\n:return TimeSeries: a TimeSeries object.", "id": "f7837:c0:m2"}
{"signature": "def _sanity_check(self):", "body": "if len(self.time_series_a) < <NUM_LIT:2> or len(self.time_series_b) < <NUM_LIT:2>:<EOL><INDENT>raise exceptions.NotEnoughDataPoints('<STR_LIT>')<EOL><DEDENT>", "docstring": "Check if the time series have more than two data points.", "id": "f7837:c0:m4"}
{"signature": "def get_anomalies(self):", "body": "return getattr(self, '<STR_LIT>', [])<EOL>", "docstring": "Get anomalies.\n:return list: a list of Anomaly objects.", "id": "f7838:c0:m5"}
{"signature": "def get_all_scores(self):", "body": "return getattr(self, '<STR_LIT>', None)<EOL>", "docstring": "Get anomaly scores.\n:return: a TimeSeries object represents anomaly scores.", "id": "f7838:c0:m6"}
{"signature": "def _get_algorithm(self, algorithm_name):", "body": "try:<EOL><INDENT>algorithm = anomaly_detector_algorithms[algorithm_name]<EOL>return algorithm<EOL><DEDENT>except KeyError:<EOL><INDENT>raise exceptions.AlgorithmNotFound('<STR_LIT>' + str(algorithm_name) + '<STR_LIT>')<EOL><DEDENT>", "docstring": "Get the specific algorithm.\n:param str algorithm_name: name of the algorithm to use(file name).\n:return: algorithm object.", "id": "f7838:c0:m2"}
{"signature": "def _compute_derivatives(self):", "body": "derivatives = []<EOL>for i, (timestamp, value) in enumerate(self.time_series_items):<EOL><INDENT>if i > <NUM_LIT:0>:<EOL><INDENT>pre_item = self.time_series_items[i - <NUM_LIT:1>]<EOL>pre_timestamp = pre_item[<NUM_LIT:0>]<EOL>pre_value = pre_item[<NUM_LIT:1>]<EOL>td = timestamp - pre_timestamp<EOL>derivative = (value - pre_value) / td if td != <NUM_LIT:0> else value - pre_value<EOL>derivative = abs(derivative)<EOL>derivatives.append(derivative)<EOL><DEDENT><DEDENT>if derivatives:<EOL><INDENT>derivatives.insert(<NUM_LIT:0>, derivatives[<NUM_LIT:0>])<EOL><DEDENT>self.derivatives = derivatives<EOL>", "docstring": "Compute derivatives of the time series.", "id": "f7839:c0:m1"}
{"signature": "def _set_scores(self):", "body": "anom_scores_ema = self.exp_avg_detector.run()<EOL>anom_scores_deri = self.derivative_detector.run()<EOL>anom_scores = {}<EOL>for timestamp in anom_scores_ema.timestamps:<EOL><INDENT>anom_scores[timestamp] = max(anom_scores_ema[timestamp],<EOL>anom_scores_ema[timestamp] * DEFAULT_DETECTOR_EMA_WEIGHT + anom_scores_deri[timestamp] * (<NUM_LIT:1> - DEFAULT_DETECTOR_EMA_WEIGHT))<EOL>if anom_scores_ema[timestamp] > DEFAULT_DETECTOR_EMA_SIGNIFICANT:<EOL><INDENT>anom_scores[timestamp] = max(anom_scores[timestamp], anom_scores_deri[timestamp])<EOL><DEDENT><DEDENT>self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))<EOL>", "docstring": "Set anomaly scores using a weighted sum.", "id": "f7840:c0:m1"}
{"signature": "def _set_scores(self):", "body": "anom_scores = {}<EOL>for i, (timestamp, value) in enumerate(self.time_series.items()):<EOL><INDENT>baseline_value = self.baseline_time_series[i]<EOL>if baseline_value > <NUM_LIT:0>:<EOL><INDENT>diff_percent = <NUM_LIT:100> * (value - baseline_value) / baseline_value<EOL><DEDENT>elif value > <NUM_LIT:0>:<EOL><INDENT>diff_percent = <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>diff_percent = <NUM_LIT:0.0><EOL><DEDENT>anom_scores[timestamp] = <NUM_LIT:0.0><EOL>if self.percent_threshold_upper and diff_percent > <NUM_LIT:0> and diff_percent > self.percent_threshold_upper:<EOL><INDENT>anom_scores[timestamp] = diff_percent<EOL><DEDENT>if self.percent_threshold_lower and diff_percent < <NUM_LIT:0> and diff_percent < self.percent_threshold_lower:<EOL><INDENT>anom_scores[timestamp] = -<NUM_LIT:1> * diff_percent<EOL><DEDENT><DEDENT>self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))<EOL>", "docstring": "Compute anomaly scores for the time series\nThis algorithm just takes the diff of threshold with current value as anomaly score", "id": "f7842:c0:m1"}
{"signature": "def _set_scores(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Compute anomaly scores for the time series.", "id": "f7844:c0:m3"}
{"signature": "def get_scores(self):", "body": "return self.anom_scores<EOL>", "docstring": "Get anomaly scores for the time series.\n:return TimeSeries: a TimeSeries representation of the anomaly scores.", "id": "f7844:c0:m4"}
{"signature": "def run(self):", "body": "self._set_scores()<EOL>return self.anom_scores<EOL>", "docstring": "Run the algorithm to get anomalies.\nreturn list: a list of Anomaly objects.", "id": "f7844:c0:m1"}
{"signature": "def _construct_all_SAX_chunk_dict(self):", "body": "lag_dicts = {}<EOL>fut_dicts = {}<EOL>length = self.time_series_length<EOL>lws = self.lag_window_size<EOL>fws = self.future_window_size<EOL>chunk_size = self.chunk_size<EOL>for i in range(length):<EOL><INDENT>if i < lws or i > length - fws:<EOL><INDENT>lag_dicts[i] = None<EOL><DEDENT>else:<EOL><INDENT>if lag_dicts[i - <NUM_LIT:1>] is None:<EOL><INDENT>lag_dict = self._construct_SAX_chunk_dict(self.sax[i - lws: i])<EOL>lag_dicts[i] = lag_dict<EOL>lw_leave_chunk = self.sax[<NUM_LIT:0>:chunk_size]<EOL>lw_enter_chunk = self.sax[i - chunk_size + <NUM_LIT:1>: i + <NUM_LIT:1>]<EOL>fut_dict = self._construct_SAX_chunk_dict(self.sax[i: i + fws])<EOL>fut_dicts[i] = fut_dict<EOL>fw_leave_chunk = self.sax[i: i + chunk_size]<EOL>fw_enter_chunk = self.sax[i + fws + <NUM_LIT:1> - chunk_size: i + fws + <NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>lag_dict = copy(lag_dicts[i - <NUM_LIT:1>])<EOL>lag_dict[lw_leave_chunk] -= <NUM_LIT:1><EOL>lag_dict[lw_enter_chunk] += <NUM_LIT:1><EOL>lag_dicts[i] = lag_dict<EOL>fut_dict = copy(fut_dicts[i - <NUM_LIT:1>])<EOL>fut_dict[fw_leave_chunk] -= <NUM_LIT:1><EOL>fut_dict[fw_enter_chunk] += <NUM_LIT:1><EOL>fut_dicts[i] = fut_dict<EOL>lw_leave_chunk = self.sax[i - lws: i - lws + chunk_size]<EOL>lw_enter_chunk = self.sax[i - chunk_size + <NUM_LIT:1>: i + <NUM_LIT:1>]<EOL>fw_leave_chunk = self.sax[i: i + chunk_size]<EOL>fw_enter_chunk = self.sax[i + fws + <NUM_LIT:1> - chunk_size: i + fws + <NUM_LIT:1>]<EOL><DEDENT><DEDENT><DEDENT>self.lag_dicts = lag_dicts<EOL>self.fut_dicts = fut_dicts<EOL>", "docstring": "Construct the chunk dicts for lagging window and future window at each index.\n e.g: Suppose we have a SAX sequence as '1234567890', both window sizes are 3, and the chunk size is 2.\n The first index that has a lagging window is 3.\n For index equals 3, the lagging window has sequence '123',\n the chunk to leave lagging window(lw_leave_chunk) is '12', and the chunk to enter lagging window(lw_enter_chunk) is '34'.\n Therefore, given chunk dicts at i, to compute chunk dicts at i+1, simply decrement the count for lw_leave_chunk,\n and increment the count for lw_enter_chunk from chunk dicts at i. Same method applies to future window as well.", "id": "f7845:c0:m5"}
{"signature": "def _generate_SAX(self):", "body": "sections = {}<EOL>self.value_min = self.time_series.min()<EOL>self.value_max = self.time_series.max()<EOL>section_height = (self.value_max - self.value_min) / self.precision<EOL>for section_number in range(self.precision):<EOL><INDENT>sections[section_number] = self.value_min + section_number * section_height<EOL><DEDENT>self.sax = '<STR_LIT>'.join(self._generate_SAX_single(sections, value) for value in self.time_series.values)<EOL>", "docstring": "Generate SAX representation for all values of the time series.", "id": "f7845:c0:m3"}
{"signature": "def __init__(self, time_series, baseline_time_series=None, precision=None, lag_window_size=None,<EOL>future_window_size=None, chunk_size=None):", "body": "super(BitmapDetector, self).__init__(self.__class__.__name__, time_series, baseline_time_series)<EOL>self.precision = precision if precision and precision > <NUM_LIT:0> else DEFAULT_BITMAP_PRECISION<EOL>self.chunk_size = chunk_size if chunk_size and chunk_size > <NUM_LIT:0> else DEFAULT_BITMAP_CHUNK_SIZE<EOL>if lag_window_size:<EOL><INDENT>self.lag_window_size = lag_window_size<EOL><DEDENT>else:<EOL><INDENT>self.lag_window_size = int(self.time_series_length * DEFAULT_BITMAP_LAGGING_WINDOW_SIZE_PCT)<EOL><DEDENT>if future_window_size:<EOL><INDENT>self.future_window_size = future_window_size<EOL><DEDENT>else:<EOL><INDENT>self.future_window_size = int(self.time_series_length * DEFAULT_BITMAP_LEADING_WINDOW_SIZE_PCT)<EOL><DEDENT>self._sanity_check()<EOL>", "docstring": "Initializer\n:param TimeSeries time_series: a TimeSeries object.\n:param TimeSeries baseline_time_series: baseline TimeSeries.\n:param int precision: how many sections to categorize values.\n:param int lag_window_size: lagging window size.\n:param int future_window_size: future window size.\n:param int chunk_size: chunk size.", "id": "f7845:c0:m0"}
{"signature": "def _construct_SAX_chunk_dict(self, sax):", "body": "frequency = defaultdict(int)<EOL>chunk_size = self.chunk_size<EOL>length = len(sax)<EOL>for i in range(length):<EOL><INDENT>if i + chunk_size <= length:<EOL><INDENT>chunk = sax[i: i + chunk_size]<EOL>frequency[chunk] += <NUM_LIT:1><EOL><DEDENT><DEDENT>return frequency<EOL>", "docstring": "Form a chunk frequency dictionary from a SAX representation.\n:param str sax: a SAX representation.\n:return dict: frequency dictionary for chunks in the SAX representation.", "id": "f7845:c0:m4"}
{"signature": "def _compute_anom_score_between_two_windows(self, i):", "body": "lag_window_chunk_dict = self.lag_dicts[i]<EOL>future_window_chunk_dict = self.fut_dicts[i]<EOL>score = <NUM_LIT:0><EOL>for chunk in lag_window_chunk_dict:<EOL><INDENT>if chunk in future_window_chunk_dict:<EOL><INDENT>score += math.pow(future_window_chunk_dict[chunk] - lag_window_chunk_dict[chunk], <NUM_LIT:2>)<EOL><DEDENT>else:<EOL><INDENT>score += math.pow(lag_window_chunk_dict[chunk], <NUM_LIT:2>)<EOL><DEDENT><DEDENT>for chunk in future_window_chunk_dict:<EOL><INDENT>if chunk not in lag_window_chunk_dict:<EOL><INDENT>score += math.pow(future_window_chunk_dict[chunk], <NUM_LIT:2>)<EOL><DEDENT><DEDENT>return score<EOL>", "docstring": "Compute distance difference between two windows' chunk frequencies,\nwhich is then marked as the anomaly score of the data point on the window boundary in the middle.\n:param int i: index of the data point between two windows.\n:return float: the anomaly score.", "id": "f7845:c0:m6"}
{"signature": "def _find_first_bigger(self, timestamps, target, lower_bound, upper_bound):", "body": "while lower_bound < upper_bound:<EOL><INDENT>pos = lower_bound + (upper_bound - lower_bound) / <NUM_LIT:2><EOL>if timestamps[pos] > target:<EOL><INDENT>upper_bound = pos<EOL><DEDENT>else:<EOL><INDENT>lower_bound = pos + <NUM_LIT:1><EOL><DEDENT><DEDENT>return pos<EOL>", "docstring": "Find the first element in timestamps whose value is bigger than target.\nparam list values: list of timestamps(epoch number).\nparam target: target value.\nparam lower_bound: lower bound for binary search.\nparam upper_bound: upper bound for binary search.", "id": "f7847:c0:m3"}
{"signature": "def __init__(self, time_series_a, time_series_b, max_shift_seconds=None, shift_impact=None):", "body": "super(CrossCorrelator, self).__init__(self.__class__.__name__, time_series_a, time_series_b)<EOL>self.shift_impact = shift_impact or DEFAULT_SHIFT_IMPACT<EOL>if max_shift_seconds is not None:<EOL><INDENT>self.max_shift_milliseconds = max_shift_seconds<EOL><DEDENT>else:<EOL><INDENT>self.max_shift_milliseconds = DEFAULT_ALLOWED_SHIFT_SECONDS * <NUM_LIT:1000><EOL><DEDENT>", "docstring": "Initializer\n:param TimeSeries time_series_a: TimeSeries a.\n:param TimeSeries time_series_b: TimeSeries b.\n:param int max_shift_seconds: allowed maximal shift in seconds.\n:param time_period: if given, correlate the data inside the time period only.", "id": "f7847:c0:m0"}
{"signature": "def _detect_correlation(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Detect correlation.", "id": "f7849:c0:m1"}
{"signature": "def get_correlation_result(self):", "body": "return self.correlation_result<EOL>", "docstring": "Get correlation result.\n:return CorrelationResult: a CorrelationResult object represents the correlation result.", "id": "f7849:c0:m2"}
{"signature": "def __init__(self, class_name, time_series_a, time_series_b):", "body": "self.class_name = class_name<EOL>self.time_series_a = time_series_a<EOL>self.time_series_b = time_series_b<EOL>", "docstring": "Initializer\n:param class_name: name of extended class.\n:param TimeSeries time_series_a: TimeSeries a.\n:param TimeSeries time_series_b: TimeSeries b.", "id": "f7849:c0:m0"}
{"signature": "def run(self):", "body": "self._detect_correlation()<EOL>return self.correlation_result<EOL>", "docstring": "Execute algorithm.\n:return CorrelationResult: a CorrelationResult object represents the correlation result.", "id": "f7849:c0:m3"}
{"signature": "def get_root_causes(self):", "body": "return getattr(self, '<STR_LIT>', None)<EOL>", "docstring": "Get root causes.\n:return dict: a dict represents root causes for each anomaly.", "id": "f7850:c0:m2"}
{"signature": "def to_epoch(t_str):", "body": "try:<EOL><INDENT>t = float(t_str)<EOL>return t<EOL><DEDENT>except:<EOL><INDENT>for format in constants.TIMESTAMP_STR_FORMATS:<EOL><INDENT>try:<EOL><INDENT>t = datetime.datetime.strptime(t_str, format)<EOL>return float(time.mktime(t.utctimetuple()) * <NUM_LIT> + t.microsecond / <NUM_LIT>)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>raise exceptions.InvalidDataFormat<EOL>", "docstring": "Covert a timestamp string to an epoch number.\n:param str t_str: a timestamp string.\n:return int: epoch number of the timestamp.", "id": "f7851:m2"}
{"signature": "def read_csv(csv_name):", "body": "data = {}<EOL>if not isinstance(csv_name, (str, unicode)):<EOL><INDENT>raise exceptions.InvalidDataFormat('<STR_LIT>')<EOL><DEDENT>with open(csv_name, '<STR_LIT:r>') as csv_data:<EOL><INDENT>reader = csv.reader(csv_data, delimiter='<STR_LIT:U+002C>', quotechar='<STR_LIT:|>')<EOL>for row in reader:<EOL><INDENT>try:<EOL><INDENT>key = to_epoch(row[<NUM_LIT:0>])<EOL>value = float(row[<NUM_LIT:1>])<EOL>data[key] = value<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>return data<EOL>", "docstring": "Read data from a csv file into a dictionary.\n:param str csv_name: path to a csv file.\n:return dict: a dictionary represents the data in file.", "id": "f7851:m1"}
{"signature": "def __init__(self, start_timestamp, end_timestamp, anomaly_score, exact_timestamp):", "body": "self.start_timestamp = start_timestamp<EOL>self.end_timestamp = end_timestamp<EOL>self.anomaly_score = anomaly_score<EOL>self.exact_timestamp = exact_timestamp<EOL>", "docstring": "Construct an anomaly object.\n:param:start_timestamp: start time of the anomaly period.\n:param:end_timestamp: end time of the anomaly period.\n:param:anomly_score: the score of the anomaly.\n:param:exact_timestamp: the timestamp within the period where the anomaly likely happened.", "id": "f7852:c0:m0"}
{"signature": "def _generic_binary_op(self, other, op):", "body": "output = {}<EOL>if isinstance(other, TimeSeries):<EOL><INDENT>for key, value in self.items():<EOL><INDENT>if key in other:<EOL><INDENT>try:<EOL><INDENT>result = op(value, other[key])<EOL>if result is NotImplemented:<EOL><INDENT>other_type = type(other[key])<EOL>other_op = vars(other_type).get(op.__name__)<EOL>if other_op:<EOL><INDENT>output[key] = other_op(other_type(value), other[key])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>output[key] = result<EOL><DEDENT><DEDENT>except ZeroDivisionError:<EOL><INDENT>continue<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for key, value in self.items():<EOL><INDENT>try:<EOL><INDENT>result = op(value, other)<EOL>if result is NotImplemented:<EOL><INDENT>other_type = type(other)<EOL>other_op = vars(other_type).get(op.__name__)<EOL>if other_op:<EOL><INDENT>output[key] = other_op(other_type(value), other)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>output[key] = result<EOL><DEDENT><DEDENT>except ZeroDivisionError:<EOL><INDENT>continue<EOL><DEDENT><DEDENT><DEDENT>if output:<EOL><INDENT>return TimeSeries(output)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Perform the method operation specified in the op parameter on the values\nwithin the instance's time series values and either another time series\nor a constant number value.\n\n:param other: Time series of values or a constant number to use in calculations with instance's time series.\n:param func op: The method to perform the calculation between the values.\n:return: :class:`TimeSeries` object.", "id": "f7853:c0:m25"}
{"signature": "def percentile(self, n, default=None):", "body": "return numpy.asscalar(numpy.percentile(self.values, n)) if self.values else default<EOL>", "docstring": "Calculate the Nth Percentile value over the time series.\n\n:param int n: Integer value of the percentile to calculate.\n:param default: Value to return as a default should the calculation not be possible.\n:return: Float representing the Nth percentile value or `None`.", "id": "f7853:c0:m36"}
{"signature": "def max(self, default=None):", "body": "return numpy.asscalar(numpy.max(self.values)) if self.values else default<EOL>", "docstring": "Calculate the maximum value over the time series.\n\n:param default: Value to return as a default should the calculation not be possible.\n:return: Float representing the maximum value or `None`.", "id": "f7853:c0:m34"}
{"signature": "def sum(self, default=None):", "body": "return numpy.asscalar(numpy.sum(self.values)) if self.values else default<EOL>", "docstring": "Calculate the sum of all the values in the times series.\n\n:param default: Value to return as a default should the calculation not be possible.\n:return: Float representing the sum or `None`.", "id": "f7853:c0:m38"}
{"signature": "def smooth(self, smoothing_factor):", "body": "forward_smooth = {}<EOL>backward_smooth = {}<EOL>output = {}<EOL>if self:<EOL><INDENT>pre = self.values[<NUM_LIT:0>]<EOL>next = self.values[-<NUM_LIT:1>]<EOL>for key, value in self.items():<EOL><INDENT>forward_smooth[key] = smoothing_factor * pre + (<NUM_LIT:1> - smoothing_factor) * value<EOL>pre = forward_smooth[key]<EOL><DEDENT>for key, value in reversed(self.items()):<EOL><INDENT>backward_smooth[key] = smoothing_factor * next + (<NUM_LIT:1> - smoothing_factor) * value<EOL>next = backward_smooth[key]<EOL><DEDENT>for key in forward_smooth.keys():<EOL><INDENT>output[key] = (forward_smooth[key] + backward_smooth[key]) / <NUM_LIT:2><EOL><DEDENT><DEDENT>return TimeSeries(output)<EOL>", "docstring": "return a new time series which is a exponential smoothed version of the original data series.\nsoomth forward once, backward once, and then take the average.\n\n:param float smoothing_factor: smoothing factor\n:return: :class:`TimeSeries` object.", "id": "f7853:c0:m28"}
{"signature": "@property<EOL><INDENT>def end(self):<DEDENT>", "body": "return max(self.timestamps) if self.timestamps else None<EOL>", "docstring": "Return the latest timestamp in the time series.", "id": "f7853:c0:m2"}
{"signature": "def min(self, default=None):", "body": "return numpy.asscalar(numpy.min(self.values)) if self.values else default<EOL>", "docstring": "Calculate the minimum value over the time series.\n\n:param default: Value to return as a default should the calculation not be possible.\n:return: Float representing the maximum value or `None`.", "id": "f7853:c0:m35"}
{"signature": "def align(self, other):", "body": "if isinstance(other, TimeSeries):<EOL><INDENT>aligned, other_aligned = {}, {}<EOL>i, other_i = self.iteritems_silent(), other.iteritems_silent()<EOL>item, other_item = i.next(), other_i.next()<EOL>while item and other_item:<EOL><INDENT>timestamp, value = item<EOL>other_timestamp, other_value = other_item<EOL>if timestamp == other_timestamp:<EOL><INDENT>aligned[timestamp] = value<EOL>other_aligned[other_timestamp] = other_value<EOL>item = i.next()<EOL>other_item = other_i.next()<EOL><DEDENT>elif timestamp < other_timestamp:<EOL><INDENT>aligned[timestamp] = value<EOL>other_aligned[timestamp] = other_value<EOL>item = i.next()<EOL><DEDENT>else:<EOL><INDENT>aligned[other_timestamp] = value<EOL>other_aligned[other_timestamp] = other_value<EOL>other_item = other_i.next()<EOL><DEDENT><DEDENT>while item:<EOL><INDENT>timestamp, value = item<EOL>aligned[timestamp] = value<EOL>other_aligned[timestamp] = other.values[-<NUM_LIT:1>]<EOL>item = i.next()<EOL><DEDENT>while other_item:<EOL><INDENT>other_timestamp, other_value = other_item<EOL>aligned[other_timestamp] = self.values[-<NUM_LIT:1>]<EOL>other_aligned[other_timestamp] = other_value<EOL>other_item = other_i.next()<EOL><DEDENT>return TimeSeries(aligned), TimeSeries(other_aligned)<EOL><DEDENT>", "docstring": "Align two time series so that len(self) == len(other) and self.timstamps == other.timestamps.\n\n:return: :tuple:(`TimeSeries` object(the aligned self), `TimeSeries` object(the aligned other))", "id": "f7853:c0:m27"}
{"signature": "def graph_data_on_the_same_graph(list_of_plots, output_directory, resource_path, output_filename):", "body": "maximum_yvalue = -float('<STR_LIT>')<EOL>minimum_yvalue = float('<STR_LIT>')<EOL>plots = curate_plot_list(list_of_plots)<EOL>plot_count = len(plots)<EOL>if plot_count == <NUM_LIT:0>:<EOL><INDENT>return False, None<EOL><DEDENT>graph_height, graph_width, graph_title = get_graph_metadata(plots)<EOL>current_plot_count = <NUM_LIT:0><EOL>fig, axis = plt.subplots()<EOL>fig.set_size_inches(graph_width, graph_height)<EOL>if plot_count < <NUM_LIT:2>:<EOL><INDENT>fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET)<EOL><DEDENT>else:<EOL><INDENT>fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET,<EOL>right=CONSTANTS.SUBPLOT_RIGHT_OFFSET - CONSTANTS.Y_AXIS_OFFSET * (plot_count - <NUM_LIT:2>))<EOL><DEDENT>for plot in plots:<EOL><INDENT>current_plot_count += <NUM_LIT:1><EOL>logger.info('<STR_LIT>' + plot.input_csv + '<STR_LIT>' + output_filename + '<STR_LIT>')<EOL>xval, yval = numpy.loadtxt(plot.input_csv, unpack=True, delimiter='<STR_LIT:U+002C>')<EOL>axis.plot(xval, yval, linestyle='<STR_LIT:->', marker=None, color=get_current_color(current_plot_count), label=plot.plot_label)<EOL>axis.legend()<EOL>maximum_yvalue = max(maximum_yvalue, numpy.amax(yval) * (<NUM_LIT:1.0> + CONSTANTS.ZOOM_FACTOR * current_plot_count))<EOL>minimum_yvalue = min(minimum_yvalue, numpy.amin(yval) * (<NUM_LIT:1.0> - CONSTANTS.ZOOM_FACTOR * current_plot_count))<EOL><DEDENT>axis.yaxis.set_ticks_position('<STR_LIT:left>')<EOL>axis.set_xlabel(plots[<NUM_LIT:0>].x_label)<EOL>axis.set_ylabel(plots[<NUM_LIT:0>].y_label, fontsize=CONSTANTS.Y_LABEL_FONTSIZE)<EOL>axis.set_ylim([minimum_yvalue, maximum_yvalue])<EOL>axis.yaxis.grid(True)<EOL>axis.xaxis.grid(True)<EOL>axis.set_title(graph_title)<EOL>plot_file_name = os.path.join(output_directory, output_filename + 
\"<STR_LIT>\")<EOL>fig.savefig(plot_file_name)<EOL>plt.close()<EOL>with open(os.path.join(output_directory, output_filename + '<STR_LIT>'), '<STR_LIT:w>') as div_file:<EOL><INDENT>div_file.write('<STR_LIT>' + os.path.basename(plot_file_name).replace(\"<STR_LIT>\", \"<STR_LIT>\").replace(\"<STR_LIT>\", \"<STR_LIT>\") + '<STR_LIT>' +<EOL>resource_path + '<STR_LIT:/>' + os.path.basename(plot_file_name) + '<STR_LIT>' + os.path.basename(plot_file_name) +<EOL>'<STR_LIT>' + os.path.basename(plot_file_name) + '<STR_LIT>')<EOL><DEDENT>return True, os.path.join(output_directory, output_filename + '<STR_LIT>')<EOL>", "docstring": "graph_data_on_the_same_graph: put a list of plots on the same graph: currently it supports CDF", "id": "f7860:m6"}
{"signature": "def highlight_region(plt, start_x, end_x):", "body": "start_x = convert_to_mdate(start_x)<EOL>end_x = convert_to_mdate(end_x)<EOL>plt.axvspan(start_x, end_x, color=CONSTANTS.HIGHLIGHT_COLOR, alpha=CONSTANTS.HIGHLIGHT_ALPHA)<EOL>", "docstring": "Highlight a region on the chart between the specified start and end x-co-ordinates.\nparam pyplot plt: matplotlibk pyplot which contains the charts to be highlighted\nparam string start_x : epoch time millis\nparam string end_x : epoch time millis", "id": "f7860:m4"}
{"signature": "def collect(self):", "body": "report_count = <NUM_LIT:0><EOL>if self.status != '<STR_LIT:OK>':<EOL><INDENT>return False<EOL><DEDENT>diff_stats = set(self.reports[<NUM_LIT:0>].stats) & set(self.reports[<NUM_LIT:1>].stats)<EOL>if diff_stats:<EOL><INDENT>self.reports[<NUM_LIT:0>].stats = diff_stats<EOL>self.reports[<NUM_LIT:1>].stats = diff_stats<EOL><DEDENT>else:<EOL><INDENT>self.status = '<STR_LIT>'<EOL>logger.error('<STR_LIT>')<EOL>return False<EOL><DEDENT>for report in self.reports:<EOL><INDENT>report.label = report_count<EOL>report_count += <NUM_LIT:1><EOL>report.local_location = os.path.join(self.resource_directory, str(report.label))<EOL>try:<EOL><INDENT>os.makedirs(report.local_location)<EOL><DEDENT>except OSError as exeption:<EOL><INDENT>if exeption.errno != errno.EEXIST:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>if report.remote_location != '<STR_LIT>':<EOL><INDENT>naarad.httpdownload.download_url_list(map(lambda x: report.remote_location + '<STR_LIT:/>' + self.resource_path + '<STR_LIT:/>' + x, report.stats), report.local_location)<EOL><DEDENT>else:<EOL><INDENT>for filename in report.stats:<EOL><INDENT>shutil.copy(os.path.join(os.path.join(report.location, self.resource_path), filename), report.local_location)<EOL><DEDENT><DEDENT><DEDENT>return True<EOL>", "docstring": "Identify what summary stats exist in both the diffed reports and download them to the diff report resources directory\n:return: True/False : return status of whether the download of summary stats succeeded.", "id": "f7861:c0:m8"}
{"signature": "def generate_diff_html(self):", "body": "if not os.path.exists(self.resource_directory):<EOL><INDENT>os.makedirs(self.resource_directory)<EOL><DEDENT>self.copy_local_includes()<EOL>div_html = '<STR_LIT>'<EOL>for plot_div in sorted(self.plot_files):<EOL><INDENT>with open(plot_div, '<STR_LIT:r>') as div_file:<EOL><INDENT>div_html += '<STR_LIT:\\n>' + div_file.read()<EOL><DEDENT><DEDENT>template_loader = FileSystemLoader(self.get_resources_location())<EOL>template_environment = Environment(loader=template_loader)<EOL>template_environment.filters['<STR_LIT>'] = naarad.utils.sanitize_string<EOL>diff_html = template_environment.get_template(CONSTANTS.TEMPLATE_HEADER).render(custom_stylesheet_includes=CONSTANTS.STYLESHEET_INCLUDES,<EOL>custom_javascript_includes=CONSTANTS.JAVASCRIPT_INCLUDES,<EOL>resource_path=self.resource_path,<EOL>report_title='<STR_LIT>') + '<STR_LIT:\\n>'<EOL>diff_html += template_environment.get_template(CONSTANTS.TEMPLATE_DIFF_PAGE).render(diff_data=self.diff_data, plot_div_content=div_html,<EOL>reports=self.reports, sla_failure_list=self.sla_failure_list,<EOL>sla_map=self.sla_map) + '<STR_LIT:\\n>'<EOL>diff_html += template_environment.get_template(CONSTANTS.TEMPLATE_FOOTER).render()<EOL>return diff_html<EOL>", "docstring": "Generate the summary diff report html from template\n:return: generated html to be written to disk", "id": "f7861:c0:m4"}
{"signature": "def plot_diff(self, graphing_library='<STR_LIT>'):", "body": "diff_datasource = sorted(set(self.reports[<NUM_LIT:0>].datasource) & set(self.reports[<NUM_LIT:1>].datasource))<EOL>graphed = False<EOL>for submetric in diff_datasource:<EOL><INDENT>baseline_csv = naarad.utils.get_default_csv(self.reports[<NUM_LIT:0>].local_location, (submetric + '<STR_LIT>'))<EOL>current_csv = naarad.utils.get_default_csv(self.reports[<NUM_LIT:1>].local_location, (submetric + '<STR_LIT>'))<EOL>if (not (naarad.utils.is_valid_file(baseline_csv) & naarad.utils.is_valid_file(current_csv))):<EOL><INDENT>continue<EOL><DEDENT>baseline_plot = PD(input_csv=baseline_csv, csv_column=<NUM_LIT:1>, series_name=submetric, y_label=submetric, precision=None, graph_height=<NUM_LIT>, graph_width=<NUM_LIT>,<EOL>graph_type='<STR_LIT>', plot_label='<STR_LIT>', x_label='<STR_LIT>')<EOL>current_plot = PD(input_csv=current_csv, csv_column=<NUM_LIT:1>, series_name=submetric, y_label=submetric, precision=None, graph_height=<NUM_LIT>, graph_width=<NUM_LIT>,<EOL>graph_type='<STR_LIT>', plot_label='<STR_LIT>', x_label='<STR_LIT>')<EOL>graphed, div_file = Diff.graphing_modules[graphing_library].graph_data_on_the_same_graph([baseline_plot, current_plot],<EOL>os.path.join(self.output_directory, self.resource_path),<EOL>self.resource_path, (submetric + '<STR_LIT>'))<EOL>if graphed:<EOL><INDENT>self.plot_files.append(div_file)<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Generate CDF diff plots of the submetrics", "id": "f7861:c0:m9"}
{"signature": "def copy_local_includes(self):", "body": "resource_folder = self.get_resources_location()<EOL>for stylesheet in self.stylesheet_includes:<EOL><INDENT>if ('<STR_LIT:http>' not in stylesheet) and naarad.utils.is_valid_file(os.path.join(resource_folder, stylesheet)):<EOL><INDENT>shutil.copy(os.path.join(resource_folder, stylesheet), self.resource_directory)<EOL><DEDENT><DEDENT>for javascript in self.javascript_includes:<EOL><INDENT>if ('<STR_LIT:http>' not in javascript) and naarad.utils.is_valid_file(os.path.join(resource_folder, javascript)):<EOL><INDENT>shutil.copy(os.path.join(resource_folder, javascript), self.resource_directory)<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Copy local js/css includes from naarad resources to the report/resources directory\n:return: None", "id": "f7861:c0:m2"}
{"signature": "def generate_client_charting_page(self, data_sources):", "body": "if not os.path.exists(self.resource_directory):<EOL><INDENT>os.makedirs(self.resource_directory)<EOL><DEDENT>self.copy_local_includes()<EOL>template_loader = FileSystemLoader(self.get_resources_location())<EOL>template_environment = Environment(loader=template_loader)<EOL>client_html = template_environment.get_template(CONSTANTS.TEMPLATE_HEADER).render(custom_stylesheet_includes=CONSTANTS.STYLESHEET_INCLUDES,<EOL>custom_javascript_includes=CONSTANTS.JAVASCRIPT_INCLUDES,<EOL>resource_path=self.resource_path,<EOL>report_title='<STR_LIT>') + '<STR_LIT:\\n>'<EOL>client_html += template_environment.get_template(CONSTANTS.TEMPLATE_DIFF_CLIENT_CHARTING).render(data_series=data_sources,<EOL>resource_path=self.resource_path) + '<STR_LIT:\\n>'<EOL>client_html += template_environment.get_template(CONSTANTS.TEMPLATE_FOOTER).render()<EOL>return client_html<EOL>", "docstring": "Create the client charting page for the diff report, with time series data from the two diffed reports.\n:return: generated html to be written to disk", "id": "f7861:c0:m3"}
{"signature": "def discover(self, metafile):", "body": "for report in self.reports:<EOL><INDENT>if report.remote_location == '<STR_LIT>':<EOL><INDENT>if naarad.utils.is_valid_file(os.path.join(os.path.join(report.location, self.resource_path), metafile)):<EOL><INDENT>with open(os.path.join(os.path.join(report.location, self.resource_path), metafile), '<STR_LIT:r>') as meta_file:<EOL><INDENT>if metafile == CONSTANTS.STATS_CSV_LIST_FILE:<EOL><INDENT>report.stats = meta_file.readlines()[<NUM_LIT:0>].split('<STR_LIT:U+002C>')<EOL><DEDENT>elif metafile == CONSTANTS.PLOTS_CSV_LIST_FILE:<EOL><INDENT>report.datasource = meta_file.readlines()[<NUM_LIT:0>].split('<STR_LIT:U+002C>')<EOL><DEDENT>elif metafile == CONSTANTS.CDF_PLOTS_CSV_LIST_FILE:<EOL><INDENT>report.cdf_datasource = meta_file.readlines()[<NUM_LIT:0>].split('<STR_LIT:U+002C>')<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>report.status = '<STR_LIT>'<EOL>self.status = '<STR_LIT>'<EOL>logger.error('<STR_LIT>', report.label)<EOL>return False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>stats_url = report.remote_location + '<STR_LIT:/>' + self.resource_path + '<STR_LIT:/>' + metafile<EOL>meta_file_data = naarad.httpdownload.stream_url(stats_url)<EOL>if meta_file_data:<EOL><INDENT>if metafile == CONSTANTS.STATS_CSV_LIST_FILE:<EOL><INDENT>report.stats = meta_file_data.split('<STR_LIT:U+002C>')<EOL><DEDENT>elif metafile == CONSTANTS.PLOTS_CSV_LIST_FILE:<EOL><INDENT>report.datasource = meta_file_data.split('<STR_LIT:U+002C>')<EOL><DEDENT>elif metafile == CONSTANTS.CDF_PLOTS_CSV_LIST_FILE:<EOL><INDENT>report.cdf_datasource = meta_file_data.split('<STR_LIT:U+002C>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>report.status = '<STR_LIT>'<EOL>self.status = '<STR_LIT>'<EOL>logger.error('<STR_LIT>', report.label)<EOL>return False<EOL><DEDENT><DEDENT><DEDENT>return True<EOL>", "docstring": "Determine what summary stats, time series, and CDF csv exist for the reports that need to be diffed.\n:return: boolean: return whether the summary 
stats / time series / CDF csv summary was successfully located", "id": "f7861:c0:m5"}
{"signature": "def collect_cdf_datasources(self):", "body": "report_count = <NUM_LIT:0><EOL>if self.status != '<STR_LIT:OK>':<EOL><INDENT>return False<EOL><DEDENT>diff_cdf_datasource = sorted(set(self.reports[<NUM_LIT:0>].cdf_datasource) & set(self.reports[<NUM_LIT:1>].cdf_datasource))<EOL>if diff_cdf_datasource:<EOL><INDENT>self.reports[<NUM_LIT:0>].cdf_datasource = diff_cdf_datasource<EOL>self.reports[<NUM_LIT:1>].cdf_datasource = diff_cdf_datasource<EOL><DEDENT>else:<EOL><INDENT>self.status = '<STR_LIT>'<EOL>logger.error('<STR_LIT>')<EOL>return False<EOL><DEDENT>for report in self.reports:<EOL><INDENT>report.label = report_count<EOL>report_count += <NUM_LIT:1><EOL>report.local_location = os.path.join(self.resource_directory, str(report.label))<EOL>try:<EOL><INDENT>os.makedirs(report.local_location)<EOL><DEDENT>except OSError as exeption:<EOL><INDENT>if exeption.errno != errno.EEXIST:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>if report.remote_location != '<STR_LIT>':<EOL><INDENT>naarad.httpdownload.download_url_list(map(lambda x: report.remote_location + '<STR_LIT:/>' + self.resource_path + '<STR_LIT:/>' + x + '<STR_LIT>', report.cdf_datasource),<EOL>report.local_location)<EOL><DEDENT>else:<EOL><INDENT>for filename in report.cdf_datasource:<EOL><INDENT>try:<EOL><INDENT>shutil.copy(os.path.join(os.path.join(report.location, self.resource_path), filename + '<STR_LIT>'), report.local_location)<EOL><DEDENT>except IOError as exeption:<EOL><INDENT>continue<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return True<EOL>", "docstring": "Identify what cdf series exist in both the diffed reports and download them to the diff report resources directory\n:return: True/False : return status of whether the download of time series resources succeeded.", "id": "f7861:c0:m7"}
{"signature": "def process_tasks_line(self, words):", "body": "words = words[<NUM_LIT:1>:]<EOL>length = len(words) / <NUM_LIT:2>  <EOL>values = {}<EOL>for offset in range(length):<EOL><INDENT>k = words[<NUM_LIT:2> * offset + <NUM_LIT:1>].strip('<STR_LIT:U+002C>')<EOL>v = words[<NUM_LIT:2> * offset]<EOL>values['<STR_LIT>' + k] = v<EOL><DEDENT>self.put_values_into_data(values)<EOL>", "docstring": "Process the line starting with \"Tasks:\"\nExample log:   Tasks: 446 total,   1 running, 442 sleeping,   2 stopped,   1 zombie", "id": "f7863:c0:m3"}
{"signature": "def parse_innotop_mode_m(self):", "body": "with open(self.infile, '<STR_LIT:r>') as infh:<EOL><INDENT>max_row_quot = <NUM_LIT:0><EOL>valrow = -<NUM_LIT:1><EOL>thisrowcolumns = {}<EOL>data = {}<EOL>last_ts = None<EOL>while True:<EOL><INDENT>line1 = infh.readline()<EOL>words = line1.split()<EOL>infh.readline()<EOL>is_header = True<EOL>for word in words:<EOL><INDENT>if naarad.utils.is_number(word):<EOL><INDENT>last_ts = words[<NUM_LIT:0>].strip().replace('<STR_LIT:T>', '<STR_LIT:U+0020>')<EOL>is_header = False<EOL>break  <EOL><DEDENT><DEDENT>if len(words) > <NUM_LIT:2> and is_header:<EOL><INDENT>thisrowcolumns[max_row_quot] = words[<NUM_LIT:2>:]<EOL>for column in thisrowcolumns[max_row_quot]:<EOL><INDENT>data[column] = []<EOL><DEDENT>max_row_quot += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if not last_ts:<EOL><INDENT>logger.warn(\"<STR_LIT>\", self.infile)<EOL>return True<EOL><DEDENT>infh.seek(<NUM_LIT:0>)<EOL>is_bad_line = False<EOL>outfilehandlers = {}<EOL>for line in infh:<EOL><INDENT>l = line.strip().split('<STR_LIT:U+0020>', <NUM_LIT:1>)<EOL>if len(l) <= <NUM_LIT:1>:<EOL><INDENT>continue<EOL><DEDENT>ts = l[<NUM_LIT:0>].strip().replace('<STR_LIT:T>', '<STR_LIT:U+0020>')<EOL>if ts != last_ts:<EOL><INDENT>last_ts = ts<EOL>valrow = -<NUM_LIT:1><EOL><DEDENT>nameval = l[<NUM_LIT:1>].strip().split('<STR_LIT:\\t>', <NUM_LIT:1>)<EOL>try:<EOL><INDENT>words = nameval[<NUM_LIT:1>].split('<STR_LIT:\\t>')<EOL><DEDENT>except IndexError:<EOL><INDENT>logger.warn(\"<STR_LIT>\", line)<EOL>continue<EOL><DEDENT>valrow += <NUM_LIT:1><EOL>command = nameval[<NUM_LIT:0>]<EOL>if command not in outfilehandlers:<EOL><INDENT>outfilehandlers[command] = {}<EOL><DEDENT>quot = valrow % max_row_quot<EOL>columns = thisrowcolumns[quot]<EOL>for i in range(len(words)):<EOL><INDENT>if len(words) > len(columns):<EOL><INDENT>logger.warn(\"<STR_LIT>\", line)<EOL>logger.warn(\"<STR_LIT>\", len(words), len(columns))<EOL>break<EOL><DEDENT>if words[i] in 
columns:<EOL><INDENT>logger.warn(\"<STR_LIT>\", line)<EOL>valrow -= <NUM_LIT:1><EOL>break<EOL><DEDENT>if self.options and columns[i] not in self.options:<EOL><INDENT>continue<EOL><DEDENT>if columns[i] not in outfilehandlers[command]:<EOL><INDENT>outfilehandlers[command][columns[i]] = open(self.get_csv_C(command, columns[i]), '<STR_LIT:w>')<EOL>self.csv_files.append(self.get_csv_C(command, columns[i]))<EOL><DEDENT>ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)<EOL>outfilehandlers[command][columns[i]].write(ts + '<STR_LIT:U+002C>')<EOL>outfilehandlers[command][columns[i]].write(words[i])<EOL>outfilehandlers[command][columns[i]].write('<STR_LIT:\\n>')<EOL><DEDENT><DEDENT>for command in outfilehandlers:<EOL><INDENT>for column in outfilehandlers[command]:<EOL><INDENT>outfilehandlers[command][column].close()<EOL><DEDENT><DEDENT><DEDENT>return True<EOL>", "docstring": "Special parsing method for Innotop \"Replication Status\" results (innotop --mode M)", "id": "f7865:c0:m5"}
{"signature": "def parse(self):", "body": "file_status = True<EOL>for infile in self.infile_list:<EOL><INDENT>file_status = file_status and naarad.utils.is_valid_file(infile)<EOL>if not file_status:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>status = self.parse_xml_jtl(self.aggregation_granularity)<EOL>gc.collect()<EOL>return status<EOL>", "docstring": "Parse the Jmeter file and calculate key stats\n\n:return: status of the metric parse", "id": "f7866:c0:m6"}
{"signature": "def parse_xml_jtl(self, granularity):", "body": "data = defaultdict(list)<EOL>processed_data = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))<EOL>for input_file in self.infile_list:<EOL><INDENT>logger.info('<STR_LIT>', input_file)<EOL>timestamp_format = None<EOL>tree = ElementTree.parse(input_file)<EOL>samples = tree.findall('<STR_LIT>') + tree.findall('<STR_LIT>')<EOL>for sample in samples:<EOL><INDENT>if not timestamp_format or timestamp_format == '<STR_LIT>':<EOL><INDENT>timestamp_format = naarad.utils.detect_timestamp_format(sample.get('<STR_LIT>'))<EOL><DEDENT>if timestamp_format == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>ts = naarad.utils.get_standardized_timestamp(sample.get('<STR_LIT>'), timestamp_format)<EOL>if ts == -<NUM_LIT:1>:<EOL><INDENT>continue<EOL><DEDENT>ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)<EOL>aggregate_timestamp, averaging_factor = self.get_aggregation_timestamp(ts, granularity)<EOL>self.aggregate_count_over_time(processed_data, sample, [self._sanitize_label(sample.get('<STR_LIT>')), '<STR_LIT>'], aggregate_timestamp)<EOL>self.aggregate_values_over_time(processed_data, sample, [self._sanitize_label(sample.get('<STR_LIT>')), '<STR_LIT>'], ['<STR_LIT:t>', '<STR_LIT>'], aggregate_timestamp)<EOL>logger.info('<STR_LIT>', input_file)<EOL><DEDENT><DEDENT>logger.info('<STR_LIT>')<EOL>self.average_values_for_plot(processed_data, data, averaging_factor)<EOL>logger.info('<STR_LIT>')<EOL>for csv in data.keys():<EOL><INDENT>self.csv_files.append(csv)<EOL>with open(csv, '<STR_LIT:w>') as csvf:<EOL><INDENT>csvf.write('<STR_LIT:\\n>'.join(sorted(data[csv])))<EOL><DEDENT><DEDENT>logger.info('<STR_LIT>')<EOL>self.calculate_key_stats(processed_data)<EOL>return True<EOL>", "docstring": "Parse Jmeter workload output in XML format and extract overall and per transaction data and key statistics\n\n:param string granularity: The time period over which to aggregate and average the raw data. 
Valid values are 'hour', 'minute' or 'second'\n:return: status of the metric parse", "id": "f7866:c0:m8"}
{"signature": "def aggregate_values_over_time(self, metric_store, line_data, transaction_list, metric_list, aggregate_timestamp):", "body": "for metric in metric_list:<EOL><INDENT>for transaction in transaction_list:<EOL><INDENT>metric_data = reduce(defaultdict.__getitem__, [metric, transaction, aggregate_timestamp], metric_store)<EOL>metric_data.append(float(line_data.get(metric)))<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Organize and store the data from the log line into the metric store by metric type, transaction, timestamp\n\n:param dict metric_store: The metric store used to store all the parsed jmeter log data\n:param dict line_data: dict with the extracted k:v from the log line\n:param list transaction_list: list of transaction to be used for storing the metrics from given line\n:param list metric_list: list of metrics to extract from the log line\n:param string aggregate_timestamp: timestamp used for storing the raw data. This accounts for aggregation time period\n:return: None", "id": "f7866:c0:m3"}
{"signature": "def _get_tuple(self, fields):", "body": "v1 = '<STR_LIT>'<EOL>v2 = '<STR_LIT>'<EOL>if len(fields) > <NUM_LIT:0>:<EOL><INDENT>v1 = fields[<NUM_LIT:0>]<EOL><DEDENT>if len(fields) > <NUM_LIT:1>:<EOL><INDENT>v2 = fields[<NUM_LIT:1>]<EOL><DEDENT>return v1, v2<EOL>", "docstring": ":param fields: a list which contains either 0,1,or 2 values\n:return: a tuple with default values of '';", "id": "f7867:c0:m1"}
{"signature": "def _add_data_line(self, data, col, value, ts):", "body": "if col in self.column_csv_map:<EOL><INDENT>out_csv = self.column_csv_map[col]<EOL><DEDENT>else:<EOL><INDENT>out_csv = self.get_csv(col)   <EOL>data[out_csv] = []<EOL><DEDENT>data[out_csv].append(ts + \"<STR_LIT:U+002C>\" + value)<EOL>", "docstring": "Append the data point to the dictionary of \"data\"\n:param data: The dictionary containing all data\n:param col: The sub-metric name e.g. 'host1_port1.host2_port2.SendQ'\n:param value: integer\n:param ts: timestamp\n:return: None", "id": "f7867:c0:m7"}
{"signature": "def _match_host_port(self, host, port, cur_host, cur_port):", "body": "<EOL>host_match = False<EOL>if not host:<EOL><INDENT>host_match = True<EOL><DEDENT>elif cur_host.startswith(host):  <EOL><INDENT>host_match = True<EOL><DEDENT>port_match = False<EOL>if not port:<EOL><INDENT>port_match = True<EOL><DEDENT>elif port == cur_port:<EOL><INDENT>port_match = True<EOL><DEDENT>return host_match and port_match<EOL>", "docstring": "Determine whether user-specified (host,port) matches current (cur_host, cur_port)\n:param host,port: The user input of (host,port)\n:param cur_host, cur_port: The current connection\n:return: True or Not", "id": "f7867:c0:m4"}
{"signature": "def aggregate_count_over_time(self, metric_store, groupby_name, aggregate_timestamp):", "body": "all_qps = metric_store['<STR_LIT>']<EOL>qps = all_qps[groupby_name]<EOL>if aggregate_timestamp in qps:<EOL><INDENT>qps[aggregate_timestamp] += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>qps[aggregate_timestamp] = <NUM_LIT:1><EOL><DEDENT>return None<EOL>", "docstring": "Organize and store the count of data from the log line into the metric store by columnm, group name, timestamp\n\n:param dict metric_store: The metric store used to store all the parsed the log data\n:param string groupby_name: the group name that the log line belongs to\n:param string aggregate_timestamp: timestamp used for storing the raw data. This accounts for aggregation time period\n:return: None", "id": "f7872:c0:m13"}
{"signature": "def calculate_stats(self):", "body": "metric_type = self.metric_type.split('<STR_LIT:->')[<NUM_LIT:0>]<EOL>if metric_type in naarad.naarad_imports.metric_classes or metric_type in naarad.naarad_imports.aggregate_metric_classes:<EOL><INDENT>self.calculate_other_metric_stats()<EOL><DEDENT>else:<EOL><INDENT>self.calculate_base_metric_stats()<EOL><DEDENT>", "docstring": "Calculate stats with different function depending on the metric type:\nData is recorded in memory for base metric type, and use calculate_base_metric_stats()\nData is recorded in CSV file for other metric types, and use calculate_other_metric_stats()", "id": "f7872:c0:m18"}
{"signature": "def find_header(self, infile):", "body": "cpus = []<EOL>for line in infile:  <EOL><INDENT>if not self.is_header_line(line):<EOL><INDENT>continue<EOL><DEDENT>cpu_header = line.split()<EOL>for cpu_h in cpu_header[<NUM_LIT:2>:]:<EOL><INDENT>if not cpu_h.startswith('<STR_LIT>'):<EOL><INDENT>cpus = []  <EOL>break<EOL><DEDENT>else:<EOL><INDENT>cpus.append(cpu_h)<EOL><DEDENT><DEDENT>if len(cpus) > <NUM_LIT:0>:  <EOL><INDENT>break<EOL><DEDENT><DEDENT>return cpus<EOL>", "docstring": "Parses the file and tries to find the header line. The header line has format:\n\n  2014-10-29 00:28:42.15161        CPU0   CPU1   CPU2   CPU3  ...\n\nSo should always have CPU# for each core. This function verifies a good header and\nreturns the list of CPUs that exist from the header.\n\n:param infile: The opened file in read mode to find the header.\n:return cpus: A list of the core names so in this example ['CPU0', 'CPU1', ...]", "id": "f7873:c0:m3"}
{"signature": "def parse(self):", "body": "file_status = True<EOL>for input_file in self.infile_list:<EOL><INDENT>file_status = file_status and naarad.utils.is_valid_file(input_file)<EOL>if not file_status:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>status = True<EOL>cur_zone = None<EOL>cur_submetric = None<EOL>cur_value = None<EOL>data = {}  <EOL>for input_file in self.infile_list:<EOL><INDENT>logger.info('<STR_LIT>', input_file)<EOL>timestamp_format = None<EOL>with open(input_file) as fh:<EOL><INDENT>for line in fh:<EOL><INDENT>words = line.replace('<STR_LIT:U+002C>', '<STR_LIT:U+0020>').split()           <EOL>if len(words) < <NUM_LIT:3>:<EOL><INDENT>continue<EOL><DEDENT>ts = words[<NUM_LIT:0>] + \"<STR_LIT:U+0020>\" + words[<NUM_LIT:1>]<EOL>if not timestamp_format or timestamp_format == '<STR_LIT>':<EOL><INDENT>timestamp_format = naarad.utils.detect_timestamp_format(ts)<EOL><DEDENT>if timestamp_format == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)<EOL>if self.ts_out_of_range(ts):<EOL><INDENT>continue<EOL><DEDENT>if words[<NUM_LIT:2>] == '<STR_LIT>':  <EOL><INDENT>cols = words[<NUM_LIT:2>:]<EOL>cur_zone = '<STR_LIT:.>'.join(cols)<EOL>continue<EOL><DEDENT>elif words[<NUM_LIT:2>] == '<STR_LIT>':  <EOL><INDENT>cur_submetric = words[<NUM_LIT:2>] + '<STR_LIT:.>' + words[<NUM_LIT:3>]  <EOL>cur_value = words[<NUM_LIT:4>]<EOL><DEDENT>elif words[<NUM_LIT:2>] in self.processed_sub_metrics:<EOL><INDENT>cur_submetric = '<STR_LIT>' + '<STR_LIT:.>' + words[<NUM_LIT:2>]  <EOL>cur_value = words[<NUM_LIT:3>]<EOL><DEDENT>elif words[<NUM_LIT:2>] in self.skipped_sub_metrics:<EOL><INDENT>continue<EOL><DEDENT>else:   <EOL><INDENT>cur_submetric = words[<NUM_LIT:2>]<EOL>cur_value = words[<NUM_LIT:3>]<EOL><DEDENT>col = cur_zone + '<STR_LIT:.>' + cur_submetric  <EOL>if cur_zone and self.zones and cur_zone not in self.zones:<EOL><INDENT>continue<EOL><DEDENT>self.sub_metric_unit[col] = '<STR_LIT>'  <EOL>if 
self.sub_metrics and cur_submetric and cur_submetric not in self.sub_metrics:<EOL><INDENT>continue<EOL><DEDENT>if col in self.column_csv_map:<EOL><INDENT>out_csv = self.column_csv_map[col]<EOL><DEDENT>else:<EOL><INDENT>out_csv = self.get_csv(col)   <EOL>data[out_csv] = []<EOL><DEDENT>data[out_csv].append(ts + \"<STR_LIT:U+002C>\" + cur_value)<EOL><DEDENT><DEDENT><DEDENT>for csv in data.keys():<EOL><INDENT>self.csv_files.append(csv)<EOL>with open(csv, '<STR_LIT:w>') as fh:<EOL><INDENT>fh.write('<STR_LIT:\\n>'.join(sorted(data[csv])))<EOL><DEDENT><DEDENT>return status<EOL>", "docstring": "Parse the vmstat file\n:return: status of the metric parse", "id": "f7874:c0:m1"}
{"signature": "def analyze(self, input_directory, output_directory, **kwargs):", "body": "is_api_call = True<EOL>if len(self._analyses) == <NUM_LIT:0>:<EOL><INDENT>if '<STR_LIT>' not in kwargs.keys():<EOL><INDENT>return CONSTANTS.ERROR<EOL><DEDENT>self.create_analysis(kwargs['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT:args>' in kwargs:<EOL><INDENT>self._process_args(self._analyses[<NUM_LIT:0>], kwargs['<STR_LIT:args>'])<EOL>is_api_call = False<EOL><DEDENT>error_count = <NUM_LIT:0><EOL>self._input_directory = input_directory<EOL>self._output_directory = output_directory<EOL>for test_id in sorted(self._analyses.keys()):<EOL><INDENT>if not self._analyses[test_id].input_directory:<EOL><INDENT>self._analyses[test_id].input_directory = input_directory<EOL><DEDENT>if not self._analyses[test_id].output_directory:<EOL><INDENT>if len(self._analyses) > <NUM_LIT:1>:<EOL><INDENT>self._analyses[test_id].output_directory = os.path.join(output_directory, str(test_id))<EOL><DEDENT>else:<EOL><INDENT>self._analyses[test_id].output_directory = output_directory<EOL><DEDENT><DEDENT>if('<STR_LIT>' in kwargs.keys()) and (not self._analyses[test_id].config):<EOL><INDENT>self._analyses[test_id].config = kwargs['<STR_LIT>']<EOL><DEDENT>self._create_output_directories(self._analyses[test_id])<EOL>self._analyses[test_id].status = self.run(self._analyses[test_id], is_api_call, **kwargs)<EOL>if self._analyses[test_id].status != CONSTANTS.OK:<EOL><INDENT>error_count += <NUM_LIT:1><EOL><DEDENT><DEDENT>if len(self._analyses) == <NUM_LIT:1>:<EOL><INDENT>return self._analyses[<NUM_LIT:0>].status<EOL><DEDENT>elif error_count > <NUM_LIT:0>:<EOL><INDENT>return CONSTANTS.ERROR<EOL><DEDENT>else:<EOL><INDENT>return CONSTANTS.OK<EOL><DEDENT>", "docstring": "Run all the analysis saved in self._analyses, sorted by test_id.\nThis is useful when Naarad() is used by other programs and multiple analyses are run\nIn naarad CLI mode, len(_analyses) == 1\n:param: input_directory: location of log files\n:param: 
output_directory: root directory for analysis output\n:param: **kwargs: Optional keyword args\n:return: int: status code.", "id": "f7877:c1:m13"}
{"signature": "def get_failed_analyses(self):", "body": "failed_analyses = []<EOL>for test_id in self._analyses.keys():<EOL><INDENT>if self._analyses[test_id].status != CONSTANTS.OK:<EOL><INDENT>failed_analyses.append(test_id)<EOL><DEDENT><DEDENT>return failed_analyses<EOL>", "docstring": "Returns a list of test_id for which naarad analysis failed\n:return: list of test_ids", "id": "f7877:c1:m4"}
{"signature": "def create_analysis(self, config):", "body": "self._default_test_id += <NUM_LIT:1><EOL>self._analyses[self._default_test_id] = _Analysis(ts_start=None, config=config, test_id=self._default_test_id)<EOL>", "docstring": "Create Analysis and save in Naarad from config\n:param config:\n:return:", "id": "f7877:c1:m1"}
{"signature": "def discover_by_name(input_directory, output_directory):", "body": "metric_list = []<EOL>log_files = os.listdir(input_directory)<EOL>for log_file in log_files:<EOL><INDENT>if log_file in CONSTANTS.SUPPORTED_FILENAME_MAPPING.keys():<EOL><INDENT>metric_list.append(initialize_metric(CONSTANTS.SUPPORTED_FILENAME_MAPPING[log_file], [log_file], None, [], output_directory, CONSTANTS.RESOURCE_PATH,<EOL>CONSTANTS.SUPPORTED_FILENAME_MAPPING[log_file], None, None, {}, None, None, {}))<EOL><DEDENT>else:<EOL><INDENT>logger.warning('<STR_LIT>', log_file)<EOL><DEDENT><DEDENT>return metric_list<EOL>", "docstring": "Auto discover metric types from the files that exist in input_directory and return a list of metrics\n:param: input_directory: The location to scan for log files\n:param: output_directory: The location for the report", "id": "f7878:m42"}
{"signature": "def parse_run_step_section(config_obj, section):", "body": "kill_after_seconds = None<EOL>try:<EOL><INDENT>run_cmd = config_obj.get(section, '<STR_LIT>')<EOL>run_rank = int(config_obj.get(section, '<STR_LIT>'))<EOL><DEDENT>except ConfigParser.NoOptionError:<EOL><INDENT>logger.exception(\"<STR_LIT>\" + section)<EOL>sys.exit()<EOL><DEDENT>except ValueError:<EOL><INDENT>logger.error(\"<STR_LIT>\", config_obj.get(section, '<STR_LIT>'), section)<EOL>sys.exit()<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>run_type = config_obj.get(section, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>run_type = CONSTANTS.RUN_TYPE_WORKLOAD<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>run_order = config_obj.get(section, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>run_order = CONSTANTS.PRE_ANALYSIS_RUN<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>call_type = config_obj.get(section, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>call_type = '<STR_LIT>'<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>kill_after_seconds = int(config_obj.get(section, '<STR_LIT>'))<EOL><DEDENT>except ValueError:<EOL><INDENT>logger.error(\"<STR_LIT>\", config_obj.get(section, '<STR_LIT>'), section)<EOL><DEDENT><DEDENT>if call_type == '<STR_LIT>':<EOL><INDENT>run_step_obj = Local_Cmd(run_type, run_cmd, call_type, run_order, run_rank, kill_after_seconds=kill_after_seconds)<EOL><DEDENT>else:<EOL><INDENT>logger.error('<STR_LIT>')<EOL>run_step_obj = None<EOL><DEDENT>return run_step_obj<EOL>", "docstring": "Parse a RUN-STEP section in the config to return a Run_Step object\n:param config_obj: ConfigParser objection\n:param section: Section name\n:return: an initialized Run_Step object", "id": "f7878:m12"}
{"signature": "def get_run_time_period(run_steps):", "body": "init_ts_start = get_standardized_timestamp('<STR_LIT>', None)<EOL>ts_start = init_ts_start<EOL>ts_end = '<STR_LIT:0>'<EOL>for run_step in run_steps:<EOL><INDENT>if run_step.ts_start and run_step.ts_end:<EOL><INDENT>if run_step.ts_start < ts_start:<EOL><INDENT>ts_start = run_step.ts_start<EOL><DEDENT>if run_step.ts_end > ts_end:<EOL><INDENT>ts_end = run_step.ts_end<EOL><DEDENT><DEDENT><DEDENT>if ts_end == '<STR_LIT:0>':<EOL><INDENT>ts_end = None<EOL><DEDENT>if ts_start == init_ts_start:<EOL><INDENT>ts_start = None<EOL><DEDENT>logger.info('<STR_LIT>' + str(ts_start) + '<STR_LIT>' + str(ts_end))<EOL>return ts_start, ts_end<EOL>", "docstring": "This method finds the time range which covers all the Run_Steps\n\n:param run_steps: list of Run_Step objects\n:return: tuple of start and end timestamps", "id": "f7878:m6"}
{"signature": "def init_logging(logger, log_file, log_level):", "body": "with open(log_file, '<STR_LIT:w>'):<EOL><INDENT>pass<EOL><DEDENT>numeric_level = getattr(logging, log_level.upper(), None) if log_level else logging.INFO<EOL>if not isinstance(numeric_level, int):<EOL><INDENT>raise ValueError('<STR_LIT>' % log_level)<EOL><DEDENT>logger.setLevel(logging.DEBUG)<EOL>fh = logging.FileHandler(log_file)<EOL>fh.setLevel(logging.DEBUG)<EOL>ch = logging.StreamHandler()<EOL>ch.setLevel(numeric_level)<EOL>formatter = logging.Formatter('<STR_LIT>')<EOL>fh.setFormatter(formatter)<EOL>ch.setFormatter(formatter)<EOL>logger.addHandler(fh)<EOL>logger.addHandler(ch)<EOL>return CONSTANTS.OK<EOL>", "docstring": "Initialize the naarad logger.\n:param: logger: logger object to initialize\n:param: log_file: log file name\n:param: log_level: log level (debug, info, warn, error)", "id": "f7878:m37"}
{"signature": "def parse_basic_metric_options(config_obj, section):", "body": "infile = {}<EOL>aggr_hosts = None<EOL>aggr_metrics = None<EOL>ts_start = None<EOL>ts_end = None<EOL>precision = None<EOL>hostname = \"<STR_LIT:localhost>\"<EOL>rule_strings = {}<EOL>important_sub_metrics = None<EOL>anomaly_detection_metrics = None<EOL>try:<EOL><INDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>important_sub_metrics = config_obj.get(section, '<STR_LIT>').split()<EOL>config_obj.remove_option(section, '<STR_LIT>')<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>hostname = config_obj.get(section, '<STR_LIT>')<EOL>config_obj.remove_option(section, '<STR_LIT>')<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>infile = config_obj.get(section, '<STR_LIT>').split()<EOL>config_obj.remove_option(section, '<STR_LIT>')<EOL><DEDENT>label = sanitize_string_section_name(section)<EOL>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>ts_start = get_standardized_timestamp(config_obj.get(section, '<STR_LIT>'), None)<EOL>config_obj.remove_option(section, '<STR_LIT>')<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>ts_end = get_standardized_timestamp(config_obj.get(section, '<STR_LIT>'), None)<EOL>config_obj.remove_option(section, '<STR_LIT>')<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>precision = config_obj.get(section, '<STR_LIT>')<EOL>config_obj.remove_option(section, '<STR_LIT>')<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>aggr_hosts = config_obj.get(section, '<STR_LIT>')<EOL>config_obj.remove_option(section, '<STR_LIT>')<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>aggr_metrics = config_obj.get(section, '<STR_LIT>')<EOL>config_obj.remove_option(section, '<STR_LIT>')<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>anomaly_detection_metrics = config_obj.get(section, 
'<STR_LIT>').split()<EOL>config_obj.remove_option(section, '<STR_LIT>')<EOL><DEDENT>rule_strings, other_options = get_rule_strings(config_obj, section)<EOL><DEDENT>except ConfigParser.NoOptionError:<EOL><INDENT>logger.exception(\"<STR_LIT>\" + section)<EOL>sys.exit()<EOL><DEDENT>return (hostname, infile, aggr_hosts, aggr_metrics, label, ts_start, ts_end, precision, aggr_metrics, other_options,<EOL>rule_strings, important_sub_metrics, anomaly_detection_metrics)<EOL>", "docstring": "Parse basic options from metric sections of the config\n:param config_obj: ConfigParser object\n:param section: Section name\n:return: all the parsed options", "id": "f7878:m9"}
{"signature": "def is_valid_url(url):", "body": "regex = re.compile(r'<STR_LIT>'<EOL>r'<STR_LIT>'<EOL>r'<STR_LIT>'<EOL>r'<STR_LIT>'<EOL>r'<STR_LIT>'<EOL>r'<STR_LIT>', re.IGNORECASE)<EOL>if regex.match(url):<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Check if a given string is in the correct URL format or not\n\n:param str url:\n:return: True or False", "id": "f7878:m2"}
{"signature": "def extract_diff_sla_from_config_file(obj, options_file):", "body": "rule_strings = {}<EOL>config_obj = ConfigParser.ConfigParser()<EOL>config_obj.optionxform = str<EOL>config_obj.read(options_file)<EOL>for section in config_obj.sections():<EOL><INDENT>rule_strings, kwargs = get_rule_strings(config_obj, section)<EOL>for (key, val) in rule_strings.iteritems():<EOL><INDENT>set_sla(obj, section, key, val)<EOL><DEDENT><DEDENT>", "docstring": "Helper function to parse diff config file, which contains SLA rules for diff comparisons", "id": "f7878:m8"}
{"signature": "def is_valid_file(filename):", "body": "if os.path.exists(filename):<EOL><INDENT>if not os.path.getsize(filename):<EOL><INDENT>logger.warning('<STR_LIT>', filename)<EOL>return False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.warning('<STR_LIT>', filename)<EOL>return False<EOL><DEDENT>return True<EOL>", "docstring": "Check if the specifed file exists and is not empty\n\n:param filename: full path to the file that needs to be checked\n:return: Status, Message", "id": "f7878:m31"}
{"signature": "def initialize_aggregate_metric(section, aggr_hosts, aggr_metrics, metrics, outdir_default, resource_path, label, ts_start, ts_end, rule_strings,<EOL>important_sub_metrics, anomaly_detection_metrics, other_options):", "body": "metric = None<EOL>metric_type = section.split('<STR_LIT:->')[<NUM_LIT:0>]<EOL>metric = aggregate_metric_classes[metric_type](section, aggr_hosts, aggr_metrics, metrics, outdir_default, resource_path, label, ts_start, ts_end,<EOL>rule_strings, important_sub_metrics, anomaly_detection_metrics, **other_options)<EOL>return metric<EOL>", "docstring": "Initialize aggregate metric\n:param: section: config section name\n:param: aggr_hosts: list of hostnames to aggregate\n:param: aggr_metrics: list of metrics to aggregate\n:param: metrics: list of metric objects associated with the current naarad analysis\n:param: outdir_default: report location\n:param: resource_path: resource path for report\n:param: label: label for config section\n:param: ts_start: start time for analysis\n:param: ts_end: end time for analysis\n:param: rule_strings: list of slas\n:param: important_sub_metrics: list of important sub metrics\n:param: other_options: kwargs\n:return: metric object", "id": "f7878:m44"}
{"signature": "def initialize_metric(section, infile_list, hostname, aggr_metrics, output_directory, resource_path, label, ts_start, ts_end, rule_strings,<EOL>important_sub_metrics, anomaly_detection_metrics, other_options):", "body": "metric = None<EOL>metric_type = section.split('<STR_LIT:->')[<NUM_LIT:0>]<EOL>if metric_type in metric_classes:<EOL><INDENT>if '<STR_LIT>' in metric_type:<EOL><INDENT>metric = metric_classes['<STR_LIT>'](section, infile_list, hostname, aggr_metrics, output_directory, resource_path, label, ts_start, ts_end,<EOL>rule_strings, important_sub_metrics, anomaly_detection_metrics, **other_options)<EOL><DEDENT>else:<EOL><INDENT>metric = metric_classes[metric_type](section, infile_list, hostname, aggr_metrics, output_directory, resource_path, label, ts_start, ts_end,<EOL>rule_strings, important_sub_metrics, anomaly_detection_metrics, **other_options)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>metric = Metric(section, infile_list, hostname, aggr_metrics, output_directory, resource_path, label, ts_start, ts_end, rule_strings,<EOL>important_sub_metrics, anomaly_detection_metrics, **other_options)<EOL><DEDENT>return metric<EOL>", "docstring": "Initialize appropriate metric based on type of metric.\n:param: section: config section name or auto discovered metric type\n:param: infile_list: list of input log files for the metric\n:param: hostname: hostname associated with the logs origin\n:param: output_directory: report location\n:param: resource_path: resource path for report\n:param: label: label for config section or auto discovered metric type\n:param: ts_start: start time for analysis\n:param: ts_end: end time for analysis\n:param: rule_strings: list of slas\n:param: important_sub_metrics: list of important sub metrics\n:param: anomaly_detection_metrics: list of metrics to use for anomaly detection.\n:param: other_options: kwargs\n:return: metric object", "id": "f7878:m43"}
{"signature": "def get_standardized_timestamp(timestamp, ts_format):", "body": "if not timestamp:<EOL><INDENT>return None<EOL><DEDENT>if timestamp == '<STR_LIT>':<EOL><INDENT>timestamp = str(datetime.datetime.now())<EOL><DEDENT>if not ts_format:<EOL><INDENT>ts_format = detect_timestamp_format(timestamp)<EOL><DEDENT>try:<EOL><INDENT>if ts_format == '<STR_LIT>':<EOL><INDENT>logger.error('<STR_LIT>', timestamp)<EOL>return -<NUM_LIT:1><EOL><DEDENT>elif ts_format == '<STR_LIT>':<EOL><INDENT>ts = int(timestamp) * <NUM_LIT:1000><EOL><DEDENT>elif ts_format == '<STR_LIT>':<EOL><INDENT>ts = timestamp<EOL><DEDENT>elif ts_format == '<STR_LIT>':<EOL><INDENT>ts = int(timestamp[:<NUM_LIT:10>]) * <NUM_LIT:1000> + int(timestamp[<NUM_LIT:11>:])<EOL><DEDENT>elif ts_format in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>date_today = str(datetime.date.today())<EOL>dt_obj = datetime.datetime.strptime(date_today + '<STR_LIT:U+0020>' + timestamp, '<STR_LIT>' + ts_format)<EOL>ts = calendar.timegm(dt_obj.utctimetuple()) * <NUM_LIT:1000> + dt_obj.microsecond / <NUM_LIT:1000><EOL><DEDENT>else:<EOL><INDENT>dt_obj = datetime.datetime.strptime(timestamp, ts_format)<EOL>ts = calendar.timegm(dt_obj.utctimetuple()) * <NUM_LIT:1000> + dt_obj.microsecond / <NUM_LIT:1000><EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT>return str(ts)<EOL>", "docstring": "Given a timestamp string, return a time stamp in the epoch ms format. If no date is present in\ntimestamp then today's date will be added as a prefix before conversion to epoch ms", "id": "f7878:m33"}
{"signature": "def parse_metric_section(config_obj, section, metric_classes, metrics, aggregate_metric_classes, outdir_default, resource_path):", "body": "(hostname, infile, aggr_hosts, aggr_metrics, label, ts_start, ts_end, precision, aggr_metrics, other_options,<EOL>rule_strings, important_sub_metrics, anomaly_detection_metrics) = parse_basic_metric_options(config_obj, section)<EOL>metric_type = section.split('<STR_LIT:->')[<NUM_LIT:0>]<EOL>if metric_type in aggregate_metric_classes:<EOL><INDENT>new_metric = initialize_aggregate_metric(section, aggr_hosts, aggr_metrics, metrics, outdir_default, resource_path, label, ts_start, ts_end, rule_strings,<EOL>important_sub_metrics, anomaly_detection_metrics, other_options)<EOL><DEDENT>else:<EOL><INDENT>new_metric = initialize_metric(section, infile, hostname, aggr_metrics, outdir_default, resource_path, label, ts_start, ts_end, rule_strings,<EOL>important_sub_metrics, anomaly_detection_metrics, other_options)<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT:ignore>') and config_obj.getint(section, '<STR_LIT:ignore>') == <NUM_LIT:1>:<EOL><INDENT>new_metric.ignore = True<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>new_metric.calc_metrics = config_obj.get(section, '<STR_LIT>')<EOL><DEDENT>new_metric.precision = precision<EOL>return new_metric<EOL>", "docstring": "Parse a metric section and create a Metric object\n:param config_obj: ConfigParser object\n:param section: Section name\n:param metric_classes: List of valid metric types\n:param metrics: List of all regular metric objects (used by aggregate metric)\n:param aggregate_metric_classes: List of all valid aggregate metric types\n:param outdir_default: Default output directory\n:param resource_path: Default resource directory\n:return: An initialized Metric object", "id": "f7878:m10"}
{"signature": "def is_valid_metric_name(metric_name):", "body": "reg = re.compile('<STR_LIT>')<EOL>if reg.match(metric_name) and not metric_name.startswith('<STR_LIT:.>'):<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "check the validity of metric_name in config; the metric_name will be used for creation of sub-dir, so only contains: alphabet, digits , '.', '-' and '_'\n:param str metric_name: metric_name\n:return: True if valid", "id": "f7878:m5"}
{"signature": "def detect_timestamp_format(timestamp):", "body": "time_formats = {<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>'),<EOL>'<STR_LIT>': re.compile(r'<STR_LIT>')<EOL>}<EOL>for time_format in time_formats:<EOL><INDENT>if re.match(time_formats[time_format], timestamp):<EOL><INDENT>return time_format<EOL><DEDENT><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Given an input timestamp string, determine what format is it likely in.\n\n:param string timestamp: the timestamp string for which we need to determine format\n:return: best guess timestamp format", "id": "f7878:m32"}
{"signature": "def parse_graph_section(config_obj, section, outdir_default, indir_default):", "body": "graph_timezone = None<EOL>graphing_library = CONSTANTS.DEFAULT_GRAPHING_LIBRARY<EOL>crossplots = []<EOL>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>graphing_library = config_obj.get(section, '<STR_LIT>')<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>graphs_string = config_obj.get(section, '<STR_LIT>')<EOL>crossplots = graphs_string.split()<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>outdir_default = config_obj.get(section, '<STR_LIT>')<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>outdir_default = config_obj.get(section, '<STR_LIT>')<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>indir_default = config_obj.get(section, '<STR_LIT>')<EOL><DEDENT>if config_obj.has_option(section, '<STR_LIT>'):<EOL><INDENT>graph_timezone = config_obj.get(section, '<STR_LIT>')<EOL>if graph_timezone not in (\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>logger.warn('<STR_LIT>' + graph_timezone + '<STR_LIT>')<EOL>graph_timezone = \"<STR_LIT>\"<EOL><DEDENT><DEDENT>return graphing_library, crossplots, outdir_default, indir_default, graph_timezone<EOL>", "docstring": "Parse the GRAPH section of the config to extract useful values\n:param config_obj: ConfigParser object\n:param section: Section name\n:param outdir_default: Default output directory passed in args\n:param indir_default: Default input directory passed in args\n:return: List of options extracted from the GRAPH section", "id": "f7878:m13"}
{"signature": "def download_file(url):", "body": "try:<EOL><INDENT>(local_file, headers) = urllib.urlretrieve(url)<EOL><DEDENT>except:<EOL><INDENT>sys.exit(\"<STR_LIT>\" + url + \"<STR_LIT>\")<EOL><DEDENT>return local_file<EOL>", "docstring": "Download a file pointed to by url to a temp file on local disk\n\n:param str url:\n:return: local_file", "id": "f7878:m3"}
{"signature": "def download_url_list(url_list, outdir):", "body": "for url in url_list:<EOL><INDENT>download_url_single(url, outdir)<EOL><DEDENT>", "docstring": "Downloads list of http(s) urls to local files\n:param list url_list: list of URLs to download\n:param str outdir: Required. the local directory to put the downloadedfiles.\n  If this is not given, then the local file will be the original one, as given in url.\n:return None", "id": "f7880:m5"}
{"signature": "def download_url_single(inputs, outdir, outfile=None):", "body": "if not inputs or type(inputs) != str or not outdir or type(outdir) != str:<EOL><INDENT>logging.error(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>else:<EOL><INDENT>if not os.path.exists(outdir):<EOL><INDENT>os.makedirs(outdir)<EOL><DEDENT><DEDENT>output_file = handle_single_url(inputs, outdir, outfile)<EOL>return output_file<EOL>", "docstring": "Downloads a http(s) url to a local file\n:param str inputs:  the absolute url\n:param str outdir: Required. the local directory to put the downloadedfiles.\n:param str outfile: // Optional. If this is given, the downloaded url will be renated to outfile;\n  If this is not given, then the local file will be the original one, as given in url.\n:return: the local full path name of downloaded url", "id": "f7880:m3"}
{"signature": "def run(self):", "body": "cmd_args = shlex.split(self.run_cmd)<EOL>logger.info('<STR_LIT>', self.run_rank)<EOL>logger.info('<STR_LIT>' + str(cmd_args))<EOL>self.ts_start = time.strftime(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>self.process = subprocess.Popen(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=<NUM_LIT:1>)<EOL>if self.kill_after_seconds:<EOL><INDENT>self.timer = Timer(self.kill_after_seconds, self.kill)<EOL>self.timer.start()<EOL><DEDENT>for line in iter(self.process.stdout.readline, b'<STR_LIT>'):<EOL><INDENT>logger.info(line.strip())<EOL><DEDENT>self.process.communicate()<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>logger.warning('<STR_LIT>')<EOL>self.kill()<EOL><DEDENT>if self.timer:<EOL><INDENT>self.timer.cancel()<EOL><DEDENT>self.ts_end = time.strftime(\"<STR_LIT>\")<EOL>logger.info('<STR_LIT>')<EOL>logger.info('<STR_LIT>' + self.ts_start + '<STR_LIT>' + self.ts_end)<EOL>", "docstring": "Run the command, infer time period to be used in metric analysis phase.\n:return: None", "id": "f7881:c0:m1"}
{"signature": "def __init__(self, run_type, run_cmd, call_type, run_order, run_rank, should_wait=True, kill_after_seconds=None):", "body": "self.run_type = run_type<EOL>self.run_cmd = run_cmd<EOL>self.call_type = call_type<EOL>self.run_order = run_order<EOL>self.run_rank = run_rank<EOL>self.should_wait = should_wait<EOL>self.kill_after_seconds = kill_after_seconds<EOL>self.timer = None<EOL>", "docstring": "Init method\n:param run_type: Type of run_step: \"workload\" only for now\n:param run_cmd: Details of command to be run. It could be a command or API call\n:param call_type: Kind of call -- local or remote\n:param run_order: When to run this w.r.t analysis. One of ('pre', 'in', 'post')\n:param run_rank: In what order to run this\n:param should_wait: Boolean whether naarad should wait for the run command to finish or not before moving on\n:param kill_after_seconds: Seconds for which the command should be run before being killed\n:return: None", "id": "f7882:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def html_to_text(cls, html):<DEDENT>", "body": "s = cls()<EOL>s.feed(html)<EOL>unescaped_data = s.unescape(s.get_data())<EOL>return escape_for_xml(unescaped_data, tags_to_keep=s.mathml_elements)<EOL>", "docstring": "Return stripped HTML, keeping only MathML.", "id": "f7888:c0:m7"}
{"signature": "def handle_charref(self, name):", "body": "self.fed.append('<STR_LIT>' % name)<EOL>", "docstring": "Return representation of numeric entities.", "id": "f7888:c0:m5"}
{"signature": "def handle_endtag(self, tag):", "body": "if tag in self.mathml_elements:<EOL><INDENT>self.fed.append(\"<STR_LIT>\".format(tag))<EOL><DEDENT>", "docstring": "Return representation of html end tag.", "id": "f7888:c0:m3"}
{"signature": "def handle_data(self, d):", "body": "self.fed.append(d)<EOL>", "docstring": "Return representation of pure text data.", "id": "f7888:c0:m1"}
{"signature": "def handle_entityref(self, name):", "body": "self.fed.append('<STR_LIT>' % name)<EOL>", "docstring": "Return representation of entities.", "id": "f7888:c0:m4"}
{"signature": "def get_records(self, url):", "body": "page = urllib2.urlopen(url)<EOL>pages = [BeautifulSoup(page)]<EOL>numpag = pages[<NUM_LIT:0>].body.findAll('<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT>'})<EOL>if len(numpag) > <NUM_LIT:0>:<EOL><INDENT>if re.search('<STR_LIT>', numpag[<NUM_LIT:0>].string):<EOL><INDENT>for i in range(int(numpag[<NUM_LIT:0>].string)-<NUM_LIT:1>):<EOL><INDENT>page = urllib2.urlopen('<STR_LIT>' % (url, i+<NUM_LIT:2>))<EOL>pages.append(BeautifulSoup(page))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\" % (numpag[<NUM_LIT:0>].string))<EOL><DEDENT><DEDENT>impl = getDOMImplementation()<EOL>doc = impl.createDocument(None, \"<STR_LIT>\", None)<EOL>links = []<EOL>for page in pages:<EOL><INDENT>links += page.body.findAll('<STR_LIT:p>', attrs={'<STR_LIT:class>': '<STR_LIT:title>'})<EOL>links += page.body.findAll('<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT:title>'})<EOL><DEDENT>for link in links:<EOL><INDENT>record = self._get_record(link)<EOL>doc.firstChild.appendChild(record)<EOL><DEDENT>return doc.toprettyxml()<EOL>", "docstring": "Returns the records listed in the webpage given as\nparameter as a xml String.\n\n@param url: the url of the Journal, Book, Protocol or Reference work", "id": "f7889:c0:m2"}
{"signature": "def record_delete_subfield_from(rec, tag, subfield_position,<EOL>field_position_global=None,<EOL>field_position_local=None):", "body": "subfields = record_get_subfields(<EOL>rec, tag,<EOL>field_position_global=field_position_global,<EOL>field_position_local=field_position_local)<EOL>try:<EOL><INDENT>del subfields[subfield_position]<EOL><DEDENT>except IndexError:<EOL><INDENT>raise InvenioBibRecordFieldError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" %<EOL>{\"<STR_LIT>\": subfield_position,<EOL>\"<STR_LIT>\": str(field_position_local),<EOL>\"<STR_LIT>\": str(field_position_global),<EOL>\"<STR_LIT>\": tag})<EOL><DEDENT>if not subfields:<EOL><INDENT>if field_position_global is not None:<EOL><INDENT>for position, field in enumerate(rec[tag]):<EOL><INDENT>if field[<NUM_LIT:4>] == field_position_global:<EOL><INDENT>del rec[tag][position]<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>del rec[tag][field_position_local]<EOL><DEDENT>if not rec[tag]:<EOL><INDENT>del rec[tag]<EOL><DEDENT><DEDENT>", "docstring": "Delete subfield from position specified.\n\nSpecify the subfield by tag, field number and subfield position.", "id": "f7891:m17"}
{"signature": "def record_add_fields(rec, tag, fields, field_position_local=None,<EOL>field_position_global=None):", "body": "if field_position_local is None and field_position_global is None:<EOL><INDENT>for field in fields:<EOL><INDENT>record_add_field(<EOL>rec, tag, ind1=field[<NUM_LIT:1>],<EOL>ind2=field[<NUM_LIT:2>], subfields=field[<NUM_LIT:0>],<EOL>controlfield_value=field[<NUM_LIT:3>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>fields.reverse()<EOL>for field in fields:<EOL><INDENT>record_add_field(<EOL>rec, tag, ind1=field[<NUM_LIT:1>], ind2=field[<NUM_LIT:2>],<EOL>subfields=field[<NUM_LIT:0>], controlfield_value=field[<NUM_LIT:3>],<EOL>field_position_local=field_position_local,<EOL>field_position_global=field_position_global)<EOL><DEDENT><DEDENT>return field_position_local<EOL>", "docstring": "Add the fields into the record at the required position.\n\nThe position is specified by the tag and the field_position_local in the\nlist of fields.\n\n:param rec: a record structure\n:param tag: the tag of the fields to be moved\n:param field_position_local: the field_position_local to which the field\n                             will be inserted. If not specified, appends\n                             the fields to the tag.\n:param a: list of fields to be added\n:return: -1 if the operation failed, or the field_position_local if it was\n         successful", "id": "f7891:m11"}
{"signature": "def field_add_subfield(field, code, value):", "body": "field[<NUM_LIT:0>].append((code, value))<EOL>", "docstring": "Add a subfield to field 'field'.", "id": "f7891:m28"}
{"signature": "def _fields_sort_by_indicators(fields):", "body": "field_dict = {}<EOL>field_positions_global = []<EOL>for field in fields:<EOL><INDENT>field_dict.setdefault(field[<NUM_LIT:1>:<NUM_LIT:3>], []).append(field)<EOL>field_positions_global.append(field[<NUM_LIT:4>])<EOL><DEDENT>indicators = list(field_dict.keys())<EOL>indicators.sort()<EOL>field_list = []<EOL>for indicator in indicators:<EOL><INDENT>for field in field_dict[indicator]:<EOL><INDENT>field_list.append(field[:<NUM_LIT:4>] + (field_positions_global.pop(<NUM_LIT:0>),))<EOL><DEDENT><DEDENT>return field_list<EOL>", "docstring": "Sort a set of fields by their indicators.\n\n    Return a sorted list with correct global field positions.", "id": "f7891:m51"}
{"signature": "def record_get_field_instances(rec, tag=\"<STR_LIT>\", ind1=\"<STR_LIT:U+0020>\", ind2=\"<STR_LIT:U+0020>\"):", "body": "if not rec:<EOL><INDENT>return []<EOL><DEDENT>if not tag:<EOL><INDENT>return list(rec.items())<EOL><DEDENT>else:<EOL><INDENT>out = []<EOL>ind1, ind2 = _wash_indicators(ind1, ind2)<EOL>if '<STR_LIT:%>' in tag:<EOL><INDENT>for field_tag in rec:<EOL><INDENT>if _tag_matches_pattern(field_tag, tag):<EOL><INDENT>for possible_field_instance in rec[field_tag]:<EOL><INDENT>if (ind1 in ('<STR_LIT:%>', possible_field_instance[<NUM_LIT:1>]) and<EOL>ind2 in ('<STR_LIT:%>', possible_field_instance[<NUM_LIT:2>])):<EOL><INDENT>out.append(possible_field_instance)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for possible_field_instance in rec.get(tag, []):<EOL><INDENT>if (ind1 in ('<STR_LIT:%>', possible_field_instance[<NUM_LIT:1>]) and<EOL>ind2 in ('<STR_LIT:%>', possible_field_instance[<NUM_LIT:2>])):<EOL><INDENT>out.append(possible_field_instance)<EOL><DEDENT><DEDENT><DEDENT>return out<EOL><DEDENT>", "docstring": "Return the list of field instances for the specified tag and indications.\n\nReturn empty list if not found.\nIf tag is empty string, returns all fields\n\nParameters (tag, ind1, ind2) can contain wildcard %.\n\n:param rec: a record structure as returned by create_record()\n:param tag: a 3 characters long string\n:param ind1: a 1 character long string\n:param ind2: a 1 character long string\n:param code: a 1 character long string\n:return: a list of field tuples (Subfields, ind1, ind2, value,\n         field_position_global) where subfields is list of (code, value)", "id": "f7891:m6"}
{"signature": "def _correct_record(record):", "body": "errors = []<EOL>for tag in list(record.keys()):<EOL><INDENT>upper_bound = '<STR_LIT>'<EOL>n = len(tag)<EOL>if n > <NUM_LIT:3>:<EOL><INDENT>i = n - <NUM_LIT:3><EOL>while i > <NUM_LIT:0>:<EOL><INDENT>upper_bound = '<STR_LIT>' % ('<STR_LIT:0>', upper_bound)<EOL>i -= <NUM_LIT:1><EOL><DEDENT><DEDENT>if tag == '<STR_LIT:!>':<EOL><INDENT>errors.append((<NUM_LIT:1>, '<STR_LIT>' +<EOL>str([f[<NUM_LIT:4>] for f in record[tag]]) + '<STR_LIT:)>'))<EOL>record['<STR_LIT>'] = record.pop(tag)<EOL>tag = '<STR_LIT>'<EOL><DEDENT>elif not ('<STR_LIT>' <= tag <= upper_bound or<EOL>tag in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')):<EOL><INDENT>errors.append(<NUM_LIT:2>)<EOL>record['<STR_LIT>'] = record.pop(tag)<EOL>tag = '<STR_LIT>'<EOL><DEDENT>fields = []<EOL>for field in record[tag]:<EOL><INDENT>if field[<NUM_LIT:0>] == [] and field[<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>errors.append((<NUM_LIT:8>, '<STR_LIT>' + str(field[<NUM_LIT:4>]) + '<STR_LIT:)>'))<EOL><DEDENT>subfields = []<EOL>for subfield in field[<NUM_LIT:0>]:<EOL><INDENT>if subfield[<NUM_LIT:0>] == '<STR_LIT:!>':<EOL><INDENT>errors.append((<NUM_LIT:3>, '<STR_LIT>' + str(field[<NUM_LIT:4>]) + '<STR_LIT:)>'))<EOL>newsub = ('<STR_LIT>', subfield[<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>newsub = subfield<EOL><DEDENT>subfields.append(newsub)<EOL><DEDENT>if field[<NUM_LIT:1>] == '<STR_LIT:!>':<EOL><INDENT>errors.append((<NUM_LIT:4>, '<STR_LIT>' + str(field[<NUM_LIT:4>]) + '<STR_LIT:)>'))<EOL>ind1 = \"<STR_LIT:U+0020>\"<EOL><DEDENT>else:<EOL><INDENT>ind1 = field[<NUM_LIT:1>]<EOL><DEDENT>if field[<NUM_LIT:2>] == '<STR_LIT:!>':<EOL><INDENT>errors.append((<NUM_LIT:5>, '<STR_LIT>' + str(field[<NUM_LIT:4>]) + '<STR_LIT:)>'))<EOL>ind2 = \"<STR_LIT:U+0020>\"<EOL><DEDENT>else:<EOL><INDENT>ind2 = field[<NUM_LIT:2>]<EOL><DEDENT>fields.append((subfields, ind1, ind2, field[<NUM_LIT:3>], field[<NUM_LIT:4>]))<EOL><DEDENT>record[tag] = fields<EOL><DEDENT>return errors<EOL>", 
"docstring": "Check and correct the structure of the record.\n\n:param record: the record data structure\n:return: a list of errors found", "id": "f7891:m60"}
{"signature": "def _validate_record_field_positions_global(record):", "body": "all_fields = []<EOL>for tag, fields in list(record.items()):<EOL><INDENT>previous_field_position_global = -<NUM_LIT:1><EOL>for field in fields:<EOL><INDENT>if field[<NUM_LIT:4>] < previous_field_position_global:<EOL><INDENT>return (\"<STR_LIT>\" %<EOL>tag)<EOL><DEDENT>previous_field_position_global = field[<NUM_LIT:4>]<EOL>if field[<NUM_LIT:4>] in all_fields:<EOL><INDENT>return (\"<STR_LIT>\" %<EOL>(field[<NUM_LIT:4>], tag))<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Check if the global field positions in the record are valid.\n\nI.e., no duplicate global field positions and local field positions in the\nlist of fields are ascending.\n\n:param record: the record data structure\n:return: the first error found as a string or None if no error was found", "id": "f7891:m49"}
{"signature": "def record_modify_subfield(rec, tag, subfield_code, value, subfield_position,<EOL>field_position_global=None,<EOL>field_position_local=None):", "body": "subfields = record_get_subfields(<EOL>rec, tag,<EOL>field_position_global=field_position_global,<EOL>field_position_local=field_position_local)<EOL>try:<EOL><INDENT>subfields[subfield_position] = (subfield_code, value)<EOL><DEDENT>except IndexError:<EOL><INDENT>raise InvenioBibRecordFieldError(<EOL>\"<STR_LIT>\" % subfield_position)<EOL><DEDENT>", "docstring": "Modify subfield at specified position.\n\n    Specify the subfield by tag, field number and subfield position.", "id": "f7891:m20"}
{"signature": "def parse(self, path_to_xml=None):", "body": "if not path_to_xml:<EOL><INDENT>if not self.path:<EOL><INDENT>self.logger.error(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>path_to_xml = self.path<EOL><DEDENT>root = self._clean_xml(path_to_xml)<EOL>if root.tag.lower() == '<STR_LIT>':<EOL><INDENT>tree = ET.ElementTree(root)<EOL>self.records = element_tree_collection_to_records(tree)<EOL><DEDENT>elif root.tag.lower() == '<STR_LIT>':<EOL><INDENT>new_root = ET.Element('<STR_LIT>')<EOL>new_root.append(root)<EOL>tree = ET.ElementTree(new_root)<EOL>self.records = element_tree_collection_to_records(tree)<EOL><DEDENT>else:<EOL><INDENT>header_subs = get_request_subfields(root)<EOL>records = root.find('<STR_LIT>')<EOL>if records is None:<EOL><INDENT>records = root.find('<STR_LIT>')<EOL><DEDENT>if records is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>tree = ET.ElementTree(records)<EOL>for record, is_deleted in element_tree_oai_records(tree, header_subs):<EOL><INDENT>if is_deleted:<EOL><INDENT>self.deleted_records.append(<EOL>self.create_deleted_record(record)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>self.records.append(record)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Parse an XML document and clean any namespaces.", "id": "f7891:c2:m1"}
{"signature": "def _shift_field_positions_global(record, start, delta=<NUM_LIT:1>):", "body": "if not delta:<EOL><INDENT>return<EOL><DEDENT>for tag, fields in list(record.items()):<EOL><INDENT>newfields = []<EOL>for field in fields:<EOL><INDENT>if field[<NUM_LIT:4>] < start:<EOL><INDENT>newfields.append(field)<EOL><DEDENT>else:<EOL><INDENT>newfields.append(tuple(list(field[:<NUM_LIT:4>]) + [field[<NUM_LIT:4>] + delta]))<EOL><DEDENT><DEDENT>record[tag] = newfields<EOL><DEDENT>", "docstring": "Shift all global field positions.\n\nShift all global field positions with global field positions\nhigher or equal to 'start' from the value 'delta'.", "id": "f7891:m47"}
{"signature": "def record_match_subfields(rec, tag, ind1=\"<STR_LIT:U+0020>\", ind2=\"<STR_LIT:U+0020>\", sub_key=None,<EOL>sub_value='<STR_LIT>', sub_key2=None, sub_value2='<STR_LIT>',<EOL>case_sensitive=True):", "body": "if sub_key is None:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if sub_key2 is not None and sub_value2 is '<STR_LIT>':<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\")<EOL><DEDENT>ind1, ind2 = _wash_indicators(ind1, ind2)<EOL>if not case_sensitive:<EOL><INDENT>sub_value = sub_value.lower()<EOL>sub_value2 = sub_value2.lower()<EOL><DEDENT>for field in record_get_field_instances(rec, tag, ind1, ind2):<EOL><INDENT>subfields = dict(field_get_subfield_instances(field))<EOL>if not case_sensitive:<EOL><INDENT>for k, v in subfields.items():<EOL><INDENT>subfields[k] = v.lower()<EOL><DEDENT><DEDENT>if sub_key in subfields:<EOL><INDENT>if sub_value is '<STR_LIT>':<EOL><INDENT>return field[<NUM_LIT:4>]<EOL><DEDENT>else:<EOL><INDENT>if sub_value == subfields[sub_key]:<EOL><INDENT>if sub_key2 is None:<EOL><INDENT>return field[<NUM_LIT:4>]<EOL><DEDENT>else:<EOL><INDENT>if sub_key2 in subfields:<EOL><INDENT>if sub_value2 == subfields[sub_key2]:<EOL><INDENT>return field[<NUM_LIT:4>]<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return False<EOL>", "docstring": "Find subfield instances in a particular field.\n\nIt tests values in 1 of 3 possible ways:\n - Does a subfield code exist? (ie does 773__a exist?)\n - Does a subfield have a particular value? 
(ie 773__a == 'PhysX')\n - Do a pair of subfields have particular values?\n    (ie 035__2 == 'CDS' and 035__a == '123456')\n\nParameters:\n * rec - dictionary: a bibrecord structure\n * tag - string: the tag of the field (ie '773')\n * ind1, ind2 - char: a single characters for the MARC indicators\n * sub_key - char: subfield key to find\n * sub_value - string: subfield value of that key\n * sub_key2 - char: key of subfield to compare against\n * sub_value2 - string: expected value of second subfield\n * case_sensitive - bool: be case sensitive when matching values\n\n:return: false if no match found, else provides the field position (int)", "id": "f7891:m36"}
{"signature": "def filter_field_instances(field_instances, filter_subcode, filter_value,<EOL>filter_mode='<STR_LIT:e>'):", "body": "matched = []<EOL>if filter_mode == '<STR_LIT:e>':<EOL><INDENT>to_match = (filter_subcode, filter_value)<EOL>for instance in field_instances:<EOL><INDENT>if to_match in instance[<NUM_LIT:0>]:<EOL><INDENT>matched.append(instance)<EOL><DEDENT><DEDENT><DEDENT>elif filter_mode == '<STR_LIT:s>':<EOL><INDENT>for instance in field_instances:<EOL><INDENT>for subfield in instance[<NUM_LIT:0>]:<EOL><INDENT>if subfield[<NUM_LIT:0>] == filter_subcode and subfield[<NUM_LIT:1>].find(filter_value) > -<NUM_LIT:1>:<EOL><INDENT>matched.append(instance)<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif filter_mode == '<STR_LIT:r>':<EOL><INDENT>reg_exp = re.compile(filter_value)<EOL>for instance in field_instances:<EOL><INDENT>for subfield in instance[<NUM_LIT:0>]:<EOL><INDENT>if subfield[<NUM_LIT:0>] == filter_subcode and reg_exp.match(subfield[<NUM_LIT:1>]) is not None:<EOL><INDENT>matched.append(instance)<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return matched<EOL>", "docstring": "Filter the given field.\n\n    Filters given field and returns only those field instances that contain\n    filter_subcode with given filter_value. As an input for search function\n    accepts output from record_get_field_instances function. 
Function can be\n    run in three modes:\n\n    - 'e' - looking for exact match in subfield value\n    - 's' - looking for substring in subfield value\n    - 'r' - looking for regular expression in subfield value\n\n    Example:\n\n    record_filter_field(record_get_field_instances(rec, '999', '%', '%'),\n                        'y', '2001')\n\n    In this case filter_subcode is 'y' and filter_value is '2001'.\n\n    :param field_instances: output from record_get_field_instances\n    :param filter_subcode: name of the subfield\n    :type filter_subcode: string\n    :param filter_value: value of the subfield\n    :type filter_value: string\n    :param filter_mode: 'e','s' or 'r'", "id": "f7891:m3"}
{"signature": "def record_has_field(rec, tag):", "body": "return tag in rec<EOL>", "docstring": "Check if the tag exists in the record.\n\n:param rec: the record data structure\n:param tag: the field tag\n:return: a boolean", "id": "f7891:m8"}
{"signature": "def _concat(alist):", "body": "return [element for single_list in alist for element in single_list]<EOL>", "docstring": "Concatenate a list of lists.", "id": "f7891:m53"}
{"signature": "def record_move_subfield(rec, tag, subfield_position, new_subfield_position,<EOL>field_position_global=None,<EOL>field_position_local=None):", "body": "subfields = record_get_subfields(<EOL>rec,<EOL>tag,<EOL>field_position_global=field_position_global,<EOL>field_position_local=field_position_local)<EOL>try:<EOL><INDENT>subfield = subfields.pop(subfield_position)<EOL>subfields.insert(new_subfield_position, subfield)<EOL><DEDENT>except IndexError:<EOL><INDENT>raise InvenioBibRecordFieldError(<EOL>\"<STR_LIT>\" % subfield_position)<EOL><DEDENT>", "docstring": "Move subfield at specified position.\n\n    Specify the subfield by tag, field number and subfield position to new\n    subfield position.", "id": "f7891:m21"}
{"signature": "def record_move_fields(rec, tag, field_positions_local,<EOL>field_position_local=None):", "body": "fields = record_delete_fields(<EOL>rec, tag,<EOL>field_positions_local=field_positions_local)<EOL>return record_add_fields(<EOL>rec, tag, fields,<EOL>field_position_local=field_position_local)<EOL>", "docstring": "Move some fields to the position specified by 'field_position_local'.\n\n:param rec: a record structure as returned by create_record()\n:param tag: the tag of the fields to be moved\n:param field_positions_local: the positions of the fields to move\n:param field_position_local: insert the field before that\n                             field_position_local. If unspecified, appends\n                             the fields.\n:return: the field_position_local if the operation was successful", "id": "f7891:m12"}
{"signature": "def _record_sort_by_indicators(record):", "body": "for tag, fields in list(record.items()):<EOL><INDENT>record[tag] = _fields_sort_by_indicators(fields)<EOL><DEDENT>", "docstring": "Sort the fields inside the record by indicators.", "id": "f7891:m50"}
{"signature": "def record_strip_controlfields(rec):", "body": "for tag in list(rec.keys()):<EOL><INDENT>if tag[:<NUM_LIT:2>] == '<STR_LIT>' and rec[tag][<NUM_LIT:0>][<NUM_LIT:3>]:<EOL><INDENT>del rec[tag]<EOL><DEDENT><DEDENT>", "docstring": "Remove all non-empty controlfields from the record.\n\n:param rec:  A record dictionary structure\n:type  rec:  dictionary", "id": "f7891:m40"}
{"signature": "def field_xml_output(field, tag):", "body": "marcxml = []<EOL>if field[<NUM_LIT:3>]:<EOL><INDENT>marcxml.append('<STR_LIT>' %<EOL>(tag, MathMLParser.html_to_text(field[<NUM_LIT:3>])))<EOL><DEDENT>else:<EOL><INDENT>marcxml.append('<STR_LIT>' %<EOL>(tag, field[<NUM_LIT:1>], field[<NUM_LIT:2>]))<EOL>marcxml += [_subfield_xml_output(subfield) for subfield in field[<NUM_LIT:0>]]<EOL>marcxml.append('<STR_LIT>')<EOL><DEDENT>return '<STR_LIT:\\n>'.join(marcxml)<EOL>", "docstring": "Generate the XML for field 'field' and returns it as a string.", "id": "f7891:m30"}
{"signature": "def _warning(code):", "body": "if isinstance(code, str):<EOL><INDENT>return code<EOL><DEDENT>message = '<STR_LIT>'<EOL>if isinstance(code, tuple):<EOL><INDENT>if isinstance(code[<NUM_LIT:0>], str):<EOL><INDENT>message = code[<NUM_LIT:1>]<EOL>code = code[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>return CFG_BIBRECORD_WARNING_MSGS.get(code, '<STR_LIT>') + message<EOL>", "docstring": "Return a warning message of code 'code'.\n\nIf code = (cd, str) it returns the warning message of code 'cd' and appends\nstr at the end", "id": "f7891:m61"}
{"signature": "def get_records(self):", "body": "return self.records<EOL>", "docstring": "Return all records found.", "id": "f7891:c2:m4"}
{"signature": "def create_field(subfields=None, ind1='<STR_LIT:U+0020>', ind2='<STR_LIT:U+0020>', controlfield_value='<STR_LIT>',<EOL>global_position=-<NUM_LIT:1>):", "body": "if subfields is None:<EOL><INDENT>subfields = []<EOL><DEDENT>ind1, ind2 = _wash_indicators(ind1, ind2)<EOL>field = (subfields, ind1, ind2, controlfield_value, global_position)<EOL>_check_field_validity(field)<EOL>return field<EOL>", "docstring": "Return a field created with the provided elements.\n\nGlobal position is set arbitrary to -1.", "id": "f7891:m0"}
{"signature": "def _order_by_tags(field1, field2):", "body": "return cmp(field1[<NUM_LIT:0>], field2[<NUM_LIT:0>])<EOL>", "docstring": "Function used to order the fields according to the tags.", "id": "f7891:m56"}
{"signature": "def field_get_subfields(field):", "body": "pairs = {}<EOL>for key, value in field[<NUM_LIT:0>]:<EOL><INDENT>if key in pairs and pairs[key] != value:<EOL><INDENT>pairs[key].append(value)<EOL><DEDENT>else:<EOL><INDENT>pairs[key] = [value]<EOL><DEDENT><DEDENT>return pairs<EOL>", "docstring": "Given a field, will place all subfields into a dictionary\n    Parameters:\n     * field - tuple: The field to get subfields for\n    Returns: a dictionary, codes as keys and a list of values as the value", "id": "f7891:m43"}
{"signature": "def record_drop_duplicate_fields(record):", "body": "out = {}<EOL>position = <NUM_LIT:0><EOL>tags = sorted(record.keys())<EOL>for tag in tags:<EOL><INDENT>fields = record[tag]<EOL>out[tag] = []<EOL>current_fields = set()<EOL>for full_field in fields:<EOL><INDENT>field = (tuple(full_field[<NUM_LIT:0>]),) + full_field[<NUM_LIT:1>:<NUM_LIT:4>]<EOL>if field not in current_fields:<EOL><INDENT>current_fields.add(field)<EOL>position += <NUM_LIT:1><EOL>out[tag].append(full_field[:<NUM_LIT:4>] + (position,))<EOL><DEDENT><DEDENT><DEDENT>return out<EOL>", "docstring": "Return a record where all the duplicate fields have been removed.\n\nFields are considered identical considering also the order of their\nsubfields.", "id": "f7891:m4"}
{"signature": "def _create_record_lxml(marcxml,<EOL>verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,<EOL>correct=CFG_BIBRECORD_DEFAULT_CORRECT,<EOL>keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):", "body": "parser = etree.XMLParser(dtd_validation=correct,<EOL>recover=(verbose <= <NUM_LIT:3>))<EOL>if correct:<EOL><INDENT>marcxml = '<STR_LIT>''<STR_LIT>' % (marcxml,)<EOL><DEDENT>try:<EOL><INDENT>tree = etree.parse(StringIO(marcxml), parser)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise InvenioBibRecordParserError(str(e))<EOL><DEDENT>record = {}<EOL>field_position_global = <NUM_LIT:0><EOL>controlfield_iterator = tree.iter(tag='<STR_LIT>')<EOL>for controlfield in controlfield_iterator:<EOL><INDENT>tag = controlfield.attrib.get('<STR_LIT>', '<STR_LIT:!>').encode(\"<STR_LIT>\")<EOL>ind1 = '<STR_LIT:U+0020>'<EOL>ind2 = '<STR_LIT:U+0020>'<EOL>text = controlfield.text<EOL>if text is None:<EOL><INDENT>text = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>text = text.encode(\"<STR_LIT>\")<EOL><DEDENT>subfields = []<EOL>if text or keep_singletons:<EOL><INDENT>field_position_global += <NUM_LIT:1><EOL>record.setdefault(tag, []).append((subfields, ind1, ind2, text,<EOL>field_position_global))<EOL><DEDENT><DEDENT>datafield_iterator = tree.iter(tag='<STR_LIT>')<EOL>for datafield in datafield_iterator:<EOL><INDENT>tag = datafield.attrib.get('<STR_LIT>', '<STR_LIT:!>').encode(\"<STR_LIT>\")<EOL>ind1 = datafield.attrib.get('<STR_LIT>', '<STR_LIT:!>').encode(\"<STR_LIT>\")<EOL>ind2 = datafield.attrib.get('<STR_LIT>', '<STR_LIT:!>').encode(\"<STR_LIT>\")<EOL>if ind1 in ('<STR_LIT>', '<STR_LIT:_>'):<EOL><INDENT>ind1 = '<STR_LIT:U+0020>'<EOL><DEDENT>if ind2 in ('<STR_LIT>', '<STR_LIT:_>'):<EOL><INDENT>ind2 = '<STR_LIT:U+0020>'<EOL><DEDENT>subfields = []<EOL>subfield_iterator = datafield.iter(tag='<STR_LIT>')<EOL>for subfield in subfield_iterator:<EOL><INDENT>code = subfield.attrib.get('<STR_LIT:code>', '<STR_LIT:!>').encode(\"<STR_LIT>\")<EOL>text = subfield.text<EOL>if text is 
None:<EOL><INDENT>text = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>text = text.encode(\"<STR_LIT>\")<EOL><DEDENT>if text or keep_singletons:<EOL><INDENT>subfields.append((code, text))<EOL><DEDENT><DEDENT>if subfields or keep_singletons:<EOL><INDENT>text = '<STR_LIT>'<EOL>field_position_global += <NUM_LIT:1><EOL>record.setdefault(tag, []).append((subfields, ind1, ind2, text,<EOL>field_position_global))<EOL><DEDENT><DEDENT>return record<EOL>", "docstring": "Create a record object using the LXML parser.\n\nIf correct == 1, then perform DTD validation\nIf correct == 0, then do not perform DTD validation\n\nIf verbose == 0, the parser will not give warnings.\nIf 1 <= verbose <= 3, the parser will not give errors, but will warn\n    the user about possible mistakes (implement me!)\nIf verbose > 3 then the parser will be strict and will stop in case of\n    well-formedness errors or DTD errors.", "id": "f7891:m52"}
{"signature": "def record_empty(rec):", "body": "for key in rec.keys():<EOL><INDENT>if key not in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Return True if the record contains no fields apart from two exempt tags, otherwise False.\n\n:param rec: the record data structure\n:return: a boolean", "id": "f7891:m42"}
{"signature": "def _get_children_as_string(node):", "body": "out = []<EOL>if node:<EOL><INDENT>for child in node:<EOL><INDENT>if child.nodeType == child.TEXT_NODE:<EOL><INDENT>out.append(child.data)<EOL><DEDENT>else:<EOL><INDENT>out.append(_get_children_as_string(child.childNodes))<EOL><DEDENT><DEDENT><DEDENT>return '<STR_LIT>'.join(out)<EOL>", "docstring": "Iterate through all the children of a node.\n\n    Returns one string containing the values from all the text-nodes\n    recursively.", "id": "f7891:m58"}
{"signature": "def record_delete_fields(rec, tag, field_positions_local=None):", "body": "if tag not in rec:<EOL><INDENT>return []<EOL><DEDENT>new_fields, deleted_fields = [], []<EOL>for position, field in enumerate(rec.get(tag, [])):<EOL><INDENT>if field_positions_local is None or position in field_positions_local:<EOL><INDENT>deleted_fields.append(field)<EOL><DEDENT>else:<EOL><INDENT>new_fields.append(field)<EOL><DEDENT><DEDENT>if new_fields:<EOL><INDENT>rec[tag] = new_fields<EOL><DEDENT>else:<EOL><INDENT>del rec[tag]<EOL><DEDENT>return deleted_fields<EOL>", "docstring": "Delete all/some fields defined with MARC tag 'tag' from record 'rec'.\n\n:param rec: a record structure.\n:type rec: tuple\n:param tag: three letter field.\n:type tag: string\n:param field_positions_local: if set, it is the list of local positions\n    within all the fields with the specified tag, that should be deleted.\n    If not set all the fields with the specified tag will be deleted.\n:type field_positions_local: sequence\n:return: the list of deleted fields.\n:rtype: list\n:note: the record is modified in place.", "id": "f7891:m10"}
{"signature": "def concat(alist):", "body": "newl = []<EOL>for l in alist:<EOL><INDENT>newl.extend(l)<EOL><DEDENT>return newl<EOL>", "docstring": "Concatenate a list of lists.", "id": "f7891:m34"}
{"signature": "def field_get_subfield_values(field_instance, code):", "body": "return [subfield_value<EOL>for subfield_code, subfield_value in field_instance[<NUM_LIT:0>]<EOL>if subfield_code == code]<EOL>", "docstring": "Return subfield CODE values of the field instance FIELD.", "id": "f7891:m26"}
{"signature": "def print_rec(rec, format=<NUM_LIT:1>, tags=None):", "body": "if tags is None:<EOL><INDENT>tags = []<EOL><DEDENT>if format == <NUM_LIT:1>:<EOL><INDENT>text = record_xml_output(rec, tags)<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return text<EOL>", "docstring": "Print a record.\n\n:param format: 1 XML, 2 HTML (not implemented)\n:param tags: list of tags to be printed", "id": "f7891:m32"}
{"signature": "def _compare_fields(field1, field2, strict=True):", "body": "if strict:<EOL><INDENT>return field1[:<NUM_LIT:4>] == field2[:<NUM_LIT:4>]<EOL><DEDENT>else:<EOL><INDENT>if field1[<NUM_LIT:1>:<NUM_LIT:4>] != field2[<NUM_LIT:1>:<NUM_LIT:4>]:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return set(field1[<NUM_LIT:0>]) == set(field2[<NUM_LIT:0>])<EOL><DEDENT><DEDENT>", "docstring": "Compare 2 fields.\n\nIf strict is True, then the order of the subfield will be taken care of, if\nnot then the order of the subfields doesn't matter.\n\n:return: True if the field are equivalent, False otherwise.", "id": "f7891:m45"}
{"signature": "def record_get_field(rec, tag, field_position_global=None,<EOL>field_position_local=None):", "body": "if field_position_global is None and field_position_local is None:<EOL><INDENT>raise InvenioBibRecordFieldError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>elif field_position_global is not None and field_position_local is not None:<EOL><INDENT>raise InvenioBibRecordFieldError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>elif field_position_global:<EOL><INDENT>if tag not in rec:<EOL><INDENT>raise InvenioBibRecordFieldError(\"<STR_LIT>\" % tag)<EOL><DEDENT>for field in rec[tag]:<EOL><INDENT>if field[<NUM_LIT:4>] == field_position_global:<EOL><INDENT>return field<EOL><DEDENT><DEDENT>raise InvenioBibRecordFieldError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (tag, field_position_global))<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>return rec[tag][field_position_local]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise InvenioBibRecordFieldError(\"<STR_LIT>\" % tag)<EOL><DEDENT>except IndexError:<EOL><INDENT>raise InvenioBibRecordFieldError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (tag, field_position_local))<EOL><DEDENT><DEDENT>", "docstring": "Return the matching field.\n\nOne has to enter either a global field position or a local field position.\n\n:return: a list of subfield tuples (subfield code, value).\n:rtype: list", "id": "f7891:m14"}
{"signature": "def _warnings(alist):", "body": "return [_warning(element) for element in alist]<EOL>", "docstring": "Apply the function _warning() to every element in alist.", "id": "f7891:m62"}
{"signature": "def record_delete_subfield(rec, tag, subfield_code, ind1='<STR_LIT:U+0020>', ind2='<STR_LIT:U+0020>'):", "body": "ind1, ind2 = _wash_indicators(ind1, ind2)<EOL>for field in rec.get(tag, []):<EOL><INDENT>if field[<NUM_LIT:1>] == ind1 and field[<NUM_LIT:2>] == ind2:<EOL><INDENT>field[<NUM_LIT:0>][:] = [subfield for subfield in field[<NUM_LIT:0>]<EOL>if subfield_code != subfield[<NUM_LIT:0>]]<EOL><DEDENT><DEDENT>", "docstring": "Delete all subfields with subfield_code in the record.", "id": "f7891:m13"}
{"signature": "def record_modify_controlfield(rec, tag, controlfield_value,<EOL>field_position_global=None,<EOL>field_position_local=None):", "body": "field = record_get_field(<EOL>rec, tag,<EOL>field_position_global=field_position_global,<EOL>field_position_local=field_position_local)<EOL>new_field = (field[<NUM_LIT:0>], field[<NUM_LIT:1>], field[<NUM_LIT:2>], controlfield_value, field[<NUM_LIT:4>])<EOL>record_replace_field(<EOL>rec, tag, new_field,<EOL>field_position_global=field_position_global,<EOL>field_position_local=field_position_local)<EOL>", "docstring": "Modify controlfield at position specified by tag and field number.", "id": "f7891:m19"}
{"signature": "def _tag_matches_pattern(tag, pattern):", "body": "for char1, char2 in zip(tag, pattern):<EOL><INDENT>if char2 not in ('<STR_LIT:%>', char1):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Return true if MARC 'tag' matches a 'pattern'.\n\n    'pattern' is plain text, with % as wildcard\n\n    Both parameters must be 3 characters long strings.\n\n    .. doctest::\n\n        >>> _tag_matches_pattern(\"909\", \"909\")\n        True\n        >>> _tag_matches_pattern(\"909\", \"9%9\")\n        True\n        >>> _tag_matches_pattern(\"909\", \"9%8\")\n        False\n\n    :param tag: a 3 characters long string\n    :param pattern: a 3 characters long string\n    :return: False or True", "id": "f7891:m48"}
{"signature": "def create_deleted_record(self, record):", "body": "identifier = record_get_field_value(record,<EOL>tag=\"<STR_LIT>\",<EOL>code=\"<STR_LIT:a>\")<EOL>recid = identifier.split(\"<STR_LIT::>\")[-<NUM_LIT:1>]<EOL>try:<EOL><INDENT>source = identifier.split(\"<STR_LIT::>\")[<NUM_LIT:1>]<EOL><DEDENT>except IndexError:<EOL><INDENT>source = \"<STR_LIT>\"<EOL><DEDENT>record_add_field(record, \"<STR_LIT>\",<EOL>subfields=[(\"<STR_LIT>\", source), (\"<STR_LIT:a>\", recid)])<EOL>record_add_field(record, \"<STR_LIT>\",<EOL>subfields=[(\"<STR_LIT:c>\", \"<STR_LIT>\")])<EOL>return record<EOL>", "docstring": "Generate the record deletion if deleted form OAI-PMH.", "id": "f7891:c2:m3"}
{"signature": "def get_record_rich(self, filename, ref_extract_callback=None):", "body": "self.document = parse(filename)<EOL>rec = create_record()<EOL>articles = self.document.getElementsByTagName('<STR_LIT>')<EOL>for article in articles:<EOL><INDENT>article_type = article.getAttribute('<STR_LIT>')<EOL>if not article_type == '<STR_LIT>':<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>doi = get_value_in_tag(self.document, '<STR_LIT>')<EOL>date = '<STR_LIT>'<EOL>for tag in self.document.getElementsByTagName('<STR_LIT>'):<EOL><INDENT>year = get_value_in_tag(tag, '<STR_LIT>')<EOL>month = get_value_in_tag(tag, '<STR_LIT>').zfill(<NUM_LIT:2>)<EOL>day = get_value_in_tag(tag, '<STR_LIT>').zfill(<NUM_LIT:2>)<EOL>date = \"<STR_LIT>\" % (year, month, day)<EOL><DEDENT>if not date:<EOL><INDENT>for tag in self.document.getElementsByTagName('<STR_LIT>'):<EOL><INDENT>year = get_value_in_tag(tag, '<STR_LIT>')<EOL>month = get_value_in_tag(tag, '<STR_LIT>').zfill(<NUM_LIT:2>)<EOL>day = get_value_in_tag(tag, '<STR_LIT>').zfill(<NUM_LIT:2>)<EOL>date = \"<STR_LIT>\" % (year, month, day)<EOL><DEDENT><DEDENT>first_page = get_value_in_tag(article, '<STR_LIT>')<EOL>last_page = get_value_in_tag(article, '<STR_LIT>')<EOL>subjects = article.getElementsByTagName('<STR_LIT>')<EOL>subjects = map(xml_to_text, subjects)<EOL>subject = '<STR_LIT:U+002CU+0020>'.join(subjects)<EOL>copyright_statement = get_value_in_tag(article, '<STR_LIT>')<EOL><DEDENT>journal = get_value_in_tag(self.document, '<STR_LIT>')<EOL>journal, volume = fix_journal_name(journal, self.journal_mappings)<EOL>issues = self.document.getElementsByTagName('<STR_LIT>')<EOL>for issue in issues:<EOL><INDENT>volume += get_value_in_tag(issue, '<STR_LIT>')<EOL>year = get_value_in_tag(issue, '<STR_LIT>')<EOL><DEDENT>title = get_value_in_tag(self.document, '<STR_LIT>')<EOL>authors = self.document.getElementsByTagName('<STR_LIT>')<EOL>affiliations = self.document.getElementsByTagName('<STR_LIT>')<EOL>def affiliation_pair(a):<EOL><INDENT>return 
a.getAttribute('<STR_LIT>'), get_value_in_tag(<EOL>a, '<STR_LIT>'<EOL>)<EOL><DEDENT>affiliations = map(affiliation_pair, affiliations)<EOL>affiliations = dict(affiliations)<EOL>def author_pair(a):<EOL><INDENT>surname = get_value_in_tag(a, '<STR_LIT>')<EOL>first_name = get_value_in_tag(a, '<STR_LIT>')<EOL>middle_name = get_value_in_tag(a, '<STR_LIT>')<EOL>if middle_name:<EOL><INDENT>name = '<STR_LIT>' % (surname, first_name, middle_name)<EOL><DEDENT>else:<EOL><INDENT>name = '<STR_LIT>' % (surname, first_name)<EOL><DEDENT>try:<EOL><INDENT>affid = a.getElementsByTagName(<EOL>'<STR_LIT>'<EOL>)[<NUM_LIT:0>].getAttribute('<STR_LIT>')<EOL>affiliation = affiliations[affid]<EOL><DEDENT>except IndexError:<EOL><INDENT>affiliation = '<STR_LIT>'<EOL><DEDENT>except KeyError:<EOL><INDENT>affiliation = '<STR_LIT>'<EOL><DEDENT>return name, affiliation<EOL><DEDENT>authors = map(author_pair, authors)<EOL>abstract = get_value_in_tag(self.document, '<STR_LIT>')<EOL>references = self.document.getElementsByTagName('<STR_LIT>')<EOL>for reference in references:<EOL><INDENT>subfields = []<EOL>label = reference.getAttribute('<STR_LIT:N>')<EOL>if label:<EOL><INDENT>subfields.append(('<STR_LIT:o>', label))<EOL><DEDENT>bibliosets = reference.getElementsByTagName('<STR_LIT>')<EOL>for tag in bibliosets:<EOL><INDENT>ref_year = get_value_in_tag(tag, '<STR_LIT>')<EOL>ref_journal = get_value_in_tag(tag, '<STR_LIT>')<EOL>ref_journal, ref_volume = fix_journal_name(<EOL>ref_journal, self.journal_mappings<EOL>)<EOL>ref_volume += get_value_in_tag(tag, '<STR_LIT>')<EOL>ref_page = get_value_in_tag(tag, '<STR_LIT>')<EOL>if ref_year:<EOL><INDENT>subfields.append(('<STR_LIT:y>', ref_year))<EOL><DEDENT>if ref_journal and ref_volume and ref_page:<EOL><INDENT>subfields.append(('<STR_LIT:s>', '<STR_LIT>' % (ref_journal,<EOL>ref_volume,<EOL>ref_page)))<EOL><DEDENT>reference.removeChild(tag)<EOL><DEDENT>text_ref = xml_to_text(reference)<EOL>if ref_extract_callback:<EOL><INDENT>ref_xml = 
ref_extract_callback(text_ref)<EOL>dom = parseString(ref_xml)<EOL>fields = dom.getElementsByTagName(\"<STR_LIT>\")[<NUM_LIT:0>]<EOL>fields = fields.getElementsByTagName(\"<STR_LIT>\")<EOL>if fields:<EOL><INDENT>subfields.append(('<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>for field in fields:<EOL><INDENT>data = field.firstChild.data<EOL>code = field.getAttribute(\"<STR_LIT:code>\")<EOL>if code == '<STR_LIT:m>' and bibliosets:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>subfields.append((code, data))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>subfields.append(('<STR_LIT:m>', text_ref))<EOL><DEDENT>if subfields:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT:C>', ind2='<STR_LIT:5>',<EOL>subfields=subfields)<EOL><DEDENT><DEDENT>if title:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', title)])<EOL><DEDENT>if date:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:c>', date),<EOL>('<STR_LIT:t>', '<STR_LIT>')])<EOL><DEDENT>if doi:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT>', subfields=[('<STR_LIT:a>', doi),<EOL>('<STR_LIT:2>', '<STR_LIT>')])<EOL><DEDENT>if abstract:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', abstract),<EOL>('<STR_LIT>', '<STR_LIT>')])<EOL><DEDENT>first_author = True<EOL>for author in authors:<EOL><INDENT>if first_author:<EOL><INDENT>subfields = [('<STR_LIT:a>', author[<NUM_LIT:0>])]<EOL>if author[<NUM_LIT:1>]:<EOL><INDENT>subfields.append(('<STR_LIT:v>', author[<NUM_LIT:1>]))<EOL><DEDENT>record_add_field(rec, '<STR_LIT:100>', subfields=subfields)<EOL>first_author = False<EOL><DEDENT>else:<EOL><INDENT>subfields = [('<STR_LIT:a>', author[<NUM_LIT:0>])]<EOL>if author[<NUM_LIT:1>]:<EOL><INDENT>subfields.append(('<STR_LIT:v>', author[<NUM_LIT:1>]))<EOL><DEDENT>record_add_field(rec, '<STR_LIT>', subfields=subfields)<EOL><DEDENT><DEDENT>subfields = []<EOL>if journal and volume and first_page:<EOL><INDENT>subfields.append(('<STR_LIT:s>', 
\"<STR_LIT>\" % (journal,<EOL>volume,<EOL>first_page)))<EOL><DEDENT>if first_page and last_page:<EOL><INDENT>try:<EOL><INDENT>nuber_of_pages = int(last_page) - int(first_page)<EOL>record_add_field(rec, '<STR_LIT>',<EOL>subfields=[('<STR_LIT:a>', str(nuber_of_pages))])<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT>subfields.append(('<STR_LIT:c>', '<STR_LIT>' % (first_page,<EOL>last_page)))<EOL><DEDENT>if year:<EOL><INDENT>subfields.append(('<STR_LIT:y>', year))<EOL><DEDENT>record_add_field(rec, '<STR_LIT>', subfields=subfields)<EOL>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', '<STR_LIT>')])<EOL>if copyright_statement:<EOL><INDENT>record_add_field(rec, '<STR_LIT>',<EOL>subfields=[('<STR_LIT:f>', copyright_statement)])<EOL><DEDENT>if subject:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT:1>', ind2='<STR_LIT>',<EOL>subfields=[('<STR_LIT:2>', '<STR_LIT>'),<EOL>('<STR_LIT:a>', subject)])<EOL><DEDENT>try:<EOL><INDENT>return record_xml_output(rec)<EOL><DEDENT>except UnicodeDecodeError:<EOL><INDENT>message = \"<STR_LIT>\" + doi<EOL>sys.stderr.write(message)<EOL>return \"<STR_LIT>\"<EOL><DEDENT>", "docstring": "Gets the Marc xml of the files in xaml_rich directory\n\n:param fileName: the name of the file to parse.\n:type fileName: string\n\n:returns: a string with the marc xml version of the file.", "id": "f7892:c0:m7"}
{"signature": "def get_record(self, fileName, ref_extract_callback=None):", "body": "self.document = parse(fileName)<EOL>article_type = self._get_article_type()<EOL>if article_type not in ['<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>']:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>rec = create_record()<EOL>title, subtitle, notes = self._get_title()<EOL>subfields = []<EOL>if subtitle:<EOL><INDENT>subfields.append(('<STR_LIT:b>', subtitle))<EOL><DEDENT>if title:<EOL><INDENT>subfields.append(('<STR_LIT:a>', title))<EOL>record_add_field(rec, '<STR_LIT>', subfields=subfields)<EOL><DEDENT>subjects = self.document.getElementsByTagName('<STR_LIT>')<EOL>subjects = map(xml_to_text, subjects)<EOL>for note_id in notes:<EOL><INDENT>note = self._get_note(note_id)<EOL>if note:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', note)])<EOL><DEDENT><DEDENT>for subject in subjects:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT:1>', ind2='<STR_LIT>',<EOL>subfields=[('<STR_LIT:2>', '<STR_LIT>'),<EOL>('<STR_LIT:a>', subject)])<EOL><DEDENT>keywords = self._get_keywords()<EOL>for keyword in keywords:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT:1>', subfields=[('<STR_LIT:a>', keyword),<EOL>('<STR_LIT>', '<STR_LIT>')])<EOL><DEDENT>journal, volume, issue, year, date, doi, page,fpage, lpage = self._get_publication_information()<EOL>astronomy_journals = ['<STR_LIT>', '<STR_LIT>']<EOL>if journal in astronomy_journals:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT:1>', ind2='<STR_LIT>',<EOL>subfields=[('<STR_LIT:2>', '<STR_LIT>'),<EOL>('<STR_LIT:a>', '<STR_LIT>')])<EOL><DEDENT>if date:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:c>', date),<EOL>('<STR_LIT:t>', '<STR_LIT>')])<EOL><DEDENT>if doi:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT>', subfields=[('<STR_LIT:a>', doi),<EOL>('<STR_LIT:2>', '<STR_LIT>')])<EOL><DEDENT>abstract = self._get_abstract()<EOL>abstract = 
self._format_abstract(abstract)<EOL>if abstract:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', abstract),<EOL>('<STR_LIT>', '<STR_LIT>')])<EOL><DEDENT>license, license_type, license_url = self._get_license()<EOL>subfields = []<EOL>if license:<EOL><INDENT>subfields.append(('<STR_LIT:a>', license))<EOL><DEDENT>if license_url:<EOL><INDENT>subfields.append(('<STR_LIT:u>', license_url))<EOL><DEDENT>if subfields:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=subfields)<EOL><DEDENT>if license_type == '<STR_LIT>':<EOL><INDENT>self._attach_fulltext(rec, doi)<EOL><DEDENT>number_of_pages = self._get_page_count()<EOL>if number_of_pages:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', number_of_pages)])<EOL><DEDENT>c_holder, c_year, c_statement = self._get_copyright()<EOL>if c_holder and c_year:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:d>', c_holder),<EOL>('<STR_LIT:g>', c_year),<EOL>('<STR_LIT:e>', '<STR_LIT>')])<EOL><DEDENT>elif c_statement:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:f>', c_statement),<EOL>('<STR_LIT:e>', '<STR_LIT>')])<EOL><DEDENT>subfields = []<EOL>if journal:<EOL><INDENT>subfields.append(('<STR_LIT:p>', journal))<EOL><DEDENT>if issue:<EOL><INDENT>subfields.append(('<STR_LIT:n>', issue))<EOL><DEDENT>if volume:<EOL><INDENT>subfields.append(('<STR_LIT:v>', volume))<EOL><DEDENT>if fpage and lpage:<EOL><INDENT>subfields.append(('<STR_LIT:c>', '<STR_LIT>' % (fpage,<EOL>lpage)))<EOL><DEDENT>elif page:<EOL><INDENT>subfields.append(('<STR_LIT:c>', page))<EOL><DEDENT>if year:<EOL><INDENT>subfields.append(('<STR_LIT:y>', year))<EOL><DEDENT>record_add_field(rec, '<STR_LIT>', subfields=subfields)<EOL>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', '<STR_LIT>')])<EOL>conference = '<STR_LIT>'<EOL>for tag in self.document.getElementsByTagName('<STR_LIT>'):<EOL><INDENT>conference = xml_to_text(tag)<EOL><DEDENT>if 
conference:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', '<STR_LIT>')])<EOL>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', conference)])<EOL><DEDENT>self._add_references(rec, ref_extract_callback)<EOL>self._add_authors(rec)<EOL>try:<EOL><INDENT>return record_xml_output(rec)<EOL><DEDENT>except UnicodeDecodeError:<EOL><INDENT>message = \"<STR_LIT>\" + doi<EOL>sys.stderr.write(message)<EOL>return \"<STR_LIT>\"<EOL><DEDENT>", "docstring": "Gets the Marc xml of the files in xaml_jp directory\n\n:param fileName: the name of the file to parse.\n:type fileName: string\n:param refextract_callback: callback to be used to extract\n                            unstructured references. It should\n                            return a marcxml formated string\n                            of the reference.\n:type refextract_callback: callable\n\n:returns: a string with the marc xml version of the file.", "id": "f7892:c0:m6"}
{"signature": "def _get_reference(self, ref):", "body": "label = get_value_in_tag(ref, '<STR_LIT:label>')<EOL>label = re.sub('<STR_LIT>', '<STR_LIT>', label)<EOL>for innerref in ref.getElementsByTagName('<STR_LIT>'):<EOL><INDENT>ref_type = innerref.getAttribute('<STR_LIT>')<EOL>institution = get_value_in_tag(innerref, '<STR_LIT>')<EOL>report_no = '<STR_LIT>'<EOL>for tag in innerref.getElementsByTagName('<STR_LIT>'):<EOL><INDENT>if tag.getAttribute('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>if tag.hasChildNodes():<EOL><INDENT>report_no = get_all_text(tag)<EOL><DEDENT><DEDENT><DEDENT>doi = '<STR_LIT>'<EOL>for tag in innerref.getElementsByTagName('<STR_LIT>'):<EOL><INDENT>if tag.getAttribute('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>doi = xml_to_text(tag)<EOL><DEDENT><DEDENT>collaboration = get_value_in_tag(innerref, '<STR_LIT>')<EOL>authors = []<EOL>person_groups = innerref.getElementsByTagName('<STR_LIT>')<EOL>for author_group in person_groups:<EOL><INDENT>if author_group.getAttribute('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>for author in author_group.getElementsByTagName('<STR_LIT>'):<EOL><INDENT>if author.hasChildNodes():<EOL><INDENT>authors.append(get_all_text(author))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>editors = []<EOL>for editor_group in person_groups:<EOL><INDENT>if editor_group.getAttribute('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>for editor in editor_group.getElementsByTagName('<STR_LIT>'):<EOL><INDENT>if editor.hasChildNodes():<EOL><INDENT>editors.append(get_all_text(editor))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>journal = get_value_in_tag(innerref, '<STR_LIT:source>')<EOL>journal, volume = fix_journal_name(journal, self.journal_mappings)<EOL>volume += get_value_in_tag(innerref, '<STR_LIT>')<EOL>if journal == '<STR_LIT>' or journal == '<STR_LIT>':<EOL><INDENT>issue = get_value_in_tag(innerref, '<STR_LIT>')<EOL>volume = volume[<NUM_LIT:2>:] + issue<EOL>journal = '<STR_LIT>'<EOL><DEDENT>page = get_value_in_tag(innerref, '<STR_LIT>')<EOL>year = 
get_value_in_tag(innerref, '<STR_LIT>')<EOL>external_link = get_value_in_tag(innerref, '<STR_LIT>')<EOL>arxiv = '<STR_LIT>'<EOL>for tag in innerref.getElementsByTagName('<STR_LIT>'):<EOL><INDENT>if tag.getAttribute('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>if tag.hasChildNodes():<EOL><INDENT>arxiv = get_all_text(tag)<EOL><DEDENT><DEDENT><DEDENT>arxiv = format_arxiv_id(arxiv)<EOL>publisher = get_value_in_tag(innerref, '<STR_LIT>')<EOL>publisher_location = get_value_in_tag(innerref, '<STR_LIT>')<EOL>if publisher_location:<EOL><INDENT>publisher = publisher_location + '<STR_LIT>' + publisher<EOL><DEDENT>unstructured_text = []<EOL>for child in innerref.childNodes:<EOL><INDENT>if child.nodeType == child.TEXT_NODE:<EOL><INDENT>text = child.nodeValue.strip()<EOL>text = re.sub(r'<STR_LIT>', '<STR_LIT>', text).strip()<EOL>if text.startswith('<STR_LIT:U+002C>'):<EOL><INDENT>text = text[<NUM_LIT:1>:].strip()<EOL><DEDENT>if text.endswith('<STR_LIT>'):<EOL><INDENT>text = institution + \"<STR_LIT:U+0020>\" + text<EOL>institution = '<STR_LIT>'<EOL>text = text.strip()<EOL><DEDENT>elif text.endswith('<STR_LIT>'):<EOL><INDENT>text += '<STR_LIT:.>'<EOL><DEDENT>elif text.endswith('<STR_LIT>'):<EOL><INDENT>if institution:<EOL><INDENT>text += '<STR_LIT:U+0020>' + institution<EOL>institution = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>text = text[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>elif text.startswith('<STR_LIT>'):<EOL><INDENT>article_title = get_value_in_tag(innerref, '<STR_LIT>')<EOL>text = institution + \"<STR_LIT>\" + article_title + \"<STR_LIT>\"<EOL>institution = '<STR_LIT>'<EOL><DEDENT>elif text == u'<STR_LIT>':<EOL><INDENT>text = '<STR_LIT>'<EOL><DEDENT>ignore_text = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>if text.startswith('<STR_LIT>'):<EOL><INDENT>temp = re.sub(r'<STR_LIT>', '<STR_LIT>', text)<EOL>if temp:<EOL><INDENT>volume += temp<EOL><DEDENT><DEDENT>elif len(text) > <NUM_LIT:1> and text not in ignore_textand not (text.isdigit() or 
text[:-<NUM_LIT:1>].isdigit()):<EOL><INDENT>unstructured_text.append(text)<EOL><DEDENT><DEDENT><DEDENT>if unstructured_text:<EOL><INDENT>unstructured_text = \"<STR_LIT:U+0020>\".join(unstructured_text)<EOL><DEDENT>if ref_type == '<STR_LIT>':<EOL><INDENT>if volume and not volume.lower().startswith('<STR_LIT>'):<EOL><INDENT>volume = '<STR_LIT>' + volume<EOL><DEDENT>if volume and page:<EOL><INDENT>volume = volume + '<STR_LIT>' + page<EOL><DEDENT><DEDENT>yield ref_type, doi, authors, collaboration, journal, volume, page, year,label, arxiv, publisher, institution, unstructured_text, external_link,report_no, editors<EOL><DEDENT>", "docstring": "Retrieve the data for a reference.", "id": "f7893:c1:m1"}
{"signature": "def get_record(self, xml_file):", "body": "self.document = parse(xml_file)<EOL>if get_value_in_tag(self.document, \"<STR_LIT>\"):<EOL><INDENT>raise ApsPackageXMLError(\"<STR_LIT>\"<EOL>% (xml_file,))<EOL><DEDENT>page_count = self._get_page_count()<EOL>rec = create_record()<EOL>if page_count:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', page_count)])<EOL><DEDENT>pacscodes = self._get_pacscodes()<EOL>for pacscode in pacscodes:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:2>', '<STR_LIT>'),<EOL>('<STR_LIT:a>', pacscode)])<EOL><DEDENT>subject = self._get_subject()<EOL>if subject:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT:1>', ind2='<STR_LIT>', subfields=[('<STR_LIT:2>', '<STR_LIT>'),<EOL>('<STR_LIT:a>', subject)])<EOL><DEDENT>keywords = self._get_keywords()<EOL>if keywords:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT:1>', subfields=[('<STR_LIT:a>', '<STR_LIT:U+002CU+0020>'.join(keywords)),<EOL>('<STR_LIT>', '<STR_LIT>')])<EOL><DEDENT>title, subtitle, _ = self._get_title()<EOL>subfields = []<EOL>if subtitle:<EOL><INDENT>subfields.append(('<STR_LIT:b>', subtitle))<EOL><DEDENT>if title:<EOL><INDENT>subfields.append(('<STR_LIT:a>', title))<EOL>record_add_field(rec, '<STR_LIT>', subfields=subfields)<EOL><DEDENT>journal, volume, issue, year, start_date, doi,article_id, _, _ = self._get_publication_information()<EOL>if start_date:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:c>', start_date),<EOL>('<STR_LIT:t>', '<STR_LIT>')])<EOL><DEDENT>if doi:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT>', subfields=[('<STR_LIT:a>', doi),<EOL>('<STR_LIT:2>', '<STR_LIT>')])<EOL><DEDENT>abstract = self._get_abstract()<EOL>if abstract:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', abstract),<EOL>('<STR_LIT>', '<STR_LIT>')])<EOL><DEDENT>license, license_type, license_url = self._get_license()<EOL>subfields = []<EOL>if 
license:<EOL><INDENT>subfields.append(('<STR_LIT:a>', license))<EOL><DEDENT>if license_url:<EOL><INDENT>subfields.append(('<STR_LIT:u>', license_url))<EOL><DEDENT>if subfields:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=subfields)<EOL><DEDENT>c_holder, c_year, c_statement = self._get_copyright()<EOL>c_holder, c_year, c_statement = self._get_copyright()<EOL>if c_holder and c_year:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:d>', c_holder),<EOL>('<STR_LIT:g>', c_year),<EOL>('<STR_LIT:e>', '<STR_LIT>')])<EOL><DEDENT>elif c_statement:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:f>', c_statement),<EOL>('<STR_LIT:e>', '<STR_LIT>')])<EOL><DEDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:p>', journal),<EOL>('<STR_LIT:v>', volume),<EOL>('<STR_LIT:n>', issue),<EOL>('<STR_LIT:y>', year),<EOL>('<STR_LIT:c>', article_id)])<EOL>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', '<STR_LIT>')])<EOL>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', '<STR_LIT>')])<EOL>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', '<STR_LIT>')])<EOL>self._add_authors(rec)<EOL>self._add_references(rec)<EOL>try:<EOL><INDENT>return record_xml_output(rec)<EOL><DEDENT>except UnicodeDecodeError:<EOL><INDENT>sys.stderr.write(\"\"\"<STR_LIT>\"\"\" + doi)<EOL>return \"<STR_LIT>\"<EOL><DEDENT>", "docstring": "Reads a xml file in JATS format and returns\n            a xml string in marc format", "id": "f7893:c1:m3"}
{"signature": "def setUp(self):", "body": "self.els = ElsevierPackage(no_harvest=True)<EOL>self.document = parse(pkg_resources.resource_filename(<EOL>'<STR_LIT>',<EOL>os.path.join('<STR_LIT:data>', '<STR_LIT>')<EOL>))<EOL>self.document540 = parse(pkg_resources.resource_filename(<EOL>'<STR_LIT>',<EOL>os.path.join('<STR_LIT:data>', '<STR_LIT>')<EOL>))<EOL>", "docstring": "Setup initial document.", "id": "f7899:c0:m0"}
{"signature": "def setUp(self):", "body": "self.pos = PosPackage()<EOL>sample_filepath = pkg_resources.resource_filename(<EOL>'<STR_LIT>',<EOL>os.path.join('<STR_LIT:data>', '<STR_LIT>')<EOL>)<EOL>self.pos.document = parse(sample_filepath)<EOL>", "docstring": "Setup test.", "id": "f7901:c0:m0"}
{"signature": "def setUp(self):", "body": "self.inspire_demo_data_path_oai = pkg_resources.resource_filename(<EOL>'<STR_LIT>',<EOL>os.path.join('<STR_LIT:data>', '<STR_LIT>')<EOL>)<EOL>self.inspire_demo_data_path = pkg_resources.resource_filename(<EOL>'<STR_LIT>',<EOL>os.path.join('<STR_LIT:data>', '<STR_LIT>')<EOL>)<EOL>", "docstring": "Load demo data.", "id": "f7904:c0:m0"}
{"signature": "def setUp(self):", "body": "from harvestingkit.bibrecord import BibRecordPackage<EOL>from harvestingkit.inspire_cds_package.from_inspire import Inspire2CDS<EOL>self.inspire_conf_demo_data_path = pkg_resources.resource_filename(<EOL>'<STR_LIT>',<EOL>os.path.join('<STR_LIT:data>', '<STR_LIT>')<EOL>)<EOL>bibrecs = BibRecordPackage(self.inspire_conf_demo_data_path)<EOL>bibrecs.parse()<EOL>self.parsed_record = bibrecs.get_records()[<NUM_LIT:0>]<EOL>self.package = Inspire2CDS(self.parsed_record)<EOL>self.recid = self.package.get_recid()<EOL>self.converted_record = self.package.get_record()<EOL>", "docstring": "Load demo data.", "id": "f7904:c3:m0"}
{"signature": "def setUp(self):", "body": "self.aps = ApsPackage(journal_mappings)<EOL>self.aps.document = parse(join(dirname(folder), aps_test_record))<EOL>", "docstring": "Setup sample parsing used in tests.", "id": "f7905:c0:m0"}
{"signature": "def setUp(self):", "body": "self.els = ElsevierPackage(CONSYN=True,<EOL>journal_mappings=journal_mappings)<EOL>self.document = parse(pkg_resources.resource_filename(<EOL>'<STR_LIT>',<EOL>os.path.join('<STR_LIT:data>', '<STR_LIT>')<EOL>))<EOL>", "docstring": "Setup initial document.", "id": "f7906:c0:m0"}
{"signature": "def setUp(self):", "body": "xml_example_record = \"\"\"<STR_LIT>\"\"\"<EOL>self.rec = bibrecord.create_record(xml_example_record, <NUM_LIT:1>, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>", "docstring": "Initialize stuff", "id": "f7907:c16:m0"}
{"signature": "def setUp(self):", "body": "xml_example_record = \"\"\"<STR_LIT>\"\"\"<EOL>self.rec = bibrecord.create_record(xml_example_record, <NUM_LIT:1>, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>", "docstring": "Initialize stuff", "id": "f7907:c11:m0"}
{"signature": "def setUp(self):", "body": "xmltext = pkg_resources.resource_string('<STR_LIT>',<EOL>os.path.join('<STR_LIT:data>', '<STR_LIT>'))<EOL>self.recs = [rec[<NUM_LIT:0>] for rec in bibrecord.create_records(xmltext)]<EOL>", "docstring": "Initialize stuff", "id": "f7907:c0:m0"}
{"signature": "def setUp(self):", "body": "self.xml_example_record = \"\"\"<STR_LIT>\"\"\"<EOL>self.rec = bibrecord.create_record(self.xml_example_record, <NUM_LIT:1>, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>", "docstring": "Initialize stuff", "id": "f7907:c4:m0"}
{"signature": "def setUp(self):", "body": "xml_example_record = \"\"\"<STR_LIT>\"\"\"<EOL>self.rec = bibrecord.create_record(xml_example_record, <NUM_LIT:1>, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>xml_example_record_empty = \"\"\"<STR_LIT>\"\"\"<EOL>self.rec_empty = bibrecord.create_record(xml_example_record_empty, <NUM_LIT:1>, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>", "docstring": "Initialize stuff", "id": "f7907:c9:m0"}
{"signature": "def setUp(self):", "body": "xml_example_record = \"\"\"<STR_LIT>\"\"\"<EOL>self.rec = bibrecord.create_record(xml_example_record, <NUM_LIT:1>, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>", "docstring": "Initialize stuff", "id": "f7907:c10:m0"}
{"signature": "def setUp(self):", "body": "xml_example_record = \"\"\"<STR_LIT>\"\"\"<EOL>self.rec = bibrecord.create_record(xml_example_record, <NUM_LIT:1>, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>", "docstring": "Initialize stuff", "id": "f7907:c8:m0"}
{"signature": "def setUp(self):", "body": "self.xml_example_record = \"\"\"<STR_LIT>\"\"\"<EOL>", "docstring": "Initialize stuff", "id": "f7907:c15:m0"}
{"signature": "def _normalize_article_dir_with_dtd(self, path):", "body": "if exists(join(path, '<STR_LIT>')):<EOL><INDENT>return<EOL><DEDENT>main_xml_content = open(join(path, '<STR_LIT>')).read()<EOL>arts = ['<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>']<EOL>tmp_extracted = <NUM_LIT:0><EOL>for art in arts:<EOL><INDENT>if art in main_xml_content:<EOL><INDENT>self._extract_correct_dtd_package(art.split('<STR_LIT:.>')[<NUM_LIT:0>], path)<EOL>tmp_extracted = <NUM_LIT:1><EOL><DEDENT><DEDENT>if not tmp_extracted:<EOL><INDENT>message = \"<STR_LIT>\" + path<EOL>message += \"<STR_LIT>\"<EOL>self.logger.error(message)<EOL>raise ValueError(message)<EOL><DEDENT>command = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>join(path, '<STR_LIT>'),<EOL>\"<STR_LIT>\", join(path, '<STR_LIT>')]<EOL>dummy, dummy, cmd_err = run_shell_command(command)<EOL>if cmd_err:<EOL><INDENT>message = \"<STR_LIT>\" % (<EOL>join(path, '<STR_LIT>'), cmd_err)<EOL>self.logger.error(message)<EOL>raise ValueError(message)<EOL><DEDENT>", "docstring": "main.xml from Elsevier assume the existence of a local DTD.\nThis procedure install the DTDs next to the main.xml file\nand normalize it using xmllint in order to resolve all namespaces\nand references.", "id": "f7908:c0:m6"}
{"signature": "def _normalize_issue_dir_with_dtd(self, path):", "body": "if exists(join(path, '<STR_LIT>')):<EOL><INDENT>return<EOL><DEDENT>issue_xml_content = open(join(path, '<STR_LIT>')).read()<EOL>sis = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>tmp_extracted = <NUM_LIT:0><EOL>for si in sis:<EOL><INDENT>if si in issue_xml_content:<EOL><INDENT>self._extract_correct_dtd_package(si.split('<STR_LIT:.>')[<NUM_LIT:0>], path)<EOL>tmp_extracted = <NUM_LIT:1><EOL><DEDENT><DEDENT>if not tmp_extracted:<EOL><INDENT>message = \"<STR_LIT>\" + path<EOL>message += \"<STR_LIT>\"<EOL>self.logger.error(message)<EOL>raise ValueError(message)<EOL><DEDENT>command = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>join(path, '<STR_LIT>'),<EOL>\"<STR_LIT>\", join(path, '<STR_LIT>')]<EOL>dummy, dummy, cmd_err = run_shell_command(command)<EOL>if cmd_err:<EOL><INDENT>message = \"<STR_LIT>\" % (<EOL>join(path, '<STR_LIT>'), cmd_err)<EOL>self.logger.error(message)<EOL>raise ValueError(message)<EOL><DEDENT>", "docstring": "issue.xml from Elsevier assume the existence of a local DTD.\nThis procedure install the DTDs next to the issue.xml file\nand normalize it using xmllint in order to resolve all namespaces\nand references.", "id": "f7908:c0:m5"}
{"signature": "def _crawl_elsevier_and_find_issue_xml(self):", "body": "self._found_issues = []<EOL>if not self.path and not self.package_name:<EOL><INDENT>for issue in self.conn._get_issues():<EOL><INDENT>dirname = issue.rstrip('<STR_LIT>')<EOL>try:<EOL><INDENT>self._normalize_issue_dir_with_dtd(dirname)<EOL>self._found_issues.append(dirname)<EOL><DEDENT>except Exception as err:<EOL><INDENT>register_exception()<EOL>print(\"<STR_LIT>\" % (dirname, err))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>def visit(dummy, dirname, names):<EOL><INDENT>if \"<STR_LIT>\" in names:<EOL><INDENT>try:<EOL><INDENT>self._normalize_issue_dir_with_dtd(dirname)<EOL>self._found_issues.append(dirname)<EOL><DEDENT>except Exception as err:<EOL><INDENT>register_exception()<EOL>print(\"<STR_LIT>\"<EOL>% (dirname, err))<EOL><DEDENT><DEDENT><DEDENT>walk(self.path, visit, None)<EOL><DEDENT>", "docstring": "Information about the current volume, issue, etc. is available\nin a file called issue.xml that is available in a higher directory.", "id": "f7908:c0:m3"}
{"signature": "def _crawl_elsevier_and_find_main_xml(self):", "body": "self.found_articles = []<EOL>if not self.path and not self.package_name:<EOL><INDENT>for doc in self.conn.found_articles:<EOL><INDENT>dirname = doc['<STR_LIT>'].rstrip('<STR_LIT>')<EOL>try:<EOL><INDENT>self._normalize_article_dir_with_dtd(dirname)<EOL>self.found_articles.append(dirname)<EOL><DEDENT>except Exception as err:<EOL><INDENT>register_exception()<EOL>print(\"<STR_LIT>\" % (dirname, err))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>def visit(dummy, dirname, names):<EOL><INDENT>if \"<STR_LIT>\" in names and \"<STR_LIT>\" in names:<EOL><INDENT>try:<EOL><INDENT>self._normalize_article_dir_with_dtd(dirname)<EOL>self.found_articles.append(dirname)<EOL><DEDENT>except Exception as err:<EOL><INDENT>register_exception()<EOL>print(\"<STR_LIT>\" % (dirname, err))<EOL><DEDENT><DEDENT><DEDENT>walk(self.path, visit, None)<EOL><DEDENT>", "docstring": "A package contains several subdirectory corresponding to each article.\nAn article is actually identified by the existence of a main.pdf and\na main.xml in a given directory.", "id": "f7908:c0:m2"}
{"signature": "def get_publication_date(self, xml_doc):", "body": "start_date = get_value_in_tag(xml_doc, \"<STR_LIT>\")<EOL>if not start_date:<EOL><INDENT>start_date = get_value_in_tag(xml_doc, \"<STR_LIT>\")<EOL>if not start_date:<EOL><INDENT>start_date = get_value_in_tag(xml_doc, '<STR_LIT>')<EOL>if start_date:<EOL><INDENT>start_date = datetime.datetime.strptime(<EOL>start_date, \"<STR_LIT>\"<EOL>)<EOL>return start_date.strftime(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>import dateutil.parser<EOL>start_date = re.sub('<STR_LIT>', <EOL>r'<STR_LIT>', start_date)<EOL>try:<EOL><INDENT>date = dateutil.parser.parse(start_date)<EOL><DEDENT>except ValueError:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if len(start_date.split(\"<STR_LIT:U+0020>\")) == <NUM_LIT:3>:<EOL><INDENT>return date.strftime(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>return date.strftime(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if len(start_date) is <NUM_LIT:8>:<EOL><INDENT>start_date = time.strftime(<EOL>'<STR_LIT>', time.strptime(start_date, '<STR_LIT>'))<EOL><DEDENT>elif len(start_date) is <NUM_LIT:6>:<EOL><INDENT>start_date = time.strftime(<EOL>'<STR_LIT>', time.strptime(start_date, '<STR_LIT>'))<EOL><DEDENT>return start_date<EOL><DEDENT>", "docstring": "Return the best effort start_date.", "id": "f7908:c0:m28"}
{"signature": "def lock_issue():", "body": "<EOL>print >> sys.stderr, \"<STR_LIT>\"<EOL>", "docstring": "Locks the issu in case of error.", "id": "f7909:m0"}
{"signature": "def _normalize_article_dir_with_dtd(self, path):", "body": "files = [filename for filename in listdir(path)<EOL>if \"<STR_LIT>\" in filename]<EOL>if not files:<EOL><INDENT>files = [filename for filename in listdir(path)<EOL>if \"<STR_LIT>\" in filename]<EOL><DEDENT>if exists(join(path, '<STR_LIT>')):<EOL><INDENT>return<EOL><DEDENT>if '<STR_LIT>' in open(join(path, files[<NUM_LIT:0>])).read():<EOL><INDENT>path_normalized = mkdtemp(prefix=\"<STR_LIT>\",<EOL>dir=CFG_TMPSHAREDDIR)<EOL>ZipFile(CFG_SPRINGER_JATS_PATH).extractall(path_normalized)<EOL><DEDENT>elif '<STR_LIT>' in open(join(path, files[<NUM_LIT:0>])).read():<EOL><INDENT>path_normalized = mkdtemp(prefix=\"<STR_LIT>\",<EOL>dir=CFG_TMPSHAREDDIR)<EOL>ZipFile(CFG_SPRINGER_AV24_PATH).extractall(path_normalized)<EOL><DEDENT>else:<EOL><INDENT>error_msg = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>self.logger.error(error_msg % path)<EOL>raise ValueError(error_msg % path)<EOL><DEDENT>print(\"<STR_LIT>\" % (files[<NUM_LIT:0>],))<EOL>(cmd_exit_code,<EOL>cmd_out,<EOL>cmd_err) = run_shell_command((\"<STR_LIT>\"<EOL>\"<STR_LIT>\"),<EOL>(join(path, files[<NUM_LIT:0>]),<EOL>join(path_normalized,<EOL>'<STR_LIT>')))<EOL>if cmd_err:<EOL><INDENT>error_msg = \"<STR_LIT>\"<EOL>self.logger.error(error_msg % (join(path, '<STR_LIT>'), cmd_err))<EOL>raise ValueError(error_msg % (join(path, '<STR_LIT>'), cmd_err))<EOL><DEDENT>self.articles_normalized.append(path_normalized)<EOL>", "docstring": "TODO: main.xml from Springer assume the existence of a local DTD.\nThis procedure install the DTDs next to the main.xml file\nand normalize it using xmllint in order to resolve all namespaces\nand references.", "id": "f7910:c0:m7"}
{"signature": "def connect(self):", "body": "for tryed_connection_count in range(CFG_FTP_CONNECTION_ATTEMPTS):<EOL><INDENT>try:<EOL><INDENT>self.ftp = FtpHandler(self.config.SPRINGER.URL,<EOL>self.config.SPRINGER.LOGIN,<EOL>self.config.SPRINGER.PASSWORD)<EOL>self.logger.debug((\"<STR_LIT>\"<EOL>\"<STR_LIT>\"))<EOL>return<EOL><DEDENT>except socket_timeout_exception as err:<EOL><INDENT>self.logger.error(('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>% (tryed_connection_count+<NUM_LIT:1>,<EOL>CFG_FTP_CONNECTION_ATTEMPTS,<EOL>CFG_FTP_TIMEOUT_SLEEP_DURATION))<EOL>time.sleep(CFG_FTP_TIMEOUT_SLEEP_DURATION)<EOL><DEDENT>except Exception as err:<EOL><INDENT>self.logger.error((\"<STR_LIT>\"<EOL>\"<STR_LIT>\") % (err,))<EOL>break<EOL><DEDENT><DEDENT>raise LoginException(err)<EOL>", "docstring": "Logs into the specified ftp server and returns connector.", "id": "f7910:c0:m0"}
{"signature": "def _crawl_oxford_and_find_main_xml(self):", "body": "self.found_articles = []<EOL>def visit(arg, dirname, names):<EOL><INDENT>files = [filename for filename in names if \"<STR_LIT>\" in filename]<EOL>if files:<EOL><INDENT>try:<EOL><INDENT>for f in files:<EOL><INDENT>self.found_articles.append(join(dirname, f))<EOL><DEDENT><DEDENT>except Exception as err:<EOL><INDENT>register_exception()<EOL>print(\"<STR_LIT>\" % (dirname, err),<EOL>file=sys.stderr)<EOL><DEDENT><DEDENT><DEDENT>if hasattr(self, '<STR_LIT>'):<EOL><INDENT>walk(self.path_unpacked, visit, None)<EOL><DEDENT>elif self.path:<EOL><INDENT>walk(self.path, visit, None)<EOL><DEDENT>else:<EOL><INDENT>self.logger.info(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "A package contains several subdirectory corresponding to each article.\nAn article is actually identified by the existence of a main.pdf and\na main.xml in a given directory.", "id": "f7911:c0:m6"}
{"signature": "def get_record(self, filename, ref_extract_callback=None):", "body": "self.document = parse(filename)<EOL>article_type = self._get_article_type()<EOL>if article_type not in ['<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>']:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>rec = create_record()<EOL>title, subtitle, notes = self._get_title()<EOL>subfields = []<EOL>if subtitle:<EOL><INDENT>subfields.append(('<STR_LIT:b>', subtitle))<EOL><DEDENT>if title:<EOL><INDENT>title = fix_title_capitalization(title)<EOL>subfields.append(('<STR_LIT:a>', title))<EOL>record_add_field(rec, '<STR_LIT>', subfields=subfields)<EOL><DEDENT>for note_id in notes:<EOL><INDENT>note = self._get_note(note_id)<EOL>if note:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', note)])<EOL><DEDENT><DEDENT>keywords = self._get_keywords()<EOL>for keyword in keywords:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT:1>', subfields=[('<STR_LIT:a>', keyword),<EOL>('<STR_LIT>', '<STR_LIT>')])<EOL><DEDENT>journal, volume, issue, year, date, doi, page,fpage, lpage = self._get_publication_information()<EOL>if date:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:c>', date),<EOL>('<STR_LIT:t>', '<STR_LIT>')])<EOL><DEDENT>if doi:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT>', subfields=[('<STR_LIT:a>', doi),<EOL>('<STR_LIT:2>', '<STR_LIT>')])<EOL><DEDENT>abstract = self._get_abstract()<EOL>if abstract:<EOL><INDENT>abstract = convert_html_subscripts_to_latex(abstract)<EOL>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', abstract),<EOL>('<STR_LIT>', '<STR_LIT>')])<EOL><DEDENT>license, license_type, license_url = self._get_license()<EOL>subfields = []<EOL>if license:<EOL><INDENT>subfields.append(('<STR_LIT:a>', license))<EOL><DEDENT>if license_url:<EOL><INDENT>subfields.append(('<STR_LIT:u>', 
license_url))<EOL><DEDENT>if subfields:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=subfields)<EOL><DEDENT>if license_type == '<STR_LIT>':<EOL><INDENT>self._attach_fulltext(rec, doi)<EOL><DEDENT>number_of_pages = self._get_page_count()<EOL>if number_of_pages:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:a>', number_of_pages)])<EOL><DEDENT>c_holder, c_year, c_statement = self._get_copyright()<EOL>if c_holder and c_year:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:d>', c_holder),<EOL>('<STR_LIT:g>', c_year),<EOL>('<STR_LIT:e>', '<STR_LIT>')])<EOL><DEDENT>elif c_statement:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[('<STR_LIT:f>', c_statement),<EOL>('<STR_LIT:e>', '<STR_LIT>')])<EOL><DEDENT>subfields = []<EOL>if journal:<EOL><INDENT>subfields.append(('<STR_LIT:p>', journal))<EOL><DEDENT>if issue:<EOL><INDENT>subfields.append(('<STR_LIT:n>', issue))<EOL><DEDENT>if volume:<EOL><INDENT>subfields.append(('<STR_LIT:v>', volume))<EOL><DEDENT>if fpage and lpage:<EOL><INDENT>subfields.append(('<STR_LIT:c>', '<STR_LIT>' % (fpage,<EOL>lpage)))<EOL><DEDENT>elif page:<EOL><INDENT>subfields.append(('<STR_LIT:c>', page))<EOL><DEDENT>if year:<EOL><INDENT>subfields.append(('<STR_LIT:y>', year))<EOL><DEDENT>if article_type == '<STR_LIT>':<EOL><INDENT>subfields.append(('<STR_LIT:m>', '<STR_LIT>'))<EOL><DEDENT>elif article_type == '<STR_LIT>':<EOL><INDENT>subfields.append(('<STR_LIT:m>', '<STR_LIT>'))<EOL><DEDENT>record_add_field(rec, '<STR_LIT>', subfields=subfields)<EOL>collections = self.get_collection(journal)<EOL>for collection in collections:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', subfields=[collection])<EOL><DEDENT>self._add_authors(rec)<EOL>if article_type in ['<STR_LIT>',<EOL>'<STR_LIT>']:<EOL><INDENT>related_article = self._get_related_article()<EOL>if related_article:<EOL><INDENT>record_add_field(rec, '<STR_LIT>', ind1='<STR_LIT>', subfields=[('<STR_LIT:a>', 
related_article),<EOL>('<STR_LIT:2>', '<STR_LIT>')])<EOL><DEDENT><DEDENT>try:<EOL><INDENT>return record_xml_output(rec)<EOL><DEDENT>except UnicodeDecodeError:<EOL><INDENT>message = \"<STR_LIT>\" + doi<EOL>sys.stderr.write(message)<EOL>return \"<STR_LIT>\"<EOL><DEDENT>", "docstring": "Get the MARCXML of the files in xaml_jp directory.\n\n        :param filename: the name of the file to parse.\n        :type filename: string\n        :param refextract_callback: callback to be used to extract\n                                    unstructured references. It should\n                                    return a marcxml formated string\n                                    of the reference.\n        :type refextract_callback: callable\n\n        :returns: a string with the marc xml version of the file.", "id": "f7914:c1:m7"}
{"signature": "def get_collection(self, journal):", "body": "conference = '<STR_LIT>'<EOL>for tag in self.document.getElementsByTagName('<STR_LIT>'):<EOL><INDENT>conference = xml_to_text(tag)<EOL><DEDENT>if conference or journal == \"<STR_LIT>\":<EOL><INDENT>return [('<STR_LIT:a>', '<STR_LIT>'), ('<STR_LIT:a>', '<STR_LIT>')]<EOL><DEDENT>elif self._get_article_type() == \"<STR_LIT>\":<EOL><INDENT>return [('<STR_LIT:a>', '<STR_LIT>'), ('<STR_LIT:a>', '<STR_LIT>')]<EOL><DEDENT>else:<EOL><INDENT>return [('<STR_LIT:a>', '<STR_LIT>'), ('<STR_LIT:a>', '<STR_LIT>')]<EOL><DEDENT>", "docstring": "Return this articles' collection.", "id": "f7914:c1:m6"}
{"signature": "def __init__(self, journal_mappings={}):", "body": "self.url_prefix = \"<STR_LIT>\"<EOL>super(WorldScientific, self).__init__(journal_mappings)<EOL>", "docstring": "Create instance of WorldScientific package.", "id": "f7914:c1:m0"}
{"signature": "def get_date(self, filename):", "body": "try:<EOL><INDENT>self.document = parse(filename)<EOL>return self._get_date()<EOL><DEDENT>except DateNotFoundException:<EOL><INDENT>print(\"<STR_LIT>\".format(filename))<EOL>return datetime.datetime.strftime(datetime.datetime.now(),<EOL>\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Return the date of the article in file.", "id": "f7914:c1:m2"}
{"signature": "def element_tree_collection_to_records(tree):", "body": "from .bibrecord import create_record<EOL>records = []<EOL>collection = tree.getroot()<EOL>for record_element in collection.getchildren():<EOL><INDENT>marcxml = ET.tostring(record_element, encoding=\"<STR_LIT:utf-8>\")<EOL>record, status, errors = create_record(marcxml)<EOL>if errors:<EOL><INDENT>print(str(status))<EOL><DEDENT>records.append(record)<EOL><DEDENT>return records<EOL>", "docstring": "Take an ElementTree and converts the nodes into BibRecord records.\n\n    This function is for a tree root of collection as such:\n    <collection>\n        <record>\n            <!-- MARCXML -->\n        </record>\n        <record> ... </record>\n    </collection>", "id": "f7916:m2"}
{"signature": "def collapse_initials(name):", "body": "if len(name.split(\"<STR_LIT:.>\")) > <NUM_LIT:1>:<EOL><INDENT>name = re.sub(r'<STR_LIT>', r'<STR_LIT>', name)<EOL><DEDENT>return name<EOL>", "docstring": "Remove the space between initials, eg T. A. --> T.A.", "id": "f7919:m8"}
{"signature": "def fix_dashes(string):", "body": "string = string.replace('<STR_LIT>', '<STR_LIT:->')<EOL>string = string.replace('<STR_LIT>', '<STR_LIT:->')<EOL>string = string.replace('<STR_LIT>', '<STR_LIT:->')<EOL>string = string.replace('<STR_LIT>', '<STR_LIT:->')<EOL>string = unidecode(string)<EOL>return re.sub(r'<STR_LIT>', '<STR_LIT:->', string)<EOL>", "docstring": "Fix bad Unicode special dashes in string.", "id": "f7919:m11"}
{"signature": "def fix_journal_name(journal, knowledge_base):", "body": "if not journal:<EOL><INDENT>return '<STR_LIT>', '<STR_LIT>'<EOL><DEDENT>if not knowledge_base:<EOL><INDENT>return journal, '<STR_LIT>'<EOL><DEDENT>if len(journal) < <NUM_LIT:2>:<EOL><INDENT>return journal, '<STR_LIT>'<EOL><DEDENT>volume = '<STR_LIT>'<EOL>if (journal[-<NUM_LIT:1>] <= '<STR_LIT>' and journal[-<NUM_LIT:1>] >= '<STR_LIT:A>')and (journal[-<NUM_LIT:2>] == '<STR_LIT:.>' or journal[-<NUM_LIT:2>] == '<STR_LIT:U+0020>'):<EOL><INDENT>volume += journal[-<NUM_LIT:1>]<EOL>journal = journal[:-<NUM_LIT:1>]<EOL><DEDENT>journal = journal.strip()<EOL>if journal.upper() in knowledge_base:<EOL><INDENT>journal = knowledge_base[journal.upper()].strip()<EOL><DEDENT>elif journal in knowledge_base:<EOL><INDENT>journal = knowledge_base[journal].strip()<EOL><DEDENT>elif '<STR_LIT:.>' in journal:<EOL><INDENT>journalnodots = journal.replace('<STR_LIT>', '<STR_LIT:U+0020>')<EOL>journalnodots = journalnodots.replace('<STR_LIT:.>', '<STR_LIT:U+0020>').strip().upper()<EOL>if journalnodots in knowledge_base:<EOL><INDENT>journal = knowledge_base[journalnodots].strip()<EOL><DEDENT><DEDENT>journal = journal.replace('<STR_LIT>', '<STR_LIT:.>')<EOL>return journal, volume<EOL>", "docstring": "Convert journal name to Inspire's short form.", "id": "f7919:m9"}
{"signature": "def record_add_field(rec, tag, ind1='<STR_LIT>', ind2='<STR_LIT>', subfields=[],<EOL>controlfield_value='<STR_LIT>'):", "body": "if controlfield_value:<EOL><INDENT>doc = etree.Element(\"<STR_LIT>\",<EOL>attrib={<EOL>\"<STR_LIT>\": tag,<EOL>})<EOL>doc.text = str(controlfield_value)<EOL><DEDENT>else:<EOL><INDENT>doc = etree.Element(\"<STR_LIT>\",<EOL>attrib={<EOL>\"<STR_LIT>\": tag,<EOL>\"<STR_LIT>\": ind1,<EOL>\"<STR_LIT>\": ind2,<EOL>})<EOL>for code, value in subfields:<EOL><INDENT>field = etree.SubElement(doc, \"<STR_LIT>\", attrib={\"<STR_LIT:code>\": code})<EOL>field.text = value<EOL><DEDENT><DEDENT>rec.append(doc)<EOL>return rec<EOL>", "docstring": "Add a MARCXML datafield as a new child to a XML document.", "id": "f7919:m2"}
{"signature": "def punctuate_authorname(an):", "body": "name = an.strip()<EOL>parts = [x for x in name.split('<STR_LIT:U+002C>') if x != '<STR_LIT>']<EOL>ret_str = '<STR_LIT>'<EOL>for idx, part in enumerate(parts):<EOL><INDENT>subparts = part.strip().split('<STR_LIT:U+0020>')<EOL>for sidx, substr in enumerate(subparts):<EOL><INDENT>ret_str += substr<EOL>if len(substr) == <NUM_LIT:1>:<EOL><INDENT>ret_str += '<STR_LIT:.>'<EOL><DEDENT>if sidx < (len(subparts) - <NUM_LIT:1>):<EOL><INDENT>ret_str += '<STR_LIT:U+0020>'<EOL><DEDENT><DEDENT>if idx < (len(parts) - <NUM_LIT:1>):<EOL><INDENT>ret_str += '<STR_LIT:U+002CU+0020>'<EOL><DEDENT><DEDENT>return ret_str.strip()<EOL>", "docstring": "Punctuate author names properly.\n\n    Expects input in the form 'Bloggs, J K' and will return 'Bloggs, J. K.'.", "id": "f7919:m20"}
{"signature": "def create_record():", "body": "return etree.Element(\"<STR_LIT>\")<EOL>", "docstring": "Return a new XML document.", "id": "f7919:m1"}
{"signature": "def convert_date_from_iso_to_human(value):", "body": "try:<EOL><INDENT>year, month, day = value.split(\"<STR_LIT:->\")<EOL><DEDENT>except ValueError:<EOL><INDENT>try:<EOL><INDENT>year, month, day = value.split(\"<STR_LIT:U+0020>\")<EOL><DEDENT>except ValueError:<EOL><INDENT>return value<EOL><DEDENT><DEDENT>try:<EOL><INDENT>date_object = datetime(int(year), int(month), int(day))<EOL><DEDENT>except TypeError:<EOL><INDENT>return value<EOL><DEDENT>return date_object.strftime(\"<STR_LIT>\")<EOL>", "docstring": "Convert a date-value to the ISO date standard for humans.", "id": "f7919:m22"}
{"signature": "def license_is_oa(license):", "body": "for oal in OA_LICENSES:<EOL><INDENT>if re.search(oal, license):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Return True if license is compatible with Open Access", "id": "f7919:m27"}
{"signature": "def return_letters_from_string(text):", "body": "out = \"<STR_LIT>\"<EOL>for letter in text:<EOL><INDENT>if letter.isalpha():<EOL><INDENT>out += letter<EOL><DEDENT><DEDENT>return out<EOL>", "docstring": "Get letters from string only.", "id": "f7919:m26"}
{"signature": "def create_logger(name,<EOL>filename=None,<EOL>logging_level=logging.DEBUG):", "body": "logger = logging.getLogger(name)<EOL>formatter = logging.Formatter(('<STR_LIT>'<EOL>'<STR_LIT>'))<EOL>if filename:<EOL><INDENT>fh = logging.FileHandler(filename=filename)<EOL>fh.setFormatter(formatter)<EOL>logger.addHandler(fh)<EOL><DEDENT>ch = logging.StreamHandler()<EOL>ch.setFormatter(formatter)<EOL>logger.addHandler(ch)<EOL>logger.setLevel(logging_level)<EOL>return logger<EOL>", "docstring": "Create a logger object.", "id": "f7919:m16"}
{"signature": "def unzip(zipped_file, output_directory=None,<EOL>prefix=\"<STR_LIT>\", suffix=\"<STR_LIT>\"):", "body": "if not output_directory:<EOL><INDENT>try:<EOL><INDENT>output_directory = mkdtemp(suffix=suffix,<EOL>prefix=prefix)<EOL><DEDENT>except Exception as e:<EOL><INDENT>try:<EOL><INDENT>os.removedirs(output_directory)<EOL><DEDENT>except TypeError:<EOL><INDENT>pass<EOL><DEDENT>raise e<EOL><DEDENT><DEDENT>return _do_unzip(zipped_file, output_directory)<EOL>", "docstring": "Uncompress a zipped file from given filepath to an (optional) location.\n\n    If no location is given, a temporary folder will be generated inside\n    CFG_TMPDIR, prefixed with \"apsharvest_unzip_\".", "id": "f7919:m17"}
{"signature": "def convert_html_subscripts_to_latex(text):", "body": "text = re.sub(\"<STR_LIT>\", r\"<STR_LIT>\", text)<EOL>text = re.sub(\"<STR_LIT>\", r\"<STR_LIT>\", text)<EOL>return text<EOL>", "docstring": "Convert some HTML tags to latex equivalents.", "id": "f7919:m13"}
{"signature": "def download_file(from_url, to_filename=None,<EOL>chunk_size=<NUM_LIT> * <NUM_LIT:8>, retry_count=<NUM_LIT:3>):", "body": "if not to_filename:<EOL><INDENT>to_filename = get_temporary_file()<EOL><DEDENT>session = requests.Session()<EOL>adapter = requests.adapters.HTTPAdapter(max_retries=retry_count)<EOL>session.mount(from_url, adapter)<EOL>response = session.get(from_url, stream=True)<EOL>with open(to_filename, '<STR_LIT:wb>') as fd:<EOL><INDENT>for chunk in response.iter_content(chunk_size):<EOL><INDENT>fd.write(chunk)<EOL><DEDENT><DEDENT>return to_filename<EOL>", "docstring": "Download URL to a file.", "id": "f7919:m14"}
{"signature": "def get_filesize(self, filename):", "body": "result = []<EOL>def dir_callback(val):<EOL><INDENT>result.append(val.split()[<NUM_LIT:4>])<EOL><DEDENT>self._ftp.dir(filename, dir_callback)<EOL>return result[<NUM_LIT:0>]<EOL>", "docstring": "Returns the filesize of a file\n\n        :param filename: the full path to the file on the server.\n        :type filename: string\n\n        :returns: string representation of the filesize.", "id": "f7920:c0:m11"}
{"signature": "def cd(self, folder):", "body": "if folder.startswith('<STR_LIT:/>'):<EOL><INDENT>self._ftp.cwd(folder)<EOL><DEDENT>else:<EOL><INDENT>for subfolder in folder.split('<STR_LIT:/>'):<EOL><INDENT>if subfolder:<EOL><INDENT>self._ftp.cwd(subfolder)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Changes the working directory on the server.\n\n        :param folder: the desired directory.\n        :type folder: string", "id": "f7920:c0:m5"}
{"signature": "def close(self):", "body": "self._ftp.close()<EOL>", "docstring": "Closes the connection to the server.", "id": "f7920:c0:m2"}
{"signature": "def upload(self, filename, location='<STR_LIT>'):", "body": "current_folder = self._ftp.pwd()<EOL>self.mkdir(location)<EOL>self.cd(location)<EOL>fl = open(filename, '<STR_LIT:rb>')<EOL>filename = filename.split('<STR_LIT:/>')[-<NUM_LIT:1>]<EOL>self._ftp.storbinary('<STR_LIT>' % filename, fl)<EOL>fl.close()<EOL>self.cd(current_folder)<EOL>", "docstring": "Uploads a file on the server to the desired location\n\n        :param filename: the name of the file to be uploaded.\n        :type filename: string\n        :param location: the directory in which the file will\n                         be stored.\n        :type location: string", "id": "f7920:c0:m14"}
{"signature": "def connect(self):", "body": "self._ftp.connect()<EOL>self._ftp.login(user=self._username, passwd=self._passwd)<EOL>", "docstring": "Connects and logins to the server.", "id": "f7920:c0:m1"}
{"signature": "def download(self, source_file, target_folder='<STR_LIT>'):", "body": "current_folder = self._ftp.pwd()<EOL>if not target_folder.startswith('<STR_LIT:/>'):  <EOL><INDENT>target_folder = join(getcwd(), target_folder)<EOL><DEDENT>folder = os.path.dirname(source_file)<EOL>self.cd(folder)<EOL>if folder.startswith(\"<STR_LIT:/>\"):<EOL><INDENT>folder = folder[<NUM_LIT:1>:]<EOL><DEDENT>destination_folder = join(target_folder, folder)<EOL>if not os.path.exists(destination_folder):<EOL><INDENT>print(\"<STR_LIT>\", destination_folder)<EOL>os.makedirs(destination_folder)<EOL><DEDENT>source_file = os.path.basename(source_file)<EOL>destination = join(destination_folder, source_file)<EOL>try:<EOL><INDENT>with open(destination, '<STR_LIT:wb>') as result:<EOL><INDENT>self._ftp.retrbinary('<STR_LIT>' % (source_file,),<EOL>result.write)<EOL><DEDENT><DEDENT>except error_perm as e:  <EOL><INDENT>print(e)<EOL>remove(join(target_folder, source_file))<EOL>raise<EOL><DEDENT>self._ftp.cwd(current_folder)<EOL>", "docstring": "Downloads a file from the FTP server to target folder\n\n        :param source_file: the absolute path for the file on the server\n                   it can be the one of the files coming from\n                   FtpHandler.dir().\n        :type source_file: string\n        :param target_folder: relative or absolute path of the\n                              destination folder default is the\n                              working directory.\n        :type target_folder: string", "id": "f7920:c0:m4"}
{"signature": "def rm(self, filename):", "body": "try:<EOL><INDENT>self._ftp.delete(filename)<EOL><DEDENT>except error_perm:  <EOL><INDENT>try:<EOL><INDENT>current_folder = self._ftp.pwd()<EOL>self.cd(filename)<EOL><DEDENT>except error_perm:<EOL><INDENT>print('<STR_LIT>'<EOL>'<STR_LIT>' % (filename,))<EOL><DEDENT>else:<EOL><INDENT>self.cd(current_folder)<EOL>print('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % (filename,))<EOL><DEDENT><DEDENT>", "docstring": "Delete a file from the server.\n\n        :param filename: the file to be deleted.\n        :type filename: string", "id": "f7920:c0:m9"}
{"signature": "def __init__(self, journal_mappings={}):", "body": "self.journal_mappings = journal_mappings<EOL>self.document = None<EOL>", "docstring": "Create a JatsPackage.", "id": "f7922:c0:m0"}
{"signature": "def strip_fields(self):", "body": "for tag in self.record.keys():<EOL><INDENT>if tag in self.fields_list:<EOL><INDENT>record_delete_fields(self.record, tag)<EOL><DEDENT><DEDENT>", "docstring": "Clear any fields listed in field_list.", "id": "f7924:c0:m13"}
{"signature": "def get_local_folder(self):", "body": "if not hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>self.local_folder = mkdtemp()<EOL><DEDENT>return self.local_folder<EOL>", "docstring": "Return a path to a generated local folder.", "id": "f7924:c0:m9"}
{"signature": "@classmethod<EOL><INDENT>def from_source(cls, source):<DEDENT>", "body": "bibrecs = BibRecordPackage(source)<EOL>bibrecs.parse()<EOL>for bibrec in bibrecs.get_records():<EOL><INDENT>yield cls(bibrec)<EOL><DEDENT>", "docstring": "Yield single conversion objects from a MARCXML file or string.\n\n        >>> from harvestingkit.inspire_cds_package import Inspire2CDS\n        >>> for record in Inspire2CDS.from_source(\"inspire.xml\"):\n        >>>     xml = record.convert()", "id": "f7924:c0:m2"}
{"signature": "def keep_only_fields(self):", "body": "for tag in self.record.keys():<EOL><INDENT>if tag not in self.fields_list:<EOL><INDENT>record_delete_fields(self.record, tag)<EOL><DEDENT><DEDENT>", "docstring": "Keep only fields listed in field_list.", "id": "f7924:c0:m12"}
{"signature": "def get_recid(self):", "body": "try:<EOL><INDENT>return self.record['<STR_LIT>'][<NUM_LIT:0>][<NUM_LIT:3>]<EOL><DEDENT>except KeyError:<EOL><INDENT>return<EOL><DEDENT>", "docstring": "Return the record ID from 001.", "id": "f7924:c0:m8"}
{"signature": "def add_control_number(self, tag, value):", "body": "record_add_field(self.record,<EOL>tag,<EOL>controlfield_value=value)<EOL>", "docstring": "Add a control-number 00x for given tag with value.", "id": "f7924:c0:m15"}
{"signature": "def match(self, query=None, **kwargs):", "body": "from invenio.search_engine import perform_request_search<EOL>if not query:<EOL><INDENT>recid = self.record[\"<STR_LIT>\"][<NUM_LIT:0>][<NUM_LIT:3>]<EOL>return perform_request_search(p=\"<STR_LIT>\" % (recid,),<EOL>of=\"<STR_LIT:id>\")<EOL><DEDENT>else:<EOL><INDENT>if \"<STR_LIT>\" not in kwargs:<EOL><INDENT>kwargs[\"<STR_LIT>\"] = self.record[\"<STR_LIT>\"][<NUM_LIT:0>][<NUM_LIT:3>]<EOL><DEDENT>return perform_request_search(p=query % kwargs,<EOL>of=\"<STR_LIT:id>\")<EOL><DEDENT>", "docstring": "Try to match the current record to the database.", "id": "f7924:c0:m10"}
{"signature": "def get_xml(self):", "body": "return record_xml_output(self.record)<EOL>", "docstring": "Return the current record as MARCXML.", "id": "f7924:c0:m7"}
{"signature": "def update_system_numbers(self):", "body": "scn_035_fields = record_get_field_instances(self.record, '<STR_LIT>')<EOL>forbidden_values = [\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\"]<EOL>for field in scn_035_fields:<EOL><INDENT>subs = field_get_subfields(field)<EOL>if '<STR_LIT>' in subs:<EOL><INDENT>if '<STR_LIT:a>' not in subs:<EOL><INDENT>continue<EOL><DEDENT>for sub in subs['<STR_LIT>']:<EOL><INDENT>if sub.lower() in forbidden_values:<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>suffixes = [s.lower() for s in subs['<STR_LIT>']]<EOL>if '<STR_LIT>' in suffixes:<EOL><INDENT>new_subs = [('<STR_LIT:a>', '<STR_LIT>' % subs['<STR_LIT:a>'][<NUM_LIT:0>])]<EOL>record_add_field(<EOL>self.record, '<STR_LIT>', subfields=new_subs)<EOL>continue<EOL><DEDENT><DEDENT><DEDENT>if '<STR_LIT:a>' in subs:<EOL><INDENT>for sub in subs['<STR_LIT:a>']:<EOL><INDENT>if sub.lower() in forbidden_values:<EOL><INDENT>record_delete_field(self.record, tag=\"<STR_LIT>\",<EOL>field_position_global=field[<NUM_LIT:4>])<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "035 Externals.", "id": "f7925:c0:m5"}
{"signature": "def update_links_and_ffts(self):", "body": "figure_counter = <NUM_LIT:0><EOL>for field in record_get_field_instances(self.record,<EOL>tag='<STR_LIT>',<EOL>ind1='<STR_LIT:4>'):<EOL><INDENT>subs = field_get_subfields(field)<EOL>newsubs = []<EOL>remove = False<EOL>if '<STR_LIT:z>' in subs:<EOL><INDENT>is_figure = [s for s in subs['<STR_LIT:z>'] if \"<STR_LIT>\" in s.lower()]<EOL>if is_figure and '<STR_LIT:u>' in subs:<EOL><INDENT>is_subformat = [<EOL>s for s in subs['<STR_LIT:u>'] if \"<STR_LIT>\" in s.lower()]<EOL>if not is_subformat:<EOL><INDENT>url = subs['<STR_LIT:u>'][<NUM_LIT:0>]<EOL>if url.endswith(\"<STR_LIT>\"):<EOL><INDENT>fd, local_url = mkstemp(suffix=os.path.basename(url))<EOL>os.close(fd)<EOL>self.logger.info(<EOL>\"<STR_LIT>\" % (url, local_url))<EOL>plotfile = \"<STR_LIT>\"<EOL>try:<EOL><INDENT>plotfile = download_file(url=url,<EOL>download_to_file=local_url)<EOL><DEDENT>except Exception as e:<EOL><INDENT>self.logger.exception(e)<EOL>remove = True<EOL><DEDENT>if plotfile:<EOL><INDENT>converted = convert_images([plotfile])<EOL>if converted:<EOL><INDENT>url = converted.pop()<EOL>msg = \"<STR_LIT>\"% (local_url, url)<EOL>self.logger.info(msg)<EOL><DEDENT>else:<EOL><INDENT>msg = \"<STR_LIT>\"% (local_url,)<EOL>self.logger.error(msg)<EOL>url = None<EOL>remove = True<EOL><DEDENT><DEDENT><DEDENT>if url:<EOL><INDENT>newsubs.append(('<STR_LIT:a>', url))<EOL>newsubs.append(('<STR_LIT:t>', '<STR_LIT>'))<EOL>figure_counter += <NUM_LIT:1><EOL>if '<STR_LIT:y>' in subs:<EOL><INDENT>newsubs.append(<EOL>('<STR_LIT:d>', \"<STR_LIT>\" % (figure_counter, subs['<STR_LIT:y>'][<NUM_LIT:0>])))<EOL>newsubs.append(('<STR_LIT:n>', subs['<STR_LIT:y>'][<NUM_LIT:0>]))<EOL><DEDENT>else:<EOL><INDENT>name = os.path.basename(<EOL>os.path.splitext(subs['<STR_LIT:u>'][<NUM_LIT:0>])[<NUM_LIT:0>])<EOL>newsubs.append(<EOL>('<STR_LIT:d>', \"<STR_LIT>\" % (figure_counter, name)))<EOL>newsubs.append(('<STR_LIT:n>', name))<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if not 
newsubs and '<STR_LIT:u>' in subs:<EOL><INDENT>is_fulltext = [s for s in subs['<STR_LIT:u>'] if \"<STR_LIT>\" in s]<EOL>if is_fulltext:<EOL><INDENT>newsubs = [('<STR_LIT:t>', '<STR_LIT>'), ('<STR_LIT:a>', subs['<STR_LIT:u>'][<NUM_LIT:0>])]<EOL><DEDENT><DEDENT>if not newsubs and '<STR_LIT:u>' in subs:<EOL><INDENT>remove = True<EOL>is_zipfile = [s for s in subs['<STR_LIT:u>'] if \"<STR_LIT>\" in s]<EOL>if is_zipfile:<EOL><INDENT>url = is_zipfile[<NUM_LIT:0>]<EOL>local_url = os.path.join(self.get_local_folder(), os.path.basename(url))<EOL>self.logger.info(\"<STR_LIT>\" %<EOL>(url, local_url))<EOL>zipped_archive = \"<STR_LIT>\"<EOL>try:<EOL><INDENT>zipped_archive = download_file(url=is_zipfile[<NUM_LIT:0>],<EOL>download_to_file=local_url)<EOL><DEDENT>except Exception as e:<EOL><INDENT>self.logger.exception(e)<EOL>remove = True<EOL><DEDENT>if zipped_archive:<EOL><INDENT>unzipped_archive = unzip(zipped_archive)<EOL>list_of_pngs = locate(\"<STR_LIT>\", unzipped_archive)<EOL>for png in list_of_pngs:<EOL><INDENT>if \"<STR_LIT>\" in png or \"<STR_LIT>\" in png:<EOL><INDENT>continue<EOL><DEDENT>figure_counter += <NUM_LIT:1><EOL>plotsubs = []<EOL>plotsubs.append(('<STR_LIT:a>', png))<EOL>caption = '<STR_LIT>' % (<EOL>figure_counter, os.path.basename(png))<EOL>plotsubs.append(('<STR_LIT:d>', caption))<EOL>plotsubs.append(('<STR_LIT:t>', '<STR_LIT>'))<EOL>record_add_field(<EOL>self.record, '<STR_LIT>', subfields=plotsubs)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if not remove and not newsubs and '<STR_LIT:u>' in subs:<EOL><INDENT>urls = ('<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>')<EOL>for val in subs['<STR_LIT:u>']:<EOL><INDENT>if any(url in val for url in urls):<EOL><INDENT>remove = True<EOL>break<EOL><DEDENT>if val.endswith('<STR_LIT>'):<EOL><INDENT>remove = True<EOL><DEDENT><DEDENT><DEDENT>if newsubs:<EOL><INDENT>record_add_field(self.record, '<STR_LIT>', subfields=newsubs)<EOL>remove = True<EOL><DEDENT>if 
remove:<EOL><INDENT>record_delete_field(self.record, '<STR_LIT>', ind1='<STR_LIT:4>',<EOL>field_position_global=field[<NUM_LIT:4>])<EOL><DEDENT><DEDENT>", "docstring": "FFT (856) Dealing with graphs.", "id": "f7925:c0:m16"}
{"signature": "def get_record(self):", "body": "self.update_system_numbers()<EOL>self.add_systemnumber(\"<STR_LIT>\")<EOL>self.fields_list = [<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:100>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\"<EOL>]<EOL>self.keep_only_fields()<EOL>self.determine_collections()<EOL>self.add_cms_link()<EOL>self.update_languages()<EOL>self.update_reportnumbers()<EOL>self.update_date()<EOL>self.update_pagenumber()<EOL>self.update_authors()<EOL>self.update_subject_categories(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>self.update_keywords()<EOL>self.update_experiments()<EOL>self.update_collaboration()<EOL>self.update_journals()<EOL>self.update_links_and_ffts()<EOL>if '<STR_LIT>' in self.collections:<EOL><INDENT>self.update_thesis_supervisors()<EOL>self.update_thesis_information()<EOL><DEDENT>if '<STR_LIT>' in self.collections:<EOL><INDENT>self.add_notes()<EOL><DEDENT>for collection in self.collections:<EOL><INDENT>record_add_field(self.record,<EOL>tag='<STR_LIT>',<EOL>subfields=[('<STR_LIT:a>', collection)])<EOL><DEDENT>self.remove_controlfields()<EOL>return self.record<EOL>", "docstring": "Override the base get_record.", "id": "f7925:c0:m1"}
{"signature": "def update_thesis_supervisors(self):", "body": "for field in record_get_field_instances(self.record, '<STR_LIT>'):<EOL><INDENT>record_add_field(self.record, '<STR_LIT>', subfields=field[<NUM_LIT:0>])<EOL><DEDENT>record_delete_fields(self.record, '<STR_LIT>')<EOL>", "docstring": "700 -> 701 Thesis supervisors.", "id": "f7925:c0:m10"}
{"signature": "def update_pagenumber(self):", "body": "for field in record_get_field_instances(self.record, '<STR_LIT>'):<EOL><INDENT>for idx, (key, value) in enumerate(field[<NUM_LIT:0>]):<EOL><INDENT>if key == '<STR_LIT:a>':<EOL><INDENT>if \"<STR_LIT>\" not in value and value != \"<STR_LIT>\":<EOL><INDENT>field[<NUM_LIT:0>][idx] = ('<STR_LIT:a>', re.sub(r'<STR_LIT>', '<STR_LIT>', value))<EOL><DEDENT>else:<EOL><INDENT>record_delete_field(self.record, '<STR_LIT>',<EOL>field_position_global=field[<NUM_LIT:4>])<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "300 page number.", "id": "f7925:c0:m8"}
{"signature": "def add_notes(self):", "body": "subs = [('<STR_LIT:a>', \"<STR_LIT>\")]<EOL>record_add_field(self.record, \"<STR_LIT>\", subfields=subs)<EOL>", "docstring": "500 - Preliminary results.", "id": "f7925:c0:m17"}
{"signature": "def update_thesis_information(self):", "body": "fields_501 = record_get_field_instances(self.record, '<STR_LIT>')<EOL>for idx, field in enumerate(fields_501):<EOL><INDENT>new_subs = []<EOL>for key, value in field[<NUM_LIT:0>]:<EOL><INDENT>if key == '<STR_LIT:a>':<EOL><INDENT>new_subs.append(('<STR_LIT:b>', value))<EOL><DEDENT>elif key == '<STR_LIT:b>':<EOL><INDENT>new_subs.append(('<STR_LIT:c>', value))<EOL><DEDENT>elif key == '<STR_LIT:c>':<EOL><INDENT>new_subs.append(('<STR_LIT:d>', value))<EOL><DEDENT>else:<EOL><INDENT>new_subs.append((key, value))<EOL><DEDENT><DEDENT>fields_501[idx] = field_swap_subfields(field, new_subs)<EOL><DEDENT>", "docstring": "501 degree info - move subfields.", "id": "f7925:c0:m11"}
{"signature": "def update_date(self):", "body": "for field in record_get_field_instances(self.record, '<STR_LIT>'):<EOL><INDENT>for idx, (key, value) in enumerate(field[<NUM_LIT:0>]):<EOL><INDENT>if key == \"<STR_LIT:c>\":<EOL><INDENT>field[<NUM_LIT:0>][idx] = (\"<STR_LIT:c>\", convert_date_to_iso(value))<EOL>record_delete_fields(self.record, \"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT>if '<STR_LIT>' not in self.collections:<EOL><INDENT>for field in record_get_field_instances(self.record, '<STR_LIT>'):<EOL><INDENT>record_add_field(self.record, '<STR_LIT>', subfields=field[<NUM_LIT:0>])<EOL><DEDENT>record_delete_fields(self.record, '<STR_LIT>')<EOL><DEDENT>", "docstring": "269 Date normalization.", "id": "f7925:c0:m7"}
{"signature": "def __init__(self, bibrec, strip_fields_list=None):", "body": "super(CDS2Inspire, self).__init__(bibrec, strip_fields_list)<EOL>self.collections = set([])<EOL>", "docstring": "Create.", "id": "f7925:c0:m0"}
{"signature": "def determine_collections(self):", "body": "for value in record_get_field_values(self.record, '<STR_LIT>', code='<STR_LIT:a>'):<EOL><INDENT>if '<STR_LIT>' in value.upper():<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' in value.upper():<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' in value.upper():<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL><DEDENT>if \"<STR_LIT>\" in value.upper():<EOL><INDENT>self.hidden = True<EOL><DEDENT><DEDENT>if self.is_published():<EOL><INDENT>self.collections.add(\"<STR_LIT>\")<EOL>self.collections.add(\"<STR_LIT>\")<EOL><DEDENT>if '<STR_LIT>' not in self.collections:<EOL><INDENT>from itertools import product<EOL>kb = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>']<EOL>values = record_get_field_values(self.record, \"<STR_LIT>\", code='<STR_LIT:a>')<EOL>for val, rep in product(values, kb):<EOL><INDENT>if val.startswith(rep):<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT><DEDENT>if record_get_field_values(self.record, '<STR_LIT>',<EOL>filter_subfield_code=\"<STR_LIT:a>\",<EOL>filter_subfield_value=\"<STR_LIT>\"):<EOL><INDENT>self.collections.add(\"<STR_LIT>\")<EOL><DEDENT>self.collections.add('<STR_LIT>')<EOL>self.collections.add('<STR_LIT>')<EOL>if '<STR_LIT>' not in self.collections:<EOL><INDENT>for value in record_get_field_values(self.record,<EOL>tag='<STR_LIT>',<EOL>code='<STR_LIT:n>'):<EOL><INDENT>if value[-<NUM_LIT:2>:].isdigit():<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT><DEDENT>record_delete_fields(self.record, \"<STR_LIT>\")<EOL>", "docstring": "Try to determine which collections this record should belong to.", "id": "f7925:c0:m2"}
{"signature": "def update_reportnumbers(self):", "body": "rep_088_fields = record_get_field_instances(self.record, '<STR_LIT>')<EOL>for field in rep_088_fields:<EOL><INDENT>subs = field_get_subfields(field)<EOL>if '<STR_LIT>' in subs:<EOL><INDENT>for val in subs['<STR_LIT>']:<EOL><INDENT>if val.startswith('<STR_LIT>') or val.startswith('<STR_LIT>'):<EOL><INDENT>sf = [('<STR_LIT>', '<STR_LIT>'), ('<STR_LIT:b>', val)]<EOL>record_add_field(self.record, '<STR_LIT>', subfields=sf)<EOL><DEDENT><DEDENT><DEDENT>for key, val in field[<NUM_LIT:0>]:<EOL><INDENT>if key in ['<STR_LIT:a>', '<STR_LIT>'] and not val.startswith('<STR_LIT>'):<EOL><INDENT>record_add_field(<EOL>self.record, '<STR_LIT>', subfields=[('<STR_LIT:a>', val)])<EOL><DEDENT><DEDENT><DEDENT>record_delete_fields(self.record, \"<STR_LIT>\")<EOL>rep_037_fields = record_get_field_instances(self.record, '<STR_LIT>')<EOL>for field in rep_037_fields:<EOL><INDENT>subs = field_get_subfields(field)<EOL>if '<STR_LIT:a>' in subs:<EOL><INDENT>for value in subs['<STR_LIT:a>']:<EOL><INDENT>if '<STR_LIT>' in value:<EOL><INDENT>new_subs = [('<STR_LIT:a>', value), ('<STR_LIT>', '<STR_LIT>')]<EOL>for fld in record_get_field_instances(self.record,  '<STR_LIT>'):<EOL><INDENT>for key, val in field_get_subfield_instances(fld):<EOL><INDENT>if key == '<STR_LIT:a>':<EOL><INDENT>new_subs.append(('<STR_LIT:c>', val))<EOL>break<EOL><DEDENT><DEDENT><DEDENT>nf = create_field(subfields=new_subs)<EOL>record_replace_field(self.record, '<STR_LIT>', nf, field[<NUM_LIT:4>])<EOL><DEDENT><DEDENT><DEDENT>for key, val in field[<NUM_LIT:0>]:<EOL><INDENT>if key in ['<STR_LIT:a>', '<STR_LIT>'] and val.startswith('<STR_LIT>'):<EOL><INDENT>record_delete_field(<EOL>self.record, '<STR_LIT>', field_position_global=field[<NUM_LIT:4>])<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Handle reportnumbers.", "id": "f7925:c0:m6"}
{"signature": "def update_collaboration(self):", "body": "for field in record_get_field_instances(self.record, '<STR_LIT>'):<EOL><INDENT>subs = field_get_subfield_instances(field)<EOL>for idx, (key, value) in enumerate(subs[:]):<EOL><INDENT>if key == '<STR_LIT:5>':<EOL><INDENT>subs.pop(idx)<EOL><DEDENT>elif value.startswith('<STR_LIT>'):<EOL><INDENT>subs.pop(idx)<EOL><DEDENT><DEDENT>if len(subs) == <NUM_LIT:0>:<EOL><INDENT>record_delete_field(self.record,<EOL>tag='<STR_LIT>',<EOL>field_position_global=field[<NUM_LIT:4>])<EOL><DEDENT><DEDENT>", "docstring": "710 Collaboration.", "id": "f7925:c0:m14"}
{"signature": "def update_journals(self):", "body": "for field in record_get_field_instances(self.record, '<STR_LIT>'):<EOL><INDENT>subs = field_get_subfield_instances(field)<EOL>new_subs = []<EOL>for idx, (key, value) in enumerate(subs):<EOL><INDENT>if key == '<STR_LIT:p>':<EOL><INDENT>journal_name = self.get_config_item(value, \"<STR_LIT>\", allow_substring=False)<EOL>journal_name = journal_name.replace('<STR_LIT>', '<STR_LIT:.>').strip()<EOL>new_subs.append((key, journal_name))<EOL><DEDENT>else:<EOL><INDENT>new_subs.append((key, value))<EOL><DEDENT><DEDENT>record_delete_field(self.record, tag=\"<STR_LIT>\",<EOL>field_position_global=field[<NUM_LIT:4>])<EOL>record_add_field(self.record, \"<STR_LIT>\", subfields=new_subs)<EOL><DEDENT>", "docstring": "773 journal translations.", "id": "f7925:c0:m15"}
{"signature": "def is_published(self):", "body": "field980 = record_get_field_instances(self.record, '<STR_LIT>')<EOL>field773 = record_get_field_instances(self.record, '<STR_LIT>')<EOL>for f980 in field980:<EOL><INDENT>if '<STR_LIT:a>' in field_get_subfields(f980):<EOL><INDENT>for f773 in field773:<EOL><INDENT>if '<STR_LIT:p>' in field_get_subfields(f773):<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return False<EOL>", "docstring": "Check fields 980 and 773 to see if the record has already been published.\n\n        :return: True is published, else False", "id": "f7925:c0:m3"}
{"signature": "def update_dois(self):", "body": "dois = record_get_field_instances(self.record, '<STR_LIT>', ind1=\"<STR_LIT>\")<EOL>all_dois = {}<EOL>for field in dois:<EOL><INDENT>subs = field_get_subfield_instances(field)<EOL>subs_dict = dict(subs)<EOL>if subs_dict.get('<STR_LIT:a>'):<EOL><INDENT>if subs_dict['<STR_LIT:a>'] in all_dois:<EOL><INDENT>record_delete_field(self.record, tag='<STR_LIT>', ind1='<STR_LIT>', field_position_global=field[<NUM_LIT:4>])<EOL>continue<EOL><DEDENT>all_dois[subs_dict['<STR_LIT:a>']] = field<EOL><DEDENT><DEDENT>", "docstring": "Remove duplicate BibMatch DOIs.", "id": "f7926:c0:m18"}
{"signature": "def update_date_year(self):", "body": "dates = record_get_field_instances(self.record, '<STR_LIT>')<EOL>for field in dates:<EOL><INDENT>for idx, (key, value) in enumerate(field[<NUM_LIT:0>]):<EOL><INDENT>if key == '<STR_LIT:c>':<EOL><INDENT>field[<NUM_LIT:0>][idx] = ('<STR_LIT:c>', value[:<NUM_LIT:4>])<EOL><DEDENT>elif key == '<STR_LIT:t>':<EOL><INDENT>del field[<NUM_LIT:0>][idx]<EOL><DEDENT><DEDENT><DEDENT>if not dates:<EOL><INDENT>published_years = record_get_field_values(self.record, \"<STR_LIT>\", code=\"<STR_LIT:y>\")<EOL>if published_years:<EOL><INDENT>record_add_field(<EOL>self.record, \"<STR_LIT>\", subfields=[(\"<STR_LIT:c>\", published_years[<NUM_LIT:0>][:<NUM_LIT:4>])])<EOL><DEDENT>else:<EOL><INDENT>other_years = record_get_field_values(self.record, \"<STR_LIT>\", code=\"<STR_LIT:c>\")<EOL>if other_years:<EOL><INDENT>record_add_field(<EOL>self.record, \"<STR_LIT>\", subfields=[(\"<STR_LIT:c>\", other_years[<NUM_LIT:0>][:<NUM_LIT:4>])])<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "260 Date normalization.", "id": "f7926:c0:m24"}
{"signature": "def update_pagenumber(self):", "body": "pages = record_get_field_instances(self.record, '<STR_LIT>')<EOL>for field in pages:<EOL><INDENT>for idx, (key, value) in enumerate(field[<NUM_LIT:0>]):<EOL><INDENT>if key == '<STR_LIT:a>':<EOL><INDENT>field[<NUM_LIT:0>][idx] = ('<STR_LIT:a>', \"<STR_LIT>\".format(value))<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "300 page number.", "id": "f7926:c0:m22"}
{"signature": "def update_collections(self):", "body": "for value in record_get_field_values(self.record, '<STR_LIT>', code='<STR_LIT:a>'):<EOL><INDENT>if '<STR_LIT>' in value.upper():<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' in value.upper():<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' in value.upper():<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' in value.upper():<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL><DEDENT>if '<STR_LIT>' in value.upper():<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL><DEDENT>elif '<STR_LIT>' in value.upper() and\"<STR_LIT>\" not in self.collections:<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL>if self.is_published() and \"<STR_LIT>\" not in self.collections:<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL><DEDENT><DEDENT>if \"<STR_LIT>\" in value.upper():<EOL><INDENT>self.hidden = True<EOL><DEDENT><DEDENT>record_delete_fields(self.record, \"<STR_LIT>\")<EOL>if not self.collections:<EOL><INDENT>self.collections.add('<STR_LIT>')<EOL><DEDENT>for collection in self.collections:<EOL><INDENT>record_add_field(self.record,<EOL>tag='<STR_LIT>',<EOL>subfields=[('<STR_LIT:a>', collection)])<EOL>if collection in self.collection_base:<EOL><INDENT>subs = [('<STR_LIT:a>', self.collection_base[collection])]<EOL>record_add_field(self.record,<EOL>tag='<STR_LIT>',<EOL>subfields=subs)<EOL><DEDENT><DEDENT>", "docstring": "Try to determine which collections this record should belong to.", "id": "f7926:c0:m10"}
{"signature": "def update_author_to_proceeding(self):", "body": "titles = record_get_field_instances(self.record,<EOL>tag=\"<STR_LIT>\")<EOL>for title in titles:<EOL><INDENT>subs = field_get_subfields(title)<EOL>new_subs = []<EOL>if \"<STR_LIT:a>\" in subs:<EOL><INDENT>new_subs.append((\"<STR_LIT:a>\", subs['<STR_LIT:a>'][<NUM_LIT:0>]))<EOL><DEDENT>if \"<STR_LIT:b>\" in subs:<EOL><INDENT>new_subs.append((\"<STR_LIT:c>\", subs['<STR_LIT:b>'][<NUM_LIT:0>]))<EOL><DEDENT>record_add_field(self.record,<EOL>tag=\"<STR_LIT>\",<EOL>subfields=new_subs)<EOL><DEDENT>record_delete_fields(self.record, tag=\"<STR_LIT>\")<EOL>record_delete_fields(self.record, tag=\"<STR_LIT>\")<EOL>", "docstring": "Move author info from 245 to 111 proceeding style.", "id": "f7926:c0:m13"}
{"signature": "def update_oai_info(self):", "body": "for field in record_get_field_instances(self.record, '<STR_LIT>', ind1=\"<STR_LIT:C>\", ind2=\"<STR_LIT:O>\"):<EOL><INDENT>new_subs = []<EOL>for tag, value in field[<NUM_LIT:0>]:<EOL><INDENT>if tag == \"<STR_LIT:o>\":<EOL><INDENT>new_subs.append((\"<STR_LIT:a>\", value))<EOL><DEDENT>else:<EOL><INDENT>new_subs.append((tag, value))<EOL><DEDENT>if value in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>self.tag_as_cern = True<EOL><DEDENT><DEDENT>record_add_field(self.record, '<STR_LIT>', ind1=\"<STR_LIT>\", subfields=new_subs)<EOL><DEDENT>record_delete_fields(self.record, '<STR_LIT>')<EOL>", "docstring": "Add the 909 OAI info to 035.", "id": "f7926:c0:m6"}
{"signature": "def update_authors(self):", "body": "author_names = record_get_field_instances(self.record, '<STR_LIT:100>')<EOL>author_names.extend(record_get_field_instances(self.record, '<STR_LIT>'))<EOL>for field in author_names:<EOL><INDENT>subs = field_get_subfields(field)<EOL>for idx, (key, value) in enumerate(field[<NUM_LIT:0>]):<EOL><INDENT>if key == '<STR_LIT:a>':<EOL><INDENT>field[<NUM_LIT:0>][idx] = ('<STR_LIT:a>', value.replace(\"<STR_LIT:.>\", \"<STR_LIT:U+0020>\").strip())<EOL><DEDENT>elif key == '<STR_LIT:v>':<EOL><INDENT>del field[<NUM_LIT:0>][idx]<EOL><DEDENT><DEDENT>if subs.get(\"<STR_LIT:u>\", None) == \"<STR_LIT>\":<EOL><INDENT>self.tag_as_cern = True<EOL><DEDENT><DEDENT>", "docstring": "100 & 700 punctuate author names.", "id": "f7926:c0:m16"}
{"signature": "def update_languages(self):", "body": "language_fields = record_get_field_instances(self.record, '<STR_LIT>')<EOL>language = \"<STR_LIT>\"<EOL>record_delete_fields(self.record, \"<STR_LIT>\")<EOL>for field in language_fields:<EOL><INDENT>subs = field_get_subfields(field)<EOL>if '<STR_LIT:a>' in subs:<EOL><INDENT>language = self.get_config_item(subs['<STR_LIT:a>'][<NUM_LIT:0>], \"<STR_LIT>\")<EOL>break<EOL><DEDENT><DEDENT>new_subs = [('<STR_LIT:a>', language)]<EOL>record_add_field(self.record, \"<STR_LIT>\", subfields=new_subs)<EOL>", "docstring": "041 Language.", "id": "f7926:c0:m27"}
{"signature": "def update_reportnumbers(self):", "body": "report_037_fields = record_get_field_instances(self.record, '<STR_LIT>')<EOL>for field in report_037_fields:<EOL><INDENT>subs = field_get_subfields(field)<EOL>for val in subs.get(\"<STR_LIT:a>\", []):<EOL><INDENT>if \"<STR_LIT>\" not in val:<EOL><INDENT>record_delete_field(self.record,<EOL>tag=\"<STR_LIT>\",<EOL>field_position_global=field[<NUM_LIT:4>])<EOL>new_subs = [(code, val[<NUM_LIT:0>]) for code, val in subs.items()]<EOL>record_add_field(self.record, \"<STR_LIT>\", subfields=new_subs)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Update reportnumbers.", "id": "f7926:c0:m15"}
{"signature": "def update_links_and_ffts(self):", "body": "for field in record_get_field_instances(self.record,<EOL>tag='<STR_LIT>',<EOL>ind1='<STR_LIT:4>'):<EOL><INDENT>subs = field_get_subfields(field)<EOL>newsubs = []<EOL>url = subs.get(\"<STR_LIT:u>\", [])<EOL>if not url:<EOL><INDENT>record_delete_field(self.record, '<STR_LIT>', ind1='<STR_LIT:4>',<EOL>field_position_global=field[<NUM_LIT:4>])<EOL>continue<EOL><DEDENT>url = url[<NUM_LIT:0>]<EOL>if \"<STR_LIT>\" in url and url.endswith(\"<STR_LIT>\"):<EOL><INDENT>newsubs.append(('<STR_LIT:a>', url))<EOL>description = subs.get(\"<STR_LIT:y>\", [])<EOL>if description:<EOL><INDENT>newsubs.append(('<STR_LIT:d>', description[<NUM_LIT:0>]))<EOL><DEDENT>if newsubs:<EOL><INDENT>record_add_field(self.record, '<STR_LIT>', subfields=newsubs)<EOL>record_delete_field(self.record, '<STR_LIT>', ind1='<STR_LIT:4>',<EOL>field_position_global=field[<NUM_LIT:4>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for idx, (key, value) in enumerate(field[<NUM_LIT:0>]):<EOL><INDENT>if key == '<STR_LIT:w>':<EOL><INDENT>del field[<NUM_LIT:0>][idx]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "FFT (856) Dealing with files.", "id": "f7926:c0:m26"}
{"signature": "def update_thesis_supervisors(self):", "body": "for field in record_get_field_instances(self.record, '<STR_LIT>'):<EOL><INDENT>subs = list(field[<NUM_LIT:0>])<EOL>subs.append((\"<STR_LIT:e>\", \"<STR_LIT>\"))<EOL>record_add_field(self.record, '<STR_LIT>', subfields=subs)<EOL><DEDENT>record_delete_fields(self.record, '<STR_LIT>')<EOL>", "docstring": "700 -> 701 Thesis supervisors.", "id": "f7926:c0:m20"}
{"signature": "def update_journals(self):", "body": "for field in record_get_field_instances(self.record, '<STR_LIT>'):<EOL><INDENT>subs = field_get_subfield_instances(field)<EOL>new_subs = []<EOL>volume_letter = \"<STR_LIT>\"<EOL>journal_name = \"<STR_LIT>\"<EOL>for idx, (key, value) in enumerate(subs):<EOL><INDENT>if key == '<STR_LIT:p>':<EOL><INDENT>journal_name = self.get_config_item(value, \"<STR_LIT>\", allow_substring=False)<EOL>journal_name = journal_name.replace('<STR_LIT>', '<STR_LIT:.>').replace('<STR_LIT:.>', '<STR_LIT>').replace('<STR_LIT>', '<STR_LIT>').strip()<EOL><DEDENT>elif key == '<STR_LIT:v>':<EOL><INDENT>volume_letter = value<EOL><DEDENT>else:<EOL><INDENT>new_subs.append((key, value))<EOL><DEDENT><DEDENT>if not journal_name == \"<STR_LIT>\":<EOL><INDENT>letter = return_letters_from_string(volume_letter)<EOL>if letter:<EOL><INDENT>journal_name = \"<STR_LIT>\".format(journal_name, letter)<EOL>volume_letter = volume_letter.strip(letter)<EOL><DEDENT><DEDENT>if journal_name:<EOL><INDENT>new_subs.append((\"<STR_LIT:p>\", journal_name))<EOL><DEDENT>if volume_letter:<EOL><INDENT>new_subs.append((\"<STR_LIT:v>\", volume_letter))<EOL><DEDENT>record_delete_field(self.record, tag=\"<STR_LIT>\",<EOL>field_position_global=field[<NUM_LIT:4>])<EOL>record_add_field(self.record, \"<STR_LIT>\", subfields=new_subs)<EOL><DEDENT>", "docstring": "773 journal translations.", "id": "f7926:c0:m19"}
{"signature": "def update_system_numbers(self):", "body": "scn_035_fields = record_get_field_instances(self.record, '<STR_LIT>')<EOL>new_fields = []<EOL>for field in scn_035_fields:<EOL><INDENT>subs = field_get_subfields(field)<EOL>if '<STR_LIT>' in subs:<EOL><INDENT>if subs['<STR_LIT>'][<NUM_LIT:0>].lower() == \"<STR_LIT>\" and subs.get('<STR_LIT:a>'):<EOL><INDENT>self.add_control_number(\"<STR_LIT>\", subs.get('<STR_LIT:a>')[<NUM_LIT:0>])<EOL><DEDENT>if subs['<STR_LIT>'][<NUM_LIT:0>].lower() in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>new_fields.append(field_get_subfield_instances(field))<EOL><DEDENT>record_delete_fields(self.record, tag=\"<STR_LIT>\")<EOL>for field in new_fields:<EOL><INDENT>record_add_field(self.record, tag=\"<STR_LIT>\", subfields=field)<EOL><DEDENT>", "docstring": "035 Externals.", "id": "f7926:c0:m9"}
{"signature": "def __init__(self, bibrec, strip_fields_list=None):", "body": "super(Inspire2CDS, self).__init__(bibrec, strip_fields_list)<EOL>self.collections = set([])<EOL>self.tag_as_cern = False<EOL>self.collection_base = {<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>}<EOL>self.recid = None<EOL>self.conference_recid = None<EOL>self.conference_codes = None<EOL>self.conference_pages = None<EOL>", "docstring": "Create.", "id": "f7926:c0:m0"}
{"signature": "def get_record(self):", "body": "self.recid = self.get_recid()<EOL>self.remove_controlfields()<EOL>self.update_system_numbers()<EOL>self.add_systemnumber(\"<STR_LIT>\", recid=self.recid)<EOL>self.add_control_number(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>self.update_collections()<EOL>self.update_languages()<EOL>self.update_reportnumbers()<EOL>self.update_authors()<EOL>self.update_journals()<EOL>self.update_subject_categories(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>self.update_pagenumber()<EOL>self.update_notes()<EOL>self.update_experiments()<EOL>self.update_isbn()<EOL>self.update_dois()<EOL>self.update_links_and_ffts()<EOL>self.update_date()<EOL>self.update_date_year()<EOL>self.update_hidden_notes()<EOL>self.update_oai_info()<EOL>self.update_cnum()<EOL>self.update_conference_info()<EOL>self.fields_list = [<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>]<EOL>self.strip_fields()<EOL>if \"<STR_LIT>\" in self.collections:<EOL><INDENT>self.update_conference_111()<EOL>self.update_conference_links()<EOL>record_add_field(self.record, \"<STR_LIT>\", ind1=\"<STR_LIT:C>\", subfields=[(\"<STR_LIT:a>\", \"<STR_LIT>\")])<EOL><DEDENT>if \"<STR_LIT>\" in self.collections:<EOL><INDENT>self.update_thesis_information()<EOL>self.update_thesis_supervisors()<EOL><DEDENT>if \"<STR_LIT>\" in self.collections:<EOL><INDENT>self.update_title_to_proceeding()<EOL>self.update_author_to_proceeding()<EOL>record_add_field(self.record, \"<STR_LIT>\", ind1=\"<STR_LIT:C>\", subfields=[(\"<STR_LIT:a>\", \"<STR_LIT>\")])<EOL><DEDENT>if self.tag_as_cern:<EOL><INDENT>record_add_field(self.record, \"<STR_LIT>\", ind1=\"<STR_LIT:C>\", subfields=[(\"<STR_LIT:a>\", \"<STR_LIT>\")])<EOL><DEDENT>return self.record<EOL>", "docstring": "Override the base.", "id": "f7926:c0:m5"}
{"signature": "def update_cnum(self):", "body": "if \"<STR_LIT>\" not in self.collections:<EOL><INDENT>cnums = record_get_field_values(self.record, '<STR_LIT>', code=\"<STR_LIT:w>\")<EOL>for cnum in cnums:<EOL><INDENT>cnum_subs = [<EOL>(\"<STR_LIT>\", \"<STR_LIT>\"),<EOL>(\"<STR_LIT:a>\", cnum)<EOL>]<EOL>record_add_field(self.record, \"<STR_LIT>\", subfields=cnum_subs)<EOL><DEDENT><DEDENT>", "docstring": "Check if we shall add cnum in 035.", "id": "f7926:c0:m7"}
{"signature": "def update_title_to_proceeding(self):", "body": "titles = record_get_field_instances(self.record,<EOL>tag=\"<STR_LIT>\")<EOL>for title in titles:<EOL><INDENT>subs = field_get_subfields(title)<EOL>new_subs = []<EOL>if \"<STR_LIT:a>\" in subs:<EOL><INDENT>new_subs.append((\"<STR_LIT:a>\", subs['<STR_LIT:a>'][<NUM_LIT:0>]))<EOL><DEDENT>if \"<STR_LIT:b>\" in subs:<EOL><INDENT>new_subs.append((\"<STR_LIT:c>\", subs['<STR_LIT:b>'][<NUM_LIT:0>]))<EOL><DEDENT>record_add_field(self.record,<EOL>tag=\"<STR_LIT>\",<EOL>subfields=new_subs)<EOL><DEDENT>record_delete_fields(self.record, tag=\"<STR_LIT>\")<EOL>record_delete_fields(self.record, tag=\"<STR_LIT>\")<EOL>", "docstring": "Move title info from 245 to 111 proceeding style.", "id": "f7926:c0:m12"}
{"signature": "def collapse_initials(name):", "body": "if len(name.split()) > <NUM_LIT:1>:<EOL><INDENT>name = re.sub(r'<STR_LIT>', r'<STR_LIT>', name)<EOL><DEDENT>return name<EOL>", "docstring": "Removes the space between initials.\n        eg T. A. --> T.A.", "id": "f7928:m1"}
{"signature": "def fix_name_capitalization(lastname, givennames):", "body": "lastnames = lastname.split()<EOL>if len(lastnames) == <NUM_LIT:1>:<EOL><INDENT>if '<STR_LIT:->' in lastname:<EOL><INDENT>names = lastname.split('<STR_LIT:->')<EOL>names = [a[<NUM_LIT:0>] + a[<NUM_LIT:1>:].lower() for a in names]<EOL>lastname = '<STR_LIT:->'.join(names)<EOL><DEDENT>else:<EOL><INDENT>lastname = lastname[<NUM_LIT:0>] + lastname[<NUM_LIT:1>:].lower()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>names = []<EOL>for name in lastnames:<EOL><INDENT>if re.search(r'<STR_LIT>', name):<EOL><INDENT>names.append(name)<EOL><DEDENT>else:<EOL><INDENT>names.append(name[<NUM_LIT:0>] + name[<NUM_LIT:1>:].lower())<EOL><DEDENT><DEDENT>lastname = '<STR_LIT:U+0020>'.join(names)<EOL>lastname = collapse_initials(lastname)<EOL><DEDENT>names = []<EOL>for name in givennames:<EOL><INDENT>if re.search(r'<STR_LIT>', name):<EOL><INDENT>names.append(name)<EOL><DEDENT>else:<EOL><INDENT>names.append(name[<NUM_LIT:0>] + name[<NUM_LIT:1>:].lower())<EOL><DEDENT><DEDENT>givennames = '<STR_LIT:U+0020>'.join(names)<EOL>return lastname, givennames<EOL>", "docstring": "Converts capital letters to lower keeps first letter capital.", "id": "f7928:m2"}
{"signature": "def _read_file(self):", "body": "if not os.path.exists(self.db_path):<EOL><INDENT>return {}<EOL><DEDENT>with open(self.db_path, '<STR_LIT:r>') as f:<EOL><INDENT>content = f.read()<EOL>return json.loads(content)<EOL><DEDENT>", "docstring": "read the db file content\n:rtype: dict", "id": "f7934:c0:m4"}
{"signature": "def get(self, key, default=None):", "body": "if self.in_memory:<EOL><INDENT>return self._memory_db.get(key, default)<EOL><DEDENT>else:<EOL><INDENT>db = self._read_file()<EOL>return db.get(key, default)<EOL><DEDENT>", "docstring": "Get key value, return default if key doesn't exist", "id": "f7934:c0:m2"}
{"signature": "@property<EOL><INDENT>def in_memory(self):<DEDENT>", "body": "return self.db_path is None<EOL>", "docstring": "indicator for if ForgiveDB is in memory\n:rtype: bool", "id": "f7934:c0:m1"}
{"signature": "def __init__(self, path=None):", "body": "self.db_path = path<EOL>", "docstring": "Initialize a ForgiveDB instance using path as a json file storage.\nIf path is None, ForgiveDB will be in memory.\n\n:param path: the json file path\n:type path: str | unicode", "id": "f7934:c0:m0"}
{"signature": "def set(self, key, value):", "body": "if self.in_memory:<EOL><INDENT>self._memory_db[key] = value<EOL><DEDENT>else:<EOL><INDENT>db = self._read_file()<EOL>db[key] = value<EOL>with open(self.db_path, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(json.dumps(db, ensure_ascii=False, indent=<NUM_LIT:2>))<EOL><DEDENT><DEDENT>", "docstring": "Set key value", "id": "f7934:c0:m3"}
{"signature": "def get_events_vote_cluster(self, delegate_address):", "body": "delegate_pubkey = self.account_details(address=delegate_address)['<STR_LIT>']<EOL>plusvote = '<STR_LIT>'.format(delegate_pubkey=delegate_pubkey)<EOL>resultset = self._cursor.execute_and_fetchall(\"\"\"<STR_LIT>\"\"\".format(<EOL>address=delegate_address,<EOL>transactions=self.scheme['<STR_LIT>'],<EOL>blocks=self.scheme['<STR_LIT>'],<EOL>mem_accounts=self.scheme['<STR_LIT>'],<EOL>mem_accounts2delegates=self.scheme['<STR_LIT>'],<EOL>votes=self.scheme['<STR_LIT>'],<EOL>plusvote=plusvote))<EOL>res = {}<EOL>for i in resultset:<EOL><INDENT>if i[<NUM_LIT:1>] == '<STR_LIT>':<EOL><INDENT>res.update({i[<NUM_LIT:0>]: {<EOL>'<STR_LIT>': i[<NUM_LIT:0>],<EOL>'<STR_LIT>': i[<NUM_LIT:1>],<EOL>'<STR_LIT>': i[<NUM_LIT:2>],<EOL>'<STR_LIT>': i[<NUM_LIT:3>],<EOL>'<STR_LIT>': i[<NUM_LIT:4>],<EOL>'<STR_LIT>': i[<NUM_LIT:5>],<EOL>'<STR_LIT>': i[<NUM_LIT:6>],<EOL>'<STR_LIT:type>': i[<NUM_LIT:7>],<EOL>'<STR_LIT>': i[<NUM_LIT:8>],<EOL>'<STR_LIT>': i[<NUM_LIT:9>],<EOL>'<STR_LIT>': i[<NUM_LIT:10>]<EOL>}})<EOL><DEDENT>elif i[<NUM_LIT:1>] == '<STR_LIT>':<EOL><INDENT>res.update({i[<NUM_LIT:0>]: {<EOL>'<STR_LIT>': i[<NUM_LIT:0>],<EOL>'<STR_LIT>': i[<NUM_LIT:1>],<EOL>'<STR_LIT>': i[<NUM_LIT:2>],<EOL>'<STR_LIT>': i[<NUM_LIT:3>],<EOL>'<STR_LIT>': i[<NUM_LIT:8>],<EOL>'<STR_LIT:address>': i[<NUM_LIT:5>],<EOL>'<STR_LIT:username>': i[<NUM_LIT:6>],<EOL>'<STR_LIT>': i[<NUM_LIT:4>],<EOL>'<STR_LIT>': i[<NUM_LIT:10>]<EOL>}})<EOL><DEDENT><DEDENT>return res<EOL>", "docstring": "Returns all transactions and forged blocks by voters clustered around a single delegate_address", "id": "f7945:c2:m14"}
{"signature": "def arktimestamp(arkt, forfilename=False):", "body": "t = arkt + time.mktime((<NUM_LIT>, <NUM_LIT:3>, <NUM_LIT>, <NUM_LIT:15>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>))<EOL>return '<STR_LIT>' % (arkt, timestamp(t))<EOL>", "docstring": "Returns a human-readable timestamp given an Ark timestamp 'arct'.\n    An Ark timestamp is the number of seconds since Genesis block,\n    2017:03:21 15:55:44.", "id": "f7947:m3"}
{"signature": "def arkt_to_unixt(ark_timestamp):", "body": "res = datetime.datetime(<NUM_LIT>, <NUM_LIT:3>, <NUM_LIT>, <NUM_LIT:15>, <NUM_LIT>, <NUM_LIT>) + datetime.timedelta(seconds=ark_timestamp)<EOL>return res.timestamp()<EOL>", "docstring": "convert ark timestamp to unix timestamp", "id": "f7947:m5"}
{"signature": "def datetime_to_arkt(datetime):", "body": "return datetime.timestamp() - datetime.datetime(<NUM_LIT>, <NUM_LIT:3>, <NUM_LIT>, <NUM_LIT:15>, <NUM_LIT>, <NUM_LIT>).timestamp<EOL>", "docstring": "convert a datetime object to ark timestamp", "id": "f7947:m6"}
{"signature": "def timestamp(t = None, forfilename=False):", "body": "datetimesep = '<STR_LIT:U+0020>'<EOL>timesep     = '<STR_LIT::>'<EOL>if forfilename:<EOL><INDENT>datetimesep = '<STR_LIT:->'<EOL>timesep     = '<STR_LIT:->'<EOL><DEDENT>return time.strftime('<STR_LIT>' + datetimesep +<EOL>'<STR_LIT>' + timesep + '<STR_LIT>' + timesep + '<STR_LIT>',<EOL>time.localtime(t))<EOL>", "docstring": "Returns a human-readable timestamp given a Unix timestamp 't' or\n    for the current time. The Unix timestamp is the number of seconds since\n    start of epoch (1970-01-01 00:00:00).\n    When forfilename is True, then spaces and semicolons are replace with\n    hyphens. The returned string is usable as a (part of a) filename.", "id": "f7947:m2"}
{"signature": "def broadcast_tx(self, address, amount, secret, secondsecret=None, vendorfield='<STR_LIT>'):", "body": "peer = random.choice(self.PEERS)<EOL>park = Park(<EOL>peer,<EOL><NUM_LIT>,<EOL>constants.ARK_NETHASH,<EOL>'<STR_LIT>'<EOL>)<EOL>return park.transactions().create(address, str(amount), vendorfield, secret, secondsecret)<EOL>", "docstring": "broadcasts a transaction to the peerslist using ark-js library", "id": "f7948:c0:m5"}
{"signature": "def remove_peer(self, peer):", "body": "if type(peer) == list:<EOL><INDENT>for x in peer:<EOL><INDENT>check_url(x)<EOL>for i in self.PEERS:<EOL><INDENT>if x in i:<EOL><INDENT>self.PEERS.remove(i)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif type(peer) == str:<EOL><INDENT>check_url(peer)<EOL>for i in self.PEERS:<EOL><INDENT>if peer == i:<EOL><INDENT>self.PEERS.remove(i)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "remove one or multiple peers from PEERS variable\n\n:param peer(list or string):", "id": "f7948:c0:m2"}
{"signature": "def add_peer(self, peer):", "body": "if type(peer) == list:<EOL><INDENT>for i in peer:<EOL><INDENT>check_url(i)<EOL><DEDENT>self.PEERS.extend(peer)<EOL><DEDENT>elif type(peer) == str:<EOL><INDENT>check_url(peer)<EOL>self.PEERS.append(peer)<EOL><DEDENT>", "docstring": "Add a peer or multiple peers to the PEERS variable, takes a single string or a list.\n\n:param peer(list or string)", "id": "f7948:c0:m1"}
{"signature": "def get_events(delegate_pubkey):", "body": "res = DbCursor().execute_and_fetchall(\"\"\"<STR_LIT>\"\"\".format(delegate_pubkey))<EOL>Event = namedtuple(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>')<EOL>named_events = []<EOL>for i in res:<EOL><INDENT>tx_id = Event(<EOL>id=i[<NUM_LIT:0>],<EOL>amount=i[<NUM_LIT:1>],<EOL>fee=i[<NUM_LIT:2>],<EOL>timestamp=i[<NUM_LIT:3>],<EOL>recipientId=i[<NUM_LIT:4>],<EOL>senderId=i[<NUM_LIT:5>],<EOL>type=i[<NUM_LIT:6>],<EOL>raw=i[<NUM_LIT:7>]<EOL>)<EOL>named_events.append(tx_id)<EOL><DEDENT>return named_events<EOL>", "docstring": "returns a list of named tuples of all transactions relevant to a specific delegates voters.\n    Flow: finds all voters and unvoters, SELECTs all transactions of those voters, names all transactions according to\n    the scheme: 'transaction', 'id amount timestamp recipientId senderId rawasset type fee blockId", "id": "f7950:m5"}
{"signature": "@staticmethod<EOL><INDENT>def payout(address):<DEDENT>", "body": "qry = DbCursor().execute_and_fetchall(\"\"\"<STR_LIT>\"\"\".format(address))<EOL>Transaction = namedtuple(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>')<EOL>named_transactions = []<EOL>for i in qry:<EOL><INDENT>tx_id = Transaction(<EOL>id=i[<NUM_LIT:0>],<EOL>amount=i[<NUM_LIT:1>],<EOL>timestamp=i[<NUM_LIT:2>],<EOL>recipientId=i[<NUM_LIT:3>],<EOL>senderId=i[<NUM_LIT:4>],<EOL>rawasset=i[<NUM_LIT:5>],<EOL>type=i[<NUM_LIT:6>],<EOL>fee=i[<NUM_LIT:7>],<EOL>)<EOL>named_transactions.append(tx_id)<EOL><DEDENT>return named_transactions<EOL>", "docstring": "returns all received transactions between the address and registered delegate accounts\n        ORDER by timestamp ASC.", "id": "f7950:c10:m0"}
{"signature": "@staticmethod<EOL><INDENT>def votes(address):<DEDENT>", "body": "qry = DbCursor().execute_and_fetchall(\"\"\"<STR_LIT>\"\"\".format(address))<EOL>Vote = namedtuple(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>')<EOL>res = []<EOL>for i in qry:<EOL><INDENT>if i[<NUM_LIT:0>][<NUM_LIT:0>] == '<STR_LIT:+>':<EOL><INDENT>direction = True<EOL><DEDENT>elif i[<NUM_LIT:0>][<NUM_LIT:0>] == '<STR_LIT:->':<EOL><INDENT>direction = False<EOL><DEDENT>else:<EOL><INDENT>logger.fatal('<STR_LIT>'.format(i))<EOL>raise ParseError('<STR_LIT>'.format(i))<EOL><DEDENT>vote = Vote(<EOL>direction=direction,<EOL>delegate=i[<NUM_LIT:0>][<NUM_LIT:1>:],<EOL>timestamp=i[<NUM_LIT:1>],<EOL>)<EOL>res.append(vote)<EOL><DEDENT>return res<EOL>", "docstring": "Returns a list of namedtuples all votes made by an address, {(+/-)pubkeydelegate:timestamp}, timestamp DESC", "id": "f7950:c10:m2"}
{"signature": "@staticmethod<EOL><INDENT>def balance_over_time(address):<DEDENT>", "body": "forged_blocks = None<EOL>txhistory = Address.transactions(address)<EOL>delegates = Delegate.delegates()<EOL>for i in delegates:<EOL><INDENT>if address == i.address:<EOL><INDENT>forged_blocks = Delegate.blocks(i.pubkey)<EOL><DEDENT><DEDENT>balance_over_time = []<EOL>balance = <NUM_LIT:0><EOL>block = <NUM_LIT:0><EOL>Balance = namedtuple(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>')<EOL>for tx in txhistory:<EOL><INDENT>if forged_blocks:<EOL><INDENT>while forged_blocks[block].timestamp <= tx.timestamp:<EOL><INDENT>balance += (forged_blocks[block].reward + forged_blocks[block].totalFee)<EOL>balance_over_time.append(Balance(timestamp=forged_blocks[block].timestamp, amount=balance))<EOL>block += <NUM_LIT:1><EOL><DEDENT><DEDENT>if tx.senderId == address:<EOL><INDENT>balance -= (tx.amount + tx.fee)<EOL>res = Balance(timestamp=tx.timestamp, amount=balance)<EOL>balance_over_time.append(res)<EOL><DEDENT>if tx.recipientId == address:<EOL><INDENT>balance += tx.amount<EOL>res = Balance(timestamp=tx.timestamp, amount=balance)<EOL>balance_over_time.append(res)<EOL><DEDENT><DEDENT>if forged_blocks and block <= len(forged_blocks) - <NUM_LIT:1>:<EOL><INDENT>if forged_blocks[block].timestamp > txhistory[-<NUM_LIT:1>].timestamp:<EOL><INDENT>for i in forged_blocks[block:]:<EOL><INDENT>balance += (i.reward + i.totalFee)<EOL>res = Balance(timestamp=i.timestamp, amount=balance)<EOL>balance_over_time.append(res)<EOL><DEDENT><DEDENT><DEDENT>return balance_over_time<EOL>", "docstring": "returns a list of named tuples,  x.timestamp, x.amount including block rewards", "id": "f7950:c10:m4"}
{"signature": "@staticmethod<EOL><INDENT>def delegates():<DEDENT>", "body": "qry = DbCursor().execute_and_fetchall(\"\"\"<STR_LIT>\"\"\")<EOL>Delegate = namedtuple(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>')<EOL>res = []<EOL>for i in qry:<EOL><INDENT>registration = Delegate(<EOL>username=i[<NUM_LIT:0>],<EOL>pubkey=binascii.hexlify(i[<NUM_LIT:4>]).decode(\"<STR_LIT:utf-8>\"),<EOL>timestamp=i[<NUM_LIT:2>],<EOL>address=i[<NUM_LIT:3>],<EOL>transactionId=i[<NUM_LIT:1>]<EOL>)<EOL>res.append(registration)<EOL><DEDENT>return res<EOL>", "docstring": "returns a list of named tuples of all delegates.\n        {username: {'pubkey':pubkey, 'timestamp':timestamp, 'address':address}}", "id": "f7950:c11:m0"}
{"signature": "@staticmethod<EOL><INDENT>def blocks(delegate_pubkey=None, max_timestamp=None):<DEDENT>", "body": "if not delegate_pubkey:<EOL><INDENT>delegate_pubkey = c.DELEGATE['<STR_LIT>']<EOL><DEDENT>if max_timestamp:<EOL><INDENT>max_timestamp_sql = \"\"\"<STR_LIT>\"\"\".format(max_timestamp)<EOL><DEDENT>else:<EOL><INDENT>max_timestamp_sql = '<STR_LIT>'<EOL><DEDENT>qry = DbCursor().execute_and_fetchall(\"\"\"<STR_LIT>\"\"\".format(<EOL>max_timestamp_sql,<EOL>delegate_pubkey))<EOL>Block = namedtuple('<STR_LIT>',<EOL>'<STR_LIT>')<EOL>block_list = []<EOL>for block in qry:<EOL><INDENT>block_value = Block(timestamp=block[<NUM_LIT:0>],<EOL>height=block[<NUM_LIT:1>],<EOL>id=block[<NUM_LIT:2>],<EOL>totalFee=block[<NUM_LIT:3>],<EOL>reward=block[<NUM_LIT:4>])<EOL>block_list.append(block_value)<EOL><DEDENT>return block_list<EOL>", "docstring": "returns a list of named tuples of all blocks forged by a delegate.\n        if delegate_pubkey is not specified, set_delegate needs to be called in advance.\n        max_timestamp can be configured to retrieve blocks up to a certain timestamp.", "id": "f7950:c11:m5"}
{"signature": "def set_delegate(address=None, pubkey=None, secret=None):", "body": "c.DELEGATE['<STR_LIT>'] = address<EOL>c.DELEGATE['<STR_LIT>'] = pubkey<EOL>c.DELEGATE['<STR_LIT>'] = secret<EOL>", "docstring": "Set delegate parameters. Call set_delegate with no arguments to clear.", "id": "f7950:m1"}
{"signature": "@staticmethod<EOL><INDENT>def lastpayout(delegate_address, blacklist=None):<DEDENT>", "body": "if blacklist and len(blacklist) > <NUM_LIT:1>:<EOL><INDENT>command_blacklist = '<STR_LIT>' + str(tuple(blacklist))<EOL><DEDENT>elif blacklist and len(blacklist) == <NUM_LIT:1>:<EOL><INDENT>command_blacklist = '<STR_LIT>' + \"<STR_LIT:'>\" + blacklist[<NUM_LIT:0>] + \"<STR_LIT:'>\"<EOL><DEDENT>else:<EOL><INDENT>command_blacklist = \"<STR_LIT>\"<EOL><DEDENT>qry = DbCursor().execute_and_fetchall(\"\"\"<STR_LIT>\"\"\".format(delegate_address, command_blacklist))<EOL>result = []<EOL>Payout = namedtuple(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>')<EOL>for i in qry:<EOL><INDENT>payout = Payout(<EOL>address=i[<NUM_LIT:0>],<EOL>id=i[<NUM_LIT:1>],<EOL>timestamp=i[<NUM_LIT:2>]<EOL>)<EOL>result.append(payout)<EOL><DEDENT>return result<EOL>", "docstring": "Assumes that all send transactions from a delegate are payouts.\nUse blacklist to remove rewardwallet and other transactions if the\naddress is not a voter. blacklist can contain both addresses and transactionIds", "id": "f7950:c11:m1"}
{"signature": "def clear(self):", "body": "self._data.clear()<EOL>", "docstring": "Removes all items from dictionary", "id": "f7956:c0:m7"}
{"signature": "def get(self, key, default=_sentinel):", "body": "tup = self._data.get(key.lower())<EOL>if tup is not None:<EOL><INDENT>return tup[<NUM_LIT:1>]<EOL><DEDENT>elif default is not _sentinel:<EOL><INDENT>return default<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Gets the value from the key.\nIf the key doesn't exist, the default value is returned, otherwise None.\n\n:param key: The key\n:param default: The default value\n:return: The value", "id": "f7956:c0:m8"}
{"signature": "def create_map(self, type_from, type_to, mapping=None):", "body": "key_from = type_from.__name__<EOL>key_to = type_to.__name__<EOL>if mapping is None:<EOL><INDENT>mapping = {}<EOL><DEDENT>if key_from in self.mappings:<EOL><INDENT>inner_map = self.mappings[key_from]<EOL>if key_to in inner_map:<EOL><INDENT>raise ObjectMapperException(\"<STR_LIT>\".format(key_from, key_to))<EOL><DEDENT>else:<EOL><INDENT>inner_map[key_to] = mapping<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.mappings[key_from] = {}<EOL>self.mappings[key_from][key_to] = mapping<EOL><DEDENT>", "docstring": "Method for adding mapping definitions\n\n        :param type_from: source type\n        :param type_to: target type\n        :param mapping: dictionary of mapping definitions in a form {'target_property_name',\n                        lambda function from rhe source}\n\n        :return: None", "id": "f7957:c0:m1"}
{"signature": "def __init__(self):", "body": "<EOL>self.mappings = {}<EOL>pass<EOL>", "docstring": "Constructor\n\n        Args:\n          mappings: dictionary of the attribute conversions\n\n        Examples:\n\n            1. Mapping of the properties without mapping definition\n            In this case are mapped only these properties of the target class which\n            are in target and source classes. Other properties are not mapped.\n            Suppose we have class 'A' with attributes 'name' and 'last_name'\n            and class 'B' with attribute 'name'.\n            Initialization of the ObjectMapper will be:\n            mapper = ObjectMapper()\n            mapper.create_map(A, B)\n            instance_b = mapper.map(A(), B)\n\n            In this case, value of A.name will be copied into B.name.\n\n            2. Mapping with defined mapping functions\n            Suppose we have class 'A' with attributes 'first_name' and 'last_name'\n            , class 'B' with attribute 'full_name' and class 'C' with attribute reverse_name.\n            And want to map it in a way 'B.full_name' = 'A.first_name' + 'A.last_name' and\n            'C.reverse_name' = 'A.last_name' + 'A.first_name'\n            Initialization of the ObjectMapper will be:\n            mapper = ObjectMapper()\n            mapper.create_map(A, B, {'name': lambda a : a.first_name + \" \" + a.last_name})\n            mapper.create_map(A, C, {'name': lambda a : a.last_name + \" \" + a.first_name})\n\n            instance_b = mapper.map(A(), B)\n            instance_c = mapper.map(A(), C)\n\n            In this case, to the B.name will be mapped A.first_name + \" \" + A.last_name\n            In this case, to the C.name will be mapped A.last_name + \" \" + A.first_name\n\n            3. 
Mapping suppression\n            For some purposes, it can be needed to suppress some mapping.\n            Suppose we have class 'A' with attributes 'name' and 'last_name'\n            and class 'B' with attributes 'name' and 'last_name'.\n            And we want to map only the A.name into B.name, but not A.last_name to\n            b.last_name\n            Initialization of the ObjectMapper will be:\n            mapper = ObjectMapper()\n            mapper.create_map(A, B, {'last_name': None})\n\n            instance_b = mapper.map(A(), B)\n\n            In this case, value of A.name will be copied into B.name automatically by the attribute name 'name'.\n            Attribute A.last_name will be not mapped thanks the suppression (lambda function is None).\n\n            4. Case insensitive mapping\n            Suppose we have class 'A' with attributes 'Name' and 'Age' and\n            class 'B' with attributes 'name' and 'age' and we want to map 'A' to 'B' in a way\n            'A.Name' = 'B.name' and 'A.Age' = 'B.age'\n            Initialization of the ObjectMapper will be:\n            mapper = ObjectMapper()\n            mapper.create_map(A, B)\n            instance_b = mapper.map(A(), B, ignore_case=True)\n\n            In this case, the value of A.Name will be copied into B.name and\n            the value of A.Age will be copied into B.age.\n\n        :return: Instance of the ObjectMapper", "id": "f7957:c0:m0"}
{"signature": "def create(text,score,prompt_string, dump_data=False):", "body": "if dump_data:<EOL><INDENT>dump_input_data(text, score)<EOL><DEDENT>algorithm = select_algorithm(score)<EOL>results = {'<STR_LIT>': [],'<STR_LIT:success>' : False, '<STR_LIT>' : <NUM_LIT:0>, '<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>' : \"<STR_LIT>\", '<STR_LIT>' : \"<STR_LIT>\", '<STR_LIT>' : algorithm,<EOL>'<STR_LIT>' : score, '<STR_LIT:text>' : text, '<STR_LIT>' : prompt_string}<EOL>if len(text)!=len(score):<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>results['<STR_LIT>'].append(msg)<EOL>log.exception(msg)<EOL>return results<EOL><DEDENT>try:<EOL><INDENT>e_set = model_creator.create_essay_set(text, score, prompt_string)<EOL><DEDENT>except:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>results['<STR_LIT>'].append(msg)<EOL>log.exception(msg)<EOL><DEDENT>try:<EOL><INDENT>feature_ext, classifier, cv_error_results = model_creator.extract_features_and_generate_model(e_set, algorithm = algorithm)<EOL>results['<STR_LIT>']=cv_error_results['<STR_LIT>']<EOL>results['<STR_LIT>']=cv_error_results['<STR_LIT>']<EOL>results['<STR_LIT>']=feature_ext<EOL>results['<STR_LIT>']=classifier<EOL>results['<STR_LIT>'] = algorithm<EOL>results['<STR_LIT:success>']=True<EOL><DEDENT>except:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>results['<STR_LIT>'].append(msg)<EOL>log.exception(msg)<EOL><DEDENT>return results<EOL>", "docstring": "Creates a machine learning model from input text, associated scores, a prompt, and a path to the model\nTODO: Remove model path argument, it is needed for now to support legacy code\ntext - A list of strings containing the text of the essays\nscore - a list of integers containing score values\nprompt_string - the common prompt for the set of essays", "id": "f7960:m1"}
{"signature": "def create_generic(numeric_values, textual_values, target, algorithm = util_functions.AlgorithmTypes.regression):", "body": "algorithm = select_algorithm(target)<EOL>results = {'<STR_LIT>': [],'<STR_LIT:success>' : False, '<STR_LIT>' : <NUM_LIT:0>, '<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>' : \"<STR_LIT>\", '<STR_LIT>' : \"<STR_LIT>\", '<STR_LIT>' : algorithm}<EOL>if len(numeric_values)!=len(textual_values) or len(numeric_values)!=len(target):<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>results['<STR_LIT>'].append(msg)<EOL>log.exception(msg)<EOL>return results<EOL><DEDENT>try:<EOL><INDENT>pset = predictor_set.PredictorSet(essaytype=\"<STR_LIT:train>\")<EOL>for i in xrange(<NUM_LIT:0>, len(numeric_values)):<EOL><INDENT>pset.add_row(numeric_values[i], textual_values[i], target[i])<EOL><DEDENT><DEDENT>except:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>results['<STR_LIT>'].append(msg)<EOL>log.exception(msg)<EOL><DEDENT>try:<EOL><INDENT>feature_ext, classifier, cv_error_results = model_creator.extract_features_and_generate_model_predictors(pset, algorithm)<EOL>results['<STR_LIT>']=cv_error_results['<STR_LIT>']<EOL>results['<STR_LIT>']=cv_error_results['<STR_LIT>']<EOL>results['<STR_LIT>']=feature_ext<EOL>results['<STR_LIT>']=classifier<EOL>results['<STR_LIT:success>']=True<EOL><DEDENT>except:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>results['<STR_LIT>'].append(msg)<EOL>log.exception(msg)<EOL><DEDENT>return results<EOL>", "docstring": "Creates a model from a generic list numeric values and text values\nnumeric_values - A list of lists that are the predictors\ntextual_values - A list of lists that are the predictors\n(each item in textual_values corresponds to the similarly indexed counterpart in numeric_values)\ntarget - The variable that we are trying to predict.  A list of integers.\nalgorithm - the type of algorithm that will be used", "id": "f7960:m2"}
{"signature": "def get_algorithms(algorithm):", "body": "if algorithm == util_functions.AlgorithmTypes.classification:<EOL><INDENT>clf = sklearn.ensemble.GradientBoostingClassifier(n_estimators=<NUM_LIT:100>, learn_rate=<NUM_LIT>,<EOL>max_depth=<NUM_LIT:4>, random_state=<NUM_LIT:1>,min_samples_leaf=<NUM_LIT:3>)<EOL>clf2=sklearn.ensemble.GradientBoostingClassifier(n_estimators=<NUM_LIT:100>, learn_rate=<NUM_LIT>,<EOL>max_depth=<NUM_LIT:4>, random_state=<NUM_LIT:1>,min_samples_leaf=<NUM_LIT:3>)<EOL><DEDENT>else:<EOL><INDENT>clf = sklearn.ensemble.GradientBoostingRegressor(n_estimators=<NUM_LIT:100>, learn_rate=<NUM_LIT>,<EOL>max_depth=<NUM_LIT:4>, random_state=<NUM_LIT:1>,min_samples_leaf=<NUM_LIT:3>)<EOL>clf2=sklearn.ensemble.GradientBoostingRegressor(n_estimators=<NUM_LIT:100>, learn_rate=<NUM_LIT>,<EOL>max_depth=<NUM_LIT:4>, random_state=<NUM_LIT:1>,min_samples_leaf=<NUM_LIT:3>)<EOL><DEDENT>return clf, clf2<EOL>", "docstring": "Gets two classifiers for each type of algorithm, and returns them.  First for predicting, second for cv error.\ntype - one of util_functions.AlgorithmTypes", "id": "f7961:m5"}
{"signature": "def get_cv_error(clf,feats,scores):", "body": "results={'<STR_LIT:success>' : False, '<STR_LIT>' : <NUM_LIT:0>, '<STR_LIT>' : <NUM_LIT:0>}<EOL>try:<EOL><INDENT>cv_preds=util_functions.gen_cv_preds(clf,feats,scores)<EOL>err=numpy.mean(numpy.abs(numpy.array(cv_preds)-scores))<EOL>kappa=util_functions.quadratic_weighted_kappa(list(cv_preds),scores)<EOL>results['<STR_LIT>']=err<EOL>results['<STR_LIT>']=kappa<EOL>results['<STR_LIT:success>']=True<EOL><DEDENT>except ValueError as ex:<EOL><INDENT>msg = u\"<STR_LIT>\".format(ex=ex)<EOL>log.debug(msg)<EOL><DEDENT>except:<EOL><INDENT>log.exception(\"<STR_LIT>\")<EOL><DEDENT>return results<EOL>", "docstring": "Gets cross validated error for a given classifier, set of features, and scores\nclf - classifier\nfeats - features to feed into the classified and cross validate over\nscores - scores associated with the features -- feature row 1 associates with score 1, etc.", "id": "f7961:m4"}
{"signature": "def gen_feats(self, e_set):", "body": "bag_feats = self.gen_bag_feats(e_set)<EOL>length_feats = self.gen_length_feats(e_set)<EOL>prompt_feats = self.gen_prompt_feats(e_set)<EOL>overall_feats = numpy.concatenate((length_feats, prompt_feats, bag_feats), axis=<NUM_LIT:1>)<EOL>overall_feats = overall_feats.copy()<EOL>return overall_feats<EOL>", "docstring": "Generates bag of words, length, and prompt features from an essay set object\nreturns an array of features\ne_set - EssaySet object", "id": "f7965:c0:m6"}
{"signature": "def initialize_dictionaries(self, e_set, max_feats2 = <NUM_LIT:200>):", "body": "if(hasattr(e_set, '<STR_LIT>')):<EOL><INDENT>if(e_set._type == \"<STR_LIT:train>\"):<EOL><INDENT>nvocab = util_functions.get_vocab(e_set._text, e_set._score, max_feats2 = max_feats2)<EOL>svocab = util_functions.get_vocab(e_set._clean_stem_text, e_set._score, max_feats2 = max_feats2)<EOL>self._normal_dict = CountVectorizer(ngram_range=(<NUM_LIT:1>,<NUM_LIT:2>), vocabulary=nvocab)<EOL>self._stem_dict = CountVectorizer(ngram_range=(<NUM_LIT:1>,<NUM_LIT:2>), vocabulary=svocab)<EOL>self.dict_initialized = True<EOL>self._mean_spelling_errors=sum(e_set._spelling_errors)/float(len(e_set._spelling_errors))<EOL>self._spell_errors_per_character=sum(e_set._spelling_errors)/float(sum([len(t) for t in e_set._text]))<EOL>good_pos_tags,bad_pos_positions=self._get_grammar_errors(e_set._pos,e_set._text,e_set._tokens)<EOL>self._grammar_errors_per_character=(sum(good_pos_tags)/float(sum([len(t) for t in e_set._text])))<EOL>bag_feats=self.gen_bag_feats(e_set)<EOL>f_row_sum=numpy.sum(bag_feats[:,:])<EOL>self._mean_f_prop=f_row_sum/float(sum([len(t) for t in e_set._text]))<EOL>ret = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>raise util_functions.InputError(e_set, \"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise util_functions.InputError(e_set, \"<STR_LIT>\")<EOL><DEDENT>return ret<EOL>", "docstring": "Initializes dictionaries from an essay set object\nDictionaries must be initialized prior to using this to extract features\ne_set is an input essay set\nreturns a confirmation of initialization", "id": "f7965:c0:m1"}
{"signature": "def gen_bag_feats(self, e_set):", "body": "if(hasattr(self, '<STR_LIT>')):<EOL><INDENT>sfeats = self._stem_dict.transform(e_set._clean_stem_text)<EOL>nfeats = self._normal_dict.transform(e_set._text)<EOL>bag_feats = numpy.concatenate((sfeats.toarray(), nfeats.toarray()), axis=<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>raise util_functions.InputError(self, \"<STR_LIT>\")<EOL><DEDENT>return bag_feats.copy()<EOL>", "docstring": "Generates bag of words features from an input essay set and trained FeatureExtractor\nGenerally called by gen_feats\nReturns an array of features\ne_set - EssaySet object", "id": "f7965:c0:m5"}
{"signature": "def _get_grammar_errors(self,pos,text,tokens):", "body": "word_counts = [max(len(t),<NUM_LIT:1>) for t in tokens]<EOL>good_pos_tags = []<EOL>min_pos_seq=<NUM_LIT:2><EOL>max_pos_seq=<NUM_LIT:4><EOL>bad_pos_positions=[]<EOL>for i in xrange(<NUM_LIT:0>, len(text)):<EOL><INDENT>pos_seq = [tag[<NUM_LIT:1>] for tag in pos[i]]<EOL>pos_ngrams = util_functions.ngrams(pos_seq, min_pos_seq, max_pos_seq)<EOL>long_pos_ngrams=[z for z in pos_ngrams if z.count('<STR_LIT:U+0020>')==(max_pos_seq-<NUM_LIT:1>)]<EOL>bad_pos_tuples=[[z,z+max_pos_seq] for z in xrange(<NUM_LIT:0>,len(long_pos_ngrams)) if long_pos_ngrams[z] not in self._good_pos_ngrams]<EOL>bad_pos_tuples.sort(key=operator.itemgetter(<NUM_LIT:1>))<EOL>to_delete=[]<EOL>for m in reversed(xrange(len(bad_pos_tuples)-<NUM_LIT:1>)):<EOL><INDENT>start, end = bad_pos_tuples[m]<EOL>for j in xrange(m+<NUM_LIT:1>, len(bad_pos_tuples)):<EOL><INDENT>lstart, lend = bad_pos_tuples[j]<EOL>if lstart >= start and lstart <= end:<EOL><INDENT>bad_pos_tuples[m][<NUM_LIT:1>]=bad_pos_tuples[j][<NUM_LIT:1>]<EOL>to_delete.append(j)<EOL><DEDENT><DEDENT><DEDENT>fixed_bad_pos_tuples=[bad_pos_tuples[z] for z in xrange(<NUM_LIT:0>,len(bad_pos_tuples)) if z not in to_delete]<EOL>bad_pos_positions.append(fixed_bad_pos_tuples)<EOL>overlap_ngrams = [z for z in pos_ngrams if z in self._good_pos_ngrams]<EOL>if (len(pos_ngrams)-len(overlap_ngrams))><NUM_LIT:0>:<EOL><INDENT>divisor=len(pos_ngrams)/len(pos_seq)<EOL><DEDENT>else:<EOL><INDENT>divisor=<NUM_LIT:1><EOL><DEDENT>if divisor == <NUM_LIT:0>:<EOL><INDENT>divisor=<NUM_LIT:1><EOL><DEDENT>good_grammar_ratio = (len(pos_ngrams)-len(overlap_ngrams))/divisor<EOL>good_pos_tags.append(good_grammar_ratio)<EOL><DEDENT>return good_pos_tags,bad_pos_positions<EOL>", "docstring": "Internal function to get the number of grammar errors in given text\npos - part of speech tagged text (list)\ntext - normal text (list)\ntokens - list of lists of tokenized text", "id": "f7965:c0:m3"}
{"signature": "def get_good_pos_ngrams(self):", "body": "if(os.path.isfile(NGRAM_PATH)):<EOL><INDENT>good_pos_ngrams = pickle.load(open(NGRAM_PATH, '<STR_LIT:rb>'))<EOL><DEDENT>elif os.path.isfile(ESSAY_CORPUS_PATH):<EOL><INDENT>essay_corpus = open(ESSAY_CORPUS_PATH).read()<EOL>essay_corpus = util_functions.sub_chars(essay_corpus)<EOL>good_pos_ngrams = util_functions.regenerate_good_tokens(essay_corpus)<EOL>pickle.dump(good_pos_ngrams, open(NGRAM_PATH, '<STR_LIT:wb>'))<EOL><DEDENT>else:<EOL><INDENT>good_pos_ngrams=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>']<EOL><DEDENT>return set(good_pos_ngrams)<EOL>", "docstring": "Gets a set of gramatically correct part of speech sequences from an input file called essaycorpus.txt\nReturns the set and caches the file", "id": "f7965:c0:m2"}
{"signature": "def generate_additional_essays(self, e_text, e_score, dictionary=None, max_syns=<NUM_LIT:3>):", "body": "e_toks = nltk.word_tokenize(e_text)<EOL>all_syns = []<EOL>for word in e_toks:<EOL><INDENT>synonyms = util_functions.get_wordnet_syns(word)<EOL>if(len(synonyms) > max_syns):<EOL><INDENT>synonyms = random.sample(synonyms, max_syns)<EOL><DEDENT>all_syns.append(synonyms)<EOL><DEDENT>new_essays = []<EOL>for i in range(<NUM_LIT:0>, max_syns):<EOL><INDENT>syn_toks = e_toks<EOL>for z in range(<NUM_LIT:0>, len(e_toks)):<EOL><INDENT>if len(all_syns[z]) > i and (dictionary == None or e_toks[z] in dictionary):<EOL><INDENT>syn_toks[z] = all_syns[z][i]<EOL><DEDENT><DEDENT>new_essays.append(\"<STR_LIT:U+0020>\".join(syn_toks))<EOL><DEDENT>for z in xrange(<NUM_LIT:0>, len(new_essays)):<EOL><INDENT>self.add_essay(new_essays[z], e_score, <NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Substitute synonyms to generate extra essays from existing ones.\nThis is done to increase the amount of training data.\nShould only be used with lowest scoring essays.\ne_text is the text of the original essay.\ne_score is the score of the original essay.\ndictionary is a fixed dictionary (list) of words to replace.\nmax_syns defines the maximum number of additional essays to generate.  Do not set too high.", "id": "f7966:c0:m3"}
{"signature": "def update_prompt(self, prompt_text):", "body": "if(isinstance(prompt_text, basestring)):<EOL><INDENT>self._prompt = util_functions.sub_chars(prompt_text)<EOL>ret = self._prompt<EOL><DEDENT>else:<EOL><INDENT>raise util_functions.InputError(prompt_text, \"<STR_LIT>\")<EOL><DEDENT>return ret<EOL>", "docstring": "Update the default prompt string, which is \"\".\nprompt_text should be a string.\nReturns the prompt as a confirmation.", "id": "f7966:c0:m2"}
{"signature": "def __init__(self, essaytype=\"<STR_LIT:train>\"):", "body": "if(essaytype != \"<STR_LIT:train>\" and essaytype != \"<STR_LIT:test>\"):<EOL><INDENT>essaytype = \"<STR_LIT:train>\"<EOL><DEDENT>self._type = essaytype<EOL>self._score = []<EOL>self._text = []<EOL>self._id = []<EOL>self._clean_text = []<EOL>self._tokens = []<EOL>self._pos = []<EOL>self._clean_stem_text = []<EOL>self._generated = []<EOL>self._prompt = \"<STR_LIT>\"<EOL>self._spelling_errors = []<EOL>self._markup_text = []<EOL>", "docstring": "Initialize variables and check essay set type", "id": "f7966:c0:m0"}
{"signature": "def add_essay(self, essay_text, essay_score, essay_generated=<NUM_LIT:0>):", "body": "<EOL>if(len(self._id) > <NUM_LIT:0>):<EOL><INDENT>max_id = max(self._id)<EOL><DEDENT>else:<EOL><INDENT>max_id = <NUM_LIT:0><EOL><DEDENT>try:<EOL><INDENT>essay_text = essay_text.encode('<STR_LIT:ascii>', '<STR_LIT:ignore>')<EOL>if len(essay_text) < <NUM_LIT:5>:<EOL><INDENT>essay_text = \"<STR_LIT>\"<EOL><DEDENT><DEDENT>except:<EOL><INDENT>log.exception(\"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>essay_score = int(essay_score)<EOL>essay_text = str(essay_text)<EOL><DEDENT>except:<EOL><INDENT>log.exception(\"<STR_LIT>\".format(type(essay_score), type(essay_text)))<EOL><DEDENT>if isinstance(essay_score, int) and isinstance(essay_text, basestring)and (essay_generated == <NUM_LIT:0> or essay_generated == <NUM_LIT:1>):<EOL><INDENT>self._id.append(max_id + <NUM_LIT:1>)<EOL>self._score.append(essay_score)<EOL>try:<EOL><INDENT>essay_text = str(essay_text.encode('<STR_LIT:ascii>', '<STR_LIT:ignore>'))<EOL><DEDENT>except:<EOL><INDENT>essay_text = (essay_text.decode('<STR_LIT:utf-8>', '<STR_LIT:replace>')).encode('<STR_LIT:ascii>', '<STR_LIT:ignore>')<EOL><DEDENT>cleaned_essay = util_functions.sub_chars(essay_text).lower()<EOL>if(len(cleaned_essay) > MAXIMUM_ESSAY_LENGTH):<EOL><INDENT>cleaned_essay = cleaned_essay[<NUM_LIT:0>:MAXIMUM_ESSAY_LENGTH]<EOL><DEDENT>self._text.append(cleaned_essay)<EOL>cleaned_text, spell_errors, markup_text = util_functions.spell_correct(self._text[len(self._text) - <NUM_LIT:1>])<EOL>self._clean_text.append(cleaned_text)<EOL>self._spelling_errors.append(spell_errors)<EOL>self._markup_text.append(markup_text)<EOL>self._tokens.append(nltk.word_tokenize(self._clean_text[len(self._clean_text) - <NUM_LIT:1>]))<EOL>self._pos.append(nltk.pos_tag(self._clean_text[len(self._clean_text) - <NUM_LIT:1>].split(\"<STR_LIT:U+0020>\")))<EOL>self._generated.append(essay_generated)<EOL>porter = nltk.PorterStemmer()<EOL>por_toks = 
\"<STR_LIT:U+0020>\".join([porter.stem(w) for w in self._tokens[len(self._tokens) - <NUM_LIT:1>]])<EOL>self._clean_stem_text.append(por_toks)<EOL>ret = \"<STR_LIT>\" + self._text[len(self._text) - <NUM_LIT:1>] + \"<STR_LIT>\" + str(essay_score)<EOL><DEDENT>else:<EOL><INDENT>raise util_functions.InputError(essay_text, \"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Add new (essay_text,essay_score) pair to the essay set.\nessay_text must be a string.\nessay_score must be an int.\nessay_generated should not be changed by the user.\nReturns a confirmation that essay was added.", "id": "f7966:c0:m1"}
{"signature": "def grade(grader_data,submission):", "body": "<EOL>results = {'<STR_LIT>': [],'<STR_LIT>': [],'<STR_LIT>': <NUM_LIT:0>, '<STR_LIT>' : \"<STR_LIT>\", '<STR_LIT:success>' : False, '<STR_LIT>' : <NUM_LIT:0>}<EOL>has_error=False<EOL>grader_set=EssaySet(essaytype=\"<STR_LIT:test>\")<EOL>feedback = {}<EOL>model, extractor = get_classifier_and_ext(grader_data)<EOL>if '<STR_LIT>' not in grader_data:<EOL><INDENT>grader_data['<STR_LIT>'] = util_functions.AlgorithmTypes.classification<EOL><DEDENT>try:<EOL><INDENT>grader_set.add_essay(str(submission),<NUM_LIT:0>)<EOL>grader_set.update_prompt(str(grader_data['<STR_LIT>']))<EOL><DEDENT>except Exception:<EOL><INDENT>error_message = \"<STR_LIT>\".format(submission)<EOL>log.exception(error_message)<EOL>results['<STR_LIT>'].append(error_message)<EOL>has_error=True<EOL><DEDENT>try:<EOL><INDENT>grader_feats=extractor.gen_feats(grader_set)<EOL>feedback=extractor.gen_feedback(grader_set,grader_feats)[<NUM_LIT:0>]<EOL>results['<STR_LIT>']=int(model.predict(grader_feats)[<NUM_LIT:0>])<EOL><DEDENT>except Exception:<EOL><INDENT>error_message = \"<STR_LIT>\"<EOL>log.exception(error_message)<EOL>results['<STR_LIT>'].append(error_message)<EOL>has_error=True<EOL><DEDENT>try:<EOL><INDENT>results['<STR_LIT>'] = get_confidence_value(grader_data['<STR_LIT>'], model, grader_feats, results['<STR_LIT>'], grader_data['<STR_LIT>'])<EOL><DEDENT>except Exception:<EOL><INDENT>log.exception(\"<STR_LIT>\")<EOL><DEDENT>if not has_error:<EOL><INDENT>if( '<STR_LIT>' in feedback and feedback['<STR_LIT>']):<EOL><INDENT>results['<STR_LIT>']=<NUM_LIT:0><EOL>results['<STR_LIT>']=False<EOL><DEDENT>results['<STR_LIT:success>']=True<EOL>results['<STR_LIT>'] = {}<EOL>if '<STR_LIT>' in feedback and '<STR_LIT>' in feedback:<EOL><INDENT>results['<STR_LIT>'].update({<EOL>'<STR_LIT>' : feedback['<STR_LIT>'],<EOL>'<STR_LIT>' : feedback['<STR_LIT>'],<EOL>})<EOL><DEDENT>results['<STR_LIT>'].update(<EOL>{<EOL>'<STR_LIT>' : feedback['<STR_LIT>'],<EOL>'<STR_LIT>' : 
feedback['<STR_LIT>'],<EOL>'<STR_LIT>' : feedback['<STR_LIT>'],<EOL>}<EOL>)<EOL><DEDENT>else:<EOL><INDENT>results['<STR_LIT:success>']=False<EOL><DEDENT>return results<EOL>", "docstring": "Grades a specified submission using specified models\ngrader_data - A dictionary:\n{\n    'model' : trained model,\n    'extractor' : trained feature extractor,\n    'prompt' : prompt for the question,\n    'algorithm' : algorithm for the question,\n}\nsubmission - The student submission (string)", "id": "f7967:m0"}
{"signature": "def grade_generic(grader_data, numeric_features, textual_features):", "body": "results = {'<STR_LIT>': [],'<STR_LIT>': [],'<STR_LIT>': <NUM_LIT:0>, '<STR_LIT:success>' : False, '<STR_LIT>' : <NUM_LIT:0>}<EOL>has_error=False<EOL>grader_set=predictor_set.PredictorSet(essaytype=\"<STR_LIT:test>\")<EOL>model, extractor = get_classifier_and_ext(grader_data)<EOL>try:<EOL><INDENT>grader_set.add_row(numeric_features, textual_features,<NUM_LIT:0>)<EOL><DEDENT>except Exception:<EOL><INDENT>error_msg = \"<STR_LIT>\".format(numeric_features, textual_features)<EOL>log.exception(error_msg)<EOL>results['<STR_LIT>'].append(error_msg)<EOL>has_error=True<EOL><DEDENT>try:<EOL><INDENT>grader_feats=extractor.gen_feats(grader_set)<EOL>results['<STR_LIT>']=model.predict(grader_feats)[<NUM_LIT:0>]<EOL><DEDENT>except Exception:<EOL><INDENT>error_msg = \"<STR_LIT>\"<EOL>log.exception(error_msg)<EOL>results['<STR_LIT>'].append(error_msg)<EOL>has_error=True<EOL><DEDENT>try:<EOL><INDENT>results['<STR_LIT>'] = get_confidence_value(grader_data['<STR_LIT>'],model, grader_feats, results['<STR_LIT>'])<EOL><DEDENT>except Exception:<EOL><INDENT>log.exception(\"<STR_LIT>\")<EOL><DEDENT>if not has_error:<EOL><INDENT>results['<STR_LIT:success>'] = True<EOL><DEDENT>return results<EOL>", "docstring": "Grades a set of numeric and textual features using a generic model\ngrader_data -- dictionary containing:\n{\n    'algorithm' - Type of algorithm to use to score\n}\nnumeric_features - list of numeric features to predict on\ntextual_features - list of textual feature to predict on", "id": "f7967:m1"}
{"signature": "def f7(seq):", "body": "seen = set()<EOL>seen_add = seen.add<EOL>return [x for x in seq if x not in seen and not seen_add(x)]<EOL>", "docstring": "Makes a list unique", "id": "f7968:m4"}
{"signature": "def getMedian(numericValues):", "body": "theValues = sorted(numericValues)<EOL>if len(theValues) % <NUM_LIT:2> == <NUM_LIT:1>:<EOL><INDENT>return theValues[(len(theValues) + <NUM_LIT:1>) / <NUM_LIT:2> - <NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>lower = theValues[len(theValues) / <NUM_LIT:2> - <NUM_LIT:1>]<EOL>upper = theValues[len(theValues) / <NUM_LIT:2>]<EOL>return (float(lower + upper)) / <NUM_LIT:2><EOL><DEDENT>", "docstring": "Gets the median of a list of values\nReturns a float/int", "id": "f7968:m19"}
{"signature": "def spell_correct(string):", "body": "<EOL>f = tempfile.NamedTemporaryFile(mode='<STR_LIT:w>')<EOL>f.write(string)<EOL>f.flush()<EOL>f_path = os.path.abspath(f.name)<EOL>try:<EOL><INDENT>p = os.popen(aspell_path + \"<STR_LIT>\" + f_path + \"<STR_LIT>\")<EOL>incorrect = p.readlines()<EOL>p.close()<EOL><DEDENT>except Exception:<EOL><INDENT>log.exception(\"<STR_LIT>\")<EOL>return string,<NUM_LIT:0>, string<EOL><DEDENT>finally:<EOL><INDENT>f.close()<EOL><DEDENT>incorrect_words = list()<EOL>correct_spelling = list()<EOL>for i in range(<NUM_LIT:1>, len(incorrect)):<EOL><INDENT>if(len(incorrect[i]) > <NUM_LIT:10>):<EOL><INDENT>match = re.search(\"<STR_LIT::>\", incorrect[i])<EOL>if hasattr(match, \"<STR_LIT:start>\"):<EOL><INDENT>begstring = incorrect[i][<NUM_LIT:2>:match.start()]<EOL>begmatch = re.search(\"<STR_LIT:U+0020>\", begstring)<EOL>begword = begstring[<NUM_LIT:0>:begmatch.start()]<EOL>sugstring = incorrect[i][match.start() + <NUM_LIT:2>:]<EOL>sugmatch = re.search(\"<STR_LIT:U+002C>\", sugstring)<EOL>if hasattr(sugmatch, \"<STR_LIT:start>\"):<EOL><INDENT>sug = sugstring[<NUM_LIT:0>:sugmatch.start()]<EOL>incorrect_words.append(begword)<EOL>correct_spelling.append(sug)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>newstring = string<EOL>markup_string = string<EOL>already_subbed=[]<EOL>for i in range(<NUM_LIT:0>, len(incorrect_words)):<EOL><INDENT>sub_pat = r\"<STR_LIT>\" + incorrect_words[i] + r\"<STR_LIT>\"<EOL>sub_comp = re.compile(sub_pat)<EOL>newstring = re.sub(sub_comp, correct_spelling[i], newstring)<EOL>if incorrect_words[i] not in already_subbed:<EOL><INDENT>markup_string=re.sub(sub_comp,'<STR_LIT>' + incorrect_words[i] + \"<STR_LIT>\", markup_string)<EOL>already_subbed.append(incorrect_words[i])<EOL><DEDENT><DEDENT>return newstring,len(incorrect_words),markup_string<EOL>", "docstring": "Uses aspell to spell correct an input string.\nRequires aspell to be installed and added to the path.\nReturns the spell corrected string if aspell is found, original 
string if not.\nstring - string", "id": "f7968:m2"}
{"signature": "def ngrams(tokens, min_n, max_n):", "body": "all_ngrams = list()<EOL>n_tokens = len(tokens)<EOL>for i in xrange(n_tokens):<EOL><INDENT>for j in xrange(i + min_n, min(n_tokens, i + max_n) + <NUM_LIT:1>):<EOL><INDENT>all_ngrams.append(\"<STR_LIT:U+0020>\".join(tokens[i:j]))<EOL><DEDENT><DEDENT>return all_ngrams<EOL>", "docstring": "Generates ngrams(word sequences of fixed length) from an input token sequence.\ntokens is a list of words.\nmin_n is the minimum length of an ngram to return.\nmax_n is the maximum length of an ngram to return.\nreturns a list of ngrams (words separated by a space)", "id": "f7968:m3"}
{"signature": "def gen_preds(clf, arr):", "body": "if(hasattr(clf, \"<STR_LIT>\")):<EOL><INDENT>ret = clf.predict(arr)<EOL><DEDENT>else:<EOL><INDENT>ret = clf.predict(arr)<EOL><DEDENT>return ret<EOL>", "docstring": "Generates predictions on a novel data array using a fit classifier\nclf is a classifier that has already been fit\narr is a data array identical in dimension to the array clf was trained on\nReturns the array of predictions.", "id": "f7968:m11"}
{"signature": "def create_model_path(model_path):", "body": "if not model_path.startswith(\"<STR_LIT:/>\") and not model_path.startswith(\"<STR_LIT>\"):<EOL><INDENT>model_path=\"<STR_LIT:/>\" + model_path<EOL><DEDENT>if not model_path.startswith(\"<STR_LIT>\"):<EOL><INDENT>model_path = \"<STR_LIT>\" + model_path<EOL><DEDENT>if not model_path.endswith(\"<STR_LIT>\"):<EOL><INDENT>model_path+=\"<STR_LIT>\"<EOL><DEDENT>return model_path<EOL>", "docstring": "Creates a path to model files\nmodel_path - string", "id": "f7968:m0"}
{"signature": "def gen_cv_preds(clf, arr, sel_score, num_chunks=<NUM_LIT:3>):", "body": "cv_len = int(math.floor(len(sel_score) / num_chunks))<EOL>chunks = []<EOL>for i in range(<NUM_LIT:0>, num_chunks):<EOL><INDENT>range_min = i * cv_len<EOL>range_max = ((i + <NUM_LIT:1>) * cv_len)<EOL>if i == num_chunks - <NUM_LIT:1>:<EOL><INDENT>range_max = len(sel_score)<EOL><DEDENT>chunks.append(range(range_min, range_max))<EOL><DEDENT>preds = []<EOL>set_score = numpy.asarray(sel_score, dtype=numpy.int)<EOL>chunk_vec = numpy.asarray(range(<NUM_LIT:0>, len(chunks)))<EOL>for i in xrange(<NUM_LIT:0>, len(chunks)):<EOL><INDENT>loop_inds = list(<EOL>chain.from_iterable([chunks[int(z)] for z, m in enumerate(range(<NUM_LIT:0>, len(chunks))) if int(z) != i]))<EOL>sim_fit = clf.fit(arr[loop_inds], set_score[loop_inds])<EOL>preds.append(list(sim_fit.predict(arr[chunks[i]])))<EOL><DEDENT>all_preds = list(chain(*preds))<EOL>return(all_preds)<EOL>", "docstring": "Generates cross validated predictions using an input classifier and data.\nclf is a classifier that implements that implements the fit and predict methods.\narr is the input data array (X)\nsel_score is the target list (y).  y[n] corresponds to X[n,:]\nnum_chunks is the number of cross validation folds to use\nReturns an array of the predictions where prediction[n] corresponds to X[n,:]", "id": "f7968:m9"}
{"signature": "def edit_distance(s1, s2):", "body": "d = {}<EOL>lenstr1 = len(s1)<EOL>lenstr2 = len(s2)<EOL>for i in xrange(-<NUM_LIT:1>, lenstr1 + <NUM_LIT:1>):<EOL><INDENT>d[(i, -<NUM_LIT:1>)] = i + <NUM_LIT:1><EOL><DEDENT>for j in xrange(-<NUM_LIT:1>, lenstr2 + <NUM_LIT:1>):<EOL><INDENT>d[(-<NUM_LIT:1>, j)] = j + <NUM_LIT:1><EOL><DEDENT>for i in xrange(lenstr1):<EOL><INDENT>for j in xrange(lenstr2):<EOL><INDENT>if s1[i] == s2[j]:<EOL><INDENT>cost = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>cost = <NUM_LIT:1><EOL><DEDENT>d[(i, j)] = min(<EOL>d[(i - <NUM_LIT:1>, j)] + <NUM_LIT:1>, <EOL>d[(i, j - <NUM_LIT:1>)] + <NUM_LIT:1>, <EOL>d[(i - <NUM_LIT:1>, j - <NUM_LIT:1>)] + cost, <EOL>)<EOL>if i and j and s1[i] == s2[j - <NUM_LIT:1>] and s1[i - <NUM_LIT:1>] == s2[j]:<EOL><INDENT>d[(i, j)] = min(d[(i, j)], d[i - <NUM_LIT:2>, j - <NUM_LIT:2>] + cost) <EOL><DEDENT><DEDENT><DEDENT>return d[lenstr1 - <NUM_LIT:1>, lenstr2 - <NUM_LIT:1>]<EOL>", "docstring": "Calculates string edit distance between string 1 and string 2.\nDeletion, insertion, substitution, and transposition all increase edit distance.", "id": "f7968:m8"}
{"signature": "def encode_plus(s):", "body": "regex = r\"<STR_LIT>\"<EOL>pat = re.compile(regex)<EOL>return pat.sub(\"<STR_LIT>\", s)<EOL>", "docstring": "Literally encodes the plus sign\ninput is a string\nreturns the string with plus signs encoded", "id": "f7968:m18"}
{"signature": "def calc_list_average(l):", "body": "total = <NUM_LIT:0.0><EOL>for value in l:<EOL><INDENT>total += value<EOL><DEDENT>return total / len(l)<EOL>", "docstring": "Calculates the average value of a list of numbers\nReturns a float", "id": "f7968:m12"}
{"signature": "def get_wordnet_syns(word):", "body": "synonyms = []<EOL>regex = r\"<STR_LIT:_>\"<EOL>pat = re.compile(regex)<EOL>synset = nltk.wordnet.wordnet.synsets(word)<EOL>for ss in synset:<EOL><INDENT>for swords in ss.lemma_names:<EOL><INDENT>synonyms.append(pat.sub(\"<STR_LIT:U+0020>\", swords.lower()))<EOL><DEDENT><DEDENT>synonyms = f7(synonyms)<EOL>return synonyms<EOL>", "docstring": "Utilize wordnet (installed with nltk) to get synonyms for words\nword is the input word\nreturns a list of unique synonyms", "id": "f7968:m16"}
{"signature": "def gen_feats(self, p_set):", "body": "if self._initialized!=True:<EOL><INDENT>error_message = \"<STR_LIT>\"<EOL>log.exception(error_message)<EOL>raise util_functions.InputError(p_set, error_message)<EOL><DEDENT>textual_features = []<EOL>for i in range(<NUM_LIT:0>,len(p_set._essay_sets)):<EOL><INDENT>textual_features.append(self._extractors[i].gen_feats(p_set._essay_sets[i]))<EOL><DEDENT>textual_matrix = numpy.concatenate(textual_features, axis=<NUM_LIT:1>)<EOL>predictor_matrix = numpy.array(p_set._numeric_features)<EOL>print(textual_matrix.shape)<EOL>print(predictor_matrix.shape)<EOL>overall_matrix = numpy.concatenate((textual_matrix, predictor_matrix), axis=<NUM_LIT:1>)<EOL>return overall_matrix.copy()<EOL>", "docstring": "Generates features based on an iput p_set\np_set - PredictorSet", "id": "f7969:c0:m2"}
{"signature": "def __init__(self, essaytype = \"<STR_LIT:train>\"):", "body": "if(essaytype != \"<STR_LIT:train>\" and essaytype != \"<STR_LIT:test>\"):<EOL><INDENT>essaytype = \"<STR_LIT:train>\"<EOL><DEDENT>self._type = essaytype<EOL>self._target=[]<EOL>self._textual_features=[]<EOL>self._numeric_features=[]<EOL>self._essay_sets=[]<EOL>", "docstring": "Initialize variables and check essay set type", "id": "f7970:c0:m0"}
{"signature": "def is_requirement(line):", "body": "<EOL>line = line.strip()<EOL>return not (<EOL>line == '<STR_LIT>' or<EOL>line.startswith('<STR_LIT>') or<EOL>line.startswith('<STR_LIT:#>') or<EOL>line.startswith('<STR_LIT>') or<EOL>line.startswith('<STR_LIT>')<EOL>)<EOL>", "docstring": "Return True if the requirement line is a package requirement;\nthat is, it is not blank, a comment, or editable.", "id": "f7971:m0"}
{"signature": "def __init__(self, room, files, data={}, progress_callback=None, finished_callback=None, error_callback=None):", "body": "Thread.__init__(self)<EOL>self._connection_settings = room.get_campfire().get_connection().get_settings()<EOL>self._room = room<EOL>self._files = files<EOL>self._data = data<EOL>self._progress_callback = progress_callback<EOL>self._finished_callback = finished_callback<EOL>self._error_callback = error_callback<EOL>self._abort = False<EOL>self._uploading = False<EOL>", "docstring": "Initialize.\n\n        Args:\n            room (:class:`Room`): Room where we are uploading\n            files (dict): A dictionary, where key is the field name, and value is the file path\n\n        Kwargs:\n            data (dict): Additional data to post\n            progress_callback (func): Callback to call as file is uploaded (parameters: current, total)\n            finished_callback (func): Callback to call when upload is finished\n            error_callback (func): Callback to call when an error occurred (parameters: exception)", "id": "f7975:c0:m0"}
{"signature": "def add_data(self, data):", "body": "if not self._data:<EOL><INDENT>self._data = {}<EOL><DEDENT>self._data.update(data)<EOL>", "docstring": "Add POST data.\n\n        Args:\n            data (dict): key => value dictionary", "id": "f7975:c1:m1"}
{"signature": "def is_uploading(self):", "body": "return self._uploading<EOL>", "docstring": "Tell if upload is in progress.\n\n        Returns:\n            bool. Success", "id": "f7975:c0:m1"}
{"signature": "def stop(self):", "body": "self._abort = True<EOL>return self<EOL>", "docstring": "Stop uploading.\n\n        It is recommended that you call join() after stopping this thread.\n\n        Returns:\n            :class:`Stream`. Current instance to allow chaining", "id": "f7975:c0:m2"}
{"signature": "def run(self):", "body": "queue = Queue()<EOL>process = UploadProcess(self._connection_settings, self._room, queue, self._files)<EOL>if self._data:<EOL><INDENT>process.add_data(self._data)<EOL><DEDENT>process.start()<EOL>if not process.is_alive():<EOL><INDENT>return<EOL><DEDENT>self._uploading = True<EOL>done = False<EOL>while not self._abort and not done:<EOL><INDENT>if not process.is_alive():<EOL><INDENT>self._abort = True<EOL>break<EOL><DEDENT>messages = None<EOL>try:<EOL><INDENT>data = queue.get()<EOL>if not data:<EOL><INDENT>done = True<EOL>if self._finished_callback:<EOL><INDENT>self._finished_callback()<EOL><DEDENT><DEDENT>elif isinstance(data, tuple):<EOL><INDENT>sent, total = data<EOL>if self._progress_callback:<EOL><INDENT>self._progress_callback(sent, total)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._abort = True<EOL>if self._error_callback:<EOL><INDENT>self._error_callback(data, self._room)<EOL><DEDENT><DEDENT><DEDENT>except Empty:<EOL><INDENT>time.sleep(<NUM_LIT:0.5>)<EOL><DEDENT><DEDENT>self._uploading = False<EOL>if self._abort and not process.is_alive() and self._error_callback:<EOL><INDENT>self._error_callback(Exception(\"<STR_LIT>\"), self._room)<EOL><DEDENT>queue.close()<EOL>if process.is_alive():<EOL><INDENT>queue.close()<EOL>process.terminate()<EOL><DEDENT>process.join()<EOL>", "docstring": "Called by the thread, it runs the process.\n\n        NEVER call this method directly. Instead call start() to start the thread.\n\n        Before finishing the thread using this thread, call join()", "id": "f7975:c0:m3"}
{"signature": "def __init__(self, data={}):", "body": "self.set_data(data)<EOL>", "docstring": "Initialize.\n\n        Kwargs:\n            data (dict): Data", "id": "f7976:c0:m0"}
{"signature": "def set_setting(self, name, value):", "body": "self._settings[\"<STR_LIT:name>\"] = value<EOL>", "docstring": "Set a setting value.\n\n        Args:\n            name (str): Setting name\n            value: Setting value", "id": "f7977:c4:m4"}
{"signature": "def put(self, url=None, post_data={}, parse_data=False, key=None, parameters=None):", "body": "return self._fetch(\"<STR_LIT>\", url, post_data=post_data, parse_data=parse_data, key=key, parameters=parameters, full_return=True)<EOL>", "docstring": "Issue a PUT request.\n\n        Kwargs:\n            url (str): Destination URL\n            post_data (dict): Dictionary of parameter and values\n            parse_data (bool): If true, parse response data\n            key (string): If parse_data==True, look for this key when parsing data\n            parameters (dict): Additional GET parameters to append to the URL\n\n        Returns:\n            dict. Response (a dict with keys: success, data, info, body)\n\n        Raises:\n            AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception", "id": "f7977:c4:m7"}
{"signature": "def _url(self, url=None, parameters=None):", "body": "uri = url or self._settings[\"<STR_LIT:url>\"]<EOL>if url and self._settings[\"<STR_LIT>\"]:<EOL><INDENT>uri = \"<STR_LIT>\" % (self._settings[\"<STR_LIT>\"], url)<EOL><DEDENT>uri += \"<STR_LIT>\"<EOL>if parameters:<EOL><INDENT>uri += \"<STR_LIT>\" % urllib.urlencode(parameters)<EOL><DEDENT>return uri<EOL>", "docstring": "Build destination URL.\n\n        Kwargs:\n            url (str): Destination URL\n            parameters (dict): Additional GET parameters to append to the URL\n\n        Returns:\n            str. URL", "id": "f7977:c4:m16"}
{"signature": "def _fetch(self, method, url=None, post_data=None, parse_data=True, key=None, parameters=None, listener=None, full_return=False):", "body": "headers = self.get_headers()<EOL>headers[\"<STR_LIT:Content-Type>\"] = \"<STR_LIT:application/json>\"<EOL>handlers = []<EOL>debuglevel = int(self._settings[\"<STR_LIT>\"])<EOL>handlers.append(urllib2.HTTPHandler(debuglevel=debuglevel))<EOL>if hasattr(httplib, \"<STR_LIT>\"):<EOL><INDENT>handlers.append(urllib2.HTTPSHandler(debuglevel=debuglevel))<EOL><DEDENT>handlers.append(urllib2.HTTPCookieProcessor(cookielib.CookieJar()))<EOL>password_url = self._get_password_url()<EOL>if password_url and \"<STR_LIT>\" not in headers:<EOL><INDENT>pwd_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()<EOL>pwd_manager.add_password(None, password_url, self._settings[\"<STR_LIT:user>\"], self._settings[\"<STR_LIT:password>\"])<EOL>handlers.append(HTTPBasicAuthHandler(pwd_manager))<EOL><DEDENT>opener = urllib2.build_opener(*handlers)<EOL>if post_data is not None:<EOL><INDENT>post_data = json.dumps(post_data)<EOL><DEDENT>uri = self._url(url, parameters)<EOL>request = RESTRequest(uri, method=method, headers=headers)<EOL>if post_data is not None:<EOL><INDENT>request.add_data(post_data)<EOL><DEDENT>response = None<EOL>try:<EOL><INDENT>response = opener.open(request)<EOL>body = response.read()<EOL>if password_url and password_url not in self._settings[\"<STR_LIT>\"] and request.has_header(\"<STR_LIT>\"):<EOL><INDENT>self._settings[\"<STR_LIT>\"][password_url] = request.get_header(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>except urllib2.HTTPError as e:<EOL><INDENT>if e.code == <NUM_LIT>:<EOL><INDENT>raise AuthenticationError(\"<STR_LIT>\" % uri)<EOL><DEDENT>elif e.code == <NUM_LIT>:<EOL><INDENT>raise ConnectionError(\"<STR_LIT>\" % uri)<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>except urllib2.URLError as e:<EOL><INDENT>raise ConnectionError(\"<STR_LIT>\" % (uri, e))<EOL><DEDENT>finally:<EOL><INDENT>if 
response:<EOL><INDENT>response.close()<EOL><DEDENT>opener.close()<EOL><DEDENT>data = None<EOL>if parse_data:<EOL><INDENT>if not key:<EOL><INDENT>key = string.split(url, \"<STR_LIT:/>\")[<NUM_LIT:0>]<EOL><DEDENT>data = self.parse(body, key)<EOL><DEDENT>if full_return:<EOL><INDENT>info = response.info() if response else None<EOL>status = int(string.split(info[\"<STR_LIT:status>\"])[<NUM_LIT:0>]) if (info and \"<STR_LIT:status>\" in info) else None<EOL>return {<EOL>\"<STR_LIT:success>\": (status >= <NUM_LIT:200> and status < <NUM_LIT>), <EOL>\"<STR_LIT:data>\": data, <EOL>\"<STR_LIT:info>\": info, <EOL>\"<STR_LIT:body>\": body<EOL>}<EOL><DEDENT>return data<EOL>", "docstring": "Issue a request.\n\n        Args:\n            method (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None\n\n        Kwargs:\n            url (str): Destination URL\n            post_data (str): A string of what to POST\n            parse_data (bool): If true, parse response data\n            key (string): If parse_data==True, look for this key when parsing data\n            parameters (dict): Additional GET parameters to append to the URL\n            listener (func): callback called when uploading a file\n            full_return (bool): If set to True, get a full response (with success, data, info, body)\n\n        Returns:\n            dict. Response. If full_return==True, a dict with keys: success, data, info, body, otherwise the parsed data\n\n        Raises:\n            AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError", "id": "f7977:c4:m15"}
{"signature": "def get_headers(self):", "body": "headers = {<EOL>\"<STR_LIT>\": \"<STR_LIT>\"<EOL>}<EOL>password_url = self._get_password_url()<EOL>if password_url and password_url in self._settings[\"<STR_LIT>\"]:<EOL><INDENT>headers[\"<STR_LIT>\"] = self._settings[\"<STR_LIT>\"][password_url]<EOL><DEDENT>return headers<EOL>", "docstring": "Get headers.\n\n        Returns:\n            tuple: Headers", "id": "f7977:c4:m10"}
{"signature": "def set_method(self, method):", "body": "self._method = method<EOL>", "docstring": "Set HTTP method.\n\n        Args:\n            method (str): Method (GET/POST/PUT/DELETE/etc.)", "id": "f7977:c2:m1"}
{"signature": "@staticmethod<EOL><INDENT>def create_from_settings(settings):<DEDENT>", "body": "return Connection(<EOL>settings[\"<STR_LIT:url>\"], <EOL>settings[\"<STR_LIT>\"],<EOL>settings[\"<STR_LIT:user>\"],<EOL>settings[\"<STR_LIT:password>\"],<EOL>authorizations = settings[\"<STR_LIT>\"],<EOL>debug = settings[\"<STR_LIT>\"]<EOL>)<EOL>", "docstring": "Create a connection with given settings.\n\n        Args:\n            settings (dict): A dictionary of settings\n\n        Returns:\n            :class:`Connection`. The connection", "id": "f7977:c4:m1"}
{"signature": "def post(self, url=None, post_data={}, parse_data=False, key=None, parameters=None, listener=None):", "body": "return self._fetch(\"<STR_LIT:POST>\", url, post_data=post_data, parse_data=parse_data, key=key, parameters=parameters, listener=listener, full_return=True)<EOL>", "docstring": "Issue a POST request.\n\n        Kwargs:\n            url (str): Destination URL\n            post_data (dict): Dictionary of parameter and values\n            parse_data (bool): If true, parse response data\n            key (string): If parse_data==True, look for this key when parsing data\n            parameters (dict): Additional GET parameters to append to the URL\n            listener (func): callback called when uploading a file\n\n        Returns:\n            dict. Response (a dict with keys: success, data, info, body)\n\n        Raises:\n            AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception", "id": "f7977:c4:m8"}
{"signature": "def _length(self):", "body": "self._build_chunk_headers()<EOL>length = <NUM_LIT:0><EOL>if self._data:<EOL><INDENT>for field in self._data:<EOL><INDENT>length += len(self._chunk_headers[field])<EOL>length += len(self._data[field])<EOL>length += <NUM_LIT:2><EOL><DEDENT><DEDENT>if self._files:<EOL><INDENT>for field in self._files:<EOL><INDENT>length += len(self._chunk_headers[field])<EOL>length += self._file_size(field)<EOL>length += <NUM_LIT:2><EOL><DEDENT><DEDENT>length += len(self.boundary)<EOL>length += <NUM_LIT:6><EOL>return length<EOL>", "docstring": "Returns total length for this request.\n\n        Returns:\n            int. Length", "id": "f7978:c0:m8"}
{"signature": "def _build_chunk_headers(self):", "body": "if hasattr(self, \"<STR_LIT>\") and self._chunk_headers:<EOL><INDENT>return<EOL><DEDENT>self._chunk_headers = {}<EOL>for field in self._files:<EOL><INDENT>self._chunk_headers[field] = self._headers(field, True)<EOL><DEDENT>for field in self._data:<EOL><INDENT>self._chunk_headers[field] = self._headers(field)<EOL><DEDENT>", "docstring": "Build headers for each field.", "id": "f7978:c0:m9"}
{"signature": "def _send_to_consumer(self, block):", "body": "self._consumer.write(block)<EOL>self._sent += len(block)<EOL>if self._callback:<EOL><INDENT>self._callback(self._sent, self.length)<EOL><DEDENT>", "docstring": "Send a block of bytes to the consumer.\n\n        Args:\n            block (str): Block of bytes", "id": "f7978:c0:m7"}
{"signature": "def _finish(self, forced=False):", "body": "if hasattr(self, \"<STR_LIT>\") and self._current_file_handle:<EOL><INDENT>self._current_file_handle.close()<EOL><DEDENT>if self._current_deferred:<EOL><INDENT>self._current_deferred.callback(self._sent)<EOL>self._current_deferred = None<EOL><DEDENT>if not forced and self._deferred:<EOL><INDENT>self._deferred.callback(self._sent)<EOL><DEDENT>", "docstring": "Cleanup code after asked to stop producing.\n\n        Kwargs:\n            forced (bool): If True, we were forced to stop", "id": "f7978:c0:m6"}
{"signature": "def stopProducing(self):", "body": "self._finish(True)<EOL>if self._deferred and self._sent < self.length:<EOL><INDENT>self._deferred.errback(Exception(\"<STR_LIT>\" % (self._sent, self.length)))<EOL><DEDENT>", "docstring": "Stop producing", "id": "f7978:c0:m4"}
{"signature": "def _file_type(self, field):", "body": "type = mimetypes.guess_type(self._files[field])[<NUM_LIT:0>]<EOL>return type.encode(\"<STR_LIT:utf-8>\") if isinstance(type, unicode) else str(type)<EOL>", "docstring": "Returns file type for given file field.\n\n        Args:\n            field (str): File field\n\n        Returns:\n            string. File type", "id": "f7978:c0:m12"}
{"signature": "def __init__(self, files={}, data={}, callback=None, deferred=None):", "body": "self._files = files<EOL>self._file_lengths = {}<EOL>self._data = data<EOL>self._callback = callback<EOL>self._deferred = deferred<EOL>self.boundary = self._boundary()<EOL>self.length = self._length()<EOL>", "docstring": "Initialize.\n\n        Kwargs:\n            files (dict): A dictionary, where key is the field name, and value is the file path\n            data (dict): Additional data to post\n            callback (func): Callback to inform progress (receives sent, and total)\n            deferred: Deferred to call when done, or when error occurs", "id": "f7978:c0:m0"}
{"signature": "def _boundary(self):", "body": "boundary = None<EOL>try:<EOL><INDENT>import uuid<EOL>boundary = uuid.uuid4().hex<EOL><DEDENT>except ImportError:<EOL><INDENT>import random, sha<EOL>bits = random.getrandbits(<NUM_LIT>)<EOL>boundary = sha.new(str(bits)).hexdigest()<EOL><DEDENT>return boundary<EOL>", "docstring": "Returns a random string to use as the boundary for a message.\n\n        Returns:\n            string. Boundary", "id": "f7978:c0:m11"}
{"signature": "def get_users(self, sort=True):", "body": "self._load()<EOL>if sort:<EOL><INDENT>self.users.sort(key=operator.itemgetter(\"<STR_LIT:name>\"))<EOL><DEDENT>return self.users<EOL>", "docstring": "Get list of users in the room.\n\n        Kwargs:\n            sort (bool): If True, sort rooms by name\n\n        Returns:\n            array. List of users", "id": "f7980:c0:m4"}
{"signature": "def get_stream(self, error_callback=None, live=True):", "body": "self.join()<EOL>return Stream(self, error_callback=error_callback, live=live)<EOL>", "docstring": "Get room stream to listen for messages.\n\n        Kwargs:\n            error_callback (func): Callback to call when an error occurred (parameters: exception)\n            live (bool): If True, issue a live stream, otherwise an offline stream\n\n        Returns:\n            :class:`Stream`. Stream", "id": "f7980:c0:m2"}
{"signature": "def transcript(self, for_date=None):", "body": "url = \"<STR_LIT>\" % self.id<EOL>if for_date:<EOL><INDENT>url = \"<STR_LIT>\" % (url, for_date.year, for_date.month, for_date.day)<EOL><DEDENT>messages = self._connection.get(url, key=\"<STR_LIT>\")<EOL>if messages:<EOL><INDENT>messages = [Message(self._campfire, message) for message in messages]<EOL><DEDENT>return messages<EOL>", "docstring": "Recent messages.\n\n        Kwargs:\n            for_date (date): If specified, get the transcript for this specific date\n\n        Returns:\n            array. Messages", "id": "f7980:c0:m12"}
{"signature": "def get_uploads(self):", "body": "return self._connection.get(\"<STR_LIT>\" % self.id, key=\"<STR_LIT>\")<EOL>", "docstring": "Get list of recent uploads.\n\n        Returns:\n            array. List of uploads", "id": "f7980:c0:m3"}
{"signature": "def upload(self, path, progress_callback=None, finished_callback=None, error_callback=None):", "body": "return Upload(<EOL>self,<EOL>{\"<STR_LIT>\": path},<EOL>progress_callback = progress_callback,<EOL>finished_callback = finished_callback,<EOL>error_callback = error_callback<EOL>)<EOL>", "docstring": "Create a new thread to upload a file (thread should be\n        then started with start() to perform upload.)\n\n        Args:\n            path (str): Path to file\n\n        Kwargs:\n            progress_callback (func): Callback to call as file is uploaded (parameters: current, total)\n            finished_callback (func): Callback to call when upload is finished\n            error_callback (func): Callback to call when an error occurred (parameters: exception)\n\n        Returns:\n            :class:`Upload`. Upload thread", "id": "f7980:c0:m14"}
{"signature": "def set_topic(self, topic):", "body": "if not topic:<EOL><INDENT>topic = '<STR_LIT>'<EOL><DEDENT>result = self._connection.put(\"<STR_LIT>\" % self.id, {\"<STR_LIT>\": {\"<STR_LIT>\": topic}})<EOL>if result[\"<STR_LIT:success>\"]:<EOL><INDENT>self._load()<EOL><DEDENT>return result[\"<STR_LIT:success>\"]<EOL>", "docstring": "Set the room topic.\n\n        Args:\n            topic (str): Topic\n\n        Returns:\n            bool. Success", "id": "f7980:c0:m10"}
{"signature": "def is_timestamp(self):", "body": "return self.type == self._TYPE_TIMESTAMP<EOL>", "docstring": "Tells if this message is a timestamp.\n\n        Returns:\n            bool. Success", "id": "f7983:c0:m7"}
{"signature": "def is_kick(self):", "body": "return self.type == self._TYPE_KICK<EOL>", "docstring": "Tells if this message is a room kick message.\n\n        Returns:\n            bool. Success", "id": "f7983:c0:m3"}
{"signature": "def remove_highlight(self):", "body": "return self._connection.delete(\"<STR_LIT>\" % self.id)[\"<STR_LIT:success>\"]<EOL>", "docstring": "Removes the highlight of a message.\n\n        Returns:\n            bool. Success", "id": "f7983:c0:m12"}
{"signature": "def is_leaving(self):", "body": "return self.type == self._TYPE_LEAVE<EOL>", "docstring": "Tells if this message is a room leave message.\n\n        Returns:\n            bool. Success", "id": "f7983:c0:m4"}
{"signature": "def is_upload(self):", "body": "return self.type == self._TYPE_UPLOAD<EOL>", "docstring": "Tells if this message is an upload message.\n\n        Returns:\n            bool. Success", "id": "f7983:c0:m10"}
{"signature": "def run(self):", "body": "if not self._queue:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>factory = LiveStreamFactory(self)<EOL>self._reactor.connectSSL(\"<STR_LIT>\", <NUM_LIT>, factory, ssl.ClientContextFactory())<EOL>self._reactor.run()<EOL>", "docstring": "Called by the process, it runs it.\n\n        NEVER call this method directly. Instead call start() to start the separate process.\n        If you don't want to use a second process, then call fetch() directly on this istance.\n\n        To stop, call terminate()", "id": "f7984:c2:m3"}
{"signature": "def fetch(self):", "body": "try:<EOL><INDENT>if not self._last_message_id:<EOL><INDENT>messages = self._connection.get(\"<STR_LIT>\" % self._room_id, key=\"<STR_LIT>\", parameters={<EOL>\"<STR_LIT>\": <NUM_LIT:1><EOL>})<EOL>self._last_message_id = messages[-<NUM_LIT:1>][\"<STR_LIT:id>\"]<EOL><DEDENT>messages = self._connection.get(\"<STR_LIT>\" % self._room_id, key=\"<STR_LIT>\", parameters={<EOL>\"<STR_LIT>\": self._last_message_id<EOL>})<EOL><DEDENT>except:<EOL><INDENT>messages = []<EOL><DEDENT>if messages:<EOL><INDENT>self._last_message_id = messages[-<NUM_LIT:1>][\"<STR_LIT:id>\"]<EOL><DEDENT>self.received(messages)<EOL>", "docstring": "Fetch new messages.", "id": "f7984:c1:m5"}
{"signature": "def detach(self, observer):", "body": "try:<EOL><INDENT>self._observers.remove(observer)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT>return self<EOL>", "docstring": "Detach an observer.\n\n        Args:\n            observer (func): The observer function already attached\n\n        Returns:\n            :class:`Stream`. Current instance to allow chaining", "id": "f7984:c0:m2"}
{"signature": "def connected(self):", "body": "pass<EOL>", "docstring": "Callback when a connection is made.", "id": "f7984:c2:m5"}
{"signature": "def get_room_id(self):", "body": "return self._room_id<EOL>", "docstring": "Get room ID.\n\n        Returns:\n            int. Room ID", "id": "f7984:c1:m1"}
{"signature": "def connectionMade(self):", "body": "headers = [<EOL>\"<STR_LIT>\" % (\"<STR_LIT>\" % self.factory.get_stream().get_room_id())<EOL>]<EOL>connection_headers = self.factory.get_stream().get_connection().get_headers()<EOL>for header in connection_headers:<EOL><INDENT>headers.append(\"<STR_LIT>\" % (header, connection_headers[header]))<EOL><DEDENT>headers.append(\"<STR_LIT>\")<EOL>self.transport.write(\"<STR_LIT:\\r\\n>\".join(headers) + \"<STR_LIT>\")<EOL>self.factory.get_stream().set_protocol(self)<EOL>", "docstring": "Called when a connection is made, and used to send out headers", "id": "f7984:c3:m1"}
{"signature": "def is_streaming(self):", "body": "return self._streaming<EOL>", "docstring": "Tell if streaming is in progress.\n\n        Returns:\n            bool. Success", "id": "f7984:c0:m4"}
{"signature": "def set_protocol(self, protocol):", "body": "self._protocol = protocol<EOL>", "docstring": "Set protocol.\n\n        Args:\n            :class:`LiveStreamProtocol`: Protocol", "id": "f7984:c2:m2"}
{"signature": "def __init__(self):", "body": "self._in_header = True<EOL>self._headers = []<EOL>self._len_expected = None<EOL>self._buffer = \"<STR_LIT>\"<EOL>", "docstring": "Constructor.", "id": "f7984:c3:m0"}
{"signature": "def lineReceived(self, line):", "body": "while self._in_header:<EOL><INDENT>if line:<EOL><INDENT>self._headers.append(line)<EOL><DEDENT>else:<EOL><INDENT>http, status, message = self._headers[<NUM_LIT:0>].split(\"<STR_LIT:U+0020>\", <NUM_LIT:2>)<EOL>status = int(status)<EOL>if status == <NUM_LIT:200>:<EOL><INDENT>self.factory.get_stream().connected()<EOL><DEDENT>else:<EOL><INDENT>self.factory.continueTrying = <NUM_LIT:0><EOL>self.transport.loseConnection()<EOL>self.factory.get_stream().disconnected(RuntimeError(status, message))<EOL>return<EOL><DEDENT>self._in_header = False<EOL><DEDENT>break<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>self._len_expected = int(line, <NUM_LIT:16>)<EOL>self.setRawMode()<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>", "docstring": "Callback issued by twisted when new line arrives.\n\n        Args:\n            line (str): Incoming line", "id": "f7984:c3:m2"}
{"signature": "def __init__(self, room, live=True, error_callback=None, pause=None, use_process=True):", "body": "if not live:<EOL><INDENT>if pause is None:<EOL><INDENT>pause = <NUM_LIT:1><EOL><DEDENT>assert pause > <NUM_LIT:0>, '<STR_LIT>'<EOL><DEDENT>elif pause is None:<EOL><INDENT>pause = <NUM_LIT><EOL><DEDENT>Thread.__init__(self)<EOL>self._room = room<EOL>self._live = live<EOL>self._observers = []<EOL>self._error_callback = error_callback<EOL>self._pause = pause<EOL>self._use_process = use_process<EOL>self._streaming = False<EOL>", "docstring": "Initialize.\n\n        Args:\n            room (:class:`Room`): Room that is being streamed\n\n        Kwargs:\n            live (bool): If True, issue a live stream, otherwise an offline stream\n            error_callback (func): A callback to call when an error occurs\n            pause (int): Pause in seconds between requests (if live==False), or pause\n                         between queue checks\n            use_process (bool): If True, use a separate process to fetch the messages\n\n        Raises:\n            AssertionError", "id": "f7984:c0:m0"}
{"signature": "def get_connection(self):", "body": "return self._connection<EOL>", "docstring": "Get connection\n\n        Returns:\n            :class:`Connection`. Connection", "id": "f7984:c2:m1"}
{"signature": "def set_callback(self, callback):", "body": "self._callback = callback<EOL>", "docstring": "Set callback.\n\n        Args:\n            callback (func): Called when new messages arrive", "id": "f7984:c1:m2"}
{"signature": "def run(self):", "body": "if self._live:<EOL><INDENT>self._use_process = True<EOL><DEDENT>self._abort = False<EOL>campfire = self._room.get_campfire()<EOL>if self._live:<EOL><INDENT>process = LiveStreamProcess(campfire.get_connection().get_settings(), self._room.id)<EOL><DEDENT>else:<EOL><INDENT>process = StreamProcess(campfire.get_connection().get_settings(), self._room.id, pause=self._pause)<EOL><DEDENT>if not self._use_process:<EOL><INDENT>process.set_callback(self.incoming)<EOL><DEDENT>if self._use_process:<EOL><INDENT>queue = Queue()<EOL>process.set_queue(queue)<EOL>process.start()<EOL>if not process.is_alive():<EOL><INDENT>return<EOL><DEDENT><DEDENT>self._streaming = True<EOL>while not self._abort:<EOL><INDENT>if self._use_process:<EOL><INDENT>if not process.is_alive():<EOL><INDENT>self._abort = True<EOL>break<EOL><DEDENT>try:<EOL><INDENT>incoming = queue.get_nowait()<EOL>if isinstance(incoming, list):<EOL><INDENT>self.incoming(incoming)<EOL><DEDENT>elif isinstance(incoming, Exception):<EOL><INDENT>self._abort = True<EOL>if self._error_callback:<EOL><INDENT>self._error_callback(incoming, self._room)<EOL><DEDENT><DEDENT><DEDENT>except Empty:<EOL><INDENT>time.sleep(self._pause)<EOL>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>process.fetch()<EOL>time.sleep(self._pause)<EOL><DEDENT><DEDENT>self._streaming = False<EOL>if self._use_process and self._abort and not process.is_alive() and self._error_callback:<EOL><INDENT>self._error_callback(Exception(\"<STR_LIT>\"), self._room)<EOL><DEDENT>if self._use_process:<EOL><INDENT>queue.close()<EOL>if process.is_alive():<EOL><INDENT>process.stop()<EOL>process.terminate()<EOL><DEDENT>process.join()<EOL><DEDENT>", "docstring": "Called by the thread, it runs the process.\n\n        NEVER call this method directly. Instead call start() to start the thread.\n\n        To stop, call stop(), and then join()", "id": "f7984:c0:m6"}
{"signature": "def __init__(self, settings, room_id, pause=<NUM_LIT:1>):", "body": "Process.__init__(self)<EOL>self._pause = pause<EOL>self._room_id = room_id<EOL>self._callback = None<EOL>self._queue = None<EOL>self._connection = Connection.create_from_settings(settings)<EOL>self._last_message_id = None<EOL>", "docstring": "Initialize.\n\n        Args:\n            settings (dict): Settings used to create a :class:`Connection` instance\n            room_id (int): Room ID\n\n        Kwargs:\n            pause (int): Pause in seconds between requests", "id": "f7984:c1:m0"}
{"signature": "def get_rooms(self, sort=True):", "body": "rooms = self._connection.get(\"<STR_LIT>\")<EOL>if sort:<EOL><INDENT>rooms.sort(key=operator.itemgetter(\"<STR_LIT:name>\"))<EOL><DEDENT>return rooms<EOL>", "docstring": "Get rooms list.\n\n        Kwargs:\n            sort (bool): If True, sort rooms by name\n\n        Returns:\n            array. List of rooms (each room is a dict)", "id": "f7985:c1:m3"}
{"signature": "def search(self, terms):", "body": "messages = self._connection.get(\"<STR_LIT>\" % urllib.quote_plus(terms), key=\"<STR_LIT>\")<EOL>if messages:<EOL><INDENT>messages = [Message(self, message) for message in messages]<EOL><DEDENT>return messages<EOL>", "docstring": "Search transcripts.\n\n        Args:\n            terms (str): Terms for search\n\n        Returns:\n            array. Messages", "id": "f7985:c1:m7"}
{"signature": "def __init__(self, subdomain, username, password, ssl=False, currentUser=None):", "body": "self.base_url = \"<STR_LIT>\" % (\"<STR_LIT:s>\" if ssl else \"<STR_LIT>\", subdomain)<EOL>self._settings = {<EOL>\"<STR_LIT>\": subdomain,<EOL>\"<STR_LIT:username>\": username,<EOL>\"<STR_LIT:password>\": password,<EOL>\"<STR_LIT>\": ssl<EOL>}<EOL>self._user = currentUser<EOL>self._users = {}<EOL>self._rooms = {}<EOL>if not self._user:<EOL><INDENT>_connection = Connection(url=\"<STR_LIT>\" % self.base_url, user=username, password=password)<EOL>user = _connection.get(key=\"<STR_LIT:user>\")<EOL><DEDENT>self._connection = Connection(<EOL>base_url=self.base_url, <EOL>user=self._user.token if self._user else user[\"<STR_LIT>\"], <EOL>password=\"<STR_LIT:x>\"<EOL>)<EOL>if self._user:<EOL><INDENT>self._user.set_connection(self._connection)<EOL><DEDENT>else:<EOL><INDENT>self._user = User(self, user[\"<STR_LIT:id>\"], current=True)<EOL>self._user.token = user[\"<STR_LIT>\"]<EOL><DEDENT>", "docstring": "Initialize.\n\n        Args:\n            subdomain (str): Campfire subdomain\n            username (str): User\n            password (str): pasword\n\n        Kwargs:\n            ssl (bool): enabled status of SSL\n            currentUser (:class:`User`): If specified, don't auto load current user, use this one instead", "id": "f7985:c1:m0"}
{"signature": "def get_user(self, id = None):", "body": "if not id:<EOL><INDENT>id = self._user.id<EOL><DEDENT>if id not in self._users:<EOL><INDENT>self._users[id] = self._user if id == self._user.id else User(self, id)<EOL><DEDENT>return self._users[id]<EOL>", "docstring": "Get user.\n\n        Returns:\n            :class:`User`. User", "id": "f7985:c1:m6"}
{"signature": "def MWAPIWrapper(func):", "body": "@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>self = args[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>result = func(*args, **kwargs)<EOL>return result<EOL><DEDENT>except ConnectionError:<EOL><INDENT>err_title = '<STR_LIT>'<EOL>err_message = '<STR_LIT>'.format(name=func.__name__, host=self.host)<EOL><DEDENT>except HTTPError as e:<EOL><INDENT>err_title = '<STR_LIT>'<EOL>err_message = '<STR_LIT>'.format(name=func.__name__,<EOL>host=self.host, detail=e.message)<EOL><DEDENT>except Timeout:<EOL><INDENT>err_title = '<STR_LIT>'<EOL>err_message = '<STR_LIT>'.format(name=func.__name__, host=self.host)<EOL><DEDENT>except TooManyRedirects:<EOL><INDENT>err_title = '<STR_LIT>'<EOL>err_message = '<STR_LIT>'.format(name=func.__name__, host=self.host)<EOL><DEDENT>except ValueError as e:<EOL><INDENT>if e.message.find('<STR_LIT>') >= <NUM_LIT:0>:<EOL><INDENT>err_title = '<STR_LIT>'<EOL>err_message = '<STR_LIT>'.format(name=func.__name__, host=self.host)<EOL><DEDENT>else:<EOL><INDENT>err_title = '<STR_LIT>'<EOL>err_message = '<STR_LIT>'.format(name=func.__name__, msg=e.message)<EOL>self.log.error(e, exc_info=True)<EOL><DEDENT><DEDENT>except KeyError as e:<EOL><INDENT>err_title = '<STR_LIT>'<EOL>err_message = '<STR_LIT>'.format(name=func.__name__, key=e.message)<EOL>self.log.error(e, exc_info=True)<EOL><DEDENT>except MWAPIException as e:<EOL><INDENT>err_title = '<STR_LIT>'<EOL>err_message = e.message<EOL><DEDENT>self.log.error('<STR_LIT>', err_title, err_message)<EOL>return {'<STR_LIT:success>': False, '<STR_LIT>': err_title, '<STR_LIT>': err_message}<EOL><DEDENT>return wrapper<EOL>", "docstring": "MWAPIWrapper \u63a7\u5236API\u8bf7\u6c42\u5f02\u5e38\u7684\u88c5\u9970\u5668\n\u6839\u636erequests\u5e93\u5b9a\u4e49\u7684\u5f02\u5e38\u6765\u63a7\u5236\u8bf7\u6c42\u8fd4\u56de\u7684\u610f\u5916\u60c5\u51b5", "id": "f7987:m0"}
{"signature": "def stop(self, pin):", "body": "self.bbio_pwm.stop(pin)<EOL>", "docstring": "Stop PWM output on specified pin.", "id": "f7996:c1:m4"}
{"signature": "def set_frequency(self, pin, frequency_hz):", "body": "self.bbio_pwm.set_frequency(pin, frequency_hz)<EOL>", "docstring": "Set frequency (in Hz) of PWM output on specified pin.", "id": "f7996:c1:m3"}
{"signature": "def stop(self, pin):", "body": "if pin not in self.pwm:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(pin))<EOL><DEDENT>self.pwm[pin].stop()<EOL>del self.pwm[pin]<EOL>", "docstring": "Stop PWM output on specified pin.", "id": "f7996:c0:m4"}
{"signature": "def start(self, pin, dutycycle, frequency_hz=<NUM_LIT>):", "body": "if dutycycle < <NUM_LIT:0.0> or dutycycle > <NUM_LIT>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.rpi_gpio.setup(pin, self.rpi_gpio.OUT)<EOL>self.pwm[pin] = self.rpi_gpio.PWM(pin, frequency_hz)<EOL>self.pwm[pin].start(dutycycle)<EOL>", "docstring": "Enable PWM output on specified pin.  Set to intiial percent duty cycle\n        value (0.0 to 100.0) and frequency (in Hz).", "id": "f7996:c0:m1"}
{"signature": "def output(self, pin, value):", "body": "self.output_pins({pin: value})<EOL>", "docstring": "Set the specified pin the provided high/low value.  Value should be\n        either GPIO.HIGH/GPIO.LOW or a boolean (True = HIGH).", "id": "f7997:c0:m2"}
{"signature": "def setup(self, pin, value):", "body": "self._validate_pin(pin)<EOL>if value == GPIO.IN:<EOL><INDENT>self.iodir[int(pin/<NUM_LIT:8>)] |= <NUM_LIT:1> << (int(pin%<NUM_LIT:8>))<EOL><DEDENT>elif value == GPIO.OUT:<EOL><INDENT>self.iodir[int(pin/<NUM_LIT:8>)] &= ~(<NUM_LIT:1> << (int(pin%<NUM_LIT:8>)))<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.write_iodir()<EOL>", "docstring": "Set the input or output mode for a specified pin.  Mode should be\n        either GPIO.OUT or GPIO.IN.", "id": "f7997:c0:m1"}
{"signature": "def pullup(self, pin, enabled):", "body": "self._validate_pin(pin)<EOL>if enabled:<EOL><INDENT>self.gppu[int(pin/<NUM_LIT:8>)] |= <NUM_LIT:1> << (int(pin%<NUM_LIT:8>))<EOL><DEDENT>else:<EOL><INDENT>self.gppu[int(pin/<NUM_LIT:8>)] &= ~(<NUM_LIT:1> << (int(pin%<NUM_LIT:8>)))<EOL><DEDENT>self.write_gppu()<EOL>", "docstring": "Turn on the pull-up resistor for the specified pin if enabled is True,\n        otherwise turn off the pull-up resistor.", "id": "f7997:c0:m6"}
{"signature": "def write_iodir(self, iodir=None):", "body": "if iodir is not None:<EOL><INDENT>self.iodir = iodir<EOL><DEDENT>self._device.writeList(self.IODIR, self.iodir)<EOL>", "docstring": "Write the specified byte value to the IODIR registor.  If no value\n        specified the current buffered value will be written.", "id": "f7997:c0:m8"}
{"signature": "def write_gpio(self, gpio=None):", "body": "if gpio is not None:<EOL><INDENT>self.gpio = gpio<EOL><DEDENT>self._device.writeList(self.GPIO, self.gpio)<EOL>", "docstring": "Write the specified byte value to the GPIO registor.  If no value\n        specified the current buffered value will be written.", "id": "f7997:c0:m7"}
{"signature": "def wait_for_edge(self, pin, edge):", "body": "self.rpi_gpio.wait_for_edge(pin, self._edge_mapping[edge])<EOL>", "docstring": "Wait for an edge.   Pin should be type IN.  Edge must be RISING,\n        FALLING or BOTH.", "id": "f7998:c1:m9"}
{"signature": "def input(self, pin):", "body": "return self.rpi_gpio.input(pin)<EOL>", "docstring": "Read the specified pin and return HIGH/true if the pin is pulled high,\n        or LOW/false if pulled low.", "id": "f7998:c1:m3"}
{"signature": "def add_event_callback(self, pin, callback, bouncetime=-<NUM_LIT:1>):", "body": "kwargs = {}<EOL>if bouncetime > <NUM_LIT:0>:<EOL><INDENT>kwargs['<STR_LIT>']=bouncetime<EOL><DEDENT>self.bbio_gpio.add_event_callback(pin, callback, **kwargs)<EOL>", "docstring": "Add a callback for an event already defined using add_event_detect().\n        Pin should be type IN.  Bouncetime is switch bounce timeout in ms for \n        callback", "id": "f7998:c2:m7"}
{"signature": "def setup_pins(self, pins):", "body": "<EOL>for pin, value in iter(pins.items()):<EOL><INDENT>self.setup(pin, value)<EOL><DEDENT>", "docstring": "Setup multiple pins as inputs or outputs at once.  Pins should be a\n        dict of pin name to pin type (IN or OUT).", "id": "f7998:c0:m8"}
{"signature": "def add_event_detect(self, pin, edge, callback=None, bouncetime=-<NUM_LIT:1>):", "body": "kwargs = {}<EOL>if callback:<EOL><INDENT>kwargs['<STR_LIT>']=callback<EOL><DEDENT>if bouncetime > <NUM_LIT:0>:<EOL><INDENT>kwargs['<STR_LIT>']=bouncetime<EOL><DEDENT>self.rpi_gpio.add_event_detect(pin, self._edge_mapping[edge], **kwargs)<EOL>", "docstring": "Enable edge detection events for a particular GPIO channel.  Pin \n        should be type IN.  Edge must be RISING, FALLING or BOTH.  Callback is a\n        function for the event.  Bouncetime is switch bounce timeout in ms for\n        callback", "id": "f7998:c1:m5"}
{"signature": "def is_low(self, pin):", "body": "return self.input(pin) == LOW<EOL>", "docstring": "Return true if the specified pin is pulled low.", "id": "f7998:c0:m6"}
{"signature": "def output(self, pin, value):", "body": "raise NotImplementedError<EOL>", "docstring": "Set the specified pin the provided high/low value.  Value should be\n        either HIGH/LOW or a boolean (true = high).", "id": "f7998:c0:m1"}
{"signature": "def wait_for_edge(self, pin, edge):", "body": "self.bbio_gpio.wait_for_edge(pin, self._edge_mapping[edge])<EOL>", "docstring": "Wait for an edge.   Pin should be type IN.  Edge must be RISING, \n        FALLING or BOTH.", "id": "f7998:c2:m9"}
{"signature": "def wait_for_edge(self, pin, edge):", "body": "raise NotImplementedError<EOL>", "docstring": "Wait for an edge.   Pin should be type IN.  Edge must be RISING, \n        FALLING or BOTH.", "id": "f7998:c0:m14"}
{"signature": "def setup(self, pin, mode, pull_up_down=PUD_OFF):", "body": "self.bbio_gpio.setup(pin, self._dir_mapping[mode],<EOL>pull_up_down=self._pud_mapping[pull_up_down])<EOL>", "docstring": "Set the input or output mode for a specified pin.  Mode should be\n        either OUTPUT or INPUT.", "id": "f7998:c2:m1"}
{"signature": "def set_high(self, pin):", "body": "self.output(pin, HIGH)<EOL>", "docstring": "Set the specified pin HIGH.", "id": "f7998:c0:m3"}
{"signature": "def event_detected(self, pin):", "body": "return self.bbio_gpio.event_detected(pin)<EOL>", "docstring": "Returns True if an edge has occured on a given GPIO.  You need to \n        enable edge detection using add_event_detect() first.   Pin should be \n        type IN.", "id": "f7998:c2:m8"}
{"signature": "def setup(self, pin, mode, pull_up_down=PUD_OFF):", "body": "self.rpi_gpio.setup(pin, self._dir_mapping[mode],<EOL>pull_up_down=self._pud_mapping[pull_up_down])<EOL>", "docstring": "Set the input or output mode for a specified pin.  Mode should be\n        either OUTPUT or INPUT.", "id": "f7998:c1:m1"}
{"signature": "def cleanup(self, pin=None):", "body": "if pin is None:<EOL><INDENT>self.bbio_gpio.cleanup()<EOL><DEDENT>else:<EOL><INDENT>self.bbio_gpio.cleanup(pin)<EOL><DEDENT>", "docstring": "Clean up GPIO event detection for specific pin, or all pins if none \n        is specified.", "id": "f7998:c2:m10"}
{"signature": "def add_event_detect(self, pin, edge, callback=None, bouncetime=-<NUM_LIT:1>):", "body": "kwargs = {}<EOL>if callback:<EOL><INDENT>kwargs['<STR_LIT>']=callback<EOL><DEDENT>if bouncetime > <NUM_LIT:0>:<EOL><INDENT>kwargs['<STR_LIT>']=bouncetime<EOL><DEDENT>self.mraa_gpio.Gpio.isr(self.mraa_gpio.Gpio(pin), self._edge_mapping[edge], **kwargs)<EOL>", "docstring": "Enable edge detection events for a particular GPIO channel.  Pin \n        should be type IN.  Edge must be RISING, FALLING or BOTH.  Callback is a\n        function for the event.  Bouncetime is switch bounce timeout in ms for \n        callback", "id": "f7998:c3:m4"}
{"signature": "def input(self,pin):", "body": "return self.mraa_gpio.Gpio.read(self.mraa_gpio.Gpio(pin))<EOL>", "docstring": "Read the specified pin and return HIGH/true if the pin is pulled high,\n        or LOW/false if pulled low.", "id": "f7998:c3:m3"}
{"signature": "def input_pins(self, pins):", "body": "<EOL>return [self.input(pin) for pin in pins]<EOL>", "docstring": "Read multiple pins specified in the given list and return list of pin values\n        GPIO.HIGH/True if the pin is pulled high, or GPIO.LOW/False if pulled low.", "id": "f7998:c0:m9"}
{"signature": "def remove_event_detect(self, pin):", "body": "self.rpi_gpio.remove_event_detect(pin)<EOL>", "docstring": "Remove edge detection for a particular GPIO channel.  Pin should be\n        type IN.", "id": "f7998:c1:m6"}
{"signature": "def is_high(self, pin):", "body": "return self.input(pin) == HIGH<EOL>", "docstring": "Return true if the specified pin is pulled high.", "id": "f7998:c0:m5"}
{"signature": "def setup(self,pin,mode):", "body": "self.mraa_gpio.Gpio.dir(self.mraa_gpio.Gpio(pin),self._dir_mapping[mode])<EOL>", "docstring": "Set the input or output mode for a specified pin.  Mode should be\n        either DIR_IN or DIR_OUT.", "id": "f7998:c3:m1"}
{"signature": "def remove_event_detect(self, pin):", "body": "self.mraa_gpio.Gpio.isrExit(self.mraa_gpio.Gpio(pin))<EOL>", "docstring": "Remove edge detection for a particular GPIO channel.  Pin should be\n        type IN.", "id": "f7998:c3:m5"}
{"signature": "def input_pins(self, pins):", "body": "<EOL>return [self.bbio_gpio.input(pin) for pin in pins]<EOL>", "docstring": "Read multiple pins specified in the given list and return list of pin values\n        GPIO.HIGH/True if the pin is pulled high, or GPIO.LOW/False if pulled low.", "id": "f7998:c2:m4"}
{"signature": "def pi_version():", "body": "<EOL>with open('<STR_LIT>', '<STR_LIT:r>') as infile:<EOL><INDENT>cpuinfo = infile.read()<EOL><DEDENT>match = re.search('<STR_LIT>', cpuinfo,<EOL>flags=re.MULTILINE | re.IGNORECASE)<EOL>if not match:<EOL><INDENT>return None<EOL><DEDENT>if match.group(<NUM_LIT:1>) == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>elif match.group(<NUM_LIT:1>) == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT:2><EOL><DEDENT>elif match.group(<NUM_LIT:1>) == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT:3><EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Detect the version of the Raspberry Pi.  Returns either 1, 2 or\n    None depending on if it's a Raspberry Pi 1 (model A, B, A+, B+),\n    Raspberry Pi 2 (model B+), or not a Raspberry Pi.", "id": "f8000:m2"}
{"signature": "def platform_detect():", "body": "<EOL>pi = pi_version()<EOL>if pi is not None:<EOL><INDENT>return RASPBERRY_PI<EOL><DEDENT>plat = platform.platform()<EOL>if plat.lower().find('<STR_LIT>') > -<NUM_LIT:1>:<EOL><INDENT>return BEAGLEBONE_BLACK<EOL><DEDENT>elif plat.lower().find('<STR_LIT>') > -<NUM_LIT:1>:<EOL><INDENT>return BEAGLEBONE_BLACK<EOL><DEDENT>elif plat.lower().find('<STR_LIT>') > -<NUM_LIT:1>:<EOL><INDENT>return BEAGLEBONE_BLACK<EOL><DEDENT>elif plat.lower().find('<STR_LIT>') > -<NUM_LIT:1>:<EOL><INDENT>return JETSON_NANO<EOL><DEDENT>try: <EOL><INDENT>import mraa <EOL>if mraa.getPlatformName()=='<STR_LIT>':<EOL><INDENT>return MINNOWBOARD<EOL><DEDENT><DEDENT>except ImportError:<EOL><INDENT>pass<EOL><DEDENT>return UNKNOWN<EOL>", "docstring": "Detect if running on the Raspberry Pi or Beaglebone Black and return the\n    platform type.  Will return RASPBERRY_PI, BEAGLEBONE_BLACK, or UNKNOWN.", "id": "f8000:m0"}
{"signature": "def pi_revision():", "body": "<EOL>with open('<STR_LIT>', '<STR_LIT:r>') as infile:<EOL><INDENT>for line in infile:<EOL><INDENT>match = re.match('<STR_LIT>', line, flags=re.IGNORECASE)<EOL>if match and match.group(<NUM_LIT:1>) in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>elif match:<EOL><INDENT>return <NUM_LIT:2><EOL><DEDENT><DEDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Detect the revision number of a Raspberry Pi, useful for changing\n    functionality like default I2C bus based on revision.", "id": "f8000:m1"}
{"signature": "def readS16(self, register, little_endian=True):", "body": "result = self.readU16(register, little_endian)<EOL>if result > <NUM_LIT>:<EOL><INDENT>result -= <NUM_LIT><EOL><DEDENT>return result<EOL>", "docstring": "Read a signed 16-bit value from the specified register, with the\n        specified endianness (default little endian, or least significant byte\n        first).", "id": "f8002:c2:m21"}
{"signature": "def _transaction_end(self):", "body": "<EOL>self._command.append('<STR_LIT>')<EOL>self._ft232h._write('<STR_LIT>'.join(self._command))<EOL>return bytearray(self._ft232h._poll_read(self._expected))<EOL>", "docstring": "End I2C transaction and get response bytes, including ACKs.", "id": "f8002:c2:m3"}
{"signature": "def disable_FTDI_driver():", "body": "logger.debug('<STR_LIT>')<EOL>if sys.platform == '<STR_LIT>':<EOL><INDENT>logger.debug('<STR_LIT>')<EOL>_check_running_as_root()<EOL>subprocess.call(<EOL>'<STR_LIT>', shell=True)<EOL>subprocess.call(<EOL>'<STR_LIT>', shell=True)<EOL><DEDENT>elif sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>logger.debug('<STR_LIT>')<EOL>_check_running_as_root()<EOL>subprocess.call('<STR_LIT>', shell=True)<EOL>subprocess.call('<STR_LIT>', shell=True)<EOL><DEDENT>", "docstring": "Disable the FTDI drivers for the current platform.  This is necessary\n    because they will conflict with libftdi and accessing the FT232H.  Note you\n    can enable the FTDI drivers again by calling enable_FTDI_driver.", "id": "f8002:m1"}
{"signature": "def writeRaw8(self, value):", "body": "value = value & <NUM_LIT><EOL>self._idle()<EOL>self._transaction_start()<EOL>self._i2c_start()<EOL>self._i2c_write_bytes([self._address_byte(False), value])<EOL>self._i2c_stop()<EOL>response = self._transaction_end()<EOL>self._verify_acks(response)<EOL>", "docstring": "Write an 8-bit value on the bus (without register).", "id": "f8002:c2:m12"}
{"signature": "def output(self, pin, value):", "body": "if pin < <NUM_LIT:0> or pin > <NUM_LIT:15>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self._output_pin(pin, value)<EOL>self.mpsse_write_gpio()<EOL>", "docstring": "Set the specified pin the provided high/low value.  Value should be\n        either HIGH/LOW or a boolean (true = high).", "id": "f8002:c0:m16"}
{"signature": "def ping(self):", "body": "self._idle()<EOL>self._transaction_start()<EOL>self._i2c_start()<EOL>self._i2c_write_bytes([self._address_byte(False)])<EOL>self._i2c_stop()<EOL>response = self._transaction_end()<EOL>if len(response) != <NUM_LIT:1>:<EOL><INDENT>raise RuntimeError(<EOL>'<STR_LIT>'.format(len(response)))<EOL><DEDENT>return ((response[<NUM_LIT:0>] & <NUM_LIT>) == <NUM_LIT>)<EOL>", "docstring": "Attempt to detect if a device at this address is present on the I2C\n        bus.  Will send out the device's address for writing and verify an ACK\n        is received.  Returns true if the ACK is received, and false if not.", "id": "f8002:c2:m11"}
{"signature": "def _mpsse_enable(self):", "body": "<EOL>self._check(ftdi.set_bitmode, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>self._check(ftdi.set_bitmode, <NUM_LIT:0>, <NUM_LIT:2>)<EOL>", "docstring": "Enable MPSSE mode on the FTDI device.", "id": "f8002:c0:m5"}
{"signature": "def setup(self, pin, mode):", "body": "self._setup_pin(pin, mode)<EOL>self.mpsse_write_gpio()<EOL>", "docstring": "Set the input or output mode for a specified pin.  Mode should be\n        either OUT or IN.", "id": "f8002:c0:m13"}
{"signature": "def enumerate_device_serials(vid=FT232H_VID, pid=FT232H_PID):", "body": "try:<EOL><INDENT>ctx = None<EOL>ctx = ftdi.new()<EOL>device_list = None<EOL>count, device_list = ftdi.usb_find_all(ctx, vid, pid)<EOL>if count < <NUM_LIT:0>:<EOL><INDENT>raise RuntimeError('<STR_LIT>'.format(<EOL>count, ftdi.get_error_string(self._ctx)))<EOL><DEDENT>devices = []<EOL>while device_list is not None:<EOL><INDENT>ret, manufacturer, description, serial = ftdi.usb_get_strings(<EOL>ctx, device_list.dev, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>)<EOL>if serial is not None:<EOL><INDENT>devices.append(serial)<EOL><DEDENT>device_list = device_list.next<EOL><DEDENT>return devices<EOL><DEDENT>finally:<EOL><INDENT>if device_list is not None:<EOL><INDENT>ftdi.list_free(device_list)<EOL><DEDENT>if ctx is not None:<EOL><INDENT>ftdi.free(ctx)<EOL><DEDENT><DEDENT>", "docstring": "Return a list of all FT232H device serial numbers connected to the\n    machine.  You can use these serial numbers to open a specific FT232H device\n    by passing it to the FT232H initializer's serial parameter.", "id": "f8002:m4"}
{"signature": "def setup_pins(self, pins, values={}, write=True):", "body": "<EOL>for pin, mode in iter(pins.items()):<EOL><INDENT>self._setup_pin(pin, mode)<EOL><DEDENT>for pin, value in iter(values.items()):<EOL><INDENT>self._output_pin(pin, value)<EOL><DEDENT>if write:<EOL><INDENT>self.mpsse_write_gpio()<EOL><DEDENT>", "docstring": "Setup multiple pins as inputs or outputs at once.  Pins should be a\n        dict of pin name to pin mode (IN or OUT).  Optional starting values of\n        pins can be provided in the values dict (with pin name to pin value).", "id": "f8002:c0:m14"}
{"signature": "def bulkread(self, data=[], lengthR='<STR_LIT:None>', readmode=<NUM_LIT:1>):", "body": "<EOL>if (<NUM_LIT:1> > lengthR > <NUM_LIT>) | (len(data) > <NUM_LIT>):<EOL><INDENT>print(<EOL>'<STR_LIT>')<EOL>print('<STR_LIT>')<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>if (lengthR == '<STR_LIT:None>') & (readmode == <NUM_LIT:1>):<EOL><INDENT>lengthR = len(data)<EOL><DEDENT>commandW = <NUM_LIT> | (self.lsbfirst << <NUM_LIT:3>) | self.write_clock_ve<EOL>lengthW = len(data) - <NUM_LIT:1><EOL>len_lowW = (lengthW) & <NUM_LIT><EOL>len_highW = ((lengthW) >> <NUM_LIT:8>) & <NUM_LIT><EOL>commandR = <NUM_LIT> | (self.lsbfirst << <NUM_LIT:3>) | (self.read_clock_ve << <NUM_LIT:2>)<EOL>length = lengthR<EOL>if lengthR % <NUM_LIT:2> == <NUM_LIT:1>:<EOL><INDENT>length += <NUM_LIT:1><EOL><DEDENT>length = length/<NUM_LIT:2><EOL>lenremain = lengthR - length<EOL>len_lowR = (length - <NUM_LIT:1>) & <NUM_LIT><EOL>len_highR = ((length - <NUM_LIT:1>) >> <NUM_LIT:8>) & <NUM_LIT><EOL>logger.debug(<EOL>'<STR_LIT>'.format(commandW))<EOL>logger.debug('<STR_LIT>'.format(commandR))<EOL>self._assert_cs()<EOL>self._ft232h._write(str(bytearray((commandW, len_lowW, len_highW))))<EOL>self._ft232h._write(str(bytearray(data)))<EOL>self._ft232h._write(str(bytearray((commandR, len_lowR, len_highR))))<EOL>payload1 = self._ft232h._poll_read(length)<EOL>self._ft232h._write(str(bytearray((commandR, len_lowR, len_highR))))<EOL>payload2 = self._ft232h._poll_read(lenremain)<EOL>self._deassert_cs()<EOL>return bytearray(payload1 + payload2)<EOL>", "docstring": "Half-duplex SPI write then read. 
Send command and payload to slave as bytearray\n            then consequently read out response from the slave for length in bytes.\n        Designed for use with NOR or NAND flash chips, and possibly SD cards...etc...\n        Read command is cut in half and performed twice in series to prevent single byte errors.\n        Hardware limits per command are enforced before doing anything.\n        Read length is an optional argument, so that it can function similar to transfer\n            but still half-duplex.\n        For reading without writing, one can send a blank array or skip that argument.", "id": "f8002:c1:m8"}
{"signature": "def mpsse_write_gpio(self):", "body": "self._write(self.mpsse_gpio())<EOL>", "docstring": "Write the current MPSSE GPIO state to the FT232H chip.", "id": "f8002:c0:m10"}
{"signature": "def close(self):", "body": "if self._ctx is not None:<EOL><INDENT>ftdi.free(self._ctx)<EOL><DEDENT>self._ctx = None<EOL>", "docstring": "Close the FTDI device.  Will be automatically called when the program ends.", "id": "f8002:c0:m1"}
{"signature": "def enable_FTDI_driver():", "body": "logger.debug('<STR_LIT>')<EOL>if sys.platform == '<STR_LIT>':<EOL><INDENT>logger.debug('<STR_LIT>')<EOL>_check_running_as_root()<EOL>subprocess.check_call(<EOL>'<STR_LIT>', shell=True)<EOL>subprocess.check_call(<EOL>'<STR_LIT>', shell=True)<EOL><DEDENT>elif sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>logger.debug('<STR_LIT>')<EOL>_check_running_as_root()<EOL>subprocess.check_call('<STR_LIT>', shell=True)<EOL>subprocess.check_call('<STR_LIT>', shell=True)<EOL><DEDENT>", "docstring": "Re-enable the FTDI drivers for the current platform.", "id": "f8002:m2"}
{"signature": "def _address_byte(self, read=True):", "body": "if read:<EOL><INDENT>return (self._address << <NUM_LIT:1>) | <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>return self._address << <NUM_LIT:1><EOL><DEDENT>", "docstring": "Return the address byte with the specified R/W bit set.  If read is\n        True the R/W bit will be 1, otherwise the R/W bit will be 0.", "id": "f8002:c2:m9"}
{"signature": "def write16(self, register, value, little_endian=True):", "body": "value = value & <NUM_LIT><EOL>value_low = value & <NUM_LIT><EOL>value_high = (value >> <NUM_LIT:8>) & <NUM_LIT><EOL>if not little_endian:<EOL><INDENT>value_low, value_high = value_high, value_low<EOL><DEDENT>self._idle()<EOL>self._transaction_start()<EOL>self._i2c_start()<EOL>self._i2c_write_bytes([self._address_byte(False), register, value_low,<EOL>value_high])<EOL>self._i2c_stop()<EOL>response = self._transaction_end()<EOL>self._verify_acks(response)<EOL>", "docstring": "Write a 16-bit value to the specified register.", "id": "f8002:c2:m14"}
{"signature": "def readS16BE(self, register):", "body": "return self.readS16(register, little_endian=False)<EOL>", "docstring": "Read a signed 16-bit value from the specified register, in big\n        endian byte order.", "id": "f8002:c2:m25"}
{"signature": "def _i2c_start(self):", "body": "<EOL>self._ft232h.output_pins({<NUM_LIT:0>: GPIO.HIGH, <NUM_LIT:1>: GPIO.LOW}, write=False)<EOL>self._command.append(self._ft232h.mpsse_gpio() * _REPEAT_DELAY)<EOL>self._ft232h.output_pins({<NUM_LIT:0>: GPIO.LOW, <NUM_LIT:1>: GPIO.LOW}, write=False)<EOL>self._command.append(self._ft232h.mpsse_gpio() * _REPEAT_DELAY)<EOL>", "docstring": "Send I2C start signal. Must be called within a transaction start/end.", "id": "f8002:c2:m4"}
{"signature": "def output_pins(self, pins, write=True):", "body": "for pin, value in iter(pins.items()):<EOL><INDENT>self._output_pin(pin, value)<EOL><DEDENT>if write:<EOL><INDENT>self.mpsse_write_gpio()<EOL><DEDENT>", "docstring": "Set multiple pins high or low at once.  Pins should be a dict of pin\n        name to pin value (HIGH/True for 1, LOW/False for 0).  All provided pins\n        will be set to the given values.", "id": "f8002:c0:m17"}
{"signature": "def mpsse_gpio(self):", "body": "level_low = chr(self._level & <NUM_LIT>)<EOL>level_high = chr((self._level >> <NUM_LIT:8>) & <NUM_LIT>)<EOL>dir_low = chr(self._direction & <NUM_LIT>)<EOL>dir_high = chr((self._direction >> <NUM_LIT:8>) & <NUM_LIT>)<EOL>return str(bytearray((<NUM_LIT>, level_low, dir_low, <NUM_LIT>, level_high, dir_high)))<EOL>", "docstring": "Return command to update the MPSSE GPIO state to the current direction\n        and level.", "id": "f8002:c0:m9"}
{"signature": "def get_i2c_device(self, address, **kwargs):", "body": "return I2CDevice(self, address, **kwargs)<EOL>", "docstring": "Return an I2CDevice instance using this FT232H object and the provided\n        I2C address.  Meant to be passed as the i2c_provider parameter to objects\n        which use the Adafruit_Python_GPIO library for I2C.", "id": "f8002:c0:m11"}
{"signature": "def set_mode(self, mode):", "body": "if mode < <NUM_LIT:0> or mode > <NUM_LIT:3>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if mode == <NUM_LIT:0>:<EOL><INDENT>self.write_clock_ve = <NUM_LIT:1><EOL>self.read_clock_ve = <NUM_LIT:0><EOL>clock_base = GPIO.LOW<EOL><DEDENT>elif mode == <NUM_LIT:1>:<EOL><INDENT>self.write_clock_ve = <NUM_LIT:0><EOL>self.read_clock_ve = <NUM_LIT:1><EOL>clock_base = GPIO.LOW<EOL><DEDENT>elif mode == <NUM_LIT:2>:<EOL><INDENT>self.write_clock_ve = <NUM_LIT:1><EOL>self.read_clock_ve = <NUM_LIT:0><EOL>clock_base = GPIO.HIGH<EOL><DEDENT>elif mode == <NUM_LIT:3>:<EOL><INDENT>self.write_clock_ve = <NUM_LIT:0><EOL>self.read_clock_ve = <NUM_LIT:1><EOL>clock_base = GPIO.HIGH<EOL><DEDENT>self._ft232h.setup_pins(<EOL>{<NUM_LIT:0>: GPIO.OUT, <NUM_LIT:1>: GPIO.OUT, <NUM_LIT:2>: GPIO.IN}, {<NUM_LIT:0>: clock_base})<EOL>", "docstring": "Set SPI mode which controls clock polarity and phase.  Should be a\n        numeric value 0, 1, 2, or 3.  See wikipedia page for details on meaning:\n        http://en.wikipedia.org/wiki/Serial_Peripheral_Interface_Bus", "id": "f8002:c1:m4"}
{"signature": "def __init__(self, ft232h, address, clock_hz=<NUM_LIT>):", "body": "self._address = address<EOL>self._ft232h = ft232h<EOL>self._ft232h.mpsse_set_clock(clock_hz, three_phase=True)<EOL>self._ft232h._write('<STR_LIT>')<EOL>self._idle()<EOL>", "docstring": "Create an instance of the I2C device at the specified address on the\n        specified I2C bus number.", "id": "f8002:c2:m0"}
{"signature": "def readRaw8(self):", "body": "self._idle()<EOL>self._transaction_start()<EOL>self._i2c_start()<EOL>self._i2c_write_bytes([self._address_byte(False)])<EOL>self._i2c_stop()<EOL>self._i2c_idle()<EOL>self._i2c_start()<EOL>self._i2c_write_bytes([self._address_byte(True)])<EOL>self._i2c_read_bytes(<NUM_LIT:1>)<EOL>self._i2c_stop()<EOL>response = self._transaction_end()<EOL>self._verify_acks(response[:-<NUM_LIT:1>])<EOL>return response[-<NUM_LIT:1>]<EOL>", "docstring": "Read an 8-bit value on the bus (without register).", "id": "f8002:c2:m17"}
{"signature": "def mpsse_read_gpio(self):", "body": "<EOL>self._write('<STR_LIT>')<EOL>data = self._poll_read(<NUM_LIT:2>)<EOL>low_byte = ord(data[<NUM_LIT:0>])<EOL>high_byte = ord(data[<NUM_LIT:1>])<EOL>logger.debug('<STR_LIT>'.format(<EOL>low_byte, high_byte))<EOL>return (high_byte << <NUM_LIT:8>) | low_byte<EOL>", "docstring": "Read both GPIO bus states and return a 16 bit value with their state.\n        D0-D7 are the lower 8 bits and C0-C7 are the upper 8 bits.", "id": "f8002:c0:m8"}
{"signature": "def set_clock_hz(self, hz):", "body": "pass<EOL>", "docstring": "Set the speed of the SPI clock.  This is unsupported with the bit\n        bang SPI class and will be ignored.", "id": "f8003:c2:m1"}
{"signature": "def read(self, length, assert_ss=True, deassert_ss=True):", "body": "if self._miso is None:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>if assert_ss and self._ss is not None:<EOL><INDENT>self._gpio.set_low(self._ss)<EOL><DEDENT>result = bytearray(length)<EOL>for i in range(length):<EOL><INDENT>for j in range(<NUM_LIT:8>):<EOL><INDENT>self._gpio.output(self._sclk, not self._clock_base)<EOL>if self._read_leading:<EOL><INDENT>if self._gpio.is_high(self._miso):<EOL><INDENT>result[i] |= self._read_shift(self._mask, j)<EOL><DEDENT>else:<EOL><INDENT>result[i] &= ~self._read_shift(self._mask, j)<EOL><DEDENT><DEDENT>self._gpio.output(self._sclk, self._clock_base)<EOL>if not self._read_leading:<EOL><INDENT>if self._gpio.is_high(self._miso):<EOL><INDENT>result[i] |= self._read_shift(self._mask, j)<EOL><DEDENT>else:<EOL><INDENT>result[i] &= ~self._read_shift(self._mask, j)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if deassert_ss and self._ss is not None:<EOL><INDENT>self._gpio.set_high(self._ss)<EOL><DEDENT>return result<EOL>", "docstring": "Half-duplex SPI read.  If assert_ss is true, the SS line will be\n        asserted low, the specified length of bytes will be clocked in the MISO\n        line, and if deassert_ss is true the SS line will be put back high.\n        Bytes which are read will be returned as a bytearray object.", "id": "f8003:c2:m6"}
{"signature": "def set_bit_order(self, order):", "body": "if order == MSBFIRST:<EOL><INDENT>self._device.lsbfirst = False<EOL><DEDENT>elif order == LSBFIRST:<EOL><INDENT>self._device.lsbfirst = True<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Set order of bits to be read/written over serial lines.  Should be\n        either MSBFIRST for most-significant first, or LSBFIRST for\n        least-signifcant first.", "id": "f8003:c0:m3"}
{"signature": "def set_mode(self, mode):", "body": "if mode < <NUM_LIT:0> or mode > <NUM_LIT:3>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if mode & <NUM_LIT>:<EOL><INDENT>self._clock_base = GPIO.HIGH<EOL><DEDENT>else:<EOL><INDENT>self._clock_base = GPIO.LOW<EOL><DEDENT>if mode & <NUM_LIT>:<EOL><INDENT>self._read_leading = False<EOL><DEDENT>else:<EOL><INDENT>self._read_leading = True<EOL><DEDENT>self._gpio.output(self._sclk, self._clock_base)<EOL>", "docstring": "Set SPI mode which controls clock polarity and phase.  Should be a\n        numeric value 0, 1, 2, or 3.  See wikipedia page for details on meaning:\n        http://en.wikipedia.org/wiki/Serial_Peripheral_Interface_Bus", "id": "f8003:c2:m2"}
{"signature": "def set_bit_order(self, order):", "body": "if order == MSBFIRST:<EOL><INDENT>self._device.lsbmode(False)<EOL><DEDENT>elif order == LSBFIRST:<EOL><INDENT>self._device.lsbmode(True)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Set order of bits to be read/written over serial lines.  Should be\n        either MSBFIRST for most-significant first, or LSBFIRST for\n        least-signifcant first.", "id": "f8003:c1:m3"}
{"signature": "def set_clock_hz(self, hz):", "body": "self._device.max_speed_hz=hz<EOL>", "docstring": "Set the speed of the SPI clock in hertz.  Note that not all speeds\n        are supported and a lower speed might be chosen by the hardware.", "id": "f8003:c0:m1"}
{"signature": "def set_clock_hz(self, hz):", "body": "self._device.frequency(hz)<EOL>", "docstring": "Set the speed of the SPI clock in hertz.  Note that not all speeds\n        are supported and a lower speed might be chosen by the hardware.", "id": "f8003:c1:m1"}
{"signature": "def transfer(self, data):", "body": "return bytearray(self._device.xfer2(data))<EOL>", "docstring": "Full-duplex SPI read and write.  The specified array of bytes will be\n        clocked out the MOSI line, while simultaneously bytes will be read from\n        the MISO line.  Read bytes will be returned as a bytearray object.", "id": "f8003:c0:m7"}
{"signature": "def __init__(self, gpio, sclk, mosi=None, miso=None, ss=None):", "body": "self._gpio = gpio<EOL>self._sclk = sclk<EOL>self._mosi = mosi<EOL>self._miso = miso<EOL>self._ss = ss<EOL>gpio.setup(sclk, GPIO.OUT)<EOL>if mosi is not None:<EOL><INDENT>gpio.setup(mosi, GPIO.OUT)<EOL><DEDENT>if miso is not None:<EOL><INDENT>gpio.setup(miso, GPIO.IN)<EOL><DEDENT>if ss is not None:<EOL><INDENT>gpio.setup(ss, GPIO.OUT)<EOL>gpio.set_high(ss)<EOL><DEDENT>self.set_mode(<NUM_LIT:0>)<EOL>self.set_bit_order(MSBFIRST)<EOL>", "docstring": "Initialize bit bang (or software) based SPI.  Must provide a BaseGPIO\n        class, the SPI clock, and optionally MOSI, MISO, and SS (slave select)\n        pin numbers. If MOSI is set to None then writes will be disabled and fail\n        with an error, likewise for MISO reads will be disabled.  If SS is set to\n        None then SS will not be asserted high/low by the library when\n        transfering data.", "id": "f8003:c2:m0"}
{"signature": "def transfer(self, data, assert_ss=True, deassert_ss=True):", "body": "if self._mosi is None:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>if self._miso is None:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>if assert_ss and self._ss is not None:<EOL><INDENT>self._gpio.set_low(self._ss)<EOL><DEDENT>result = bytearray(len(data))<EOL>for i in range(len(data)):<EOL><INDENT>for j in range(<NUM_LIT:8>):<EOL><INDENT>if self._write_shift(data[i], j) & self._mask:<EOL><INDENT>self._gpio.set_high(self._mosi)<EOL><DEDENT>else:<EOL><INDENT>self._gpio.set_low(self._mosi)<EOL><DEDENT>self._gpio.output(self._sclk, not self._clock_base)<EOL>if self._read_leading:<EOL><INDENT>if self._gpio.is_high(self._miso):<EOL><INDENT>result[i] |= self._read_shift(self._mask, j)<EOL><DEDENT>else:<EOL><INDENT>result[i] &= ~self._read_shift(self._mask, j)<EOL><DEDENT><DEDENT>self._gpio.output(self._sclk, self._clock_base)<EOL>if not self._read_leading:<EOL><INDENT>if self._gpio.is_high(self._miso):<EOL><INDENT>result[i] |= self._read_shift(self._mask, j)<EOL><DEDENT>else:<EOL><INDENT>result[i] &= ~self._read_shift(self._mask, j)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if deassert_ss and self._ss is not None:<EOL><INDENT>self._gpio.set_high(self._ss)<EOL><DEDENT>return result<EOL>", "docstring": "Full-duplex SPI read and write.  If assert_ss is true, the SS line\n        will be asserted low, the specified bytes will be clocked out the MOSI\n        line while bytes will also be read from the MISO line, and if\n        deassert_ss is true the SS line will be put back high.  Bytes which are\n        read will be returned as a bytearray object.", "id": "f8003:c2:m7"}
{"signature": "def write(self, data, assert_ss=True, deassert_ss=True):", "body": "<EOL>if self._mosi is None:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>if assert_ss and self._ss is not None:<EOL><INDENT>self._gpio.set_low(self._ss)<EOL><DEDENT>for byte in data:<EOL><INDENT>for i in range(<NUM_LIT:8>):<EOL><INDENT>if self._write_shift(byte, i) & self._mask:<EOL><INDENT>self._gpio.set_high(self._mosi)<EOL><DEDENT>else:<EOL><INDENT>self._gpio.set_low(self._mosi)<EOL><DEDENT>self._gpio.output(self._sclk, not self._clock_base)<EOL>self._gpio.output(self._sclk, self._clock_base)<EOL><DEDENT><DEDENT>if deassert_ss and self._ss is not None:<EOL><INDENT>self._gpio.set_high(self._ss)<EOL><DEDENT>", "docstring": "Half-duplex SPI write.  If assert_ss is True, the SS line will be\n        asserted low, the specified bytes will be clocked out the MOSI line, and\n        if deassert_ss is True the SS line be put back high.", "id": "f8003:c2:m5"}
{"signature": "def write(self, data):", "body": "self._device.write(bytearray(data))<EOL>", "docstring": "Half-duplex SPI write.  The specified array of bytes will be clocked\n        out the MOSI line.", "id": "f8003:c1:m5"}
{"signature": "def set_mode(self,mode):", "body": "if mode < <NUM_LIT:0> or mode > <NUM_LIT:3>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self._device.mode(mode)<EOL>", "docstring": "Set SPI mode which controls clock polarity and phase.  Should be a\n        numeric value 0, 1, 2, or 3.  See wikipedia page for details on meaning:\n        http://en.wikipedia.org/wiki/Serial_Peripheral_Interface_Bus", "id": "f8003:c1:m2"}
{"signature": "def readU16LE(self, register):", "body": "return self.readU16(register, little_endian=True)<EOL>", "docstring": "Read an unsigned 16-bit value from the specified register, in little\n        endian byte order.", "id": "f8004:c0:m11"}
{"signature": "def readS16LE(self, register):", "body": "return self.readS16(register, little_endian=True)<EOL>", "docstring": "Read a signed 16-bit value from the specified register, in little\n        endian byte order.", "id": "f8004:c0:m13"}
{"signature": "def readU16(self, register, little_endian=True):", "body": "result = self._bus.read_word_data(self._address,register) & <NUM_LIT><EOL>self._logger.debug(\"<STR_LIT>\",<EOL>result, register, register+<NUM_LIT:1>)<EOL>if not little_endian:<EOL><INDENT>result = ((result << <NUM_LIT:8>) & <NUM_LIT>) + (result >> <NUM_LIT:8>)<EOL><DEDENT>return result<EOL>", "docstring": "Read an unsigned 16-bit value from the specified register, with the\n        specified endianness (default little endian, or least significant byte\n        first).", "id": "f8004:c0:m9"}
{"signature": "def readList(self, register, length):", "body": "results = self._bus.read_i2c_block_data(self._address, register, length)<EOL>self._logger.debug(\"<STR_LIT>\",<EOL>register, results)<EOL>return results<EOL>", "docstring": "Read a length number of bytes from the specified register.  Results\n        will be returned as a bytearray.", "id": "f8004:c0:m5"}
{"signature": "def readS8(self, register):", "body": "result = self.readU8(register)<EOL>if result > <NUM_LIT>:<EOL><INDENT>result -= <NUM_LIT><EOL><DEDENT>return result<EOL>", "docstring": "Read a signed byte from the specified register.", "id": "f8004:c0:m8"}
{"signature": "def readS16BE(self, register):", "body": "return self.readS16(register, little_endian=False)<EOL>", "docstring": "Read a signed 16-bit value from the specified register, in big\n        endian byte order.", "id": "f8004:c0:m14"}
{"signature": "def readU16BE(self, register):", "body": "return self.readU16(register, little_endian=False)<EOL>", "docstring": "Read an unsigned 16-bit value from the specified register, in big\n        endian byte order.", "id": "f8004:c0:m12"}
{"signature": "def write8(self, register, value):", "body": "value = value & <NUM_LIT><EOL>self._bus.write_byte_data(self._address, register, value)<EOL>self._logger.debug(\"<STR_LIT>\",<EOL>value, register)<EOL>", "docstring": "Write an 8-bit value to the specified register.", "id": "f8004:c0:m2"}
{"signature": "def readU8(self, register):", "body": "result = self._bus.read_byte_data(self._address, register) & <NUM_LIT><EOL>self._logger.debug(\"<STR_LIT>\",<EOL>result, register)<EOL>return result<EOL>", "docstring": "Read an unsigned byte from the specified register.", "id": "f8004:c0:m7"}
{"signature": "def get_download_url(self, version):", "body": "pass<EOL>", "docstring": "Get the download url for the given version.\n\nReturn None if unsupported", "id": "f8013:c0:m5"}
{"signature": "def get_changes(self, since_version):", "body": "pass<EOL>", "docstring": "Get a list of changes since (and not including) the given version name\n\n        Output should be in the format:\n        [\n            (commit, author, message),\n            ...\n        ]", "id": "f8013:c0:m2"}
{"signature": "def is_available(self, project_dir):", "body": "pass<EOL>", "docstring": "Is this VCS class available for the given project directory", "id": "f8013:c0:m1"}
{"signature": "def commit(self, message, files):", "body": "pass<EOL>", "docstring": "Commit the changes to files", "id": "f8013:c0:m3"}
{"signature": "def determine_paths(self, package_name=None, create_package_dir=False, dry_run=False):", "body": "<EOL>self.project_dir = Path(os.getenv('<STR_LIT>') or os.getcwd())<EOL>distribution = self.get_distribution()<EOL>if distribution:<EOL><INDENT>self.project_name = distribution.get_name()<EOL><DEDENT>else:<EOL><INDENT>self.project_name = self.project_dir.name<EOL><DEDENT>if os.path.isdir(self.project_dir / \"<STR_LIT:src>\"):<EOL><INDENT>package_search_dir = self.project_dir / \"<STR_LIT:src>\"<EOL><DEDENT>else:<EOL><INDENT>package_search_dir = self.project_dir<EOL><DEDENT>created_package_dir = False<EOL>if not package_name:<EOL><INDENT>package_name = self.project_name.replace(\"<STR_LIT:->\", \"<STR_LIT:_>\")<EOL>def get_matches(name):<EOL><INDENT>possibles = [n for n in os.listdir(package_search_dir) if os.path.isdir(package_search_dir / n)]<EOL>return difflib.get_close_matches(name, possibles, n=<NUM_LIT:1>, cutoff=<NUM_LIT>)<EOL><DEDENT>close = get_matches(package_name)<EOL>if not close and \"<STR_LIT:_>\" in package_name:<EOL><INDENT>short_package_name = \"<STR_LIT:_>\".join(package_name.split(\"<STR_LIT:_>\")[<NUM_LIT:1>:])<EOL>close = get_matches(short_package_name)<EOL><DEDENT>if not close:<EOL><INDENT>if create_package_dir:<EOL><INDENT>package_dir = package_search_dir / package_name<EOL>created_package_dir = True<EOL>if not dry_run:<EOL><INDENT>print(\"<STR_LIT>\" % package_dir)<EOL>os.mkdir(package_dir)<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\" % package_dir)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise CommandError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>package_name = close[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>self.package_name = package_name<EOL>self.package_dir = package_search_dir / package_name<EOL>if not os.path.exists(self.package_dir) and not created_package_dir:<EOL><INDENT>raise CommandError(\"<STR_LIT>\" % self.package_dir)<EOL><DEDENT>", "docstring": "Determine paths automatically and a little intelligently", "id": 
"f8017:c0:m3"}
{"signature": "def set_seeds(seed):", "body": "rng.seed(seed)<EOL>nprng.seed(seed)<EOL>", "docstring": "Sets the seeds of both random number generators used by Foolbox.\n\n    Parameters\n    ----------\n    seed : int\n        The seed for both random number generators.", "id": "f8026:m0"}
{"signature": "def binarized2_bn_model():", "body": "import tensorflow as tf<EOL>bounds = (<NUM_LIT:0>, <NUM_LIT:1>)<EOL>channel_axis = <NUM_LIT:3><EOL>channels = <NUM_LIT:10>  <EOL>def mean_brightness_net(images):<EOL><INDENT>logits = tf.reduce_mean(images, axis=(<NUM_LIT:1>, <NUM_LIT:2>))<EOL>return logits<EOL><DEDENT>images = tf.placeholder(tf.float32, (None, <NUM_LIT:5>, <NUM_LIT:5>, channels))<EOL>logits = mean_brightness_net(images)<EOL>def preprocessing(x):<EOL><INDENT>x = binarize(x, (<NUM_LIT:0>, <NUM_LIT:1>), included_in='<STR_LIT>')<EOL>def backward(x):<EOL><INDENT>return x<EOL><DEDENT>return x, backward<EOL><DEDENT>with tf.Session():<EOL><INDENT>model = TensorFlowModel(<EOL>images,<EOL>logits,<EOL>bounds=bounds,<EOL>channel_axis=channel_axis,<EOL>preprocessing=preprocessing)<EOL>yield model<EOL><DEDENT>", "docstring": "Creates a simple brightness model that does not require training.", "id": "f8043:m34"}
{"signature": "def eg_bn_model_factory(request):", "body": "GradientEstimator = request.param<EOL>def eg_bn_model():<EOL><INDENT>cm_model = contextmanager(bn_model)<EOL>with cm_model() as model:<EOL><INDENT>gradient_estimator = GradientEstimator(epsilon=<NUM_LIT>)<EOL>model = ModelWithEstimatedGradients(model, gradient_estimator)<EOL>yield model<EOL><DEDENT><DEDENT>return eg_bn_model<EOL>", "docstring": "Same as bn_model but with estimated gradient.", "id": "f8043:m10"}
{"signature": "@abstractmethod<EOL><INDENT>def is_adversarial(self, predictions, label):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Decides if predictions for an image are adversarial given\n        a reference label.\n\n        Parameters\n        ----------\n        predictions : :class:`numpy.ndarray`\n            A vector with the pre-softmax predictions for some image.\n        label : int\n            The label of the unperturbed reference image.\n\n        Returns\n        -------\n        bool\n            True if an image with the given predictions is an adversarial\n            example when the ground-truth class is given by label, False\n            otherwise.", "id": "f8073:c0:m1"}
{"signature": "def name(self):", "body": "names = (criterion.name() for criterion in self._criteria)<EOL>return '<STR_LIT>'.join(sorted(names))<EOL>", "docstring": "Concatenates the names of the given criteria in alphabetical order.\n\n        If a sub-criterion is itself a combined criterion, its name is\n        first split into the individual names and the names of the\n        sub-sub criteria is used instead of the name of the sub-criterion.\n        This is done recursively to ensure that the order and the hierarchy\n        of the criteria does not influence the name.\n\n        Returns\n        -------\n        str\n            The alphabetically sorted names of the sub-criteria concatenated\n            using double underscores between them.", "id": "f8073:c1:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def backward(self, gradient, image):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Backpropagates the gradient of some loss w.r.t. the logits\n        through the network and returns the gradient of that loss w.r.t\n        to the input image.\n\n        Parameters\n        ----------\n        gradient : `numpy.ndarray`\n            Gradient of some loss w.r.t. the logits.\n        image : `numpy.ndarray`\n            Single input with shape as expected by the model\n            (without the batch dimension).\n\n        Returns\n        -------\n        gradient : `numpy.ndarray`\n            The gradient w.r.t the image.\n\n        See Also\n        --------\n        :meth:`gradient`", "id": "f8079:c1:m2"}
{"signature": "@abstractmethod<EOL><INDENT>def batch_predictions(self, images):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Calculates predictions for a batch of images.\n\n        Parameters\n        ----------\n        images : `numpy.ndarray`\n            Batch of inputs with shape as expected by the model.\n\n        Returns\n        -------\n        `numpy.ndarray`\n            Predictions (logits, i.e. before the softmax) with shape\n            (batch size, number of classes).\n\n        See Also\n        --------\n        :meth:`predictions`", "id": "f8079:c0:m7"}
{"signature": "@classmethod<EOL><INDENT>def from_keras(cls, model, bounds, input_shape=None,<EOL>channel_axis=<NUM_LIT:3>, preprocessing=(<NUM_LIT:0>, <NUM_LIT:1>)):<DEDENT>", "body": "import tensorflow as tf<EOL>if input_shape is None:<EOL><INDENT>try:<EOL><INDENT>input_shape = model.input_shape[<NUM_LIT:1>:]<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>with tf.keras.backend.get_session().as_default():<EOL><INDENT>inputs = tf.placeholder(tf.float32, (None,) + input_shape)<EOL>logits = model(inputs)<EOL>return cls(inputs, logits, bounds=bounds,<EOL>channel_axis=channel_axis, preprocessing=preprocessing)<EOL><DEDENT>", "docstring": "Alternative constructor for a TensorFlowModel that\n        accepts a `tf.keras.Model` instance.\n\n        Parameters\n        ----------\n        model : `tensorflow.keras.Model`\n            A `tensorflow.keras.Model` that accepts a single input tensor\n            and returns a single output tensor representing logits.\n        bounds : tuple\n            Tuple of lower and upper bound for the pixel values, usually\n            (0, 1) or (0, 255).\n        input_shape : tuple\n            The shape of a single input, e.g. (28, 28, 1) for MNIST.\n            If None, tries to get the the shape from the model's\n            input_shape attribute.\n        channel_axis : int\n            The index of the axis that represents color channels.\n        preprocessing: 2-element tuple with floats or numpy arrays\n            Elementwises preprocessing of input; we first subtract the first\n            element of preprocessing from the input and then divide the input\n            by the second element.", "id": "f8082:c0:m1"}
{"signature": "def binarize(x, values, threshold=None, included_in='<STR_LIT>'):", "body": "lower, upper = values<EOL>if threshold is None:<EOL><INDENT>threshold = (lower + upper) / <NUM_LIT><EOL><DEDENT>x = x.copy()<EOL>if included_in == '<STR_LIT>':<EOL><INDENT>x[x <= threshold] = lower<EOL>x[x > threshold] = upper<EOL><DEDENT>elif included_in == '<STR_LIT>':<EOL><INDENT>x[x < threshold] = lower<EOL>x[x >= threshold] = upper<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return x<EOL>", "docstring": "Binarizes the values of x.\n\n    Parameters\n    ----------\n    values : tuple of two floats\n        The lower and upper value to which the inputs are mapped.\n    threshold : float\n        The threshold; defaults to (values[0] + values[1]) / 2 if None.\n    included_in : str\n        Whether the threshold value itself belongs to the lower or\n        upper interval.", "id": "f8087:m3"}
{"signature": "def softmax(logits):", "body": "assert logits.ndim == <NUM_LIT:1><EOL>logits = logits - np.max(logits)<EOL>e = np.exp(logits)<EOL>return e / np.sum(e)<EOL>", "docstring": "Transforms predictions into probability values.\n\n    Parameters\n    ----------\n    logits : array_like\n        The logits predicted by the model.\n\n    Returns\n    -------\n    `numpy.ndarray`\n        Probability values corresponding to the logits.", "id": "f8087:m0"}
{"signature": "@call_decorator<EOL><INDENT>def __call__(self, input_or_adv, label=None, unpack=True, epsilons=<NUM_LIT:1000>):<DEDENT>", "body": "a = input_or_adv<EOL>del input_or_adv<EOL>del label<EOL>del unpack<EOL>image = a.original_image<EOL>bounds = a.bounds()<EOL>min_, max_ = bounds<EOL>if not isinstance(epsilons, Iterable):<EOL><INDENT>epsilons = np.linspace(<NUM_LIT:0>, <NUM_LIT:1>, num=epsilons + <NUM_LIT:1>)[<NUM_LIT:1>:]<EOL><DEDENT>for epsilon in epsilons:<EOL><INDENT>noise = self._sample_noise(epsilon, image, bounds)<EOL>perturbed = image + epsilon * noise<EOL>perturbed = np.clip(perturbed, min_, max_)<EOL>_, is_adversarial = a.predictions(perturbed)<EOL>if is_adversarial:<EOL><INDENT>return<EOL><DEDENT><DEDENT>", "docstring": "Adds uniform or Gaussian noise to the image, gradually increasing\n        the standard deviation until the image is misclassified.\n\n        Parameters\n        ----------\n        input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n            The original, unperturbed input as a `numpy.ndarray` or\n            an :class:`Adversarial` instance.\n        label : int\n            The reference label of the original input. Must be passed\n            if `a` is a `numpy.ndarray`, must not be passed if `a` is\n            an :class:`Adversarial` instance.\n        unpack : bool\n            If true, returns the adversarial input, otherwise returns\n            the Adversarial object.\n        epsilons : int or Iterable[float]\n            Either Iterable of noise levels or number of noise levels\n            between 0 and 1 that should be tried.", "id": "f8089:c0:m0"}
{"signature": "@call_decorator<EOL><INDENT>def __call__(self, input_or_adv, label=None, unpack=True,<EOL>binary_search_steps=<NUM_LIT:5>, max_iterations=<NUM_LIT:1000>,<EOL>confidence=<NUM_LIT:0>, learning_rate=<NUM_LIT>,<EOL>initial_const=<NUM_LIT>, abort_early=True):<DEDENT>", "body": "a = input_or_adv<EOL>del input_or_adv<EOL>del label<EOL>del unpack<EOL>if not a.has_gradient():<EOL><INDENT>logging.fatal('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return<EOL><DEDENT>min_, max_ = a.bounds()<EOL>def to_attack_space(x):<EOL><INDENT>a = (min_ + max_) / <NUM_LIT:2><EOL>b = (max_ - min_) / <NUM_LIT:2><EOL>x = (x - a) / b<EOL>x = x * <NUM_LIT><EOL>return np.arctanh(x)<EOL><DEDENT>def to_model_space(x):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>x = np.tanh(x)<EOL>grad = <NUM_LIT:1> - np.square(x)<EOL>a = (min_ + max_) / <NUM_LIT:2><EOL>b = (max_ - min_) / <NUM_LIT:2><EOL>x = x * b + a<EOL>grad = grad * b<EOL>return x, grad<EOL><DEDENT>att_original = to_attack_space(a.original_image)<EOL>reconstructed_original, _ = to_model_space(att_original)<EOL>const = initial_const<EOL>lower_bound = <NUM_LIT:0><EOL>upper_bound = np.inf<EOL>for binary_search_step in range(binary_search_steps):<EOL><INDENT>if binary_search_step == binary_search_steps - <NUM_LIT:1> andbinary_search_steps >= <NUM_LIT:10>:<EOL><INDENT>const = upper_bound<EOL><DEDENT>logging.info('<STR_LIT>'.format(const))<EOL>att_perturbation = np.zeros_like(att_original)<EOL>optimizer = AdamOptimizer(att_perturbation.shape)<EOL>found_adv = False  <EOL>loss_at_previous_check = np.inf<EOL>for iteration in range(max_iterations):<EOL><INDENT>x, dxdp = to_model_space(att_original + att_perturbation)<EOL>logits, is_adv = a.predictions(x)<EOL>loss, dldx = self.loss_function(<EOL>const, a, x, logits, reconstructed_original,<EOL>confidence, min_, max_)<EOL>logging.info('<STR_LIT>'.format(<EOL>loss, a.distance))<EOL>assert dldx.shape == x.shape<EOL>assert dxdp.shape == x.shape<EOL>gradient = dldx * dxdp<EOL>att_perturbation += 
optimizer(gradient, learning_rate)<EOL>if is_adv:<EOL><INDENT>found_adv = True<EOL><DEDENT>if abort_early anditeration % (np.ceil(max_iterations / <NUM_LIT:10>)) == <NUM_LIT:0>:<EOL><INDENT>if not (loss <= <NUM_LIT> * loss_at_previous_check):<EOL><INDENT>break  <EOL><DEDENT>loss_at_previous_check = loss<EOL><DEDENT><DEDENT>if found_adv:<EOL><INDENT>logging.info('<STR_LIT>'.format(const))<EOL>upper_bound = const<EOL><DEDENT>else:<EOL><INDENT>logging.info('<STR_LIT>'<EOL>'<STR_LIT>'.format(const))<EOL>lower_bound = const<EOL><DEDENT>if upper_bound == np.inf:<EOL><INDENT>const *= <NUM_LIT:10><EOL><DEDENT>else:<EOL><INDENT>const = (lower_bound + upper_bound) / <NUM_LIT:2><EOL><DEDENT><DEDENT>", "docstring": "The L2 version of the Carlini & Wagner attack.\n\n        Parameters\n        ----------\n        input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n            The original, unperturbed input as a `numpy.ndarray` or\n            an :class:`Adversarial` instance.\n        label : int\n            The reference label of the original input. Must be passed\n            if `a` is a `numpy.ndarray`, must not be passed if `a` is\n            an :class:`Adversarial` instance.\n        unpack : bool\n            If true, returns the adversarial input, otherwise returns\n            the Adversarial object.\n        binary_search_steps : int\n            The number of steps for the binary search used to\n            find the optimal tradeoff-constant between distance and confidence.\n        max_iterations : int\n            The maximum number of iterations. 
Larger values are more\n            accurate; setting it too small will require a large learning rate\n            and will produce poor results.\n        confidence : int or float\n            Confidence of adversarial examples: a higher value produces\n            adversarials that are further away, but more strongly classified\n            as adversarial.\n        learning_rate : float\n            The learning rate for the attack algorithm. Smaller values\n            produce better results but take longer to converge.\n        initial_const : float\n            The initial tradeoff-constant to use to tune the relative\n            importance of distance and confidence. If `binary_search_steps`\n            is large, the initial constant is not important.\n        abort_early : bool\n            If True, Adam will be aborted if the loss hasn't decreased\n            for some time (a tenth of max_iterations).", "id": "f8092:c0:m0"}
{"signature": "def name(self):", "body": "return self.__class__.__name__<EOL>", "docstring": "Returns a human readable name that uniquely identifies\n        the attack with its hyperparameters.\n\n        Returns\n        -------\n        str\n            Human readable name that uniquely identifies the attack\n            with its hyperparameters.\n\n        Notes\n        -----\n        Defaults to the class name but subclasses can provide more\n        descriptive names and must take hyperparameters into account.", "id": "f8094:c0:m3"}
{"signature": "def _initialize(self):", "body": "pass<EOL>", "docstring": "Additional initializer that can be overwritten by\n        subclasses without redefining the full __init__ method\n        including all arguments and documentation.", "id": "f8094:c0:m1"}
{"signature": "@call_decorator<EOL><INDENT>def __call__(self, input_or_adv, label=None, unpack=True,<EOL>max_pixels=<NUM_LIT:1000>):<DEDENT>", "body": "a = input_or_adv<EOL>del input_or_adv<EOL>del label<EOL>del unpack<EOL>channel_axis = a.channel_axis(batch=False)<EOL>image = a.original_image<EOL>axes = [i for i in range(image.ndim) if i != channel_axis]<EOL>assert len(axes) == <NUM_LIT:2><EOL>h = image.shape[axes[<NUM_LIT:0>]]<EOL>w = image.shape[axes[<NUM_LIT:1>]]<EOL>min_, max_ = a.bounds()<EOL>pixels = nprng.permutation(h * w)<EOL>pixels = pixels[:max_pixels]<EOL>for i, pixel in enumerate(pixels):<EOL><INDENT>x = pixel % w<EOL>y = pixel // w<EOL>location = [x, y]<EOL>location.insert(channel_axis, slice(None))<EOL>location = tuple(location)<EOL>for value in [min_, max_]:<EOL><INDENT>perturbed = image.copy()<EOL>perturbed[location] = value<EOL>_, is_adv = a.predictions(perturbed)<EOL>if is_adv:<EOL><INDENT>return<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Perturbs just a single pixel and sets it to the min or max.\n\n        Parameters\n        ----------\n        input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n            The original, correctly classified image. If image is a\n            numpy array, label must be passed as well. If image is\n            an :class:`Adversarial` instance, label must not be passed.\n        label : int\n            The reference label of the original image. Must be passed\n            if image is a numpy array, must not be passed if image is\n            an :class:`Adversarial` instance.\n        unpack : bool\n            If true, returns the adversarial image, otherwise returns\n            the Adversarial object.\n        max_pixels : int\n            Maximum number of pixels to try.", "id": "f8095:c0:m0"}
{"signature": "@call_decorator<EOL><INDENT>def __call__(self, input_or_adv, label=None, unpack=True,<EOL>steps=<NUM_LIT:100>, subsample=<NUM_LIT:10>, p=None):<DEDENT>", "body": "a = input_or_adv<EOL>del input_or_adv<EOL>del label<EOL>del unpack<EOL>if not a.has_gradient():<EOL><INDENT>return<EOL><DEDENT>if a.target_class() is not None:<EOL><INDENT>logging.fatal('<STR_LIT>')<EOL>return<EOL><DEDENT>if p is None:<EOL><INDENT>if a._distance == MeanSquaredDistance:<EOL><INDENT>p = <NUM_LIT:2><EOL><DEDENT>elif a._distance == Linfinity:<EOL><INDENT>p = np.inf<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>if not (<NUM_LIT:1> <= p <= np.inf):<EOL><INDENT>raise ValueError<EOL><DEDENT>if p not in [<NUM_LIT:2>, np.inf]:<EOL><INDENT>raise NotImplementedError<EOL><DEDENT>_label = a.original_class<EOL>logits, _ = a.predictions(a.original_image)<EOL>labels = np.argsort(logits)[::-<NUM_LIT:1>]<EOL>if subsample:<EOL><INDENT>logging.info('<STR_LIT>'.format(subsample))<EOL>assert isinstance(subsample, int)<EOL>labels = labels[:subsample]<EOL><DEDENT>def get_residual_labels(logits):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return [<EOL>k for k in labels<EOL>if logits[k] < logits[_label]]<EOL><DEDENT>perturbed = a.original_image<EOL>min_, max_ = a.bounds()<EOL>for step in range(steps):<EOL><INDENT>logits, grad, is_adv = a.predictions_and_gradient(perturbed)<EOL>if is_adv:<EOL><INDENT>return<EOL><DEDENT>loss = -crossentropy(logits=logits, label=_label)<EOL>residual_labels = get_residual_labels(logits)<EOL>losses = [<EOL>-crossentropy(logits=logits, label=k)<EOL>for k in residual_labels]<EOL>grads = [a.gradient(perturbed, label=k) for k in residual_labels]<EOL>diffs = [(l - loss, g - grad) for l, g in zip(losses, grads)]<EOL>if p == <NUM_LIT:2>:<EOL><INDENT>distances = [abs(dl) / (np.linalg.norm(dg) + <NUM_LIT>)<EOL>for dl, dg in diffs]<EOL><DEDENT>elif p == np.inf:<EOL><INDENT>distances = [abs(dl) / 
(np.sum(np.abs(dg)) + <NUM_LIT>)<EOL>for dl, dg in diffs]<EOL><DEDENT>else:  <EOL><INDENT>assert False<EOL><DEDENT>optimal = np.argmin(distances)<EOL>df, dg = diffs[optimal]<EOL>if p == <NUM_LIT:2>:<EOL><INDENT>perturbation = abs(df) / (np.linalg.norm(dg) + <NUM_LIT>)**<NUM_LIT:2> * (-dg)<EOL><DEDENT>elif p == np.inf:<EOL><INDENT>perturbation = abs(df) / (np.sum(np.abs(dg)) + <NUM_LIT>)* np.sign(-dg)<EOL><DEDENT>else:  <EOL><INDENT>assert False<EOL><DEDENT>perturbed = perturbed + <NUM_LIT> * perturbation<EOL>perturbed = np.clip(perturbed, min_, max_)<EOL><DEDENT>a.predictions(perturbed)<EOL>", "docstring": "Simple and close to optimal gradient-based\n        adversarial attack.\n\n        Parameters\n        ----------\n        input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n            The original, unperturbed input as a `numpy.ndarray` or\n            an :class:`Adversarial` instance.\n        label : int\n            The reference label of the original input. Must be passed\n            if `a` is a `numpy.ndarray`, must not be passed if `a` is\n            an :class:`Adversarial` instance.\n        unpack : bool\n            If true, returns the adversarial input, otherwise returns\n            the Adversarial object.\n        steps : int\n            Maximum number of steps to perform.\n        subsample : int\n            Limit on the number of the most likely classes that should\n            be considered. A small value is usually sufficient and much\n            faster.\n        p : int or float\n            Lp-norm that should be minimzed, must be 2 or np.inf.", "id": "f8096:c0:m0"}
{"signature": "@call_decorator<EOL><INDENT>def __call__(self, input_or_adv, label=None, unpack=True,<EOL>epsilons=<NUM_LIT:1000>):<DEDENT>", "body": "a = input_or_adv<EOL>del input_or_adv<EOL>del label<EOL>del unpack<EOL>image = a.original_image<EOL>min_, max_ = a.bounds()<EOL>target = (max_ + min_) / <NUM_LIT:2><EOL>if not isinstance(epsilons, Iterable):<EOL><INDENT>epsilons = np.linspace(<NUM_LIT:0>, <NUM_LIT:1>, num=epsilons + <NUM_LIT:1>)[<NUM_LIT:1>:]<EOL><DEDENT>for epsilon in epsilons:<EOL><INDENT>perturbed = (<NUM_LIT:1> - epsilon) * image + epsilon * target<EOL>_, is_adversarial = a.predictions(perturbed)<EOL>if is_adversarial:<EOL><INDENT>return<EOL><DEDENT><DEDENT>", "docstring": "Reduces the contrast of the image until it is misclassified.\n\n        Parameters\n        ----------\n        input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n            The original, unperturbed input as a `numpy.ndarray` or\n            an :class:`Adversarial` instance.\n        label : int\n            The reference label of the original input. Must be passed\n            if `a` is a `numpy.ndarray`, must not be passed if `a` is\n            an :class:`Adversarial` instance.\n        unpack : bool\n            If true, returns the adversarial input, otherwise returns\n            the Adversarial object.\n        epsilons : int or Iterable[float]\n            Either Iterable of contrast levels or number of contrast\n            levels between 1 and 0 that should be tried. Epsilons are\n            one minus the contrast level.", "id": "f8097:c0:m0"}
{"signature": "def _saliency_map(self, a, image, target, labels, mask, fast=False):", "body": "<EOL>alphas = a.gradient(image, target) * mask<EOL>if fast:<EOL><INDENT>betas = -np.ones_like(alphas)<EOL><DEDENT>else:<EOL><INDENT>betas = np.sum([<EOL>a.gradient(image, label) * mask - alphas<EOL>for label in labels], <NUM_LIT:0>)<EOL><DEDENT>salmap = np.abs(alphas) * np.abs(betas) * np.sign(alphas * betas)<EOL>idx = np.argmin(salmap)<EOL>idx = np.unravel_index(idx, mask.shape)<EOL>pix_sign = np.sign(alphas)[idx]<EOL>return idx, pix_sign<EOL>", "docstring": "Implements Algorithm 3 in manuscript", "id": "f8099:c0:m1"}
{"signature": "@call_decorator<EOL><INDENT>def __call__(self, input_or_adv, label=None, unpack=True,<EOL>epsilon=<NUM_LIT>,<EOL>num_random_targets=<NUM_LIT:0>,<EOL>maxiter=<NUM_LIT>):<DEDENT>", "body": "a = input_or_adv<EOL>del input_or_adv<EOL>del label<EOL>del unpack<EOL>if not self._approximate_gradient and not a.has_gradient():<EOL><INDENT>return<EOL><DEDENT>original_class = a.original_class<EOL>target_class = a.target_class()<EOL>if target_class is None:<EOL><INDENT>if num_random_targets == <NUM_LIT:0> and self._approximate_gradient:<EOL><INDENT>num_random_targets = <NUM_LIT:1><EOL><DEDENT>if num_random_targets == <NUM_LIT:0>:<EOL><INDENT>gradient_attack = GradientAttack()<EOL>gradient_attack(a)<EOL>adv_img = a.image<EOL>if adv_img is None:  <EOL><INDENT>num_random_targets = <NUM_LIT:1><EOL>logging.warning('<STR_LIT>')  <EOL><DEDENT>else:<EOL><INDENT>logits, _ = a.predictions(adv_img)<EOL>target_class = np.argmax(logits)<EOL>target_classes = [target_class]<EOL>logging.info('<STR_LIT>'.format(target_class))  <EOL><DEDENT><DEDENT>if num_random_targets > <NUM_LIT:0>:<EOL><INDENT>num_classes = a.num_classes()<EOL>assert num_random_targets <= num_classes - <NUM_LIT:1><EOL>target_classes = rng.sample(<EOL>range(num_classes), num_random_targets + <NUM_LIT:1>)<EOL>target_classes = [t for t in target_classes if t != original_class]  <EOL>target_classes = target_classes[:num_random_targets]<EOL>str_target_classes = [str(t) for t in target_classes]<EOL>logging.info('<STR_LIT>'.format('<STR_LIT:U+002CU+0020>'.join(str_target_classes)))  <EOL><DEDENT><DEDENT>else:<EOL><INDENT>target_classes = [target_class]<EOL><DEDENT>a._reset()<EOL>for i, target_class in enumerate(target_classes):<EOL><INDENT>self._optimize(<EOL>a, target_class,<EOL>epsilon=epsilon, maxiter=maxiter)<EOL>if len(target_classes) > <NUM_LIT:1>:  <EOL><INDENT>logging.info('<STR_LIT>'.format(i + <NUM_LIT:1>, a.distance))<EOL><DEDENT><DEDENT>", "docstring": "Uses L-BFGS-B to minimize the distance between the 
image and the\n        adversarial as well as the cross-entropy between the predictions for\n        the adversarial and the the one-hot encoded target class.\n\n        Parameters\n        ----------\n        input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n            The original, unperturbed input as a `numpy.ndarray` or\n            an :class:`Adversarial` instance.\n        label : int\n            The reference label of the original input. Must be passed\n            if `a` is a `numpy.ndarray`, must not be passed if `a` is\n            an :class:`Adversarial` instance.\n        unpack : bool\n            If true, returns the adversarial input, otherwise returns\n            the Adversarial object.\n        epsilon : float\n            Epsilon of the binary search.\n        num_random_targets : int\n            Number of random target classes if no target class is given\n            by the criterion.\n        maxiter : int\n            Maximum number of iterations for L-BFGS-B.", "id": "f8103:c0:m2"}
{"signature": "@call_decorator<EOL><INDENT>def __call__(self, input_or_adv, label=None, unpack=True,<EOL>binary_search=True,<EOL>epsilon=<NUM_LIT>,<EOL>stepsize=<NUM_LIT>,<EOL>iterations=<NUM_LIT:10>,<EOL>decay_factor=<NUM_LIT:1.0>,<EOL>random_start=False,<EOL>return_early=True):<DEDENT>", "body": "a = input_or_adv<EOL>del input_or_adv<EOL>del label<EOL>del unpack<EOL>assert epsilon > <NUM_LIT:0><EOL>self._decay_factor = decay_factor<EOL>self._run(a, binary_search,<EOL>epsilon, stepsize, iterations,<EOL>random_start, return_early)<EOL>", "docstring": "Momentum-based iterative gradient attack known as\n        Momentum Iterative Method.\n\n        Parameters\n        ----------\n        input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n            The original, unperturbed input as a `numpy.ndarray` or\n            an :class:`Adversarial` instance.\n        label : int\n            The reference label of the original input. Must be passed\n            if `a` is a `numpy.ndarray`, must not be passed if `a` is\n            an :class:`Adversarial` instance.\n        unpack : bool\n            If true, returns the adversarial input, otherwise returns\n            the Adversarial object.\n        binary_search : bool\n            Whether to perform a binary search over epsilon and stepsize,\n            keeping their ratio constant and using their values to start\n            the search. 
If False, hyperparameters are not optimized.\n            Can also be an integer, specifying the number of binary\n            search steps (default 20).\n        epsilon : float\n            Limit on the perturbation size; if binary_search is True,\n            this value is only for initialization and automatically\n            adapted.\n        stepsize : float\n            Step size for gradient descent; if binary_search is True,\n            this value is only for initialization and automatically\n            adapted.\n        iterations : int\n            Number of iterations for each gradient descent run.\n        decay_factor : float\n            Decay factor used by the momentum term.\n        random_start : bool\n            Start the attack from a random point rather than from the\n            original input.\n        return_early : bool\n            Whether an individual gradient descent run should stop as\n            soon as an adversarial is found.", "id": "f8104:c15:m2"}
{"signature": "@call_decorator<EOL><INDENT>def __call__(self, input_or_adv, label=None, unpack=True,<EOL>binary_search=True,<EOL>epsilon=<NUM_LIT>,<EOL>stepsize=<NUM_LIT>,<EOL>iterations=<NUM_LIT:10>,<EOL>random_start=False,<EOL>return_early=True):<DEDENT>", "body": "a = input_or_adv<EOL>del input_or_adv<EOL>del label<EOL>del unpack<EOL>assert epsilon > <NUM_LIT:0><EOL>self._run(a, binary_search,<EOL>epsilon, stepsize, iterations,<EOL>random_start, return_early)<EOL>", "docstring": "Simple iterative gradient-based attack known as\n        Basic Iterative Method, Projected Gradient Descent or FGSM^k.\n\n        Parameters\n        ----------\n        input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n            The original, unperturbed input as a `numpy.ndarray` or\n            an :class:`Adversarial` instance.\n        label : int\n            The reference label of the original input. Must be passed\n            if `a` is a `numpy.ndarray`, must not be passed if `a` is\n            an :class:`Adversarial` instance.\n        unpack : bool\n            If true, returns the adversarial input, otherwise returns\n            the Adversarial object.\n        binary_search : bool or int\n            Whether to perform a binary search over epsilon and stepsize,\n            keeping their ratio constant and using their values to start\n            the search. 
If False, hyperparameters are not optimized.\n            Can also be an integer, specifying the number of binary\n            search steps (default 20).\n        epsilon : float\n            Limit on the perturbation size; if binary_search is True,\n            this value is only for initialization and automatically\n            adapted.\n        stepsize : float\n            Step size for gradient descent; if binary_search is True,\n            this value is only for initialization and automatically\n            adapted.\n        iterations : int\n            Number of iterations for each gradient descent run.\n        random_start : bool\n            Start the attack from a random point rather than from the\n            original input.\n        return_early : bool\n            Whether an individual gradient descent run should stop as\n            soon as an adversarial is found.", "id": "f8104:c11:m0"}
{"signature": "@call_decorator<EOL><INDENT>def __call__(self, input_or_adv, label=None, unpack=True,<EOL>epsilons=<NUM_LIT:1000>, max_epsilon=<NUM_LIT:1>):<DEDENT>", "body": "a = input_or_adv<EOL>del input_or_adv<EOL>del label<EOL>del unpack<EOL>return self._run(a, epsilons=epsilons, max_epsilon=max_epsilon)<EOL>", "docstring": "Adds the sign of the gradient to the image, gradually increasing\n        the magnitude until the image is misclassified.\n\n        Parameters\n        ----------\n        input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n            The original, unperturbed input as a `numpy.ndarray` or\n            an :class:`Adversarial` instance.\n        label : int\n            The reference label of the original input. Must be passed\n            if `a` is a `numpy.ndarray`, must not be passed if `a` is\n            an :class:`Adversarial` instance.\n        unpack : bool\n            If true, returns the adversarial input, otherwise returns\n            the Adversarial object.\n        epsilons : int or Iterable[float]\n            Either Iterable of step sizes in the direction of the sign of\n            the gradient or number of step sizes between 0 and max_epsilon\n            that should be tried.\n        max_epsilon : float\n            Largest step size if epsilons is not an iterable.", "id": "f8106:c2:m0"}
{"signature": "@call_decorator<EOL><INDENT>def __call__(self, input_or_adv, label=None, unpack=True,<EOL>epsilons=<NUM_LIT:100>, repetitions=<NUM_LIT:10>):<DEDENT>", "body": "a = input_or_adv<EOL>del input_or_adv<EOL>del label<EOL>del unpack<EOL>image = a.original_image<EOL>min_, max_ = a.bounds()<EOL>axis = a.channel_axis(batch=False)<EOL>channels = image.shape[axis]<EOL>shape = list(image.shape)<EOL>shape[axis] = <NUM_LIT:1><EOL>r = max_ - min_<EOL>pixels = np.prod(shape)<EOL>epsilons = min(epsilons, pixels)<EOL>max_epsilon = <NUM_LIT:1><EOL>for _ in range(repetitions):<EOL><INDENT>for epsilon in np.linspace(<NUM_LIT:0>, max_epsilon, num=epsilons + <NUM_LIT:1>)[<NUM_LIT:1>:]:<EOL><INDENT>p = epsilon<EOL>u = nprng.uniform(size=shape)<EOL>u = u.repeat(channels, axis=axis)<EOL>salt = (u >= <NUM_LIT:1> - p / <NUM_LIT:2>).astype(image.dtype) * r<EOL>pepper = -(u < p / <NUM_LIT:2>).astype(image.dtype) * r<EOL>perturbed = image + salt + pepper<EOL>perturbed = np.clip(perturbed, min_, max_)<EOL>if a.normalized_distance(perturbed) >= a.distance:<EOL><INDENT>continue<EOL><DEDENT>_, is_adversarial = a.predictions(perturbed)<EOL>if is_adversarial:<EOL><INDENT>max_epsilon = min(<NUM_LIT:1>, epsilon * <NUM_LIT>)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Increases the amount of salt and pepper noise until the\n        image is misclassified.\n\n        Parameters\n        ----------\n        input_or_adv : `numpy.ndarray` or :class:`Adversarial`\n            The original, unperturbed input as a `numpy.ndarray` or\n            an :class:`Adversarial` instance.\n        label : int\n            The reference label of the original input. 
Must be passed\n            if `a` is a `numpy.ndarray`, must not be passed if `a` is\n            an :class:`Adversarial` instance.\n        unpack : bool\n            If true, returns the adversarial input, otherwise returns\n            the Adversarial object.\n        epsilons : int\n            Number of steps to try between probability 0 and 1.\n        repetitions : int\n            Specifies how often the attack will be repeated.", "id": "f8107:c0:m0"}
{"signature": "@call_decorator<EOL><INDENT>def __call__(self, input_or_adv, label=None, unpack=True,<EOL>max_iter=<NUM_LIT:100>,<EOL>eta=<NUM_LIT>):<DEDENT>", "body": "a = input_or_adv<EOL>del input_or_adv<EOL>del label<EOL>del unpack<EOL>if not a.has_gradient():<EOL><INDENT>return<EOL><DEDENT>if a.target_class() is not None:<EOL><INDENT>logging.fatal('<STR_LIT>')<EOL>return<EOL><DEDENT>l2_norm = np.linalg.norm(a.original_image)<EOL>min_, max_ = a.bounds()<EOL>perturbed_image = a.original_image.copy()<EOL>for i in range(max_iter):<EOL><INDENT>logits, gradients, is_adversarial =a.predictions_and_gradient(perturbed_image)<EOL>if is_adversarial:<EOL><INDENT>return<EOL><DEDENT>score = np.max(softmax(logits))<EOL>gradients = -gradients / score<EOL>gradient_l2_norm = np.linalg.norm(gradients)<EOL>delta = self._delta(eta, l2_norm, score,<EOL>gradient_l2_norm, a.num_classes())<EOL>current_pertubation = self._perturbation(delta,<EOL>gradients,<EOL>gradient_l2_norm)<EOL>perturbed_image += current_pertubation<EOL>perturbed_image = np.clip(perturbed_image, min_, max_)<EOL><DEDENT>", "docstring": "Parameters\n----------\ninput_or_adv : `numpy.ndarray` or :class:`Adversarial`\n    The original, unperturbed input as a `numpy.ndarray` or\n    an :class:`Adversarial` instance.\nlabel : int\n    The reference label of the original input. Must be passed\n    if `a` is a `numpy.ndarray`, must not be passed if `a` is\n    an :class:`Adversarial` instance.\nunpack : bool\n    If true, returns the adversarial input, otherwise returns\n    the Adversarial object.\nmax_iter : int\n    The maximum number of iterations.\neta : float\n    the eta coefficient", "id": "f8108:c0:m0"}
{"signature": "def _create_vec_field(fval, gradf, d1x, d2x, color_axis, smooth=<NUM_LIT:0>):", "body": "if color_axis == <NUM_LIT:2>:<EOL><INDENT>gradf = _transpose_image(gradf)<EOL><DEDENT>c, h, w = gradf.shape  <EOL>alpha1 = np.sum(gradf * d1x, axis=<NUM_LIT:0>)<EOL>alpha2 = np.sum(gradf * d2x, axis=<NUM_LIT:0>)<EOL>norm_squared_alpha = (alpha1 ** <NUM_LIT:2>).sum() + (alpha2 ** <NUM_LIT:2>).sum()<EOL>if smooth > <NUM_LIT:0>:<EOL><INDENT>alpha1 = gaussian_filter(alpha1, smooth)<EOL>alpha2 = gaussian_filter(alpha2, smooth)<EOL>norm_squared_alpha = (alpha1 ** <NUM_LIT:2>).sum() + (alpha2 ** <NUM_LIT:2>).sum()<EOL>alpha1 = gaussian_filter(alpha1, smooth)<EOL>alpha2 = gaussian_filter(alpha2, smooth)<EOL><DEDENT>vec_field = np.empty((h, w, <NUM_LIT:2>))<EOL>vec_field[:, :, <NUM_LIT:0>] = -fval * alpha1 / norm_squared_alpha<EOL>vec_field[:, :, <NUM_LIT:1>] = -fval * alpha2 / norm_squared_alpha<EOL>return vec_field<EOL>", "docstring": "Calculate the deformation vector field\n    In:\n    fval: float\n    gradf: numpy.ndarray\n        of shape C x h x w with C = 3 or C = 1\n        (color channels), h, w >= 1.\n    d1x: numpy.ndarray\n        of shape C x h x w and [type] = 'Float' or 'Double'.\n    d2x: numpy.ndarray\n        of shape C x h x w and [type] = 'Float' or 'Double'.\n    smooth: float\n        Width of the Gaussian kernel used for smoothing\n        (default is 0 for no smoothing).\n    Out:\n    vec_field: numpy.ndarray\n        of shape (2, h, w).", "id": "f8110:m4"}
{"signature": "def _compose(image, vec_field, color_axis):", "body": "if color_axis == <NUM_LIT:2>:<EOL><INDENT>image = _transpose_image(image)<EOL><DEDENT>c, h, w = image.shape  <EOL>hrange = np.arange(h)<EOL>wrange = np.arange(w)<EOL>MGx, MGy = np.meshgrid(wrange, hrange)<EOL>defMGx = (MGx + vec_field[:, :, <NUM_LIT:0>]).clip(<NUM_LIT:0>, w - <NUM_LIT:1>)<EOL>defMGy = (MGy + vec_field[:, :, <NUM_LIT:1>]).clip(<NUM_LIT:0>, h - <NUM_LIT:1>)<EOL>new_image = np.empty_like(image)<EOL>for channel in range(c):<EOL><INDENT>interpolation = RectBivariateSpline(hrange, wrange, image[channel],<EOL>kx=<NUM_LIT:1>, ky=<NUM_LIT:1>)<EOL>new_image[channel] = interpolation(defMGy, defMGx, grid=False)<EOL><DEDENT>if color_axis == <NUM_LIT:2>:<EOL><INDENT>return _re_transpose_image(new_image)<EOL><DEDENT>else:<EOL><INDENT>return new_image<EOL><DEDENT>", "docstring": "Calculate the composition of the function image with the vector\n    field vec_field by interpolation.\n    new_func = compose(image, vec_field)\n    In:\n    image: numpy.ndarray\n        of shape C x h x w with C = 3 or C = 1 (color channels),\n        h, w >= 2, and [type] = 'Float' or 'Double'.\n        Contains the values of a function f: R ^ 2 -> R ^ C\n        on the grid {0, ..., h - 1} x {0, ..., w - 1}.\n    vec_field: numpy.array\n        of shape (h, w, 2)\n    vec_field[y, x, 0] is the x-coordinate of the vector vec_field[y, x]\n    vec_field[y, x, 1] is the y-coordinate of the vector vec_field[y, x]\n    positive x-direction is along rows from left to right\n    positive y-direction is along columns from above to below", "id": "f8110:m3"}
{"signature": "def __is_adversarial(self, image, predictions, in_bounds):", "body": "is_adversarial = self.__criterion.is_adversarial(<EOL>predictions, self.__original_class)<EOL>assert isinstance(is_adversarial, bool) orisinstance(is_adversarial, np.bool_)<EOL>if is_adversarial:<EOL><INDENT>is_best, distance = self.__new_adversarial(<EOL>image, predictions, in_bounds)<EOL><DEDENT>else:<EOL><INDENT>is_best = False<EOL>distance = None<EOL><DEDENT>return is_adversarial, is_best, distance<EOL>", "docstring": "Interface to criterion.is_adverarial that calls\n        __new_adversarial if necessary.\n\n        Parameters\n        ----------\n        predictions : :class:`numpy.ndarray`\n            A vector with the pre-softmax predictions for some image.\n        label : int\n            The label of the unperturbed reference image.", "id": "f8111:c1:m16"}
{"signature": "def gradient(self, image=None, label=None, strict=True):", "body": "assert self.has_gradient()<EOL>if image is None:<EOL><INDENT>image = self.__original_image<EOL><DEDENT>if label is None:<EOL><INDENT>label = self.__original_class<EOL><DEDENT>assert not strict or self.in_bounds(image)<EOL>self._total_gradient_calls += <NUM_LIT:1><EOL>gradient = self.__model.gradient(image, label)<EOL>assert gradient.shape == image.shape<EOL>return gradient<EOL>", "docstring": "Interface to model.gradient for attacks.\n\n        Parameters\n        ----------\n        image : `numpy.ndarray`\n            Single input with shape as expected by the model\n            (without the batch dimension).\n            Defaults to the original image.\n        label : int\n            Label used to calculate the loss that is differentiated.\n            Defaults to the original label.\n        strict : bool\n            Controls if the bounds for the pixel values should be checked.", "id": "f8111:c1:m25"}
{"signature": "def backward(self, gradient, image=None, strict=True):", "body": "assert self.has_gradient()<EOL>assert gradient.ndim == <NUM_LIT:1><EOL>if image is None:<EOL><INDENT>image = self.__original_image<EOL><DEDENT>assert not strict or self.in_bounds(image)<EOL>self._total_gradient_calls += <NUM_LIT:1><EOL>gradient = self.__model.backward(gradient, image)<EOL>assert gradient.shape == image.shape<EOL>return gradient<EOL>", "docstring": "Interface to model.backward for attacks.\n\n        Parameters\n        ----------\n        gradient : `numpy.ndarray`\n            Gradient of some loss w.r.t. the logits.\n        image : `numpy.ndarray`\n            Single input with shape as expected by the model\n            (without the batch dimension).\n\n        Returns\n        -------\n        gradient : `numpy.ndarray`\n            The gradient w.r.t the image.\n\n        See Also\n        --------\n        :meth:`gradient`", "id": "f8111:c1:m27"}
{"signature": "def reached_threshold(self):", "body": "return self.__threshold is not Noneand self.__best_distance <= self.__threshold<EOL>", "docstring": "Returns True if a threshold is given and the currently\n        best adversarial distance is smaller than the threshold.", "id": "f8111:c1:m14"}
{"signature": "def batch_predictions(<EOL>self, images, greedy=False, strict=True, return_details=False):", "body": "if strict:<EOL><INDENT>in_bounds = self.in_bounds(images)<EOL>assert in_bounds<EOL><DEDENT>self._total_prediction_calls += len(images)<EOL>predictions = self.__model.batch_predictions(images)<EOL>assert predictions.ndim == <NUM_LIT:2><EOL>assert predictions.shape[<NUM_LIT:0>] == images.shape[<NUM_LIT:0>]<EOL>if return_details:<EOL><INDENT>assert greedy<EOL><DEDENT>adversarials = []<EOL>for i in range(len(predictions)):<EOL><INDENT>if strict:<EOL><INDENT>in_bounds_i = True<EOL><DEDENT>else:<EOL><INDENT>in_bounds_i = self.in_bounds(images[i])<EOL><DEDENT>is_adversarial, is_best, distance = self.__is_adversarial(<EOL>images[i], predictions[i], in_bounds_i)<EOL>if is_adversarial and greedy:<EOL><INDENT>if return_details:<EOL><INDENT>return predictions, is_adversarial, i, is_best, distance<EOL><DEDENT>else:<EOL><INDENT>return predictions, is_adversarial, i<EOL><DEDENT><DEDENT>adversarials.append(is_adversarial)<EOL><DEDENT>if greedy:  <EOL><INDENT>if return_details:<EOL><INDENT>return predictions, False, None, False, None<EOL><DEDENT>else:<EOL><INDENT>return predictions, False, None<EOL><DEDENT><DEDENT>is_adversarial = np.array(adversarials)<EOL>assert is_adversarial.ndim == <NUM_LIT:1><EOL>assert is_adversarial.shape[<NUM_LIT:0>] == images.shape[<NUM_LIT:0>]<EOL>return predictions, is_adversarial<EOL>", "docstring": "Interface to model.batch_predictions for attacks.\n\n        Parameters\n        ----------\n        images : `numpy.ndarray`\n            Batch of inputs with shape as expected by the model.\n        greedy : bool\n            Whether the first adversarial should be returned.\n        strict : bool\n            Controls if the bounds for the pixel values should be checked.", "id": "f8111:c1:m24"}
{"signature": "@property<EOL><INDENT>def _criterion(self):  <DEDENT>", "body": "return self.__criterion<EOL>", "docstring": "Should not be used.", "id": "f8111:c1:m9"}
{"signature": "def channel_axis(self, batch):", "body": "axis = self.__model.channel_axis()<EOL>if not batch:<EOL><INDENT>axis = axis - <NUM_LIT:1><EOL><DEDENT>return axis<EOL>", "docstring": "Interface to model.channel_axis for attacks.\n\n        Parameters\n        ----------\n        batch : bool\n            Controls whether the index of the axis for a batch of images\n            (4 dimensions) or a single image (3 dimensions) should be returned.", "id": "f8111:c1:m21"}
{"signature": "def target_class(self):", "body": "try:<EOL><INDENT>target_class = self.__criterion.target_class()<EOL><DEDENT>except AttributeError:<EOL><INDENT>target_class = None<EOL><DEDENT>return target_class<EOL>", "docstring": "Interface to criterion.target_class for attacks.", "id": "f8111:c1:m17"}
{"signature": "@property<EOL><INDENT>def image(self):<DEDENT>", "body": "return self.__best_adversarial<EOL>", "docstring": "The best adversarial found so far.", "id": "f8111:c1:m2"}
{"signature": "def clone(git_uri):", "body": "hash_digest = sha256_hash(git_uri)<EOL>local_path = home_directory_path(FOLDER, hash_digest)<EOL>exists_locally = path_exists(local_path)<EOL>if not exists_locally:<EOL><INDENT>_clone_repo(git_uri, local_path)<EOL><DEDENT>else:<EOL><INDENT>logging.info(  <EOL>\"<STR_LIT>\")  <EOL><DEDENT>return local_path<EOL>", "docstring": "Clone a remote git repository to a local path.\n\n:param git_uri: the URI to the git repository to be cloned\n:return: the generated local path where the repository has been cloned to", "id": "f8113:m0"}
{"signature": "def get_model(url, module_name='<STR_LIT>', **kwargs):", "body": "repo_path = clone(url)<EOL>loader = ModelLoader.get()<EOL>model = loader.load(repo_path, module_name=module_name, **kwargs)<EOL>return model<EOL>", "docstring": "Provides utilities to download foolbox-compatible robust models\nto easily test attacks against them by simply providing a git-URL.\n\nExamples\n--------\n\nInstantiate a model:\n\n>>> from foolbox import zoo\n>>> url = \"https://github.com/bveliqi/foolbox-zoo-dummy.git\"\n>>> model = zoo.get_model(url)  # doctest: +SKIP\n\nOnly works with a foolbox-zoo compatible repository.\nI.e. models need to have a `foolbox_model.py` file\nwith a `create()`-function, which returns a foolbox-wrapped model.\n\nUsing the kwargs parameter it is possible to input an arbitrary number\nof parameters to this methods call. These parameters are forwarded to\nthe instantiated model.\n\nExample repositories:\n\n    - https://github.com/bethgelab/AnalysisBySynthesis\n    - https://github.com/bethgelab/mnist_challenge\n    - https://github.com/bethgelab/cifar10_challenge\n    - https://github.com/bethgelab/convex_adversarial\n    - https://github.com/wielandbrendel/logit-pairing-foolbox.git\n    - https://github.com/bethgelab/defensive-distillation.git\n\n:param url: URL to the git repository\n:param module_name: the name of the module to import\n:param kwargs: Optional set of parameters that will be used by the\n    to be instantiated model.\n:return: a foolbox-wrapped model instance", "id": "f8114:m0"}
{"signature": "def clean_workspace():", "body": "config = api.get_config()<EOL>workspace = config['<STR_LIT>']['<STR_LIT>']<EOL>default_workspace = os.path.expanduser(\"<STR_LIT>\")<EOL>if workspace != default_workspace and os.path.exists(workspace):<EOL><INDENT>print(\"<STR_LIT>\", workspace)<EOL>shutil.rmtree(workspace)<EOL><DEDENT>", "docstring": "Clean the working space", "id": "f8119:m0"}
{"signature": "def clean_workspace():", "body": "<EOL>result = runner.invoke(dgitmod.profile, ['<STR_LIT>'])<EOL>output = result.output<EOL>output = output.split(\"<STR_LIT:\\n>\")<EOL>workspaces = [o.strip() for o in output if \"<STR_LIT>\" in o]<EOL>if len(workspaces) > <NUM_LIT:0>:<EOL><INDENT>workspace = workspaces[<NUM_LIT:0>]<EOL>workspace = workspace.replace(\"<STR_LIT>\",\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>workspace = os.path.join(os.getcwd(), '<STR_LIT>')<EOL><DEDENT>default_workspace = os.path.expanduser(\"<STR_LIT>\")<EOL>if ((workspace != default_workspace) and<EOL>os.path.exists(workspace)):<EOL><INDENT>print(\"<STR_LIT>\", workspace)<EOL>shutil.rmtree(workspace)<EOL><DEDENT>", "docstring": "Clean the working space", "id": "f8120:m0"}
{"signature": "def get_schema(self, filename):", "body": "table_set = self.read_file(filename)<EOL>if table_set is None: <EOL><INDENT>return [] <EOL><DEDENT>row_set = table_set.tables[<NUM_LIT:0>]<EOL>offset, headers = headers_guess(row_set.sample)<EOL>row_set.register_processor(headers_processor(headers))<EOL>row_set.register_processor(offset_processor(offset + <NUM_LIT:1>))<EOL>types = type_guess(row_set.sample, strict=True)<EOL>sample = next(row_set.sample)<EOL>clean = lambda v: str(v) if not isinstance(v, str) else v <EOL>schema = []<EOL>for i, h in enumerate(headers):<EOL><INDENT>schema.append([h,<EOL>str(types[i]),<EOL>clean(sample[i].value)])<EOL><DEDENT>return schema<EOL>", "docstring": "Guess schema using messytables", "id": "f8122:c0:m4"}
{"signature": "def evaluate(self, repo, spec, args):", "body": "status = []<EOL>with cd(repo.rootdir):<EOL><INDENT>files = spec.get('<STR_LIT>', ['<STR_LIT:*>'])<EOL>resource_files = repo.find_matching_files(files)<EOL>files = glob2.glob(\"<STR_LIT>\")<EOL>disk_files = [f for f in files if os.path.isfile(f) and f != \"<STR_LIT>\"]<EOL>allfiles = list(set(resource_files + disk_files))<EOL>allfiles.sort()<EOL>for f in allfiles:<EOL><INDENT>if f in resource_files and f in disk_files:<EOL><INDENT>r = repo.get_resource(f)<EOL>coded_sha256 = r['<STR_LIT>']<EOL>computed_sha256 = compute_sha256(f)<EOL>if computed_sha256 != coded_sha256:<EOL><INDENT>status.append({<EOL>'<STR_LIT:target>': f,<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': self.name,<EOL>'<STR_LIT:description>': self.description,<EOL>'<STR_LIT:status>': '<STR_LIT>',<EOL>'<STR_LIT:message>': \"<STR_LIT>\"<EOL>})<EOL><DEDENT>else:<EOL><INDENT>status.append({<EOL>'<STR_LIT:target>': f,<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': self.name,<EOL>'<STR_LIT:description>': self.description,<EOL>'<STR_LIT:status>': '<STR_LIT:OK>',<EOL>'<STR_LIT:message>': \"<STR_LIT>\"<EOL>})<EOL><DEDENT><DEDENT>elif f in resource_files:<EOL><INDENT>status.append({<EOL>'<STR_LIT:target>': f,<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': self.name,<EOL>'<STR_LIT:description>': self.description,<EOL>'<STR_LIT:status>': '<STR_LIT>',<EOL>'<STR_LIT:message>': \"<STR_LIT>\"<EOL>})<EOL><DEDENT>else:<EOL><INDENT>status.append({<EOL>'<STR_LIT:target>': f,<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': self.name,<EOL>'<STR_LIT:description>': self.description,<EOL>'<STR_LIT:status>': '<STR_LIT>',<EOL>'<STR_LIT:message>': \"<STR_LIT>\"<EOL>})<EOL><DEDENT><DEDENT><DEDENT>return status<EOL>", "docstring": "Check the integrity of the datapackage.json", "id": "f8123:c0:m3"}
{"signature": "def evaluate(self, repo, spec, args):", "body": "status = []<EOL>if len(spec['<STR_LIT>']) == <NUM_LIT:0>: <EOL><INDENT>return status <EOL><DEDENT>with cd(repo.rootdir):<EOL><INDENT>rules = None <EOL>if '<STR_LIT>' in spec and len(spec['<STR_LIT>']) > <NUM_LIT:0>: <EOL><INDENT>rulesfiles = spec['<STR_LIT>']<EOL>rules = {} <EOL>for f in rulesfiles: <EOL><INDENT>d = json.loads(open(f).read())<EOL>rules.update(d)<EOL><DEDENT><DEDENT>elif '<STR_LIT>' in spec: <EOL><INDENT>rules = {<EOL>'<STR_LIT>': spec['<STR_LIT>'] <EOL>}<EOL><DEDENT>if rules is None or len(rules) == <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>raise InvalidParameters(\"<STR_LIT>\")<EOL><DEDENT>files = dict([(f, open(f).read()) for f in spec['<STR_LIT>']])<EOL>for r in rules:<EOL><INDENT>if '<STR_LIT>' not in rules[r]:<EOL><INDENT>continue<EOL><DEDENT>minr2 = float(rules[r]['<STR_LIT>'])<EOL>for f in files:<EOL><INDENT>match = re.search(r\"<STR_LIT>\", files[f])<EOL>if match is None:<EOL><INDENT>status.append({<EOL>'<STR_LIT:target>': f,<EOL>'<STR_LIT>': self.name,<EOL>'<STR_LIT:description>': self.description,<EOL>'<STR_LIT>': r,<EOL>'<STR_LIT:status>': \"<STR_LIT>\",<EOL>'<STR_LIT:message>': \"<STR_LIT>\"<EOL>})<EOL><DEDENT>else:<EOL><INDENT>r2 = match.group(<NUM_LIT:1>)<EOL>r2 = float(r2)<EOL>if r2 > minr2:<EOL><INDENT>status.append({<EOL>'<STR_LIT:target>': f,<EOL>'<STR_LIT>': self.name,<EOL>'<STR_LIT:description>': self.description,<EOL>'<STR_LIT>': r,<EOL>'<STR_LIT:status>': \"<STR_LIT:OK>\",<EOL>'<STR_LIT:message>': \"<STR_LIT>\"<EOL>})<EOL><DEDENT>else:<EOL><INDENT>status.append({<EOL>'<STR_LIT:target>': f,<EOL>'<STR_LIT>': self.name,<EOL>'<STR_LIT:description>': self.description,<EOL>'<STR_LIT>': r,<EOL>'<STR_LIT:status>': \"<STR_LIT>\",<EOL>'<STR_LIT:message>': \"<STR_LIT>\"<EOL>})<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return status<EOL>", "docstring": "Evaluate the files identified for checksum.", "id": "f8124:c0:m3"}
{"signature": "def init(self, username, reponame, force, backend=None):", "body": "key = self.key(username, reponame)<EOL>server_repodir = self.server_rootdir(username,<EOL>reponame,<EOL>create=False)<EOL>if os.path.exists(server_repodir) and not force:<EOL><INDENT>raise RepositoryExists()<EOL><DEDENT>if os.path.exists(server_repodir):<EOL><INDENT>shutil.rmtree(server_repodir)<EOL><DEDENT>os.makedirs(server_repodir)<EOL>with cd(server_repodir):<EOL><INDENT>git.init(\"<STR_LIT:.>\", \"<STR_LIT>\")<EOL><DEDENT>if backend is not None:<EOL><INDENT>backend.init_repo(server_repodir)<EOL><DEDENT>repodir = self.rootdir(username, reponame, create=False)<EOL>if os.path.exists(repodir) and not force:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>if os.path.exists(repodir):<EOL><INDENT>shutil.rmtree(repodir)<EOL><DEDENT>os.makedirs(repodir)<EOL>with cd(os.path.dirname(repodir)):<EOL><INDENT>git.clone(server_repodir, '<STR_LIT>')<EOL><DEDENT>url = server_repodir<EOL>if backend is not None:<EOL><INDENT>url = backend.url(username, reponame)<EOL><DEDENT>repo = Repo(username, reponame)<EOL>repo.manager = self<EOL>repo.remoteurl = url<EOL>repo.rootdir = self.rootdir(username, reponame)<EOL>self.add(repo)<EOL>return repo<EOL>", "docstring": "Initialize a Git repo\n\nParameters\n----------\n\nusername, reponame : Repo name is tuple (name, reponame)\nforce: force initialization of the repo even if exists\nbackend: backend that must be used for this (e.g. s3)", "id": "f8125:c0:m13"}
{"signature": "def pull(self, repo, args=[]):", "body": "return self._run_generic_command(repo, [\"<STR_LIT>\"] + args)<EOL>", "docstring": "Pull from origin/filesystem based master\n\nParameters\n----------\n\nrepo: Repository object\nargs: git-specific args", "id": "f8125:c0:m6"}
{"signature": "def config(self, what='<STR_LIT>', params=None):", "body": "if what == '<STR_LIT>':<EOL><INDENT>return {<EOL>'<STR_LIT:name>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': [],<EOL>}<EOL><DEDENT>elif what == '<STR_LIT>':<EOL><INDENT>self.workspace = params['<STR_LIT>']['<STR_LIT>']<EOL>self.workspace = os.path.abspath(self.workspace)<EOL>self.username = params['<STR_LIT>']['<STR_LIT>']<EOL>self.fullname = params['<STR_LIT>']['<STR_LIT>']<EOL>self.email = params['<STR_LIT>']['<STR_LIT>']<EOL>repodir = os.path.join(self.workspace, '<STR_LIT>')<EOL>if not os.path.exists(repodir):<EOL><INDENT>return<EOL><DEDENT>for username in os.listdir(repodir):<EOL><INDENT>for reponame in os.listdir(os.path.join(repodir, username)):<EOL><INDENT>if self.is_my_repo(username, reponame):<EOL><INDENT>r = Repo(username, reponame)<EOL>r.rootdir = os.path.join(repodir, username, reponame)<EOL>package = os.path.join(r.rootdir, '<STR_LIT>')<EOL>if not os.path.exists(package):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\".format(username, reponame))<EOL>continue<EOL><DEDENT>packagedata = open(package).read()<EOL>r.package = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(packagedata)<EOL>r.manager = self<EOL>self.add(r)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Paramers:\n---------\n\nworkspace: Directory to store the dataset repositories\nemail:", "id": "f8125:c0:m20"}
{"signature": "def status(self, repo, args=[]):", "body": "return self._run_generic_command(repo, [\"<STR_LIT:status>\"] + args)<EOL>", "docstring": "Show status of the repo (pass thru git command)\n\nParameters\n----------\n\nrepo: Repository object\nargs: git-specific args", "id": "f8125:c0:m7"}
{"signature": "def log(self, repo, args=[]):", "body": "return self._run_generic_command(repo, [\"<STR_LIT>\"] + args)<EOL>", "docstring": "Show the log  (pass thru git command)\n\nParameters\n----------\n\nrepo: Repository object\nargs: git-specific args", "id": "f8125:c0:m10"}
{"signature": "def drop(self, repo, args=[]):", "body": "<EOL>rootdir = repo.rootdir<EOL>if os.path.exists(rootdir):<EOL><INDENT>print(\"<STR_LIT>\".format(rootdir))<EOL>shutil.rmtree(rootdir)<EOL><DEDENT>server_repodir = self.server_rootdir_from_repo(repo,<EOL>create=False)<EOL>if os.path.exists(server_repodir):<EOL><INDENT>print(\"<STR_LIT>\".format(server_repodir))<EOL>shutil.rmtree(server_repodir)<EOL><DEDENT>super(GitRepoManager, self).drop(repo)<EOL>return {<EOL>'<STR_LIT:status>': '<STR_LIT:success>',<EOL>'<STR_LIT:message>': \"<STR_LIT>\"<EOL>}<EOL>", "docstring": "Cleanup the repo", "id": "f8125:c0:m16"}
{"signature": "def stash(self, repo, args=[]):", "body": "return self._run_generic_command(repo, [\"<STR_LIT>\"] + args)<EOL>", "docstring": "Stash all the changes (pass thru git command)\n\nParameters\n----------\n\nrepo: Repository object\nargs: git-specific args", "id": "f8125:c0:m8"}
{"signature": "def commit(self, repo, args=[]):", "body": "return self._run_generic_command(repo, [\"<STR_LIT>\"] + args)<EOL>", "docstring": "Commit the changes to the repo (pass thru git command)\n\nParameters\n----------\n\nrepo: Repository object\nargs: git-specific args", "id": "f8125:c0:m11"}
{"signature": "def diff(self, repo, args=[]):", "body": "return self._run_generic_command(repo, [\"<STR_LIT>\"] + args)<EOL>", "docstring": "diff two repo versions  (pass thru git command)\n\nParameters\n----------\n\nrepo: Repository object\nargs: git-specific args", "id": "f8125:c0:m9"}
{"signature": "def url_is_valid(self, url):", "body": "<EOL>if url.startswith(\"<STR_LIT>\"):<EOL><INDENT>url = url.replace(\"<STR_LIT>\",\"<STR_LIT>\")<EOL><DEDENT>return os.path.exists(url)<EOL>", "docstring": "Check if a URL exists", "id": "f8127:c0:m1"}
{"signature": "def init(globalvars=None, show=False):", "body": "global config<EOL>profileini = getprofileini()<EOL>if os.path.exists(profileini):<EOL><INDENT>config = configparser.ConfigParser()<EOL>config.read(profileini)<EOL>mgr = plugins_get_mgr()<EOL>mgr.update_configs(config)<EOL>if show:<EOL><INDENT>for source in config:<EOL><INDENT>print(\"<STR_LIT>\" %(source))<EOL>for k in config[source]:<EOL><INDENT>print(\"<STR_LIT>\" % (k, config[source][k]))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>if not show:<EOL><INDENT>update(globalvars)<EOL><DEDENT><DEDENT>print(\"<STR_LIT>\")<EOL>", "docstring": "Load profile INI", "id": "f8131:m1"}
{"signature": "def __init__(self, name, version, description, supported=[]):", "body": "self.enable = '<STR_LIT:y>'<EOL>self.name = name<EOL>self.version = version<EOL>self.description = description<EOL>self.support = supported + [name]<EOL>self.initialize()<EOL>", "docstring": "Parameters:\n-----------\nname: Name of the service e.g., s3\nversion: Version of this implementation\ndescription: Text description of this service\nsupported: supported services with including name\n\nFor example, there may be multiple s3 implementations that\nsupport different kinds of services.", "id": "f8132:c0:m0"}
{"signature": "def initialize(self):", "body": "return<EOL>", "docstring": "Called to initialize sessions, internal objects etc.", "id": "f8132:c0:m1"}
{"signature": "def __init__(self, name, version, description, supported=[]):", "body": "self.enable = '<STR_LIT:y>'<EOL>self.name = name<EOL>self.version = version<EOL>self.description = description<EOL>self.support = supported + [name]<EOL>self.initialize()<EOL>", "docstring": "Parameters:\n-----------\nname: Name of the backend service e.g., s3\nversion: Version of this implementation\ndescription: Text description of this service\nsupported: supported services with including name\n\nFor example, there may be multiple s3 implementations that\nsupport different kinds of services.", "id": "f8133:c0:m0"}
{"signature": "def add(self, repo):", "body": "key = self.key(repo.username, repo.reponame)<EOL>repo.key = key<EOL>self.repos[key] = repo<EOL>return key<EOL>", "docstring": "Add repo to the internal lookup table...", "id": "f8135:c1:m15"}
{"signature": "def users(self):", "body": "return os.listdir(os.path.join(self.workspace, '<STR_LIT>'))<EOL>", "docstring": "Find users", "id": "f8135:c1:m10"}
{"signature": "def find_matching_files(self, includes):", "body": "if len(includes) == <NUM_LIT:0>: <EOL><INDENT>return [] <EOL><DEDENT>files = [f['<STR_LIT>'] for f in self.package['<STR_LIT>']]<EOL>includes = r'<STR_LIT:|>'.join([fnmatch.translate(x) for x in includes])<EOL>files = [f for f in files if re.match(includes, os.path.basename(f))] +[f for f in files if re.match(includes, f)]<EOL>files = list(set(files))<EOL>return files<EOL>", "docstring": "For various actions we need files that match patterns", "id": "f8135:c0:m1"}
{"signature": "def clone(self, repo, newusername, newreponame):", "body": "pass<EOL>", "docstring": "Clone repo", "id": "f8135:c1:m25"}
{"signature": "def lookup(self, username=None, reponame=None, key=None):", "body": "if key is None:<EOL><INDENT>key = self.key(username, reponame)<EOL><DEDENT>if key not in self.repos:<EOL><INDENT>raise UnknownRepository()<EOL><DEDENT>return self.repos[key]<EOL>", "docstring": "Lookup all available repos", "id": "f8135:c1:m9"}
{"signature": "def update_configs(self, config):", "body": "for what in self.plugins:  <EOL><INDENT>for key in self.plugins[what]: <EOL><INDENT>self.plugins[what][key].config(what='<STR_LIT>', params=config)<EOL><DEDENT><DEDENT>return<EOL>", "docstring": "Gather configuration requirements of all plugins", "id": "f8136:c0:m5"}
{"signature": "def discover_all_plugins(self):", "body": "for v in pkg_resources.iter_entry_points('<STR_LIT>'):<EOL><INDENT>m = v.load()<EOL>m.setup(self)<EOL><DEDENT>", "docstring": "Load all plugins from dgit extension", "id": "f8136:c0:m1"}
{"signature": "def plugins_show(what=None, name=None, version=None, details=False):", "body": "global pluginmgr<EOL>return pluginmgr.show(what, name, version, details)<EOL>", "docstring": "Show details of available plugins\n\nParameters\n----------\nwhat: Class of plugins e.g., backend\nname: Name of the plugin e.g., s3\nversion: Version of the plugin\ndetails: Show details be shown?", "id": "f8136:m2"}
{"signature": "def plugins_load():", "body": "global pluginmgr<EOL>if pluginmgr is not None:<EOL><INDENT>plugins_close()<EOL><DEDENT>pluginmgr = PluginManager([])<EOL>", "docstring": "Load plugins from various sources:\n\n- dgit/plugins\n- dgit_extensions package", "id": "f8136:m0"}
{"signature": "def post(self, repo):", "body": "pass<EOL>", "docstring": "Post to server", "id": "f8139:c0:m3"}
{"signature": "def initialize(self):", "body": "return<EOL>", "docstring": "Called to initialize sessions, internal objects etc.", "id": "f8139:c0:m2"}
{"signature": "def __init__(self, name, version, description, supported=[]):", "body": "self.enable = '<STR_LIT:y>'<EOL>self.name = name<EOL>self.version = version<EOL>self.description = description<EOL>self.support = supported + [name]<EOL>self.initialize()<EOL>", "docstring": "Parameters:\n-----------\nname: Name of the backend service e.g., s3\nversion: Version of this implementation\ndescription: Text description of this service\nsupported: supported services with including name\n\nFor example, there may be multiple s3 implementations that\nsupport different kinds of services.", "id": "f8140:c0:m0"}
{"signature": "def clone_repo(self, url, gitdir):", "body": "return<EOL>", "docstring": "Clone a repo at specified URL", "id": "f8140:c0:m3"}
{"signature": "def supported(self, url):", "body": "return False<EOL>", "docstring": "Check if a URL is supported by repo", "id": "f8140:c0:m4"}
{"signature": "def push(self, state, name):", "body": "return<EOL>", "docstring": "Push a data version to the server\n\nParameters\n----------\n\nstate: Overall state object that has dataset details\nname: name of the dataset", "id": "f8140:c0:m6"}
{"signature": "def compute_sha256(filename):", "body": "try:<EOL><INDENT>h = sha256()<EOL>fd = open(filename, '<STR_LIT:rb>')<EOL>while True:<EOL><INDENT>buf = fd.read(<NUM_LIT>)<EOL>if buf in [None, \"<STR_LIT>\"]:<EOL><INDENT>break<EOL><DEDENT>h.update(buf.encode('<STR_LIT:utf-8>'))<EOL><DEDENT>fd.close()<EOL>return h.hexdigest()<EOL><DEDENT>except:<EOL><INDENT>output = run([\"<STR_LIT>\", \"<STR_LIT>\", filename])<EOL>return output.split(\"<STR_LIT:U+0020>\")[<NUM_LIT:0>]<EOL><DEDENT>", "docstring": "Try the library. If it doesnt work, use the command line..", "id": "f8141:m6"}
{"signature": "def make_plugin_source(self, *args, **kwargs):", "body": "return PluginSource(self, *args, **kwargs)<EOL>", "docstring": "Creats a plugin source for this plugin base and returns it.\n        All parameters are forwarded to :class:`PluginSource`.", "id": "f8142:c2:m1"}
{"signature": "def list_plugins(self):", "body": "rv = []<EOL>for _, modname, ispkg in pkgutil.iter_modules(self.mod.__path__):<EOL><INDENT>rv.append(modname)<EOL><DEDENT>return sorted(rv)<EOL>", "docstring": "Returns a sorted list of all plugins that are available in this\n        plugin source.  This can be useful to automatically discover plugins\n        that are available and is usually used together with\n        :meth:`load_plugin`.", "id": "f8142:c3:m2"}
{"signature": "def enable(self):", "body": "self.enabled = True<EOL>", "docstring": "Enables the import hook which drives the plugin base system.\n        This is the default.", "id": "f8142:c5:m1"}
{"signature": "def disable(self):", "body": "self.enabled = False<EOL>", "docstring": "Disables the import hook and restores the default import system\n        behavior.  This effectively breaks pluginbase but can be useful\n        for testing purposes.", "id": "f8142:c5:m2"}
{"signature": "def load_plugin(self, name):", "body": "if '<STR_LIT:.>' in name:<EOL><INDENT>raise ImportError('<STR_LIT>')<EOL><DEDENT>with self:<EOL><INDENT>return __import__(self.base.package + '<STR_LIT:.>' + name,<EOL>globals(), {}, ['<STR_LIT>'])<EOL><DEDENT>", "docstring": "This automatically loads a plugin by the given name from the\n        current source and returns the module.  This is a convenient\n        alternative to the import statement and saves you from invoking\n        ``__import__`` or a similar function yourself.\n\n        :param name: the name of the plugin to load.", "id": "f8142:c3:m3"}
{"signature": "def transform(repo,<EOL>name=None,<EOL>filename=None,<EOL>force=False,<EOL>args=[]):", "body": "mgr = plugins_get_mgr()<EOL>specs = instantiate(repo, name, filename)<EOL>allresults = []<EOL>for s in specs:<EOL><INDENT>keys = mgr.search(what='<STR_LIT>',name=s)['<STR_LIT>']<EOL>for k in keys:<EOL><INDENT>t = mgr.get_by_key('<STR_LIT>', k)<EOL>result = t.evaluate(repo,<EOL>specs[s],<EOL>force,<EOL>args)<EOL>allresults.extend(result)<EOL><DEDENT><DEDENT>return allresults<EOL>", "docstring": "Materialize queries/other content within the repo.\n\nParameters\n----------\n\nrepo: Repository object\nname: Name of transformer, if any. If none, then all transformers specified in dgit.json will be included.\nfilename: Pattern that specifies files that must be processed by the generators selected. If none, then the default specification in dgit.json is used.", "id": "f8144:m1"}
{"signature": "def add_file_normal(f, targetdir, generator,script, source):", "body": "basename = os.path.basename(f)<EOL>if targetdir != \"<STR_LIT:.>\":<EOL><INDENT>relativepath = os.path.join(targetdir, basename)<EOL><DEDENT>else:<EOL><INDENT>relativepath = basename<EOL><DEDENT>relpath = os.path.relpath(f, os.getcwd())<EOL>filetype = '<STR_LIT:data>'<EOL>if script:<EOL><INDENT>filetype = '<STR_LIT>'<EOL>if generator:<EOL><INDENT>filetype = '<STR_LIT>'<EOL><DEDENT><DEDENT>update = OrderedDict([<EOL>('<STR_LIT:type>', filetype),<EOL>('<STR_LIT>', generator),<EOL>('<STR_LIT>', relativepath),<EOL>('<STR_LIT:content>', \"<STR_LIT>\"),<EOL>('<STR_LIT:source>', source),<EOL>('<STR_LIT>', f),<EOL>('<STR_LIT>', relpath)<EOL>])<EOL>update = annotate_record(update)<EOL>return (basename, update)<EOL>", "docstring": "Add a normal file including its source", "id": "f8145:m2"}
{"signature": "def add(repo, args, targetdir,<EOL>execute=False, generator=False,<EOL>includes=[], script=False,<EOL>source=None):", "body": "<EOL>if not execute:<EOL><INDENT>files = add_files(args=args,<EOL>targetdir=targetdir,<EOL>source=source,<EOL>script=script,<EOL>generator=generator)<EOL><DEDENT>else:<EOL><INDENT>files = run_executable(repo, args, includes)<EOL><DEDENT>if files is None or len(files) == <NUM_LIT:0>:<EOL><INDENT>return repo<EOL><DEDENT>filtered_files = []<EOL>package = repo.package<EOL>for h in files:<EOL><INDENT>found = False<EOL>for i, r in  enumerate(package['<STR_LIT>']):<EOL><INDENT>if h['<STR_LIT>'] == r['<STR_LIT>']:<EOL><INDENT>found = True<EOL>if h['<STR_LIT>'] == r['<STR_LIT>']:<EOL><INDENT>change = False<EOL>for attr in ['<STR_LIT:source>']:<EOL><INDENT>if h[attr] != r[attr]:<EOL><INDENT>r[attr] = h[attr]<EOL>change = True<EOL><DEDENT><DEDENT>if change:<EOL><INDENT>filtered_files.append(h)<EOL><DEDENT>continue<EOL><DEDENT>else:<EOL><INDENT>filtered_files.append(h)<EOL>package['<STR_LIT>'][i] = h<EOL><DEDENT>break<EOL><DEDENT><DEDENT>if not found:<EOL><INDENT>filtered_files.append(h)<EOL>package['<STR_LIT>'].append(h)<EOL><DEDENT><DEDENT>if len(filtered_files) == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>repo.manager.add_files(repo, filtered_files)<EOL>rootdir = repo.rootdir<EOL>with cd(rootdir):<EOL><INDENT>datapath = \"<STR_LIT>\"<EOL>with open(datapath, '<STR_LIT:w>') as fd:<EOL><INDENT>fd.write(json.dumps(package, indent=<NUM_LIT:4>))<EOL><DEDENT><DEDENT>return len(filtered_files)<EOL>", "docstring": "Add files to the repository by explicitly specifying them or by\nspecifying a pattern over files accessed during execution of an\nexecutable.\n\nParameters\n----------\n\nrepo: Repository\n\nargs: files or command line\n     (a) If simply adding files, then the list of files that must\n     be added (including any additional arguments to be passed to\n     git\n     (b) If files to be added are an output of a command 
line, then\n     args is the command lined\ntargetdir: Target directory to store the files\nexecute: Args are not files to be added but scripts that must be run.\nincludes: patterns used to select files to\nscript: Is this a script?\ngenerator: Is this a generator\nsource: Link to the original source of the data", "id": "f8145:m7"}
{"signature": "def extract_files(filename, includes):", "body": "<EOL>lines = open(filename).readlines()<EOL>files = {}<EOL>lines = [l.strip() for l in lines if '<STR_LIT>' in l]<EOL>for l in lines:<EOL><INDENT>matchedfile = re.search('<STR_LIT>', l)<EOL>if matchedfile is None:<EOL><INDENT>matchedfile = re.search('<STR_LIT>', l)<EOL><DEDENT>if matchedfile is None:<EOL><INDENT>continue<EOL><DEDENT>matchedfile = matchedfile.group(<NUM_LIT:1>)<EOL>if os.path.exists(matchedfile) and os.path.isfile(matchedfile):<EOL><INDENT>action = '<STR_LIT:input>' if '<STR_LIT>' in l else '<STR_LIT>'<EOL>matchedfile = os.path.relpath(matchedfile, \"<STR_LIT:.>\")<EOL>for i in includes:<EOL><INDENT>if fnmatch.fnmatch(matchedfile, i):<EOL><INDENT>if '<STR_LIT>' in matchedfile:<EOL><INDENT>continue<EOL><DEDENT>if matchedfile not in files:<EOL><INDENT>files[matchedfile] = [action]<EOL><DEDENT>else:<EOL><INDENT>if action not in files[matchedfile]:<EOL><INDENT>files[matchedfile].append(action)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if len(files) == <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return []<EOL><DEDENT>print('<STR_LIT>')<EOL>print('<STR_LIT>')<EOL>filenames = list(files.keys())<EOL>filenames.sort()<EOL>with tempfile.NamedTemporaryFile(suffix=\"<STR_LIT>\") as temp:<EOL><INDENT>temp.write(yaml.dump(filenames, default_flow_style=False).encode('<STR_LIT:utf-8>'))<EOL>temp.flush()<EOL>EDITOR = os.environ.get('<STR_LIT>','<STR_LIT>')<EOL>subprocess.call(\"<STR_LIT>\" %(EDITOR,temp.name), shell=True)<EOL>temp.seek(<NUM_LIT:0>)<EOL>data = temp.read()<EOL>selected = yaml.load(data)<EOL><DEDENT>print(\"<STR_LIT>\", len(selected), \"<STR_LIT>\")<EOL>if len(selected) == <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>filenames = [f for f in filenames if f in selected]<EOL>print('<STR_LIT>')<EOL>print('<STR_LIT>')<EOL>input('<STR_LIT>')<EOL>prefixes = {}<EOL>for f in filenames:<EOL><INDENT>dirname = os.path.dirname(f)<EOL>if dirname == \"<STR_LIT>\":<EOL><INDENT>dirname = 
\"<STR_LIT:.>\"<EOL><DEDENT>prefixes[dirname] = dirname<EOL><DEDENT>while True:<EOL><INDENT>with tempfile.NamedTemporaryFile(suffix=\"<STR_LIT>\") as temp:<EOL><INDENT>temp.write(yaml.dump(prefixes, default_flow_style=False).encode('<STR_LIT:utf-8>'))<EOL>temp.flush()<EOL>EDITOR = os.environ.get('<STR_LIT>','<STR_LIT>')<EOL>subprocess.call(\"<STR_LIT>\" %(EDITOR,temp.name), shell=True)<EOL>temp.seek(<NUM_LIT:0>)<EOL>data = temp.read()<EOL>try:<EOL><INDENT>revised = yaml.load(data)<EOL><DEDENT>except Exception as e:<EOL><INDENT>revised = {}<EOL><DEDENT>if set(list(revised.keys())) == set(list(prefixes.keys())):<EOL><INDENT>prefixes = revised<EOL>break<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>input(\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT>if \"<STR_LIT:.>\" in prefixes:<EOL><INDENT>prefixes[\"<STR_LIT>\"] = prefixes[\"<STR_LIT:.>\"]<EOL><DEDENT>result = []<EOL>ts = datetime.now().isoformat()<EOL>for f in filenames:<EOL><INDENT>relativepath = prefixes[os.path.dirname(f)]<EOL>if relativepath == \"<STR_LIT:.>\":<EOL><INDENT>relativepath = os.path.basename(f)<EOL><DEDENT>else:<EOL><INDENT>relativepath = os.path.join(relativepath, os.path.basename(f))<EOL><DEDENT>result.append(OrderedDict([<EOL>('<STR_LIT>', relativepath),<EOL>('<STR_LIT:type>', '<STR_LIT>'),<EOL>('<STR_LIT>', files[f]),<EOL>('<STR_LIT>', mimetypes.guess_type(f)[<NUM_LIT:0>]),<EOL>('<STR_LIT:content>', open(f).read(<NUM_LIT>)),<EOL>('<STR_LIT>', compute_sha256(f)),<EOL>('<STR_LIT>', ts),<EOL>('<STR_LIT>', os.path.relpath(f, \"<STR_LIT:.>\")),<EOL>('<STR_LIT>', os.path.abspath(f)),<EOL>]))<EOL><DEDENT>print(json.dumps(result, indent=<NUM_LIT:4>))<EOL>return result<EOL>", "docstring": "Extract the files to be added based on the includes", "id": "f8145:m4"}
{"signature": "def instantiate(repo, validator_name=None, filename=None, rulesfiles=None):", "body": "default_validators = repo.options.get('<STR_LIT>', {})<EOL>validators = {}<EOL>if validator_name is not None:<EOL><INDENT>if validator_name in default_validators:<EOL><INDENT>validators = {<EOL>validator_name : default_validators[validator_name]<EOL>}<EOL><DEDENT>else:<EOL><INDENT>validators = {<EOL>validator_name : {<EOL>'<STR_LIT>': [],<EOL>'<STR_LIT>': {},<EOL>'<STR_LIT>': []<EOL>}<EOL>}<EOL><DEDENT><DEDENT>else:<EOL><INDENT>validators = default_validators<EOL><DEDENT>if filename is not None:<EOL><INDENT>matching_files = repo.find_matching_files([filename])<EOL>if len(matching_files) == <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\", filename)<EOL>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>for v in validators:<EOL><INDENT>validators[v]['<STR_LIT>'] = matching_files<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for v in validators:<EOL><INDENT>if '<STR_LIT>' not in validators[v]:<EOL><INDENT>validators[v]['<STR_LIT>'] = []<EOL><DEDENT>elif len(validators[v]['<STR_LIT>']) > <NUM_LIT:0>:<EOL><INDENT>matching_files = repo.find_matching_files(validators[v]['<STR_LIT>'])<EOL>validators[v]['<STR_LIT>'] = matching_files<EOL><DEDENT><DEDENT><DEDENT>if rulesfiles is not None:<EOL><INDENT>matching_files = repo.find_matching_files([rulesfiles])<EOL>if len(matching_files) == <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\".format(rulesfiles,v))<EOL>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>for v in validators:<EOL><INDENT>validators[v]['<STR_LIT>'] = matching_files<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for v in validators:<EOL><INDENT>if '<STR_LIT>' not in validators[v]:<EOL><INDENT>validators[v]['<STR_LIT>'] = []<EOL><DEDENT>else:<EOL><INDENT>rulesfiles = validators[v]['<STR_LIT>']<EOL>matching_files = repo.find_matching_files(rulesfiles)<EOL>validators[v]['<STR_LIT>'] = matching_files<EOL><DEDENT><DEDENT><DEDENT>return validators<EOL>", "docstring": "Instantiate the validation 
specification", "id": "f8146:m0"}
{"signature": "def annotate_metadata_action(repo):", "body": "package = repo.package    <EOL>print(\"<STR_LIT>\")<EOL>with cd(repo.rootdir): <EOL><INDENT>filename = \"<STR_LIT>\"        <EOL>if os.path.exists(filename):             <EOL><INDENT>history = open(filename).readlines() <EOL>actions = []<EOL>for a in history: <EOL><INDENT>try: <EOL><INDENT>a = json.loads(a)<EOL>for x in ['<STR_LIT:code>']: <EOL><INDENT>if x not in a or a[x] == None: <EOL><INDENT>a[x] = \"<STR_LIT>\"<EOL><DEDENT><DEDENT>actions.append(a)<EOL><DEDENT>except:<EOL><INDENT>pass <EOL><DEDENT><DEDENT>package['<STR_LIT>'] = actions<EOL><DEDENT><DEDENT>", "docstring": "Update metadata with the action history", "id": "f8147:m21"}
{"signature": "def annotate_metadata_platform(repo):", "body": "print(\"<STR_LIT>\")<EOL>package = repo.package<EOL>mgr = plugins_get_mgr()<EOL>repomgr = mgr.get(what='<STR_LIT>', name='<STR_LIT>')<EOL>package['<STR_LIT>'] = repomgr.get_metadata()<EOL>", "docstring": "Update metadata host information", "id": "f8147:m22"}
{"signature": "@log_repo_action<EOL>def commit(repo, args=[]):", "body": "return generic_repo_cmd(repo, '<STR_LIT>', args)<EOL>", "docstring": "Commit changes to the data repository\n\nParameters\n----------\n\nrepo: Repository object\nargs: Arguments to git command", "id": "f8147:m10"}
{"signature": "@log_repo_action<EOL>def status(repo, args=[]):", "body": "result = generic_repo_cmd(repo, '<STR_LIT:status>', args)<EOL>return result<EOL>", "docstring": "Show status of the repo\n\nParameters\n----------\n\nrepo: Repository object (result of lookup)\ndetails: Show internal details of the repo\nargs: Parameters to be passed to git status command", "id": "f8147:m14"}
{"signature": "def annotate_metadata_dependencies(repo):", "body": "options = repo.options<EOL>if '<STR_LIT>' not in options:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return []<EOL><DEDENT>repos = []<EOL>dependent_repos = options['<STR_LIT>']<EOL>for d in dependent_repos:<EOL><INDENT>if \"<STR_LIT:/>\" not in d:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>(username, reponame) = d.split(\"<STR_LIT:/>\")<EOL>try:<EOL><INDENT>repos.append(repo.manager.lookup(username, reponame))<EOL><DEDENT>except:<EOL><INDENT>print(\"<STR_LIT>\", d)<EOL><DEDENT><DEDENT>package = repo.package<EOL>package['<STR_LIT>'] = []<EOL>for r in repos:<EOL><INDENT>package['<STR_LIT>'].append({<EOL>'<STR_LIT:username>': r.username,<EOL>'<STR_LIT>': r.reponame,<EOL>})<EOL><DEDENT>", "docstring": "Collect information from the dependent repo's", "id": "f8147:m25"}
{"signature": "@log_repo_action<EOL>def remote(repo, args=[]):", "body": "return generic_repo_cmd(repo, '<STR_LIT>', args)<EOL>", "docstring": "Show remote\n\nParameters\n----------\n\nrepo: Repository object\nargs: Arguments to git command", "id": "f8147:m7"}
{"signature": "def annotate_metadata_code(repo, files):", "body": "package = repo.package<EOL>package['<STR_LIT:code>'] = []<EOL>for p in files:<EOL><INDENT>matching_files = glob2.glob(\"<STR_LIT>\".format(p))<EOL>for f in matching_files:<EOL><INDENT>absf = os.path.abspath(f)<EOL>print(\"<STR_LIT>\".format(f))<EOL>package['<STR_LIT:code>'].append(OrderedDict([<EOL>('<STR_LIT>', f),<EOL>('<STR_LIT>', repo.manager.permalink(repo, absf)),<EOL>('<STR_LIT>', mimetypes.guess_type(absf)[<NUM_LIT:0>]),<EOL>('<STR_LIT>', compute_sha256(absf))<EOL>]))<EOL><DEDENT><DEDENT>", "docstring": "Update metadata with the commit information", "id": "f8147:m20"}
{"signature": "@log_repo_action<EOL>def diff(repo, args=[]):", "body": "return generic_repo_cmd(repo, '<STR_LIT>', args)<EOL>", "docstring": "Diff between versions\n\nParameters\n----------\n\nrepo: Repository object\nargs: Arguments to git command", "id": "f8147:m13"}
{"signature": "def datapackage_exists(repo):", "body": "datapath = os.path.join(repo.rootdir, \"<STR_LIT>\")<EOL>return os.path.exists(datapath)<EOL>", "docstring": "Check if the datapackage exists...", "id": "f8147:m3"}
{"signature": "@log_repo_action<EOL>def pull(repo, args=[]):", "body": "return generic_repo_cmd(repo, '<STR_LIT>', args)<EOL>", "docstring": "Pull changes from the backend\n\nParameters\n----------\n\nrepo: Repository object\nargs: Arguments to git command", "id": "f8147:m9"}
{"signature": "def init(username, reponame, setup,<EOL>force=False, options=None,<EOL>noinput=False):", "body": "mgr = plugins_get_mgr()<EOL>repomgr = mgr.get(what='<STR_LIT>', name='<STR_LIT>')<EOL>backendmgr = None<EOL>if setup == '<STR_LIT>':<EOL><INDENT>backendmgr = mgr.get(what='<STR_LIT>', name='<STR_LIT>')<EOL><DEDENT>repo = repomgr.init(username, reponame, force, backendmgr)<EOL>(handle, gitignore) = tempfile.mkstemp()<EOL>with open(gitignore, '<STR_LIT:w>') as fd:<EOL><INDENT>fd.write(\"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>filename = bootstrap_datapackage(repo, force, options, noinput)<EOL><DEDENT>except Exception as e:<EOL><INDENT>repomgr.drop(repo,[])<EOL>os.unlink(gitignore)<EOL>raise e<EOL><DEDENT>repo.run('<STR_LIT>',<EOL>[<EOL>{<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': filename,<EOL>},<EOL>{<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': gitignore,<EOL>},<EOL>])<EOL>os.unlink(filename)<EOL>os.unlink(gitignore)<EOL>args = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>repo.run('<STR_LIT>', args)<EOL>return repo<EOL>", "docstring": "Initialize an empty repository with datapackage.json\n\nParameters\n----------\n\nusername: Name of the user\nreponame: Name of the repo\nsetup: Specify the 'configuration' (git only, git+s3 backend etc)\nforce: Force creation of the files\noptions: Dictionary with content of dgit.json, if available.\nnoinput: Automatic operation with no human interaction", "id": "f8147:m17"}
{"signature": "def list_repos(remote=False):", "body": "mgr = plugins_get_mgr()<EOL>if not remote:<EOL><INDENT>repomgr = mgr.get(what='<STR_LIT>', name='<STR_LIT>')<EOL>repos = repomgr.get_repo_list()<EOL>repos.sort()<EOL>return repos<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "List repos\n\nParameters\n----------\n\nremote: Flag", "id": "f8147:m1"}
{"signature": "@log_repo_action<EOL>def shellcmd(repo, args):", "body": "with cd(repo.rootdir):<EOL><INDENT>result = run(args)<EOL>return result<EOL><DEDENT>", "docstring": "Run a shell command within the repo's context\n\nParameters\n----------\n\nrepo: Repository object\nargs: Shell command", "id": "f8147:m2"}
{"signature": "@log_repo_action<EOL>def show(repo, args=[]):", "body": "return generic_repo_cmd(repo, '<STR_LIT>', args)<EOL>", "docstring": "Show commit details\n\nParameters\n----------\n\nrepo: Repository object\nargs: Arguments to git command", "id": "f8147:m6"}
{"signature": "@log_repo_action<EOL>def delete(repo, args=[]):", "body": "<EOL>result = generic_repo_cmd(repo, '<STR_LIT>', args)<EOL>if result['<STR_LIT:status>'] != '<STR_LIT:success>': <EOL><INDENT>return status <EOL><DEDENT>with cd(repo.rootdir): <EOL><INDENT>package = repo.package <EOL>resources = package['<STR_LIT>'] <EOL>cleaned_resources = []<EOL>for r in resources: <EOL><INDENT>relativepath = r['<STR_LIT>'] <EOL>sha256 = r['<STR_LIT>'] <EOL>if relativepath not in ['<STR_LIT>', None]: <EOL><INDENT>if not os.path.exists(relativepath): <EOL><INDENT>print(\"<STR_LIT>\", relativepath) <EOL>continue <EOL><DEDENT><DEDENT>cleaned_resources.append(r) <EOL><DEDENT>package['<STR_LIT>'] = cleaned_resources <EOL>repo.package = package <EOL>with open('<STR_LIT>', '<STR_LIT:w>') as fd: <EOL><INDENT>fd.write(json.dumps(repo.package, indent=<NUM_LIT:4>))<EOL><DEDENT>return {<EOL>'<STR_LIT:status>': '<STR_LIT:success>',<EOL>'<STR_LIT:message>': '<STR_LIT>'<EOL>}<EOL><DEDENT>", "docstring": "Delete files\n\nParameters\n----------\n\nrepo: Repository object\nargs: Arguments to git command", "id": "f8147:m15"}
{"signature": "def bootstrap_datapackage(repo, force=False,<EOL>options=None, noinput=False):", "body": "print(\"<STR_LIT>\")<EOL>tsprefix = datetime.now().date().isoformat()<EOL>package = OrderedDict([<EOL>('<STR_LIT:title>', '<STR_LIT>'),<EOL>('<STR_LIT:description>', '<STR_LIT>'),<EOL>('<STR_LIT:username>', repo.username),<EOL>('<STR_LIT>', repo.reponame),<EOL>('<STR_LIT:name>', str(repo)),<EOL>('<STR_LIT:title>', \"<STR_LIT>\"),<EOL>('<STR_LIT:description>', \"<STR_LIT>\"),<EOL>('<STR_LIT>', []),<EOL>('<STR_LIT>', []),<EOL>('<STR_LIT>', getpass.getuser()),<EOL>('<STR_LIT>', datetime.now().isoformat()),<EOL>('<STR_LIT>', repo.remoteurl)<EOL>])<EOL>if options is not None:<EOL><INDENT>package['<STR_LIT:title>'] = options['<STR_LIT:title>']<EOL>package['<STR_LIT:description>'] = options['<STR_LIT:description>']<EOL><DEDENT>else:<EOL><INDENT>if noinput:<EOL><INDENT>raise IncompleteParameters(\"<STR_LIT>\")<EOL><DEDENT>for var in ['<STR_LIT:title>', '<STR_LIT:description>']:<EOL><INDENT>value = '<STR_LIT>'<EOL>while value in ['<STR_LIT>',None]:<EOL><INDENT>value = input('<STR_LIT>' + var.title() + \"<STR_LIT>\")<EOL>if len(value) == <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\".format(var.title()))<EOL><DEDENT><DEDENT>package[var] = value<EOL><DEDENT><DEDENT>(handle, filename) = tempfile.mkstemp()<EOL>with open(filename, '<STR_LIT:w>') as fd:<EOL><INDENT>fd.write(json.dumps(package, indent=<NUM_LIT:4>))<EOL><DEDENT>repo.package = package<EOL>return filename<EOL>", "docstring": "Create the datapackage file..", "id": "f8147:m16"}
{"signature": "def find_executable_files():", "body": "files = glob.glob(\"<STR_LIT:*>\") + glob.glob(\"<STR_LIT>\") + glob.glob('<STR_LIT>')<EOL>files = filter(lambda f: os.path.isfile(f), files)<EOL>executable = stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH<EOL>final = []<EOL>for filename in files:<EOL><INDENT>if os.path.isfile(filename):<EOL><INDENT>st = os.stat(filename)<EOL>mode = st.st_mode<EOL>if mode & executable:<EOL><INDENT>final.append(filename)<EOL>if len(final) > <NUM_LIT:5>:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return final<EOL>", "docstring": "Find max 5 executables that are responsible for this repo.", "id": "f8149:m0"}
{"signature": "def get_files_to_commit(autooptions):", "body": "workingdir = autooptions['<STR_LIT>']<EOL>includes = autooptions['<STR_LIT>']['<STR_LIT>']<EOL>excludes = autooptions['<STR_LIT>']['<STR_LIT>']<EOL>includes = r'<STR_LIT:|>'.join([fnmatch.translate(x) for x in includes])<EOL>excludes = r'<STR_LIT:|>'.join([fnmatch.translate(x) for x in excludes]) or r'<STR_LIT>'<EOL>matched_files = []<EOL>for root, dirs, files in os.walk(workingdir):<EOL><INDENT>dirs[:] = [d for d in dirs if not re.match(excludes, d)]<EOL>files = [f for f in files if not re.match(excludes, f)]<EOL>files = [f for f in files if re.match(includes, f)]<EOL>files = [os.path.join(root, f) for f in files]<EOL>matched_files.extend(files)<EOL><DEDENT>return matched_files<EOL>", "docstring": "Look through the local directory to pick up files to check", "id": "f8149:m3"}
{"signature": "def auto_add(repo, autooptions, files):", "body": "<EOL>mapping = { \"<STR_LIT:.>\": \"<STR_LIT>\" }<EOL>if (('<STR_LIT>' in autooptions) and<EOL>('<STR_LIT>' in autooptions['<STR_LIT>'])):<EOL><INDENT>mapping = autooptions['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>keys = mapping.keys()<EOL>keys = sorted(keys, key=lambda k: len(k), reverse=True)<EOL>count = <NUM_LIT:0><EOL>params = []<EOL>for f in files:<EOL><INDENT>relativepath = f<EOL>for k in keys:<EOL><INDENT>v = mapping[k]<EOL>if f.startswith(k + \"<STR_LIT:/>\"):<EOL><INDENT>relativepath = f.replace(k + \"<STR_LIT:/>\", v)<EOL>break<EOL><DEDENT><DEDENT>count += files_add(repo=repo,<EOL>args=[f],<EOL>targetdir=os.path.dirname(relativepath))<EOL><DEDENT>return count<EOL>", "docstring": "Cleanup the paths and add", "id": "f8149:m4"}
{"signature": "def get_diffs(history):", "body": "<EOL>mgr = plugins_get_mgr() <EOL>keys = mgr.search('<STR_LIT>')['<STR_LIT>']<EOL>representations = [mgr.get_by_key('<STR_LIT>', k) for k in keys]<EOL>for i in range(len(history)):<EOL><INDENT>if i+<NUM_LIT:1> > len(history) - <NUM_LIT:1>:<EOL><INDENT>continue<EOL><DEDENT>prev = history[i]<EOL>curr = history[i+<NUM_LIT:1>]<EOL>for c in curr['<STR_LIT>']:<EOL><INDENT>path = c['<STR_LIT:path>']<EOL>if c['<STR_LIT:path>'].endswith('<STR_LIT>'): <EOL><INDENT>continue <EOL><DEDENT>handler = None <EOL>for r in representations: <EOL><INDENT>if r.can_process(path): <EOL><INDENT>handler = r <EOL>break <EOL><DEDENT><DEDENT>if handler is None: <EOL><INDENT>continue <EOL><DEDENT>v1_hex = prev['<STR_LIT>']<EOL>v2_hex = curr['<STR_LIT>']<EOL>temp1 = tempfile.mkdtemp(prefix=\"<STR_LIT>\") <EOL>try: <EOL><INDENT>for h in [v1_hex, v2_hex]: <EOL><INDENT>filename = '<STR_LIT>'.format(temp1, h)<EOL>try:<EOL><INDENT>os.makedirs(os.path.dirname(filename))<EOL><DEDENT>except:<EOL><INDENT>pass <EOL><DEDENT>extractcmd = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', filename, h, path]<EOL>output = run(extractcmd)<EOL>if '<STR_LIT>' in output: <EOL><INDENT>raise Exception(\"<STR_LIT>\") <EOL><DEDENT>with cd(os.path.dirname(filename)): <EOL><INDENT>cmd = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>output = run(cmd) <EOL>if '<STR_LIT>' in output: <EOL><INDENT>print(\"<STR_LIT>\", temp1)<EOL>shutil.rmtree(temp1)<EOL>continue <EOL><DEDENT><DEDENT><DEDENT>path1 = os.path.join(temp1, v1_hex, path) <EOL>path2 = os.path.join(temp1, v2_hex, path) <EOL>if not os.path.exists(path1) or not os.path.exists(path2): <EOL><INDENT>shutil.rmtree(temp1)<EOL>continue <EOL><DEDENT>diff = handler.get_diff(path1, path2)<EOL>c['<STR_LIT>'] = diff<EOL><DEDENT>except Exception as e: <EOL><INDENT>shutil.rmtree(temp1)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Look at files and compute the diffs intelligently", "id": "f8150:m3"}
{"signature": "def dumpdb(args):", "body": "if len(args.args) < <NUM_LIT:2>:<EOL><INDENT>raise ParserError('<STR_LIT>')  <EOL><DEDENT>ds = _get_dataset(args)<EOL>db = Database(ds, fname=args.args[<NUM_LIT:1>])<EOL>mdpath = Path(args.args[<NUM_LIT:2>]) if len(args.args) > <NUM_LIT:2> else ds.tablegroup._fname<EOL>args.log.info('<STR_LIT>'.format(db.to_cldf(mdpath.parent, mdname=mdpath.name)))<EOL>", "docstring": "cldf dumpdb <DATASET> <SQLITE_DB_PATH> [<METADATA_PATH>]", "id": "f8167:m4"}
{"signature": "def createdb(args):", "body": "if len(args.args) < <NUM_LIT:2>:<EOL><INDENT>raise ParserError('<STR_LIT>')<EOL><DEDENT>ds = _get_dataset(args)<EOL>db = Database(ds, fname=args.args[<NUM_LIT:1>])<EOL>db.write_from_tg()<EOL>args.log.info('<STR_LIT>'.format(ds, db.fname))<EOL>", "docstring": "cldf createdb <DATASET> <SQLITE_DB_PATH>\n\nLoad CLDF dataset <DATASET> into a SQLite DB, where <DATASET> may be the path to\n- a CLDF metadata file\n- a CLDF core data file", "id": "f8167:m3"}
{"signature": "def validate(args):", "body": "ds = _get_dataset(args)<EOL>ds.validate(log=args.log)<EOL>", "docstring": "cldf validate <DATASET>\n\nValidate a dataset against the CLDF specification, i.e. check\n- whether required tables and columns are present\n- whether values for required columns are present\n- the referential integrity of the dataset", "id": "f8167:m1"}
{"signature": "@property<EOL><INDENT>def tab_classes(self):<DEDENT>", "body": "classes = super(MainNavigationBaseTab, self).tab_classes[:]<EOL>if self.current_tab.request.user.is_authenticated():<EOL><INDENT>classes += ['<STR_LIT>']<EOL><DEDENT>return classes<EOL>", "docstring": "If user is logged in, set ``logged_in_only`` class.", "id": "f8180:c0:m1"}
{"signature": "def get_context_data(self, **kwargs):", "body": "context = super(TabView, self).get_context_data(**kwargs)<EOL>context.update(kwargs)<EOL>process_tabs_kwargs = {<EOL>'<STR_LIT>': self.get_group_tabs(),<EOL>'<STR_LIT>': self,<EOL>'<STR_LIT>': self,<EOL>}<EOL>context['<STR_LIT>'] = self._process_tabs(**process_tabs_kwargs)<EOL>context['<STR_LIT>'] = self.tab_id<EOL>if self.tab_parent is not None:<EOL><INDENT>if self.tab_parent not in self._registry:<EOL><INDENT>msg = '<STR_LIT>' % self.tab_parent.__class__.__name__<EOL>raise ImproperlyConfigured(msg)<EOL><DEDENT>parent = self.tab_parent()<EOL>process_parents_kwargs = {<EOL>'<STR_LIT>': parent.get_group_tabs(),<EOL>'<STR_LIT>': self,<EOL>'<STR_LIT>': parent,<EOL>}<EOL>context['<STR_LIT>'] = self._process_tabs(**process_parents_kwargs)<EOL>context['<STR_LIT>'] = parent.tab_id<EOL><DEDENT>if self.tab_id in self._children:<EOL><INDENT>process_children_kwargs = {<EOL>'<STR_LIT>': [t() for t in self._children[self.tab_id]],<EOL>'<STR_LIT>': self,<EOL>'<STR_LIT>': None,<EOL>}<EOL>context['<STR_LIT>'] = self._process_tabs(**process_children_kwargs)<EOL><DEDENT>return context<EOL>", "docstring": "Adds tab information to context.\n\nTo retrieve a list of all group tab instances, use\n``{{ tabs }}`` in your template.\n\nThe id of the current tab is added as ``current_tab_id`` to the\ntemplate context.\n\nIf the current tab has a parent tab the parent's id is added to\nthe template context as ``parent_tab_id``. Instances of all tabs\nof the parent level are added as ``parent_tabs`` to the context.\n\nIf the current tab has children they are added to the template\ncontext as ``child_tabs``.", "id": "f8182:c1:m3"}
{"signature": "def get_group_tabs(self):", "body": "if self.tab_group is None:<EOL><INDENT>raise ImproperlyConfigured(<EOL>\"<STR_LIT>\" %<EOL>self.__class__.__name__)<EOL><DEDENT>group_members = [t for t in self._registry if t.tab_group == self.tab_group]<EOL>return [t() for t in group_members]<EOL>", "docstring": "Return instances of all other tabs that are members of the tab's\ntab group.", "id": "f8182:c1:m0"}
{"signature": "@property<EOL><INDENT>def tab_visible(self):<DEDENT>", "body": "return self.tab_label is not None<EOL>", "docstring": "Whether or not this tab is shown in the tab group. Or to be more exact,\nwhether or not this tab is contained in ``{{ tabs }}``.\n\nThe default behavior is to set the tab as visible if it has a label.", "id": "f8182:c1:m1"}
{"signature": "@property<EOL><INDENT>def rpc(self):<DEDENT>", "body": "class RPC:<EOL><INDENT>def _load(self, name):<EOL><INDENT>with open(<EOL>os.path.join(os.path.dirname(__file__), \"<STR_LIT>\")<EOL>) as fid:<EOL><INDENT>d = yaml.safe_load(fid)<EOL><DEDENT>return d.get(name)<EOL><DEDENT>def get_objects(self, ids, *args, **kwargs):<EOL><INDENT>return [self.get_object(x) for x in ids]<EOL><DEDENT>def get_object(self, id, *args, **kwargs):<EOL><INDENT>return get_object(id)<EOL><DEDENT>def get_account_history(self, *args, **kwargs):<EOL><INDENT>with open(<EOL>os.path.join(<EOL>os.path.dirname(__file__), \"<STR_LIT>\"<EOL>)<EOL>) as fid:<EOL><INDENT>history = yaml.safe_load(fid)<EOL>return history<EOL><DEDENT><DEDENT>def get_account_balances(self, account, *args, **kwargs):<EOL><INDENT>return [{\"<STR_LIT>\": \"<STR_LIT>\", \"<STR_LIT>\": <NUM_LIT>}]<EOL><DEDENT>def lookup_account_names(self, name, **kwargs):<EOL><INDENT>return [None]<EOL><DEDENT>def get_all_workers(self):<EOL><INDENT>return self._load(\"<STR_LIT>\")<EOL><DEDENT>def get_workers_by_account(self, name):<EOL><INDENT>return [self._load(\"<STR_LIT>\")[<NUM_LIT:0>]]<EOL><DEDENT>def get_dynamic_global_properties(self):<EOL><INDENT>return {<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": <NUM_LIT>,<EOL>}<EOL><DEDENT>def __getattr__(self, name):<EOL><INDENT>def fun(self, *args, **kwargs):<EOL><INDENT>return {}<EOL><DEDENT>return fun<EOL><DEDENT><DEDENT>return RPC()<EOL>", "docstring": "We are patching rpc similar to a regular RPC\n            connection. However, it will always return\n            an empty object!", "id": "f8193:c1:m4"}
{"signature": "def __setitem__(self, key, value):", "body": "if self._haveKey(key):<EOL><INDENT>query = (<EOL>\"<STR_LIT>\".format(<EOL>self.__tablename__, self.__value__, self.__key__<EOL>),<EOL>(value, key),<EOL>)<EOL><DEDENT>else:<EOL><INDENT>query = (<EOL>\"<STR_LIT>\".format(<EOL>self.__tablename__, self.__key__, self.__value__<EOL>),<EOL>(key, value),<EOL>)<EOL><DEDENT>connection = sqlite3.connect(self.sqlite_file)<EOL>cursor = connection.cursor()<EOL>cursor.execute(*query)<EOL>connection.commit()<EOL>", "docstring": "Sets an item in the store\n\n            :param str key: Key\n            :param str value: Value", "id": "f8220:c1:m2"}
{"signature": "def __len__(self):", "body": "query = \"<STR_LIT>\".format(self.__tablename__)<EOL>connection = sqlite3.connect(self.sqlite_file)<EOL>cursor = connection.cursor()<EOL>cursor.execute(query)<EOL>return len(cursor.fetchall())<EOL>", "docstring": "return lenght of store", "id": "f8220:c1:m6"}
{"signature": "def wipe(self):", "body": "query = \"<STR_LIT>\".format(self.__tablename__)<EOL>connection = sqlite3.connect(self.sqlite_file)<EOL>cursor = connection.cursor()<EOL>cursor.execute(query)<EOL>connection.commit()<EOL>", "docstring": "Wipe the store", "id": "f8220:c1:m11"}
{"signature": "def is_encrypted(self):", "body": "return True<EOL>", "docstring": "Returns True/False to indicate required use of unlock", "id": "f8221:c2:m0"}
{"signature": "def __iter__(self):", "body": "return dict.__iter__(self)<EOL>", "docstring": "Iterates through the store", "id": "f8221:c0:m4"}
{"signature": "def delete(self, pub):", "body": "raise NotImplementedError<EOL>", "docstring": "Delete a pubkey/privatekey pair from the store\n\n           :param str pub: Public key", "id": "f8221:c1:m4"}
{"signature": "def locked(self):", "body": "return False<EOL>", "docstring": "is the wallet locked?", "id": "f8221:c2:m2"}
{"signature": "def __setitem__(self, key, value):", "body": "return dict.__setitem__(self, key, value)<EOL>", "docstring": "Sets an item in the store", "id": "f8221:c0:m2"}
{"signature": "def wipe(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Wipe the store", "id": "f8221:c0:m10"}
{"signature": "def items(self):", "body": "return dict.items(self)<EOL>", "docstring": "Returns all items off the store as tuples", "id": "f8221:c0:m7"}
{"signature": "@classmethod<EOL><INDENT>def setdefault(cls, key, value):<DEDENT>", "body": "cls.defaults[key] = value<EOL>", "docstring": "Allows to define default values", "id": "f8221:c0:m0"}
{"signature": "def getPrivateKeyForPublicKey(self, pub):", "body": "raise NotImplementedError<EOL>", "docstring": "Returns the (possibly encrypted) private key that\n            corresponds to a public key\n\n           :param str pub: Public key\n\n           The encryption scheme is BIP38", "id": "f8221:c1:m2"}
{"signature": "def is_encrypted(self):", "body": "return False<EOL>", "docstring": "Returns True/False to indicate required use of unlock", "id": "f8221:c1:m0"}
{"signature": "def get(self, key, default=None):", "body": "return dict.get(self, key, default)<EOL>", "docstring": "Return the key if exists or a default value", "id": "f8221:c0:m8"}
{"signature": "def add(self, wif, pub=None):", "body": "raise NotImplementedError<EOL>", "docstring": "Add a new public/private key pair (correspondence has to be\n            checked elsewhere!)\n\n           :param str pub: Public key\n           :param str wif: Private key", "id": "f8221:c1:m3"}
{"signature": "def is_encrypted(self):", "body": "return False<EOL>", "docstring": "Returns False, as we are not encrypted here", "id": "f8222:c3:m4"}
{"signature": "def get_default_key_store(*args, config, **kwargs):", "body": "kwargs[\"<STR_LIT>\"] = kwargs.get(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>return SqliteEncryptedKeyStore(config=config, **kwargs)<EOL>", "docstring": "This method returns the default **key** store\n        that uses an SQLite database internally.\n\n        :params str appname: The appname that is used internally to distinguish\n            different SQLite files", "id": "f8224:m1"}
{"signature": "@property<EOL><INDENT>def masterkey(self):<DEDENT>", "body": "return self.decrypted_master<EOL>", "docstring": "Contains the **decrypted** master key", "id": "f8226:c0:m1"}
{"signature": "def locked(self):", "body": "return not self.unlocked()<EOL>", "docstring": "Is the store locked. E.g. Is a valid password known that can be\n            used to decrypt the master key?", "id": "f8226:c0:m3"}
{"signature": "def unlocked(self):", "body": "if self.password is not None:<EOL><INDENT>return bool(self.password)<EOL><DEDENT>else:<EOL><INDENT>if (<EOL>\"<STR_LIT>\" in os.environ<EOL>and os.environ[\"<STR_LIT>\"]<EOL>and self.config_key in self.config<EOL>and self.config[self.config_key]<EOL>):<EOL><INDENT>log.debug(\"<STR_LIT>\" \"<STR_LIT>\")<EOL>self.unlock(os.environ.get(\"<STR_LIT>\"))<EOL>return bool(self.password)<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Is the store unlocked so that I can decrypt the content?", "id": "f8226:c0:m4"}
{"signature": "def _decrypt_masterpassword(self):", "body": "aes = AESCipher(self.password)<EOL>checksum, encrypted_master = self.config[self.config_key].split(\"<STR_LIT:$>\")<EOL>try:<EOL><INDENT>decrypted_master = aes.decrypt(encrypted_master)<EOL><DEDENT>except Exception:<EOL><INDENT>self._raise_wrongmasterpassexception()<EOL><DEDENT>if checksum != self._derive_checksum(decrypted_master):<EOL><INDENT>self._raise_wrongmasterpassexception()<EOL><DEDENT>self.decrypted_master = decrypted_master<EOL>", "docstring": "Decrypt the encrypted masterkey", "id": "f8226:c0:m7"}
{"signature": "def _get_encrypted_masterpassword(self):", "body": "if not self.unlocked():<EOL><INDENT>raise WalletLocked<EOL><DEDENT>aes = AESCipher(self.password)<EOL>return \"<STR_LIT>\".format(<EOL>self._derive_checksum(self.masterkey), aes.encrypt(self.masterkey)<EOL>)<EOL>", "docstring": "Obtain the encrypted masterkey\n\n            .. note:: The encrypted masterkey is checksummed, so that we can\n                figure out that a provided password is correct or not. The\n                checksum is only 4 bytes long!", "id": "f8226:c0:m12"}
{"signature": "def _new_masterpassword(self, password):", "body": "<EOL>if self.config_key in self.config and self.config[self.config_key]:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>self.decrypted_master = hexlify(os.urandom(<NUM_LIT:32>)).decode(\"<STR_LIT:ascii>\")<EOL>self.password = password<EOL>self._save_encrypted_masterpassword()<EOL>return self.masterkey<EOL>", "docstring": "Generate a new random masterkey, encrypt it with the password and\n            store it in the store.\n\n            :param str password: Password to use for en-/de-cryption", "id": "f8226:c0:m10"}
{"signature": "def change_password(self, newpassword):", "body": "if not self.unlocked():<EOL><INDENT>raise WalletLocked<EOL><DEDENT>self.password = newpassword<EOL>self._save_encrypted_masterpassword()<EOL>", "docstring": "Change the password that allows to decrypt the master key", "id": "f8226:c0:m13"}
{"signature": "def encrypt(self, wif):", "body": "if not self.unlocked():<EOL><INDENT>raise WalletLocked<EOL><DEDENT>return format(bip38.encrypt(str(wif), self.masterkey), \"<STR_LIT>\")<EOL>", "docstring": "Encrypt the content according to BIP38\n\n            :param str wif: Unencrypted key", "id": "f8226:c0:m15"}
{"signature": "def __getattr__(self, name):", "body": "def method(*args, **kwargs):<EOL><INDENT>if \"<STR_LIT>\" not in kwargs:  <EOL><INDENT>if \"<STR_LIT>\" in kwargs:<EOL><INDENT>if kwargs[\"<STR_LIT>\"] in self.api_id and self.api_id[kwargs[\"<STR_LIT>\"]]:<EOL><INDENT>api_id = self.api_id[kwargs[\"<STR_LIT>\"]]<EOL><DEDENT>else:<EOL><INDENT>api_id = kwargs[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>api_id = <NUM_LIT:0><EOL><DEDENT><DEDENT>else:  <EOL><INDENT>api_id = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>self.num_retries = kwargs.get(\"<STR_LIT>\", self.num_retries)<EOL>query = {<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": [api_id, name, list(args)],<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:id>\": self.get_request_id(),<EOL>}<EOL>r = self.rpcexec(query)<EOL>message = self.parse_response(r)<EOL>return message<EOL><DEDENT>return method<EOL>", "docstring": "Map all methods to RPC calls and pass through the arguments", "id": "f8227:c0:m7"}
{"signature": "def __getattr__(self, name):", "body": "def method(*args):<EOL><INDENT>query = {\"<STR_LIT>\": name, \"<STR_LIT>\": args, \"<STR_LIT>\": \"<STR_LIT>\", \"<STR_LIT:id>\": <NUM_LIT:0>}<EOL>r = self.rpcexec(query)<EOL>return r<EOL><DEDENT>return method<EOL>", "docstring": "Map all methods to RPC calls and pass through the arguments", "id": "f8228:c0:m2"}
{"signature": "def rpcexec(self, payload):", "body": "log.debug(json.dumps(payload))<EOL>query = requests.post(self.url, json=payload, proxies=self.proxies())<EOL>if query.status_code != <NUM_LIT:200>:  <EOL><INDENT>raise HttpInvalidStatusCode(<EOL>\"<STR_LIT>\".format(query.status_code)<EOL>)<EOL><DEDENT>return query.text<EOL>", "docstring": "Execute a call by sending the payload\n\n            :param json payload: Payload data\n            :raises ValueError: if the server does not respond in proper JSON\n                                format\n            :raises HttpInvalidStatusCode: if the server returns a status code\n                that is not 200", "id": "f8230:c0:m1"}
{"signature": "def reset_counter(self):", "body": "self._cnt_retries = <NUM_LIT:0><EOL>for i in self._url_counter:<EOL><INDENT>self._url_counter[i] = <NUM_LIT:0><EOL><DEDENT>", "docstring": "reset the failed connection counters", "id": "f8231:c0:m7"}
{"signature": "def find_next(self):", "body": "if int(self.num_retries) < <NUM_LIT:0>:  <EOL><INDENT>self._cnt_retries += <NUM_LIT:1><EOL>sleeptime = (self._cnt_retries - <NUM_LIT:1>) * <NUM_LIT:2> if self._cnt_retries < <NUM_LIT:10> else <NUM_LIT:10><EOL>if sleeptime:<EOL><INDENT>log.warning(<EOL>\"<STR_LIT>\"<EOL>% (self.url, self._cnt_retries, self.num_retries)<EOL>+ \"<STR_LIT>\" % sleeptime<EOL>)<EOL>sleep(sleeptime)<EOL><DEDENT>return next(self.urls)<EOL><DEDENT>urls = [<EOL>k<EOL>for k, v in self._url_counter.items()<EOL>if (<EOL>int(self.num_retries) >= <NUM_LIT:0><EOL>and v <= self.num_retries<EOL>and (k != self.url or len(self._url_counter) == <NUM_LIT:1>)<EOL>)<EOL>]<EOL>if not len(urls):<EOL><INDENT>raise NumRetriesReached<EOL><DEDENT>url = urls[<NUM_LIT:0>]<EOL>return url<EOL>", "docstring": "Find the next url in the list", "id": "f8231:c0:m6"}
{"signature": "@property<EOL><INDENT>def api_id(self):<DEDENT>", "body": "return self.connection.api_id<EOL>", "docstring": "This allows to list api_ids, if they have been registered through\n            api_register() -- LEGACY\n\n            In previous API version, one would connect and register to APIs\n            like this\n\n            .. code-block:: python\n\n                self.api_id[\"database\"] = self.database(api_id=1)\n                self.api_id[\"history\"] = self.history(api_id=1)\n                self.api_id[\"network_broadcast\"] = self.network_broadcast(\n                    api_id=1)", "id": "f8231:c0:m11"}
{"signature": "def config(self):", "body": "return self.blockchain.rpc.get_object(\"<STR_LIT>\")<EOL>", "docstring": "Returns object 2.0.0", "id": "f8234:c0:m7"}
{"signature": "def wait_for_and_get_block(self, block_number, blocks_waiting_for=None):", "body": "if not blocks_waiting_for:<EOL><INDENT>blocks_waiting_for = max(<NUM_LIT:1>, block_number - self.get_current_block_num())<EOL><DEDENT>repetition = <NUM_LIT:0><EOL>while self.get_current_block_num() < block_number:<EOL><INDENT>repetition += <NUM_LIT:1><EOL>time.sleep(self.block_interval)<EOL>if repetition > blocks_waiting_for * self.max_block_wait_repetition:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>block = self.blockchain.rpc.get_block(block_number)<EOL>repetition = <NUM_LIT:0><EOL>while not block:<EOL><INDENT>repetition += <NUM_LIT:1><EOL>time.sleep(self.block_interval)<EOL>if repetition > self.max_block_wait_repetition:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>block = self.blockchain.rpc.get_block(block_number)<EOL><DEDENT>return block<EOL>", "docstring": "Get the desired block from the chain, if the current head block is\n            smaller (for both head and irreversible) then we wait, but a\n            maxmimum of blocks_waiting_for * max_block_wait_repetition time\n            before failure.\n\n            :param int block_number: desired block number\n            :param int blocks_waiting_for: (default) difference between\n                block_number and current head how many blocks we are willing to\n                wait, positive int", "id": "f8234:c0:m14"}
{"signature": "def chainParameters(self):", "body": "if not self._parameters:<EOL><INDENT>self.update_chain_parameters()<EOL><DEDENT>return self._parameters<EOL>", "docstring": "The blockchain parameters, such as fees, and committee-controlled\n            parameters are returned here", "id": "f8234:c0:m3"}
{"signature": "def block_time(self, block_num):", "body": "return self.block_class(block_num, blockchain_instance=self.blockchain).time()<EOL>", "docstring": "Returns a datetime of the block with the given block\n            number.\n\n            :param int block_num: Block number", "id": "f8234:c0:m11"}
{"signature": "def get_all_accounts(self, start=\"<STR_LIT>\", stop=\"<STR_LIT>\", steps=<NUM_LIT>, **kwargs):", "body": "lastname = start<EOL>while True:<EOL><INDENT>ret = self.blockchain.rpc.lookup_accounts(lastname, steps)<EOL>for account in ret:<EOL><INDENT>yield account[<NUM_LIT:0>]<EOL>if account[<NUM_LIT:0>] == stop:<EOL><INDENT>raise StopIteration<EOL><DEDENT><DEDENT>if lastname == ret[-<NUM_LIT:1>][<NUM_LIT:0>]:<EOL><INDENT>raise StopIteration<EOL><DEDENT>lastname = ret[-<NUM_LIT:1>][<NUM_LIT:0>]<EOL>if len(ret) < steps:<EOL><INDENT>raise StopIteration<EOL><DEDENT><DEDENT>", "docstring": "Yields account names between start and stop.\n\n            :param str start: Start at this account name\n            :param str stop: Stop at this account name\n            :param int steps: Obtain ``steps`` ret with a single call from RPC", "id": "f8234:c0:m18"}
{"signature": "def get_network(self):", "body": "return self.blockchain.rpc.get_network()<EOL>", "docstring": "Identify the network\n\n            :returns: Network parameters\n            :rtype: dict", "id": "f8234:c0:m5"}
{"signature": "def awaitTxConfirmation(self, transaction, limit=<NUM_LIT:10>):", "body": "counter = <NUM_LIT:10><EOL>for block in self.blocks():<EOL><INDENT>counter += <NUM_LIT:1><EOL>for tx in block[\"<STR_LIT>\"]:<EOL><INDENT>if sorted(tx[\"<STR_LIT>\"]) == sorted(transaction[\"<STR_LIT>\"]):<EOL><INDENT>return tx<EOL><DEDENT><DEDENT>if counter > limit:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "Returns the transaction as seen by the blockchain after being\n            included into a block\n\n            .. note:: If you want instant confirmation, you need to instantiate\n                      class:`.blockchain.Blockchain` with\n                      ``mode=\"head\"``, otherwise, the call will wait until\n                      confirmed in an irreversible block.\n\n            .. note:: This method returns once the blockchain has included a\n                      transaction with the **same signature**. Even though the\n                      signature is not usually used to identify a transaction,\n                      it still cannot be forfeited and is derived from the\n                      transaction contented and thus identifies a transaction\n                      uniquely.", "id": "f8234:c0:m17"}
{"signature": "def info(self):", "body": "return self.blockchain.rpc.get_dynamic_global_properties()<EOL>", "docstring": "This call returns the *dynamic global properties*", "id": "f8234:c0:m2"}
{"signature": "@property<EOL><INDENT>def proposer(self):<DEDENT>", "body": "if \"<STR_LIT>\" in self:<EOL><INDENT>return self.account_class(self[\"<STR_LIT>\"])<EOL><DEDENT>", "docstring": "Return the proposer of the proposal if available in the backend,\n            else returns None", "id": "f8235:c0:m3"}
{"signature": "@property<EOL><INDENT>def is_bitasset(self):<DEDENT>", "body": "return \"<STR_LIT>\" in self<EOL>", "docstring": "Is the asset a market pegged asset?", "id": "f8236:c0:m5"}
{"signature": "@property<EOL><INDENT>def flags(self):<DEDENT>", "body": "return self[\"<STR_LIT>\"]<EOL>", "docstring": "List the permissions that are currently used (flags)", "id": "f8236:c0:m7"}
{"signature": "def update_cer(self, cer, account=None, **kwargs):", "body": "assert callable(self.blockchain.update_cer)<EOL>return self.blockchain.update_cer(<EOL>self[\"<STR_LIT>\"], cer, account=account, **kwargs<EOL>)<EOL>", "docstring": "Update the Core Exchange Rate (CER) of an asset", "id": "f8236:c0:m9"}
{"signature": "def refresh(self):", "body": "asset = self.blockchain.rpc.get_asset(self.identifier)<EOL>if not asset:<EOL><INDENT>raise AssetDoesNotExistsException(self.identifier)<EOL><DEDENT>super(Asset, self).__init__(asset, blockchain_instance=self.blockchain)<EOL>if self.full:<EOL><INDENT>if \"<STR_LIT>\" in asset:<EOL><INDENT>self[\"<STR_LIT>\"] = self.blockchain.rpc.get_object(<EOL>asset[\"<STR_LIT>\"]<EOL>)<EOL><DEDENT>self[\"<STR_LIT>\"] = self.blockchain.rpc.get_object(<EOL>asset[\"<STR_LIT>\"]<EOL>)<EOL><DEDENT>", "docstring": "Refresh the data from the API server", "id": "f8236:c0:m1"}
{"signature": "@property<EOL><INDENT>def symbol(self):<DEDENT>", "body": "return self[\"<STR_LIT>\"]<EOL>", "docstring": "Returns the symbol of the asset", "id": "f8237:c0:m3"}
{"signature": "@property<EOL><INDENT>def asset(self):<DEDENT>", "body": "if not self[\"<STR_LIT>\"]:<EOL><INDENT>self[\"<STR_LIT>\"] = self.asset_class(<EOL>self[\"<STR_LIT>\"], blockchain_instance=self.blockchain<EOL>)<EOL><DEDENT>return self[\"<STR_LIT>\"]<EOL>", "docstring": "Returns the asset as instance of :class:`.asset.Asset`", "id": "f8237:c0:m5"}
{"signature": "def __setitem__(self, key, value):", "body": "dict.__setitem__(self, key, value)<EOL>if (<EOL>\"<STR_LIT>\" in self and \"<STR_LIT>\" in self and self[\"<STR_LIT>\"] and self[\"<STR_LIT>\"]<EOL>):  <EOL><INDENT>dict.__setitem__(<EOL>self,<EOL>\"<STR_LIT>\",<EOL>self._safedivide(self[\"<STR_LIT>\"][\"<STR_LIT>\"], self[\"<STR_LIT>\"][\"<STR_LIT>\"]),<EOL>)<EOL><DEDENT>", "docstring": "Here we set \"price\" if we change quote or base", "id": "f8238:c0:m1"}
{"signature": "def invert(self):", "body": "tmp = self[\"<STR_LIT>\"]<EOL>self[\"<STR_LIT>\"] = self[\"<STR_LIT>\"]<EOL>self[\"<STR_LIT>\"] = tmp<EOL>if \"<STR_LIT>\" in self and self[\"<STR_LIT>\"]:<EOL><INDENT>self[\"<STR_LIT>\"] = self.amount_class(<EOL>self[\"<STR_LIT>\"][\"<STR_LIT>\"] * self[\"<STR_LIT>\"], self[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>)<EOL><DEDENT>return self<EOL>", "docstring": "Invert the price (e.g. go from ``USD/BTS`` into ``BTS/USD``)", "id": "f8238:c0:m7"}
{"signature": "def getPrivateKeyForPublicKey(self, pub):", "body": "if str(pub) not in self.store:<EOL><INDENT>raise KeyNotFound<EOL><DEDENT>return self.store.getPrivateKeyForPublicKey(str(pub))<EOL>", "docstring": "Obtain the private key for a given public key\n\n            :param str pub: Public Key", "id": "f8240:c0:m16"}
{"signature": "def getActiveKeyForAccount(self, name):", "body": "account = self.rpc.get_account(name)<EOL>for authority in account[\"<STR_LIT>\"][\"<STR_LIT>\"]:<EOL><INDENT>try:<EOL><INDENT>return self.getPrivateKeyForPublicKey(authority[<NUM_LIT:0>])<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Obtain owner Active Key for an account from the wallet database", "id": "f8240:c0:m21"}
{"signature": "def lock(self):", "body": "if self.store.is_encrypted():<EOL><INDENT>return self.store.lock()<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Lock the wallet database", "id": "f8240:c0:m8"}
{"signature": "def getAccountFromPublicKey(self, pub):", "body": "<EOL>names = list(self.getAccountsFromPublicKey(str(pub)))<EOL>if names:<EOL><INDENT>return names[<NUM_LIT:0>]<EOL><DEDENT>", "docstring": "Obtain the first account name from public key", "id": "f8240:c0:m24"}
{"signature": "def removeAccount(self, account):", "body": "accounts = self.getAccounts()<EOL>for a in accounts:<EOL><INDENT>if a[\"<STR_LIT:name>\"] == account:<EOL><INDENT>self.store.delete(a[\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>", "docstring": "Remove all keys associated with a given account", "id": "f8240:c0:m18"}
{"signature": "def getAccountsFromPublicKey(self, pub):", "body": "names = self.rpc.get_key_references([str(pub)])[<NUM_LIT:0>]<EOL>for name in names:<EOL><INDENT>yield name<EOL><DEDENT>", "docstring": "Obtain all accounts associated with a public key", "id": "f8240:c0:m23"}
{"signature": "def getAccountFromPrivateKey(self, wif):", "body": "pub = self.publickey_from_wif(wif)<EOL>return self.getAccountFromPublicKey(pub)<EOL>", "docstring": "Obtain account name from private key", "id": "f8240:c0:m22"}
{"signature": "def getAccounts(self):", "body": "pubkeys = self.getPublicKeys()<EOL>accounts = []<EOL>for pubkey in pubkeys:<EOL><INDENT>if pubkey[: len(self.prefix)] == self.prefix:<EOL><INDENT>accounts.extend(self.getAccountsFromPublicKey(pubkey))<EOL><DEDENT><DEDENT>return accounts<EOL>", "docstring": "Return all accounts installed in the wallet database", "id": "f8240:c0:m27"}
{"signature": "def created(self):", "body": "if len(self.store.getPublicKeys()):<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Do we have a wallet database already?", "id": "f8240:c0:m12"}
{"signature": "def getKeyType(self, account, pub):", "body": "for authority in [\"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>for key in account[authority][\"<STR_LIT>\"]:<EOL><INDENT>if str(pub) == key[<NUM_LIT:0>]:<EOL><INDENT>return authority<EOL><DEDENT><DEDENT><DEDENT>if str(pub) == account[\"<STR_LIT>\"][\"<STR_LIT>\"]:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>return None<EOL>", "docstring": "Get key type", "id": "f8240:c0:m26"}
{"signature": "def removePrivateKeyFromPublicKey(self, pub):", "body": "self.store.delete(str(pub))<EOL>", "docstring": "Remove a key from the wallet database", "id": "f8240:c0:m17"}
{"signature": "def getOwnerKeyForAccount(self, name):", "body": "account = self.rpc.get_account(name)<EOL>for authority in account[\"<STR_LIT>\"][\"<STR_LIT>\"]:<EOL><INDENT>key = self.getPrivateKeyForPublicKey(authority[<NUM_LIT:0>])<EOL>if key:<EOL><INDENT>return key<EOL><DEDENT><DEDENT>raise KeyNotFound<EOL>", "docstring": "Obtain owner Private Key for an account from the wallet database", "id": "f8240:c0:m19"}
{"signature": "def claim(self, account=None, **kwargs):", "body": "if not account:<EOL><INDENT>if \"<STR_LIT>\" in self.blockchain.config:<EOL><INDENT>account = self.blockchain.config[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>if not account:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>account = self.account_class(account, blockchain_instance=self.blockchain)<EOL>pubkeys = self.blockchain.wallet.getPublicKeys()<EOL>addresses = dict()<EOL>for p in pubkeys:<EOL><INDENT>if p[: len(self.blockchain.prefix)] != self.blockchain.prefix:<EOL><INDENT>continue<EOL><DEDENT>pubkey = self.publickey_class(p, prefix=self.blockchain.prefix)<EOL>addresses[<EOL>str(<EOL>self.address_class.from_pubkey(<EOL>pubkey,<EOL>compressed=False,<EOL>version=<NUM_LIT:0>,<EOL>prefix=self.blockchain.prefix,<EOL>)<EOL>)<EOL>] = pubkey<EOL>addresses[<EOL>str(<EOL>self.address_class.from_pubkey(<EOL>pubkey,<EOL>compressed=True,<EOL>version=<NUM_LIT:0>,<EOL>prefix=self.blockchain.prefix,<EOL>)<EOL>)<EOL>] = pubkey<EOL>addresses[<EOL>str(<EOL>self.address_class.from_pubkey(<EOL>pubkey,<EOL>compressed=False,<EOL>version=<NUM_LIT>,<EOL>prefix=self.blockchain.prefix,<EOL>)<EOL>)<EOL>] = pubkey<EOL>addresses[<EOL>str(<EOL>self.address_class.from_pubkey(<EOL>pubkey,<EOL>compressed=True,<EOL>version=<NUM_LIT>,<EOL>prefix=self.blockchain.prefix,<EOL>)<EOL>)<EOL>] = pubkey<EOL><DEDENT>if self[\"<STR_LIT>\"] not in addresses.keys():<EOL><INDENT>raise MissingKeyError(\"<STR_LIT>\".format(self[\"<STR_LIT>\"]))<EOL><DEDENT>op = self.operations.Balance_claim(<EOL>**{<EOL>\"<STR_LIT>\": {\"<STR_LIT>\": <NUM_LIT:0>, \"<STR_LIT>\": \"<STR_LIT>\"},<EOL>\"<STR_LIT>\": account[\"<STR_LIT:id>\"],<EOL>\"<STR_LIT>\": self[\"<STR_LIT:id>\"],<EOL>\"<STR_LIT>\": addresses[self[\"<STR_LIT>\"]],<EOL>\"<STR_LIT>\": self[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": self.blockchain.prefix,<EOL>}<EOL>)<EOL>signers = [<EOL>account[\"<STR_LIT:name>\"],  <EOL>addresses.get(self[\"<STR_LIT>\"]),  <EOL>]<EOL>return 
self.blockchain.finalizeOp(op, signers, \"<STR_LIT>\", **kwargs)<EOL>", "docstring": "Claim a balance from the genesis block\n\n            :param str balance_id: The identifier that identifies the balance\n                to claim (1.15.x)\n            :param str account: (optional) the account that owns the bet\n                (defaults to ``default_account``)", "id": "f8241:c0:m2"}
{"signature": "def incached(self, id):", "body": "return id in self._cache<EOL>", "docstring": "Is an element cached?", "id": "f8243:c1:m3"}
{"signature": "def get(self, key, default=None):", "body": "if key in self:<EOL><INDENT>return self[key]<EOL><DEDENT>else:<EOL><INDENT>return default<EOL><DEDENT>", "docstring": "Returns an element from the cache if available, else returns\n            the value provided as default or None", "id": "f8243:c0:m3"}
{"signature": "def refresh(self, *args, **kwargs):", "body": "raise NotImplementedError<EOL>", "docstring": "Interface that needs to be implemented. This method is\n            called when an object is requested that has not yet been\n            fetched/stored", "id": "f8243:c2:m0"}
{"signature": "def cache(self, key):", "body": "self.store(self, key)<EOL>", "docstring": "(legacy) store the current object with key ``key``.", "id": "f8243:c2:m6"}
{"signature": "def store(self, data, key=\"<STR_LIT:id>\"):", "body": "dict.__init__(self, data)<EOL>self._store_item(key)<EOL>", "docstring": "Cache the list\n\n            :param list data: List of objects to cache", "id": "f8243:c3:m1"}
{"signature": "def store(self, data, key=None, *args, **kwargs):", "body": "list.__init__(self, data)<EOL>self._store_items(self._cache_key(key))<EOL>", "docstring": "Cache the list\n\n            :param list data: List of objects to cache", "id": "f8243:c2:m3"}
{"signature": "def refresh(self):", "body": "dict.__init__(<EOL>self,<EOL>self.blockchain.rpc.get_object(self.identifier),<EOL>blockchain_instance=self.blockchain,<EOL>)<EOL>", "docstring": "This is the refresh method that overloads the prototype in\n            BlockchainObject.", "id": "f8243:c4:m0"}
{"signature": "def set_expiration(self, expiration):", "body": "self.default_expiration = expiration<EOL>", "docstring": "Set new default expiration time in seconds (default: 10s)", "id": "f8243:c0:m6"}
{"signature": "def balance(self, symbol):", "body": "if isinstance(symbol, dict) and \"<STR_LIT>\" in symbol:<EOL><INDENT>symbol = symbol[\"<STR_LIT>\"]<EOL><DEDENT>balances = self.balances<EOL>for b in balances:<EOL><INDENT>if b[\"<STR_LIT>\"] == symbol:<EOL><INDENT>return b<EOL><DEDENT><DEDENT>return self.amount_class(<NUM_LIT:0>, symbol, blockchain_instance=self.blockchain)<EOL>", "docstring": "Obtain the balance of a specific Asset. This call returns instances of\n            :class:`amount.Amount`.", "id": "f8244:c0:m7"}
{"signature": "def nolist(self, account):  ", "body": "assert callable(self.blockchain.account_whitelist)<EOL>return self.blockchain.account_whitelist(account, lists=[], account=self)<EOL>", "docstring": "Remove another account from any list of this account", "id": "f8244:c0:m12"}
{"signature": "@property<EOL><INDENT>def is_ltm(self):<DEDENT>", "body": "return self.get(\"<STR_LIT:id>\") == self.get(\"<STR_LIT>\")<EOL>", "docstring": "Is the account a lifetime member (LTM)?", "id": "f8244:c0:m5"}
{"signature": "@property<EOL><INDENT>def balances(self):<DEDENT>", "body": "balances = self.blockchain.rpc.get_account_balances(self[\"<STR_LIT:id>\"], [])<EOL>return [<EOL>self.amount_class(b, blockchain_instance=self.blockchain)<EOL>for b in balances<EOL>if int(b[\"<STR_LIT>\"]) > <NUM_LIT:0><EOL>]<EOL>", "docstring": "List balances of an account. This call returns instances of\n            :class:`amount.Amount`.", "id": "f8244:c0:m6"}
{"signature": "def whitelist(self, account):  ", "body": "assert callable(self.blockchain.account_whitelist)<EOL>return self.blockchain.account_whitelist(account, lists=[\"<STR_LIT>\"], account=self)<EOL>", "docstring": "Add another account to the whitelist of this account", "id": "f8244:c0:m10"}
{"signature": "@classmethod<EOL><INDENT>def set_shared_config(cls, config):<DEDENT>", "body": "assert isinstance(config, dict)<EOL>cls._sharedInstance.config.update(config)<EOL>if cls._sharedInstance.instance:<EOL><INDENT>cls._sharedInstance.instance = None<EOL><DEDENT>", "docstring": "This allows to set a config that will be used when calling\n            ``shared_blockchain_instance`` and allows to define the configuration\n            without requiring to actually create an instance", "id": "f8246:c1:m9"}
{"signature": "def get_instance_class(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Should return the Chain instance class, e.g. `bitshares.BitShares`", "id": "f8246:c1:m2"}
{"signature": "def shared_blockchain_instance(self):", "body": "if not self._sharedInstance.instance:<EOL><INDENT>klass = self.get_instance_class()<EOL>self._sharedInstance.instance = klass(**self._sharedInstance.config)<EOL><DEDENT>return self._sharedInstance.instance<EOL>", "docstring": "This method will initialize ``SharedInstance.instance`` and return it.\n            The purpose of this method is to have offer single default\n            instance that can be reused by multiple classes.", "id": "f8246:c1:m6"}
{"signature": "def define_classes(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Needs to define instance variables that provide classes", "id": "f8246:c1:m3"}
{"signature": "def sign(self, account=None, **kwargs):", "body": "if not account:<EOL><INDENT>if \"<STR_LIT>\" in self.blockchain.config:<EOL><INDENT>account = self.blockchain.config[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>if not account:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>account = self.account_class(account, blockchain_instance=self.blockchain)<EOL>wif = self.blockchain.wallet.getPrivateKeyForPublicKey(<EOL>account[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>)<EOL>payload = [<EOL>\"<STR_LIT>\",<EOL>account[\"<STR_LIT:name>\"],<EOL>\"<STR_LIT:key>\",<EOL>account[\"<STR_LIT>\"][\"<STR_LIT>\"],<EOL>\"<STR_LIT:time>\",<EOL>str(datetime.utcnow()),<EOL>\"<STR_LIT:text>\",<EOL>self.message,<EOL>]<EOL>enc_message = json.dumps(payload, separators=(\"<STR_LIT:U+002C>\", \"<STR_LIT::>\"))<EOL>signature = hexlify(sign_message(enc_message, wif)).decode(\"<STR_LIT:ascii>\")<EOL>return dict(signed=enc_message, payload=payload, signature=signature)<EOL>", "docstring": "Sign a message with an account's memo key\n\n            :param str account: (optional) the account that owns the bet\n                (defaults to ``default_account``)\n            :raises ValueError: If not account for signing is provided\n\n            :returns: the signed message encapsulated in a known format", "id": "f8247:c1:m1"}
{"signature": "def sign(self, account=None, **kwargs):", "body": "if not account:<EOL><INDENT>if \"<STR_LIT>\" in self.blockchain.config:<EOL><INDENT>account = self.blockchain.config[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>if not account:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>account = self.account_class(account, blockchain_instance=self.blockchain)<EOL>info = self.blockchain.info()<EOL>meta = dict(<EOL>timestamp=info[\"<STR_LIT:time>\"],<EOL>block=info[\"<STR_LIT>\"],<EOL>memokey=account[\"<STR_LIT>\"][\"<STR_LIT>\"],<EOL>account=account[\"<STR_LIT:name>\"],<EOL>)<EOL>wif = self.blockchain.wallet.getPrivateKeyForPublicKey(<EOL>account[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>)<EOL>message = self.message.strip()<EOL>enc_message = self.SIGNED_MESSAGE_META.format(**locals())<EOL>signature = hexlify(sign_message(enc_message, wif)).decode(\"<STR_LIT:ascii>\")<EOL>self.signed_by_account = account<EOL>self.signed_by_name = account[\"<STR_LIT:name>\"]<EOL>self.meta = meta<EOL>self.plain_message = message<EOL>return self.SIGNED_MESSAGE_ENCAPSULATED.format(<EOL>MESSAGE_SPLIT=self.MESSAGE_SPLIT, **locals()<EOL>)<EOL>", "docstring": "Sign a message with an account's memo key\n\n            :param str account: (optional) the account that owns the bet\n                (defaults to ``default_account``)\n            :raises ValueError: If not account for signing is provided\n\n            :returns: the signed message encapsulated in a known format", "id": "f8247:c0:m1"}
{"signature": "def time(self):", "body": "return parse_time(self[\"<STR_LIT>\"])<EOL>", "docstring": "Return a datetime instance for the timestamp of this block", "id": "f8248:c1:m2"}
{"signature": "def refresh(self):", "body": "block = self.blockchain.rpc.get_block_header(self.identifier)<EOL>if not block:<EOL><INDENT>raise BlockDoesNotExistsException<EOL><DEDENT>super(BlockHeader, self).__init__(<EOL>block, blockchain_instance=self.blockchain, use_cache=self._use_cache<EOL>)<EOL>", "docstring": "Even though blocks never change, you freshly obtain its contents\n            from an API with this method", "id": "f8248:c1:m1"}
{"signature": "def refresh(self):", "body": "block = self.blockchain.rpc.get_block(self.identifier)<EOL>if not block:<EOL><INDENT>raise BlockDoesNotExistsException<EOL><DEDENT>super(Block, self).__init__(<EOL>block, blockchain_instance=self.blockchain, use_cache=self._use_cache<EOL>)<EOL>", "docstring": "Even though blocks never change, you freshly obtain its contents\n            from an API with this method", "id": "f8248:c0:m1"}
{"signature": "def formatTimeFromNow(secs=None):", "body": "return datetime.utcfromtimestamp(time.time() + int(secs or <NUM_LIT:0>)).strftime(timeFormat)<EOL>", "docstring": "Properly Format Time that is `x` seconds in the future\n\n        :param int secs: Seconds to go in the future (`x>0`) or the\n                         past (`x<0`)\n        :return: Properly formatted time for Graphene (`%Y-%m-%dT%H:%M:%S`)\n        :rtype: str", "id": "f8249:m2"}
{"signature": "def formatTime(t):", "body": "if isinstance(t, float):<EOL><INDENT>return datetime.utcfromtimestamp(t).strftime(timeFormat)<EOL><DEDENT>if isinstance(t, datetime):<EOL><INDENT>return t.strftime(timeFormat)<EOL><DEDENT>", "docstring": "Properly Format Time for permlinks", "id": "f8249:m0"}
{"signature": "def formatTimeString(t):", "body": "return datetime.strptime(t, timeFormat)<EOL>", "docstring": "Properly Format Time for permlinks", "id": "f8249:m1"}
{"signature": "def decrypt(self, message):", "body": "if not message:<EOL><INDENT>return None<EOL><DEDENT>try:<EOL><INDENT>memo_wif = self.blockchain.wallet.getPrivateKeyForPublicKey(message[\"<STR_LIT:to>\"])<EOL>pubkey = message[\"<STR_LIT>\"]<EOL><DEDENT>except KeyNotFound:<EOL><INDENT>try:<EOL><INDENT>memo_wif = self.blockchain.wallet.getPrivateKeyForPublicKey(<EOL>message[\"<STR_LIT>\"]<EOL>)<EOL>pubkey = message[\"<STR_LIT:to>\"]<EOL><DEDENT>except KeyNotFound:<EOL><INDENT>raise MissingKeyError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format([message[\"<STR_LIT:to>\"], message[\"<STR_LIT>\"]])<EOL>)<EOL><DEDENT><DEDENT>if not hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>self.chain_prefix = self.blockchain.prefix<EOL><DEDENT>return memo.decode_memo(<EOL>self.privatekey_class(memo_wif),<EOL>self.publickey_class(pubkey, prefix=self.chain_prefix),<EOL>message.get(\"<STR_LIT>\"),<EOL>message.get(\"<STR_LIT:message>\"),<EOL>)<EOL>", "docstring": "Decrypt a message\n\n            :param dict message: encrypted memo message\n            :returns: decrypted message\n            :rtype: str", "id": "f8250:c0:m3"}
{"signature": "def unlock_wallet(self, *args, **kwargs):", "body": "self.blockchain.wallet.unlock(*args, **kwargs)<EOL>return self<EOL>", "docstring": "Unlock the library internal wallet", "id": "f8250:c0:m1"}
{"signature": "def sign(self, tx=None, wifs=[]):", "body": "if tx:<EOL><INDENT>txbuffer = self.transactionbuilder_class(tx, blockchain_instance=self)<EOL><DEDENT>else:<EOL><INDENT>txbuffer = self.txbuffer<EOL><DEDENT>txbuffer.appendWif(wifs)<EOL>txbuffer.appendMissingSignatures()<EOL>txbuffer.sign()<EOL>return txbuffer.json()<EOL>", "docstring": "Sign a provided transaction with the provided key(s)\n\n            :param dict tx: The transaction to be signed and returned\n            :param string wifs: One or many wif keys to use for signing\n                a transaction. If not present, the keys will be loaded\n                from the wallet as defined in \"missing_signatures\" key\n                of the transactions.", "id": "f8253:c0:m13"}
{"signature": "def broadcast(self, tx=None):", "body": "if tx:<EOL><INDENT>return self.transactionbuilder_class(<EOL>tx, blockchain_instance=self<EOL>).broadcast()<EOL><DEDENT>else:<EOL><INDENT>return self.txbuffer.broadcast()<EOL><DEDENT>", "docstring": "Broadcast a transaction to the Blockchain\n\n            :param tx tx: Signed transaction to broadcast", "id": "f8253:c0:m14"}
{"signature": "def new_wallet(self, pwd):", "body": "return self.wallet.create(pwd)<EOL>", "docstring": "Create a new wallet. This method basically only calls\n            :func:`wallet.Wallet.create`.\n\n            :param str pwd: Password to use for the new wallet\n            :raises exceptions.WalletExists: if there is already a\n                wallet created", "id": "f8253:c0:m9"}
{"signature": "def set_default_account(self, account):", "body": "self.account_class(account)<EOL>self.config[\"<STR_LIT>\"] = account<EOL>", "docstring": "Set the default account to be used", "id": "f8253:c0:m7"}
{"signature": "def tx(self):", "body": "return self._txbuffers[<NUM_LIT:0>]<EOL>", "docstring": "Returns the default transaction buffer", "id": "f8253:c0:m17"}
{"signature": "@property<EOL><INDENT>def txbuffer(self):<DEDENT>", "body": "return self.tx()<EOL>", "docstring": "Returns the currently active tx buffer", "id": "f8253:c0:m15"}
{"signature": "def proposal(self, proposer=None, proposal_expiration=None, proposal_review=None):", "body": "if not self._propbuffer:<EOL><INDENT>return self.new_proposal(<EOL>self.tx(), proposer, proposal_expiration, proposal_review<EOL>)<EOL><DEDENT>if proposer:<EOL><INDENT>self._propbuffer[<NUM_LIT:0>].set_proposer(proposer)<EOL><DEDENT>if proposal_expiration:<EOL><INDENT>self._propbuffer[<NUM_LIT:0>].set_expiration(proposal_expiration)<EOL><DEDENT>if proposal_review:<EOL><INDENT>self._propbuffer[<NUM_LIT:0>].set_review(proposal_review)<EOL><DEDENT>return self._propbuffer[<NUM_LIT:0>]<EOL>", "docstring": "Return the default proposal buffer\n\n            ... note:: If any parameter is set, the default proposal\n               parameters will be changed!", "id": "f8253:c0:m18"}
{"signature": "def broadcast(self):", "body": "<EOL>if not self._is_signed():<EOL><INDENT>self.sign()<EOL><DEDENT>if \"<STR_LIT>\" not in self or not self[\"<STR_LIT>\"]:<EOL><INDENT>log.warning(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>ret = self.json()<EOL>if self.blockchain.nobroadcast:<EOL><INDENT>log.warning(\"<STR_LIT>\")<EOL>self.clear()<EOL>return ret<EOL><DEDENT>try:<EOL><INDENT>if self.blockchain.blocking:<EOL><INDENT>ret = self.blockchain.rpc.broadcast_transaction_synchronous(<EOL>ret, api=\"<STR_LIT>\"<EOL>)<EOL>ret.update(**ret.get(\"<STR_LIT>\", {}))<EOL><DEDENT>else:<EOL><INDENT>self.blockchain.rpc.broadcast_transaction(ret, api=\"<STR_LIT>\")<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>raise e<EOL><DEDENT>finally:<EOL><INDENT>self.clear()<EOL><DEDENT>return ret<EOL>", "docstring": "Broadcast a transaction to the blockchain network\n\n            :param tx tx: Signed transaction to broadcast", "id": "f8254:c1:m24"}
{"signature": "def json(self):", "body": "raw = self.get_raw()<EOL>if not raw:<EOL><INDENT>return dict()<EOL><DEDENT>return raw.json()<EOL>", "docstring": "Return the json formatted version of this proposal", "id": "f8254:c0:m11"}
{"signature": "def clear(self):", "body": "self.ops = []<EOL>self.wifs = set()<EOL>self.signing_accounts = []<EOL>self[\"<STR_LIT>\"] = None<EOL>dict.__init__(self, {})<EOL>", "docstring": "Clear the transaction builder and start from scratch", "id": "f8254:c1:m25"}
{"signature": "def get_raw(self):", "body": "if not self.ops:<EOL><INDENT>return<EOL><DEDENT>ops = [self.operations.Op_wrapper(op=o) for o in list(self.ops)]<EOL>proposer = self.account_class(<EOL>self.proposer, blockchain_instance=self.blockchain<EOL>)<EOL>data = {<EOL>\"<STR_LIT>\": {\"<STR_LIT>\": <NUM_LIT:0>, \"<STR_LIT>\": \"<STR_LIT>\"},<EOL>\"<STR_LIT>\": proposer[\"<STR_LIT:id>\"],<EOL>\"<STR_LIT>\": formatTimeFromNow(self.proposal_expiration),<EOL>\"<STR_LIT>\": [o.json() for o in ops],<EOL>\"<STR_LIT>\": [],<EOL>}<EOL>if self.proposal_review:<EOL><INDENT>data.update({\"<STR_LIT>\": self.proposal_review})<EOL><DEDENT>ops = self.operations.Proposal_create(**data)<EOL>return self.operation_class(ops)<EOL>", "docstring": "Returns an instance of base \"Operations\" for further processing", "id": "f8254:c0:m13"}
{"signature": "def appendOps(self, ops, append_to=None):", "body": "if isinstance(ops, list):<EOL><INDENT>self.ops.extend(ops)<EOL><DEDENT>else:<EOL><INDENT>self.ops.append(ops)<EOL><DEDENT>self._set_require_reconstruction()<EOL>", "docstring": "Append op(s) to the transaction builder\n\n            :param list ops: One or a list of operations", "id": "f8254:c1:m14"}
{"signature": "def json(self):", "body": "if not self._is_constructed() or self._is_require_reconstruction():<EOL><INDENT>self.constructTx()<EOL><DEDENT>return dict(self)<EOL>", "docstring": "Show the transaction as plain json", "id": "f8254:c1:m13"}
{"signature": "def get_parent(self):", "body": "return self<EOL>", "docstring": "TransactionBuilders don't have parents, they are their own parent", "id": "f8254:c1:m12"}
{"signature": "def get_parent(self):", "body": "return self.parent<EOL>", "docstring": "This allows to refer to the actual parent of the Proposal", "id": "f8254:c0:m9"}
{"signature": "def appendOps(self, ops, append_to=None):", "body": "if isinstance(ops, list):<EOL><INDENT>self.ops.extend(ops)<EOL><DEDENT>else:<EOL><INDENT>self.ops.append(ops)<EOL><DEDENT>parent = self.parent<EOL>if parent:<EOL><INDENT>parent._set_require_reconstruction()<EOL><DEDENT>", "docstring": "Append op(s) to the transaction builder\n\n            :param list ops: One or a list of operations", "id": "f8254:c0:m6"}
{"signature": "def addSigningInformation(self, account, permission):", "body": "self.constructTx()<EOL>self[\"<STR_LIT>\"] = self.blockchain.rpc.chain_params<EOL>if isinstance(account, self.publickey_class):<EOL><INDENT>self[\"<STR_LIT>\"] = [str(account)]<EOL><DEDENT>else:<EOL><INDENT>accountObj = self.account_class(account)<EOL>authority = accountObj[permission]<EOL>self.update({\"<STR_LIT>\": {accountObj[\"<STR_LIT:name>\"]: authority}})<EOL>for account_auth in authority[\"<STR_LIT>\"]:<EOL><INDENT>account_auth_account = self.account_class(account_auth[<NUM_LIT:0>])<EOL>self[\"<STR_LIT>\"].update(<EOL>{account_auth[<NUM_LIT:0>]: account_auth_account.get(permission)}<EOL>)<EOL><DEDENT>self[\"<STR_LIT>\"] = [x[<NUM_LIT:0>] for x in authority[\"<STR_LIT>\"]]<EOL>for account_auth in authority[\"<STR_LIT>\"]:<EOL><INDENT>account_auth_account = self.account_class(account_auth[<NUM_LIT:0>])<EOL>self[\"<STR_LIT>\"].extend(<EOL>[x[<NUM_LIT:0>] for x in account_auth_account[permission][\"<STR_LIT>\"]]<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "This is a private method that adds side information to a\n            unsigned/partial transaction in order to simplify later\n            signing (e.g. for multisig or coldstorage)\n\n            FIXME: Does not work with owner keys!", "id": "f8254:c1:m26"}
{"signature": "def getOperationNameForId(self, i):", "body": "for key in self.ops:<EOL><INDENT>if int(self.ops[key]) is int(i):<EOL><INDENT>return key<EOL><DEDENT><DEDENT>raise ValueError(\"<STR_LIT>\" % i)<EOL>", "docstring": "Convert an operation id into the corresponding string", "id": "f8259:c0:m18"}
{"signature": "@data.setter<EOL><INDENT>def data(self, data):  <DEDENT>", "body": "self.update(data)<EOL>", "docstring": "Set data through a setter (backwards compatibility)", "id": "f8259:c1:m5"}
{"signature": "def formatTimeFromNow(secs=<NUM_LIT:0>):", "body": "return datetime.utcfromtimestamp(time.time() + int(secs)).strftime(timeformat)<EOL>", "docstring": "Properly Format Time that is `x` seconds in the future\n\n     :param int secs: Seconds to go in the future (`x>0`) or the past (`x<0`)\n     :return: Properly formatted time for Graphene (`%Y-%m-%dT%H:%M:%S`)\n     :rtype: str", "id": "f8261:m1"}
{"signature": "def decrypt(encrypted_privkey, passphrase):", "body": "d = unhexlify(base58decode(encrypted_privkey))<EOL>d = d[<NUM_LIT:2>:]  <EOL>flagbyte = d[<NUM_LIT:0>:<NUM_LIT:1>]  <EOL>d = d[<NUM_LIT:1>:]  <EOL>assert flagbyte == b\"<STR_LIT>\", \"<STR_LIT>\"<EOL>salt = d[<NUM_LIT:0>:<NUM_LIT:4>]<EOL>d = d[<NUM_LIT:4>:-<NUM_LIT:4>]<EOL>if SCRYPT_MODULE == \"<STR_LIT>\":  <EOL><INDENT>key = scrypt.hash(passphrase, salt, <NUM_LIT>, <NUM_LIT:8>, <NUM_LIT:8>)<EOL><DEDENT>elif SCRYPT_MODULE == \"<STR_LIT>\":  <EOL><INDENT>key = scrypt.scrypt(bytes(passphrase, \"<STR_LIT:utf-8>\"), salt, <NUM_LIT>, <NUM_LIT:8>, <NUM_LIT:8>)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")  <EOL><DEDENT>derivedhalf1 = key[<NUM_LIT:0>:<NUM_LIT:32>]<EOL>derivedhalf2 = key[<NUM_LIT:32>:<NUM_LIT:64>]<EOL>encryptedhalf1 = d[<NUM_LIT:0>:<NUM_LIT:16>]<EOL>encryptedhalf2 = d[<NUM_LIT:16>:<NUM_LIT:32>]<EOL>aes = AES.new(derivedhalf2, AES.MODE_ECB)<EOL>decryptedhalf2 = aes.decrypt(encryptedhalf2)<EOL>decryptedhalf1 = aes.decrypt(encryptedhalf1)<EOL>privraw = decryptedhalf1 + decryptedhalf2<EOL>privraw = \"<STR_LIT>\" % (int(hexlify(privraw), <NUM_LIT:16>) ^ int(hexlify(derivedhalf1), <NUM_LIT:16>))<EOL>wif = Base58(privraw)<EOL>\"\"\"<STR_LIT>\"\"\"<EOL>privkey = PrivateKey(format(wif, \"<STR_LIT>\"))<EOL>addr = format(privkey.bitcoin.address, \"<STR_LIT>\")<EOL>a = _bytes(addr)<EOL>saltverify = hashlib.sha256(hashlib.sha256(a).digest()).digest()[<NUM_LIT:0>:<NUM_LIT:4>]<EOL>if saltverify != salt:  <EOL><INDENT>raise SaltException(\"<STR_LIT>\")<EOL><DEDENT>return wif<EOL>", "docstring": "BIP0038 non-ec-multiply decryption. Returns WIF privkey.\n\n    :param Base58 encrypted_privkey: Private key\n    :param str passphrase: UTF-8 encoded passphrase for decryption\n    :return: BIP0038 non-ec-multiply decrypted key\n    :rtype: Base58\n    :raises SaltException: if checksum verification failed (e.g. wrong\n        password)", "id": "f8262:m2"}
{"signature": "def encrypt(privkey, passphrase):", "body": "if isinstance(privkey, str):<EOL><INDENT>privkey = PrivateKey(privkey)<EOL><DEDENT>else:<EOL><INDENT>privkey = PrivateKey(repr(privkey))<EOL><DEDENT>privkeyhex = repr(privkey)  <EOL>addr = format(privkey.bitcoin.address, \"<STR_LIT>\")<EOL>a = _bytes(addr)<EOL>salt = hashlib.sha256(hashlib.sha256(a).digest()).digest()[<NUM_LIT:0>:<NUM_LIT:4>]<EOL>if SCRYPT_MODULE == \"<STR_LIT>\":  <EOL><INDENT>key = scrypt.hash(passphrase, salt, <NUM_LIT>, <NUM_LIT:8>, <NUM_LIT:8>)<EOL><DEDENT>elif SCRYPT_MODULE == \"<STR_LIT>\":  <EOL><INDENT>key = scrypt.scrypt(bytes(passphrase, \"<STR_LIT:utf-8>\"), salt, <NUM_LIT>, <NUM_LIT:8>, <NUM_LIT:8>)<EOL><DEDENT>else:  <EOL><INDENT>raise ValueError(\"<STR_LIT>\")  <EOL><DEDENT>(derived_half1, derived_half2) = (key[:<NUM_LIT:32>], key[<NUM_LIT:32>:])<EOL>aes = AES.new(derived_half2, AES.MODE_ECB)<EOL>encrypted_half1 = _encrypt_xor(privkeyhex[:<NUM_LIT:32>], derived_half1[:<NUM_LIT:16>], aes)<EOL>encrypted_half2 = _encrypt_xor(privkeyhex[<NUM_LIT:32>:], derived_half1[<NUM_LIT:16>:], aes)<EOL>\"<STR_LIT>\"<EOL>payload = b\"<STR_LIT>\" + b\"<STR_LIT>\" + b\"<STR_LIT>\" + salt + encrypted_half1 + encrypted_half2<EOL>\"<STR_LIT>\"<EOL>checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:<NUM_LIT:4>]<EOL>privatkey = hexlify(payload + checksum).decode(\"<STR_LIT:ascii>\")<EOL>return Base58(privatkey)<EOL>", "docstring": "BIP0038 non-ec-multiply encryption. Returns BIP0038 encrypted privkey.\n\n    :param privkey: Private key\n    :type privkey: Base58\n    :param str passphrase: UTF-8 encoded passphrase for encryption\n    :return: BIP0038 non-ec-multiply encrypted wif key\n    :rtype: Base58", "id": "f8262:m1"}
{"signature": "def getOperationName(id: str):", "body": "if isinstance(id, str):<EOL><INDENT>assert id in operations.keys(), \"<STR_LIT>\".format(id)<EOL>return id<EOL><DEDENT>elif isinstance(id, int):<EOL><INDENT>return getOperationNameForId(id)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError<EOL><DEDENT>", "docstring": "This method returns the name representation of an operation given\n        its value as used in the API", "id": "f8263:m1"}
{"signature": "def varintdecode(data):  ", "body": "shift = <NUM_LIT:0><EOL>result = <NUM_LIT:0><EOL>for b in bytes(data):<EOL><INDENT>result |= (b & <NUM_LIT>) << shift<EOL>if not (b & <NUM_LIT>):<EOL><INDENT>break<EOL><DEDENT>shift += <NUM_LIT:7><EOL><DEDENT>return result<EOL>", "docstring": "Varint decoding", "id": "f8264:m1"}
{"signature": "def variable_buffer(s):", "body": "return varint(len(s)) + s<EOL>", "docstring": "Encode variable length buffer", "id": "f8264:m2"}
{"signature": "def __bytes__(self):", "body": "return unhexlify(self._hex)<EOL>", "docstring": "Return raw bytes\n\n            :return: Raw bytes of instance\n            :rtype: bytes", "id": "f8266:c0:m4"}
{"signature": "def __str__(self):", "body": "return gphBase58CheckEncode(self._hex)<EOL>", "docstring": "Return graphene-base58CheckEncoded string of data\n\n            :return: Base58 encoded data\n            :rtype: str", "id": "f8266:c0:m3"}
{"signature": "def __format__(self, _format):", "body": "return format(self._address, _format)<EOL>", "docstring": "May be issued to get valid \"MUSE\", \"PLAY\" or any other Graphene compatible\n            address with corresponding prefix.", "id": "f8268:c2:m4"}
{"signature": "def __bytes__(self):", "body": "return bytes(self._address)<EOL>", "docstring": "Returns the raw content of the ``Base58CheckEncoded`` address", "id": "f8268:c2:m5"}
{"signature": "def get_private(self):", "body": "encoded = \"<STR_LIT>\" % (self.brainkey, self.sequence)<EOL>a = _bytes(encoded)<EOL>s = hashlib.sha256(hashlib.sha512(a).digest()).digest()<EOL>return PrivateKey(hexlify(s).decode(\"<STR_LIT:ascii>\"), prefix=self.prefix)<EOL>", "docstring": "Derive private key from the brain key and the current sequence\n            number", "id": "f8268:c1:m5"}
{"signature": "@property<EOL><INDENT>def address(self):<DEDENT>", "body": "return GrapheneAddress.from_pubkey(repr(self), prefix=self.prefix)<EOL>", "docstring": "Obtain a GrapheneAddress from a public key", "id": "f8268:c4:m16"}
{"signature": "def get_private(self):", "body": "a = _bytes(self.account + self.role + self.password)<EOL>s = hashlib.sha256(a).digest()<EOL>return PrivateKey(hexlify(s).decode(\"<STR_LIT:ascii>\"), prefix=self.prefix)<EOL>", "docstring": "Derive the private key from the account name, role and\n            password", "id": "f8268:c0:m1"}
{"signature": "def __format__(self, _format):", "body": "return format(self._wif, _format)<EOL>", "docstring": "Formats the instance of:doc:`Base58 <base58>` according to\n            ``_format``", "id": "f8268:c5:m10"}
{"signature": "def _derive_y_from_x(self, x, is_even):", "body": "curve = ecdsa.SECP256k1.curve<EOL>a, b, p = curve.a(), curve.b(), curve.p()<EOL>alpha = (pow(x, <NUM_LIT:3>, p) + a * x + b) % p<EOL>beta = ecdsa.numbertheory.square_root_mod_prime(alpha, p)<EOL>if (beta % <NUM_LIT:2>) == is_even:<EOL><INDENT>beta = p - beta<EOL><DEDENT>return beta<EOL>", "docstring": "Derive y point from x point", "id": "f8268:c4:m3"}
{"signature": "def __str__(self):", "body": "return format(self._pk, self.prefix)<EOL>", "docstring": "Returns the readable Graphene public key. This call is equivalent to\n            ``format(PublicKey, \"GPH\")``", "id": "f8268:c4:m11"}
{"signature": "def normalize(self, brainkey):", "body": "return \"<STR_LIT:U+0020>\".join(re.compile(\"<STR_LIT>\").split(brainkey))<EOL>", "docstring": "Correct formating with single whitespace syntax and no trailing\n            space", "id": "f8268:c1:m3"}
{"signature": "def __format__(self, _format):", "body": "return format(self._pk, _format)<EOL>", "docstring": "Formats the instance of:doc:`Base58 <base58>` according to\n            ``_format``", "id": "f8268:c4:m12"}
{"signature": "def derive_private_key(self, sequence):", "body": "encoded = \"<STR_LIT>\" % (str(self), sequence)<EOL>a = bytes(encoded, \"<STR_LIT:ascii>\")<EOL>s = hashlib.sha256(hashlib.sha512(a).digest()).digest()<EOL>return PrivateKey(hexlify(s).decode(\"<STR_LIT:ascii>\"), prefix=self.pubkey.prefix)<EOL>", "docstring": "Derive new private key from this private key and an arbitrary\n            sequence number", "id": "f8268:c5:m7"}
{"signature": "def __bytes__(self):", "body": "return bytes(self._pk)<EOL>", "docstring": "Returns the raw public key (has length 33)", "id": "f8268:c4:m13"}
{"signature": "def child(self, offset256):", "body": "a = bytes(self) + offset256<EOL>s = hashlib.sha256(a).digest()<EOL>return self.add(s)<EOL>", "docstring": "Derive new public key from this key and a sha256 \"offset\"", "id": "f8268:c4:m7"}
{"signature": "def derive_from_seed(self, offset):", "body": "seed = int(hexlify(bytes(self)).decode(\"<STR_LIT:ascii>\"), <NUM_LIT:16>)<EOL>z = int(hexlify(offset).decode(\"<STR_LIT:ascii>\"), <NUM_LIT:16>)<EOL>order = ecdsa.SECP256k1.order<EOL>secexp = (seed + z) % order<EOL>secret = \"<STR_LIT>\" % secexp<EOL>if len(secret) < <NUM_LIT:64>: <EOL><INDENT>secret = (\"<STR_LIT:0>\" * (<NUM_LIT:64>-len(secret))) + secret<EOL><DEDENT>return PrivateKey(secret, prefix=self.pubkey.prefix)<EOL>", "docstring": "Derive private key using \"generate_from_seed\" method.\n            Here, the key itself serves as a `seed`, and `offset`\n            is expected to be a sha256 digest.", "id": "f8268:c5:m9"}
{"signature": "@classmethod<EOL><INDENT>def from_pubkey(cls, pubkey, compressed=True, version=<NUM_LIT>, prefix=None):<DEDENT>", "body": "<EOL>pubkey = PublicKey(pubkey, prefix=prefix or Prefix.prefix)<EOL>if compressed:<EOL><INDENT>pubkey_plain = pubkey.compressed()<EOL><DEDENT>else:<EOL><INDENT>pubkey_plain = pubkey.uncompressed()<EOL><DEDENT>sha = hashlib.sha256(unhexlify(pubkey_plain)).hexdigest()<EOL>rep = hexlify(ripemd160(sha)).decode(\"<STR_LIT:ascii>\")<EOL>s = (\"<STR_LIT>\" % version) + rep<EOL>result = s + hexlify(doublesha256(s)[:<NUM_LIT:4>]).decode(\"<STR_LIT:ascii>\")<EOL>result = hexlify(ripemd160(result)).decode(\"<STR_LIT:ascii>\")<EOL>return cls(result, prefix=pubkey.prefix)<EOL>", "docstring": "Load an address provided the public key.\n\n            Version: 56 => PTS", "id": "f8268:c2:m1"}
{"signature": "def get_blind_private(self):", "body": "a = _bytes(self.brainkey)<EOL>return PrivateKey(hashlib.sha256(a).hexdigest(), prefix=self.prefix)<EOL>", "docstring": "Derive private key from the brain key (and no sequence number)", "id": "f8268:c1:m6"}
{"signature": "def recoverPubkeyParameter(message, digest, signature, pubkey):", "body": "if not isinstance(message, bytes):<EOL><INDENT>message = bytes(message, \"<STR_LIT:utf-8>\")  <EOL><DEDENT>for i in range(<NUM_LIT:0>, <NUM_LIT:4>):<EOL><INDENT>if SECP256K1_MODULE == \"<STR_LIT>\":  <EOL><INDENT>sig = pubkey.ecdsa_recoverable_deserialize(signature, i)<EOL>p = secp256k1.PublicKey(pubkey.ecdsa_recover(message, sig))<EOL>if p.serialize() == pubkey.serialize():<EOL><INDENT>return i<EOL><DEDENT><DEDENT>elif SECP256K1_MODULE == \"<STR_LIT>\" and not isinstance(pubkey, PublicKey):<EOL><INDENT>p = recover_public_key(digest, signature, i, message)<EOL>p_comp = hexlify(compressedPubkey(p))<EOL>pubkey_comp = hexlify(compressedPubkey(pubkey))<EOL>if p_comp == pubkey_comp:<EOL><INDENT>return i<EOL><DEDENT><DEDENT>else:  <EOL><INDENT>p = recover_public_key(digest, signature, i)<EOL>p_comp = hexlify(compressedPubkey(p))<EOL>p_string = hexlify(p.to_string())<EOL>if isinstance(pubkey, PublicKey):  <EOL><INDENT>pubkey_string = bytes(repr(pubkey), \"<STR_LIT:ascii>\")<EOL><DEDENT>else:  <EOL><INDENT>pubkey_string = hexlify(pubkey.to_string())<EOL><DEDENT>if p_string == pubkey_string or p_comp == pubkey_string:  <EOL><INDENT>return i<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Use to derive a number that allows to easily recover the\n        public key from the signature", "id": "f8269:m3"}
{"signature": "def recover_public_key(digest, signature, i, message=None):", "body": "<EOL>curve = ecdsa.SECP256k1.curve<EOL>G = ecdsa.SECP256k1.generator<EOL>order = ecdsa.SECP256k1.order<EOL>yp = i % <NUM_LIT:2><EOL>r, s = ecdsa.util.sigdecode_string(signature, order)<EOL>x = r + (i // <NUM_LIT:2>) * order<EOL>alpha = ((x * x * x) + (curve.a() * x) + curve.b()) % curve.p()<EOL>beta = ecdsa.numbertheory.square_root_mod_prime(alpha, curve.p())<EOL>y = beta if (beta - yp) % <NUM_LIT:2> == <NUM_LIT:0> else curve.p() - beta<EOL>R = ecdsa.ellipticcurve.Point(curve, x, y, order)<EOL>e = ecdsa.util.string_to_number(digest)<EOL>Q = ecdsa.numbertheory.inverse_mod(r, order) * (s * R + (-e % order) * G)<EOL>if SECP256K1_MODULE == \"<STR_LIT>\" and message is not None:<EOL><INDENT>if not isinstance(message, bytes):<EOL><INDENT>message = bytes(message, \"<STR_LIT:utf-8>\")  <EOL><DEDENT>sigder = encode_dss_signature(r, s)<EOL>public_key = ec.EllipticCurvePublicNumbers(<EOL>Q._Point__x, Q._Point__y, ec.SECP256K1()<EOL>).public_key(default_backend())<EOL>public_key.verify(sigder, message, ec.ECDSA(hashes.SHA256()))<EOL>return public_key<EOL><DEDENT>else:<EOL><INDENT>if not ecdsa.VerifyingKey.from_public_point(<EOL>Q, curve=ecdsa.SECP256k1<EOL>).verify_digest(<EOL>signature, digest, sigdecode=ecdsa.util.sigdecode_string<EOL>):  <EOL><INDENT>return None  <EOL><DEDENT>return ecdsa.VerifyingKey.from_public_point(<EOL>Q, curve=ecdsa.SECP256k1<EOL>)<EOL><DEDENT>", "docstring": "Recover the public key from the the signature", "id": "f8269:m2"}
{"signature": "@property<EOL><INDENT>def id(self):<DEDENT>", "body": "<EOL>sigs = self.data[\"<STR_LIT>\"]<EOL>self.data.pop(\"<STR_LIT>\", None)<EOL>h = hashlib.sha256(bytes(self)).digest()<EOL>self.data[\"<STR_LIT>\"] = sigs<EOL>return hexlify(h[:<NUM_LIT:20>]).decode(\"<STR_LIT:ascii>\")<EOL>", "docstring": "The transaction id of this transaction", "id": "f8271:c1:m4"}
{"signature": "def formatTime(t):", "body": "if isinstance(t, (float, int)):<EOL><INDENT>return datetime.utcfromtimestamp(t).strftime(timeFormat)<EOL><DEDENT>elif isinstance(t, datetime):<EOL><INDENT>return t.strftime(timeFormat)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Properly Format Time for permlinks", "id": "f8272:m2"}
{"signature": "def _bytes(x):  ", "body": "if sys.version > \"<STR_LIT:3>\":<EOL><INDENT>return bytes(x, \"<STR_LIT:utf8>\")<EOL><DEDENT>else:  <EOL><INDENT>return x.__bytes__()<EOL><DEDENT>", "docstring": "Python3 and Python2 compatibility", "id": "f8272:m0"}
{"signature": "def formatTimeFromNow(secs=<NUM_LIT:0>):", "body": "return datetime.utcfromtimestamp(time.time() + int(secs)).strftime(timeFormat)<EOL>", "docstring": "Properly Format Time that is `x` seconds in the future\n\n        :param int secs: Seconds to go in the future (`x>0`) or the\n                         past (`x<0`)\n        :return: Properly formated time for Graphene (`%Y-%m-%dT%H:%M:%S`)\n        :rtype: str", "id": "f8272:m3"}
{"signature": "def encode_memo(priv, pub, nonce, message):", "body": "shared_secret = get_shared_secret(priv, pub)<EOL>aes = init_aes(shared_secret, nonce)<EOL>\"<STR_LIT>\"<EOL>raw = bytes(message, \"<STR_LIT:utf8>\")<EOL>checksum = hashlib.sha256(raw).digest()<EOL>raw = checksum[<NUM_LIT:0>:<NUM_LIT:4>] + raw<EOL>\"<STR_LIT>\"<EOL>raw = _pad(raw, <NUM_LIT:16>)<EOL>\"<STR_LIT>\"<EOL>return hexlify(aes.encrypt(raw)).decode(\"<STR_LIT:ascii>\")<EOL>", "docstring": "Encode a message with a shared secret between Alice and Bob\n\n        :param PrivateKey priv: Private Key (of Alice)\n        :param PublicKey pub: Public Key (of Bob)\n        :param int nonce: Random nonce\n        :param str message: Memo message\n        :return: Encrypted message\n        :rtype: hex", "id": "f8273:m4"}
{"signature": "def decode_memo(priv, pub, nonce, message):", "body": "shared_secret = get_shared_secret(priv, pub)<EOL>aes = init_aes(shared_secret, nonce)<EOL>\"<STR_LIT>\"<EOL>raw = bytes(message, \"<STR_LIT:ascii>\")<EOL>cleartext = aes.decrypt(unhexlify(raw))<EOL>\"<STR_LIT>\"<EOL>checksum = cleartext[<NUM_LIT:0>:<NUM_LIT:4>]<EOL>message = cleartext[<NUM_LIT:4>:]<EOL>message = _unpad(message, <NUM_LIT:16>)<EOL>\"<STR_LIT>\"<EOL>check = hashlib.sha256(message).digest()[<NUM_LIT:0>:<NUM_LIT:4>]<EOL>if check != checksum:  <EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return message.decode(\"<STR_LIT:utf8>\")<EOL>", "docstring": "Decode a message with a shared secret between Alice and Bob\n\n        :param PrivateKey priv: Private Key (of Bob)\n        :param PublicKey pub: Public Key (of Alice)\n        :param int nonce: Nonce used for Encryption\n        :param bytes message: Encrypted Memo message\n        :return: Decrypted message\n        :rtype: str\n        :raise ValueError: if message cannot be decoded as valid UTF-8\n               string", "id": "f8273:m5"}
{"signature": "def get_shared_secret(priv, pub):", "body": "pub_point = pub.point()<EOL>priv_point = int(repr(priv), <NUM_LIT:16>)<EOL>res = pub_point * priv_point<EOL>res_hex = \"<STR_LIT>\" % res.x()<EOL>res_hex = \"<STR_LIT:0>\" * (<NUM_LIT:64> - len(res_hex)) + res_hex<EOL>return res_hex<EOL>", "docstring": "Derive the share secret between ``priv`` and ``pub``\n\n        :param `Base58` priv: Private Key\n        :param `Base58` pub: Public Key\n        :return: Shared secret\n        :rtype: hex\n\n        The shared secret is generated such that::\n\n            Pub(Alice) * Priv(Bob) = Pub(Bob) * Priv(Alice)", "id": "f8273:m0"}
{"signature": "def init_aes(shared_secret, nonce):", "body": "\"<STR_LIT>\"<EOL>ss = hashlib.sha512(unhexlify(shared_secret)).digest()<EOL>\"<STR_LIT>\"<EOL>seed = bytes(str(nonce), \"<STR_LIT:ascii>\") + hexlify(ss)<EOL>seed_digest = hexlify(hashlib.sha512(seed).digest()).decode(\"<STR_LIT:ascii>\")<EOL>\"<STR_LIT>\"<EOL>key = unhexlify(seed_digest[<NUM_LIT:0>:<NUM_LIT:64>])<EOL>iv = unhexlify(seed_digest[<NUM_LIT:64>:<NUM_LIT>])<EOL>return AES.new(key, AES.MODE_CBC, iv)<EOL>", "docstring": "Initialize AES instance\n\n        :param hex shared_secret: Shared Secret to use as encryption key\n        :param int nonce: Random nonce\n        :return: AES instance\n        :rtype: AES", "id": "f8273:m1"}
{"signature": "def iter_nodes(self, key):", "body": "if len(self.ring) == <NUM_LIT:0>:<EOL><INDENT>yield None, None<EOL><DEDENT>node, pos = self.get_node_pos(key)<EOL>for k in self.sorted_keys[pos:]:<EOL><INDENT>yield k, self.ring[k]<EOL><DEDENT>", "docstring": "Given a string key it returns the nodes as a generator that can hold the key.", "id": "f8280:c0:m5"}
{"signature": "def add_node(self, node):", "body": "self.nodes.append(node)<EOL>for x in xrange(self.replicas):<EOL><INDENT>ring_key = self.hash_method(b(\"<STR_LIT>\" % (node, x)))<EOL>self.ring[ring_key] = node<EOL>self.sorted_keys.append(ring_key)<EOL><DEDENT>self.sorted_keys.sort()<EOL>", "docstring": "Adds a `node` to the hash ring (including a number of replicas).", "id": "f8280:c0:m1"}
{"signature": "def lock(self, name, timeout=None, sleep=<NUM_LIT:0.1>):", "body": "return Lock(self, name, timeout=timeout, sleep=sleep)<EOL>", "docstring": "Return a new Lock object using key ``name`` that mimics\nthe behavior of threading.Lock.\n\nIf specified, ``timeout`` indicates a maximum life for the lock.\nBy default, it will remain locked until release() is called.\n\n``sleep`` indicates the amount of time to sleep per loop iteration\nwhen the lock is in blocking mode and another client is currently\nholding the lock.", "id": "f8288:c0:m14"}
{"signature": "def clear(self):", "body": "for env in list(self):<EOL><INDENT>self.remove(env)<EOL><DEDENT>", "docstring": "Clear the environment cache", "id": "f8299:c0:m4"}
{"signature": "def load(self):", "body": "if not os.path.exists(self.path):<EOL><INDENT>return<EOL><DEDENT>with open(self.path, '<STR_LIT:r>') as f:<EOL><INDENT>env_data = yaml.load(f.read())<EOL><DEDENT>if env_data:<EOL><INDENT>for env in env_data:<EOL><INDENT>self.add(VirtualEnvironment(env['<STR_LIT:root>']))<EOL><DEDENT><DEDENT>", "docstring": "Load the environment cache from disk.", "id": "f8299:c0:m3"}
{"signature": "def validate(self):", "body": "for env in list(self):<EOL><INDENT>if not env.exists:<EOL><INDENT>self.remove(env)<EOL><DEDENT><DEDENT>", "docstring": "Validate all the entries in the environment cache.", "id": "f8299:c0:m2"}
{"signature": "def path_is_venv_resolver(resolver, path):", "body": "if isinstance(path, VirtualEnvironment):<EOL><INDENT>return path<EOL><DEDENT>raise ResolveError<EOL>", "docstring": "Checks if path is already a VirtualEnvironment", "id": "f8300:m0"}
{"signature": "def path_resolver(resolver, path):", "body": "path = unipath(path)<EOL>if is_environment(path):<EOL><INDENT>return VirtualEnvironment(path)<EOL><DEDENT>raise ResolveError<EOL>", "docstring": "Resolves VirtualEnvironments with a relative or absolute path", "id": "f8300:m1"}
{"signature": "def modules_path_resolver(resolver, path):", "body": "from .api import get_module_paths<EOL>for module_dir in get_module_paths():<EOL><INDENT>mod_path = unipath(module_dir, path)<EOL>if is_module(mod_path):<EOL><INDENT>return Module(mod_path)<EOL><DEDENT><DEDENT>raise ResolveError<EOL>", "docstring": "Resolves modules in CPENV_MODULES path and CPENV_HOME/modules", "id": "f8300:m6"}
{"signature": "def path_is_module_resolver(resolver, path):", "body": "if isinstance(path, Module):<EOL><INDENT>return path<EOL><DEDENT>raise ResolveError<EOL>", "docstring": "Checks if path is already a :class:`Module` object", "id": "f8300:m4"}
{"signature": "@cli.command()<EOL>@click.argument('<STR_LIT>', nargs=-<NUM_LIT:1>)<EOL>@click.option('<STR_LIT>', is_flag=True, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', is_flag=True, help='<STR_LIT>')<EOL>def activate(paths, skip_local, skip_shared):", "body": "if not paths:<EOL><INDENT>ctx = click.get_current_context()<EOL>if cpenv.get_active_env():<EOL><INDENT>ctx.invoke(info)<EOL>return<EOL><DEDENT>click.echo(ctx.get_help())<EOL>examples = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL>click.echo(examples)<EOL>return<EOL><DEDENT>if skip_local:<EOL><INDENT>cpenv.module_resolvers.remove(cpenv.resolver.module_resolver)<EOL>cpenv.module_resolvers.remove(cpenv.resolver.active_env_module_resolver)<EOL><DEDENT>if skip_shared:<EOL><INDENT>cpenv.module_resolvers.remove(cpenv.resolver.modules_path_resolver)<EOL><DEDENT>try:<EOL><INDENT>r = cpenv.resolve(*paths)<EOL><DEDENT>except cpenv.ResolveError as e:<EOL><INDENT>click.echo('<STR_LIT:\\n>' + str(e))<EOL>return<EOL><DEDENT>resolved = set(r.resolved)<EOL>active_modules = set()<EOL>env = cpenv.get_active_env()<EOL>if env:<EOL><INDENT>active_modules.add(env)<EOL><DEDENT>active_modules.update(cpenv.get_active_modules())<EOL>new_modules = resolved - active_modules<EOL>old_modules = active_modules & resolved<EOL>if old_modules and not new_modules:<EOL><INDENT>click.echo(<EOL>'<STR_LIT>'<EOL>+ bold('<STR_LIT:U+0020>'.join([obj.name for obj in old_modules]))<EOL>)<EOL>return<EOL><DEDENT>if env and contains_env(new_modules):<EOL><INDENT>click.echo('<STR_LIT>')<EOL>return<EOL><DEDENT>click.echo('<STR_LIT>')<EOL>click.echo(format_objects(r.resolved))<EOL>r.activate()<EOL>click.echo(blue('<STR_LIT>'))<EOL>modules = sorted(resolved | active_modules, key=_type_and_name)<EOL>prompt = '<STR_LIT::>'.join([obj.name for obj in modules])<EOL>shell.launch(prompt)<EOL>", "docstring": "Activate an environment", "id": "f8303:m8"}
{"signature": "@click.group()<EOL>@click.version_option(cpenv.__version__)<EOL>def cli():", "body": "EnvironmentCache.validate()<EOL>", "docstring": "Cpenv commands", "id": "f8303:m5"}
{"signature": "@module.command()<EOL>@click.argument('<STR_LIT>')<EOL>@click.option('<STR_LIT>', help='<STR_LIT>')<EOL>def create(name_or_path, config):", "body": "click.echo('<STR_LIT>'.format(name_or_path), nl=False)<EOL>try:<EOL><INDENT>module = cpenv.create_module(name_or_path, config)<EOL><DEDENT>except Exception as e:<EOL><INDENT>click.echo(bold_red('<STR_LIT>'))<EOL>raise<EOL><DEDENT>else:<EOL><INDENT>click.echo(bold_green('<STR_LIT>'))<EOL>click.echo('<STR_LIT>')<EOL>click.echo(\"<STR_LIT>\")<EOL>click.echo('<STR_LIT>')<EOL>click.echo('<STR_LIT>')<EOL>click.echo('<STR_LIT>')<EOL><DEDENT>", "docstring": "Create a new template module.\n\n    You can also specify a filesystem path like \"./modules/new_module\"", "id": "f8303:m17"}
{"signature": "def format_objects(objects, children=False, columns=None, header=True):", "body": "columns = columns or ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>objects = sorted(objects, key=_type_and_name)<EOL>data = []<EOL>for obj in objects:<EOL><INDENT>if isinstance(obj, cpenv.VirtualEnvironment):<EOL><INDENT>data.append(get_info(obj))<EOL>modules = obj.get_modules()<EOL>if children and modules:<EOL><INDENT>for mod in modules:<EOL><INDENT>data.append(get_info(mod, indent=<NUM_LIT:2>, root=obj.path))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>data.append(get_info(obj))<EOL><DEDENT><DEDENT>maxes = [len(max(col, key=len)) for col in zip(*data)]<EOL>tmpl = '<STR_LIT>' % tuple(maxes)<EOL>lines = []<EOL>if header:<EOL><INDENT>lines.append('<STR_LIT:\\n>' + bold_blue(tmpl.format(*columns)))<EOL><DEDENT>for obj_data in data:<EOL><INDENT>lines.append(tmpl.format(*obj_data))<EOL><DEDENT>return '<STR_LIT:\\n>'.join(lines)<EOL>", "docstring": "Format a list of environments and modules for terminal output", "id": "f8303:m4"}
{"signature": "def _type_and_name(obj):", "body": "return isinstance(obj, cpenv.Module), obj.name<EOL>", "docstring": "Sort key for a list of environments and modules", "id": "f8303:m1"}
{"signature": "@cache.command()<EOL>@click.argument('<STR_LIT:path>')<EOL>def remove(path):", "body": "r = cpenv.resolve(path)<EOL>if isinstance(r.resolved[<NUM_LIT:0>], cpenv.VirtualEnvironment):<EOL><INDENT>EnvironmentCache.discard(r.resolved[<NUM_LIT:0>])<EOL>EnvironmentCache.save()<EOL><DEDENT>", "docstring": "Remove a cached environment. Removed paths will no longer be able to\n    be activated by name", "id": "f8303:m14"}
{"signature": "@cache.command()<EOL>def clear():", "body": "if click.confirm('<STR_LIT>'):<EOL><INDENT>cpenv.EnvironmentCache.clear()<EOL>click.echo('<STR_LIT>')<EOL><DEDENT>", "docstring": "Clear environment cache", "id": "f8303:m15"}
{"signature": "@module.command()<EOL>@click.argument('<STR_LIT:name>', required=False)<EOL>@click.argument('<STR_LIT:path>', required=False)<EOL>@click.option('<STR_LIT>', help='<STR_LIT>')<EOL>@click.option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>type=click.Choice(['<STR_LIT>', '<STR_LIT>']),<EOL>default='<STR_LIT>',<EOL>show_default=True)<EOL>def add(name, path, branch, type):", "body": "if not name and not path:<EOL><INDENT>ctx = click.get_current_context()<EOL>click.echo(ctx.get_help())<EOL>examples = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL>click.echo(examples)<EOL>return<EOL><DEDENT>if not name:<EOL><INDENT>click.echo('<STR_LIT>')<EOL>return<EOL><DEDENT>if not path:<EOL><INDENT>click.echo('<STR_LIT>')<EOL><DEDENT>env = cpenv.get_active_env()<EOL>if type=='<STR_LIT>':<EOL><INDENT>if not env:<EOL><INDENT>click.echo('<STR_LIT>')<EOL>return<EOL><DEDENT>if click.confirm('<STR_LIT>'.format(name, env.name)):<EOL><INDENT>click.echo('<STR_LIT>', nl=False)<EOL>try:<EOL><INDENT>env.add_module(name, path, branch)<EOL><DEDENT>except:<EOL><INDENT>click.echo(bold_red('<STR_LIT>'))<EOL>raise<EOL><DEDENT>else:<EOL><INDENT>click.echo(bold_green('<STR_LIT>'))<EOL><DEDENT><DEDENT>return<EOL><DEDENT>module_paths = cpenv.get_module_paths()<EOL>click.echo('<STR_LIT>')<EOL>for i, mod_path in enumerate(module_paths):<EOL><INDENT>click.echo('<STR_LIT>'.format(i, mod_path))<EOL><DEDENT>choice = click.prompt(<EOL>'<STR_LIT>',<EOL>type=int,<EOL>default=<NUM_LIT:0><EOL>)<EOL>module_root = module_paths[choice]<EOL>module_path = utils.unipath(module_root, name)<EOL>click.echo('<STR_LIT>'.format(module_path), nl=False)<EOL>try:<EOL><INDENT>cpenv.create_module(module_path, path, branch)<EOL><DEDENT>except:<EOL><INDENT>click.echo(bold_red('<STR_LIT>'))<EOL>raise<EOL><DEDENT>else:<EOL><INDENT>click.echo(bold_green('<STR_LIT>'))<EOL><DEDENT>", "docstring": "Add a module to an environment. 
PATH can be a git repository path or\n    a filesystem path.", "id": "f8303:m18"}
{"signature": "@cli.command('<STR_LIT:list>')<EOL>def list_():", "body": "environments = cpenv.get_environments()<EOL>modules = cpenv.get_modules()<EOL>click.echo(format_objects(environments + modules, children=True))<EOL>", "docstring": "List available environments and modules", "id": "f8303:m7"}
{"signature": "@cli.group()<EOL>def cache():", "body": "", "docstring": "Environment cache commands", "id": "f8303:m11"}
{"signature": "def write_and_convert(self, text):", "body": "cursor = <NUM_LIT:0><EOL>text = self.convert_osc(text)<EOL>for match in self.ANSI_CSI_RE.finditer(text):<EOL><INDENT>start, end = match.span()<EOL>self.write_plain_text(text, cursor, start)<EOL>self.convert_ansi(*match.groups())<EOL>cursor = end<EOL><DEDENT>self.write_plain_text(text, cursor, len(text))<EOL>", "docstring": "Write the given text to our wrapped stream, stripping any ANSI\nsequences from the text, and optionally converting them into win32\ncalls.", "id": "f8305:c1:m5"}
{"signature": "def should_wrap(self):", "body": "return self.convert or self.strip or self.autoreset<EOL>", "docstring": "True if this class is actually needed. If false, then the output\nstream will not be affected, nor will win32 calls be issued, so\nwrapping stdout is not actually required. This will generally be\nFalse on non-Windows platforms, unless optional functionality like\nautoreset has been requested using kwargs to init()", "id": "f8305:c1:m1"}
{"signature": "def safe_load_all(stream):", "body": "return load_all(stream, SafeLoader)<EOL>", "docstring": "Parse all YAML documents in a stream\nand produce corresponding Python objects.\nResolve only basic YAML tags.", "id": "f8321:m7"}
{"signature": "def compose(stream, Loader=Loader):", "body": "loader = Loader(stream)<EOL>try:<EOL><INDENT>return loader.get_single_node()<EOL><DEDENT>finally:<EOL><INDENT>loader.dispose()<EOL><DEDENT>", "docstring": "Parse the first YAML document in a stream\nand produce the corresponding representation tree.", "id": "f8321:m2"}
{"signature": "def add_multi_representer(data_type, multi_representer, Dumper=Dumper):", "body": "Dumper.add_multi_representer(data_type, multi_representer)<EOL>", "docstring": "Add a representer for the given type.\nMulti-representer is a function accepting a Dumper instance\nand an instance of the given data type or subtype\nand producing the corresponding representation node.", "id": "f8321:m20"}
{"signature": "def serialize_all(nodes, stream=None, Dumper=Dumper,<EOL>canonical=None, indent=None, width=None,<EOL>allow_unicode=None, line_break=None,<EOL>encoding='<STR_LIT:utf-8>', explicit_start=None, explicit_end=None,<EOL>version=None, tags=None):", "body": "getvalue = None<EOL>if stream is None:<EOL><INDENT>if encoding is None:<EOL><INDENT>from StringIO import StringIO<EOL><DEDENT>else:<EOL><INDENT>from cStringIO import StringIO<EOL><DEDENT>stream = StringIO()<EOL>getvalue = stream.getvalue<EOL><DEDENT>dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,<EOL>allow_unicode=allow_unicode, line_break=line_break,<EOL>encoding=encoding, version=version, tags=tags,<EOL>explicit_start=explicit_start, explicit_end=explicit_end)<EOL>try:<EOL><INDENT>dumper.open()<EOL>for node in nodes:<EOL><INDENT>dumper.serialize(node)<EOL><DEDENT>dumper.close()<EOL><DEDENT>finally:<EOL><INDENT>dumper.dispose()<EOL><DEDENT>if getvalue:<EOL><INDENT>return getvalue()<EOL><DEDENT>", "docstring": "Serialize a sequence of representation trees into a YAML stream.\nIf stream is None, return the produced string instead.", "id": "f8321:m9"}
{"signature": "def add_constructor(tag, constructor, Loader=Loader):", "body": "Loader.add_constructor(tag, constructor)<EOL>", "docstring": "Add a constructor for the given tag.\nConstructor is a function that accepts a Loader instance\nand a node object and produces the corresponding Python object.", "id": "f8321:m17"}
{"signature": "def compose_all(stream, Loader=Loader):", "body": "loader = Loader(stream)<EOL>try:<EOL><INDENT>while loader.check_node():<EOL><INDENT>yield loader.get_node()<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>loader.dispose()<EOL><DEDENT>", "docstring": "Parse all YAML documents in a stream\nand produce corresponding representation trees.", "id": "f8321:m3"}
{"signature": "def parse(stream, Loader=Loader):", "body": "loader = Loader(stream)<EOL>try:<EOL><INDENT>while loader.check_event():<EOL><INDENT>yield loader.get_event()<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>loader.dispose()<EOL><DEDENT>", "docstring": "Parse a YAML stream and produce parsing events.", "id": "f8321:m1"}
{"signature": "def add_representer(data_type, representer, Dumper=Dumper):", "body": "Dumper.add_representer(data_type, representer)<EOL>", "docstring": "Add a representer for the given type.\nRepresenter is a function accepting a Dumper instance\nand an instance of the given data type\nand producing the corresponding representation node.", "id": "f8321:m19"}
{"signature": "def load_all(stream, Loader=Loader):", "body": "loader = Loader(stream)<EOL>try:<EOL><INDENT>while loader.check_data():<EOL><INDENT>yield loader.get_data()<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>loader.dispose()<EOL><DEDENT>", "docstring": "Parse all YAML documents in a stream\nand produce corresponding Python objects.", "id": "f8321:m5"}
{"signature": "def safe_dump(data, stream=None, **kwds):", "body": "return dump_all([data], stream, Dumper=SafeDumper, **kwds)<EOL>", "docstring": "Serialize a Python object into a YAML stream.\nProduce only basic YAML tags.\nIf stream is None, return the produced string instead.", "id": "f8321:m14"}
{"signature": "def safe_dump_all(documents, stream=None, **kwds):", "body": "return dump_all(documents, stream, Dumper=SafeDumper, **kwds)<EOL>", "docstring": "Serialize a sequence of Python objects into a YAML stream.\nProduce only basic YAML tags.\nIf stream is None, return the produced string instead.", "id": "f8321:m13"}
{"signature": "def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):", "body": "Loader.add_multi_constructor(tag_prefix, multi_constructor)<EOL>", "docstring": "Add a multi-constructor for the given tag prefix.\nMulti-constructor is called for a node if its tag starts with tag_prefix.\nMulti-constructor accepts a Loader instance, a tag suffix,\nand a node object and produces the corresponding Python object.", "id": "f8321:m18"}
{"signature": "def safe_load(stream):", "body": "return load(stream, SafeLoader)<EOL>", "docstring": "Parse the first YAML document in a stream\nand produce the corresponding Python object.\nResolve only basic YAML tags.", "id": "f8321:m6"}
{"signature": "def load(stream, Loader=Loader):", "body": "loader = Loader(stream)<EOL>try:<EOL><INDENT>return loader.get_single_data()<EOL><DEDENT>finally:<EOL><INDENT>loader.dispose()<EOL><DEDENT>", "docstring": "Parse the first YAML document in a stream\nand produce the corresponding Python object.", "id": "f8321:m4"}
{"signature": "def add_implicit_resolver(tag, regexp, first=None,<EOL>Loader=Loader, Dumper=Dumper):", "body": "Loader.add_implicit_resolver(tag, regexp, first)<EOL>Dumper.add_implicit_resolver(tag, regexp, first)<EOL>", "docstring": "Add an implicit scalar detector.\nIf an implicit scalar value matches the given regexp,\nthe corresponding tag is assigned to the scalar.\nfirst is a sequence of possible initial characters or None.", "id": "f8321:m15"}
{"signature": "def dump_all(documents, stream=None, Dumper=Dumper,<EOL>default_style=None, default_flow_style=None,<EOL>canonical=None, indent=None, width=None,<EOL>allow_unicode=None, line_break=None,<EOL>encoding='<STR_LIT:utf-8>', explicit_start=None, explicit_end=None,<EOL>version=None, tags=None):", "body": "getvalue = None<EOL>if stream is None:<EOL><INDENT>if encoding is None:<EOL><INDENT>from StringIO import StringIO<EOL><DEDENT>else:<EOL><INDENT>from cStringIO import StringIO<EOL><DEDENT>stream = StringIO()<EOL>getvalue = stream.getvalue<EOL><DEDENT>dumper = Dumper(stream, default_style=default_style,<EOL>default_flow_style=default_flow_style,<EOL>canonical=canonical, indent=indent, width=width,<EOL>allow_unicode=allow_unicode, line_break=line_break,<EOL>encoding=encoding, version=version, tags=tags,<EOL>explicit_start=explicit_start, explicit_end=explicit_end)<EOL>try:<EOL><INDENT>dumper.open()<EOL>for data in documents:<EOL><INDENT>dumper.represent(data)<EOL><DEDENT>dumper.close()<EOL><DEDENT>finally:<EOL><INDENT>dumper.dispose()<EOL><DEDENT>if getvalue:<EOL><INDENT>return getvalue()<EOL><DEDENT>", "docstring": "Serialize a sequence of Python objects into a YAML stream.\nIf stream is None, return the produced string instead.", "id": "f8321:m11"}
{"signature": "def serialize(node, stream=None, Dumper=Dumper, **kwds):", "body": "return serialize_all([node], stream, Dumper=Dumper, **kwds)<EOL>", "docstring": "Serialize a representation tree into a YAML stream.\nIf stream is None, return the produced string instead.", "id": "f8321:m10"}
{"signature": "def __init__(self):", "body": "<EOL>self.done = False<EOL>self.flow_level = <NUM_LIT:0><EOL>self.tokens = []<EOL>self.fetch_stream_start()<EOL>self.tokens_taken = <NUM_LIT:0><EOL>self.indent = -<NUM_LIT:1><EOL>self.indents = []<EOL>self.allow_simple_key = True<EOL>self.possible_simple_keys = {}<EOL>", "docstring": "Initialize the scanner.", "id": "f8327:c2:m0"}
{"signature": "def split_arg_string(string):", "body": "rv = []<EOL>for match in re.finditer(r\"<STR_LIT>\"<EOL>r'<STR_LIT>'<EOL>r'<STR_LIT>', string, re.S):<EOL><INDENT>arg = match.group().strip()<EOL>if arg[:<NUM_LIT:1>] == arg[-<NUM_LIT:1>:] and arg[:<NUM_LIT:1>] in '<STR_LIT>':<EOL><INDENT>arg = arg[<NUM_LIT:1>:-<NUM_LIT:1>].encode('<STR_LIT:ascii>', '<STR_LIT>').decode('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>arg = type(string)(arg)<EOL><DEDENT>except UnicodeError:<EOL><INDENT>pass<EOL><DEDENT>rv.append(arg)<EOL><DEDENT>return rv<EOL>", "docstring": "Given an argument string this attempts to split it into small parts.", "id": "f8328:m4"}
{"signature": "def add_argument(self, dest, nargs=<NUM_LIT:1>, obj=None):", "body": "if obj is None:<EOL><INDENT>obj = dest<EOL><DEDENT>self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))<EOL>", "docstring": "Adds a positional argument named `dest` to the parser.\n\n        The `obj` can be used to identify the option in the order list\n        that is returned from the parser.", "id": "f8328:c3:m2"}
{"signature": "def parse_args(self, args):", "body": "state = ParsingState(args)<EOL>try:<EOL><INDENT>self._process_args_for_options(state)<EOL>self._process_args_for_args(state)<EOL><DEDENT>except UsageError:<EOL><INDENT>if self.ctx is None or not self.ctx.resilient_parsing:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>return state.opts, state.largs, state.order<EOL>", "docstring": "Parses positional arguments and returns ``(values, args, order)``\n        for the parsed options and arguments as well as the leftover\n        arguments if there are any.  The order is a list of objects as they\n        appear on the command line.  If arguments appear multiple times they\n        will be memorized multiple times as well.", "id": "f8328:c3:m3"}
{"signature": "def resolve_color_default(color=None):", "body": "if color is not None:<EOL><INDENT>return color<EOL><DEDENT>ctx = get_current_context(silent=True)<EOL>if ctx is not None:<EOL><INDENT>return ctx.color<EOL><DEDENT>", "docstring": "Internal helper to get the default value of the color flag.  If a\n    value is passed it's returned unchanged, otherwise it's looked up from\n    the current context.", "id": "f8329:m3"}
{"signature": "def push_context(ctx):", "body": "_local.__dict__.setdefault('<STR_LIT>', []).append(ctx)<EOL>", "docstring": "Pushes a new context to the current stack.", "id": "f8329:m1"}
{"signature": "def pop_context():", "body": "_local.stack.pop()<EOL>", "docstring": "Removes the top level from the stack.", "id": "f8329:m2"}
{"signature": "def _pipepager(text, cmd, color):", "body": "import subprocess<EOL>env = dict(os.environ)<EOL>cmd_detail = cmd.rsplit('<STR_LIT:/>', <NUM_LIT:1>)[-<NUM_LIT:1>].split()<EOL>if color is None and cmd_detail[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>less_flags = os.environ.get('<STR_LIT>', '<STR_LIT>') + '<STR_LIT:U+0020>'.join(cmd_detail[<NUM_LIT:1>:])<EOL>if not less_flags:<EOL><INDENT>env['<STR_LIT>'] = '<STR_LIT>'<EOL>color = True<EOL><DEDENT>elif '<STR_LIT:r>' in less_flags or '<STR_LIT:R>' in less_flags:<EOL><INDENT>color = True<EOL><DEDENT><DEDENT>if not color:<EOL><INDENT>text = strip_ansi(text)<EOL><DEDENT>c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,<EOL>env=env)<EOL>encoding = get_best_encoding(c.stdin)<EOL>try:<EOL><INDENT>c.stdin.write(text.encode(encoding, '<STR_LIT:replace>'))<EOL>c.stdin.close()<EOL><DEDENT>except (IOError, KeyboardInterrupt):<EOL><INDENT>pass<EOL><DEDENT>while True:<EOL><INDENT>try:<EOL><INDENT>c.wait()<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Page through text by feeding it to another program.  Invoking a\n    pager through this might support colors.", "id": "f8330:m2"}
{"signature": "def pager(text, color=None):", "body": "stdout = _default_text_stdout()<EOL>if not isatty(sys.stdin) or not isatty(stdout):<EOL><INDENT>return _nullpager(stdout, text, color)<EOL><DEDENT>pager_cmd = (os.environ.get('<STR_LIT>', None) or '<STR_LIT>').strip()<EOL>if pager_cmd:<EOL><INDENT>if WIN:<EOL><INDENT>return _tempfilepager(text, pager_cmd, color)<EOL><DEDENT>return _pipepager(text, pager_cmd, color)<EOL><DEDENT>if os.environ.get('<STR_LIT>') in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>return _nullpager(stdout, text, color)<EOL><DEDENT>if WIN or sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>return _tempfilepager(text, '<STR_LIT>', color)<EOL><DEDENT>if hasattr(os, '<STR_LIT>') and os.system('<STR_LIT>') == <NUM_LIT:0>:<EOL><INDENT>return _pipepager(text, '<STR_LIT>', color)<EOL><DEDENT>import tempfile<EOL>fd, filename = tempfile.mkstemp()<EOL>os.close(fd)<EOL>try:<EOL><INDENT>if hasattr(os, '<STR_LIT>') and os.system('<STR_LIT>' % filename) == <NUM_LIT:0>:<EOL><INDENT>return _pipepager(text, '<STR_LIT>', color)<EOL><DEDENT>return _nullpager(stdout, text, color)<EOL><DEDENT>finally:<EOL><INDENT>os.unlink(filename)<EOL><DEDENT>", "docstring": "Decide what method to use for paging through text.", "id": "f8330:m1"}
{"signature": "def get_best_encoding(stream):", "body": "rv = getattr(stream, '<STR_LIT>', None) or sys.getdefaultencoding()<EOL>if is_ascii_encoding(rv):<EOL><INDENT>return '<STR_LIT:utf-8>'<EOL><DEDENT>return rv<EOL>", "docstring": "Returns the default stream encoding if not found.", "id": "f8332:m3"}
{"signature": "@contextlib.contextmanager<EOL><INDENT>def isolated_filesystem(self):<DEDENT>", "body": "cwd = os.getcwd()<EOL>t = tempfile.mkdtemp()<EOL>os.chdir(t)<EOL>try:<EOL><INDENT>yield t<EOL><DEDENT>finally:<EOL><INDENT>os.chdir(cwd)<EOL>try:<EOL><INDENT>shutil.rmtree(t)<EOL><DEDENT>except (OSError, IOError):<EOL><INDENT>pass<EOL><DEDENT><DEDENT>", "docstring": "A context manager that creates a temporary folder and changes\n        the current working directory to it for isolated filesystem tests.", "id": "f8333:c2:m5"}
{"signature": "@contextlib.contextmanager<EOL><INDENT>def isolation(self, input=None, env=None, color=False):<DEDENT>", "body": "input = make_input_stream(input, self.charset)<EOL>old_stdin = sys.stdin<EOL>old_stdout = sys.stdout<EOL>old_stderr = sys.stderr<EOL>old_forced_width = clickpkg.formatting.FORCED_WIDTH<EOL>clickpkg.formatting.FORCED_WIDTH = <NUM_LIT><EOL>env = self.make_env(env)<EOL>if PY2:<EOL><INDENT>sys.stdout = sys.stderr = bytes_output = StringIO()<EOL>if self.echo_stdin:<EOL><INDENT>input = EchoingStdin(input, bytes_output)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>bytes_output = io.BytesIO()<EOL>if self.echo_stdin:<EOL><INDENT>input = EchoingStdin(input, bytes_output)<EOL><DEDENT>input = io.TextIOWrapper(input, encoding=self.charset)<EOL>sys.stdout = sys.stderr = io.TextIOWrapper(<EOL>bytes_output, encoding=self.charset)<EOL><DEDENT>sys.stdin = input<EOL>def visible_input(prompt=None):<EOL><INDENT>sys.stdout.write(prompt or '<STR_LIT>')<EOL>val = input.readline().rstrip('<STR_LIT:\\r\\n>')<EOL>sys.stdout.write(val + '<STR_LIT:\\n>')<EOL>sys.stdout.flush()<EOL>return val<EOL><DEDENT>def hidden_input(prompt=None):<EOL><INDENT>sys.stdout.write((prompt or '<STR_LIT>') + '<STR_LIT:\\n>')<EOL>sys.stdout.flush()<EOL>return input.readline().rstrip('<STR_LIT:\\r\\n>')<EOL><DEDENT>def _getchar(echo):<EOL><INDENT>char = sys.stdin.read(<NUM_LIT:1>)<EOL>if echo:<EOL><INDENT>sys.stdout.write(char)<EOL>sys.stdout.flush()<EOL><DEDENT>return char<EOL><DEDENT>default_color = color<EOL>def should_strip_ansi(stream=None, color=None):<EOL><INDENT>if color is None:<EOL><INDENT>return not default_color<EOL><DEDENT>return not color<EOL><DEDENT>old_visible_prompt_func = clickpkg.termui.visible_prompt_func<EOL>old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func<EOL>old__getchar_func = clickpkg.termui._getchar<EOL>old_should_strip_ansi = clickpkg.utils.should_strip_ansi<EOL>clickpkg.termui.visible_prompt_func = visible_input<EOL>clickpkg.termui.hidden_prompt_func = 
hidden_input<EOL>clickpkg.termui._getchar = _getchar<EOL>clickpkg.utils.should_strip_ansi = should_strip_ansi<EOL>old_env = {}<EOL>try:<EOL><INDENT>for key, value in iteritems(env):<EOL><INDENT>old_env[key] = os.environ.get(key)<EOL>if value is None:<EOL><INDENT>try:<EOL><INDENT>del os.environ[key]<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>os.environ[key] = value<EOL><DEDENT><DEDENT>yield bytes_output<EOL><DEDENT>finally:<EOL><INDENT>for key, value in iteritems(old_env):<EOL><INDENT>if value is None:<EOL><INDENT>try:<EOL><INDENT>del os.environ[key]<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>os.environ[key] = value<EOL><DEDENT><DEDENT>sys.stdout = old_stdout<EOL>sys.stderr = old_stderr<EOL>sys.stdin = old_stdin<EOL>clickpkg.termui.visible_prompt_func = old_visible_prompt_func<EOL>clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func<EOL>clickpkg.termui._getchar = old__getchar_func<EOL>clickpkg.utils.should_strip_ansi = old_should_strip_ansi<EOL>clickpkg.formatting.FORCED_WIDTH = old_forced_width<EOL><DEDENT>", "docstring": "A context manager that sets up the isolation for invoking of a\n        command line tool.  This sets up stdin with the given input data\n        and `os.environ` with the overrides from the given dictionary.\n        This also rebinds some internals in Click to be mocked (like the\n        prompt functionality).\n\n        This is automatically done in the :meth:`invoke` method.\n\n        .. versionadded:: 4.0\n           The ``color`` parameter was added.\n\n        :param input: the input stream to put into sys.stdin.\n        :param env: the environment overrides as dictionary.\n        :param color: whether the output should contain color codes. The\n                      application can still override this explicitly.", "id": "f8333:c2:m3"}
{"signature": "def fail(self, message, param=None, ctx=None):", "body": "raise BadParameter(message, ctx=ctx, param=param)<EOL>", "docstring": "Helper method to fail with an invalid value message.", "id": "f8334:c0:m5"}
{"signature": "def split_envvar_value(self, rv):", "body": "return (rv or '<STR_LIT>').split(self.envvar_list_splitter)<EOL>", "docstring": "Given a value from an environment variable this splits it up\n        into small chunks depending on the defined envvar list splitter.\n\n        If the splitter is set to `None`, which means that whitespace splits,\n        then leading and trailing whitespace is ignored.  Otherwise, leading\n        and trailing splitters usually lead to empty items being included.", "id": "f8334:c0:m4"}
{"signature": "def launch(url, wait=False, locate=False):", "body": "from ._termui_impl import open_url<EOL>return open_url(url, wait=wait, locate=locate)<EOL>", "docstring": "This function launches the given URL (or filename) in the default\n    viewer application for this file type.  If this is an executable, it\n    might launch the executable in a new session.  The return value is\n    the exit code of the launched application.  Usually, ``0`` indicates\n    success.\n\n    Examples::\n\n        click.launch('http://click.pocoo.org/')\n        click.launch('/my/downloaded/file', locate=True)\n\n    .. versionadded:: 2.0\n\n    :param url: URL or filename of the thing to launch.\n    :param wait: waits for the program to stop.\n    :param locate: if this is set to `True` then instead of launching the\n                   application associated with the URL it will attempt to\n                   launch a file manager with the file located.  This\n                   might have weird effects if the URL does not point to\n                   the filesystem.", "id": "f8338:m12"}
{"signature": "def progressbar(iterable=None, length=None, label=None, show_eta=True,<EOL>show_percent=None, show_pos=False,<EOL>item_show_func=None, fill_char='<STR_LIT:#>', empty_char='<STR_LIT:->',<EOL>bar_template='<STR_LIT>',<EOL>info_sep='<STR_LIT:U+0020>', width=<NUM_LIT>, file=None, color=None):", "body": "from ._termui_impl import ProgressBar<EOL>color = resolve_color_default(color)<EOL>return ProgressBar(iterable=iterable, length=length, show_eta=show_eta,<EOL>show_percent=show_percent, show_pos=show_pos,<EOL>item_show_func=item_show_func, fill_char=fill_char,<EOL>empty_char=empty_char, bar_template=bar_template,<EOL>info_sep=info_sep, file=file, label=label,<EOL>width=width, color=color)<EOL>", "docstring": "This function creates an iterable context manager that can be used\n    to iterate over something while showing a progress bar.  It will\n    either iterate over the `iterable` or `length` items (that are counted\n    up).  While iteration happens, this function will print a rendered\n    progress bar to the given `file` (defaults to stdout) and will attempt\n    to calculate remaining time and more.  By default, this progress bar\n    will not be rendered if the file is not a terminal.\n\n    The context manager creates the progress bar.  When the context\n    manager is entered the progress bar is already displayed.  With every\n    iteration over the progress bar, the iterable passed to the bar is\n    advanced and the bar is updated.  When the context manager exits,\n    a newline is printed and the progress bar is finalized on screen.\n\n    No printing must happen or the progress bar will be unintentionally\n    destroyed.\n\n    Example usage::\n\n        with progressbar(items) as bar:\n            for item in bar:\n                do_something_with(item)\n\n    Alternatively, if no iterable is specified, one can manually update the\n    progress bar through the `update()` method instead of directly\n    iterating over the progress bar.  
The update method accepts the number\n    of steps to increment the bar with::\n\n        with progressbar(length=chunks.total_bytes) as bar:\n            for chunk in chunks:\n                process_chunk(chunk)\n                bar.update(chunks.bytes)\n\n    .. versionadded:: 2.0\n\n    .. versionadded:: 4.0\n       Added the `color` parameter.  Added a `update` method to the\n       progressbar object.\n\n    :param iterable: an iterable to iterate over.  If not provided the length\n                     is required.\n    :param length: the number of items to iterate over.  By default the\n                   progressbar will attempt to ask the iterator about its\n                   length, which might or might not work.  If an iterable is\n                   also provided this parameter can be used to override the\n                   length.  If an iterable is not provided the progress bar\n                   will iterate over a range of that length.\n    :param label: the label to show next to the progress bar.\n    :param show_eta: enables or disables the estimated time display.  This is\n                     automatically disabled if the length cannot be\n                     determined.\n    :param show_percent: enables or disables the percentage display.  The\n                         default is `True` if the iterable has a length or\n                         `False` if not.\n    :param show_pos: enables or disables the absolute position display.  The\n                     default is `False`.\n    :param item_show_func: a function called with the current item which\n                           can return a string to show the current item\n                           next to the progress bar.  
Note that the current\n                           item can be `None`!\n    :param fill_char: the character to use to show the filled part of the\n                      progress bar.\n    :param empty_char: the character to use to show the non-filled part of\n                       the progress bar.\n    :param bar_template: the format string to use as template for the bar.\n                         The parameters in it are ``label`` for the label,\n                         ``bar`` for the progress bar and ``info`` for the\n                         info section.\n    :param info_sep: the separator between multiple info items (eta etc.)\n    :param width: the width of the progress bar in characters, 0 means full\n                  terminal width\n    :param file: the file to write to.  If this is not a terminal then\n                 only the label is printed.\n    :param color: controls if the terminal supports ANSI colors or not.  The\n                  default is autodetection.  This is only needed if ANSI\n                  codes are included anywhere in the progress bar output\n                  which is not the case by default.", "id": "f8338:m6"}
{"signature": "def get_terminal_size():", "body": "<EOL>if sys.version_info >= (<NUM_LIT:3>, <NUM_LIT:3>):<EOL><INDENT>import shutil<EOL>shutil_get_terminal_size = getattr(shutil, '<STR_LIT>', None)<EOL>if shutil_get_terminal_size:<EOL><INDENT>sz = shutil_get_terminal_size()<EOL>return sz.columns, sz.lines<EOL><DEDENT><DEDENT>if get_winterm_size is not None:<EOL><INDENT>return get_winterm_size()<EOL><DEDENT>def ioctl_gwinsz(fd):<EOL><INDENT>try:<EOL><INDENT>import fcntl<EOL>import termios<EOL>cr = struct.unpack(<EOL>'<STR_LIT>', fcntl.ioctl(fd, termios.TIOCGWINSZ, '<STR_LIT>'))<EOL><DEDENT>except Exception:<EOL><INDENT>return<EOL><DEDENT>return cr<EOL><DEDENT>cr = ioctl_gwinsz(<NUM_LIT:0>) or ioctl_gwinsz(<NUM_LIT:1>) or ioctl_gwinsz(<NUM_LIT:2>)<EOL>if not cr:<EOL><INDENT>try:<EOL><INDENT>fd = os.open(os.ctermid(), os.O_RDONLY)<EOL>try:<EOL><INDENT>cr = ioctl_gwinsz(fd)<EOL><DEDENT>finally:<EOL><INDENT>os.close(fd)<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if not cr or not cr[<NUM_LIT:0>] or not cr[<NUM_LIT:1>]:<EOL><INDENT>cr = (os.environ.get('<STR_LIT>', <NUM_LIT>),<EOL>os.environ.get('<STR_LIT>', DEFAULT_COLUMNS))<EOL><DEDENT>return int(cr[<NUM_LIT:1>]), int(cr[<NUM_LIT:0>])<EOL>", "docstring": "Returns the current size of the terminal as tuple in the form\n    ``(width, height)`` in columns and rows.", "id": "f8338:m4"}
{"signature": "def getchar(echo=False):", "body": "f = _getchar<EOL>if f is None:<EOL><INDENT>from ._termui_impl import getchar as f<EOL><DEDENT>return f(echo)<EOL>", "docstring": "Fetches a single character from the terminal and returns it.  This\n    will always return a unicode character and under certain rare\n    circumstances this might return more than one character.  The\n    situations which more than one character is returned is when for\n    whatever reason multiple characters end up in the terminal buffer or\n    standard input was not actually a terminal.\n\n    Note that this will always read from the terminal, even if something\n    is piped into the standard input.\n\n    .. versionadded:: 2.0\n\n    :param echo: if set to `True`, the character read will also show up on\n                 the terminal.  The default is to not show it.", "id": "f8338:m13"}
{"signature": "def secho(text, file=None, nl=True, err=False, color=None, **styles):", "body": "return echo(style(text, **styles), file=file, nl=nl, err=err, color=color)<EOL>", "docstring": "This function combines :func:`echo` and :func:`style` into one\n    call.  As such the following two calls are the same::\n\n        click.secho('Hello World!', fg='green')\n        click.echo(click.style('Hello World!', fg='green'))\n\n    All keyword arguments are forwarded to the underlying functions\n    depending on which one they go with.\n\n    .. versionadded:: 2.0", "id": "f8338:m10"}
{"signature": "def fail(self, message):", "body": "raise UsageError(message, self)<EOL>", "docstring": "Aborts the execution of the program with a specific error\n        message.\n\n        :param message: the error message to fail with.", "id": "f8340:c0:m13"}
{"signature": "def make_parser(self, ctx):", "body": "parser = OptionParser(ctx)<EOL>parser.allow_interspersed_args = ctx.allow_interspersed_args<EOL>parser.ignore_unknown_options = ctx.ignore_unknown_options<EOL>for param in self.get_params(ctx):<EOL><INDENT>param.add_to_parser(parser, ctx)<EOL><DEDENT>return parser<EOL>", "docstring": "Creates the underlying option parser for this command.", "id": "f8340:c2:m7"}
{"signature": "@property<EOL><INDENT>def human_readable_name(self):<DEDENT>", "body": "return self.name<EOL>", "docstring": "Returns the human readable name of this parameter.  This is the\n        same as the name for options, but the metavar for arguments.", "id": "f8340:c6:m1"}
{"signature": "def format_usage(self, ctx, formatter):", "body": "pieces = self.collect_usage_pieces(ctx)<EOL>formatter.write_usage(ctx.command_path, '<STR_LIT:U+0020>'.join(pieces))<EOL>", "docstring": "Writes the usage line into the formatter.", "id": "f8340:c2:m3"}
{"signature": "def prompt_for_value(self, ctx):", "body": "<EOL>default = self.get_default(ctx)<EOL>if self.is_bool_flag:<EOL><INDENT>return confirm(self.prompt, default)<EOL><DEDENT>return prompt(self.prompt, default=default,<EOL>hide_input=self.hide_input,<EOL>confirmation_prompt=self.confirmation_prompt,<EOL>value_proc=lambda x: self.process_value(ctx, x))<EOL>", "docstring": "This is an alternative flow that can be activated in the full\n        value processing if a value does not exist.  It will prompt the\n        user until a valid value exists and then returns the processed\n        value as result.", "id": "f8340:c7:m5"}
{"signature": "def lookup_default(self, name):", "body": "if self.default_map is not None:<EOL><INDENT>rv = self.default_map.get(name)<EOL>if callable(rv):<EOL><INDENT>rv = rv()<EOL><DEDENT>return rv<EOL><DEDENT>", "docstring": "Looks up the default for a parameter name.  This by default\n        looks into the :attr:`default_map` if available.", "id": "f8340:c0:m12"}
{"signature": "def list_commands(self, ctx):", "body": "return []<EOL>", "docstring": "Returns a list of subcommand names in the order they should\n        appear.", "id": "f8340:c3:m9"}
{"signature": "def format_commands(self, ctx, formatter):", "body": "rows = []<EOL>for subcommand in self.list_commands(ctx):<EOL><INDENT>cmd = self.get_command(ctx, subcommand)<EOL>if cmd is None:<EOL><INDENT>continue<EOL><DEDENT>help = cmd.short_help or '<STR_LIT>'<EOL>rows.append((subcommand, help))<EOL><DEDENT>if rows:<EOL><INDENT>with formatter.section('<STR_LIT>'):<EOL><INDENT>formatter.write_dl(rows)<EOL><DEDENT><DEDENT>", "docstring": "Extra format methods for multi methods that adds all the commands\n        after the options.", "id": "f8340:c3:m4"}
{"signature": "def find_root(self):", "body": "node = self<EOL>while node.parent is not None:<EOL><INDENT>node = node.parent<EOL><DEDENT>return node<EOL>", "docstring": "Finds the outermost context.", "id": "f8340:c0:m9"}
{"signature": "def main(self, args=None, prog_name=None, complete_var=None,<EOL>standalone_mode=True, **extra):", "body": "<EOL>if not PY2:<EOL><INDENT>_verify_python3_env()<EOL><DEDENT>else:<EOL><INDENT>_check_for_unicode_literals()<EOL><DEDENT>if args is None:<EOL><INDENT>args = get_os_args()<EOL><DEDENT>else:<EOL><INDENT>args = list(args)<EOL><DEDENT>if prog_name is None:<EOL><INDENT>prog_name = make_str(os.path.basename(<EOL>sys.argv and sys.argv[<NUM_LIT:0>] or __file__))<EOL><DEDENT>_bashcomplete(self, prog_name, complete_var)<EOL>try:<EOL><INDENT>try:<EOL><INDENT>with self.make_context(prog_name, args, **extra) as ctx:<EOL><INDENT>rv = self.invoke(ctx)<EOL>if not standalone_mode:<EOL><INDENT>return rv<EOL><DEDENT>ctx.exit()<EOL><DEDENT><DEDENT>except (EOFError, KeyboardInterrupt):<EOL><INDENT>echo(file=sys.stderr)<EOL>raise Abort()<EOL><DEDENT>except ClickException as e:<EOL><INDENT>if not standalone_mode:<EOL><INDENT>raise<EOL><DEDENT>e.show()<EOL>sys.exit(e.exit_code)<EOL><DEDENT>except IOError as e:<EOL><INDENT>if e.errno == errno.EPIPE:<EOL><INDENT>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT>except Abort:<EOL><INDENT>if not standalone_mode:<EOL><INDENT>raise<EOL><DEDENT>echo('<STR_LIT>', file=sys.stderr)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "This is the way to invoke a script with all the bells and\n        whistles as a command line application.  This will always terminate\n        the application after a call.  If this is not wanted, ``SystemExit``\n        needs to be caught.\n\n        This method is also available by directly calling the instance of\n        a :class:`Command`.\n\n        .. versionadded:: 3.0\n           Added the `standalone_mode` flag to control the standalone mode.\n\n        :param args: the arguments that should be used for parsing.  If not\n                     provided, ``sys.argv[1:]`` is used.\n        :param prog_name: the program name that should be used.  
By default\n                          the program name is constructed by taking the file\n                          name from ``sys.argv[0]``.\n        :param complete_var: the environment variable that controls the\n                             bash completion support.  The default is\n                             ``\"_<prog_name>_COMPLETE\"`` with prog name in\n                             uppercase.\n        :param standalone_mode: the default behavior is to invoke the script\n                                in standalone mode.  Click will then\n                                handle exceptions and convert them into\n                                error messages and the function will never\n                                return but shut down the interpreter.  If\n                                this is set to `False` they will be\n                                propagated to the caller and the return\n                                value of this function is the return value\n                                of :meth:`invoke`.\n        :param extra: extra keyword arguments are forwarded to the context\n                      constructor.  See :class:`Context` for more information.", "id": "f8340:c1:m6"}
{"signature": "def invoke(self, ctx):", "body": "if self.callback is not None:<EOL><INDENT>return ctx.invoke(self.callback, **ctx.params)<EOL><DEDENT>", "docstring": "Given a context, this invokes the attached callback (if it exists)\n        in the right way.", "id": "f8340:c2:m14"}
{"signature": "def invoke(*args, **kwargs):", "body": "self, callback = args[:<NUM_LIT:2>]<EOL>ctx = self<EOL>if isinstance(callback, Command):<EOL><INDENT>other_cmd = callback<EOL>callback = other_cmd.callback<EOL>ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)<EOL>if callback is None:<EOL><INDENT>raise TypeError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>for param in other_cmd.params:<EOL><INDENT>if param.name not in kwargs and param.expose_value:<EOL><INDENT>kwargs[param.name] = param.get_default(ctx)<EOL><DEDENT><DEDENT><DEDENT>args = args[<NUM_LIT:2>:]<EOL>with augment_usage_errors(self):<EOL><INDENT>with ctx:<EOL><INDENT>return callback(*args, **kwargs)<EOL><DEDENT><DEDENT>", "docstring": "Invokes a command callback in exactly the way it expects.  There\n        are two ways to invoke this method:\n\n        1.  the first argument can be a callback and all other arguments and\n            keyword arguments are forwarded directly to the function.\n        2.  the first argument is a click command object.  In that case all\n            arguments are forwarded as well but proper click parameters\n            (options and click arguments) must be keyword arguments and Click\n            will fill in defaults.\n\n        Note that before Click 3.2 keyword arguments were not properly filled\n        in against the intention of this code and no context was created.  For\n        more information about this change and why it was done in a bugfix\n        release see :ref:`upgrade-to-3.2`.", "id": "f8340:c0:m18"}
{"signature": "def get_help(self):", "body": "return self.command.get_help(self)<EOL>", "docstring": "Helper method to get formatted help page for the current\n        context and command.", "id": "f8340:c0:m17"}
{"signature": "def get_command(self, ctx, cmd_name):", "body": "raise NotImplementedError()<EOL>", "docstring": "Given a context and a command name, this returns a\n        :class:`Command` object if it exists or returns `None`.", "id": "f8340:c3:m8"}
{"signature": "def exit(self, code=<NUM_LIT:0>):", "body": "sys.exit(code)<EOL>", "docstring": "Exits the application with a given exit code.", "id": "f8340:c0:m15"}
{"signature": "def _bashcomplete(cmd, prog_name, complete_var=None):", "body": "if complete_var is None:<EOL><INDENT>complete_var = '<STR_LIT>' % (prog_name.replace('<STR_LIT:->', '<STR_LIT:_>')).upper()<EOL><DEDENT>complete_instr = os.environ.get(complete_var)<EOL>if not complete_instr:<EOL><INDENT>return<EOL><DEDENT>from ._bashcomplete import bashcomplete<EOL>if bashcomplete(cmd, prog_name, complete_var, complete_instr):<EOL><INDENT>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Internal handler for the bash completion support.", "id": "f8340:m0"}
{"signature": "def iter_params_for_processing(invocation_order, declaration_order):", "body": "def sort_key(item):<EOL><INDENT>try:<EOL><INDENT>idx = invocation_order.index(item)<EOL><DEDENT>except ValueError:<EOL><INDENT>idx = float('<STR_LIT>')<EOL><DEDENT>return (not item.is_eager, idx)<EOL><DEDENT>return sorted(declaration_order, key=sort_key)<EOL>", "docstring": "Given a sequence of parameters in the order as should be considered\n    for processing and an iterable of parameters that exist, this returns\n    a list in the correct order as they should be processed.", "id": "f8340:m5"}
{"signature": "def ensure_object(self, object_type):", "body": "rv = self.find_object(object_type)<EOL>if rv is None:<EOL><INDENT>self.obj = rv = object_type()<EOL><DEDENT>return rv<EOL>", "docstring": "Like :meth:`find_object` but sets the innermost object to a\n        new instance of `object_type` if it does not exist.", "id": "f8340:c0:m11"}
{"signature": "def resultcallback(self, replace=False):", "body": "def decorator(f):<EOL><INDENT>old_callback = self.result_callback<EOL>if old_callback is None or replace:<EOL><INDENT>self.result_callback = f<EOL>return f<EOL><DEDENT>def function(__value, *args, **kwargs):<EOL><INDENT>return f(old_callback(__value, *args, **kwargs),<EOL>*args, **kwargs)<EOL><DEDENT>self.result_callback = rv = update_wrapper(function, f)<EOL>return rv<EOL><DEDENT>return decorator<EOL>", "docstring": "Adds a result callback to the chain command.  By default if a\n        result callback is already registered this will chain them but\n        this can be disabled with the `replace` parameter.  The result\n        callback is invoked with the return value of the subcommand\n        (or the list of return values from all subcommands if chaining\n        is enabled) as well as the parameters as they would be passed\n        to the main callback.\n\n        Example::\n\n            @click.group()\n            @click.option('-i', '--input', default=23)\n            def cli(input):\n                return 42\n\n            @cli.resultcallback()\n            def process_result(result, input):\n                return result + input\n\n        .. versionadded:: 3.0\n\n        :param replace: if set to `True` an already existing result\n                        callback will be removed.", "id": "f8340:c3:m3"}
{"signature": "def find_object(self, object_type):", "body": "node = self<EOL>while node is not None:<EOL><INDENT>if isinstance(node.obj, object_type):<EOL><INDENT>return node.obj<EOL><DEDENT>node = node.parent<EOL><DEDENT>", "docstring": "Finds the closest object of a given type.", "id": "f8340:c0:m10"}
{"signature": "def collect_usage_pieces(self, ctx):", "body": "rv = [self.options_metavar]<EOL>for param in self.get_params(ctx):<EOL><INDENT>rv.extend(param.get_usage_pieces(ctx))<EOL><DEDENT>return rv<EOL>", "docstring": "Returns all the pieces that go into the usage line and returns\n        it as a list of strings.", "id": "f8340:c2:m4"}
{"signature": "def get_help_option_names(self, ctx):", "body": "all_names = set(ctx.help_option_names)<EOL>for param in self.params:<EOL><INDENT>all_names.difference_update(param.opts)<EOL>all_names.difference_update(param.secondary_opts)<EOL><DEDENT>return all_names<EOL>", "docstring": "Returns the names for the help option.", "id": "f8340:c2:m5"}
{"signature": "def abort(self):", "body": "raise Abort()<EOL>", "docstring": "Aborts the script.", "id": "f8340:c0:m14"}
{"signature": "@contextmanager<EOL>def augment_usage_errors(ctx, param=None):", "body": "try:<EOL><INDENT>yield<EOL><DEDENT>except BadParameter as e:<EOL><INDENT>if e.ctx is None:<EOL><INDENT>e.ctx = ctx<EOL><DEDENT>if param is not None and e.param is None:<EOL><INDENT>e.param = param<EOL><DEDENT>raise<EOL><DEDENT>except UsageError as e:<EOL><INDENT>if e.ctx is None:<EOL><INDENT>e.ctx = ctx<EOL><DEDENT>raise<EOL><DEDENT>", "docstring": "Context manager that attaches extra information to exceptions that\n    fly.", "id": "f8340:m4"}
{"signature": "def type_cast_value(self, ctx, value):", "body": "if self.type.is_composite:<EOL><INDENT>if self.nargs <= <NUM_LIT:1>:<EOL><INDENT>raise TypeError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % self.nargs)<EOL><DEDENT>if self.multiple:<EOL><INDENT>return tuple(self.type(x or (), self, ctx) for x in value or ())<EOL><DEDENT>return self.type(value or (), self, ctx)<EOL><DEDENT>def _convert(value, level):<EOL><INDENT>if level == <NUM_LIT:0>:<EOL><INDENT>return self.type(value, self, ctx)<EOL><DEDENT>return tuple(_convert(x, level - <NUM_LIT:1>) for x in value or ())<EOL><DEDENT>return _convert(value, (self.nargs != <NUM_LIT:1>) + bool(self.multiple))<EOL>", "docstring": "Given a value this runs it properly through the type system.\n        This automatically handles things like `nargs` and `multiple` as\n        well as composite types.", "id": "f8340:c6:m6"}
{"signature": "def get_default(self, ctx):", "body": "<EOL>if callable(self.default):<EOL><INDENT>rv = self.default()<EOL><DEDENT>else:<EOL><INDENT>rv = self.default<EOL><DEDENT>return self.type_cast_value(ctx, rv)<EOL>", "docstring": "Given a context variable this calculates the default value.", "id": "f8340:c6:m3"}
{"signature": "def format_options(self, ctx, formatter):", "body": "opts = []<EOL>for param in self.get_params(ctx):<EOL><INDENT>rv = param.get_help_record(ctx)<EOL>if rv is not None:<EOL><INDENT>opts.append(rv)<EOL><DEDENT><DEDENT>if opts:<EOL><INDENT>with formatter.section('<STR_LIT>'):<EOL><INDENT>formatter.write_dl(opts)<EOL><DEDENT><DEDENT>", "docstring": "Writes all the options into the formatter if they exist.", "id": "f8340:c2:m11"}
{"signature": "def open_file(filename, mode='<STR_LIT:r>', encoding=None, errors='<STR_LIT:strict>',<EOL>lazy=False, atomic=False):", "body": "if lazy:<EOL><INDENT>return LazyFile(filename, mode, encoding, errors, atomic=atomic)<EOL><DEDENT>f, should_close = open_stream(filename, mode, encoding, errors,<EOL>atomic=atomic)<EOL>if not should_close:<EOL><INDENT>f = KeepOpenFile(f)<EOL><DEDENT>return f<EOL>", "docstring": "This is similar to how the :class:`File` works but for manual\n    usage.  Files are opened non lazy by default.  This can open regular\n    files as well as stdin/stdout if ``'-'`` is passed.\n\n    If stdin/stdout is returned the stream is wrapped so that the context\n    manager will not close the stream accidentally.  This makes it possible\n    to always use the function like this without having to worry to\n    accidentally close a standard stream::\n\n        with open_file(filename) as f:\n            ...\n\n    .. versionadded:: 3.0\n\n    :param filename: the name of the file to open (or ``'-'`` for stdin/stdout).\n    :param mode: the mode in which to open the file.\n    :param encoding: the encoding to use.\n    :param errors: the error handling for this file.\n    :param lazy: can be flipped to true to open the file lazily.\n    :param atomic: in atomic mode writes go into a temporary file and it's\n                   moved on close.", "id": "f8341:m7"}
{"signature": "def get_app_dir(app_name, roaming=True, force_posix=False):", "body": "if WIN:<EOL><INDENT>key = roaming and '<STR_LIT>' or '<STR_LIT>'<EOL>folder = os.environ.get(key)<EOL>if folder is None:<EOL><INDENT>folder = os.path.expanduser('<STR_LIT>')<EOL><DEDENT>return os.path.join(folder, app_name)<EOL><DEDENT>if force_posix:<EOL><INDENT>return os.path.join(os.path.expanduser('<STR_LIT>' + _posixify(app_name)))<EOL><DEDENT>if sys.platform == '<STR_LIT>':<EOL><INDENT>return os.path.join(os.path.expanduser(<EOL>'<STR_LIT>'), app_name)<EOL><DEDENT>return os.path.join(<EOL>os.environ.get('<STR_LIT>', os.path.expanduser('<STR_LIT>')),<EOL>_posixify(app_name))<EOL>", "docstring": "r\"\"\"Returns the config folder for the application.  The default behavior\n    is to return whatever is most appropriate for the operating system.\n\n    To give you an idea, for an app called ``\"Foo Bar\"``, something like\n    the following folders could be returned:\n\n    Mac OS X:\n      ``~/Library/Application Support/Foo Bar``\n    Mac OS X (POSIX):\n      ``~/.foo-bar``\n    Unix:\n      ``~/.config/foo-bar``\n    Unix (POSIX):\n      ``~/.foo-bar``\n    Win XP (roaming):\n      ``C:\\Documents and Settings\\<user>\\Local Settings\\Application Data\\Foo Bar``\n    Win XP (not roaming):\n      ``C:\\Documents and Settings\\<user>\\Application Data\\Foo Bar``\n    Win 7 (roaming):\n      ``C:\\Users\\<user>\\AppData\\Roaming\\Foo Bar``\n    Win 7 (not roaming):\n      ``C:\\Users\\<user>\\AppData\\Local\\Foo Bar``\n\n    .. versionadded:: 2.0\n\n    :param app_name: the application name.  
This should be properly capitalized\n                     and can contain whitespace.\n    :param roaming: controls if the folder should be roaming or not on Windows.\n                    Has no affect otherwise.\n    :param force_posix: if this is set to `True` then on any POSIX system the\n                        folder will be stored in the home folder with a leading\n                        dot instead of the XDG config home or darwin's\n                        application support folder.", "id": "f8341:m10"}
{"signature": "def get_os_args():", "body": "<EOL>if PY2 and WIN and _initial_argv_hash == _hash_py_argv():<EOL><INDENT>return _get_windows_argv()<EOL><DEDENT>return sys.argv[<NUM_LIT:1>:]<EOL>", "docstring": "This returns the argument part of sys.argv in the most appropriate\n    form for processing.  What this means is that this return value is in\n    a format that works for Click to process but does not necessarily\n    correspond well to what's actually standard for the interpreter.\n\n    On most environments the return value is ``sys.argv[:1]`` unchanged.\n    However if you are on Windows and running Python 2 the return value\n    will actually be a list of unicode strings instead because the\n    default behavior on that platform otherwise will not be able to\n    carry all possible values that sys.argv can have.\n\n    .. versionadded:: 6.0", "id": "f8341:m8"}
{"signature": "def make_str(value):", "body": "if isinstance(value, bytes):<EOL><INDENT>try:<EOL><INDENT>return value.decode(get_filesystem_encoding())<EOL><DEDENT>except UnicodeError:<EOL><INDENT>return value.decode('<STR_LIT:utf-8>', '<STR_LIT:replace>')<EOL><DEDENT><DEDENT>return text_type(value)<EOL>", "docstring": "Converts a value into a valid string.", "id": "f8341:m2"}
{"signature": "def open(self):", "body": "if self._f is not None:<EOL><INDENT>return self._f<EOL><DEDENT>try:<EOL><INDENT>rv, self.should_close = open_stream(self.name, self.mode,<EOL>self.encoding,<EOL>self.errors,<EOL>atomic=self.atomic)<EOL><DEDENT>except (IOError, OSError) as e:<EOL><INDENT>from .exceptions import FileError<EOL>raise FileError(self.name, hint=get_streerror(e))<EOL><DEDENT>self._f = rv<EOL>return rv<EOL>", "docstring": "Opens the file if it's not yet open.  This call might fail with\n        a :exc:`FileError`.  Not handling this error will produce an error\n        that Click shows.", "id": "f8341:c0:m3"}
{"signature": "def command(name=None, cls=None, **attrs):", "body": "if cls is None:<EOL><INDENT>cls = Command<EOL><DEDENT>def decorator(f):<EOL><INDENT>cmd = _make_command(f, name, attrs, cls)<EOL>cmd.__doc__ = f.__doc__<EOL>return cmd<EOL><DEDENT>return decorator<EOL>", "docstring": "Creates a new :class:`Command` and uses the decorated function as\n    callback.  This will also automatically attach all decorated\n    :func:`option`\\s and :func:`argument`\\s as parameters to the command.\n\n    The name of the command defaults to the name of the function.  If you\n    want to change that, you can pass the intended name as the first\n    argument.\n\n    All keyword arguments are forwarded to the underlying command class.\n\n    Once decorated the function turns into a :class:`Command` instance\n    that can be invoked as a command line utility or be attached to a\n    command :class:`Group`.\n\n    :param name: the name of the command.  This defaults to the function\n                 name.\n    :param cls: the command class to instantiate.  This defaults to\n                :class:`Command`.", "id": "f8342:m4"}
{"signature": "def help_option(*param_decls, **attrs):", "body": "def decorator(f):<EOL><INDENT>def callback(ctx, param, value):<EOL><INDENT>if value and not ctx.resilient_parsing:<EOL><INDENT>echo(ctx.get_help(), color=ctx.color)<EOL>ctx.exit()<EOL><DEDENT><DEDENT>attrs.setdefault('<STR_LIT>', True)<EOL>attrs.setdefault('<STR_LIT>', False)<EOL>attrs.setdefault('<STR_LIT>', '<STR_LIT>')<EOL>attrs.setdefault('<STR_LIT>', True)<EOL>attrs['<STR_LIT>'] = callback<EOL>return option(*(param_decls or ('<STR_LIT>',)), **attrs)(f)<EOL><DEDENT>return decorator<EOL>", "docstring": "Adds a ``--help`` option which immediately ends the program\n    printing out the help page.  This is usually unnecessary to add as\n    this is added by default to all commands unless suppressed.\n\n    Like :func:`version_option`, this is implemented as eager option that\n    prints in the callback and exits.\n\n    All arguments are forwarded to :func:`option`.", "id": "f8342:m12"}
{"signature": "def pass_context(f):", "body": "def new_func(*args, **kwargs):<EOL><INDENT>return f(get_current_context(), *args, **kwargs)<EOL><DEDENT>return update_wrapper(new_func, f)<EOL>", "docstring": "Marks a callback as wanting to receive the current context\n    object as first argument.", "id": "f8342:m0"}
{"signature": "def argument(*param_decls, **attrs):", "body": "def decorator(f):<EOL><INDENT>ArgumentClass = attrs.pop('<STR_LIT>', Argument)<EOL>_param_memo(f, ArgumentClass(param_decls, **attrs))<EOL>return f<EOL><DEDENT>return decorator<EOL>", "docstring": "Attaches an argument to the command.  All positional arguments are\n    passed as parameter declarations to :class:`Argument`; all keyword\n    arguments are forwarded unchanged (except ``cls``).\n    This is equivalent to creating an :class:`Argument` instance manually\n    and attaching it to the :attr:`Command.params` list.\n\n    :param cls: the argument class to instantiate.  This defaults to\n                :class:`Argument`.", "id": "f8342:m7"}
{"signature": "def pass_obj(f):", "body": "def new_func(*args, **kwargs):<EOL><INDENT>return f(get_current_context().obj, *args, **kwargs)<EOL><DEDENT>return update_wrapper(new_func, f)<EOL>", "docstring": "Similar to :func:`pass_context`, but only pass the object on the\n    context onwards (:attr:`Context.obj`).  This is useful if that object\n    represents the state of a nested system.", "id": "f8342:m1"}
{"signature": "def password_option(*param_decls, **attrs):", "body": "def decorator(f):<EOL><INDENT>attrs.setdefault('<STR_LIT>', True)<EOL>attrs.setdefault('<STR_LIT>', True)<EOL>attrs.setdefault('<STR_LIT>', True)<EOL>return option(*(param_decls or ('<STR_LIT>',)), **attrs)(f)<EOL><DEDENT>return decorator<EOL>", "docstring": "Shortcut for password prompts.\n\n    This is equivalent to decorating a function with :func:`option` with\n    the following parameters::\n\n        @click.command()\n        @click.option('--password', prompt=True, confirmation_prompt=True,\n                      hide_input=True)\n        def changeadmin(password):\n            pass", "id": "f8342:m10"}
{"signature": "def write_text(self, text):", "body": "text_width = max(self.width - self.current_indent, <NUM_LIT:11>)<EOL>indent = '<STR_LIT:U+0020>' * self.current_indent<EOL>self.write(wrap_text(text, text_width,<EOL>initial_indent=indent,<EOL>subsequent_indent=indent,<EOL>preserve_paragraphs=True))<EOL>self.write('<STR_LIT:\\n>')<EOL>", "docstring": "Writes re-indented text into the buffer.  This rewraps and\n        preserves paragraphs.", "id": "f8344:c0:m7"}
{"signature": "def write_heading(self, heading):", "body": "self.write('<STR_LIT>' % (self.current_indent, '<STR_LIT>', heading))<EOL>", "docstring": "Writes a heading into the buffer.", "id": "f8344:c0:m5"}
{"signature": "def write(self, string):", "body": "self.buffer.append(string)<EOL>", "docstring": "Writes a unicode string into the internal buffer.", "id": "f8344:c0:m1"}
{"signature": "def dedent(self):", "body": "self.current_indent -= self.indent_increment<EOL>", "docstring": "Decreases the indentation.", "id": "f8344:c0:m3"}
{"signature": "def wrap_text(text, width=<NUM_LIT>, initial_indent='<STR_LIT>', subsequent_indent='<STR_LIT>',<EOL>preserve_paragraphs=False):", "body": "from ._textwrap import TextWrapper<EOL>text = text.expandtabs()<EOL>wrapper = TextWrapper(width, initial_indent=initial_indent,<EOL>subsequent_indent=subsequent_indent,<EOL>replace_whitespace=False)<EOL>if not preserve_paragraphs:<EOL><INDENT>return wrapper.fill(text)<EOL><DEDENT>p = []<EOL>buf = []<EOL>indent = None<EOL>def _flush_par():<EOL><INDENT>if not buf:<EOL><INDENT>return<EOL><DEDENT>if buf[<NUM_LIT:0>].strip() == '<STR_LIT>':<EOL><INDENT>p.append((indent or <NUM_LIT:0>, True, '<STR_LIT:\\n>'.join(buf[<NUM_LIT:1>:])))<EOL><DEDENT>else:<EOL><INDENT>p.append((indent or <NUM_LIT:0>, False, '<STR_LIT:U+0020>'.join(buf)))<EOL><DEDENT>del buf[:]<EOL><DEDENT>for line in text.splitlines():<EOL><INDENT>if not line:<EOL><INDENT>_flush_par()<EOL>indent = None<EOL><DEDENT>else:<EOL><INDENT>if indent is None:<EOL><INDENT>orig_len = term_len(line)<EOL>line = line.lstrip()<EOL>indent = orig_len - term_len(line)<EOL><DEDENT>buf.append(line)<EOL><DEDENT><DEDENT>_flush_par()<EOL>rv = []<EOL>for indent, raw, text in p:<EOL><INDENT>with wrapper.extra_indent('<STR_LIT:U+0020>' * indent):<EOL><INDENT>if raw:<EOL><INDENT>rv.append(wrapper.indent_only(text))<EOL><DEDENT>else:<EOL><INDENT>rv.append(wrapper.fill(text))<EOL><DEDENT><DEDENT><DEDENT>return '<STR_LIT>'.join(rv)<EOL>", "docstring": "A helper function that intelligently wraps text.  
By default, it\n    assumes that it operates on a single paragraph of text but if the\n    `preserve_paragraphs` parameter is provided it will intelligently\n    handle paragraphs (defined by two empty lines).\n\n    If paragraphs are handled, a paragraph can be prefixed with an empty\n    line containing the ``\\\\b`` character (``\\\\x08``) to indicate that\n    no rewrapping should happen in that block.\n\n    :param text: the text that should be rewrapped.\n    :param width: the maximum width for the text.\n    :param initial_indent: the initial indent that should be placed on the\n                           first line as a string.\n    :param subsequent_indent: the indent string that should be placed on\n                              each consecutive line.\n    :param preserve_paragraphs: if this flag is set then the wrapping will\n                                intelligently handle paragraphs.", "id": "f8344:m2"}
{"signature": "@contextmanager<EOL><INDENT>def indentation(self):<DEDENT>", "body": "self.indent()<EOL>try:<EOL><INDENT>yield<EOL><DEDENT>finally:<EOL><INDENT>self.dedent()<EOL><DEDENT>", "docstring": "A context manager that increases the indentation.", "id": "f8344:c0:m10"}
{"signature": "def get_global_hook_path():", "body": "return unipath(os.environ.get('<STR_LIT>', '<STR_LIT>'), '<STR_LIT>')<EOL>", "docstring": "Returns the global hook path", "id": "f8346:m0"}
{"signature": "def run_global_hook(hook_name, *args):", "body": "hook_finder = HookFinder(get_global_hook_path())<EOL>hook = hook_finder(hook_name)<EOL>if hook:<EOL><INDENT>hook.run(*args)<EOL><DEDENT>", "docstring": "Attempt to run a global hook by name with args", "id": "f8346:m1"}
{"signature": "def cmd():", "body": "if platform == '<STR_LIT>':<EOL><INDENT>return ['<STR_LIT>', '<STR_LIT>']<EOL><DEDENT>elif platform == '<STR_LIT>':<EOL><INDENT>ppid = os.getppid()<EOL>ppid_cmdline_file = '<STR_LIT>'.format(ppid)<EOL>try:<EOL><INDENT>with open(ppid_cmdline_file) as f:<EOL><INDENT>cmd = f.read()<EOL><DEDENT>if cmd.endswith('<STR_LIT:\\x00>'):<EOL><INDENT>cmd = cmd[:-<NUM_LIT:1>]<EOL><DEDENT>cmd = cmd.split('<STR_LIT:\\x00>')<EOL>return cmd + [binpath('<STR_LIT>')]<EOL><DEDENT>except:<EOL><INDENT>cmd = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>cmd = '<STR_LIT>'<EOL><DEDENT>return [cmd, binpath('<STR_LIT>')]<EOL>", "docstring": "Return a command to launch a subshell", "id": "f8347:m1"}
{"signature": "def run(*args, **kwargs):", "body": "kwargs.setdefault('<STR_LIT>', os.environ)<EOL>kwargs.setdefault('<STR_LIT>', True)<EOL>try:<EOL><INDENT>subprocess.check_call('<STR_LIT:U+0020>'.join(args), **kwargs)<EOL>return True<EOL><DEDENT>except subprocess.CalledProcessError:<EOL><INDENT>logger.debug('<STR_LIT>'.format(args))<EOL>return False<EOL><DEDENT>", "docstring": "Returns True if successful, False if failure", "id": "f8347:m0"}
{"signature": "def launch(prompt_prefix=None):", "body": "if prompt_prefix:<EOL><INDENT>os.environ['<STR_LIT>'] = prompt(prompt_prefix)<EOL><DEDENT>subprocess.call(cmd(), env=os.environ.data)<EOL>", "docstring": "Launch a subshell", "id": "f8347:m3"}
{"signature": "def find_repos(self, depth=<NUM_LIT:10>):", "body": "repos = []<EOL>for root, subdirs, files in walk_dn(self.root, depth=depth):<EOL><INDENT>if '<STR_LIT>' in root:<EOL><INDENT>continue<EOL><DEDENT>if '<STR_LIT>' in subdirs:<EOL><INDENT>repos.append(root)<EOL><DEDENT><DEDENT>return repos<EOL>", "docstring": "Get all git repositories within this environment", "id": "f8349:c0:m1"}
{"signature": "def clone(self, repo_path, destination, branch=None):", "body": "logger.debug('<STR_LIT>' + repo_path)<EOL>if not destination.startswith(self.env_path):<EOL><INDENT>destination = unipath(self.env_path, destination)<EOL><DEDENT>if branch:<EOL><INDENT>return shell.run('<STR_LIT>', '<STR_LIT>', repo_path, '<STR_LIT>', branch,<EOL>'<STR_LIT>', '<STR_LIT>', destination)<EOL><DEDENT>return shell.run('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', repo_path, destination)<EOL>", "docstring": "Clone a repository to a destination relative to envrionment root", "id": "f8349:c0:m2"}
{"signature": "def upgrade(self, package):", "body": "logger.debug('<STR_LIT>' + package)<EOL>shell.run(self.pip_path, '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', package)<EOL>shell.run(self.pip_path, '<STR_LIT>', package)<EOL>", "docstring": "Update a python package using pip", "id": "f8349:c1:m3"}
{"signature": "def preprocess_dict(d):", "body": "out_env = {}<EOL>for k, v in d.items():<EOL><INDENT>if not type(v) in PREPROCESSORS:<EOL><INDENT>raise KeyError('<STR_LIT>'.format(type(v)))<EOL><DEDENT>out_env[k] = PREPROCESSORS[type(v)](v)<EOL><DEDENT>return out_env<EOL>", "docstring": "Preprocess a dict to be used as environment variables.\n\n:param d: dict to be processed", "id": "f8350:m18"}
{"signature": "def is_environment(path):", "body": "return os.path.exists(unipath(path, '<STR_LIT>'))<EOL>", "docstring": "Returns True if path refers to an environment", "id": "f8350:m2"}
{"signature": "def walk_dn(start_dir, depth=<NUM_LIT:10>):", "body": "start_depth = len(os.path.split(start_dir))<EOL>end_depth = start_depth + depth<EOL>for root, subdirs, files in os.walk(start_dir):<EOL><INDENT>yield root, subdirs, files<EOL>if len(os.path.split(root)) >= end_depth:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Walk down a directory tree. Same as os.walk but allows for a depth limit\nvia depth argument", "id": "f8350:m11"}
{"signature": "def _join_dict(d, k, v):", "body": "d[k] = v[platform]<EOL>", "docstring": "Add a dict value to an env dict", "id": "f8350:m19"}
{"signature": "def env_to_dict(env, pathsep=os.pathsep):", "body": "out_dict = {}<EOL>for k, v in env.iteritems():<EOL><INDENT>if pathsep in v:<EOL><INDENT>out_dict[k] = v.split(pathsep)<EOL><DEDENT>else:<EOL><INDENT>out_dict[k] = v<EOL><DEDENT><DEDENT>return out_dict<EOL>", "docstring": "Convert a dict containing environment variables into a standard dict.\nVariables containing multiple values will be split into a list based on\nthe argument passed to pathsep.\n\n:param env: Environment dict like os.environ.data\n:param pathsep: Path separator used to split variables", "id": "f8350:m23"}
{"signature": "def restore_env(env_dict):", "body": "if hasattr(sys, '<STR_LIT>'):<EOL><INDENT>sys.prefix = sys.real_prefix<EOL>del(sys.real_prefix)<EOL><DEDENT>replace_osenviron(expand_envvars(dict_to_env(env_dict)))<EOL>", "docstring": "Set environment variables in the current python process from a dict\n    containing envvars and values.", "id": "f8350:m28"}
{"signature": "def set_env_from_file(env_file):", "body": "with open(env_file, '<STR_LIT:r>') as f:<EOL><INDENT>env_dict = yaml.load(f.read())<EOL><DEDENT>if '<STR_LIT>' in env_dict:<EOL><INDENT>env_dict = env_dict['<STR_LIT>']<EOL><DEDENT>set_env(env_dict)<EOL>", "docstring": "Set environment variables from an environment stored in a yaml\n    file.\n\n    :param env_file: Path to environment yaml file.", "id": "f8350:m31"}
{"signature": "def is_home_environment(path):", "body": "home = unipath(os.environ.get('<STR_LIT>', '<STR_LIT>'))<EOL>path = unipath(path)<EOL>return path.startswith(home)<EOL>", "docstring": "Returns True if path is in CPENV_HOME", "id": "f8350:m1"}
{"signature": "def is_system_path(path):", "body": "return '<STR_LIT:\\\\>' in path or '<STR_LIT:/>' in path<EOL>", "docstring": "Returns True if path is a system path", "id": "f8350:m4"}
{"signature": "def is_redirecting(path):", "body": "candidate = unipath(path, '<STR_LIT>')<EOL>return os.path.exists(candidate) and os.path.isfile(candidate)<EOL>", "docstring": "Returns True if path contains a .cpenv file", "id": "f8350:m5"}
{"signature": "def unipath(*paths):", "body": "return os.path.normpath(expandpath(os.path.join(*paths)))<EOL>", "docstring": "Like os.path.join but also expands and normalizes path parts.", "id": "f8350:m8"}
{"signature": "def walk_up(start_dir, depth=<NUM_LIT:20>):", "body": "root = start_dir<EOL>for i in xrange(depth):<EOL><INDENT>contents = os.listdir(root)<EOL>subdirs, files = [], []<EOL>for f in contents:<EOL><INDENT>if os.path.isdir(os.path.join(root, f)):<EOL><INDENT>subdirs.append(f)<EOL><DEDENT>else:<EOL><INDENT>files.append(f)<EOL><DEDENT><DEDENT>yield root, subdirs, files<EOL>parent = os.path.dirname(root)<EOL>if parent and not parent == root:<EOL><INDENT>root = parent<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Walk up a directory tree", "id": "f8350:m13"}
{"signature": "def store_env(path=None):", "body": "path = path or get_store_env_tmp()<EOL>env_dict = yaml.safe_dump(os.environ.data, default_flow_style=False)<EOL>with open(path, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(env_dict)<EOL><DEDENT>return path<EOL>", "docstring": "Encode current environment as yaml and store in path or a temporary\n    file. Return the path to the stored environment.", "id": "f8350:m27"}
{"signature": "def join_dicts(*dicts):", "body": "out_dict = {}<EOL>for d in dicts:<EOL><INDENT>for k, v in d.iteritems():<EOL><INDENT>if not type(v) in JOINERS:<EOL><INDENT>raise KeyError('<STR_LIT>'.format(type(v)))<EOL><DEDENT>JOINERS[type(v)](out_dict, k, v)<EOL><DEDENT><DEDENT>return out_dict<EOL>", "docstring": "Join a bunch of dicts", "id": "f8350:m22"}
{"signature": "def touch(filepath):", "body": "with open(filepath, '<STR_LIT:a>'):<EOL><INDENT>os.utime(filepath, None)<EOL><DEDENT>", "docstring": "Touch the given filepath", "id": "f8350:m14"}
{"signature": "def redirect_to_env_paths(path):", "body": "with open(path, '<STR_LIT:r>') as f:<EOL><INDENT>redirected = f.read()<EOL><DEDENT>return shlex.split(redirected)<EOL>", "docstring": "Get environment path from redirect file", "id": "f8350:m6"}
{"signature": "def dict_to_env(d, pathsep=os.pathsep):", "body": "out_env = {}<EOL>for k, v in d.iteritems():<EOL><INDENT>if isinstance(v, list):<EOL><INDENT>out_env[k] = pathsep.join(v)<EOL><DEDENT>elif isinstance(v, string_types):<EOL><INDENT>out_env[k] = v<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>'.format(type(v)))<EOL><DEDENT><DEDENT>return out_env<EOL>", "docstring": "Convert a python dict to a dict containing valid environment variable\nvalues.\n\n:param d: Dict to convert to an env dict\n:param pathsep: Path separator used to join lists(default os.pathsep)", "id": "f8350:m24"}
{"signature": "def get_store_env_tmp():", "body": "tempdir = tempfile.gettempdir()<EOL>temp_name = '<STR_LIT>'<EOL>temp_path = unipath(tempdir, temp_name.format(random.getrandbits(<NUM_LIT:9>)))<EOL>if not os.path.exists(temp_path):<EOL><INDENT>return temp_path<EOL><DEDENT>else:<EOL><INDENT>return get_store_env_tmp()<EOL><DEDENT>", "docstring": "Returns an unused random filepath.", "id": "f8350:m26"}
{"signature": "def restore_env_from_file(env_file):", "body": "with open(env_file, '<STR_LIT:r>') as f:<EOL><INDENT>env_dict = yaml.load(f.read())<EOL><DEDENT>restore_env(env_dict)<EOL>", "docstring": "Restore the current environment from an environment stored in a\n    yaml file.\n\n    :param env_file: Path to environment yaml file.", "id": "f8350:m29"}
{"signature": "@property<EOL><INDENT>def bin_path(self):<DEDENT>", "body": "if platform == '<STR_LIT>':<EOL><INDENT>return unipath(self.path, '<STR_LIT>')<EOL><DEDENT>return unipath(self.path, '<STR_LIT>')<EOL>", "docstring": "Path to environments bin", "id": "f8351:c1:m7"}
{"signature": "@property<EOL><INDENT>def command(self):<DEDENT>", "body": "cmd = self.config.get('<STR_LIT>', None)<EOL>if cmd is None:<EOL><INDENT>return<EOL><DEDENT>cmd = cmd[platform]<EOL>return cmd['<STR_LIT:path>'], cmd['<STR_LIT:args>']<EOL>", "docstring": "Command used to launch this application module", "id": "f8351:c2:m2"}
{"signature": "def _activate(self):", "body": "old_syspath = set(sys.path)<EOL>site.addsitedir(self.site_path)<EOL>site.addsitedir(self.bin_path)<EOL>new_syspaths = set(sys.path) - old_syspath<EOL>for path in new_syspaths:<EOL><INDENT>sys.path.remove(path)<EOL>sys.path.insert(<NUM_LIT:1>, path)<EOL><DEDENT>if not hasattr(sys, '<STR_LIT>'):<EOL><INDENT>sys.real_prefix = sys.prefix<EOL><DEDENT>sys.prefix = self.path<EOL>", "docstring": "Do some serious mangling to the current python environment...\nThis is necessary to activate an environment via python.", "id": "f8351:c1:m9"}
{"signature": "def create(name_or_path=None, config=None):", "body": "<EOL>if utils.is_system_path(name_or_path):<EOL><INDENT>path = unipath(name_or_path)<EOL><DEDENT>else:<EOL><INDENT>path = unipath(get_home_path(), name_or_path)<EOL><DEDENT>if os.path.exists(path):<EOL><INDENT>raise OSError('<STR_LIT>'.format(path))<EOL><DEDENT>env = VirtualEnvironment(path)<EOL>utils.ensure_path_exists(env.path)<EOL>if config:<EOL><INDENT>if utils.is_git_repo(config):<EOL><INDENT>Git('<STR_LIT>').clone(config, env.path)<EOL><DEDENT>else:<EOL><INDENT>shutil.copy2(config, env.config_path)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>with open(env.config_path, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(defaults.environment_config)<EOL><DEDENT><DEDENT>utils.ensure_path_exists(env.hook_path)<EOL>utils.ensure_path_exists(env.modules_path)<EOL>env.run_hook('<STR_LIT>')<EOL>virtualenv.create_environment(env.path)<EOL>if not utils.is_home_environment(env.path):<EOL><INDENT>EnvironmentCache.add(env)<EOL>EnvironmentCache.save()<EOL><DEDENT>try:<EOL><INDENT>env.update()<EOL><DEDENT>except:<EOL><INDENT>utils.rmtree(path)<EOL>logger.debug('<STR_LIT>')<EOL>raise<EOL><DEDENT>else:<EOL><INDENT>env.run_hook('<STR_LIT>')<EOL><DEDENT>return env<EOL>", "docstring": "Create a virtual environment. You can pass either the name of a new\n    environment to create in your CPENV_HOME directory OR specify a full path\n    to create an environment outside your CPENV_HOME.\n\n    Create an environment in CPENV_HOME::\n\n        >>> cpenv.create('myenv')\n\n    Create an environment elsewhere::\n\n        >>> cpenv.create('~/custom_location/myenv')\n\n    :param name_or_path: Name or full path of environment\n    :param config: Environment configuration including dependencies etc...", "id": "f8352:m0"}
{"signature": "def get_home_path():", "body": "home = unipath(os.environ.get('<STR_LIT>', '<STR_LIT>'))<EOL>home_modules = unipath(home, '<STR_LIT>')<EOL>if not os.path.exists(home):<EOL><INDENT>os.makedirs(home)<EOL><DEDENT>if not os.path.exists(home_modules):<EOL><INDENT>os.makedirs(home_modules)<EOL><DEDENT>return home<EOL>", "docstring": ":returns: your home path...CPENV_HOME env var OR ~/.cpenv", "id": "f8352:m6"}
{"signature": "def deactivate():", "body": "if '<STR_LIT>' not in os.environ or '<STR_LIT>' not in os.environ:<EOL><INDENT>raise EnvironmentError('<STR_LIT>')<EOL><DEDENT>utils.restore_env_from_file(os.environ['<STR_LIT>'])<EOL>", "docstring": "Deactivates an environment by restoring all env vars to a clean state\n    stored prior to activating environments", "id": "f8352:m5"}
{"signature": "def get_active_env():", "body": "active = os.environ.get('<STR_LIT>', None)<EOL>if active:<EOL><INDENT>return VirtualEnvironment(active)<EOL><DEDENT>", "docstring": ":returns: the active environment as a :class:`VirtualEnvironment`\n    instance or None if one is not active.", "id": "f8352:m8"}
{"signature": "def get_modules():", "body": "modules = set()<EOL>cwd = os.getcwd()<EOL>for d in os.listdir(cwd):<EOL><INDENT>if d == '<STR_LIT>':<EOL><INDENT>modules.add(Module(cwd))<EOL><DEDENT>path = unipath(cwd, d)<EOL>if utils.is_module(path):<EOL><INDENT>modules.add(Module(cwd))<EOL><DEDENT><DEDENT>module_paths = get_module_paths()<EOL>for module_path in module_paths:<EOL><INDENT>for d in os.listdir(module_path):<EOL><INDENT>path = unipath(module_path, d)<EOL>if utils.is_module(path):<EOL><INDENT>modules.add(Module(path))<EOL><DEDENT><DEDENT><DEDENT>return sorted(list(modules), key=lambda x: x.name)<EOL>", "docstring": "Returns a list of available modules.", "id": "f8352:m11"}
{"signature": "def get_environment(name_or_path):", "body": "r = resolve(name_or_path)<EOL>return r.resolved[<NUM_LIT:0>]<EOL>", "docstring": "Get a :class:`VirtualEnvironment` by name or path.", "id": "f8352:m9"}
{"signature": "def get_info(pyfile):", "body": "info = {}<EOL>info_re = re.compile(r\"<STR_LIT>\")<EOL>with open(pyfile, '<STR_LIT:r>') as f:<EOL><INDENT>for line in f.readlines():<EOL><INDENT>match = info_re.search(line)<EOL>if match:<EOL><INDENT>info[match.group(<NUM_LIT:1>)] = match.group(<NUM_LIT:2>)<EOL><DEDENT><DEDENT><DEDENT>return info<EOL>", "docstring": "Retrieve dunder values from a pyfile", "id": "f8353:m0"}
{"signature": "def get_community_by_name(self, name, token=None):", "body": "parameters = dict()<EOL>parameters['<STR_LIT:name>'] = name<EOL>if token:<EOL><INDENT>parameters['<STR_LIT>'] = token<EOL><DEDENT>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Get a community based on its name.\n\n:param name: The name of the target community.\n:type name: string\n:param token: (optional) A valid token for the user in question.\n:type token: None | string\n:returns: The requested community.\n:rtype: dict", "id": "f8357:c1:m10"}
{"signature": "def search(self, search, token=None):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = search<EOL>if token:<EOL><INDENT>parameters['<STR_LIT>'] = token<EOL><DEDENT>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Get the resources corresponding to a given query.\n\n:param search: The search criterion.\n:type search: string\n:param token: (optional) The credentials to use when searching.\n:type token: None | string\n:returns: Dictionary containing the search result. Notable is the\n    dictionary item 'results', which is a list of item details.\n:rtype: dict", "id": "f8357:c1:m33"}
{"signature": "def delete_item(self, token, item_id):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = token<EOL>parameters['<STR_LIT:id>'] = item_id<EOL>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Delete the item with the passed in item_id.\n\n:param token: A valid token for the user in question.\n:type token: string\n:param item_id: The id of the item to be deleted.\n:type item_id: int | long\n:returns: None.\n:rtype: None", "id": "f8357:c1:m22"}
{"signature": "def create_small_thumbnail(self, token, item_id):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = token<EOL>parameters['<STR_LIT>'] = item_id<EOL>response = self.request(<EOL>'<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Create a 100x100 small thumbnail for the given item. It is used for\npreview purpose and displayed in the 'preview' and 'thumbnails'\nsidebar sections.\n\n:param token: A valid token for the user in question.\n:type token: string\n:param item_id: The item on which to set the thumbnail.\n:type item_id: int | long\n:returns: The item object (with the new thumbnail id) and the path\n    where the newly created thumbnail is stored.\n:rtype: dict", "id": "f8357:c5:m1"}
{"signature": "def search_item_by_name(self, name, token=None):", "body": "parameters = dict()<EOL>parameters['<STR_LIT:name>'] = name<EOL>if token:<EOL><INDENT>parameters['<STR_LIT>'] = token<EOL><DEDENT>response = self.request('<STR_LIT>', parameters)<EOL>return response['<STR_LIT>']<EOL>", "docstring": "Return all items.\n\n:param name: The name of the item to search by.\n:type name: string\n:param token: (optional) A valid token for the user in question.\n:type token: None | string\n:returns: A list of all items with the given name.\n:rtype: list[dict]", "id": "f8357:c1:m27"}
{"signature": "def list_modules(self):", "body": "response = self.request('<STR_LIT>')<EOL>return response['<STR_LIT>']<EOL>", "docstring": "List the enabled modules on the server.\n\n:returns: List of names of the enabled modules.\n:rtype: list[string]", "id": "f8357:c1:m2"}
{"signature": "def list_communities(self, token=None):", "body": "parameters = dict()<EOL>if token:<EOL><INDENT>parameters['<STR_LIT>'] = token<EOL><DEDENT>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "List all communities visible to a user.\n\n:param token: (optional) A valid token for the user in question.\n:type token: None | string\n:returns: The list of communities.\n:rtype: list[dict]", "id": "f8357:c1:m13"}
{"signature": "def get_default_api_key(self, email, password):", "body": "parameters = dict()<EOL>parameters['<STR_LIT:email>'] = email<EOL>parameters['<STR_LIT:password>'] = password<EOL>response = self.request('<STR_LIT>', parameters)<EOL>return response['<STR_LIT>']<EOL>", "docstring": "Get the default API key for a user.\n\n:param email: The email of the user.\n:type email: string\n:param password: The user's password.\n:type password: string\n:returns: API key to confirm that it was fetched successfully.\n:rtype: string", "id": "f8357:c1:m4"}
{"signature": "def get_server_version(self):", "body": "response = self.request('<STR_LIT>')<EOL>return response['<STR_LIT:version>']<EOL>", "docstring": "Get the version from the server.\n\n:returns: version code from the server\n:rtype: string", "id": "f8357:c1:m0"}
{"signature": "def get_user_by_id(self, user_id):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = user_id<EOL>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Get a user by the first and last name of that user.\n\n:param user_id: The id of the desired user.\n:type user_id: int | long\n:returns: The user requested.\n:rtype: dict", "id": "f8357:c1:m7"}
{"signature": "def add_condor_job(self, token, batchmaketaskid, jobdefinitionfilename,<EOL>outputfilename, errorfilename, logfilename,<EOL>postfilename):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = token<EOL>parameters['<STR_LIT>'] = batchmaketaskid<EOL>parameters['<STR_LIT>'] = jobdefinitionfilename<EOL>parameters['<STR_LIT>'] = outputfilename<EOL>parameters['<STR_LIT>'] = errorfilename<EOL>parameters['<STR_LIT>'] = logfilename<EOL>parameters['<STR_LIT>'] = postfilename<EOL>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Add a Condor DAG job to the Condor DAG associated with this\nBatchmake task\n\n:param token: A valid token for the user in question.\n:type token: string\n:param batchmaketaskid: id of the Batchmake task for this DAG\n:type batchmaketaskid: int | long\n:param jobdefinitionfilename: Filename of the definition file for the\n    job\n:type jobdefinitionfilename: string\n:param outputfilename: Filename of the output file for the job\n:type outputfilename: string\n:param errorfilename: Filename of the error file for the job\n:type errorfilename: string\n:param logfilename: Filename of the log file for the job\n:type logfilename: string\n:param postfilename: Filename of the post script log file for the job\n:type postfilename: string\n:return: The created Condor job DAO.\n:rtype: dict", "id": "f8357:c2:m1"}
{"signature": "def item_get(self, token, item_id):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = token<EOL>parameters['<STR_LIT:id>'] = item_id<EOL>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Get the attributes of the specified item.\n\n:param token: A valid token for the user in question.\n:type token: string\n:param item_id: The id of the requested item.\n:type item_id: int | string\n:returns: Dictionary of the item attributes.\n:rtype: dict", "id": "f8357:c1:m20"}
{"signature": "def solr_advanced_search(self, query, token=None, limit=<NUM_LIT:20>):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = query<EOL>parameters['<STR_LIT>'] = limit<EOL>if token:<EOL><INDENT>parameters['<STR_LIT>'] = token<EOL><DEDENT>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Search item metadata using Apache Solr.\n\n:param query: The Apache Lucene search query.\n:type query: string\n:param token: (optional) A valid token for the user in question.\n:type token: None | string\n:param limit: (optional) The limit of the search.\n:type limit: int | long\n:returns: The list of items that match the search query.\n:rtype: list[dict]", "id": "f8357:c6:m0"}
{"signature": "def add_condor_dag(self, token, batchmaketaskid, dagfilename,<EOL>dagmanoutfilename):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = token<EOL>parameters['<STR_LIT>'] = batchmaketaskid<EOL>parameters['<STR_LIT>'] = dagfilename<EOL>parameters['<STR_LIT>'] = dagmanoutfilename<EOL>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Add a Condor DAG to the given Batchmake task.\n\n:param token: A valid token for the user in question.\n:type token: string\n:param batchmaketaskid: id of the Batchmake task for this DAG\n:type batchmaketaskid: int | long\n:param dagfilename: Filename of the DAG file\n:type dagfilename: string\n:param dagmanoutfilename: Filename of the DAG processing output\n:type dagmanoutfilename: string\n:returns: The created Condor DAG DAO\n:rtype: dict", "id": "f8357:c2:m0"}
{"signature": "@property<EOL><INDENT>def debug(self):<DEDENT>", "body": "return self._debug<EOL>", "docstring": "Return the debug state of this driver.\n\n:returns: debug state\n:rtype: bool", "id": "f8357:c0:m4"}
{"signature": "def extract_dicommetadata(self, token, item_id):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = token<EOL>parameters['<STR_LIT>'] = item_id<EOL>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Extract DICOM metadata from the given item\n\n:param token: A valid token for the user in question.\n:type token: string\n:param item_id: id of the item to be extracted\n:type item_id: int | long\n:return: the item revision DAO\n:rtype: dict", "id": "f8357:c3:m0"}
{"signature": "def folder_children(self, token, folder_id):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = token<EOL>parameters['<STR_LIT:id>'] = folder_id<EOL>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Get the non-recursive children of the passed in folder_id.\n\n:param token: A valid token for the user in question.\n:type token: string\n:param folder_id: The id of the requested folder.\n:type folder_id: int | long\n:returns: Dictionary of two lists: 'folders' and 'items'.\n:rtype: dict[string, list]", "id": "f8357:c1:m16"}
{"signature": "def create_folder(self, token, name, parent_id, **kwargs):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = token<EOL>parameters['<STR_LIT:name>'] = name<EOL>parameters['<STR_LIT>'] = parent_id<EOL>parameters['<STR_LIT:description>'] = '<STR_LIT>'<EOL>optional_keys = ['<STR_LIT:description>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>for key in optional_keys:<EOL><INDENT>if key in kwargs:<EOL><INDENT>if key == '<STR_LIT>':<EOL><INDENT>if kwargs[key]:<EOL><INDENT>parameters['<STR_LIT>'] = kwargs[key]<EOL><DEDENT>continue<EOL><DEDENT>parameters[key] = kwargs[key]<EOL><DEDENT><DEDENT>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Create a folder at the destination specified.\n\n:param token: A valid token for the user in question.\n:type token: string\n:param name: The name of the folder to be created.\n:type name: string\n:param parent_id: The id of the targeted parent folder.\n:type parent_id: int | long\n:param description: (optional) The description text of the folder.\n:type description: string\n:param uuid: (optional) The UUID for the folder. It will be generated\n    if not given.\n:type uuid: string\n:param privacy: (optional) The privacy state of the folder\n    ('Public' or 'Private').\n:param reuse_existing: (optional) If true, will just return the\n    existing folder if there is one with the name provided.\n:type reuse_existing: bool\n:returns: Dictionary containing the details of the created folder.\n:rtype: dict", "id": "f8357:c1:m14"}
{"signature": "def __init__(self, url=\"<STR_LIT>\"):", "body": "self._api_suffix = '<STR_LIT>'<EOL>self._url = url<EOL>self._debug = False<EOL>self._verify_ssl_certificate = True<EOL>self.auth = None<EOL>", "docstring": "Constructor.\n\n:param url: (optional) URL of the server\n:type url: string", "id": "f8357:c0:m0"}
{"signature": "def create_item(self, token, name, parent_id, **kwargs):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = token<EOL>parameters['<STR_LIT:name>'] = name<EOL>parameters['<STR_LIT>'] = parent_id<EOL>optional_keys = ['<STR_LIT:description>', '<STR_LIT>', '<STR_LIT>']<EOL>for key in optional_keys:<EOL><INDENT>if key in kwargs:<EOL><INDENT>parameters[key] = kwargs[key]<EOL><DEDENT><DEDENT>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "Create an item to the server.\n\n:param token: A valid token for the user in question.\n:type token: string\n:param name: The name of the item to be created.\n:type name: string\n:param parent_id: The id of the destination folder.\n:type parent_id: int | long\n:param description: (optional) The description text of the item.\n:type description: string\n:param uuid: (optional) The UUID for the item. It will be generated if\n    not given.\n:type uuid: string\n:param privacy: (optional) The privacy state of the item\n    ('Public' or 'Private').\n:type privacy: string\n:returns: Dictionary containing the details of the created item.\n:rtype: dict", "id": "f8357:c1:m19"}
{"signature": "def mfa_otp_login(self, temp_token, one_time_pass):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = temp_token<EOL>parameters['<STR_LIT>'] = one_time_pass<EOL>response = self.request('<STR_LIT>', parameters)<EOL>return response['<STR_LIT>']<EOL>", "docstring": "Log in to get the real token using the temporary token and otp.\n\n:param temp_token: The temporary token or id returned from normal login\n:type temp_token: string\n:param one_time_pass: The one-time pass to be sent to the underlying\n    multi-factor engine.\n:type one_time_pass: string\n:returns: A standard token for interacting with the web api.\n:rtype: string", "id": "f8357:c4:m0"}
{"signature": "def create_submission(self, token, **kwargs):", "body": "parameters = {}<EOL>parameters['<STR_LIT>'] = token<EOL>optional_keys = ['<STR_LIT>', '<STR_LIT:name>']<EOL>for key in optional_keys:<EOL><INDENT>if key in kwargs:<EOL><INDENT>parameters[key] = kwargs[key]<EOL><DEDENT><DEDENT>return self.request('<STR_LIT>', parameters)<EOL>", "docstring": "Associate a result item with a particular scalar value.\n\n:param token: A valid token for the user in question.\n:type token: string\n:param uuid (optional) The uuid of the submission (must be unique)\n:type uuid: string\n:param name (optional) The name of the submission\n:type name: string\n:returns: The submission object that was created.\n:rtype: dict", "id": "f8357:c7:m1"}
{"signature": "def search_item_by_name_and_folder_name(self, name, folder_name,<EOL>token=None):", "body": "parameters = dict()<EOL>parameters['<STR_LIT:name>'] = name<EOL>parameters['<STR_LIT>'] = folder_name<EOL>if token:<EOL><INDENT>parameters['<STR_LIT>'] = token<EOL><DEDENT>response = self.request('<STR_LIT>',<EOL>parameters)<EOL>return response['<STR_LIT>']<EOL>", "docstring": "Return all items with a given name and parent folder name.\n\n:param name: The name of the item to search by.\n:type name: string\n:param folder_name: The name of the parent folder to search by.\n:type folder_name: string\n:param token: (optional) A valid token for the user in question.\n:type token: None | string\n:returns: A list of all items with the given name and parent folder\n    name.\n:rtype: list[dict]", "id": "f8357:c1:m29"}
{"signature": "def list_users(self, limit=<NUM_LIT:20>):", "body": "parameters = dict()<EOL>parameters['<STR_LIT>'] = limit<EOL>response = self.request('<STR_LIT>', parameters)<EOL>return response<EOL>", "docstring": "List the public users in the system.\n\n:param limit: (optional) The number of users to fetch.\n:type limit: int | long\n:returns: The list of users.\n:rtype: list[dict]", "id": "f8357:c1:m5"}
{"signature": "@property<EOL><INDENT>def url(self):<DEDENT>", "body": "return self._url<EOL>", "docstring": "Return the URL of the server.\n\n:returns: URL of the server\n:rtype: string", "id": "f8357:c0:m1"}
{"signature": "def set_auth(self, value):", "body": "for driver in self.drivers:<EOL><INDENT>driver.auth = value<EOL><DEDENT>", "docstring": "Set the authentication in all drivers attached to this communicator.\n\n:param value: authentication tuple to be passed to requests.request()\n:type value: None | tuple", "id": "f8358:c0:m9"}
{"signature": "@property<EOL><INDENT>def url(self):<DEDENT>", "body": "if len(self.drivers) > <NUM_LIT:0>:<EOL><INDENT>return self.drivers[<NUM_LIT:0>].url<EOL><DEDENT>else:<EOL><INDENT>return self._url<EOL><DEDENT>", "docstring": "Return the URL of the server.\n\n:returns: URL of the server\n:rtype: string", "id": "f8358:c0:m3"}
{"signature": "def __getattr__(self, name):", "body": "for driver in self.drivers:<EOL><INDENT>if hasattr(driver, name):<EOL><INDENT>return getattr(driver, name)<EOL><DEDENT><DEDENT>raise AttributeError('<STR_LIT>'<EOL>.format(type(self).__name__, name))<EOL>", "docstring": "Called when a function does not exist in the class. Pass the call down\nto one of the registered drivers.\n\n:raises AttributeError: if there is no function with the given name in\n    any of the drivers", "id": "f8358:c0:m1"}
{"signature": "def __init__(self, url, drivers=None):", "body": "if drivers is None:<EOL><INDENT>self._drivers = []<EOL>import inspect<EOL>base_driver_class = pydas.drivers.BaseDriver<EOL>for name, obj in inspect.getmembers(pydas.drivers):<EOL><INDENT>if inspect.isclass(obj):<EOL><INDENT>class_hierarchy = inspect.getmro(obj)<EOL>if base_driver_class in class_hierarchy andobj != base_driver_class:<EOL><INDENT>instance = obj(url)<EOL>self._drivers.append(instance)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self._drivers = drivers<EOL><DEDENT>self._url = url<EOL>", "docstring": "Constructor. Takes the URL of the Midas Server instance and an optional\nlist of drivers to use.\n\n:param url: URL of the server\n:type url: string\n:param drivers: (optional) list of drivers to be attached to this\n    communicator\n:type drivers: None | list [T <= pydas.drivers.BaseDriver]", "id": "f8358:c0:m0"}
{"signature": "@url.setter<EOL><INDENT>def url(self, value):<DEDENT>", "body": "for driver in self.drivers:<EOL><INDENT>driver.url = value<EOL><DEDENT>", "docstring": "Set the URL of the server in all drivers attached to this communicator.\n\n:param value: URL of the server\n:type value: string", "id": "f8358:c0:m4"}
{"signature": "@debug.setter<EOL><INDENT>def debug(self, value):<DEDENT>", "body": "for driver in self.drivers:<EOL><INDENT>driver.debug = value<EOL><DEDENT>", "docstring": "Set the debug state of all of drivers attached to this communicator.\n\n:param value: debug state of all drivers\n:type value: bool", "id": "f8358:c0:m6"}
{"signature": "@property<EOL><INDENT>def debug(self):<DEDENT>", "body": "return all(driver.debug for driver in self.drivers)<EOL>", "docstring": "Return whether the debug state of every driver is True.\n\n:returns: True if the debug state of every driver is True\n:rtype: bool", "id": "f8358:c0:m5"}
{"signature": "def _create_bitstream(file_path, local_file, item_id, log_ind=None):", "body": "checksum = _streaming_file_md5(file_path)<EOL>upload_token = session.communicator.generate_upload_token(<EOL>session.token, item_id, local_file, checksum)<EOL>if upload_token != '<STR_LIT>':<EOL><INDENT>log_trace = '<STR_LIT>'.format(file_path)<EOL>session.communicator.perform_upload(<EOL>upload_token, local_file, filepath=file_path, itemid=item_id)<EOL><DEDENT>else:<EOL><INDENT>log_trace = '<STR_LIT>''<STR_LIT>'.format(file_path)<EOL><DEDENT>if log_ind is not None:<EOL><INDENT>log_trace += log_ind<EOL><DEDENT>print(log_trace)<EOL>", "docstring": "Create a bitstream in the given item.\n\n:param file_path: full path to the local file\n:type file_path: string\n:param local_file: name of the local file\n:type local_file: string\n:param log_ind: (optional) any additional message to log upon creation of\n    the bitstream\n:type log_ind: None | string", "id": "f8359:m10"}
{"signature": "def add_item_download_callback(callback):", "body": "session.item_download_callbacks.append(callback)<EOL>", "docstring": "Pass a function to be called when an item has finished downloading.\nThis can be used for performing notifications of download progress or\ncalling additional API functions.\n\n:param callback: A function that takes four arguments. The first argument\n    is the communicator object of the current pydas context, the second is\n    the currently active API token and the third is the dict of item info,\n    the fourth argument is the local download path of the item.\n:type callback: (Communicator, string, dict, string) -> unknown", "id": "f8359:m4"}
{"signature": "def _create_or_reuse_folder(local_folder, parent_folder_id,<EOL>reuse_existing=False):", "body": "local_folder_name = os.path.basename(local_folder)<EOL>folder_id = None<EOL>if reuse_existing:<EOL><INDENT>children = session.communicator.folder_children(<EOL>session.token, parent_folder_id)<EOL>folders = children['<STR_LIT>']<EOL>for folder in folders:<EOL><INDENT>if folder['<STR_LIT:name>'] == local_folder_name:<EOL><INDENT>folder_id = folder['<STR_LIT>']<EOL>break<EOL><DEDENT><DEDENT><DEDENT>if folder_id is None:<EOL><INDENT>new_folder = session.communicator.create_folder(session.token,<EOL>local_folder_name,<EOL>parent_folder_id)<EOL>folder_id = new_folder['<STR_LIT>']<EOL><DEDENT>return folder_id<EOL>", "docstring": "Create a folder from the local file in the midas folder corresponding to\nthe parent folder id.\n\n:param local_folder: full path to a directory on the local file system\n:type local_folder: string\n:param parent_folder_id: id of parent folder on the Midas Server instance,\n    where the folder will be added\n:type parent_folder_id: int | long\n:param reuse_existing: (optional) whether to accept an existing folder of\n   the same name in the same location, or create a new one instead\n:type reuse_existing: bool", "id": "f8359:m8"}
{"signature": "def add_folder_download_callback(callback):", "body": "session.folder_download_callbacks.append(callback)<EOL>", "docstring": "Pass a function to be called when an folder has finished downloading,\nwhich happens after all of its items and recursive children folders\nhave downloaded.\nThis can be used for performing notifications of download progress or\ncalling additional API functions.\n\n:param callback: A function that takes four arguments. The first argument\n    is the communicator object of the current pydas context, the second is\n    the currently active API token, the third is the dict of folder info,\n    the fourth argument is the local download path of the folder.\n:type callback: (Communicator, string, dict, string) -> unknown", "id": "f8359:m5"}
{"signature": "def _descend_folder_for_id(parsed_path, folder_id):", "body": "if len(parsed_path) == <NUM_LIT:0>:<EOL><INDENT>return folder_id<EOL><DEDENT>session.token = verify_credentials()<EOL>base_folder = session.communicator.folder_get(session.token,<EOL>folder_id)<EOL>cur_folder_id = -<NUM_LIT:1><EOL>for path_part in parsed_path:<EOL><INDENT>cur_folder_id = base_folder['<STR_LIT>']<EOL>cur_children = session.communicator.folder_children(<EOL>session.token, cur_folder_id)<EOL>for inner_folder in cur_children['<STR_LIT>']:<EOL><INDENT>if inner_folder['<STR_LIT:name>'] == path_part:<EOL><INDENT>base_folder = session.communicator.folder_get(<EOL>session.token, inner_folder['<STR_LIT>'])<EOL>cur_folder_id = base_folder['<STR_LIT>']<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT><DEDENT>return cur_folder_id<EOL>", "docstring": "Descend a path to return a folder id starting from the given folder id.\n\n:param parsed_path: a list of folders from top to bottom of a hierarchy\n:type parsed_path: list[string]\n:param folder_id: The id of the folder from which to start the descent\n:type folder_id: int | long\n:returns: The id of the found folder or -1\n:rtype: int | long", "id": "f8359:m17"}
{"signature": "def download(server_path, local_path='<STR_LIT:.>'):", "body": "session.token = verify_credentials()<EOL>is_item, resource_id = _find_resource_id_from_path(server_path)<EOL>if resource_id == -<NUM_LIT:1>:<EOL><INDENT>print('<STR_LIT>'.format(server_path))<EOL><DEDENT>else:<EOL><INDENT>if is_item:<EOL><INDENT>_download_item(resource_id, local_path)<EOL><DEDENT>else:<EOL><INDENT>_download_folder_recursive(resource_id, local_path)<EOL><DEDENT><DEDENT>", "docstring": "Recursively download a file or item from the Midas Server instance.\n\n:param server_path: The location on the server to find the resource to\n    download\n:type server_path: string\n:param local_path: The location on the client to store the downloaded data\n:type local_path: string", "id": "f8359:m23"}
{"signature": "def _has_only_files(local_folder):", "body": "return not any(os.path.isdir(os.path.join(local_folder, entry))<EOL>for entry in os.listdir(local_folder))<EOL>", "docstring": "Return whether a folder contains only files. This will be False if the\nfolder contains any subdirectories.\n\n:param local_folder: full path to the local folder\n:type local_folder: string\n:returns: True if the folder contains only files\n:rtype: bool", "id": "f8359:m14"}
{"signature": "def _create_folder(local_folder, parent_folder_id):", "body": "new_folder = session.communicator.create_folder(<EOL>session.token, os.path.basename(local_folder), parent_folder_id)<EOL>return new_folder['<STR_LIT>']<EOL>", "docstring": "Function for creating a remote folder and returning the id. This should be\na building block for user-level functions.\n\n:param local_folder: full path to a local folder\n:type local_folder: string\n:param parent_folder_id: id of parent folder on the Midas Server instance,\n    where the new folder will be added\n:type parent_folder_id: int | long\n:returns: id of the remote folder that was created\n:rtype: int | long", "id": "f8359:m12"}
{"signature": "def _find_resource_id_from_path(path):", "body": "session.token = verify_credentials()<EOL>parsed_path = path.split('<STR_LIT:/>')<EOL>if parsed_path[-<NUM_LIT:1>] == '<STR_LIT>':<EOL><INDENT>parsed_path.pop()<EOL><DEDENT>if path.startswith('<STR_LIT>'):<EOL><INDENT>parsed_path.pop(<NUM_LIT:0>)  <EOL>parsed_path.pop(<NUM_LIT:0>)  <EOL>name = parsed_path.pop(<NUM_LIT:0>)  <EOL>firstname, lastname = name.split('<STR_LIT:_>')<EOL>end = parsed_path.pop()<EOL>user = session.communicator.get_user_by_name(firstname, lastname)<EOL>leaf_folder_id = _descend_folder_for_id(parsed_path, user['<STR_LIT>'])<EOL>return _search_folder_for_item_or_folder(end, leaf_folder_id)<EOL><DEDENT>elif path.startswith('<STR_LIT>'):<EOL><INDENT>print(parsed_path)<EOL>parsed_path.pop(<NUM_LIT:0>)  <EOL>parsed_path.pop(<NUM_LIT:0>)  <EOL>community_name = parsed_path.pop(<NUM_LIT:0>)  <EOL>end = parsed_path.pop()<EOL>community = session.communicator.get_community_by_name(community_name)<EOL>leaf_folder_id = _descend_folder_for_id(parsed_path,<EOL>community['<STR_LIT>'])<EOL>return _search_folder_for_item_or_folder(end, leaf_folder_id)<EOL><DEDENT>else:<EOL><INDENT>return False, -<NUM_LIT:1><EOL><DEDENT>", "docstring": "Get a folder id from a path on the server.\n\nWarning: This is NOT efficient at all.\n\nThe schema for this path is:\npath := \"/users/<name>/\" | \"/communities/<name>\" , {<subfolder>/}\nname := <firstname> , \"_\" , <lastname>\n\n:param path: The virtual path on the server.\n:type path: string\n:returns: a tuple indicating True or False about whether the resource is an\n    item and id of the resource i.e. (True, item_id) or (False, folder_id)\n:rtype: (bool, int | long)", "id": "f8359:m19"}
{"signature": "def _upload_as_item(local_file, parent_folder_id, file_path,<EOL>reuse_existing=False):", "body": "current_item_id = _create_or_reuse_item(local_file, parent_folder_id,<EOL>reuse_existing)<EOL>_create_bitstream(file_path, local_file, current_item_id)<EOL>for callback in session.item_upload_callbacks:<EOL><INDENT>callback(session.communicator, session.token, current_item_id)<EOL><DEDENT>", "docstring": "Function for doing an upload of a file as an item. This should be a\nbuilding block for user-level functions.\n\n:param local_file: name of local file to upload\n:type local_file: string\n:param parent_folder_id: id of parent folder on the Midas Server instance,\n    where the item will be added\n:type parent_folder_id: int | long\n:param file_path: full path to the file\n:type file_path: string\n:param reuse_existing: (optional) whether to accept an existing item of the\n    same name in the same location, or create a new one instead\n:type reuse_existing: bool", "id": "f8359:m11"}
{"signature": "def _search_folder_for_item_or_folder(name, folder_id):", "body": "session.token = verify_credentials()<EOL>children = session.communicator.folder_children(session.token, folder_id)<EOL>for folder in children['<STR_LIT>']:<EOL><INDENT>if folder['<STR_LIT:name>'] == name:<EOL><INDENT>return False, folder['<STR_LIT>']  <EOL><DEDENT><DEDENT>for item in children['<STR_LIT>']:<EOL><INDENT>if item['<STR_LIT:name>'] == name:<EOL><INDENT>return True, item['<STR_LIT>']  <EOL><DEDENT><DEDENT>return False, -<NUM_LIT:1><EOL>", "docstring": "Find an item or folder matching the name. A folder will be found first if\nboth are present.\n\n:param name: The name of the resource\n:type name: string\n:param folder_id: The folder to search within\n:type folder_id: int | long\n:returns: A tuple indicating whether the resource is an item an the id of\n    said resource. i.e. (True, item_id) or (False, folder_id). Note that in\n    the event that we do not find a result return (False, -1)\n:rtype: (bool, int | long)", "id": "f8359:m18"}
{"signature": "def add_item_upload_callback(callback):", "body": "session.item_upload_callbacks.append(callback)<EOL>", "docstring": "Pass a function to be called when an item is created. This can be quite\nuseful for performing actions such as notifications of upload progress as\nwell as calling additional API functions.\n\n:param callback: A function that takes three arguments. The first argument\n    is the communicator object of the current pydas context, the second is\n    the currently active API token and the third is the id of the item that\n    was created to result in the callback function's invocation.\n:type callback: (Communicator, string, int) -> unknown", "id": "f8359:m3"}
{"signature": "def _download_item(item_id, path='<STR_LIT:.>', item=None):", "body": "session.token = verify_credentials()<EOL>filename, content_iter = session.communicator.download_item(<EOL>item_id, session.token)<EOL>item_path = os.path.join(path, filename)<EOL>print('<STR_LIT>'.format(item_path))<EOL>out_file = open(item_path, '<STR_LIT:wb>')<EOL>for block in content_iter:<EOL><INDENT>out_file.write(block)<EOL><DEDENT>out_file.close()<EOL>for callback in session.item_download_callbacks:<EOL><INDENT>if not item:<EOL><INDENT>item = session.communicator.item_get(session.token, item_id)<EOL><DEDENT>callback(session.communicator, session.token, item, item_path)<EOL><DEDENT>", "docstring": "Download the requested item to the specified path.\n\n:param item_id: The id of the item to be downloaded\n:type item_id: int | long\n:param path: (optional) the location to download the item\n:type path: string\n:param item: The dict of item info\n:type item: dict | None", "id": "f8359:m22"}
{"signature": "def __init__(self, value):", "body": "super(PydasException, self).__init__()<EOL>self.value = value<EOL>self.method = None<EOL>self.code = None<EOL>", "docstring": "Override the constructor to support a basic message.\n\n:param value: Message to display.\n:type value: string", "id": "f8360:c0:m0"}
{"signature": "def get_exception_from_status_and_error_codes(status_code, error_code, value):", "body": "if status_code == requests.codes.bad_request:<EOL><INDENT>exception = BadRequest(value)<EOL><DEDENT>elif status_code == requests.codes.unauthorized:<EOL><INDENT>exception = Unauthorized(value)<EOL><DEDENT>elif status_code == requests.codes.forbidden:<EOL><INDENT>exception = Unauthorized(value)<EOL><DEDENT>elif status_code in [requests.codes.not_found, requests.codes.gone]:<EOL><INDENT>exception = NotFound(value)<EOL><DEDENT>elif status_code == requests.codes.method_not_allowed:<EOL><INDENT>exception = MethodNotAllowed(value)<EOL><DEDENT>elif status_code >= requests.codes.bad_request:<EOL><INDENT>exception = HTTPError(value)<EOL><DEDENT>else:<EOL><INDENT>exception = ResponseError(value)<EOL><DEDENT>if error_code == -<NUM_LIT:100>:  <EOL><INDENT>exception = InternalError(value)<EOL><DEDENT>elif error_code == -<NUM_LIT>:  <EOL><INDENT>exception = InvalidToken(value)<EOL><DEDENT>elif error_code == -<NUM_LIT>:  <EOL><INDENT>exception = UploadFailed(value)<EOL><DEDENT>elif error_code == -<NUM_LIT>:  <EOL><INDENT>exception = UploadTokenGenerationFailed(value)<EOL><DEDENT>elif error_code == -<NUM_LIT>:  <EOL><INDENT>exception = InvalidUploadToken(value)<EOL><DEDENT>elif error_code == -<NUM_LIT>:  <EOL><INDENT>exception = InvalidParameter(value)<EOL><DEDENT>elif error_code == -<NUM_LIT>:  <EOL><INDENT>exception = InvalidPolicy(value)<EOL><DEDENT>return exception<EOL>", "docstring": "Return an exception given status and error codes.\n\n:param status_code: HTTP status code.\n:type status_code: None | int\n:param error_code: Midas Server error code.\n:type error_code: None | int\n:param value: Message to display.\n:type value: string\n:returns: Exception.\n:rtype : pydas.exceptions.ResponseError", "id": "f8360:m0"}
{"signature": "def __init__(self, value, response=None):", "body": "super(ResponseError, self).__init__(value)<EOL>self.response = response<EOL>", "docstring": "Override the constructor to store the response received.\n\n:param value: Message to display.\n:type value: string\n:param response: (optional) Response received.\n:type response: requests.Response", "id": "f8360:c3:m0"}
{"signature": "def get_download_total(rows):", "body": "headers = rows.pop(<NUM_LIT:0>)<EOL>index = headers.index('<STR_LIT>')<EOL>total_downloads = sum(int(row[index]) for row in rows)<EOL>rows.insert(<NUM_LIT:0>, headers)<EOL>return total_downloads, index<EOL>", "docstring": "Return the total downloads, and the downloads column", "id": "f8370:m8"}
{"signature": "def fate(name):", "body": "return cached_download('<STR_LIT>' + name,<EOL>os.path.join('<STR_LIT>', name.replace('<STR_LIT:/>', os.path.sep)))<EOL>", "docstring": "Download and return a path to a sample from the FFmpeg test suite.\n\n    Data is handled by :func:`cached_download`.\n\n    See the `FFmpeg Automated Test Environment <https://www.ffmpeg.org/fate.html>`_", "id": "f8414:m2"}
{"signature": "def get_library_config(name):", "body": "try:<EOL><INDENT>proc = Popen(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', name], stdout=PIPE, stderr=PIPE)<EOL><DEDENT>except OSError:<EOL><INDENT>print('<STR_LIT>')<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>raw_cflags, err = proc.communicate()<EOL>if proc.wait():<EOL><INDENT>return<EOL><DEDENT>known, unknown = parse_cflags(raw_cflags.decode('<STR_LIT:utf8>'))<EOL>if unknown:<EOL><INDENT>print(\"<STR_LIT>\".format(unknown))<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>return known<EOL>", "docstring": "Get distutils-compatible extension extras for the given library.\n\n    This requires ``pkg-config``.", "id": "f8441:m1"}
{"signature": "def update_extend(dst, src):", "body": "for k, v in src.items():<EOL><INDENT>existing = dst.setdefault(k, [])<EOL>for x in v:<EOL><INDENT>if x not in existing:<EOL><INDENT>existing.append(x)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Update the `dst` with the `src`, extending values where lists.\n\n    Primiarily useful for integrating results from `get_library_config`.", "id": "f8441:m2"}
{"signature": "def new_compiler(*args, **kwargs):", "body": "make_silent = kwargs.pop('<STR_LIT>', True)<EOL>cc = _new_compiler(*args, **kwargs)<EOL>if is_msvc(cc):<EOL><INDENT>from distutils.msvc9compiler import get_build_version<EOL>if get_build_version() == <NUM_LIT:10>:<EOL><INDENT>cc.initialize()<EOL>for ldflags in [cc.ldflags_shared, cc.ldflags_shared_debug]:<EOL><INDENT>unique_extend(ldflags, ['<STR_LIT>'])<EOL><DEDENT><DEDENT>elif get_build_version() == <NUM_LIT>:<EOL><INDENT>make_silent = False<EOL><DEDENT><DEDENT>if make_silent:<EOL><INDENT>cc.spawn = _CCompiler_spawn_silent<EOL><DEDENT>return cc<EOL>", "docstring": "Create a C compiler.\n\n    :param bool silent: Eat all stdio? Defaults to ``True``.\n\n    All other arguments passed to ``distutils.ccompiler.new_compiler``.", "id": "f8441:m6"}
{"signature": "def fill_opacity(self, opacity):", "body": "opacity = pgmagick.DrawableFillOpacity(float(opacity))<EOL>self.drawer.append(opacity)<EOL>", "docstring": ":param opacity: 0.0 ~ 1.0", "id": "f8458:c1:m10"}
{"signature": "def _convert_vpathlist(input_obj):", "body": "vpl = pgmagick.VPathList()<EOL>for obj in input_obj:<EOL><INDENT>obj = pgmagick.PathMovetoAbs(pgmagick.Coordinate(obj[<NUM_LIT:0>], obj[<NUM_LIT:1>]))<EOL>vpl.append(obj)<EOL><DEDENT>return vpl<EOL>", "docstring": "convert from 'list' or 'tuple' object to pgmagick.VPathList.\n\n    :type input_obj: list or tuple", "id": "f8458:m3"}
{"signature": "def matte(self, x, y, paint_method):", "body": "paint_method = _convert_paintmethod(paint_method)<EOL>self.drawer.append(pgmagick.DrawableMatte(x, y, paint_method))<EOL>", "docstring": ":param paint_method: 'point' or 'replace' or 'floodfill' or\n                     'filltoborder' or 'reset'\n:type paint_method: str or pgmagick.PaintMethod", "id": "f8458:c1:m14"}
{"signature": "def text_antialias(self, flag=True):", "body": "antialias = pgmagick.DrawableTextAntialias(flag)<EOL>self.drawer.append(antialias)<EOL>", "docstring": "text antialias\n\n        :param flag: True or False. (default is True)\n        :type flag: bool", "id": "f8458:c1:m34"}
{"signature": "def get_exif_info(self):", "body": "_dict = {}<EOL>for tag in _EXIF_TAGS:<EOL><INDENT>ret = self.img.attribute(\"<STR_LIT>\" % tag)<EOL>if ret and ret != '<STR_LIT>':<EOL><INDENT>_dict[tag] = ret<EOL><DEDENT><DEDENT>return _dict<EOL>", "docstring": "return exif-tag dict", "id": "f8458:c0:m34"}
{"signature": "def bezier(self, points):", "body": "coordinates = pgmagick.CoordinateList()<EOL>for point in points:<EOL><INDENT>x, y = float(point[<NUM_LIT:0>]), float(point[<NUM_LIT:1>])<EOL>coordinates.append(pgmagick.Coordinate(x, y))<EOL><DEDENT>self.drawer.append(pgmagick.DrawableBezier(coordinates))<EOL>", "docstring": "Draw a Bezier-curve.\n\n        :param points: ex.) ((5, 5), (6, 6), (7, 7))\n        :type points: list", "id": "f8458:c1:m3"}
{"signature": "def version():", "body": "with io.open('<STR_LIT>') as input_file:<EOL><INDENT>for line in input_file:<EOL><INDENT>if line.startswith('<STR_LIT>'):<EOL><INDENT>return ast.parse(line).body[<NUM_LIT:0>].value.s<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Return version string.", "id": "f8473:m5"}
{"signature": "def merge(mer_inputs=MER_INPUTS, mer_output=MER_OUTPUT):", "body": "dirname = os.path.dirname(__file__)<EOL>output_file = os.path.join(dirname, DICT_DIRECTORY, mer_output)<EOL>lines = []<EOL>for in_file in MER_INPUTS:<EOL><INDENT>input_file = os.path.join(dirname, DICT_DIRECTORY, in_file)<EOL>with open(input_file, encoding='<STR_LIT:utf-8>') as f:<EOL><INDENT>for line in f:<EOL><INDENT>lines.append(line)<EOL><DEDENT><DEDENT><DEDENT>with open(output_file, '<STR_LIT:w>', encoding='<STR_LIT:utf-8>') as f:<EOL><INDENT>for line in lines:<EOL><INDENT>f.write(line)<EOL><DEDENT><DEDENT>", "docstring": "merge the phrase files into one file\n:param mer_inputs: the phrase files\n:param mer_output: the output file\n:return: None", "id": "f8477:m0"}
{"signature": "def set_conversion(self, conversion):", "body": "if self.conversion == conversion:<EOL><INDENT>return<EOL><DEDENT>else:<EOL><INDENT>self._dict_init_done = False<EOL>self.conversion = conversion<EOL><DEDENT>", "docstring": "set conversion\n:param conversion: the conversion of usage, options are\n 'hk2s', 's2hk', 's2t', 's2tw', 's2twp', 't2hk', 't2s', 't2tw', 'tw2s', and 'tw2sp'\n check the json file names in config directory\n:return: None", "id": "f8482:c0:m6"}
{"signature": "def _convert(self, string, dictionary = [], is_dict_group = False):", "body": "tree = StringTree(string)<EOL>for c_dict in dictionary:<EOL><INDENT>if isinstance(c_dict, tuple):<EOL><INDENT>tree.convert_tree(c_dict)<EOL>if not is_dict_group:<EOL><INDENT>tree = StringTree(\"<STR_LIT>\".join(tree.inorder()))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>tree = StringTree(self._convert(\"<STR_LIT>\".join(tree.inorder()), c_dict, True))<EOL><DEDENT><DEDENT>return \"<STR_LIT>\".join(tree.inorder())<EOL>", "docstring": "Convert string from Simplified Chinese to Traditional Chinese or vice versa\nIf a dictionary is part of a group of dictionaries, stop conversion on a word\nafter the first match is found.\n:param string: the input string\n:param dictionary: list of dictionaries to be applied against the string\n:param is_dict_group: indicates if this is a group of dictionaries in which only\n                      the first match in the dict group should be used\n:return: converted string", "id": "f8482:c0:m2"}
{"signature": "def _add_dict_chain(self, dict_chain, dict_dict):", "body": "if dict_dict.get('<STR_LIT:type>') == '<STR_LIT>':<EOL><INDENT>chain = []<EOL>for dict_item in dict_dict.get('<STR_LIT>'):<EOL><INDENT>self._add_dict_chain(chain, dict_item)<EOL><DEDENT>dict_chain.append(chain)<EOL><DEDENT>elif dict_dict.get('<STR_LIT:type>') == '<STR_LIT>':<EOL><INDENT>filename = dict_dict.get('<STR_LIT:file>')<EOL>dict_file = os.path.join(os.path.dirname(__file__), DICT_DIR, filename)<EOL>dict_chain.append(dict_file)<EOL><DEDENT>", "docstring": "add dict chain\n:param dict_chain: the dict chain to add to\n:param dict_dict: the dict to be added in\n:return: None", "id": "f8482:c0:m5"}
{"signature": "def node(self, node):", "body": "if node == self.node1:<EOL><INDENT>return self.node2<EOL><DEDENT>elif node == self.node2:<EOL><INDENT>return self.node1<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Return the other node", "id": "f8487:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def parse_dom(dom):<DEDENT>", "body": "root = dom.getElementsByTagName(\"<STR_LIT>\")[<NUM_LIT:0>]<EOL>graph = root.getElementsByTagName(\"<STR_LIT>\")[<NUM_LIT:0>]<EOL>name = graph.getAttribute('<STR_LIT:id>')<EOL>g = Graph(name)<EOL>for node in graph.getElementsByTagName(\"<STR_LIT>\"):<EOL><INDENT>n = g.add_node(id=node.getAttribute('<STR_LIT:id>'))<EOL>for attr in node.getElementsByTagName(\"<STR_LIT:data>\"):<EOL><INDENT>if attr.firstChild:<EOL><INDENT>n[attr.getAttribute(\"<STR_LIT:key>\")] = attr.firstChild.data<EOL><DEDENT>else:<EOL><INDENT>n[attr.getAttribute(\"<STR_LIT:key>\")] = \"<STR_LIT>\"<EOL><DEDENT><DEDENT><DEDENT>for edge in graph.getElementsByTagName(\"<STR_LIT>\"):<EOL><INDENT>source = edge.getAttribute('<STR_LIT:source>')<EOL>dest = edge.getAttribute('<STR_LIT:target>')<EOL>e = g.add_edge_by_id(source, dest)<EOL>for attr in edge.getElementsByTagName(\"<STR_LIT:data>\"):<EOL><INDENT>if attr.firstChild:<EOL><INDENT>e[attr.getAttribute(\"<STR_LIT:key>\")] = attr.firstChild.data<EOL><DEDENT>else:<EOL><INDENT>e[attr.getAttribute(\"<STR_LIT:key>\")] = \"<STR_LIT>\"<EOL><DEDENT><DEDENT><DEDENT>return g<EOL>", "docstring": "Parse dom into a Graph.\n\n        :param dom: dom as returned by minidom.parse or minidom.parseString\n        :return: A Graph representation", "id": "f8490:c0:m2"}
{"signature": "def parse_string(self, string):", "body": "dom = minidom.parseString(string)<EOL>return self.parse_dom(dom)<EOL>", "docstring": "Parse a string into a Graph.\n\n        :param string: String that is to be passed into Grapg\n        :return: Graph", "id": "f8490:c0:m4"}
{"signature": "def DFS_prefix(self, root=None):", "body": "if not root:<EOL><INDENT>root = self._root<EOL><DEDENT>return self._DFS_prefix(root)<EOL>", "docstring": "Depth-first search.\n\n.. seealso::\n   `Wikipedia DFS descritpion <http://en.wikipedia.org/wiki/Depth-first_search>`_\n\n:param root: first to start the search\n:return: list of nodes", "id": "f8491:c0:m1"}
{"signature": "def flush_empty_nodes(self):", "body": "pass<EOL>", "docstring": "not implemented", "id": "f8491:c1:m4"}
{"signature": "def BFS(self, root=None):", "body": "if not root:<EOL><INDENT>root = self.root()<EOL><DEDENT>queue = deque()<EOL>queue.append(root)<EOL>nodes = []<EOL>while len(queue) > <NUM_LIT:0>:<EOL><INDENT>x = queue.popleft()<EOL>nodes.append(x)<EOL>for child in x.children():<EOL><INDENT>queue.append(child)<EOL><DEDENT><DEDENT>return nodes<EOL>", "docstring": "Breadth-first search.\n\n.. seealso::\n   `Wikipedia BFS descritpion <http://en.wikipedia.org/wiki/Breadth-first_search>`_\n\n:param root: first to start the search\n:return: list of nodes", "id": "f8491:c0:m3"}
{"signature": "def difference(self, other, recursive=True):", "body": "if not isinstance(other, composite):<EOL><INDENT>raise AssertionError('<STR_LIT>'.format(type(other)))<EOL><DEDENT>if self.meta_type != other.meta_type:<EOL><INDENT>return self<EOL><DEDENT>if self.meta_type == '<STR_LIT:list>':<EOL><INDENT>keep = []<EOL>for item in self._list:<EOL><INDENT>if item not in other._list:<EOL><INDENT>if recursive and isinstance(item, composite):<EOL><INDENT>keep.extend(item.difference(other.index(item), recursive=True))<EOL><DEDENT>else:<EOL><INDENT>keep.append(item)<EOL><DEDENT><DEDENT><DEDENT>return composite(keep)<EOL><DEDENT>elif self.meta_type == '<STR_LIT>':<EOL><INDENT>keep = {}<EOL>for key in self._dict:<EOL><INDENT>item = self._dict[key]<EOL>if key in other._dict:<EOL><INDENT>if recursive andisinstance(item, composite) andisinstance(other.get(key), composite):<EOL><INDENT>keep[key] = item.difference(other.get(key), recursive=True)<EOL><DEDENT>elif item != other[key]:<EOL><INDENT>keep[key] = item<EOL><DEDENT><DEDENT>else:<EOL><INDENT>keep[key] = item<EOL><DEDENT><DEDENT>return composite(keep)<EOL><DEDENT>return<EOL>", "docstring": "Recursively compute difference of data. For dictionaries, items\nfor specific keys will be reduced to differences. For lists, items\nwill be reduced to differences. This method is meant to be analogous\nto set.difference for composite objects.\n\nArgs:\n    other (composite): Other composite object to difference with.\n    recursive (bool): Whether or not to perform the operation recursively,\n        for all nested composite objects.", "id": "f8499:c0:m19"}
{"signature": "def get(self, *args, **kwargs):", "body": "return self._dict.get(*args, **kwargs)<EOL>", "docstring": "Return item or None, depending on if item exists. This is\nmeant to be similar to dict.get() for safe access of a property.", "id": "f8499:c0:m22"}
{"signature": "def intersection(self, other, recursive=True):", "body": "if not isinstance(other, composite):<EOL><INDENT>raise AssertionError('<STR_LIT>'.format(type(other)))<EOL><DEDENT>if self.meta_type != other.meta_type:<EOL><INDENT>return composite({})<EOL><DEDENT>if self.meta_type == '<STR_LIT:list>':<EOL><INDENT>keep = []<EOL>for item in self._list:<EOL><INDENT>if item in other._list:<EOL><INDENT>if recursive and isinstance(item, composite):<EOL><INDENT>keep.extend(item.intersection(other.index(item), recursive=True))<EOL><DEDENT>else:<EOL><INDENT>keep.append(item)<EOL><DEDENT><DEDENT><DEDENT>return composite(keep)<EOL><DEDENT>elif self.meta_type == '<STR_LIT>':<EOL><INDENT>keep = {}<EOL>for key in self._dict:<EOL><INDENT>item = self._dict[key]<EOL>if key in other._dict:<EOL><INDENT>if recursive andisinstance(item, composite) andisinstance(other.get(key), composite):<EOL><INDENT>keep[key] = item.intersection(other.get(key), recursive=True)<EOL><DEDENT>elif item == other[key]:<EOL><INDENT>keep[key] = item<EOL><DEDENT><DEDENT><DEDENT>return composite(keep)<EOL><DEDENT>return<EOL>", "docstring": "Recursively compute intersection of data. For dictionaries, items\nfor specific keys will be reduced to unique items. For lists, items\nwill be reduced to unique items. This method is meant to be analogous\nto set.intersection for composite objects.\n\nArgs:\n    other (composite): Other composite object to intersect with.\n    recursive (bool): Whether or not to perform the operation recursively,\n        for all nested composite objects.", "id": "f8499:c0:m18"}
{"signature": "def prune(self, regex=r\"<STR_LIT>\"):", "body": "return filetree(self.root, ignore=self.ignore, regex=regex)<EOL>", "docstring": "Prune leaves of filetree according to specified\nregular expression.\n\nArgs:\n    regex (str): Regular expression to use in pruning tree.", "id": "f8499:c1:m12"}
{"signature": "def values(self):", "body": "if self.meta_type == '<STR_LIT:list>':<EOL><INDENT>return self._list<EOL><DEDENT>elif self.meta_type == '<STR_LIT>':<EOL><INDENT>return self._dict.values()<EOL><DEDENT>", "docstring": "Return keys for object, if they are available.", "id": "f8499:c0:m27"}
{"signature": "def index(self, item):", "body": "return self._list.index(item)<EOL>", "docstring": "Return index containing value.", "id": "f8499:c0:m21"}
{"signature": "def json(self):", "body": "data = {}<EOL>for item in self._data:<EOL><INDENT>if isinstance(self._data[item], filetree):<EOL><INDENT>data[item] = self._data[item].json()<EOL><DEDENT>else:<EOL><INDENT>data[item] = self._data[item]<EOL><DEDENT><DEDENT>return data<EOL>", "docstring": "Return JSON representation of object.", "id": "f8499:c1:m9"}
{"signature": "def keys(self):", "body": "if self.meta_type == '<STR_LIT:list>':<EOL><INDENT>return None<EOL><DEDENT>elif self.meta_type == '<STR_LIT>':<EOL><INDENT>return self._dict.keys()<EOL><DEDENT>", "docstring": "Return keys for object, if they are available.", "id": "f8499:c0:m25"}
{"signature": "def json(self):", "body": "if self.meta_type == '<STR_LIT:list>':<EOL><INDENT>ret = []<EOL>for dat in self._list:<EOL><INDENT>if not isinstance(dat, composite):<EOL><INDENT>ret.append(dat)<EOL><DEDENT>else:<EOL><INDENT>ret.append(dat.json())<EOL><DEDENT><DEDENT>return ret<EOL><DEDENT>elif self.meta_type == '<STR_LIT>':<EOL><INDENT>ret = {}<EOL>for key in self._dict:<EOL><INDENT>if not isinstance(self._dict[key], composite):<EOL><INDENT>ret[key] = self._dict[key]<EOL><DEDENT>else:<EOL><INDENT>ret[key] = self._dict[key].json()<EOL><DEDENT><DEDENT>return ret<EOL><DEDENT>", "docstring": "Return JSON representation of object.", "id": "f8499:c0:m30"}
{"signature": "def write_json(self, fh, pretty=True):", "body": "sjson = json.JSONEncoder().encode(self.json())<EOL>if pretty:<EOL><INDENT>json.dump(json.loads(sjson), fh, sort_keys=True, indent=<NUM_LIT:4>)<EOL><DEDENT>else:<EOL><INDENT>json.dump(json.loads(sjson), fh)<EOL><DEDENT>return<EOL>", "docstring": "Write composite object to file handle in JSON format.\n\nArgs:\n    fh (file): File handle to write to.\n    pretty (bool): Sort keys and indent in output.", "id": "f8499:c0:m31"}
{"signature": "@classmethod<EOL><INDENT>def from_json(cls, fh):<DEDENT>", "body": "if isinstance(fh, str):<EOL><INDENT>return cls(json.loads(fh))<EOL><DEDENT>else:<EOL><INDENT>return cls(json.load(fh))<EOL><DEDENT>", "docstring": "Load json from file handle.\n\nArgs:\n    fh (file): File handle to load from.\n\nExamlple:\n    >>> with open('data.json', 'r') as json:\n    >>>    data = composite.load(json)", "id": "f8499:c0:m2"}
{"signature": "def append(self, item):", "body": "if self.meta_type == '<STR_LIT>':<EOL><INDENT>raise AssertionError('<STR_LIT>')<EOL><DEDENT>if self.meta_type == '<STR_LIT:list>':<EOL><INDENT>self._list.append(item)<EOL><DEDENT>return<EOL>", "docstring": "Append to object, if object is list.", "id": "f8499:c0:m28"}
{"signature": "def write(self, fh, pretty=True):", "body": "return self.write_json(fh, pretty=pretty)<EOL>", "docstring": "API niceness defaulting to composite.write_json().", "id": "f8499:c0:m33"}
{"signature": "@classmethod<EOL><INDENT>def from_string(cls, string):<DEDENT>", "body": "return cls(eval(string))<EOL>", "docstring": "Load data from string.\n\nArgs:\n    string (str): String to load from.\n\nExamlple:\n    >>> with open('data.json', 'r') as json:\n    >>>     jdat = json.read()\n    >>> data = composite.from_string(jdat)", "id": "f8499:c0:m4"}
{"signature": "def deprecated(func):", "body": "@wraps(func)<EOL>def decorator(*args, **kwargs):<EOL><INDENT>warnings.warn(<EOL>\"<STR_LIT>\".format(func.__name__),<EOL>category=DeprecationWarning, stacklevel=<NUM_LIT:2><EOL>)<EOL>return func(*args, **kwargs)<EOL><DEDENT>return decorator<EOL>", "docstring": "Decorator for warning user of depricated functions before use.", "id": "f8502:m1"}
{"signature": "def require(method):", "body": "def decorator(func):<EOL><INDENT>@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>if not hasattr(args[<NUM_LIT:0>], method):<EOL><INDENT>raise AssertionError('<STR_LIT>'.format(args[<NUM_LIT:0>].__class__.__name__, method))<EOL><DEDENT>callmethod = method + '<STR_LIT>'<EOL>if not hasattr(args[<NUM_LIT:0>], callmethod):<EOL><INDENT>setattr(args[<NUM_LIT:0>], callmethod, False)<EOL><DEDENT>if not getattr(args[<NUM_LIT:0>], callmethod):<EOL><INDENT>getattr(args[<NUM_LIT:0>], method)()<EOL>setattr(args[<NUM_LIT:0>], callmethod, True)<EOL><DEDENT>return func(*args, **kwargs)<EOL><DEDENT>return wrapper<EOL><DEDENT>return decorator<EOL>", "docstring": "Decorator for managing chained dependencies of different class\nproperties. The @require decorator allows developers to specify\nthat a function call must be operated on before another property\nor function call is accessed, so that data and processing for an\nentire class can be evaluated in a lazy way (i.e. not all upon\ninstantiation).\n\nExamples:\n\n    >>> class Foo(Bar):\n    >>>\n    >>>    def a(self):\n    >>>        print 'a!'\n    >>>        return 1\n    >>>\n    >>>    @require('a')\n    >>>    @property\n    >>>    def b(self):\n    >>>        print 'b!'\n    >>>        return self.a + 1\n    >>>\n    >>> foo = Foo()\n    >>> print foo.b\n    >>>\n    'a!'\n    'b!'\n    2", "id": "f8503:m0"}
{"signature": "def exception(exception):", "body": "def decorator(func):<EOL><INDENT>@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>try:<EOL><INDENT>return func(*args, **kwargs)<EOL><DEDENT>except Exception as exe:<EOL><INDENT>raise raise_with_traceback(exception(exe))<EOL><DEDENT><DEDENT>return wrapper<EOL><DEDENT>return decorator<EOL>", "docstring": "Wrap function/method with specific exception if any\nexception occurs during function execution.\n\nArgs:\n    exception (Exception): Exception to re-cast error as.\n\nExamples:\n    >>> from gems import exception\n    >>>\n    >>> class MyCustomException(Exception):\n    >>>     pass\n    >>>\n    >>> @exception(MyCustomException)\n    >>> def func():\n    >>>     return 1 / 0\n    >>>\n    >>> func()\n    Traceback (most recent call last):\n      File \"<stdin>\", line 1, in <module>\n      File \"gems/decorators.py\", line 96, in wrapper\n        return func(*args, **kwargs)\n      File \"<stdin>\", line 3, in func\n    __main__.MyCustomException: integer division or modulo by zero", "id": "f8503:m1"}
{"signature": "def addOptionBool(self, name, value):", "body": "return self.options.AddOptionBool(str_to_cppstr(name), value)<EOL>", "docstring": ".. _addOptionBool:\n\nAdd a boolean option.\n\n:param name: The name of the option.\n:type name: str\n:param value: The value of the option.\n:type value: boolean\n:return: The result of the operation.\n:rtype: bool\n\n:see: addOption_, addOptionInt_, addOptionString_", "id": "f8510:c0:m5"}
{"signature": "def areLocked(self):", "body": "return self.options.AreLocked()<EOL>", "docstring": ".. _areLocked:\n\n Test whether the options have been locked.\n\n:return: true if the options have been locked.\n:rtype: boolean\n\n:see: lock_", "id": "f8510:c0:m4"}
{"signature": "def addOption(self, name, value):", "body": "if name not in PyOptionList:<EOL><INDENT>return False<EOL><DEDENT>if PyOptionList[name]['<STR_LIT:type>'] == \"<STR_LIT>\":<EOL><INDENT>return self.addOptionString(name, value)<EOL><DEDENT>elif PyOptionList[name]['<STR_LIT:type>'] == \"<STR_LIT>\":<EOL><INDENT>return self.addOptionBool(name, value)<EOL><DEDENT>elif PyOptionList[name]['<STR_LIT:type>'] == \"<STR_LIT>\":<EOL><INDENT>return self.addOptionInt(name, value)<EOL><DEDENT>return False<EOL>", "docstring": ".. _addOption:\n\nAdd an option.\n\n:param name: The name of the option.\n:type name: string\n:param value: The value of the option.\n:type value: boolean, integer, string\n:return: The result of the operation.\n:rtype: bool\n\n:see: addOptionBool_, addOptionInt_, addOptionString_", "id": "f8510:c0:m8"}
{"signature": "def addOptionString(self, name, value, append=False):", "body": "return self.options.AddOptionString(<EOL>str_to_cppstr(name), str_to_cppstr(value), append)<EOL>", "docstring": ".. _addOptionString:\n\nAdd a string option.\n\n:param name: The name of the option.  Option names are case insensitive and must be unique.\n:type name: str\n:param value: The value of the option.\n:type value: str\n:param append: Setting append to true will cause values read from the command line\n or XML file to be concatenated into a comma delimited set.  If _append is false,\n newer values will overwrite older ones.\n:type append: boolean\n:return: The result of the operation.\n:rtype: bool\n\n:see: addOption_, addOptionBool_, addOptionInt_", "id": "f8510:c0:m7"}
{"signature": "def __init__(self, config_path=None, user_path=\"<STR_LIT:.>\", cmd_line=\"<STR_LIT>\"):", "body": "if config_path is None:<EOL><INDENT>config_path = self.getConfigPath()<EOL><DEDENT>if config_path is None:<EOL><INDENT>raise LibZWaveException(\"<STR_LIT>\")<EOL><DEDENT>self._config_path = config_path<EOL>if user_path is None:<EOL><INDENT>user_path = \"<STR_LIT:.>\"<EOL><DEDENT>self._user_path = user_path<EOL>if cmd_line is None:<EOL><INDENT>cmd_line=\"<STR_LIT>\"<EOL><DEDENT>self._cmd_line = cmd_line<EOL>self.create(self._config_path, self._user_path, self._cmd_line)<EOL>", "docstring": "Create an option object and check that parameters are valid.\n\n:param device: The device to use\n:type device: str\n:param config_path: The openzwave config directory. If None, try to configure automatically.\n:type config_path: str\n:param user_path: The user directory\n:type user_path: str\n:param cmd_line: The \"command line\" options of the openzwave library\n:type cmd_line: str", "id": "f8510:c0:m0"}
{"signature": "def compile(code: list, consts: list, names: list, varnames: list,<EOL>func_name: str = \"<STR_LIT>\",<EOL>arg_count: int = <NUM_LIT:0>, kwarg_defaults: Tuple[Any] = (), use_safety_wrapper: bool = True):", "body": "varnames = tuple(varnames)<EOL>consts = tuple(consts)<EOL>names = tuple(names)<EOL>code = util.flatten(code)<EOL>if arg_count > len(varnames):<EOL><INDENT>raise CompileError(\"<STR_LIT>\")<EOL><DEDENT>if len(kwarg_defaults) > len(varnames):<EOL><INDENT>raise CompileError(\"<STR_LIT>\")<EOL><DEDENT>bc = compile_bytecode(code)<EOL>dis.dis(bc)<EOL>if PY36:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if bc[-<NUM_LIT:1>] != tokens.RETURN_VALUE:<EOL><INDENT>raise CompileError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT><DEDENT>flags = <NUM_LIT:1> | <NUM_LIT:2> | <NUM_LIT:64><EOL>frame_data = inspect.stack()[<NUM_LIT:1>]<EOL>if sys.version_info[<NUM_LIT:0>:<NUM_LIT:2>] > (<NUM_LIT:3>, <NUM_LIT:3>):<EOL><INDENT>stack_size = _simulate_stack(dis._get_instructions_bytes(<EOL>bc, constants=consts, names=names, varnames=varnames)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>warnings.warn(\"<STR_LIT>\")<EOL>stack_size = <NUM_LIT><EOL><DEDENT>_optimize_warn_pass(dis._get_instructions_bytes(bc, constants=consts, names=names, varnames=varnames))<EOL>obb = types.CodeType(<EOL>arg_count,  <EOL><NUM_LIT:0>,  <EOL>len(varnames),  <EOL>stack_size,  <EOL>flags,  <EOL>bc,  <EOL>consts,  <EOL>names,  <EOL>varnames,  <EOL>frame_data[<NUM_LIT:1>],  <EOL>func_name,  <EOL>frame_data[<NUM_LIT:2>],  <EOL>b'<STR_LIT>',  <EOL>(),  <EOL>()  <EOL>)<EOL>f_globals = frame_data[<NUM_LIT:0>].f_globals<EOL>f = types.FunctionType(obb, f_globals)<EOL>f.__name__ = func_name<EOL>f.__defaults__ = kwarg_defaults<EOL>if use_safety_wrapper:<EOL><INDENT>def __safety_wrapper(*args, **kwargs):<EOL><INDENT>try:<EOL><INDENT>return f(*args, **kwargs)<EOL><DEDENT>except SystemError as e:<EOL><INDENT>if '<STR_LIT>' not in '<STR_LIT:U+0020>'.join(e.args):<EOL><INDENT>raise<EOL><DEDENT>msg = 
\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\".format(f.__name__)<EOL>file = io.StringIO()<EOL>with contextlib.redirect_stdout(file):<EOL><INDENT>dis.dis(f)<EOL><DEDENT>msg += file.getvalue()<EOL>raise SystemError(msg) from e<EOL><DEDENT><DEDENT>returned_func = __safety_wrapper<EOL>returned_func.wrapped = f<EOL><DEDENT>else:<EOL><INDENT>returned_func = f<EOL><DEDENT>return returned_func<EOL>", "docstring": "Compiles a set of bytecode instructions into a working function, using Python's bytecode\ncompiler.\n\n:param code: A list of bytecode instructions.\n:param consts: A list of constants to compile into the function.\n:param names: A list of names to compile into the function.\n:param varnames: A list of ``varnames`` to compile into the function.\n:param func_name: The name of the function to use.\n:param arg_count: The number of arguments this function takes. Must be ``<= len(varnames)``.\n:param kwarg_defaults: A tuple of defaults for kwargs.\n:param use_safety_wrapper: Use the safety wrapper? This hijacks SystemError to print better \\\n    stack traces.", "id": "f8513:m3"}
{"signature": "def _simulate_stack(code: list) -> int:", "body": "max_stack = <NUM_LIT:0><EOL>curr_stack = <NUM_LIT:0><EOL>def _check_stack(ins):<EOL><INDENT>if curr_stack < <NUM_LIT:0>:<EOL><INDENT>raise CompileError(\"<STR_LIT>\".format(ins))<EOL><DEDENT>if curr_stack > max_stack:<EOL><INDENT>return curr_stack<EOL><DEDENT><DEDENT>for instruction in code:<EOL><INDENT>assert isinstance(instruction, dis.Instruction)<EOL>if instruction.arg is not None:<EOL><INDENT>try:<EOL><INDENT>effect = dis.stack_effect(instruction.opcode, instruction.arg)<EOL><DEDENT>except ValueError as e:<EOL><INDENT>raise CompileError(\"<STR_LIT>\"<EOL>.format(instruction.opcode)) from e<EOL><DEDENT><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>effect = dis.stack_effect(instruction.opcode)<EOL><DEDENT>except ValueError as e:<EOL><INDENT>raise CompileError(\"<STR_LIT>\"<EOL>.format(instruction.opcode)) from e<EOL><DEDENT><DEDENT>curr_stack += effect<EOL>_should_new_stack = _check_stack(instruction)<EOL>if _should_new_stack:<EOL><INDENT>max_stack = _should_new_stack<EOL><DEDENT><DEDENT>return max_stack<EOL>", "docstring": "Simulates the actions of the stack, to check safety.\n\nThis returns the maximum needed stack.", "id": "f8513:m1"}
{"signature": "def generate_load_const(index: int) -> bytes:", "body": "return generate_simple_call(tokens.LOAD_CONST, index)<EOL>", "docstring": "Generates a LOAD_CONST instruction.\n\n:param index: The index of the const to load.\n:return: The generated bytecode.", "id": "f8514:m6"}
{"signature": "def generate_load_global(index: int) -> bytes:", "body": "return generate_simple_call(tokens.LOAD_GLOBAL, index)<EOL>", "docstring": "Generates a LOAD_GLOBAL instruction.\n\n:param index: The index of the global to load.\n:return: The generated bytecode.", "id": "f8514:m4"}
{"signature": "def generate_load_fast(index: int) -> bytes:", "body": "return generate_simple_call(tokens.LOAD_FAST, index)<EOL>", "docstring": "Generates a LOAD_FAST operation.\n\n:param index: The index of the varname to load.\n:return: The generated bytecode.", "id": "f8514:m5"}
{"signature": "def ensure_instruction(instruction: int) -> bytes:", "body": "if PY36:<EOL><INDENT>return instruction.to_bytes(<NUM_LIT:2>, byteorder=\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>return instruction.to_bytes(<NUM_LIT:1>, byteorder=\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Wraps an instruction to be Python 3.6+ compatible. This does nothing on Python 3.5 and below.\n\nThis is most useful for operating on bare, single-width instructions such as\n``RETURN_FUNCTION`` in a version portable way.\n\n:param instruction: The instruction integer to use.\n:return: A safe bytes object, if applicable.", "id": "f8514:m0"}
{"signature": "def generate_simple_call(opcode: int, index: int):", "body": "bs = b\"<STR_LIT>\"<EOL>bs += opcode.to_bytes(<NUM_LIT:1>, byteorder=\"<STR_LIT>\")<EOL>if isinstance(index, int):<EOL><INDENT>if PY36:<EOL><INDENT>bs += index.to_bytes(<NUM_LIT:1>, byteorder=\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>bs += index.to_bytes(<NUM_LIT:2>, byteorder=\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>bs += index<EOL><DEDENT>return bs<EOL>", "docstring": "Generates a simple call, with an index for something.\n\n:param opcode: The opcode to generate.\n:param index: The index to use as an argument.\n:return:", "id": "f8514:m2"}
{"signature": "def _get_name_info(name_index, name_list):", "body": "argval = name_index<EOL>if name_list is not None:<EOL><INDENT>try:<EOL><INDENT>argval = name_list[name_index]<EOL><DEDENT>except IndexError:<EOL><INDENT>raise ValidationError(\"<STR_LIT>\".format(name_index)) from None<EOL><DEDENT>argrepr = argval<EOL><DEDENT>else:<EOL><INDENT>argrepr = repr(argval)<EOL><DEDENT>return argval, argrepr<EOL>", "docstring": "Helper to get optional details about named references\n\n       Returns the dereferenced name as both value and repr if the name\n       list is defined.\n       Otherwise returns the name index and its repr().", "id": "f8514:m9"}
{"signature": "def __init__(self, iterator: _PyteAugmentedValidator, body: list):", "body": "self.iterator = iterator<EOL>self._body = list(util.flatten(body))<EOL>", "docstring": "Represents a for operator.\n\n:param iterator: A :class:`.PyteAugmentedValidator` that represents the iterable.\n:param body: A list of instructions to execute on each loop.\n\nParameters:\n\n    iterator: _PyteAugmentedValidator\n        This should be a saved value that is iterable, i.e a saved list or something.\n\n    body: list\n        A list of instructions to execute, similarly to IF.", "id": "f8517:c0:m0"}
{"signature": "def to_bytes_36(self, previous: bytes):", "body": "<EOL>bc = b\"<STR_LIT>\"<EOL>it_bc = util.generate_bytecode_from_obb(self.iterator, previous)<EOL>bc += it_bc<EOL>bc += util.ensure_instruction(tokens.GET_ITER)<EOL>", "docstring": "A to-bytes specific to Python 3.6 and above.", "id": "f8517:c0:m2"}
{"signature": "def to_bytes_35(self, previous: bytes):", "body": "<EOL>bc = b\"<STR_LIT>\"<EOL>it_bc = util.generate_bytecode_from_obb(self.iterator, previous)<EOL>bc += it_bc<EOL>bc += util.generate_bytecode_from_obb(tokens.GET_ITER, b\"<STR_LIT>\")<EOL>prev_len = len(previous) + len(bc)<EOL>body_bc = b\"<STR_LIT>\"<EOL>for op in self._body:<EOL><INDENT>padded_bc = previous<EOL>padded_bc += b\"<STR_LIT>\"<EOL>padded_bc += bc<EOL>padded_bc += b\"<STR_LIT>\"<EOL>padded_bc += body_bc<EOL>body_bc += util.generate_bytecode_from_obb(op, padded_bc)<EOL><DEDENT>body_bc += util.generate_simple_call(tokens.JUMP_ABSOLUTE, prev_len + <NUM_LIT:3>)<EOL>body_bc += util.generate_bytecode_from_obb(tokens.POP_BLOCK, b\"<STR_LIT>\")<EOL>body_bc = util.generate_simple_call(tokens.FOR_ITER, len(body_bc) - <NUM_LIT:1>) + body_bc<EOL>bc = util.generate_simple_call(tokens.SETUP_LOOP, prev_len + len(body_bc) - <NUM_LIT:6>) + bc + body_bc<EOL>return bc<EOL>", "docstring": "A to-bytes specific to Python 3.5 and below.", "id": "f8517:c0:m1"}
{"signature": "def to_bytes(self, previous: bytes):", "body": "<EOL>if len(self.conditions) != len(self.body):<EOL><INDENT>raise exc.CompileError(\"<STR_LIT>\")<EOL><DEDENT>bc = b\"<STR_LIT>\"<EOL>prev_len = len(previous)<EOL>for condition, body in zip(self.conditions, self.body):<EOL><INDENT>cond_bytecode = condition.to_bytecode(previous)<EOL>bc += cond_bytecode<EOL>body_bc = compiler.compile_bytecode(body)<EOL>bdyl = len(body_bc)<EOL>gen_len = prev_len + len(cond_bytecode) + bdyl + <NUM_LIT:1><EOL>bc += generate_simple_call(tokens.POP_JUMP_IF_FALSE, gen_len)<EOL>bc += body_bc<EOL>prev_len = len(previous) + len(bc)<EOL><DEDENT>return bc<EOL>", "docstring": "Complex code ahead. Comments have been added in as needed.", "id": "f8519:c0:m1"}
{"signature": "def attr(self, item: _PyteAugmentedValidator):", "body": "self._attrs.append(item)<EOL>return self<EOL>", "docstring": "Add an attribute to the chain of attributes to load.", "id": "f8522:c1:m2"}
{"signature": "def _get_instructions_bytes(code, varnames=None, names=None, constants=None,<EOL>cells=None, linestarts=None, line_offset=<NUM_LIT:0>):", "body": "labels = dis.findlabels(code)<EOL>extended_arg = <NUM_LIT:0><EOL>starts_line = None<EOL>free = None<EOL>n = len(code)<EOL>i = <NUM_LIT:0><EOL>while i < n:<EOL><INDENT>op = code[i]<EOL>offset = i<EOL>if linestarts is not None:<EOL><INDENT>starts_line = linestarts.get(i, None)<EOL>if starts_line is not None:<EOL><INDENT>starts_line += line_offset<EOL><DEDENT><DEDENT>is_jump_target = i in labels<EOL>i = i + <NUM_LIT:1><EOL>arg = None<EOL>argval = None<EOL>argrepr = '<STR_LIT>'<EOL>if op >= dis.HAVE_ARGUMENT:<EOL><INDENT>arg = code[i] + code[i + <NUM_LIT:1>] * <NUM_LIT> + extended_arg<EOL>extended_arg = <NUM_LIT:0><EOL>i = i + <NUM_LIT:2><EOL>if op == dis.EXTENDED_ARG:<EOL><INDENT>extended_arg = arg * <NUM_LIT><EOL><DEDENT>argval = arg<EOL>if op in dis.hasconst:<EOL><INDENT>argval, argrepr = dis._get_const_info(arg, constants)<EOL><DEDENT>elif op in dis.hasname:<EOL><INDENT>argval, argrepr = dis._get_name_info(arg, names)<EOL><DEDENT>elif op in dis.hasjrel:<EOL><INDENT>argval = i + arg<EOL>argrepr = \"<STR_LIT>\" + repr(argval)<EOL><DEDENT>elif op in dis.haslocal:<EOL><INDENT>argval, argrepr = dis._get_name_info(arg, varnames)<EOL><DEDENT>elif op in dis.hascompare:<EOL><INDENT>argval = dis.cmp_op[arg]<EOL>argrepr = argval<EOL><DEDENT>elif op in dis.hasfree:<EOL><INDENT>argval, argrepr = dis._get_name_info(arg, cells)<EOL><DEDENT>elif op in dis.hasnargs:<EOL><INDENT>argrepr = \"<STR_LIT>\" % (code[i - <NUM_LIT:2>], code[i - <NUM_LIT:1>])<EOL><DEDENT><DEDENT>yield dis.Instruction(dis.opname[op], op,<EOL>arg, argval, argrepr,<EOL>offset, starts_line, is_jump_target)<EOL><DEDENT>", "docstring": "Iterate over the instructions in a bytecode string.\n\n    Generates a sequence of Instruction namedtuples giving the details of each\n    opcode.  Additional information about the code's runtime environment\n    (e.g. 
variable names, constants) can be specified using optional\n    arguments.", "id": "f8527:m0"}
{"signature": "def create_names(*args) -> superclasses.PyteAugmentedArgList:", "body": "return _create_validated(*args, name=\"<STR_LIT>\")<EOL>", "docstring": "Creates a new list of names.\n\n:param args: The args to use.", "id": "f8528:m1"}
{"signature": "def create_varnames(*args) -> superclasses.PyteAugmentedArgList:", "body": "return _create_validated(*args, name=\"<STR_LIT>\")<EOL>", "docstring": "Creates a new list of names.\n\n:param args: The args to use.", "id": "f8528:m3"}
{"signature": "def register_simple_chooser(self, model, **kwargs):", "body": "name = '<STR_LIT>'.format(model._meta.object_name)<EOL>attrs = {'<STR_LIT>': model}<EOL>attrs.update(kwargs)<EOL>chooser = type(name, (Chooser,), attrs)<EOL>self.register_chooser(chooser)<EOL>return model<EOL>", "docstring": "Generates a model chooser definition from a model, and adds it to the\nregistry.", "id": "f8567:c0:m2"}
{"signature": "def register_chooser(self, chooser, **kwargs):", "body": "if not issubclass(chooser, Chooser):<EOL><INDENT>return self.register_simple_chooser(chooser, **kwargs)<EOL><DEDENT>self.choosers[chooser.model] = chooser(**kwargs)<EOL>return chooser<EOL>", "docstring": "Adds a model chooser definition to the registry.", "id": "f8567:c0:m1"}
{"signature": "def kwarg_decorator(func):", "body": "@wraps(func)<EOL>def decorator(arg=None, **kwargs):<EOL><INDENT>if arg is None:<EOL><INDENT>return lambda arg: decorator(arg, **kwargs)<EOL><DEDENT>return func(arg, **kwargs)<EOL><DEDENT>return decorator<EOL>", "docstring": "Turns a function that accepts a single arg and some kwargs in to a\ndecorator that can optionally be called with kwargs:\n\n.. code-block:: python\n\n    @kwarg_decorator\n    def my_decorator(func, bar=True, baz=None):\n        ...\n\n    @my_decorator\n    def my_func():\n        pass\n\n    @my_decorator(bar=False)\n    def my_other_func():\n        pass", "id": "f8570:m0"}
{"signature": "def last_arg_decorator(func):", "body": "@wraps(func)<EOL>def decorator(*args, **kwargs):<EOL><INDENT>if signature_matches(func, args, kwargs):<EOL><INDENT>return func(*args, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>return lambda last: func(*(args + (last,)), **kwargs)<EOL><DEDENT><DEDENT>return decorator<EOL>", "docstring": "Allows a function to be used as either a decorator with args, or called as\na normal function.\n\n@last_arg_decorator\ndef register_a_thing(foo, func, bar=True):\n    ..\n\n# Called as a decorator\n@register_a_thing(\"abc\", bar=False)\ndef my_func():\n    ...\n\n# Called as a normal function call\ndef my_other_func():\n    ...\n\nregister_a_thing(\"def\", my_other_func, bar=True)", "id": "f8570:m2"}
{"signature": "def _generate_circle(self):", "body": "total_weight = <NUM_LIT:0><EOL>for node in self.nodes:<EOL><INDENT>total_weight += self.weights.get(node, <NUM_LIT:1>)<EOL><DEDENT>for node in self.nodes:<EOL><INDENT>weight = <NUM_LIT:1><EOL>if node in self.weights:<EOL><INDENT>weight = self.weights.get(node)<EOL><DEDENT>factor = math.floor((<NUM_LIT> * len(self.nodes) * weight) / total_weight)<EOL>for j in range(<NUM_LIT:0>, int(factor)):<EOL><INDENT>b_key = bytearray(self._hash_digest('<STR_LIT>' % (node, j)))<EOL>for i in range(<NUM_LIT:0>, <NUM_LIT:3>):<EOL><INDENT>key = self._hash_val(b_key, lambda x: x + i * <NUM_LIT:4>)<EOL>self.ring[key] = node<EOL>self._sorted_keys.append(key)<EOL><DEDENT><DEDENT><DEDENT>self._sorted_keys.sort()<EOL>", "docstring": "Generates the circle.", "id": "f8573:c0:m1"}
{"signature": "def get_node(self, string_key):", "body": "pos = self.get_node_pos(string_key)<EOL>if pos is None:<EOL><INDENT>return None<EOL><DEDENT>return self.ring[self._sorted_keys[pos]]<EOL>", "docstring": "Given a string key a corresponding node in the hash ring is returned.\n\n        If the hash ring is empty, `None` is returned.", "id": "f8573:c0:m2"}
{"signature": "def gen_key(self, key):", "body": "b_key = self._hash_digest(key)<EOL>return self._hash_val(b_key, lambda x: x)<EOL>", "docstring": "Given a string key it returns a long value,\n        this long value represents a place on the hash ring.\n\n        md5 is currently used because it mixes well.", "id": "f8573:c0:m5"}
{"signature": "def get_node_pos(self, string_key):", "body": "if not self.ring:<EOL><INDENT>return None<EOL><DEDENT>key = self.gen_key(string_key)<EOL>nodes = self._sorted_keys<EOL>pos = bisect(nodes, key)<EOL>if pos == len(nodes):<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>return pos<EOL><DEDENT>", "docstring": "Given a string key a corresponding node in the hash ring is returned\n        along with it's position in the ring.\n\n        If the hash ring is empty, (`None`, `None`) is returned.", "id": "f8573:c0:m3"}
{"signature": "def refresh_persistent_maps(self):", "body": "for robot in self._robots:<EOL><INDENT>resp2 = (requests.get(urljoin(<EOL>self.ENDPOINT,<EOL>'<STR_LIT>'.format(robot.serial)),<EOL>headers=self._headers))<EOL>resp2.raise_for_status()<EOL>self._persistent_maps.update({robot.serial: resp2.json()})<EOL><DEDENT>", "docstring": "Get information about persistent maps of the robots.\n\n:return:", "id": "f8582:c0:m8"}
{"signature": "@property<EOL><INDENT>def persistent_maps(self):<DEDENT>", "body": "self.refresh_persistent_maps()<EOL>return self._persistent_maps<EOL>", "docstring": "Return set of persistent maps for logged in account.\n\n:return:", "id": "f8582:c0:m7"}
{"signature": "@property<EOL><INDENT>def robots(self):<DEDENT>", "body": "if not self._robots:<EOL><INDENT>self.refresh_robots()<EOL><DEDENT>return self._robots<EOL>", "docstring": "Return set of robots for logged in account.\n\n:return:", "id": "f8582:c0:m2"}
{"signature": "def refresh_robots(self):", "body": "resp = requests.get(urljoin(self.ENDPOINT, '<STR_LIT>'),<EOL>headers=self._headers)<EOL>resp.raise_for_status()<EOL>for robot in resp.json()['<STR_LIT>']:<EOL><INDENT>if robot['<STR_LIT>'] is None:<EOL><INDENT>continue    <EOL><DEDENT>try:<EOL><INDENT>self._robots.add(Robot(name=robot['<STR_LIT:name>'],<EOL>serial=robot['<STR_LIT>'],<EOL>secret=robot['<STR_LIT>'],<EOL>traits=robot['<STR_LIT>'],<EOL>endpoint=robot['<STR_LIT>']))<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>print (\"<STR_LIT>\".format(robot['<STR_LIT:name>']))<EOL>continue<EOL><DEDENT><DEDENT>self.refresh_persistent_maps()<EOL>for robot in self._robots:<EOL><INDENT>robot.has_persistent_maps = robot.serial in self._persistent_maps<EOL><DEDENT>", "docstring": "Get information about robots connected to account.\n\n:return:", "id": "f8582:c0:m5"}
{"signature": "@property<EOL><INDENT>def maps(self):<DEDENT>", "body": "self.refresh_maps()<EOL>return self._maps<EOL>", "docstring": "Return set of userdata for logged in account.\n\n:return:", "id": "f8582:c0:m3"}
{"signature": "def refresh_maps(self):", "body": "for robot in self.robots:<EOL><INDENT>resp2 = (<EOL>requests.get(urljoin(self.ENDPOINT, '<STR_LIT>'.format(robot.serial)),<EOL>headers=self._headers))<EOL>resp2.raise_for_status()<EOL>self._maps.update({robot.serial: resp2.json()})<EOL><DEDENT>", "docstring": "Get information about maps of the robots.\n\n:return:", "id": "f8582:c0:m4"}
{"signature": "def add_provider(self, share, provider, readonly=False):", "body": "<EOL>share = \"<STR_LIT:/>\" + share.strip(\"<STR_LIT:/>\")<EOL>assert share not in self.provider_map<EOL>if compat.is_basestring(provider):<EOL><INDENT>provider = FilesystemProvider(provider, readonly)<EOL><DEDENT>elif type(provider) in (dict,):<EOL><INDENT>if \"<STR_LIT>\" in provider:<EOL><INDENT>prov_class = dynamic_import_class(provider[\"<STR_LIT>\"])<EOL>provider = prov_class(<EOL>*provider.get(\"<STR_LIT:args>\", []), **provider.get(\"<STR_LIT>\", {})<EOL>)<EOL><DEDENT>else:<EOL><INDENT>provider = FilesystemProvider(<EOL>provider[\"<STR_LIT:root>\"], bool(provider.get(\"<STR_LIT>\", False))<EOL>)<EOL><DEDENT><DEDENT>elif type(provider) in (list, tuple):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\".format(provider)<EOL>)<EOL><DEDENT>if not isinstance(provider, DAVProvider):<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(provider))<EOL><DEDENT>provider.set_share_path(share)<EOL>if self.mount_path:<EOL><INDENT>provider.set_mount_path(self.mount_path)<EOL><DEDENT>provider.set_lock_manager(self.lock_manager)<EOL>provider.set_prop_manager(self.prop_manager)<EOL>self.provider_map[share] = provider<EOL>self.sorted_share_list = [s.lower() for s in self.provider_map.keys()]<EOL>self.sorted_share_list = sorted(self.sorted_share_list, key=len, reverse=True)<EOL>return provider<EOL>", "docstring": "Add a provider to the provider_map routing table.", "id": "f8583:c0:m1"}
{"signature": "def resolve_provider(self, path):", "body": "<EOL>share = None<EOL>lower_path = path.lower()<EOL>for r in self.sorted_share_list:<EOL><INDENT>if r == \"<STR_LIT:/>\":<EOL><INDENT>share = r<EOL>break<EOL><DEDENT>elif lower_path == r or lower_path.startswith(r + \"<STR_LIT:/>\"):<EOL><INDENT>share = r<EOL>break<EOL><DEDENT><DEDENT>if share is None:<EOL><INDENT>return None, None<EOL><DEDENT>return share, self.provider_map.get(share)<EOL>", "docstring": "Get the registered DAVProvider for a given path.\n\n        Returns:\n            tuple: (share, provider)", "id": "f8583:c0:m2"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def basic_auth_user(self, realm, user_name, password, environ):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Check request access permissions for realm/user_name/password.\n\n        Called by http_authenticator for basic authentication requests.\n\n        Optionally set environment variables:\n\n            environ[\"wsgidav.auth.roles\"] = (<role>, ...)\n            environ[\"wsgidav.auth.permissions\"] = (<perm>, ...)\n\n        Args:\n            realm (str):\n            user_name (str):\n            password (str):\n            environ (dict):\n        Returns:\n            False if user is not known or not authorized\n            True if user is authorized", "id": "f8586:c0:m6"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def supports_http_digest_auth(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Signal if this DC instance supports the HTTP digest authentication theme.\n\n        If true, `HTTPAuthenticator` will call `dc.digest_auth_user()`,\n        so this method must be implemented as well.\n\n        Returns:\n            bool", "id": "f8586:c0:m7"}
{"signature": "def _compute_http_digest_a1(self, realm, user_name, password):", "body": "data = user_name + \"<STR_LIT::>\" + realm + \"<STR_LIT::>\" + password<EOL>A1 = md5(compat.to_bytes(data)).hexdigest()<EOL>return A1<EOL>", "docstring": "Internal helper for derived classes to compute a digest hash (A1 part).", "id": "f8586:c0:m8"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def require_authentication(self, realm, environ):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Return False to disable authentication for this request.\n\n        This method is called\n\n        - On startup, to check if anonymous access is allowed for a given share.\n          In this case, `environ` is None.\n        - For every request, before basic or digest authentication is handled.\n          If False is returned, we MAY also set environment variables for\n          anonymous access::\n\n                environment[\"wsgidav.auth.roles\"] = (<role>, ...)\n                environment[\"wsgidav.auth.permissions\"] = (<perm>, ...)\n                return False\n\n        Args:\n            realm (str):\n            environ (dict | None):\n        Returns:\n            False to allow anonymous access\n            True to force subsequent digest or basic authentication", "id": "f8586:c0:m4"}
{"signature": "def _calc_realm_from_path_provider(self, path_info, environ):", "body": "if environ:<EOL><INDENT>dav_provider = environ[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>_share, dav_provider = self.wsgidav_app.resolve_provider(path_info)<EOL><DEDENT>if not dav_provider:<EOL><INDENT>logger.warn(<EOL>\"<STR_LIT>\".format(<EOL>util.safe_re_encode(path_info, sys.stdout.encoding), None<EOL>)<EOL>)<EOL>return None<EOL><DEDENT>realm = dav_provider.share_path<EOL>if realm == \"<STR_LIT>\":<EOL><INDENT>realm = \"<STR_LIT:/>\"<EOL><DEDENT>return realm<EOL>", "docstring": "Internal helper for derived classes to implement get_domain_realm().", "id": "f8586:c0:m2"}
{"signature": "def require_authentication(self, realm, environ):", "body": "realm_entry = self._get_realm_entry(realm)<EOL>if realm_entry is None:<EOL><INDENT>_logger.error(<EOL>'<STR_LIT>'<EOL>\"<STR_LIT>\".format(realm)<EOL>)<EOL><DEDENT>return realm_entry is not True<EOL>", "docstring": "Return True if this realm requires authentication (grant anonymous access otherwise).", "id": "f8587:c0:m4"}
{"signature": "def digest_auth_user(self, realm, user_name, environ):", "body": "user = self._get_realm_entry(realm, user_name)<EOL>if user is None:<EOL><INDENT>return False<EOL><DEDENT>password = user.get(\"<STR_LIT:password>\")<EOL>environ[\"<STR_LIT>\"] = user.get(\"<STR_LIT>\", [])<EOL>return self._compute_http_digest_a1(realm, user_name, password)<EOL>", "docstring": "Computes digest hash A1 part.", "id": "f8587:c0:m7"}
{"signature": "def get_uri_name(uri):", "body": "return uri.strip(\"<STR_LIT:/>\").split(\"<STR_LIT:/>\")[-<NUM_LIT:1>]<EOL>", "docstring": "Return local name, i.e. last segment of URI.", "id": "f8588:m25"}
{"signature": "def add_property_response(multistatusEL, href, propList):", "body": "<EOL>nsCount = <NUM_LIT:1><EOL>nsDict = {}<EOL>nsMap = {}<EOL>propDict = {}<EOL>for name, value in propList:<EOL><INDENT>status = \"<STR_LIT>\"<EOL>if isinstance(value, DAVError):<EOL><INDENT>status = get_http_status_string(value)<EOL>value = None<EOL><DEDENT>ns, _ = split_namespace(name)<EOL>if ns != \"<STR_LIT>\" and ns not in nsDict and ns != \"<STR_LIT>\":<EOL><INDENT>nsDict[ns] = True<EOL>nsMap[\"<STR_LIT>\".format(nsCount)] = ns<EOL>nsCount += <NUM_LIT:1><EOL><DEDENT>propDict.setdefault(status, []).append((name, value))<EOL><DEDENT>responseEL = make_sub_element(multistatusEL, \"<STR_LIT>\", nsmap=nsMap)<EOL>etree.SubElement(responseEL, \"<STR_LIT>\").text = href<EOL>for status in propDict:<EOL><INDENT>propstatEL = etree.SubElement(responseEL, \"<STR_LIT>\")<EOL>propEL = etree.SubElement(propstatEL, \"<STR_LIT>\")<EOL>for name, value in propDict[status]:<EOL><INDENT>if value is None:<EOL><INDENT>etree.SubElement(propEL, name)<EOL><DEDENT>elif is_etree_element(value):<EOL><INDENT>propEL.append(value)<EOL><DEDENT>else:<EOL><INDENT>etree.SubElement(propEL, name).text = to_unicode_safe(value)<EOL><DEDENT><DEDENT>etree.SubElement(propstatEL, \"<STR_LIT>\").text = \"<STR_LIT>\".format(status)<EOL><DEDENT>", "docstring": "Append <response> element to <multistatus> element.\n\n    <prop> node depends on the value type:\n      - str or unicode: add element with this content\n      - None: add an empty element\n      - etree.Element: add XML element as child\n      - DAVError: add an empty element to an own <propstatus> for this status code\n\n    @param multistatusEL: etree.Element\n    @param href: global URL of the resource, e.g. 'http://server:port/path'.\n    @param propList: list of 2-tuples (name, value)", "id": "f8588:m33"}
{"signature": "def pop_path(path):", "body": "if path in (\"<STR_LIT>\", \"<STR_LIT:/>\"):<EOL><INDENT>return (\"<STR_LIT>\", \"<STR_LIT>\")<EOL><DEDENT>assert path.startswith(\"<STR_LIT:/>\")<EOL>first, _sep, rest = path.lstrip(\"<STR_LIT:/>\").partition(\"<STR_LIT:/>\")<EOL>return (first, \"<STR_LIT:/>\" + rest)<EOL>", "docstring": "Return '/a/b/c' -> ('a', '/b/c').", "id": "f8588:m12"}
{"signature": "def get_content_length(environ):", "body": "<EOL>try:<EOL><INDENT>return max(<NUM_LIT:0>, int(environ.get(\"<STR_LIT>\", <NUM_LIT:0>)))<EOL><DEDENT>except ValueError:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "Return a positive CONTENT_LENGTH in a safe way (return 0 otherwise).", "id": "f8588:m21"}
{"signature": "def fail(value, context_info=None, src_exception=None, err_condition=None):", "body": "if isinstance(value, Exception):<EOL><INDENT>e = as_DAVError(value)<EOL><DEDENT>else:<EOL><INDENT>e = DAVError(value, context_info, src_exception, err_condition)<EOL><DEDENT>_logger.error(\"<STR_LIT>\".format(e.get_user_info()))<EOL>raise e<EOL>", "docstring": "Wrapper to raise (and log) DAVError.", "id": "f8588:m23"}
{"signature": "def evaluate_http_conditionals(dav_res, last_modified, entitytag, environ):", "body": "if not dav_res:<EOL><INDENT>return<EOL><DEDENT>if \"<STR_LIT>\" in environ and dav_res.support_etag():<EOL><INDENT>ifmatchlist = environ[\"<STR_LIT>\"].split(\"<STR_LIT:U+002C>\")<EOL>for ifmatchtag in ifmatchlist:<EOL><INDENT>ifmatchtag = ifmatchtag.strip('<STR_LIT>')<EOL>if ifmatchtag == entitytag or ifmatchtag == \"<STR_LIT:*>\":<EOL><INDENT>break<EOL><DEDENT>raise DAVError(HTTP_PRECONDITION_FAILED, \"<STR_LIT>\")<EOL><DEDENT><DEDENT>ifModifiedSinceFailed = False<EOL>if \"<STR_LIT>\" in environ and dav_res.support_modified():<EOL><INDENT>ifmodtime = parse_time_string(environ[\"<STR_LIT>\"])<EOL>if ifmodtime and ifmodtime > last_modified:<EOL><INDENT>ifModifiedSinceFailed = True<EOL><DEDENT><DEDENT>ignoreIfModifiedSince = False<EOL>if \"<STR_LIT>\" in environ and dav_res.support_etag():<EOL><INDENT>ifmatchlist = environ[\"<STR_LIT>\"].split(\"<STR_LIT:U+002C>\")<EOL>for ifmatchtag in ifmatchlist:<EOL><INDENT>ifmatchtag = ifmatchtag.strip('<STR_LIT>')<EOL>if ifmatchtag == entitytag or ifmatchtag == \"<STR_LIT:*>\":<EOL><INDENT>if (<EOL>environ[\"<STR_LIT>\"] in (\"<STR_LIT:GET>\", \"<STR_LIT>\")<EOL>and not ifModifiedSinceFailed<EOL>):<EOL><INDENT>raise DAVError(HTTP_NOT_MODIFIED, \"<STR_LIT>\")<EOL><DEDENT>raise DAVError(<EOL>HTTP_PRECONDITION_FAILED, \"<STR_LIT>\"<EOL>)<EOL><DEDENT><DEDENT>ignoreIfModifiedSince = True<EOL><DEDENT>if \"<STR_LIT>\" in environ and dav_res.support_modified():<EOL><INDENT>ifunmodtime = parse_time_string(environ[\"<STR_LIT>\"])<EOL>if ifunmodtime and ifunmodtime <= last_modified:<EOL><INDENT>raise DAVError(<EOL>HTTP_PRECONDITION_FAILED, \"<STR_LIT>\"<EOL>)<EOL><DEDENT><DEDENT>if ifModifiedSinceFailed and not ignoreIfModifiedSince:<EOL><INDENT>raise DAVError(HTTP_NOT_MODIFIED, \"<STR_LIT>\")<EOL><DEDENT>return<EOL>", "docstring": "Handle 'If-...:' headers (but not 'If:' header).\n\n    If-Match\n        @see: 
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24\n        Only perform the action if the client supplied entity matches the\n        same entity on the server. This is mainly for methods like\n        PUT to only update a resource if it has not been modified since the\n        user last updated it.\n        If-Match: \"737060cd8c284d8af7ad3082f209582d\"\n    If-Modified-Since\n        @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.25\n        Allows a 304 Not Modified to be returned if content is unchanged\n        If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT\n    If-None-Match\n        @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26\n        Allows a 304 Not Modified to be returned if content is unchanged,\n        see HTTP ETag\n        If-None-Match: \"737060cd8c284d8af7ad3082f209582d\"\n    If-Unmodified-Since\n        @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.28\n        Only send the response if the entity has not been modified since a\n        specific time.", "id": "f8588:m39"}
{"signature": "def get_rfc1123_time(secs=None):", "body": "<EOL>return formatdate(timeval=secs, localtime=False, usegmt=True)<EOL>", "docstring": "Return <secs> in rfc 1123 date/time format (pass secs=None for current date).", "id": "f8588:m0"}
{"signature": "def is_equal_or_child_uri(parentUri, childUri):", "body": "return (<EOL>parentUri<EOL>and childUri<EOL>and (childUri.rstrip(\"<STR_LIT:/>\") + \"<STR_LIT:/>\").startswith(parentUri.rstrip(\"<STR_LIT:/>\") + \"<STR_LIT:/>\")<EOL>)<EOL>", "docstring": "Return True, if childUri is a child of parentUri or maps to the same resource.\n\n    Similar to <util.is_child_uri>_ ,  but this method also returns True, if parent\n    equals child. ('/a/b' is considered identical with '/a/b/').", "id": "f8588:m28"}
{"signature": "def calc_base64(s):", "body": "s = compat.to_bytes(s)<EOL>s = compat.base64_encodebytes(s).strip()  <EOL>return compat.to_native(s)<EOL>", "docstring": "Return base64 encoded binarystring.", "id": "f8588:m35"}
{"signature": "def parse_xml_body(environ, allow_empty=False):", "body": "<EOL>clHeader = environ.get(\"<STR_LIT>\", \"<STR_LIT>\").strip()<EOL>if clHeader == \"<STR_LIT>\":<EOL><INDENT>requestbody = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>content_length = int(clHeader)<EOL>if content_length < <NUM_LIT:0>:<EOL><INDENT>raise DAVError(HTTP_BAD_REQUEST, \"<STR_LIT>\")<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>raise DAVError(HTTP_BAD_REQUEST, \"<STR_LIT>\")<EOL><DEDENT>if content_length == <NUM_LIT:0>:<EOL><INDENT>requestbody = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>requestbody = environ[\"<STR_LIT>\"].read(content_length)<EOL>environ[\"<STR_LIT>\"] = <NUM_LIT:1><EOL><DEDENT><DEDENT>if requestbody == \"<STR_LIT>\":<EOL><INDENT>if allow_empty:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>raise DAVError(HTTP_BAD_REQUEST, \"<STR_LIT>\")<EOL><DEDENT><DEDENT>try:<EOL><INDENT>rootEL = etree.fromstring(requestbody)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise DAVError(HTTP_BAD_REQUEST, \"<STR_LIT>\", src_exception=e)<EOL><DEDENT>if environ.get(\"<STR_LIT>\"):<EOL><INDENT>_logger.info(<EOL>\"<STR_LIT>\".format(<EOL>environ[\"<STR_LIT>\"],<EOL>compat.to_native(xml_to_bytes(rootEL, pretty_print=True)),<EOL>)<EOL>)<EOL>environ[\"<STR_LIT>\"] = False<EOL><DEDENT>return rootEL<EOL>", "docstring": "Read request body XML into an etree.Element.\n\n    Return None, if no request body was sent.\n    Raise HTTP_BAD_REQUEST, if something else went wrong.\n\n    TODO: this is a very relaxed interpretation: should we raise HTTP_BAD_REQUEST\n    instead, if CONTENT_LENGTH is missing, invalid, or 0?\n\n    RFC: For compatibility with HTTP/1.0 applications, HTTP/1.1 requests containing\n    a message-body MUST include a valid Content-Length header field unless the\n    server is known to be HTTP/1.1 compliant.\n    If a request contains a message-body and a Content-Length is not given, the\n    server SHOULD respond with 400 (bad request) if 
it cannot determine the\n    length of the message, or with 411 (length required) if it wishes to insist\n    on receiving a valid Content-Length.\"\n\n    So I'd say, we should accept a missing CONTENT_LENGTH, and try to read the\n    content anyway.\n    But WSGI doesn't guarantee to support input.read() without length(?).\n    At least it locked, when I tried it with a request that had a missing\n    content-type and no body.\n\n    Current approach: if CONTENT_LENGTH is\n\n    - valid and >0:\n      read body (exactly <CONTENT_LENGTH> bytes) and parse the result.\n    - 0:\n      Assume empty body and return None or raise exception.\n    - invalid (negative or not a number:\n      raise HTTP_BAD_REQUEST\n    - missing:\n      NOT: Try to read body until end and parse the result.\n      BUT: assume '0'\n    - empty string:\n      WSGI allows it to be empty or absent: treated like 'missing'.", "id": "f8588:m30"}
{"signature": "def get_rfc3339_time(secs=None):", "body": "return time.strftime(\"<STR_LIT>\", time.gmtime(secs))<EOL>", "docstring": "Return <secs> in RFC 3339 date/time format (pass secs=None for current date).\n\n    RFC 3339 is a subset of ISO 8601, used for '{DAV:}creationdate'.\n    See http://tools.ietf.org/html/rfc3339", "id": "f8588:m1"}
{"signature": "def _parse_gmt_time(timestring):", "body": "<EOL>try:<EOL><INDENT>return time.strptime(timestring, \"<STR_LIT>\")<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>return time.strptime(timestring, \"<STR_LIT>\")<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>return time.strptime(timestring, \"<STR_LIT>\")<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>return parsedate(timestring)<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT>return None<EOL>", "docstring": "Return a standard time tuple (see time and calendar), for a date/time string.", "id": "f8588:m4"}
{"signature": "def make_complete_url(environ, localUri=None):", "body": "url = environ[\"<STR_LIT>\"] + \"<STR_LIT>\"<EOL>if environ.get(\"<STR_LIT>\"):<EOL><INDENT>url += environ[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>url += environ[\"<STR_LIT>\"]<EOL>if environ[\"<STR_LIT>\"] == \"<STR_LIT>\":<EOL><INDENT>if environ[\"<STR_LIT>\"] != \"<STR_LIT>\":<EOL><INDENT>url += \"<STR_LIT::>\" + environ[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if environ[\"<STR_LIT>\"] != \"<STR_LIT>\":<EOL><INDENT>url += \"<STR_LIT::>\" + environ[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT><DEDENT>url += compat.quote(environ.get(\"<STR_LIT>\", \"<STR_LIT>\"))<EOL>if localUri is None:<EOL><INDENT>url += compat.quote(environ.get(\"<STR_LIT>\", \"<STR_LIT>\"))<EOL>if environ.get(\"<STR_LIT>\"):<EOL><INDENT>url += \"<STR_LIT:?>\" + environ[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>url += localUri  <EOL><DEDENT>return url<EOL>", "docstring": "URL reconstruction according to PEP 333.\n    @see https://www.python.org/dev/peps/pep-3333/#url-reconstruction", "id": "f8588:m29"}
{"signature": "def read_timeout_value_header(timeoutvalue):", "body": "timeoutsecs = <NUM_LIT:0><EOL>timeoutvaluelist = timeoutvalue.split(\"<STR_LIT:U+002C>\")<EOL>for timeoutspec in timeoutvaluelist:<EOL><INDENT>timeoutspec = timeoutspec.strip()<EOL>if timeoutspec.lower() == \"<STR_LIT>\":<EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>listSR = reSecondsReader.findall(timeoutspec)<EOL>for secs in listSR:<EOL><INDENT>timeoutsecs = int(secs)<EOL>if timeoutsecs > MAX_FINITE_TIMEOUT_LIMIT:<EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT>if timeoutsecs != <NUM_LIT:0>:<EOL><INDENT>return timeoutsecs<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return None<EOL>", "docstring": "Return -1 if infinite, else return numofsecs.", "id": "f8588:m38"}
{"signature": "def safe_re_encode(s, encoding_to, errors=\"<STR_LIT>\"):", "body": "<EOL>if not encoding_to:<EOL><INDENT>encoding_to = \"<STR_LIT>\"<EOL><DEDENT>if compat.is_bytes(s):<EOL><INDENT>s = s.decode(encoding_to, errors=errors).encode(encoding_to)<EOL><DEDENT>else:<EOL><INDENT>s = s.encode(encoding_to, errors=errors).decode(encoding_to)<EOL><DEDENT>return s<EOL>", "docstring": "Re-encode str or binary so that is compatible with a given encoding (replacing\n    unsupported chars).\n\n    We use ASCII as default, which gives us some output that contains \\x99 and \\u9999\n    for every character > 127, for easier debugging.\n    (e.g. if we don't know the encoding, see #87, #96)", "id": "f8588:m17"}
{"signature": "def dynamic_import_class(name):", "body": "import importlib<EOL>module_name, class_name = name.rsplit(\"<STR_LIT:.>\", <NUM_LIT:1>)<EOL>try:<EOL><INDENT>module = importlib.import_module(module_name)<EOL><DEDENT>except Exception as e:<EOL><INDENT>_logger.exception(\"<STR_LIT>\".format(name, e))<EOL>raise<EOL><DEDENT>the_class = getattr(module, class_name)<EOL>return the_class<EOL>", "docstring": "Import a class from a module string, e.g. ``my.module.ClassName``.", "id": "f8588:m8"}
{"signature": "def read_and_discard_input(environ):", "body": "if environ.get(\"<STR_LIT>\") or environ.get(\"<STR_LIT>\"):<EOL><INDENT>return<EOL><DEDENT>cl = get_content_length(environ)<EOL>assert cl >= <NUM_LIT:0><EOL>if cl == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>READ_ALL = True<EOL>environ[\"<STR_LIT>\"] = <NUM_LIT:1><EOL>if READ_ALL:<EOL><INDENT>environ[\"<STR_LIT>\"] = <NUM_LIT:1><EOL><DEDENT>wsgi_input = environ[\"<STR_LIT>\"]<EOL>if hasattr(wsgi_input, \"<STR_LIT>\") and hasattr(wsgi_input, \"<STR_LIT>\"):<EOL><INDENT>if wsgi_input._consumed == <NUM_LIT:0> and wsgi_input.length > <NUM_LIT:0>:<EOL><INDENT>if READ_ALL:<EOL><INDENT>n = wsgi_input.length<EOL><DEDENT>else:<EOL><INDENT>n = <NUM_LIT:1><EOL><DEDENT>body = wsgi_input.read(n)<EOL>_logger.debug(<EOL>\"<STR_LIT>\".format(<EOL>n, body[:<NUM_LIT:50>]<EOL>)<EOL>)<EOL><DEDENT><DEDENT>elif hasattr(wsgi_input, \"<STR_LIT>\") and hasattr(wsgi_input._sock, \"<STR_LIT>\"):<EOL><INDENT>try:<EOL><INDENT>sock = wsgi_input._sock<EOL>timeout = sock.gettimeout()<EOL>sock.settimeout(<NUM_LIT:0>)<EOL>try:<EOL><INDENT>if READ_ALL:<EOL><INDENT>n = cl<EOL><DEDENT>else:<EOL><INDENT>n = <NUM_LIT:1><EOL><DEDENT>body = wsgi_input.read(n)<EOL>_logger.debug(<EOL>\"<STR_LIT>\".format(<EOL>n, body[:<NUM_LIT:50>]<EOL>)<EOL>)<EOL><DEDENT>except socket.error as se:<EOL><INDENT>_logger.error(\"<STR_LIT>\".format(n, se))<EOL><DEDENT>sock.settimeout(timeout)<EOL><DEDENT>except Exception:<EOL><INDENT>_logger.error(\"<STR_LIT>\".format(sys.exc_info()))<EOL><DEDENT><DEDENT>", "docstring": "Read 1 byte from wsgi.input, if this has not been done yet.\n\n    Returning a response without reading from a request body might confuse the\n    WebDAV client.\n    This may happen, if an exception like '401 Not authorized', or\n    '500 Internal error' was raised BEFORE anything was read from the request\n    stream.\n\n    See GC issue 13, issue 23\n    See 
http://groups.google.com/group/paste-users/browse_frm/thread/fc0c9476047e9a47?hl=en\n\n    Note that with persistent sessions (HTTP/1.1) we must make sure, that the\n    'Connection: closed' header is set with the response, to prevent reusing\n    the current stream.", "id": "f8588:m22"}
{"signature": "def parse_if_header_dict(environ):", "body": "if \"<STR_LIT>\" in environ:<EOL><INDENT>return<EOL><DEDENT>if \"<STR_LIT>\" not in environ:<EOL><INDENT>environ[\"<STR_LIT>\"] = None<EOL>environ[\"<STR_LIT>\"] = []<EOL>return<EOL><DEDENT>iftext = environ[\"<STR_LIT>\"].strip()<EOL>if not iftext.startswith(\"<STR_LIT:<>\"):<EOL><INDENT>iftext = \"<STR_LIT>\" + iftext<EOL><DEDENT>ifDict = dict([])<EOL>ifLockList = []<EOL>resource1 = \"<STR_LIT:*>\"<EOL>for (tmpURLVar, URLVar, _tmpContentVar, contentVar) in reIfSeparator.findall(<EOL>iftext<EOL>):<EOL><INDENT>if tmpURLVar != \"<STR_LIT>\":<EOL><INDENT>resource1 = URLVar<EOL><DEDENT>else:<EOL><INDENT>listTagContents = []<EOL>testflag = True<EOL>for listitem in reIfTagListContents.findall(contentVar):<EOL><INDENT>if listitem.upper() != \"<STR_LIT>\":<EOL><INDENT>if listitem.startswith(\"<STR_LIT:[>\"):<EOL><INDENT>listTagContents.append(<EOL>(testflag, \"<STR_LIT>\", listitem.strip('<STR_LIT>'))<EOL>)<EOL><DEDENT>else:<EOL><INDENT>listTagContents.append(<EOL>(testflag, \"<STR_LIT>\", listitem.strip(\"<STR_LIT>\"))<EOL>)<EOL>ifLockList.append(listitem.strip(\"<STR_LIT>\"))<EOL><DEDENT><DEDENT>testflag = listitem.upper() != \"<STR_LIT>\"<EOL><DEDENT>if resource1 in ifDict:<EOL><INDENT>listTag = ifDict[resource1]<EOL><DEDENT>else:<EOL><INDENT>listTag = []<EOL>ifDict[resource1] = listTag<EOL><DEDENT>listTag.append(listTagContents)<EOL><DEDENT><DEDENT>environ[\"<STR_LIT>\"] = ifDict<EOL>environ[\"<STR_LIT>\"] = ifLockList<EOL>_logger.debug(\"<STR_LIT>\".format(pformat(ifDict)))<EOL>return<EOL>", "docstring": "Parse HTTP_IF header into a dictionary and lists, and cache the result.\n\n    @see http://www.webdav.org/specs/rfc4918.html#HEADER_If", "id": "f8588:m40"}
{"signature": "def byte_number_string(<EOL>number, thousandsSep=True, partition=False, base1024=True, appendBytes=True<EOL>):", "body": "magsuffix = \"<STR_LIT>\"<EOL>bytesuffix = \"<STR_LIT>\"<EOL>if partition:<EOL><INDENT>magnitude = <NUM_LIT:0><EOL>if base1024:<EOL><INDENT>while number >= <NUM_LIT>:<EOL><INDENT>magnitude += <NUM_LIT:1><EOL>number = number >> <NUM_LIT:10><EOL><DEDENT><DEDENT>else:<EOL><INDENT>while number >= <NUM_LIT:1000>:<EOL><INDENT>magnitude += <NUM_LIT:1><EOL>number /= <NUM_LIT><EOL><DEDENT><DEDENT>magsuffix = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:M>\", \"<STR_LIT>\", \"<STR_LIT:T>\", \"<STR_LIT:P>\"][magnitude]<EOL><DEDENT>if appendBytes:<EOL><INDENT>if number == <NUM_LIT:1>:<EOL><INDENT>bytesuffix = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>bytesuffix = \"<STR_LIT>\"<EOL><DEDENT><DEDENT>if thousandsSep and (number >= <NUM_LIT:1000> or magsuffix):<EOL><INDENT>snum = \"<STR_LIT>\".format(number)<EOL><DEDENT>else:<EOL><INDENT>snum = str(number)<EOL><DEDENT>return \"<STR_LIT>\".format(snum, magsuffix, bytesuffix)<EOL>", "docstring": "Convert bytes into human-readable representation.", "id": "f8588:m20"}
{"signature": "def guess_mime_type(url):", "body": "(mimetype, _mimeencoding) = mimetypes.guess_type(url)<EOL>if not mimetype:<EOL><INDENT>ext = os.path.splitext(url)[<NUM_LIT:1>]<EOL>mimetype = _MIME_TYPES.get(ext)<EOL>_logger.debug(\"<STR_LIT>\".format(url, mimetype))<EOL><DEDENT>if not mimetype:<EOL><INDENT>mimetype = \"<STR_LIT>\"<EOL><DEDENT>return mimetype<EOL>", "docstring": "Use the mimetypes module to lookup the type for an extension.\n\n    This function also adds some extensions required for HTML5", "id": "f8588:m42"}
{"signature": "def shift_path(script_name, path_info):", "body": "segment, rest = pop_path(path_info)<EOL>return (segment, join_uri(script_name.rstrip(\"<STR_LIT:/>\"), segment), rest.rstrip(\"<STR_LIT:/>\"))<EOL>", "docstring": "Return ('/a', '/b/c') -> ('b', '/a/b', 'c').", "id": "f8588:m14"}
{"signature": "def parse_time_string(timestring):", "body": "result = _parse_gmt_time(timestring)<EOL>if result:<EOL><INDENT>return calendar.timegm(result)<EOL><DEDENT>return None<EOL>", "docstring": "Return the number of seconds since the epoch, for a date/time string.\n\n    Returns None for invalid input\n\n    The following time type strings are supported:\n\n    Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123\n    Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036\n    Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format", "id": "f8588:m3"}
{"signature": "def split_namespace(clarkName):", "body": "if clarkName.startswith(\"<STR_LIT:{>\") and \"<STR_LIT:}>\" in clarkName:<EOL><INDENT>ns, localname = clarkName.split(\"<STR_LIT:}>\", <NUM_LIT:1>)<EOL>return (ns[<NUM_LIT:1>:], localname)<EOL><DEDENT>return (\"<STR_LIT>\", clarkName)<EOL>", "docstring": "Return (namespace, localname) tuple for a property name in Clark Notation.\n\n    Namespace defaults to ''.\n    Example:\n    '{DAV:}foo'  -> ('DAV:', 'foo')\n    'bar'  -> ('', 'bar')", "id": "f8588:m15"}
{"signature": "def encode_mongo_key(s):", "body": "assert DOT_ESCAPE not in s<EOL>return s.replace(\"<STR_LIT:.>\", DOT_ESCAPE)<EOL>", "docstring": "Return an encoded version of `s` that may be used as MongoDB key.", "id": "f8589:m0"}
{"signature": "def _find_descendents(self, url):", "body": "<EOL>map_fun = \"\"\"<STR_LIT>\"\"\" % (<EOL>url + \"<STR_LIT:/>\"<EOL>)<EOL>vr = self.db.query(map_fun, include_docs=True)<EOL>for row in vr:<EOL><INDENT>yield row.doc<EOL><DEDENT>return<EOL>", "docstring": "Return properties document for url and all children.", "id": "f8590:c0:m9"}
{"signature": "def _find(self, url):", "body": "<EOL>vr = self.db.view(\"<STR_LIT>\", key=url, include_docs=True)<EOL>_logger.debug(\"<STR_LIT>\" % (url, len(vr)))<EOL>assert len(vr) <= <NUM_LIT:1>, \"<STR_LIT>\" % url<EOL>for row in vr:<EOL><INDENT>assert row.doc<EOL>return row.doc<EOL><DEDENT>return None<EOL>", "docstring": "Return properties document for path.", "id": "f8590:c0:m8"}
{"signature": "def remove_property(self, norm_url, name, dry_run=False, environ=None):", "body": "_logger.debug(<EOL>\"<STR_LIT>\".format(norm_url, name, dry_run)<EOL>)<EOL>if dry_run:<EOL><INDENT>return<EOL><DEDENT>self._lock.acquire_write()<EOL>try:<EOL><INDENT>if not self._loaded:<EOL><INDENT>self._lazy_open()<EOL><DEDENT>if norm_url in self._dict:<EOL><INDENT>locatordict = self._dict[norm_url]<EOL>if name in locatordict:<EOL><INDENT>del locatordict[name]<EOL>self._dict[norm_url] = locatordict<EOL>self._sync()<EOL><DEDENT><DEDENT>if __debug__ and self._verbose >= <NUM_LIT:4>:<EOL><INDENT>self._check()<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>self._lock.release()<EOL><DEDENT>", "docstring": "Specifying the removal of a property that does not exist is NOT an error.", "id": "f8591:c0:m11"}
{"signature": "def _sync(self):", "body": "_logger.debug(\"<STR_LIT>\")<EOL>self._lock.acquire_write()  <EOL>try:<EOL><INDENT>if self._loaded:<EOL><INDENT>self._dict.sync()<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>self._lock.release()<EOL><DEDENT>", "docstring": "Write persistent dictionary to disc.", "id": "f8591:c1:m3"}
{"signature": "def clear(self):", "body": "self._lock.acquire_write()<EOL>try:<EOL><INDENT>was_closed = self._dict is None<EOL>if was_closed:<EOL><INDENT>self.open()<EOL><DEDENT>if len(self._dict):<EOL><INDENT>self._dict.clear()<EOL>self._dict.sync()<EOL><DEDENT>if was_closed:<EOL><INDENT>self.close()<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>self._lock.release()<EOL><DEDENT>", "docstring": "Delete all entries.", "id": "f8591:c1:m5"}
{"signature": "def read(self, size=<NUM_LIT:0>):", "body": "res = self.unread<EOL>self.unread = \"<STR_LIT>\"<EOL>while res == \"<STR_LIT>\" or size < <NUM_LIT:0> or (size > <NUM_LIT:0> and len(res) < size):<EOL><INDENT>try:<EOL><INDENT>res += compat.to_native(self.queue.get(True, <NUM_LIT:0.1>))<EOL><DEDENT>except compat.queue.Empty:<EOL><INDENT>if self.is_closed:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>if size > <NUM_LIT:0> and len(res) > size:<EOL><INDENT>self.unread = res[size:]<EOL>res = res[:size]<EOL><DEDENT>return res<EOL>", "docstring": "Read a chunk of bytes from queue.\n\n        size = 0: Read next chunk (arbitrary length)\n             > 0: Read one chunk of `size` bytes (or less if stream was closed)\n             < 0: Read all bytes as single chunk (i.e. blocks until stream is closed)\n\n        This method blocks until the requested size become available.\n        However, if close() was called, '' is returned immediately.", "id": "f8592:c0:m1"}
{"signature": "def write(self, chunk):", "body": "if self.is_closed:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if compat.is_basestring(chunk):<EOL><INDENT>self.queue.put(chunk)<EOL><DEDENT>else:  <EOL><INDENT>for o in chunk:<EOL><INDENT>self.queue.put(o)<EOL><DEDENT><DEDENT>", "docstring": "Put a chunk of bytes (or an iterable) to the queue.\n\n        May block if max_size number of chunks is reached.", "id": "f8592:c0:m2"}
{"signature": "def _get_context(self, environ, dav_res):", "body": "assert dav_res.is_collection<EOL>is_readonly = environ[\"<STR_LIT>\"].is_readonly()<EOL>context = {<EOL>\"<STR_LIT>\": (self.config.get(\"<STR_LIT>\") or \"<STR_LIT>\") + ASSET_SHARE,<EOL>\"<STR_LIT>\": [],<EOL>\"<STR_LIT:version>\": __version__,<EOL>\"<STR_LIT>\": compat.unquote(dav_res.get_href()),<EOL>\"<STR_LIT:url>\": dav_res.get_href(),  <EOL>\"<STR_LIT>\": util.get_uri_parent(dav_res.get_href()),<EOL>\"<STR_LIT>\": self.dir_config,<EOL>\"<STR_LIT>\": is_readonly,<EOL>\"<STR_LIT>\": \"<STR_LIT>\" if is_readonly else \"<STR_LIT>\",<EOL>\"<STR_LIT>\": False,<EOL>}<EOL>trailer = self.dir_config.get(\"<STR_LIT>\")<EOL>if trailer is True:<EOL><INDENT>trailer = \"<STR_LIT>\"<EOL><DEDENT>if trailer:<EOL><INDENT>trailer = trailer.replace(<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\".format(<EOL>__version__<EOL>),<EOL>)<EOL>trailer = trailer.replace(\"<STR_LIT>\", util.get_rfc1123_time())<EOL><DEDENT>context[\"<STR_LIT>\"] = trailer<EOL>rows = context[\"<STR_LIT>\"]<EOL>dirInfoList = dav_res.get_directory_info()<EOL>if dirInfoList is None:<EOL><INDENT>dirInfoList = []<EOL>childList = dav_res.get_descendants(depth=\"<STR_LIT:1>\", add_self=False)<EOL>for res in childList:<EOL><INDENT>di = res.get_display_info()<EOL>href = res.get_href()<EOL>ofe_prefix = None<EOL>tr_classes = []<EOL>a_classes = []<EOL>if res.is_collection:<EOL><INDENT>tr_classes.append(\"<STR_LIT>\")<EOL><DEDENT>if not is_readonly and not res.is_collection:<EOL><INDENT>ext = os.path.splitext(href)[<NUM_LIT:1>].lstrip(\"<STR_LIT:.>\").lower()<EOL>officeType = msOfficeExtToTypeMap.get(ext)<EOL>if officeType:<EOL><INDENT>if self.dir_config.get(\"<STR_LIT>\"):<EOL><INDENT>ofe_prefix = \"<STR_LIT>\".format(officeType)<EOL>a_classes.append(\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT>entry = {<EOL>\"<STR_LIT>\": href,<EOL>\"<STR_LIT>\": ofe_prefix,<EOL>\"<STR_LIT>\": \"<STR_LIT:U+0020>\".join(a_classes),<EOL>\"<STR_LIT>\": 
\"<STR_LIT:U+0020>\".join(tr_classes),<EOL>\"<STR_LIT>\": res.get_display_name(),<EOL>\"<STR_LIT>\": res.get_last_modified(),<EOL>\"<STR_LIT>\": res.is_collection,<EOL>\"<STR_LIT>\": res.get_content_length(),<EOL>\"<STR_LIT>\": di.get(\"<STR_LIT:type>\"),<EOL>\"<STR_LIT>\": di.get(\"<STR_LIT>\"),<EOL>}<EOL>dirInfoList.append(entry)<EOL><DEDENT><DEDENT>ignore_patterns = self.dir_config.get(\"<STR_LIT:ignore>\", [])<EOL>if compat.is_basestring(ignore_patterns):<EOL><INDENT>ignore_patterns = ignore_patterns.split(\"<STR_LIT:U+002C>\")<EOL><DEDENT>ignored_list = []<EOL>for entry in dirInfoList:<EOL><INDENT>ignore = False<EOL>for pat in ignore_patterns:<EOL><INDENT>if fnmatch(entry[\"<STR_LIT>\"], pat):<EOL><INDENT>ignored_list.append(entry[\"<STR_LIT>\"])<EOL>ignore = True<EOL>break<EOL><DEDENT><DEDENT>if ignore:<EOL><INDENT>continue<EOL><DEDENT>last_modified = entry.get(\"<STR_LIT>\")<EOL>if last_modified is None:<EOL><INDENT>entry[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>entry[\"<STR_LIT>\"] = util.get_rfc1123_time(last_modified)<EOL><DEDENT>entry[\"<STR_LIT>\"] = \"<STR_LIT:->\"<EOL>if not entry.get(\"<STR_LIT>\"):<EOL><INDENT>content_length = entry.get(\"<STR_LIT>\")<EOL>if content_length is not None:<EOL><INDENT>entry[\"<STR_LIT>\"] = util.byte_number_string(content_length)<EOL><DEDENT><DEDENT>rows.append(entry)<EOL><DEDENT>if ignored_list:<EOL><INDENT>_logger.debug(<EOL>\"<STR_LIT>\".format(<EOL>len(ignored_list), ignored_list<EOL>)<EOL>)<EOL><DEDENT>sort = \"<STR_LIT:name>\"<EOL>if sort == \"<STR_LIT:name>\":<EOL><INDENT>rows.sort(<EOL>key=lambda v: \"<STR_LIT>\".format(<EOL>not v[\"<STR_LIT>\"], v[\"<STR_LIT>\"].lower()<EOL>)<EOL>)<EOL><DEDENT>if \"<STR_LIT>\" in environ:<EOL><INDENT>context.update(<EOL>{<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT>\": (environ.get(\"<STR_LIT>\") or \"<STR_LIT>\"),<EOL>\"<STR_LIT>\": environ.get(\"<STR_LIT>\"),<EOL>\"<STR_LIT>\": \"<STR_LIT:U+002CU+0020>\".join(environ.get(\"<STR_LIT>\") or 
[]),<EOL>\"<STR_LIT>\": \"<STR_LIT:U+002CU+0020>\".join(<EOL>environ.get(\"<STR_LIT>\") or []<EOL>),<EOL>}<EOL>)<EOL><DEDENT>return context<EOL>", "docstring": "@see: http://www.webdav.org/specs/rfc4918.html#rfc.section.9.4", "id": "f8593:c0:m4"}
{"signature": "def _send_resource(self, environ, start_response, is_head_method):", "body": "path = environ[\"<STR_LIT>\"]<EOL>res = self._davProvider.get_resource_inst(path, environ)<EOL>if util.get_content_length(environ) != <NUM_LIT:0>:<EOL><INDENT>self._fail(<EOL>HTTP_MEDIATYPE_NOT_SUPPORTED,<EOL>\"<STR_LIT>\",<EOL>)<EOL><DEDENT>elif environ.setdefault(\"<STR_LIT>\", \"<STR_LIT:0>\") != \"<STR_LIT:0>\":<EOL><INDENT>self._fail(HTTP_BAD_REQUEST, \"<STR_LIT>\")<EOL><DEDENT>elif res is None:<EOL><INDENT>self._fail(HTTP_NOT_FOUND)<EOL><DEDENT>elif res.is_collection:<EOL><INDENT>self._fail(<EOL>HTTP_FORBIDDEN,<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>)<EOL><DEDENT>self._evaluate_if_headers(res, environ)<EOL>filesize = res.get_content_length()<EOL>if filesize is None:<EOL><INDENT>filesize = -<NUM_LIT:1>  <EOL><DEDENT>last_modified = res.get_last_modified()<EOL>if last_modified is None:<EOL><INDENT>last_modified = -<NUM_LIT:1><EOL><DEDENT>entitytag = res.get_etag()<EOL>if entitytag is None:<EOL><INDENT>entitytag = \"<STR_LIT>\"<EOL><DEDENT>doignoreranges = (<EOL>not res.support_content_length()<EOL>or not res.support_ranges()<EOL>or filesize == <NUM_LIT:0><EOL>)<EOL>if (<EOL>\"<STR_LIT>\" in environ<EOL>and \"<STR_LIT>\" in environ<EOL>and not doignoreranges<EOL>):<EOL><INDENT>ifrange = environ[\"<STR_LIT>\"]<EOL>secstime = util.parse_time_string(ifrange)<EOL>if secstime:<EOL><INDENT>if last_modified != secstime:<EOL><INDENT>doignoreranges = True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>ifrange = ifrange.strip('<STR_LIT>')<EOL>if entitytag is None or ifrange != entitytag:<EOL><INDENT>doignoreranges = True<EOL><DEDENT><DEDENT><DEDENT>ispartialranges = False<EOL>if \"<STR_LIT>\" in environ and not doignoreranges:<EOL><INDENT>ispartialranges = True<EOL>list_ranges, _totallength = util.obtain_content_ranges(<EOL>environ[\"<STR_LIT>\"], filesize<EOL>)<EOL>if len(list_ranges) == 
<NUM_LIT:0>:<EOL><INDENT>self._fail(HTTP_RANGE_NOT_SATISFIABLE)<EOL><DEDENT>(range_start, range_end, range_length) = list_ranges[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>(range_start, range_end, range_length) = (<NUM_LIT:0>, filesize - <NUM_LIT:1>, filesize)<EOL><DEDENT>mimetype = res.get_content_type()  <EOL>response_headers = []<EOL>if res.support_content_length():<EOL><INDENT>response_headers.append((\"<STR_LIT>\", str(range_length)))<EOL><DEDENT>if res.support_modified():<EOL><INDENT>response_headers.append(<EOL>(\"<STR_LIT>\", util.get_rfc1123_time(last_modified))<EOL>)<EOL><DEDENT>response_headers.append((\"<STR_LIT:Content-Type>\", mimetype))<EOL>response_headers.append((\"<STR_LIT>\", util.get_rfc1123_time()))<EOL>if res.support_etag():<EOL><INDENT>response_headers.append((\"<STR_LIT>\", '<STR_LIT>'.format(entitytag)))<EOL><DEDENT>if \"<STR_LIT>\" in environ[\"<STR_LIT>\"]:<EOL><INDENT>customHeaders = environ[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>for header, value in customHeaders:<EOL><INDENT>response_headers.append((header, value))<EOL><DEDENT><DEDENT>res.finalize_headers(environ, response_headers)<EOL>if ispartialranges:<EOL><INDENT>response_headers.append(<EOL>(<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\".format(range_start, range_end, filesize),<EOL>)<EOL>)<EOL>start_response(\"<STR_LIT>\", response_headers)<EOL><DEDENT>else:<EOL><INDENT>start_response(\"<STR_LIT>\", response_headers)<EOL><DEDENT>if is_head_method:<EOL><INDENT>yield b\"<STR_LIT>\"<EOL>return<EOL><DEDENT>fileobj = res.get_content()<EOL>if not doignoreranges:<EOL><INDENT>fileobj.seek(range_start)<EOL><DEDENT>contentlengthremaining = range_length<EOL>while <NUM_LIT:1>:<EOL><INDENT>if contentlengthremaining < <NUM_LIT:0> or contentlengthremaining > self.block_size:<EOL><INDENT>readbuffer = fileobj.read(self.block_size)<EOL><DEDENT>else:<EOL><INDENT>readbuffer = fileobj.read(contentlengthremaining)<EOL><DEDENT>assert compat.is_bytes(readbuffer)<EOL>yield readbuffer<EOL>contentlengthremaining -= 
len(readbuffer)<EOL>if len(readbuffer) == <NUM_LIT:0> or contentlengthremaining == <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT><DEDENT>fileobj.close()<EOL>return<EOL>", "docstring": "If-Range\n    If the entity is unchanged, send me the part(s) that I am missing;\n    otherwise, send me the entire new entity\n    If-Range: \"737060cd8c284d8af7ad3082f209582d\"\n\n@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.27", "id": "f8595:c0:m23"}
{"signature": "def do_POST(self, environ, start_response):", "body": "self._fail(HTTP_METHOD_NOT_ALLOWED)<EOL>", "docstring": "@see http://www.webdav.org/specs/rfc4918.html#METHOD_POST\n@see http://stackoverflow.com/a/22606899/19166", "id": "f8595:c0:m10"}
{"signature": "def do_PUT(self, environ, start_response):", "body": "path = environ[\"<STR_LIT>\"]<EOL>provider = self._davProvider<EOL>res = provider.get_resource_inst(path, environ)<EOL>parentRes = provider.get_resource_inst(util.get_uri_parent(path), environ)<EOL>isnewfile = res is None<EOL>if \"<STR_LIT>\" in environ:<EOL><INDENT>util.fail(HTTP_NOT_IMPLEMENTED, \"<STR_LIT>\")<EOL><DEDENT>if \"<STR_LIT>\" in environ:<EOL><INDENT>util.fail(<EOL>HTTP_BAD_REQUEST, \"<STR_LIT>\"<EOL>)<EOL><DEDENT>if res and res.is_collection:<EOL><INDENT>self._fail(HTTP_METHOD_NOT_ALLOWED, \"<STR_LIT>\")<EOL><DEDENT>elif (<EOL>parentRes is None or not parentRes.is_collection<EOL>):  <EOL><INDENT>self._fail(HTTP_CONFLICT, \"<STR_LIT>\")<EOL><DEDENT>self._evaluate_if_headers(res, environ)<EOL>if isnewfile:<EOL><INDENT>self._check_write_permission(parentRes, \"<STR_LIT:0>\", environ)<EOL>res = parentRes.create_empty_resource(util.get_uri_name(path))<EOL><DEDENT>else:<EOL><INDENT>self._check_write_permission(res, \"<STR_LIT:0>\", environ)<EOL><DEDENT>try:<EOL><INDENT>content_length = max(-<NUM_LIT:1>, int(environ.get(\"<STR_LIT>\", -<NUM_LIT:1>)))<EOL><DEDENT>except ValueError:<EOL><INDENT>content_length = -<NUM_LIT:1><EOL><DEDENT>if (content_length < <NUM_LIT:0>) and (<EOL>environ.get(\"<STR_LIT>\", \"<STR_LIT>\").lower() != \"<STR_LIT>\"<EOL>):<EOL><INDENT>agent = environ.get(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>if \"<STR_LIT>\" in agent or \"<STR_LIT>\" in agent:  <EOL><INDENT>_logger.warning(<EOL>\"<STR_LIT>\"<EOL>)<EOL>content_length = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>util.fail(<EOL>HTTP_LENGTH_REQUIRED,<EOL>\"<STR_LIT>\".format(<EOL>environ.get(\"<STR_LIT>\")<EOL>),<EOL>)<EOL><DEDENT><DEDENT>hasErrors = False<EOL>try:<EOL><INDENT>if environ.get(\"<STR_LIT>\", \"<STR_LIT>\").lower() == \"<STR_LIT>\":<EOL><INDENT>data_stream = self._stream_data_chunked(environ, self.block_size)<EOL><DEDENT>else:<EOL><INDENT>data_stream = self._stream_data(<EOL>environ, content_length, 
self.block_size<EOL>)<EOL><DEDENT>fileobj = res.begin_write(content_type=environ.get(\"<STR_LIT>\"))<EOL>if getattr(fileobj, \"<STR_LIT>\", None):<EOL><INDENT>fileobj.writelines(data_stream)<EOL><DEDENT>else:<EOL><INDENT>for data in data_stream:<EOL><INDENT>fileobj.write(data)<EOL><DEDENT><DEDENT>fileobj.close()<EOL><DEDENT>except Exception as e:<EOL><INDENT>res.end_write(with_errors=True)<EOL>_logger.exception(\"<STR_LIT>\")<EOL>util.fail(e)<EOL><DEDENT>res.end_write(hasErrors)<EOL>headers = None<EOL>if res.support_etag():<EOL><INDENT>entitytag = res.get_etag()<EOL>if entitytag is not None:<EOL><INDENT>headers = [(\"<STR_LIT>\", '<STR_LIT>'.format(entitytag))]<EOL><DEDENT><DEDENT>if isnewfile:<EOL><INDENT>return util.send_status_response(<EOL>environ, start_response, HTTP_CREATED, add_headers=headers<EOL>)<EOL><DEDENT>return util.send_status_response(<EOL>environ, start_response, HTTP_NO_CONTENT, add_headers=headers<EOL>)<EOL>", "docstring": "@see: http://www.webdav.org/specs/rfc4918.html#METHOD_PUT", "id": "f8595:c0:m14"}
{"signature": "def _copy_or_move(self, environ, start_response, is_move):", "body": "srcPath = environ[\"<STR_LIT>\"]<EOL>provider = self._davProvider<EOL>srcRes = provider.get_resource_inst(srcPath, environ)<EOL>srcParentRes = provider.get_resource_inst(util.get_uri_parent(srcPath), environ)<EOL>if srcRes is None:<EOL><INDENT>self._fail(HTTP_NOT_FOUND)<EOL><DEDENT>if \"<STR_LIT>\" not in environ:<EOL><INDENT>self._fail(HTTP_BAD_REQUEST, \"<STR_LIT>\")<EOL><DEDENT>if not environ.setdefault(\"<STR_LIT>\", \"<STR_LIT:T>\") in (\"<STR_LIT:T>\", \"<STR_LIT:F>\"):<EOL><INDENT>self._fail(HTTP_BAD_REQUEST, \"<STR_LIT>\")<EOL><DEDENT>if util.get_content_length(environ) != <NUM_LIT:0>:<EOL><INDENT>body = environ[\"<STR_LIT>\"].read(util.get_content_length(environ))<EOL>environ[\"<STR_LIT>\"] = <NUM_LIT:1><EOL>_logger.info(\"<STR_LIT>\".format(body[:<NUM_LIT:50>]))<EOL><DEDENT>if srcRes.is_collection:<EOL><INDENT>environ.setdefault(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>if not environ[\"<STR_LIT>\"] in (\"<STR_LIT:0>\", \"<STR_LIT>\"):<EOL><INDENT>self._fail(HTTP_BAD_REQUEST, \"<STR_LIT>\")<EOL><DEDENT>if is_move and environ[\"<STR_LIT>\"] != \"<STR_LIT>\":<EOL><INDENT>self._fail(<EOL>HTTP_BAD_REQUEST,<EOL>\"<STR_LIT>\",<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>environ.setdefault(\"<STR_LIT>\", \"<STR_LIT:0>\")<EOL>if not environ[\"<STR_LIT>\"] in (\"<STR_LIT:0>\", \"<STR_LIT>\"):<EOL><INDENT>self._fail(HTTP_BAD_REQUEST, \"<STR_LIT>\")<EOL><DEDENT>environ[\"<STR_LIT>\"] = \"<STR_LIT:0>\"<EOL><DEDENT>destinationHeader = compat.unquote(environ[\"<STR_LIT>\"])<EOL>destScheme, destNetloc, destPath, _destParams, _destQuery, _destFrag = compat.urlparse(<EOL>destinationHeader, allow_fragments=False<EOL>)<EOL>if srcRes.is_collection:<EOL><INDENT>destPath = destPath.rstrip(\"<STR_LIT:/>\") + \"<STR_LIT:/>\"<EOL><DEDENT>if destScheme and destScheme.lower() != environ[\"<STR_LIT>\"].lower():<EOL><INDENT>self._fail(<EOL>HTTP_BAD_GATEWAY, \"<STR_LIT>\"<EOL>)<EOL><DEDENT>elif destNetloc and 
destNetloc.lower() != environ[\"<STR_LIT>\"].lower():<EOL><INDENT>self._fail(<EOL>HTTP_BAD_GATEWAY, \"<STR_LIT>\"<EOL>)<EOL><DEDENT>elif not destPath.startswith(provider.mount_path + provider.share_path):<EOL><INDENT>self._fail(HTTP_BAD_GATEWAY, \"<STR_LIT>\")<EOL><DEDENT>destPath = destPath[len(provider.mount_path + provider.share_path) :]<EOL>assert destPath.startswith(\"<STR_LIT:/>\")<EOL>destRes = provider.get_resource_inst(destPath, environ)<EOL>destExists = destRes is not None<EOL>destParentRes = provider.get_resource_inst(<EOL>util.get_uri_parent(destPath), environ<EOL>)<EOL>if not destParentRes or not destParentRes.is_collection:<EOL><INDENT>self._fail(HTTP_CONFLICT, \"<STR_LIT>\")<EOL><DEDENT>self._evaluate_if_headers(srcRes, environ)<EOL>self._evaluate_if_headers(destRes, environ)<EOL>if is_move:<EOL><INDENT>self._check_write_permission(srcRes, \"<STR_LIT>\", environ)<EOL>if srcParentRes:<EOL><INDENT>self._check_write_permission(srcParentRes, \"<STR_LIT:0>\", environ)<EOL><DEDENT><DEDENT>if not destExists:<EOL><INDENT>self._check_write_permission(destParentRes, \"<STR_LIT:0>\", environ)<EOL><DEDENT>self._check_write_permission(destRes, \"<STR_LIT>\", environ)<EOL>if srcPath == destPath:<EOL><INDENT>self._fail(HTTP_FORBIDDEN, \"<STR_LIT>\")<EOL><DEDENT>elif util.is_equal_or_child_uri(srcPath, destPath):<EOL><INDENT>self._fail(HTTP_FORBIDDEN, \"<STR_LIT>\")<EOL><DEDENT>if destExists and environ[\"<STR_LIT>\"] != \"<STR_LIT:T>\":<EOL><INDENT>self._fail(<EOL>HTTP_PRECONDITION_FAILED,<EOL>\"<STR_LIT>\",<EOL>)<EOL><DEDENT>error_list = []<EOL>success_code = HTTP_CREATED<EOL>if destExists:<EOL><INDENT>success_code = HTTP_NO_CONTENT<EOL><DEDENT>try:<EOL><INDENT>if is_move:<EOL><INDENT>handled = srcRes.handle_move(destPath)<EOL><DEDENT>else:<EOL><INDENT>isInfinity = environ[\"<STR_LIT>\"] == \"<STR_LIT>\"<EOL>handled = srcRes.handle_copy(destPath, isInfinity)<EOL><DEDENT>assert handled in (True, False) or type(handled) is list<EOL>if type(handled) is 
list:<EOL><INDENT>error_list = handled<EOL>handled = True<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>error_list = [(srcRes.get_href(), as_DAVError(e))]<EOL>handled = True<EOL><DEDENT>if handled:<EOL><INDENT>return self._send_response(<EOL>environ, start_response, srcRes, HTTP_NO_CONTENT, error_list<EOL>)<EOL><DEDENT>srcList = srcRes.get_descendants(add_self=True)<EOL>srcRootLen = len(srcPath)<EOL>destRootLen = len(destPath)<EOL>if destExists:<EOL><INDENT>if is_move or not destRes.is_collection or not srcRes.is_collection:<EOL><INDENT>_logger.debug(\"<STR_LIT>\".format(destRes))<EOL>destRes.delete()<EOL>destRes = None<EOL><DEDENT>else:<EOL><INDENT>reverseDestList = destRes.get_descendants(<EOL>depth_first=True, add_self=False<EOL>)<EOL>srcPathList = [s.path for s in srcList]<EOL>_logger.debug(\"<STR_LIT>\".format(srcPathList))<EOL>for dRes in reverseDestList:<EOL><INDENT>_logger.debug(\"<STR_LIT>\".format(dRes))<EOL>relUrl = dRes.path[destRootLen:]<EOL>sp = srcPath + relUrl<EOL>if sp not in srcPathList:<EOL><INDENT>_logger.debug(<EOL>\"<STR_LIT>\".format(dRes)<EOL>)<EOL>dRes.delete()<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if is_move and srcRes.support_recursive_move(destPath):<EOL><INDENT>hasConflicts = False<EOL>for s in srcList:<EOL><INDENT>try:<EOL><INDENT>self._evaluate_if_headers(s, environ)<EOL><DEDENT>except Exception:<EOL><INDENT>hasConflicts = True<EOL>break<EOL><DEDENT><DEDENT>if not hasConflicts:<EOL><INDENT>try:<EOL><INDENT>_logger.debug(\"<STR_LIT>\".format(srcRes, destPath))<EOL>error_list = srcRes.move_recursive(destPath)<EOL><DEDENT>except Exception as e:<EOL><INDENT>error_list = [(srcRes.get_href(), as_DAVError(e))]<EOL><DEDENT>return self._send_response(<EOL>environ, start_response, srcRes, success_code, error_list<EOL>)<EOL><DEDENT><DEDENT>ignoreDict = {}<EOL>for sRes in srcList:<EOL><INDENT>parentError = False<EOL>for ignorePath in ignoreDict.keys():<EOL><INDENT>if util.is_equal_or_child_uri(ignorePath, 
sRes.path):<EOL><INDENT>parentError = True<EOL>break<EOL><DEDENT><DEDENT>if parentError:<EOL><INDENT>_logger.debug(<EOL>\"<STR_LIT>\".format(sRes.path)<EOL>)<EOL>continue<EOL><DEDENT>try:<EOL><INDENT>relUrl = sRes.path[srcRootLen:]<EOL>dPath = destPath + relUrl<EOL>self._evaluate_if_headers(sRes, environ)<EOL>sRes.copy_move_single(dPath, is_move)<EOL>if is_move and not sRes.is_collection:<EOL><INDENT>sRes.delete()<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>ignoreDict[sRes.path] = True<EOL>error_list.append((sRes.get_href(), as_DAVError(e)))<EOL><DEDENT><DEDENT>if is_move:<EOL><INDENT>reverseSrcList = srcList[:]<EOL>reverseSrcList.reverse()<EOL>_logger.debug(\"<STR_LIT>\".format(ignoreDict))<EOL>for sRes in reverseSrcList:<EOL><INDENT>if not sRes.is_collection:<EOL><INDENT>continue<EOL><DEDENT>childError = False<EOL>for ignorePath in ignoreDict.keys():<EOL><INDENT>if util.is_equal_or_child_uri(sRes.path, ignorePath):<EOL><INDENT>childError = True<EOL>break<EOL><DEDENT><DEDENT>if childError:<EOL><INDENT>_logger.debug(<EOL>\"<STR_LIT>\".format(<EOL>sRes.path<EOL>)<EOL>)<EOL>continue<EOL><DEDENT>try:<EOL><INDENT>_logger.debug(\"<STR_LIT>\".format(sRes))<EOL>sRes.delete()<EOL><DEDENT>except Exception as e:<EOL><INDENT>error_list.append((srcRes.get_href(), as_DAVError(e)))<EOL><DEDENT><DEDENT>_logger.debug(\"<STR_LIT>\".format(error_list))<EOL><DEDENT>return self._send_response(<EOL>environ, start_response, srcRes, success_code, error_list<EOL>)<EOL>", "docstring": "@see: http://www.webdav.org/specs/rfc4918.html#METHOD_COPY\n@see: http://www.webdav.org/specs/rfc4918.html#METHOD_MOVE", "id": "f8595:c0:m17"}
{"signature": "def _evaluate_if_headers(self, res, environ):", "body": "<EOL>if \"<STR_LIT>\" not in environ:<EOL><INDENT>util.parse_if_header_dict(environ)<EOL><DEDENT>if res is None:<EOL><INDENT>return<EOL><DEDENT>ifDict = environ[\"<STR_LIT>\"]<EOL>last_modified = -<NUM_LIT:1>  <EOL>entitytag = \"<STR_LIT>\"  <EOL>if res.get_last_modified() is not None:<EOL><INDENT>last_modified = res.get_last_modified()<EOL><DEDENT>if res.get_etag() is not None:<EOL><INDENT>entitytag = res.get_etag()<EOL><DEDENT>if (<EOL>\"<STR_LIT>\" in environ<EOL>or \"<STR_LIT>\" in environ<EOL>or \"<STR_LIT>\" in environ<EOL>or \"<STR_LIT>\" in environ<EOL>):<EOL><INDENT>util.evaluate_http_conditionals(res, last_modified, entitytag, environ)<EOL><DEDENT>if \"<STR_LIT>\" not in environ:<EOL><INDENT>return<EOL><DEDENT>refUrl = res.get_ref_url()<EOL>lockMan = self._davProvider.lock_manager<EOL>locktokenlist = []<EOL>if lockMan:<EOL><INDENT>lockList = lockMan.get_indirect_url_lock_list(<EOL>refUrl, environ[\"<STR_LIT>\"]<EOL>)<EOL>for lock in lockList:<EOL><INDENT>locktokenlist.append(lock[\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>if not util.test_if_header_dict(res, ifDict, refUrl, locktokenlist, entitytag):<EOL><INDENT>self._fail(HTTP_PRECONDITION_FAILED, \"<STR_LIT>\")<EOL><DEDENT>return<EOL>", "docstring": "Apply HTTP headers on <path>, raising DAVError if conditions fail.\n\n        Add environ['wsgidav.conditions.if'] and environ['wsgidav.ifLockTokenList'].\n        Handle these headers:\n\n          - If-Match, If-Modified-Since, If-None-Match, If-Unmodified-Since:\n            Raising HTTP_PRECONDITION_FAILED or HTTP_NOT_MODIFIED\n          - If:\n            Raising HTTP_PRECONDITION_FAILED\n\n        @see http://www.webdav.org/specs/rfc4918.html#HEADER_If\n        @see util.evaluate_http_conditionals", "id": "f8595:c0:m6"}
{"signature": "def do_UNLOCK(self, environ, start_response):", "body": "path = environ[\"<STR_LIT>\"]<EOL>provider = self._davProvider<EOL>res = self._davProvider.get_resource_inst(path, environ)<EOL>lockMan = provider.lock_manager<EOL>if lockMan is None:<EOL><INDENT>self._fail(HTTP_NOT_IMPLEMENTED, \"<STR_LIT>\")<EOL><DEDENT>elif util.get_content_length(environ) != <NUM_LIT:0>:<EOL><INDENT>self._fail(<EOL>HTTP_MEDIATYPE_NOT_SUPPORTED,<EOL>\"<STR_LIT>\",<EOL>)<EOL><DEDENT>elif res is None:<EOL><INDENT>self._fail(HTTP_NOT_FOUND)<EOL><DEDENT>elif \"<STR_LIT>\" not in environ:<EOL><INDENT>self._fail(HTTP_BAD_REQUEST, \"<STR_LIT>\")<EOL><DEDENT>self._evaluate_if_headers(res, environ)<EOL>lockToken = environ[\"<STR_LIT>\"].strip(\"<STR_LIT>\")<EOL>refUrl = res.get_ref_url()<EOL>if not lockMan.is_url_locked_by_token(refUrl, lockToken):<EOL><INDENT>self._fail(<EOL>HTTP_CONFLICT,<EOL>\"<STR_LIT>\",<EOL>err_condition=PRECONDITION_CODE_LockTokenMismatch,<EOL>)<EOL><DEDENT>if not lockMan.is_token_locked_by_user(lockToken, environ[\"<STR_LIT>\"]):<EOL><INDENT>self._fail(HTTP_FORBIDDEN, \"<STR_LIT>\")<EOL><DEDENT>lockMan.release(lockToken)<EOL>return util.send_status_response(environ, start_response, HTTP_NO_CONTENT)<EOL>", "docstring": "@see: http://www.webdav.org/specs/rfc4918.html#METHOD_UNLOCK", "id": "f8595:c0:m19"}
{"signature": "def _fail(self, value, context_info=None, src_exception=None, err_condition=None):", "body": "util.fail(value, context_info, src_exception, err_condition)<EOL>", "docstring": "Wrapper to raise (and log) DAVError.", "id": "f8595:c0:m3"}
{"signature": "def _send_response(<EOL>self, environ, start_response, root_res, success_code, error_list<EOL>):", "body": "assert success_code in (HTTP_CREATED, HTTP_NO_CONTENT, HTTP_OK)<EOL>if not error_list:<EOL><INDENT>return util.send_status_response(environ, start_response, success_code)<EOL><DEDENT>if len(error_list) == <NUM_LIT:1> and error_list[<NUM_LIT:0>][<NUM_LIT:0>] == root_res.get_href():<EOL><INDENT>return util.send_status_response(environ, start_response, error_list[<NUM_LIT:0>][<NUM_LIT:1>])<EOL><DEDENT>multistatusEL = xml_tools.make_multistatus_el()<EOL>for refurl, e in error_list:<EOL><INDENT>assert refurl.startswith(\"<STR_LIT:/>\")<EOL>assert isinstance(e, DAVError)<EOL>responseEL = etree.SubElement(multistatusEL, \"<STR_LIT>\")<EOL>etree.SubElement(responseEL, \"<STR_LIT>\").text = refurl<EOL>etree.SubElement(responseEL, \"<STR_LIT>\").text = \"<STR_LIT>\".format(<EOL>get_http_status_string(e)<EOL>)<EOL><DEDENT>return util.send_multi_status_response(environ, start_response, multistatusEL)<EOL>", "docstring": "Send WSGI response (single or multistatus).\n\n        - If error_list is None or [], then <success_code> is send as response.\n        - If error_list contains a single error with a URL that matches root_res,\n          then this error is returned.\n        - If error_list contains more than one error, then '207 Multi-Status' is\n          returned.", "id": "f8595:c0:m4"}
{"signature": "def do_MKCOL(self, environ, start_response):", "body": "path = environ[\"<STR_LIT>\"]<EOL>provider = self._davProvider<EOL>if util.get_content_length(environ) != <NUM_LIT:0>:<EOL><INDENT>self._fail(<EOL>HTTP_MEDIATYPE_NOT_SUPPORTED,<EOL>\"<STR_LIT>\",<EOL>)<EOL><DEDENT>if environ.setdefault(\"<STR_LIT>\", \"<STR_LIT:0>\") != \"<STR_LIT:0>\":<EOL><INDENT>self._fail(HTTP_BAD_REQUEST, \"<STR_LIT>\")<EOL><DEDENT>if provider.exists(path, environ):<EOL><INDENT>self._fail(<EOL>HTTP_METHOD_NOT_ALLOWED,<EOL>\"<STR_LIT>\",<EOL>)<EOL><DEDENT>parentRes = provider.get_resource_inst(util.get_uri_parent(path), environ)<EOL>if not parentRes or not parentRes.is_collection:<EOL><INDENT>self._fail(HTTP_CONFLICT, \"<STR_LIT>\")<EOL><DEDENT>self._check_write_permission(parentRes, \"<STR_LIT:0>\", environ)<EOL>parentRes.create_collection(util.get_uri_name(path))<EOL>return util.send_status_response(environ, start_response, HTTP_CREATED)<EOL>", "docstring": "Handle MKCOL request to create a new collection.\n\n        @see http://www.webdav.org/specs/rfc4918.html#METHOD_MKCOL", "id": "f8595:c0:m9"}
{"signature": "def delete(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Remove this resource (recursive).\n\n        Preconditions (ensured by caller):\n\n          - there are no conflicting locks or If-headers\n          - if support_recursive_delete() is False, and this is a collection,\n            all members have already been deleted.\n\n        When support_recursive_delete is True, this method must be prepared to\n        handle recursive deletes. This implies that child errors must be\n        reported as tuple list [ (<ref-url>, <DAVError>), ... ].\n        See http://www.webdav.org/specs/rfc4918.html#delete-collections\n\n        This function\n\n          - removes this resource\n          - if this is a non-empty collection, also removes all members.\n            Note that this may only occur, if support_recursive_delete is True.\n          - For recursive deletes, return a list of error tuples for all failed\n            resource paths.\n          - removes associated direct locks\n          - removes associated dead properties\n          - raises HTTP_FORBIDDEN for read-only resources\n          - raises HTTP_INTERNAL_ERROR on error\n\n        This method MUST be implemented by all providers that support write\n        access.", "id": "f8596:c0:m36"}
{"signature": "def get_last_modified(self):", "body": "return None<EOL>", "docstring": "Contains the Last-Modified header returned by a GET method without\n        accept headers.\n\n        Return None, if this live property is not supported.\n\n        Note that the last-modified date on a resource may reflect changes in\n        any part of the state of the resource, not necessarily just a change to\n        the response to the GET method. For example, a change in a property may\n        cause the last-modified date to change. The getlastmodified property\n        MUST be defined on any DAV compliant resource that returns the\n        Last-Modified header in response to a GET.\n\n        This method SHOULD be implemented, especially by non-collections.", "id": "f8596:c0:m9"}
{"signature": "def get_property_value(self, name):", "body": "refUrl = self.get_ref_url()<EOL>lm = self.provider.lock_manager<EOL>if lm and name == \"<STR_LIT>\":<EOL><INDENT>activelocklist = lm.get_url_lock_list(refUrl)<EOL>lockdiscoveryEL = etree.Element(name)<EOL>for lock in activelocklist:<EOL><INDENT>activelockEL = etree.SubElement(lockdiscoveryEL, \"<STR_LIT>\")<EOL>locktypeEL = etree.SubElement(activelockEL, \"<STR_LIT>\")<EOL>etree.SubElement(locktypeEL, \"<STR_LIT>\".format(\"<STR_LIT>\", lock[\"<STR_LIT:type>\"]))<EOL>lockscopeEL = etree.SubElement(activelockEL, \"<STR_LIT>\")<EOL>etree.SubElement(lockscopeEL, \"<STR_LIT>\".format(\"<STR_LIT>\", lock[\"<STR_LIT>\"]))<EOL>etree.SubElement(activelockEL, \"<STR_LIT>\").text = lock[\"<STR_LIT>\"]<EOL>if lock[\"<STR_LIT>\"]:<EOL><INDENT>ownerEL = xml_tools.string_to_xml(lock[\"<STR_LIT>\"])<EOL>activelockEL.append(ownerEL)<EOL><DEDENT>timeout = lock[\"<STR_LIT>\"]<EOL>if timeout < <NUM_LIT:0>:<EOL><INDENT>timeout = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>expire = lock[\"<STR_LIT>\"]<EOL>timeout = \"<STR_LIT>\" + str(int(expire - time.time()))<EOL><DEDENT>etree.SubElement(activelockEL, \"<STR_LIT>\").text = timeout<EOL>locktokenEL = etree.SubElement(activelockEL, \"<STR_LIT>\")<EOL>etree.SubElement(locktokenEL, \"<STR_LIT>\").text = lock[\"<STR_LIT>\"]<EOL>lockPath = self.provider.ref_url_to_path(lock[\"<STR_LIT:root>\"])<EOL>lockRes = self.provider.get_resource_inst(lockPath, self.environ)<EOL>lockHref = lockRes.get_href()<EOL>lockrootEL = etree.SubElement(activelockEL, \"<STR_LIT>\")<EOL>etree.SubElement(lockrootEL, \"<STR_LIT>\").text = lockHref<EOL><DEDENT>return lockdiscoveryEL<EOL><DEDENT>elif lm and name == \"<STR_LIT>\":<EOL><INDENT>supportedlockEL = etree.Element(name)<EOL>lockentryEL = etree.SubElement(supportedlockEL, \"<STR_LIT>\")<EOL>lockscopeEL = etree.SubElement(lockentryEL, \"<STR_LIT>\")<EOL>etree.SubElement(lockscopeEL, \"<STR_LIT>\")<EOL>locktypeEL = etree.SubElement(lockentryEL, 
\"<STR_LIT>\")<EOL>etree.SubElement(locktypeEL, \"<STR_LIT>\")<EOL>lockentryEL = etree.SubElement(supportedlockEL, \"<STR_LIT>\")<EOL>lockscopeEL = etree.SubElement(lockentryEL, \"<STR_LIT>\")<EOL>etree.SubElement(lockscopeEL, \"<STR_LIT>\")<EOL>locktypeEL = etree.SubElement(lockentryEL, \"<STR_LIT>\")<EOL>etree.SubElement(locktypeEL, \"<STR_LIT>\")<EOL>return supportedlockEL<EOL><DEDENT>elif name.startswith(\"<STR_LIT>\"):<EOL><INDENT>if name == \"<STR_LIT>\" and self.get_creation_date() is not None:<EOL><INDENT>return util.get_rfc3339_time(self.get_creation_date())<EOL><DEDENT>elif name == \"<STR_LIT>\" and self.get_content_type() is not None:<EOL><INDENT>return self.get_content_type()<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>if self.is_collection:<EOL><INDENT>resourcetypeEL = etree.Element(name)<EOL>etree.SubElement(resourcetypeEL, \"<STR_LIT>\")<EOL>return resourcetypeEL<EOL><DEDENT>return \"<STR_LIT>\"<EOL><DEDENT>elif (<EOL>name == \"<STR_LIT>\" and self.get_last_modified() is not None<EOL>):<EOL><INDENT>return util.get_rfc1123_time(self.get_last_modified())<EOL><DEDENT>elif (<EOL>name == \"<STR_LIT>\"<EOL>and self.get_content_length() is not None<EOL>):<EOL><INDENT>return str(self.get_content_length())<EOL><DEDENT>elif name == \"<STR_LIT>\" and self.get_etag() is not None:<EOL><INDENT>return self.get_etag()<EOL><DEDENT>elif name == \"<STR_LIT>\" and self.get_display_name() is not None:<EOL><INDENT>return self.get_display_name()<EOL><DEDENT>raise DAVError(HTTP_NOT_FOUND)<EOL><DEDENT>pm = self.provider.prop_manager<EOL>if pm:<EOL><INDENT>value = pm.get_property(refUrl, name, self.environ)<EOL>if value is not None:<EOL><INDENT>return xml_tools.string_to_xml(value)<EOL><DEDENT><DEDENT>raise DAVError(HTTP_NOT_FOUND)<EOL>", "docstring": "Return the value of a property.\n\n        name:\n            the property name in Clark notation.\n        return value:\n            may have different types, depending on the status:\n\n            - string or 
unicode: for standard property values.\n            - lxml.etree.Element: for complex values.\n\n            If the property is not available, a DAVError is raised.\n\n        This default implementation handles ``{DAV:}lockdiscovery`` and\n        ``{DAV:}supportedlock`` using the associated lock manager.\n\n        All other *live* properties (i.e. name starts with ``{DAV:}``) are\n        delegated to the self.xxx() getters.\n\n        Finally, other properties are considered *dead*, and are handled  by\n        the associated property manager.", "id": "f8596:c0:m23"}
{"signature": "def remove_all_properties(self, recursive):", "body": "if self.provider.prop_manager:<EOL><INDENT>self.provider.prop_manager.remove_properties(<EOL>self.get_ref_url(), self.environ<EOL>)<EOL><DEDENT>", "docstring": "Remove all associated dead properties.", "id": "f8596:c0:m25"}
{"signature": "def end_write(self, with_errors):", "body": "pass<EOL>", "docstring": "Called when PUT has finished writing.\n\n        This is only a notification that MAY be handled.", "id": "f8596:c1:m6"}
{"signature": "def support_recursive_move(self, dest_path):", "body": "return False<EOL>", "docstring": "Return True, if move_recursive() is available (see comments there).", "id": "f8596:c2:m10"}
{"signature": "def copy_move_single(self, dest_path, is_move):", "body": "raise NotImplementedError<EOL>", "docstring": "Copy or move this resource to destPath (non-recursive).\n\n        Preconditions (ensured by caller):\n\n          - there must not be any conflicting locks on destination\n          - overwriting is only allowed (i.e. destPath exists), when source and\n            dest are of the same type ((non-)collections) and a Overwrite='T'\n            was passed\n          - destPath must not be a child path of this resource\n\n        This function\n\n          - Overwrites non-collections content, if destination exists.\n          - MUST NOT copy collection members.\n          - MUST NOT copy locks.\n          - SHOULD copy live properties, when appropriate.\n            E.g. displayname should be copied, but creationdate should be\n            reset if the target did not exist before.\n            See http://www.webdav.org/specs/rfc4918.html#dav.properties\n          - SHOULD copy dead properties.\n          - raises HTTP_FORBIDDEN for read-only providers\n          - raises HTTP_INTERNAL_ERROR on error\n\n        When is_move is True,\n\n          - Live properties should be moved too (e.g. creationdate)\n          - Non-collections must be moved, not copied\n          - For collections, this function behaves like in copy-mode:\n            destination collection must be created and properties are copied.\n            Members are NOT created.\n            The source collection MUST NOT be removed.\n\n        This method MUST be implemented by all providers that support write\n        access.", "id": "f8596:c0:m38"}
{"signature": "def handle_move(self, dest_path):", "body": "return False<EOL>", "docstring": "Handle a MOVE request natively.\n\n        This method is called by the MOVE handler after checking for valid\n        request syntax and making sure that there are no conflicting locks and\n        If-headers.\n        Depending on the return value, this provider can control further\n        processing:\n\n        False:\n            handle_move() did not do anything. WsgiDAV will process the request\n            by calling delete() and copy_move_single() for every resource,\n            bottom-up.\n        True:\n            handle_move() has successfully performed the MOVE request.\n            HTTP_NO_CONTENT/HTTP_CREATED will be reported to the DAV client.\n        List of errors:\n            handle_move() tried to perform the move request, but failed\n            completely or partially. A list of errors is returned like\n            ``[ (<ref-url>, <DAVError>), ... ]``\n            These errors will be reported to the client.\n        DAVError raised:\n            handle_move() refuses to perform the move request. The DAVError\n            will be reported to the client.\n\n        An implementation may choose to apply other semantics and return True.\n        For example moving '/by_tag/cool/myres' to '/by_tag/hot/myres' may\n        simply remove the 'cool' tag from 'my_res' and add a 'hot' tag instead.\n        In this case, the resource might still be available by other URLs, so\n        locks and properties are not removed.\n\n        This default implementation returns ``False``, so standard processing\n        takes place.\n\n        Implementation of this method is OPTIONAL.", "id": "f8596:c0:m39"}
{"signature": "def get_property_names(self, is_allprop):", "body": "<EOL>propNameList = []<EOL>propNameList.append(\"<STR_LIT>\")<EOL>if self.get_creation_date() is not None:<EOL><INDENT>propNameList.append(\"<STR_LIT>\")<EOL><DEDENT>if self.get_content_length() is not None:<EOL><INDENT>assert not self.is_collection<EOL>propNameList.append(\"<STR_LIT>\")<EOL><DEDENT>if self.get_content_type() is not None:<EOL><INDENT>propNameList.append(\"<STR_LIT>\")<EOL><DEDENT>if self.get_last_modified() is not None:<EOL><INDENT>propNameList.append(\"<STR_LIT>\")<EOL><DEDENT>if self.get_display_name() is not None:<EOL><INDENT>propNameList.append(\"<STR_LIT>\")<EOL><DEDENT>if self.get_etag() is not None:<EOL><INDENT>propNameList.append(\"<STR_LIT>\")<EOL><DEDENT>if self.provider.lock_manager and not self.prevent_locking():<EOL><INDENT>propNameList.extend(_lockPropertyNames)<EOL><DEDENT>if self.provider.prop_manager:<EOL><INDENT>refUrl = self.get_ref_url()<EOL>propNameList.extend(<EOL>self.provider.prop_manager.get_properties(refUrl, self.environ)<EOL>)<EOL><DEDENT>return propNameList<EOL>", "docstring": "Return list of supported property names in Clark Notation.\n\n        Note that 'allprop', despite its name, which remains for\n        backward-compatibility, does not return every property, but only dead\n        properties and the live properties defined in RFC4918.\n\n        This default implementation returns a combination of:\n\n        - Supported standard live properties in the {DAV:} namespace, if the\n          related getter method returns not None.\n        - {DAV:}lockdiscovery and {DAV:}supportedlock, if a lock manager is\n          present\n        - If a property manager is present, then a list of dead properties is\n          appended\n\n        A resource provider may override this method, to add a list of\n        supported custom live property names.", "id": "f8596:c0:m21"}
{"signature": "def get_descendants(<EOL>self,<EOL>collections=True,<EOL>resources=True,<EOL>depth_first=False,<EOL>depth=\"<STR_LIT>\",<EOL>add_self=False,<EOL>):", "body": "assert depth in (\"<STR_LIT:0>\", \"<STR_LIT:1>\", \"<STR_LIT>\")<EOL>res = []<EOL>if add_self and not depth_first:<EOL><INDENT>res.append(self)<EOL><DEDENT>if depth != \"<STR_LIT:0>\" and self.is_collection:<EOL><INDENT>for child in self.get_member_list():<EOL><INDENT>if not child:<EOL><INDENT>self.get_member_list()<EOL><DEDENT>want = (collections and child.is_collection) or (<EOL>resources and not child.is_collection<EOL>)<EOL>if want and not depth_first:<EOL><INDENT>res.append(child)<EOL><DEDENT>if child.is_collection and depth == \"<STR_LIT>\":<EOL><INDENT>res.extend(<EOL>child.get_descendants(<EOL>collections, resources, depth_first, depth, add_self=False<EOL>)<EOL>)<EOL><DEDENT>if want and depth_first:<EOL><INDENT>res.append(child)<EOL><DEDENT><DEDENT><DEDENT>if add_self and depth_first:<EOL><INDENT>res.append(self)<EOL><DEDENT>return res<EOL>", "docstring": "Return a list _DAVResource objects of a collection (children,\n        grand-children, ...).\n\n        This default implementation calls self.get_member_list() recursively.\n\n        This function may also be called for non-collections (with add_self=True).\n\n        :Parameters:\n            depth_first : bool\n                use <False>, to list containers before content.\n                (e.g. when moving / copying branches.)\n                Use <True>, to list content before containers.\n                (e.g. when deleting branches.)\n            depth : string\n                '0' | '1' | 'infinity'", "id": "f8596:c0:m20"}
{"signature": "def support_modified(self):", "body": "return self.get_last_modified() is not None<EOL>", "docstring": "Return True, if this resource supports last modified dates.\n\n        This default implementation checks `self.get_last_modified() is not None`.", "id": "f8596:c0:m14"}
{"signature": "def get_member_names(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Return list of (direct) collection member names (UTF-8 byte strings).\n\n        Every provider MUST provide this method for collection resources.", "id": "f8596:c0:m19"}
{"signature": "def resolve(self, script_name, path_info):", "body": "if path_info in (\"<STR_LIT>\", \"<STR_LIT:/>\"):<EOL><INDENT>return self<EOL><DEDENT>assert path_info.startswith(\"<STR_LIT:/>\")<EOL>name, rest = util.pop_path(path_info)<EOL>res = self.get_member(name)<EOL>if res is None or rest in (\"<STR_LIT>\", \"<STR_LIT:/>\"):<EOL><INDENT>return res<EOL><DEDENT>return res.resolve(util.join_uri(script_name, name), rest)<EOL>", "docstring": "Return a _DAVResource object for the path (None, if not found).\n\n        `path_info`: is a URL relative to this object.", "id": "f8596:c2:m12"}
{"signature": "def begin_write(self, content_type=None):", "body": "assert not self.is_collection<EOL>raise DAVError(HTTP_FORBIDDEN)<EOL>", "docstring": "Open content as a stream for writing.\n\n        This method MUST be implemented by all providers that support write\n        access.", "id": "f8596:c0:m32"}
{"signature": "def set_last_modified(self, dest_path, time_stamp, dry_run):", "body": "raise NotImplementedError<EOL>", "docstring": "Set last modified time for destPath to timeStamp on epoch-format", "id": "f8596:c0:m10"}
{"signature": "def move_recursive(self, dest_path):", "body": "raise DAVError(HTTP_FORBIDDEN)<EOL>", "docstring": "Move this resource and members to destPath.\n\n        This method is only called, when support_recursive_move() returns True.\n\n        MOVE is frequently used by clients to rename a file without changing its\n        parent collection, so it's not appropriate to reset all live properties\n        that are set at resource creation. For example, the DAV:creationdate\n        property value SHOULD remain the same after a MOVE.\n\n        Preconditions (ensured by caller):\n\n          - there must not be any conflicting locks or If-header on source\n          - there must not be any conflicting locks or If-header on destination\n          - destPath must not exist\n          - destPath must not be a member of this resource\n\n        This method must be prepared to handle recursive moves. This implies\n        that child errors must be reported as tuple list\n        [ (<ref-url>, <DAVError>), ... ].\n        See http://www.webdav.org/specs/rfc4918.html#move-collections\n\n        This function\n\n          - moves this resource and all members to destPath.\n          - MUST NOT move associated locks.\n            Instead, if the source (or children thereof) have locks, then\n            these locks should be removed.\n          - SHOULD maintain associated live properties, when applicable\n            See http://www.webdav.org/specs/rfc4918.html#dav.properties\n          - MUST maintain associated dead properties\n          - raises HTTP_FORBIDDEN for read-only resources\n          - raises HTTP_INTERNAL_ERROR on error\n\n        An implementation may choose to apply other semantics.\n        For example copying '/by_tag/cool/myres' to '/by_tag/new/myres' may\n        simply add a 'new' tag to 'my_res'.\n\n        This method is only called, when self.support_recursive_move() returns\n        True. 
Otherwise, the request server implements MOVE using delete/copy.\n\n        This method MAY be implemented in order to improve performance.", "id": "f8596:c0:m41"}
{"signature": "def support_recursive_delete(self):", "body": "return False<EOL>", "docstring": "Return True, if delete() may be called on non-empty collections\n        (see comments there).\n\n        This default implementation returns False.", "id": "f8596:c2:m7"}
{"signature": "def get_content(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Open content as a stream for reading.\n\n        Returns a file-like object / stream containing the contents of the\n        resource specified.\n        The application will close() the stream.\n\n        This method MUST be implemented by all providers.", "id": "f8596:c1:m3"}
{"signature": "def get_directory_info(self):", "body": "assert self.is_collection<EOL>return None<EOL>", "docstring": "Return a list of dictionaries with information for directory\n        rendering.\n\n        This default implementation return None, so the dir browser will\n        traverse all members.\n\n        This method COULD be implemented for collection resources.", "id": "f8596:c0:m5"}
{"signature": "def get_resource_inst(self, path, environ):", "body": "raise NotImplementedError<EOL>", "docstring": "Return a _DAVResource object for path.\n\n        Should be called only once per request and resource::\n\n            res = provider.get_resource_inst(path, environ)\n            if res and not res.is_collection:\n                print(res.get_content_type())\n\n        If <path> does not exist, None is returned.\n        <environ> may be used by the provider to implement per-request caching.\n\n        See _DAVResource for details.\n\n        This method MUST be implemented.", "id": "f8596:c3:m8"}
{"signature": "def finalize_headers(self, environ, response_headers):", "body": "pass<EOL>", "docstring": "Perform custom operations on the response headers.\n\n        This gets called before the response is started.\n        It enables adding additional headers or modifying the default ones.", "id": "f8596:c0:m43"}
{"signature": "def get_preferred_path(self):", "body": "if self.path in (\"<STR_LIT>\", \"<STR_LIT:/>\"):<EOL><INDENT>return \"<STR_LIT:/>\"<EOL><DEDENT>if self.is_collection and not self.path.endswith(\"<STR_LIT:/>\"):<EOL><INDENT>return self.path + \"<STR_LIT:/>\"<EOL><DEDENT>return self.path<EOL>", "docstring": "Return preferred mapping for a resource mapping.\n\n        Different URLs may map to the same resource, e.g.:\n            '/a/b' == '/A/b' == '/a/b/'\n        get_preferred_path() returns the same value for all these variants, e.g.:\n            '/a/b/'   (assuming resource names considered case insensitive)\n\n        @param path: a UTF-8 encoded, unquoted byte string.\n        @return: a UTF-8 encoded, unquoted byte string.", "id": "f8596:c0:m15"}
{"signature": "def get_content(self):", "body": "assert not self.is_collection<EOL>raise NotImplementedError<EOL>", "docstring": "Open content as a stream for reading.\n\n        Returns a file-like object / stream containing the contents of the\n        resource specified.\n        The calling application will close() the stream.\n\n        This method MUST be implemented by all providers.", "id": "f8596:c0:m31"}
{"signature": "def get_display_info(self):", "body": "if self.is_collection:<EOL><INDENT>return {\"<STR_LIT:type>\": \"<STR_LIT>\"}<EOL><DEDENT>elif os.extsep in self.name:<EOL><INDENT>ext = self.name.split(os.extsep)[-<NUM_LIT:1>].upper()<EOL>if len(ext) < <NUM_LIT:5>:<EOL><INDENT>return {\"<STR_LIT:type>\": \"<STR_LIT>\".format(ext)}<EOL><DEDENT><DEDENT>return {\"<STR_LIT:type>\": \"<STR_LIT>\"}<EOL>", "docstring": "Return additional info dictionary for displaying (optional).\n\n        This information is not part of the DAV specification, but meant for use\n        by the dir browser middleware.\n\n        This default implementation returns ``{'type': '...'}``", "id": "f8596:c0:m7"}
{"signature": "def is_collection(self, path, environ):", "body": "res = self.get_resource_inst(path, environ)<EOL>return res and res.is_collection<EOL>", "docstring": "Return True, if path maps to an existing collection resource.\n\n        This method should only be used, if no other information is queried\n        for <path>. Otherwise a _DAVResource should be created first.", "id": "f8596:c3:m10"}
{"signature": "def set_share_path(self, share_path):", "body": "<EOL>assert share_path == \"<STR_LIT>\" or share_path.startswith(\"<STR_LIT:/>\")<EOL>if share_path == \"<STR_LIT:/>\":<EOL><INDENT>share_path = \"<STR_LIT>\"  <EOL><DEDENT>assert share_path in (\"<STR_LIT>\", \"<STR_LIT:/>\") or not share_path.endswith(\"<STR_LIT:/>\")<EOL>self.share_path = share_path<EOL>", "docstring": "Set application location for this resource provider.\n\n        @param share_path: a UTF-8 encoded, unquoted byte string.", "id": "f8596:c3:m4"}
{"signature": "def resolve(self, script_name, path_info):", "body": "if path_info in (\"<STR_LIT>\", \"<STR_LIT:/>\"):<EOL><INDENT>return self<EOL><DEDENT>return None<EOL>", "docstring": "Return a _DAVResource object for the path (None, if not found).\n\n        Since non-collection don't have members, we return None if path is not\n        empty.", "id": "f8596:c1:m7"}
{"signature": "def exists(self, path, environ):", "body": "return self.get_resource_inst(path, environ) is not None<EOL>", "docstring": "Return True, if path maps to an existing resource.\n\n        This method should only be used, if no other information is queried\n        for <path>. Otherwise a _DAVResource should be created first.\n\n        This method SHOULD be overridden by a more efficient implementation.", "id": "f8596:c3:m9"}
{"signature": "def get_content_length(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Returns the byte length of the content.\n\n        MUST be implemented.\n\n        See also _DAVResource.get_content_length()", "id": "f8596:c1:m1"}
{"signature": "def get_member_list(self):", "body": "if not self.is_collection:<EOL><INDENT>raise NotImplementedError<EOL><DEDENT>memberList = []<EOL>for name in self.get_member_names():<EOL><INDENT>member = self.get_member(name)<EOL>assert member is not None<EOL>memberList.append(member)<EOL><DEDENT>return memberList<EOL>", "docstring": "Return a list of direct members (_DAVResource or derived objects).\n\n        This default implementation calls self.get_member_names() and\n        self.get_member() for each of them.\n        A provider COULD overwrite this for performance reasons.", "id": "f8596:c0:m18"}
{"signature": "def prevent_locking(self):", "body": "return False<EOL>", "docstring": "Return True, to prevent locking.\n\n        This default implementation returns ``False``, so standard processing\n        takes place: locking (and refreshing of locks) is implemented using\n        the lock manager, if one is configured.", "id": "f8596:c0:m26"}
{"signature": "def end_write(self, with_errors):", "body": "pass<EOL>", "docstring": "Called when PUT has finished writing.\n\n        This is only a notification that MAY be handled.", "id": "f8596:c0:m33"}
{"signature": "def handle_delete(self):", "body": "return False<EOL>", "docstring": "Handle a DELETE request natively.\n\n        This method is called by the DELETE handler after checking for valid\n        request syntax and making sure that there are no conflicting locks and\n        If-headers.\n        Depending on the return value, this provider can control further\n        processing:\n\n        False:\n            handle_delete() did not do anything. WsgiDAV will process the request\n            by calling delete() for every resource, bottom-up.\n        True:\n            handle_delete() has successfully performed the DELETE request.\n            HTTP_NO_CONTENT will be reported to the DAV client.\n        List of errors:\n            handle_delete() tried to perform the delete request, but failed\n            completely or partially. A list of errors is returned like\n            ``[ (<ref-url>, <DAVError>), ... ]``\n            These errors will be reported to the client.\n        DAVError raised:\n            handle_delete() refuses to perform the delete request. The DAVError\n            will be reported to the client.\n\n        An implementation may choose to apply other semantics and return True.\n        For example deleting '/by_tag/cool/myres' may simply remove the 'cool'\n        tag from 'my_res'.\n        In this case, the resource might still be available by other URLs, so\n        locks and properties are not removed.\n\n        This default implementation returns ``False``, so standard processing\n        takes place.\n\n        Implementation of this method is OPTIONAL.", "id": "f8596:c0:m34"}
{"signature": "def get_member(self, name):", "body": "assert self.is_collection<EOL>return self.provider.get_resource_inst(<EOL>util.join_uri(self.path, name), self.environ<EOL>)<EOL>", "docstring": "Return child resource with a given name (None, if not found).\n\n        This method COULD be overridden by a derived class, for performance\n        reasons.\n        This default implementation calls self.provider.get_resource_inst().", "id": "f8596:c2:m5"}
{"signature": "def set_property_value(self, name, value, dry_run=False):", "body": "assert value is None or xml_tools.is_etree_element(value)<EOL>if name in _lockPropertyNames:<EOL><INDENT>raise DAVError(<EOL>HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty<EOL>)<EOL><DEDENT>config = self.environ[\"<STR_LIT>\"]<EOL>mutableLiveProps = config.get(\"<STR_LIT>\", [])<EOL>if (<EOL>name.startswith(\"<STR_LIT>\")<EOL>and name in _standardLivePropNames<EOL>and name in mutableLiveProps<EOL>):<EOL><INDENT>if name in (\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>try:<EOL><INDENT>return self.set_last_modified(self.path, value.text, dry_run)<EOL><DEDENT>except Exception:<EOL><INDENT>_logger.warning(<EOL>\"<STR_LIT>\".format(<EOL>self.path<EOL>)<EOL>)<EOL><DEDENT><DEDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>if name.startswith(\"<STR_LIT>\"):<EOL><INDENT>agent = self.environ.get(\"<STR_LIT>\", \"<STR_LIT:None>\")<EOL>win32_emu = config.get(\"<STR_LIT>\", {}).get(\"<STR_LIT>\", False)<EOL>if win32_emu and \"<STR_LIT>\" not in agent:<EOL><INDENT>if \"<STR_LIT>\" in name:<EOL><INDENT>return self.set_last_modified(self.path, value.text, dry_run)<EOL><DEDENT>elif \"<STR_LIT>\" in name:<EOL><INDENT>return True<EOL><DEDENT>elif \"<STR_LIT>\" in name:<EOL><INDENT>return True<EOL><DEDENT>elif \"<STR_LIT>\" in name:<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT>pm = self.provider.prop_manager<EOL>if pm and not name.startswith(\"<STR_LIT>\"):<EOL><INDENT>refUrl = self.get_ref_url()<EOL>if value is None:<EOL><INDENT>return pm.remove_property(refUrl, name, dry_run, self.environ)<EOL><DEDENT>else:<EOL><INDENT>value = etree.tostring(value)<EOL>return pm.write_property(refUrl, name, value, dry_run, self.environ)<EOL><DEDENT><DEDENT>raise DAVError(HTTP_FORBIDDEN)<EOL>", "docstring": "Set a property value or remove a property.\n\n        value == None means 'remove property'.\n        Raise HTTP_FORBIDDEN if property is read-only, or not supported.\n\n        When 
dry_run is True, this function should raise errors, as in a real\n        run, but MUST NOT change any data.\n\n        This default implementation\n\n        - raises HTTP_FORBIDDEN, if trying to modify a locking property\n        - raises HTTP_FORBIDDEN, if trying to modify an immutable {DAV:}\n          property\n        - handles Windows' Win32LastModifiedTime to set the getlastmodified\n          property, if enabled\n        - stores everything else as dead property, if a property manager is\n          present.\n        - raises HTTP_FORBIDDEN, else\n\n        Removing a non-existing prop is NOT an error.\n\n        Note: RFC 4918 states that {DAV:}displayname 'SHOULD NOT be protected'\n\n        A resource provider may override this method, to update supported custom\n        live properties.", "id": "f8596:c0:m24"}
{"signature": "def get_content_type(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Contains the Content-Type header returned by a GET without accept\n        headers.\n\n        This getcontenttype property MUST be defined on any DAV compliant\n        resource that returns the Content-Type header in response to a GET.\n        See http://www.webdav.org/specs/rfc4918.html#PROPERTY_getcontenttype", "id": "f8596:c1:m2"}
{"signature": "def copy_move_single(self, dest_path, is_move):", "body": "if self.provider.readonly:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>fpDest = self.provider._loc_to_file_path(dest_path, self.environ)<EOL>assert not util.is_equal_or_child_uri(self.path, dest_path)<EOL>if not os.path.exists(fpDest):<EOL><INDENT>os.mkdir(fpDest)<EOL><DEDENT>try:<EOL><INDENT>shutil.copystat(self._file_path, fpDest)<EOL><DEDENT>except Exception:<EOL><INDENT>_logger.exception(\"<STR_LIT>\".format(self._file_path))<EOL><DEDENT>propMan = self.provider.prop_manager<EOL>if propMan:<EOL><INDENT>destRes = self.provider.get_resource_inst(dest_path, self.environ)<EOL>if is_move:<EOL><INDENT>propMan.move_properties(<EOL>self.get_ref_url(),<EOL>destRes.get_ref_url(),<EOL>with_children=False,<EOL>environ=self.environ,<EOL>)<EOL><DEDENT>else:<EOL><INDENT>propMan.copy_properties(<EOL>self.get_ref_url(), destRes.get_ref_url(), self.environ<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "See DAVResource.copy_move_single()", "id": "f8599:c1:m11"}
{"signature": "def get_member_names(self):", "body": "<EOL>nameList = []<EOL>assert compat.is_unicode(self._file_path)<EOL>for name in os.listdir(self._file_path):<EOL><INDENT>if not compat.is_unicode(name):<EOL><INDENT>name = name.decode(sys.getfilesystemencoding())<EOL><DEDENT>assert compat.is_unicode(name)<EOL>fp = os.path.join(self._file_path, name)<EOL>if not os.path.isdir(fp) and not os.path.isfile(fp):<EOL><INDENT>_logger.debug(\"<STR_LIT>\".format(fp))<EOL>continue<EOL><DEDENT>name = compat.to_native(name)<EOL>nameList.append(name)<EOL><DEDENT>return nameList<EOL>", "docstring": "Return list of direct collection member names (utf-8 encoded).\n\n        See DAVCollection.get_member_names()", "id": "f8599:c1:m6"}
{"signature": "def _loc_to_file_path(self, path, environ=None):", "body": "root_path = self.root_folder_path<EOL>assert root_path is not None<EOL>assert compat.is_native(root_path)<EOL>assert compat.is_native(path)<EOL>path_parts = path.strip(\"<STR_LIT:/>\").split(\"<STR_LIT:/>\")<EOL>file_path = os.path.abspath(os.path.join(root_path, *path_parts))<EOL>if not file_path.startswith(root_path):<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\".format(<EOL>file_path<EOL>)<EOL>)<EOL><DEDENT>file_path = util.to_unicode_safe(file_path)<EOL>return file_path<EOL>", "docstring": "Convert resource path to a unicode absolute file path.\n        Optional environ argument may be useful e.g. in relation to per-user\n        sub-folder chrooting inside root_folder_path.", "id": "f8599:c2:m2"}
{"signature": "def copy_move_single(self, dest_path, is_move):", "body": "if self.provider.readonly:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>fpDest = self.provider._loc_to_file_path(dest_path, self.environ)<EOL>assert not util.is_equal_or_child_uri(self.path, dest_path)<EOL>shutil.copy2(self._file_path, fpDest)<EOL>propMan = self.provider.prop_manager<EOL>if propMan:<EOL><INDENT>destRes = self.provider.get_resource_inst(dest_path, self.environ)<EOL>if is_move:<EOL><INDENT>propMan.move_properties(<EOL>self.get_ref_url(),<EOL>destRes.get_ref_url(),<EOL>with_children=False,<EOL>environ=self.environ,<EOL>)<EOL><DEDENT>else:<EOL><INDENT>propMan.copy_properties(<EOL>self.get_ref_url(), destRes.get_ref_url(), self.environ<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "See DAVResource.copy_move_single()", "id": "f8599:c0:m12"}
{"signature": "def begin_write(self, content_type=None):", "body": "assert not self.is_collection<EOL>if self.provider.readonly:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>return open(self._file_path, \"<STR_LIT:wb>\", BUFFER_SIZE)<EOL>", "docstring": "Open content as a stream for writing.\n\n        See DAVResource.begin_write()", "id": "f8599:c0:m10"}
{"signature": "def support_recursive_move(self, dest_path):", "body": "return True<EOL>", "docstring": "Return True, if move_recursive() is available (see comments there).", "id": "f8599:c0:m13"}
{"signature": "def set_last_modified(self, dest_path, time_stamp, dry_run):", "body": "<EOL>secs = util.parse_time_string(time_stamp)<EOL>if not dry_run:<EOL><INDENT>os.utime(self._file_path, (secs, secs))<EOL><DEDENT>return True<EOL>", "docstring": "Set last modified time for destPath to timeStamp on epoch-format", "id": "f8599:c1:m14"}
{"signature": "def move_recursive(self, dest_path):", "body": "if self.provider.readonly:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>fpDest = self.provider._loc_to_file_path(dest_path, self.environ)<EOL>assert not util.is_equal_or_child_uri(self.path, dest_path)<EOL>assert not os.path.exists(fpDest)<EOL>_logger.debug(\"<STR_LIT>\".format(self._file_path, fpDest))<EOL>shutil.move(self._file_path, fpDest)<EOL>if self.provider.prop_manager:<EOL><INDENT>destRes = self.provider.get_resource_inst(dest_path, self.environ)<EOL>self.provider.prop_manager.move_properties(<EOL>self.get_ref_url(),<EOL>destRes.get_ref_url(),<EOL>with_children=True,<EOL>environ=self.environ,<EOL>)<EOL><DEDENT>", "docstring": "See DAVResource.move_recursive()", "id": "f8599:c1:m13"}
{"signature": "def get_content(self):", "body": "assert not self.is_collection<EOL>return open(self._file_path, \"<STR_LIT:rb>\", BUFFER_SIZE)<EOL>", "docstring": "Open content as a stream for reading.\n\n        See DAVResource.get_content()", "id": "f8599:c0:m9"}
{"signature": "def create_collection(self, name):", "body": "assert \"<STR_LIT:/>\" not in name<EOL>if self.provider.readonly:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>path = util.join_uri(self.path, name)<EOL>fp = self.provider._loc_to_file_path(path, self.environ)<EOL>os.mkdir(fp)<EOL>", "docstring": "Create a new collection as member of self.\n\n        See DAVResource.create_collection()", "id": "f8599:c1:m9"}
{"signature": "def move_recursive(self, dest_path):", "body": "if self.provider.readonly:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>fpDest = self.provider._loc_to_file_path(dest_path, self.environ)<EOL>assert not util.is_equal_or_child_uri(self.path, dest_path)<EOL>assert not os.path.exists(fpDest)<EOL>_logger.debug(\"<STR_LIT>\".format(self._file_path, fpDest))<EOL>shutil.move(self._file_path, fpDest)<EOL>if self.provider.prop_manager:<EOL><INDENT>destRes = self.provider.get_resource_inst(dest_path, self.environ)<EOL>self.provider.prop_manager.move_properties(<EOL>self.get_ref_url(),<EOL>destRes.get_ref_url(),<EOL>with_children=True,<EOL>environ=self.environ,<EOL>)<EOL><DEDENT>", "docstring": "See DAVResource.move_recursive()", "id": "f8599:c0:m14"}
{"signature": "def create_empty_resource(self, name):", "body": "assert \"<STR_LIT:/>\" not in name<EOL>if self.provider.readonly:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>path = util.join_uri(self.path, name)<EOL>fp = self.provider._loc_to_file_path(path, self.environ)<EOL>f = open(fp, \"<STR_LIT:wb>\")<EOL>f.close()<EOL>return self.provider.get_resource_inst(path, self.environ)<EOL>", "docstring": "Create an empty (length-0) resource.\n\n        See DAVResource.create_empty_resource()", "id": "f8599:c1:m8"}
{"signature": "def get_member(self, name):", "body": "assert compat.is_native(name), \"<STR_LIT>\".format(name)<EOL>fp = os.path.join(self._file_path, compat.to_unicode(name))<EOL>path = util.join_uri(self.path, name)<EOL>if os.path.isdir(fp):<EOL><INDENT>res = FolderResource(path, self.environ, fp)<EOL><DEDENT>elif os.path.isfile(fp):<EOL><INDENT>res = FileResource(path, self.environ, fp)<EOL><DEDENT>else:<EOL><INDENT>_logger.debug(\"<STR_LIT>\".format(path))<EOL>res = None<EOL><DEDENT>return res<EOL>", "docstring": "Return direct collection member (DAVResource or derived).\n\n        See DAVCollection.get_member()", "id": "f8599:c1:m7"}
{"signature": "def get_resource_inst(self, path, environ):", "body": "self._count_get_resource_inst += <NUM_LIT:1><EOL>fp = self._loc_to_file_path(path, environ)<EOL>if not os.path.exists(fp):<EOL><INDENT>return None<EOL><DEDENT>if os.path.isdir(fp):<EOL><INDENT>return FolderResource(path, environ, fp)<EOL><DEDENT>return FileResource(path, environ, fp)<EOL>", "docstring": "Return info dictionary for path.\n\n        See DAVProvider.get_resource_inst()", "id": "f8599:c2:m4"}
{"signature": "def delete(self):", "body": "if self.provider.readonly:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>shutil.rmtree(self._file_path, ignore_errors=False)<EOL>self.remove_all_properties(True)<EOL>self.remove_all_locks(True)<EOL>", "docstring": "Remove this resource or collection (recursive).\n\n        See DAVResource.delete()", "id": "f8599:c1:m10"}
{"signature": "def compute_digest_response(<EOL>self, realm, user_name, method, uri, nonce, cnonce, qop, nc, environ<EOL>):", "body": "def md5h(data):<EOL><INDENT>return md5(compat.to_bytes(data)).hexdigest()<EOL><DEDENT>def md5kd(secret, data):<EOL><INDENT>return md5h(secret + \"<STR_LIT::>\" + data)<EOL><DEDENT>A1 = self.domain_controller.digest_auth_user(realm, user_name, environ)<EOL>if not A1:<EOL><INDENT>return False<EOL><DEDENT>A2 = method + \"<STR_LIT::>\" + uri<EOL>if qop:<EOL><INDENT>res = md5kd(<EOL>A1, nonce + \"<STR_LIT::>\" + nc + \"<STR_LIT::>\" + cnonce + \"<STR_LIT::>\" + qop + \"<STR_LIT::>\" + md5h(A2)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>res = md5kd(A1, nonce + \"<STR_LIT::>\" + md5h(A2))<EOL><DEDENT>return res<EOL>", "docstring": "Computes digest hash.\n\n        Calculation of the A1 (HA1) part is delegated to the dc interface method\n        `digest_auth_user()`.\n\n        Args:\n            realm (str):\n            user_name (str):\n            method (str): WebDAV Request Method\n            uri (str):\n            nonce (str): server generated nonce value\n            cnonce (str): client generated cnonce value\n            qop (str): quality of protection\n            nc (str) (number), nonce counter incremented by client\n        Returns:\n            MD5 hash string\n            or False if user rejected by domain controller", "id": "f8601:c0:m8"}
{"signature": "def get_response_page(self):", "body": "<EOL>if self.err_condition:<EOL><INDENT>return (\"<STR_LIT>\", compat.to_bytes(self.err_condition.as_string()))<EOL><DEDENT>status = get_http_status_string(self)<EOL>html = []<EOL>html.append(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL>html.append(\"<STR_LIT>\")<EOL>html.append(<EOL>\"<STR_LIT>\"<EOL>)<EOL>html.append(\"<STR_LIT>\".format(status))<EOL>html.append(\"<STR_LIT>\")<EOL>html.append(\"<STR_LIT>\".format(status))<EOL>html.append(\"<STR_LIT>\".format(compat.html_escape(self.get_user_info())))<EOL>html.append(\"<STR_LIT>\")<EOL>html.append(<EOL>\"<STR_LIT>\".format(<EOL>__version__, compat.html_escape(str(datetime.datetime.now()), \"<STR_LIT:utf-8>\")<EOL>)<EOL>)<EOL>html.append(\"<STR_LIT>\")<EOL>html = \"<STR_LIT:\\n>\".join(html)<EOL>return (\"<STR_LIT>\", compat.to_bytes(html))<EOL>", "docstring": "Return a tuple (content-type, response page).", "id": "f8602:c1:m4"}
{"signature": "def get_user_info(self):", "body": "if self.value in ERROR_DESCRIPTIONS:<EOL><INDENT>s = \"<STR_LIT:{}>\".format(ERROR_DESCRIPTIONS[self.value])<EOL><DEDENT>else:<EOL><INDENT>s = \"<STR_LIT:{}>\".format(self.value)<EOL><DEDENT>if self.context_info:<EOL><INDENT>s += \"<STR_LIT>\".format(self.context_info)<EOL><DEDENT>elif self.value in ERROR_RESPONSES:<EOL><INDENT>s += \"<STR_LIT>\".format(ERROR_RESPONSES[self.value])<EOL><DEDENT>if self.src_exception:<EOL><INDENT>s += \"<STR_LIT>\".format(self.src_exception)<EOL><DEDENT>if self.err_condition:<EOL><INDENT>s += \"<STR_LIT>\".format(self.err_condition)<EOL><DEDENT>return s<EOL>", "docstring": "Return readable string.", "id": "f8602:c1:m3"}
{"signature": "def _run_gevent(app, config, mode):", "body": "import gevent<EOL>import gevent.monkey<EOL>gevent.monkey.patch_all()<EOL>from gevent.pywsgi import WSGIServer<EOL>server_args = {<EOL>\"<STR_LIT>\": (config[\"<STR_LIT:host>\"], config[\"<STR_LIT:port>\"]),<EOL>\"<STR_LIT>\": app,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>}<EOL>protocol = \"<STR_LIT:http>\"<EOL>server_args.update(config.get(\"<STR_LIT>\", {}))<EOL>dav_server = WSGIServer(server_args[\"<STR_LIT>\"], app)<EOL>_logger.info(\"<STR_LIT>\".format(dav_server))<EOL>_logger.info(<EOL>\"<STR_LIT>\".format(protocol, config[\"<STR_LIT:host>\"], config[\"<STR_LIT:port>\"])<EOL>)<EOL>try:<EOL><INDENT>gevent.spawn(dav_server.serve_forever())<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>_logger.warning(\"<STR_LIT>\")<EOL><DEDENT>return<EOL>", "docstring": "Run WsgiDAV using gevent if gevent is installed.\n\n    See\n      https://github.com/gevent/gevent/blob/master/src/gevent/pywsgi.py#L1356\n      https://github.com/gevent/gevent/blob/master/src/gevent/server.py#L38\n     for more options", "id": "f8605:m5"}
{"signature": "def _init_command_line_options():", "body": "description = \"\"\"<STR_LIT>\"\"\"nsed under the MIT license.<EOL>https://github.com/mar10/wsgidav for additional information.<EOL>parser = argparse.ArgumentParser(<EOL>prog=\"<STR_LIT>\",<EOL>description=description,<EOL>epilog=epilog,<EOL>formatter_class=argparse.RawTextHelpFormatter,<EOL>)<EOL>parser.add_argument(<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>dest=\"<STR_LIT:port>\",<EOL>type=int,<EOL>help=\"<STR_LIT>\",<EOL>)<EOL>parser.add_argument(<EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",<EOL>dest=\"<STR_LIT:host>\",<EOL>help=(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>),<EOL>),<EOL>parser.add_argument(<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>dest=\"<STR_LIT>\",<EOL>action=FullExpandedPath,<EOL>help=\"<STR_LIT>\",<EOL>)<EOL>parser.add_argument(<EOL>\"<STR_LIT>\",<EOL>choices=(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"),<EOL>help=\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>)<EOL>parser.add_argument(<EOL>\"<STR_LIT>\",<EOL>choices=SUPPORTED_SERVERS.keys(),<EOL>help=\"<STR_LIT>\",<EOL>)<EOL>parser.add_argument(<EOL>\"<STR_LIT>\",<EOL>choices=(\"<STR_LIT>\", \"<STR_LIT>\"),<EOL>help=\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>)<EOL>qv_group = parser.add_mutually_exclusive_group()<EOL>qv_group.add_argument(<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>action=\"<STR_LIT:count>\",<EOL>default=<NUM_LIT:3>,<EOL>help=\"<STR_LIT>\",<EOL>)<EOL>qv_group.add_argument(<EOL>\"<STR_LIT>\", \"<STR_LIT>\", default=<NUM_LIT:0>, action=\"<STR_LIT:count>\", help=\"<STR_LIT>\"<EOL>)<EOL>qv_group = 
parser.add_mutually_exclusive_group()<EOL>qv_group.add_argument(<EOL>\"<STR_LIT:-c>\",<EOL>\"<STR_LIT>\",<EOL>dest=\"<STR_LIT>\",<EOL>action=FullExpandedPath,<EOL>help=(<EOL>\"<STR_LIT>\".format(<EOL>DEFAULT_CONFIG_FILES<EOL>)<EOL>),<EOL>)<EOL>qv_group.add_argument(<EOL>\"<STR_LIT>\",<EOL>action=\"<STR_LIT:store_true>\",<EOL>dest=\"<STR_LIT>\",<EOL>help=\"<STR_LIT>\".format(DEFAULT_CONFIG_FILES),<EOL>)<EOL>parser.add_argument(<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>action=\"<STR_LIT:store_true>\",<EOL>help=\"<STR_LIT>\",<EOL>)<EOL>args = parser.parse_args()<EOL>args.verbose -= args.quiet<EOL>del args.quiet<EOL>if args.root_path and not os.path.isdir(args.root_path):<EOL><INDENT>msg = \"<STR_LIT>\".format(args.root_path)<EOL>raise parser.error(msg)<EOL><DEDENT>if args.version:<EOL><INDENT>if args.verbose >= <NUM_LIT:4>:<EOL><INDENT>msg = \"<STR_LIT>\".format(<EOL>__version__, util.PYTHON_VERSION, platform.platform(aliased=True)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>msg = \"<STR_LIT:{}>\".format(__version__)<EOL><DEDENT>print(msg)<EOL>sys.exit()<EOL><DEDENT>if args.no_config:<EOL><INDENT>pass<EOL><DEDENT>elif args.config_file is None:<EOL><INDENT>for filename in DEFAULT_CONFIG_FILES:<EOL><INDENT>defPath = os.path.abspath(filename)<EOL>if os.path.exists(defPath):<EOL><INDENT>if args.verbose >= <NUM_LIT:3>:<EOL><INDENT>print(\"<STR_LIT>\".format(defPath))<EOL><DEDENT>args.config_file = defPath<EOL>break<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>args.config_file = os.path.abspath(args.config_file)<EOL>if not os.path.isfile(args.config_file):<EOL><INDENT>parser.error(<EOL>\"<STR_LIT>\".format(<EOL>args.config_file<EOL>)<EOL>)<EOL><DEDENT><DEDENT>cmdLineOpts = args.__dict__.copy()<EOL>if args.verbose >= <NUM_LIT:5>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>for k, v in cmdLineOpts.items():<EOL><INDENT>print(\"<STR_LIT>\".format(k, v))<EOL><DEDENT><DEDENT>return cmdLineOpts, parser<EOL>", "docstring": "Parse command line options into a dictionary.", "id": "f8605:m1"}
{"signature": "def _run_wsgiref(app, config, mode):", "body": "<EOL>from wsgiref.simple_server import make_server, software_version<EOL>version = \"<STR_LIT>\".format(__version__, software_version)<EOL>_logger.info(\"<STR_LIT>\".format(version))<EOL>_logger.warning(<EOL>\"<STR_LIT>\"<EOL>)<EOL>httpd = make_server(config[\"<STR_LIT:host>\"], config[\"<STR_LIT:port>\"], app)<EOL>try:<EOL><INDENT>httpd.serve_forever()<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>_logger.warning(\"<STR_LIT>\")<EOL><DEDENT>return<EOL>", "docstring": "Run WsgiDAV using wsgiref.simple_server, on Python 2.5+.", "id": "f8605:m9"}
{"signature": "def _run_flup(app, config, mode):", "body": "<EOL>if mode == \"<STR_LIT>\":<EOL><INDENT>from flup.server.fcgi import WSGIServer, __version__ as flupver<EOL><DEDENT>elif mode == \"<STR_LIT>\":<EOL><INDENT>from flup.server.fcgi_fork import WSGIServer, __version__ as flupver<EOL><DEDENT>else:<EOL><INDENT>raise ValueError<EOL><DEDENT>_logger.info(<EOL>\"<STR_LIT>\".format(<EOL>__version__, WSGIServer.__module__, flupver<EOL>)<EOL>)<EOL>server = WSGIServer(<EOL>app,<EOL>bindAddress=(config[\"<STR_LIT:host>\"], config[\"<STR_LIT:port>\"]),<EOL>)<EOL>try:<EOL><INDENT>server.run()<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>_logger.warning(\"<STR_LIT>\")<EOL><DEDENT>return<EOL>", "docstring": "Run WsgiDAV using flup.server.fcgi if Flup is installed.", "id": "f8605:m8"}
{"signature": "def _get_checked_path(path, config, must_exist=True, allow_none=True):", "body": "if path in (None, \"<STR_LIT>\"):<EOL><INDENT>if allow_none:<EOL><INDENT>return None<EOL><DEDENT>raise ValueError(\"<STR_LIT>\".format(path))<EOL><DEDENT>config_file = config.get(\"<STR_LIT>\")<EOL>if config_file and not os.path.isabs(path):<EOL><INDENT>path = os.path.normpath(os.path.join(os.path.dirname(config_file), path))<EOL><DEDENT>else:<EOL><INDENT>path = os.path.abspath(path)<EOL><DEDENT>if must_exist and not os.path.exists(path):<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(path))<EOL><DEDENT>return path<EOL>", "docstring": "Convert path to absolute if not None.", "id": "f8605:m0"}
{"signature": "def serve_forever_stoppable(self):", "body": "self.stop_request = False<EOL>self.stopped = False<EOL>while not self.stop_request:<EOL><INDENT>self.handle_request()<EOL><DEDENT>self.stopped = True<EOL>", "docstring": "Handle one request at a time until stop_serve_forever().\n\n        http://code.activestate.com/recipes/336012/", "id": "f8608:c1:m2"}
{"signature": "def stop_serve_forever(self):", "body": "assert hasattr(<EOL>self, \"<STR_LIT>\"<EOL>), \"<STR_LIT>\"<EOL>assert not self.stop_request, \"<STR_LIT>\"<EOL>self.stop_request = True<EOL>time.sleep(<NUM_LIT:0.1>)<EOL>if self.stopped:<EOL><INDENT>return<EOL><DEDENT>def _shutdownHandler(self):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>self.send_response(<NUM_LIT:200>)<EOL>self.end_headers()<EOL>self.server.stop_request = True<EOL><DEDENT>if not hasattr(ExtHandler, \"<STR_LIT>\"):<EOL><INDENT>ExtHandler.do_SHUTDOWN = _shutdownHandler<EOL><DEDENT>(host, port) = self.server_address<EOL>conn = http_client.HTTPConnection(\"<STR_LIT>\".format(host, port))<EOL>conn.request(\"<STR_LIT>\", \"<STR_LIT:/>\")<EOL>conn.getresponse()<EOL>assert self.stop_request<EOL>", "docstring": "Stop serve_forever_stoppable().", "id": "f8608:c1:m1"}
{"signature": "def close(self):", "body": "self._dict = None<EOL>", "docstring": "Called on shutdown.", "id": "f8609:c0:m5"}
{"signature": "def open(self):", "body": "assert self._dict is None<EOL>self._dict = {}<EOL>", "docstring": "Called before first use.\n\n        May be implemented to initialize a storage.", "id": "f8609:c0:m4"}
{"signature": "def _flush(self):", "body": "_logger.debug(\"<STR_LIT>\")<EOL>self._lock.acquire_write()  <EOL>try:<EOL><INDENT>self._dict.sync()<EOL><DEDENT>finally:<EOL><INDENT>self._lock.release()<EOL><DEDENT>", "docstring": "Write persistent dictionary to disc.", "id": "f8609:c1:m2"}
{"signature": "def refresh(self, token, timeout):", "body": "assert token in self._dict, \"<STR_LIT>\"<EOL>assert timeout == -<NUM_LIT:1> or timeout > <NUM_LIT:0><EOL>if timeout < <NUM_LIT:0> or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:<EOL><INDENT>timeout = LockStorageDict.LOCK_TIME_OUT_MAX<EOL><DEDENT>self._lock.acquire_write()<EOL>try:<EOL><INDENT>lock = self._dict[token]<EOL>lock[\"<STR_LIT>\"] = timeout<EOL>lock[\"<STR_LIT>\"] = time.time() + timeout<EOL>self._dict[token] = lock<EOL>self._flush()<EOL><DEDENT>finally:<EOL><INDENT>self._lock.release()<EOL><DEDENT>return lock<EOL>", "docstring": "Modify an existing lock's timeout.\n\n        token:\n            Valid lock token.\n        timeout:\n            Suggested lifetime in seconds (-1 for infinite).\n            The real expiration time may be shorter than requested!\n        Returns:\n            Lock dictionary.\n            Raises ValueError, if token is invalid.", "id": "f8609:c0:m10"}
{"signature": "def delete(self, token):", "body": "self._lock.acquire_write()<EOL>try:<EOL><INDENT>lock = self._dict.get(token)<EOL>_logger.debug(\"<STR_LIT>\".format(lock_string(lock)))<EOL>if lock is None:<EOL><INDENT>return False<EOL><DEDENT>key = \"<STR_LIT>\".format(lock.get(\"<STR_LIT:root>\"))<EOL>if key in self._dict:<EOL><INDENT>tokList = self._dict[key]<EOL>if len(tokList) > <NUM_LIT:1>:<EOL><INDENT>tokList.remove(token)<EOL>self._dict[key] = tokList<EOL><DEDENT>else:<EOL><INDENT>del self._dict[key]<EOL><DEDENT><DEDENT>del self._dict[token]<EOL>self._flush()<EOL><DEDENT>finally:<EOL><INDENT>self._lock.release()<EOL><DEDENT>return True<EOL>", "docstring": "Delete lock.\n\n        Returns True on success. False, if token does not exist, or is expired.", "id": "f8609:c0:m11"}
{"signature": "def refresh(self, token, timeout=None):", "body": "if timeout is None:<EOL><INDENT>timeout = LockManager.LOCK_TIME_OUT_DEFAULT<EOL><DEDENT>return self.storage.refresh(token, timeout)<EOL>", "docstring": "Set new timeout for lock, if existing and valid.", "id": "f8615:c0:m6"}
{"signature": "def __init__(self, storage):", "body": "assert hasattr(storage, \"<STR_LIT>\")<EOL>self._lock = ReadWriteLock()<EOL>self.storage = storage<EOL>self.storage.open()<EOL>", "docstring": "storage:\n    LockManagerStorage object", "id": "f8615:c0:m0"}
{"signature": "def check_write_permission(self, url, depth, token_list, principal):", "body": "assert compat.is_native(url)<EOL>assert depth in (\"<STR_LIT:0>\", \"<STR_LIT>\")<EOL>_logger.debug(<EOL>\"<STR_LIT>\".format(<EOL>url, depth, token_list, principal<EOL>)<EOL>)<EOL>errcond = DAVErrorCondition(PRECONDITION_CODE_LockConflict)<EOL>self._lock.acquire_read()<EOL>try:<EOL><INDENT>u = url<EOL>while u:<EOL><INDENT>ll = self.get_url_lock_list(u)<EOL>_logger.debug(\"<STR_LIT>\".format(u))<EOL>for l in ll:<EOL><INDENT>_logger.debug(\"<STR_LIT>\".format(lock_string(l)))<EOL>if u != url and l[\"<STR_LIT>\"] != \"<STR_LIT>\":<EOL><INDENT>continue<EOL><DEDENT>elif principal == l[\"<STR_LIT>\"] and l[\"<STR_LIT>\"] in token_list:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>_logger.debug(<EOL>\"<STR_LIT>\".format(lock_string(l))<EOL>)<EOL>errcond.add_href(l[\"<STR_LIT:root>\"])<EOL><DEDENT><DEDENT>u = util.get_uri_parent(u)<EOL><DEDENT>if depth == \"<STR_LIT>\":<EOL><INDENT>childLocks = self.storage.get_lock_list(<EOL>url, include_root=False, include_children=True, token_only=False<EOL>)<EOL>for l in childLocks:<EOL><INDENT>assert util.is_child_uri(url, l[\"<STR_LIT:root>\"])<EOL>_logger.debug(<EOL>\"<STR_LIT>\".format(lock_string(l))<EOL>)<EOL>errcond.add_href(l[\"<STR_LIT:root>\"])<EOL><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>self._lock.release()<EOL><DEDENT>if len(errcond.hrefs) > <NUM_LIT:0>:<EOL><INDENT>raise DAVError(HTTP_LOCKED, err_condition=errcond)<EOL><DEDENT>return<EOL>", "docstring": "Check, if <principal> can modify <url>, otherwise raise HTTP_LOCKED.\n\n        If modifying <url> is prevented by a lock, DAVError(HTTP_LOCKED) is\n        raised. An embedded DAVErrorCondition contains the conflicting locks.\n\n        <url> may be modified by <principal>, if it is not currently locked\n        directly or indirectly (i.e. 
by a locked parent).\n        For depth-infinity operations, <url> also must not have locked children.\n\n        It is not enough to check whether a lock is owned by <principal>, but\n        also the token must be passed with the request. Because <principal> may\n        run two different applications.\n\n        See http://www.webdav.org/specs/rfc4918.html#lock-model\n            http://www.webdav.org/specs/rfc4918.html#rfc.section.7.4\n\n        TODO: verify assumptions:\n        - Parent locks WILL NOT be conflicting, if they are depth-0.\n        - Exclusive child locks WILL be conflicting, even if they are owned by <principal>.\n\n        @param url: URL that shall be modified, created, moved, or deleted\n        @param depth: \"0\"|\"infinity\"\n        @param token_list: list of lock tokens, that the principal submitted in If: header\n        @param principal: name of the principal requesting a lock\n\n        @return: None or raise error", "id": "f8615:c0:m16"}
{"signature": "def get_url_lock_list(self, url):", "body": "url = normalize_lock_root(url)<EOL>lockList = self.storage.get_lock_list(<EOL>url, include_root=True, include_children=False, token_only=False<EOL>)<EOL>return lockList<EOL>", "docstring": "Return list of lock_dict, if <url> is protected by at least one direct, valid lock.\n\n        Side effect: expired locks for this url are purged.", "id": "f8615:c0:m10"}
{"signature": "def release(self, token):", "body": "self.storage.delete(token)<EOL>", "docstring": "Delete lock.", "id": "f8615:c0:m8"}
{"signature": "def get_indirect_url_lock_list(self, url, principal=None):", "body": "url = normalize_lock_root(url)<EOL>lockList = []<EOL>u = url<EOL>while u:<EOL><INDENT>ll = self.storage.get_lock_list(<EOL>u, include_root=True, include_children=False, token_only=False<EOL>)<EOL>for l in ll:<EOL><INDENT>if u != url and l[\"<STR_LIT>\"] != \"<STR_LIT>\":<EOL><INDENT>continue  <EOL><DEDENT>if principal is None or principal == l[\"<STR_LIT>\"]:<EOL><INDENT>lockList.append(l)<EOL><DEDENT><DEDENT>u = util.get_uri_parent(u)<EOL><DEDENT>return lockList<EOL>", "docstring": "Return a list of valid lockDicts, that protect <path> directly or indirectly.\n\n        If a principal is given, only locks owned by this principal are returned.\n        Side effect: expired locks for this path and all parents are purged.", "id": "f8615:c0:m11"}
{"signature": "def xml_to_bytes(element, pretty_print=False):", "body": "if use_lxml:<EOL><INDENT>xml = etree.tostring(<EOL>element, encoding=\"<STR_LIT>\", xml_declaration=True, pretty_print=pretty_print<EOL>)<EOL><DEDENT>else:<EOL><INDENT>xml = etree.tostring(element, encoding=\"<STR_LIT>\")<EOL>if not xml.startswith(b\"<STR_LIT>\"):<EOL><INDENT>xml = b'<STR_LIT>' + xml<EOL><DEDENT><DEDENT>assert xml.startswith(b\"<STR_LIT>\")  <EOL>return xml<EOL>", "docstring": "Wrapper for etree.tostring, that takes care of unsupported pretty_print\n    option and prepends an encoding header.", "id": "f8617:m2"}
{"signature": "def make_prop_el():", "body": "if use_lxml:<EOL><INDENT>return etree.Element(\"<STR_LIT>\", nsmap={\"<STR_LIT:D>\": \"<STR_LIT>\"})<EOL><DEDENT>return etree.Element(\"<STR_LIT>\")<EOL>", "docstring": "Wrapper for etree.Element, that takes care of unsupported nsmap option.", "id": "f8617:m4"}
{"signature": "def string_to_xml(text):", "body": "try:<EOL><INDENT>return etree.XML(text)<EOL><DEDENT>except Exception:<EOL><INDENT>_logger.error(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL>_logger.error(\"<STR_LIT>\".format(text))<EOL>raise<EOL><DEDENT>", "docstring": "Convert XML string into etree.Element.", "id": "f8617:m1"}
{"signature": "def make_multistatus_el():", "body": "if use_lxml:<EOL><INDENT>return etree.Element(\"<STR_LIT>\", nsmap={\"<STR_LIT:D>\": \"<STR_LIT>\"})<EOL><DEDENT>return etree.Element(\"<STR_LIT>\")<EOL>", "docstring": "Wrapper for etree.Element, that takes care of unsupported nsmap option.", "id": "f8617:m3"}
{"signature": "def make_sub_element(parent, tag, nsmap=None):", "body": "if use_lxml:<EOL><INDENT>return etree.SubElement(parent, tag, nsmap=nsmap)<EOL><DEDENT>return etree.SubElement(parent, tag)<EOL>", "docstring": "Wrapper for etree.SubElement, that takes care of unsupported nsmap option.", "id": "f8617:m5"}
{"signature": "def element_content_as_string(element):", "body": "if len(element) == <NUM_LIT:0>:<EOL><INDENT>return element.text or \"<STR_LIT>\"  <EOL><DEDENT>stream = compat.StringIO()<EOL>for childnode in element:<EOL><INDENT>stream.write(xml_to_bytes(childnode, pretty_print=False) + \"<STR_LIT:\\n>\")<EOL><DEDENT>s = stream.getvalue()<EOL>stream.close()<EOL>return s<EOL>", "docstring": "Serialize etree.Element.\n\n    Note: element may contain more than one child or only text (i.e. no child\n          at all). Therefore the resulting string may raise an exception, when\n          passed back to etree.XML().", "id": "f8617:m6"}
{"signature": "def acquire_write(self, timeout=None):", "body": "if timeout is not None:<EOL><INDENT>endtime = time() + timeout<EOL><DEDENT>me, upgradewriter = currentThread(), False<EOL>self.__condition.acquire()<EOL>try:<EOL><INDENT>if self.__writer is me:<EOL><INDENT>self.__writercount += <NUM_LIT:1><EOL>return<EOL><DEDENT>elif me in self.__readers:<EOL><INDENT>if self.__upgradewritercount:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>upgradewriter = True<EOL>self.__upgradewritercount = self.__readers.pop(me)<EOL><DEDENT>else:<EOL><INDENT>self.__pendingwriters.append(me)<EOL><DEDENT>while True:<EOL><INDENT>if not self.__readers and self.__writer is None:<EOL><INDENT>if self.__upgradewritercount:<EOL><INDENT>if upgradewriter:<EOL><INDENT>self.__writer = me<EOL>self.__writercount = self.__upgradewritercount + <NUM_LIT:1><EOL>self.__upgradewritercount = <NUM_LIT:0><EOL>return<EOL><DEDENT><DEDENT>elif self.__pendingwriters[<NUM_LIT:0>] is me:<EOL><INDENT>self.__writer = me<EOL>self.__writercount = <NUM_LIT:1><EOL>self.__pendingwriters = self.__pendingwriters[<NUM_LIT:1>:]<EOL>return<EOL><DEDENT><DEDENT>if timeout is not None:<EOL><INDENT>remaining = endtime - time()<EOL>if remaining <= <NUM_LIT:0>:<EOL><INDENT>if upgradewriter:<EOL><INDENT>self.__readers[me] = self.__upgradewritercount<EOL>self.__upgradewritercount = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>self.__pendingwriters.remove(me)<EOL><DEDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>self.__condition.wait(remaining)<EOL><DEDENT>else:<EOL><INDENT>self.__condition.wait()<EOL><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>self.__condition.release()<EOL><DEDENT>", "docstring": "Acquire a write lock for the current thread, waiting at most\n        timeout seconds or doing a non-blocking check in case timeout is <= 0.\n\n        In case the write lock cannot be serviced due to the deadlock\n        condition mentioned above, a ValueError is raised.\n\n        In case timeout is None, the call to 
acquire_write blocks until the\n        lock request can be serviced.\n\n        In case the timeout expires before the lock could be serviced, a\n        RuntimeError is thrown.", "id": "f8619:c0:m2"}
{"signature": "def acquire_read(self, timeout=None):", "body": "if timeout is not None:<EOL><INDENT>endtime = time() + timeout<EOL><DEDENT>me = currentThread()<EOL>self.__condition.acquire()<EOL>try:<EOL><INDENT>if self.__writer is me:<EOL><INDENT>self.__writercount += <NUM_LIT:1><EOL>return<EOL><DEDENT>while True:<EOL><INDENT>if self.__writer is None:<EOL><INDENT>if self.__upgradewritercount or self.__pendingwriters:<EOL><INDENT>if me in self.__readers:<EOL><INDENT>self.__readers[me] += <NUM_LIT:1><EOL>return<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.__readers[me] = self.__readers.get(me, <NUM_LIT:0>) + <NUM_LIT:1><EOL>return<EOL><DEDENT><DEDENT>if timeout is not None:<EOL><INDENT>remaining = endtime - time()<EOL>if remaining <= <NUM_LIT:0>:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>self.__condition.wait(remaining)<EOL><DEDENT>else:<EOL><INDENT>self.__condition.wait()<EOL><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>self.__condition.release()<EOL><DEDENT>", "docstring": "Acquire a read lock for the current thread, waiting at most\n        timeout seconds or doing a non-blocking check in case timeout is <= 0.\n\n        In case timeout is None, the call to acquire_read blocks until the\n        lock request can be serviced.\n\n        In case the timeout expires before the lock could be serviced, a\n        RuntimeError is thrown.", "id": "f8619:c0:m1"}
{"signature": "def get_property_value(self, name):", "body": "<EOL>if name == \"<STR_LIT>\":<EOL><INDENT>return self.data[\"<STR_LIT:key>\"]<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self.data[\"<STR_LIT:title>\"]<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self.data[\"<STR_LIT:status>\"]<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self.data[\"<STR_LIT>\"]<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return \"<STR_LIT:U+002C>\".join(self.data[\"<STR_LIT>\"])<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self.data[\"<STR_LIT:description>\"]<EOL><DEDENT>return super(VirtualResource, self).get_property_value(name)<EOL>", "docstring": "Return the value of a property.\n\n        See get_property_value()", "id": "f8621:c3:m9"}
{"signature": "def handle_copy(self, dest_path, depth_infinity):", "body": "<EOL>if \"<STR_LIT>\" not in dest_path:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>catType, tag, _rest = util.save_split(dest_path.strip(\"<STR_LIT:/>\"), \"<STR_LIT:/>\", <NUM_LIT:2>)<EOL>assert catType == \"<STR_LIT>\"<EOL>if tag not in self.data[\"<STR_LIT>\"]:<EOL><INDENT>self.data[\"<STR_LIT>\"].append(tag)<EOL><DEDENT>return True<EOL>", "docstring": "Change semantic of COPY to add resource tags.", "id": "f8621:c3:m5"}
{"signature": "def get_property_names(self, is_allprop):", "body": "<EOL>propNameList = super(VirtualResource, self).get_property_names(is_allprop)<EOL>propNameList.extend(VirtualResource._supportedProps)<EOL>return propNameList<EOL>", "docstring": "Return list of supported property names in Clark Notation.\n\n        See DAVResource.get_property_names()", "id": "f8621:c3:m8"}
{"signature": "def handle_delete(self):", "body": "<EOL>if \"<STR_LIT>\" not in self.path:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>catType, tag, _rest = util.save_split(self.path.strip(\"<STR_LIT:/>\"), \"<STR_LIT:/>\", <NUM_LIT:2>)<EOL>assert catType == \"<STR_LIT>\"<EOL>assert tag in self.data[\"<STR_LIT>\"]<EOL>self.data[\"<STR_LIT>\"].remove(tag)<EOL>return True<EOL>", "docstring": "Change semantic of DELETE to remove resource tags.", "id": "f8621:c3:m4"}
{"signature": "def handle_move(self, dest_path):", "body": "<EOL>if \"<STR_LIT>\" not in self.path:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>if \"<STR_LIT>\" not in dest_path:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>catType, tag, _rest = util.save_split(self.path.strip(\"<STR_LIT:/>\"), \"<STR_LIT:/>\", <NUM_LIT:2>)<EOL>assert catType == \"<STR_LIT>\"<EOL>assert tag in self.data[\"<STR_LIT>\"]<EOL>self.data[\"<STR_LIT>\"].remove(tag)<EOL>catType, tag, _rest = util.save_split(dest_path.strip(\"<STR_LIT:/>\"), \"<STR_LIT:/>\", <NUM_LIT:2>)<EOL>assert catType == \"<STR_LIT>\"<EOL>if tag not in self.data[\"<STR_LIT>\"]:<EOL><INDENT>self.data[\"<STR_LIT>\"].append(tag)<EOL><DEDENT>return True<EOL>", "docstring": "Change semantic of MOVE to change resource tags.", "id": "f8621:c3:m6"}
{"signature": "def _get_log(self, limit=None):", "body": "self.ui.pushbuffer()<EOL>commands.log(self.ui, self.repo, limit=limit, date=None, rev=None, user=None)<EOL>res = self.ui.popbuffer().strip()<EOL>logList = []<EOL>for logentry in res.split(\"<STR_LIT>\"):<EOL><INDENT>log = {}<EOL>logList.append(log)<EOL>for line in logentry.split(\"<STR_LIT:\\n>\"):<EOL><INDENT>k, v = line.split(\"<STR_LIT::>\", <NUM_LIT:1>)<EOL>assert k in (\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:user>\", \"<STR_LIT:date>\", \"<STR_LIT>\")<EOL>log[k.strip()] = v.strip()<EOL><DEDENT>log[\"<STR_LIT>\"] = util.parse_time_string(log[\"<STR_LIT:date>\"])<EOL>local_id, unid = log[\"<STR_LIT>\"].split(\"<STR_LIT::>\")<EOL>log[\"<STR_LIT>\"] = int(local_id)<EOL>log[\"<STR_LIT>\"] = unid<EOL><DEDENT>return logList<EOL>", "docstring": "Read log entries into a list of dictionaries.", "id": "f8623:c1:m1"}
{"signature": "def create_collection(self, name):", "body": "assert self.is_collection<EOL>self._check_write_access()<EOL>collpath = self._getFilePath(name)<EOL>os.mkdir(collpath)<EOL>filepath = self._getFilePath(name, \"<STR_LIT>\")<EOL>f = open(filepath, \"<STR_LIT:w>\")<EOL>f.write(\"<STR_LIT>\")<EOL>f.close()<EOL>commands.add(self.provider.ui, self.provider.repo, filepath)<EOL>", "docstring": "Create a new collection as member of self.\n\n        A dummy member is created, because Mercurial doesn't handle folders.", "id": "f8623:c0:m19"}
{"signature": "def delete(self):", "body": "self._check_write_access()<EOL>filepath = self._getFilePath()<EOL>commands.remove(self.provider.ui, self.provider.repo, filepath, force=True)<EOL>", "docstring": "Remove this resource (recursive).", "id": "f8623:c0:m24"}
{"signature": "def prevent_locking(self):", "body": "if self.rev is not None:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Return True, to prevent locking.\n\n        See prevent_locking()", "id": "f8623:c0:m17"}
{"signature": "def _check_write_access(self):", "body": "if self.rev is not None:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>", "docstring": "Raise HTTP_FORBIDDEN, if resource is unwritable.", "id": "f8623:c0:m3"}
{"signature": "def handle_copy(self, dest_path, depth_infinity):", "body": "destType, destHgPath = util.pop_path(dest_path)<EOL>destHgPath = destHgPath.strip(\"<STR_LIT:/>\")<EOL>ui = self.provider.ui<EOL>repo = self.provider.repo<EOL>_logger.info(\"<STR_LIT>\" % (self.localHgPath, destHgPath))<EOL>if self.rev is None and destType == \"<STR_LIT>\":<EOL><INDENT>commands.copy(ui, repo, self.localHgPath, destHgPath, force=True)<EOL><DEDENT>elif self.rev is None and destType == \"<STR_LIT>\":<EOL><INDENT>self._commit(\"<STR_LIT>\" % (self.path, dest_path))<EOL><DEDENT>else:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>return True<EOL>", "docstring": "Handle a COPY request natively.", "id": "f8623:c0:m25"}
{"signature": "def handle_move(self, dest_path):", "body": "destType, destHgPath = util.pop_path(dest_path)<EOL>destHgPath = destHgPath.strip(\"<STR_LIT:/>\")<EOL>ui = self.provider.ui<EOL>repo = self.provider.repo<EOL>_logger.info(\"<STR_LIT>\" % (self.localHgPath, destHgPath))<EOL>if self.rev is None and destType == \"<STR_LIT>\":<EOL><INDENT>commands.rename(ui, repo, self.localHgPath, destHgPath, force=True)<EOL><DEDENT>elif self.rev is None and destType == \"<STR_LIT>\":<EOL><INDENT>self._commit(\"<STR_LIT>\" % (self.path, dest_path))<EOL><DEDENT>else:<EOL><INDENT>raise DAVError(HTTP_FORBIDDEN)<EOL><DEDENT>return True<EOL>", "docstring": "Handle a MOVE request natively.", "id": "f8623:c0:m26"}
{"signature": "def set_property_value(self, name, value, dry_run=False):", "body": "raise DAVError(<EOL>HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty<EOL>)<EOL>", "docstring": "Set or remove property value.\n\n        See DAVResource.set_property_value()", "id": "f8624:c0:m14"}
{"signature": "def _init(self):", "body": "<EOL>self.provider._count_get_resource_inst_init += <NUM_LIT:1><EOL>tableName, primKey = self.provider._split_path(self.path)<EOL>display_type = \"<STR_LIT>\"<EOL>displayTypeComment = \"<STR_LIT>\"<EOL>contentType = \"<STR_LIT>\"<EOL>if tableName is None:<EOL><INDENT>display_type = \"<STR_LIT>\"<EOL><DEDENT>elif primKey is None:  <EOL><INDENT>display_type = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>contentType = \"<STR_LIT>\"<EOL>if primKey == \"<STR_LIT>\":<EOL><INDENT>display_type = \"<STR_LIT>\"<EOL>displayTypeComment = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>display_type = \"<STR_LIT>\"<EOL>displayTypeComment = \"<STR_LIT>\"<EOL><DEDENT><DEDENT>is_collection = primKey is None<EOL>self._cache = {<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": contentType,<EOL>\"<STR_LIT>\": time.time(),<EOL>\"<STR_LIT>\": self.name,<EOL>\"<STR_LIT>\": hashlib.md5().update(self.path).hexdigest(),<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT>\": {\"<STR_LIT:type>\": display_type, \"<STR_LIT>\": displayTypeComment},<EOL>}<EOL>if not is_collection:<EOL><INDENT>self._cache[\"<STR_LIT>\"] = time.time()<EOL><DEDENT>_logger.debug(\"<STR_LIT>\" % self.provider._count_initConnection)<EOL>", "docstring": "Read resource information into self._cache, for cached access.\n\n        See DAVResource._init()", "id": "f8624:c0:m1"}
{"signature": "def get_resource_inst(self, path, environ):", "body": "<EOL>self._count_get_resource_inst += <NUM_LIT:1><EOL>if not self.exists(path, environ):<EOL><INDENT>return None<EOL><DEDENT>_tableName, primKey = self._split_path(path)<EOL>is_collection = primKey is None<EOL>return MySQLBrowserResource(self, path, is_collection, environ)<EOL>", "docstring": "Return info dictionary for path.\n\n        See get_resource_inst()", "id": "f8624:c1:m12"}
{"signature": "def get_member_list(self):", "body": "members = []<EOL>conn = self.provider._init_connection()<EOL>try:<EOL><INDENT>tableName, primKey = self.provider._split_path(self.path)<EOL>if tableName is None:<EOL><INDENT>retlist = self.provider._list_tables(conn)<EOL>for name in retlist:<EOL><INDENT>members.append(<EOL>MySQLBrowserResource(<EOL>self.provider,<EOL>util.join_uri(self.path, name),<EOL>True,<EOL>self.environ,<EOL>)<EOL>)<EOL><DEDENT><DEDENT>elif primKey is None:<EOL><INDENT>pri_key = self.provider._find_primary_key(conn, tableName)<EOL>if pri_key is not None:<EOL><INDENT>retlist = self.provider._list_fields(conn, tableName, pri_key)<EOL>for name in retlist:<EOL><INDENT>members.append(<EOL>MySQLBrowserResource(<EOL>self.provider,<EOL>util.join_uri(self.path, name),<EOL>False,<EOL>self.environ,<EOL>)<EOL>)<EOL><DEDENT><DEDENT>members.insert(<EOL><NUM_LIT:0>,<EOL>MySQLBrowserResource(<EOL>self.provider,<EOL>util.join_uri(self.path, \"<STR_LIT>\"),<EOL>False,<EOL>self.environ,<EOL>),<EOL>)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>conn.close()<EOL><DEDENT>return members<EOL>", "docstring": "Return list of (direct) collection member names (UTF-8 byte strings).\n\n        See DAVResource.get_member_list()", "id": "f8624:c0:m10"}
{"signature": "def get_property_names(self, is_allprop):", "body": "<EOL>propNames = super(MySQLBrowserResource, self).get_property_names(is_allprop)<EOL>tableName, primKey = self.provider._split_path(self.path)<EOL>if primKey is not None:<EOL><INDENT>conn = self.provider._init_connection()<EOL>fieldlist = self.provider._get_field_list(conn, tableName)<EOL>for fieldname in fieldlist:<EOL><INDENT>propNames.append(\"<STR_LIT>\" % (tableName, fieldname))<EOL><DEDENT>conn.close()<EOL><DEDENT>return propNames<EOL>", "docstring": "Return list of supported property names in Clark Notation.\n\n        Return supported live and dead properties. (See also DAVProvider.get_property_names().)\n\n        In addition, all table field names are returned as properties.", "id": "f8624:c0:m12"}
{"signature": "def get_content(self):", "body": "filestream = compat.StringIO()<EOL>tableName, primKey = self.provider._split_path(self.path)<EOL>if primKey is not None:<EOL><INDENT>conn = self.provider._init_connection()<EOL>listFields = self.provider._get_field_list(conn, tableName)<EOL>csvwriter = csv.DictWriter(filestream, listFields, extrasaction=\"<STR_LIT:ignore>\")<EOL>dictFields = {}<EOL>for field_name in listFields:<EOL><INDENT>dictFields[field_name] = field_name<EOL><DEDENT>csvwriter.writerow(dictFields)<EOL>if primKey == \"<STR_LIT>\":<EOL><INDENT>cursor = conn.cursor(MySQLdb.cursors.DictCursor)<EOL>cursor.execute(\"<STR_LIT>\" + self.provider._db + \"<STR_LIT:.>\" + tableName)<EOL>result_set = cursor.fetchall()<EOL>for row in result_set:<EOL><INDENT>csvwriter.writerow(row)<EOL><DEDENT>cursor.close()<EOL><DEDENT>else:<EOL><INDENT>row = self.provider._get_record_by_primary_key(conn, tableName, primKey)<EOL>if row is not None:<EOL><INDENT>csvwriter.writerow(row)<EOL><DEDENT><DEDENT>conn.close()<EOL><DEDENT>filestream.seek(<NUM_LIT:0>)<EOL>return filestream<EOL>", "docstring": "Open content as a stream for reading.\n\n        See DAVResource.get_content()", "id": "f8624:c0:m11"}
{"signature": "def run_wsgidav_server(with_auth, with_ssl, provider=None, **kwargs):", "body": "package_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"<STR_LIT:..>\"))<EOL>share_path = os.path.join(gettempdir(), \"<STR_LIT>\")<EOL>if not os.path.exists(share_path):<EOL><INDENT>os.mkdir(share_path)<EOL><DEDENT>if provider is None:<EOL><INDENT>provider = FilesystemProvider(share_path)<EOL><DEDENT>config = {<EOL>\"<STR_LIT:host>\": \"<STR_LIT:127.0.0.1>\",<EOL>\"<STR_LIT:port>\": <NUM_LIT>,<EOL>\"<STR_LIT>\": {\"<STR_LIT:/>\": provider},<EOL>\"<STR_LIT>\": {\"<STR_LIT>\": None},<EOL>\"<STR_LIT>\": {\"<STR_LIT>\": {\"<STR_LIT:*>\": True}},  <EOL>\"<STR_LIT>\": <NUM_LIT:1>,<EOL>\"<STR_LIT>\": [],<EOL>\"<STR_LIT>\": True,  <EOL>\"<STR_LIT>\": True,  <EOL>}<EOL>if with_auth:<EOL><INDENT>config[\"<STR_LIT>\"].update(<EOL>{\"<STR_LIT>\": True, \"<STR_LIT>\": False, \"<STR_LIT>\": False}<EOL>)<EOL>config[\"<STR_LIT>\"].update(<EOL>{<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT:*>\": {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT:password>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": [],<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT:password>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": [],<EOL>},<EOL>}<EOL>}<EOL>}<EOL>)<EOL><DEDENT>if with_ssl:<EOL><INDENT>config.update(<EOL>{<EOL>\"<STR_LIT>\": os.path.join(<EOL>package_path, \"<STR_LIT>\"<EOL>),<EOL>\"<STR_LIT>\": os.path.join(<EOL>package_path, \"<STR_LIT>\"<EOL>),<EOL>\"<STR_LIT>\": None,<EOL>}<EOL>)<EOL><DEDENT>util.init_logging(config)<EOL>if kwargs.get(\"<STR_LIT>\"):<EOL><INDENT>config[\"<STR_LIT>\"] = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>app = WsgiDAVApp(config)<EOL>from wsgidav.server.server_cli import _run_cheroot<EOL>_run_cheroot(app, config, \"<STR_LIT>\")<EOL>", "docstring": "Start blocking WsgiDAV server (called as a separate process).", "id": "f8625:m1"}
{"signature": "def check_response(self, status=None):", "body": "__tracebackhide__ = True<EOL>res = self.response<EOL>full_status = \"<STR_LIT>\" % (res.status_code, res.reason)<EOL>content_length = int(res.headers.get(\"<STR_LIT>\", <NUM_LIT:0>))<EOL>if content_length and len(res.content) != content_length:<EOL><INDENT>raise AppError(<EOL>\"<STR_LIT>\"<EOL>% (content_length, len(res.content))<EOL>)<EOL><DEDENT>if status == \"<STR_LIT:*>\":<EOL><INDENT>return<EOL><DEDENT>if isinstance(status, (list, tuple)):<EOL><INDENT>if res.status_code not in status:<EOL><INDENT>raise AppError(<EOL>\"<STR_LIT>\".format(<EOL>full_status,<EOL>\"<STR_LIT:U+002CU+0020>\".join(map(str, status)),<EOL>self.request[\"<STR_LIT>\"],<EOL>self.request[\"<STR_LIT:path>\"],<EOL>res.content,<EOL>)<EOL>)<EOL><DEDENT>return<EOL><DEDENT>if status is None:<EOL><INDENT>if res.status_code >= <NUM_LIT:200> and res.status_code < <NUM_LIT>:<EOL><INDENT>return<EOL><DEDENT>raise AssertionError(<EOL>\"<STR_LIT>\".format(<EOL>full_status,<EOL>self.request[\"<STR_LIT>\"],<EOL>self.request[\"<STR_LIT:path>\"],<EOL>res.content,<EOL>)<EOL>)<EOL><DEDENT>if status != res.status_code:<EOL><INDENT>raise AppError(\"<STR_LIT>\" % (full_status, status))<EOL><DEDENT>", "docstring": "Raise an error, if self.response doesn\"t match expected status.\n\n        Inspired by paste.fixture", "id": "f8632:c1:m22"}
{"signature": "def propfind(<EOL>self, path, properties=\"<STR_LIT>\", namespace=\"<STR_LIT>\", depth=None, headers=None<EOL>):", "body": "<EOL>root = ElementTree.Element(\"<STR_LIT>\")<EOL>if is_native(properties):<EOL><INDENT>ElementTree.SubElement(root, \"<STR_LIT>\" % properties)<EOL><DEDENT>else:<EOL><INDENT>props = ElementTree.SubElement(root, \"<STR_LIT>\")<EOL>object_to_etree(props, properties, namespace=namespace)<EOL><DEDENT>tree = ElementTree.ElementTree(root)<EOL>body = self._tree_to_binary_body(tree)<EOL>if headers is None:<EOL><INDENT>headers = {}<EOL><DEDENT>if depth is not None:<EOL><INDENT>headers[\"<STR_LIT>\"] = depth<EOL><DEDENT>headers[\"<STR_LIT:Content-Type>\"] = '<STR_LIT>'<EOL>self._request(\"<STR_LIT>\", path, body=body, headers=headers)<EOL>if self.response is not None and hasattr(self.response, \"<STR_LIT>\") is True:<EOL><INDENT>property_responses = {}<EOL>for response in self.response.tree:<EOL><INDENT>property_href = response.find(\"<STR_LIT>\")<EOL>property_stat = response.find(\"<STR_LIT>\")<EOL>def parse_props(props):<EOL><INDENT>property_dict = {}<EOL>for prop in props:<EOL><INDENT>if prop.tag.find(\"<STR_LIT>\") is not -<NUM_LIT:1>:<EOL><INDENT>name = prop.tag.split(\"<STR_LIT:}>\")[-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>name = prop.tag<EOL><DEDENT>if len(list(prop)):<EOL><INDENT>property_dict[name] = parse_props(prop)<EOL><DEDENT>else:<EOL><INDENT>property_dict[name] = prop.text<EOL><DEDENT><DEDENT>return property_dict<EOL><DEDENT>if property_href is not None and property_stat is not None:<EOL><INDENT>property_dict = parse_props(property_stat.find(\"<STR_LIT>\"))<EOL>property_responses[property_href.text] = property_dict<EOL><DEDENT><DEDENT>return property_responses<EOL><DEDENT>", "docstring": "Property find. If properties arg is unspecified it defaults to 'allprop'.", "id": "f8632:c1:m17"}
{"signature": "def unlock(self, path, token, headers=None):", "body": "if headers is None:<EOL><INDENT>headers = {}<EOL><DEDENT>headers[\"<STR_LIT>\"] = \"<STR_LIT>\" % token<EOL>self._request(\"<STR_LIT>\", path, body=None, headers=headers)<EOL>", "docstring": "Unlock DAV resource with token", "id": "f8632:c1:m21"}
{"signature": "def _tree_to_binary_body(self, tree):", "body": "<EOL>body = BytesIO()<EOL>tree.write(body)<EOL>body = body.getvalue()  <EOL>body = b'<STR_LIT>' + body<EOL>assert is_bytes(body)<EOL>return body<EOL>", "docstring": "Return tree content as xml bytestring.", "id": "f8632:c1:m4"}
{"signature": "def copy_collection(<EOL>self, source, destination, depth=\"<STR_LIT>\", overwrite=True, headers=None<EOL>):", "body": "body = (<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>b\"<STR_LIT>\"<EOL>)<EOL>if headers is None:<EOL><INDENT>headers = {}<EOL><DEDENT>headers[\"<STR_LIT:Content-Type>\"] = '<STR_LIT>'<EOL>self.copy(<EOL>source,<EOL>destination,<EOL>body=body,<EOL>depth=depth,<EOL>overwrite=overwrite,<EOL>headers=headers,<EOL>)<EOL>", "docstring": "Copy DAV collection.\n\n        Note: support for the \"propertybehavior\" request body for COPY and MOVE\n              has been removed with RFC4918", "id": "f8632:c1:m14"}
{"signature": "def put(self, path, body=None, f=None, headers=None):", "body": "assert body is None or is_bytes(body)<EOL>if f is not None:<EOL><INDENT>body = f.read()<EOL><DEDENT>self._request(\"<STR_LIT>\", path, body=body, headers=headers)<EOL>", "docstring": "Put resource with body", "id": "f8632:c1:m9"}
{"signature": "def proppatch(<EOL>self, path, set_props=None, remove_props=None, namespace=\"<STR_LIT>\", headers=None<EOL>):", "body": "root = ElementTree.Element(\"<STR_LIT>\")<EOL>if set_props is not None:<EOL><INDENT>prop_set = ElementTree.SubElement(root, \"<STR_LIT>\")<EOL>for p in set_props:<EOL><INDENT>prop_prop = ElementTree.SubElement(prop_set, \"<STR_LIT>\")<EOL>object_to_etree(prop_prop, p, namespace=namespace)<EOL><DEDENT><DEDENT>if remove_props is not None:<EOL><INDENT>prop_remove = ElementTree.SubElement(root, \"<STR_LIT>\")<EOL>for p in remove_props:<EOL><INDENT>prop_prop = ElementTree.SubElement(prop_remove, \"<STR_LIT>\")<EOL>object_to_etree(prop_prop, p, namespace=namespace)<EOL><DEDENT><DEDENT>tree = ElementTree.ElementTree(root)<EOL>body = self._tree_to_binary_body(tree)<EOL>if headers is None:<EOL><INDENT>headers = {}<EOL><DEDENT>headers[\"<STR_LIT:Content-Type>\"] = '<STR_LIT>'<EOL>self._request(\"<STR_LIT>\", path, body=body, headers=headers)<EOL>", "docstring": "Patch properties on a DAV resource.\n\n        If namespace is not specified, the DAV namespace is used for all properties.", "id": "f8632:c1:m18"}
{"signature": "def set_lock(<EOL>self,<EOL>path,<EOL>owner,<EOL>lock_type=\"<STR_LIT>\",<EOL>lock_scope=\"<STR_LIT>\",<EOL>depth=None,<EOL>headers=None,<EOL>):", "body": "root = ElementTree.Element(\"<STR_LIT>\")<EOL>object_to_etree(<EOL>root,<EOL>{\"<STR_LIT>\": lock_type, \"<STR_LIT>\": lock_scope, \"<STR_LIT>\": {\"<STR_LIT>\": owner}},<EOL>namespace=\"<STR_LIT>\",<EOL>)<EOL>tree = ElementTree.ElementTree(root)<EOL>if headers is None:<EOL><INDENT>headers = {}<EOL><DEDENT>if depth is not None:<EOL><INDENT>headers[\"<STR_LIT>\"] = depth<EOL><DEDENT>headers[\"<STR_LIT:Content-Type>\"] = '<STR_LIT>'<EOL>headers[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>body = self._tree_to_binary_body(tree)<EOL>self._request(\"<STR_LIT>\", path, body=body, headers=headers)<EOL>locks = self.response.tree.findall(\"<STR_LIT>\")<EOL>lock_list = []<EOL>for lock in locks:<EOL><INDENT>lock_list.append(lock[<NUM_LIT:0>].text.strip().strip(\"<STR_LIT:\\n>\"))<EOL><DEDENT>return lock_list<EOL>", "docstring": "Set a lock on a dav resource", "id": "f8632:c1:m19"}
{"signature": "def mkcol(self, path, headers=None):", "body": "self._request(\"<STR_LIT>\", path=path, headers=headers)<EOL>", "docstring": "Make DAV collection", "id": "f8632:c1:m11"}
{"signature": "def copy(<EOL>self,<EOL>source,<EOL>destination,<EOL>body=None,<EOL>depth=\"<STR_LIT>\",<EOL>overwrite=True,<EOL>headers=None,<EOL>):", "body": "<EOL>assert body is None or is_bytes(body)<EOL>if headers is None:<EOL><INDENT>headers = {\"<STR_LIT>\": destination}<EOL><DEDENT>else:<EOL><INDENT>headers[\"<STR_LIT>\"] = self._url.geturl() + destination<EOL><DEDENT>if overwrite is False:<EOL><INDENT>headers[\"<STR_LIT>\"] = \"<STR_LIT:F>\"<EOL><DEDENT>headers[\"<STR_LIT>\"] = depth<EOL>self._request(\"<STR_LIT>\", source, body=body, headers=headers)<EOL>", "docstring": "Copy DAV resource", "id": "f8632:c1:m13"}
{"signature": "def head(self, path, headers=None):", "body": "self._request(\"<STR_LIT>\", path, headers=headers)<EOL>", "docstring": "Basic HEAD request", "id": "f8632:c1:m8"}
{"signature": "def move_collection(<EOL>self, source, destination, depth=\"<STR_LIT>\", overwrite=True, headers=None<EOL>):", "body": "body = (<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>b\"<STR_LIT>\"<EOL>)<EOL>if headers is None:<EOL><INDENT>headers = {}<EOL><DEDENT>headers[\"<STR_LIT:Content-Type>\"] = '<STR_LIT>'<EOL>self.move(<EOL>source, destination, body, depth=depth, overwrite=overwrite, headers=headers<EOL>)<EOL>", "docstring": "Move DAV collection and copy all properties.\n\n        Note: support for the \"propertybehavior\" request body for COPY and MOVE\n              has been removed with RFC4918", "id": "f8632:c1:m16"}
{"signature": "def post(self, path, body=None, headers=None):", "body": "assert body is None or is_bytes(body)<EOL>self._request(\"<STR_LIT:POST>\", path, body=body, headers=headers)<EOL>", "docstring": "POST resource with body", "id": "f8632:c1:m10"}
{"signature": "def _isLockResultFault(self, lock, conflictList, status=None):", "body": "try:<EOL><INDENT>if lock is not None:<EOL><INDENT>return False<EOL><DEDENT>if len(conflictList) < <NUM_LIT:1>:<EOL><INDENT>return False<EOL><DEDENT>resultTuple = conflictList[<NUM_LIT:0>]<EOL>if (<EOL>len(resultTuple) != <NUM_LIT:2><EOL>or not self._isLockDict(resultTuple[<NUM_LIT:0>])<EOL>or not isinstance(resultTuple[<NUM_LIT:1>], DAVError)<EOL>):<EOL><INDENT>return False<EOL><DEDENT>elif status and status != DAVError.value:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL><DEDENT>except Exception:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Return True, if it is a valid result tuple containing a DAVError.", "id": "f8635:c0:m5"}
{"signature": "def _acquire(<EOL>self,<EOL>url,<EOL>lock_type,<EOL>lock_scope,<EOL>lock_depth,<EOL>lock_owner,<EOL>timeout,<EOL>principal,<EOL>token_list,<EOL>):", "body": "try:<EOL><INDENT>return self.lm.acquire(<EOL>url,<EOL>lock_type,<EOL>lock_scope,<EOL>lock_depth,<EOL>lock_owner,<EOL>timeout,<EOL>principal,<EOL>token_list,<EOL>)<EOL><DEDENT>except DAVError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Wrapper for lm.acquire, that returns None instead of raising DAVError.", "id": "f8635:c0:m2"}
{"signature": "def _VarintBytes(value):", "body": "pieces = []<EOL>_EncodeVarint(pieces.append, value)<EOL>return b\"<STR_LIT>\".join(pieces)<EOL>", "docstring": "Encode the given integer as a varint and return the bytes.  This is only\n    called at startup time so it doesn't need to be fast.", "id": "f8647:m14"}
{"signature": "def _ModifiedSizer(compute_value_size, modify_value):", "body": "def SpecificSizer(field_number, is_repeated, is_packed):<EOL><INDENT>tag_size = _TagSize(field_number)<EOL>if is_packed:<EOL><INDENT>local_VarintSize = _VarintSize<EOL>def PackedFieldSize(value):<EOL><INDENT>result = <NUM_LIT:0><EOL>for element in value:<EOL><INDENT>result += compute_value_size(modify_value(element))<EOL><DEDENT>return result + local_VarintSize(result) + tag_size<EOL><DEDENT>return PackedFieldSize<EOL><DEDENT>elif is_repeated:<EOL><INDENT>def RepeatedFieldSize(value):<EOL><INDENT>result = tag_size * len(value)<EOL>for element in value:<EOL><INDENT>result += compute_value_size(modify_value(element))<EOL><DEDENT>return result<EOL><DEDENT>return RepeatedFieldSize<EOL><DEDENT>else:<EOL><INDENT>def FieldSize(value):<EOL><INDENT>return tag_size + compute_value_size(modify_value(value))<EOL><DEDENT>return FieldSize<EOL><DEDENT><DEDENT>return SpecificSizer<EOL>", "docstring": "Like SimpleSizer, but modify_value is invoked on each value before it is\n    passed to compute_value_size.  modify_value is typically ZigZagEncode.", "id": "f8647:m4"}
{"signature": "def MapSizer(field_descriptor):", "body": "<EOL>message_type = field_descriptor.message_type<EOL>message_sizer = MessageSizer(field_descriptor.number, False, False)<EOL>def FieldSize(map_value):<EOL><INDENT>total = <NUM_LIT:0><EOL>for key in map_value:<EOL><INDENT>value = map_value[key]<EOL>entry_msg = message_type._concrete_class(key=key, value=value)<EOL>total += message_sizer(entry_msg)<EOL><DEDENT>return total<EOL><DEDENT>return FieldSize<EOL>", "docstring": "Returns a sizer for a map field.", "id": "f8647:m11"}
{"signature": "def TagBytes(field_number, wire_type):", "body": "return _VarintBytes(wire_format.PackTag(field_number, wire_type))<EOL>", "docstring": "Encode the given tag and return the bytes.  Only called at startup.", "id": "f8647:m15"}
{"signature": "def _VarintEncoder():", "body": "def EncodeVarint(write, value):<EOL><INDENT>bits = value & <NUM_LIT><EOL>value >>= <NUM_LIT:7><EOL>while value:<EOL><INDENT>write(six.int2byte(<NUM_LIT>|bits))<EOL>bits = value & <NUM_LIT><EOL>value >>= <NUM_LIT:7><EOL><DEDENT>return write(six.int2byte(bits))<EOL><DEDENT>return EncodeVarint<EOL>", "docstring": "Return an encoder for a basic varint value (does not include tag).", "id": "f8647:m12"}
{"signature": "def StringSizer(field_number, is_repeated, is_packed):", "body": "tag_size = _TagSize(field_number)<EOL>local_VarintSize = _VarintSize<EOL>local_len = len<EOL>assert not is_packed<EOL>if is_repeated:<EOL><INDENT>def RepeatedFieldSize(value):<EOL><INDENT>result = tag_size * len(value)<EOL>for element in value:<EOL><INDENT>l = local_len(element.encode('<STR_LIT:utf-8>'))<EOL>result += local_VarintSize(l) + l<EOL><DEDENT>return result<EOL><DEDENT>return RepeatedFieldSize<EOL><DEDENT>else:<EOL><INDENT>def FieldSize(value):<EOL><INDENT>l = local_len(value.encode('<STR_LIT:utf-8>'))<EOL>return tag_size + local_VarintSize(l) + l<EOL><DEDENT>return FieldSize<EOL><DEDENT>", "docstring": "Returns a sizer for a string field.", "id": "f8647:m6"}
{"signature": "def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):", "body": "def SpecificEncoder(field_number, is_repeated, is_packed):<EOL><INDENT>if is_packed:<EOL><INDENT>tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)<EOL>local_EncodeVarint = _EncodeVarint<EOL>def EncodePackedField(write, value):<EOL><INDENT>write(tag_bytes)<EOL>size = <NUM_LIT:0><EOL>for element in value:<EOL><INDENT>size += compute_value_size(modify_value(element))<EOL><DEDENT>local_EncodeVarint(write, size)<EOL>for element in value:<EOL><INDENT>encode_value(write, modify_value(element))<EOL><DEDENT><DEDENT>return EncodePackedField<EOL><DEDENT>elif is_repeated:<EOL><INDENT>tag_bytes = TagBytes(field_number, wire_type)<EOL>def EncodeRepeatedField(write, value):<EOL><INDENT>for element in value:<EOL><INDENT>write(tag_bytes)<EOL>encode_value(write, modify_value(element))<EOL><DEDENT><DEDENT>return EncodeRepeatedField<EOL><DEDENT>else:<EOL><INDENT>tag_bytes = TagBytes(field_number, wire_type)<EOL>def EncodeField(write, value):<EOL><INDENT>write(tag_bytes)<EOL>return encode_value(write, modify_value(value))<EOL><DEDENT>return EncodeField<EOL><DEDENT><DEDENT>return SpecificEncoder<EOL>", "docstring": "Like SimpleEncoder but additionally invokes modify_value on every value\n    before passing it to encode_value.  Usually modify_value is ZigZagEncode.", "id": "f8647:m17"}
{"signature": "def GroupEncoder(field_number, is_repeated, is_packed):", "body": "start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)<EOL>end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)<EOL>assert not is_packed<EOL>if is_repeated:<EOL><INDENT>def EncodeRepeatedField(write, value):<EOL><INDENT>for element in value:<EOL><INDENT>write(start_tag)<EOL>element._InternalSerialize(write)<EOL>write(end_tag)<EOL><DEDENT><DEDENT>return EncodeRepeatedField<EOL><DEDENT>else:<EOL><INDENT>def EncodeField(write, value):<EOL><INDENT>write(start_tag)<EOL>value._InternalSerialize(write)<EOL>return write(end_tag)<EOL><DEDENT>return EncodeField<EOL><DEDENT>", "docstring": "Returns an encoder for a group field.", "id": "f8647:m23"}
{"signature": "def MessageSetItemEncoder(field_number):", "body": "start_bytes = b\"<STR_LIT>\".join([<EOL>TagBytes(<NUM_LIT:1>, wire_format.WIRETYPE_START_GROUP),<EOL>TagBytes(<NUM_LIT:2>, wire_format.WIRETYPE_VARINT),<EOL>_VarintBytes(field_number),<EOL>TagBytes(<NUM_LIT:3>, wire_format.WIRETYPE_LENGTH_DELIMITED)])<EOL>end_bytes = TagBytes(<NUM_LIT:1>, wire_format.WIRETYPE_END_GROUP)<EOL>local_EncodeVarint = _EncodeVarint<EOL>def EncodeField(write, value):<EOL><INDENT>write(start_bytes)<EOL>local_EncodeVarint(write, value.ByteSize())<EOL>value._InternalSerialize(write)<EOL>return write(end_bytes)<EOL><DEDENT>return EncodeField<EOL>", "docstring": "Encoder for extensions of MessageSet.\n\n    The message set message looks like this:\n      message MessageSet {\n        repeated group Item = 1 {\n          required int32 type_id = 2;\n          required string message = 3;\n        }\n      }", "id": "f8647:m25"}
{"signature": "def _SimpleEncoder(wire_type, encode_value, compute_value_size):", "body": "def SpecificEncoder(field_number, is_repeated, is_packed):<EOL><INDENT>if is_packed:<EOL><INDENT>tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)<EOL>local_EncodeVarint = _EncodeVarint<EOL>def EncodePackedField(write, value):<EOL><INDENT>write(tag_bytes)<EOL>size = <NUM_LIT:0><EOL>for element in value:<EOL><INDENT>size += compute_value_size(element)<EOL><DEDENT>local_EncodeVarint(write, size)<EOL>for element in value:<EOL><INDENT>encode_value(write, element)<EOL><DEDENT><DEDENT>return EncodePackedField<EOL><DEDENT>elif is_repeated:<EOL><INDENT>tag_bytes = TagBytes(field_number, wire_type)<EOL>def EncodeRepeatedField(write, value):<EOL><INDENT>for element in value:<EOL><INDENT>write(tag_bytes)<EOL>encode_value(write, element)<EOL><DEDENT><DEDENT>return EncodeRepeatedField<EOL><DEDENT>else:<EOL><INDENT>tag_bytes = TagBytes(field_number, wire_type)<EOL>def EncodeField(write, value):<EOL><INDENT>write(tag_bytes)<EOL>return encode_value(write, value)<EOL><DEDENT>return EncodeField<EOL><DEDENT><DEDENT>return SpecificEncoder<EOL>", "docstring": "Return a constructor for an encoder for fields of a particular type.\n\n    Args:\n        wire_type:  The field's wire type, for encoding tags.\n        encode_value:  A function which encodes an individual value, e.g.\n          _EncodeVarint().\n        compute_value_size:  A function which computes the size of an individual\n          value, e.g. _VarintSize().", "id": "f8647:m16"}
{"signature": "def MapEncoder(field_descriptor):", "body": "<EOL>message_type = field_descriptor.message_type<EOL>encode_message = MessageEncoder(field_descriptor.number, False, False)<EOL>def EncodeField(write, value):<EOL><INDENT>for key in value:<EOL><INDENT>entry_msg = message_type._concrete_class(key=key, value=value[key])<EOL>encode_message(write, entry_msg)<EOL><DEDENT><DEDENT>return EncodeField<EOL>", "docstring": "Encoder for extensions of MessageSet.\n\n    Maps always have a wire format like this:\n      message MapEntry {\n        key_type key = 1;\n        value_type value = 2;\n      }\n      repeated MapEntry map = N;", "id": "f8647:m26"}
{"signature": "def _StructPackEncoder(wire_type, format):", "body": "value_size = struct.calcsize(format)<EOL>def SpecificEncoder(field_number, is_repeated, is_packed):<EOL><INDENT>local_struct_pack = struct.pack<EOL>if is_packed:<EOL><INDENT>tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)<EOL>local_EncodeVarint = _EncodeVarint<EOL>def EncodePackedField(write, value):<EOL><INDENT>write(tag_bytes)<EOL>local_EncodeVarint(write, len(value) * value_size)<EOL>for element in value:<EOL><INDENT>write(local_struct_pack(format, element))<EOL><DEDENT><DEDENT>return EncodePackedField<EOL><DEDENT>elif is_repeated:<EOL><INDENT>tag_bytes = TagBytes(field_number, wire_type)<EOL>def EncodeRepeatedField(write, value):<EOL><INDENT>for element in value:<EOL><INDENT>write(tag_bytes)<EOL>write(local_struct_pack(format, element))<EOL><DEDENT><DEDENT>return EncodeRepeatedField<EOL><DEDENT>else:<EOL><INDENT>tag_bytes = TagBytes(field_number, wire_type)<EOL>def EncodeField(write, value):<EOL><INDENT>write(tag_bytes)<EOL>return write(local_struct_pack(format, value))<EOL><DEDENT>return EncodeField<EOL><DEDENT><DEDENT>return SpecificEncoder<EOL>", "docstring": "Return a constructor for an encoder for a fixed-width field.\n\n    Args:\n        wire_type:  The field's wire type, for encoding tags.\n        format:  The format string to pass to struct.pack().", "id": "f8647:m18"}
{"signature": "def MessageEncoder(field_number, is_repeated, is_packed):", "body": "tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)<EOL>local_EncodeVarint = _EncodeVarint<EOL>assert not is_packed<EOL>if is_repeated:<EOL><INDENT>def EncodeRepeatedField(write, value):<EOL><INDENT>for element in value:<EOL><INDENT>write(tag)<EOL>local_EncodeVarint(write, element.ByteSize())<EOL>element._InternalSerialize(write)<EOL><DEDENT><DEDENT>return EncodeRepeatedField<EOL><DEDENT>else:<EOL><INDENT>def EncodeField(write, value):<EOL><INDENT>write(tag)<EOL>local_EncodeVarint(write, value.ByteSize())<EOL>return value._InternalSerialize(write)<EOL><DEDENT>return EncodeField<EOL><DEDENT>", "docstring": "Returns an encoder for a message field.", "id": "f8647:m24"}
{"signature": "def _FloatingPointEncoder(wire_type, format):", "body": "value_size = struct.calcsize(format)<EOL>if value_size == <NUM_LIT:4>:<EOL><INDENT>def EncodeNonFiniteOrRaise(write, value):<EOL><INDENT>if value == _POS_INF:<EOL><INDENT>write(b'<STR_LIT>')<EOL><DEDENT>elif value == _NEG_INF:<EOL><INDENT>write(b'<STR_LIT>')<EOL><DEDENT>elif value != value:           <EOL><INDENT>write(b'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT>elif value_size == <NUM_LIT:8>:<EOL><INDENT>def EncodeNonFiniteOrRaise(write, value):<EOL><INDENT>if value == _POS_INF:<EOL><INDENT>write(b'<STR_LIT>')<EOL><DEDENT>elif value == _NEG_INF:<EOL><INDENT>write(b'<STR_LIT>')<EOL><DEDENT>elif value != value:                         <EOL><INDENT>write(b'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % value_size)<EOL><DEDENT>def SpecificEncoder(field_number, is_repeated, is_packed):<EOL><INDENT>local_struct_pack = struct.pack<EOL>if is_packed:<EOL><INDENT>tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)<EOL>local_EncodeVarint = _EncodeVarint<EOL>def EncodePackedField(write, value):<EOL><INDENT>write(tag_bytes)<EOL>local_EncodeVarint(write, len(value) * value_size)<EOL>for element in value:<EOL><INDENT>try:<EOL><INDENT>write(local_struct_pack(format, element))<EOL><DEDENT>except SystemError:<EOL><INDENT>EncodeNonFiniteOrRaise(write, element)<EOL><DEDENT><DEDENT><DEDENT>return EncodePackedField<EOL><DEDENT>elif is_repeated:<EOL><INDENT>tag_bytes = TagBytes(field_number, wire_type)<EOL>def EncodeRepeatedField(write, value):<EOL><INDENT>for element in value:<EOL><INDENT>write(tag_bytes)<EOL>try:<EOL><INDENT>write(local_struct_pack(format, element))<EOL><DEDENT>except SystemError:<EOL><INDENT>EncodeNonFiniteOrRaise(write, element)<EOL><DEDENT><DEDENT><DEDENT>return EncodeRepeatedField<EOL><DEDENT>else:<EOL><INDENT>tag_bytes = TagBytes(field_number, wire_type)<EOL>def EncodeField(write, value):<EOL><INDENT>write(tag_bytes)<EOL>try:<EOL><INDENT>write(local_struct_pack(format, value))<EOL><DEDENT>except SystemError:<EOL><INDENT>EncodeNonFiniteOrRaise(write, value)<EOL><DEDENT><DEDENT>return EncodeField<EOL><DEDENT><DEDENT>return SpecificEncoder<EOL>", "docstring": "Return a constructor for an encoder for float fields.\n\n    This is like StructPackEncoder, but catches errors that may be due to\n    passing non-finite floating-point values to struct.pack, and makes a\n    second attempt to encode those values.\n\n    Args:\n        wire_type:  The field's wire type, for encoding tags.\n        format:  The format string to pass to struct.pack().", "id": "f8647:m19"}
{"signature": "def _VarintSize(value):", "body": "if value <= <NUM_LIT>: return <NUM_LIT:1><EOL>if value <= <NUM_LIT>: return <NUM_LIT:2><EOL>if value <= <NUM_LIT>: return <NUM_LIT:3><EOL>if value <= <NUM_LIT>: return <NUM_LIT:4><EOL>if value <= <NUM_LIT>: return <NUM_LIT:5><EOL>if value <= <NUM_LIT>: return <NUM_LIT:6><EOL>if value <= <NUM_LIT>: return <NUM_LIT:7><EOL>if value <= <NUM_LIT>: return <NUM_LIT:8><EOL>if value <= <NUM_LIT>: return <NUM_LIT:9><EOL>return <NUM_LIT:10><EOL>", "docstring": "Compute the size of a varint value.", "id": "f8647:m0"}
{"signature": "def add_list(self):", "body": "return self.values.add().list_value<EOL>", "docstring": "Appends and returns a list value as the next value in the list.", "id": "f8649:c8:m7"}
{"signature": "def GetCurrentTime(self):", "body": "self.FromDatetime(datetime.utcnow())<EOL>", "docstring": "Get the current UTC into Timestamp.", "id": "f8649:c3:m2"}
{"signature": "def ToTimedelta(self):", "body": "return timedelta(<EOL>seconds=self.seconds, microseconds=_RoundTowardZero(<EOL>self.nanos, _NANOS_PER_MICROSECOND))<EOL>", "docstring": "Converts Duration to timedelta.", "id": "f8649:c4:m10"}
{"signature": "def AddLeafNodes(self, prefix, node):", "body": "if not node:<EOL><INDENT>self.AddPath(prefix)<EOL><DEDENT>for name in node:<EOL><INDENT>child_path = prefix + '<STR_LIT:.>' + name<EOL>self.AddLeafNodes(child_path, node[name])<EOL><DEDENT>", "docstring": "Adds leaf nodes begin with prefix to this tree.", "id": "f8649:c6:m5"}
{"signature": "def get_or_create_struct(self, key):", "body": "return self.fields[key].struct_value<EOL>", "docstring": "Returns a struct for this key, creating if it didn't exist already.", "id": "f8649:c7:m3"}
{"signature": "def MergeMessage(<EOL>self, source, destination,<EOL>replace_message_field=False, replace_repeated_field=False):", "body": "tree = _FieldMaskTree(self)<EOL>tree.MergeMessage(<EOL>source, destination, replace_message_field, replace_repeated_field)<EOL>", "docstring": "Merges fields specified in FieldMask from source to destination.\n\n        Args:\n          source: Source message.\n          destination: The destination message to be merged into.\n          replace_message_field: Replace message field if True. Merge message\n              field if False.\n          replace_repeated_field: Replace repeated field if True. Append\n              elements of repeated field if False.", "id": "f8649:c5:m7"}
{"signature": "def MergeMessage(<EOL>self, source, destination,<EOL>replace_message, replace_repeated):", "body": "_MergeMessage(<EOL>self._root, source, destination, replace_message, replace_repeated)<EOL>", "docstring": "Merge all fields specified by this tree from source to destination.", "id": "f8649:c6:m6"}
{"signature": "def FromMicroseconds(self, micros):", "body": "self.seconds = micros // _MICROS_PER_SECOND<EOL>self.nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND<EOL>", "docstring": "Converts microseconds since epoch to Timestamp.", "id": "f8649:c3:m8"}
{"signature": "def FromJsonString(self, value):", "body": "self.Clear()<EOL>for path in value.split('<STR_LIT:U+002C>'):<EOL><INDENT>self.paths.append(path)<EOL><DEDENT>", "docstring": "Converts string to FieldMask according to proto3 JSON spec.", "id": "f8649:c5:m1"}
{"signature": "def ToNanoseconds(self):", "body": "return self.seconds * _NANOS_PER_SECOND + self.nanos<EOL>", "docstring": "Converts Timestamp to nanoseconds since epoch.", "id": "f8649:c3:m3"}
{"signature": "def FromMicroseconds(self, micros):", "body": "self._NormalizeDuration(<EOL>micros // _MICROS_PER_SECOND,<EOL>(micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND)<EOL>", "docstring": "Converts microseconds to Duration.", "id": "f8649:c4:m7"}
{"signature": "def CanonicalFormFromMask(self, mask):", "body": "tree = _FieldMaskTree(mask)<EOL>tree.ToFieldMask(self)<EOL>", "docstring": "Converts a FieldMask to the canonical form.\n\n        Removes paths that are covered by another path. For example,\n        \"foo.bar\" is covered by \"foo\" and will be removed if \"foo\"\n        is also in the FieldMask. Then sorts all paths in alphabetical order.\n\n        Args:\n          mask: The original FieldMask to be converted.", "id": "f8649:c5:m4"}
{"signature": "def get_or_create_list(self, key):", "body": "return self.fields[key].list_value<EOL>", "docstring": "Returns a list for this key, creating if it didn't exist already.", "id": "f8649:c7:m2"}
{"signature": "def FromMilliseconds(self, millis):", "body": "self.seconds = millis // _MILLIS_PER_SECOND<EOL>self.nanos = (millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND<EOL>", "docstring": "Converts milliseconds since epoch to Timestamp.", "id": "f8649:c3:m9"}
{"signature": "def FromSeconds(self, seconds):", "body": "self.seconds = seconds<EOL>self.nanos = <NUM_LIT:0><EOL>", "docstring": "Converts seconds since epoch to Timestamp.", "id": "f8649:c3:m10"}
{"signature": "def ToNanoseconds(self):", "body": "return self.seconds * _NANOS_PER_SECOND + self.nanos<EOL>", "docstring": "Converts a Duration to nanoseconds.", "id": "f8649:c4:m2"}
{"signature": "def Unpack(self, msg):", "body": "descriptor = msg.DESCRIPTOR<EOL>if not self.Is(descriptor):<EOL><INDENT>return False<EOL><DEDENT>msg.ParseFromString(self.value)<EOL>return True<EOL>", "docstring": "Unpacks the current Any message into specified message.", "id": "f8649:c2:m1"}
{"signature": "def ToSeconds(self):", "body": "return self.seconds<EOL>", "docstring": "Converts a Duration to seconds.", "id": "f8649:c4:m5"}
{"signature": "def items(self):", "body": "return [(value_descriptor.name, value_descriptor.number)<EOL>for value_descriptor in self._enum_type.values]<EOL>", "docstring": "Return a list of the (name, value) pairs of the enum.\n\n        These are returned in the order they were defined in the .proto file.", "id": "f8652:c0:m5"}
{"signature": "def Value(self, name):", "body": "if name in self._enum_type.values_by_name:<EOL><INDENT>return self._enum_type.values_by_name[name].number<EOL><DEDENT>raise ValueError('<STR_LIT>' % (<EOL>self._enum_type.name, name))<EOL>", "docstring": "Returns the value coresponding to the given enum name.", "id": "f8652:c0:m2"}
{"signature": "def keys(self):", "body": "return [value_descriptor.name<EOL>for value_descriptor in self._enum_type.values]<EOL>", "docstring": "Return a list of the string names in the enum.\n\n        These are returned in the order they were defined in the .proto file.", "id": "f8652:c0:m3"}
{"signature": "def CheckValue(self, proposed_value):", "body": "if not isinstance(proposed_value, self._acceptable_types):<EOL><INDENT>message = ('<STR_LIT>' %<EOL>(proposed_value, type(proposed_value), self._acceptable_types))<EOL>raise TypeError(message)<EOL><DEDENT>return proposed_value<EOL>", "docstring": "Type check the provided value and return it.\n\n        The returned value might have been normalized to another type.", "id": "f8653:c0:m1"}
{"signature": "def _RaiseInvalidWireType(buffer, pos, end):", "body": "raise _DecodeError('<STR_LIT>')<EOL>", "docstring": "Skip function for unknown wire types.  Raises an exception.", "id": "f8654:m21"}
{"signature": "def ReadTag(buffer, pos):", "body": "start = pos<EOL>while six.indexbytes(buffer, pos) & <NUM_LIT>:<EOL><INDENT>pos += <NUM_LIT:1><EOL><DEDENT>pos += <NUM_LIT:1><EOL>return (buffer[start:pos], pos)<EOL>", "docstring": "Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.\n\n    We return the raw bytes of the tag rather than decoding them.  The raw\n    bytes can then be used to look up the proper decoder.  This effectively allows\n    us to trade some work that would be done in pure-python (decoding a varint)\n    for work that is done in C (searching for a byte string in a hash table).\n    In a low-level language it would be much cheaper to decode the varint and\n    use that, but not in Python.", "id": "f8654:m2"}
{"signature": "def _VarintDecoder(mask, result_type):", "body": "def DecodeVarint(buffer, pos):<EOL><INDENT>result = <NUM_LIT:0><EOL>shift = <NUM_LIT:0><EOL>while <NUM_LIT:1>:<EOL><INDENT>b = six.indexbytes(buffer, pos)<EOL>result |= ((b & <NUM_LIT>) << shift)<EOL>pos += <NUM_LIT:1><EOL>if not (b & <NUM_LIT>):<EOL><INDENT>result &= mask<EOL>result = result_type(result)<EOL>return (result, pos)<EOL><DEDENT>shift += <NUM_LIT:7><EOL>if shift >= <NUM_LIT:64>:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>return DecodeVarint<EOL>", "docstring": "Return an encoder for a basic varint value (does not include tag).\n\n    Decoded values will be bitwise-anded with the given mask before being\n    returned, e.g. to limit them to 32 bits.  The returned decoder does not\n    take the usual \"end\" parameter -- the caller is expected to do bounds checking\n    after the fact (often the caller can defer such checking until later).  The\n    decoder returns a (value, new_pos) pair.", "id": "f8654:m0"}
{"signature": "def _DoubleDecoder():", "body": "local_unpack = struct.unpack<EOL>def InnerDecode(buffer, pos):<EOL><INDENT>new_pos = pos + <NUM_LIT:8><EOL>double_bytes = buffer[pos:new_pos]<EOL>if ((double_bytes[<NUM_LIT:7>:<NUM_LIT:8>] in b'<STR_LIT>')<EOL>and (double_bytes[<NUM_LIT:6>:<NUM_LIT:7>] >= b'<STR_LIT>')<EOL>and (double_bytes[<NUM_LIT:0>:<NUM_LIT:7>] != b'<STR_LIT>')):<EOL><INDENT>return (_NAN, new_pos)<EOL><DEDENT>result = local_unpack('<STR_LIT>', double_bytes)[<NUM_LIT:0>]<EOL>return (result, new_pos)<EOL><DEDENT>return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)<EOL>", "docstring": "Returns a decoder for a double field.\n\n    This code works around a bug in struct.unpack for not-a-number.", "id": "f8654:m7"}
{"signature": "def _SignedVarintDecoder(mask, result_type):", "body": "def DecodeVarint(buffer, pos):<EOL><INDENT>result = <NUM_LIT:0><EOL>shift = <NUM_LIT:0><EOL>while <NUM_LIT:1>:<EOL><INDENT>b = six.indexbytes(buffer, pos)<EOL>result |= ((b & <NUM_LIT>) << shift)<EOL>pos += <NUM_LIT:1><EOL>if not (b & <NUM_LIT>):<EOL><INDENT>if result > <NUM_LIT>:<EOL><INDENT>result -= (<NUM_LIT:1> << <NUM_LIT:64>)<EOL>result |= ~mask<EOL><DEDENT>else:<EOL><INDENT>result &= mask<EOL><DEDENT>result = result_type(result)<EOL>return (result, pos)<EOL><DEDENT>shift += <NUM_LIT:7><EOL>if shift >= <NUM_LIT:64>:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>return DecodeVarint<EOL>", "docstring": "Like _VarintDecoder() but decodes signed values.", "id": "f8654:m1"}
{"signature": "def _EndGroup(buffer, pos, end):", "body": "return -<NUM_LIT:1><EOL>", "docstring": "Skipping an END_GROUP tag returns -1 to tell the parent loop to break.", "id": "f8654:m19"}
{"signature": "def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):", "body": "local_DecodeVarint = _DecodeVarint<EOL>assert not is_packed<EOL>if is_repeated:<EOL><INDENT>tag_bytes = encoder.TagBytes(field_number,<EOL>wire_format.WIRETYPE_LENGTH_DELIMITED)<EOL>tag_len = len(tag_bytes)<EOL>def DecodeRepeatedField(buffer, pos, end, message, field_dict):<EOL><INDENT>value = field_dict.get(key)<EOL>if value is None:<EOL><INDENT>value = field_dict.setdefault(key, new_default(message))<EOL><DEDENT>while <NUM_LIT:1>:<EOL><INDENT>(size, pos) = local_DecodeVarint(buffer, pos)<EOL>new_pos = pos + size<EOL>if new_pos > end:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>value.append(buffer[pos:new_pos])<EOL>pos = new_pos + tag_len<EOL>if buffer[new_pos:pos] != tag_bytes or new_pos == end:<EOL><INDENT>return new_pos<EOL><DEDENT><DEDENT><DEDENT>return DecodeRepeatedField<EOL><DEDENT>else:<EOL><INDENT>def DecodeField(buffer, pos, end, message, field_dict):<EOL><INDENT>(size, pos) = local_DecodeVarint(buffer, pos)<EOL>new_pos = pos + size<EOL>if new_pos > end:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>field_dict[key] = buffer[pos:new_pos]<EOL>return new_pos<EOL><DEDENT>return DecodeField<EOL><DEDENT>", "docstring": "Returns a decoder for a bytes field.", "id": "f8654:m10"}
{"signature": "def MapDecoder(field_descriptor, new_default, is_message_map):", "body": "key = field_descriptor<EOL>tag_bytes = encoder.TagBytes(field_descriptor.number,<EOL>wire_format.WIRETYPE_LENGTH_DELIMITED)<EOL>tag_len = len(tag_bytes)<EOL>local_DecodeVarint = _DecodeVarint<EOL>message_type = field_descriptor.message_type<EOL>def DecodeMap(buffer, pos, end, message, field_dict):<EOL><INDENT>submsg = message_type._concrete_class()<EOL>value = field_dict.get(key)<EOL>if value is None:<EOL><INDENT>value = field_dict.setdefault(key, new_default(message))<EOL><DEDENT>while <NUM_LIT:1>:<EOL><INDENT>(size, pos) = local_DecodeVarint(buffer, pos)<EOL>new_pos = pos + size<EOL>if new_pos > end:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>submsg.Clear()<EOL>if submsg._InternalParse(buffer, pos, new_pos) != new_pos:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>if is_message_map:<EOL><INDENT>value[submsg.key].MergeFrom(submsg.value)<EOL><DEDENT>else:<EOL><INDENT>value[submsg.key] = submsg.value<EOL><DEDENT>pos = new_pos + tag_len<EOL>if buffer[new_pos:pos] != tag_bytes or new_pos == end:<EOL><INDENT>return new_pos<EOL><DEDENT><DEDENT><DEDENT>return DecodeMap<EOL>", "docstring": "Returns a decoder for a map field.", "id": "f8654:m14"}
{"signature": "def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):", "body": "end_tag_bytes = encoder.TagBytes(field_number,<EOL>wire_format.WIRETYPE_END_GROUP)<EOL>end_tag_len = len(end_tag_bytes)<EOL>assert not is_packed<EOL>if is_repeated:<EOL><INDENT>tag_bytes = encoder.TagBytes(field_number,<EOL>wire_format.WIRETYPE_START_GROUP)<EOL>tag_len = len(tag_bytes)<EOL>def DecodeRepeatedField(buffer, pos, end, message, field_dict):<EOL><INDENT>value = field_dict.get(key)<EOL>if value is None:<EOL><INDENT>value = field_dict.setdefault(key, new_default(message))<EOL><DEDENT>while <NUM_LIT:1>:<EOL><INDENT>value = field_dict.get(key)<EOL>if value is None:<EOL><INDENT>value = field_dict.setdefault(key, new_default(message))<EOL><DEDENT>pos = value.add()._InternalParse(buffer, pos, end)<EOL>new_pos = pos+end_tag_len<EOL>if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>pos = new_pos + tag_len<EOL>if buffer[new_pos:pos] != tag_bytes or new_pos == end:<EOL><INDENT>return new_pos<EOL><DEDENT><DEDENT><DEDENT>return DecodeRepeatedField<EOL><DEDENT>else:<EOL><INDENT>def DecodeField(buffer, pos, end, message, field_dict):<EOL><INDENT>value = field_dict.get(key)<EOL>if value is None:<EOL><INDENT>value = field_dict.setdefault(key, new_default(message))<EOL><DEDENT>pos = value._InternalParse(buffer, pos, end)<EOL>new_pos = pos+end_tag_len<EOL>if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>return new_pos<EOL><DEDENT>return DecodeField<EOL><DEDENT>", "docstring": "Returns a decoder for a group field.", "id": "f8654:m11"}
{"signature": "def _SkipVarint(buffer, pos, end):", "body": "<EOL>while ord(buffer[pos:pos+<NUM_LIT:1>]) & <NUM_LIT>:<EOL><INDENT>pos += <NUM_LIT:1><EOL><DEDENT>pos += <NUM_LIT:1><EOL>if pos > end:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>return pos<EOL>", "docstring": "Skip a varint value.  Returns the new position.", "id": "f8654:m15"}
{"signature": "def _SimpleDecoder(wire_type, decode_value):", "body": "def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):<EOL><INDENT>if is_packed:<EOL><INDENT>local_DecodeVarint = _DecodeVarint<EOL>def DecodePackedField(buffer, pos, end, message, field_dict):<EOL><INDENT>value = field_dict.get(key)<EOL>if value is None:<EOL><INDENT>value = field_dict.setdefault(key, new_default(message))<EOL><DEDENT>(endpoint, pos) = local_DecodeVarint(buffer, pos)<EOL>endpoint += pos<EOL>if endpoint > end:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>while pos < endpoint:<EOL><INDENT>(element, pos) = decode_value(buffer, pos)<EOL>value.append(element)<EOL><DEDENT>if pos > endpoint:<EOL><INDENT>del value[-<NUM_LIT:1>]   <EOL>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>return pos<EOL><DEDENT>return DecodePackedField<EOL><DEDENT>elif is_repeated:<EOL><INDENT>tag_bytes = encoder.TagBytes(field_number, wire_type)<EOL>tag_len = len(tag_bytes)<EOL>def DecodeRepeatedField(buffer, pos, end, message, field_dict):<EOL><INDENT>value = field_dict.get(key)<EOL>if value is None:<EOL><INDENT>value = field_dict.setdefault(key, new_default(message))<EOL><DEDENT>while <NUM_LIT:1>:<EOL><INDENT>(element, new_pos) = decode_value(buffer, pos)<EOL>value.append(element)<EOL>pos = new_pos + tag_len<EOL>if buffer[new_pos:pos] != tag_bytes or new_pos >= end:<EOL><INDENT>if new_pos > end:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>return new_pos<EOL><DEDENT><DEDENT><DEDENT>return DecodeRepeatedField<EOL><DEDENT>else:<EOL><INDENT>def DecodeField(buffer, pos, end, message, field_dict):<EOL><INDENT>(field_dict[key], pos) = decode_value(buffer, pos)<EOL>if pos > end:<EOL><INDENT>del field_dict[key]  <EOL>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>return pos<EOL><DEDENT>return DecodeField<EOL><DEDENT><DEDENT>return SpecificDecoder<EOL>", "docstring": "Return a constructor for a decoder for fields of a particular type.\n\n    Args:\n        wire_type:  The field's 
wire type.\n        decode_value:  A function which decodes an individual value, e.g.\n          _DecodeVarint()", "id": "f8654:m3"}
{"signature": "def MessageSetItemDecoder(extensions_by_number):", "body": "type_id_tag_bytes = encoder.TagBytes(<NUM_LIT:2>, wire_format.WIRETYPE_VARINT)<EOL>message_tag_bytes = encoder.TagBytes(<NUM_LIT:3>, wire_format.WIRETYPE_LENGTH_DELIMITED)<EOL>item_end_tag_bytes = encoder.TagBytes(<NUM_LIT:1>, wire_format.WIRETYPE_END_GROUP)<EOL>local_ReadTag = ReadTag<EOL>local_DecodeVarint = _DecodeVarint<EOL>local_SkipField = SkipField<EOL>def DecodeItem(buffer, pos, end, message, field_dict):<EOL><INDENT>message_set_item_start = pos<EOL>type_id = -<NUM_LIT:1><EOL>message_start = -<NUM_LIT:1><EOL>message_end = -<NUM_LIT:1><EOL>while <NUM_LIT:1>:<EOL><INDENT>(tag_bytes, pos) = local_ReadTag(buffer, pos)<EOL>if tag_bytes == type_id_tag_bytes:<EOL><INDENT>(type_id, pos) = local_DecodeVarint(buffer, pos)<EOL><DEDENT>elif tag_bytes == message_tag_bytes:<EOL><INDENT>(size, message_start) = local_DecodeVarint(buffer, pos)<EOL>pos = message_end = message_start + size<EOL><DEDENT>elif tag_bytes == item_end_tag_bytes:<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>pos = SkipField(buffer, pos, end, tag_bytes)<EOL>if pos == -<NUM_LIT:1>:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>if pos > end:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>if type_id == -<NUM_LIT:1>:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>if message_start == -<NUM_LIT:1>:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT>extension = extensions_by_number.get(type_id)<EOL>if extension is not None:<EOL><INDENT>value = field_dict.get(extension)<EOL>if value is None:<EOL><INDENT>value = field_dict.setdefault(<EOL>extension, extension.message_type._concrete_class())<EOL><DEDENT>if value._InternalParse(buffer, message_start,message_end) != message_end:<EOL><INDENT>raise _DecodeError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if not message._unknown_fields:<EOL><INDENT>message._unknown_fields = 
[]<EOL><DEDENT>message._unknown_fields.append((MESSAGE_SET_ITEM_TAG,<EOL>buffer[message_set_item_start:pos]))<EOL><DEDENT>return pos<EOL><DEDENT>return DecodeItem<EOL>", "docstring": "Returns a decoder for a MessageSet item.\n\n    The parameter is the _extensions_by_number map for the message class.\n\n    The message set message looks like this:\n      message MessageSet {\n        repeated group Item = 1 {\n          required int32 type_id = 2;\n          required string message = 3;\n        }\n      }", "id": "f8654:m13"}
{"signature": "def __init__(self, parent_message, field):", "body": "super(_OneofListener, self).__init__(parent_message)<EOL>self._field = field<EOL>", "docstring": "Args:\n          parent_message: The message whose _Modified() method we should call when\n            we receive Modified() messages.\n          field: The descriptor of the field being set in the parent message.", "id": "f8655:c2:m0"}
{"signature": "def _AddSerializePartialToStringMethod(message_descriptor, cls):", "body": "def SerializePartialToString(self):<EOL><INDENT>out = BytesIO()<EOL>self._InternalSerialize(out.write)<EOL>return out.getvalue()<EOL><DEDENT>cls.SerializePartialToString = SerializePartialToString<EOL>def InternalSerialize(self, write_bytes):<EOL><INDENT>for field_descriptor, field_value in self.ListFields():<EOL><INDENT>field_descriptor._encoder(write_bytes, field_value)<EOL><DEDENT>for tag_bytes, value_bytes in self._unknown_fields:<EOL><INDENT>write_bytes(tag_bytes)<EOL>write_bytes(value_bytes)<EOL><DEDENT><DEDENT>cls._InternalSerialize = InternalSerialize<EOL>", "docstring": "Helper for _AddMessageMethods().", "id": "f8655:m35"}
{"signature": "def _AddListFieldsMethod(message_descriptor, cls):", "body": "def ListFields(self):<EOL><INDENT>all_fields = [item for item in self._fields.items() if _IsPresent(item)]<EOL>all_fields.sort(key = lambda item: item[<NUM_LIT:0>].number)<EOL>return all_fields<EOL><DEDENT>cls.ListFields = ListFields<EOL>", "docstring": "Helper for _AddMessageMethods().", "id": "f8655:m22"}
{"signature": "def __init__(self, parent_message):", "body": "<EOL>if isinstance(parent_message, weakref.ProxyType):<EOL><INDENT>self._parent_message_weakref = parent_message<EOL><DEDENT>else:<EOL><INDENT>self._parent_message_weakref = weakref.proxy(parent_message)<EOL><DEDENT>self.dirty = False<EOL>", "docstring": "Args:\n          parent_message: The message whose _Modified() method we should call when\n            we receive Modified() messages.", "id": "f8655:c1:m0"}
{"signature": "def _AddClearExtensionMethod(cls):", "body": "def ClearExtension(self, extension_handle):<EOL><INDENT>_VerifyExtensionHandle(self, extension_handle)<EOL>if extension_handle in self._fields:<EOL><INDENT>del self._fields[extension_handle]<EOL><DEDENT>self._Modified()<EOL><DEDENT>cls.ClearExtension = ClearExtension<EOL>", "docstring": "Helper for _AddMessageMethods().", "id": "f8655:m25"}
{"signature": "def _AddHasFieldMethod(message_descriptor, cls):", "body": "is_proto3 = (message_descriptor.syntax == \"<STR_LIT>\")<EOL>error_msg = _Proto3HasError if is_proto3 else _Proto2HasError<EOL>hassable_fields = {}<EOL>for field in message_descriptor.fields:<EOL><INDENT>if field.label == _FieldDescriptor.LABEL_REPEATED:<EOL><INDENT>continue<EOL><DEDENT>if (is_proto3 and field.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE and<EOL>not field.containing_oneof):<EOL><INDENT>continue<EOL><DEDENT>hassable_fields[field.name] = field<EOL><DEDENT>if not is_proto3:<EOL><INDENT>for oneof in message_descriptor.oneofs:<EOL><INDENT>hassable_fields[oneof.name] = oneof<EOL><DEDENT><DEDENT>def HasField(self, field_name):<EOL><INDENT>try:<EOL><INDENT>field = hassable_fields[field_name]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise ValueError(error_msg % field_name)<EOL><DEDENT>if isinstance(field, descriptor_mod.OneofDescriptor):<EOL><INDENT>try:<EOL><INDENT>return HasField(self, self._oneofs[field].name)<EOL><DEDENT>except KeyError:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:<EOL><INDENT>value = self._fields.get(field)<EOL>return value is not None and value._is_present_in_parent<EOL><DEDENT>else:<EOL><INDENT>return field in self._fields<EOL><DEDENT><DEDENT><DEDENT>cls.HasField = HasField<EOL>", "docstring": "Helper for _AddMessageMethods().", "id": "f8655:m23"}
{"signature": "def __setitem__(self, extension_handle, value):", "body": "_VerifyExtensionHandle(self._extended_message, extension_handle)<EOL>if (extension_handle.label == _FieldDescriptor.LABEL_REPEATED or<EOL>extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):<EOL><INDENT>raise TypeError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % extension_handle.full_name)<EOL><DEDENT>type_checker = type_checkers.GetTypeChecker(extension_handle)<EOL>self._extended_message._fields[extension_handle] = (<EOL>type_checker.CheckValue(value))<EOL>self._extended_message._Modified()<EOL>", "docstring": "If extension_handle specifies a non-repeated, scalar extension\n        field, sets the value of that field.", "id": "f8655:c3:m5"}
{"signature": "def _IsPresent(item):", "body": "if item[<NUM_LIT:0>].label == _FieldDescriptor.LABEL_REPEATED:<EOL><INDENT>return bool(item[<NUM_LIT:1>])<EOL><DEDENT>elif item[<NUM_LIT:0>].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:<EOL><INDENT>return item[<NUM_LIT:1>]._is_present_in_parent<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Given a (FieldDescriptor, value) tuple from _fields, return true if the\n    value should be included in the list returned by ListFields().", "id": "f8655:m21"}
{"signature": "def _AddStrMethod(message_descriptor, cls):", "body": "def __str__(self):<EOL><INDENT>return text_format.MessageToString(self)<EOL><DEDENT>cls.__str__ = __str__<EOL>", "docstring": "Helper for _AddMessageMethods().", "id": "f8655:m29"}
{"signature": "def _AddSlots(message_descriptor, dictionary):", "body": "dictionary['<STR_LIT>'] = ['<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>']<EOL>", "docstring": "Adds a __slots__ entry to dictionary, containing the names of all valid\n    attributes for this message type.\n\n    Args:\n      message_descriptor: A Descriptor instance describing this message type.\n      dictionary: Class dictionary to which we'll add a '__slots__' entry.", "id": "f8655:m2"}
{"signature": "def _AddUnicodeMethod(unused_message_descriptor, cls):", "body": "def __unicode__(self):<EOL><INDENT>return text_format.MessageToString(self, as_utf8=True).decode('<STR_LIT:utf-8>')<EOL><DEDENT>cls.__unicode__ = __unicode__<EOL>", "docstring": "Helper for _AddMessageMethods().", "id": "f8655:m31"}
{"signature": "def __init__(self, extended_message):", "body": "self._extended_message = extended_message<EOL>", "docstring": "extended_message: Message instance for which we are the Extensions dict.", "id": "f8655:c3:m0"}
{"signature": "def _AddPrivateHelperMethods(message_descriptor, cls):", "body": "def Modified(self):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if not self._cached_byte_size_dirty:<EOL><INDENT>self._cached_byte_size_dirty = True<EOL>self._listener_for_children.dirty = True<EOL>self._is_present_in_parent = True<EOL>self._listener.Modified()<EOL><DEDENT><DEDENT>def _UpdateOneofState(self, field):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>other_field = self._oneofs.setdefault(field.containing_oneof, field)<EOL>if other_field is not field:<EOL><INDENT>del self._fields[other_field]<EOL>self._oneofs[field.containing_oneof] = field<EOL><DEDENT><DEDENT>cls._Modified = Modified<EOL>cls.SetInParent = Modified<EOL>cls._UpdateOneofState = _UpdateOneofState<EOL>", "docstring": "Adds implementation of private helper methods to cls.", "id": "f8655:m44"}
{"signature": "def _FindExtensionByNumber(self, number):", "body": "return self._extended_message._extensions_by_number.get(number, None)<EOL>", "docstring": "Tries to find a known extension with the field number.\n\n        Args:\n          number: Extension field number.\n\n        Returns:\n          Extension field descriptor.", "id": "f8655:c3:m7"}
{"signature": "def _ReraiseTypeErrorWithFieldName(message_name, field_name):", "body": "exc = sys.exc_info()[<NUM_LIT:1>]<EOL>if len(exc.args) == <NUM_LIT:1> and type(exc) is TypeError:<EOL><INDENT>exc = TypeError('<STR_LIT>' % (str(exc), message_name, field_name))<EOL><DEDENT>six.reraise(type(exc), exc, sys.exc_info()[<NUM_LIT:2>])<EOL>", "docstring": "Re-raise the currently-handled TypeError with the field name added.", "id": "f8655:m11"}
{"signature": "def _AddEnumValues(descriptor, cls):", "body": "for enum_type in descriptor.enum_types:<EOL><INDENT>setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))<EOL>for enum_value in enum_type.values:<EOL><INDENT>setattr(cls, enum_value.name, enum_value.number)<EOL><DEDENT><DEDENT>", "docstring": "Sets class-level attributes for all enum fields defined in this message.\n\n    Also exporting a class-level object that can name enum values.\n\n    Args:\n      descriptor: Descriptor object for this message type.\n      cls: Class we're constructing for this message type.", "id": "f8655:m8"}
{"signature": "def _InternalUnpackAny(msg):", "body": "type_url = msg.type_url<EOL>db = symbol_database.Default()<EOL>if not type_url:<EOL><INDENT>return None<EOL><DEDENT>type_name = type_url.split(\"<STR_LIT:/>\")[-<NUM_LIT:1>]<EOL>descriptor = db.pool.FindMessageTypeByName(type_name)<EOL>if descriptor is None:<EOL><INDENT>return None<EOL><DEDENT>message_class = db.GetPrototype(descriptor)<EOL>message = message_class()<EOL>message.ParseFromString(msg.value)<EOL>return message<EOL>", "docstring": "Unpacks Any message and returns the unpacked message.\n\n    This internal method is differnt from public Any Unpack method which takes\n    the target message as argument. _InternalUnpackAny method does not have\n    target message type and need to find the message type in descriptor pool.\n\n    Args:\n      msg: An Any message to be unpacked.\n\n    Returns:\n      The unpacked message.", "id": "f8655:m27"}
{"signature": "def __new__(cls, name, bases, dictionary):", "body": "descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]<EOL>if descriptor.full_name in well_known_types.WKTBASES:<EOL><INDENT>bases += (well_known_types.WKTBASES[descriptor.full_name],)<EOL><DEDENT>_AddClassAttributesForNestedExtensions(descriptor, dictionary)<EOL>_AddSlots(descriptor, dictionary)<EOL>superclass = super(GeneratedProtocolMessageType, cls)<EOL>new_class = superclass.__new__(cls, name, bases, dictionary)<EOL>return new_class<EOL>", "docstring": "Custom allocation for runtime-generated class types.\n\n        We override __new__ because this is apparently the only place\n        where we can meaningfully set __slots__ on the class we're creating(?).\n        (The interplay between metaclasses and slots is not very well-documented).\n\n        Args:\n          name: Name of the class (ignored, but required by the\n            metaclass protocol).\n          bases: Base classes of the class we're constructing.\n            (Should be message.Message).  We ignore this field, but\n            it's required by the metaclass protocol\n          dictionary: The class dictionary of the class we're\n            constructing.  dictionary[_DESCRIPTOR_KEY] must contain\n            a Descriptor object describing this protocol message\n            type.\n\n        Returns:\n          Newly-allocated class.", "id": "f8655:c0:m0"}
{"signature": "def __getitem__(self, extension_handle):", "body": "_VerifyExtensionHandle(self._extended_message, extension_handle)<EOL>result = self._extended_message._fields.get(extension_handle)<EOL>if result is not None:<EOL><INDENT>return result<EOL><DEDENT>if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:<EOL><INDENT>result = extension_handle._default_constructor(self._extended_message)<EOL><DEDENT>elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:<EOL><INDENT>result = extension_handle.message_type._concrete_class()<EOL>try:<EOL><INDENT>result._SetListener(self._extended_message._listener_for_children)<EOL><DEDENT>except ReferenceError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return extension_handle.default_value<EOL><DEDENT>result = self._extended_message._fields.setdefault(<EOL>extension_handle, result)<EOL>return result<EOL>", "docstring": "Returns the current value of the given extension handle.", "id": "f8655:c3:m1"}
{"signature": "def _AddPropertiesForNonRepeatedCompositeField(field, cls):", "body": "<EOL>proto_field_name = field.name<EOL>property_name = _PropertyName(proto_field_name)<EOL>def getter(self):<EOL><INDENT>field_value = self._fields.get(field)<EOL>if field_value is None:<EOL><INDENT>field_value = field._default_constructor(self)<EOL>field_value = self._fields.setdefault(field, field_value)<EOL><DEDENT>return field_value<EOL><DEDENT>getter.__module__ = None<EOL>getter.__doc__ = '<STR_LIT>' % proto_field_name<EOL>def setter(self, new_value):<EOL><INDENT>raise AttributeError('<STR_LIT>'<EOL>'<STR_LIT>' % proto_field_name)<EOL><DEDENT>doc = '<STR_LIT>' % proto_field_name<EOL>setattr(cls, property_name, property(getter, setter, doc=doc))<EOL>", "docstring": "Adds a public property for a nonrepeated, composite protocol message field.\n    A composite field is a \"group\" or \"message\" field.\n\n    Clients can use this property to get the value of the field, but cannot\n    assign to the property directly.\n\n    Args:\n      field: A FieldDescriptor for this field.\n      cls: The class we're constructing.", "id": "f8655:m18"}
{"signature": "def __init__(self, message_listener, message_descriptor):", "body": "super(RepeatedCompositeFieldContainer, self).__init__(message_listener)<EOL>self._message_descriptor = message_descriptor<EOL>", "docstring": "Note that we pass in a descriptor instead of the generated directly,\nsince at the time we construct a _RepeatedCompositeFieldContainer we\nhaven't yet necessarily initialized the type that will be contained in the\ncontainer.\n\nArgs:\n  message_listener: A MessageListener implementation.\n    The RepeatedCompositeFieldContainer will call this object's\n    Modified() method when it is modified.\n  message_descriptor: A Descriptor instance describing the protocol type\n    that should be present in this container.  We'll use the\n    _concrete_class field of this descriptor when the client calls add().", "id": "f8656:c2:m0"}
{"signature": "def __init__(self, message_listener, message_descriptor, key_checker):", "body": "self._message_listener = message_listener<EOL>self._message_descriptor = message_descriptor<EOL>self._key_checker = key_checker<EOL>self._values = {}<EOL>", "docstring": "Args:\n  message_listener: A MessageListener implementation.\n    The ScalarMap will call this object's Modified() method when it\n    is modified.\n  key_checker: A type_checkers.ValueChecker instance to run on keys\n    inserted into this container.\n  value_checker: A type_checkers.ValueChecker instance to run on values\n    inserted into this container.", "id": "f8656:c4:m0"}
{"signature": "def __eq__(self, other):", "body": "if self is other:<EOL><INDENT>return True<EOL><DEDENT>if not isinstance(other, self.__class__):<EOL><INDENT>raise TypeError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>return self._values == other._values<EOL>", "docstring": "Compares the current instance with another one.", "id": "f8656:c2:m9"}
{"signature": "def MergeFrom(self, other):", "body": "self.extend(other._values)<EOL>", "docstring": "Appends the contents of another repeated field of the same type to this\n        one, copying each individual message.", "id": "f8656:c2:m3"}
{"signature": "def __init__(self, message_listener, key_checker, value_checker):", "body": "self._message_listener = message_listener<EOL>self._key_checker = key_checker<EOL>self._value_checker = value_checker<EOL>self._values = {}<EOL>", "docstring": "Args:\n  message_listener: A MessageListener implementation.\n    The ScalarMap will call this object's Modified() method when it\n    is modified.\n  key_checker: A type_checkers.ValueChecker instance to run on keys\n    inserted into this container.\n  value_checker: A type_checkers.ValueChecker instance to run on values\n    inserted into this container.", "id": "f8656:c3:m0"}
{"signature": "def insert(self, key, value):", "body": "self._values.insert(key, self._type_checker.CheckValue(value))<EOL>if not self._message_listener.dirty:<EOL><INDENT>self._message_listener.Modified()<EOL><DEDENT>", "docstring": "Inserts the item at the specified position. Similar to list.insert().", "id": "f8656:c1:m2"}
{"signature": "def pop(self, key=-<NUM_LIT:1>):", "body": "value = self._values[key]<EOL>self.__delitem__(key)<EOL>return value<EOL>", "docstring": "Removes and returns an item at a given index. Similar to list.pop().", "id": "f8656:c2:m5"}
{"signature": "def __eq__(self, other):", "body": "if self is other:<EOL><INDENT>return True<EOL><DEDENT>if isinstance(other, self.__class__):<EOL><INDENT>return other._values == self._values<EOL><DEDENT>return other == self._values<EOL>", "docstring": "Compares the current instance with another one.", "id": "f8656:c1:m12"}
{"signature": "def remove(self, elem):", "body": "self._values.remove(elem)<EOL>self._message_listener.Modified()<EOL>", "docstring": "Removes an item from the list. Similar to list.remove().", "id": "f8656:c1:m5"}
{"signature": "def __ne__(self, other):", "body": "<EOL>return not self == other<EOL>", "docstring": "Checks if another instance isn't equal to this one.", "id": "f8656:c0:m3"}
{"signature": "def pop(self, key=-<NUM_LIT:1>):", "body": "value = self._values[key]<EOL>self.__delitem__(key)<EOL>return value<EOL>", "docstring": "Removes and returns an item at a given index. Similar to list.pop().", "id": "f8656:c1:m6"}
{"signature": "def MergeFrom(self, other):", "body": "self._values.extend(other._values)<EOL>self._message_listener.Modified()<EOL>", "docstring": "Appends the contents of another repeated field of the same type to this\n        one. We do not check the types of the individual fields.", "id": "f8656:c1:m4"}
{"signature": "def PrintField(self, field, value):", "body": "out = self.out<EOL>out.write('<STR_LIT:U+0020>' * self.indent)<EOL>if self.use_field_number:<EOL><INDENT>out.write(str(field.number))<EOL><DEDENT>else:<EOL><INDENT>if field.is_extension:<EOL><INDENT>out.write('<STR_LIT:[>')<EOL>if (field.containing_type.GetOptions().message_set_wire_format and<EOL>field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and<EOL>field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):<EOL><INDENT>out.write(field.message_type.full_name)<EOL><DEDENT>else:<EOL><INDENT>out.write(field.full_name)<EOL><DEDENT>out.write('<STR_LIT:]>')<EOL><DEDENT>elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:<EOL><INDENT>out.write(field.message_type.name)<EOL><DEDENT>else:<EOL><INDENT>out.write(field.name)<EOL><DEDENT><DEDENT>if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:<EOL><INDENT>out.write('<STR_LIT>')<EOL><DEDENT>self.PrintFieldValue(field, value)<EOL>if self.as_one_line:<EOL><INDENT>out.write('<STR_LIT:U+0020>')<EOL><DEDENT>else:<EOL><INDENT>out.write('<STR_LIT:\\n>')<EOL><DEDENT>", "docstring": "Print a single field name/value pair.", "id": "f8657:c3:m2"}
{"signature": "def ParseLines(self, lines, message):", "body": "self._allow_multiple_scalars = False<EOL>self._ParseOrMerge(lines, message)<EOL>return message<EOL>", "docstring": "Parses an text representation of a protocol message into a message.", "id": "f8657:c4:m2"}
{"signature": "def ParseFromString(self, text, message):", "body": "if not isinstance(text, str):<EOL><INDENT>text = text.decode('<STR_LIT:utf-8>')<EOL><DEDENT>return self.ParseLines(text.split('<STR_LIT:\\n>'), message)<EOL>", "docstring": "Parses an text representation of a protocol message into a message.", "id": "f8657:c4:m1"}
{"signature": "def Merge(text, message, allow_unknown_extension=False,<EOL>allow_field_number=False):", "body": "return MergeLines(text.split('<STR_LIT:\\n>'), message, allow_unknown_extension,<EOL>allow_field_number)<EOL>", "docstring": "Parses an text representation of a protocol message into a message.\n\n    Like Parse(), but allows repeated values for a non-repeated field, and uses\n    the last one.\n\n    Args:\n      text: Message text representation.\n      message: A protocol buffer message to merge into.\n      allow_unknown_extension: if True, skip over missing extensions and keep\n        parsing\n      allow_field_number: if True, both field number and field name are allowed.\n\n    Returns:\n      The same message passed as argument.\n\n    Raises:\n      ParseError: On text parsing problems.", "id": "f8657:m6"}
{"signature": "def _ConsumeSingleByteString(self):", "body": "text = self.token<EOL>if len(text) < <NUM_LIT:1> or text[<NUM_LIT:0>] not in _QUOTES:<EOL><INDENT>raise self._ParseError('<STR_LIT>' % (text,))<EOL><DEDENT>if len(text) < <NUM_LIT:2> or text[-<NUM_LIT:1>] != text[<NUM_LIT:0>]:<EOL><INDENT>raise self._ParseError('<STR_LIT>' % (text,))<EOL><DEDENT>try:<EOL><INDENT>result = text_encoding.CUnescape(text[<NUM_LIT:1>:-<NUM_LIT:1>])<EOL><DEDENT>except ValueError as e:<EOL><INDENT>raise self._ParseError(str(e))<EOL><DEDENT>self.NextToken()<EOL>return result<EOL>", "docstring": "Consume one token of a string literal.\n\n        String literals (whether bytes or text) can come in multiple adjacent\n        tokens which are automatically concatenated, like in C or Python.  This\n        method only consumes one token.\n\n        Returns:\n          The token parsed.\n        Raises:\n          ParseError: When the wrong format data is found.", "id": "f8657:c5:m21"}
{"signature": "def ParseLines(lines, message, allow_unknown_extension=False,<EOL>allow_field_number=False):", "body": "parser = _Parser(allow_unknown_extension, allow_field_number)<EOL>return parser.ParseLines(lines, message)<EOL>", "docstring": "Parses an text representation of a protocol message into a message.\n\n    Args:\n      lines: An iterable of lines of a message's text representation.\n      message: A protocol buffer message to merge into.\n      allow_unknown_extension: if True, skip over missing extensions and keep\n        parsing\n      allow_field_number: if True, both field number and field name are allowed.\n\n    Returns:\n      The same message passed as argument.\n\n    Raises:\n      ParseError: On text parsing problems.", "id": "f8657:m7"}
{"signature": "def Consume(self, token):", "body": "if not self.TryConsume(token):<EOL><INDENT>raise self._ParseError('<STR_LIT>' % token)<EOL><DEDENT>", "docstring": "Consumes a piece of text.\n\n        Args:\n          token: Text to consume.\n\n        Raises:\n          ParseError: If the text couldn't be consumed.", "id": "f8657:c5:m6"}
{"signature": "def ConsumeUint64(self):", "body": "try:<EOL><INDENT>result = ParseInteger(self.token, is_signed=False, is_long=True)<EOL><DEDENT>except ValueError as e:<EOL><INDENT>raise self._ParseError(str(e))<EOL><DEDENT>self.NextToken()<EOL>return result<EOL>", "docstring": "Consumes an unsigned 64bit integer number.\n\n        Returns:\n          The integer parsed.\n\n        Raises:\n          ParseError: If an unsigned 64bit integer couldn't be consumed.", "id": "f8657:c5:m14"}
{"signature": "def __init__(self, out, indent=<NUM_LIT:0>, as_utf8=False, as_one_line=False,<EOL>pointy_brackets=False, use_index_order=False, float_format=None,<EOL>use_field_number=False):", "body": "self.out = out<EOL>self.indent = indent<EOL>self.as_utf8 = as_utf8<EOL>self.as_one_line = as_one_line<EOL>self.pointy_brackets = pointy_brackets<EOL>self.use_index_order = use_index_order<EOL>self.float_format = float_format<EOL>self.use_field_number = use_field_number<EOL>", "docstring": "Initialize the Printer.\n\n        Floating point values can be formatted compactly with 15 digits of\n        precision (which is the most that IEEE 754 \"double\" can guarantee)\n        using float_format='.15g'. To ensure that converting to text and back to a\n        proto will result in an identical value, float_format='.17g' should be used.\n\n        Args:\n          out: To record the text format result.\n          indent: The indent level for pretty print.\n          as_utf8: Produce text output in UTF8 format.\n          as_one_line: Don't introduce newlines between fields.\n          pointy_brackets: If True, use angle brackets instead of curly braces for\n            nesting.\n          use_index_order: If True, print fields of a proto message using the order\n            defined in source code instead of the field number. By default, use the\n            field number order.\n          float_format: If set, use this to specify floating point number formatting\n            (per the \"Format Specification Mini-Language\"); otherwise, str() is\n            used.\n          use_field_number: If True, print field numbers instead of names.", "id": "f8657:c3:m0"}
{"signature": "def _SkipFieldValue(tokenizer):", "body": "<EOL>if tokenizer.TryConsumeByteString():<EOL><INDENT>while tokenizer.TryConsumeByteString():<EOL><INDENT>pass<EOL><DEDENT>return<EOL><DEDENT>if (not tokenizer.TryConsumeIdentifier() and<EOL>not tokenizer.TryConsumeInt64() and<EOL>not tokenizer.TryConsumeUint64() and<EOL>not tokenizer.TryConsumeFloat()):<EOL><INDENT>raise ParseError('<STR_LIT>' + tokenizer.token)<EOL><DEDENT>", "docstring": "Skips over a field value.\n\n    Args:\n      tokenizer: A tokenizer to parse the field name and values.\n\n    Raises:\n      ParseError: In case an invalid field value is found.", "id": "f8657:m12"}
{"signature": "def ParseErrorPreviousToken(self, message):", "body": "return ParseError('<STR_LIT>' % (<EOL>self._previous_line + <NUM_LIT:1>, self._previous_column + <NUM_LIT:1>, message))<EOL>", "docstring": "Creates and *returns* a ParseError for the previously read token.\n\n        Args:\n          message: A message to set for the exception.\n\n        Returns:\n          A ParseError instance.", "id": "f8657:c5:m23"}
{"signature": "def _SkipField(tokenizer):", "body": "if tokenizer.TryConsume('<STR_LIT:[>'):<EOL><INDENT>tokenizer.ConsumeIdentifier()<EOL>while tokenizer.TryConsume('<STR_LIT:.>'):<EOL><INDENT>tokenizer.ConsumeIdentifier()<EOL><DEDENT>tokenizer.Consume('<STR_LIT:]>')<EOL><DEDENT>else:<EOL><INDENT>tokenizer.ConsumeIdentifier()<EOL><DEDENT>_SkipFieldContents(tokenizer)<EOL>if not tokenizer.TryConsume('<STR_LIT:U+002C>'):<EOL><INDENT>tokenizer.TryConsume('<STR_LIT:;>')<EOL><DEDENT>", "docstring": "Skips over a complete field (name and value/message).\n\n    Args:\n      tokenizer: A tokenizer to parse the field name and values.", "id": "f8657:m10"}
{"signature": "def _SkipFieldContents(tokenizer):", "body": "<EOL>if tokenizer.TryConsume('<STR_LIT::>') and not tokenizer.LookingAt(<EOL>'<STR_LIT:{>') and not tokenizer.LookingAt('<STR_LIT:<>'):<EOL><INDENT>_SkipFieldValue(tokenizer)<EOL><DEDENT>else:<EOL><INDENT>_SkipFieldMessage(tokenizer)<EOL><DEDENT>", "docstring": "Skips over contents (value or message) of a field.\n\n    Args:\n      tokenizer: A tokenizer to parse the field name and values.", "id": "f8657:m9"}
{"signature": "def ParseEnum(field, value):", "body": "enum_descriptor = field.enum_type<EOL>try:<EOL><INDENT>number = int(value, <NUM_LIT:0>)<EOL><DEDENT>except ValueError:<EOL><INDENT>enum_value = enum_descriptor.values_by_name.get(value, None)<EOL>if enum_value is None:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' % (<EOL>enum_descriptor.full_name, value))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>enum_value = enum_descriptor.values_by_number.get(number, None)<EOL>if enum_value is None:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' % (<EOL>enum_descriptor.full_name, number))<EOL><DEDENT><DEDENT>return enum_value.number<EOL>", "docstring": "Parse an enum value.\n\n    The value can be specified by a number (the enum value), or by\n    a string literal (the enum name).\n\n    Args:\n      field: Enum field descriptor.\n      value: String value.\n\n    Returns:\n      Enum value number.\n\n    Raises:\n      ValueError: If the enum value could not be parsed.", "id": "f8657:m16"}
{"signature": "def ConsumeInt64(self):", "body": "try:<EOL><INDENT>result = ParseInteger(self.token, is_signed=True, is_long=True)<EOL><DEDENT>except ValueError as e:<EOL><INDENT>raise self._ParseError(str(e))<EOL><DEDENT>self.NextToken()<EOL>return result<EOL>", "docstring": "Consumes a signed 64bit integer number.\n\n        Returns:\n          The integer parsed.\n\n        Raises:\n          ParseError: If a signed 64bit integer couldn't be consumed.", "id": "f8657:c5:m12"}
{"signature": "def MergeLines(lines, message, allow_unknown_extension=False,<EOL>allow_field_number=False):", "body": "parser = _Parser(allow_unknown_extension, allow_field_number)<EOL>return parser.MergeLines(lines, message)<EOL>", "docstring": "Parses an text representation of a protocol message into a message.\n\n    Args:\n      lines: An iterable of lines of a message's text representation.\n      message: A protocol buffer message to merge into.\n      allow_unknown_extension: if True, skip over missing extensions and keep\n        parsing\n      allow_field_number: if True, both field number and field name are allowed.\n\n    Returns:\n      The same message passed as argument.\n\n    Raises:\n      ParseError: On text parsing problems.", "id": "f8657:m8"}
{"signature": "def MessageToString(message, as_utf8=False, as_one_line=False,<EOL>pointy_brackets=False, use_index_order=False,<EOL>float_format=None, use_field_number=False):", "body": "out = TextWriter(as_utf8)<EOL>printer = _Printer(out, <NUM_LIT:0>, as_utf8, as_one_line,<EOL>pointy_brackets, use_index_order, float_format,<EOL>use_field_number)<EOL>printer.PrintMessage(message)<EOL>result = out.getvalue()<EOL>out.close()<EOL>if as_one_line:<EOL><INDENT>return result.rstrip()<EOL><DEDENT>return result<EOL>", "docstring": "Convert protobuf message to text format.\n\n    Floating point values can be formatted compactly with 15 digits of\n    precision (which is the most that IEEE 754 \"double\" can guarantee)\n    using float_format='.15g'. To ensure that converting to text and back to a\n    proto will result in an identical value, float_format='.17g' should be used.\n\n    Args:\n      message: The protocol buffers message.\n      as_utf8: Produce text output in UTF8 format.\n      as_one_line: Don't introduce newlines between fields.\n      pointy_brackets: If True, use angle brackets instead of curly braces for\n        nesting.\n      use_index_order: If True, print fields of a proto message using the order\n        defined in source code instead of the field number. By default, use the\n        field number order.\n      float_format: If set, use this to specify floating point number formatting\n        (per the \"Format Specification Mini-Language\"); otherwise, str() is used.\n      use_field_number: If True, print field numbers instead of names.\n\n    Returns:\n      A string of the text formatted protocol buffer message.", "id": "f8657:m0"}
{"signature": "def PrintMessage(self, message):", "body": "fields = message.ListFields()<EOL>if self.use_index_order:<EOL><INDENT>fields.sort(key=lambda x: x[<NUM_LIT:0>].index)<EOL><DEDENT>for field, value in fields:<EOL><INDENT>if _IsMapEntry(field):<EOL><INDENT>for key in sorted(value):<EOL><INDENT>entry_submsg = field.message_type._concrete_class(<EOL>key=key, value=value[key])<EOL>self.PrintField(field, entry_submsg)<EOL><DEDENT><DEDENT>elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:<EOL><INDENT>for element in value:<EOL><INDENT>self.PrintField(field, element)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.PrintField(field, value)<EOL><DEDENT><DEDENT>", "docstring": "Convert protobuf message to text format.\n\n        Args:\n          message: The protocol buffers message.", "id": "f8657:c3:m1"}
{"signature": "def ConsumeUint32(self):", "body": "try:<EOL><INDENT>result = ParseInteger(self.token, is_signed=False, is_long=False)<EOL><DEDENT>except ValueError as e:<EOL><INDENT>raise self._ParseError(str(e))<EOL><DEDENT>self.NextToken()<EOL>return result<EOL>", "docstring": "Consumes an unsigned 32bit integer number.\n\n        Returns:\n          The integer parsed.\n\n        Raises:\n          ParseError: If an unsigned 32bit integer couldn't be consumed.", "id": "f8657:c5:m10"}
{"signature": "def _ConvertValueMessage(value, message):", "body": "if isinstance(value, dict):<EOL><INDENT>_ConvertStructMessage(value, message.struct_value)<EOL><DEDENT>elif isinstance(value, list):<EOL><INDENT>_ConvertListValueMessage(value, message.list_value)<EOL><DEDENT>elif value is None:<EOL><INDENT>message.null_value = <NUM_LIT:0><EOL><DEDENT>elif isinstance(value, bool):<EOL><INDENT>message.bool_value = value<EOL><DEDENT>elif isinstance(value, six.string_types):<EOL><INDENT>message.string_value = value<EOL><DEDENT>elif isinstance(value, _INT_OR_FLOAT):<EOL><INDENT>message.number_value = value<EOL><DEDENT>else:<EOL><INDENT>raise ParseError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Convert a JSON representation into Value message.", "id": "f8658:m19"}
{"signature": "def _GenericMessageToJsonObject(message, unused_including_default):", "body": "<EOL>return message.ToJsonString()<EOL>", "docstring": "Converts message by ToJsonString according to Proto3 JSON Specification.", "id": "f8658:m7"}
{"signature": "def _ConvertGenericMessage(value, message):", "body": "<EOL>message.FromJsonString(value)<EOL>", "docstring": "Convert a JSON representation into message with FromJsonString.", "id": "f8658:m18"}
{"signature": "def _ConvertInteger(value):", "body": "if isinstance(value, float):<EOL><INDENT>raise ParseError('<STR_LIT>'.format(value))<EOL><DEDENT>if isinstance(value, six.text_type) and value.find('<STR_LIT:U+0020>') != -<NUM_LIT:1>:<EOL><INDENT>raise ParseError('<STR_LIT>'.format(value))<EOL><DEDENT>return int(value)<EOL>", "docstring": "Convert an integer.\n\n    Args:\n      value: A scalar value to convert.\n\n    Returns:\n      The integer value.\n\n    Raises:\n      ParseError: If an integer couldn't be consumed.", "id": "f8658:m25"}
{"signature": "def _ConvertWrapperMessage(value, message):", "body": "field = message.DESCRIPTOR.fields_by_name['<STR_LIT:value>']<EOL>setattr(message, '<STR_LIT:value>', _ConvertScalarFieldValue(value, field))<EOL>", "docstring": "Convert a JSON representation into Wrapper message.", "id": "f8658:m22"}
{"signature": "def Parse(text, message):", "body": "if not isinstance(text, six.text_type): text = text.decode('<STR_LIT:utf-8>')<EOL>try:<EOL><INDENT>if sys.version_info < (<NUM_LIT:2>, <NUM_LIT:7>):<EOL><INDENT>js = json.loads(text)<EOL><DEDENT>else:<EOL><INDENT>js = json.loads(text, object_pairs_hook=_DuplicateChecker)<EOL><DEDENT><DEDENT>except ValueError as e:<EOL><INDENT>raise ParseError('<STR_LIT>'.format(str(e)))<EOL><DEDENT>_ConvertMessage(js, message)<EOL>return message<EOL>", "docstring": "Parses a JSON representation of a protocol message into a message.\n\n    Args:\n      text: Message JSON representation.\n      message: A protocol beffer message to merge into.\n\n    Returns:\n      The same message passed as argument.\n\n    Raises::\n      ParseError: On JSON parsing problems.", "id": "f8658:m14"}
{"signature": "def _RegularMessageToJsonObject(message, js, including_default_value_fields):", "body": "fields = message.ListFields()<EOL>include_default = including_default_value_fields<EOL>try:<EOL><INDENT>for field, value in fields:<EOL><INDENT>name = field.camelcase_name<EOL>if _IsMapEntry(field):<EOL><INDENT>v_field = field.message_type.fields_by_name['<STR_LIT:value>']<EOL>js_map = {}<EOL>for key in value:<EOL><INDENT>if isinstance(key, bool):<EOL><INDENT>if key:<EOL><INDENT>recorded_key = '<STR_LIT:true>'<EOL><DEDENT>else:<EOL><INDENT>recorded_key = '<STR_LIT:false>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>recorded_key = key<EOL><DEDENT>js_map[recorded_key] = _FieldToJsonObject(<EOL>v_field, value[key], including_default_value_fields)<EOL><DEDENT>js[name] = js_map<EOL><DEDENT>elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:<EOL><INDENT>js[name] = [_FieldToJsonObject(field, k, include_default)<EOL>for k in value]<EOL><DEDENT>else:<EOL><INDENT>js[name] = _FieldToJsonObject(field, value, include_default)<EOL><DEDENT><DEDENT>if including_default_value_fields:<EOL><INDENT>message_descriptor = message.DESCRIPTOR<EOL>for field in message_descriptor.fields:<EOL><INDENT>if ((field.label != descriptor.FieldDescriptor.LABEL_REPEATED and<EOL>field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE) or<EOL>field.containing_oneof):<EOL><INDENT>continue<EOL><DEDENT>name = field.camelcase_name<EOL>if name in js:<EOL><INDENT>continue<EOL><DEDENT>if _IsMapEntry(field):<EOL><INDENT>js[name] = {}<EOL><DEDENT>elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:<EOL><INDENT>js[name] = []<EOL><DEDENT>else:<EOL><INDENT>js[name] = _FieldToJsonObject(field, field.default_value)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>except ValueError as e:<EOL><INDENT>raise SerializeToJsonError(<EOL>'<STR_LIT>'.format(field.name, e))<EOL><DEDENT>return js<EOL>", "docstring": "Converts normal message according to Proto3 JSON Specification.", "id": "f8658:m3"}
{"signature": "def _AnyMessageToJsonObject(message, including_default):", "body": "if not message.ListFields():<EOL><INDENT>return {}<EOL><DEDENT>js = {}<EOL>type_url = message.type_url<EOL>js['<STR_LIT>'] = type_url<EOL>sub_message = _CreateMessageFromTypeUrl(type_url)<EOL>sub_message.ParseFromString(message.value)<EOL>message_descriptor = sub_message.DESCRIPTOR<EOL>full_name = message_descriptor.full_name<EOL>if _IsWrapperMessage(message_descriptor):<EOL><INDENT>js['<STR_LIT:value>'] = _WrapperMessageToJsonObject(sub_message)<EOL>return js<EOL><DEDENT>if full_name in _WKTJSONMETHODS:<EOL><INDENT>js['<STR_LIT:value>'] = _WKTJSONMETHODS[full_name][<NUM_LIT:0>](sub_message, including_default)<EOL>return js<EOL><DEDENT>return _RegularMessageToJsonObject(sub_message, js, including_default)<EOL>", "docstring": "Converts Any message according to Proto3 JSON Specification.", "id": "f8658:m5"}
{"signature": "def _ConvertMessage(value, message):", "body": "message_descriptor = message.DESCRIPTOR<EOL>full_name = message_descriptor.full_name<EOL>if _IsWrapperMessage(message_descriptor):<EOL><INDENT>_ConvertWrapperMessage(value, message)<EOL><DEDENT>elif full_name in _WKTJSONMETHODS:<EOL><INDENT>_WKTJSONMETHODS[full_name][<NUM_LIT:1>](value, message)<EOL><DEDENT>else:<EOL><INDENT>_ConvertFieldValuePair(value, message)<EOL><DEDENT>", "docstring": "Convert a JSON object into a message.\n\n    Args:\n      value: A JSON object.\n      message: A WKT or regular protocol message to record the data.\n\n    Raises:\n      ParseError: In case of convert problems.", "id": "f8658:m16"}
{"signature": "def _ConvertFloat(value):", "body": "if value == '<STR_LIT>':<EOL><INDENT>raise ParseError('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>return float(value)<EOL><DEDENT>except ValueError:<EOL><INDENT>if value == _NEG_INFINITY:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>elif value == _INFINITY:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>elif value == _NAN:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise ParseError('<STR_LIT>'.format(value))<EOL><DEDENT><DEDENT>", "docstring": "Convert an floating point number.", "id": "f8658:m26"}
{"signature": "def GetMessages(self, files):", "body": "result = {}<EOL>for f in files:<EOL><INDENT>result.update(self._symbols_by_file[f])<EOL><DEDENT>return result<EOL>", "docstring": "Gets all the messages from a specified file.\n\n        This will find and resolve dependencies, failing if they are not registered\n        in the symbol database.\n\n\n        Args:\n          files: The file names to extract messages from.\n\n        Returns:\n          A dictionary mapping proto names to the message classes. This will include\n          any dependent messages as well as any messages defined in the same file as\n          a specified message.\n\n        Raises:\n          KeyError: if a file could not be found.", "id": "f8659:c0:m6"}
{"signature": "def RegisterEnumDescriptor(self, enum_descriptor):", "body": "self.pool.AddEnumDescriptor(enum_descriptor)<EOL>return enum_descriptor<EOL>", "docstring": "Registers the given enum descriptor in the local database.\n\n        Args:\n          enum_descriptor: a descriptor.EnumDescriptor.\n\n        Returns:\n          The provided descriptor.", "id": "f8659:c0:m2"}
{"signature": "def GetSymbol(self, symbol):", "body": "return self._symbols[symbol]<EOL>", "docstring": "Tries to find a symbol in the local database.\n\n        Currently, this method only returns message.Message instances, however, if\n        may be extended in future to support other symbol types.\n\n        Args:\n          symbol: A str, a protocol buffer symbol.\n\n        Returns:\n          A Python class corresponding to the symbol.\n\n        Raises:\n          KeyError: if the symbol could not be found.", "id": "f8659:c0:m4"}
{"signature": "def MakeSimpleProtoClass(fields, full_name=None, pool=None):", "body": "factory = message_factory.MessageFactory(pool=pool)<EOL>if full_name is not None:<EOL><INDENT>try:<EOL><INDENT>proto_cls = _GetMessageFromFactory(factory, full_name)<EOL>return proto_cls<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>field_items = fields.items()<EOL>if not isinstance(fields, OrderedDict):<EOL><INDENT>field_items = sorted(field_items)<EOL><DEDENT>fields_hash = hashlib.sha1()<EOL>for f_name, f_type in field_items:<EOL><INDENT>fields_hash.update(f_name.encode('<STR_LIT:utf-8>'))<EOL>fields_hash.update(str(f_type).encode('<STR_LIT:utf-8>'))<EOL><DEDENT>proto_file_name = fields_hash.hexdigest() + '<STR_LIT>'<EOL>if full_name is None:<EOL><INDENT>full_name = ('<STR_LIT>' +<EOL>fields_hash.hexdigest())<EOL>try:<EOL><INDENT>proto_cls = _GetMessageFromFactory(factory, full_name)<EOL>return proto_cls<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>factory.pool.Add(<EOL>_MakeFileDescriptorProto(proto_file_name, full_name, field_items))<EOL>return _GetMessageFromFactory(factory, full_name)<EOL>", "docstring": "Create a Protobuf class whose fields are basic types.\n\n    Note: this doesn't validate field names!\n\n    Args:\n      fields: dict of {name: field_type} mappings for each field in the proto. If\n          this is an OrderedDict the order will be maintained, otherwise the\n          fields will be sorted by name.\n      full_name: optional str, the fully-qualified name of the proto type.\n      pool: optional DescriptorPool instance.\n    Returns:\n      a class, the new protobuf class with a FileDescriptor.", "id": "f8661:m1"}
{"signature": "def __init__(self, name, full_name, index, containing_type, fields):", "body": "self.name = name<EOL>self.full_name = full_name<EOL>self.index = index<EOL>self.containing_type = containing_type<EOL>self.fields = fields<EOL>", "docstring": "Arguments are as described in the attribute description above.", "id": "f8663:c8:m0"}
{"signature": "def __init__(self, options, options_class_name, name, full_name,<EOL>file, containing_type, serialized_start=None,<EOL>serialized_end=None):", "body": "super(_NestedDescriptorBase, self).__init__(<EOL>options, options_class_name)<EOL>self.name = name<EOL>self.full_name = full_name<EOL>self.file = file<EOL>self.containing_type = containing_type<EOL>self._serialized_start = serialized_start<EOL>self._serialized_end = serialized_end<EOL>", "docstring": "Constructor.\n\n        Args:\n          options: Protocol message options or None\n            to use default message options.\n          options_class_name: (str) The class name of the above options.\n\n          name: (str) Name of this protocol message type.\n          full_name: (str) Fully-qualified name of this protocol message type,\n            which will include protocol \"package\" name and the name of any\n            enclosing types.\n          file: (FileDescriptor) Reference to file info.\n          containing_type: if provided, this is a nested descriptor, with this\n            descriptor as parent, otherwise None.\n          serialized_start: The start index (inclusive) in block in the\n            file.serialized_pb that describes this descriptor.\n          serialized_end: The end index (exclusive) in block in the\n            file.serialized_pb that describes this descriptor.", "id": "f8663:c3:m0"}
{"signature": "def GetTopLevelContainingType(self):", "body": "desc = self<EOL>while desc.containing_type is not None:<EOL><INDENT>desc = desc.containing_type<EOL><DEDENT>return desc<EOL>", "docstring": "Returns the root if this is a nested type, or itself if its the root.", "id": "f8663:c3:m1"}
{"signature": "def _SetOptions(self, options, options_class_name):", "body": "self._options = options<EOL>self._options_class_name = options_class_name<EOL>self.has_options = options is not None<EOL>", "docstring": "Sets the descriptor's options\n\n        This function is used in generated proto2 files to update descriptor\n        options. It must not be used outside proto2.", "id": "f8663:c2:m1"}
{"signature": "def EnumValueName(self, enum, value):", "body": "return self.enum_types_by_name[enum].values_by_number[value].name<EOL>", "docstring": "Returns the string name of an enum value.\n\n        This is just a small helper method to simplify a common operation.\n\n        Args:\n          enum: string name of the Enum.\n          value: int, value of the enum.\n\n        Returns:\n          string name of the enum value.\n\n        Raises:\n          KeyError if either the Enum doesn't exist or the value is not a valid\n            value for the enum.", "id": "f8663:c4:m2"}
{"signature": "def CopyToProto(self, proto):", "body": "if (self.file is not None and<EOL>self._serialized_start is not None and<EOL>self._serialized_end is not None):<EOL><INDENT>proto.ParseFromString(self.file.serialized_pb[<EOL>self._serialized_start:self._serialized_end])<EOL><DEDENT>else:<EOL><INDENT>raise Error('<STR_LIT>')<EOL><DEDENT>", "docstring": "Copies this to the matching proto in descriptor_pb2.\n\n        Args:\n          proto: An empty proto instance from descriptor_pb2.\n\n        Raises:\n          Error: If self couldnt be serialized, due to to few constructor arguments.", "id": "f8663:c3:m2"}
{"signature": "def CopyToProto(self, proto):", "body": "<EOL>super(EnumDescriptor, self).CopyToProto(proto)<EOL>", "docstring": "Copies this to a descriptor_pb2.EnumDescriptorProto.\n\n        Args:\n          proto: An empty descriptor_pb2.EnumDescriptorProto.", "id": "f8663:c6:m1"}
{"signature": "def __init__(self, name, full_name, filename, containing_type, fields,<EOL>nested_types, enum_types, extensions, options=None,<EOL>is_extendable=True, extension_ranges=None, oneofs=None,<EOL>file=None, serialized_start=None, serialized_end=None,<EOL>syntax=None):  ", "body": "super(Descriptor, self).__init__(<EOL>options, '<STR_LIT>', name, full_name, file,<EOL>containing_type, serialized_start=serialized_start,<EOL>serialized_end=serialized_end)<EOL>self.fields = fields<EOL>for field in self.fields:<EOL><INDENT>field.containing_type = self<EOL><DEDENT>self.fields_by_number = dict((f.number, f) for f in fields)<EOL>self.fields_by_name = dict((f.name, f) for f in fields)<EOL>self._fields_by_camelcase_name = None<EOL>self.nested_types = nested_types<EOL>for nested_type in nested_types:<EOL><INDENT>nested_type.containing_type = self<EOL><DEDENT>self.nested_types_by_name = dict((t.name, t) for t in nested_types)<EOL>self.enum_types = enum_types<EOL>for enum_type in self.enum_types:<EOL><INDENT>enum_type.containing_type = self<EOL><DEDENT>self.enum_types_by_name = dict((t.name, t) for t in enum_types)<EOL>self.enum_values_by_name = dict(<EOL>(v.name, v) for t in enum_types for v in t.values)<EOL>self.extensions = extensions<EOL>for extension in self.extensions:<EOL><INDENT>extension.extension_scope = self<EOL><DEDENT>self.extensions_by_name = dict((f.name, f) for f in extensions)<EOL>self.is_extendable = is_extendable<EOL>self.extension_ranges = extension_ranges<EOL>self.oneofs = oneofs if oneofs is not None else []<EOL>self.oneofs_by_name = dict((o.name, o) for o in self.oneofs)<EOL>for oneof in self.oneofs:<EOL><INDENT>oneof.containing_type = self<EOL><DEDENT>self.syntax = syntax or \"<STR_LIT>\"<EOL>", "docstring": "Arguments to __init__() are as described in the description\n        of Descriptor fields above.\n\n        Note that filename is an obsolete argument, that is not used anymore.\n        Please use file.name to access this as an 
attribute.", "id": "f8663:c4:m0"}
{"signature": "def _ParseOptions(message, string):", "body": "message.ParseFromString(string)<EOL>return message<EOL>", "docstring": "Parses serialized options.\n\n    This helper function is used to parse serialized options in generated\n    proto2 files. It must not be used outside proto2.", "id": "f8663:m0"}
{"signature": "def GetOptions(self):", "body": "if self._options:<EOL><INDENT>return self._options<EOL><DEDENT>from typy.google.protobuf import descriptor_pb2<EOL>try:<EOL><INDENT>options_class = getattr(descriptor_pb2, self._options_class_name)<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise RuntimeError('<STR_LIT>' %<EOL>(self._options_class_name))<EOL><DEDENT>self._options = options_class()<EOL>return self._options<EOL>", "docstring": "Retrieves descriptor options.\n\n        This method returns the options set or creates the default options for the\n        descriptor.", "id": "f8663:c2:m2"}
{"signature": "def __init__(self, name, full_name, index, number, type, cpp_type, label,<EOL>default_value, message_type, enum_type, containing_type,<EOL>is_extension, extension_scope, options=None,<EOL>has_default_value=True, containing_oneof=None):", "body": "super(FieldDescriptor, self).__init__(options, '<STR_LIT>')<EOL>self.name = name<EOL>self.full_name = full_name<EOL>self._camelcase_name = None<EOL>self.index = index<EOL>self.number = number<EOL>self.type = type<EOL>self.cpp_type = cpp_type<EOL>self.label = label<EOL>self.has_default_value = has_default_value<EOL>self.default_value = default_value<EOL>self.containing_type = containing_type<EOL>self.message_type = message_type<EOL>self.enum_type = enum_type<EOL>self.is_extension = is_extension<EOL>self.extension_scope = extension_scope<EOL>self.containing_oneof = containing_oneof<EOL>if api_implementation.Type() == '<STR_LIT>':<EOL><INDENT>if is_extension:<EOL><INDENT>self._cdescriptor = _message.default_pool.FindExtensionByName(full_name)<EOL><DEDENT>else:<EOL><INDENT>self._cdescriptor = _message.default_pool.FindFieldByName(full_name)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._cdescriptor = None<EOL><DEDENT>", "docstring": "The arguments are as described in the description of FieldDescriptor\n        attributes above.\n\n        Note that containing_type may be None, and may be set later if necessary\n        (to deal with circular references between message types, for example).\n        Likewise for extension_scope.", "id": "f8663:c5:m0"}
{"signature": "def MakeDescriptor(desc_proto, package='<STR_LIT>', build_file_if_cpp=True,<EOL>syntax=None):", "body": "if api_implementation.Type() == '<STR_LIT>' and build_file_if_cpp:<EOL><INDENT>from typy.google.protobuf import descriptor_pb2<EOL>file_descriptor_proto = descriptor_pb2.FileDescriptorProto()<EOL>file_descriptor_proto.message_type.add().MergeFrom(desc_proto)<EOL>proto_name = str(uuid.uuid4())<EOL>if package:<EOL><INDENT>file_descriptor_proto.name = os.path.join(package.replace('<STR_LIT:.>', '<STR_LIT:/>'),<EOL>proto_name + '<STR_LIT>')<EOL>file_descriptor_proto.package = package<EOL><DEDENT>else:<EOL><INDENT>file_descriptor_proto.name = proto_name + '<STR_LIT>'<EOL><DEDENT>_message.default_pool.Add(file_descriptor_proto)<EOL>result = _message.default_pool.FindFileByName(file_descriptor_proto.name)<EOL>if _USE_C_DESCRIPTORS:<EOL><INDENT>return result.message_types_by_name[desc_proto.name]<EOL><DEDENT><DEDENT>full_message_name = [desc_proto.name]<EOL>if package: full_message_name.insert(<NUM_LIT:0>, package)<EOL>enum_types = {}<EOL>for enum_proto in desc_proto.enum_type:<EOL><INDENT>full_name = '<STR_LIT:.>'.join(full_message_name + [enum_proto.name])<EOL>enum_desc = EnumDescriptor(<EOL>enum_proto.name, full_name, None, [<EOL>EnumValueDescriptor(enum_val.name, ii, enum_val.number)<EOL>for ii, enum_val in enumerate(enum_proto.value)])<EOL>enum_types[full_name] = enum_desc<EOL><DEDENT>nested_types = {}<EOL>for nested_proto in desc_proto.nested_type:<EOL><INDENT>full_name = '<STR_LIT:.>'.join(full_message_name + [nested_proto.name])<EOL>nested_desc = MakeDescriptor(nested_proto,<EOL>package='<STR_LIT:.>'.join(full_message_name),<EOL>build_file_if_cpp=False,<EOL>syntax=syntax)<EOL>nested_types[full_name] = nested_desc<EOL><DEDENT>fields = []<EOL>for field_proto in desc_proto.field:<EOL><INDENT>full_name = '<STR_LIT:.>'.join(full_message_name + [field_proto.name])<EOL>enum_desc = None<EOL>nested_desc = None<EOL>if 
field_proto.HasField('<STR_LIT>'):<EOL><INDENT>type_name = field_proto.type_name<EOL>full_type_name = '<STR_LIT:.>'.join(full_message_name +<EOL>[type_name[type_name.rfind('<STR_LIT:.>')+<NUM_LIT:1>:]])<EOL>if full_type_name in nested_types:<EOL><INDENT>nested_desc = nested_types[full_type_name]<EOL><DEDENT>elif full_type_name in enum_types:<EOL><INDENT>enum_desc = enum_types[full_type_name]<EOL><DEDENT><DEDENT>field = FieldDescriptor(<EOL>field_proto.name, full_name, field_proto.number - <NUM_LIT:1>,<EOL>field_proto.number, field_proto.type,<EOL>FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type),<EOL>field_proto.label, None, nested_desc, enum_desc, None, False, None,<EOL>options=field_proto.options, has_default_value=False)<EOL>fields.append(field)<EOL><DEDENT>desc_name = '<STR_LIT:.>'.join(full_message_name)<EOL>return Descriptor(desc_proto.name, desc_name, None, None, fields,<EOL>list(nested_types.values()), list(enum_types.values()), [],<EOL>options=desc_proto.options)<EOL>", "docstring": "Make a protobuf Descriptor given a DescriptorProto protobuf.\n\n    Handles nested descriptors. Note that this is limited to the scope of defining\n    a message inside of another message. Composite fields can currently only be\n    resolved if the message is defined in the same scope as the field.\n\n    Args:\n      desc_proto: The descriptor_pb2.DescriptorProto protobuf message.\n      package: Optional package name for the new message Descriptor (string).\n      build_file_if_cpp: Update the C++ descriptor pool if api matches.\n                         Set to False on recursion, so no duplicates are created.\n      syntax: The syntax/semantics that should be used.  Set to \"proto3\" to get\n              proto3 field presence semantics.\n    Returns:\n      A Descriptor for protobuf messages.", "id": "f8663:m2"}
{"signature": "def GetMessages(file_protos):", "body": "for file_proto in file_protos:<EOL><INDENT>_FACTORY.pool.Add(file_proto)<EOL><DEDENT>return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos])<EOL>", "docstring": "Builds a dictionary of all the messages available in a set of files.\n\n    Args:\n      file_protos: A sequence of file protos to build messages out of.\n\n    Returns:\n      A dictionary mapping proto names to the message classes. This will include\n      any dependent messages as well as any messages defined in the same file as\n      a specified message.", "id": "f8664:m0"}
{"signature": "def __str__(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Outputs a human-readable representation of the message.", "id": "f8665:c3:m4"}
{"signature": "def CopyFrom(self, other_msg):", "body": "if self is other_msg:<EOL><INDENT>return<EOL><DEDENT>self.Clear()<EOL>self.MergeFrom(other_msg)<EOL>", "docstring": "Copies the content of the specified message into the current message.\n\n        The method clears the current message and then merges the specified\n        message using MergeFrom.\n\n        Args:\n          other_msg: Message to copy into the current one.", "id": "f8665:c3:m7"}
{"signature": "def ByteSize(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Returns the serialized size of this message.\n        Recursively calls ByteSize() on all contained messages.", "id": "f8665:c3:m22"}
{"signature": "def __getstate__(self):", "body": "return dict(serialized=self.SerializePartialToString())<EOL>", "docstring": "Support the pickle protocol.", "id": "f8665:c3:m24"}
{"signature": "def IsInitialized(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Checks if the message is initialized.\n\n        Returns:\n          The method returns True if the message is initialized (i.e. all of its\n          required fields are set).", "id": "f8665:c3:m10"}
{"signature": "def ParseFromString(self, serialized):", "body": "self.Clear()<EOL>self.MergeFromString(serialized)<EOL>", "docstring": "Parse serialized protocol buffer data into this message.\n\n        Like MergeFromString(), except we clear the object first and\n        do not return the value that MergeFromString returns.", "id": "f8665:c3:m12"}
{"signature": "def WhichOneof(self, oneof_group):", "body": "raise NotImplementedError<EOL>", "docstring": "Returns the name of the field that is set inside a oneof group, or\n        None if no field is set.  If no group with the given name exists, ValueError\n        will be raised.", "id": "f8665:c3:m18"}
{"signature": "def SerializePartialToString(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Serializes the protocol message to a binary string.\n\n        This method is similar to SerializeToString but doesn't check if the\n        message is initialized.\n\n        Returns:\n          A string representation of the partial message.", "id": "f8665:c3:m14"}
{"signature": "def __setstate__(self, state):", "body": "self.__init__()<EOL>self.ParseFromString(state['<STR_LIT>'])<EOL>", "docstring": "Support the pickle protocol.", "id": "f8665:c3:m25"}
{"signature": "def AddSerializedFile(self, serialized_file_desc_proto):", "body": "<EOL>from typy.google.protobuf import descriptor_pb2<EOL>file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(<EOL>serialized_file_desc_proto)<EOL>self.Add(file_desc_proto)<EOL>", "docstring": "Adds the FileDescriptorProto and its types to this pool.\n\n        Args:\n          serialized_file_desc_proto: A bytes string, serialization of the\n            FileDescriptorProto to add.", "id": "f8666:c0:m2"}
{"signature": "def _ExtractSymbols(self, descriptors):", "body": "for desc in descriptors:<EOL><INDENT>yield (_PrefixWithDot(desc.full_name), desc)<EOL>for symbol in self._ExtractSymbols(desc.nested_types):<EOL><INDENT>yield symbol<EOL><DEDENT>for enum in desc.enum_types:<EOL><INDENT>yield (_PrefixWithDot(enum.full_name), enum)<EOL><DEDENT><DEDENT>", "docstring": "Pulls out all the symbols from descriptor protos.\n\n        Args:\n          descriptors: The messages to extract descriptors from.\n        Yields:\n          A two element tuple of the type name and descriptor object.", "id": "f8666:c0:m19"}
{"signature": "def AddDescriptor(self, desc):", "body": "if not isinstance(desc, descriptor.Descriptor):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>self._descriptors[desc.full_name] = desc<EOL>self.AddFileDescriptor(desc.file)<EOL>", "docstring": "Adds a Descriptor to the pool, non-recursively.\n\n        If the Descriptor contains nested messages or enums, the caller must\n        explicitly register them. This method also registers the FileDescriptor\n        associated with the message.\n\n        Args:\n          desc: A Descriptor.", "id": "f8666:c0:m3"}
{"signature": "def __init__(self, descriptor_db=None):", "body": "self._internal_db = descriptor_database.DescriptorDatabase()<EOL>self._descriptor_db = descriptor_db<EOL>self._descriptors = {}<EOL>self._enum_descriptors = {}<EOL>self._file_descriptors = {}<EOL>", "docstring": "Initializes a Pool of proto buffs.\n\n        The descriptor_db argument to the constructor is provided to allow\n        specialized file descriptor proto lookup code to be triggered on demand. An\n        example would be an implementation which will read and compile a file\n        specified in a call to FindFileByName() and not require the call to Add()\n        at all. Results from this database will be cached internally here as well.\n\n        Args:\n          descriptor_db: A secondary source of file descriptors.", "id": "f8666:c0:m0"}
{"signature": "def _SetAllFieldTypes(self, package, desc_proto, scope):", "body": "package = _PrefixWithDot(package)<EOL>main_desc = self._GetTypeFromScope(package, desc_proto.name, scope)<EOL>if package == '<STR_LIT:.>':<EOL><INDENT>nested_package = _PrefixWithDot(desc_proto.name)<EOL><DEDENT>else:<EOL><INDENT>nested_package = '<STR_LIT:.>'.join([package, desc_proto.name])<EOL><DEDENT>for field_proto, field_desc in zip(desc_proto.field, main_desc.fields):<EOL><INDENT>self._SetFieldType(field_proto, field_desc, nested_package, scope)<EOL><DEDENT>for extension_proto, extension_desc in (<EOL>zip(desc_proto.extension, main_desc.extensions)):<EOL><INDENT>extension_desc.containing_type = self._GetTypeFromScope(<EOL>nested_package, extension_proto.extendee, scope)<EOL>self._SetFieldType(extension_proto, extension_desc, nested_package, scope)<EOL><DEDENT>for nested_type in desc_proto.nested_type:<EOL><INDENT>self._SetAllFieldTypes(nested_package, nested_type, scope)<EOL><DEDENT>", "docstring": "Sets all the descriptor's fields's types.\n\n        This method also sets the containing types on any extensions.\n\n        Args:\n          package: The current package of desc_proto.\n          desc_proto: The message descriptor to update.\n          scope: Enclosing scope of available types.", "id": "f8666:c0:m16"}
{"signature": "def FindFileByName(self, file_name):", "body": "try:<EOL><INDENT>return self._file_descriptors[file_name]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>file_proto = self._internal_db.FindFileByName(file_name)<EOL><DEDENT>except KeyError as error:<EOL><INDENT>if self._descriptor_db:<EOL><INDENT>file_proto = self._descriptor_db.FindFileByName(file_name)<EOL><DEDENT>else:<EOL><INDENT>raise error<EOL><DEDENT><DEDENT>if not file_proto:<EOL><INDENT>raise KeyError('<STR_LIT>' % file_name)<EOL><DEDENT>return self._ConvertFileProtoToFileDescriptor(file_proto)<EOL>", "docstring": "Gets a FileDescriptor by file name.\n\n        Args:\n          file_name: The path to the file to get a descriptor for.\n\n        Returns:\n          A FileDescriptor for the named file.\n\n        Raises:\n          KeyError: if the file can not be found in the pool.", "id": "f8666:c0:m6"}
{"signature": "def _SetFieldType(self, field_proto, field_desc, package, scope):", "body": "if field_proto.type_name:<EOL><INDENT>desc = self._GetTypeFromScope(package, field_proto.type_name, scope)<EOL><DEDENT>else:<EOL><INDENT>desc = None<EOL><DEDENT>if not field_proto.HasField('<STR_LIT:type>'):<EOL><INDENT>if isinstance(desc, descriptor.Descriptor):<EOL><INDENT>field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE<EOL><DEDENT>else:<EOL><INDENT>field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM<EOL><DEDENT><DEDENT>field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType(<EOL>field_proto.type)<EOL>if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE<EOL>or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP):<EOL><INDENT>field_desc.message_type = desc<EOL><DEDENT>if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:<EOL><INDENT>field_desc.enum_type = desc<EOL><DEDENT>if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED:<EOL><INDENT>field_desc.has_default_value = False<EOL>field_desc.default_value = []<EOL><DEDENT>elif field_proto.HasField('<STR_LIT>'):<EOL><INDENT>field_desc.has_default_value = True<EOL>if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or<EOL>field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):<EOL><INDENT>field_desc.default_value = float(field_proto.default_value)<EOL><DEDENT>elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:<EOL><INDENT>field_desc.default_value = field_proto.default_value<EOL><DEDENT>elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:<EOL><INDENT>field_desc.default_value = field_proto.default_value.lower() == '<STR_LIT:true>'<EOL><DEDENT>elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:<EOL><INDENT>field_desc.default_value = field_desc.enum_type.values_by_name[<EOL>field_proto.default_value].number<EOL><DEDENT>elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES:<EOL><INDENT>field_desc.default_value 
= text_encoding.CUnescape(<EOL>field_proto.default_value)<EOL><DEDENT>else:<EOL><INDENT>field_desc.default_value = int(field_proto.default_value)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>field_desc.has_default_value = False<EOL>if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or<EOL>field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):<EOL><INDENT>field_desc.default_value = <NUM_LIT:0.0><EOL><DEDENT>elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:<EOL><INDENT>field_desc.default_value = u'<STR_LIT>'<EOL><DEDENT>elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:<EOL><INDENT>field_desc.default_value = False<EOL><DEDENT>elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:<EOL><INDENT>field_desc.default_value = field_desc.enum_type.values[<NUM_LIT:0>].number<EOL><DEDENT>elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES:<EOL><INDENT>field_desc.default_value = b'<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>field_desc.default_value = <NUM_LIT:0><EOL><DEDENT><DEDENT>field_desc.type = field_proto.type<EOL>", "docstring": "Sets the field's type, cpp_type, message_type and enum_type.\n\n        Args:\n          field_proto: Data about the field in proto format.\n          field_desc: The descriptor to modiy.\n          package: The package the field's container is in.\n          scope: Enclosing scope of available types.", "id": "f8666:c0:m17"}
{"signature": "def FindMessageTypeByName(self, full_name):", "body": "full_name = _NormalizeFullyQualifiedName(full_name)<EOL>if full_name not in self._descriptors:<EOL><INDENT>self.FindFileContainingSymbol(full_name)<EOL><DEDENT>return self._descriptors[full_name]<EOL>", "docstring": "Loads the named descriptor from the pool.\n\n        Args:\n          full_name: The full name of the descriptor to load.\n\n        Returns:\n          The descriptor for the named type.", "id": "f8666:c0:m8"}
{"signature": "def _NormalizeFullyQualifiedName(name):", "body": "return name.lstrip('<STR_LIT:.>')<EOL>", "docstring": "Remove leading period from fully-qualified type name.\n\n    Due to b/13860351 in descriptor_database.py, types in the root namespace are\n    generated with a leading period. This function removes that prefix.\n\n    Args:\n      name: A str, the fully-qualified symbol name.\n\n    Returns:\n      A str, the normalized fully-qualified symbol name.", "id": "f8666:m0"}
{"signature": "def _MakeFieldDescriptor(self, field_proto, message_name, index,<EOL>is_extension=False):", "body": "if message_name:<EOL><INDENT>full_name = '<STR_LIT:.>'.join((message_name, field_proto.name))<EOL><DEDENT>else:<EOL><INDENT>full_name = field_proto.name<EOL><DEDENT>return descriptor.FieldDescriptor(<EOL>name=field_proto.name,<EOL>full_name=full_name,<EOL>index=index,<EOL>number=field_proto.number,<EOL>type=field_proto.type,<EOL>cpp_type=None,<EOL>message_type=None,<EOL>enum_type=None,<EOL>containing_type=None,<EOL>label=field_proto.label,<EOL>has_default_value=False,<EOL>default_value=None,<EOL>is_extension=is_extension,<EOL>extension_scope=None,<EOL>options=field_proto.options)<EOL>", "docstring": "Creates a field descriptor from a FieldDescriptorProto.\n\n        For message and enum type fields, this method will do a look up\n        in the pool for the appropriate descriptor for that type. If it\n        is unavailable, it will fall back to the _source function to\n        create it. If this type is still unavailable, construction will\n        fail.\n\n        Args:\n          field_proto: The proto describing the field.\n          message_name: The name of the containing message.\n          index: Index of the field\n          is_extension: Indication that this field is for an extension.\n\n        Returns:\n          An initialized FieldDescriptor object", "id": "f8666:c0:m15"}
{"signature": "def FindFieldByName(self, full_name):", "body": "full_name = _NormalizeFullyQualifiedName(full_name)<EOL>message_name, _, field_name = full_name.rpartition('<STR_LIT:.>')<EOL>message_descriptor = self.FindMessageTypeByName(message_name)<EOL>return message_descriptor.fields_by_name[field_name]<EOL>", "docstring": "Loads the named field descriptor from the pool.\n\n        Args:\n          full_name: The full name of the field descriptor to load.\n\n        Returns:\n          The field descriptor for the named field.", "id": "f8666:c0:m10"}
{"signature": "def _ConvertFileProtoToFileDescriptor(self, file_proto):", "body": "if file_proto.name not in self._file_descriptors:<EOL><INDENT>built_deps = list(self._GetDeps(file_proto.dependency))<EOL>direct_deps = [self.FindFileByName(n) for n in file_proto.dependency]<EOL>public_deps = [direct_deps[i] for i in file_proto.public_dependency]<EOL>file_descriptor = descriptor.FileDescriptor(<EOL>pool=self,<EOL>name=file_proto.name,<EOL>package=file_proto.package,<EOL>syntax=file_proto.syntax,<EOL>options=file_proto.options,<EOL>serialized_pb=file_proto.SerializeToString(),<EOL>dependencies=direct_deps,<EOL>public_dependencies=public_deps)<EOL>if _USE_C_DESCRIPTORS:<EOL><INDENT>def _AddMessageDescriptor(message_desc):<EOL><INDENT>self._descriptors[message_desc.full_name] = message_desc<EOL>for nested in message_desc.nested_types:<EOL><INDENT>_AddMessageDescriptor(nested)<EOL><DEDENT>for enum_type in message_desc.enum_types:<EOL><INDENT>_AddEnumDescriptor(enum_type)<EOL><DEDENT><DEDENT>def _AddEnumDescriptor(enum_desc):<EOL><INDENT>self._enum_descriptors[enum_desc.full_name] = enum_desc<EOL><DEDENT>for message_type in file_descriptor.message_types_by_name.values():<EOL><INDENT>_AddMessageDescriptor(message_type)<EOL><DEDENT>for enum_type in file_descriptor.enum_types_by_name.values():<EOL><INDENT>_AddEnumDescriptor(enum_type)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>scope = {}<EOL>for dependency in built_deps:<EOL><INDENT>scope.update(self._ExtractSymbols(<EOL>dependency.message_types_by_name.values()))<EOL>scope.update((_PrefixWithDot(enum.full_name), enum)<EOL>for enum in dependency.enum_types_by_name.values())<EOL><DEDENT>for message_type in file_proto.message_type:<EOL><INDENT>message_desc = self._ConvertMessageDescriptor(<EOL>message_type, file_proto.package, file_descriptor, scope,<EOL>file_proto.syntax)<EOL>file_descriptor.message_types_by_name[message_desc.name] = (<EOL>message_desc)<EOL><DEDENT>for enum_type in 
file_proto.enum_type:<EOL><INDENT>file_descriptor.enum_types_by_name[enum_type.name] = (<EOL>self._ConvertEnumDescriptor(enum_type, file_proto.package,<EOL>file_descriptor, None, scope))<EOL><DEDENT>for index, extension_proto in enumerate(file_proto.extension):<EOL><INDENT>extension_desc = self._MakeFieldDescriptor(<EOL>extension_proto, file_proto.package, index, is_extension=True)<EOL>extension_desc.containing_type = self._GetTypeFromScope(<EOL>file_descriptor.package, extension_proto.extendee, scope)<EOL>self._SetFieldType(extension_proto, extension_desc,<EOL>file_descriptor.package, scope)<EOL>file_descriptor.extensions_by_name[extension_desc.name] = (<EOL>extension_desc)<EOL><DEDENT>for desc_proto in file_proto.message_type:<EOL><INDENT>self._SetAllFieldTypes(file_proto.package, desc_proto, scope)<EOL><DEDENT>if file_proto.package:<EOL><INDENT>desc_proto_prefix = _PrefixWithDot(file_proto.package)<EOL><DEDENT>else:<EOL><INDENT>desc_proto_prefix = '<STR_LIT>'<EOL><DEDENT>for desc_proto in file_proto.message_type:<EOL><INDENT>desc = self._GetTypeFromScope(<EOL>desc_proto_prefix, desc_proto.name, scope)<EOL>file_descriptor.message_types_by_name[desc_proto.name] = desc<EOL><DEDENT><DEDENT>self.Add(file_proto)<EOL>self._file_descriptors[file_proto.name] = file_descriptor<EOL><DEDENT>return self._file_descriptors[file_proto.name]<EOL>", "docstring": "Creates a FileDescriptor from a proto or returns a cached copy.\n\n        This method also has the side effect of loading all the symbols found in\n        the file into the appropriate dictionaries in the pool.\n\n        Args:\n          file_proto: The proto to convert.\n\n        Returns:\n          A FileDescriptor matching the passed in proto.", "id": "f8666:c0:m12"}
{"signature": "def _NonImplementedMethod(self, method_name, rpc_controller, callback):", "body": "rpc_controller.SetFailed('<STR_LIT>' % method_name)<EOL>callback(None)<EOL>", "docstring": "The body of all methods in the generated service class.\n\n        Args:\n          method_name: Name of the method being executed.\n          rpc_controller: RPC controller used to execute this method.\n          callback: A callback which will be invoked when the method finishes.", "id": "f8668:c2:m6"}
{"signature": "def __init__(cls, name, bases, dictionary):", "body": "super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary)<EOL>if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary:<EOL><INDENT>return<EOL><DEDENT>descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY]<EOL>service_stub_builder = _ServiceStubBuilder(descriptor)<EOL>service_stub_builder.BuildServiceStub(cls)<EOL>", "docstring": "Creates a message service stub class.\n\n        Args:\n          name: Name of the class (ignored, here).\n          bases: Base classes of the class being constructed.\n          dictionary: The class dictionary of the class being constructed.\n            dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object\n            describing this protocol service type.", "id": "f8668:c1:m0"}
{"signature": "def _CallMethod(self, srvc, method_descriptor,<EOL>rpc_controller, request, callback):", "body": "if method_descriptor.containing_service != self.descriptor:<EOL><INDENT>raise RuntimeError(<EOL>'<STR_LIT>')<EOL><DEDENT>method = getattr(srvc, method_descriptor.name)<EOL>return method(rpc_controller, request, callback)<EOL>", "docstring": "Calls the method described by a given method descriptor.\n\n        Args:\n          srvc: Instance of the service for which this method is called.\n          method_descriptor: Descriptor that represent the method to call.\n          rpc_controller: RPC controller to use for this method's execution.\n          request: Request protocol message.\n          callback: A callback to invoke after the method has completed.", "id": "f8668:c2:m2"}
{"signature": "def FindFileContainingSymbol(self, symbol):", "body": "return self._file_desc_protos_by_symbol[symbol]<EOL>", "docstring": "Finds the file descriptor proto containing the specified symbol.\n\n        The symbol should be a fully qualified name including the file descriptor's\n        package and any containing messages. Some examples:\n\n        'some.package.name.Message'\n        'some.package.name.Message.NestedEnum'\n\n        The file descriptor proto containing the specified symbol must be added to\n        this database using the Add method or else an error will be raised.\n\n        Args:\n          symbol: The fully qualified symbol name.\n\n        Returns:\n          The file descriptor proto containing the symbol.\n\n        Raises:\n          KeyError if no file contains the specified symbol.", "id": "f8669:c2:m3"}
{"signature": "def FindFileByName(self, name):", "body": "return self._file_desc_protos_by_file[name]<EOL>", "docstring": "Finds the file descriptor proto by file name.\n\n        Typically the file name is a relative path ending to a .proto file. The\n        proto with the given name will have to have been added to this database\n        using the Add method or else an error will be raised.\n\n        Args:\n          name: The file name to find.\n\n        Returns:\n          The file descriptor proto matching the name.\n\n        Raises:\n          KeyError if no file by the given name was added.", "id": "f8669:c2:m2"}
{"signature": "def CUnescape(text):", "body": "def ReplaceHex(m):<EOL><INDENT>if len(m.group(<NUM_LIT:1>)) & <NUM_LIT:1>:<EOL><INDENT>return m.group(<NUM_LIT:1>) + '<STR_LIT>' + m.group(<NUM_LIT:2>)<EOL><DEDENT>return m.group(<NUM_LIT:0>)<EOL><DEDENT>result = _CUNESCAPE_HEX.sub(ReplaceHex, text)<EOL>if str is bytes:  <EOL><INDENT>return result.decode('<STR_LIT>')<EOL><DEDENT>result = '<STR_LIT>'.join(_cescape_highbit_to_str[ord(c)] for c in result)<EOL>return (result.encode('<STR_LIT:ascii>')  <EOL>.decode('<STR_LIT>')<EOL>.encode('<STR_LIT>'))<EOL>", "docstring": "Unescape a text string with C-style escape sequences to UTF-8 bytes.", "id": "f8670:m1"}
{"signature": "def SetFailed(self, reason):", "body": "raise NotImplementedError<EOL>", "docstring": "Sets a failure reason.\n\n        Causes Failed() to return true on the client side.  \"reason\" will be\n        incorporated into the message returned by ErrorText().  If you find\n        you need to return machine-readable information about failures, you\n        should incorporate it into your response protocol buffer and should\n        NOT call SetFailed().", "id": "f8671:c2:m4"}
{"signature": "def NotifyOnCancel(self, callback):", "body": "raise NotImplementedError<EOL>", "docstring": "Sets a callback to invoke on cancel.\n\n        Asks that the given callback be called when the RPC is canceled.  The\n        callback will always be called exactly once.  If the RPC completes without\n        being canceled, the callback will be called after completion.  If the RPC\n        has already been canceled when NotifyOnCancel() is called, the callback\n        will be called immediately.\n\n        NotifyOnCancel() must be called no more than once per request.", "id": "f8671:c2:m6"}
{"signature": "def GetRequestClass(self, method_descriptor):", "body": "raise NotImplementedError<EOL>", "docstring": "Returns the class of the request message for the specified method.\n\n        CallMethod() requires that the request is of a particular subclass of\n        Message. GetRequestClass() gets the default instance of this required\n        type.\n\n        Example:\n          method = service.GetDescriptor().FindMethodByName(\"Foo\")\n          request = stub.GetRequestClass(method)()\n          request.ParseFromString(input)\n          service.CallMethod(method, request, callback)", "id": "f8671:c1:m2"}
{"signature": "def dispatch_first(self, func):", "body": "self.callees.appendleft(self._make_dispatch(func))<EOL>return self._make_wrapper(func)<EOL>", "docstring": "Adds the decorated function to this dispatch, at the FRONT of the order.\nUseful for allowing third parties to add overloaded functionality\nto be executed before default functionality.", "id": "f8679:c1:m7"}
{"signature": "@staticmethod<EOL><INDENT>def _make_param_matcher(annotation, kind=None):<DEDENT>", "body": "if isinstance(annotation, type) or (<EOL>isinstance(annotation, tuple) and<EOL>all(isinstance(a, type) for a in annotation)):<EOL><INDENT>if kind is Parameter.VAR_POSITIONAL:<EOL><INDENT>return (lambda args: all(isinstance(x, annotation) for x in args))<EOL><DEDENT>else:<EOL><INDENT>return (lambda x: isinstance(x, annotation))<EOL><DEDENT><DEDENT>elif callable(annotation):<EOL><INDENT>return annotation<EOL><DEDENT>else:<EOL><INDENT>return (lambda x: x == annotation)<EOL><DEDENT>", "docstring": "For a given annotation, return a function which, when called on a\nfunction argument, returns true if that argument matches the annotation.\nIf the annotation is a type, it calls isinstance; if it's a callable,\nit calls it on the object; otherwise, it performs a value comparison.\nIf the parameter is variadic (*args) and the annotation is a type, the\nmatcher will attempt to match each of the arguments in args", "id": "f8679:c1:m2"}
{"signature": "@classmethod<EOL><INDENT>def _make_dispatch(cls, func):<DEDENT>", "body": "sig = signature(func)<EOL>matchers = tuple(cls._make_all_matchers(sig.parameters.items()))<EOL>return (partial(cls._bind_args, sig, matchers), func)<EOL>", "docstring": "Create a dispatch pair for func- a tuple of (bind_args, func), where\nbind_args is a function that, when called with (args, kwargs), attempts\nto bind those args to the type signature of func, or else raise a\nTypeError", "id": "f8679:c1:m4"}
{"signature": "@property<EOL><INDENT>def registered_functions(self):<DEDENT>", "body": "return [callee[<NUM_LIT:1>] for callee in self.callees]<EOL>", "docstring": "Get a list of registered functions, in the order that they will be\nchecked.", "id": "f8679:c1:m11"}
{"signature": "@classmethod<EOL><INDENT>def _make_all_matchers(cls, parameters):<DEDENT>", "body": "for name, param in parameters:<EOL><INDENT>annotation = param.annotation<EOL>if annotation is not Parameter.empty:<EOL><INDENT>yield name, cls._make_param_matcher(annotation, param.kind)<EOL><DEDENT><DEDENT>", "docstring": "For every parameter, create a matcher if the parameter has an\nannotation.", "id": "f8679:c1:m3"}
{"signature": "def _make_wrapper(self, func):", "body": "<EOL>@wraps(func)<EOL>def executor(*args, **kwargs):<EOL><INDENT>return self.execute(args, kwargs)<EOL><DEDENT>executor.dispatch = self.dispatch<EOL>executor.dispatch_first = self.dispatch_first<EOL>executor.func = func<EOL>executor.lookup = self.lookup<EOL>return executor<EOL>", "docstring": "Makes a wrapper function that executes a dispatch call for func. The\nwrapper has the dispatch and dispatch_first attributes, so that\nadditional overloads can be added to the group.", "id": "f8679:c1:m5"}
{"signature": "@staticmethod<EOL><INDENT>def _bind_args(sig, param_matchers, args, kwargs):<DEDENT>", "body": "<EOL>bound = sig.bind(*args, **kwargs)<EOL>if not all(param_matcher(bound.arguments[param_name])<EOL>for param_name, param_matcher in param_matchers):<EOL><INDENT>raise TypeError<EOL><DEDENT>return bound<EOL>", "docstring": "Attempt to bind the args to the type signature. First try to just bind\nto the signature, then ensure that all arguments match the parameter\ntypes.", "id": "f8679:c1:m1"}
{"signature": "def description(self):", "body": "if self._description is None:<EOL><INDENT>self._load()<EOL><DEDENT>return self._description<EOL>", "docstring": "May hit server. See Cloudcast.sections", "id": "f8683:c5:m4"}
{"signature": "def authorize_url(self):", "body": "auth_url = OAUTH_ROOT + '<STR_LIT>'<EOL>params = {<EOL>'<STR_LIT>': self.client_id,<EOL>'<STR_LIT>': self.redirect_uri,<EOL>}<EOL>return \"<STR_LIT>\".format(auth_url, urlencode(params))<EOL>", "docstring": "Return a URL to redirect the user to for OAuth authentication.", "id": "f8683:c1:m1"}
{"signature": "def trim(docstring):", "body": "if not docstring:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>lines = six.u(docstring).expandtabs().splitlines()<EOL>lines = [line.strip() for line in lines]<EOL>res = six.u('<STR_LIT:\\n>').join(lines)<EOL>return res<EOL>", "docstring": "Remove the tabs to spaces, and remove the extra spaces / tabs that are in\nfront of the text in docstrings.\n\nImplementation taken from http://www.python.org/dev/peps/pep-0257/", "id": "f8699:m0"}
{"signature": "def __init__(self, definition_handler=DefinitionHandler(), ref=False,<EOL>type_converter=TypeConverter(),<EOL>parameter_converter=ParameterConverter(TypeConverter())):", "body": "self.parameter_registry = {}<EOL>self.type_converter = type_converter<EOL>self.parameter_converter = parameter_converter<EOL>self.definitions = definition_handler<EOL>self.ref = ref<EOL>", "docstring": ":param definition_handler:\n    Callable that handles swagger definition schemas.\n:param ref:\n    Specifies the ref value when calling from_xxx methods.", "id": "f8705:c2:m0"}
{"signature": "def _ref(self, resp, base_name=None):", "body": "name = base_name or resp.get('<STR_LIT:title>', '<STR_LIT>') or resp.get('<STR_LIT:name>', '<STR_LIT>')<EOL>pointer = self.json_pointer + name<EOL>self.response_registry[name] = resp<EOL>return {'<STR_LIT>': pointer}<EOL>", "docstring": "Store a response schema and return a reference to it.\n\n:param schema:\n    Swagger response definition.\n:param base_name:\n    Name that should be used for the reference.\n\n:rtype: dict\n:returns: JSON pointer to the original response definition.", "id": "f8705:c3:m2"}
{"signature": "def __call__(self, *args, **kwargs):", "body": "self.__dict__.update(**kwargs)<EOL>message = (\"<STR_LIT>\")<EOL>warnings.warn(message, DeprecationWarning)<EOL>return self.generate(*args, **kwargs)<EOL>", "docstring": "Deprecated alias of `generate`.", "id": "f8705:c4:m2"}
{"signature": "def _extract_operation_from_view(self, view, args):", "body": "op = {<EOL>'<STR_LIT>': {<EOL>'<STR_LIT:default>': {<EOL>'<STR_LIT:description>': '<STR_LIT>'<EOL>}<EOL>},<EOL>}<EOL>renderer = args.get('<STR_LIT>', '<STR_LIT>')<EOL>if \"<STR_LIT>\" in renderer:  <EOL><INDENT>produces = ['<STR_LIT:application/json>']<EOL><DEDENT>elif renderer == '<STR_LIT>':<EOL><INDENT>produces = ['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>produces = None<EOL><DEDENT>if produces:<EOL><INDENT>op.setdefault('<STR_LIT:produces>', produces)<EOL><DEDENT>consumes = args.get('<STR_LIT>')<EOL>if consumes is not None:<EOL><INDENT>consumes = to_list(consumes)<EOL>consumes = [x for x in consumes if not callable(x)]<EOL>op['<STR_LIT>'] = consumes<EOL><DEDENT>is_colander = self._is_colander_schema(args)<EOL>if is_colander:<EOL><INDENT>schema = self._extract_transform_colander_schema(args)<EOL>parameters = self.parameters.from_schema(schema)<EOL><DEDENT>else:<EOL><INDENT>parameters = None<EOL><DEDENT>if parameters:<EOL><INDENT>op['<STR_LIT>'] = parameters<EOL><DEDENT>if isinstance(view, six.string_types):<EOL><INDENT>if '<STR_LIT>' in args:<EOL><INDENT>ob = args['<STR_LIT>']<EOL>view_ = getattr(ob, view.lower())<EOL>docstring = trim(view_.__doc__)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>docstring = str(trim(view.__doc__))<EOL><DEDENT>if docstring and self.summary_docstrings:<EOL><INDENT>op['<STR_LIT>'] = docstring<EOL><DEDENT>if '<STR_LIT>' in args:<EOL><INDENT>op['<STR_LIT>'] = self.responses.from_schema_mapping(args['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in args:<EOL><INDENT>op['<STR_LIT>'] = args['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in args:<EOL><INDENT>op['<STR_LIT>'] = args['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in args:<EOL><INDENT>op['<STR_LIT>'] = args['<STR_LIT>']<EOL><DEDENT>return op<EOL>", "docstring": "Extract swagger operation details from colander view definitions.\n\n:param view:\n    View to extract information from.\n:param args:\n    Arguments from the view 
decorator.\n\n:rtype: dict\n:returns: Operation definition.", "id": "f8705:c4:m7"}
{"signature": "def from_schema(self, schema_node):", "body": "params = []<EOL>for param_schema in schema_node.children:<EOL><INDENT>location = param_schema.name<EOL>if location is '<STR_LIT:body>':<EOL><INDENT>name = param_schema.__class__.__name__<EOL>if name == '<STR_LIT:body>':<EOL><INDENT>name = schema_node.__class__.__name__ + '<STR_LIT>'<EOL><DEDENT>param = self.parameter_converter(location,<EOL>param_schema)<EOL>param['<STR_LIT:name>'] = name<EOL>if self.ref:<EOL><INDENT>param = self._ref(param)<EOL><DEDENT>params.append(param)<EOL><DEDENT>elif location in (('<STR_LIT:path>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:GET>')):<EOL><INDENT>for node_schema in param_schema.children:<EOL><INDENT>param = self.parameter_converter(location, node_schema)<EOL>if self.ref:<EOL><INDENT>param = self._ref(param)<EOL><DEDENT>params.append(param)<EOL><DEDENT><DEDENT><DEDENT>return params<EOL>", "docstring": "Creates a list of Swagger params from a colander request schema.\n\n:param schema_node:\n    Request schema to be transformed into Swagger.\n:param validators:\n    Validators used in colander with the schema.\n\n:rtype: list\n:returns: List of Swagger parameters.", "id": "f8705:c2:m1"}
{"signature": "def from_schema(self, schema_node, base_name=None):", "body": "return self._ref_recursive(self.type_converter(schema_node), self.ref, base_name)<EOL>", "docstring": "Creates a Swagger definition from a colander schema.\n\n:param schema_node:\n    Colander schema to be transformed into a Swagger definition.\n:param base_name:\n    Schema alternative title.\n\n:rtype: dict\n:returns: Swagger schema.", "id": "f8705:c1:m1"}
{"signature": "def generate(self, title=None, version=None, base_path=None,<EOL>info=None, swagger=None, **kwargs):", "body": "title = title or self.api_title<EOL>version = version or self.api_version<EOL>info = info or self.swagger.get('<STR_LIT:info>', {})<EOL>swagger = swagger or self.swagger<EOL>base_path = base_path or self.base_path<EOL>swagger = swagger.copy()<EOL>info.update(title=title, version=version)<EOL>swagger.update(swagger='<STR_LIT>', info=info, basePath=base_path)<EOL>paths, tags = self._build_paths()<EOL>if tags:<EOL><INDENT>swagger.setdefault('<STR_LIT>', [])<EOL>tag_names = {t['<STR_LIT:name>'] for t in swagger['<STR_LIT>']}<EOL>for tag in tags:<EOL><INDENT>if tag['<STR_LIT:name>'] not in tag_names:<EOL><INDENT>swagger['<STR_LIT>'].append(tag)<EOL><DEDENT><DEDENT><DEDENT>if paths:<EOL><INDENT>swagger.setdefault('<STR_LIT>', {})<EOL>merge_dicts(swagger['<STR_LIT>'], paths)<EOL><DEDENT>definitions = self.definitions.definition_registry<EOL>if definitions:<EOL><INDENT>swagger.setdefault('<STR_LIT>', {})<EOL>merge_dicts(swagger['<STR_LIT>'], definitions)<EOL><DEDENT>parameters = self.parameters.parameter_registry<EOL>if parameters:<EOL><INDENT>swagger.setdefault('<STR_LIT>', {})<EOL>merge_dicts(swagger['<STR_LIT>'], parameters)<EOL><DEDENT>responses = self.responses.response_registry<EOL>if responses:<EOL><INDENT>swagger.setdefault('<STR_LIT>', {})<EOL>merge_dicts(swagger['<STR_LIT>'], responses)<EOL><DEDENT>return swagger<EOL>", "docstring": "Generate a Swagger 2.0 documentation. 
Keyword arguments may be used\n        to provide additional information to build methods as such ignores.\n\n        :param title:\n            The name presented on the swagger document.\n        :param version:\n            The version of the API presented on the swagger document.\n        :param base_path:\n            The path that all requests to the API must refer to.\n        :param info:\n            Swagger info field.\n        :param swagger:\n            Extra fields that should be provided on the swagger documentation.\n\n        :rtype: dict\n        :returns: Full OpenAPI/Swagger compliant specification for the application.", "id": "f8705:c4:m1"}
{"signature": "def _ref(self, param, base_name=None):", "body": "name = base_name or param.get('<STR_LIT:title>', '<STR_LIT>') or param.get('<STR_LIT:name>', '<STR_LIT>')<EOL>pointer = self.json_pointer + name<EOL>self.parameter_registry[name] = param<EOL>return {'<STR_LIT>': pointer}<EOL>", "docstring": "Store a parameter schema and return a reference to it.\n\n:param schema:\n    Swagger parameter definition.\n:param base_name:\n    Name that should be used for the reference.\n\n:rtype: dict\n:returns: JSON pointer to the original parameter definition.", "id": "f8705:c2:m3"}
{"signature": "def cornice_enable_openapi_explorer(<EOL>config,<EOL>api_explorer_path='<STR_LIT>',<EOL>permission=NO_PERMISSION_REQUIRED,<EOL>route_factory=None,<EOL>**kwargs):", "body": "config.add_route('<STR_LIT>', api_explorer_path,<EOL>factory=route_factory)<EOL>config.add_view('<STR_LIT>',<EOL>permission=permission,<EOL>route_name='<STR_LIT>')<EOL>", "docstring": ":param config:\n    Pyramid configurator object\n:param api_explorer_path:\n    where to expose Swagger UI interface view\n:param permission:\n    pyramid permission for those views\n:param route_factory:\n    factory for context object for those routes\n\nThis registers and configures the view that serves api explorer", "id": "f8706:m2"}
{"signature": "@values.get(tags=['<STR_LIT>'], response_schemas=response_schemas)<EOL><INDENT>def get_value(request):<DEDENT>", "body": "key = request.matchdict['<STR_LIT:key>']<EOL>return _VALUES.get(key)<EOL>", "docstring": "Returns the value.", "id": "f8707:c2:m0"}
{"signature": "@values.put(tags=['<STR_LIT>'], validators=(colander_body_validator, ),<EOL>schema=BodySchema(), response_schemas=response_schemas)<EOL><INDENT>def set_value(request):<DEDENT>", "body": "key = request.matchdict['<STR_LIT:key>']<EOL>_VALUES[key] = request.json_body<EOL>return _VALUES.get(key)<EOL>", "docstring": "Set the value and returns *True* or *False*.", "id": "f8707:c2:m1"}
{"signature": "def teardown_databases(self, old_config, **kwargs):", "body": "pass<EOL>", "docstring": "Override the database teardown defined in parent class", "id": "f8713:c0:m2"}
{"signature": "def setup_databases(self, **kwargs):", "body": "pass<EOL>", "docstring": "Override the database creation defined in parent class", "id": "f8713:c0:m1"}
{"signature": "def get_variable(relpath, keyword='<STR_LIT>'):", "body": "for line in open(os.path.join(os.path.dirname(__file__), relpath), encoding='<STR_LIT>'):<EOL><INDENT>if keyword in line:<EOL><INDENT>if '<STR_LIT:\">' in line:<EOL><INDENT>return line.split('<STR_LIT:\">')[<NUM_LIT:1>]<EOL><DEDENT>elif \"<STR_LIT:'>\" in line:<EOL><INDENT>return line.split(\"<STR_LIT:'>\")[<NUM_LIT:1>]<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Read __version__ or other properties from a python file without importing it \n\n    from gist.github.com/technonik/406623 but with added keyward kwarg", "id": "f8714:m1"}
{"signature": "def skip_redundant(iterable, skipset=None):", "body": "if skipset is None: skipset = set()<EOL>for item in iterable:<EOL><INDENT>if item not in skipset:<EOL><INDENT>skipset.add(item)<EOL>yield item<EOL><DEDENT><DEDENT>", "docstring": "Redundant items are repeated items or items in the original skipset.", "id": "f8718:m0"}
{"signature": "def get_noconflict_metaclass(bases, left_metas, right_metas):", "body": "<EOL>metas = left_metas + tuple(map(type, bases)) + right_metas<EOL>needed_metas = remove_redundant(metas)<EOL>if needed_metas in memoized_metaclasses_map:<EOL><INDENT>return memoized_metaclasses_map[needed_metas]<EOL><DEDENT>elif not needed_metas:         <EOL><INDENT>meta = type<EOL><DEDENT>elif len(needed_metas) == <NUM_LIT:1>: <EOL><INDENT>meta = needed_metas[<NUM_LIT:0>]<EOL><DEDENT>elif needed_metas == bases: <EOL><INDENT>raise TypeError(\"<STR_LIT>\", needed_metas)<EOL><DEDENT>else: <EOL><INDENT>metaname = '<STR_LIT:_>' + '<STR_LIT>'.join([m.__name__ for m in needed_metas])<EOL>meta = classmaker()(metaname, needed_metas, {})<EOL><DEDENT>memoized_metaclasses_map[needed_metas] = meta<EOL>return meta<EOL>", "docstring": "Not intended to be used outside of this module, unless you know\n    what you are doing.", "id": "f8718:m2"}
{"signature": "def bug_info(exc_type, exc_value, exc_trace):", "body": "if hasattr(sys, '<STR_LIT>') or not sys.stderr.isatty():<EOL><INDENT>sys.__excepthook__(exc_type, exc_value, exc_trace)<EOL><DEDENT>else:<EOL><INDENT>import ipdb<EOL>traceback.print_exception(exc_type, exc_value, exc_trace)<EOL>print<EOL>ipdb.post_mortem(exc_trace)<EOL><DEDENT>", "docstring": "Prints the traceback and invokes the ipython debugger on any exception\n\n    Only invokes ipydb if you are outside ipython or python interactive session.\n    So scripts must be called from OS shell in order for exceptions to ipy-shell-out.\n\n    Dependencies:\n      Needs `pip install ipdb`\n\n    Arguments:\n      exc_type (type): The exception type/class (e.g. RuntimeError)\n      exc_value (Exception): The exception instance (e.g. the error message passed to the Exception constructor)\n      exc_trace (Traceback): The traceback instance\n\n    References:\n      http://stackoverflow.com/a/242531/623735\n\n    Example Usage:\n      $  python -c 'from pug import debug;x=[];x[0]'\n      Traceback (most recent call last):\n        File \"<string>\", line 1, in <module>\n      IndexError: list index out of range\n\n      > <string>(1)<module>()\n\n      ipdb> x\n      []\n      ipdb> locals()\n      {'__builtins__': <module '__builtin__' (built-in)>, '__package__': None, 'x': [], 'debug': <module 'pug.debug' from 'pug/debug.py'>, '__name__': '__main__', '__doc__': None}\n      ipdb>", "id": "f8719:m0"}
{"signature": "def force_frozenset(obj):", "body": "<EOL>return tuple(force_hashable(obj))<EOL>", "docstring": "Force frozenset() command to freeze the order and contents of multables and iterables like lists, dicts, generators\n\n    Useful for memoization and constructing dicts or hashtables where keys must be immutable", "id": "f8721:m0"}
{"signature": "def _denormalize(obj, fields=None, save=False, overwrite=False):", "body": "if not fields:<EOL><INDENT>meta = obj._meta<EOL>fields = [f.name for f in meta.fields if not hasattr(f, '<STR_LIT>') and not f.primary_key and hasattr(meta, '<STR_LIT>' + f.name) and hasattr(meta, '<STR_LIT:_>' + f.name)]<EOL><DEDENT>for field in fields:<EOL><INDENT>if not overwrite and not isinstance(getattr(obj, field, None), NoneType):<EOL><INDENT>continue<EOL><DEDENT>if hasattr(obj, field):<EOL><INDENT>setattr(obj, field, getattr(obj, '<STR_LIT:_>' + field, None))<EOL><DEDENT><DEDENT>if save:<EOL><INDENT>obj.save()<EOL><DEDENT>return obj<EOL>", "docstring": "Update/populate any database fields that are not related fields (FKs) but have `_get`ters to populate them with", "id": "f8721:m3"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_get_keywords(versionfile_abs):", "body": "<EOL>keywords = {}<EOL>try:<EOL><INDENT>f = open(versionfile_abs, \"<STR_LIT:r>\")<EOL>for line in f.readlines():<EOL><INDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT:date>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>f.close()<EOL><DEDENT>except EnvironmentError:<EOL><INDENT>pass<EOL><DEDENT>return keywords<EOL>", "docstring": "Extract version information from the given file.", "id": "f8724:m5"}
{"signature": "def render_pep440_pre(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[.post.devDISTANCE] -- No -dirty.\n\n    Exceptions:\n    1: no tags. 0.post.devDISTANCE", "id": "f8724:m10"}
{"signature": "def get_config():", "body": "<EOL>cfg = VersioneerConfig()<EOL>cfg.VCS = \"<STR_LIT>\"<EOL>cfg.style = \"<STR_LIT>\"<EOL>cfg.tag_prefix = \"<STR_LIT>\"<EOL>cfg.parentdir_prefix = \"<STR_LIT>\"<EOL>cfg.versionfile_source = \"<STR_LIT>\"<EOL>cfg.verbose = False<EOL>return cfg<EOL>", "docstring": "Create, populate and return the VersioneerConfig() object.", "id": "f8724:m1"}
{"signature": "def render_git_describe_long(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL><DEDENT>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG-DISTANCE-gHEX[-dirty].\n\n    Like 'git describe --tags --dirty --always -long'.\n    The distance/hash is unconditional.\n\n    Exceptions:\n    1: no tags. HEX[-dirty]  (note: no 'g' prefix)", "id": "f8724:m14"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):", "body": "GITS = [\"<STR_LIT>\"]<EOL>if sys.platform == \"<STR_LIT:win32>\":<EOL><INDENT>GITS = [\"<STR_LIT>\", \"<STR_LIT>\"]<EOL><DEDENT>out, rc = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\"], cwd=root,<EOL>hide_stderr=True)<EOL>if rc != <NUM_LIT:0>:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % root)<EOL><DEDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>describe_out, rc = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\" % tag_prefix],<EOL>cwd=root)<EOL>if describe_out is None:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>describe_out = describe_out.strip()<EOL>full_out, rc = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\"], cwd=root)<EOL>if full_out is None:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>full_out = full_out.strip()<EOL>pieces = {}<EOL>pieces[\"<STR_LIT>\"] = full_out<EOL>pieces[\"<STR_LIT>\"] = full_out[:<NUM_LIT:7>]  <EOL>pieces[\"<STR_LIT:error>\"] = None<EOL>git_describe = describe_out<EOL>dirty = git_describe.endswith(\"<STR_LIT>\")<EOL>pieces[\"<STR_LIT>\"] = dirty<EOL>if dirty:<EOL><INDENT>git_describe = git_describe[:git_describe.rindex(\"<STR_LIT>\")]<EOL><DEDENT>if \"<STR_LIT:->\" in git_describe:<EOL><INDENT>mo = re.search(r'<STR_LIT>', git_describe)<EOL>if not mo:<EOL><INDENT>pieces[\"<STR_LIT:error>\"] = (\"<STR_LIT>\"<EOL>% describe_out)<EOL>return pieces<EOL><DEDENT>full_tag = mo.group(<NUM_LIT:1>)<EOL>if not full_tag.startswith(tag_prefix):<EOL><INDENT>if verbose:<EOL><INDENT>fmt = \"<STR_LIT>\"<EOL>print(fmt % (full_tag, tag_prefix))<EOL><DEDENT>pieces[\"<STR_LIT:error>\"] = (\"<STR_LIT>\"<EOL>% (full_tag, tag_prefix))<EOL>return pieces<EOL><DEDENT>pieces[\"<STR_LIT>\"] = full_tag[len(tag_prefix):]<EOL>pieces[\"<STR_LIT>\"] = 
int(mo.group(<NUM_LIT:2>))<EOL>pieces[\"<STR_LIT>\"] = mo.group(<NUM_LIT:3>)<EOL><DEDENT>else:<EOL><INDENT>pieces[\"<STR_LIT>\"] = None<EOL>count_out, rc = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>cwd=root)<EOL>pieces[\"<STR_LIT>\"] = int(count_out)  <EOL><DEDENT>date = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>cwd=root)[<NUM_LIT:0>].strip()<EOL>pieces[\"<STR_LIT:date>\"] = date.strip().replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:T>\", <NUM_LIT:1>).replace(\"<STR_LIT:U+0020>\", \"<STR_LIT>\", <NUM_LIT:1>)<EOL>return pieces<EOL>", "docstring": "Get version from 'git describe' in the root of the source tree.\n\n    This only gets called if the git-archive 'subst' keywords were *not*\n    expanded, and _version.py hasn't already been rewritten with a short\n    version string, meaning we're inside a checked out source tree.", "id": "f8724:m7"}
{"signature": "def register_vcs_handler(vcs, method):  ", "body": "def decorate(f):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if vcs not in HANDLERS:<EOL><INDENT>HANDLERS[vcs] = {}<EOL><DEDENT>HANDLERS[vcs][method] = f<EOL>return f<EOL><DEDENT>return decorate<EOL>", "docstring": "Decorator to mark a method as the handler for a particular VCS.", "id": "f8724:m2"}
{"signature": "def render_pep440(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"],<EOL>pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>return rendered<EOL>", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n    Exceptions:\n    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f8724:m9"}
{"signature": "def get_keywords():", "body": "<EOL>git_refnames = \"<STR_LIT>\"<EOL>git_full = \"<STR_LIT>\"<EOL>git_date = \"<STR_LIT>\"<EOL>keywords = {\"<STR_LIT>\": git_refnames, \"<STR_LIT>\": git_full, \"<STR_LIT:date>\": git_date}<EOL>return keywords<EOL>", "docstring": "Get the keywords needed to look up the version information.", "id": "f8724:m0"}
{"signature": "def get_bits_per_pixel(data_format):", "body": "if data_format in component_8bit_formats:<EOL><INDENT>return <NUM_LIT:8><EOL><DEDENT>elif data_format in component_10bit_formats:<EOL><INDENT>return <NUM_LIT:10><EOL><DEDENT>elif data_format in component_12bit_formats:<EOL><INDENT>return <NUM_LIT:12><EOL><DEDENT>elif data_format in component_14bit_formats:<EOL><INDENT>return <NUM_LIT><EOL><DEDENT>elif data_format in component_16bit_formats:<EOL><INDENT>return <NUM_LIT:16><EOL><DEDENT>return None<EOL>", "docstring": "Returns the number of (used) bits per pixel.\nSo without padding.\nReturns None if format is not known.", "id": "f8732:m4"}
{"signature": "def _destroy_image_acquirer(self, ia):", "body": "id_ = None<EOL>if ia.device:<EOL><INDENT>ia.stop_image_acquisition()<EOL>ia._release_data_streams()<EOL>id_ = ia._device.id_<EOL>if ia.device.node_map:<EOL><INDENT>if ia._chunk_adapter:<EOL><INDENT>ia._chunk_adapter.detach_buffer()<EOL>ia._chunk_adapter = None<EOL>self._logger.info(<EOL>'<STR_LIT>'.format(<EOL>id_<EOL>)<EOL>)<EOL><DEDENT>ia.device.node_map.disconnect()<EOL>self._logger.info(<EOL>'<STR_LIT>'.format(<EOL>id_<EOL>)<EOL>)<EOL><DEDENT>if ia._device.is_open():<EOL><INDENT>ia._device.close()<EOL>self._logger.info(<EOL>'<STR_LIT>'.format(id_)<EOL>)<EOL><DEDENT><DEDENT>ia._device = None<EOL>if id_:<EOL><INDENT>self._logger.info(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(id_)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>self._logger.info(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>if self._profiler:<EOL><INDENT>self._profiler.print_diff()<EOL><DEDENT>self._ias.remove(ia)<EOL>", "docstring": "Releases all external resources including the controlling device.", "id": "f8741:c20:m22"}
{"signature": "@property<EOL><INDENT>def payload(self):<DEDENT>", "body": "return self._payload<EOL>", "docstring": ":return: A containing object which derives from :class:`PayloadBase` class.", "id": "f8741:c8:m8"}
{"signature": "def __init__(self, thread: ThreadBase=None):", "body": "<EOL>assert thread<EOL>super().__init__()<EOL>self._thread = thread<EOL>self._locked_mutex = None<EOL>", "docstring": ":param thread:", "id": "f8741:c2:m0"}
{"signature": "@property<EOL><INDENT>def timestamp(self):<DEDENT>", "body": "timestamp = <NUM_LIT:0><EOL>try:<EOL><INDENT>timestamp = self._buffer.timestamp_ns<EOL><DEDENT>except (InvalidParameterException, NotImplementedException,<EOL>NotAvailableException):<EOL><INDENT>try:<EOL><INDENT>_ = self.timestamp_frequency<EOL><DEDENT>except InvalidParameterException:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>timestamp = self._buffer.timestamp<EOL><DEDENT>except (InvalidParameterException, NotAvailableException):<EOL><INDENT>timestamp = <NUM_LIT:0><EOL><DEDENT><DEDENT><DEDENT>return timestamp<EOL>", "docstring": ":return: The timestamp in the TL specific unit.", "id": "f8741:c8:m5"}
{"signature": "@property<EOL><INDENT>def interface(self):<DEDENT>", "body": "return self._interface<EOL>", "docstring": ":return: The parent :class:`Interface` module object of the connecting remote device.", "id": "f8741:c19:m16"}
{"signature": "def __init__(self, *, buffer=None, node_map=None, logger=None):", "body": "<EOL>assert buffer<EOL>assert node_map<EOL>self._logger = logger or get_logger(name=__name__)<EOL>super().__init__()<EOL>self._buffer = buffer<EOL>self._node_map = node_map<EOL>self._payload = self._build_payload(<EOL>buffer=buffer,<EOL>node_map=node_map,<EOL>logger=self._logger<EOL>)<EOL>", "docstring": ":param buffer:\n:param node_map:\n:param logger:", "id": "f8741:c8:m0"}
{"signature": "@property<EOL><INDENT>def worker(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "This method is abstract and should be reimplemented in any sub-class.\n\n:return: None.", "id": "f8741:c1:m8"}
{"signature": "@property<EOL><INDENT>def width(self):<DEDENT>", "body": "try:<EOL><INDENT>if self._part:<EOL><INDENT>value = self._part.width<EOL><DEDENT>else:<EOL><INDENT>value = self._buffer.width<EOL><DEDENT><DEDENT>except InvalidParameterException:<EOL><INDENT>value = self._node_map.Width.value<EOL><DEDENT>return value<EOL>", "docstring": ":return: The width of the data component in the buffer in number of pixels.", "id": "f8741:c7:m4"}
{"signature": "@property<EOL><INDENT>def height(self):<DEDENT>", "body": "try:<EOL><INDENT>if self._part:<EOL><INDENT>value = self._part.height<EOL><DEDENT>else:<EOL><INDENT>value = self._buffer.height<EOL><DEDENT><DEDENT>except InvalidParameterException:<EOL><INDENT>value = self._node_map.Height.value<EOL><DEDENT>return value<EOL>", "docstring": ":return: The height of the data component in the buffer in number of pixels.", "id": "f8741:c7:m5"}
{"signature": "def add_cti_file(self, file_path: str):", "body": "if not os.path.exists(file_path):<EOL><INDENT>self._logger.warning(<EOL>'<STR_LIT>'.format(file_path)<EOL>)<EOL><DEDENT>if file_path not in self._cti_files:<EOL><INDENT>self._cti_files.append(file_path)<EOL>self._logger.info(<EOL>'<STR_LIT>'.format(file_path)<EOL>)<EOL><DEDENT>", "docstring": "Adds a CTI file to work with to the CTI file list.\n\n:param file_path: Set a file path to the target CTI file.\n\n:return: None.", "id": "f8741:c20:m11"}
{"signature": "def __init__(self, *, mutex=None, worker=None, logger=None,<EOL>sleep_duration=_sleep_duration_default):", "body": "<EOL>super().__init__(mutex=mutex, logger=logger)<EOL>self._thread = None<EOL>self._worker = worker<EOL>self._sleep_duration = sleep_duration<EOL>", "docstring": ":param mutex:\n:param worker:\n:param logger:\n:param sleep_duration:", "id": "f8741:c3:m0"}
{"signature": "def __init__(self, *, buffer=None, node_map=None, logger=None):", "body": "<EOL>assert buffer<EOL>assert node_map<EOL>self._logger = logger or get_logger(name=__name__)<EOL>super().__init__(buffer=buffer, logger=self._logger)<EOL>", "docstring": ":param buffer:\n:param node_map:\n:param logger:", "id": "f8741:c13:m0"}
{"signature": "def start(self):", "body": "self._is_running = True<EOL>self._start()<EOL>self._logger.debug(<EOL>'<STR_LIT>'.format(self.id_)<EOL>)<EOL>", "docstring": ":return: None.", "id": "f8741:c1:m1"}
{"signature": "def _reset(self):", "body": "<EOL>for ia in self._ias:<EOL><INDENT>ia._destroy()<EOL><DEDENT>self._ias.clear()<EOL>self._logger.info('<STR_LIT>')<EOL>self.remove_cti_files()<EOL>self._release_gentl_producers()<EOL>if self._profiler:<EOL><INDENT>self._profiler.print_diff()<EOL><DEDENT>self._logger.info('<STR_LIT>')<EOL>", "docstring": "Initializes the :class:`Harvester` object. Once you reset the\n:class:`Harvester` object, all allocated resources, including buffers\nand remote device, will be released.\n\n:return: None.", "id": "f8741:c20:m16"}
{"signature": "@property<EOL><INDENT>def components(self):<DEDENT>", "body": "return self._components<EOL>", "docstring": ":return: A :class:`list` containing objects that derive from :const:`ComponentBase` class.", "id": "f8741:c9:m3"}
{"signature": "@property<EOL><INDENT>def cti_files(self):<DEDENT>", "body": "return self._cti_files<EOL>", "docstring": ":return: A :class:`list` object containing :class:`str` objects.", "id": "f8741:c20:m4"}
{"signature": "def represent_pixel_location(self):", "body": "if self.data is None:<EOL><INDENT>return None<EOL><DEDENT>return self._data.reshape(<EOL>self.height + self.y_padding,<EOL>int(self.width * self._num_components_per_pixel + self.x_padding)<EOL>)<EOL>", "docstring": "Returns a NumPy array that represents the 2D pixel location,\nwhich is defined by PFNC, of the original image data.\n\nYou may use the returned NumPy array for a calculation to map the\noriginal image to another format.\n\n:return: A NumPy array that represents the 2D pixel location.", "id": "f8741:c7:m1"}
{"signature": "def __init__(self, *, buffer=None, node_map=None, logger=None):", "body": "<EOL>assert buffer<EOL>assert node_map<EOL>self._logger = logger or get_logger(name=__name__)<EOL>super().__init__(buffer=buffer, logger=self._logger)<EOL>", "docstring": ":param buffer:\n:param node_map:\n:param logger:", "id": "f8741:c16:m0"}
{"signature": "@property<EOL><INDENT>def device_info_list(self):<DEDENT>", "body": "return self._device_info_list<EOL>", "docstring": ":return: A :class:`list` object containing :class:`DeviceInfo` objects", "id": "f8741:c20:m5"}
{"signature": "@property<EOL><INDENT>def system(self):<DEDENT>", "body": "return self._system<EOL>", "docstring": ":return: The parent :class:`System` module object of the connecting remote device.", "id": "f8741:c19:m17"}
{"signature": "def __init__(self, *, buffer=None, node_map=None, logger=None):", "body": "<EOL>assert buffer<EOL>assert node_map<EOL>self._logger = logger or get_logger(name=__name__)<EOL>super().__init__(buffer=buffer, logger=self._logger)<EOL>", "docstring": ":param buffer:\n:param node_map:\n:param logger:", "id": "f8741:c14:m0"}
{"signature": "@property<EOL><INDENT>def data_format(self):<DEDENT>", "body": "return self._buffer.data_format<EOL>", "docstring": ":return: The data type of the data component.", "id": "f8741:c5:m1"}
{"signature": "def __init__(self, *, mutex=None, logger=None):", "body": "<EOL>self._logger = logger or get_logger(name=__name__)<EOL>super().__init__()<EOL>self._is_running = False<EOL>self._mutex = mutex<EOL>", "docstring": ":param mutex:\n:param logger:", "id": "f8741:c1:m0"}
{"signature": "@property<EOL><INDENT>def payload_type(self):<DEDENT>", "body": "return self._buffer.payload_type<EOL>", "docstring": "TODO:\n:return:", "id": "f8741:c9:m1"}
{"signature": "@property<EOL><INDENT>def y_padding(self):<DEDENT>", "body": "try:<EOL><INDENT>if self._part:<EOL><INDENT>value = self._part.y_padding<EOL><DEDENT>else:<EOL><INDENT>value = self._buffer.padding_y<EOL><DEDENT><DEDENT>except (InvalidParameterException, NotImplementedException):<EOL><INDENT>value = <NUM_LIT:0><EOL><DEDENT>return value<EOL>", "docstring": ":return: The Y padding of the data component in the buffer in number of pixels.", "id": "f8741:c7:m12"}
{"signature": "def __init__(self, *, buffer=None, node_map=None, logger=None):", "body": "<EOL>assert buffer<EOL>assert node_map<EOL>self._logger = logger or get_logger(name=__name__)<EOL>super().__init__(buffer=buffer, logger=self._logger)<EOL>self._components.append(<EOL>self._build_component(<EOL>buffer=buffer, node_map=node_map<EOL>)<EOL>)<EOL>", "docstring": ":param buffer:\n:param node_map:\n:param logger:", "id": "f8741:c11:m0"}
{"signature": "def _start(self):", "body": "raise NotImplementedError<EOL>", "docstring": "This method is abstract and should be reimplemented in any sub-class.\n\nStarts its worker running.\n\n:return: None.", "id": "f8741:c1:m2"}
{"signature": "def __init__(self, *, profile=False, logger=None):", "body": "<EOL>self._logger = logger or get_logger(name=__name__)<EOL>super().__init__()<EOL>self._cti_files = []<EOL>self._producers = []<EOL>self._systems = []<EOL>self._interfaces = []<EOL>self._device_info_list = []<EOL>self._ias = []<EOL>self._has_revised_device_list = False<EOL>self._timeout_for_update = <NUM_LIT:1000>  <EOL>if profile:<EOL><INDENT>from harvesters._private.core.helper.profiler import Profiler<EOL>self._profiler = Profiler()<EOL><DEDENT>else:<EOL><INDENT>self._profiler = None<EOL><DEDENT>if self._profiler:<EOL><INDENT>self._profiler.print_diff()<EOL><DEDENT>self._finalizer = weakref.finalize(self, self._reset)<EOL>", "docstring": ":param profile:\n:param logger:", "id": "f8741:c20:m0"}
{"signature": "def start_image_acquisition(self):", "body": "if not self._create_ds_at_connection:<EOL><INDENT>self._setup_data_streams()<EOL><DEDENT>num_required_buffers = self._num_buffers<EOL>for data_stream in self._data_streams:<EOL><INDENT>try:<EOL><INDENT>num_buffers = data_stream.buffer_announce_min<EOL>if num_buffers < num_required_buffers:<EOL><INDENT>num_buffers = num_required_buffers<EOL><DEDENT><DEDENT>except InvalidParameterException as e:<EOL><INDENT>num_buffers = num_required_buffers<EOL>self._logger.debug(e, exc_info=True)<EOL><DEDENT>if data_stream.defines_payload_size():<EOL><INDENT>buffer_size = data_stream.payload_size<EOL><DEDENT>else:<EOL><INDENT>buffer_size = self.device.node_map.PayloadSize.value<EOL><DEDENT>raw_buffers = self._create_raw_buffers(<EOL>num_buffers, buffer_size<EOL>)<EOL>buffer_tokens = self._create_buffer_tokens(<EOL>raw_buffers<EOL>)<EOL>self._announced_buffers = self._announce_buffers(<EOL>data_stream=data_stream, _buffer_tokens=buffer_tokens<EOL>)<EOL>self._queue_announced_buffers(<EOL>data_stream=data_stream, buffers=self._announced_buffers<EOL>)<EOL><DEDENT>try:<EOL><INDENT>acq_mode = self.device.node_map.AcquisitionMode.value<EOL>if acq_mode == '<STR_LIT>':<EOL><INDENT>num_images_to_acquire = -<NUM_LIT:1><EOL><DEDENT>elif acq_mode == '<STR_LIT>':<EOL><INDENT>num_images_to_acquire = <NUM_LIT:1><EOL><DEDENT>elif acq_mode == '<STR_LIT>':<EOL><INDENT>num_images_to_acquire = self.device.node_map.AcquisitionFrameCount.value<EOL><DEDENT>else:<EOL><INDENT>num_images_to_acquire = -<NUM_LIT:1><EOL><DEDENT><DEDENT>except LogicalErrorException as e:<EOL><INDENT>num_images_to_acquire = -<NUM_LIT:1><EOL>self._logger.debug(e, exc_info=True)<EOL><DEDENT>self._num_images_to_acquire = num_images_to_acquire<EOL>try:<EOL><INDENT>self.device.node_map.TLParamsLocked.value = <NUM_LIT:1><EOL><DEDENT>except LogicalErrorException:<EOL><INDENT>pass<EOL><DEDENT>self._is_acquiring_images = True<EOL>for data_stream in 
self._data_streams:<EOL><INDENT>data_stream.start_acquisition(<EOL>ACQ_START_FLAGS_LIST.ACQ_START_FLAGS_DEFAULT,<EOL>self._num_images_to_acquire<EOL>)<EOL><DEDENT>if self.thread_image_acquisition:<EOL><INDENT>self.thread_image_acquisition.start()<EOL><DEDENT>self.device.node_map.AcquisitionStart.execute()<EOL>self._logger.info(<EOL>'<STR_LIT>'.format(self._device.id_)<EOL>)<EOL>if self._profiler:<EOL><INDENT>self._profiler.print_diff()<EOL><DEDENT>", "docstring": "Starts image acquisition.\n\n:return: None.", "id": "f8741:c19:m28"}
{"signature": "@property<EOL><INDENT>def x_padding(self):<DEDENT>", "body": "try:<EOL><INDENT>if self._part:<EOL><INDENT>value = self._part.x_padding<EOL><DEDENT>else:<EOL><INDENT>value = self._buffer.padding_x<EOL><DEDENT><DEDENT>except InvalidParameterException:<EOL><INDENT>value = <NUM_LIT:0><EOL><DEDENT>return value<EOL>", "docstring": "Returns\n:return: The X padding of the data component in the buffer in number of pixels.", "id": "f8741:c7:m11"}
{"signature": "def acquire(self):", "body": "raise NotImplementedError<EOL>", "docstring": "This method is abstract and should be reimplemented in any sub-class.\n\nAcquires a mutex.\n\n:return: None.", "id": "f8741:c1:m4"}
{"signature": "@property<EOL><INDENT>def data_format(self):<DEDENT>", "body": "return symbolics[self.data_format_value]<EOL>", "docstring": ":return: The data type of the data component as string.", "id": "f8741:c7:m7"}
{"signature": "def release(self):", "body": "raise NotImplementedError<EOL>", "docstring": "This method is abstract and should be reimplemented in any sub-class.\n\nReleases the acquired mutex.\n\n:return: None.", "id": "f8741:c1:m5"}
{"signature": "def __init__(self, *, buffer=None, part=None, node_map=None, logger=None):", "body": "<EOL>assert buffer<EOL>assert node_map<EOL>super().__init__(buffer=buffer)<EOL>self._logger = logger or get_logger(name=__name__)<EOL>self._part = part<EOL>self._node_map = node_map<EOL>self._data = None<EOL>self._num_components_per_pixel = <NUM_LIT:0><EOL>symbolic = self.data_format<EOL>if self.x_padding > <NUM_LIT:0>:<EOL><INDENT>dtype = '<STR_LIT>'<EOL>bytes_per_pixel_data_component = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>if symbolic in uint16_formats:<EOL><INDENT>dtype = '<STR_LIT>'<EOL>bytes_per_pixel_data_component = <NUM_LIT:2><EOL><DEDENT>elif symbolic in uint32_formats:<EOL><INDENT>dtype = '<STR_LIT>'<EOL>bytes_per_pixel_data_component = <NUM_LIT:4><EOL><DEDENT>elif symbolic in float32_formats:<EOL><INDENT>dtype = '<STR_LIT>'<EOL>bytes_per_pixel_data_component = <NUM_LIT:4><EOL><DEDENT>elif symbolic in uint8_formats:<EOL><INDENT>dtype = '<STR_LIT>'<EOL>bytes_per_pixel_data_component = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>self._data = None<EOL>return<EOL><DEDENT><DEDENT>if symbolic in lmn_444_location_formats:<EOL><INDENT>num_components_per_pixel = <NUM_LIT><EOL><DEDENT>elif symbolic in lmn_422_location_formats:<EOL><INDENT>num_components_per_pixel = <NUM_LIT><EOL><DEDENT>elif symbolic in lmn_411_location_formats:<EOL><INDENT>num_components_per_pixel = <NUM_LIT><EOL><DEDENT>elif symbolic in lmno_4444_location_formats:<EOL><INDENT>num_components_per_pixel = <NUM_LIT><EOL><DEDENT>elif symbolic in mono_location_formats orsymbolic in bayer_location_formats:<EOL><INDENT>num_components_per_pixel = <NUM_LIT:1.><EOL><DEDENT>else:<EOL><INDENT>self._data = None<EOL>return<EOL><DEDENT>self._num_components_per_pixel = num_components_per_pixel<EOL>self._symbolic = symbolic<EOL>width = self.width<EOL>height = self.height<EOL>if self._part:<EOL><INDENT>count = self._part.data_size<EOL>count //= bytes_per_pixel_data_component<EOL>data_offset = 
self._part.data_offset<EOL><DEDENT>else:<EOL><INDENT>count = width * height<EOL>count *= num_components_per_pixel<EOL>count += self.y_padding<EOL>data_offset = <NUM_LIT:0><EOL><DEDENT>if _is_logging_buffer_manipulation:<EOL><INDENT>self._logger.debug(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT:)>'.format(<EOL>len(self._buffer.raw_buffer),<EOL>int(count),<EOL>dtype,<EOL>data_offset,<EOL>symbolic,<EOL>self.x_padding,<EOL>self.y_padding,<EOL>)<EOL>)<EOL><DEDENT>self._data = np.frombuffer(<EOL>self._buffer.raw_buffer,<EOL>count=int(count),<EOL>dtype=dtype,<EOL>offset=data_offset<EOL>)<EOL>", "docstring": ":param buffer:\n:param part:\n:param node_map:", "id": "f8741:c7:m0"}
{"signature": "def __init__(self, base=None, worker=None,<EOL>sleep_duration=_sleep_duration_default):", "body": "<EOL>assert base<EOL>super().__init__(daemon=self._is_interactive())<EOL>self._worker = worker<EOL>self._base = base<EOL>self._sleep_duration = sleep_duration<EOL>", "docstring": ":param base:\n:param worker:\n:param sleep_duration:", "id": "f8741:c4:m0"}
{"signature": "@property<EOL><INDENT>def payload_type(self):<DEDENT>", "body": "return self._buffer.payload_type<EOL>", "docstring": ":return: The payload type that the :class:`Buffer` object contains.", "id": "f8741:c8:m7"}
{"signature": "def fetch_buffer(self, *, timeout=<NUM_LIT:0>, is_raw=False):", "body": "if not self.is_acquiring_images:<EOL><INDENT>raise TimeoutException<EOL><DEDENT>watch_timeout = True if timeout > <NUM_LIT:0> else False<EOL>buffer = None<EOL>base = time.time()<EOL>while buffer is None:<EOL><INDENT>if watch_timeout and (time.time() - base) > timeout:<EOL><INDENT>raise TimeoutException<EOL><DEDENT>else:<EOL><INDENT>with MutexLocker(self.thread_image_acquisition):<EOL><INDENT>if len(self._holding_filled_buffers) > <NUM_LIT:0>:<EOL><INDENT>if is_raw:<EOL><INDENT>buffer = self._holding_filled_buffers.pop(<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>_buffer = self._holding_filled_buffers.pop(<NUM_LIT:0>)<EOL>self._update_chunk_data(buffer=_buffer)<EOL>buffer = Buffer(<EOL>buffer=_buffer,<EOL>node_map=self.device.node_map,<EOL>logger=self._logger<EOL>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if _is_logging_buffer_manipulation:<EOL><INDENT>self._logger.debug(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT:.>'.format(<EOL>buffer._buffer.context,<EOL>buffer._buffer.frame_id,<EOL>buffer._buffer.parent.id_,<EOL>buffer._buffer.parent.parent.id_<EOL>)<EOL>)<EOL><DEDENT>return buffer<EOL>", "docstring": "Fetches the latest :class:`Buffer` object and returns it.\n\n:param timeout: Set timeout value in second.\n:param is_raw: Set :const:`True` if you need a raw GenTL Buffer module.\n\n:return: A :class:`Buffer` object.", "id": "f8741:c19:m31"}
{"signature": "def _compare_all_data_between_schemes(_from, _to):", "body": "def compare_group(_from, _to, group):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>source = DATA[_from][group]<EOL>actual = '<STR_LIT:U+0020>'.join(sanscript.transliterate(source, _from, _to).split())<EOL>expected = '<STR_LIT:U+0020>'.join(DATA[_to][group].split())<EOL>assert expected == actual, \"<STR_LIT>\" % (_from, _to, expected, actual)<EOL><DEDENT>for group in DATA[_from]:<EOL><INDENT>if _to in DATA and group in DATA[_to]:<EOL><INDENT>compare_group(_from, _to, group)<EOL><DEDENT><DEDENT>", "docstring": "Compare all data for `_from` and `_to`", "id": "f8751:m0"}
{"signature": "def get_approx_deduplicating_key(text, encoding_scheme=sanscript.DEVANAGARI):", "body": "if encoding_scheme == sanscript.DEVANAGARI:<EOL><INDENT>key = text<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>key = regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", key)<EOL>return key<EOL><DEDENT>else:<EOL><INDENT>logging.warning(\"<STR_LIT>\".format(encoding_scheme, text))<EOL>return regex.sub(\"<STR_LIT>\", \"<STR_LIT>\", text)<EOL><DEDENT>", "docstring": "Given some devanAgarI sanskrit text, this function produces a \"key\" so\nthat\n\n1] The key should be the same for different observed orthographical\nforms of the same text. 
For example:\n\n::\n\n    - \"dharmma\" vs \"dharma\"\n    - \"rAmaM gacChati\" vs \"rAma~N gacChati\" vs \"rAma~N gacChati\"\n    - \"kurvan eva\" vs \"kurvanneva\"\n\n2] The key should be different for different for different texts.\n\n-  \"stamba\" vs \"stambha\"\n\nThis function attempts to succeed at [1] and [2] almostall the time.\nLonger the text, probability of failing at [2] decreases, while\nprobability of failing at [1] increases (albeit very slightly).\n\nSources of orthographically divergent forms:\n\n-  Phonetically sensible grammar rules\n-  Neglect of sandhi while writing\n-  Punctuation, spaces, avagraha-s.\n-  Regional-language-influenced mistakes (La instead of la.)\n\nSome example applications of this function:\n\n-  Create a database of quotes or words with minimal duplication.\n-  Search a database of quotes or words while being robust to optional\n   forms.\n\nAlso see equivalent function in the scala indic-transliteration package.", "id": "f8752:m0"}
{"signature": "def _setup():", "body": "s = str.split<EOL>if sys.version_info < (<NUM_LIT:3>, <NUM_LIT:0>):<EOL><INDENT>s = unicode.split<EOL><DEDENT>def pop_all(some_dict, some_list):<EOL><INDENT>for scheme in some_list:<EOL><INDENT>some_dict.pop(scheme)<EOL><DEDENT><DEDENT>global SCHEMES<EOL>SCHEMES = copy.deepcopy(sanscript.SCHEMES)<EOL>pop_all(SCHEMES, [sanscript.ORIYA, sanscript.BENGALI, sanscript.GUJARATI])<EOL>SCHEMES[HK].update({<EOL>'<STR_LIT>': s(\"\"\"<STR_LIT>\"\"\") + s(\"\"\"<STR_LIT>\"\"\"),<EOL>'<STR_LIT>': s(\"\"\"<STR_LIT>\"\"\") + s(\"\"\"<STR_LIT>\"\"\"),<EOL>'<STR_LIT>': sanscript.SCHEMES[HK]['<STR_LIT>'] + s(\"\"\"<STR_LIT>\"\"\")<EOL>})<EOL>SCHEMES[ITRANS].update({<EOL>'<STR_LIT>': s(\"\"\"<STR_LIT>\"\"\") + s(\"\"\"<STR_LIT>\"\"\"),<EOL>'<STR_LIT>': s(\"\"\"<STR_LIT>\"\"\") + s(\"\"\"<STR_LIT>\"\"\"),<EOL>'<STR_LIT>': sanscript.SCHEMES[ITRANS]['<STR_LIT>'] + s(\"\"\"<STR_LIT>\"\"\")<EOL>})<EOL>pop_all(SCHEMES[ITRANS].synonym_map, s(\"\"\"<STR_LIT>\"\"\"))<EOL>SCHEMES[OPTITRANS].update({<EOL>'<STR_LIT>': s(\"\"\"<STR_LIT>\"\"\") + s(\"\"\"<STR_LIT>\"\"\"),<EOL>'<STR_LIT>': s(\"\"\"<STR_LIT>\"\"\") + s(\"\"\"<STR_LIT>\"\"\"),<EOL>'<STR_LIT>': sanscript.SCHEMES[OPTITRANS]['<STR_LIT>'] + s(\"\"\"<STR_LIT>\"\"\")<EOL>})<EOL>pop_all(SCHEMES[OPTITRANS].synonym_map, s(\"\"\"<STR_LIT>\"\"\"))<EOL>", "docstring": "Add a variety of default schemes.", "id": "f8755:m1"}
{"signature": "def detect(text):", "body": "if sys.version_info < (<NUM_LIT:3>, <NUM_LIT:0>):<EOL><INDENT>try:<EOL><INDENT>text = text.decode('<STR_LIT:utf-8>')<EOL><DEDENT>except UnicodeError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>for L in text:<EOL><INDENT>code = ord(L)<EOL>if code >= BRAHMIC_FIRST_CODE_POINT:<EOL><INDENT>for name, start_code in BLOCKS:<EOL><INDENT>if start_code <= code <= BRAHMIC_LAST_CODE_POINT:<EOL><INDENT>return name<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if Regex.IAST_OR_KOLKATA_ONLY.search(text):<EOL><INDENT>if Regex.KOLKATA_ONLY.search(text):<EOL><INDENT>return Scheme.Kolkata<EOL><DEDENT>else:<EOL><INDENT>return Scheme.IAST<EOL><DEDENT><DEDENT>if Regex.ITRANS_ONLY.search(text):<EOL><INDENT>return Scheme.ITRANS<EOL><DEDENT>if Regex.SLP1_ONLY.search(text):<EOL><INDENT>return Scheme.SLP1<EOL><DEDENT>if Regex.VELTHUIS_ONLY.search(text):<EOL><INDENT>return Scheme.Velthuis<EOL><DEDENT>if Regex.ITRANS_OR_VELTHUIS_ONLY.search(text):<EOL><INDENT>return Scheme.ITRANS<EOL><DEDENT>return Scheme.HK<EOL>", "docstring": "Detect the input's transliteration scheme.\n\n      :param text: some text data, either a `unicode` or a `str` encoded\n                   in UTF-8.", "id": "f8756:m0"}
{"signature": "def transliterate(data, _from=None, _to=None, scheme_map=None, **kw):", "body": "if scheme_map is None:<EOL><INDENT>scheme_map = _get_scheme_map(_from, _to)<EOL><DEDENT>options = {<EOL>'<STR_LIT>': {'<STR_LIT>'},<EOL>'<STR_LIT>': set('<STR_LIT:<>'),<EOL>'<STR_LIT>': set('<STR_LIT:>>')<EOL>}<EOL>options.update(kw)<EOL>from indic_transliteration.sanscript.brahmic_mapper import _brahmic<EOL>from indic_transliteration.sanscript.roman_mapper import _roman<EOL>func = _roman if scheme_map.from_scheme.is_roman else _brahmic<EOL>return func(data, scheme_map, **options)<EOL>", "docstring": "Transliterate `data` with the given parameters::\n\n        output = transliterate('idam adbhutam', HK, DEVANAGARI)\n\n    Each time the function is called, a new :class:`SchemeMap` is created\n    to map the input scheme to the output scheme. This operation is fast\n    enough for most use cases. But for higher performance, you can pass a\n    pre-computed :class:`SchemeMap` instead::\n\n        scheme_map = SchemeMap(SCHEMES[HK], SCHEMES[DEVANAGARI])\n        output = transliterate('idam adbhutam', scheme_map=scheme_map)\n\n    :param data: the data to transliterate\n    :param scheme_map: the :class:`SchemeMap` to use. If specified, ignore\n                       `_from` and `_to`. If unspecified, create a\n                       :class:`SchemeMap` from `_from` to `_to`.", "id": "f8765:m1"}
{"signature": "@lru_cache(maxsize=<NUM_LIT:8>)<EOL>def _get_scheme_map(input_encoding, output_encoding):", "body": "return SchemeMap(SCHEMES[input_encoding], SCHEMES[output_encoding])<EOL>", "docstring": "Provides a caching layer on top of `SchemeMap` objects to allow faster\n    access to scheme maps we've instantiated once.\n\n    :param input_encoding: Input encoding. Must be defined in `SCHEMES`.\n    :param output_encoding: Input encoding. Must be defined in `SCHEMES`.", "id": "f8765:m0"}
{"signature": "def to_utf8(y):", "body": "out = []<EOL>for x in y:<EOL><INDENT>if x < <NUM_LIT>:<EOL><INDENT>out.append(x)<EOL><DEDENT>elif x < <NUM_LIT>:<EOL><INDENT>out.append((x >> <NUM_LIT:6>) | <NUM_LIT>)<EOL>out.append((x & <NUM_LIT>) | <NUM_LIT>)<EOL><DEDENT>elif x < <NUM_LIT>:<EOL><INDENT>out.append((x >> <NUM_LIT:12>) | <NUM_LIT>)<EOL>out.append(((x >> <NUM_LIT:6>) & <NUM_LIT>) | <NUM_LIT>)<EOL>out.append((x & <NUM_LIT>) | <NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>out.append((x >> <NUM_LIT>) | <NUM_LIT>)<EOL>out.append((x >> <NUM_LIT:12>) & <NUM_LIT>)<EOL>out.append(((x >> <NUM_LIT:6>) & <NUM_LIT>) | <NUM_LIT>)<EOL>out.append((x & <NUM_LIT>) | <NUM_LIT>)<EOL><DEDENT><DEDENT>return '<STR_LIT>'.join(map(chr, out))<EOL>", "docstring": "converts an array of integers to utf8 string", "id": "f8766:m2"}
{"signature": "def set_script(self, i):", "body": "if i in range(<NUM_LIT:1>, <NUM_LIT:10>):<EOL><INDENT>n = i - <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>raise IllegalInput(\"<STR_LIT>\" % (hex(i)))<EOL><DEDENT>if n > -<NUM_LIT:1>: <EOL><INDENT>self.curr_script = n<EOL>self.delta = n * DELTA<EOL><DEDENT>return<EOL>", "docstring": "set the value of delta to reflect the current codepage", "id": "f8766:c1:m2"}
{"signature": "def __init__(self, name, charRange, charClass=TLCharacter):", "body": "\"\"\"<STR_LIT>\"\"\"<EOL>charRange.sort()<EOL>for c in charRange:<EOL><INDENT>try:<EOL><INDENT>tlchar = charClass(c, self)<EOL>self[tlchar.chr] = tlchar<EOL><DEDENT>except ValueError: <EOL><INDENT>pass<EOL><DEDENT><DEDENT>self._longestEntry = <NUM_LIT:1><EOL>self.name = name<EOL>self.transliterationSchemes = {}<EOL>self._register()<EOL>", "docstring": "Set up a character block corresponding to a range of code points.\n\n        Keyword arguments:\n        name -- a string containing the name of the character block.\n                (should normally use a standard Unicode character block name)\n        range -- a list of code points. Reserved code points are ignored.\n        charClass -- the class to be used to create the characters.\n                     Should be a subclass of TLCharacter.", "id": "f8767:c1:m0"}
{"signature": "def _transliterate(self, text, outFormat):", "body": "return _Devanagari._transliterate(self, text, outFormat)<EOL>", "docstring": "Need to specify which superclass _transliterate() to call.", "id": "f8767:c6:m1"}
{"signature": "def __init__(self, blockName, schemeName, data, swapTable=None):", "body": "self.block = characterBlocks[blockName]<EOL>self.name = schemeName<EOL>for equiv, unicodeHexValue in data.items():<EOL><INDENT>self[equiv] = self.block[chr(unicodeHexValue)]<EOL>self[equiv].addEquivalent(self.name, equiv)<EOL><DEDENT>self._longestEntry = max([len(e) for e in data.keys()])<EOL>if self._longestEntry > <NUM_LIT:1>:<EOL><INDENT>self._parseTree = {}<EOL>self._parsedata = list(data.keys())<EOL>self._parsedata.sort()<EOL>self._setupParseTree(<NUM_LIT:0>, len(data) - <NUM_LIT:1>, <NUM_LIT:0>, self._parseTree)<EOL><DEDENT>if swapTable is not None:<EOL><INDENT>if not isinstance(swapTable, dict): raise (TypeError)<EOL><DEDENT>self.swapTable = swapTable<EOL>self._register()<EOL>", "docstring": "Set up a transliteration scheme.\n\n        Keyword arguments:\n        blockName -- a string containg the name of the character block this \n                     transliteration scheme is used for, \n                     e.g. 'CYRILLIC', 'DEVANAGARI'.\n        schemeName -- the name of the transliteration scheme. \n                      Must be unique.\n        data -- a dict containing the data for the transliteration scheme. \n                Keys are transliterated Unicode characters or strings.\n                Values are integers corresponding to Unicode code points.\n                For examples, see the data for the built-in transliteration\n                schemes.\n        swapTable -- a dict (default None) containing any non-standard\n                     letter combinations used in the transliteration scheme\n                     that we want to pre-process away before transliterating.\n                     See the ITRANS data for examples.\n\n        Raises:\n        KeyError: unknown block name.\n        TypeError: swapTable is not a dict", "id": "f8767:c2:m0"}
{"signature": "def __init__(self, blockName, schemeName, data, swapTable=None):", "body": "TransliterationScheme.__init__(self, blockName, schemeName, data, swapTable)<EOL>self._implicitA = False<EOL>", "docstring": "Set up a Devanagari transliteration scheme.\n\n        Extends TransliterationScheme.__init__", "id": "f8767:c6:m0"}
{"signature": "def _transliterate (self, text, outFormat):", "body": "result = []<EOL>for c in text:<EOL><INDENT>if c.isspace(): result.append(c)<EOL>try: <EOL><INDENT>result.append(self[c].equivalents[outFormat.name])<EOL><DEDENT>except KeyError:<EOL><INDENT>result.append(_unrecognised(c))<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Transliterate the text to the target transliteration scheme.", "id": "f8767:c1:m2"}
{"signature": "def resetOptions():", "body": "global options<EOL>defaultOptions = {<EOL>'<STR_LIT>' : '<STR_LIT:utf-8>',  <EOL>'<STR_LIT>' : '<STR_LIT:utf-8>', <EOL>'<STR_LIT>' : '<STR_LIT:?>', <EOL>'<STR_LIT>' : UNRECOGNISED_ECHO,   <EOL>'<STR_LIT>' : False,   <EOL>}<EOL>options = defaultOptions.copy()<EOL>", "docstring": "Reset options to their default values.", "id": "f8767:m0"}
{"signature": "def _transliterate(self, text, outFormat):", "body": "def getResult(): <EOL><INDENT>if curMatch.isspace():<EOL><INDENT>result.append(curMatch)<EOL>return<EOL><DEDENT>if prevMatch in self:<EOL><INDENT>prev = self[prevMatch]<EOL><DEDENT>else:<EOL><INDENT>prev = None<EOL><DEDENT>if nextMatch in self:<EOL><INDENT>next = self[nextMatch]<EOL><DEDENT>else:<EOL><INDENT>next = None<EOL><DEDENT>try:<EOL><INDENT>equiv = outFormat._equivalent(self[curMatch], <EOL>prev, <EOL>next, <EOL>self._implicitA)<EOL><DEDENT>except KeyError:<EOL><INDENT>equiv = _unrecognised(curMatch)<EOL><DEDENT>for e in equiv:<EOL><INDENT>result.append(e)<EOL><DEDENT><DEDENT>def incr(c):<EOL><INDENT>if self._longestEntry == <NUM_LIT:1>:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>return len(c)<EOL><DEDENT>result = []<EOL>text = self._preprocess(text)<EOL>i = <NUM_LIT:0><EOL>prevMatch = None<EOL>nextMatch = None<EOL>curMatch = self._getNextChar(text, i)<EOL>i = i + len(curMatch)<EOL>while i < len(text):<EOL><INDENT>nextMatch = self._getNextChar(text, i)<EOL>getResult()<EOL>i = i + len(nextMatch)<EOL>prevMatch = curMatch<EOL>curMatch = nextMatch<EOL>nextMatch = None<EOL><DEDENT>getResult() <EOL>return result<EOL>", "docstring": "Transliterate a devanagari text into the target format.\n\n        Transliterating a character to or from Devanagari is not a simple \n        lookup: it depends on the preceding and following characters.", "id": "f8767:c4:m0"}
{"signature": "def _transliterate (self, text, outFormat):", "body": "result = []<EOL>for c in text:<EOL><INDENT>if c.isspace(): result.append(c)<EOL>try: <EOL><INDENT>result.append(self[c].equivalents[outFormat.name])<EOL><DEDENT>except KeyError:<EOL><INDENT>result.append(_unrecognised(c))<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Transliterate the text to the target transliteration scheme.", "id": "f8768:c1:m2"}
{"signature": "def _transliterate(self, text, outFormat):", "body": "return _Devanagari._transliterate(self, text, outFormat)<EOL>", "docstring": "Need to specify which superclass _transliterate() to call.", "id": "f8768:c6:m1"}
{"signature": "def transliterate(text, inFormat, outFormat, requestOptions={}):", "body": "def asciiEncode(chr):<EOL><INDENT>value = ord(chr)<EOL>if value > <NUM_LIT:255>:<EOL><INDENT>return '<STR_LIT>' % (value)<EOL><DEDENT>return chr<EOL><DEDENT>try:<EOL><INDENT>options.update(requestOptions)<EOL>\"\"\"<STR_LIT>\"\"\"<EOL>basestring = (str, bytes)<EOL>\"\"\"<STR_LIT>\"\"\"<EOL>def findFormat(fmt):<EOL><INDENT>if isinstance(fmt, basestring):<EOL><INDENT>try:<EOL><INDENT>fmt = _names[fmt.upper()]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise (ValueError, '<STR_LIT>' + fmt)<EOL><DEDENT><DEDENT>return fmt<EOL><DEDENT>inFormat = findFormat(inFormat)<EOL>outFormat = findFormat(outFormat)<EOL>\"\"\"<STR_LIT>\"\"\"<EOL>if not isinstance(text, basestring): <EOL><INDENT>raise (TypeError, \"<STR_LIT>\")<EOL><DEDENT>def getBlock(format):<EOL><INDENT>if isinstance(format, CharacterBlock):<EOL><INDENT>return format<EOL><DEDENT>else:<EOL><INDENT>return format.block<EOL><DEDENT><DEDENT>inBlock = getBlock(inFormat)<EOL>outBlock = getBlock(outFormat)<EOL>if not inBlock is outBlock:<EOL><INDENT>raise (ValueError, \"<STR_LIT>\")<EOL><DEDENT>if inFormat is outFormat:<EOL><INDENT>if inFormat._longestEntry == <NUM_LIT:1>:<EOL><INDENT>[inFormat[c] for c in set(text) if not c.isspace()] <EOL>return text<EOL><DEDENT><DEDENT>\"\"\"<STR_LIT>\"\"\"<EOL>result = inFormat._transliterate(text, outFormat)<EOL>if options['<STR_LIT>']:<EOL><INDENT>result = [asciiEncode(c) for c in result]<EOL><DEDENT>return u'<STR_LIT>'.join(result).encode(options['<STR_LIT>'])<EOL><DEDENT>finally:<EOL><INDENT>resetOptions()<EOL><DEDENT>", "docstring": "Transliterate a text.\n\n    Keyword arguments:\n    text -- a unicode string containing the text to be transliterated\n    inFormat -- the \"from\" CharacterBlock or TransliterationScheme, or its name\n    outFormat -- the target CharacterBlock or TransliterationScheme, or its name\n    requestOptions -- optional dict containing option settings that override 
the\n                      defaults for this request.\n\n    Returns a unicode object containing the text transliterated into the\n    target character set.\n\n    Raises:\n    ValueError -- unrecognised input or output format.\n    KeyError -- a character in text is not a member of inFormat, or has no\n    corresponding character defined in outFormat.", "id": "f8768:m2"}
{"signature": "def _preprocess(self, text):", "body": "return text<EOL>", "docstring": "Make our signature compatible with TransliterationScheme.", "id": "f8768:c1:m3"}
{"signature": "def _setupParseTree(self, rowFrom, rowTo, colIndex, tree):", "body": "if colIndex == self._longestEntry:<EOL><INDENT>return<EOL><DEDENT>prevchar = None<EOL>rowIndex = rowFrom<EOL>while rowIndex <= rowTo:<EOL><INDENT>if colIndex < len(self._parsedata[rowIndex]):<EOL><INDENT>c = self._parsedata[rowIndex][colIndex]<EOL>if c != prevchar:<EOL><INDENT>tree[c] = {}<EOL>if  prevchar is not None:<EOL><INDENT>self._setupParseTree(rowFrom, rowIndex - <NUM_LIT:1>, colIndex + <NUM_LIT:1>, tree[prevchar])<EOL><DEDENT>rowFrom = rowIndex<EOL>prevchar = c<EOL><DEDENT>if rowIndex == rowTo:<EOL><INDENT>self._setupParseTree(rowFrom, rowIndex, colIndex + <NUM_LIT:1>, tree[prevchar])<EOL><DEDENT><DEDENT>rowIndex = rowIndex + <NUM_LIT:1><EOL><DEDENT>", "docstring": "Build the search tree for multi-character encodings.", "id": "f8768:c2:m2"}
{"signature": "def __init__(self, blockName, schemeName, data, swapTable=None):", "body": "TransliterationScheme.__init__(self, blockName, schemeName, data, swapTable)<EOL>self._implicitA = False<EOL>", "docstring": "Set up a Devanagari transliteration scheme.\n\n        Extends TransliterationScheme.__init__", "id": "f8768:c6:m0"}
{"signature": "def _transliterate(self, text, outFormat):", "body": "def getResult(): <EOL><INDENT>if curMatch.isspace():<EOL><INDENT>result.append(curMatch)<EOL>return<EOL><DEDENT>if prevMatch in self:<EOL><INDENT>prev = self[prevMatch]<EOL><DEDENT>else:<EOL><INDENT>prev = None<EOL><DEDENT>if nextMatch in self:<EOL><INDENT>next = self[nextMatch]<EOL><DEDENT>else:<EOL><INDENT>next = None<EOL><DEDENT>try:<EOL><INDENT>equiv = outFormat._equivalent(self[curMatch], <EOL>prev, <EOL>next, <EOL>self._implicitA)<EOL><DEDENT>except KeyError:<EOL><INDENT>equiv = _unrecognised(curMatch)<EOL><DEDENT>for e in equiv:<EOL><INDENT>result.append(e)<EOL><DEDENT><DEDENT>def incr(c):<EOL><INDENT>if self._longestEntry == <NUM_LIT:1>:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>return len(c)<EOL><DEDENT>result = []<EOL>text = self._preprocess(text)<EOL>i = <NUM_LIT:0><EOL>prevMatch = None<EOL>nextMatch = None<EOL>curMatch = self._getNextChar(text, i)<EOL>i = i + len(curMatch)<EOL>while i < len(text):<EOL><INDENT>nextMatch = self._getNextChar(text, i)<EOL>getResult()<EOL>i = i + len(nextMatch)<EOL>prevMatch = curMatch<EOL>curMatch = nextMatch<EOL>nextMatch = None<EOL><DEDENT>getResult() <EOL>return result<EOL>", "docstring": "Transliterate a devanagari text into the target format.\n\n        Transliterating a character to or from Devanagari is not a simple \n        lookup: it depends on the preceding and following characters.", "id": "f8768:c4:m0"}
{"signature": "def __init__(self, unicodeHexValue, block):", "body": "self.isVowel = False<EOL>if unicodeHexValue in DevanagariCharacter._vowelRange:<EOL><INDENT>self.isVowel = True<EOL><DEDENT>self._dependentVowel = None<EOL>if unicodeHexValue in DevanagariCharacter._depVowelRange:<EOL><INDENT>vowel = block[chr(unicodeHexValue - DevanagariCharacter._vowelOffset)]<EOL>vowel._setDependentVowel(unicodeHexValue)<EOL>raise (ValueError) <EOL><DEDENT>TLCharacter.__init__(self, unicodeHexValue, block)<EOL>self.isConsonant = False<EOL>if self.isVowel == Falseand self.chr.isalpha()and self.unicodeHexValue not in (DevanagariCharacter._AVAGRAHA,<EOL>DevanagariCharacter._OM):<EOL><INDENT>self.isConsonant = True<EOL><DEDENT>", "docstring": "Create an object representing a Devanagari character.\n\n        Extends TLCharacter.__init__ to distinguish Devanagari standalone\n        vowels, dependent vowels and consonants.\n\n        Raises \n        ValueError -- for characters in the Devanagari dependent vowel range.\n                      We want these as variants of the corresponding standalone \n                      vowels, not as separate characters.", "id": "f8768:c3:m0"}
{"signature": "def main(argv=None):", "body": "print (transliterate('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'))<EOL>if argv is None:<EOL><INDENT>argv = sys.argv<EOL><DEDENT>try:    <EOL><INDENT>text, inFormat, outFormat = argv[<NUM_LIT:1>:<NUM_LIT:4>]<EOL><DEDENT>except ValueError:<EOL><INDENT>print (main.__doc__)<EOL>return <NUM_LIT:2><EOL><DEDENT>inFormat = inFormat.upper()<EOL>outFormat = outFormat.upper()<EOL>try:<EOL><INDENT>f = open(text)<EOL><DEDENT>except IOError:<EOL><INDENT>print (transliterate(text, inFormat, outFormat))<EOL>return <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>i = <NUM_LIT:1><EOL>for text in f.readlines():<EOL><INDENT>if len(text) > <NUM_LIT:0> and not text.startswith('<STR_LIT:#>'):<EOL><INDENT>print (transliterate(text, inFormat, outFormat).strip('<STR_LIT:\\n>'))<EOL><DEDENT>i = i + <NUM_LIT:1><EOL><DEDENT>f.close()<EOL>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "Call transliterator from a command line.\n\n    python transliterator.py text inputFormat outputFormat\n\n    ... writes the transliterated text to stdout\n\n    text -- the text to be transliterated OR the name of a file containing the text\n    inputFormat -- the name of the character block or transliteration scheme that\n                   the text is to be transliterated FROM, e.g. 'CYRILLIC', 'IAST'.\n                   Not case-sensitive\n    outputFormat -- the name of the character block or transliteration scheme that\n                   the text is to be transliterated TO, e.g. 'CYRILLIC', 'IAST'.\n                   Not case-sensitive", "id": "f8768:m3"}
{"signature": "def push_force_master(self):", "body": "self._git.push('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": "Pushes to master", "id": "f8774:c0:m8"}
{"signature": "def delete_tag(self, value):", "body": "self._git.tag('<STR_LIT>', value)<EOL>", "docstring": "Delete the tag provided", "id": "f8774:c0:m19"}
{"signature": "def commit(self, message, *args):", "body": "self._git.commit('<STR_LIT>', message, *args)<EOL>", "docstring": "Commits", "id": "f8774:c0:m5"}
{"signature": "def branch_upstream_to_master(self):", "body": "self._git.branch('<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": "Branches upstream to master", "id": "f8774:c0:m10"}
{"signature": "def create_branch(self, name):", "body": "self._git.branch(name)<EOL>", "docstring": "Creates a branch", "id": "f8774:c0:m14"}
{"signature": "def add_tag(self, value):", "body": "self._git.tag(value)<EOL>", "docstring": "Tag with provided value", "id": "f8774:c0:m18"}
{"signature": "def remove_branch(self, name):", "body": "self._git.branch('<STR_LIT>', name)<EOL>", "docstring": "Removes a branch", "id": "f8774:c0:m15"}
{"signature": "def add_remote_origin(self, url):", "body": "self._git.remote('<STR_LIT>', '<STR_LIT>', url)<EOL>", "docstring": "Adds the remote origin", "id": "f8774:c0:m6"}
{"signature": "def remove(self, path):", "body": "self._git.rm('<STR_LIT>', path)<EOL>", "docstring": "Removes a path with force", "id": "f8774:c0:m3"}
{"signature": "def setUp(self):", "body": "pass<EOL>", "docstring": "Test set up\n\nThis is where you can setup things that you use throughout the tests. This method is called before every test.", "id": "f8776:c0:m0"}
{"signature": "def setup_logging(args):", "body": "handler = logging.StreamHandler()<EOL>handler.setLevel(args.log_level)<EOL>formatter = logging.Formatter(('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'))<EOL>handler.setFormatter(formatter)<EOL>LOGGER.addHandler(handler)<EOL>", "docstring": "This sets up the logging.\n\nNeeds the args to get the log level supplied\n:param args: The command line arguments", "id": "f8777:m1"}
{"signature": "def main():", "body": "args = get_arguments()<EOL>setup_logging(args)<EOL>version_path = os.path.abspath(os.path.join(<EOL>os.path.dirname(__file__),<EOL>'<STR_LIT:..>',<EOL>'<STR_LIT:..>',<EOL>'<STR_LIT>'<EOL>))<EOL>try:<EOL><INDENT>version_text = open(version_path).read().strip()<EOL><DEDENT>except Exception:<EOL><INDENT>print('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>try:<EOL><INDENT>semver.parse(version_text)<EOL><DEDENT>except ValueError:<EOL><INDENT>print(('<STR_LIT>'<EOL>'<STR_LIT>').format(version_text))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>new_version = version_text<EOL>if args.version:<EOL><INDENT>try:<EOL><INDENT>if semver.parse(args.version):<EOL><INDENT>new_version = args.version<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>print('<STR_LIT>'.format(args.version))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>elif args.bump_major:<EOL><INDENT>new_version = semver.bump_major(version_text)<EOL><DEDENT>elif args.bump_minor:<EOL><INDENT>new_version = semver.bump_minor(version_text)<EOL><DEDENT>elif args.bump_patch:<EOL><INDENT>new_version = semver.bump_patch(version_text)<EOL><DEDENT>try:<EOL><INDENT>with open(version_path, '<STR_LIT:w>') as version_file:<EOL><INDENT>version_file.write(new_version)<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>print('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>print(new_version)<EOL>", "docstring": "Main method.\n\nThis method holds what you want to execute when\nthe script is run on command line.", "id": "f8777:m2"}
{"signature": "def parse(version):", "body": "match = _REGEX.match(version)<EOL>if match is None:<EOL><INDENT>raise ValueError('<STR_LIT>' % version)<EOL><DEDENT>verinfo = match.groupdict()<EOL>verinfo['<STR_LIT>'] = int(verinfo['<STR_LIT>'])<EOL>verinfo['<STR_LIT>'] = int(verinfo['<STR_LIT>'])<EOL>verinfo['<STR_LIT>'] = int(verinfo['<STR_LIT>'])<EOL>return verinfo<EOL>", "docstring": "Parse version to major, minor, patch, pre-release, build parts.", "id": "f8779:m0"}
{"signature": "async def websocket_accept(self, message, stream_name):", "body": "is_first = not self.applications_accepting_frames<EOL>self.applications_accepting_frames.add(stream_name)<EOL>if is_first:<EOL><INDENT>await self.accept()<EOL><DEDENT>", "docstring": "Intercept downstream `websocket.accept` message and thus allow this upsteam application to accept websocket\nframes.", "id": "f8787:c0:m10"}
{"signature": "async def websocket_close(self, message, stream_name):", "body": "if stream_name in self.applications_accepting_frames:<EOL><INDENT>self.applications_accepting_frames.remove(stream_name)<EOL><DEDENT>if self.closing:<EOL><INDENT>return<EOL><DEDENT>if not self.applications_accepting_frames:<EOL><INDENT>await self.close(message.get(\"<STR_LIT:code>\"))<EOL><DEDENT>", "docstring": "Handle downstream `websocket.close` message.\n\nWill disconnect this upstream application from receiving any new frames.\n\nIf there are not more upstream applications accepting messages it will then call `close`.", "id": "f8787:c0:m11"}
{"signature": "async def _create_upstream_applications(self):", "body": "loop = asyncio.get_event_loop()<EOL>for steam_name, ApplicationsCls in self.applications.items():<EOL><INDENT>application = ApplicationsCls(self.scope)<EOL>upstream_queue = asyncio.Queue()<EOL>self.application_streams[steam_name] = upstream_queue<EOL>self.application_futures[steam_name] = loop.create_task(<EOL>application(<EOL>upstream_queue.get,<EOL>partial(self.dispatch_downstream, steam_name=steam_name)<EOL>)<EOL>)<EOL><DEDENT>", "docstring": "Create the upstream applications.", "id": "f8787:c0:m2"}
{"signature": "async def websocket_send(self, message, stream_name):", "body": "text = message.get(\"<STR_LIT:text>\")<EOL>json = await self.decode_json(text)<EOL>data = {<EOL>\"<STR_LIT>\": stream_name,<EOL>\"<STR_LIT>\": json<EOL>}<EOL>await self.send_json(data)<EOL>", "docstring": "Capture downstream websocket.send messages from the upstream applications.", "id": "f8787:c0:m9"}
{"signature": "async def websocket_disconnect(self, message):", "body": "<EOL>self.closing = True<EOL>await self.send_upstream(message)<EOL>await super().websocket_disconnect(message)<EOL>", "docstring": "Handle the disconnect message.\n\nThis is propagated to all upstream applications.", "id": "f8787:c0:m7"}
{"signature": "@time.setter<EOL><INDENT>def time(self, t):<DEDENT>", "body": "_time = arrow.get(t).format('<STR_LIT>')<EOL>self._time = datetime.datetime.strptime(_time, '<STR_LIT>')<EOL>", "docstring": "Convert any timestamp into a datetime and save as _time", "id": "f8792:c0:m7"}
{"signature": "def as_dict(self):", "body": "entry_dict = {}<EOL>entry_dict['<STR_LIT>'] = self.uuid<EOL>entry_dict['<STR_LIT>'] = self.time<EOL>entry_dict['<STR_LIT>'] = self.tz<EOL>if self.tags:<EOL><INDENT>entry_dict['<STR_LIT>'] = self.tags<EOL><DEDENT>entry_dict['<STR_LIT>'] = self.text<EOL>entry_dict['<STR_LIT>'] = self.starred<EOL>entry_dict['<STR_LIT>'] = self.location<EOL>return entry_dict<EOL>", "docstring": "Return a dict that represents the DayOneEntry", "id": "f8792:c0:m8"}
{"signature": "def add_tag(self, _tags):", "body": "if isinstance(_tags, list):<EOL><INDENT>for t in _tags:<EOL><INDENT>self.tags.append(t)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.tags.append(_tags)<EOL><DEDENT>", "docstring": "Add tag(s) to a DayOneEntry", "id": "f8792:c0:m3"}
{"signature": "def _file_path(self, uid):", "body": "file_name = '<STR_LIT>' % (uid)<EOL>return os.path.join(self.dayone_journal_path, file_name)<EOL>", "docstring": "Create and return full file path for DayOne entry", "id": "f8792:c1:m3"}
{"signature": "async def setup():", "body": "connection = await qtm.connect(\"<STR_LIT:127.0.0.1>\")<EOL>if connection is None:<EOL><INDENT>return<EOL><DEDENT>await connection.stream_frames(components=[\"<STR_LIT>\"], on_packet=on_packet)<EOL>", "docstring": "Main function", "id": "f8797:m1"}
{"signature": "def on_packet(packet):", "body": "print(\"<STR_LIT>\".format(packet.framenumber))<EOL>header, markers = packet.get_3d_markers()<EOL>print(\"<STR_LIT>\".format(header))<EOL>for marker in markers:<EOL><INDENT>print(\"<STR_LIT:\\t>\", marker)<EOL><DEDENT>", "docstring": "Callback function that is called everytime a data packet arrives from QTM", "id": "f8797:m0"}
{"signature": "async def package_receiver(queue):", "body": "LOG.info(\"<STR_LIT>\")<EOL>while True:<EOL><INDENT>packet = await queue.get()<EOL>if packet is None:<EOL><INDENT>break<EOL><DEDENT>LOG.info(\"<STR_LIT>\", packet.framenumber)<EOL>header, cameras = packet.get_2d_markers()<EOL>LOG.info(\"<STR_LIT>\", header)<EOL>for i, camera in enumerate(cameras, <NUM_LIT:1>):<EOL><INDENT>LOG.info(\"<STR_LIT>\", i)<EOL>for marker in camera:<EOL><INDENT>LOG.info(\"<STR_LIT>\", marker)<EOL><DEDENT><DEDENT><DEDENT>LOG.info(\"<STR_LIT>\")<EOL>", "docstring": "Asynchronous function that processes queue until None is posted in queue", "id": "f8798:m0"}
{"signature": "def connection_made(self, transport):", "body": "self.transport = transport<EOL>sock = transport.get_extra_info(\"<STR_LIT>\")<EOL>self.port = sock.getsockname()[<NUM_LIT:1>]<EOL>", "docstring": "On socket creation", "id": "f8802:c0:m1"}
{"signature": "@ComponentGetter(QRTComponentType.ComponentAnalogSingle, RTAnalogComponent)<EOL><INDENT>def get_analog_single(<EOL>self, component_info=None, data=None, component_position=None<EOL>):<DEDENT>", "body": "components = []<EOL>append_components = components.append<EOL>for _ in range(component_info.device_count):<EOL><INDENT>component_position, device = QRTPacket._get_exact(<EOL>RTAnalogDeviceSingle, data, component_position<EOL>)<EOL>RTAnalogDeviceSamples.format = struct.Struct(<EOL>RTAnalogDeviceSamples.format_str % device.channel_count<EOL>)<EOL>component_position, sample = QRTPacket._get_tuple(<EOL>RTAnalogDeviceSamples, data, component_position<EOL>)<EOL>append_components((device, sample))<EOL><DEDENT>return components<EOL>", "docstring": "Get a single analog data channel.", "id": "f8803:c5:m6"}
{"signature": "@ComponentGetter(QRTComponentType.Component3dNoLabelsRes, RT3DComponent)<EOL><INDENT>def get_3d_markers_no_label_residual(<EOL>self, component_info=None, data=None, component_position=None<EOL>):<DEDENT>", "body": "return self._get_3d_markers(<EOL>RT3DMarkerPositionNoLabelResidual, component_info, data, component_position<EOL>)<EOL>", "docstring": "Get 3D markers without label with residual.", "id": "f8803:c5:m17"}
{"signature": "@ComponentGetter(QRTComponentType.Component6d, RT6DComponent)<EOL><INDENT>def get_6d(self, component_info=None, data=None, component_position=None):<DEDENT>", "body": "components = []<EOL>append_components = components.append<EOL>for _ in range(component_info.body_count):<EOL><INDENT>component_position, position = QRTPacket._get_exact(<EOL>RT6DBodyPosition, data, component_position<EOL>)<EOL>component_position, matrix = QRTPacket._get_tuple(<EOL>RT6DBodyRotation, data, component_position<EOL>)<EOL>append_components((position, matrix))<EOL><DEDENT>return components<EOL>", "docstring": "Get 6D data.", "id": "f8803:c5:m9"}
{"signature": "@ComponentGetter(QRTComponentType.Component3d, RT3DComponent)<EOL><INDENT>def get_3d_markers(self, component_info=None, data=None, component_position=None):<DEDENT>", "body": "return self._get_3d_markers(<EOL>RT3DMarkerPosition, component_info, data, component_position<EOL>)<EOL>", "docstring": "Get 3D markers.", "id": "f8803:c5:m14"}
{"signature": "@ComponentGetter(QRTComponentType.Component6dEuler, RT6DComponent)<EOL><INDENT>def get_6d_euler(self, component_info=None, data=None, component_position=None):<DEDENT>", "body": "components = []<EOL>append_components = components.append<EOL>for _ in range(component_info.body_count):<EOL><INDENT>component_position, position = QRTPacket._get_exact(<EOL>RT6DBodyPosition, data, component_position<EOL>)<EOL>component_position, euler = QRTPacket._get_exact(<EOL>RT6DBodyEuler, data, component_position<EOL>)<EOL>append_components((position, euler))<EOL><DEDENT>return components<EOL>", "docstring": "Get 6D data with euler rotations.", "id": "f8803:c5:m11"}
{"signature": "@ComponentGetter(QRTComponentType.ComponentForce, RTForceComponent)<EOL><INDENT>def get_force(self, component_info=None, data=None, component_position=None):<DEDENT>", "body": "components = []<EOL>append_components = components.append<EOL>for _ in range(component_info.plate_count):<EOL><INDENT>component_position, plate = QRTPacket._get_exact(<EOL>RTForcePlate, data, component_position<EOL>)<EOL>force_list = []<EOL>for _ in range(plate.force_count):<EOL><INDENT>component_position, force = QRTPacket._get_exact(<EOL>RTForce, data, component_position<EOL>)<EOL>force_list.append(force)<EOL><DEDENT>append_components((plate, force_list))<EOL><DEDENT>return components<EOL>", "docstring": "Get force data.", "id": "f8803:c5:m7"}
{"signature": "@ComponentGetter(QRTComponentType.ComponentForceSingle, RTForceComponent)<EOL><INDENT>def get_force_single(self, component_info=None, data=None, component_position=None):<DEDENT>", "body": "components = []<EOL>append_components = components.append<EOL>for _ in range(component_info.plate_count):<EOL><INDENT>component_position, plate = QRTPacket._get_exact(<EOL>RTForcePlateSingle, data, component_position<EOL>)<EOL>component_position, force = QRTPacket._get_exact(<EOL>RTForce, data, component_position<EOL>)<EOL>append_components((plate, force))<EOL><DEDENT>return components<EOL>", "docstring": "Get a single force data channel.", "id": "f8803:c5:m8"}
{"signature": "@ComponentGetter(QRTComponentType.Component2d, RT2DComponent)<EOL><INDENT>def get_2d_markers(<EOL>self, component_info=None, data=None, component_position=None, index=None<EOL>):<DEDENT>", "body": "return self._get_2d_markers(<EOL>data, component_info, component_position, index=index<EOL>)<EOL>", "docstring": "Get 2D markers.\n\n        :param index: Specify which camera to get 2D from, will be returned as\n                      first entry in the returned array.", "id": "f8803:c5:m18"}
{"signature": "def data_received(self, data):", "body": "self._received_data += data<EOL>h_size = RTheader.size<EOL>data = self._received_data<EOL>size, type_ = RTheader.unpack_from(data, <NUM_LIT:0>)<EOL>while len(data) >= size:<EOL><INDENT>self._parse_received(data[h_size:size], type_)<EOL>data = data[size:]<EOL>if len(data) < h_size:<EOL><INDENT>break<EOL><DEDENT>size, type_ = RTheader.unpack_from(data, <NUM_LIT:0>)<EOL><DEDENT>self._received_data = data<EOL>", "docstring": "Received from QTM and route accordingly", "id": "f8805:c0:m1"}
{"signature": "async def byte_order(self):", "body": "return await asyncio.wait_for(<EOL>self._protocol.send_command(\"<STR_LIT>\"), timeout=self._timeout<EOL>)<EOL>", "docstring": "Get the byte order used when communicating\n            (should only ever be little endian using this library).", "id": "f8806:c0:m4"}
{"signature": "@validate_response([b\"<STR_LIT>\"])<EOL><INDENT>async def load_project(self, project_path):<DEDENT>", "body": "cmd = \"<STR_LIT>\" % project_path<EOL>return await asyncio.wait_for(<EOL>self._protocol.send_command(cmd), timeout=self._timeout<EOL>)<EOL>", "docstring": "Load a project.\n\n        :param project_path: Path to project you want to load.", "id": "f8806:c0:m19"}
{"signature": "async def get_current_frame(self, components=None) -> QRTPacket:", "body": "if components is None:<EOL><INDENT>components = [\"<STR_LIT:all>\"]<EOL><DEDENT>else:<EOL><INDENT>_validate_components(components)<EOL><DEDENT>cmd = \"<STR_LIT>\" % \"<STR_LIT:U+0020>\".join(components)<EOL>return await asyncio.wait_for(<EOL>self._protocol.send_command(cmd), timeout=self._timeout<EOL>)<EOL>", "docstring": "Get measured values from QTM for a single frame.\n\n        :param components: A list of components to receive, could be 'all' or any combination of\n                '2d', '2dlin', '3d', '3dres', '3dnolabels',\n                '3dnolabelsres', 'force', 'forcesingle', '6d', '6dres',\n                '6deuler', '6deulerres', 'gazevector', 'image', 'timecode',\n                'skeleton', 'skeleton:global'\n\n        :rtype: A :class:`qtm.QRTPacket` containing requested components", "id": "f8806:c0:m8"}
{"signature": "async def get_state(self):", "body": "await self._protocol.send_command(\"<STR_LIT>\", callback=False)<EOL>return await self._protocol.await_event()<EOL>", "docstring": "Get the latest state change of QTM. If the :func:`~qtm.connect` on_event\n        callback was set the callback will be called as well.\n\n        :rtype: A :class:`qtm.QRTEvent`", "id": "f8806:c0:m5"}
{"signature": "async def stream_frames_stop(self):", "body": "self._protocol.set_on_packet(None)<EOL>cmd = \"<STR_LIT>\"<EOL>await self._protocol.send_command(cmd, callback=False)<EOL>", "docstring": "Stop streaming frames.", "id": "f8806:c0:m10"}
{"signature": "async def qtm_version(self):", "body": "return await asyncio.wait_for(<EOL>self._protocol.send_command(\"<STR_LIT>\"), timeout=self._timeout<EOL>)<EOL>", "docstring": "Get the QTM version.", "id": "f8806:c0:m3"}
{"signature": "@validate_response([b\"<STR_LIT>\"])<EOL><INDENT>async def stop(self):<DEDENT>", "body": "cmd = \"<STR_LIT>\"<EOL>return await asyncio.wait_for(<EOL>self._protocol.send_command(cmd), timeout=self._timeout<EOL>)<EOL>", "docstring": "Stop RT from file.", "id": "f8806:c0:m16"}
{"signature": "@validate_response([b\"<STR_LIT>\"])<EOL><INDENT>async def load(self, filename):<DEDENT>", "body": "cmd = \"<STR_LIT>\" % filename<EOL>return await asyncio.wait_for(<EOL>self._protocol.send_command(cmd), timeout=self._timeout<EOL>)<EOL>", "docstring": "Load a measurement.\n\n        :param filename: Path to measurement you want to load.", "id": "f8806:c0:m17"}
{"signature": "async def connect(<EOL>host,<EOL>port=<NUM_LIT>,<EOL>version=\"<STR_LIT>\",<EOL>on_event=None,<EOL>on_disconnect=None,<EOL>timeout=<NUM_LIT:5>,<EOL>loop=None,<EOL>) -> QRTConnection:", "body": "loop = loop or asyncio.get_event_loop()<EOL>try:<EOL><INDENT>_, protocol = await loop.create_connection(<EOL>lambda: QTMProtocol(<EOL>loop=loop, on_event=on_event, on_disconnect=on_disconnect<EOL>),<EOL>host,<EOL>port,<EOL>)<EOL><DEDENT>except (ConnectionRefusedError, TimeoutError, OSError) as exception:<EOL><INDENT>LOG.error(exception)<EOL>return None<EOL><DEDENT>try:<EOL><INDENT>await protocol.set_version(version)<EOL><DEDENT>except QRTCommandException as exception:<EOL><INDENT>LOG.error(Exception)<EOL>return None<EOL><DEDENT>except TypeError as exception:  <EOL><INDENT>LOG.error(exception)<EOL>return None<EOL><DEDENT>return QRTConnection(protocol, timeout=timeout)<EOL>", "docstring": "Async function to connect to QTM\n\n    :param host: Address of the computer running QTM.\n    :param port: Port number to connect to, should be the port configured for little endian.\n    :param version: What version of the protocol to use, tested for 1.17 and above but could\n        work with lower versions as well.\n    :param on_disconnect: Function to be called when a disconnect from QTM occurs.\n    :param on_event: Function to be called when there's an event from QTM.\n    :param timeout: The default timeout time for calls to QTM.\n    :param loop: Alternative event loop, will use asyncio default if None.\n\n    :rtype: A :class:`.QRTConnection`", "id": "f8806:m1"}
{"signature": "@validate_response([b\"<STR_LIT>\"])<EOL><INDENT>async def save(self, filename, overwrite=False):<DEDENT>", "body": "cmd = \"<STR_LIT>\" % (filename, \"<STR_LIT>\" if overwrite else \"<STR_LIT>\")<EOL>return await asyncio.wait_for(<EOL>self._protocol.send_command(cmd), timeout=self._timeout<EOL>)<EOL>", "docstring": "Save a measurement.\n\n        :param filename: Filename you wish to save as.\n        :param overwrite: If QTM should overwrite existing measurement.", "id": "f8806:c0:m18"}
{"signature": "@validate_response([b\"<STR_LIT>\"])<EOL><INDENT>async def release_control(self):<DEDENT>", "body": "cmd = \"<STR_LIT>\"<EOL>return await asyncio.wait_for(<EOL>self._protocol.send_command(cmd), timeout=self._timeout<EOL>)<EOL>", "docstring": "Release control of QTM.", "id": "f8806:c0:m12"}
{"signature": "@validate_response([b\"<STR_LIT>\"])<EOL><INDENT>async def take_control(self, password):<DEDENT>", "body": "cmd = \"<STR_LIT>\" % password<EOL>return await asyncio.wait_for(<EOL>self._protocol.send_command(cmd), timeout=self._timeout<EOL>)<EOL>", "docstring": "Take control of QTM.\n\n        :param password: Password as entered in QTM.", "id": "f8806:c0:m11"}
{"signature": "def validate_response(expected_responses):", "body": "def internal_decorator(function):<EOL><INDENT>@wraps(function)<EOL>async def wrapper(*args, **kwargs):<EOL><INDENT>response = await function(*args, **kwargs)<EOL>for expected_response in expected_responses:<EOL><INDENT>if response.startswith(expected_response):<EOL><INDENT>return response<EOL><DEDENT><DEDENT>raise QRTCommandException(<EOL>\"<STR_LIT>\" % (expected_responses, response)<EOL>)<EOL><DEDENT>return wrapper<EOL><DEDENT>return internal_decorator<EOL>", "docstring": "Decorator to validate responses from QTM", "id": "f8806:m0"}
{"signature": "async def await_event(self, event=None, timeout=None):", "body": "if self.event_future is not None:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>result = await asyncio.wait_for(self._wait_loop(event), timeout)<EOL>return result<EOL>", "docstring": "Wait for any or specified event", "id": "f8807:c1:m3"}
{"signature": "async def set_version(self, version):", "body": "version_cmd = \"<STR_LIT>\" % version<EOL>await self.send_command(version_cmd)<EOL>", "docstring": "Set version of RT protocol used to communicate with QTM", "id": "f8807:c1:m1"}
{"signature": "async def reboot(ip_address):", "body": "_, protocol = await asyncio.get_event_loop().create_datagram_endpoint(<EOL>QRebootProtocol,<EOL>local_addr=(ip_address, <NUM_LIT:0>),<EOL>allow_broadcast=True,<EOL>reuse_address=True,<EOL>)<EOL>LOG.info(\"<STR_LIT>\", ip_address)<EOL>protocol.send_reboot()<EOL>", "docstring": "async function to reboot QTM cameras", "id": "f8808:m0"}
{"signature": "def send_reboot(self):", "body": "self.transport.sendto(b\"<STR_LIT>\", (\"<STR_LIT>\", DEFAULT_DISCOVERY_PORT))<EOL>", "docstring": "Sends reboot package broadcast", "id": "f8808:c0:m2"}
{"signature": "def get_view_function(app, url, method):", "body": "<EOL>adapter = app.create_url_adapter(request)<EOL>try:<EOL><INDENT>match = adapter.match(url, method=method)<EOL><DEDENT>except RequestRedirect as ex:<EOL><INDENT>return get_view_function(app, ex.new_url, method)<EOL><DEDENT>except (MethodNotAllowed, NotFound):<EOL><INDENT>return None<EOL><DEDENT>try:<EOL><INDENT>return app.view_functions[match[<NUM_LIT:0>]]<EOL><DEDENT>except KeyError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Match a url and return the view and arguments\n    it will be called with, or None if there is no view.\n    Creds: http://stackoverflow.com/a/38488506", "id": "f8812:m7"}
{"signature": "def get_default_tag(app):", "body": "view_func = get_view_function(app, request.path, request.method)<EOL>if view_func:<EOL><INDENT>return view_func.__name__<EOL><DEDENT>", "docstring": "Get the name of the view function used to prevent having to set the tag\n    manually for every endpoint", "id": "f8812:m6"}
{"signature": "@staticmethod<EOL><INDENT>def get_usage(_id):<DEDENT>", "body": "url = USAGE_URL % _id<EOL>arequest = requests.get(url, headers=HEADERS)<EOL>status_code = str(arequest.status_code)<EOL>if status_code == '<STR_LIT>':<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>try:<EOL><INDENT>return arequest.json()<EOL><DEDENT>except ValueError:<EOL><INDENT>_LOGGER.info(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>", "docstring": "Pull a water heater's usage report from the API.", "id": "f8818:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def get_modes(_id):<DEDENT>", "body": "url = MODES_URL % _id<EOL>arequest = requests.get(url, headers=HEADERS)<EOL>status_code = str(arequest.status_code)<EOL>if status_code == '<STR_LIT>':<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>return arequest.json()<EOL>", "docstring": "Pull a water heater's modes from the API.", "id": "f8818:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def create_vacation(body):<DEDENT>", "body": "arequest = requests.post(VACATIONS_URL, headers=HEADERS, data=json.dumps(body))<EOL>status_code = str(arequest.status_code)<EOL>if status_code != '<STR_LIT>':<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\" + status_code)<EOL>_LOGGER.error(arequest.json())<EOL>return False<EOL><DEDENT>return arequest.json()<EOL>", "docstring": "Create a vacation.", "id": "f8818:c0:m7"}
{"signature": "def get_water_heaters(self):", "body": "water_heaters = []<EOL>for location in self.locations:<EOL><INDENT>_location_id = location.get(\"<STR_LIT:id>\")<EOL>for device in location.get(\"<STR_LIT>\"):<EOL><INDENT>if device.get(\"<STR_LIT:type>\") == \"<STR_LIT>\":<EOL><INDENT>water_heater_modes = self.api_interface.get_modes(device.get(\"<STR_LIT:id>\"))<EOL>water_heater_usage = self.api_interface.get_usage(device.get(\"<STR_LIT:id>\"))<EOL>water_heater = self.api_interface.get_device(device.get(\"<STR_LIT:id>\"))<EOL>vacations = self.api_interface.get_vacations()<EOL>device_vacations = []<EOL>for vacation in vacations:<EOL><INDENT>for equipment in vacation.get(\"<STR_LIT>\"):<EOL><INDENT>if equipment.get(\"<STR_LIT:id>\") == water_heater.get(\"<STR_LIT:id>\"):<EOL><INDENT>device_vacations.append(EcoNetVacation(vacation, self.api_interface))<EOL><DEDENT><DEDENT><DEDENT>water_heaters.append(EcoNetWaterHeater(water_heater, water_heater_modes, water_heater_usage,<EOL>_location_id,<EOL>device_vacations,<EOL>self.api_interface))<EOL><DEDENT><DEDENT><DEDENT>return water_heaters<EOL>", "docstring": "Return a list of water heater devices.\n\nParses the response from the locations endpoint in to a pyeconet.WaterHeater.", "id": "f8818:c1:m1"}
{"signature": "def _authenticate(self):", "body": "auth_url = BASE_URL + \"<STR_LIT>\"<EOL>payload = {'<STR_LIT:username>': self.email, '<STR_LIT:password>': self.password, '<STR_LIT>': '<STR_LIT:password>'}<EOL>arequest = requests.post(auth_url, data=payload, headers=BASIC_HEADERS)<EOL>status = arequest.status_code<EOL>if status != <NUM_LIT:200>:<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\" + str(status))<EOL>return False<EOL><DEDENT>response = arequest.json()<EOL>_LOGGER.debug(str(response))<EOL>self.token = response.get(\"<STR_LIT>\")<EOL>self.refresh_token = response.get(\"<STR_LIT>\")<EOL>_auth = HEADERS.get(\"<STR_LIT>\")<EOL>_auth = _auth % self.token<EOL>HEADERS[\"<STR_LIT>\"] = _auth<EOL>_LOGGER.info(\"<STR_LIT>\")<EOL>return True<EOL>", "docstring": "Authenticate with the API and return an authentication token.", "id": "f8818:c0:m9"}
{"signature": "@staticmethod<EOL><INDENT>def get_locations():<DEDENT>", "body": "arequest = requests.get(LOCATIONS_URL, headers=HEADERS)<EOL>status_code = str(arequest.status_code)<EOL>if status_code == '<STR_LIT>':<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>return arequest.json()<EOL>", "docstring": "Pull the accounts locations.", "id": "f8818:c0:m5"}
{"signature": "@staticmethod<EOL><INDENT>def get_device(_id):<DEDENT>", "body": "url = DEVICE_URL % _id<EOL>arequest = requests.get(url, headers=HEADERS)<EOL>status_code = str(arequest.status_code)<EOL>if status_code == '<STR_LIT>':<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>return arequest.json()<EOL>", "docstring": "Pull a device from the API.", "id": "f8818:c0:m4"}
{"signature": "def run(self):", "body": "logger.info(u'<STR_LIT>')<EOL>while not self._stop:<EOL><INDENT>xml = self._readxml()<EOL>if xml is None:<EOL><INDENT>break<EOL><DEDENT>if not self.modelize:<EOL><INDENT>logger.info(u'<STR_LIT>' % xml)<EOL>self.results.put(xml)<EOL>continue<EOL><DEDENT>if xml.tag == '<STR_LIT>':<EOL><INDENT>sentence = Sentence.from_shypo(xml.find('<STR_LIT>'), self.encoding)<EOL>logger.info(u'<STR_LIT>' % sentence)<EOL>self.results.put(sentence)<EOL><DEDENT>else:<EOL><INDENT>logger.info(u'<STR_LIT>' % xml)<EOL>self.results.put(xml)<EOL><DEDENT><DEDENT>logger.info(u'<STR_LIT>')<EOL>", "docstring": "Start listening to the server", "id": "f8823:c0:m2"}
{"signature": "def _readxml(self):", "body": "block = re.sub(r'<STR_LIT>', r'<STR_LIT>', self._readblock())<EOL>try:<EOL><INDENT>xml = XML(block)<EOL><DEDENT>except ParseError:<EOL><INDENT>xml = None<EOL><DEDENT>return xml<EOL>", "docstring": "Read a block and return the result as XML\n\n        :return: block as xml\n        :rtype: xml.etree.ElementTree", "id": "f8823:c0:m8"}
{"signature": "def connect(self):", "body": "try:<EOL><INDENT>logger.info(u'<STR_LIT>' % (self.host, self.port))<EOL>self.sock.connect((self.host, self.port))<EOL><DEDENT>except socket.error:<EOL><INDENT>raise ConnectionError()<EOL><DEDENT>self.state = CONNECTED<EOL>", "docstring": "Connect to the server\n\n        :raise ConnectionError: If socket cannot establish a connection", "id": "f8823:c0:m3"}
{"signature": "@classmethod<EOL><INDENT>def from_shypo(cls, xml, encoding='<STR_LIT:utf-8>'):<DEDENT>", "body": "score = float(xml.get('<STR_LIT>'))<EOL>words = [Word.from_whypo(w_xml, encoding) for w_xml in xml.findall('<STR_LIT>') if w_xml.get('<STR_LIT>') not in ['<STR_LIT>', '<STR_LIT>']]<EOL>return cls(words, score)<EOL>", "docstring": "Constructor from xml element *SHYPO*\n\n        :param xml.etree.ElementTree xml: the xml *SHYPO* element\n        :param string encoding: encoding of the xml", "id": "f8824:c0:m1"}
{"signature": "def run(*args):", "body": "if not settings.configured:<EOL><INDENT>settings.configure(**DEFAULT_SETTINGS)<EOL><DEDENT>django.setup()<EOL>parent = os.path.dirname(os.path.abspath(__file__))<EOL>sys.path.insert(<NUM_LIT:0>, parent)<EOL>if \"<STR_LIT>\" in args:<EOL><INDENT>check_migrations()<EOL><DEDENT>else:<EOL><INDENT>django.core.management.call_command(\"<STR_LIT>\", APP_NAME, *args)<EOL><DEDENT>", "docstring": "Check and/or create Django migrations.\n\nIf --check is present in the arguments then migrations are checked only.", "id": "f8828:m1"}
{"signature": "def execute(self):", "body": "<EOL>self.executed_at = now()<EOL>self.save()<EOL>with transaction.atomic():<EOL><INDENT>ret = BillingAgreement.execute(self.id)<EOL>ret.user = self.user<EOL>ret.save()<EOL>self.executed_agreement = ret<EOL>self.save()<EOL><DEDENT>return ret<EOL>", "docstring": "Execute the PreparedBillingAgreement by creating and executing a\nmatching BillingAgreement.", "id": "f8848:c1:m5"}
{"signature": "def get_version():", "body": "from . import __version__<EOL>return __version__<EOL>", "docstring": "Returns the current dj-paypal version", "id": "f8862:m2"}
{"signature": "def fix_django_headers(meta):", "body": "ret = {}<EOL>for k, v in meta.items():<EOL><INDENT>if k.startswith(\"<STR_LIT>\"):<EOL><INDENT>k = k[len(\"<STR_LIT>\"):]<EOL><DEDENT>elif k not in (\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>continue<EOL><DEDENT>ret[k.lower().replace(\"<STR_LIT:_>\", \"<STR_LIT:->\")] = v<EOL><DEDENT>return ret<EOL>", "docstring": "Fix this nonsensical API:\nhttps://docs.djangoproject.com/en/1.11/ref/request-response/\nhttps://code.djangoproject.com/ticket/20147", "id": "f8862:m0"}
{"signature": "def __register_library(self, module_name: str, attr: str, fallback: str = None):", "body": "<EOL>try:<EOL><INDENT>module = importlib.import_module(module_name)<EOL><DEDENT>except ImportError:<EOL><INDENT>if fallback is not None:<EOL><INDENT>module = importlib.import_module(fallback)<EOL>self.__logger.warn(module_name + \"<STR_LIT>\" + fallback)<EOL><DEDENT>else:<EOL><INDENT>self.__logger.warn(module_name + \"<STR_LIT>\")<EOL><DEDENT><DEDENT>if not attr in dir(self.__sketch):<EOL><INDENT>setattr(self.__sketch, attr, module)<EOL><DEDENT>else:<EOL><INDENT>self.__logger.warn(attr +\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Inserts Interpreter Library of imports into sketch in a very non-consensual way", "id": "f8869:c0:m1"}
{"signature": "def _expand_str(path_cfg, alias_dict, overriding_kargs):", "body": "if path_cfg in alias_dict:<EOL><INDENT>return _expand_str_alias(path_cfg, alias_dict, overriding_kargs)<EOL><DEDENT>return _expand_for_lambda_str(path_cfg, alias_dict, overriding_kargs)<EOL>", "docstring": "expand a path config given as a string", "id": "f9023:m1"}
{"signature": "def expand_path_cfg(path_cfg, alias_dict={ }, overriding_kargs={ }):", "body": "if isinstance(path_cfg, str):<EOL><INDENT>return _expand_str(path_cfg, alias_dict, overriding_kargs)<EOL><DEDENT>if isinstance(path_cfg, dict):<EOL><INDENT>return _expand_dict(path_cfg, alias_dict)<EOL><DEDENT>return _expand_tuple(path_cfg, alias_dict, overriding_kargs)<EOL>", "docstring": "expand a path config\n\n    Args:\n        path_cfg (str, tuple, dict): a config for path\n        alias_dict (dict): a dict for aliases\n        overriding_kargs (dict): to be used for recursive call", "id": "f9023:m0"}
{"signature": "def _expand_tuple(path_cfg, alias_dict, overriding_kargs):", "body": "<EOL>new_path_cfg = path_cfg[<NUM_LIT:0>]<EOL>new_overriding_kargs = path_cfg[<NUM_LIT:1>].copy()<EOL>new_overriding_kargs.update(overriding_kargs)<EOL>return expand_path_cfg(<EOL>new_path_cfg,<EOL>overriding_kargs=new_overriding_kargs,<EOL>alias_dict=alias_dict<EOL>)<EOL>", "docstring": "expand a path config given as a tuple", "id": "f9023:m4"}
{"signature": "def package_fullpath(self, package_index):", "body": "ret = os.path.join(self.path, self.package_relpath(package_index))<EOL>return ret<EOL>", "docstring": "Returns the full path of the package\n\n        This method returns the full path to the package. This method\n        simply constructs the path based on the convention and doesn't\n        check if the package actually exists.\n\n        Parameters\n        ----------\n        package_index :\n            a package index\n\n        Returns\n        -------\n        str\n            the full path to the package", "id": "f9046:c0:m12"}
{"signature": "def result_relpath(self, package_index):", "body": "dirname = '<STR_LIT>'.format(package_index)<EOL>ret = os.path.join('<STR_LIT>', dirname, '<STR_LIT>')<EOL>return ret<EOL>", "docstring": "Returns the relative path of the result\n\n        This method returns the path to the result relative to the\n        top dir of the working area. This method simply constructs the\n        path based on the convention and doesn't check if the result\n        actually exists.\n\n        Parameters\n        ----------\n        package_index :\n            a package index\n\n        Returns\n        -------\n        str\n            the relative path to the result", "id": "f9046:c0:m13"}
{"signature": "def package_relpath(self, package_index):", "body": "ret = '<STR_LIT>'.format(package_index)<EOL>return ret<EOL>", "docstring": "Returns the relative path of the package\n\n        This method returns the path to the package relative to the\n        top dir of the working area. This method simply constructs the\n        path based on the convention and doesn't check if the package\n        actually exists.\n\n        Parameters\n        ----------\n        package_index :\n            a package index\n\n        Returns\n        -------\n        str\n            the relative path to the package", "id": "f9046:c0:m10"}
{"signature": "def collect_result(self, package_index):", "body": "result_fullpath = self.result_fullpath(package_index)<EOL>try:<EOL><INDENT>with gzip.open(result_fullpath, '<STR_LIT:rb>') as f:<EOL><INDENT>result = pickle.load(f)<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>logger = logging.getLogger(__name__)<EOL>logger.warning(e)<EOL>return None<EOL><DEDENT>return result<EOL>", "docstring": "Collect the result of a task\n\n        Parameters\n        ----------\n        package_index :\n            a package index\n\n        Returns\n        -------\n        obj\n            The result of the task", "id": "f9046:c0:m9"}
{"signature": "def failed_runids(self, runids):", "body": "<EOL>for i in runids:<EOL><INDENT>try:<EOL><INDENT>self.clusterprocids_finished.remove(i)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>", "docstring": "Provide the run IDs of failed jobs\n\n\n        Returns\n        -------\n        None", "id": "f9050:c0:m6"}
{"signature": "def open(self):", "body": "self.workingArea.open()<EOL>self.runid_pkgidx_map = { }<EOL>self.runid_to_return = deque()<EOL>", "docstring": "open the drop box\n\n        You need to call this method before starting putting packages.\n\n        Returns\n        -------\n        None", "id": "f9052:c0:m2"}
{"signature": "def receive(self):", "body": "ret = [ ] <EOL>while True:<EOL><INDENT>if self.runid_pkgidx_map:<EOL><INDENT>self.runid_to_return.extend(self.dispatcher.poll())<EOL>ret.extend(self._collect_all_finished_pkgidx_result_pairs())<EOL><DEDENT>if not self.runid_pkgidx_map:<EOL><INDENT>break<EOL><DEDENT>time.sleep(self.sleep)<EOL><DEDENT>ret = sorted(ret, key=itemgetter(<NUM_LIT:0>))<EOL>return ret<EOL>", "docstring": "return pairs of package indices and results of all tasks\n\n        This method waits until all tasks finish.\n\n        Returns\n        -------\n        list\n            A list of pairs of package indices and results", "id": "f9052:c0:m7"}
{"signature": "def put_multiple(self, packages):", "body": "pkgidxs = [self.workingArea.put_package(p) for p in packages]<EOL>logger = logging.getLogger(__name__)<EOL>logger.info('<STR_LIT>'.format(<EOL>'<STR_LIT:U+002CU+0020>'.join(['<STR_LIT:{}>'.format(self.workingArea.package_relpath(i)) for i in pkgidxs])<EOL>))<EOL>runids = self.dispatcher.run_multiple(self.workingArea, pkgidxs)<EOL>self.runid_pkgidx_map.update(zip(runids, pkgidxs))<EOL>return pkgidxs<EOL>", "docstring": "put tasks\n\n        This method places multiple tasks in the working area and have\n        the dispatcher execute them.\n\n        Parameters\n        ----------\n        packages : list(callable)\n            A list of tasks\n\n        Returns\n        -------\n        list(int)\n            Package indices assigned by the working area", "id": "f9052:c0:m6"}
{"signature": "def terminate(self):", "body": "self.dispatcher.terminate()<EOL>", "docstring": "terminate the drop box\n\n        Returns\n        -------\n        None", "id": "f9052:c0:m3"}
{"signature": "def begin(self):", "body": "if self.isopen: return<EOL>self.dropbox.open()<EOL>self.isopen = True<EOL>", "docstring": "begin", "id": "f9057:c1:m2"}
{"signature": "def terminate(self):", "body": "self.dropbox.terminate()<EOL>", "docstring": "terminate", "id": "f9057:c1:m9"}
{"signature": "def receive_finished(self):", "body": "if not self.isopen:<EOL><INDENT>logger = logging.getLogger(__name__)<EOL>logger.warning('<STR_LIT>')<EOL>return<EOL><DEDENT>return self.dropbox.poll()<EOL>", "docstring": "return a list of pairs of IDs and results of finished tasks.\n\n        This method doesn't wait for tasks to finish. It returns IDs\n        and results which have already finished.\n\n        Returns\n        -------\n        list\n            A list of pairs of IDs and results", "id": "f9057:c1:m5"}
{"signature": "def put_multiple(self, task_args_kwargs_list):", "body": "if not self.isopen:<EOL><INDENT>logger = logging.getLogger(__name__)<EOL>logger.warning('<STR_LIT>')<EOL>return<EOL><DEDENT>packages = [ ]<EOL>for t in task_args_kwargs_list:<EOL><INDENT>try:<EOL><INDENT>task = t['<STR_LIT>']<EOL>args = t.get('<STR_LIT:args>', ())<EOL>kwargs = t.get('<STR_LIT>', {})<EOL>package = TaskPackage(task=task, args=args, kwargs=kwargs)<EOL><DEDENT>except TypeError:<EOL><INDENT>package = TaskPackage(task=t, args=(), kwargs={})<EOL><DEDENT>packages.append(package)<EOL><DEDENT>return self.dropbox.put_multiple(packages)<EOL>", "docstring": "put a list of tasks and their arguments\n\n        This method can be used to put multiple tasks at once. Calling\n        this method once with multiple tasks can be much faster than\n        calling `put()` multiple times.\n\n        Parameters\n        ----------\n        task_args_kwargs_list : list\n\n            A list of lists with three items that can be parameters of\n            `put()`, i.e., `task`, `args`, `kwargs`.\n\n        Returns\n        -------\n        list\n            A list of task IDs.", "id": "f9057:c1:m4"}
{"signature": "def receive(self):", "body": "pkgidx_result_pairs = self.receive_all()<EOL>if pkgidx_result_pairs is None:<EOL><INDENT>return<EOL><DEDENT>results = [r for _, r in pkgidx_result_pairs]<EOL>return results<EOL>", "docstring": "return a list results of all tasks.\n\n        This method waits for all tasks to finish.\n\n        Returns\n        -------\n        list\n            A list of results of the tasks. The results are sorted in\n            the order in which the tasks are put.", "id": "f9057:c1:m8"}
{"signature": "def poll(self):", "body": "finished_procs = [p for p in self.running_procs if p.poll() is not None]<EOL>self.running_procs = collections.deque([p for p in self.running_procs if p not in finished_procs])<EOL>for proc in finished_procs:<EOL><INDENT>stdout, stderr = proc.communicate()<EOL><DEDENT>finished_pids = [p.pid for p in finished_procs]<EOL>self.finished_pids.extend(finished_pids)<EOL>logger = logging.getLogger(__name__)<EOL>messages = '<STR_LIT>'.format(len(self.running_procs), len(self.finished_pids))<EOL>logger.info(messages)<EOL>return finished_pids<EOL>", "docstring": "check if the jobs are running and return a list of pids for\n        finished jobs", "id": "f9059:c0:m4"}
{"signature": "def render_pep440_pre(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[.post.devDISTANCE] -- No -dirty.\n\n    Exceptions:\n    1: no tags. 0.post.devDISTANCE", "id": "f9060:m10"}
{"signature": "def render_git_describe(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL><DEDENT>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[-DISTANCE-gHEX][-dirty].\n\n    Like 'git describe --tags --dirty --always'.\n\n    Exceptions:\n    1: no tags. HEX[-dirty]  (note: no 'g' prefix)", "id": "f9060:m13"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_get_keywords(versionfile_abs):", "body": "<EOL>keywords = {}<EOL>try:<EOL><INDENT>f = open(versionfile_abs, \"<STR_LIT:r>\")<EOL>for line in f.readlines():<EOL><INDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT:date>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>f.close()<EOL><DEDENT>except EnvironmentError:<EOL><INDENT>pass<EOL><DEDENT>return keywords<EOL>", "docstring": "Extract version information from the given file.", "id": "f9060:m5"}
{"signature": "def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,<EOL>env=None):", "body": "assert isinstance(commands, list)<EOL>p = None<EOL>for c in commands:<EOL><INDENT>try:<EOL><INDENT>dispcmd = str([c] + args)<EOL>p = subprocess.Popen([c] + args, cwd=cwd, env=env,<EOL>stdout=subprocess.PIPE,<EOL>stderr=(subprocess.PIPE if hide_stderr<EOL>else None))<EOL>break<EOL><DEDENT>except EnvironmentError:<EOL><INDENT>e = sys.exc_info()[<NUM_LIT:1>]<EOL>if e.errno == errno.ENOENT:<EOL><INDENT>continue<EOL><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % dispcmd)<EOL>print(e)<EOL><DEDENT>return None, None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % (commands,))<EOL><DEDENT>return None, None<EOL><DEDENT>stdout = p.communicate()[<NUM_LIT:0>].strip()<EOL>if sys.version_info[<NUM_LIT:0>] >= <NUM_LIT:3>:<EOL><INDENT>stdout = stdout.decode()<EOL><DEDENT>if p.returncode != <NUM_LIT:0>:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % dispcmd)<EOL>print(\"<STR_LIT>\" % stdout)<EOL><DEDENT>return None, p.returncode<EOL><DEDENT>return stdout, p.returncode<EOL>", "docstring": "Call the given command(s).", "id": "f9060:m3"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_versions_from_keywords(keywords, tag_prefix, verbose):", "body": "if not keywords:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>date = keywords.get(\"<STR_LIT:date>\")<EOL>if date is not None:<EOL><INDENT>date = date.strip().replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:T>\", <NUM_LIT:1>).replace(\"<STR_LIT:U+0020>\", \"<STR_LIT>\", <NUM_LIT:1>)<EOL><DEDENT>refnames = keywords[\"<STR_LIT>\"].strip()<EOL>if refnames.startswith(\"<STR_LIT>\"):<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>refs = set([r.strip() for r in refnames.strip(\"<STR_LIT>\").split(\"<STR_LIT:U+002C>\")])<EOL>TAG = \"<STR_LIT>\"<EOL>tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])<EOL>if not tags:<EOL><INDENT>tags = set([r for r in refs if re.search(r'<STR_LIT>', r)])<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % \"<STR_LIT:U+002C>\".join(refs - tags))<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % \"<STR_LIT:U+002C>\".join(sorted(tags)))<EOL><DEDENT>for ref in sorted(tags):<EOL><INDENT>if ref.startswith(tag_prefix):<EOL><INDENT>r = ref[len(tag_prefix):]<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % r)<EOL><DEDENT>return {\"<STR_LIT:version>\": r,<EOL>\"<STR_LIT>\": keywords[\"<STR_LIT>\"].strip(),<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": None,<EOL>\"<STR_LIT:date>\": date}<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": keywords[\"<STR_LIT>\"].strip(),<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": \"<STR_LIT>\", \"<STR_LIT:date>\": None}<EOL>", "docstring": "Get version information from git keywords.", "id": "f9060:m6"}
{"signature": "def add(self, collector):", "body": "self.components.append(collector)<EOL>", "docstring": "add a collector\n\n\n        Args:\n            collector: the collector to be added", "id": "f9108:c0:m2"}
{"signature": "def begin(self):", "body": "pass<EOL>", "docstring": "does nothing.\n\n        Older versions of this class had implementations.", "id": "f9110:c0:m2"}
{"signature": "def receive(self):", "body": "ret = self.communicationChannel.receive_all()<EOL>self.nruns -= len(ret)<EOL>if self.nruns > <NUM_LIT:0>:<EOL><INDENT>import logging<EOL>logger = logging.getLogger(__name__)<EOL>logger.warning(<EOL>'<STR_LIT>'.format(<EOL>len(ret), self.nruns))<EOL><DEDENT>elif self.nruns < <NUM_LIT:0>:<EOL><INDENT>import logging<EOL>logger = logging.getLogger(__name__)<EOL>logger.warning(<EOL>'<STR_LIT>'.format(<EOL>len(ret), -self.nruns))<EOL><DEDENT>return ret<EOL>", "docstring": "Return pairs of run ids and results.\n\n        This method waits until all event loops finish", "id": "f9110:c0:m7"}
{"signature": "def run_multiple(self, eventLoops):", "body": "self.nruns += len(eventLoops)<EOL>return self.communicationChannel.put_multiple(eventLoops)<EOL>", "docstring": "run the event loops in the background.\n\n        Args:\n            eventLoops (list): a list of event loops to run", "id": "f9110:c0:m4"}
{"signature": "def end(self):", "body": "results = self.communicationChannel.receive()<EOL>if self.nruns != len(results):<EOL><INDENT>import logging<EOL>logger = logging.getLogger(__name__)<EOL>logger.warning(<EOL>'<STR_LIT>'.format(<EOL>len(results),<EOL>self.nruns<EOL>))<EOL><DEDENT>return results<EOL>", "docstring": "wait until all event loops end and returns the results.", "id": "f9110:c0:m8"}
{"signature": "def getVector(self, tree, branchName):", "body": "if (tree, branchName) in self.__class__.addressDict:<EOL><INDENT>return self.__class__.addressDict[(tree, branchName)]<EOL><DEDENT>itsVector = self._getVector(tree, branchName)<EOL>self.__class__.addressDict[(tree, branchName)] = itsVector<EOL>return itsVector<EOL>", "docstring": "return the ROOT.vector object for the branch.", "id": "f9112:c0:m0"}
{"signature": "def setUp(self):", "body": "os.chdir(os.path.dirname(__file__))<EOL>", "docstring": "Change directory to tests/ before any unit test.", "id": "f9134:c0:m0"}
{"signature": "def setUp(self):", "body": "os.chdir(os.path.dirname(__file__))<EOL>", "docstring": "Change directory to tests/ before any unit test.", "id": "f9137:c0:m0"}
{"signature": "def _validate_message_contents(self, message):", "body": "self.assertTrue(message.is_multipart())<EOL>payload = message.get_payload()<EOL>self.assertEqual(len(payload), <NUM_LIT:2>)<EOL>plaintext_contenttype = payload[<NUM_LIT:0>]['<STR_LIT:Content-Type>']<EOL>self.assertTrue(plaintext_contenttype.startswith(\"<STR_LIT>\"))<EOL>plaintext = payload[<NUM_LIT:0>].get_payload()<EOL>html_contenttype = payload[<NUM_LIT:1>]['<STR_LIT:Content-Type>']<EOL>self.assertTrue(html_contenttype.startswith(\"<STR_LIT>\"))<EOL>htmltext = payload[<NUM_LIT:1>].get_payload()<EOL>converted_html = markdown.markdown(plaintext)<EOL>self.assertEqual(\"<STR_LIT>\".format(converted_html),<EOL>htmltext.strip())<EOL>", "docstring": "Validate the contents and attachments of the message.", "id": "f9141:c0:m0"}
{"signature": "def close(self):", "body": "", "docstring": "Do nothing.", "id": "f9142:c0:m2"}
{"signature": "def sendmail(self, msg_from, msg_to, msg):", "body": "SMTP_dummy.msg_from = msg_from<EOL>SMTP_dummy.msg_to = msg_to<EOL>SMTP_dummy.msg = msg<EOL>", "docstring": "Remember the recipients.", "id": "f9142:c0:m1"}
{"signature": "@click.command(context_settings={\"<STR_LIT>\": ['<STR_LIT>', '<STR_LIT>']})<EOL>@click.version_option()  <EOL>@click.option(\"<STR_LIT>\", is_flag=True, default=False,<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\", default=True,<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\", is_flag=False, default=<NUM_LIT:1>,<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\", is_flag=True, default=False,<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>default=mailmerge.api.DATABASE_FILENAME_DEFAULT,<EOL>help=\"<STR_LIT>\" +<EOL>mailmerge.api.DATABASE_FILENAME_DEFAULT)<EOL>@click.option(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>default=mailmerge.api.TEMPLATE_FILENAME_DEFAULT,<EOL>help=\"<STR_LIT>\" +<EOL>mailmerge.api.TEMPLATE_FILENAME_DEFAULT)<EOL>@click.option(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>default=mailmerge.api.CONFIG_FILENAME_DEFAULT,<EOL>help=\"<STR_LIT>\" +<EOL>mailmerge.api.CONFIG_FILENAME_DEFAULT)<EOL>def cli(sample, dry_run, limit, no_limit,<EOL>database_filename, template_filename, config_filename):", "body": "<EOL>mailmerge.api.main(<EOL>sample=sample,<EOL>dry_run=dry_run,<EOL>limit=limit,<EOL>no_limit=no_limit,<EOL>database_filename=database_filename,<EOL>template_filename=template_filename,<EOL>config_filename=config_filename,<EOL>)<EOL>", "docstring": "Command line interface.", "id": "f9143:m0"}
{"signature": "def make_message_multipart(message):", "body": "if not message.is_multipart():<EOL><INDENT>multipart_message = email.mime.multipart.MIMEMultipart('<STR_LIT>')<EOL>for header_key in set(message.keys()):<EOL><INDENT>values = message.get_all(header_key, failobj=[])<EOL>for value in values:<EOL><INDENT>multipart_message[header_key] = value<EOL><DEDENT><DEDENT>original_text = message.get_payload()<EOL>multipart_message.attach(email.mime.text.MIMEText(original_text))<EOL>message = multipart_message<EOL><DEDENT>message = _create_boundary(message)<EOL>return message<EOL>", "docstring": "Convert a message into a multipart message.", "id": "f9145:m2"}
{"signature": "def convert_markdown(message):", "body": "assert message['<STR_LIT:Content-Type>'].startswith(\"<STR_LIT>\")<EOL>del message['<STR_LIT:Content-Type>']<EOL>message = make_message_multipart(message)<EOL>for payload_item in set(message.get_payload()):<EOL><INDENT>if payload_item['<STR_LIT:Content-Type>'].startswith('<STR_LIT>'):<EOL><INDENT>original_text = payload_item.get_payload()<EOL>html_text = markdown.markdown(original_text)<EOL>html_payload = future.backports.email.mime.text.MIMEText(<EOL>\"<STR_LIT>\".format(html_text),<EOL>\"<STR_LIT:html>\",<EOL>)<EOL>message.attach(html_payload)<EOL><DEDENT><DEDENT>return message<EOL>", "docstring": "Convert markdown in message text to HTML.", "id": "f9145:m3"}
{"signature": "def get_rate(self, zipcode, city=None, state=None, multiple_rates=False):", "body": "data = self.make_request_data(zipcode, city, state)<EOL>r = requests.get(self.url, params=data)<EOL>resp = r.json()<EOL>return self.process_response(resp, multiple_rates)<EOL>", "docstring": "Finds sales tax for given info.\nReturns Decimal of the tax rate, e.g. 8.750.", "id": "f9151:c0:m1"}
{"signature": "def _check_for_exceptions(self, resp, multiple_rates):", "body": "if resp['<STR_LIT>'] != <NUM_LIT:100>:<EOL><INDENT>raise exceptions.get_exception_for_code(resp['<STR_LIT>'])(resp)<EOL><DEDENT>results = resp['<STR_LIT>']<EOL>if len(results) == <NUM_LIT:0>:<EOL><INDENT>raise exceptions.ZipTaxNoResults('<STR_LIT>')<EOL><DEDENT>if len(results) > <NUM_LIT:1> and not multiple_rates:<EOL><INDENT>rates = [result['<STR_LIT>'] for result in results]<EOL>if len(set(rates)) != <NUM_LIT:1>:<EOL><INDENT>raise exceptions.ZipTaxMultipleResults('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Check if there are exceptions that should be raised", "id": "f9151:c0:m5"}
{"signature": "def make_request_data(self, zipcode, city, state):", "body": "data = {'<STR_LIT:key>': self.api_key,<EOL>'<STR_LIT>': str(zipcode),<EOL>'<STR_LIT>': city,<EOL>'<STR_LIT:state>': state<EOL>}<EOL>data = ZipTaxClient._clean_request_data(data)<EOL>return data<EOL>", "docstring": "Make the request params given location data", "id": "f9151:c0:m2"}
{"signature": "def registerGoodClass(self, class_):", "body": "<EOL>self._valid_classes.append(class_)<EOL>for name, cls in class_members(class_):<EOL><INDENT>if self.isValidClass(cls):<EOL><INDENT>self.registerGoodClass(cls)<EOL><DEDENT><DEDENT>", "docstring": "Internal bookkeeping to handle nested classes", "id": "f9163:c0:m5"}
{"signature": "def _assert_contains(haystack, needle, invert, escape=False):", "body": "myneedle = re.escape(needle) if escape else needle<EOL>matched = re.search(myneedle, haystack, re.M)<EOL>if (invert and matched) or (not invert and not matched):<EOL><INDENT>raise AssertionError(\"<STR_LIT>\" % (<EOL>needle,<EOL>\"<STR_LIT>\" if invert else \"<STR_LIT>\",<EOL>haystack<EOL>))<EOL><DEDENT>", "docstring": "Test for existence of ``needle`` regex within ``haystack``.\n\nSay ``escape`` to escape the ``needle`` if you aren't really using the\nregex feature & have special characters in it.", "id": "f9166:m3"}
{"signature": "def autohide(obj):", "body": "<EOL>for name, item in six.iteritems(vars(obj)):<EOL><INDENT>if callable(item) and name in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>item = hide(item)<EOL><DEDENT><DEDENT>for name, subclass in class_members(obj):<EOL><INDENT>autohide(subclass)<EOL><DEDENT>", "docstring": "Automatically hide setup() and teardown() methods, recursively.", "id": "f9167:m5"}
{"signature": "def flag_inner_classes(obj):", "body": "for tup in class_members(obj):<EOL><INDENT>tup[<NUM_LIT:1>]._parent = obj<EOL>tup[<NUM_LIT:1>]._parent_inst = None<EOL>tup[<NUM_LIT:1>].__getattr__ = my_getattr<EOL>flag_inner_classes(tup[<NUM_LIT:1>])<EOL><DEDENT>", "docstring": "Mutates any attributes on ``obj`` which are classes, with link to ``obj``.\n\nAdds a convenience accessor which instantiates ``obj`` and then calls its\n``setup`` method.\n\nRecurses on those objects as well.", "id": "f9167:m4"}
{"signature": "def remove_leading(needle, haystack):", "body": "if haystack[:len(needle)] == needle:<EOL><INDENT>return haystack[len(needle):]<EOL><DEDENT>return haystack<EOL>", "docstring": "Remove leading needle string (if exists).\n\n    >>> remove_leading('Test', 'TestThisAndThat')\n    'ThisAndThat'\n    >>> remove_leading('Test', 'ArbitraryName')\n    'ArbitraryName'", "id": "f9168:m1"}
{"signature": "def format_seconds(self, n_seconds):", "body": "func = self.ok<EOL>if n_seconds >= <NUM_LIT>:<EOL><INDENT>n_minutes, n_seconds = divmod(n_seconds, <NUM_LIT>)<EOL>return \"<STR_LIT>\" % (<EOL>func(\"<STR_LIT>\" % n_minutes),<EOL>func(\"<STR_LIT>\" % n_seconds))<EOL><DEDENT>else:<EOL><INDENT>return \"<STR_LIT>\" % (<EOL>func(\"<STR_LIT>\" % n_seconds))<EOL><DEDENT>", "docstring": "Format a time in seconds.", "id": "f9168:c2:m14"}
{"signature": "def complete_english(string):", "body": "for x, y in [(\"<STR_LIT>\", \"<STR_LIT>\"),<EOL>(\"<STR_LIT>\", \"<STR_LIT>\"),<EOL>(\"<STR_LIT>\", \"<STR_LIT>\"),<EOL>(\"<STR_LIT>\", \"<STR_LIT>\")]:<EOL><INDENT>string = string.replace(x, y)<EOL><DEDENT>return string<EOL>", "docstring": ">>> complete_english('dont do this')\n\"don't do this\"\n>>> complete_english('doesnt is matched as well')\n\"doesn't is matched as well\"", "id": "f9168:m5"}
{"signature": "def camel2word(string):", "body": "def wordize(match):<EOL><INDENT>return '<STR_LIT:U+0020>' + match.group(<NUM_LIT:1>).lower()<EOL><DEDENT>return string[<NUM_LIT:0>] + re.sub(r'<STR_LIT>', wordize, string[<NUM_LIT:1>:])<EOL>", "docstring": "Covert name from CamelCase to \"Normal case\".\n\n    >>> camel2word('CamelCase')\n    'Camel case'\n    >>> camel2word('CaseWithSpec')\n    'Case with spec'", "id": "f9168:m4"}
{"signature": "def __init__(self, key, operator=None, required=True, scope=None, iterate=False):", "body": "self.key = key<EOL>self.operator = operator<EOL>self.required = required<EOL>self.scope = scope<EOL>self.iterate = iterate<EOL>", "docstring": "Only `key` is required\n\nArguments:\noperator (str)     -- \"?\" optional,  \"!\" for complete arrays; defaults to None (i.e. required)\nrequired (boolean) -- whether the key is required in the output (defaults to True)\nscope (`Selector`) -- restrict extraction to elements matching this selector\niterate (boolean)  -- whether multiple objects will be extracted (defaults to False)", "id": "f9180:c1:m0"}
{"signature": "@classmethod<EOL><INDENT>def from_yamlfile(cls, fp, selector_handler=None, strict=False, debug=False):<DEDENT>", "body": "return cls.from_yamlstring(fp.read(), selector_handler=selector_handler, strict=strict, debug=debug)<EOL>", "docstring": "Create a Parselet instance from a file containing\nthe Parsley script as a YAML object\n\n>>> import parslepy\n>>> with open('parselet.yml') as fp:\n...     parslepy.Parselet.from_yamlfile(fp)\n...\n<parslepy.base.Parselet object at 0x2014e50>\n\n:param file fp: an open file-like pointer containing the Parsley script\n:rtype: :class:`.Parselet`\n\nOther arguments: same as for :class:`.Parselet` contructor", "id": "f9180:c4:m2"}
{"signature": "@classmethod<EOL><INDENT>def from_jsonfile(cls, fp, selector_handler=None, strict=False, debug=False):<DEDENT>", "body": "return cls._from_jsonlines(fp,<EOL>selector_handler=selector_handler, strict=strict, debug=debug)<EOL>", "docstring": "Create a Parselet instance from a file containing\nthe Parsley script as a JSON object\n\n>>> import parslepy\n>>> with open('parselet.json') as fp:\n...     parslepy.Parselet.from_jsonfile(fp)\n...\n<parslepy.base.Parselet object at 0x2014e50>\n\n:param file fp: an open file-like pointer containing the Parsley script\n:rtype: :class:`.Parselet`\n\nOther arguments: same as for :class:`.Parselet` contructor", "id": "f9180:c4:m1"}
{"signature": "def keys(self):", "body": "return self._keys(self.parselet_tree)<EOL>", "docstring": "Return a list of 1st level keys of the output data model\n\n>>> import parslepy\n>>> rules = {\n...     \"headingcss\": \"#main\",\n...     \"headingxpath\": \"//h1[@id='main']\"\n... }\n>>> p = parslepy.Parselet(rules)\n>>> sorted(p.keys())\n['headingcss', 'headingxpath']", "id": "f9180:c4:m12"}
{"signature": "def parse(self, fp, parser=None, context=None):", "body": "if parser is None:<EOL><INDENT>parser = lxml.etree.HTMLParser()<EOL><DEDENT>doc = lxml.etree.parse(fp, parser=parser).getroot()<EOL>return self.extract(doc, context=context)<EOL>", "docstring": "Parse an HTML or XML document and\nreturn the extacted object following the Parsley rules give at instantiation.\n\n:param fp: file-like object containing an HTML or XML document, or URL or filename\n:param parser: *lxml.etree._FeedParser* instance (optional); defaults to lxml.etree.HTMLParser()\n:param context: user-supplied context that will be passed to custom XPath extensions (as first argument)\n:rtype: Python :class:`dict` object with mapped extracted content\n:raises: :class:`.NonMatchingNonOptionalKey`\n\nTo parse from a string, use the :meth:`~base.Parselet.parse_fromstring` method instead.\n\nNote that the fp paramater is passed directly\nto `lxml.etree.parse <http://lxml.de/api/lxml.etree-module.html#parse>`_,\nso you can also give it an URL, and lxml will download it for you.\n(Also see `<http://lxml.de/tutorial.html#the-parse-function>`_.)", "id": "f9180:c4:m6"}
{"signature": "def __init__(self, parselet, selector_handler=None, strict=False, debug=False):", "body": "if debug:<EOL><INDENT>self.DEBUG = True<EOL><DEDENT>if strict:<EOL><INDENT>self.STRICT_MODE = True<EOL><DEDENT>self.parselet =  parselet<EOL>if not selector_handler:<EOL><INDENT>self.selector_handler = DefaultSelectorHandler(debug=self.DEBUG)<EOL><DEDENT>elif not(isinstance(selector_handler, SelectorHandler)):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>self.selector_handler = selector_handler<EOL><DEDENT>self.compile()<EOL>", "docstring": "Take a parselet and optional selector_handler\nand build an abstract representation of the Parsley extraction\nlogic.\n\nFour helper class methods can be used to instantiate a Parselet\nfrom JSON/YAML rules: :meth:`.from_jsonstring`, :meth:`.from_jsonfile`,\n:meth:`.from_yamlstring`, :meth:`.from_yamlfile`.\n\n:param dict parselet: Parsley script as a Python dict object\n:param boolean strict: Set to *True* is you want to\n    enforce that missing required keys raise an Exception; default is False\n    (i.e. lenient/non-strict mode)\n:param selector_handler: an instance of :class:`selectors.SelectorHandler`\n    optional selector handler instance;\n    defaults to an instance of :class:`selectors.DefaultSelectorHandler`\n:raises: :class:`.InvalidKeySyntax`\n\nExample:\n\n>>> import parslepy\n>>> rules = {\n...     \"heading\": \"h1#main\",\n...     \"news(li.newsitem)\": [{\n...         \"title\": \".\",\n...         \"url\": \"a/@href\"\n...     }],\n... }\n>>> p = parslepy.Parselet(rules)\n>>> type(p)\n<class 'parslepy.base.Parselet'>\n\nUse :meth:`~base.Parselet.extract` or :meth:`~base.Parselet.parse`\nto get extracted content from documents.", "id": "f9180:c4:m0"}
{"signature": "def compile(self):", "body": "if not isinstance(self.parselet, dict):<EOL><INDENT>raise ValueError(\"<STR_LIT>\"\"<STR_LIT>\")<EOL><DEDENT>self.parselet_tree = self._compile(self.parselet)<EOL>", "docstring": "Build the abstract Parsley tree starting from the root node\n(recursive)", "id": "f9180:c4:m8"}
{"signature": "@classmethod<EOL><INDENT>def _from_jsonlines(cls, lines, selector_handler=None, strict=False, debug=False):<DEDENT>", "body": "return cls(json.loads(<EOL>\"<STR_LIT:\\n>\".join([l for l in lines if not cls.REGEX_COMMENT_LINE.match(l)])<EOL>), selector_handler=selector_handler, strict=strict, debug=debug)<EOL>", "docstring": "Interpret input lines as a JSON Parsley script.\nPython-style comment lines are skipped.", "id": "f9180:c4:m5"}
{"signature": "def select(self, document, selector):", "body": "raise NotImplementedError<EOL>", "docstring": "Apply the selector on the document\n\n:param document: lxml-parsed document\n:param selector: input :class:`.Selector` to apply on the document\n:rtype: lxml.etree.Element list", "id": "f9183:c1:m2"}
{"signature": "def make(self, selection_string):", "body": "raise NotImplementedError<EOL>", "docstring": "Interpret a selection_string as a selector\nfor elements or element attributes in a (semi-)structured document.\nIn case of XPath selectors, this can also be a function call.\n\n:param selection_string: a string representing a selector\n:rtype: :class:`.Selector`", "id": "f9183:c1:m1"}
{"signature": "def make(self, selection):", "body": "cached = self._selector_cache.get(selection)<EOL>if cached:<EOL><INDENT>return cached<EOL><DEDENT>namespaces = self.EXSLT_NAMESPACES<EOL>self._add_parsley_ns(namespaces)<EOL>try:<EOL><INDENT>m = self.REGEX_ENDING_ATTRIBUTE.match(selection)<EOL>if m:<EOL><INDENT>cssxpath = css_to_xpath(m.group(\"<STR_LIT>\"))<EOL>attribute = m.group(\"<STR_LIT>\").replace('<STR_LIT:|>', '<STR_LIT::>')<EOL>cssxpath = \"<STR_LIT>\" % (cssxpath, attribute)<EOL><DEDENT>else:<EOL><INDENT>cssxpath = css_to_xpath(selection)<EOL><DEDENT>selector = lxml.etree.XPath(<EOL>cssxpath,<EOL>namespaces = self.namespaces,<EOL>extensions = self.extensions,<EOL>smart_strings=(self.SMART_STRINGS<EOL>or self._test_smart_strings_needed(selection)),<EOL>)<EOL><DEDENT>except tuple(self.CSSSELECT_SYNTAXERROR_EXCEPTIONS) as syntax_error:<EOL><INDENT>if self.DEBUG:<EOL><INDENT>print(repr(syntax_error), selection)<EOL>print(\"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>selector = lxml.etree.XPath(selection,<EOL>namespaces = self.namespaces,<EOL>extensions = self.extensions,<EOL>smart_strings=(self.SMART_STRINGS<EOL>or self._test_smart_strings_needed(selection)),<EOL>)<EOL><DEDENT>except lxml.etree.XPathSyntaxError as syntax_error:<EOL><INDENT>syntax_error.msg += \"<STR_LIT>\" % selection<EOL>raise syntax_error<EOL><DEDENT>except Exception as e:<EOL><INDENT>if self.DEBUG:<EOL><INDENT>print(repr(e), selection)<EOL><DEDENT>raise<EOL><DEDENT><DEDENT>except lxml.etree.XPathSyntaxError as syntax_error:<EOL><INDENT>syntax_error.msg += \"<STR_LIT>\" % selection<EOL>raise syntax_error<EOL><DEDENT>except Exception as e:<EOL><INDENT>if self.DEBUG:<EOL><INDENT>print(repr(e), selection)<EOL><DEDENT>raise<EOL><DEDENT>self._selector_cache[selection] = Selector(selector)<EOL>return self._selector_cache[selection]<EOL>", "docstring": "Scopes and selectors are tested in this order:\n* is this a CSS selector with an appended @something attribute?\n* is this a regular CSS 
selector?\n* is this an XPath expression?\n\nXPath expression can also use EXSLT functions (as long as they are\nunderstood by libxslt)", "id": "f9183:c3:m0"}
{"signature": "def _default_element_extract(self, element):", "body": "return parslepy.funcs.extract_text(element)<EOL>", "docstring": "Overridable method to change how matching Elements\nare represented in output", "id": "f9183:c2:m10"}
{"signature": "def extract(self, document, selector, debug_offset='<STR_LIT>'):", "body": "selected = self.select(document, selector)<EOL>if selected is not None:<EOL><INDENT>if isinstance(selected, (list, tuple)):<EOL><INDENT>if not len(selected):<EOL><INDENT>return<EOL><DEDENT>return [self._extract_single(m) for m in selected]<EOL><DEDENT>else:<EOL><INDENT>return self._extract_single(selected)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if self.DEBUG:<EOL><INDENT>print(debug_offset, \"<STR_LIT>\")<EOL><DEDENT>return None<EOL><DEDENT>", "docstring": "Try and convert matching Elements to unicode strings.\n\nIf this fails, the selector evaluation probably already\nreturned some string(s) of some sort, or boolean value,\nor int/float, so return that instead.", "id": "f9183:c2:m9"}
{"signature": "def extract(self, document, selector):", "body": "raise NotImplementedError<EOL>", "docstring": "Apply the selector on the document\nand return a value for the matching elements (text content or\nelement attributes)\n\n:param document: lxml-parsed document\n:param selector: input :class:`.Selector`  to apply on the document\n:rtype: depends on the selector (string, boolean value, ...)\n\nReturn value can be single- or multi-valued.", "id": "f9183:c1:m3"}
{"signature": "def object_as_dict(obj):", "body": "return {c.key: getattr(obj, c.key)<EOL>for c in inspect(obj).mapper.column_attrs}<EOL>", "docstring": "Make a dict from SQLAlchemy object.", "id": "f9190:m0"}
{"signature": "def make_pdf_fixture(filename, text=None):", "body": "if text is None:<EOL><INDENT>text = \"<STR_LIT>\" % filename<EOL><DEDENT>from reportlab.pdfgen import canvas<EOL>output = BytesIO()<EOL>c = canvas.Canvas(output)<EOL>c.drawString(<NUM_LIT:100>, <NUM_LIT:100>, text)<EOL>c.showPage()<EOL>c.save()<EOL>return make_file_fixture(filename, stringio_to_base64(output))<EOL>", "docstring": "Generate a PDF fixture.\n\n    It's suitable for use with Werkzeug test client and Flask test request\n    context.\n    Use of this function requires that reportlab have been installed.\n\n    :param filename: Desired filename.\n    :param text: Text to include in PDF. Defaults to \"Filename: <filename>\", if\n        not specified.", "id": "f9191:m2"}
{"signature": "@classmethod<EOL><INDENT>def create(cls, object_type=None, object_uuid=None, **kwargs):<DEDENT>", "body": "assert '<STR_LIT>' in kwargs<EOL>kwargs.setdefault('<STR_LIT:status>', cls.default_status)<EOL>return super(DepositProvider, cls).create(<EOL>object_type=object_type, object_uuid=object_uuid, **kwargs)<EOL>", "docstring": "Create a new deposit identifier.\n\n        :param object_type: The object type (Default: ``None``)\n        :param object_uuid: The object UUID (Default: ``None``)\n        :param kwargs: It contains the pid value.", "id": "f9195:c0:m0"}
{"signature": "def deposit_minter(record_uuid, data):", "body": "provider = DepositProvider.create(<EOL>object_type='<STR_LIT>',<EOL>object_uuid=record_uuid,<EOL>pid_value=uuid.uuid4().hex,<EOL>)<EOL>data['<STR_LIT>'] = {<EOL>'<STR_LIT:id>': provider.pid.pid_value,<EOL>'<STR_LIT:status>': '<STR_LIT>',<EOL>}<EOL>return provider.pid<EOL>", "docstring": "Mint a deposit identifier.\n\n    A PID with the following characteristics is created:\n\n    .. code-block:: python\n\n        {\n            \"object_type\": \"rec\",\n            \"object_uuid\": record_uuid,\n            \"pid_value\": \"<new-pid-value>\",\n            \"pid_type\": \"depid\",\n        }\n\n    The following deposit meta information are updated:\n\n    .. code-block:: python\n\n        deposit['_deposit'] = {\n            \"id\": \"<new-pid-value>\",\n            \"status\": \"draft\",\n        }\n\n    :param record_uuid: Record UUID.\n    :param data: Record content.\n    :returns: A :class:`invenio_pidstore.models.PersistentIdentifier` object.", "id": "f9197:m0"}
{"signature": "def process_minter(value):", "body": "try:<EOL><INDENT>return current_pidstore.minters[value]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise click.BadParameter(<EOL>'<STR_LIT>'.format(<EOL>value, '<STR_LIT:U+002CU+0020>'.join(current_pidstore.minters.keys())<EOL>)<EOL>)<EOL><DEDENT>", "docstring": "Load minter from PIDStore registry based on given value.\n\n    :param value: Name of the minter.\n    :returns: The minter.", "id": "f9204:m0"}
{"signature": "@deposit.command()<EOL>@click.argument('<STR_LIT:source>', type=click.File('<STR_LIT:r>'), default=sys.stdin)<EOL>@click.option('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', multiple=True)<EOL>@click.option('<STR_LIT>', is_flag=True, default=False)<EOL>@with_appcontext<EOL>def create(source, ids, force, pid_minter=None):", "body": "", "docstring": "Create new deposit.", "id": "f9204:m4"}
{"signature": "@deposit.command()<EOL>@click.argument('<STR_LIT:source>')<EOL>@with_appcontext<EOL>def schema(source):", "body": "click.echo(process_schema(source))<EOL>", "docstring": "Create deposit schema from an existing schema.", "id": "f9204:m3"}
{"signature": "@deposit.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', multiple=True)<EOL>def edit(ids):", "body": "", "docstring": "Make selected deposits editable.", "id": "f9204:m6"}
{"signature": "def __init__(self, app=None):", "body": "if app:<EOL><INDENT>self.init_app(app)<EOL><DEDENT>", "docstring": "Extension initialization.\n\n        :param app: An instance of :class:`flask.Flask`.", "id": "f9205:c2:m0"}
{"signature": "def __init__(self, app):", "body": "self.app = app<EOL>", "docstring": "Initialize state.", "id": "f9205:c0:m0"}
{"signature": "@cached_property<EOL><INDENT>def schemaforms(self):<DEDENT>", "body": "_schemaforms = {<EOL>k: v['<STR_LIT>']<EOL>for k, v in self.app.config['<STR_LIT>'].items()<EOL>if '<STR_LIT>' in v<EOL>}<EOL>return defaultdict(<EOL>lambda: self.app.config['<STR_LIT>'], _schemaforms<EOL>)<EOL>", "docstring": "Load deposit schema forms.", "id": "f9205:c0:m2"}
{"signature": "def init_app(self, app):", "body": "self.init_config(app)<EOL>blueprint = rest.create_blueprint(<EOL>app.config['<STR_LIT>']<EOL>)<EOL>@app.before_first_request<EOL>def extend_default_endpoint_prefixes():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>endpoint_prefixes = utils.build_default_endpoint_prefixes(<EOL>dict(app.config['<STR_LIT>'])<EOL>)<EOL>current_records_rest = app.extensions['<STR_LIT>']<EOL>overlap = set(endpoint_prefixes.keys()) & set(<EOL>current_records_rest.default_endpoint_prefixes<EOL>)<EOL>if overlap:<EOL><INDENT>raise RuntimeError(<EOL>'<STR_LIT>'.format(<EOL>'<STR_LIT:U+002CU+0020>'.join(overlap)<EOL>)<EOL>)<EOL><DEDENT>current_records_rest.default_endpoint_prefixes.update(<EOL>endpoint_prefixes<EOL>)<EOL><DEDENT>app.register_blueprint(blueprint)<EOL>app.extensions['<STR_LIT>'] = _DepositState(app)<EOL>if app.config['<STR_LIT>']:<EOL><INDENT>post_action.connect(index_deposit_after_publish, sender=app,<EOL>weak=False)<EOL><DEDENT>", "docstring": "Flask application initialization.\n\n        Initialize the REST endpoints.  Connect all signals if\n        `DEPOSIT_REGISTER_SIGNALS` is True.\n\n        :param app: An instance of :class:`flask.Flask`.", "id": "f9205:c2:m1"}
{"signature": "def init_config(self, app):", "body": "for k in dir(config):<EOL><INDENT>if k.startswith('<STR_LIT>'):<EOL><INDENT>app.config.setdefault(k, getattr(config, k))<EOL><DEDENT><DEDENT>", "docstring": "Initialize configuration.\n\n        :param app: An instance of :class:`flask.Flask`.", "id": "f9205:c2:m2"}
{"signature": "@cached_property<EOL><INDENT>def jsonschemas(self):<DEDENT>", "body": "_jsonschemas = {<EOL>k: v['<STR_LIT>']<EOL>for k, v in self.app.config['<STR_LIT>'].items()<EOL>if '<STR_LIT>' in v<EOL>}<EOL>return defaultdict(<EOL>lambda: self.app.config['<STR_LIT>'], _jsonschemas<EOL>)<EOL>", "docstring": "Load deposit JSON schemas.", "id": "f9205:c0:m1"}
{"signature": "def deposits_filter():", "body": "if not has_request_context() or admin_permission_factory().can():<EOL><INDENT>return Q()<EOL><DEDENT>else:<EOL><INDENT>return Q(<EOL>'<STR_LIT>', **{'<STR_LIT>': getattr(current_user, '<STR_LIT:id>', <NUM_LIT:0>)}<EOL>)<EOL><DEDENT>", "docstring": "Filter list of deposits.\n\n    Permit to the user to see all if:\n\n    * The user is an admin (see\n        func:`invenio_deposit.permissions:admin_permission_factory`).\n\n    * It's called outside of a request.\n\n    Otherwise, it filters out any deposit where user is not the owner.", "id": "f9206:m0"}
{"signature": "def index_deposit_after_publish(sender, action=None, pid=None, deposit=None):", "body": "if action == '<STR_LIT>':<EOL><INDENT>_, record = deposit.fetch_published()<EOL>index_record.delay(str(record.id))<EOL><DEDENT>", "docstring": "Index the record after publishing.\n\n    .. note:: if the record is not published, it doesn't index.\n\n    :param sender: Who send the signal.\n    :param action: Action executed by the sender. (Default: ``None``)\n    :param pid: PID object. (Default: ``None``)\n    :param deposit: Deposit object. (Default: ``None``)", "id": "f9210:m0"}
{"signature": "@has_status<EOL><INDENT>@index(delete=True)<EOL>def delete(self, force=True, pid=None):<DEDENT>", "body": "pid = pid or self.pid<EOL>if self['<STR_LIT>'].get('<STR_LIT>'):<EOL><INDENT>raise PIDInvalidAction()<EOL><DEDENT>if pid:<EOL><INDENT>pid.delete()<EOL><DEDENT>return super(Deposit, self).delete(force=force)<EOL>", "docstring": "Delete deposit.\n\n        Status required: ``'draft'``.\n\n        :param force: Force deposit delete.  (Default: ``True``)\n        :param pid: Force pid object.  (Default: ``None``)\n        :returns: A new Deposit object.", "id": "f9213:c0:m14"}
{"signature": "@contextmanager<EOL><INDENT>def _process_files(self, record_id, data):<DEDENT>", "body": "if self.files:<EOL><INDENT>assert not self.files.bucket.locked<EOL>self.files.bucket.locked = True<EOL>snapshot = self.files.bucket.snapshot(lock=True)<EOL>data['<STR_LIT>'] = self.files.dumps(bucket=snapshot.id)<EOL>yield data<EOL>db.session.add(RecordsBuckets(<EOL>record_id=record_id, bucket_id=snapshot.id<EOL>))<EOL><DEDENT>else:<EOL><INDENT>yield data<EOL><DEDENT>", "docstring": "Snapshot bucket and add files in record during first publishing.", "id": "f9213:c0:m7"}
{"signature": "@property<EOL><INDENT>def pid(self):<DEDENT>", "body": "pid = self.deposit_fetcher(self.id, self)<EOL>return PersistentIdentifier.get(pid.pid_type,<EOL>pid.pid_value)<EOL>", "docstring": "Return an instance of deposit PID.", "id": "f9213:c0:m0"}
{"signature": "def index(method=None, delete=False):", "body": "if method is None:<EOL><INDENT>return partial(index, delete=delete)<EOL><DEDENT>@wraps(method)<EOL>def wrapper(self_or_cls, *args, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>result = method(self_or_cls, *args, **kwargs)<EOL>try:<EOL><INDENT>if delete:<EOL><INDENT>self_or_cls.indexer.delete(result)<EOL><DEDENT>else:<EOL><INDENT>self_or_cls.indexer.index(result)<EOL><DEDENT><DEDENT>except RequestError:<EOL><INDENT>current_app.logger.exception('<STR_LIT>'.format(result))<EOL><DEDENT>return result<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorator to update index.\n\n    :param method: Function wrapped. (Default: ``None``)\n    :param delete: If `True` delete the indexed record. (Default: ``None``)", "id": "f9213:m0"}
{"signature": "@property<EOL><INDENT>def record_schema(self):<DEDENT>", "body": "schema_path = current_jsonschemas.url_to_path(self['<STR_LIT>'])<EOL>schema_prefix = current_app.config['<STR_LIT>']<EOL>if schema_path and schema_path.startswith(schema_prefix):<EOL><INDENT>return current_jsonschemas.path_to_url(<EOL>schema_path[len(schema_prefix):]<EOL>)<EOL><DEDENT>", "docstring": "Convert deposit schema to a valid record schema.", "id": "f9213:c0:m1"}
{"signature": "def has_status(method=None, status='<STR_LIT>'):", "body": "if method is None:<EOL><INDENT>return partial(has_status, status=status)<EOL><DEDENT>@wraps(method)<EOL>def wrapper(self, *args, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if status != self.status:<EOL><INDENT>raise PIDInvalidAction()<EOL><DEDENT>return method(self, *args, **kwargs)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Check that deposit has a defined status (default: draft).\n\n    :param method: Function executed if record has a defined status.\n        (Default: ``None``)\n    :param status: Defined status to check. (Default: ``'draft'``)", "id": "f9213:m1"}
{"signature": "def fetch_published(self):", "body": "pid_type = self['<STR_LIT>']['<STR_LIT>']['<STR_LIT:type>']<EOL>pid_value = self['<STR_LIT>']['<STR_LIT>']['<STR_LIT:value>']<EOL>resolver = Resolver(<EOL>pid_type=pid_type, object_type='<STR_LIT>',<EOL>getter=partial(self.published_record_class.get_record,<EOL>with_deleted=True)<EOL>)<EOL>return resolver.resolve(pid_value)<EOL>", "docstring": "Return a tuple with PID and published record.", "id": "f9213:c0:m3"}
{"signature": "def _publish_new(self, id_=None):", "body": "minter = current_pidstore.minters[<EOL>current_app.config['<STR_LIT>']<EOL>]<EOL>id_ = id_ or uuid.uuid4()<EOL>record_pid = minter(id_, self)<EOL>self['<STR_LIT>']['<STR_LIT>'] = {<EOL>'<STR_LIT:type>': record_pid.pid_type,<EOL>'<STR_LIT:value>': record_pid.pid_value,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>}<EOL>data = dict(self.dumps())<EOL>data['<STR_LIT>'] = self.record_schema<EOL>with self._process_files(id_, data):<EOL><INDENT>record = self.published_record_class.create(data, id_=id_)<EOL><DEDENT>return record<EOL>", "docstring": "Publish new deposit.\n\n        :param id_: The forced record UUID.", "id": "f9213:c0:m8"}
{"signature": "@preserve(fields=('<STR_LIT>', '<STR_LIT>'))<EOL><INDENT>def merge_with_published(self):<DEDENT>", "body": "pid, first = self.fetch_published()<EOL>lca = first.revisions[self['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']]<EOL>args = [lca.dumps(), first.dumps(), self.dumps()]<EOL>for arg in args:<EOL><INDENT>del arg['<STR_LIT>'], arg['<STR_LIT>']<EOL><DEDENT>args.append({})<EOL>m = Merger(*args)<EOL>try:<EOL><INDENT>m.run()<EOL><DEDENT>except UnresolvedConflictsException:<EOL><INDENT>raise MergeConflict()<EOL><DEDENT>return patch(m.unified_patches, lca)<EOL>", "docstring": "Merge changes with latest published version.", "id": "f9213:c0:m4"}
{"signature": "@property<EOL><INDENT>def files(self):<DEDENT>", "body": "files_ = super(Deposit, self).files<EOL>if files_:<EOL><INDENT>sort_by_ = files_.sort_by<EOL>def sort_by(*args, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if '<STR_LIT>' != self.status:<EOL><INDENT>raise PIDInvalidAction()<EOL><DEDENT>return sort_by_(*args, **kwargs)<EOL><DEDENT>files_.sort_by = sort_by<EOL><DEDENT>return files_<EOL>", "docstring": "List of Files inside the deposit.\n\n        Add validation on ``sort_by`` method: if, at the time of files access,\n        the record is not a ``'draft'`` then a\n        :exc:`invenio_pidstore.errors.PIDInvalidAction` is rised.", "id": "f9213:c0:m20"}
{"signature": "@has_status(status='<STR_LIT>')<EOL><INDENT>@index<EOL>@mark_as_action<EOL>def edit(self, pid=None):<DEDENT>", "body": "pid = pid or self.pid<EOL>with db.session.begin_nested():<EOL><INDENT>before_record_update.send(<EOL>current_app._get_current_object(), record=self)<EOL>record_pid, record = self.fetch_published()<EOL>assert PIDStatus.REGISTERED == record_pid.status<EOL>assert record['<STR_LIT>'] == self['<STR_LIT>']<EOL>self.model.json = self._prepare_edit(record)<EOL>flag_modified(self.model, '<STR_LIT>')<EOL>db.session.merge(self.model)<EOL><DEDENT>after_record_update.send(<EOL>current_app._get_current_object(), record=self)<EOL>return self.__class__(self.model.json, model=self.model)<EOL>", "docstring": "Edit deposit.\n\n        #. The signal :data:`invenio_records.signals.before_record_update`\n           is sent before the edit execution.\n\n        #. The following meta information are saved inside the deposit:\n\n        .. code-block:: python\n\n            deposit['_deposit']['pid'] = record.revision_id\n            deposit['_deposit']['status'] = 'draft'\n            deposit['$schema'] = deposit_schema_from_record_schema\n\n        #. The signal :data:`invenio_records.signals.after_record_update` is\n            sent after the edit execution.\n\n        #. The deposit index is updated.\n\n        Status required: `published`.\n\n        .. note:: the process fails if the pid has status\n            :attr:`invenio_pidstore.models.PIDStatus.REGISTERED`.\n\n        :param pid: Force a pid object. (Default: ``None``)\n        :returns: A new Deposit object.", "id": "f9213:c0:m12"}
{"signature": "@require_api_auth()<EOL><INDENT>@require_oauth_scopes(write_scope.id)<EOL>@pass_record<EOL>@need_record_permission('<STR_LIT>')<EOL>def delete(self, pid, record, key):<DEDENT>", "body": "try:<EOL><INDENT>del record.files[str(key)]<EOL>record.commit()<EOL>db.session.commit()<EOL>return make_response('<STR_LIT>', <NUM_LIT>)<EOL><DEDENT>except KeyError:<EOL><INDENT>abort(<NUM_LIT>, '<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Handle DELETE deposit file.\n\n        Permission required: `update_permission_factory`.\n\n        :param pid: Pid object (from url).\n        :param record: Record object resolved from the pid.\n        :param key: Unique identifier for the file in the deposit.", "id": "f9217:c2:m3"}
{"signature": "def __init__(self, serializers, pid_type, ctx, *args, **kwargs):", "body": "super(DepositFilesResource, self).__init__(<EOL>serializers,<EOL>*args,<EOL>**kwargs<EOL>)<EOL>for key, value in ctx.items():<EOL><INDENT>setattr(self, key, value)<EOL><DEDENT>", "docstring": "Constructor.", "id": "f9217:c1:m0"}
{"signature": "@require_api_auth()<EOL><INDENT>@require_oauth_scopes(write_scope.id)<EOL>@pass_record<EOL>@need_record_permission('<STR_LIT>')<EOL>def put(self, pid, record, key):<DEDENT>", "body": "try:<EOL><INDENT>data = json.loads(request.data.decode('<STR_LIT:utf-8>'))<EOL>new_key = data['<STR_LIT:filename>']<EOL><DEDENT>except KeyError:<EOL><INDENT>raise WrongFile()<EOL><DEDENT>new_key_secure = secure_filename(new_key)<EOL>if not new_key_secure or new_key != new_key_secure:<EOL><INDENT>raise WrongFile()<EOL><DEDENT>try:<EOL><INDENT>obj = record.files.rename(str(key), new_key_secure)<EOL><DEDENT>except KeyError:<EOL><INDENT>abort(<NUM_LIT>)<EOL><DEDENT>record.commit()<EOL>db.session.commit()<EOL>return self.make_response(obj=obj, pid=pid, record=record)<EOL>", "docstring": "Handle the file rename through the PUT deposit file.\n\n        Permission required: `update_permission_factory`.\n\n        :param pid: Pid object (from url).\n        :param record: Record object resolved from the pid.\n        :param key: Unique identifier for the file in the deposit.", "id": "f9217:c2:m2"}
{"signature": "@use_kwargs(get_args)<EOL><INDENT>@pass_record<EOL>@need_record_permission('<STR_LIT>')<EOL>def get(self, pid, record, key, version_id, **kwargs):<DEDENT>", "body": "try:<EOL><INDENT>obj = record.files[str(key)].get_version(version_id=version_id)<EOL>return self.make_response(<EOL>obj=obj or abort(<NUM_LIT>), pid=pid, record=record)<EOL><DEDENT>except KeyError:<EOL><INDENT>abort(<NUM_LIT>)<EOL><DEDENT>", "docstring": "Get file.\n\n        Permission required: `read_permission_factory`.\n\n        :param pid: Pid object (from url).\n        :param record: Record object resolved from the pid.\n        :param key: Unique identifier for the file in the deposit.\n        :param version_id: File version. Optional. If no version is provided,\n            the last version is retrieved.\n        :returns: the file content.", "id": "f9217:c2:m1"}
{"signature": "def create_blueprint(endpoints):", "body": "blueprint = Blueprint(<EOL>'<STR_LIT>',<EOL>__name__,<EOL>url_prefix='<STR_LIT>',<EOL>)<EOL>create_error_handlers(blueprint)<EOL>for endpoint, options in (endpoints or {}).items():<EOL><INDENT>options = deepcopy(options)<EOL>if '<STR_LIT>' in options:<EOL><INDENT>files_serializers = options.get('<STR_LIT>')<EOL>files_serializers = {mime: obj_or_import_string(func)<EOL>for mime, func in files_serializers.items()}<EOL>del options['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>files_serializers = {}<EOL><DEDENT>if '<STR_LIT>' in options:<EOL><INDENT>serializers = options.get('<STR_LIT>')<EOL>serializers = {mime: obj_or_import_string(func)<EOL>for mime, func in serializers.items()}<EOL><DEDENT>else:<EOL><INDENT>serializers = {}<EOL><DEDENT>file_list_route = options.pop(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'.format(options['<STR_LIT>'])<EOL>)<EOL>file_item_route = options.pop(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'.format(options['<STR_LIT>'])<EOL>)<EOL>options.setdefault('<STR_LIT>', DepositSearch)<EOL>search_class = obj_or_import_string(options['<STR_LIT>'])<EOL>options.setdefault('<STR_LIT>', Deposit)<EOL>record_class = obj_or_import_string(options['<STR_LIT>'])<EOL>options.setdefault('<STR_LIT>', None)<EOL>for rule in records_rest_url_rules(endpoint, **options):<EOL><INDENT>blueprint.add_url_rule(**rule)<EOL><DEDENT>search_class_kwargs = {}<EOL>if options.get('<STR_LIT>'):<EOL><INDENT>search_class_kwargs['<STR_LIT:index>'] = options['<STR_LIT>']<EOL><DEDENT>if options.get('<STR_LIT>'):<EOL><INDENT>search_class_kwargs['<STR_LIT>'] = options['<STR_LIT>']<EOL><DEDENT>ctx = 
dict(<EOL>read_permission_factory=obj_or_import_string(<EOL>options.get('<STR_LIT>')<EOL>),<EOL>create_permission_factory=obj_or_import_string(<EOL>options.get('<STR_LIT>')<EOL>),<EOL>update_permission_factory=obj_or_import_string(<EOL>options.get('<STR_LIT>')<EOL>),<EOL>delete_permission_factory=obj_or_import_string(<EOL>options.get('<STR_LIT>')<EOL>),<EOL>record_class=record_class,<EOL>search_class=partial(search_class, **search_class_kwargs),<EOL>default_media_type=options.get('<STR_LIT>'),<EOL>)<EOL>deposit_actions = DepositActionResource.as_view(<EOL>DepositActionResource.view_name.format(endpoint),<EOL>serializers=serializers,<EOL>pid_type=options['<STR_LIT>'],<EOL>ctx=ctx,<EOL>)<EOL>blueprint.add_url_rule(<EOL>'<STR_LIT>'.format(<EOL>options['<STR_LIT>'],<EOL>'<STR_LIT:U+002C>'.join(extract_actions_from_class(record_class)),<EOL>),<EOL>view_func=deposit_actions,<EOL>methods=['<STR_LIT:POST>'],<EOL>)<EOL>deposit_files = DepositFilesResource.as_view(<EOL>DepositFilesResource.view_name.format(endpoint),<EOL>serializers=files_serializers,<EOL>pid_type=options['<STR_LIT>'],<EOL>ctx=ctx,<EOL>)<EOL>blueprint.add_url_rule(<EOL>file_list_route,<EOL>view_func=deposit_files,<EOL>methods=['<STR_LIT:GET>', '<STR_LIT:POST>', '<STR_LIT>'],<EOL>)<EOL>deposit_file = DepositFileResource.as_view(<EOL>DepositFileResource.view_name.format(endpoint),<EOL>serializers=files_serializers,<EOL>pid_type=options['<STR_LIT>'],<EOL>ctx=ctx,<EOL>)<EOL>blueprint.add_url_rule(<EOL>file_item_route,<EOL>view_func=deposit_file,<EOL>methods=['<STR_LIT:GET>', '<STR_LIT>', '<STR_LIT>'],<EOL>)<EOL><DEDENT>return blueprint<EOL>", "docstring": "Create Invenio-Deposit-REST blueprint.\n\n    See: :data:`invenio_deposit.config.DEPOSIT_REST_ENDPOINTS`.\n\n    :param endpoints: List of endpoints configuration.\n    :returns: The configured blueprint.", "id": "f9217:m1"}
{"signature": "def __init__(self, serializers, pid_type, ctx, *args, **kwargs):", "body": "super(DepositActionResource, self).__init__(<EOL>serializers,<EOL>default_media_type=ctx.get('<STR_LIT>'),<EOL>*args,<EOL>**kwargs<EOL>)<EOL>for key, value in ctx.items():<EOL><INDENT>setattr(self, key, value)<EOL><DEDENT>", "docstring": "Constructor.", "id": "f9217:c0:m0"}
{"signature": "def create_blueprint(endpoints):", "body": "from invenio_records_ui.views import create_url_rule<EOL>blueprint = Blueprint(<EOL>'<STR_LIT>',<EOL>__name__,<EOL>static_folder='<STR_LIT>',<EOL>template_folder='<STR_LIT>',<EOL>url_prefix='<STR_LIT>',<EOL>)<EOL>@blueprint.errorhandler(PIDDeletedError)<EOL>def tombstone_errorhandler(error):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return render_template(<EOL>current_app.config['<STR_LIT>'],<EOL>pid=error.pid,<EOL>record=error.record or {},<EOL>), <NUM_LIT><EOL><DEDENT>for endpoint, options in (endpoints or {}).items():<EOL><INDENT>options = deepcopy(options)<EOL>options.pop('<STR_LIT>', None)<EOL>options.pop('<STR_LIT>', None)<EOL>blueprint.add_url_rule(**create_url_rule(endpoint, **options))<EOL><DEDENT>@blueprint.route('<STR_LIT>')<EOL>@login_required<EOL>def index():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return render_template(current_app.config['<STR_LIT>'])<EOL><DEDENT>@blueprint.route('<STR_LIT>')<EOL>@login_required<EOL>def new():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>deposit_type = request.values.get('<STR_LIT:type>')<EOL>return render_template(<EOL>current_app.config['<STR_LIT>'],<EOL>record={'<STR_LIT>': {'<STR_LIT:id>': None}},<EOL>jsonschema=current_deposit.jsonschemas[deposit_type],<EOL>schemaform=current_deposit.schemaforms[deposit_type],<EOL>)<EOL><DEDENT>return blueprint<EOL>", "docstring": "Create Invenio-Deposit-UI blueprint.\n\n    See: :data:`invenio_deposit.config.DEPOSIT_RECORDS_UI_ENDPOINTS`.\n\n    :param endpoints: List of endpoints configuration.\n    :returns: The configured blueprint.", "id": "f9219:m0"}
{"signature": "def default_view_method(pid, record, template=None):", "body": "record_viewed.send(<EOL>current_app._get_current_object(),<EOL>pid=pid,<EOL>record=record,<EOL>)<EOL>deposit_type = request.values.get('<STR_LIT:type>')<EOL>return render_template(<EOL>template,<EOL>pid=pid,<EOL>record=record,<EOL>jsonschema=current_deposit.jsonschemas[deposit_type],<EOL>schemaform=current_deposit.schemaforms[deposit_type],<EOL>)<EOL>", "docstring": "Default view method.\n\n    Sends ``record_viewed`` signal and renders template.", "id": "f9219:m1"}
{"signature": "@fixtures.command()<EOL>@cli.with_appcontext<EOL>def records():", "body": "import pkg_resources<EOL>from dojson.contrib.marc21 import marc21<EOL>from dojson.contrib.marc21.utils import create_record, split_blob<EOL>from flask_login import login_user, logout_user<EOL>from invenio_accounts.models import User<EOL>from invenio_deposit.api import Deposit<EOL>users = User.query.all()<EOL>data_path = pkg_resources.resource_filename(<EOL>'<STR_LIT>', '<STR_LIT>'<EOL>)<EOL>with open(data_path) as source:<EOL><INDENT>with current_app.test_request_context():<EOL><INDENT>indexer = RecordIndexer()<EOL>with db.session.begin_nested():<EOL><INDENT>for index, data in enumerate(split_blob(source.read()),<EOL>start=<NUM_LIT:1>):<EOL><INDENT>login_user(users[index % len(users)])<EOL>record = marc21.do(create_record(data))<EOL>indexer.index(Deposit.create(record))<EOL>logout_user()<EOL><DEDENT><DEDENT>db.session.commit()<EOL><DEDENT><DEDENT>", "docstring": "Load records.", "id": "f9220:m1"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def list(self, path):<DEDENT>", "body": "", "docstring": "Create instance of subclass of :py:class:`aioftp.AbstractAsyncLister`.\nYou should subclass and implement `__anext__` method\nfor :py:class:`aioftp.AbstractAsyncLister` and return new instance.\n\n:param path: path to list\n:type path: :py:class:`pathlib.Path`\n\n:rtype: :py:class:`aioftp.AbstractAsyncLister`\n\nUsage:\n::\n\n    >>> async for p in pathio.list(path):\n    ...     # do\n\nor borring instance of :py:class:`list`:\n::\n\n    >>> paths = await pathio.list(path)\n    >>> paths\n    [path, path, path, ...]", "id": "f9224:c2:m8"}
{"signature": "@universal_exception<EOL><INDENT>@abc.abstractmethod<EOL>async def exists(self, path):<DEDENT>", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nCheck if path exists\n\n:param path: path to check\n:type path: :py:class:`pathlib.Path`\n\n:rtype: :py:class:`bool`", "id": "f9224:c2:m2"}
{"signature": "def open(self, *args, **kwargs):", "body": "return AsyncPathIOContext(self, args, kwargs)<EOL>", "docstring": "Create instance of :py:class:`aioftp.pathio.AsyncPathIOContext`,\nparameters passed to :py:meth:`aioftp.AbstractPathIO._open`\n\n:rtype: :py:class:`aioftp.pathio.AsyncPathIOContext`", "id": "f9224:c2:m11"}
{"signature": "@universal_exception<EOL><INDENT>@defend_file_methods<EOL>@abc.abstractmethod<EOL>async def read(self, file, block_size):<DEDENT>", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nRead some data from file\n\n:param file: file-object from :py:class:`aioftp.AbstractPathIO.open`\n\n:param block_size: bytes count to read\n:type block_size: :py:class:`int`\n\n:rtype: :py:class:`bytes`", "id": "f9224:c2:m14"}
{"signature": "def universal_exception(coro):", "body": "@functools.wraps(coro)<EOL>async def wrapper(*args, **kwargs):<EOL><INDENT>try:<EOL><INDENT>return await coro(*args, **kwargs)<EOL><DEDENT>except (asyncio.CancelledError, NotImplementedError,<EOL>StopAsyncIteration):<EOL><INDENT>raise<EOL><DEDENT>except Exception:<EOL><INDENT>raise errors.PathIOError(reason=sys.exc_info())<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Decorator. Reraising any exception (except `CancelledError` and\n`NotImplementedError`) with universal exception\n:py:class:`aioftp.PathIOError`", "id": "f9224:m0"}
{"signature": "@universal_exception<EOL><INDENT>@abc.abstractmethod<EOL>async def mkdir(self, path, *, parents=False, exist_ok=False):<DEDENT>", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nMake directory\n\n:param path: path to create\n:type path: :py:class:`pathlib.Path`\n\n:param parents: create parents is does not exists\n:type parents: :py:class:`bool`\n\n:param exist_ok: do not raise exception if directory already exists\n:type exist_ok: :py:class:`bool`", "id": "f9224:c2:m5"}
{"signature": "@universal_exception<EOL><INDENT>@abc.abstractmethod<EOL>async def rmdir(self, path):<DEDENT>", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nRemove directory\n\n:param path: path to remove\n:type path: :py:class:`pathlib.Path`", "id": "f9224:c2:m6"}
{"signature": "@universal_exception<EOL><INDENT>@defend_file_methods<EOL>@abc.abstractmethod<EOL>async def write(self, file, data):<DEDENT>", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nWrite some data to file\n\n:param file: file-object from :py:class:`aioftp.AbstractPathIO.open`\n\n:param data: data to write\n:type data: :py:class:`bytes`", "id": "f9224:c2:m13"}
{"signature": "@universal_exception<EOL><INDENT>@abc.abstractmethod<EOL>async def unlink(self, path):<DEDENT>", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nRemove file\n\n:param path: path to remove\n:type path: :py:class:`pathlib.Path`", "id": "f9224:c2:m7"}
{"signature": "def list(self, path=\"<STR_LIT>\", *, recursive=False, raw_command=None):", "body": "class AsyncLister(AsyncListerMixin):<EOL><INDENT>stream = None<EOL>async def _new_stream(cls, local_path):<EOL><INDENT>cls.path = local_path<EOL>cls.parse_line = self.parse_mlsx_line<EOL>if raw_command not in [None, \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>f\"<STR_LIT>\")<EOL><DEDENT>if raw_command in [None, \"<STR_LIT>\"]:<EOL><INDENT>try:<EOL><INDENT>command = (\"<STR_LIT>\" + str(cls.path)).strip()<EOL>return await self.get_stream(command, \"<STR_LIT>\")<EOL><DEDENT>except errors.StatusCodeError as e:<EOL><INDENT>code = e.received_codes[-<NUM_LIT:1>]<EOL>if not code.matches(\"<STR_LIT>\") or raw_command is not None:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT>if raw_command in [None, \"<STR_LIT>\"]:<EOL><INDENT>cls.parse_line = self.parse_list_line<EOL>command = (\"<STR_LIT>\" + str(cls.path)).strip()<EOL>return await self.get_stream(command, \"<STR_LIT>\")<EOL><DEDENT><DEDENT>def __aiter__(cls):<EOL><INDENT>cls.directories = collections.deque()<EOL>return cls<EOL><DEDENT>async def __anext__(cls):<EOL><INDENT>if cls.stream is None:<EOL><INDENT>cls.stream = await cls._new_stream(path)<EOL><DEDENT>while True:<EOL><INDENT>line = await cls.stream.readline()<EOL>while not line:<EOL><INDENT>await cls.stream.finish()<EOL>if cls.directories:<EOL><INDENT>current_path, info = cls.directories.popleft()<EOL>cls.stream = await cls._new_stream(current_path)<EOL>line = await cls.stream.readline()<EOL><DEDENT>else:<EOL><INDENT>raise StopAsyncIteration<EOL><DEDENT><DEDENT>try:<EOL><INDENT>name, info = cls.parse_line(line)<EOL><DEDENT>except Exception:<EOL><INDENT>continue<EOL><DEDENT>stat = cls.path / name, info<EOL>if info[\"<STR_LIT:type>\"] == \"<STR_LIT>\" and recursive:<EOL><INDENT>cls.directories.append(stat)<EOL><DEDENT>return stat<EOL><DEDENT><DEDENT><DEDENT>return AsyncLister()<EOL>", "docstring": 
":py:func:`asyncio.coroutine`\n\nList all files and directories in \"path\".\n\n:param path: directory or file path\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param recursive: list recursively\n:type recursive: :py:class:`bool`\n\n:param raw_command: optional ftp command to use in place of\n    fallback logic (must be one of \"MLSD\", \"LIST\")\n:type raw_command: :py:class:`str`\n\n:rtype: :py:class:`list` or `async for` context\n\n::\n\n    >>> # lazy list\n    >>> async for path, info in client.list():\n    ...     # no interaction with client should be here(!)\n\n    >>> # eager list\n    >>> for path, info in (await client.list()):\n    ...     # interaction with client allowed, since all paths are\n    ...     # collected already\n\n::\n\n    >>> stats = await client.list()", "id": "f9225:c3:m6"}
{"signature": "async def make_directory(self, path, *, parents=True):", "body": "path = pathlib.PurePosixPath(path)<EOL>need_create = []<EOL>while path.name and not await self.exists(path):<EOL><INDENT>need_create.append(path)<EOL>path = path.parent<EOL>if not parents:<EOL><INDENT>break<EOL><DEDENT><DEDENT>need_create.reverse()<EOL>for path in need_create:<EOL><INDENT>await self.command(\"<STR_LIT>\" + str(path), \"<STR_LIT>\")<EOL><DEDENT>", "docstring": ":py:func:`asyncio.coroutine`\n\nMake directory.\n\n:param path: path to directory to create\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param parents: create parents if does not exists\n:type parents: :py:class:`bool`", "id": "f9225:c3:m4"}
{"signature": "async def remove_file(self, path):", "body": "await self.command(\"<STR_LIT>\" + str(path), \"<STR_LIT>\")<EOL>", "docstring": ":py:func:`asyncio.coroutine`\n\nLow level remove method for removing file.\n\n:param path: file to remove\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`", "id": "f9225:c3:m12"}
{"signature": "async def get_passive_connection(self, conn_type=\"<STR_LIT:I>\",<EOL>commands=(\"<STR_LIT>\", \"<STR_LIT>\")):", "body": "functions = {<EOL>\"<STR_LIT>\": self._do_epsv,<EOL>\"<STR_LIT>\": self._do_pasv,<EOL>}<EOL>if not commands:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>await self.command(\"<STR_LIT>\" + conn_type, \"<STR_LIT>\")<EOL>for i, name in enumerate(commands, start=<NUM_LIT:1>):<EOL><INDENT>name = name.lower()<EOL>if name not in functions:<EOL><INDENT>raise ValueError(f\"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>ip, port = await functions[name]()<EOL>break<EOL><DEDENT>except errors.StatusCodeError as e:<EOL><INDENT>is_last = i == len(commands)<EOL>if is_last or not e.received_codes[-<NUM_LIT:1>].matches(\"<STR_LIT>\"):<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT>if ip in (\"<STR_LIT>\", None):<EOL><INDENT>ip = self.server_host<EOL><DEDENT>reader, writer = await open_connection(<EOL>ip,<EOL>port,<EOL>self.create_connection,<EOL>self.ssl,<EOL>)<EOL>return reader, writer<EOL>", "docstring": ":py:func:`asyncio.coroutine`\n\nGetting pair of reader, writer for passive connection with server.\n\n:param conn_type: connection type (\"I\", \"A\", \"E\", \"L\")\n:type conn_type: :py:class:`str`\n\n:param commands: sequence of commands to try to initiate passive\n    server creation. First success wins. Default is EPSV, then PASV.\n:type commands: :py:class:`list`\n\n:rtype: (:py:class:`asyncio.StreamReader`,\n    :py:class:`asyncio.StreamWriter`)", "id": "f9225:c3:m22"}
{"signature": "async def remove(self, path):", "body": "if await self.exists(path):<EOL><INDENT>info = await self.stat(path)<EOL>if info[\"<STR_LIT:type>\"] == \"<STR_LIT:file>\":<EOL><INDENT>await self.remove_file(path)<EOL><DEDENT>elif info[\"<STR_LIT:type>\"] == \"<STR_LIT>\":<EOL><INDENT>for name, info in (await self.list(path)):<EOL><INDENT>if info[\"<STR_LIT:type>\"] in (\"<STR_LIT>\", \"<STR_LIT:file>\"):<EOL><INDENT>await self.remove(name)<EOL><DEDENT><DEDENT>await self.remove_directory(path)<EOL><DEDENT><DEDENT>", "docstring": ":py:func:`asyncio.coroutine`\n\nHigh level remove method for removing path recursively (file or\ndirectory).\n\n:param path: path to remove\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`", "id": "f9225:c3:m13"}
{"signature": "def check_codes(self, expected_codes, received_code, info):", "body": "if not any(map(received_code.matches, expected_codes)):<EOL><INDENT>raise errors.StatusCodeError(expected_codes, received_code, info)<EOL><DEDENT>", "docstring": "Checks if any of expected matches received.\n\n:param expected_codes: tuple of expected codes\n:type expected_codes: :py:class:`tuple`\n\n:param received_code: received code for matching\n:type received_code: :py:class:`aioftp.Code`\n\n:param info: list of response lines from server\n:type info: :py:class:`list`\n\n:raises aioftp.StatusCodeError: if received code does not matches any\n    expected code", "id": "f9225:c2:m5"}
{"signature": "async def login(self, user=DEFAULT_USER, password=DEFAULT_PASSWORD,<EOL>account=DEFAULT_ACCOUNT):", "body": "code, info = await self.command(\"<STR_LIT>\" + user, (\"<STR_LIT>\", \"<STR_LIT>\"))<EOL>while code.matches(\"<STR_LIT>\"):<EOL><INDENT>if code == \"<STR_LIT>\":<EOL><INDENT>cmd = \"<STR_LIT>\" + password<EOL><DEDENT>elif code == \"<STR_LIT>\":<EOL><INDENT>cmd = \"<STR_LIT>\" + account<EOL><DEDENT>else:<EOL><INDENT>raise errors.StatusCodeError(\"<STR_LIT>\", code, info)<EOL><DEDENT>code, info = await self.command(cmd, (\"<STR_LIT>\", \"<STR_LIT>\"))<EOL><DEDENT>", "docstring": ":py:func:`asyncio.coroutine`\n\nServer authentication.\n\n:param user: username\n:type user: :py:class:`str`\n\n:param password: password\n:type password: :py:class:`str`\n\n:param account: account (almost always blank)\n:type account: :py:class:`str`\n\n:raises aioftp.StatusCodeError: if unknown code received", "id": "f9225:c3:m1"}
{"signature": "async def download(self, source, destination=\"<STR_LIT>\", *, write_into=False,<EOL>block_size=DEFAULT_BLOCK_SIZE):", "body": "source = pathlib.PurePosixPath(source)<EOL>destination = pathlib.Path(destination)<EOL>if not write_into:<EOL><INDENT>destination = destination / source.name<EOL><DEDENT>if await self.is_file(source):<EOL><INDENT>await self.path_io.mkdir(destination.parent,<EOL>parents=True, exist_ok=True)<EOL>async with self.path_io.open(destination, mode=\"<STR_LIT:wb>\") as file_out,self.download_stream(source) as stream:<EOL><INDENT>async for block in stream.iter_by_block(block_size):<EOL><INDENT>await file_out.write(block)<EOL><DEDENT><DEDENT><DEDENT>elif await self.is_dir(source):<EOL><INDENT>await self.path_io.mkdir(destination, parents=True, exist_ok=True)<EOL>for name, info in (await self.list(source)):<EOL><INDENT>full = destination / name.relative_to(source)<EOL>if info[\"<STR_LIT:type>\"] in (\"<STR_LIT:file>\", \"<STR_LIT>\"):<EOL><INDENT>await self.download(name, full, write_into=True,<EOL>block_size=block_size)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": ":py:func:`asyncio.coroutine`\n\nHigh level download method for downloading files and directories\nrecursively and save them to the file system.\n\n:param source: source path of file or directory on server side\n:type source: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param destination: destination path of file or directory on client\n    side\n:type destination: :py:class:`str` or :py:class:`pathlib.Path`\n\n:param write_into: write source into destination (if you want download\n    file and change it name, as well with directories)\n:type write_into: :py:class:`bool`\n\n:param block_size: block size for transaction\n:type block_size: :py:class:`int`", "id": "f9225:c3:m18"}
{"signature": "@staticmethod<EOL><INDENT>def parse_directory_response(s):<DEDENT>", "body": "seq_quotes = <NUM_LIT:0><EOL>start = False<EOL>directory = \"<STR_LIT>\"<EOL>for ch in s:<EOL><INDENT>if not start:<EOL><INDENT>if ch == \"<STR_LIT>\":<EOL><INDENT>start = True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if ch == \"<STR_LIT>\":<EOL><INDENT>seq_quotes += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>if seq_quotes == <NUM_LIT:1>:<EOL><INDENT>break<EOL><DEDENT>elif seq_quotes == <NUM_LIT:2>:<EOL><INDENT>seq_quotes = <NUM_LIT:0><EOL>directory += '<STR_LIT:\">'<EOL><DEDENT>directory += ch<EOL><DEDENT><DEDENT><DEDENT>return pathlib.PurePosixPath(directory)<EOL>", "docstring": "Parsing directory server response.\n\n:param s: response line\n:type s: :py:class:`str`\n\n:rtype: :py:class:`pathlib.PurePosixPath`", "id": "f9225:c2:m9"}
{"signature": "@async_enterable<EOL><INDENT>async def get_stream(self, *command_args, conn_type=\"<STR_LIT:I>\", offset=<NUM_LIT:0>):<DEDENT>", "body": "reader, writer = await self.get_passive_connection(conn_type)<EOL>if offset:<EOL><INDENT>await self.command(\"<STR_LIT>\" + str(offset), \"<STR_LIT>\")<EOL><DEDENT>await self.command(*command_args)<EOL>stream = DataConnectionThrottleStreamIO(<EOL>self,<EOL>reader,<EOL>writer,<EOL>throttles={\"<STR_LIT:_>\": self.throttle},<EOL>timeout=self.socket_timeout,<EOL>)<EOL>return stream<EOL>", "docstring": ":py:func:`asyncio.coroutine`\n\nCreate :py:class:`aioftp.DataConnectionThrottleStreamIO` for straight\nread/write io.\n\n:param command_args: arguments for :py:meth:`aioftp.Client.command`\n\n:param conn_type: connection type (\"I\", \"A\", \"E\", \"L\")\n:type conn_type: :py:class:`str`\n\n:param offset: byte offset for stream start position\n:type offset: :py:class:`int`\n\n:rtype: :py:class:`aioftp.DataConnectionThrottleStreamIO`", "id": "f9225:c3:m23"}
{"signature": "def append_stream(self, destination, *, offset=<NUM_LIT:0>):", "body": "return self.get_stream(<EOL>\"<STR_LIT>\" + str(destination),<EOL>\"<STR_LIT>\",<EOL>offset=offset,<EOL>)<EOL>", "docstring": "Create stream for append (write) data to `destination` file.\n\n:param destination: destination path of file on server side\n:type destination: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param offset: byte offset for stream start position\n:type offset: :py:class:`int`\n\n:rtype: :py:class:`aioftp.DataConnectionThrottleStreamIO`", "id": "f9225:c3:m15"}
{"signature": "async def upload(self, source, destination=\"<STR_LIT>\", *, write_into=False,<EOL>block_size=DEFAULT_BLOCK_SIZE):", "body": "source = pathlib.Path(source)<EOL>destination = pathlib.PurePosixPath(destination)<EOL>if not write_into:<EOL><INDENT>destination = destination / source.name<EOL><DEDENT>if await self.path_io.is_file(source):<EOL><INDENT>await self.make_directory(destination.parent)<EOL>async with self.path_io.open(source, mode=\"<STR_LIT:rb>\") as file_in,self.upload_stream(destination) as stream:<EOL><INDENT>async for block in file_in.iter_by_block(block_size):<EOL><INDENT>await stream.write(block)<EOL><DEDENT><DEDENT><DEDENT>elif await self.path_io.is_dir(source):<EOL><INDENT>await self.make_directory(destination)<EOL>sources = collections.deque([source])<EOL>while sources:<EOL><INDENT>src = sources.popleft()<EOL>async for path in self.path_io.list(src):<EOL><INDENT>if write_into:<EOL><INDENT>relative = destination.name / path.relative_to(source)<EOL><DEDENT>else:<EOL><INDENT>relative = path.relative_to(source.parent)<EOL><DEDENT>if await self.path_io.is_dir(path):<EOL><INDENT>await self.make_directory(relative)<EOL>sources.append(path)<EOL><DEDENT>else:<EOL><INDENT>await self.upload(<EOL>path,<EOL>relative,<EOL>write_into=True,<EOL>block_size=block_size<EOL>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": ":py:func:`asyncio.coroutine`\n\nHigh level upload method for uploading files and directories\nrecursively from file system.\n\n:param source: source path of file or directory on client side\n:type source: :py:class:`str` or :py:class:`pathlib.Path`\n\n:param destination: destination path of file or directory on server\n    side\n:type destination: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:param write_into: write source into destination (if you want upload\n    file and change it name, as well with directories)\n:type write_into: :py:class:`bool`\n\n:param block_size: block size for transaction\n:type block_size: 
:py:class:`int`", "id": "f9225:c3:m16"}
{"signature": "async def stat(self, path):", "body": "path = pathlib.PurePosixPath(path)<EOL>try:<EOL><INDENT>code, info = await self.command(\"<STR_LIT>\" + str(path), \"<STR_LIT>\")<EOL>name, info = self.parse_mlsx_line(info[<NUM_LIT:1>].lstrip())<EOL>return info<EOL><DEDENT>except errors.StatusCodeError as e:<EOL><INDENT>if not e.received_codes[-<NUM_LIT:1>].matches(\"<STR_LIT>\"):<EOL><INDENT>raise<EOL><DEDENT><DEDENT>for p, info in await self.list(path.parent):<EOL><INDENT>if p.name == path.name:<EOL><INDENT>return info<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise errors.StatusCodeError(<EOL>Code(\"<STR_LIT>\"),<EOL>Code(\"<STR_LIT>\"),<EOL>\"<STR_LIT>\",<EOL>)<EOL><DEDENT>", "docstring": ":py:func:`asyncio.coroutine`\n\nGetting path stats.\n\n:param path: path for getting info\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:return: path info\n:rtype: :py:class:`dict`", "id": "f9225:c3:m7"}
{"signature": "async def get_current_directory(self):", "body": "code, info = await self.command(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>directory = self.parse_directory_response(info[-<NUM_LIT:1>])<EOL>return directory<EOL>", "docstring": ":py:func:`asyncio.coroutine`\n\nGetting current working directory.\n\n:rtype: :py:class:`pathlib.PurePosixPath`", "id": "f9225:c3:m2"}
{"signature": "def matches(self, mask):", "body": "return all(map(lambda m, c: not m.isdigit() or m == c, mask, self))<EOL>", "docstring": ":param mask: Template for comparision. If mask symbol is not digit\n    then it passes.\n:type mask: :py:class:`str`\n\n::\n\n    >>> Code(\"123\").matches(\"1\")\n    True\n    >>> Code(\"123\").matches(\"1x3\")\n    True", "id": "f9225:c0:m0"}
{"signature": "def parse_list_line_unix(self, b):", "body": "s = b.decode(encoding=self.encoding).rstrip()<EOL>info = {}<EOL>if s[<NUM_LIT:0>] == \"<STR_LIT:->\":<EOL><INDENT>info[\"<STR_LIT:type>\"] = \"<STR_LIT:file>\"<EOL><DEDENT>elif s[<NUM_LIT:0>] == \"<STR_LIT:d>\":<EOL><INDENT>info[\"<STR_LIT:type>\"] = \"<STR_LIT>\"<EOL><DEDENT>elif s[<NUM_LIT:0>] == \"<STR_LIT:l>\":<EOL><INDENT>info[\"<STR_LIT:type>\"] = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>info[\"<STR_LIT:type>\"] = \"<STR_LIT>\"<EOL><DEDENT>info[\"<STR_LIT>\"] = self.parse_unix_mode(s[<NUM_LIT:1>:<NUM_LIT:10>])<EOL>s = s[<NUM_LIT:10>:].lstrip()<EOL>i = s.index(\"<STR_LIT:U+0020>\")<EOL>info[\"<STR_LIT>\"] = s[:i]<EOL>if not info[\"<STR_LIT>\"].isdigit():<EOL><INDENT>raise ValueError<EOL><DEDENT>s = s[i:].lstrip()<EOL>i = s.index(\"<STR_LIT:U+0020>\")<EOL>info[\"<STR_LIT>\"] = s[:i]<EOL>s = s[i:].lstrip()<EOL>i = s.index(\"<STR_LIT:U+0020>\")<EOL>info[\"<STR_LIT>\"] = s[:i]<EOL>s = s[i:].lstrip()<EOL>i = s.index(\"<STR_LIT:U+0020>\")<EOL>info[\"<STR_LIT:size>\"] = s[:i]<EOL>if not info[\"<STR_LIT:size>\"].isdigit():<EOL><INDENT>raise ValueError<EOL><DEDENT>s = s[i:].lstrip()<EOL>info[\"<STR_LIT>\"] = self.parse_ls_date(s[:<NUM_LIT:12>])<EOL>s = s[<NUM_LIT:12>:].strip()<EOL>if info[\"<STR_LIT:type>\"] == \"<STR_LIT>\":<EOL><INDENT>i = s.rindex(\"<STR_LIT>\")<EOL>link_dst = s[i + <NUM_LIT:4>:]<EOL>link_src = s[:i]<EOL>i = -<NUM_LIT:2> if link_dst[-<NUM_LIT:1>] == \"<STR_LIT>\" or link_dst[-<NUM_LIT:1>] == \"<STR_LIT>\" else -<NUM_LIT:1><EOL>info[\"<STR_LIT:type>\"] = \"<STR_LIT>\" if link_dst[i] == \"<STR_LIT:/>\" else \"<STR_LIT:file>\"<EOL>s = link_src<EOL><DEDENT>return pathlib.PurePosixPath(s), info<EOL>", "docstring": "Attempt to parse a LIST line (similar to unix ls utility).\n\n:param b: response line\n:type b: :py:class:`bytes` or :py:class:`str`\n\n:return: (path, info)\n:rtype: (:py:class:`pathlib.PurePosixPath`, :py:class:`dict`)", "id": "f9225:c2:m13"}
{"signature": "@staticmethod<EOL><INDENT>def parse_epsv_response(s):<DEDENT>", "body": "matches = tuple(re.finditer(r\"<STR_LIT>\", s))<EOL>s = matches[-<NUM_LIT:1>].group()<EOL>port = int(s[<NUM_LIT:4>:-<NUM_LIT:2>])<EOL>return None, port<EOL>", "docstring": "Parsing `EPSV` (`message (|||port|)`) response.\n\n:param s: response line\n:type s: :py:class:`str`\n\n:return: (ip, port)\n:rtype: (:py:class:`None`, :py:class:`int`)", "id": "f9225:c2:m7"}
{"signature": "def parse_ls_date(self, s, *, now=None):", "body": "with setlocale(\"<STR_LIT:C>\"):<EOL><INDENT>try:<EOL><INDENT>if now is None:<EOL><INDENT>now = datetime.datetime.now()<EOL><DEDENT>d = datetime.datetime.strptime(s, \"<STR_LIT>\")<EOL>d = d.replace(year=now.year)<EOL>diff = (now - d).total_seconds()<EOL>if diff > HALF_OF_YEAR_IN_SECONDS:<EOL><INDENT>d = d.replace(year=now.year + <NUM_LIT:1>)<EOL><DEDENT>elif diff < -HALF_OF_YEAR_IN_SECONDS:<EOL><INDENT>d = d.replace(year=now.year - <NUM_LIT:1>)<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>d = datetime.datetime.strptime(s, \"<STR_LIT>\")<EOL><DEDENT><DEDENT>return self.format_date_time(d)<EOL>", "docstring": "Parsing dates from the ls unix utility. For example,\n\"Nov 18  1958\" and \"Nov 18 12:29\".\n\n:param s: ls date\n:type s: :py:class:`str`\n\n:rtype: :py:class:`str`", "id": "f9225:c2:m12"}
{"signature": "async def response_writer(self, stream, response_queue):", "body": "while True:<EOL><INDENT>args = await response_queue.get()<EOL>try:<EOL><INDENT>await self.write_response(stream, *args)<EOL><DEDENT>finally:<EOL><INDENT>response_queue.task_done()<EOL><DEDENT><DEDENT>", "docstring": ":py:func:`asyncio.coroutine`\n\nWorker for write_response with current connection. Get data to response\nfrom queue, this is for right order of responses. Exits if received\n:py:class:`None`.\n\n:param stream: command connection stream\n:type connection: :py:class:`aioftp.StreamIO`\n\n:param response_queue:\n:type response_queue: :py:class:`asyncio.Queue`", "id": "f9227:c6:m6"}
{"signature": "async def close(self):", "body": "self.server.close()<EOL>tasks = [self.server.wait_closed()]<EOL>for connection in self.connections.values():<EOL><INDENT>connection._dispatcher.cancel()<EOL>tasks.append(connection._dispatcher)<EOL><DEDENT>logger.info(\"<STR_LIT>\", len(tasks))<EOL>await asyncio.wait(tasks)<EOL>", "docstring": ":py:func:`asyncio.coroutine`\n\nShutdown the server and close all connections.", "id": "f9227:c6:m2"}
{"signature": "async def syst(self, connection, rest):", "body": "connection.response(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>return True<EOL>", "docstring": "Return system type (always returns UNIX type: L8).", "id": "f9227:c10:m32"}
{"signature": "def acquire(self):", "body": "if self.value is not None:<EOL><INDENT>self.value -= <NUM_LIT:1><EOL>if self.value < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "Acquire, decrementing the internal counter by one.", "id": "f9227:c5:m2"}
{"signature": "def get_paths(self, connection, path):", "body": "virtual_path = pathlib.PurePosixPath(path)<EOL>if not virtual_path.is_absolute():<EOL><INDENT>virtual_path = connection.current_directory / virtual_path<EOL><DEDENT>resolved_virtual_path = pathlib.PurePosixPath(\"<STR_LIT:/>\")<EOL>for part in virtual_path.parts[<NUM_LIT:1>:]:<EOL><INDENT>if part == \"<STR_LIT:..>\":<EOL><INDENT>resolved_virtual_path = resolved_virtual_path.parent<EOL><DEDENT>else:<EOL><INDENT>resolved_virtual_path /= part<EOL><DEDENT><DEDENT>base_path = connection.user.base_path<EOL>real_path = base_path / resolved_virtual_path.relative_to(\"<STR_LIT:/>\")<EOL>return real_path, resolved_virtual_path<EOL>", "docstring": "Return *real* and *virtual* paths, resolves \"..\" with \"up\" action.\n*Real* path is path for path_io, when *virtual* deals with\n\"user-view\" and user requests\n\n:param connection: internal options for current connected user\n:type connection: :py:class:`dict`\n\n:param path: received path from user\n:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`\n\n:return: (real_path, virtual_path)\n:rtype: (:py:class:`pathlib.Path`, :py:class:`pathlib.PurePosixPath`)", "id": "f9227:c10:m2"}
{"signature": "@property<EOL><INDENT>def address(self):<DEDENT>", "body": "return self.server_host, self.server_port<EOL>", "docstring": "Server listen socket host and port as :py:class:`tuple`", "id": "f9227:c6:m1"}
{"signature": "@abc.abstractmethod<EOL><INDENT>async def get_user(self, login):<DEDENT>", "body": "", "docstring": ":py:func:`asyncio.coroutine`\n\nGet user and response for USER call\n\n:param login: user's login\n:type login: :py:class:`str`", "id": "f9227:c2:m1"}
{"signature": "def worker(f):", "body": "@functools.wraps(f)<EOL>async def wrapper(cls, connection, rest):<EOL><INDENT>try:<EOL><INDENT>await f(cls, connection, rest)<EOL><DEDENT>except asyncio.CancelledError:<EOL><INDENT>connection.response(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>connection.response(\"<STR_LIT>\", \"<STR_LIT>\")<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Decorator. Abortable worker. If wrapped task will be cancelled by\ndispatcher, decorator will send ftp codes of successful interrupt.\n\n::\n\n    >>> @worker\n    ... async def worker(self, connection, rest):\n    ...     ...", "id": "f9227:m0"}
{"signature": "def locked(self):", "body": "return self.value == <NUM_LIT:0><EOL>", "docstring": "Returns True if semaphore-like can not be acquired.\n\n:rtype: :py:class:`bool`", "id": "f9227:c5:m1"}
{"signature": "async def write_response(self, stream, code, lines=\"<STR_LIT>\", list=False):", "body": "lines = wrap_with_container(lines)<EOL>write = functools.partial(self.write_line, stream)<EOL>if list:<EOL><INDENT>head, *body, tail = lines<EOL>await write(code + \"<STR_LIT:->\" + head)<EOL>for line in body:<EOL><INDENT>await write(\"<STR_LIT:U+0020>\" + line)<EOL><DEDENT>await write(code + \"<STR_LIT:U+0020>\" + tail)<EOL><DEDENT>else:<EOL><INDENT>*body, tail = lines<EOL>for line in body:<EOL><INDENT>await write(code + \"<STR_LIT:->\" + line)<EOL><DEDENT>await write(code + \"<STR_LIT:U+0020>\" + tail)<EOL><DEDENT>", "docstring": ":py:func:`asyncio.coroutine`\n\nComplex method for sending response.\n\n:param stream: command connection stream\n:type stream: :py:class:`aioftp.StreamIO`\n\n:param code: server response code\n:type code: :py:class:`str`\n\n:param lines: line or lines, which are response information\n:type lines: :py:class:`str` or :py:class:`collections.Iterable`\n\n:param list: if true, then lines will be sended without code prefix.\n    This is useful for **LIST** FTP command and some others.\n:type list: :py:class:`bool`", "id": "f9227:c6:m4"}
{"signature": "async def start(self, host=None, port=<NUM_LIT:0>, **kwargs):", "body": "self._start_server_extra_arguments = kwargs<EOL>self.connections = {}<EOL>self.server_host = host<EOL>self.server_port = port<EOL>self.server = await asyncio.start_server(<EOL>self.dispatcher,<EOL>host,<EOL>port,<EOL>ssl=self.ssl,<EOL>**self._start_server_extra_arguments,<EOL>)<EOL>for sock in self.server.sockets:<EOL><INDENT>if sock.family in (socket.AF_INET, socket.AF_INET6):<EOL><INDENT>host, port, *_ = sock.getsockname()<EOL>if not self.server_port:<EOL><INDENT>self.server_port = port<EOL><DEDENT>if not self.server_host:<EOL><INDENT>self.server_host = host<EOL><DEDENT>logger.info(\"<STR_LIT>\", host, port)<EOL><DEDENT><DEDENT>", "docstring": ":py:func:`asyncio.coroutine`\n\nStart server.\n\n:param host: ip address to bind for listening.\n:type host: :py:class:`str`\n\n:param port: port number to bind for listening.\n:type port: :py:class:`int`\n\n:param kwargs: keyword arguments, they passed to\n    :py:func:`asyncio.start_server`", "id": "f9227:c6:m0"}
{"signature": "@classmethod<EOL><INDENT>def from_limits(cls, read_speed_limit=None, write_speed_limit=None):<DEDENT>", "body": "return cls(read=Throttle(limit=read_speed_limit),<EOL>write=Throttle(limit=write_speed_limit))<EOL>", "docstring": "Simple wrapper for creation :py:class:`aioftp.StreamThrottle`\n\n:param read_speed_limit: stream read speed limit in bytes or\n    :py:class:`None` for unlimited\n:type read_speed_limit: :py:class:`int` or :py:class:`None`\n\n:param write_speed_limit: stream write speed limit in bytes or\n    :py:class:`None` for unlimited\n:type write_speed_limit: :py:class:`int` or :py:class:`None`", "id": "f9228:c5:m1"}
{"signature": "async def read(self, count=-<NUM_LIT:1>):", "body": "await self.wait(\"<STR_LIT>\")<EOL>start = _now()<EOL>data = await super().read(count)<EOL>self.append(\"<STR_LIT>\", data, start)<EOL>return data<EOL>", "docstring": ":py:func:`asyncio.coroutine`\n\n:py:meth:`aioftp.StreamIO.read` proxy", "id": "f9228:c6:m3"}
{"signature": "def clone(self):", "body": "return StreamThrottle(<EOL>read=self.read.clone(),<EOL>write=self.write.clone()<EOL>)<EOL>", "docstring": "Clone throttles without memory", "id": "f9228:c5:m0"}
{"signature": "@with_timeout(\"<STR_LIT>\")<EOL><INDENT>async def read(self, count=-<NUM_LIT:1>):<DEDENT>", "body": "return await self.reader.read(count)<EOL>", "docstring": ":py:func:`asyncio.coroutine`\n\nProxy for :py:meth:`asyncio.StreamReader.read`.\n\n:param count: block size for read operation\n:type count: :py:class:`int`", "id": "f9228:c3:m2"}
{"signature": "def append(self, name, data, start):", "body": "for throttle in self.throttles.values():<EOL><INDENT>getattr(throttle, name).append(data, start)<EOL><DEDENT>", "docstring": "Update timeout for all throttles\n\n:param name: name of throttle to append to (\"read\" or \"write\")\n:type name: :py:class:`str`\n\n:param data: bytes of data for count\n:type data: :py:class:`bytes`\n\n:param start: start of read/write time from\n    :py:meth:`asyncio.BaseEventLoop.time`\n:type start: :py:class:`float`", "id": "f9228:c6:m2"}
{"signature": "def iter_by_block(self, count=DEFAULT_BLOCK_SIZE):", "body": "return AsyncStreamIterator(lambda: self.read(count))<EOL>", "docstring": "Read/iterate stream by block.\n\n:rtype: :py:class:`aioftp.AsyncStreamIterator`\n\n::\n\n    >>> async for block in stream.iter_by_block(block_size):\n    ...     ...", "id": "f9228:c6:m9"}
{"signature": "def iter_by_line(self):", "body": "return AsyncStreamIterator(self.readline)<EOL>", "docstring": "Read/iterate stream by line.\n\n:rtype: :py:class:`aioftp.AsyncStreamIterator`\n\n::\n\n    >>> async for line in stream.iter_by_line():\n    ...     ...", "id": "f9228:c6:m8"}
{"signature": "def clone(self):", "body": "return Throttle(limit=self._limit, reset_rate=self.reset_rate)<EOL>", "docstring": "Clone throttle without memory", "id": "f9228:c4:m5"}
{"signature": "@with_timeout(\"<STR_LIT>\")<EOL><INDENT>async def readline(self):<DEDENT>", "body": "return await self.reader.readline()<EOL>", "docstring": ":py:func:`asyncio.coroutine`\n\nProxy for :py:meth:`asyncio.StreamReader.readline`.", "id": "f9228:c3:m1"}
{"signature": "def async_enterable(f):", "body": "@functools.wraps(f)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>class AsyncEnterableInstance:<EOL><INDENT>async def __aenter__(self):<EOL><INDENT>self.context = await f(*args, **kwargs)<EOL>return await self.context.__aenter__()<EOL><DEDENT>async def __aexit__(self, *args, **kwargs):<EOL><INDENT>await self.context.__aexit__(*args, **kwargs)<EOL><DEDENT>def __await__(self):<EOL><INDENT>return f(*args, **kwargs).__await__()<EOL><DEDENT><DEDENT>return AsyncEnterableInstance()<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorator. Bring coroutine result up, so it can be used as async context\n\n::\n\n    >>> async def foo():\n    ...\n    ...     ...\n    ...     return AsyncContextInstance(...)\n    ...\n    ... ctx = await foo()\n    ... async with ctx:\n    ...\n    ...     # do\n\n::\n\n    >>> @async_enterable\n    ... async def foo():\n    ...\n    ...     ...\n    ...     return AsyncContextInstance(...)\n    ...\n    ... async with foo() as ctx:\n    ...\n    ...     # do\n    ...\n    ... ctx = await foo()\n    ... async with ctx:\n    ...\n    ...     # do", "id": "f9228:m3"}
{"signature": "@with_timeout(\"<STR_LIT>\")<EOL><INDENT>async def write(self, data):<DEDENT>", "body": "self.writer.write(data)<EOL>await self.writer.drain()<EOL>", "docstring": ":py:func:`asyncio.coroutine`\n\nCombination of :py:meth:`asyncio.StreamWriter.write` and\n:py:meth:`asyncio.StreamWriter.drain`.\n\n:param data: data to write\n:type data: :py:class:`bytes`", "id": "f9228:c3:m3"}
{"signature": "def stor(ftp=None):", "body": "if ftp is None:<EOL><INDENT>ftp = connect()<EOL>quit = True<EOL><DEDENT>else:<EOL><INDENT>quit = False<EOL><DEDENT>ftp.voidcmd('<STR_LIT>')<EOL>with contextlib.closing(ftp.transfercmd(\"<STR_LIT>\" + TESTFN)) as conn:<EOL><INDENT>chunk = b'<STR_LIT:x>' * BUFFER_LEN<EOL>total_sent = <NUM_LIT:0><EOL>while True:<EOL><INDENT>sent = conn.send(chunk)<EOL>total_sent += sent<EOL>if total_sent >= FILE_SIZE:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>ftp.voidresp()<EOL>if quit:<EOL><INDENT>ftp.quit()<EOL><DEDENT>return ftp<EOL>", "docstring": "Same as ftplib's storbinary() but just sends dummy data\n    instead of reading it from a real file.", "id": "f9248:m7"}
{"signature": "def timethis(what):", "body": "@contextlib.contextmanager<EOL>def benchmark():<EOL><INDENT>timer = time.clock if sys.platform == \"<STR_LIT:win32>\" else time.time<EOL>start = timer()<EOL>yield<EOL>stop = timer()<EOL>res = (stop - start)<EOL>print_bench(what, res, \"<STR_LIT>\")<EOL><DEDENT>if hasattr(what, \"<STR_LIT>\"):<EOL><INDENT>def timed(*args, **kwargs):<EOL><INDENT>with benchmark():<EOL><INDENT>return what(*args, **kwargs)<EOL><DEDENT><DEDENT>return timed<EOL><DEDENT>else:<EOL><INDENT>return benchmark()<EOL><DEDENT>", "docstring": "Utility function for making simple benchmarks (calculates time calls).\n    It can be used either as a context manager or as a decorator.", "id": "f9248:m4"}
{"signature": "def connect():", "body": "ftp_class = ftplib.FTP if not SSL else ftplib.FTP_TLS<EOL>ftp = ftp_class(timeout=TIMEOUT)<EOL>ftp.connect(HOST, PORT)<EOL>ftp.login(USER, PASSWORD)<EOL>if SSL:<EOL><INDENT>ftp.prot_p()  <EOL><DEDENT>return ftp<EOL>", "docstring": "Connect to FTP server, login and return an ftplib.FTP instance.", "id": "f9248:m5"}
{"signature": "def register_memory():", "body": "<EOL>def get_mem(proc):<EOL><INDENT>if os.name == '<STR_LIT>':<EOL><INDENT>mem = proc.memory_info_ex()<EOL>counter = mem.rss<EOL>if '<STR_LIT>' in mem._fields:<EOL><INDENT>counter -= mem.shared<EOL><DEDENT>return counter<EOL><DEDENT>else:<EOL><INDENT>return proc.get_memory_info().rss<EOL><DEDENT><DEDENT>if SERVER_PROC is not None:<EOL><INDENT>mem = get_mem(SERVER_PROC)<EOL>for child in SERVER_PROC.children():<EOL><INDENT>mem += get_mem(child)<EOL><DEDENT>server_memory.append(bytes2human(mem))<EOL><DEDENT>", "docstring": "Register an approximation of memory used by FTP server process\n    and all of its children.", "id": "f9248:m3"}
{"signature": "def human2bytes(s):", "body": "symbols = ('<STR_LIT:B>', '<STR_LIT>', '<STR_LIT:M>', '<STR_LIT>', '<STR_LIT:T>', '<STR_LIT:P>', '<STR_LIT:E>', '<STR_LIT>', '<STR_LIT:Y>')<EOL>letter = s[-<NUM_LIT:1>:].strip().upper()<EOL>num = s[:-<NUM_LIT:1>]<EOL>assert num.isdigit() and letter in symbols, s<EOL>num = float(num)<EOL>prefix = {symbols[<NUM_LIT:0>]: <NUM_LIT:1>}<EOL>for i, s in enumerate(symbols[<NUM_LIT:1>:]):<EOL><INDENT>prefix[s] = <NUM_LIT:1> << (i + <NUM_LIT:1>) * <NUM_LIT:10><EOL><DEDENT>return int(num * prefix[letter])<EOL>", "docstring": ">>> human2bytes('1M')\n1048576\n>>> human2bytes('1G')\n1073741824", "id": "f9248:m2"}
{"signature": "def heappush_max(heap, item):", "body": "heap.append(item)<EOL>_siftdown_max(heap, <NUM_LIT:0>, len(heap) - <NUM_LIT:1>)<EOL>", "docstring": "Push item onto heap, maintaining the heap invariant.", "id": "f9253:m2"}
{"signature": "def delete_rawl_without_commit(self, rawl_id):", "body": "return self.query(\"<STR_LIT>\", rawl_id, commit=False)<EOL>", "docstring": "Test a delete", "id": "f9255:c1:m4"}
{"signature": "def query(self, sql_string, *args, **kwargs):", "body": "commit = None<EOL>columns = None<EOL>if kwargs.get('<STR_LIT>') is not None:<EOL><INDENT>commit = kwargs.pop('<STR_LIT>')<EOL><DEDENT>if kwargs.get('<STR_LIT>') is not None:<EOL><INDENT>columns = kwargs.pop('<STR_LIT>')<EOL><DEDENT>query = self._assemble_simple(sql_string, *args, **kwargs)<EOL>return self._execute(query, commit=commit, working_columns=columns)<EOL>", "docstring": "Execute a DML query \n\n:sql_string:    An SQL string template\n:*args:         Arguments to be passed for query parameters.\n:commit:        Whether or not to commit the transaction after the query\n:returns:       Psycopg2 result", "id": "f9256:c3:m6"}
{"signature": "def insert_dict(self, value_dict, commit=False):", "body": "<EOL>for key in value_dict.keys():<EOL><INDENT>if key not in self.columns:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % key)<EOL><DEDENT><DEDENT>insert_cols = []<EOL>value_set = []<EOL>for col in self.columns:<EOL><INDENT>if col in value_dict:<EOL><INDENT>insert_cols.append(col)<EOL>value_set.append(value_dict[col])<EOL><DEDENT><DEDENT>placeholders = '<STR_LIT:U+002CU+0020>'.join([\"<STR_LIT>\" % x for x in range(<NUM_LIT:1>, len(value_set) + <NUM_LIT:1>)])<EOL>query = self._assemble_with_columns('''<STR_LIT>''' + self.table + '''<STR_LIT>''' + placeholders + '''<STR_LIT>''' + self.pk + '''<STR_LIT:U+0020>''', insert_cols, *value_set)<EOL>result = self._execute(query, commit=commit)<EOL>if len(result) > <NUM_LIT:0>:<EOL><INDENT>if hasattr(result[<NUM_LIT:0>], self.pk):<EOL><INDENT>return getattr(result[<NUM_LIT:0>], self.pk)<EOL><DEDENT>else:<EOL><INDENT>return result[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Execute an INSERT statement using a python dict\n\n:value_dict:    A dictionary representing all the columns(keys) and \n    values that should be part of the INSERT statement\n:commit:        Whether to automatically commit the transaction\n:returns:       Psycopg2 result", "id": "f9256:c3:m8"}
{"signature": "def all(self):", "body": "return self.select(\"<STR_LIT>\" + self.table + \"<STR_LIT:;>\", <EOL>self.columns)<EOL>", "docstring": "Retreive all single record from the table.  Should be implemented but not\nrequired.\n:returns:       List of results", "id": "f9256:c3:m10"}
{"signature": "def _assemble_simple(self, sql_str, *args, **kwargs):", "body": "query_string = sql.SQL(sql_str).format(<EOL>*[sql.Literal(a) for a in args]<EOL>)<EOL>return query_string<EOL>", "docstring": "Format a select statement with specific columns \n\n:sql_str:   An SQL string template\n:*args:     Arguments to use as query parameters.\n:returns:   Psycopg2 compiled query", "id": "f9256:c3:m3"}
{"signature": "def _assemble_with_columns(self, sql_str, columns, *args, **kwargs):", "body": "<EOL>qcols = []<EOL>for col in columns:<EOL><INDENT>if '<STR_LIT:.>' in col:<EOL><INDENT>wlist = col.split('<STR_LIT:.>')<EOL>qcols.append(sql.SQL('<STR_LIT:.>').join([sql.Identifier(x) for x in wlist]))<EOL><DEDENT>else:<EOL><INDENT>qcols.append(sql.Identifier(col))<EOL><DEDENT><DEDENT>query_string = sql.SQL(sql_str).format(<EOL>sql.SQL('<STR_LIT:U+002CU+0020>').join(qcols),<EOL>*[sql.Literal(a) for a in args]<EOL>)<EOL>return query_string<EOL>", "docstring": "Format a select statement with specific columns \n\n:sql_str:   An SQL string template\n:columns:   The columns to be selected and put into {0}\n:*args:     Arguments to use as query parameters.\n:returns:   Psycopg2 compiled query", "id": "f9256:c3:m1"}
{"signature": "def retry(<EOL>exceptions=(Exception,), interval=<NUM_LIT:0>, max_retries=<NUM_LIT:10>, success=None,<EOL>timeout=-<NUM_LIT:1>):", "body": "if not exceptions and success is None:<EOL><INDENT>raise TypeError(<EOL>'<STR_LIT>')<EOL><DEDENT>exceptions = exceptions or (_DummyException,)<EOL>_retries_error_msg = ('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>_timeout_error_msg = '<STR_LIT>'<EOL>@decorator<EOL>def wrapper(func, *args, **kwargs):<EOL><INDENT>signal.signal(<EOL>signal.SIGALRM, _timeout(<EOL>_timeout_error_msg.format(timeout, func.__name__)))<EOL>run_func = functools.partial(func, *args, **kwargs)<EOL>logger = logging.getLogger(func.__module__)<EOL>if max_retries < <NUM_LIT:0>:<EOL><INDENT>iterator = itertools.count()<EOL><DEDENT>else:<EOL><INDENT>iterator = range(max_retries)<EOL><DEDENT>if timeout > <NUM_LIT:0>:<EOL><INDENT>signal.alarm(timeout)<EOL><DEDENT>for num, _ in enumerate(iterator, <NUM_LIT:1>):<EOL><INDENT>try:<EOL><INDENT>result = run_func()<EOL>if success is None or success(result):<EOL><INDENT>signal.alarm(<NUM_LIT:0>)<EOL>return result<EOL><DEDENT><DEDENT>except exceptions:<EOL><INDENT>logger.exception(<EOL>'<STR_LIT>'.format(<EOL>func.__name__))<EOL>if num == max_retries:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>logger.warning(<EOL>'<STR_LIT>'.format(<EOL>func.__name__, interval))<EOL>time.sleep(interval)<EOL><DEDENT>else:<EOL><INDENT>raise MaximumRetriesExceeded(<EOL>_retries_error_msg.format(<EOL>max_retries, interval, func.__name__))<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Decorator to retry a function 'max_retries' amount of times\n\n    :param tuple exceptions: Exceptions to be caught for retries\n    :param int interval: Interval between retries in seconds\n    :param int max_retries: Maximum number of retries to have, if\n        set to -1 the decorator will loop forever\n    :param function success: Function to indicate success criteria\n    :param int timeout: Timeout interval in seconds, if -1 will retry forever\n    
:raises MaximumRetriesExceeded: Maximum number of retries hit without\n        reaching the success criteria\n    :raises TypeError: Both exceptions and success were left None causing the\n        decorator to have no valid exit criteria.\n\n    Example:\n        Use it to decorate a function!\n\n        .. sourcecode:: python\n\n            from retry import retry\n\n            @retry(exceptions=(ArithmeticError,), success=lambda x: x > 0)\n            def foo(bar):\n                if bar < 0:\n                    raise ArithmeticError('testing this')\n                return bar\n            foo(5)\n            # Should return 5\n            foo(-1)\n            # Should raise ArithmeticError\n            foo(0)\n            # Should raise MaximumRetriesExceeded", "id": "f9258:m1"}
{"signature": "def validate_activatable_models():", "body": "for model in get_activatable_models():<EOL><INDENT>activatable_field = next((<EOL>f for f in model._meta.fields<EOL>if f.__class__ == models.BooleanField and f.name == model.ACTIVATABLE_FIELD_NAME<EOL>), None)<EOL>if activatable_field is None:<EOL><INDENT>raise ValidationError((<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(model)<EOL>))<EOL><DEDENT>if not model.ALLOW_CASCADE_DELETE:<EOL><INDENT>for field in model._meta.fields:<EOL><INDENT>if field.__class__ in (models.ForeignKey, models.OneToOneField):<EOL><INDENT>if field.remote_field.on_delete == models.CASCADE:<EOL><INDENT>raise ValidationError((<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(model))<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Raises a ValidationError for any ActivatableModel that has ForeignKeys or OneToOneFields that will\ncause cascading deletions to occur. This function also raises a ValidationError if the activatable\nmodel has not defined a Boolean field with the field name defined by the ACTIVATABLE_FIELD_NAME variable\non the model.", "id": "f9268:m1"}
{"signature": "def save(self, *args, **kwargs):", "body": "current_activable_value = getattr(self, self.ACTIVATABLE_FIELD_NAME)<EOL>is_active_changed = self.id is None or self.__original_activatable_value != current_activable_value<EOL>self.__original_activatable_value = current_activable_value<EOL>ret_val = super(BaseActivatableModel, self).save(*args, **kwargs)<EOL>if is_active_changed:<EOL><INDENT>model_activations_changed.send(self.__class__, instance_ids=[self.id], is_active=current_activable_value)<EOL><DEDENT>if self.activatable_field_updated:<EOL><INDENT>model_activations_updated.send(self.__class__, instance_ids=[self.id], is_active=current_activable_value)<EOL><DEDENT>return ret_val<EOL>", "docstring": "A custom save method that handles figuring out when something is activated or deactivated.", "id": "f9272:c2:m2"}
{"signature": "def does_not_exist(self):", "body": "if not isinstance(self.val, str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if os.path.exists(self.val):<EOL><INDENT>self._err('<STR_LIT>' % self.val)<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is a path and that it does not exist.", "id": "f9307:c0:m68"}
{"signature": "def does_not_contain_entry(self, *args, **kwargs):", "body": "self._check_dict_like(self.val, check_values=False)<EOL>entries = list(args) + [{k:v} for k,v in kwargs.items()]<EOL>if len(entries) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>found = []<EOL>for e in entries:<EOL><INDENT>if type(e) is not dict:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if len(e) != <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>k = next(iter(e))<EOL>if k in self.val and e[k] == self.val[k]:<EOL><INDENT>found.append(e)<EOL><DEDENT><DEDENT>if found:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, self._fmt_items(entries), self._fmt_items(found)))<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is a dict and does not contain the given entry or entries.", "id": "f9307:c0:m61"}
{"signature": "def is_upper(self):", "body": "if not isinstance(self.val, str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if len(self.val) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if self.val != self.val.upper():<EOL><INDENT>self._err('<STR_LIT>' % self.val)<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is non-empty string and all characters are uppercase.", "id": "f9307:c0:m51"}
{"signature": "def is_zero(self):", "body": "self._validate_number()<EOL>return self.is_equal_to(<NUM_LIT:0>)<EOL>", "docstring": "Asserts that val is numeric and equal to zero.", "id": "f9307:c0:m26"}
{"signature": "def does_not_contain(self, *items):", "body": "if len(items) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>elif len(items) == <NUM_LIT:1>:<EOL><INDENT>if items[<NUM_LIT:0>] in self.val:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, items[<NUM_LIT:0>]))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>found = []<EOL>for i in items:<EOL><INDENT>if i in self.val:<EOL><INDENT>found.append(i)<EOL><DEDENT><DEDENT>if found:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, self._fmt_items(items), self._fmt_items(found)))<EOL><DEDENT><DEDENT>return self<EOL>", "docstring": "Asserts that val does not contain the given item or items.", "id": "f9307:c0:m14"}
{"signature": "def is_equal_to(self, other, **kwargs):", "body": "if self._check_dict_like(self.val, check_values=False, return_as_bool=True) andself._check_dict_like(other, check_values=False, return_as_bool=True):<EOL><INDENT>if self._dict_not_equal(self.val, other, ignore=kwargs.get('<STR_LIT:ignore>'), include=kwargs.get('<STR_LIT>')):<EOL><INDENT>self._dict_err(self.val, other, ignore=kwargs.get('<STR_LIT:ignore>'), include=kwargs.get('<STR_LIT>'))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if self.val != other:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, other))<EOL><DEDENT><DEDENT>return self<EOL>", "docstring": "Asserts that val is equal to other.", "id": "f9307:c0:m2"}
{"signature": "def when_called_with(self, *some_args, **some_kwargs):", "body": "if not self.expected:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>self.val(*some_args, **some_kwargs)<EOL><DEDENT>except BaseException as e:<EOL><INDENT>if issubclass(type(e), self.expected):<EOL><INDENT>return AssertionBuilder(str(e), self.description, self.kind)<EOL><DEDENT>else:<EOL><INDENT>self._err('<STR_LIT>' % (<EOL>self.val.__name__,<EOL>self.expected.__name__,<EOL>self._fmt_args_kwargs(*some_args, **some_kwargs),<EOL>type(e).__name__))<EOL><DEDENT><DEDENT>self._err('<STR_LIT>' % (<EOL>self.val.__name__,<EOL>self.expected.__name__,<EOL>self._fmt_args_kwargs(*some_args, **some_kwargs)))<EOL>", "docstring": "Asserts the val callable when invoked with the given args and kwargs raises the expected exception.", "id": "f9307:c0:m76"}
{"signature": "def contains_ignoring_case(self, *items):", "body": "if len(items) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if isinstance(self.val, str_types):<EOL><INDENT>if len(items) == <NUM_LIT:1>:<EOL><INDENT>if not isinstance(items[<NUM_LIT:0>], str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if items[<NUM_LIT:0>].lower() not in self.val.lower():<EOL><INDENT>self._err('<STR_LIT>' % (self.val, items[<NUM_LIT:0>]))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>missing = []<EOL>for i in items:<EOL><INDENT>if not isinstance(i, str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if i.lower() not in self.val.lower():<EOL><INDENT>missing.append(i)<EOL><DEDENT><DEDENT>if missing:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, self._fmt_items(items), self._fmt_items(missing)))<EOL><DEDENT><DEDENT><DEDENT>elif isinstance(self.val, Iterable):<EOL><INDENT>missing = []<EOL>for i in items:<EOL><INDENT>if not isinstance(i, str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>found = False<EOL>for v in self.val:<EOL><INDENT>if not isinstance(v, str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if i.lower() == v.lower():<EOL><INDENT>found = True<EOL>break<EOL><DEDENT><DEDENT>if not found:<EOL><INDENT>missing.append(i)<EOL><DEDENT><DEDENT>if missing:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, self._fmt_items(items), self._fmt_items(missing)))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is string and contains the given item or items.", "id": "f9307:c0:m43"}
{"signature": "def is_equal_to_ignoring_case(self, other):", "body": "if not isinstance(self.val, str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if not isinstance(other, str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if self.val.lower() != other.lower():<EOL><INDENT>self._err('<STR_LIT>' % (self.val, other))<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is case-insensitive equal to other.", "id": "f9307:c0:m42"}
{"signature": "def soft_fail(msg='<STR_LIT>'):", "body": "global _soft_ctx<EOL>if _soft_ctx:<EOL><INDENT>global _soft_err<EOL>_soft_err.append('<STR_LIT>' % msg if msg else '<STR_LIT>')<EOL>return<EOL><DEDENT>fail(msg)<EOL>", "docstring": "Adds error message to soft errors list if within soft assertions context.\n       Either just force test failure with the given message.", "id": "f9307:m5"}
{"signature": "def does_not_contain_duplicates(self):", "body": "try:<EOL><INDENT>if len(self.val) == len(set(self.val)):<EOL><INDENT>return self<EOL><DEDENT><DEDENT>except TypeError:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>self._err('<STR_LIT>' % self.val)<EOL>", "docstring": "Asserts that val is iterable and does not contain any duplicate items.", "id": "f9307:c0:m18"}
{"signature": "def is_negative(self):", "body": "return self.is_less_than(<NUM_LIT:0>)<EOL>", "docstring": "Asserts that val is numeric and less than zero.", "id": "f9307:c0:m37"}
{"signature": "def assert_that(val, description='<STR_LIT>'):", "body": "global _soft_ctx<EOL>if _soft_ctx:<EOL><INDENT>return AssertionBuilder(val, description, '<STR_LIT>')<EOL><DEDENT>return AssertionBuilder(val, description)<EOL>", "docstring": "Factory method for the assertion builder with value to be tested and optional description.", "id": "f9307:m1"}
{"signature": "def is_iterable(self):", "body": "if not isinstance(self.val, Iterable):<EOL><INDENT>self._err('<STR_LIT>')<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is iterable collection.", "id": "f9307:c0:m53"}
{"signature": "def does_not_contain_key(self, *keys):", "body": "self._check_dict_like(self.val, check_values=False, check_getitem=False)<EOL>return self.does_not_contain(*keys)<EOL>", "docstring": "Asserts the val is a dict and does not contain the given key or keys.  Alias for does_not_contain().", "id": "f9307:c0:m57"}
{"signature": "def is_unicode(self):", "body": "if type(self.val) is not unicode:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, type(self.val).__name__))<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is a unicode string.", "id": "f9307:c0:m52"}
{"signature": "def is_not_in(self, *items):", "body": "if len(items) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>for i in items:<EOL><INDENT>if self.val == i:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, self._fmt_items(items)))<EOL><DEDENT><DEDENT><DEDENT>return self<EOL>", "docstring": "Asserts that val is not equal to one of the given items.", "id": "f9307:c0:m22"}
{"signature": "def __init__(self, val, description='<STR_LIT>', kind=None, expected=None):", "body": "self.val = val<EOL>self.description = description<EOL>self.kind = kind<EOL>self.expected = expected<EOL>", "docstring": "Construct the assertion builder.", "id": "f9307:c0:m0"}
{"signature": "def is_inf(self):", "body": "self._validate_number()<EOL>self._validate_real()<EOL>if not math.isinf(self.val):<EOL><INDENT>self._err('<STR_LIT>' % self.val)<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is real number and Inf (infinity).", "id": "f9307:c0:m30"}
{"signature": "def is_not_close_to(self, other, tolerance):", "body": "self._validate_close_to_args(self.val, other, tolerance)<EOL>if self.val >= (other-tolerance) and self.val <= (other+tolerance):<EOL><INDENT>if type(self.val) is datetime.datetime:<EOL><INDENT>tolerance_seconds = tolerance.days * <NUM_LIT> + tolerance.seconds + tolerance.microseconds / <NUM_LIT><EOL>h, rem = divmod(tolerance_seconds, <NUM_LIT>)<EOL>m, s = divmod(rem, <NUM_LIT>)<EOL>self._err('<STR_LIT>' % (self.val.strftime('<STR_LIT>'), other.strftime('<STR_LIT>'), h, m, s))<EOL><DEDENT>else:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, other, tolerance))<EOL><DEDENT><DEDENT>return self<EOL>", "docstring": "Asserts that val is numeric and is not close to other within tolerance.", "id": "f9307:c0:m41"}
{"signature": "def does_not_match(self, pattern):", "body": "if not isinstance(self.val, str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if not isinstance(pattern, str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if len(pattern) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if re.search(pattern, self.val) is not None:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, pattern))<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is string and does not match regex pattern.", "id": "f9307:c0:m47"}
{"signature": "def is_greater_than(self, other):", "body": "self._validate_compareable(other)<EOL>if self.val <= other:<EOL><INDENT>if type(self.val) is datetime.datetime:<EOL><INDENT>self._err('<STR_LIT>' % (self.val.strftime('<STR_LIT>'), other.strftime('<STR_LIT>')))<EOL><DEDENT>else:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, other))<EOL><DEDENT><DEDENT>return self<EOL>", "docstring": "Asserts that val is numeric and is greater than other.", "id": "f9307:c0:m32"}
{"signature": "def is_type_of(self, some_type):", "body": "if type(some_type) is not type andnot issubclass(type(some_type), type):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if type(self.val) is not some_type:<EOL><INDENT>if hasattr(self.val, '<STR_LIT>'):<EOL><INDENT>t = self.val.__name__<EOL><DEDENT>elif hasattr(self.val, '<STR_LIT>'):<EOL><INDENT>t = self.val.__class__.__name__<EOL><DEDENT>else:<EOL><INDENT>t = '<STR_LIT>'<EOL><DEDENT>self._err('<STR_LIT>' % (self.val, t, some_type.__name__))<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is of the given type.", "id": "f9307:c0:m10"}
{"signature": "def starts_with(self, prefix):", "body": "if prefix is None:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if isinstance(self.val, str_types):<EOL><INDENT>if not isinstance(prefix, str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if len(prefix) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not self.val.startswith(prefix):<EOL><INDENT>self._err('<STR_LIT>' % (self.val, prefix))<EOL><DEDENT><DEDENT>elif isinstance(self.val, Iterable):<EOL><INDENT>if len(self.val) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>first = next(iter(self.val))<EOL>if first != prefix:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, prefix))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is string or iterable and starts with prefix.", "id": "f9307:c0:m44"}
{"signature": "def is_child_of(self, parent):", "body": "self.is_file()<EOL>if not isinstance(parent, str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>val_abspath = os.path.abspath(self.val)<EOL>parent_abspath = os.path.abspath(parent)<EOL>if not val_abspath.startswith(parent_abspath):<EOL><INDENT>self._err('<STR_LIT>' % (val_abspath, parent_abspath))<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is an existing path to a file and that file is a child of parent.", "id": "f9307:c0:m72"}
{"signature": "def is_digit(self):", "body": "if not isinstance(self.val, str_types):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if len(self.val) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not self.val.isdigit():<EOL><INDENT>self._err('<STR_LIT>' % self.val)<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is non-empty string and all characters are digits.", "id": "f9307:c0:m49"}
{"signature": "def _validate_real(self):", "body": "if isinstance(self.val, numbers.Real) is False:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Raise TypeError if val is not real number.", "id": "f9307:c0:m25"}
{"signature": "def is_true(self):", "body": "if not self.val:<EOL><INDENT>self._err('<STR_LIT>')<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is true.", "id": "f9307:c0:m6"}
{"signature": "def is_after(self, other):", "body": "if type(self.val) is not datetime.datetime:<EOL><INDENT>raise TypeError('<STR_LIT>' % type(self.val).__name__)<EOL><DEDENT>if type(other) is not datetime.datetime:<EOL><INDENT>raise TypeError('<STR_LIT>' % type(other).__name__)<EOL><DEDENT>if self.val <= other:<EOL><INDENT>self._err('<STR_LIT>' % (self.val.strftime('<STR_LIT>'), other.strftime('<STR_LIT>')))<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is a date and is after other date.", "id": "f9307:c0:m63"}
{"signature": "def contains_value(self, *values):", "body": "self._check_dict_like(self.val, check_getitem=False)<EOL>if len(values) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>missing = []<EOL>for v in values:<EOL><INDENT>if v not in self.val.values():<EOL><INDENT>missing.append(v)<EOL><DEDENT><DEDENT>if missing:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, self._fmt_items(values), self._fmt_items(missing)))<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is a dict and contains the given value or values.", "id": "f9307:c0:m58"}
{"signature": "def is_not_none(self):", "body": "if self.val is None:<EOL><INDENT>self._err('<STR_LIT>')<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is not none.", "id": "f9307:c0:m9"}
{"signature": "def is_not_equal_to(self, other):", "body": "if self.val == other:<EOL><INDENT>self._err('<STR_LIT>' % (self.val, other))<EOL><DEDENT>return self<EOL>", "docstring": "Asserts that val is not equal to other.", "id": "f9307:c0:m3"}
{"signature": "def analog_mapping_response(self, data):", "body": "self.analog_mapping_query_results = data<EOL>", "docstring": "This method handles an analog mapping query response message and stores the results to be retrieved\nvia get_analog_mapping_request_results() in pymata.py\n\n:param data: raw analog mapping data", "id": "f9310:c0:m23"}
{"signature": "def get_analog_response_table(self):", "body": "with self.pymata.data_lock:<EOL><INDENT>data = self.analog_response_table<EOL><DEDENT>return data<EOL>", "docstring": "This method returns the entire analog response table to the caller\n:return: The analog response table.", "id": "f9310:c0:m14"}
{"signature": "def run(self):", "body": "<EOL>self.command_dispatch.update({self.REPORT_VERSION: [self.report_version, <NUM_LIT:2>]})<EOL>self.command_dispatch.update({self.REPORT_FIRMWARE: [self.report_firmware, <NUM_LIT:1>]})<EOL>self.command_dispatch.update({self.ANALOG_MESSAGE: [self.analog_message, <NUM_LIT:2>]})<EOL>self.command_dispatch.update({self.DIGITAL_MESSAGE: [self.digital_message, <NUM_LIT:2>]})<EOL>self.command_dispatch.update({self.ENCODER_DATA: [self.encoder_data, <NUM_LIT:3>]})<EOL>self.command_dispatch.update({self.SONAR_DATA: [self.sonar_data, <NUM_LIT:3>]})<EOL>self.command_dispatch.update({self.STRING_DATA: [self._string_data, <NUM_LIT:2>]})<EOL>self.command_dispatch.update({self.I2C_REPLY: [self.i2c_reply, <NUM_LIT:2>]})<EOL>self.command_dispatch.update({self.CAPABILITY_RESPONSE: [self.capability_response, <NUM_LIT:2>]})<EOL>self.command_dispatch.update({self.PIN_STATE_RESPONSE: [self.pin_state_response, <NUM_LIT:2>]})<EOL>self.command_dispatch.update({self.ANALOG_MAPPING_RESPONSE: [self.analog_mapping_response, <NUM_LIT:2>]})<EOL>self.command_dispatch.update({self.STEPPER_DATA: [self.stepper_version_response, <NUM_LIT:2>]})<EOL>while not self.is_stopped():<EOL><INDENT>if len(self.pymata.command_deque):<EOL><INDENT>data = self.pymata.command_deque.popleft()<EOL>command_data = []<EOL>if data == self.START_SYSEX:<EOL><INDENT>while len(self.pymata.command_deque) == <NUM_LIT:0>:<EOL><INDENT>pass<EOL><DEDENT>sysex_command = self.pymata.command_deque.popleft()<EOL>dispatch_entry = self.command_dispatch.get(sysex_command)<EOL>method = dispatch_entry[<NUM_LIT:0>]<EOL>end_of_sysex = False<EOL>while not end_of_sysex:<EOL><INDENT>while len(self.pymata.command_deque) == <NUM_LIT:0>:<EOL><INDENT>pass<EOL><DEDENT>data = self.pymata.command_deque.popleft()<EOL>if data != self.END_SYSEX:<EOL><INDENT>command_data.append(data)<EOL><DEDENT>else:<EOL><INDENT>end_of_sysex = True<EOL>method(command_data)<EOL><DEDENT><DEDENT>continue<EOL><DEDENT>elif <NUM_LIT> <= 
data <= <NUM_LIT>:<EOL><INDENT>if <NUM_LIT> <= data <= <NUM_LIT>:<EOL><INDENT>port = data & <NUM_LIT><EOL>command_data.append(port)<EOL>data = <NUM_LIT><EOL><DEDENT>elif <NUM_LIT> <= data <= <NUM_LIT>:<EOL><INDENT>pin = data & <NUM_LIT><EOL>command_data.append(pin)<EOL>data = <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>dispatch_entry = self.command_dispatch.get(data)<EOL>method = dispatch_entry[<NUM_LIT:0>]<EOL>num_args = dispatch_entry[<NUM_LIT:1>]<EOL>for i in range(num_args):<EOL><INDENT>while len(self.pymata.command_deque) == <NUM_LIT:0>:<EOL><INDENT>pass<EOL><DEDENT>data = self.pymata.command_deque.popleft()<EOL>command_data.append(data)<EOL><DEDENT>method(command_data)<EOL>continue<EOL><DEDENT><DEDENT>else:<EOL><INDENT>time.sleep(<NUM_LIT>)<EOL><DEDENT><DEDENT>", "docstring": "This method starts the thread that continuously runs to receive and interpret\nmessages coming from Firmata. This must be the last method in this file\nIt also checks the deque for messages to be sent to Firmata.", "id": "f9310:c0:m25"}
{"signature": "def digital_message(self, data):", "body": "port = data[<NUM_LIT:0>]<EOL>port_data = (data[self.MSB] << <NUM_LIT:7>) + data[self.LSB]<EOL>pin = port * <NUM_LIT:8><EOL>for pin in range(pin, min(pin + <NUM_LIT:8>, self.total_pins_discovered)):<EOL><INDENT>with self.pymata.data_lock:<EOL><INDENT>prev_data = self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]<EOL>self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE] = port_data & <NUM_LIT><EOL>if prev_data != port_data & <NUM_LIT>:<EOL><INDENT>callback = self.digital_response_table[pin][self.RESPONSE_TABLE_CALLBACK]<EOL>if callback:<EOL><INDENT>callback([self.pymata.DIGITAL, pin,<EOL>self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]])<EOL><DEDENT><DEDENT>latching_entry = self.digital_latch_table[pin]<EOL>if latching_entry[self.LATCH_STATE] == self.LATCH_ARMED:<EOL><INDENT>if latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.DIGITAL_LATCH_LOW:<EOL><INDENT>if (port_data & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>if latching_entry[self.DIGITAL_LATCH_CALLBACK] is not None:<EOL><INDENT>self.digital_latch_table[pin] = [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, None]<EOL>latching_entry[self.DIGITAL_LATCH_CALLBACK](<EOL>[self.pymata.OUTPUT | self.pymata.LATCH_MODE,<EOL>pin, <NUM_LIT:0>, time.time()])<EOL><DEDENT>else:<EOL><INDENT>updated_latch_entry = latching_entry<EOL>updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED<EOL>updated_latch_entry[self.DIGITAL_LATCHED_DATA] = self.DIGITAL_LATCH_LOW<EOL>updated_latch_entry[self.DIGITAL_TIME_STAMP] = time.time()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>elif latching_entry[self.LATCHED_THRESHOLD_TYPE] == self.DIGITAL_LATCH_HIGH:<EOL><INDENT>if port_data & <NUM_LIT>:<EOL><INDENT>if latching_entry[self.DIGITAL_LATCH_CALLBACK] is not None:<EOL><INDENT>self.digital_latch_table[pin] = [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, 
None]<EOL>latching_entry[self.DIGITAL_LATCH_CALLBACK](<EOL>[self.pymata.OUTPUT | self.pymata.LATCH_MODE,<EOL>pin, <NUM_LIT:1>, time.time()])<EOL><DEDENT>else:<EOL><INDENT>updated_latch_entry = latching_entry<EOL>updated_latch_entry[self.LATCH_STATE] = self.LATCH_LATCHED<EOL>updated_latch_entry[self.DIGITAL_LATCHED_DATA] = self.DIGITAL_LATCH_HIGH<EOL>updated_latch_entry[self.DIGITAL_TIME_STAMP] = time.time()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>port_data >>= <NUM_LIT:1><EOL><DEDENT>", "docstring": "This method handles the incoming digital message.\nIt stores the data values in the digital response table.\nData is stored for all 8 bits of a  digital port\n\n:param data: Message data from Firmata\n\n:return: No return value.", "id": "f9310:c0:m11"}
{"signature": "def encoder_data(self, data):", "body": "prev_val = self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE]<EOL>val = int((data[self.MSB] << <NUM_LIT:7>) + data[self.LSB])<EOL>if val > <NUM_LIT>:<EOL><INDENT>val -= <NUM_LIT><EOL><DEDENT>pin = data[<NUM_LIT:0>]<EOL>with self.pymata.data_lock:<EOL><INDENT>self.digital_response_table[data[self.RESPONSE_TABLE_MODE]][self.RESPONSE_TABLE_PIN_DATA_VALUE] = val<EOL>if prev_val != val:<EOL><INDENT>callback = self.digital_response_table[pin][self.RESPONSE_TABLE_CALLBACK]<EOL>if callback is not None:<EOL><INDENT>callback([self.pymata.ENCODER, pin,<EOL>self.digital_response_table[pin][self.RESPONSE_TABLE_PIN_DATA_VALUE]])<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "This method handles the incoming encoder data message and stores\nthe data in the digital response table.\n\n:param data: Message data from Firmata\n\n:return: No return value.", "id": "f9310:c0:m12"}
{"signature": "def get_digital_latch_data(self, pin):", "body": "with self.pymata.data_lock:<EOL><INDENT>pin_data = self.digital_latch_table[pin]<EOL>current_latch_data = [pin,<EOL>pin_data[self.LATCH_STATE],<EOL>pin_data[self.DIGITAL_LATCHED_DATA],<EOL>pin_data[self.DIGITAL_TIME_STAMP],<EOL>pin_data[self.DIGITAL_LATCH_CALLBACK]]<EOL>if pin_data[self.LATCH_STATE] == self.LATCH_LATCHED:<EOL><INDENT>self.digital_latch_table[pin] = [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, None]<EOL><DEDENT><DEDENT>return current_latch_data<EOL>", "docstring": "This method reads the digital latch table for the specified pin and returns a list that contains:\n[latch_state, latched_data, and time_stamp].\nIf the latch state is latched, the entry in the table is cleared\n\n:param pin:  pin number\n\n:return: [latch_state, latched_data, and time_stamp]", "id": "f9310:c0:m8"}
{"signature": "def __init__(self, pymata):", "body": "<EOL>self.pymata = pymata<EOL>self.last_pin_query_results = []<EOL>self.capability_query_results = []<EOL>self.analog_mapping_query_results = []<EOL>self.total_pins_discovered = <NUM_LIT:0><EOL>self.number_of_analog_pins_discovered = <NUM_LIT:0><EOL>threading.Thread.__init__(self)<EOL>self.daemon = True<EOL>self.stop_event = threading.Event()<EOL>", "docstring": "constructor for CommandHandler class\n\n:param pymata: A reference to the pymata instance.", "id": "f9310:c0:m0"}
{"signature": "def send_command(self, command):", "body": "send_message = \"<STR_LIT>\"<EOL>for i in command:<EOL><INDENT>send_message += chr(i)<EOL><DEDENT>for data in send_message:<EOL><INDENT>self.pymata.transport.write(data)<EOL><DEDENT>", "docstring": "This method is used to transmit a non-sysex command.\n\n:param command: Command to send to firmata includes command + data formatted by caller\n\n:return : No return value.", "id": "f9310:c0:m17"}
{"signature": "def set_analog_latch(self, pin, threshold_type, threshold_value, cb):", "body": "with self.pymata.data_lock:<EOL><INDENT>self.analog_latch_table[pin] = [self.LATCH_ARMED, threshold_type, threshold_value, <NUM_LIT:0>, <NUM_LIT:0>, cb]<EOL><DEDENT>", "docstring": "This method \"arms\" a pin to allow data latching for the pin.\n\n:param pin: Analog pin number (value following an 'A' designator, i.e. A5 = 5\n\n:param threshold_type: ANALOG_LATCH_GT | ANALOG_LATCH_LT  | ANALOG_LATCH_GTE | ANALOG_LATCH_LTE\n\n:param threshold_value: numerical value\n\n:param cb: User provided callback function", "id": "f9310:c0:m5"}
{"signature": "def get_analog_latch_data(self, pin):", "body": "with self.pymata.data_lock:<EOL><INDENT>pin_data = self.analog_latch_table[pin]<EOL>current_latch_data = [pin,<EOL>pin_data[self.LATCH_STATE],<EOL>pin_data[self.ANALOG_LATCHED_DATA],<EOL>pin_data[self.ANALOG_TIME_STAMP],<EOL>pin_data[self.ANALOG_LATCH_CALLBACK]]<EOL>if pin_data[self.LATCH_STATE] == self.LATCH_LATCHED:<EOL><INDENT>self.analog_latch_table[pin] = [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, None]<EOL><DEDENT><DEDENT>return current_latch_data<EOL>", "docstring": "This method reads the analog latch table for the specified pin and returns a list that contains:\n[latch_state, latched_data, and time_stamp].\nIf the latch state is latched, the entry in the table is cleared\n\n:param pin:  pin number\n\n:return: [latch_state, latched_data, and time_stamp]", "id": "f9310:c0:m7"}
{"signature": "def get_stepper_version(self, timeout=<NUM_LIT:20>):", "body": "<EOL>start_time = time.time()<EOL>while self._command_handler.stepper_library_version <= <NUM_LIT:0>:<EOL><INDENT>if time.time() - start_time > timeout:<EOL><INDENT>if self.verbose is True:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>return<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return self._command_handler.stepper_library_version<EOL>", "docstring": "Get the stepper library version number.\n\n:param timeout: specify a time to allow arduino to process and return a version\n\n:return: the stepper version number if it was set.", "id": "f9311:c0:m25"}
{"signature": "def get_pymata_version(self):", "body": "return ['<STR_LIT:2>', '<STR_LIT>']<EOL>", "docstring": "Returns the PyMata version number in a list: [Major Number, Minor Number]\n\n:return:", "id": "f9311:c0:m23"}
{"signature": "def servo_config(self, pin, min_pulse=<NUM_LIT>, max_pulse=<NUM_LIT>):", "body": "self.set_pin_mode(pin, self.SERVO, self.OUTPUT)<EOL>command = [pin, min_pulse & <NUM_LIT>, (min_pulse >> <NUM_LIT:7>) & <NUM_LIT>,<EOL>max_pulse & <NUM_LIT>, (max_pulse >> <NUM_LIT:7>) & <NUM_LIT>]<EOL>self._command_handler.send_sysex(self._command_handler.SERVO_CONFIG, command)<EOL>", "docstring": "Configure a pin as a servo pin. Set pulse min, max in ms.\n\n:param pin: Servo Pin.\n\n:param min_pulse: Min pulse width in ms.\n\n:param max_pulse: Max pulse width in ms.\n\n:return: No return value", "id": "f9311:c0:m40"}
{"signature": "def i2c_config(self, read_delay_time=<NUM_LIT:0>, pin_type=None, clk_pin=<NUM_LIT:0>, data_pin=<NUM_LIT:0>):", "body": "data = [read_delay_time & <NUM_LIT>, (read_delay_time >> <NUM_LIT:7>) & <NUM_LIT>]<EOL>self._command_handler.send_sysex(self._command_handler.I2C_CONFIG, data)<EOL>if pin_type:<EOL><INDENT>if pin_type == self.DIGITAL:<EOL><INDENT>self._command_handler.digital_response_table[clk_pin][self._command_handler.RESPONSE_TABLE_MODE]= self.I2C<EOL>self._command_handler.digital_response_table[data_pin][self._command_handler.RESPONSE_TABLE_MODE]= self.I2C<EOL><DEDENT>else:<EOL><INDENT>self._command_handler.analog_response_table[clk_pin][self._command_handler.RESPONSE_TABLE_MODE]= self.I2C<EOL>self._command_handler.analog_response_table[data_pin][self._command_handler.RESPONSE_TABLE_MODE]= self.I2C<EOL><DEDENT><DEDENT>", "docstring": "NOTE: THIS METHOD MUST BE CALLED BEFORE ANY I2C REQUEST IS MADE\nThis method initializes Firmata for I2c operations.\nIt allows setting of a read time delay amount, and to optionally track\nthe pins as I2C in the appropriate response table.\nTo track pins: Set the pin_type to ANALOG or DIGITAL and provide the pin numbers.\nIf using ANALOG, pin numbers use the analog number, for example A4: use 4.\n\n:param read_delay_time: an optional parameter, default is 0\n\n:param pin_type: ANALOG or DIGITAL to select response table type to track pin numbers\n\n:param clk_pin: pin number (see comment above).\n\n:param data_pin: pin number (see comment above).\n\n:return: No Return Value", "id": "f9311:c0:m26"}
{"signature": "def encoder_config(self, pin_a, pin_b, cb=None):", "body": "data = [pin_a, pin_b]<EOL>self._command_handler.digital_response_table[pin_a][self._command_handler.RESPONSE_TABLE_MODE]= self.ENCODER<EOL>self._command_handler.digital_response_table[pin_a][self._command_handler.RESPONSE_TABLE_CALLBACK] = cb<EOL>self.enable_digital_reporting(pin_a)<EOL>self._command_handler.digital_response_table[pin_b][self._command_handler.RESPONSE_TABLE_MODE]= self.ENCODER<EOL>self._command_handler.digital_response_table[pin_b][self._command_handler.RESPONSE_TABLE_CALLBACK] = cb<EOL>self.enable_digital_reporting(pin_b)<EOL>self._command_handler.send_sysex(self._command_handler.ENCODER_CONFIG, data)<EOL>", "docstring": "This command enables the rotary encoder (2 pin + ground) and will\nenable encoder reporting.\n\nNOTE: This command is not currently part of standard arduino firmata, but is provided for legacy\nsupport of CodeShield on an Arduino UNO.\n\nEncoder data is retrieved by performing a digital_read from pin a (encoder pin 1)\n\n:param pin_a: Encoder pin 1.\n\n:param pin_b: Encoder pin 2.\n\n:param cb: callback function to report encoder changes\n\n:return: No return value", "id": "f9311:c0:m12"}
{"signature": "def stepper_step(self, motor_speed, number_of_steps):", "body": "if number_of_steps > <NUM_LIT:0>:<EOL><INDENT>direction = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>direction = <NUM_LIT:0><EOL><DEDENT>abs_number_of_steps = abs(number_of_steps)<EOL>data = [self.STEPPER_STEP, motor_speed & <NUM_LIT>, (motor_speed >> <NUM_LIT:7>) & <NUM_LIT>, (motor_speed >> <NUM_LIT>) & <NUM_LIT>,<EOL>abs_number_of_steps & <NUM_LIT>, (abs_number_of_steps >> <NUM_LIT:7>) & <NUM_LIT>, direction]<EOL>self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data)<EOL>", "docstring": "Move a stepper motor for the number of steps at the specified speed\n\n:param motor_speed: 21 bits of data to set motor speed\n\n:param number_of_steps: 14 bits for number of steps & direction\n                        positive is forward, negative is reverse", "id": "f9311:c0:m43"}
{"signature": "def pin_state_query(self, pin):", "body": "self._command_handler.send_sysex(self._command_handler.PIN_STATE_QUERY, [pin])<EOL>", "docstring": "This method issues a pin state query command. Data returned is retrieved via\na call to get_pin_state_query_results()\n:param pin: pin number", "id": "f9311:c0:m31"}
{"signature": "def i2c_stop_reading(self, address):", "body": "data = [address, self.I2C_STOP_READING]<EOL>self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data)<EOL>", "docstring": "This method stops an I2C_READ_CONTINUOUSLY operation for the i2c device address specified.\n\n:param address: address of i2c device", "id": "f9311:c0:m29"}
{"signature": "def i2c_write(self, address, *args):", "body": "data = [address, self.I2C_WRITE]<EOL>for item in args:<EOL><INDENT>data.append(item & <NUM_LIT>)<EOL>data.append((item >> <NUM_LIT:7>) & <NUM_LIT>)<EOL><DEDENT>self._command_handler.send_sysex(self._command_handler.I2C_REQUEST, data)<EOL>", "docstring": "Write data to an i2c device.\n\n:param address: i2c device address\n\n:param args: A variable number of bytes to be sent to the device", "id": "f9311:c0:m28"}
{"signature": "def stepper_config(self, steps_per_revolution, stepper_pins):", "body": "data = [self.STEPPER_CONFIGURE, steps_per_revolution & <NUM_LIT>, (steps_per_revolution >> <NUM_LIT:7>) & <NUM_LIT>]<EOL>for pin in range(len(stepper_pins)):<EOL><INDENT>data.append(stepper_pins[pin])<EOL><DEDENT>self._command_handler.send_sysex(self._command_handler.STEPPER_DATA, data)<EOL>", "docstring": "Configure stepper motor prior to operation.\n\n:param steps_per_revolution: number of steps per motor revolution\n\n:param stepper_pins: a list of control pin numbers - either 4 or 2", "id": "f9311:c0:m42"}
{"signature": "def set_digital_latch(self, pin, threshold_type, cb=None):", "body": "if <NUM_LIT:0> <= threshold_type <= <NUM_LIT:1>:<EOL><INDENT>self._command_handler.set_digital_latch(pin, threshold_type, cb)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "This method \"arms\" a digital pin for its data to be latched and saved in the latching table\nIf a callback method is provided, when latching criteria is achieved, the callback function is called\nwith latching data notification. In that case, the latching table is not updated.\n\n:param pin: Digital pin number\n\n:param threshold_type: DIGITAL_LATCH_HIGH | DIGITAL_LATCH_LOW\n\n:param cb: callback function\n\n:return: True if successful, False if parameter data is invalid", "id": "f9311:c0:m37"}
{"signature": "def refresh_report_version(self):", "body": "command = [self._command_handler.REPORT_VERSION]<EOL>self._command_handler.send_command(command)<EOL>", "docstring": "This method will query firmata for the report version.\nRetrieve the report version via a call to get_firmata_version()", "id": "f9311:c0:m33"}
{"signature": "def disable_digital_reporting(self, pin):", "body": "port = pin // <NUM_LIT:8><EOL>command = [self._command_handler.REPORT_DIGITAL + port, self.REPORTING_DISABLE]<EOL>self._command_handler.send_command(command)<EOL>", "docstring": "Disables digital reporting. By turning reporting off for this pin, reporting\nis disabled for all 8 bits in the \"port\" -\n\n:param pin: Pin and all pins for this port\n\n:return: No return value", "id": "f9311:c0:m9"}
{"signature": "def get_capability_query_results(self):", "body": "return self._command_handler.capability_query_results<EOL>", "docstring": "Retrieve the data returned by a previous call to capability_query()\n:return: Raw capability data returned by firmata", "id": "f9311:c0:m17"}
{"signature": "def enable_digital_reporting(self, pin):", "body": "port = pin // <NUM_LIT:8><EOL>command = [self._command_handler.REPORT_DIGITAL + port, self.REPORTING_ENABLE]<EOL>self._command_handler.send_command(command)<EOL>", "docstring": "Enables digital reporting. By turning reporting on for all 8 bits in the \"port\" -\nthis is part of Firmata's protocol specification.\n\n:param pin: Pin and all pins for this port\n\n:return: No return value", "id": "f9311:c0:m11"}
{"signature": "def get_pin_state_query_results(self):", "body": "r_data = self._command_handler.last_pin_query_results<EOL>self._command_handler.last_pin_query_results = []<EOL>return r_data<EOL>", "docstring": "This method returns the results of a previous call to pin_state_query() and then resets\nthe pin state query data to None\n\n:return: Raw pin state query data", "id": "f9311:c0:m22"}
{"signature": "def play_tone(self, pin, tone_command, frequency, duration):", "body": "<EOL>if tone_command == self.TONE_TONE:<EOL><INDENT>if duration:<EOL><INDENT>data = [tone_command, pin, frequency & <NUM_LIT>, (frequency >> <NUM_LIT:7>) & <NUM_LIT>, duration & <NUM_LIT>, (duration >> <NUM_LIT:7>) & <NUM_LIT>]<EOL><DEDENT>else:<EOL><INDENT>data = [tone_command, pin, frequency & <NUM_LIT>, (frequency >> <NUM_LIT:7>) & <NUM_LIT>, <NUM_LIT:0>, <NUM_LIT:0>]<EOL><DEDENT>self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_MODE] =self.TONE<EOL><DEDENT>else:<EOL><INDENT>data = [tone_command, pin]<EOL><DEDENT>self._command_handler.send_sysex(self._command_handler.TONE_PLAY, data)<EOL>", "docstring": "This method will call the Tone library for the selected pin.\nIf the tone command is set to TONE_TONE, then the specified tone will be played.\nElse, if the tone command is TONE_NO_TONE, then any currently playing tone will be disabled.\nIt is intended for a future release of Arduino Firmata\n\n:param pin: Pin number\n\n:param tone_command: Either TONE_TONE, or TONE_NO_TONE\n\n:param frequency: Frequency of tone in hz\n\n:param duration: Duration of tone in milliseconds\n\n:return: No return value", "id": "f9311:c0:m32"}
{"signature": "def extended_analog(self, pin, data):", "body": "analog_data = [pin, data & <NUM_LIT>, (data >> <NUM_LIT:7>) & <NUM_LIT>, (data >> <NUM_LIT>) & <NUM_LIT>]<EOL>self._command_handler.send_sysex(self._command_handler.EXTENDED_ANALOG, analog_data)<EOL>", "docstring": "This method will send an extended data analog output command to the selected pin\n\n:param pin: 0 - 127\n\n:param data: 0 - 0xfffff", "id": "f9311:c0:m13"}
{"signature": "def reset(self):", "body": "<EOL>for pin in range(<NUM_LIT:0>, self._command_handler.total_pins_discovered):<EOL><INDENT>if self._command_handler.digital_response_table[self._command_handler.RESPONSE_TABLE_MODE]== self.PWM:<EOL><INDENT>self.analog_write(pin, <NUM_LIT:0>)<EOL><DEDENT>elif self._command_handler.digital_response_table[self._command_handler.RESPONSE_TABLE_MODE]== self.SERVO:<EOL><INDENT>self.analog_write(pin, <NUM_LIT:0>)<EOL><DEDENT>elif self._command_handler.digital_response_table[self._command_handler.RESPONSE_TABLE_MODE]== self.TONE:<EOL><INDENT>data = [self.TONE_NO_TONE, pin]<EOL>self._command_handler.send_sysex(self._command_handler.TONE_PLAY, data)<EOL><DEDENT>else:<EOL><INDENT>self.digital_write(pin, <NUM_LIT:0>)<EOL><DEDENT><DEDENT>self._command_handler.system_reset()<EOL>", "docstring": "This command sends a reset message to the Arduino. The response tables will be reinitialized\n:return: No return value.", "id": "f9311:c0:m35"}
{"signature": "def refresh_report_firmware(self):", "body": "self._command_handler.send_sysex(self._command_handler.REPORT_FIRMWARE, None)<EOL>", "docstring": "This method will query firmata to report firmware. Retrieve the report via a\ncall to get_firmata_firmware_version()", "id": "f9311:c0:m34"}
{"signature": "def digital_read(self, pin):", "body": "with self.data_lock:<EOL><INDENT>data =self._command_handler.digital_response_table[pin][self._command_handler.RESPONSE_TABLE_PIN_DATA_VALUE]<EOL><DEDENT>return data<EOL>", "docstring": "Retrieve the last digital data value received for the specified pin.\nNOTE: This command will return values for digital, pwm, etc,  pin types\n\n:param pin: Selected pin\n\n:return: The last value entered into the digital response table.", "id": "f9311:c0:m6"}
{"signature": "def set_analog_latch(self, pin, threshold_type, threshold_value, cb=None):", "body": "if self.ANALOG_LATCH_GT <= threshold_type <= self.ANALOG_LATCH_LTE:<EOL><INDENT>if <NUM_LIT:0> <= threshold_value <= <NUM_LIT>:<EOL><INDENT>self._command_handler.set_analog_latch(pin, threshold_type, threshold_value, cb)<EOL>return True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "This method \"arms\" an analog pin for its data to be latched and saved in the latching table\nIf a callback method is provided, when latching criteria is achieved, the callback function is called\nwith latching data notification. In that case, the latching table is not updated.\n\n:param pin: Analog pin number (value following an 'A' designator, i.e. A5 = 5\n\n:param threshold_type: ANALOG_LATCH_GT | ANALOG_LATCH_LT  | ANALOG_LATCH_GTE | ANALOG_LATCH_LTE\n\n:param threshold_value: numerical value - between 0 and 1023\n\n:param cb: callback method\n\n:return: True if successful, False if parameter data is invalid", "id": "f9311:c0:m36"}
{"signature": "def run(self):", "body": "while not self.is_stopped():<EOL><INDENT>try:<EOL><INDENT>if self.arduino.inWaiting():<EOL><INDENT>c = self.arduino.read()<EOL>self.command_deque.append(ord(c))<EOL><DEDENT>else:<EOL><INDENT>time.sleep(<NUM_LIT>)<EOL><DEDENT><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT>except IOError:<EOL><INDENT>self.stop()<EOL><DEDENT><DEDENT>self.close()<EOL>", "docstring": "This method continually runs. If an incoming character is available on the serial port\nit is read and placed on the _command_deque\n@return: Never Returns", "id": "f9312:c0:m6"}
{"signature": "def __init__(self, port_id, command_deque, baud_rate):", "body": "self.port_id = port_id<EOL>self.command_deque = command_deque<EOL>self.baud_rate = baud_rate<EOL>threading.Thread.__init__(self)<EOL>self.daemon = True<EOL>self.arduino = serial.Serial(self.port_id, self.baud_rate,<EOL>timeout=int(self.timeout), writeTimeout=<NUM_LIT:0>)<EOL>self.stop_event = threading.Event()<EOL>if sys.platform == '<STR_LIT>':<EOL><INDENT>self.arduino.nonblocking()<EOL><DEDENT>", "docstring": "Constructor:\n\n:param command_deque: A reference to the deque shared with the _command_handler\n\n:param baud_rate: must match that of Arduino Sketch", "id": "f9312:c0:m0"}
{"signature": "def set_brightness(self, brightness):", "body": "if brightness > <NUM_LIT:15>:<EOL><INDENT>brightness = <NUM_LIT:15><EOL><DEDENT>brightness |= <NUM_LIT><EOL>self.brightness = brightness<EOL>self.firmata.i2c_write(<NUM_LIT>, brightness)<EOL>", "docstring": "Set the brightness level for the entire display\n@param brightness: brightness level (0 -15)", "id": "f9322:c0:m3"}
{"signature": "def output_entire_buffer(self):", "body": "green = <NUM_LIT:0><EOL>red = <NUM_LIT:0><EOL>for row in range(<NUM_LIT:0>, <NUM_LIT:8>):<EOL><INDENT>for col in range(<NUM_LIT:0>, <NUM_LIT:8>):<EOL><INDENT>if self.display_buffer[row][col] == self.LED_GREEN:<EOL><INDENT>green |= <NUM_LIT:1> << col<EOL><DEDENT>elif self.display_buffer[row][col] == self.LED_RED:<EOL><INDENT>red |= <NUM_LIT:1> << col<EOL><DEDENT>elif self.display_buffer[row][col] == self.LED_YELLOW:<EOL><INDENT>green |= <NUM_LIT:1> << col<EOL>red |= <NUM_LIT:1> << col<EOL><DEDENT>elif self.display_buffer[row][col] == self.LED_OFF:<EOL><INDENT>green &= ~(<NUM_LIT:1> << col)<EOL>red &= ~(<NUM_LIT:1> << col)<EOL><DEDENT><DEDENT>self.firmata.i2c_write(<NUM_LIT>, row * <NUM_LIT:2>, <NUM_LIT:0>, green)<EOL>self.firmata.i2c_write(<NUM_LIT>, row * <NUM_LIT:2> + <NUM_LIT:1>, <NUM_LIT:0>, red)<EOL><DEDENT>", "docstring": "Write the entire buffer to the display", "id": "f9322:c0:m6"}
{"signature": "def set_blink_rate(self, b):", "body": "if b > <NUM_LIT:3>:<EOL><INDENT>b = <NUM_LIT:0>  <EOL><DEDENT>self.firmata.i2c_write(self.board_address,<EOL>(self.HT16K33_BLINK_CMD | self.HT16K33_BLINK_DISPLAYON | (b << <NUM_LIT:1>)))<EOL>", "docstring": "Set the user's desired blink rate (0 - 3)\n@param b: blink rate", "id": "f9322:c0:m1"}
{"signature": "def clear_display_buffer(self):", "body": "for row in range(<NUM_LIT:0>, <NUM_LIT:8>):<EOL><INDENT>self.firmata.i2c_write(<NUM_LIT>, row * <NUM_LIT:2>, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>self.firmata.i2c_write(<NUM_LIT>, (row * <NUM_LIT:2>) + <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>for column in range(<NUM_LIT:0>, <NUM_LIT:8>):<EOL><INDENT>self.display_buffer[row][column] = <NUM_LIT:0><EOL><DEDENT><DEDENT>", "docstring": "Set all led's to off.", "id": "f9322:c0:m7"}
{"signature": "def __init__(self, address, blink_rate, brightness):<EOL>", "body": "<EOL>self.firmata = PyMata(\"<STR_LIT>\")<EOL>self.board_address = address<EOL>self.blink_rate = blink_rate<EOL>self.brightness = brightness<EOL>self.clear_display_buffer()<EOL>self.firmata.i2c_config(<NUM_LIT:0>, self.firmata.ANALOG, <NUM_LIT:4>, <NUM_LIT:5>)<EOL>self.oscillator_set(self.OSCILLATOR_ON)<EOL>self.set_blink_rate(self.blink_rate)<EOL>self.set_brightness(self.brightness)<EOL>", "docstring": "@param address: I2C address of the device\n@param blink_rate: desired blink rate\n@param brightness: brightness level for the display", "id": "f9322:c0:m0"}
{"signature": "def set_pixel(self, row, column, color, suppress_write):<EOL>", "body": "if (row < <NUM_LIT:0>) or (row >= <NUM_LIT:8>):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if (column < <NUM_LIT:0>) or (column >= <NUM_LIT:8>):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>self.display_buffer[row][column] = color<EOL>green = <NUM_LIT:0><EOL>red = <NUM_LIT:0><EOL>for col in range(<NUM_LIT:0>, <NUM_LIT:8>):<EOL><INDENT>if self.display_buffer[row][col] == self.LED_GREEN:<EOL><INDENT>green |= <NUM_LIT:1> << col<EOL><DEDENT>elif self.display_buffer[row][col] == self.LED_RED:<EOL><INDENT>red |= <NUM_LIT:1> << col<EOL><DEDENT>elif self.display_buffer[row][col] == self.LED_YELLOW:<EOL><INDENT>green |= <NUM_LIT:1> << col<EOL>red |= <NUM_LIT:1> << col<EOL><DEDENT>elif self.display_buffer[row][col] == self.LED_OFF:<EOL><INDENT>green &= ~(<NUM_LIT:1> << col)<EOL>red &= ~(<NUM_LIT:1> << col)<EOL><DEDENT><DEDENT>if not suppress_write:<EOL><INDENT>self.firmata.i2c_write(<NUM_LIT>, row * <NUM_LIT:2>, <NUM_LIT:0>, green)<EOL>self.firmata.i2c_write(<NUM_LIT>, row * <NUM_LIT:2> + <NUM_LIT:1>, <NUM_LIT:0>, red)<EOL><DEDENT>", "docstring": "@param row: pixel row number\n@param column: pix column number\n@param color: pixel color (yellow is both red and green both on)\n@param suppress_write: if true, just sets the internal data structure, else writes out the pixel to the display", "id": "f9322:c0:m4"}
{"signature": "def set_bit_map(self, shape, color):", "body": "for row in range(<NUM_LIT:0>, <NUM_LIT:8>):<EOL><INDENT>data = shape[row]<EOL>bit_mask = <NUM_LIT><EOL>for column in range(<NUM_LIT:0>, <NUM_LIT:8>):<EOL><INDENT>if data & bit_mask:<EOL><INDENT>self.set_pixel(row, column, color, True)<EOL><DEDENT>bit_mask >>= <NUM_LIT:1><EOL><DEDENT><DEDENT>self.output_entire_buffer()<EOL>", "docstring": "Populate the bit map with the supplied \"shape\" and color\nand then write the entire bitmap to the display\n@param shape: pattern to display\n@param color: color for the pattern", "id": "f9322:c0:m5"}
{"signature": "def tearDown(self):", "body": "del mapped['<STR_LIT>']<EOL>del mapped['<STR_LIT>']<EOL>", "docstring": "Remove definitions of the built functions", "id": "f9339:c0:m1"}
{"signature": "def _get_catalysts_in_reaction(reaction: Reaction) -> Set[BaseAbundance]:", "body": "return {<EOL>reactant<EOL>for reactant in reaction.reactants<EOL>if reactant in reaction.products<EOL>}<EOL>", "docstring": "Return nodes that are both in reactants and reactions in a reaction.", "id": "f9353:m4"}
{"signature": "def list_abundance_cartesian_expansion(graph: BELGraph) -> None:", "body": "for u, v, k, d in list(graph.edges(keys=True, data=True)):<EOL><INDENT>if CITATION not in d:<EOL><INDENT>continue<EOL><DEDENT>if isinstance(u, ListAbundance) and isinstance(v, ListAbundance):<EOL><INDENT>for u_member, v_member in itt.product(u.members, v.members):<EOL><INDENT>graph.add_qualified_edge(<EOL>u_member, v_member,<EOL>relation=d[RELATION],<EOL>citation=d.get(CITATION),<EOL>evidence=d.get(EVIDENCE),<EOL>annotations=d.get(ANNOTATIONS),<EOL>)<EOL><DEDENT><DEDENT>elif isinstance(u, ListAbundance):<EOL><INDENT>for member in u.members:<EOL><INDENT>graph.add_qualified_edge(<EOL>member, v,<EOL>relation=d[RELATION],<EOL>citation=d.get(CITATION),<EOL>evidence=d.get(EVIDENCE),<EOL>annotations=d.get(ANNOTATIONS),<EOL>)<EOL><DEDENT><DEDENT>elif isinstance(v, ListAbundance):<EOL><INDENT>for member in v.members:<EOL><INDENT>graph.add_qualified_edge(<EOL>u, member,<EOL>relation=d[RELATION],<EOL>citation=d.get(CITATION),<EOL>evidence=d.get(EVIDENCE),<EOL>annotations=d.get(ANNOTATIONS),<EOL>)<EOL><DEDENT><DEDENT><DEDENT>_remove_list_abundance_nodes(graph)<EOL>", "docstring": "Expand all list abundances to simple subject-predicate-object networks.", "id": "f9353:m2"}
{"signature": "def flatten_list_abundance(node: ListAbundance) -> ListAbundance:", "body": "return node.__class__(list(chain.from_iterable(<EOL>(<EOL>flatten_list_abundance(member).members<EOL>if isinstance(member, ListAbundance) else<EOL>[member]<EOL>)<EOL>for member in node.members<EOL>)))<EOL>", "docstring": "Flattens the complex or composite abundance.", "id": "f9353:m0"}
{"signature": "def _reaction_cartesion_expansion_unqualified_helper(<EOL>graph: BELGraph,<EOL>u: BaseEntity,<EOL>v: BaseEntity,<EOL>d: dict,<EOL>) -> None:", "body": "if isinstance(u, Reaction) and isinstance(v, Reaction):<EOL><INDENT>enzymes = _get_catalysts_in_reaction(u) | _get_catalysts_in_reaction(v)<EOL>for reactant, product in chain(itt.product(u.reactants, u.products),<EOL>itt.product(v.reactants, v.products)):<EOL><INDENT>if reactant in enzymes or product in enzymes:<EOL><INDENT>continue<EOL><DEDENT>graph.add_unqualified_edge(<EOL>reactant, product, INCREASES<EOL>)<EOL><DEDENT>for product, reactant in itt.product(u.products, u.reactants):<EOL><INDENT>if reactant in enzymes or product in enzymes:<EOL><INDENT>continue<EOL><DEDENT>graph.add_unqualified_edge(<EOL>product, reactant, d[RELATION],<EOL>)<EOL><DEDENT><DEDENT>elif isinstance(u, Reaction):<EOL><INDENT>enzymes = _get_catalysts_in_reaction(u)<EOL>for product in u.products:<EOL><INDENT>if product in enzymes:<EOL><INDENT>continue<EOL><DEDENT>if v not in u.products and v not in u.reactants:<EOL><INDENT>graph.add_unqualified_edge(<EOL>product, v, INCREASES<EOL>)<EOL><DEDENT>for reactant in u.reactants:<EOL><INDENT>graph.add_unqualified_edge(<EOL>reactant, product, INCREASES<EOL>)<EOL><DEDENT><DEDENT><DEDENT>elif isinstance(v, Reaction):<EOL><INDENT>enzymes = _get_catalysts_in_reaction(v)<EOL>for reactant in v.reactants:<EOL><INDENT>if reactant in enzymes:<EOL><INDENT>continue<EOL><DEDENT>if u not in v.products and u not in v.reactants:<EOL><INDENT>graph.add_unqualified_edge(<EOL>u, reactant, INCREASES<EOL>)<EOL><DEDENT>for product in v.products:<EOL><INDENT>graph.add_unqualified_edge(<EOL>reactant, product, INCREASES<EOL>)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Helper to deal with cartension expansion in unqualified edges.", "id": "f9353:m3"}
{"signature": "def remove_reified_nodes(graph: BELGraph) -> None:", "body": "_remove_list_abundance_nodes(graph)<EOL>_remove_reaction_nodes(graph)<EOL>", "docstring": "Remove complex nodes.", "id": "f9353:m6"}
{"signature": "def make_pubmed_abstract_group(pmids: Iterable[Union[str, int]]) -> Iterable[str]:", "body": "for pmid in set(pmids):<EOL><INDENT>yield '<STR_LIT>'<EOL>res = requests.get(title_url_fmt.format(pmid))<EOL>title = res.content.decode('<STR_LIT:utf-8>').strip()<EOL>yield '<STR_LIT>'.format(title, pmid)<EOL>res = requests.get(abstract_url_fmt.format(pmid))<EOL>abstract = res.content.decode('<STR_LIT:utf-8>').strip()<EOL>yield '<STR_LIT>'.format(abstract)<EOL>yield '<STR_LIT>'<EOL><DEDENT>", "docstring": "Build a skeleton for the citations' statements.\n\n    :param pmids: A list of PubMed identifiers\n    :return: An iterator over the lines of the citation section", "id": "f9354:m0"}
{"signature": "def get_entrez_gene_data(entrez_ids: Iterable[Union[str, int]]):", "body": "url = PUBMED_GENE_QUERY_URL.format('<STR_LIT:U+002C>'.join(str(x).strip() for x in entrez_ids))<EOL>response = requests.get(url)<EOL>tree = ElementTree.fromstring(response.content)<EOL>return {<EOL>element.attrib['<STR_LIT>']: {<EOL>'<STR_LIT>': _sanitize(element.find('<STR_LIT>').text),<EOL>'<STR_LIT:description>': element.find('<STR_LIT>').text<EOL>}<EOL>for element in tree.findall('<STR_LIT>')<EOL>}<EOL>", "docstring": "Get gene info from Entrez.", "id": "f9354:m2"}
{"signature": "def make_pubmed_gene_group(entrez_ids: Iterable[Union[str, int]]) -> Iterable[str]:", "body": "url = PUBMED_GENE_QUERY_URL.format('<STR_LIT:U+002C>'.join(str(x).strip() for x in entrez_ids))<EOL>response = requests.get(url)<EOL>tree = ElementTree.fromstring(response.content)<EOL>for x in tree.findall('<STR_LIT>'):<EOL><INDENT>yield '<STR_LIT>'.format(x.find('<STR_LIT>').text)<EOL>yield '<STR_LIT>'.format(x.attrib['<STR_LIT>'])<EOL>yield '<STR_LIT>'.format(x.find('<STR_LIT>').text.strip().replace('<STR_LIT:\\n>', '<STR_LIT>'))<EOL>yield '<STR_LIT>'<EOL><DEDENT>", "docstring": "Builds a skeleton for gene summaries\n\n    :param entrez_ids: A list of Entrez Gene identifiers to query the PubMed service\n    :return: An iterator over statement lines for NCBI Entrez Gene summaries", "id": "f9354:m3"}
{"signature": "def lint_file(in_file, out_file=None):", "body": "for line in in_file:<EOL><INDENT>print(line.strip(), file=out_file)<EOL><DEDENT>", "docstring": "Helps remove extraneous whitespace from the lines of a file\n\n    :param file in_file: A readable file or file-like\n    :param file out_file: A writable file or file-like", "id": "f9356:m0"}
{"signature": "@in_place_transformation<EOL>def remove_unweighted_leaves(graph: BELGraph, key: Optional[str] = None) -> None:", "body": "unweighted_leaves = list(get_unweighted_upstream_leaves(graph, key=key))<EOL>graph.remove_nodes_from(unweighted_leaves)<EOL>", "docstring": "Remove nodes that are leaves and that don't have a weight (or other key) attribute set.\n\n    :param graph: A BEL graph\n    :param key: The key in the node data dictionary representing the experimental data. Defaults to\n     :data:`pybel_tools.constants.WEIGHT`.", "id": "f9357:m3"}
{"signature": "def get_unweighted_upstream_leaves(graph: BELGraph, key: Optional[str] = None) -> Iterable[BaseEntity]:", "body": "if key is None:<EOL><INDENT>key = WEIGHT<EOL><DEDENT>return filter_nodes(graph, [node_is_upstream_leaf, data_missing_key_builder(key)])<EOL>", "docstring": "Get nodes with no incoming edges, one outgoing edge, and without the given key in its data dictionary.\n\n    .. seealso :: :func:`data_does_not_contain_key_builder`\n\n    :param graph: A BEL graph\n    :param key: The key in the node data dictionary representing the experimental data. Defaults to\n     :data:`pybel_tools.constants.WEIGHT`.\n    :return: An iterable over leaves (nodes with an in-degree of 0) that don't have the given annotation", "id": "f9357:m2"}
{"signature": "@in_place_transformation<EOL>def remove_unweighted_sources(graph: BELGraph, key: Optional[str] = None) -> None:", "body": "nodes = list(get_unweighted_sources(graph, key=key))<EOL>graph.remove_nodes_from(nodes)<EOL>", "docstring": "Prune unannotated nodes on the periphery of the sub-graph.\n\n    :param graph: A BEL graph\n    :param key: The key in the node data dictionary representing the experimental data. Defaults to\n     :data:`pybel_tools.constants.WEIGHT`.", "id": "f9357:m6"}
{"signature": "def get_pair_tuple(a: BaseEntity, b: BaseEntity) -> Tuple[str, str, str, str]:", "body": "return a.as_bel(), a.sha512, b.as_bel(), b.sha512<EOL>", "docstring": "Get the pair as a tuple of BEL/hashes.", "id": "f9359:m2"}
{"signature": "def get_network_summary_dict(graph: BELGraph) -> Mapping:", "body": "return dict(<EOL>function_count=count_functions(graph),<EOL>modifications_count=get_modifications_count(graph),<EOL>relation_count=count_relations(graph),<EOL>authors_count=count_authors(graph).most_common(<NUM_LIT:15>),<EOL>variants_count=count_variants(graph),<EOL>namespaces_count=count_namespaces(graph),<EOL>hub_data={<EOL>(<EOL>node.name or node.identifier<EOL>if NAME in node or IDENTIFIER in node else<EOL>str(node)<EOL>): degree<EOL>for node, degree in get_top_hubs(graph, n=<NUM_LIT:15>)<EOL>},<EOL>disease_data={<EOL>(<EOL>node.name or node.identifier<EOL>if NAME in node or IDENTIFIER in node else<EOL>str(node)<EOL>): count<EOL>for node, count in get_top_pathologies(graph, n=<NUM_LIT:15>)<EOL>},<EOL>regulatory_pairs=[<EOL>get_pair_tuple(u, v)<EOL>for u, v in get_regulatory_pairs(graph)<EOL>],<EOL>unstable_pairs=list(itt.chain(<EOL>(get_pair_tuple(u, v) + ('<STR_LIT>',) for u, v, in get_chaotic_pairs(graph)),<EOL>(get_pair_tuple(u, v) + ('<STR_LIT>',) for u, v, in get_dampened_pairs(graph)),<EOL>)),<EOL>contradictory_pairs=[<EOL>get_pair_tuple(u, v) + (relation,)<EOL>for u, v, relation in get_contradiction_summary(graph)<EOL>],<EOL>contradictory_triplets=list(itt.chain(<EOL>(get_triplet_tuple(a, b, c) + ('<STR_LIT>',) for a, b, c in<EOL>get_separate_unstable_correlation_triples(graph)),<EOL>(get_triplet_tuple(a, b, c) + ('<STR_LIT>',) for a, b, c in get_mutually_unstable_correlation_triples(graph)),<EOL>(get_triplet_tuple(a, b, c) + ('<STR_LIT>',) for a, b, c in get_jens_unstable(graph)),<EOL>(get_triplet_tuple(a, b, c) + ('<STR_LIT>',) for a, b, c in get_increase_mismatch_triplets(graph)),<EOL>(get_triplet_tuple(a, b, c) + ('<STR_LIT>',) for a, b, c in get_decrease_mismatch_triplets(graph)),<EOL>)),<EOL>unstable_triplets=list(itt.chain(<EOL>(get_triplet_tuple(a, b, c) + ('<STR_LIT>',) for a, b, c in get_chaotic_triplets(graph)),<EOL>(get_triplet_tuple(a, b, c) + ('<STR_LIT>',) 
for a, b, c in get_dampened_triplets(graph)),<EOL>)),<EOL>causal_pathologies=sorted({<EOL>get_pair_tuple(u, v) + (graph[u][v][k][RELATION],)<EOL>for u, v, k in filter_edges(graph, has_pathology_causal)<EOL>}),<EOL>undefined_namespaces=get_undefined_namespaces(graph),<EOL>undefined_annotations=get_undefined_annotations(graph),<EOL>namespaces_with_incorrect_names=get_namespaces_with_incorrect_names(graph),<EOL>unused_namespaces=get_unused_namespaces(graph),<EOL>unused_annotations=get_unused_annotations(graph),<EOL>unused_list_annotation_values=get_unused_list_annotation_values(graph),<EOL>naked_names=get_naked_names(graph),<EOL>error_count=count_error_types(graph),<EOL>error_groups=get_most_common_errors(graph),<EOL>syntax_errors=get_syntax_errors(graph),<EOL>citation_years=get_citation_years(graph),<EOL>confidence_count=count_confidences(graph),<EOL>)<EOL>", "docstring": "Create a summary dictionary.", "id": "f9359:m1"}
{"signature": "def to_html(graph: BELGraph) -> str:", "body": "context = get_network_summary_dict(graph)<EOL>summary_dict = graph.summary_dict()<EOL>citation_years = context['<STR_LIT>']<EOL>function_count = context['<STR_LIT>']<EOL>relation_count = context['<STR_LIT>']<EOL>error_count = context['<STR_LIT>']<EOL>transformations_count = context['<STR_LIT>']<EOL>hub_data = context['<STR_LIT>']<EOL>disease_data = context['<STR_LIT>']<EOL>authors_count = context['<STR_LIT>']<EOL>variants_count = context['<STR_LIT>']<EOL>namespaces_count = context['<STR_LIT>']<EOL>confidence_count = context['<STR_LIT>']<EOL>confidence_data = [<EOL>(label, confidence_count.get(label, <NUM_LIT:0>))<EOL>for label in ('<STR_LIT:None>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>]<EOL>template = environment.get_template('<STR_LIT>')<EOL>return template.render(<EOL>graph=graph,<EOL>chart_1_data=prepare_c3(function_count, '<STR_LIT>'),<EOL>chart_6_data=prepare_c3(namespaces_count, '<STR_LIT>'),<EOL>chart_5_data=prepare_c3(variants_count, '<STR_LIT>'),<EOL>number_variants=sum(variants_count.values()),<EOL>number_namespaces=len(namespaces_count),<EOL>chart_2_data=prepare_c3(relation_count, '<STR_LIT>'),<EOL>chart_4_data=prepare_c3(transformations_count, '<STR_LIT>') if transformations_count else None,<EOL>number_transformations=sum(transformations_count.values()),<EOL>chart_3_data=prepare_c3(error_count, '<STR_LIT>') if error_count else None,<EOL>chart_7_data=prepare_c3(hub_data, '<STR_LIT>'),<EOL>chart_9_data=prepare_c3(disease_data, '<STR_LIT>') if disease_data else None,<EOL>chart_authors_count=prepare_c3(authors_count, '<STR_LIT>'),<EOL>chart_10_data=prepare_c3_time_series(citation_years, '<STR_LIT>') if citation_years else None,<EOL>chart_confidence_count=prepare_c3(confidence_data, '<STR_LIT>'),<EOL>summary_dict=summary_dict,<EOL>**context<EOL>)<EOL>", "docstring": "Render the graph as an HTML string.\n\n    Common usage may involve writing to a file like:\n\n    
>>> from pybel.examples import sialic_acid_graph\n    >>> with open('html_output.html', 'w') as file:\n    ...     print(to_html(sialic_acid_graph), file=file)", "id": "f9359:m0"}
{"signature": "def to_html(graph: BELGraph, chart: Optional[str] = None) -> str:", "body": "with open(os.path.join(HERE, '<STR_LIT>'), '<STR_LIT>') as f:<EOL><INDENT>html_template = Template(f.read())<EOL><DEDENT>return html_template.render(**_get_context(graph, chart=chart))<EOL>", "docstring": "Render the graph as an HTML string.\n\n    Common usage may involve writing to a file like:\n\n    >>> from pybel.examples import sialic_acid_graph\n    >>> with open('ideogram_output.html', 'w') as file:\n    ...     print(to_html(sialic_acid_graph), file=file)", "id": "f9363:m1"}
{"signature": "def to_jupyter(graph: BELGraph, chart: Optional[str] = None) -> Javascript:", "body": "with open(os.path.join(HERE, '<STR_LIT>'), '<STR_LIT>') as f:<EOL><INDENT>js_template = Template(f.read())<EOL><DEDENT>return Javascript(js_template.render(**_get_context(graph, chart=chart)))<EOL>", "docstring": "Render the graph as JavaScript in a Jupyter Notebook.", "id": "f9363:m0"}
{"signature": "def _generate_id() -> str:", "body": "return '<STR_LIT>'.join(random.sample('<STR_LIT>', <NUM_LIT:16>))<EOL>", "docstring": "Generate a random string of letters.", "id": "f9363:m3"}
{"signature": "@transformation<EOL>def get_subgraph_by_node_search(graph: BELGraph, query: Strings) -> BELGraph:", "body": "nodes = search_node_names(graph, query)<EOL>return get_subgraph_by_induction(graph, nodes)<EOL>", "docstring": "Get a sub-graph induced over all nodes matching the query string.\n\n    :param graph: A BEL Graph\n    :param query: A query string or iterable of query strings for node names\n\n    Thinly wraps :func:`search_node_names` and :func:`get_subgraph_by_induction`.", "id": "f9365:m2"}
{"signature": "@transformation<EOL>def get_largest_component(graph: BELGraph) -> BELGraph:", "body": "biggest_component_nodes = max(nx.weakly_connected_components(graph), key=len)<EOL>return subgraph(graph, biggest_component_nodes)<EOL>", "docstring": "Get the giant component of a graph.", "id": "f9365:m3"}
{"signature": "def search_node_hgnc_names(graph, query):", "body": "return search_node_namespace_names(graph, query, namespace='<STR_LIT>')<EOL>", "docstring": "Search for nodes with the HGNC namespace and whose names containing a given string(s).\n\n    :param pybel.BELGraph graph: A BEL graph\n    :param query: The search query\n    :type query: str or iter[str]\n    :return: An iterator over nodes whose names match the search query\n    :rtype: iter", "id": "f9366:m2"}
{"signature": "def find_root_in_path(graph, path_nodes):", "body": "path_graph = graph.subgraph(path_nodes)<EOL>node_in_degree_tuple = sorted([(n, d) for n, d in path_graph.in_degree().items()], key=itemgetter(<NUM_LIT:1>))<EOL>node_out_degree_tuple = sorted([(n, d) for n, d in path_graph.out_degree().items()], key=itemgetter(<NUM_LIT:1>),<EOL>reverse=True)<EOL>tied_root_index = <NUM_LIT:0><EOL>for i in range(<NUM_LIT:0>, (len(node_in_degree_tuple) - <NUM_LIT:1>)):<EOL><INDENT>if node_in_degree_tuple[i][<NUM_LIT:1>] < node_in_degree_tuple[i + <NUM_LIT:1>][<NUM_LIT:1>]:<EOL><INDENT>tied_root_index = i<EOL>break<EOL><DEDENT><DEDENT>if tied_root_index != <NUM_LIT:0>:<EOL><INDENT>root_tuple = max(node_out_degree_tuple[:tied_root_index], key=itemgetter(<NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>root_tuple = node_in_degree_tuple[<NUM_LIT:0>]<EOL><DEDENT>return path_graph, root_tuple[<NUM_LIT:0>]<EOL>", "docstring": "Find the 'root' of the path -> The node with the lowest out degree, if multiple:\n         root is the one with the highest out degree among those with lowest out degree\n\n    :param pybel.BELGraph graph: A BEL Graph\n    :param list[tuple] path_nodes: A list of nodes in their order in a path\n    :return: A pair of the graph: graph of the path and the root node\n    :rtype: tuple[pybel.BELGraph,tuple]", "id": "f9367:m5"}
{"signature": "@lru_cache(maxsize=None)<EOL>def get_walks_exhaustive(graph, node, length):", "body": "if <NUM_LIT:0> == length:<EOL><INDENT>return (node,),<EOL><DEDENT>return tuple(<EOL>(node, key) + path<EOL>for neighbor in graph.edge[node]<EOL>for path in get_walks_exhaustive(graph, neighbor, length - <NUM_LIT:1>)<EOL>if node not in path<EOL>for key in graph.edge[node][neighbor]<EOL>)<EOL>", "docstring": "Gets all walks under a given length starting at a given node\n\n    :param networkx.Graph graph: A graph\n    :param node: Starting node\n    :param int length: The length of walks to get\n    :return: A list of paths\n    :rtype: list[tuple]", "id": "f9369:m1"}
{"signature": "def match_complex_metapath(graph, node, complex_metapath):", "body": "raise NotImplementedError<EOL>", "docstring": "Matches a complex metapath starting at the given node\n\n    :param pybel.BELGraph graph: A BEL graph\n    :param tuple node: A BEL node\n    :param list[str] complex_metapath: An iterable of alternating BEL nodes and relations\n    :return: An iterable over paths from the node matching the metapath\n    :rtype: iter[tuple]", "id": "f9369:m4"}
{"signature": "def match_simple_metapath(graph, node, simple_metapath):", "body": "if <NUM_LIT:0> == len(simple_metapath):<EOL><INDENT>yield node,<EOL><DEDENT>else:<EOL><INDENT>for neighbor in graph.edges[node]:<EOL><INDENT>if graph.nodes[neighbor][FUNCTION] == simple_metapath[<NUM_LIT:0>]:<EOL><INDENT>for path in match_simple_metapath(graph, neighbor, simple_metapath[<NUM_LIT:1>:]):<EOL><INDENT>if node not in path:<EOL><INDENT>yield (node,) + path<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Matches a simple metapath starting at the given node\n\n    :param pybel.BELGraph graph: A BEL graph\n    :param tuple node: A BEL node\n    :param list[str] simple_metapath: A list of BEL Functions\n    :return: An iterable over paths from the node matching the metapath\n    :rtype: iter[tuple]", "id": "f9369:m2"}
{"signature": "def convert_path_to_metapath(graph, nodes):", "body": "return [<EOL>graph.node[node][FUNCTION]<EOL>for node in nodes<EOL>]<EOL>", "docstring": "Converts a list of nodes to their corresponding functions\n\n    :param list[tuple] nodes: A list of BEL node tuples\n    :rtype: list[str]", "id": "f9369:m0"}
{"signature": "def convert_simple_walk(graph, simple_walk):", "body": "return [<EOL>graph.nodes[node][FUNCTION]<EOL>for node in simple_walk<EOL>]<EOL>", "docstring": "Converts a walk into a sequence of BEL functions\n\n    :param pybel.BELGraph graph: A BEL graph\n    :param iter[tuple] simple_walk: An iterable of BEL nodes\n    :return: A list of BEL functions of the walk\n    :rtype: list[str]", "id": "f9369:m3"}
{"signature": "def group_nodes_by_annotation_filtered(graph: BELGraph,<EOL>node_predicates: NodePredicates = None,<EOL>annotation: str = '<STR_LIT>',<EOL>) -> Mapping[str, Set[BaseEntity]]:", "body": "node_filter = concatenate_node_predicates(node_predicates)<EOL>return {<EOL>key: {<EOL>node<EOL>for node in nodes<EOL>if node_filter(graph, node)<EOL>}<EOL>for key, nodes in group_nodes_by_annotation(graph, annotation).items()<EOL>}<EOL>", "docstring": "Group the nodes occurring in edges by the given annotation, with a node filter applied.\n\n    :param graph: A BEL graph\n    :param node_predicates: A predicate or list of predicates (graph, node) -> bool\n    :param annotation: The annotation to use for grouping\n    :return: A dictionary of {annotation value: set of nodes}", "id": "f9371:m2"}
{"signature": "def average_node_annotation(graph: BELGraph,<EOL>key: str,<EOL>annotation: str = '<STR_LIT>',<EOL>aggregator: Optional[Callable[[Iterable[X]], X]] = None,<EOL>) -> Mapping[str, X]:", "body": "if aggregator is None:<EOL><INDENT>def aggregator(x):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return sum(x) / len(x)<EOL><DEDENT><DEDENT>result = {}<EOL>for subgraph, nodes in group_nodes_by_annotation(graph, annotation).items():<EOL><INDENT>values = [graph.nodes[node][key] for node in nodes if key in graph.nodes[node]]<EOL>result[subgraph] = aggregator(values)<EOL><DEDENT>return result<EOL>", "docstring": "Groups graph into subgraphs and assigns each subgraph a score based on the average of all nodes values\n    for the given node key\n\n    :param pybel.BELGraph graph: A BEL graph\n    :param key: The key in the node data dictionary representing the experimental data\n    :param annotation: A BEL annotation to use to group nodes\n    :param aggregator: A function from list of values -> aggregate value. Defaults to taking the average of a list of\n                       floats.\n    :type aggregator: lambda", "id": "f9371:m1"}
{"signature": "def all_edges_consistent(graph):", "body": "return all(<EOL>is_edge_consistent(graph, u, v)<EOL>for u, v in graph.edges()<EOL>)<EOL>", "docstring": "Return if all edges are consistent in a graph. Wraps :func:`pybel_tools.utils.is_edge_consistent`.\n\n    :param pybel.BELGraph graph: A BEL graph\n    :return: Are all edges consistent\n    :rtype: bool", "id": "f9372:m1"}
{"signature": "def is_edge_consistent(graph, u, v):", "body": "if not graph.has_edge(u, v):<EOL><INDENT>raise ValueError('<STR_LIT>'.format(graph, u, v))<EOL><DEDENT>return <NUM_LIT:0> == len(set(d[RELATION] for d in graph.edge[u][v].values()))<EOL>", "docstring": "Check if all edges between two nodes have the same relation.\n\n    :param pybel.BELGraph graph: A BEL Graph\n    :param tuple u: The source BEL node\n    :param tuple v: The target BEL node\n    :return: If all edges from the source to target node have the same relation\n    :rtype: bool", "id": "f9372:m0"}
{"signature": "def export_namespace(graph, namespace, directory=None, cacheable=False):", "body": "directory = os.getcwd() if directory is None else directory<EOL>path = os.path.join(directory, '<STR_LIT>'.format(namespace))<EOL>with open(path, '<STR_LIT:w>') as file:<EOL><INDENT>log.info('<STR_LIT>', path)<EOL>right_names = get_names_by_namespace(graph, namespace)<EOL>log.info('<STR_LIT>', len(right_names), namespace)<EOL>wrong_names = get_incorrect_names_by_namespace(graph, namespace)<EOL>log.info('<STR_LIT>', len(right_names), namespace)<EOL>undefined_ns_names = get_undefined_namespace_names(graph, namespace)<EOL>log.info('<STR_LIT>', len(right_names), namespace)<EOL>names = (right_names | wrong_names | undefined_ns_names)<EOL>if <NUM_LIT:0> == len(names):<EOL><INDENT>log.warning('<STR_LIT>', namespace)<EOL><DEDENT>write_namespace(<EOL>namespace_name=namespace,<EOL>namespace_keyword=namespace,<EOL>namespace_domain='<STR_LIT>',<EOL>author_name=graph.authors,<EOL>author_contact=graph.contact,<EOL>citation_name=graph.name,<EOL>values=names,<EOL>cacheable=cacheable,<EOL>file=file<EOL>)<EOL><DEDENT>", "docstring": "Exports all names and missing names from the given namespace to its own BEL Namespace files in the given\n    directory.\n\n    Could be useful during quick and dirty curation, where planned namespace building is not a priority.\n\n    :param pybel.BELGraph graph: A BEL graph\n    :param str namespace: The namespace to process\n    :param str directory: The path to the directory where to output the namespace. Defaults to the current working\n                      directory returned by :func:`os.getcwd`\n    :param bool cacheable: Should the namespace be cacheable? Defaults to ``False`` because, in general, this operation\n                        will probably be used for evil, and users won't want to reload their entire cache after each\n                        iteration of curation.", "id": "f9375:m0"}
{"signature": "@uni_in_place_transformation<EOL>def enrich_unqualified(graph: BELGraph):", "body": "enrich_complexes(graph)<EOL>enrich_composites(graph)<EOL>enrich_reactions(graph)<EOL>enrich_variants(graph)<EOL>", "docstring": "Enrich the sub-graph with the unqualified edges from the graph.\n\n    The reason you might want to do this is you induce a sub-graph from the original graph based on an annotation\n    filter, but the unqualified edges that don't have annotations that most likely connect elements within your graph\n    are not included.\n\n    .. seealso::\n\n        This function thinly wraps the successive application of the following functions:\n\n        - :func:`enrich_complexes`\n        - :func:`enrich_composites`\n        - :func:`enrich_reactions`\n        - :func:`enrich_variants`\n\n    Equivalent to:\n\n    >>> enrich_complexes(graph)\n    >>> enrich_composites(graph)\n    >>> enrich_reactions(graph)\n    >>> enrich_variants(graph)", "id": "f9376:m13"}
{"signature": "@uni_in_place_transformation<EOL>def enrich_complexes(graph: BELGraph) -> None:", "body": "nodes = list(get_nodes_by_function(graph, COMPLEX))<EOL>for u in nodes:<EOL><INDENT>for v in u.members:<EOL><INDENT>graph.add_has_component(u, v)<EOL><DEDENT><DEDENT>", "docstring": "Add all of the members of the complex abundances to the graph.", "id": "f9376:m9"}
{"signature": "def count_targets(edge_iter: EdgeIterator) -> Counter:", "body": "return Counter(v for _, v, _ in edge_iter)<EOL>", "docstring": "Count the target nodes in an edge iterator with keys and data.\n\n    :return: A counter of target nodes in the iterable", "id": "f9376:m3"}
{"signature": "def get_subgraph_edges(graph: BELGraph,<EOL>annotation: str,<EOL>value: str,<EOL>source_filter=None,<EOL>target_filter=None,<EOL>):", "body": "if source_filter is None:<EOL><INDENT>source_filter = keep_node_permissive<EOL><DEDENT>if target_filter is None:<EOL><INDENT>target_filter = keep_node_permissive<EOL><DEDENT>for u, v, k, data in graph.edges(keys=True, data=True):<EOL><INDENT>if not edge_has_annotation(data, annotation):<EOL><INDENT>continue<EOL><DEDENT>if data[ANNOTATIONS][annotation] == value and source_filter(graph, u) and target_filter(graph, v):<EOL><INDENT>yield u, v, k, data<EOL><DEDENT><DEDENT>", "docstring": "Gets all edges from a given subgraph whose source and target nodes pass all of the given filters\n\n    :param pybel.BELGraph graph: A BEL graph\n    :param str annotation:  The annotation to search\n    :param str value: The annotation value to search by\n    :param source_filter: Optional filter for source nodes (graph, node) -> bool\n    :param target_filter: Optional filter for target nodes (graph, node) -> bool\n    :return: An iterable of (source node, target node, key, data) for all edges that match the annotation/value and\n             node filters\n    :rtype: iter[tuple]", "id": "f9376:m6"}
{"signature": "def get_peripheral_successor_edges(graph: BELGraph, subgraph: BELGraph) -> EdgeIterator:", "body": "for u in subgraph:<EOL><INDENT>for _, v, k in graph.out_edges(u, keys=True):<EOL><INDENT>if v not in subgraph:<EOL><INDENT>yield u, v, k<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Get the set of possible successor edges peripheral to the sub-graph.\n\n    The source nodes in this iterable are all inside the sub-graph, while the targets are outside.", "id": "f9376:m0"}
{"signature": "@uni_in_place_transformation<EOL>def expand_periphery(universe: BELGraph,<EOL>graph: BELGraph,<EOL>node_predicates: NodePredicates = None,<EOL>edge_predicates: EdgePredicates = None,<EOL>threshold: int = <NUM_LIT:2>,<EOL>) -> None:", "body": "nd = get_subgraph_peripheral_nodes(universe, graph, node_predicates=node_predicates,<EOL>edge_predicates=edge_predicates)<EOL>for node, dd in nd.items():<EOL><INDENT>pred_d = dd['<STR_LIT>']<EOL>succ_d = dd['<STR_LIT>']<EOL>in_subgraph_connections = set(pred_d) | set(succ_d)<EOL>if threshold > len(in_subgraph_connections):<EOL><INDENT>continue<EOL><DEDENT>graph.add_node(node, attr_dict=universe[node])<EOL>for u, edges in pred_d.items():<EOL><INDENT>for k, d in edges:<EOL><INDENT>safe_add_edge(graph, u, node, k, d)<EOL><DEDENT><DEDENT>for v, edges in succ_d.items():<EOL><INDENT>for k, d in edges:<EOL><INDENT>safe_add_edge(graph, node, v, k, d)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Iterates over all possible edges, peripheral to a given subgraph, that could be added from the given graph.\n    Edges could be added if they go to nodes that are involved in relationships that occur with more than the\n    threshold (default 2) number of nodes in the subgraph.\n\n    :param universe: The universe of BEL knowledge\n    :param graph: The (sub)graph to expand\n    :param threshold: Minimum frequency of betweenness occurrence to add a gap node\n\n    A reasonable edge filter to use is :func:`pybel_tools.filters.keep_causal_edges` because this function can allow\n    for huge expansions if there happen to be hub nodes.", "id": "f9376:m8"}
{"signature": "@uni_in_place_transformation<EOL>def expand_internal_causal(universe: BELGraph, graph: BELGraph) -> None:", "body": "expand_internal(universe, graph, edge_predicates=is_causal_relation)<EOL>", "docstring": "Add causal edges between entities in the sub-graph.\n\n    Is an extremely thin wrapper around :func:`expand_internal`.\n\n    :param universe: A BEL graph representing the universe of all knowledge\n    :param graph: The target BEL graph to enrich with causal relations between contained nodes\n\n    Equivalent to:\n\n    >>> from pybel_tools.mutation import expand_internal\n    >>> from pybel.struct.filters.edge_predicates import is_causal_relation\n    >>> expand_internal(universe, graph, edge_predicates=is_causal_relation)", "id": "f9376:m15"}
{"signature": "@uni_in_place_transformation<EOL>def expand_internal(universe: BELGraph, graph: BELGraph, edge_predicates: EdgePredicates = None) -> None:", "body": "edge_filter = and_edge_predicates(edge_predicates)<EOL>for u, v in itt.product(graph, repeat=<NUM_LIT:2>):<EOL><INDENT>if graph.has_edge(u, v) or not universe.has_edge(u, v):<EOL><INDENT>continue<EOL><DEDENT>rs = defaultdict(list)<EOL>for key, data in universe[u][v].items():<EOL><INDENT>if not edge_filter(universe, u, v, key):<EOL><INDENT>continue<EOL><DEDENT>rs[data[RELATION]].append((key, data))<EOL><DEDENT>if <NUM_LIT:1> == len(rs):<EOL><INDENT>relation = list(rs)[<NUM_LIT:0>]<EOL>for key, data in rs[relation]:<EOL><INDENT>graph.add_edge(u, v, key=key, **data)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>log.debug('<STR_LIT>', u, v)<EOL><DEDENT><DEDENT>", "docstring": "Edges between entities in the sub-graph that pass the given filters.\n\n    :param universe: The full graph\n    :param graph: A sub-graph to find the upstream information\n    :param edge_predicates: Optional list of edge filter functions (graph, node, node, key, data) -> bool", "id": "f9376:m14"}
{"signature": "def count_sources(edge_iter: EdgeIterator) -> Counter:", "body": "return Counter(u for u, _, _ in edge_iter)<EOL>", "docstring": "Count the source nodes in an edge iterator with keys and data.\n\n    :return: A counter of source nodes in the iterable", "id": "f9376:m2"}
{"signature": "def build_delete_node_by_hash(manager: Manager) -> Callable[[BELGraph, str], None]:", "body": "@in_place_transformation<EOL>def delete_node_by_hash(graph: BELGraph, node_hash: str) -> None:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>node = manager.get_dsl_by_hash(node_hash)<EOL>graph.remove_node(node)<EOL><DEDENT>return delete_node_by_hash<EOL>", "docstring": "Make a delete function that's bound to the manager.", "id": "f9377:m1"}
{"signature": "def build_expand_node_neighborhood_by_hash(manager: Manager) -> Callable[[BELGraph, BELGraph, str], None]:", "body": "@uni_in_place_transformation<EOL>def expand_node_neighborhood_by_hash(universe: BELGraph, graph: BELGraph, node_hash: str) -> None:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>node = manager.get_dsl_by_hash(node_hash)<EOL>return expand_node_neighborhood(universe, graph, node)<EOL><DEDENT>return expand_node_neighborhood_by_hash<EOL>", "docstring": "Make an expand function that's bound to the manager.", "id": "f9377:m0"}
{"signature": "@in_place_transformation<EOL>def remove_inconsistent_edges(graph: BELGraph) -> None:", "body": "for u, v in get_inconsistent_edges(graph):<EOL><INDENT>edges = [(u, v, k) for k in graph[u][v]]<EOL>graph.remove_edges_from(edges)<EOL><DEDENT>", "docstring": "Remove all edges between node pairs with inconsistent edges.\n\n    This is the all-or-nothing approach. It would be better to do more careful investigation of the evidences during\n    curation.", "id": "f9379:m0"}
{"signature": "def _collapse_edge_by_namespace(graph: BELGraph,<EOL>victim_namespaces: Strings,<EOL>survivor_namespaces: str,<EOL>relations: Strings) -> None:", "body": "relation_filter = build_relation_predicate(relations)<EOL>source_namespace_filter = build_source_namespace_filter(victim_namespaces)<EOL>target_namespace_filter = build_target_namespace_filter(survivor_namespaces)<EOL>edge_predicates = [<EOL>relation_filter,<EOL>source_namespace_filter,<EOL>target_namespace_filter<EOL>]<EOL>_collapse_edge_passing_predicates(graph, edge_predicates=edge_predicates)<EOL>", "docstring": "Collapse pairs of nodes with the given namespaces that have the given relationship.\n\n    :param graph: A BEL Graph\n    :param victim_namespaces: The namespace(s) of the node to collapse\n    :param survivor_namespaces: The namespace of the node to keep\n    :param relations: The relation(s) to search", "id": "f9380:m5"}
{"signature": "@in_place_transformation<EOL>def collapse_protein_variants(graph: BELGraph) -> None:", "body": "_collapse_variants_by_function(graph, PROTEIN)<EOL>", "docstring": "Collapse all protein's variants' edges to their parents, in-place.", "id": "f9380:m0"}
{"signature": "@in_place_transformation<EOL>def collapse_gene_variants(graph: BELGraph) -> None:", "body": "_collapse_variants_by_function(graph, GENE)<EOL>", "docstring": "Collapse all gene's variants' edges to their parents, in-place.", "id": "f9380:m1"}
{"signature": "@in_place_transformation<EOL>def collapse_equivalencies_by_namespace(graph: BELGraph, victim_namespace: Strings, survivor_namespace: str) -> None:", "body": "_collapse_edge_by_namespace(graph, victim_namespace, survivor_namespace, EQUIVALENT_TO)<EOL>", "docstring": "Collapse pairs of nodes with the given namespaces that have equivalence relationships.\n\n    :param graph: A BEL graph\n    :param victim_namespace: The namespace(s) of the node to collapse\n    :param survivor_namespace: The namespace of the node to keep\n\n    To convert all ChEBI names to InChI keys, assuming there are appropriate equivalence relations between nodes with\n    those namespaces:\n\n    >>> collapse_equivalencies_by_namespace(graph, 'CHEBI', 'CHEBIID')\n    >>> collapse_equivalencies_by_namespace(graph, 'CHEBIID', 'INCHI')", "id": "f9380:m6"}
{"signature": "@in_place_transformation<EOL>def collapse_nodes_with_same_names(graph: BELGraph) -> None:", "body": "survivor_mapping = defaultdict(set) <EOL>victims = set() <EOL>it = tqdm(itt.combinations(graph, r=<NUM_LIT:2>), total=graph.number_of_nodes() * (graph.number_of_nodes() - <NUM_LIT:1>) / <NUM_LIT:2>)<EOL>for a, b in it:<EOL><INDENT>if b in victims:<EOL><INDENT>continue<EOL><DEDENT>a_name, b_name = a.get(NAME), b.get(NAME)<EOL>if not a_name or not b_name or a_name.lower() != b_name.lower():<EOL><INDENT>continue<EOL><DEDENT>if a.keys() != b.keys():  <EOL><INDENT>continue<EOL><DEDENT>for k in set(a.keys()) - {NAME, NAMESPACE}:<EOL><INDENT>if a[k] != b[k]:  <EOL><INDENT>continue<EOL><DEDENT><DEDENT>survivor_mapping[a].add(b)<EOL>victims.add(b)<EOL><DEDENT>collapse_nodes(graph, survivor_mapping)<EOL>", "docstring": "Collapse all nodes with the same name, merging namespaces by picking first alphabetical one.", "id": "f9380:m15"}
{"signature": "@transformation<EOL>def collapse_to_protein_interactions(graph: BELGraph) -> BELGraph:", "body": "rv: BELGraph = graph.copy()<EOL>collapse_to_genes(rv)<EOL>def is_edge_ppi(_: BELGraph, u: BaseEntity, v: BaseEntity, __: str) -> bool:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return isinstance(u, Gene) and isinstance(v, Gene)<EOL><DEDENT>return get_subgraph_by_edge_filter(rv, edge_predicates=[has_polarity, is_edge_ppi])<EOL>", "docstring": "Collapse to a graph made of only causal gene/protein edges.", "id": "f9380:m14"}
{"signature": "@in_place_transformation<EOL>def collapse_flybase_to_hgnc(graph: BELGraph):", "body": "collapse_orthologies_by_namespace(graph, '<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": "Collapse FlyBase orthologies to HGNC.", "id": "f9380:m11"}
{"signature": "@in_place_transformation<EOL>def collapse_rgd_to_hgnc(graph: BELGraph):", "body": "collapse_orthologies_by_namespace(graph, ['<STR_LIT>', '<STR_LIT>'], '<STR_LIT>')<EOL>", "docstring": "Collapse RGD orthologies to HGNC.", "id": "f9380:m10"}
{"signature": "@in_place_transformation<EOL>def collapse_mgi_to_hgnc(graph: BELGraph):", "body": "collapse_orthologies_by_namespace(graph, ['<STR_LIT>', '<STR_LIT>'], '<STR_LIT>')<EOL>", "docstring": "Collapse MGI orthologies to HGNC.", "id": "f9380:m9"}
{"signature": "@in_place_transformation<EOL>def enrich_pubmed_citations(graph: BELGraph, manager: Manager) -> Set[str]:", "body": "pmids = get_pubmed_identifiers(graph)<EOL>pmid_data, errors = get_citations_by_pmids(manager=manager, pmids=pmids)<EOL>for u, v, k in filter_edges(graph, has_pubmed):<EOL><INDENT>pmid = graph[u][v][k][CITATION][CITATION_REFERENCE].strip()<EOL>if pmid not in pmid_data:<EOL><INDENT>log.warning('<STR_LIT>', pmid)<EOL>errors.add(pmid)<EOL>continue<EOL><DEDENT>graph[u][v][k][CITATION].update(pmid_data[pmid])<EOL><DEDENT>return errors<EOL>", "docstring": "Overwrite all PubMed citations with values from NCBI's eUtils lookup service.\n\n    :return: A set of PMIDs for which the eUtils service crashed", "id": "f9382:m0"}
{"signature": "@uni_in_place_transformation<EOL>def update_context(universe: BELGraph, graph: BELGraph):", "body": "for namespace in get_namespaces(graph):<EOL><INDENT>if namespace in universe.namespace_url:<EOL><INDENT>graph.namespace_url[namespace] = universe.namespace_url[namespace]<EOL><DEDENT>elif namespace in universe.namespace_pattern:<EOL><INDENT>graph.namespace_pattern[namespace] = universe.namespace_pattern[namespace]<EOL><DEDENT>else:<EOL><INDENT>log.warning('<STR_LIT>', namespace)<EOL><DEDENT><DEDENT>for annotation in get_annotations(graph):<EOL><INDENT>if annotation in universe.annotation_url:<EOL><INDENT>graph.annotation_url[annotation] = universe.annotation_url[annotation]<EOL><DEDENT>elif annotation in universe.annotation_pattern:<EOL><INDENT>graph.annotation_pattern[annotation] = universe.annotation_pattern[annotation]<EOL><DEDENT>elif annotation in universe.annotation_list:<EOL><INDENT>graph.annotation_list[annotation] = universe.annotation_list[annotation]<EOL><DEDENT>else:<EOL><INDENT>log.warning('<STR_LIT>', annotation)<EOL><DEDENT><DEDENT>", "docstring": "Update the context of a subgraph from the universe of all knowledge.", "id": "f9382:m1"}
{"signature": "@in_place_transformation<EOL>def remove_highlight_edges(graph: BELGraph, edges=None):", "body": "for u, v, k, _ in graph.edges(keys=True, data=True) if edges is None else edges:<EOL><INDENT>if is_edge_highlighted(graph, u, v, k):<EOL><INDENT>del graph[u][v][k][EDGE_HIGHLIGHT]<EOL><DEDENT><DEDENT>", "docstring": "Remove the highlight from the given edges, or all edges if none given.\n\n    :param graph: A BEL graph\n    :param edges: The edges (4-tuple of u,v,k,d) to remove the highlight from)\n    :type edges: iter[tuple]", "id": "f9383:m5"}
{"signature": "@in_place_transformation<EOL>def remove_highlight_nodes(graph: BELGraph, nodes: Optional[Iterable[BaseEntity]]=None) -> None:", "body": "for node in graph if nodes is None else nodes:<EOL><INDENT>if is_node_highlighted(graph, node):<EOL><INDENT>del graph.node[node][NODE_HIGHLIGHT]<EOL><DEDENT><DEDENT>", "docstring": "Removes the highlight from the given nodes, or all nodes if none given.\n\n    :param graph: A BEL graph\n    :param nodes: The list of nodes to un-highlight", "id": "f9383:m2"}
{"signature": "@in_place_transformation<EOL>def highlight_edges(graph: BELGraph, edges=None, color: Optional[str]=None) -> None:", "body": "color = color or EDGE_HIGHLIGHT_DEFAULT_COLOR<EOL>for u, v, k, d in edges if edges is not None else graph.edges(keys=True, data=True):<EOL><INDENT>graph[u][v][k][EDGE_HIGHLIGHT] = color<EOL><DEDENT>", "docstring": "Adds a highlight tag to the given edges.\n\n    :param graph: A BEL graph\n    :param edges: The edges (4-tuples of u, v, k, d) to add a highlight tag on\n    :type edges: iter[tuple]\n    :param str color: The color to highlight (use something that works with CSS)", "id": "f9383:m3"}
{"signature": "@in_place_transformation<EOL>def remove_highlight_subgraph(graph: BELGraph, subgraph: BELGraph):", "body": "remove_highlight_nodes(graph, subgraph.nodes())<EOL>remove_highlight_edges(graph, subgraph.edges())<EOL>", "docstring": "Remove the highlight from all nodes/edges in the graph that are in the subgraph.\n\n    :param graph: The BEL graph to mutate\n    :param subgraph: The subgraph from which to remove the highlighting", "id": "f9383:m7"}
{"signature": "@in_place_transformation<EOL>def highlight_nodes(graph: BELGraph, nodes: Optional[Iterable[BaseEntity]] = None, color: Optional[str]=None):", "body": "color = color or NODE_HIGHLIGHT_DEFAULT_COLOR<EOL>for node in nodes if nodes is not None else graph:<EOL><INDENT>graph.node[node][NODE_HIGHLIGHT] = color<EOL><DEDENT>", "docstring": "Adds a highlight tag to the given nodes.\n\n    :param graph: A BEL graph\n    :param nodes: The nodes to add a highlight tag on\n    :param color: The color to highlight (use something that works with CSS)", "id": "f9383:m0"}
{"signature": "def is_edge_highlighted(graph: BELGraph, u, v, k) -> bool:", "body": "return EDGE_HIGHLIGHT in graph[u][v][k]<EOL>", "docstring": "Returns if the given edge is highlighted.\n\n    :param graph: A BEL graph\n    :return: Does the edge contain highlight information?\n    :rtype: bool", "id": "f9383:m4"}
{"signature": "@transformation<EOL>def random_by_edges(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:", "body": "percentage = percentage or <NUM_LIT><EOL>assert <NUM_LIT:0> < percentage <= <NUM_LIT:1><EOL>edges = graph.edges(keys=True)<EOL>n = int(graph.number_of_edges() * percentage)<EOL>subedges = random.sample(edges, n)<EOL>rv = graph.fresh_copy()<EOL>for u, v, k in subedges:<EOL><INDENT>safe_add_edge(rv, u, v, k, graph[u][v][k])<EOL><DEDENT>update_node_helper(graph, rv)<EOL>return rv<EOL>", "docstring": "Get a random graph by keeping a certain percentage of original edges.\n\n    :param graph: A BEL graph\n    :param percentage: What percentage of eges to take", "id": "f9384:m1"}
{"signature": "@transformation<EOL>def random_by_nodes(graph: BELGraph, percentage: Optional[float] = None) -> BELGraph:", "body": "percentage = percentage or <NUM_LIT><EOL>assert <NUM_LIT:0> < percentage <= <NUM_LIT:1><EOL>nodes = graph.nodes()<EOL>n = int(len(nodes) * percentage)<EOL>subnodes = random.sample(nodes, n)<EOL>result = graph.subgraph(subnodes)<EOL>update_node_helper(graph, result)<EOL>return result<EOL>", "docstring": "Get a random graph by inducing over a percentage of the original nodes.\n\n    :param graph: A BEL graph\n    :param percentage: The percentage of edges to keep", "id": "f9384:m0"}
{"signature": "@click.group(help=f\"<STR_LIT>\"<EOL>f\"<STR_LIT>\")<EOL>@click.version_option()<EOL>def main():", "body": "", "docstring": "Command Line Interface for PyBEL Tools.", "id": "f9385:m0"}
{"signature": "@document.command()<EOL>@click.argument('<STR_LIT>', nargs=-<NUM_LIT:1>)<EOL>@connection_option<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=click.File('<STR_LIT:r>'), default=sys.stdin, help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', help='<STR_LIT>'.format(os.getcwd()))<EOL>def serialize_namespaces(namespaces, connection: str, path, directory):", "body": "from .definition_utils import export_namespaces<EOL>graph = from_lines(path, manager=connection)<EOL>export_namespaces(namespaces, graph, directory)<EOL>", "docstring": "Parse a BEL document then serializes the given namespaces (errors and all) to the given directory.", "id": "f9385:m9"}
{"signature": "@namespace.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=click.File('<STR_LIT:r>'), default=sys.stdin, help=\"<STR_LIT>\")<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=click.File('<STR_LIT:w>'), default=sys.stdout,<EOL>help=\"<STR_LIT>\")<EOL>def convert_to_annotation(file, output):", "body": "resource = parse_bel_resource(file)<EOL>write_annotation(<EOL>keyword=resource['<STR_LIT>']['<STR_LIT>'],<EOL>values={k: '<STR_LIT>' for k in resource['<STR_LIT>']},<EOL>citation_name=resource['<STR_LIT>']['<STR_LIT>'],<EOL>description=resource['<STR_LIT>']['<STR_LIT>'],<EOL>file=output,<EOL>)<EOL>", "docstring": "Convert a namespace file to an annotation file.", "id": "f9385:m4"}
{"signature": "@in_place_transformation<EOL>def overlay_type_data(graph: BELGraph,<EOL>data: Mapping[str, float],<EOL>func: str,<EOL>namespace: str,<EOL>label: Optional[str] = None,<EOL>overwrite: bool = False,<EOL>impute: Optional[float] = None,<EOL>) -> None:", "body": "new_data = {<EOL>node: data.get(node[NAME], impute)<EOL>for node in filter_nodes(graph, function_namespace_inclusion_builder(func, namespace))<EOL>}<EOL>overlay_data(graph, new_data, label=label, overwrite=overwrite)<EOL>", "docstring": "Overlay tabular data on the network for data that comes from an data set with identifiers that lack\n    namespaces.\n\n    For example, if you want to overlay differential gene expression data from a table, that table\n    probably has HGNC identifiers, but no specific annotations that they are in the HGNC namespace or\n    that the entities to which they refer are RNA.\n\n    :param graph: A BEL Graph\n    :param dict data: A dictionary of {name: data}\n    :param func: The function of the keys in the data dictionary\n    :param namespace: The namespace of the keys in the data dictionary\n    :param label: The annotation label to put in the node dictionary\n    :param overwrite: Should old annotations be overwritten?\n    :param impute: The value to use for missing data", "id": "f9387:m1"}
{"signature": "@in_place_transformation<EOL>def overlay_data(graph: BELGraph,<EOL>data: Mapping[BaseEntity, Any],<EOL>label: Optional[str] = None,<EOL>overwrite: bool = False,<EOL>) -> None:", "body": "if label is None:<EOL><INDENT>label = WEIGHT<EOL><DEDENT>for node, value in data.items():<EOL><INDENT>if node not in graph:<EOL><INDENT>log.debug('<STR_LIT>', node)<EOL>continue<EOL><DEDENT>if label in graph.nodes[node] and not overwrite:<EOL><INDENT>log.debug('<STR_LIT>', label, node)<EOL>continue<EOL><DEDENT>graph.nodes[node][label] = value<EOL><DEDENT>", "docstring": "Overlays tabular data on the network\n\n    :param graph: A BEL Graph\n    :param data: A dictionary of {tuple node: data for that node}\n    :param label: The annotation label to put in the node dictionary\n    :param overwrite: Should old annotations be overwritten?", "id": "f9387:m0"}
{"signature": "def build_edge_data_filter(annotations: Mapping, partial_match: bool = True) -> EdgePredicate: ", "body": "@edge_predicate<EOL>def annotation_dict_filter(data: EdgeData) -> bool:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return subdict_matches(data, annotations, partial_match=partial_match)<EOL><DEDENT>return annotation_dict_filter<EOL>", "docstring": "Build a filter that keeps edges whose data dictionaries are super-dictionaries to the given dictionary.\n\n    :param annotations: The annotation query dict to match\n    :param partial_match: Should the query values be used as partial or exact matches? Defaults to :code:`True`.", "id": "f9389:m1"}
{"signature": "def build_target_namespace_filter(namespaces: Strings) -> EdgePredicate:", "body": "if isinstance(namespaces, str):<EOL><INDENT>def target_namespace_filter(_, __, v: BaseEntity, ___) -> bool:<EOL><INDENT>return node_has_namespace(v, namespaces)<EOL><DEDENT><DEDENT>elif isinstance(namespaces, Iterable):<EOL><INDENT>namespaces = set(namespaces)<EOL>def target_namespace_filter(_, __, v: BaseEntity, ___) -> bool:<EOL><INDENT>return node_has_namespaces(v, namespaces)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError<EOL><DEDENT>return target_namespace_filter<EOL>", "docstring": "Only passes for edges whose target nodes have the given namespace or one of the given namespaces\n\n    :param namespaces: The namespace or namespaces to filter by", "id": "f9389:m8"}
{"signature": "def node_has_namespace(node: BaseEntity, namespace: str) -> bool:", "body": "ns = node.get(NAMESPACE)<EOL>return ns is not None and ns == namespace<EOL>", "docstring": "Pass for nodes that have the given namespace.", "id": "f9389:m5"}
{"signature": "def build_author_inclusion_filter(authors: Strings) -> EdgePredicate:", "body": "if isinstance(authors, str):<EOL><INDENT>@edge_predicate<EOL>def author_filter(data: EdgeData) -> bool:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return has_authors(data) and authors in data[CITATION][CITATION_AUTHORS]<EOL><DEDENT><DEDENT>elif isinstance(authors, Iterable):<EOL><INDENT>authors = set(authors)<EOL>@edge_predicate<EOL>def author_filter(data: EdgeData) -> bool:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return has_authors(data) and any(<EOL>author in data[CITATION][CITATION_AUTHORS]<EOL>for author in authors<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError<EOL><DEDENT>return author_filter<EOL>", "docstring": "Pass only for edges with author information that matches one of the given authors.\n\n    :param authors: The author or list of authors to filter by", "id": "f9389:m4"}
{"signature": "@register_deprecated('<STR_LIT>')<EOL>@in_place_transformation<EOL>def remove_mouse_nodes(graph: BELGraph) -> None:", "body": "remove_nodes_by_namespace(graph, ['<STR_LIT>', '<STR_LIT>'])<EOL>", "docstring": "Remove nodes using the MGI and MGIID namespaces.", "id": "f9390:m2"}
{"signature": "@in_place_transformation<EOL>def remove_nodes_by_function_namespace(graph: BELGraph, func: str, namespace: Strings) -> None:", "body": "remove_filtered_nodes(graph, function_namespace_inclusion_builder(func, namespace))<EOL>", "docstring": "Remove nodes with the given function and namespace.\n\n    This might be useful to exclude information learned about distant species, such as excluding all information\n    from MGI and RGD in diseases where mice and rats don't give much insight to the human disease mechanism.", "id": "f9390:m4"}
{"signature": "@register_deprecated('<STR_LIT>')<EOL>@in_place_transformation<EOL>def remove_rat_nodes(graph: BELGraph) -> None:", "body": "remove_nodes_by_namespace(graph, ['<STR_LIT>', '<STR_LIT>'])<EOL>", "docstring": "Remove nodes using the RGD and RGDID namespaces.", "id": "f9390:m3"}
{"signature": "def function_exclusion_filter_builder(func: Strings) -> NodePredicate:", "body": "if isinstance(func, str):<EOL><INDENT>def function_exclusion_filter(_: BELGraph, node: BaseEntity) -> bool:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return node[FUNCTION] != func<EOL><DEDENT>return function_exclusion_filter<EOL><DEDENT>elif isinstance(func, Iterable):<EOL><INDENT>functions = set(func)<EOL>def functions_exclusion_filter(_: BELGraph, node: BaseEntity) -> bool:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return node[FUNCTION] not in functions<EOL><DEDENT>return functions_exclusion_filter<EOL><DEDENT>raise ValueError('<STR_LIT>'.format(func))<EOL>", "docstring": "Build a filter that fails on nodes of the given function(s).\n\n    :param func: A BEL Function or list/set/tuple of BEL functions", "id": "f9391:m6"}
{"signature": "def summarize_node_filter(graph: BELGraph, node_filters: NodePredicates) -> None:", "body": "passed = count_passed_node_filter(graph, node_filters)<EOL>print('<STR_LIT>'.format(passed, graph.number_of_nodes()))<EOL>", "docstring": "Print a summary of the number of nodes passing a given set of filters.\n\n    :param graph: A BEL graph\n    :param node_filters: A node filter or list/tuple of node filters", "id": "f9391:m0"}
{"signature": "def node_exclusion_filter_builder(nodes: Iterable[BaseEntity]) -> NodePredicate:", "body": "node_set = set(nodes)<EOL>def exclusion_filter(_: BELGraph, node: BaseEntity) -> bool:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return node not in node_set<EOL><DEDENT>return exclusion_filter<EOL>", "docstring": "Build a filter that fails on nodes in the given list.", "id": "f9391:m2"}
{"signature": "def node_inclusion_filter_builder(nodes: Iterable[BaseEntity]) -> NodePredicate:", "body": "node_set = set(nodes)<EOL>def inclusion_filter(_: BELGraph, node: BaseEntity) -> bool:<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return node in node_set<EOL><DEDENT>return inclusion_filter<EOL>", "docstring": "Build a filter that only passes on nodes in the given list.\n\n    :param nodes: An iterable of BEL nodes", "id": "f9391:m1"}
{"signature": "def function_inclusion_filter_builder(func: Strings) -> NodePredicate:", "body": "if isinstance(func, str):<EOL><INDENT>return _single_function_inclusion_filter_builder(func)<EOL><DEDENT>elif isinstance(func, Iterable):<EOL><INDENT>return _collection_function_inclusion_builder(func)<EOL><DEDENT>raise ValueError('<STR_LIT>'.format(func))<EOL>", "docstring": "Build a filter that only passes on nodes of the given function(s).\n\n    :param func: A BEL Function or list/set/tuple of BEL functions", "id": "f9391:m5"}
{"signature": "def get_graph_by_id(self, network_id: int) -> BELGraph:", "body": "return self.networks[network_id]<EOL>", "docstring": "Get a graph by its identifier.", "id": "f9395:c1:m2"}
{"signature": "def insert_graph(self, graph: BELGraph, **_kwargs) -> Network:", "body": "result = _Namespace()<EOL>result.id = len(self.networks)<EOL>self.networks[result.id] = graph<EOL>return result<EOL>", "docstring": "Insert a graph and return the resulting ORM object (mocked).", "id": "f9395:c1:m1"}
{"signature": "def get_graphs_by_ids(self, network_ids: Iterable[int]) -> List[BELGraph]:", "body": "return [<EOL>self.networks[network_id]<EOL>for network_id in network_ids<EOL>]<EOL>", "docstring": "Get several graphs by their identifiers.", "id": "f9395:c1:m3"}
{"signature": "def count_unique_relations(graph: BELGraph) -> Counter:", "body": "return Counter(itt.chain.from_iterable(get_edge_relations(graph).values()))<EOL>", "docstring": "Return a histogram of the different types of relations present in a graph.\n\n    Note: this operation only counts each type of edge once for each pair of nodes", "id": "f9396:m2"}
{"signature": "def count_annotation_values(graph: BELGraph, annotation: str) -> Counter:", "body": "return Counter(iter_annotation_values(graph, annotation))<EOL>", "docstring": "Count in how many edges each annotation appears in a graph\n\n    :param graph: A BEL graph\n    :param annotation: The annotation to count\n    :return: A Counter from {annotation value: frequency}", "id": "f9396:m4"}
{"signature": "def get_contradictory_pairs(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity]]:", "body": "for u, v in graph.edges():<EOL><INDENT>if pair_has_contradiction(graph, u, v):<EOL><INDENT>yield u, v<EOL><DEDENT><DEDENT>", "docstring": "Iterates over contradictory node pairs in the graph based on their causal relationships\n\n    :return: An iterator over (source, target) node pairs that have contradictory causal edges", "id": "f9396:m7"}
{"signature": "def relation_set_has_contradictions(relations: Set[str]) -> bool:", "body": "has_increases = any(relation in CAUSAL_INCREASE_RELATIONS for relation in relations)<EOL>has_decreases = any(relation in CAUSAL_DECREASE_RELATIONS for relation in relations)<EOL>has_cnc = any(relation == CAUSES_NO_CHANGE for relation in relations)<EOL>return <NUM_LIT:1> < sum([has_cnc, has_decreases, has_increases])<EOL>", "docstring": "Return if the set of BEL relations contains a contradiction.", "id": "f9397:m1"}
{"signature": "def calculate_error_by_annotation(graph: BELGraph, annotation: str) -> Mapping[str, List[str]]:", "body": "results = defaultdict(list)<EOL>for _, exc, ctx in graph.warnings:<EOL><INDENT>if not ctx or not edge_has_annotation(ctx, annotation):<EOL><INDENT>continue<EOL><DEDENT>values = ctx[ANNOTATIONS][annotation]<EOL>if isinstance(values, str):<EOL><INDENT>results[values].append(exc.__class__.__name__)<EOL><DEDENT>elif isinstance(values, Iterable):<EOL><INDENT>for value in values:<EOL><INDENT>results[value].append(exc.__class__.__name__)<EOL><DEDENT><DEDENT><DEDENT>return dict(results)<EOL>", "docstring": "Group the graph by a given annotation and builds lists of errors for each.\n\n    :return: A dictionary of {annotation value: list of errors}", "id": "f9400:m7"}
{"signature": "def group_errors(graph: BELGraph) -> Mapping[str, List[int]]:", "body": "warning_summary = defaultdict(list)<EOL>for _, exc, _ in graph.warnings:<EOL><INDENT>warning_summary[str(exc)].append(exc.line_number)<EOL><DEDENT>return dict(warning_summary)<EOL>", "docstring": "Group the errors together for analysis of the most frequent error.\n\n    :return: A dictionary of {error string: list of line numbers}", "id": "f9400:m8"}
{"signature": "def get_undefined_namespaces(graph: BELGraph) -> Set[str]:", "body": "return {<EOL>exc.namespace<EOL>for _, exc, _ in graph.warnings<EOL>if isinstance(exc, UndefinedNamespaceWarning)<EOL>}<EOL>", "docstring": "Get all namespaces that are used in the BEL graph aren't actually defined.", "id": "f9400:m1"}
{"signature": "def get_most_common_errors(graph: BELGraph, n: Optional[int] = <NUM_LIT:20>):", "body": "return count_dict_values(group_errors(graph)).most_common(n)<EOL>", "docstring": "Get the (n) most common errors in a graph.", "id": "f9400:m9"}
{"signature": "def get_names_including_errors_by_namespace(graph: BELGraph, namespace: str) -> Set[str]:", "body": "return get_names_by_namespace(graph, namespace) | get_incorrect_names_by_namespace(graph, namespace)<EOL>", "docstring": "Takes the names from the graph in a given namespace (:func:`pybel.struct.summary.get_names_by_namespace`) and\n    the erroneous names from the same namespace (:func:`get_incorrect_names_by_namespace`) and returns them together\n    as a unioned set\n\n    :return: The set of all correct and incorrect names from the given namespace in the graph", "id": "f9400:m10"}
{"signature": "def get_undefined_annotations(graph: BELGraph) -> Set[str]:", "body": "return {<EOL>exc.annotation<EOL>for _, exc, _ in graph.warnings<EOL>if isinstance(exc, UndefinedAnnotationWarning)<EOL>}<EOL>", "docstring": "Get all annotations that aren't actually defined.\n\n    :return: The set of all undefined annotations", "id": "f9400:m5"}
{"signature": "def get_incorrect_names_by_namespace(graph: BELGraph, namespace: str) -> Set[str]:", "body": "return {<EOL>exc.name<EOL>for _, exc, _ in graph.warnings<EOL>if isinstance(exc, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)) and exc.namespace == namespace<EOL>}<EOL>", "docstring": "Return the set of all incorrect names from the given namespace in the graph.\n\n    :return: The set of all incorrect names from the given namespace in the graph", "id": "f9400:m2"}
{"signature": "def get_names_including_errors(graph: BELGraph) -> Mapping[str, Set[str]]:", "body": "return {<EOL>namespace: get_names_including_errors_by_namespace(graph, namespace)<EOL>for namespace in get_namespaces(graph)<EOL>}<EOL>", "docstring": "Takes the names from the graph in a given namespace and the erroneous names from the same namespace and returns\n    them together as a unioned set\n\n    :return: The dict of the sets of all correct and incorrect names from the given namespace in the graph", "id": "f9400:m11"}
{"signature": "def remove_falsy_values(counter: Mapping[Any, int]) -> Mapping[Any, int]:", "body": "return {<EOL>label: count<EOL>for label, count in counter.items()<EOL>if count<EOL>}<EOL>", "docstring": "Remove all values that are zero.", "id": "f9402:m11"}
{"signature": "def get_causal_in_edges(<EOL>graph: BELGraph,<EOL>nbunch: Union[BaseEntity, Iterable[BaseEntity]],<EOL>) -> Set[Tuple[BaseEntity, BaseEntity]]:", "body": "return {<EOL>(u, v)<EOL>for u, v, k, d in graph.in_edges(nbunch, keys=True, data=True)<EOL>if is_causal_relation(graph, u, v, k, d)<EOL>}<EOL>", "docstring": "Get the in-edges to the given node that are causal.\n\n    :return: A set of (source, target) pairs where the target is the given node", "id": "f9402:m1"}
{"signature": "def get_activities(graph: BELGraph) -> Set[BaseEntity]:", "body": "return get_nodes(graph, has_activity)<EOL>", "docstring": "Get all nodes that have molecular activities.", "id": "f9402:m6"}
{"signature": "def _generate_citation_dict(graph: BELGraph) -> Mapping[str, Mapping[Tuple[BaseEntity, BaseEntity], str]]:", "body": "results = defaultdict(lambda: defaultdict(set))<EOL>for u, v, data in graph.edges(data=True):<EOL><INDENT>if CITATION not in data:<EOL><INDENT>continue<EOL><DEDENT>results[data[CITATION][CITATION_TYPE]][u, v].add(data[CITATION][CITATION_REFERENCE].strip())<EOL><DEDENT>return dict(results)<EOL>", "docstring": "Prepare a citation data dictionary from a graph.\n\n    :return: A dictionary of dictionaries {citation type: {(source, target): citation reference}", "id": "f9403:m0"}
{"signature": "def get_evidences_by_pmid(graph: BELGraph, pmids: Union[str, Iterable[str]]):", "body": "result = defaultdict(set)<EOL>for _, _, _, data in filter_edges(graph, build_pmid_inclusion_filter(pmids)):<EOL><INDENT>result[data[CITATION][CITATION_REFERENCE]].add(data[EVIDENCE])<EOL><DEDENT>return dict(result)<EOL>", "docstring": "Get a dictionary from the given PubMed identifiers to the sets of all evidence strings associated with each\n    in the graph.\n\n    :param graph: A BEL graph\n    :param pmids: An iterable of PubMed identifiers, as strings. Is consumed and converted to a set.\n    :return: A dictionary of {pmid: set of all evidence strings}\n    :rtype: dict", "id": "f9403:m12"}
{"signature": "def count_author_publications(graph: BELGraph) -> typing.Counter[str]:", "body": "authors = group_as_dict(_iter_author_publiations(graph))<EOL>return Counter(count_dict_values(count_defaultdict(authors)))<EOL>", "docstring": "Count the number of publications of each author to the given graph.", "id": "f9403:m6"}
{"signature": "def get_citation_years(graph: BELGraph) -> List[Tuple[int, int]]:", "body": "return create_timeline(count_citation_years(graph))<EOL>", "docstring": "Create a citation timeline counter from the graph.", "id": "f9403:m15"}
{"signature": "def count_authors_by_annotation(graph: BELGraph, annotation: str = '<STR_LIT>') -> Mapping[str, typing.Counter[str]]:", "body": "authors = group_as_dict(_iter_authors_by_annotation(graph, annotation=annotation))<EOL>return count_defaultdict(authors)<EOL>", "docstring": "Group the author counters by sub-graphs induced by the annotation.\n\n    :param graph: A BEL graph\n    :param annotation: The annotation to use to group the graph\n    :return: A dictionary of Counters {subgraph name: Counter from {author: frequency}}", "id": "f9403:m10"}
{"signature": "def get_pmid_by_keyword(keyword: str,<EOL>graph: Optional[BELGraph] = None,<EOL>pubmed_identifiers: Optional[Set[str]] = None,<EOL>) -> Set[str]:", "body": "if pubmed_identifiers is not None:<EOL><INDENT>return {<EOL>pubmed_identifier<EOL>for pubmed_identifier in pubmed_identifiers<EOL>if pubmed_identifier.startswith(keyword)<EOL>}<EOL><DEDENT>if graph is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return {<EOL>pubmed_identifier<EOL>for pubmed_identifier in iterate_pubmed_identifiers(graph)<EOL>if pubmed_identifier.startswith(keyword)<EOL>}<EOL>", "docstring": "Get the set of PubMed identifiers beginning with the given keyword string.\n\n    :param keyword: The beginning of a PubMed identifier\n    :param graph: A BEL graph\n    :param pubmed_identifiers: A set of pre-cached PubMed identifiers\n    :return: A set of PubMed identifiers starting with the given string", "id": "f9403:m1"}
{"signature": "def count_citations_by_annotation(graph: BELGraph, annotation: str) -> Mapping[str, typing.Counter[str]]:", "body": "citations = defaultdict(lambda: defaultdict(set))<EOL>for u, v, data in graph.edges(data=True):<EOL><INDENT>if not edge_has_annotation(data, annotation) or CITATION not in data:<EOL><INDENT>continue<EOL><DEDENT>k = data[ANNOTATIONS][annotation]<EOL>citations[k][u, v].add((data[CITATION][CITATION_TYPE], data[CITATION][CITATION_REFERENCE].strip()))<EOL><DEDENT>return {k: Counter(itt.chain.from_iterable(v.values())) for k, v in citations.items()}<EOL>", "docstring": "Group the citation counters by subgraphs induced by the annotation.\n\n    :param graph: A BEL graph\n    :param annotation: The annotation to use to group the graph\n    :return: A dictionary of Counters {subgraph name: Counter from {citation: frequency}}", "id": "f9403:m4"}
{"signature": "def count_confidences(graph: BELGraph) -> typing.Counter[str]:", "body": "return Counter(<EOL>(<EOL>'<STR_LIT:None>'<EOL>if ANNOTATIONS not in data or '<STR_LIT>' not in data[ANNOTATIONS] else<EOL>list(data[ANNOTATIONS]['<STR_LIT>'])[<NUM_LIT:0>]<EOL>)<EOL>for _, _, data in graph.edges(data=True)<EOL>if CITATION in data  <EOL>)<EOL>", "docstring": "Count the confidences in the graph.", "id": "f9403:m17"}
{"signature": "def count_pmids(graph: BELGraph) -> Counter:", "body": "return Counter(iterate_pubmed_identifiers(graph))<EOL>", "docstring": "Count the frequency of PubMed documents in a graph.\n\n    :return: A Counter from {(pmid, name): frequency}", "id": "f9403:m2"}
{"signature": "def create_timeline(year_counter: typing.Counter[int]) -> List[Tuple[int, int]]:", "body": "if not year_counter:<EOL><INDENT>return []<EOL><DEDENT>from_year = min(year_counter) - <NUM_LIT:1><EOL>until_year = datetime.now().year + <NUM_LIT:1><EOL>return [<EOL>(year, year_counter.get(year, <NUM_LIT:0>))<EOL>for year in range(from_year, until_year)<EOL>]<EOL>", "docstring": "Complete the Counter timeline.\n\n    :param Counter year_counter: counter dict for each year\n    :return: complete timeline", "id": "f9403:m16"}
{"signature": "def pairwise(iterable: Iterable[X]) -> Iterable[Tuple[X, X]]:", "body": "a, b = itt.tee(iterable)<EOL>next(b, None)<EOL>return zip(a, b)<EOL>", "docstring": "Iterate over pairs in list s -> (s0,s1), (s1,s2), (s2, s3), ...", "id": "f9404:m0"}
{"signature": "def barv(d, plt, title=None, rotation='<STR_LIT>'):", "body": "labels = sorted(d, key=d.get, reverse=True)<EOL>index = range(len(labels))<EOL>plt.xticks(index, labels, rotation=rotation)<EOL>plt.bar(index, [d[v] for v in labels])<EOL>if title is not None:<EOL><INDENT>plt.title(title)<EOL><DEDENT>", "docstring": "A convenience function for plotting a vertical bar plot from a Counter", "id": "f9404:m11"}
{"signature": "def set_percentage(x: Iterable[X], y: Iterable[X]) -> float:", "body": "a, b = set(x), set(y)<EOL>if not a:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>return len(a & b) / len(a)<EOL>", "docstring": "What percentage of x is contained within y?\n\n    :param set x: A set\n    :param set y: Another set\n    :return: The percentage of x contained within y", "id": "f9404:m4"}
{"signature": "def calculate_betweenness_centality(graph: BELGraph, number_samples: int = CENTRALITY_SAMPLES) -> Counter:", "body": "try:<EOL><INDENT>res = nx.betweenness_centrality(graph, k=number_samples)<EOL><DEDENT>except Exception:<EOL><INDENT>res = nx.betweenness_centrality(graph)<EOL><DEDENT>return Counter(res)<EOL>", "docstring": "Calculate the betweenness centrality over nodes in the graph.\n\n    Tries to do it with a certain number of samples, but then tries a complete approach if it fails.", "id": "f9404:m16"}
{"signature": "def calculate_single_tanimoto_set_distances(target: Iterable[X], dict_of_sets: Mapping[Y, Set[X]]) -> Mapping[Y, float]:", "body": "target_set = set(target)<EOL>return {<EOL>k: tanimoto_set_similarity(target_set, s)<EOL>for k, s in dict_of_sets.items()<EOL>}<EOL>", "docstring": "Return a dictionary of distances keyed by the keys in the given dict.\n\n    Distances are calculated based on pairwise tanimoto similarity of the sets contained\n\n    :param set target: A set\n    :param dict_of_sets: A dict of {x: set of y}\n    :type dict_of_sets: dict\n    :return: A similarity dicationary based on the set overlap (tanimoto) score between the target set and the sets in\n            dos\n    :rtype: dict", "id": "f9404:m7"}
{"signature": "def count_dict_values(dict_of_counters: Mapping[X, Sized]) -> typing.Counter[X]:", "body": "return Counter({<EOL>k: len(v)<EOL>for k, v in dict_of_counters.items()<EOL>})<EOL>", "docstring": "Count the number of elements in each value (can be list, Counter, etc).\n\n    :param dict_of_counters: A dictionary of things whose lengths can be measured (lists, Counters, dicts)\n    :return: A Counter with the same keys as the input but the count of the length of the values list/tuple/set/Counter", "id": "f9404:m3"}
{"signature": "def canonical_circulation(elements: T, key: Optional[Callable[[T], bool]] = None) -> T:", "body": "return min(get_circulations(elements), key=key)<EOL>", "docstring": "Get get a canonical representation of the ordered collection by finding its minimum circulation with the\n    given sort key", "id": "f9404:m18"}
{"signature": "def calculate_global_tanimoto_set_distances(dict_of_sets: Mapping[X, Set]) -> Mapping[X, Mapping[X, float]]:", "body": "universe = set(itt.chain.from_iterable(dict_of_sets.values()))<EOL>universe_size = len(universe)<EOL>result: Dict[X, Dict[X, float]] = defaultdict(dict)<EOL>for x, y in itt.combinations(dict_of_sets, <NUM_LIT:2>):<EOL><INDENT>result[x][y] = result[y][x] = <NUM_LIT:1.0> - len(dict_of_sets[x] | dict_of_sets[y]) / universe_size<EOL><DEDENT>for x in dict_of_sets:<EOL><INDENT>result[x][x] = <NUM_LIT:1.0> - len(x) / universe_size<EOL><DEDENT>return dict(result)<EOL>", "docstring": "r\"\"\"Calculate an alternative distance matrix based on the following equation.\n\n    .. math:: distance(A, B)=1- \\|A \\cup B\\| / \\| \\cup_{s \\in S} s\\|\n\n    :param dict_of_sets: A dict of {x: set of y}\n    :return: A similarity matrix based on the alternative tanimoto distance as a dict of dicts", "id": "f9404:m9"}
{"signature": "def count_defaultdict(dict_of_lists: Mapping[X, List[Y]]) -> Mapping[X, typing.Counter[Y]]:", "body": "return {<EOL>k: Counter(v)<EOL>for k, v in dict_of_lists.items()<EOL>}<EOL>", "docstring": "Count the number of elements in each value of the dictionary.", "id": "f9404:m2"}
{"signature": "def min_tanimoto_set_similarity(x: Iterable[X], y: Iterable[X]) -> float:", "body": "a, b = set(x), set(y)<EOL>if not a or not b:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>return len(a & b) / min(len(a), len(b))<EOL>", "docstring": "Calculate the tanimoto set similarity using the minimum size.\n\n    :param set x: A set\n    :param set y: Another set\n    :return: The similarity between", "id": "f9404:m6"}
{"signature": "def tanimoto_set_similarity(x: Iterable[X], y: Iterable[X]) -> float:", "body": "a, b = set(x), set(y)<EOL>union = a | b<EOL>if not union:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>return len(a & b) / len(union)<EOL>", "docstring": "Calculate the tanimoto set similarity.", "id": "f9404:m5"}
{"signature": "def find_activations(graph: BELGraph):", "body": "for u, v, key, data in graph.edges(keys=True, data=True):<EOL><INDENT>if u != v:<EOL><INDENT>continue<EOL><DEDENT>bel = graph.edge_to_bel(u, v, data)<EOL>line = data.get(LINE)<EOL>if line is None:<EOL><INDENT>continue  <EOL><DEDENT>elif has_protein_modification_increases_activity(graph, u, v, key):<EOL><INDENT>print(line, '<STR_LIT>', bel)<EOL>find_related(graph, v, data)<EOL><DEDENT>elif has_degradation_increases_activity(data):<EOL><INDENT>print(line, '<STR_LIT>', bel)<EOL>find_related(graph, v, data)<EOL><DEDENT>elif has_translocation_increases_activity(data):<EOL><INDENT>print(line, '<STR_LIT>', bel)<EOL>find_related(graph, v, data)<EOL><DEDENT>elif complex_increases_activity(graph, u, v, key):<EOL><INDENT>print(line, '<STR_LIT>', bel)<EOL>find_related(graph, v, data)<EOL><DEDENT>elif has_same_subject_object(graph, u, v, key):<EOL><INDENT>print(line, '<STR_LIT>', bel)<EOL><DEDENT>else:<EOL><INDENT>print(line, '<STR_LIT>', bel)<EOL><DEDENT><DEDENT>", "docstring": "Find edges that are A - A, meaning that some conditions in the edge best describe the interaction.", "id": "f9405:m10"}
{"signature": "def self_edge_filter(_: BELGraph, source: BaseEntity, target: BaseEntity, __: str) -> bool:", "body": "return source == target<EOL>", "docstring": "Check if the source and target nodes are the same.", "id": "f9405:m0"}
{"signature": "@edge_predicate<EOL>def has_translocation_increases_activity(data: Dict) -> bool:", "body": "return part_has_modifier(data, SUBJECT, TRANSLOCATION) and part_has_modifier(data, OBJECT, ACTIVITY)<EOL>", "docstring": "Check if the translocation of source causes activity of target.", "id": "f9405:m3"}
{"signature": "@edge_predicate<EOL>def has_degradation_increases_activity(data: Dict) -> bool:", "body": "return part_has_modifier(data, SUBJECT, DEGRADATION) and part_has_modifier(data, OBJECT, ACTIVITY)<EOL>", "docstring": "Check if the degradation of source causes activity of target.", "id": "f9405:m2"}
{"signature": "def compare(graph: BELGraph, annotation: str = '<STR_LIT>') -> Mapping[str, Mapping[str, float]]:", "body": "canonical_mechanisms = get_subgraphs_by_annotation(graph, annotation)<EOL>canonical_nodes = _transform_graph_dict_to_node_dict(canonical_mechanisms)<EOL>candidate_mechanisms = generate_bioprocess_mechanisms(graph)<EOL>candidate_nodes = _transform_graph_dict_to_node_dict(candidate_mechanisms)<EOL>results: Dict[str, Dict[str, float]] = defaultdict(dict)<EOL>it = itt.product(canonical_nodes.items(), candidate_nodes.items())<EOL>for (canonical_name, canonical_graph), (candidate_bp, candidate_graph) in it:<EOL><INDENT>tanimoto = tanimoto_set_similarity(candidate_nodes, canonical_nodes)<EOL>results[canonical_name][candidate_bp] = tanimoto<EOL><DEDENT>return dict(results)<EOL>", "docstring": "Compare generated mechanisms to actual ones.\n\n    1. Generates candidate mechanisms for each biological process\n    2. Gets sub-graphs for all NeuroMMSig signatures\n    3. Make tanimoto similarity comparison for all sets\n\n    :return: A dictionary table comparing the canonical subgraphs to generated ones", "id": "f9407:m0"}
{"signature": "def neurommsig_gene_ora(graph: BELGraph, genes: List[Gene]) -> float:", "body": "graph_genes = set(get_nodes_by_function(graph, GENE))<EOL>return len(graph_genes.intersection(genes)) / len(graph_genes)<EOL>", "docstring": "Calculate the percentage of target genes mappable to the graph.\n\n    Assume: graph central dogma inferred, collapsed to genes, collapsed variants", "id": "f9408:m3"}
{"signature": "def neurommsig_topology(graph: BELGraph, nodes: List[BaseEntity]) -> float:", "body": "nodes = list(nodes)<EOL>number_nodes = len(nodes)<EOL>if number_nodes <= <NUM_LIT:1>:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>unnormalized_sum = sum(<EOL>u in graph[v]<EOL>for u, v in itt.product(nodes, repeat=<NUM_LIT:2>)<EOL>if v in graph and u != v<EOL>)<EOL>return unnormalized_sum / (number_nodes * (number_nodes - <NUM_LIT:1.0>))<EOL>", "docstring": "Calculate the node neighbor score for a given list of nodes.\n\n    -  Doesn't consider self loops\n\n    .. math::\n\n         \\frac{\\sum_i^n N_G[i]}{n*(n-1)}", "id": "f9408:m5"}
{"signature": "def get_neurommsig_scores(graph: BELGraph,<EOL>genes: List[Gene],<EOL>annotation: str = '<STR_LIT>',<EOL>ora_weight: Optional[float] = None,<EOL>hub_weight: Optional[float] = None,<EOL>top_percent: Optional[float] = None,<EOL>topology_weight: Optional[float] = None,<EOL>preprocess: bool = False<EOL>) -> Optional[Mapping[str, float]]:", "body": "if preprocess:<EOL><INDENT>graph = neurommsig_graph_preprocessor.run(graph)<EOL><DEDENT>if not any(gene in graph for gene in genes):<EOL><INDENT>logger.debug('<STR_LIT>')<EOL>return<EOL><DEDENT>subgraphs = get_subgraphs_by_annotation(graph, annotation=annotation)<EOL>return get_neurommsig_scores_prestratified(<EOL>subgraphs=subgraphs,<EOL>genes=genes,<EOL>ora_weight=ora_weight,<EOL>hub_weight=hub_weight,<EOL>top_percent=top_percent,<EOL>topology_weight=topology_weight,<EOL>)<EOL>", "docstring": "Preprocess the graph, stratify by the given annotation, then run the NeuroMMSig algorithm on each.\n\n    :param graph: A BEL graph\n    :param genes: A list of gene nodes\n    :param annotation: The annotation to use to stratify the graph to subgraphs\n    :param ora_weight: The relative weight of the over-enrichment analysis score from\n     :py:func:`neurommsig_gene_ora`. Defaults to 1.0.\n    :param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`.\n     Defaults to 1.0.\n    :param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).\n    :param topology_weight: The relative weight of the topolgical analysis core from\n     :py:func:`neurommsig_topology`. Defaults to 1.0.\n    :param preprocess: If true, preprocess the graph.\n    :return: A dictionary from {annotation value: NeuroMMSig composite score}\n\n    Pre-processing steps:\n\n    1. Infer the central dogma with :func:``\n    2. Collapse all proteins, RNAs and miRNAs to genes with :func:``\n    3. Collapse variants to genes with :func:``", "id": "f9408:m0"}
{"signature": "def neurommsig_hubs(graph: BELGraph, genes: List[Gene], top_percent: Optional[float] = None) -> float:", "body": "top_percent = top_percent or <NUM_LIT><EOL>if graph.number_of_nodes() < <NUM_LIT:20>:<EOL><INDENT>logger.debug('<STR_LIT>')<EOL>return <NUM_LIT:0.0><EOL><DEDENT>graph_genes = set(get_nodes_by_function(graph, GENE))<EOL>bc = Counter({<EOL>node: betweenness_centrality<EOL>for node, betweenness_centrality in calculate_betweenness_centality(graph).items()<EOL>if node in graph_genes<EOL>})<EOL>number_central_nodes = int(len(graph_genes) * top_percent)<EOL>if number_central_nodes < <NUM_LIT:1>:<EOL><INDENT>number_central_nodes = <NUM_LIT:1><EOL><DEDENT>number_mappable_central_nodes = sum(<EOL>node in genes<EOL>for node in bc.most_common(number_central_nodes)<EOL>)<EOL>return number_mappable_central_nodes / number_central_nodes<EOL>", "docstring": "Calculate the percentage of target genes mappable to the graph.\n\n    Assume: graph central dogma inferred, collapsed to genes, collapsed variants, graph has more than 20 nodes\n\n    :param graph: A BEL graph\n    :param genes: A list of nodes\n    :param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).", "id": "f9408:m4"}
{"signature": "@click.command()<EOL>def main():", "body": "logging.basicConfig(level=logging.INFO)<EOL>log.setLevel(logging.INFO)<EOL>bms_base = get_bms_base()<EOL>neurommsig_base = get_neurommsig_base()<EOL>neurommsig_excel_dir = os.path.join(neurommsig_base, '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>nift_values = get_nift_values()<EOL>log.info('<STR_LIT>')<EOL>ad_path = os.path.join(neurommsig_excel_dir, '<STR_LIT>', '<STR_LIT>')<EOL>ad_df = preprocess(ad_path)<EOL>with open(os.path.join(bms_base, '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'), '<STR_LIT:w>') as ad_file:<EOL><INDENT>write_neurommsig_bel(ad_file, ad_df, mesh_alzheimer, nift_values)<EOL><DEDENT>log.info('<STR_LIT>')<EOL>pd_path = os.path.join(neurommsig_excel_dir, '<STR_LIT>', '<STR_LIT>')<EOL>pd_df = preprocess(pd_path)<EOL>with open(os.path.join(bms_base, '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'), '<STR_LIT:w>') as pd_file:<EOL><INDENT>write_neurommsig_bel(pd_file, pd_df, mesh_parkinson, nift_values)<EOL><DEDENT>", "docstring": "Convert the Alzheimer's and Parkinson's disease NeuroMMSig excel sheets to BEL.", "id": "f9409:m0"}
{"signature": "def write_neurommsig_bel(file,<EOL>df: pd.DataFrame,<EOL>disease: str,<EOL>nift_values: Mapping[str, str],<EOL>):", "body": "write_neurommsig_biolerplate(disease, file)<EOL>missing_features = set()<EOL>fixed_caps = set()<EOL>nift_value_originals = set(nift_values.values())<EOL>graph = BELGraph(<EOL>name=f'<STR_LIT>',<EOL>description=f'<STR_LIT>',<EOL>authors='<STR_LIT>',<EOL>contact='<STR_LIT>',<EOL>version=time.strftime('<STR_LIT>'),<EOL>)<EOL>for pathway, pathway_df in df.groupby(pathway_column):<EOL><INDENT>sorted_pathway_df = pathway_df.sort_values(genes_column)<EOL>sliced_df = sorted_pathway_df[columns].itertuples()<EOL>for _, gene, pubmeds, lit_snps, gwas_snps, ld_block_snps, clinical_features, clinical_snps in sliced_df:<EOL><INDENT>gene = ensure_quotes(gene)<EOL>for snp in itt.chain(lit_snps or [], gwas_snps or [], ld_block_snps or [], clinical_snps or []):<EOL><INDENT>if not snp.strip():<EOL><INDENT>continue<EOL><DEDENT>graph.add_association(<EOL>Gene('<STR_LIT>', gene),<EOL>Gene('<STR_LIT>', snp),<EOL>evidence='<STR_LIT>',<EOL>citation='<STR_LIT>',<EOL>annotations={<EOL>'<STR_LIT>': disease,<EOL>},<EOL>)<EOL><DEDENT>for clinical_feature in clinical_features or []:<EOL><INDENT>if not clinical_feature.strip():<EOL><INDENT>continue<EOL><DEDENT>if clinical_feature.lower() not in nift_values:<EOL><INDENT>missing_features.add(clinical_feature)<EOL>continue<EOL><DEDENT>if clinical_feature not in nift_value_originals:<EOL><INDENT>fixed_caps.add((clinical_feature, nift_values[clinical_feature.lower()]))<EOL>clinical_feature = nift_values[clinical_feature.lower()]  <EOL><DEDENT>graph.add_association(<EOL>Gene('<STR_LIT>', gene),<EOL>Abundance('<STR_LIT>', clinical_feature),<EOL>evidence='<STR_LIT>',<EOL>citation='<STR_LIT>',<EOL>annotations={<EOL>'<STR_LIT>': disease,<EOL>},<EOL>)<EOL>if clinical_snps:<EOL><INDENT>for clinical_snp in clinical_snps:<EOL><INDENT>graph.add_association(<EOL>Gene('<STR_LIT>', clinical_snp),<EOL>Abundance('<STR_LIT>', 
clinical_feature),<EOL>evidence='<STR_LIT>',<EOL>citation='<STR_LIT>',<EOL>annotations={<EOL>'<STR_LIT>': disease,<EOL>},<EOL>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if missing_features:<EOL><INDENT>log.warning('<STR_LIT>', disease)<EOL>for feature in missing_features:<EOL><INDENT>log.warning(feature)<EOL><DEDENT><DEDENT>if fixed_caps:<EOL><INDENT>log.warning('<STR_LIT>')<EOL>for broken, fixed in fixed_caps:<EOL><INDENT>log.warning('<STR_LIT>', broken, fixed)<EOL><DEDENT><DEDENT>", "docstring": "Writes the NeuroMMSigDB excel sheet to BEL\n\n    :param file: a file or file-like that can be writen to\n    :param df:\n    :param disease:\n    :param nift_values: a dictionary of lowercased to normal names in NIFT", "id": "f9410:m6"}
{"signature": "def munge_cell(cell, line=None, validators=None):", "body": "if pd.isnull(cell) or isinstance(cell, int):<EOL><INDENT>return None<EOL><DEDENT>c = '<STR_LIT:U+0020>'.join(cell.split())<EOL>if validators is not None and all(re.match(validator, c) is None for validator in validators):<EOL><INDENT>if line:<EOL><INDENT>log.info(\"<STR_LIT>\", line, c)<EOL><DEDENT>return None<EOL><DEDENT>return [x.strip() for x in str(c).strip().split('<STR_LIT:U+002C>')]<EOL>", "docstring": ":param cell:\n:param line:\n:param validators:\n:return:", "id": "f9410:m1"}
{"signature": "def get_dampened_pairs(graph: BELGraph) -> SetOfNodePairs:", "body": "cg = get_causal_subgraph(graph)<EOL>results = set()<EOL>for u, v, d in cg.edges(data=True):<EOL><INDENT>if d[RELATION] not in CAUSAL_DECREASE_RELATIONS:<EOL><INDENT>continue<EOL><DEDENT>if cg.has_edge(v, u) and any(dd[RELATION] in CAUSAL_DECREASE_RELATIONS for dd in cg[v][u].values()):<EOL><INDENT>results.add(tuple(sorted([u, v], key=str)))<EOL><DEDENT><DEDENT>return results<EOL>", "docstring": "Find pairs of nodes that have mutual causal edges that are decreasing each other such that ``A -| B`` and\n    ``B -| A``.\n\n    :return: A set of pairs of nodes with mutual causal edges", "id": "f9414:m3"}
{"signature": "def get_regulatory_pairs(graph: BELGraph) -> Set[NodePair]:", "body": "cg = get_causal_subgraph(graph)<EOL>results = set()<EOL>for u, v, d in cg.edges(data=True):<EOL><INDENT>if d[RELATION] not in CAUSAL_INCREASE_RELATIONS:<EOL><INDENT>continue<EOL><DEDENT>if cg.has_edge(v, u) and any(dd[RELATION] in CAUSAL_DECREASE_RELATIONS for dd in cg[v][u].values()):<EOL><INDENT>results.add((u, v))<EOL><DEDENT><DEDENT>return results<EOL>", "docstring": "Find pairs of nodes that have mutual causal edges that are regulating each other such that ``A -> B`` and\n    ``B -| A``.\n\n    :return: A set of pairs of nodes with mutual causal edges", "id": "f9414:m1"}
{"signature": "def get_chaotic_triplets(graph: BELGraph) -> Iterable[NodeTriple]:", "body": "return _get_disregulated_triplets_helper(graph, CAUSAL_INCREASE_RELATIONS)<EOL>", "docstring": "Yield triples of nodes (A, B, C) that mutually increase each other, such as when ``A -> B``, ``B -> C``, and\n    ``C -> A``.", "id": "f9414:m16"}
{"signature": "def summarize_stability(graph: BELGraph) -> Mapping[str, int]:", "body": "regulatory_pairs = get_regulatory_pairs(graph)<EOL>chaotic_pairs = get_chaotic_pairs(graph)<EOL>dampened_pairs = get_dampened_pairs(graph)<EOL>contraditory_pairs = get_contradiction_summary(graph)<EOL>separately_unstable_triples = get_separate_unstable_correlation_triples(graph)<EOL>mutually_unstable_triples = get_mutually_unstable_correlation_triples(graph)<EOL>jens_unstable_triples = get_jens_unstable(graph)<EOL>increase_mismatch_triples = get_increase_mismatch_triplets(graph)<EOL>decrease_mismatch_triples = get_decrease_mismatch_triplets(graph)<EOL>chaotic_triples = get_chaotic_triplets(graph)<EOL>dampened_triples = get_dampened_triplets(graph)<EOL>return {<EOL>'<STR_LIT>': _count_or_len(regulatory_pairs),<EOL>'<STR_LIT>': _count_or_len(chaotic_pairs),<EOL>'<STR_LIT>': _count_or_len(dampened_pairs),<EOL>'<STR_LIT>': _count_or_len(contraditory_pairs),<EOL>'<STR_LIT>': _count_or_len(separately_unstable_triples),<EOL>'<STR_LIT>': _count_or_len(mutually_unstable_triples),<EOL>'<STR_LIT>': _count_or_len(jens_unstable_triples),<EOL>'<STR_LIT>': _count_or_len(increase_mismatch_triples),<EOL>'<STR_LIT>': _count_or_len(decrease_mismatch_triples),<EOL>'<STR_LIT>': _count_or_len(chaotic_triples),<EOL>'<STR_LIT>': _count_or_len(dampened_triples)<EOL>}<EOL>", "docstring": "Summarize the stability of the graph.", "id": "f9414:m18"}
{"signature": "def jens_transformation_beta(graph: BELGraph) -> DiGraph:", "body": "result = DiGraph()<EOL>for u, v, d in graph.edges(data=True):<EOL><INDENT>relation = d[RELATION]<EOL>if relation == NEGATIVE_CORRELATION:<EOL><INDENT>result.add_edge(u, v)<EOL>result.add_edge(v, u)<EOL><DEDENT>elif relation in CAUSAL_INCREASE_RELATIONS:<EOL><INDENT>result.add_edge(v, u)<EOL><DEDENT>elif relation in CAUSAL_DECREASE_RELATIONS:<EOL><INDENT>result.add_edge(u, v)<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Apply Jens' Transformation (Type 2) to the graph.\n\n    1. Induce a sub-graph over causal and correlative relations\n    2. Transform edges with the following rules:\n        - increases => backwards decreases\n        - decreases => decreases\n        - positive correlation => delete\n        - negative correlation => two way decreases\n\n    The resulting graph can be used to search for 3-cycles, which now symbolize stable triples where ``A -> B``,\n    ``A -| C`` and ``B negativeCorrelation C``.", "id": "f9414:m10"}
{"signature": "def get_correlation_graph(graph: BELGraph) -> Graph:", "body": "result = Graph()<EOL>for u, v, d in graph.edges(data=True):<EOL><INDENT>if d[RELATION] not in CORRELATIVE_RELATIONS:<EOL><INDENT>continue<EOL><DEDENT>if not result.has_edge(u, v):<EOL><INDENT>result.add_edge(u, v, **{d[RELATION]: True})<EOL><DEDENT>elif d[RELATION] not in result[u][v]:<EOL><INDENT>log.log(<NUM_LIT:5>, '<STR_LIT>', u, v)<EOL>result[u][v][d[RELATION]] = True<EOL>result[v][u][d[RELATION]] = True<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Extract an undirected graph of only correlative relationships.", "id": "f9414:m4"}
{"signature": "def get_dampened_triplets(graph: BELGraph) -> Iterable[NodeTriple]:", "body": "return _get_disregulated_triplets_helper(graph, CAUSAL_DECREASE_RELATIONS)<EOL>", "docstring": "Yield triples of nodes (A, B, C) that mutually decreases each other, such as when ``A -| B``,\n    ``B -| C``, and ``C -| A``.", "id": "f9414:m17"}
{"signature": "def jens_transformation_alpha(graph: BELGraph) -> DiGraph:", "body": "result = DiGraph()<EOL>for u, v, d in graph.edges(data=True):<EOL><INDENT>relation = d[RELATION]<EOL>if relation == POSITIVE_CORRELATION:<EOL><INDENT>result.add_edge(u, v)<EOL>result.add_edge(v, u)<EOL><DEDENT>elif relation in CAUSAL_INCREASE_RELATIONS:<EOL><INDENT>result.add_edge(u, v)<EOL><DEDENT>elif relation in CAUSAL_DECREASE_RELATIONS:<EOL><INDENT>result.add_edge(v, u)<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Apply Jens' transformation (Type 1) to the graph.\n\n    1. Induce a sub-graph over causal + correlative edges\n    2. Transform edges by the following rules:\n        - increases => increases\n        - decreases => backwards increases\n        - positive correlation => two way increases\n        - negative correlation => delete\n\n    The resulting graph can be used to search for 3-cycles, which now symbolize unstable triplets where ``A -> B``,\n    ``A -| C`` and ``B positiveCorrelation C``.", "id": "f9414:m9"}
{"signature": "def get_triangles(graph: DiGraph) -> SetOfNodeTriples:", "body": "return {<EOL>tuple(sorted([a, b, c], key=str))<EOL>for a, b in graph.edges()<EOL>for c in graph.successors(b)<EOL>if graph.has_edge(c, a)<EOL>}<EOL>", "docstring": "Get a set of triples representing the 3-cycles from a directional graph.\n\n    Each 3-cycle is returned once, with nodes in sorted order.", "id": "f9414:m6"}
{"signature": "def get_contradiction_summary(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity, str]]:", "body": "for u, v in set(graph.edges()):<EOL><INDENT>relations = {data[RELATION] for data in graph[u][v].values()}<EOL>if relation_set_has_contradictions(relations):<EOL><INDENT>yield u, v, relations<EOL><DEDENT><DEDENT>", "docstring": "Yield triplets of (source node, target node, set of relations) for (source node, target node) pairs\n    that have multiple, contradictory relations.", "id": "f9414:m0"}
{"signature": "def get_correlation_triangles(graph: BELGraph) -> SetOfNodeTriples:", "body": "return {<EOL>tuple(sorted([n, u, v], key=str))<EOL>for n in graph<EOL>for u, v in itt.combinations(graph[n], <NUM_LIT:2>)<EOL>if graph.has_edge(u, v)<EOL>}<EOL>", "docstring": "Return a set of all triangles pointed by the given node.", "id": "f9414:m5"}
{"signature": "def get_jens_unstable(graph: BELGraph) -> Iterable[NodeTriple]:", "body": "r = jens_transformation_alpha(graph)<EOL>return get_triangles(r)<EOL>", "docstring": "Yield triples of nodes (A, B, C) where ``A -> B``, ``A -| C``, and ``C positiveCorrelation A``.\n\n    Calculated efficiently using the Jens Transformation.", "id": "f9414:m11"}
{"signature": "def calculate_concordance_probability_by_annotation(graph, annotation, key, cutoff=None, permutations=None,<EOL>percentage=None,<EOL>use_ambiguous=False):", "body": "result = [<EOL>(value, calculate_concordance_probability(<EOL>subgraph,<EOL>key,<EOL>cutoff=cutoff,<EOL>permutations=permutations,<EOL>percentage=percentage,<EOL>use_ambiguous=use_ambiguous,<EOL>))<EOL>for value, subgraph in get_subgraphs_by_annotation(graph, annotation).items()<EOL>]<EOL>return dict(result)<EOL>", "docstring": "Returns the results of concordance analysis on each subgraph, stratified by the given annotation.\n\n    :param pybel.BELGraph graph: A BEL graph\n    :param str annotation: The annotation to group by.\n    :param str key: The node data dictionary key storing the logFC\n    :param float cutoff: The optional logFC cutoff for significance\n    :param int permutations: The number of random permutations to test. Defaults to 500\n    :param float percentage: The percentage of the graph's edges to maintain. Defaults to 0.9\n    :param bool use_ambiguous: Compare to ambiguous edges as well\n    :rtype: dict[str,tuple]", "id": "f9416:m7"}
{"signature": "def get_cutoff(value: float, cutoff: Optional[float] = None) -> int:", "body": "cutoff = cutoff if cutoff is not None else <NUM_LIT:0><EOL>if value > cutoff:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>if value < (-<NUM_LIT:1> * cutoff):<EOL><INDENT>return - <NUM_LIT:1><EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Assign if a value is greater than or less than a cutoff.", "id": "f9416:m0"}
{"signature": "def bel_to_spia_matrices(graph: BELGraph) -> Mapping[str, pd.DataFrame]:", "body": "index_nodes = get_matrix_index(graph)<EOL>spia_matrices = build_spia_matrices(index_nodes)<EOL>for u, v, edge_data in graph.edges(data=True):<EOL><INDENT>if isinstance(u, CentralDogma) and isinstance(v, CentralDogma):<EOL><INDENT>update_spia_matrices(spia_matrices, u, v, edge_data)<EOL><DEDENT>elif isinstance(u, CentralDogma) and isinstance(v, ListAbundance):<EOL><INDENT>for node in v.members:<EOL><INDENT>if not isinstance(node, CentralDogma):<EOL><INDENT>continue<EOL><DEDENT>update_spia_matrices(spia_matrices, u, node, edge_data)<EOL><DEDENT><DEDENT>elif isinstance(u, ListAbundance) and isinstance(v, CentralDogma):<EOL><INDENT>for node in u.members:<EOL><INDENT>if not isinstance(node, CentralDogma):<EOL><INDENT>continue<EOL><DEDENT>update_spia_matrices(spia_matrices, node, v, edge_data)<EOL><DEDENT><DEDENT>elif isinstance(u, ListAbundance) and isinstance(v, ListAbundance):<EOL><INDENT>for sub_member, obj_member in product(u.members, v.members):<EOL><INDENT>if isinstance(sub_member, CentralDogma) and isinstance(obj_member, CentralDogma):<EOL><INDENT>update_spia_matrices(spia_matrices, sub_member, obj_member, edge_data)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return spia_matrices<EOL>", "docstring": "Create an excel sheet ready to be used in SPIA software.\n\n    :param graph: BELGraph\n    :return: dictionary with matrices", "id": "f9417:m0"}
{"signature": "def build_spia_matrices(nodes: Set[str]) -> Dict[str, pd.DataFrame]:", "body": "nodes = list(sorted(nodes))<EOL>matrices = OrderedDict()<EOL>for relation in KEGG_RELATIONS:<EOL><INDENT>matrices[relation] = pd.DataFrame(<NUM_LIT:0>, index=nodes, columns=nodes)<EOL><DEDENT>return matrices<EOL>", "docstring": "Build an adjacency matrix for each KEGG relationship and return in a dictionary.\n\n    :param nodes: A set of HGNC gene symbols\n    :return: Dictionary of adjacency matrix for each relationship", "id": "f9417:m2"}
{"signature": "@main.command()<EOL>@graph_pickle_argument<EOL>@directory_option<EOL>def run(graph, directory):", "body": "run_epicom(graph, directory)<EOL>", "docstring": "Run on an arbitrary graph.", "id": "f9419:m1"}
{"signature": "@click.group()<EOL>def main():", "body": "", "docstring": "Run EpiCom Reloaded.", "id": "f9419:m0"}
{"signature": "@main.command()<EOL>@click.option('<STR_LIT>', '<STR_LIT>', type=click.File('<STR_LIT:w>'), default=sys.stdout)<EOL>def multi(output):", "body": "graphs = [<EOL>get_ad_graph(),<EOL>get_ep_graph(),<EOL>get_pd_graph(),<EOL>]<EOL>multi_run_epicom(graphs, output)<EOL>", "docstring": "Run on all graphs.", "id": "f9419:m5"}
{"signature": "def build_database(manager: pybel.Manager, annotation_url: Optional[str] = None) -> None:", "body": "annotation_url = annotation_url or NEUROMMSIG_DEFAULT_URL<EOL>annotation = manager.get_namespace_by_url(annotation_url)<EOL>if annotation is None:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>networks = get_networks_using_annotation(manager, annotation)<EOL>dtis = ...<EOL>for network in networks:<EOL><INDENT>graph = network.as_bel()<EOL>scores = epicom_on_graph(graph, dtis)<EOL>for (drug_name, subgraph_name), score in scores.items():<EOL><INDENT>drug_model = get_drug_model(manager, drug_name)<EOL>subgraph_model = manager.get_annotation_entry(annotation_url, subgraph_name)<EOL>score_model = Score(<EOL>network=network,<EOL>annotation=subgraph_model,<EOL>drug=drug_model,<EOL>score=score<EOL>)<EOL>manager.session.add(score_model)<EOL><DEDENT><DEDENT>t = time.time()<EOL>logger.info('<STR_LIT>')<EOL>manager.session.commit()<EOL>logger.info('<STR_LIT>', time.time() - t)<EOL>", "docstring": "Build a database of scores for NeuroMMSig annotated graphs.\n\n    1. Get all networks that use the Subgraph annotation\n    2. run on each", "id": "f9420:m2"}
{"signature": "def get_drug_model(manager: pybel.Manager, name: str):", "body": "raise NotImplementedError<EOL>", "docstring": ":param pybel.manager.Manager manager:\n:param str name:\n:return: pybel.manager.models.NamespaceEntry", "id": "f9420:m1"}
{"signature": "def rank_edges(edges, edge_ranking=None):", "body": "edge_ranking = default_edge_ranking if edge_ranking is None else edge_ranking<EOL>edges_scores = [<EOL>(edge_id, edge_data[RELATION], edge_ranking[edge_data[RELATION]])<EOL>for edge_id, edge_data in edges.items()<EOL>]<EOL>return max(edges_scores, key=itemgetter(<NUM_LIT:2>))<EOL>", "docstring": "Return the highest ranked edge from a multiedge.\n\n    :param dict edges: dictionary with all edges between two nodes\n    :param dict edge_ranking: A dictionary of {relationship: score}\n    :return: Highest ranked edge\n    :rtype: tuple: (edge id, relation, score given ranking)", "id": "f9426:m3"}
{"signature": "def get_path_effect(graph, path, relationship_dict):", "body": "causal_effect = []<EOL>for predecessor, successor in pairwise(path):<EOL><INDENT>if pair_has_contradiction(graph, predecessor, successor):<EOL><INDENT>return Effect.ambiguous<EOL><DEDENT>edges = graph.get_edge_data(predecessor, successor)<EOL>edge_key, edge_relation, _ = rank_edges(edges)<EOL>relation = graph[predecessor][successor][edge_key][RELATION]<EOL>if relation not in relationship_dict or relationship_dict[relation] == <NUM_LIT:0>:<EOL><INDENT>return Effect.no_effect<EOL><DEDENT>causal_effect.append(relationship_dict[relation])<EOL><DEDENT>final_effect = reduce(lambda x, y: x * y, causal_effect)<EOL>return Effect.activation if final_effect == <NUM_LIT:1> else Effect.inhibition<EOL>", "docstring": "Calculate the final effect of the root node to the sink node in the path.\n\n    :param pybel.BELGraph graph: A BEL graph\n    :param list path: Path from root to sink node\n    :param dict relationship_dict: dictionary with relationship effects\n    :rtype: Effect", "id": "f9426:m2"}
{"signature": "def get_random_edge(self):", "body": "nodes = [<EOL>(n, self.in_out_ratio(n))<EOL>for n in self.unscored_nodes_iter()<EOL>if n != self.target_node<EOL>]<EOL>node, deg = min(nodes, key=itemgetter(<NUM_LIT:1>))<EOL>log.log(<NUM_LIT:5>, '<STR_LIT>', node, deg)<EOL>possible_edges = self.graph.in_edges(node, keys=True)<EOL>log.log(<NUM_LIT:5>, '<STR_LIT>', possible_edges)<EOL>edge_to_remove = random.choice(possible_edges)<EOL>log.log(<NUM_LIT:5>, '<STR_LIT>', edge_to_remove)<EOL>return edge_to_remove<EOL>", "docstring": "This function should be run when there are no leaves, but there are still unscored nodes. It will introduce\n        a probabilistic element to the algorithm, where some edges are disregarded randomly to eventually get a score\n        for the network. This means that the score can be averaged over many runs for a given graph, and a better\n        data structure will have to be later developed that doesn't destroy the graph (instead, annotates which edges\n        have been disregarded, later)\n\n           1. get all un-scored\n           2. rank by in-degree\n           3. weighted probability over all in-edges where lower in-degree means higher probability\n           4. pick randomly which edge\n\n        :return: A random in-edge to the lowest in/out degree ratio node. This is a 3-tuple of (node, node, key)\n        :rtype: tuple", "id": "f9429:c0:m5"}
{"signature": "def multirun(graph: BELGraph,<EOL>node: BaseEntity,<EOL>key: Optional[str] = None,<EOL>tag: Optional[str] = None,<EOL>default_score: Optional[float] = None,<EOL>runs: Optional[int] = None,<EOL>use_tqdm: bool = False,<EOL>) -> Iterable['<STR_LIT>']:", "body": "if runs is None:<EOL><INDENT>runs = <NUM_LIT:100><EOL><DEDENT>it = range(runs)<EOL>if use_tqdm:<EOL><INDENT>it = tqdm(it, total=runs)<EOL><DEDENT>for i in it:<EOL><INDENT>try:<EOL><INDENT>runner = Runner(graph, node, key=key, tag=tag, default_score=default_score)<EOL>runner.run()<EOL>yield runner<EOL><DEDENT>except Exception:<EOL><INDENT>log.debug('<STR_LIT>', i, node)<EOL><DEDENT><DEDENT>", "docstring": "Run the heat diffusion workflow multiple times, each time yielding a :class:`Runner` object upon completion.\n\n    :param graph: A BEL graph\n    :param node: The BEL node that is the focus of this analysis\n    :param key: The key in the node data dictionary representing the experimental data. Defaults to\n     :data:`pybel_tools.constants.WEIGHT`.\n    :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n    :param default_score: The initial score for all nodes. This number can go up or down.\n    :param runs: The number of times to run the heat diffusion workflow. Defaults to 100.\n    :param use_tqdm: Should there be a progress bar for runners?\n    :return: An iterable over the runners after each iteration", "id": "f9429:m3"}
{"signature": "def calculate_average_score_by_annotation(<EOL>graph: BELGraph,<EOL>annotation: str,<EOL>key: Optional[str] = None,<EOL>runs: Optional[int] = None,<EOL>use_tqdm: bool = False,<EOL>) -> Mapping[str, float]:", "body": "candidate_mechanisms = generate_bioprocess_mechanisms(graph, key=key)<EOL>scores: Mapping[BaseEntity, Tuple] = calculate_average_scores_on_subgraphs(<EOL>subgraphs=candidate_mechanisms,<EOL>key=key,<EOL>runs=runs,<EOL>use_tqdm=use_tqdm,<EOL>)<EOL>subgraph_bp: Mapping[str, List[BaseEntity]] = defaultdict(list)<EOL>subgraphs: Mapping[str, BELGraph] = get_subgraphs_by_annotation(graph, annotation)<EOL>for annotation_value, subgraph in subgraphs.items():<EOL><INDENT>subgraph_bp[annotation_value].extend(get_nodes_by_function(subgraph, BIOPROCESS))<EOL><DEDENT>return {<EOL>annotation_value: np.average(scores[bp][<NUM_LIT:0>] for bp in bps)<EOL>for annotation_value, bps in subgraph_bp.items()<EOL>}<EOL>", "docstring": "For each sub-graph induced over the edges matching the annotation, calculate the average score\n    for all of the contained biological processes\n\n    Assumes you haven't done anything yet\n\n    1. Generates biological process upstream candidate mechanistic sub-graphs with\n       :func:`generate_bioprocess_mechanisms`\n    2. Calculates scores for each sub-graph with :func:`calculate_average_scores_on_sub-graphs`\n    3. Overlays data with pbt.integration.overlay_data\n    4. Calculates averages with pbt.selection.group_nodes.average_node_annotation\n\n    :param graph: A BEL graph\n    :param annotation: A BEL annotation\n    :param key: The key in the node data dictionary representing the experimental data. Defaults to\n     :data:`pybel_tools.constants.WEIGHT`.\n    :param runs: The number of times to run the heat diffusion workflow. 
Defaults to 100.\n    :param use_tqdm: Should there be a progress bar for runners?\n    :return: A dictionary from {str annotation value: tuple scores}\n\n    Example Usage:\n\n    >>> import pybel\n    >>> from pybel_tools.integration import overlay_data\n    >>> from pybel_tools.analysis.heat import calculate_average_score_by_annotation\n    >>> graph = pybel.from_path(...)\n    >>> scores = calculate_average_score_by_annotation(graph, 'subgraph')", "id": "f9429:m7"}
{"signature": "def get_remaining_graph(self) -> BELGraph:", "body": "return self.graph.subgraph(self.unscored_nodes_iter())<EOL>", "docstring": "Allows for introspection on the algorithm at a given point by returning the sub-graph induced\n        by all unscored nodes\n\n        :return: The remaining un-scored BEL graph", "id": "f9429:c0:m14"}
{"signature": "def run_with_graph_transformation(self) -> Iterable[BELGraph]:", "body": "yield self.get_remaining_graph()<EOL>while not self.done_chomping():<EOL><INDENT>while not list(self.iter_leaves()):<EOL><INDENT>self.remove_random_edge()<EOL>yield self.get_remaining_graph()<EOL><DEDENT>self.score_leaves()<EOL>yield self.get_remaining_graph()<EOL><DEDENT>", "docstring": "Calculate scores for all leaves until there are none, removes edges until there are, and repeats until\n        all nodes have been scored. Also, yields the current graph at every step so you can make a cool animation\n        of how the graph changes throughout the course of the algorithm\n\n        :return: An iterable of BEL graphs", "id": "f9429:c0:m10"}
{"signature": "def calculate_average_scores_on_subgraphs(<EOL>subgraphs: Mapping[H, BELGraph],<EOL>key: Optional[str] = None,<EOL>tag: Optional[str] = None,<EOL>default_score: Optional[float] = None,<EOL>runs: Optional[int] = None,<EOL>use_tqdm: bool = False,<EOL>tqdm_kwargs: Optional[Mapping[str, Any]] = None,<EOL>) -> Mapping[H, Tuple[float, float, float, float, int, int]]:", "body": "results = {}<EOL>log.info('<STR_LIT>', len(subgraphs), runs)<EOL>it = subgraphs.items()<EOL>if use_tqdm:<EOL><INDENT>_tqdm_kwargs = dict(total=len(subgraphs), desc='<STR_LIT>')<EOL>if tqdm_kwargs:<EOL><INDENT>_tqdm_kwargs.update(tqdm_kwargs)<EOL><DEDENT>it = tqdm(it, **_tqdm_kwargs)<EOL><DEDENT>for node, subgraph in it:<EOL><INDENT>number_first_neighbors = subgraph.in_degree(node)<EOL>number_first_neighbors = <NUM_LIT:0> if isinstance(number_first_neighbors, dict) else number_first_neighbors<EOL>mechanism_size = subgraph.number_of_nodes()<EOL>runners = workflow(subgraph, node, key=key, tag=tag, default_score=default_score, runs=runs)<EOL>scores = [runner.get_final_score() for runner in runners]<EOL>if <NUM_LIT:0> == len(scores):<EOL><INDENT>results[node] = (<EOL>None,<EOL>None,<EOL>None,<EOL>None,<EOL>number_first_neighbors,<EOL>mechanism_size,<EOL>)<EOL>continue<EOL><DEDENT>scores = np.array(scores)<EOL>average_score = np.average(scores)<EOL>score_std = np.std(scores)<EOL>med_score = np.median(scores)<EOL>chi_2_stat, norm_p = stats.normaltest(scores)<EOL>results[node] = (<EOL>average_score,<EOL>score_std,<EOL>norm_p,<EOL>med_score,<EOL>number_first_neighbors,<EOL>mechanism_size,<EOL>)<EOL><DEDENT>return results<EOL>", "docstring": "Calculate the scores over precomputed candidate mechanisms.\n\n    :param subgraphs: A dictionary of keys to their corresponding subgraphs\n    :param key: The key in the node data dictionary representing the experimental data. 
Defaults to\n     :data:`pybel_tools.constants.WEIGHT`.\n    :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n    :param default_score: The initial score for all nodes. This number can go up or down.\n    :param runs: The number of times to run the heat diffusion workflow. Defaults to 100.\n    :param use_tqdm: Should there be a progress bar for runners?\n    :return: A dictionary of keys to results tuples\n\n    Example Usage:\n\n    >>> import pandas as pd\n    >>> from pybel_tools.generation import generate_bioprocess_mechanisms\n    >>> from pybel_tools.analysis.heat import calculate_average_scores_on_subgraphs\n    >>> # load graph and data\n    >>> graph = ...\n    >>> candidate_mechanisms = generate_bioprocess_mechanisms(graph)\n    >>> scores = calculate_average_scores_on_subgraphs(candidate_mechanisms)\n    >>> pd.DataFrame.from_items(scores.items(), orient='index', columns=RESULT_LABELS)", "id": "f9429:m1"}
{"signature": "def in_out_ratio(self, node: BaseEntity) -> float:", "body": "return self.graph.in_degree(node) / float(self.graph.out_degree(node))<EOL>", "docstring": "Calculate the ratio of in-degree / out-degree of a node.", "id": "f9429:c0:m3"}
{"signature": "def score_leaves(self) -> Set[BaseEntity]:", "body": "leaves = set(self.iter_leaves())<EOL>if not leaves:<EOL><INDENT>log.warning('<STR_LIT>')<EOL>return set()<EOL><DEDENT>for leaf in leaves:<EOL><INDENT>self.graph.nodes[leaf][self.tag] = self.calculate_score(leaf)<EOL>log.log(<NUM_LIT:5>, '<STR_LIT>', leaf)<EOL><DEDENT>return leaves<EOL>", "docstring": "Calculate the score for all leaves.\n\n        :return: The set of leaf nodes that were scored", "id": "f9429:c0:m8"}
{"signature": "def __init__(self,<EOL>graph: BELGraph,<EOL>target_node: BaseEntity,<EOL>key: Optional[str] = None,<EOL>tag: Optional[str] = None,<EOL>default_score: Optional[float] = None,<EOL>) -> None:", "body": "self.graph: BELGraph = graph.copy()<EOL>self.target_node = target_node<EOL>self.key = key or WEIGHT<EOL>self.default_score = default_score or DEFAULT_SCORE<EOL>self.tag = tag or SCORE<EOL>for node, data in self.graph.nodes(data=True):<EOL><INDENT>if not self.graph.predecessors(node):<EOL><INDENT>self.graph.nodes[node][self.tag] = data.get(self.key, <NUM_LIT:0>)<EOL>log.log(<NUM_LIT:5>, '<STR_LIT>', target_node, self.graph.nodes[node][self.tag])<EOL><DEDENT><DEDENT>", "docstring": "Initialize the heat diffusion runner class.\n\n        :param graph: A BEL graph\n        :param target_node: The BEL node that is the focus of this analysis\n        :param key: The key in the node data dictionary representing the experimental data. Defaults to\n         :data:`pybel_tools.constants.WEIGHT`.\n        :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n        :param default_score: The initial score for all nodes. This number can go up or down.", "id": "f9429:c0:m0"}
{"signature": "def workflow_all(graph: BELGraph,<EOL>key: Optional[str] = None,<EOL>tag: Optional[str] = None,<EOL>default_score: Optional[float] = None,<EOL>runs: Optional[int] = None,<EOL>) -> Mapping[BaseEntity, List[Runner]]:", "body": "results = {}<EOL>for node in get_nodes_by_function(graph, BIOPROCESS):<EOL><INDENT>results[node] = workflow(graph, node, key=key, tag=tag, default_score=default_score, runs=runs)<EOL><DEDENT>return results<EOL>", "docstring": "Run the heat diffusion workflow and get runners for every possible candidate mechanism\n\n    1. Get all biological processes\n    2. Get candidate mechanism induced two level back from each biological process\n    3. Heat diffusion workflow for each candidate mechanism for multiple runs\n    4. Return all runner results\n\n    :param graph: A BEL graph\n    :param key: The key in the node data dictionary representing the experimental data. Defaults to\n     :data:`pybel_tools.constants.WEIGHT`.\n    :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n    :param default_score: The initial score for all nodes. This number can go up or down.\n    :param runs: The number of times to run the heat diffusion workflow. Defaults to 100.\n    :return: A dictionary of {node: list of runners}", "id": "f9429:m5"}
{"signature": "def calculate_average_scores_on_graph(<EOL>graph: BELGraph,<EOL>key: Optional[str] = None,<EOL>tag: Optional[str] = None,<EOL>default_score: Optional[float] = None,<EOL>runs: Optional[int] = None,<EOL>use_tqdm: bool = False,<EOL>):", "body": "subgraphs = generate_bioprocess_mechanisms(graph, key=key)<EOL>scores = calculate_average_scores_on_subgraphs(<EOL>subgraphs,<EOL>key=key,<EOL>tag=tag,<EOL>default_score=default_score,<EOL>runs=runs,<EOL>use_tqdm=use_tqdm<EOL>)<EOL>return scores<EOL>", "docstring": "Calculate the scores over all biological processes in the sub-graph.\n\n    As an implementation, it simply computes the sub-graphs then calls :func:`calculate_average_scores_on_subgraphs` as\n    described in that function's documentation.\n\n    :param graph: A BEL graph with heats already on the nodes\n    :param key: The key in the node data dictionary representing the experimental data. Defaults to\n     :data:`pybel_tools.constants.WEIGHT`.\n    :param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'\n    :param default_score: The initial score for all nodes. This number can go up or down.\n    :param runs: The number of times to run the heat diffusion workflow. Defaults to 100.\n    :param use_tqdm: Should there be a progress bar for runners?\n    :return: A dictionary of {pybel node tuple: results tuple}\n    :rtype: dict[tuple, tuple]\n\n    Suggested usage with :mod:`pandas`:\n\n    >>> import pandas as pd\n    >>> from pybel_tools.analysis.heat import calculate_average_scores_on_graph\n    >>> graph = ...  # load graph and data\n    >>> scores = calculate_average_scores_on_graph(graph)\n    >>> pd.DataFrame.from_items(scores.items(), orient='index', columns=RESULT_LABELS)", "id": "f9429:m0"}
{"signature": "def done_chomping(self) -> bool:", "body": "return self.tag in self.graph.nodes[self.target_node]<EOL>", "docstring": "Determines if the algorithm is complete by checking if the target node of this analysis has been scored\n        yet. Because the algorithm removes edges when it gets stuck until it is un-stuck, it is always guaranteed to\n        finish.\n\n        :return: Is the algorithm done running?", "id": "f9429:c0:m11"}
{"signature": "def tearDown(self):", "body": "if os.path.exists(self.path):<EOL><INDENT>os.remove(self.path)<EOL><DEDENT>", "docstring": "Called after each test to remove the file", "id": "f9440:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def with_data(path, data):<DEDENT>", "body": "<EOL>if isinstance(data, str):<EOL><INDENT>data = json.loads(data)<EOL><DEDENT>if os.path.exists(path):<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>f = File(path)<EOL>f.data = data<EOL>return f<EOL><DEDENT>", "docstring": "Initialize a new file that starts out with some data. Pass data\n        as a list, dict, or JSON string.", "id": "f9441:c7:m1"}
{"signature": "def _initfile(path, data=\"<STR_LIT>\"):", "body": "data = {} if data.lower() == \"<STR_LIT>\" else []<EOL>if not os.path.exists(path):  <EOL><INDENT>dirname = os.path.dirname(path)<EOL>if dirname and not os.path.exists(dirname):<EOL><INDENT>raise IOError(<EOL>(\"<STR_LIT>\"<EOL>\"<STR_LIT>\").format(os.path.dirname(path))<EOL>)<EOL><DEDENT>with open(path, \"<STR_LIT:w>\") as f:<EOL><INDENT>json.dump(data, f)<EOL><DEDENT>return True<EOL><DEDENT>elif os.path.getsize(path) == <NUM_LIT:0>:  <EOL><INDENT>with open(path, \"<STR_LIT:w>\") as f:<EOL><INDENT>json.dump(data, f)<EOL><DEDENT><DEDENT>else:  <EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Initialize an empty JSON file.", "id": "f9441:m0"}
{"signature": "@property<EOL><INDENT>def is_caching(self):<DEDENT>", "body": "return hasattr(self, \"<STR_LIT>\")<EOL>", "docstring": "Returns a boolean value describing whether a grouped write is\n        underway.", "id": "f9441:c4:m10"}
{"signature": "def get_reference_data(<EOL>self,<EOL>modified_since: Optional[datetime.datetime] = None<EOL>) -> GetReferenceDataResponse:", "body": "if modified_since is None:<EOL><INDENT>modified_since = datetime.datetime(year=<NUM_LIT>, month=<NUM_LIT:1>, day=<NUM_LIT:1>)<EOL><DEDENT>response = requests.get(<EOL>'<STR_LIT>'.format(API_URL_BASE),<EOL>headers={<EOL>'<STR_LIT>': self._format_dt(modified_since),<EOL>**self._get_headers(),<EOL>},<EOL>timeout=self._timeout,<EOL>)<EOL>if not response.ok:<EOL><INDENT>raise FuelCheckError.create(response)<EOL><DEDENT>return GetReferenceDataResponse.deserialize(response.json())<EOL>", "docstring": "Fetches API reference data.\n\n:param modified_since: The response will be empty if no\nchanges have been made to the reference data since this\ntimestamp, otherwise all reference data will be returned.", "id": "f9446:c0:m7"}
{"signature": "def get_fuel_prices_for_station(<EOL>self,<EOL>station: int<EOL>) -> List[Price]:", "body": "response = requests.get(<EOL>'<STR_LIT>'.format(API_URL_BASE, station),<EOL>headers=self._get_headers(),<EOL>timeout=self._timeout,<EOL>)<EOL>if not response.ok:<EOL><INDENT>raise FuelCheckError.create(response)<EOL><DEDENT>data = response.json()<EOL>return [Price.deserialize(data) for data in data['<STR_LIT>']]<EOL>", "docstring": "Gets the fuel prices for a specific fuel station.", "id": "f9446:c0:m4"}
{"signature": "def data_decorator(cls):", "body": "def generate_test_func(name, original_function, num, params):<EOL><INDENT>if original_function._provider_name_suffix:<EOL><INDENT>data_name = params[<NUM_LIT:0>]<EOL>params = params[<NUM_LIT:1>:]<EOL><DEDENT>else:<EOL><INDENT>data_name = num<EOL><DEDENT>expanded_name = '<STR_LIT>' % (name, data_name)<EOL>def generated_test_function(self):<EOL><INDENT>original_function(self, *params)<EOL><DEDENT>setattr(cls, expanded_name, generated_test_function)<EOL><DEDENT>for name in dir(cls):<EOL><INDENT>func = getattr(cls, name)<EOL>if hasattr(func, '<STR_LIT>'):<EOL><INDENT>num = <NUM_LIT:1><EOL>for params in getattr(cls, func._provider_method)():<EOL><INDENT>generate_test_func(name, func, num, params)<EOL>num += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return cls<EOL>", "docstring": "A class decorator that works with the @provider decorator to generate test\nmethod from a data provider", "id": "f9453:m1"}
{"signature": "def close(self):", "body": "if not self.socket:<EOL><INDENT>return<EOL><DEDENT>self.socket.close()<EOL>self.socket = None<EOL>", "docstring": "Closes any open connection", "id": "f9454:c2:m1"}
{"signature": "def ensure_connected(self):", "body": "if self.socket:<EOL><INDENT>return True<EOL><DEDENT>host, port = self.url_info<EOL>session = tls.TLSSession()<EOL>self.socket = tls.TLSSocket(host, port, timeout=self.timeout, session=session)<EOL>return False<EOL>", "docstring": "Make sure a valid tls.TLSSocket() is open to the server\n\n:return:\n    A boolean indicating if the connection was reused", "id": "f9454:c2:m4"}
{"signature": "def run():", "body": "setup_file = os.path.join(package_root, '<STR_LIT>')<EOL>git_wc_proc = subprocess.Popen(<EOL>['<STR_LIT>', '<STR_LIT:status>', '<STR_LIT>', '<STR_LIT>'],<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.STDOUT,<EOL>cwd=package_root<EOL>)<EOL>git_wc_status, _ = git_wc_proc.communicate()<EOL>if len(git_wc_status) > <NUM_LIT:0>:<EOL><INDENT>print(git_wc_status.decode('<STR_LIT:utf-8>').rstrip(), file=sys.stderr)<EOL>print('<STR_LIT>', file=sys.stderr)<EOL>return False<EOL><DEDENT>git_tag_proc = subprocess.Popen(<EOL>['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'],<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE,<EOL>cwd=package_root<EOL>)<EOL>tag, tag_error = git_tag_proc.communicate()<EOL>if len(tag_error) > <NUM_LIT:0>:<EOL><INDENT>print(tag_error.decode('<STR_LIT:utf-8>').rstrip(), file=sys.stderr)<EOL>print('<STR_LIT>', file=sys.stderr)<EOL>return False<EOL><DEDENT>if len(tag) == <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>', file=sys.stderr)<EOL>return False<EOL><DEDENT>tag = tag.decode('<STR_LIT:ascii>').strip()<EOL>setuptools.sandbox.run_setup(<EOL>setup_file,<EOL>['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>)<EOL>twine.cli.dispatch(['<STR_LIT>', '<STR_LIT>' % (package_name, tag)])<EOL>setuptools.sandbox.run_setup(<EOL>setup_file,<EOL>['<STR_LIT>']<EOL>)<EOL>", "docstring": "Creates a sdist .tar.gz and a bdist_wheel --univeral .whl and uploads\nthem to pypi\n\n:return:\n    A bool - if the packaging and upload process was successful", "id": "f9464:m0"}
{"signature": "def run():", "body": "print('<STR_LIT>')<EOL>md_files = []<EOL>for root, _, filenames in os.walk(os.path.join(package_root, '<STR_LIT>')):<EOL><INDENT>for filename in filenames:<EOL><INDENT>if not filename.endswith('<STR_LIT>'):<EOL><INDENT>continue<EOL><DEDENT>md_files.append(os.path.join(root, filename))<EOL><DEDENT><DEDENT>parser = CommonMark.Parser()<EOL>for md_file in md_files:<EOL><INDENT>md_file_relative = md_file[len(package_root) + <NUM_LIT:1>:]<EOL>if md_file_relative in md_source_map:<EOL><INDENT>py_files = md_source_map[md_file_relative]<EOL>py_paths = [os.path.join(package_root, py_file) for py_file in py_files]<EOL><DEDENT>else:<EOL><INDENT>py_files = [os.path.basename(md_file).replace('<STR_LIT>', '<STR_LIT>')]<EOL>py_paths = [os.path.join(package_root, package_name, py_files[<NUM_LIT:0>])]<EOL>if not os.path.exists(py_paths[<NUM_LIT:0>]):<EOL><INDENT>continue<EOL><DEDENT><DEDENT>with open(md_file, '<STR_LIT:rb>') as f:<EOL><INDENT>markdown = f.read().decode('<STR_LIT:utf-8>')<EOL><DEDENT>original_markdown = markdown<EOL>md_lines = list(markdown.splitlines())<EOL>md_ast = parser.parse(markdown)<EOL>last_class = []<EOL>last = {}<EOL>sections = OrderedDict()<EOL>find_sections(md_ast, sections, last, last_class, markdown.count(\"<STR_LIT:\\n>\") + <NUM_LIT:1>)<EOL>md_chunks = {}<EOL>for index, py_file in enumerate(py_files):<EOL><INDENT>py_path = py_paths[index]<EOL>with open(os.path.join(py_path), '<STR_LIT:rb>') as f:<EOL><INDENT>code = f.read().decode('<STR_LIT:utf-8>')<EOL>module_ast = ast.parse(code, filename=py_file)<EOL>code_lines = list(code.splitlines())<EOL><DEDENT>for node in ast.iter_child_nodes(module_ast):<EOL><INDENT>walk_ast(node, code_lines, sections, md_chunks)<EOL><DEDENT><DEDENT>added_lines = <NUM_LIT:0><EOL>def _replace_md(key, sections, md_chunk, md_lines, added_lines):<EOL><INDENT>start, end = sections[key]<EOL>start -= <NUM_LIT:1><EOL>start += added_lines<EOL>end += added_lines<EOL>new_lines = 
md_chunk.split('<STR_LIT:\\n>')<EOL>added_lines += len(new_lines) - (end - start)<EOL>if start > <NUM_LIT:0> and md_lines[start][<NUM_LIT:0>:<NUM_LIT:4>] == '<STR_LIT>' and md_lines[start - <NUM_LIT:1>][<NUM_LIT:0>:<NUM_LIT:1>] == '<STR_LIT:>>':<EOL><INDENT>added_lines += <NUM_LIT:1><EOL>new_lines.insert(<NUM_LIT:0>, '<STR_LIT>')<EOL><DEDENT>md_lines[start:end] = new_lines<EOL>return added_lines<EOL><DEDENT>for key in sections:<EOL><INDENT>if key not in md_chunks:<EOL><INDENT>raise ValueError('<STR_LIT>' % key[<NUM_LIT:1>])<EOL><DEDENT>added_lines = _replace_md(key, sections, md_chunks[key], md_lines, added_lines)<EOL><DEDENT>markdown = '<STR_LIT:\\n>'.join(md_lines).strip() + '<STR_LIT:\\n>'<EOL>if original_markdown != markdown:<EOL><INDENT>with open(md_file, '<STR_LIT:wb>') as f:<EOL><INDENT>f.write(markdown.encode('<STR_LIT:utf-8>'))<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Looks through the docs/ dir and parses each markdown document, looking for\nsections to update from Python docstrings. Looks for section headers in\nthe format:\n\n - ### `ClassName()` class\n - ##### `.method_name()` method\n - ##### `.attribute_name` attribute\n - ### `function_name()` function\n\nThe markdown content following these section headers up until the next\nsection header will be replaced by new markdown generated from the Python\ndocstrings of the associated source files.\n\nBy default maps docs/{name}.md to {modulename}/{name}.py. Allows for\ncustom mapping via the md_source_map variable.", "id": "f9465:m3"}
{"signature": "def _find_sections(md_ast, sections, last, last_class, total_lines=None):", "body": "def child_walker(node):<EOL><INDENT>for child, entering in node.walker():<EOL><INDENT>if child == node:<EOL><INDENT>continue<EOL><DEDENT>yield child, entering<EOL><DEDENT><DEDENT>for child, entering in child_walker(md_ast):<EOL><INDENT>if child.t == '<STR_LIT>':<EOL><INDENT>start_line = child.sourcepos[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>if child.level == <NUM_LIT:2>:<EOL><INDENT>if last:<EOL><INDENT>sections[(last['<STR_LIT>'], last['<STR_LIT>'])] = (last['<STR_LIT>'], start_line - <NUM_LIT:1>)<EOL>last.clear()<EOL><DEDENT><DEDENT>if child.level in set([<NUM_LIT:3>, <NUM_LIT:5>]):<EOL><INDENT>heading_elements = []<EOL>for heading_child, _ in child_walker(child):<EOL><INDENT>heading_elements.append(heading_child)<EOL><DEDENT>if len(heading_elements) != <NUM_LIT:2>:<EOL><INDENT>continue<EOL><DEDENT>first = heading_elements[<NUM_LIT:0>]<EOL>second = heading_elements[<NUM_LIT:1>]<EOL>if first.t != '<STR_LIT:code>':<EOL><INDENT>continue<EOL><DEDENT>if second.t != '<STR_LIT:text>':<EOL><INDENT>continue<EOL><DEDENT>type_name = second.literal.strip()<EOL>identifier = first.literal.strip().replace('<STR_LIT>', '<STR_LIT>').lstrip('<STR_LIT:.>')<EOL>if last:<EOL><INDENT>sections[(last['<STR_LIT>'], last['<STR_LIT>'])] = (last['<STR_LIT>'], start_line - <NUM_LIT:1>)<EOL>last.clear()<EOL><DEDENT>if type_name == '<STR_LIT>':<EOL><INDENT>if child.level != <NUM_LIT:3>:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>if type_name == '<STR_LIT:class>':<EOL><INDENT>if child.level != <NUM_LIT:3>:<EOL><INDENT>continue<EOL><DEDENT>last_class.append(identifier)<EOL><DEDENT>if type_name in set(['<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>if child.level != <NUM_LIT:5>:<EOL><INDENT>continue<EOL><DEDENT>identifier = last_class[-<NUM_LIT:1>] + '<STR_LIT:.>' + identifier<EOL><DEDENT>last.update({<EOL>'<STR_LIT>': type_name,<EOL>'<STR_LIT>': identifier,<EOL>'<STR_LIT>': 
start_line,<EOL>})<EOL><DEDENT><DEDENT>elif child.t == '<STR_LIT>':<EOL><INDENT>find_sections(child, sections, last, last_class)<EOL><DEDENT><DEDENT>if last:<EOL><INDENT>sections[(last['<STR_LIT>'], last['<STR_LIT>'])] = (last['<STR_LIT>'], total_lines)<EOL><DEDENT>", "docstring": "Walks through a CommonMark AST to find section headers that delineate\ncontent that should be updated by this script\n\n:param md_ast:\n    The AST of the markdown document\n\n:param sections:\n    A dict to store the start and end lines of a section. The key will be\n    a two-element tuple of the section type (\"class\", \"function\",\n    \"method\" or \"attribute\") and identifier. The values are a two-element\n    tuple of the start and end line number in the markdown document of the\n    section.\n\n:param last:\n    A dict containing information about the last section header seen.\n    Includes the keys \"type_name\", \"identifier\", \"start_line\".\n\n:param last_class:\n    A unicode string of the name of the last class found - used when\n    processing methods and attributes.\n\n:param total_lines:\n    An integer of the total number of lines in the markdown document -\n    used to work around a bug in the API of the Python port of CommonMark", "id": "f9465:m1"}
{"signature": "def _pep425_supports_manylinux():", "body": "try:<EOL><INDENT>import _manylinux<EOL>return bool(_manylinux.manylinux1_compatible)<EOL><DEDENT>except (ImportError, AttributeError):<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>proc = ctypes.CDLL(None)<EOL>gnu_get_libc_version = proc.gnu_get_libc_version<EOL>gnu_get_libc_version.restype = ctypes.c_char_p<EOL>ver = gnu_get_libc_version()<EOL>if not isinstance(ver, str_cls):<EOL><INDENT>ver = ver.decode('<STR_LIT:ascii>')<EOL><DEDENT>match = re.match(r'<STR_LIT>', ver)<EOL>return match and match.group(<NUM_LIT:1>) == '<STR_LIT:2>' and int(match.group(<NUM_LIT:2>)) >= <NUM_LIT:5><EOL><DEDENT>except (AttributeError):<EOL><INDENT>return False<EOL><DEDENT>", "docstring": ":return:\n    A boolean indicating if the machine can use manylinux1 packages", "id": "f9466:m2"}
{"signature": "def _pep425tags():", "body": "tags = []<EOL>versions = []<EOL>version_info = _pep425_version()<EOL>major = version_info[:-<NUM_LIT:1>]<EOL>for minor in range(version_info[-<NUM_LIT:1>], -<NUM_LIT:1>, -<NUM_LIT:1>):<EOL><INDENT>versions.append('<STR_LIT>'.join(map(str, major + (minor,))))<EOL><DEDENT>impl = _pep425_implementation()<EOL>abis = []<EOL>abi = _pep425_get_abi()<EOL>if abi:<EOL><INDENT>abis.append(abi)<EOL><DEDENT>abi3 = _pep425_implementation() == '<STR_LIT>' and sys.version_info >= (<NUM_LIT:3>,)<EOL>if abi3:<EOL><INDENT>abis.append('<STR_LIT>')<EOL><DEDENT>abis.append('<STR_LIT:none>')<EOL>if sys.platform == '<STR_LIT>':<EOL><INDENT>plat_ver = platform.mac_ver()<EOL>ver_parts = plat_ver[<NUM_LIT:0>].split('<STR_LIT:.>')<EOL>minor = int(ver_parts[<NUM_LIT:1>])<EOL>arch = plat_ver[<NUM_LIT:2>]<EOL>if sys.maxsize == <NUM_LIT>:<EOL><INDENT>arch = '<STR_LIT>'<EOL><DEDENT>arches = []<EOL>while minor > <NUM_LIT:5>:<EOL><INDENT>arches.append('<STR_LIT>' % (minor, arch))<EOL>arches.append('<STR_LIT>' % (minor,))<EOL>arches.append('<STR_LIT>' % (minor,))<EOL>minor -= <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>if sys.platform == '<STR_LIT:win32>':<EOL><INDENT>if '<STR_LIT>' in sys.version.lower():<EOL><INDENT>arches = ['<STR_LIT>']<EOL><DEDENT>arches = [sys.platform]<EOL><DEDENT>elif hasattr(os, '<STR_LIT>'):<EOL><INDENT>(plat, _, _, _, machine) = os.uname()<EOL>plat = plat.lower().replace('<STR_LIT:/>', '<STR_LIT>')<EOL>machine.replace('<STR_LIT:U+0020>', '<STR_LIT:_>').replace('<STR_LIT:/>', '<STR_LIT:_>')<EOL>if plat == '<STR_LIT>' and sys.maxsize == <NUM_LIT>:<EOL><INDENT>machine = '<STR_LIT>'<EOL><DEDENT>arch = '<STR_LIT>' % (plat, machine)<EOL>if _pep425_supports_manylinux():<EOL><INDENT>arches = [arch.replace('<STR_LIT>', '<STR_LIT>'), arch]<EOL><DEDENT>else:<EOL><INDENT>arches = [arch]<EOL><DEDENT><DEDENT><DEDENT>for abi in abis:<EOL><INDENT>for arch in arches:<EOL><INDENT>tags.append(('<STR_LIT>' % (impl, versions[<NUM_LIT:0>]), 
abi, arch))<EOL><DEDENT><DEDENT>if abi3:<EOL><INDENT>for version in versions[<NUM_LIT:1>:]:<EOL><INDENT>for arch in arches:<EOL><INDENT>tags.append(('<STR_LIT>' % (impl, version), '<STR_LIT>', arch))<EOL><DEDENT><DEDENT><DEDENT>for arch in arches:<EOL><INDENT>tags.append(('<STR_LIT>' % (versions[<NUM_LIT:0>][<NUM_LIT:0>]), '<STR_LIT:none>', arch))<EOL><DEDENT>tags.append(('<STR_LIT>' % (impl, versions[<NUM_LIT:0>]), '<STR_LIT:none>', '<STR_LIT>'))<EOL>tags.append(('<STR_LIT>' % (impl, versions[<NUM_LIT:0>][<NUM_LIT:0>]), '<STR_LIT:none>', '<STR_LIT>'))<EOL>for i, version in enumerate(versions):<EOL><INDENT>tags.append(('<STR_LIT>' % (version,), '<STR_LIT:none>', '<STR_LIT>'))<EOL>if i == <NUM_LIT:0>:<EOL><INDENT>tags.append(('<STR_LIT>' % (version[<NUM_LIT:0>]), '<STR_LIT:none>', '<STR_LIT>'))<EOL><DEDENT><DEDENT>tags.append(('<STR_LIT>', '<STR_LIT:none>', '<STR_LIT>'))<EOL>return tags<EOL>", "docstring": ":return:\n    A list of 3-element tuples with unicode strings or None:\n     [0] implementation tag - cp33, pp27, cp26, py2, py2.py3\n     [1] abi tag - cp26m, None\n     [2] arch tag - linux_x86_64, macosx_10_10_x85_64, etc", "id": "f9466:m4"}
{"signature": "def _list_files(root):", "body": "dir_patterns, file_patterns = _gitignore(root)<EOL>paths = []<EOL>prefix = os.path.abspath(root) + os.sep<EOL>for base, dirs, files in os.walk(root):<EOL><INDENT>for d in dirs:<EOL><INDENT>for dir_pattern in dir_patterns:<EOL><INDENT>if fnmatch(d, dir_pattern):<EOL><INDENT>dirs.remove(d)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>for f in files:<EOL><INDENT>skip = False<EOL>for file_pattern in file_patterns:<EOL><INDENT>if fnmatch(f, file_pattern):<EOL><INDENT>skip = True<EOL>break<EOL><DEDENT><DEDENT>if skip:<EOL><INDENT>continue<EOL><DEDENT>full_path = os.path.join(base, f)<EOL>if full_path[:len(prefix)] == prefix:<EOL><INDENT>full_path = full_path[len(prefix):]<EOL><DEDENT>paths.append(full_path)<EOL><DEDENT><DEDENT>return sorted(paths)<EOL>", "docstring": "Lists all of the files in a directory, taking into account any .gitignore\nfile that is present\n\n:param root:\n    A unicode filesystem path\n\n:return:\n    A list of unicode strings, containing paths of all files not ignored\n    by .gitignore with root, using relative paths", "id": "f9468:m6"}
{"signature": "def _do_request(method, url, headers, data=None, query_params=None, timeout=<NUM_LIT:20>):", "body": "if query_params:<EOL><INDENT>url += '<STR_LIT:?>' + urlencode(query_params).replace('<STR_LIT:+>', '<STR_LIT>')<EOL><DEDENT>if isinstance(data, dict):<EOL><INDENT>data_bytes = {}<EOL>for key in data:<EOL><INDENT>data_bytes[key.encode('<STR_LIT:utf-8>')] = data[key].encode('<STR_LIT:utf-8>')<EOL><DEDENT>data = urlencode(data_bytes)<EOL>headers['<STR_LIT:Content-Type>'] = '<STR_LIT>'<EOL><DEDENT>if isinstance(data, str_cls):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>tempfd, tempf_path = tempfile.mkstemp('<STR_LIT>')<EOL>os.write(tempfd, data or b'<STR_LIT>')<EOL>os.close(tempfd)<EOL>if sys.platform == '<STR_LIT:win32>':<EOL><INDENT>powershell_exe = os.path.join('<STR_LIT>')<EOL>code = \"<STR_LIT>\"<EOL>code += \"<STR_LIT>\"<EOL>for key in headers:<EOL><INDENT>code += \"<STR_LIT>\" % (key, headers[key])<EOL><DEDENT>code += \"<STR_LIT>\" % (url, method, tempf_path)<EOL>code += \"<STR_LIT>\"<EOL>code += \"<STR_LIT>\"<EOL>stdout, stderr = _execute([powershell_exe, '<STR_LIT>', code], os.getcwd())<EOL>if stdout[-<NUM_LIT:2>:] == b'<STR_LIT:\\r\\n>' and b'<STR_LIT>' in stdout:<EOL><INDENT>stdout = stdout[<NUM_LIT:0>:-<NUM_LIT:2>]<EOL>parts = stdout.split(b'<STR_LIT>', <NUM_LIT:1>)<EOL>if len(parts) == <NUM_LIT:2>:<EOL><INDENT>stdout = parts[<NUM_LIT:0>] + b'<STR_LIT>' + codecs.decode(parts[<NUM_LIT:1>].replace(b'<STR_LIT:->', b'<STR_LIT>'), '<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>args = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>method,<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'<EOL>]<EOL>for key in headers:<EOL><INDENT>args.append('<STR_LIT>')<EOL>args.append(\"<STR_LIT>\" % (key, headers[key]))<EOL><DEDENT>args.append('<STR_LIT>')<EOL>args.append('<STR_LIT>' % tempf_path)<EOL>args.append(url)<EOL>stdout, stderr = _execute(args, 
os.getcwd())<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>if tempf_path and os.path.exists(tempf_path):<EOL><INDENT>os.remove(tempf_path)<EOL><DEDENT><DEDENT>if len(stderr) > <NUM_LIT:0>:<EOL><INDENT>raise URLError(\"<STR_LIT>\" % (method, url, stderr))<EOL><DEDENT>parts = stdout.split(b'<STR_LIT>', <NUM_LIT:1>)<EOL>if len(parts) != <NUM_LIT:2>:<EOL><INDENT>raise URLError(\"<STR_LIT>\" % (method, url, stdout))<EOL><DEDENT>header_block, body = parts<EOL>content_type_header = None<EOL>content_len_header = None<EOL>for hline in header_block.decode('<STR_LIT>').splitlines():<EOL><INDENT>hline_parts = hline.split('<STR_LIT::>', <NUM_LIT:1>)<EOL>if len(hline_parts) != <NUM_LIT:2>:<EOL><INDENT>continue<EOL><DEDENT>name, val = hline_parts<EOL>name = name.strip().lower()<EOL>val = val.strip()<EOL>if name == '<STR_LIT>':<EOL><INDENT>content_type_header = val<EOL><DEDENT>if name == '<STR_LIT>':<EOL><INDENT>content_len_header = val<EOL><DEDENT><DEDENT>if content_type_header is None and content_len_header != '<STR_LIT:0>':<EOL><INDENT>raise URLError(\"<STR_LIT>\" % (method, url, stdout))<EOL><DEDENT>if content_type_header is None:<EOL><INDENT>content_type = '<STR_LIT>'<EOL>encoding = '<STR_LIT:utf-8>'<EOL><DEDENT>else:<EOL><INDENT>content_type, params = cgi.parse_header(content_type_header)<EOL>encoding = params.get('<STR_LIT>')<EOL><DEDENT>return (content_type, encoding, body)<EOL>", "docstring": "Performs an HTTP request\n\n:param method:\n    A unicode string of 'POST' or 'PUT'\n\n:param url;\n    A unicode string of the URL to request\n\n:param headers:\n    A dict of unicode strings, where keys are header names and values are\n    the header values.\n\n:param data:\n    A dict of unicode strings (to be encoded as\n    application/x-www-form-urlencoded), or a byte string of data.\n\n:param query_params:\n    A dict of unicode keys and values to pass as query params\n\n:param timeout:\n    An integer number of seconds to use as the timeout\n\n:return:\n    A 3-element tuple:\n   
  - 0: A unicode string of the response content-type\n     - 1: A unicode string of the response encoding, or None\n     - 2: A byte string of the response body", "id": "f9468:m8"}
{"signature": "def run(ci=False):", "body": "xml_report_path = os.path.join(package_root, '<STR_LIT>')<EOL>if os.path.exists(xml_report_path):<EOL><INDENT>os.unlink(xml_report_path)<EOL><DEDENT>cov = coverage.Coverage(include='<STR_LIT>' % package_name)<EOL>cov.start()<EOL>from .tests import run as run_tests<EOL>result = run_tests()<EOL>print()<EOL>if ci:<EOL><INDENT>suite = unittest.TestSuite()<EOL>loader = unittest.TestLoader()<EOL>for other_package in other_packages:<EOL><INDENT>for test_class in _load_package_tests(other_package):<EOL><INDENT>suite.addTest(loader.loadTestsFromTestCase(test_class))<EOL><DEDENT><DEDENT>if suite.countTestCases() > <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>')<EOL>sys.stdout.flush()<EOL>runner_result = unittest.TextTestRunner(stream=sys.stdout, verbosity=<NUM_LIT:1>).run(suite)<EOL>result = runner_result.wasSuccessful() and result<EOL>print()<EOL>sys.stdout.flush()<EOL><DEDENT><DEDENT>cov.stop()<EOL>cov.save()<EOL>cov.report(show_missing=False)<EOL>print()<EOL>sys.stdout.flush()<EOL>if ci:<EOL><INDENT>cov.xml_report()<EOL><DEDENT>if ci and result and os.path.exists(xml_report_path):<EOL><INDENT>_codecov_submit()<EOL>print()<EOL><DEDENT>return result<EOL>", "docstring": "Runs the tests while measuring coverage\n\n:param ci:\n    If coverage is being run in a CI environment - this triggers trying to\n    run the tests for the rest of modularcrypto and uploading coverage data\n\n:return:\n    A bool - if the tests ran successfully", "id": "f9468:m0"}
{"signature": "def _parse_env_var_file(data):", "body": "output = {}<EOL>for line in data.splitlines():<EOL><INDENT>line = line.strip()<EOL>if not line or '<STR_LIT:=>' not in line:<EOL><INDENT>continue<EOL><DEDENT>parts = line.split('<STR_LIT:=>')<EOL>if len(parts) != <NUM_LIT:2>:<EOL><INDENT>continue<EOL><DEDENT>name = parts[<NUM_LIT:0>]<EOL>value = parts[<NUM_LIT:1>]<EOL>if len(value) > <NUM_LIT:1>:<EOL><INDENT>if value[<NUM_LIT:0>] == '<STR_LIT:\">' and value[-<NUM_LIT:1>] == '<STR_LIT:\">':<EOL><INDENT>value = value[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>output[name] = value<EOL><DEDENT>return output<EOL>", "docstring": "Parses a basic VAR=\"value data\" file contents into a dict\n\n:param data:\n    A unicode string of the file data\n\n:return:\n    A dict of parsed name/value data", "id": "f9468:m4"}
{"signature": "def _extract_info(archive, info):", "body": "if isinstance(archive, zipfile.ZipFile):<EOL><INDENT>fn = info.filename<EOL>is_dir = fn.endswith('<STR_LIT:/>') or fn.endswith('<STR_LIT:\\\\>')<EOL>out = archive.read(info)<EOL>if is_dir and out == b'<STR_LIT>':<EOL><INDENT>return None<EOL><DEDENT>return out<EOL><DEDENT>info_file = archive.extractfile(info)<EOL>if info_file:<EOL><INDENT>return info_file.read()<EOL><DEDENT>return None<EOL>", "docstring": "Extracts the contents of an archive info object\n\n;param archive:\n    An archive from _open_archive()\n\n:param info:\n    An info object from _list_archive_members()\n\n:return:\n    None, or a byte string of the file contents", "id": "f9471:m7"}
{"signature": "def _extract_package(deps_dir, pkg_path):", "body": "if pkg_path.endswith('<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>zf = None<EOL>zf = zipfile.ZipFile(pkg_path, '<STR_LIT:r>')<EOL>for zi in zf.infolist():<EOL><INDENT>if not zi.filename.startswith('<STR_LIT>'):<EOL><INDENT>continue<EOL><DEDENT>data = _extract_info(zf, zi)<EOL>if data is not None:<EOL><INDENT>dst_path = os.path.join(deps_dir, zi.filename[<NUM_LIT:8>:])<EOL>dst_dir = os.path.dirname(dst_path)<EOL>if not os.path.exists(dst_dir):<EOL><INDENT>os.makedirs(dst_dir)<EOL><DEDENT>with open(dst_path, '<STR_LIT:wb>') as f:<EOL><INDENT>f.write(data)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>if zf:<EOL><INDENT>zf.close()<EOL><DEDENT><DEDENT>return<EOL><DEDENT>if pkg_path.endswith('<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>zf = None<EOL>zf = zipfile.ZipFile(pkg_path, '<STR_LIT:r>')<EOL>zf.extractall(deps_dir)<EOL><DEDENT>finally:<EOL><INDENT>if zf:<EOL><INDENT>zf.close()<EOL><DEDENT><DEDENT>return<EOL><DEDENT>try:<EOL><INDENT>ar = None<EOL>ar = _open_archive(pkg_path)<EOL>pkg_name = None<EOL>base_path = _archive_single_dir(ar) or '<STR_LIT>'<EOL>if len(base_path):<EOL><INDENT>if '<STR_LIT:->' in base_path:<EOL><INDENT>pkg_name, _ = base_path.split('<STR_LIT:->', <NUM_LIT:1>)<EOL><DEDENT>base_path += '<STR_LIT:/>'<EOL><DEDENT>base_pkg_path = None<EOL>if pkg_name is not None:<EOL><INDENT>base_pkg_path = base_path + pkg_name + '<STR_LIT:/>'<EOL><DEDENT>src_path = base_path + '<STR_LIT>'<EOL>members = []<EOL>for info in _list_archive_members(ar):<EOL><INDENT>fn = _info_name(info)<EOL>if base_pkg_path is not None and fn.startswith(base_pkg_path):<EOL><INDENT>dst_path = fn[len(base_pkg_path) - len(pkg_name) - <NUM_LIT:1>:]<EOL>members.append((info, dst_path))<EOL>continue<EOL><DEDENT>if fn.startswith(src_path):<EOL><INDENT>members.append((info, fn[len(src_path):]))<EOL>continue<EOL><DEDENT><DEDENT>for info, path in members:<EOL><INDENT>info_data = _extract_info(ar, info)<EOL>if info_data 
is not None:<EOL><INDENT>dst_path = os.path.join(deps_dir, path)<EOL>dst_dir = os.path.dirname(dst_path)<EOL>if not os.path.exists(dst_dir):<EOL><INDENT>os.makedirs(dst_dir)<EOL><DEDENT>with open(dst_path, '<STR_LIT:wb>') as f:<EOL><INDENT>f.write(info_data)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>if ar:<EOL><INDENT>ar.close()<EOL><DEDENT><DEDENT>", "docstring": "Extract a .whl, .zip, .tar.gz or .tar.bz2 into a package path to\nuse when running CI tasks\n\n:param deps_dir:\n    A unicode string of the directory the package should be extracted to\n\n:param pkg_path:\n    A unicode string of the path to the archive", "id": "f9471:m8"}
{"signature": "def _info_name(info):", "body": "if isinstance(info, zipfile.ZipInfo):<EOL><INDENT>return info.filename.replace('<STR_LIT:\\\\>', '<STR_LIT:/>')<EOL><DEDENT>return info.name.replace('<STR_LIT:\\\\>', '<STR_LIT:/>')<EOL>", "docstring": "Returns a normalized file path for an archive info object\n\n:param info:\n    An info object from _list_archive_members()\n\n:return:\n    A unicode string with all directory separators normalized to \"/\"", "id": "f9471:m6"}
{"signature": "def _execute(params, cwd):", "body": "proc = subprocess.Popen(<EOL>params,<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE,<EOL>cwd=cwd<EOL>)<EOL>stdout, stderr = proc.communicate()<EOL>code = proc.wait()<EOL>if code != <NUM_LIT:0>:<EOL><INDENT>e = OSError('<STR_LIT>' % (params, code, stderr))<EOL>e.stdout = stdout<EOL>e.stderr = stderr<EOL>raise e<EOL><DEDENT>return (stdout, stderr)<EOL>", "docstring": "Executes a subprocess\n\n:param params:\n    A list of the executable and arguments to pass to it\n\n:param cwd:\n    The working directory to execute the command in\n\n:return:\n    A 2-element tuple of (stdout, stderr)", "id": "f9471:m11"}
{"signature": "def _parse_requires(path):", "body": "python_version = '<STR_LIT:.>'.join(map(str_cls, sys.version_info[<NUM_LIT:0>:<NUM_LIT:2>]))<EOL>sys_platform = sys.platform<EOL>packages = []<EOL>with open(path, '<STR_LIT:rb>') as f:<EOL><INDENT>contents = f.read().decode('<STR_LIT:utf-8>')<EOL><DEDENT>for line in re.split(r'<STR_LIT>', contents):<EOL><INDENT>line = line.strip()<EOL>if not len(line):<EOL><INDENT>continue<EOL><DEDENT>if re.match(r'<STR_LIT>', line):<EOL><INDENT>continue<EOL><DEDENT>if '<STR_LIT:;>' in line:<EOL><INDENT>package, cond = line.split('<STR_LIT:;>', <NUM_LIT:1>)<EOL>package = package.strip()<EOL>cond = cond.strip()<EOL>cond = cond.replace('<STR_LIT>', repr(sys_platform))<EOL>cond = cond.replace('<STR_LIT>', repr(python_version))<EOL>if not eval(cond):<EOL><INDENT>continue<EOL><DEDENT><DEDENT>else:<EOL><INDENT>package = line.strip()<EOL><DEDENT>if re.match(r'<STR_LIT>', package):<EOL><INDENT>sub_req_file = re.sub(r'<STR_LIT>', '<STR_LIT>', package)<EOL>sub_req_file = os.path.abspath(os.path.join(os.path.dirname(path), sub_req_file))<EOL>packages.extend(_parse_requires(sub_req_file))<EOL>continue<EOL><DEDENT>if re.match(r'<STR_LIT>', package):<EOL><INDENT>packages.append({'<STR_LIT:type>': '<STR_LIT:url>', '<STR_LIT>': package})<EOL>continue<EOL><DEDENT>if '<STR_LIT>' in package:<EOL><INDENT>parts = package.split('<STR_LIT>')<EOL>package = parts[<NUM_LIT:0>].strip()<EOL>ver = parts[<NUM_LIT:1>].strip()<EOL>packages.append({'<STR_LIT:type>': '<STR_LIT>', '<STR_LIT>': package, '<STR_LIT>': ver})<EOL>continue<EOL><DEDENT>if '<STR_LIT>' in package:<EOL><INDENT>parts = package.split('<STR_LIT>')<EOL>package = parts[<NUM_LIT:0>].strip()<EOL>ver = parts[<NUM_LIT:1>].strip()<EOL>packages.append({'<STR_LIT:type>': '<STR_LIT>', '<STR_LIT>': package, '<STR_LIT>': ver})<EOL>continue<EOL><DEDENT>if re.search(r'<STR_LIT>', package):<EOL><INDENT>raise Exception('<STR_LIT>' % package)<EOL><DEDENT>packages.append({'<STR_LIT:type>': '<STR_LIT>', 
'<STR_LIT>': package})<EOL><DEDENT>return packages<EOL>", "docstring": "Does basic parsing of pip requirements files, to allow for\nusing something other than Python to do actual TLS requests\n\n:param path:\n    A path to a requirements file\n\n:return:\n    A list of dict objects containing the keys:\n     - 'type' ('any', 'url', '==', '>=')\n     - 'pkg'\n     - 'ver' (if 'type' == '==' or 'type' == '>=')", "id": "f9471:m10"}
{"signature": "def _list_archive_members(archive):", "body": "if isinstance(archive, zipfile.ZipFile):<EOL><INDENT>return archive.infolist()<EOL><DEDENT>return archive.getmembers()<EOL>", "docstring": ":param archive:\n    An archive from _open_archive()\n\n:return:\n    A list of info objects to be used with _info_name() and _extract_info()", "id": "f9471:m4"}
{"signature": "def _open_archive(path):", "body": "if path.endswith('<STR_LIT>'):<EOL><INDENT>return zipfile.ZipFile(path, '<STR_LIT:r>')<EOL><DEDENT>return tarfile.open(path, '<STR_LIT:r>')<EOL>", "docstring": ":param path:\n    A unicode string of the filesystem path to the archive\n\n:return:\n    An archive object", "id": "f9471:m3"}
{"signature": "def _archive_single_dir(archive):", "body": "common_root = None<EOL>for info in _list_archive_members(archive):<EOL><INDENT>fn = _info_name(info)<EOL>if fn in set(['<STR_LIT:.>', '<STR_LIT:/>']):<EOL><INDENT>continue<EOL><DEDENT>sep = None<EOL>if '<STR_LIT:/>' in fn:<EOL><INDENT>sep = '<STR_LIT:/>'<EOL><DEDENT>elif '<STR_LIT:\\\\>' in fn:<EOL><INDENT>sep = '<STR_LIT:\\\\>'<EOL><DEDENT>if sep is None:<EOL><INDENT>root_dir = fn<EOL><DEDENT>else:<EOL><INDENT>root_dir, _ = fn.split(sep, <NUM_LIT:1>)<EOL><DEDENT>if common_root is None:<EOL><INDENT>common_root = root_dir<EOL><DEDENT>else:<EOL><INDENT>if common_root != root_dir:<EOL><INDENT>return None<EOL><DEDENT><DEDENT><DEDENT>return common_root<EOL>", "docstring": "Check if all members of the archive are in a single top-level directory\n\n:param archive:\n    An archive from _open_archive()\n\n:return:\n    None if not a single top level directory in archive, otherwise a\n    unicode string of the top level directory name", "id": "f9471:m5"}
{"signature": "def _try_decode(byte_string):", "body": "try:<EOL><INDENT>return str_cls(byte_string, _encoding)<EOL><DEDENT>except (UnicodeDecodeError):<EOL><INDENT>for encoding in _fallback_encodings:<EOL><INDENT>try:<EOL><INDENT>return str_cls(byte_string, encoding, errors='<STR_LIT:strict>')<EOL><DEDENT>except (UnicodeDecodeError):<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>return str_cls(byte_string, errors='<STR_LIT:replace>')<EOL>", "docstring": "Tries decoding a byte string from the OS into a unicode string\n\n:param byte_string:\n    A byte string\n\n:return:\n    A unicode string", "id": "f9476:m0"}
{"signature": "def read_until(self, marker):", "body": "if not isinstance(marker, byte_cls) and not isinstance(marker, Pattern):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(marker)<EOL>))<EOL><DEDENT>output = b'<STR_LIT>'<EOL>is_regex = isinstance(marker, Pattern)<EOL>while True:<EOL><INDENT>if len(self._decrypted_bytes) > <NUM_LIT:0>:<EOL><INDENT>chunk = self._decrypted_bytes<EOL>self._decrypted_bytes = b'<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>chunk = self.read(<NUM_LIT>)<EOL><DEDENT>offset = len(output)<EOL>output += chunk<EOL>if is_regex:<EOL><INDENT>match = marker.search(output)<EOL>if match is not None:<EOL><INDENT>end = match.end()<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>start = max(<NUM_LIT:0>, offset - len(marker) - <NUM_LIT:1>)<EOL>match = output.find(marker, start)<EOL>if match != -<NUM_LIT:1>:<EOL><INDENT>end = match + len(marker)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>self._decrypted_bytes = output[end:] + self._decrypted_bytes<EOL>return output[<NUM_LIT:0>:end]<EOL>", "docstring": "Reads data from the socket until a marker is found. Data read may\ninclude data beyond the marker.\n\n:param marker:\n    A byte string or regex object from re.compile(). Used to determine\n    when to stop reading. Regex objects are more inefficient since\n    they must scan the entire byte string of read data each time data\n    is read off the socket.\n\n:return:\n    A byte string of the data read", "id": "f9481:c3:m7"}
{"signature": "@property<EOL><INDENT>def port(self):<DEDENT>", "body": "return self.socket.getpeername()[<NUM_LIT:1>]<EOL>", "docstring": "An integer of the port number the socket is connected to", "id": "f9481:c3:m25"}
{"signature": "@property<EOL><INDENT>def cipher_suite(self):<DEDENT>", "body": "return self._cipher_suite<EOL>", "docstring": "A unicode string of the IANA cipher suite name of the negotiated\ncipher suite", "id": "f9481:c3:m18"}
{"signature": "def _handshake(self, renegotiate=False):", "body": "in_buffers = None<EOL>out_buffers = None<EOL>new_context_handle_pointer = None<EOL>try:<EOL><INDENT>if renegotiate:<EOL><INDENT>temp_context_handle_pointer = self._context_handle_pointer<EOL><DEDENT>else:<EOL><INDENT>new_context_handle_pointer = new(secur32, '<STR_LIT>')<EOL>temp_context_handle_pointer = new_context_handle_pointer<EOL><DEDENT>requested_flags = {<EOL>Secur32Const.ISC_REQ_REPLAY_DETECT: '<STR_LIT>',<EOL>Secur32Const.ISC_REQ_SEQUENCE_DETECT: '<STR_LIT>',<EOL>Secur32Const.ISC_REQ_CONFIDENTIALITY: '<STR_LIT>',<EOL>Secur32Const.ISC_REQ_ALLOCATE_MEMORY: '<STR_LIT>',<EOL>Secur32Const.ISC_REQ_INTEGRITY: '<STR_LIT>',<EOL>Secur32Const.ISC_REQ_STREAM: '<STR_LIT>',<EOL>Secur32Const.ISC_REQ_USE_SUPPLIED_CREDS: '<STR_LIT>',<EOL>}<EOL>self._context_flags = <NUM_LIT:0><EOL>for flag in requested_flags:<EOL><INDENT>self._context_flags |= flag<EOL><DEDENT>in_sec_buffer_desc_pointer, in_buffers = self._create_buffers(<NUM_LIT:2>)<EOL>in_buffers[<NUM_LIT:0>].BufferType = Secur32Const.SECBUFFER_TOKEN<EOL>out_sec_buffer_desc_pointer, out_buffers = self._create_buffers(<NUM_LIT:2>)<EOL>out_buffers[<NUM_LIT:0>].BufferType = Secur32Const.SECBUFFER_TOKEN<EOL>out_buffers[<NUM_LIT:1>].BufferType = Secur32Const.SECBUFFER_ALERT<EOL>output_context_flags_pointer = new(secur32, '<STR_LIT>')<EOL>if renegotiate:<EOL><INDENT>first_handle = temp_context_handle_pointer<EOL>second_handle = null()<EOL><DEDENT>else:<EOL><INDENT>first_handle = null()<EOL>second_handle = temp_context_handle_pointer<EOL><DEDENT>result = secur32.InitializeSecurityContextW(<EOL>self._session._credentials_handle,<EOL>first_handle,<EOL>self._hostname,<EOL>self._context_flags,<EOL><NUM_LIT:0>,<EOL><NUM_LIT:0>,<EOL>null(),<EOL><NUM_LIT:0>,<EOL>second_handle,<EOL>out_sec_buffer_desc_pointer,<EOL>output_context_flags_pointer,<EOL>null()<EOL>)<EOL>if result not in set([Secur32Const.SEC_E_OK, 
Secur32Const.SEC_I_CONTINUE_NEEDED]):<EOL><INDENT>handle_error(result, TLSError)<EOL><DEDENT>if not renegotiate:<EOL><INDENT>temp_context_handle_pointer = second_handle<EOL><DEDENT>else:<EOL><INDENT>temp_context_handle_pointer = first_handle<EOL><DEDENT>handshake_server_bytes = b'<STR_LIT>'<EOL>handshake_client_bytes = b'<STR_LIT>'<EOL>if out_buffers[<NUM_LIT:0>].cbBuffer > <NUM_LIT:0>:<EOL><INDENT>token = bytes_from_buffer(out_buffers[<NUM_LIT:0>].pvBuffer, out_buffers[<NUM_LIT:0>].cbBuffer)<EOL>handshake_client_bytes += token<EOL>self._socket.send(token)<EOL>out_buffers[<NUM_LIT:0>].cbBuffer = <NUM_LIT:0><EOL>secur32.FreeContextBuffer(out_buffers[<NUM_LIT:0>].pvBuffer)<EOL>out_buffers[<NUM_LIT:0>].pvBuffer = null()<EOL><DEDENT>in_data_buffer = buffer_from_bytes(<NUM_LIT>)<EOL>in_buffers[<NUM_LIT:0>].pvBuffer = cast(secur32, '<STR_LIT>', in_data_buffer)<EOL>bytes_read = b'<STR_LIT>'<EOL>while result != Secur32Const.SEC_E_OK:<EOL><INDENT>try:<EOL><INDENT>fail_late = False<EOL>bytes_read = self._socket.recv(<NUM_LIT>)<EOL>if bytes_read == b'<STR_LIT>':<EOL><INDENT>raise_disconnection()<EOL><DEDENT><DEDENT>except (socket_error_cls):<EOL><INDENT>fail_late = True<EOL><DEDENT>handshake_server_bytes += bytes_read<EOL>self._received_bytes += bytes_read<EOL>in_buffers[<NUM_LIT:0>].cbBuffer = len(self._received_bytes)<EOL>write_to_buffer(in_data_buffer, self._received_bytes)<EOL>result = secur32.InitializeSecurityContextW(<EOL>self._session._credentials_handle,<EOL>temp_context_handle_pointer,<EOL>self._hostname,<EOL>self._context_flags,<EOL><NUM_LIT:0>,<EOL><NUM_LIT:0>,<EOL>in_sec_buffer_desc_pointer,<EOL><NUM_LIT:0>,<EOL>null(),<EOL>out_sec_buffer_desc_pointer,<EOL>output_context_flags_pointer,<EOL>null()<EOL>)<EOL>if result == Secur32Const.SEC_E_INCOMPLETE_MESSAGE:<EOL><INDENT>in_buffers[<NUM_LIT:0>].BufferType = Secur32Const.SECBUFFER_TOKEN<EOL>if in_buffers[<NUM_LIT:1>].BufferType != Secur32Const.SECBUFFER_EMPTY:<EOL><INDENT>in_buffers[<NUM_LIT:1>].BufferType = 
Secur32Const.SECBUFFER_EMPTY<EOL>in_buffers[<NUM_LIT:1>].cbBuffer = <NUM_LIT:0><EOL>if not is_null(in_buffers[<NUM_LIT:1>].pvBuffer):<EOL><INDENT>secur32.FreeContextBuffer(in_buffers[<NUM_LIT:1>].pvBuffer)<EOL>in_buffers[<NUM_LIT:1>].pvBuffer = null()<EOL><DEDENT><DEDENT>if fail_late:<EOL><INDENT>raise_disconnection()<EOL><DEDENT>continue<EOL><DEDENT>if result == Secur32Const.SEC_E_ILLEGAL_MESSAGE:<EOL><INDENT>if detect_client_auth_request(handshake_server_bytes):<EOL><INDENT>raise_client_auth()<EOL><DEDENT>alert_info = parse_alert(handshake_server_bytes)<EOL>if alert_info and alert_info == (<NUM_LIT:2>, <NUM_LIT>):<EOL><INDENT>raise_protocol_version()<EOL><DEDENT>raise_handshake()<EOL><DEDENT>if result == Secur32Const.SEC_E_WRONG_PRINCIPAL:<EOL><INDENT>chain = extract_chain(handshake_server_bytes)<EOL>raise_hostname(chain[<NUM_LIT:0>], self._hostname)<EOL><DEDENT>if result == Secur32Const.SEC_E_CERT_EXPIRED:<EOL><INDENT>chain = extract_chain(handshake_server_bytes)<EOL>raise_expired_not_yet_valid(chain[<NUM_LIT:0>])<EOL><DEDENT>if result == Secur32Const.SEC_E_UNTRUSTED_ROOT:<EOL><INDENT>chain = extract_chain(handshake_server_bytes)<EOL>cert = chain[<NUM_LIT:0>]<EOL>oscrypto_cert = load_certificate(cert)<EOL>if not oscrypto_cert.self_signed:<EOL><INDENT>raise_no_issuer(cert)<EOL><DEDENT>raise_self_signed(cert)<EOL><DEDENT>if result == Secur32Const.SEC_E_INTERNAL_ERROR:<EOL><INDENT>if get_dh_params_length(handshake_server_bytes) < <NUM_LIT>:<EOL><INDENT>raise_dh_params()<EOL><DEDENT><DEDENT>if result == Secur32Const.SEC_I_INCOMPLETE_CREDENTIALS:<EOL><INDENT>raise_client_auth()<EOL><DEDENT>if result == Crypt32Const.TRUST_E_CERT_SIGNATURE:<EOL><INDENT>raise_weak_signature(cert)<EOL><DEDENT>if result == Secur32Const.SEC_E_INVALID_TOKEN:<EOL><INDENT>if out_buffers[<NUM_LIT:1>].cbBuffer > <NUM_LIT:0>:<EOL><INDENT>alert_bytes = bytes_from_buffer(out_buffers[<NUM_LIT:1>].pvBuffer, out_buffers[<NUM_LIT:1>].cbBuffer)<EOL>handshake_client_bytes += alert_bytes<EOL>alert_number 
= alert_bytes[<NUM_LIT:6>:<NUM_LIT:7>]<EOL>if alert_number == b'<STR_LIT>' or alert_number == b'<STR_LIT>':<EOL><INDENT>if '<STR_LIT>' in self._session._protocols and len(self._session._protocols) > <NUM_LIT:1>:<EOL><INDENT>chain = extract_chain(handshake_server_bytes)<EOL>raise _TLSDowngradeError(<EOL>'<STR_LIT>',<EOL>chain[<NUM_LIT:0>]<EOL>)<EOL><DEDENT><DEDENT><DEDENT>if detect_client_auth_request(handshake_server_bytes):<EOL><INDENT>raise_client_auth()<EOL><DEDENT>if detect_other_protocol(handshake_server_bytes):<EOL><INDENT>raise_protocol_error(handshake_server_bytes)<EOL><DEDENT>raise_handshake()<EOL><DEDENT>if result == Secur32Const.SEC_E_BUFFER_TOO_SMALL or result == Secur32Const.SEC_E_MESSAGE_ALTERED:<EOL><INDENT>if '<STR_LIT>' in self._session._protocols:<EOL><INDENT>raise _TLSRetryError('<STR_LIT>')<EOL><DEDENT><DEDENT>if fail_late:<EOL><INDENT>raise_disconnection()<EOL><DEDENT>if result == Secur32Const.SEC_E_INVALID_PARAMETER:<EOL><INDENT>if get_dh_params_length(handshake_server_bytes) < <NUM_LIT>:<EOL><INDENT>raise_dh_params()<EOL><DEDENT><DEDENT>if result not in set([Secur32Const.SEC_E_OK, Secur32Const.SEC_I_CONTINUE_NEEDED]):<EOL><INDENT>handle_error(result, TLSError)<EOL><DEDENT>if out_buffers[<NUM_LIT:0>].cbBuffer > <NUM_LIT:0>:<EOL><INDENT>token = bytes_from_buffer(out_buffers[<NUM_LIT:0>].pvBuffer, out_buffers[<NUM_LIT:0>].cbBuffer)<EOL>handshake_client_bytes += token<EOL>self._socket.send(token)<EOL>out_buffers[<NUM_LIT:0>].cbBuffer = <NUM_LIT:0><EOL>secur32.FreeContextBuffer(out_buffers[<NUM_LIT:0>].pvBuffer)<EOL>out_buffers[<NUM_LIT:0>].pvBuffer = null()<EOL><DEDENT>if in_buffers[<NUM_LIT:1>].BufferType == Secur32Const.SECBUFFER_EXTRA:<EOL><INDENT>extra_amount = in_buffers[<NUM_LIT:1>].cbBuffer<EOL>self._received_bytes = self._received_bytes[-extra_amount:]<EOL>in_buffers[<NUM_LIT:1>].BufferType = Secur32Const.SECBUFFER_EMPTY<EOL>in_buffers[<NUM_LIT:1>].cbBuffer = 
<NUM_LIT:0><EOL>secur32.FreeContextBuffer(in_buffers[<NUM_LIT:1>].pvBuffer)<EOL>in_buffers[<NUM_LIT:1>].pvBuffer = null()<EOL>if result == Secur32Const.SEC_E_OK:<EOL><INDENT>handshake_server_bytes = handshake_server_bytes[-extra_amount:]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._received_bytes = b'<STR_LIT>'<EOL><DEDENT><DEDENT>connection_info_pointer = struct(secur32, '<STR_LIT>')<EOL>result = secur32.QueryContextAttributesW(<EOL>temp_context_handle_pointer,<EOL>Secur32Const.SECPKG_ATTR_CONNECTION_INFO,<EOL>connection_info_pointer<EOL>)<EOL>handle_error(result, TLSError)<EOL>connection_info = unwrap(connection_info_pointer)<EOL>self._protocol = {<EOL>Secur32Const.SP_PROT_SSL2_CLIENT: '<STR_LIT>',<EOL>Secur32Const.SP_PROT_SSL3_CLIENT: '<STR_LIT>',<EOL>Secur32Const.SP_PROT_TLS1_CLIENT: '<STR_LIT>',<EOL>Secur32Const.SP_PROT_TLS1_1_CLIENT: '<STR_LIT>',<EOL>Secur32Const.SP_PROT_TLS1_2_CLIENT: '<STR_LIT>',<EOL>}.get(native(int, connection_info.dwProtocol), str_cls(connection_info.dwProtocol))<EOL>if self._protocol in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>session_info = parse_session_info(handshake_server_bytes, handshake_client_bytes)<EOL>self._cipher_suite = session_info['<STR_LIT>']<EOL>self._compression = session_info['<STR_LIT>']<EOL>self._session_id = session_info['<STR_LIT>']<EOL>self._session_ticket = session_info['<STR_LIT>']<EOL><DEDENT>output_context_flags = deref(output_context_flags_pointer)<EOL>for flag in requested_flags:<EOL><INDENT>if (flag | output_context_flags) == <NUM_LIT:0>:<EOL><INDENT>raise OSError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>requested_flags[flag]<EOL>))<EOL><DEDENT><DEDENT>if not renegotiate:<EOL><INDENT>self._context_handle_pointer = temp_context_handle_pointer<EOL>new_context_handle_pointer = None<EOL>stream_sizes_pointer = struct(secur32, '<STR_LIT>')<EOL>result = 
secur32.QueryContextAttributesW(<EOL>self._context_handle_pointer,<EOL>Secur32Const.SECPKG_ATTR_STREAM_SIZES,<EOL>stream_sizes_pointer<EOL>)<EOL>handle_error(result)<EOL>stream_sizes = unwrap(stream_sizes_pointer)<EOL>self._header_size = native(int, stream_sizes.cbHeader)<EOL>self._message_size = native(int, stream_sizes.cbMaximumMessage)<EOL>self._trailer_size = native(int, stream_sizes.cbTrailer)<EOL>self._buffer_size = self._header_size + self._message_size + self._trailer_size<EOL><DEDENT>if self._session._extra_trust_roots:<EOL><INDENT>self._extra_trust_root_validation()<EOL><DEDENT><DEDENT>except (OSError, socket_.error):<EOL><INDENT>self.close()<EOL>raise<EOL><DEDENT>finally:<EOL><INDENT>if out_buffers:<EOL><INDENT>if not is_null(out_buffers[<NUM_LIT:0>].pvBuffer):<EOL><INDENT>secur32.FreeContextBuffer(out_buffers[<NUM_LIT:0>].pvBuffer)<EOL><DEDENT>if not is_null(out_buffers[<NUM_LIT:1>].pvBuffer):<EOL><INDENT>secur32.FreeContextBuffer(out_buffers[<NUM_LIT:1>].pvBuffer)<EOL><DEDENT><DEDENT>if new_context_handle_pointer:<EOL><INDENT>secur32.DeleteSecurityContext(new_context_handle_pointer)<EOL><DEDENT><DEDENT>", "docstring": "Perform an initial TLS handshake, or a renegotiation\n\n:param renegotiate:\n    If the handshake is for a renegotiation", "id": "f9481:c3:m4"}
{"signature": "@property<EOL><INDENT>def session_id(self):<DEDENT>", "body": "return self._session_id<EOL>", "docstring": "A unicode string of \"new\" or \"reused\" or None for no ticket", "id": "f9481:c3:m21"}
{"signature": "def read_exactly(self, num_bytes):", "body": "output = b'<STR_LIT>'<EOL>remaining = num_bytes<EOL>while remaining > <NUM_LIT:0>:<EOL><INDENT>output += self.read(remaining)<EOL>remaining = num_bytes - len(output)<EOL><DEDENT>return output<EOL>", "docstring": "Reads exactly the specified number of bytes from the socket\n\n:param num_bytes:\n    An integer - the exact number of bytes to read\n\n:return:\n    A byte string of the data that was read", "id": "f9481:c3:m9"}
{"signature": "@property<EOL><INDENT>def hostname(self):<DEDENT>", "body": "return self._hostname<EOL>", "docstring": "A unicode string of the TLS server domain name or IP address", "id": "f9481:c3:m24"}
{"signature": "def select_write(self, timeout=None):", "body": "_, write_ready, _ = select.select([], [self._socket], [], timeout)<EOL>return len(write_ready) > <NUM_LIT:0><EOL>", "docstring": "Blocks until the socket is ready to be written to, or the timeout is hit\n\n:param timeout:\n    A float - the period of time to wait for the socket to be ready to\n    written to. None for no time limit.\n\n:return:\n    A boolean - if the socket is ready for writing. Will only be False\n    if timeout is not None.", "id": "f9481:c3:m11"}
{"signature": "def __init__(self, protocol=None, manual_validation=False, extra_trust_roots=None):", "body": "if not isinstance(manual_validation, bool):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(manual_validation)<EOL>))<EOL><DEDENT>self._manual_validation = manual_validation<EOL>if protocol is None:<EOL><INDENT>protocol = set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL><DEDENT>if isinstance(protocol, str_cls):<EOL><INDENT>protocol = set([protocol])<EOL><DEDENT>elif not isinstance(protocol, set):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(protocol)<EOL>))<EOL><DEDENT>unsupported_protocols = protocol - set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>if unsupported_protocols:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(unsupported_protocols)<EOL>))<EOL><DEDENT>self._protocols = protocol<EOL>self._extra_trust_roots = []<EOL>if extra_trust_roots:<EOL><INDENT>for extra_trust_root in extra_trust_roots:<EOL><INDENT>if isinstance(extra_trust_root, Certificate):<EOL><INDENT>extra_trust_root = extra_trust_root.asn1<EOL><DEDENT>elif isinstance(extra_trust_root, byte_cls):<EOL><INDENT>extra_trust_root = parse_certificate(extra_trust_root)<EOL><DEDENT>elif isinstance(extra_trust_root, str_cls):<EOL><INDENT>with open(extra_trust_root, '<STR_LIT:rb>') as f:<EOL><INDENT>extra_trust_root = parse_certificate(f.read())<EOL><DEDENT><DEDENT>elif not isinstance(extra_trust_root, x509.Certificate):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(extra_trust_root)<EOL>))<EOL><DEDENT>self._extra_trust_roots.append(extra_trust_root)<EOL><DEDENT><DEDENT>self._obtain_credentials()<EOL>", "docstring": ":param protocol:\n    A unicode string or set of unicode strings representing allowable\n    protocols to negotiate with the server:\n\n     - \"TLSv1.2\"\n     - \"TLSv1.1\"\n     - \"TLSv1\"\n     - \"SSLv3\"\n\n    Default is: {\"TLSv1\", 
\"TLSv1.1\", \"TLSv1.2\"}\n\n:param manual_validation:\n    If certificate and certificate path validation should be skipped\n    and left to the developer to implement\n\n:param extra_trust_roots:\n    A list containing one or more certificates to be treated as trust\n    roots, in one of the following formats:\n     - A byte string of the DER encoded certificate\n     - A unicode string of the certificate filename\n     - An asn1crypto.x509.Certificate object\n     - An oscrypto.asymmetric.Certificate object\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9481:c2:m0"}
{"signature": "@property<EOL><INDENT>def socket(self):<DEDENT>", "body": "if self._context_handle_pointer is None:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>return self._socket<EOL>", "docstring": "The underlying socket.socket connection", "id": "f9481:c3:m26"}
{"signature": "def __init__(self, address, port, timeout=<NUM_LIT:10>, session=None):", "body": "self._received_bytes = b'<STR_LIT>'<EOL>self._decrypted_bytes = b'<STR_LIT>'<EOL>if address is None and port is None:<EOL><INDENT>self._socket = None<EOL><DEDENT>else:<EOL><INDENT>if not isinstance(address, str_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(address)<EOL>))<EOL><DEDENT>if not isinstance(port, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(port)<EOL>))<EOL><DEDENT>if timeout is not None and not isinstance(timeout, numbers.Number):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(timeout)<EOL>))<EOL><DEDENT>self._socket = socket_.create_connection((address, port), timeout)<EOL>self._socket.settimeout(timeout)<EOL><DEDENT>if session is None:<EOL><INDENT>session = TLSSession()<EOL><DEDENT>elif not isinstance(session, TLSSession):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(session)<EOL>))<EOL><DEDENT>self._session = session<EOL>if self._socket:<EOL><INDENT>self._hostname = address<EOL>try:<EOL><INDENT>self._handshake()<EOL><DEDENT>except (_TLSDowngradeError):<EOL><INDENT>self.close()<EOL>new_session = TLSSession(<EOL>session._protocols - set(['<STR_LIT>']),<EOL>session._manual_validation,<EOL>session._extra_trust_roots<EOL>)<EOL>session.__del__()<EOL>self._received_bytes = b'<STR_LIT>'<EOL>self._session = new_session<EOL>self._socket = socket_.create_connection((address, port), timeout)<EOL>self._socket.settimeout(timeout)<EOL>self._handshake()<EOL><DEDENT>except (_TLSRetryError):<EOL><INDENT>self._received_bytes = b'<STR_LIT>'<EOL>self._socket = socket_.create_connection((address, port), timeout)<EOL>self._socket.settimeout(timeout)<EOL>self._handshake()<EOL><DEDENT><DEDENT>", "docstring": ":param address:\n    A unicode string of the domain name or IP address to conenct to\n\n:param port:\n    An integer of the 
port number to connect to\n\n:param timeout:\n    An integer timeout to use for the socket\n\n:param session:\n    An oscrypto.tls.TLSSession object to allow for session reuse and\n    controlling the protocols and validation performed", "id": "f9481:c3:m1"}
{"signature": "def read_line(self):", "body": "return self.read_until(_line_regex)<EOL>", "docstring": "r\"\"\"\n        Reads a line from the socket, including the line ending of \"\\r\\n\", \"\\r\",\n        or \"\\n\"\n\n        :return:\n            A byte string of the next line from the socket", "id": "f9481:c3:m8"}
{"signature": "def select_read(self, timeout=None):", "body": "<EOL>if len(self._decrypted_bytes) > <NUM_LIT:0>:<EOL><INDENT>return True<EOL><DEDENT>read_ready, _, _ = select.select([self._socket], [], [], timeout)<EOL>return len(read_ready) > <NUM_LIT:0><EOL>", "docstring": "Blocks until the socket is ready to be read from, or the timeout is hit\n\n:param timeout:\n    A float - the period of time to wait for data to be read. None for\n    no time limit.\n\n:return:\n    A boolean - if data is ready to be read. Will only be False if\n    timeout is not None.", "id": "f9481:c3:m6"}
{"signature": "@property<EOL><INDENT>def session_ticket(self):<DEDENT>", "body": "return self._session_ticket<EOL>", "docstring": "A unicode string of \"new\" or \"reused\" or None for no ticket", "id": "f9481:c3:m22"}
{"signature": "def _bcrypt_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):", "body": "if hash_algorithm == '<STR_LIT>':<EOL><INDENT>digest = data<EOL><DEDENT>else:<EOL><INDENT>hash_constant = {<EOL>'<STR_LIT>': BcryptConst.BCRYPT_MD5_ALGORITHM,<EOL>'<STR_LIT>': BcryptConst.BCRYPT_SHA1_ALGORITHM,<EOL>'<STR_LIT>': BcryptConst.BCRYPT_SHA256_ALGORITHM,<EOL>'<STR_LIT>': BcryptConst.BCRYPT_SHA384_ALGORITHM,<EOL>'<STR_LIT>': BcryptConst.BCRYPT_SHA512_ALGORITHM<EOL>}[hash_algorithm]<EOL>digest = getattr(hashlib, hash_algorithm)(data).digest()<EOL><DEDENT>padding_info = null()<EOL>flags = <NUM_LIT:0><EOL>if certificate_or_public_key.algorithm == '<STR_LIT>':<EOL><INDENT>if rsa_pss_padding:<EOL><INDENT>flags = BcryptConst.BCRYPT_PAD_PSS<EOL>padding_info_struct_pointer = struct(bcrypt, '<STR_LIT>')<EOL>padding_info_struct = unwrap(padding_info_struct_pointer)<EOL>hash_buffer = buffer_from_unicode(hash_constant)<EOL>padding_info_struct.pszAlgId = cast(bcrypt, '<STR_LIT>', hash_buffer)<EOL>padding_info_struct.cbSalt = len(digest)<EOL><DEDENT>else:<EOL><INDENT>flags = BcryptConst.BCRYPT_PAD_PKCS1<EOL>padding_info_struct_pointer = struct(bcrypt, '<STR_LIT>')<EOL>padding_info_struct = unwrap(padding_info_struct_pointer)<EOL>if hash_algorithm == '<STR_LIT>':<EOL><INDENT>padding_info_struct.pszAlgId = null()<EOL><DEDENT>else:<EOL><INDENT>hash_buffer = buffer_from_unicode(hash_constant)<EOL>padding_info_struct.pszAlgId = cast(bcrypt, '<STR_LIT>', hash_buffer)<EOL><DEDENT><DEDENT>padding_info = cast(bcrypt, '<STR_LIT>', padding_info_struct_pointer)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>signature = algos.DSASignature.load(signature).to_p1363()<EOL><DEDENT>except (ValueError, OverflowError, TypeError):<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT><DEDENT>res = 
bcrypt.BCryptVerifySignature(<EOL>certificate_or_public_key.key_handle,<EOL>padding_info,<EOL>digest,<EOL>len(digest),<EOL>signature,<EOL>len(signature),<EOL>flags<EOL>)<EOL>failure = res == BcryptConst.STATUS_INVALID_SIGNATURE<EOL>failure = failure or res == BcryptConst.STATUS_INVALID_PARAMETER<EOL>if failure:<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT>handle_error(res)<EOL>", "docstring": "Verifies an RSA, DSA or ECDSA signature via CNG\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n    A byte string of the signature to verify\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:param rsa_pss_padding:\n    If PSS padding should be used for RSA keys\n\n:raises:\n    oscrypto.errors.SignatureError - when the signature is determined to be invalid\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9489:m24"}
{"signature": "def rsa_pkcs1v15_decrypt(private_key, ciphertext):", "body": "return _decrypt(private_key, ciphertext)<EOL>", "docstring": "Decrypts a byte string using an RSA private key. Uses PKCS#1 v1.5 padding.\n\n:param private_key:\n    A PrivateKey object\n\n:param ciphertext:\n    A byte string of the encrypted data\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the original plaintext", "id": "f9489:m39"}
{"signature": "def _advapi32_interpret_rsa_key_blob(bit_size, blob_struct, blob):", "body": "len1 = bit_size // <NUM_LIT:8><EOL>len2 = bit_size // <NUM_LIT:16><EOL>prime1_offset = len1<EOL>prime2_offset = prime1_offset + len2<EOL>exponent1_offset = prime2_offset + len2<EOL>exponent2_offset = exponent1_offset + len2<EOL>coefficient_offset = exponent2_offset + len2<EOL>private_exponent_offset = coefficient_offset + len2<EOL>public_exponent = blob_struct.rsapubkey.pubexp<EOL>modulus = int_from_bytes(blob[<NUM_LIT:0>:prime1_offset][::-<NUM_LIT:1>])<EOL>prime1 = int_from_bytes(blob[prime1_offset:prime2_offset][::-<NUM_LIT:1>])<EOL>prime2 = int_from_bytes(blob[prime2_offset:exponent1_offset][::-<NUM_LIT:1>])<EOL>exponent1 = int_from_bytes(blob[exponent1_offset:exponent2_offset][::-<NUM_LIT:1>])<EOL>exponent2 = int_from_bytes(blob[exponent2_offset:coefficient_offset][::-<NUM_LIT:1>])<EOL>coefficient = int_from_bytes(blob[coefficient_offset:private_exponent_offset][::-<NUM_LIT:1>])<EOL>private_exponent = int_from_bytes(blob[private_exponent_offset:private_exponent_offset + len1][::-<NUM_LIT:1>])<EOL>public_key_info = keys.PublicKeyInfo({<EOL>'<STR_LIT>': keys.PublicKeyAlgorithm({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>}),<EOL>'<STR_LIT>': keys.RSAPublicKey({<EOL>'<STR_LIT>': modulus,<EOL>'<STR_LIT>': public_exponent,<EOL>}),<EOL>})<EOL>rsa_private_key = keys.RSAPrivateKey({<EOL>'<STR_LIT:version>': '<STR_LIT>',<EOL>'<STR_LIT>': modulus,<EOL>'<STR_LIT>': public_exponent,<EOL>'<STR_LIT>': private_exponent,<EOL>'<STR_LIT>': prime1,<EOL>'<STR_LIT>': prime2,<EOL>'<STR_LIT>': exponent1,<EOL>'<STR_LIT>': exponent2,<EOL>'<STR_LIT>': coefficient,<EOL>})<EOL>private_key_info = keys.PrivateKeyInfo({<EOL>'<STR_LIT:version>': <NUM_LIT:0>,<EOL>'<STR_LIT>': keys.PrivateKeyAlgorithm({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>}),<EOL>'<STR_LIT>': rsa_private_key,<EOL>})<EOL>return (public_key_info, private_key_info)<EOL>", "docstring": "Takes a CryptoAPI RSA private key blob and converts it into the 
ASN.1\nstructures for the public and private keys\n\n:param bit_size:\n    The integer bit size of the key\n\n:param blob_struct:\n    An instance of the advapi32.RSAPUBKEY struct\n\n:param blob:\n    A byte string of the binary data after the header\n\n:return:\n    A 2-element tuple of (asn1crypto.keys.PublicKeyInfo,\n    asn1crypto.keys.PrivateKeyInfo)", "id": "f9489:m5"}
{"signature": "def _advapi32_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):", "body": "flags = <NUM_LIT:0><EOL>if rsa_oaep_padding:<EOL><INDENT>flags = Advapi32Const.CRYPT_OAEP<EOL><DEDENT>out_len = new(advapi32, '<STR_LIT>', len(data))<EOL>res = advapi32.CryptEncrypt(<EOL>certificate_or_public_key.ex_key_handle,<EOL>null(),<EOL>True,<EOL>flags,<EOL>null(),<EOL>out_len,<EOL><NUM_LIT:0><EOL>)<EOL>handle_error(res)<EOL>buffer_len = deref(out_len)<EOL>buffer = buffer_from_bytes(buffer_len)<EOL>write_to_buffer(buffer, data)<EOL>pointer_set(out_len, len(data))<EOL>res = advapi32.CryptEncrypt(<EOL>certificate_or_public_key.ex_key_handle,<EOL>null(),<EOL>True,<EOL>flags,<EOL>buffer,<EOL>out_len,<EOL>buffer_len<EOL>)<EOL>handle_error(res)<EOL>return bytes_from_buffer(buffer, deref(out_len))[::-<NUM_LIT:1>]<EOL>", "docstring": "Encrypts a value using an RSA public key via CryptoAPI\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to encrypt with\n\n:param data:\n    A byte string of the data to encrypt\n\n:param rsa_oaep_padding:\n    If OAEP padding should be used instead of PKCS#1 v1.5\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the ciphertext", "id": "f9489:m33"}
{"signature": "def _advapi32_sign(private_key, data, hash_algorithm, rsa_pss_padding=False):", "body": "algo = private_key.algorithm<EOL>if algo == '<STR_LIT>' and hash_algorithm == '<STR_LIT>':<EOL><INDENT>padded_data = add_pkcs1v15_signature_padding(private_key.byte_size, data)<EOL>return raw_rsa_private_crypt(private_key, padded_data)<EOL><DEDENT>if algo == '<STR_LIT>' and rsa_pss_padding:<EOL><INDENT>hash_length = {<EOL>'<STR_LIT>': <NUM_LIT:20>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:32>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:64><EOL>}.get(hash_algorithm, <NUM_LIT:0>)<EOL>padded_data = add_pss_padding(hash_algorithm, hash_length, private_key.bit_size, data)<EOL>return raw_rsa_private_crypt(private_key, padded_data)<EOL><DEDENT>if private_key.algorithm == '<STR_LIT>' and hash_algorithm == '<STR_LIT>':<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT>hash_handle = None<EOL>try:<EOL><INDENT>alg_id = {<EOL>'<STR_LIT>': Advapi32Const.CALG_MD5,<EOL>'<STR_LIT>': Advapi32Const.CALG_SHA1,<EOL>'<STR_LIT>': Advapi32Const.CALG_SHA_256,<EOL>'<STR_LIT>': Advapi32Const.CALG_SHA_384,<EOL>'<STR_LIT>': Advapi32Const.CALG_SHA_512,<EOL>}[hash_algorithm]<EOL>hash_handle_pointer = new(advapi32, '<STR_LIT>')<EOL>res = advapi32.CryptCreateHash(<EOL>private_key.context_handle,<EOL>alg_id,<EOL>null(),<EOL><NUM_LIT:0>,<EOL>hash_handle_pointer<EOL>)<EOL>handle_error(res)<EOL>hash_handle = unwrap(hash_handle_pointer)<EOL>res = advapi32.CryptHashData(hash_handle, data, len(data), <NUM_LIT:0>)<EOL>handle_error(res)<EOL>out_len = new(advapi32, '<STR_LIT>')<EOL>res = advapi32.CryptSignHashW(<EOL>hash_handle,<EOL>Advapi32Const.AT_SIGNATURE,<EOL>null(),<EOL><NUM_LIT:0>,<EOL>null(),<EOL>out_len<EOL>)<EOL>handle_error(res)<EOL>buffer_length = deref(out_len)<EOL>buffer_ = buffer_from_bytes(buffer_length)<EOL>res = 
advapi32.CryptSignHashW(<EOL>hash_handle,<EOL>Advapi32Const.AT_SIGNATURE,<EOL>null(),<EOL><NUM_LIT:0>,<EOL>buffer_,<EOL>out_len<EOL>)<EOL>handle_error(res)<EOL>output = bytes_from_buffer(buffer_, deref(out_len))<EOL>output = output[::-<NUM_LIT:1>]<EOL>if algo == '<STR_LIT>':<EOL><INDENT>half_len = len(output) // <NUM_LIT:2><EOL>output = output[half_len:] + output[:half_len]<EOL>output = algos.DSASignature.from_p1363(output).dump()<EOL><DEDENT>return output<EOL><DEDENT>finally:<EOL><INDENT>if hash_handle:<EOL><INDENT>advapi32.CryptDestroyHash(hash_handle)<EOL><DEDENT><DEDENT>", "docstring": "Generates an RSA, DSA or ECDSA signature via CryptoAPI\n\n:param private_key:\n    The PrivateKey to generate the signature with\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:param rsa_pss_padding:\n    If PSS padding should be used for RSA keys\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the signature", "id": "f9489:m30"}
{"signature": "def load_public_key(source):", "body": "if isinstance(source, keys.PublicKeyInfo):<EOL><INDENT>public_key = source<EOL><DEDENT>elif isinstance(source, byte_cls):<EOL><INDENT>public_key = parse_public(source)<EOL><DEDENT>elif isinstance(source, str_cls):<EOL><INDENT>with open(source, '<STR_LIT:rb>') as f:<EOL><INDENT>public_key = parse_public(f.read())<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(public_key)<EOL>))<EOL><DEDENT>return _load_key(public_key, PublicKey)<EOL>", "docstring": "Loads a public key into a PublicKey object\n\n:param source:\n    A byte string of file contents, a unicode string filename or an\n    asn1crypto.keys.PublicKeyInfo object\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    oscrypto.errors.AsymmetricKeyError - when the public key is incompatible with the OS crypto library\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A PublicKey object", "id": "f9489:m16"}
{"signature": "def dsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return _verify(certificate_or_public_key, signature, data, hash_algorithm)<EOL>", "docstring": "Verifies a DSA signature\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n    A byte string of the signature to verify\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n    oscrypto.errors.SignatureError - when the signature is determined to be invalid\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9489:m20"}
{"signature": "def ecdsa_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return _sign(private_key, data, hash_algorithm)<EOL>", "docstring": "Generates an ECDSA signature\n\n:param private_key:\n    The PrivateKey to generate the signature with\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the signature", "id": "f9489:m28"}
{"signature": "def rsa_pkcs1v15_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return _sign(private_key, data, hash_algorithm)<EOL>", "docstring": "Generates an RSASSA-PKCS-v1.5 signature.\n\nWhen the hash_algorithm is \"raw\", the operation is identical to RSA\nprivate key encryption. That is: the data is not hashed and no ASN.1\nstructure with an algorithm identifier of the hash algorithm is placed in\nthe encrypted byte string.\n\n:param private_key:\n    The PrivateKey to generate the signature with\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the signature", "id": "f9489:m25"}
{"signature": "def _bcrypt_interpret_ec_key_blob(key_type, blob_struct, blob):", "body": "magic = native(int, blob_struct.dwMagic)<EOL>key_byte_length = native(int, blob_struct.cbKey)<EOL>curve = {<EOL>BcryptConst.BCRYPT_ECDSA_PRIVATE_P256_MAGIC: '<STR_LIT>',<EOL>BcryptConst.BCRYPT_ECDSA_PRIVATE_P384_MAGIC: '<STR_LIT>',<EOL>BcryptConst.BCRYPT_ECDSA_PRIVATE_P521_MAGIC: '<STR_LIT>',<EOL>BcryptConst.BCRYPT_ECDSA_PUBLIC_P256_MAGIC: '<STR_LIT>',<EOL>BcryptConst.BCRYPT_ECDSA_PUBLIC_P384_MAGIC: '<STR_LIT>',<EOL>BcryptConst.BCRYPT_ECDSA_PUBLIC_P521_MAGIC: '<STR_LIT>',<EOL>}[magic]<EOL>public = b'<STR_LIT>' + blob[<NUM_LIT:0>:key_byte_length * <NUM_LIT:2>]<EOL>if key_type == '<STR_LIT>':<EOL><INDENT>return keys.PublicKeyInfo({<EOL>'<STR_LIT>': keys.PublicKeyAlgorithm({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': keys.ECDomainParameters(<EOL>name='<STR_LIT>',<EOL>value=curve<EOL>)<EOL>}),<EOL>'<STR_LIT>': public,<EOL>})<EOL><DEDENT>elif key_type == '<STR_LIT>':<EOL><INDENT>private = int_from_bytes(blob[key_byte_length * <NUM_LIT:2>:key_byte_length * <NUM_LIT:3>])<EOL>return keys.PrivateKeyInfo({<EOL>'<STR_LIT:version>': <NUM_LIT:0>,<EOL>'<STR_LIT>': keys.PrivateKeyAlgorithm({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': keys.ECDomainParameters(<EOL>name='<STR_LIT>',<EOL>value=curve<EOL>)<EOL>}),<EOL>'<STR_LIT>': keys.ECPrivateKey({<EOL>'<STR_LIT:version>': '<STR_LIT>',<EOL>'<STR_LIT>': private,<EOL>'<STR_LIT>': public,<EOL>}),<EOL>})<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(key_type)<EOL>))<EOL><DEDENT>", "docstring": "Take a CNG BCRYPT_ECCKEY_BLOB and converts it into an ASN.1 structure\n\n:param key_type:\n    A unicode string of \"private\" or \"public\"\n\n:param blob_struct:\n    An instance of BCRYPT_ECCKEY_BLOB\n\n:param blob:\n    A byte string of the binary data contained after the struct\n\n:return:\n    An asn1crypto.keys.PrivateKeyInfo or asn1crypto.keys.PublicKeyInfo\n    object, based on the key_type 
param", "id": "f9489:m9"}
{"signature": "def _decrypt(private_key, ciphertext, rsa_oaep_padding=False):", "body": "if not isinstance(private_key, PrivateKey):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(private_key)<EOL>))<EOL><DEDENT>if not isinstance(ciphertext, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(ciphertext)<EOL>))<EOL><DEDENT>if not isinstance(rsa_oaep_padding, bool):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(rsa_oaep_padding)<EOL>))<EOL><DEDENT>if _backend == '<STR_LIT>':<EOL><INDENT>return _advapi32_decrypt(private_key, ciphertext, rsa_oaep_padding)<EOL><DEDENT>return _bcrypt_decrypt(private_key, ciphertext, rsa_oaep_padding)<EOL>", "docstring": "Encrypts a value using an RSA private key\n\n:param private_key:\n    A PrivateKey instance to decrypt with\n\n:param ciphertext:\n    A byte string of the data to decrypt\n\n:param rsa_oaep_padding:\n    If OAEP padding should be used instead of PKCS#1 v1.5\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9489:m35"}
{"signature": "def _bcrypt_generate_pair(algorithm, bit_size=None, curve=None):", "body": "if algorithm == '<STR_LIT>':<EOL><INDENT>alg_constant = BcryptConst.BCRYPT_RSA_ALGORITHM<EOL>struct_type = '<STR_LIT>'<EOL>private_blob_type = BcryptConst.BCRYPT_RSAFULLPRIVATE_BLOB<EOL>public_blob_type = BcryptConst.BCRYPT_RSAPUBLIC_BLOB<EOL><DEDENT>elif algorithm == '<STR_LIT>':<EOL><INDENT>alg_constant = BcryptConst.BCRYPT_DSA_ALGORITHM<EOL>if bit_size > <NUM_LIT>:<EOL><INDENT>struct_type = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>struct_type = '<STR_LIT>'<EOL><DEDENT>private_blob_type = BcryptConst.BCRYPT_DSA_PRIVATE_BLOB<EOL>public_blob_type = BcryptConst.BCRYPT_DSA_PUBLIC_BLOB<EOL><DEDENT>else:<EOL><INDENT>alg_constant = {<EOL>'<STR_LIT>': BcryptConst.BCRYPT_ECDSA_P256_ALGORITHM,<EOL>'<STR_LIT>': BcryptConst.BCRYPT_ECDSA_P384_ALGORITHM,<EOL>'<STR_LIT>': BcryptConst.BCRYPT_ECDSA_P521_ALGORITHM,<EOL>}[curve]<EOL>bit_size = {<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>}[curve]<EOL>struct_type = '<STR_LIT>'<EOL>private_blob_type = BcryptConst.BCRYPT_ECCPRIVATE_BLOB<EOL>public_blob_type = BcryptConst.BCRYPT_ECCPUBLIC_BLOB<EOL><DEDENT>alg_handle = open_alg_handle(alg_constant)<EOL>key_handle_pointer = new(bcrypt, '<STR_LIT>')<EOL>res = bcrypt.BCryptGenerateKeyPair(alg_handle, key_handle_pointer, bit_size, <NUM_LIT:0>)<EOL>handle_error(res)<EOL>key_handle = unwrap(key_handle_pointer)<EOL>res = bcrypt.BCryptFinalizeKeyPair(key_handle, <NUM_LIT:0>)<EOL>handle_error(res)<EOL>private_out_len = new(bcrypt, '<STR_LIT>')<EOL>res = bcrypt.BCryptExportKey(key_handle, null(), private_blob_type, null(), <NUM_LIT:0>, private_out_len, <NUM_LIT:0>)<EOL>handle_error(res)<EOL>private_buffer_length = deref(private_out_len)<EOL>private_buffer = buffer_from_bytes(private_buffer_length)<EOL>res = 
bcrypt.BCryptExportKey(<EOL>key_handle,<EOL>null(),<EOL>private_blob_type,<EOL>private_buffer,<EOL>private_buffer_length,<EOL>private_out_len,<EOL><NUM_LIT:0><EOL>)<EOL>handle_error(res)<EOL>private_blob_struct_pointer = struct_from_buffer(bcrypt, struct_type, private_buffer)<EOL>private_blob_struct = unwrap(private_blob_struct_pointer)<EOL>struct_size = sizeof(bcrypt, private_blob_struct)<EOL>private_blob = bytes_from_buffer(private_buffer, private_buffer_length)[struct_size:]<EOL>if algorithm == '<STR_LIT>':<EOL><INDENT>private_key = _bcrypt_interpret_rsa_key_blob('<STR_LIT>', private_blob_struct, private_blob)<EOL><DEDENT>elif algorithm == '<STR_LIT>':<EOL><INDENT>if bit_size > <NUM_LIT>:<EOL><INDENT>private_key = _bcrypt_interpret_dsa_key_blob('<STR_LIT>', <NUM_LIT:2>, private_blob_struct, private_blob)<EOL><DEDENT>else:<EOL><INDENT>private_key = _bcrypt_interpret_dsa_key_blob('<STR_LIT>', <NUM_LIT:1>, private_blob_struct, private_blob)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>private_key = _bcrypt_interpret_ec_key_blob('<STR_LIT>', private_blob_struct, private_blob)<EOL><DEDENT>public_out_len = new(bcrypt, '<STR_LIT>')<EOL>res = bcrypt.BCryptExportKey(key_handle, null(), public_blob_type, null(), <NUM_LIT:0>, public_out_len, <NUM_LIT:0>)<EOL>handle_error(res)<EOL>public_buffer_length = deref(public_out_len)<EOL>public_buffer = buffer_from_bytes(public_buffer_length)<EOL>res = bcrypt.BCryptExportKey(<EOL>key_handle,<EOL>null(),<EOL>public_blob_type,<EOL>public_buffer,<EOL>public_buffer_length,<EOL>public_out_len,<EOL><NUM_LIT:0><EOL>)<EOL>handle_error(res)<EOL>public_blob_struct_pointer = struct_from_buffer(bcrypt, struct_type, public_buffer)<EOL>public_blob_struct = unwrap(public_blob_struct_pointer)<EOL>struct_size = sizeof(bcrypt, public_blob_struct)<EOL>public_blob = bytes_from_buffer(public_buffer, public_buffer_length)[struct_size:]<EOL>if algorithm == '<STR_LIT>':<EOL><INDENT>public_key = _bcrypt_interpret_rsa_key_blob('<STR_LIT>', public_blob_struct, 
public_blob)<EOL><DEDENT>elif algorithm == '<STR_LIT>':<EOL><INDENT>if bit_size > <NUM_LIT>:<EOL><INDENT>public_key = _bcrypt_interpret_dsa_key_blob('<STR_LIT>', <NUM_LIT:2>, public_blob_struct, public_blob)<EOL><DEDENT>else:<EOL><INDENT>public_key = _bcrypt_interpret_dsa_key_blob('<STR_LIT>', <NUM_LIT:1>, public_blob_struct, public_blob)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>public_key = _bcrypt_interpret_ec_key_blob('<STR_LIT>', public_blob_struct, public_blob)<EOL><DEDENT>return (load_public_key(public_key), load_private_key(private_key))<EOL>", "docstring": "Generates a public/private key pair using CNG\n\n:param algorithm:\n    The key algorithm - \"rsa\", \"dsa\" or \"ec\"\n\n:param bit_size:\n    An integer - used for \"rsa\" and \"dsa\". For \"rsa\" the value maye be 1024,\n    2048, 3072 or 4096. For \"dsa\" the value may be 1024, plus 2048 or 3072\n    if on Windows 8 or newer.\n\n:param curve:\n    A unicode string - used for \"ec\" keys. Valid values include \"secp256r1\",\n    \"secp384r1\" and \"secp521r1\".\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A 2-element tuple of (PublicKey, PrivateKey). The contents of each key\n    may be saved by calling .asn1.dump().", "id": "f9489:m2"}
{"signature": "def __init__(self, key_handle, asn1):", "body": "PublicKey.__init__(self, key_handle, asn1)<EOL>", "docstring": ":param key_handle:\n    A CNG BCRYPT_KEY_HANDLE value (Vista and newer) or an HCRYPTKEY\n    (XP and 2003) from loading/importing the certificate\n\n:param asn1:\n    An asn1crypto.x509.Certificate object", "id": "f9489:c2:m0"}
{"signature": "def _verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(certificate_or_public_key)<EOL>))<EOL><DEDENT>if not isinstance(signature, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(signature)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>valid_hash_algorithms = set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>if certificate_or_public_key.algorithm == '<STR_LIT>' and not rsa_pss_padding:<EOL><INDENT>valid_hash_algorithms |= set(['<STR_LIT>'])<EOL><DEDENT>if hash_algorithm not in valid_hash_algorithms:<EOL><INDENT>valid_hash_algorithms_error = '<STR_LIT>'<EOL>if certificate_or_public_key.algorithm == '<STR_LIT>' and not rsa_pss_padding:<EOL><INDENT>valid_hash_algorithms_error += '<STR_LIT>'<EOL><DEDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>valid_hash_algorithms_error,<EOL>repr(hash_algorithm)<EOL>))<EOL><DEDENT>if certificate_or_public_key.algorithm != '<STR_LIT>' and rsa_pss_padding is not False:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>certificate_or_public_key.algorithm.upper()<EOL>))<EOL><DEDENT>if hash_algorithm == '<STR_LIT>':<EOL><INDENT>if len(data) > certificate_or_public_key.byte_size - <NUM_LIT:11>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>certificate_or_public_key.byte_size,<EOL>len(data)<EOL>))<EOL><DEDENT><DEDENT>if _backend == '<STR_LIT>':<EOL><INDENT>if certificate_or_public_key.algorithm == '<STR_LIT>':<EOL><INDENT>return _pure_python_ecdsa_verify(certificate_or_public_key, signature, data, hash_algorithm)<EOL><DEDENT>return _advapi32_verify(certificate_or_public_key, signature, data, 
hash_algorithm, rsa_pss_padding)<EOL><DEDENT>return _bcrypt_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding)<EOL>", "docstring": "Verifies an RSA, DSA or ECDSA signature\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n    A byte string of the signature to verify\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:param rsa_pss_padding:\n    If PSS padding should be used for RSA keys\n\n:raises:\n    oscrypto.errors.SignatureError - when the signature is determined to be invalid\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9489:m22"}
{"signature": "def _bcrypt_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):", "body": "flags = BcryptConst.BCRYPT_PAD_PKCS1<EOL>if rsa_oaep_padding is True:<EOL><INDENT>flags = BcryptConst.BCRYPT_PAD_OAEP<EOL>padding_info_struct_pointer = struct(bcrypt, '<STR_LIT>')<EOL>padding_info_struct = unwrap(padding_info_struct_pointer)<EOL>hash_buffer = buffer_from_unicode(BcryptConst.BCRYPT_SHA1_ALGORITHM)<EOL>padding_info_struct.pszAlgId = cast(bcrypt, '<STR_LIT>', hash_buffer)<EOL>padding_info_struct.pbLabel = null()<EOL>padding_info_struct.cbLabel = <NUM_LIT:0><EOL>padding_info = cast(bcrypt, '<STR_LIT>', padding_info_struct_pointer)<EOL><DEDENT>else:<EOL><INDENT>padding_info = null()<EOL><DEDENT>out_len = new(bcrypt, '<STR_LIT>')<EOL>res = bcrypt.BCryptEncrypt(<EOL>certificate_or_public_key.key_handle,<EOL>data,<EOL>len(data),<EOL>padding_info,<EOL>null(),<EOL><NUM_LIT:0>,<EOL>null(),<EOL><NUM_LIT:0>,<EOL>out_len,<EOL>flags<EOL>)<EOL>handle_error(res)<EOL>buffer_len = deref(out_len)<EOL>buffer = buffer_from_bytes(buffer_len)<EOL>res = bcrypt.BCryptEncrypt(<EOL>certificate_or_public_key.key_handle,<EOL>data,<EOL>len(data),<EOL>padding_info,<EOL>null(),<EOL><NUM_LIT:0>,<EOL>buffer,<EOL>buffer_len,<EOL>out_len,<EOL>flags<EOL>)<EOL>handle_error(res)<EOL>return bytes_from_buffer(buffer, deref(out_len))<EOL>", "docstring": "Encrypts a value using an RSA public key via CNG\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to encrypt with\n\n:param data:\n    A byte string of the data to encrypt\n\n:param rsa_oaep_padding:\n    If OAEP padding should be used instead of PKCS#1 v1.5\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the ciphertext", "id": "f9489:m34"}
{"signature": "def dsa_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return _sign(private_key, data, hash_algorithm)<EOL>", "docstring": "Generates a DSA signature\n\n:param private_key:\n    The PrivateKey to generate the signature with\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the signature", "id": "f9489:m27"}
{"signature": "def _advapi32_load_key(key_object, key_info, container):", "body": "key_type = '<STR_LIT>' if isinstance(key_info, keys.PublicKeyInfo) else '<STR_LIT>'<EOL>algo = key_info.algorithm<EOL>if algo == '<STR_LIT>':<EOL><INDENT>provider = Advapi32Const.MS_ENH_RSA_AES_PROV<EOL><DEDENT>else:<EOL><INDENT>provider = Advapi32Const.MS_ENH_DSS_DH_PROV<EOL><DEDENT>context_handle = None<EOL>key_handle = None<EOL>try:<EOL><INDENT>context_handle = open_context_handle(provider, verify_only=key_type == '<STR_LIT>')<EOL>blob = _advapi32_create_blob(key_info, key_type, algo)<EOL>buffer_ = buffer_from_bytes(blob)<EOL>key_handle_pointer = new(advapi32, '<STR_LIT>')<EOL>res = advapi32.CryptImportKey(<EOL>context_handle,<EOL>buffer_,<EOL>len(blob),<EOL>null(),<EOL><NUM_LIT:0>,<EOL>key_handle_pointer<EOL>)<EOL>handle_error(res)<EOL>key_handle = unwrap(key_handle_pointer)<EOL>output = container(key_handle, key_object)<EOL>output.context_handle = context_handle<EOL>if algo == '<STR_LIT>':<EOL><INDENT>ex_blob = _advapi32_create_blob(key_info, key_type, algo, signing=False)<EOL>ex_buffer = buffer_from_bytes(ex_blob)<EOL>ex_key_handle_pointer = new(advapi32, '<STR_LIT>')<EOL>res = advapi32.CryptImportKey(<EOL>context_handle,<EOL>ex_buffer,<EOL>len(ex_blob),<EOL>null(),<EOL><NUM_LIT:0>,<EOL>ex_key_handle_pointer<EOL>)<EOL>handle_error(res)<EOL>output.ex_key_handle = unwrap(ex_key_handle_pointer)<EOL><DEDENT>return output<EOL><DEDENT>except (Exception):<EOL><INDENT>if key_handle:<EOL><INDENT>advapi32.CryptDestroyKey(key_handle)<EOL><DEDENT>if context_handle:<EOL><INDENT>close_context_handle(context_handle)<EOL><DEDENT>raise<EOL><DEDENT>", "docstring": "Loads a certificate, public key or private key into a Certificate,\nPublicKey or PrivateKey object via CryptoAPI\n\n:param key_object:\n    An asn1crypto.x509.Certificate, asn1crypto.keys.PublicKeyInfo or\n    asn1crypto.keys.PrivateKeyInfo object\n\n:param key_info:\n    An asn1crypto.keys.PublicKeyInfo or 
asn1crypto.keys.PrivateKeyInfo\n    object\n\n:param container:\n    The class of the object to hold the key_handle\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A PrivateKey, PublicKey or Certificate object, based on container", "id": "f9489:m12"}
{"signature": "def ecdsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return _verify(certificate_or_public_key, signature, data, hash_algorithm)<EOL>", "docstring": "Verifies an ECDSA signature\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n    A byte string of the signature to verify\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n    oscrypto.errors.SignatureError - when the signature is determined to be invalid\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9489:m21"}
{"signature": "@property<EOL><INDENT>def self_signed(self):<DEDENT>", "body": "if self._self_signed is None:<EOL><INDENT>self._self_signed = False<EOL>if self.asn1.self_signed in set(['<STR_LIT:yes>', '<STR_LIT>']):<EOL><INDENT>signature_algo = self.asn1['<STR_LIT>'].signature_algo<EOL>hash_algo = self.asn1['<STR_LIT>'].hash_algo<EOL>if signature_algo == '<STR_LIT>':<EOL><INDENT>verify_func = rsa_pkcs1v15_verify<EOL><DEDENT>elif signature_algo == '<STR_LIT>':<EOL><INDENT>verify_func = dsa_verify<EOL><DEDENT>elif signature_algo == '<STR_LIT>':<EOL><INDENT>verify_func = ecdsa_verify<EOL><DEDENT>else:<EOL><INDENT>raise OSError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>signature_algo<EOL>))<EOL><DEDENT>try:<EOL><INDENT>verify_func(<EOL>self,<EOL>self.asn1['<STR_LIT>'].native,<EOL>self.asn1['<STR_LIT>'].dump(),<EOL>hash_algo<EOL>)<EOL>self._self_signed = True<EOL><DEDENT>except (SignatureError):<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>return self._self_signed<EOL>", "docstring": ":return:\n    A boolean - if the certificate is self-signed", "id": "f9489:c2:m5"}
{"signature": "@property<EOL><INDENT>def byte_size(self):<DEDENT>", "body": "return self.asn1.byte_size<EOL>", "docstring": ":return:\n    The number of bytes in the key, as an integer", "id": "f9489:c0:m4"}
{"signature": "@property<EOL><INDENT>def bit_size(self):<DEDENT>", "body": "return self.asn1.public_key.bit_size<EOL>", "docstring": ":return:\n    The number of bits in the key, as an integer", "id": "f9489:c2:m3"}
{"signature": "def _bcrypt_interpret_dsa_key_blob(key_type, version, blob_struct, blob):", "body": "key_byte_length = native(int, blob_struct.cbKey)<EOL>if version == <NUM_LIT:1>:<EOL><INDENT>q = int_from_bytes(native(byte_cls, blob_struct.q))<EOL>g_offset = key_byte_length<EOL>public_offset = g_offset + key_byte_length<EOL>private_offset = public_offset + key_byte_length<EOL>p = int_from_bytes(blob[<NUM_LIT:0>:g_offset])<EOL>g = int_from_bytes(blob[g_offset:public_offset])<EOL><DEDENT>elif version == <NUM_LIT:2>:<EOL><INDENT>seed_byte_length = native(int, blob_struct.cbSeedLength)<EOL>group_byte_length = native(int, blob_struct.cbGroupSize)<EOL>q_offset = seed_byte_length<EOL>p_offset = q_offset + group_byte_length<EOL>g_offset = p_offset + key_byte_length<EOL>public_offset = g_offset + key_byte_length<EOL>private_offset = public_offset + key_byte_length<EOL>q = int_from_bytes(blob[q_offset:p_offset])<EOL>p = int_from_bytes(blob[p_offset:g_offset])<EOL>g = int_from_bytes(blob[g_offset:public_offset])<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % repr(version))<EOL><DEDENT>if key_type == '<STR_LIT>':<EOL><INDENT>public = int_from_bytes(blob[public_offset:private_offset])<EOL>return keys.PublicKeyInfo({<EOL>'<STR_LIT>': keys.PublicKeyAlgorithm({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': keys.DSAParams({<EOL>'<STR_LIT:p>': p,<EOL>'<STR_LIT:q>': q,<EOL>'<STR_LIT:g>': g,<EOL>})<EOL>}),<EOL>'<STR_LIT>': core.Integer(public),<EOL>})<EOL><DEDENT>elif key_type == '<STR_LIT>':<EOL><INDENT>private = int_from_bytes(blob[private_offset:private_offset + key_byte_length])<EOL>return keys.PrivateKeyInfo({<EOL>'<STR_LIT:version>': <NUM_LIT:0>,<EOL>'<STR_LIT>': keys.PrivateKeyAlgorithm({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': keys.DSAParams({<EOL>'<STR_LIT:p>': p,<EOL>'<STR_LIT:q>': q,<EOL>'<STR_LIT:g>': g,<EOL>})<EOL>}),<EOL>'<STR_LIT>': core.Integer(private),<EOL>})<EOL><DEDENT>else:<EOL><INDENT>raise 
ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(key_type)<EOL>))<EOL><DEDENT>", "docstring": "Take a CNG BCRYPT_DSA_KEY_BLOB or BCRYPT_DSA_KEY_BLOB_V2 and converts it\ninto an ASN.1 structure\n\n:param key_type:\n    A unicode string of \"private\" or \"public\"\n\n:param version:\n    An integer - 1 or 2, indicating the blob is BCRYPT_DSA_KEY_BLOB or\n    BCRYPT_DSA_KEY_BLOB_V2\n\n:param blob_struct:\n    An instance of BCRYPT_DSA_KEY_BLOB or BCRYPT_DSA_KEY_BLOB_V2\n\n:param blob:\n    A byte string of the binary data contained after the struct\n\n:return:\n    An asn1crypto.keys.PrivateKeyInfo or asn1crypto.keys.PublicKeyInfo\n    object, based on the key_type param", "id": "f9489:m8"}
{"signature": "def __init__(self, key_handle, asn1):", "body": "PrivateKey.__init__(self, key_handle, asn1)<EOL>", "docstring": ":param key_handle:\n    A CNG BCRYPT_KEY_HANDLE value (Vista and newer) or an HCRYPTKEY\n    (XP and 2003) from loading/importing the key\n\n:param asn1:\n    An asn1crypto.keys.PublicKeyInfo object", "id": "f9489:c1:m0"}
{"signature": "def _advapi32_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):", "body": "algo = certificate_or_public_key.algorithm<EOL>if algo == '<STR_LIT>' and rsa_pss_padding:<EOL><INDENT>hash_length = {<EOL>'<STR_LIT>': <NUM_LIT:20>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:32>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:64><EOL>}.get(hash_algorithm, <NUM_LIT:0>)<EOL>decrypted_signature = raw_rsa_public_crypt(certificate_or_public_key, signature)<EOL>key_size = certificate_or_public_key.bit_size<EOL>if not verify_pss_padding(hash_algorithm, hash_length, key_size, data, decrypted_signature):<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT>return<EOL><DEDENT>if algo == '<STR_LIT>' and hash_algorithm == '<STR_LIT>':<EOL><INDENT>padded_plaintext = raw_rsa_public_crypt(certificate_or_public_key, signature)<EOL>try:<EOL><INDENT>plaintext = remove_pkcs1v15_signature_padding(certificate_or_public_key.byte_size, padded_plaintext)<EOL>if not constant_compare(plaintext, data):<EOL><INDENT>raise ValueError()<EOL><DEDENT><DEDENT>except (ValueError):<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT>return<EOL><DEDENT>hash_handle = None<EOL>try:<EOL><INDENT>alg_id = {<EOL>'<STR_LIT>': Advapi32Const.CALG_MD5,<EOL>'<STR_LIT>': Advapi32Const.CALG_SHA1,<EOL>'<STR_LIT>': Advapi32Const.CALG_SHA_256,<EOL>'<STR_LIT>': Advapi32Const.CALG_SHA_384,<EOL>'<STR_LIT>': Advapi32Const.CALG_SHA_512,<EOL>}[hash_algorithm]<EOL>hash_handle_pointer = new(advapi32, '<STR_LIT>')<EOL>res = advapi32.CryptCreateHash(<EOL>certificate_or_public_key.context_handle,<EOL>alg_id,<EOL>null(),<EOL><NUM_LIT:0>,<EOL>hash_handle_pointer<EOL>)<EOL>handle_error(res)<EOL>hash_handle = unwrap(hash_handle_pointer)<EOL>res = advapi32.CryptHashData(hash_handle, data, len(data), <NUM_LIT:0>)<EOL>handle_error(res)<EOL>if algo == '<STR_LIT>':<EOL><INDENT>try:<EOL><INDENT>signature = 
algos.DSASignature.load(signature).to_p1363()<EOL>half_len = len(signature) // <NUM_LIT:2><EOL>signature = signature[half_len:] + signature[:half_len]<EOL><DEDENT>except (ValueError, OverflowError, TypeError):<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT><DEDENT>reversed_signature = signature[::-<NUM_LIT:1>]<EOL>res = advapi32.CryptVerifySignatureW(<EOL>hash_handle,<EOL>reversed_signature,<EOL>len(signature),<EOL>certificate_or_public_key.key_handle,<EOL>null(),<EOL><NUM_LIT:0><EOL>)<EOL>handle_error(res)<EOL><DEDENT>finally:<EOL><INDENT>if hash_handle:<EOL><INDENT>advapi32.CryptDestroyHash(hash_handle)<EOL><DEDENT><DEDENT>", "docstring": "Verifies an RSA, DSA or ECDSA signature via CryptoAPI\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n    A byte string of the signature to verify\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n\n:param rsa_pss_padding:\n    If PSS padding should be used for RSA keys\n\n:raises:\n    oscrypto.errors.SignatureError - when the signature is determined to be invalid\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9489:m23"}
{"signature": "def _advapi32_interpret_dsa_key_blob(bit_size, public_blob, private_blob):", "body": "len1 = <NUM_LIT:20><EOL>len2 = bit_size // <NUM_LIT:8><EOL>q_offset = len2<EOL>g_offset = q_offset + len1<EOL>x_offset = g_offset + len2<EOL>y_offset = x_offset<EOL>p = int_from_bytes(private_blob[<NUM_LIT:0>:q_offset][::-<NUM_LIT:1>])<EOL>q = int_from_bytes(private_blob[q_offset:g_offset][::-<NUM_LIT:1>])<EOL>g = int_from_bytes(private_blob[g_offset:x_offset][::-<NUM_LIT:1>])<EOL>x = int_from_bytes(private_blob[x_offset:x_offset + len1][::-<NUM_LIT:1>])<EOL>y = int_from_bytes(public_blob[y_offset:y_offset + len2][::-<NUM_LIT:1>])<EOL>public_key_info = keys.PublicKeyInfo({<EOL>'<STR_LIT>': keys.PublicKeyAlgorithm({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': keys.DSAParams({<EOL>'<STR_LIT:p>': p,<EOL>'<STR_LIT:q>': q,<EOL>'<STR_LIT:g>': g,<EOL>})<EOL>}),<EOL>'<STR_LIT>': core.Integer(y),<EOL>})<EOL>private_key_info = keys.PrivateKeyInfo({<EOL>'<STR_LIT:version>': <NUM_LIT:0>,<EOL>'<STR_LIT>': keys.PrivateKeyAlgorithm({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': keys.DSAParams({<EOL>'<STR_LIT:p>': p,<EOL>'<STR_LIT:q>': q,<EOL>'<STR_LIT:g>': g,<EOL>})<EOL>}),<EOL>'<STR_LIT>': core.Integer(x),<EOL>})<EOL>return (public_key_info, private_key_info)<EOL>", "docstring": "Takes a CryptoAPI DSS private key blob and converts it into the ASN.1\nstructures for the public and private keys\n\n:param bit_size:\n    The integer bit size of the key\n\n:param public_blob:\n    A byte string of the binary data after the public key header\n\n:param private_blob:\n    A byte string of the binary data after the private key header\n\n:return:\n    A 2-element tuple of (asn1crypto.keys.PublicKeyInfo,\n    asn1crypto.keys.PrivateKeyInfo)", "id": "f9489:m6"}
{"signature": "def handle_error(result):", "body": "if result:<EOL><INDENT>return<EOL><DEDENT>_, error_string = get_error()<EOL>if not isinstance(error_string, str_cls):<EOL><INDENT>error_string = _try_decode(error_string)<EOL><DEDENT>raise OSError(error_string)<EOL>", "docstring": "Extracts the last Windows error message into a python unicode string\n\n:param result:\n    A function result, 0 or None indicates failure\n\n:return:\n    A unicode string error message", "id": "f9492:m0"}
{"signature": "def des_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if len(iv) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>return _decrypt('<STR_LIT>', key, data, iv, True)<EOL>", "docstring": "Decrypts DES ciphertext using a 56 bit key\n\n:param key:\n    The encryption key - a byte string 8 bytes long (includes error\n    correction bits)\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector used for encryption - a byte string\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9494:m11"}
{"signature": "def _bcrypt_decrypt(cipher, key, data, iv, padding):", "body": "key_handle = None<EOL>try:<EOL><INDENT>key_handle = _bcrypt_create_key_handle(cipher, key)<EOL>if iv is None:<EOL><INDENT>iv_len = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>iv_len = len(iv)<EOL><DEDENT>flags = <NUM_LIT:0><EOL>if padding is True:<EOL><INDENT>flags = BcryptConst.BCRYPT_BLOCK_PADDING<EOL><DEDENT>out_len = new(bcrypt, '<STR_LIT>')<EOL>res = bcrypt.BCryptDecrypt(<EOL>key_handle,<EOL>data,<EOL>len(data),<EOL>null(),<EOL>null(),<EOL><NUM_LIT:0>,<EOL>null(),<EOL><NUM_LIT:0>,<EOL>out_len,<EOL>flags<EOL>)<EOL>handle_error(res)<EOL>buffer_len = deref(out_len)<EOL>buffer = buffer_from_bytes(buffer_len)<EOL>iv_buffer = buffer_from_bytes(iv) if iv else null()<EOL>res = bcrypt.BCryptDecrypt(<EOL>key_handle,<EOL>data,<EOL>len(data),<EOL>null(),<EOL>iv_buffer,<EOL>iv_len,<EOL>buffer,<EOL>buffer_len,<EOL>out_len,<EOL>flags<EOL>)<EOL>handle_error(res)<EOL>return bytes_from_buffer(buffer, deref(out_len))<EOL><DEDENT>finally:<EOL><INDENT>if key_handle:<EOL><INDENT>bcrypt.BCryptDestroyKey(key_handle)<EOL><DEDENT><DEDENT>", "docstring": "Decrypts AES/RC4/RC2/3DES/DES ciphertext via CNG\n\n:param cipher:\n    A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n    \"rc2\", \"rc4\"\n\n:param key:\n    The encryption key - a byte string 5-16 bytes long\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector - a byte string - unused for RC4\n\n:param padding:\n    Boolean, if padding should be used - unused for RC4\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9494:m19"}
{"signature": "def tripledes_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) != <NUM_LIT:16> and len(key) != <NUM_LIT>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if not iv:<EOL><INDENT>iv = rand_bytes(<NUM_LIT:8>)<EOL><DEDENT>elif len(iv) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>cipher = '<STR_LIT>'<EOL>if len(key) == <NUM_LIT:16>:<EOL><INDENT>cipher = '<STR_LIT>'<EOL><DEDENT>return (iv, _encrypt(cipher, key, data, iv, True))<EOL>", "docstring": "Encrypts plaintext using 3DES in either 2 or 3 key mode\n\n:param key:\n    The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)\n\n:param data:\n    The plaintext - a byte string\n\n:param iv:\n    The 8-byte initialization vector to use - a byte string - set as None\n    to generate an appropriate one\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A tuple of two byte strings (iv, ciphertext)", "id": "f9494:m8"}
{"signature": "def aes_cbc_no_padding_decrypt(key, data, iv):", "body": "if len(key) not in [<NUM_LIT:16>, <NUM_LIT>, <NUM_LIT:32>]:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if len(iv) != <NUM_LIT:16>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>return _decrypt('<STR_LIT>', key, data, iv, False)<EOL>", "docstring": "Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key and no\npadding.\n\n:param key:\n    The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector - a byte string 16-bytes long\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9494:m1"}
{"signature": "def _decrypt(cipher, key, data, iv, padding):", "body": "if not isinstance(key, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(key)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if cipher != '<STR_LIT>' and not isinstance(iv, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(iv)<EOL>))<EOL><DEDENT>if cipher != '<STR_LIT>' and padding is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if _backend == '<STR_LIT>':<EOL><INDENT>return _advapi32_decrypt(cipher, key, data, iv, padding)<EOL><DEDENT>return _bcrypt_decrypt(cipher, key, data, iv, padding)<EOL>", "docstring": "Decrypts AES/RC4/RC2/3DES/DES ciphertext\n\n:param cipher:\n    A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n    \"rc2\", \"rc4\"\n\n:param key:\n    The encryption key - a byte string 5-16 bytes long\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector - a byte string - unused for RC4\n\n:param padding:\n    Boolean, if padding should be used - unused for RC4\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9494:m17"}
{"signature": "def _advapi32_decrypt(cipher, key, data, iv, padding):", "body": "context_handle = None<EOL>key_handle = None<EOL>try:<EOL><INDENT>context_handle, key_handle = _advapi32_create_handles(cipher, key, iv)<EOL>if cipher == '<STR_LIT>' and not padding:<EOL><INDENT>data += (b'<STR_LIT>' * <NUM_LIT:16>)<EOL><DEDENT>buffer = buffer_from_bytes(data)<EOL>out_len = new(advapi32, '<STR_LIT>', len(data))<EOL>res = advapi32.CryptDecrypt(<EOL>key_handle,<EOL>null(),<EOL>True,<EOL><NUM_LIT:0>,<EOL>buffer,<EOL>out_len<EOL>)<EOL>handle_error(res)<EOL>return bytes_from_buffer(buffer, deref(out_len))<EOL><DEDENT>finally:<EOL><INDENT>if key_handle:<EOL><INDENT>advapi32.CryptDestroyKey(key_handle)<EOL><DEDENT>if context_handle:<EOL><INDENT>close_context_handle(context_handle)<EOL><DEDENT><DEDENT>", "docstring": "Decrypts AES/RC4/RC2/3DES/DES ciphertext via CryptoAPI\n\n:param cipher:\n    A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n    \"rc2\", \"rc4\"\n\n:param key:\n    The encryption key - a byte string 5-16 bytes long\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector - a byte string - unused for RC4\n\n:param padding:\n    Boolean, if padding should be used - unused for RC4\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9494:m18"}
{"signature": "def rc4_decrypt(key, data):", "body": "if len(key) < <NUM_LIT:5> or len(key) > <NUM_LIT:16>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>return _decrypt('<STR_LIT>', key, data, None, None)<EOL>", "docstring": "Decrypts RC4 ciphertext using a 40-128 bit key\n\n:param key:\n    The encryption key - a byte string 5-16 bytes long\n\n:param data:\n    The ciphertext - a byte string\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9494:m5"}
{"signature": "def _advapi32_encrypt(cipher, key, data, iv, padding):", "body": "context_handle = None<EOL>key_handle = None<EOL>try:<EOL><INDENT>context_handle, key_handle = _advapi32_create_handles(cipher, key, iv)<EOL>out_len = new(advapi32, '<STR_LIT>', len(data))<EOL>res = advapi32.CryptEncrypt(<EOL>key_handle,<EOL>null(),<EOL>True,<EOL><NUM_LIT:0>,<EOL>null(),<EOL>out_len,<EOL><NUM_LIT:0><EOL>)<EOL>handle_error(res)<EOL>buffer_len = deref(out_len)<EOL>buffer = buffer_from_bytes(buffer_len)<EOL>write_to_buffer(buffer, data)<EOL>pointer_set(out_len, len(data))<EOL>res = advapi32.CryptEncrypt(<EOL>key_handle,<EOL>null(),<EOL>True,<EOL><NUM_LIT:0>,<EOL>buffer,<EOL>out_len,<EOL>buffer_len<EOL>)<EOL>handle_error(res)<EOL>output = bytes_from_buffer(buffer, deref(out_len))<EOL>if cipher == '<STR_LIT>' and not padding:<EOL><INDENT>if output[-<NUM_LIT:16>:] != (b'<STR_LIT>' * <NUM_LIT:16>):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>output = output[:-<NUM_LIT:16>]<EOL><DEDENT>return output<EOL><DEDENT>finally:<EOL><INDENT>if key_handle:<EOL><INDENT>advapi32.CryptDestroyKey(key_handle)<EOL><DEDENT>if context_handle:<EOL><INDENT>close_context_handle(context_handle)<EOL><DEDENT><DEDENT>", "docstring": "Encrypts plaintext via CryptoAPI\n\n:param cipher:\n    A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n    \"rc2\", \"rc4\"\n\n:param key:\n    The encryption key - a byte string 5-16 bytes long\n\n:param data:\n    The plaintext - a byte string\n\n:param iv:\n    The initialization vector - a byte string - unused for RC4\n\n:param padding:\n    Boolean, if padding should be used - unused for RC4\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the ciphertext", "id": "f9494:m15"}
{"signature": "def _encrypt(cipher, key, data, iv, padding):", "body": "if not isinstance(key, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(key)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if cipher != '<STR_LIT>' and not isinstance(iv, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(iv)<EOL>))<EOL><DEDENT>if cipher != '<STR_LIT>' and not padding:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if _backend == '<STR_LIT>':<EOL><INDENT>return _advapi32_encrypt(cipher, key, data, iv, padding)<EOL><DEDENT>return _bcrypt_encrypt(cipher, key, data, iv, padding)<EOL>", "docstring": "Encrypts plaintext\n\n:param cipher:\n    A unicode string of \"aes\", \"des\", \"tripledes_2key\", \"tripledes_3key\",\n    \"rc2\", \"rc4\"\n\n:param key:\n    The encryption key - a byte string 5-16 bytes long\n\n:param data:\n    The plaintext - a byte string\n\n:param iv:\n    The initialization vector - a byte string - unused for RC4\n\n:param padding:\n    Boolean, if padding should be used - unused for RC4\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the ciphertext", "id": "f9494:m14"}
{"signature": "def des_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if not iv:<EOL><INDENT>iv = rand_bytes(<NUM_LIT:8>)<EOL><DEDENT>elif len(iv) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>return (iv, _encrypt('<STR_LIT>', key, data, iv, True))<EOL>", "docstring": "Encrypts plaintext using DES with a 56 bit key\n\n:param key:\n    The encryption key - a byte string 8 bytes long (includes error\n    correction bits)\n\n:param data:\n    The plaintext - a byte string\n\n:param iv:\n    The 8-byte initialization vector to use - a byte string - set as None\n    to generate an appropriate one\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A tuple of two byte strings (iv, ciphertext)", "id": "f9494:m10"}
{"signature": "def aes_cbc_pkcs7_decrypt(key, data, iv):", "body": "if len(key) not in [<NUM_LIT:16>, <NUM_LIT>, <NUM_LIT:32>]:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if len(iv) != <NUM_LIT:16>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>return _decrypt('<STR_LIT>', key, data, iv, True)<EOL>", "docstring": "Decrypts AES ciphertext in CBC mode using a 128, 192 or 256 bit key\n\n:param key:\n    The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector - a byte string 16-bytes long\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9494:m3"}
{"signature": "def rc2_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) < <NUM_LIT:5> or len(key) > <NUM_LIT:16>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if len(iv) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>return _decrypt('<STR_LIT>', key, data, iv, True)<EOL>", "docstring": "Decrypts RC2 ciphertext using a 64 bit key\n\n:param key:\n    The encryption key - a byte string 8 bytes long\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector used for encryption - a byte string\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9494:m7"}
{"signature": "def parse_pkcs12(data, password=None):", "body": "if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if password is not None:<EOL><INDENT>if not isinstance(password, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(password)<EOL>))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>password = b'<STR_LIT>'<EOL><DEDENT>certs = {}<EOL>private_keys = {}<EOL>pfx = pkcs12.Pfx.load(data)<EOL>auth_safe = pfx['<STR_LIT>']<EOL>if auth_safe['<STR_LIT>'].native != '<STR_LIT:data>':<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT>authenticated_safe = pfx.authenticated_safe<EOL>mac_data = pfx['<STR_LIT>']<EOL>if mac_data:<EOL><INDENT>mac_algo = mac_data['<STR_LIT>']['<STR_LIT>']['<STR_LIT>'].native<EOL>key_length = {<EOL>'<STR_LIT>': <NUM_LIT:20>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:32>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:64>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:32>,<EOL>}[mac_algo]<EOL>mac_key = pkcs12_kdf(<EOL>mac_algo,<EOL>password,<EOL>mac_data['<STR_LIT>'].native,<EOL>mac_data['<STR_LIT>'].native,<EOL>key_length,<EOL><NUM_LIT:3>  <EOL>)<EOL>hash_mod = getattr(hashlib, mac_algo)<EOL>computed_hmac = hmac.new(mac_key, auth_safe['<STR_LIT:content>'].contents, hash_mod).digest()<EOL>stored_hmac = mac_data['<STR_LIT>']['<STR_LIT>'].native<EOL>if not constant_compare(computed_hmac, stored_hmac):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>for content_info in authenticated_safe:<EOL><INDENT>content = content_info['<STR_LIT:content>']<EOL>if isinstance(content, core.OctetString):<EOL><INDENT>_parse_safe_contents(content.native, certs, private_keys, password)<EOL><DEDENT>elif isinstance(content, cms.EncryptedData):<EOL><INDENT>encrypted_content_info = content['<STR_LIT>']<EOL>encryption_algorithm_info = 
encrypted_content_info['<STR_LIT>']<EOL>encrypted_content = encrypted_content_info['<STR_LIT>'].native<EOL>decrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password)<EOL>_parse_safe_contents(decrypted_content, certs, private_keys, password)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT><DEDENT>key_fingerprints = set(private_keys.keys())<EOL>cert_fingerprints = set(certs.keys())<EOL>common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints))<EOL>key = None<EOL>cert = None<EOL>other_certs = []<EOL>if len(common_fingerprints) >= <NUM_LIT:1>:<EOL><INDENT>fingerprint = common_fingerprints[<NUM_LIT:0>]<EOL>key = private_keys[fingerprint]<EOL>cert = certs[fingerprint]<EOL>other_certs = [certs[f] for f in certs if f != fingerprint]<EOL>return (key, cert, other_certs)<EOL><DEDENT>if len(private_keys) > <NUM_LIT:0>:<EOL><INDENT>first_key = sorted(list(private_keys.keys()))[<NUM_LIT:0>]<EOL>key = private_keys[first_key]<EOL><DEDENT>if len(certs) > <NUM_LIT:0>:<EOL><INDENT>first_key = sorted(list(certs.keys()))[<NUM_LIT:0>]<EOL>cert = certs[first_key]<EOL>del certs[first_key]<EOL><DEDENT>if len(certs) > <NUM_LIT:0>:<EOL><INDENT>other_certs = sorted(list(certs.values()))<EOL><DEDENT>return (key, cert, other_certs)<EOL>", "docstring": "Parses a PKCS#12 ANS.1 DER-encoded structure and extracts certs and keys\n\n:param data:\n    A byte string of a DER-encoded PKCS#12 file\n\n:param password:\n    A byte string of the password to any encrypted data\n\n:raises:\n    ValueError - when any of the parameters are of the wrong type or value\n    OSError - when an error is returned by one of the OS decryption functions\n\n:return:\n    A three-element tuple of:\n     1. An asn1crypto.keys.PrivateKeyInfo object\n     2. An asn1crypto.x509.Certificate object\n     3. 
A list of zero or more asn1crypto.x509.Certificate objects that are\n        \"extra\" certificates, possibly intermediates from the cert chain", "id": "f9495:m5"}
{"signature": "def _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password):", "body": "decrypt_func = crypto_funcs[encryption_algorithm_info.encryption_cipher]<EOL>if encryption_algorithm_info.kdf == '<STR_LIT>':<EOL><INDENT>if encryption_algorithm_info.encryption_cipher == '<STR_LIT>':<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT>enc_key = pbkdf2(<EOL>encryption_algorithm_info.kdf_hmac,<EOL>password,<EOL>encryption_algorithm_info.kdf_salt,<EOL>encryption_algorithm_info.kdf_iterations,<EOL>encryption_algorithm_info.key_length<EOL>)<EOL>enc_iv = encryption_algorithm_info.encryption_iv<EOL>plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)<EOL><DEDENT>elif encryption_algorithm_info.kdf == '<STR_LIT>':<EOL><INDENT>derived_output = pbkdf1(<EOL>encryption_algorithm_info.kdf_hmac,<EOL>password,<EOL>encryption_algorithm_info.kdf_salt,<EOL>encryption_algorithm_info.kdf_iterations,<EOL>encryption_algorithm_info.key_length + <NUM_LIT:8><EOL>)<EOL>enc_key = derived_output[<NUM_LIT:0>:<NUM_LIT:8>]<EOL>enc_iv = derived_output[<NUM_LIT:8>:<NUM_LIT:16>]<EOL>plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)<EOL><DEDENT>elif encryption_algorithm_info.kdf == '<STR_LIT>':<EOL><INDENT>enc_key = pkcs12_kdf(<EOL>encryption_algorithm_info.kdf_hmac,<EOL>password,<EOL>encryption_algorithm_info.kdf_salt,<EOL>encryption_algorithm_info.kdf_iterations,<EOL>encryption_algorithm_info.key_length,<EOL><NUM_LIT:1>  <EOL>)<EOL>if encryption_algorithm_info.encryption_cipher == '<STR_LIT>':<EOL><INDENT>plaintext = decrypt_func(enc_key, encrypted_content)<EOL><DEDENT>else:<EOL><INDENT>enc_iv = pkcs12_kdf(<EOL>encryption_algorithm_info.kdf_hmac,<EOL>password,<EOL>encryption_algorithm_info.kdf_salt,<EOL>encryption_algorithm_info.kdf_iterations,<EOL>encryption_algorithm_info.encryption_block_size,<EOL><NUM_LIT:2>   <EOL>)<EOL>plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)<EOL><DEDENT><DEDENT>return 
plaintext<EOL>", "docstring": "Decrypts encrypted ASN.1 data\n\n:param encryption_algorithm_info:\n    An instance of asn1crypto.pkcs5.Pkcs5EncryptionAlgorithm\n\n:param encrypted_content:\n    A byte string of the encrypted content\n\n:param password:\n    A byte string of the encrypted content's password\n\n:return:\n    A byte string of the decrypted plaintext", "id": "f9495:m7"}
{"signature": "def parse_private(data, password=None):", "body": "if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if password is not None:<EOL><INDENT>if not isinstance(password, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(password)<EOL>))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>password = b'<STR_LIT>'<EOL><DEDENT>if data[<NUM_LIT:0>:<NUM_LIT:5>] == b'<STR_LIT>':<EOL><INDENT>key_type, _, data = _unarmor_pem(data, password)<EOL>if key_type == '<STR_LIT>':<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT>if key_type == '<STR_LIT>':<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT><DEDENT>try:<EOL><INDENT>pki = keys.PrivateKeyInfo.load(data)<EOL>pki.native<EOL>return pki<EOL><DEDENT>except (ValueError):<EOL><INDENT>pass  <EOL><DEDENT>try:<EOL><INDENT>parsed_wrapper = keys.EncryptedPrivateKeyInfo.load(data)<EOL>encryption_algorithm_info = parsed_wrapper['<STR_LIT>']<EOL>encrypted_data = parsed_wrapper['<STR_LIT>'].native<EOL>decrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, password)<EOL>pki = keys.PrivateKeyInfo.load(decrypted_data)<EOL>pki.native<EOL>return pki<EOL><DEDENT>except (ValueError):<EOL><INDENT>pass  <EOL><DEDENT>try:<EOL><INDENT>parsed = keys.RSAPrivateKey.load(data)<EOL>parsed.native<EOL>return keys.PrivateKeyInfo.wrap(parsed, '<STR_LIT>')<EOL><DEDENT>except (ValueError):<EOL><INDENT>pass  <EOL><DEDENT>try:<EOL><INDENT>parsed = keys.DSAPrivateKey.load(data)<EOL>parsed.native<EOL>return keys.PrivateKeyInfo.wrap(parsed, '<STR_LIT>')<EOL><DEDENT>except (ValueError):<EOL><INDENT>pass  <EOL><DEDENT>try:<EOL><INDENT>parsed = keys.ECPrivateKey.load(data)<EOL>parsed.native<EOL>return keys.PrivateKeyInfo.wrap(parsed, '<STR_LIT>')<EOL><DEDENT>except (ValueError):<EOL><INDENT>pass  <EOL><DEDENT>raise 
ValueError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL>", "docstring": "Loads a private key from a DER or PEM-formatted file. Supports RSA, DSA and\nEC private keys. Works with the follow formats:\n\n - RSAPrivateKey (PKCS#1)\n - ECPrivateKey (SECG SEC1 V2)\n - DSAPrivateKey (OpenSSL)\n - PrivateKeyInfo (RSA/DSA/EC - PKCS#8)\n - EncryptedPrivateKeyInfo (RSA/DSA/EC - PKCS#8)\n - Encrypted RSAPrivateKey (PEM only, OpenSSL)\n - Encrypted DSAPrivateKey (PEM only, OpenSSL)\n - Encrypted ECPrivateKey (PEM only, OpenSSL)\n\n:param data:\n    A byte string to load the private key from\n\n:param password:\n    The password to unencrypt the private key\n\n:raises:\n    ValueError - when the data does not appear to contain a private key, or the password is invalid\n\n:return:\n    An asn1crypto.keys.PrivateKeyInfo object", "id": "f9495:m2"}
{"signature": "def _unarmor_pem(data, password=None):", "body": "object_type, headers, der_bytes = pem.unarmor(data)<EOL>type_regex = '<STR_LIT>'<EOL>armor_type = re.match(type_regex, object_type)<EOL>if not armor_type:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT>pem_header = armor_type.group(<NUM_LIT:1>)<EOL>data = data.strip()<EOL>if pem_header in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>algo = armor_type.group(<NUM_LIT:2>).lower()<EOL>return ('<STR_LIT>', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))<EOL><DEDENT>key_type = pem_header.lower()<EOL>algo = None<EOL>if key_type == '<STR_LIT>':<EOL><INDENT>key_type = '<STR_LIT>'<EOL><DEDENT>elif key_type == '<STR_LIT>':<EOL><INDENT>key_type = '<STR_LIT>'<EOL>algo = '<STR_LIT>'<EOL><DEDENT>return (key_type, algo, der_bytes)<EOL>", "docstring": "Removes PEM-encoding from a public key, private key or certificate. If the\nprivate key is encrypted, the password will be used to decrypt it.\n\n:param data:\n    A byte string of the PEM-encoded data\n\n:param password:\n    A byte string of the encryption password, or None\n\n:return:\n    A 3-element tuple in the format: (key_type, algorithm, der_bytes). The\n    key_type will be a unicode string of \"public key\", \"private key\" or\n    \"certificate\". The algorithm will be a unicode string of \"rsa\", \"dsa\"\n    or \"ec\".", "id": "f9495:m3"}
{"signature": "def _cached_path_needs_update(ca_path, cache_length):", "body": "exists = os.path.exists(ca_path)<EOL>if not exists:<EOL><INDENT>return True<EOL><DEDENT>stats = os.stat(ca_path)<EOL>if stats.st_mtime < time.time() - cache_length * <NUM_LIT> * <NUM_LIT>:<EOL><INDENT>return True<EOL><DEDENT>if stats.st_size == <NUM_LIT:0>:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Checks to see if a cache file needs to be refreshed\n\n:param ca_path:\n    A unicode string of the path to the cache file\n\n:param cache_length:\n    An integer representing the number of hours the cache is valid for\n\n:return:\n    A boolean - True if the cache needs to be updated, False if the file\n    is up-to-date", "id": "f9498:m5"}
{"signature": "def clear_cache(temp_dir=None):", "body": "with memory_lock:<EOL><INDENT>_module_values['<STR_LIT>'] = None<EOL>_module_values['<STR_LIT>'] = None<EOL><DEDENT>ca_path, temp = _ca_path(temp_dir)<EOL>if temp:<EOL><INDENT>with path_lock:<EOL><INDENT>if os.path.exists(ca_path):<EOL><INDENT>os.remove(ca_path)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Clears any cached info that was exported from the OS trust store. This will\nensure the latest changes are returned from calls to get_list() and\nget_path(), but at the expense of re-exporting and parsing all certificates.\n\n:param temp_dir:\n    The temporary directory to cache the CA certs in on OS X and Windows.\n    Needs to have secure permissions so other users can not modify the\n    contents. Must be the same value passed to get_path().", "id": "f9498:m2"}
{"signature": "def get_path(temp_dir=None, cache_length=<NUM_LIT>, cert_callback=None):", "body": "ca_path, temp = _ca_path(temp_dir)<EOL>if temp and _cached_path_needs_update(ca_path, cache_length):<EOL><INDENT>empty_set = set()<EOL>any_purpose = '<STR_LIT>'<EOL>apple_ssl = '<STR_LIT>'<EOL>win_server_auth = '<STR_LIT>'<EOL>with path_lock:<EOL><INDENT>if _cached_path_needs_update(ca_path, cache_length):<EOL><INDENT>with open(ca_path, '<STR_LIT:wb>') as f:<EOL><INDENT>for cert, trust_oids, reject_oids in extract_from_system(cert_callback, True):<EOL><INDENT>if sys.platform == '<STR_LIT>':<EOL><INDENT>if trust_oids != empty_set and any_purpose not in trust_oidsand apple_ssl not in trust_oids:<EOL><INDENT>if cert_callback:<EOL><INDENT>cert_callback(Certificate.load(cert), '<STR_LIT>')<EOL><DEDENT>continue<EOL><DEDENT>if reject_oids != empty_set and (apple_ssl in reject_oids<EOL>or any_purpose in reject_oids):<EOL><INDENT>if cert_callback:<EOL><INDENT>cert_callback(Certificate.load(cert), '<STR_LIT>')<EOL><DEDENT>continue<EOL><DEDENT><DEDENT>elif sys.platform == '<STR_LIT:win32>':<EOL><INDENT>if trust_oids != empty_set and any_purpose not in trust_oidsand win_server_auth not in trust_oids:<EOL><INDENT>if cert_callback:<EOL><INDENT>cert_callback(Certificate.load(cert), '<STR_LIT>')<EOL><DEDENT>continue<EOL><DEDENT>if reject_oids != empty_set and (win_server_auth in reject_oids<EOL>or any_purpose in reject_oids):<EOL><INDENT>if cert_callback:<EOL><INDENT>cert_callback(Certificate.load(cert), '<STR_LIT>')<EOL><DEDENT>continue<EOL><DEDENT><DEDENT>if cert_callback:<EOL><INDENT>cert_callback(Certificate.load(cert), None)<EOL><DEDENT>f.write(armor('<STR_LIT>', cert))<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if not ca_path:<EOL><INDENT>raise CACertsError('<STR_LIT>')<EOL><DEDENT>return ca_path<EOL>", "docstring": "Get the filesystem path to a file that contains OpenSSL-compatible CA certs.\n\nOn OS X and Windows, there are extracted from the system certificate store\nand 
cached in a file on the filesystem. This path should not be writable\nby other users, otherwise they could inject CA certs into the trust list.\n\n:param temp_dir:\n    The temporary directory to cache the CA certs in on OS X and Windows.\n    Needs to have secure permissions so other users can not modify the\n    contents.\n\n:param cache_length:\n    The number of hours to cache the CA certs on OS X and Windows\n\n:param cert_callback:\n    A callback that is called once for each certificate in the trust store.\n    It should accept two parameters: an asn1crypto.x509.Certificate object,\n    and a reason. The reason will be None if the certificate is being\n    exported, otherwise it will be a unicode string of the reason it won't.\n    This is only called on Windows and OS X when passed to this function.\n\n:raises:\n    oscrypto.errors.CACertsError - when an error occurs exporting/locating certs\n\n:return:\n    The full filesystem path to a CA certs file", "id": "f9498:m0"}
{"signature": "def _extract_error():", "body": "error_num = errno()<EOL>try:<EOL><INDENT>error_string = os.strerror(error_num)<EOL><DEDENT>except (ValueError):<EOL><INDENT>return str_cls(error_num)<EOL><DEDENT>if isinstance(error_string, str_cls):<EOL><INDENT>return error_string<EOL><DEDENT>return _try_decode(error_string)<EOL>", "docstring": "Extracts the last OS error message into a python unicode string\n\n:return:\n    A unicode string error message", "id": "f9502:m1"}
{"signature": "@staticmethod<EOL><INDENT>def cf_dictionary_from_pairs(pairs):<DEDENT>", "body": "length = len(pairs)<EOL>keys = []<EOL>values = []<EOL>for pair in pairs:<EOL><INDENT>key, value = pair<EOL>keys.append(key)<EOL>values.append(value)<EOL><DEDENT>keys = (CFStringRef * length)(*keys)<EOL>values = (CFTypeRef * length)(*values)<EOL>return CoreFoundation.CFDictionaryCreate(<EOL>CoreFoundation.kCFAllocatorDefault,<EOL>_cast_pointer_p(byref(keys)),<EOL>_cast_pointer_p(byref(values)),<EOL>length,<EOL>kCFTypeDictionaryKeyCallBacks,<EOL>kCFTypeDictionaryValueCallBacks<EOL>)<EOL>", "docstring": "Creates a CFDictionaryRef object from a list of 2-element tuples\nrepresenting the key and value. Each key should be a CFStringRef and each\nvalue some sort of CF* type.\n\n:param pairs:\n    A list of 2-element tuples\n\n:return:\n    A CFDictionaryRef", "id": "f9504:c0:m8"}
{"signature": "@classmethod<EOL><INDENT>def native(cls, value):<DEDENT>", "body": "type_id = CoreFoundation.CFGetTypeID(value)<EOL>if type_id in cls._native_map:<EOL><INDENT>return cls._native_map[type_id](value)<EOL><DEDENT>else:<EOL><INDENT>return value<EOL><DEDENT>", "docstring": "Converts a CF* object into its python equivalent\n\n:param value:\n    The CF* object to convert\n\n:return:\n    The native python object", "id": "f9504:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def cf_data_to_bytes(value):<DEDENT>", "body": "start = CoreFoundation.CFDataGetBytePtr(value)<EOL>num_bytes = CoreFoundation.CFDataGetLength(value)<EOL>return string_at(start, num_bytes)<EOL>", "docstring": "Extracts a bytestring from a CFData object\n\n:param value:\n    A CFData object\n\n:return:\n    A byte string", "id": "f9504:c0:m6"}
{"signature": "@staticmethod<EOL><INDENT>def cf_number_to_number(value):<DEDENT>", "body": "type_ = CoreFoundation.CFNumberGetType(_cast_pointer_p(value))<EOL>c_type = {<EOL><NUM_LIT:1>: c_byte,              <EOL><NUM_LIT:2>: ctypes.c_short,      <EOL><NUM_LIT:3>: ctypes.c_int32,      <EOL><NUM_LIT:4>: ctypes.c_int64,      <EOL><NUM_LIT:5>: ctypes.c_float,      <EOL><NUM_LIT:6>: ctypes.c_double,     <EOL><NUM_LIT:7>: c_byte,              <EOL><NUM_LIT:8>: ctypes.c_short,      <EOL><NUM_LIT:9>: ctypes.c_int,        <EOL><NUM_LIT:10>: c_long,             <EOL><NUM_LIT:11>: ctypes.c_longlong,  <EOL><NUM_LIT:12>: ctypes.c_float,     <EOL><NUM_LIT>: ctypes.c_double,    <EOL><NUM_LIT>: c_long,             <EOL><NUM_LIT:15>: ctypes.c_int,       <EOL><NUM_LIT:16>: ctypes.c_double,    <EOL>}[type_]<EOL>output = c_type(<NUM_LIT:0>)<EOL>CoreFoundation.CFNumberGetValue(_cast_pointer_p(value), type_, byref(output))<EOL>return output.value<EOL>", "docstring": "Converts a CFNumber object to a python float or integer\n\n:param value:\n    The CFNumber object\n\n:return:\n    A python number (float or integer)", "id": "f9504:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def cf_array_from_list(values):<DEDENT>", "body": "length = len(values)<EOL>values = (CFTypeRef * length)(*values)<EOL>return CoreFoundation.CFArrayCreate(<EOL>CoreFoundation.kCFAllocatorDefault,<EOL>_cast_pointer_p(byref(values)),<EOL>length,<EOL>kCFTypeArrayCallBacks<EOL>)<EOL>", "docstring": "Creates a CFArrayRef object from a list of CF* type objects.\n\n:param values:\n    A list of CF* type object\n\n:return:\n    A CFArrayRef", "id": "f9504:c0:m9"}
{"signature": "@staticmethod<EOL><INDENT>def cf_data_from_bytes(bytes_):<DEDENT>", "body": "return CoreFoundation.CFDataCreate(<EOL>CoreFoundation.kCFAllocatorDefault,<EOL>bytes_,<EOL>len(bytes_)<EOL>)<EOL>", "docstring": "Creates a CFDataRef object from a byte string\n\n:param bytes_:\n    The data to create the CFData object from\n\n:return:\n    A CFDataRef", "id": "f9504:c0:m7"}
{"signature": "@staticmethod<EOL><INDENT>def cf_string_to_unicode(value):<DEDENT>", "body": "string = CoreFoundation.CFStringGetCStringPtr(<EOL>_cast_pointer_p(value),<EOL>kCFStringEncodingUTF8<EOL>)<EOL>if string is None:<EOL><INDENT>buffer = buffer_from_bytes(<NUM_LIT>)<EOL>result = CoreFoundation.CFStringGetCString(<EOL>_cast_pointer_p(value),<EOL>buffer,<EOL><NUM_LIT>,<EOL>kCFStringEncodingUTF8<EOL>)<EOL>if not result:<EOL><INDENT>raise OSError('<STR_LIT>')<EOL><DEDENT>string = byte_string_from_buffer(buffer)<EOL><DEDENT>if string is not None:<EOL><INDENT>string = string.decode('<STR_LIT:utf-8>')<EOL><DEDENT>return string<EOL>", "docstring": "Creates a python unicode string from a CFString object\n\n:param value:\n    The CFString to convert\n\n:return:\n    A python unicode string", "id": "f9504:c0:m4"}
{"signature": "@classmethod<EOL><INDENT>def register_native_mapping(cls, type_id, callback):<DEDENT>", "body": "cls._native_map[int(type_id)] = callback<EOL>", "docstring": "Register a function to convert a core foundation data type into its\nequivalent in python\n\n:param type_id:\n    The CFTypeId for the type\n\n:param callback:\n    A callback to pass the CFType object to", "id": "f9504:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def cf_dictionary_to_dict(dictionary):<DEDENT>", "body": "dict_length = CoreFoundation.CFDictionaryGetCount(dictionary)<EOL>keys = (CFTypeRef * dict_length)()<EOL>values = (CFTypeRef * dict_length)()<EOL>CoreFoundation.CFDictionaryGetKeysAndValues(<EOL>dictionary,<EOL>_cast_pointer_p(keys),<EOL>_cast_pointer_p(values)<EOL>)<EOL>output = {}<EOL>for index in range(<NUM_LIT:0>, dict_length):<EOL><INDENT>output[CFHelpers.native(keys[index])] = CFHelpers.native(values[index])<EOL><DEDENT>return output<EOL>", "docstring": "Converts a CFDictionary object into a python dictionary\n\n:param dictionary:\n    The CFDictionary to convert\n\n:return:\n    A python dict", "id": "f9504:c0:m2"}
{"signature": "def _cert_callback(callback, der_cert, reason):", "body": "if not callback:<EOL><INDENT>return<EOL><DEDENT>callback(x509.Certificate.load(der_cert), reason)<EOL>", "docstring": "Constructs an asn1crypto.x509.Certificate object and calls the export\ncallback\n\n:param callback:\n    The callback to call\n\n:param der_cert:\n    A byte string of the DER-encoded certificate\n\n:param reason:\n    None if cert is being exported, or a unicode string of the reason it\n    is not being exported", "id": "f9505:m2"}
{"signature": "def _cert_details(cert_pointer):", "body": "data_pointer = None<EOL>try:<EOL><INDENT>data_pointer = Security.SecCertificateCopyData(cert_pointer)<EOL>der_cert = CFHelpers.cf_data_to_bytes(data_pointer)<EOL>cert_hash = hashlib.sha1(der_cert).digest()<EOL>return (der_cert, cert_hash)<EOL><DEDENT>finally:<EOL><INDENT>if data_pointer is not None:<EOL><INDENT>CoreFoundation.CFRelease(data_pointer)<EOL><DEDENT><DEDENT>", "docstring": "Return the certificate and a hash of it\n\n:param cert_pointer:\n    A SecCertificateRef\n\n:return:\n    A 2-element tuple:\n     - [0]: A byte string of the SHA1 hash of the cert\n     - [1]: A byte string of the DER-encoded contents of the cert", "id": "f9505:m3"}
{"signature": "def extract_from_system(cert_callback=None, callback_only_on_failure=False):", "body": "certs_pointer_pointer = new(CoreFoundation, '<STR_LIT>')<EOL>res = Security.SecTrustCopyAnchorCertificates(certs_pointer_pointer)<EOL>handle_sec_error(res)<EOL>certs_pointer = unwrap(certs_pointer_pointer)<EOL>certificates = {}<EOL>trust_info = {}<EOL>all_purposes = '<STR_LIT>'<EOL>default_trust = (set(), set())<EOL>length = CoreFoundation.CFArrayGetCount(certs_pointer)<EOL>for index in range(<NUM_LIT:0>, length):<EOL><INDENT>cert_pointer = CoreFoundation.CFArrayGetValueAtIndex(certs_pointer, index)<EOL>der_cert, cert_hash = _cert_details(cert_pointer)<EOL>certificates[cert_hash] = der_cert<EOL><DEDENT>CoreFoundation.CFRelease(certs_pointer)<EOL>for domain in [SecurityConst.kSecTrustSettingsDomainUser, SecurityConst.kSecTrustSettingsDomainAdmin]:<EOL><INDENT>cert_trust_settings_pointer_pointer = new(CoreFoundation, '<STR_LIT>')<EOL>res = Security.SecTrustSettingsCopyCertificates(domain, cert_trust_settings_pointer_pointer)<EOL>if res == SecurityConst.errSecNoTrustSettings:<EOL><INDENT>continue<EOL><DEDENT>handle_sec_error(res)<EOL>cert_trust_settings_pointer = unwrap(cert_trust_settings_pointer_pointer)<EOL>length = CoreFoundation.CFArrayGetCount(cert_trust_settings_pointer)<EOL>for index in range(<NUM_LIT:0>, length):<EOL><INDENT>cert_pointer = CoreFoundation.CFArrayGetValueAtIndex(cert_trust_settings_pointer, index)<EOL>trust_settings_pointer_pointer = new(CoreFoundation, '<STR_LIT>')<EOL>res = Security.SecTrustSettingsCopyTrustSettings(cert_pointer, domain, trust_settings_pointer_pointer)<EOL>if res == SecurityConst.errSecItemNotFound:<EOL><INDENT>continue<EOL><DEDENT>if res == SecurityConst.errSecInvalidTrustSettings:<EOL><INDENT>der_cert, cert_hash = _cert_details(cert_pointer)<EOL>if cert_hash in certificates:<EOL><INDENT>_cert_callback(<EOL>cert_callback,<EOL>certificates[cert_hash],<EOL>'<STR_LIT>'<EOL>)<EOL>del 
certificates[cert_hash]<EOL><DEDENT>continue<EOL><DEDENT>handle_sec_error(res)<EOL>trust_settings_pointer = unwrap(trust_settings_pointer_pointer)<EOL>trust_oids = set()<EOL>reject_oids = set()<EOL>settings_length = CoreFoundation.CFArrayGetCount(trust_settings_pointer)<EOL>for settings_index in range(<NUM_LIT:0>, settings_length):<EOL><INDENT>settings_dict_entry = CoreFoundation.CFArrayGetValueAtIndex(trust_settings_pointer, settings_index)<EOL>settings_dict = CFHelpers.cf_dictionary_to_dict(settings_dict_entry)<EOL>policy_oid = settings_dict.get('<STR_LIT>', {}).get('<STR_LIT>', all_purposes)<EOL>trust_result = settings_dict.get('<STR_LIT>', <NUM_LIT:1>)<EOL>should_trust = trust_result != <NUM_LIT:0> and trust_result != <NUM_LIT:3><EOL>if should_trust:<EOL><INDENT>trust_oids.add(policy_oid)<EOL><DEDENT>else:<EOL><INDENT>reject_oids.add(policy_oid)<EOL><DEDENT><DEDENT>der_cert, cert_hash = _cert_details(cert_pointer)<EOL>if all_purposes in reject_oids:<EOL><INDENT>if cert_hash in certificates:<EOL><INDENT>_cert_callback(<EOL>cert_callback,<EOL>certificates[cert_hash],<EOL>'<STR_LIT>'<EOL>)<EOL>del certificates[cert_hash]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if all_purposes in trust_oids:<EOL><INDENT>trust_oids = set([all_purposes])<EOL><DEDENT>trust_info[cert_hash] = (trust_oids, reject_oids)<EOL><DEDENT>CoreFoundation.CFRelease(trust_settings_pointer)<EOL><DEDENT>CoreFoundation.CFRelease(cert_trust_settings_pointer)<EOL><DEDENT>output = []<EOL>for cert_hash in certificates:<EOL><INDENT>if not callback_only_on_failure:<EOL><INDENT>_cert_callback(cert_callback, certificates[cert_hash], None)<EOL><DEDENT>cert_trust_info = trust_info.get(cert_hash, default_trust)<EOL>output.append((certificates[cert_hash], cert_trust_info[<NUM_LIT:0>], cert_trust_info[<NUM_LIT:1>]))<EOL><DEDENT>return output<EOL>", "docstring": "Extracts trusted CA certificates from the OS X trusted root keychain.\n\n:param cert_callback:\n    A callback that is called once for each certificate in 
the trust store.\n    It should accept two parameters: an asn1crypto.x509.Certificate object,\n    and a reason. The reason will be None if the certificate is being\n    exported, otherwise it will be a unicode string of the reason it won't.\n\n:param callback_only_on_failure:\n    A boolean - if the callback should only be called when a certificate is\n    not exported.\n\n:raises:\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A list of 3-element tuples:\n     - 0: a byte string of a DER-encoded certificate\n     - 1: a set of unicode strings that are OIDs of purposes to trust the\n          certificate for\n     - 2: a set of unicode strings that are OIDs of purposes to reject the\n          certificate for", "id": "f9505:m1"}
{"signature": "def __init__(self, address, port, timeout=<NUM_LIT:10>, session=None):", "body": "self._done_handshake = False<EOL>self._server_hello = b'<STR_LIT>'<EOL>self._client_hello = b'<STR_LIT>'<EOL>self._decrypted_bytes = b'<STR_LIT>'<EOL>if address is None and port is None:<EOL><INDENT>self._socket = None<EOL><DEDENT>else:<EOL><INDENT>if not isinstance(address, str_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(address)<EOL>))<EOL><DEDENT>if not isinstance(port, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(port)<EOL>))<EOL><DEDENT>if timeout is not None and not isinstance(timeout, numbers.Number):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(timeout)<EOL>))<EOL><DEDENT>self._socket = socket_.create_connection((address, port), timeout)<EOL>self._socket.settimeout(timeout)<EOL><DEDENT>if session is None:<EOL><INDENT>session = TLSSession()<EOL><DEDENT>elif not isinstance(session, TLSSession):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(session)<EOL>))<EOL><DEDENT>self._session = session<EOL>if self._socket:<EOL><INDENT>self._hostname = address<EOL>self._handshake()<EOL><DEDENT>", "docstring": ":param address:\n    A unicode string of the domain name or IP address to conenct to\n\n:param port:\n    An integer of the port number to connect to\n\n:param timeout:\n    An integer timeout to use for the socket\n\n:param session:\n    An oscrypto.tls.TLSSession object to allow for session reuse and\n    controlling the protocols and validation performed", "id": "f9507:c1:m1"}
{"signature": "@property<EOL><INDENT>def certificate(self):<DEDENT>", "body": "if self._session_context is None:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>if self._certificate is None:<EOL><INDENT>self._read_certificates()<EOL><DEDENT>return self._certificate<EOL>", "docstring": "An asn1crypto.x509.Certificate object of the end-entity certificate\npresented by the server", "id": "f9507:c1:m16"}
{"signature": "def _raise_closed(self):", "body": "if self._local_closed:<EOL><INDENT>raise TLSDisconnectError('<STR_LIT>')<EOL><DEDENT>elif self._gracefully_closed:<EOL><INDENT>raise TLSGracefulDisconnectError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise TLSDisconnectError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Raises an exception describing if the local or remote end closed the\nconnection", "id": "f9507:c1:m15"}
{"signature": "@property<EOL><INDENT>def protocol(self):<DEDENT>", "body": "return self._protocol<EOL>", "docstring": "A unicode string of: \"TLSv1.2\", \"TLSv1.1\", \"TLSv1\", \"SSLv3\"", "id": "f9507:c1:m19"}
{"signature": "def _write_callback(connection_id, data_buffer, data_length_pointer):", "body": "try:<EOL><INDENT>self = _connection_refs.get(connection_id)<EOL>if not self:<EOL><INDENT>socket = _socket_refs.get(connection_id)<EOL><DEDENT>else:<EOL><INDENT>socket = self._socket<EOL><DEDENT>if not self and not socket:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>data_length = deref(data_length_pointer)<EOL>data = bytes_from_buffer(data_buffer, data_length)<EOL>if self and not self._done_handshake:<EOL><INDENT>self._client_hello += data<EOL><DEDENT>error = None<EOL>try:<EOL><INDENT>sent = socket.send(data)<EOL><DEDENT>except (socket_.error) as e:<EOL><INDENT>error = e.errno<EOL><DEDENT>if error is not None and error != errno.EAGAIN:<EOL><INDENT>if error == errno.ECONNRESET or error == errno.EPIPE:<EOL><INDENT>return SecurityConst.errSSLClosedNoNotify<EOL><DEDENT>return SecurityConst.errSSLClosedAbort<EOL><DEDENT>if sent != data_length:<EOL><INDENT>pointer_set(data_length_pointer, sent)<EOL>return SecurityConst.errSSLWouldBlock<EOL><DEDENT>return <NUM_LIT:0><EOL><DEDENT>except (KeyboardInterrupt) as e:<EOL><INDENT>self._exception = e<EOL>return SecurityConst.errSSLPeerUserCancelled<EOL><DEDENT>", "docstring": "Callback called by Secure Transport to actually write to the socket\n\n:param connection_id:\n    An integer identifing the connection\n\n:param data_buffer:\n    A char pointer FFI type containing the data to write\n\n:param data_length_pointer:\n    A size_t pointer FFI type of the amount of data to write. Will be\n    overwritten with the amount of data actually written on return.\n\n:return:\n    An integer status code of the result - 0 for success", "id": "f9507:m2"}
{"signature": "def shutdown(self):", "body": "self._shutdown(True)<EOL>", "docstring": "Shuts down the TLS session and then shuts down the underlying socket", "id": "f9507:c1:m12"}
{"signature": "def __init__(self, protocol=None, manual_validation=False, extra_trust_roots=None):", "body": "if not isinstance(manual_validation, bool):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(manual_validation)<EOL>))<EOL><DEDENT>self._manual_validation = manual_validation<EOL>if protocol is None:<EOL><INDENT>protocol = set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL><DEDENT>if isinstance(protocol, str_cls):<EOL><INDENT>protocol = set([protocol])<EOL><DEDENT>elif not isinstance(protocol, set):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(protocol)<EOL>))<EOL><DEDENT>unsupported_protocols = protocol - set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>if unsupported_protocols:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(unsupported_protocols)<EOL>))<EOL><DEDENT>self._protocols = protocol<EOL>self._extra_trust_roots = []<EOL>if extra_trust_roots:<EOL><INDENT>for extra_trust_root in extra_trust_roots:<EOL><INDENT>if isinstance(extra_trust_root, Certificate):<EOL><INDENT>extra_trust_root = extra_trust_root.asn1<EOL><DEDENT>elif isinstance(extra_trust_root, byte_cls):<EOL><INDENT>extra_trust_root = parse_certificate(extra_trust_root)<EOL><DEDENT>elif isinstance(extra_trust_root, str_cls):<EOL><INDENT>with open(extra_trust_root, '<STR_LIT:rb>') as f:<EOL><INDENT>extra_trust_root = parse_certificate(f.read())<EOL><DEDENT><DEDENT>elif not isinstance(extra_trust_root, x509.Certificate):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(extra_trust_root)<EOL>))<EOL><DEDENT>self._extra_trust_roots.append(extra_trust_root)<EOL><DEDENT><DEDENT>self._peer_id = rand_bytes(<NUM_LIT:8>)<EOL>", "docstring": ":param protocol:\n    A unicode string or set of unicode strings representing allowable\n    protocols to negotiate with the server:\n\n     - \"TLSv1.2\"\n     - \"TLSv1.1\"\n     - \"TLSv1\"\n     - \"SSLv3\"\n\n    Default is: 
{\"TLSv1\", \"TLSv1.1\", \"TLSv1.2\"}\n\n:param manual_validation:\n    If certificate and certificate path validation should be skipped\n    and left to the developer to implement\n\n:param extra_trust_roots:\n    A list containing one or more certificates to be treated as trust\n    roots, in one of the following formats:\n     - A byte string of the DER encoded certificate\n     - A unicode string of the certificate filename\n     - An asn1crypto.x509.Certificate object\n     - An oscrypto.asymmetric.Certificate object\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9507:c0:m0"}
{"signature": "def read_line(self):", "body": "return self.read_until(_line_regex)<EOL>", "docstring": "r\"\"\"\n        Reads a line from the socket, including the line ending of \"\\r\\n\", \"\\r\",\n        or \"\\n\"\n\n        :return:\n            A byte string of the next line from the socket", "id": "f9507:c1:m7"}
{"signature": "@property<EOL><INDENT>def session(self):<DEDENT>", "body": "return self._session<EOL>", "docstring": "The oscrypto.tls.TLSSession object used for this connection", "id": "f9507:c1:m23"}
{"signature": "def select_read(self, timeout=None):", "body": "<EOL>if len(self._decrypted_bytes) > <NUM_LIT:0>:<EOL><INDENT>return True<EOL><DEDENT>read_ready, _, _ = select.select([self._socket], [], [], timeout)<EOL>return len(read_ready) > <NUM_LIT:0><EOL>", "docstring": "Blocks until the socket is ready to be read from, or the timeout is hit\n\n:param timeout:\n    A float - the period of time to wait for data to be read. None for\n    no time limit.\n\n:return:\n    A boolean - if data is ready to be read. Will only be False if\n    timeout is not None.", "id": "f9507:c1:m4"}
{"signature": "@property<EOL><INDENT>def session_id(self):<DEDENT>", "body": "return self._session_id<EOL>", "docstring": "A unicode string of \"new\" or \"reused\" or None for no ticket", "id": "f9507:c1:m21"}
{"signature": "def _handshake(self):", "body": "session_context = None<EOL>ssl_policy_ref = None<EOL>crl_search_ref = None<EOL>crl_policy_ref = None<EOL>ocsp_search_ref = None<EOL>ocsp_policy_ref = None<EOL>policy_array_ref = None<EOL>try:<EOL><INDENT>if osx_version_info < (<NUM_LIT:10>, <NUM_LIT:8>):<EOL><INDENT>session_context_pointer = new(Security, '<STR_LIT>')<EOL>result = Security.SSLNewContext(False, session_context_pointer)<EOL>handle_sec_error(result)<EOL>session_context = unwrap(session_context_pointer)<EOL><DEDENT>else:<EOL><INDENT>session_context = Security.SSLCreateContext(<EOL>null(),<EOL>SecurityConst.kSSLClientSide,<EOL>SecurityConst.kSSLStreamType<EOL>)<EOL><DEDENT>result = Security.SSLSetIOFuncs(<EOL>session_context,<EOL>_read_callback_pointer,<EOL>_write_callback_pointer<EOL>)<EOL>handle_sec_error(result)<EOL>self._connection_id = id(self) % <NUM_LIT><EOL>_connection_refs[self._connection_id] = self<EOL>_socket_refs[self._connection_id] = self._socket<EOL>result = Security.SSLSetConnection(session_context, self._connection_id)<EOL>handle_sec_error(result)<EOL>utf8_domain = self._hostname.encode('<STR_LIT:utf-8>')<EOL>result = Security.SSLSetPeerDomainName(<EOL>session_context,<EOL>utf8_domain,<EOL>len(utf8_domain)<EOL>)<EOL>handle_sec_error(result)<EOL>if osx_version_info >= (<NUM_LIT:10>, <NUM_LIT:10>):<EOL><INDENT>disable_auto_validation = self._session._manual_validation or self._session._extra_trust_roots<EOL>explicit_validation = (not self._session._manual_validation) and self._session._extra_trust_roots<EOL><DEDENT>else:<EOL><INDENT>disable_auto_validation = True<EOL>explicit_validation = not self._session._manual_validation<EOL><DEDENT>if osx_version_info < (<NUM_LIT:10>, <NUM_LIT:8>):<EOL><INDENT>for protocol in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>protocol_const = _PROTOCOL_STRING_CONST_MAP[protocol]<EOL>enabled = protocol in self._session._protocols<EOL>result = 
Security.SSLSetProtocolVersionEnabled(<EOL>session_context,<EOL>protocol_const,<EOL>enabled<EOL>)<EOL>handle_sec_error(result)<EOL><DEDENT>if disable_auto_validation:<EOL><INDENT>result = Security.SSLSetEnableCertVerify(session_context, False)<EOL>handle_sec_error(result)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>protocol_consts = [_PROTOCOL_STRING_CONST_MAP[protocol] for protocol in self._session._protocols]<EOL>min_protocol = min(protocol_consts)<EOL>max_protocol = max(protocol_consts)<EOL>result = Security.SSLSetProtocolVersionMin(<EOL>session_context,<EOL>min_protocol<EOL>)<EOL>handle_sec_error(result)<EOL>result = Security.SSLSetProtocolVersionMax(<EOL>session_context,<EOL>max_protocol<EOL>)<EOL>handle_sec_error(result)<EOL>if disable_auto_validation:<EOL><INDENT>result = Security.SSLSetSessionOption(<EOL>session_context,<EOL>SecurityConst.kSSLSessionOptionBreakOnServerAuth,<EOL>True<EOL>)<EOL>handle_sec_error(result)<EOL><DEDENT><DEDENT>supported_ciphers_pointer = new(Security, '<STR_LIT>')<EOL>result = Security.SSLGetNumberSupportedCiphers(session_context, supported_ciphers_pointer)<EOL>handle_sec_error(result)<EOL>supported_ciphers = deref(supported_ciphers_pointer)<EOL>cipher_buffer = buffer_from_bytes(supported_ciphers * <NUM_LIT:4>)<EOL>supported_cipher_suites_pointer = cast(Security, '<STR_LIT>', cipher_buffer)<EOL>result = Security.SSLGetSupportedCiphers(<EOL>session_context,<EOL>supported_cipher_suites_pointer,<EOL>supported_ciphers_pointer<EOL>)<EOL>handle_sec_error(result)<EOL>supported_ciphers = deref(supported_ciphers_pointer)<EOL>supported_cipher_suites = array_from_pointer(<EOL>Security,<EOL>'<STR_LIT>',<EOL>supported_cipher_suites_pointer,<EOL>supported_ciphers<EOL>)<EOL>good_ciphers = []<EOL>for supported_cipher_suite in supported_cipher_suites:<EOL><INDENT>cipher_suite = int_to_bytes(supported_cipher_suite, width=<NUM_LIT:2>)<EOL>cipher_suite_name = CIPHER_SUITE_MAP.get(cipher_suite, cipher_suite)<EOL>good_cipher = 
_cipher_blacklist_regex.search(cipher_suite_name) is None<EOL>if good_cipher:<EOL><INDENT>good_ciphers.append(supported_cipher_suite)<EOL><DEDENT><DEDENT>num_good_ciphers = len(good_ciphers)<EOL>good_ciphers_array = new(Security, '<STR_LIT>', num_good_ciphers)<EOL>array_set(good_ciphers_array, good_ciphers)<EOL>good_ciphers_pointer = cast(Security, '<STR_LIT>', good_ciphers_array)<EOL>result = Security.SSLSetEnabledCiphers(<EOL>session_context,<EOL>good_ciphers_pointer,<EOL>num_good_ciphers<EOL>)<EOL>handle_sec_error(result)<EOL>peer_id = self._session._peer_id + self._hostname.encode('<STR_LIT:utf-8>')<EOL>result = Security.SSLSetPeerID(session_context, peer_id, len(peer_id))<EOL>handle_sec_error(result)<EOL>handshake_result = Security.SSLHandshake(session_context)<EOL>if self._exception is not None:<EOL><INDENT>exception = self._exception<EOL>self._exception = None<EOL>raise exception<EOL><DEDENT>while handshake_result == SecurityConst.errSSLWouldBlock:<EOL><INDENT>handshake_result = Security.SSLHandshake(session_context)<EOL>if self._exception is not None:<EOL><INDENT>exception = self._exception<EOL>self._exception = None<EOL>raise exception<EOL><DEDENT><DEDENT>if osx_version_info < (<NUM_LIT:10>, <NUM_LIT:8>) and osx_version_info >= (<NUM_LIT:10>, <NUM_LIT:7>):<EOL><INDENT>do_validation = explicit_validation and handshake_result == <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>do_validation = explicit_validation and handshake_result == SecurityConst.errSSLServerAuthCompleted<EOL><DEDENT>if do_validation:<EOL><INDENT>trust_ref_pointer = new(Security, '<STR_LIT>')<EOL>result = Security.SSLCopyPeerTrust(<EOL>session_context,<EOL>trust_ref_pointer<EOL>)<EOL>handle_sec_error(result)<EOL>trust_ref = unwrap(trust_ref_pointer)<EOL>cf_string_hostname = CFHelpers.cf_string_from_unicode(self._hostname)<EOL>ssl_policy_ref = Security.SecPolicyCreateSSL(True, cf_string_hostname)<EOL>result = 
CoreFoundation.CFRelease(cf_string_hostname)<EOL>handle_cf_error(result)<EOL>ocsp_oid_pointer = struct(Security, '<STR_LIT>')<EOL>ocsp_oid = unwrap(ocsp_oid_pointer)<EOL>ocsp_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_OCSP)<EOL>ocsp_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_OCSP)<EOL>ocsp_oid.Data = cast(Security, '<STR_LIT>', ocsp_oid_buffer)<EOL>ocsp_search_ref_pointer = new(Security, '<STR_LIT>')<EOL>result = Security.SecPolicySearchCreate(<EOL>SecurityConst.CSSM_CERT_X_509v3,<EOL>ocsp_oid_pointer,<EOL>null(),<EOL>ocsp_search_ref_pointer<EOL>)<EOL>handle_sec_error(result)<EOL>ocsp_search_ref = unwrap(ocsp_search_ref_pointer)<EOL>ocsp_policy_ref_pointer = new(Security, '<STR_LIT>')<EOL>result = Security.SecPolicySearchCopyNext(ocsp_search_ref, ocsp_policy_ref_pointer)<EOL>handle_sec_error(result)<EOL>ocsp_policy_ref = unwrap(ocsp_policy_ref_pointer)<EOL>ocsp_struct_pointer = struct(Security, '<STR_LIT>')<EOL>ocsp_struct = unwrap(ocsp_struct_pointer)<EOL>ocsp_struct.Version = SecurityConst.CSSM_APPLE_TP_OCSP_OPTS_VERSION<EOL>ocsp_struct.Flags = (<EOL>SecurityConst.CSSM_TP_ACTION_OCSP_DISABLE_NET |<EOL>SecurityConst.CSSM_TP_ACTION_OCSP_CACHE_READ_DISABLE<EOL>)<EOL>ocsp_struct_bytes = struct_bytes(ocsp_struct_pointer)<EOL>cssm_data_pointer = struct(Security, '<STR_LIT>')<EOL>cssm_data = unwrap(cssm_data_pointer)<EOL>cssm_data.Length = len(ocsp_struct_bytes)<EOL>ocsp_struct_buffer = buffer_from_bytes(ocsp_struct_bytes)<EOL>cssm_data.Data = cast(Security, '<STR_LIT>', ocsp_struct_buffer)<EOL>result = Security.SecPolicySetValue(ocsp_policy_ref, cssm_data_pointer)<EOL>handle_sec_error(result)<EOL>crl_oid_pointer = struct(Security, '<STR_LIT>')<EOL>crl_oid = unwrap(crl_oid_pointer)<EOL>crl_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_CRL)<EOL>crl_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_CRL)<EOL>crl_oid.Data = cast(Security, '<STR_LIT>', crl_oid_buffer)<EOL>crl_search_ref_pointer = new(Security, 
'<STR_LIT>')<EOL>result = Security.SecPolicySearchCreate(<EOL>SecurityConst.CSSM_CERT_X_509v3,<EOL>crl_oid_pointer,<EOL>null(),<EOL>crl_search_ref_pointer<EOL>)<EOL>handle_sec_error(result)<EOL>crl_search_ref = unwrap(crl_search_ref_pointer)<EOL>crl_policy_ref_pointer = new(Security, '<STR_LIT>')<EOL>result = Security.SecPolicySearchCopyNext(crl_search_ref, crl_policy_ref_pointer)<EOL>handle_sec_error(result)<EOL>crl_policy_ref = unwrap(crl_policy_ref_pointer)<EOL>crl_struct_pointer = struct(Security, '<STR_LIT>')<EOL>crl_struct = unwrap(crl_struct_pointer)<EOL>crl_struct.Version = SecurityConst.CSSM_APPLE_TP_CRL_OPTS_VERSION<EOL>crl_struct.CrlFlags = <NUM_LIT:0><EOL>crl_struct_bytes = struct_bytes(crl_struct_pointer)<EOL>cssm_data_pointer = struct(Security, '<STR_LIT>')<EOL>cssm_data = unwrap(cssm_data_pointer)<EOL>cssm_data.Length = len(crl_struct_bytes)<EOL>crl_struct_buffer = buffer_from_bytes(crl_struct_bytes)<EOL>cssm_data.Data = cast(Security, '<STR_LIT>', crl_struct_buffer)<EOL>result = Security.SecPolicySetValue(crl_policy_ref, cssm_data_pointer)<EOL>handle_sec_error(result)<EOL>policy_array_ref = CFHelpers.cf_array_from_list([<EOL>ssl_policy_ref,<EOL>crl_policy_ref,<EOL>ocsp_policy_ref<EOL>])<EOL>result = Security.SecTrustSetPolicies(trust_ref, policy_array_ref)<EOL>handle_sec_error(result)<EOL>if self._session._extra_trust_roots:<EOL><INDENT>ca_cert_refs = []<EOL>ca_certs = []<EOL>for cert in self._session._extra_trust_roots:<EOL><INDENT>ca_cert = load_certificate(cert)<EOL>ca_certs.append(ca_cert)<EOL>ca_cert_refs.append(ca_cert.sec_certificate_ref)<EOL><DEDENT>result = Security.SecTrustSetAnchorCertificatesOnly(trust_ref, False)<EOL>handle_sec_error(result)<EOL>array_ref = CFHelpers.cf_array_from_list(ca_cert_refs)<EOL>result = Security.SecTrustSetAnchorCertificates(trust_ref, array_ref)<EOL>handle_sec_error(result)<EOL><DEDENT>result_pointer = new(Security, '<STR_LIT>')<EOL>result = Security.SecTrustEvaluate(trust_ref, 
result_pointer)<EOL>handle_sec_error(result)<EOL>trust_result_code = deref(result_pointer)<EOL>invalid_chain_error_codes = set([<EOL>SecurityConst.kSecTrustResultProceed,<EOL>SecurityConst.kSecTrustResultUnspecified<EOL>])<EOL>if trust_result_code not in invalid_chain_error_codes:<EOL><INDENT>handshake_result = SecurityConst.errSSLXCertChainInvalid<EOL><DEDENT>else:<EOL><INDENT>handshake_result = Security.SSLHandshake(session_context)<EOL>while handshake_result == SecurityConst.errSSLWouldBlock:<EOL><INDENT>handshake_result = Security.SSLHandshake(session_context)<EOL><DEDENT><DEDENT><DEDENT>self._done_handshake = True<EOL>handshake_error_codes = set([<EOL>SecurityConst.errSSLXCertChainInvalid,<EOL>SecurityConst.errSSLCertExpired,<EOL>SecurityConst.errSSLCertNotYetValid,<EOL>SecurityConst.errSSLUnknownRootCert,<EOL>SecurityConst.errSSLNoRootCert,<EOL>SecurityConst.errSSLHostNameMismatch,<EOL>SecurityConst.errSSLInternal,<EOL>])<EOL>if handshake_result in handshake_error_codes:<EOL><INDENT>trust_ref_pointer = new(Security, '<STR_LIT>')<EOL>result = Security.SSLCopyPeerTrust(<EOL>session_context,<EOL>trust_ref_pointer<EOL>)<EOL>handle_sec_error(result)<EOL>trust_ref = unwrap(trust_ref_pointer)<EOL>result_code_pointer = new(Security, '<STR_LIT>')<EOL>result = Security.SecTrustGetCssmResultCode(trust_ref, result_code_pointer)<EOL>result_code = deref(result_code_pointer)<EOL>chain = extract_chain(self._server_hello)<EOL>self_signed = False<EOL>revoked = False<EOL>expired = False<EOL>not_yet_valid = False<EOL>no_issuer = False<EOL>cert = None<EOL>bad_hostname = False<EOL>if chain:<EOL><INDENT>cert = chain[<NUM_LIT:0>]<EOL>oscrypto_cert = load_certificate(cert)<EOL>self_signed = oscrypto_cert.self_signed<EOL>revoked = result_code == SecurityConst.CSSMERR_TP_CERT_REVOKED<EOL>no_issuer = not self_signed and result_code == SecurityConst.CSSMERR_TP_NOT_TRUSTED<EOL>expired = result_code == SecurityConst.CSSMERR_TP_CERT_EXPIRED<EOL>not_yet_valid = result_code == 
SecurityConst.CSSMERR_TP_CERT_NOT_VALID_YET<EOL>bad_hostname = result_code == SecurityConst.CSSMERR_APPLETP_HOSTNAME_MISMATCH<EOL>if osx_version_info >= (<NUM_LIT:10>, <NUM_LIT:12>):<EOL><INDENT>validity = cert['<STR_LIT>']['<STR_LIT>']<EOL>not_before = validity['<STR_LIT>'].chosen.native<EOL>not_after = validity['<STR_LIT>'].chosen.native<EOL>utcnow = datetime.datetime.now(timezone.utc)<EOL>expired = not_after < utcnow<EOL>not_yet_valid = not_before > utcnow<EOL><DEDENT><DEDENT>if chain and chain[<NUM_LIT:0>].hash_algo in set(['<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise_weak_signature(chain[<NUM_LIT:0>])<EOL><DEDENT>if revoked:<EOL><INDENT>raise_revoked(cert)<EOL><DEDENT>if bad_hostname:<EOL><INDENT>raise_hostname(cert, self._hostname)<EOL><DEDENT>elif expired or not_yet_valid:<EOL><INDENT>raise_expired_not_yet_valid(cert)<EOL><DEDENT>elif no_issuer:<EOL><INDENT>raise_no_issuer(cert)<EOL><DEDENT>elif self_signed:<EOL><INDENT>raise_self_signed(cert)<EOL><DEDENT>if detect_client_auth_request(self._server_hello):<EOL><INDENT>raise_client_auth()<EOL><DEDENT>raise_verification(cert)<EOL><DEDENT>if handshake_result == SecurityConst.errSSLPeerHandshakeFail:<EOL><INDENT>if detect_client_auth_request(self._server_hello):<EOL><INDENT>raise_client_auth()<EOL><DEDENT>raise_handshake()<EOL><DEDENT>if handshake_result == SecurityConst.errSSLWeakPeerEphemeralDHKey:<EOL><INDENT>raise_dh_params()<EOL><DEDENT>if handshake_result == SecurityConst.errSSLPeerProtocolVersion:<EOL><INDENT>raise_protocol_version()<EOL><DEDENT>if handshake_result in set([SecurityConst.errSSLRecordOverflow, SecurityConst.errSSLProtocol]):<EOL><INDENT>self._server_hello += _read_remaining(self._socket)<EOL>raise_protocol_error(self._server_hello)<EOL><DEDENT>if handshake_result in set([SecurityConst.errSSLClosedNoNotify, SecurityConst.errSSLClosedAbort]):<EOL><INDENT>if not self._done_handshake:<EOL><INDENT>self._server_hello += _read_remaining(self._socket)<EOL><DEDENT>if 
detect_other_protocol(self._server_hello):<EOL><INDENT>raise_protocol_error(self._server_hello)<EOL><DEDENT>raise_disconnection()<EOL><DEDENT>if osx_version_info < (<NUM_LIT:10>, <NUM_LIT:10>):<EOL><INDENT>dh_params_length = get_dh_params_length(self._server_hello)<EOL>if dh_params_length is not None and dh_params_length < <NUM_LIT>:<EOL><INDENT>raise_dh_params()<EOL><DEDENT><DEDENT>would_block = handshake_result == SecurityConst.errSSLWouldBlock<EOL>server_auth_complete = handshake_result == SecurityConst.errSSLServerAuthCompleted<EOL>manual_validation = self._session._manual_validation and server_auth_complete<EOL>if not would_block and not manual_validation:<EOL><INDENT>handle_sec_error(handshake_result, TLSError)<EOL><DEDENT>self._session_context = session_context<EOL>protocol_const_pointer = new(Security, '<STR_LIT>')<EOL>result = Security.SSLGetNegotiatedProtocolVersion(<EOL>session_context,<EOL>protocol_const_pointer<EOL>)<EOL>handle_sec_error(result)<EOL>protocol_const = deref(protocol_const_pointer)<EOL>self._protocol = _PROTOCOL_CONST_STRING_MAP[protocol_const]<EOL>cipher_int_pointer = new(Security, '<STR_LIT>')<EOL>result = Security.SSLGetNegotiatedCipher(<EOL>session_context,<EOL>cipher_int_pointer<EOL>)<EOL>handle_sec_error(result)<EOL>cipher_int = deref(cipher_int_pointer)<EOL>cipher_bytes = int_to_bytes(cipher_int, width=<NUM_LIT:2>)<EOL>self._cipher_suite = CIPHER_SUITE_MAP.get(cipher_bytes, cipher_bytes)<EOL>session_info = parse_session_info(<EOL>self._server_hello,<EOL>self._client_hello<EOL>)<EOL>self._compression = session_info['<STR_LIT>']<EOL>self._session_id = session_info['<STR_LIT>']<EOL>self._session_ticket = session_info['<STR_LIT>']<EOL><DEDENT>except (OSError, socket_.error):<EOL><INDENT>if session_context:<EOL><INDENT>if osx_version_info < (<NUM_LIT:10>, <NUM_LIT:8>):<EOL><INDENT>result = Security.SSLDisposeContext(session_context)<EOL>handle_sec_error(result)<EOL><DEDENT>else:<EOL><INDENT>result = 
CoreFoundation.CFRelease(session_context)<EOL>handle_cf_error(result)<EOL><DEDENT><DEDENT>self._session_context = None<EOL>self.close()<EOL>raise<EOL><DEDENT>finally:<EOL><INDENT>if ssl_policy_ref:<EOL><INDENT>result = CoreFoundation.CFRelease(ssl_policy_ref)<EOL>handle_cf_error(result)<EOL>ssl_policy_ref = None<EOL><DEDENT>if crl_policy_ref:<EOL><INDENT>result = CoreFoundation.CFRelease(crl_policy_ref)<EOL>handle_cf_error(result)<EOL>crl_policy_ref = None<EOL><DEDENT>if ocsp_policy_ref:<EOL><INDENT>result = CoreFoundation.CFRelease(ocsp_policy_ref)<EOL>handle_cf_error(result)<EOL>ocsp_policy_ref = None<EOL><DEDENT>if policy_array_ref:<EOL><INDENT>result = CoreFoundation.CFRelease(policy_array_ref)<EOL>handle_cf_error(result)<EOL>policy_array_ref = None<EOL><DEDENT><DEDENT>", "docstring": "Perform an initial TLS handshake", "id": "f9507:c1:m2"}
{"signature": "def read(self, max_length):", "body": "if not isinstance(max_length, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(max_length)<EOL>))<EOL><DEDENT>if self._session_context is None:<EOL><INDENT>if self._decrypted_bytes != b'<STR_LIT>':<EOL><INDENT>output = self._decrypted_bytes<EOL>self._decrypted_bytes = b'<STR_LIT>'<EOL>return output<EOL><DEDENT>self._raise_closed()<EOL><DEDENT>buffered_length = len(self._decrypted_bytes)<EOL>if buffered_length >= max_length:<EOL><INDENT>output = self._decrypted_bytes[<NUM_LIT:0>:max_length]<EOL>self._decrypted_bytes = self._decrypted_bytes[max_length:]<EOL>return output<EOL><DEDENT>if buffered_length > <NUM_LIT:0> and not self.select_read(<NUM_LIT:0>):<EOL><INDENT>output = self._decrypted_bytes<EOL>self._decrypted_bytes = b'<STR_LIT>'<EOL>return output<EOL><DEDENT>to_read = max_length - len(self._decrypted_bytes)<EOL>read_buffer = buffer_from_bytes(to_read)<EOL>processed_pointer = new(Security, '<STR_LIT>')<EOL>result = Security.SSLRead(<EOL>self._session_context,<EOL>read_buffer,<EOL>to_read,<EOL>processed_pointer<EOL>)<EOL>if self._exception is not None:<EOL><INDENT>exception = self._exception<EOL>self._exception = None<EOL>raise exception<EOL><DEDENT>if result and result not in set([SecurityConst.errSSLWouldBlock, SecurityConst.errSSLClosedGraceful]):<EOL><INDENT>handle_sec_error(result, TLSError)<EOL><DEDENT>if result and result == SecurityConst.errSSLClosedGraceful:<EOL><INDENT>self._gracefully_closed = True<EOL>self._shutdown(False)<EOL>self._raise_closed()<EOL><DEDENT>bytes_read = deref(processed_pointer)<EOL>output = self._decrypted_bytes + bytes_from_buffer(read_buffer, bytes_read)<EOL>self._decrypted_bytes = output[max_length:]<EOL>return output[<NUM_LIT:0>:max_length]<EOL>", "docstring": "Reads data from the TLS-wrapped socket\n\n:param max_length:\n    The number of bytes to read - output may be less than this\n\n:raises:\n    socket.socket - when a non-TLS 
socket error occurs\n    oscrypto.errors.TLSError - when a TLS-related error occurs\n    oscrypto.errors.TLSDisconnectError - when the connection disconnects\n    oscrypto.errors.TLSGracefulDisconnectError - when the remote end gracefully closed the connection\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the data read", "id": "f9507:c1:m3"}
{"signature": "@property<EOL><INDENT>def intermediates(self):<DEDENT>", "body": "if self._session_context is None:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>if self._certificate is None:<EOL><INDENT>self._read_certificates()<EOL><DEDENT>return self._intermediates<EOL>", "docstring": "A list of asn1crypto.x509.Certificate objects that were presented as\nintermediates by the server", "id": "f9507:c1:m17"}
{"signature": "def read_exactly(self, num_bytes):", "body": "output = b'<STR_LIT>'<EOL>remaining = num_bytes<EOL>while remaining > <NUM_LIT:0>:<EOL><INDENT>output += self.read(remaining)<EOL>remaining = num_bytes - len(output)<EOL><DEDENT>return output<EOL>", "docstring": "Reads exactly the specified number of bytes from the socket\n\n:param num_bytes:\n    An integer - the exact number of bytes to read\n\n:return:\n    A byte string of the data that was read", "id": "f9507:c1:m8"}
{"signature": "@property<EOL><INDENT>def cipher_suite(self):<DEDENT>", "body": "return self._cipher_suite<EOL>", "docstring": "A unicode string of the IANA cipher suite name of the negotiated\ncipher suite", "id": "f9507:c1:m18"}
{"signature": "@property<EOL><INDENT>def hostname(self):<DEDENT>", "body": "return self._hostname<EOL>", "docstring": "A unicode string of the TLS server domain name or IP address", "id": "f9507:c1:m24"}
{"signature": "def _shutdown(self, manual):", "body": "if self._session_context is None:<EOL><INDENT>return<EOL><DEDENT>result = Security.SSLClose(self._session_context)<EOL>if osx_version_info < (<NUM_LIT:10>, <NUM_LIT:8>):<EOL><INDENT>result = Security.SSLDisposeContext(self._session_context)<EOL>handle_sec_error(result)<EOL><DEDENT>else:<EOL><INDENT>result = CoreFoundation.CFRelease(self._session_context)<EOL>handle_cf_error(result)<EOL><DEDENT>self._session_context = None<EOL>if manual:<EOL><INDENT>self._local_closed = True<EOL><DEDENT>try:<EOL><INDENT>self._socket.shutdown(socket_.SHUT_RDWR)<EOL><DEDENT>except (socket_.error):<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Shuts down the TLS session and then shuts down the underlying socket\n\n:param manual:\n    A boolean if the connection was manually shutdown", "id": "f9507:c1:m11"}
{"signature": "def handle_sec_error(error, exception_class=None):", "body": "if error == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>if error in set([SecurityConst.errSSLClosedNoNotify, SecurityConst.errSSLClosedAbort]):<EOL><INDENT>raise TLSDisconnectError('<STR_LIT>')<EOL><DEDENT>if error == SecurityConst.errSSLClosedGraceful:<EOL><INDENT>raise TLSGracefulDisconnectError('<STR_LIT>')<EOL><DEDENT>cf_error_string = Security.SecCopyErrorMessageString(error, null())<EOL>output = CFHelpers.cf_string_to_unicode(cf_error_string)<EOL>CoreFoundation.CFRelease(cf_error_string)<EOL>if output is None or output == '<STR_LIT>':<EOL><INDENT>output = '<STR_LIT>' % error<EOL><DEDENT>if exception_class is None:<EOL><INDENT>exception_class = OSError<EOL><DEDENT>raise exception_class(output)<EOL>", "docstring": "Checks a Security OSStatus error code and throws an exception if there is an\nerror to report\n\n:param error:\n    An OSStatus\n\n:param exception_class:\n    The exception class to use for the exception if an error occurred\n\n:raises:\n    OSError - when the OSStatus contains an error", "id": "f9508:m0"}
{"signature": "@staticmethod<EOL><INDENT>def cf_array_from_list(values):<DEDENT>", "body": "length = len(values)<EOL>return CoreFoundation.CFArrayCreate(<EOL>CoreFoundation.kCFAllocatorDefault,<EOL>values,<EOL>length,<EOL>ffi.addressof(CoreFoundation.kCFTypeArrayCallBacks)<EOL>)<EOL>", "docstring": "Creates a CFArrayRef object from a list of CF* type objects.\n\n:param values:\n    A list of CF* type object\n\n:return:\n    A CFArrayRef", "id": "f9510:c0:m9"}
{"signature": "@staticmethod<EOL><INDENT>def cf_string_to_unicode(value):<DEDENT>", "body": "string_ptr = CoreFoundation.CFStringGetCStringPtr(<EOL>value,<EOL>kCFStringEncodingUTF8<EOL>)<EOL>string = None if is_null(string_ptr) else ffi.string(string_ptr)<EOL>if string is None:<EOL><INDENT>buffer = buffer_from_bytes(<NUM_LIT>)<EOL>result = CoreFoundation.CFStringGetCString(<EOL>value,<EOL>buffer,<EOL><NUM_LIT>,<EOL>kCFStringEncodingUTF8<EOL>)<EOL>if not result:<EOL><INDENT>raise OSError('<STR_LIT>')<EOL><DEDENT>string = byte_string_from_buffer(buffer)<EOL><DEDENT>if string is not None:<EOL><INDENT>string = string.decode('<STR_LIT:utf-8>')<EOL><DEDENT>return string<EOL>", "docstring": "Creates a python unicode string from a CFString object\n\n:param value:\n    The CFString to convert\n\n:return:\n    A python unicode string", "id": "f9510:c0:m4"}
{"signature": "@classmethod<EOL><INDENT>def register_native_mapping(cls, type_id, callback):<DEDENT>", "body": "cls._native_map[int(type_id)] = callback<EOL>", "docstring": "Register a function to convert a core foundation data type into its\nequivalent in python\n\n:param type_id:\n    The CFTypeId for the type\n\n:param callback:\n    A callback to pass the CFType object to", "id": "f9510:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def cf_dictionary_from_pairs(pairs):<DEDENT>", "body": "length = len(pairs)<EOL>keys = []<EOL>values = []<EOL>for pair in pairs:<EOL><INDENT>key, value = pair<EOL>keys.append(key)<EOL>values.append(value)<EOL><DEDENT>return CoreFoundation.CFDictionaryCreate(<EOL>CoreFoundation.kCFAllocatorDefault,<EOL>keys,<EOL>values,<EOL>length,<EOL>ffi.addressof(CoreFoundation.kCFTypeDictionaryKeyCallBacks),<EOL>ffi.addressof(CoreFoundation.kCFTypeDictionaryValueCallBacks)<EOL>)<EOL>", "docstring": "Creates a CFDictionaryRef object from a list of 2-element tuples\nrepresenting the key and value. Each key should be a CFStringRef and each\nvalue some sort of CF* type.\n\n:param pairs:\n    A list of 2-element tuples\n\n:return:\n    A CFDictionaryRef", "id": "f9510:c0:m8"}
{"signature": "@staticmethod<EOL><INDENT>def cf_data_to_bytes(value):<DEDENT>", "body": "start = CoreFoundation.CFDataGetBytePtr(value)<EOL>num_bytes = CoreFoundation.CFDataGetLength(value)<EOL>return ffi.buffer(start, num_bytes)[:]<EOL>", "docstring": "Extracts a bytestring from a CFData object\n\n:param value:\n    A CFData object\n\n:return:\n    A byte string", "id": "f9510:c0:m6"}
{"signature": "@classmethod<EOL><INDENT>def native(cls, value):<DEDENT>", "body": "type_id = CoreFoundation.CFGetTypeID(value)<EOL>if type_id in cls._native_map:<EOL><INDENT>return cls._native_map[type_id](value)<EOL><DEDENT>else:<EOL><INDENT>return value<EOL><DEDENT>", "docstring": "Converts a CF* object into its python equivalent\n\n:param value:\n    The CF* object to convert\n\n:return:\n    The native python object", "id": "f9510:c0:m3"}
{"signature": "@property<EOL><INDENT>def curve(self):<DEDENT>", "body": "return self.public_key.curve<EOL>", "docstring": ":return:\n    A unicode string of EC curve name", "id": "f9511:c2:m2"}
{"signature": "def rsa_pkcs1v15_encrypt(certificate_or_public_key, data):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(certificate_or_public_key)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>key_length = certificate_or_public_key.byte_size<EOL>buffer = buffer_from_bytes(key_length)<EOL>output_length = new(Security, '<STR_LIT>', key_length)<EOL>result = Security.SecKeyEncrypt(<EOL>certificate_or_public_key.sec_key_ref,<EOL>SecurityConst.kSecPaddingPKCS1,<EOL>data,<EOL>len(data),<EOL>buffer,<EOL>output_length<EOL>)<EOL>handle_sec_error(result)<EOL>return bytes_from_buffer(buffer, deref(output_length))<EOL>", "docstring": "Encrypts a byte string using an RSA public key or certificate. Uses PKCS#1\nv1.5 padding.\n\n:param certificate_or_public_key:\n    A PublicKey or Certificate object\n\n:param data:\n    A byte string, with a maximum length 11 bytes less than the key length\n    (in bytes)\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the encrypted data", "id": "f9511:m8"}
{"signature": "def load_public_key(source):", "body": "if isinstance(source, keys.PublicKeyInfo):<EOL><INDENT>public_key = source<EOL><DEDENT>elif isinstance(source, byte_cls):<EOL><INDENT>public_key = parse_public(source)<EOL><DEDENT>elif isinstance(source, str_cls):<EOL><INDENT>with open(source, '<STR_LIT:rb>') as f:<EOL><INDENT>public_key = parse_public(f.read())<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(source)<EOL>))<EOL><DEDENT>return _load_key(public_key)<EOL>", "docstring": "Loads a public key into a PublicKey object\n\n:param source:\n    A byte string of file contents, a unicode string filename or an\n    asn1crypto.keys.PublicKeyInfo object\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    oscrypto.errors.AsymmetricKeyError - when the public key is incompatible with the OS crypto library\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A PublicKey object", "id": "f9511:m5"}
{"signature": "def rsa_pss_sign(private_key, data, hash_algorithm):", "body": "if not isinstance(private_key, PrivateKey):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(private_key)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if private_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>hash_length = {<EOL>'<STR_LIT>': <NUM_LIT:20>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:32>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:64><EOL>}.get(hash_algorithm, <NUM_LIT:0>)<EOL>encoded_data = add_pss_padding(hash_algorithm, hash_length, private_key.bit_size, data)<EOL>key_length = private_key.byte_size<EOL>buffer = buffer_from_bytes(key_length)<EOL>output_length = new(Security, '<STR_LIT>', key_length)<EOL>result = Security.SecKeyDecrypt(<EOL>private_key.sec_key_ref,<EOL>SecurityConst.kSecPaddingNone,<EOL>encoded_data,<EOL>len(encoded_data),<EOL>buffer,<EOL>output_length<EOL>)<EOL>handle_sec_error(result)<EOL>return bytes_from_buffer(buffer, deref(output_length))<EOL>", "docstring": "Generates an RSASSA-PSS signature. For the PSS padding the mask gen\nalgorithm will be mgf1 using the same hash algorithm as the signature. The\nsalt length with be the length of the hash algorithm, and the trailer field\nwith be the standard 0xBC byte.\n\n:param private_key:\n    The PrivateKey to generate the signature with\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or\n    \"sha512\"\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the signature", "id": "f9511:m20"}
{"signature": "def ecdsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return _verify(certificate_or_public_key, signature, data, hash_algorithm)<EOL>", "docstring": "Verifies an ECDSA signature\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n    A byte string of the signature to verify\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n    oscrypto.errors.SignatureError - when the signature is determined to be invalid\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9511:m17"}
{"signature": "def _encrypt(certificate_or_public_key, data, padding):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(certificate_or_public_key)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if not padding:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>cf_data = None<EOL>sec_transform = None<EOL>try:<EOL><INDENT>cf_data = CFHelpers.cf_data_from_bytes(data)<EOL>error_pointer = new(CoreFoundation, '<STR_LIT>')<EOL>sec_transform = Security.SecEncryptTransformCreate(<EOL>certificate_or_public_key.sec_key_ref,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL>if padding:<EOL><INDENT>Security.SecTransformSetAttribute(<EOL>sec_transform,<EOL>Security.kSecPaddingKey,<EOL>padding,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL><DEDENT>Security.SecTransformSetAttribute(<EOL>sec_transform,<EOL>Security.kSecTransformInputAttributeName,<EOL>cf_data,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL>ciphertext = Security.SecTransformExecute(sec_transform, error_pointer)<EOL>handle_cf_error(error_pointer)<EOL>return CFHelpers.cf_data_to_bytes(ciphertext)<EOL><DEDENT>finally:<EOL><INDENT>if cf_data:<EOL><INDENT>CoreFoundation.CFRelease(cf_data)<EOL><DEDENT>if sec_transform:<EOL><INDENT>CoreFoundation.CFRelease(sec_transform)<EOL><DEDENT><DEDENT>", "docstring": "Encrypts plaintext using an RSA public key or certificate\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey object\n\n:param data:\n    The plaintext - a byte string\n\n:param padding:\n    The padding mode to use, specified as a kSecPadding*Key value\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is 
returned by the OS crypto library\n\n:return:\n    A byte string of the ciphertext", "id": "f9511:m12"}
{"signature": "@property<EOL><INDENT>def sec_key_ref(self):<DEDENT>", "body": "return self.public_key.sec_key_ref<EOL>", "docstring": ":return:\n    The SecKeyRef of the public key", "id": "f9511:c2:m5"}
{"signature": "def dsa_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return _sign(private_key, data, hash_algorithm)<EOL>", "docstring": "Generates a DSA signature\n\n:param private_key:\n    The PrivateKey to generate the signature with\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or\n    \"sha512\"\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the signature", "id": "f9511:m21"}
{"signature": "def _verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(certificate_or_public_key)<EOL>))<EOL><DEDENT>if not isinstance(signature, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(signature)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>valid_hash_algorithms = set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>if certificate_or_public_key.algorithm == '<STR_LIT>':<EOL><INDENT>valid_hash_algorithms |= set(['<STR_LIT>'])<EOL><DEDENT>if hash_algorithm not in valid_hash_algorithms:<EOL><INDENT>valid_hash_algorithms_error = '<STR_LIT>'<EOL>if certificate_or_public_key.algorithm == '<STR_LIT>':<EOL><INDENT>valid_hash_algorithms_error += '<STR_LIT>'<EOL><DEDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>valid_hash_algorithms_error,<EOL>repr(hash_algorithm)<EOL>))<EOL><DEDENT>if certificate_or_public_key.algorithm == '<STR_LIT>' and hash_algorithm == '<STR_LIT>':<EOL><INDENT>if len(data) > certificate_or_public_key.byte_size - <NUM_LIT:11>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>certificate_or_public_key.byte_size,<EOL>len(data)<EOL>))<EOL><DEDENT>result = Security.SecKeyRawVerify(<EOL>certificate_or_public_key.sec_key_ref,<EOL>SecurityConst.kSecPaddingPKCS1,<EOL>data,<EOL>len(data),<EOL>signature,<EOL>len(signature)<EOL>)<EOL>if result == SecurityConst.errSecVerifyFailed or result == SecurityConst.errSSLCrypto:<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT>handle_sec_error(result)<EOL>return<EOL><DEDENT>cf_signature = None<EOL>cf_data = None<EOL>cf_hash_length = None<EOL>sec_transform = None<EOL>try:<EOL><INDENT>error_pointer = 
new(CoreFoundation, '<STR_LIT>')<EOL>cf_signature = CFHelpers.cf_data_from_bytes(signature)<EOL>sec_transform = Security.SecVerifyTransformCreate(<EOL>certificate_or_public_key.sec_key_ref,<EOL>cf_signature,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL>hash_constant = {<EOL>'<STR_LIT>': Security.kSecDigestMD5,<EOL>'<STR_LIT>': Security.kSecDigestSHA1,<EOL>'<STR_LIT>': Security.kSecDigestSHA2,<EOL>'<STR_LIT>': Security.kSecDigestSHA2,<EOL>'<STR_LIT>': Security.kSecDigestSHA2,<EOL>'<STR_LIT>': Security.kSecDigestSHA2<EOL>}[hash_algorithm]<EOL>Security.SecTransformSetAttribute(<EOL>sec_transform,<EOL>Security.kSecDigestTypeAttribute,<EOL>hash_constant,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL>if hash_algorithm in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>hash_length = {<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT><EOL>}[hash_algorithm]<EOL>cf_hash_length = CFHelpers.cf_number_from_integer(hash_length)<EOL>Security.SecTransformSetAttribute(<EOL>sec_transform,<EOL>Security.kSecDigestLengthAttribute,<EOL>cf_hash_length,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL><DEDENT>if certificate_or_public_key.algorithm == '<STR_LIT>':<EOL><INDENT>Security.SecTransformSetAttribute(<EOL>sec_transform,<EOL>Security.kSecPaddingKey,<EOL>Security.kSecPaddingPKCS1Key,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL><DEDENT>cf_data = CFHelpers.cf_data_from_bytes(data)<EOL>Security.SecTransformSetAttribute(<EOL>sec_transform,<EOL>Security.kSecTransformInputAttributeName,<EOL>cf_data,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL>res = Security.SecTransformExecute(sec_transform, error_pointer)<EOL>if not is_null(error_pointer):<EOL><INDENT>error = unwrap(error_pointer)<EOL>if not is_null(error):<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT><DEDENT>res = 
bool(CoreFoundation.CFBooleanGetValue(res))<EOL>if not res:<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>if sec_transform:<EOL><INDENT>CoreFoundation.CFRelease(sec_transform)<EOL><DEDENT>if cf_signature:<EOL><INDENT>CoreFoundation.CFRelease(cf_signature)<EOL><DEDENT>if cf_data:<EOL><INDENT>CoreFoundation.CFRelease(cf_data)<EOL><DEDENT>if cf_hash_length:<EOL><INDENT>CoreFoundation.CFRelease(cf_hash_length)<EOL><DEDENT><DEDENT>", "docstring": "Verifies an RSA, DSA or ECDSA signature\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n    A byte string of the signature to verify\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n    oscrypto.errors.SignatureError - when the signature is determined to be invalid\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9511:m18"}
{"signature": "@property<EOL><INDENT>def byte_size(self):<DEDENT>", "body": "return self.asn1.byte_size<EOL>", "docstring": ":return:\n    The number of bytes in the key, as an integer", "id": "f9511:c0:m4"}
{"signature": "@property<EOL><INDENT>def bit_size(self):<DEDENT>", "body": "return self.public_key.bit_size<EOL>", "docstring": ":return:\n    The number of bits in the public key, as an integer", "id": "f9511:c2:m3"}
{"signature": "def _decrypt(private_key, ciphertext, padding):", "body": "if not isinstance(private_key, PrivateKey):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(private_key)<EOL>))<EOL><DEDENT>if not isinstance(ciphertext, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(ciphertext)<EOL>))<EOL><DEDENT>if not padding:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>cf_data = None<EOL>sec_transform = None<EOL>try:<EOL><INDENT>cf_data = CFHelpers.cf_data_from_bytes(ciphertext)<EOL>error_pointer = new(CoreFoundation, '<STR_LIT>')<EOL>sec_transform = Security.SecDecryptTransformCreate(<EOL>private_key.sec_key_ref,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL>Security.SecTransformSetAttribute(<EOL>sec_transform,<EOL>Security.kSecPaddingKey,<EOL>padding,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL>Security.SecTransformSetAttribute(<EOL>sec_transform,<EOL>Security.kSecTransformInputAttributeName,<EOL>cf_data,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL>plaintext = Security.SecTransformExecute(sec_transform, error_pointer)<EOL>handle_cf_error(error_pointer)<EOL>return CFHelpers.cf_data_to_bytes(plaintext)<EOL><DEDENT>finally:<EOL><INDENT>if cf_data:<EOL><INDENT>CoreFoundation.CFRelease(cf_data)<EOL><DEDENT>if sec_transform:<EOL><INDENT>CoreFoundation.CFRelease(sec_transform)<EOL><DEDENT><DEDENT>", "docstring": "Decrypts RSA ciphertext using a private key\n\n:param private_key:\n    A PrivateKey object\n\n:param ciphertext:\n    The ciphertext - a byte string\n\n:param padding:\n    The padding mode to use, specified as a kSecPadding*Key value\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9511:m13"}
{"signature": "def rsa_pkcs1v15_decrypt(private_key, ciphertext):", "body": "if not isinstance(private_key, PrivateKey):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(private_key)<EOL>))<EOL><DEDENT>if not isinstance(ciphertext, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(ciphertext)<EOL>))<EOL><DEDENT>key_length = private_key.byte_size<EOL>buffer = buffer_from_bytes(key_length)<EOL>output_length = new(Security, '<STR_LIT>', key_length)<EOL>if osx_version_info < (<NUM_LIT:10>, <NUM_LIT:8>):<EOL><INDENT>padding = SecurityConst.kSecPaddingNone<EOL><DEDENT>else:<EOL><INDENT>padding = SecurityConst.kSecPaddingPKCS1<EOL><DEDENT>result = Security.SecKeyDecrypt(<EOL>private_key.sec_key_ref,<EOL>padding,<EOL>ciphertext,<EOL>len(ciphertext),<EOL>buffer,<EOL>output_length<EOL>)<EOL>handle_sec_error(result)<EOL>output = bytes_from_buffer(buffer, deref(output_length))<EOL>if osx_version_info < (<NUM_LIT:10>, <NUM_LIT:8>):<EOL><INDENT>output = remove_pkcs1v15_encryption_padding(key_length, output)<EOL><DEDENT>return output<EOL>", "docstring": "Decrypts a byte string using an RSA private key. Uses PKCS#1 v1.5 padding.\n\n:param private_key:\n    A PrivateKey object\n\n:param ciphertext:\n    A byte string of the encrypted data\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the original plaintext", "id": "f9511:m9"}
{"signature": "def ecdsa_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return _sign(private_key, data, hash_algorithm)<EOL>", "docstring": "Generates an ECDSA signature\n\n:param private_key:\n    The PrivateKey to generate the signature with\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or\n    \"sha512\"\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the signature", "id": "f9511:m22"}
{"signature": "def rsa_pkcs1v15_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return _sign(private_key, data, hash_algorithm)<EOL>", "docstring": "Generates an RSASSA-PKCS-v1.5 signature.\n\nWhen the hash_algorithm is \"raw\", the operation is identical to RSA\nprivate key encryption. That is: the data is not hashed and no ASN.1\nstructure with an algorithm identifier of the hash algorithm is placed in\nthe encrypted byte string.\n\n:param private_key:\n    The PrivateKey to generate the signature with\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\",\n    \"sha512\" or \"raw\"\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the signature", "id": "f9511:m19"}
{"signature": "def _load_x509(certificate):", "body": "source = certificate.dump()<EOL>cf_source = None<EOL>try:<EOL><INDENT>cf_source = CFHelpers.cf_data_from_bytes(source)<EOL>sec_key_ref = Security.SecCertificateCreateWithData(CoreFoundation.kCFAllocatorDefault, cf_source)<EOL>return Certificate(sec_key_ref, certificate)<EOL><DEDENT>finally:<EOL><INDENT>if cf_source:<EOL><INDENT>CoreFoundation.CFRelease(cf_source)<EOL><DEDENT><DEDENT>", "docstring": "Loads an ASN.1 object of an x509 certificate into a Certificate object\n\n:param certificate:\n    An asn1crypto.x509.Certificate object\n\n:return:\n    A Certificate object", "id": "f9511:m3"}
{"signature": "def dsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return _verify(certificate_or_public_key, signature, data, hash_algorithm)<EOL>", "docstring": "Verifies a DSA signature\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n    A byte string of the signature to verify\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n    oscrypto.errors.SignatureError - when the signature is determined to be invalid\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9511:m16"}
{"signature": "@property<EOL><INDENT>def bit_size(self):<DEDENT>", "body": "return self.asn1.bit_size<EOL>", "docstring": ":return:\n    The number of bits in the key, as an integer", "id": "f9511:c0:m3"}
{"signature": "def _encrypt(cipher, key, data, iv, padding):", "body": "if not isinstance(key, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(key)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if cipher != Security.kSecAttrKeyTypeRC4 and not isinstance(iv, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(iv)<EOL>))<EOL><DEDENT>if cipher != Security.kSecAttrKeyTypeRC4 and not padding:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>cf_dict = None<EOL>cf_key = None<EOL>cf_data = None<EOL>cf_iv = None<EOL>sec_key = None<EOL>sec_transform = None<EOL>try:<EOL><INDENT>cf_dict = CFHelpers.cf_dictionary_from_pairs([(Security.kSecAttrKeyType, cipher)])<EOL>cf_key = CFHelpers.cf_data_from_bytes(key)<EOL>cf_data = CFHelpers.cf_data_from_bytes(data)<EOL>error_pointer = new(CoreFoundation, '<STR_LIT>')<EOL>sec_key = Security.SecKeyCreateFromData(cf_dict, cf_key, error_pointer)<EOL>handle_cf_error(error_pointer)<EOL>sec_transform = Security.SecEncryptTransformCreate(sec_key, error_pointer)<EOL>handle_cf_error(error_pointer)<EOL>if cipher != Security.kSecAttrKeyTypeRC4:<EOL><INDENT>Security.SecTransformSetAttribute(sec_transform, Security.kSecModeCBCKey, null(), error_pointer)<EOL>handle_cf_error(error_pointer)<EOL>Security.SecTransformSetAttribute(sec_transform, Security.kSecPaddingKey, padding, error_pointer)<EOL>handle_cf_error(error_pointer)<EOL>cf_iv = CFHelpers.cf_data_from_bytes(iv)<EOL>Security.SecTransformSetAttribute(sec_transform, Security.kSecIVKey, cf_iv, error_pointer)<EOL>handle_cf_error(error_pointer)<EOL><DEDENT>Security.SecTransformSetAttribute(<EOL>sec_transform,<EOL>Security.kSecTransformInputAttributeName,<EOL>cf_data,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL>ciphertext = Security.SecTransformExecute(sec_transform, 
error_pointer)<EOL>handle_cf_error(error_pointer)<EOL>return CFHelpers.cf_data_to_bytes(ciphertext)<EOL><DEDENT>finally:<EOL><INDENT>if cf_dict:<EOL><INDENT>CoreFoundation.CFRelease(cf_dict)<EOL><DEDENT>if cf_key:<EOL><INDENT>CoreFoundation.CFRelease(cf_key)<EOL><DEDENT>if cf_data:<EOL><INDENT>CoreFoundation.CFRelease(cf_data)<EOL><DEDENT>if cf_iv:<EOL><INDENT>CoreFoundation.CFRelease(cf_iv)<EOL><DEDENT>if sec_key:<EOL><INDENT>CoreFoundation.CFRelease(sec_key)<EOL><DEDENT>if sec_transform:<EOL><INDENT>CoreFoundation.CFRelease(sec_transform)<EOL><DEDENT><DEDENT>", "docstring": "Encrypts plaintext\n\n:param cipher:\n    A kSecAttrKeyType* value that specifies the cipher to use\n\n:param key:\n    The encryption key - a byte string 5-16 bytes long\n\n:param data:\n    The plaintext - a byte string\n\n:param iv:\n    The initialization vector - a byte string - unused for RC4\n\n:param padding:\n    The padding mode to use, specified as a kSecPadding*Key value - unused for RC4\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the ciphertext", "id": "f9513:m12"}
{"signature": "def _decrypt(cipher, key, data, iv, padding):", "body": "if not isinstance(key, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(key)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if cipher != Security.kSecAttrKeyTypeRC4 and not isinstance(iv, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(iv)<EOL>))<EOL><DEDENT>if cipher != Security.kSecAttrKeyTypeRC4 and not padding:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>cf_dict = None<EOL>cf_key = None<EOL>cf_data = None<EOL>cf_iv = None<EOL>sec_key = None<EOL>sec_transform = None<EOL>try:<EOL><INDENT>cf_dict = CFHelpers.cf_dictionary_from_pairs([(Security.kSecAttrKeyType, cipher)])<EOL>cf_key = CFHelpers.cf_data_from_bytes(key)<EOL>cf_data = CFHelpers.cf_data_from_bytes(data)<EOL>error_pointer = new(CoreFoundation, '<STR_LIT>')<EOL>sec_key = Security.SecKeyCreateFromData(cf_dict, cf_key, error_pointer)<EOL>handle_cf_error(error_pointer)<EOL>sec_transform = Security.SecDecryptTransformCreate(sec_key, error_pointer)<EOL>handle_cf_error(error_pointer)<EOL>if cipher != Security.kSecAttrKeyTypeRC4:<EOL><INDENT>Security.SecTransformSetAttribute(sec_transform, Security.kSecModeCBCKey, null(), error_pointer)<EOL>handle_cf_error(error_pointer)<EOL>Security.SecTransformSetAttribute(sec_transform, Security.kSecPaddingKey, padding, error_pointer)<EOL>handle_cf_error(error_pointer)<EOL>cf_iv = CFHelpers.cf_data_from_bytes(iv)<EOL>Security.SecTransformSetAttribute(sec_transform, Security.kSecIVKey, cf_iv, error_pointer)<EOL>handle_cf_error(error_pointer)<EOL><DEDENT>Security.SecTransformSetAttribute(<EOL>sec_transform,<EOL>Security.kSecTransformInputAttributeName,<EOL>cf_data,<EOL>error_pointer<EOL>)<EOL>handle_cf_error(error_pointer)<EOL>plaintext = Security.SecTransformExecute(sec_transform, 
error_pointer)<EOL>handle_cf_error(error_pointer)<EOL>return CFHelpers.cf_data_to_bytes(plaintext)<EOL><DEDENT>finally:<EOL><INDENT>if cf_dict:<EOL><INDENT>CoreFoundation.CFRelease(cf_dict)<EOL><DEDENT>if cf_key:<EOL><INDENT>CoreFoundation.CFRelease(cf_key)<EOL><DEDENT>if cf_data:<EOL><INDENT>CoreFoundation.CFRelease(cf_data)<EOL><DEDENT>if cf_iv:<EOL><INDENT>CoreFoundation.CFRelease(cf_iv)<EOL><DEDENT>if sec_key:<EOL><INDENT>CoreFoundation.CFRelease(sec_key)<EOL><DEDENT>if sec_transform:<EOL><INDENT>CoreFoundation.CFRelease(sec_transform)<EOL><DEDENT><DEDENT>", "docstring": "Decrypts AES/RC4/RC2/3DES/DES ciphertext\n\n:param cipher:\n    A kSecAttrKeyType* value that specifies the cipher to use\n\n:param key:\n    The encryption key - a byte string 5-16 bytes long\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector - a byte string - unused for RC4\n\n:param padding:\n    The padding mode to use, specified as a kSecPadding*Key value - unused for RC4\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9513:m13"}
{"signature": "def rc2_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) < <NUM_LIT:5> or len(key) > <NUM_LIT:16>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if not iv:<EOL><INDENT>iv = rand_bytes(<NUM_LIT:8>)<EOL><DEDENT>elif len(iv) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>return (iv, _encrypt(Security.kSecAttrKeyTypeRC2, key, data, iv, Security.kSecPaddingPKCS5Key))<EOL>", "docstring": "Encrypts plaintext using RC2 with a 64 bit key\n\n:param key:\n    The encryption key - a byte string 8 bytes long\n\n:param data:\n    The plaintext - a byte string\n\n:param iv:\n    The 8-byte initialization vector to use - a byte string - set as None\n    to generate an appropriate one\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A tuple of two byte strings (iv, ciphertext)", "id": "f9513:m6"}
{"signature": "def rc4_encrypt(key, data):", "body": "if len(key) < <NUM_LIT:5> or len(key) > <NUM_LIT:16>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>return _encrypt(Security.kSecAttrKeyTypeRC4, key, data, None, None)<EOL>", "docstring": "Encrypts plaintext using RC4 with a 40-128 bit key\n\n:param key:\n    The encryption key - a byte string 5-16 bytes long\n\n:param data:\n    The plaintext - a byte string\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the ciphertext", "id": "f9513:m4"}
{"signature": "def tripledes_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) != <NUM_LIT:16> and len(key) != <NUM_LIT>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if len(iv) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>if len(key) == <NUM_LIT:16>:<EOL><INDENT>key = key + key[<NUM_LIT:0>:<NUM_LIT:8>]<EOL><DEDENT>return _decrypt(Security.kSecAttrKeyType3DES, key, data, iv, Security.kSecPaddingPKCS5Key)<EOL>", "docstring": "Decrypts 3DES ciphertext in either 2 or 3 key mode\n\n:param key:\n    The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector used for encryption - a byte string\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9513:m9"}
{"signature": "def des_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if len(iv) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>return _decrypt(Security.kSecAttrKeyTypeDES, key, data, iv, Security.kSecPaddingPKCS5Key)<EOL>", "docstring": "Decrypts DES ciphertext using a 56 bit key\n\n:param key:\n    The encryption key - a byte string 8 bytes long (includes error correction bits)\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector used for encryption - a byte string\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the plaintext", "id": "f9513:m11"}
{"signature": "def ecdsa_sign(private_key, data, hash_algorithm):", "body": "if not hasattr(private_key, '<STR_LIT>') or not isinstance(private_key.asn1, keys.PrivateKeyInfo):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(private_key)<EOL>))<EOL><DEDENT>curve_name = private_key.curve<EOL>if curve_name not in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if hash_algorithm not in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(hash_algorithm)<EOL>))<EOL><DEDENT>hash_func = getattr(hashlib, hash_algorithm)<EOL>ec_private_key = private_key.asn1['<STR_LIT>'].parsed<EOL>private_key_bytes = ec_private_key['<STR_LIT>'].contents<EOL>private_key_int = ec_private_key['<STR_LIT>'].native<EOL>curve_num_bytes = CURVE_BYTES[curve_name]<EOL>curve_base_point = {<EOL>'<STR_LIT>': SECP256R1_BASE_POINT,<EOL>'<STR_LIT>': SECP384R1_BASE_POINT,<EOL>'<STR_LIT>': SECP521R1_BASE_POINT,<EOL>}[curve_name]<EOL>n = curve_base_point.order<EOL>digest = hash_func(data).digest()<EOL>hash_length = len(digest)<EOL>h = int_from_bytes(digest, signed=False) % n<EOL>V = b'<STR_LIT>' * hash_length<EOL>K = b'<STR_LIT:\\x00>' * hash_length<EOL>K = hmac.new(K, V + b'<STR_LIT:\\x00>' + private_key_bytes + digest, hash_func).digest()<EOL>V = hmac.new(K, V, hash_func).digest()<EOL>K = hmac.new(K, V + b'<STR_LIT>' + private_key_bytes + digest, hash_func).digest()<EOL>V = hmac.new(K, V, hash_func).digest()<EOL>r = <NUM_LIT:0><EOL>s = <NUM_LIT:0><EOL>while True:<EOL><INDENT>T = b'<STR_LIT>'<EOL>while len(T) < curve_num_bytes:<EOL><INDENT>V = hmac.new(K, V, hash_func).digest()<EOL>T += V<EOL><DEDENT>k = int_from_bytes(T[<NUM_LIT:0>:curve_num_bytes], 
signed=False)<EOL>if k == <NUM_LIT:0> or k >= n:<EOL><INDENT>continue<EOL><DEDENT>r = (curve_base_point * k).x % n<EOL>if r == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>s = (inverse_mod(k, n) * (h + (private_key_int * r) % n)) % n<EOL>if s == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>break<EOL><DEDENT>return DSASignature({'<STR_LIT:r>': r, '<STR_LIT:s>': s}).dump()<EOL>", "docstring": "Generates an ECDSA signature in pure Python (thus slow)\n\n:param private_key:\n    The PrivateKey to generate the signature with\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the signature", "id": "f9516:m1"}
{"signature": "def ecdsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "has_asn1 = hasattr(certificate_or_public_key, '<STR_LIT>')<EOL>if not has_asn1 or not isinstance(certificate_or_public_key.asn1, (keys.PublicKeyInfo, Certificate)):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(certificate_or_public_key)<EOL>))<EOL><DEDENT>curve_name = certificate_or_public_key.curve<EOL>if curve_name not in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT>if not isinstance(signature, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(signature)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if hash_algorithm not in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(hash_algorithm)<EOL>))<EOL><DEDENT>asn1 = certificate_or_public_key.asn1<EOL>if isinstance(asn1, Certificate):<EOL><INDENT>asn1 = asn1.public_key<EOL><DEDENT>curve_base_point = {<EOL>'<STR_LIT>': SECP256R1_BASE_POINT,<EOL>'<STR_LIT>': SECP384R1_BASE_POINT,<EOL>'<STR_LIT>': SECP521R1_BASE_POINT,<EOL>}[curve_name]<EOL>x, y = asn1['<STR_LIT>'].to_coords()<EOL>n = curve_base_point.order<EOL>public_key_point = PrimePoint(curve_base_point.curve, x, y, n)<EOL>try:<EOL><INDENT>signature = DSASignature.load(signature)<EOL>r = signature['<STR_LIT:r>'].native<EOL>s = signature['<STR_LIT:s>'].native<EOL><DEDENT>except (ValueError):<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT>invalid = <NUM_LIT:0><EOL>invalid |= r < <NUM_LIT:1><EOL>invalid |= r >= n<EOL>invalid |= s < <NUM_LIT:1><EOL>invalid |= s >= n<EOL>if invalid:<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT>hash_func = getattr(hashlib, hash_algorithm)<EOL>digest = 
hash_func(data).digest()<EOL>z = int_from_bytes(digest, signed=False) % n<EOL>w = inverse_mod(s, n)<EOL>u1 = (z * w) % n<EOL>u2 = (r * w) % n<EOL>hash_point = (curve_base_point * u1) + (public_key_point * u2)<EOL>if r != (hash_point.x % n):<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Verifies an ECDSA signature in pure Python (thus slow)\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n    A byte string of the signature to verify\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n    oscrypto.errors.SignatureError - when the signature is determined to be invalid\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9516:m2"}
{"signature": "def pbkdf2_iteration_calculator(hash_algorithm, key_length, target_ms=<NUM_LIT:100>, quiet=False):", "body": "if hash_algorithm not in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(hash_algorithm)<EOL>))<EOL><DEDENT>if not isinstance(key_length, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(key_length)<EOL>))<EOL><DEDENT>if key_length < <NUM_LIT:1>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(key_length)<EOL>))<EOL><DEDENT>if not isinstance(target_ms, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(target_ms)<EOL>))<EOL><DEDENT>if target_ms < <NUM_LIT:1>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(target_ms)<EOL>))<EOL><DEDENT>if pbkdf2.pure_python:<EOL><INDENT>raise OSError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT>iterations = <NUM_LIT><EOL>password = '<STR_LIT>'.encode('<STR_LIT:utf-8>')<EOL>salt = rand_bytes(key_length)<EOL>def _measure():<EOL><INDENT>start = _get_start()<EOL>pbkdf2(hash_algorithm, password, salt, iterations, key_length)<EOL>observed_ms = _get_elapsed(start)<EOL>if not quiet:<EOL><INDENT>print('<STR_LIT>' % (iterations, observed_ms))<EOL><DEDENT>return <NUM_LIT:1.0> / target_ms * observed_ms<EOL><DEDENT>fraction = _measure()<EOL>iterations = int(iterations / fraction / <NUM_LIT>)<EOL>fraction = _measure()<EOL>iterations = iterations / fraction<EOL>round_factor = -<NUM_LIT:3> if iterations < <NUM_LIT> else -<NUM_LIT:4><EOL>result = int(round(iterations, round_factor))<EOL>if result > <NUM_LIT>:<EOL><INDENT>result = (result // <NUM_LIT>) * <NUM_LIT><EOL><DEDENT>return result<EOL>", "docstring": "Runs pbkdf2() twice to determine the approximate number of iterations to\nuse to hit a desired time per run. 
Use this on a production machine to\ndynamically adjust the number of iterations as high as you can.\n\n:param hash_algorithm:\n    The string name of the hash algorithm to use: \"md5\", \"sha1\", \"sha224\",\n    \"sha256\", \"sha384\", \"sha512\"\n\n:param key_length:\n    The length of the desired key in bytes\n\n:param target_ms:\n    The number of milliseconds the derivation should take\n\n:param quiet:\n    If no output should be printed as attempts are made\n\n:return:\n    An integer number of iterations of PBKDF2 using the specified hash\n    that will take at least target_ms", "id": "f9517:m0"}
{"signature": "def remove_pkcs1v15_signature_padding(key_length, data):", "body": "if _backend != '<STR_LIT>':<EOL><INDENT>raise SystemError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT>return _remove_pkcs1v15_padding(key_length, data, '<STR_LIT>')<EOL>", "docstring": "Removes PKCS#1 v1.5 padding from a signed message using constant time\noperations\n\n:param key_length:\n    An integer of the number of bytes in the key\n\n:param data:\n    A byte string to unpad\n\n:return:\n    The unpadded data as a byte string", "id": "f9518:m5"}
{"signature": "def _is_osx_107():", "body": "if sys.platform != '<STR_LIT>':<EOL><INDENT>return False<EOL><DEDENT>version = platform.mac_ver()[<NUM_LIT:0>]<EOL>return tuple(map(int, version.split('<STR_LIT:.>')))[<NUM_LIT:0>:<NUM_LIT:2>] == (<NUM_LIT:10>, <NUM_LIT:7>)<EOL>", "docstring": ":return:\n    A bool if the current machine is running OS X 10.7", "id": "f9518:m0"}
{"signature": "def verify_pss_padding(hash_algorithm, salt_length, key_length, message, signature):", "body": "if _backend != '<STR_LIT>' and sys.platform != '<STR_LIT>':<EOL><INDENT>raise SystemError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT>if not isinstance(message, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(message)<EOL>))<EOL><DEDENT>if not isinstance(signature, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(signature)<EOL>))<EOL><DEDENT>if not isinstance(salt_length, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(salt_length)<EOL>))<EOL><DEDENT>if salt_length < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(salt_length)<EOL>))<EOL><DEDENT>if hash_algorithm not in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(hash_algorithm)<EOL>))<EOL><DEDENT>hash_func = getattr(hashlib, hash_algorithm)<EOL>em_bits = key_length - <NUM_LIT:1><EOL>em_len = int(math.ceil(em_bits / <NUM_LIT:8>))<EOL>message_digest = hash_func(message).digest()<EOL>hash_length = len(message_digest)<EOL>if em_len < hash_length + salt_length + <NUM_LIT:2>:<EOL><INDENT>return False<EOL><DEDENT>if signature[-<NUM_LIT:1>:] != b'<STR_LIT>':<EOL><INDENT>return False<EOL><DEDENT>zero_bits = (<NUM_LIT:8> * em_len) - em_bits<EOL>masked_db_length = em_len - hash_length - <NUM_LIT:1><EOL>masked_db = signature[<NUM_LIT:0>:masked_db_length]<EOL>first_byte = ord(masked_db[<NUM_LIT:0>:<NUM_LIT:1>])<EOL>bits_that_should_be_zero = first_byte >> (<NUM_LIT:8> - zero_bits)<EOL>if bits_that_should_be_zero != <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>m_prime_digest = signature[masked_db_length:masked_db_length + hash_length]<EOL>db_mask = _mgf1(hash_algorithm, m_prime_digest, em_len - hash_length - <NUM_LIT:1>)<EOL>left_bit_mask 
= ('<STR_LIT:0>' * zero_bits) + ('<STR_LIT:1>' * (<NUM_LIT:8> - zero_bits))<EOL>left_int_mask = int(left_bit_mask, <NUM_LIT:2>)<EOL>if left_int_mask != <NUM_LIT:255>:<EOL><INDENT>db_mask = chr_cls(left_int_mask & ord(db_mask[<NUM_LIT:0>:<NUM_LIT:1>])) + db_mask[<NUM_LIT:1>:]<EOL><DEDENT>db = int_to_bytes(int_from_bytes(masked_db) ^ int_from_bytes(db_mask))<EOL>if len(db) < len(masked_db):<EOL><INDENT>db = (b'<STR_LIT:\\x00>' * (len(masked_db) - len(db))) + db<EOL><DEDENT>zero_length = em_len - hash_length - salt_length - <NUM_LIT:2><EOL>zero_string = b'<STR_LIT:\\x00>' * zero_length<EOL>if not constant_compare(db[<NUM_LIT:0>:zero_length], zero_string):<EOL><INDENT>return False<EOL><DEDENT>if db[zero_length:zero_length + <NUM_LIT:1>] != b'<STR_LIT>':<EOL><INDENT>return False<EOL><DEDENT>salt = db[<NUM_LIT:0> - salt_length:]<EOL>m_prime = (b'<STR_LIT:\\x00>' * <NUM_LIT:8>) + message_digest + salt<EOL>h_prime = hash_func(m_prime).digest()<EOL>return constant_compare(m_prime_digest, h_prime)<EOL>", "docstring": "Verifies the PSS padding on an encoded message\n\n:param hash_algorithm:\n    The string name of the hash algorithm to use: \"sha1\", \"sha224\",\n    \"sha256\", \"sha384\", \"sha512\"\n\n:param salt_length:\n    The length of the salt as an integer - typically the same as the length\n    of the output from the hash_algorithm\n\n:param key_length:\n    The length of the RSA key, in bits\n\n:param message:\n    A byte string of the message to pad\n\n:param signature:\n    The signature to verify\n\n:return:\n    A boolean indicating if the signature is invalid", "id": "f9518:m2"}
{"signature": "def add_pkcs1v15_signature_padding(key_length, data):", "body": "if _backend != '<STR_LIT>':<EOL><INDENT>raise SystemError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT>return _add_pkcs1v15_padding(key_length, data, '<STR_LIT>')<EOL>", "docstring": "Adds PKCS#1 v1.5 padding to a message to be signed\n\n:param key_length:\n    An integer of the number of bytes in the key\n\n:param data:\n    A byte string to pad\n\n:return:\n    The padded data as a byte string", "id": "f9518:m4"}
{"signature": "def _mgf1(hash_algorithm, seed, mask_length):", "body": "if not isinstance(seed, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(seed)<EOL>))<EOL><DEDENT>if not isinstance(mask_length, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(mask_length)<EOL>))<EOL><DEDENT>if mask_length < <NUM_LIT:1>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(mask_length)<EOL>))<EOL><DEDENT>if hash_algorithm not in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(hash_algorithm)<EOL>))<EOL><DEDENT>output = b'<STR_LIT>'<EOL>hash_length = {<EOL>'<STR_LIT>': <NUM_LIT:20>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:32>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:64><EOL>}[hash_algorithm]<EOL>iterations = int(math.ceil(mask_length / hash_length))<EOL>pack = struct.Struct(b'<STR_LIT>').pack<EOL>hash_func = getattr(hashlib, hash_algorithm)<EOL>for counter in range(<NUM_LIT:0>, iterations):<EOL><INDENT>b = pack(counter)<EOL>output += hash_func(seed + b).digest()<EOL><DEDENT>return output[<NUM_LIT:0>:mask_length]<EOL>", "docstring": "The PKCS#1 MGF1 mask generation algorithm\n\n:param hash_algorithm:\n    The string name of the hash algorithm to use: \"sha1\", \"sha224\",\n    \"sha256\", \"sha384\", \"sha512\"\n\n:param seed:\n    A byte string to use as the seed for the mask\n\n:param mask_length:\n    The desired mask length, as an integer\n\n:return:\n    A byte string of the mask", "id": "f9518:m3"}
{"signature": "def _add_pkcs1v15_padding(key_length, data, operation):", "body": "if operation == '<STR_LIT>':<EOL><INDENT>second_byte = b'<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>second_byte = b'<STR_LIT>'<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if not isinstance(key_length, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(key_length)<EOL>))<EOL><DEDENT>if key_length < <NUM_LIT:64>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(key_length)<EOL>))<EOL><DEDENT>if len(data) > key_length - <NUM_LIT:11>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>key_length - <NUM_LIT:11>,<EOL>len(data)<EOL>))<EOL><DEDENT>required_bytes = key_length - <NUM_LIT:3> - len(data)<EOL>padding = b'<STR_LIT>'<EOL>while required_bytes > <NUM_LIT:0>:<EOL><INDENT>temp_padding = rand_bytes(required_bytes)<EOL>temp_padding = b'<STR_LIT>'.join(temp_padding.split(b'<STR_LIT:\\x00>'))<EOL>padding += temp_padding<EOL>required_bytes -= len(temp_padding)<EOL><DEDENT>return b'<STR_LIT:\\x00>' + second_byte + padding + b'<STR_LIT:\\x00>' + data<EOL>", "docstring": "Adds PKCS#1 v1.5 padding to a message\n\n:param key_length:\n    An integer of the number of bytes in the key\n\n:param data:\n    A byte string to unpad\n\n:param operation:\n    A unicode string of \"encrypting\" or \"signing\"\n\n:return:\n    The padded data as a byte string", "id": "f9518:m7"}
{"signature": "def type_name(value):", "body": "if inspect.isclass(value):<EOL><INDENT>cls = value<EOL><DEDENT>else:<EOL><INDENT>cls = value.__class__<EOL><DEDENT>if cls.__module__ in set(['<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>return cls.__name__<EOL><DEDENT>return '<STR_LIT>' % (cls.__module__, cls.__name__)<EOL>", "docstring": "Returns a user-readable name for the type of an object\n\n:param value:\n    A value to get the type name of\n\n:return:\n    A unicode string of the object's type name", "id": "f9519:m0"}
{"signature": "def rand_bytes(length):", "body": "if not isinstance(length, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(length)<EOL>))<EOL><DEDENT>if length < <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if length > <NUM_LIT>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return os.urandom(length)<EOL>", "docstring": "Returns a number of random bytes suitable for cryptographic purposes\n\n:param length:\n    The desired number of bytes\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by OpenSSL\n\n:return:\n    A byte string", "id": "f9520:m0"}
{"signature": "def pretty_message(string, *params):", "body": "output = textwrap.dedent(string)<EOL>if output.find('<STR_LIT:\\n>') != -<NUM_LIT:1>:<EOL><INDENT>output = re.sub('<STR_LIT>', '<STR_LIT:U+0020>', output)<EOL><DEDENT>if params:<EOL><INDENT>output = output % params<EOL><DEDENT>output = output.strip()<EOL>return output<EOL>", "docstring": "Takes a multi-line string and does the following:\n\n - dedents\n - converts newlines with text before and after into a single line\n - strips leading and trailing whitespace\n\n:param string:\n    The string to format\n\n:param *params:\n    Params to interpolate into the string\n\n:return:\n    The formatted string", "id": "f9523:m0"}
{"signature": "def raise_handshake():", "body": "raise TLSError('<STR_LIT>')<EOL>", "docstring": "Raises a TLSError due to a handshake error\n\n:raises:\n    TLSError", "id": "f9524:m18"}
{"signature": "def parse_tls_records(data):", "body": "pointer = <NUM_LIT:0><EOL>data_len = len(data)<EOL>while pointer < data_len:<EOL><INDENT>if data[pointer:pointer + <NUM_LIT:1>] == b'<STR_LIT>':<EOL><INDENT>break<EOL><DEDENT>length = int_from_bytes(data[pointer + <NUM_LIT:3>:pointer + <NUM_LIT:5>])<EOL>yield (<EOL>data[pointer:pointer + <NUM_LIT:1>],<EOL>data[pointer + <NUM_LIT:1>:pointer + <NUM_LIT:3>],<EOL>data[pointer + <NUM_LIT:5>:pointer + <NUM_LIT:5> + length]<EOL>)<EOL>pointer += <NUM_LIT:5> + length<EOL><DEDENT>", "docstring": "Creates a generator returning tuples of information about each record\nin a byte string of data from a TLS client or server. Stops as soon as it\nfind a ChangeCipherSpec message since all data from then on is encrypted.\n\n:param data:\n    A byte string of TLS records\n\n:return:\n    A generator that yields 3-element tuples:\n    [0] Byte string of record type\n    [1] Byte string of protocol version\n    [2] Byte string of record data", "id": "f9524:m5"}
{"signature": "def raise_protocol_error(server_handshake_bytes):", "body": "other_protocol = detect_other_protocol(server_handshake_bytes)<EOL>if other_protocol:<EOL><INDENT>raise TLSError('<STR_LIT>' % other_protocol)<EOL><DEDENT>raise TLSError('<STR_LIT>')<EOL>", "docstring": "Raises a TLSError due to a protocol error\n\n:param server_handshake_bytes:\n    A byte string of the handshake data received from the server\n\n:raises:\n    TLSError", "id": "f9524:m17"}
{"signature": "def parse_handshake_messages(data):", "body": "pointer = <NUM_LIT:0><EOL>data_len = len(data)<EOL>while pointer < data_len:<EOL><INDENT>length = int_from_bytes(data[pointer + <NUM_LIT:1>:pointer + <NUM_LIT:4>])<EOL>yield (<EOL>data[pointer:pointer + <NUM_LIT:1>],<EOL>data[pointer + <NUM_LIT:4>:pointer + <NUM_LIT:4> + length]<EOL>)<EOL>pointer += <NUM_LIT:4> + length<EOL><DEDENT>", "docstring": "Creates a generator returning tuples of information about each message in\na byte string of data from a TLS handshake record\n\n:param data:\n    A byte string of a TLS handshake record data\n\n:return:\n    A generator that yields 2-element tuples:\n    [0] Byte string of message type\n    [1] Byte string of message data", "id": "f9524:m6"}
{"signature": "def parse_alert(server_handshake_bytes):", "body": "for record_type, _, record_data in parse_tls_records(server_handshake_bytes):<EOL><INDENT>if record_type != b'<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>if len(record_data) != <NUM_LIT:2>:<EOL><INDENT>return None<EOL><DEDENT>return (int_from_bytes(record_data[<NUM_LIT:0>:<NUM_LIT:1>]), int_from_bytes(record_data[<NUM_LIT:1>:<NUM_LIT:2>]))<EOL><DEDENT>return None<EOL>", "docstring": "Parses the handshake for protocol alerts\n\n:param server_handshake_bytes:\n    A byte string of the handshake data received from the server\n\n:return:\n    None or an 2-element tuple of integers:\n     0: 1 (warning) or 2 (fatal)\n     1: The alert description (see https://tools.ietf.org/html/rfc5246#section-7.2)", "id": "f9524:m3"}
{"signature": "def raise_disconnection():", "body": "raise TLSDisconnectError('<STR_LIT>')<EOL>", "docstring": "Raises a TLSDisconnectError due to a disconnection\n\n:raises:\n    TLSDisconnectError", "id": "f9524:m16"}
{"signature": "def _parse_hello_extensions(data):", "body": "if data == b'<STR_LIT>':<EOL><INDENT>return<EOL><DEDENT>extentions_length = int_from_bytes(data[<NUM_LIT:0>:<NUM_LIT:2>])<EOL>extensions_start = <NUM_LIT:2><EOL>extensions_end = <NUM_LIT:2> + extentions_length<EOL>pointer = extensions_start<EOL>while pointer < extensions_end:<EOL><INDENT>extension_type = int_from_bytes(data[pointer:pointer + <NUM_LIT:2>])<EOL>extension_length = int_from_bytes(data[pointer + <NUM_LIT:2>:pointer + <NUM_LIT:4>])<EOL>yield (<EOL>extension_type,<EOL>data[pointer + <NUM_LIT:4>:pointer + <NUM_LIT:4> + extension_length]<EOL>)<EOL>pointer += <NUM_LIT:4> + extension_length<EOL><DEDENT>", "docstring": "Creates a generator returning tuples of information about each extension\nfrom a byte string of extension data contained in a ServerHello ores\nClientHello message\n\n:param data:\n    A byte string of a extension data from a TLS ServerHello or ClientHello\n    message\n\n:return:\n    A generator that yields 2-element tuples:\n    [0] Byte string of extension type\n    [1] Byte string of extension data", "id": "f9524:m7"}
{"signature": "def raise_hostname(certificate, hostname):", "body": "is_ip = re.match('<STR_LIT>', hostname) or hostname.find('<STR_LIT::>') != -<NUM_LIT:1><EOL>if is_ip:<EOL><INDENT>hostname_type = '<STR_LIT>' % hostname<EOL><DEDENT>else:<EOL><INDENT>hostname_type = '<STR_LIT>' % hostname<EOL><DEDENT>message = '<STR_LIT>' % hostname_type<EOL>valid_ips = '<STR_LIT:U+002CU+0020>'.join(certificate.valid_ips)<EOL>valid_domains = '<STR_LIT:U+002CU+0020>'.join(certificate.valid_domains)<EOL>if valid_domains:<EOL><INDENT>message += '<STR_LIT>' % valid_domains<EOL><DEDENT>if valid_domains and valid_ips:<EOL><INDENT>message += '<STR_LIT>'<EOL><DEDENT>if valid_ips:<EOL><INDENT>message += '<STR_LIT>' % valid_ips<EOL><DEDENT>raise TLSVerificationError(message, certificate)<EOL>", "docstring": "Raises a TLSVerificationError due to a hostname mismatch\n\n:param certificate:\n    An asn1crypto.x509.Certificate object\n\n:raises:\n    TLSVerificationError", "id": "f9524:m8"}
{"signature": "def raise_client_auth():", "body": "message = '<STR_LIT>'<EOL>raise TLSError(message)<EOL>", "docstring": "Raises a TLSError indicating client authentication is required\n\n:raises:\n    TLSError", "id": "f9524:m11"}
{"signature": "def detect_other_protocol(server_handshake_bytes):", "body": "if server_handshake_bytes[<NUM_LIT:0>:<NUM_LIT:5>] == b'<STR_LIT>':<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if server_handshake_bytes[<NUM_LIT:0>:<NUM_LIT:4>] == b'<STR_LIT>':<EOL><INDENT>if re.match(b'<STR_LIT>', server_handshake_bytes, re.I):<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT><DEDENT>if server_handshake_bytes[<NUM_LIT:0>:<NUM_LIT:4>] == b'<STR_LIT>':<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if server_handshake_bytes[<NUM_LIT:0>:<NUM_LIT:4>] == b'<STR_LIT>':<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if server_handshake_bytes[<NUM_LIT:0>:<NUM_LIT:4>] == b'<STR_LIT>' or server_handshake_bytes[<NUM_LIT:0>:<NUM_LIT:9>] == b'<STR_LIT>':<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return None<EOL>", "docstring": "Looks at the server handshake bytes to try and detect a different protocol\n\n:param server_handshake_bytes:\n    A byte string of the handshake data received from the server\n\n:return:\n    None, or a unicode string of \"ftp\", \"http\", \"imap\", \"pop3\", \"smtp\"", "id": "f9524:m21"}
{"signature": "def dump_dh_parameters(dh_parameters, encoding='<STR_LIT>'):", "body": "if encoding not in set(['<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(encoding)<EOL>))<EOL><DEDENT>if not isinstance(dh_parameters, algos.DHParameters):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(dh_parameters)<EOL>))<EOL><DEDENT>output = dh_parameters.dump()<EOL>if encoding == '<STR_LIT>':<EOL><INDENT>output = pem.armor('<STR_LIT>', output)<EOL><DEDENT>return output<EOL>", "docstring": "Serializes an asn1crypto.algos.DHParameters object into a byte string\n\n:param dh_parameters:\n    An asn1crypto.algos.DHParameters object\n\n:param encoding:\n    A unicode string of \"pem\" or \"der\"\n\n:return:\n    A byte string of the encoded DH parameters", "id": "f9525:m0"}
{"signature": "def dump_private_key(private_key, passphrase, encoding='<STR_LIT>', target_ms=<NUM_LIT:200>):", "body": "if encoding not in set(['<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(encoding)<EOL>))<EOL><DEDENT>if passphrase is not None:<EOL><INDENT>if not isinstance(passphrase, str_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(passphrase)<EOL>))<EOL><DEDENT>if passphrase == '<STR_LIT>':<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT><DEDENT>is_oscrypto = isinstance(private_key, PrivateKey)<EOL>if not isinstance(private_key, keys.PrivateKeyInfo) and not is_oscrypto:<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(private_key)<EOL>))<EOL><DEDENT>if is_oscrypto:<EOL><INDENT>private_key = private_key.asn1<EOL><DEDENT>output = private_key.dump()<EOL>if passphrase is not None:<EOL><INDENT>cipher = '<STR_LIT>'<EOL>key_length = <NUM_LIT:32><EOL>kdf_hmac = '<STR_LIT>'<EOL>kdf_salt = rand_bytes(key_length)<EOL>iterations = pbkdf2_iteration_calculator(kdf_hmac, key_length, target_ms=target_ms, quiet=True)<EOL>if iterations < <NUM_LIT>:<EOL><INDENT>iterations = <NUM_LIT><EOL><DEDENT>passphrase_bytes = passphrase.encode('<STR_LIT:utf-8>')<EOL>key = pbkdf2(kdf_hmac, passphrase_bytes, kdf_salt, iterations, key_length)<EOL>iv, ciphertext = aes_cbc_pkcs7_encrypt(key, output, None)<EOL>output = keys.EncryptedPrivateKeyInfo({<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': algos.Pbkdf2Salt(<EOL>name='<STR_LIT>',<EOL>value=kdf_salt<EOL>),<EOL>'<STR_LIT>': iterations,<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': kdf_hmac,<EOL>'<STR_LIT>': core.Null()<EOL>}<EOL>}<EOL>},<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': cipher,<EOL>'<STR_LIT>': iv<EOL>}<EOL>}<EOL>},<EOL>'<STR_LIT>': ciphertext<EOL>}).dump()<EOL><DEDENT>if encoding == 
'<STR_LIT>':<EOL><INDENT>if passphrase is None:<EOL><INDENT>object_type = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>object_type = '<STR_LIT>'<EOL><DEDENT>output = pem.armor(object_type, output)<EOL><DEDENT>return output<EOL>", "docstring": "Serializes a private key object into a byte string of the PKCS#8 format\n\n:param private_key:\n    An oscrypto.asymmetric.PrivateKey or asn1crypto.keys.PrivateKeyInfo\n    object\n\n:param passphrase:\n    A unicode string of the passphrase to encrypt the private key with.\n    A passphrase of None will result in no encryption. A blank string will\n    result in a ValueError to help ensure that the lack of passphrase is\n    intentional.\n\n:param encoding:\n    A unicode string of \"pem\" or \"der\"\n\n:param target_ms:\n    Use PBKDF2 with the number of iterations that takes about this many\n    milliseconds on the current machine.\n\n:raises:\n    ValueError - when a blank string is provided for the passphrase\n\n:return:\n    A byte string of the encoded and encrypted public key", "id": "f9525:m3"}
{"signature": "def dump_openssl_private_key(private_key, passphrase):", "body": "if passphrase is not None:<EOL><INDENT>if not isinstance(passphrase, str_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(passphrase)<EOL>))<EOL><DEDENT>if passphrase == '<STR_LIT>':<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>'''<EOL>))<EOL><DEDENT><DEDENT>is_oscrypto = isinstance(private_key, PrivateKey)<EOL>if not isinstance(private_key, keys.PrivateKeyInfo) and not is_oscrypto:<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(private_key)<EOL>))<EOL><DEDENT>if is_oscrypto:<EOL><INDENT>private_key = private_key.asn1<EOL><DEDENT>output = private_key.unwrap().dump()<EOL>headers = None<EOL>if passphrase is not None:<EOL><INDENT>iv = rand_bytes(<NUM_LIT:16>)<EOL>headers = OrderedDict()<EOL>headers['<STR_LIT>'] = '<STR_LIT>'<EOL>headers['<STR_LIT>'] = '<STR_LIT>' % binascii.hexlify(iv).decode('<STR_LIT:ascii>')<EOL>key_length = <NUM_LIT:16><EOL>passphrase_bytes = passphrase.encode('<STR_LIT:utf-8>')<EOL>key = hashlib.md5(passphrase_bytes + iv[<NUM_LIT:0>:<NUM_LIT:8>]).digest()<EOL>while key_length > len(key):<EOL><INDENT>key += hashlib.md5(key + passphrase_bytes + iv[<NUM_LIT:0>:<NUM_LIT:8>]).digest()<EOL><DEDENT>key = key[<NUM_LIT:0>:key_length]<EOL>iv, output = aes_cbc_pkcs7_encrypt(key, output, iv)<EOL><DEDENT>if private_key.algorithm == '<STR_LIT>':<EOL><INDENT>object_type = '<STR_LIT>'<EOL><DEDENT>elif private_key.algorithm == '<STR_LIT>':<EOL><INDENT>object_type = '<STR_LIT>'<EOL><DEDENT>elif private_key.algorithm == '<STR_LIT>':<EOL><INDENT>object_type = '<STR_LIT>'<EOL><DEDENT>return pem.armor(object_type, output, headers=headers)<EOL>", "docstring": "Serializes a private key object into a byte string of the PEM formats used\nby OpenSSL. 
The format chosen will depend on the type of private key - RSA,\nDSA or EC.\n\nDo not use this method unless you really must interact with a system that\ndoes not support PKCS#8 private keys. The encryption provided by PKCS#8 is\nfar superior to the OpenSSL formats. This is due to the fact that the\nOpenSSL formats don't stretch the passphrase, making it very easy to\nbrute-force.\n\n:param private_key:\n    An oscrypto.asymmetric.PrivateKey or asn1crypto.keys.PrivateKeyInfo\n    object\n\n:param passphrase:\n    A unicode string of the passphrase to encrypt the private key with.\n    A passphrase of None will result in no encryption. A blank string will\n    result in a ValueError to help ensure that the lack of passphrase is\n    intentional.\n\n:raises:\n    ValueError - when a blank string is provided for the passphrase\n\n:return:\n    A byte string of the encoded and encrypted public key", "id": "f9525:m4"}
{"signature": "def dump_certificate(certificate, encoding='<STR_LIT>'):", "body": "if encoding not in set(['<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(encoding)<EOL>))<EOL><DEDENT>is_oscrypto = isinstance(certificate, Certificate)<EOL>if not isinstance(certificate, x509.Certificate) and not is_oscrypto:<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(certificate)<EOL>))<EOL><DEDENT>if is_oscrypto:<EOL><INDENT>certificate = certificate.asn1<EOL><DEDENT>output = certificate.dump()<EOL>if encoding == '<STR_LIT>':<EOL><INDENT>output = pem.armor('<STR_LIT>', output)<EOL><DEDENT>return output<EOL>", "docstring": "Serializes a certificate object into a byte string\n\n:param certificate:\n    An oscrypto.asymmetric.Certificate or asn1crypto.x509.Certificate object\n\n:param encoding:\n    A unicode string of \"pem\" or \"der\"\n\n:return:\n    A byte string of the encoded certificate", "id": "f9525:m2"}
{"signature": "def use_winlegacy():", "body": "if sys.platform != '<STR_LIT:win32>':<EOL><INDENT>plat = platform.system() or sys.platform<EOL>if plat == '<STR_LIT>':<EOL><INDENT>plat = '<STR_LIT>'<EOL><DEDENT>raise EnvironmentError('<STR_LIT>' % plat)<EOL><DEDENT>with _backend_lock:<EOL><INDENT>if _module_values['<STR_LIT>'] is not None:<EOL><INDENT>raise RuntimeError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>_module_values['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>", "docstring": "Forces use of the legacy Windows CryptoAPI. This should only be used on\nWindows XP or for testing. It is less full-featured than the Cryptography\nNext Generation (CNG) API, and as a result the elliptic curve and PSS\npadding features are implemented in pure Python. This isn't ideal, but it\na shim for end-user client code. No one is going to run a server on Windows\nXP anyway, right?!\n\n:raises:\n    EnvironmentError - when this function is called on an operating system other than Windows\n    RuntimeError - when this function is called after another part of oscrypto has been imported", "id": "f9526:m3"}
{"signature": "def _backend_config():", "body": "if backend() != '<STR_LIT>':<EOL><INDENT>return {}<EOL><DEDENT>if _module_values['<STR_LIT>'] is not None:<EOL><INDENT>return _module_values['<STR_LIT>']<EOL><DEDENT>with _backend_lock:<EOL><INDENT>if _module_values['<STR_LIT>'] is not None:<EOL><INDENT>return _module_values['<STR_LIT>']<EOL><DEDENT>_module_values['<STR_LIT>'] = {}<EOL>return _module_values['<STR_LIT>']<EOL><DEDENT>", "docstring": ":return:\n    A dict of config info for the backend. Only currently used by \"openssl\",\n    it may contains zero or more of the following keys:\n     - \"libcrypto_path\"\n     - \"libssl_path\"", "id": "f9526:m1"}
{"signature": "def pkcs12_kdf(hash_algorithm, password, salt, iterations, key_length, id_):", "body": "if not isinstance(password, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(password)<EOL>))<EOL><DEDENT>if not isinstance(salt, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(salt)<EOL>))<EOL><DEDENT>if not isinstance(iterations, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(iterations)<EOL>))<EOL><DEDENT>if iterations < <NUM_LIT:1>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(iterations)<EOL>))<EOL><DEDENT>if not isinstance(key_length, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(key_length)<EOL>))<EOL><DEDENT>if key_length < <NUM_LIT:1>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(key_length)<EOL>))<EOL><DEDENT>if hash_algorithm not in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(hash_algorithm)<EOL>))<EOL><DEDENT>if id_ not in set([<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>]):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(id_)<EOL>))<EOL><DEDENT>utf16_password = password.decode('<STR_LIT:utf-8>').encode('<STR_LIT>') + b'<STR_LIT>'<EOL>algo = getattr(hashlib, hash_algorithm)<EOL>u = {<EOL>'<STR_LIT>': <NUM_LIT:16>,<EOL>'<STR_LIT>': <NUM_LIT:20>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:32>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:64><EOL>}[hash_algorithm]<EOL>if hash_algorithm in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>v = <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>v = <NUM_LIT:64><EOL><DEDENT>d = chr_cls(id_) * v<EOL>s = b'<STR_LIT>'<EOL>if salt != b'<STR_LIT>':<EOL><INDENT>s_len = v * int(math.ceil(float(len(salt)) / v))<EOL>while len(s) < s_len:<EOL><INDENT>s += 
salt<EOL><DEDENT>s = s[<NUM_LIT:0>:s_len]<EOL><DEDENT>p = b'<STR_LIT>'<EOL>if utf16_password != b'<STR_LIT>':<EOL><INDENT>p_len = v * int(math.ceil(float(len(utf16_password)) / v))<EOL>while len(p) < p_len:<EOL><INDENT>p += utf16_password<EOL><DEDENT>p = p[<NUM_LIT:0>:p_len]<EOL><DEDENT>i = s + p<EOL>c = int(math.ceil(float(key_length) / u))<EOL>a = b'<STR_LIT:\\x00>' * (c * u)<EOL>for num in range(<NUM_LIT:1>, c + <NUM_LIT:1>):<EOL><INDENT>a2 = algo(d + i).digest()<EOL>for _ in range(<NUM_LIT:2>, iterations + <NUM_LIT:1>):<EOL><INDENT>a2 = algo(a2).digest()<EOL><DEDENT>if num < c:<EOL><INDENT>b = b'<STR_LIT>'<EOL>while len(b) < v:<EOL><INDENT>b += a2<EOL><DEDENT>b = int_from_bytes(b[<NUM_LIT:0>:v]) + <NUM_LIT:1><EOL>for num2 in range(<NUM_LIT:0>, len(i) // v):<EOL><INDENT>start = num2 * v<EOL>end = (num2 + <NUM_LIT:1>) * v<EOL>i_num2 = i[start:end]<EOL>i_num2 = int_to_bytes(int_from_bytes(i_num2) + b)<EOL>i_num2_l = len(i_num2)<EOL>if i_num2_l > v:<EOL><INDENT>i_num2 = i_num2[i_num2_l - v:]<EOL><DEDENT>i = i[<NUM_LIT:0>:start] + i_num2 + i[end:]<EOL><DEDENT><DEDENT>begin = (num - <NUM_LIT:1>) * u<EOL>to_copy = min(key_length, u)<EOL>a = a[<NUM_LIT:0>:begin] + a2[<NUM_LIT:0>:to_copy] + a[begin + to_copy:]<EOL><DEDENT>return a[<NUM_LIT:0>:key_length]<EOL>", "docstring": "KDF from RFC7292 appendix b.2 - https://tools.ietf.org/html/rfc7292#page-19\n\n:param hash_algorithm:\n    The string name of the hash algorithm to use: \"md5\", \"sha1\", \"sha224\",\n    \"sha256\", \"sha384\", \"sha512\"\n\n:param password:\n    A byte string of the password to use an input to the KDF\n\n:param salt:\n    A cryptographic random byte string\n\n:param iterations:\n    The numbers of iterations to use when deriving the key\n\n:param key_length:\n    The length of the desired key in bytes\n\n:param id_:\n    The ID of the usage - 1 for key, 2 for iv, 3 for mac\n\n:return:\n    The derived key as a byte string", "id": "f9527:m0"}
{"signature": "@property<EOL><INDENT>def session(self):<DEDENT>", "body": "return self._session<EOL>", "docstring": "The oscrypto.tls.TLSSession object used for this connection", "id": "f9532:c1:m24"}
{"signature": "def __init__(self, protocol=None, manual_validation=False, extra_trust_roots=None):", "body": "if not isinstance(manual_validation, bool):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(manual_validation)<EOL>))<EOL><DEDENT>self._manual_validation = manual_validation<EOL>if protocol is None:<EOL><INDENT>protocol = set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL><DEDENT>if isinstance(protocol, str_cls):<EOL><INDENT>protocol = set([protocol])<EOL><DEDENT>elif not isinstance(protocol, set):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(protocol)<EOL>))<EOL><DEDENT>valid_protocols = set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>unsupported_protocols = protocol - valid_protocols<EOL>if unsupported_protocols:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(unsupported_protocols)<EOL>))<EOL><DEDENT>self._protocols = protocol<EOL>self._extra_trust_roots = []<EOL>if extra_trust_roots:<EOL><INDENT>for extra_trust_root in extra_trust_roots:<EOL><INDENT>if isinstance(extra_trust_root, Certificate):<EOL><INDENT>extra_trust_root = extra_trust_root.asn1<EOL><DEDENT>elif isinstance(extra_trust_root, byte_cls):<EOL><INDENT>extra_trust_root = parse_certificate(extra_trust_root)<EOL><DEDENT>elif isinstance(extra_trust_root, str_cls):<EOL><INDENT>with open(extra_trust_root, '<STR_LIT:rb>') as f:<EOL><INDENT>extra_trust_root = parse_certificate(f.read())<EOL><DEDENT><DEDENT>elif not isinstance(extra_trust_root, x509.Certificate):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(extra_trust_root)<EOL>))<EOL><DEDENT>self._extra_trust_roots.append(extra_trust_root)<EOL><DEDENT><DEDENT>ssl_ctx = None<EOL>try:<EOL><INDENT>if libcrypto_version_info < (<NUM_LIT:1>, <NUM_LIT:1>):<EOL><INDENT>method = libssl.SSLv23_method()<EOL><DEDENT>else:<EOL><INDENT>method = libssl.TLS_method()<EOL><DEDENT>ssl_ctx = libssl.SSL_CTX_new(method)<EOL>if 
is_null(ssl_ctx):<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>self._ssl_ctx = ssl_ctx<EOL>libssl.SSL_CTX_set_timeout(ssl_ctx, <NUM_LIT>)<EOL>libssl.SSL_CTX_ctrl(<EOL>ssl_ctx,<EOL>LibsslConst.SSL_CTRL_SET_SESS_CACHE_MODE,<EOL>LibsslConst.SSL_SESS_CACHE_CLIENT,<EOL>null()<EOL>)<EOL>if sys.platform in set(['<STR_LIT:win32>', '<STR_LIT>']):<EOL><INDENT>trust_list_path = _trust_list_path<EOL>if trust_list_path is None:<EOL><INDENT>trust_list_path = get_path()<EOL><DEDENT>if sys.platform == '<STR_LIT:win32>':<EOL><INDENT>path_encoding = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>path_encoding = '<STR_LIT:utf-8>'<EOL><DEDENT>result = libssl.SSL_CTX_load_verify_locations(<EOL>ssl_ctx,<EOL>trust_list_path.encode(path_encoding),<EOL>null()<EOL>)<EOL><DEDENT>else:<EOL><INDENT>result = libssl.SSL_CTX_set_default_verify_paths(ssl_ctx)<EOL><DEDENT>handle_openssl_error(result)<EOL>verify_mode = LibsslConst.SSL_VERIFY_NONE if manual_validation else LibsslConst.SSL_VERIFY_PEER<EOL>libssl.SSL_CTX_set_verify(ssl_ctx, verify_mode, null())<EOL>result = libssl.SSL_CTX_set_cipher_list(<EOL>ssl_ctx,<EOL>(<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>b'<STR_LIT>'<EOL>)<EOL>)<EOL>handle_openssl_error(result)<EOL>disabled_protocols = set(['<STR_LIT>'])<EOL>disabled_protocols |= (valid_protocols - self._protocols)<EOL>for disabled_protocol in disabled_protocols:<EOL><INDENT>libssl.SSL_CTX_ctrl(<EOL>ssl_ctx,<EOL>LibsslConst.SSL_CTRL_OPTIONS,<EOL>_PROTOCOL_MAP[disabled_protocol],<EOL>null()<EOL>)<EOL><DEDENT>if self._extra_trust_roots:<EOL><INDENT>x509_store = libssl.SSL_CTX_get_cert_store(ssl_ctx)<EOL>for cert in self._extra_trust_roots:<EOL><INDENT>oscrypto_cert = load_certificate(cert)<EOL>result = 
libssl.X509_STORE_add_cert(<EOL>x509_store,<EOL>oscrypto_cert.x509<EOL>)<EOL>handle_openssl_error(result)<EOL><DEDENT><DEDENT><DEDENT>except (Exception):<EOL><INDENT>if ssl_ctx:<EOL><INDENT>libssl.SSL_CTX_free(ssl_ctx)<EOL><DEDENT>self._ssl_ctx = None<EOL>raise<EOL><DEDENT>", "docstring": ":param protocol:\n    A unicode string or set of unicode strings representing allowable\n    protocols to negotiate with the server:\n\n     - \"TLSv1.2\"\n     - \"TLSv1.1\"\n     - \"TLSv1\"\n     - \"SSLv3\"\n\n    Default is: {\"TLSv1\", \"TLSv1.1\", \"TLSv1.2\"}\n\n:param manual_validation:\n    If certificate and certificate path validation should be skipped\n    and left to the developer to implement\n\n:param extra_trust_roots:\n    A list containing one or more certificates to be treated as trust\n    roots, in one of the following formats:\n     - A byte string of the DER encoded certificate\n     - A unicode string of the certificate filename\n     - An asn1crypto.x509.Certificate object\n     - An oscrypto.asymmetric.Certificate object\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9532:c0:m0"}
{"signature": "def shutdown(self):", "body": "self._shutdown(True)<EOL>", "docstring": "Shuts down the TLS session and then shuts down the underlying socket", "id": "f9532:c1:m13"}
{"signature": "def read_until(self, marker):", "body": "if not isinstance(marker, byte_cls) and not isinstance(marker, Pattern):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(marker)<EOL>))<EOL><DEDENT>output = b'<STR_LIT>'<EOL>is_regex = isinstance(marker, Pattern)<EOL>while True:<EOL><INDENT>if len(self._decrypted_bytes) > <NUM_LIT:0>:<EOL><INDENT>chunk = self._decrypted_bytes<EOL>self._decrypted_bytes = b'<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>if self._ssl is None:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>to_read = libssl.SSL_pending(self._ssl) or <NUM_LIT><EOL>chunk = self.read(to_read)<EOL><DEDENT>offset = len(output)<EOL>output += chunk<EOL>if is_regex:<EOL><INDENT>match = marker.search(output)<EOL>if match is not None:<EOL><INDENT>end = match.end()<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>start = max(<NUM_LIT:0>, offset - len(marker) - <NUM_LIT:1>)<EOL>match = output.find(marker, start)<EOL>if match != -<NUM_LIT:1>:<EOL><INDENT>end = match + len(marker)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>self._decrypted_bytes = output[end:] + self._decrypted_bytes<EOL>return output[<NUM_LIT:0>:end]<EOL>", "docstring": "Reads data from the socket until a marker is found. Data read includes\nthe marker.\n\n:param marker:\n    A byte string or regex object from re.compile(). Used to determine\n    when to stop reading. Regex objects are more inefficient since\n    they must scan the entire byte string of read data each time data\n    is read off the socket.\n\n:return:\n    A byte string of the data read, including the marker", "id": "f9532:c1:m7"}
{"signature": "def __init__(self, address, port, timeout=<NUM_LIT:10>, session=None):", "body": "self._raw_bytes = b'<STR_LIT>'<EOL>self._decrypted_bytes = b'<STR_LIT>'<EOL>if address is None and port is None:<EOL><INDENT>self._socket = None<EOL><DEDENT>else:<EOL><INDENT>if not isinstance(address, str_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(address)<EOL>))<EOL><DEDENT>if not isinstance(port, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(port)<EOL>))<EOL><DEDENT>if timeout is not None and not isinstance(timeout, numbers.Number):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(timeout)<EOL>))<EOL><DEDENT>self._socket = socket_.create_connection((address, port), timeout)<EOL>self._socket.settimeout(timeout)<EOL><DEDENT>if session is None:<EOL><INDENT>session = TLSSession()<EOL><DEDENT>elif not isinstance(session, TLSSession):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(session)<EOL>))<EOL><DEDENT>self._session = session<EOL>if self._socket:<EOL><INDENT>self._hostname = address<EOL>self._handshake()<EOL><DEDENT>", "docstring": ":param address:\n    A unicode string of the domain name or IP address to conenct to\n\n:param port:\n    An integer of the port number to connect to\n\n:param timeout:\n    An integer timeout to use for the socket\n\n:param session:\n    An oscrypto.tls.TLSSession object to allow for session reuse and\n    controlling the protocols and validation performed", "id": "f9532:c1:m1"}
{"signature": "@property<EOL><INDENT>def port(self):<DEDENT>", "body": "return self.socket.getpeername()[<NUM_LIT:1>]<EOL>", "docstring": "An integer of the port number the socket is connected to", "id": "f9532:c1:m26"}
{"signature": "@property<EOL><INDENT>def hostname(self):<DEDENT>", "body": "return self._hostname<EOL>", "docstring": "A unicode string of the TLS server domain name or IP address", "id": "f9532:c1:m25"}
{"signature": "@property<EOL><INDENT>def session_ticket(self):<DEDENT>", "body": "return self._session_ticket<EOL>", "docstring": "A unicode string of \"new\" or \"reused\" or None for no ticket", "id": "f9532:c1:m23"}
{"signature": "@property<EOL><INDENT>def compression(self):<DEDENT>", "body": "return self._compression<EOL>", "docstring": "A boolean if compression is enabled", "id": "f9532:c1:m21"}
{"signature": "def write(self, data):", "body": "data_len = len(data)<EOL>while data_len:<EOL><INDENT>if self._ssl is None:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>result = libssl.SSL_write(self._ssl, data, data_len)<EOL>self._raw_write()<EOL>if result <= <NUM_LIT:0>:<EOL><INDENT>error = libssl.SSL_get_error(self._ssl, result)<EOL>if error == LibsslConst.SSL_ERROR_WANT_READ:<EOL><INDENT>if self._raw_read() != b'<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>raise_disconnection()<EOL><DEDENT>elif error == LibsslConst.SSL_ERROR_WANT_WRITE:<EOL><INDENT>self._raw_write()<EOL>continue<EOL><DEDENT>elif error == LibsslConst.SSL_ERROR_ZERO_RETURN:<EOL><INDENT>self._gracefully_closed = True<EOL>self._shutdown(False)<EOL>self._raise_closed()<EOL><DEDENT>else:<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>, TLSError)<EOL><DEDENT><DEDENT>data = data[result:]<EOL>data_len = len(data)<EOL><DEDENT>", "docstring": "Writes data to the TLS-wrapped socket\n\n:param data:\n    A byte string to write to the socket\n\n:raises:\n    socket.socket - when a non-TLS socket error occurs\n    oscrypto.errors.TLSError - when a TLS-related error occurs\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9532:c1:m10"}
{"signature": "def select_write(self, timeout=None):", "body": "_, write_ready, _ = select.select([], [self._socket], [], timeout)<EOL>return len(write_ready) > <NUM_LIT:0><EOL>", "docstring": "Blocks until the socket is ready to be written to, or the timeout is hit\n\n:param timeout:\n    A float - the period of time to wait for the socket to be ready to\n    written to. None for no time limit.\n\n:return:\n    A boolean - if the socket is ready for writing. Will only be False\n    if timeout is not None.", "id": "f9532:c1:m11"}
{"signature": "def _shutdown(self, manual):", "body": "if self._ssl is None:<EOL><INDENT>return<EOL><DEDENT>while True:<EOL><INDENT>result = libssl.SSL_shutdown(self._ssl)<EOL>try:<EOL><INDENT>self._raw_write()<EOL><DEDENT>except (TLSDisconnectError):<EOL><INDENT>pass<EOL><DEDENT>if result >= <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>if result < <NUM_LIT:0>:<EOL><INDENT>error = libssl.SSL_get_error(self._ssl, result)<EOL>if error == LibsslConst.SSL_ERROR_WANT_READ:<EOL><INDENT>if self._raw_read() != b'<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>elif error == LibsslConst.SSL_ERROR_WANT_WRITE:<EOL><INDENT>self._raw_write()<EOL>continue<EOL><DEDENT>else:<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>, TLSError)<EOL><DEDENT><DEDENT><DEDENT>if manual:<EOL><INDENT>self._local_closed = True<EOL><DEDENT>libssl.SSL_free(self._ssl)<EOL>self._ssl = None<EOL>self._rbio = None<EOL>self._wbio = None<EOL>try:<EOL><INDENT>self._socket.shutdown(socket_.SHUT_RDWR)<EOL><DEDENT>except (socket_.error):<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Shuts down the TLS session and then shuts down the underlying socket\n\n:param manual:\n    A boolean if the connection was manually shutdown", "id": "f9532:c1:m12"}
{"signature": "def close(self):", "body": "try:<EOL><INDENT>self.shutdown()<EOL><DEDENT>finally:<EOL><INDENT>if self._socket:<EOL><INDENT>try:<EOL><INDENT>self._socket.close()<EOL><DEDENT>except (socket_.error):<EOL><INDENT>pass<EOL><DEDENT>self._socket = None<EOL><DEDENT><DEDENT>", "docstring": "Shuts down the TLS session and socket and forcibly closes it", "id": "f9532:c1:m14"}
{"signature": "@classmethod<EOL><INDENT>def wrap(cls, socket, hostname, session=None):<DEDENT>", "body": "if not isinstance(socket, socket_.socket):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(socket)<EOL>))<EOL><DEDENT>if not isinstance(hostname, str_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(hostname)<EOL>))<EOL><DEDENT>if session is not None and not isinstance(session, TLSSession):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(session)<EOL>))<EOL><DEDENT>new_socket = cls(None, None, session=session)<EOL>new_socket._socket = socket<EOL>new_socket._hostname = hostname<EOL>new_socket._handshake()<EOL>return new_socket<EOL>", "docstring": "Takes an existing socket and adds TLS\n\n:param socket:\n    A socket.socket object to wrap with TLS\n\n:param hostname:\n    A unicode string of the hostname or IP the socket is connected to\n\n:param session:\n    An existing TLSSession object to allow for session reuse, specific\n    protocol or manual certificate validation\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9532:c1:m0"}
{"signature": "def _raw_write(self):", "body": "data_available = libssl.BIO_ctrl_pending(self._wbio)<EOL>if data_available == <NUM_LIT:0>:<EOL><INDENT>return b'<STR_LIT>'<EOL><DEDENT>to_read = min(self._buffer_size, data_available)<EOL>read = libssl.BIO_read(self._wbio, self._bio_write_buffer, to_read)<EOL>to_write = bytes_from_buffer(self._bio_write_buffer, read)<EOL>output = to_write<EOL>while len(to_write):<EOL><INDENT>raise_disconnect = False<EOL>try:<EOL><INDENT>sent = self._socket.send(to_write)<EOL><DEDENT>except (socket_.error) as e:<EOL><INDENT>if e.errno == <NUM_LIT> or e.errno == <NUM_LIT:32>:<EOL><INDENT>raise_disconnect = True<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>if raise_disconnect:<EOL><INDENT>raise_disconnection()<EOL><DEDENT>to_write = to_write[sent:]<EOL>if len(to_write):<EOL><INDENT>self.select_write()<EOL><DEDENT><DEDENT>return output<EOL>", "docstring": "Takes ciphertext from the memory bio and writes it to the\nsocket.\n\n:return:\n    A byte string of ciphertext going to the socket. Used\n    for debugging the handshake only.", "id": "f9532:c1:m4"}
{"signature": "def _handshake(self):", "body": "self._ssl = None<EOL>self._rbio = None<EOL>self._wbio = None<EOL>try:<EOL><INDENT>self._ssl = libssl.SSL_new(self._session._ssl_ctx)<EOL>if is_null(self._ssl):<EOL><INDENT>self._ssl = None<EOL>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>mem_bio = libssl.BIO_s_mem()<EOL>self._rbio = libssl.BIO_new(mem_bio)<EOL>if is_null(self._rbio):<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>self._wbio = libssl.BIO_new(mem_bio)<EOL>if is_null(self._wbio):<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>libssl.SSL_set_bio(self._ssl, self._rbio, self._wbio)<EOL>utf8_domain = self._hostname.encode('<STR_LIT:utf-8>')<EOL>libssl.SSL_ctrl(<EOL>self._ssl,<EOL>LibsslConst.SSL_CTRL_SET_TLSEXT_HOSTNAME,<EOL>LibsslConst.TLSEXT_NAMETYPE_host_name,<EOL>utf8_domain<EOL>)<EOL>libssl.SSL_set_connect_state(self._ssl)<EOL>if self._session._ssl_session:<EOL><INDENT>libssl.SSL_set_session(self._ssl, self._session._ssl_session)<EOL><DEDENT>self._bio_write_buffer = buffer_from_bytes(self._buffer_size)<EOL>self._read_buffer = buffer_from_bytes(self._buffer_size)<EOL>handshake_server_bytes = b'<STR_LIT>'<EOL>handshake_client_bytes = b'<STR_LIT>'<EOL>while True:<EOL><INDENT>result = libssl.SSL_do_handshake(self._ssl)<EOL>handshake_client_bytes += self._raw_write()<EOL>if result == <NUM_LIT:1>:<EOL><INDENT>break<EOL><DEDENT>error = libssl.SSL_get_error(self._ssl, result)<EOL>if error == LibsslConst.SSL_ERROR_WANT_READ:<EOL><INDENT>chunk = self._raw_read()<EOL>if chunk == b'<STR_LIT>':<EOL><INDENT>if handshake_server_bytes == b'<STR_LIT>':<EOL><INDENT>raise_disconnection()<EOL><DEDENT>if detect_client_auth_request(handshake_server_bytes):<EOL><INDENT>raise_client_auth()<EOL><DEDENT>raise_protocol_error(handshake_server_bytes)<EOL><DEDENT>handshake_server_bytes += chunk<EOL><DEDENT>elif error == LibsslConst.SSL_ERROR_WANT_WRITE:<EOL><INDENT>handshake_client_bytes += self._raw_write()<EOL><DEDENT>elif error == 
LibsslConst.SSL_ERROR_ZERO_RETURN:<EOL><INDENT>self._gracefully_closed = True<EOL>self._shutdown(False)<EOL>self._raise_closed()<EOL><DEDENT>else:<EOL><INDENT>info = peek_openssl_error()<EOL>if libcrypto_version_info < (<NUM_LIT:1>, <NUM_LIT:1>):<EOL><INDENT>dh_key_info = (<EOL><NUM_LIT:20>,<EOL>LibsslConst.SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,<EOL>LibsslConst.SSL_R_DH_KEY_TOO_SMALL<EOL>)<EOL><DEDENT>else:<EOL><INDENT>dh_key_info = (<EOL><NUM_LIT:20>,<EOL>LibsslConst.SSL_F_TLS_PROCESS_SKE_DHE,<EOL>LibsslConst.SSL_R_DH_KEY_TOO_SMALL<EOL>)<EOL><DEDENT>if info == dh_key_info:<EOL><INDENT>raise_dh_params()<EOL><DEDENT>if libcrypto_version_info < (<NUM_LIT:1>, <NUM_LIT:1>):<EOL><INDENT>unknown_protocol_info = (<EOL><NUM_LIT:20>,<EOL>LibsslConst.SSL_F_SSL23_GET_SERVER_HELLO,<EOL>LibsslConst.SSL_R_UNKNOWN_PROTOCOL<EOL>)<EOL><DEDENT>else:<EOL><INDENT>unknown_protocol_info = (<EOL><NUM_LIT:20>,<EOL>LibsslConst.SSL_F_SSL3_GET_RECORD,<EOL>LibsslConst.SSL_R_WRONG_VERSION_NUMBER<EOL>)<EOL><DEDENT>if info == unknown_protocol_info:<EOL><INDENT>raise_protocol_error(handshake_server_bytes)<EOL><DEDENT>tls_version_info_error = (<EOL><NUM_LIT:20>,<EOL>LibsslConst.SSL_F_SSL23_GET_SERVER_HELLO,<EOL>LibsslConst.SSL_R_TLSV1_ALERT_PROTOCOL_VERSION<EOL>)<EOL>if info == tls_version_info_error:<EOL><INDENT>raise_protocol_version()<EOL><DEDENT>handshake_error_info = (<EOL><NUM_LIT:20>,<EOL>LibsslConst.SSL_F_SSL23_GET_SERVER_HELLO,<EOL>LibsslConst.SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE<EOL>)<EOL>if info == handshake_error_info:<EOL><INDENT>raise_handshake()<EOL><DEDENT>handshake_failure_info = (<EOL><NUM_LIT:20>,<EOL>LibsslConst.SSL_F_SSL3_READ_BYTES,<EOL>LibsslConst.SSL_R_SSLV3_ALERT_HANDSHAKE_FAILURE<EOL>)<EOL>if info == handshake_failure_info:<EOL><INDENT>raise_client_auth()<EOL><DEDENT>if libcrypto_version_info < (<NUM_LIT:1>, <NUM_LIT:1>):<EOL><INDENT>cert_verify_failed_info = 
(<EOL><NUM_LIT:20>,<EOL>LibsslConst.SSL_F_SSL3_GET_SERVER_CERTIFICATE,<EOL>LibsslConst.SSL_R_CERTIFICATE_VERIFY_FAILED<EOL>)<EOL><DEDENT>else:<EOL><INDENT>cert_verify_failed_info = (<EOL><NUM_LIT:20>,<EOL>LibsslConst.SSL_F_TLS_PROCESS_SERVER_CERTIFICATE,<EOL>LibsslConst.SSL_R_CERTIFICATE_VERIFY_FAILED<EOL>)<EOL><DEDENT>if info == cert_verify_failed_info:<EOL><INDENT>verify_result = libssl.SSL_get_verify_result(self._ssl)<EOL>chain = extract_chain(handshake_server_bytes)<EOL>self_signed = False<EOL>time_invalid = False<EOL>no_issuer = False<EOL>cert = None<EOL>oscrypto_cert = None<EOL>if chain:<EOL><INDENT>cert = chain[<NUM_LIT:0>]<EOL>oscrypto_cert = load_certificate(cert)<EOL>self_signed = oscrypto_cert.self_signed<EOL>issuer_error_codes = set([<EOL>LibsslConst.X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT,<EOL>LibsslConst.X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN,<EOL>LibsslConst.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY<EOL>])<EOL>if verify_result in issuer_error_codes:<EOL><INDENT>no_issuer = not self_signed<EOL><DEDENT>time_error_codes = set([<EOL>LibsslConst.X509_V_ERR_CERT_HAS_EXPIRED,<EOL>LibsslConst.X509_V_ERR_CERT_NOT_YET_VALID<EOL>])<EOL>time_invalid = verify_result in time_error_codes<EOL><DEDENT>if time_invalid:<EOL><INDENT>raise_expired_not_yet_valid(cert)<EOL><DEDENT>if no_issuer:<EOL><INDENT>raise_no_issuer(cert)<EOL><DEDENT>if self_signed:<EOL><INDENT>raise_self_signed(cert)<EOL><DEDENT>if oscrypto_cert and oscrypto_cert.asn1.hash_algo in set(['<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise_weak_signature(oscrypto_cert)<EOL><DEDENT>raise_verification(cert)<EOL><DEDENT>handle_openssl_error(<NUM_LIT:0>, TLSError)<EOL><DEDENT><DEDENT>session_info = parse_session_info(<EOL>handshake_server_bytes,<EOL>handshake_client_bytes<EOL>)<EOL>self._protocol = session_info['<STR_LIT>']<EOL>self._cipher_suite = session_info['<STR_LIT>']<EOL>self._compression = session_info['<STR_LIT>']<EOL>self._session_id = session_info['<STR_LIT>']<EOL>self._session_ticket = 
session_info['<STR_LIT>']<EOL>if self._cipher_suite.find('<STR_LIT>') != -<NUM_LIT:1>:<EOL><INDENT>dh_params_length = get_dh_params_length(handshake_server_bytes)<EOL>if dh_params_length < <NUM_LIT>:<EOL><INDENT>self.close()<EOL>raise_dh_params()<EOL><DEDENT><DEDENT>if self._session_id == '<STR_LIT>' or self._session_ticket == '<STR_LIT>':<EOL><INDENT>if self._session._ssl_session:<EOL><INDENT>libssl.SSL_SESSION_free(self._session._ssl_session)<EOL><DEDENT>self._session._ssl_session = libssl.SSL_get1_session(self._ssl)<EOL><DEDENT>if not self._session._manual_validation:<EOL><INDENT>if self.certificate.hash_algo in set(['<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise_weak_signature(self.certificate)<EOL><DEDENT>if not self.certificate.is_valid_domain_ip(self._hostname):<EOL><INDENT>raise_hostname(self.certificate, self._hostname)<EOL><DEDENT><DEDENT><DEDENT>except (OSError, socket_.error):<EOL><INDENT>if self._ssl:<EOL><INDENT>libssl.SSL_free(self._ssl)<EOL>self._ssl = None<EOL>self._rbio = None<EOL>self._wbio = None<EOL><DEDENT>else:<EOL><INDENT>if self._rbio:<EOL><INDENT>libssl.BIO_free(self._rbio)<EOL>self._rbio = None<EOL><DEDENT>if self._wbio:<EOL><INDENT>libssl.BIO_free(self._wbio)<EOL>self._wbio = None<EOL><DEDENT><DEDENT>self.close()<EOL>raise<EOL><DEDENT>", "docstring": "Perform an initial TLS handshake", "id": "f9532:c1:m2"}
{"signature": "def read(self, max_length):", "body": "if not isinstance(max_length, int_types):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(max_length)<EOL>))<EOL><DEDENT>buffered_length = len(self._decrypted_bytes)<EOL>if buffered_length >= max_length:<EOL><INDENT>output = self._decrypted_bytes[<NUM_LIT:0>:max_length]<EOL>self._decrypted_bytes = self._decrypted_bytes[max_length:]<EOL>return output<EOL><DEDENT>if self._ssl is None:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>if buffered_length > <NUM_LIT:0> and not self.select_read(<NUM_LIT:0>):<EOL><INDENT>output = self._decrypted_bytes<EOL>self._decrypted_bytes = b'<STR_LIT>'<EOL>return output<EOL><DEDENT>to_read = min(self._buffer_size, max_length - buffered_length)<EOL>output = self._decrypted_bytes<EOL>again = True<EOL>while again:<EOL><INDENT>again = False<EOL>result = libssl.SSL_read(self._ssl, self._read_buffer, to_read)<EOL>self._raw_write()<EOL>if result <= <NUM_LIT:0>:<EOL><INDENT>error = libssl.SSL_get_error(self._ssl, result)<EOL>if error == LibsslConst.SSL_ERROR_WANT_READ:<EOL><INDENT>if self._raw_read() != b'<STR_LIT>':<EOL><INDENT>again = True<EOL>continue<EOL><DEDENT>raise_disconnection()<EOL><DEDENT>elif error == LibsslConst.SSL_ERROR_WANT_WRITE:<EOL><INDENT>self._raw_write()<EOL>again = True<EOL>continue<EOL><DEDENT>elif error == LibsslConst.SSL_ERROR_ZERO_RETURN:<EOL><INDENT>self._gracefully_closed = True<EOL>self._shutdown(False)<EOL>break<EOL><DEDENT>else:<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>, TLSError)<EOL><DEDENT><DEDENT>output += bytes_from_buffer(self._read_buffer, result)<EOL><DEDENT>if self._gracefully_closed and len(output) == <NUM_LIT:0>:<EOL><INDENT>self._raise_closed()<EOL><DEDENT>self._decrypted_bytes = output[max_length:]<EOL>return output[<NUM_LIT:0>:max_length]<EOL>", "docstring": "Reads data from the TLS-wrapped socket\n\n:param max_length:\n    The number of bytes to read - output may be less than this\n\n:raises:\n    socket.socket - 
when a non-TLS socket error occurs\n    oscrypto.errors.TLSError - when a TLS-related error occurs\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the data read", "id": "f9532:c1:m5"}
{"signature": "def ecdsa_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>certificate_or_public_key.algorithm.upper()<EOL>))<EOL><DEDENT>return _verify(certificate_or_public_key, signature, data, hash_algorithm)<EOL>", "docstring": "Verifies an ECDSA signature\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n    A byte string of the signature to verify\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n    oscrypto.errors.SignatureError - when the signature is determined to be invalid\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9533:m17"}
{"signature": "@property<EOL><INDENT>def algorithm(self):<DEDENT>", "body": "return self.asn1.algorithm<EOL>", "docstring": ":return:\n    A unicode string of \"rsa\", \"dsa\" or \"ec\"", "id": "f9533:c0:m1"}
{"signature": "def ecdsa_sign(private_key, data, hash_algorithm):", "body": "if private_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>private_key.algorithm.upper()<EOL>))<EOL><DEDENT>return _sign(private_key, data, hash_algorithm)<EOL>", "docstring": "Generates an ECDSA signature\n\n:param private_key:\n    The PrivateKey to generate the signature with\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the signature", "id": "f9533:m22"}
{"signature": "def rsa_pkcs1v15_encrypt(certificate_or_public_key, data):", "body": "return _encrypt(certificate_or_public_key, data, LibcryptoConst.RSA_PKCS1_PADDING)<EOL>", "docstring": "Encrypts a byte string using an RSA public key or certificate. Uses PKCS#1\nv1.5 padding.\n\n:param certificate_or_public_key:\n    A PublicKey or Certificate object\n\n:param data:\n    A byte string, with a maximum length 11 bytes less than the key length\n    (in bytes)\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the encrypted data", "id": "f9533:m8"}
{"signature": "def rsa_pss_verify(certificate_or_public_key, signature, data, hash_algorithm):", "body": "if certificate_or_public_key.algorithm != '<STR_LIT>':<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>certificate_or_public_key.algorithm.upper()<EOL>))<EOL><DEDENT>return _verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=True)<EOL>", "docstring": "Verifies an RSASSA-PSS signature. For the PSS padding the mask gen algorithm\nwill be mgf1 using the same hash algorithm as the signature. The salt length\nwith be the length of the hash algorithm, and the trailer field with be the\nstandard 0xBC byte.\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n    A byte string of the signature to verify\n\n:param data:\n    A byte string of the data the signature is for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:raises:\n    oscrypto.errors.SignatureError - when the signature is determined to be invalid\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9533:m15"}
{"signature": "def rsa_oaep_encrypt(certificate_or_public_key, data):", "body": "return _encrypt(certificate_or_public_key, data, LibcryptoConst.RSA_PKCS1_OAEP_PADDING)<EOL>", "docstring": "Encrypts a byte string using an RSA public key or certificate. Uses PKCS#1\nOAEP padding with SHA1.\n\n:param certificate_or_public_key:\n    A PublicKey or Certificate object\n\n:param data:\n    A byte string, with a maximum length 41 bytes (or more) less than the\n    key length (in bytes)\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the encrypted data", "id": "f9533:m10"}
{"signature": "def load_pkcs12(source, password=None):", "body": "if password is not None:<EOL><INDENT>if isinstance(password, str_cls):<EOL><INDENT>password = password.encode('<STR_LIT:utf-8>')<EOL><DEDENT>if not isinstance(password, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(password)<EOL>))<EOL><DEDENT><DEDENT>if isinstance(source, str_cls):<EOL><INDENT>with open(source, '<STR_LIT:rb>') as f:<EOL><INDENT>source = f.read()<EOL><DEDENT><DEDENT>elif not isinstance(source, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(source)<EOL>))<EOL><DEDENT>key_info, cert_info, extra_certs_info = parse_pkcs12(source, password)<EOL>key = None<EOL>cert = None<EOL>if key_info:<EOL><INDENT>key = _load_key(key_info)<EOL><DEDENT>if cert_info:<EOL><INDENT>cert = _load_x509(cert_info)<EOL><DEDENT>extra_certs = [_load_x509(info) for info in extra_certs_info]<EOL>return (key, cert, extra_certs)<EOL>", "docstring": "Loads a .p12 or .pfx file into a PrivateKey object and one or more\nCertificates objects\n\n:param source:\n    A byte string of file contents or a unicode string filename\n\n:param password:\n    A byte or unicode string to decrypt the PKCS12 file. Unicode strings\n    will be encoded using UTF-8.\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    oscrypto.errors.AsymmetricKeyError - when a contained key is incompatible with the OS crypto library\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A three-element tuple containing (PrivateKey, Certificate, [Certificate, ...])", "id": "f9533:m7"}
{"signature": "def __init__(self, evp_pkey, asn1):", "body": "self.evp_pkey = evp_pkey<EOL>self.asn1 = asn1<EOL>self._lib = libcrypto<EOL>", "docstring": ":param evp_pkey:\n    An OpenSSL EVP_PKEY value from loading/importing the key\n\n:param asn1:\n    An asn1crypto.keys.PrivateKeyInfo object", "id": "f9533:c0:m0"}
{"signature": "@property<EOL><INDENT>def evp_pkey(self):<DEDENT>", "body": "return self.public_key.evp_pkey<EOL>", "docstring": ":return:\n    The EVP_PKEY of the public key this certificate contains", "id": "f9533:c2:m5"}
{"signature": "def load_private_key(source, password=None):", "body": "if isinstance(source, keys.PrivateKeyInfo):<EOL><INDENT>private_object = source<EOL><DEDENT>else:<EOL><INDENT>if password is not None:<EOL><INDENT>if isinstance(password, str_cls):<EOL><INDENT>password = password.encode('<STR_LIT:utf-8>')<EOL><DEDENT>if not isinstance(password, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(password)<EOL>))<EOL><DEDENT><DEDENT>if isinstance(source, str_cls):<EOL><INDENT>with open(source, '<STR_LIT:rb>') as f:<EOL><INDENT>source = f.read()<EOL><DEDENT><DEDENT>elif not isinstance(source, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(source)<EOL>))<EOL><DEDENT>private_object = parse_private(source, password)<EOL><DEDENT>return _load_key(private_object)<EOL>", "docstring": "Loads a private key into a PrivateKey object\n\n:param source:\n    A byte string of file contents, a unicode string filename or an\n    asn1crypto.keys.PrivateKeyInfo object\n\n:param password:\n    A byte or unicode string to decrypt the private key file. Unicode\n    strings will be encoded using UTF-8. Not used is the source is a\n    PrivateKeyInfo object.\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    oscrypto.errors.AsymmetricKeyError - when the private key is incompatible with the OS crypto library\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A PrivateKey object", "id": "f9533:m4"}
{"signature": "@property<EOL><INDENT>def bit_size(self):<DEDENT>", "body": "return self.public_key.bit_size<EOL>", "docstring": ":return:\n    The number of bits in the public key, as an integer", "id": "f9533:c2:m3"}
{"signature": "@property<EOL><INDENT>def algorithm(self):<DEDENT>", "body": "return self.public_key.algorithm<EOL>", "docstring": ":return:\n    A unicode string of \"rsa\", \"dsa\" or \"ec\"", "id": "f9533:c2:m1"}
{"signature": "def generate_pair(algorithm, bit_size=None, curve=None):", "body": "if algorithm not in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(algorithm)<EOL>))<EOL><DEDENT>if algorithm == '<STR_LIT>':<EOL><INDENT>if bit_size not in set([<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>]):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(bit_size)<EOL>))<EOL><DEDENT><DEDENT>elif algorithm == '<STR_LIT>':<EOL><INDENT>if libcrypto_version_info < (<NUM_LIT:1>,):<EOL><INDENT>if bit_size != <NUM_LIT>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(bit_size)<EOL>))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if bit_size not in set([<NUM_LIT>, <NUM_LIT>, <NUM_LIT>]):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(bit_size)<EOL>))<EOL><DEDENT><DEDENT><DEDENT>elif algorithm == '<STR_LIT>':<EOL><INDENT>if curve not in set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>repr(curve)<EOL>))<EOL><DEDENT><DEDENT>if algorithm == '<STR_LIT>':<EOL><INDENT>rsa = None<EOL>exponent = None<EOL>try:<EOL><INDENT>rsa = libcrypto.RSA_new()<EOL>if is_null(rsa):<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>exponent_pointer = new(libcrypto, '<STR_LIT>')<EOL>result = libcrypto.BN_dec2bn(exponent_pointer, b'<STR_LIT>')<EOL>handle_openssl_error(result)<EOL>exponent = unwrap(exponent_pointer)<EOL>result = libcrypto.RSA_generate_key_ex(rsa, bit_size, exponent, null())<EOL>handle_openssl_error(result)<EOL>buffer_length = libcrypto.i2d_RSAPublicKey(rsa, null())<EOL>if buffer_length < <NUM_LIT:0>:<EOL><INDENT>handle_openssl_error(buffer_length)<EOL><DEDENT>buffer = buffer_from_bytes(buffer_length)<EOL>result = libcrypto.i2d_RSAPublicKey(rsa, buffer_pointer(buffer))<EOL>if result < <NUM_LIT:0>:<EOL><INDENT>handle_openssl_error(result)<EOL><DEDENT>public_key_bytes = 
bytes_from_buffer(buffer, buffer_length)<EOL>buffer_length = libcrypto.i2d_RSAPrivateKey(rsa, null())<EOL>if buffer_length < <NUM_LIT:0>:<EOL><INDENT>handle_openssl_error(buffer_length)<EOL><DEDENT>buffer = buffer_from_bytes(buffer_length)<EOL>result = libcrypto.i2d_RSAPrivateKey(rsa, buffer_pointer(buffer))<EOL>if result < <NUM_LIT:0>:<EOL><INDENT>handle_openssl_error(result)<EOL><DEDENT>private_key_bytes = bytes_from_buffer(buffer, buffer_length)<EOL><DEDENT>finally:<EOL><INDENT>if rsa:<EOL><INDENT>libcrypto.RSA_free(rsa)<EOL><DEDENT>if exponent:<EOL><INDENT>libcrypto.BN_free(exponent)<EOL><DEDENT><DEDENT><DEDENT>elif algorithm == '<STR_LIT>':<EOL><INDENT>dsa = None<EOL>try:<EOL><INDENT>dsa = libcrypto.DSA_new()<EOL>if is_null(dsa):<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>result = libcrypto.DSA_generate_parameters_ex(dsa, bit_size, null(), <NUM_LIT:0>, null(), null(), null())<EOL>handle_openssl_error(result)<EOL>result = libcrypto.DSA_generate_key(dsa)<EOL>handle_openssl_error(result)<EOL>buffer_length = libcrypto.i2d_DSA_PUBKEY(dsa, null())<EOL>if buffer_length < <NUM_LIT:0>:<EOL><INDENT>handle_openssl_error(buffer_length)<EOL><DEDENT>buffer = buffer_from_bytes(buffer_length)<EOL>result = libcrypto.i2d_DSA_PUBKEY(dsa, buffer_pointer(buffer))<EOL>if result < <NUM_LIT:0>:<EOL><INDENT>handle_openssl_error(result)<EOL><DEDENT>public_key_bytes = bytes_from_buffer(buffer, buffer_length)<EOL>buffer_length = libcrypto.i2d_DSAPrivateKey(dsa, null())<EOL>if buffer_length < <NUM_LIT:0>:<EOL><INDENT>handle_openssl_error(buffer_length)<EOL><DEDENT>buffer = buffer_from_bytes(buffer_length)<EOL>result = libcrypto.i2d_DSAPrivateKey(dsa, buffer_pointer(buffer))<EOL>if result < <NUM_LIT:0>:<EOL><INDENT>handle_openssl_error(result)<EOL><DEDENT>private_key_bytes = bytes_from_buffer(buffer, buffer_length)<EOL><DEDENT>finally:<EOL><INDENT>if dsa:<EOL><INDENT>libcrypto.DSA_free(dsa)<EOL><DEDENT><DEDENT><DEDENT>elif algorithm == '<STR_LIT>':<EOL><INDENT>ec_key = 
None<EOL>try:<EOL><INDENT>curve_id = {<EOL>'<STR_LIT>': LibcryptoConst.NID_X9_62_prime256v1,<EOL>'<STR_LIT>': LibcryptoConst.NID_secp384r1,<EOL>'<STR_LIT>': LibcryptoConst.NID_secp521r1,<EOL>}[curve]<EOL>ec_key = libcrypto.EC_KEY_new_by_curve_name(curve_id)<EOL>if is_null(ec_key):<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>result = libcrypto.EC_KEY_generate_key(ec_key)<EOL>handle_openssl_error(result)<EOL>libcrypto.EC_KEY_set_asn1_flag(ec_key, LibcryptoConst.OPENSSL_EC_NAMED_CURVE)<EOL>buffer_length = libcrypto.i2o_ECPublicKey(ec_key, null())<EOL>if buffer_length < <NUM_LIT:0>:<EOL><INDENT>handle_openssl_error(buffer_length)<EOL><DEDENT>buffer = buffer_from_bytes(buffer_length)<EOL>result = libcrypto.i2o_ECPublicKey(ec_key, buffer_pointer(buffer))<EOL>if result < <NUM_LIT:0>:<EOL><INDENT>handle_openssl_error(result)<EOL><DEDENT>public_key_point_bytes = bytes_from_buffer(buffer, buffer_length)<EOL>public_key = keys.PublicKeyInfo({<EOL>'<STR_LIT>': keys.PublicKeyAlgorithm({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': keys.ECDomainParameters(<EOL>name='<STR_LIT>',<EOL>value=curve<EOL>)<EOL>}),<EOL>'<STR_LIT>': public_key_point_bytes<EOL>})<EOL>public_key_bytes = public_key.dump()<EOL>buffer_length = libcrypto.i2d_ECPrivateKey(ec_key, null())<EOL>if buffer_length < <NUM_LIT:0>:<EOL><INDENT>handle_openssl_error(buffer_length)<EOL><DEDENT>buffer = buffer_from_bytes(buffer_length)<EOL>result = libcrypto.i2d_ECPrivateKey(ec_key, buffer_pointer(buffer))<EOL>if result < <NUM_LIT:0>:<EOL><INDENT>handle_openssl_error(result)<EOL><DEDENT>private_key_bytes = bytes_from_buffer(buffer, buffer_length)<EOL><DEDENT>finally:<EOL><INDENT>if ec_key:<EOL><INDENT>libcrypto.EC_KEY_free(ec_key)<EOL><DEDENT><DEDENT><DEDENT>return (load_public_key(public_key_bytes), load_private_key(private_key_bytes))<EOL>", "docstring": "Generates a public/private key pair\n\n:param algorithm:\n    The key algorithm - \"rsa\", \"dsa\" or \"ec\"\n\n:param bit_size:\n    An integer - used 
for \"rsa\" and \"dsa\". For \"rsa\" the value may be 1024,\n    2048, 3072 or 4096. For \"dsa\" the value may be 1024, plus 2048 or 3072\n    if OpenSSL 1.0.0 or newer is available.\n\n:param curve:\n    A unicode string - used for \"ec\" keys. Valid values include \"secp256r1\",\n    \"secp384r1\" and \"secp521r1\".\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A 2-element tuple of (PublicKey, PrivateKey). The contents of each key\n    may be saved by calling .asn1.dump().", "id": "f9533:m0"}
{"signature": "def _encrypt(certificate_or_public_key, data, padding):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(certificate_or_public_key)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>rsa = None<EOL>try:<EOL><INDENT>buffer_size = libcrypto.EVP_PKEY_size(certificate_or_public_key.evp_pkey)<EOL>buffer = buffer_from_bytes(buffer_size)<EOL>rsa = libcrypto.EVP_PKEY_get1_RSA(certificate_or_public_key.evp_pkey)<EOL>res = libcrypto.RSA_public_encrypt(len(data), data, buffer, rsa, padding)<EOL>handle_openssl_error(res)<EOL>return bytes_from_buffer(buffer, res)<EOL><DEDENT>finally:<EOL><INDENT>if rsa:<EOL><INDENT>libcrypto.RSA_free(rsa)<EOL><DEDENT><DEDENT>", "docstring": "Encrypts plaintext using an RSA public key or certificate\n\n:param certificate_or_public_key:\n    A PublicKey, Certificate or PrivateKey object\n\n:param data:\n    The byte string to encrypt\n\n:param padding:\n    The padding mode to use\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library\n\n:return:\n    A byte string of the encrypted data", "id": "f9533:m12"}
{"signature": "@property<EOL><INDENT>def bit_size(self):<DEDENT>", "body": "return self.asn1.bit_size<EOL>", "docstring": ":return:\n    The number of bits in the key, as an integer", "id": "f9533:c0:m3"}
{"signature": "def _verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):", "body": "if not isinstance(certificate_or_public_key, (Certificate, PublicKey)):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(certificate_or_public_key)<EOL>))<EOL><DEDENT>if not isinstance(signature, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(signature)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>valid_hash_algorithms = set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>if certificate_or_public_key.algorithm == '<STR_LIT>' and not rsa_pss_padding:<EOL><INDENT>valid_hash_algorithms |= set(['<STR_LIT>'])<EOL><DEDENT>if hash_algorithm not in valid_hash_algorithms:<EOL><INDENT>valid_hash_algorithms_error = '<STR_LIT>'<EOL>if certificate_or_public_key.algorithm == '<STR_LIT>' and not rsa_pss_padding:<EOL><INDENT>valid_hash_algorithms_error += '<STR_LIT>'<EOL><DEDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>valid_hash_algorithms_error,<EOL>repr(hash_algorithm)<EOL>))<EOL><DEDENT>if certificate_or_public_key.algorithm != '<STR_LIT>' and rsa_pss_padding:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>certificate_or_public_key.algorithm.upper()<EOL>))<EOL><DEDENT>if certificate_or_public_key.algorithm == '<STR_LIT>' and hash_algorithm == '<STR_LIT>':<EOL><INDENT>if len(data) > certificate_or_public_key.byte_size - <NUM_LIT:11>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>certificate_or_public_key.byte_size,<EOL>len(data)<EOL>))<EOL><DEDENT>rsa = None<EOL>try:<EOL><INDENT>rsa = libcrypto.EVP_PKEY_get1_RSA(certificate_or_public_key.evp_pkey)<EOL>if is_null(rsa):<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>buffer_size = 
libcrypto.EVP_PKEY_size(certificate_or_public_key.evp_pkey)<EOL>decrypted_buffer = buffer_from_bytes(buffer_size)<EOL>decrypted_length = libcrypto.RSA_public_decrypt(<EOL>len(signature),<EOL>signature,<EOL>decrypted_buffer,<EOL>rsa,<EOL>LibcryptoConst.RSA_PKCS1_PADDING<EOL>)<EOL>handle_openssl_error(decrypted_length)<EOL>decrypted_bytes = bytes_from_buffer(decrypted_buffer, decrypted_length)<EOL>if not constant_compare(data, decrypted_bytes):<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT>return<EOL><DEDENT>finally:<EOL><INDENT>if rsa:<EOL><INDENT>libcrypto.RSA_free(rsa)<EOL><DEDENT><DEDENT><DEDENT>evp_md_ctx = None<EOL>rsa = None<EOL>dsa = None<EOL>dsa_sig = None<EOL>ec_key = None<EOL>ecdsa_sig = None<EOL>try:<EOL><INDENT>if libcrypto_version_info < (<NUM_LIT:1>, <NUM_LIT:1>):<EOL><INDENT>evp_md_ctx = libcrypto.EVP_MD_CTX_create()<EOL><DEDENT>else:<EOL><INDENT>evp_md_ctx = libcrypto.EVP_MD_CTX_new()<EOL><DEDENT>evp_md = {<EOL>'<STR_LIT>': libcrypto.EVP_md5,<EOL>'<STR_LIT>': libcrypto.EVP_sha1,<EOL>'<STR_LIT>': libcrypto.EVP_sha224,<EOL>'<STR_LIT>': libcrypto.EVP_sha256,<EOL>'<STR_LIT>': libcrypto.EVP_sha384,<EOL>'<STR_LIT>': libcrypto.EVP_sha512<EOL>}[hash_algorithm]()<EOL>if libcrypto_version_info < (<NUM_LIT:1>,):<EOL><INDENT>if certificate_or_public_key.algorithm == '<STR_LIT>' and rsa_pss_padding:<EOL><INDENT>digest = getattr(hashlib, hash_algorithm)(data).digest()<EOL>rsa = libcrypto.EVP_PKEY_get1_RSA(certificate_or_public_key.evp_pkey)<EOL>if is_null(rsa):<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>buffer_size = libcrypto.EVP_PKEY_size(certificate_or_public_key.evp_pkey)<EOL>decoded_buffer = buffer_from_bytes(buffer_size)<EOL>decoded_length = libcrypto.RSA_public_decrypt(<EOL>len(signature),<EOL>signature,<EOL>decoded_buffer,<EOL>rsa,<EOL>LibcryptoConst.RSA_NO_PADDING<EOL>)<EOL>handle_openssl_error(decoded_length)<EOL>res = 
libcrypto.RSA_verify_PKCS1_PSS(<EOL>rsa,<EOL>digest,<EOL>evp_md,<EOL>decoded_buffer,<EOL>LibcryptoConst.EVP_MD_CTX_FLAG_PSS_MDLEN<EOL>)<EOL><DEDENT>elif certificate_or_public_key.algorithm == '<STR_LIT>':<EOL><INDENT>res = libcrypto.EVP_DigestInit_ex(evp_md_ctx, evp_md, null())<EOL>handle_openssl_error(res)<EOL>res = libcrypto.EVP_DigestUpdate(evp_md_ctx, data, len(data))<EOL>handle_openssl_error(res)<EOL>res = libcrypto.EVP_VerifyFinal(<EOL>evp_md_ctx,<EOL>signature,<EOL>len(signature),<EOL>certificate_or_public_key.evp_pkey<EOL>)<EOL><DEDENT>elif certificate_or_public_key.algorithm == '<STR_LIT>':<EOL><INDENT>digest = getattr(hashlib, hash_algorithm)(data).digest()<EOL>signature_buffer = buffer_from_bytes(signature)<EOL>signature_pointer = buffer_pointer(signature_buffer)<EOL>dsa_sig = libcrypto.d2i_DSA_SIG(null(), signature_pointer, len(signature))<EOL>if is_null(dsa_sig):<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT>dsa = libcrypto.EVP_PKEY_get1_DSA(certificate_or_public_key.evp_pkey)<EOL>if is_null(dsa):<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>res = libcrypto.DSA_do_verify(digest, len(digest), dsa_sig, dsa)<EOL><DEDENT>elif certificate_or_public_key.algorithm == '<STR_LIT>':<EOL><INDENT>digest = getattr(hashlib, hash_algorithm)(data).digest()<EOL>signature_buffer = buffer_from_bytes(signature)<EOL>signature_pointer = buffer_pointer(signature_buffer)<EOL>ecdsa_sig = libcrypto.d2i_ECDSA_SIG(null(), signature_pointer, len(signature))<EOL>if is_null(ecdsa_sig):<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT>ec_key = libcrypto.EVP_PKEY_get1_EC_KEY(certificate_or_public_key.evp_pkey)<EOL>if is_null(ec_key):<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>res = libcrypto.ECDSA_do_verify(digest, len(digest), ecdsa_sig, ec_key)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>evp_pkey_ctx_pointer_pointer = new(libcrypto, '<STR_LIT>')<EOL>res = 
libcrypto.EVP_DigestVerifyInit(<EOL>evp_md_ctx,<EOL>evp_pkey_ctx_pointer_pointer,<EOL>evp_md,<EOL>null(),<EOL>certificate_or_public_key.evp_pkey<EOL>)<EOL>handle_openssl_error(res)<EOL>evp_pkey_ctx_pointer = unwrap(evp_pkey_ctx_pointer_pointer)<EOL>if rsa_pss_padding:<EOL><INDENT>res = libcrypto.EVP_PKEY_CTX_ctrl(<EOL>evp_pkey_ctx_pointer,<EOL>LibcryptoConst.EVP_PKEY_RSA,<EOL>-<NUM_LIT:1>,  <EOL>LibcryptoConst.EVP_PKEY_CTRL_RSA_PADDING,<EOL>LibcryptoConst.RSA_PKCS1_PSS_PADDING,<EOL>null()<EOL>)<EOL>handle_openssl_error(res)<EOL>res = libcrypto.EVP_PKEY_CTX_ctrl(<EOL>evp_pkey_ctx_pointer,<EOL>LibcryptoConst.EVP_PKEY_RSA,<EOL>LibcryptoConst.EVP_PKEY_OP_SIGN | LibcryptoConst.EVP_PKEY_OP_VERIFY,<EOL>LibcryptoConst.EVP_PKEY_CTRL_RSA_PSS_SALTLEN,<EOL>-<NUM_LIT:1>,<EOL>null()<EOL>)<EOL>handle_openssl_error(res)<EOL><DEDENT>res = libcrypto.EVP_DigestUpdate(evp_md_ctx, data, len(data))<EOL>handle_openssl_error(res)<EOL>res = libcrypto.EVP_DigestVerifyFinal(evp_md_ctx, signature, len(signature))<EOL><DEDENT>if res < <NUM_LIT:1>:<EOL><INDENT>raise SignatureError('<STR_LIT>')<EOL><DEDENT>handle_openssl_error(res)<EOL><DEDENT>finally:<EOL><INDENT>if evp_md_ctx:<EOL><INDENT>if libcrypto_version_info < (<NUM_LIT:1>, <NUM_LIT:1>):<EOL><INDENT>libcrypto.EVP_MD_CTX_destroy(evp_md_ctx)<EOL><DEDENT>else:<EOL><INDENT>libcrypto.EVP_MD_CTX_free(evp_md_ctx)<EOL><DEDENT><DEDENT>if rsa:<EOL><INDENT>libcrypto.RSA_free(rsa)<EOL><DEDENT>if dsa:<EOL><INDENT>libcrypto.DSA_free(dsa)<EOL><DEDENT>if dsa_sig:<EOL><INDENT>libcrypto.DSA_SIG_free(dsa_sig)<EOL><DEDENT>if ec_key:<EOL><INDENT>libcrypto.EC_KEY_free(ec_key)<EOL><DEDENT>if ecdsa_sig:<EOL><INDENT>libcrypto.ECDSA_SIG_free(ecdsa_sig)<EOL><DEDENT><DEDENT>", "docstring": "Verifies an RSA, DSA or ECDSA signature\n\n:param certificate_or_public_key:\n    A Certificate or PublicKey instance to verify the signature with\n\n:param signature:\n    A byte string of the signature to verify\n\n:param data:\n    A byte string of the data the signature is 
for\n\n:param hash_algorithm:\n    A unicode string of \"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\" or \"sha512\"\n\n:param rsa_pss_padding:\n    If the certificate_or_public_key is an RSA key, this enables PSS padding\n\n:raises:\n    oscrypto.errors.SignatureError - when the signature is determined to be invalid\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by the OS crypto library", "id": "f9533:m18"}
{"signature": "def tripledes_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) != <NUM_LIT:16> and len(key) != <NUM_LIT>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if not iv:<EOL><INDENT>iv = rand_bytes(<NUM_LIT:8>)<EOL><DEDENT>elif len(iv) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>cipher = '<STR_LIT>'<EOL>if len(key) == <NUM_LIT:16>:<EOL><INDENT>key = key + key[<NUM_LIT:0>:<NUM_LIT:8>]<EOL>cipher = '<STR_LIT>'<EOL><DEDENT>return (iv, _encrypt(cipher, key, data, iv, True))<EOL>", "docstring": "Encrypts plaintext using 3DES in CBC mode using either the 2 or 3 key\nvariant (16 or 24 byte long key) and PKCS#5 padding.\n\n:param key:\n    The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)\n\n:param data:\n    The plaintext - a byte string\n\n:param iv:\n    The initialization vector - a byte string 8-bytes long or None\n    to generate an IV\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by OpenSSL\n\n:return:\n    A tuple of two byte strings (iv, ciphertext)", "id": "f9537:m9"}
{"signature": "def aes_cbc_no_padding_encrypt(key, data, iv):", "body": "cipher = _calculate_aes_cipher(key)<EOL>if not iv:<EOL><INDENT>iv = rand_bytes(<NUM_LIT:16>)<EOL><DEDENT>elif len(iv) != <NUM_LIT:16>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>if len(data) % <NUM_LIT:16> != <NUM_LIT:0>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(data)<EOL>))<EOL><DEDENT>return (iv, _encrypt(cipher, key, data, iv, False))<EOL>", "docstring": "Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and\nno padding. This means the plaintext must be an exact multiple of 16 bytes\nlong.\n\n:param key:\n    The encryption key - a byte string either 16, 24 or 32 bytes long\n\n:param data:\n    The plaintext - a byte string\n\n:param iv:\n    The initialization vector - either a byte string 16-bytes long or None\n    to generate an IV\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by OpenSSL\n\n:return:\n    A tuple of two byte strings (iv, ciphertext)", "id": "f9537:m0"}
{"signature": "def _decrypt(cipher, key, data, iv, padding):", "body": "if not isinstance(key, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(key)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if cipher != '<STR_LIT>' and not isinstance(iv, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(iv)<EOL>))<EOL><DEDENT>if cipher != '<STR_LIT>' and padding is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>evp_cipher_ctx = None<EOL>try:<EOL><INDENT>evp_cipher_ctx = libcrypto.EVP_CIPHER_CTX_new()<EOL>if is_null(evp_cipher_ctx):<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>evp_cipher, buffer_size = _setup_evp_encrypt_decrypt(cipher, data)<EOL>if iv is None:<EOL><INDENT>iv = null()<EOL><DEDENT>if cipher in set(['<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>res = libcrypto.EVP_DecryptInit_ex(evp_cipher_ctx, evp_cipher, null(), null(), null())<EOL>handle_openssl_error(res)<EOL>res = libcrypto.EVP_CIPHER_CTX_set_key_length(evp_cipher_ctx, len(key))<EOL>handle_openssl_error(res)<EOL>if cipher == '<STR_LIT>':<EOL><INDENT>res = libcrypto.EVP_CIPHER_CTX_ctrl(<EOL>evp_cipher_ctx,<EOL>LibcryptoConst.EVP_CTRL_SET_RC2_KEY_BITS,<EOL>len(key) * <NUM_LIT:8>,<EOL>null()<EOL>)<EOL>handle_openssl_error(res)<EOL><DEDENT>evp_cipher = null()<EOL><DEDENT>res = libcrypto.EVP_DecryptInit_ex(evp_cipher_ctx, evp_cipher, null(), key, iv)<EOL>handle_openssl_error(res)<EOL>if padding is not None:<EOL><INDENT>res = libcrypto.EVP_CIPHER_CTX_set_padding(evp_cipher_ctx, int(padding))<EOL>handle_openssl_error(res)<EOL><DEDENT>buffer = buffer_from_bytes(buffer_size)<EOL>output_length = new(libcrypto, '<STR_LIT>')<EOL>res = libcrypto.EVP_DecryptUpdate(evp_cipher_ctx, buffer, output_length, data, len(data))<EOL>handle_openssl_error(res)<EOL>output = bytes_from_buffer(buffer, 
deref(output_length))<EOL>res = libcrypto.EVP_DecryptFinal_ex(evp_cipher_ctx, buffer, output_length)<EOL>handle_openssl_error(res)<EOL>output += bytes_from_buffer(buffer, deref(output_length))<EOL>return output<EOL><DEDENT>finally:<EOL><INDENT>if evp_cipher_ctx:<EOL><INDENT>libcrypto.EVP_CIPHER_CTX_free(evp_cipher_ctx)<EOL><DEDENT><DEDENT>", "docstring": "Decrypts AES/RC4/RC2/3DES/DES ciphertext\n\n:param cipher:\n    A unicode string of \"aes128\", \"aes192\", \"aes256\", \"des\",\n    \"tripledes_2key\", \"tripledes_3key\", \"rc2\", \"rc4\"\n\n:param key:\n    The encryption key - a byte string 5-32 bytes long\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector - a byte string - unused for RC4\n\n:param padding:\n    Boolean, if padding should be used - unused for RC4\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by OpenSSL\n\n:return:\n    A byte string of the plaintext", "id": "f9537:m14"}
{"signature": "def rc4_decrypt(key, data):", "body": "if len(key) < <NUM_LIT:5> or len(key) > <NUM_LIT:16>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>return _decrypt('<STR_LIT>', key, data, None, None)<EOL>", "docstring": "Decrypts RC4 ciphertext using a 40-128 bit key\n\n:param key:\n    The encryption key - a byte string 5-16 bytes long\n\n:param data:\n    The ciphertext - a byte string\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by OpenSSL\n\n:return:\n    A byte string of the plaintext", "id": "f9537:m6"}
{"signature": "def _encrypt(cipher, key, data, iv, padding):", "body": "if not isinstance(key, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(key)<EOL>))<EOL><DEDENT>if not isinstance(data, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(data)<EOL>))<EOL><DEDENT>if cipher != '<STR_LIT>' and not isinstance(iv, byte_cls):<EOL><INDENT>raise TypeError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>type_name(iv)<EOL>))<EOL><DEDENT>if cipher != '<STR_LIT>' and not padding:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>evp_cipher_ctx = None<EOL>try:<EOL><INDENT>evp_cipher_ctx = libcrypto.EVP_CIPHER_CTX_new()<EOL>if is_null(evp_cipher_ctx):<EOL><INDENT>handle_openssl_error(<NUM_LIT:0>)<EOL><DEDENT>evp_cipher, buffer_size = _setup_evp_encrypt_decrypt(cipher, data)<EOL>if iv is None:<EOL><INDENT>iv = null()<EOL><DEDENT>if cipher in set(['<STR_LIT>', '<STR_LIT>']):<EOL><INDENT>res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), null(), null())<EOL>handle_openssl_error(res)<EOL>res = libcrypto.EVP_CIPHER_CTX_set_key_length(evp_cipher_ctx, len(key))<EOL>handle_openssl_error(res)<EOL>if cipher == '<STR_LIT>':<EOL><INDENT>res = libcrypto.EVP_CIPHER_CTX_ctrl(<EOL>evp_cipher_ctx,<EOL>LibcryptoConst.EVP_CTRL_SET_RC2_KEY_BITS,<EOL>len(key) * <NUM_LIT:8>,<EOL>null()<EOL>)<EOL>handle_openssl_error(res)<EOL><DEDENT>evp_cipher = null()<EOL><DEDENT>res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), key, iv)<EOL>handle_openssl_error(res)<EOL>if padding is not None:<EOL><INDENT>res = libcrypto.EVP_CIPHER_CTX_set_padding(evp_cipher_ctx, int(padding))<EOL>handle_openssl_error(res)<EOL><DEDENT>buffer = buffer_from_bytes(buffer_size)<EOL>output_length = new(libcrypto, '<STR_LIT>')<EOL>res = libcrypto.EVP_EncryptUpdate(evp_cipher_ctx, buffer, output_length, data, len(data))<EOL>handle_openssl_error(res)<EOL>output = bytes_from_buffer(buffer, deref(output_length))<EOL>res 
= libcrypto.EVP_EncryptFinal_ex(evp_cipher_ctx, buffer, output_length)<EOL>handle_openssl_error(res)<EOL>output += bytes_from_buffer(buffer, deref(output_length))<EOL>return output<EOL><DEDENT>finally:<EOL><INDENT>if evp_cipher_ctx:<EOL><INDENT>libcrypto.EVP_CIPHER_CTX_free(evp_cipher_ctx)<EOL><DEDENT><DEDENT>", "docstring": "Encrypts plaintext\n\n:param cipher:\n    A unicode string of \"aes128\", \"aes192\", \"aes256\", \"des\",\n    \"tripledes_2key\", \"tripledes_3key\", \"rc2\", \"rc4\"\n\n:param key:\n    The encryption key - a byte string 5-32 bytes long\n\n:param data:\n    The plaintext - a byte string\n\n:param iv:\n    The initialization vector - a byte string - unused for RC4\n\n:param padding:\n    Boolean, if padding should be used - unused for RC4\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by OpenSSL\n\n:return:\n    A byte string of the ciphertext", "id": "f9537:m13"}
{"signature": "def des_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if len(iv) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>return _decrypt('<STR_LIT>', key, data, iv, True)<EOL>", "docstring": "Decrypts DES ciphertext in CBC mode using a 56 bit key and PKCS#5 padding.\n\n:param key:\n    The encryption key - a byte string 8 bytes long (includes error correction bits)\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector - a byte string 8-bytes long\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by OpenSSL\n\n:return:\n    A byte string of the plaintext", "id": "f9537:m12"}
{"signature": "def tripledes_cbc_pkcs5_decrypt(key, data, iv):", "body": "if len(key) != <NUM_LIT:16> and len(key) != <NUM_LIT>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if len(iv) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>cipher = '<STR_LIT>'<EOL>if len(key) == <NUM_LIT:16>:<EOL><INDENT>key = key + key[<NUM_LIT:0>:<NUM_LIT:8>]<EOL>cipher = '<STR_LIT>'<EOL><DEDENT>return _decrypt(cipher, key, data, iv, True)<EOL>", "docstring": "Decrypts 3DES ciphertext in CBC mode using either the 2 or 3 key variant\n(16 or 24 byte long key) and PKCS#5 padding.\n\n:param key:\n    The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)\n\n:param data:\n    The ciphertext - a byte string\n\n:param iv:\n    The initialization vector - a byte string 8-bytes long\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by OpenSSL\n\n:return:\n    A byte string of the plaintext", "id": "f9537:m10"}
{"signature": "def des_cbc_pkcs5_encrypt(key, data, iv):", "body": "if len(key) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(key)<EOL>))<EOL><DEDENT>if not iv:<EOL><INDENT>iv = rand_bytes(<NUM_LIT:8>)<EOL><DEDENT>elif len(iv) != <NUM_LIT:8>:<EOL><INDENT>raise ValueError(pretty_message(<EOL>'''<STR_LIT>''',<EOL>len(iv)<EOL>))<EOL><DEDENT>return (iv, _encrypt('<STR_LIT>', key, data, iv, True))<EOL>", "docstring": "Encrypts plaintext using DES in CBC mode with a 56 bit key and PKCS#5\npadding.\n\n:param key:\n    The encryption key - a byte string 8 bytes long (includes error correction bits)\n\n:param data:\n    The plaintext - a byte string\n\n:param iv:\n    The initialization vector - a byte string 8-bytes long or None\n    to generate an IV\n\n:raises:\n    ValueError - when any of the parameters contain an invalid value\n    TypeError - when any of the parameters are of the wrong type\n    OSError - when an error is returned by OpenSSL\n\n:return:\n    A tuple of two byte strings (iv, ciphertext)", "id": "f9537:m11"}
{"signature": "def _setup_evp_encrypt_decrypt(cipher, data):", "body": "evp_cipher = {<EOL>'<STR_LIT>': libcrypto.EVP_aes_128_cbc,<EOL>'<STR_LIT>': libcrypto.EVP_aes_192_cbc,<EOL>'<STR_LIT>': libcrypto.EVP_aes_256_cbc,<EOL>'<STR_LIT>': libcrypto.EVP_rc2_cbc,<EOL>'<STR_LIT>': libcrypto.EVP_rc4,<EOL>'<STR_LIT>': libcrypto.EVP_des_cbc,<EOL>'<STR_LIT>': libcrypto.EVP_des_ede_cbc,<EOL>'<STR_LIT>': libcrypto.EVP_des_ede3_cbc,<EOL>}[cipher]()<EOL>if cipher == '<STR_LIT>':<EOL><INDENT>buffer_size = len(data)<EOL><DEDENT>else:<EOL><INDENT>block_size = {<EOL>'<STR_LIT>': <NUM_LIT:16>,<EOL>'<STR_LIT>': <NUM_LIT:16>,<EOL>'<STR_LIT>': <NUM_LIT:16>,<EOL>'<STR_LIT>': <NUM_LIT:8>,<EOL>'<STR_LIT>': <NUM_LIT:8>,<EOL>'<STR_LIT>': <NUM_LIT:8>,<EOL>'<STR_LIT>': <NUM_LIT:8>,<EOL>}[cipher]<EOL>buffer_size = block_size * int(math.ceil(len(data) / block_size))<EOL><DEDENT>return (evp_cipher, buffer_size)<EOL>", "docstring": "Creates an EVP_CIPHER pointer object and determines the buffer size\nnecessary for the parameter specified.\n\n:param evp_cipher_ctx:\n    An EVP_CIPHER_CTX pointer\n\n:param cipher:\n    A unicode string of \"aes128\", \"aes192\", \"aes256\", \"des\",\n    \"tripledes_2key\", \"tripledes_3key\", \"rc2\", \"rc4\"\n\n:param key:\n    The key byte string\n\n:param data:\n    The plaintext or ciphertext as a byte string\n\n:param padding:\n    If padding is to be used\n\n:return:\n    A 2-element tuple with the first element being an EVP_CIPHER pointer\n    and the second being an integer that is the required buffer size", "id": "f9537:m15"}
{"signature": "def mkdir_p(*args, **kwargs):", "body": "try:<EOL><INDENT>return os.mkdir(*args, **kwargs)<EOL><DEDENT>except OSError as exc:<EOL><INDENT>if exc.errno != errno.EEXIST:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>", "docstring": "Like `mkdir`, but does not raise an exception if the\n    directory already exists.", "id": "f9552:m2"}
{"signature": "def build_sdist(self, sdist_directory, config_settings=None):", "body": "return self._call_hook('<STR_LIT>', {<EOL>'<STR_LIT>': abspath(sdist_directory),<EOL>'<STR_LIT>': config_settings,<EOL>})<EOL>", "docstring": "Build an sdist from this project.\n\n        Returns the name of the newly created file.\n\n        This calls the 'build_sdist' backend hook in a subprocess.", "id": "f9553:c3:m6"}
{"signature": "def get_requires_for_build_sdist(self, config_settings=None):", "body": "return self._call_hook('<STR_LIT>', {<EOL>'<STR_LIT>': config_settings<EOL>})<EOL>", "docstring": "Identify packages required for building a wheel\n\n        Returns a list of dependency specifications, e.g.:\n            [\"setuptools >= 26\"]\n\n        This does not include requirements specified in pyproject.toml.\n        It returns the result of calling the equivalently named hook in a\n        subprocess.", "id": "f9553:c3:m5"}
{"signature": "def get_requires_for_build_wheel(self, config_settings=None):", "body": "return self._call_hook('<STR_LIT>', {<EOL>'<STR_LIT>': config_settings<EOL>})<EOL>", "docstring": "Identify packages required for building a wheel\n\n        Returns a list of dependency specifications, e.g.:\n            [\"wheel >= 0.25\", \"setuptools\"]\n\n        This does not include requirements specified in pyproject.toml.\n        It returns the result of calling the equivalently named hook in a\n        subprocess.", "id": "f9553:c3:m2"}
{"signature": "def default_subprocess_runner(cmd, cwd=None, extra_environ=None):", "body": "env = os.environ.copy()<EOL>if extra_environ:<EOL><INDENT>env.update(extra_environ)<EOL><DEDENT>check_call(cmd, cwd=cwd, env=env)<EOL>", "docstring": "The default method of calling the wrapper subprocess.", "id": "f9553:m1"}
{"signature": "def build_wheel(<EOL>self, wheel_directory, config_settings=None,<EOL>metadata_directory=None):", "body": "if metadata_directory is not None:<EOL><INDENT>metadata_directory = abspath(metadata_directory)<EOL><DEDENT>return self._call_hook('<STR_LIT>', {<EOL>'<STR_LIT>': abspath(wheel_directory),<EOL>'<STR_LIT>': config_settings,<EOL>'<STR_LIT>': metadata_directory,<EOL>})<EOL>", "docstring": "Build a wheel from this project.\n\n        Returns the name of the newly created file.\n\n        In general, this will call the 'build_wheel' hook in the backend.\n        However, if that was previously called by\n        'prepare_metadata_for_build_wheel', and the same metadata_directory is\n        used, the previously built wheel will be copied to wheel_directory.", "id": "f9553:c3:m4"}
{"signature": "def pip_install(self, reqs):", "body": "if not reqs:<EOL><INDENT>return<EOL><DEDENT>log.info('<STR_LIT>', reqs)<EOL>check_call([<EOL>sys.executable, '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', self.path] + list(reqs))<EOL>", "docstring": "Install dependencies into this env by calling pip in a subprocess", "id": "f9554:c0:m2"}
{"signature": "def prepare_metadata_for_build_wheel(metadata_directory, config_settings):", "body": "backend = _build_backend()<EOL>try:<EOL><INDENT>hook = backend.prepare_metadata_for_build_wheel<EOL><DEDENT>except AttributeError:<EOL><INDENT>return _get_wheel_metadata_from_wheel(backend, metadata_directory,<EOL>config_settings)<EOL><DEDENT>else:<EOL><INDENT>return hook(metadata_directory, config_settings)<EOL><DEDENT>", "docstring": "Invoke optional prepare_metadata_for_build_wheel\n\n    Implements a fallback by building a wheel if the hook isn't defined.", "id": "f9557:m3"}
{"signature": "def build_wheel(wheel_directory, config_settings, metadata_directory=None):", "body": "prebuilt_whl = _find_already_built_wheel(metadata_directory)<EOL>if prebuilt_whl:<EOL><INDENT>shutil.copy2(prebuilt_whl, wheel_directory)<EOL>return os.path.basename(prebuilt_whl)<EOL><DEDENT>return _build_backend().build_wheel(wheel_directory, config_settings,<EOL>metadata_directory)<EOL>", "docstring": "Invoke the mandatory build_wheel hook.\n\n    If a wheel was already built in the\n    prepare_metadata_for_build_wheel fallback, this\n    will copy it rather than rebuilding the wheel.", "id": "f9557:m7"}
{"signature": "def _dist_info_files(whl_zip):", "body": "res = []<EOL>for path in whl_zip.namelist():<EOL><INDENT>m = re.match(r'<STR_LIT>', path)<EOL>if m:<EOL><INDENT>res.append(path)<EOL><DEDENT><DEDENT>if res:<EOL><INDENT>return res<EOL><DEDENT>raise Exception(\"<STR_LIT>\")<EOL>", "docstring": "Identify the .dist-info folder inside a wheel ZipFile.", "id": "f9557:m4"}
{"signature": "def contained_in(filename, directory):", "body": "filename = os.path.normcase(os.path.abspath(filename))<EOL>directory = os.path.normcase(os.path.abspath(directory))<EOL>return os.path.commonprefix([filename, directory]) == directory<EOL>", "docstring": "Test if a file is located within the given directory.", "id": "f9557:m0"}
{"signature": "def get_requires_for_build_wheel(config_settings):", "body": "backend = _build_backend()<EOL>try:<EOL><INDENT>hook = backend.get_requires_for_build_wheel<EOL><DEDENT>except AttributeError:<EOL><INDENT>return []<EOL><DEDENT>else:<EOL><INDENT>return hook(config_settings)<EOL><DEDENT>", "docstring": "Invoke the optional get_requires_for_build_wheel hook\n\n    Returns [] if the hook is not defined.", "id": "f9557:m2"}
{"signature": "def get_requests(self, params={}):", "body": "if \"<STR_LIT:status>\" in params:<EOL><INDENT>params['<STR_LIT:status>'] = '<STR_LIT:U+002C>'.join(map(str, params['<STR_LIT:status>']))<EOL><DEDENT>requests = []<EOL>users = {}<EOL>messages = {}<EOL>params['<STR_LIT>'] = <NUM_LIT:0><EOL>while True:<EOL><INDENT>param_list = [(k, params[k]) for k in sorted(params)]<EOL>url = \"<STR_LIT>\" % urlencode(param_list)<EOL>data = self._get_resource(url)<EOL>for entry in data[\"<STR_LIT>\"]:<EOL><INDENT>user = Users.user_from_json(entry)<EOL>users[user.user_id] = user<EOL><DEDENT>for entry in data[\"<STR_LIT>\"]:<EOL><INDENT>request = self.request_from_json(entry)<EOL>requests.append(request)<EOL><DEDENT>for entry in data[\"<STR_LIT>\"]:<EOL><INDENT>message = Messages.message_from_json(entry)<EOL>if message.request_id not in messages:<EOL><INDENT>messages[message.request_id] = []<EOL><DEDENT>messages[message.request_id].append(message)<EOL><DEDENT>if not data['<STR_LIT>']:<EOL><INDENT>break<EOL><DEDENT>params['<STR_LIT>'] += <NUM_LIT:1><EOL><DEDENT>for request in requests:<EOL><INDENT>request.user = users.get(request.user_id, None)<EOL>request.messages = messages.get(request.request_id, [])<EOL><DEDENT>return requests<EOL>", "docstring": "List requests\n\nhttp://dev.wheniwork.com/#listing-requests", "id": "f9560:c0:m0"}
{"signature": "def delete_messages(self, messages):", "body": "url = \"<STR_LIT>\" % urlencode([('<STR_LIT>', \"<STR_LIT:U+002C>\".join(messages))])<EOL>data = self._delete_resource(url)<EOL>return data<EOL>", "docstring": "Delete existing messages.\n\nhttp://dev.wheniwork.com/#delete-existing-message", "id": "f9570:c0:m4"}
{"signature": "def create_message(self, params={}):", "body": "url = \"<STR_LIT>\"<EOL>body = params<EOL>data = self._post_resource(url, body)<EOL>return self.message_from_json(data[\"<STR_LIT:message>\"])<EOL>", "docstring": "Creates a message\n\nhttp://dev.wheniwork.com/#create/update-message", "id": "f9570:c0:m2"}
{"signature": "def update_message(self, message):", "body": "url = \"<STR_LIT>\" % message.message_id<EOL>data = self._put_resource(url, message.json_data())<EOL>return self.message_from_json(data)<EOL>", "docstring": "Modify an existing message.\n\nhttp://dev.wheniwork.com/#create/update-message", "id": "f9570:c0:m3"}
{"signature": "def get_locations(self):", "body": "url = \"<STR_LIT>\"<EOL>data = self._get_resource(url)<EOL>locations = []<EOL>for entry in data['<STR_LIT>']:<EOL><INDENT>locations.append(self.location_from_json(entry))<EOL><DEDENT>return locations<EOL>", "docstring": "Returns a list of locations.\n\nhttp://dev.wheniwork.com/#listing-locations", "id": "f9571:c0:m1"}
{"signature": "def get_location(self, location_id):", "body": "url = \"<STR_LIT>\" % location_id<EOL>return self.location_from_json(self._get_resource(url)[\"<STR_LIT:location>\"])<EOL>", "docstring": "Returns location data.\n\nhttp://dev.wheniwork.com/#get-existing-location", "id": "f9571:c0:m0"}
{"signature": "def get_sites(self):", "body": "url = \"<STR_LIT>\"<EOL>data = self._get_resource(url)<EOL>sites = []<EOL>for entry in data['<STR_LIT>']:<EOL><INDENT>sites.append(self.site_from_json(entry))<EOL><DEDENT>return sites<EOL>", "docstring": "Returns a list of sites.\n\nhttp://dev.wheniwork.com/#listing-sites", "id": "f9572:c0:m1"}
{"signature": "def create_site(self, params={}):", "body": "url = \"<STR_LIT>\"<EOL>body = params<EOL>data = self._post_resource(url, body)<EOL>return self.site_from_json(data[\"<STR_LIT>\"])<EOL>", "docstring": "Creates a site\n\nhttp://dev.wheniwork.com/#create-update-site", "id": "f9572:c0:m2"}
{"signature": "def get_users(self, params={}):", "body": "param_list = [(k, params[k]) for k in sorted(params)]<EOL>url = \"<STR_LIT>\" % urlencode(param_list)<EOL>data = self._get_resource(url)<EOL>users = []<EOL>for entry in data[\"<STR_LIT>\"]:<EOL><INDENT>users.append(self.user_from_json(entry))<EOL><DEDENT>return users<EOL>", "docstring": "Returns a list of users.\n\nhttp://dev.wheniwork.com/#listing-users", "id": "f9576:c0:m1"}
{"signature": "def get_shifts(self, params={}):", "body": "param_list = [(k, params[k]) for k in sorted(params)]<EOL>url = \"<STR_LIT>\" % urlencode(param_list)<EOL>data = self._get_resource(url)<EOL>shifts = []<EOL>locations = {}<EOL>sites = {}<EOL>positions = {}<EOL>users = {}<EOL>for entry in data.get(\"<STR_LIT>\", []):<EOL><INDENT>location = Locations.location_from_json(entry)<EOL>locations[location.location_id] = location<EOL><DEDENT>for entry in data.get(\"<STR_LIT>\", []):<EOL><INDENT>site = Sites.site_from_json(entry)<EOL>sites[site.site_id] = site<EOL><DEDENT>for entry in data.get(\"<STR_LIT>\", []):<EOL><INDENT>position = Positions.position_from_json(entry)<EOL>positions[position.position_id] = position<EOL><DEDENT>for entry in data.get(\"<STR_LIT>\", []):<EOL><INDENT>user = Users.user_from_json(entry)<EOL>users[user.user_id] = user<EOL><DEDENT>for entry in data[\"<STR_LIT>\"]:<EOL><INDENT>shift = self.shift_from_json(entry)<EOL>shifts.append(shift)<EOL><DEDENT>for shift in shifts:<EOL><INDENT>shift.location = locations.get(shift.location_id, None)<EOL>shift.site = sites.get(shift.site_id, None)<EOL>shift.position = positions.get(shift.position_id, None)<EOL>shift.user = users.get(shift.user_id, None)<EOL><DEDENT>return shifts<EOL>", "docstring": "List shifts\n\nhttp://dev.wheniwork.com/#listing-shifts", "id": "f9577:c0:m0"}
{"signature": "def delete_shifts(self, shifts):", "body": "url = \"<STR_LIT>\" % urlencode(<EOL>{'<STR_LIT>': \"<STR_LIT:U+002C>\".join(str(s) for s in shifts)})<EOL>data = self._delete_resource(url)<EOL>return data<EOL>", "docstring": "Delete existing shifts.\n\nhttp://dev.wheniwork.com/#delete-shift", "id": "f9577:c0:m2"}
{"signature": "@property<EOL><INDENT>def is_scanning(self):<DEDENT>", "body": "return self._is_scanning<EOL>", "docstring": "Return True if the BLE adapter is scanning for devices, otherwise\n        return False.", "id": "f9580:c0:m5"}
{"signature": "def _state_changed(self, state):", "body": "logger.debug('<STR_LIT>'.format(state))<EOL>if state == <NUM_LIT:5>:<EOL><INDENT>self._powered_off.clear()<EOL>self._powered_on.set()<EOL><DEDENT>elif state == <NUM_LIT:4>:<EOL><INDENT>self._powered_on.clear()<EOL>self._powered_off.set()<EOL><DEDENT>", "docstring": "Called when the power state changes.", "id": "f9580:c0:m1"}
{"signature": "def __init__(self):", "body": "self._is_scanning = False<EOL>self._powered_on = threading.Event()<EOL>self._powered_off = threading.Event()<EOL>", "docstring": "Create an instance of the bluetooth adapter from the provided bluez\n        DBus object.", "id": "f9580:c0:m0"}
{"signature": "def stop_scan(self, timeout_sec=TIMEOUT_SEC):", "body": "get_provider()._central_manager.stopScan()<EOL>self._is_scanning = False<EOL>", "docstring": "Stop scanning for BLE devices.", "id": "f9580:c0:m4"}
{"signature": "def power_on(self, timeout_sec=TIMEOUT_SEC):", "body": "<EOL>self._powered_on.clear()<EOL>IOBluetoothPreferenceSetControllerPowerState(<NUM_LIT:1>)<EOL>if not self._powered_on.wait(timeout_sec):<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Power on Bluetooth.", "id": "f9580:c0:m6"}
{"signature": "def _user_thread_main(self, target):", "body": "try:<EOL><INDENT>return_code = target()<EOL>if return_code is None:<EOL><INDENT>return_code = <NUM_LIT:0><EOL><DEDENT>AppHelper.callAfter(lambda: sys.exit(return_code))<EOL><DEDENT>except Exception as ex:<EOL><INDENT>AppHelper.callAfter(self._raise_error, sys.exc_info())<EOL><DEDENT>", "docstring": "Main entry point for the thread that will run user's code.", "id": "f9581:c1:m3"}
{"signature": "def peripheral_didDiscoverServices_(self, peripheral, services):", "body": "logger.debug('<STR_LIT>')<EOL>for service in peripheral.services():<EOL><INDENT>if service_list().get(service) is None:<EOL><INDENT>service_list().add(service, CoreBluetoothGattService(service))<EOL><DEDENT>peripheral.discoverCharacteristics_forService_(None, service)<EOL><DEDENT>", "docstring": "Called when services are discovered for a device.", "id": "f9581:c0:m5"}
{"signature": "def peripheral_didReadRSSI_error_(self, peripheral, rssi, error):", "body": "logger.debug('<STR_LIT>')<EOL>if error is not None:<EOL><INDENT>return<EOL><DEDENT>device = device_list().get(peripheral)<EOL>if device is not None:<EOL><INDENT>device._rssi_changed(rssi)<EOL><DEDENT>", "docstring": "Called when a new RSSI value for the peripheral is available.", "id": "f9581:c0:m12"}
{"signature": "def run_mainloop_with(self, target):", "body": "<EOL>self._user_thread = threading.Thread(target=self._user_thread_main,<EOL>args=(target,))<EOL>self._user_thread.daemon = True<EOL>self._user_thread.start()<EOL>try:<EOL><INDENT>AppHelper.runConsoleEventLoop(installInterrupt=True)<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>AppHelper.stopEventLoop()<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>", "docstring": "Start the OS's main loop to process asyncronous BLE events and then\n        run the specified target function in a background thread.  Target\n        function should be a function that takes no parameters and optionally\n        return an integer response code.  When the target function stops\n        executing or returns with value then the main loop will be stopped and\n        the program will exit with the returned code.\n\n        Note that an OS main loop is required to process asyncronous BLE events\n        and this function is provided as a convenience for writing simple tools\n        and scripts that don't need to be full-blown GUI applications.  If you\n        are writing a GUI application that has a main loop (a GTK glib main loop\n        on Linux, or a Cocoa main loop on OSX) then you don't need to call this\n        function.", "id": "f9581:c1:m2"}
{"signature": "def centralManager_didConnectPeripheral_(self, manager, peripheral):", "body": "logger.debug('<STR_LIT>')<EOL>peripheral.setDelegate_(self)<EOL>peripheral.discoverServices_(None)<EOL>device = device_list().get(peripheral)<EOL>if device is not None:<EOL><INDENT>device._set_connected()<EOL><DEDENT>", "docstring": "Called when a device is connected.", "id": "f9581:c0:m2"}
{"signature": "def list_adapters(self):", "body": "return [self._adapter]<EOL>", "docstring": "Return a list of BLE adapter objects connected to the system.", "id": "f9581:c1:m5"}
{"signature": "def clear_cached_data(self):", "body": "<EOL>if self._adapter.is_powered:<EOL><INDENT>self._adapter.power_off()<EOL><DEDENT>with open(os.devnull, '<STR_LIT:w>') as devnull:<EOL><INDENT>subprocess.call('<STR_LIT>',<EOL>shell=True, stdout=devnull, stderr=subprocess.STDOUT)<EOL>subprocess.call('<STR_LIT>',<EOL>shell=True, stdout=devnull, stderr=subprocess.STDOUT)<EOL><DEDENT>", "docstring": "Clear the internal bluetooth device cache.  This is useful if a device\n        changes its state like name and it can't be detected with the new state\n        anymore.  WARNING: This will delete some files underneath the running user's\n        ~/Library/Preferences/ folder!\n\n        See this Stackoverflow question for information on what the function does:\n        http://stackoverflow.com/questions/20553957/how-can-i-clear-the-corebluetooth-cache-on-macos", "id": "f9581:c1:m7"}
{"signature": "def initialize(self):", "body": "<EOL>self._central_manager = CBCentralManager.alloc()<EOL>self._central_manager.initWithDelegate_queue_options_(self._central_delegate,<EOL>None, None)<EOL>", "docstring": "Initialize the BLE provider.  Must be called once before any other\n        calls are made to the provider.", "id": "f9581:c1:m1"}
{"signature": "def peripheral_didUpdateValueForCharacteristic_error_(self, peripheral, characteristic, error):", "body": "logger.debug('<STR_LIT>')<EOL>if error is not None:<EOL><INDENT>return<EOL><DEDENT>device = device_list().get(peripheral)<EOL>if device is not None:<EOL><INDENT>device._characteristic_changed(characteristic)<EOL><DEDENT>", "docstring": "Called when characteristic value was read or updated.", "id": "f9581:c0:m10"}
{"signature": "def peripheral_didDiscoverCharacteristicsForService_error_(self, peripheral, service, error):", "body": "logger.debug('<STR_LIT>')<EOL>if error is not None:<EOL><INDENT>return<EOL><DEDENT>for char in service.characteristics():<EOL><INDENT>if characteristic_list().get(char) is None:<EOL><INDENT>characteristic_list().add(char, CoreBluetoothGattCharacteristic(char))<EOL><DEDENT>peripheral.discoverDescriptorsForCharacteristic_(char)<EOL><DEDENT>device = device_list().get(peripheral)<EOL>if device is not None:<EOL><INDENT>device._characteristics_discovered(service)<EOL><DEDENT>", "docstring": "Called when characteristics are discovered for a service.", "id": "f9581:c0:m6"}
{"signature": "def centralManager_didDiscoverPeripheral_advertisementData_RSSI_(self, manager, peripheral, data, rssi):", "body": "logger.debug('<STR_LIT>')<EOL>device = device_list().get(peripheral)<EOL>if device is None:<EOL><INDENT>device = device_list().add(peripheral, CoreBluetoothDevice(peripheral))<EOL><DEDENT>device._update_advertised(data)<EOL>", "docstring": "Called when the BLE adapter found a device while scanning, or has\n        new advertisement data for a device.", "id": "f9581:c0:m1"}
{"signature": "def list_devices(self):", "body": "return self._devices.list()<EOL>", "docstring": "Return a list of BLE devices known to the system.", "id": "f9581:c1:m6"}
{"signature": "def _raise_error(self, exec_info):", "body": "<EOL>raise_(exec_info[<NUM_LIT:1>], None, exec_info[<NUM_LIT:2>])<EOL>", "docstring": "Raise an exception from the provided exception info.  Used to cause\n        the main thread to stop with an error.", "id": "f9581:c1:m4"}
{"signature": "def centralManager_didDisconnectPeripheral_error_(self, manager, peripheral, error):", "body": "logger.debug('<STR_LIT>')<EOL>device = device_list().get(peripheral)<EOL>if device is not None:<EOL><INDENT>device._set_disconnected()<EOL>device_list().remove(peripheral)<EOL><DEDENT>", "docstring": "Called when a device is disconnected.", "id": "f9581:c0:m4"}
{"signature": "def nsuuid_to_uuid(nsuuid):", "body": "return uuid.UUID(nsuuid.UUIDString())<EOL>", "docstring": "Convert Objective-C NSUUID type to native Python UUID type.", "id": "f9582:m2"}
{"signature": "def uuid_to_cbuuid(uuid):", "body": "return CBUUID.UUIDWithString_(str(uuid))<EOL>", "docstring": "Convert native Python UUID type to Objective-C CBUUID type.", "id": "f9582:m1"}
{"signature": "def cbuuid_to_uuid(cbuuid):", "body": "data = cbuuid.data().bytes()<EOL>template = '<STR_LIT>' if len(data) <= <NUM_LIT:4> else '<STR_LIT>'<EOL>value = template.format(hexlify(data.tobytes()[:<NUM_LIT:16>]).decode('<STR_LIT:ascii>'))<EOL>return uuid.UUID(hex=value)<EOL>", "docstring": "Convert Objective-C CBUUID type to native Python UUID type.", "id": "f9582:m0"}
{"signature": "def list(self):", "body": "with self._lock:<EOL><INDENT>return self._metadata.values()<EOL><DEDENT>", "docstring": "Return list of all metadata objects.", "id": "f9583:c0:m1"}
{"signature": "def read_value(self, timeout_sec=TIMEOUT_SEC):", "body": "<EOL>self._value_read.clear()<EOL>self._device._peripheral.readValueForCharacteristic_(self._characteristic)<EOL>if not self._value_read.wait(timeout_sec):<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>return self._characteristic.value()<EOL>", "docstring": "Read the value of this characteristic.", "id": "f9584:c1:m3"}
{"signature": "def stop_notify(self):", "body": "self._device._peripheral.setNotifyValue_forCharacteristic_(False,<EOL>self._characteristic)<EOL>", "docstring": "Disable notification of changes for this characteristic.", "id": "f9584:c1:m6"}
{"signature": "@property<EOL><INDENT>def uuid(self):<DEDENT>", "body": "return cbuuid_to_uuid(self._characteristic.UUID())<EOL>", "docstring": "Return the UUID of this GATT characteristic.", "id": "f9584:c1:m2"}
{"signature": "@property<EOL><INDENT>def uuid(self):<DEDENT>", "body": "return cbuuid_to_uuid(self._service.UUID())<EOL>", "docstring": "Return the UUID of this GATT service.", "id": "f9584:c0:m1"}
{"signature": "@property<EOL><INDENT>def uuid(self):<DEDENT>", "body": "return cbuuid_to_uuid(self._descriptor.UUID())<EOL>", "docstring": "Return the UUID of this GATT descriptor.", "id": "f9584:c2:m2"}
{"signature": "def start_notify(self, on_change):", "body": "<EOL>self._device._notify_characteristic(self._characteristic, on_change)<EOL>self._device._peripheral.setNotifyValue_forCharacteristic_(True,<EOL>self._characteristic)<EOL>", "docstring": "Enable notification of changes for this characteristic on the\n        specified on_change callback.  on_change should be a function that takes\n        one parameter which is the value (as a string of bytes) of the changed\n        characteristic value.", "id": "f9584:c1:m5"}
{"signature": "def __init__(self, peripheral):", "body": "self._peripheral = peripheral<EOL>self._advertised = []<EOL>self._discovered_services = set()<EOL>self._char_on_changed = {}<EOL>self._rssi = None<EOL>self._connected = threading.Event()<EOL>self._disconnected = threading.Event()<EOL>self._discovered = threading.Event()<EOL>self._rssi_read = threading.Event()<EOL>", "docstring": "Create an instance of the CoreBluetooth device from the provided\n        CBPeripheral instance.", "id": "f9585:c0:m0"}
{"signature": "def _rssi_changed(self, rssi):", "body": "self._rssi = rssi<EOL>self._rssi_read.set()<EOL>", "docstring": "Called when the RSSI signal strength has been read.", "id": "f9585:c0:m11"}
{"signature": "def list_services(self):", "body": "return service_list().get_all(self._peripheral.services())<EOL>", "docstring": "Return a list of GattService objects that have been discovered for\n        this device.", "id": "f9585:c0:m12"}
{"signature": "def connect(self, timeout_sec=TIMEOUT_SEC):", "body": "self._central_manager.connectPeripheral_options_(self._peripheral, None)<EOL>if not self._connected.wait(timeout_sec):<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Connect to the device.  If not connected within the specified timeout\n        then an exception is thrown.", "id": "f9585:c0:m2"}
{"signature": "@property<EOL><INDENT>def is_connected(self):<DEDENT>", "body": "return self._connected.is_set()<EOL>", "docstring": "Return True if the device is connected to the system, otherwise False.", "id": "f9585:c0:m17"}
{"signature": "@property<EOL><INDENT>def advertised(self):<DEDENT>", "body": "return self._advertised<EOL>", "docstring": "Return a list of UUIDs for services that are advertised by this\n        device.", "id": "f9585:c0:m14"}
{"signature": "def discover(self, service_uuids, char_uuids, timeout_sec=TIMEOUT_SEC):", "body": "<EOL>if not self._discovered.wait(timeout_sec):<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Wait up to timeout_sec for the specified services and characteristics\n        to be discovered on the device.  If the timeout is exceeded without\n        discovering the services and characteristics then an exception is thrown.", "id": "f9585:c0:m13"}
{"signature": "def _notify_characteristic(self, characteristic, on_change):", "body": "<EOL>self._char_on_changed[characteristic] = on_change<EOL>", "docstring": "Call the specified on_change callback when this characteristic\n        changes.", "id": "f9585:c0:m8"}
{"signature": "def _descriptor_changed(self, descriptor):", "body": "<EOL>desc = descriptor_list().get(descriptor)<EOL>if desc is not None:<EOL><INDENT>desc._value_read.set()<EOL><DEDENT>", "docstring": "Called when the specified descriptor has changed its value.", "id": "f9585:c0:m10"}
{"signature": "def _set_disconnected(self):", "body": "self._connected.clear()<EOL>self._disconnected.set()<EOL>", "docstring": "Set the connected event.", "id": "f9585:c0:m5"}
{"signature": "@abc.abstractproperty<EOL><INDENT>def name(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Return the name of this BLE network adapter.", "id": "f9586:c0:m0"}
{"signature": "@abc.abstractproperty<EOL><INDENT>def is_powered(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Return True if the BLE adapter is powered up, otherwise return False.", "id": "f9586:c0:m6"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def power_on(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Power on this BLE adapter.", "id": "f9586:c0:m4"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def power_off(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Power off this BLE adapter.", "id": "f9586:c0:m5"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def stop_scan(self, timeout_sec):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Stop scanning for BLE devices with this adapter.", "id": "f9586:c0:m2"}
{"signature": "def find_device(self, service_uuids=[], name=None, timeout_sec=TIMEOUT_SEC):", "body": "start = time.time()<EOL>while True:<EOL><INDENT>found = self.find_devices(service_uuids, name)<EOL>if len(found) > <NUM_LIT:0>:<EOL><INDENT>return found[<NUM_LIT:0>]<EOL><DEDENT>if time.time()-start >= timeout_sec:<EOL><INDENT>return None<EOL><DEDENT>time.sleep(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Return the first device that advertises the specified service UUIDs or\n        has the specified name. Will wait up to timeout_sec seconds for the device\n        to be found, and if the timeout is zero then it will not wait at all and\n        immediately return a result.  When no device is found a value of None is\n        returned.", "id": "f9587:c0:m8"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def clear_cached_data(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Clear any internally cached BLE device data.  Necessary in some cases\n        to prevent issues with stale device data getting cached by the OS.", "id": "f9587:c0:m4"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def disconnect_devices(self, service_uuids):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Disconnect any connected devices that have any of the specified\n        service UUIDs.", "id": "f9587:c0:m5"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def initialize(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Initialize the BLE provider.  Must be called once before any other\n        calls are made to the provider.", "id": "f9587:c0:m0"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def list_adapters(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Return a list of BLE adapter objects connected to the system.", "id": "f9587:c0:m2"}
{"signature": "def find_devices(self, service_uuids=[], name=None):", "body": "<EOL>expected = set(service_uuids)<EOL>devices = self.list_devices()<EOL>found = []<EOL>for device in devices:<EOL><INDENT>if name is not None:<EOL><INDENT>if device.name == name:<EOL><INDENT>found.append(device)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>actual = set(device.advertised)<EOL>if actual >= expected:<EOL><INDENT>found.append(device)<EOL><DEDENT><DEDENT><DEDENT>return found<EOL>", "docstring": "Return devices that advertise the specified service UUIDs and/or have\n        the specified name.  Service_uuids should be a list of Python uuid.UUID\n        objects and is optional.  Name is a string device name to look for and is\n        also optional.  Will not block, instead it returns immediately with a\n        list of found devices (which might be empty).", "id": "f9587:c0:m7"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def list_devices(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Return a list of BLE devices known to the system.", "id": "f9587:c0:m3"}
{"signature": "def get_default_adapter(self):", "body": "adapters = self.list_adapters()<EOL>if len(adapters) > <NUM_LIT:0>:<EOL><INDENT>return adapters[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Return the first BLE adapter found, or None if no adapters are\n        available.", "id": "f9587:c0:m6"}
{"signature": "@abc.abstractproperty<EOL><INDENT>def uuid(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Return the UUID of this GATT descriptor.", "id": "f9589:c2:m0"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def read_value(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Read the value of this characteristic.", "id": "f9589:c1:m1"}
{"signature": "def find_descriptor(self, uuid):", "body": "for desc in self.list_descriptors():<EOL><INDENT>if desc.uuid == uuid:<EOL><INDENT>return desc<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Return the first child descriptor found that has the specified\n        UUID.  Will return None if no descriptor that matches is found.", "id": "f9589:c1:m6"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def connect(self, timeout_sec):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Connect to the BLE device.", "id": "f9590:c0:m0"}
{"signature": "@abc.abstractproperty<EOL><INDENT>def rssi(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Return the RSSI signal strength in decibels.", "id": "f9590:c0:m8"}
{"signature": "@abc.abstractproperty<EOL><INDENT>def advertised(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Return a list of UUIDs for services that are advertised by this\n        device.", "id": "f9590:c0:m4"}
{"signature": "@abc.abstractproperty<EOL><INDENT>def discover(self, service_uuids, char_uuids, timeout_sec=<NUM_LIT:30>):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Wait up to timeout_sec for the specified services and characteristics\n        to be discovered on the device.  If the timeout is exceeded without\n        discovering the services and characteristics then an exception is thrown.", "id": "f9590:c0:m3"}
{"signature": "def find_service(self, uuid):", "body": "for service in self.list_services():<EOL><INDENT>if service.uuid == uuid:<EOL><INDENT>return service<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Return the first child service found that has the specified\n        UUID.  Will return None if no service that matches is found.", "id": "f9590:c0:m9"}
{"signature": "def get_provider():", "body": "global _provider<EOL>if _provider is None:<EOL><INDENT>if sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>from .bluez_dbus.provider import BluezProvider<EOL>_provider = BluezProvider()<EOL><DEDENT>elif sys.platform == '<STR_LIT>':<EOL><INDENT>from .corebluetooth.provider import CoreBluetoothProvider<EOL>_provider = CoreBluetoothProvider()<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>'.format(sys.platform))<EOL><DEDENT><DEDENT>return _provider<EOL>", "docstring": "Return an instance of the BLE provider for the current platform.", "id": "f9592:m0"}
{"signature": "def __init__(self, device):", "body": "<EOL>self._uart = device.find_service(UART_SERVICE_UUID)<EOL>if self._uart is None:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>self._tx = self._uart.find_characteristic(TX_CHAR_UUID)<EOL>self._rx = self._uart.find_characteristic(RX_CHAR_UUID)<EOL>if self._tx is None or self._rx is None:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>self._queue = queue.Queue()<EOL>self._rx.start_notify(self._rx_received)<EOL>", "docstring": "Initialize UART from provided bluez device.", "id": "f9593:c0:m0"}
{"signature": "def write(self, data):", "body": "self._tx.write_value(data)<EOL>", "docstring": "Write a string of data to the UART device.", "id": "f9593:c0:m2"}
{"signature": "def set_color(self, r, g, b):", "body": "<EOL>command = '<STR_LIT>'.format(chr(r & <NUM_LIT>),<EOL>chr(g & <NUM_LIT>),<EOL>chr(b & <NUM_LIT>))<EOL>self._color.write_value(command)<EOL>", "docstring": "Set the red, green, blue color of the bulb.", "id": "f9594:c0:m1"}
{"signature": "def __init__(self, device):", "body": "<EOL>self._dis = device.find_service(DIS_SERVICE_UUID)<EOL>self._manufacturer = self._dis.find_characteristic(MANUFACTURER_CHAR_UUID)<EOL>self._model = self._dis.find_characteristic(MODEL_CHAR_UUID)<EOL>self._serial = self._dis.find_characteristic(SERIAL_CHAR_UUID)<EOL>self._hw_revision = self._dis.find_characteristic(HW_REVISION_CHAR_UUID)<EOL>self._sw_revision = self._dis.find_characteristic(SW_REVISION_CHAR_UUID)<EOL>self._fw_revision = self._dis.find_characteristic(FW_REVISION_CHAR_UUID)<EOL>self._sys_id = self._dis.find_characteristic(SYS_ID_CHAR_UUID)<EOL>self._reg_cert = self._dis.find_characteristic(REG_CERT_CHAR_UUID)<EOL>self._pnp_id = self._dis.find_characteristic(PNP_ID_CHAR_UUID)<EOL>", "docstring": "Initialize device information from provided bluez device.", "id": "f9595:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def disconnect_devices(cls):<DEDENT>", "body": "return get_provider().disconnect_devices(service_uuids=cls.ADVERTISED)<EOL>", "docstring": "Disconnect any currently connected devices that implement this\n        service.", "id": "f9597:c0:m2"}
{"signature": "@classmethod<EOL><INDENT>def discover(cls, device, timeout_sec=TIMEOUT_SEC):<DEDENT>", "body": "device.discover(cls.SERVICES, cls.CHARACTERISTICS, timeout_sec)<EOL>", "docstring": "Wait until the specified device has discovered the expected services\n        and characteristics for this service.  Should be called once before other\n        calls are made on the service.  Returns true if the service has been\n        discovered in the specified timeout, or false if not discovered.", "id": "f9597:c0:m3"}
{"signature": "@property<EOL><INDENT>def is_scanning(self):<DEDENT>", "body": "return self._props.Get(_INTERFACE, '<STR_LIT>')<EOL>", "docstring": "Return True if the BLE adapter is scanning for devices, otherwise\n        return False.", "id": "f9598:c0:m5"}
{"signature": "def disconnect_devices(self, service_uuids=[]):", "body": "service_uuids = set(service_uuids)<EOL>for device in self.list_devices():<EOL><INDENT>if not device.is_connected:<EOL><INDENT>continue<EOL><DEDENT>device_uuids = set(map(lambda x: x.uuid, device.list_services()))<EOL>if device_uuids >= service_uuids:<EOL><INDENT>device.disconnect()<EOL><DEDENT><DEDENT>", "docstring": "Disconnect any connected devices that have the specified list of\n        service UUIDs.  The default is an empty list which means all devices\n        are disconnected.", "id": "f9599:c0:m5"}
{"signature": "def list_adapters(self):", "body": "return map(BluezAdapter, self._get_objects('<STR_LIT>'))<EOL>", "docstring": "Return a list of BLE adapter objects connected to the system.", "id": "f9599:c0:m6"}
{"signature": "def _get_objects(self, interface, parent_path='<STR_LIT>'):", "body": "<EOL>parent_path = parent_path.lower()<EOL>objects = []<EOL>for opath, interfaces in iteritems(self._bluez.GetManagedObjects()):<EOL><INDENT>if interface in interfaces.keys() and opath.lower().startswith(parent_path):<EOL><INDENT>objects.append(self._bus.get_object('<STR_LIT>', opath))<EOL><DEDENT><DEDENT>return objects<EOL>", "docstring": "Return a list of all bluez DBus objects that implement the requested\n        interface name and are under the specified path.  The default is to\n        search devices under the root of all bluez objects.", "id": "f9599:c0:m8"}
{"signature": "def _get_objects_by_path(self, paths):", "body": "return map(lambda x: self._bus.get_object('<STR_LIT>', x), paths)<EOL>", "docstring": "Return a list of all bluez DBus objects from the provided list of paths.", "id": "f9599:c0:m9"}
{"signature": "def run_mainloop_with(self, target):", "body": "<EOL>self._user_thread = threading.Thread(target=self._user_thread_main, args=(target,))<EOL>self._user_thread.daemon = True  <EOL>self._user_thread.start()<EOL>self._gobject_mainloop = GObject.MainLoop()<EOL>try:<EOL><INDENT>self._gobject_mainloop.run()  <EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>self._gobject_mainloop.quit()<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>if self._exception is not None:<EOL><INDENT>raise_(self._exception[<NUM_LIT:1>], None, self._exception[<NUM_LIT:2>])<EOL><DEDENT>else:<EOL><INDENT>sys.exit(self._return_code)<EOL><DEDENT>", "docstring": "Start the OS's main loop to process asyncronous BLE events and then\n        run the specified target function in a background thread.  Target\n        function should be a function that takes no parameters and optionally\n        return an integer response code.  When the target function stops\n        executing or returns with value then the main loop will be stopped and\n        the program will exit with the returned code.\n\n        Note that an OS main loop is required to process asyncronous BLE events\n        and this function is provided as a convenience for writing simple tools\n        and scripts that don't need to be full-blown GUI applications.  If you\n        are writing a GUI application that has a main loop (a GTK glib main loop\n        on Linux, or a Cocoa main loop on OSX) then you don't need to call this\n        function.", "id": "f9599:c0:m2"}
{"signature": "def list_devices(self):", "body": "return map(BluezDevice, self._get_objects('<STR_LIT>'))<EOL>", "docstring": "Return a list of BLE devices known to the system.", "id": "f9599:c0:m7"}
{"signature": "def write_value(self, value):", "body": "self._characteristic.WriteValue(value)<EOL>", "docstring": "Write the specified value to this characteristic.", "id": "f9600:c1:m3"}
{"signature": "def list_characteristics(self):", "body": "paths = self._props.Get(_SERVICE_INTERFACE, '<STR_LIT>')<EOL>return map(BluezGattCharacteristic,<EOL>get_provider()._get_objects_by_path(paths))<EOL>", "docstring": "Return list of GATT characteristics that have been discovered for this\n        service.", "id": "f9600:c0:m2"}
{"signature": "def __init__(self, dbus_obj):", "body": "self._characteristic = dbus.Interface(dbus_obj, _CHARACTERISTIC_INTERFACE)<EOL>self._props = dbus.Interface(dbus_obj, '<STR_LIT>')<EOL>", "docstring": "Create an instance of the GATT characteristic from the provided bluez\n        DBus object.", "id": "f9600:c1:m0"}
{"signature": "@property<EOL><INDENT>def uuid(self):<DEDENT>", "body": "return uuid.UUID(str(self._props.Get(_CHARACTERISTIC_INTERFACE, '<STR_LIT>')))<EOL>", "docstring": "Return the UUID of this GATT characteristic.", "id": "f9600:c1:m1"}
{"signature": "@property<EOL><INDENT>def uuid(self):<DEDENT>", "body": "return uuid.UUID(str(self._props.Get(_DESCRIPTOR_INTERFACE, '<STR_LIT>')))<EOL>", "docstring": "Return the UUID of this GATT descriptor.", "id": "f9600:c2:m1"}
{"signature": "def read_value(self):", "body": "return self._descriptor.ReadValue()<EOL>", "docstring": "Read the value of this descriptor.", "id": "f9600:c2:m2"}
{"signature": "def read_value(self):", "body": "return self._characteristic.ReadValue()<EOL>", "docstring": "Read the value of this characteristic.", "id": "f9600:c1:m2"}
{"signature": "@property<EOL><INDENT>def rssi(self):<DEDENT>", "body": "return self._props.Get(_INTERFACE, '<STR_LIT>')<EOL>", "docstring": "Return the RSSI signal strength in decibels.", "id": "f9601:c0:m10"}
{"signature": "@property<EOL><INDENT>def advertised(self):<DEDENT>", "body": "uuids = []<EOL>try:<EOL><INDENT>uuids = self._props.Get(_INTERFACE, '<STR_LIT>')<EOL><DEDENT>except dbus.exceptions.DBusException as ex:<EOL><INDENT>if ex.get_dbus_name() != '<STR_LIT>':<EOL><INDENT>raise ex<EOL><DEDENT><DEDENT>return [uuid.UUID(str(x)) for x in uuids]<EOL>", "docstring": "Return a list of UUIDs for services that are advertised by this\n        device.", "id": "f9601:c0:m6"}
{"signature": "def discover(self, service_uuids, char_uuids, timeout_sec=TIMEOUT_SEC):", "body": "<EOL>expected_services = set(service_uuids)<EOL>expected_chars = set(char_uuids)<EOL>start = time.time()<EOL>while True:<EOL><INDENT>actual_services = set(self.advertised)<EOL>chars = map(BluezGattCharacteristic,<EOL>get_provider()._get_objects(_CHARACTERISTIC_INTERFACE,<EOL>self._device.object_path))<EOL>actual_chars = set(map(lambda x: x.uuid, chars))<EOL>if actual_services >= expected_services and actual_chars >= expected_chars:<EOL><INDENT>return True<EOL><DEDENT>if time.time()-start >= timeout_sec:<EOL><INDENT>return False<EOL><DEDENT>time.sleep(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Wait up to timeout_sec for the specified services and characteristics\n        to be discovered on the device.  If the timeout is exceeded without\n        discovering the services and characteristics then an exception is thrown.", "id": "f9601:c0:m5"}
{"signature": "@property<EOL><INDENT>def _adapter(self):<DEDENT>", "body": "return self._props.Get(_INTERFACE, '<STR_LIT>')<EOL>", "docstring": "Return the DBus path to the adapter that owns this device.", "id": "f9601:c0:m11"}
{"signature": "def status_job(self, fn=None, name=None, timeout=<NUM_LIT:3>):", "body": "if fn is None:<EOL><INDENT>def decorator(fn):<EOL><INDENT>self.add_status_job(fn, name, timeout)<EOL><DEDENT>return decorator<EOL><DEDENT>else:<EOL><INDENT>self.add_status_job(fn, name, timeout)<EOL><DEDENT>", "docstring": "Decorator that invokes `add_status_job`.\n\n        ::\n\n            @app.status_job\n            def postgresql():\n                # query/ping postgres\n\n            @app.status_job(name=\"Active Directory\")\n            def active_directory():\n                # query active directory\n\n            @app.status_job(timeout=5)\n            def paypal():\n                # query paypal, timeout after 5 seconds", "id": "f9608:c0:m3"}
{"signature": "def add_status_job(self, job_func, name=None, timeout=<NUM_LIT:3>):", "body": "job_name = job_func.__name__ if name is None else name<EOL>job = (job_name, timeout, job_func)<EOL>self._jobs.append(job)<EOL>", "docstring": "Adds a job to be included during calls to the `/status` endpoint.\n\n        :param job_func: the status function.\n        :param name: the name used in the JSON response for the given status\n                     function. The name of the function is the default.\n        :param timeout: the time limit before the job status is set to\n                        \"timeout exceeded\".", "id": "f9608:c0:m2"}
{"signature": "def print_err(*args, **kwargs):", "body": "if kwargs.get('<STR_LIT:file>', None) is None:<EOL><INDENT>kwargs['<STR_LIT:file>'] = sys.stderr<EOL><DEDENT>print(*args, **kwargs)<EOL>", "docstring": "A wrapper for print() that uses stderr by default.", "id": "f9612:m10"}
{"signature": "def print_header(cmd):", "body": "textcolors = {'<STR_LIT>': '<STR_LIT>'}<EOL>libcolors = {'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>'}<EOL>vercolors = {'<STR_LIT>': '<STR_LIT>'}<EOL>execolors = {'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>'}<EOL>argcolors = {'<STR_LIT>': '<STR_LIT>'}<EOL>def fmt_app_info(name, ver):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return C('<STR_LIT>', **textcolors).join(<EOL>C(name, **libcolors),<EOL>C(ver, **vercolors)<EOL>)<EOL><DEDENT>def fmt_cmd_args(cmdargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return C('<STR_LIT:U+0020>').join(<EOL>C(cmdargs[<NUM_LIT:0>], **execolors),<EOL>C('<STR_LIT:U+0020>').join(C(s, **argcolors) for s in cmdargs[<NUM_LIT:1>:]),<EOL>).join('<STR_LIT:(>', '<STR_LIT:)>', style='<STR_LIT>')<EOL><DEDENT>print('<STR_LIT>'.format(<EOL>C('<STR_LIT:U+0020>').join(<EOL>C('<STR_LIT>', **textcolors),<EOL>fmt_app_info(APPNAME, APPVERSION),<EOL>C('<STR_LIT>', **textcolors),<EOL>fmt_app_info('<STR_LIT>', green_version),<EOL>fmt_cmd_args(cmd),<EOL>)<EOL>))<EOL>print(<EOL>C('<STR_LIT>').join(<EOL>C('<STR_LIT>', '<STR_LIT>'),<EOL>C(os.getcwd(), '<STR_LIT>', style='<STR_LIT>'),<EOL>),<EOL>)<EOL>", "docstring": "Print some info about the Colr and Green versions being used.", "id": "f9612:m11"}
{"signature": "def main():", "body": "global DEBUG<EOL>argd = docopt(USAGESTR, version=VERSIONSTR, script=SCRIPT)<EOL>DEBUG = argd['<STR_LIT>']<EOL>width = parse_int(argd['<STR_LIT>'] or DEFAULT_WIDTH) or <NUM_LIT:1><EOL>indent = parse_int(argd['<STR_LIT>'] or (argd['<STR_LIT>'] or <NUM_LIT:0>))<EOL>prepend = '<STR_LIT:U+0020>' * (indent * <NUM_LIT:4>)<EOL>if prepend and argd['<STR_LIT>']:<EOL><INDENT>width -= len(prepend)<EOL><DEDENT>userprepend = argd['<STR_LIT>'] or (argd['<STR_LIT>'] or '<STR_LIT>')<EOL>prepend = '<STR_LIT>'.join((prepend, userprepend))<EOL>if argd['<STR_LIT>']:<EOL><INDENT>width -= len(userprepend)<EOL><DEDENT>userappend = argd['<STR_LIT>'] or (argd['<STR_LIT>'] or '<STR_LIT>')<EOL>if argd['<STR_LIT>']:<EOL><INDENT>width -= len(userappend)<EOL><DEDENT>if argd['<STR_LIT>']:<EOL><INDENT>argd['<STR_LIT>'] = (<EOL>(try_read_file(w) if len(w) < <NUM_LIT> else w)<EOL>for w in argd['<STR_LIT>']<EOL>)<EOL>words = '<STR_LIT:U+0020>'.join((w for w in argd['<STR_LIT>'] if w))<EOL><DEDENT>else:<EOL><INDENT>words = read_stdin()<EOL><DEDENT>block = FormatBlock(words).iter_format_block(<EOL>chars=argd['<STR_LIT>'],<EOL>fill=argd['<STR_LIT>'],<EOL>prepend=prepend,<EOL>strip_first=argd['<STR_LIT>'],<EOL>append=userappend,<EOL>strip_last=argd['<STR_LIT>'],<EOL>width=width,<EOL>newlines=argd['<STR_LIT>'],<EOL>lstrip=argd['<STR_LIT>'],<EOL>)<EOL>for i, line in enumerate(block):<EOL><INDENT>if argd['<STR_LIT>']:<EOL><INDENT>print('<STR_LIT>'.format(i + <NUM_LIT:1>, line))<EOL><DEDENT>else:<EOL><INDENT>print(line)<EOL><DEDENT><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Main entry point, expects doctopt arg dict as argd.", "id": "f9614:m0"}
{"signature": "def read_stdin():", "body": "if sys.stdin.isatty() and sys.stdout.isatty():<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>return sys.stdin.read()<EOL>", "docstring": "Read from stdin, but print a helpful message if it's a tty.", "id": "f9614:m4"}
{"signature": "def debug(*args, **kwargs):", "body": "if not (DEBUG and args):<EOL><INDENT>return None<EOL><DEDENT>parent = kwargs.get('<STR_LIT>', None)<EOL>with suppress(KeyError):<EOL><INDENT>kwargs.pop('<STR_LIT>')<EOL><DEDENT>backlevel = kwargs.get('<STR_LIT>', <NUM_LIT:1>)<EOL>with suppress(KeyError):<EOL><INDENT>kwargs.pop('<STR_LIT>')<EOL><DEDENT>frame = inspect.currentframe()<EOL>while backlevel > <NUM_LIT:0>:<EOL><INDENT>frame = frame.f_back<EOL>backlevel -= <NUM_LIT:1><EOL><DEDENT>fname = os.path.split(frame.f_code.co_filename)[-<NUM_LIT:1>]<EOL>lineno = frame.f_lineno<EOL>if parent:<EOL><INDENT>func = '<STR_LIT>'.format(parent.__class__.__name__, frame.f_code.co_name)<EOL><DEDENT>else:<EOL><INDENT>func = frame.f_code.co_name<EOL><DEDENT>lineinfo = '<STR_LIT>'.format(<EOL>C(fname, '<STR_LIT>'),<EOL>C(str(lineno).ljust(<NUM_LIT:4>), '<STR_LIT>'),<EOL>C().join(C(func, '<STR_LIT>'), '<STR_LIT>').ljust(<NUM_LIT:20>)<EOL>)<EOL>pargs = list(C(a, '<STR_LIT>').str() for a in args)<EOL>pargs[<NUM_LIT:0>] = '<STR_LIT>'.join((lineinfo, pargs[<NUM_LIT:0>]))<EOL>print_err(*pargs, **kwargs)<EOL>", "docstring": "Print a message only if DEBUG is truthy.", "id": "f9614:m1"}
{"signature": "def try_read_file(s):", "body": "try:<EOL><INDENT>with open(s, '<STR_LIT:r>') as f:<EOL><INDENT>data = f.read()<EOL><DEDENT><DEDENT>except FileNotFoundError:<EOL><INDENT>return s<EOL><DEDENT>except EnvironmentError as ex:<EOL><INDENT>print_err('<STR_LIT>'.format(s, ex))<EOL>return None<EOL><DEDENT>return data<EOL>", "docstring": "If `s` is a file name, read the file and return it's content.\n        Otherwise, return the original string.\n        Returns None if the file was opened, but errored during reading.", "id": "f9614:m5"}
{"signature": "def format(<EOL>self, text=None,<EOL>width=<NUM_LIT>, chars=False, fill=False, newlines=False,<EOL>prepend=None, append=None, strip_first=False, strip_last=False,<EOL>lstrip=False):", "body": "<EOL>return '<STR_LIT:\\n>'.join(<EOL>self.iter_format_block(<EOL>(self.text if text is None else text) or '<STR_LIT>',<EOL>prepend=prepend,<EOL>append=append,<EOL>strip_first=strip_first,<EOL>strip_last=strip_last,<EOL>width=width,<EOL>chars=chars,<EOL>fill=fill,<EOL>newlines=newlines,<EOL>lstrip=lstrip<EOL>)<EOL>)<EOL>", "docstring": "Format a long string into a block of newline seperated text.\n            Arguments:\n                See iter_format_block().", "id": "f9616:c0:m3"}
{"signature": "def get_code_indices(s: Any) -> Dict[int, str]:", "body": "indices = {}<EOL>i = <NUM_LIT:0><EOL>codes = get_codes(s)<EOL>for code in codes:<EOL><INDENT>codeindex = s.index(code)<EOL>realindex = i + codeindex<EOL>indices[realindex] = code<EOL>codelen = len(code)<EOL>i = realindex + codelen<EOL>s = s[codeindex + codelen:]<EOL><DEDENT>return indices<EOL>", "docstring": "Retrieve a dict of {index: escape_code} for a given string.\n        If no escape codes are found, an empty dict is returned.", "id": "f9617:m1"}
{"signature": "def get_codes(s: Any) -> List[str]:", "body": "return codegrabpat.findall(str(s))<EOL>", "docstring": "Grab all escape codes from a string.\n        Returns a list of all escape codes.", "id": "f9617:m0"}
{"signature": "def finalize(self, result=None):", "body": "if not self.settings_path:<EOL><INDENT>return<EOL><DEDENT>from django.test.utils import teardown_test_environment<EOL>from django.db import connection<EOL>from django.conf import settings<EOL>self.call_plugins_method('<STR_LIT>', settings, connection)<EOL>try:<EOL><INDENT>connection.creation.destroy_test_db(<EOL>self.old_db,<EOL>verbosity=self.verbosity,<EOL>)<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT>self.call_plugins_method('<STR_LIT>', settings, connection)<EOL>self.call_plugins_method(<EOL>'<STR_LIT>', settings, teardown_test_environment)<EOL>teardown_test_environment()<EOL>self.call_plugins_method('<STR_LIT>', settings)<EOL>", "docstring": "Clean up any created database and schema.", "id": "f9619:c1:m24"}
{"signature": "def begin(self):", "body": "for plugin in self.nose_config.plugins.plugins:<EOL><INDENT>if getattr(plugin, '<STR_LIT>', False):<EOL><INDENT>self.django_plugins.append(plugin)<EOL><DEDENT><DEDENT>os.environ['<STR_LIT>'] = self.settings_module<EOL>if self.conf.addPaths:<EOL><INDENT>map(add_path, self.conf.where)<EOL><DEDENT>try:<EOL><INDENT>__import__(self.settings_module)<EOL>self.settings_path = self.settings_module<EOL><DEDENT>except ImportError:<EOL><INDENT>self.settings_path = get_settings_path(self.settings_module)<EOL>if not self.settings_path:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>add_path(self.settings_path)<EOL>sys.path.append(self.settings_path)<EOL><DEDENT>from django.conf import settings<EOL>settings.DEBUG = False<EOL>self.call_plugins_method('<STR_LIT>', settings)<EOL>from django.core import management<EOL>from django.test.utils import setup_test_environment<EOL>if hasattr(settings, '<STR_LIT>'):<EOL><INDENT>self.old_db = settings.DATABASES['<STR_LIT:default>']['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>self.old_db = settings.DATABASE_NAME<EOL><DEDENT>from django.db import connections<EOL>self._monkeypatch_test_classes()<EOL>for connection in connections.all():<EOL><INDENT>self.call_plugins_method(<EOL>'<STR_LIT>', settings, setup_test_environment,<EOL>connection)<EOL><DEDENT>try:<EOL><INDENT>setup_test_environment()<EOL><DEDENT>except RuntimeError:  <EOL><INDENT>pass<EOL><DEDENT>import django<EOL>if hasattr(django, '<STR_LIT>'):<EOL><INDENT>django.setup()<EOL><DEDENT>self.call_plugins_method('<STR_LIT>', settings)<EOL>management.get_commands()<EOL>if self.django_version < self.DJANGO_1_7:<EOL><INDENT>management._commands['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>for connection in connections.all():<EOL><INDENT>self.call_plugins_method(<EOL>'<STR_LIT>', settings, connection, 
management)<EOL>connection.creation.create_test_db(<EOL>verbosity=self.verbosity,<EOL>autoclobber=True,<EOL>)<EOL>logger.debug(\"<STR_LIT>\")<EOL>self._num_syncdb_calls += <NUM_LIT:1><EOL>self.call_plugins_method('<STR_LIT>', settings, connection)<EOL><DEDENT>self.store_original_transaction_methods()<EOL>", "docstring": "Create the test database and schema, if needed, and switch the\nconnection over to that database. Then call install() to install\nall apps listed in the loaded settings module.", "id": "f9619:c1:m17"}
{"signature": "def _random_token(self, bits=<NUM_LIT>):", "body": "alphabet = string.ascii_letters + string.digits + '<STR_LIT>'<EOL>num_letters = int(math.ceil(bits / <NUM_LIT>))<EOL>return '<STR_LIT>'.join(random.choice(alphabet) for i in range(num_letters))<EOL>", "docstring": "Generates a random token, using the url-safe base64 alphabet.\nThe \"bits\" argument specifies the bits of randomness to use.", "id": "f9621:c0:m1"}
{"signature": "def _wait_for_connection(self, port):", "body": "connected = False<EOL>max_tries = <NUM_LIT:10><EOL>num_tries = <NUM_LIT:0><EOL>wait_time = <NUM_LIT:0.5><EOL>while not connected or num_tries >= max_tries:<EOL><INDENT>time.sleep(wait_time)<EOL>try:<EOL><INDENT>af = socket.AF_INET<EOL>addr = ('<STR_LIT:127.0.0.1>', port)<EOL>sock = socket.socket(af, socket.SOCK_STREAM)<EOL>sock.connect(addr)<EOL><DEDENT>except socket.error:<EOL><INDENT>if sock:<EOL><INDENT>sock.close()<EOL><DEDENT>num_tries += <NUM_LIT:1><EOL>continue<EOL><DEDENT>connected = True<EOL><DEDENT>if not connected:<EOL><INDENT>print(\"<STR_LIT>\", file=sys.stderr)<EOL><DEDENT>", "docstring": "Wait until we can make a socket connection to sphinx.", "id": "f9626:c0:m8"}
{"signature": "def options(self, parser, env=None):", "body": "if env is None:<EOL><INDENT>env = os.environ<EOL><DEDENT>parser.add_option(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>',<EOL>)<EOL>super(SphinxSearchPlugin, self).options(parser, env)<EOL>", "docstring": "Sphinx config file that can optionally take the following python\ntemplate string arguments:\n\n``database_name``\n``database_password``\n``database_username``\n``database_host``\n``database_port``\n``sphinx_search_data_dir``\n``searchd_log_dir``", "id": "f9626:c0:m1"}
{"signature": "def func():", "body": "pass<EOL>", "docstring": "Function-level test\n    >>> 1+3\n    4", "id": "f9640:m0"}
{"signature": "def callback(self):", "body": "", "docstring": "please implement", "id": "f9670:c1:m4"}
{"signature": "def make_body(self, data):", "body": "", "docstring": "please implement", "id": "f9670:c1:m1"}
{"signature": "def check_import_dashboard_stackexchange(elastic_url, import_file, es_index=None,<EOL>data_sources=None, add_vis_studies=False, strict=False):", "body": "if \"<STR_LIT>\" in data_sources and \"<STR_LIT>\" not in data_sources:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Check that stackexchange data sources adds also stackoverflow\n        data source which is the name used in panels", "id": "f9677:m0"}
{"signature": "def __add_types(self, raw_conf):", "body": "typed_conf = {}<EOL>for s in raw_conf.keys():<EOL><INDENT>typed_conf[s] = {}<EOL>for option in raw_conf[s]:<EOL><INDENT>val = raw_conf[s][option]<EOL>if len(val) > <NUM_LIT:1> and (val[<NUM_LIT:0>] == '<STR_LIT:\">' and val[-<NUM_LIT:1>] == '<STR_LIT:\">'):<EOL><INDENT>typed_conf[s][option] = val[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>elif len(val) > <NUM_LIT:1> and (val[<NUM_LIT:0>] == '<STR_LIT:[>' and val[-<NUM_LIT:1>] == '<STR_LIT:]>'):<EOL><INDENT>typed_conf[s][option] = val[<NUM_LIT:1>:-<NUM_LIT:1>].replace('<STR_LIT:U+0020>', '<STR_LIT>').split('<STR_LIT:U+002C>')<EOL><DEDENT>elif val.lower() in ['<STR_LIT:true>', '<STR_LIT:false>']:<EOL><INDENT>typed_conf[s][option] = True if val.lower() == '<STR_LIT:true>' else False<EOL><DEDENT>elif val.lower() == '<STR_LIT:none>':<EOL><INDENT>typed_conf[s][option] = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>typed_conf[s][option] = int(val)<EOL><DEDENT>except ValueError:<EOL><INDENT>typed_conf[s][option] = val<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return typed_conf<EOL>", "docstring": "Convert to int, boolean, list, None types config items", "id": "f9685:c0:m11"}
{"signature": "def __init__(self, conf_file, conf_list=[]):", "body": "self.conf_list = [conf_file] + conf_list<EOL>self.raw_conf = None<EOL>self.__read_conf_files()<EOL>", "docstring": "Initialize object.\n\n        The object can be initialized with a configuration file,\n        and, optionally, with a list of other configuration files.\n        If the list of other configuration files exist, it will\n        be read, in order, after the configuration file.\n        Values set in a file read later will overwrite values set\n        in files read earlier. Values not set by any file will\n        be set to the default values, when possible.\n\n        :param conf_file; configuration file name\n        :param conf_list: list of other configuration files (default: empty)", "id": "f9685:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def general_params(cls):<DEDENT>", "body": "params = {}<EOL>params_general = {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": <NUM_LIT>,<EOL>\"<STR_LIT:type>\": int,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": True,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": \"<STR_LIT:file>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": <NUM_LIT>,  <EOL>\"<STR_LIT:type>\": int,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": <NUM_LIT:5>,<EOL>\"<STR_LIT:type>\": int,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": <NUM_LIT:1000>,<EOL>\"<STR_LIT:type>\": int,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": <NUM_LIT:100>,<EOL>\"<STR_LIT:type>\": int,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": ALIASES_JSON,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": 
\"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": MENU_YAML,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": int,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>}<EOL>}<EOL>}<EOL>params_projects = {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": PROJECTS_JSON,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>}<EOL>}<EOL>}<EOL>params_phases = {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": True,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": True,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": True,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": True,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>}<EOL>}<EOL>}<EOL>general_config_params = [params_general, params_projects, 
params_phases]<EOL>for section_params in general_config_params:<EOL><INDENT>params.update(section_params)<EOL><DEDENT>params_collection = {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT:password>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT:user>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT:url>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>}<EOL>}<EOL>}<EOL>params_enrichment = {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT:url>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": True,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": <NUM_LIT:2>,<EOL>\"<STR_LIT:type>\": int,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT:user>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT:password>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": 
str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>}<EOL>}<EOL>}<EOL>params_panels = {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT:strict>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": True,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": True,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>}<EOL>}<EOL>}<EOL>params_report = {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": 
{<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": [],<EOL>\"<STR_LIT:type>\": list,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>}<EOL>}<EOL>}<EOL>params_sortinghat = {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT:True>\",<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": [\"<STR_LIT:email>\"],<EOL>\"<STR_LIT:type>\": list,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": <NUM_LIT>,<EOL>\"<STR_LIT:type>\": int,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": 
\"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT:host>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT:user>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT:root>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT:password>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>\"<STR_LIT:type>\": list,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": True,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": [],<EOL>\"<STR_LIT:type>\": list,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": 
\"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": None,<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": [],<EOL>\"<STR_LIT:type>\": list,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": [],<EOL>\"<STR_LIT:type>\": list,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT:default>\": False,<EOL>\"<STR_LIT:type>\": bool,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>}<EOL>}<EOL>}<EOL>params_track_items = {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT:default>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": str,<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\"<EOL>}<EOL>}<EOL>}<EOL>tasks_config_params = [params_collection, params_enrichment, params_panels,<EOL>params_report, params_sortinghat, params_track_items]<EOL>for section_params in tasks_config_params:<EOL><INDENT>params.update(section_params)<EOL><DEDENT>return params<EOL>", "docstring": "Define all the possible config params", "id": "f9685:c0:m2"}
{"signature": "def __create_arthur_json(self, repo, backend_args):", "body": "backend_args = self._compose_arthur_params(self.backend_section, repo)<EOL>if self.backend_section == '<STR_LIT>':<EOL><INDENT>backend_args['<STR_LIT>'] = os.path.join(self.REPOSITORY_DIR, repo)<EOL><DEDENT>backend_args['<STR_LIT>'] = self.backend_tag(repo)<EOL>ajson = {\"<STR_LIT>\": [{}]}<EOL>ajson[\"<STR_LIT>\"][<NUM_LIT:0>]['<STR_LIT>'] = self.backend_tag(repo)<EOL>ajson[\"<STR_LIT>\"][<NUM_LIT:0>]['<STR_LIT>'] = self.backend_section.split(\"<STR_LIT::>\")[<NUM_LIT:0>]<EOL>ajson[\"<STR_LIT>\"][<NUM_LIT:0>]['<STR_LIT>'] = backend_args<EOL>ajson[\"<STR_LIT>\"][<NUM_LIT:0>]['<STR_LIT>'] = backend_args['<STR_LIT>']<EOL>ajson[\"<STR_LIT>\"][<NUM_LIT:0>]['<STR_LIT>'] = {}<EOL>ajson[\"<STR_LIT>\"][<NUM_LIT:0>]['<STR_LIT>'] = {\"<STR_LIT>\": self.ARTHUR_TASK_DELAY}<EOL>es_col_url = self._get_collection_url()<EOL>es_index = self.conf[self.backend_section]['<STR_LIT>']<EOL>es = ElasticSearch(es_col_url, es_index)<EOL>connector = get_connector_from_name(self.backend_section)<EOL>klass = connector[<NUM_LIT:0>]  <EOL>signature = inspect.signature(klass.fetch)<EOL>last_activity = None<EOL>filter_ = {\"<STR_LIT:name>\": \"<STR_LIT>\", \"<STR_LIT:value>\": backend_args['<STR_LIT>']}<EOL>if '<STR_LIT>' in signature.parameters:<EOL><INDENT>last_activity = es.get_last_item_field('<STR_LIT>', [filter_])<EOL>if last_activity:<EOL><INDENT>ajson[\"<STR_LIT>\"][<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>'] = last_activity.isoformat()<EOL><DEDENT><DEDENT>elif '<STR_LIT>' in signature.parameters:<EOL><INDENT>last_activity = es.get_last_item_field('<STR_LIT>', [filter_])<EOL>if last_activity:<EOL><INDENT>ajson[\"<STR_LIT>\"][<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>'] = last_activity<EOL><DEDENT><DEDENT>if last_activity:<EOL><INDENT>logging.info(\"<STR_LIT>\", last_activity)<EOL><DEDENT>return(ajson)<EOL>", "docstring": "Create the JSON for configuring arthur to collect data\n\n        
https://github.com/grimoirelab/arthur#adding-tasks\n        Sample for git:\n\n        {\n        \"tasks\": [\n            {\n                \"task_id\": \"arthur.git\",\n                \"backend\": \"git\",\n                \"backend_args\": {\n                    \"gitpath\": \"/tmp/arthur_git/\",\n                    \"uri\": \"https://github.com/grimoirelab/arthur.git\"\n                },\n                \"category\": \"commit\",\n                \"archive_args\": {\n                    \"archive_path\": '/tmp/test_archives',\n                    \"fetch_from_archive\": false,\n                    \"archive_after\": None\n                },\n                \"scheduler_args\": {\n                    \"delay\": 10\n                }\n            }\n        ]\n        }", "id": "f9687:c1:m5"}
{"signature": "def __feed_arthur(self):", "body": "with self.ARTHUR_FEED_LOCK:<EOL><INDENT>if (time.time() - self.ARTHUR_LAST_MEMORY_CHECK) > <NUM_LIT:5> * self.ARTHUR_LAST_MEMORY_CHECK_TIME:<EOL><INDENT>self.ARTHUR_LAST_MEMORY_CHECK = time.time()<EOL>logger.debug(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>memory_size = self.measure_memory(self.arthur_items) / (<NUM_LIT> * <NUM_LIT>)<EOL><DEDENT>except RuntimeError as ex:<EOL><INDENT>logger.warning(\"<STR_LIT>\", ex)<EOL>memory_size = self.ARTHUR_LAST_MEMORY_SIZE<EOL><DEDENT>self.ARTHUR_LAST_MEMORY_CHECK_TIME = time.time() - self.ARTHUR_LAST_MEMORY_CHECK<EOL>logger.debug(\"<STR_LIT>\",<EOL>memory_size, self.ARTHUR_LAST_MEMORY_CHECK_TIME)<EOL>self.ARTHUR_LAST_MEMORY_SIZE = memory_size<EOL><DEDENT>if self.ARTHUR_LAST_MEMORY_SIZE > self.ARTHUR_MAX_MEMORY_SIZE:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>logger.info(\"<STR_LIT>\")<EOL>db_url = self.config.get_conf()['<STR_LIT>']['<STR_LIT>']<EOL>conn = redis.StrictRedis.from_url(db_url)<EOL>logger.debug(\"<STR_LIT>\", db_url)<EOL>pipe = conn.pipeline()<EOL>pipe.lrange(Q_STORAGE_ITEMS, <NUM_LIT:0>, self.ARTHUR_REDIS_ITEMS - <NUM_LIT:1>)<EOL>pipe.ltrim(Q_STORAGE_ITEMS, self.ARTHUR_REDIS_ITEMS, -<NUM_LIT:1>)<EOL>items = pipe.execute()[<NUM_LIT:0>]<EOL>for item in items:<EOL><INDENT>arthur_item = pickle.loads(item)<EOL>if arthur_item['<STR_LIT>'] not in self.arthur_items:<EOL><INDENT>self.arthur_items[arthur_item['<STR_LIT>']] = []<EOL><DEDENT>self.arthur_items[arthur_item['<STR_LIT>']].append(arthur_item)<EOL><DEDENT>for tag in self.arthur_items:<EOL><INDENT>if self.arthur_items[tag]:<EOL><INDENT>logger.debug(\"<STR_LIT>\", tag, len(self.arthur_items[tag]))<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Feed Ocean with backend data collected from arthur redis queue", "id": "f9687:c1:m2"}
{"signature": "def __feed_backend_arthur(self, repo):", "body": "<EOL>self.__feed_arthur()<EOL>tag = self.backend_tag(repo)<EOL>logger.debug(\"<STR_LIT>\", self.arthur_items.keys())<EOL>logger.debug(\"<STR_LIT>\", tag)<EOL>if tag in self.arthur_items:<EOL><INDENT>logger.debug(\"<STR_LIT>\", tag)<EOL>while self.arthur_items[tag]:<EOL><INDENT>yield self.arthur_items[tag].pop()<EOL><DEDENT><DEDENT>", "docstring": "Feed Ocean with backend data collected from arthur redis queue", "id": "f9687:c1:m4"}
{"signature": "def __check_looks_like_uri(self, uri):", "body": "if uri.split('<STR_LIT:/>')[<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>return True<EOL><DEDENT>elif uri.split('<STR_LIT:/>')[<NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>if uri.split('<STR_LIT:/>')[<NUM_LIT:5>] == '<STR_LIT>':<EOL><INDENT>return True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise GithubFileNotFound('<STR_LIT>' % uri)<EOL><DEDENT>", "docstring": "Checks the URI looks like a RAW uri in github:\n\n        - 'https://raw.githubusercontent.com/github/hubot/master/README.md'\n        - 'https://github.com/github/hubot/raw/master/README.md'\n\n        :param uri: uri of the file", "id": "f9688:c0:m1"}
{"signature": "def read_file_from_uri(self, uri):", "body": "logger.debug(\"<STR_LIT>\" % (uri))<EOL>self.__check_looks_like_uri(uri)<EOL>try:<EOL><INDENT>req = urllib.request.Request(uri)<EOL>req.add_header('<STR_LIT>', '<STR_LIT>' % self.token)<EOL>r = urllib.request.urlopen(req)<EOL><DEDENT>except urllib.error.HTTPError as err:<EOL><INDENT>if err.code == <NUM_LIT>:<EOL><INDENT>raise GithubFileNotFound('<STR_LIT>' % uri)<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>return r.read().decode(\"<STR_LIT:utf-8>\")<EOL>", "docstring": "Reads the file from Github\n\n        :param uri: URI of the Github raw File\n\n        :returns: UTF-8 text with the content", "id": "f9688:c0:m2"}
{"signature": "def execute_tasks(self, tasks_cls):", "body": "self.execute_batch_tasks(tasks_cls)<EOL>", "docstring": "Just a wrapper to the execute_batch_tasks method", "id": "f9690:c0:m6"}
{"signature": "def __execute_initial_load(self):", "body": "if self.conf['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>tasks_cls = [TaskPanels, TaskPanelsMenu]<EOL>self.execute_tasks(tasks_cls)<EOL><DEDENT>if self.conf['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>tasks_cls = [TaskInitSortingHat]<EOL>self.execute_tasks(tasks_cls)<EOL><DEDENT>logger.info(\"<STR_LIT>\")<EOL>tasks_cls = [TaskProjects]<EOL>self.execute_tasks(tasks_cls)<EOL>logger.info(\"<STR_LIT>\")<EOL>return<EOL>", "docstring": "Tasks that should be done just one time", "id": "f9690:c0:m10"}
{"signature": "def execute_batch_tasks(self, tasks_cls, big_delay=<NUM_LIT:0>, small_delay=<NUM_LIT:0>, wait_for_threads=True):", "body": "def _split_tasks(tasks_cls):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>backend_t = []<EOL>global_t = []<EOL>for t in tasks_cls:<EOL><INDENT>if t.is_backend_task(t):<EOL><INDENT>backend_t.append(t)<EOL><DEDENT>else:<EOL><INDENT>global_t.append(t)<EOL><DEDENT><DEDENT>return backend_t, global_t<EOL><DEDENT>backend_tasks, global_tasks = _split_tasks(tasks_cls)<EOL>logger.debug('<STR_LIT>' % (backend_tasks))<EOL>logger.debug('<STR_LIT>' % (global_tasks))<EOL>threads = []<EOL>stopper = threading.Event()<EOL>if len(backend_tasks) > <NUM_LIT:0>:<EOL><INDENT>repos_backend = self._get_repos_by_backend()<EOL>for backend in repos_backend:<EOL><INDENT>t = TasksManager(backend_tasks, backend, stopper, self.config, small_delay)<EOL>threads.append(t)<EOL>t.start()<EOL><DEDENT><DEDENT>if len(global_tasks) > <NUM_LIT:0>:<EOL><INDENT>gt = TasksManager(global_tasks, \"<STR_LIT>\", stopper, self.config, big_delay)<EOL>threads.append(gt)<EOL>gt.start()<EOL>if big_delay > <NUM_LIT:0>:<EOL><INDENT>when = datetime.now() + timedelta(seconds=big_delay)<EOL>when_str = when.strftime('<STR_LIT>')<EOL>logger.info(\"<STR_LIT>\" % (global_tasks, when_str))<EOL><DEDENT><DEDENT>if wait_for_threads:<EOL><INDENT>time.sleep(<NUM_LIT:1>)  <EOL>stopper.set()  <EOL><DEDENT>for t in threads:<EOL><INDENT>t.join()<EOL><DEDENT>self.__check_queue_for_errors()<EOL>logger.debug(\"<STR_LIT>\")<EOL>", "docstring": "Start a task manager per backend to complete the tasks.\n\n:param task_cls: list of tasks classes to be executed\n:param big_delay: seconds before global tasks are executed, should be days usually\n:param small_delay: seconds before backend tasks are executed, should be minutes\n:param wait_for_threads: boolean to set when threads are infinite or\n                        should be synchronized in a meeting point", "id": "f9690:c0:m8"}
{"signature": "def execute_nonstop_tasks(self, tasks_cls):", "body": "self.execute_batch_tasks(tasks_cls,<EOL>self.conf['<STR_LIT>']['<STR_LIT>'],<EOL>self.conf['<STR_LIT>']['<STR_LIT>'], False)<EOL>", "docstring": "Just a wrapper to the execute_batch_tasks method", "id": "f9690:c0:m7"}
{"signature": "def start(self):", "body": "<EOL>logger.info(\"<STR_LIT>\")<EOL>logger.info(\"<STR_LIT>\")<EOL>logger.info(\"<STR_LIT>\")<EOL>logger.info(\"<STR_LIT>\")<EOL>if not self.check_es_access():<EOL><INDENT>print('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if self.conf['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>if not self.check_redis_access():<EOL><INDENT>print('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if not self.check_arthur_access():<EOL><INDENT>print('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if self.conf['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>if not self.check_bestiary_access():<EOL><INDENT>print('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>self.__execute_initial_load()<EOL>all_tasks_cls = []<EOL>all_tasks_cls.append(TaskProjects)  <EOL>if self.conf['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>if not self.conf['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>all_tasks_cls.append(TaskRawDataCollection)<EOL><DEDENT>else:<EOL><INDENT>all_tasks_cls.append(TaskRawDataArthurCollection)<EOL><DEDENT><DEDENT>if self.conf['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>all_tasks_cls.append(TaskIdentitiesLoad)<EOL>all_tasks_cls.append(TaskIdentitiesMerge)<EOL>all_tasks_cls.append(TaskIdentitiesExport)<EOL><DEDENT>if self.conf['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>all_tasks_cls.append(TaskEnrich)<EOL><DEDENT>if self.conf['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>all_tasks_cls.append(TaskTrackItems)<EOL><DEDENT>if self.conf['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>all_tasks_cls.append(TaskReport)<EOL><DEDENT>while True:<EOL><INDENT>if not all_tasks_cls:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL>break<EOL><DEDENT>try:<EOL><INDENT>if not 
self.conf['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>self.execute_batch_tasks(all_tasks_cls,<EOL>self.conf['<STR_LIT>']['<STR_LIT>'],<EOL>self.conf['<STR_LIT>']['<STR_LIT>'])<EOL>self.execute_batch_tasks(all_tasks_cls,<EOL>self.conf['<STR_LIT>']['<STR_LIT>'],<EOL>self.conf['<STR_LIT>']['<STR_LIT>'])<EOL>break<EOL><DEDENT>else:<EOL><INDENT>self.execute_nonstop_tasks(all_tasks_cls)<EOL><DEDENT><DEDENT>except DataCollectionError as e:<EOL><INDENT>logger.error(str(e))<EOL>var = traceback.format_exc()<EOL>logger.error(var)<EOL><DEDENT>except DataEnrichmentError as e:<EOL><INDENT>logger.error(str(e))<EOL>var = traceback.format_exc()<EOL>logger.error(var)<EOL><DEDENT><DEDENT>logger.info(\"<STR_LIT>\")<EOL>", "docstring": "This method defines the workflow of SirMordred. So it calls to:\n- initialize the databases\n- execute the different phases for the first iteration\n  (collection, identities, enrichment)\n- start the collection and enrichment in parallel by data source\n- start also the Sorting Hat merge", "id": "f9690:c0:m11"}
{"signature": "def compose_mbox(projects):", "body": "mbox_archives = '<STR_LIT>'<EOL>mailing_lists_projects = [project for project in projects if '<STR_LIT>' in projects[project]]<EOL>for mailing_lists in mailing_lists_projects:<EOL><INDENT>projects[mailing_lists]['<STR_LIT>'] = []<EOL>for mailing_list in projects[mailing_lists]['<STR_LIT>']:<EOL><INDENT>if '<STR_LIT>' in mailing_list:<EOL><INDENT>name = mailing_list.split('<STR_LIT>')[<NUM_LIT:1>]<EOL><DEDENT>elif '<STR_LIT>' in mailing_list:<EOL><INDENT>name = mailing_list.split('<STR_LIT>')[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>name = mailing_list.split('<STR_LIT:@>')[<NUM_LIT:0>]<EOL><DEDENT>list_new = \"<STR_LIT>\" % (name, mbox_archives, name, name)<EOL>projects[mailing_lists]['<STR_LIT>'].append(list_new)<EOL><DEDENT><DEDENT>return projects<EOL>", "docstring": "Compose projects.json only for mbox, but using the mailing_lists lists\n\n    change: 'https://dev.eclipse.org/mailman/listinfo/emft-dev'\n    to: 'emfg-dev /home/bitergia/mboxes/emft-dev.mbox/emft-dev.mbox\n\n    :param projects: projects.json\n    :return: projects.json with mbox", "id": "f9692:m0"}
{"signature": "def compose_projects_json(projects, data):", "body": "projects = compose_git(projects, data)<EOL>projects = compose_mailing_lists(projects, data)<EOL>projects = compose_bugzilla(projects, data)<EOL>projects = compose_github(projects, data)<EOL>projects = compose_gerrit(projects)<EOL>projects = compose_mbox(projects)<EOL>return projects<EOL>", "docstring": "Compose projects.json with all data sources\n\n    :param projects: projects.json\n    :param data: eclipse JSON\n    :return: projects.json with all data sources", "id": "f9692:m7"}
{"signature": "def compose_git(projects, data):", "body": "for p in [project for project in data if len(data[project]['<STR_LIT>']) > <NUM_LIT:0>]:<EOL><INDENT>repos = []<EOL>for url in data[p]['<STR_LIT>']:<EOL><INDENT>if len(url['<STR_LIT:url>'].split()) > <NUM_LIT:1>:  <EOL><INDENT>repo = url['<STR_LIT:url>'].split()[<NUM_LIT:1>].replace('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>repo = url['<STR_LIT:url>'].replace('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>if repo not in repos:<EOL><INDENT>repos.append(repo)<EOL><DEDENT><DEDENT>projects[p]['<STR_LIT>'] = repos<EOL><DEDENT>return projects<EOL>", "docstring": "Compose projects.json for git\n\n    We need to replace '/c/' by '/gitroot/' for instance\n\n    change: 'http://git.eclipse.org/c/xwt/org.eclipse.xwt.git'\n    to: 'http://git.eclipse.org/gitroot/xwt/org.eclipse.xwt.git'\n\n    :param projects: projects.json\n    :param data: eclipse JSON\n    :return: projects.json with git", "id": "f9692:m2"}
{"signature": "@classmethod<EOL><INDENT>def get_repos_by_backend_section(cls, backend_section, raw=True):<DEDENT>", "body": "repos = []<EOL>projects = TaskProjects.get_projects()<EOL>for pro in projects:<EOL><INDENT>if backend_section in projects[pro]:<EOL><INDENT>if cls.GLOBAL_PROJECT not in projects:<EOL><INDENT>repos += projects[pro][backend_section]<EOL><DEDENT>else:<EOL><INDENT>if raw:<EOL><INDENT>if pro != cls.GLOBAL_PROJECT:<EOL><INDENT>if backend_section not in projects[cls.GLOBAL_PROJECT]:<EOL><INDENT>repos += projects[pro][backend_section]<EOL><DEDENT>elif backend_section in projects[pro] and backend_section in projects[cls.GLOBAL_PROJECT]:<EOL><INDENT>repos += projects[cls.GLOBAL_PROJECT][backend_section]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>not_in_unknown = [projects[pro] for pro in projects if pro != cls.GLOBAL_PROJECT][<NUM_LIT:0>]<EOL>if backend_section not in not_in_unknown:<EOL><INDENT>repos += projects[cls.GLOBAL_PROJECT][backend_section]<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if pro != cls.GLOBAL_PROJECT:<EOL><INDENT>if backend_section not in projects[cls.GLOBAL_PROJECT]:<EOL><INDENT>repos += projects[pro][backend_section]<EOL><DEDENT>elif backend_section in projects[pro] and backend_section in projects[cls.GLOBAL_PROJECT]:<EOL><INDENT>repos += projects[pro][backend_section]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>not_in_unknown_prj = [projects[prj] for prj in projects if prj != cls.GLOBAL_PROJECT]<EOL>not_in_unknown_sections = list(set([section for prj in not_in_unknown_prj<EOL>for section in list(prj.keys())]))<EOL>if backend_section not in not_in_unknown_sections:<EOL><INDENT>repos += projects[pro][backend_section]<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>logger.debug(\"<STR_LIT>\", backend_section, repos, raw)<EOL>repos = list(set(repos))<EOL>return repos<EOL>", "docstring": "return list with the repositories for a backend_section", "id": "f9693:c0:m4"}
{"signature": "def __init__(self, tasks_cls, backend_section, stopper, config, timer=<NUM_LIT:0>):", "body": "super().__init__(name=backend_section)  <EOL>self.config = config<EOL>self.tasks_cls = tasks_cls  <EOL>self.tasks = []  <EOL>self.backend_section = backend_section<EOL>self.stopper = stopper  <EOL>self.timer = timer<EOL>self.thread_id = None<EOL>", "docstring": ":tasks_cls : tasks classes to be executed using the backend\n:backend_section: perceval backend section name\n:config: config object for the manager", "id": "f9695:c0:m0"}
{"signature": "def __kibiter_version(self):", "body": "version = None<EOL>es_url = self.conf['<STR_LIT>']['<STR_LIT:url>']<EOL>config_url = '<STR_LIT>'<EOL>url = urijoin(es_url, config_url)<EOL>version = None<EOL>try:<EOL><INDENT>res = self.grimoire_con.get(url)<EOL>res.raise_for_status()<EOL>version = res.json()['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>logger.debug(\"<STR_LIT>\", version)<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL><DEDENT>return version<EOL>", "docstring": "Get the kibiter vesion.\n\n        :param major: major Elasticsearch version", "id": "f9696:c0:m2"}
{"signature": "def __upload_title(self, kibiter_major):", "body": "if kibiter_major == \"<STR_LIT>\":<EOL><INDENT>resource = \"<STR_LIT>\"<EOL>data = {\"<STR_LIT>\": {\"<STR_LIT:name>\": self.project_name}}<EOL>mapping_resource = \"<STR_LIT>\"<EOL>mapping = {\"<STR_LIT>\": \"<STR_LIT:true>\"}<EOL>url = urijoin(self.conf['<STR_LIT>']['<STR_LIT:url>'], resource)<EOL>mapping_url = urijoin(self.conf['<STR_LIT>']['<STR_LIT:url>'],<EOL>mapping_resource)<EOL>logger.debug(\"<STR_LIT>\")<EOL>res = self.grimoire_con.put(mapping_url, data=json.dumps(mapping),<EOL>headers=ES6_HEADER)<EOL>try:<EOL><INDENT>res.raise_for_status()<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>logger.error(\"<STR_LIT>\")<EOL>logger.error(res.json())<EOL><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>res = self.grimoire_con.post(url, data=json.dumps(data),<EOL>headers=ES6_HEADER)<EOL>try:<EOL><INDENT>res.raise_for_status()<EOL><DEDENT>except requests.exceptions.HTTPError:<EOL><INDENT>logger.error(\"<STR_LIT>\")<EOL>logger.error(res.json())<EOL><DEDENT><DEDENT>", "docstring": "Upload to Kibiter the title for the dashboard.\n\n        The title is shown on top of the dashboard menu, and is Usually\n        the name of the project being dashboarded.\n        This is done only for Kibiter 6.x.\n\n        :param kibiter_major: major version of kibiter", "id": "f9696:c1:m3"}
{"signature": "def __get_menu_entries(self, kibiter_major):", "body": "menu_entries = []<EOL>for entry in self.panels_menu:<EOL><INDENT>if entry['<STR_LIT:source>'] not in self.data_sources:<EOL><INDENT>continue<EOL><DEDENT>parent_menu_item = {<EOL>'<STR_LIT:name>': entry['<STR_LIT:name>'],<EOL>'<STR_LIT:title>': entry['<STR_LIT:name>'],<EOL>'<STR_LIT:description>': \"<STR_LIT>\",<EOL>'<STR_LIT:type>': \"<STR_LIT>\",<EOL>'<STR_LIT>': []<EOL>}<EOL>for subentry in entry['<STR_LIT>']:<EOL><INDENT>try:<EOL><INDENT>dash_name = get_dashboard_name(subentry['<STR_LIT>'])<EOL><DEDENT>except FileNotFoundError:<EOL><INDENT>logging.error(\"<STR_LIT>\", subentry['<STR_LIT>'])<EOL>continue<EOL><DEDENT>child_item = {<EOL>\"<STR_LIT:name>\": subentry['<STR_LIT:name>'],<EOL>\"<STR_LIT:title>\": subentry['<STR_LIT:name>'],<EOL>\"<STR_LIT:description>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:type>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": dash_name<EOL>}<EOL>parent_menu_item['<STR_LIT>'].append(child_item)<EOL><DEDENT>menu_entries.append(parent_menu_item)<EOL><DEDENT>return menu_entries<EOL>", "docstring": "Get the menu entries from the panel definition", "id": "f9696:c1:m6"}
{"signature": "def retain_identities(self, retention_time):", "body": "enrich_es = self.conf['<STR_LIT>']['<STR_LIT:url>']<EOL>sortinghat_db = self.db<EOL>current_data_source = self.get_backend(self.backend_section)<EOL>active_data_sources = self.config.get_active_data_sources()<EOL>if retention_time is None:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if retention_time <= <NUM_LIT:0>:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>retain_identities(retention_time, enrich_es, sortinghat_db, current_data_source, active_data_sources)<EOL>", "docstring": "Retain the identities in SortingHat based on the `retention_time`\n        value declared in the setup.cfg.\n\n        :param retention_time: maximum number of minutes wrt the current date to retain the SortingHat data", "id": "f9700:c0:m8"}
{"signature": "def __studies(self, retention_time):", "body": "cfg = self.config.get_conf()<EOL>if '<STR_LIT>' not in cfg[self.backend_section] or notcfg[self.backend_section]['<STR_LIT>']:<EOL><INDENT>logger.debug('<STR_LIT>' % self.backend_section)<EOL>return<EOL><DEDENT>studies = [study for study in cfg[self.backend_section]['<STR_LIT>'] if study.strip() != \"<STR_LIT>\"]<EOL>if not studies:<EOL><INDENT>logger.debug('<STR_LIT>' % self.backend_section)<EOL>return<EOL><DEDENT>logger.debug(\"<STR_LIT>\" % (self.backend_section, studies))<EOL>time.sleep(<NUM_LIT:2>)  <EOL>enrich_backend = self._get_enrich_backend()<EOL>ocean_backend = self._get_ocean_backend(enrich_backend)<EOL>active_studies = []<EOL>all_studies = enrich_backend.studies<EOL>all_studies_names = [study.__name__ for study in enrich_backend.studies]<EOL>logger.debug(\"<STR_LIT>\", self.backend_section, all_studies_names)<EOL>logger.debug(\"<STR_LIT>\", studies)<EOL>cfg_studies_types = [study.split(\"<STR_LIT::>\")[<NUM_LIT:0>] for study in studies]<EOL>if not set(cfg_studies_types).issubset(set(all_studies_names)):<EOL><INDENT>logger.error('<STR_LIT>', self.backend_section, studies)<EOL>raise RuntimeError('<STR_LIT>', self.backend_section, studies)<EOL><DEDENT>for study in enrich_backend.studies:<EOL><INDENT>if study.__name__ in cfg_studies_types:<EOL><INDENT>active_studies.append(study)<EOL><DEDENT><DEDENT>enrich_backend.studies = active_studies<EOL>print(\"<STR_LIT>\" % (self.backend_section,<EOL>[study for study in studies]))<EOL>studies_args = self.__load_studies()<EOL>do_studies(ocean_backend, enrich_backend, studies_args, retention_time=retention_time)<EOL>enrich_backend.studies = all_studies<EOL>", "docstring": "Execute the studies configured for the current backend", "id": "f9700:c0:m7"}
{"signature": "def __autorefresh_studies(self, cfg):", "body": "if '<STR_LIT>' not in self.conf[self.backend_section] or'<STR_LIT>' not in self.conf[self.backend_section]['<STR_LIT>']:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>aoc_index = self.conf['<STR_LIT>'].get('<STR_LIT>', GitEnrich.GIT_AOC_ENRICHED)<EOL>if not aoc_index:<EOL><INDENT>aoc_index = GitEnrich.GIT_AOC_ENRICHED<EOL><DEDENT>logger.debug(\"<STR_LIT>\", aoc_index)<EOL>es = Elasticsearch([self.conf['<STR_LIT>']['<STR_LIT:url>']], timeout=<NUM_LIT:100>,<EOL>verify_certs=self._get_enrich_backend().elastic.requests.verify)<EOL>if not es.indices.exists(index=aoc_index):<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>aoc_backend = GitEnrich(self.db_sh, None, cfg['<STR_LIT>']['<STR_LIT>'],<EOL>self.db_user, self.db_password, self.db_host)<EOL>aoc_backend.mapping = None<EOL>aoc_backend.roles = ['<STR_LIT>']<EOL>elastic_enrich = get_elastic(self.conf['<STR_LIT>']['<STR_LIT:url>'],<EOL>aoc_index, clean=False, backend=aoc_backend)<EOL>aoc_backend.set_elastic(elastic_enrich)<EOL>self.__autorefresh(aoc_backend, studies=True)<EOL>", "docstring": "Execute autorefresh for areas of code study if configured", "id": "f9700:c0:m6"}
{"signature": "def get_panels(config):", "body": "task = TaskPanels(config)<EOL>task.execute()<EOL>task = TaskPanelsMenu(config)<EOL>task.execute()<EOL>logging.info(\"<STR_LIT>\")<EOL>", "docstring": "Execute the panels phase\n\n    :param config: a Mordred config object", "id": "f9702:m4"}
{"signature": "def get_identities(config):", "body": "TaskProjects(config).execute()<EOL>task = TaskIdentitiesMerge(config)<EOL>task.execute()<EOL>logging.info(\"<STR_LIT>\")<EOL>", "docstring": "Execute the merge identities phase\n\n    :param config: a Mordred config object", "id": "f9702:m2"}
{"signature": "def get_params_parser():", "body": "parser = argparse.ArgumentParser(add_help=False)<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>',<EOL>action='<STR_LIT:store_true>',<EOL>help=argparse.SUPPRESS)<EOL>parser.add_argument(\"<STR_LIT>\", action='<STR_LIT:store_true>', dest='<STR_LIT>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument(\"<STR_LIT>\", action='<STR_LIT:store_true>', dest='<STR_LIT>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument(\"<STR_LIT>\", action='<STR_LIT:store_true>', dest='<STR_LIT>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument(\"<STR_LIT>\", action='<STR_LIT:store_true>', dest='<STR_LIT>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument(\"<STR_LIT>\", action='<STR_LIT:store_true>', dest='<STR_LIT>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument(\"<STR_LIT>\", dest='<STR_LIT>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_argument(\"<STR_LIT>\", dest='<STR_LIT>', default=[],<EOL>nargs='<STR_LIT:*>', help=\"<STR_LIT>\")<EOL>if len(sys.argv) == <NUM_LIT:1>:<EOL><INDENT>parser.print_help()<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>return parser<EOL>", "docstring": "Parse command line arguments", "id": "f9702:m6"}
{"signature": "def config_logging(debug):", "body": "if debug:<EOL><INDENT>logging.basicConfig(level=logging.DEBUG, format='<STR_LIT>')<EOL>logging.debug(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>logging.basicConfig(level=logging.INFO, format='<STR_LIT>')<EOL><DEDENT>", "docstring": "Config logging level output output", "id": "f9702:m5"}
{"signature": "def get_enrich(config, backend_section):", "body": "TaskProjects(config).execute()<EOL>task = TaskEnrich(config, backend_section=backend_section)<EOL>try:<EOL><INDENT>task.execute()<EOL>logging.info(\"<STR_LIT>\")<EOL><DEDENT>except Exception as e:<EOL><INDENT>logging.error(str(e))<EOL>sys.exit(-<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Execute the enrich phase for a given backend section\n\n    :param config: a Mordred config object\n    :param backend_section: the backend section where the enrich phase is executed", "id": "f9702:m3"}
{"signature": "@property<EOL><INDENT>def blue(self):<DEDENT>", "body": "return self._color[<NUM_LIT:2>]<EOL>", "docstring": "The blue component of the RGB color representation.", "id": "f9713:c2:m12"}
{"signature": "def __eq__(self, other):", "body": "if isinstance(other, Color):<EOL><INDENT>return self.equality_fn(self, other)<EOL><DEDENT>return False<EOL>", "docstring": "Equals", "id": "f9713:c2:m1"}
{"signature": "def offset_random_web(seed, amount=<NUM_LIT:1>):", "body": "return rgb_to_web(offset_random_rgb(seed, amount))<EOL>", "docstring": "Given a seed color, generate a specified number of random colors (1 color by default) determined by a randomized\noffset from the seed.\n\n:param seed:\n:param amount:\n:return:", "id": "f9713:m5"}
{"signature": "def __ne__(self, other):", "body": "return not self.__eq__(other)<EOL>", "docstring": "Not Equals", "id": "f9713:c2:m2"}
{"signature": "def random_web():", "body": "return rgb_to_web(random_rgb())<EOL>", "docstring": "Generate a uniformly random WEB value.\n\n:return:", "id": "f9713:m2"}
{"signature": "@property<EOL><INDENT>def green(self):<DEDENT>", "body": "return self._color[<NUM_LIT:1>]<EOL>", "docstring": "The green component of the RGB color representation.", "id": "f9713:c2:m10"}
{"signature": "def color_run(start_color, end_color, step_count, inclusive=True, to_color=True):", "body": "if isinstance(start_color, Color):<EOL><INDENT>start_color = start_color.rgb<EOL><DEDENT>if isinstance(end_color, Color):<EOL><INDENT>end_color = end_color.rgb<EOL><DEDENT>step = tuple((end_color[i] - start_color[i])/step_count for i in range(<NUM_LIT:3>))<EOL>add = lambda x, y: tuple(sum(z) for z in zip(x, y))<EOL>mult = lambda x, y: tuple(y * z for z in x)<EOL>run = [add(start_color, mult(step, i)) for i in range(<NUM_LIT:1>, step_count)]<EOL>if inclusive:<EOL><INDENT>run = [start_color] + run + [end_color]<EOL><DEDENT>return run if not to_color else [Color(c) for c in run]<EOL>", "docstring": "Given a start color, end color, and a number of steps, returns a list of colors which represent a 'scale' between\nthe start and end color.\n\n:param start_color: The color starting the run\n:param end_color: The color ending the run\n:param step_count: The number of colors to have between the start and end color\n:param inclusive: Flag determining whether to include start and end values in run (default True)\n:param to_color: Flag indicating return values should be Color objects (default True)\n:return: List of colors between the start and end color\n:rtype: list", "id": "f9713:m6"}
{"signature": "def random_hex():", "body": "return rgb_to_hex(random_rgb())<EOL>", "docstring": "Generate a uniformly random HEX value.\n\n:return: A string representing a random HEX value between 000000 and FFFFFF inclusive", "id": "f9713:m1"}
{"signature": "def offset_random_rgb(seed, amount=<NUM_LIT:1>):", "body": "r, g, b = seed<EOL>results = []<EOL>for _ in range(amount):<EOL><INDENT>base_val = ((r + g + b) / <NUM_LIT:3>) + <NUM_LIT:1>  <EOL>new_val = base_val + (random.random() * rgb_max_val / <NUM_LIT:5>)  <EOL>ratio = new_val / base_val<EOL>results.append((min(int(r*ratio), rgb_max_val), min(int(g*ratio), rgb_max_val), min(int(b*ratio), rgb_max_val)))<EOL><DEDENT>return results[<NUM_LIT:0>] if len(results) > <NUM_LIT:1> else results<EOL>", "docstring": "Given a seed color, generate a specified number of random colors (1 color by default) determined by a randomized\noffset from the seed.\n\n:param seed:\n:param amount:\n:return:", "id": "f9713:m3"}
{"signature": "def __init__(self, color=None, **kwargs):", "body": "self.equality_fn = RGB_eq<EOL>self.arithmetic = ArithmeticModel.LIGHT<EOL>if isinstance(color, Color):<EOL><INDENT>self._color = color._color<EOL><DEDENT>else:<EOL><INDENT>self._color = color if color else rgb_min<EOL><DEDENT>for k, v in kwargs.items():<EOL><INDENT>setattr(self, k, v)<EOL><DEDENT>", "docstring": "Initialization", "id": "f9713:c2:m0"}
{"signature": "@property<EOL><INDENT>def rgb(self):<DEDENT>", "body": "return self._color<EOL>", "docstring": "An RGB representation of the color.", "id": "f9713:c2:m14"}
{"signature": "def __str__(self):", "body": "return \"<STR_LIT:{}>\".format(self._color)<EOL>", "docstring": "String representation", "id": "f9713:c2:m6"}
{"signature": "@property<EOL><INDENT>def red(self):<DEDENT>", "body": "return self._color[<NUM_LIT:0>]<EOL>", "docstring": "The red component of the RGB color representation.", "id": "f9713:c2:m8"}
{"signature": "@property<EOL><INDENT>def web(self):<DEDENT>", "body": "return rgb_to_web(self.rgb)<EOL>", "docstring": "A WEB representation of the color.", "id": "f9713:c2:m19"}
{"signature": "@property<EOL><INDENT>def hex(self):<DEDENT>", "body": "return rgb_to_hex(self.rgb)<EOL>", "docstring": "A 6-char HEX representation of the color, with a prepended octothorpe.", "id": "f9713:c2:m16"}
{"signature": "def hex_to_web(_hex):", "body": "try:<EOL><INDENT>return web_colors[hex_to_rgb(_hex)]<EOL><DEDENT>except KeyError:<EOL><INDENT>return _hex<EOL><DEDENT>", "docstring": "Convert a HEX color representation to a WEB color representation.\n\nhex :: hex -> [000000, FFFFFF]\n\n:param _hex: The 3- or 6-char hexadecimal string representing the color value.\n:return: WEB representation of the input HEX value.\n:rtype: str", "id": "f9719:m5"}
{"signature": "def hsv_to_web(hsv):", "body": "return rgb_to_web(hsv_to_rgb(hsv))<EOL>", "docstring": "Convert an HSV color representation to a WEB color representation.\n\n(h, s, v) :: h -> [0, 360)\n             s -> [0, 1]\n             v -> [0, 1]\n\n:param hsv: A tuple of three numeric values corresponding to the hue, saturation, and value.\n:return: WEB representation of the input HSV value.\n:rtype: str", "id": "f9719:m18"}
{"signature": "def yiq_to_web(yiq):", "body": "return rgb_to_web(yiq_to_rgb(yiq))<EOL>", "docstring": "Convert a YIQ color representation to a WEB color representation.\n\n(y, i, q) :: y -> [0, 1]\n             i -> [-0.5957, 0.5957]\n             q -> [-0.5226, 0.5226]\n\n:param yiq: A tuple of three numeric values corresponding to the luma and chrominance.\n:return: WEB representation of the input YIQ value.\n:rtype: str", "id": "f9719:m14"}
{"signature": "def hex_to_yiq(_hex):", "body": "return rgb_to_yiq(hex_to_rgb(_hex))<EOL>", "docstring": "Convert a HEX color representation to a YIQ color representation.\n\nhex :: hex -> [000000, FFFFFF]\n\n:param _hex: The 3- or 6-char hexadecimal string representing the color value.\n:return: YIQ representation of the input HEX value.\n:rtype: tuple", "id": "f9719:m6"}
{"signature": "def hsv_to_yiq(hsv):", "body": "return rgb_to_yiq(hsv_to_rgb(hsv))<EOL>", "docstring": "Convert an HSV color representation to a YIQ color representation.\n\n(h, s, v) :: h -> [0, 360)\n             s -> [0, 1]\n             v -> [0, 1]\n\n:param hsv: A tuple of three numeric values corresponding to the hue, saturation, and value.\n:return: YIQ representation of the input HSV value.\n:rtype: tuple", "id": "f9719:m19"}
{"signature": "def web_to_yiq(web):", "body": "return rgb_to_yiq(web_to_rgb(web))<EOL>", "docstring": "Convert a WEB color representation to a YIQ color representation.\n\nweb :: web -> [000000, FFFFFF]\n            | in static.web_colors\n\n:param web: The WEB string representation of a color.\n:return: YIQ representation of the input WEB value.\n:rtype: tuple", "id": "f9719:m10"}
{"signature": "def create_base(self):", "body": "self.base_dict = {}<EOL>_start_arrays = []<EOL>for tokens, word in self.chain_generator():<EOL><INDENT>self.base_dict.setdefault(tokens, []).append(word)<EOL>if tokens[<NUM_LIT:0>] == \"<STR_LIT>\":  <EOL><INDENT>_start_arrays.append(tokens)<EOL><DEDENT><DEDENT>self.start_arrays = tuple(<EOL>frozenset(self.get_corrected_arrays(_start_arrays))<EOL>)<EOL>", "docstring": "\u041c\u0435\u0442\u043e\u0434 \u0441\u043e\u0437\u0434\u0430\u0451\u0442 \u0431\u0430\u0437\u043e\u0432\u044b\u0439 \u0441\u043b\u043e\u0432\u0430\u0440\u044c, \u043d\u0430 \u043e\u0441\u043d\u043e\u0432\u0435 \u043c\u0430\u0441\u0441\u0438\u0432\u0430 \u0442\u043e\u043a\u0435\u043d\u043e\u0432.\n\u0412\u044b\u0437\u044b\u0432\u0430\u0435\u0442\u0441\u044f \u0438\u0437 \u043c\u0435\u0442\u043e\u0434\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f.", "id": "f9726:c1:m8"}
{"signature": "def get_optimal_variant(self, variants, start_words, **kwargs):", "body": "if not start_words:<EOL><INDENT>return (choice(variants), {})<EOL><DEDENT>_variants = []<EOL>_weights = []<EOL>for tok in frozenset(variants):<EOL><INDENT>if not self.token_is_correct(tok):<EOL><INDENT>continue<EOL><DEDENT>weight = variants.count(tok)<EOL>for word in start_words:<EOL><INDENT>for token in self.ONLY_WORDS.finditer(word.strip().lower()):<EOL><INDENT>if token.group() == tok:<EOL><INDENT>weight <<= <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>_variants.append(tok)<EOL>_weights.append(weight)<EOL><DEDENT>if not _variants:<EOL><INDENT>return (choice(variants), {})<EOL><DEDENT>return (choices(_variants, weights=_weights, k=<NUM_LIT:1>)[<NUM_LIT:0>], {})<EOL>", "docstring": "\u0412\u043e\u0437\u0432\u0440\u0430\u0449\u0430\u0435\u0442 \u043e\u043f\u0442\u0438\u043c\u0430\u043b\u044c\u043d\u044b\u0439 \u0432\u0430\u0440\u0438\u0430\u043d\u0442, \u0438\u0437 \u0432\u044b\u0431\u043e\u0440\u043a\u0438.", "id": "f9726:c1:m4"}
{"signature": "def start_generation(self, *start_words, **kwargs):", "body": "out_text = \"<STR_LIT>\"<EOL>_need_capialize = True<EOL>for token in self._get_generate_tokens(*start_words, **kwargs):<EOL><INDENT>if token in \"<STR_LIT>\":<EOL><INDENT>_need_capialize = True<EOL>continue<EOL><DEDENT>if self.ONLY_WORDS.search(token):<EOL><INDENT>out_text += \"<STR_LIT:U+0020>\"<EOL><DEDENT>if _need_capialize:<EOL><INDENT>_need_capialize = False<EOL>token = token.title()<EOL><DEDENT>out_text += token<EOL><DEDENT>return out_text.strip()<EOL>", "docstring": "\u0413\u0435\u043d\u0435\u0440\u0438\u0440\u0443\u0435\u0442 \u043f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u0435.\n:start_words: \u041f\u043e\u043f\u044b\u0442\u0430\u0442\u044c\u0441\u044f \u043d\u0430\u0447\u0430\u0442\u044c \u043f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u0435 \u0441 \u044d\u0442\u0438\u0445 \u0441\u043b\u043e\u0432.", "id": "f9726:c1:m6"}
{"signature": "def _parse_from_text(self, text):", "body": "if not isinstance(text, str):<EOL><INDENT>raise MarkovTextExcept(\"<STR_LIT>\")<EOL><DEDENT>text = text.strip().lower()<EOL>need_start_token = True<EOL>token = \"<STR_LIT:$>\"  <EOL>for token in self.WORD_OR_MARKS.finditer(text):<EOL><INDENT>token = token.group()<EOL>if need_start_token:<EOL><INDENT>need_start_token = False<EOL>yield \"<STR_LIT>\"<EOL><DEDENT>yield token<EOL>if self.END_TOKENS.search(token):<EOL><INDENT>need_start_token = True<EOL>yield \"<STR_LIT:$>\"<EOL><DEDENT><DEDENT>if token != \"<STR_LIT:$>\":<EOL><INDENT>yield \"<STR_LIT:$>\"<EOL><DEDENT>", "docstring": "\u0412\u043e\u0437\u0432\u0440\u0430\u0449\u0430\u0435\u0442 \u0433\u0435\u043d\u0435\u0440\u0430\u0442\u043e\u0440 \u0442\u043e\u043a\u0435\u043d\u043e\u0432, \u0438\u0437 \u0442\u0435\u043a\u0441\u0442\u0430.", "id": "f9726:c1:m16"}
{"signature": "def load_dump(self, name=None):", "body": "name = name or \"<STR_LIT>\"<EOL>dump_file = os_join(<EOL>self.temp_folder,<EOL>\"<STR_LIT>\".format(name)<EOL>)<EOL>if not isfile(dump_file):<EOL><INDENT>raise MarkovTextExcept(\"<STR_LIT>\".format(dump_file))<EOL><DEDENT>with open(dump_file, \"<STR_LIT:rb>\") as js_file:<EOL><INDENT>self.tokens_array = tuple(json.load(js_file))<EOL><DEDENT>self.create_base()<EOL>", "docstring": "\u0417\u0430\u0433\u0440\u0443\u0436\u0430\u0435\u0442 \u0431\u0430\u0437\u0443 \u0441 \u0436\u0451\u0441\u0442\u043a\u043e\u0433\u043e \u0434\u0438\u0441\u043a\u0430.\n\u0422\u0435\u043a\u0443\u0449\u0430\u044f \u0431\u0430\u0437\u0430 \u0437\u0430\u043c\u0435\u043d\u044f\u0435\u0442\u0441\u044f.\n:name: \u0418\u043c\u044f \u0444\u0430\u0439\u043b\u0430, \u0431\u0435\u0437 \u0440\u0430\u0441\u0448\u0438\u0440\u0435\u043d\u0438\u044f.", "id": "f9726:c1:m12"}
{"signature": "def _parse_from_file(self, file_path):", "body": "file_path = abspath(file_path)<EOL>if not isfile(file_path):<EOL><INDENT>raise MarkovTextExcept(\"<STR_LIT>\")<EOL><DEDENT>with open(file_path, \"<STR_LIT:rb>\") as txt_file:<EOL><INDENT>for line in txt_file:<EOL><INDENT>text = line.decode(\"<STR_LIT:utf-8>\", \"<STR_LIT:ignore>\").strip()<EOL>if not text:<EOL><INDENT>continue<EOL><DEDENT>yield from self._parse_from_text(text)<EOL><DEDENT><DEDENT>", "docstring": "\u0441\u043c. \u043e\u043f\u0438\u0441\u0430\u043d\u0438\u0435 _parse_from_text.\n\u0422\u043e\u043b\u044c\u043a\u043e \u043d\u0430 \u0432\u0445\u043e\u0434 \u043f\u043e\u0434\u0430\u0451\u0442\u0441\u044f \u043d\u0435 \u0442\u0435\u043a\u0441\u0442, \u0430 \u043f\u0443\u0442\u044c \u043a \u0444\u0430\u0439\u043b\u0443.", "id": "f9726:c1:m17"}
{"signature": "def tokenize(sentence):", "body": "tokens = []<EOL>class Vars:<EOL><INDENT>start_pos = -<NUM_LIT:1><EOL>last_type = '<STR_LIT:o>'<EOL><DEDENT>def update(c, i):<EOL><INDENT>if c.isalpha() or c in '<STR_LIT>':<EOL><INDENT>t = '<STR_LIT:a>'<EOL><DEDENT>elif c.isdigit() or c == '<STR_LIT:#>':<EOL><INDENT>t = '<STR_LIT:n>'<EOL><DEDENT>elif c.isspace():<EOL><INDENT>t = '<STR_LIT:s>'<EOL><DEDENT>else:<EOL><INDENT>t = '<STR_LIT:o>'<EOL><DEDENT>if t != Vars.last_type or t == '<STR_LIT:o>':<EOL><INDENT>if Vars.start_pos >= <NUM_LIT:0>:<EOL><INDENT>token = sentence[Vars.start_pos:i].lower()<EOL>if token not in '<STR_LIT>':<EOL><INDENT>tokens.append(token)<EOL><DEDENT><DEDENT>Vars.start_pos = -<NUM_LIT:1> if t == '<STR_LIT:s>' else i<EOL><DEDENT>Vars.last_type = t<EOL><DEDENT>for i, char in enumerate(sentence):<EOL><INDENT>update(char, i)<EOL><DEDENT>update('<STR_LIT:U+0020>', len(sentence))<EOL>return tokens<EOL>", "docstring": "Converts a single sentence into a list of individual significant units\nArgs:\n    sentence (str): Input string ie. 'This is a sentence.'\nReturns:\n    list<str>: List of tokens ie. ['this', 'is', 'a', 'sentence']", "id": "f9728:m1"}
{"signature": "def expand_parentheses(sent):", "body": "return SentenceTreeParser(sent).expand_parentheses()<EOL>", "docstring": "['1', '(', '2', '|', '3, ')'] -> [['1', '2'], ['1', '3']]\nFor example:\n\nWill it (rain|pour) (today|tomorrow|)?\n\n---->\n\nWill it rain today?\nWill it rain tomorrow?\nWill it rain?\nWill it pour today?\nWill it pour tomorrow?\nWill it pour?\n\nArgs:\n    sent (list<str>): List of tokens in sentence\nReturns:\n    list<list<str>>: Multiple possible sentences from original", "id": "f9728:m2"}
{"signature": "def resolve_conflicts(inputs, outputs):", "body": "data = {}<EOL>for inp, out in zip(inputs, outputs):<EOL><INDENT>tup = tuple(inp)<EOL>if tup in data:<EOL><INDENT>data[tup].append(out)<EOL><DEDENT>else:<EOL><INDENT>data[tup] = [out]<EOL><DEDENT><DEDENT>inputs, outputs = [], []<EOL>for inp, outs in data.items():<EOL><INDENT>inputs.append(list(inp))<EOL>combined = [<NUM_LIT:0>] * len(outs[<NUM_LIT:0>])<EOL>for i in range(len(combined)):<EOL><INDENT>combined[i] = max(j[i] for j in outs)<EOL><DEDENT>outputs.append(combined)<EOL><DEDENT>return inputs, outputs<EOL>", "docstring": "Checks for duplicate inputs and if there are any,\nremove one and set the output to the max of the two outputs\nArgs:\n    inputs (list<list<float>>): Array of input vectors\n    outputs (list<list<float>>): Array of output vectors\nReturns:\n    tuple<inputs, outputs>: The modified inputs and outputs", "id": "f9728:m4"}
{"signature": "def _expand_tree(self, tree):", "body": "return tree.expand()<EOL>", "docstring": "Expand a list of sub sentences to all combinated sentences.\n['1', ['2', '3']] -> [['1', '2'], ['1', '3']]", "id": "f9733:c4:m3"}
{"signature": "def expand(self):", "body": "old_expanded = [[]]<EOL>for sub in self._tree:<EOL><INDENT>sub_expanded = sub.expand()<EOL>new_expanded = []<EOL>while len(old_expanded) > <NUM_LIT:0>:<EOL><INDENT>sentence = old_expanded.pop()<EOL>for new in sub_expanded:<EOL><INDENT>new_expanded.append(sentence + new)<EOL><DEDENT><DEDENT>old_expanded = new_expanded<EOL><DEDENT>return old_expanded<EOL>", "docstring": "Creates a combination of all sub-sentences.\n\nReturns:\n    List<List<str>>: A list with all subsentence expansions combined in\n                        every possible way", "id": "f9733:c2:m0"}
{"signature": "def _parse_expr(self):", "body": "<EOL>sentence_list = []<EOL>cur_sentence = []<EOL>sentence_list.append(Sentence(cur_sentence))<EOL>while self._current_position < len(self.tokens):<EOL><INDENT>cur = self.tokens[self._current_position]<EOL>self._current_position += <NUM_LIT:1><EOL>if cur == '<STR_LIT:(>':<EOL><INDENT>subexpr = self._parse_expr()<EOL>normal_brackets = False<EOL>if len(subexpr.tree()) == <NUM_LIT:1>:<EOL><INDENT>normal_brackets = True<EOL>cur_sentence.append(Word('<STR_LIT:(>'))<EOL><DEDENT>cur_sentence.append(subexpr)<EOL>if normal_brackets:<EOL><INDENT>cur_sentence.append(Word('<STR_LIT:)>'))<EOL><DEDENT><DEDENT>elif cur == '<STR_LIT:|>':<EOL><INDENT>cur_sentence = []<EOL>sentence_list.append(Sentence(cur_sentence))<EOL><DEDENT>elif cur == '<STR_LIT:)>':<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>cur_sentence.append(Word(cur))<EOL><DEDENT><DEDENT>return Options(sentence_list)<EOL>", "docstring": "Generate sentence token trees from the current position to\nthe next closing parentheses / end of the list and return it\n['1', '(', '2', '|', '3, ')'] -> ['1', [['2'], ['3']]]\n['2', '|', '3'] -> [['2'], ['3']]", "id": "f9733:c4:m2"}
{"signature": "def expand(self):", "body": "return [[self._tree]]<EOL>", "docstring": "Creates one sentence that contains exactly that word.\n\nReturns:\n    List<List<str>>: A list with the given string as sentence\n                        (= token/string list)", "id": "f9733:c1:m0"}
{"signature": "def expand(self):", "body": "return [[]]<EOL>", "docstring": "Expanded version of the fragment. In this case an empty sentence.\n\nReturns:\n    List<List<str>>: A list with an empty sentence (= token/string list)", "id": "f9733:c0:m2"}
{"signature": "@_save_args<EOL><INDENT>def add_entity(self, name, lines, reload_cache=False):<DEDENT>", "body": "Entity.verify_name(name)<EOL>self.entities.add(Entity.wrap_name(name), lines, reload_cache)<EOL>self.padaos.add_entity(name, lines)<EOL>self.must_train = True<EOL>", "docstring": "Adds an entity that matches the given lines.\n\nExample:\n    self.add_intent('weather', ['will it rain on {weekday}?'])\n    self.add_entity('{weekday}', ['monday', 'tuesday', 'wednesday'])  # ...\n\nArgs:\n    name (str): The name of the entity\n    lines (list<str>): Lines of example extracted entities\n    reload_cache (bool): Whether to refresh all of cache", "id": "f9734:c0:m3"}
{"signature": "def train(self, debug=True, force=False, single_thread=False, timeout=<NUM_LIT:20>):", "body": "if not self.must_train and not force:<EOL><INDENT>return<EOL><DEDENT>self.padaos.compile()<EOL>self.train_thread = Thread(target=self._train, kwargs=dict(<EOL>debug=debug,<EOL>single_thread=single_thread,<EOL>timeout=timeout<EOL>), daemon=True)<EOL>self.train_thread.start()<EOL>self.train_thread.join(timeout)<EOL>self.must_train = False<EOL>return not self.train_thread.is_alive()<EOL>", "docstring": "Trains all the loaded intents that need to be updated\nIf a cache file exists with the same hash as the intent file,\nthe intent will not be trained and just loaded from file\n\nArgs:\n    debug (bool): Whether to print a message to stdout each time a new intent is trained\n    force (bool): Whether to force training if already finished\n    single_thread (bool): Whether to force running in a single thread\n    timeout (float): Seconds before cancelling training\nReturns:\n    bool: True if training succeeded without timeout", "id": "f9734:c0:m10"}
{"signature": "def calc_intent(self, query):", "body": "matches = self.calc_intents(query)<EOL>if len(matches) == <NUM_LIT:0>:<EOL><INDENT>return MatchData('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>best_match = max(matches, key=lambda x: x.conf)<EOL>best_matches = (match for match in matches if match.conf == best_match.conf)<EOL>return min(best_matches, key=lambda x: sum(map(len, x.matches.values())))<EOL>", "docstring": "Tests all the intents against the query and returns\nmatch data of the best intent\n\nArgs:\n    query (str): Input sentence to test against intents\nReturns:\n    MatchData: Best intent match", "id": "f9734:c0:m13"}
{"signature": "@_save_args<EOL><INDENT>def load_file(self, *args, **kwargs):<DEDENT>", "body": "self.load_intent(*args, **kwargs)<EOL>", "docstring": "Legacy. Use load_intent instead", "id": "f9734:c0:m5"}
{"signature": "@_save_args<EOL><INDENT>def add_intent(self, name, lines, reload_cache=False):<DEDENT>", "body": "self.intents.add(name, lines, reload_cache)<EOL>self.padaos.add_intent(name, lines)<EOL>self.must_train = True<EOL>", "docstring": "Creates a new intent, optionally checking the cache first\n\nArgs:\n    name (str): The associated name of the intent\n    lines (list<str>): All the sentences that should activate the intent\n    reload_cache: Whether to ignore cached intent if exists", "id": "f9734:c0:m2"}
{"signature": "def calc_intents(self, query):", "body": "if self.must_train:<EOL><INDENT>self.train()<EOL><DEDENT>intents = {} if self.train_thread and self.train_thread.is_alive() else {<EOL>i.name: i for i in self.intents.calc_intents(query, self.entities)<EOL>}<EOL>sent = tokenize(query)<EOL>for perfect_match in self.padaos.calc_intents(query):<EOL><INDENT>name = perfect_match['<STR_LIT:name>']<EOL>intents[name] = MatchData(name, sent, matches=perfect_match['<STR_LIT>'], conf=<NUM_LIT:1.0>)<EOL><DEDENT>return list(intents.values())<EOL>", "docstring": "Tests all the intents against the query and returns\ndata on how well each one matched against the query\n\nArgs:\n    query (str): Input sentence to test against intents\nReturns:\n    list<MatchData>: List of intent matches\nSee calc_intent() for a description of the returned MatchData", "id": "f9734:c0:m12"}
{"signature": "@_save_args<EOL><INDENT>def load_intent(self, name, file_name, reload_cache=False):<DEDENT>", "body": "self.intents.load(name, file_name, reload_cache)<EOL>with open(file_name) as f:<EOL><INDENT>self.padaos.add_intent(name, f.read().split('<STR_LIT:\\n>'))<EOL><DEDENT>self.must_train = True<EOL>", "docstring": "Loads an intent, optionally checking the cache first\n\nArgs:\n    name (str): The associated name of the intent\n    file_name (str): The location of the intent file\n    reload_cache (bool): Whether to refresh all of cache", "id": "f9734:c0:m6"}
{"signature": "@_save_args<EOL><INDENT>def load_entity(self, name, file_name, reload_cache=False):<DEDENT>", "body": "Entity.verify_name(name)<EOL>self.entities.load(Entity.wrap_name(name), file_name, reload_cache)<EOL>with open(file_name) as f:<EOL><INDENT>self.padaos.add_entity(name, f.read().split('<STR_LIT:\\n>'))<EOL><DEDENT>self.must_train = True<EOL>", "docstring": "Loads an entity, optionally checking the cache first\n\nArgs:\n    name (str): The associated name of the entity\n    file_name (str): The location of the entity file\n    reload_cache (bool): Whether to refresh all of cache", "id": "f9734:c0:m4"}
{"signature": "def _train_and_save(obj, cache, data, print_updates):", "body": "obj.train(data)<EOL>if print_updates:<EOL><INDENT>print('<STR_LIT>' + obj.name + '<STR_LIT:.>')<EOL><DEDENT>obj.save(cache)<EOL>", "docstring": "Internal pickleable function used to train objects in another process", "id": "f9740:m0"}
{"signature": "def randomseed(func):", "body": "def wrapper(*args, **kwargs):<EOL><INDENT>np.random.seed(<NUM_LIT>)<EOL>return func(*args, **kwargs)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Sets the seed of numpy's random number generator", "id": "f9757:m0"}
{"signature": "def generate_lowrank_matrix(n=<NUM_LIT:10>, m=<NUM_LIT:20>, k=<NUM_LIT:3>, eta=<NUM_LIT>, seed=<NUM_LIT>):", "body": "print(\"<STR_LIT>\")<EOL>global Xtrue, Xobs<EOL>np.random.seed(seed)<EOL>Xtrue = np.sin(np.linspace(<NUM_LIT:0>, <NUM_LIT:2> * np.pi, n)).reshape(-<NUM_LIT:1>, <NUM_LIT:1>).dot(<EOL>np.cos(np.linspace(<NUM_LIT:0>, <NUM_LIT:2> * np.pi, m)).reshape(<NUM_LIT:1>, -<NUM_LIT:1>))<EOL>Xobs = Xtrue + eta * np.random.randn(n, m)<EOL>return Xobs, Xtrue<EOL>", "docstring": "Generate an n-by-m noisy low-rank matrix", "id": "f9760:m0"}
{"signature": "def gradient_optimizer(coro):", "body": "class GradientOptimizer(Optimizer):<EOL><INDENT>@wraps(coro)<EOL>def __init__(self, *args, **kwargs):<EOL><INDENT>self.algorithm = coro(*args, **kwargs)<EOL>self.algorithm.send(None)<EOL>self.operators = []<EOL><DEDENT>def set_transform(self, func):<EOL><INDENT>self.transform = compose(destruct, func, self.restruct)<EOL><DEDENT>def minimize(self, f_df, x0, display=sys.stdout, maxiter=<NUM_LIT>):<EOL><INDENT>self.display = display<EOL>self.theta = x0<EOL>xk = self.algorithm.send(destruct(x0).copy())<EOL>store = defaultdict(list)<EOL>runtimes = []<EOL>if len(self.operators) == <NUM_LIT:0>:<EOL><INDENT>self.operators = [proxops.identity()]<EOL><DEDENT>obj, grad = wrap(f_df, x0)<EOL>transform = compose(destruct, *reversed(self.operators), self.restruct)<EOL>self.optional_print(tp.header(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']))<EOL>try:<EOL><INDENT>for k in count():<EOL><INDENT>tstart = perf_counter()<EOL>f = obj(xk)<EOL>df = grad(xk)<EOL>xk = transform(self.algorithm.send(df))<EOL>runtimes.append(perf_counter() - tstart)<EOL>store['<STR_LIT:f>'].append(f)<EOL>self.optional_print(tp.row([k,<EOL>f,<EOL>np.linalg.norm(destruct(df)),<EOL>tp.humantime(runtimes[-<NUM_LIT:1>])]))<EOL>if k >= maxiter:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>except KeyboardInterrupt:<EOL><INDENT>pass<EOL><DEDENT>self.optional_print(tp.bottom(<NUM_LIT:4>))<EOL>self.optional_print(u'<STR_LIT>'.format(store['<STR_LIT:f>'][-<NUM_LIT:1>]))<EOL>self.optional_print(u'<STR_LIT>'.format(tp.humantime(sum(runtimes))))<EOL>self.optional_print(u'<STR_LIT>'.format(<EOL>tp.humantime(np.mean(runtimes)),<EOL>tp.humantime(np.std(runtimes)),<EOL>))<EOL>return OptimizeResult({<EOL>'<STR_LIT:x>': self.restruct(xk),<EOL>'<STR_LIT:f>': f,<EOL>'<STR_LIT>': self.restruct(df),<EOL>'<STR_LIT:k>': k,<EOL>'<STR_LIT>': np.array(store['<STR_LIT:f>']),<EOL>})<EOL><DEDENT><DEDENT>return GradientOptimizer<EOL>", "docstring": "Turns a coroutine into a gradient based optimizer.", "id": "f9764:m0"}
{"signature": "@gradient_optimizer<EOL>def adam(lr=<NUM_LIT>, beta=(<NUM_LIT>, <NUM_LIT>), epsilon=<NUM_LIT>):", "body": "xk = yield<EOL>mk = np.zeros_like(xk)<EOL>vk = np.zeros_like(xk)<EOL>b1, b2 = beta<EOL>for k in count(start=<NUM_LIT:1>):<EOL><INDENT>grad = yield xk<EOL>mk *= b1<EOL>mk += (<NUM_LIT:1.> - b1) * grad<EOL>vk *= b2<EOL>vk += (<NUM_LIT:1.> - b2) * (grad ** <NUM_LIT:2>)<EOL>momentum_norm = mk / (<NUM_LIT:1> - b1 ** k)<EOL>velocity_norm = np.sqrt(vk / (<NUM_LIT:1> - b2 ** k))<EOL>xk -= lr * momentum_norm / (epsilon + velocity_norm)<EOL><DEDENT>", "docstring": "ADAM\n\nParameters\n----------\nlr : float, optional\n    Learnin rate (Default: 1e-3)\n\nbeta : (float, float)\n    (Default: (0.9, 0.999))\n\nepsilon : float\n    (Default: 1e-8)", "id": "f9765:m5"}
{"signature": "@gradient_optimizer<EOL>def rmsprop(lr=<NUM_LIT>, damping=<NUM_LIT>, decay=<NUM_LIT>):", "body": "xk = yield<EOL>rms = np.zeros_like(xk)<EOL>for k in count():<EOL><INDENT>grad = yield xk<EOL>rms *= decay<EOL>rms += (<NUM_LIT:1> - decay) * grad**<NUM_LIT:2><EOL>xk -= lr * grad / (damping + np.sqrt(rms))<EOL><DEDENT>", "docstring": "RMSProp\n\nParameters\n----------\nlr : float, optional\n    Learning rate (Default: 1e-3)\n\ndamping : float, optional\n    Damping term (Default: 1e-12)\n\ndecay : float, optional\n    Decay of the learning rate (Default: 0)", "id": "f9765:m2"}
{"signature": "@gradient_optimizer<EOL>def sag(nterms=<NUM_LIT:10>, lr=<NUM_LIT>):", "body": "xk = yield<EOL>gradients = deque([], nterms)<EOL>for k in count():<EOL><INDENT>grad = yield xk<EOL>gradients.append(grad)<EOL>xk -= lr * np.mean(gradients, axis=<NUM_LIT:0>)<EOL><DEDENT>", "docstring": "Stochastic Average Gradient (SAG)\n\nParameters\n----------\nnterms : int, optional\n    Number of gradient evaluations to use in the average (Default: 10)\n\nlr : float, optional\n    (Default: 1e-3)", "id": "f9765:m3"}
{"signature": "@proxify<EOL>def squared_error(x, rho, x_obs):", "body": "return (x + x_obs / rho) / (<NUM_LIT:1.> + <NUM_LIT:1.> / rho)<EOL>", "docstring": "Proximal operator for squared error (l2 or Fro. norm)\n\nsquared_error(x_obs)\n\nParameters\n----------\nx_obs : array_like\n    Observed array or matrix that you want to stay close to", "id": "f9766:m3"}
{"signature": "@proxify<EOL>def nonneg(x, rho):", "body": "return np.maximum(x, <NUM_LIT:0>)<EOL>", "docstring": "Projection onto the non-negative orthant", "id": "f9766:m6"}
{"signature": "@proxify<EOL>def lbfgs(x, rho, f_df, maxiter=<NUM_LIT:20>):", "body": "def f_df_augmented(theta):<EOL><INDENT>f, df = f_df(theta)<EOL>obj = f + (rho / <NUM_LIT>) * np.linalg.norm(theta - x) ** <NUM_LIT:2><EOL>grad = df + rho * (theta - x)<EOL>return obj, grad<EOL><DEDENT>res = scipy_minimize(f_df_augmented, x, jac=True, method='<STR_LIT>',<EOL>options={'<STR_LIT>': maxiter, '<STR_LIT>': False})<EOL>return res.x<EOL>", "docstring": "Minimize the proximal operator of a given objective using L-BFGS\n\nParameters\n----------\nf_df : function\n    Returns the objective and gradient of the function to minimize\n\nmaxiter : int\n    Maximum number of L-BFGS iterations", "id": "f9766:m4"}
{"signature": "def __init__(self, A, b):", "body": "self.P = A.T.dot(A)<EOL>self.q = A.T.dot(b)<EOL>self.n = self.q.size<EOL>", "docstring": "Proximal operator for solving a linear least squares system, Ax = b\n\nParameters\n----------\nA : array_like\n    Sensing matrix (Ax = b)\n\nb : array_like\n    Responses (Ax = b)", "id": "f9766:c1:m0"}
{"signature": "@proxify<EOL>def smooth(x, rho, penalty, axis=<NUM_LIT:0>, newshape=None):", "body": "orig_shape = x.shape<EOL>if newshape is not None:<EOL><INDENT>x = x.reshape(newshape)<EOL><DEDENT>n = x.shape[axis]<EOL>lap_op = spdiags([(<NUM_LIT:2> + rho / penalty) * np.ones(n),<EOL>-<NUM_LIT:1> * np.ones(n), -<NUM_LIT:1> * np.ones(n)],<EOL>[<NUM_LIT:0>, -<NUM_LIT:1>, <NUM_LIT:1>], n, n, format='<STR_LIT>')<EOL>A = penalty * lap_op<EOL>b = rho * np.rollaxis(x, axis, <NUM_LIT:0>)<EOL>return np.rollaxis(spsolve(A, b), axis, <NUM_LIT:0>).reshape(orig_shape)<EOL>", "docstring": "Applies a smoothing operator along one dimension\n\ncurrently only accepts a matrix as input\n\nParameters\n----------\npenalty : float\n\naxis : int, optional\n    Axis along which to apply the smoothing (Default: 0)\n\nnewshape : tuple, optional\n    Desired shape of the parameters to apply the nuclear norm to. The given\n    parameters are reshaped to an array with this shape, or not reshaped if\n    the value of newshape is None. (Default: None)", "id": "f9766:m7"}
{"signature": "@proxify<EOL>def columns(x, rho, proxop):", "body": "xnext = np.zeros_like(x)<EOL>for ix in range(x.shape[<NUM_LIT:1>]):<EOL><INDENT>xnext[:, ix] = proxop(x[:, ix], rho)<EOL><DEDENT>return xnext<EOL>", "docstring": "Applies a proximal operator to the columns of a matrix", "id": "f9766:m11"}
{"signature": "@proxify<EOL>def simplex(x, rho):", "body": "<EOL>u = np.flipud(np.sort(x.ravel()))<EOL>lambdas = (<NUM_LIT:1> - np.cumsum(u)) / (<NUM_LIT:1.> + np.arange(u.size))<EOL>ix = np.where(u + lambdas > <NUM_LIT:0>)[<NUM_LIT:0>].max()<EOL>return np.maximum(x + lambdas[ix], <NUM_LIT:0>)<EOL>", "docstring": "Projection onto the probability simplex\n\nhttp://arxiv.org/pdf/1309.1541v1.pdf", "id": "f9766:m10"}
{"signature": "@objective(xstar=(-<NUM_LIT>, -<NUM_LIT>))<EOL>def mccormick(theta):", "body": "x, y = theta<EOL>obj = np.sin(x + y) + (x - y)**<NUM_LIT:2> - <NUM_LIT> * x + <NUM_LIT> * y + <NUM_LIT:1><EOL>grad = np.array([np.cos(x + y) + <NUM_LIT:2> * (x - y) - <NUM_LIT>,<EOL>np.cos(x + y) - <NUM_LIT:2> * (x - y) + <NUM_LIT>])<EOL>return obj, grad<EOL>", "docstring": "McCormick function", "id": "f9768:m7"}
{"signature": "@objective(xstar=(<NUM_LIT:0>, <NUM_LIT:0>))<EOL>def camel(theta):", "body": "x, y = theta<EOL>obj = <NUM_LIT:2> * x ** <NUM_LIT:2> - <NUM_LIT> * x ** <NUM_LIT:4> + x ** <NUM_LIT:6> / <NUM_LIT:6> + x * y + y ** <NUM_LIT:2><EOL>grad = np.array([<EOL><NUM_LIT:4> * x - <NUM_LIT> * x ** <NUM_LIT:3> + x ** <NUM_LIT:5> + y,<EOL>x + <NUM_LIT:2> * y<EOL>])<EOL>return obj, grad<EOL>", "docstring": "Three-hump camel function", "id": "f9768:m8"}
{"signature": "@objective(xstar=(<NUM_LIT>, <NUM_LIT>))<EOL>def michalewicz(theta):", "body": "x, y = theta<EOL>obj = - np.sin(x) * np.sin(x ** <NUM_LIT:2> / np.pi) ** <NUM_LIT:20> -np.sin(y) * np.sin(<NUM_LIT:2> * y ** <NUM_LIT:2> / np.pi) ** <NUM_LIT:20><EOL>grad = np.array([<EOL>- np.cos(x) * np.sin(x ** <NUM_LIT:2> / np.pi) ** <NUM_LIT:20> - (<NUM_LIT> / np.pi) * x *<EOL>np.sin(x) * np.sin(x ** <NUM_LIT:2> / np.pi) ** <NUM_LIT> * np.cos(x ** <NUM_LIT:2> / np.pi),<EOL>- np.cos(y) * np.sin(<NUM_LIT:2> * y ** <NUM_LIT:2> / np.pi) ** <NUM_LIT:20> - (<NUM_LIT> / np.pi) * y * np.sin(y) *<EOL>np.sin(<NUM_LIT:2> * y ** <NUM_LIT:2> / np.pi) ** <NUM_LIT> * np.cos(<NUM_LIT:2> * y ** <NUM_LIT:2> / np.pi),<EOL>])<EOL>return obj, grad<EOL>", "docstring": "Michalewicz function", "id": "f9768:m9"}
{"signature": "def objective(param_scales=(<NUM_LIT:1>, <NUM_LIT:1>), xstar=None, seed=None):", "body": "ndim = len(param_scales)<EOL>def decorator(func):<EOL><INDENT>@wraps(func)<EOL>def wrapper(theta):<EOL><INDENT>return func(theta)<EOL><DEDENT>def param_init():<EOL><INDENT>np.random.seed(seed)<EOL>return np.random.randn(ndim,) * np.array(param_scales)<EOL><DEDENT>wrapper.ndim = ndim<EOL>wrapper.param_init = param_init<EOL>wrapper.xstar = xstar<EOL>return wrapper<EOL><DEDENT>return decorator<EOL>", "docstring": "Gives objective functions a number of dimensions and parameter range\n\n    Parameters\n    ----------\n    param_scales : (int, int)\n        Scale (std. dev.) for choosing each parameter\n\n    xstar : array_like\n        Optimal parameters", "id": "f9768:m0"}
{"signature": "def wrap(f_df, xref, size=<NUM_LIT:1>):", "body": "memoized_f_df = lrucache(lambda x: f_df(restruct(x, xref)), size)<EOL>objective = compose(first, memoized_f_df)<EOL>gradient = compose(destruct, second, memoized_f_df)<EOL>return objective, gradient<EOL>", "docstring": "Memoizes an objective + gradient function, and splits it into\ntwo functions that return just the objective and gradient, respectively.\n\nParameters\n----------\nf_df : function\n    Must be unary (takes a single argument)\n\nxref : list, dict, or array_like\n    The form of the parameters\n\nsize : int, optional\n    Size of the cache (Default=1)", "id": "f9770:m0"}
{"signature": "def check_grad(f_df, xref, stepsize=<NUM_LIT>, tol=<NUM_LIT>, width=<NUM_LIT:15>, style='<STR_LIT>', out=sys.stdout):", "body": "CORRECT = u'<STR_LIT>'<EOL>INCORRECT = u'<STR_LIT>'<EOL>obj, grad = wrap(f_df, xref, size=<NUM_LIT:0>)<EOL>x0 = destruct(xref)<EOL>df = grad(x0)<EOL>out.write(tp.header([\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"], width=width, style=style) + \"<STR_LIT:\\n>\")<EOL>out.flush()<EOL>def parse_error(number):<EOL><INDENT>failure = \"<STR_LIT>\"<EOL>passing = \"<STR_LIT>\"<EOL>warning = \"<STR_LIT>\"<EOL>end = \"<STR_LIT>\"<EOL>base = \"<STR_LIT>\"<EOL>if error < <NUM_LIT:0.1> * tol:<EOL><INDENT>return base.format(passing, error, end)<EOL><DEDENT>elif error < tol:<EOL><INDENT>return base.format(warning, error, end)<EOL><DEDENT>else:<EOL><INDENT>return base.format(failure, error, end)<EOL><DEDENT><DEDENT>num_errors = <NUM_LIT:0><EOL>for j in range(x0.size):<EOL><INDENT>dx = np.zeros(x0.size)<EOL>dx[j] = stepsize<EOL>df_approx = (obj(x0 + dx) - obj(x0 - dx)) / (<NUM_LIT:2> * stepsize)<EOL>df_analytic = df[j]<EOL>abs_error = np.linalg.norm(df_approx - df_analytic)<EOL>error = abs_error if np.allclose(abs_error, <NUM_LIT:0>) else abs_error /(np.linalg.norm(df_analytic) + np.linalg.norm(df_approx))<EOL>num_errors += error >= tol<EOL>errstr = CORRECT if error < tol else INCORRECT<EOL>out.write(tp.row([df_approx, df_analytic, parse_error(error) + '<STR_LIT:U+0020>' + errstr],<EOL>width=width, style=style) + \"<STR_LIT:\\n>\")<EOL>out.flush()<EOL><DEDENT>out.write(tp.bottom(<NUM_LIT:3>, width=width, style=style) + \"<STR_LIT:\\n>\")<EOL>return num_errors<EOL>", "docstring": "Compares the numerical gradient to the analytic gradient\n\nParameters\n----------\nf_df : function\n    The analytic objective and gradient function to check\n\nx0 : array_like\n    Parameter values to check the gradient at\n\nstepsize : float, optional\n    Stepsize for the numerical gradient. Too big and this will poorly estimate the gradient.\n    Too small and you will run into precision issues (default: 1e-6)\n\ntol : float, optional\n    Tolerance to use when coloring correct/incorrect gradients (default: 1e-5)\n\nwidth : int, optional\n    Width of the table columns (default: 15)\n\nstyle : string, optional\n    Style of the printed table, see tableprint for a list of styles (default: 'round')", "id": "f9770:m3"}
{"signature": "def create_db():", "body": "try:<EOL><INDENT>return psycopg2.connect(**db_state['<STR_LIT>'])<EOL><DEDENT>except psycopg2.OperationalError as exc:<EOL><INDENT>nosuch_db = '<STR_LIT>' % db_state['<STR_LIT>']['<STR_LIT>']<EOL>if nosuch_db in str(exc):<EOL><INDENT>try:<EOL><INDENT>master = psycopg2.connect(database='<STR_LIT>')<EOL>master.rollback()<EOL>master.autocommit = True<EOL>cursor = master.cursor()<EOL>cursor.execute('<STR_LIT>' % db_state['<STR_LIT>']['<STR_LIT>'])<EOL>cursor.close()<EOL>master.close()<EOL><DEDENT>except psycopg2.Error as exc:<EOL><INDENT>message = ('<STR_LIT>'<EOL>+ db_state['<STR_LIT>']['<STR_LIT>']<EOL>+ '<STR_LIT>' % exc)<EOL>raise RuntimeError(message)<EOL><DEDENT>else:<EOL><INDENT>conn = psycopg2.connect(**db_state['<STR_LIT>'])<EOL>db_state['<STR_LIT>'] = True<EOL>return conn<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "connect to test db", "id": "f9777:m1"}
{"signature": "def __init__(self, connection, table, xform):", "body": "super(RenameReplace, self).__init__(connection, table)<EOL>self.xform = xform<EOL>", "docstring": "xform must be a function which translates old\nnames to new ones, used on tables & pk constraints", "id": "f9780:c1:m0"}
{"signature": "def _is_url_arg(p):", "body": "return p.startswith('<STR_LIT:[>')<EOL>", "docstring": "Is an argument of the URL.\n\n>>> _is_url_arg('[idAction]')\nTrue\n>>> _is_url_arg('actions')\nFalse", "id": "f9786:m0"}
{"signature": "def _is_api_definition(line):", "body": "return line.split('<STR_LIT:U+0020>', <NUM_LIT:1>)[<NUM_LIT:0>] in HTTP_METHODS<EOL>", "docstring": "Is a definition of a Trello endpoint.\n\n>>> _is_api_definition('GET /1/actions/[idAction]')\nTrue\n>>> _is_api_definition('action')\nFalse", "id": "f9786:m1"}
{"signature": "def main():", "body": "ep = requests.get(TRELLO_API_DOC).content<EOL>root = html.fromstring(ep)<EOL>links = root.xpath('<STR_LIT>')<EOL>pages = [requests.get(TRELLO_API_DOC + u)<EOL>for u in links if u.endswith('<STR_LIT>')]<EOL>endpoints = []<EOL>for page in pages:<EOL><INDENT>root = html.fromstring(page.content)<EOL>sections = root.xpath('<STR_LIT>')<EOL>for sec in sections:<EOL><INDENT>ep_html = etree.tostring(sec).decode('<STR_LIT:utf-8>')<EOL>ep_text = html2text(ep_html).splitlines()<EOL>match = EP_DESC_REGEX.match(ep_text[<NUM_LIT:0>])<EOL>if not match:<EOL><INDENT>continue<EOL><DEDENT>ep_method, ep_url = match.groups()<EOL>ep_text[<NUM_LIT:0>] = '<STR_LIT:U+0020>'.join([ep_method, ep_url])<EOL>ep_doc = b64encode(gzip.compress('<STR_LIT:\\n>'.join(ep_text).encode('<STR_LIT:utf-8>')))<EOL>endpoints.append((ep_method, ep_url, ep_doc))<EOL><DEDENT><DEDENT>print(yaml.dump(create_tree(endpoints)))<EOL>", "docstring": "Prints the complete YAML.", "id": "f9786:m4"}
{"signature": "def _camelcase_to_underscore(url):", "body": "def upper2underscore(text):<EOL><INDENT>for char in text:<EOL><INDENT>if char.islower():<EOL><INDENT>yield char<EOL><DEDENT>else:<EOL><INDENT>yield '<STR_LIT:_>'<EOL>if char.isalpha():<EOL><INDENT>yield char.lower()<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return '<STR_LIT>'.join(upper2underscore(url))<EOL>", "docstring": "Translate camelCase into underscore format.\n\n>>> _camelcase_to_underscore('minutesBetweenSummaries')\n'minutes_between_summaries'", "id": "f9786:m2"}
{"signature": "@property<EOL><INDENT>def _url(self):<DEDENT>", "body": "if self._api_arg:<EOL><INDENT>mypart = str(self._api_arg)<EOL><DEDENT>else:<EOL><INDENT>mypart = self._name<EOL><DEDENT>if self._parent:<EOL><INDENT>return '<STR_LIT:/>'.join(filter(None, [self._parent._url, mypart]))<EOL><DEDENT>else:<EOL><INDENT>return mypart<EOL><DEDENT>", "docstring": "Resolve the URL to this point.\n\n>>> trello = TrelloAPIV1('APIKEY')\n>>> trello.batch._url\n'1/batch'\n>>> trello.boards(board_id='BOARD_ID')._url\n'1/boards/BOARD_ID'\n>>> trello.boards(board_id='BOARD_ID')(field='FIELD')._url\n'1/boards/BOARD_ID/FIELD'\n>>> trello.boards(board_id='BOARD_ID').cards(filter='FILTER')._url\n'1/boards/BOARD_ID/cards/FILTER'", "id": "f9787:c0:m2"}
{"signature": "def generate_api(version):", "body": "def get_partial_api(key, token=None):<EOL><INDENT>return TrelloAPI(ENDPOINTS[version], version, key, token=token)<EOL><DEDENT>get_partial_api.__doc__ =\"\"\"<STR_LIT>\"\"\".format(version)<EOL>return get_partial_api<EOL>", "docstring": "Generates a factory function to instantiate the API with the given\nversion.", "id": "f9787:m0"}
{"signature": "def cmp(self, other):", "body": "if isinstance(other, Range):<EOL><INDENT>start = self.start.replace(tzinfo=other.start.tz) if other.start.tz and self.start.tz is None else self.start<EOL>end = self.end.replace(tzinfo=other.end.tz) if other.end.tz and self.end.tz is None else self.end<EOL>if start == other.start and end == other.end:<EOL><INDENT>return <NUM_LIT:0> <EOL><DEDENT>elif start < other.start:<EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>elif isinstance(other, Date):<EOL><INDENT>if other.tz and self.start.tz is None:<EOL><INDENT>return <NUM_LIT:0> if other == self.start.replace(tzinfo=other.tz) else -<NUM_LIT:1> if other > self.start.replace(tzinfo=other.start.tz) else <NUM_LIT:1><EOL><DEDENT>return <NUM_LIT:0> if other == self.start else -<NUM_LIT:1> if other > self.start else <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>return self.cmp(Range(other, tz=self.start.tz))<EOL><DEDENT>", "docstring": "*Note: checks Range.start() only*\n        Key: self = [], other = {}\n            * [   {----]----} => -1\n            * {---[---}  ] => 1\n            * [---]  {---} => -1\n            * [---] same as {---} => 0\n            * [--{-}--] => -1", "id": "f9792:c0:m15"}
{"signature": "def cut(self, by, from_start=True):", "body": "s, e = copy(self.start), copy(self.end)<EOL>if from_start:<EOL><INDENT>e = s + by<EOL><DEDENT>else:<EOL><INDENT>s = e - by<EOL><DEDENT>return Range(s, e)<EOL>", "docstring": "Cuts this object from_start to the number requestd\n        returns new instance", "id": "f9792:c0:m17"}
{"signature": "def __str__(self):", "body": "return str(self.date)<EOL>", "docstring": "Returns date in representation of `%x %X` ie `2013-02-17 00:00:00`", "id": "f9794:c0:m23"}
{"signature": "def replace(self, **k):", "body": "if self.date != '<STR_LIT>':<EOL><INDENT>return Date(self.date.replace(**k))<EOL><DEDENT>else:<EOL><INDENT>return Date('<STR_LIT>')<EOL><DEDENT>", "docstring": "Note returns a new Date obj", "id": "f9794:c0:m17"}
{"signature": "def epoll_poller(timeout=<NUM_LIT:0.0>, map=None):", "body": "if map is None:<EOL><INDENT>map = asyncore.socket_map<EOL><DEDENT>pollster = select.epoll()<EOL>if map:<EOL><INDENT>for fd, obj in iteritems(map):<EOL><INDENT>flags = <NUM_LIT:0><EOL>if obj.readable():<EOL><INDENT>flags |= select.POLLIN | select.POLLPRI<EOL><DEDENT>if obj.writable():<EOL><INDENT>flags |= select.POLLOUT<EOL><DEDENT>if flags:<EOL><INDENT>flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL<EOL>pollster.register(fd, flags)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>r = pollster.poll(timeout)<EOL><DEDENT>except select.error as err:<EOL><INDENT>if err.args[<NUM_LIT:0>] != EINTR:<EOL><INDENT>raise<EOL><DEDENT>r = []<EOL><DEDENT>for fd, flags in r:<EOL><INDENT>obj = map.get(fd)<EOL>if obj is None:<EOL><INDENT>continue<EOL><DEDENT>asyncore.readwrite(obj, flags)<EOL><DEDENT><DEDENT>", "docstring": "A poller which uses epoll(), supported on Linux 2.5.44 and newer\n\nBorrowed from here:\nhttps://github.com/m13253/python-asyncore-epoll/blob/master/asyncore_epoll.py#L200", "id": "f9796:m1"}
{"signature": "def serve_forever(self, poll_interval=<NUM_LIT:0.5>):", "body": "logger.info('<STR_LIT>'.format(<EOL>self.server_name, self.server_port)<EOL>)<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>self.poll_once(poll_interval)<EOL><DEDENT>except (KeyboardInterrupt, SystemExit):<EOL><INDENT>break<EOL><DEDENT><DEDENT>self.handle_close()<EOL>logger.info('<STR_LIT>')<EOL>", "docstring": "Start serving HTTP requests\n\nThis method blocks the current thread.\n\n:param poll_interval: polling timeout\n:return:", "id": "f9796:c2:m6"}
{"signature": "def finish_response(self):", "body": "self.iterator = iter(self.result)<EOL>", "docstring": "Get WSGI response iterator for sending in handle_write", "id": "f9796:c0:m2"}
{"signature": "def make_server(host, port, app=None,<EOL>server_class=AsyncWsgiServer,<EOL>handler_class=AsyncWsgiHandler,<EOL>ws_handler_class=None,<EOL>ws_path='<STR_LIT>'):", "body": "handler_class.ws_handler_class = ws_handler_class<EOL>handler_class.ws_path = ws_path<EOL>httpd = server_class((host, port), RequestHandlerClass=handler_class)<EOL>httpd.set_app(app)<EOL>return httpd<EOL>", "docstring": "Create server instance with an optional WebSocket handler\n\n    For pure WebSocket server ``app`` may be ``None`` but an attempt to access\n    any path other than ``ws_path`` will cause server error.\n\n    :param host: hostname or IP\n    :type host: str\n    :param port: server port\n    :type port: int\n    :param app: WSGI application\n    :param server_class: WSGI server class, defaults to AsyncWsgiServer\n    :param handler_class: WSGI handler class, defaults to AsyncWsgiHandler\n    :param ws_handler_class: WebSocket hanlder class, defaults to ``None``\n    :param ws_path: WebSocket path on the server, defaults to '/ws'\n    :type ws_path: str, optional\n    :return: initialized server instance", "id": "f9796:m3"}
{"signature": "def handleClose(self):", "body": "pass<EOL>", "docstring": "Called when a websocket server gets a Close frame from a client.", "id": "f9797:c2:m3"}
{"signature": "def sendFragmentStart(self, data):", "body": "opcode = BINARY<EOL>if _check_unicode(data):<EOL><INDENT>opcode = TEXT<EOL><DEDENT>self._sendMessage(True, opcode, data)<EOL>", "docstring": "Send the start of a data fragment stream to a websocket client.\nSubsequent data should be sent using sendFragment().\nA fragment stream is completed when sendFragmentEnd() is called.\n\nIf data is a unicode object then the frame is sent as Text.\nIf the data is a bytearray object then the frame is sent as Binary.", "id": "f9797:c2:m8"}
{"signature": "def sendMessage(self, data):", "body": "opcode = BINARY<EOL>if _check_unicode(data):<EOL><INDENT>opcode = TEXT<EOL><DEDENT>self._sendMessage(False, opcode, data)<EOL>", "docstring": "Send websocket data frame to the client.\n\nIf data is a unicode object then the frame is sent as Text.\nIf the data is a bytearray object then the frame is sent as Binary.", "id": "f9797:c2:m11"}
{"signature": "def close(self, status=<NUM_LIT:1000>, reason=u'<STR_LIT>'):", "body": "try:<EOL><INDENT>if self.closed is False:<EOL><INDENT>close_msg = bytearray()<EOL>close_msg.extend(struct.pack(\"<STR_LIT>\", status))<EOL>if _check_unicode(reason):<EOL><INDENT>close_msg.extend(reason.encode('<STR_LIT:utf-8>'))<EOL><DEDENT>else:<EOL><INDENT>close_msg.extend(reason)<EOL><DEDENT>self._sendMessage(False, CLOSE, close_msg)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>self.closed = True<EOL><DEDENT>", "docstring": "Send Close frame to the client. The underlying socket is only closed\nwhen the client acknowledges the Close frame.\n\nstatus is the closing identifier.\nreason is the reason for the close.", "id": "f9797:c2:m6"}
{"signature": "@classmethod<EOL><INDENT>def get(cls, update_dict=None, remove_fields=None):<DEDENT>", "body": "cls.demo_course_count += <NUM_LIT:1><EOL>course_copy = copy.deepcopy(cls.DEMO_COURSE)<EOL>if update_dict:<EOL><INDENT>if \"<STR_LIT:content>\" in update_dict:<EOL><INDENT>course_copy[\"<STR_LIT:content>\"].update(update_dict[\"<STR_LIT:content>\"])<EOL>del update_dict[\"<STR_LIT:content>\"]<EOL><DEDENT>course_copy.update(update_dict)<EOL><DEDENT>course_copy.update({\"<STR_LIT:id>\": \"<STR_LIT>\".format(course_copy[\"<STR_LIT:id>\"], cls.demo_course_count)})<EOL>if remove_fields:<EOL><INDENT>for remove_field in remove_fields:<EOL><INDENT>if remove_field in course_copy:<EOL><INDENT>del course_copy[remove_field]<EOL><DEDENT><DEDENT><DEDENT>return course_copy<EOL>", "docstring": "get a new demo course", "id": "f9806:c0:m0"}
{"signature": "@property<EOL><INDENT>def _is_elastic(self):<DEDENT>", "body": "return isinstance(self.searcher, ElasticSearchEngine)<EOL>", "docstring": "check search engine implementation, to manage cleanup differently", "id": "f9806:c1:m0"}
{"signature": "def assert_results_returned_event(self, search_term, size, page, total):", "body": "returned_results_call = self.mock_tracker.emit.mock_calls[<NUM_LIT:1>]  <EOL>expected_result = call('<STR_LIT>', {<EOL>\"<STR_LIT>\": six.text_type(search_term),<EOL>\"<STR_LIT>\": size,<EOL>\"<STR_LIT>\": page,<EOL>\"<STR_LIT>\": total,<EOL>})<EOL>self.assertEqual(expected_result, returned_results_call)<EOL>", "docstring": "Ensures an results returned event was emitted", "id": "f9807:c0:m5"}
{"signature": "def assert_no_events_were_emitted(self):", "body": "self.assertFalse(self.mock_tracker.emit.called)<EOL>", "docstring": "Ensures no events were emitted since the last event related assertion", "id": "f9807:c0:m3"}
{"signature": "@property<EOL><INDENT>def additional_property(self):<DEDENT>", "body": "return \"<STR_LIT>\"<EOL>", "docstring": "additional property that should appear within processed results", "id": "f9810:c1:m0"}
{"signature": "def should_remove(self, user):", "body": "return \"<STR_LIT>\" in self._results_fields<EOL>", "docstring": "remove items when url is None", "id": "f9810:c1:m2"}
{"signature": "def search(self, **kwargs):  ", "body": "raise exceptions.ElasticsearchException(\"<STR_LIT>\")<EOL>", "docstring": "this will definitely fail", "id": "f9813:c4:m0"}
{"signature": "def post_discovery_request(body):", "body": "address = '<STR_LIT>'<EOL>response = Client().post(address, body)<EOL>return getattr(response, \"<STR_LIT>\", <NUM_LIT>), json.loads(getattr(response, \"<STR_LIT:content>\", None))<EOL>", "docstring": "Helper method to post the request and process the response", "id": "f9813:m1"}
{"signature": "def post_request(body, course_id=None):", "body": "address = '<STR_LIT:/>' if course_id is None else '<STR_LIT>'.format(course_id)<EOL>response = Client().post(address, body)<EOL>return getattr(response, \"<STR_LIT>\", <NUM_LIT>), json.loads(getattr(response, \"<STR_LIT:content>\", None))<EOL>", "docstring": "Helper method to post the request and process the response", "id": "f9813:m0"}
{"signature": "@property<EOL><INDENT>def searcher(self):<DEDENT>", "body": "if self._searcher is None:<EOL><INDENT>self._searcher = SearchEngine.get_search_engine(TEST_INDEX_NAME)<EOL><DEDENT>return self._searcher<EOL>", "docstring": "cached instance of search engine", "id": "f9813:c0:m0"}
{"signature": "def json_date_to_datetime(json_date_string_value):", "body": "if \"<STR_LIT:T>\" in json_date_string_value:<EOL><INDENT>if \"<STR_LIT:.>\" in json_date_string_value:<EOL><INDENT>format_string = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>format_string = \"<STR_LIT>\"<EOL><DEDENT>if json_date_string_value.endswith(\"<STR_LIT>\"):<EOL><INDENT>format_string += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>else:<EOL><INDENT>format_string = \"<STR_LIT>\"<EOL><DEDENT>return datetime.strptime(<EOL>json_date_string_value,<EOL>format_string<EOL>)<EOL>", "docstring": "converts json date string to date object", "id": "f9814:m0"}
{"signature": "def search(self,<EOL>query_string=None,<EOL>field_dictionary=None,<EOL>filter_dictionary=None,<EOL>exclude_dictionary=None,<EOL>facet_terms=None,<EOL>**kwargs):  ", "body": "if MockSearchEngine._disabled:<EOL><INDENT>return {<EOL>\"<STR_LIT>\": <NUM_LIT:10>,<EOL>\"<STR_LIT>\": <NUM_LIT:0>,<EOL>\"<STR_LIT>\": <NUM_LIT:0>,<EOL>\"<STR_LIT>\": []<EOL>}<EOL><DEDENT>documents_to_search = []<EOL>if \"<STR_LIT>\" in kwargs:<EOL><INDENT>documents_to_search = MockSearchEngine.load_doc_type(self.index_name, kwargs[\"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>index = MockSearchEngine.load_index(self.index_name)<EOL>for doc_type in index:<EOL><INDENT>documents_to_search.extend(index[doc_type])<EOL><DEDENT><DEDENT>if field_dictionary:<EOL><INDENT>documents_to_search = _filter_intersection(documents_to_search, field_dictionary)<EOL><DEDENT>if filter_dictionary:<EOL><INDENT>documents_to_search = _filter_intersection(documents_to_search, filter_dictionary, True)<EOL><DEDENT>if query_string:<EOL><INDENT>documents_to_search = _process_query_string(documents_to_search, query_string)<EOL><DEDENT>if \"<STR_LIT>\" in kwargs:<EOL><INDENT>if not exclude_dictionary:<EOL><INDENT>exclude_dictionary = {}<EOL><DEDENT>if \"<STR_LIT:id>\" not in exclude_dictionary:<EOL><INDENT>exclude_dictionary[\"<STR_LIT:id>\"] = []<EOL><DEDENT>exclude_dictionary[\"<STR_LIT:id>\"].extend(kwargs[\"<STR_LIT>\"])<EOL><DEDENT>if exclude_dictionary:<EOL><INDENT>documents_to_search = _process_exclude_dictionary(documents_to_search, exclude_dictionary)<EOL><DEDENT>def score_documents(documents_to_search):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>search_results = []<EOL>max_score = <NUM_LIT:0><EOL>while documents_to_search:<EOL><INDENT>current_doc = documents_to_search[<NUM_LIT:0>]<EOL>score = len([d for d in documents_to_search if d == current_doc])<EOL>if score > max_score:<EOL><INDENT>max_score = score<EOL><DEDENT>documents_to_search = [d for d in documents_to_search if d != current_doc]<EOL>data = 
copy.copy(current_doc)<EOL>search_results.append(<EOL>{<EOL>\"<STR_LIT>\": score,<EOL>\"<STR_LIT:data>\": data,<EOL>}<EOL>)<EOL><DEDENT>return search_results, max_score<EOL><DEDENT>search_results, max_score = score_documents(documents_to_search)<EOL>results = MockSearchEngine._paginate_results(<EOL>kwargs[\"<STR_LIT:size>\"] if \"<STR_LIT:size>\" in kwargs else None,<EOL>kwargs[\"<STR_LIT>\"] if \"<STR_LIT>\" in kwargs else None,<EOL>sorted(search_results, key=lambda k: k[\"<STR_LIT>\"])<EOL>)<EOL>response = {<EOL>\"<STR_LIT>\": <NUM_LIT:10>,<EOL>\"<STR_LIT>\": len(search_results),<EOL>\"<STR_LIT>\": max_score,<EOL>\"<STR_LIT>\": results<EOL>}<EOL>if facet_terms:<EOL><INDENT>response[\"<STR_LIT>\"] = _count_facet_values(documents_to_search, facet_terms)<EOL><DEDENT>return response<EOL>", "docstring": "Perform search upon documents within index", "id": "f9814:c0:m14"}
{"signature": "def index(self, doc_type, sources):  ", "body": "if not MockSearchEngine._disabled:<EOL><INDENT>doc_ids = [s[\"<STR_LIT:id>\"] for s in sources if \"<STR_LIT:id>\" in s]<EOL>MockSearchEngine.remove_documents(self.index_name, doc_type, doc_ids)<EOL>MockSearchEngine.add_documents(self.index_name, doc_type, sources)<EOL><DEDENT>", "docstring": "Add/update documents of given type to the index", "id": "f9814:c0:m12"}
{"signature": "@classmethod<EOL><INDENT>def load_index(cls, index_name):<DEDENT>", "body": "cls._load_from_file()<EOL>if index_name not in cls._mock_elastic:<EOL><INDENT>cls._mock_elastic[index_name] = {}<EOL>cls._write_to_file()<EOL><DEDENT>return cls._mock_elastic[index_name]<EOL>", "docstring": "load the index, if necessary from the backed file", "id": "f9814:c0:m6"}
{"signature": "def remove(self, doc_type, doc_ids):  ", "body": "if not MockSearchEngine._disabled:<EOL><INDENT>MockSearchEngine.remove_documents(self.index_name, doc_type, doc_ids)<EOL><DEDENT>", "docstring": "Remove documents of type with given ids from the index", "id": "f9814:c0:m13"}
{"signature": "@staticmethod<EOL><INDENT>def _paginate_results(size, from_, raw_results):<DEDENT>", "body": "results = raw_results<EOL>if size:<EOL><INDENT>start = <NUM_LIT:0><EOL>if from_ is not None:<EOL><INDENT>start = from_<EOL><DEDENT>results = raw_results[start:start + size]<EOL><DEDENT>return results<EOL>", "docstring": "Give the correct page of results", "id": "f9814:c0:m5"}
{"signature": "def _process_exclude_dictionary(documents_to_search, exclude_dictionary):", "body": "for exclude_property in exclude_dictionary:<EOL><INDENT>exclude_values = exclude_dictionary[exclude_property]<EOL>if not isinstance(exclude_values, list):<EOL><INDENT>exclude_values = [exclude_values]<EOL><DEDENT>documents_to_search = [<EOL>document<EOL>for document in documents_to_search<EOL>if document.get(exclude_property) not in exclude_values<EOL>]<EOL><DEDENT>return documents_to_search<EOL>", "docstring": "remove results that have fields that match in the exclude_dictionary", "id": "f9814:m4"}
{"signature": "@classmethod<EOL><INDENT>def _write_to_file(cls, create_if_missing=False):<DEDENT>", "body": "file_name = cls._backing_file(create_if_missing)<EOL>if file_name:<EOL><INDENT>with open(file_name, \"<STR_LIT>\") as dict_file:<EOL><INDENT>json.dump(cls._mock_elastic, dict_file, cls=DjangoJSONEncoder)<EOL><DEDENT><DEDENT>", "docstring": "write the index dict to the backing file", "id": "f9814:c0:m3"}
{"signature": "@classmethod<EOL><INDENT>def destroy(cls):<DEDENT>", "body": "cls._mock_elastic = {}<EOL>cls._write_to_file()<EOL>", "docstring": "Clean out the dictionary for test resets", "id": "f9814:c0:m10"}
{"signature": "@classmethod<EOL><INDENT>def set_search_enviroment(cls, **kwargs):<DEDENT>", "body": "initializer = _load_class(getattr(settings, \"<STR_LIT>\", None), cls)()<EOL>return initializer.initialize(**kwargs)<EOL>", "docstring": "Called from within search handler\nFinds desired subclass and calls initialize method", "id": "f9816:c0:m1"}
{"signature": "def initialize(self, **kwargs):", "body": "pass<EOL>", "docstring": "empty base implementation", "id": "f9816:c0:m0"}
{"signature": "def _process_field_values(request):", "body": "return {<EOL>field_key: request.POST[field_key]<EOL>for field_key in request.POST<EOL>if field_key in course_discovery_filter_fields()<EOL>}<EOL>", "docstring": "Create separate dictionary of supported filter values provided", "id": "f9817:m1"}
{"signature": "@require_POST<EOL>def do_search(request, course_id=None):", "body": "<EOL>SearchInitializer.set_search_enviroment(request=request, course_id=course_id)<EOL>results = {<EOL>\"<STR_LIT:error>\": _(\"<STR_LIT>\")<EOL>}<EOL>status_code = <NUM_LIT><EOL>search_term = request.POST.get(\"<STR_LIT>\", None)<EOL>try:<EOL><INDENT>if not search_term:<EOL><INDENT>raise ValueError(_('<STR_LIT>'))<EOL><DEDENT>size, from_, page = _process_pagination_values(request)<EOL>track.emit(<EOL>'<STR_LIT>',<EOL>{<EOL>\"<STR_LIT>\": search_term,<EOL>\"<STR_LIT>\": size,<EOL>\"<STR_LIT>\": page,<EOL>}<EOL>)<EOL>results = perform_search(<EOL>search_term,<EOL>user=request.user,<EOL>size=size,<EOL>from_=from_,<EOL>course_id=course_id<EOL>)<EOL>status_code = <NUM_LIT:200><EOL>track.emit(<EOL>'<STR_LIT>',<EOL>{<EOL>\"<STR_LIT>\": search_term,<EOL>\"<STR_LIT>\": size,<EOL>\"<STR_LIT>\": page,<EOL>\"<STR_LIT>\": results[\"<STR_LIT>\"],<EOL>}<EOL>)<EOL><DEDENT>except ValueError as invalid_err:<EOL><INDENT>results = {<EOL>\"<STR_LIT:error>\": six.text_type(invalid_err)<EOL>}<EOL>log.debug(six.text_type(invalid_err))<EOL><DEDENT>except QueryParseError:<EOL><INDENT>results = {<EOL>\"<STR_LIT:error>\": _('<STR_LIT>')<EOL>}<EOL><DEDENT>except Exception as err:  <EOL><INDENT>results = {<EOL>\"<STR_LIT:error>\": _('<STR_LIT>').format(search_string=search_term)<EOL>}<EOL>log.exception(<EOL>'<STR_LIT>',<EOL>search_term,<EOL>request.user.id,<EOL>err<EOL>)<EOL><DEDENT>return JsonResponse(results, status=status_code)<EOL>", "docstring": "Search view for http requests\n\nArgs:\n    request (required) - django request object\n    course_id (optional) - course_id within which to restrict search\n\nReturns:\n    http json response with the following fields\n        \"took\" - how many seconds the operation took\n        \"total\" - how many results were found\n        \"max_score\" - maximum score from these results\n        \"results\" - json array of result documents\n\n        or\n\n        
\"error\" - displayable information about an error that occured on the server\n\nPOST Params:\n    \"search_string\" (required) - text upon which to search\n    \"page_size\" (optional)- how many results to return per page (defaults to 20, with maximum cutoff at 100)\n    \"page_index\" (optional) - for which page (zero-indexed) to include results (defaults to 0)", "id": "f9817:m2"}
{"signature": "@require_POST<EOL>def course_discovery(request):", "body": "results = {<EOL>\"<STR_LIT:error>\": _(\"<STR_LIT>\")<EOL>}<EOL>status_code = <NUM_LIT><EOL>search_term = request.POST.get(\"<STR_LIT>\", None)<EOL>try:<EOL><INDENT>size, from_, page = _process_pagination_values(request)<EOL>field_dictionary = _process_field_values(request)<EOL>track.emit(<EOL>'<STR_LIT>',<EOL>{<EOL>\"<STR_LIT>\": search_term,<EOL>\"<STR_LIT>\": size,<EOL>\"<STR_LIT>\": page,<EOL>}<EOL>)<EOL>results = course_discovery_search(<EOL>search_term=search_term,<EOL>size=size,<EOL>from_=from_,<EOL>field_dictionary=field_dictionary,<EOL>)<EOL>track.emit(<EOL>'<STR_LIT>',<EOL>{<EOL>\"<STR_LIT>\": search_term,<EOL>\"<STR_LIT>\": size,<EOL>\"<STR_LIT>\": page,<EOL>\"<STR_LIT>\": results[\"<STR_LIT>\"],<EOL>}<EOL>)<EOL>status_code = <NUM_LIT:200><EOL><DEDENT>except ValueError as invalid_err:<EOL><INDENT>results = {<EOL>\"<STR_LIT:error>\": six.text_type(invalid_err)<EOL>}<EOL>log.debug(six.text_type(invalid_err))<EOL><DEDENT>except QueryParseError:<EOL><INDENT>results = {<EOL>\"<STR_LIT:error>\": _('<STR_LIT>')<EOL>}<EOL><DEDENT>except Exception as err:  <EOL><INDENT>results = {<EOL>\"<STR_LIT:error>\": _('<STR_LIT>').format(search_string=search_term)<EOL>}<EOL>log.exception(<EOL>'<STR_LIT>',<EOL>search_term,<EOL>request.user.id,<EOL>err<EOL>)<EOL><DEDENT>return JsonResponse(results, status=status_code)<EOL>", "docstring": "Search for courses\n\nArgs:\n    request (required) - django request object\n\nReturns:\n    http json response with the following fields\n        \"took\" - how many seconds the operation took\n        \"total\" - how many results were found\n        \"max_score\" - maximum score from these resutls\n        \"results\" - json array of result documents\n\n        or\n\n        \"error\" - displayable information about an error that occured on the server\n\nPOST Params:\n    \"search_string\" (optional) - text with which to search for courses\n    \"page_size\" 
(optional)- how many results to return per page (defaults to 20, with maximum cutoff at 100)\n    \"page_index\" (optional) - for which page (zero-indexed) to include results (defaults to 0)", "id": "f9817:m3"}
{"signature": "def field_dictionary(self, **kwargs):", "body": "field_dictionary = {}<EOL>if \"<STR_LIT>\" in kwargs and kwargs[\"<STR_LIT>\"]:<EOL><INDENT>field_dictionary[\"<STR_LIT>\"] = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>return field_dictionary<EOL>", "docstring": "base implementation which add course if provided", "id": "f9818:c0:m1"}
{"signature": "def filter_dictionary(self, **kwargs):", "body": "return {\"<STR_LIT>\": DateRange(None, datetime.utcnow())}<EOL>", "docstring": "base implementation which filters via start_date", "id": "f9818:c0:m0"}
{"signature": "def _check_mappings(self, doc_type, body):", "body": "<EOL>exclude_fields = [\"<STR_LIT:content>\"]<EOL>field_properties = getattr(settings, \"<STR_LIT>\", {})<EOL>def field_property(field_name, field_value):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>prop_val = None<EOL>if field_name in field_properties:<EOL><INDENT>prop_val = field_properties[field_name]<EOL><DEDENT>elif isinstance(field_value, dict):<EOL><INDENT>props = {fn: field_property(fn, field_value[fn]) for fn in field_value}<EOL>prop_val = {\"<STR_LIT>\": props}<EOL><DEDENT>else:<EOL><INDENT>prop_val = {<EOL>\"<STR_LIT:type>\": \"<STR_LIT:string>\",<EOL>\"<STR_LIT:index>\": \"<STR_LIT>\",<EOL>}<EOL><DEDENT>return prop_val<EOL><DEDENT>new_properties = {<EOL>field: field_property(field, value)<EOL>for field, value in body.items()<EOL>if (field not in exclude_fields) and (field not in self._get_mappings(doc_type).get('<STR_LIT>', {}))<EOL>}<EOL>if new_properties:<EOL><INDENT>self._es.indices.put_mapping(<EOL>index=self.index_name,<EOL>doc_type=doc_type,<EOL>body={<EOL>doc_type: {<EOL>\"<STR_LIT>\": new_properties,<EOL>}<EOL>}<EOL>)<EOL>self._clear_mapping(doc_type)<EOL><DEDENT>", "docstring": "We desire to index content so that anything we want to be textually searchable(and therefore needing to be\nanalysed), but the other fields are designed to be filters, and only require an exact match. So, we want to\nset up the mappings for these fields as \"not_analyzed\" - this will allow our filters to work faster because\nthey only have to work off exact matches", "id": "f9819:c0:m7"}
{"signature": "def _clear_mapping(self, doc_type):", "body": "ElasticSearchEngine.set_mappings(self.index_name, doc_type, {})<EOL>", "docstring": "Remove the cached mappings, so that they get loaded from ES next time they are requested", "id": "f9819:c0:m5"}
{"signature": "def _process_facet_terms(facet_terms):", "body": "elastic_facets = {}<EOL>for facet in facet_terms:<EOL><INDENT>facet_term = {\"<STR_LIT>\": facet}<EOL>if facet_terms[facet]:<EOL><INDENT>for facet_option in facet_terms[facet]:<EOL><INDENT>facet_term[facet_option] = facet_terms[facet][facet_option]<EOL><DEDENT><DEDENT>elastic_facets[facet] = {<EOL>\"<STR_LIT>\": facet_term<EOL>}<EOL><DEDENT>return elastic_facets<EOL>", "docstring": "We have a list of terms with which we return facets", "id": "f9819:m6"}
{"signature": "def _translate_hits(es_response):", "body": "def translate_result(result):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>translated_result = copy.copy(result)<EOL>data = translated_result.pop(\"<STR_LIT>\")<EOL>translated_result.update({<EOL>\"<STR_LIT:data>\": data,<EOL>\"<STR_LIT>\": translated_result[\"<STR_LIT>\"]<EOL>})<EOL>return translated_result<EOL><DEDENT>def translate_facet(result):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>terms = {term[\"<STR_LIT>\"]: term[\"<STR_LIT:count>\"] for term in result[\"<STR_LIT>\"]}<EOL>return {<EOL>\"<STR_LIT>\": terms,<EOL>\"<STR_LIT>\": result[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": result[\"<STR_LIT>\"],<EOL>}<EOL><DEDENT>results = [translate_result(hit) for hit in es_response[\"<STR_LIT>\"][\"<STR_LIT>\"]]<EOL>response = {<EOL>\"<STR_LIT>\": es_response[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": es_response[\"<STR_LIT>\"][\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": es_response[\"<STR_LIT>\"][\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": results,<EOL>}<EOL>if \"<STR_LIT>\" in es_response:<EOL><INDENT>response[\"<STR_LIT>\"] = {facet: translate_facet(es_response[\"<STR_LIT>\"][facet]) for facet in es_response[\"<STR_LIT>\"]}<EOL><DEDENT>return response<EOL>", "docstring": "Provide resultset in our desired format from elasticsearch results", "id": "f9819:m0"}
{"signature": "def remove(self, doc_type, doc_ids, **kwargs):", "body": "try:<EOL><INDENT>actions = []<EOL>for doc_id in doc_ids:<EOL><INDENT>log.debug(\"<STR_LIT>\", doc_type, doc_id)<EOL>action = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>\"<STR_LIT>\": self.index_name,<EOL>\"<STR_LIT>\": doc_type,<EOL>\"<STR_LIT>\": doc_id<EOL>}<EOL>actions.append(action)<EOL><DEDENT>bulk(self._es, actions, **kwargs)<EOL><DEDENT>except BulkIndexError as ex:<EOL><INDENT>valid_errors = [error for error in ex.errors if error['<STR_LIT>']['<STR_LIT:status>'] != <NUM_LIT>]<EOL>if valid_errors:<EOL><INDENT>log.exception(\"<STR_LIT>\")<EOL>raise<EOL><DEDENT><DEDENT>", "docstring": "Implements call to remove the documents from the index", "id": "f9819:c0:m9"}
{"signature": "@classmethod<EOL><INDENT>def log_indexing_error(cls, indexing_errors):<DEDENT>", "body": "indexing_errors_log = []<EOL>for indexing_error in indexing_errors:<EOL><INDENT>indexing_errors_log.append(str(indexing_error))<EOL><DEDENT>raise exceptions.ElasticsearchException('<STR_LIT:U+002CU+0020>'.join(indexing_errors_log))<EOL>", "docstring": "Logs indexing errors and raises a general ElasticSearch Exception", "id": "f9819:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def get_cache_item_name(index_name, doc_type):<DEDENT>", "body": "return \"<STR_LIT>\".format(<EOL>index_name,<EOL>doc_type<EOL>)<EOL>", "docstring": "name-formatter for cache_item_name", "id": "f9819:c0:m0"}
{"signature": "def _process_field_filters(field_dictionary):", "body": "return [_get_filter_field(field, field_value) for field, field_value in field_dictionary.items()]<EOL>", "docstring": "We have a field_dictionary - we match the values using a \"term\" filter in elasticsearch", "id": "f9819:m3"}
{"signature": "def _process_exclude_dictionary(exclude_dictionary):", "body": "<EOL>not_properties = []<EOL>for exclude_property in exclude_dictionary:<EOL><INDENT>exclude_values = exclude_dictionary[exclude_property]<EOL>if not isinstance(exclude_values, list):<EOL><INDENT>exclude_values = [exclude_values]<EOL><DEDENT>not_properties.extend([{\"<STR_LIT>\": {exclude_property: exclude_value}} for exclude_value in exclude_values])<EOL><DEDENT>if not not_properties:<EOL><INDENT>return {}<EOL><DEDENT>return {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": not_properties<EOL>}<EOL>}<EOL>}<EOL>", "docstring": "Based on values in the exclude_dictionary generate a list of term queries that\nwill filter out unwanted results.", "id": "f9819:m5"}
{"signature": "def _process_field_queries(field_dictionary):", "body": "def field_item(field):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return {<EOL>\"<STR_LIT>\": {<EOL>field: field_dictionary[field]<EOL>}<EOL>}<EOL><DEDENT>return [field_item(field) for field in field_dictionary]<EOL>", "docstring": "We have a field_dictionary - we want to match the values for an elasticsearch \"match\" query\nThis is only potentially useful when trying to tune certain search operations", "id": "f9819:m2"}
{"signature": "def _get_filter_field(field_name, field_value):", "body": "filter_field = None<EOL>if isinstance(field_value, ValueRange):<EOL><INDENT>range_values = {}<EOL>if field_value.lower:<EOL><INDENT>range_values.update({\"<STR_LIT>\": field_value.lower_string})<EOL><DEDENT>if field_value.upper:<EOL><INDENT>range_values.update({\"<STR_LIT>\": field_value.upper_string})<EOL><DEDENT>filter_field = {<EOL>\"<STR_LIT>\": {<EOL>field_name: range_values<EOL>}<EOL>}<EOL><DEDENT>elif _is_iterable(field_value):<EOL><INDENT>filter_field = {<EOL>\"<STR_LIT>\": {<EOL>field_name: field_value<EOL>}<EOL>}<EOL><DEDENT>else:<EOL><INDENT>filter_field = {<EOL>\"<STR_LIT>\": {<EOL>field_name: field_value<EOL>}<EOL>}<EOL><DEDENT>return filter_field<EOL>", "docstring": "Return field to apply into filter, if an array then use a range, otherwise look for a term match", "id": "f9819:m1"}
{"signature": "def index(self, doc_type, sources, **kwargs):", "body": "try:<EOL><INDENT>actions = []<EOL>for source in sources:<EOL><INDENT>self._check_mappings(doc_type, source)<EOL>id_ = source['<STR_LIT:id>'] if '<STR_LIT:id>' in source else None<EOL>log.debug(\"<STR_LIT>\", doc_type, id_)<EOL>action = {<EOL>\"<STR_LIT>\": self.index_name,<EOL>\"<STR_LIT>\": doc_type,<EOL>\"<STR_LIT>\": id_,<EOL>\"<STR_LIT>\": source<EOL>}<EOL>actions.append(action)<EOL><DEDENT>_, indexing_errors = bulk(<EOL>self._es,<EOL>actions,<EOL>**kwargs<EOL>)<EOL>if indexing_errors:<EOL><INDENT>ElasticSearchEngine.log_indexing_error(indexing_errors)<EOL><DEDENT><DEDENT>except Exception as ex:<EOL><INDENT>log.exception(\"<STR_LIT>\", str(ex))<EOL>raise<EOL><DEDENT>", "docstring": "Implements call to add documents to the ES index\nNote the call to _check_mappings which will setup fields with the desired mappings", "id": "f9819:c0:m8"}
{"signature": "def _load_class(class_path, default):", "body": "if class_path is None:<EOL><INDENT>return default<EOL><DEDENT>component = class_path.rsplit('<STR_LIT:.>', <NUM_LIT:1>)<EOL>result_processor = getattr(<EOL>importlib.import_module(component[<NUM_LIT:0>]),<EOL>component[<NUM_LIT:1>],<EOL>default<EOL>) if len(component) > <NUM_LIT:1> else default<EOL>return result_processor<EOL>", "docstring": "Loads the class from the class_path string", "id": "f9822:m0"}
{"signature": "@property<EOL><INDENT>def lower_string(self):<DEDENT>", "body": "return self._lower.isoformat()<EOL>", "docstring": "use isoformat for _lower date's string format", "id": "f9822:c1:m1"}
{"signature": "def perform_search(<EOL>search_term,<EOL>user=None,<EOL>size=<NUM_LIT:10>,<EOL>from_=<NUM_LIT:0>,<EOL>course_id=None):", "body": "<EOL>(field_dictionary, filter_dictionary, exclude_dictionary) = SearchFilterGenerator.generate_field_filters(<EOL>user=user,<EOL>course_id=course_id<EOL>)<EOL>searcher = SearchEngine.get_search_engine(getattr(settings, \"<STR_LIT>\", \"<STR_LIT>\"))<EOL>if not searcher:<EOL><INDENT>raise NoSearchEngineError(\"<STR_LIT>\")<EOL><DEDENT>results = searcher.search_string(<EOL>search_term,<EOL>field_dictionary=field_dictionary,<EOL>filter_dictionary=filter_dictionary,<EOL>exclude_dictionary=exclude_dictionary,<EOL>size=size,<EOL>from_=from_,<EOL>doc_type=\"<STR_LIT>\",<EOL>)<EOL>for result in results[\"<STR_LIT>\"]:<EOL><INDENT>result[\"<STR_LIT:data>\"] = SearchResultProcessor.process_result(result[\"<STR_LIT:data>\"], search_term, user)<EOL><DEDENT>results[\"<STR_LIT>\"] = len([r for r in results[\"<STR_LIT>\"] if r[\"<STR_LIT:data>\"] is None])<EOL>results[\"<STR_LIT>\"] = [r for r in results[\"<STR_LIT>\"] if r[\"<STR_LIT:data>\"] is not None]<EOL>return results<EOL>", "docstring": "Call the search engine with the appropriate parameters", "id": "f9823:m2"}
{"signature": "def search(self,<EOL>query_string=None,<EOL>field_dictionary=None,<EOL>filter_dictionary=None,<EOL>exclude_dictionary=None,<EOL>facet_terms=None,<EOL>**kwargs):  ", "body": "raise NotImplementedError<EOL>", "docstring": "This operation is called to search for matching documents within the search index", "id": "f9824:c0:m3"}
{"signature": "def remove(self, doc_type, doc_ids, **kwargs):", "body": "raise NotImplementedError<EOL>", "docstring": "This operation is called to remove documents of given type from the search index", "id": "f9824:c0:m2"}
{"signature": "def search_fields(self, field_dictionary, **kwargs):", "body": "return self.search(field_dictionary=field_dictionary, **kwargs)<EOL>", "docstring": "Helper function when primary search is for a set of matching fields", "id": "f9824:c0:m5"}
{"signature": "@staticmethod<EOL><INDENT>def get_search_engine(index=None):<DEDENT>", "body": "search_engine_class = _load_class(getattr(settings, \"<STR_LIT>\", None), None)<EOL>return search_engine_class(index=index) if search_engine_class else None<EOL>", "docstring": "Returns the desired implementor (defined in settings)", "id": "f9824:c0:m6"}
{"signature": "def index(self, doc_type, sources, **kwargs):", "body": "raise NotImplementedError<EOL>", "docstring": "This operation is called to add documents of given type to the search index", "id": "f9824:c0:m1"}
{"signature": "@property<EOL><INDENT>def excerpt(self):<DEDENT>", "body": "if \"<STR_LIT:content>\" not in self._results_fields:<EOL><INDENT>return None<EOL><DEDENT>match_phrases = [self._match_phrase]<EOL>if six.PY2:<EOL><INDENT>separate_phrases = [<EOL>phrase.decode('<STR_LIT:utf-8>')<EOL>for phrase in shlex.split(self._match_phrase.encode('<STR_LIT:utf-8>'))<EOL>]<EOL><DEDENT>else:<EOL><INDENT>separate_phrases = [<EOL>phrase<EOL>for phrase in shlex.split(self._match_phrase)<EOL>]<EOL><DEDENT>if len(separate_phrases) > <NUM_LIT:1>:<EOL><INDENT>match_phrases.extend(separate_phrases)<EOL><DEDENT>else:<EOL><INDENT>match_phrases = separate_phrases<EOL><DEDENT>matches = SearchResultProcessor.find_matches(<EOL>SearchResultProcessor.strings_in_dictionary(self._results_fields[\"<STR_LIT:content>\"]),<EOL>match_phrases,<EOL>DESIRED_EXCERPT_LENGTH<EOL>)<EOL>excerpt_text = ELLIPSIS.join(matches)<EOL>for match_word in match_phrases:<EOL><INDENT>excerpt_text = SearchResultProcessor.decorate_matches(excerpt_text, match_word)<EOL><DEDENT>return excerpt_text<EOL>", "docstring": "Property to display a useful excerpt representing the matches within the results", "id": "f9825:c0:m7"}
{"signature": "@staticmethod<EOL><INDENT>def find_matches(strings, words, length_hoped):<DEDENT>", "body": "lower_words = [w.lower() for w in words]<EOL>def has_match(string):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>lower_string = string.lower()<EOL>for test_word in lower_words:<EOL><INDENT>if test_word in lower_string:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL><DEDENT>shortened_strings = [textwrap.wrap(s) for s in strings]<EOL>short_string_list = list(chain.from_iterable(shortened_strings))<EOL>matches = [ms for ms in short_string_list if has_match(ms)]<EOL>cumulative_len = <NUM_LIT:0><EOL>break_at = None<EOL>for idx, match in enumerate(matches):<EOL><INDENT>cumulative_len += len(match)<EOL>if cumulative_len >= length_hoped:<EOL><INDENT>break_at = idx<EOL>break<EOL><DEDENT><DEDENT>return matches[<NUM_LIT:0>:break_at]<EOL>", "docstring": "Used by default property excerpt", "id": "f9825:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def strings_in_dictionary(dictionary):<DEDENT>", "body": "strings = [value for value in six.itervalues(dictionary) if not isinstance(value, dict)]<EOL>for child_dict in [dv for dv in six.itervalues(dictionary) if isinstance(dv, dict)]:<EOL><INDENT>strings.extend(SearchResultProcessor.strings_in_dictionary(child_dict))<EOL><DEDENT>return strings<EOL>", "docstring": "Used by default implementation for finding excerpt", "id": "f9825:c0:m1"}
{"signature": "def should_remove(self, user):  ", "body": "return False<EOL>", "docstring": "Override this in a class in order to add in last-chance access checks to the search process\nYour application will want to make this decision", "id": "f9825:c0:m4"}
{"signature": "def contentEncoding(requestHeaders, encoding=None):", "body": "if encoding is None:<EOL><INDENT>encoding = b'<STR_LIT:utf-8>'<EOL><DEDENT>headers = _splitHeaders(<EOL>requestHeaders.getRawHeaders(b'<STR_LIT:Content-Type>', []))<EOL>if headers:<EOL><INDENT>return headers[<NUM_LIT:0>][<NUM_LIT:1>].get(b'<STR_LIT>', encoding)<EOL><DEDENT>return encoding<EOL>", "docstring": "Extract an encoding from a ``Content-Type`` header.\n\n@type  requestHeaders: `twisted.web.http_headers.Headers`\n@param requestHeaders: Request headers.\n\n@type  encoding: `bytes`\n@param encoding: Default encoding to assume if the ``Content-Type``\n    header is lacking one. Defaults to ``UTF-8``.\n\n@rtype: `bytes`\n@return: Content encoding.", "id": "f9827:m2"}
{"signature": "def _parseAccept(headers):", "body": "def sort(value):<EOL><INDENT>return float(value[<NUM_LIT:1>].get('<STR_LIT:q>', <NUM_LIT:1>))<EOL><DEDENT>return OrderedDict(sorted(_splitHeaders(headers), key=sort, reverse=True))<EOL>", "docstring": "Parse and sort an ``Accept`` header.\n\nThe header is sorted according to the ``q`` parameter for each header value.\n\n@rtype: `OrderedDict` mapping `bytes` to `dict`\n@return: Mapping of media types to header parameters.", "id": "f9827:m0"}
{"signature": "def _splitHeaders(headers):", "body": "return [cgi.parse_header(value)<EOL>for value in chain.from_iterable(<EOL>s.split('<STR_LIT:U+002C>') for s in headers<EOL>if s)]<EOL>", "docstring": "Split an HTTP header whose components are separated with commas.\n\nEach component is then split on semicolons and the component arguments\nconverted into a `dict`.\n\n@rtype: `list` of 2-`tuple` of `bytes`, `dict`\n@return: List of header arguments and mapping of component argument names\n    to values.", "id": "f9827:m1"}
{"signature": "def __init__(self, wrappedResource):", "body": "self._wrappedResource = wrappedResource<EOL>Resource.__init__(self)<EOL>", "docstring": ":type  wrappedResource: `ISpinneretResource`\n:param wrappedResource: Spinneret resource to wrap in an `IResource`.", "id": "f9828:c3:m0"}
{"signature": "def __init__(self, handlers, fallback=False):", "body": "Resource.__init__(self)<EOL>self._handlers = list(handlers)<EOL>self._fallback = fallback<EOL>self._acceptHandlers = {}<EOL>for handler in self._handlers:<EOL><INDENT>for acceptType in handler.acceptTypes:<EOL><INDENT>if acceptType in self._acceptHandlers:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' % (acceptType,))<EOL><DEDENT>self._acceptHandlers[acceptType] = handler<EOL><DEDENT><DEDENT>", "docstring": ":type  handlers: ``iterable`` of `INegotiableResource` and either\n    `IResource` or `ISpinneretResource`.\n:param handlers: Iterable of negotiable resources, either\n    `ISpinneretResource` or `IResource`, to use as handlers for\n    negotiation.\n\n:type  fallback: `bool`\n:param fallback: Fall back to the first handler in the case where\n    negotiation fails?", "id": "f9828:c4:m0"}
{"signature": "def locateChild(request, segments):", "body": "", "docstring": "Locate another object which can be adapted to `IResource`.\n\n:type  request: `IRequest <twisted:twisted.web.iweb.IRequest>`\n:param request: Request.\n\n:type  segments: ``sequence`` of `bytes`\n:param segments: Sequence of strings giving the remaining query\n    segments to resolve.\n\n:rtype: 2-`tuple` of `IResource`, `IRenderable` or `URLPath` and\n    a ``sequence`` of `bytes`\n:return: Pair of an `IResource`, `IRenderable` or `URLPath` and\n    a sequence of the remaining path segments to be process, or\n    a `Deferred` containing the aforementioned result.", "id": "f9829:c0:m0"}
{"signature": "def Float(value, encoding=None):", "body": "try:<EOL><INDENT>return float(Text(value, encoding))<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Parse a value as a floating point number.\n\n:type  value: `unicode` or `bytes`\n:param value: Text value to parse.\n\n:type  encoding: `bytes`\n:param encoding: Encoding to treat `bytes` values as, defaults to\n    ``utf-8``.\n\n:rtype: `float`\n:return: Parsed float or ``None`` if ``value`` could not be parsed as a\n    float.", "id": "f9836:m5"}
{"signature": "def Delimited(value, parser=Text, delimiter=u'<STR_LIT:U+002C>', encoding=None):", "body": "value = Text(value, encoding)<EOL>if value is None or value == u'<STR_LIT>':<EOL><INDENT>return []<EOL><DEDENT>return map(parser, value.split(delimiter))<EOL>", "docstring": "Parse a value as a delimited list.\n\n:type  value: `unicode` or `bytes`\n:param value: Text value to parse.\n\n:type  parser: `callable` taking a `unicode` parameter\n:param parser: Callable to map over the delimited text values.\n\n:type  delimiter: `unicode`\n:param delimiter: Delimiter text.\n\n:type  encoding: `bytes`\n:param encoding: Encoding to treat `bytes` values as, defaults to\n    ``utf-8``.\n\n:rtype: `list`\n:return: List of parsed values.", "id": "f9836:m7"}
{"signature": "def Timestamp(value, _divisor=<NUM_LIT:1.>, tz=UTC, encoding=None):", "body": "value = Float(value, encoding)<EOL>if value is not None:<EOL><INDENT>value = value / _divisor<EOL>return datetime.fromtimestamp(value, tz)<EOL><DEDENT>return None<EOL>", "docstring": "Parse a value as a POSIX timestamp in seconds.\n\n:type  value: `unicode` or `bytes`\n:param value: Text value to parse, which should be the number of seconds\n    since the epoch.\n\n:type  _divisor: `float`\n:param _divisor: Number to divide the value by.\n\n:type  tz: `tzinfo`\n:param tz: Timezone, defaults to UTC.\n\n:type  encoding: `bytes`\n:param encoding: Encoding to treat `bytes` values as, defaults to\n    ``utf-8``.\n\n:rtype: `datetime.datetime`\n:return: Parsed datetime or ``None`` if ``value`` could not be parsed.", "id": "f9836:m8"}
{"signature": "def Text(value, encoding=None):", "body": "if encoding is None:<EOL><INDENT>encoding = '<STR_LIT:utf-8>'<EOL><DEDENT>if isinstance(value, bytes):<EOL><INDENT>return value.decode(encoding)<EOL><DEDENT>elif isinstance(value, unicode):<EOL><INDENT>return value<EOL><DEDENT>return None<EOL>", "docstring": "Parse a value as text.\n\n:type  value: `unicode` or `bytes`\n:param value: Text value to parse\n\n:type  encoding: `bytes`\n:param encoding: Encoding to treat ``bytes`` values as, defaults to\n    ``utf-8``.\n\n:rtype: `unicode`\n:return: Parsed text or ``None`` if ``value`` is neither `bytes` nor\n    `unicode`.", "id": "f9836:m3"}
{"signature": "def Integer(value, base=<NUM_LIT:10>, encoding=None):", "body": "try:<EOL><INDENT>return int(Text(value, encoding), base)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Parse a value as an integer.\n\n:type  value: `unicode` or `bytes`\n:param value: Text value to parse\n\n:type  base: `unicode` or `bytes`\n:param base: Base to assume ``value`` is specified in.\n\n:type  encoding: `bytes`\n:param encoding: Encoding to treat ``bytes`` values as, defaults to\n    ``utf-8``.\n\n:rtype: `int`\n:return: Parsed integer or ``None`` if ``value`` could not be parsed as an\n    integer.", "id": "f9836:m4"}
{"signature": "def parse(expected, query):", "body": "return dict(<EOL>(key, parser(query.get(key, [])))<EOL>for key, parser in expected.items())<EOL>", "docstring": "Parse query parameters.\n\n:type  expected: `dict` mapping `bytes` to `callable`\n:param expected: Mapping of query argument names to argument parsing\n    callables.\n\n:type  query: `dict` mapping `bytes` to `list` of `bytes`\n:param query: Mapping of query argument names to lists of argument values,\n    this is the form that Twisted Web's `IRequest.args\n    <twisted:twisted.web.iweb.IRequest.args>` value takes.\n\n:rtype: `dict` mapping `bytes` to `object`\n:return: Mapping of query argument names to parsed argument values.", "id": "f9836:m10"}
{"signature": "def many(func):", "body": "def _many(result):<EOL><INDENT>if _isSequenceTypeNotText(result):<EOL><INDENT>return map(func, result)<EOL><DEDENT>return []<EOL><DEDENT>return maybe(_many, default=[])<EOL>", "docstring": "Create a callable that applies ``func`` to every value in a sequence.\n\nIf the value is not a sequence then an empty list is returned.\n\n:type  func: `callable`\n:param func: Callable to be applied to the first result.", "id": "f9836:m2"}
{"signature": "def subroute(self, *components):", "body": "def _factory(f):<EOL><INDENT>self._addRoute(f, subroute(*components))<EOL>return f<EOL><DEDENT>return _factory<EOL>", "docstring": "See `txspinneret.route.subroute`.\n\nThis decorator can be stacked with itself to specify multiple routes\nwith a single handler.", "id": "f9838:c1:m6"}
{"signature": "def __init__(self, obj, routes):", "body": "self._obj = obj<EOL>self._routes = routes<EOL>", "docstring": ":param obj: Parent object containing the route handler.\n\n:type  routes: `list` of 3-`tuple` containing `bytes`, `callable`,\n    `callable`\n:param routes: List of 3-tuple containing the route handler name, the\n    route handler function and the matcher function.", "id": "f9838:c0:m0"}
{"signature": "def Integer(name, base=<NUM_LIT:10>, encoding=None):", "body": "def _match(request, value):<EOL><INDENT>return name, query.Integer(<EOL>value,<EOL>base=base,<EOL>encoding=contentEncoding(request.requestHeaders, encoding))<EOL><DEDENT>return _match<EOL>", "docstring": "Match an integer route parameter.\n\n:type  name: `bytes`\n:param name: Route parameter name.\n\n:type  base: `int`\n:param base: Base to interpret the value in.\n\n:type  encoding: `bytes`\n:param encoding: Default encoding to assume if the ``Content-Type``\n    header is lacking one.\n\n:return: ``callable`` suitable for use with `route` or `subroute`.", "id": "f9838:m1"}
{"signature": "def routedResource(f, routerAttribute='<STR_LIT>'):", "body": "return wraps(f)(<EOL>lambda *a, **kw: getattr(f(*a, **kw), routerAttribute).resource())<EOL>", "docstring": "Decorate a router-producing callable to instead produce a resource.\n\nThis simply produces a new callable that invokes the original callable, and\ncalls ``resource`` on the ``routerAttribute``.\n\nIf the router producer has multiple routers the attribute can be altered to\nchoose the appropriate one, for example:\n\n.. code-block:: python\n\n    class _ComplexRouter(object):\n        router = Router()\n        privateRouter = Router()\n\n        @router.route('/')\n        def publicRoot(self, request, params):\n            return SomethingPublic(...)\n\n        @privateRouter.route('/')\n        def privateRoot(self, request, params):\n            return SomethingPrivate(...)\n\n    PublicResource = routedResource(_ComplexRouter)\n    PrivateResource = routedResource(_ComplexRouter, 'privateRouter')\n\n:type  f: ``callable``\n:param f: Callable producing an object with a `Router` attribute, for\n    example, a type.\n\n:type  routerAttribute: `str`\n:param routerAttribute: Name of the `Router` attribute on the result of\n    calling ``f``.\n\n:rtype: `callable`\n:return: Callable producing an `IResource`.", "id": "f9838:m5"}
{"signature": "def get_version():", "body": "version_module_path = os.path.join(<EOL>os.path.dirname(__file__), \"<STR_LIT>\", \"<STR_LIT>\")<EOL>with open(version_module_path) as version_module:<EOL><INDENT>exec(version_module.read())<EOL><DEDENT>return locals()[\"<STR_LIT>\"]<EOL>", "docstring": "Get the version from version module without importing more than\nnecessary.", "id": "f9839:m0"}
{"signature": "def read(path):", "body": "with open(path) as f:<EOL><INDENT>return f.read()<EOL><DEDENT>", "docstring": "Read the contents of a file.", "id": "f9839:m1"}
{"signature": "def delete_async(self, url, name, callback=None, params=None, headers=None):", "body": "if not name: name = '<STR_LIT>'<EOL>params = params or {}<EOL>headers = headers or {}<EOL>endpoint = self._build_endpoint_url(url, name)<EOL>self._authenticate(params, headers)<EOL>process_pool.apply_async(make_delete_request,<EOL>args=(endpoint, params, headers), callback=callback)<EOL>", "docstring": "Asynchronous DELETE request with the process pool.", "id": "f9848:c2:m12"}
{"signature": "def post_async(self, url, data, callback=None, params=None, headers=None):", "body": "params = params or {}<EOL>headers = headers or {}<EOL>endpoint = self._build_endpoint_url(url, None)<EOL>self._authenticate(params, headers)<EOL>data = json.dumps(data, cls=JSONEncoder)<EOL>process_pool.apply_async(make_post_request,<EOL>args=(endpoint, data, params, headers),<EOL>callback=callback)<EOL>", "docstring": "Asynchronous POST request with the process pool.", "id": "f9848:c2:m8"}
{"signature": "@http_connection(<NUM_LIT>)<EOL><INDENT>def patch(self, url, data, params=None, headers=None, connection=None):<DEDENT>", "body": "params = params or {}<EOL>headers = headers or {}<EOL>endpoint = self._build_endpoint_url(url, None)<EOL>self._authenticate(params, headers)<EOL>data = json.dumps(data, cls=JSONEncoder)<EOL>return make_patch_request(endpoint, data, params, headers,<EOL>connection=connection)<EOL>", "docstring": "Synchronous PATCH request. ``data`` must be a JSONable value.", "id": "f9848:c2:m9"}
{"signature": "def _build_endpoint_url(self, url, name=None):", "body": "if not url.endswith(self.URL_SEPERATOR):<EOL><INDENT>url = url + self.URL_SEPERATOR<EOL><DEDENT>if name is None:<EOL><INDENT>name = '<STR_LIT>'<EOL><DEDENT>return '<STR_LIT>' % (urlparse.urljoin(self.dsn, url), name,<EOL>self.NAME_EXTENSION)<EOL>", "docstring": "Method that constructs a full url with the given url and the\nsnapshot name.\n\nExample:\nfull_url = _build_endpoint_url('/users', '1')\nfull_url => 'http://firebase.localhost/users/1.json'", "id": "f9848:c2:m1"}
{"signature": "@http_connection(<NUM_LIT>)<EOL>def make_get_request(url, params, headers, connection):", "body": "timeout = getattr(connection, '<STR_LIT>')<EOL>response = connection.get(url, params=params, headers=headers, timeout=timeout)<EOL>if response.ok or response.status_code == <NUM_LIT>:<EOL><INDENT>return response.json() if response.content else None<EOL><DEDENT>else:<EOL><INDENT>response.raise_for_status()<EOL><DEDENT>", "docstring": "Helper function that makes an HTTP GET request to the given firebase\nendpoint. Timeout is 60 seconds.\n`url`: The full URL of the firebase endpoint (DSN appended.)\n`params`: Python dict that is appended to the URL like a querystring.\n`headers`: Python dict. HTTP request headers.\n`connection`: Predefined HTTP connection instance. If not given, it\nis supplied by the `decorators.http_connection` function.\n\nThe returning value is a Python dict deserialized by the JSON decoder. However,\nif the status code is not 2x or 403, an requests.HTTPError is raised.\n\nconnection = connection_pool.get_available_connection()\nresponse = make_get_request('http://firebase.localhost/users', {'print': silent'},\n                            {'X_FIREBASE_SOMETHING': 'Hi'}, connection)\nresponse => {'1': 'John Doe', '2': 'Jane Doe'}", "id": "f9848:m0"}
{"signature": "@http_connection(<NUM_LIT>)<EOL><INDENT>def delete(self, url, name, params=None, headers=None, connection=None):<DEDENT>", "body": "if not name: name = '<STR_LIT>'<EOL>params = params or {}<EOL>headers = headers or {}<EOL>endpoint = self._build_endpoint_url(url, name)<EOL>self._authenticate(params, headers)<EOL>return make_delete_request(endpoint, params, headers, connection=connection)<EOL>", "docstring": "Synchronous DELETE request for the snapshot with the given name.", "id": "f9848:c2:m11"}
{"signature": "@http_connection(<NUM_LIT>)<EOL><INDENT>def get(self, url, name, params=None, headers=None, connection=None):<DEDENT>", "body": "if name is None: name = '<STR_LIT>'<EOL>params = params or {}<EOL>headers = headers or {}<EOL>endpoint = self._build_endpoint_url(url, name)<EOL>self._authenticate(params, headers)<EOL>return make_get_request(endpoint, params, headers, connection=connection)<EOL>", "docstring": "Synchronous GET request.", "id": "f9848:c2:m3"}
{"signature": "@atexit.register<EOL>def close_process_pool():", "body": "process_pool.close()<EOL>process_pool.join()<EOL>process_pool.terminate()<EOL>", "docstring": "Clean up function that closes and terminates the process pool\ndefined in the ``async`` file.", "id": "f9850:m0"}
{"signature": "def __new__(cls, obj, *args, **kwargs):", "body": "try:<EOL><INDENT>cache = cls.__dict__[\"<STR_LIT>\"]<EOL><DEDENT>except KeyError:<EOL><INDENT>cls._class_proxy_cache = cache = {}<EOL><DEDENT>try:<EOL><INDENT>theclass = cache[obj.__class__]<EOL><DEDENT>except KeyError:<EOL><INDENT>cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)<EOL><DEDENT>ins = object.__new__(theclass)<EOL>theclass.__init__(ins, obj, *args, **kwargs)<EOL>return ins<EOL>", "docstring": "creates an proxy instance referencing `obj`. (obj, *args, **kwargs) are\npassed to this class' __init__, so deriving classes can define an\n__init__ method of their own.\nnote: _class_proxy_cache is unique per deriving class (each deriving\nclass must hold its own cache)", "id": "f9853:c0:m10"}
{"signature": "def _load_config():", "body": "config_path = _get_config_path()<EOL>with open(config_path, '<STR_LIT:r>') as f:<EOL><INDENT>return yaml.safe_load(f)<EOL><DEDENT>", "docstring": "Config loading\n    Raises:\n        IOError         on missing config file\n        SyntaxError     on invalid json syntax\n    :return: {dict} loaded but unvalidated config", "id": "f9873:m3"}
{"signature": "def _create_default_config():", "body": "config_path = _get_config_path()<EOL>with open(config_path, '<STR_LIT>') as f:<EOL><INDENT>yaml.dump(_default_config, f, default_flow_style=False)<EOL><DEDENT>", "docstring": "Writes the full default configuration to the appropriate place.\n    Raises:\n        IOError  - on unsuccesful file write\n    :return: None", "id": "f9873:m5"}
{"signature": "def get():", "body": "config = {}<EOL>try:<EOL><INDENT>config = _load_config()<EOL><DEDENT>except IOError:<EOL><INDENT>try:<EOL><INDENT>_create_default_config()<EOL>config = _load_config()<EOL><DEDENT>except IOError as e:<EOL><INDENT>raise ConfigError(_FILE_CREATION_ERROR.format(e.args[<NUM_LIT:0>]))<EOL><DEDENT><DEDENT>except SyntaxError as e:<EOL><INDENT>raise ConfigError(_JSON_SYNTAX_ERROR.format(e.args[<NUM_LIT:0>]))<EOL><DEDENT>except Exception:<EOL><INDENT>raise ConfigError(_JSON_SYNTAX_ERROR.format('<STR_LIT>'))<EOL><DEDENT>try:<EOL><INDENT>_validate(config)<EOL><DEDENT>except KeyError as e:<EOL><INDENT>raise ConfigError(_MANDATORY_KEY_ERROR.format(e.args[<NUM_LIT:0>]))<EOL><DEDENT>except SyntaxError as e:<EOL><INDENT>raise ConfigError(_INVALID_KEY_ERROR.format(e.args[<NUM_LIT:0>]))<EOL><DEDENT>except ValueError as e:<EOL><INDENT>raise ConfigError(_INVALID_VALUE_ERROR.format(e.args[<NUM_LIT:0>]))<EOL><DEDENT>config['<STR_LIT>'] = os.path.expanduser(config['<STR_LIT>'])<EOL>_complete_config(config)<EOL>return config<EOL>", "docstring": "Only API function for the config module.\n\n    :return: {dict}     loaded validated configuration.", "id": "f9873:m0"}
{"signature": "def _data_integrity_check(data):", "body": "deps = []<EOL>for command in data['<STR_LIT>']:<EOL><INDENT>if '<STR_LIT>' in data['<STR_LIT>'][command]:<EOL><INDENT>for d in data['<STR_LIT>'][command]['<STR_LIT>']:<EOL><INDENT>deps.append({<EOL>'<STR_LIT:d>': d,<EOL>'<STR_LIT:c>': command<EOL>})<EOL><DEDENT><DEDENT><DEDENT>for d in deps:<EOL><INDENT>if d['<STR_LIT:d>'] not in data['<STR_LIT>']:<EOL><INDENT>raise error.ProjectfileError({<EOL>'<STR_LIT:error>': error.PROJECTFILE_INVALID_DEPENDENCY.format(d['<STR_LIT:d>'], d['<STR_LIT:c>'])<EOL>})<EOL><DEDENT><DEDENT>", "docstring": "Checks if all command dependencies refers to and existing command. If not, a ProjectfileError\n    will be raised with the problematic dependency and it's command.\n\n    :param data: parsed raw data set.\n    :return: None", "id": "f9880:m3"}
{"signature": "def check_path_action(self):", "body": "class CheckPathAction(argparse.Action):<EOL><INDENT>def __call__(self, parser, args, value, option_string=None):<EOL><INDENT>if type(value) is list:<EOL><INDENT>value = value[<NUM_LIT:0>]<EOL><DEDENT>user_value = value<EOL>if option_string == '<STR_LIT:None>':<EOL><INDENT>if not os.path.isdir(value):<EOL><INDENT>_current_user = os.path.expanduser(\"<STR_LIT>\")<EOL>if not value.startswith(_current_user)and not value.startswith(os.getcwd()):<EOL><INDENT>if os.path.isdir(os.path.join(_current_user, value)):<EOL><INDENT>value = os.path.join(_current_user, value)<EOL><DEDENT>elif os.path.isdir(os.path.join(os.getcwd(), value)):<EOL><INDENT>value = os.path.join(os.getcwd(), value)<EOL><DEDENT>else:<EOL><INDENT>value = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>value = None<EOL><DEDENT><DEDENT><DEDENT>elif option_string == '<STR_LIT>':<EOL><INDENT>if not os.path.isdir(value):<EOL><INDENT>if not os.path.isdir(os.path.join(args.target, value)):<EOL><INDENT>value = None<EOL><DEDENT><DEDENT><DEDENT>if not value:<EOL><INDENT>logger.error(\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>user_value, option_string)<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>setattr(args, self.dest, value)<EOL><DEDENT><DEDENT>return CheckPathAction<EOL>", "docstring": "custom command line action to check file exist", "id": "f9890:c0:m8"}
{"signature": "def create_subparsers(self, parser):", "body": "subparsers = parser.add_subparsers()<EOL>for name in self.config['<STR_LIT>']:<EOL><INDENT>subparser = subparsers.add_parser(name)<EOL>self.create_commands(self.config['<STR_LIT>'][name], subparser)<EOL><DEDENT>", "docstring": "get config for subparser and create commands", "id": "f9890:c0:m4"}
{"signature": "def get_or_guess_paths_to_mutate(paths_to_mutate):", "body": "if paths_to_mutate is None:<EOL><INDENT>this_dir = os.getcwd().split(os.sep)[-<NUM_LIT:1>]<EOL>if isdir('<STR_LIT>'):<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif isdir('<STR_LIT:src>'):<EOL><INDENT>return '<STR_LIT:src>'<EOL><DEDENT>elif isdir(this_dir):<EOL><INDENT>return this_dir<EOL><DEDENT>elif isdir(this_dir.replace('<STR_LIT:->', '<STR_LIT:_>')):<EOL><INDENT>return this_dir.replace('<STR_LIT:->', '<STR_LIT:_>')<EOL><DEDENT>elif isdir(this_dir.replace('<STR_LIT:U+0020>', '<STR_LIT:_>')):<EOL><INDENT>return this_dir.replace('<STR_LIT:U+0020>', '<STR_LIT:_>')<EOL><DEDENT>elif isdir(this_dir.replace('<STR_LIT:->', '<STR_LIT>')):<EOL><INDENT>return this_dir.replace('<STR_LIT:->', '<STR_LIT>')<EOL><DEDENT>elif isdir(this_dir.replace('<STR_LIT:U+0020>', '<STR_LIT>')):<EOL><INDENT>return this_dir.replace('<STR_LIT:U+0020>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise FileNotFoundError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return paths_to_mutate<EOL><DEDENT>", "docstring": ":type paths_to_mutate: str or None\n:rtype: str", "id": "f9897:m2"}
{"signature": "def do_apply(mutation_pk, dict_synonyms, backup):", "body": "filename, mutation_id = filename_and_mutation_id_from_pk(int(mutation_pk))<EOL>update_line_numbers(filename)<EOL>context = Context(<EOL>mutation_id=mutation_id,<EOL>filename=filename,<EOL>dict_synonyms=dict_synonyms,<EOL>)<EOL>mutate_file(<EOL>backup=backup,<EOL>context=context,<EOL>)<EOL>if context.number_of_performed_mutations == <NUM_LIT:0>:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Apply a specified mutant to the source code\n\n    :param mutation_pk: mutmut cache primary key of the mutant to apply\n    :type mutation_pk: str\n\n    :param dict_synonyms: list of synonym keywords for a python dictionary\n    :type dict_synonyms: list[str]\n\n    :param backup: if :obj:`True` create a backup of the source file\n        before applying the mutation\n    :type backup: bool", "id": "f9897:m3"}
{"signature": "def read_coverage_data():", "body": "print('<STR_LIT>')<EOL>from coverage import Coverage<EOL>cov = Coverage('<STR_LIT>')<EOL>cov.load()<EOL>return cov.get_data()<EOL>", "docstring": ":rtype: CoverageData or None", "id": "f9897:m11"}
{"signature": "def compute_exit_code(config, exception=None):", "body": "code = <NUM_LIT:0><EOL>if exception is not None:<EOL><INDENT>code = code | <NUM_LIT:1><EOL><DEDENT>if config.surviving_mutants > <NUM_LIT:0>:<EOL><INDENT>code = code | <NUM_LIT:2><EOL><DEDENT>if config.surviving_mutants_timeout > <NUM_LIT:0>:<EOL><INDENT>code = code | <NUM_LIT:4><EOL><DEDENT>if config.suspicious_mutants > <NUM_LIT:0>:<EOL><INDENT>code = code | <NUM_LIT:8><EOL><DEDENT>return code<EOL>", "docstring": "Compute an exit code for mutmut mutation testing\n\n    The following exit codes are available for mutmut:\n     * 0 if all mutants were killed (OK_KILLED)\n     * 1 if a fatal error occurred\n     * 2 if one or more mutants survived (BAD_SURVIVED)\n     * 4 if one or more mutants timed out (BAD_TIMEOUT)\n     * 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS)\n\n     Exit codes 1 to 8 will be bit-ORed so that it is possible to know what\n     different mutant statuses occurred during mutation testing.\n\n    :param exception:\n    :type exception: Exception\n    :param config:\n    :type config: Config\n\n    :return: integer noting the exit code of the mutation tests.\n    :rtype: int", "id": "f9897:m16"}
{"signature": "def argument_mutation(children, context, **_):", "body": "if len(context.stack) >= <NUM_LIT:3> and context.stack[-<NUM_LIT:3>].type in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>stack_pos_of_power_node = -<NUM_LIT:3><EOL><DEDENT>elif len(context.stack) >= <NUM_LIT:4> and context.stack[-<NUM_LIT:4>].type in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>stack_pos_of_power_node = -<NUM_LIT:4><EOL><DEDENT>else:<EOL><INDENT>return<EOL><DEDENT>power_node = context.stack[stack_pos_of_power_node]<EOL>if power_node.children[<NUM_LIT:0>].type == '<STR_LIT:name>' and power_node.children[<NUM_LIT:0>].value in context.dict_synonyms:<EOL><INDENT>c = children[<NUM_LIT:0>]<EOL>if c.type == '<STR_LIT:name>':<EOL><INDENT>children = children[:]<EOL>children[<NUM_LIT:0>] = Name(c.value + '<STR_LIT>', start_pos=c.start_pos, prefix=c.prefix)<EOL>return children<EOL><DEDENT><DEDENT>", "docstring": ":type context: Context", "id": "f9898:m5"}
{"signature": "def mutate(context):", "body": "try:<EOL><INDENT>result = parse(context.source, error_recovery=False)<EOL><DEDENT>except Exception:<EOL><INDENT>print('<STR_LIT>' % context.filename)<EOL>print('<STR_LIT>')<EOL>raise<EOL><DEDENT>mutate_list_of_nodes(result, context=context)<EOL>mutated_source = result.get_code().replace('<STR_LIT>', '<STR_LIT:U+0020>')<EOL>if context.remove_newline_at_end:<EOL><INDENT>assert mutated_source[-<NUM_LIT:1>] == '<STR_LIT:\\n>'<EOL>mutated_source = mutated_source[:-<NUM_LIT:1>]<EOL><DEDENT>if context.number_of_performed_mutations:<EOL><INDENT>assert context.source != mutated_source<EOL><DEDENT>context.mutated_source = mutated_source<EOL>return mutated_source, context.number_of_performed_mutations<EOL>", "docstring": ":type context: Context\n:return: tuple: mutated source code, number of mutations performed\n:rtype: tuple[str, int]", "id": "f9898:m12"}
{"signature": "def mutate_list_of_nodes(node, context):", "body": "return_annotation_started = False<EOL>for child_node in node.children:<EOL><INDENT>if child_node.type == '<STR_LIT>' and child_node.value == '<STR_LIT>':<EOL><INDENT>return_annotation_started = True<EOL><DEDENT>if return_annotation_started and child_node.type == '<STR_LIT>' and child_node.value == '<STR_LIT::>':<EOL><INDENT>return_annotation_started = False<EOL><DEDENT>if return_annotation_started:<EOL><INDENT>continue<EOL><DEDENT>mutate_node(child_node, context=context)<EOL>if context.number_of_performed_mutations and context.mutation_id != ALL:<EOL><INDENT>return<EOL><DEDENT><DEDENT>", "docstring": ":type context: Context", "id": "f9898:m14"}
{"signature": "def list_mutations(context):", "body": "assert context.mutation_id == ALL<EOL>mutate(context)<EOL>return context.performed_mutation_ids<EOL>", "docstring": ":type context: Context", "id": "f9898:m16"}
{"signature": "def mutate_file(backup, context):", "body": "with open(context.filename) as f:<EOL><INDENT>code = f.read()<EOL><DEDENT>context.source = code<EOL>if backup:<EOL><INDENT>with open(context.filename + '<STR_LIT>', '<STR_LIT:w>') as f:<EOL><INDENT>f.write(code)<EOL><DEDENT><DEDENT>result, number_of_mutations_performed = mutate(context)<EOL>with open(context.filename, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(result)<EOL><DEDENT>return number_of_mutations_performed<EOL>", "docstring": ":type backup: bool\n:type context: Context", "id": "f9898:m17"}
{"signature": "def count_mutations(context):", "body": "assert context.mutation_id == ALL<EOL>mutate(context)<EOL>return context.number_of_performed_mutations<EOL>", "docstring": ":type context: Context", "id": "f9898:m15"}
{"signature": "def read(self, uri):", "body": "return self.__resolver__.getTextualNode(uri).export(Mimetypes.XML.TEI), \"<STR_LIT>\"<EOL>", "docstring": "Retrieve the contents of the resource\n\n        :param uri: the URI of the resource to be retrieved\n        :type uri: str\n        :return: the contents of the resource\n        :rtype: str", "id": "f9924:c5:m2"}
{"signature": "@staticmethod<EOL><INDENT>def match(uri):<DEDENT>", "body": "<EOL>return CTSRetriever.__reg_exp__.match(uri) is not None<EOL>", "docstring": "Check to see if this URI is retrievable by this Retriever implementation\n\n        :param uri: the URI of the resource to be retrieved\n        :type uri: str\n        :return: True if it can be, False if not\n        :rtype: bool", "id": "f9924:c5:m1"}
{"signature": "def match(self, uri):", "body": "<EOL>return HTTPRetriever.__reg_exp__.match(uri) is not None<EOL>", "docstring": "Check to see if this URI is retrievable by this Retriever implementation\n\n        :param uri: the URI of the resource to be retrieved\n        :type uri: str\n        :return: True if it can be, False if not\n        :rtype: bool", "id": "f9924:c3:m0"}
{"signature": "def read(self, uri):", "body": "return None, \"<STR_LIT>\"<EOL>", "docstring": "Retrieve the contents of the resource\n        :param uri: the URI of the resource to be retrieved\n        :type uri: str\n        :return: the contents of the resource and it's mime type in a tuple\n        :rtype: str, str", "id": "f9924:c2:m1"}
{"signature": "def process(self, nemo):", "body": "self.__nemo__ = nemo<EOL>for annotation in self.__annotations__:<EOL><INDENT>annotation.target.expanded = frozenset(<EOL>self.__getinnerreffs__(<EOL>objectId=annotation.target.objectId,<EOL>subreference=annotation.target.subreference<EOL>)<EOL>)<EOL><DEDENT>", "docstring": "Register nemo and parses annotations\n\n        .. note:: Process parses the annotation and extends informations about the target URNs by retrieving resource in range\n\n        :param nemo: Nemo", "id": "f9925:c0:m2"}
{"signature": "def to_json(self):", "body": "if self.subreference is not None:<EOL><INDENT>return {<EOL>\"<STR_LIT:source>\": self.objectId,<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT:type>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:value>\": self.subreference<EOL>}<EOL>}<EOL><DEDENT>else:<EOL><INDENT>return {\"<STR_LIT:source>\": self.objectId}<EOL><DEDENT>", "docstring": "Method to call to get a serializable object for json.dump or jsonify based on the target\n\n        :return: dict", "id": "f9926:c0:m3"}
{"signature": "def expand(self):", "body": "<EOL>return []<EOL>", "docstring": "Expand the contents of the Annotation if it is expandable  (i.e. if it references  multiple resources)\n\n        :return: the list of expanded resources\n        :rtype: list(AnnotationResource)", "id": "f9926:c1:m9"}
{"signature": "def getResource(self, sha):", "body": "return None<EOL>", "docstring": "Retrieve a single annotation resource by sha\n\n        :param sha: The sha of the resource\n        :type sha: str\n        :return: the requested annotation resource\n        :rtype: AnnotationResource", "id": "f9927:c0:m2"}
{"signature": "def f_annotation_filter(annotations, type_uri, number):", "body": "filtered = [<EOL>annotation<EOL>for annotation in annotations<EOL>if annotation.type_uri == type_uri<EOL>]<EOL>number = min([len(filtered), number])<EOL>if number == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return filtered[number-<NUM_LIT:1>]<EOL><DEDENT>", "docstring": "Annotation filtering filter\n\n    :param annotations: List of annotations\n    :type annotations: [AnnotationResource]\n    :param type_uri: URI Type on which to filter\n    :type type_uri: str\n    :param number: Number of the annotation to return\n    :type number: int\n    :return: Annotation(s) matching the request\n    :rtype: [AnnotationResource] or AnnotationResource", "id": "f9929:m7"}
{"signature": "def f_i18n_citation_type(string, lang=\"<STR_LIT>\"):", "body": "s = \"<STR_LIT:U+0020>\".join(string.strip(\"<STR_LIT:%>\").split(\"<STR_LIT:|>\"))<EOL>return s.capitalize()<EOL>", "docstring": "Take a string of form %citation_type|passage% and format it for human\n\n    :param string: String of formation %citation_type|passage%\n    :param lang: Language to translate to\n    :return: Human Readable string\n\n    .. note :: To Do : Use i18n tools and provide real i18n", "id": "f9929:m6"}
{"signature": "def f_order_resource_by_lang(versions_list):", "body": "return sorted(versions_list, key=itemgetter(\"<STR_LIT>\"))<EOL>", "docstring": "Takes a list of versions and put translations after editions\n\n    :param versions_list: List of text versions\n    :type versions_list: [Text]\n    :return: List where first members will be editions\n    :rtype: [Text]", "id": "f9929:m3"}
{"signature": "def f_is_str(value):", "body": "return isinstance(value, str)<EOL>", "docstring": "Check if object is a string\n\n    :param value: object to check against\n    :return: Return if value is a string", "id": "f9929:m5"}
{"signature": "def f_i18n_iso(isocode, lang=\"<STR_LIT>\"):", "body": "if lang not in flask_nemo._data.AVAILABLE_TRANSLATIONS:<EOL><INDENT>lang = \"<STR_LIT>\"<EOL><DEDENT>try:<EOL><INDENT>return flask_nemo._data.ISOCODES[isocode][lang]<EOL><DEDENT>except KeyError:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>", "docstring": "Replace isocode by its language equivalent\n\n    :param isocode: Three character long language code\n    :param lang: Lang in which to return the language name\n    :return: Full Text Language Name", "id": "f9929:m2"}
{"signature": "def r_annotation(self, sha):", "body": "annotation = self.__queryinterface__.getResource(sha)<EOL>if not annotation:<EOL><INDENT>return \"<STR_LIT>\", <NUM_LIT><EOL><DEDENT>response = {<EOL>\"<STR_LIT>\": type(self).JSONLD_CONTEXT,<EOL>\"<STR_LIT:id>\": url_for(\"<STR_LIT>\", sha=annotation.sha),<EOL>\"<STR_LIT:body>\": url_for(\"<STR_LIT>\", sha=annotation.sha),<EOL>\"<STR_LIT:type>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:target>\": annotation.target.to_json(),<EOL>\"<STR_LIT>\": [annotation.uri],<EOL>\"<STR_LIT>\": annotation.type_uri,<EOL>\"<STR_LIT>\": annotation.slug<EOL>}<EOL>return jsonify(response)<EOL>", "docstring": "Route to retrieve contents of an annotation resource\n\n        :param uri: The uri of the annotation resource\n        :type uri: str\n        :return: annotation contents\n        :rtype: {str: Any}", "id": "f9931:c0:m2"}
{"signature": "def r_annotation_body(self, sha):", "body": "annotation = self.__queryinterface__.getResource(sha)<EOL>if not annotation:<EOL><INDENT>return \"<STR_LIT>\", <NUM_LIT><EOL><DEDENT>content = annotation.read()<EOL>if isinstance(content, Response):<EOL><INDENT>return content<EOL><DEDENT>headers = {\"<STR_LIT:Content-Type>\": annotation.mimetype}<EOL>return Response(content, headers=headers)<EOL>", "docstring": "Route to retrieve contents of an annotation resource\n\n        :param uri: The uri of the annotation resource\n        :type uri: str\n        :return: annotation contents\n        :rtype: {str: Any}", "id": "f9931:c0:m3"}
{"signature": "def join_or_single(start, end):", "body": "if start == end:<EOL><INDENT>return start<EOL><DEDENT>else:<EOL><INDENT>return \"<STR_LIT>\".format(<EOL>start,<EOL>end<EOL>)<EOL><DEDENT>", "docstring": "Join passages range. If they are the same, return a single part of the range\n\n    :param start: Start of the passage range\n    :param end: End of the passage range\n    :return: Finale Passage Chunk Identifier", "id": "f9932:m1"}
{"signature": "def scheme_chunker(text, getreffs):", "body": "level = len(text.citation)<EOL>types = [citation.name for citation in text.citation]<EOL>if types == [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>level = <NUM_LIT:2><EOL><DEDENT>elif types == [\"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>return line_chunker(text, getreffs)<EOL><DEDENT>return [tuple([reff.split(\"<STR_LIT::>\")[-<NUM_LIT:1>]]*<NUM_LIT:2>) for reff in getreffs(level=level)]<EOL>", "docstring": "This is the scheme chunker which will resolve the reference giving a callback (getreffs) and a text object with its metadata\n\n    :param text: Text Object representing either an edition or a translation\n    :type text: MyCapytains.resources.inventory.Text\n    :param getreffs: callback function which retrieves a list of references\n    :type getreffs: function\n\n    :return: List of urn references with their human readable version\n    :rtype: [(str, str)]", "id": "f9933:m1"}
{"signature": "def line_chunker(text, getreffs, lines=<NUM_LIT:30>):", "body": "level = len(text.citation)<EOL>source_reffs = [reff.split(\"<STR_LIT::>\")[-<NUM_LIT:1>] for reff in getreffs(level=level)]<EOL>reffs = []<EOL>i = <NUM_LIT:0><EOL>while i + lines - <NUM_LIT:1> < len(source_reffs):<EOL><INDENT>reffs.append(tuple([source_reffs[i]+\"<STR_LIT:->\"+source_reffs[i+lines-<NUM_LIT:1>], source_reffs[i]]))<EOL>i += lines<EOL><DEDENT>if i < len(source_reffs):<EOL><INDENT>reffs.append(tuple([source_reffs[i]+\"<STR_LIT:->\"+source_reffs[len(source_reffs)-<NUM_LIT:1>], source_reffs[i]]))<EOL><DEDENT>return reffs<EOL>", "docstring": "Groups line reference together\n\n    :param text: Text object\n    :type text: MyCapytains.resources.text.api\n    :param getreffs: Callback function to retrieve text\n    :type getreffs: function(level)\n    :param lines: Number of lines to use by group\n    :type lines: int\n    :return: List of grouped urn references with their human readable version\n    :rtype: [(str, str)]", "id": "f9933:m2"}
{"signature": "def default_chunker(text, getreffs):", "body": "level = len(text.citation)<EOL>return [tuple([reff.split(\"<STR_LIT::>\")[-<NUM_LIT:1>]]*<NUM_LIT:2>) for reff in getreffs(level=level)]<EOL>", "docstring": "This is the default chunker which will resolve the reference giving a callback (getreffs) and a text object with its metadata\n\n    :param text: Text Object representing either an edition or a translation\n    :type text: MyCapytains.resources.inventory.Text\n    :param getreffs: callback function which retrieves a list of references\n    :type getreffs: function\n\n    :return: List of urn references with their human readable version\n    :rtype: [(str, str)]", "id": "f9933:m0"}
{"signature": "def level_chunker(text, getreffs, level=<NUM_LIT:1>):", "body": "references = getreffs(level=level)<EOL>return [(ref.split(\"<STR_LIT::>\")[-<NUM_LIT:1>], ref.split(\"<STR_LIT::>\")[-<NUM_LIT:1>]) for ref in references]<EOL>", "docstring": "Chunk a text at the passage level\n\n    :param text: Text object\n    :type text: MyCapytains.resources.text.api\n    :param getreffs: Callback function to retrieve text\n    :type getreffs: function(level)\n    :return: List of urn references with their human readable version\n    :rtype: [(str, str)]", "id": "f9933:m3"}
{"signature": "def r_assets(self, filetype, asset):", "body": "if filetype in self.assets and asset in self.assets[filetype] and self.assets[filetype][asset]:<EOL><INDENT>return send_from_directory(<EOL>directory=self.assets[filetype][asset],<EOL>filename=asset<EOL>)<EOL><DEDENT>abort(<NUM_LIT>)<EOL>", "docstring": "Route for specific assets.\n\n        :param filetype: Asset Type\n        :param asset: Filename of an asset\n        :return: Response", "id": "f9934:c0:m23"}
{"signature": "def create_blueprint(self):", "body": "self.register_plugins()<EOL>self.blueprint = Blueprint(<EOL>self.name,<EOL>\"<STR_LIT>\",<EOL>url_prefix=self.prefix,<EOL>template_folder=self.template_folder,<EOL>static_folder=self.static_folder,<EOL>static_url_path=self.static_url_path<EOL>)<EOL>for url, name, methods, instance in self._urls:<EOL><INDENT>self.blueprint.add_url_rule(<EOL>url,<EOL>view_func=self.view_maker(name, instance),<EOL>endpoint=_plugin_endpoint_rename(name, instance),<EOL>methods=methods<EOL>)<EOL><DEDENT>for url, name, methods, instance in self._semantic_url:<EOL><INDENT>self.blueprint.add_url_rule(<EOL>url,<EOL>view_func=self.view_maker(name, instance),<EOL>endpoint=_plugin_endpoint_rename(name, instance)+\"<STR_LIT>\",<EOL>methods=methods<EOL>)<EOL><DEDENT>self.register_assets()<EOL>self.register_filters()<EOL>self.__templates_namespaces__.extend(self.__instance_templates__)<EOL>for namespace, directory in self.__templates_namespaces__[::-<NUM_LIT:1>]:<EOL><INDENT>if namespace not in self.__template_loader__:<EOL><INDENT>self.__template_loader__[namespace] = []<EOL><DEDENT>self.__template_loader__[namespace].append(<EOL>jinja2.FileSystemLoader(op.abspath(directory))<EOL>)<EOL><DEDENT>self.blueprint.jinja_loader = jinja2.PrefixLoader(<EOL>{namespace: jinja2.ChoiceLoader(paths) for namespace, paths in self.__template_loader__.items()},<EOL>\"<STR_LIT>\"<EOL>)<EOL>if self.cache is not None:<EOL><INDENT>for func, instance in self.cached:<EOL><INDENT>setattr(instance, func.__name__, self.cache.memoize()(func))<EOL><DEDENT><DEDENT>return self.blueprint<EOL>", "docstring": "Create blueprint and register rules\n\n        :return: Blueprint of the current nemo app\n        :rtype: flask.Blueprint", "id": "f9934:c0:m25"}
{"signature": "def main_collections(self, lang=None):", "body": "return sorted([<EOL>{<EOL>\"<STR_LIT:id>\": member.id,<EOL>\"<STR_LIT:label>\": str(member.get_label(lang=lang)),<EOL>\"<STR_LIT>\": str(member.model),<EOL>\"<STR_LIT:type>\": str(member.type),<EOL>\"<STR_LIT:size>\": member.size<EOL>}<EOL>for member in self.resolver.getMetadata().members<EOL>], key=itemgetter(\"<STR_LIT:label>\"))<EOL>", "docstring": "Retrieve main parent collections of a repository\n\n        :param lang: Language to retrieve information in\n        :return: Sorted collections representations", "id": "f9934:c0:m27"}
{"signature": "def r_collection(self, objectId, lang=None):", "body": "collection = self.resolver.getMetadata(objectId)<EOL>return {<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT:label>\": str(collection.get_label(lang)),<EOL>\"<STR_LIT:id>\": collection.id,<EOL>\"<STR_LIT>\": str(collection.model),<EOL>\"<STR_LIT:type>\": str(collection.type),<EOL>},<EOL>\"<STR_LIT>\": self.make_members(collection, lang=lang),<EOL>\"<STR_LIT>\": self.make_parents(collection, lang=lang)<EOL>},<EOL>}<EOL>", "docstring": "Collection content browsing route function\n\n        :param objectId: Collection identifier\n        :type objectId: str\n        :param lang: Lang in which to express main data\n        :type lang: str\n        :return: Template and collections contained in given collection\n        :rtype: {str: Any}", "id": "f9934:c0:m19"}
{"signature": "def r_collections(self, lang=None):", "body": "collection = self.resolver.getMetadata()<EOL>return {<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": collection.get_label(lang),<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": self.make_members(collection, lang=lang)<EOL>}<EOL>}<EOL>", "docstring": "Retrieve the top collections of the inventory\n\n        :param lang: Lang in which to express main data\n        :type lang: str\n        :return: Collections information and template\n        :rtype: {str: Any}", "id": "f9934:c0:m18"}
{"signature": "def r_first_passage(self, objectId):", "body": "collection, reffs = self.get_reffs(objectId=objectId, export_collection=True)<EOL>first, _ = reffs[<NUM_LIT:0>]<EOL>return redirect(<EOL>url_for(\"<STR_LIT>\", objectId=objectId, subreference=first, semantic=self.semantic(collection))<EOL>)<EOL>", "docstring": "Provides a redirect to the first passage of given objectId\n\n        :param objectId: Collection identifier\n        :type objectId: str\n        :return: Redirection to the first passage of given text", "id": "f9934:c0:m21"}
{"signature": "def get_collection(self, objectId):", "body": "return self.inventory[objectId]<EOL>", "docstring": "Retrieve a collection in the inventory\n\n        :param objectId: Collection Identifier\n        :type objectId: str\n        :return: Requested collection\n        :rtype: Collection", "id": "f9934:c0:m8"}
{"signature": "def make_parents(self, collection, lang=None):", "body": "return [<EOL>{<EOL>\"<STR_LIT:id>\": member.id,<EOL>\"<STR_LIT:label>\": str(member.get_label(lang)),<EOL>\"<STR_LIT>\": str(member.model),<EOL>\"<STR_LIT:type>\": str(member.type),<EOL>\"<STR_LIT:size>\": member.size<EOL>}<EOL>for member in collection.parents<EOL>if member.get_label()<EOL>]<EOL>", "docstring": "Build parents list for given collection\n\n        :param collection: Collection to build dict view of for its members\n        :param lang: Language to express data in\n        :return: List of basic objects", "id": "f9934:c0:m16"}
{"signature": "def init_app(self, app=None):", "body": "<EOL>if app:<EOL><INDENT>self.app = app<EOL><DEDENT>self.register()<EOL>", "docstring": "Initiate the application\n\n        :param app: Flask application on which to add the extension\n        :type app: flask.Flask", "id": "f9934:c0:m4"}
{"signature": "def make_coins(self, collection, text, subreference=\"<STR_LIT>\", lang=None):", "body": "if lang is None:<EOL><INDENT>lang = self.__default_lang__<EOL><DEDENT>return \"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\".format(<EOL>title=quote(str(text.get_title(lang))), author=quote(str(text.get_creator(lang))),<EOL>cid=url_for(\"<STR_LIT>\", objectId=collection.id, _external=True),<EOL>language=collection.lang, pages=quote(subreference), edition=quote(str(text.get_description(lang)))<EOL>)<EOL>", "docstring": "Creates a CoINS Title string from information\n\n        :param collection: Collection to create coins from\n        :param text: Text/Passage object\n        :param subreference: Subreference\n        :param lang: Locale information\n        :return: Coins HTML title value", "id": "f9934:c0:m13"}
{"signature": "def r_index(self):", "body": "return {\"<STR_LIT>\": \"<STR_LIT>\"}<EOL>", "docstring": "Homepage route function\n\n        :return: Template to use for Home page\n        :rtype: {str: str}", "id": "f9934:c0:m17"}
{"signature": "def _plugin_endpoint_rename(fn_name, instance):", "body": "if instance and instance.namespaced:<EOL><INDENT>fn_name = \"<STR_LIT>\".format(instance.name, fn_name[<NUM_LIT:2>:])<EOL><DEDENT>return fn_name<EOL>", "docstring": "Rename endpoint function name to avoid conflict when namespacing is set to true\n\n    :param fn_name: Name of the route function\n    :param instance: Instance bound to the function\n    :return: Name of the new namespaced function name", "id": "f9934:m0"}
{"signature": "def semantic(self, collection, parent=None):", "body": "if parent is not None:<EOL><INDENT>collections = parent.parents[::-<NUM_LIT:1>] + [parent, collection]<EOL><DEDENT>else:<EOL><INDENT>collections = collection.parents[::-<NUM_LIT:1>] + [collection]<EOL><DEDENT>return filters.slugify(\"<STR_LIT>\".join([item.get_label() for item in collections if item.get_label()]))<EOL>", "docstring": "Generates a SEO friendly string for given collection\n\n        :param collection: Collection object to generate string for\n        :param parent: Current collection parent\n        :return: SEO/URL Friendly string", "id": "f9934:c0:m12"}
{"signature": "def register(self):", "body": "if self.app is not None:<EOL><INDENT>if not self.blueprint:<EOL><INDENT>self.blueprint = self.create_blueprint()<EOL><DEDENT>self.app.register_blueprint(self.blueprint)<EOL>if self.cache is None:<EOL><INDENT>setattr(self.app.jinja_env, \"<STR_LIT>\", self)<EOL>self.app.jinja_env.add_extension(FakeCacheExtension)<EOL><DEDENT>return self.blueprint<EOL><DEDENT>return None<EOL>", "docstring": "Register the app using Blueprint\n\n        :return: Nemo blueprint\n        :rtype: flask.Blueprint", "id": "f9934:c0:m31"}
{"signature": "def render(self, **kwargs):", "body": "return kwargs<EOL>", "docstring": "View Rendering function that gets triggered before nemo renders the resources and adds informations to \\\n        pass to the templates\n\n        :param kwargs: Dictionary of arguments to pass to the template\n        :return: Dictionary of arguments to pass to the template", "id": "f9937:c0:m13"}
{"signature": "def subscribe(self, event, bet_ids):", "body": "if not self._subscriptions.get(event):<EOL><INDENT>self._subscriptions[event] = set()<EOL><DEDENT>self._subscriptions[event] = self._subscriptions[event].union(bet_ids)<EOL>", "docstring": "Subscribe to event for given bet ids.", "id": "f9942:c2:m24"}
{"signature": "def stakes_in(self, bet):", "body": "return self._stakes_by_side(bet, self.SIDE_IN)<EOL>", "docstring": "Return all stakes on 'in' side for given bet.", "id": "f9942:c2:m8"}
{"signature": "def get_project_slug(self, bet):", "body": "if bet.get('<STR_LIT>'):<EOL><INDENT>params = json.loads(bet['<STR_LIT>'])<EOL>return params.get('<STR_LIT>')<EOL><DEDENT>return None<EOL>", "docstring": "Return slug of a project that given bet is associated with\n        or None if bet is not associated with any project.", "id": "f9942:c2:m5"}
{"signature": "def get_bets(self, type=None, order_by=None, state=None, project_id=None,<EOL>page=None, page_size=None):", "body": "if page is None:<EOL><INDENT>page = <NUM_LIT:1><EOL><DEDENT>if page_size is None:<EOL><INDENT>page_size = <NUM_LIT:100><EOL><DEDENT>if state == '<STR_LIT:all>':<EOL><INDENT>_states = []  <EOL><DEDENT>elif state == '<STR_LIT>':<EOL><INDENT>_states = self.CLOSED_STATES<EOL><DEDENT>else:<EOL><INDENT>_states = self.ACTIVE_STATES<EOL><DEDENT>url = urljoin(<EOL>self.settings['<STR_LIT>'],<EOL>'<STR_LIT>'.format(page, page_size))<EOL>url += '<STR_LIT>'.format('<STR_LIT:U+002C>'.join(_states))<EOL>if type is not None:<EOL><INDENT>url += '<STR_LIT>'.format(type)<EOL><DEDENT>if order_by in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>url += '<STR_LIT>'.format(order_by)<EOL><DEDENT>if project_id is not None:<EOL><INDENT>url += '<STR_LIT>'.format(project_id)<EOL><DEDENT>res = self._req(url)<EOL>return res['<STR_LIT>']['<STR_LIT>']<EOL>", "docstring": "Return bets with given filters and ordering.\n\n        :param type: return bets only with this type.\n                     Use None to include all (default).\n        :param order_by: '-last_stake' or 'last_stake' to sort by stake's\n                         created date or None for default ordering.\n        :param state: one of 'active', 'closed', 'all' (default 'active').\n        :param project_id: return bets associated with given project id in kava\n        :param page: default 1.\n        :param page_site: page size (default 100).", "id": "f9942:c2:m4"}
{"signature": "def stakes_out(self, bet):", "body": "return self._stakes_by_side(bet, self.SIDE_OUT)<EOL>", "docstring": "Return all stakes on 'out' side for given bet.", "id": "f9942:c2:m9"}
{"signature": "def post_process(self, group, event, is_new, is_sample, **kwargs):", "body": "if not self.is_configured(group.project):<EOL><INDENT>return<EOL><DEDENT>host = self.get_option('<STR_LIT>', group.project)<EOL>port = int(self.get_option('<STR_LIT>', group.project))<EOL>prefix = self.get_option('<STR_LIT>', group.project)<EOL>hostname = self.get_option('<STR_LIT>', group.project) or socket.gethostname()<EOL>resolve_age = group.project.get_option('<STR_LIT>', None)<EOL>now = int(time.time())<EOL>template = '<STR_LIT>' % (prefix, group.project.slug)<EOL>level = group.get_level_display()<EOL>label = template % level<EOL>groups = group.project.group_set.filter(status=STATUS_UNRESOLVED)<EOL>if resolve_age:<EOL><INDENT>oldest = timezone.now() - timedelta(hours=int(resolve_age))<EOL>groups = groups.filter(last_seen__gt=oldest)<EOL><DEDENT>num_errors = groups.filter(level=group.level).count()<EOL>metric = Metric(hostname, label, num_errors, now)<EOL>log.info('<STR_LIT>', label, num_errors)<EOL>send_to_zabbix([metric], host, port)<EOL>", "docstring": "Process error.", "id": "f9947:c0:m1"}
{"signature": "def isA(instance, typeList):", "body": "return any(map(lambda iType: isinstance(instance,iType), typeList))<EOL>", "docstring": "Return true if ``instance`` is an instance of any the Directive\ntypes in ``typeList``", "id": "f9951:m0"}
{"signature": "def match(self, subsetLines, subsetOffset, fileName):", "body": "raise NotImplementedError()<EOL>", "docstring": "Search through lines for match.\nWhat is returned is defined by implementations", "id": "f9953:c1:m2"}
{"signature": "def match(self, subsetLines, offsetOfSubset, fileName):", "body": "for (offset,l) in enumerate(subsetLines):<EOL><INDENT>for t in self.regex:<EOL><INDENT>m = t.Regex.search(l)<EOL>if m != None:<EOL><INDENT>truePosition = offset + offsetOfSubset<EOL>_logger.debug('<STR_LIT>'.format(str(truePosition+ <NUM_LIT:1>)))<EOL>_logger.debug('<STR_LIT>'.format(l))<EOL>self.failed = True<EOL>self.matchLocation = CheckFileParser.FileLocation(fileName, truePosition +<NUM_LIT:1>)<EOL>raise DirectiveException(self)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Search through lines for match.\nRaise an Exception if a match", "id": "f9953:c8:m3"}
{"signature": "def match(self, subsetLines, offsetOfSubset, fileName):", "body": "for (offset,l) in enumerate(subsetLines):<EOL><INDENT>for literal in self.literals:<EOL><INDENT>column = l.find(literal.Literal)<EOL>if column != -<NUM_LIT:1>:<EOL><INDENT>truePosition = offset + offsetOfSubset<EOL>_logger.debug('<STR_LIT>'.format(line=str(truePosition+ <NUM_LIT:1>), col=column))<EOL>_logger.debug('<STR_LIT>'.format(l))<EOL>self.failed = True<EOL>self.matchLocation = CheckFileParser.FileLocation(fileName, truePosition +<NUM_LIT:1>)<EOL>raise DirectiveException(self)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Search through lines for match.\nRaise an Exception if a match", "id": "f9953:c9:m3"}
{"signature": "def scriptEntryPoint():", "body": "import sys<EOL>return main(sys.argv)<EOL>", "docstring": "This provides an entry point for disutils", "id": "f9955:m2"}
{"signature": "def monitor(self, sleep=<NUM_LIT:5>):", "body": "manager = FileModificationObjectManager()<EOL>timestamps = {}<EOL>filebodies = {}<EOL>for file in self.f_repository:<EOL><INDENT>timestamps[file] = self._get_mtime(file)<EOL>filebodies[file] = open(file).read()<EOL><DEDENT>while True:<EOL><INDENT>for file in self.f_repository:<EOL><INDENT>mtime = timestamps[file]<EOL>fbody = filebodies[file]<EOL>modified = self._check_modify(file, mtime, fbody)<EOL>if not modified:<EOL><INDENT>continue<EOL><DEDENT>new_mtime = self._get_mtime(file)<EOL>new_fbody = open(file).read()<EOL>obj = FileModificationObject(<EOL>file,<EOL>(mtime, new_mtime),<EOL>(fbody, new_fbody) )<EOL>timestamps[file] = new_mtime<EOL>filebodies[file] = new_fbody<EOL>manager.add_object(obj)<EOL>yield obj<EOL><DEDENT>time.sleep(sleep)<EOL><DEDENT>", "docstring": "Run file modification monitor.\n\n        The monitor can catch file modification using timestamp and file body. \n        Monitor has timestamp data and file body data. And insert timestamp \n        data and file body data before into while roop. In while roop, monitor \n        get new timestamp and file body, and then monitor compare new timestamp\n        to originaltimestamp. If new timestamp and file body differ original,\n        monitor regard thease changes as `modification`. Then monitor create\n        instance of FileModificationObjectManager and FileModificationObject,\n        and monitor insert FileModificationObject to FileModificationObject-\n        Manager. Then, yield this object.\n\n        :param sleep: How times do you sleep in while roop.", "id": "f9958:c3:m3"}
{"signature": "def add_files(self, filelist, **kwargs):", "body": "<EOL>if not isinstance(filelist, list):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>for file in filelist:<EOL><INDENT>self.add_file(file)<EOL><DEDENT>", "docstring": "Append files to file repository.\n\n        ModificationMonitor can append files to repository using this.\n        Please put the list of file names to `filelist` argument.\n\n        :param filelist: the list of file nmaes", "id": "f9958:c3:m2"}
{"signature": "@register.filter<EOL>def getitem(dictionary, keyvar):", "body": "try:<EOL><INDENT>return dictionary[keyvar]<EOL><DEDENT>except KeyError:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>", "docstring": "Custom django template filter that allows access to an item of a\n    dictionary through the key contained in a template variable.  Example:\n\n    .. code-block:: python\n\n        context_data = {\n            'data':{\n                'foo':'bar',\n            },\n            'key':'foo',\n        }\n\n        template = Template('{% load awltags %}{{data|getitem:key}}')\n        context = Context(context_data)\n        result = template.render(context)\n\n        >>> result\n        'bar'\n\n    .. note::\n        Any KeyErrors are ignored and return an empty string", "id": "f9980:m0"}
{"signature": "def django_logging_dict(log_dir, handlers=['<STR_LIT:file>'], filename='<STR_LIT>'):", "body": "d = default_logging_dict(log_dir, handlers, filename)<EOL>d['<STR_LIT>'].update({<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT:class>':'<STR_LIT>',<EOL>}<EOL>})<EOL>d['<STR_LIT>'].update({<EOL>'<STR_LIT>': { <EOL>'<STR_LIT>': ['<STR_LIT:file>', '<STR_LIT>'],<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': False,<EOL>},<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': ['<STR_LIT:file>', '<STR_LIT>'],<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': False,<EOL>},<EOL>})<EOL>return d<EOL>", "docstring": "Extends :func:`logthing.utils.default_logging_dict` with django\n    specific values.", "id": "f9982:m0"}
{"signature": "def extra_context(request):", "body": "host = os.environ.get('<STR_LIT>', None)or request.get_host()<EOL>d = {<EOL>'<STR_LIT>':request,<EOL>'<STR_LIT>':host,<EOL>'<STR_LIT>':request.path.startswith('<STR_LIT>'),<EOL>}<EOL>return d<EOL>", "docstring": "Adds useful global items to the context for use in templates.\n\n    * *request*: the request object\n    * *HOST*: host name of server\n    * *IN_ADMIN*: True if you are in the django admin area", "id": "f9984:m0"}
{"signature": "def field_names(self, admin_model):", "body": "request = FakeRequest(user=self.admin_user)<EOL>return admin_model.get_list_display(request)<EOL>", "docstring": "Returns the names of the fields/columns used by the given admin\n        model.\n\n        :param admin_model:\n            Instance of a :class:`admin.ModelAdmin` object that is responsible\n            for displaying the change list\n        :returns:\n            List of field names", "id": "f9985:c1:m6"}
{"signature": "def initiate(self):", "body": "self.site = admin.sites.AdminSite()<EOL>self.admin_user = create_admin(self.USERNAME, self.EMAIL, self.PASSWORD)<EOL>self.authed = False<EOL>", "docstring": "Sets up the :class:`AdminSite` and creates a user with the\n        appropriate privileges.  This should be called from the inheritor's\n        :class:`TestCase.setUp` method.", "id": "f9985:c1:m0"}
{"signature": "def authed_get(self, url, response_code=<NUM_LIT:200>, headers={}, follow=False):", "body": "if not self.authed:<EOL><INDENT>self.authorize()<EOL><DEDENT>response = self.client.get(url, follow=follow, **headers)<EOL>self.assertEqual(response_code, response.status_code)<EOL>return response<EOL>", "docstring": "Does a django test client ``get`` against the given url after\n        logging in the admin first.\n\n        :param url:\n            URL to fetch\n        :param response_code:\n            Expected response code from the URL fetch.  This value is\n            asserted.  Defaults to 200\n        :param headers:\n            Optional dictionary of headers to send in the request\n        :param follow:\n            When True, the get call will follow any redirect requests.\n            Defaults to False.\n        :returns:\n            Django testing ``Response`` object", "id": "f9985:c1:m2"}
{"signature": "def __init__(self, user=None, method='<STR_LIT:GET>', cookies={}, data={}):", "body": "super(FakeRequest, self).__init__()<EOL>self.method = method<EOL>self.COOKIES = cookies<EOL>if user:<EOL><INDENT>self.user = user<EOL><DEDENT>if method == '<STR_LIT:GET>':<EOL><INDENT>self.GET = data<EOL><DEDENT>else:<EOL><INDENT>self.POST = data<EOL><DEDENT>self.path = '<STR_LIT>'<EOL>", "docstring": "Constructor\n\n        :param user:\n            Django User object to include in the request.  Defaults to None.\n            If none is given then the parameter is not set at all\n        :param method:\n            Request method.  Defaults to 'GET'\n        :param cookies:\n            Dict containing cookies for the request.  Defaults to empty\n        :param data:\n            Dict for get or post fields.  Defaults to empty", "id": "f9985:c0:m0"}
{"signature": "def authorize(self):", "body": "response = self.client.login(username=self.USERNAME, <EOL>password=self.PASSWORD)<EOL>self.assertTrue(response)<EOL>self.authed = True<EOL>", "docstring": "Authenticates the superuser account via the web login.", "id": "f9985:c1:m1"}
{"signature": "def render_page_to_string(request, page_name, data={}):", "body": "return render_to_string(page_name, data, request=request)<EOL>", "docstring": "A shortcut for using ``render_to_string`` with a\n    :class:`RequestContext` automatically.", "id": "f9990:m1"}
{"signature": "def refetch(obj):", "body": "return obj.__class__.objects.get(id=obj.id)<EOL>", "docstring": "Queries the database for the same object that is passed in, refetching\n    its contents in case they are stale.\n\n    :param obj:\n        Object to refetch\n\n    :returns:\n        Refreshed version of the object", "id": "f9990:m2"}
{"signature": "def render_page(request, page_name, data={}):", "body": "return render(request, page_name, data)<EOL>", "docstring": ".. deprecated:: 0.12\n    Use ``django.shortcuts.render`` instead\n\nThis function was a wrapper for ``render_to_response`` that handled\nrequest context.  The ``django.shortcuts.render`` method does the same\nthing, so this just wraps that now.", "id": "f9990:m0"}
{"signature": "@classmethod<EOL><INDENT>def lock_until_commit(cls, name):<DEDENT>", "body": "Lock.objects.select_for_update().get(name=name)<EOL>", "docstring": "Grabs this lock and holds it (using ``select_for_update()``) until\n        the next commit is done.\n\n        :param name:\n            Name for a previously created ``Lock`` object", "id": "f9991:c1:m0"}
{"signature": "def _all(self):", "body": "return chain(*self.querysets)<EOL>", "docstring": "Iterates records in all subquerysets", "id": "f9991:c4:m3"}
{"signature": "def json_post_required(*decorator_args):", "body": "def decorator(method):<EOL><INDENT>@wraps(method)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>field = decorator_args[<NUM_LIT:0>]<EOL>if len(decorator_args) == <NUM_LIT:2>:<EOL><INDENT>request_name = decorator_args[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>request_name = field<EOL><DEDENT>request = args[<NUM_LIT:0>]<EOL>if request.method != '<STR_LIT:POST>':<EOL><INDENT>logger.error('<STR_LIT>')<EOL>raise Http404('<STR_LIT>')<EOL><DEDENT>if field not in request.POST:<EOL><INDENT>s = '<STR_LIT>' % field<EOL>logger.error(s)<EOL>raise Http404(s)<EOL><DEDENT>setattr(request, request_name, json.loads(request.POST[field]))<EOL>return method(*args, **kwargs)<EOL><DEDENT>return wrapper<EOL><DEDENT>return decorator<EOL>", "docstring": "View decorator that enforces that the method was called using POST and\n    contains a field containing a JSON dictionary. This method should\n    only be used to wrap views and assumes the first argument of the method\n    being wrapped is a ``request`` object.\n\n    .. code-block:: python\n\n        @json_post_required('data', 'json_data')\n        def some_view(request):\n            username = request.json_data['username']\n\n    :param field:\n        The name of the POST field that contains a JSON dictionary\n    :param request_name:\n        [optional] Name of the parameter on the request to put the\n        deserialized JSON data. If not given the field name is used", "id": "f9992:m1"}
{"signature": "def grouped_filter(self):", "body": "return self.__class__.objects.all()<EOL>", "docstring": "This method should be overridden in order to allow groupings of\n        ``RankModel`` objects.  The default is there is a single group which\n        are all instances of the inheriting class.  \n\n        An example with a grouped model would be::\n\n            class Grouped(RankedModel):\n                group_number = models.IntegerField()\n\n                def grouped_filter(self):\n                    return Grouped.objects.filter(\n                        group_number=self.group_number)\n\n        :returns:\n            :class:`QuerySet` of ``RankedModel`` objects that are in the same\n            group.", "id": "f9995:c0:m4"}
{"signature": "def fancy_modeladmin(*args):", "body": "global klass_count<EOL>klass_count += <NUM_LIT:1><EOL>name = '<STR_LIT>' % klass_count<EOL>klass = type(name, (FancyModelAdmin,), {})<EOL>klass.list_display = []<EOL>if len(args) > <NUM_LIT:0>:<EOL><INDENT>klass.add_displays(*args)<EOL><DEDENT>return klass<EOL>", "docstring": "Returns a new copy of a :class:`FancyModelAdmin` class (a class, not\n    an instance!). This can then be inherited from when declaring a model\n    admin class. The :class:`FancyModelAdmin` class has additional methods\n    for managing the ``list_display`` attribute.\n\n    :param ``*args``: [optional] any arguments given will be added to the\n        ``list_display`` property using regular django ``list_display``\n        functionality.\n\n    This function is meant as a replacement for :func:`make_admin_obj_mixin`,\n    it does everything the old one does with fewer bookkeeping needs for the\n    user as well as adding functionality.\n\n    Example usage:\n\n    .. code-block:: python\n\n        # ---- models.py file ----\n        class Author(models.Model):\n            name = models.CharField(max_length=100)\n\n\n        class Book(models.Model):\n            title = models.CharField(max_length=100)\n            author = models.ForeignKey(Author, on_delete=models.CASCADE)\n\n\n    .. 
code-block:: python\n\n        # ---- admin.py file ----\n        @admin.register(Author)\n        class Author(admin.ModelAdmin):\n            list_display = ('name', )\n\n\n        base = fany_list_display_modeladmin()\n        base.add_displays('id', 'name')\n        base.add_obj_link('author', 'Our Authors',\n            '{{obj.name}} (id={{obj.id}})')\n\n        @admin.register(Book)\n        class BookAdmin(base):\n            list_display = ('name', 'show_author')\n\n\n    A sample django admin page for \"Book\" would have the table:\n\n    +----+---------------------------------+------------------------+\n    | ID | Name                            | Our Authors            |\n    +====+=================================+========================+\n    |  1 | Hitchhikers Guide To The Galaxy | *Douglas Adams (id=1)* |\n    +----+---------------------------------+------------------------+\n    |  2 | War and Peace                   | *Tolstoy (id=2)*       |\n    +----+---------------------------------+------------------------+\n    |  3 | Dirk Gently                     | *Douglas Adams (id=1)* |\n    +----+---------------------------------+------------------------+\n\n\n    See :class:`FancyModelAdmin` for a full list of functionality\n    provided by the returned base class.", "id": "f9998:m4"}
{"signature": "@classmethod<EOL><INDENT>def add_display(cls, attr, title='<STR_LIT>'):<DEDENT>", "body": "global klass_count<EOL>klass_count += <NUM_LIT:1><EOL>fn_name = '<STR_LIT>' % klass_count<EOL>cls.list_display.append(fn_name)<EOL>if not title:<EOL><INDENT>title = attr.capitalize()<EOL><DEDENT>def _ref(self, obj):<EOL><INDENT>_, _, value = lookup_field(attr, obj, cls)<EOL>return value<EOL><DEDENT>_ref.short_description = title<EOL>_ref.allow_tags = True<EOL>_ref.admin_order_field = attr<EOL>setattr(cls, fn_name, _ref)<EOL>", "docstring": "Adds a ``list_display`` property without any extra wrappers,\n        similar to :func:`add_displays`, but can also change the title.\n\n        :param attr:\n            Name of the attribute to add to the display\n\n        :param title:\n            Title for the column of the django admin table.  If not given it\n            defaults to a capitalized version of ``attr``", "id": "f9998:c0:m1"}
{"signature": "def _obj_display(obj, display='<STR_LIT>'):", "body": "result = '<STR_LIT>'<EOL>if not display:<EOL><INDENT>result = str(obj)<EOL><DEDENT>else:<EOL><INDENT>template = Template(display)<EOL>context = Context({'<STR_LIT>':obj})<EOL>result = template.render(context)<EOL><DEDENT>return result<EOL>", "docstring": "Returns string representation of an object, either the default or based\n    on the display template passed in.", "id": "f9998:m2"}
{"signature": "@classmethod<EOL><INDENT>def add_displays(cls, *args):<DEDENT>", "body": "for arg in args:<EOL><INDENT>cls.list_display.append(arg)<EOL><DEDENT>", "docstring": "Each arg is added to the ``list_display`` property without any\n        extra wrappers, using only the regular django functionality", "id": "f9998:c0:m0"}
{"signature": "def admin_obj_attr(obj, attr):", "body": "try:<EOL><INDENT>field_obj = get_obj_attr(obj, attr)<EOL>if not field_obj:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT><DEDENT>except AttributeError:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return field_obj<EOL>", "docstring": "A safe version of :func:``utils.get_obj_attr`` that returns and empty\n    string in the case of an exception or an empty object", "id": "f9998:m1"}
{"signature": "def create_validator():", "body": "field_names = (<EOL>'<STR_LIT>', <EOL>'<STR_LIT>', <EOL>'<STR_LIT>', <EOL>'<STR_LIT>', <EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>)<EOL>validator = CSVValidator(field_names)<EOL>validator.add_header_check('<STR_LIT>', '<STR_LIT>')<EOL>validator.add_record_length_check('<STR_LIT>', '<STR_LIT>')<EOL>validator.add_value_check('<STR_LIT>', int, <EOL>'<STR_LIT>', '<STR_LIT>')<EOL>validator.add_value_check('<STR_LIT>', int, <EOL>'<STR_LIT>', '<STR_LIT>')<EOL>validator.add_value_check('<STR_LIT>', enumeration('<STR_LIT:M>', '<STR_LIT:F>'), <EOL>'<STR_LIT>', '<STR_LIT>')<EOL>validator.add_value_check('<STR_LIT>', number_range_inclusive(<NUM_LIT:0>, <NUM_LIT>, int), <EOL>'<STR_LIT>', '<STR_LIT>')<EOL>validator.add_value_check('<STR_LIT>', datetime_string('<STR_LIT>'),<EOL>'<STR_LIT>', '<STR_LIT>')<EOL>def check_age_variables(r):<EOL><INDENT>age_years = int(r['<STR_LIT>'])<EOL>age_months = int(r['<STR_LIT>'])<EOL>valid = (age_months >= age_years * <NUM_LIT:12> and <EOL>age_months % age_years < <NUM_LIT:12>)<EOL>if not valid:<EOL><INDENT>raise RecordError('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT><DEDENT>validator.add_record_check(check_age_variables)<EOL>return validator<EOL>", "docstring": "Create an example CSV validator for patient demographic data.", "id": "f10011:m0"}
{"signature": "def search_pattern(regex):", "body": "prog = re.compile(regex)<EOL>def checker(v):<EOL><INDENT>result = prog.search(v)<EOL>if result is None:<EOL><INDENT>raise ValueError(v)<EOL><DEDENT><DEDENT>return checker<EOL>", "docstring": "Return a value check function which raises a ValueError if the supplied\nregular expression does not match anywhere in the value, see also\n`re.search`.", "id": "f10012:m2"}
{"signature": "def add_value_predicate(self, field_name, value_predicate,<EOL>code=VALUE_PREDICATE_FALSE,<EOL>message=MESSAGES[VALUE_PREDICATE_FALSE],<EOL>modulus=<NUM_LIT:1>):", "body": "assert field_name in self._field_names, '<STR_LIT>' % field_name<EOL>assert callable(value_predicate), '<STR_LIT>'<EOL>t = field_name, value_predicate, code, message, modulus<EOL>self._value_predicates.append(t)<EOL>", "docstring": "Add a value predicate function for the specified field.\n\nN.B., everything you can do with value predicates can also be done with\nvalue check functions, whether you use one or the other is a matter of\nstyle.\n\nArguments\n---------\n\n`field_name` - the name of the field to attach the value predicate\nfunction to\n\n`value_predicate` - a function that accepts a single argument (a value)\nand returns False if the value is not valid\n\n`code` - problem code to report if a value is not valid, defaults to\n`VALUE_PREDICATE_FALSE`\n\n`message` - problem message to report if a value is not valid\n\n`modulus` - apply the check to every nth record, defaults to 1 (check\nevery record)", "id": "f10012:c1:m4"}
{"signature": "def _as_dict(self, r):", "body": "d = dict()<EOL>for i, f in enumerate(self._field_names):<EOL><INDENT>d[f] = r[i] if i < len(r) else None<EOL><DEDENT>return d<EOL>", "docstring": "Convert the record to a dictionary using field names as keys.", "id": "f10012:c1:m24"}
{"signature": "def datetime_range_exclusive(min, max, format):", "body": "dmin = datetime.strptime(min, format)<EOL>dmax = datetime.strptime(max, format)<EOL>def checker(v):<EOL><INDENT>dv = datetime.strptime(v, format)<EOL>if dv <= dmin or dv >= dmax:<EOL><INDENT>raise ValueError(v)<EOL><DEDENT><DEDENT>return checker<EOL>", "docstring": "Return a value check function which raises a ValueError if the supplied\nvalue when converted to a datetime using the supplied `format` string is\nless than or equal to `min` or greater than or equal to `max`.", "id": "f10012:m7"}
{"signature": "def _apply_skips(self, i, r,<EOL>summarize=False,<EOL>report_unexpected_exceptions=True,<EOL>context=None):", "body": "for skip in self._skips:<EOL><INDENT>try:<EOL><INDENT>result = skip(r)<EOL>if result is True:<EOL><INDENT>yield True<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>if report_unexpected_exceptions:<EOL><INDENT>p = {'<STR_LIT:code>': UNEXPECTED_EXCEPTION}<EOL>if not summarize:<EOL><INDENT>p['<STR_LIT:message>'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)<EOL>p['<STR_LIT>'] = i + <NUM_LIT:1><EOL>p['<STR_LIT>'] = r<EOL>p['<STR_LIT>'] = e<EOL>p['<STR_LIT>'] = '<STR_LIT>' % (skip.__name__,<EOL>skip.__doc__)<EOL>if context is not None: p['<STR_LIT>'] = context<EOL><DEDENT>yield p<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Apply skip functions on `r`.", "id": "f10012:c1:m23"}
{"signature": "def match_pattern(regex):", "body": "prog = re.compile(regex)<EOL>def checker(v):<EOL><INDENT>result = prog.match(v)<EOL>if result is None:<EOL><INDENT>raise ValueError(v)<EOL><DEDENT><DEDENT>return checker<EOL>", "docstring": "Return a value check function which raises a ValueError if the value does\nnot match the supplied regular expression, see also `re.match`.", "id": "f10012:m1"}
{"signature": "def _apply_value_predicates(self, i, r,<EOL>summarize=False,<EOL>report_unexpected_exceptions=True,<EOL>context=None):", "body": "for field_name, predicate, code, message, modulus in self._value_predicates:<EOL><INDENT>if i % modulus == <NUM_LIT:0>: <EOL><INDENT>fi = self._field_names.index(field_name)<EOL>if fi < len(r): <EOL><INDENT>value = r[fi]<EOL>try:<EOL><INDENT>valid = predicate(value)<EOL>if not valid:<EOL><INDENT>p = {'<STR_LIT:code>': code}<EOL>if not summarize:<EOL><INDENT>p['<STR_LIT:message>'] = message<EOL>p['<STR_LIT>'] = i + <NUM_LIT:1><EOL>p['<STR_LIT>'] = fi + <NUM_LIT:1><EOL>p['<STR_LIT>'] = field_name<EOL>p['<STR_LIT:value>'] = value<EOL>p['<STR_LIT>'] = r<EOL>if context is not None: p['<STR_LIT>'] = context<EOL><DEDENT>yield p<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>if report_unexpected_exceptions:<EOL><INDENT>p = {'<STR_LIT:code>': UNEXPECTED_EXCEPTION}<EOL>if not summarize:<EOL><INDENT>p['<STR_LIT:message>'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)<EOL>p['<STR_LIT>'] = i + <NUM_LIT:1><EOL>p['<STR_LIT>'] = fi + <NUM_LIT:1><EOL>p['<STR_LIT>'] = field_name<EOL>p['<STR_LIT:value>'] = value<EOL>p['<STR_LIT>'] = r<EOL>p['<STR_LIT>'] = e<EOL>p['<STR_LIT>'] = '<STR_LIT>' % (predicate.__name__,<EOL>predicate.__doc__)<EOL>if context is not None: p['<STR_LIT>'] = context<EOL><DEDENT>yield p<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Apply value predicates on the given record `r`.", "id": "f10012:c1:m15"}
{"signature": "def _apply_each_methods(self, i, r,<EOL>summarize=False,<EOL>report_unexpected_exceptions=True,<EOL>context=None):", "body": "for a in dir(self):<EOL><INDENT>if a.startswith('<STR_LIT>'):<EOL><INDENT>rdict = self._as_dict(r)<EOL>f = getattr(self, a)<EOL>try:<EOL><INDENT>f(rdict)<EOL><DEDENT>except Exception as e:<EOL><INDENT>if report_unexpected_exceptions:<EOL><INDENT>p = {'<STR_LIT:code>': UNEXPECTED_EXCEPTION}<EOL>if not summarize:<EOL><INDENT>p['<STR_LIT:message>'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)<EOL>p['<STR_LIT>'] = i + <NUM_LIT:1><EOL>p['<STR_LIT>'] = r<EOL>p['<STR_LIT>'] = e<EOL>p['<STR_LIT>'] = '<STR_LIT>' % (f.__name__,<EOL>f.__doc__)<EOL>if context is not None: p['<STR_LIT>'] = context<EOL><DEDENT>yield p<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Invoke 'each' methods on `r`.", "id": "f10012:c1:m19"}
{"signature": "def datetime_range_inclusive(min, max, format):", "body": "dmin = datetime.strptime(min, format)<EOL>dmax = datetime.strptime(max, format)<EOL>def checker(v):<EOL><INDENT>dv = datetime.strptime(v, format)<EOL>if dv < dmin or dv > dmax:<EOL><INDENT>raise ValueError(v)<EOL><DEDENT><DEDENT>return checker<EOL>", "docstring": "Return a value check function which raises a ValueError if the supplied\nvalue when converted to a datetime using the supplied `format` string is\nless than `min` or greater than `max`.", "id": "f10012:m6"}
{"signature": "def _apply_header_checks(self, i, r, summarize=False, context=None):", "body": "for code, message in self._header_checks:<EOL><INDENT>if tuple(r) != self._field_names:<EOL><INDENT>p = {'<STR_LIT:code>': code}<EOL>if not summarize:<EOL><INDENT>p['<STR_LIT:message>'] = message<EOL>p['<STR_LIT>'] = i + <NUM_LIT:1><EOL>p['<STR_LIT>'] = tuple(r)<EOL>p['<STR_LIT>'] = set(self._field_names) - set(r)<EOL>p['<STR_LIT>'] = set(r) - set(self._field_names)<EOL>if context is not None: p['<STR_LIT>'] = context<EOL><DEDENT>yield p<EOL><DEDENT><DEDENT>", "docstring": "Apply header checks on the given record `r`.", "id": "f10012:c1:m13"}
{"signature": "def __init__(self, field_names):", "body": "self._field_names = tuple(field_names)<EOL>self._value_checks = []<EOL>self._header_checks = []<EOL>self._record_length_checks = []<EOL>self._value_predicates = []<EOL>self._record_checks = []<EOL>self._record_predicates = []<EOL>self._unique_checks = []<EOL>self._skips = []<EOL>", "docstring": "Instantiate a `CSVValidator`, supplying expected `field_names` as a\nsequence of strings.", "id": "f10012:c1:m0"}
{"signature": "def ivalidate(self, data,<EOL>expect_header_row=True,<EOL>ignore_lines=<NUM_LIT:0>,<EOL>summarize=False,<EOL>context=None,<EOL>report_unexpected_exceptions=True):", "body": "unique_sets = self._init_unique_sets() <EOL>for i, r in enumerate(data):<EOL><INDENT>if expect_header_row and i == ignore_lines:<EOL><INDENT>for p in self._apply_header_checks(i, r, summarize, context):<EOL><INDENT>yield p<EOL><DEDENT><DEDENT>elif i >= ignore_lines:<EOL><INDENT>skip = False<EOL>for p in self._apply_skips(i, r, summarize,<EOL>report_unexpected_exceptions,<EOL>context):<EOL><INDENT>if p is True:<EOL><INDENT>skip = True<EOL><DEDENT>else:<EOL><INDENT>yield p<EOL><DEDENT><DEDENT>if not skip:<EOL><INDENT>for p in self._apply_each_methods(i, r, summarize,<EOL>report_unexpected_exceptions,<EOL>context):<EOL><INDENT>yield p <EOL><DEDENT>for p in self._apply_value_checks(i, r, summarize,<EOL>report_unexpected_exceptions,<EOL>context):<EOL><INDENT>yield p<EOL><DEDENT>for p in self._apply_record_length_checks(i, r, summarize,<EOL>context):<EOL><INDENT>yield p<EOL><DEDENT>for p in self._apply_value_predicates(i, r, summarize,<EOL>report_unexpected_exceptions,<EOL>context):<EOL><INDENT>yield p<EOL><DEDENT>for p in self._apply_record_checks(i, r, summarize,<EOL>report_unexpected_exceptions,<EOL>context):<EOL><INDENT>yield p<EOL><DEDENT>for p in self._apply_record_predicates(i, r, summarize,<EOL>report_unexpected_exceptions,<EOL>context):<EOL><INDENT>yield p<EOL><DEDENT>for p in self._apply_unique_checks(i, r, unique_sets, summarize):<EOL><INDENT>yield p<EOL><DEDENT>for p in self._apply_check_methods(i, r, summarize,<EOL>report_unexpected_exceptions,<EOL>context):<EOL><INDENT>yield p<EOL><DEDENT>for p in self._apply_assert_methods(i, r, summarize,<EOL>report_unexpected_exceptions,<EOL>context):<EOL><INDENT>yield p<EOL><DEDENT><DEDENT><DEDENT><DEDENT>for p in self._apply_finally_assert_methods(summarize,<EOL>report_unexpected_exceptions,<EOL>context):<EOL><INDENT>yield 
p<EOL><DEDENT>", "docstring": "Validate `data` and return a iterator over problems found.\n\nUse this function rather than validate() if you expect a large number\nof problems.\n\nArguments\n---------\n\n`data` - any source of row-oriented data, e.g., as provided by a\n`csv.reader`, or a list of lists of strings, or ...\n\n`expect_header_row` - does the data contain a header row (i.e., the\nfirst record is a list of field names)? Defaults to True.\n\n`ignore_lines` - ignore n lines (rows) at the beginning of the data\n\n`summarize` - only report problem codes, no other details\n\n`context` - a dictionary of any additional information to be added to\nany problems found - useful if problems are being aggregated from\nmultiple validators\n\n`report_unexpected_exceptions` - value check function, value predicates,\nrecord check functions, record predicates, and other user-supplied\nvalidation functions may raise unexpected exceptions. If this argument\nis true, any unexpected exceptions will be reported as validation\nproblems; if False, unexpected exceptions will be handled silently.", "id": "f10012:c1:m10"}
{"signature": "def _apply_record_checks(self, i, r,<EOL>summarize=False,<EOL>report_unexpected_exceptions=True,<EOL>context=None):", "body": "for check, modulus in self._record_checks:<EOL><INDENT>if i % modulus == <NUM_LIT:0>: <EOL><INDENT>rdict = self._as_dict(r)<EOL>try:<EOL><INDENT>check(rdict)<EOL><DEDENT>except RecordError as e:<EOL><INDENT>code = e.code if e.code is not None else RECORD_CHECK_FAILED<EOL>p = {'<STR_LIT:code>': code}<EOL>if not summarize:<EOL><INDENT>message = e.message if e.message is not None else MESSAGES[RECORD_CHECK_FAILED]<EOL>p['<STR_LIT:message>'] = message<EOL>p['<STR_LIT>'] = i + <NUM_LIT:1><EOL>p['<STR_LIT>'] = r<EOL>if context is not None: p['<STR_LIT>'] = context<EOL>if e.details is not None: p['<STR_LIT>'] = e.details<EOL><DEDENT>yield p<EOL><DEDENT>except Exception as e:<EOL><INDENT>if report_unexpected_exceptions:<EOL><INDENT>p = {'<STR_LIT:code>': UNEXPECTED_EXCEPTION}<EOL>if not summarize:<EOL><INDENT>p['<STR_LIT:message>'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)<EOL>p['<STR_LIT>'] = i + <NUM_LIT:1><EOL>p['<STR_LIT>'] = r<EOL>p['<STR_LIT>'] = e<EOL>p['<STR_LIT>'] = '<STR_LIT>' % (check.__name__,<EOL>check.__doc__)<EOL>if context is not None: p['<STR_LIT>'] = context<EOL><DEDENT>yield p<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Apply record checks on `r`.", "id": "f10012:c1:m16"}
{"signature": "def add_record_check(self, record_check, modulus=<NUM_LIT:1>):", "body": "assert callable(record_check), '<STR_LIT>'<EOL>t = record_check, modulus<EOL>self._record_checks.append(t)<EOL>", "docstring": "Add a record check function.\n\nArguments\n---------\n\n`record_check` - a function that accepts a single argument (a record as\na dictionary of values indexed by field name) and raises a\n`RecordError` if the record is not valid\n\n`modulus` - apply the check to every nth record, defaults to 1 (check\nevery record)", "id": "f10012:c1:m5"}
{"signature": "def decode(message, pblite, ignore_first_item=False):", "body": "if not isinstance(pblite, list):<EOL><INDENT>logger.warning('<STR_LIT>',<EOL>type(pblite))<EOL>return<EOL><DEDENT>if ignore_first_item:<EOL><INDENT>pblite = pblite[<NUM_LIT:1>:]<EOL><DEDENT>if pblite and isinstance(pblite[-<NUM_LIT:1>], dict):<EOL><INDENT>extra_fields = {int(field_number): value for field_number, value<EOL>in pblite[-<NUM_LIT:1>].items()}<EOL>pblite = pblite[:-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>extra_fields = {}<EOL><DEDENT>fields_values = itertools.chain(enumerate(pblite, start=<NUM_LIT:1>),<EOL>extra_fields.items())<EOL>for field_number, value in fields_values:<EOL><INDENT>if value is None:<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>field = message.DESCRIPTOR.fields_by_number[field_number]<EOL><DEDENT>except KeyError:<EOL><INDENT>if value not in [[], '<STR_LIT>', <NUM_LIT:0>]:<EOL><INDENT>logger.debug('<STR_LIT>'<EOL>'<STR_LIT>', message.__class__.__name__, field_number,<EOL>value)<EOL><DEDENT>continue<EOL><DEDENT>if field.label == FieldDescriptor.LABEL_REPEATED:<EOL><INDENT>_decode_repeated_field(message, field, value)<EOL><DEDENT>else:<EOL><INDENT>_decode_field(message, field, value)<EOL><DEDENT><DEDENT>", "docstring": "Decode pblite to Protocol Buffer message.\n\n    This method is permissive of decoding errors and will log them as warnings\n    and continue decoding where possible.\n\n    The first element of the outer pblite list must often be ignored using the\n    ignore_first_item parameter because it contains an abbreviation of the name\n    of the protobuf message (eg.  
cscmrp for ClientSendChatMessageResponseP)\n    that's not part of the protobuf.\n\n    Args:\n        message: protocol buffer message instance to decode into.\n        pblite: list representing a pblite-serialized message.\n        ignore_first_item: If True, ignore the item at index 0 in the pblite\n            list, making the item at index 1 correspond to field 1 in the\n            message.", "id": "f10015:m2"}
{"signature": "def preprocess(self, text):", "body": "<EOL>return text.replace('<STR_LIT:U+0020>', '<STR_LIT>')<EOL>", "docstring": "Preprocess text before parsing", "id": "f10017:c1:m1"}
{"signature": "def postprocess(self, text):", "body": "<EOL>return markdown_unescape_regex.sub(r'<STR_LIT>', text)<EOL>", "docstring": "Postprocess text after parsing", "id": "f10017:c1:m2"}
{"signature": "def _best_effort_decode(data_bytes):", "body": "decoder = Utf8IncrementalDecoder()<EOL>return decoder.decode(data_bytes)<EOL>", "docstring": "Decode as much of data_bytes as possible as UTF-8.", "id": "f10018:m0"}
{"signature": "@property<EOL><INDENT>def is_connected(self):<DEDENT>", "body": "return self._is_connected<EOL>", "docstring": "Whether the channel is currently connected.", "id": "f10018:c2:m1"}
{"signature": "async def send_maps(self, map_list):", "body": "params = {<EOL>'<STR_LIT>': <NUM_LIT:8>,  <EOL>'<STR_LIT>': <NUM_LIT>,  <EOL>'<STR_LIT>': '<STR_LIT>',  <EOL>}<EOL>if self._gsessionid_param is not None:<EOL><INDENT>params['<STR_LIT>'] = self._gsessionid_param<EOL><DEDENT>if self._sid_param is not None:<EOL><INDENT>params['<STR_LIT>'] = self._sid_param<EOL><DEDENT>data_dict = dict(count=len(map_list), ofs=<NUM_LIT:0>)<EOL>for map_num, map_ in enumerate(map_list):<EOL><INDENT>for map_key, map_val in map_.items():<EOL><INDENT>data_dict['<STR_LIT>'.format(map_num, map_key)] = map_val<EOL><DEDENT><DEDENT>res = await self._session.fetch(<EOL>'<STR_LIT>', CHANNEL_URL, params=params, data=data_dict<EOL>)<EOL>return res<EOL>", "docstring": "Sends a request to the server containing maps (dicts).", "id": "f10018:c2:m3"}
{"signature": "async def _on_push_data(self, data_bytes):", "body": "logger.debug('<STR_LIT>'.format(data_bytes))<EOL>for chunk in self._chunk_parser.get_chunks(data_bytes):<EOL><INDENT>if not self._is_connected:<EOL><INDENT>if self._on_connect_called:<EOL><INDENT>self._is_connected = True<EOL>await self.on_reconnect.fire()<EOL><DEDENT>else:<EOL><INDENT>self._on_connect_called = True<EOL>self._is_connected = True<EOL>await self.on_connect.fire()<EOL><DEDENT><DEDENT>container_array = json.loads(chunk)<EOL>for inner_array in container_array:<EOL><INDENT>array_id, data_array = inner_array<EOL>logger.debug('<STR_LIT>',<EOL>array_id, data_array)<EOL>await self.on_receive_array.fire(data_array)<EOL><DEDENT><DEDENT>", "docstring": "Parse push data and trigger events.", "id": "f10018:c2:m6"}
{"signature": "def _parse_sid_response(res):", "body": "res = json.loads(list(ChunkParser().get_chunks(res))[<NUM_LIT:0>])<EOL>sid = res[<NUM_LIT:0>][<NUM_LIT:1>][<NUM_LIT:1>]<EOL>gsessionid = res[<NUM_LIT:1>][<NUM_LIT:1>][<NUM_LIT:0>]['<STR_LIT>']<EOL>return (sid, gsessionid)<EOL>", "docstring": "Parse response format for request for new channel SID.\n\n    Example format (after parsing JS):\n    [   [0,[\"c\",\"SID_HERE\",\"\",8]],\n        [1,[{\"gsid\":\"GSESSIONID_HERE\"}]]]\n\n    Returns (SID, gsessionid) tuple.", "id": "f10018:m1"}
{"signature": "async def send_offnetwork_invitation(<EOL>self, send_offnetwork_invitation_request<EOL>):", "body": "response = hangouts_pb2.SendOffnetworkInvitationResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>send_offnetwork_invitation_request,<EOL>response)<EOL>return response<EOL>", "docstring": "Send an email to invite a non-Google contact to Hangouts.", "id": "f10019:c0:m27"}
{"signature": "async def set_active_client(self, set_active_client_request):", "body": "response = hangouts_pb2.SetActiveClientResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>set_active_client_request, response)<EOL>return response<EOL>", "docstring": "Set the active client.", "id": "f10019:c0:m28"}
{"signature": "async def get_suggested_entities(self, get_suggested_entities_request):", "body": "response = hangouts_pb2.GetSuggestedEntitiesResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>get_suggested_entities_request, response)<EOL>return response<EOL>", "docstring": "Return suggested contacts.", "id": "f10019:c0:m20"}
{"signature": "async def send_chat_message(self, send_chat_message_request):", "body": "response = hangouts_pb2.SendChatMessageResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>send_chat_message_request, response)<EOL>return response<EOL>", "docstring": "Send a chat message to a conversation.", "id": "f10019:c0:m25"}
{"signature": "async def delete_conversation(self, delete_conversation_request):", "body": "response = hangouts_pb2.DeleteConversationResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>delete_conversation_request, response)<EOL>return response<EOL>", "docstring": "Leave a one-to-one conversation.\n\n        One-to-one conversations are \"sticky\"; they can't actually be deleted.\n        This API clears the event history of the specified conversation up to\n        ``delete_upper_bound_timestamp``, hiding it if no events remain.", "id": "f10019:c0:m14"}
{"signature": "async def connect(self):", "body": "proxy = os.environ.get('<STR_LIT>')<EOL>self._session = http_utils.Session(self._cookies, proxy=proxy)<EOL>try:<EOL><INDENT>self._channel = channel.Channel(<EOL>self._session, self._max_retries, self._retry_backoff_base<EOL>)<EOL>self._channel.on_connect.add_observer(self.on_connect.fire)<EOL>self._channel.on_reconnect.add_observer(self.on_reconnect.fire)<EOL>self._channel.on_disconnect.add_observer(self.on_disconnect.fire)<EOL>self._channel.on_receive_array.add_observer(self._on_receive_array)<EOL>self._listen_future = asyncio.ensure_future(self._channel.listen())<EOL>try:<EOL><INDENT>await self._listen_future<EOL><DEDENT>except asyncio.CancelledError:<EOL><INDENT>self._listen_future.cancel()<EOL><DEDENT>logger.info(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>finally:<EOL><INDENT>await self._session.close()<EOL><DEDENT>", "docstring": "Establish a connection to the chat server.\n\n        Returns when an error has occurred, or :func:`disconnect` has been\n        called.", "id": "f10019:c0:m1"}
{"signature": "async def get_entity_by_id(self, get_entity_by_id_request):", "body": "response = hangouts_pb2.GetEntityByIdResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>get_entity_by_id_request, response)<EOL>return response<EOL>", "docstring": "Return one or more user entities.\n\n        Searching by phone number only finds entities when their phone number\n        is in your contacts (and not always even then), and can't be used to\n        find Google Voice contacts.", "id": "f10019:c0:m17"}
{"signature": "async def set_focus(self, set_focus_request):", "body": "response = hangouts_pb2.SetFocusResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>set_focus_request, response)<EOL>return response<EOL>", "docstring": "Set focus to a conversation.", "id": "f10019:c0:m30"}
{"signature": "async def _on_receive_array(self, array):", "body": "if array[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>pass  <EOL><DEDENT>else:<EOL><INDENT>wrapper = json.loads(array[<NUM_LIT:0>]['<STR_LIT:p>'])<EOL>if '<STR_LIT:3>' in wrapper:<EOL><INDENT>self._client_id = wrapper['<STR_LIT:3>']['<STR_LIT:2>']<EOL>logger.info('<STR_LIT>', self._client_id)<EOL>await self._add_channel_services()<EOL><DEDENT>if '<STR_LIT:2>' in wrapper:<EOL><INDENT>pblite_message = json.loads(wrapper['<STR_LIT:2>']['<STR_LIT:2>'])<EOL>if pblite_message[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>batch_update = hangouts_pb2.BatchUpdate()<EOL>pblite.decode(batch_update, pblite_message,<EOL>ignore_first_item=True)<EOL>for state_update in batch_update.state_update:<EOL><INDENT>logger.debug('<STR_LIT>', state_update)<EOL>header = state_update.state_update_header<EOL>self._active_client_state = header.active_client_state<EOL>await self.on_state_update.fire(state_update)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.info('<STR_LIT>', pblite_message[<NUM_LIT:0>])<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Parse channel array and call the appropriate events.", "id": "f10019:c0:m8"}
{"signature": "async def set_group_link_sharing_enabled(<EOL>self, set_group_link_sharing_enabled_request<EOL>):", "body": "response = hangouts_pb2.SetGroupLinkSharingEnabledResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>set_group_link_sharing_enabled_request,<EOL>response)<EOL>return response<EOL>", "docstring": "Set whether group link sharing is enabled for a conversation.", "id": "f10019:c0:m31"}
{"signature": "async def set_typing(self, set_typing_request):", "body": "response = hangouts_pb2.SetTypingResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>set_typing_request, response)<EOL>return response<EOL>", "docstring": "Set the typing status of a conversation.", "id": "f10019:c0:m33"}
{"signature": "async def easter_egg(self, easter_egg_request):", "body": "response = hangouts_pb2.EasterEggResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>easter_egg_request, response)<EOL>return response<EOL>", "docstring": "Send an easter egg event to a conversation.", "id": "f10019:c0:m15"}
{"signature": "def get_request_header(self):", "body": "<EOL>if self._client_id is not None:<EOL><INDENT>self._request_header.client_identifier.resource = self._client_id<EOL><DEDENT>return self._request_header<EOL>", "docstring": "Return ``request_header`` for use when constructing requests.\n\n        Returns:\n            Populated request header.", "id": "f10019:c0:m3"}
{"signature": "async def set_presence(self, set_presence_request):", "body": "response = hangouts_pb2.SetPresenceResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>set_presence_request, response)<EOL>return response<EOL>", "docstring": "Set the presence status.", "id": "f10019:c0:m32"}
{"signature": "async def sync_all_new_events(self, sync_all_new_events_request):", "body": "response = hangouts_pb2.SyncAllNewEventsResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>sync_all_new_events_request, response)<EOL>return response<EOL>", "docstring": "List all events occurring at or after a timestamp.", "id": "f10019:c0:m34"}
{"signature": "async def _base_request(self, url, content_type, response_type, data):", "body": "headers = {<EOL>'<STR_LIT>': content_type,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>}<EOL>params = {<EOL>'<STR_LIT>': response_type,<EOL>'<STR_LIT:key>': API_KEY,<EOL>}<EOL>res = await self._session.fetch(<EOL>'<STR_LIT>', url, headers=headers, params=params, data=data,<EOL>)<EOL>return res<EOL>", "docstring": "Send a generic authenticated POST request.\n\n        Args:\n            url (str): URL of request.\n            content_type (str): Request content type.\n            response_type (str): The desired response format. Valid options\n                are: 'json' (JSON), 'protojson' (pblite), and 'proto' (binary\n                Protocol Buffer). 'proto' requires manually setting an extra\n                header 'X-Goog-Encode-Response-If-Executable: base64'.\n            data (str): Request body data.\n\n        Returns:\n            FetchResponse: Response containing HTTP code, cookies, and body.\n\n        Raises:\n            NetworkError: If the request fails.", "id": "f10019:c0:m11"}
{"signature": "async def set_active(self):", "body": "is_active = (self._active_client_state ==<EOL>hangouts_pb2.ACTIVE_CLIENT_STATE_IS_ACTIVE)<EOL>timed_out = (time.time() - self._last_active_secs ><EOL>SETACTIVECLIENT_LIMIT_SECS)<EOL>if not is_active or timed_out:<EOL><INDENT>self._active_client_state = (<EOL>hangouts_pb2.ACTIVE_CLIENT_STATE_IS_ACTIVE<EOL>)<EOL>self._last_active_secs = time.time()<EOL>if self._email is None:<EOL><INDENT>try:<EOL><INDENT>get_self_info_request = hangouts_pb2.GetSelfInfoRequest(<EOL>request_header=self.get_request_header(),<EOL>)<EOL>get_self_info_response = await self.get_self_info(<EOL>get_self_info_request<EOL>)<EOL><DEDENT>except exceptions.NetworkError as e:<EOL><INDENT>logger.warning('<STR_LIT>'<EOL>.format(e))<EOL>return<EOL><DEDENT>self._email = (<EOL>get_self_info_response.self_entity.properties.email[<NUM_LIT:0>]<EOL>)<EOL><DEDENT>if self._client_id is None:<EOL><INDENT>logger.info(<EOL>'<STR_LIT>'<EOL>)<EOL>return<EOL><DEDENT>try:<EOL><INDENT>set_active_request = hangouts_pb2.SetActiveClientRequest(<EOL>request_header=self.get_request_header(),<EOL>is_active=True,<EOL>full_jid=\"<STR_LIT>\".format(self._email, self._client_id),<EOL>timeout_secs=ACTIVE_TIMEOUT_SECS,<EOL>)<EOL>await self.set_active_client(set_active_request)<EOL><DEDENT>except exceptions.NetworkError as e:<EOL><INDENT>logger.warning('<STR_LIT>'.format(e))<EOL><DEDENT>else:<EOL><INDENT>logger.info('<STR_LIT>'<EOL>.format(ACTIVE_TIMEOUT_SECS))<EOL><DEDENT><DEDENT>", "docstring": "Set this client as active.\n\n        While a client is active, no other clients will raise notifications.\n        Call this method whenever there is an indication the user is\n        interacting with this client. This method may be called very\n        frequently, and it will only make a request when necessary.", "id": "f10019:c0:m5"}
{"signature": "async def modify_otr_status(self, modify_otr_status_request):", "body": "response = hangouts_pb2.ModifyOTRStatusResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>modify_otr_status_request, response)<EOL>return response<EOL>", "docstring": "Enable or disable message history in a conversation.", "id": "f10019:c0:m26"}
{"signature": "async def get_group_conversation_url(self,<EOL>get_group_conversation_url_request):", "body": "response = hangouts_pb2.GetGroupConversationUrlResponse()<EOL>await self._pb_request('<STR_LIT>',<EOL>get_group_conversation_url_request,<EOL>response)<EOL>return response<EOL>", "docstring": "Get URL to allow others to join a group conversation.", "id": "f10019:c0:m18"}
{"signature": "@property<EOL><INDENT>def name(self):<DEDENT>", "body": "custom_name = self._conversation.name<EOL>return None if custom_name == '<STR_LIT>' else custom_name<EOL>", "docstring": "The conversation's custom name (:class:`str`)\n\n        May be ``None`` if conversation has no custom name.", "id": "f10020:c0:m3"}
{"signature": "async def _sync_all_conversations(client):", "body": "conv_states = []<EOL>sync_timestamp = None<EOL>request = hangouts_pb2.SyncRecentConversationsRequest(<EOL>request_header=client.get_request_header(),<EOL>max_conversations=CONVERSATIONS_PER_REQUEST,<EOL>max_events_per_conversation=<NUM_LIT:1>,<EOL>sync_filter=[<EOL>hangouts_pb2.SYNC_FILTER_INBOX,<EOL>hangouts_pb2.SYNC_FILTER_ARCHIVED,<EOL>]<EOL>)<EOL>for _ in range(MAX_CONVERSATION_PAGES):<EOL><INDENT>logger.info(<EOL>'<STR_LIT>', request.last_event_timestamp<EOL>)<EOL>response = await client.sync_recent_conversations(request)<EOL>conv_states = list(response.conversation_state) + conv_states<EOL>sync_timestamp = parsers.from_timestamp(<EOL>response.response_header.current_server_time<EOL>)<EOL>if response.continuation_end_timestamp == <NUM_LIT:0>:<EOL><INDENT>logger.info('<STR_LIT>')<EOL>break<EOL><DEDENT>else:<EOL><INDENT>request.last_event_timestamp = response.continuation_end_timestamp<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.warning('<STR_LIT>')<EOL><DEDENT>logger.info('<STR_LIT>', len(conv_states))<EOL>return conv_states, sync_timestamp<EOL>", "docstring": "Sync all conversations by making paginated requests.\n\n    Conversations are ordered by ascending sort timestamp.\n\n    Args:\n        client (Client): Connected client.\n\n    Raises:\n        NetworkError: If the requests fail.\n\n    Returns:\n        tuple of list of ``ConversationState`` messages and sync timestamp", "id": "f10020:m1"}
{"signature": "def _get_default_delivery_medium(self):", "body": "medium_options = (<EOL>self._conversation.self_conversation_state.delivery_medium_option<EOL>)<EOL>try:<EOL><INDENT>default_medium = medium_options[<NUM_LIT:0>].delivery_medium<EOL><DEDENT>except IndexError:<EOL><INDENT>logger.warning('<STR_LIT>', self.id_)<EOL>default_medium = hangouts_pb2.DeliveryMedium(<EOL>medium_type=hangouts_pb2.DELIVERY_MEDIUM_BABEL<EOL>)<EOL><DEDENT>for medium_option in medium_options:<EOL><INDENT>if medium_option.current_default:<EOL><INDENT>default_medium = medium_option.delivery_medium<EOL><DEDENT><DEDENT>return default_medium<EOL>", "docstring": "Return default DeliveryMedium to use for sending messages.\n\n        Use the first option, or an option that's marked as the current\n        default.", "id": "f10020:c0:m17"}
{"signature": "async def build_user_conversation_list(client):", "body": "conv_states, sync_timestamp = await _sync_all_conversations(client)<EOL>required_user_ids = set()<EOL>for conv_state in conv_states:<EOL><INDENT>required_user_ids |= {<EOL>user.UserID(chat_id=part.id.chat_id, gaia_id=part.id.gaia_id)<EOL>for part in conv_state.conversation.participant_data<EOL>}<EOL><DEDENT>required_entities = []<EOL>if required_user_ids:<EOL><INDENT>logger.debug('<STR_LIT>'<EOL>.format(required_user_ids))<EOL>try:<EOL><INDENT>response = await client.get_entity_by_id(<EOL>hangouts_pb2.GetEntityByIdRequest(<EOL>request_header=client.get_request_header(),<EOL>batch_lookup_spec=[<EOL>hangouts_pb2.EntityLookupSpec(<EOL>gaia_id=user_id.gaia_id,<EOL>create_offnetwork_gaia=True,<EOL>)<EOL>for user_id in required_user_ids<EOL>],<EOL>)<EOL>)<EOL>for entity_result in response.entity_result:<EOL><INDENT>required_entities.extend(entity_result.entity)<EOL><DEDENT><DEDENT>except exceptions.NetworkError as e:<EOL><INDENT>logger.warning('<STR_LIT>'.format(e))<EOL><DEDENT><DEDENT>conv_part_list = []<EOL>for conv_state in conv_states:<EOL><INDENT>conv_part_list.extend(conv_state.conversation.participant_data)<EOL><DEDENT>get_self_info_response = await client.get_self_info(<EOL>hangouts_pb2.GetSelfInfoRequest(<EOL>request_header=client.get_request_header(),<EOL>)<EOL>)<EOL>self_entity = get_self_info_response.self_entity<EOL>user_list = user.UserList(client, self_entity, required_entities,<EOL>conv_part_list)<EOL>conversation_list = ConversationList(client, conv_states,<EOL>user_list, sync_timestamp)<EOL>return (user_list, conversation_list)<EOL>", "docstring": "Build :class:`.UserList` and :class:`.ConversationList`.\n\n    This method requests data necessary to build the list of conversations and\n    users. 
Users that are not in the contact list but are participating in a\n    conversation will also be retrieved.\n\n    Args:\n        client (Client): Connected client.\n\n    Returns:\n        (:class:`.UserList`, :class:`.ConversationList`):\n            Tuple of built objects.", "id": "f10020:m0"}
{"signature": "def update_conversation(self, conversation):", "body": "<EOL>new_state = conversation.self_conversation_state<EOL>old_state = self._conversation.self_conversation_state<EOL>self._conversation = conversation<EOL>if not new_state.delivery_medium_option:<EOL><INDENT>new_state.delivery_medium_option.extend(<EOL>old_state.delivery_medium_option<EOL>)<EOL><DEDENT>old_timestamp = old_state.self_read_state.latest_read_timestamp<EOL>new_timestamp = new_state.self_read_state.latest_read_timestamp<EOL>if new_timestamp == <NUM_LIT:0>:<EOL><INDENT>new_state.self_read_state.latest_read_timestamp = old_timestamp<EOL><DEDENT>for new_entry in conversation.read_state:<EOL><INDENT>tstamp = parsers.from_timestamp(new_entry.latest_read_timestamp)<EOL>if tstamp == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>uid = parsers.from_participantid(new_entry.participant_id)<EOL>if uid not in self._watermarks or self._watermarks[uid] < tstamp:<EOL><INDENT>self._watermarks[uid] = tstamp<EOL><DEDENT><DEDENT>", "docstring": "Update the internal state of the conversation.\n\n        This method is used by :class:`.ConversationList` to maintain this\n        instance.\n\n        Args:\n            conversation: ``Conversation`` message.", "id": "f10020:c0:m13"}
{"signature": "async def set_notification_level(self, level):", "body": "await self._client.set_conversation_notification_level(<EOL>hangouts_pb2.SetConversationNotificationLevelRequest(<EOL>request_header=self._client.get_request_header(),<EOL>conversation_id=hangouts_pb2.ConversationId(id=self.id_),<EOL>level=level,<EOL>)<EOL>)<EOL>", "docstring": "Set the notification level of this conversation.\n\n        Args:\n            level: ``NOTIFICATION_LEVEL_QUIET`` to disable notifications, or\n                ``NOTIFICATION_LEVEL_RING`` to enable them.\n\n        Raises:\n            .NetworkError: If the request fails.", "id": "f10020:c0:m22"}
{"signature": "async def send_message(self, segments, image_file=None, image_id=None,<EOL>image_user_id=None):", "body": "async with self._send_message_lock:<EOL><INDENT>if image_file:<EOL><INDENT>try:<EOL><INDENT>uploaded_image = await self._client.upload_image(<EOL>image_file, return_uploaded_image=True<EOL>)<EOL><DEDENT>except exceptions.NetworkError as e:<EOL><INDENT>logger.warning('<STR_LIT>'.format(e))<EOL>raise<EOL><DEDENT>image_id = uploaded_image.image_id<EOL><DEDENT>try:<EOL><INDENT>request = hangouts_pb2.SendChatMessageRequest(<EOL>request_header=self._client.get_request_header(),<EOL>event_request_header=self._get_event_request_header(),<EOL>message_content=hangouts_pb2.MessageContent(<EOL>segment=[seg.serialize() for seg in segments],<EOL>),<EOL>)<EOL>if image_id is not None:<EOL><INDENT>request.existing_media.photo.photo_id = image_id<EOL><DEDENT>if image_user_id is not None:<EOL><INDENT>request.existing_media.photo.user_id = image_user_id<EOL>request.existing_media.photo.is_custom_user_id = True<EOL><DEDENT>await self._client.send_chat_message(request)<EOL><DEDENT>except exceptions.NetworkError as e:<EOL><INDENT>logger.warning('<STR_LIT>'.format(e))<EOL>raise<EOL><DEDENT><DEDENT>", "docstring": "Send a message to this conversation.\n\n        A per-conversation lock is acquired to ensure that messages are sent in\n        the correct order when this method is called multiple times\n        asynchronously.\n\n        Args:\n            segments: List of :class:`.ChatMessageSegment` objects to include\n                in the message.\n            image_file: (optional) File-like object containing an image to be\n                attached to the message.\n            image_id: (optional) ID of an Picasa photo to be attached to the\n                message. 
If you specify both ``image_file`` and ``image_id``\n                together, ``image_file`` takes precedence and ``image_id`` will\n                be ignored.\n            image_user_id: (optional) Picasa user ID, required only if\n                ``image_id`` refers to an image from a different Picasa user,\n                such as Google's sticker user.\n\n        Raises:\n            .NetworkError: If the message cannot be sent.", "id": "f10020:c0:m19"}
{"signature": "async def update_read_timestamp(self, read_timestamp=None):", "body": "if read_timestamp is None:<EOL><INDENT>read_timestamp = (self.events[-<NUM_LIT:1>].timestamp if self.events else<EOL>datetime.datetime.now(datetime.timezone.utc))<EOL><DEDENT>if read_timestamp > self.latest_read_timestamp:<EOL><INDENT>logger.info(<EOL>'<STR_LIT>'<EOL>.format(self.id_, self.latest_read_timestamp, read_timestamp)<EOL>)<EOL>state = self._conversation.self_conversation_state<EOL>state.self_read_state.latest_read_timestamp = (<EOL>parsers.to_timestamp(read_timestamp)<EOL>)<EOL>try:<EOL><INDENT>await self._client.update_watermark(<EOL>hangouts_pb2.UpdateWatermarkRequest(<EOL>request_header=self._client.get_request_header(),<EOL>conversation_id=hangouts_pb2.ConversationId(<EOL>id=self.id_<EOL>),<EOL>last_read_timestamp=parsers.to_timestamp(<EOL>read_timestamp<EOL>),<EOL>)<EOL>)<EOL><DEDENT>except exceptions.NetworkError as e:<EOL><INDENT>logger.warning('<STR_LIT>'.format(e))<EOL>raise<EOL><DEDENT><DEDENT>", "docstring": "Update the timestamp of the latest event which has been read.\n\n        This method will avoid making an API request if it will have no effect.\n\n        Args:\n            read_timestamp (datetime.datetime): (optional) Timestamp to set.\n                Defaults to the timestamp of the newest event.\n\n        Raises:\n            .NetworkError: If the timestamp cannot be updated.", "id": "f10020:c0:m24"}
{"signature": "def _add_conversation(self, conversation, events=[],<EOL>event_cont_token=None):", "body": "<EOL>conv_id = conversation.conversation_id.id<EOL>logger.debug('<STR_LIT>'.format(conv_id))<EOL>conv = Conversation(self._client, self._user_list, conversation,<EOL>events, event_cont_token)<EOL>self._conv_dict[conv_id] = conv<EOL>return conv<EOL>", "docstring": "Add new conversation from hangouts_pb2.Conversation", "id": "f10020:c1:m4"}
{"signature": "@property<EOL><INDENT>def id_(self):<DEDENT>", "body": "return self._conversation.conversation_id.id<EOL>", "docstring": "The conversation's ID (:class:`str`).", "id": "f10020:c0:m1"}
{"signature": "async def leave_conversation(self, conv_id):", "body": "logger.info('<STR_LIT>'.format(conv_id))<EOL>await self._conv_dict[conv_id].leave()<EOL>del self._conv_dict[conv_id]<EOL>", "docstring": "Leave a conversation.\n\n        Args:\n            conv_id (str): ID of conversation to leave.", "id": "f10020:c1:m3"}
{"signature": "def next_event(self, event_id, prev=False):", "body": "i = self.events.index(self._events_dict[event_id])<EOL>if prev and i > <NUM_LIT:0>:<EOL><INDENT>return self.events[i - <NUM_LIT:1>]<EOL><DEDENT>elif not prev and i + <NUM_LIT:1> < len(self.events):<EOL><INDENT>return self.events[i + <NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Get the event following another event in this conversation.\n\n        Args:\n            event_id (str): ID of the event.\n            prev (bool): If ``True``, return the previous event rather than the\n                next event. Defaults to ``False``.\n\n        Raises:\n            KeyError: If no such :class:`.ConversationEvent` is known.\n\n        Returns:\n            :class:`.ConversationEvent` or ``None`` if there is no following\n            event.", "id": "f10020:c0:m26"}
{"signature": "async def get_events(self, event_id=None, max_events=<NUM_LIT:50>):", "body": "if event_id is None:<EOL><INDENT>conv_events = self._events[-<NUM_LIT:1> * max_events:]<EOL><DEDENT>else:<EOL><INDENT>conv_event = self.get_event(event_id)<EOL>if self._events[<NUM_LIT:0>].id_ != event_id:<EOL><INDENT>conv_events = self._events[self._events.index(conv_event) + <NUM_LIT:1>:]<EOL><DEDENT>else:<EOL><INDENT>logger.info('<STR_LIT>'<EOL>.format(self.id_, conv_event.timestamp))<EOL>res = await self._client.get_conversation(<EOL>hangouts_pb2.GetConversationRequest(<EOL>request_header=self._client.get_request_header(),<EOL>conversation_spec=hangouts_pb2.ConversationSpec(<EOL>conversation_id=hangouts_pb2.ConversationId(<EOL>id=self.id_<EOL>)<EOL>),<EOL>include_event=True,<EOL>max_events_per_conversation=max_events,<EOL>event_continuation_token=self._event_cont_token<EOL>)<EOL>)<EOL>if res.conversation_state.HasField('<STR_LIT>'):<EOL><INDENT>self.update_conversation(<EOL>res.conversation_state.conversation<EOL>)<EOL><DEDENT>self._event_cont_token = (<EOL>res.conversation_state.event_continuation_token<EOL>)<EOL>conv_events = [self._wrap_event(event) for event<EOL>in res.conversation_state.event]<EOL>logger.info('<STR_LIT>'<EOL>.format(len(conv_events), self.id_))<EOL>for conv_event in reversed(conv_events):<EOL><INDENT>if conv_event.id_ not in self._events_dict:<EOL><INDENT>self._events.insert(<NUM_LIT:0>, conv_event)<EOL>self._events_dict[conv_event.id_] = conv_event<EOL><DEDENT>else:<EOL><INDENT>logger.info(<EOL>'<STR_LIT>',<EOL>self.id_, conv_event.id_<EOL>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return conv_events<EOL>", "docstring": "Get events from this conversation.\n\n        Makes a request to load historical events if necessary.\n\n        Args:\n            event_id (str): (optional) If provided, return events preceding\n                this event, otherwise return the newest events.\n            max_events (int): Maximum number of events to return. 
Defaults to\n                50.\n\n        Returns:\n            List of :class:`.ConversationEvent` instances, ordered\n            newest-first.\n\n        Raises:\n            KeyError: If ``event_id`` does not correspond to a known event.\n            .NetworkError: If the events could not be requested.", "id": "f10020:c0:m25"}
{"signature": "def get_event(self, event_id):", "body": "return self._events_dict[event_id]<EOL>", "docstring": "Get an event in this conversation by its ID.\n\n        Args:\n            event_id (str): ID of the event.\n\n        Raises:\n            KeyError: If no such :class:`.ConversationEvent` is known.\n\n        Returns:\n            :class:`.ConversationEvent` with the given ID.", "id": "f10020:c0:m27"}
{"signature": "async def leave(self):", "body": "is_group_conversation = (self._conversation.type ==<EOL>hangouts_pb2.CONVERSATION_TYPE_GROUP)<EOL>try:<EOL><INDENT>if is_group_conversation:<EOL><INDENT>await self._client.remove_user(<EOL>hangouts_pb2.RemoveUserRequest(<EOL>request_header=self._client.get_request_header(),<EOL>event_request_header=self._get_event_request_header(),<EOL>)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>await self._client.delete_conversation(<EOL>hangouts_pb2.DeleteConversationRequest(<EOL>request_header=self._client.get_request_header(),<EOL>conversation_id=hangouts_pb2.ConversationId(<EOL>id=self.id_<EOL>),<EOL>delete_upper_bound_timestamp=parsers.to_timestamp(<EOL>datetime.datetime.now(tz=datetime.timezone.utc)<EOL>)<EOL>)<EOL>)<EOL><DEDENT><DEDENT>except exceptions.NetworkError as e:<EOL><INDENT>logger.warning('<STR_LIT>'.format(e))<EOL>raise<EOL><DEDENT>", "docstring": "Leave this conversation.\n\n        Raises:\n            .NetworkError: If conversation cannot be left.", "id": "f10020:c0:m20"}
{"signature": "def _on_watermark_notification(self, notif):", "body": "<EOL>if self.get_user(notif.user_id).is_self:<EOL><INDENT>logger.info('<STR_LIT>'<EOL>.format(self.id_, notif.read_timestamp))<EOL>self_conversation_state = (<EOL>self._conversation.self_conversation_state<EOL>)<EOL>self_conversation_state.self_read_state.latest_read_timestamp = (<EOL>parsers.to_timestamp(notif.read_timestamp)<EOL>)<EOL><DEDENT>previous_timestamp = self._watermarks.get(<EOL>notif.user_id,<EOL>datetime.datetime.min.replace(tzinfo=datetime.timezone.utc)<EOL>)<EOL>if notif.read_timestamp > previous_timestamp:<EOL><INDENT>logger.info(('<STR_LIT>' +<EOL>'<STR_LIT>').format(self.id_,<EOL>notif.user_id.chat_id,<EOL>notif.read_timestamp))<EOL>self._watermarks[notif.user_id] = notif.read_timestamp<EOL><DEDENT>", "docstring": "Handle a watermark notification.", "id": "f10020:c0:m12"}
{"signature": "def get_all(self, include_archived=False):", "body": "return [conv for conv in self._conv_dict.values()<EOL>if not conv.is_archived or include_archived]<EOL>", "docstring": "Get all the conversations.\n\n        Args:\n            include_archived (bool): (optional) Whether to include archived\n                conversations. Defaults to ``False``.\n\n        Returns:\n            List of all :class:`.Conversation` objects.", "id": "f10020:c1:m1"}
{"signature": "def _get_event_request_header(self):", "body": "otr_status = (hangouts_pb2.OFF_THE_RECORD_STATUS_OFF_THE_RECORD<EOL>if self.is_off_the_record else<EOL>hangouts_pb2.OFF_THE_RECORD_STATUS_ON_THE_RECORD)<EOL>return hangouts_pb2.EventRequestHeader(<EOL>conversation_id=hangouts_pb2.ConversationId(id=self.id_),<EOL>client_generated_id=self._client.get_client_generated_id(),<EOL>expected_otr=otr_status,<EOL>delivery_medium=self._get_default_delivery_medium(),<EOL>)<EOL>", "docstring": "Return EventRequestHeader for conversation.", "id": "f10020:c0:m18"}
{"signature": "@staticmethod<EOL><INDENT>def get_email():<DEDENT>", "body": "print('<STR_LIT>')<EOL>return input('<STR_LIT>')<EOL>", "docstring": "Prompt for email.\n\n        Returns:\n            str: Google account email address.", "id": "f10021:c1:m0"}
{"signature": "def get_auth(credentials_prompt, refresh_token_cache, manual_login=False):", "body": "with requests.Session() as session:<EOL><INDENT>session.headers = {'<STR_LIT>': USER_AGENT}<EOL>try:<EOL><INDENT>logger.info('<STR_LIT>')<EOL>refresh_token = refresh_token_cache.get()<EOL>if refresh_token is None:<EOL><INDENT>raise GoogleAuthError(\"<STR_LIT>\")<EOL><DEDENT>access_token = _auth_with_refresh_token(session, refresh_token)<EOL><DEDENT>except GoogleAuthError as e:<EOL><INDENT>logger.info('<STR_LIT>', e)<EOL>logger.info('<STR_LIT>')<EOL>if manual_login:<EOL><INDENT>authorization_code = (<EOL>credentials_prompt.get_authorization_code()<EOL>)<EOL><DEDENT>else:<EOL><INDENT>authorization_code = _get_authorization_code(<EOL>session, credentials_prompt<EOL>)<EOL><DEDENT>access_token, refresh_token = _auth_with_code(<EOL>session, authorization_code<EOL>)<EOL>refresh_token_cache.set(refresh_token)<EOL><DEDENT>logger.info('<STR_LIT>')<EOL>return _get_session_cookies(session, access_token)<EOL><DEDENT>", "docstring": "Authenticate with Google.\n\n    Args:\n        refresh_token_cache (RefreshTokenCache): Cache to use so subsequent\n            logins may not require credentials.\n        credentials_prompt (CredentialsPrompt): Prompt to use if credentials\n            are required to log in.\n        manual_login (bool): If true, prompt user to log in through a browser\n            and enter authorization code manually. Defaults to false.\n\n    Returns:\n        dict: Google session cookies.\n\n    Raises:\n        GoogleAuthError: If authentication with Google fails.", "id": "f10021:m0"}
{"signature": "def has_selector(self, selector):", "body": "return len(self._page.soup.select(selector)) > <NUM_LIT:0><EOL>", "docstring": "Return True if selector matches an element on the current page.", "id": "f10021:c3:m1"}
{"signature": "@staticmethod<EOL><INDENT>def get_authorization_code():<DEDENT>", "body": "print(MANUAL_LOGIN_INSTRUCTIONS)<EOL>return input('<STR_LIT>')<EOL>", "docstring": "Prompt for authorization code.\n\n        Returns:\n            str: Google account authorization code.", "id": "f10021:c1:m3"}
{"signature": "def _auth_with_refresh_token(session, refresh_token):", "body": "<EOL>token_request_data = {<EOL>'<STR_LIT>': OAUTH2_CLIENT_ID,<EOL>'<STR_LIT>': OAUTH2_CLIENT_SECRET,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': refresh_token,<EOL>}<EOL>res = _make_token_request(session, token_request_data)<EOL>return res['<STR_LIT>']<EOL>", "docstring": "Authenticate using OAuth refresh token.\n\n    Raises GoogleAuthError if authentication fails.\n\n    Returns access token string.", "id": "f10021:m3"}
{"signature": "@staticmethod<EOL><INDENT>def get_password():<DEDENT>", "body": "return getpass.getpass()<EOL>", "docstring": "Prompt for password.\n\n        Returns:\n            str: Google account password.", "id": "f10021:c1:m1"}
{"signature": "def from_participantid(participant_id):", "body": "return user.UserID(<EOL>chat_id=participant_id.chat_id,<EOL>gaia_id=participant_id.gaia_id<EOL>)<EOL>", "docstring": "Convert hangouts_pb2.ParticipantId to UserID.", "id": "f10032:m2"}
{"signature": "def get_all(self):", "body": "return self._user_dict.values()<EOL>", "docstring": "Get all known users.\n\n        Returns:\n            List of :class:`~hangups.user.User` instances.", "id": "f10033:c1:m2"}
{"signature": "def _on_state_update(self, state_update):", "body": "if state_update.HasField('<STR_LIT>'):<EOL><INDENT>self._handle_conversation(state_update.conversation)<EOL><DEDENT>", "docstring": "Receive a StateUpdate", "id": "f10033:c1:m4"}
{"signature": "@staticmethod<EOL><INDENT>def from_conv_part_data(conv_part_data, self_user_id):<DEDENT>", "body": "user_id = UserID(chat_id=conv_part_data.id.chat_id,<EOL>gaia_id=conv_part_data.id.gaia_id)<EOL>return User(user_id, conv_part_data.fallback_name, None, None, [],<EOL>(self_user_id == user_id) or (self_user_id is None))<EOL>", "docstring": "Construct user from ``ConversationParticipantData`` message.\n\n        Args:\n            conv_part_id: ``ConversationParticipantData`` message.\n            self_user_id (~hangups.user.UserID or None): The ID of the current\n                user. If ``None``, assume ``conv_part_id`` is the current user.\n\n        Returns:\n            :class:`~hangups.user.User` object.", "id": "f10033:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def from_entity(entity, self_user_id):<DEDENT>", "body": "user_id = UserID(chat_id=entity.id.chat_id,<EOL>gaia_id=entity.id.gaia_id)<EOL>return User(user_id, entity.properties.display_name,<EOL>entity.properties.first_name,<EOL>entity.properties.photo_url,<EOL>entity.properties.email,<EOL>(self_user_id == user_id) or (self_user_id is None))<EOL>", "docstring": "Construct user from ``Entity`` message.\n\n        Args:\n            entity: ``Entity`` message.\n            self_user_id (~hangups.user.UserID or None): The ID of the current\n                user. If ``None``, assume ``entity`` is the current user.\n\n        Returns:\n            :class:`~hangups.user.User` object.", "id": "f10033:c0:m2"}
{"signature": "def replace_emoticons(string):", "body": "return _replace_words(HANGOUTS_EMOTICONS_TO_EMOJI, string)<EOL>", "docstring": "Replace emoticon words in string with corresponding emoji.", "id": "f10034:m0"}
{"signature": "def _replace_words(replacements, string):", "body": "output_lines = []<EOL>for line in string.split('<STR_LIT:\\n>'):<EOL><INDENT>output_words = []<EOL>for word in line.split('<STR_LIT:U+0020>'):<EOL><INDENT>new_word = replacements.get(word, word)<EOL>output_words.append(new_word)<EOL><DEDENT>output_lines.append(output_words)<EOL><DEDENT>return '<STR_LIT:\\n>'.join('<STR_LIT:U+0020>'.join(output_words) for output_words in output_lines)<EOL>", "docstring": "Replace words with corresponding values in replacements dict.\n\n    Words must be separated by spaces or newlines.", "id": "f10034:m1"}
{"signature": "async def _load(self):", "body": "try:<EOL><INDENT>conv_events = await self._conversation.get_events(<EOL>self._conversation.events[<NUM_LIT:0>].id_<EOL>)<EOL><DEDENT>except (IndexError, hangups.NetworkError):<EOL><INDENT>conv_events = []<EOL><DEDENT>if not conv_events:<EOL><INDENT>self._first_loaded = True<EOL><DEDENT>if self._focus_position == self.POSITION_LOADING and conv_events:<EOL><INDENT>self.set_focus(conv_events[-<NUM_LIT:1>].id_)<EOL><DEDENT>else:<EOL><INDENT>self._modified()<EOL><DEDENT>self._refresh_watermarked_events()<EOL>self._is_loading = False<EOL>", "docstring": "Load more events for this conversation.", "id": "f10036:c14:m2"}
{"signature": "def _on_return(self, text):", "body": "<EOL>if not text:<EOL><INDENT>return<EOL><DEDENT>elif text.startswith('<STR_LIT>') and len(text.split('<STR_LIT:U+0020>')) == <NUM_LIT:2>:<EOL><INDENT>filename = text.split('<STR_LIT:U+0020>')[<NUM_LIT:1>]<EOL>image_file = open(filename, '<STR_LIT:rb>')<EOL>text = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>image_file = None<EOL><DEDENT>text = replace_emoticons(text)<EOL>segments = hangups.ChatMessageSegment.from_str(text)<EOL>self._coroutine_queue.put(<EOL>self._handle_send_message(<EOL>self._conversation.send_message(<EOL>segments, image_file=image_file<EOL>)<EOL>)<EOL>)<EOL>", "docstring": "Called when the user presses return on the send message widget.", "id": "f10036:c15:m4"}
{"signature": "def get_conv_widget(self, conv_id):", "body": "if conv_id not in self._conv_widgets:<EOL><INDENT>set_title_cb = (lambda widget, title:<EOL>self._tabbed_window.set_tab(widget, title=title))<EOL>widget = ConversationWidget(<EOL>self._client, self._coroutine_queue,<EOL>self._conv_list.get(conv_id), set_title_cb, self._keys,<EOL>self._datetimefmt<EOL>)<EOL>self._conv_widgets[conv_id] = widget<EOL><DEDENT>return self._conv_widgets[conv_id]<EOL>", "docstring": "Return an existing or new ConversationWidget.", "id": "f10036:c1:m6"}
{"signature": "def _get_label(self):", "body": "return get_conv_name(self._conversation, show_unread=True)<EOL>", "docstring": "Return the button's label generated from the conversation.", "id": "f10036:c7:m1"}
{"signature": "def get_menu_widget(self, close_callback):", "body": "return ConversationMenu(<EOL>self._coroutine_queue, self._conversation, close_callback,<EOL>self._keys<EOL>)<EOL>", "docstring": "Return the menu widget associated with this widget.", "id": "f10036:c15:m1"}
{"signature": "def _on_event(self, _):", "body": "self._button.set_label(self._get_label())<EOL>", "docstring": "Update the button's label when an event occurs.", "id": "f10036:c7:m2"}
{"signature": "def keypress(self, size, key):", "body": "<EOL>return super().keypress(size, key)<EOL>", "docstring": "forward the call", "id": "f10036:c3:m0"}
{"signature": "async def _on_connect(self):", "body": "self._user_list, self._conv_list = (<EOL>await hangups.build_user_conversation_list(self._client)<EOL>)<EOL>self._conv_list.on_event.add_observer(self._on_event)<EOL>conv_picker = ConversationPickerWidget(self._conv_list,<EOL>self.on_select_conversation,<EOL>self._keys)<EOL>self._tabbed_window = TabbedWindowWidget(self._keys)<EOL>self._tabbed_window.set_tab(conv_picker, switch=True,<EOL>title='<STR_LIT>')<EOL>self._urwid_loop.widget = self._tabbed_window<EOL>", "docstring": "Handle connecting for the first time.", "id": "f10036:c1:m9"}
{"signature": "def put(self, coro):", "body": "<EOL>assert asyncio.iscoroutine(coro)<EOL>self._queue.put_nowait(coro)<EOL>", "docstring": "Put a coroutine in the queue to be executed.", "id": "f10036:c2:m1"}
{"signature": "def prev_position(self, position):", "body": "return self._get_position(position, prev=True)<EOL>", "docstring": "Return the position above position or raise IndexError.", "id": "f10036:c14:m9"}
{"signature": "def _on_typing(self, typing_message):", "body": "self._typing_statuses[typing_message.user_id] = typing_message.status<EOL>self._update()<EOL>", "docstring": "Handle typing updates.", "id": "f10036:c12:m6"}
{"signature": "def _on_reconnect(self):", "body": "self._is_connected = True<EOL>self._update()<EOL>", "docstring": "Hide reconnecting message when reconnected.", "id": "f10036:c12:m4"}
{"signature": "def dir_maker(path):", "body": "directory = os.path.dirname(path)<EOL>if directory != '<STR_LIT>' and not os.path.isdir(directory):<EOL><INDENT>try:<EOL><INDENT>os.makedirs(directory)<EOL><DEDENT>except OSError as e:<EOL><INDENT>sys.exit('<STR_LIT>'.format(e))<EOL><DEDENT><DEDENT>", "docstring": "Create a directory if it does not exist.", "id": "f10036:m2"}
{"signature": "def next_position(self, position):", "body": "return self._get_position(position)<EOL>", "docstring": "Return the position below position or raise IndexError.", "id": "f10036:c14:m8"}
{"signature": "def _hide_menu(self):", "body": "self._urwid_loop.widget = self._tabbed_window<EOL>", "docstring": "Hide the overlay menu.", "id": "f10036:c1:m5"}
{"signature": "def _show_menu(self):", "body": "<EOL>current_widget = self._tabbed_window.get_current_widget()<EOL>if hasattr(current_widget, '<STR_LIT>'):<EOL><INDENT>menu_widget = current_widget.get_menu_widget(self._hide_menu)<EOL>overlay = urwid.Overlay(menu_widget, self._tabbed_window,<EOL>align='<STR_LIT>', width=('<STR_LIT>', <NUM_LIT>),<EOL>valign='<STR_LIT>', height=('<STR_LIT>', <NUM_LIT>))<EOL>self._urwid_loop.widget = overlay<EOL><DEDENT>", "docstring": "Show the overlay menu.", "id": "f10036:c1:m4"}
{"signature": "def _on_disconnect(self):", "body": "self._is_connected = False<EOL>self._update()<EOL>", "docstring": "Show reconnecting message when disconnected.", "id": "f10036:c12:m3"}
{"signature": "@contextlib.contextmanager<EOL>def bracketed_paste_mode():", "body": "sys.stdout.write('<STR_LIT>')<EOL>try:<EOL><INDENT>yield<EOL><DEDENT>finally:<EOL><INDENT>sys.stdout.write('<STR_LIT>')<EOL><DEDENT>", "docstring": "Context manager for enabling/disabling bracketed paste mode.", "id": "f10036:m1"}
{"signature": "def _clear_message(self):", "body": "self._message = None<EOL>self._message_handle = None<EOL>self._update()<EOL>", "docstring": "Clear the temporary message.", "id": "f10036:c12:m2"}
{"signature": "def _update(self):", "body": "typing_users = [self._conversation.get_user(user_id)<EOL>for user_id, status in self._typing_statuses.items()<EOL>if status == hangups.TYPING_TYPE_STARTED]<EOL>displayed_names = [user.first_name for user in typing_users<EOL>if not user.is_self]<EOL>if displayed_names:<EOL><INDENT>typing_message = '<STR_LIT>'.format(<EOL>'<STR_LIT:U+002CU+0020>'.join(sorted(displayed_names)),<EOL>'<STR_LIT>' if len(displayed_names) == <NUM_LIT:1> else '<STR_LIT>'<EOL>)<EOL><DEDENT>else:<EOL><INDENT>typing_message = '<STR_LIT>'<EOL><DEDENT>if not self._is_connected:<EOL><INDENT>self._widget.set_text(\"<STR_LIT>\")<EOL><DEDENT>elif self._message is not None:<EOL><INDENT>self._widget.set_text(self._message)<EOL><DEDENT>else:<EOL><INDENT>self._widget.set_text(typing_message)<EOL><DEDENT>", "docstring": "Update status text.", "id": "f10036:c12:m7"}
{"signature": "@staticmethod<EOL><INDENT>def _get_date_str(timestamp, datetimefmt, show_date=False):<DEDENT>", "body": "fmt = '<STR_LIT>'<EOL>if show_date:<EOL><INDENT>fmt += '<STR_LIT:\\n>'+datetimefmt.get('<STR_LIT:date>', '<STR_LIT>')+'<STR_LIT:\\n>'<EOL><DEDENT>fmt += datetimefmt.get('<STR_LIT:time>', '<STR_LIT>')<EOL>return timestamp.astimezone(tz=None).strftime(fmt)<EOL>", "docstring": "Convert UTC datetime into user interface string.", "id": "f10036:c13:m1"}
{"signature": "async def consume(self):", "body": "while True:<EOL><INDENT>coro = await self._queue.get()<EOL>assert asyncio.iscoroutine(coro)<EOL>await coro<EOL><DEDENT>", "docstring": "Consume coroutines from the queue by executing them.", "id": "f10036:c2:m2"}
{"signature": "def _exception_handler(self, _loop, context):", "body": "<EOL>self._coroutine_queue.put(self._client.disconnect())<EOL>default_exception = Exception(context.get('<STR_LIT:message>'))<EOL>self._exception = context.get('<STR_LIT>', default_exception)<EOL>", "docstring": "Handle exceptions from the asyncio loop.", "id": "f10036:c1:m2"}
{"signature": "@staticmethod<EOL><INDENT>def from_conversation_event(conversation, conv_event, prev_conv_event,<EOL>datetimefmt, watermark_users=None):<DEDENT>", "body": "user = conversation.get_user(conv_event.user_id)<EOL>if prev_conv_event is not None:<EOL><INDENT>is_new_day = (conv_event.timestamp.astimezone(tz=None).date() !=<EOL>prev_conv_event.timestamp.astimezone(tz=None).date())<EOL><DEDENT>else:<EOL><INDENT>is_new_day = False<EOL><DEDENT>if isinstance(conv_event, hangups.ChatMessageEvent):<EOL><INDENT>return MessageWidget(conv_event.timestamp, conv_event.text,<EOL>datetimefmt, user, show_date=is_new_day,<EOL>watermark_users=watermark_users)<EOL><DEDENT>elif isinstance(conv_event, hangups.RenameEvent):<EOL><INDENT>if conv_event.new_name == '<STR_LIT>':<EOL><INDENT>text = ('<STR_LIT>'<EOL>.format(user.first_name))<EOL><DEDENT>else:<EOL><INDENT>text = ('<STR_LIT>'<EOL>.format(user.first_name, conv_event.new_name))<EOL><DEDENT>return MessageWidget(conv_event.timestamp, text, datetimefmt,<EOL>show_date=is_new_day,<EOL>watermark_users=watermark_users)<EOL><DEDENT>elif isinstance(conv_event, hangups.MembershipChangeEvent):<EOL><INDENT>event_users = [conversation.get_user(user_id) for user_id<EOL>in conv_event.participant_ids]<EOL>names = '<STR_LIT:U+002CU+0020>'.join([user.full_name for user in event_users])<EOL>if conv_event.type_ == hangups.MEMBERSHIP_CHANGE_TYPE_JOIN:<EOL><INDENT>text = ('<STR_LIT>'<EOL>.format(user.first_name, names))<EOL><DEDENT>else:  <EOL><INDENT>text = ('<STR_LIT>'.format(names))<EOL><DEDENT>return MessageWidget(conv_event.timestamp, text, datetimefmt,<EOL>show_date=is_new_day,<EOL>watermark_users=watermark_users)<EOL><DEDENT>elif isinstance(conv_event, hangups.HangoutEvent):<EOL><INDENT>text = {<EOL>hangups.HANGOUT_EVENT_TYPE_START: (<EOL>'<STR_LIT>'<EOL>),<EOL>hangups.HANGOUT_EVENT_TYPE_END: (<EOL>'<STR_LIT>'<EOL>),<EOL>hangups.HANGOUT_EVENT_TYPE_ONGOING: (<EOL>'<STR_LIT>'<EOL>),<EOL>}.get(conv_event.event_type, '<STR_LIT>')<EOL>return 
MessageWidget(conv_event.timestamp, text, datetimefmt,<EOL>show_date=is_new_day,<EOL>watermark_users=watermark_users)<EOL><DEDENT>elif isinstance(conv_event, hangups.GroupLinkSharingModificationEvent):<EOL><INDENT>status_on = hangups.GROUP_LINK_SHARING_STATUS_ON<EOL>status_text = ('<STR_LIT>' if conv_event.new_status == status_on<EOL>else '<STR_LIT>')<EOL>text = '<STR_LIT>'.format(user.first_name,<EOL>status_text)<EOL>return MessageWidget(conv_event.timestamp, text, datetimefmt,<EOL>show_date=is_new_day,<EOL>watermark_users=watermark_users)<EOL><DEDENT>else:<EOL><INDENT>text = '<STR_LIT>'<EOL>return MessageWidget(conv_event.timestamp, text, datetimefmt,<EOL>show_date=is_new_day,<EOL>watermark_users=watermark_users)<EOL><DEDENT>", "docstring": "Return MessageWidget representing a ConversationEvent.\n\n        Returns None if the ConversationEvent does not have a widget\n        representation.", "id": "f10036:c13:m3"}
{"signature": "def keypress(self, size, key):", "body": "<EOL>self._coroutine_queue.put(self._client.set_active())<EOL>self._coroutine_queue.put(self._conversation.update_read_timestamp())<EOL>return super().keypress(size, key)<EOL>", "docstring": "Handle marking messages as read and keeping client active.", "id": "f10036:c15:m2"}
{"signature": "def _on_event(self, conv_event):", "body": "if isinstance(conv_event, hangups.ChatMessageEvent):<EOL><INDENT>self._typing_statuses[conv_event.user_id] = (<EOL>hangups.TYPING_TYPE_STOPPED<EOL>)<EOL>self._update()<EOL><DEDENT>", "docstring": "Make users stop typing when they send a message.", "id": "f10036:c12:m5"}
{"signature": "def get_focus(self):", "body": "return (self[self._focus_position], self._focus_position)<EOL>", "docstring": "Return (widget, position) tuple.", "id": "f10036:c14:m11"}
{"signature": "def set_terminal_title(title):", "body": "sys.stdout.write(\"<STR_LIT>\".format(title))<EOL>", "docstring": "Use an xterm escape sequence to set the terminal title.", "id": "f10036:m0"}
{"signature": "def add_conversation_tab(self, conv_id, switch=False):", "body": "conv_widget = self.get_conv_widget(conv_id)<EOL>self._tabbed_window.set_tab(conv_widget, switch=switch,<EOL>title=conv_widget.title)<EOL>", "docstring": "Add conversation tab if not present, and optionally switch to it.", "id": "f10036:c1:m7"}
{"signature": "def _input_filter(self, keys, _):", "body": "if keys == [self._keys['<STR_LIT>']]:<EOL><INDENT>if self._urwid_loop.widget == self._tabbed_window:<EOL><INDENT>self._show_menu()<EOL><DEDENT>else:<EOL><INDENT>self._hide_menu()<EOL><DEDENT><DEDENT>elif keys == [self._keys['<STR_LIT>']]:<EOL><INDENT>self._coroutine_queue.put(self._client.disconnect())<EOL><DEDENT>else:<EOL><INDENT>return keys<EOL><DEDENT>", "docstring": "Handle global keybindings.", "id": "f10036:c1:m3"}
{"signature": "def add_color_to_scheme(scheme, name, foreground, background, palette_colors):", "body": "if foreground is None and background is None:<EOL><INDENT>return scheme<EOL><DEDENT>new_scheme = []<EOL>for item in scheme:<EOL><INDENT>if item[<NUM_LIT:0>] == name:<EOL><INDENT>if foreground is None:<EOL><INDENT>foreground = item[<NUM_LIT:1>]<EOL><DEDENT>if background is None:<EOL><INDENT>background = item[<NUM_LIT:2>]<EOL><DEDENT>if palette_colors > <NUM_LIT:16>:<EOL><INDENT>new_scheme.append((name, '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', foreground, background))<EOL><DEDENT>else:<EOL><INDENT>new_scheme.append((name, foreground, background))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>new_scheme.append(item)<EOL><DEDENT><DEDENT>return new_scheme<EOL>", "docstring": "Add foreground and background colours to a color scheme", "id": "f10037:m1"}
{"signature": "def get_conv_name(conv, truncate=False, show_unread=False):", "body": "num_unread = len([conv_event for conv_event in conv.unread_events if<EOL>isinstance(conv_event, hangups.ChatMessageEvent) and<EOL>not conv.get_user(conv_event.user_id).is_self])<EOL>if show_unread and num_unread > <NUM_LIT:0>:<EOL><INDENT>postfix = '<STR_LIT>'.format(num_unread)<EOL><DEDENT>else:<EOL><INDENT>postfix = '<STR_LIT>'<EOL><DEDENT>if conv.name is not None:<EOL><INDENT>return conv.name + postfix<EOL><DEDENT>else:<EOL><INDENT>participants = sorted(<EOL>(user for user in conv.users if not user.is_self),<EOL>key=lambda user: user.id_<EOL>)<EOL>names = [user.first_name for user in participants]<EOL>if not participants:<EOL><INDENT>return \"<STR_LIT>\" + postfix<EOL><DEDENT>if len(participants) == <NUM_LIT:1>:<EOL><INDENT>return participants[<NUM_LIT:0>].full_name + postfix<EOL><DEDENT>elif truncate and len(participants) > <NUM_LIT:2>:<EOL><INDENT>return ('<STR_LIT:U+002CU+0020>'.join(names[:<NUM_LIT:2>] + ['<STR_LIT>'.format(len(names) - <NUM_LIT:2>)]) +<EOL>postfix)<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT:U+002CU+0020>'.join(names) + postfix<EOL><DEDENT><DEDENT>", "docstring": "Return a readable name for a conversation.\n\n    If the conversation has a custom name, use the custom name. Otherwise, for\n    one-to-one conversations, the name is the full name of the other user. For\n    group conversations, the name is a comma-separated list of first names. If\n    the group conversation is empty, the name is \"Empty Conversation\".\n\n    If truncate is true, only show up to two names in a group conversation.\n\n    If show_unread is True, if there are unread chat messages, show the number\n    of unread chat messages in parentheses after the conversation name.", "id": "f10037:m0"}
{"signature": "def remove_observer(self, callback):", "body": "if callback not in self._observers:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>.format(callback, self))<EOL><DEDENT>self._observers.remove(callback)<EOL>", "docstring": "Remove an observer from this event.\n\n        Args:\n            callback: A function or coroutine callback to remove from this\n                event.\n\n        Raises:\n            ValueError: If the callback is not an observer of this event.", "id": "f10038:c0:m2"}
{"signature": "@property<EOL><INDENT>def attachments(self):<DEDENT>", "body": "raw_attachments = self._event.chat_message.message_content.attachment<EOL>if raw_attachments is None:<EOL><INDENT>raw_attachments = []<EOL><DEDENT>attachments = []<EOL>for attachment in raw_attachments:<EOL><INDENT>for embed_item_type in attachment.embed_item.type:<EOL><INDENT>known_types = [<EOL>hangouts_pb2.ITEM_TYPE_PLUS_PHOTO,<EOL>hangouts_pb2.ITEM_TYPE_PLACE_V2,<EOL>hangouts_pb2.ITEM_TYPE_PLACE,<EOL>hangouts_pb2.ITEM_TYPE_THING,<EOL>]<EOL>if embed_item_type not in known_types:<EOL><INDENT>logger.warning('<STR_LIT>'<EOL>'<STR_LIT>', embed_item_type)<EOL><DEDENT><DEDENT>if attachment.embed_item.HasField('<STR_LIT>'):<EOL><INDENT>attachments.append(<EOL>attachment.embed_item.plus_photo.thumbnail.image_url<EOL>)<EOL><DEDENT><DEDENT>return attachments<EOL>", "docstring": "List of attachments in the message (:class:`list`).", "id": "f10039:c2:m2"}
{"signature": "@property<EOL><INDENT>def participant_ids(self):<DEDENT>", "body": "return [user.UserID(chat_id=id_.chat_id, gaia_id=id_.gaia_id)<EOL>for id_ in self._event.membership_change.participant_ids]<EOL>", "docstring": ":class:`~hangups.user.UserID` of users involved (:class:`list`).", "id": "f10039:c5:m1"}
{"signature": "@property<EOL><INDENT>def old_otr_status(self):<DEDENT>", "body": "return self._event.otr_modification.old_otr_status<EOL>", "docstring": "The conversation's old OTR status.\n\n        May be either ``OFF_THE_RECORD_STATUS_OFF_THE_RECORD`` or\n        ``OFF_THE_RECORD_STATUS_ON_THE_RECORD``.", "id": "f10039:c3:m1"}
{"signature": "@staticmethod<EOL><INDENT>def from_str(text):<DEDENT>", "body": "segment_list = chat_message_parser.parse(text)<EOL>return [ChatMessageSegment(segment.text, **segment.params)<EOL>for segment in segment_list]<EOL>", "docstring": "Construct :class:`ChatMessageSegment` list parsed from a string.\n\n        Args:\n            text (str): Text to parse. May contain line breaks, URLs and\n                formatting markup (simplified Markdown and HTML) to be\n                converted into equivalent segments.\n\n        Returns:\n            List of :class:`ChatMessageSegment` objects.", "id": "f10039:c1:m1"}
{"signature": "@property<EOL><INDENT>def type_(self):<DEDENT>", "body": "return self._event.membership_change.type<EOL>", "docstring": "The type of membership change.\n\n        May be either ``MEMBERSHIP_CHANGE_TYPE_JOIN`` or\n        ``MEMBERSHIP_CHANGE_TYPE_LEAVE``.", "id": "f10039:c5:m0"}
{"signature": "@property<EOL><INDENT>def old_name(self):<DEDENT>", "body": "return self._event.conversation_rename.old_name<EOL>", "docstring": "The conversation's old name (:class:`str`).\n\n        May be an empty string if the conversation had no previous name.", "id": "f10039:c4:m1"}
{"signature": "def __init__(self, text, segment_type=None,<EOL>is_bold=False, is_italic=False, is_strikethrough=False,<EOL>is_underline=False, link_target=None):", "body": "if segment_type is not None:<EOL><INDENT>self.type_ = segment_type<EOL><DEDENT>elif link_target is not None:<EOL><INDENT>self.type_ = hangouts_pb2.SEGMENT_TYPE_LINK<EOL><DEDENT>else:<EOL><INDENT>self.type_ = hangouts_pb2.SEGMENT_TYPE_TEXT<EOL><DEDENT>self.text = text<EOL>self.is_bold = is_bold<EOL>self.is_italic = is_italic<EOL>self.is_strikethrough = is_strikethrough<EOL>self.is_underline = is_underline<EOL>self.link_target = link_target<EOL>", "docstring": "Create a new chat message segment.", "id": "f10039:c1:m0"}
{"signature": "async def lookup_entities(client, args):", "body": "lookup_spec = _get_lookup_spec(args.entity_identifier)<EOL>request = hangups.hangouts_pb2.GetEntityByIdRequest(<EOL>request_header=client.get_request_header(),<EOL>batch_lookup_spec=[lookup_spec],<EOL>)<EOL>res = await client.get_entity_by_id(request)<EOL>for entity_result in res.entity_result:<EOL><INDENT>for entity in entity_result.entity:<EOL><INDENT>print(entity)<EOL><DEDENT><DEDENT>", "docstring": "Search for entities by phone number, email, or gaia_id.", "id": "f10045:m0"}
{"signature": "async def _async_main(example_coroutine, client, args):", "body": "<EOL>task = asyncio.ensure_future(client.connect())<EOL>on_connect = asyncio.Future()<EOL>client.on_connect.add_observer(lambda: on_connect.set_result(None))<EOL>done, _ = await asyncio.wait(<EOL>(on_connect, task), return_when=asyncio.FIRST_COMPLETED<EOL>)<EOL>await asyncio.gather(*done)<EOL>try:<EOL><INDENT>await example_coroutine(client, args)<EOL><DEDENT>except asyncio.CancelledError:<EOL><INDENT>pass<EOL><DEDENT>finally:<EOL><INDENT>await client.disconnect()<EOL>await task<EOL><DEDENT>", "docstring": "Run the example coroutine.", "id": "f10052:m2"}
{"signature": "def _get_parser(extra_args):", "body": "parser = argparse.ArgumentParser(<EOL>formatter_class=argparse.ArgumentDefaultsHelpFormatter,<EOL>)<EOL>dirs = appdirs.AppDirs('<STR_LIT>', '<STR_LIT>')<EOL>default_token_path = os.path.join(dirs.user_cache_dir, '<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>', default=default_token_path,<EOL>help='<STR_LIT>'<EOL>)<EOL>parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>',<EOL>help='<STR_LIT>'<EOL>)<EOL>for extra_arg in extra_args:<EOL><INDENT>parser.add_argument(extra_arg, required=True)<EOL><DEDENT>return parser<EOL>", "docstring": "Return ArgumentParser with any extra arguments.", "id": "f10052:m1"}
{"signature": "def run_example(example_coroutine, *extra_args):", "body": "args = _get_parser(extra_args).parse_args()<EOL>logging.basicConfig(level=logging.DEBUG if args.debug else logging.WARNING)<EOL>cookies = hangups.auth.get_auth_stdin(args.token_path)<EOL>client = hangups.Client(cookies)<EOL>loop = asyncio.get_event_loop()<EOL>task = asyncio.ensure_future(_async_main(example_coroutine, client, args),<EOL>loop=loop)<EOL>try:<EOL><INDENT>loop.run_until_complete(task)<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>task.cancel()<EOL>loop.run_until_complete(task)<EOL><DEDENT>finally:<EOL><INDENT>loop.close()<EOL><DEDENT>", "docstring": "Run a hangups example coroutine.\n\n    Args:\n        example_coroutine (coroutine): Coroutine to run with a connected\n            hangups client and arguments namespace as arguments.\n        extra_args (str): Any extra command line arguments required by the\n            example.", "id": "f10052:m0"}
{"signature": "def print_table(col_tuple, row_tuples):", "body": "col_widths = [max(len(str(row[col])) for row in [col_tuple] + row_tuples)<EOL>for col in range(len(col_tuple))]<EOL>format_str = '<STR_LIT:U+0020>'.join('<STR_LIT>'.format(col_width)<EOL>for col_width in col_widths)<EOL>header_border = '<STR_LIT:U+0020>'.join('<STR_LIT:=>' * col_width for col_width in col_widths)<EOL>print(header_border)<EOL>print(format_str.format(*col_tuple))<EOL>print(header_border)<EOL>for row_tuple in row_tuples:<EOL><INDENT>print(format_str.format(*row_tuple))<EOL><DEDENT>print(header_border)<EOL>print()<EOL>", "docstring": "Print column headers and rows as a reStructuredText table.\n\n    Args:\n        col_tuple: Tuple of column name strings.\n        row_tuples: List of tuples containing row data.", "id": "f10057:m0"}
{"signature": "def generate_enum_doc(enum_descriptor, locations, path, name_prefix='<STR_LIT>'):", "body": "print(make_subsection(name_prefix + enum_descriptor.name))<EOL>location = locations[path]<EOL>if location.HasField('<STR_LIT>'):<EOL><INDENT>print(textwrap.dedent(location.leading_comments))<EOL><DEDENT>row_tuples = []<EOL>for value_index, value in enumerate(enum_descriptor.value):<EOL><INDENT>field_location = locations[path + (<NUM_LIT:2>, value_index)]<EOL>row_tuples.append((<EOL>make_code(value.name),<EOL>value.number,<EOL>textwrap.fill(get_comment_from_location(field_location), INFINITY),<EOL>))<EOL><DEDENT>print_table(('<STR_LIT:Name>', '<STR_LIT>', '<STR_LIT>'), row_tuples)<EOL>", "docstring": "Generate doc for an enum.\n\n    Args:\n        enum_descriptor: descriptor_pb2.EnumDescriptorProto instance for enum\n            to generate docs for.\n        locations: Dictionary of location paths tuples to\n            descriptor_pb2.SourceCodeInfo.Location instances.\n        path: Path tuple to the enum definition.\n        name_prefix: Optional prefix for this enum's name.", "id": "f10057:m6"}
{"signature": "def make_code(text):", "body": "return '<STR_LIT>'.format(text)<EOL>", "docstring": "Format text as reStructuredText code.\n\n    Args:\n        text: Text string to format.\n\n    Returns:\n        Formatted text string.", "id": "f10057:m3"}
{"signature": "def generate_message_doc(message_descriptor, locations, path, name_prefix='<STR_LIT>'):", "body": "<EOL>prefixed_name = name_prefix + message_descriptor.name<EOL>print(make_subsection(prefixed_name))<EOL>location = locations[path]<EOL>if location.HasField('<STR_LIT>'):<EOL><INDENT>print(textwrap.dedent(location.leading_comments))<EOL><DEDENT>row_tuples = []<EOL>for field_index, field in enumerate(message_descriptor.field):<EOL><INDENT>field_location = locations[path + (<NUM_LIT:2>, field_index)]<EOL>if field.type not in [<NUM_LIT:11>, <NUM_LIT>]:<EOL><INDENT>type_str = TYPE_TO_STR[field.type]<EOL><DEDENT>else:<EOL><INDENT>type_str = make_link(field.type_name.lstrip('<STR_LIT:.>'))<EOL><DEDENT>row_tuples.append((<EOL>make_code(field.name),<EOL>field.number,<EOL>type_str,<EOL>LABEL_TO_STR[field.label],<EOL>textwrap.fill(get_comment_from_location(field_location), INFINITY),<EOL>))<EOL><DEDENT>print_table(('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'),<EOL>row_tuples)<EOL>nested_types = enumerate(message_descriptor.nested_type)<EOL>for index, nested_message_desc in nested_types:<EOL><INDENT>generate_message_doc(nested_message_desc, locations,<EOL>path + (<NUM_LIT:3>, index),<EOL>name_prefix=prefixed_name + '<STR_LIT:.>')<EOL><DEDENT>for index, nested_enum_desc in enumerate(message_descriptor.enum_type):<EOL><INDENT>generate_enum_doc(nested_enum_desc, locations, path + (<NUM_LIT:4>, index),<EOL>name_prefix=prefixed_name + '<STR_LIT:.>')<EOL><DEDENT>", "docstring": "Generate docs for message and nested messages and enums.\n\n    Args:\n        message_descriptor: descriptor_pb2.DescriptorProto instance for message\n            to generate docs for.\n        locations: Dictionary of location paths tuples to\n            descriptor_pb2.SourceCodeInfo.Location instances.\n        path: Path tuple to the message definition.\n        name_prefix: Optional prefix for this message's name.", "id": "f10057:m7"}
{"signature": "def compile_protofile(proto_file_path):", "body": "out_file = tempfile.mkstemp()[<NUM_LIT:1>]<EOL>try:<EOL><INDENT>subprocess.check_output(['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', out_file,<EOL>proto_file_path])<EOL><DEDENT>except subprocess.CalledProcessError as e:<EOL><INDENT>sys.exit('<STR_LIT>'.format(e.returncode))<EOL><DEDENT>return out_file<EOL>", "docstring": "Compile proto file to descriptor set.\n\n    Args:\n        proto_file_path: Path to proto file to compile.\n\n    Returns:\n        Path to file containing compiled descriptor set.\n\n    Raises:\n        SystemExit if the compilation fails.", "id": "f10057:m8"}
{"signature": "@fixture<EOL>def post_token_dropbox(dropbox_container, config, post_token):", "body": "from briefkasten import parse_post_token<EOL>return dropbox_container.get_dropbox(<EOL>parse_post_token(<EOL>post_token,<EOL>secret=config.registry.settings['<STR_LIT>']<EOL>)<EOL>)<EOL>", "docstring": "returns a dropbox instance matching the given post_token", "id": "f10063:m8"}
{"signature": "@view_config(<EOL>route_name='<STR_LIT>',<EOL>request_method='<STR_LIT:GET>',<EOL>renderer='<STR_LIT>')<EOL>def dropbox_form(request):", "body": "from briefkasten import generate_post_token<EOL>token = generate_post_token(secret=request.registry.settings['<STR_LIT>'])<EOL>return dict(<EOL>action=request.route_url('<STR_LIT>', token=token),<EOL>fileupload_url=request.route_url('<STR_LIT>', token=token),<EOL>**defaults(request))<EOL>", "docstring": "generates a dropbox uid and renders the submission form with a signed version of that id", "id": "f10069:m1"}
{"signature": "@view_config(<EOL>route_name='<STR_LIT>',<EOL>request_method='<STR_LIT:POST>')<EOL>def dropbox_submission(dropbox, request):", "body": "try:<EOL><INDENT>data = dropbox_schema.deserialize(request.POST)<EOL><DEDENT>except Exception:<EOL><INDENT>return HTTPFound(location=request.route_url('<STR_LIT>'))<EOL><DEDENT>dropbox.message = data.get('<STR_LIT:message>')<EOL>if '<STR_LIT>' in dropbox.settings:<EOL><INDENT>dropbox.from_watchdog = is_equal(<EOL>unicode(dropbox.settings['<STR_LIT>']),<EOL>data.pop('<STR_LIT>', u'<STR_LIT>'))<EOL><DEDENT>if data.get('<STR_LIT>') is not None:<EOL><INDENT>dropbox.add_attachment(data['<STR_LIT>'])<EOL><DEDENT>dropbox.submit()<EOL>drop_url = request.route_url('<STR_LIT>', drop_id=dropbox.drop_id)<EOL>print(\"<STR_LIT>\" % drop_url)<EOL>return HTTPFound(location=drop_url)<EOL>", "docstring": "handles the form submission, redirects to the dropbox's status page.", "id": "f10069:m3"}
{"signature": "@view_config(<EOL>route_name='<STR_LIT>',<EOL>accept='<STR_LIT:application/json>',<EOL>renderer='<STR_LIT>',<EOL>request_method='<STR_LIT:POST>')<EOL>def dropbox_fileupload(dropbox, request):", "body": "attachment = request.POST['<STR_LIT>']<EOL>attached = dropbox.add_attachment(attachment)<EOL>return dict(<EOL>files=[dict(<EOL>name=attached,<EOL>type=attachment.type,<EOL>)]<EOL>)<EOL>", "docstring": "accepts a single file upload and adds it to the dropbox as attachment", "id": "f10069:m2"}
{"signature": "def _create_archive(self):", "body": "self.status = '<STR_LIT>'<EOL>return self._create_encrypted_zip(source='<STR_LIT>', fs_target_dir=self.container.fs_archive_cleansed)<EOL>", "docstring": "creates an encrypted archive of the dropbox outside of the drop directory.", "id": "f10070:c1:m9"}
{"signature": "@property<EOL><INDENT>def fs_cleansed_attachments(self):<DEDENT>", "body": "if exists(self.fs_cleansed_attachment_container):<EOL><INDENT>return [join(self.fs_cleansed_attachment_container, attachment)<EOL>for attachment in listdir(self.fs_cleansed_attachment_container)]<EOL><DEDENT>else:<EOL><INDENT>return []<EOL><DEDENT>", "docstring": "returns a list of absolute paths to the cleansed attachements", "id": "f10070:c1:m25"}
{"signature": "def sanitize_filename(filename):", "body": "<EOL>token = generate_drop_id()<EOL>name, extension = splitext(filename)<EOL>if extension:<EOL><INDENT>return '<STR_LIT>' % (token, extension)<EOL><DEDENT>else:<EOL><INDENT>return token<EOL><DEDENT>", "docstring": "preserve the file ending, but replace the name with a random token", "id": "f10070:m1"}
{"signature": "@property<EOL><INDENT>def fs_dirty_attachments(self):<DEDENT>", "body": "if exists(self.fs_attachment_container):<EOL><INDENT>return [join(self.fs_attachment_container, attachment)<EOL>for attachment in listdir(self.fs_attachment_container)]<EOL><DEDENT>else:<EOL><INDENT>return []<EOL><DEDENT>", "docstring": "returns a list of absolute paths to the attachements", "id": "f10070:c1:m24"}
{"signature": "@property<EOL><INDENT>def message(self):<DEDENT>", "body": "try:<EOL><INDENT>with open(join(self.fs_path, '<STR_LIT:message>')) as message_file:<EOL><INDENT>return '<STR_LIT>'.join([line.decode('<STR_LIT:utf-8>') for line in message_file.readlines()])<EOL><DEDENT><DEDENT>except IOError:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>", "docstring": "returns the user submitted text", "id": "f10070:c1:m14"}
{"signature": "def get_dropbox(self, drop_id):", "body": "return Dropbox(self, drop_id=drop_id)<EOL>", "docstring": "returns the dropbox with the given id, if it does not exist an empty dropbox\n        will be created and returned", "id": "f10070:c0:m3"}
{"signature": "def cleanup(self):", "body": "try:<EOL><INDENT>remove(join(self.fs_path, '<STR_LIT:message>'))<EOL>remove(join(self.fs_path, '<STR_LIT>'))<EOL><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT>shutil.rmtree(join(self.fs_path, '<STR_LIT>'), ignore_errors=True)<EOL>shutil.rmtree(join(self.fs_path, '<STR_LIT>'), ignore_errors=True)<EOL>", "docstring": "ensures that no data leaks from drop after processing by\n        removing all data except the status file", "id": "f10070:c1:m4"}
{"signature": "@property<EOL><INDENT>def num_attachments(self):<DEDENT>", "body": "if exists(self.fs_attachment_container):<EOL><INDENT>return len(listdir(self.fs_attachment_container))<EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "returns the current number of uploaded attachments in the filesystem", "id": "f10070:c1:m11"}
{"signature": "@message.setter<EOL><INDENT>def message(self, newtext):<DEDENT>", "body": "self._write_message(self.fs_path, '<STR_LIT:message>', newtext)<EOL>", "docstring": "overwrite the message text. this also updates the corresponding file.", "id": "f10070:c1:m15"}
{"signature": "def sendMultiPart(smtp, gpg_context, sender, recipients, subject, text, attachments):", "body": "sent = <NUM_LIT:0><EOL>for to in recipients:<EOL><INDENT>if not to.startswith('<STR_LIT:<>'):<EOL><INDENT>uid = '<STR_LIT>' % to<EOL><DEDENT>else:<EOL><INDENT>uid = to<EOL><DEDENT>if not checkRecipient(gpg_context, uid):<EOL><INDENT>continue<EOL><DEDENT>msg = MIMEMultipart()<EOL>msg['<STR_LIT>'] = sender<EOL>msg['<STR_LIT>'] = to<EOL>msg['<STR_LIT>'] = subject<EOL>msg[\"<STR_LIT>\"] = formatdate(localtime=True)<EOL>msg.preamble = u'<STR_LIT>'<EOL>attach = MIMEText(str(gpg_context.encrypt(text.encode('<STR_LIT:utf-8>'), uid, always_trust=True)))<EOL>attach.set_charset('<STR_LIT>')<EOL>msg.attach(attach)<EOL>for attachment in attachments:<EOL><INDENT>with open(attachment, '<STR_LIT:rb>') as fp:<EOL><INDENT>attach = MIMEBase('<STR_LIT>', '<STR_LIT>')<EOL>attach.set_payload(str(gpg_context.encrypt_file(fp, uid, always_trust=True)))<EOL><DEDENT>attach.add_header('<STR_LIT>', '<STR_LIT>', filename=basename('<STR_LIT>' % attachment))<EOL>msg.attach(attach)<EOL><DEDENT>smtp.begin()<EOL>smtp.sendmail(sender, to, msg.as_string())<EOL>smtp.quit()<EOL>sent += <NUM_LIT:1><EOL><DEDENT>return sent<EOL>", "docstring": "a helper method that composes and sends an email with attachments\n    requires a pre-configured smtplib.SMTP instance", "id": "f10072:m2"}
{"signature": "def dropbox_factory(request):", "body": "try:<EOL><INDENT>return request.registry.settings['<STR_LIT>'].get_dropbox(request.matchdict['<STR_LIT>'])<EOL><DEDENT>except KeyError:<EOL><INDENT>raise HTTPNotFound('<STR_LIT>')<EOL><DEDENT>", "docstring": "expects the id of an existing dropbox and returns its instance", "id": "f10073:m3"}
{"signature": "def german_locale(request):", "body": "return '<STR_LIT>'<EOL>", "docstring": "a 'negotiator' that always returns german", "id": "f10073:m6"}
{"signature": "def generate_post_token(secret):", "body": "return URLSafeTimedSerializer(secret, salt=u'<STR_LIT>').dumps(generate_drop_id())<EOL>", "docstring": "returns a URL safe, signed token that contains a UUID", "id": "f10073:m0"}
{"signature": "def main(global_config, **settings):", "body": "return configure(global_config, **settings).make_wsgi_app()<EOL>", "docstring": "Configure and create the main application.", "id": "f10073:m8"}
{"signature": "@fab.task<EOL>def upload_poudriere_assets():", "body": "upload_distfiles()<EOL>upload_packages()<EOL>", "docstring": "upload local ports tree, distfiles, and packages from poudriere", "id": "f10074:m7"}
{"signature": "@task<EOL>def upload_backend(index='<STR_LIT>', user=None):", "body": "get_vars()<EOL>use_devpi(index=index)<EOL>with fab.lcd('<STR_LIT>'):<EOL><INDENT>fab.local('<STR_LIT>')<EOL><DEDENT>", "docstring": "Build the backend and upload it to the remote server at the given index", "id": "f10075:m3"}
{"signature": "@task<EOL>def upload_pgp_keys():", "body": "get_vars()<EOL>upload_target = '<STR_LIT>'<EOL>with fab.settings(fab.hide('<STR_LIT>')):<EOL><INDENT>fab.run('<STR_LIT>' % upload_target)<EOL>fab.run('<STR_LIT>' % upload_target)<EOL>local_key_path = path.join(fab.env['<STR_LIT>'], fab.env.instance.config['<STR_LIT>'])<EOL>remote_key_path = '<STR_LIT>'.format(**AV)<EOL>rsync('<STR_LIT>', local_key_path, '<STR_LIT>' % upload_target)<EOL>fab.run('<STR_LIT>' % (AV['<STR_LIT>'], remote_key_path))<EOL>fab.run('<STR_LIT>' % remote_key_path)<EOL>with fab.shell_env(GNUPGHOME=remote_key_path):<EOL><INDENT>fab.sudo('''<STR_LIT>''' % upload_target,<EOL>user=AV['<STR_LIT>'], shell_escape=False)<EOL><DEDENT>fab.run('<STR_LIT>' % upload_target)<EOL><DEDENT>", "docstring": "upload and/or update the PGP keys for editors, import them into PGP", "id": "f10075:m2"}
{"signature": "def start(self):", "body": "if self.extra_args:<EOL><INDENT>sys.exit('<STR_LIT>'.format(self.name))<EOL><DEDENT>else:<EOL><INDENT>if self._toggle_value:<EOL><INDENT>nbextensions.install_nbextension_python(<EOL>_pkg_name, overwrite=True, symlink=False,<EOL>user=self.user, sys_prefix=self.sys_prefix, prefix=None,<EOL>nbextensions_dir=None, logger=None)<EOL><DEDENT>else:<EOL><INDENT>nbextensions.uninstall_nbextension_python(<EOL>_pkg_name, user=self.user, sys_prefix=self.sys_prefix,<EOL>prefix=None, nbextensions_dir=None, logger=None)<EOL><DEDENT>self.toggle_nbextension_python(_pkg_name)<EOL>self.toggle_server_extension_python(_pkg_name)<EOL><DEDENT>", "docstring": "Perform the App's actions as configured.", "id": "f10083:c0:m3"}
{"signature": "def parse_command_line(self, argv=None):", "body": "conflicting_flags = set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>if len(conflicting_flags.intersection(set(argv))) > <NUM_LIT:1>:<EOL><INDENT>raise serverextensions.ArgumentConflict(<EOL>'<STR_LIT>')<EOL><DEDENT>return super(ToggleJupyterTensorboardApp,<EOL>self).parse_command_line(argv)<EOL>", "docstring": "Overriden to check for conflicting flags\nSince notebook version doesn't do it well (or, indeed, at all)", "id": "f10083:c0:m0"}
{"signature": "def get_category(category_id):", "body": "try:<EOL><INDENT>res = _pybossa_req('<STR_LIT>', '<STR_LIT>', category_id)<EOL>if res.get('<STR_LIT:id>'):<EOL><INDENT>return Category(res)<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Return a PYBOSSA Category for the category_id.\n\n    :param category_id: PYBOSSA Category ID\n    :type category_id: integer\n    :rtype: PYBOSSA Category\n    :returns: A PYBOSSA Category object", "id": "f10088:m9"}
{"signature": "def delete_project(project_id):", "body": "try:<EOL><INDENT>res = _pybossa_req('<STR_LIT>', '<STR_LIT>', project_id)<EOL>if type(res).__name__ == '<STR_LIT:bool>':<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Delete a Project with id = project_id.\n\n    :param project_id: PYBOSSA Project ID\n    :type project_id: integer\n    :returns: True -- the response status code", "id": "f10088:m7"}
{"signature": "def update_helping_material(helpingmaterial):", "body": "try:<EOL><INDENT>helpingmaterial_id = helpingmaterial.id<EOL>helpingmaterial = _forbidden_attributes(helpingmaterial)<EOL>res = _pybossa_req('<STR_LIT>', '<STR_LIT>',<EOL>helpingmaterial_id, payload=helpingmaterial.data)<EOL>if res.get('<STR_LIT:id>'):<EOL><INDENT>return HelpingMaterial(res)<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Update a helping material for a given helping material ID.\n\n    :param helpingmaterial: PYBOSSA helping material", "id": "f10088:m29"}
{"signature": "def __setattr__(self, name, value):", "body": "data = self.__dict__['<STR_LIT:data>']<EOL>if name == '<STR_LIT:data>':<EOL><INDENT>self.__dict__['<STR_LIT:data>'] = value<EOL>return True<EOL><DEDENT>if name in data:<EOL><INDENT>data[name] = value<EOL>return True<EOL><DEDENT>raise AttributeError('<STR_LIT>' + name)<EOL>", "docstring": "Set attribute.", "id": "f10088:c0:m2"}
{"signature": "def find_category(**kwargs):", "body": "try:<EOL><INDENT>res = _pybossa_req('<STR_LIT>', '<STR_LIT>', params=kwargs)<EOL>if type(res).__name__ == '<STR_LIT:list>':<EOL><INDENT>return [Category(category) for category in res]<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Return a list with matching Category arguments.\n\n    :param kwargs: PYBOSSA Category members\n    :rtype: list\n    :returns: A list of project that match the kwargs", "id": "f10088:m10"}
{"signature": "def create_category(name, description):", "body": "try:<EOL><INDENT>category = dict(name=name, short_name=name.lower().replace(\"<STR_LIT:U+0020>\", \"<STR_LIT>\"),<EOL>description=description)<EOL>res = _pybossa_req('<STR_LIT>', '<STR_LIT>', payload=category)<EOL>if res.get('<STR_LIT:id>'):<EOL><INDENT>return Category(res)<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Create a Category.\n\n    :param name: PYBOSSA Category Name\n    :type name: string\n    :param description: PYBOSSA Category description\n    :type decription: string\n    :returns: True -- the response status code", "id": "f10088:m11"}
{"signature": "def set(key, val):", "body": "global _opts<EOL>_opts[key] = val<EOL>", "docstring": "Set key to value.", "id": "f10088:m0"}
{"signature": "def delete_task(task_id):", "body": "<EOL>try:<EOL><INDENT>res = _pybossa_req('<STR_LIT>', '<STR_LIT>', task_id)<EOL>if type(res).__name__ == '<STR_LIT:bool>':<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Delete a task for a given task ID.\n\n    :param task: PYBOSSA task", "id": "f10088:m18"}
{"signature": "def update_result(result):", "body": "try:<EOL><INDENT>result_id = result.id<EOL>result = _forbidden_attributes(result)<EOL>res = _pybossa_req('<STR_LIT>', '<STR_LIT:result>', result_id, payload=result.data)<EOL>if res.get('<STR_LIT:id>'):<EOL><INDENT>return Result(res)<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Update a result for a given result ID.\n\n    :param result: PYBOSSA result", "id": "f10088:m24"}
{"signature": "def update_task(task):", "body": "try:<EOL><INDENT>task_id = task.id<EOL>task = _forbidden_attributes(task)<EOL>res = _pybossa_req('<STR_LIT>', '<STR_LIT>', task_id, payload=task.data)<EOL>if res.get('<STR_LIT:id>'):<EOL><INDENT>return Task(res)<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Update a task for a given task ID.\n\n    :param task: PYBOSSA task", "id": "f10088:m17"}
{"signature": "def update_project(project):", "body": "try:<EOL><INDENT>project_id = project.id<EOL>project = _forbidden_attributes(project)<EOL>res = _pybossa_req('<STR_LIT>', '<STR_LIT>', project_id, payload=project.data)<EOL>if res.get('<STR_LIT:id>'):<EOL><INDENT>return Project(res)<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Update a project instance.\n\n    :param project: PYBOSSA project\n    :type project: PYBOSSA Project\n    :returns: True -- the response status code", "id": "f10088:m6"}
{"signature": "def find_helping_materials(project_id, **kwargs):", "body": "try:<EOL><INDENT>kwargs['<STR_LIT>'] = project_id<EOL>res = _pybossa_req('<STR_LIT>', '<STR_LIT>', params=kwargs)<EOL>if type(res).__name__ == '<STR_LIT:list>':<EOL><INDENT>return [HelpingMaterial(helping) for helping in res]<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Return a list of matched helping materials for a given project ID.\n\n    :param project_id: PYBOSSA Project ID\n    :type project_id: integer\n    :param kwargs: PYBOSSA HelpingMaterial members\n    :type info: dict\n    :rtype: list\n    :returns: A list of helping materials that match the kwargs", "id": "f10088:m28"}
{"signature": "def __repr__(self):  ", "body": "return '<STR_LIT>' + str(self.id) + '<STR_LIT:)>'<EOL>", "docstring": "Return representation.", "id": "f10088:c5:m0"}
{"signature": "def get_project(project_id):", "body": "try:<EOL><INDENT>res = _pybossa_req('<STR_LIT>', '<STR_LIT>', project_id)<EOL>if res.get('<STR_LIT:id>'):<EOL><INDENT>return Project(res)<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Return a PYBOSSA Project for the project_id.\n\n    :param project_id: PYBOSSA Project ID\n    :type project_id: integer\n    :rtype: PYBOSSA Project\n    :returns: A PYBOSSA Project object", "id": "f10088:m3"}
{"signature": "def create_project(name, short_name, description):", "body": "try:<EOL><INDENT>project = dict(name=name, short_name=short_name,<EOL>description=description)<EOL>res = _pybossa_req('<STR_LIT>', '<STR_LIT>', payload=project)<EOL>if res.get('<STR_LIT:id>'):<EOL><INDENT>return Project(res)<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Create a project.\n\n    :param name: PYBOSSA Project Name\n    :type name: string\n    :param short_name: PYBOSSA Project short name or slug\n    :type short_name: string\n    :param description: PYBOSSA Project description\n    :type decription: string\n    :returns: True -- the response status code", "id": "f10088:m5"}
{"signature": "def create_helpingmaterial(project_id, info, media_url=None, file_path=None):", "body": "try:<EOL><INDENT>helping = dict(<EOL>project_id=project_id,<EOL>info=info,<EOL>media_url=None,<EOL>)<EOL>if file_path:<EOL><INDENT>files = {'<STR_LIT:file>': open(file_path, '<STR_LIT:rb>')}<EOL>payload = {'<STR_LIT>': project_id}<EOL>res = _pybossa_req('<STR_LIT>', '<STR_LIT>',<EOL>payload=payload, files=files)<EOL><DEDENT>else:<EOL><INDENT>res = _pybossa_req('<STR_LIT>', '<STR_LIT>', payload=helping)<EOL><DEDENT>if res.get('<STR_LIT:id>'):<EOL><INDENT>return HelpingMaterial(res)<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Create a helping material for a given project ID.\n\n    :param project_id: PYBOSSA Project ID\n    :type project_id: integer\n    :param info: PYBOSSA Helping Material info JSON field\n    :type info: dict\n    :param media_url: URL for a media file (image, video or audio)\n    :type media_url: string\n    :param file_path: File path to the local image, video or sound to upload. \n    :type file_path: string\n    :returns: True -- the response status code", "id": "f10088:m26"}
{"signature": "def find_results(project_id, **kwargs):", "body": "try:<EOL><INDENT>kwargs['<STR_LIT>'] = project_id<EOL>res = _pybossa_req('<STR_LIT>', '<STR_LIT:result>', params=kwargs)<EOL>if type(res).__name__ == '<STR_LIT:list>':<EOL><INDENT>return [Result(result) for result in res]<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Return a list of matched results for a given project ID.\n\n    :param project_id: PYBOSSA Project ID\n    :type project_id: integer\n    :param kwargs: PYBOSSA Results members\n    :type info: dict\n    :rtype: list\n    :returns: A list of results that match the kwargs", "id": "f10088:m23"}
{"signature": "def _pybossa_req(method, domain, id=None, payload=None, params={},<EOL>headers={'<STR_LIT>': '<STR_LIT:application/json>'},<EOL>files=None):", "body": "url = _opts['<STR_LIT>'] + '<STR_LIT>' + domain<EOL>if id is not None:<EOL><INDENT>url += '<STR_LIT:/>' + str(id)<EOL><DEDENT>if '<STR_LIT>' in _opts:<EOL><INDENT>params['<STR_LIT>'] = _opts['<STR_LIT>']<EOL><DEDENT>if method == '<STR_LIT>':<EOL><INDENT>r = requests.get(url, params=params)<EOL><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>if files is None and headers['<STR_LIT>'] == '<STR_LIT:application/json>':<EOL><INDENT>r = requests.post(url, params=params, headers=headers,<EOL>data=json.dumps(payload))<EOL><DEDENT>else:<EOL><INDENT>r = requests.post(url, params=params, files=files, data=payload)<EOL><DEDENT><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>r = requests.put(url, params=params, headers=headers,<EOL>data=json.dumps(payload))<EOL><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>r = requests.delete(url, params=params, headers=headers,<EOL>data=json.dumps(payload))<EOL><DEDENT>if r.status_code // <NUM_LIT:100> == <NUM_LIT:2>:<EOL><INDENT>if r.text and r.text != '<STR_LIT>':<EOL><INDENT>return json.loads(r.text)<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return json.loads(r.text)<EOL><DEDENT>", "docstring": "Send a JSON request.\n\nReturns True if everything went well, otherwise it returns the status\ncode of the response.", "id": "f10088:m1"}
{"signature": "def get_categories(limit=<NUM_LIT:20>, offset=<NUM_LIT:0>, last_id=None):", "body": "if last_id is not None:<EOL><INDENT>params = dict(limit=limit, last_id=last_id)<EOL><DEDENT>else:<EOL><INDENT>params = dict(limit=limit, offset=offset)<EOL>print(OFFSET_WARNING)<EOL><DEDENT>try:<EOL><INDENT>res = _pybossa_req('<STR_LIT>', '<STR_LIT>',<EOL>params=params)<EOL>if type(res).__name__ == '<STR_LIT:list>':<EOL><INDENT>return [Category(category) for category in res]<EOL><DEDENT>else:<EOL><INDENT>raise TypeError<EOL><DEDENT><DEDENT>except:<EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Return a list of registered categories.\n\n    :param limit: Number of returned items, default 20\n    :type limit: integer\n    :param offset: Offset for the query, default 0\n    :type offset: integer\n    :param last_id: id of the last category, used for pagination. If provided, offset is ignored\n    :type last_id: integer\n    :rtype: list\n    :returns: A list of PYBOSSA Categories", "id": "f10088:m8"}
{"signature": "def render_pep440_post(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] .\n\n    The \".dev0\" means dirty. Note that .dev0 sorts backwards\n    (a dirty tree will appear \"older\" than the corresponding clean one),\n    but you shouldn't be releasing software with -dirty anyways.\n\n    Exceptions:\n    1: no tags. 0.postDISTANCE[.dev0]", "id": "f10100:m11"}
{"signature": "def render_git_describe(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL><DEDENT>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[-DISTANCE-gHEX][-dirty].\n\n    Like 'git describe --tags --dirty --always'.\n\n    Exceptions:\n    1: no tags. HEX[-dirty]  (note: no 'g' prefix)", "id": "f10100:m13"}
{"signature": "def versions_from_parentdir(parentdir_prefix, root, verbose):", "body": "dirname = os.path.basename(root)<EOL>if not dirname.startswith(parentdir_prefix):<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (root, dirname, parentdir_prefix))<EOL><DEDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>return {\"<STR_LIT:version>\": dirname[len(parentdir_prefix):],<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": None}<EOL>", "docstring": "Try to determine the version from the parent directory name.\n\n    Source tarballs conventionally unpack into a directory that includes\n    both the project name and a version string.", "id": "f10100:m4"}
{"signature": "def render_pep440(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"],<EOL>pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>return rendered<EOL>", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n    Exceptions:\n    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f10100:m9"}
{"signature": "def plus_or_dot(pieces):", "body": "if \"<STR_LIT:+>\" in pieces.get(\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>return \"<STR_LIT:.>\"<EOL><DEDENT>return \"<STR_LIT:+>\"<EOL>", "docstring": "Return a + if we don't already have one, else return a .", "id": "f10100:m8"}
{"signature": "def register_vcs_handler(vcs, method):  ", "body": "def decorate(f):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if vcs not in HANDLERS:<EOL><INDENT>HANDLERS[vcs] = {}<EOL><DEDENT>HANDLERS[vcs][method] = f<EOL>return f<EOL><DEDENT>return decorate<EOL>", "docstring": "Decorator to mark a method as the handler for a particular VCS.", "id": "f10100:m2"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_get_keywords(versionfile_abs):", "body": "<EOL>keywords = {}<EOL>try:<EOL><INDENT>f = open(versionfile_abs, \"<STR_LIT:r>\")<EOL>for line in f.readlines():<EOL><INDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>f.close()<EOL><DEDENT>except EnvironmentError:<EOL><INDENT>pass<EOL><DEDENT>return keywords<EOL>", "docstring": "Extract version information from the given file.", "id": "f10100:m5"}
{"signature": "def get_config():", "body": "<EOL>cfg = VersioneerConfig()<EOL>cfg.VCS = \"<STR_LIT>\"<EOL>cfg.style = \"<STR_LIT>\"<EOL>cfg.tag_prefix = \"<STR_LIT:v>\"<EOL>cfg.parentdir_prefix = \"<STR_LIT>\"<EOL>cfg.versionfile_source = \"<STR_LIT>\"<EOL>cfg.verbose = False<EOL>return cfg<EOL>", "docstring": "Create, populate and return the VersioneerConfig() object.", "id": "f10100:m1"}
{"signature": "def read_stream_tuples(schema, stream, *, buffer_size=io.DEFAULT_BUFFER_SIZE):", "body": "reader = _lancaster.Reader(schema, _get_datetime_flags(schema))<EOL>buf = stream.read(buffer_size)<EOL>remainder = b'<STR_LIT>'<EOL>while len(buf) > <NUM_LIT:0>:<EOL><INDENT>values, n = reader.read_seq_tuples(buf)<EOL>yield from values<EOL>remainder = buf[n:]<EOL>buf = stream.read(buffer_size)<EOL>if len(buf) > <NUM_LIT:0> and len(remainder) > <NUM_LIT:0>:<EOL><INDENT>ba = bytearray()<EOL>ba.extend(remainder)<EOL>ba.extend(buf)<EOL>buf = memoryview(ba).tobytes()<EOL><DEDENT><DEDENT>if len(remainder) > <NUM_LIT:0>:<EOL><INDENT>raise EOFError('<STR_LIT>'<EOL>'<STR_LIT>'.format(len(remainder)))<EOL><DEDENT>", "docstring": "Using a schema, deserialize a stream of consecutive Avro values\n    into tuples.\n\n    This assumes the input is avro records of simple values (numbers,\n    strings, etc.).\n\n    :param str schema: json string representing the Avro schema, field\n        names may include 'is_datetime' boolean fields to force\n        decoding long values of epoch nanoseconds into datetime\n        objects\n    :param file-like stream: a buffered stream of binary input\n    :param int buffer_size: size of bytes to read from the stream each time\n    :return: yields a sequence of python tuples deserialized from the stream", "id": "f10101:m2"}
{"signature": "def render(pieces, style):", "body": "if pieces[\"<STR_LIT:error>\"]:<EOL><INDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": pieces.get(\"<STR_LIT>\"),<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT:error>\": pieces[\"<STR_LIT:error>\"]}<EOL><DEDENT>if not style or style == \"<STR_LIT:default>\":<EOL><INDENT>style = \"<STR_LIT>\"  <EOL><DEDENT>if style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_pre(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_post(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_old(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_git_describe(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_git_describe_long(pieces)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % style)<EOL><DEDENT>return {\"<STR_LIT:version>\": rendered, \"<STR_LIT>\": pieces[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": pieces[\"<STR_LIT>\"], \"<STR_LIT:error>\": None}<EOL>", "docstring": "Render the given version pieces into the requested style.", "id": "f10103:m18"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_versions_from_keywords(keywords, tag_prefix, verbose):", "body": "if not keywords:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>refnames = keywords[\"<STR_LIT>\"].strip()<EOL>if refnames.startswith(\"<STR_LIT>\"):<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>refs = set([r.strip() for r in refnames.strip(\"<STR_LIT>\").split(\"<STR_LIT:U+002C>\")])<EOL>TAG = \"<STR_LIT>\"<EOL>tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])<EOL>if not tags:<EOL><INDENT>tags = set([r for r in refs if re.search(r'<STR_LIT>', r)])<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % \"<STR_LIT:U+002C>\".join(refs-tags))<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % \"<STR_LIT:U+002C>\".join(sorted(tags)))<EOL><DEDENT>for ref in sorted(tags):<EOL><INDENT>if ref.startswith(tag_prefix):<EOL><INDENT>r = ref[len(tag_prefix):]<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % r)<EOL><DEDENT>return {\"<STR_LIT:version>\": r,<EOL>\"<STR_LIT>\": keywords[\"<STR_LIT>\"].strip(),<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": None<EOL>}<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": keywords[\"<STR_LIT>\"].strip(),<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": \"<STR_LIT>\"}<EOL>", "docstring": "Get version information from git keywords.", "id": "f10103:m5"}
{"signature": "def render_git_describe_long(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL><DEDENT>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG-DISTANCE-gHEX[-dirty].\n\n    Like 'git describe --tags --dirty --always -long'.\n    The distance/hash is unconditional.\n\n    Exceptions:\n    1: no tags. HEX[-dirty]  (note: no 'g' prefix)", "id": "f10103:m17"}
{"signature": "def plus_or_dot(pieces):", "body": "if \"<STR_LIT:+>\" in pieces.get(\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>return \"<STR_LIT:.>\"<EOL><DEDENT>return \"<STR_LIT:+>\"<EOL>", "docstring": "Return a + if we don't already have one, else return a .", "id": "f10103:m11"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_get_keywords(versionfile_abs):", "body": "<EOL>keywords = {}<EOL>try:<EOL><INDENT>f = open(versionfile_abs, \"<STR_LIT:r>\")<EOL>for line in f.readlines():<EOL><INDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if line.strip().startswith(\"<STR_LIT>\"):<EOL><INDENT>mo = re.search(r'<STR_LIT>', line)<EOL>if mo:<EOL><INDENT>keywords[\"<STR_LIT>\"] = mo.group(<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>f.close()<EOL><DEDENT>except EnvironmentError:<EOL><INDENT>pass<EOL><DEDENT>return keywords<EOL>", "docstring": "Extract version information from the given file.", "id": "f10103:m4"}
{"signature": "def scan_setup_py():", "body": "found = set()<EOL>setters = False<EOL>errors = <NUM_LIT:0><EOL>with open(\"<STR_LIT>\", \"<STR_LIT:r>\") as f:<EOL><INDENT>for line in f.readlines():<EOL><INDENT>if \"<STR_LIT>\" in line:<EOL><INDENT>found.add(\"<STR_LIT>\")<EOL><DEDENT>if \"<STR_LIT>\" in line:<EOL><INDENT>found.add(\"<STR_LIT>\")<EOL><DEDENT>if \"<STR_LIT>\" in line:<EOL><INDENT>found.add(\"<STR_LIT>\")<EOL><DEDENT>if \"<STR_LIT>\" in line:<EOL><INDENT>setters = True<EOL><DEDENT>if \"<STR_LIT>\" in line:<EOL><INDENT>setters = True<EOL><DEDENT><DEDENT><DEDENT>if len(found) != <NUM_LIT:3>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>errors += <NUM_LIT:1><EOL><DEDENT>if setters:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>errors += <NUM_LIT:1><EOL><DEDENT>return errors<EOL>", "docstring": "Validate the contents of setup.py against Versioneer's expectations.", "id": "f10103:m23"}
{"signature": "def get_config_from_root(root):", "body": "<EOL>setup_cfg = os.path.join(root, \"<STR_LIT>\")<EOL>parser = configparser.SafeConfigParser()<EOL>with open(setup_cfg, \"<STR_LIT:r>\") as f:<EOL><INDENT>parser.readfp(f)<EOL><DEDENT>VCS = parser.get(\"<STR_LIT>\", \"<STR_LIT>\")  <EOL>def get(parser, name):<EOL><INDENT>if parser.has_option(\"<STR_LIT>\", name):<EOL><INDENT>return parser.get(\"<STR_LIT>\", name)<EOL><DEDENT>return None<EOL><DEDENT>cfg = VersioneerConfig()<EOL>cfg.VCS = VCS<EOL>cfg.style = get(parser, \"<STR_LIT>\") or \"<STR_LIT>\"<EOL>cfg.versionfile_source = get(parser, \"<STR_LIT>\")<EOL>cfg.versionfile_build = get(parser, \"<STR_LIT>\")<EOL>cfg.tag_prefix = get(parser, \"<STR_LIT>\")<EOL>if cfg.tag_prefix in (\"<STR_LIT>\", '<STR_LIT>'):<EOL><INDENT>cfg.tag_prefix = \"<STR_LIT>\"<EOL><DEDENT>cfg.parentdir_prefix = get(parser, \"<STR_LIT>\")<EOL>cfg.verbose = get(parser, \"<STR_LIT>\")<EOL>return cfg<EOL>", "docstring": "Read the project setup.cfg file to determine Versioneer config.", "id": "f10103:m1"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):", "body": "if not os.path.exists(os.path.join(root, \"<STR_LIT>\")):<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % root)<EOL><DEDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>GITS = [\"<STR_LIT>\"]<EOL>if sys.platform == \"<STR_LIT:win32>\":<EOL><INDENT>GITS = [\"<STR_LIT>\", \"<STR_LIT>\"]<EOL><DEDENT>describe_out = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\" % tag_prefix],<EOL>cwd=root)<EOL>if describe_out is None:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>describe_out = describe_out.strip()<EOL>full_out = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\"], cwd=root)<EOL>if full_out is None:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>full_out = full_out.strip()<EOL>pieces = {}<EOL>pieces[\"<STR_LIT>\"] = full_out<EOL>pieces[\"<STR_LIT>\"] = full_out[:<NUM_LIT:7>]  <EOL>pieces[\"<STR_LIT:error>\"] = None<EOL>git_describe = describe_out<EOL>dirty = git_describe.endswith(\"<STR_LIT>\")<EOL>pieces[\"<STR_LIT>\"] = dirty<EOL>if dirty:<EOL><INDENT>git_describe = git_describe[:git_describe.rindex(\"<STR_LIT>\")]<EOL><DEDENT>if \"<STR_LIT:->\" in git_describe:<EOL><INDENT>mo = re.search(r'<STR_LIT>', git_describe)<EOL>if not mo:<EOL><INDENT>pieces[\"<STR_LIT:error>\"] = (\"<STR_LIT>\"<EOL>% describe_out)<EOL>return pieces<EOL><DEDENT>full_tag = mo.group(<NUM_LIT:1>)<EOL>if not full_tag.startswith(tag_prefix):<EOL><INDENT>if verbose:<EOL><INDENT>fmt = \"<STR_LIT>\"<EOL>print(fmt % (full_tag, tag_prefix))<EOL><DEDENT>pieces[\"<STR_LIT:error>\"] = (\"<STR_LIT>\"<EOL>% (full_tag, tag_prefix))<EOL>return pieces<EOL><DEDENT>pieces[\"<STR_LIT>\"] = full_tag[len(tag_prefix):]<EOL>pieces[\"<STR_LIT>\"] = int(mo.group(<NUM_LIT:2>))<EOL>pieces[\"<STR_LIT>\"] = 
mo.group(<NUM_LIT:3>)<EOL><DEDENT>else:<EOL><INDENT>pieces[\"<STR_LIT>\"] = None<EOL>count_out = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>cwd=root)<EOL>pieces[\"<STR_LIT>\"] = int(count_out)  <EOL><DEDENT>return pieces<EOL>", "docstring": "Get version from 'git describe' in the root of the source tree.\n\n    This only gets called if the git-archive 'subst' keywords were *not*\n    expanded, and _version.py hasn't already been rewritten with a short\n    version string, meaning we're inside a checked out source tree.", "id": "f10103:m6"}
{"signature": "def render_git_describe(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL><DEDENT>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[-DISTANCE-gHEX][-dirty].\n\n    Like 'git describe --tags --dirty --always'.\n\n    Exceptions:\n    1: no tags. HEX[-dirty]  (note: no 'g' prefix)", "id": "f10103:m16"}
{"signature": "def get_cmdclass():", "body": "if \"<STR_LIT>\" in sys.modules:<EOL><INDENT>del sys.modules[\"<STR_LIT>\"]<EOL><DEDENT>cmds = {}<EOL>from distutils.core import Command<EOL>class cmd_version(Command):<EOL><INDENT>description = \"<STR_LIT>\"<EOL>user_options = []<EOL>boolean_options = []<EOL>def initialize_options(self):<EOL><INDENT>pass<EOL><DEDENT>def finalize_options(self):<EOL><INDENT>pass<EOL><DEDENT>def run(self):<EOL><INDENT>vers = get_versions(verbose=True)<EOL>print(\"<STR_LIT>\" % vers[\"<STR_LIT:version>\"])<EOL>print(\"<STR_LIT>\" % vers.get(\"<STR_LIT>\"))<EOL>print(\"<STR_LIT>\" % vers.get(\"<STR_LIT>\"))<EOL>if vers[\"<STR_LIT:error>\"]:<EOL><INDENT>print(\"<STR_LIT>\" % vers[\"<STR_LIT:error>\"])<EOL><DEDENT><DEDENT><DEDENT>cmds[\"<STR_LIT:version>\"] = cmd_version<EOL>if \"<STR_LIT>\" in sys.modules:<EOL><INDENT>from setuptools.command.build_py import build_py as _build_py<EOL><DEDENT>else:<EOL><INDENT>from distutils.command.build_py import build_py as _build_py<EOL><DEDENT>class cmd_build_py(_build_py):<EOL><INDENT>def run(self):<EOL><INDENT>root = get_root()<EOL>cfg = get_config_from_root(root)<EOL>versions = get_versions()<EOL>_build_py.run(self)<EOL>if cfg.versionfile_build:<EOL><INDENT>target_versionfile = os.path.join(self.build_lib,<EOL>cfg.versionfile_build)<EOL>print(\"<STR_LIT>\" % target_versionfile)<EOL>write_to_version_file(target_versionfile, versions)<EOL><DEDENT><DEDENT><DEDENT>cmds[\"<STR_LIT>\"] = cmd_build_py<EOL>if \"<STR_LIT>\" in sys.modules:  <EOL><INDENT>from cx_Freeze.dist import build_exe as _build_exe<EOL>class cmd_build_exe(_build_exe):<EOL><INDENT>def run(self):<EOL><INDENT>root = get_root()<EOL>cfg = get_config_from_root(root)<EOL>versions = get_versions()<EOL>target_versionfile = cfg.versionfile_source<EOL>print(\"<STR_LIT>\" % target_versionfile)<EOL>write_to_version_file(target_versionfile, versions)<EOL>_build_exe.run(self)<EOL>os.unlink(target_versionfile)<EOL>with open(cfg.versionfile_source, 
\"<STR_LIT:w>\") as f:<EOL><INDENT>LONG = LONG_VERSION_PY[cfg.VCS]<EOL>f.write(LONG %<EOL>{\"<STR_LIT>\": \"<STR_LIT:$>\",<EOL>\"<STR_LIT>\": cfg.style,<EOL>\"<STR_LIT>\": cfg.tag_prefix,<EOL>\"<STR_LIT>\": cfg.parentdir_prefix,<EOL>\"<STR_LIT>\": cfg.versionfile_source,<EOL>})<EOL><DEDENT><DEDENT><DEDENT>cmds[\"<STR_LIT>\"] = cmd_build_exe<EOL>del cmds[\"<STR_LIT>\"]<EOL><DEDENT>if \"<STR_LIT>\" in sys.modules:<EOL><INDENT>from setuptools.command.sdist import sdist as _sdist<EOL><DEDENT>else:<EOL><INDENT>from distutils.command.sdist import sdist as _sdist<EOL><DEDENT>class cmd_sdist(_sdist):<EOL><INDENT>def run(self):<EOL><INDENT>versions = get_versions()<EOL>self._versioneer_generated_versions = versions<EOL>self.distribution.metadata.version = versions[\"<STR_LIT:version>\"]<EOL>return _sdist.run(self)<EOL><DEDENT>def make_release_tree(self, base_dir, files):<EOL><INDENT>root = get_root()<EOL>cfg = get_config_from_root(root)<EOL>_sdist.make_release_tree(self, base_dir, files)<EOL>target_versionfile = os.path.join(base_dir, cfg.versionfile_source)<EOL>print(\"<STR_LIT>\" % target_versionfile)<EOL>write_to_version_file(target_versionfile,<EOL>self._versioneer_generated_versions)<EOL><DEDENT><DEDENT>cmds[\"<STR_LIT>\"] = cmd_sdist<EOL>return cmds<EOL>", "docstring": "Get the custom setuptools/distutils subclasses used by Versioneer.", "id": "f10103:m21"}
{"signature": "def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):", "body": "assert isinstance(commands, list)<EOL>p = None<EOL>for c in commands:<EOL><INDENT>try:<EOL><INDENT>dispcmd = str([c] + args)<EOL>p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,<EOL>stderr=(subprocess.PIPE if hide_stderr<EOL>else None))<EOL>break<EOL><DEDENT>except EnvironmentError:<EOL><INDENT>e = sys.exc_info()[<NUM_LIT:1>]<EOL>if e.errno == errno.ENOENT:<EOL><INDENT>continue<EOL><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % dispcmd)<EOL>print(e)<EOL><DEDENT>return None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % (commands,))<EOL><DEDENT>return None<EOL><DEDENT>stdout = p.communicate()[<NUM_LIT:0>].strip()<EOL>if sys.version_info[<NUM_LIT:0>] >= <NUM_LIT:3>:<EOL><INDENT>stdout = stdout.decode()<EOL><DEDENT>if p.returncode != <NUM_LIT:0>:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % dispcmd)<EOL><DEDENT>return None<EOL><DEDENT>return stdout<EOL>", "docstring": "Call the given command(s).", "id": "f10103:m3"}
{"signature": "def render_pep440(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"],<EOL>pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>return rendered<EOL>", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n    Exceptions:\n    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f10103:m12"}
{"signature": "def parse(self, text):", "body": "return parser.parse(text).parsed<EOL>", "docstring": "Simple convenience function to unwrap the array of parameters.", "id": "f10106:c0:m0"}
{"signature": "def access_control_headers(self, header):", "body": "<EOL>response, _ = self.client.options(self.right_path)<EOL>assert response.status == http.client.OK<EOL>assert header not in response<EOL>response, _ = self.client.options(<EOL>self.right_path,<EOL>headers={<EOL>'<STR_LIT>': self.right_origin,<EOL>'<STR_LIT>': self.default_method})<EOL>assert response.status == http.client.OK<EOL>assert header not in response<EOL>response, _ = self.client.options(<EOL>self.right_path, headers={'<STR_LIT>': '<STR_LIT:*>'})<EOL>assert response.status == http.client.OK<EOL>assert header not in response<EOL>response, _ = self.client.options(self.left_path)<EOL>assert response.status == http.client.OK<EOL>assert header not in response<EOL>response, _ = self.client.options(<EOL>self.left_path,<EOL>headers={<EOL>'<STR_LIT>': self.left_origin,<EOL>'<STR_LIT>': self.default_method})<EOL>assert response.status == http.client.OK<EOL>assert header not in response<EOL>", "docstring": "Test each Access-Control-Allow header here, since they\nall do the same thing.", "id": "f10115:c0:m2"}
{"signature": "def __str__(self):", "body": "o = StringIO()<EOL>o.write('<STR_LIT:(>')<EOL>if self.negated:<EOL><INDENT>o.write('<STR_LIT>')<EOL><DEDENT>o.write('<STR_LIT:.>'.join(self.path))<EOL>if self.values:<EOL><INDENT>o.write('<STR_LIT>' % REVERSED_OPERATOR_SUFFIX_MAP[self.operator])<EOL><DEDENT>o.write('<STR_LIT>'.join(map(lambda x: \"<STR_LIT>\".format(str(x)), self.values)))<EOL>o.write('<STR_LIT:)>')<EOL>return o.getvalue()<EOL>", "docstring": "Format this query segment in a human-readable representation\nintended for debugging.", "id": "f10131:c1:m2"}
{"signature": "def split_segments(text, closing_paren=False):", "body": "buf = StringIO()<EOL>segments = []<EOL>combinators = []<EOL>last_group = False<EOL>iterator = iter(text)<EOL>last_negation = False<EOL>for character in iterator:<EOL><INDENT>if character in COMBINATORS:<EOL><INDENT>if last_negation:<EOL><INDENT>buf.write(constants.OPERATOR_NEGATION)<EOL><DEDENT>val = buf.getvalue()<EOL>reset_stringio(buf)<EOL>if not last_group and not len(val):<EOL><INDENT>raise ValueError('<STR_LIT>' % character)<EOL><DEDENT>if len(val):<EOL><INDENT>segments.append(parse_segment(val))<EOL><DEDENT>combinators.append(COMBINATORS[character])<EOL><DEDENT>elif character == constants.GROUP_BEGIN:<EOL><INDENT>if buf.tell():<EOL><INDENT>raise ValueError('<STR_LIT>' % character)<EOL><DEDENT>seg = split_segments(iterator, True)<EOL>if last_negation:<EOL><INDENT>seg = UnarySegmentCombinator(seg)<EOL><DEDENT>segments.append(seg)<EOL>last_group = True<EOL>continue<EOL><DEDENT>elif character == constants.GROUP_END:<EOL><INDENT>val = buf.getvalue()<EOL>if not buf.tell() or not closing_paren:<EOL><INDENT>raise ValueError('<STR_LIT>' % character)<EOL><DEDENT>segments.append(parse_segment(val))<EOL>return combine(segments, combinators)<EOL><DEDENT>elif character == constants.OPERATOR_NEGATION and not buf.tell():<EOL><INDENT>last_negation = True<EOL>continue<EOL><DEDENT>else:<EOL><INDENT>if last_negation:<EOL><INDENT>buf.write(constants.OPERATOR_NEGATION)<EOL><DEDENT>if last_group:<EOL><INDENT>raise ValueError('<STR_LIT>' % character)<EOL><DEDENT>buf.write(character)<EOL><DEDENT>last_negation = False<EOL>last_group = False<EOL><DEDENT>else:<EOL><INDENT>if closing_paren:<EOL><INDENT>raise ValueError('<STR_LIT>' % constants.GROUP_END)<EOL><DEDENT>if not last_group:<EOL><INDENT>segments.append(parse_segment(buf.getvalue()))<EOL><DEDENT><DEDENT>return combine(segments, combinators)<EOL>", "docstring": "Return objects representing segments.", "id": "f10131:m2"}
{"signature": "def __init__(self, **kwargs):", "body": "<EOL>self.__dict__.update(kwargs)<EOL>", "docstring": "Initialize authentication protocol; establish parameters.", "id": "f10134:c0:m0"}
{"signature": "def unauthenticated(self):", "body": "raise http.exceptions.Forbidden()<EOL>", "docstring": "Callback that is invoked when after a user is determined to\nbe unauthenticated.", "id": "f10134:c0:m2"}
{"signature": "def authenticate(self, request):", "body": "return None<EOL>", "docstring": "Gets the a user if they are authenticated; else None.\n\n        @retval False    Unable to authenticate.\n        @retval None     Able to authenticate but failed.\n        @retval <user>   User object representing the current user.", "id": "f10134:c0:m1"}
{"signature": "def can_serialize(self, data=None):", "body": "try:<EOL><INDENT>self.serialize(data)<EOL>return True<EOL><DEDENT>except ValueError:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Tests this serializer to see if it can serialize.", "id": "f10135:c0:m1"}
{"signature": "def is_accessible(self, user, method, resource):", "body": "return True<EOL>", "docstring": "Determines the accessibility to a resource endpoint for a particular\nmethod. An inaccessible resource is indistinguishable from a\nnon-existant resource.\n\n@param[in] user\n    The user in question that is being checked.\n\n@param[in] method\n    The method in question that is being performed (eg. 'GET').\n\n@param[in] resource\n    The resource instance that is being authorized.\n\n@returns\n    Returns true if the user can access the resource for\n    the passed operation; otherwise, false.", "id": "f10140:c0:m0"}
{"signature": "def filter(self, user, operation, resource, iterable):", "body": "return iterable<EOL>", "docstring": "Filters an iterable to contain only the items for which the user\nis authorized to perform the operation on.\n\n@param[in] user\n    The user in question that is being checked.\n\n@param[in] operation\n    The operation in question that is being performed (eg. 'read').\n\n@param[in] resource\n    The resource instance that is being authorized.\n\n@param[in] iterable\n    The iterable of objects to be checked. This method is called\n    from the model connector so the actual value of this parameter\n    depends on the model connector (eg. it may be a django queryset).\n\n@returns\n    Returns an iterable containing the remaining objects.", "id": "f10140:c0:m4"}
{"signature": "def unauthorized(self):", "body": "raise http.exceptions.Forbidden()<EOL>", "docstring": "Informs the client that it is not authrozied for the resource.", "id": "f10140:c0:m3"}
{"signature": "def inaccessible(self):", "body": "raise http.exceptions.Forbidden()<EOL>", "docstring": "Informs the client that the resource is inaccessible.", "id": "f10140:c0:m1"}
{"signature": "@property<EOL><INDENT>def host(self):<DEDENT>", "body": "return self.headers.get('<STR_LIT>') or '<STR_LIT:127.0.0.1>'<EOL>", "docstring": "Retrieves the hostname, normally from the `Host` header.", "id": "f10141:c1:m4"}
{"signature": "@staticmethod<EOL><INDENT>def _Header(sequence, name):<DEDENT>", "body": "return tuple(sequence._headers.get(name, '<STR_LIT>').split('<STR_LIT:U+002C>'))<EOL>", "docstring": "Returns the passed header as a tuple.\n\n        Implements a facade so that the response headers can override\n        this to provide a mutable sequence header.", "id": "f10141:c0:m0"}
{"signature": "def getlist(self, name):", "body": "return self.headers.getlist(name)<EOL>", "docstring": "Retrieves a the multi-valued list of the header with\nthe passed name.", "id": "f10141:c1:m15"}
{"signature": "def getlist(self, name):", "body": "return self._sequence[name]<EOL>", "docstring": "Retrieves the passed header as a tuple of its values.", "id": "f10141:c0:m9"}
{"signature": "def read(self, deserialize=False, format=None):", "body": "if deserialize:<EOL><INDENT>data, _ = self.deserialize(format=format)<EOL>return data<EOL><DEDENT>content = self._read()<EOL>if not content:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if type(content) is six.binary_type:<EOL><INDENT>content = content.decode(self.encoding)<EOL><DEDENT>return content<EOL>", "docstring": "Read and return the request data.\n\n        @param[in] deserialize\n            True to deserialize the resultant text using a determiend format\n            or the passed format.\n\n        @param[in] format\n            A specific format to deserialize in; if provided, no detection is\n            done. If not provided, the content-type header is looked at to\n            determine an appropriate deserializer.", "id": "f10141:c1:m10"}
{"signature": "@property<EOL><INDENT>def uri(self):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Returns the complete URI of the request.", "id": "f10141:c1:m7"}
{"signature": "def __getitem__(self, name):", "body": "return self.headers[name]<EOL>", "docstring": "Retrieves a header with the passed name.", "id": "f10141:c1:m13"}
{"signature": "def _read(self):", "body": "return None<EOL>", "docstring": "Read and return the request data.\n\n        @note Connectors should override this method.", "id": "f10141:c1:m9"}
{"signature": "def values(self):", "body": "return self.headers.values()<EOL>", "docstring": "Return a new view of the header values.", "id": "f10141:c1:m18"}
{"signature": "def reverse(self, name):", "body": "return self.headers.reverse(name)<EOL>", "docstring": "Reverse the elements of the list, in place.", "id": "f10144:c1:m34"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def __delitem__(self, name):<DEDENT>", "body": "", "docstring": "Removes a header with the passed name.\n\n        @param[in] name\n            The case-insensitive name of the header to remove\n            from the response.", "id": "f10144:c0:m1"}
{"signature": "def getlist(self, name):", "body": "return self.headers.getlist(name)<EOL>", "docstring": "Retrieves the passed header as a sequence of its values.", "id": "f10144:c1:m35"}
{"signature": "def remove(self, name, value):", "body": "return self._sequence[name].remove(value)<EOL>", "docstring": "Remove the first item with the passed value from the\nlist for the named header.", "id": "f10144:c0:m5"}
{"signature": "def index(self, name, value):", "body": "return self.headers.index(name, value)<EOL>", "docstring": "Return the index in the list of the first item whose value is x in\nthe values of the named header.", "id": "f10144:c1:m31"}
{"signature": "@status.setter<EOL><INDENT>def status(self, value):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Sets the status code of the response.", "id": "f10144:c1:m4"}
{"signature": "def require_open(self):", "body": "self.require_not_closed()<EOL>if self.streaming:<EOL><INDENT>raise exceptions.InvalidOperation('<STR_LIT>')<EOL><DEDENT>", "docstring": "Raises an exception if the response is not open.", "id": "f10144:c1:m2"}
{"signature": "def write(self, chunk, serialize=False, format=None):", "body": "<EOL>self.require_not_closed()<EOL>if chunk is None:<EOL><INDENT>return<EOL><DEDENT>if serialize or format is not None:<EOL><INDENT>self.serialize(chunk, format=format)<EOL>return  <EOL><DEDENT>if type(chunk) is six.binary_type:<EOL><INDENT>self._length += len(chunk)<EOL>self._stream.write(chunk)<EOL><DEDENT>elif isinstance(chunk, six.string_types):<EOL><INDENT>encoding = self.encoding<EOL>if encoding is not None:<EOL><INDENT>chunk = chunk.encode(encoding)<EOL><DEDENT>else:<EOL><INDENT>raise exceptions.InvalidOperation(<EOL>'<STR_LIT>')<EOL><DEDENT>self._length += len(chunk)<EOL>self._stream.write(chunk)<EOL><DEDENT>elif isinstance(chunk, collections.Iterable):<EOL><INDENT>for section in chunk:<EOL><INDENT>self.write(section)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise exceptions.InvalidOperation(<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Writes the given chunk to the output buffer.\n\n        @param[in] chunk\n            Either a byte array, a unicode string, or a generator. If `chunk`\n            is a generator then calling `self.write(<generator>)` is\n            equivalent to:\n\n            @code\n                for x in <generator>:\n                    self.write(x)\n                    self.flush()\n            @endcode\n\n        @param[in] serialize\n            True to serialize the lines in a determined serializer.\n\n        @param[in] format\n            A specific format to serialize in; if provided, no detection is\n            done. If not provided, the accept header (as well as the URL\n            extension) is looked at to determine an appropriate serializer.", "id": "f10144:c1:m14"}
{"signature": "def __delitem__(self, name):", "body": "del self.headers<EOL>", "docstring": "Removes a header with the passed name.", "id": "f10144:c1:m21"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def close(self):<DEDENT>", "body": "<EOL>self.require_not_closed()<EOL>if not self.streaming or self.asynchronous:<EOL><INDENT>if '<STR_LIT>' not in self.headers:<EOL><INDENT>self.headers['<STR_LIT>'] = self.tell()<EOL><DEDENT><DEDENT>self.flush()<EOL>self._closed = True<EOL>", "docstring": "Flush and close the stream.\n\n        This is called automatically by the base resource on resources\n        unless the resource is operating asynchronously; in that case,\n        this method MUST be called in order to signal the end of the request.\n        If not the request will simply hang as it is waiting for some\n        thread to tell it to return to the client.", "id": "f10144:c1:m11"}
{"signature": "def tell(self):", "body": "return self._length<EOL>", "docstring": "Return the current stream position.", "id": "f10144:c1:m13"}
{"signature": "def serialize(self, data, format=None):", "body": "return self._resource.serialize(data, response=self, format=format)<EOL>", "docstring": "Serializes the data into this response using a serializer.\n\n        @param[in] data\n            The data to be serialized.\n\n        @param[in] format\n            A specific format to serialize in; if provided, no detection is\n            done. If not provided, the accept header (as well as the URL\n            extension) is looked at to determine an appropriate serializer.\n\n        @returns\n            A tuple of the serialized text and an instance of the\n            serializer used.", "id": "f10144:c1:m15"}
{"signature": "def __setitem__(self, name, value):", "body": "self.headers[name] = value<EOL>", "docstring": "Stores a header with the passed name.", "id": "f10144:c1:m20"}
{"signature": "def __nonzero__(self):", "body": "return not self._closed<EOL>", "docstring": "Test if the response is closed.", "id": "f10144:c1:m23"}
{"signature": "def send(self, *args, **kwargs):", "body": "self.write(*args, **kwargs)<EOL>self.flush()<EOL>", "docstring": "Writes the passed chunk and flushes it to the client.", "id": "f10144:c1:m17"}
{"signature": "@property<EOL><INDENT>def status(self):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Gets the status code of the response.", "id": "f10144:c1:m3"}
{"signature": "@body.setter<EOL><INDENT>def body(self, value):<DEDENT>", "body": "self._body = value<EOL>", "docstring": "Sets the response body to the passed value.\n\n        @note\n            During asynchronous or streaming responses, remember that\n            the `body` property refers to the portion of the response *not*\n            sent to the client.", "id": "f10144:c1:m6"}
{"signature": "def dasherize(value):", "body": "value = value.strip()<EOL>value = re.sub(r'<STR_LIT>', r'<STR_LIT>', value)<EOL>value = re.sub(r'<STR_LIT>', r'<STR_LIT:->', value)<EOL>value = re.sub(r'<STR_LIT>', r'<STR_LIT>', value)<EOL>value = value.lower()<EOL>return value<EOL>", "docstring": "Dasherizes the passed value.", "id": "f10150:m0"}
{"signature": "def import_module(name):", "body": "try:<EOL><INDENT>return importlib.import_module(name)<EOL><DEDENT>except ImportError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Attempt to import a module; returns None if unsuccessful.", "id": "f10152:m0"}
{"signature": "def cons(collection, value):", "body": "if isinstance(value, collections.Mapping):<EOL><INDENT>if collection is None:<EOL><INDENT>collection = {}<EOL><DEDENT>collection.update(**value)<EOL><DEDENT>elif isinstance(value, six.string_types):<EOL><INDENT>if collection is None:<EOL><INDENT>collection = []<EOL><DEDENT>collection.append(value)<EOL><DEDENT>elif isinstance(value, collections.Iterable):<EOL><INDENT>if collection is None:<EOL><INDENT>collection = []<EOL><DEDENT>collection.extend(value)<EOL><DEDENT>else:<EOL><INDENT>if collection is None:<EOL><INDENT>collection = []<EOL><DEDENT>collection.append(value)<EOL><DEDENT>return collection<EOL>", "docstring": "Extends a collection with a value.", "id": "f10154:m0"}
{"signature": "def parse(specifiers):", "body": "specifiers = \"<STR_LIT>\".join(specifiers.split())<EOL>for specifier in specifiers.split('<STR_LIT:U+002C>'):<EOL><INDENT>if len(specifier) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>count = specifier.count('<STR_LIT:->')<EOL>if (count and specifier[<NUM_LIT:0>] == '<STR_LIT:->') or not count:<EOL><INDENT>yield int(specifier), int(specifier)<EOL>continue<EOL><DEDENT>specifier = list(map(int, specifier.split('<STR_LIT:->')))<EOL>if len(specifier) == <NUM_LIT:2>:<EOL><INDENT>if specifier[<NUM_LIT:0>] < <NUM_LIT:0> or specifier[<NUM_LIT:1>] < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>if specifier[<NUM_LIT:1>] < specifier[<NUM_LIT:0>]:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>yield tuple(specifier)<EOL>continue<EOL><DEDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Consumes set specifiers as text and forms a generator to retrieve\nthe requested ranges.\n\n@param[in] specifiers\n    Expected syntax is from the byte-range-specifier ABNF found in the\n    [RFC 2616]; eg. 15-17,151,-16,26-278,15\n\n@returns\n    Consecutive tuples that describe the requested range; eg. (1, 72) or\n    (1, 1) [read as 1 to 72 or 1 to 1].", "id": "f10160:m0"}
{"signature": "def set(self, target, value):", "body": "if not self._set:<EOL><INDENT>return<EOL><DEDENT>if self.path is None:<EOL><INDENT>self.set = lambda *a: None<EOL>return None<EOL><DEDENT>if self._segments[target.__class__]:<EOL><INDENT>self.get(target)<EOL><DEDENT>if self._segments[target.__class__]:<EOL><INDENT>return<EOL><DEDENT>parent_getter = compose(*self._getters[target.__class__][:-<NUM_LIT:1>])<EOL>target = parent_getter(target)<EOL>func = self._make_setter(self.path.split('<STR_LIT:.>')[-<NUM_LIT:1>], target.__class__)<EOL>func(target, value)<EOL>def setter(target, value):<EOL><INDENT>func(parent_getter(target), value)<EOL><DEDENT>self.set = setter<EOL>", "docstring": "Set the value of this attribute for the passed object.", "id": "f10168:c0:m3"}
{"signature": "def prepare(self, value):", "body": "<EOL>return value<EOL>", "docstring": "Prepare the value for serialization and presentation to the client.", "id": "f10168:c0:m4"}
{"signature": "def resource(**kwargs):", "body": "def inner(function):<EOL><INDENT>name = kwargs.pop('<STR_LIT:name>', None)<EOL>if name is None:<EOL><INDENT>name = utils.dasherize(function.__name__)<EOL><DEDENT>methods = kwargs.pop('<STR_LIT>', None)<EOL>if isinstance(methods, six.string_types):<EOL><INDENT>methods = methods,<EOL><DEDENT>handler = (function, methods)<EOL>if name not in _resources:<EOL><INDENT>_handlers[name] = []<EOL>from armet import resources<EOL>kwargs['<STR_LIT:name>'] = name<EOL>class LightweightResource(resources.Resource):<EOL><INDENT>Meta = type(str('<STR_LIT:Meta>'), (), kwargs)<EOL>def route(self, request, response):<EOL><INDENT>for handler, methods in _handlers[name]:<EOL><INDENT>if methods is None or request.method in methods:<EOL><INDENT>return handler(request, response)<EOL><DEDENT><DEDENT>resources.Resource.route(self)<EOL><DEDENT><DEDENT>_resources[name] = LightweightResource<EOL><DEDENT>_handlers[name].append(handler)<EOL>return _resources[name]<EOL><DEDENT>return inner<EOL>", "docstring": "Wraps the decorated function in a lightweight resource.", "id": "f10172:m1"}
{"signature": "@property<EOL><INDENT>def allowed_operations(self):<DEDENT>", "body": "if self.slug is not None:<EOL><INDENT>return self.meta.detail_allowed_operations<EOL><DEDENT>return self.meta.list_allowed_operations<EOL>", "docstring": "Retrieves the allowed operations for this request.", "id": "f10180:c0:m2"}
{"signature": "def get(self, request, response):", "body": "<EOL>self.assert_operations('<STR_LIT>')<EOL>items = self.read()<EOL>if not items:<EOL><INDENT>raise http.exceptions.NotFound()<EOL><DEDENT>if (isinstance(items, Iterable)<EOL>and not isinstance(items, six.string_types)) and items:<EOL><INDENT>items = pagination.paginate(self.request, self.response, items)<EOL><DEDENT>self.make_response(items)<EOL>", "docstring": "Processes a `GET` request.", "id": "f10180:c0:m13"}
{"signature": "@classmethod<EOL><INDENT>def parse(cls, path):<DEDENT>", "body": "<EOL>for resource, pattern in cls.meta.patterns:<EOL><INDENT>match = re.match(pattern, path)<EOL>if match is not None:<EOL><INDENT>return resource, match.groupdict(), match.string[match.end():]<EOL><DEDENT><DEDENT>return None if not cls.meta.patterns else False<EOL>", "docstring": "Parses out parameters and separates them out of the path.\n\n        This uses one of the many defined patterns on the options class. But,\n        it defaults to a no-op if there are no defined patterns.", "id": "f10185:c0:m3"}
{"signature": "@classmethod<EOL><INDENT>def traverse(cls, request, params=None):<DEDENT>", "body": "<EOL>result = cls.parse(request.path)<EOL>if result is None:<EOL><INDENT>return cls, {}<EOL><DEDENT>elif not result:<EOL><INDENT>raise http.exceptions.NotFound()<EOL><DEDENT>resource, data, rest = result<EOL>if params:<EOL><INDENT>data.update(params)<EOL><DEDENT>if resource is None:<EOL><INDENT>return cls, data<EOL><DEDENT>if data.get('<STR_LIT:path>') is not None:<EOL><INDENT>request.path = data.pop('<STR_LIT:path>')<EOL><DEDENT>elif rest is not None:<EOL><INDENT>request.path = rest<EOL><DEDENT>result = resource.traverse(request, params=data)<EOL>return result<EOL>", "docstring": "Traverses down the path and determines the accessed resource.\n\n        This makes use of the patterns array to implement simple traversal.\n        This defaults to a no-op if there are no defined patterns.", "id": "f10185:c0:m4"}
{"signature": "@classmethod<EOL><INDENT>def redirect(cls, request, response):<DEDENT>", "body": "if cls.meta.legacy_redirect:<EOL><INDENT>if request.method in ('<STR_LIT:GET>', '<STR_LIT>',):<EOL><INDENT>response.status = http.client.MOVED_PERMANENTLY<EOL><DEDENT>else:<EOL><INDENT>response.status = http.client.TEMPORARY_REDIRECT<EOL><DEDENT><DEDENT>else:<EOL><INDENT>response.status = http.client.PERMANENT_REDIRECT<EOL><DEDENT>response.close()<EOL>", "docstring": "Redirect to the canonical URI for this resource.", "id": "f10185:c0:m1"}
{"signature": "def options(self, request, response):", "body": "<EOL>response['<STR_LIT>'] = '<STR_LIT:U+002CU+0020>'.join(self.meta.http_allowed_methods)<EOL>response.status = http.client.OK<EOL>", "docstring": "Process an `OPTIONS` request.\n\n        Used to initiate a cross-origin request. All handling specific to\n        CORS requests is done on every request however this method also\n        returns a list of available methods.", "id": "f10185:c0:m15"}
{"signature": "def random_string(length):", "body": "str_list = [random.choice(string.digits + string.ascii_letters) for i in range(length)]<EOL>return '<STR_LIT>'.join(str_list)<EOL>", "docstring": "Generate random string with parameter length.\nExample:\n\n    >>> from eggit.egg_string import random_string\n    >>> random_string(8)\n    'q4f2eaT4'\n    >>>", "id": "f10194:m0"}
{"signature": "@staticmethod<EOL><INDENT>def get_datetime_object(datetime_str):<DEDENT>", "body": "try:<EOL><INDENT>dft = DTFormat()<EOL>return datetime.strptime(datetime_str, dft.datetime_format)<EOL><DEDENT>except:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Get datetime object from datetime string\n\nexample:\n    DateTimeUtils.get_datetime_object('2018-01-01 00:00:00')\n\n:param str string: datetime string\n:return: datetime object\n:rtype: datetime", "id": "f10195:c1:m1"}
{"signature": "@staticmethod<EOL><INDENT>def timestamp_to_datetime(timestamp):<DEDENT>", "body": "if isinstance(timestamp, (int, float, str)):<EOL><INDENT>try:<EOL><INDENT>timestamp = float(timestamp)<EOL>if timestamp.is_integer():<EOL><INDENT>timestamp = int(timestamp)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>return None<EOL><DEDENT>temp = str(timestamp).split('<STR_LIT:.>')[<NUM_LIT:0>]<EOL>if len(temp) == <NUM_LIT>:<EOL><INDENT>timestamp = timestamp / <NUM_LIT><EOL><DEDENT>if len(temp) < <NUM_LIT:10>:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>return datetime.fromtimestamp(timestamp)<EOL>", "docstring": "1514736000 --> datetime object\n\n:param int timestamp: unix timestamp (int)\n:return: datetime object or None\n:rtype: datetime or None", "id": "f10195:c1:m4"}
{"signature": "@staticmethod<EOL><INDENT>def datetime_str_to_timestamp(datetime_str):<DEDENT>", "body": "try:<EOL><INDENT>dtf = DTFormat()<EOL>struct_time = time.strptime(datetime_str, dtf.datetime_format)<EOL>return time.mktime(struct_time)<EOL><DEDENT>except:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "'2018-01-01 00:00:00' (str) --> 1514736000\n\n:param str datetime_str: datetime string\n:return: unix timestamp (int) or None\n:rtype: int or None", "id": "f10195:c1:m0"}
{"signature": "@staticmethod<EOL><INDENT>def timestamp_to_datetime_str(timestamp):<DEDENT>", "body": "return DateTimeUtils.get_datetime_string(DateTimeUtils.timestamp_to_datetime(timestamp))<EOL>", "docstring": "1514736000 --> '2018-01-01 00:00:00' (str)\n\n:param int timestamp: unix timestamp\n:return: datetime str\n:rtype: str", "id": "f10195:c1:m5"}
{"signature": "@staticmethod<EOL><INDENT>def get_datetime_string(datetime_obj):<DEDENT>", "body": "if isinstance(datetime_obj, datetime):<EOL><INDENT>dft = DTFormat()<EOL>return datetime_obj.strftime(dft.datetime_format)<EOL><DEDENT>return None<EOL>", "docstring": "Get datetime string from datetime object\n\n:param datetime datetime_obj: datetime object\n:return: datetime string\n:rtype: str", "id": "f10195:c1:m2"}
{"signature": "def __init__(self, current_page, total_page_count, items, total_item_count, page_size=<NUM_LIT:10>):", "body": "self.current_page = current_page<EOL>self.total_page_count = total_page_count<EOL>self.items = items<EOL>self.total_item_count = total_item_count<EOL>self.page_size = page_size<EOL>", "docstring": ":param int current_page: Current page number\n:param int total_page_count: Total page count\n:param object items: Paging data\n:param int total_item_count: Total item count\n:param int page_size: How many items per page", "id": "f10200:c0:m0"}
{"signature": "def format_cookies(path):", "body": "with open(path, '<STR_LIT:r>') as f:<EOL><INDENT>_cookies = {}<EOL>for row in f.read().split('<STR_LIT:;>'):<EOL><INDENT>k, v = row.strip().split('<STR_LIT:=>', <NUM_LIT:1>)<EOL>_cookies[k] = v<EOL><DEDENT>return _cookies<EOL><DEDENT>", "docstring": "\u5c06 cookie \u5b57\u7b26\u4e32\u8f6c\u5316\u4e3a\u5b57\u5178\n\n:param path: cookies \u6587\u4ef6\u8def\u5f84\n:return: cookies \u5b57\u5178", "id": "f10204:m0"}
{"signature": "def delete_empty_dir(directory):", "body": "if os.path.exists(directory):<EOL><INDENT>if os.path.isdir(directory):<EOL><INDENT>for d in os.listdir(directory):<EOL><INDENT>path = os.path.join(directory, d)<EOL>if os.path.isdir(path):<EOL><INDENT>delete_empty_dir(path)<EOL><DEDENT><DEDENT><DEDENT>if not os.listdir(directory):<EOL><INDENT>os.rmdir(directory)<EOL>print(\"<STR_LIT>\" + directory)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "\u5220\u9664\u7a7a\u76ee\u5f55\n\n:param directory: \u76ee\u5f55\u8def\u5f84", "id": "f10204:m1"}
{"signature": "@contextmanager<EOL>def memoryit_block(group_by='<STR_LIT>', limit=<NUM_LIT:10>, label='<STR_LIT>'):", "body": "tracemalloc.start()<EOL>_start = tracemalloc.take_snapshot()<EOL>try:<EOL><INDENT>yield<EOL><DEDENT>finally:<EOL><INDENT>_end = tracemalloc.take_snapshot()<EOL>stats = _end.compare_to(_start, group_by)<EOL>_print(stats, limit, label)<EOL><DEDENT>", "docstring": "\u8ffd\u8e2a\u4ee3\u7801\u5757\u5185\u5b58\u6d88\u8017\u60c5\u51b5\n\n:param group_by: \u7edf\u8ba1\u5206\u7ec4\uff0c\u6709 'filename', 'lineno', 'traceback' \u53ef\u9009\n:param limit: \u9650\u5236\u8f93\u51fa\u884c\u6570\n:param label: \u4ee3\u7801\u5757\u6807\u7b7e", "id": "f10205:m2"}
{"signature": "def timeit(unit='<STR_LIT:s>'):", "body": "def wrapper(func):<EOL><INDENT>@wraps(func)<EOL>def inner(*args, **kwargs):<EOL><INDENT>start = time.time()<EOL>_result = func(*args, **kwargs)<EOL>_format(unit, time.time() - start, func.__name__ + '<STR_LIT>')<EOL>return _result<EOL><DEDENT>return inner<EOL><DEDENT>return wrapper<EOL>", "docstring": "\u6d4b\u8bd5\u51fd\u6570\u8017\u65f6\n\n:param unit: \u65f6\u95f4\u5355\u4f4d\uff0c\u6709 's','m','h' \u53ef\u9009\uff08seconds\uff0cminutes\uff0chours\uff09", "id": "f10206:m3"}
{"signature": "def _green(string):", "body": "return '<STR_LIT>'.format(string)<EOL>", "docstring": "\u5c06\u5b57\u4f53\u8f6c\u53d8\u4e3a\u7eff\u8272", "id": "f10206:m0"}
{"signature": "@contextmanager<EOL>def timeit_block(unit='<STR_LIT:s>', label=\"<STR_LIT>\"):", "body": "start = time.time()<EOL>try:<EOL><INDENT>yield<EOL><DEDENT>finally:<EOL><INDENT>_format(unit, time.time() - start, label)<EOL><DEDENT>", "docstring": "\u6d4b\u8bd5\u4ee3\u7801\u5757\u8017\u65f6\n\n:param unit: \u65f6\u95f4\u5355\u4f4d\uff0c\u6709 's','m','h' \u53ef\u9009\uff08seconds\uff0cminutes\uff0chours\uff09\n:param label: \u4ee3\u7801\u5757\u6807\u7b7e", "id": "f10206:m2"}
{"signature": "def loads(s, separator=DEFAULT, index_separator=DEFAULT, cls=dict, list_cls=list):", "body": "if isinstance(s, six.text_type):<EOL><INDENT>io = StringIO(s)<EOL><DEDENT>else:<EOL><INDENT>io = BytesIO(s)<EOL><DEDENT>return load(<EOL>fp=io,<EOL>separator=separator,<EOL>index_separator=index_separator,<EOL>cls=cls,<EOL>list_cls=list_cls,<EOL>)<EOL>", "docstring": "Loads an object from a string.\n\n    :param s: An object to parse\n    :type s: bytes or str\n    :param separator: The separator between key and value.  Defaults to u'|' or b'|', depending on the types.\n    :param index_separator: The separator between key and index.  Defaults to u'_' or b'_', depending on the types.\n    :param cls: A callable that returns a Mapping that is filled with pairs.  The most common alternate option would be OrderedDict.\n    :param list_cls: A callable that takes an iterable and returns a sequence.", "id": "f10211:m3"}
{"signature": "def load(fp, separator=DEFAULT, index_separator=DEFAULT, cls=dict, list_cls=list):", "body": "converter = None<EOL>output = cls()<EOL>arraykeys = set()<EOL>for line in fp:<EOL><INDENT>if converter is None:<EOL><INDENT>if isinstance(line, six.text_type):<EOL><INDENT>converter = six.u<EOL><DEDENT>else:<EOL><INDENT>converter = six.b<EOL><DEDENT>default_separator = converter('<STR_LIT:|>')<EOL>default_index_separator = converter('<STR_LIT:_>')<EOL>newline = converter('<STR_LIT:\\n>')<EOL>if separator is DEFAULT:<EOL><INDENT>separator = default_separator<EOL><DEDENT>if index_separator is DEFAULT:<EOL><INDENT>index_separator = default_index_separator<EOL><DEDENT><DEDENT>key, value = line.strip().split(separator, <NUM_LIT:1>)<EOL>keyparts = key.split(index_separator)<EOL>try:<EOL><INDENT>index = int(keyparts[-<NUM_LIT:1>])<EOL>endwithint = True<EOL><DEDENT>except ValueError:<EOL><INDENT>endwithint = False<EOL><DEDENT>if len(keyparts) > <NUM_LIT:1> and endwithint:<EOL><INDENT>basekey = key.rsplit(index_separator, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>if basekey not in arraykeys:<EOL><INDENT>arraykeys.add(basekey)<EOL><DEDENT>if basekey in output:<EOL><INDENT>if not isinstance(output[basekey], dict):<EOL><INDENT>output[basekey] = {-<NUM_LIT:1>: output[basekey]}<EOL><DEDENT><DEDENT>else:<EOL><INDENT>output[basekey] = {}<EOL><DEDENT>output[basekey][index] = value<EOL><DEDENT>else:<EOL><INDENT>if key in output and isinstance(output[key], dict):<EOL><INDENT>output[key][-<NUM_LIT:1>] = value<EOL><DEDENT>else:<EOL><INDENT>output[key] = value<EOL><DEDENT><DEDENT><DEDENT>for key in arraykeys:<EOL><INDENT>output[key] = list_cls(pair[<NUM_LIT:1>] for pair in sorted(six.iteritems(output[key])))<EOL><DEDENT>return output<EOL>", "docstring": "Load an object from the file pointer.\n\n    :param fp: A readable filehandle.\n    :param separator: The separator between key and value.  
Defaults to u'|' or b'|', depending on the types.\n    :param index_separator: The separator between key and index.  Defaults to u'_' or b'_', depending on the types.\n    :param cls: A callable that returns a Mapping that is filled with pairs.  The most common alternate option would be OrderedDict.\n    :param list_cls: A callable that takes an iterable and returns a sequence.", "id": "f10211:m2"}
{"signature": "def dump(obj, fp, startindex=<NUM_LIT:1>, separator=DEFAULT, index_separator=DEFAULT):", "body": "if startindex < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(startindex))<EOL><DEDENT>try:<EOL><INDENT>firstkey = next(iter(obj.keys()))<EOL><DEDENT>except StopIteration:<EOL><INDENT>return<EOL><DEDENT>if isinstance(firstkey, six.text_type):<EOL><INDENT>converter = six.u<EOL><DEDENT>else:<EOL><INDENT>converter = six.b<EOL><DEDENT>default_separator = converter('<STR_LIT:|>')<EOL>default_index_separator = converter('<STR_LIT:_>')<EOL>newline = converter('<STR_LIT:\\n>')<EOL>if separator is DEFAULT:<EOL><INDENT>separator = default_separator<EOL><DEDENT>if index_separator is DEFAULT:<EOL><INDENT>index_separator = default_index_separator<EOL><DEDENT>for key, value in six.iteritems(obj):<EOL><INDENT>if isinstance(value, (list, tuple, set)):<EOL><INDENT>for index, item in enumerate(value, start=startindex):<EOL><INDENT>fp.write(key)<EOL>fp.write(index_separator)<EOL>fp.write(converter(str(index)))<EOL>fp.write(separator)<EOL>fp.write(item)<EOL>fp.write(newline)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>fp.write(key)<EOL>fp.write(separator)<EOL>fp.write(value)<EOL>fp.write(newline)<EOL><DEDENT><DEDENT>", "docstring": "Dump an object in req format to the fp given.\n\n    :param Mapping obj: The object to serialize.  Must have a keys method.\n    :param fp: A writable that can accept all the types given.\n    :param separator: The separator between key and value.  Defaults to u'|' or b'|', depending on the types.\n    :param index_separator: The separator between key and index.  Defaults to u'_' or b'_', depending on the types.", "id": "f10211:m0"}
{"signature": "def b64_encode(data: bytes) -> bytes:", "body": "encoded = urlsafe_b64encode(data)<EOL>return encoded.replace(b'<STR_LIT:=>', b'<STR_LIT>')<EOL>", "docstring": ":param data: Data the encode.\n:type data: bytes\n:return: Base 64 encoded data with padding removed.\n:rtype: bytes", "id": "f10213:m0"}
{"signature": "def compare_token(expected: Union[str, bytes],<EOL>actual: Union[str, bytes]) -> bool:", "body": "expected = util.to_bytes(expected)<EOL>actual = util.to_bytes(actual)<EOL>_, expected_sig_seg = expected.rsplit(b'<STR_LIT:.>', <NUM_LIT:1>)<EOL>_, actual_sig_seg = actual.rsplit(b'<STR_LIT:.>', <NUM_LIT:1>)<EOL>expected_sig = util.b64_decode(expected_sig_seg)<EOL>actual_sig = util.b64_decode(actual_sig_seg)<EOL>return compare_signature(expected_sig, actual_sig)<EOL>", "docstring": "Compares the given tokens.\n\n:param expected: The expected token.\n:type expected: Union[str, bytes]\n:param actual: The actual token.\n:type actual: Union[str, bytes]\n:return: Do the tokens match?\n:rtype: bool", "id": "f10214:m5"}
{"signature": "def get_algorithm(alg: str) -> Callable:", "body": "if alg not in algorithms:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(alg))<EOL><DEDENT>return algorithms[alg]<EOL>", "docstring": ":param alg: The name of the requested `JSON Web Algorithm <https://tools.ietf.org/html/rfc7519#ref-JWA>`_. `RFC7518 <https://tools.ietf.org/html/rfc7518#section-3.2>`_ is related.\n:type alg: str\n:return: The requested algorithm.\n:rtype: Callable\n:raises: ValueError", "id": "f10214:m0"}
{"signature": "@property<EOL><INDENT>def valid_to(self) -> Union[int, None]:<DEDENT>", "body": "return self.registered_claims.get('<STR_LIT>')<EOL>", "docstring": ":return: Expires (`exp`) claim from the token.\n:rtype: Union[int, None]", "id": "f10214:c0:m9"}
{"signature": "@property<EOL><INDENT>def issued_at(self) -> Union[int, None]:<DEDENT>", "body": "return self.registered_claims.get('<STR_LIT>')<EOL>", "docstring": ":return: Issued at (`iat`) claim from the token.\n:rtype: Union[int, None]", "id": "f10214:c0:m13"}
{"signature": "def _pop_claims_from_payload(self):", "body": "claims_in_payload = [k for k in self.payload.keys() if<EOL>k in registered_claims.values()]<EOL>for name in claims_in_payload:<EOL><INDENT>self.registered_claims[name] = self.payload.pop(name)<EOL><DEDENT>", "docstring": "Check for registered claims in the payload and move them to the\nregistered_claims property, overwriting any extant claims.", "id": "f10214:c0:m18"}
{"signature": "def compare(self, jwt: '<STR_LIT>', compare_dates: bool = False) -> bool:", "body": "if self.secret != jwt.secret:<EOL><INDENT>return False<EOL><DEDENT>if self.payload != jwt.payload:<EOL><INDENT>return False<EOL><DEDENT>if self.alg != jwt.alg:<EOL><INDENT>return False<EOL><DEDENT>if self.header != jwt.header:<EOL><INDENT>return False<EOL><DEDENT>expected_claims = self.registered_claims<EOL>actual_claims = jwt.registered_claims<EOL>if not compare_dates:<EOL><INDENT>strip = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>expected_claims = {k: {v if k not in strip else None} for k, v in<EOL>expected_claims.items()}<EOL>actual_claims = {k: {v if k not in strip else None} for k, v in<EOL>actual_claims.items()}<EOL><DEDENT>if expected_claims != actual_claims:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Compare against another `Jwt`.\n\n:param jwt: The token to compare against.\n:type jwt: Jwt\n:param compare_dates: Should the comparision take dates into account?\n:type compare_dates: bool\n:return: Are the two Jwt's the same?\n:rtype: bool", "id": "f10214:c0:m21"}
{"signature": "@property<EOL><INDENT>def subject(self) -> Union[str, None]:<DEDENT>", "body": "return self.registered_claims.get('<STR_LIT>')<EOL>", "docstring": ":return: Subject (`sub`) claim from the token.\n:rtype: Union[str, None]", "id": "f10214:c0:m5"}
{"signature": "def valid(self, time: int = None) -> bool:", "body": "if time is None:<EOL><INDENT>epoch = datetime(<NUM_LIT>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>now = datetime.utcnow()<EOL>time = int((now - epoch).total_seconds())<EOL><DEDENT>if isinstance(self.valid_from, int) and time < self.valid_from:<EOL><INDENT>return False<EOL><DEDENT>if isinstance(self.valid_to, int) and time > self.valid_to:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Is the token valid? This method only checks the timestamps within the\ntoken and compares them against the current time if none is provided.\n\n:param time: The timestamp to validate against\n:type time: Union[int, None]\n:return: The validity of the token.\n:rtype: bool", "id": "f10214:c0:m17"}
{"signature": "@issuer.setter<EOL><INDENT>def issuer(self, issuer: str):<DEDENT>", "body": "self.registered_claims['<STR_LIT>'] = issuer<EOL>", "docstring": "Sets the issuer (`iss`) claim in the token.\n\n:param issuer: New value.\n:type issuer: str", "id": "f10214:c0:m4"}
{"signature": "def encode(secret: Union[str, bytes], payload: dict = None,<EOL>alg: str = default_alg, header: dict = None) -> str:", "body": "secret = util.to_bytes(secret)<EOL>payload = payload or {}<EOL>header = header or {}<EOL>header_json = util.to_bytes(json.dumps(header))<EOL>header_b64 = util.b64_encode(header_json)<EOL>payload_json = util.to_bytes(json.dumps(payload))<EOL>payload_b64 = util.b64_encode(payload_json)<EOL>pre_signature = util.join(header_b64, payload_b64)<EOL>signature = _hash(secret, pre_signature, alg)<EOL>signature_b64 = util.b64_encode(signature)<EOL>token = util.join(pre_signature, signature_b64)<EOL>return util.from_bytes(token)<EOL>", "docstring": ":param secret: The secret used to encode the token.\n:type secret: Union[str, bytes]\n:param payload: The payload to be encoded in the token.\n:type payload: dict\n:param alg: The algorithm used to hash the token.\n:type alg: str\n:param header: The header to be encoded in the token.\n:type header: dict\n:return: A new token\n:rtype: str", "id": "f10214:m2"}
{"signature": "@header.setter<EOL><INDENT>def header(self, header: dict):<DEDENT>", "body": "self._header = header<EOL>", "docstring": "Sets the token header.\n\n:param header: New header\n:type header: dict", "id": "f10214:c0:m2"}
{"signature": "def compare_signature(expected: Union[str, bytes],<EOL>actual: Union[str, bytes]) -> bool:", "body": "expected = util.to_bytes(expected)<EOL>actual = util.to_bytes(actual)<EOL>return hmac.compare_digest(expected, actual)<EOL>", "docstring": "Compares the given signatures.\n\n:param expected: The expected signature.\n:type expected: Union[str, bytes]\n:param actual: The actual signature.\n:type actual: Union[str, bytes]\n:return: Do the signatures match?\n:rtype: bool", "id": "f10214:m4"}
{"signature": "def __common_triplet(input_string, consonants, vowels):", "body": "output = consonants<EOL>while len(output) < <NUM_LIT:3>:<EOL><INDENT>try:<EOL><INDENT>output += vowels.pop(<NUM_LIT:0>)<EOL><DEDENT>except IndexError:<EOL><INDENT>output += '<STR_LIT:X>'<EOL><DEDENT><DEDENT>return output[:<NUM_LIT:3>]<EOL>", "docstring": "__common_triplet(input_string, consonants, vowels) -> string", "id": "f10221:m1"}
{"signature": "def __surname_triplet(input_string):", "body": "consonants, vowels = __consonants_and_vowels(input_string)<EOL>return __common_triplet(input_string, consonants, vowels)<EOL>", "docstring": "__surname_triplet(input_string) -> string", "id": "f10221:m3"}
{"signature": "def build(surname, name, birthday, sex, municipality):", "body": "<EOL>output = __surname_triplet(surname) + __name_triplet(name)<EOL>output += str(birthday.year)[<NUM_LIT:2>:]<EOL>output += MONTHSCODE[birthday.month - <NUM_LIT:1>]<EOL>output += \"<STR_LIT>\" % (sex.upper() == '<STR_LIT:M>' and birthday.day or <NUM_LIT> + birthday.day)<EOL>output += municipality<EOL>output += control_code(output)<EOL>assert isvalid(output)<EOL>return output<EOL>", "docstring": "``build(surname, name, birthday, sex, municipality) -> string``\n\n    Computes the fiscal code for the given person data.\n\n    eg: build('Rocca', 'Emanuele', datetime.datetime(1983, 11, 18), 'M', 'D969') \n        -> RCCMNL83S18D969H", "id": "f10221:m6"}
{"signature": "def get_sex(code):", "body": "assert isvalid(code)<EOL>return int(code[<NUM_LIT:9>:<NUM_LIT:11>]) < <NUM_LIT:32> and '<STR_LIT:M>' or '<STR_LIT:F>'<EOL>", "docstring": "``get_sex(code) -> string``\n\n    The sex of the person whose fiscal code is 'code'.\n\n    eg: sex('RCCMNL83S18D969H') -> 'M'\n        sex('CNTCHR83T41D969D') -> 'F'", "id": "f10221:m8"}
{"signature": "def predict(self, X):", "body": "x = X<EOL>if not isinstance(X, list):<EOL><INDENT>x = [X]<EOL><DEDENT>y = self.estimator.predict(x)<EOL>y = [item[<NUM_LIT:0>] for item in y]<EOL>y = [self._remove_prefix(label) for label in y]<EOL>if not isinstance(X, list):<EOL><INDENT>y = y[<NUM_LIT:0>]<EOL><DEDENT>return y<EOL>", "docstring": "In order to obtain the most likely label for a list of text\n\n        Parameters\n        ----------\n        X : list of string\n            Raw texts\n\n        Returns\n        -------\n        C : list of string\n            List labels", "id": "f10241:c0:m3"}
{"signature": "def fit(self, X, y, model_filename=None):", "body": "train_file = \"<STR_LIT>\"<EOL>X = [x.replace(\"<STR_LIT:\\n>\", \"<STR_LIT:U+0020>\") for x in X]<EOL>y = [_.replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:->\") for _ in y]<EOL>lines = [\"<STR_LIT>\".format(self.prefix, j, i) for i, j in zip(X, y)]<EOL>content = \"<STR_LIT:\\n>\".join(lines)<EOL>write(train_file, Text(content))<EOL>if model_filename:<EOL><INDENT>self.estimator = ft.supervised(train_file, model_filename)<EOL><DEDENT>else:<EOL><INDENT>self.estimator = ft.supervised(train_file, '<STR_LIT>')<EOL>os.remove('<STR_LIT>')<EOL><DEDENT>os.remove(train_file)<EOL>", "docstring": "Fit FastText according to X, y\n\n        Parameters\n        ----------\n        X : list of string\n            each item is a raw text\n        y : list of string\n            each item is a label", "id": "f10241:c0:m1"}
{"signature": "def fit(self, X, y):", "body": "<EOL>word_vector_transformer = WordVectorTransformer(padding='<STR_LIT>')<EOL>X = word_vector_transformer.fit_transform(X)<EOL>X = LongTensor(X)<EOL>self.word_vector_transformer = word_vector_transformer<EOL>y_transformer = LabelEncoder()<EOL>y = y_transformer.fit_transform(y)<EOL>y = torch.from_numpy(y)<EOL>self.y_transformer = y_transformer<EOL>dataset = CategorizedDataset(X, y)<EOL>dataloader = DataLoader(dataset,<EOL>batch_size=self.batch_size,<EOL>shuffle=True,<EOL>num_workers=<NUM_LIT:4>)<EOL>KERNEL_SIZES = self.kernel_sizes<EOL>NUM_KERNEL = self.num_kernel<EOL>EMBEDDING_DIM = self.embedding_dim<EOL>model = TextCNN(<EOL>vocab_size=word_vector_transformer.get_vocab_size(),<EOL>embedding_dim=EMBEDDING_DIM,<EOL>output_size=len(self.y_transformer.classes_),<EOL>kernel_sizes=KERNEL_SIZES,<EOL>num_kernel=NUM_KERNEL)<EOL>if USE_CUDA:<EOL><INDENT>model = model.cuda()<EOL><DEDENT>EPOCH = self.epoch<EOL>LR = self.lr<EOL>loss_function = nn.CrossEntropyLoss()<EOL>optimizer = optim.Adam(model.parameters(), lr=LR)<EOL>for epoch in range(EPOCH):<EOL><INDENT>losses = []<EOL>for i, data in enumerate(dataloader):<EOL><INDENT>X, y = data<EOL>X, y = Variable(X), Variable(y)<EOL>optimizer.zero_grad()<EOL>model.train()<EOL>output = model(X)<EOL>loss = loss_function(output, y)<EOL>losses.append(loss.data.tolist()[<NUM_LIT:0>])<EOL>loss.backward()<EOL>optimizer.step()<EOL>if i % <NUM_LIT:100> == <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\" % (<EOL>epoch, EPOCH, np.mean(losses)))<EOL>losses = []<EOL><DEDENT><DEDENT><DEDENT>self.model = model<EOL>", "docstring": "Fit KimCNNClassifier according to X, y\n\n        Parameters\n        ----------\n        X : list of string\n            each item is a raw text\n        y : list of string\n            each item is a label", "id": "f10244:c2:m1"}
{"signature": "def predict(self, X):", "body": "if isinstance(X[<NUM_LIT:0>], list):<EOL><INDENT>return [self.estimator.tag(x) for x in X]<EOL><DEDENT>return self.estimator.tag(X)<EOL>", "docstring": "Predict class labels for samples in X.\n\n        Parameters\n        ----------\n        X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n            Samples.", "id": "f10245:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def log(model_folder, binary_file=\"<STR_LIT>\",<EOL>log_folder=\"<STR_LIT>\"):<DEDENT>", "body": "file = join(model_folder, binary_file)<EOL>vectorizer = joblib.load(file)<EOL>output = []<EOL>for token in vectorizer.vocabulary_:<EOL><INDENT>index = vectorizer.vocabulary_[token]<EOL>ngram = len(token.split(\"<STR_LIT:U+0020>\"))<EOL>output.append({<EOL>\"<STR_LIT>\": token,<EOL>\"<STR_LIT>\": ngram,<EOL>\"<STR_LIT>\": vectorizer.idf_[index],<EOL>\"<STR_LIT>\": vectorizer.period_[index].item(),<EOL>\"<STR_LIT>\": vectorizer.df_[index],<EOL>})<EOL><DEDENT>output = sorted(output, key=lambda item: item[\"<STR_LIT>\"])<EOL>content = json.dumps(output, ensure_ascii=False)<EOL>write(join(log_folder, \"<STR_LIT>\"), content)<EOL>", "docstring": "Parameters\n----------\nmodel_folder : string\n    folder contains binaries file of model\nbinary_file : string\n    file path to tfidf binary file\nlog_folder : string\n    log folder", "id": "f10247:c0:m0"}
{"signature": "def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):", "body": "columnwidth = max([len(x) for x in labels] + [<NUM_LIT:5>])  <EOL>empty_cell = \"<STR_LIT:U+0020>\" * columnwidth<EOL>print(\"<STR_LIT:U+0020>\" + empty_cell, end=\"<STR_LIT:U+0020>\")<EOL>for label in labels:<EOL><INDENT>print(\"<STR_LIT>\".format(columnwidth) % label, end=\"<STR_LIT:U+0020>\")<EOL><DEDENT>print()<EOL>for i, label1 in enumerate(labels):<EOL><INDENT>print(\"<STR_LIT>\".format(columnwidth) % label1, end=\"<STR_LIT:U+0020>\")<EOL>for j in range(len(labels)):<EOL><INDENT>cell = \"<STR_LIT>\".format(columnwidth) % cm[i, j]<EOL>if hide_zeroes:<EOL><INDENT>cell = cell if float(cm[i, j]) != <NUM_LIT:0> else empty_cell<EOL><DEDENT>if hide_diagonal:<EOL><INDENT>cell = cell if i != j else empty_cell<EOL><DEDENT>if hide_threshold:<EOL><INDENT>cell = cell if cm[i, j] > hide_threshold else empty_cell<EOL><DEDENT>print(cell, end=\"<STR_LIT:U+0020>\")<EOL><DEDENT>print()<EOL><DEDENT>", "docstring": "pretty print for confusion matrixes", "id": "f10253:m0"}
{"signature": "def transform(self, transformer):", "body": "self.transformers.append(transformer)<EOL>from languageflow.transformer.tagged import TaggedTransformer<EOL>if isinstance(transformer, TaggedTransformer):<EOL><INDENT>self.X, self.y = transformer.transform(self.sentences)<EOL><DEDENT>if isinstance(transformer, TfidfVectorizer):<EOL><INDENT>self.X = transformer.fit_transform(self.X)<EOL><DEDENT>if isinstance(transformer, CountVectorizer):<EOL><INDENT>self.X = transformer.fit_transform(self.X)<EOL><DEDENT>if isinstance(transformer, NumberRemover):<EOL><INDENT>self.X = transformer.transform(self.X)<EOL><DEDENT>if isinstance(transformer, MultiLabelBinarizer):<EOL><INDENT>self.y = transformer.fit_transform(self.y)<EOL><DEDENT>", "docstring": "Add transformer to flow and apply transformer to data in flow\n\nParameters\n----------\ntransformer : Transformer\n    a transformer to transform data", "id": "f10255:c0:m2"}
{"signature": "def add_model(self, model):", "body": "self.models.append(model)<EOL>", "docstring": "Add model to flow", "id": "f10255:c0:m3"}
{"signature": "def __init__(self, filepath):", "body": "data_folder = join(dirname(dirname(__file__)), \"<STR_LIT:data>\")<EOL>data_file = join(data_folder, filepath)<EOL>self.data_file = data_file<EOL>self.words_data = None<EOL>", "docstring": "load words from Ho Ngoc Duc's dictionary\n\n        :param str filepath: filename of dictionary data\n        :type filepath: str", "id": "f10259:c0:m0"}
{"signature": "def serve(self, port=<NUM_LIT>):", "body": "from http.server import HTTPServer, CGIHTTPRequestHandler<EOL>os.chdir(self.log_folder)<EOL>httpd = HTTPServer(('<STR_LIT>', port), CGIHTTPRequestHandler)<EOL>print(\"<STR_LIT>\" + str(httpd.server_port))<EOL>webbrowser.open('<STR_LIT>'.format(port))<EOL>httpd.serve_forever()<EOL>", "docstring": "Start LanguageBoard web application\n\n        Parameters\n        ----------\n        port: int\n            port to serve web application", "id": "f10261:c0:m1"}
{"signature": "def url_to_filename(url: str, etag: str = None) -> str:", "body": "url_bytes = url.encode('<STR_LIT:utf-8>')<EOL>b64_bytes = base64.b64encode(url_bytes)<EOL>decoded = b64_bytes.decode('<STR_LIT:utf-8>')<EOL>if etag:<EOL><INDENT>etag = etag.replace('<STR_LIT:\">', '<STR_LIT>')<EOL>return f\"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>return decoded<EOL><DEDENT>", "docstring": "Converts a url into a filename in a reversible way.\nIf `etag` is specified, add it on the end, separated by a period\n(which necessarily won't appear in the base64-encoded filename).\nGet rid of the quotes in the etag, since Windows doesn't like them.", "id": "f10262:m1"}
{"signature": "@staticmethod<EOL><INDENT>def set_slower_interval(use_slower_interval: bool) -> None:<DEDENT>", "body": "if use_slower_interval:<EOL><INDENT>Tqdm.default_mininterval = <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>Tqdm.default_mininterval = <NUM_LIT:0.1><EOL><DEDENT>", "docstring": "If ``use_slower_interval`` is ``True``, we will dramatically slow down ``tqdm's`` default\noutput rate.  ``tqdm's`` default output rate is great for interactively watching progress,\nbut it is not great for log files.  You might want to set this if you are primarily going\nto be looking at output through log files, not the terminal.", "id": "f10262:c0:m1"}
{"signature": "def transform(self, raw_documents):", "body": "return [self._remove(document) for document in raw_documents]<EOL>", "docstring": "Remove number in each document\n\nParameters\n----------\nraw_documents : iterable\n    An iterable which yields either str, unicode\n\nReturns\n-------\nX : iterable\n    cleaned documents", "id": "f10273:c0:m2"}
{"signature": "def Text(text):", "body": "if not is_unicode(text):<EOL><INDENT>text = text.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>text = unicodedata.normalize(\"<STR_LIT>\", text)<EOL>return text<EOL>", "docstring": "provide a wrapper for python string\n    map byte to str (python 3)\n    map str to unicode (python 2)\n    all string in utf-8 encoding\n    normalize string to NFC", "id": "f10274:m0"}
{"signature": "def load_yaml_config(filepath):", "body": "with open(filepath) as f:<EOL><INDENT>return yaml.load(f)<EOL><DEDENT>", "docstring": "Load yaml config file at the given path.", "id": "f10279:m4"}
{"signature": "def fetch_public_key(repo):", "body": "keyurl = '<STR_LIT>'.format(repo)<EOL>data = json.loads(urlopen(keyurl).read().decode())<EOL>if '<STR_LIT:key>' not in data:<EOL><INDENT>errmsg = \"<STR_LIT>\".format(repo)<EOL>errmsg += \"<STR_LIT>\"<EOL>raise ValueError(errmsg)<EOL><DEDENT>return data['<STR_LIT:key>']<EOL>", "docstring": "Download RSA public key Travis will use for this repo.\n\n    Travis API docs: http://docs.travis-ci.com/api/#repository-keys", "id": "f10279:m2"}
{"signature": "def main(args):", "body": "public_key = fetch_public_key(args.repo)<EOL>password = args.password or getpass('<STR_LIT>')<EOL>update_travis_deploy_password(encrypt(public_key, password.encode()))<EOL>print(\"<STR_LIT>\")<EOL>", "docstring": "Add a PyPI password to .travis.yml so that Travis can deploy to PyPI.\n\n    Fetch the Travis public key for the repo, and encrypt the PyPI password\n    with it before adding, so that only Travis can decrypt and use the PyPI\n    password.", "id": "f10279:m7"}
{"signature": "def prepend_line(filepath, line):", "body": "with open(filepath) as f:<EOL><INDENT>lines = f.readlines()<EOL><DEDENT>lines.insert(<NUM_LIT:0>, line)<EOL>with open(filepath, '<STR_LIT:w>') as f:<EOL><INDENT>f.writelines(lines)<EOL><DEDENT>", "docstring": "Rewrite a file adding a line to its beginning.", "id": "f10279:m3"}
{"signature": "def encrypt(pubkey, password):", "body": "key = load_key(pubkey)<EOL>encrypted_password = key.encrypt(password, PKCS1v15())<EOL>return base64.b64encode(encrypted_password)<EOL>", "docstring": "Encrypt password using given RSA public key and encode it with base64.\n\n    The encrypted password can only be decrypted by someone with the\n    private key (in this case, only Travis).", "id": "f10279:m1"}
{"signature": "def basicConfig(**kwargs):", "body": "logging.basicConfig(**kwargs)<EOL>logging._acquireLock()<EOL>try:<EOL><INDENT>stream = logging.root.handlers[<NUM_LIT:0>]<EOL>stream.setFormatter(<EOL>ColoredFormatter(<EOL>fmt=kwargs.get('<STR_LIT>', BASIC_FORMAT),<EOL>datefmt=kwargs.get('<STR_LIT>', None)))<EOL><DEDENT>finally:<EOL><INDENT>logging._releaseLock()<EOL><DEDENT>", "docstring": "Call ``logging.basicConfig`` and override the formatter it creates.", "id": "f10288:m0"}
{"signature": "def ensure_configured(func):", "body": "@functools.wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>if len(logging.root.handlers) == <NUM_LIT:0>:<EOL><INDENT>basicConfig()<EOL><DEDENT>return func(*args, **kwargs)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Modify a function to call ``basicConfig`` first if no handlers exist.", "id": "f10288:m1"}
{"signature": "def format(self, record):", "body": "record = ColoredRecord(record)<EOL>record.log_color = self.color(self.log_colors, record.levelname)<EOL>if self.secondary_log_colors:<EOL><INDENT>for name, log_colors in list(self.secondary_log_colors.items()):<EOL><INDENT>color = self.color(log_colors, record.levelname)<EOL>setattr(record, name + '<STR_LIT>', color)<EOL><DEDENT><DEDENT>if sys.version_info > (<NUM_LIT:2>, <NUM_LIT:7>):<EOL><INDENT>message = super(ColoredFormatter, self).format(record)<EOL><DEDENT>else:<EOL><INDENT>message = logging.Formatter.format(self, record)<EOL><DEDENT>if self.reset and not message.endswith(escape_codes['<STR_LIT>']):<EOL><INDENT>message += escape_codes['<STR_LIT>']<EOL><DEDENT>return message<EOL>", "docstring": "Format a message from a record object.", "id": "f10289:c1:m2"}
{"signature": "def format(self, record):", "body": "if isinstance(self.fmt, dict):<EOL><INDENT>self._fmt = self.fmt[record.levelname]<EOL>if sys.version_info > (<NUM_LIT:3>, <NUM_LIT:2>):<EOL><INDENT>if self.style not in logging._STYLES:<EOL><INDENT>raise ValueError('<STR_LIT>' % '<STR_LIT:U+002C>'.join(<EOL>list(logging._STYLES.keys())))<EOL><DEDENT>self._style = logging._STYLES[self.style][<NUM_LIT:0>](self._fmt)<EOL><DEDENT><DEDENT>if sys.version_info > (<NUM_LIT:2>, <NUM_LIT:7>):<EOL><INDENT>message = super(LevelFormatter, self).format(record)<EOL><DEDENT>else:<EOL><INDENT>message = ColoredFormatter.format(self, record)<EOL><DEDENT>return message<EOL>", "docstring": "Customize the message format based on the log level.", "id": "f10289:c2:m1"}
{"signature": "def __init__(self, fmt=None, datefmt=None, style='<STR_LIT:%>',<EOL>log_colors=None, reset=True,<EOL>secondary_log_colors=None):", "body": "if sys.version_info > (<NUM_LIT:2>, <NUM_LIT:7>):<EOL><INDENT>super(LevelFormatter, self).__init__(<EOL>fmt=fmt, datefmt=datefmt, style=style, log_colors=log_colors,<EOL>reset=reset, secondary_log_colors=secondary_log_colors)<EOL><DEDENT>else:<EOL><INDENT>ColoredFormatter.__init__(<EOL>self, fmt=fmt, datefmt=datefmt, style=style,<EOL>log_colors=log_colors, reset=reset,<EOL>secondary_log_colors=secondary_log_colors)<EOL><DEDENT>self.style = style<EOL>self.fmt = fmt<EOL>", "docstring": "Set the per-loglevel format that will be used.\n\nSupports fmt as a dict. All other args are passed on to the\n``colorlog.ColoredFormatter`` constructor.\n\n:Parameters:\n- fmt (dict):\n    A mapping of log levels (represented as strings, e.g. 'WARNING') to\n    different formatters. (*New in version 2.7.0)\n(All other parameters are the same as in colorlog.ColoredFormatter)\n\nExample:\n\nformatter = colorlog.LevelFormatter(fmt={\n    'DEBUG':'%(log_color)s%(msg)s (%(module)s:%(lineno)d)',\n    'INFO': '%(log_color)s%(msg)s',\n    'WARNING': '%(log_color)sWARN: %(msg)s (%(module)s:%(lineno)d)',\n    'ERROR': '%(log_color)sERROR: %(msg)s (%(module)s:%(lineno)d)',\n    'CRITICAL': '%(log_color)sCRIT: %(msg)s (%(module)s:%(lineno)d)',\n})", "id": "f10289:c2:m0"}
{"signature": "def parse_colors(sequence):", "body": "return '<STR_LIT>'.join(escape_codes[n] for n in sequence.split('<STR_LIT:U+002C>') if n)<EOL>", "docstring": "Return escape codes from a color sequence.", "id": "f10290:m1"}
{"signature": "def path(filename):", "body": "return os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)<EOL>", "docstring": "Return an absolute path to a file in the current directory.", "id": "f10293:m0"}
{"signature": "@property<EOL><INDENT>def has_exception(self):<DEDENT>", "body": "return bool(self._exception)<EOL>", "docstring": "Returns True if self._exception is not empty.", "id": "f10302:c0:m16"}
{"signature": "@property<EOL><INDENT>def stderr(self):<DEDENT>", "body": "if self._streaming:<EOL><INDENT>stderr = []<EOL>while not self.__stderr.empty():<EOL><INDENT>try:<EOL><INDENT>line = self.__stderr.get_nowait()<EOL>stderr.append(line)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>stderr = self.__stderr<EOL><DEDENT>return stderr<EOL>", "docstring": "Converts stderr string to a list.", "id": "f10302:c0:m11"}
{"signature": "@property<EOL><INDENT>def stdout(self):<DEDENT>", "body": "if self._streaming:<EOL><INDENT>stdout = []<EOL>while not self.__stdout.empty():<EOL><INDENT>try:<EOL><INDENT>line = self.__stdout.get_nowait()<EOL>stdout.append(line)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>stdout = self.__stdout<EOL><DEDENT>return stdout<EOL>", "docstring": "Converts stdout string to a list.", "id": "f10302:c0:m10"}
{"signature": "@property<EOL><INDENT>def is_success(self):<DEDENT>", "body": "return self.is_complete and self.rc == <NUM_LIT:0><EOL>", "docstring": "Returns if the result of the command was a success.\nTrue for success, False for failure.", "id": "f10302:c0:m14"}
{"signature": "@property<EOL><INDENT>def is_failure(self):<DEDENT>", "body": "return self.is_complete and not self.rc == <NUM_LIT:0><EOL>", "docstring": "Returns if the result of the command was a failure.\nTrue for failure, False for succes.", "id": "f10302:c0:m15"}
{"signature": "def print_stdout(self, always_print=False):", "body": "if self.__stdout or always_print:<EOL><INDENT>self.__echo.info(\"<STR_LIT>\" + \"<STR_LIT:->\" * <NUM_LIT:100>)<EOL>self.__format_lines_info(self.stdout)<EOL>self.__echo.info(\"<STR_LIT>\" + \"<STR_LIT:->\" * <NUM_LIT:100>)<EOL><DEDENT>", "docstring": "Prints the stdout to console - if there is any stdout, otherwise does nothing.\n:param always_print:   print the stdout, even if there is nothing in the buffer (default: false)", "id": "f10302:c0:m17"}
{"signature": "def run(self, halt_on_nonzero=True, quiet=False, q=False, streaming=False):", "body": "commands = str(self)<EOL>if not (quiet or q):<EOL><INDENT>self._echo.cmd(commands)<EOL><DEDENT>env = self._context[<NUM_LIT:0>].get('<STR_LIT>', {}) if len(self._context) > <NUM_LIT:0> else os.environ<EOL>executable = self.current_context.get('<STR_LIT>')<EOL>try:<EOL><INDENT>process = subprocess.Popen(commands,<EOL>bufsize=<NUM_LIT:1>,<EOL>shell=True,<EOL>env=env,<EOL>stdin=subprocess.PIPE,<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE,<EOL>executable=executable,<EOL>universal_newlines=True)<EOL>result = Result(process, commands, self._context, streaming, halt_on_nonzero=halt_on_nonzero)<EOL><DEDENT>except Exception as e:<EOL><INDENT>result = Result(None, commands, self._context, exception=e)<EOL>result.dump_exception()<EOL>if halt_on_nonzero:<EOL><INDENT>raise e<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>self.clear()<EOL><DEDENT>return result<EOL>", "docstring": "After building your commands, call `run()` to have your code executed.", "id": "f10303:c0:m8"}
{"signature": "@property<EOL><INDENT>def current_context(self):<DEDENT>", "body": "return self._context[-<NUM_LIT:1>] if len(self._context) > <NUM_LIT:0> else {}<EOL>", "docstring": "Returns the context that Sultan is running on", "id": "f10303:c0:m2"}
{"signature": "def spit(self):", "body": "self._echo.log(str(self))<EOL>", "docstring": "Logs to the logger the command.", "id": "f10303:c0:m12"}
{"signature": "def and_(self):", "body": "self._add(And(self, \"<STR_LIT>\"))<EOL>return self<EOL>", "docstring": "Combines multiple commands using `&&`.\n\nUsage::\n\n    # runs: 'cd /tmp && touch foobar.txt'\n    s = Sultan()\n    s.cd(\"/tmp\").and_().touch(\"foobar.txt\").run()", "id": "f10303:c0:m14"}
{"signature": "def pipe(self):", "body": "self._add(Pipe(self, '<STR_LIT:|>'))<EOL>return self<EOL>", "docstring": "Pipe commands in Sultan.\n\nUsage::\n\n    # runs: 'cat /var/log/foobar.log | grep 192.168.1.1'\n    s = Sultan()\n    s.cat(\"/var/log/foobar.log\").pipe().grep(\"192.168.1.1\").run()", "id": "f10303:c0:m13"}
{"signature": "def callLater(delay, func, *args, **kwargs):", "body": "pool = NSAutoreleasePool.alloc().init()<EOL>obj = PyObjCAppHelperCaller_wrap.alloc().initWithArgs_((func, args, kwargs))<EOL>obj.callLater_(delay)<EOL>del obj<EOL>del pool<EOL>", "docstring": "call a function on the main thread after a delay (async)", "id": "f10308:m1"}
{"signature": "def callAfter(func, *args, **kwargs):", "body": "pool = NSAutoreleasePool.alloc().init()<EOL>obj = PyObjCAppHelperCaller_wrap.alloc().initWithArgs_((func, args, kwargs))<EOL>obj.callAfter_(None)<EOL>del obj<EOL>del pool<EOL>", "docstring": "call a function on the main thread (async)", "id": "f10308:m0"}
{"signature": "def endSheetMethod(meth):", "body": "return objc.selector(meth, signature=b'<STR_LIT>')<EOL>", "docstring": "Return a selector that can be used as the delegate callback for\nsheet methods", "id": "f10308:m3"}
{"signature": "def stopEventLoop():", "body": "stopper = PyObjCAppHelperRunLoopStopper_wrap.currentRunLoopStopper()<EOL>if stopper is None:<EOL><INDENT>if NSApp() is not None:<EOL><INDENT>NSApp().terminate_(None)<EOL>return True<EOL><DEDENT>return False<EOL><DEDENT>NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(<EOL><NUM_LIT:0.0>,<EOL>stopper,<EOL>'<STR_LIT>',<EOL>None,<EOL>False)<EOL>return True<EOL>", "docstring": "Stop the current event loop if possible\nreturns True if it expects that it was successful, False otherwise", "id": "f10308:m2"}
{"signature": "def registerevent(self, event_name, fn_name, *args):", "body": "if not isinstance(event_name, str):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self._pollEvents._callback[event_name] = [event_name, fn_name, args]<EOL>return self._remote_registerevent(event_name)<EOL>", "docstring": "Register at-spi event\n\n@param event_name: Event name in at-spi format.\n@type event_name: string\n@param fn_name: Callback function\n@type fn_name: function\n@param *args: arguments to be passed to the callback function\n@type *args: var args\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10311:c3:m35"}
{"signature": "def removecallback(self, window_name):", "body": "if window_name in self._pollEvents._callback:<EOL><INDENT>del self._pollEvents._callback[window_name]<EOL><DEDENT>return self._remote_removecallback(window_name)<EOL>", "docstring": "Remove registered callback on window create\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10311:c3:m34"}
{"signature": "def imagecapture(self, window_name=None, out_file=None, x=<NUM_LIT:0>, y=<NUM_LIT:0>,<EOL>width=None, height=None):", "body": "if not out_file:<EOL><INDENT>out_file = tempfile.mktemp('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>out_file = os.path.expanduser(out_file)<EOL><DEDENT>if _ldtp_windows_env:<EOL><INDENT>if width == None:<EOL><INDENT>width = -<NUM_LIT:1><EOL><DEDENT>if height == None:<EOL><INDENT>height = -<NUM_LIT:1><EOL><DEDENT>if window_name == None:<EOL><INDENT>window_name = '<STR_LIT>'<EOL><DEDENT><DEDENT>data = self._remote_imagecapture(window_name, x, y, width, height)<EOL>f = open(out_file, '<STR_LIT:wb>')<EOL>f.write(b64decode(data))<EOL>f.close()<EOL>return out_file<EOL>", "docstring": "Captures screenshot of the whole desktop or given window\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param x: x co-ordinate value\n@type x: integer\n@param y: y co-ordinate value\n@type y: integer\n@param width: width co-ordinate value\n@type width: integer\n@param height: height co-ordinate value\n@type height: integer\n\n@return: screenshot filename\n@rtype: string", "id": "f10311:c3:m11"}
{"signature": "def log(self, message, level=logging.DEBUG):", "body": "if _ldtp_debug:<EOL><INDENT>print(message)<EOL><DEDENT>self.logger.log(level, str(message))<EOL>return <NUM_LIT:1><EOL>", "docstring": "Logs the message in the root logger with the log level\n@param message: Message to be logged\n@type message: string\n@param level: Log level, defaul DEBUG\n@type level: integer\n\n@return: 1 on success and 0 on error\n@rtype: integer", "id": "f10311:c3:m5"}
{"signature": "def __del__(self):", "body": "self._stop = True<EOL>", "docstring": "Stop callback when destroying this class", "id": "f10311:c5:m1"}
{"signature": "def startlog(self, filename, overwrite=True):", "body": "if not filename:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>if overwrite:<EOL><INDENT>_mode = '<STR_LIT:w>'<EOL><DEDENT>else:<EOL><INDENT>_mode = '<STR_LIT:a>'<EOL><DEDENT>self._file_logger = self.logging.FileHandler(os.path.expanduser(filename), _mode)<EOL>_formatter = self.logging.Formatter('<STR_LIT>')<EOL>self._file_logger.setFormatter(_formatter)<EOL>self.logger.addHandler(self._file_logger)<EOL>if _ldtp_debug:<EOL><INDENT>self._file_logger.setLevel(logging.DEBUG)<EOL><DEDENT>else:<EOL><INDENT>self._file_logger.setLevel(logging.ERROR)<EOL><DEDENT>return <NUM_LIT:1><EOL>", "docstring": "@param filename: Start logging on the specified file\n@type filename: string\n@param overwrite: Overwrite or append\n    False - Append log to an existing file\n    True - Write log to a new file. If file already exist, \n    then erase existing file content and start log\n@type overwrite: boolean\n\n@return: 1 on success and 0 on error\n@rtype: integer", "id": "f10311:c3:m6"}
{"signature": "def onwindowcreate(self, window_name, fn_name, *args):", "body": "self._pollEvents._callback[window_name] = [\"<STR_LIT>\", fn_name, args]<EOL>return self._remote_onwindowcreate(window_name)<EOL>", "docstring": "On window create, call the function with given arguments\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param fn_name: Callback function\n@type fn_name: function\n@param *args: arguments to be passed to the callback function\n@type *args: var args\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10311:c3:m33"}
{"signature": "def __del__(self):", "body": "self._stop = True<EOL>", "docstring": "Stop polling when destroying this class", "id": "f10311:c4:m1"}
{"signature": "def setmin(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>object_handle.AXValue = <NUM_LIT:0><EOL>return <NUM_LIT:1><EOL>", "docstring": "Set min value\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m3"}
{"signature": "def verifyscrollbarhorizontal(self, window_name, object_name):", "body": "try:<EOL><INDENT>object_handle = self._get_object_handle(window_name, object_name)<EOL>if object_handle.AXOrientation == \"<STR_LIT>\":<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Verify scrollbar is horizontal\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m1"}
{"signature": "def scrolldown(self, window_name, object_name):", "body": "if not self.verifyscrollbarvertical(window_name, object_name):<EOL><INDENT>raise LdtpServerException('<STR_LIT>')<EOL><DEDENT>return self.setmax(window_name, object_name)<EOL>", "docstring": "Scroll down\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m5"}
{"signature": "def scrollup(self, window_name, object_name):", "body": "if not self.verifyscrollbarvertical(window_name, object_name):<EOL><INDENT>raise LdtpServerException('<STR_LIT>')<EOL><DEDENT>return self.setmin(window_name, object_name)<EOL>", "docstring": "Scroll up\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m4"}
{"signature": "def verifyscrollbarvertical(self, window_name, object_name):", "body": "try:<EOL><INDENT>object_handle = self._get_object_handle(window_name, object_name)<EOL>if object_handle.AXOrientation == \"<STR_LIT>\":<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Verify scrollbar is vertical\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m0"}
{"signature": "def oneleft(self, window_name, object_name, iterations):", "body": "if not self.verifyscrollbarhorizontal(window_name, object_name):<EOL><INDENT>raise LdtpServerException('<STR_LIT>')<EOL><DEDENT>object_handle = self._get_object_handle(window_name, object_name)<EOL>i = <NUM_LIT:0><EOL>minValue = <NUM_LIT:1.0> / <NUM_LIT:8><EOL>flag = False<EOL>while i < iterations:<EOL><INDENT>if object_handle.AXValue <= <NUM_LIT:0>:<EOL><INDENT>raise LdtpServerException('<STR_LIT>')<EOL><DEDENT>object_handle.AXValue -= minValue<EOL>time.sleep(<NUM_LIT:1.0> / <NUM_LIT:100>)<EOL>flag = True<EOL>i += <NUM_LIT:1><EOL><DEDENT>if flag:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>raise LdtpServerException('<STR_LIT>')<EOL><DEDENT>", "docstring": "Press scrollbar left with number of iterations\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@param interations: iterations to perform on slider increase\n@type iterations: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10315:c0:m11"}
{"signature": "def selectlastrow(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>cell = object_handle.AXRows[-<NUM_LIT:1>]<EOL>if not cell.AXSelected:<EOL><INDENT>object_handle.activate()<EOL>cell.AXSelected = True<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>return <NUM_LIT:1><EOL>", "docstring": "Select last row\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m6"}
{"signature": "def gettablerowindex(self, window_name, object_name, row_text):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>index = <NUM_LIT:0><EOL>for cell in object_handle.AXRows:<EOL><INDENT>if re.match(row_text,<EOL>cell.AXChildren[<NUM_LIT:0>].AXValue):<EOL><INDENT>return index<EOL><DEDENT>index += <NUM_LIT:1><EOL><DEDENT>raise LdtpServerException(u\"<STR_LIT>\" % row_text)<EOL>", "docstring": "Get table row index matching given text\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_text: Row text to select\n@type row_text: string\n\n@return: row index matching the text on success.\n@rtype: integer", "id": "f10317:c0:m14"}
{"signature": "def getcellsize(self, window_name, object_name, row_index, column=<NUM_LIT:0>):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>count = len(object_handle.AXRows)<EOL>if row_index < <NUM_LIT:0> or row_index > count:<EOL><INDENT>raise LdtpServerException('<STR_LIT>' % row_index)<EOL><DEDENT>cell = object_handle.AXRows[row_index]<EOL>count = len(cell.AXChildren)<EOL>if column < <NUM_LIT:0> or column > count:<EOL><INDENT>raise LdtpServerException('<STR_LIT>' % column)<EOL><DEDENT>obj = cell.AXChildren[column]<EOL>if not re.search(\"<STR_LIT>\", obj.AXRole):<EOL><INDENT>obj = cell.AXChildren[column]<EOL><DEDENT>return self._getobjectsize(obj)<EOL>", "docstring": "Get cell size\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_index: Row index to get\n@type row_index: integer\n@param column: Column index to get, default value 0\n@type column: integer\n\n@return: cell coordinates on success.\n@rtype: list", "id": "f10317:c0:m9"}
{"signature": "def getcellvalue(self, window_name, object_name, row_index, column=<NUM_LIT:0>):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>count = len(object_handle.AXRows)<EOL>if row_index < <NUM_LIT:0> or row_index > count:<EOL><INDENT>raise LdtpServerException('<STR_LIT>' % row_index)<EOL><DEDENT>cell = object_handle.AXRows[row_index]<EOL>count = len(cell.AXChildren)<EOL>if column < <NUM_LIT:0> or column > count:<EOL><INDENT>raise LdtpServerException('<STR_LIT>' % column)<EOL><DEDENT>obj = cell.AXChildren[column]<EOL>if not re.search(\"<STR_LIT>\", obj.AXRole):<EOL><INDENT>obj = cell.AXChildren[column]<EOL><DEDENT>return obj.AXValue<EOL>", "docstring": "Get cell value\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_index: Row index to get\n@type row_index: integer\n@param column: Column index to get, default value 0\n@type column: integer\n\n@return: cell value on success.\n@rtype: string", "id": "f10317:c0:m8"}
{"signature": "def selectrowpartialmatch(self, window_name, object_name, row_text):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>for cell in object_handle.AXRows:<EOL><INDENT>if re.search(row_text,<EOL>cell.AXChildren[<NUM_LIT:0>].AXValue):<EOL><INDENT>if not cell.AXSelected:<EOL><INDENT>object_handle.activate()<EOL>cell.AXSelected = True<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>raise LdtpServerException(u\"<STR_LIT>\" % row_text)<EOL>", "docstring": "Select row partial match\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_text: Row text to select\n@type row_text: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m4"}
{"signature": "def checkrow(self, window_name, object_name, row_index, column=<NUM_LIT:0>):", "body": "raise LdtpServerException(\"<STR_LIT>\")<EOL>", "docstring": "Check row\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_index: Row index to get\n@type row_index: integer\n@param column: Column index to get, default value 0\n@type column: integer\n\n@return: cell value on success.\n@rtype: string", "id": "f10317:c0:m11"}
{"signature": "def getrowcount(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>return len(object_handle.AXRows)<EOL>", "docstring": "Get count of rows in table object.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: Number of rows.\n@rtype: integer", "id": "f10317:c0:m0"}
{"signature": "def uncheckrow(self, window_name, object_name, row_index, column=<NUM_LIT:0>):", "body": "raise LdtpServerException(\"<STR_LIT>\")<EOL>", "docstring": "Check row\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_index: Row index to get\n@type row_index: integer\n@param column: Column index to get, default value 0\n@type column: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m13"}
{"signature": "def multiselect(self, window_name, object_name, row_text_list, partial_match=False):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>object_handle.activate()<EOL>selected = False<EOL>try:<EOL><INDENT>window = self._get_front_most_window()<EOL><DEDENT>except (IndexError,):<EOL><INDENT>window = self._get_any_window()<EOL><DEDENT>for row_text in row_text_list:<EOL><INDENT>selected = False<EOL>for cell in object_handle.AXRows:<EOL><INDENT>parent_cell = cell<EOL>cell = self._getfirstmatchingchild(cell, \"<STR_LIT>\")<EOL>if not cell:<EOL><INDENT>continue<EOL><DEDENT>if re.match(row_text, cell.AXValue):<EOL><INDENT>selected = True<EOL>if not parent_cell.AXSelected:<EOL><INDENT>x, y, width, height = self._getobjectsize(parent_cell)<EOL>window.clickMouseButtonLeftWithMods((x + width / <NUM_LIT:2>,<EOL>y + height / <NUM_LIT:2>),<EOL>['<STR_LIT>'])<EOL>self.wait(<NUM_LIT:0.5>)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>break<EOL><DEDENT><DEDENT>if not selected:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % row_text)<EOL><DEDENT><DEDENT>if not selected:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\")<EOL><DEDENT>return <NUM_LIT:1><EOL>", "docstring": "Select multiple row\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param row_text_list: Row list with matching text to select\n@type row_text: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10317:c0:m2"}
{"signature": "def mouseleftclick(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>self._grabfocus(object_handle)<EOL>x, y, width, height = self._getobjectsize(object_handle)<EOL>object_handle.clickMouseButtonLeft((x + width / <NUM_LIT:2>, y + height / <NUM_LIT:2>))<EOL>return <NUM_LIT:1><EOL>", "docstring": "Mouse left click on an object.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10319:c0:m0"}
{"signature": "def generatemouseevent(self, x, y, eventType=\"<STR_LIT>\",<EOL>drag_button_override='<STR_LIT>'):", "body": "if drag_button_override not in mouse_click_override:<EOL><INDENT>raise ValueError('<STR_LIT>' %drag_button_override)<EOL><DEDENT>global drag_button_remembered<EOL>point = (x, y)<EOL>button = centre  <EOL>click_type = None<EOL>if eventType == \"<STR_LIT>\" or eventType == \"<STR_LIT>\":<EOL><INDENT>if drag_button_override is not '<STR_LIT>':<EOL><INDENT>events = [mouse_click_override[drag_button_override]]<EOL><DEDENT>elif drag_button_remembered:<EOL><INDENT>events = [drag_button_remembered]<EOL><DEDENT>else:<EOL><INDENT>events = [move]<EOL><DEDENT>if eventType == \"<STR_LIT>\":<EOL><INDENT>point = CGEventGetLocation(CGEventCreate(None))<EOL>point.x += x<EOL>point.y += y<EOL><DEDENT><DEDENT>elif eventType == \"<STR_LIT>\":<EOL><INDENT>events = [press_left]<EOL>drag_button_remembered = drag_left<EOL><DEDENT>elif eventType == \"<STR_LIT>\":<EOL><INDENT>events = [release_left]<EOL>drag_button_remembered = None<EOL><DEDENT>elif eventType == \"<STR_LIT>\":<EOL><INDENT>events = [press_left, release_left]<EOL><DEDENT>elif eventType == \"<STR_LIT>\":<EOL><INDENT>events = [press_left, release_left]<EOL>click_type = double_click<EOL><DEDENT>elif eventType == \"<STR_LIT>\":<EOL><INDENT>events = [press_other]<EOL>drag_button_remembered = drag_other<EOL><DEDENT>elif eventType == \"<STR_LIT>\":<EOL><INDENT>events = [release_other]<EOL>drag_button_remembered = None<EOL><DEDENT>elif eventType == \"<STR_LIT>\":<EOL><INDENT>events = [press_other, release_other]<EOL><DEDENT>elif eventType == \"<STR_LIT>\":<EOL><INDENT>events = [press_other, release_other]<EOL>click_type = double_click<EOL><DEDENT>elif eventType == \"<STR_LIT>\":<EOL><INDENT>events = [press_right]<EOL>drag_button_remembered = drag_right<EOL><DEDENT>elif eventType == \"<STR_LIT>\":<EOL><INDENT>events = [release_right]<EOL>drag_button_remembered = None<EOL><DEDENT>elif eventType == \"<STR_LIT>\":<EOL><INDENT>events = [press_right, release_right]<EOL><DEDENT>elif eventType == \"<STR_LIT>\":<EOL><INDENT>events = [press_right, release_right]<EOL>click_type = double_click<EOL><DEDENT>else:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % eventType)<EOL><DEDENT>for event in events:<EOL><INDENT>CG_event = CGEventCreateMouseEvent(None, event, point, button)<EOL>if click_type:<EOL><INDENT>CGEventSetIntegerValueField(<EOL>CG_event, kCGMouseEventClickState, click_type)<EOL><DEDENT>CGEventPost(kCGHIDEventTap, CG_event)<EOL>time.sleep(<NUM_LIT>)<EOL><DEDENT>return <NUM_LIT:1><EOL>", "docstring": "Generate mouse event on x, y co-ordinates.\n\n@param x: X co-ordinate\n@type x: int\n@param y: Y co-ordinate\n@type y: int\n@param eventType: Mouse click type\n@type eventType: str\n@param drag_button_override: Any drag_xxx value\n        Only relevant for movements, i.e. |type| = \"abs\" or \"rel\"\n        Quartz is not fully compatible with windows, so for drags\n        the drag button must be explicitly defined. generatemouseevent\n        will remember the last button pressed by default, and drag\n        that button, use this argument to override that.\n@type drag_button_override: str\n\n@return: 1 on success.\n@rtype: integer", "id": "f10319:c0:m2"}
{"signature": "def mouserightclick(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>self._grabfocus(object_handle)<EOL>x, y, width, height = self._getobjectsize(object_handle)<EOL>object_handle.clickMouseButtonRight((x + width / <NUM_LIT:2>, y + height / <NUM_LIT:2>))<EOL>return <NUM_LIT:1><EOL>", "docstring": "Mouse right click on an object.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10319:c0:m1"}
{"signature": "def verifyshowlist(self, window_name, object_name):", "body": "return self.verifydropdown(window_name, object_name)<EOL>", "docstring": "Verify drop down list / menu poped up\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10321:c0:m6"}
{"signature": "def hidelist(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>object_handle.activate()<EOL>object_handle.sendKey(AXKeyCodeConstants.ESCAPE)<EOL>return <NUM_LIT:1><EOL>", "docstring": "Hide combo box list / menu\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10321:c0:m4"}
{"signature": "def getallitem(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>object_handle.Press()<EOL>self.wait(<NUM_LIT:1>)<EOL>child = None<EOL>try:<EOL><INDENT>if not object_handle.AXChildren:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\")<EOL><DEDENT>children = object_handle.AXChildren[<NUM_LIT:0>]<EOL>if not children:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\")<EOL><DEDENT>children = children.AXChildren<EOL>items = []<EOL>for child in children:<EOL><INDENT>label = self._get_title(child)<EOL>if label:<EOL><INDENT>items.append(label)<EOL><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>if child:<EOL><INDENT>child.Cancel()<EOL><DEDENT><DEDENT>return items<EOL>", "docstring": "Get all combo box item\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: list of string on success.\n@rtype: list", "id": "f10321:c0:m2"}
{"signature": "def selectitem(self, window_name, object_name, item_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>self._grabfocus(object_handle.AXWindow)<EOL>try:<EOL><INDENT>object_handle.Press()<EOL><DEDENT>except AttributeError:<EOL><INDENT>x, y, width, height = self._getobjectsize(object_handle)<EOL>self.generatemouseevent(x + <NUM_LIT:5>, y + <NUM_LIT:5>, \"<STR_LIT>\")<EOL>self.wait(<NUM_LIT:5>)<EOL>handle = self._get_sub_menu_handle(object_handle, item_name)<EOL>x, y, width, height = self._getobjectsize(handle)<EOL>self.generatemouseevent(x + <NUM_LIT:5>, y + <NUM_LIT:5>, \"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>self.wait(<NUM_LIT:1>)<EOL>menu_list = re.split(\"<STR_LIT:;>\", item_name)<EOL>try:<EOL><INDENT>menu_handle = self._internal_menu_handler(object_handle, menu_list,<EOL>True)<EOL>self.wait(<NUM_LIT:1>)<EOL>if not menu_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" %menu_list[-<NUM_LIT:1>])<EOL><DEDENT>menu_handle.Press()<EOL><DEDENT>except LdtpServerException:<EOL><INDENT>object_handle.activate()<EOL>object_handle.sendKey(AXKeyCodeConstants.ESCAPE)<EOL>raise<EOL><DEDENT>return <NUM_LIT:1><EOL>", "docstring": "Select combo box / layered pane item\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param item_name: Item name to select\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10321:c0:m0"}
{"signature": "def enterstring(self, window_name, object_name='<STR_LIT>', data='<STR_LIT>'):", "body": "if not object_name and not data:<EOL><INDENT>return self.generatekeyevent(window_name)<EOL><DEDENT>else:<EOL><INDENT>object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>self._grabfocus(object_handle)<EOL>object_handle.sendKeys(data)<EOL>return <NUM_LIT:1><EOL><DEDENT>", "docstring": "Type string sequence.\n\n@param window_name: Window name to focus on, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to focus on, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param data: data to type.\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m3"}
{"signature": "def pastetext(self, window_name, object_name, position=<NUM_LIT:0>):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>size = object_handle.AXNumberOfCharacters<EOL>if position > size:<EOL><INDENT>position = size<EOL><DEDENT>if position < <NUM_LIT:0>:<EOL><INDENT>position = <NUM_LIT:0><EOL><DEDENT>clipboard = Clipboard.paste()<EOL>data = object_handle.AXValue<EOL>object_handle.AXValue = data[:position] + clipboard + data[position:]<EOL>return <NUM_LIT:1><EOL>", "docstring": "paste text from start position to end position\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param position: Position to paste the text, default 0\n@type object_name: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m17"}
{"signature": "def inserttext(self, window_name, object_name, position, data):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>existing_data = object_handle.AXValue<EOL>size = len(existing_data)<EOL>if position < <NUM_LIT:0>:<EOL><INDENT>position = <NUM_LIT:0><EOL><DEDENT>if position > size:<EOL><INDENT>position = size<EOL><DEDENT>object_handle.AXValue = existing_data[:position] + data +existing_data[position:]<EOL>return <NUM_LIT:1><EOL>", "docstring": "Insert string sequence in given position.\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param position: position where text has to be entered.\n@type data: int\n@param data: data to type.\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m6"}
{"signature": "def cuttext(self, window_name, object_name, start_position, end_position=-<NUM_LIT:1>):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>size = object_handle.AXNumberOfCharacters<EOL>if end_position == -<NUM_LIT:1> or end_position > size:<EOL><INDENT>end_position = size<EOL><DEDENT>if start_position < <NUM_LIT:0>:<EOL><INDENT>start_position = <NUM_LIT:0><EOL><DEDENT>data = object_handle.AXValue<EOL>Clipboard.copy(data[start_position:end_position])<EOL>object_handle.AXValue = data[:start_position] + data[end_position:]<EOL>return <NUM_LIT:1><EOL>", "docstring": "cut text from start position to end position\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param start_position: Start position\n@type object_name: integer\n@param end_position: End position, default -1\nCut all the text from start position till end\n@type object_name: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m14"}
{"signature": "def generatekeyevent(self, data):", "body": "KeyComboAction(data)<EOL>return <NUM_LIT:1><EOL>", "docstring": "Generates key event to the system, this simulates the best user like\ninteraction via keyboard.\n\n@param data: data to type.\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m0"}
{"signature": "def copytext(self, window_name, object_name, start_position, end_position=-<NUM_LIT:1>):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>size = object_handle.AXNumberOfCharacters<EOL>if end_position == -<NUM_LIT:1> or end_position > size:<EOL><INDENT>end_position = size<EOL><DEDENT>if start_position < <NUM_LIT:0>:<EOL><INDENT>start_position = <NUM_LIT:0><EOL><DEDENT>data = object_handle.AXValue<EOL>Clipboard.copy(data[start_position:end_position])<EOL>return <NUM_LIT:1><EOL>", "docstring": "copy text from start position to end position\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param start_position: Start position\n@type object_name: integer\n@param end_position: End position, default -1\nCopy all the text from start position till end\n@type object_name: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m15"}
{"signature": "def istextstateenabled(self, window_name, object_name):", "body": "try:<EOL><INDENT>object_handle = self._get_object_handle(window_name, object_name)<EOL>if object_handle.AXEnabled:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>except LdtpServerException:<EOL><INDENT>pass<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Verifies text state enabled or not\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10324:c0:m9"}
{"signature": "def settextvalue(self, window_name, object_name, data):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>object_handle.AXValue = data<EOL>return <NUM_LIT:1><EOL>", "docstring": "Type string sequence.\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param data: data to type.\n@type data: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m4"}
{"signature": "def verifypartialmatch(self, window_name, object_name, partial_text):", "body": "try:<EOL><INDENT>if re.search(fnmatch.translate(partial_text),<EOL>self.gettextvalue(window_name,<EOL>object_name)):<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Verify partial text\n\n@param window_name: Window name to type in, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to type in, either full name,\nLDTP's name convention, or a Unix glob. \n@type object_name: string\n@param partial_text: Partial text to match\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10324:c0:m7"}
{"signature": "def doesmenuitemexist(self, window_name, object_name):", "body": "try:<EOL><INDENT>menu_handle = self._get_menu_handle(window_name, object_name,<EOL>False)<EOL>return <NUM_LIT:1><EOL><DEDENT>except LdtpServerException:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "Check a menu item exist.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n@param strict_hierarchy: Mandate menu hierarchy if set to True\n@type object_name: boolean\n\n@return: 1 on success.\n@rtype: integer", "id": "f10325:c0:m2"}
{"signature": "def listsubmenus(self, window_name, object_name):", "body": "menu_handle = self._get_menu_handle(window_name, object_name)<EOL>role, label = self._ldtpize_accessible(menu_handle)<EOL>menu_clicked = False<EOL>try:<EOL><INDENT>if not menu_handle.AXChildren:<EOL><INDENT>menu_clicked = True<EOL>try:<EOL><INDENT>menu_handle.Press()<EOL>self.wait(<NUM_LIT:1>)<EOL><DEDENT>except atomac._a11y.ErrorCannotComplete:<EOL><INDENT>pass<EOL><DEDENT>if not menu_handle.AXChildren:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" %label)<EOL><DEDENT><DEDENT>children = menu_handle.AXChildren[<NUM_LIT:0>]<EOL>sub_menus = []<EOL>for current_menu in children.AXChildren:<EOL><INDENT>role, label = self._ldtpize_accessible(current_menu)<EOL>if not label:<EOL><INDENT>continue<EOL><DEDENT>sub_menus.append(u\"<STR_LIT>\" % (role, label))<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>if menu_clicked:<EOL><INDENT>menu_handle.Cancel()<EOL><DEDENT><DEDENT>return sub_menus<EOL>", "docstring": "List children of menu item\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: menu item in list on success.\n@rtype: list", "id": "f10325:c0:m4"}
{"signature": "def menuitemenabled(self, window_name, object_name):", "body": "try:<EOL><INDENT>menu_handle = self._get_menu_handle(window_name, object_name,<EOL>False)<EOL>if menu_handle.AXEnabled:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>except LdtpServerException:<EOL><INDENT>pass<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Verify a menu item is enabled\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10325:c0:m3"}
{"signature": "def verifymenucheck(self, window_name, object_name):", "body": "try:<EOL><INDENT>menu_handle = self._get_menu_handle(window_name, object_name,<EOL>False)<EOL>try:<EOL><INDENT>if menu_handle.AXMenuItemMarkChar:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>except atomac._a11y.Error:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>except LdtpServerException:<EOL><INDENT>pass<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Verify a menu item is checked\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10325:c0:m5"}
{"signature": "def menucheck(self, window_name, object_name):", "body": "menu_handle = self._get_menu_handle(window_name, object_name)<EOL>if not menu_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>try:<EOL><INDENT>if menu_handle.AXMenuItemMarkChar:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>except atomac._a11y.Error:<EOL><INDENT>pass<EOL><DEDENT>menu_handle.Press()<EOL>return <NUM_LIT:1><EOL>", "docstring": "Check (click) a menu item.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10325:c0:m7"}
{"signature": "def verifymenuuncheck(self, window_name, object_name):", "body": "try:<EOL><INDENT>menu_handle = self._get_menu_handle(window_name, object_name,<EOL>False)<EOL>try:<EOL><INDENT>if not menu_handle.AXMenuItemMarkChar:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>except atomac._a11y.Error:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>except LdtpServerException:<EOL><INDENT>pass<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Verify a menu item is un-checked\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob. Or menu heirarchy\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10325:c0:m6"}
{"signature": "def objectexist(self, window_name, object_name):", "body": "try:<EOL><INDENT>object_handle = self._get_object_handle(window_name, object_name)<EOL>return <NUM_LIT:1><EOL><DEDENT>except LdtpServerException:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "Checks whether a window or component exists.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 if GUI was found, 0 if not.\n@rtype: integer", "id": "f10327:c0:m33"}
{"signature": "def getobjectlist(self, window_name):", "body": "try:<EOL><INDENT>window_handle, name, app = self._get_window_handle(window_name, True)<EOL>object_list = self._get_appmap(window_handle, name, True)<EOL><DEDENT>except atomac._a11y.ErrorInvalidUIElement:<EOL><INDENT>self._windows = {}<EOL>window_handle, name, app = self._get_window_handle(window_name, True)<EOL>object_list = self._get_appmap(window_handle, name, True)<EOL><DEDENT>return object_list.keys()<EOL>", "docstring": "Get list of items in given GUI.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: list of items in LDTP naming convention.\n@rtype: list", "id": "f10327:c0:m12"}
{"signature": "def activatewindow(self, window_name):", "body": "window_handle = self._get_window_handle(window_name)<EOL>self._grabfocus(window_handle)<EOL>return <NUM_LIT:1><EOL>", "docstring": "Activate window.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m21"}
{"signature": "def minimizewindow(self, window_name):", "body": "return self._singleclick(window_name, \"<STR_LIT>\")<EOL>", "docstring": "Minimize window.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m19"}
{"signature": "def stateenabled(self, window_name, object_name):", "body": "try:<EOL><INDENT>object_handle = self._get_object_handle(window_name, object_name)<EOL>if object_handle.AXEnabled:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>except LdtpServerException:<EOL><INDENT>pass<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Check whether an object state is enabled or not\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10327:c0:m34"}
{"signature": "def verifyuncheck(self, window_name, object_name):", "body": "try:<EOL><INDENT>object_handle = self._get_object_handle(window_name, object_name,<EOL>wait_for_object=False)<EOL>if object_handle.AXValue == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>except LdtpServerException:<EOL><INDENT>pass<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Verify uncheck item.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success 0 on failure.\n@rtype: integer", "id": "f10327:c0:m38"}
{"signature": "def poll_events(self):", "body": "if not self._callback_event:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return self._callback_event.pop()<EOL>", "docstring": "Poll for any registered events or window create events\n\n@return: window name\n@rtype: string", "id": "f10327:c0:m6"}
{"signature": "def stopprocessmonitor(self, process_name):", "body": "if process_name in self._process_stats:<EOL><INDENT>self._process_stats[process_name].stop()<EOL><DEDENT>return <NUM_LIT:1><EOL>", "docstring": "Stop memory and CPU monitoring\n\n@param process_name: Process name, ex: firefox-bin.\n@type process_name: string\n\n@return: 1 on success\n@rtype: integer", "id": "f10327:c0:m9"}
{"signature": "def getmemorystat(self, process_name):", "body": "<EOL>_stat_inst = ProcessStats(process_name)<EOL>_stat_list = []<EOL>for p in _stat_inst.get_cpu_memory_stat():<EOL><INDENT>try:<EOL><INDENT>_stat_list.append(round(p.get_memory_percent(), <NUM_LIT:2>))<EOL><DEDENT>except psutil.AccessDenied:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return _stat_list<EOL>", "docstring": "get memory stat\n\n@param process_name: Process name, ex: firefox-bin.\n@type process_name: string\n\n@return: memory stat list on success, else empty list\n        If same process name, running multiple instance,\n        get the stat of all the process memory usage\n@rtype: list", "id": "f10327:c0:m11"}
{"signature": "def getchild(self, window_name, child_name='<STR_LIT>', role='<STR_LIT>', parent='<STR_LIT>'):", "body": "matches = []<EOL>if role:<EOL><INDENT>role = re.sub('<STR_LIT:U+0020>', '<STR_LIT:_>', role)<EOL><DEDENT>self._windows = {}<EOL>if parent and (child_name or role):<EOL><INDENT>_window_handle, _window_name =self._get_window_handle(window_name)[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>if not _window_handle:<EOL><INDENT>raise LdtpServerException('<STR_LIT>' %window_name)<EOL><DEDENT>appmap = self._get_appmap(_window_handle, _window_name)<EOL>obj = self._get_object_map(window_name, parent)<EOL>def _get_all_children_under_obj(obj, child_list):<EOL><INDENT>if role and obj['<STR_LIT:class>'] == role:<EOL><INDENT>child_list.append(obj['<STR_LIT:label>'])<EOL><DEDENT>elif child_name and self._match_name_to_appmap(child_name, obj):<EOL><INDENT>child_list.append(obj['<STR_LIT:label>'])<EOL><DEDENT>if obj:<EOL><INDENT>children = obj['<STR_LIT>']<EOL><DEDENT>if not children:<EOL><INDENT>return child_list<EOL><DEDENT>for child in children.split():<EOL><INDENT>return _get_all_children_under_obj(appmap[child],<EOL>child_list)<EOL><DEDENT><DEDENT>matches = _get_all_children_under_obj(obj, [])<EOL>if not matches:<EOL><INDENT>if child_name:<EOL><INDENT>_name = '<STR_LIT>' % child_name<EOL><DEDENT>if role:<EOL><INDENT>_role = '<STR_LIT>' % role<EOL><DEDENT>if parent:<EOL><INDENT>_parent = '<STR_LIT>' % parent<EOL><DEDENT>exception = '<STR_LIT>' % (_name, _role, _parent)<EOL>raise LdtpServerException(exception)<EOL><DEDENT>return matches<EOL><DEDENT>_window_handle, _window_name =self._get_window_handle(window_name)[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>if not _window_handle:<EOL><INDENT>raise LdtpServerException('<STR_LIT>' %window_name)<EOL><DEDENT>appmap = self._get_appmap(_window_handle, _window_name)<EOL>for name in appmap.keys():<EOL><INDENT>obj = appmap[name]<EOL>if role and not child_name and obj['<STR_LIT:class>'] == role:<EOL><INDENT>matches.append(name)<EOL><DEDENT>if parent 
and child_name and not role andself._match_name_to_appmap(parent, obj):<EOL><INDENT>matches.append(name)<EOL><DEDENT>if child_name and not role andself._match_name_to_appmap(child_name, obj):<EOL><INDENT>return name<EOL>matches.append(name)<EOL><DEDENT>if role and child_name and obj['<STR_LIT:class>'] == role andself._match_name_to_appmap(child_name, obj):<EOL><INDENT>matches.append(name)<EOL><DEDENT><DEDENT>if not matches:<EOL><INDENT>_name = '<STR_LIT>'<EOL>_role = '<STR_LIT>'<EOL>_parent = '<STR_LIT>'<EOL>if child_name:<EOL><INDENT>_name = '<STR_LIT>' % child_name<EOL><DEDENT>if role:<EOL><INDENT>_role = '<STR_LIT>' % role<EOL><DEDENT>if parent:<EOL><INDENT>_parent = '<STR_LIT>' % parent<EOL><DEDENT>exception = '<STR_LIT>' % (_name, _role, _parent)<EOL>raise LdtpServerException(exception)<EOL><DEDENT>return matches<EOL>", "docstring": "Gets the list of object available in the window, which matches\ncomponent name or role name or both.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param child_name: Child name to search for.\n@type child_name: string\n@param role: role name to search for, or an empty string for wildcard.\n@type role: string\n@param parent: parent name to search for, or an empty string for wildcard.\n@type role: string\n@return: list of matched children names\n@rtype: list", "id": "f10327:c0:m15"}
{"signature": "def click(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>size = self._getobjectsize(object_handle)<EOL>self._grabfocus(object_handle)<EOL>self.wait(<NUM_LIT:0.5>)<EOL>self.generatemouseevent(size[<NUM_LIT:0>] + size[<NUM_LIT:2>] / <NUM_LIT:2>, size[<NUM_LIT:1>] + size[<NUM_LIT:3>] / <NUM_LIT:2>, \"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL>", "docstring": "Click item.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m22"}
{"signature": "def waittillguinotexist(self, window_name, object_name='<STR_LIT>', guiTimeOut=<NUM_LIT:30>):", "body": "timeout = <NUM_LIT:0><EOL>while timeout < guiTimeOut:<EOL><INDENT>if not self.guiexist(window_name, object_name):<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>time.sleep(<NUM_LIT:1>)<EOL>timeout += <NUM_LIT:1><EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Wait till a window does not exist.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@param guiTimeOut: Wait timeout in seconds\n@type guiTimeOut: integer\n\n@return: 1 if GUI has gone away, 0 if not.\n@rtype: integer", "id": "f10327:c0:m32"}
{"signature": "def waittillguiexist(self, window_name, object_name='<STR_LIT>',<EOL>guiTimeOut=<NUM_LIT:30>, state='<STR_LIT>'):", "body": "timeout = <NUM_LIT:0><EOL>while timeout < guiTimeOut:<EOL><INDENT>if self.guiexist(window_name, object_name):<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>time.sleep(<NUM_LIT:1>)<EOL>timeout += <NUM_LIT:1><EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Wait till a window or component exists.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@param guiTimeOut: Wait timeout in seconds\n@type guiTimeOut: integer\n@param state: Object state used only when object_name is provided.\n@type object_name: string\n\n@return: 1 if GUI was found, 0 if not.\n@rtype: integer", "id": "f10327:c0:m31"}
{"signature": "def grabfocus(self, window_name, object_name=None):", "body": "if not object_name:<EOL><INDENT>handle, name, app = self._get_window_handle(window_name)<EOL><DEDENT>else:<EOL><INDENT>handle = self._get_object_handle(window_name, object_name)<EOL><DEDENT>return self._grabfocus(handle)<EOL>", "docstring": "Grab focus.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m27"}
{"signature": "def getcpustat(self, process_name):", "body": "<EOL>_stat_inst = ProcessStats(process_name)<EOL>_stat_list = []<EOL>for p in _stat_inst.get_cpu_memory_stat():<EOL><INDENT>try:<EOL><INDENT>_stat_list.append(p.get_cpu_percent())<EOL><DEDENT>except psutil.AccessDenied:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return _stat_list<EOL>", "docstring": "get CPU stat for the give process name\n\n@param process_name: Process name, ex: firefox-bin.\n@type process_name: string\n\n@return: cpu stat list on success, else empty list\n        If same process name, running multiple instance,\n        get the stat of all the process CPU usage\n@rtype: list", "id": "f10327:c0:m10"}
{"signature": "def getallstates(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>_obj_states = []<EOL>if object_handle.AXEnabled:<EOL><INDENT>_obj_states.append(\"<STR_LIT>\")<EOL><DEDENT>if object_handle.AXFocused:<EOL><INDENT>_obj_states.append(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>if object_handle.AXFocused:<EOL><INDENT>_obj_states.append(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if re.match(\"<STR_LIT>\", object_handle.AXRole, re.M | re.U | re.L) orre.match(\"<STR_LIT>\", object_handle.AXRole,<EOL>re.M | re.U | re.L):<EOL><INDENT>if object_handle.AXValue:<EOL><INDENT>_obj_states.append(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return _obj_states<EOL>", "docstring": "Get all states of given object\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: list of string on success.\n@rtype: list", "id": "f10327:c0:m23"}
{"signature": "def hasstate(self, window_name, object_name, state, guiTimeOut=<NUM_LIT:0>):", "body": "try:<EOL><INDENT>object_handle = self._get_object_handle(window_name, object_name)<EOL>if state == \"<STR_LIT>\":<EOL><INDENT>return int(object_handle.AXEnabled)<EOL><DEDENT>elif state == \"<STR_LIT>\":<EOL><INDENT>return int(object_handle.AXFocused)<EOL><DEDENT>elif state == \"<STR_LIT>\":<EOL><INDENT>return int(object_handle.AXFocused)<EOL><DEDENT>elif state == \"<STR_LIT>\":<EOL><INDENT>if re.match(\"<STR_LIT>\", object_handle.AXRole,<EOL>re.M | re.U | re.L) orre.match(\"<STR_LIT>\", object_handle.AXRole,<EOL>re.M | re.U | re.L):<EOL><INDENT>if object_handle.AXValue:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "has state\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n@type window_name: string\n@param state: State of the current object.\n@type object_name: string\n@param guiTimeOut: Wait timeout in seconds\n@type guiTimeOut: integer\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m24"}
{"signature": "def launchapp(self, cmd, args=[], delay=<NUM_LIT:0>, env=<NUM_LIT:1>, lang=\"<STR_LIT:C>\"):", "body": "try:<EOL><INDENT>atomac.NativeUIElement.launchAppByBundleId(cmd)<EOL>return <NUM_LIT:1><EOL><DEDENT>except RuntimeError:<EOL><INDENT>if atomac.NativeUIElement.launchAppByBundlePath(cmd, args):<EOL><INDENT>try:<EOL><INDENT>time.sleep(int(delay))<EOL><DEDENT>except ValueError:<EOL><INDENT>time.sleep(<NUM_LIT:5>)<EOL><DEDENT>return <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % cmd)<EOL><DEDENT><DEDENT>", "docstring": "Launch application.\n\n@param cmd: Command line string to execute.\n@type cmd: string\n@param args: Arguments to the application\n@type args: list\n@param delay: Delay after the application is launched\n@type delay: int\n@param env: GNOME accessibility environment to be set or not\n@type env: int\n@param lang: Application language to be used\n@type lang: string\n\n@return: 1 on success\n@rtype: integer\n\n@raise LdtpServerException: When command fails", "id": "f10327:c0:m16"}
{"signature": "def getapplist(self):", "body": "app_list = []<EOL>self._update_apps()<EOL>for gui in self._running_apps:<EOL><INDENT>name = gui.localizedName()<EOL>try:<EOL><INDENT>name = unicode(name)<EOL><DEDENT>except NameError:<EOL><INDENT>name = str(name)<EOL><DEDENT>except UnicodeEncodeError:<EOL><INDENT>pass<EOL><DEDENT>app_list.append(name)<EOL><DEDENT>return list(set(app_list))<EOL>", "docstring": "Get all accessibility application name that are currently running\n\n@return: list of appliction name of string type on success.\n@rtype: list", "id": "f10327:c0:m3"}
{"signature": "def maximizewindow(self, window_name):", "body": "return self._singleclick(window_name, \"<STR_LIT>\")<EOL>", "docstring": "Maximize window.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m20"}
{"signature": "def getwindowsize(self, window_name):", "body": "return self.getobjectsize(window_name)<EOL>", "docstring": "Get window size.\n\n@param window_name: Window name to get size of.\n@type window_name: string\n\n@return: list of dimensions [x, y, w, h]\n@rtype: list", "id": "f10327:c0:m26"}
{"signature": "def uncheck(self, window_name, object_name):", "body": "object_handle = self._get_object_handle(window_name, object_name)<EOL>if not object_handle.AXEnabled:<EOL><INDENT>raise LdtpServerException(u\"<STR_LIT>\" % object_name)<EOL><DEDENT>if object_handle.AXValue == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>self._grabfocus(object_handle)<EOL>x, y, width, height = self._getobjectsize(object_handle)<EOL>self.generatemouseevent(x + width / <NUM_LIT:2>, y + height / <NUM_LIT:2>, \"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL>", "docstring": "Uncheck item.\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param object_name: Object name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type object_name: string\n\n@return: 1 on success.\n@rtype: integer", "id": "f10327:c0:m36"}
{"signature": "def getwindowlist(self):", "body": "return self._get_windows(True).keys()<EOL>", "docstring": "Get all accessibility window that are currently open\n\n@return: list of window names in LDTP format of string type on success.\n@rtype: list", "id": "f10327:c0:m4"}
{"signature": "def _ldtpize_accessible(self, acc):", "body": "actual_role = self._get_role(acc)<EOL>label = self._get_title(acc)<EOL>if re.match(\"<STR_LIT>\", actual_role, re.M | re.U | re.L):<EOL><INDENT>strip = r\"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>strip = r\"<STR_LIT>\"<EOL><DEDENT>if label:<EOL><INDENT>label = re.sub(strip, u\"<STR_LIT>\", label)<EOL><DEDENT>role = abbreviated_roles.get(actual_role, \"<STR_LIT>\")<EOL>if self._ldtp_debug and role == \"<STR_LIT>\":<EOL><INDENT>print(actual_role, acc)<EOL><DEDENT>return role, label<EOL>", "docstring": "Get LDTP format accessibile name\n\n@param acc: Accessible handle\n@type acc: object\n\n@return: object type, stripped object name (associated / direct),\n                associated label\n@rtype: tuple", "id": "f10328:c2:m8"}
{"signature": "def _glob_match(self, pattern, string):", "body": "<EOL>return bool(re.match(fnmatch.translate(pattern), string,<EOL>re.M | re.U | re.L))<EOL>", "docstring": "Match given string, by escaping regex characters", "id": "f10328:c2:m9"}
{"signature": "def sendGlobalKey(self, keychr):", "body": "return self._sendKey(keychr, globally=True)<EOL>", "docstring": "Send one character without modifiers to the system.\n\n        It will not send an event directly to the application, system will\n        dispatch it to the window which has keyboard focus.\n\n        Parameters: keychr - Single keyboard character which will be sent.", "id": "f10331:c1:m15"}
{"signature": "def _getBundleId(self):", "body": "ra = AppKit.NSRunningApplication<EOL>app = ra.runningApplicationWithProcessIdentifier_(<EOL>self._getPid())<EOL>return app.bundleIdentifier()<EOL>", "docstring": "Return the bundle ID of the application.", "id": "f10331:c0:m43"}
{"signature": "@classmethod<EOL><INDENT>def setSystemWideTimeout(cls, timeout=<NUM_LIT:0.0>):<DEDENT>", "body": "return cls.getSystemObject().setTimeout(timeout)<EOL>", "docstring": "Set the system-wide accessibility timeout.\n\n        Optional: timeout (non-negative float; defaults to 0)\n                  A value of 0 will reset the timeout to the system default.\n        Returns: None.", "id": "f10331:c0:m7"}
{"signature": "@staticmethod<EOL><INDENT>def _isSingleCharacter(keychr):<DEDENT>", "body": "if not keychr:<EOL><INDENT>return False<EOL><DEDENT>if len(keychr) == <NUM_LIT:1>:<EOL><INDENT>return True<EOL><DEDENT>return keychr.count('<STR_LIT:<>') == <NUM_LIT:1> and keychr.count('<STR_LIT:>>') == <NUM_LIT:1> andkeychr[<NUM_LIT:0>] == '<STR_LIT:<>' and keychr[-<NUM_LIT:1>] == '<STR_LIT:>>'<EOL>", "docstring": "Check whether given keyboard character is a single character.\n\n        Parameters: key character which will be checked.\n        Returns: True when given key character is a single character.", "id": "f10331:c0:m22"}
{"signature": "def getElementAtPosition(self, coord):", "body": "return self._getElementAtPosition(float(coord[<NUM_LIT:0>]), float(coord[<NUM_LIT:1>]))<EOL>", "docstring": "Return the AXUIElement at the given coordinates.\n\n        If self is behind other windows, this function will return self.", "id": "f10331:c1:m7"}
{"signature": "def tripleClickMouse(self, coord):", "body": "<EOL>modFlags = <NUM_LIT:0><EOL>for i in range(<NUM_LIT:2>):<EOL><INDENT>self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags)<EOL><DEDENT>self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags,<EOL>clickCount=<NUM_LIT:3>)<EOL>self._postQueuedEvents()<EOL>", "docstring": "Triple-click primary mouse button.\n\n        Parameters: coordinates to click (assume primary is left button)\n        Returns: None", "id": "f10331:c1:m30"}
{"signature": "def textFields(self, match=None):", "body": "return self._convenienceMatch('<STR_LIT>', '<STR_LIT>', match)<EOL>", "docstring": "Return a list of textfields with an optional match parameter.", "id": "f10331:c1:m43"}
{"signature": "def waitForFocusToMatchCriteria(self, timeout=<NUM_LIT:10>, **kwargs):", "body": "def _matchFocused(retelem, **kwargs):<EOL><INDENT>return retelem if retelem._match(**kwargs) else None<EOL><DEDENT>retelem = None<EOL>return self._waitFor(timeout, '<STR_LIT>',<EOL>callback=_matchFocused,<EOL>args=(retelem,),<EOL>**kwargs)<EOL>", "docstring": "Convenience method to wait for focused element to change\n        (to element matching kwargs criteria).\n\n        Returns: Element or None", "id": "f10331:c0:m27"}
{"signature": "def waitFor(self, timeout, notification, **kwargs):", "body": "return self._waitFor(timeout, notification, **kwargs)<EOL>", "docstring": "Generic wait for a UI event that matches the specified\n        criteria to occur.\n\n        For customization of the callback, use keyword args labeled\n        'callback', 'args', and 'kwargs' for the callback fn, callback args,\n        and callback kwargs, respectively.  Also note that on return,\n        the observer-returned UI element will be included in the first\n        argument if 'args' are given.  Note also that if the UI element is\n        destroyed, callback should not use it, otherwise the function will\n        hang.", "id": "f10331:c1:m31"}
{"signature": "def staticTextsR(self, match=None):", "body": "return self._convenienceMatchR('<STR_LIT>', '<STR_LIT>', match)<EOL>", "docstring": "Return a list of statictexts with an optional match parameter", "id": "f10331:c1:m52"}
{"signature": "def _generateFind(self, **kwargs):", "body": "for needle in self._generateChildren():<EOL><INDENT>if needle._match(**kwargs):<EOL><INDENT>yield needle<EOL><DEDENT><DEDENT>", "docstring": "Generator which yields matches on AXChildren.", "id": "f10331:c0:m34"}
{"signature": "@classmethod<EOL><INDENT>def getAppRefByBundleId(cls, bundleId):<DEDENT>", "body": "ra = AppKit.NSRunningApplication<EOL>apps = ra.runningApplicationsWithBundleIdentifier_(bundleId)<EOL>if len(apps) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(('<STR_LIT>'<EOL>'<STR_LIT>' % bundleId))<EOL><DEDENT>pid = apps[<NUM_LIT:0>].processIdentifier()<EOL>return cls.getAppRefByPid(pid)<EOL>", "docstring": "Get the top level element for the application with the specified\nbundle ID, such as com.vmware.fusion.", "id": "f10331:c0:m2"}
{"signature": "def _clearEventQueue(self):", "body": "if hasattr(self, '<STR_LIT>'):<EOL><INDENT>self.eventList.clear()<EOL><DEDENT>", "docstring": "Clear the event queue.", "id": "f10331:c0:m13"}
{"signature": "def _findFirstR(self, **kwargs):", "body": "for item in self._generateFindR(**kwargs):<EOL><INDENT>return item<EOL><DEDENT>", "docstring": "Search recursively for the first object that matches the criteria.", "id": "f10331:c0:m39"}
{"signature": "def findFirstR(self, **kwargs):", "body": "return self._findFirstR(**kwargs)<EOL>", "docstring": "Search recursively for the first object that matches the\n        criteria.", "id": "f10331:c1:m4"}
{"signature": "def _generateChildrenR(self, target=None):", "body": "if target is None:<EOL><INDENT>target = self<EOL><DEDENT>try:<EOL><INDENT>children = target.AXChildren<EOL><DEDENT>except _a11y.Error:<EOL><INDENT>return<EOL><DEDENT>if children:<EOL><INDENT>for child in children:<EOL><INDENT>yield child<EOL>for c in self._generateChildrenR(child):<EOL><INDENT>yield c<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Generator which recursively yields all AXChildren of the object.", "id": "f10331:c0:m31"}
{"signature": "def dragMouseButtonLeft(self, coord, dest_coord, interval=<NUM_LIT:0.5>):", "body": "modFlags = <NUM_LIT:0><EOL>self._queueMouseButton(coord, Quartz.kCGMouseButtonLeft, modFlags,<EOL>dest_coord=dest_coord)<EOL>self._postQueuedEvents(interval=interval)<EOL>", "docstring": "Drag the left mouse button without modifiers pressed.\n\n        Parameters: coordinates to click on screen (tuple (x, y))\n                    dest coordinates to drag to (tuple (x, y))\n                    interval to send event of btn down, drag and up\n        Returns: None", "id": "f10331:c1:m21"}
{"signature": "def genericElementsR(self, match=None):", "body": "return self._convenienceMatchR('<STR_LIT>', '<STR_LIT>', match)<EOL>", "docstring": "Return a list of genericelements with an optional match parameter.", "id": "f10331:c1:m54"}
{"signature": "def genericElements(self, match=None):", "body": "return self._convenienceMatch('<STR_LIT>', '<STR_LIT>', match)<EOL>", "docstring": "Return a list of genericelements with an optional match parameter.", "id": "f10331:c1:m53"}
{"signature": "def _performAction(self, action):", "body": "try:<EOL><INDENT>_a11y.AXUIElement._performAction(self, '<STR_LIT>' % action)<EOL><DEDENT>except _a11y.ErrorUnsupported as e:<EOL><INDENT>sierra_ver = '<STR_LIT>'<EOL>if mac_ver()[<NUM_LIT:0>] < sierra_ver:<EOL><INDENT>raise e<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>", "docstring": "Perform the specified action.", "id": "f10331:c0:m29"}
{"signature": "def _matchOther(self, obj, **kwargs):", "body": "if obj is not None:<EOL><INDENT>if self._findFirstR(**kwargs):<EOL><INDENT>return obj._match(**kwargs)<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Perform _match but on another object, not self.", "id": "f10331:c0:m33"}
{"signature": "def textAreas(self, match=None):", "body": "return self._convenienceMatch('<STR_LIT>', '<STR_LIT>', match)<EOL>", "docstring": "Return a list of text areas with an optional match parameter.", "id": "f10331:c1:m41"}
{"signature": "def _generateChildren(self):", "body": "try:<EOL><INDENT>children = self.AXChildren<EOL><DEDENT>except _a11y.Error:<EOL><INDENT>return<EOL><DEDENT>if children:<EOL><INDENT>for child in children:<EOL><INDENT>yield child<EOL><DEDENT><DEDENT>", "docstring": "Generator which yields all AXChildren of the object.", "id": "f10331:c0:m30"}
{"signature": "def buttonsR(self, match=None):", "body": "return self._convenienceMatchR('<STR_LIT>', '<STR_LIT>', match)<EOL>", "docstring": "Return a list of buttons with an optional match parameter.", "id": "f10331:c1:m46"}
{"signature": "def _getApplication(self):", "body": "app = self<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>app = app.AXParent<EOL><DEDENT>except _a11y.ErrorUnsupported:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return app<EOL>", "docstring": "Get the base application UIElement.\n\n        If the UIElement is a child of the application, it will try\n        to get the AXParent until it reaches the top application level\n        element.", "id": "f10331:c0:m40"}
{"signature": "def waitForValueToChange(self, timeout=<NUM_LIT:10>):", "body": "<EOL>callback = AXCallbacks.returnElemCallback<EOL>retelem = None<EOL>return self.waitFor(timeout, '<STR_LIT>', callback=callback,<EOL>args=(retelem,))<EOL>", "docstring": "Convenience method to wait for value attribute of given element to\n        change.\n\n        Some types of elements (e.g. menu items) have their titles change,\n        so this will not work for those.  This seems to work best if you set\n        the notification at the application level.\n\n        Returns: Element or None", "id": "f10331:c1:m36"}
{"signature": "def buttons(self, match=None):", "body": "return self._convenienceMatch('<STR_LIT>', '<STR_LIT>', match)<EOL>", "docstring": "Return a list of buttons with an optional match parameter.", "id": "f10331:c1:m45"}
{"signature": "def textFieldsR(self, match=None):", "body": "return self._convenienceMatchR('<STR_LIT>', '<STR_LIT>',<EOL>match)<EOL>", "docstring": "Return a list of textfields with an optional match parameter.", "id": "f10331:c1:m44"}
{"signature": "def waitForSheetToAppear(self, timeout=<NUM_LIT:10>):", "body": "return self.waitForCreation(timeout, '<STR_LIT>')<EOL>", "docstring": "Convenience method to wait for a sheet to appear.\n\n        Returns: the sheet that appeared (element) or None", "id": "f10331:c1:m35"}
{"signature": "@classmethod<EOL><INDENT>def getAppRefByLocalizedName(cls, name):<DEDENT>", "body": "<EOL>apps = cls._getRunningApps()<EOL>for app in apps:<EOL><INDENT>if fnmatch.fnmatch(app.localizedName(), name):<EOL><INDENT>pid = app.processIdentifier()<EOL>return cls.getAppRefByPid(pid)<EOL><DEDENT><DEDENT>raise ValueError('<STR_LIT>')<EOL>", "docstring": "Get the top level element for the application with the specified\n        localized name, such as VMware Fusion.\n\n        Wildcards are also allowed.", "id": "f10331:c0:m3"}
{"signature": "def staticTexts(self, match=None):", "body": "return self._convenienceMatch('<STR_LIT>', '<STR_LIT>', match)<EOL>", "docstring": "Return a list of statictexts with an optional match parameter.", "id": "f10331:c1:m51"}
{"signature": "@classmethod<EOL><INDENT>def getAppRefByPid(cls, pid):<DEDENT>", "body": "return _a11y.getAppRefByPid(cls, pid)<EOL>", "docstring": "Get the top level element for the application specified by pid.", "id": "f10331:c0:m1"}
{"signature": "def findAll(self, **kwargs):", "body": "return self._findAll(**kwargs)<EOL>", "docstring": "Return a list of all children that match the specified criteria.", "id": "f10331:c1:m5"}
{"signature": "def sendKeyWithModifiers(self, keychr, modifiers):", "body": "return self._sendKeyWithModifiers(keychr, modifiers, False)<EOL>", "docstring": "Send one character with modifiers pressed\n\n        Parameters: key character, modifiers (list) (e.g. [SHIFT] or\n                    [COMMAND, SHIFT] (assuming you've first used\n                    from pyatom.AXKeyCodeConstants import *))", "id": "f10331:c1:m19"}
{"signature": "@classmethod<EOL><INDENT>def getAnyAppWithWindow(cls):<DEDENT>", "body": "<EOL>apps = cls._getRunningApps()<EOL>for app in apps:<EOL><INDENT>pid = app.processIdentifier()<EOL>ref = cls.getAppRefByPid(pid)<EOL>if hasattr(ref, '<STR_LIT>') and len(ref.windows()) > <NUM_LIT:0>:<EOL><INDENT>return ref<EOL><DEDENT><DEDENT>raise ValueError('<STR_LIT>')<EOL>", "docstring": "Get a random app that has windows.\n\n        Raise a ValueError exception if no GUI applications are found.", "id": "f10331:c0:m5"}
{"signature": "def popUpItem(self, *args):", "body": "self.Press()<EOL>time.sleep(<NUM_LIT>)<EOL>return self._menuItem(self, *args)<EOL>", "docstring": "Return the specified item in a pop up menu.", "id": "f10331:c1:m11"}
{"signature": "def _pressModifiers(self, modifiers, pressed=True, globally=False):", "body": "if not isinstance(modifiers, list):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self.keyboard = AXKeyboard.loadKeyboard()<EOL><DEDENT>modFlags = <NUM_LIT:0><EOL>for nextMod in modifiers:<EOL><INDENT>if nextMod not in self.keyboard:<EOL><INDENT>errStr = '<STR_LIT>'<EOL>self._clearEventQueue()<EOL>raise ValueError(errStr % self.keyboard[nextMod])<EOL><DEDENT>modEvent = Quartz.CGEventCreateKeyboardEvent(<EOL>Quartz.CGEventSourceCreate(<NUM_LIT:0>),<EOL>self.keyboard[nextMod],<EOL>pressed<EOL>)<EOL>if not pressed:<EOL><INDENT>Quartz.CGEventSetFlags(modEvent, <NUM_LIT:0>)<EOL><DEDENT>if globally:<EOL><INDENT>self._queueEvent(Quartz.CGEventPost, (<NUM_LIT:0>, modEvent))<EOL><DEDENT>else:<EOL><INDENT>macVer, _, _ = platform.mac_ver()<EOL>macVer = int(macVer.split('<STR_LIT:.>')[<NUM_LIT:1>])<EOL>if macVer > <NUM_LIT:10>:<EOL><INDENT>appPid = self._getPid()<EOL>self._queueEvent(Quartz.CGEventPostToPid, (appPid, modEvent))<EOL><DEDENT>else:<EOL><INDENT>appPsn = self._getPsnForPid(self._getPid())<EOL>self._queueEvent(Quartz.CGEventPostToPSN, (appPsn, modEvent))<EOL><DEDENT><DEDENT>modFlags += AXKeyboard.modKeyFlagConstants[nextMod]<EOL><DEDENT>return modFlags<EOL>", "docstring": "Press given modifiers (provided in list form).\n\n        Parameters: modifiers list, global or app specific\n        Optional: keypressed state (default is True (down))\n        Returns: Unsigned int representing flags to set", "id": "f10331:c0:m18"}
{"signature": "def slidersR(self, match=None):", "body": "return self._convenienceMatchR('<STR_LIT>', '<STR_LIT>', match)<EOL>", "docstring": "Return a list of sliders with an optional match parameter.", "id": "f10331:c1:m64"}
{"signature": "def _queueMouseButton(self, coord, mouseButton, modFlags, clickCount=<NUM_LIT:1>,<EOL>dest_coord=None):", "body": "<EOL>mouseButtons = {<EOL>Quartz.kCGMouseButtonLeft: '<STR_LIT>',<EOL>Quartz.kCGMouseButtonRight: '<STR_LIT>',<EOL>}<EOL>if mouseButton not in mouseButtons:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>eventButtonDown = getattr(Quartz,<EOL>'<STR_LIT>' % mouseButtons[mouseButton])<EOL>eventButtonUp = getattr(Quartz,<EOL>'<STR_LIT>' % mouseButtons[mouseButton])<EOL>eventButtonDragged = getattr(Quartz,<EOL>'<STR_LIT>' % mouseButtons[<EOL>mouseButton])<EOL>buttonDown = Quartz.CGEventCreateMouseEvent(None,<EOL>eventButtonDown,<EOL>coord,<EOL>mouseButton)<EOL>Quartz.CGEventSetFlags(buttonDown, modFlags)<EOL>Quartz.CGEventSetIntegerValueField(buttonDown,<EOL>Quartz.kCGMouseEventClickState,<EOL>int(clickCount))<EOL>if dest_coord:<EOL><INDENT>buttonDragged = Quartz.CGEventCreateMouseEvent(None,<EOL>eventButtonDragged,<EOL>dest_coord,<EOL>mouseButton)<EOL>Quartz.CGEventSetFlags(buttonDragged, modFlags)<EOL>buttonUp = Quartz.CGEventCreateMouseEvent(None,<EOL>eventButtonUp,<EOL>dest_coord,<EOL>mouseButton)<EOL><DEDENT>else:<EOL><INDENT>buttonUp = Quartz.CGEventCreateMouseEvent(None,<EOL>eventButtonUp,<EOL>coord,<EOL>mouseButton)<EOL><DEDENT>Quartz.CGEventSetFlags(buttonUp, modFlags)<EOL>Quartz.CGEventSetIntegerValueField(buttonUp,<EOL>Quartz.kCGMouseEventClickState,<EOL>int(clickCount))<EOL>self._queueEvent(Quartz.CGEventPost,<EOL>(Quartz.kCGSessionEventTap, buttonDown))<EOL>if dest_coord:<EOL><INDENT>self._queueEvent(Quartz.CGEventPost,<EOL>(Quartz.kCGHIDEventTap, buttonDragged))<EOL><DEDENT>self._queueEvent(Quartz.CGEventPost,<EOL>(Quartz.kCGSessionEventTap, buttonUp))<EOL>", "docstring": "Private method to handle generic mouse button clicking.\n\n        Parameters: coord (x, y) to click, mouseButton (e.g.,\n                    kCGMouseButtonLeft), modFlags set (int)\n        Optional: clickCount (default 1; set to 2 for double-click; 3 
for\n                  triple-click on host)\n        Returns: None", "id": "f10331:c0:m24"}
{"signature": "def _findAll(self, **kwargs):", "body": "result = []<EOL>for item in self._generateFind(**kwargs):<EOL><INDENT>result.append(item)<EOL><DEDENT>return result<EOL>", "docstring": "Return a list of all children that match the specified criteria.", "id": "f10331:c0:m36"}
{"signature": "def popUpButtons(self, match=None):", "body": "return self._convenienceMatch('<STR_LIT>', '<STR_LIT>', match)<EOL>", "docstring": "Return a list of popup menus with an optional match parameter.", "id": "f10331:c1:m59"}
{"signature": "def _match(self, **kwargs):", "body": "for k in kwargs.keys():<EOL><INDENT>try:<EOL><INDENT>val = getattr(self, k)<EOL><DEDENT>except _a11y.Error:<EOL><INDENT>return False<EOL><DEDENT>if sys.version_info[:<NUM_LIT:2>] <= (<NUM_LIT:2>, <NUM_LIT:6>):<EOL><INDENT>if isinstance(val, basestring):<EOL><INDENT>if not fnmatch.fnmatch(unicode(val), kwargs[k]):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if val != kwargs[k]:<EOL><INDENT>return False<EOL><DEDENT><DEDENT><DEDENT>elif sys.version_info[<NUM_LIT:0>] == <NUM_LIT:3>:<EOL><INDENT>if isinstance(val, str):<EOL><INDENT>if not fnmatch.fnmatch(val, str(kwargs[k])):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if val != kwargs[k]:<EOL><INDENT>return False<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if isinstance(val, str) or isinstance(val, unicode):<EOL><INDENT>if not fnmatch.fnmatch(val, kwargs[k]):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if val != kwargs[k]:<EOL><INDENT>return False<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return True<EOL>", "docstring": "Method which indicates if the object matches specified criteria.\n\n        Match accepts criteria as kwargs and looks them up on attributes.\n        Actual matching is performed with fnmatch, so shell-like wildcards\n        work within match strings. Examples:\n\n        obj._match(AXTitle='Terminal*')\n        obj._match(AXRole='TextField', AXRoleDescription='search text field')", "id": "f10331:c0:m32"}
{"signature": "def _queueEvent(self, event, args):", "body": "if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self.eventList = deque([(event, args)])<EOL>return<EOL><DEDENT>self.eventList.append((event, args))<EOL>", "docstring": "Private method to queue events to run.\n\n        Each event in queue is a tuple (event call, args to event call).", "id": "f10331:c0:m14"}
{"signature": "def sendKeys(self, keystr):", "body": "return self._sendKeys(keystr)<EOL>", "docstring": "Send a series of characters with no modifiers.", "id": "f10331:c1:m16"}
{"signature": "def _releaseModifierKeys(self, modifiers):", "body": "modFlags = self._releaseModifiers(modifiers)<EOL>self._postQueuedEvents()<EOL>return modFlags<EOL>", "docstring": "Release given modifier keys (provided in list form).\n\n        Parameters: modifiers list\n        Returns: Unsigned int representing flags to set", "id": "f10331:c0:m21"}
{"signature": "def getLocalizedName(self):", "body": "return self._getLocalizedName()<EOL>", "docstring": "Return the localized name of the application.", "id": "f10331:c1:m13"}
{"signature": "def sliders(self, match=None):", "body": "return self._convenienceMatch('<STR_LIT>', '<STR_LIT>', match)<EOL>", "docstring": "Return a list of sliders with an optional match parameter.", "id": "f10331:c1:m63"}
{"signature": "def stop(self):", "body": "try:<EOL><INDENT>self.alive = False<EOL>self.join(self.sleep_time)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Stop the thread", "id": "f10334:c1:m2"}
{"signature": "def registerkbevent(keys, modifiers, fn_name, *args):", "body": "event_name = \"<STR_LIT>\" % (keys, modifiers)<EOL>_pollEvents._callback[event_name] = [event_name, fn_name, args]<EOL>return _remote_registerkbevent(keys, modifiers)<EOL>", "docstring": "Register keystroke events\n\n@param keys: key to listen\n@type keys: string\n@param modifiers: control / alt combination using gtk MODIFIERS\n@type modifiers: int\n@param fn_name: Callback function\n@type fn_name: function\n@param *args: arguments to be passed to the callback function\n@type *args: var args\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10334:m37"}
{"signature": "def __del__(self):", "body": "try:<EOL><INDENT>self.alive = False<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Stop polling when destroying this class", "id": "f10334:c0:m1"}
{"signature": "def registerevent(event_name, fn_name, *args):", "body": "if not isinstance(event_name, str):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>_pollEvents._callback[event_name] = [event_name, fn_name, args]<EOL>return _remote_registerevent(event_name)<EOL>", "docstring": "Register at-spi event\n\n@param event_name: Event name in at-spi format.\n@type event_name: string\n@param fn_name: Callback function\n@type fn_name: function\n@param *args: arguments to be passed to the callback function\n@type *args: var args\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10334:m35"}
{"signature": "def windowuptime(window_name):", "body": "tmp_time = _remote_windowuptime(window_name)<EOL>if tmp_time:<EOL><INDENT>tmp_time = tmp_time.split('<STR_LIT:->')<EOL>start_time = tmp_time[<NUM_LIT:0>].split('<STR_LIT:U+0020>')<EOL>end_time = tmp_time[<NUM_LIT:1>].split('<STR_LIT:U+0020>')<EOL>_start_time = datetime.datetime(int(start_time[<NUM_LIT:0>]), int(start_time[<NUM_LIT:1>]),<EOL>int(start_time[<NUM_LIT:2>]), int(start_time[<NUM_LIT:3>]),<EOL>int(start_time[<NUM_LIT:4>]), int(start_time[<NUM_LIT:5>]))<EOL>_end_time = datetime.datetime(int(end_time[<NUM_LIT:0>]), int(end_time[<NUM_LIT:1>]),<EOL>int(end_time[<NUM_LIT:2>]), int(end_time[<NUM_LIT:3>]),<EOL>int(end_time[<NUM_LIT:4>]), int(end_time[<NUM_LIT:5>]))<EOL>return _start_time, _end_time<EOL><DEDENT>return None<EOL>", "docstring": "Get window uptime\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n\n@return: \"starttime, endtime\" as datetime python object", "id": "f10334:m39"}
{"signature": "def log(message, level=logging.DEBUG):", "body": "if _ldtp_debug:<EOL><INDENT>print(message)<EOL><DEDENT>logger.log(level, str(message))<EOL>return <NUM_LIT:1><EOL>", "docstring": "Logs the message in the root logger with the log level\n@param message: Message to be logged\n@type message: string\n@param level: Log level, defaul DEBUG\n@type level: integer\n\n@return: 1 on success and 0 on error\n@rtype: integer", "id": "f10334:m4"}
{"signature": "def __del__(self):", "body": "try:<EOL><INDENT>self.alive = False<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Stop callback when destroying this class", "id": "f10334:c1:m1"}
{"signature": "def startlog(filename, overwrite=True):", "body": "if not filename:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>if overwrite:<EOL><INDENT>_mode = '<STR_LIT:w>'<EOL><DEDENT>else:<EOL><INDENT>_mode = '<STR_LIT:a>'<EOL><DEDENT>global _file_logger<EOL>_file_logger = logging.FileHandler(os.path.expanduser(filename), _mode)<EOL>_formatter = logging.Formatter('<STR_LIT>')<EOL>_file_logger.setFormatter(_formatter)<EOL>logger.addHandler(_file_logger)<EOL>if _ldtp_debug:<EOL><INDENT>_file_logger.setLevel(logging.DEBUG)<EOL><DEDENT>else:<EOL><INDENT>_file_logger.setLevel(logging.ERROR)<EOL><DEDENT>return <NUM_LIT:1><EOL>", "docstring": "@param filename: Start logging on the specified file\n@type filename: string\n@param overwrite: Overwrite or append\n    False - Append log to an existing file\n    True - Write log to a new file. If file already exist, \n    then erase existing file content and start log\n@type overwrite: boolean\n\n@return: 1 on success and 0 on error\n@rtype: integer", "id": "f10334:m5"}
{"signature": "def stop(self):", "body": "try:<EOL><INDENT>self.alive = False<EOL>self.join(self.sleep_time)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Stop the thread", "id": "f10334:c0:m2"}
{"signature": "def stoplog():", "body": "global _file_logger<EOL>if _file_logger:<EOL><INDENT>logger.removeHandler(_file_logger)<EOL>_file_logger = None<EOL><DEDENT>return <NUM_LIT:1><EOL>", "docstring": "Stop logging.\n\n    @return: 1 on success and 0 on error\n    @rtype: integer", "id": "f10334:m6"}
{"signature": "def onwindowcreate(window_name, fn_name, *args):", "body": "_pollEvents._callback[window_name] = [\"<STR_LIT>\", fn_name, args]<EOL>return _remote_onwindowcreate(window_name)<EOL>", "docstring": "On window create, call the function with given arguments\n\n@param window_name: Window name to look for, either full name,\nLDTP's name convention, or a Unix glob.\n@type window_name: string\n@param fn_name: Callback function\n@type fn_name: function\n@param *args: arguments to be passed to the callback function\n@type *args: var args\n\n@return: 1 if registration was successful, 0 if not.\n@rtype: integer", "id": "f10334:m33"}
{"signature": "def returnElemCallback(retelem):", "body": "return retelem<EOL>", "docstring": "Callback for when a sheet appears.\n\n    Returns: element returned by observer callback", "id": "f10336:m1"}
{"signature": "def elemDisappearedCallback(retelem, obj, **kwargs):", "body": "return not obj.findFirstR(**kwargs)<EOL>", "docstring": "Callback for checking if a UI element is no longer onscreen.\n\n    kwargs should contains some unique set of identifier (e.g. title/value, role)\n    Returns:  Boolean", "id": "f10336:m0"}
{"signature": "def read(fname):", "body": "return open(os.path.join(os.path.dirname(__file__), fname)).read()<EOL>", "docstring": "Returns the contents of the specified file located in the same dir as\nthe script", "id": "f10338:m0"}
{"signature": "def get_tracking_clans(self, **params: keys):", "body": "url = self.api.CLAN + '<STR_LIT>'<EOL>return self._get_model(url, **params)<EOL>", "docstring": "Get a list of clans that are being\n        tracked by having either cr-api.com or\n        royaleapi.com in the description\n\n        Parameters\n        ----------\n        \\*\\*keys: Optional[list] = None\n            Filter which keys should be included in the\n            response\n        \\*\\*exclude: Optional[list] = None\n            Filter which keys should be excluded from the\n            response\n        \\*\\*max: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*page: Optional[int] = None\n            Works with max, the zero-based page of the\n            items\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m25"}
{"signature": "@typecasted<EOL><INDENT>def get_clan_battles(self, *tags: crtag, **params: keys):<DEDENT>", "body": "url = self.api.CLAN + '<STR_LIT:/>' + '<STR_LIT:U+002C>'.join(tags) + '<STR_LIT>'<EOL>return self._get_model(url, **params)<EOL>", "docstring": "Get the battle log from everyone in the clan\n\n        Parameters\n        ----------\n        \\*tags: str\n            Valid player tags. Minimum length: 3\n            Valid characters: 0289PYLQGRJCUV\n        \\*\\*type: str\n            Filters what kind of battles. Pick from:\n            :all:, :war:, :clanMate:\n        \\*\\*keys: Optional[list] = None\n            Filter which keys should be included in the\n            response\n        \\*\\*exclude: Optional[list] = None\n            Filter which keys should be excluded from the\n            response\n        \\*\\*max: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*page: Optional[int] = None\n            Works with max, the zero-based page of the\n            items\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m27"}
{"signature": "@typecasted<EOL><INDENT>def get_full_tournaments(self, **params: tournamentfilter):<DEDENT>", "body": "url = self.api.TOURNAMENT + '<STR_LIT>'<EOL>return self._get_model(url, PartialTournament, **params)<EOL>", "docstring": "Get a list of tournaments that are full\n\n        \\*\\*1k: Optional[int] = 0\n            Set to 1 to filter tournaments that have\n            at least 1000 max players\n        \\*\\*open: Optional[int] = 0\n            Set to 1 to filter tournaments that are\n            open\n        \\*\\*inprep: Optional[int] = 0\n            Set to 1 to filter tournaments that are\n            in preperation\n        \\*\\*joinable: Optional[int] = 0\n            Set to 1 to filter tournaments that are\n            joinable\n        \\*\\*keys: Optional[list] = None\n            Filter which keys should be included in the\n            response\n        \\*\\*exclude: Optional[list] = None\n            Filter which keys should be excluded from the\n            response\n        \\*\\*max: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*page: Optional[int] = None\n            Works with max, the zero-based page of the\n            items\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m45"}
{"signature": "@typecasted<EOL><INDENT>def get_top_players(self, country_key='<STR_LIT>', **params: keys):<DEDENT>", "body": "url = self.api.TOP + '<STR_LIT>' + str(country_key)<EOL>return self._get_model(url, PartialPlayerClan, **params)<EOL>", "docstring": "Get a list of top players\n\n        location_id: Optional[str] = ''\n            A location ID or '' (global)\n            See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json\n            for a list of acceptable location IDs\n        \\*\\*keys: Optional[list] = None\n            Filter which keys should be included in the\n            response\n        \\*\\*exclude: Optional[list] = None\n            Filter which keys should be excluded from the\n            response\n        \\*\\*max: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*page: Optional[int] = None\n            Works with max, the zero-based page of the\n            items\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m35"}
{"signature": "@typecasted<EOL><INDENT>def get_clan_war_log(self, tag: crtag, **params: keys):<DEDENT>", "body": "url = self.api.CLAN + '<STR_LIT:/>' + tag + '<STR_LIT>'<EOL>return self._get_model(url, **params)<EOL>", "docstring": "Get a clan's war log\n\n        Parameters\n        ----------\n        \\*tags: str\n            Valid clan tags. Minimum length: 3\n            Valid characters: 0289PYLQGRJCUV\n        \\*\\*keys: Optional[list] = None\n            Filter which keys should be included in the\n            response\n        \\*\\*exclude: Optional[list] = None\n            Filter which keys should be excluded from the\n            response\n        \\*\\*max: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*page: Optional[int] = None\n            Works with max, the zero-based page of the\n            items\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m30"}
{"signature": "@typecasted<EOL><INDENT>def get_joinable_tournaments(self, **params: tournamentfilter):<DEDENT>", "body": "url = self.api.TOURNAMENT + '<STR_LIT>'<EOL>return self._get_model(url, PartialTournament, **params)<EOL>", "docstring": "Get a list of tournaments that are joinable\n\n        \\*\\*1k: Optional[int] = 0\n            Set to 1 to filter tournaments that have\n            at least 1000 max players\n        \\*\\*open: Optional[int] = 0\n            Set to 1 to filter tournaments that are\n            open\n        \\*\\*full: Optional[int] = 0\n            Set to 1 to filter tournaments that are\n            full\n        \\*\\*inprep: Optional[int] = 0\n            Set to 1 to filter tournaments that are\n            in preperation\n        \\*\\*keys: Optional[list] = None\n            Filter which keys should be included in the\n            response\n        \\*\\*exclude: Optional[list] = None\n            Filter which keys should be excluded from the\n            response\n        \\*\\*max: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*page: Optional[int] = None\n            Works with max, the zero-based page of the\n            items\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m44"}
{"signature": "@typecasted<EOL><INDENT>def get_popular_players(self, **params: keys):<DEDENT>", "body": "url = self.api.POPULAR + '<STR_LIT>'<EOL>return self._get_model(url, PartialPlayerClan, **params)<EOL>", "docstring": "Get a list of most queried players\n\n        \\*\\*keys: Optional[list] = None\n            Filter which keys should be included in the\n            response\n        \\*\\*exclude: Optional[list] = None\n            Filter which keys should be excluded from the\n            response\n        \\*\\*max: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*page: Optional[int] = None\n            Works with max, the zero-based page of the\n            items\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m37"}
{"signature": "@typecasted<EOL><INDENT>def get_player_verify(self, tag: crtag, apikey: str, **params: keys):<DEDENT>", "body": "url = self.api.PLAYER + '<STR_LIT:/>' + tag + '<STR_LIT>'<EOL>params.update({'<STR_LIT>': apikey})<EOL>return self._get_model(url, FullPlayer, **params)<EOL>", "docstring": "Check the API Key of a player.\n        This endpoint has been **restricted** to\n        certain members of the community\n\n        Parameters\n        ----------\n        tag: str\n            A valid tournament tag. Minimum length: 3\n            Valid characters: 0289PYLQGRJCUV\n        apikey: str\n            The API Key in the player's settings\n        \\*\\*keys: Optional[list] = None\n            Filter which keys should be included in the\n            response\n        \\*\\*exclude: Optional[list] = None\n            Filter which keys should be excluded from the\n            response\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m20"}
{"signature": "@typecasted<EOL><INDENT>def get_tournament(self, tag: crtag, **params: keys):<DEDENT>", "body": "url = self.api.TOURNAMENT + '<STR_LIT:/>' + tag<EOL>return self._get_model(url, **params)<EOL>", "docstring": "Get a tournament information\n\n        Parameters\n        ----------\n        tag: str\n            A valid tournament tag. Minimum length: 3\n            Valid characters: 0289PYLQGRJCUV\n        \\*\\*keys: Optional[list] = None\n            Filter which keys should be included in the\n            response\n        \\*\\*exclude: Optional[list] = None\n            Filter which keys should be excluded from the\n            response\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m31"}
{"signature": "def get_version(self):", "body": "return self._get_model(self.api.VERSION)<EOL>", "docstring": "Gets the version of RoyaleAPI. Returns a string", "id": "f10347:c0:m16"}
{"signature": "@typecasted<EOL><INDENT>def get_clan_war(self, tag: crtag, **params: keys):<DEDENT>", "body": "url = self.api.CLAN + '<STR_LIT:/>' + tag + '<STR_LIT>'<EOL>return self._get_model(url, **params)<EOL>", "docstring": "Get inforamtion about a clan's current clan war\n\n        Parameters\n        ----------\n        *tag: str\n            A valid clan tag. Minimum length: 3\n            Valid characters: 0289PYLQGRJCUV\n        \\*\\*keys: Optional[list] = None\n            Filter which keys should be included in the\n            response\n        \\*\\*exclude: Optional[list] = None\n            Filter which keys should be excluded from the\n            response\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m29"}
{"signature": "@typecasted<EOL><INDENT>def get_1k_tournaments(self, **params: tournamentfilter):<DEDENT>", "body": "url = self.api.TOURNAMENT + '<STR_LIT>'<EOL>return self._get_model(url, PartialTournament, **params)<EOL>", "docstring": "Get a list of tournaments that have at least 1000\n        max players\n\n        \\*\\*open: Optional[int] = 0\n            Set to 1 to filter tournaments that are\n            open\n        \\*\\*full: Optional[int] = 0\n            Set to 1 to filter tournaments that are\n            full\n        \\*\\*inprep: Optional[int] = 0\n            Set to 1 to filter tournaments that are\n            in preperation\n        \\*\\*joinable: Optional[int] = 0\n            Set to 1 to filter tournaments that are\n            joinable\n        \\*\\*keys: Optional[list] = None\n            Filter which keys should be included in the\n            response\n        \\*\\*exclude: Optional[list] = None\n            Filter which keys should be excluded from the\n            response\n        \\*\\*max: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*page: Optional[int] = None\n            Works with max, the zero-based page of the\n            items\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m42"}
{"signature": "@typecasted<EOL><INDENT>def get_popular_tournaments(self, **params: keys):<DEDENT>", "body": "url = self.api.POPULAR + '<STR_LIT>'<EOL>return self._get_model(url, PartialTournament, **params)<EOL>", "docstring": "Get a list of most queried tournaments\n\n        \\*\\*keys: Optional[list] = None\n            Filter which keys should be included in the\n            response\n        \\*\\*exclude: Optional[list] = None\n            Filter which keys should be excluded from the\n            response\n        \\*\\*max: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*page: Optional[int] = None\n            Works with max, the zero-based page of the\n            items\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10347:c0:m38"}
{"signature": "def typecasted(func):", "body": "signature = inspect.signature(func).parameters.items()<EOL>@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>args = list(args)<EOL>new_args = []<EOL>new_kwargs = {}<EOL>for _, param in signature:<EOL><INDENT>converter = param.annotation<EOL>if converter is inspect._empty:<EOL><INDENT>converter = lambda a: a  <EOL><DEDENT>if param.kind is param.POSITIONAL_OR_KEYWORD:<EOL><INDENT>if args:<EOL><INDENT>to_conv = args.pop(<NUM_LIT:0>)<EOL>new_args.append(converter(to_conv))<EOL><DEDENT><DEDENT>elif param.kind is param.VAR_POSITIONAL:<EOL><INDENT>for a in args:<EOL><INDENT>new_args.append(converter(a))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for k, v in kwargs.items():<EOL><INDENT>nk, nv = converter(k, v)<EOL>new_kwargs[nk] = nv<EOL><DEDENT><DEDENT><DEDENT>return func(*new_args, **new_kwargs)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorator that converts arguments via annotations.", "id": "f10349:m0"}
{"signature": "def get_clan(self):", "body": "try:<EOL><INDENT>return self.client.get_clan(self.clan.tag)<EOL><DEDENT>except AttributeError:<EOL><INDENT>try:<EOL><INDENT>return self.client.get_clan(self.tag)<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "(a)sync function to return clan.", "id": "f10350:c3:m0"}
{"signature": "@classmethod<EOL><INDENT>def Async(cls, token, session=None, **options):<DEDENT>", "body": "return cls(token, session=session, is_async=True, **options)<EOL>", "docstring": "Returns the client in async mode.", "id": "f10352:c0:m2"}
{"signature": "@typecasted<EOL><INDENT>def search_clans(self, **params: clansearch):<DEDENT>", "body": "url = self.api.CLAN<EOL>return self._get_model(url, PartialClan, **params)<EOL>", "docstring": "Search for a clan. At least one\n        of the filters must be present\n\n        Parameters\n        ----------\n        name: Optional[str]\n            The name of a clan\n            (has to be at least 3 characters long)\n        locationId: Optional[int]\n            A location ID\n        minMembers: Optional[int]\n            The minimum member count\n            of a clan\n        maxMembers: Optional[int]\n            The maximum member count\n            of a clan\n        minScore: Optional[int]\n            The minimum trophy score of\n            a clan\n        \\*\\*limit: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m21"}
{"signature": "@typecasted<EOL><INDENT>def get_player(self, tag: crtag, timeout=None):<DEDENT>", "body": "url = self.api.PLAYER + '<STR_LIT:/>' + tag<EOL>return self._get_model(url, FullPlayer, timeout=timeout)<EOL>", "docstring": "Get information about a player\n\n        Parameters\n        ----------\n        tag: str\n            A valid tournament tag. Minimum length: 3\n            Valid characters: 0289PYLQGRJCUV\n        timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m16"}
{"signature": "@typecasted<EOL><INDENT>def get_clan_war_log(self, tag: crtag, **params: keys):<DEDENT>", "body": "url = self.api.CLAN + '<STR_LIT:/>' + tag + '<STR_LIT>'<EOL>return self._get_model(url, **params)<EOL>", "docstring": "Get a clan's war log\n\n        Parameters\n        ----------\n        tag: str\n            A valid tournament tag. Minimum length: 3\n            Valid characters: 0289PYLQGRJCUV\n        \\*\\*limit: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m24"}
{"signature": "def get_datetime(self, timestamp: str, unix=True):", "body": "time = datetime.strptime(timestamp, '<STR_LIT>')<EOL>if unix:<EOL><INDENT>return int(time.timestamp())<EOL><DEDENT>else:<EOL><INDENT>return time<EOL><DEDENT>", "docstring": "Converts a %Y%m%dT%H%M%S.%fZ to a UNIX timestamp\n        or a datetime.datetime object\n\n        Parameters\n        ---------\n        timestamp: str\n            A timstamp in the %Y%m%dT%H%M%S.%fZ format, usually returned by the API\n            in the ``created_time`` field for example (eg. 20180718T145906.000Z)\n        unix: Optional[bool] = True\n            Whether to return a POSIX timestamp (seconds since epoch) or not\n\n        Returns int or datetime.datetime", "id": "f10352:c0:m39"}
{"signature": "@typecasted<EOL><INDENT>def get_player_chests(self, tag: crtag, timeout: int=None):<DEDENT>", "body": "url = self.api.PLAYER + '<STR_LIT:/>' + tag + '<STR_LIT>'<EOL>return self._get_model(url, timeout=timeout)<EOL>", "docstring": "Get information about a player's chest cycle\n\n        Parameters\n        ----------\n        tag: str\n            A valid tournament tag. Minimum length: 3\n            Valid characters: 0289PYLQGRJCUV\n        timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m19"}
{"signature": "@typecasted<EOL><INDENT>def get_tournament(self, tag: crtag, timeout=<NUM_LIT:0>):<DEDENT>", "body": "url = self.api.TOURNAMENT + '<STR_LIT:/>' + tag<EOL>return self._get_model(url, PartialTournament, timeout=timeout)<EOL>", "docstring": "Get a tournament information\n\n        Parameters\n        ----------\n        tag: str\n            A valid tournament tag. Minimum length: 3\n            Valid characters: 0289PYLQGRJCUV\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m25"}
{"signature": "@typecasted<EOL><INDENT>def get_player_battles(self, tag: crtag, **params: keys):<DEDENT>", "body": "url = self.api.PLAYER + '<STR_LIT:/>' + tag + '<STR_LIT>'<EOL>return self._get_model(url, **params)<EOL>", "docstring": "Get a player's battle log\n\n        Parameters\n        ----------\n        tag: str\n            A valid tournament tag. Minimum length: 3\n            Valid characters: 0289PYLQGRJCUV\n        \\*\\*limit: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m18"}
{"signature": "@typecasted<EOL><INDENT>def get_top_clanwar_clans(self, location_id='<STR_LIT>', **params: keys):<DEDENT>", "body": "url = self.api.LOCATIONS + '<STR_LIT:/>' + str(location_id) + '<STR_LIT>'<EOL>return self._get_model(url, PartialClan, **params)<EOL>", "docstring": "Get a list of top clan war clans\n\n        Parameters\n        ----------\n        location_id: Optional[str] = 'global'\n            A location ID or global\n            See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json\n            for a list of acceptable location IDs\n        \\*\\*limit: Optional[int] = None\n            Limit the number of items returned in the response\n        \\*\\*timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m32"}
{"signature": "@typecasted<EOL><INDENT>def get_location(self, location_id: int, timeout: int=None):<DEDENT>", "body": "url = self.api.LOCATIONS + '<STR_LIT:/>' + str(location_id)<EOL>return self._get_model(url, timeout=timeout)<EOL>", "docstring": "Get a location information\n\n        Parameters\n        ----------\n        location_id: int\n            A location ID\n            See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json\n            for a list of acceptable location IDs\n        timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m30"}
{"signature": "@typecasted<EOL><INDENT>def get_player_verify(self, tag: crtag, apikey: str, timeout=None):<DEDENT>", "body": "url = self.api.PLAYER + '<STR_LIT:/>' + tag + '<STR_LIT>'<EOL>return self._get_model(url, FullPlayer, timeout=timeout, method='<STR_LIT:POST>', json={'<STR_LIT>': apikey})<EOL>", "docstring": "Check the API Key of a player.\n        This endpoint has been **restricted** to\n        certain members of the community\n\n        Raises BadRequest if the apikey is invalid\n\n        Parameters\n        ----------\n        tag: str\n            A valid tournament tag. Minimum length: 3\n            Valid characters: 0289PYLQGRJCUV\n        apikey: str\n            The API Key in the player's settings\n        timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m17"}
{"signature": "def get_deck_link(self, deck: BaseAttrDict):", "body": "deck_link = '<STR_LIT>'<EOL>for i in deck:<EOL><INDENT>card = self.get_card_info(i.name)<EOL>deck_link += '<STR_LIT>'.format(card)<EOL><DEDENT>return deck_link<EOL>", "docstring": "Form a deck link\n\n        Parameters\n        ---------\n        deck: official_api.models.BaseAttrDict\n            An object is a deck. Can be retrieved from ``Player.current_deck``\n\n        Returns str", "id": "f10352:c0:m38"}
{"signature": "def get_arena_image(self, obj: BaseAttrDict):", "body": "badge_id = obj.arena.id<EOL>for i in self.constants.arenas:<EOL><INDENT>if i.id == badge_id:<EOL><INDENT>return '<STR_LIT>'.format(i.arena_id)<EOL><DEDENT><DEDENT>", "docstring": "Get the arena image URL\n\n        Parameters\n        ---------\n        obj: official_api.models.BaseAttrDict\n            An object that has the arena ID in ``.arena.id``\n            Can be ``Profile`` for example.\n\n        Returns None or str", "id": "f10352:c0:m35"}
{"signature": "@typecasted<EOL><INDENT>def get_all_locations(self, timeout: int=None):<DEDENT>", "body": "url = self.api.LOCATIONS<EOL>return self._get_model(url, timeout=timeout)<EOL>", "docstring": "Get a list of all locations\n\n        Parameters\n        ----------\n        timeout: Optional[int] = None\n            Custom timeout that overwrites Client.timeout", "id": "f10352:c0:m29"}
{"signature": "def get_clan(self):", "body": "try:<EOL><INDENT>return self.client.get_clan(self.clan.tag)<EOL><DEDENT>except AttributeError:<EOL><INDENT>try:<EOL><INDENT>return self.client.get_clan(self.tag)<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "(a)sync function to return clan.", "id": "f10354:c3:m0"}
{"signature": "def get_player(self):", "body": "return self.client.get_player(self.tag)<EOL>", "docstring": "(a)sync function to return player.", "id": "f10354:c5:m0"}
{"signature": "def models_preparing(app):", "body": "def wrapper(resource, parent):<EOL><INDENT>if isinstance(resource, DeclarativeMeta):<EOL><INDENT>resource = ListResource(resource)<EOL><DEDENT>if not getattr(resource, '<STR_LIT>', None):<EOL><INDENT>resource.__parent__ = parent<EOL><DEDENT>return resource<EOL><DEDENT>resources_preparing_factory(app, wrapper)<EOL>", "docstring": "Wrap all sqlalchemy model in settings.", "id": "f10359:m0"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "pass<EOL>", "docstring": "Mock constructor to accept the query_config parameters.", "id": "f10384:c0:m0"}
{"signature": "def date_range(start_date, end_date):", "body": "if start_date >= end_date:<EOL><INDENT>for n in range((start_date - end_date).days + <NUM_LIT:1>):<EOL><INDENT>yield end_date + datetime.timedelta(n)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for n in range((end_date - start_date).days + <NUM_LIT:1>):<EOL><INDENT>yield start_date + datetime.timedelta(n)<EOL><DEDENT><DEDENT>", "docstring": "Get all dates in a given range.", "id": "f10384:m4"}
{"signature": "def mock_iter_entry_points_factory(data, mocked_group):", "body": "from pkg_resources import iter_entry_points<EOL>def entrypoints(group, name=None):<EOL><INDENT>if group == mocked_group:<EOL><INDENT>for entrypoint in data:<EOL><INDENT>yield entrypoint<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for x in iter_entry_points(group=group, name=name):<EOL><INDENT>yield x<EOL><DEDENT><DEDENT><DEDENT>return entrypoints<EOL>", "docstring": "Create a mock iter_entry_points function.", "id": "f10384:m0"}
{"signature": "def build_file_unique_id(doc):", "body": "doc['<STR_LIT>'] = '<STR_LIT>'.format(doc['<STR_LIT>'], doc['<STR_LIT>'])<EOL>return doc<EOL>", "docstring": "Build file unique identifier.", "id": "f10392:m1"}
{"signature": "def file_download_event_builder(event, sender_app, obj=None, **kwargs):", "body": "event.update(dict(<EOL>timestamp=datetime.datetime.utcnow().isoformat(),<EOL>bucket_id=str(obj.bucket_id),<EOL>file_id=str(obj.file_id),<EOL>file_key=obj.key,<EOL>size=obj.file.size,<EOL>referrer=request.referrer,<EOL>**get_user()<EOL>))<EOL>return event<EOL>", "docstring": "Build a file-download event.", "id": "f10392:m0"}
{"signature": "def build_record_unique_id(doc):", "body": "doc['<STR_LIT>'] = '<STR_LIT>'.format(doc['<STR_LIT>'], doc['<STR_LIT>'])<EOL>return doc<EOL>", "docstring": "Build record unique identifier.", "id": "f10392:m2"}
{"signature": "def register_queries():", "body": "return [<EOL>dict(<EOL>query_name='<STR_LIT>',<EOL>query_class=ESDateHistogramQuery,<EOL>query_config=dict(<EOL>index='<STR_LIT>',<EOL>doc_type='<STR_LIT>',<EOL>copy_fields=dict(<EOL>bucket_id='<STR_LIT>',<EOL>file_key='<STR_LIT>',<EOL>),<EOL>required_filters=dict(<EOL>bucket_id='<STR_LIT>',<EOL>file_key='<STR_LIT>',<EOL>)<EOL>)<EOL>),<EOL>dict(<EOL>query_name='<STR_LIT>',<EOL>query_class=ESTermsQuery,<EOL>query_config=dict(<EOL>index='<STR_LIT>',<EOL>doc_type='<STR_LIT>',<EOL>copy_fields=dict(<EOL>),<EOL>required_filters=dict(<EOL>bucket_id='<STR_LIT>',<EOL>),<EOL>aggregated_fields=['<STR_LIT>']<EOL>)<EOL>),<EOL>]<EOL>", "docstring": "Register queries.", "id": "f10394:m2"}
{"signature": "def register_aggregations():", "body": "return [dict(<EOL>aggregation_name='<STR_LIT>',<EOL>templates='<STR_LIT>',<EOL>aggregator_class=StatAggregator,<EOL>aggregator_config=dict(<EOL>client=current_search_client,<EOL>event='<STR_LIT>',<EOL>aggregation_field='<STR_LIT>',<EOL>aggregation_interval='<STR_LIT>',<EOL>copy_fields=dict(<EOL>file_key='<STR_LIT>',<EOL>bucket_id='<STR_LIT>',<EOL>file_id='<STR_LIT>',<EOL>),<EOL>metric_aggregation_fields={<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>',<EOL>{'<STR_LIT>': <NUM_LIT:1000>}),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT:size>', {}),<EOL>},<EOL>)), dict(<EOL>aggregation_name='<STR_LIT>',<EOL>templates='<STR_LIT>',<EOL>aggregator_class=StatAggregator,<EOL>aggregator_config=dict(<EOL>client=current_search_client,<EOL>event='<STR_LIT>',<EOL>aggregation_field='<STR_LIT>',<EOL>aggregation_interval='<STR_LIT>',<EOL>copy_fields=dict(<EOL>record_id='<STR_LIT>',<EOL>pid_type='<STR_LIT>',<EOL>pid_value='<STR_LIT>',<EOL>),<EOL>metric_aggregation_fields={<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>',<EOL>{'<STR_LIT>': <NUM_LIT:1000>}),<EOL>},<EOL>))]<EOL>", "docstring": "Register sample aggregations.", "id": "f10394:m1"}
{"signature": "def filter_robots(query):", "body": "return query.filter('<STR_LIT>', is_robot=False)<EOL>", "docstring": "Modify an elasticsearch query so that robot events are filtered out.", "id": "f10402:m0"}
{"signature": "@property<EOL><INDENT>def aggregation_doc_type(self):<DEDENT>", "body": "return '<STR_LIT>'.format(<EOL>self.event, self.aggregation_interval)<EOL>", "docstring": "Get document type for the aggregation.", "id": "f10402:c0:m2"}
{"signature": "def _format_range_dt(self, d):", "body": "if not isinstance(d, six.string_types):<EOL><INDENT>d = d.isoformat()<EOL><DEDENT>return '<STR_LIT>'.format(<EOL>d, self.dt_rounding_map[self.aggregation_interval])<EOL>", "docstring": "Format range filter datetime to the closest aggregation interval.", "id": "f10402:c0:m6"}
{"signature": "def agg_iter(self, lower_limit=None, upper_limit=None):", "body": "lower_limit = lower_limit or self.get_bookmark().isoformat()<EOL>upper_limit = upper_limit or (<EOL>datetime.datetime.utcnow().replace(microsecond=<NUM_LIT:0>).isoformat())<EOL>aggregation_data = {}<EOL>self.agg_query = Search(using=self.client,<EOL>index=self.event_index).filter('<STR_LIT>', timestamp={<EOL>'<STR_LIT>': self._format_range_dt(lower_limit),<EOL>'<STR_LIT>': self._format_range_dt(upper_limit)})<EOL>for modifier in self.query_modifiers:<EOL><INDENT>self.agg_query = modifier(self.agg_query)<EOL><DEDENT>hist = self.agg_query.aggs.bucket(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>field='<STR_LIT>',<EOL>interval=self.aggregation_interval<EOL>)<EOL>terms = hist.bucket(<EOL>'<STR_LIT>', '<STR_LIT>', field=self.aggregation_field, size=<NUM_LIT:0><EOL>)<EOL>top = terms.metric(<EOL>'<STR_LIT>', '<STR_LIT>', size=<NUM_LIT:1>, sort={'<STR_LIT>': '<STR_LIT>'}<EOL>)<EOL>for dst, (metric, src, opts) in self.metric_aggregation_fields.items():<EOL><INDENT>terms.metric(dst, metric, field=src, **opts)<EOL><DEDENT>results = self.agg_query.execute()<EOL>index_name = None<EOL>for interval in results.aggregations['<STR_LIT>'].buckets:<EOL><INDENT>interval_date = datetime.datetime.strptime(<EOL>interval['<STR_LIT>'], '<STR_LIT>')<EOL>for aggregation in interval['<STR_LIT>'].buckets:<EOL><INDENT>aggregation_data['<STR_LIT>'] = interval_date.isoformat()<EOL>aggregation_data[self.aggregation_field] = aggregation['<STR_LIT:key>']<EOL>aggregation_data['<STR_LIT:count>'] = aggregation['<STR_LIT>']<EOL>if self.metric_aggregation_fields:<EOL><INDENT>for f in self.metric_aggregation_fields:<EOL><INDENT>aggregation_data[f] = aggregation[f]['<STR_LIT:value>']<EOL><DEDENT><DEDENT>doc = aggregation.top_hit.hits.hits[<NUM_LIT:0>]['<STR_LIT>']<EOL>for destination, source in self.copy_fields.items():<EOL><INDENT>if isinstance(source, six.string_types):<EOL><INDENT>aggregation_data[destination] = 
doc[source]<EOL><DEDENT>else:<EOL><INDENT>aggregation_data[destination] = source(<EOL>doc,<EOL>aggregation_data<EOL>)<EOL><DEDENT><DEDENT>index_name = '<STR_LIT>'.format(self.event,<EOL>interval_date.strftime(<EOL>self.index_name_suffix))<EOL>self.indices.add(index_name)<EOL>yield dict(_id='<STR_LIT>'.<EOL>format(aggregation['<STR_LIT:key>'],<EOL>interval_date.strftime(<EOL>self.doc_id_suffix)),<EOL>_index=index_name,<EOL>_type=self.aggregation_doc_type,<EOL>_source=aggregation_data)<EOL><DEDENT><DEDENT>self.last_index_written = index_name<EOL>", "docstring": "Aggregate and return dictionary to be indexed in ES.", "id": "f10402:c0:m7"}
{"signature": "def delete(self, start_date=None, end_date=None):", "body": "aggs_query = Search(<EOL>using=self.client,<EOL>index=self.aggregation_alias,<EOL>doc_type=self.aggregation_doc_type<EOL>).extra(_source=False)<EOL>range_args = {}<EOL>if start_date:<EOL><INDENT>range_args['<STR_LIT>'] = self._format_range_dt(<EOL>start_date.replace(microsecond=<NUM_LIT:0>))<EOL><DEDENT>if end_date:<EOL><INDENT>range_args['<STR_LIT>'] = self._format_range_dt(<EOL>end_date.replace(microsecond=<NUM_LIT:0>))<EOL><DEDENT>if range_args:<EOL><INDENT>aggs_query = aggs_query.filter('<STR_LIT>', timestamp=range_args)<EOL><DEDENT>bookmarks_query = Search(<EOL>using=self.client,<EOL>index=self.aggregation_alias,<EOL>doc_type=self.bookmark_doc_type<EOL>).sort({'<STR_LIT:date>': {'<STR_LIT>': '<STR_LIT>'}})<EOL>if range_args:<EOL><INDENT>bookmarks_query = bookmarks_query.filter('<STR_LIT>', date=range_args)<EOL><DEDENT>def _delete_actions():<EOL><INDENT>for query in (aggs_query, bookmarks_query):<EOL><INDENT>affected_indices = set()<EOL>for doc in query.scan():<EOL><INDENT>affected_indices.add(doc.meta.index)<EOL>yield dict(_index=doc.meta.index,<EOL>_op_type='<STR_LIT>',<EOL>_id=doc.meta.id,<EOL>_type=doc.meta.doc_type)<EOL><DEDENT>current_search_client.indices.flush(<EOL>index='<STR_LIT:U+002C>'.join(affected_indices), wait_if_ongoing=True)<EOL><DEDENT><DEDENT>bulk(self.client, _delete_actions(), refresh=True)<EOL>", "docstring": "Delete aggregation documents.", "id": "f10402:c0:m10"}
{"signature": "def set_bookmark(self):", "body": "def _success_date():<EOL><INDENT>bookmark = {<EOL>'<STR_LIT:date>': self.new_bookmark or datetime.datetime.utcnow().<EOL>strftime(self.doc_id_suffix)<EOL>}<EOL>yield dict(_index=self.last_index_written,<EOL>_type=self.bookmark_doc_type,<EOL>_source=bookmark)<EOL><DEDENT>if self.last_index_written:<EOL><INDENT>bulk(self.client,<EOL>_success_date(),<EOL>stats_only=True)<EOL><DEDENT>", "docstring": "Set bookmark for starting next aggregation.", "id": "f10402:c0:m5"}
{"signature": "def list_bookmarks(self, start_date=None, end_date=None, limit=None):", "body": "query = Search(<EOL>using=self.client,<EOL>index=self.aggregation_alias,<EOL>doc_type=self.bookmark_doc_type<EOL>).sort({'<STR_LIT:date>': {'<STR_LIT>': '<STR_LIT>'}})<EOL>range_args = {}<EOL>if start_date:<EOL><INDENT>range_args['<STR_LIT>'] = self._format_range_dt(<EOL>start_date.replace(microsecond=<NUM_LIT:0>))<EOL><DEDENT>if end_date:<EOL><INDENT>range_args['<STR_LIT>'] = self._format_range_dt(<EOL>end_date.replace(microsecond=<NUM_LIT:0>))<EOL><DEDENT>if range_args:<EOL><INDENT>query = query.filter('<STR_LIT>', date=range_args)<EOL><DEDENT>return query[<NUM_LIT:0>:limit].execute() if limit else query.scan()<EOL>", "docstring": "List the aggregation's bookmarks.", "id": "f10402:c0:m9"}
{"signature": "@aggregations.command('<STR_LIT>')<EOL>@aggr_arg<EOL>@click.option('<STR_LIT>', callback=_parse_date)<EOL>@click.option('<STR_LIT>', callback=_parse_date)<EOL>@click.confirmation_option(<EOL>prompt='<STR_LIT>')<EOL>@with_appcontext<EOL>def _aggregations_delete(aggregation_types=None,<EOL>start_date=None, end_date=None):", "body": "aggregation_types = (aggregation_types or<EOL>list(current_stats.enabled_aggregations))<EOL>for a in aggregation_types:<EOL><INDENT>aggr_cfg = current_stats.aggregations[a]<EOL>aggregator = aggr_cfg.aggregator_class(<EOL>name=aggr_cfg.name, **aggr_cfg.aggregator_config)<EOL>aggregator.delete(start_date, end_date)<EOL><DEDENT>", "docstring": "Delete computed aggregations.", "id": "f10404:m10"}
{"signature": "@stats.group()<EOL>def aggregations():", "body": "", "docstring": "Aggregation management commands.", "id": "f10404:m8"}
{"signature": "@events.command('<STR_LIT>')<EOL>@click.argument('<STR_LIT>', nargs=-<NUM_LIT:1>, callback=_validate_event_type)<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True)<EOL>@with_appcontext<EOL>def _events_process(event_types=None, eager=False):", "body": "event_types = event_types or list(current_stats.enabled_events)<EOL>if eager:<EOL><INDENT>process_events.apply((event_types,), throw=True)<EOL>click.secho('<STR_LIT>', fg='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>process_events.delay(event_types)<EOL>click.secho('<STR_LIT>', fg='<STR_LIT>')<EOL><DEDENT>", "docstring": "Process stats events.", "id": "f10404:m7"}
{"signature": "def __init__(self, app=None, **kwargs):", "body": "if app:<EOL><INDENT>self.init_app(app, **kwargs)<EOL><DEDENT>", "docstring": "Extension initialization.", "id": "f10405:c1:m0"}
{"signature": "@cached_property<EOL><INDENT>def permission_factory(self):<DEDENT>", "body": "return load_or_import_from_config(<EOL>'<STR_LIT>', app=self.app<EOL>)<EOL>", "docstring": "Load default permission factory for Buckets collections.", "id": "f10405:c0:m7"}
{"signature": "def consume(self, event_type, no_ack=True, payload=True):", "body": "assert event_type in self.events<EOL>return current_queues.queues['<STR_LIT>'.format(event_type)].consume(<EOL>payload=payload)<EOL>", "docstring": "Comsume all pending events.", "id": "f10405:c0:m9"}
{"signature": "@cached_property<EOL><INDENT>def _aggregations_config(self):<DEDENT>", "body": "result = {}<EOL>for ep in iter_entry_points(<EOL>group=self.entry_point_group_aggs):<EOL><INDENT>for cfg in ep.load()():<EOL><INDENT>if cfg['<STR_LIT>'] not in self.enabled_aggregations:<EOL><INDENT>continue<EOL><DEDENT>elif cfg['<STR_LIT>'] in result:<EOL><INDENT>raise DuplicateAggregationError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(cfg['<STR_LIT>'], ep.name))<EOL><DEDENT>cfg.update(<EOL>self.enabled_aggregations[cfg['<STR_LIT>']] or {}<EOL>)<EOL>result[cfg['<STR_LIT>']] = cfg<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Load aggregation configurations.", "id": "f10405:c0:m3"}
{"signature": "def init_config(self, app):", "body": "for k in dir(config):<EOL><INDENT>if k.startswith('<STR_LIT>'):<EOL><INDENT>app.config.setdefault(k, getattr(config, k))<EOL><DEDENT><DEDENT>", "docstring": "Initialize configuration.", "id": "f10405:c1:m2"}
{"signature": "def publish(self, event_type, events):", "body": "assert event_type in self.events<EOL>current_queues.queues['<STR_LIT>'.format(event_type)].publish(events)<EOL>", "docstring": "Publish events.", "id": "f10405:c0:m8"}
{"signature": "def validate_arguments(self, start_date, end_date, **kwargs):", "body": "if set(kwargs) < set(self.required_filters):<EOL><INDENT>raise InvalidRequestInputError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(set(self.required_filters.keys()),<EOL>self.query_name)<EOL>)<EOL><DEDENT>", "docstring": "Validate query arguments.", "id": "f10407:c2:m1"}
{"signature": "def __init__(self, query_name, doc_type, index, client=None,<EOL>*args, **kwargs):", "body": "super(ESQuery, self).__init__()<EOL>self.index = index<EOL>self.client = client or current_search_client<EOL>self.query_name = query_name<EOL>self.doc_type = doc_type<EOL>", "docstring": "Constructor.\n\n        :param doc_type: queried document type.\n        :param index: queried index.\n        :param client: elasticsearch client used to query.", "id": "f10407:c0:m0"}
{"signature": "def __init__(self, time_field='<STR_LIT>', copy_fields=None,<EOL>query_modifiers=None, required_filters=None,<EOL>metric_fields=None, *args, **kwargs):", "body": "super(ESDateHistogramQuery, self).__init__(*args, **kwargs)<EOL>self.time_field = time_field<EOL>self.copy_fields = copy_fields or {}<EOL>self.query_modifiers = query_modifiers or []<EOL>self.required_filters = required_filters or {}<EOL>self.metric_fields = metric_fields or {'<STR_LIT:value>': ('<STR_LIT>', '<STR_LIT:count>', {})}<EOL>self.allowed_metrics = {<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}<EOL>if any(v not in self.allowed_metrics<EOL>for k, (v, _, _) in (self.metric_fields or {}).items()):<EOL><INDENT>raise(ValueError('<STR_LIT>'<EOL>.format('<STR_LIT:U+002CU+0020>'.join(self.allowed_metrics))))<EOL><DEDENT>", "docstring": "Constructor.\n\n        :param time_field: name of the timestamp field.\n        :param copy_fields: list of fields to copy from the top hit document\n            into the resulting aggregation.\n        :param query_modifiers: List of functions accepting a ``query`` and\n            ``**kwargs`` (same as provided to the ``run`` method), that will\n            be applied to the aggregation query.\n        :param required_filters: Dict of \"mandatory query parameter\" ->\n            \"filtered field\".\n        :param metric_fields: Dict of \"destination field\" ->\n            tuple(\"metric type\", \"source field\", \"metric_options\").", "id": "f10407:c1:m0"}
{"signature": "def hash_id(iso_timestamp, msg):", "body": "return '<STR_LIT>'.format(iso_timestamp,<EOL>hashlib.sha1(<EOL>msg.get('<STR_LIT>').encode('<STR_LIT:utf-8>') +<EOL>str(msg.get('<STR_LIT>')).<EOL>encode('<STR_LIT:utf-8>')).<EOL>hexdigest())<EOL>", "docstring": "Generate event id, optimized for ES.", "id": "f10409:m3"}
{"signature": "def __init__(self, queue, prefix='<STR_LIT>', suffix='<STR_LIT>', client=None,<EOL>preprocessors=None, double_click_window=<NUM_LIT:10>):", "body": "self.queue = queue<EOL>self.client = client or current_search_client<EOL>self.doctype = queue.routing_key<EOL>self.index = '<STR_LIT>'.format(prefix, self.queue.routing_key)<EOL>self.suffix = suffix<EOL>self.preprocessors = [<EOL>obj_or_import_string(preproc) for preproc in preprocessors<EOL>] if preprocessors is not None else self.default_preprocessors<EOL>self.double_click_window = double_click_window<EOL>", "docstring": "Initialize indexer.\n\n        :param prefix: prefix appended to elasticsearch indices' name.\n        :param suffix: suffix appended to elasticsearch indices' name.\n        :param double_click_window: time window during which similar events are\n            deduplicated (counted as one occurence).\n        :param client: elasticsearch client.\n        :param preprocessors: a list of functions which are called on every\n            event before it is indexed. Each function should return the\n            processed event. If it returns None, the event is filtered and\n            won't be indexed.", "id": "f10409:c0:m0"}
{"signature": "def flag_robots(doc):", "body": "doc['<STR_LIT>'] = '<STR_LIT>' in doc and is_robot(doc['<STR_LIT>'])<EOL>return doc<EOL>", "docstring": "Flag events which are created by robots.\n\n    The list of robots is defined by the `COUNTER-robots Python package\n    <https://github.com/inveniosoftware/counter-robots>`_ , which follows the\n    `list defined by Project COUNTER\n    <https://www.projectcounter.org/appendices/850-2/>`_ that was later split\n    into robots and machines by `the Make Data Count project\n    <https://github.com/CDLUC3/Make-Data-Count/tree/master/user-agents>`_.", "id": "f10409:m1"}
{"signature": "def actionsiter(self):", "body": "for msg in self.queue.consume():<EOL><INDENT>try:<EOL><INDENT>for preproc in self.preprocessors:<EOL><INDENT>msg = preproc(msg)<EOL>if msg is None:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if msg is None:<EOL><INDENT>continue<EOL><DEDENT>suffix = arrow.get(msg.get('<STR_LIT>')).strftime(self.suffix)<EOL>ts = parser.parse(msg.get('<STR_LIT>'))<EOL>ts = ts.replace(microsecond=<NUM_LIT:0>)<EOL>msg['<STR_LIT>'] = ts.isoformat()<EOL>if self.double_click_window > <NUM_LIT:0>:<EOL><INDENT>timestamp = mktime(utc.localize(ts).utctimetuple())<EOL>ts = ts.fromtimestamp(<EOL>timestamp // self.double_click_window *<EOL>self.double_click_window<EOL>)<EOL><DEDENT>yield dict(<EOL>_id=hash_id(ts.isoformat(), msg),<EOL>_op_type='<STR_LIT:index>',<EOL>_index='<STR_LIT>'.format(self.index, suffix),<EOL>_type=self.doctype,<EOL>_source=msg,<EOL>)<EOL><DEDENT>except Exception:<EOL><INDENT>current_app.logger.exception(u'<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Iterator.", "id": "f10409:c0:m1"}
{"signature": "@shared_task<EOL>def process_events(event_types):", "body": "results = []<EOL>for e in event_types:<EOL><INDENT>processor = current_stats.events[e].processor_class(<EOL>**current_stats.events[e].processor_config)<EOL>results.append((e, processor.run()))<EOL><DEDENT>return results<EOL>", "docstring": "Index statistics events.", "id": "f10411:m0"}
{"signature": "def get_user():", "body": "return dict(<EOL>ip_address=request.remote_addr,<EOL>user_agent=request.user_agent.string,<EOL>user_id=(<EOL>current_user.get_id() if current_user.is_authenticated else None<EOL>),<EOL>session_id=session.get('<STR_LIT>')<EOL>)<EOL>", "docstring": "User information.\n\n    .. note::\n\n       **Privacy note** A users IP address, user agent string, and user id\n       (if logged in) is sent to a message queue, where it is stored for about\n       5 minutes. The information is used to:\n\n       - Detect robot visits from the user agent string.\n       - Generate an anonymized visitor id (using a random salt per day).\n       - Detect the users host contry based on the IP address.\n\n       The information is then discarded.", "id": "f10412:m2"}
{"signature": "def default_permission_factory(query_name, params):", "body": "from invenio_stats import current_stats<EOL>if current_stats.queries[query_name].permission_factory is None:<EOL><INDENT>return AllowAllPermission<EOL><DEDENT>else:<EOL><INDENT>return current_stats.queries[query_name].permission_factory(<EOL>query_name, params<EOL>)<EOL><DEDENT>", "docstring": "Default permission factory.\n\n    It enables by default the statistics if they don't have a dedicated\n    permission factory.", "id": "f10412:m5"}
{"signature": "def load_or_import_from_config(key, app=None, default=None):", "body": "app = app or current_app<EOL>imp = app.config.get(key)<EOL>return obj_or_import_string(imp, default=default)<EOL>", "docstring": "Load or import value from config.\n\n    :returns: The loaded value.", "id": "f10412:m4"}
{"signature": "@classmethod<EOL><INDENT>def condense_ranges(cls, ranges):<DEDENT>", "body": "result = []<EOL>if ranges:<EOL><INDENT>ranges.sort(key=lambda tup: tup[<NUM_LIT:0>])<EOL>result.append(ranges[<NUM_LIT:0>])<EOL>for i in range(<NUM_LIT:1>, len(ranges)):<EOL><INDENT>if result[-<NUM_LIT:1>][<NUM_LIT:1>] + <NUM_LIT:1> >= ranges[i][<NUM_LIT:0>]:<EOL><INDENT>result[-<NUM_LIT:1>] = (result[-<NUM_LIT:1>][<NUM_LIT:0>], max(result[-<NUM_LIT:1>][<NUM_LIT:1>], ranges[i][<NUM_LIT:1>]))<EOL><DEDENT>else:<EOL><INDENT>result.append(ranges[i])<EOL><DEDENT><DEDENT><DEDENT>return result<EOL>", "docstring": "Sorts and removes overlaps", "id": "f10417:c1:m4"}
{"signature": "@classmethod<EOL><INDENT>def parse_byteranges(cls, environ):<DEDENT>", "body": "r = []<EOL>s = environ.get(cls.header_range, '<STR_LIT>').replace('<STR_LIT:U+0020>','<STR_LIT>').lower()<EOL>if s:<EOL><INDENT>l = s.split('<STR_LIT:=>')<EOL>if len(l) == <NUM_LIT:2>:<EOL><INDENT>unit, vals = tuple(l)<EOL>if unit == '<STR_LIT>' and vals:<EOL><INDENT>gen_rng = ( tuple(rng.split('<STR_LIT:->')) for rng in vals.split('<STR_LIT:U+002C>') if '<STR_LIT:->' in rng )<EOL>for start, end in gen_rng:<EOL><INDENT>if start or end:<EOL><INDENT>r.append( (int(start) if start else None, int(end) if end else None) )<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return r<EOL>", "docstring": "Outputs a list of tuples with ranges or the empty list\nAccording to the rfc, start or end values can be omitted", "id": "f10417:c1:m1"}
{"signature": "@classmethod<EOL><INDENT>def check_ranges(cls, ranges, length):<DEDENT>", "body": "result = []<EOL>for start, end in ranges:<EOL><INDENT>if isinstance(start, int) or isinstance(end, int):<EOL><INDENT>if isinstance(start, int) and not (<NUM_LIT:0> <= start < length):<EOL><INDENT>continue<EOL><DEDENT>elif isinstance(start, int) and isinstance(end, int) and not (start <= end):<EOL><INDENT>continue<EOL><DEDENT>elif start is None and end == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>result.append( (start,end) )<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Removes errored ranges", "id": "f10417:c1:m2"}
{"signature": "def _parse_frequencies(self):", "body": "frequencies = OrderedDict([<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>')])<EOL>pref_freq = '<STR_LIT>'<EOL>for source in frequencies.keys():<EOL><INDENT>freq_key = '<STR_LIT>' + source<EOL>if freq_key in self.info:<EOL><INDENT>frequencies[source] = self.info[freq_key]<EOL>if pref_freq == '<STR_LIT>':<EOL><INDENT>pref_freq = frequencies[source]<EOL><DEDENT><DEDENT><DEDENT>return pref_freq, frequencies<EOL>", "docstring": "Parse frequency data in ClinVar VCF", "id": "f10419:c1:m2"}
{"signature": "def as_dict(self, *args, **kwargs):", "body": "self_as_dict = super(ClinVarAllele, self).as_dict(*args, **kwargs)<EOL>self_as_dict['<STR_LIT>'] = self.hgvs<EOL>self_as_dict['<STR_LIT>'] = self.clnalleleid<EOL>self_as_dict['<STR_LIT>'] = self.clnsig<EOL>self_as_dict['<STR_LIT>'] = self.clndn<EOL>self_as_dict['<STR_LIT>'] = self.clndisdb<EOL>self_as_dict['<STR_LIT>'] = self.clnvi<EOL>return self_as_dict<EOL>", "docstring": "Return ClinVarAllele data as dict object.", "id": "f10419:c0:m1"}
{"signature": "def as_dict(self):", "body": "self_as_dict = dict()<EOL>self_as_dict['<STR_LIT>'] = self.sequence<EOL>if hasattr(self, '<STR_LIT>'):<EOL><INDENT>self_as_dict['<STR_LIT>'] = self.frequency<EOL><DEDENT>return self_as_dict<EOL>", "docstring": "Return Allele data as dict object.", "id": "f10420:c0:m3"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "sequence = kwargs['<STR_LIT>']<EOL>if '<STR_LIT>' in kwargs:<EOL><INDENT>frequency = kwargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>frequency = '<STR_LIT>'<EOL><DEDENT>if not (re.match(r'<STR_LIT>', sequence) or<EOL>re.match(r'<STR_LIT>', sequence)):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.sequence = sequence<EOL>if frequency:<EOL><INDENT>try:<EOL><INDENT>if (float(frequency) < <NUM_LIT:0.0> or<EOL>float(frequency) > <NUM_LIT:1.0>):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>if not frequency == '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>' +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>self.frequency = frequency<EOL><DEDENT>", "docstring": "Initialize Allele object\n\nRequired arguments:\nsequence:  Short string of DNA letters (ACGT) for the allele.\n           May be empty (to represent a deletion).\n\nOptional arguments:\nfrequency: a string representation of a float between 0 and 1", "id": "f10420:c0:m0"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "vcf_line = kwargs['<STR_LIT>']<EOL>skip_info = ('<STR_LIT>' in kwargs and kwargs['<STR_LIT>'])<EOL>vcf_fields = vcf_line.strip().split('<STR_LIT:\\t>')<EOL>self.chrom = vcf_fields[<NUM_LIT:0>]<EOL>self.start = int(vcf_fields[<NUM_LIT:1>])<EOL>self.ref_allele = vcf_fields[<NUM_LIT:3>]<EOL>if vcf_fields[<NUM_LIT:4>] == '<STR_LIT:.>':<EOL><INDENT>self.alt_alleles = []<EOL><DEDENT>else:<EOL><INDENT>self.alt_alleles = vcf_fields[<NUM_LIT:4>].split('<STR_LIT:U+002C>')<EOL><DEDENT>if not skip_info:<EOL><INDENT>self.info = self._parse_info(vcf_fields[<NUM_LIT:7>])<EOL><DEDENT>self.alleles = self._parse_allele_data()<EOL>", "docstring": "Store data from a VCF line.", "id": "f10420:c1:m0"}
{"signature": "def as_dict(self):", "body": "self_as_dict = {'<STR_LIT>': self.chrom,<EOL>'<STR_LIT:start>': self.start,<EOL>'<STR_LIT>': self.ref_allele,<EOL>'<STR_LIT>': self.alt_alleles,<EOL>'<STR_LIT>': [x.as_dict() for x in self.alleles]}<EOL>try:<EOL><INDENT>self_as_dict['<STR_LIT:info>'] = self.info<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>return self_as_dict<EOL>", "docstring": "Dict representation of parsed VCF data", "id": "f10420:c1:m4"}
{"signature": "def __str__(self):", "body": "return self.as_json()<EOL>", "docstring": "Print Allele object as dict object data.", "id": "f10420:c0:m2"}
{"signature": "def __unicode__(self):", "body": "return self.as_json()<EOL>", "docstring": "Print Allele object as dict object data.", "id": "f10420:c0:m1"}
{"signature": "def _parse_genotype(self, vcf_fields):", "body": "format_col = vcf_fields[<NUM_LIT:8>].split('<STR_LIT::>')<EOL>genome_data = vcf_fields[<NUM_LIT:9>].split('<STR_LIT::>')<EOL>try:<EOL><INDENT>gt_idx = format_col.index('<STR_LIT>')<EOL><DEDENT>except ValueError:<EOL><INDENT>return []<EOL><DEDENT>return [int(x) for x in re.split(r'<STR_LIT>', genome_data[gt_idx]) if<EOL>x != '<STR_LIT:.>']<EOL>", "docstring": "Parse genotype from VCF line data", "id": "f10423:c0:m2"}
{"signature": "def itertable(table):", "body": "for item in table:<EOL><INDENT>res = {<EOL>k.lower(): nfd(v) if isinstance(v, text_type) else v for k, v in item.items()}<EOL>for extra in res.pop('<STR_LIT>', []):<EOL><INDENT>k, _, v = extra.partition('<STR_LIT::>')<EOL>res[k.strip()] = v.strip()<EOL><DEDENT>yield res<EOL><DEDENT>", "docstring": "Auxiliary function for iterating over a data table.", "id": "f10437:m3"}
{"signature": "def __getitem__(self, sound):", "body": "return self.resolve_sound(sound)<EOL>", "docstring": "Return a Sound instance matching the specification.", "id": "f10437:c0:m2"}
{"signature": "def _norm(self, string):", "body": "nstring = norm(string)<EOL>if \"<STR_LIT:/>\" in string:<EOL><INDENT>s, t = string.split('<STR_LIT:/>')<EOL>nstring = t<EOL><DEDENT>return self.normalize(nstring)<EOL>", "docstring": "Extended normalization: normalize by list of norm-characers, split\n        by character \"/\".", "id": "f10441:c0:m2"}
{"signature": "def __init__(self, id_):", "body": "if hasattr(self, '<STR_LIT>'):<EOL><INDENT>return<EOL><DEDENT>assert id_<EOL>system = pkg_path('<STR_LIT>', id_)<EOL>if not (system.exists() and system.is_dir()):<EOL><INDENT>raise ValueError('<STR_LIT>'.format(id_))<EOL><DEDENT>self.system = TableGroup.from_file(<EOL>pkg_path('<STR_LIT>', '<STR_LIT>'))<EOL>self.system._fname = system / '<STR_LIT>'<EOL>self.features = {'<STR_LIT>': {}, '<STR_LIT>': {}, '<STR_LIT>': {}}<EOL>self._feature_values = {}<EOL>features = jsonlib.load(pkg_path('<STR_LIT>', '<STR_LIT>'))<EOL>self.diacritics = dict(<EOL>consonant={}, vowel={}, click={}, diphthong={}, tone={}, cluster={})<EOL>for dia in itertable(self.system.tabledict['<STR_LIT>']):<EOL><INDENT>if not dia['<STR_LIT>'] and not dia['<STR_LIT>']:<EOL><INDENT>self.features[dia['<STR_LIT:type>']][dia['<STR_LIT:value>']] = dia['<STR_LIT>']<EOL><DEDENT>self._feature_values[dia['<STR_LIT:value>']] = dia['<STR_LIT>']<EOL>self.diacritics[dia['<STR_LIT:type>']][dia['<STR_LIT>']] = dia['<STR_LIT:value>']<EOL><DEDENT>self.sound_classes = {}<EOL>self.columns = {}  <EOL>self.sounds = {}  <EOL>self._covered = {}<EOL>aliases = []<EOL>for cls in [Consonant, Vowel, Tone, Marker]:  <EOL><INDENT>type_ = cls.__name__.lower()<EOL>self.sound_classes[type_] = cls<EOL>self.columns[type_] = [<EOL>c['<STR_LIT:name>'].lower() for c in<EOL>self.system.tabledict['<STR_LIT>'.format(type_)]<EOL>.asdict()['<STR_LIT>']['<STR_LIT>']]<EOL>for l, item in enumerate(itertable(<EOL>self.system.tabledict['<STR_LIT>'.format(type_)])):<EOL><INDENT>if item['<STR_LIT>'] in self.sounds:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(<EOL>type_ + '<STR_LIT>', l + <NUM_LIT:2>, item['<STR_LIT>']))<EOL><DEDENT>sound = cls(ts=self, **item)<EOL>for key, value in item.items():<EOL><INDENT>if key not in {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'} andvalue and value not in self._feature_values:<EOL><INDENT>self._feature_values[value] = key<EOL>if type_ != '<STR_LIT>' and value not in 
features[type_][key]:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\".format(<EOL>key, value, l + <NUM_LIT:2>))<EOL><DEDENT><DEDENT><DEDENT>self.sounds[item['<STR_LIT>']] = sound<EOL>if not sound.alias:<EOL><INDENT>if sound.featureset in self.features:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(<EOL>type_ + '<STR_LIT>', l + <NUM_LIT:2>, sound.name))<EOL><DEDENT>self.features[sound.featureset] = sound<EOL><DEDENT>else:<EOL><INDENT>aliases += [(l, sound.type, sound.featureset)]<EOL><DEDENT><DEDENT><DEDENT>if [x for x in aliases if x[<NUM_LIT:2>] not in self.features]:  <EOL><INDENT>error = '<STR_LIT:U+002CU+0020>'.join(<EOL>text_type(x[<NUM_LIT:0>] + <NUM_LIT:2>) + '<STR_LIT:/>' + text_type(x[<NUM_LIT:1>])<EOL>for x in aliases if x[<NUM_LIT:2>] not in self.features)<EOL>raise ValueError(<EOL>'<STR_LIT>'.format(error))<EOL><DEDENT>self._regex = None<EOL>self._update_regex()<EOL>self._normalize = {<EOL>norm(r['<STR_LIT:source>']): norm(r['<STR_LIT:target>'])<EOL>for r in itertable(self.system.tabledict['<STR_LIT>'])}<EOL>", "docstring": ":param system: The name of a transcription system or a directory containing one.", "id": "f10441:c0:m0"}
{"signature": "def _parse(self, string):", "body": "nstring = self._norm(string)<EOL>if nstring in self.sounds:<EOL><INDENT>sound = self.sounds[nstring]<EOL>sound.normalized = nstring != string<EOL>sound.source = string<EOL>return sound<EOL><DEDENT>match = list(self._regex.finditer(nstring))<EOL>if len(match) == <NUM_LIT:2>:<EOL><INDENT>sound1 = self._parse(nstring[:match[<NUM_LIT:1>].start()])<EOL>sound2 = self._parse(nstring[match[<NUM_LIT:1>].start():])<EOL>if '<STR_LIT>' not in (sound1.type, sound2.type) andsound1.type == sound2.type:<EOL><INDENT>if sound1.type == '<STR_LIT>':<EOL><INDENT>return Diphthong.from_sounds(  <EOL>string, sound1, sound2, self)<EOL><DEDENT>elif sound1.type == '<STR_LIT>' andsound1.manner in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>') andsound2.manner in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>return Cluster.from_sounds(  <EOL>string, sound1, sound2, self)<EOL><DEDENT><DEDENT>return UnknownSound(grapheme=nstring, source=string, ts=self)  <EOL><DEDENT>if len(match) != <NUM_LIT:1>:<EOL><INDENT>return UnknownSound(grapheme=nstring, source=string, ts=self)  <EOL><DEDENT>pre, mid, post = nstring.partition(nstring[match[<NUM_LIT:0>].start():match[<NUM_LIT:0>].end()])<EOL>base_sound = self.sounds[mid]<EOL>if isinstance(base_sound, Marker):  <EOL><INDENT>assert pre or post<EOL>return UnknownSound(grapheme=nstring, source=string, ts=self)  <EOL><DEDENT>features = attr.asdict(base_sound)<EOL>features.update(<EOL>source=string,<EOL>generated=True,<EOL>normalized=nstring != string,<EOL>base=base_sound.grapheme)<EOL>grapheme, sound = '<STR_LIT>', '<STR_LIT>'<EOL>for dia in [p + EMPTY for p in pre]:<EOL><INDENT>feature = self.diacritics[base_sound.type].get(dia, {})<EOL>if not feature:<EOL><INDENT>return UnknownSound(  <EOL>grapheme=nstring, source=string, ts=self)<EOL><DEDENT>features[self._feature_values[feature]] = feature<EOL>grapheme += dia[<NUM_LIT:0>]<EOL>sound += 
self.features[base_sound.type][feature][<NUM_LIT:0>]<EOL><DEDENT>grapheme += base_sound.grapheme<EOL>sound += base_sound.s<EOL>for dia in [EMPTY + p for p in post]:<EOL><INDENT>feature = self.diacritics[base_sound.type].get(dia, {})<EOL>if not feature:<EOL><INDENT>return UnknownSound(  <EOL>grapheme=nstring, source=string, ts=self)<EOL><DEDENT>features[self._feature_values[feature]] = feature<EOL>grapheme += dia[<NUM_LIT:1>]<EOL>sound += self.features[base_sound.type][feature][<NUM_LIT:1>]<EOL><DEDENT>features['<STR_LIT>'] = sound<EOL>new_sound = self.sound_classes[base_sound.type](**features)<EOL>if text_type(new_sound) != sound:<EOL><INDENT>new_sound.alias = True<EOL><DEDENT>if grapheme != sound:<EOL><INDENT>new_sound.alias = True<EOL>new_sound.grapheme = grapheme<EOL><DEDENT>return new_sound<EOL>", "docstring": "Parse a string and return its features.\n\n        :param string: A one-symbol string in NFD\n\n        Notes\n        -----\n        Strategy is rather simple: we determine the base part of a string and\n        then search left and right of this part for the additional features as\n        expressed by the diacritics. Fails if a segment has more than one basic\n        part.", "id": "f10441:c0:m5"}
{"signature": "def normalize(self, string):", "body": "return '<STR_LIT>'.join([self._normalize.get(x, x) for x in nfd(string)])<EOL>", "docstring": "Normalize the string according to normalization list", "id": "f10441:c0:m3"}
{"signature": "@property<EOL><INDENT>def codepoints(self):<DEDENT>", "body": "return '<STR_LIT:U+0020>'.join('<STR_LIT>' + ('<STR_LIT>' + hex(ord(x))[<NUM_LIT:2>:])[-<NUM_LIT:4>:] for x in self.__unicode__())<EOL>", "docstring": "Return unicode codepoint(s) for a grapheme.", "id": "f10442:c0:m5"}
{"signature": "@property<EOL><INDENT>def table(self):<DEDENT>", "body": "tbl = []<EOL>features = [<EOL>f for f in self._name_order if f not in self.ts.columns[self.type]]<EOL>if self.generated and self.s != self.source:<EOL><INDENT>tbl += [self.__unicode__() + '<STR_LIT>' + self.source]<EOL><DEDENT>else:<EOL><INDENT>tbl += [self.__unicode__()]<EOL><DEDENT>for name in self.ts.columns[self.type][<NUM_LIT:1>:]:<EOL><INDENT>if name != '<STR_LIT>' and name != '<STR_LIT>':<EOL><INDENT>tbl += [getattr(self, name) or '<STR_LIT>']<EOL><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>tbl += ['<STR_LIT:+>' if getattr(self, name) else '<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>bundle = []<EOL>for f in features:<EOL><INDENT>val = getattr(self, f)<EOL>if val:<EOL><INDENT>bundle += ['<STR_LIT>'.format(f, val)]<EOL><DEDENT><DEDENT>tbl += ['<STR_LIT:U+002C>'.join(bundle)]<EOL><DEDENT><DEDENT>return tbl<EOL>", "docstring": "Returns the tabular representation of the sound as given in our data", "id": "f10442:c2:m11"}
{"signature": "@property<EOL><INDENT>def uname(self):<DEDENT>", "body": "try:<EOL><INDENT>return '<STR_LIT>'.join(unicodedata.name(ss) for ss in self.__unicode__())<EOL><DEDENT>except TypeError:<EOL><INDENT>return '<STR_LIT:->'<EOL><DEDENT>except ValueError:<EOL><INDENT>return '<STR_LIT:?>'<EOL><DEDENT>", "docstring": "Return unicode name(s) for a character set.", "id": "f10442:c0:m4"}
{"signature": "def __unicode__(self):", "body": "<EOL>if not self.generated:<EOL><INDENT>if not self.alias and self.grapheme in self.ts.sounds:<EOL><INDENT>return self.grapheme<EOL><DEDENT>elif self.alias and self.featureset in self.ts.features:<EOL><INDENT>return text_type(self.ts.features[self.featureset])<EOL><DEDENT>raise ValueError(<EOL>'<STR_LIT>'.format(self.grapheme))  <EOL><DEDENT>elements = [f for f in self._features() if f not in EXCLUDE_FEATURES] + [self.type]<EOL>base_str = self.base or '<STR_LIT>'<EOL>base_graphemes = []<EOL>while elements:<EOL><INDENT>base = self.ts.features.get(frozenset(elements))<EOL>if base:<EOL><INDENT>base_graphemes.append(base.grapheme)<EOL><DEDENT>elements.pop(<NUM_LIT:0>)<EOL><DEDENT>base_str = base_graphemes[-<NUM_LIT:1>] if base_graphemes else base_str or '<STR_LIT>'<EOL>base_vals = {<EOL>self.ts._feature_values[elm] for elm in<EOL>self.ts.sounds[base_str].name.split('<STR_LIT:U+0020>')[:-<NUM_LIT:1>]} ifbase_str != '<STR_LIT>' else {}<EOL>out = []<EOL>for p in self._write_order['<STR_LIT>']:<EOL><INDENT>if p not in base_vals and getattr(self, p, '<STR_LIT>') in self._features():<EOL><INDENT>out.append(<EOL>norm(self.ts.features[self.type].get(getattr(self, p, '<STR_LIT>'), '<STR_LIT>')))<EOL><DEDENT><DEDENT>out.append(base_str)<EOL>for p in self._write_order['<STR_LIT>']:<EOL><INDENT>if p not in base_vals and getattr(self, p, '<STR_LIT>') in self._features():<EOL><INDENT>out.append(<EOL>norm(self.ts.features[self.type].get(getattr(self, p, '<STR_LIT>'), '<STR_LIT>')))<EOL><DEDENT><DEDENT>return '<STR_LIT>'.join(out)<EOL>", "docstring": "Return the reference representation of the sound.\n\nNote\n----\nWe first try to return the non-alias value in our data. If this fails,\nwe create the sound based on it's feature representation.", "id": "f10442:c2:m9"}
{"signature": "def resolve_sound(self, sound):", "body": "sound = sound if isinstance(sound, Symbol) else self.system[sound]<EOL>if sound.name in self.data:<EOL><INDENT>return self.data[sound.name]['<STR_LIT>']<EOL><DEDENT>if not sound.type == '<STR_LIT>':<EOL><INDENT>if sound.type in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>return self.resolve_sound(sound.from_sound)<EOL><DEDENT>name = [<EOL>s for s in sound.name.split('<STR_LIT:U+0020>') if<EOL>self.system._feature_values.get(s, '<STR_LIT>') not in<EOL>['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']]<EOL>while len(name) >= <NUM_LIT:4>:<EOL><INDENT>sound = self.system.get('<STR_LIT:U+0020>'.join(name))<EOL>if sound and sound.name in self.data:<EOL><INDENT>return self.resolve_sound(sound)<EOL><DEDENT>name.pop(<NUM_LIT:0>)<EOL><DEDENT><DEDENT>raise KeyError(\"<STR_LIT>\")<EOL>", "docstring": "Function tries to identify a sound in the data.\n\n        Notes\n        -----\n        The function tries to resolve sounds to take a sound with less complex\n        features in order to yield the next approximate sound class, if the\n        transcription data are sound classes.", "id": "f10444:c0:m1"}
{"signature": "def _normalize_csp_header(header):", "body": "return {p.strip() for p in (header or '<STR_LIT>').split('<STR_LIT:;>')}<EOL>", "docstring": "Normalize a CSP header for consistent comparisons.", "id": "f10450:m2"}
{"signature": "def __init__(self, app=None, **kwargs):", "body": "self.limiter = None<EOL>self.talisman = None<EOL>if app:<EOL><INDENT>self.init_app(app, **kwargs)<EOL><DEDENT>", "docstring": "r\"\"\"Extension initialization.\n\n        :param app: An instance of :class:`~flask.Flask`.\n        :param \\**kwargs: Keyword arguments are passed to ``init_app`` method.", "id": "f10457:c0:m0"}
{"signature": "def config_loader(app, **kwargs_config):", "body": "<EOL>local_templates_path = os.path.join(app.instance_path, '<STR_LIT>')<EOL>if os.path.exists(local_templates_path):<EOL><INDENT>app.jinja_loader = ChoiceLoader([<EOL>FileSystemLoader(local_templates_path),<EOL>app.jinja_loader,<EOL>])<EOL><DEDENT>app.jinja_options = dict(<EOL>app.jinja_options,<EOL>cache_size=<NUM_LIT:1000>,<EOL>bytecode_cache=BytecodeCache(app)<EOL>)<EOL>invenio_config_loader(app, **kwargs_config)<EOL>", "docstring": "Configuration loader.\n\n    Adds support for loading templates from the Flask application's instance\n    folder (``<instance_folder>/templates``).", "id": "f10462:m0"}
{"signature": "def __init__(self,config=None,rules=None):", "body": "self._rules = rules or []<EOL>self._dict = config or {}<EOL>self.enforce_rules()<EOL>", "docstring": ":param config: Old configuration\n:type config: behaving like dict or Config", "id": "f10465:c0:m0"}
{"signature": "def to_float(option,value):", "body": "if type(value) is str:<EOL><INDENT>try:<EOL><INDENT>value=float(value)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return (option,value)<EOL>", "docstring": "Converts string values to floats when appropriate", "id": "f10465:m2"}
{"signature": "def zip_dir(zip_name, source_dir,rename_source_dir=False):", "body": "src_path = Path(source_dir).expanduser().resolve()<EOL>with ZipFile(zip_name, '<STR_LIT:w>', ZIP_DEFLATED) as zf:<EOL><INDENT>for file in src_path.rglob('<STR_LIT:*>'):<EOL><INDENT>path_in_zip = str(file.relative_to(src_path.parent))<EOL>if rename_source_dir != False:<EOL><INDENT>_,tail = path_in_zip.split(os.sep,<NUM_LIT:1>)<EOL>path_in_zip=os.sep.join([rename_source_dir,tail])<EOL><DEDENT>zf.write(str(file.resolve()), path_in_zip)<EOL><DEDENT><DEDENT>", "docstring": "https://stackoverflow.com/questions/1855095/how-to-create-a-zip-archive-of-a-directory", "id": "f10481:m4"}
{"signature": "def unique(seq):", "body": "has = []<EOL>return [x for x in seq if not (x in has or has.append(x))]<EOL>", "docstring": "https://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-whilst-preserving-order", "id": "f10482:m0"}
{"signature": "def __rmul__(self, other):", "body": "F = RFunction()<EOL>for key in self.keys():<EOL><INDENT>F[key] = other * self[key]<EOL><DEDENT>return F<EOL>", "docstring": "Vector space operation: Multiply real-valued function with real", "id": "f10482:c0:m5"}
{"signature": "def __add__(self, other):", "body": "F = RFunction()<EOL>for key in self.keys():<EOL><INDENT>F[key] = self[key]<EOL><DEDENT>for key in other.keys():<EOL><INDENT>if key in F.keys():<EOL><INDENT>F[key] += other[key]<EOL><DEDENT>else:<EOL><INDENT>F[key] = other[key]<EOL><DEDENT><DEDENT>return F<EOL>", "docstring": "Vector space operation: Add two real-valued functions", "id": "f10482:c0:m2"}
{"signature": "def expand_domain(self, X):", "body": "for x in X:<EOL><INDENT>self[x] = None<EOL><DEDENT>", "docstring": "Expand domain\n\n:param X: New elements of domain\n:type X: Iterable", "id": "f10482:c0:m1"}
{"signature": "def __init__(self, init_dict=None):", "body": "dict.__init__(self)<EOL>if init_dict:<EOL><INDENT>for key in init_dict:<EOL><INDENT>self[key] = init_dict[key]<EOL><DEDENT><DEDENT>", "docstring": ":param init_dict: Initial state of function\n:type init_dict: Dictionary whos values support addition and scalar multiplication", "id": "f10482:c0:m0"}
{"signature": "def unwrap(self):", "body": "if self.__tight:<EOL><INDENT>return array(self.__wrapped)<EOL><DEDENT>return self.__wrapped<EOL>", "docstring": "r'''Returns the encapsulated ndarray.\n\n            If the wrapper is \"tight\", a copy of the encapsulated ndarray is\n            returned. Otherwise, the encapsulated ndarray itself is returned.", "id": "f10484:c0:m3"}
{"signature": "def integral(A=None,dF=None,F=None,axis = <NUM_LIT:0>,trapez = False,cumulative = False):", "body": "ndim = max(v.ndim for v in (A,dF,F) if v is not None)<EOL>def broadcast(x):<EOL><INDENT>new_shape = [<NUM_LIT:1>]*ndim<EOL>new_shape[axis] = -<NUM_LIT:1><EOL>return np.reshape(x,new_shape)<EOL><DEDENT>if F is not None:<EOL><INDENT>assert(dF is None)<EOL>if F.ndim<ndim:<EOL><INDENT>F = broadcast(F)<EOL><DEDENT>N = F.shape[axis]<EOL>dF = F.take(indices = range(<NUM_LIT:1>,N),axis = axis)-F.take(indices = range(N-<NUM_LIT:1>),axis = axis)<EOL><DEDENT>elif dF is not None:<EOL><INDENT>if dF.ndim<ndim:<EOL><INDENT>dF = broadcast(dF)<EOL><DEDENT>N = dF.shape[axis]+<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>if A.ndim<ndim:<EOL><INDENT>A = broadcast(A)<EOL><DEDENT>N = A.shape[axis]<EOL><DEDENT>if A is not None:<EOL><INDENT>if trapez:<EOL><INDENT>midA = (A.take(indices = range(<NUM_LIT:1>,N),axis = axis)+A.take(indices = range(N-<NUM_LIT:1>),axis = axis))/<NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>midA = A.take(indices=range(N-<NUM_LIT:1>),axis=axis)<EOL><DEDENT>if dF is not None:<EOL><INDENT>dY = midA*dF<EOL><DEDENT>else:<EOL><INDENT>dY = midA<EOL><DEDENT><DEDENT>else:<EOL><INDENT>dY = dF<EOL><DEDENT>pad_shape = list(dY.shape)<EOL>pad_shape[axis] = <NUM_LIT:1><EOL>pad = np.zeros(pad_shape)<EOL>if cumulative:<EOL><INDENT>return np.concatenate((pad,np.cumsum(dY,axis = axis)),axis = axis)<EOL><DEDENT>else:<EOL><INDENT>return np.sum(dY,axis = axis)<EOL><DEDENT>", "docstring": "Turns an array A of length N (the function values in N points)\nand an array dF of length N-1 (the masses of the N-1 intervals)\ninto an array of length N (the integral \\int A dF at N points, with first entry 0)\n\n:param A: Integrand (optional, default ones, length N)\n:param dF: Integrator (optional, default ones, length N-1)\n:param F: Alternative to dF (optional, length N)\n:param trapez: Use trapezoidal rule (else left point)", "id": "f10484:m6"}
{"signature": "def grid_evaluation(X, Y, f,vectorized=True):", "body": "XX = np.reshape(np.concatenate([X[..., None], Y[..., None]], axis=<NUM_LIT:2>), (X.size, <NUM_LIT:2>), order='<STR_LIT:C>')<EOL>if vectorized:<EOL><INDENT>ZZ = f(XX)<EOL><DEDENT>else:<EOL><INDENT>ZZ = np.array([f(x) for x in XX])<EOL><DEDENT>return np.reshape(ZZ, X.shape, order='<STR_LIT:C>')<EOL>", "docstring": "Evaluate function on given grid and return values in grid format\n\nAssume X and Y are 2-dimensional arrays containing x and y coordinates, \nrespectively, of a two-dimensional grid, and f is a function that takes\n1-d arrays with two entries. This function evaluates f on the grid points\ndescribed by X and Y and returns another 2-dimensional array of the shape \nof X and Y that contains the values of f.\n\n:param X: 2-dimensional array of x-coordinates\n:param Y: 2-dimensional array of y-coordinates\n:param f: function to be evaluated on grid\n:param vectorized: `f` can handle arrays of inputs\n:return: 2-dimensional array of values of f", "id": "f10484:m8"}
{"signature": "def weighted_median(values, weights):", "body": "if len(values) == <NUM_LIT:1>:<EOL><INDENT>return values[<NUM_LIT:0>]<EOL><DEDENT>if len(values) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>values = [float(value) for value in values]<EOL>indices_sorted = np.argsort(values)<EOL>values = [values[ind] for ind in indices_sorted]<EOL>weights = [weights[ind] for ind in indices_sorted]<EOL>total_weight = sum(weights)<EOL>below_weight = <NUM_LIT:0><EOL>i = -<NUM_LIT:1><EOL>while below_weight < total_weight / <NUM_LIT:2>:<EOL><INDENT>i += <NUM_LIT:1><EOL>below_weight += weights[i]<EOL><DEDENT>return values[i]<EOL>", "docstring": "Returns element such that sum of weights below and above are (roughly) equal\n\n:param values: Values whose median is sought\n:type values: List of reals\n:param weights: Weights of each value\n:type weights: List of positive reals\n:return: value of weighted median\n:rtype: Real", "id": "f10484:m11"}
{"signature": "@validate_args(warnings=False)<EOL>def EasyHPC(backend:In('<STR_LIT>', '<STR_LIT>')|Function='<STR_LIT>',<EOL>n_tasks:In('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:count>')='<STR_LIT>',<EOL>n_results:In('<STR_LIT>', '<STR_LIT>')='<STR_LIT>',<EOL>aux_output:Bool=True,  <EOL>reduce:Function=None,<EOL>split_job=NotPassed,<EOL>parallel = True,<EOL>method = None,<EOL>pool = None<EOL>):", "body": "self = argparse.Namespace()<EOL>direct_call =  (~String&Function).valid(backend)<EOL>if direct_call:<EOL><INDENT>f = backend<EOL>backend = '<STR_LIT>'<EOL><DEDENT>if backend == '<STR_LIT>': <EOL><INDENT>self.processor = _MPI_processor<EOL>self.finalizer = _MPI_finalizer<EOL><DEDENT>if backend == '<STR_LIT>':<EOL><INDENT>self.processor = _MP_processor<EOL>self.finalizer = None<EOL><DEDENT>self.info = argparse.Namespace()<EOL>self.info.n_tasks = n_tasks<EOL>self.info.n_results = n_results<EOL>self.info.parallel = parallel<EOL>self.info.reduce = reduce<EOL>self.info.wrap_MPI = False<EOL>self.info.aux_output = aux_output <EOL>self.info.method = method<EOL>self.info.pool = pool or Pool()<EOL>self.info.split_job = split_job<EOL>if self.info.n_tasks == '<STR_LIT>':<EOL><INDENT>if self.info.n_results == '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if NotPassed(self.info.split_job):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>if direct_call:<EOL><INDENT>def _lam(*args,**kwargs):<EOL><INDENT>return _MultiProcessorWrapper_call(args,kwargs,f,self.processor,self.finalizer,self.info)<EOL><DEDENT>return _lam<EOL><DEDENT>return lambda f: _easy_hpc_call(f,self)<EOL>", "docstring": ":param n_tasks: How many tasks does the decorated function handle? 
\n:param n_results: If the decorated function handles many tasks at once, are the results reduced (n_results = 'one') or not (as many results as tasks)?\n:param reduce: Function that reduces multiple outputs to a single output\n:param splitjob: Function that converts an input (to the decorated function) that represents one large job to two smaller jobs\n\nNOTE: don't turn this into a class, you'll run into strange pickling errors", "id": "f10489:m1"}
{"signature": "def plot_indices(mis, dims=None, weights=None, groups=<NUM_LIT:1>,legend = True,index_labels=None, colors = None,axis_labels = None,size_exponent=<NUM_LIT:0.1>,ax=None):", "body": "if weights is None:<EOL><INDENT>weights = {mi: <NUM_LIT:1> for mi in mis}<EOL><DEDENT>if Function.valid(weights):<EOL><INDENT>weights = {mi:weights(mi) for mi in mis}<EOL><DEDENT>values = list(weights.values())<EOL>if Integer.valid(groups):<EOL><INDENT>N_g = groups<EOL>groups = [[mi for mi in mis if (weights[mi] > np.percentile(values, <NUM_LIT:100>/groups*g) or g==<NUM_LIT:0>) and weights[mi] <= np.percentile(values, <NUM_LIT:100>/groups*(g+<NUM_LIT:1>))] for g in range(N_g)]<EOL>group_names = ['<STR_LIT>'.format(<NUM_LIT:100>/N_g*(N_g-i-<NUM_LIT:1>),<NUM_LIT:100>/N_g*(N_g-i)) for i in reversed(range(N_g))]<EOL><DEDENT>else:<EOL><INDENT>if Function.valid(groups):<EOL><INDENT>groups = {mi:groups(mi) for mi in mis}<EOL><DEDENT>group_names = unique(list(groups.values()))<EOL>groups = [[mi for mi in mis if groups[mi]==name] for name in group_names]<EOL>N_g = len(group_names)<EOL><DEDENT>if colors is None: <EOL><INDENT>colors = matplotlib.cm.rainbow(np.linspace(<NUM_LIT:0>, <NUM_LIT:1>, N_g))  <EOL><DEDENT>if Dict.valid(mis):<EOL><INDENT>if index_labels is None or weights is None:<EOL><INDENT>temp = list(mis.keys())<EOL>if (List|Tuple).valid(temp[<NUM_LIT:0>]):<EOL><INDENT>if not (index_labels is None and weights is None):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>weights = {mi:mis[mi][<NUM_LIT:0>] for mi in mis}<EOL>index_labels=  {mi:mis[mi][<NUM_LIT:1>] for mi in mis}<EOL><DEDENT>else:<EOL><INDENT>if weights is None:<EOL><INDENT>weights = mis<EOL><DEDENT>else:<EOL><INDENT>index_labels = mis<EOL><DEDENT><DEDENT>mis = temp<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>if dims is None:<EOL><INDENT>try:<EOL><INDENT>dims = len(mis[<NUM_LIT:0>])<EOL><DEDENT>except TypeError:<EOL><INDENT>dims = 
sorted(list(set.union(*(set(mi.active_dims()) for mi in mis))))   <EOL><DEDENT><DEDENT>if len(dims) > <NUM_LIT:3>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if len(dims) < <NUM_LIT:1>:<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL>return<EOL><DEDENT>if ax is None:<EOL><INDENT>fig = plt.figure() <EOL>if len(dims) == <NUM_LIT:3>:<EOL><INDENT>ax = fig.gca(projection='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>ax = fig.gca()<EOL><DEDENT><DEDENT>size_function = lambda mi: sum([weights[mi2] for mi2 in mis if mi.equal_mod(mi2, lambda dim: dim not in dims)]) <EOL>sizes = {mi: np.power(size_function(mi), size_exponent) for mi in mis}<EOL>for i,plot_indices in enumerate(groups):<EOL><INDENT>X = np.array([mi[dims[<NUM_LIT:0>]] for mi in plot_indices])<EOL>if len(dims) > <NUM_LIT:1>:<EOL><INDENT>Y = np.array([mi[dims[<NUM_LIT:1>]] for mi in plot_indices])<EOL><DEDENT>else:<EOL><INDENT>Y = np.array([<NUM_LIT:0> for mi in plot_indices])<EOL><DEDENT>if len(dims) > <NUM_LIT:2>:<EOL><INDENT>Z = np.array([mi[dims[<NUM_LIT:2>]] for mi in plot_indices])<EOL><DEDENT>else:<EOL><INDENT>Z = np.array([<NUM_LIT:0> for mi in plot_indices])   <EOL><DEDENT>sizes_plot = np.array([sizes[mi] for mi in plot_indices])<EOL>if weights:<EOL><INDENT>if len(dims) == <NUM_LIT:3>:<EOL><INDENT>ax.scatter(X, Y, Z, s = <NUM_LIT:50> * sizes_plot / max(sizes.values()), color=colors[i], alpha=<NUM_LIT:1>)            <EOL><DEDENT>else:<EOL><INDENT>ax.scatter(X, Y, s = <NUM_LIT:50> * sizes_plot / max(sizes.values()), color=colors[i], alpha=<NUM_LIT:1>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if len(dims) == <NUM_LIT:3>:<EOL><INDENT>ax.scatter(X, Y, Z,color = colors[i],alpha=<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>ax.scatter(X, Y,color=colors[i],alpha=<NUM_LIT:1>)<EOL><DEDENT><DEDENT>if True:<EOL><INDENT>if len(dims)==<NUM_LIT:3>:<EOL><INDENT>axs='<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>axs='<STR_LIT>'<EOL><DEDENT>extents = np.array([getattr(ax, '<STR_LIT>'.format(dim))() for dim in axs])<EOL>sz = 
extents[:,<NUM_LIT:1>] - extents[:,<NUM_LIT:0>]<EOL>maxsize = max(abs(sz))<EOL>for dim in axs:<EOL><INDENT>getattr(ax, '<STR_LIT>'.format(dim))(<NUM_LIT:0>, maxsize)<EOL><DEDENT><DEDENT><DEDENT>if axis_labels is not None:<EOL><INDENT>ax.set_xlabel(axis_labels[<NUM_LIT:0>])<EOL>if len(dims)><NUM_LIT:1>:<EOL><INDENT>ax.set_ylabel(axis_labels[<NUM_LIT:1>])<EOL><DEDENT>if len(dims)><NUM_LIT:1>:<EOL><INDENT>ax.set_zlabel(axis_labels[<NUM_LIT:2>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>ax.set_xlabel('<STR_LIT>' + str(dims[<NUM_LIT:0>])+'<STR_LIT:$>',size=<NUM_LIT:20>)<EOL>if len(dims) > <NUM_LIT:1>:<EOL><INDENT>ax.set_ylabel('<STR_LIT>' + str(dims[<NUM_LIT:1>])+'<STR_LIT:$>',size=<NUM_LIT:20>)<EOL><DEDENT>if len(dims) > <NUM_LIT:2>:<EOL><INDENT>ax.set_zlabel('<STR_LIT>' + str(dims[<NUM_LIT:2>])+'<STR_LIT:$>',size=<NUM_LIT:20>)<EOL><DEDENT>plt.grid()<EOL><DEDENT>x_coordinates = [mi[dims[<NUM_LIT:0>]] for mi in mis]<EOL>xticks=list(range(min(x_coordinates),max(x_coordinates)+<NUM_LIT:1>))<EOL>ax.set_xticks(xticks)<EOL>if len(dims)><NUM_LIT:1>:<EOL><INDENT>y_coordinates = [mi[dims[<NUM_LIT:1>]] for mi in mis]<EOL>ax.set_yticks(list(range(min(y_coordinates),max(y_coordinates)+<NUM_LIT:1>)))<EOL><DEDENT>if len(dims)><NUM_LIT:2>:<EOL><INDENT>z_coordinates = [mi[dims[<NUM_LIT:2>]] for mi in mis]<EOL>ax.set_zticks(list(range(min(z_coordinates),max(z_coordinates)+<NUM_LIT:1>)))<EOL><DEDENT>if index_labels:<EOL><INDENT>for mi in index_labels:<EOL><INDENT>ax.annotate('<STR_LIT>'.format(index_labels[mi]),xy=(mi[<NUM_LIT:0>],mi[<NUM_LIT:1>]))<EOL><DEDENT><DEDENT>if legend and len(group_names)><NUM_LIT:1>:<EOL><INDENT>ax.legend([patches.Patch(color=color) for color in np.flipud(colors)],group_names)<EOL><DEDENT>return ax<EOL>", "docstring": "Plot multi-index set\n\n:param mis: Multi-index set\n:type mis: Iterable of SparseIndices\n:param dims: Which dimensions to use for plotting\n:type dims: List of integers.\n:param weights: Weights associated with each multi-index\n:type weights: 
Dictionary\n:param quantiles: Number of groups plotted in different colors\n:type quantiles: Integer>=1 or list of colors\n\nTODO: exchange index_labels and dims, exchange quantiles and dims", "id": "f10490:m1"}
{"signature": "def plot3D(X, Y, Z):", "body": "fig = plt.figure()<EOL>ax = Axes3D(fig)<EOL>light = LightSource(<NUM_LIT>, <NUM_LIT>)<EOL>illuminated_surface = light.shade(Z, cmap=cm.coolwarm)  <EOL>Xmin = np.amin(X)<EOL>Xmax = np.amax(X)<EOL>Ymin = np.amin(Y)<EOL>Ymax = np.amax(Y)<EOL>Zmin = np.amin(Z)<EOL>Zmax = np.amax(Z)<EOL>ax.contourf(X, Y, Z, zdir='<STR_LIT:x>', offset=Xmin - <NUM_LIT:0.1> * (Xmax - Xmin), cmap=cm.coolwarm, alpha=<NUM_LIT:1>)  <EOL>ax.contourf(X, Y, Z, zdir='<STR_LIT:y>', offset=Ymax + <NUM_LIT:0.1> * (Ymax - Ymin), cmap=cm.coolwarm, alpha=<NUM_LIT:1>)  <EOL>ax.contourf(X, Y, Z, zdir='<STR_LIT:z>', offset=Zmin - <NUM_LIT:0.1> * (Zmax - Zmin), cmap=cm.coolwarm, alpha=<NUM_LIT:1>)  <EOL>ax.plot_surface(X, Y, Z, cstride=<NUM_LIT:5>, rstride=<NUM_LIT:5>, facecolors=illuminated_surface, alpha=<NUM_LIT:0.5>)<EOL>plt.show()<EOL>", "docstring": "Surface plot.\n\nGenerate X and Y using, for example\n      X,Y = np.mgrid[0:1:50j, 0:1:50j]\n    or\n      X,Y= np.meshgrid([0,1,2],[1,2,3]).\n\n:param X: 2D-Array of x-coordinates\n:param Y: 2D-Array of y-coordinates\n:param Z: 2D-Array of z-coordinates", "id": "f10490:m3"}
{"signature": "def add_runtime(function):", "body": "def wrapper(*args,**kwargs):  <EOL><INDENT>pr=cProfile.Profile()<EOL>pr.enable()<EOL>output = function(*args,**kwargs)<EOL>pr.disable()<EOL>return pr,output<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorator that adds a runtime profile object to the output", "id": "f10492:m1"}
{"signature": "def print_memory(function):", "body": "import memory_profiler<EOL>def wrapper(*args,**kwargs):<EOL><INDENT>m = StringIO()<EOL>temp_func = memory_profiler.profile(func = function,stream=m,precision=<NUM_LIT:4>)<EOL>output = temp_func(*args,**kwargs)<EOL>print(m.getvalue())<EOL>m.close()<EOL>return output<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorator that prints memory information at each call of the function", "id": "f10492:m2"}
{"signature": "def print_runtime(function):", "body": "def wrapper(*args,**kwargs):<EOL><INDENT>pr=cProfile.Profile()<EOL>pr.enable()<EOL>output = function(*args,**kwargs)<EOL>pr.disable()<EOL>ps = pstats.Stats(pr)<EOL>ps.sort_stats('<STR_LIT>').print_stats(<NUM_LIT:20>)<EOL>return output<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorator that prints running time information at each call of the function", "id": "f10492:m9"}
{"signature": "def log_calls(function):", "body": "def wrapper(self,*args,**kwargs):  <EOL><INDENT>self.log.log(group=function.__name__,message='<STR_LIT>') <EOL>function(self,*args,**kwargs)<EOL>self.log.log(group=function.__name__,message='<STR_LIT>') <EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorator that logs function calls in their self.log", "id": "f10492:m0"}
{"signature": "def print_profile(function):", "body": "import memory_profiler<EOL>def wrapper(*args,**kwargs):<EOL><INDENT>m=StringIO()<EOL>pr=cProfile.Profile()<EOL>pr.enable()<EOL>temp_func = memory_profiler.profile(func=function,stream=m,precision=<NUM_LIT:4>)<EOL>output = temp_func(*args,**kwargs)<EOL>print(m.getvalue())<EOL>pr.disable()<EOL>ps = pstats.Stats(pr)<EOL>ps.sort_stats('<STR_LIT>').print_stats('<STR_LIT>',<NUM_LIT:20>)<EOL>m.close()<EOL>return output<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorator that prints memory and runtime information at each call of the function", "id": "f10492:m3"}
{"signature": "def declaration(function):", "body": "function,name=_strip_function(function)<EOL>if not function.__code__.co_code in [empty_function.__code__.co_code, doc_string_only_function.__code__.co_code]: <EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>def not_implemented_function(*args,**kwargs):<EOL><INDENT>raise ValueError('<STR_LIT>'.format(args[<NUM_LIT:0>],name))<EOL><DEDENT>not_implemented_function.__qualname__=not_implemented_function.__name__ <EOL>return default(not_implemented_function,name=name)<EOL>", "docstring": "Declare abstract function. \n\nRequires function to be empty except for docstring describing semantics.\nTo apply function, first argument must come with implementation of semantics.", "id": "f10492:m8"}
{"signature": "def split_list(l,N):", "body": "npmode = isinstance(l,np.ndarray)<EOL>if npmode:<EOL><INDENT>l=list(l)<EOL><DEDENT>g=np.concatenate((np.array([<NUM_LIT:0>]),np.cumsum(split_integer(len(l),length=N))))<EOL>s=[l[g[i]:g[i+<NUM_LIT:1>]] for i in range(N)]<EOL>if npmode:<EOL><INDENT>s=[np.array(sl) for sl in s]<EOL><DEDENT>return s<EOL>", "docstring": "Subdivide list into N lists", "id": "f10493:m8"}
{"signature": "def cmd_exists(cmd):", "body": "return shutil.which(cmd) is not None<EOL>", "docstring": "Check whether given command is available on system", "id": "f10493:m6"}
{"signature": "def random_word(length,dictionary = False):", "body": "if dictionary:<EOL><INDENT>try:<EOL><INDENT>with open('<STR_LIT>') as fp:<EOL><INDENT>words = [word.lower()[:-<NUM_LIT:1>] for word in fp.readlines() if re.match('<STR_LIT>'.format('<STR_LIT:{>'+str(length)+'<STR_LIT:}>'),word)]<EOL><DEDENT>return random.choice(words)<EOL><DEDENT>except FileNotFoundError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>vowels = list('<STR_LIT>')<EOL>consonants = list('<STR_LIT>')<EOL>pairs = [(random.choice(consonants),random.choice(vowels)) for _ in range(length//<NUM_LIT:2>+<NUM_LIT:1>)] <EOL>return '<STR_LIT>'.join([l for p in pairs for l in p])[:length]<EOL>", "docstring": "Creates random lowercase words from dictionary or by alternating vowels and consonants\n\nThe second method chooses from 85**length words.\nThe dictionary method chooses from 3000--12000 words for 3<=length<=12\n(though this of course depends on the available dictionary)\n\n:param length: word length\n:param dictionary: Try reading from dictionary, else fall back to artificial words", "id": "f10493:m10"}
{"signature": "def ld_to_dl(ld):", "body": "if ld:<EOL><INDENT>keys = list(ld[<NUM_LIT:0>])<EOL>dl = {key:[d[key] for d in ld] for key in keys}<EOL>return dl<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT>", "docstring": "Convert list of dictionaries to dictionary of lists", "id": "f10493:m1"}
{"signature": "def smart_range(*args):", "body": "if len(args)==<NUM_LIT:1>:<EOL><INDENT>string_input = True<EOL>string = args[<NUM_LIT:0>].replace('<STR_LIT:U+0020>','<STR_LIT>')<EOL>original_args=string.split('<STR_LIT:U+002C>')<EOL>args = []<EOL>for arg in original_args:<EOL><INDENT>try:<EOL><INDENT>args.append(ast.literal_eval(arg))<EOL><DEDENT>except (ValueError,SyntaxError):<EOL><INDENT>try:<EOL><INDENT>args.append(eval(arg,{'<STR_LIT>':{}}))<EOL><DEDENT>except (NameError,SyntaxError):<EOL><INDENT>args.append(arg)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>string_input = False<EOL><DEDENT>arg_start = args[<NUM_LIT:0>]<EOL>if len(args)><NUM_LIT:2>:<EOL><INDENT>arg_step = args[<NUM_LIT:1>]<EOL>if len(args)><NUM_LIT:3>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>arg_step = None<EOL><DEDENT>arg_end = args[-<NUM_LIT:1>]<EOL>if String.valid(arg_start) and len(arg_start)==<NUM_LIT:1>:<EOL><INDENT>range_type = '<STR_LIT>'<EOL><DEDENT>elif all(Integer.valid(arg) for arg in args):<EOL><INDENT>range_type = '<STR_LIT>'<EOL><DEDENT>else: <EOL><INDENT>if string_input and original_args[<NUM_LIT:0>][<NUM_LIT:0>] in ['<STR_LIT:(>','<STR_LIT:[>']:<EOL><INDENT>range_type = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>range_type = '<STR_LIT:float>'<EOL><DEDENT><DEDENT>if range_type == '<STR_LIT>':<EOL><INDENT>start = ord(arg_start)<EOL>step = (ord(arg_step)- start) if arg_step else <NUM_LIT:1><EOL>end = ord(arg_end)<EOL>out = [chr(i) for i in range(start,end+step,step)]<EOL>if np.sign(step)*(ord(out[-<NUM_LIT:1>])-end)><NUM_LIT:0>:<EOL><INDENT>del out[-<NUM_LIT:1>]<EOL><DEDENT>return out<EOL><DEDENT>elif range_type == '<STR_LIT>':<EOL><INDENT>if string_input:<EOL><INDENT>if len(args)==<NUM_LIT:2> and all('<STR_LIT>' in oa for oa in original_args):<EOL><INDENT>bases,exponents = zip(*[oa.split('<STR_LIT>') for oa in original_args])<EOL>if len(set(bases))==<NUM_LIT:1>:<EOL><INDENT>return [int(bases[<NUM_LIT:0>])**exponent for exponent in 
smart_range('<STR_LIT:U+002C>'.join(exponents))]<EOL><DEDENT><DEDENT><DEDENT>start = arg_start<EOL>step = (arg_step - arg_start) if arg_step is not None else <NUM_LIT:1><EOL>end = arg_end<EOL>out = list(range(start,end+step,step))<EOL>if np.sign(step)*(out[-<NUM_LIT:1>]-end)><NUM_LIT:0>:<EOL><INDENT>del out[-<NUM_LIT:1>]<EOL><DEDENT>return out<EOL><DEDENT>elif range_type == '<STR_LIT:float>':<EOL><INDENT>if len(args)==<NUM_LIT:2> and all('<STR_LIT>' in oa for oa in original_args):<EOL><INDENT>bases,exponents = zip(*[oa.split('<STR_LIT>') for oa in original_args])<EOL>if len(set(bases))==<NUM_LIT:1>:<EOL><INDENT>return [float(bases[<NUM_LIT:0>])**exponent for exponent in smart_range('<STR_LIT:U+002C>'.join(exponents)) ]<EOL><DEDENT><DEDENT>if len(args) == <NUM_LIT:2>:<EOL><INDENT>raise ValueError()<EOL><DEDENT>start = arg_start<EOL>step = arg_step - arg_start<EOL>end = arg_end<EOL>out = list(np.arange(start,end+<NUM_LIT>*step,step))<EOL>return out<EOL><DEDENT>elif range_type == '<STR_LIT>':<EOL><INDENT>lopen,start = (original_args[<NUM_LIT:0>][<NUM_LIT:0>]=='<STR_LIT:(>'),float(original_args[<NUM_LIT:0>][<NUM_LIT:1>:])<EOL>end,N = original_args[<NUM_LIT:1>].split('<STR_LIT:/>')<EOL>end,ropen = float(end[:-<NUM_LIT:1>]),(end[-<NUM_LIT:1>]=='<STR_LIT:)>')<EOL>N = ast.literal_eval(N)+lopen +ropen<EOL>points = np.linspace(start,end,num=N)<EOL>return points[lopen:len(points)-ropen]<EOL><DEDENT>", "docstring": "smart_range(1,3,9)==[1,3,5,7,9]", "id": "f10493:m0"}
{"signature": "def string_from_seconds(seconds):", "body": "td = str(timedelta(seconds = seconds))<EOL>parts = td.split('<STR_LIT:.>')<EOL>if len(parts) == <NUM_LIT:1>:<EOL><INDENT>td = td+'<STR_LIT>'<EOL><DEDENT>elif len(parts) == <NUM_LIT:2>:<EOL><INDENT>td = '<STR_LIT:.>'.join([parts[<NUM_LIT:0>],parts[<NUM_LIT:1>][:<NUM_LIT:2>]])<EOL><DEDENT>return td<EOL>", "docstring": "Converts seconds into elapsed time string of form \n\n(X days(s)?,)? HH:MM:SS.YY", "id": "f10493:m11"}
{"signature": "def best_units(self, sequence):", "body": "<EOL>ts_range = self.value(max(sequence)) - self.value(min(sequence))<EOL>package = self.determine_package(sequence[<NUM_LIT:0>])<EOL>if package == '<STR_LIT>':<EOL><INDENT>cuts = [<EOL>(<NUM_LIT>, '<STR_LIT>'),<EOL>(<NUM_LIT>, '<STR_LIT>'),<EOL>(<NUM_LIT>, '<STR_LIT:s>'),<EOL>(<NUM_LIT:9>, '<STR_LIT:m>'),<EOL>(<NUM_LIT:6>, '<STR_LIT:h>'),<EOL>(<NUM_LIT:4>, '<STR_LIT:d>'),<EOL>(<NUM_LIT:4>, '<STR_LIT:w>'),<EOL>(<NUM_LIT:4>, '<STR_LIT:M>'),<EOL>(<NUM_LIT:3>, '<STR_LIT:y>')]<EOL>denomination = NANOSECONDS<EOL>base_units = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>cuts = [<EOL>(<NUM_LIT>, '<STR_LIT:s>'),<EOL>(<NUM_LIT:9>, '<STR_LIT:m>'),<EOL>(<NUM_LIT:6>, '<STR_LIT:h>'),<EOL>(<NUM_LIT:4>, '<STR_LIT:d>'),<EOL>(<NUM_LIT:4>, '<STR_LIT:w>'),<EOL>(<NUM_LIT:4>, '<STR_LIT:M>'),<EOL>(<NUM_LIT:3>, '<STR_LIT:y>')]<EOL>denomination = SECONDS<EOL>base_units = '<STR_LIT>'<EOL><DEDENT>for size, units in reversed(cuts):<EOL><INDENT>if ts_range >= size*denomination[units]:<EOL><INDENT>return units<EOL><DEDENT><DEDENT>return base_units<EOL>", "docstring": "Determine good units for representing a sequence of timedeltas", "id": "f10499:c8:m3"}
{"signature": "def __call__(self, major, limits=None, n=None):", "body": "if not self.trans.dataspace_is_numerical:<EOL><INDENT>raise TypeError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>if limits is None:<EOL><INDENT>limits = min_max(major)<EOL><DEDENT>if n is None:<EOL><INDENT>n = self.n<EOL><DEDENT>major = self._extend_breaks(major)<EOL>major = self.trans.inverse(major)<EOL>limits = self.trans.inverse(limits)<EOL>minor = minor_breaks(n)(major, limits)<EOL>return self.trans.transform(minor)<EOL>", "docstring": "Minor breaks for transformed scales\n\nParameters\n----------\nmajor : array_like\n    Major breaks\nlimits : array_like | None\n    Limits of the scale. If *array_like*, must be\n    of size 2. If **None**, then the minimum and\n    maximum of the major breaks are used.\nn : int\n    Number of minor breaks between the major\n    breaks. If **None**, then *self.n* is used.\n\nReturns\n-------\nout : array_like\n    Minor breaks", "id": "f10499:c5:m1"}
{"signature": "def __call__(self, limits):", "body": "Q = self.Q<EOL>w = self.w<EOL>only_inside = self.only_inside<EOL>simplicity_max = self.simplicity_max<EOL>density_max = self.density_max<EOL>coverage_max = self.coverage_max<EOL>simplicity = self.simplicity<EOL>coverage = self.coverage<EOL>density = self.density<EOL>legibility = self.legibility<EOL>log10 = np.log10<EOL>ceil = np.ceil<EOL>floor = np.floor<EOL>dmin, dmax = limits<EOL>if dmin > dmax:<EOL><INDENT>dmin, dmax = dmax, dmin<EOL><DEDENT>elif dmin == dmax:<EOL><INDENT>return np.array([dmin])<EOL><DEDENT>best_score = -<NUM_LIT:2><EOL>j = <NUM_LIT:1><EOL>while j < float('<STR_LIT>'):<EOL><INDENT>for q in Q:<EOL><INDENT>sm = simplicity_max(q, j)<EOL>if w[<NUM_LIT:0>]*sm + w[<NUM_LIT:1>] + w[<NUM_LIT:2>] + w[<NUM_LIT:3>] < best_score:<EOL><INDENT>j = float('<STR_LIT>')<EOL>break<EOL><DEDENT>k = <NUM_LIT:2><EOL>while k < float('<STR_LIT>'):<EOL><INDENT>dm = density_max(k)<EOL>if w[<NUM_LIT:0>]*sm + w[<NUM_LIT:1>] + w[<NUM_LIT:2>]*dm + w[<NUM_LIT:3>] < best_score:<EOL><INDENT>break<EOL><DEDENT>delta = (dmax-dmin)/(k+<NUM_LIT:1>)/j/q<EOL>z = ceil(log10(delta))<EOL>while z < float('<STR_LIT>'):<EOL><INDENT>step = j*q*(<NUM_LIT:10>**z)<EOL>cm = coverage_max(dmin, dmax, step*(k-<NUM_LIT:1>))<EOL>if w[<NUM_LIT:0>]*sm + w[<NUM_LIT:1>]*cm + w[<NUM_LIT:2>]*dm + w[<NUM_LIT:3>] < best_score:<EOL><INDENT>break<EOL><DEDENT>min_start = int(floor(dmax/step)*j - (k-<NUM_LIT:1>)*j)<EOL>max_start = int(ceil(dmin/step)*j)<EOL>if min_start > max_start:<EOL><INDENT>z = z+<NUM_LIT:1><EOL>break<EOL><DEDENT>for start in range(min_start, max_start+<NUM_LIT:1>):<EOL><INDENT>lmin = start * (step/j)<EOL>lmax = lmin + step*(k-<NUM_LIT:1>)<EOL>lstep = step<EOL>s = simplicity(q, j, lmin, lmax, lstep)<EOL>c = coverage(dmin, dmax, lmin, lmax)<EOL>d = density(k, dmin, dmax, lmin, lmax)<EOL>l = legibility(lmin, lmax, lstep)<EOL>score = w[<NUM_LIT:0>]*s + w[<NUM_LIT:1>]*c + w[<NUM_LIT:2>]*d + w[<NUM_LIT:3>]*l<EOL>if (score > best_score 
and<EOL>(not only_inside or<EOL>(lmin >= dmin and lmax <= dmax))):<EOL><INDENT>best_score = score<EOL>best = (lmin, lmax, lstep, q, k)<EOL><DEDENT><DEDENT>z = z+<NUM_LIT:1><EOL><DEDENT>k = k+<NUM_LIT:1><EOL><DEDENT><DEDENT>j = j+<NUM_LIT:1><EOL><DEDENT>try:<EOL><INDENT>locs = best[<NUM_LIT:0>] + np.arange(best[<NUM_LIT:4>])*best[<NUM_LIT:2>]<EOL><DEDENT>except UnboundLocalError:<EOL><INDENT>locs = []<EOL><DEDENT>return locs<EOL>", "docstring": "Calculate the breaks\n\nParameters\n----------\nlimits : array\n    Minimum and maximum values.\n\nReturns\n-------\nout : array_like\n    Sequence of break points.", "id": "f10499:c9:m8"}
{"signature": "def value(self, td):", "body": "if self.package == '<STR_LIT>':<EOL><INDENT>return td.value<EOL><DEDENT>else:<EOL><INDENT>return td.total_seconds()<EOL><DEDENT>", "docstring": "Return the numeric value representation on a timedelta", "id": "f10499:c8:m4"}
{"signature": "def rescale(x, to=(<NUM_LIT:0>, <NUM_LIT:1>), _from=None):", "body": "if _from is None:<EOL><INDENT>_from = np.min(x), np.max(x)<EOL><DEDENT>return np.interp(x, _from, to)<EOL>", "docstring": "Rescale numeric vector to have specified minimum and maximum.\n\nParameters\n----------\nx : array_like | numeric\n    1D vector of values to manipulate.\nto : tuple\n    output range (numeric vector of length two)\n_from : tuple\n    input range (numeric vector of length two).\n    If not given, is calculated from the range of x\n\nReturns\n-------\nout : array_like\n    Rescaled values\n\nExamples\n--------\n>>> x = [0, 2, 4, 6, 8, 10]\n>>> rescale(x)\narray([0. , 0.2, 0.4, 0.6, 0.8, 1. ])\n>>> rescale(x, to=(0, 2))\narray([0. , 0.4, 0.8, 1.2, 1.6, 2. ])\n>>> rescale(x, to=(0, 2), _from=(0, 20))\narray([0. , 0.2, 0.4, 0.6, 0.8, 1. ])", "id": "f10500:m0"}
{"signature": "def expand_range_distinct(range, expand=(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>), zero_width=<NUM_LIT:1>):", "body": "if len(expand) == <NUM_LIT:2>:<EOL><INDENT>expand = tuple(expand) * <NUM_LIT:2><EOL><DEDENT>lower = expand_range(range, expand[<NUM_LIT:0>], expand[<NUM_LIT:1>], zero_width)[<NUM_LIT:0>]<EOL>upper = expand_range(range, expand[<NUM_LIT:2>], expand[<NUM_LIT:3>], zero_width)[<NUM_LIT:1>]<EOL>return (lower, upper)<EOL>", "docstring": "Expand a range with a multiplicative or additive constants\n\nSimilar to :func:`expand_range` but both sides of the range\nexpanded using different constants\n\nParameters\n----------\nrange : tuple\n    Range of data. Size 2\nexpand : tuple\n    Length 2 or 4. If length is 2, then the same constants\n    are used for both sides. If length is 4 then the first\n    two are are the Multiplicative (*mul*) and Additive (*add*)\n    constants for the lower limit, and the second two are\n    the constants for the upper limit.\nzero_width : int | float | timedelta\n    Distance to use if range has zero width\n\nReturns\n-------\nout : tuple\n    Expanded range\n\nExamples\n--------\n>>> expand_range_distinct((3, 8))\n(3, 8)\n>>> expand_range_distinct((0, 10), (0.1, 0))\n(-1.0, 11.0)\n>>> expand_range_distinct((0, 10), (0.1, 0, 0.1, 0))\n(-1.0, 11.0)\n>>> expand_range_distinct((0, 10), (0.1, 0, 0, 0))\n(-1.0, 10)\n>>> expand_range_distinct((0, 10), (0, 2))\n(-2, 12)\n>>> expand_range_distinct((0, 10), (0, 2, 0, 2))\n(-2, 12)\n>>> expand_range_distinct((0, 10), (0, 0, 0, 2))\n(0, 12)\n>>> expand_range_distinct((0, 10), (.1, 2))\n(-3.0, 13.0)\n>>> expand_range_distinct((0, 10), (.1, 2, .1, 2))\n(-3.0, 13.0)\n>>> expand_range_distinct((0, 10), (0, 0, .1, 2))\n(0, 13.0)", "id": "f10500:m9"}
{"signature": "def squish_infinite(x, range=(<NUM_LIT:0>, <NUM_LIT:1>)):", "body": "xtype = type(x)<EOL>if not hasattr(x, '<STR_LIT>'):<EOL><INDENT>x = np.asarray(x)<EOL><DEDENT>x[x == -np.inf] = range[<NUM_LIT:0>]<EOL>x[x == np.inf] = range[<NUM_LIT:1>]<EOL>if not isinstance(x, xtype):<EOL><INDENT>x = xtype(x)<EOL><DEDENT>return x<EOL>", "docstring": "Truncate infinite values to a range.\n\nParameters\n----------\nx : array_like\n    Values that should have infinities squished.\nrange : tuple\n    The range onto which to squish the infinites.\n    Must be of size 2.\n\nReturns\n-------\nout : array_like\n    Values with infinites squished.\n\nExamples\n--------\n>>> squish_infinite([0, .5, .25, np.inf, .44])\n[0.0, 0.5, 0.25, 1.0, 0.44]\n>>> squish_infinite([0, -np.inf, .5, .25, np.inf], (-10, 9))\n[0.0, -10.0, 0.5, 0.25, 9.0]", "id": "f10500:m3"}
{"signature": "def squish(x, range=(<NUM_LIT:0>, <NUM_LIT:1>), only_finite=True):", "body": "xtype = type(x)<EOL>if not hasattr(x, '<STR_LIT>'):<EOL><INDENT>x = np.asarray(x)<EOL><DEDENT>finite = np.isfinite(x) if only_finite else True<EOL>x[np.logical_and(x < range[<NUM_LIT:0>], finite)] = range[<NUM_LIT:0>]<EOL>x[np.logical_and(x > range[<NUM_LIT:1>], finite)] = range[<NUM_LIT:1>]<EOL>if not isinstance(x, xtype):<EOL><INDENT>x = xtype(x)<EOL><DEDENT>return x<EOL>", "docstring": "Squish values into range.\n\nParameters\n----------\nx : array_like\n    Values that should have out of range values squished.\nrange : tuple\n    The range onto which to squish the values.\nonly_finite: boolean\n    When true, only squishes finite values.\n\nReturns\n-------\nout : array_like\n    Values with out of range values squished.\n\nExamples\n--------\n>>> squish([-1.5, 0.2, 0.5, 0.8, 1.0, 1.2])\n[0.0, 0.2, 0.5, 0.8, 1.0, 1.0]\n\n>>> squish([-np.inf, -1.5, 0.2, 0.5, 0.8, 1.0, np.inf], only_finite=False)\n[0.0, 0.0, 0.2, 0.5, 0.8, 1.0, 1.0]", "id": "f10500:m4"}
{"signature": "@staticmethod<EOL><INDENT>def inverse(x):<DEDENT>", "body": "return num2date(x)<EOL>", "docstring": "Transform to date from numerical format", "id": "f10509:c7:m1"}
{"signature": "def gettrans(t):", "body": "obj = t<EOL>if isinstance(obj, str):<EOL><INDENT>name = '<STR_LIT>'.format(obj)<EOL>obj = globals()[name]()<EOL><DEDENT>if callable(obj):<EOL><INDENT>obj = obj()<EOL><DEDENT>if isinstance(obj, type):<EOL><INDENT>obj = obj()<EOL><DEDENT>if not isinstance(obj, trans):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return obj<EOL>", "docstring": "Return a trans object\n\nParameters\n----------\nt : str | callable | type | trans\n    name of transformation function\n\nReturns\n-------\nout : trans", "id": "f10509:m5"}
{"signature": "@staticmethod<EOL><INDENT>def transform(x):<DEDENT>", "body": "<EOL>try:<EOL><INDENT>x = np.array([_x.total_seconds()*<NUM_LIT:10>**<NUM_LIT:6> for _x in x])<EOL><DEDENT>except TypeError:<EOL><INDENT>x = x.total_seconds()*<NUM_LIT:10>**<NUM_LIT:6><EOL><DEDENT>return x<EOL>", "docstring": "Transform from Timeddelta to numerical format", "id": "f10509:c8:m0"}
{"signature": "def exp_trans(base=None, **kwargs):", "body": "<EOL>if base is None:<EOL><INDENT>name = '<STR_LIT>'<EOL>base = np.exp(<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>name = '<STR_LIT>'.format(base)<EOL><DEDENT>def transform(x):<EOL><INDENT>return base ** x<EOL><DEDENT>def inverse(x):<EOL><INDENT>return np.log(x)/np.log(base)<EOL><DEDENT>kwargs['<STR_LIT>'] = base<EOL>return trans_new(name, transform, inverse, **kwargs)<EOL>", "docstring": "Create a exponential transform class for *base*\n\nThis is inverse of the log transform.\n\nParameters\n----------\nbase : float\n    Base of the logarithm\nkwargs : dict\n    Keyword arguments passed onto\n    :func:`trans_new`. Should not include\n    the `transform` or `inverse`.\n\nReturns\n-------\nout : type\n    Exponential transform class", "id": "f10509:m2"}
{"signature": "@classmethod<EOL><INDENT>def train(cls, new_data, old=None):<DEDENT>", "body": "if not len(new_data):<EOL><INDENT>return old<EOL><DEDENT>if not hasattr(new_data, '<STR_LIT>'):<EOL><INDENT>new_data = np.asarray(new_data)<EOL><DEDENT>if new_data.dtype.kind not in CONTINUOUS_KINDS:<EOL><INDENT>raise TypeError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>if old is not None:<EOL><INDENT>new_data = np.hstack([new_data, old])<EOL><DEDENT>return min_max(new_data, na_rm=True, finite=True)<EOL>", "docstring": "Train a continuous scale\n\nParameters\n----------\nnew_data : array_like\n    New values\nold : array_like\n    Old range. Most likely a tuple of length 2.\n\nReturns\n-------\nout : tuple\n    Limits(range) of the scale", "id": "f10510:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def apply(cls, x, palette, na_value=None):<DEDENT>", "body": "limits = cls.train(x)<EOL>return cls.map(x, palette, limits, na_value)<EOL>", "docstring": "Scale data discretely\n\nParameters\n----------\nx : array_like\n    Discrete values to scale\npalette : callable ``f(x)``\n    Palette to use\nna_value : object\n    Value to use for missing values.\n\nReturns\n-------\nout : array_like\n    Scaled values", "id": "f10510:c1:m0"}
{"signature": "@classmethod<EOL><INDENT>def map(cls, x, palette, limits, na_value=None, oob=censor):<DEDENT>", "body": "x = oob(rescale(x, _from=limits))<EOL>pal = palette(x)<EOL>try:<EOL><INDENT>pal[pd.isnull(x)] = na_value<EOL><DEDENT>except TypeError:<EOL><INDENT>pal = [v if not pd.isnull(v) else na_value for v in pal]<EOL><DEDENT>return pal<EOL>", "docstring": "Map values to a continuous palette\n\nParameters\n----------\nx : array_like\n    Continuous values to scale\npalette : callable ``f(x)``\n    palette to use\nna_value : object\n    Value to use for missing values.\noob : callable ``f(x)``\n    Function to deal with values that are\n    beyond the limits\n\nReturns\n-------\nout : array_like\n    Values mapped onto a palette", "id": "f10510:c0:m2"}
{"signature": "@classmethod<EOL><INDENT>def apply(cls, x, palette, na_value=None, trans=None):<DEDENT>", "body": "if trans is not None:<EOL><INDENT>x = trans.transform(x)<EOL><DEDENT>limits = cls.train(x)<EOL>return cls.map(x, palette, limits, na_value)<EOL>", "docstring": "Scale data continuously\n\nParameters\n----------\nx : array_like\n    Continuous values to scale\npalette : callable ``f(x)``\n    Palette to use\nna_value : object\n    Value to use for missing values.\ntrans : trans\n    How to transform the data before scaling. If\n    ``None``, no transformation is done.\n\nReturns\n-------\nout : array_like\n    Scaled values", "id": "f10510:c0:m0"}
{"signature": "def render(pieces, style):", "body": "if pieces[\"<STR_LIT:error>\"]:<EOL><INDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": pieces.get(\"<STR_LIT>\"),<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT:error>\": pieces[\"<STR_LIT:error>\"],<EOL>\"<STR_LIT:date>\": None}<EOL><DEDENT>if not style or style == \"<STR_LIT:default>\":<EOL><INDENT>style = \"<STR_LIT>\"  <EOL><DEDENT>if style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_pre(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_post(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_old(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_git_describe(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_git_describe_long(pieces)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % style)<EOL><DEDENT>return {\"<STR_LIT:version>\": rendered, \"<STR_LIT>\": pieces[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": pieces[\"<STR_LIT>\"], \"<STR_LIT:error>\": None,<EOL>\"<STR_LIT:date>\": pieces.get(\"<STR_LIT:date>\")}<EOL>", "docstring": "Render the given version pieces into the requested style.", "id": "f10511:m15"}
{"signature": "def get_versions():", "body": "<EOL>cfg = get_config()<EOL>verbose = cfg.verbose<EOL>try:<EOL><INDENT>return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,<EOL>verbose)<EOL><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>root = os.path.realpath(__file__)<EOL>for i in cfg.versionfile_source.split('<STR_LIT:/>'):<EOL><INDENT>root = os.path.dirname(root)<EOL><DEDENT><DEDENT>except NameError:<EOL><INDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\", \"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT:error>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:date>\": None}<EOL><DEDENT>try:<EOL><INDENT>pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)<EOL>return render(pieces, cfg.style)<EOL><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>if cfg.parentdir_prefix:<EOL><INDENT>return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)<EOL><DEDENT><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\", \"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT:error>\": \"<STR_LIT>\", \"<STR_LIT:date>\": None}<EOL>", "docstring": "Get version information or return default if unable to do so.", "id": "f10511:m16"}
{"signature": "def get_keywords():", "body": "<EOL>git_refnames = \"<STR_LIT>\"<EOL>git_full = \"<STR_LIT>\"<EOL>git_date = \"<STR_LIT>\"<EOL>keywords = {\"<STR_LIT>\": git_refnames, \"<STR_LIT>\": git_full, \"<STR_LIT:date>\": git_date}<EOL>return keywords<EOL>", "docstring": "Get the keywords needed to look up the version information.", "id": "f10511:m0"}
{"signature": "def render_pep440(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"],<EOL>pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>return rendered<EOL>", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n    Exceptions:\n    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f10511:m9"}
{"signature": "def render_pep440_post(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] .\n\n    The \".dev0\" means dirty. Note that .dev0 sorts backwards\n    (a dirty tree will appear \"older\" than the corresponding clean one),\n    but you shouldn't be releasing software with -dirty anyways.\n\n    Exceptions:\n    1: no tags. 0.postDISTANCE[.dev0]", "id": "f10511:m11"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):", "body": "GITS = [\"<STR_LIT>\"]<EOL>if sys.platform == \"<STR_LIT:win32>\":<EOL><INDENT>GITS = [\"<STR_LIT>\", \"<STR_LIT>\"]<EOL><DEDENT>out, rc = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\"], cwd=root,<EOL>hide_stderr=True)<EOL>if rc != <NUM_LIT:0>:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % root)<EOL><DEDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>describe_out, rc = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\" % tag_prefix],<EOL>cwd=root)<EOL>if describe_out is None:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>describe_out = describe_out.strip()<EOL>full_out, rc = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\"], cwd=root)<EOL>if full_out is None:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>full_out = full_out.strip()<EOL>pieces = {}<EOL>pieces[\"<STR_LIT>\"] = full_out<EOL>pieces[\"<STR_LIT>\"] = full_out[:<NUM_LIT:7>]  <EOL>pieces[\"<STR_LIT:error>\"] = None<EOL>git_describe = describe_out<EOL>dirty = git_describe.endswith(\"<STR_LIT>\")<EOL>pieces[\"<STR_LIT>\"] = dirty<EOL>if dirty:<EOL><INDENT>git_describe = git_describe[:git_describe.rindex(\"<STR_LIT>\")]<EOL><DEDENT>if \"<STR_LIT:->\" in git_describe:<EOL><INDENT>mo = re.search(r'<STR_LIT>', git_describe)<EOL>if not mo:<EOL><INDENT>pieces[\"<STR_LIT:error>\"] = (\"<STR_LIT>\"<EOL>% describe_out)<EOL>return pieces<EOL><DEDENT>full_tag = mo.group(<NUM_LIT:1>)<EOL>if not full_tag.startswith(tag_prefix):<EOL><INDENT>if verbose:<EOL><INDENT>fmt = \"<STR_LIT>\"<EOL>print(fmt % (full_tag, tag_prefix))<EOL><DEDENT>pieces[\"<STR_LIT:error>\"] = (\"<STR_LIT>\"<EOL>% (full_tag, tag_prefix))<EOL>return pieces<EOL><DEDENT>pieces[\"<STR_LIT>\"] = full_tag[len(tag_prefix):]<EOL>pieces[\"<STR_LIT>\"] = 
int(mo.group(<NUM_LIT:2>))<EOL>pieces[\"<STR_LIT>\"] = mo.group(<NUM_LIT:3>)<EOL><DEDENT>else:<EOL><INDENT>pieces[\"<STR_LIT>\"] = None<EOL>count_out, rc = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>cwd=root)<EOL>pieces[\"<STR_LIT>\"] = int(count_out)  <EOL><DEDENT>date = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>cwd=root)[<NUM_LIT:0>].strip()<EOL>pieces[\"<STR_LIT:date>\"] = date.strip().replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:T>\", <NUM_LIT:1>).replace(\"<STR_LIT:U+0020>\", \"<STR_LIT>\", <NUM_LIT:1>)<EOL>return pieces<EOL>", "docstring": "Get version from 'git describe' in the root of the source tree.\n\n    This only gets called if the git-archive 'subst' keywords were *not*\n    expanded, and _version.py hasn't already been rewritten with a short\n    version string, meaning we're inside a checked out source tree.", "id": "f10511:m7"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_versions_from_keywords(keywords, tag_prefix, verbose):", "body": "if not keywords:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>date = keywords.get(\"<STR_LIT:date>\")<EOL>if date is not None:<EOL><INDENT>date = date.strip().replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:T>\", <NUM_LIT:1>).replace(\"<STR_LIT:U+0020>\", \"<STR_LIT>\", <NUM_LIT:1>)<EOL><DEDENT>refnames = keywords[\"<STR_LIT>\"].strip()<EOL>if refnames.startswith(\"<STR_LIT>\"):<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>refs = set([r.strip() for r in refnames.strip(\"<STR_LIT>\").split(\"<STR_LIT:U+002C>\")])<EOL>TAG = \"<STR_LIT>\"<EOL>tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])<EOL>if not tags:<EOL><INDENT>tags = set([r for r in refs if re.search(r'<STR_LIT>', r)])<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % \"<STR_LIT:U+002C>\".join(refs - tags))<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % \"<STR_LIT:U+002C>\".join(sorted(tags)))<EOL><DEDENT>for ref in sorted(tags):<EOL><INDENT>if ref.startswith(tag_prefix):<EOL><INDENT>r = ref[len(tag_prefix):]<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % r)<EOL><DEDENT>return {\"<STR_LIT:version>\": r,<EOL>\"<STR_LIT>\": keywords[\"<STR_LIT>\"].strip(),<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": None,<EOL>\"<STR_LIT:date>\": date}<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": keywords[\"<STR_LIT>\"].strip(),<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": \"<STR_LIT>\", \"<STR_LIT:date>\": None}<EOL>", "docstring": "Get version information from git keywords.", "id": "f10511:m6"}
{"signature": "def render_git_describe_long(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL><DEDENT>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG-DISTANCE-gHEX[-dirty].\n\n    Like 'git describe --tags --dirty --always -long'.\n    The distance/hash is unconditional.\n\n    Exceptions:\n    1: no tags. HEX[-dirty]  (note: no 'g' prefix)", "id": "f10511:m14"}
{"signature": "def _tidyup_labels(self, labels):", "body": "def remove_zeroes(s):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>tup = s.split('<STR_LIT:e>')<EOL>if len(tup) == <NUM_LIT:2>:<EOL><INDENT>mantissa = tup[<NUM_LIT:0>].rstrip('<STR_LIT:0>').rstrip('<STR_LIT:.>')<EOL>exponent = int(tup[<NUM_LIT:1>])<EOL>if exponent:<EOL><INDENT>s = '<STR_LIT>' % (mantissa, exponent)<EOL><DEDENT>else:<EOL><INDENT>s = mantissa<EOL><DEDENT><DEDENT>return s<EOL><DEDENT>def as_exp(s):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return s if '<STR_LIT:e>' in s else '<STR_LIT>'.format(float(s))<EOL><DEDENT>has_e = np.array(['<STR_LIT:e>' in x for x in labels])<EOL>if not np.all(has_e) and not np.all(~has_e):<EOL><INDENT>labels = [as_exp(x) for x in labels]<EOL><DEDENT>labels = [remove_zeroes(x) for x in labels]<EOL>return labels<EOL>", "docstring": "Make all labels uniform in format and remove redundant zeros\nfor labels in exponential format.\n\nParameters\n----------\nlabels : list-like\n    Labels to be tidied.\n\nReturns\n-------\nout : list-like\n    Labels", "id": "f10513:c6:m1"}
{"signature": "def __call__(self, x):", "body": "if self.style == '<STR_LIT>':<EOL><INDENT>return [self.fmt.format(val) for val in x]<EOL><DEDENT>elif self.style == '<STR_LIT>':<EOL><INDENT>return [self.fmt % val for val in x]<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Format a sequence of inputs\n\nParameters\n----------\nx : array\n    Input\n\nReturns\n-------\nout : list\n    List of strings.", "id": "f10513:c0:m1"}
{"signature": "def __call__(self, x):", "body": "if len(x) == <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>if self.base == <NUM_LIT:10>:<EOL><INDENT>xmin = int(np.floor(np.log10(np.min(x))))<EOL>xmax = int(np.ceil(np.log10(np.max(x))))<EOL>emin, emax = self.exponent_limits<EOL>all_multiples = np.all(<EOL>[np.log10(num).is_integer() for num in x])<EOL>if same_log10_order_of_magnitude(x):<EOL><INDENT>f = mpl_format()<EOL>f.formatter.set_powerlimits((emin, emax))<EOL>return f(x)<EOL><DEDENT>elif all_multiples and (xmin <= emin or xmax >= emax):<EOL><INDENT>fmt = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>fmt = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>fmt = '<STR_LIT>'<EOL><DEDENT>labels = [fmt.format(num) for num in x]<EOL>return self._tidyup_labels(labels)<EOL>", "docstring": "Format a sequence of inputs\n\nParameters\n----------\nx : array\n    Input\n\nReturns\n-------\nout : list\n    List of strings.", "id": "f10513:c6:m2"}
{"signature": "def __call__(self, x):", "body": "if len(x) == <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>_precision = precision(x)<EOL>x = round_any(x, _precision / <NUM_LIT:100>) * <NUM_LIT:100><EOL>if _precision > <NUM_LIT:1>:<EOL><INDENT>digits = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>digits = abs(int(np.log10(_precision)))<EOL><DEDENT>formatter = currency_format(prefix='<STR_LIT>',<EOL>suffix='<STR_LIT:%>',<EOL>digits=digits,<EOL>big_mark=self.big_mark)<EOL>labels = formatter(x)<EOL>pattern = re.compile(r'<STR_LIT>')<EOL>if all(pattern.search(val) for val in labels):<EOL><INDENT>labels = [pattern.sub('<STR_LIT:%>', val) for val in labels]<EOL><DEDENT>return labels<EOL>", "docstring": "Format a sequence of inputs\n\nParameters\n----------\nx : array\n    Input\n\nReturns\n-------\nout : list\n    List of strings.", "id": "f10513:c3:m1"}
{"signature": "def _format(formatter, x):", "body": "<EOL>formatter.create_dummy_axis()<EOL>formatter.set_locs([val for val in x if ~np.isnan(val)])<EOL>try:<EOL><INDENT>oom = int(formatter.orderOfMagnitude)<EOL><DEDENT>except AttributeError:<EOL><INDENT>oom = <NUM_LIT:0><EOL><DEDENT>labels = [formatter(tick) for tick in x]<EOL>pattern = re.compile(r'<STR_LIT>')<EOL>for i, label in enumerate(labels):<EOL><INDENT>match = pattern.search(label)<EOL>if match:<EOL><INDENT>labels[i] = pattern.sub('<STR_LIT>', label)<EOL><DEDENT><DEDENT>if oom:<EOL><INDENT>labels = ['<STR_LIT>'.format(s, oom) if s != '<STR_LIT:0>' else s<EOL>for s in labels]<EOL><DEDENT>return labels<EOL>", "docstring": "Helper to format and tidy up", "id": "f10513:m0"}
{"signature": "def min_max(x, na_rm=False, finite=True):", "body": "if not hasattr(x, '<STR_LIT>'):<EOL><INDENT>x = np.asarray(x)<EOL><DEDENT>if na_rm and finite:<EOL><INDENT>x = x[np.isfinite(x)]<EOL><DEDENT>elif not na_rm and np.any(np.isnan(x)):<EOL><INDENT>return np.nan, np.nan<EOL><DEDENT>elif na_rm:<EOL><INDENT>x = x[~np.isnan(x)]<EOL><DEDENT>elif finite:<EOL><INDENT>x = x[~np.isinf(x)]<EOL><DEDENT>if (len(x)):<EOL><INDENT>return np.min(x), np.max(x)<EOL><DEDENT>else:<EOL><INDENT>return float('<STR_LIT>'), float('<STR_LIT>')<EOL><DEDENT>", "docstring": "Return the minimum and maximum of x\n\nParameters\n----------\nx : array_like\n    Sequence\nna_rm : bool\n    Whether to remove ``nan`` values.\nfinite : bool\n    Whether to consider only finite values.\n\nReturns\n-------\nout : tuple\n    (minimum, maximum) of x", "id": "f10514:m1"}
{"signature": "def first_element(obj):", "body": "if isinstance(obj, Iterator):<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>return next(iter(obj))<EOL>", "docstring": "Return the first element of `obj`\n\nParameters\n----------\nobj : iterable\n    Should not be an iterator\n\nReturns\n-------\nout : object\n    First element of `obj`. Raise a class:`StopIteration`\n    exception if `obj` is empty.", "id": "f10514:m4"}
{"signature": "def match(v1, v2, nomatch=-<NUM_LIT:1>, incomparables=None, start=<NUM_LIT:0>):", "body": "v2_indices = {}<EOL>for i, x in enumerate(v2):<EOL><INDENT>if x not in v2_indices:<EOL><INDENT>v2_indices[x] = i<EOL><DEDENT><DEDENT>v1_to_v2_map = [nomatch] * len(v1)<EOL>skip = set(incomparables) if incomparables else set()<EOL>for i, x in enumerate(v1):<EOL><INDENT>if x in skip:<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>v1_to_v2_map[i] = v2_indices[x] + start<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return v1_to_v2_map<EOL>", "docstring": "Return a vector of the positions of (first)\nmatches of its first argument in its second.\n\nParameters\n----------\nv1: array_like\n    Values to be matched\n\nv2: array_like\n    Values to be matched against\n\nnomatch: int\n    Value to be returned in the case when\n    no match is found.\n\nincomparables: array_like\n    Values that cannot be matched. Any value in ``v1``\n    matching a value in this list is assigned the nomatch\n    value.\nstart: int\n    Type of indexing to use. Most likely 0 or 1", "id": "f10514:m2"}
{"signature": "def crayon_palette(colors):", "body": "return [crayon_rgb[name] for name in colors]<EOL>", "docstring": "Make a palette with color names from Crayola crayons.\n\nThe colors come from\nhttp://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors\n\nParameters\n----------\ncolors : list of strings\n    List of keys in the ``mizani.external.crayloax_rgb`` dictionary.\n\nReturns\n-------\npalette : list\n    List of colors as RGB hex strings.\n\nExamples\n--------\n>>> palette = crayon_palette(['almond', 'silver', 'yellow'])\n>>> palette\n['#eed9c4', '#c9c0bb', '#fbe870']\n\n>>> from mizani.external import crayon_rgb\n>>> list(sorted(crayon_rgb.keys()))[:5]\n['almond', 'antique brass', 'apricot', 'aquamarine', 'asparagus']", "id": "f10515:m15"}
{"signature": "def gradient_n_pal(colors, values=None, name='<STR_LIT>'):", "body": "<EOL>if values is None:<EOL><INDENT>colormap = mcolors.LinearSegmentedColormap.from_list(<EOL>name, colors)<EOL><DEDENT>else:<EOL><INDENT>colormap = mcolors.LinearSegmentedColormap.from_list(<EOL>name, list(zip(values, colors)))<EOL><DEDENT>def _gradient_n_pal(vals):<EOL><INDENT>return ratios_to_colors(vals, colormap)<EOL><DEDENT>return _gradient_n_pal<EOL>", "docstring": "Create a n color gradient palette\n\nParameters\n----------\ncolors : list\n    list of colors\nvalues : list, optional\n    list of points in the range [0, 1] at which to\n    place each color. Must be the same size as\n    `colors`. Default to evenly space the colors\nname : str\n    Name to call the resultant MPL colormap\n\nReturns\n-------\nout : function\n    Continuous color palette that takes a single\n    parameter either a :class:`float` or a sequence\n    of floats maps those value(s) onto the palette\n    and returns color(s). The float(s) must be\n    in the range [0, 1].\n\nExamples\n--------\n>>> palette = gradient_n_pal(['red', 'blue'])\n>>> palette([0, .25, .5, .75, 1])\n['#ff0000', '#bf0040', '#7f0080', '#3f00c0', '#0000ff']", "id": "f10515:m9"}
{"signature": "def xkcd_palette(colors):", "body": "return [xkcd_rgb[name] for name in colors]<EOL>", "docstring": "Make a palette with color names from the xkcd color survey.\n\nSee xkcd for the full list of colors: http://xkcd.com/color/rgb/\n\nParameters\n----------\ncolors : list of strings\n    List of keys in the ``mizani.external.xkcd_rgb`` dictionary.\n\nReturns\n-------\npalette : list\n    List of colors as RGB hex strings.\n\nExamples\n--------\n>>> palette = xkcd_palette(['red', 'green', 'blue'])\n>>> palette\n['#e50000', '#15b01a', '#0343df']\n\n>>> from mizani.external import xkcd_rgb\n>>> list(sorted(xkcd_rgb.keys()))[:5]\n['acid green', 'adobe', 'algae', 'algae green', 'almost black']", "id": "f10515:m14"}
{"signature": "def hue_pal(h=<NUM_LIT>, l=<NUM_LIT>, s=<NUM_LIT>, color_space='<STR_LIT>'):", "body": "if not all([<NUM_LIT:0> <= val <= <NUM_LIT:1> for val in (h, l, s)]):<EOL><INDENT>msg = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(h, l, s))<EOL>raise ValueError(msg)<EOL><DEDENT>if color_space not in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValueError(msg)<EOL><DEDENT>name = '<STR_LIT>'.format(color_space)<EOL>palette = globals()[name]<EOL>def _hue_pal(n):<EOL><INDENT>colors = palette(n, h=h, l=l, s=s)<EOL>return [mcolors.rgb2hex(c) for c in colors]<EOL><DEDENT>return _hue_pal<EOL>", "docstring": "Utility for making hue palettes for color schemes.\n\nParameters\n----------\nh : float\n    first hue. In the [0, 1] range\nl : float\n    lightness. In the [0, 1] range\ns : float\n    saturation. In the [0, 1] range\ncolor_space : 'hls' | 'husl'\n    Color space to use for the palette\n\nReturns\n-------\nout : function\n    A discrete color palette that takes a single\n    :class:`int` parameter ``n`` and returns ``n``\n    equally spaced colors. Though the palette\n    is continuous, since it is varies the hue it\n    is good for categorical data. However if ``n``\n    is large enough the colors show continuity.\n\nExamples\n--------\n>>> hue_pal()(5)\n['#db5f57', '#b9db57', '#57db94', '#5784db', '#c957db']\n>>> hue_pal(color_space='husl')(5)\n['#e0697e', '#9b9054', '#569d79', '#5b98ab', '#b675d7']", "id": "f10515:m6"}
{"signature": "def manual_pal(values):", "body": "max_n = len(values)<EOL>def _manual_pal(n):<EOL><INDENT>if n > max_n:<EOL><INDENT>msg = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>warnings.warn(msg.format(max_n, n))<EOL><DEDENT>return values[:n]<EOL><DEDENT>return _manual_pal<EOL>", "docstring": "Create a palette from a list of values\n\nParameters\n----------\nvalues : sequence\n    Values that will be returned by the palette function.\n\nReturns\n-------\nout : function\n    A function palette that takes a single\n    :class:`int` parameter ``n`` and returns ``n`` values.\n\nExamples\n--------\n>>> palette = manual_pal(['a', 'b', 'c', 'd', 'e'])\n>>> palette(3)\n['a', 'b', 'c']", "id": "f10515:m13"}
{"signature": "def grey_pal(start=<NUM_LIT>, end=<NUM_LIT>):", "body": "gamma = <NUM_LIT><EOL>ends = ((<NUM_LIT:0.0>, start, start), (<NUM_LIT:1.0>, end, end))<EOL>cdict = {'<STR_LIT>': ends, '<STR_LIT>': ends, '<STR_LIT>': ends}<EOL>grey_cmap = mcolors.LinearSegmentedColormap('<STR_LIT>', cdict)<EOL>def continuous_grey_palette(n):<EOL><INDENT>colors = []<EOL>for x in np.linspace(start**gamma, end**gamma, n):<EOL><INDENT>x = (x ** (<NUM_LIT:1.>/gamma) - start) / (end - start)<EOL>colors.append(mcolors.rgb2hex(grey_cmap(x)))<EOL><DEDENT>return colors<EOL><DEDENT>return continuous_grey_palette<EOL>", "docstring": "Utility for creating continuous grey scale palette\n\nParameters\n----------\nstart : float\n    grey value at low end of palette\nend : float\n    grey value at high end of palette\n\nReturns\n-------\nout : function\n    Continuous color palette that takes a single\n    :class:`int` parameter ``n`` and returns ``n``\n    equally spaced colors.\n\nExamples\n--------\n>>> palette = grey_pal()\n>>> palette(5)\n['#333333', '#737373', '#989898', '#b5b5b5', '#cccccc']", "id": "f10515:m5"}
{"signature": "def cubehelix_pal(start=<NUM_LIT:0>, rot=<NUM_LIT>, gamma=<NUM_LIT:1.0>, hue=<NUM_LIT>,<EOL>light=<NUM_LIT>, dark=<NUM_LIT>, reverse=False):", "body": "cdict = mpl._cm.cubehelix(gamma, start, rot, hue)<EOL>cubehelix_cmap = mpl.colors.LinearSegmentedColormap('<STR_LIT>', cdict)<EOL>def cubehelix_palette(n):<EOL><INDENT>values = np.linspace(light, dark, n)<EOL>return [mcolors.rgb2hex(cubehelix_cmap(x)) for x in values]<EOL><DEDENT>return cubehelix_palette<EOL>", "docstring": "Utility for creating continuous palette from the cubehelix system.\n\nThis produces a colormap with linearly-decreasing (or increasing)\nbrightness. That means that information will be preserved if printed to\nblack and white or viewed by someone who is colorblind.\n\nParameters\n----------\nstart : float (0 <= start <= 3)\n    The hue at the start of the helix.\nrot : float\n    Rotations around the hue wheel over the range of the palette.\ngamma : float (0 <= gamma)\n    Gamma factor to emphasize darker (gamma < 1) or lighter (gamma > 1)\n    colors.\nhue : float (0 <= hue <= 1)\n    Saturation of the colors.\ndark : float (0 <= dark <= 1)\n    Intensity of the darkest color in the palette.\nlight : float (0 <= light <= 1)\n    Intensity of the lightest color in the palette.\nreverse : bool\n    If True, the palette will go from dark to light.\n\nReturns\n-------\nout : function\n    Continuous color palette that takes a single\n    :class:`int` parameter ``n`` and returns ``n``\n    equally spaced colors.\n\n\nReferences\n----------\nGreen, D. A. (2011). \"A colour scheme for the display of astronomical\nintensity images\". Bulletin of the Astromical Society of India, Vol. 39,\np. 289-295.\n\nExamples\n--------\n>>> palette = cubehelix_pal()\n>>> palette(5)\n['#edd1cb', '#d499a7', '#aa688f', '#6e4071', '#2d1e3e']", "id": "f10515:m16"}
{"signature": "def area_pal(range=(<NUM_LIT:1>, <NUM_LIT:6>)):", "body": "def area_palette(x):<EOL><INDENT>return rescale(np.sqrt(x), to=range, _from=(<NUM_LIT:0>, <NUM_LIT:1>))<EOL><DEDENT>return area_palette<EOL>", "docstring": "Point area palette (continuous).\n\nParameters\n----------\nrange : tuple\n    Numeric vector of length two, giving range of possible sizes.\n    Should be greater than 0.\n\nReturns\n-------\nout : function\n    Palette function that takes a sequence of values\n    in the range ``[0, 1]`` and returns values in\n    the specified range.\n\nExamples\n--------\n>>> x = np.arange(0, .6, .1)**2\n>>> palette = area_pal()\n>>> palette(x)\narray([1. , 1.5, 2. , 2.5, 3. , 3.5])\n\nThe results are equidistant because the input ``x`` is in\narea space, i.e it is squared.", "id": "f10515:m3"}
{"signature": "def desaturate_pal(color, prop, reverse=False):", "body": "if not <NUM_LIT:0> <= prop <= <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>rgb = mcolors.colorConverter.to_rgb(color)<EOL>h, l, s = colorsys.rgb_to_hls(*rgb)<EOL>s *= prop<EOL>desaturated_color = colorsys.hls_to_rgb(h, l, s)<EOL>colors = [color, desaturated_color]<EOL>if reverse:<EOL><INDENT>colors = colors[::-<NUM_LIT:1>]<EOL><DEDENT>return gradient_n_pal(colors, name='<STR_LIT>')<EOL>", "docstring": "Create a palette that desaturate a color by some proportion\n\nParameters\n----------\ncolor : matplotlib color\n    hex, rgb-tuple, or html color name\nprop : float\n    saturation channel of color will be multiplied by\n    this value\nreverse : bool\n    Whether to reverse the palette.\n\nReturns\n-------\nout : function\n    Continuous color palette that takes a single\n    parameter either a :class:`float` or a sequence\n    of floats maps those value(s) onto the palette\n    and returns color(s). The float(s) must be\n    in the range [0, 1].\n\nExamples\n--------\n>>> palette = desaturate_pal('red', .1)\n>>> palette([0, .25, .5, .75, 1])\n['#ff0000', '#e21d1d', '#c53a3a', '#a95656', '#8c7373']", "id": "f10515:m12"}
{"signature": "def abs_area(max):", "body": "def abs_area_palette(x):<EOL><INDENT>return rescale(np.sqrt(np.abs(x)), to=(<NUM_LIT:0>, max), _from=(<NUM_LIT:0>, <NUM_LIT:1>))<EOL><DEDENT>return abs_area_palette<EOL>", "docstring": "Point area palette (continuous), with area proportional to value.\n\nParameters\n----------\nmax : float\n    A number representing the maximum size\n\nReturns\n-------\nout : function\n    Palette function that takes a sequence of values\n    in the range ``[0, 1]`` and returns values in the range\n    ``[0, max]``.\n\nExamples\n--------\n>>> x = np.arange(0, .8, .1)**2\n>>> palette = abs_area(5)\n>>> palette(x)\narray([0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5])\n\nCompared to :func:`area_pal`, :func:`abs_area` will handle values\nin the range ``[-1, 0]`` without returning ``np.nan``. And values\nwhose absolute value is greater than 1 will be clipped to the\nmaximum.", "id": "f10515:m4"}
{"signature": "def hls_palette(n_colors=<NUM_LIT:6>, h=<NUM_LIT>, l=<NUM_LIT>, s=<NUM_LIT>):", "body": "hues = np.linspace(<NUM_LIT:0>, <NUM_LIT:1>, n_colors + <NUM_LIT:1>)[:-<NUM_LIT:1>]<EOL>hues += h<EOL>hues %= <NUM_LIT:1><EOL>hues -= hues.astype(int)<EOL>palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues]<EOL>return palette<EOL>", "docstring": "Get a set of evenly spaced colors in HLS hue space.\n\nh, l, and s should be between 0 and 1\n\nParameters\n----------\n\nn_colors : int\n    number of colors in the palette\nh : float\n    first hue\nl : float\n    lightness\ns : float\n    saturation\n\nReturns\n-------\npalette : list\n    List of colors as RGB hex strings.\n\nSee Also\n--------\nhusl_palette : Make a palette using evenly spaced circular\n    hues in the HUSL system.\n\nExamples\n--------\n>>> len(hls_palette(2))\n2\n>>> len(hls_palette(9))\n9", "id": "f10515:m0"}
{"signature": "def check_dependencies():", "body": "pass<EOL>", "docstring": "Check for system level dependencies", "id": "f10518:m0"}
{"signature": "def get_package_data():", "body": "return {}<EOL>", "docstring": "Return package data\n\nFor example:\n\n    {'': ['*.txt', '*.rst'],\n     'hello': ['*.msg']}\n\nmeans:\n    - If any package contains *.txt or *.rst files,\n      include them\n    - And include any *.msg files found in\n      the 'hello' package, too:", "id": "f10518:m2"}
{"signature": "def list_available_drivers():", "body": "return drivers.available_drivers()<EOL>", "docstring": "Returns a list of string with the names of available drivers.\n\n    Available means that the driver is installed and can be used. For example,\n    it will not contain \"Raspberry\" if you're not running on a Raspberry Pi,\n    even if the raspberry.py script is present in the drivers directory.\n\n    @returns a list of strings that can be fed to `ahio.new_driver` to get an\n    instance of the desired driver.", "id": "f10519:m3"}
{"signature": "def clear_path():", "body": "drivers.clear_path()<EOL>", "docstring": "Clears the list of folders where to load drivers from", "id": "f10519:m2"}
{"signature": "def driver_info(name):", "body": "return drivers.driver_info(name)<EOL>", "docstring": "Returns driver metadata.\n\n    Returns a class which static properties contains metadata from the\n    driver, such as name and availability.\n\n    @returns a subclass from `ahio.abstract_driver.AbstractahioDriverInfo` with\n    metadata from the driver.", "id": "f10519:m4"}
{"signature": "def map_pin(self, abstract_pin_id, physical_pin_id):", "body": "if physical_pin_id:<EOL><INDENT>self._pin_mapping[abstract_pin_id] = physical_pin_id<EOL><DEDENT>else:<EOL><INDENT>self._pin_mapping.pop(abstract_pin_id, None)<EOL><DEDENT>", "docstring": "Maps a pin number to a physical device pin.\n\n        To make it easy to change drivers without having to refactor a lot of\n        code, this library does not use the names set by the driver to identify\n        a pin. This function will map a number, that will be used by other\n        functions, to a physical pin represented by the drivers pin id. That\n        way, if you need to use another pin or change the underlying driver\n        completly, you only need to redo the mapping.\n\n        If you're developing a driver, keep in mind that your driver will not\n        know about this. The other functions will translate the mapped pin to\n        your id before calling your function.\n\n        @arg abstract_pin_id the id that will identify this pin in the\n        other function calls. You can choose what you want.\n\n        @arg physical_pin_id the id returned in the driver.\n            See `AbstractDriver.available_pins`. Setting it to None removes the\n            mapping.", "id": "f10520:c1:m1"}
{"signature": "def set_pin_interpolation(self,<EOL>pin,<EOL>read_min,<EOL>read_max,<EOL>write_min,<EOL>write_max):", "body": "if type(pin) is list:<EOL><INDENT>args = (read_min, read_max, write_min, write_max)<EOL>for p in pin:<EOL><INDENT>self.set_pin_interpolation(p, *args)<EOL><DEDENT>return<EOL><DEDENT>valid_read = (read_min is not None and read_max is not None)<EOL>valid_write = (write_min is not None and write_max is not None)<EOL>if not valid_read and not valid_write:<EOL><INDENT>self._pin_lin.pop(pin, None)<EOL>return<EOL><DEDENT>pin_id = self._pin_mapping.get(pin, None)<EOL>pins = [pin for pin in self.available_pins() if pin_id == pin['<STR_LIT:id>']]<EOL>read = pins[<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>']<EOL>write = pins[<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>']<EOL>valid_read = valid_read and read<EOL>valid_write = valid_write and write<EOL>self._pin_lin[pin] = {<EOL>'<STR_LIT>': (*read, read_min, read_max) if valid_read else None,<EOL>'<STR_LIT>': (write_min, write_max, *write) if valid_write else None<EOL>}<EOL>", "docstring": "Interpolates input and output values for `pin`.\n\n        Changes the output and input of `AbstractDriver.read` and\n        `AbstractDriver.write` functions to use a value in range\n        (`read_min`, `read_max`) or (`write_min`, `write_max`) instead of the\n        values returned by `available_pins` (analog only). The conversion is\n        done using linear interpolation. If `read_min`, `read_max`, `write_min`\n        and `write_max` are all None or don't form valid pairs (like, read_min\n        has a value but read_max is None), the pin is deregistered. If you pass\n        a pair but leave the other with None values, only one direction is\n        registered.\n\n        @arg pin pin id you've set using `AbstractDriver.map_pin`\n        @arg read_min the min value for the linear interpolation of\n             `AbstractDriver.read`.\n        @arg read_max the max value for the linear interpolation of\n             `AbstractDriver.read`.\n        @arg write_min the min value for the linear interpolation of\n             `AbstractDriver.write`.\n        @arg write_max the max value for the linear interpolation of\n             `AbstractDriver.write`.", "id": "f10520:c1:m4"}
{"signature": "def mapped_pins(self):", "body": "return self._pin_mapping<EOL>", "docstring": "Returns a dictionary containing the mapped pins.\n\n        Each key of the dictionary is the ID you set with map_pin, and each\n        value is the driver-specific ID.\n\n        @returns a dictionary of mapped pins", "id": "f10520:c1:m2"}
{"signature": "def read(self, pin):", "body": "if type(pin) is list:<EOL><INDENT>return [self.read(p) for p in pin]<EOL><DEDENT>pin_id = self._pin_mapping.get(pin, None)<EOL>if pin_id:<EOL><INDENT>value = self._read(pin_id)<EOL>lpin = self._pin_lin.get(pin, None)<EOL>if lpin and type(lpin['<STR_LIT>']) is tuple:<EOL><INDENT>read_range = lpin['<STR_LIT>']<EOL>value = self._linear_interpolation(value, *read_range)<EOL><DEDENT>return value<EOL><DEDENT>else:<EOL><INDENT>raise KeyError('<STR_LIT>' % pin)<EOL><DEDENT>", "docstring": "Reads value from pin `pin`.\n\n        Returns the value read from pin `pin`. If it's an analog pin, returns\n        a number in analog.input_range. If it's digital, returns\n        `ahio.LogicValue`.\n\n        If you're developing a driver, implement _read(self, pin)\n\n        @arg pin the pin to read from\n        @returns the value read from the pin\n\n        @throw KeyError if pin isn't mapped.", "id": "f10520:c1:m10"}
{"signature": "def set_pin_type(self, pin, ptype):", "body": "if type(pin) is list:<EOL><INDENT>for p in pin:<EOL><INDENT>self.set_pin_type(p, ptype)<EOL><DEDENT>return<EOL><DEDENT>pin_id = self._pin_mapping.get(pin, None)<EOL>if type(ptype) is not ahio.PortType:<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>elif pin_id:<EOL><INDENT>self._set_pin_type(pin_id, ptype)<EOL><DEDENT>else:<EOL><INDENT>raise KeyError('<STR_LIT>' % pin)<EOL><DEDENT>", "docstring": "Sets pin `pin` to `type`.\n\n        The pin should support the requested mode. Calling this function\n        on a unmapped pin does nothing. Calling it with a unsupported mode\n        throws RuntimeError.\n\n        If you're developing a driver, you should implement\n        _set_pin_type(self, pin, ptype) where `pin` will be one of your\n        internal IDs. If a pin is set to OUTPUT, put it on LOW state.\n\n        @arg pin pin id you've set using `AbstractDriver.map_pin`\n        @arg mode a value from `AbstractDriver.PortType`\n\n        @throw KeyError if pin isn't mapped.\n        @throw RuntimeError if type is not supported by pin.", "id": "f10520:c1:m7"}
{"signature": "def write(self, pin, value, pwm=False):", "body": "if type(pin) is list:<EOL><INDENT>for p in pin:<EOL><INDENT>self.write(p, value, pwm)<EOL><DEDENT>return<EOL><DEDENT>if pwm and type(value) is not int and type(value) is not float:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>pin_id = self._pin_mapping.get(pin, None)<EOL>if pin_id:<EOL><INDENT>lpin = self._pin_lin.get(pin, None)<EOL>if lpin and type(lpin['<STR_LIT>']) is tuple:<EOL><INDENT>write_range = lpin['<STR_LIT>']<EOL>value = self._linear_interpolation(value, *write_range)<EOL><DEDENT>self._write(pin_id, value, pwm)<EOL><DEDENT>else:<EOL><INDENT>raise KeyError('<STR_LIT>' % pin)<EOL><DEDENT>", "docstring": "Sets the output to the given value.\n\n        Sets `pin` output to given value. If the pin is in INPUT mode, do\n        nothing. If it's an analog pin, value should be in write_range.\n        If it's not in the allowed range, it will be clamped. If pin is in\n        digital mode, value can be `ahio.LogicValue` if `pwm` = False, or a\n        number between 0 and 1 if `pwm` = True. If PWM is False, the pin will\n        be set to HIGH or LOW, if `pwm` is True, a PWM wave with the given\n        cycle will be created. If the pin does not support PWM and `pwm` is\n        True, raise RuntimeError. The `pwm` argument should be ignored in case\n        the pin is analog. If value is not valid for the given\n        pwm/analog|digital combination, raise TypeError.\n\n        If you're developing a driver, implement _write(self, pin, value, pwm)\n\n        @arg pin the pin to write to\n        @arg value the value to write on the pin\n        @arg pwm wether the output should be a pwm wave\n\n        @throw RuntimeError if the pin does not support PWM and `pwm` is True.\n        @throw TypeError if value is not valid for this pin's mode and pwm\n               value.\n        @throw KeyError if pin isn't mapped.", "id": "f10520:c1:m9"}
{"signature": "def available_pins(self):", "body": "raise NotImplementedMethod()<EOL>", "docstring": "Returns available pins.\n\n        Returns a list of dictionaries indicating the available pins and it's\n        capabilities. It should follow this format:\n        \\verbatim\n        [ {\n            'id': 1, # some value that represents this pin in your\n                     # implementation.\n                     # prefer numbers and Enums. This value will be used\n                     # in `map_pin(a,p)`\n            'name': 'Pin 1', # a name that can be shown to the user, if needed\n            'analog': {\n                'input': True, # if analog input is available\n                'output': False, # if analog output is available\n                'read_range': (0, 1023), # if input is supported, what is the\n                                         # valid range (both inclusive)\n                'write_range': (0, 5) # if output is supported, what is the\n                                      #valid range (both inclusive)\n            },\n            'digital': {\n                'input': True, # if digital input is available\n                'output': True, # if digital output is available\n                'pwm': True # if pwm generation is available\n            }\n        }]\n        \\endverbatim\n\n        If you're developing a driver, you should override this function.\n\n        @returns a list of dictionaries", "id": "f10520:c1:m0"}
{"signature": "def setup(self, port):", "body": "port = str(port)<EOL>self._serial = serial.Serial(port, <NUM_LIT>, timeout=<NUM_LIT:2>)<EOL>time.sleep(<NUM_LIT:2>)  <EOL>if not self._serial.is_open:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>self._serial.write(b'<STR_LIT>')<EOL>if self._serial.read() != b'<STR_LIT>':<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>ps = [p for p in self.available_pins() if p['<STR_LIT>']['<STR_LIT>']]<EOL>for pin in ps:<EOL><INDENT>self._set_pin_direction(pin['<STR_LIT:id>'], ahio.Direction.Output)<EOL><DEDENT>", "docstring": "Connects to an Arduino UNO on serial port `port`.\n\n        @throw RuntimeError can't connect to Arduino", "id": "f10522:c1:m2"}
{"signature": "def new_driver_object(name):", "body": "driver = __locate_driver_named(name)<EOL>return driver.Driver() if driver else None<EOL>", "docstring": "Instantiates a new object of the named driver.\n\n    The API used by the returned object can be seen in\n    `ahio.abstract_driver.AbstractDriver`\n\n    @returns a Driver object from the required type of None if it's not\n    available", "id": "f10526:m5"}
{"signature": "def driver_info(name):", "body": "driver = __locate_driver_named(name)<EOL>return driver.ahioDriverInfo if driver else None<EOL>", "docstring": "Returns driver metadata.\n\n    Returns a class which static properties contains metadata from the\n    driver, such as name and availability.\n\n    @returns a subclass from `ahio.abstract_driver.AbstractahioDriverInfo` with\n    metadata from the driver.", "id": "f10526:m4"}
{"signature": "def available_drivers():", "body": "global __modules<EOL>global __available<EOL>if type(__modules) is not list:<EOL><INDENT>__modules = list(__modules)<EOL><DEDENT>if not __available:<EOL><INDENT>__available = [d.ahioDriverInfo.NAME<EOL>for d in __modules<EOL>if d.ahioDriverInfo.AVAILABLE]<EOL><DEDENT>return __available<EOL>", "docstring": "Returns a list of available drivers names.", "id": "f10526:m3"}
{"signature": "def setup(self, address, port):", "body": "address = str(address)<EOL>port = int(port)<EOL>self._socket = socket.socket()<EOL>self._socket.connect((address, port))<EOL>self._socket.send(b'<STR_LIT>')<EOL>with self._socket.makefile() as f:<EOL><INDENT>if f.readline().strip() != '<STR_LIT:OK>':<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Connects to server at `address`:`port`.\n\n        Connects to a TCP server listening at `address`:`port` that implements\n        the protocol described in the file \"Generic TCP I:O Protocol.md\"\n\n        @arg address IP or address to connect to.\n        @arg port port to connect to.\n\n        @throw RuntimeError if connection was successiful but protocol isn't\n               supported.\n        @throw any exception thrown by `socket.socket`'s methods.", "id": "f10527:c1:m2"}
{"signature": "def get(self, url, params={}):", "body": "params.update({'<STR_LIT>': self.api_key})<EOL>try:<EOL><INDENT>response = requests.get(self.host + url, params=params)<EOL><DEDENT>except RequestException as e:<EOL><INDENT>response = e.args<EOL><DEDENT>return self.json_parse(response.content)<EOL>", "docstring": "Issues a GET request against the API, properly formatting the params\n\n:param url: a string, the url you are requesting\n:param params: a dict, the key-value of all the paramaters needed\n               in the request\n:returns: a dict parsed of the JSON response", "id": "f10530:c0:m1"}
{"signature": "def post(self, url, params={}, files=None):", "body": "params.update({'<STR_LIT>': self.api_key})<EOL>try:<EOL><INDENT>response = requests.post(self.host + url, data=params, files=files)<EOL>return self.json_parse(response.content)<EOL><DEDENT>except RequestException as e:<EOL><INDENT>return self.json_parse(e.args)<EOL><DEDENT>", "docstring": "Issues a POST request against the API, allows for multipart data uploads\n\n:param url: a string, the url you are requesting\n:param params: a dict, the key-value of all the parameters needed\n               in the request\n:param files: a list, the list of tuples of files\n\n:returns: a dict parsed of the JSON response", "id": "f10530:c0:m2"}
{"signature": "def __init__(self, api_key):", "body": "<EOL>self.ApiResourceMixin.set_api_key(api_key)<EOL>self.products = self.ProductsResource()<EOL>self.quote = self.QuoteResource()<EOL>self.status = self.StatusResource()<EOL>self.webhooks = self.WebhooksResource()<EOL>", "docstring": "Initializes the ShirtsIOClient object, creating the ShirtsIORequest\nobject which deals with all request formatting.\n\n:param api_key: a string, the user specific secret, received\n                     from the /access_token endpoint\n\n:returns: None", "id": "f10531:c0:m0"}
{"signature": "def validate_params(required, optional, params):", "body": "missing_fields = [x for x in required if x not in params]<EOL>if missing_fields:<EOL><INDENT>field_strings = \"<STR_LIT:U+002CU+0020>\".join(missing_fields)<EOL>raise Exception(\"<STR_LIT>\" % field_strings)<EOL><DEDENT>disallowed_fields = [x for x in params if x not in optional and x not in required]<EOL>if disallowed_fields:<EOL><INDENT>field_strings = \"<STR_LIT:U+002CU+0020>\".join(disallowed_fields)<EOL>raise Exception(\"<STR_LIT>\" % field_strings)<EOL><DEDENT>", "docstring": "Helps us validate the parameters for the request\n\n:param valid_options: a list of strings of valid options for the\n                      api request\n:param params: a dict, the key-value store which we really only care about\n               the key which has tells us what the user is using for the\n               API request\n\n:returns: None or throws an exception if the validation fails", "id": "f10532:m0"}
{"signature": "def remove_near_duplicate_relation(triples, threshold=<NUM_LIT>):", "body": "logging.debug(\"<STR_LIT>\")<EOL>_assert_threshold(threshold)<EOL>duplicate_rel_counter = defaultdict(list)<EOL>relations = set()<EOL>for t in triples:<EOL><INDENT>duplicate_rel_counter[t.relation].append(f\"<STR_LIT>\")<EOL>relations.add(t.relation)<EOL><DEDENT>relations = list(relations)<EOL>num_triples = len(triples)<EOL>removal_relation_set = set()<EOL>for rel, values in duplicate_rel_counter.items():<EOL><INDENT>duplicate_rel_counter[rel] = Superminhash(values)<EOL><DEDENT>for i in relations:<EOL><INDENT>for j in relations:<EOL><INDENT>if i == j or i in removal_relation_set or j in removal_relation_set: continue<EOL>close_relations = [i]<EOL>if _set_close_to(duplicate_rel_counter[i], duplicate_rel_counter[j], threshold):<EOL><INDENT>close_relations.append(j)<EOL><DEDENT><DEDENT>if len(close_relations) > <NUM_LIT:1>:<EOL><INDENT>close_relations.pop(np.random.randint(len(close_relations)))<EOL>removal_relation_set |= set(close_relations)<EOL><DEDENT><DEDENT>logging.info(\"<STR_LIT>\".format(len(removal_relation_set), str(removal_relation_set)))<EOL>return list(filterfalse(lambda x: x.relation in removal_relation_set, triples))<EOL>", "docstring": "If entity pairs in a relation is as close as another relations, only keep one relation of such set.", "id": "f10537:m10"}
{"signature": "def build_index_and_mapping(triples):", "body": "ents = bidict()<EOL>rels = bidict()<EOL>ent_id = <NUM_LIT:0><EOL>rel_id = <NUM_LIT:0><EOL>collected = []<EOL>for t in triples:<EOL><INDENT>for e in (t.head, t.tail):<EOL><INDENT>if e not in ents:<EOL><INDENT>ents[e] = ent_id<EOL>ent_id += <NUM_LIT:1><EOL><DEDENT><DEDENT>if t.relation not in rels:<EOL><INDENT>rels[t.relation] = rel_id<EOL>rel_id += <NUM_LIT:1><EOL><DEDENT>collected.append(kgedata.TripleIndex(ents[t.head], rels[t.relation], ents[t.tail]))<EOL><DEDENT>return collected, ents, rels<EOL>", "docstring": "index all triples into indexes and return their mappings", "id": "f10537:m0"}
{"signature": "def read_openke_translation(filename, delimiter='<STR_LIT:\\t>', entity_first=True):", "body": "result = {}<EOL>with open(filename, \"<STR_LIT:r>\") as f:<EOL><INDENT>_ = next(f) <EOL>for line in f:<EOL><INDENT>line_slice = line.rstrip().split(delimiter)<EOL>if not entity_first:<EOL><INDENT>line_slice = list(reversed(line_slice))<EOL><DEDENT>result[line_slice[<NUM_LIT:0>]] = line_slice[<NUM_LIT:1>]<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Returns map with entity or relations from plain text.", "id": "f10541:m10"}
{"signature": "def read_translation(filename):", "body": "translation = triple_pb.Translation()<EOL>with open(filename, \"<STR_LIT:rb>\") as f:<EOL><INDENT>translation.ParseFromString(f.read())<EOL><DEDENT>def unwrap_translation_units(units):<EOL><INDENT>for u in units: yield u.element, u.index<EOL><DEDENT>return (list(unwrap_translation_units(translation.entities)),<EOL>list(unwrap_translation_units(translation.relations)))<EOL>", "docstring": "Returns protobuf mapcontainer. Read from translation file.", "id": "f10541:m9"}
{"signature": "def logon(self, username, password):", "body": "if self._token:<EOL><INDENT>self.logoff()<EOL><DEDENT>try:<EOL><INDENT>response = self.__makerequest(<EOL>'<STR_LIT>', email=username, password=password)<EOL><DEDENT>except FogBugzAPIError:<EOL><INDENT>e = sys.exc_info()[<NUM_LIT:1>]<EOL>raise FogBugzLogonError(e)<EOL><DEDENT>self._token = response.token.string<EOL>if type(self._token) == CData:<EOL><INDENT>self._token = self._token.encode('<STR_LIT:utf-8>')<EOL><DEDENT>", "docstring": "Logs the user on to FogBugz.\n\nReturns None for a successful login.", "id": "f10557:c4:m1"}
{"signature": "def token(self, token):", "body": "self._token = token<EOL>", "docstring": "Set the token without actually logging on.  More secure.", "id": "f10557:c4:m3"}
{"signature": "def authenticate(self, request):", "body": "try:<EOL><INDENT>oauth_request = oauth_provider.utils.get_oauth_request(request)<EOL><DEDENT>except oauth.Error as err:<EOL><INDENT>raise exceptions.AuthenticationFailed(err.message)<EOL><DEDENT>if not oauth_request:<EOL><INDENT>return None<EOL><DEDENT>oauth_params = oauth_provider.consts.OAUTH_PARAMETERS_NAMES<EOL>found = any(param for param in oauth_params if param in oauth_request)<EOL>missing = list(param for param in oauth_params if param not in oauth_request)<EOL>if not found:<EOL><INDENT>return None<EOL><DEDENT>if missing:<EOL><INDENT>msg = '<STR_LIT>' % ('<STR_LIT:U+002CU+0020>'.join(missing))<EOL>raise exceptions.AuthenticationFailed(msg)<EOL><DEDENT>if not self.check_nonce(request, oauth_request):<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise exceptions.AuthenticationFailed(msg)<EOL><DEDENT>try:<EOL><INDENT>consumer_key = oauth_request.get_parameter('<STR_LIT>')<EOL>consumer = oauth_provider_store.get_consumer(request, oauth_request, consumer_key)<EOL><DEDENT>except oauth_provider.store.InvalidConsumerError:<EOL><INDENT>msg = '<STR_LIT>' % oauth_request.get_parameter('<STR_LIT>')<EOL>raise exceptions.AuthenticationFailed(msg)<EOL><DEDENT>if consumer.status != oauth_provider.consts.ACCEPTED:<EOL><INDENT>msg = '<STR_LIT>' % consumer.get_status_display()<EOL>raise exceptions.AuthenticationFailed(msg)<EOL><DEDENT>try:<EOL><INDENT>token_param = oauth_request.get_parameter('<STR_LIT>')<EOL>token = oauth_provider_store.get_access_token(request, oauth_request, consumer, token_param)<EOL><DEDENT>except oauth_provider.store.InvalidTokenError:<EOL><INDENT>msg = '<STR_LIT>' % oauth_request.get_parameter('<STR_LIT>')<EOL>raise exceptions.AuthenticationFailed(msg)<EOL><DEDENT>try:<EOL><INDENT>self.validate_token(request, consumer, token)<EOL><DEDENT>except oauth.Error as err:<EOL><INDENT>raise exceptions.AuthenticationFailed(err.message)<EOL><DEDENT>user = token.user<EOL>if not user.is_active:<EOL><INDENT>msg = '<STR_LIT>' % user.username<EOL>raise exceptions.AuthenticationFailed(msg)<EOL><DEDENT>return (token.user, token)<EOL>", "docstring": "Returns two-tuple of (user, token) if authentication succeeds,\nor None otherwise.", "id": "f10563:c0:m1"}
{"signature": "def get_packages(package):", "body": "return [dirpath<EOL>for dirpath, dirnames, filenames in os.walk(package)<EOL>if os.path.exists(os.path.join(dirpath, '<STR_LIT>'))]<EOL>", "docstring": "Return root package and all sub-packages.", "id": "f10569:m1"}
{"signature": "def _compute_follow(self):", "body": "self._follow[self.start_symbol].add(END_OF_INPUT)<EOL>while True:<EOL><INDENT>changed = False<EOL>for nonterminal, productions in self.nonterminals.items():<EOL><INDENT>for production in productions:<EOL><INDENT>for i, symbol in enumerate(production.rhs):<EOL><INDENT>if symbol not in self.nonterminals:<EOL><INDENT>continue<EOL><DEDENT>first = self.first(production.rhs[i + <NUM_LIT:1>:])<EOL>new_follow = first - set([EPSILON])<EOL>if EPSILON in first or i == (len(production.rhs) - <NUM_LIT:1>):<EOL><INDENT>new_follow |= self._follow[nonterminal]<EOL><DEDENT>if new_follow - self._follow[symbol]:<EOL><INDENT>self._follow[symbol] |= new_follow<EOL>changed = True<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if not changed:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Computes the FOLLOW set for every non-terminal in the grammar.\n\n        Tenatively based on _compute_follow in PLY.", "id": "f10574:c2:m3"}
{"signature": "def closure(self, rules):", "body": "closure = set()<EOL>todo = set(rules)<EOL>while todo:<EOL><INDENT>rule = todo.pop()<EOL>closure.add(rule)<EOL>if rule.at_end:<EOL><INDENT>continue<EOL><DEDENT>symbol = rule.rhs[rule.pos]<EOL>for production in self.nonterminals[symbol]:<EOL><INDENT>for first in self.first(rule.rest):<EOL><INDENT>if EPSILON in production.rhs:<EOL><INDENT>new_rule = DottedRule(production, <NUM_LIT:1>, first)<EOL><DEDENT>else:<EOL><INDENT>new_rule = DottedRule(production, <NUM_LIT:0>, first)<EOL><DEDENT>if new_rule not in closure:<EOL><INDENT>todo.add(new_rule)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return frozenset(closure)<EOL>", "docstring": "Fills out the entire closure based on some initial dotted rules.\n\n        Args:\n            rules - an iterable of DottedRules\n\n        Returns: frozenset of DottedRules", "id": "f10574:c2:m6"}
{"signature": "def move_dot(self):", "body": "return self.__class__(self.production, self.pos + <NUM_LIT:1>, self.lookahead)<EOL>", "docstring": "Returns the DottedRule that results from moving the dot.", "id": "f10574:c1:m7"}
{"signature": "def initial_closure(self):", "body": "first_rule = DottedRule(self.start, <NUM_LIT:0>, END_OF_INPUT)<EOL>return self.closure([first_rule])<EOL>", "docstring": "Computes the initial closure using the START_foo production.", "id": "f10574:c2:m4"}
{"signature": "@staticmethod<EOL><INDENT>def compute_precedence(terminals, productions, precedence_levels):<DEDENT>", "body": "precedence = collections.OrderedDict()<EOL>for terminal in terminals:<EOL><INDENT>precedence[terminal] = DEFAULT_PREC<EOL><DEDENT>level_precs = range(len(precedence_levels), <NUM_LIT:0>, -<NUM_LIT:1>)<EOL>for i, level in zip(level_precs, precedence_levels):<EOL><INDENT>assoc = level[<NUM_LIT:0>]<EOL>for symbol in level[<NUM_LIT:1>:]:<EOL><INDENT>precedence[symbol] = (assoc, i)<EOL><DEDENT><DEDENT>for production, prec_symbol in productions:<EOL><INDENT>if prec_symbol is None:<EOL><INDENT>prod_terminals = [symbol for symbol in production.rhs<EOL>if symbol in terminals] or [None]<EOL>precedence[production] = precedence.get(prod_terminals[-<NUM_LIT:1>],<EOL>DEFAULT_PREC)<EOL><DEDENT>else:<EOL><INDENT>precedence[production] = precedence.get(prec_symbol,<EOL>DEFAULT_PREC)<EOL><DEDENT><DEDENT>return precedence<EOL>", "docstring": "Computes the precedence of terminal and production.\n\n        The precedence of a terminal is it's level in the PRECEDENCE tuple. For\n        a production, the precedence is the right-most terminal (if it exists).\n        The default precedence is DEFAULT_PREC - (LEFT, 0).\n\n        Returns:\n            precedence - dict[terminal | production] = (assoc, level)", "id": "f10580:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def make_tables(grammar, precedence):<DEDENT>", "body": "ACTION = {}<EOL>GOTO = {}<EOL>labels = {}<EOL>def get_label(closure):<EOL><INDENT>if closure not in labels:<EOL><INDENT>labels[closure] = len(labels)<EOL><DEDENT>return labels[closure]<EOL><DEDENT>def resolve_shift_reduce(lookahead, s_action, r_action):<EOL><INDENT>s_assoc, s_level = precedence[lookahead]<EOL>r_assoc, r_level = precedence[r_action[<NUM_LIT:1>]]<EOL>if s_level < r_level:<EOL><INDENT>return r_action<EOL><DEDENT>elif s_level == r_level and r_assoc == LEFT:<EOL><INDENT>return r_action<EOL><DEDENT>else:<EOL><INDENT>return s_action<EOL><DEDENT><DEDENT>initial, closures, goto = grammar.closures()<EOL>for closure in closures:<EOL><INDENT>label = get_label(closure)<EOL>for rule in closure:<EOL><INDENT>new_action, lookahead = None, rule.lookahead<EOL>if not rule.at_end:<EOL><INDENT>symbol = rule.rhs[rule.pos]<EOL>is_terminal = symbol in grammar.terminals<EOL>has_goto = symbol in goto[closure]<EOL>if is_terminal and has_goto:<EOL><INDENT>next_state = get_label(goto[closure][symbol])<EOL>new_action, lookahead = ('<STR_LIT>', next_state), symbol<EOL><DEDENT><DEDENT>elif rule.production == grammar.start and rule.at_end:<EOL><INDENT>new_action = ('<STR_LIT>',)<EOL><DEDENT>elif rule.at_end:<EOL><INDENT>new_action = ('<STR_LIT>', rule.production)<EOL><DEDENT>if new_action is None:<EOL><INDENT>continue<EOL><DEDENT>prev_action = ACTION.get((label, lookahead))<EOL>if prev_action is None or prev_action == new_action:<EOL><INDENT>ACTION[label, lookahead] = new_action<EOL><DEDENT>else:<EOL><INDENT>types = (prev_action[<NUM_LIT:0>], new_action[<NUM_LIT:0>])<EOL>if types == ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>chosen = resolve_shift_reduce(lookahead,<EOL>prev_action,<EOL>new_action)<EOL><DEDENT>elif types == ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>chosen = resolve_shift_reduce(lookahead,<EOL>new_action,<EOL>prev_action)<EOL><DEDENT>else:<EOL><INDENT>raise TableConflictError(prev_action, new_action)<EOL><DEDENT>ACTION[label, lookahead] = chosen<EOL><DEDENT><DEDENT>for symbol in grammar.nonterminals:<EOL><INDENT>if symbol in goto[closure]:<EOL><INDENT>GOTO[label, symbol] = get_label(goto[closure][symbol])<EOL><DEDENT><DEDENT><DEDENT>return get_label(initial), ACTION, GOTO<EOL>", "docstring": "Generates the ACTION and GOTO tables for the grammar.\n\n        Returns:\n            action - dict[state][lookahead] = (action, ...)\n            goto - dict[state][just_reduced] = new_state", "id": "f10580:c0:m2"}
{"signature": "def document_func_view(serializer_class=None,<EOL>response_serializer_class=None,<EOL>filter_backends=None,<EOL>permission_classes=None,<EOL>authentication_classes=None,<EOL>doc_format_args=list(),<EOL>doc_format_kwargs=dict()):", "body": "def decorator(func):<EOL><INDENT>if serializer_class:<EOL><INDENT>func.cls.serializer_class = func.view_class.serializer_class = serializer_class<EOL><DEDENT>if response_serializer_class:<EOL><INDENT>func.cls.response_serializer_class = func.view_class.response_serializer_class = response_serializer_class<EOL><DEDENT>if filter_backends:<EOL><INDENT>func.cls.filter_backends = func.view_class.filter_backends = filter_backends<EOL><DEDENT>if permission_classes:<EOL><INDENT>func.cls.permission_classes = func.view_class.permission_classes = permission_classes<EOL><DEDENT>if authentication_classes:<EOL><INDENT>func.cls.authentication_classes = func.view_class.authentication_classes = authentication_classes<EOL><DEDENT>if doc_format_args or doc_format_kwargs:<EOL><INDENT>func.cls.__doc__ = func.view_class.__doc__ = getdoc(func).format(*doc_format_args, **doc_format_kwargs)<EOL><DEDENT>return func<EOL><DEDENT>return decorator<EOL>", "docstring": "Decorator to make functional view documentable via drf-autodocs", "id": "f10612:m0"}
{"signature": "def remove(path):", "body": "try:<EOL><INDENT>if isfile(path):<EOL><INDENT>os.remove(path)<EOL><DEDENT>else:<EOL><INDENT>shutil.rmtree(path, ignore_errors=True)<EOL><DEDENT><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Remove file or dir if exist", "id": "f10614:m1"}
{"signature": "def copy(src, dst):", "body": "(szip, dzip) = (src.endswith(\"<STR_LIT>\"), dst.endswith(\"<STR_LIT>\"))<EOL>logging.info(\"<STR_LIT>\"%(src, dst))<EOL>if szip and dzip:<EOL><INDENT>shutil.copy2(src, dst)<EOL><DEDENT>elif szip:<EOL><INDENT>with zipfile.ZipFile(src, mode='<STR_LIT:r>') as z:<EOL><INDENT>tmpdir = tempfile.mkdtemp()<EOL>try:<EOL><INDENT>z.extractall(tmpdir)<EOL>if len(z.namelist()) != <NUM_LIT:1>:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"\"<STR_LIT>\"%src)<EOL><DEDENT>tmpfile = join(tmpdir,z.namelist()[<NUM_LIT:0>])<EOL>try:<EOL><INDENT>os.remove(dst)<EOL><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT>shutil.move(tmpfile, dst)<EOL><DEDENT>finally:<EOL><INDENT>shutil.rmtree(tmpdir, ignore_errors=True)<EOL><DEDENT><DEDENT><DEDENT>elif dzip:<EOL><INDENT>with zipfile.ZipFile(dst, mode='<STR_LIT:w>', compression=ZIP_DEFLATED) as z:<EOL><INDENT>z.write(src, arcname=basename(src))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>shutil.copy2(src, dst)<EOL><DEDENT>", "docstring": "File copy that support compress and decompress of zip files", "id": "f10614:m0"}
{"signature": "def cmd_init_push_to_cloud(args):", "body": "(lcat, ccat) = (args.local_catalog, args.cloud_catalog)<EOL>logging.info(\"<STR_LIT>\"%(lcat, ccat))<EOL>if not isfile(lcat):<EOL><INDENT>args.error(\"<STR_LIT>\"%lcat)<EOL><DEDENT>if isfile(ccat):<EOL><INDENT>args.error(\"<STR_LIT>\"%ccat)<EOL><DEDENT>(lmeta, cmeta) = (\"<STR_LIT>\"%lcat, \"<STR_LIT>\"%ccat)<EOL>if isfile(lmeta):<EOL><INDENT>args.error(\"<STR_LIT>\"%lmeta)<EOL><DEDENT>if isfile(cmeta):<EOL><INDENT>args.error(\"<STR_LIT>\"%cmeta)<EOL><DEDENT>logging.info(\"<STR_LIT>\"%(lcat))<EOL>if not lock_file(lcat):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"%lcat)<EOL><DEDENT>util.copy(lcat, ccat)<EOL>mfile = MetaFile(lmeta)<EOL>utcnow = datetime.utcnow().strftime(DATETIME_FORMAT)[:-<NUM_LIT:4>]<EOL>mfile['<STR_LIT>']['<STR_LIT>'] = hashsum(lcat)<EOL>mfile['<STR_LIT>']['<STR_LIT>'] = utcnow<EOL>mfile['<STR_LIT>']['<STR_LIT:filename>'] = lcat<EOL>mfile['<STR_LIT>']['<STR_LIT:filename>'] = ccat<EOL>mfile['<STR_LIT>']['<STR_LIT>'] = hashsum(lcat)<EOL>mfile['<STR_LIT>']['<STR_LIT>'] = utcnow<EOL>mfile.flush()<EOL>mfile = MetaFile(cmeta)<EOL>mfile['<STR_LIT>']['<STR_LIT>'] = True<EOL>mfile['<STR_LIT>']['<STR_LIT>'] = hashsum(lcat)<EOL>mfile['<STR_LIT>']['<STR_LIT>'] = utcnow<EOL>mfile['<STR_LIT>']['<STR_LIT:filename>'] = basename(ccat)<EOL>mfile.flush()<EOL>if not args.no_smart_previews:<EOL><INDENT>copy_smart_previews(lcat, ccat, local2cloud=True)<EOL><DEDENT>logging.info(\"<STR_LIT>\"%(lcat))<EOL>unlock_file(lcat)<EOL>logging.info(\"<STR_LIT>\")<EOL>", "docstring": "Initiate the local catalog and push it the cloud", "id": "f10617:m4"}
{"signature": "def copy_smart_previews(local_catalog, cloud_catalog, local2cloud=True):", "body": "lcat_noext = local_catalog[<NUM_LIT:0>:local_catalog.rfind(\"<STR_LIT>\")]<EOL>ccat_noext = cloud_catalog[<NUM_LIT:0>:cloud_catalog.rfind(\"<STR_LIT>\")]<EOL>lsmart = join(dirname(local_catalog),\"<STR_LIT>\"%basename(lcat_noext))<EOL>csmart = join(dirname(cloud_catalog),\"<STR_LIT>\"%basename(ccat_noext))<EOL>if local2cloud and os.path.isdir(lsmart):<EOL><INDENT>logging.info(\"<STR_LIT>\"%(lsmart, csmart))<EOL>distutils.dir_util.copy_tree(lsmart,csmart, update=<NUM_LIT:1>)<EOL><DEDENT>elif os.path.isdir(csmart):<EOL><INDENT>logging.info(\"<STR_LIT>\"%(csmart, lsmart))<EOL>distutils.dir_util.copy_tree(csmart,lsmart, update=<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Copy Smart Previews from local to cloud or\n       vica versa when 'local2cloud==False'\n       NB: nothing happens if source dir doesn't exist", "id": "f10617:m2"}
{"signature": "def write(args):", "body": "logging.info(\"<STR_LIT>\"%args.config_file)<EOL>if args.config_file is None:<EOL><INDENT>return<EOL><DEDENT>config = cparser.ConfigParser()<EOL>config.add_section(\"<STR_LIT>\")<EOL>for p in [x for x in dir(args) if not x.startswith(\"<STR_LIT:_>\")]:<EOL><INDENT>if p in IGNORE_ARGS:<EOL><INDENT>continue<EOL><DEDENT>value = getattr(args, p)<EOL>if value is not None:<EOL><INDENT>config.set('<STR_LIT>', p, str(value))<EOL><DEDENT><DEDENT>with open(args.config_file, '<STR_LIT:w>') as f:<EOL><INDENT>config.write(f)<EOL><DEDENT>", "docstring": "Writing the configure file with the attributes in 'args", "id": "f10619:m1"}
{"signature": "def read(args):", "body": "if args.config_file is None or not isfile(args.config_file):<EOL><INDENT>return<EOL><DEDENT>logging.info(\"<STR_LIT>\"%args.config_file)<EOL>config = cparser.ConfigParser()<EOL>config.read(args.config_file)<EOL>if not config.has_section('<STR_LIT>'):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>for (name, value) in config.items('<STR_LIT>'):<EOL><INDENT>if value == \"<STR_LIT:True>\":<EOL><INDENT>value = True<EOL><DEDENT>elif value == \"<STR_LIT:False>\":<EOL><INDENT>value = False<EOL><DEDENT>if getattr(args, name) is None:<EOL><INDENT>setattr(args, name, value)<EOL><DEDENT><DEDENT>", "docstring": "Reading the configure file and adds non-existing attributes to 'args", "id": "f10619:m0"}
{"signature": "@utils.retry_loop(<NUM_LIT:3>)<EOL><INDENT>def update_group_states_for_vifs(self, vifs, ack):<DEDENT>", "body": "vif_keys = [self.vif_key(vif.device_id, vif.mac_address)<EOL>for vif in vifs]<EOL>self.set_fields(vif_keys, SECURITY_GROUP_ACK, ack)<EOL>", "docstring": "Updates security groups by setting the ack field", "id": "f10622:c0:m8"}
{"signature": "def serialize_rules(self, rules):", "body": "<EOL>serialized = []<EOL>for rule in rules:<EOL><INDENT>direction = rule[\"<STR_LIT>\"]<EOL>source = '<STR_LIT>'<EOL>destination = '<STR_LIT>'<EOL>if rule.get(\"<STR_LIT>\"):<EOL><INDENT>prefix = rule[\"<STR_LIT>\"]<EOL>if direction == \"<STR_LIT>\":<EOL><INDENT>source = self._convert_remote_network(prefix)<EOL><DEDENT>else:<EOL><INDENT>if (Capabilities.EGRESS not in<EOL>CONF.QUARK.environment_capabilities):<EOL><INDENT>raise q_exc.EgressSecurityGroupRulesNotEnabled()<EOL><DEDENT>else:<EOL><INDENT>destination = self._convert_remote_network(prefix)<EOL><DEDENT><DEDENT><DEDENT>optional_fields = {}<EOL>protocol_map = protocols.PROTOCOL_MAP[rule[\"<STR_LIT>\"]]<EOL>if rule[\"<STR_LIT>\"] == protocol_map[\"<STR_LIT>\"]:<EOL><INDENT>optional_fields[\"<STR_LIT>\"] = rule[\"<STR_LIT>\"]<EOL>optional_fields[\"<STR_LIT>\"] = rule[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>optional_fields[\"<STR_LIT>\"] = rule[\"<STR_LIT>\"]<EOL>optional_fields[\"<STR_LIT>\"] = rule[\"<STR_LIT>\"]<EOL><DEDENT>payload = {\"<STR_LIT>\": rule[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": rule[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": source,<EOL>\"<STR_LIT>\": destination,<EOL>\"<STR_LIT:action>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": direction}<EOL>payload.update(optional_fields)<EOL>serialized.append(payload)<EOL><DEDENT>return serialized<EOL>", "docstring": "Creates a payload for the redis server.", "id": "f10622:c0:m1"}
{"signature": "def filter_factory(global_conf, **local_conf):", "body": "conf = global_conf.copy()<EOL>conf.update(local_conf)<EOL>def wrapper(app):<EOL><INDENT>return ResponseAsyncIdAdder(app, conf)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Returns a WSGI filter app for use with paste.deploy.", "id": "f10626:m0"}
{"signature": "def make_case2(context):", "body": "query = context.session.query(models.IPAddress)<EOL>period_start, period_end = billing.calc_periods()<EOL>ip_list = billing.build_full_day_ips(query, period_start, period_end)<EOL>import random<EOL>ind = random.randint(<NUM_LIT:0>, len(ip_list) - <NUM_LIT:1>)<EOL>address = ip_list[ind]<EOL>address.allocated_at = datetime.datetime.utcnow() -datetime.timedelta(days=<NUM_LIT:1>)<EOL>context.session.add(address)<EOL>context.session.flush()<EOL>", "docstring": "This is a helper method for testing.\n\n    When run with the current context, it will create a case 2 entries\n    in the database. See top of file for what case 2 is.", "id": "f10628:m0"}
{"signature": "def serve_rpc(self):", "body": "if cfg.CONF.QUARK_ASYNC.rpc_workers < <NUM_LIT:1>:<EOL><INDENT>cfg.CONF.set_override('<STR_LIT>', <NUM_LIT:1>, \"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>rpc = service.RpcWorker(self.plugins)<EOL>launcher = common_service.ProcessLauncher(CONF, wait_interval=<NUM_LIT:1.0>)<EOL>launcher.launch_service(rpc, workers=CONF.QUARK_ASYNC.rpc_workers)<EOL>return launcher<EOL><DEDENT>except Exception:<EOL><INDENT>with excutils.save_and_reraise_exception():<EOL><INDENT>LOG.exception(_LE('<STR_LIT>'<EOL>'<STR_LIT>'))<EOL><DEDENT><DEDENT>", "docstring": "Launches configured # of workers per loaded plugin.", "id": "f10629:c0:m4"}
{"signature": "def run(self):", "body": "self.start_api_and_rpc_workers()<EOL>", "docstring": "Start of async worker process.", "id": "f10629:c0:m6"}
{"signature": "def _populate_segment_allocation_range(self, sa_range):", "body": "<EOL>id_range = xrange(sa_range['<STR_LIT>'],<EOL>sa_range['<STR_LIT>'] + <NUM_LIT:1>)<EOL>sa_dicts = []<EOL>total = <NUM_LIT:0><EOL>for i in id_range:<EOL><INDENT>sa_dicts.append({<EOL>'<STR_LIT>': sa_range['<STR_LIT>'],<EOL>'<STR_LIT>': sa_range['<STR_LIT>'],<EOL>'<STR_LIT:id>': i,<EOL>'<STR_LIT>': sa_range['<STR_LIT:id>'],<EOL>'<STR_LIT>': True<EOL>})<EOL>total = total + <NUM_LIT:1><EOL><DEDENT>db_api.segment_allocation_range_populate_bulk(self.context, sa_dicts)<EOL>self.context.session.flush()<EOL>allocs = db_api.segment_allocation_find(<EOL>self.context, segment_allocation_range_id=sa_range['<STR_LIT:id>']).all()<EOL>self.assertEqual(len(allocs), len(id_range))<EOL>", "docstring": "Populate a given segment range.", "id": "f10649:c0:m2"}
{"signature": "def _sa_range_to_dict(self, sa_range, allocations=None):", "body": "size = (sa_range['<STR_LIT>'] + <NUM_LIT:1>) - sa_range['<STR_LIT>']<EOL>sa_range_dict = dict(sa_range)<EOL>sa_range_dict.pop('<STR_LIT>')<EOL>sa_range_dict['<STR_LIT:size>'] = size<EOL>if allocations is not None:<EOL><INDENT>sa_range_dict['<STR_LIT>'] = size - allocations<EOL><DEDENT>return sa_range_dict<EOL>", "docstring": "Helper to turn a model into a dict for assertions.", "id": "f10649:c0:m5"}
{"signature": "def _assert_tags(self, model, tags=None):", "body": "tags = tags if tags else []<EOL>expected_tags = (self.existing_tags + tags)<EOL>self.assertEqual(sorted(model.tags),<EOL>sorted(expected_tags))<EOL>", "docstring": "Assert given tags and already existing tags are present.", "id": "f10674:c3:m2"}
{"signature": "def get_columns(table):", "body": "inspector = get_inspector()<EOL>return inspector.get_columns(table)<EOL>", "docstring": "Returns list of columns for given table.", "id": "f10710:m2"}
{"signature": "def sg_gather_associated_ports(context, group):", "body": "if not group:<EOL><INDENT>return None<EOL><DEDENT>if not hasattr(group, \"<STR_LIT>\") or len(group.ports) <= <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>return group.ports<EOL>", "docstring": "Gather all ports associated to security group.\n\n    Returns:\n    * list, or None", "id": "f10742:m62"}
{"signature": "def segment_allocation_range_populate_bulk(context, sa_dicts):", "body": "context.session.bulk_insert_mappings(<EOL>models.SegmentAllocation,<EOL>sa_dicts<EOL>)<EOL>", "docstring": "Bulk-insert deallocated segment allocations.\n\n    NOTE(morgabra): This is quite performant when populating large ranges,\n    but you don't get any ORM conveniences or protections here.", "id": "f10742:m91"}
{"signature": "def security_group_rule_update(context, rule, **kwargs):", "body": "rule.update(kwargs)<EOL>context.session.add(rule)<EOL>return rule<EOL>", "docstring": "Updates a security group rule.\n\n    NOTE(alexm) this is non-standard functionality.", "id": "f10742:m72"}
{"signature": "def is_tag(self, tag):", "body": "return tag[<NUM_LIT:0>:len(self.get_prefix())] == self.get_prefix()<EOL>", "docstring": "Is a given tag this type?", "id": "f10743:c1:m9"}
{"signature": "def validate(self, value):", "body": "try:<EOL><INDENT>vlan_id_int = int(value)<EOL>assert vlan_id_int >= self.MIN_VLAN_ID<EOL>assert vlan_id_int <= self.MAX_VLAN_ID<EOL><DEDENT>except Exception:<EOL><INDENT>msg = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % {'<STR_LIT>': value,<EOL>'<STR_LIT>': self.MIN_VLAN_ID,<EOL>'<STR_LIT>': self.MAX_VLAN_ID})<EOL>raise TagValidationError(value, msg)<EOL><DEDENT>return True<EOL>", "docstring": "Validates a VLAN ID.\n\n        :param value: The VLAN ID to validate against.\n        :raises TagValidationError: Raised if the VLAN ID is invalid.", "id": "f10743:c2:m0"}
{"signature": "def pop(self, model):", "body": "tags = self._pop(model)<EOL>if tags:<EOL><INDENT>for tag in tags:<EOL><INDENT>value = self.deserialize(tag)<EOL>try:<EOL><INDENT>self.validate(value)<EOL>return value<EOL><DEDENT>except TagValidationError:<EOL><INDENT>continue<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Pop all matching tags off the port, return a valid one.", "id": "f10743:c1:m8"}
{"signature": "def set(self, model, value):", "body": "self.validate(value)<EOL>self._pop(model)<EOL>value = self.serialize(value)<EOL>model.tags.append(value)<EOL>", "docstring": "Set tag on model object.", "id": "f10743:c1:m5"}
{"signature": "def update_ports_for_sg(self, context, portid, jobid):", "body": "port = db_api.port_find(context, id=portid, scope=db_api.ONE)<EOL>if not port:<EOL><INDENT>LOG.warning(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>net_driver = port_api._get_net_driver(port.network, port=port)<EOL>base_net_driver = port_api._get_net_driver(port.network)<EOL>sg_list = [sg for sg in port.security_groups]<EOL>success = False<EOL>error = None<EOL>retries = <NUM_LIT:3><EOL>retry_delay = <NUM_LIT:2><EOL>for retry in xrange(retries):<EOL><INDENT>try:<EOL><INDENT>net_driver.update_port(context, port_id=port[\"<STR_LIT>\"],<EOL>mac_address=port[\"<STR_LIT>\"],<EOL>device_id=port[\"<STR_LIT>\"],<EOL>base_net_driver=base_net_driver,<EOL>security_groups=sg_list)<EOL>success = True<EOL>error = None<EOL>break<EOL><DEDENT>except Exception as error:<EOL><INDENT>LOG.warning(\"<STR_LIT>\")<EOL>time.sleep(retry_delay)<EOL><DEDENT><DEDENT>status_str = \"<STR_LIT>\"<EOL>if not success:<EOL><INDENT>status_str = \"<STR_LIT>\" % (<EOL>portid, retries, error)<EOL><DEDENT>update_body = dict(completed=True, status=status_str)<EOL>update_body = dict(job=update_body)<EOL>job_api.update_job(context.elevated(), jobid, update_body)<EOL>", "docstring": "Updates the ports through redis.", "id": "f10744:c6:m0"}
{"signature": "def start_rpc_listeners(self):", "body": "self._setup_rpc()<EOL>if not self.endpoints:<EOL><INDENT>return []<EOL><DEDENT>self.conn = n_rpc.create_connection()<EOL>self.conn.create_consumer(self.topic, self.endpoints,<EOL>fanout=False)<EOL>return self.conn.consume_in_threads()<EOL>", "docstring": "Configure all listeners here", "id": "f10746:c0:m2"}
{"signature": "def build_payload(ipaddress,<EOL>event_type,<EOL>event_time=None,<EOL>start_time=None,<EOL>end_time=None):", "body": "<EOL>payload = {<EOL>'<STR_LIT>': str(event_type),<EOL>'<STR_LIT>': str(ipaddress.used_by_tenant_id),<EOL>'<STR_LIT>': str(ipaddress.address_readable),<EOL>'<STR_LIT>': int(ipaddress.version),<EOL>'<STR_LIT>': str(ipaddress.address_type),<EOL>'<STR_LIT:id>': str(ipaddress.id)<EOL>}<EOL>if event_type == IP_EXISTS:<EOL><INDENT>if start_time is None or end_time is None:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>.format(event_type))<EOL><DEDENT>payload.update({<EOL>'<STR_LIT>': str(convert_timestamp(start_time)),<EOL>'<STR_LIT>': str(convert_timestamp(end_time))<EOL>})<EOL><DEDENT>elif event_type in [IP_ADD, IP_DEL, IP_ASSOC, IP_DISASSOC]:<EOL><INDENT>if event_time is None:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>.format(event_type))<EOL><DEDENT>payload.update({<EOL>'<STR_LIT>': str(convert_timestamp(event_time)),<EOL>'<STR_LIT>': str(ipaddress.subnet_id),<EOL>'<STR_LIT>': str(ipaddress.network_id),<EOL>'<STR_LIT>': True if ipaddress.network_id == PUBLIC_NETWORK_ID<EOL>else False,<EOL>})<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(event_type))<EOL><DEDENT>return payload<EOL>", "docstring": "Method builds a payload out of the passed arguments.\n\n    Parameters:\n        `ipaddress`: the models.IPAddress object\n        `event_type`: USAGE,CREATE,DELETE,SUSPEND,or UNSUSPEND\n        `start_time`: startTime for cloudfeeds\n        `end_time`: endTime for cloudfeeds\n    Returns a dictionary suitable to notify billing.\n    Message types mapping to cloud feeds for references:\n        ip.exists       - USAGE\n        ip.add          - CREATE\n        ip.delete       - DELETE\n        ip.associate    - UP\n        ip.disassociate  - DOWN\n    Refer to: http://rax.io/cf-api for more details.", "id": "f10747:m2"}
{"signature": "def convert_timestamp(ts):", "body": "return ts.replace(microsecond=<NUM_LIT:0>).isoformat() + '<STR_LIT>'<EOL>", "docstring": "Converts the timestamp to a format suitable for Billing.\n\n    Examples of a good timestamp for startTime, endTime, and eventTime:\n        '2016-05-20T00:00:00Z'\n    We must drop microseconds so that Yagi does not get upset.\n    Note the trailing 'Z'. Python does not add the 'Z' so we tack it on\n    ourselves.", "id": "f10747:m7"}
{"signature": "def build_full_day_ips(query, period_start, period_end):", "body": "<EOL>ip_list = query.filter(models.IPAddress.version == <NUM_LIT:4>).filter(models.IPAddress.network_id == PUBLIC_NETWORK_ID).filter(models.IPAddress.used_by_tenant_id is not None).filter(models.IPAddress.allocated_at != null()).filter(models.IPAddress.allocated_at < period_start).filter(or_(models.IPAddress._deallocated is False,<EOL>models.IPAddress.deallocated_at == null(),<EOL>models.IPAddress.deallocated_at >= period_end)).all()<EOL>return ip_list<EOL>", "docstring": "Method to build an IP list for the case 1\n\n    when the IP was allocated before the period start\n    and is still allocated after the period end.\n    This method only looks at public IPv4 addresses.", "id": "f10747:m3"}
{"signature": "def get_instances(self, session):", "body": "LOG.debug(\"<STR_LIT>\")<EOL>recs = session.xenapi.VM.get_all_records()<EOL>is_inst = lambda r: (r['<STR_LIT>'].lower() == '<STR_LIT>' and<EOL>not r['<STR_LIT>'] and<EOL>not r['<STR_LIT>'] and<EOL>('<STR_LIT>' in r['<STR_LIT>'] or<EOL>r['<STR_LIT>'].startswith('<STR_LIT>')))<EOL>instances = dict()<EOL>for vm_ref, rec in recs.iteritems():<EOL><INDENT>if not is_inst(rec):<EOL><INDENT>continue<EOL><DEDENT>instances[vm_ref] = VM(ref=vm_ref,<EOL>uuid=rec[\"<STR_LIT>\"][\"<STR_LIT>\"],<EOL>vifs=rec[\"<STR_LIT>\"],<EOL>dom_id=rec[\"<STR_LIT>\"])<EOL><DEDENT>return instances<EOL>", "docstring": "Returns a dict of `VM OpaqueRef` (str) -> `xapi.VM`.", "id": "f10748:c1:m3"}
{"signature": "def update_interfaces(self, added_sg, updated_sg, removed_sg):", "body": "if not (added_sg or updated_sg or removed_sg):<EOL><INDENT>return<EOL><DEDENT>with self.sessioned() as session:<EOL><INDENT>self._set_security_groups(session, added_sg)<EOL>self._unset_security_groups(session, removed_sg)<EOL>combined = added_sg + updated_sg + removed_sg<EOL>self._refresh_interfaces(session, combined)<EOL><DEDENT>", "docstring": "Handles changes to interfaces' security groups\n\n        Calls refresh_interfaces on argument VIFs. Set security groups on\n        added_sg's VIFs. Unsets security groups on removed_sg's VIFs.", "id": "f10748:c1:m8"}
{"signature": "def __init__(self, device_id, record, ref):", "body": "self.device_id = device_id<EOL>self.record = record<EOL>self.ref = ref<EOL>self.success = False<EOL>", "docstring": "Constructs VIF\n\n        `device_id` and `mac_address` should be strings if they will later be\n        compared to decoded VIF instances (via from_string).\n\n        `ref` is the OpaqueRef string for the vif as returned from xenapi.", "id": "f10748:c0:m0"}
{"signature": "def run():", "body": "groups_client = sg_cli.SecurityGroupsClient()<EOL>xapi_client = xapi.XapiClient()<EOL>interfaces = set()<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>interfaces = xapi_client.get_interfaces()<EOL><DEDENT>except Exception:<EOL><INDENT>LOG.exception(\"<STR_LIT>\")<EOL>_sleep()<EOL>continue<EOL><DEDENT>try:<EOL><INDENT>sg_states = groups_client.get_security_group_states(interfaces)<EOL>new_sg, updated_sg, removed_sg = partition_vifs(xapi_client,<EOL>interfaces,<EOL>sg_states)<EOL>xapi_client.update_interfaces(new_sg, updated_sg, removed_sg)<EOL>groups_to_ack = [v for v in new_sg + updated_sg if v.success]<EOL>sg_sts_curr = groups_client.get_security_group_states(interfaces)<EOL>groups_to_ack = get_groups_to_ack(groups_to_ack, sg_states,<EOL>sg_sts_curr)<EOL>ack_groups(groups_client, groups_to_ack)<EOL><DEDENT>except Exception:<EOL><INDENT>LOG.exception(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>_sleep()<EOL>continue<EOL><DEDENT>_sleep()<EOL><DEDENT>", "docstring": "Fetches changes and applies them to VIFs periodically\n\n    Process as of RM11449:\n    * Get all groups from redis\n    * Fetch ALL VIFs from Xen\n    * Walk ALL VIFs and partition them into added, updated and removed\n    * Walk the final \"modified\" VIFs list and apply flows to each", "id": "f10749:m5"}
{"signature": "def is_isonet_vif(vif):", "body": "nicira_iface_id = vif.record.get('<STR_LIT>').get('<STR_LIT>')<EOL>if nicira_iface_id:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Determine if a vif is on isonet\n\n    Returns True if a vif belongs to an isolated network by checking\n    for a nicira interface id.", "id": "f10749:m1"}
{"signature": "def partition_vifs(xapi_client, interfaces, security_group_states):", "body": "added = []<EOL>updated = []<EOL>removed = []<EOL>for vif in interfaces:<EOL><INDENT>if ('<STR_LIT>' in CONF.QUARK.environment_capabilities and<EOL>is_isonet_vif(vif)):<EOL><INDENT>continue<EOL><DEDENT>vif_has_groups = vif in security_group_states<EOL>if vif.tagged and vif_has_groups andsecurity_group_states[vif][sg_cli.SECURITY_GROUP_ACK]:<EOL><INDENT>continue<EOL><DEDENT>if vif.tagged:<EOL><INDENT>if vif_has_groups:<EOL><INDENT>updated.append(vif)<EOL><DEDENT>else:<EOL><INDENT>removed.append(vif)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if vif_has_groups:<EOL><INDENT>added.append(vif)<EOL><DEDENT><DEDENT><DEDENT>return added, updated, removed<EOL>", "docstring": "Splits VIFs into three explicit categories and one implicit\n\n    Added - Groups exist in Redis that have not been ack'd and the VIF\n            is not tagged.\n            Action: Tag the VIF and apply flows\n    Updated - Groups exist in Redis that have not been ack'd and the VIF\n              is already tagged\n              Action: Do not tag the VIF, do apply flows\n    Removed - Groups do NOT exist in Redis but the VIF is tagged\n              Action: Untag the VIF, apply default flows\n    Self-Heal - Groups are ack'd in Redis but the VIF is untagged. We treat\n                this case as if it were an \"added\" group.\n                Action: Tag the VIF and apply flows\n    NOOP - The VIF is not tagged and there are no matching groups in Redis.\n           This is our implicit category\n           Action: Do nothing", "id": "f10749:m2"}
{"signature": "def _check_collisions(self, new_range, existing_ranges):", "body": "def _contains(num, r1):<EOL><INDENT>return (num >= r1[<NUM_LIT:0>] and<EOL>num <= r1[<NUM_LIT:1>])<EOL><DEDENT>def _is_overlap(r1, r2):<EOL><INDENT>return (_contains(r1[<NUM_LIT:0>], r2) or<EOL>_contains(r1[<NUM_LIT:1>], r2) or<EOL>_contains(r2[<NUM_LIT:0>], r1) or<EOL>_contains(r2[<NUM_LIT:1>], r1))<EOL><DEDENT>for existing_range in existing_ranges:<EOL><INDENT>if _is_overlap(new_range, existing_range):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Check for overlapping ranges.", "id": "f10751:c0:m2"}
{"signature": "def _try_allocate(self, context, segment_id, network_id):", "body": "LOG.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% (network_id, segment_id, self.segment_type))<EOL>filter_dict = {<EOL>\"<STR_LIT>\": segment_id,<EOL>\"<STR_LIT>\": self.segment_type,<EOL>\"<STR_LIT>\": False<EOL>}<EOL>available_ranges = db_api.segment_allocation_range_find(<EOL>context, scope=db_api.ALL, **filter_dict)<EOL>available_range_ids = [r[\"<STR_LIT:id>\"] for r in available_ranges]<EOL>try:<EOL><INDENT>with context.session.begin(subtransactions=True):<EOL><INDENT>filter_dict = {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT>\": segment_id,<EOL>\"<STR_LIT>\": self.segment_type,<EOL>\"<STR_LIT>\": available_range_ids<EOL>}<EOL>allocations = db_api.segment_allocation_find(<EOL>context, lock_mode=True, **filter_dict).limit(<NUM_LIT:100>).all()<EOL>if allocations:<EOL><INDENT>allocation = random.choice(allocations)<EOL>update_dict = {<EOL>\"<STR_LIT>\": False,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": network_id<EOL>}<EOL>allocation = db_api.segment_allocation_update(<EOL>context, allocation, **update_dict)<EOL>LOG.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% (allocation[\"<STR_LIT:id>\"], network_id, segment_id,<EOL>self.segment_type))<EOL>return allocation<EOL><DEDENT><DEDENT><DEDENT>except Exception:<EOL><INDENT>LOG.exception(\"<STR_LIT>\")<EOL><DEDENT>LOG.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% (network_id, segment_id, self.segment_type))<EOL>", "docstring": "Find a deallocated network segment id and reallocate it.\n\n        NOTE(morgabra) This locks the segment table, but only the rows\n        in use by the segment, which is pretty handy if we ever have\n        more than 1 segment or segment type.", "id": "f10751:c0:m8"}
{"signature": "def delete_scalingip(context, id):", "body": "LOG.info('<STR_LIT>' % (id, context.tenant_id))<EOL>_delete_flip(context, id, ip_types.SCALING)<EOL>", "docstring": "Deallocate a scaling IP.\n\n    :param context: neutron api request context.\n    :param id: id of the scaling ip", "id": "f10753:m18"}
{"signature": "def create_scalingip(context, content):", "body": "LOG.info('<STR_LIT>',<EOL>context.tenant_id, content)<EOL>network_id = content.get('<STR_LIT>')<EOL>ip_address = content.get('<STR_LIT>')<EOL>requested_ports = content.get('<STR_LIT>', [])<EOL>network = _get_network(context, network_id)<EOL>port_fixed_ips = {}<EOL>for req_port in requested_ports:<EOL><INDENT>port = _get_port(context, req_port['<STR_LIT>'])<EOL>fixed_ip = _get_fixed_ip(context, req_port.get('<STR_LIT>'),<EOL>port)<EOL>port_fixed_ips[port.id] = {\"<STR_LIT:port>\": port, \"<STR_LIT>\": fixed_ip}<EOL><DEDENT>scip = _allocate_ip(context, network, None, ip_address, ip_types.SCALING)<EOL>_create_flip(context, scip, port_fixed_ips)<EOL>return v._make_scaling_ip_dict(scip)<EOL>", "docstring": "Allocate or reallocate a scaling IP.\n\n    :param context: neutron api request context.\n    :param content: dictionary describing the scaling ip, with keys\n        as listed in the RESOURCE_ATTRIBUTE_MAP object in\n        neutron/api/v2/attributes.py.  All keys will be populated.\n\n    :returns: Dictionary containing details for the new scaling IP.  If values\n        are declared in the fields parameter, then only those keys will be\n        present.", "id": "f10753:m16"}
{"signature": "def create_floatingip(context, content):", "body": "LOG.info('<STR_LIT>' %<EOL>(id, context.tenant_id, content))<EOL>network_id = content.get('<STR_LIT>')<EOL>if not network_id:<EOL><INDENT>raise n_exc.BadRequest(resource='<STR_LIT>',<EOL>msg='<STR_LIT>')<EOL><DEDENT>fixed_ip_address = content.get('<STR_LIT>')<EOL>ip_address = content.get('<STR_LIT>')<EOL>port_id = content.get('<STR_LIT>')<EOL>port = None<EOL>port_fixed_ip = {}<EOL>network = _get_network(context, network_id)<EOL>if port_id:<EOL><INDENT>port = _get_port(context, port_id)<EOL>fixed_ip = _get_fixed_ip(context, fixed_ip_address, port)<EOL>port_fixed_ip = {port.id: {'<STR_LIT:port>': port, '<STR_LIT>': fixed_ip}}<EOL><DEDENT>flip = _allocate_ip(context, network, port, ip_address, ip_types.FLOATING)<EOL>_create_flip(context, flip, port_fixed_ip)<EOL>return v._make_floating_ip_dict(flip, port_id)<EOL>", "docstring": "Allocate or reallocate a floating IP.\n\n    :param context: neutron api request context.\n    :param content: dictionary describing the floating ip, with keys\n        as listed in the RESOURCE_ATTRIBUTE_MAP object in\n        neutron/api/v2/attributes.py.  All keys will be populated.\n\n    :returns: Dictionary containing details for the new floating IP.  If values\n        are declared in the fields parameter, then only those keys will be\n        present.", "id": "f10753:m10"}
{"signature": "def _update_flip(context, flip_id, ip_type, requested_ports):", "body": "<EOL>notifications = {<EOL>billing.IP_ASSOC: set(),<EOL>billing.IP_DISASSOC: set()<EOL>}<EOL>context.session.begin()<EOL>try:<EOL><INDENT>flip = db_api.floating_ip_find(context, id=flip_id, scope=db_api.ONE)<EOL>if not flip:<EOL><INDENT>if ip_type == ip_types.SCALING:<EOL><INDENT>raise q_exc.ScalingIpNotFound(id=flip_id)<EOL><DEDENT>raise q_exc.FloatingIpNotFound(id=flip_id)<EOL><DEDENT>current_ports = flip.ports<EOL>req_port_ids = [request_port.get('<STR_LIT>')<EOL>for request_port in requested_ports]<EOL>curr_port_ids = [curr_port.id for curr_port in current_ports]<EOL>added_port_ids = [port_id for port_id in req_port_ids<EOL>if port_id and port_id not in curr_port_ids]<EOL>removed_port_ids = [port_id for port_id in curr_port_ids<EOL>if port_id not in req_port_ids]<EOL>remaining_port_ids = set(curr_port_ids) - set(removed_port_ids)<EOL>if (ip_type == ip_types.FLOATING and curr_port_ids and<EOL>curr_port_ids == req_port_ids):<EOL><INDENT>d = dict(flip_id=flip_id, port_id=curr_port_ids[<NUM_LIT:0>])<EOL>raise q_exc.PortAlreadyAssociatedToFloatingIp(**d)<EOL><DEDENT>if (ip_type == ip_types.FLOATING and<EOL>not curr_port_ids and not req_port_ids):<EOL><INDENT>raise q_exc.FloatingIpUpdateNoPortIdSupplied()<EOL><DEDENT>flip_subnet = v._make_subnet_dict(flip.subnet)<EOL>for added_port_id in added_port_ids:<EOL><INDENT>port = _get_port(context, added_port_id)<EOL>nw = port.network<EOL>nw_ports = v._make_ports_list(nw.ports)<EOL>fixed_ips = [ip.get('<STR_LIT>') for p in nw_ports<EOL>for ip in p.get('<STR_LIT>')]<EOL>gw_ip = flip_subnet.get('<STR_LIT>')<EOL>if gw_ip in fixed_ips:<EOL><INDENT>port_with_gateway_ip = None<EOL>for port in nw_ports:<EOL><INDENT>for ip in port.get('<STR_LIT>'):<EOL><INDENT>if gw_ip in ip.get('<STR_LIT>'):<EOL><INDENT>port_with_gateway_ip = port<EOL>break<EOL><DEDENT><DEDENT><DEDENT>port_id = port_with_gateway_ip.get('<STR_LIT:id>')<EOL>network_id = 
port_with_gateway_ip.get('<STR_LIT>')<EOL>raise q_exc.FixedIpAllocatedToGatewayIp(port_id=port_id,<EOL>network_id=network_id)<EOL><DEDENT><DEDENT>port_fixed_ips = {}<EOL>for port_id in remaining_port_ids:<EOL><INDENT>port = db_api.port_find(context, id=port_id, scope=db_api.ONE)<EOL>fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)<EOL>port_fixed_ips[port_id] = {'<STR_LIT:port>': port, '<STR_LIT>': fixed_ip}<EOL><DEDENT>for port_id in removed_port_ids:<EOL><INDENT>port = db_api.port_find(context, id=port_id, scope=db_api.ONE)<EOL>flip = db_api.port_disassociate_ip(context, [port], flip)<EOL>notifications[billing.IP_DISASSOC].add(flip)<EOL>fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)<EOL>if fixed_ip:<EOL><INDENT>flip = db_api.floating_ip_disassociate_fixed_ip(<EOL>context, flip, fixed_ip)<EOL><DEDENT><DEDENT>for port_id in added_port_ids:<EOL><INDENT>port = db_api.port_find(context, id=port_id, scope=db_api.ONE)<EOL>if not port:<EOL><INDENT>raise n_exc.PortNotFound(port_id=port_id)<EOL><DEDENT>if any(ip for ip in port.ip_addresses<EOL>if (ip.get('<STR_LIT>') == ip_types.FLOATING)):<EOL><INDENT>raise q_exc.PortAlreadyContainsFloatingIp(port_id=port_id)<EOL><DEDENT>if any(ip for ip in port.ip_addresses<EOL>if (ip.get('<STR_LIT>') == ip_types.SCALING)):<EOL><INDENT>raise q_exc.PortAlreadyContainsScalingIp(port_id=port_id)<EOL><DEDENT>fixed_ip = _get_next_available_fixed_ip(port)<EOL>LOG.info('<STR_LIT>' % fixed_ip)<EOL>if not fixed_ip:<EOL><INDENT>raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)<EOL><DEDENT>port_fixed_ips[port_id] = {'<STR_LIT:port>': port, '<STR_LIT>': fixed_ip}<EOL>flip = db_api.port_associate_ip(context, [port], flip, [port_id])<EOL>notifications[billing.IP_ASSOC].add(flip)<EOL>flip = db_api.floating_ip_associate_fixed_ip(context, flip,<EOL>fixed_ip)<EOL><DEDENT>flip_driver = registry.DRIVER_REGISTRY.get_driver()<EOL>if not remaining_port_ids and not 
added_port_ids:<EOL><INDENT>flip_driver.remove_floating_ip(flip)<EOL><DEDENT>elif added_port_ids and not curr_port_ids:<EOL><INDENT>flip_driver.register_floating_ip(flip, port_fixed_ips)<EOL><DEDENT>else:<EOL><INDENT>flip_driver.update_floating_ip(flip, port_fixed_ips)<EOL><DEDENT>context.session.commit()<EOL><DEDENT>except Exception:<EOL><INDENT>context.session.rollback()<EOL>raise<EOL><DEDENT>for notif_type, flip_set in notifications.iteritems():<EOL><INDENT>for flip in flip_set:<EOL><INDENT>billing.notify(context, notif_type, flip)<EOL><DEDENT><DEDENT>context.session.refresh(flip)<EOL>return flip<EOL>", "docstring": "Update a flip based IPAddress\n\n    :param context: neutron api request context.\n    :param flip_id: id of the flip or scip\n    :param ip_type: ip_types.FLOATING | ip_types.SCALING\n    :param requested_ports: dictionary of the structure:\n    {\"port_id\": \"<id of port>\", \"fixed_ip\": \"<fixed ip address>\"}\n    :return: quark.models.IPAddress", "id": "f10753:m8"}
{"signature": "def get_floatingips(context, filters=None, fields=None, sorts=['<STR_LIT:id>'],<EOL>limit=None, marker=None, page_reverse=False):", "body": "LOG.info('<STR_LIT>' %<EOL>(context.tenant_id, filters, fields))<EOL>floating_ips = _get_ips_by_type(context, ip_types.FLOATING,<EOL>filters=filters, fields=fields)<EOL>return [v._make_floating_ip_dict(flip) for flip in floating_ips]<EOL>", "docstring": "Retrieve a list of floating ips.\n\n    :param context: neutron api request context.\n    :param filters: a dictionary with keys that are valid keys for\n        a floating ip as listed in the RESOURCE_ATTRIBUTE_MAP object\n        in neutron/api/v2/attributes.py.  Values in this dictionary\n        are an iterable containing values that will be used for an exact\n        match comparison for that value.  Each result returned by this\n        function will have matched one of the values for each key in\n        filters.\n    :param fields: a list of strings that are valid keys in a\n        floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n        object in neutron/api/v2/attributes.py. Only these fields\n        will be returned.\n\n    :returns: List of floating IPs that are accessible to the tenant who\n        submits the request (as indicated by the tenant id of the context)\n        as well as any filters.", "id": "f10753:m14"}
{"signature": "def update_scalingip(context, id, content):", "body": "LOG.info('<STR_LIT>' %<EOL>(id, context.tenant_id, content))<EOL>requested_ports = content.get('<STR_LIT>', [])<EOL>flip = _update_flip(context, id, ip_types.SCALING, requested_ports)<EOL>return v._make_scaling_ip_dict(flip)<EOL>", "docstring": "Update an existing scaling IP.\n\n    :param context: neutron api request context.\n    :param id: id of the scaling ip\n    :param content: dictionary with keys indicating fields to update.\n        valid keys are those that have a value of True for 'allow_put'\n        as listed in the RESOURCE_ATTRIBUTE_MAP object in\n        neutron/api/v2/attributes.py.\n\n    :returns: Dictionary containing details for the new scaling IP.  If values\n        are declared in the fields parameter, then only those keys will be\n        present.", "id": "f10753:m17"}
{"signature": "def get_floatingip(context, id, fields=None):", "body": "LOG.info('<STR_LIT>' % (id, context.tenant_id))<EOL>filters = {'<STR_LIT>': ip_types.FLOATING, '<STR_LIT>': False}<EOL>floating_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,<EOL>**filters)<EOL>if not floating_ip:<EOL><INDENT>raise q_exc.FloatingIpNotFound(id=id)<EOL><DEDENT>return v._make_floating_ip_dict(floating_ip)<EOL>", "docstring": "Retrieve a floating IP.\n\n    :param context: neutron api request context.\n    :param id: The UUID of the floating IP.\n    :param fields: a list of strings that are valid keys in a\n        floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n        object in neutron/api/v2/attributes.py. Only these fields\n        will be returned.\n\n    :returns: Dictionary containing details for the floating IP.  If values\n        are declared in the fields parameter, then only those keys will be\n        present.", "id": "f10753:m13"}
{"signature": "def _validate_subnet_cidr(context, network_id, new_subnet_cidr):", "body": "if neutron_cfg.cfg.CONF.allow_overlapping_ips:<EOL><INDENT>return<EOL><DEDENT>try:<EOL><INDENT>new_subnet_ipset = netaddr.IPSet([new_subnet_cidr])<EOL><DEDENT>except TypeError:<EOL><INDENT>LOG.exception(\"<STR_LIT>\" % new_subnet_cidr)<EOL>raise n_exc.BadRequest(resource=\"<STR_LIT>\",<EOL>msg=\"<STR_LIT>\")<EOL><DEDENT>filters = {<EOL>'<STR_LIT>': network_id,<EOL>'<STR_LIT>': [False]<EOL>}<EOL>subnet_list = db_api.subnet_find(context=context.elevated(), **filters)<EOL>for subnet in subnet_list:<EOL><INDENT>if (netaddr.IPSet([subnet.cidr]) & new_subnet_ipset):<EOL><INDENT>err_msg = (_(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\") %<EOL>{'<STR_LIT>': new_subnet_cidr,<EOL>'<STR_LIT>': network_id})<EOL>LOG.error(_(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"),<EOL>{'<STR_LIT>': new_subnet_cidr,<EOL>'<STR_LIT>': subnet.id,<EOL>'<STR_LIT>': subnet.cidr})<EOL>raise n_exc.InvalidInput(error_message=err_msg)<EOL><DEDENT><DEDENT>", "docstring": "Validate the CIDR for a subnet.\n\n    Verifies the specified CIDR does not overlap with the ones defined\n    for the other subnets specified for this network, or with any other\n    CIDR if overlapping IPs are disabled.", "id": "f10754:m0"}
{"signature": "def create_subnet(context, subnet):", "body": "LOG.info(\"<STR_LIT>\" % context.tenant_id)<EOL>net_id = subnet[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>with context.session.begin():<EOL><INDENT>net = db_api.network_find(context=context, limit=None, sorts=['<STR_LIT:id>'],<EOL>marker=None, page_reverse=False, fields=None,<EOL>id=net_id, scope=db_api.ONE)<EOL>if not net:<EOL><INDENT>raise n_exc.NetworkNotFound(net_id=net_id)<EOL><DEDENT>sub_attrs = subnet[\"<STR_LIT>\"]<EOL>always_pop = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\"]<EOL>admin_only = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\"]<EOL>utils.filter_body(context, sub_attrs, admin_only, always_pop)<EOL>_validate_subnet_cidr(context, net_id, sub_attrs[\"<STR_LIT>\"])<EOL>cidr = netaddr.IPNetwork(sub_attrs[\"<STR_LIT>\"])<EOL>err_vals = {'<STR_LIT>': sub_attrs[\"<STR_LIT>\"], '<STR_LIT>': net_id}<EOL>err = _(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>if cidr.version == <NUM_LIT:6> and cidr.prefixlen > <NUM_LIT:64>:<EOL><INDENT>err_vals[\"<STR_LIT>\"] = <NUM_LIT><EOL>err_msg = err % err_vals<EOL>raise n_exc.InvalidInput(error_message=err_msg)<EOL><DEDENT>elif cidr.version == <NUM_LIT:4> and cidr.prefixlen > <NUM_LIT:30>:<EOL><INDENT>err_vals[\"<STR_LIT>\"] = <NUM_LIT><EOL>err_msg = err % err_vals<EOL>raise n_exc.InvalidInput(error_message=err_msg)<EOL><DEDENT>net_subnets = get_subnets(context,<EOL>filters=dict(network_id=net_id))<EOL>if not context.is_admin:<EOL><INDENT>v4_count, v6_count = <NUM_LIT:0>, <NUM_LIT:0><EOL>for subnet in net_subnets:<EOL><INDENT>if netaddr.IPNetwork(subnet['<STR_LIT>']).version == <NUM_LIT:6>:<EOL><INDENT>v6_count += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>v4_count += <NUM_LIT:1><EOL><DEDENT><DEDENT>if cidr.version == <NUM_LIT:6>:<EOL><INDENT>tenant_quota_v6 = context.session.query(qdv.Quota).filter_by(<EOL>tenant_id=context.tenant_id,<EOL>resource='<STR_LIT>').first()<EOL>if tenant_quota_v6 != 
-<NUM_LIT:1>:<EOL><INDENT>quota.QUOTAS.limit_check(<EOL>context, context.tenant_id,<EOL>v6_subnets_per_network=v6_count + <NUM_LIT:1>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>tenant_quota_v4 = context.session.query(qdv.Quota).filter_by(<EOL>tenant_id=context.tenant_id,<EOL>resource='<STR_LIT>').first()<EOL>if tenant_quota_v4 != -<NUM_LIT:1>:<EOL><INDENT>quota.QUOTAS.limit_check(<EOL>context, context.tenant_id,<EOL>v4_subnets_per_network=v4_count + <NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>gateway_ip = utils.pop_param(sub_attrs, \"<STR_LIT>\")<EOL>dns_ips = utils.pop_param(sub_attrs, \"<STR_LIT>\", [])<EOL>host_routes = utils.pop_param(sub_attrs, \"<STR_LIT>\", [])<EOL>allocation_pools = utils.pop_param(sub_attrs, \"<STR_LIT>\", None)<EOL>sub_attrs[\"<STR_LIT>\"] = net<EOL>new_subnet = db_api.subnet_create(context, **sub_attrs)<EOL>cidrs = []<EOL>alloc_pools = allocation_pool.AllocationPools(sub_attrs[\"<STR_LIT>\"],<EOL>allocation_pools)<EOL>if isinstance(allocation_pools, list):<EOL><INDENT>cidrs = alloc_pools.get_policy_cidrs()<EOL><DEDENT>quota.QUOTAS.limit_check(<EOL>context,<EOL>context.tenant_id,<EOL>alloc_pools_per_subnet=len(alloc_pools))<EOL>ip_policies.ensure_default_policy(cidrs, [new_subnet])<EOL>new_subnet[\"<STR_LIT>\"] = db_api.ip_policy_create(context,<EOL>exclude=cidrs)<EOL>quota.QUOTAS.limit_check(context, context.tenant_id,<EOL>routes_per_subnet=len(host_routes))<EOL>default_route = None<EOL>for route in host_routes:<EOL><INDENT>netaddr_route = netaddr.IPNetwork(route[\"<STR_LIT>\"])<EOL>if netaddr_route.value == routes.DEFAULT_ROUTE.value:<EOL><INDENT>if default_route:<EOL><INDENT>raise q_exc.DuplicateRouteConflict(<EOL>subnet_id=new_subnet[\"<STR_LIT:id>\"])<EOL><DEDENT>default_route = route<EOL>gateway_ip = default_route[\"<STR_LIT>\"]<EOL>alloc_pools.validate_gateway_excluded(gateway_ip)<EOL><DEDENT>new_subnet[\"<STR_LIT>\"].append(db_api.route_create(<EOL>context, cidr=route[\"<STR_LIT>\"], 
gateway=route[\"<STR_LIT>\"]))<EOL><DEDENT>quota.QUOTAS.limit_check(context, context.tenant_id,<EOL>dns_nameservers_per_subnet=len(dns_ips))<EOL>for dns_ip in dns_ips:<EOL><INDENT>new_subnet[\"<STR_LIT>\"].append(db_api.dns_create(<EOL>context, ip=netaddr.IPAddress(dns_ip)))<EOL><DEDENT>if gateway_ip and default_route is None:<EOL><INDENT>alloc_pools.validate_gateway_excluded(gateway_ip)<EOL>new_subnet[\"<STR_LIT>\"].append(db_api.route_create(<EOL>context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip))<EOL><DEDENT><DEDENT>subnet_dict = v._make_subnet_dict(new_subnet)<EOL>subnet_dict[\"<STR_LIT>\"] = gateway_ip<EOL>return subnet_dict<EOL>", "docstring": "Create a subnet.\n\n    Create a subnet which represents a range of IP addresses\n    that can be allocated to devices\n\n    : param context: neutron api request context\n    : param subnet: dictionary describing the subnet, with keys\n        as listed in the RESOURCE_ATTRIBUTE_MAP object in\n        neutron/api/v2/attributes.py.  All keys will be populated.", "id": "f10754:m1"}
{"signature": "def get_subnet(context, id, fields=None):", "body": "LOG.info(\"<STR_LIT>\" %<EOL>(id, context.tenant_id, fields))<EOL>subnet = db_api.subnet_find(context=context, limit=None,<EOL>page_reverse=False, sorts=['<STR_LIT:id>'],<EOL>marker_obj=None, fields=None, id=id,<EOL>join_dns=True, join_routes=True,<EOL>scope=db_api.ONE)<EOL>if not subnet:<EOL><INDENT>raise n_exc.SubnetNotFound(subnet_id=id)<EOL><DEDENT>cache = subnet.get(\"<STR_LIT>\")<EOL>if not cache:<EOL><INDENT>new_cache = subnet.allocation_pools<EOL>db_api.subnet_update_set_alloc_pool_cache(context, subnet, new_cache)<EOL><DEDENT>return v._make_subnet_dict(subnet)<EOL>", "docstring": "Retrieve a subnet.\n\n    : param context: neutron api request context\n    : param id: UUID representing the subnet to fetch.\n    : param fields: a list of strings that are valid keys in a\n        subnet dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n        object in neutron/api/v2/attributes.py. Only these fields\n        will be returned.", "id": "f10754:m4"}
{"signature": "def update_security_group_rule(context, id, security_group_rule):", "body": "LOG.info(\"<STR_LIT>\" %<EOL>(context.tenant_id))<EOL>new_rule = security_group_rule[\"<STR_LIT>\"]<EOL>new_rule = _filter_update_security_group_rule(new_rule)<EOL>with context.session.begin():<EOL><INDENT>rule = db_api.security_group_rule_find(context, id=id,<EOL>scope=db_api.ONE)<EOL>if not rule:<EOL><INDENT>raise sg_ext.SecurityGroupRuleNotFound(id=id)<EOL><DEDENT>db_rule = db_api.security_group_rule_update(context, rule, **new_rule)<EOL>group_id = db_rule.group_id<EOL>group = db_api.security_group_find(context, id=group_id,<EOL>scope=db_api.ONE)<EOL>if not group:<EOL><INDENT>raise sg_ext.SecurityGroupNotFound(id=group_id)<EOL><DEDENT><DEDENT>if group:<EOL><INDENT>_perform_async_update_rule(context, group_id, group, rule.id,<EOL>RULE_UPDATE)<EOL><DEDENT>return v._make_security_group_rule_dict(db_rule)<EOL>", "docstring": "Updates a rule and updates the ports", "id": "f10756:m10"}
{"signature": "def _filter_update_security_group_rule(rule):", "body": "allowed = ['<STR_LIT>', '<STR_LIT>']<EOL>filtered = {}<EOL>for k, val in rule.iteritems():<EOL><INDENT>if k in allowed:<EOL><INDENT>if isinstance(val, basestring) andlen(val) <= GROUP_NAME_MAX_LENGTH:<EOL><INDENT>filtered[k] = val<EOL><DEDENT><DEDENT><DEDENT>return filtered<EOL>", "docstring": "Only two fields are allowed for modification:\n\n        external_service and external_service_id", "id": "f10756:m1"}
{"signature": "@env.has_capability(env.Capabilities.SG_UPDATE_ASYNC)<EOL>def _perform_async_update_rule(context, id, db_sg_group, rule_id, action):", "body": "rpc_reply = None<EOL>sg_rpc = sg_rpc_api.QuarkSGAsyncProcessClient()<EOL>ports = db_api.sg_gather_associated_ports(context, db_sg_group)<EOL>if len(ports) > <NUM_LIT:0>:<EOL><INDENT>rpc_reply = sg_rpc.start_update(context, id, rule_id, action)<EOL>if rpc_reply:<EOL><INDENT>job_id = rpc_reply['<STR_LIT>']<EOL>job_api.add_job_to_context(context, job_id)<EOL><DEDENT>else:<EOL><INDENT>LOG.error(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "Updates a SG rule async and return the job information.\n\n    Only happens if the security group has associated ports. If the async\n    connection fails the update continues (legacy mode).", "id": "f10756:m8"}
{"signature": "@utils.exc_wrapper(internal=True)<EOL>def update_port(context, id, port):", "body": "LOG.info(\"<STR_LIT>\" % (id, context.tenant_id))<EOL>port_db = db_api.port_find(context, id=id, scope=db_api.ONE)<EOL>if not port_db:<EOL><INDENT>raise n_exc.PortNotFound(port_id=id)<EOL><DEDENT>port_dict = port[\"<STR_LIT:port>\"]<EOL>fixed_ips = port_dict.pop(\"<STR_LIT>\", None)<EOL>admin_only = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\"]<EOL>always_filter = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]<EOL>utils.filter_body(context, port_dict, admin_only=admin_only,<EOL>always_filter=always_filter)<EOL>if fixed_ips:<EOL><INDENT>quota.QUOTAS.limit_check(context, context.tenant_id,<EOL>fixed_ips_per_port=len(fixed_ips))<EOL><DEDENT>new_security_groups = utils.pop_param(port_dict, \"<STR_LIT>\")<EOL>if new_security_groups is not None:<EOL><INDENT>if (Capabilities.TENANT_NETWORK_SG not in<EOL>CONF.QUARK.environment_capabilities):<EOL><INDENT>if not STRATEGY.is_provider_network(port_db[\"<STR_LIT>\"]):<EOL><INDENT>raise q_exc.TenantNetworkSecurityGroupRulesNotEnabled()<EOL><DEDENT><DEDENT><DEDENT>if new_security_groups is not None and not port_db[\"<STR_LIT>\"]:<EOL><INDENT>raise q_exc.SecurityGroupsRequireDevice()<EOL><DEDENT>group_ids, security_group_mods = _make_security_group_list(<EOL>context, new_security_groups)<EOL>quota.QUOTAS.limit_check(context, context.tenant_id,<EOL>security_groups_per_port=len(group_ids))<EOL>if fixed_ips is not None:<EOL><INDENT>ipam_driver = ipam.IPAM_REGISTRY.get_strategy(<EOL>ipam.QuarkIpamANY.get_name())<EOL>addresses, subnet_ids = [], []<EOL>ip_addresses = {}<EOL>for fixed_ip in fixed_ips:<EOL><INDENT>subnet_id = fixed_ip.get(\"<STR_LIT>\")<EOL>ip_address = fixed_ip.get(\"<STR_LIT>\")<EOL>if not (subnet_id or ip_address):<EOL><INDENT>raise n_exc.BadRequest(<EOL>resource=\"<STR_LIT>\",<EOL>msg=\"<STR_LIT>\")<EOL><DEDENT>if ip_address and not subnet_id:<EOL><INDENT>raise 
n_exc.BadRequest(<EOL>resource=\"<STR_LIT>\",<EOL>msg=\"<STR_LIT>\")<EOL><DEDENT>if subnet_id and ip_address:<EOL><INDENT>ip_netaddr = None<EOL>try:<EOL><INDENT>ip_netaddr = netaddr.IPAddress(ip_address).ipv6()<EOL><DEDENT>except netaddr.AddrFormatError:<EOL><INDENT>raise n_exc.InvalidInput(<EOL>error_message=\"<STR_LIT>\")<EOL><DEDENT>ip_addresses[ip_netaddr] = subnet_id<EOL><DEDENT>else:<EOL><INDENT>subnet_ids.append(subnet_id)<EOL><DEDENT><DEDENT>port_ips = set([netaddr.IPAddress(int(a[\"<STR_LIT:address>\"]))<EOL>for a in port_db[\"<STR_LIT>\"]])<EOL>new_ips = set([a for a in ip_addresses.keys()])<EOL>ips_to_allocate = list(new_ips - port_ips)<EOL>ips_to_deallocate = list(port_ips - new_ips)<EOL>for ip in ips_to_allocate:<EOL><INDENT>if ip in ip_addresses:<EOL><INDENT>allocated = []<EOL>ipam_driver.allocate_ip_address(<EOL>context, allocated, port_db[\"<STR_LIT>\"],<EOL>port_db[\"<STR_LIT:id>\"], reuse_after=None, ip_addresses=[ip],<EOL>subnets=[ip_addresses[ip]])<EOL>addresses.extend(allocated)<EOL><DEDENT><DEDENT>for ip in ips_to_deallocate:<EOL><INDENT>ipam_driver.deallocate_ips_by_port(<EOL>context, port_db, ip_address=ip)<EOL><DEDENT>for subnet_id in subnet_ids:<EOL><INDENT>ipam_driver.allocate_ip_address(<EOL>context, addresses, port_db[\"<STR_LIT>\"], port_db[\"<STR_LIT:id>\"],<EOL>reuse_after=CONF.QUARK.ipam_reuse_after,<EOL>subnets=[subnet_id])<EOL><DEDENT>if addresses:<EOL><INDENT>port_dict[\"<STR_LIT>\"] = port_db[\"<STR_LIT>\"]<EOL>port_dict[\"<STR_LIT>\"].extend(addresses)<EOL><DEDENT><DEDENT>net_driver = _get_net_driver(port_db.network, port=port_db)<EOL>base_net_driver = _get_net_driver(port_db.network)<EOL>kwargs = {}<EOL>if new_security_groups is not None:<EOL><INDENT>kwargs[\"<STR_LIT>\"] = security_group_mods<EOL><DEDENT>net_driver.update_port(context, port_id=port_db[\"<STR_LIT>\"],<EOL>mac_address=port_db[\"<STR_LIT>\"],<EOL>device_id=port_db[\"<STR_LIT>\"],<EOL>base_net_driver=base_net_driver,<EOL>**kwargs)<EOL>port_dict[\"<STR_LIT>\"] = 
security_group_mods<EOL>with context.session.begin():<EOL><INDENT>port = db_api.port_update(context, port_db, **port_dict)<EOL><DEDENT>if port_db in context.session:<EOL><INDENT>context.session.expunge(port_db)<EOL><DEDENT>port_db = db_api.port_find(context, id=id, scope=db_api.ONE)<EOL>return v._make_port_dict(port_db)<EOL>", "docstring": "Update values of a port.\n\n    : param context: neutron api request context\n    : param id: UUID representing the port to update.\n    : param port: dictionary with keys indicating fields to update.\n        valid keys are those that have a value of True for 'allow_put'\n        as listed in the RESOURCE_ATTRIBUTE_MAP object in\n        neutron/api/v2/attributes.py.", "id": "f10757:m6"}
{"signature": "@utils.exc_wrapper(internal=True)<EOL>def create_port(context, port):", "body": "LOG.info(\"<STR_LIT>\" % context.tenant_id)<EOL>port_attrs = port[\"<STR_LIT:port>\"]<EOL>admin_only = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\"]<EOL>utils.filter_body(context, port_attrs, admin_only=admin_only)<EOL>port_attrs = port[\"<STR_LIT:port>\"]<EOL>mac_address = utils.pop_param(port_attrs, \"<STR_LIT>\", None)<EOL>use_forbidden_mac_range = utils.pop_param(port_attrs,<EOL>\"<STR_LIT>\", False)<EOL>segment_id = utils.pop_param(port_attrs, \"<STR_LIT>\")<EOL>fixed_ips = utils.pop_param(port_attrs, \"<STR_LIT>\")<EOL>if \"<STR_LIT>\" not in port_attrs:<EOL><INDENT>port_attrs['<STR_LIT>'] = \"<STR_LIT>\"<EOL><DEDENT>device_id = port_attrs['<STR_LIT>']<EOL>if \"<STR_LIT>\" not in port_attrs:<EOL><INDENT>port_attrs['<STR_LIT>'] = \"<STR_LIT>\"<EOL><DEDENT>instance_node_id = port_attrs['<STR_LIT>']<EOL>net_id = port_attrs[\"<STR_LIT>\"]<EOL>port_id = uuidutils.generate_uuid()<EOL>net = db_api.network_find(context=context, limit=None, sorts=['<STR_LIT:id>'],<EOL>marker=None, page_reverse=False, fields=None,<EOL>id=net_id, scope=db_api.ONE)<EOL>if not net:<EOL><INDENT>raise n_exc.NetworkNotFound(net_id=net_id)<EOL><DEDENT>_raise_if_unauthorized(context, net)<EOL>if device_id:<EOL><INDENT>existing_ports = db_api.port_find(context,<EOL>network_id=net_id,<EOL>device_id=device_id,<EOL>scope=db_api.ONE)<EOL>if existing_ports:<EOL><INDENT>raise n_exc.BadRequest(<EOL>resource=\"<STR_LIT:port>\", msg=\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if fixed_ips:<EOL><INDENT>quota.QUOTAS.limit_check(context, context.tenant_id,<EOL>fixed_ips_per_port=len(fixed_ips))<EOL><DEDENT>if not STRATEGY.is_provider_network(net_id):<EOL><INDENT>segment_id = None<EOL>port_count = db_api.port_count_all(context, network_id=[net_id],<EOL>tenant_id=[context.tenant_id])<EOL>quota.QUOTAS.limit_check(<EOL>context, 
context.tenant_id,<EOL>ports_per_network=port_count + <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>if not segment_id:<EOL><INDENT>raise q_exc.AmbiguousNetworkId(net_id=net_id)<EOL><DEDENT><DEDENT>network_plugin = utils.pop_param(port_attrs, \"<STR_LIT>\")<EOL>if not network_plugin:<EOL><INDENT>network_plugin = net[\"<STR_LIT>\"]<EOL><DEDENT>port_attrs[\"<STR_LIT>\"] = network_plugin<EOL>ipam_driver = _get_ipam_driver(net, port=port_attrs)<EOL>net_driver = _get_net_driver(net, port=port_attrs)<EOL>base_net_driver = _get_net_driver(net)<EOL>security_groups = utils.pop_param(port_attrs, \"<STR_LIT>\")<EOL>if security_groups is not None:<EOL><INDENT>raise q_exc.SecurityGroupsNotImplemented()<EOL><DEDENT>group_ids, security_groups = _make_security_group_list(context,<EOL>security_groups)<EOL>quota.QUOTAS.limit_check(context, context.tenant_id,<EOL>security_groups_per_port=len(group_ids))<EOL>addresses = []<EOL>backend_port = None<EOL>with utils.CommandManager().execute() as cmd_mgr:<EOL><INDENT>@cmd_mgr.do<EOL>def _allocate_ips(fixed_ips, net, port_id, segment_id, mac,<EOL>**kwargs):<EOL><INDENT>if fixed_ips:<EOL><INDENT>if (STRATEGY.is_provider_network(net_id) and<EOL>not context.is_admin):<EOL><INDENT>raise n_exc.NotAuthorized()<EOL><DEDENT>ips, subnets = split_and_validate_requested_subnets(context,<EOL>net_id,<EOL>segment_id,<EOL>fixed_ips)<EOL>kwargs[\"<STR_LIT>\"] = ips<EOL>kwargs[\"<STR_LIT>\"] = subnets<EOL><DEDENT>ipam_driver.allocate_ip_address(<EOL>context, addresses, net[\"<STR_LIT:id>\"], port_id,<EOL>CONF.QUARK.ipam_reuse_after, segment_id=segment_id,<EOL>mac_address=mac, **kwargs)<EOL><DEDENT>@cmd_mgr.undo<EOL>def _allocate_ips_undo(addr, **kwargs):<EOL><INDENT>LOG.info(\"<STR_LIT>\")<EOL>if addresses:<EOL><INDENT>for address in addresses:<EOL><INDENT>try:<EOL><INDENT>with context.session.begin():<EOL><INDENT>ipam_driver.deallocate_ip_address(context, address,<EOL>**kwargs)<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>LOG.exception(\"<STR_LIT>\" % 
address)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>@cmd_mgr.do<EOL>def _allocate_mac(net, port_id, mac_address,<EOL>use_forbidden_mac_range=False,<EOL>**kwargs):<EOL><INDENT>mac = ipam_driver.allocate_mac_address(<EOL>context, net[\"<STR_LIT:id>\"], port_id, CONF.QUARK.ipam_reuse_after,<EOL>mac_address=mac_address,<EOL>use_forbidden_mac_range=use_forbidden_mac_range, **kwargs)<EOL>return mac<EOL><DEDENT>@cmd_mgr.undo<EOL>def _allocate_mac_undo(mac, **kwargs):<EOL><INDENT>LOG.info(\"<STR_LIT>\")<EOL>if mac:<EOL><INDENT>try:<EOL><INDENT>with context.session.begin():<EOL><INDENT>ipam_driver.deallocate_mac_address(context,<EOL>mac[\"<STR_LIT:address>\"])<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>LOG.exception(\"<STR_LIT>\" % mac)<EOL><DEDENT><DEDENT><DEDENT>@cmd_mgr.do<EOL>def _allocate_backend_port(mac, addresses, net, port_id, **kwargs):<EOL><INDENT>backend_port = net_driver.create_port(<EOL>context, net[\"<STR_LIT:id>\"],<EOL>port_id=port_id,<EOL>security_groups=group_ids,<EOL>device_id=device_id,<EOL>instance_node_id=instance_node_id,<EOL>mac_address=mac,<EOL>addresses=addresses,<EOL>base_net_driver=base_net_driver)<EOL>_filter_backend_port(backend_port)<EOL>return backend_port<EOL><DEDENT>@cmd_mgr.undo<EOL>def _allocate_back_port_undo(backend_port,<EOL>**kwargs):<EOL><INDENT>LOG.info(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>backend_port_uuid = None<EOL>if backend_port:<EOL><INDENT>backend_port_uuid = backend_port.get(\"<STR_LIT>\")<EOL><DEDENT>net_driver.delete_port(context, backend_port_uuid)<EOL><DEDENT>except Exception:<EOL><INDENT>LOG.exception(<EOL>\"<STR_LIT>\" % backend_port)<EOL><DEDENT><DEDENT>@cmd_mgr.do<EOL>def _allocate_db_port(port_attrs, backend_port, addresses, mac,<EOL>**kwargs):<EOL><INDENT>port_attrs[\"<STR_LIT>\"] = net[\"<STR_LIT:id>\"]<EOL>port_attrs[\"<STR_LIT:id>\"] = port_id<EOL>port_attrs[\"<STR_LIT>\"] = security_groups<EOL>LOG.info(\"<STR_LIT>\" % backend_port)<EOL>port_attrs.update(backend_port)<EOL>with 
context.session.begin():<EOL><INDENT>new_port = db_api.port_create(<EOL>context, addresses=addresses, mac_address=mac[\"<STR_LIT:address>\"],<EOL>backend_key=backend_port[\"<STR_LIT>\"], **port_attrs)<EOL><DEDENT>return new_port<EOL><DEDENT>@cmd_mgr.undo<EOL>def _allocate_db_port_undo(new_port,<EOL>**kwargs):<EOL><INDENT>LOG.info(\"<STR_LIT>\")<EOL>if not new_port:<EOL><INDENT>return<EOL><DEDENT>try:<EOL><INDENT>with context.session.begin():<EOL><INDENT>db_api.port_delete(context, new_port)<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>LOG.exception(<EOL>\"<STR_LIT>\" % backend_port)<EOL><DEDENT><DEDENT>mac = _allocate_mac(net, port_id, mac_address,<EOL>use_forbidden_mac_range=use_forbidden_mac_range)<EOL>_allocate_ips(fixed_ips, net, port_id, segment_id, mac)<EOL>backend_port = _allocate_backend_port(mac, addresses, net, port_id)<EOL>new_port = _allocate_db_port(port_attrs, backend_port, addresses, mac)<EOL><DEDENT>return v._make_port_dict(new_port)<EOL>", "docstring": "Create a port\n\n    Create a port which is a connection point of a device (e.g., a VM\n    NIC) to attach to a L2 Neutron network.\n    : param context: neutron api request context\n    : param port: dictionary describing the port, with keys\n        as listed in the RESOURCE_ATTRIBUTE_MAP object in\n        neutron/api/v2/attributes.py.  All keys will be populated.", "id": "f10757:m5"}
{"signature": "def get_port_for_ip_address(context, ip_id, id, fields=None):", "body": "LOG.info(\"<STR_LIT>\" %<EOL>(id, context.tenant_id, fields))<EOL>addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE)<EOL>if not addr:<EOL><INDENT>raise q_exc.IpAddressNotFound(addr_id=ip_id)<EOL><DEDENT>filters = {'<STR_LIT>': [ip_id]}<EOL>results = db_api.port_find(context, id=id, fields=fields,<EOL>scope=db_api.ONE, **filters)<EOL>if not results:<EOL><INDENT>raise n_exc.PortNotFound(port_id=id)<EOL><DEDENT>return v._make_port_for_ip_dict(addr, results)<EOL>", "docstring": "Retrieve a port.\n\n    : param context: neutron api request context\n    : param id: UUID representing the port to fetch.\n    : param fields: a list of strings that are valid keys in a\n        port dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n        object in neutron/api/v2/attributes.py. Only these fields\n        will be returned.", "id": "f10758:m16"}
{"signature": "def update_port_for_ip_address(context, ip_id, id, port):", "body": "LOG.info(\"<STR_LIT>\" % (id, context.tenant_id))<EOL>sanitize_list = ['<STR_LIT>']<EOL>with context.session.begin():<EOL><INDENT>addr = db_api.ip_address_find(context, id=ip_id, scope=db_api.ONE)<EOL>if not addr:<EOL><INDENT>raise q_exc.IpAddressNotFound(addr_id=ip_id)<EOL><DEDENT>port_db = db_api.port_find(context, id=id, scope=db_api.ONE)<EOL>if not port_db:<EOL><INDENT>raise q_exc.PortNotFound(port_id=id)<EOL><DEDENT>port_dict = {k: port['<STR_LIT:port>'][k] for k in sanitize_list}<EOL>require_da = False<EOL>service = port_dict.get('<STR_LIT>')<EOL>if require_da and _shared_ip_and_active(addr, except_port=id):<EOL><INDENT>raise q_exc.PortRequiresDisassociation()<EOL><DEDENT>addr.set_service_for_port(port_db, service)<EOL>context.session.add(addr)<EOL><DEDENT>return v._make_port_for_ip_dict(addr, port_db)<EOL>", "docstring": "Update values of a port.\n\n    : param context: neutron api request context\n    : param ip_id: UUID representing the ip associated with port to update\n    : param id: UUID representing the port to update.\n    : param port: dictionary with keys indicating fields to update.\n        valid keys are those that have a value of True for 'allow_put'\n        as listed in the RESOURCE_ATTRIBUTE_MAP object in\n        neutron/api/v2/attributes.py.", "id": "f10758:m17"}
{"signature": "def delete_job(context, id, **filters):", "body": "LOG.info(\"<STR_LIT>\" % (id, context.tenant_id))<EOL>if not context.is_admin:<EOL><INDENT>raise n_exc.NotAuthorized()<EOL><DEDENT>with context.session.begin():<EOL><INDENT>job = db_api.async_transaction_find(context, id=id, scope=db_api.ONE,<EOL>**filters)<EOL>if not job:<EOL><INDENT>raise q_exc.JobNotFound(job_id=id)<EOL><DEDENT>db_api.async_transaction_delete(context, job)<EOL><DEDENT>", "docstring": "Delete an ip address.\n\n    : param context: neutron api request context\n    : param id: UUID representing the ip address to delete.", "id": "f10759:m5"}
{"signature": "def get_networks_count(context, filters=None):", "body": "LOG.info(\"<STR_LIT>\" %<EOL>(context.tenant_id, filters))<EOL>return db_api.network_count_all(context)<EOL>", "docstring": "Return the number of networks.\n\n    The result depends on the identity of the user making the request\n    (as indicated by the context) as well as any filters.\n    : param context: neutron api request context\n    : param filters: a dictionary with keys that are valid keys for\n        a network as listed in the RESOURCE_ATTRIBUTE_MAP object\n        in neutron/api/v2/attributes.py.  Values in this dictiontary\n        are an iterable containing values that will be used for an exact\n        match comparison for that value.  Each result returned by this\n        function will have matched one of the values for each key in\n        filters.\n\n    NOTE: this method is optional, as it was not part of the originally\n          defined plugin API.", "id": "f10763:m5"}
{"signature": "def delete_mac_address_range(context, id):", "body": "LOG.info(\"<STR_LIT>\" %<EOL>(id, context.tenant_id))<EOL>if not context.is_admin:<EOL><INDENT>raise n_exc.NotAuthorized()<EOL><DEDENT>with context.session.begin():<EOL><INDENT>mar = db_api.mac_address_range_find(context, id=id, scope=db_api.ONE)<EOL>if not mar:<EOL><INDENT>raise q_exc.MacAddressRangeNotFound(<EOL>mac_address_range_id=id)<EOL><DEDENT>_delete_mac_address_range(context, mar)<EOL><DEDENT>", "docstring": "Delete a mac_address_range.\n\n    : param context: neutron api request context\n    : param id: UUID representing the mac_address_range to delete.", "id": "f10764:m5"}
{"signature": "def get_mac_address_range(context, id, fields=None):", "body": "LOG.info(\"<STR_LIT>\" %<EOL>(id, context.tenant_id, fields))<EOL>if not context.is_admin:<EOL><INDENT>raise n_exc.NotAuthorized()<EOL><DEDENT>mac_address_range = db_api.mac_address_range_find(<EOL>context, id=id, scope=db_api.ONE)<EOL>if not mac_address_range:<EOL><INDENT>raise q_exc.MacAddressRangeNotFound(<EOL>mac_address_range_id=id)<EOL><DEDENT>return v._make_mac_range_dict(mac_address_range)<EOL>", "docstring": "Retrieve a mac_address_range.\n\n    : param context: neutron api request context\n    : param id: UUID representing the network to fetch.\n    : param fields: a list of strings that are valid keys in a\n        network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP\n        object in neutron/api/v2/attributes.py. Only these fields\n        will be returned.", "id": "f10764:m1"}
{"signature": "@classmethod<EOL><INDENT>def get_resources(cls):<DEDENT>", "body": "plugin = directory.get_plugin()<EOL>controller = IPPoliciesController(plugin)<EOL>return [extensions.ResourceExtension(Ip_policies.get_alias(),<EOL>controller)]<EOL>", "docstring": "Returns Ext Resources.", "id": "f10767:c1:m6"}
{"signature": "@classmethod<EOL><INDENT>def get_resources(cls):<DEDENT>", "body": "ip_controller = IpAddressesController(<EOL>directory.get_plugin())<EOL>ip_port_controller = IpAddressPortController(<EOL>directory.get_plugin())<EOL>resources = []<EOL>resources.append(extensions.ResourceExtension(<EOL>Ip_addresses.get_alias(),<EOL>ip_controller))<EOL>parent = {'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'}<EOL>resources.append(extensions.ResourceExtension(<EOL>'<STR_LIT>', ip_port_controller, parent=parent))<EOL>return resources<EOL>", "docstring": "Returns Ext Resources.", "id": "f10771:c2:m6"}
{"signature": "@classmethod<EOL><INDENT>def get_resources(cls):<DEDENT>", "body": "controller = RoutesController(directory.get_plugin())<EOL>return [extensions.ResourceExtension(<EOL>Routes.get_alias(),<EOL>controller)]<EOL>", "docstring": "Returns Ext Resources.", "id": "f10776:c1:m6"}
{"signature": "def opt_args_decorator(func):", "body": "@wraps(func)<EOL>def wrapped_dec(*args, **kwargs):<EOL><INDENT>if len(args) == <NUM_LIT:1> and len(kwargs) == <NUM_LIT:0> and callable(args[<NUM_LIT:0>]):<EOL><INDENT>return func(args[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>return lambda realf: func(realf, *args, **kwargs)<EOL><DEDENT><DEDENT>return wrapped_dec<EOL>", "docstring": "A decorator to be used on another decorator\n\n    This is done to allow separate handling on the basis of argument values", "id": "f10785:m5"}
{"signature": "def _validate_allocation_pools(self):", "body": "ip_pools = self._alloc_pools<EOL>subnet_cidr = self._subnet_cidr<EOL>LOG.debug(_(\"<STR_LIT>\"))<EOL>ip_sets = []<EOL>for ip_pool in ip_pools:<EOL><INDENT>try:<EOL><INDENT>start_ip = netaddr.IPAddress(ip_pool['<STR_LIT:start>'])<EOL>end_ip = netaddr.IPAddress(ip_pool['<STR_LIT:end>'])<EOL><DEDENT>except netaddr.AddrFormatError:<EOL><INDENT>LOG.info(_(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"),<EOL>{'<STR_LIT:start>': ip_pool['<STR_LIT:start>'],<EOL>'<STR_LIT:end>': ip_pool['<STR_LIT:end>']})<EOL>raise n_exc_ext.InvalidAllocationPool(pool=ip_pool)<EOL><DEDENT>if (start_ip.version != self._subnet_cidr.version or<EOL>end_ip.version != self._subnet_cidr.version):<EOL><INDENT>LOG.info(_(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"))<EOL>raise n_exc_ext.InvalidAllocationPool(pool=ip_pool)<EOL><DEDENT>if end_ip < start_ip:<EOL><INDENT>LOG.info(_(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"),<EOL>{'<STR_LIT:start>': ip_pool['<STR_LIT:start>'], '<STR_LIT:end>': ip_pool['<STR_LIT:end>']})<EOL>raise n_exc_ext.InvalidAllocationPool(pool=ip_pool)<EOL><DEDENT>if (start_ip < self._subnet_first_ip or<EOL>end_ip > self._subnet_last_ip):<EOL><INDENT>LOG.info(_(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"),<EOL>{'<STR_LIT:start>': ip_pool['<STR_LIT:start>'],<EOL>'<STR_LIT:end>': ip_pool['<STR_LIT:end>']})<EOL>raise n_exc_ext.OutOfBoundsAllocationPool(<EOL>pool=ip_pool,<EOL>subnet_cidr=subnet_cidr)<EOL><DEDENT>ip_sets.append(netaddr.IPSet(netaddr.IPRange(<EOL>ip_pool['<STR_LIT:start>'],<EOL>ip_pool['<STR_LIT:end>']).cidrs()))<EOL><DEDENT>LOG.debug(_(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"))<EOL>ip_ranges = ip_pools[:]<EOL>for l_cursor in xrange(len(ip_sets)):<EOL><INDENT>for r_cursor in xrange(l_cursor + <NUM_LIT:1>, len(ip_sets)):<EOL><INDENT>if ip_sets[l_cursor] & ip_sets[r_cursor]:<EOL><INDENT>l_range = ip_ranges[l_cursor]<EOL>r_range = ip_ranges[r_cursor]<EOL>LOG.info(_(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"),<EOL>{'<STR_LIT>': l_range, '<STR_LIT>': r_range})<EOL>raise 
n_exc_ext.OverlappingAllocationPools(<EOL>pool_1=l_range,<EOL>pool_2=r_range,<EOL>subnet_cidr=subnet_cidr)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Validate IP allocation pools.\n\n        Verify start and end address for each allocation pool are valid,\n        ie: constituted by valid and appropriately ordered IP addresses.\n        Also, verify pools do not overlap among themselves.\n        Finally, verify that each range fall within the subnet's CIDR.", "id": "f10788:c0:m2"}
{"signature": "def get_lswitch_ids_for_network(self, context, network_id):", "body": "lswitches = self._lswitches_for_network(context, network_id)<EOL>return [s['<STR_LIT>'] for s in lswitches]<EOL>", "docstring": "Public interface for fetching lswitch ids for a given network.\n\n        NOTE(morgabra) This is here because calling private methods\n        from outside the class feels wrong, and we need to be able to\n        fetch lswitch ids for use in other drivers.", "id": "f10794:c0:m19"}
{"signature": "def update_security_group(self, context, group_id, **group):", "body": "raise NotImplementedError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>", "docstring": "Update a security group.\n\n        :raises NotImplementedError: This driver does not implement security\n                                     groups.\n\n        NOTE: Security groups will be supported in the future, but for now\n        they are explicitly disallowed.", "id": "f10795:c2:m20"}
{"signature": "def delete_port(self, context, port_id, **kwargs):", "body": "LOG.info(\"<STR_LIT>\" % (context.tenant_id, port_id))<EOL>try:<EOL><INDENT>self._delete_port(context, port_id)<EOL>LOG.info(\"<STR_LIT>\" % (port_id))<EOL><DEDENT>except Exception:<EOL><INDENT>LOG.error(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (port_id))<EOL><DEDENT>", "docstring": "Delete a port.\n\n        :param context: neutron api request context.\n        :param port_id: neutron port id.\n        :param kwargs: optional kwargs.\n        :raises IronicException: If the client is unable to delete the\n            downstream port for any reason, the exception will be logged\n            and IronicException raised.", "id": "f10795:c2:m13"}
{"signature": "def diag_network(self, *args, **kwargs):", "body": "raise NotImplementedError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>", "docstring": "Diagnose a network.\n\n        :raises NotImplementedError: This driver does not manage networks.\n\n        NOTE: This is a no-op in the base driver, but this raises here as to\n        explicitly disallow network operations in case of a misconfiguration.", "id": "f10795:c2:m17"}
{"signature": "def delete_security_group_rule(self, context, group_id, rule):", "body": "raise NotImplementedError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>", "docstring": "Delete a security group rule.\n\n        :raises NotImplementedError: This driver does not implement security\n                                     groups.\n\n        NOTE: Security groups will be supported in the future, but for now\n        they are explicitly disallowed.", "id": "f10795:c2:m22"}
{"signature": "def create_network(self, *args, **kwargs):", "body": "raise NotImplementedError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>", "docstring": "Create a network.\n\n        :raises NotImplementedError: This driver does not manage networks.\n\n        NOTE: This is a no-op in the base driver, but this raises here as to\n        explicitly disallow network operations in case of a misconfiguration.", "id": "f10795:c2:m15"}
{"signature": "def diag_port(self, context, port_id, **kwargs):", "body": "LOG.info(\"<STR_LIT>\" % port_id)<EOL>try:<EOL><INDENT>port = self._client.show_port(port_id)<EOL><DEDENT>except Exception as e:<EOL><INDENT>msg = \"<STR_LIT>\" % (str(e))<EOL>LOG.exception(msg)<EOL>raise IronicException(msg=msg)<EOL><DEDENT>return {\"<STR_LIT>\": port}<EOL>", "docstring": "Diagnose a port.\n\n        :param context: neutron api request context.\n        :param port_id: neutron port id.\n        :param kwargs: optional kwargs.\n        :raises IronicException: If the client is unable to fetch the\n            downstream port for any reason, the exception will be\n            logged and IronicException raised.", "id": "f10795:c2:m14"}
{"signature": "def delete_network(self, *args, **kwargs):", "body": "raise NotImplementedError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>", "docstring": "Delete a network.\n\n        :raises NotImplementedError: This driver does not manage networks.\n\n        NOTE: This is a no-op in the base driver, but this raises here as to\n        explicitly disallow network operations in case of a misconfiguration.", "id": "f10795:c2:m16"}
{"signature": "def select_ipam_strategy(self, network_id, network_strategy, **kwargs):", "body": "LOG.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (network_id, network_strategy))<EOL>net_type = \"<STR_LIT>\"<EOL>if STRATEGY.is_provider_network(network_id):<EOL><INDENT>net_type = \"<STR_LIT>\"<EOL><DEDENT>strategy = self._ipam_strategies.get(net_type, {})<EOL>default = strategy.get(\"<STR_LIT:default>\")<EOL>overrides = strategy.get(\"<STR_LIT>\", {})<EOL>if network_strategy in overrides:<EOL><INDENT>LOG.info(\"<STR_LIT>\"<EOL>% (overrides[network_strategy]))<EOL>return overrides[network_strategy]<EOL><DEDENT>if default:<EOL><INDENT>LOG.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (default))<EOL>return default<EOL><DEDENT>LOG.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (network_strategy))<EOL>return network_strategy<EOL>", "docstring": "Return relevant IPAM strategy name.\n\n        :param network_id: neutron network id.\n        :param network_strategy: default strategy for the network.\n\n        NOTE(morgabra) This feels like a hack but I can't think of a better\n        idea. The root problem is we can now attach ports to networks with\n        a different backend driver/ipam strategy than the network speficies.\n\n        We handle the the backend driver part with allowing network_plugin to\n        be specified for port objects. This works pretty well because nova or\n        whatever knows when we are hooking up an Ironic node so it can pass\n        along that key during port_create().\n\n        IPAM is a little trickier, especially in Ironic's case, because we\n        *must* use a specific IPAM for provider networks. There isn't really\n        much of an option other than involve the backend driver when selecting\n        the IPAM strategy.", "id": "f10795:c2:m5"}
{"signature": "def update_floating_ip(self, floating_ip, port_fixed_ips):", "body": "url = \"<STR_LIT>\" % (CONF.QUARK.floating_ip_base_url,<EOL>floating_ip[\"<STR_LIT>\"])<EOL>timeout = CONF.QUARK.unicorn_api_timeout_seconds<EOL>req = self._build_request_body(floating_ip, port_fixed_ips)<EOL>try:<EOL><INDENT>LOG.info(\"<STR_LIT>\"<EOL>% (url, req))<EOL>r = requests.put(url, data=json.dumps(req), timeout=timeout)<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOG.error(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% (floating_ip.id, e.message))<EOL>raise ex.RegisterFloatingIpFailure(id=floating_ip.id)<EOL><DEDENT>if r.status_code != <NUM_LIT:200> and r.status_code != <NUM_LIT>:<EOL><INDENT>msg = \"<STR_LIT>\"\"<STR_LIT>\" % (r.status_code, r.json())<EOL>LOG.error(\"<STR_LIT>\" % msg)<EOL>raise ex.RegisterFloatingIpFailure(id=floating_ip.id)<EOL><DEDENT>", "docstring": "Update an existing floating ip with Unicorn\n\n        :param floating_ip: The quark.db.models.IPAddress to update\n        :param port_fixed_ips: A dictionary containing the port and fixed ips\n        to associate the floating IP with.  Has the structure of:\n        {\"<id of port>\": {\"port\": <quark.db.models.Port>,\n         \"fixed_ip\": \"<fixed ip address>\"}}\n        :return: None", "id": "f10798:c0:m3"}
{"signature": "def decode_exactly(geohash):", "body": "lat_interval, lon_interval = (-<NUM_LIT>, <NUM_LIT>), (-<NUM_LIT>, <NUM_LIT>)<EOL>lat_err, lon_err = <NUM_LIT>, <NUM_LIT><EOL>is_even = True<EOL>for c in geohash:<EOL><INDENT>cd = __decodemap[c]<EOL>for mask in [<NUM_LIT:16>, <NUM_LIT:8>, <NUM_LIT:4>, <NUM_LIT:2>, <NUM_LIT:1>]:<EOL><INDENT>if is_even: <EOL><INDENT>lon_err /= <NUM_LIT:2><EOL>if cd & mask:<EOL><INDENT>lon_interval = ((lon_interval[<NUM_LIT:0>]+lon_interval[<NUM_LIT:1>])/<NUM_LIT:2>, lon_interval[<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>lon_interval = (lon_interval[<NUM_LIT:0>], (lon_interval[<NUM_LIT:0>]+lon_interval[<NUM_LIT:1>])/<NUM_LIT:2>)<EOL><DEDENT><DEDENT>else:      <EOL><INDENT>lat_err /= <NUM_LIT:2><EOL>if cd & mask:<EOL><INDENT>lat_interval = ((lat_interval[<NUM_LIT:0>]+lat_interval[<NUM_LIT:1>])/<NUM_LIT:2>, lat_interval[<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>lat_interval = (lat_interval[<NUM_LIT:0>], (lat_interval[<NUM_LIT:0>]+lat_interval[<NUM_LIT:1>])/<NUM_LIT:2>)<EOL><DEDENT><DEDENT>is_even = not is_even<EOL><DEDENT><DEDENT>lat = (lat_interval[<NUM_LIT:0>] + lat_interval[<NUM_LIT:1>]) / <NUM_LIT:2><EOL>lon = (lon_interval[<NUM_LIT:0>] + lon_interval[<NUM_LIT:1>]) / <NUM_LIT:2><EOL>return lat, lon, lat_err, lon_err<EOL>", "docstring": "Decode the geohash to its exact values, including the error\nmargins of the result.  Returns four float values: latitude,\nlongitude, the plus/minus error for latitude (as a positive\nnumber) and the plus/minus error for longitude (as a positive\nnumber).", "id": "f10801:m0"}
{"signature": "def sts_conn(service, service_type='<STR_LIT>', future_expiration_minutes=<NUM_LIT:15>):", "body": "def decorator(f):<EOL><INDENT>@wraps(f)<EOL>def decorated_function(*args, **kwargs):<EOL><INDENT>if kwargs.get(\"<STR_LIT>\"):<EOL><INDENT>kwargs[service_type] = kwargs.pop(\"<STR_LIT>\")<EOL>kwargs.pop(\"<STR_LIT>\", None)<EOL>kwargs.pop(\"<STR_LIT>\", None)<EOL><DEDENT>else:<EOL><INDENT>kwargs[service_type] = boto3_cached_conn(<EOL>service,<EOL>service_type=service_type,<EOL>future_expiration_minutes=future_expiration_minutes,<EOL>account_number=kwargs.pop('<STR_LIT>', None),<EOL>assume_role=kwargs.pop('<STR_LIT>', None),<EOL>session_name=kwargs.pop('<STR_LIT>', '<STR_LIT>'),<EOL>external_id=kwargs.pop('<STR_LIT>', None),<EOL>region=kwargs.pop('<STR_LIT>', '<STR_LIT>'),<EOL>arn_partition=kwargs.pop('<STR_LIT>', '<STR_LIT>')<EOL>)<EOL><DEDENT>return f(*args, **kwargs)<EOL><DEDENT>return decorated_function<EOL><DEDENT>return decorator<EOL>", "docstring": "This will wrap all calls with an STS AssumeRole if the required parameters are sent over.\nNamely, it requires the following in the kwargs:\n- Service Type (Required)\n- Account Number (Required for Assume Role)\n- IAM Role Name (Required for Assume Role)\n- Region (Optional, but recommended)\n- AWS Partition (Optional, defaults to 'aws' if none specified)\n- IAM Session Name (Optional, but recommended to appear in CloudTrail)\n\nIf `force_client` is set to a boto3 client, then this will simply pass that in as the client.\n`force_client` is mostly useful for mocks and tests.\n:param service:\n:param service_type:\n:param future_expiration_minutes:\n:return:", "id": "f10811:m5"}
{"signature": "@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def describe_target_health(target_group_arn, targets=None, client=None):", "body": "kwargs = dict(TargetGroupArn=target_group_arn)<EOL>if targets:<EOL><INDENT>kwargs.update(Targets=targets)<EOL><DEDENT>return client.describe_target_health(**kwargs)['<STR_LIT>']<EOL>", "docstring": "Permission: elasticloadbalancing:DescribeTargetHealth", "id": "f10813:m8"}
{"signature": "@paginated('<STR_LIT>', response_pagination_marker='<STR_LIT>')<EOL>@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def describe_listeners(load_balancer_arn=None, listener_arns=None, client=None):", "body": "kwargs = dict()<EOL>if load_balancer_arn:<EOL><INDENT>kwargs.update(dict(LoadBalancerArn=load_balancer_arn))<EOL><DEDENT>if listener_arns:<EOL><INDENT>kwargs.update(dict(ListenerArns=listener_arns))<EOL><DEDENT>return client.describe_listeners(**kwargs)<EOL>", "docstring": "Permission: elasticloadbalancing:DescribeListeners", "id": "f10813:m1"}
{"signature": "@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def describe_rules(listener_arn=None, rule_arns=None, client=None):", "body": "kwargs = dict()<EOL>if listener_arn:<EOL><INDENT>kwargs.update(dict(ListenerArn=listener_arn))<EOL><DEDENT>if rule_arns:<EOL><INDENT>kwargs.update(dict(RuleArns=rule_arns))<EOL><DEDENT>return client.describe_rules(**kwargs)['<STR_LIT>']<EOL>", "docstring": "Permission: elasticloadbalancing:DescribeRules", "id": "f10813:m3"}
{"signature": "@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def get_bucket_accelerate_configuration(client=None, **kwargs):", "body": "return client.get_bucket_accelerate_configuration(**kwargs)<EOL>", "docstring": "Bucket='string'", "id": "f10817:m11"}
{"signature": "@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def get_bucket_website(client=None, **kwargs):", "body": "return client.get_bucket_website(**kwargs)<EOL>", "docstring": "Bucket='string'", "id": "f10817:m8"}
{"signature": "@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def get_bucket_tagging(client=None, **kwargs):", "body": "return client.get_bucket_tagging(**kwargs)<EOL>", "docstring": "Bucket='string'", "id": "f10817:m4"}
{"signature": "@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def get_bucket_logging(client=None, **kwargs):", "body": "return client.get_bucket_logging(**kwargs)<EOL>", "docstring": "Bucket='string'", "id": "f10817:m7"}
{"signature": "@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def get_bucket_acl(client=None, **kwargs):", "body": "return client.get_bucket_acl(**kwargs)<EOL>", "docstring": "Bucket='string'", "id": "f10817:m2"}
{"signature": "@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def get_bucket_policy(client=None, **kwargs):", "body": "return client.get_bucket_policy(**kwargs)<EOL>", "docstring": "Bucket='string'", "id": "f10817:m3"}
{"signature": "@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def get_bucket_cors(client=None, **kwargs):", "body": "return client.get_bucket_cors(**kwargs)<EOL>", "docstring": "Bucket='string'", "id": "f10817:m9"}
{"signature": "@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def get_bucket_lifecycle_configuration(client=None, **kwargs):", "body": "return client.get_bucket_lifecycle_configuration(**kwargs)<EOL>", "docstring": "Bucket='string'", "id": "f10817:m6"}
{"signature": "@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def get_bucket_versioning(client=None, **kwargs):", "body": "return client.get_bucket_versioning(**kwargs)<EOL>", "docstring": "Bucket='string'", "id": "f10817:m5"}
{"signature": "@sts_conn('<STR_LIT>')<EOL>@rate_limited()<EOL>def describe_rule(client=None, **kwargs):", "body": "return client.describe_rule(**kwargs)<EOL>", "docstring": "Name='string'", "id": "f10820:m1"}
{"signature": "@sts_conn('<STR_LIT>', service_type='<STR_LIT>')<EOL>@paginated('<STR_LIT>')<EOL>@rate_limited()<EOL>def list_group_policies(group_name, client=None, **kwargs):", "body": "return client.list_group_policies(GroupName=group_name, **kwargs)<EOL>", "docstring": "Lets the IAM group inline policies for a given group.", "id": "f10821:m36"}
{"signature": "@sts_conn('<STR_LIT>', service_type='<STR_LIT>')<EOL>@rate_limited()<EOL>def get_managed_policy_document(policy_arn, policy_metadata=None, client=None, **kwargs):", "body": "if not policy_metadata:<EOL><INDENT>policy_metadata = client.get_policy(PolicyArn=policy_arn)<EOL><DEDENT>policy_document = client.get_policy_version(PolicyArn=policy_arn,<EOL>VersionId=policy_metadata['<STR_LIT>']['<STR_LIT>'])<EOL>return policy_document['<STR_LIT>']['<STR_LIT>']<EOL>", "docstring": "Retrieve the currently active (i.e. 'default') policy version document for a policy.\n\n    :param policy_arn:\n    :param policy_metadata: This is a previously fetch managed policy response from boto/cloudaux.\n                            This is used to prevent unnecessary API calls to get the initial policy default version id.\n    :param client:\n    :param kwargs:\n    :return:", "id": "f10821:m13"}
{"signature": "@sts_conn('<STR_LIT>', service_type='<STR_LIT>')<EOL>@paginated('<STR_LIT>')<EOL>@rate_limited()<EOL>def list_groups_for_user(user_name, client=None, **kwargs):", "body": "return client.list_groups_for_user(UserName=user_name, **kwargs)<EOL>", "docstring": "Lists the IAM groups that is attached to a given IAM user.", "id": "f10821:m39"}
{"signature": "@paginated('<STR_LIT>')<EOL>@rate_limited()<EOL>def _get_users_for_group(client, **kwargs):", "body": "return client.get_group(**kwargs)<EOL>", "docstring": "Fetch the paginated users attached to the group.", "id": "f10821:m34"}
{"signature": "@openstack_conn()<EOL>def list_items(conn=None, **kwargs):", "body": "return [x for x in getattr( getattr( conn, kwargs.pop('<STR_LIT>') ),<EOL>kwargs.pop('<STR_LIT>'))(**kwargs)]<EOL>", "docstring": ":rtype: ``list``", "id": "f10835:m0"}
{"signature": "@registry.register(flag=FLAGS.INTERNET_GATEWAY, depends_on=FLAGS.BASE, key=\"<STR_LIT>\")<EOL>def get_internet_gateway(vpc, **conn):", "body": "result = {}<EOL>ig_result = describe_internet_gateways(Filters=[{\"<STR_LIT:Name>\": \"<STR_LIT>\", \"<STR_LIT>\": [vpc[\"<STR_LIT:id>\"]]}], **conn)<EOL>if ig_result:<EOL><INDENT>result.update({<EOL>\"<STR_LIT>\": ig_result[<NUM_LIT:0>][\"<STR_LIT>\"][<NUM_LIT:0>][\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": ig_result[<NUM_LIT:0>][\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": ig_result[<NUM_LIT:0>].get(\"<STR_LIT>\", [])<EOL>})<EOL><DEDENT>return result<EOL>", "docstring": "Gets the Internet Gateway details about a VPC", "id": "f10838:m2"}
{"signature": "@modify_output<EOL>def get_vpc(vpc_id, flags=FLAGS.ALL, **conn):", "body": "<EOL>if not conn.get(\"<STR_LIT>\"):<EOL><INDENT>raise CloudAuxException({\"<STR_LIT:message>\": \"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\": vpc_id})<EOL><DEDENT>if not conn.get(\"<STR_LIT>\"):<EOL><INDENT>raise CloudAuxException({\"<STR_LIT:message>\": \"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\": vpc_id})<EOL><DEDENT>start = {<EOL>'<STR_LIT>': \"<STR_LIT>\".format(region=conn[\"<STR_LIT>\"],<EOL>account=conn[\"<STR_LIT>\"],<EOL>vpc_id=vpc_id),<EOL>'<STR_LIT:id>': vpc_id<EOL>}<EOL>return registry.build_out(flags, start_with=start, pass_datastructure=True, **conn)<EOL>", "docstring": "Orchestrates all the calls required to fully fetch details about a VPC:\n\n{\n    \"Arn\": ...,\n    \"Region\": ...,\n    \"Name\": ...,\n    \"Id\": ...,\n    \"Tags: ...,\n    \"VpcPeeringConnections\": ...,\n    \"ClassicLink\": ...,\n    \"DhcpOptionsId\": ...,\n    \"InternetGateway\": ...,\n    \"IsDefault\": ...,\n    \"CidrBlock\": ...,\n    \"CidrBlockAssociationSet\": ...,\n    \"Ipv6CidrBlockAssociationSet\": ...,\n    \"InstanceTenancy\": ...,\n    \"RouteTables\": ...,\n    \"NetworkAcls\": ...,\n    \"FlowLogs\": ...,\n    \"Subnets\": ...,\n    \"Attributes\": ...,\n    \"FlowLogs\": ...,\n    \"_version\": 1\n}\n\n:param vpc_id: The ID of the VPC\n:param flags:\n:param conn:\n:return:", "id": "f10838:m8"}
{"signature": "@registry.register(flag=FLAGS.ROUTE_TABLES, depends_on=FLAGS.BASE, key=\"<STR_LIT>\")<EOL>def get_route_tables(vpc, **conn):", "body": "route_tables = describe_route_tables(Filters=[{\"<STR_LIT:Name>\": \"<STR_LIT>\", \"<STR_LIT>\": [vpc[\"<STR_LIT:id>\"]]}], **conn)<EOL>rt_ids = []<EOL>for r in route_tables:<EOL><INDENT>rt_ids.append(r[\"<STR_LIT>\"])<EOL><DEDENT>return rt_ids<EOL>", "docstring": "Gets the VPC Route Tables", "id": "f10838:m5"}
{"signature": "@registry.register(flag=FLAGS.NETWORK_ACLS, depends_on=FLAGS.BASE, key=\"<STR_LIT>\")<EOL>def get_network_acls(vpc, **conn):", "body": "route_tables = describe_network_acls(Filters=[{\"<STR_LIT:Name>\": \"<STR_LIT>\", \"<STR_LIT>\": [vpc[\"<STR_LIT:id>\"]]}], **conn)<EOL>nacl_ids = []<EOL>for r in route_tables:<EOL><INDENT>nacl_ids.append(r[\"<STR_LIT>\"])<EOL><DEDENT>return nacl_ids<EOL>", "docstring": "Gets the VPC Network ACLs", "id": "f10838:m6"}
{"signature": "@registry.register(flag=FLAGS.FLOW_LOGS, depends_on=FLAGS.BASE, key=\"<STR_LIT>\")<EOL>def get_vpc_flow_logs(vpc, **conn):", "body": "fl_result = describe_flow_logs(Filters=[{\"<STR_LIT:Name>\": \"<STR_LIT>\", \"<STR_LIT>\": [vpc[\"<STR_LIT:id>\"]]}], **conn)<EOL>fl_ids = []<EOL>for fl in fl_result:<EOL><INDENT>fl_ids.append(fl[\"<STR_LIT>\"])<EOL><DEDENT>return fl_ids<EOL>", "docstring": "Gets the VPC Flow Logs for a VPC", "id": "f10838:m0"}
{"signature": "@modify_output<EOL>def get_load_balancer(load_balancer, flags=FLAGS.ALL ^ FLAGS.POLICY_TYPES, **conn):", "body": "<EOL>try:<EOL><INDENT>basestring<EOL><DEDENT>except NameError as _:<EOL><INDENT>basestring = str<EOL><DEDENT>if isinstance(load_balancer, basestring):<EOL><INDENT>load_balancer = dict(LoadBalancerName=load_balancer)<EOL><DEDENT>return registry.build_out(flags, start_with=load_balancer, pass_datastructure=True, **conn)<EOL>", "docstring": "Fully describes an ELB.\n\n:param loadbalancer: Could be an ELB Name or a dictionary. Likely the return value from a previous call to describe_load_balancers. At a minimum, must contain a key titled 'LoadBalancerName'.\n:param flags: Flags describing which sections should be included in the return value. Default is FLAGS.ALL minus FLAGS.POLICY_TYPES.\n:return: Returns a dictionary describing the ELB with the fields described in the flags parameter.", "id": "f10839:m7"}
{"signature": "def _reformat_policy(policy):", "body": "policy_name = policy['<STR_LIT>']<EOL>ret = {}<EOL>ret['<STR_LIT:type>'] = policy['<STR_LIT>']<EOL>attrs = policy['<STR_LIT>']<EOL>if ret['<STR_LIT:type>'] != '<STR_LIT>':<EOL><INDENT>return policy_name, ret<EOL><DEDENT>attributes = dict()<EOL>for attr in attrs:<EOL><INDENT>attributes[attr['<STR_LIT>']] = attr['<STR_LIT>']<EOL><DEDENT>ret['<STR_LIT>'] = dict()<EOL>ret['<STR_LIT>']['<STR_LIT>'] = bool(attributes.get('<STR_LIT>'))<EOL>ret['<STR_LIT>']['<STR_LIT>'] = bool(attributes.get('<STR_LIT>'))<EOL>ret['<STR_LIT>']['<STR_LIT>'] = bool(attributes.get('<STR_LIT>'))<EOL>ret['<STR_LIT>']['<STR_LIT>'] = bool(attributes.get('<STR_LIT>'))<EOL>ret['<STR_LIT>']['<STR_LIT>'] = bool(attributes.get('<STR_LIT>'))<EOL>ret['<STR_LIT>'] = bool(attributes.get('<STR_LIT>'))<EOL>ret['<STR_LIT>'] = attributes.get('<STR_LIT>', None)<EOL>non_ciphers = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>]<EOL>ciphers = []<EOL>for cipher in attributes:<EOL><INDENT>if attributes[cipher] == '<STR_LIT:true>' and cipher not in non_ciphers:<EOL><INDENT>ciphers.append(cipher)<EOL><DEDENT><DEDENT>ciphers.sort()<EOL>ret['<STR_LIT>'] = ciphers<EOL>return policy_name, ret<EOL>", "docstring": "Policies returned from boto3 are massive, ugly, and difficult to read.\nThis method flattens and reformats the policy.\n\n:param policy: Result from invoking describe_load_balancer_policies(...)\n:return: Returns a tuple containing policy_name and the reformatted policy dict.", "id": "f10839:m0"}
{"signature": "@modify_output<EOL>def get_image(image_id, flags=FLAGS.ALL, **conn):", "body": "image = dict(ImageId=image_id)<EOL>conn['<STR_LIT>'] = conn.get('<STR_LIT>', '<STR_LIT>')<EOL>return registry.build_out(flags, image, **conn)<EOL>", "docstring": "Orchestrates all the calls required to fully build out an EC2 Image (AMI, AKI, ARI)\n\n{\n    \"Architecture\": \"x86_64\", \n    \"Arn\": \"arn:aws:ec2:us-east-1::image/ami-11111111\", \n    \"BlockDeviceMappings\": [], \n    \"CreationDate\": \"2013-07-11T16:04:06.000Z\", \n    \"Description\": \"...\", \n    \"Hypervisor\": \"xen\", \n    \"ImageId\": \"ami-11111111\", \n    \"ImageLocation\": \"111111111111/...\", \n    \"ImageType\": \"machine\", \n    \"KernelId\": \"aki-88888888\", \n    \"LaunchPermissions\": [], \n    \"Name\": \"...\", \n    \"OwnerId\": \"111111111111\", \n    \"ProductCodes\": [], \n    \"Public\": false, \n    \"RamdiskId\": {}, \n    \"RootDeviceName\": \"/dev/sda1\", \n    \"RootDeviceType\": \"ebs\", \n    \"SriovNetSupport\": \"simple\",\n    \"State\": \"available\", \n    \"Tags\": [], \n    \"VirtualizationType\": \"hvm\", \n    \"_version\": 1\n}\n\n:param image_id: str ami id\n:param flags: By default, set to ALL fields\n:param conn: dict containing enough information to make a connection to the desired account.\nMust at least have 'assume_role' key.\n:return: dict containing a fully built out image.", "id": "f10841:m5"}
{"signature": "@modify_output<EOL>def get_elbv2(alb, flags=FLAGS.ALL, **conn):", "body": "<EOL>try:<EOL><INDENT>basestring<EOL><DEDENT>except NameError as _:<EOL><INDENT>basestring = str<EOL><DEDENT>if isinstance(alb, basestring):<EOL><INDENT>from cloudaux.orchestration.aws.arn import ARN<EOL>alb_arn = ARN(alb)<EOL>if alb_arn.error:<EOL><INDENT>alb = dict(LoadBalancerName=alb)<EOL><DEDENT>else:<EOL><INDENT>alb = dict(LoadBalancerArn=alb)<EOL><DEDENT><DEDENT>return registry.build_out(flags, start_with=alb, pass_datastructure=True, **conn)<EOL>", "docstring": "Fully describes an ALB (ELBv2).\n\n:param alb: Could be an ALB Name, ALB ARN, or a dictionary. Likely the return value from a previous call to describe_load_balancers. At a minimum, must contain a key titled 'LoadBalancerArn'.\n:param flags: Flags describing which sections should be included in the return value. Default is FLAGS.ALL.\n:return: Returns a dictionary describing the ALB with the fields described in the flags parameter.", "id": "f10842:m8"}
{"signature": "def _get_name_from_structure(item, default):", "body": "if item.get(default):<EOL><INDENT>return item.get(default)<EOL><DEDENT>if item.get('<STR_LIT>'):<EOL><INDENT>arn = item.get('<STR_LIT>')<EOL>item_arn = ARN(arn)<EOL>if item_arn.error:<EOL><INDENT>raise CloudAuxException('<STR_LIT>'.format(arn=arn))<EOL><DEDENT>return item_arn.parsed_name<EOL><DEDENT>raise MissingFieldException('<STR_LIT>'.format(input=item))<EOL>", "docstring": "Given a possibly sparsely populated item dictionary, try to retrieve the item name.\nFirst try the default field.  If that doesn't exist, try to parse the from the ARN.\n:param item: dict containing (at the very least) item_name and/or arn\n:return: item name", "id": "f10845:m2"}
{"signature": "def _conn_from_arn(arn):", "body": "arn = ARN(arn)<EOL>if arn.error:<EOL><INDENT>raise CloudAuxException('<STR_LIT>'.format(arn=arn))<EOL><DEDENT>return dict(<EOL>account_number=arn.account_number,<EOL>)<EOL>", "docstring": "Extracts the account number from an ARN.\n:param arn: Amazon ARN containing account number.\n:return: dictionary with a single account_number key that can be merged with an existing\nconnection dictionary containing fields such as assume_role, session_name, region.", "id": "f10845:m1"}
{"signature": "@registry.register(flag=FLAGS.INLINE_POLICIES, key='<STR_LIT>')<EOL>def get_inline_policies(group, **conn):", "body": "policy_list = list_group_policies(group['<STR_LIT>'])<EOL>policy_documents = {}<EOL>for policy in policy_list:<EOL><INDENT>policy_documents[policy] = get_group_policy_document(group['<STR_LIT>'], policy, **conn)<EOL><DEDENT>return policy_documents<EOL>", "docstring": "Get the inline policies for the group.", "id": "f10848:m0"}
{"signature": "@modify_output<EOL>def get_group(group, flags=FLAGS.BASE | FLAGS.INLINE_POLICIES | FLAGS.MANAGED_POLICIES, **conn):", "body": "if not group.get('<STR_LIT>'):<EOL><INDENT>raise MissingFieldException('<STR_LIT>')<EOL><DEDENT>group = modify(group, output='<STR_LIT>')<EOL>_conn_from_args(group, conn)<EOL>return registry.build_out(flags, start_with=group, pass_datastructure=True, **conn)<EOL>", "docstring": "Orchestrates all the calls required to fully build out an IAM Group in the following format:\n\n{\n    \"Arn\": ...,\n    \"GroupName\": ...,\n    \"Path\": ...,\n    \"GroupId\": ...,\n    \"CreateDate\": ...,  # str\n    \"InlinePolicies\": ...,\n    \"ManagedPolicies\": ...,  # These are just the names of the Managed Policies.\n    \"Users\": ...,  # False by default -- these are just the names of the users.\n    \"_version\": 1\n}\n\n:param flags: By default, Users is disabled. This is somewhat expensive as it has to call the `get_group` call\n              multiple times.\n:param group: dict MUST contain the GroupName and also a combination of either the ARN or the account_number.\n:param output: Determines whether keys should be returned camelized or underscored.\n:param conn: dict containing enough information to make a connection to the desired account.\n             Must at least have 'assume_role' key.\n:return: dict containing fully built out Group.", "id": "f10848:m4"}
{"signature": "@registry.register(flag=FLAGS.USERS, key='<STR_LIT>')<EOL>def get_users(group, **conn):", "body": "group_details = get_group_api(group['<STR_LIT>'], **conn)<EOL>user_list = []<EOL>for user in group_details.get('<STR_LIT>', []):<EOL><INDENT>user_list.append(user['<STR_LIT>'])<EOL><DEDENT>return user_list<EOL>", "docstring": "Gets a list of the usernames that are a part of this group.", "id": "f10848:m2"}
{"signature": "@registry.register(flag=FLAGS.BASE)<EOL>def _get_base(role, **conn):", "body": "base_fields = frozenset(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>needs_base = False<EOL>for field in base_fields:<EOL><INDENT>if field not in role:<EOL><INDENT>needs_base = True<EOL>break<EOL><DEDENT><DEDENT>if needs_base:<EOL><INDENT>role_name = _get_name_from_structure(role, '<STR_LIT>')<EOL>role = CloudAux.go('<STR_LIT>', RoleName=role_name, **conn)<EOL>role = role['<STR_LIT>']<EOL><DEDENT>role.update(dict(CreateDate=get_iso_string(role['<STR_LIT>'])))<EOL>role['<STR_LIT>'] = <NUM_LIT:3><EOL>return role<EOL>", "docstring": "Determine whether the boto get_role call needs to be made or if we already have all that data\nin the role object.\n:param role: dict containing (at the very least) role_name and/or arn.\n:param conn: dict containing enough information to make a connection to the desired account.\n:return: Camelized dict describing role containing all all base_fields.", "id": "f10849:m4"}
{"signature": "@modify_output<EOL>def get_role(role, flags=FLAGS.ALL, **conn):", "body": "role = modify(role, output='<STR_LIT>')<EOL>_conn_from_args(role, conn)<EOL>return registry.build_out(flags, start_with=role, pass_datastructure=True, **conn)<EOL>", "docstring": "Orchestrates all the calls required to fully build out an IAM Role in the following format:\n\n{\n    \"Arn\": ...,\n    \"AssumeRolePolicyDocument\": ...,\n    \"CreateDate\": ...,  # str\n    \"InlinePolicies\": ...,\n    \"InstanceProfiles\": ...,\n    \"ManagedPolicies\": ...,\n    \"Path\": ...,\n    \"RoleId\": ...,\n    \"RoleName\": ...,\n    \"Tags\": {},\n    \"_version\": 3\n}\n\n:param role: dict containing (at the very least) role_name and/or arn.\n:param output: Determines whether keys should be returned camelized or underscored.\n:param conn: dict containing enough information to make a connection to the desired account.\nMust at least have 'assume_role' key.\n:return: dict containing a fully built out role.", "id": "f10849:m5"}
{"signature": "def get_all_users(flags=FLAGS.ACCESS_KEYS | FLAGS.MFA_DEVICES | FLAGS.LOGIN_PROFILE | FLAGS.SIGNING_CERTIFICATES,<EOL>**conn):", "body": "users = []<EOL>account_users = get_account_authorization_details('<STR_LIT>', **conn)<EOL>for user in account_users:<EOL><INDENT>temp_user = {<EOL>'<STR_LIT>': user['<STR_LIT>'],<EOL>'<STR_LIT>': get_iso_string(user['<STR_LIT>']),<EOL>'<STR_LIT>': user['<STR_LIT>'],<EOL>'<STR_LIT>': user['<STR_LIT>'],<EOL>'<STR_LIT>': [<EOL>{<EOL>\"<STR_LIT:name>\": x['<STR_LIT>'],<EOL>\"<STR_LIT>\": x['<STR_LIT>']<EOL>} for x in user['<STR_LIT>']<EOL>],<EOL>'<STR_LIT>': user['<STR_LIT>'],<EOL>'<STR_LIT>': user['<STR_LIT>'],<EOL>'<STR_LIT>': user['<STR_LIT>']<EOL>}<EOL>user = modify(temp_user, output='<STR_LIT>')<EOL>_conn_from_args(user, conn)<EOL>users.append(registry.build_out(flags, start_with=user, pass_datastructure=True, **conn))<EOL><DEDENT>return users<EOL>", "docstring": "Returns a list of Users represented as dictionary below:\n\n{\n    \"Arn\": ...,\n    \"AccessKeys\": ...,\n    \"CreateDate\": ...,  # str\n    \"InlinePolicies\": ...,\n    \"ManagedPolicies\": ...,\n    \"MFADevices\": ...,\n    \"Path\": ...,\n    \"UserId\": ...,\n    \"UserName\": ...,\n    \"SigningCerts\": ...\n}\n\n:param flags:\n:param conn: dict containing enough information to make a connection to the desired account.\n:return: list of dicts containing fully built out user.", "id": "f10852:m8"}
{"signature": "@modify_output<EOL>def get_server_certificate(server_certificate, flags=FLAGS.BASE, **conn):", "body": "if not server_certificate.get('<STR_LIT>'):<EOL><INDENT>raise MissingFieldException('<STR_LIT>')<EOL><DEDENT>server_certificate = modify(server_certificate, output='<STR_LIT>')<EOL>_conn_from_args(server_certificate, conn)<EOL>return registry.build_out(flags, start_with=server_certificate, pass_datastructure=True, **conn)<EOL>", "docstring": "Orchestrates all the calls required to fully build out an IAM User in the following format:\n\n{\n    \"Arn\": ...,\n    \"ServerCertificateName\": ...,\n    \"Path\": ...,\n    \"ServerCertificateId\": ...,\n    \"UploadDate\": ...,  # str\n    \"Expiration\": ...,  # str\n    \"CertificateBody\": ...,\n    \"CertificateChain\": ...,\n    \"_version\": 1\n}\n\n:param flags: By default, Users is disabled. This is somewhat expensive as it has to call the\n              `get_server_certificate` call multiple times.\n:param server_certificate: dict MUST contain the ServerCertificateName and also a combination of\n                           either the ARN or the account_number.\n:param output: Determines whether keys should be returned camelized or underscored.\n:param conn: dict containing enough information to make a connection to the desired account.\n             Must at least have 'assume_role' key.\n:return: dict containing fully built out Server Certificate.", "id": "f10854:m1"}
{"signature": "def get_item(item, **kwargs):", "body": "_item = {}<EOL>for k,v in inspect.getmembers(item, lambda a:not(inspect.isroutine(a))):<EOL><INDENT>if not k.startswith('<STR_LIT:_>') and not k in ignore_list:<EOL><INDENT>_item[k] = v<EOL><DEDENT><DEDENT>return sub_dict(_item)<EOL>", "docstring": "API versioning for each OpenStack service is independent. Generically capture\n    the public members (non-routine and non-private) of the OpenStack SDK objects.\n\nNote the lack of the modify_output decorator. Preserving the field naming allows\n    us to reconstruct objects and orchestrate from stored items.", "id": "f10857:m0"}
{"signature": "def sub_list(l):", "body": "r = []<EOL>for i in l:<EOL><INDENT>if type(i) in prims:<EOL><INDENT>r.append(i)<EOL><DEDENT>elif type(i) is list:<EOL><INDENT>r.append(sub_list(i))<EOL><DEDENT>elif type(i) is dict:<EOL><INDENT>r.append(sub_dict(i))<EOL><DEDENT>else:<EOL><INDENT>r.append(str(i))<EOL><DEDENT><DEDENT>r = sorted(r)<EOL>return r<EOL>", "docstring": "Recursively walk a data-structure sorting any lists along the way.\nAny unknown types get mapped to string representation\n\n:param l: list\n:return: sorted list, where any child lists are also sorted.", "id": "f10857:m1"}
{"signature": "def _modify(item, func):", "body": "result = dict()<EOL>for key in item:<EOL><INDENT>result[func(key)] = item[key]<EOL><DEDENT>return result<EOL>", "docstring": "Modifies each item.keys() string based on the func passed in.\nOften used with inflection's camelize or underscore methods.\n\n:param item: dictionary representing item to be modified\n:param func: function to run on each key string\n:return: dictionary where each key has been modified by func.", "id": "f10858:m0"}
{"signature": "def modify(item, output='<STR_LIT>'):", "body": "if output == '<STR_LIT>':<EOL><INDENT>return _modify(item, camelize)<EOL><DEDENT>elif output == '<STR_LIT>':<EOL><INDENT>return _modify(item, underscore)<EOL><DEDENT>", "docstring": "Calls _modify and either passes the inflection.camelize method or the inflection.underscore method.\n\n:param item: dictionary representing item to be modified\n:param output: string 'camelized' or 'underscored'\n:return:", "id": "f10858:m1"}
{"signature": "def call(self, function_expr, **kwargs):", "body": "if '<STR_LIT:.>' in function_expr:<EOL><INDENT>tech, service_type, function_name = function_expr.split('<STR_LIT:.>')<EOL><DEDENT>else:<EOL><INDENT>tech = self.conn_details.get('<STR_LIT>')<EOL>service_type = self.conn_details.get('<STR_LIT>', '<STR_LIT>')<EOL>function_name = function_expr<EOL><DEDENT>@sts_conn(tech, service_type=service_type)<EOL>def wrapped_method(function_name, **nargs):<EOL><INDENT>service_type = nargs.pop(nargs.pop('<STR_LIT>', '<STR_LIT>'))<EOL>return getattr(service_type, function_name)(**nargs)<EOL><DEDENT>kwargs.update(self.conn_details)<EOL>if '<STR_LIT>' in kwargs:<EOL><INDENT>del kwargs['<STR_LIT>']<EOL><DEDENT>return wrapped_method(function_name, **kwargs)<EOL>", "docstring": "cloudaux = CloudAux(\n    **{'account_number': '000000000000',\n       'assume_role': 'role_name',\n       'session_name': 'testing',\n       'region': 'us-east-1',\n       'tech': 'kms',\n       'service_type': 'client'\n    })\n\ncloudaux.call(\"list_aliases\")\ncloudaux.call(\"kms.client.list_aliases\")", "id": "f10862:c0:m1"}
{"signature": "def get_iso_string(input):", "body": "return input.replace(tzinfo=None, microsecond=<NUM_LIT:0>).isoformat() + '<STR_LIT>'<EOL>", "docstring": "Strips out the microseconds from datetime objects, and returns a proper ISO-format UTC string.\n\n    :param input: Datetime object.\n    :returns string: A datetime ISO format string with", "id": "f10862:m0"}
{"signature": "@staticmethod<EOL><INDENT>def go(function_expr, **kwargs):<DEDENT>", "body": "if '<STR_LIT:.>' in function_expr:<EOL><INDENT>tech, service_type, function_name = function_expr.split('<STR_LIT:.>')<EOL><DEDENT>else:<EOL><INDENT>tech = kwargs.pop('<STR_LIT>')<EOL>service_type = kwargs.get('<STR_LIT>')<EOL>function_name = function_expr<EOL><DEDENT>@sts_conn(tech, service_type=service_type)<EOL>def wrapped_method(function_name, **nargs):<EOL><INDENT>service_type = nargs.pop(nargs.pop('<STR_LIT>', '<STR_LIT>'))<EOL>return getattr(service_type, function_name)(**nargs)<EOL><DEDENT>return wrapped_method(function_name, **kwargs)<EOL>", "docstring": "CloudAux.go(\n    'list_aliases',\n    **{\n        'account_number': '000000000000',\n        'assume_role': 'role_name',\n        'session_name': 'cloudaux',\n        'region': 'us-east-1',\n        'tech': 'kms',\n        'service_type': 'client'\n    })\n\nCloudAux.go(\n    'kms.client.list_aliases',\n    **{\n        'account_number': '000000000000',\n        'assume_role': 'role_name',\n        'session_name': 'cloudaux',\n        'region': 'us-east-1'\n    })", "id": "f10862:c0:m2"}
{"signature": "def _update_cache_stats(self, key, result):", "body": "if result is None:<EOL><INDENT>self._CACHE_STATS['<STR_LIT>'].setdefault(key,<EOL>{'<STR_LIT>': <NUM_LIT:0>, '<STR_LIT>': <NUM_LIT:0>, '<STR_LIT>': <NUM_LIT:0>})<EOL><DEDENT>else:<EOL><INDENT>self._CACHE_STATS['<STR_LIT>'][key][result] +=<NUM_LIT:1><EOL><DEDENT>", "docstring": "Update the cache stats.\n\nIf no cache-result is specified, we iniitialize the key.\nOtherwise, we increment the correct cache-result.\n\nNote the behavior for expired.  A client can be expired and the key\nstill exists.", "id": "f10864:c0:m6"}
{"signature": "def get_access_details(self, key=None):", "body": "if key in self._CACHE_STATS:<EOL><INDENT>return self._CACHE_STATS['<STR_LIT>'][key]<EOL><DEDENT>else:<EOL><INDENT>return self._CACHE_STATS['<STR_LIT>']<EOL><DEDENT>", "docstring": "Get access details in cache.", "id": "f10864:c0:m7"}
{"signature": "def insert(self, key, obj, future_expiration_minutes=<NUM_LIT:15>):", "body": "expiration_time = self._calculate_expiration(future_expiration_minutes)<EOL>self._CACHE[key] = (expiration_time, obj)<EOL>return True<EOL>", "docstring": "Insert item into cache.\n\n:param key: key to look up in cache.\n:type key: ``object``\n\n:param obj: item to store in cache.\n:type obj: varies\n\n:param future_expiration_minutes: number of minutes item is valid\n:type param: ``int``\n\n:returns: True\n:rtype: ``bool``", "id": "f10864:c0:m2"}
{"signature": "def get(self, key, delete_if_expired=True):", "body": "self._update_cache_stats(key, None)<EOL>if key in self._CACHE:<EOL><INDENT>(expiration, obj) = self._CACHE[key]<EOL>if expiration > self._now():<EOL><INDENT>self._update_cache_stats(key, '<STR_LIT>')<EOL>return obj<EOL><DEDENT>else:<EOL><INDENT>if delete_if_expired:<EOL><INDENT>self.delete(key)<EOL>self._update_cache_stats(key, '<STR_LIT>')<EOL>return None<EOL><DEDENT><DEDENT><DEDENT>self._update_cache_stats(key, '<STR_LIT>')<EOL>return None<EOL>", "docstring": "Retrieve key from Cache.\n\n:param key: key to look up in cache.\n:type key: ``object``\n\n:param delete_if_expired: remove value from cache if it is expired.\n                          Default is True.\n:type delete_if_expired: ``bool``\n\n:returns: value from cache or None\n:rtype: varies or None", "id": "f10864:c0:m1"}
{"signature": "def get_available_clients(service):", "body": "details = GOOGLE_CLIENT_MAP.get(service, None)<EOL>if details:<EOL><INDENT>return [details]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Return clients available for this service.\n\n:param service: Google Cloud service name. Examples: 'iam', 'gce'.\n:type service: ``str``\n\n:return: list of dictionaries describing the clients available.\n:rtype: ``list``", "id": "f10865:m2"}
{"signature": "def _gcp_client(project, mod_name, pkg_name, key_file=None, http_auth=None,<EOL>user_agent=None):", "body": "client = None<EOL>if http_auth is None:<EOL><INDENT>http_auth = _googleauth(key_file=key_file, user_agent=user_agent)<EOL><DEDENT>try:<EOL><INDENT>google_module = importlib.import_module('<STR_LIT:.>' + mod_name,<EOL>package=pkg_name)<EOL>client = google_module.Client(use_GAX=USE_GAX, project=project,<EOL>http=http_auth)<EOL><DEDENT>except ImportError as ie:<EOL><INDENT>import_err = '<STR_LIT>' % (pkg_name, mod_name)<EOL>raise ImportError(import_err)<EOL><DEDENT>except TypeError:<EOL><INDENT>client = google_module.Client(project=project, http=http_auth)<EOL><DEDENT>if user_agent and hasattr(client, '<STR_LIT>'):<EOL><INDENT>client.user_agent = user_agent<EOL><DEDENT>return client<EOL>", "docstring": "Private GCP client builder.\n\n:param project: Google Cloud project string.\n:type project: ``str``\n\n:param mod_name: Module name to load.  Should be found in sys.path.\n:type mod_name: ``str``\n\n:param pkg_name: package name that mod_name is part of.  Default is 'google.cloud' .\n:type pkg_name: ``str``\n\n:param key_file: Default is None.\n:type key_file: ``str`` or None\n\n:param http_auth: httplib2 authorized client. Default is None.\n:type http_auth: :class: `HTTPLib2`\n\n:param user_agent: User Agent string to use in requests. Default is None.\n:type http_auth: ``str`` or None\n\n:return: GCP client\n:rtype: ``object``", "id": "f10865:m4"}
{"signature": "def get_gcp_client(**kwargs):", "body": "return _gcp_client(project=kwargs['<STR_LIT>'], mod_name=kwargs['<STR_LIT>'],<EOL>pkg_name=kwargs.get('<STR_LIT>', '<STR_LIT>'),<EOL>key_file=kwargs.get('<STR_LIT>', None),<EOL>http_auth=kwargs.get('<STR_LIT:http>', None),<EOL>user_agent=kwargs.get('<STR_LIT>', None))<EOL>", "docstring": "Public GCP client builder.", "id": "f10865:m3"}
{"signature": "def _googleauth(key_file=None, scopes=[], user_agent=None):", "body": "if key_file:<EOL><INDENT>if not scopes:<EOL><INDENT>scopes = DEFAULT_SCOPES<EOL><DEDENT>creds = ServiceAccountCredentials.from_json_keyfile_name(key_file,<EOL>scopes=scopes)<EOL><DEDENT>else:<EOL><INDENT>creds = GoogleCredentials.get_application_default()<EOL><DEDENT>http = Http()<EOL>if user_agent:<EOL><INDENT>http = set_user_agent(http, user_agent)<EOL><DEDENT>http_auth = creds.authorize(http)<EOL>return http_auth<EOL>", "docstring": "Google http_auth helper.\n\nIf key_file is not specified, default credentials will be used.\n\nIf scopes is specified (and key_file), will be used instead of DEFAULT_SCOPES\n\n:param key_file: path to key file to use. Default is None\n:type key_file: ``str``\n\n:param scopes: scopes to set.  Default is DEFAUL_SCOPES\n:type scopes: ``list``\n\n:param user_agent: User Agent string to use in requests. Default is None.\n:type http_auth: ``str`` or None\n\n:return: HTTPLib2 authorized client.\n:rtype: :class: `HTTPLib2`", "id": "f10865:m7"}
{"signature": "@gcp_conn('<STR_LIT>')<EOL>def list_firewall_rules(client=None, **kwargs):", "body": "return gce_list(service=client.firewalls(),<EOL>**kwargs)<EOL>", "docstring": ":rtype: ``list``", "id": "f10866:m0"}
{"signature": "def get_object_in_bucket(**kwargs):", "body": "bucket = get_bucket(**kwargs)<EOL>if bucket:<EOL><INDENT>return bucket.get_blob(kwargs['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Retrieve object from Bucket.\n\n:param Bucket: name of bucket\n:type Bucket: ``str``\n\n:returns: object from bucket or None\n:rtype ``object`` or None", "id": "f10868:m4"}
{"signature": "@gcp_conn('<STR_LIT>')<EOL>def get_bucket(client=None, **kwargs):", "body": "bucket = client.lookup_bucket(kwargs['<STR_LIT>'])<EOL>return bucket<EOL>", "docstring": "Get bucket object.\n\n:param client: client object to use.\n:type client: Google Cloud Storage client\n\n:returns: Bucket object\n:rtype: ``object``", "id": "f10868:m1"}
{"signature": "def get_user_agent(**kwargs):", "body": "user_agent = kwargs.get('<STR_LIT>', None)<EOL>if not user_agent:<EOL><INDENT>return get_user_agent_default()<EOL><DEDENT>return user_agent<EOL>", "docstring": "If there is a useragent, find it.\n\nLook in the keywords for user_agent. If not found,\nreturn get_user_agent_default", "id": "f10869:m10"}
{"signature": "def get_gcp_stats():", "body": "from cloudaux.gcp.decorators import _GCP_STATS<EOL>return _GCP_STATS<EOL>", "docstring": "Retrieve stats, such as function timings.", "id": "f10869:m8"}
{"signature": "def service_list(service=None, key_name=None, **kwargs):", "body": "resp_list = []<EOL>req = service.list(**kwargs)<EOL>while req is not None:<EOL><INDENT>resp = req.execute()<EOL>if key_name and key_name in resp:<EOL><INDENT>resp_list.extend(resp[key_name])<EOL><DEDENT>else:<EOL><INDENT>resp_list.append(resp)<EOL><DEDENT>if hasattr(service, '<STR_LIT>'):<EOL><INDENT>req = service.list_next(previous_request=req,<EOL>previous_response=resp)<EOL><DEDENT>else:<EOL><INDENT>req = None<EOL><DEDENT><DEDENT>return resp_list<EOL>", "docstring": "General list function for Google APIs.", "id": "f10869:m5"}
{"signature": "def get_cache_access_details(key=None):", "body": "from cloudaux.gcp.decorators import _GCP_CACHE<EOL>return _GCP_CACHE.get_access_details(key=key)<EOL>", "docstring": "Retrieve detailed cache information.", "id": "f10869:m7"}
{"signature": "def gce_list_aggregated(service=None, key_name='<STR_LIT:name>', **kwargs):", "body": "resp_list = []<EOL>req = service.aggregatedList(**kwargs)<EOL>while req is not None:<EOL><INDENT>resp = req.execute()<EOL>for location, item in resp['<STR_LIT>'].items():<EOL><INDENT>if key_name in item:<EOL><INDENT>resp_list.extend(item[key_name])<EOL><DEDENT><DEDENT>req = service.aggregatedList_next(previous_request=req,<EOL>previous_response=resp)<EOL><DEDENT>return resp_list<EOL>", "docstring": "General aggregated list function for the GCE service.", "id": "f10869:m3"}
{"signature": "def _build_key(func_name, args, kwargs):", "body": "return \"<STR_LIT>\" % (func_name, args, kwargs)<EOL>", "docstring": "Builds key for cache and stats.", "id": "f10870:m0"}
{"signature": "def gcp_stats():", "body": "def decorator(f):<EOL><INDENT>@wraps(f)<EOL>def decorated_function(*args, **kwargs):<EOL><INDENT>start_time = time.time()<EOL>result = f(*args, **kwargs)<EOL>end_time = time.time()<EOL>strkey = _build_key(f.__name__, args, kwargs)<EOL>_GCP_STATS.setdefault(strkey, []).append(end_time - start_time)<EOL>return result<EOL><DEDENT>return decorated_function<EOL><DEDENT>return decorator<EOL>", "docstring": "Collect stats\n\nSpecifically, time function calls\n:returns: function response\n:rtype: varies", "id": "f10870:m2"}
{"signature": "@gcp_conn('<STR_LIT>')<EOL>def get_serviceaccount_keys(client=None, **kwargs):", "body": "service_account=kwargs.pop('<STR_LIT>')<EOL>kwargs['<STR_LIT:name>'] = service_account<EOL>return service_list(client.projects().serviceAccounts().keys(),<EOL>key_name='<STR_LIT>', **kwargs)<EOL>", "docstring": "service_account='string'", "id": "f10871:m2"}
{"signature": "@gcp_conn('<STR_LIT>')<EOL>def get_serviceaccount(client=None, **kwargs):", "body": "service_account=kwargs.pop('<STR_LIT>')<EOL>resp = client.projects().serviceAccounts().get(<EOL>name=service_account).execute()<EOL>return resp<EOL>", "docstring": "service_account='string'", "id": "f10871:m1"}
{"signature": "def write_backreferences(seen_backrefs, gallery_conf,<EOL>target_dir, fname, snippet):", "body": "example_file = os.path.join(target_dir, fname)<EOL>backrefs = scan_used_functions(example_file, gallery_conf)<EOL>for backref in backrefs:<EOL><INDENT>include_path = os.path.join(gallery_conf['<STR_LIT>'],<EOL>'<STR_LIT>' % backref)<EOL>seen = backref in seen_backrefs<EOL>with open(include_path, '<STR_LIT:a>' if seen else '<STR_LIT:w>') as ex_file:<EOL><INDENT>if not seen:<EOL><INDENT>heading = '<STR_LIT>' % backref<EOL>ex_file.write(heading + '<STR_LIT:\\n>')<EOL>ex_file.write('<STR_LIT>' * len(heading) + '<STR_LIT:\\n>')<EOL><DEDENT>ex_file.write(_thumbnail_div(target_dir, fname, snippet,<EOL>is_backref=True))<EOL>seen_backrefs.add(backref)<EOL><DEDENT><DEDENT>", "docstring": "Writes down back reference files, which include a thumbnail list\n    of examples using a certain module", "id": "f10905:m4"}
{"signature": "def get_short_module_name(module_name, obj_name):", "body": "parts = module_name.split('<STR_LIT:.>')<EOL>short_name = module_name<EOL>for i in range(len(parts) - <NUM_LIT:1>, <NUM_LIT:0>, -<NUM_LIT:1>):<EOL><INDENT>short_name = '<STR_LIT:.>'.join(parts[:i])<EOL>try:<EOL><INDENT>exec('<STR_LIT>' % (short_name, obj_name))<EOL><DEDENT>except ImportError:<EOL><INDENT>short_name = '<STR_LIT:.>'.join(parts[:(i + <NUM_LIT:1>)])<EOL>break<EOL><DEDENT><DEDENT>return short_name<EOL>", "docstring": "Get the shortest possible module name", "id": "f10905:m0"}
{"signature": "def save_figures(image_path, fig_count, gallery_conf):", "body": "figure_list = []<EOL>fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()<EOL>for fig_mngr in fig_managers:<EOL><INDENT>fig = plt.figure(fig_mngr.num)<EOL>kwargs = {}<EOL>to_rgba = matplotlib.colors.colorConverter.to_rgba<EOL>for attr in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>fig_attr = getattr(fig, '<STR_LIT>' + attr)()<EOL>default_attr = matplotlib.rcParams['<STR_LIT>' + attr]<EOL>if to_rgba(fig_attr) != to_rgba(default_attr):<EOL><INDENT>kwargs[attr] = fig_attr<EOL><DEDENT><DEDENT>current_fig = image_path.format(fig_count + fig_mngr.num)<EOL>fig.savefig(current_fig, **kwargs)<EOL>figure_list.append(current_fig)<EOL><DEDENT>if gallery_conf.get('<STR_LIT>', False):<EOL><INDENT>from mayavi import mlab<EOL>e = mlab.get_engine()<EOL>last_matplotlib_fig_num = len(figure_list)<EOL>total_fig_num = last_matplotlib_fig_num + len(e.scenes)<EOL>mayavi_fig_nums = range(last_matplotlib_fig_num, total_fig_num)<EOL>for scene, mayavi_fig_num in zip(e.scenes, mayavi_fig_nums):<EOL><INDENT>current_fig = image_path.format(mayavi_fig_num)<EOL>mlab.savefig(current_fig, figure=scene)<EOL>scale_image(current_fig, current_fig, <NUM_LIT>, <NUM_LIT>)<EOL>figure_list.append(current_fig)<EOL><DEDENT>mlab.close(all=True)<EOL><DEDENT>return figure_list<EOL>", "docstring": "Save all open matplotlib figures of the example code-block\n\n    Parameters\n    ----------\n    image_path : str\n        Path where plots are saved (format string which accepts figure number)\n    fig_count : int\n        Previous figure number count. Figure number add from this number\n\n    Returns\n    -------\n    list of strings containing the full path to each figure", "id": "f10906:m8"}
{"signature": "def get_docstring_and_rest(filename):", "body": "with open(filename) as f:<EOL><INDENT>content = f.read()<EOL><DEDENT>node = ast.parse(content)<EOL>if not isinstance(node, ast.Module):<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(node.__class__.__name__))<EOL><DEDENT>if node.body and isinstance(node.body[<NUM_LIT:0>], ast.Expr) andisinstance(node.body[<NUM_LIT:0>].value, ast.Str):<EOL><INDENT>docstring_node = node.body[<NUM_LIT:0>]<EOL>docstring = docstring_node.value.s<EOL>rest = content.split('<STR_LIT:\\n>', docstring_node.lineno)[-<NUM_LIT:1>]<EOL>return docstring, rest<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>.format(filename))<EOL><DEDENT>", "docstring": "Separate `filename` content between docstring and the rest\n\n    Strongly inspired from ast.get_docstring.\n\n    Returns\n    -------\n    docstring: str\n        docstring of `filename`\n    rest: str\n        `filename` content without the docstring", "id": "f10906:m0"}
{"signature": "def generate_file_rst(fname, target_dir, src_dir, gallery_conf):", "body": "src_file = os.path.join(src_dir, fname)<EOL>example_file = os.path.join(target_dir, fname)<EOL>shutil.copyfile(src_file, example_file)<EOL>image_dir = os.path.join(target_dir, '<STR_LIT>')<EOL>if not os.path.exists(image_dir):<EOL><INDENT>os.makedirs(image_dir)<EOL><DEDENT>base_image_name = os.path.splitext(fname)[<NUM_LIT:0>]<EOL>image_fname = '<STR_LIT>' + base_image_name + '<STR_LIT>'<EOL>image_path = os.path.join(image_dir, image_fname)<EOL>script_blocks = split_code_and_text_blocks(example_file)<EOL>amount_of_code = sum([len(bcontent)<EOL>for blabel, bcontent in script_blocks<EOL>if blabel == '<STR_LIT:code>'])<EOL>if _plots_are_current(example_file, image_path):<EOL><INDENT>return amount_of_code<EOL><DEDENT>time_elapsed = <NUM_LIT:0><EOL>ref_fname = example_file.replace(os.path.sep, '<STR_LIT:_>')<EOL>example_rst = \"\"\"<STR_LIT>\"\"\".format(ref_fname)<EOL>example_nb = Notebook(fname, target_dir)<EOL>filename_pattern = gallery_conf.get('<STR_LIT>')<EOL>if re.search(filename_pattern, src_file) and gallery_conf['<STR_LIT>']:<EOL><INDENT>example_globals = {'<STR_LIT>': '<STR_LIT>'}<EOL>fig_count = <NUM_LIT:0><EOL>is_example_notebook_like = len(script_blocks) > <NUM_LIT:2><EOL>for blabel, bcontent in script_blocks:<EOL><INDENT>if blabel == '<STR_LIT:code>':<EOL><INDENT>code_output, rtime, fig_count = execute_script(bcontent,<EOL>example_globals,<EOL>image_path,<EOL>fig_count,<EOL>src_file,<EOL>gallery_conf)<EOL>time_elapsed += rtime<EOL>example_nb.add_code_cell(bcontent)<EOL>if is_example_notebook_like:<EOL><INDENT>example_rst += codestr2rst(bcontent) + '<STR_LIT:\\n>'<EOL>example_rst += code_output<EOL><DEDENT>else:<EOL><INDENT>example_rst += code_output<EOL>example_rst += codestr2rst(bcontent) + '<STR_LIT:\\n>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>example_rst += text2string(bcontent) + 
'<STR_LIT:\\n>'<EOL>example_nb.add_markdown_cell(text2string(bcontent))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for blabel, bcontent in script_blocks:<EOL><INDENT>if blabel == '<STR_LIT:code>':<EOL><INDENT>example_rst += codestr2rst(bcontent) + '<STR_LIT:\\n>'<EOL>example_nb.add_code_cell(bcontent)<EOL><DEDENT>else:<EOL><INDENT>example_rst += bcontent + '<STR_LIT:\\n>'<EOL>example_nb.add_markdown_cell(text2string(bcontent))<EOL><DEDENT><DEDENT><DEDENT>save_thumbnail(image_path, base_image_name, gallery_conf)<EOL>time_m, time_s = divmod(time_elapsed, <NUM_LIT>)<EOL>example_nb.save_file()<EOL>with open(os.path.join(target_dir, base_image_name + '<STR_LIT>'), '<STR_LIT:w>') as f:<EOL><INDENT>example_rst += CODE_DOWNLOAD.format(time_m, time_s, fname,<EOL>example_nb.file_name)<EOL>f.write(example_rst)<EOL><DEDENT>return amount_of_code<EOL>", "docstring": "Generate the rst file for a given example.\n\n        Returns the amout of code (in characters) of the corresponding\n        files.", "id": "f10906:m13"}
{"signature": "def get_md5sum(src_file):", "body": "with open(src_file, '<STR_LIT:r>') as src_data:<EOL><INDENT>src_content = src_data.read()<EOL>if sys.version_info[<NUM_LIT:0>] == <NUM_LIT:3>:<EOL><INDENT>src_content = src_content.encode('<STR_LIT:utf-8>')<EOL><DEDENT>src_md5 = hashlib.md5(src_content).hexdigest()<EOL><DEDENT>return src_md5<EOL>", "docstring": "Returns md5sum of file", "id": "f10906:m5"}
{"signature": "def setup(app):", "body": "app.add_config_value('<STR_LIT>', True, '<STR_LIT:html>')<EOL>app.add_config_value('<STR_LIT>', False, '<STR_LIT:html>')<EOL>app.add_config_value('<STR_LIT>', gallery_conf, '<STR_LIT:html>')<EOL>app.add_stylesheet('<STR_LIT>')<EOL>app.connect('<STR_LIT>', generate_gallery_rst)<EOL>app.connect('<STR_LIT>', embed_code_links)<EOL>", "docstring": "Setup sphinx-gallery sphinx extension", "id": "f10908:m2"}
{"signature": "def generate_gallery_rst(app):", "body": "try:<EOL><INDENT>plot_gallery = eval(app.builder.config.plot_gallery)<EOL><DEDENT>except TypeError:<EOL><INDENT>plot_gallery = bool(app.builder.config.plot_gallery)<EOL><DEDENT>gallery_conf.update(app.config.sphinx_gallery_conf)<EOL>gallery_conf.update(plot_gallery=plot_gallery)<EOL>gallery_conf.update(abort_on_example_error=app.builder.config.abort_on_example_error)<EOL>app.config.sphinx_gallery_conf = gallery_conf<EOL>app.config.html_static_path.append(glr_path_static())<EOL>clean_gallery_out(app.builder.outdir)<EOL>examples_dirs = gallery_conf['<STR_LIT>']<EOL>gallery_dirs = gallery_conf['<STR_LIT>']<EOL>if not isinstance(examples_dirs, list):<EOL><INDENT>examples_dirs = [examples_dirs]<EOL><DEDENT>if not isinstance(gallery_dirs, list):<EOL><INDENT>gallery_dirs = [gallery_dirs]<EOL><DEDENT>mod_examples_dir = os.path.relpath(gallery_conf['<STR_LIT>'],<EOL>app.builder.srcdir)<EOL>seen_backrefs = set()<EOL>for examples_dir, gallery_dir in zip(examples_dirs, gallery_dirs):<EOL><INDENT>examples_dir = os.path.relpath(examples_dir,<EOL>app.builder.srcdir)<EOL>gallery_dir = os.path.relpath(gallery_dir,<EOL>app.builder.srcdir)<EOL>for workdir in [examples_dir, gallery_dir, mod_examples_dir]:<EOL><INDENT>if not os.path.exists(workdir):<EOL><INDENT>os.makedirs(workdir)<EOL><DEDENT><DEDENT>fhindex = open(os.path.join(gallery_dir, '<STR_LIT>'), '<STR_LIT:w>')<EOL>fhindex.write(generate_dir_rst(examples_dir, gallery_dir, gallery_conf,<EOL>seen_backrefs))<EOL>for directory in sorted(os.listdir(examples_dir)):<EOL><INDENT>if os.path.isdir(os.path.join(examples_dir, directory)):<EOL><INDENT>src_dir = os.path.join(examples_dir, directory)<EOL>target_dir = os.path.join(gallery_dir, directory)<EOL>fhindex.write(generate_dir_rst(src_dir, target_dir,<EOL>gallery_conf,<EOL>seen_backrefs))<EOL><DEDENT><DEDENT>fhindex.flush()<EOL><DEDENT>", "docstring": "Generate the Main examples gallery reStructuredText\n\n    Start the 
sphinx-gallery configuration and recursively scan the examples\n    directories in order to populate the examples gallery", "id": "f10908:m1"}
{"signature": "def add_code_cell(self, code):", "body": "code_cell = {<EOL>\"<STR_LIT>\": \"<STR_LIT:code>\",<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": {\"<STR_LIT>\": False},<EOL>\"<STR_LIT>\": [],<EOL>\"<STR_LIT:source>\": [code.strip()]<EOL>}<EOL>self.work_notebook[\"<STR_LIT>\"].append(code_cell)<EOL>", "docstring": "Add a code cell to the notebook\n\n        Parameters\n        ----------\n        code : str\n            Cell content", "id": "f10909:c0:m1"}
{"signature": "def _get_link(self, cobj):", "body": "fname_idx = None<EOL>full_name = cobj['<STR_LIT>'] + '<STR_LIT:.>' + cobj['<STR_LIT:name>']<EOL>if full_name in self._searchindex['<STR_LIT>']:<EOL><INDENT>value = self._searchindex['<STR_LIT>'][full_name]<EOL>if isinstance(value, dict):<EOL><INDENT>value = value[next(iter(value.keys()))]<EOL><DEDENT>fname_idx = value[<NUM_LIT:0>]<EOL><DEDENT>elif cobj['<STR_LIT>'] in self._searchindex['<STR_LIT>']:<EOL><INDENT>value = self._searchindex['<STR_LIT>'][cobj['<STR_LIT>']]<EOL>if cobj['<STR_LIT:name>'] in value.keys():<EOL><INDENT>fname_idx = value[cobj['<STR_LIT:name>']][<NUM_LIT:0>]<EOL><DEDENT><DEDENT>if fname_idx is not None:<EOL><INDENT>fname = self._searchindex['<STR_LIT>'][fname_idx] + '<STR_LIT>'<EOL>if self._is_windows:<EOL><INDENT>fname = fname.replace('<STR_LIT:/>', '<STR_LIT:\\\\>')<EOL>link = os.path.join(self.doc_url, fname)<EOL><DEDENT>else:<EOL><INDENT>link = posixpath.join(self.doc_url, fname)<EOL><DEDENT>if hasattr(link, '<STR_LIT>'):<EOL><INDENT>link = link.decode('<STR_LIT:utf-8>', '<STR_LIT:replace>')<EOL><DEDENT>if link in self._page_cache:<EOL><INDENT>html = self._page_cache[link]<EOL><DEDENT>else:<EOL><INDENT>html = get_data(link, self.gallery_dir)<EOL>self._page_cache[link] = html<EOL><DEDENT>comb_names = [cobj['<STR_LIT>'] + '<STR_LIT:.>' + cobj['<STR_LIT:name>']]<EOL>if self.extra_modules_test is not None:<EOL><INDENT>for mod in self.extra_modules_test:<EOL><INDENT>comb_names.append(mod + '<STR_LIT:.>' + cobj['<STR_LIT:name>'])<EOL><DEDENT><DEDENT>url = False<EOL>if hasattr(html, '<STR_LIT>'):<EOL><INDENT>html = html.decode('<STR_LIT:utf-8>', '<STR_LIT:replace>')<EOL><DEDENT>for comb_name in comb_names:<EOL><INDENT>if hasattr(comb_name, '<STR_LIT>'):<EOL><INDENT>comb_name = comb_name.decode('<STR_LIT:utf-8>', '<STR_LIT:replace>')<EOL><DEDENT>if comb_name in html:<EOL><INDENT>url = link + u'<STR_LIT:#>' + comb_name<EOL><DEDENT><DEDENT>link = url<EOL><DEDENT>else:<EOL><INDENT>link = 
False<EOL><DEDENT>return link<EOL>", "docstring": "Get a valid link, False if not found", "id": "f10910:c0:m1"}
{"signature": "def _get_data(url):", "body": "if url.startswith('<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>resp = urllib.urlopen(url)<EOL>encoding = resp.headers.dict.get('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>except AttributeError:<EOL><INDENT>resp = urllib.request.urlopen(url)<EOL>encoding = resp.headers.get('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>data = resp.read()<EOL>if encoding == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>elif encoding == '<STR_LIT>':<EOL><INDENT>data = StringIO(data)<EOL>data = gzip.GzipFile(fileobj=data).read()<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>with open(url, '<STR_LIT:r>') as fid:<EOL><INDENT>data = fid.read()<EOL><DEDENT><DEDENT>return data<EOL>", "docstring": "Helper function to get data over http or from a local file", "id": "f10910:m0"}
{"signature": "def resolve(self, cobj, this_url):", "body": "full_name = cobj['<STR_LIT>'] + '<STR_LIT:.>' + cobj['<STR_LIT:name>']<EOL>link = self._link_cache.get(full_name, None)<EOL>if link is None:<EOL><INDENT>link = self._get_link(cobj)<EOL>self._link_cache[full_name] = link<EOL><DEDENT>if link is False or link is None:<EOL><INDENT>return None<EOL><DEDENT>if self.relative:<EOL><INDENT>link = os.path.relpath(link, start=this_url)<EOL>if self._is_windows:<EOL><INDENT>link = link.replace('<STR_LIT:\\\\>', '<STR_LIT:/>')<EOL><DEDENT>link = link[<NUM_LIT:3>:]<EOL><DEDENT>return link<EOL>", "docstring": "Resolve the link to the documentation, returns None if not found\n\n        Parameters\n        ----------\n        cobj : dict\n            Dict with information about the \"code object\" for which we are\n            resolving a link.\n            cobi['name'] : function or class name (str)\n            cobj['module_short'] : shortened module name (str)\n            cobj['module'] : module name (str)\n        this_url: str\n            URL of the current page. Needed to construct relative URLs\n            (only used if relative=True in constructor).\n\n        Returns\n        -------\n        link : str | None\n            The link (URL) to the documentation.", "id": "f10910:c0:m2"}
{"signature": "def parse_sphinx_searchindex(searchindex):", "body": "<EOL>if hasattr(searchindex, '<STR_LIT>'):<EOL><INDENT>searchindex = searchindex.decode('<STR_LIT>')<EOL><DEDENT>query = '<STR_LIT>'<EOL>pos = searchindex.find(query)<EOL>if pos < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>sel = _select_block(searchindex[pos:], '<STR_LIT:{>', '<STR_LIT:}>')<EOL>objects = _parse_dict_recursive(sel)<EOL>query = '<STR_LIT>'<EOL>pos = searchindex.find(query)<EOL>if pos < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>filenames = searchindex[pos + len(query) + <NUM_LIT:1>:]<EOL>filenames = filenames[:filenames.find('<STR_LIT:]>')]<EOL>filenames = [f.strip('<STR_LIT:\">') for f in filenames.split('<STR_LIT:U+002C>')]<EOL>return filenames, objects<EOL>", "docstring": "Parse a Sphinx search index\n\n    Parameters\n    ----------\n    searchindex : str\n        The Sphinx search index (contents of searchindex.js)\n\n    Returns\n    -------\n    filenames : list of str\n        The file names parsed from the search index.\n    objects : dict\n        The objects parsed from the search index.", "id": "f10910:m4"}
{"signature": "def corrmtx(x_input, m, method='<STR_LIT>'):", "body": "valid_methods = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>']<EOL>if method not in valid_methods:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % valid_methods)<EOL><DEDENT>from scipy.linalg import toeplitz<EOL>N = len(x_input)<EOL>if isinstance(x_input, list):<EOL><INDENT>x = numpy.array(x_input)<EOL><DEDENT>else:<EOL><INDENT>x = x_input.copy()<EOL><DEDENT>if x.dtype == complex:<EOL><INDENT>complex_type = True<EOL><DEDENT>else:<EOL><INDENT>complex_type = False<EOL><DEDENT>if method in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>Lp = toeplitz(x[<NUM_LIT:0>:m], [<NUM_LIT:0>]*(m+<NUM_LIT:1>))<EOL><DEDENT>Tp = toeplitz(x[m:N], x[m::-<NUM_LIT:1>])<EOL>if method in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>Up = toeplitz([<NUM_LIT:0>]*(m+<NUM_LIT:1>), numpy.insert(x[N:N-m-<NUM_LIT:1>:-<NUM_LIT:1>],<NUM_LIT:0>,<NUM_LIT:0>))<EOL><DEDENT>if method == '<STR_LIT>':<EOL><INDENT>if complex_type == True:<EOL><INDENT>C = numpy.zeros((N+m, m+<NUM_LIT:1>), dtype=complex)<EOL><DEDENT>else:<EOL><INDENT>C = numpy.zeros((N+m, m+<NUM_LIT:1>))<EOL><DEDENT>for i in range(<NUM_LIT:0>, m):<EOL><INDENT>C[i] = Lp[i]<EOL><DEDENT>for i in range(m, N):<EOL><INDENT>C[i] = Tp[i-m]<EOL><DEDENT>for i in range(N, N+m):<EOL><INDENT>C[i] = Up[i-N]<EOL><DEDENT><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>if complex_type == True:<EOL><INDENT>C = numpy.zeros((N, m+<NUM_LIT:1>), dtype=complex)<EOL><DEDENT>else:<EOL><INDENT>C = numpy.zeros((N, m+<NUM_LIT:1>))<EOL><DEDENT>for i in range(<NUM_LIT:0>, m):<EOL><INDENT>C[i] = Lp[i]<EOL><DEDENT>for i in range(m, N):<EOL><INDENT>C[i] = Tp[i-m]<EOL><DEDENT><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>if complex_type == True:<EOL><INDENT>C = numpy.zeros((N, m+<NUM_LIT:1>), dtype=complex)<EOL><DEDENT>else:<EOL><INDENT>C = numpy.zeros((N, m+<NUM_LIT:1>))<EOL><DEDENT>for i in range(<NUM_LIT:0>, N-m):<EOL><INDENT>C[i] = Tp[i]<EOL><DEDENT>for i in range(N-m, N):<EOL><INDENT>C[i] = 
Up[i-N+m]<EOL><DEDENT><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>return Tp<EOL><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>if complex_type == True:<EOL><INDENT>C = numpy.zeros((<NUM_LIT:2>*(N-m), m+<NUM_LIT:1>), dtype=complex)<EOL><DEDENT>else:<EOL><INDENT>C = numpy.zeros((<NUM_LIT:2>*(N-m), m+<NUM_LIT:1>))<EOL><DEDENT>for i in range(<NUM_LIT:0>, N-m):<EOL><INDENT>C[i] = Tp[i]<EOL><DEDENT>Tp = numpy.fliplr(Tp.conj())<EOL>for i in range(N-m, <NUM_LIT:2>*(N-m)):<EOL><INDENT>C[i] = Tp[i-N+m]<EOL><DEDENT><DEDENT>return C<EOL>", "docstring": "r\"\"\"Correlation matrix\n\n    This function is used by PSD estimator functions. It generates\n    the correlation matrix from a correlation data set and a maximum lag.\n\n    :param array x: autocorrelation samples (1D)\n    :param int m: the maximum lag\n\n    Depending on the choice of the method, the correlation matrix has different\n    sizes, but the number of rows is always m+1.\n\n    Method can be :\n\n    * 'autocorrelation': (default) X is the (n+m)-by-(m+1) rectangular Toeplitz\n      matrix derived using prewindowed and postwindowed data.\n    * 'prewindowed': X is the n-by-(m+1) rectangular Toeplitz matrix derived\n      using prewindowed data only.\n    * 'postwindowed': X is the n-by-(m+1) rectangular Toeplitz matrix that\n      derived using postwindowed data only.\n    * 'covariance': X is the (n-m)-by-(m+1) rectangular Toeplitz matrix\n      derived using nonwindowed data.\n    * 'modified': X is the 2(n-m)-by-(m+1) modified rectangular Toeplitz\n      matrix that generates an autocorrelation estimate for the length n data\n      vector x, derived using forward and backward prediction error estimates.\n\n\n    :return:\n        * the autocorrelation matrix\n        * R, the (m+1)-by-(m+1) autocorrelation matrix estimate ``R= X'*X``.\n\n    .. rubric:: Algorithm details:\n\n    The **autocorrelation** matrix is a :math:`(N+p) \\times (p+1)` rectangular Toeplilz\n    data matrix:\n\n    .. 
math:: X_p = \\begin{pmatrix}L_p\\\\T_p\\\\Up\\end{pmatrix}\n\n    where the lower triangular :math:`p \\times (p+1)` matrix :math:`L_p` is\n\n    .. math:: L_p =\n        \\begin{pmatrix}\n        x[1]     &  \\cdots     & 0     & 0        \\\\\n        \\vdots    &  \\ddots     & \\vdots & \\vdots    \\\\\n        x[p]     &  \\cdots     & x[1]  & 0\n        \\end{pmatrix}\n\n    where the rectangular :math:`(N-p) \\times (p+1)` matrix :math:`T_p` is\n\n    .. math:: T_p =\n        \\begin{pmatrix}\n        x[p+1]     &  \\cdots    & x[1]        \\\\\n        \\vdots    &  \\ddots     & \\vdots    \\\\\n        x[N-p]     &  \\cdots    & x[p+1]        \\\\\n        \\vdots    &  \\ddots     & \\vdots    \\\\\n        x[N]     &  \\cdots      & x[N-p]\n        \\end{pmatrix}\n\n    and where the upper triangular :math:`p \\times (p+1)` matrix :math:`U_p` is\n\n    .. math:: U_p =\n        \\begin{pmatrix}\n        0         &  x[N]      & \\cdots     & x[N-p+1]        \\\\\n        \\vdots    &  \\vdots    & \\ddots & \\vdots    \\\\\n        0         &  0         & \\cdots  & x[N]\n        \\end{pmatrix}\n\n    From this definition, the prewindowed matrix is\n\n    .. math:: X_p = \\begin{pmatrix}L_p\\\\T_p\\end{pmatrix}\n\n    the postwindowed matrix is\n\n    .. math:: X_p = \\begin{pmatrix}T_p\\\\U_p\\end{pmatrix}\n\n    the covariance matrix is:\n\n    .. math:: X_p = \\begin{pmatrix}T_p\\end{pmatrix}\n\n    and the modified covariance matrix is:\n\n    .. math:: X_p = \\begin{pmatrix}T_p\\\\T_p^*\\end{pmatrix}", "id": "f10916:m1"}
{"signature": "def CORRELATION(x, y=None, maxlags=None, norm='<STR_LIT>'):", "body": "assert norm in ['<STR_LIT>','<STR_LIT>', '<STR_LIT>', None]<EOL>x = np.array(x)<EOL>if y is None:<EOL><INDENT>y = x<EOL><DEDENT>else:<EOL><INDENT>y = np.array(y)<EOL><DEDENT>N = max(len(x), len(y))<EOL>if len(x) < N:<EOL><INDENT>x = y.copy()<EOL>x.resize(N)<EOL><DEDENT>if len(y) < N:<EOL><INDENT>y = y.copy()<EOL>y.resize(N)<EOL><DEDENT>if maxlags is None:<EOL><INDENT>maxlags = N - <NUM_LIT:1><EOL><DEDENT>assert maxlags < N, '<STR_LIT>'<EOL>realdata = np.isrealobj(x) and np.isrealobj(y)<EOL>if realdata == True:<EOL><INDENT>r = np.zeros(maxlags, dtype=float)<EOL><DEDENT>else:<EOL><INDENT>r = np.zeros(maxlags, dtype=complex)<EOL><DEDENT>if norm == '<STR_LIT>':<EOL><INDENT>rmsx = pylab_rms_flat(x)<EOL>rmsy = pylab_rms_flat(y)<EOL><DEDENT>for k in range(<NUM_LIT:0>, maxlags+<NUM_LIT:1>):<EOL><INDENT>nk = N - k - <NUM_LIT:1><EOL>if realdata == True:<EOL><INDENT>sum = <NUM_LIT:0><EOL>for j in range(<NUM_LIT:0>, nk+<NUM_LIT:1>):<EOL><INDENT>sum = sum + x[j+k] * y[j]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>sum = <NUM_LIT:0.> + <NUM_LIT><EOL>for j in range(<NUM_LIT:0>, nk+<NUM_LIT:1>):<EOL><INDENT>sum = sum + x[j+k] * y[j].conjugate()<EOL><DEDENT><DEDENT>if k == <NUM_LIT:0>:<EOL><INDENT>if norm in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>r0 = sum/float(N)<EOL><DEDENT>elif norm is None:<EOL><INDENT>r0 = sum<EOL><DEDENT>else:<EOL><INDENT>r0 =  <NUM_LIT:1.><EOL><DEDENT><DEDENT>else:<EOL><INDENT>if norm == '<STR_LIT>':<EOL><INDENT>r[k-<NUM_LIT:1>] = sum / float(N-k)<EOL><DEDENT>elif norm == '<STR_LIT>':<EOL><INDENT>r[k-<NUM_LIT:1>] = sum / float(N)<EOL><DEDENT>elif norm is None:<EOL><INDENT>r[k-<NUM_LIT:1>] = sum<EOL><DEDENT>elif norm == '<STR_LIT>':<EOL><INDENT>r[k-<NUM_LIT:1>] =  sum/(rmsx*rmsy)/float(N)<EOL><DEDENT><DEDENT><DEDENT>r = np.insert(r, <NUM_LIT:0>, r0)<EOL>return r<EOL>", "docstring": "r\"\"\"Correlation function\n\n    This function should give the same results as :func:`xcorr` but 
it\n    returns the positive lags only. Moreover the algorithm does not use\n    FFT as compared to other algorithms.\n\n    :param array x: first data array of length N\n    :param array y: second data array of length N. If not specified, computes the\n        autocorrelation.\n    :param int maxlags: compute cross correlation between [0:maxlags]\n        when maxlags is not specified, the range of lags is [0:maxlags].\n    :param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']\n\n        * *biased*   correlation=raw/N,\n        * *unbiased* correlation=raw/(N-`|lag|`)\n        * *coeff*    correlation=raw/(rms(x).rms(y))/N\n        * None       correlation=raw\n\n    :return:\n        * a numpy.array correlation sequence,  r[1,N]\n        * a float for the zero-lag correlation,  r[0]\n\n    The *unbiased* correlation has the form:\n\n    .. math::\n\n        \\hat{r}_{xx} = \\frac{1}{N-m}T \\sum_{n=0}^{N-m-1} x[n+m]x^*[n] T\n\n    The *biased* correlation differs by the front factor only:\n\n    .. math::\n\n        \\check{r}_{xx} = \\frac{1}{N}T \\sum_{n=0}^{N-m-1} x[n+m]x^*[n] T\n\n    with :math:`0\\leq m\\leq N-1`.\n\n    .. doctest::\n\n        >>> from spectrum import CORRELATION\n        >>> x = [1,2,3,4,5]\n        >>> res = CORRELATION(x,x, maxlags=0, norm='biased')\n        >>> res[0]\n        11.0\n\n    .. note:: this function should be replaced by :func:`xcorr`.\n\n    .. seealso:: :func:`xcorr`", "id": "f10917:m1"}
{"signature": "def xcorr(x, y=None, maxlags=None, norm='<STR_LIT>'):", "body": "N = len(x)<EOL>if y is None:<EOL><INDENT>y = x<EOL><DEDENT>assert len(x) == len(y), '<STR_LIT>'<EOL>if maxlags is None:<EOL><INDENT>maxlags = N-<NUM_LIT:1><EOL>lags = np.arange(<NUM_LIT:0>, <NUM_LIT:2>*N-<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>assert maxlags <= N, '<STR_LIT>'<EOL>lags = np.arange(N-maxlags-<NUM_LIT:1>, N+maxlags)<EOL><DEDENT>res = np.correlate(x, y, mode='<STR_LIT>')<EOL>if norm == '<STR_LIT>':<EOL><INDENT>Nf = float(N)<EOL>res = res[lags] / float(N)    <EOL><DEDENT>elif norm == '<STR_LIT>':<EOL><INDENT>res = res[lags] / (float(N)-abs(np.arange(-N+<NUM_LIT:1>, N)))[lags]<EOL><DEDENT>elif norm == '<STR_LIT>':<EOL><INDENT>Nf = float(N)<EOL>rms = pylab_rms_flat(x) * pylab_rms_flat(y)<EOL>res = res[lags] / rms / Nf<EOL><DEDENT>else:<EOL><INDENT>res = res[lags]<EOL><DEDENT>lags = np.arange(-maxlags, maxlags+<NUM_LIT:1>)<EOL>return res, lags<EOL>", "docstring": "Cross-correlation using numpy.correlate\n\n    Estimates the cross-correlation (and autocorrelation) sequence of a random\n    process of length N. By default, there is no normalisation and the output\n    sequence of the cross-correlation has a length 2*N+1.\n\n    :param array x: first data array of length N\n    :param array y: second data array of length N. If not specified, computes the\n        autocorrelation.\n    :param int maxlags: compute cross correlation between [-maxlags:maxlags]\n        when maxlags is not specified, the range of lags is [-N+1:N-1].\n    :param str option: normalisation in ['biased', 'unbiased', None, 'coeff']\n\n    The true cross-correlation sequence is\n\n    .. 
math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])\n\n    However, in practice, only a finite segment of one realization of the\n    infinite-length random process is available.\n\n    The correlation is estimated using numpy.correlate(x,y,'full').\n    Normalisation is handled by this function using the following cases:\n\n        * 'biased': Biased estimate of the cross-correlation function\n        * 'unbiased': Unbiased estimate of the cross-correlation function\n        * 'coeff': Normalizes the sequence so the autocorrelations at zero\n           lag is 1.0.\n\n    :return:\n        * a numpy.array containing the cross-correlation sequence (length 2*N-1)\n        * lags vector\n\n    .. note:: If x and y are not the same length, the shorter vector is\n        zero-padded to the length of the longer vector.\n\n    .. rubric:: Examples\n\n    .. doctest::\n\n        >>> from spectrum import xcorr\n        >>> x = [1,2,3,4,5]\n        >>> c, l = xcorr(x,x, maxlags=0, norm='biased')\n        >>> c\n        array([ 11.])\n\n    .. seealso:: :func:`CORRELATION`.", "id": "f10917:m2"}
{"signature": "def mexican(lb, ub, n):", "body": "if n <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>x = numpy.linspace(lb, ub, n)<EOL>psi = (<NUM_LIT:1.>-x**<NUM_LIT>) * (<NUM_LIT>/(numpy.sqrt(<NUM_LIT>)*pi**<NUM_LIT>)) * numpy.exp(-x**<NUM_LIT:2>/<NUM_LIT>)<EOL>return psi<EOL>", "docstring": "r\"\"\"Generate the mexican hat wavelet\n\n    The Mexican wavelet is:\n\n    .. math:: w[x] = \\cos{5x}  \\exp^{-x^2/2}\n\n    :param lb: lower bound\n    :param ub: upper bound\n    :param int n: waveform data samples\n    :return: the waveform\n\n    .. plot::\n        :include-source:\n        :width: 80%\n\n        from spectrum import mexican\n        from pylab import plot\n        plot(mexican(0, 10, 100))", "id": "f10920:m2"}
{"signature": "def morlet(lb, ub, n):", "body": "if n <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>x = numpy.linspace(lb, ub, n)<EOL>psi = numpy.cos(<NUM_LIT:5>*x) * numpy.exp(-x**<NUM_LIT:2>/<NUM_LIT>)<EOL>return psi<EOL>", "docstring": "r\"\"\"Generate the Morlet waveform\n\n\n    The Morlet waveform is defined as follows:\n\n    .. math:: w[x] = \\cos{5x}  \\exp^{-x^2/2}\n\n    :param lb: lower bound\n    :param ub: upper bound\n    :param int n: waveform data samples\n\n\n    .. plot::\n        :include-source:\n        :width: 80%\n\n        from spectrum import morlet\n        from pylab import plot\n        plot(morlet(0,10,100))", "id": "f10920:m0"}
{"signature": "def eqtflength(b,a):", "body": "d = abs(len(b)-len(a))<EOL>if d != <NUM_LIT:0>:<EOL><INDENT>if len(a) > len(b):<EOL><INDENT>try:<EOL><INDENT>b.extend([<NUM_LIT:0.>]*d)<EOL><DEDENT>except:<EOL><INDENT>b = np.append(b, [<NUM_LIT:0>]*d)<EOL><DEDENT><DEDENT>elif len(b)>len(a):<EOL><INDENT>try:<EOL><INDENT>a.extend([<NUM_LIT:0.>]*d)<EOL><DEDENT>except:<EOL><INDENT>a = np.append(a, [<NUM_LIT:0>]*d)<EOL><DEDENT><DEDENT>return b,a<EOL><DEDENT>else:<EOL><INDENT>return b,a<EOL><DEDENT>", "docstring": "Given two list or arrays, pad with zeros the shortest array\n\n    :param b: list or array\n    :param a: list or array\n\n\n    .. doctest::\n\n        >>> from spectrum.transfer import eqtflength\n        >>> a = [1,2]\n        >>> b = [1,2,3,4]\n        >>> a, b, = eqtflength(a,b)", "id": "f10921:m2"}
{"signature": "def zpk2tf(z, p, k):", "body": "import scipy.signal<EOL>b, a = scipy.signal.zpk2tf(z, p, k)<EOL>return b, a<EOL>", "docstring": "r\"\"\"Return polynomial transfer function representation from zeros and poles\n\n    :param ndarray z: Zeros of the transfer function.\n    :param ndarray p: Poles of the transfer function.\n    :param float k: System gain.\n\n    :return:\n        b : ndarray Numerator polynomial.\n        a : ndarray Numerator and denominator polynomials.\n\n    :func:`zpk2tf` forms transfer function polynomials from the zeros, poles, and gains\n    of a system in factored form.\n\n    zpk2tf(z,p,k) finds a rational transfer function\n\n    .. math:: \\frac{B(s)}{A(s)} = \\frac{b_1 s^{n-1}+\\dots b_{n-1}s+b_n}{a_1 s^{m-1}+\\dots a_{m-1}s+a_m}\n\n    given a system in factored transfer function form\n\n    .. math:: H(s) = \\frac{Z(s)}{P(s)} = k \\frac{(s-z_1)(s-z_2)\\dots(s-z_m)}{(s-p_1)(s-p_2)\\dots(s-p_n)}\n\n\n    with p being the pole locations, and z the zero locations, with as many.\n    The gains for each numerator transfer function are in vector k.\n    The zeros and poles must be real or come in complex conjugate pairs.\n    The polynomial denominator coefficients are returned in row vector a and\n    the polynomial numerator coefficients are returned in matrix b, which has\n    as many rows as there are columns of z.\n\n    Inf values can be used as place holders in z if some columns have fewer zeros than others.\n\n    .. note:: wrapper of scipy function zpk2tf", "id": "f10921:m9"}
{"signature": "def window_bartlett(N):", "body": "from numpy import bartlett<EOL>return bartlett(N)<EOL>", "docstring": "r\"\"\"Bartlett window (wrapping of numpy.bartlett) also known as Fejer\n\n    :param int N: window length\n\n    The Bartlett window is defined as\n\n    .. math:: w(n) = \\frac{2}{N-1} \\left(\n              \\frac{N-1}{2} - \\left|n - \\frac{N-1}{2}\\right|\n              \\right)\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import window_visu\n        window_visu(64, 'bartlett')\n\n    .. seealso:: numpy.bartlett, :func:`create_window`, :class:`Window`.", "id": "f10924:m7"}
{"signature": "def window_hann(N):", "body": "from numpy import hanning<EOL>return hanning(N)<EOL>", "docstring": "r\"\"\"Hann window (or Hanning). (wrapping of numpy.bartlett)\n\n    :param int N: window length\n\n    The Hanning window is also known as the Cosine Bell. Usually, it is called\n    Hann window, to avoid confusion with the Hamming window.\n\n    .. math:: w(n) =  0.5\\left(1- \\cos\\left(\\frac{2\\pi n}{N-1}\\right)\\right)\n               \\qquad 0 \\leq n \\leq M-1\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import window_visu\n        window_visu(64, 'hanning')\n\n    .. seealso:: numpy.hanning, :func:`create_window`, :class:`Window`.", "id": "f10924:m9"}
{"signature": "def window_chebwin(N, attenuation=<NUM_LIT:50>):", "body": "import scipy.signal<EOL>return scipy.signal.chebwin(N, attenuation)<EOL>", "docstring": "Cheb window\n\n    :param N: window length\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import window_visu\n        window_visu(64, 'chebwin', attenuation=50)\n\n    .. seealso:: scipy.signal.chebwin, :func:`create_window`, :class:`Window`", "id": "f10924:m11"}
{"signature": "def window_cauchy(N, alpha=<NUM_LIT:3>):", "body": "n = linspace(-N/<NUM_LIT>, (N)/<NUM_LIT>, N)<EOL>w = <NUM_LIT:1.>/(<NUM_LIT:1.>+ (alpha*n/(N/<NUM_LIT>))**<NUM_LIT:2>)<EOL>return w<EOL>", "docstring": "r\"\"\"Cauchy tapering window\n\n    :param int N: window length\n    :param float alpha: parameter of the poisson window\n\n    .. math:: w(n) = \\frac{1}{1+\\left(\\frac{\\alpha*n}{N/2}\\right)**2}\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import window_visu\n        window_visu(64, 'cauchy', alpha=3)\n        window_visu(64, 'cauchy', alpha=4)\n        window_visu(64, 'cauchy', alpha=5)\n\n\n    .. seealso:: :func:`window_poisson`, :func:`window_hann`", "id": "f10924:m28"}
{"signature": "def window_kaiser(N, beta=<NUM_LIT>, method='<STR_LIT>'):", "body": "if N == <NUM_LIT:1>:<EOL><INDENT>return ones(<NUM_LIT:1>)<EOL><DEDENT>if method == '<STR_LIT>':<EOL><INDENT>from numpy import kaiser<EOL>return kaiser(N, beta)<EOL><DEDENT>else:<EOL><INDENT>return _kaiser(N, beta)<EOL><DEDENT>", "docstring": "r\"\"\"Kaiser window\n\n    :param N: window length\n    :param beta: kaiser parameter (default is 8.6)\n\n    To obtain a Kaiser window that designs an FIR filter with\n    sidelobe attenuation of :math:`\\alpha` dB, use the following :math:`\\beta` where\n    :math:`\\beta = \\pi \\alpha`.\n\n    .. math::\n\n        w_n = \\frac{I_0\\left(\\pi\\alpha\\sqrt{1-\\left(\\frac{2n}{M}-1\\right)^2}\\right)} {I_0(\\pi \\alpha)}\n\n    where\n\n      * :math:`I_0` is the zeroth order Modified Bessel function of the first kind.\n      * :math:`\\alpha` is a real number that determines the shape of the \n        window. It determines the trade-off between main-lobe width and side \n        lobe level.\n      * the length of the sequence is N=M+1.\n\n    The Kaiser window can approximate many other windows by varying \n    the :math:`\\beta` parameter:\n\n    ===== ========================\n    beta  Window shape\n    ===== ========================\n    0     Rectangular\n    5     Similar to a Hamming\n    6     Similar to a Hanning\n    8.6   Similar to a Blackman\n    ===== ========================\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from pylab import plot, legend, xlim\n        from spectrum import window_kaiser\n        N = 64\n        for beta in [1,2,4,8,16]:\n            plot(window_kaiser(N, beta), label='beta='+str(beta))\n        xlim(0,N)\n        legend()\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import window_visu\n        window_visu(64, 'kaiser', beta=8.)\n\n    .. seealso:: numpy.kaiser, :func:`spectrum.window.create_window`", "id": "f10924:m5"}
{"signature": "def enbw(data):", "body": "N = len(data)<EOL>return N * np.sum(data**<NUM_LIT:2>) / np.sum(data)**<NUM_LIT:2><EOL>", "docstring": "r\"\"\"Computes the equivalent noise bandwidth\n\n    .. math:: ENBW = N \\frac{\\sum_{n=1}^{N} w_n^2}{\\left(\\sum_{n=1}^{N} w_n \\right)^2}\n\n    .. doctest::\n\n        >>> from spectrum import create_window, enbw\n        >>> w = create_window(64, 'rectangular')\n        >>> enbw(w)\n        1.0\n\n    The following table contains the ENBW values for some of the\n    implemented windows in this module (with N=16384). They have been\n    double checked against litterature (Source: [Harris]_, [Marple]_).\n\n    If not present, it means that it has not been checked.\n\n    =================== ============ =============\n    name                 ENBW        litterature\n    =================== ============ =============\n    rectangular         1.           1.\n    triangle            1.3334       1.33\n    Hann                1.5001       1.5\n    Hamming             1.3629       1.36\n    blackman            1.7268       1.73\n    kaiser              1.7\n    blackmanharris,4    2.004        2.\n    riesz               1.2000       1.2\n    riemann             1.32         1.3\n    parzen              1.917        1.92\n    tukey 0.25          1.102        1.1\n    bohman              1.7858       1.79\n    poisson 2           1.3130       1.3\n    hanningpoisson 0.5  1.609        1.61\n    cauchy              1.489        1.48\n    lanczos             1.3\n    =================== ============ =============", "id": "f10924:m1"}
{"signature": "def _coeff4(N, a0, a1, a2, a3):", "body": "if N == <NUM_LIT:1>:<EOL><INDENT>return ones(<NUM_LIT:1>)<EOL><DEDENT>n = arange(<NUM_LIT:0>, N)<EOL>N1 = N - <NUM_LIT:1.><EOL>w = a0 -a1*cos(<NUM_LIT>*pi*n / N1) + a2*cos(<NUM_LIT>*pi*n / N1) - a3*cos(<NUM_LIT>*pi*n / N1)<EOL>return w<EOL>", "docstring": "a common internal function to some window functions with 4 coeffs\n\n\n    For the blackmna harris for instance, the results are identical to octave if N is odd\n    but not for even values...if n =0 whatever N is, the w(0) must be equal to a0-a1+a2-a3, which\n    is the case here, but not in octave...", "id": "f10924:m15"}
{"signature": "def plot_frequencies(self, mindB=None, maxdB=None, norm=True):", "body": "from pylab import plot, title, xlim, grid, ylim, xlabel, ylabel<EOL>self.compute_response(norm=norm)<EOL>plot(self.frequencies, self.response)<EOL>title(\"<STR_LIT>\" % (self.enbw))<EOL>ylabel('<STR_LIT>')<EOL>xlabel('<STR_LIT>')<EOL>xlim(-<NUM_LIT:0.5>, <NUM_LIT:0.5>)<EOL>y0, y1 = ylim()<EOL>if mindB:<EOL><INDENT>y0 = mindB<EOL><DEDENT>if maxdB is not None:<EOL><INDENT>y1 = maxdB<EOL><DEDENT>else:<EOL><INDENT>y1 = max(self.response)<EOL><DEDENT>ylim(y0, y1)<EOL>grid(True)<EOL>", "docstring": "Plot the window in the frequency domain\n\n        :param mindB: change the default lower y bound\n        :param maxdB: change the default upper lower bound\n        :param bool norm: if True, normalise the frequency response.\n\n        .. plot::\n            :width: 80%\n            :include-source:\n\n            from spectrum.window import Window\n            w = Window(64, name='hamming')\n            w.plot_frequencies()", "id": "f10924:c0:m10"}
{"signature": "def window_blackman_nuttall(N):", "body": "a0 = <NUM_LIT><EOL>a1 = <NUM_LIT><EOL>a2 = <NUM_LIT><EOL>a3 = <NUM_LIT><EOL>return _coeff4(N, a0, a1, a2, a3)<EOL>", "docstring": "r\"\"\"Blackman Nuttall window\n\n    returns a minimum, 4-term Blackman-Harris window. The window is minimum in the sense that its maximum sidelobes are minimized.\n    The coefficients for this window differ from the Blackman-Harris window coefficients and produce slightly lower sidelobes.\n\n    :param N: window length\n\n    .. math:: w(n) = a_0 - a_1 \\cos\\left(\\frac{2\\pi n}{N-1}\\right)+ a_2 \\cos\\left(\\frac{4\\pi n}{N-1}\\right)- a_3 \\cos\\left(\\frac{6\\pi n}{N-1}\\right)\n\n    with :math:`a_0 = 0.3635819`, :math:`a_1 = 0.4891775`, :math:`a_2=0.1365995` and :math:`0_3=.0106411`\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import window_visu\n        window_visu(64, 'blackman_nuttall', mindB=-80)\n\n    .. seealso:: :func:`spectrum.window.create_window`\n    .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m17"}
{"signature": "def window_riemann(N):", "body": "n = linspace(-N/<NUM_LIT>, (N)/<NUM_LIT>, N)<EOL>w = sin(n/float(N)*<NUM_LIT>*pi) / (n / float(N)*<NUM_LIT>*pi)<EOL>return w<EOL>", "docstring": "r\"\"\"Riemann tapering window\n\n    :param int N: window length\n\n    .. math:: w(n) = 1 - \\left| \\frac{n}{N/2}  \\right|^2\n\n    with :math:`-N/2 \\leq n \\leq N/2`.\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import window_visu\n        window_visu(64, 'riesz')\n\n    .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m25"}
{"signature": "def window_poisson_hanning(N, alpha=<NUM_LIT:2>):", "body": "w1 = window_hann(N)<EOL>w2 = window_poisson(N, alpha=alpha)<EOL>return w1*w2<EOL>", "docstring": "r\"\"\"Hann-Poisson tapering window\n\n    This window is constructed as the product of the Hanning and Poisson\n    windows. The parameter **alpha** is the Poisson parameter.\n\n    :param int N: window length\n    :param float alpha: parameter of the poisson window\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import window_visu\n        window_visu(64, 'poisson_hanning', alpha=0.5)\n        window_visu(64, 'poisson_hanning', alpha=1)\n        window_visu(64, 'poisson_hanning')\n\n    .. seealso:: :func:`window_poisson`, :func:`window_hann`", "id": "f10924:m27"}
{"signature": "def info(self):", "body": "print(self)<EOL>", "docstring": "Print object information such as length and name", "id": "f10924:c0:m13"}
{"signature": "def window_bartlett_hann(N):", "body": "if N == <NUM_LIT:1>:<EOL><INDENT>return ones(<NUM_LIT:1>)<EOL><DEDENT>n = arange(<NUM_LIT:0>, N)<EOL>a0 = <NUM_LIT><EOL>a1 = <NUM_LIT><EOL>a2 = <NUM_LIT><EOL>win = a0 -  a1 *abs(n/(N-<NUM_LIT:1.>)-<NUM_LIT:0.5>) -a2 * cos(<NUM_LIT:2>*pi*n/(N-<NUM_LIT:1.>))<EOL>return win<EOL>", "docstring": "r\"\"\"Bartlett-Hann window\n\n    :param N: window length\n\n    .. math:: w(n) = a_0 + a_1 \\left| \\frac{n}{N-1} -\\frac{1}{2}\\right| - a_2 \\cos \\left( \\frac{2\\pi n}{N-1} \\right)\n\n    with :math:`a_0 = 0.62`, :math:`a_1 = 0.48` and :math:`a_2=0.38`\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import window_visu\n        window_visu(64, 'bartlett_hann')\n\n    .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m14"}
{"signature": "def plot_time_freq(self, mindB=-<NUM_LIT:100>, maxdB=None, norm=True,<EOL>yaxis_label_position=\"<STR_LIT:right>\"):", "body": "from pylab import subplot, gca<EOL>subplot(<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:1>)<EOL>self.plot_window()<EOL>subplot(<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:2>)<EOL>self.plot_frequencies(mindB=mindB, maxdB=maxdB, norm=norm)<EOL>if yaxis_label_position==\"<STR_LIT:left>\":<EOL><INDENT>try: tight_layout()<EOL>except: pass<EOL><DEDENT>else:<EOL><INDENT>ax = gca()<EOL>ax.yaxis.set_label_position(\"<STR_LIT:right>\")<EOL><DEDENT>", "docstring": "Plotting method to plot both time and frequency domain results.\n\n        See :meth:`plot_frequencies` for the optional arguments.\n\n        .. plot::\n            :width: 80%\n            :include-source:\n\n            from spectrum.window import Window\n            w = Window(64, name='hamming')\n            w.plot_time_freq()", "id": "f10924:c0:m12"}
{"signature": "def _kaiser(n, beta):", "body": "from scipy.special import iv as besselI<EOL>m = n - <NUM_LIT:1><EOL>k = arange(<NUM_LIT:0>, m)<EOL>k = <NUM_LIT> * beta / m * sqrt (k * (m - k))<EOL>w = besselI (<NUM_LIT:0>, k) / besselI (<NUM_LIT:0>, beta)<EOL>return w<EOL>", "docstring": "Independant Kaiser window\n\n    For the definition of the Kaiser window, see A. V. Oppenheim & R. W. Schafer, \"Discrete-Time Signal Processing\".\n\n    The continuous version of width n centered about x=0 is:\n\n    .. note:: 2 times slower than scipy.kaiser", "id": "f10924:m2"}
{"signature": "def window_rectangle(N):", "body": "return ones(N)<EOL>", "docstring": "r\"\"\"Kaiser window\n\n    :param N: window length\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import window_visu\n        window_visu(64, 'rectangle')", "id": "f10924:m4"}
{"signature": "def window_bohman(N):", "body": "x = linspace(-<NUM_LIT:1>, <NUM_LIT:1>, N)<EOL>w = (<NUM_LIT:1.>-abs(x)) * cos(pi*abs(x)) + <NUM_LIT:1.>/pi * sin(pi*abs(x))<EOL>return w<EOL>", "docstring": "r\"\"\"Bohman tapering window\n\n    :param N: window length\n\n    .. math:: w(n) = (1-|x|) \\cos (\\pi |x|) + \\frac{1}{\\pi} \\sin(\\pi |x|)\n\n    where x is a length N vector of linearly spaced values between\n    -1 and 1.\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import window_visu\n        window_visu(64, 'bohman')\n\n    .. seealso:: :func:`create_window`, :class:`Window`", "id": "f10924:m19"}
{"signature": "def plot_window(self):", "body": "from pylab import plot, xlim, grid, title, ylabel, axis<EOL>x = linspace(<NUM_LIT:0>, <NUM_LIT:1>, self.N)<EOL>xlim(<NUM_LIT:0>, <NUM_LIT:1>)<EOL>plot(x, self.data)<EOL>grid(True)<EOL>title('<STR_LIT>' % (self.name.capitalize(), self.N))<EOL>ylabel('<STR_LIT>')<EOL>axis([<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT>])<EOL>", "docstring": "Plot the window in the time domain\n\n        .. plot::\n            :width: 80%\n            :include-source:\n\n            from spectrum.window import Window\n            w = Window(64, name='hamming')\n            w.plot_window()", "id": "f10924:c0:m11"}
{"signature": "def poly2ac(poly, efinal):", "body": "results = rlevinson(poly, efinal)<EOL>return results[<NUM_LIT:0>]<EOL>", "docstring": "Convert prediction filter polynomial to autocorrelation sequence\n\n    :param array poly: the AR parameters\n    :param efinal: an estimate of the final error\n    :return: the autocorrelation  sequence in complex format.\n\n    .. doctest::\n\n        >>> from numpy import array\n        >>> from spectrum import poly2ac\n        >>> poly = [ 1. ,  0.38 , -0.05]\n        >>> efinal = 4.1895\n        >>> poly2ac(poly, efinal)\n        array([ 5.00+0.j, -2.00+0.j,  1.01-0.j])", "id": "f10925:m2"}
{"signature": "def poly2lsf(a):", "body": "<EOL>a = numpy.array(a)<EOL>if a[<NUM_LIT:0>] != <NUM_LIT:1>:<EOL><INDENT>a/=a[<NUM_LIT:0>]<EOL><DEDENT>if max(numpy.abs(numpy.roots(a))) >= <NUM_LIT:1.0>:<EOL><INDENT>error('<STR_LIT>');<EOL><DEDENT>p  = len(a)-<NUM_LIT:1>   <EOL>a1 = numpy.concatenate((a, numpy.array([<NUM_LIT:0>])))<EOL>a2 = a1[-<NUM_LIT:1>::-<NUM_LIT:1>]<EOL>P1 = a1 - a2        <EOL>Q1 = a1 + a2        <EOL>if p%<NUM_LIT:2>: <EOL><INDENT>P, r = deconvolve(P1,[<NUM_LIT:1>, <NUM_LIT:0> ,-<NUM_LIT:1>])<EOL>Q = Q1<EOL><DEDENT>else:          <EOL><INDENT>P, r = deconvolve(P1, [<NUM_LIT:1>, -<NUM_LIT:1>])<EOL>Q, r = deconvolve(Q1, [<NUM_LIT:1>,  <NUM_LIT:1>])<EOL><DEDENT>rP  = numpy.roots(P)<EOL>rQ  = numpy.roots(Q)<EOL>aP  = numpy.angle(rP[<NUM_LIT:1>::<NUM_LIT:2>])<EOL>aQ  = numpy.angle(rQ[<NUM_LIT:1>::<NUM_LIT:2>])<EOL>lsf = sorted(numpy.concatenate((-aP,-aQ)))<EOL>return lsf<EOL>", "docstring": "Prediction polynomial to line spectral frequencies.\n\n    converts the prediction polynomial specified by A,\n    into the corresponding line spectral frequencies, LSF.\n    normalizes the prediction polynomial by A(1).\n\n    .. doctest::\n\n        >>> from spectrum import poly2lsf\n        >>> a = [1.0000,  0.6149, 0.9899, 0.0000 ,0.0031, -0.0082]\n        >>> lsf = poly2lsf(a)\n        >>> lsf =  array([0.7842, 1.5605, 1.8776, 1.8984, 2.3593])\n\n    .. seealso:: lsf2poly, poly2rc, poly2qc, rc2is", "id": "f10925:m12"}
{"signature": "def lar2rc(g):", "body": "assert numpy.isrealobj(g), '<STR_LIT>'<EOL>return -numpy.tanh(-numpy.array(g)/<NUM_LIT:2>)<EOL>", "docstring": "Convert log area ratios to reflection coefficients.\n\n    :param g:  log area ratios\n    :returns: the reflection coefficients\n\n    .. seealso: :func:`rc2lar`, :func:`poly2rc`, :func:`ac2rc`, :func:`is2rc`.\n\n    :References:\n       [1] J. Makhoul, \"Linear Prediction: A Tutorial Review,\" Proc. IEEE,  Vol.63, No.4, pp.561-580, Apr 1975.", "id": "f10925:m10"}
{"signature": "def rc2is(k):", "body": "assert numpy.isrealobj(k), '<STR_LIT>'<EOL>if max(numpy.abs(k)) >= <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return (<NUM_LIT:2>/numpy.pi)*numpy.arcsin(k)<EOL>", "docstring": "Convert reflection coefficients to inverse sine parameters.\n\n    :param k: reflection coefficients\n    :return: inverse sine parameters\n\n    .. seealso:: :func:`is2rc`, :func:`rc2poly`, :func:`rc2acC`, :func:`rc2lar`.\n\n    Reference: J.R. Deller, J.G. Proakis, J.H.L. Hansen, \"Discrete-Time\n       Processing of Speech Signals\", Prentice Hall, Section 7.4.5.", "id": "f10925:m8"}
{"signature": "def lsf2poly(lsf):", "body": "<EOL>lsf = numpy.array(lsf)<EOL>if max(lsf) > numpy.pi or min(lsf) < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>p = len(lsf) <EOL>z  = numpy.exp(<NUM_LIT> * lsf)<EOL>rQ = z[<NUM_LIT:0>::<NUM_LIT:2>]<EOL>rP = z[<NUM_LIT:1>::<NUM_LIT:2>]<EOL>rQ = numpy.concatenate((rQ, rQ.conjugate()))<EOL>rP = numpy.concatenate((rP, rP.conjugate()))<EOL>Q  = numpy.poly(rQ);<EOL>P  = numpy.poly(rP);<EOL>if p%<NUM_LIT:2>:<EOL><INDENT>P1 = numpy.convolve(P, [<NUM_LIT:1>, <NUM_LIT:0>, -<NUM_LIT:1>])<EOL>Q1 = Q<EOL><DEDENT>else:<EOL><INDENT>P1 = numpy.convolve(P, [<NUM_LIT:1>, -<NUM_LIT:1>])<EOL>Q1 = numpy.convolve(Q, [<NUM_LIT:1>,  <NUM_LIT:1>])<EOL><DEDENT>a = <NUM_LIT> * (P1+Q1)<EOL>return a[<NUM_LIT:0>:-<NUM_LIT:1>:<NUM_LIT:1>]<EOL>", "docstring": "Convert line spectral frequencies to prediction filter coefficients\n\n    returns a vector a containing the prediction filter coefficients from a vector lsf of line spectral frequencies.\n\n    .. doctest::\n\n        >>> from spectrum import lsf2poly\n        >>> lsf = [0.7842 ,   1.5605  ,  1.8776 ,   1.8984,    2.3593]\n        >>> a = lsf2poly(lsf)\n\n    # array([  1.00000000e+00,   6.14837835e-01,   9.89884967e-01,\n    # 9.31594056e-05,   3.13713832e-03,  -8.12002261e-03 ])\n\n    .. seealso:: poly2lsf, rc2poly, ac2poly, rc2is", "id": "f10925:m11"}
{"signature": "def rc2lar(k):", "body": "assert numpy.isrealobj(k), '<STR_LIT>'<EOL>if max(numpy.abs(k)) >= <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return -<NUM_LIT:2> * numpy.arctanh(-numpy.array(k))<EOL>", "docstring": "Convert reflection coefficients to log area ratios.\n\n    :param k: reflection coefficients\n    :return: inverse sine parameters\n\n    The log area ratio is defined by G = log((1+k)/(1-k)) , where the K\n    parameter is the reflection coefficient.\n\n    .. seealso:: :func:`lar2rc`, :func:`rc2poly`, :func:`rc2ac`, :func:`rc2ic`.\n\n    :References:\n       [1] J. Makhoul, \"Linear Prediction: A Tutorial Review,\" Proc. IEEE, Vol.63, No.4, pp.561-580, Apr 1975.", "id": "f10925:m9"}
{"signature": "def is2rc(inv_sin):", "body": "return numpy.sin(numpy.array(inv_sin)*numpy.pi/<NUM_LIT:2>)<EOL>", "docstring": "Convert inverse sine parameters to reflection coefficients.\n\n    :param inv_sin: inverse sine parameters\n    :return: reflection coefficients\n\n    .. seealso::  :func:`rc2is`, :func:`poly2rc`, :func:`ac2rc`, :func:`lar2rc`.\n\n    :Reference: J.R. Deller, J.G. Proakis, J.H.L. Hansen, \n        \"Discrete-Time Processing of Speech Signals\", Prentice Hall, Section 7.4.5.", "id": "f10925:m7"}
{"signature": "def __init__(self, name, N):", "body": "<EOL>self.__name = None<EOL>self.name = name<EOL>self.__N = N<EOL>self.__rho = <NUM_LIT:0><EOL>self.__k = None<EOL>self.__old_data = None<EOL>self.__data = None<EOL>self.__norm = True<EOL>", "docstring": "Create a criteria object\n\n        :param name: a string or list of strings containing valid criteria\n            method's name\n        :param int N: size of the data sample.", "id": "f10926:c0:m0"}
{"signature": "def MDL(N, rho, k):", "body": "from numpy import log<EOL>mdl = N* log(rho) + k * log(N)<EOL>return mdl<EOL>", "docstring": "r\"\"\"Minimum Description Length\n\n    .. math:: MDL(k) = N log \\rho_k + p \\log N\n\n    :validation: results", "id": "f10926:m5"}
{"signature": "def AICc(N, rho, k, norm=True):", "body": "from numpy import log, array<EOL>p = k  <EOL>res = log(rho) + <NUM_LIT> * (p+<NUM_LIT:1>) / (N-p-<NUM_LIT:2>)<EOL>return res<EOL>", "docstring": "r\"\"\"corrected Akaike information criterion\n\n    .. math:: AICc(k) = log(\\rho_k) + 2 \\frac{k+1}{N-k-2}\n\n\n    :validation: double checked versus octave.", "id": "f10926:m1"}
{"signature": "def mdl_eigen(s, N):", "body": "import numpy as np<EOL>kmdl = []<EOL>n = len(s)<EOL>for k in range(<NUM_LIT:0>, n-<NUM_LIT:1>):<EOL><INDENT>ak = <NUM_LIT:1.>/(n-k) * np.sum(s[k+<NUM_LIT:1>:])<EOL>gk = np.prod(s[k+<NUM_LIT:1>:]**(<NUM_LIT:1.>/(n-k)))<EOL>kmdl.append( -(n-k)*N * np.log(gk/ak) + <NUM_LIT:0.5>*k*(<NUM_LIT>*n-k)*np.log(N))<EOL><DEDENT>return kmdl<EOL>", "docstring": "r\"\"\"MDL order-selection using eigen values\n\n    :param s: a list of `p` sorted eigen values\n    :param N: the size of the input data. To be defined precisely.\n\n    :return:\n        * an array containing the AIC values\n\n    .. math:: MDL(k) = (n-k)N \\ln \\frac{g(k)}{a(k)} + 0.5k(2n-k) log(N)\n\n    .. seealso:: :func:`aic_eigen` for details\n\n    :References:\n        * [Marple]_ Chap 13,\n        * [Wax]_", "id": "f10926:m8"}
{"signature": "def CAT(N, rho, k):", "body": "from numpy import zeros, arange<EOL>cat = zeros(len(rho))<EOL>for p in arange(<NUM_LIT:1>, len(rho)+<NUM_LIT:1>):<EOL><INDENT>rho_p = float(N)/(N-p)*rho[p-<NUM_LIT:1>]<EOL>s = <NUM_LIT:0><EOL>for j in range(<NUM_LIT:1>, p+<NUM_LIT:1>):<EOL><INDENT>rho_j = float(N)/(N-j)*rho[j-<NUM_LIT:1>]<EOL>s = s + <NUM_LIT:1.>/rho_j<EOL><DEDENT>cat[p-<NUM_LIT:1>] = s/float(N) - <NUM_LIT:1.>/rho_p<EOL><DEDENT>return cat<EOL>", "docstring": "r\"\"\"Criterion Autoregressive Transfer Function :\n\n    .. math::  CAT(k) = \\frac{1}{N} \\sum_{i=1}^k \\frac{1}{\\rho_i} - \\frac{\\rho_i}{\\rho_k}\n\n    .. todo:: validation", "id": "f10926:m6"}
{"signature": "def AKICc(N, rho, k):", "body": "from numpy import log, array<EOL>p = k<EOL>res = log(rho) + p/N/(N-p) + (<NUM_LIT>-(p+<NUM_LIT>)/N) * (p+<NUM_LIT:1.>) / (N-p-<NUM_LIT>)<EOL>return res<EOL>", "docstring": "r\"\"\"approximate corrected Kullback information\n\n    .. math:: AKICc(k) = log(rho_k) + \\frac{p}{N*(N-k)} + (3-\\frac{k+2}{N})*\\frac{k+1}{N-k-2}", "id": "f10926:m3"}
{"signature": "def __call__(self, rho=None, k=None, N=None, norm=True):", "body": "self.__norm = norm<EOL>if N is not None:<EOL><INDENT>self.N = N<EOL><DEDENT>if rho is not None:<EOL><INDENT>self.rho = rho<EOL><DEDENT>if k is not None:<EOL><INDENT>self.__k = k<EOL><DEDENT>self.__norm = norm<EOL>f = eval(self.name)<EOL>self.data = f(self.N, self.rho, self.k)<EOL>if self.old_data is not None and self.data is not None:<EOL><INDENT>if self.data > self.old_data:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Call the criteria function correspondign to :attr:`name`.", "id": "f10926:c0:m11"}
{"signature": "def __init__(self, data, sampling=<NUM_LIT:1.>, ar_order=None, ma_order=None,<EOL>lag=-<NUM_LIT:1>, NFFT=None, detrend=None, scale_by_freq=True):", "body": "super(ParametricSpectrum, self).__init__(data, sampling=sampling,<EOL>NFFT=NFFT,<EOL>scale_by_freq=scale_by_freq,<EOL>detrend=detrend)<EOL>if ar_order is None and ma_order is None:<EOL><INDENT>raise errors.SpectrumARMAError<EOL><DEDENT>self.__ar_order = ar_order<EOL>self.__ma_order = ma_order<EOL>self.ar_order = ar_order<EOL>self.ma_order = ma_order<EOL>self.lag = lag<EOL>self.__ar = None<EOL>self.__ma = None<EOL>self.__reflection = None<EOL>self.__rho = None<EOL>", "docstring": "**Constructor**\n\n        See the class documentation for the parameters.\n\n        .. rubric:: Additional attributes to those inherited\n            from :class:`Spectrum`:\n\n        * :attr:`ar_order`, the ar order of the PSD estimates\n        * :attr:`ma_order`, the ar order of the PSD estimates", "id": "f10927:c2:m0"}
{"signature": "def centerdc(self):", "body": "return list(self.centerdc_gen())<EOL>", "docstring": "Return the two-sided frequency range as a list (see\n        :meth:`centerdc_gen` for details).", "id": "f10927:c0:m11"}
{"signature": "def __init__(self, data, data_y=None, sampling=<NUM_LIT:1.>,<EOL>detrend=None, scale_by_freq=True, NFFT=None):", "body": "<EOL>self.__data = None<EOL>self.__data_y = None<EOL>self.__sampling = None<EOL>self.__detrend = None<EOL>self.__scale_by_freq = None<EOL>self.__sides = None<EOL>self.__N = None<EOL>self.__NFFT = None<EOL>self.__df = None<EOL>self.__datatype = None<EOL>self.__psd = None<EOL>self.__method = None<EOL>self.data = data<EOL>if data_y is not None:<EOL><INDENT>self.data_y = data_y<EOL><DEDENT>self.sampling = sampling<EOL>self.sides = '<STR_LIT:default>'<EOL>self._range = Range(self.__data.size, sampling) <EOL>self.modified = True<EOL>self.sampling = sampling<EOL>self.scale_by_freq = scale_by_freq<EOL>self.NFFT = NFFT<EOL>self.method = self.__class__<EOL>", "docstring": "**Constructor**\n\n        .. rubric:: Attributes:\n\n        From the input parameters, the following attributes are set:\n\n          * :attr:`data` (updates :attr:`N`, :attr:`df`, :attr:`datatype`)\n          * :attr:`data_y` used for cross PSD only (correlogram)\n          * :attr:`detrend`\n          * :attr:`sampling` (updates :attr:`df`)\n          * :attr:`scale_by_freq`\n          * :attr:`NFFT` (reset :attr:`sides`, :attr:`df`)\n\n        The following read-only attributes are set during the initialisation:\n\n          * :attr:`datatype`\n          * :attr:`df`\n          * :attr:`N`\n\n        And finally, additional read-write attributes are available:\n\n          * :attr:`psd`: used to store the PSD data array, which size depends\n            on :attr:`sides` i.e., one-sided for real data and two-sided for\n            the complex data.\n          * :attr:`sides`: if set, changed the :attr:`psd`.", "id": "f10927:c1:m0"}
{"signature": "def get_converted_psd(self, sides):", "body": "if sides == self.sides:<EOL><INDENT>return self.__psd<EOL><DEDENT>if self.datatype == '<STR_LIT>':<EOL><INDENT>assert sides != '<STR_LIT>',\"<STR_LIT>\"<EOL><DEDENT>if self.sides == '<STR_LIT>':<EOL><INDENT>logging.debug('<STR_LIT>')<EOL>if sides == '<STR_LIT>':<EOL><INDENT>logging.debug('<STR_LIT>')<EOL>newpsd = numpy.concatenate((self.psd[<NUM_LIT:0>:-<NUM_LIT:1>]/<NUM_LIT>, list(reversed(self.psd[<NUM_LIT:0>:-<NUM_LIT:1>]/<NUM_LIT>))))<EOL>newpsd[-<NUM_LIT:1>] = self.psd[-<NUM_LIT:1>]<EOL>newpsd[<NUM_LIT:0>] *= <NUM_LIT><EOL><DEDENT>elif sides == '<STR_LIT>':<EOL><INDENT>logging.debug('<STR_LIT>')<EOL>P0 = self.psd[<NUM_LIT:0>]<EOL>P1 = self.psd[-<NUM_LIT:1>]<EOL>newpsd = numpy.concatenate((self.psd[-<NUM_LIT:1>:<NUM_LIT:0>:-<NUM_LIT:1>]/<NUM_LIT>, self.psd[<NUM_LIT:0>:-<NUM_LIT:1>]/<NUM_LIT>))<EOL>newpsd[<NUM_LIT:0>] = P1<EOL><DEDENT><DEDENT>elif self.sides == '<STR_LIT>':<EOL><INDENT>logging.debug('<STR_LIT>')<EOL>if sides == '<STR_LIT>':<EOL><INDENT>logging.debug('<STR_LIT>')<EOL>midN = (len(self.psd)-<NUM_LIT:2>) / <NUM_LIT:2><EOL>newpsd = numpy.array(self.psd[<NUM_LIT:0>:int(midN)+<NUM_LIT:2>]*<NUM_LIT:2>)<EOL>newpsd[<NUM_LIT:0>] /= <NUM_LIT:2><EOL>newpsd[-<NUM_LIT:1>] = self.psd[-<NUM_LIT:1>]<EOL><DEDENT>elif sides == '<STR_LIT>':<EOL><INDENT>newpsd = stools.twosided_2_centerdc(self.psd)<EOL><DEDENT><DEDENT>elif self.sides == '<STR_LIT>': <EOL><INDENT>logging.debug('<STR_LIT>')<EOL>if sides == '<STR_LIT>':<EOL><INDENT>logging.debug('<STR_LIT>')<EOL>midN = int(len(self.psd) / <NUM_LIT:2>)<EOL>P1 = self.psd[<NUM_LIT:0>]<EOL>newpsd = numpy.append(self.psd[midN:]*<NUM_LIT:2>, P1)<EOL><DEDENT>elif sides == '<STR_LIT>':<EOL><INDENT>newpsd = stools.centerdc_2_twosided(self.psd)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return newpsd<EOL>", "docstring": "This function returns the PSD in the **sides** format\n\n        :param str sides: the PSD format in 
['onesided', 'twosided', 'centerdc']\n        :return: the expected PSD.\n\n        .. doctest::\n\n            from spectrum import *\n            p = pcovar(marple_data, 15)\n            centerdc_psd = p.get_converted_psd('centerdc')\n\n        .. note:: this function does not change the object, in particular, it\n            does not change the :attr:`psd` attribute. If you want to change\n            the psd on the fly, change the attribute :attr:`sides`.", "id": "f10927:c1:m27"}
{"signature": "def frequencies(self, sides=None):", "body": "<EOL>if sides is None:<EOL><INDENT>sides = self.sides<EOL><DEDENT>if sides not in self._sides_choices:<EOL><INDENT>raise errors.SpectrumChoiceError(sides, self._sides_choices)<EOL><DEDENT>if sides == '<STR_LIT>':<EOL><INDENT>return self._range.onesided()<EOL><DEDENT>if sides == '<STR_LIT>':<EOL><INDENT>return self._range.twosided()<EOL><DEDENT>if sides == '<STR_LIT>':<EOL><INDENT>return self._range.centerdc()<EOL><DEDENT>", "docstring": "Return the frequency vector according to :attr:`sides`", "id": "f10927:c1:m26"}
{"signature": "def twosided_gen(self):", "body": "for a in range(<NUM_LIT:0>, self.N):<EOL><INDENT>yield a * self.df<EOL><DEDENT>", "docstring": "Returns the twosided frequency range as a generator\n\n        ::\n\n            >>> print(list(Range(8).centerdc_gen()))\n            [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875]", "id": "f10927:c0:m7"}
{"signature": "def plot(self, filename=None, norm=False, ylim=None,<EOL>sides=None,  **kargs):", "body": "import pylab<EOL>from pylab import ylim as plt_ylim<EOL>_ = self.psd<EOL>if sides is not None:<EOL><INDENT>if sides not in self._sides_choices:<EOL><INDENT>raise errors.SpectrumChoiceError(sides, self._sides_choices)<EOL><DEDENT><DEDENT>if sides is None or sides == self.sides:<EOL><INDENT>frequencies = self.frequencies()<EOL>psd = self.psd<EOL>sides = self.sides<EOL><DEDENT>elif sides is not None:<EOL><INDENT>if self.datatype == '<STR_LIT>':<EOL><INDENT>if sides == '<STR_LIT>':<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>logging.debug(\"<STR_LIT>\")<EOL>frequencies = self.frequencies(sides=sides)<EOL>psd = self.get_converted_psd(sides)<EOL><DEDENT>if len(psd) != len(frequencies):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (len(psd), len(frequencies)))<EOL><DEDENT>if '<STR_LIT>' in list(kargs.keys()):<EOL><INDENT>save_ax = pylab.gca()<EOL>pylab.sca(kargs['<STR_LIT>'])<EOL>rollback = True<EOL>del kargs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>rollback = False<EOL><DEDENT>if norm:<EOL><INDENT>pylab.plot(frequencies, <NUM_LIT:10> * stools.log10(psd/max(psd)),  **kargs)<EOL><DEDENT>else:<EOL><INDENT>pylab.plot(frequencies, <NUM_LIT:10> * stools.log10(psd),**kargs)<EOL><DEDENT>pylab.xlabel('<STR_LIT>')<EOL>pylab.ylabel('<STR_LIT>')<EOL>pylab.grid(True)<EOL>if ylim:<EOL><INDENT>plt_ylim(ylim)<EOL><DEDENT>if sides == '<STR_LIT>':<EOL><INDENT>pylab.xlim(<NUM_LIT:0>, self.sampling/<NUM_LIT>)<EOL><DEDENT>elif sides == '<STR_LIT>':<EOL><INDENT>pylab.xlim(<NUM_LIT:0>, self.sampling)<EOL><DEDENT>elif sides == '<STR_LIT>':<EOL><INDENT>pylab.xlim(-self.sampling/<NUM_LIT>, self.sampling/<NUM_LIT>)<EOL><DEDENT>if filename:<EOL><INDENT>pylab.savefig(filename)<EOL><DEDENT>if rollback:<EOL><INDENT>pylab.sca(save_ax)<EOL><DEDENT>del psd, frequencies<EOL>", "docstring": "a simple plotting routine to plot the PSD versus frequency.\n\n        :param str 
filename: save the figure into a file\n        :param norm: False by default. If True, the PSD is normalised.\n        :param ylim: readjust the y range .\n        :param sides: if not provided, :attr:`sides` is used. See :attr:`sides`\n            for details.\n        :param kargs: any optional argument accepted by :func:`pylab.plot`.\n\n        .. plot::\n            :width: 80%\n            :include-source:\n\n            from spectrum import *\n            p = Periodogram(marple_data)\n            p.plot(norm=True, marker='o')", "id": "f10927:c1:m28"}
{"signature": "def arcovar_marple(x, order):", "body": "assert len(x) >= order, \"<STR_LIT>\"<EOL>x = np.array(x)<EOL>N = len(x)<EOL>r0 = sum(abs(x)**<NUM_LIT>)<EOL>r1 = abs(x[<NUM_LIT:0>])**<NUM_LIT:2><EOL>rN = abs(x[N-<NUM_LIT:1>])**<NUM_LIT:2><EOL>pf = r0 - r1<EOL>pb = r0 - rN<EOL>delta = <NUM_LIT:1.> - r1 / r0<EOL>gamma = <NUM_LIT:1.> - rN / r0<EOL>c = np.zeros(N, dtype=complex)<EOL>d = np.zeros(N, dtype=complex)<EOL>r = np.zeros(N, dtype=complex)<EOL>af = np.zeros(N, dtype=complex)<EOL>ab = np.zeros(N, dtype=complex)<EOL>c[<NUM_LIT:0>] = x[N-<NUM_LIT:1>].conjugate() / r0<EOL>d[<NUM_LIT:0>] = x[<NUM_LIT:0>].conjugate() / r0<EOL>if order == <NUM_LIT:0>:<EOL><INDENT>pf = r0 / float(N)<EOL>pb = pf<EOL>return af, pf, ab, pb, <NUM_LIT:0><EOL><DEDENT>pbv = []<EOL>for m in range(<NUM_LIT:0>, order+<NUM_LIT:1>):<EOL><INDENT>logging.debug('<STR_LIT>', m)<EOL>logging.debug(c[<NUM_LIT:0>:<NUM_LIT:2>])<EOL>logging.debug(d[<NUM_LIT:0>:<NUM_LIT:2>])<EOL>r1 = <NUM_LIT:1.>/pf<EOL>r2 = <NUM_LIT:1.>/pb<EOL>r3 = <NUM_LIT:1.>/delta<EOL>r4 = <NUM_LIT:1.>/gamma<EOL>temp = <NUM_LIT:0.>+<NUM_LIT><EOL>for k in range(m+<NUM_LIT:1>, N):<EOL><INDENT>temp = temp + x[k]*x[k-m-<NUM_LIT:1>].conjugate()<EOL><DEDENT>r[m] =  temp.conjugate()<EOL>theta = x[<NUM_LIT:0>] * c[m]<EOL>if m == <NUM_LIT:0>:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>for k in range(<NUM_LIT:0>, m):<EOL><INDENT>theta = theta + x[m-k] * c[k]                   <EOL>r[k] = r[k] - x[N-m-<NUM_LIT:1>] * x[N-m+k].conjugate() <EOL>temp = temp + af[m-k-<NUM_LIT:1>] * r[k].conjugate()<EOL><DEDENT><DEDENT>\"\"\"<STR_LIT>\"\"\"<EOL>c1 = -temp * r2<EOL>c2 = -r1 * temp.conjugate()<EOL>c3 = theta * r3<EOL>c4 = r4 *theta.conjugate()<EOL>af[m] = c1                    <EOL>ab[m] = c2                    <EOL>save = c[m]<EOL>c[m] = save + c3*d[m]<EOL>d[m] = d[m] + c4*save<EOL>if m == <NUM_LIT:0>:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>for k in range(<NUM_LIT:0>, m):<EOL><INDENT>save = af[k]<EOL>af[k] = save + c1 * 
ab[m-k-<NUM_LIT:1>] <EOL>ab[m-k-<NUM_LIT:1>] = ab[m-k-<NUM_LIT:1>] + c2 * save   <EOL>save = c[k]<EOL>c[k] = save + c3*d[k]       <EOL>d[k] = d[k] + c4*save       <EOL><DEDENT><DEDENT>r5 = temp.real**<NUM_LIT:2> + temp.imag**<NUM_LIT:2><EOL>pf = pf - r5*r2         <EOL>pb = pb - r5*r1         <EOL>r5 = theta.real**<NUM_LIT:2> + theta.imag**<NUM_LIT:2><EOL>delta = delta - r5*r4               <EOL>gamma = gamma - r5*r3               <EOL>if m != order-<NUM_LIT:1>:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>pf = pf / float(N-m-<NUM_LIT:1>)<EOL>pb = pb / float(N-m-<NUM_LIT:1>)<EOL>break<EOL><DEDENT>if pf > <NUM_LIT:0> and pb > <NUM_LIT:0>:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>ValueError(\"<STR_LIT>\")<EOL><DEDENT>if (delta > <NUM_LIT:0.> and delta <=<NUM_LIT:1> and gamma > <NUM_LIT:0.> and gamma <=<NUM_LIT:1>):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>ValueError(\"<STR_LIT>\")<EOL><DEDENT>r1 = <NUM_LIT:1.>/pf<EOL>r2 = <NUM_LIT:1.>/pb<EOL>r3 = <NUM_LIT:1.>/delta<EOL>r4 = <NUM_LIT:1.>/gamma<EOL>ef = x[m+<NUM_LIT:1>]<EOL>eb = x[(N-<NUM_LIT:1>)-m-<NUM_LIT:1>]<EOL>for k in range(<NUM_LIT:0>,m+<NUM_LIT:1>):<EOL><INDENT>ef = ef + af[k] * x[m-k]             <EOL>eb = eb + ab[k] * x[N-m+k-<NUM_LIT:1>]                   <EOL><DEDENT>c1 = ef*r3<EOL>c2 = eb*r4<EOL>c3 = eb.conjugate() * r2<EOL>c4 = ef.conjugate() * r1<EOL>for k in range(m, -<NUM_LIT:1>, -<NUM_LIT:1>):<EOL><INDENT>save = af[k]<EOL>af[k] = save + c1 * d[k]                    <EOL>d[k+<NUM_LIT:1>] = d[k] + c4 * save                    <EOL>save = ab[k]<EOL>ab[k] = save + c2 * c[m-k]                 <EOL>c[m-k] = c[m-k] + c3 * save              <EOL><DEDENT>c[m+<NUM_LIT:1>] = c3<EOL>d[<NUM_LIT:0>] = c4<EOL>r5 = ef.real**<NUM_LIT:2> + ef.imag**<NUM_LIT:2><EOL>pf = pf - r5 * r3                              <EOL>delta = delta-r5 * r1                        <EOL>r5 = eb.real**<NUM_LIT:2> + eb.imag**<NUM_LIT:2><EOL>pb = pb - r5 * r4                              <EOL>gamma = gamma-r5*r2                    
    <EOL>pbv.append(pb)<EOL>if (pf > <NUM_LIT:0.> and pb > <NUM_LIT:0.>):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>ValueError(\"<STR_LIT>\")<EOL><DEDENT>if (delta > <NUM_LIT:0.> and delta <= <NUM_LIT:1.>) and (gamma > <NUM_LIT:0.> and gamma <= <NUM_LIT:1.>):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return af, pf, ab, pb, pbv<EOL>", "docstring": "r\"\"\"Estimate AR model parameters using covariance method\n\n    This implementation is based on [Marple]_. This code is far more\n    complicated and slower than :func:`arcovar` function, which is now the official version.\n    See :func:`arcovar` for a detailed description of Covariance method.\n\n    This function should be used in place of arcovar only if order<=4, for\n    which :func:`arcovar` does not work.\n\n    Fast algorithm for the solution of the covariance least squares normal\n    equations from Marple.\n\n    :param array X:  Array of complex data samples\n    :param int oder: Order of linear prediction model\n\n    :return:\n        * AF   - Array of complex forward linear prediction coefficients\n        * PF   - Real forward linear prediction variance at order IP\n        * AB   - Array of complex backward linear prediction coefficients\n        * PB   - Real backward linear prediction variance at order IP\n        * PV   - store linear prediction coefficients\n\n    .. note:: this code and the original code in Marple diverge for ip>10.\n        it seems that this is related to single precision used with\n        complex type in fortran whereas numpy uses double precision for\n        complex type.\n\n    :validation: the AR parameters are the same as those returned by\n        a completely different function :func:`arcovar`.\n\n    :References: [Marple]_", "id": "f10928:m0"}
{"signature": "def arcovar(x, order):", "body": "from spectrum import corrmtx<EOL>import scipy.linalg<EOL>X = corrmtx(x, order, '<STR_LIT>')<EOL>Xc = np.matrix(X[:, <NUM_LIT:1>:])<EOL>X1 = np.array(X[:, <NUM_LIT:0>])<EOL>a, _residues, _rank, _singular_values = scipy.linalg.lstsq(-Xc, X1)<EOL>Cz = np.dot(X1.conj().transpose(), Xc)<EOL>e = np.dot(X1.conj().transpose(), X1) + np.dot(Cz, a)<EOL>assert e.imag < <NUM_LIT>, '<STR_LIT>'<EOL>e = float(e.real) <EOL>return a, e<EOL>", "docstring": "r\"\"\"Simple and fast implementation of the covariance AR estimate\n\n    This code is 10 times faster than :func:`arcovar_marple` and more importantly\n    only 10 lines of code, compared to a 200 loc for :func:`arcovar_marple`\n\n\n    :param array X:  Array of complex data samples\n    :param int oder: Order of linear prediction model\n\n    :return:\n        * a - Array of complex forward linear prediction coefficients\n        * e - error\n\n    The covariance method fits a Pth order autoregressive (AR) model to the\n    input signal, which is assumed to be the output of\n    an AR system driven by white noise. This method minimizes the forward\n    prediction error in the least-squares sense. The output vector\n    contains the normalized estimate of the AR system parameters\n\n    The white noise input variance estimate is also returned.\n\n    If is the power spectral density of y(n), then:\n\n    .. math:: \\frac{e}{\\left| A(e^{jw}) \\right|^2} = \\frac{e}{\\left| 1+\\sum_{k-1}^P a(k)e^{-jwk}\\right|^2}\n\n    Because the method characterizes the input data using an all-pole model,\n    the correct choice of the model order p is important.\n\n    .. 
plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import arcovar, marple_data, arma2psd\n        from pylab import plot, log10, linspace, axis\n\n        ar_values, error = arcovar(marple_data, 15)\n        psd = arma2psd(ar_values, sides='centerdc')\n        plot(linspace(-0.5, 0.5, len(psd)), 10*log10(psd/max(psd)))\n        axis([-0.5, 0.5, -60, 0])\n\n    .. seealso:: :class:`pcovar`\n\n    :validation: the AR parameters are the same as those returned by\n        a completely different function :func:`arcovar_marple`.\n\n    :References: [Mathworks]_", "id": "f10928:m1"}
{"signature": "def speriodogram(x, NFFT=None, detrend=True, sampling=<NUM_LIT:1.>,<EOL>scale_by_freq=True, window='<STR_LIT>', axis=<NUM_LIT:0>):", "body": "x = np.array(x)<EOL>if x.ndim == <NUM_LIT:1>:<EOL><INDENT>axis = <NUM_LIT:0><EOL>r = x.shape[<NUM_LIT:0>]<EOL>w = Window(r, window)   <EOL>w = w.data<EOL><DEDENT>elif x.ndim == <NUM_LIT:2>:<EOL><INDENT>logging.debug('<STR_LIT>')<EOL>[r, c] = x.shape<EOL>w = np.array([Window(r, window).data for this in range(c)]).reshape(r,c) <EOL><DEDENT>if NFFT is None:<EOL><INDENT>NFFT = len(x)<EOL><DEDENT>isreal = np.isrealobj(x)<EOL>if detrend == True:<EOL><INDENT>m = np.mean(x, axis=axis)<EOL><DEDENT>else:<EOL><INDENT>m = <NUM_LIT:0><EOL><DEDENT>if isreal == True:<EOL><INDENT>if x.ndim == <NUM_LIT:2>:<EOL><INDENT>res =  (abs (rfft (x*w - m, NFFT, axis=<NUM_LIT:0>))) ** <NUM_LIT> / r<EOL><DEDENT>else:<EOL><INDENT>res =  (abs (rfft (x*w - m, NFFT, axis=-<NUM_LIT:1>))) ** <NUM_LIT> / r<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if x.ndim == <NUM_LIT:2>:<EOL><INDENT>res =  (abs (fft (x*w - m, NFFT, axis=<NUM_LIT:0>))) ** <NUM_LIT> / r<EOL><DEDENT>else:<EOL><INDENT>res =  (abs (fft (x*w - m, NFFT, axis=-<NUM_LIT:1>))) ** <NUM_LIT> / r<EOL><DEDENT><DEDENT>if scale_by_freq is True:<EOL><INDENT>df = sampling / float(NFFT)<EOL>res*= <NUM_LIT:2> * np.pi / df<EOL><DEDENT>if x.ndim == <NUM_LIT:1>:<EOL><INDENT>return res.transpose()<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT>", "docstring": "Simple periodogram, but matrices accepted.\n\n    :param x: an array or matrix of data samples.\n    :param NFFT: length of the data before FFT is computed (zero padding)\n    :param bool detrend: detrend the data before co,puteing the FFT\n    :param float sampling: sampling frequency of the input :attr:`data`.\n\n    :param scale_by_freq:\n    :param str window:\n\n    :return: 2-sided PSD if complex data, 1-sided if real.\n\n    if a matrix is provided (using numpy.matrix), then a periodogram\n    is computed for each row. 
The returned matrix has the same shape as the input\n    matrix.\n\n    The mean of the input data is also removed from the data before computing\n    the psd.\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from pylab import grid, semilogy\n        from spectrum import data_cosine, speriodogram\n        data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200)\n        semilogy(speriodogram(data, detrend=False, sampling=1024), marker='o')\n        grid(True)\n\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        import numpy\n        from spectrum import speriodogram, data_cosine\n        from pylab import figure, semilogy, figure ,imshow\n        # create N data sets and make the frequency dependent on the time\n        N = 100\n        m = numpy.concatenate([data_cosine(N=1024, A=0.1, sampling=1024, freq=x) \n            for x in range(1, N)]);\n        m.resize(N, 1024)\n        res = speriodogram(m)\n        figure(1)\n        semilogy(res)\n        figure(2)\n        imshow(res.transpose(), aspect='auto')\n\n    .. todo:: a proper spectrogram class/function that takes care of normalisation", "id": "f10929:m0"}
{"signature": "def __init__(self, data, sampling=<NUM_LIT:1.>,<EOL>window='<STR_LIT>', NFFT=None, scale_by_freq=False,<EOL>detrend=None):", "body": "super(Periodogram, self).__init__(data,<EOL>window=window,<EOL>sampling=sampling,<EOL>NFFT=NFFT,<EOL>scale_by_freq=scale_by_freq,<EOL>detrend=detrend)<EOL>", "docstring": "**Periodogram Constructor**\n\n        :param array data: input data (list or numpy.array)\n        :param float sampling: sampling frequency of the input :attr:`data`.\n        :param str window: a tapering window. See :class:`~spectrum.window.Window`.\n        :param int NFFT: total length of the final data sets (padded with zero\n            if needed; default is 4096)\n        :param bool scale_by_freq:\n        :param str detrend:", "id": "f10929:c0:m0"}
{"signature": "def WelchPeriodogram(data, NFFT=None,  sampling=<NUM_LIT:1.>, **kargs):", "body": "from pylab import psd<EOL>spectrum = Spectrum(data, sampling=<NUM_LIT:1.>)<EOL>P = psd(data, NFFT, Fs=sampling, **kargs)<EOL>spectrum.psd = P[<NUM_LIT:0>]<EOL>return P, spectrum<EOL>", "docstring": "r\"\"\"Simple periodogram wrapper of numpy.psd function.\n\n    :param A: the input data\n    :param int NFFT: total length of the final data sets (padded \n        with zero if needed; default is 4096)\n    :param str window:\n\n    :Technical documentation:\n\n    When we calculate the periodogram of a set of data we get an estimation\n    of the spectral density. In fact as we use a Fourier transform and a\n    truncated segments the spectrum is the convolution of the data with a\n    rectangular window which Fourier transform is\n\n    .. math::\n\n        W(s)= \\frac{1}{N^2} \\left[ \\frac{\\sin(\\pi s)}{\\sin(\\pi s/N)} \\right]^2\n\n    Thus oscillations and sidelobes appears around the main frequency. One aim of t he tapering is to reduced this effects. We multiply data by a window whose  sidelobes are much smaller than the main lobe. Classical window is hanning window.  But other windows are available. However we must take into account this energy and divide the spectrum by energy of taper used. Thus periodogram becomes :\n\n    .. math::\n\n        D_k \\equiv \\sum_{j=0}^{N-1}c_jw_j \\; e^{2\\pi ijk/N}  \\qquad k=0,...,N-1\n\n    .. math::\n\n        P(0)=P(f_0)=\\frac{1}{2\\pi W_{ss}}\\arrowvert{D_0}\\arrowvert^2\n\n    .. math::\n\n        P(f_k)=\\frac{1}{2\\pi W_{ss}} \\left[\\arrowvert{D_k}\\arrowvert^2+\\arrowvert{D_{N-k}}\\arrowvert^2\\right]        \\qquad k=0,1,...,     \\left( \\frac{1}{2}-1 \\right)\n\n    .. math::\n\n        P(f_c)=P(f_{N/2})= \\frac{1}{2\\pi W_{ss}} \\arrowvert{D_{N/2}}\\arrowvert^2\n\n    with\n\n    .. math::\n\n        {W_{ss}} \\equiv N\\sum_{j=0}^{N-1}w_j^2\n\n\n    .. 
plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import WelchPeriodogram, marple_data\n        psd = WelchPeriodogram(marple_data, 256)", "id": "f10929:m1"}
{"signature": "def ma(X, Q, M):", "body": "if Q <= <NUM_LIT:0> or Q >= M:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>a, rho, _c = yulewalker.aryule(X, M, '<STR_LIT>')   <EOL>a = np.insert(a, <NUM_LIT:0>, <NUM_LIT:1>)<EOL>ma_params, _p, _c = yulewalker.aryule(a, Q, '<STR_LIT>')    <EOL>return ma_params, rho<EOL>", "docstring": "Moving average estimator.\n\n    This program provides an estimate of the moving average parameters\n    and driving noise variance for a data sequence based on a\n    long AR model and a least squares fit.\n\n    :param array X: The input data array\n    :param int Q: Desired MA model order (must be >0 and <M)\n    :param int M: Order of \"long\" AR model (suggest at least 2*Q )\n\n    :return:\n        * MA    - Array of Q complex MA parameter estimates\n        * RHO   - Real scalar of white noise variance estimate\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import arma2psd, ma, marple_data\n        import pylab\n\n        # Estimate 15 Ma parameters\n        b, rho = ma(marple_data, 15, 30)\n        # Create the PSD from those MA parameters\n        psd = arma2psd(B=b, rho=rho, sides='centerdc')\n        # and finally plot the PSD\n        pylab.plot(pylab.linspace(-0.5, 0.5, 4096), 10 * pylab.log10(psd/max(psd)))\n        pylab.axis([-0.5, 0.5, -30, 0])\n\n    :reference: [Marple]_", "id": "f10932:m2"}
{"signature": "def arma_estimate(X, P, Q, lag):", "body": "R = CORRELATION(X, maxlags=lag, norm='<STR_LIT>')<EOL>R0 = R[<NUM_LIT:0>]<EOL>MPQ = lag - Q + P<EOL>N = len(X)<EOL>Y = np.zeros(N-P, dtype=complex)<EOL>for K in range(<NUM_LIT:0>, MPQ):<EOL><INDENT>KPQ = K + Q - P+<NUM_LIT:1><EOL>if KPQ < <NUM_LIT:0>:<EOL><INDENT>Y[K] = R[-KPQ].conjugate()<EOL><DEDENT>if KPQ == <NUM_LIT:0>:<EOL><INDENT>Y[K] = R0<EOL><DEDENT>if KPQ > <NUM_LIT:0>:<EOL><INDENT>Y[K] = R[KPQ]<EOL><DEDENT><DEDENT>Y.resize(lag)<EOL>if P <= <NUM_LIT:4>:<EOL><INDENT>res = arcovar_marple(Y.copy(), P)    <EOL>ar_params = res[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>res = arcovar(Y.copy(), P)    <EOL>ar_params = res[<NUM_LIT:0>]<EOL><DEDENT>Y.resize(N-P)<EOL>for k in range(P, N):<EOL><INDENT>SUM = X[k]<EOL>for j in range(<NUM_LIT:0>, P):<EOL><INDENT>SUM = SUM + ar_params[j] * X[k-j-<NUM_LIT:1>]   <EOL><DEDENT>Y[k-P] = SUM<EOL><DEDENT>ma_params, rho = ma(Y, Q, <NUM_LIT:2>*Q)     <EOL>return ar_params, ma_params, rho<EOL>", "docstring": "Autoregressive and moving average estimators.\n\n    This function provides an estimate of the autoregressive\n    parameters, the moving average parameters, and the driving\n    white noise variance of  an ARMA(P,Q) for a complex or real data sequence.\n\n    The parameters are estimated using three steps:\n\n        * Estimate the AR parameters from the original data based on a least\n          squares modified Yule-Walker technique,\n        * Produce a residual time sequence by filtering the original data\n          with a filter based on the AR parameters,\n        * Estimate the MA parameters from the residual time sequence.\n\n    :param array X: Array of data samples (length N)\n    :param int P: Desired number of AR parameters\n    :param int Q: Desired number of MA parameters\n    :param int lag: Maximum lag to use for autocorrelation estimates\n\n    :return:\n        * A     - Array of complex P AR parameter estimates\n        * B     - Array of complex Q 
MA parameter estimates\n        * RHO   - White noise variance estimate\n\n    .. note::\n      *  lag must be >= Q (MA order)\n\n    **dependencies**:\n        * :meth:`spectrum.correlation.CORRELATION`\n        * :meth:`spectrum.covar.arcovar`\n        * :meth:`spectrum.arma.ma`\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import arma_estimate, arma2psd, marple_data\n        import pylab\n\n        a,b, rho = arma_estimate(marple_data, 15, 15, 30)\n        psd = arma2psd(A=a, B=b, rho=rho, sides='centerdc', norm=True)\n        pylab.plot(10 * pylab.log10(psd))\n        pylab.ylim([-50,0])\n\n    :reference: [Marple]_", "id": "f10932:m1"}
{"signature": "def __init__(self, data, P, Q, lag, NFFT=None, sampling=<NUM_LIT:1.>,<EOL>scale_by_freq=False):", "body": "super(parma, self).__init__(data, ma_order=Q, ar_order=P, lag=lag,<EOL>NFFT=NFFT, sampling=sampling,<EOL>scale_by_freq=scale_by_freq)<EOL>self.lag = lag<EOL>", "docstring": "**Constructor:**\n\n        For a detailed description of the parameters, see :func:`arma_estimate`.\n\n        :param array data:     input data (list or numpy.array)\n        :param int P:\n        :param int Q:\n        :param int lag:\n        :param int NFFT: total length of the final data sets (padded with\n            zero if needed; default is 4096)\n        :param float sampling: sampling frequency of the input :attr:`data`.", "id": "f10932:c0:m0"}
{"signature": "def arma2psd(A=None, B=None, rho=<NUM_LIT:1.>, T=<NUM_LIT:1.>, NFFT=<NUM_LIT>, sides='<STR_LIT:default>',<EOL>norm=False):", "body": "if NFFT is None:<EOL><INDENT>NFFT = <NUM_LIT><EOL><DEDENT>if A is None and B is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>psd = np.zeros(NFFT, dtype=complex)<EOL>if A is not None:<EOL><INDENT>ip = len(A)<EOL>den = np.zeros(NFFT, dtype=complex)<EOL>den[<NUM_LIT:0>] = <NUM_LIT:1.>+<NUM_LIT><EOL>for k in range(<NUM_LIT:0>, ip):<EOL><INDENT>den[k+<NUM_LIT:1>] = A[k]<EOL><DEDENT>denf = fft(den, NFFT)<EOL><DEDENT>if B is not None:<EOL><INDENT>iq = len(B)<EOL>num = np.zeros(NFFT, dtype=complex)<EOL>num[<NUM_LIT:0>] = <NUM_LIT:1.>+<NUM_LIT><EOL>for k in range(<NUM_LIT:0>, iq):<EOL><INDENT>num[k+<NUM_LIT:1>] = B[k]<EOL><DEDENT>numf = fft(num, NFFT)<EOL><DEDENT>if A is not None and B is not None:<EOL><INDENT>psd = rho / T * abs(numf)**<NUM_LIT> / abs(denf)**<NUM_LIT><EOL><DEDENT>elif A is not None:<EOL><INDENT>psd = rho / T / abs(denf)**<NUM_LIT><EOL><DEDENT>elif B is not None:<EOL><INDENT>psd = rho / T * abs(numf)**<NUM_LIT><EOL><DEDENT>psd = np.real(psd)<EOL>if sides != '<STR_LIT:default>':<EOL><INDENT>from . import tools<EOL>assert sides in ['<STR_LIT>']<EOL>if sides == '<STR_LIT>':<EOL><INDENT>psd = tools.twosided_2_centerdc(psd)<EOL><DEDENT><DEDENT>if norm == True:<EOL><INDENT>psd /= max(psd)<EOL><DEDENT>return psd<EOL>", "docstring": "r\"\"\"Computes power spectral density given ARMA values.\n\n    This function computes the power spectral density values\n    given the ARMA parameters of an ARMA model. It assumes that\n    the driving sequence is a white noise process of zero mean and\n    variance :math:`\\rho_w`. 
The sampling frequency and noise variance are\n    used to scale the PSD output, which length is set by the user with the\n    `NFFT` parameter.\n\n    :param array A:   Array of AR parameters (complex or real)\n    :param array B:   Array of MA parameters (complex or real)\n    :param float rho: White noise variance to scale the returned PSD\n    :param float T:   Sample interval in seconds to scale the returned PSD\n    :param int NFFT:  Final size of the PSD\n    :param str sides: Default PSD is two-sided, but sides can be set to centerdc.\n\n    .. warning:: By convention, the AR or MA arrays does not contain the\n        A0=1 value.\n\n    If :attr:`B` is None, the model is a pure AR model. If :attr:`A` is None,\n    the model is a pure MA model.\n\n    :return: two-sided PSD\n\n    .. rubric:: Details:\n\n    AR case: the power spectral density is:\n\n    .. math:: P_{ARMA}(f) = T \\rho_w \\left|\\frac{B(f)}{A(f)}\\right|^2\n\n    where:\n\n    .. math:: A(f) = 1 + \\sum_{k=1}^q b(k) e^{-j2\\pi fkT}\n    .. math:: B(f) = 1 + \\sum_{k=1}^p a(k) e^{-j2\\pi fkT}\n\n    .. rubric:: **Example:**\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        import spectrum.arma\n        from pylab import plot, log10, legend\n        plot(10*log10(spectrum.arma.arma2psd([1,0.5],[0.5,0.5])), label='ARMA(2,2)')\n        plot(10*log10(spectrum.arma.arma2psd([1,0.5],None)), label='AR(2)')\n        plot(10*log10(spectrum.arma.arma2psd(None,[0.5,0.5])), label='MA(2)')\n        legend()\n\n    :References: [Marple]_", "id": "f10932:m0"}
{"signature": "def lpc(x, N=None):", "body": "m = len(x)<EOL>if N is None:<EOL><INDENT>N = m - <NUM_LIT:1> <EOL><DEDENT>elif N > m-<NUM_LIT:1>:<EOL><INDENT>x.resize(N+<NUM_LIT:1>)<EOL><DEDENT>X = fft(x, <NUM_LIT:2>**nextpow2(<NUM_LIT>*len(x)-<NUM_LIT:1>))<EOL>R = real(ifft(abs(X)**<NUM_LIT:2>))<EOL>R = R/(m-<NUM_LIT:1.>) <EOL>a, e, ref = LEVINSON(R, N)<EOL>return a, e<EOL>", "docstring": "Linear Predictor Coefficients.\n\n    :param x:\n    :param int N: default is length(X) - 1\n\n    :Details:\n\n    Finds the coefficients :math:`A=(1, a(2), \\dots a(N+1))`, of an Nth order\n    forward linear predictor that predicts the current value value of the\n    real-valued time series x based on past samples:\n\n    .. math:: \\hat{x}(n) = -a(2)*x(n-1) - a(3)*x(n-2) - ... - a(N+1)*x(n-N)\n\n    such that the sum of the squares of the errors\n\n    .. math:: err(n) = X(n) - Xp(n)\n\n    is minimized. This function  uses the Levinson-Durbin recursion to\n    solve the normal equations that arise from the least-squares formulation.\n\n    .. seealso:: :func:`levinson`, :func:`aryule`, :func:`prony`, :func:`stmcb`\n\n    .. todo:: matrix case, references\n\n    :Example:\n\n    ::\n\n        from scipy.signal import lfilter\n        noise = randn(50000,1);  % Normalized white Gaussian noise\n        x = filter([1], [1 1/2 1/3 1/4], noise)\n        x = x[45904:50000]\n        x.reshape(4096, 1)\n        x = x[0]\n\n    Compute the predictor coefficients, estimated signal, prediction error, and autocorrelation sequence of the prediction error:\n\n\n    1.00000 + 0.00000i   0.51711 - 0.00000i   0.33908 - 0.00000i   0.24410 - 0.00000i\n\n    ::\n\n        a = lpc(x, 3)\n        est_x = lfilter([0 -a(2:end)],1,x);    % Estimated signal\n        e = x - est_x;                        % Prediction error\n        [acs,lags] = xcorr(e,'coeff');   % ACS of prediction error", "id": "f10934:m0"}
{"signature": "def _get_signal_space(S, NP, verbose=False, threshold=None, NSIG=None,<EOL>criteria='<STR_LIT>'):", "body": "from .criteria import aic_eigen, mdl_eigen<EOL>if NSIG is None:<EOL><INDENT>if threshold is None:<EOL><INDENT>logging.debug('<STR_LIT>')<EOL>if criteria == '<STR_LIT>':<EOL><INDENT>aic = aic_eigen(S, NP*<NUM_LIT:2>)<EOL><DEDENT>elif criteria == '<STR_LIT>':<EOL><INDENT>aic = mdl_eigen(S, NP*<NUM_LIT:2>)<EOL><DEDENT>NSIG = np.argmin(aic) + <NUM_LIT:1><EOL>logging.debug('<STR_LIT>', NSIG, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>logging.debug('<STR_LIT>')<EOL>m = threshold * min(S)<EOL>new_s = S[np.where(S>m)]<EOL>NSIG = len(new_s)<EOL>logging.debug('<STR_LIT>', NSIG)<EOL>if NSIG == <NUM_LIT:0>:<EOL><INDENT>NSIG = <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return NSIG<EOL>", "docstring": "todo", "id": "f10936:m3"}
{"signature": "def eigen(X, P, NSIG=None, method='<STR_LIT>', threshold=None, NFFT=default_NFFT,<EOL>criteria='<STR_LIT>', verbose=False):", "body": "if method not in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if NSIG != None and threshold != None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if NSIG is not None:<EOL><INDENT>if NSIG < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if NSIG >= P:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>N = len(X)<EOL>NP = N - P<EOL>assert <NUM_LIT:2> * NP > P-<NUM_LIT:1>, '<STR_LIT>'<EOL>if NP > <NUM_LIT:100>:<EOL><INDENT>NP = <NUM_LIT:100><EOL><DEDENT>FB = np.zeros((<NUM_LIT:2>*NP, P), dtype=complex)<EOL>Z = np.zeros(NFFT, dtype=complex)<EOL>PSD = np.zeros(NFFT)<EOL>for I in range(<NUM_LIT:0>, NP):<EOL><INDENT>for K in range(<NUM_LIT:0>, P):<EOL><INDENT>FB[I, K] = X[I-K+P-<NUM_LIT:1>]<EOL>FB[I+NP, K] = X[I+K+<NUM_LIT:1>].conjugate()<EOL><DEDENT><DEDENT>_U, S, V = svd (FB)<EOL>V = -V.transpose()<EOL>NSIG  = _get_signal_space(S, <NUM_LIT:2>*NP,<EOL>verbose=verbose, threshold=threshold,<EOL>NSIG=NSIG, criteria=criteria)<EOL>for I in range(NSIG, P):<EOL><INDENT>Z[<NUM_LIT:0>:P] = V[<NUM_LIT:0>:P, I]<EOL>Z[P:NFFT] = <NUM_LIT:0><EOL>Z  = fft(Z, NFFT)<EOL>if method == '<STR_LIT>':<EOL><INDENT>PSD = PSD + abs(Z)**<NUM_LIT><EOL><DEDENT>elif method == '<STR_LIT>' :<EOL><INDENT>PSD = PSD + abs(Z)**<NUM_LIT> / S[I]<EOL><DEDENT><DEDENT>PSD = <NUM_LIT:1.>/PSD<EOL>nby2 = int(NFFT/<NUM_LIT:2>)<EOL>newpsd = np.append(PSD[nby2:<NUM_LIT:0>:-<NUM_LIT:1>], PSD[nby2*<NUM_LIT:2>-<NUM_LIT:1>:nby2-<NUM_LIT:1>:-<NUM_LIT:1>])<EOL>return newpsd, S<EOL>", "docstring": "r\"\"\"Pseudo spectrum using eigenvector method (EV or Music)\n\n    This function computes either the Music or EigenValue (EV) noise\n    subspace frequency estimator.\n\n    First, an autocorrelation matrix of order `P` is computed from\n    the data. 
Second, this matrix is separated into vector subspaces,\n    one a signal subspace and the other a noise\n    subspace using a SVD method to obtain the eigen values and vectors.\n    From the eigen values :math:`\\lambda_i`, and eigen vectors :math:`v_k`,\n    the **pseudo spectrum** (see note below) is computed as follows:\n\n    .. math:: P_{ev}(f) = \\frac{1}{e^H(f)\\left(\\sum\\limits_{k=M+1}^{p} \\frac{1}{\\lambda_k}v_kv_k^H\\right)e(f)}\n\n    The separation of the noise and signal subspaces requires expertise\n    of the signal. However, AIC and MDL criteria may be used to automatically\n    perform this task.\n\n    You still need to provide the parameter `P` to indicate the maximum number\n    of eigen values to be computed. The criteria will just select a subset\n    to estimate the pseudo spectrum (see :func:`~spectrum.criteria.aic_eigen`\n    and :func:`~spectrum.criteria.mdl_eigen` for details.\n\n    .. note:: **pseudo spectrum**. func:`eigen` does not compute a PSD estimate.\n        Indeed, the method does not preserve the measured process power.\n\n    :param X: Array data samples\n    :param int P: maximum number of eigen values to compute. NSIG (if\n        specified) must therefore be less than P.\n    :param str method: 'music' or 'ev'.\n    :param int NSIG: If specified, the signal sub space uses NSIG eigen values.\n    :param float threshold: If specified, the signal sub space is made of the\n        eigen values larger than :math:`\\rm{threshold} \\times \\lambda_{min}`,\n        where :math:`\\lambda_{min}` is the minimum eigen values.\n    :param int NFFT: total length of the final data sets (padded with zero \n        if needed; default is 4096)\n\n    :return:\n        * PSD: Array of real frequency estimator values (two sided for\n                complex data and one sided for real data)\n        * S, the eigen values\n\n    .. 
plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import eigen, marple_data\n        from pylab import plot, log10, linspace, legend, axis\n\n        psd, ev = eigen(marple_data, 15, NSIG=11)\n        f = linspace(-0.5, 0.5, len(psd))\n        plot(f, 10 * log10(psd/max(psd)), label='User defined')\n\n        psd, ev = eigen(marple_data, 15, threshold=2)\n        plot(f, 10 * log10(psd/max(psd)), label='threshold method (100)')\n\n        psd, ev = eigen(marple_data, 15)\n        plot(f, 10 * log10(psd/max(psd)), label='AIC method (8)')\n\n        legend()\n        axis([-0.5, 0.5, -120, 0])\n\n    .. seealso::\n        :func:`pev`,\n        :func:`pmusic`,\n        :func:`~spectrum.criteria.aic_eigen`\n\n    :References: [Marple]_, Chap 13\n\n    .. todo:: for developers:\n\n        * what should be the second argument of the criteria N, N-P, P...?\n        * what should be the max value of NP", "id": "f10936:m2"}
{"signature": "def music(X, IP, NSIG=None, NFFT=default_NFFT, threshold=None, criteria='<STR_LIT>',<EOL>verbose=False):", "body": "return eigen(X, IP, NSIG=NSIG, method='<STR_LIT>', NFFT=NFFT,<EOL>threshold=threshold, criteria=criteria, verbose=verbose)<EOL>", "docstring": "Eigen value pseudo spectrum estimate. See :func:`eigenfre`", "id": "f10936:m0"}
{"signature": "def __init__(self, data, IP, NSIG=None, NFFT=None, sampling=<NUM_LIT:1.>,<EOL>threshold=None, criteria=\"<STR_LIT>\", verbose=False, scale_by_freq=False):", "body": "super(pmusic, self).__init__(data, ar_order=IP,<EOL>scale_by_freq=scale_by_freq, NFFT=NFFT, sampling=sampling)<EOL>self.NSIG = NSIG<EOL>self.threshold = threshold<EOL>self.criteria = criteria<EOL>self.verbose = verbose<EOL>", "docstring": "**Constructor:**\n\n        For a detailed description of the parameters, see :func:`arma_estimate`.\n\n        :param array data: input data (list or numpy.array)\n        :param int P: maximum number of eigen values to compute. NSIG (if\n            specified) must therefore be less than P.\n        :param int NSIG: If specified, the signal sub space uses NSIG eigen values.\n        :param int NFFT: total length of the final data sets (padded with zero if needed; default is 4096)\n        :param float sampling: sampling frequency of the input :attr:`data`.", "id": "f10936:c0:m0"}
{"signature": "def __init__(self, data, IP, NSIG=None, NFFT=None, sampling=<NUM_LIT:1.>,<EOL>scale_by_freq=False,<EOL>threshold=None, criteria=\"<STR_LIT>\", verbose=False):", "body": "super(pev, self).__init__(data, ar_order=IP,<EOL>scale_by_freq=scale_by_freq, NFFT=NFFT, sampling=sampling)<EOL>self.NSIG = NSIG<EOL>self.threshold = threshold<EOL>self.criteria = criteria<EOL>self.verbose = verbose<EOL>", "docstring": "**Constructor:**\n\n        For a detailed description of the parameters, see :func:`arma_estimate`.\n\n        :param array data: input data (list or numpy.array)\n        :param int P: maximum number of eigen values to compute. NSIG (if\n            specified) must therefore be less than P.\n        :param int NSIG: If specified, the signal sub space uses NSIG eigen values.\n        :param int NFFT: total length of the final data sets (padded with\n            zero if needed; default is 4096)\n        :param float sampling: sampling frequency of the input :attr:`data`.", "id": "f10936:c1:m0"}
{"signature": "def ev(X, IP, NSIG=None, NFFT=default_NFFT, threshold=None, criteria='<STR_LIT>',<EOL>verbose=False):", "body": "return eigen(X, IP, NSIG=NSIG, method='<STR_LIT>', NFFT=NFFT,<EOL>threshold=threshold, criteria=criteria, verbose=verbose)<EOL>", "docstring": "Eigen value pseudo spectrum estimate. See :func:`eigenfre`", "id": "f10936:m1"}
{"signature": "def db2mag(xdb):", "body": "return <NUM_LIT>**(xdb/<NUM_LIT>)<EOL>", "docstring": "Convert decibels (dB) to magnitude\n\n    .. doctest::\n\n        >>> from spectrum import db2mag\n        >>> db2mag(-20)\n        0.1\n\n    .. seealso:: :func:`pow2db`", "id": "f10937:m12"}
{"signature": "def twosided_2_onesided(data):", "body": "assert len(data) % <NUM_LIT:2> == <NUM_LIT:0><EOL>N = len(data)<EOL>psd = np.array(data[<NUM_LIT:0>:N//<NUM_LIT:2>+<NUM_LIT:1>]) * <NUM_LIT><EOL>psd[<NUM_LIT:0>] /= <NUM_LIT><EOL>psd[-<NUM_LIT:1>] = data[-<NUM_LIT:1>]<EOL>return psd<EOL>", "docstring": "Convert a one-sided PSD to a twosided PSD\n\n    In order to keep the power in the onesided PSD the same\n    as in the twosided version, the onesided values are twice\n    as much as in the input data (except for the zero-lag value).\n\n    ::\n\n        >>> twosided_2_onesided([10, 2,3,3,2,8])\n        array([ 10.,   4.,   6.,   8.])", "id": "f10937:m2"}
{"signature": "def fftshift(x):", "body": "return np.fft.fftshift(x)<EOL>", "docstring": "wrapper to numpy.fft.fftshift\n\n    .. doctest::\n\n        >>> from spectrum import fftshift\n        >>> x = [100, 2, 3, 4, 5]\n        >>> fftshift(x)\n        array([  4,   5, 100,   2,   3])", "id": "f10937:m0"}
{"signature": "def onesided_2_twosided(data):", "body": "psd = np.concatenate((data[<NUM_LIT:0>:-<NUM_LIT:1>], cshift(data[-<NUM_LIT:1>:<NUM_LIT:0>:-<NUM_LIT:1>], -<NUM_LIT:1>)))/<NUM_LIT><EOL>psd[<NUM_LIT:0>] *= <NUM_LIT><EOL>psd[-<NUM_LIT:1>] *= <NUM_LIT><EOL>return psd<EOL>", "docstring": "Convert a two-sided PSD to a one-sided PSD\n\n    In order to keep the power in the twosided PSD the same\n    as in the onesided version, the twosided values are 2 times\n    lower than the input data (except for the zero-lag and N-lag\n    values).\n\n    ::\n\n        >>> twosided_2_onesided([10, 4, 6, 8])\n        array([ 10.,   2.,   3.,   3., 2., 8.])", "id": "f10937:m3"}
{"signature": "def db2pow(xdb):", "body": "return <NUM_LIT>**(xdb/<NUM_LIT>)<EOL>", "docstring": "Convert decibels (dB) to power\n\n    .. doctest::\n\n        >>> from spectrum import db2pow\n        >>> p = db2pow(-10)\n        >>> p\n        0.1\n\n    .. seealso:: :func:`pow2db`", "id": "f10937:m10"}
{"signature": "def __init__(self, data, order, norm='<STR_LIT>', NFFT=None, sampling=<NUM_LIT:1.>,<EOL>scale_by_freq=True):", "body": "super(pyule, self).__init__(data, ar_order=order,  NFFT=NFFT,<EOL>scale_by_freq=scale_by_freq,<EOL>sampling=sampling)<EOL>self.sampling = sampling<EOL>self._norm_aryule = norm<EOL>", "docstring": "**Constructor**\n\n        For a detailled description of the parameters, see :func:`aryule`.\n\n        :param array data: input data (list or numpy.array)\n        :param int order:\n        :param int NFFT: total length of the final data sets (padded with\n            zero if needed; default is 4096)\n        :param float sampling: sampling frequency of the input :attr:`data`\n        :param str norm: don't change if you do not know", "id": "f10938:c0:m0"}
{"signature": "def _crosscov(x, y, axis=-<NUM_LIT:1>, all_lags=False, debias=True):", "body": "if x.shape[axis] != y.shape[axis]:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>if debias:<EOL><INDENT>x = _remove_bias(x, axis)<EOL>y = _remove_bias(y, axis)<EOL><DEDENT>slicing = [slice(d) for d in x.shape]<EOL>slicing[axis] = slice(None,None,-<NUM_LIT:1>)<EOL>sxy = _fftconvolve(x, y[tuple(slicing)], axis=axis, mode='<STR_LIT>')<EOL>N = x.shape[axis]<EOL>sxy /= N<EOL>if all_lags:<EOL><INDENT>return sxy<EOL><DEDENT>slicing[axis] = slice(N-<NUM_LIT:1>,<NUM_LIT:2>*N-<NUM_LIT:1>)<EOL>return sxy[tuple(slicing)]<EOL>", "docstring": "Returns the crosscovariance sequence between two ndarrays.\n    This is performed by calling fftconvolve on x, y[::-1]\n\n    Parameters\n\n\n    x: ndarray\n    y: ndarray\n    axis: time axis\n\n    all_lags: {True/False}\n    whether to return all nonzero lags, or to clip the length of s_xy\n    to be the length of x and y. If False, then the zero lag covariance\n    is at index 0. Otherwise, it is found at (len(x) + len(y) - 1)/2\n\n    debias: {True/False}\n    Always removes an estimate of the mean along the axis, unless\n    told not to.\n\n\n    cross covariance is defined as\n    sxy[k] := E{X[t]*Y[t+k]}, where X,Y are zero mean random processes", "id": "f10939:m4"}
{"signature": "def _other_dpss_method(N, NW, Kmax):", "body": "<EOL>from scipy import linalg as la<EOL>Kmax = int(Kmax)<EOL>W = float(NW)/N<EOL>ab = np.zeros((<NUM_LIT:2>,N), '<STR_LIT:d>')<EOL>nidx = np.arange(N)<EOL>ab[<NUM_LIT:0>,<NUM_LIT:1>:] = nidx[<NUM_LIT:1>:]*(N-nidx[<NUM_LIT:1>:])/<NUM_LIT><EOL>ab[<NUM_LIT:1>] = ((N-<NUM_LIT:1>-<NUM_LIT:2>*nidx)/<NUM_LIT>)**<NUM_LIT:2> * np.cos(<NUM_LIT:2>*np.pi*W)<EOL>l,v = la.eig_banded(ab, select='<STR_LIT:i>', select_range=(N-Kmax, N-<NUM_LIT:1>))<EOL>dpss = v.transpose()[::-<NUM_LIT:1>]<EOL>fix_symmetric = (dpss[<NUM_LIT:0>::<NUM_LIT:2>].sum(axis=<NUM_LIT:1>) < <NUM_LIT:0>)<EOL>for i, f in enumerate(fix_symmetric):<EOL><INDENT>if f:<EOL><INDENT>dpss[<NUM_LIT:2>*i] *= -<NUM_LIT:1><EOL><DEDENT><DEDENT>fix_skew = (dpss[<NUM_LIT:1>::<NUM_LIT:2>,<NUM_LIT:1>] < <NUM_LIT:0>)<EOL>for i, f in enumerate(fix_skew):<EOL><INDENT>if f:<EOL><INDENT>dpss[<NUM_LIT:2>*i+<NUM_LIT:1>] *= -<NUM_LIT:1><EOL><DEDENT><DEDENT>acvs = _autocov(dpss, debias=False) * N<EOL>r = <NUM_LIT:4>*W*np.sinc(<NUM_LIT:2>*W*nidx)<EOL>r[<NUM_LIT:0>] = <NUM_LIT:2>*W<EOL>eigvals = np.dot(acvs, r)<EOL>return dpss, eigvals<EOL>", "docstring": "Returns the Discrete Prolate Spheroidal Sequences of orders [0,Kmax-1]\n    for a given frequency-spacing multiple NW and sequence length N.\n\n    See dpss function that is the official version. This version is indepedant\n    of the C code and relies on Scipy function. However, it is slower by a factor 3\n\n    Tridiagonal form of DPSS calculation from:", "id": "f10939:m2"}
{"signature": "def CORRELOGRAMPSD(X, Y=None, lag=-<NUM_LIT:1>, window='<STR_LIT>',<EOL>norm='<STR_LIT>', NFFT=<NUM_LIT>, window_params={},<EOL>correlation_method='<STR_LIT>'):", "body": "N = len(X)<EOL>assert lag<N, '<STR_LIT>'<EOL>assert correlation_method in ['<STR_LIT>', '<STR_LIT>']<EOL>if Y is None:<EOL><INDENT>Y = numpy.array(X)<EOL>crosscorrelation = False<EOL><DEDENT>else:<EOL><INDENT>crosscorrelation = True<EOL><DEDENT>if NFFT is None:<EOL><INDENT>NFFT = N<EOL><DEDENT>psd = numpy.zeros(NFFT, dtype=complex)<EOL>w = Window(<NUM_LIT>*lag+<NUM_LIT:1>, window, **window_params)<EOL>w = w.data[lag+<NUM_LIT:1>:]<EOL>if correlation_method == '<STR_LIT>':<EOL><INDENT>rxy = CORRELATION (X, Y, maxlags=lag, norm=norm)<EOL><DEDENT>elif correlation_method == '<STR_LIT>':<EOL><INDENT>rxy, _l = xcorr (X, Y, maxlags=lag, norm=norm)<EOL>rxy = rxy[lag:]<EOL><DEDENT>psd[<NUM_LIT:0>] = rxy[<NUM_LIT:0>]<EOL>psd[<NUM_LIT:1>:lag+<NUM_LIT:1>] = rxy[<NUM_LIT:1>:] * w<EOL>if crosscorrelation is True:<EOL><INDENT>if correlation_method == '<STR_LIT>':<EOL><INDENT>ryx = CORRELATION(Y, X, maxlags=lag, norm=norm)<EOL><DEDENT>elif correlation_method == '<STR_LIT>':<EOL><INDENT>ryx, _l = xcorr(Y, X, maxlags=lag, norm=norm)<EOL>ryx = ryx[lag:]<EOL><DEDENT>psd[-<NUM_LIT:1>:NFFT-lag-<NUM_LIT:1>:-<NUM_LIT:1>] = ryx[<NUM_LIT:1>:].conjugate() * w<EOL><DEDENT>else: <EOL><INDENT>psd[-<NUM_LIT:1>:NFFT-lag-<NUM_LIT:1>:-<NUM_LIT:1>] = rxy[<NUM_LIT:1>:].conjugate() * w<EOL><DEDENT>psd = numpy.real(fft(psd))<EOL>return psd<EOL>", "docstring": "PSD estimate using correlogram method.\n\n\n    :param array X: complex or real data samples X(1) to X(N)\n    :param array Y: complex data samples Y(1) to Y(N). If provided, computes\n        the cross PSD, otherwise the PSD is returned\n    :param int lag: highest lag index to compute. 
Must be less than N\n    :param str window_name: see :mod:`window` for list of valid names\n    :param str norm: one of the valid normalisation of :func:`xcorr` (biased, \n        unbiased, coeff, None)\n    :param int NFFT: total length of the final data sets (padded with zero \n        if needed; default is 4096)\n    :param str correlation_method: either `xcorr` or `CORRELATION`.\n        CORRELATION should be removed in the future.\n\n    :return:\n        * Array of real (cross) power spectral density estimate values. This is\n          a two sided array with negative values following the positive ones\n          whatever is the input data (real or complex).\n\n    .. rubric:: Description:\n\n    The exact power spectral density is the Fourier transform of the\n    autocorrelation sequence:\n\n    .. math:: P_{xx}(f) = T \\sum_{m=-\\infty}^{\\infty} r_{xx}[m] exp^{-j2\\pi fmT}\n\n    The correlogram method of PSD estimation substitutes a finite sequence of\n    autocorrelation estimates :math:`\\hat{r}_{xx}` in place of :math:`r_{xx}`.\n    This estimation can be computed with :func:`xcorr` or :func:`CORRELATION` by\n    chosing a proprer lag `L`. The estimated PSD is then\n\n    .. math:: \\hat{P}_{xx}(f) = T \\sum_{m=-L}^{L} \\hat{r}_{xx}[m] exp^{-j2\\pi fmT}\n\n    The lag index must be less than the number of data samples `N`. Ideally, it\n    should be around `L/10` [Marple]_ so as to avoid greater statistical\n    variance associated with higher lags.\n\n    To reduce the leakage of the implicit rectangular window and therefore to\n    reduce the bias in the estimate, a tapering window is normally used and lead\n    to the so-called Blackman and Tukey correlogram:\n\n    .. math:: \\hat{P}_{BT}(f) = T \\sum_{m=-L}^{L} w[m] \\hat{r}_{xx}[m] exp^{-j2\\pi fmT}\n\n    The correlogram for the cross power spectral estimate is\n\n    .. 
math:: \\hat{P}_{xx}(f) = T \\sum_{m=-L}^{L} \\hat{r}_{xx}[m] exp^{-j2\\pi fmT}\n\n    which is computed if :attr:`Y` is not provide. In such case,\n    :math:`r_{yx} = r_{xy}` so we compute the correlation only once.\n\n    .. plot::\n        :width: 80%\n        :include-source:\n\n        from spectrum import CORRELOGRAMPSD, marple_data\n        from spectrum.tools import cshift\n        from pylab import log10, axis, grid, plot,linspace\n\n        psd = CORRELOGRAMPSD(marple_data, marple_data, lag=15)\n        f = linspace(-0.5, 0.5, len(psd))\n        psd = cshift(psd, len(psd)/2)\n        plot(f, 10*log10(psd/max(psd)))\n        axis([-0.5,0.5,-50,0])\n        grid(True)\n\n    .. seealso:: :func:`create_window`, :func:`CORRELATION`, :func:`xcorr`,\n        :class:`pcorrelogram`.", "id": "f10940:m0"}
{"signature": "@classmethod<EOL><INDENT>def gen_token(cls):<DEDENT>", "body": "token = os.urandom(<NUM_LIT:16>)<EOL>token_time = int(time.time())<EOL>return {'<STR_LIT>': token, '<STR_LIT>': token_time}<EOL>", "docstring": "\u751f\u6210 access_token", "id": "f10963:c1:m2"}
{"signature": "@classmethod<EOL><INDENT>def get_by_token(cls, token):<DEDENT>", "body": "try:<EOL><INDENT>return cls.get(cls.token == token)<EOL><DEDENT>except DoesNotExist:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "\u6839\u636e access_token \u83b7\u53d6\u7528\u6237", "id": "f10963:c1:m4"}
{"signature": "async def load_fk(self, info: SQLQueryInfo, records: Iterable[DataRecord]) -> Union[List, Iterable]:", "body": "<EOL>async def check(data, records):<EOL><INDENT>for column, fkvalues_lst in data.items():<EOL><INDENT>for fkvalues in fkvalues_lst:<EOL><INDENT>pks = []<EOL>all_ni = True<EOL>vcls = self.app.tables[fkvalues['<STR_LIT>']]<EOL>for i in records:<EOL><INDENT>val = i.get(column, NotImplemented)<EOL>if val != NotImplemented:<EOL><INDENT>all_ni = False<EOL><DEDENT>pks.append(val)<EOL><DEDENT>if all_ni:<EOL><INDENT>logger.debug(\"<STR_LIT>\" % column)<EOL>continue<EOL><DEDENT>v = vcls(self.app, self._request)  <EOL>await v._prepare()<EOL>info2 = SQLQueryInfo()<EOL>info2.set_select(ALL_COLUMNS)<EOL>info2.add_condition(PRIMARY_KEY, SQL_OP.IN, pks)<EOL>info2.bind(v)<EOL>try:<EOL><INDENT>fk_records, count = await v._sql.select_page(info2, size=-<NUM_LIT:1>)<EOL><DEDENT>except RecordNotFound:<EOL><INDENT>continue<EOL><DEDENT>await v.check_records_permission(info2, fk_records)<EOL>fk_dict = {}<EOL>for i in fk_records:<EOL><INDENT>fk_dict[i[vcls.primary_key]] = i<EOL><DEDENT>column_to_set = fkvalues.get('<STR_LIT>', column) or column<EOL>for _, record in enumerate(records):<EOL><INDENT>k = record.get(column, NotImplemented)<EOL>if k in fk_dict:<EOL><INDENT>record[column_to_set] = fk_dict[k]<EOL><DEDENT><DEDENT>if fkvalues['<STR_LIT>']:<EOL><INDENT>await check(fkvalues['<STR_LIT>'], fk_records)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>await check(info.loadfk, records)<EOL>return records<EOL>", "docstring": ":param info:\n:param records: the data got from database and filtered from permission\n:return:", "id": "f10982:c3:m8"}
{"signature": "async def after_delete(self, deleted_records: List[DataRecord]):", "body": "pass<EOL>", "docstring": ":param deleted_records:\n:return:", "id": "f10982:c3:m25"}
{"signature": "async def _call_handle(self, func, *args):", "body": "await async_call(func, *args)<EOL>if self.is_finished:<EOL><INDENT>raise FinishQuitException()<EOL><DEDENT>", "docstring": "call and check result of handle_query/read/insert/update", "id": "f10982:c3:m9"}
{"signature": "async def after_insert(self, raw_post: Dict, values: SQLValuesToWrite, record: DataRecord):", "body": "pass<EOL>", "docstring": "\u4e00\u5bf9\u4e00\nEmitted before finish\n:param raw_post:\n:param values:\n:param record:\n:return:", "id": "f10982:c3:m21"}
{"signature": "@classmethod<EOL><INDENT>def _ready(cls):<DEDENT>", "body": "sync_call(cls.ready)<EOL>", "docstring": "private version of cls.ready()", "id": "f10982:c0:m31"}
{"signature": "async def before_delete(self, records: List[DataRecord]):", "body": "pass<EOL>", "docstring": ":param records:\n:return:", "id": "f10982:c3:m24"}
{"signature": "@classmethod<EOL><INDENT>def ready(cls):<DEDENT>", "body": "pass<EOL>", "docstring": "All modules loaded, and ready to serve.\nEmitted after register routes and before loop start\n:return:", "id": "f10982:c0:m32"}
{"signature": "async def before_update(self, raw_post: Dict, values: SQLValuesToWrite, records: List[DataRecord]):", "body": "pass<EOL>", "docstring": "\u4e00\u5bf9\u591a\uff0c\u5f53\u6709\u4e00\u4e2a\u6743\u9650\u68c0\u67e5\u5931\u8d25\u65f6\u5373\u8fd4\u56de\u5f02\u5e38\nraw_post \u6743\u9650\u8fc7\u6ee4\u548c\u5217\u8fc7\u6ee4\u524d\uff0cvalues \u8fc7\u6ee4\u540e\n:param raw_post:\n:param values:\n:param records:\n:return:", "id": "f10982:c3:m22"}
{"signature": "@abstractmethod<EOL><INDENT>def setup_user_key(self, key, expires=<NUM_LIT:30>):<DEDENT>", "body": "pass<EOL>", "docstring": "setup user key for server", "id": "f10987:c1:m2"}
{"signature": "@abstractmethod<EOL><INDENT>def teardown_user_key(self):<DEDENT>", "body": "pass<EOL>", "docstring": "teardown user key for server, make the token invalid here", "id": "f10987:c1:m3"}
{"signature": "@abstractmethod<EOL><INDENT>def get_current_user(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Override to determine the current user from, e.g., a cookie.", "id": "f10987:c1:m0"}
{"signature": "def add_condition(self, field_name, op, value):", "body": "if not isinstance(op, SQL_OP):<EOL><INDENT>if op not in SQL_OP.txt2op:<EOL><INDENT>raise SQLOperatorInvalid(op)<EOL><DEDENT>else:<EOL><INDENT>op = SQL_OP.txt2op.get(op)<EOL><DEDENT><DEDENT>self.conditions.append([field_name, op, value])<EOL>", "docstring": "Add a query condition and validate it.\nraise ParamsException if failed.\nself.view required\n:param field_name:\n:param op:\n:param value:\n:return: None", "id": "f10988:c7:m6"}
{"signature": "@staticmethod<EOL><INDENT>def parse_order(text):<DEDENT>", "body": "orders = []<EOL>for i in map(str.strip, text.split('<STR_LIT:U+002C>')):<EOL><INDENT>items = i.split('<STR_LIT:.>', <NUM_LIT:2>)<EOL>if len(items) == <NUM_LIT:1>: column, order = items[<NUM_LIT:0>], '<STR_LIT:default>'<EOL>elif len(items) == <NUM_LIT:2>: column, order = items<EOL>else: raise InvalidParams(\"<STR_LIT>\")<EOL>order = order.lower()<EOL>if order not in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT:default>'):<EOL><INDENT>raise InvalidParams('<STR_LIT>' % order)<EOL><DEDENT>if order != '<STR_LIT:default>':<EOL><INDENT>orders.append(SQLQueryOrder(column, order))<EOL><DEDENT><DEDENT>return orders<EOL>", "docstring": ":param text: order=id.desc, xxx.asc\n:return: [\n    [<column>, asc|desc|default],\n    [<column2>, asc|desc|default],\n]", "id": "f10988:c7:m2"}
{"signature": "@classmethod<EOL><INDENT>def parse_load_fk(cls, data: Dict[str, List[Dict[str, object]]]) -> Dict[str, List[Dict[str, object]]]:<DEDENT>", "body": "default_value_dict = {'<STR_LIT>': None, '<STR_LIT>': None, '<STR_LIT>': None, '<STR_LIT>': None}<EOL>def value_normalize_dict(value):<EOL><INDENT>def check(k, v):<EOL><INDENT>if k == '<STR_LIT>': return isinstance(v, str)<EOL>if k == '<STR_LIT>': return isinstance(v, str)<EOL>if k == '<STR_LIT>': return isinstance(v, str)<EOL>if k == '<STR_LIT>': return isinstance(v, dict)<EOL><DEDENT>valid = {k: v for k, v in value.items() if check(k, v)}<EOL>if not valid: return default_value_dict.copy()<EOL>if '<STR_LIT>' in valid and valid['<STR_LIT>']:<EOL><INDENT>valid['<STR_LIT>'] = cls.parse_load_fk(valid['<STR_LIT>'])<EOL><DEDENT>for k, v in default_value_dict.items():<EOL><INDENT>valid.setdefault(k, v)<EOL><DEDENT>return valid<EOL><DEDENT>def value_normalize(value, no_list=True):<EOL><INDENT>if value is None:<EOL><INDENT>return default_value_dict.copy()<EOL><DEDENT>elif not no_list and isinstance(value, List):<EOL><INDENT>return list(map(value_normalize, value))<EOL><DEDENT>elif isinstance(value, str):<EOL><INDENT>val = default_value_dict.copy()<EOL>val['<STR_LIT>'] = value<EOL>return val<EOL><DEDENT>elif isinstance(value, Dict):<EOL><INDENT>return value_normalize_dict(value)<EOL><DEDENT>else:<EOL><INDENT>raise InvalidParams('<STR_LIT>' % value)<EOL><DEDENT><DEDENT>new_data = {}<EOL>if not isinstance(data, dict):<EOL><INDENT>raise InvalidParams('<STR_LIT>' % data)<EOL><DEDENT>for k, v in data.items():<EOL><INDENT>nv = value_normalize(v, False)<EOL>new_data[k] = nv if isinstance(nv, List) else [nv]<EOL><DEDENT>return new_data<EOL>", "docstring": ":param data:{\n    <column>: role,\n    <column2>: role,\n    <column>: {\n        'role': role,\n        'loadfk': { ... },\n    },\n:return: {\n    <column>: {\n        'role': role,\n    },\n    ...\n    <column3>: {\n        'role': role,\n        'loadfk': { ... 
},\n    },\n}", "id": "f10988:c7:m5"}
{"signature": "@classmethod<EOL><INDENT>def parse_select(cls, text: str) -> Set:<DEDENT>", "body": "if text == '<STR_LIT:*>':<EOL><INDENT>return ALL_COLUMNS  <EOL><DEDENT>selected_columns = set(filter(lambda x: x, map(str.strip, text.split('<STR_LIT:U+002C>'))))<EOL>if not selected_columns:<EOL><INDENT>raise InvalidParams(\"<STR_LIT>\")<EOL><DEDENT>return selected_columns<EOL>", "docstring": "get columns from select text\n:param text: col1, col2\n:return: ALL_COLUMNS or ['col1', 'col2']", "id": "f10988:c7:m4"}
{"signature": "def _parse_permission(self, obj):", "body": "if isinstance(obj, str):<EOL><INDENT>if obj == '<STR_LIT:*>':<EOL><INDENT>return A.ALL<EOL><DEDENT>elif obj in A.ALL:<EOL><INDENT>return obj,<EOL><DEDENT>else:<EOL><INDENT>logger.warning('<STR_LIT>', obj)<EOL><DEDENT><DEDENT>elif isinstance(obj, (list, tuple)):<EOL><INDENT>for i in obj:<EOL><INDENT>if i not in A.ALL:<EOL><INDENT>logger.warning('<STR_LIT>', i)<EOL><DEDENT><DEDENT>return obj<EOL><DEDENT>elif isinstance(obj, dict):<EOL><INDENT>return self._parse_permission(obj.get('<STR_LIT:*>'))<EOL><DEDENT>", "docstring": "\u4ece obj \u4e2d\u53d6\u51fa\u6743\u9650\n:param obj:\n:return: [A.QUERY, A.WRITE, ...]", "id": "f10990:c3:m5"}
{"signature": "def can_with_record(self, user, action, record: DataRecord, *, available=None):", "body": "assert action not in (A.QUERY, A.CREATE), \"<STR_LIT>\" % action<EOL>rules = []<EOL>for rule in self.record_checks:<EOL><INDENT>if record.table == rule[<NUM_LIT:0>] and action in rule[<NUM_LIT:1>]:<EOL><INDENT>rules.append(rule)<EOL><DEDENT><DEDENT>if available is None: available = self.can_with_columns(user, action, record.table, record.keys())<EOL>else: available = list(available)<EOL>bak = available.copy()<EOL>for rule in rules:<EOL><INDENT>ret = rule[-<NUM_LIT:1>](self, user, action, record, available)<EOL>if isinstance(ret, (tuple, set, list)):<EOL><INDENT>available = list(ret)<EOL><DEDENT>elif ret == '<STR_LIT:*>':<EOL><INDENT>available = list(bak)<EOL><DEDENT>elif not ret:<EOL><INDENT>available = []<EOL><DEDENT><DEDENT>return available<EOL>", "docstring": "\u8fdb\u884c\u57fa\u4e8e Record \u7684\u6743\u9650\u5224\u5b9a\uff0c\u8fd4\u56de\u53ef\u7528\u5217\u3002\n:param user:\n:param action:\n:param record:\n:param available: \u9650\u5b9a\u68c0\u67e5\u8303\u56f4\n:return: \u53ef\u7528\u5217", "id": "f10990:c3:m7"}
{"signature": "def can_with_columns(self, user, action, table, columns):", "body": "<EOL>global_data = self.rules.get('<STR_LIT:*>')<EOL>global_actions = self._parse_permission(global_data)<EOL>if global_actions and action in global_actions:<EOL><INDENT>available = list(columns)<EOL><DEDENT>else:<EOL><INDENT>available = []<EOL><DEDENT>table_data = self.rules.get(table)<EOL>table_actions = self._parse_permission(table_data)<EOL>if table_actions and action in table_actions:<EOL><INDENT>available = list(columns)<EOL><DEDENT>if type(table_data) == dict:<EOL><INDENT>for column in columns:<EOL><INDENT>column_actions = self._parse_permission(table_data.get(column))<EOL>if column_actions is not None:<EOL><INDENT>if action in column_actions:<EOL><INDENT>if column not in available:<EOL><INDENT>available.append(column)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if column in available:<EOL><INDENT>available.remove(column)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>for check in self.common_checks:<EOL><INDENT>if check[<NUM_LIT:0>] == table and action in check[<NUM_LIT:1>]:<EOL><INDENT>ret = check[-<NUM_LIT:1>](self, user, action, available)<EOL>if isinstance(ret, (tuple, set, list)):<EOL><INDENT>available = list(ret)<EOL><DEDENT>elif ret == '<STR_LIT:*>':<EOL><INDENT>available = list(columns)<EOL><DEDENT>elif ret is False:<EOL><INDENT>available = []<EOL><DEDENT>if not available: break<EOL><DEDENT><DEDENT>return available<EOL>", "docstring": "\u6839\u636e\u6743\u9650\u8fdb\u884c\u5217\u8fc7\u6ee4\n\u6ce8\u610f\u4e00\u70b9\uff0c\u53ea\u8981\u6709\u4e00\u4e2a\u6761\u4ef6\u80fd\u591f\u901a\u8fc7\u6743\u9650\u68c0\u6d4b\uff0c\u90a3\u4e48\u8fc7\u6ee4\u540e\u8fd8\u4f1a\u6709\u5269\u4f59\u6761\u4ef6\uff0c\u6700\u7ec8\u5c31\u4e0d\u4f1a\u62a5\u9519\u3002\n\u5982\u679c\u5168\u90e8\u6761\u4ef6\u90fd\u4e0d\u80fd\u8fc7\u68c0\u6d4b\uff0c\u5c31\u4f1a\u7206\u51fa\u6743\u9650\u9519\u8bef\u4e86\u3002\n\n:param user:\n:param action: \u884c\u4e3a\n:param table: \u8868\u540d\n:param columns: 
\u5217\u540d\u5217\u8868\n:return: \u53ef\u7528\u5217\u7684\u5217\u8868", "id": "f10990:c3:m6"}
{"signature": "def add_common_check(self, actions, table, func):", "body": "self.common_checks.append([table, actions, func])<EOL>\"\"\"<STR_LIT>\"\"\"<EOL>", "docstring": "emitted before query\n:param actions:\n:param table:\n:param func:\n:return:", "id": "f10990:c3:m3"}
{"signature": "def add_static(self, prefix, path, **kwargs):", "body": "self.statics.append((prefix, path, kwargs),)<EOL>", "docstring": ":param prefix: URL prefix\n:param path: file directory\n:param kwargs:\n:return:", "id": "f10991:c0:m5"}
{"signature": "def view_bind(app, cls_url, view_cls: Type['<STR_LIT>']):", "body": "if view_cls._no_route: return<EOL>cls_url = cls_url or view_cls.__class__.__name__.lower()<EOL>def add_route(name, route_info, beacon_info):<EOL><INDENT>for method in route_info['<STR_LIT>']:<EOL><INDENT>async def beacon(request): pass<EOL>route_key = route_info['<STR_LIT:url>'] if route_info['<STR_LIT:url>'] else name<EOL>app._raw_app.router.add_route(method, urljoin('<STR_LIT>', cls_url, route_key), beacon)<EOL>app.route._beacons[beacon] = beacon_info<EOL><DEDENT><DEDENT>for name, route_info_lst in view_cls._interface.items():<EOL><INDENT>for route_info in route_info_lst:<EOL><INDENT>real_handler = getattr(view_cls, name, None)<EOL>if real_handler is None: continue <EOL>assert real_handler is not None, \"<STR_LIT>\"<EOL>handler_name = '<STR_LIT>' % (view_cls.__name__, real_handler.__name__)<EOL>assert iscoroutinefunction(real_handler), \"<STR_LIT>\" % handler_name<EOL>beacon_info = {<EOL>'<STR_LIT>': view_cls,<EOL>'<STR_LIT:name>': name,<EOL>'<STR_LIT>': real_handler,<EOL>'<STR_LIT>': route_info<EOL>}<EOL>add_route(name, route_info, beacon_info)<EOL><DEDENT><DEDENT>", "docstring": "\u5c06 API \u7ed1\u5b9a\u5230 web \u670d\u52a1\u4e0a\n:param view_cls:\n:param app:\n:param cls_url:\n:return:", "id": "f10991:m1"}
{"signature": "@classmethod<EOL><INDENT>async def get_session(cls, view):<DEDENT>", "body": "session = cls(view)<EOL>session.key = await session.get_key()<EOL>session._data = await session.load() or {}<EOL>return session<EOL>", "docstring": "Every request have a session instance\n:param view:\n:return:", "id": "f10992:c0:m9"}
{"signature": "def _packb3(obj, **options):", "body": "fp = io.BytesIO()<EOL>_pack3(obj, fp, **options)<EOL>return fp.getvalue()<EOL>", "docstring": "Serialize a Python object into MessagePack bytes.\n\nArgs:\n    obj: a Python object\n\nKwargs:\n    ext_handlers (dict): dictionary of Ext handlers, mapping a custom type\n                         to a callable that packs an instance of the type\n                         into an Ext object\n    force_float_precision (str): \"single\" to force packing floats as\n                                 IEEE-754 single-precision floats,\n                                 \"double\" to force packing floats as\n                                 IEEE-754 double-precision floats.\n\nReturns:\n    A 'bytes' containing serialized MessagePack bytes.\n\nRaises:\n    UnsupportedType(PackException):\n        Object type not supported for packing.\n\nExample:\n>>> umsgpack.packb({u\"compact\": True, u\"schema\": 0})\nb'\\x82\\xa7compact\\xc3\\xa6schema\\x00'\n>>>", "id": "f10997:m14"}
{"signature": "def __str__(self):", "body": "s = \"<STR_LIT>\" % self.type<EOL>s += \"<STR_LIT:U+0020>\".join([\"<STR_LIT>\" % ord(self.data[i:i + <NUM_LIT:1>])<EOL>for i in xrange(min(len(self.data), <NUM_LIT:8>))])<EOL>if len(self.data) > <NUM_LIT:8>:<EOL><INDENT>s += \"<STR_LIT>\"<EOL><DEDENT>s += \"<STR_LIT:)>\"<EOL>return s<EOL>", "docstring": "String representation of this Ext object.", "id": "f10997:c0:m3"}
{"signature": "def __ne__(self, other):", "body": "return not self.__eq__(other)<EOL>", "docstring": "Compare this Ext object with another for inequality.", "id": "f10997:c0:m2"}
{"signature": "def _unpackb2(s, **options):", "body": "if not isinstance(s, (str, bytearray)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>return _unpack(io.BytesIO(s), options)<EOL>", "docstring": "Deserialize MessagePack bytes into a Python object.\n\nArgs:\n    s: a 'str' or 'bytearray' containing serialized MessagePack bytes\n\nKwargs:\n    ext_handlers (dict): dictionary of Ext handlers, mapping integer Ext\n                         type to a callable that unpacks an instance of\n                         Ext into an object\n    use_ordered_dict (bool): unpack maps into OrderedDict, instead of\n                             unordered dict (default False)\n    allow_invalid_utf8 (bool): unpack invalid strings into instances of\n                               InvalidString, for access to the bytes\n                               (default False)\n\nReturns:\n    A Python object.\n\nRaises:\n    TypeError:\n        Packed data type is neither 'str' nor 'bytearray'.\n    InsufficientDataException(UnpackException):\n        Insufficient data to unpack the serialized object.\n    InvalidStringException(UnpackException):\n        Invalid UTF-8 string encountered during unpacking.\n    UnsupportedTimestampException(UnpackException):\n        Unsupported timestamp format encountered during unpacking.\n    ReservedCodeException(UnpackException):\n        Reserved code encountered during unpacking.\n    UnhashableKeyException(UnpackException):\n        Unhashable key encountered during map unpacking.\n        The serialized map cannot be deserialized into a Python dictionary.\n    DuplicateKeyException(UnpackException):\n        Duplicate key encountered during map unpacking.\n\nExample:\n>>> umsgpack.unpackb(b'\\x82\\xa7compact\\xc3\\xa6schema\\x00')\n{u'compact': True, u'schema': 0}\n>>>", "id": "f10997:m31"}
{"signature": "def __hash__(self):", "body": "return hash((self.type, self.data))<EOL>", "docstring": "Provide a hash of this Ext object.", "id": "f10997:c0:m4"}
{"signature": "def _pack3(obj, fp, **options):", "body": "global compatibility<EOL>ext_handlers = options.get(\"<STR_LIT>\")<EOL>if obj is None:<EOL><INDENT>_pack_nil(obj, fp, options)<EOL><DEDENT>elif ext_handlers and obj.__class__ in ext_handlers:<EOL><INDENT>_pack_ext(ext_handlers[obj.__class__](obj), fp, options)<EOL><DEDENT>elif isinstance(obj, bool):<EOL><INDENT>_pack_boolean(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, int):<EOL><INDENT>_pack_integer(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, float):<EOL><INDENT>_pack_float(obj, fp, options)<EOL><DEDENT>elif compatibility and isinstance(obj, str):<EOL><INDENT>_pack_oldspec_raw(obj.encode('<STR_LIT:utf-8>'), fp, options)<EOL><DEDENT>elif compatibility and isinstance(obj, bytes):<EOL><INDENT>_pack_oldspec_raw(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, str):<EOL><INDENT>_pack_string(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, bytes):<EOL><INDENT>_pack_binary(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, list) or isinstance(obj, tuple):<EOL><INDENT>_pack_array(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, dict):<EOL><INDENT>_pack_map(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, datetime.datetime):<EOL><INDENT>_pack_ext_timestamp(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, Ext):<EOL><INDENT>_pack_ext(obj, fp, options)<EOL><DEDENT>elif ext_handlers:<EOL><INDENT>t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)<EOL>if t:<EOL><INDENT>_pack_ext(ext_handlers[t](obj), fp, options)<EOL><DEDENT>else:<EOL><INDENT>raise UnsupportedTypeException(<EOL>\"<STR_LIT>\" % str(type(obj)))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise UnsupportedTypeException(<EOL>\"<STR_LIT>\" % str(type(obj)))<EOL><DEDENT>", "docstring": "Serialize a Python object into MessagePack bytes.\n\nArgs:\n    obj: a Python object\n    fp: a .write()-supporting file-like object\n\nKwargs:\n    ext_handlers (dict): dictionary of Ext handlers, mapping a custom type\n                      
   to a callable that packs an instance of the type\n                         into an Ext object\n    force_float_precision (str): \"single\" to force packing floats as\n                                 IEEE-754 single-precision floats,\n                                 \"double\" to force packing floats as\n                                 IEEE-754 double-precision floats.\n\nReturns:\n    None.\n\nRaises:\n    UnsupportedType(PackException):\n        Object type not supported for packing.\n\nExample:\n>>> f = open('test.bin', 'wb')\n>>> umsgpack.pack({u\"compact\": True, u\"schema\": 0}, f)\n>>>", "id": "f10997:m12"}
{"signature": "def _packb2(obj, **options):", "body": "fp = io.BytesIO()<EOL>_pack2(obj, fp, **options)<EOL>return fp.getvalue()<EOL>", "docstring": "Serialize a Python object into MessagePack bytes.\n\nArgs:\n    obj: a Python object\n\nKwargs:\n    ext_handlers (dict): dictionary of Ext handlers, mapping a custom type\n                         to a callable that packs an instance of the type\n                         into an Ext object\n    force_float_precision (str): \"single\" to force packing floats as\n                                 IEEE-754 single-precision floats,\n                                 \"double\" to force packing floats as\n                                 IEEE-754 double-precision floats.\n\nReturns:\n    A 'str' containing serialized MessagePack bytes.\n\nRaises:\n    UnsupportedType(PackException):\n        Object type not supported for packing.\n\nExample:\n>>> umsgpack.packb({u\"compact\": True, u\"schema\": 0})\n'\\x82\\xa7compact\\xc3\\xa6schema\\x00'\n>>>", "id": "f10997:m13"}
{"signature": "def _pack2(obj, fp, **options):", "body": "global compatibility<EOL>ext_handlers = options.get(\"<STR_LIT>\")<EOL>if obj is None:<EOL><INDENT>_pack_nil(obj, fp, options)<EOL><DEDENT>elif ext_handlers and obj.__class__ in ext_handlers:<EOL><INDENT>_pack_ext(ext_handlers[obj.__class__](obj), fp, options)<EOL><DEDENT>elif isinstance(obj, bool):<EOL><INDENT>_pack_boolean(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, int) or isinstance(obj, long):<EOL><INDENT>_pack_integer(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, float):<EOL><INDENT>_pack_float(obj, fp, options)<EOL><DEDENT>elif compatibility and isinstance(obj, unicode):<EOL><INDENT>_pack_oldspec_raw(bytes(obj), fp, options)<EOL><DEDENT>elif compatibility and isinstance(obj, bytes):<EOL><INDENT>_pack_oldspec_raw(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, unicode):<EOL><INDENT>_pack_string(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, str):<EOL><INDENT>_pack_binary(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, list) or isinstance(obj, tuple):<EOL><INDENT>_pack_array(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, dict):<EOL><INDENT>_pack_map(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, datetime.datetime):<EOL><INDENT>_pack_ext_timestamp(obj, fp, options)<EOL><DEDENT>elif isinstance(obj, Ext):<EOL><INDENT>_pack_ext(obj, fp, options)<EOL><DEDENT>elif ext_handlers:<EOL><INDENT>t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)<EOL>if t:<EOL><INDENT>_pack_ext(ext_handlers[t](obj), fp, options)<EOL><DEDENT>else:<EOL><INDENT>raise UnsupportedTypeException(<EOL>\"<STR_LIT>\" % str(type(obj)))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise UnsupportedTypeException(\"<STR_LIT>\" % str(type(obj)))<EOL><DEDENT>", "docstring": "Serialize a Python object into MessagePack bytes.\n\nArgs:\n    obj: a Python object\n    fp: a .write()-supporting file-like object\n\nKwargs:\n    ext_handlers (dict): dictionary of Ext handlers, mapping a custom type\n               
          to a callable that packs an instance of the type\n                         into an Ext object\n    force_float_precision (str): \"single\" to force packing floats as\n                                 IEEE-754 single-precision floats,\n                                 \"double\" to force packing floats as\n                                 IEEE-754 double-precision floats.\n\nReturns:\n    None.\n\nRaises:\n    UnsupportedType(PackException):\n        Object type not supported for packing.\n\nExample:\n>>> f = open('test.bin', 'wb')\n>>> umsgpack.pack({u\"compact\": True, u\"schema\": 0}, f)\n>>>", "id": "f10997:m11"}
{"signature": "def pagination_calc(items_count, page_size, cur_page=<NUM_LIT:1>, nearby=<NUM_LIT:2>):", "body": "if type(cur_page) == str:<EOL><INDENT>cur_page = int(cur_page) if cur_page.isdigit() else <NUM_LIT:1><EOL><DEDENT>elif type(cur_page) == int:<EOL><INDENT>if cur_page <= <NUM_LIT:0>:<EOL><INDENT>cur_page = <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>cur_page = <NUM_LIT:1><EOL><DEDENT>page_count = <NUM_LIT:1> if page_size == -<NUM_LIT:1> else int(math.ceil(items_count / page_size))<EOL>items_length = nearby * <NUM_LIT:2> + <NUM_LIT:1><EOL>first_page = None<EOL>last_page = None<EOL>prev_page = cur_page - <NUM_LIT:1> if cur_page != <NUM_LIT:1> else None<EOL>next_page = cur_page + <NUM_LIT:1> if cur_page != page_count else None<EOL>if page_count <= items_length:<EOL><INDENT>items = range(<NUM_LIT:1>, page_count + <NUM_LIT:1>)<EOL><DEDENT>elif cur_page <= nearby:<EOL><INDENT>items = range(<NUM_LIT:1>, items_length + <NUM_LIT:1>)<EOL>last_page = True<EOL><DEDENT>elif cur_page >= page_count - nearby:<EOL><INDENT>items = range(page_count - items_length + <NUM_LIT:1>, page_count + <NUM_LIT:1>)<EOL>first_page = True<EOL><DEDENT>else:<EOL><INDENT>items = range(cur_page - nearby, cur_page + nearby + <NUM_LIT:1>)<EOL>first_page, last_page = True, True<EOL><DEDENT>if first_page:<EOL><INDENT>first_page = <NUM_LIT:1><EOL><DEDENT>if last_page:<EOL><INDENT>last_page = page_count<EOL><DEDENT>return {<EOL>'<STR_LIT>': cur_page,<EOL>'<STR_LIT>': prev_page,<EOL>'<STR_LIT>': next_page,<EOL>'<STR_LIT>': first_page,<EOL>'<STR_LIT>': last_page,<EOL>'<STR_LIT>': list(items),<EOL>'<STR_LIT:info>': {<EOL>'<STR_LIT>': page_size,<EOL>'<STR_LIT>': page_count,<EOL>'<STR_LIT>': items_count,<EOL>}<EOL>}<EOL>", "docstring": ":param nearby:\n:param items_count: count of all items\n:param page_size: size of one page\n:param cur_page: current page number, accept string digit\n:return: num of pages, an iterator", "id": "f11003:m0"}
{"signature": "def get_bytes_from_blob(val) -> bytes:", "body": "if isinstance(val, bytes):<EOL><INDENT>return val<EOL><DEDENT>elif isinstance(val, memoryview):<EOL><INDENT>return val.tobytes()<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "\u4e0d\u540c\u6570\u636e\u5e93\u4eceblob\u62ff\u51fa\u7684\u6570\u636e\u6709\u6240\u5dee\u522b\uff0c\u6709\u7684\u662fmemoryview\u6709\u7684\u662fbytes", "id": "f11004:m3"}
{"signature": "def parse_query_by_json(data):", "body": "data = json.loads(data)<EOL>for i in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if i not in data:<EOL><INDENT>raise QueryException(\"<STR_LIT>\" % i)<EOL><DEDENT><DEDENT>tables = data['<STR_LIT>']<EOL>columns = data['<STR_LIT>']<EOL>conditions = data['<STR_LIT>']<EOL>def parse_stmt(s, expr_cls, all_op, multi_items_op):<EOL><INDENT>if len(s) == <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>if s[<NUM_LIT:0>] in all_op:<EOL><INDENT>if s[<NUM_LIT:0>] in multi_items_op:<EOL><INDENT>values = []<EOL>for i in s[<NUM_LIT:1>:]:<EOL><INDENT>values.append(parse_stmt(i, expr_cls, all_op, multi_items_op))<EOL><DEDENT>return expr_cls(None, s[<NUM_LIT:0>], None, values=values)<EOL><DEDENT>else:<EOL><INDENT>if len(s) == <NUM_LIT:5>:<EOL><INDENT>lhs = Column(s[<NUM_LIT:2>], table=s[<NUM_LIT:1>])<EOL>rhs = Column(s[<NUM_LIT:4>], table=s[<NUM_LIT:3>])<EOL>if (s[<NUM_LIT:1>] not in tables) or (s[<NUM_LIT:3>] not in tables):<EOL><INDENT>raise QueryException('<STR_LIT>')<EOL><DEDENT>return expr_cls(lhs, s[<NUM_LIT:0>], rhs)<EOL><DEDENT>else:<EOL><INDENT>lhs = Column(s[<NUM_LIT:2>], table=s[<NUM_LIT:1>])<EOL>if s[<NUM_LIT:1>] not in tables:<EOL><INDENT>raise QueryException('<STR_LIT>')<EOL><DEDENT>return expr_cls(lhs, s[<NUM_LIT:0>], s[<NUM_LIT:3>])<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise QueryException('<STR_LIT>')<EOL><DEDENT><DEDENT>query_op = ('<STR_LIT:+>', '<STR_LIT:->', '<STR_LIT:*>', '<STR_LIT:/>')<EOL>query_columns = []<EOL>for i in columns:<EOL><INDENT>if len(i) == <NUM_LIT:2>:<EOL><INDENT>query_columns.append(Column(i[<NUM_LIT:1>], table=i[<NUM_LIT:0>]))<EOL><DEDENT>else:<EOL><INDENT>query_columns.append(parse_stmt(i, QueryExpression, query_op, query_op))<EOL><DEDENT><DEDENT>wheres = parse_stmt(conditions, ConditionExpression, _operator_map, ('<STR_LIT>', '<STR_LIT>',))<EOL>return {<EOL>'<STR_LIT>': tables,<EOL>'<STR_LIT>': query_columns,<EOL>'<STR_LIT>': wheres,<EOL>}<EOL>", "docstring": "['and',\n 
   ['==', 't1', 'col1', val1],\n    ['!=', 't1', 'col2', 't2', 'col2'],\n    ['and',\n        ['==', 't1', 'col3', val3],\n        ['!=', 't2', 'col4', val4],\n    ]\n]\n:return:\n:param data: \n:return:", "id": "f11012:m3"}
{"signature": "def ensure_remote_branch_is_tracked(branch):", "body": "if branch == MASTER_BRANCH:<EOL><INDENT>return<EOL><DEDENT>output = subprocess.check_output(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>for line in output.split('<STR_LIT:\\n>'):<EOL><INDENT>if line.strip() == branch:<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>sys.stdout.write(subprocess.check_output(<EOL>['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>' % branch]))<EOL><DEDENT>except subprocess.CalledProcessError:<EOL><INDENT>raise SystemExit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>", "docstring": "Track the specified remote branch if it is not already tracked.", "id": "f11015:m0"}
{"signature": "def block_reduction_b(inputs, scope=None, is_train=False):", "body": "<EOL>with tf.variable_scope(scope, '<STR_LIT>', [inputs]):<EOL><INDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>branch_0, _ = conv_module(<EOL>inputs, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:1>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_0, _ = conv_module(<EOL>branch_0, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:3>, <NUM_LIT:3>), strides=(<NUM_LIT:2>, <NUM_LIT:2>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL><DEDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>branch_1, _ = conv_module(<EOL>inputs, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:1>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_1, _ = conv_module(<EOL>branch_1, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:1>, <NUM_LIT:7>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_1, _ = conv_module(<EOL>branch_1, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:7>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_1, _ = conv_module(<EOL>branch_1, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:3>, <NUM_LIT:3>), strides=(<NUM_LIT:2>, <NUM_LIT:2>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL><DEDENT>with 
tf.variable_scope('<STR_LIT>'):<EOL><INDENT>branch_2 = tl.layers.MaxPool2d(inputs, (<NUM_LIT:3>, <NUM_LIT:3>), strides=(<NUM_LIT:2>, <NUM_LIT:2>), padding='<STR_LIT>', name='<STR_LIT>')<EOL><DEDENT>return tl.layers.ConcatLayer([branch_0, branch_1, branch_2], concat_dim=<NUM_LIT:3>, name='<STR_LIT>')<EOL><DEDENT>", "docstring": "Builds Reduction-B block for Inception v4 network.", "id": "f11040:m3"}
{"signature": "def block_inception_a(inputs, scope=None, is_train=False):", "body": "<EOL>with tf.variable_scope(name_or_scope=scope, default_name='<STR_LIT>', values=[inputs]):<EOL><INDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>branch_0, _ = conv_module(<EOL>inputs, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:1>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL><DEDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>branch_1, _ = conv_module(<EOL>inputs, n_out_channel=<NUM_LIT:64>, filter_size=(<NUM_LIT:1>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_1, _ = conv_module(<EOL>branch_1, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:3>, <NUM_LIT:3>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL><DEDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>branch_2, _ = conv_module(<EOL>inputs, n_out_channel=<NUM_LIT:64>, filter_size=(<NUM_LIT:1>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_2, _ = conv_module(<EOL>branch_2, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:3>, <NUM_LIT:3>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_2, _ = conv_module(<EOL>branch_2, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:3>, <NUM_LIT:3>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, 
activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL><DEDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>branch_3 = tl.layers.MeanPool2d(<EOL>inputs, filter_size=(<NUM_LIT:3>, <NUM_LIT:3>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_3, _ = conv_module(<EOL>branch_3, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:1>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL><DEDENT>return tl.layers.ConcatLayer([branch_0, branch_1, branch_2, branch_3], concat_dim=<NUM_LIT:3>, name='<STR_LIT>')<EOL><DEDENT>", "docstring": "Builds Inception-A block for Inception v4 network.", "id": "f11040:m0"}
{"signature": "def block_inception_b(inputs, scope=None, is_train=False):", "body": "<EOL>with tf.variable_scope(scope, '<STR_LIT>', [inputs]):<EOL><INDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>branch_0, _ = conv_module(<EOL>inputs, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:1>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL><DEDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>branch_1, _ = conv_module(<EOL>inputs, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:1>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_1, _ = conv_module(<EOL>branch_1, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:1>, <NUM_LIT:7>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_1, _ = conv_module(<EOL>branch_1, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:7>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL><DEDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>branch_2, _ = conv_module(<EOL>inputs, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:1>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_2, _ = conv_module(<EOL>branch_2, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:7>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', 
name='<STR_LIT>'<EOL>)<EOL>branch_2, _ = conv_module(<EOL>branch_2, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:1>, <NUM_LIT:7>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_2, _ = conv_module(<EOL>branch_2, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:7>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_2, _ = conv_module(<EOL>branch_2, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:1>, <NUM_LIT:7>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL><DEDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>branch_3 = tl.layers.MeanPool2d(<EOL>inputs, filter_size=(<NUM_LIT:3>, <NUM_LIT:3>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL>branch_3, _ = conv_module(<EOL>branch_3, n_out_channel=<NUM_LIT>, filter_size=(<NUM_LIT:1>, <NUM_LIT:1>), strides=(<NUM_LIT:1>, <NUM_LIT:1>), padding='<STR_LIT>', batch_norm_init=None,<EOL>is_train=is_train, use_batchnorm=True, activation_fn='<STR_LIT>', name='<STR_LIT>'<EOL>)<EOL><DEDENT>return tl.layers.ConcatLayer([branch_0, branch_1, branch_2, branch_3], concat_dim=<NUM_LIT:3>, name='<STR_LIT>')<EOL><DEDENT>", "docstring": "Builds Inception-B block for Inception v4 network.", "id": "f11040:m2"}
{"signature": "def prepro(I):", "body": "I = I[<NUM_LIT>:<NUM_LIT>]<EOL>I = I[::<NUM_LIT:2>, ::<NUM_LIT:2>, <NUM_LIT:0>]<EOL>I[I == <NUM_LIT>] = <NUM_LIT:0><EOL>I[I == <NUM_LIT>] = <NUM_LIT:0><EOL>I[I != <NUM_LIT:0>] = <NUM_LIT:1><EOL>return I.astype(np.float).ravel()<EOL>", "docstring": "Prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector.", "id": "f11074:m0"}
{"signature": "def main(_):", "body": "if FLAGS.model == \"<STR_LIT>\":<EOL><INDENT>init_scale = <NUM_LIT:0.1><EOL>learning_rate = <NUM_LIT:1.><EOL>max_grad_norm = <NUM_LIT:5><EOL>num_steps = <NUM_LIT:20><EOL>hidden_size = <NUM_LIT:200><EOL>max_epoch = <NUM_LIT:4><EOL>max_max_epoch = <NUM_LIT><EOL>keep_prob = <NUM_LIT:1.0><EOL>lr_decay = <NUM_LIT:0.5><EOL>batch_size = <NUM_LIT:20><EOL>vocab_size = <NUM_LIT><EOL><DEDENT>elif FLAGS.model == \"<STR_LIT>\":<EOL><INDENT>init_scale = <NUM_LIT><EOL>learning_rate = <NUM_LIT:1.0><EOL>max_grad_norm = <NUM_LIT:5><EOL>num_steps = <NUM_LIT><EOL>hidden_size = <NUM_LIT><EOL>max_epoch = <NUM_LIT:6><EOL>max_max_epoch = <NUM_LIT><EOL>keep_prob = <NUM_LIT:0.5><EOL>lr_decay = <NUM_LIT><EOL>batch_size = <NUM_LIT:20><EOL>vocab_size = <NUM_LIT><EOL><DEDENT>elif FLAGS.model == \"<STR_LIT>\":<EOL><INDENT>init_scale = <NUM_LIT><EOL>learning_rate = <NUM_LIT:1.0><EOL>max_grad_norm = <NUM_LIT:10><EOL>num_steps = <NUM_LIT><EOL>hidden_size = <NUM_LIT><EOL>max_epoch = <NUM_LIT><EOL>max_max_epoch = <NUM_LIT><EOL>keep_prob = <NUM_LIT><EOL>lr_decay = <NUM_LIT:1> / <NUM_LIT><EOL>batch_size = <NUM_LIT:20><EOL>vocab_size = <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\", FLAGS.model)<EOL><DEDENT>train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()<EOL>print('<STR_LIT>'.format(len(train_data)))  <EOL>print('<STR_LIT>'.format(len(valid_data)))  <EOL>print('<STR_LIT>'.format(len(test_data)))  <EOL>print('<STR_LIT>'.format(vocab_size))  <EOL>sess = tf.InteractiveSession()<EOL>input_data = tf.placeholder(tf.int32, [batch_size, num_steps])<EOL>targets = tf.placeholder(tf.int32, [batch_size, num_steps])<EOL>input_data_test = tf.placeholder(tf.int32, [<NUM_LIT:1>, <NUM_LIT:1>])<EOL>targets_test = tf.placeholder(tf.int32, [<NUM_LIT:1>, <NUM_LIT:1>])<EOL>def inference(x, is_training, num_steps, reuse=None):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>print(\"<STR_LIT>\" % (num_steps, is_training, reuse))<EOL>init = 
tf.random_uniform_initializer(-init_scale, init_scale)<EOL>with tf.variable_scope(\"<STR_LIT>\", reuse=reuse):<EOL><INDENT>net = tl.layers.EmbeddingInputlayer(x, vocab_size, hidden_size, init, name='<STR_LIT>')<EOL>net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_training, name='<STR_LIT>')<EOL>net = tl.layers.RNNLayer(<EOL>net,<EOL>cell_fn=tf.contrib.rnn.BasicLSTMCell,  <EOL>cell_init_args={<EOL>'<STR_LIT>': <NUM_LIT:0.0>,<EOL>'<STR_LIT>': True<EOL>},<EOL>n_hidden=hidden_size,<EOL>initializer=init,<EOL>n_steps=num_steps,<EOL>return_last=False,<EOL>name='<STR_LIT>'<EOL>)<EOL>lstm1 = net<EOL>net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_training, name='<STR_LIT>')<EOL>net = tl.layers.RNNLayer(<EOL>net,<EOL>cell_fn=tf.contrib.rnn.BasicLSTMCell,  <EOL>cell_init_args={<EOL>'<STR_LIT>': <NUM_LIT:0.0>,<EOL>'<STR_LIT>': True<EOL>},<EOL>n_hidden=hidden_size,<EOL>initializer=init,<EOL>n_steps=num_steps,<EOL>return_last=False,<EOL>return_seq_2d=True,<EOL>name='<STR_LIT>'<EOL>)<EOL>lstm2 = net<EOL>net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_training, name='<STR_LIT>')<EOL>net = tl.layers.DenseLayer(net, vocab_size, W_init=init, b_init=init, act=None, name='<STR_LIT>')<EOL><DEDENT>return net, lstm1, lstm2<EOL><DEDENT>net, lstm1, lstm2 = inference(input_data, is_training=True, num_steps=num_steps, reuse=None)<EOL>net_val, lstm1_val, lstm2_val = inference(input_data, is_training=False, num_steps=num_steps, reuse=True)<EOL>net_test, lstm1_test, lstm2_test = inference(input_data_test, is_training=False, num_steps=<NUM_LIT:1>, reuse=True)<EOL>sess.run(tf.global_variables_initializer())<EOL>def loss_fn(outputs, targets, batch_size):<EOL><INDENT>loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(<EOL>[outputs], [tf.reshape(targets, [-<NUM_LIT:1>])], [tf.ones_like(tf.reshape(targets, [-<NUM_LIT:1>]), dtype=tf.float32)]<EOL>)<EOL>cost = tf.reduce_sum(loss) / batch_size<EOL>return 
cost<EOL><DEDENT>cost = loss_fn(net.outputs, targets, batch_size)<EOL>cost_val = loss_fn(net_val.outputs, targets, batch_size)<EOL>cost_test = loss_fn(net_test.outputs, targets_test, <NUM_LIT:1>)<EOL>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>lr = tf.Variable(<NUM_LIT:0.0>, trainable=False)<EOL><DEDENT>tvars = tf.trainable_variables()<EOL>grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)<EOL>optimizer = tf.train.GradientDescentOptimizer(lr)<EOL>train_op = optimizer.apply_gradients(zip(grads, tvars))<EOL>sess.run(tf.global_variables_initializer())<EOL>net.print_params()<EOL>net.print_layers()<EOL>tl.layers.print_all_variables()<EOL>print(\"<STR_LIT>\")<EOL>for i in range(max_max_epoch):<EOL><INDENT>new_lr_decay = lr_decay**max(i - max_epoch, <NUM_LIT:0.0>)<EOL>sess.run(tf.assign(lr, learning_rate * new_lr_decay))<EOL>print(\"<STR_LIT>\" % (i + <NUM_LIT:1>, max_max_epoch, sess.run(lr)))<EOL>epoch_size = ((len(train_data) // batch_size) - <NUM_LIT:1>) // num_steps<EOL>start_time = time.time()<EOL>costs = <NUM_LIT:0.0><EOL>iters = <NUM_LIT:0><EOL>state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)<EOL>state2 = tl.layers.initialize_rnn_state(lstm2.initial_state)<EOL>for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, num_steps)):<EOL><INDENT>feed_dict = {<EOL>input_data: x,<EOL>targets: y,<EOL>lstm1.initial_state.c: state1[<NUM_LIT:0>],<EOL>lstm1.initial_state.h: state1[<NUM_LIT:1>],<EOL>lstm2.initial_state.c: state2[<NUM_LIT:0>],<EOL>lstm2.initial_state.h: state2[<NUM_LIT:1>],<EOL>}<EOL>feed_dict.update(net.all_drop)<EOL>_cost, state1_c, state1_h, state2_c, state2_h, _ = sess.run(<EOL>[cost, lstm1.final_state.c, lstm1.final_state.h, lstm2.final_state.c, lstm2.final_state.h, train_op],<EOL>feed_dict=feed_dict<EOL>)<EOL>state1 = (state1_c, state1_h)<EOL>state2 = (state2_c, state2_h)<EOL>costs += _cost<EOL>iters += num_steps<EOL>if step % (epoch_size // <NUM_LIT:10>) == 
<NUM_LIT:10>:<EOL><INDENT>print(<EOL>\"<STR_LIT>\" %<EOL>(step * <NUM_LIT:1.0> / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time))<EOL>)<EOL><DEDENT><DEDENT>train_perplexity = np.exp(costs / iters)<EOL>print(\"<STR_LIT>\" % (i + <NUM_LIT:1>, max_max_epoch, train_perplexity))<EOL>start_time = time.time()<EOL>costs = <NUM_LIT:0.0><EOL>iters = <NUM_LIT:0><EOL>state1 = tl.layers.initialize_rnn_state(lstm1_val.initial_state)<EOL>state2 = tl.layers.initialize_rnn_state(lstm2_val.initial_state)<EOL>for step, (x, y) in enumerate(tl.iterate.ptb_iterator(valid_data, batch_size, num_steps)):<EOL><INDENT>feed_dict = {<EOL>input_data: x,<EOL>targets: y,<EOL>lstm1_val.initial_state.c: state1[<NUM_LIT:0>],<EOL>lstm1_val.initial_state.h: state1[<NUM_LIT:1>],<EOL>lstm2_val.initial_state.c: state2[<NUM_LIT:0>],<EOL>lstm2_val.initial_state.h: state2[<NUM_LIT:1>],<EOL>}<EOL>_cost, state1_c, state1_h, state2_c, state2_h, _ = sess.run(<EOL>[<EOL>cost_val, lstm1_val.final_state.c, lstm1_val.final_state.h, lstm2_val.final_state.c,<EOL>lstm2_val.final_state.h,<EOL>tf.no_op()<EOL>], feed_dict=feed_dict<EOL>)<EOL>state1 = (state1_c, state1_h)<EOL>state2 = (state2_c, state2_h)<EOL>costs += _cost<EOL>iters += num_steps<EOL><DEDENT>valid_perplexity = np.exp(costs / iters)<EOL>print(\"<STR_LIT>\" % (i + <NUM_LIT:1>, max_max_epoch, valid_perplexity))<EOL><DEDENT>print(\"<STR_LIT>\")<EOL>start_time = time.time()<EOL>costs = <NUM_LIT:0.0><EOL>iters = <NUM_LIT:0><EOL>state1 = tl.layers.initialize_rnn_state(lstm1_test.initial_state)<EOL>state2 = tl.layers.initialize_rnn_state(lstm2_test.initial_state)<EOL>for step, (x, y) in enumerate(tl.iterate.ptb_iterator(test_data, batch_size=<NUM_LIT:1>, num_steps=<NUM_LIT:1>)):<EOL><INDENT>feed_dict = {<EOL>input_data_test: x,<EOL>targets_test: y,<EOL>lstm1_test.initial_state.c: state1[<NUM_LIT:0>],<EOL>lstm1_test.initial_state.h: state1[<NUM_LIT:1>],<EOL>lstm2_test.initial_state.c: 
state2[<NUM_LIT:0>],<EOL>lstm2_test.initial_state.h: state2[<NUM_LIT:1>],<EOL>}<EOL>_cost, state1_c, state1_h, state2_c, state2_h = sess.run(<EOL>[<EOL>cost_test,<EOL>lstm1_test.final_state.c,<EOL>lstm1_test.final_state.h,<EOL>lstm2_test.final_state.c,<EOL>lstm2_test.final_state.h,<EOL>], feed_dict=feed_dict<EOL>)<EOL>state1 = (state1_c, state1_h)<EOL>state2 = (state2_c, state2_h)<EOL>costs += _cost<EOL>iters += <NUM_LIT:1><EOL><DEDENT>test_perplexity = np.exp(costs / iters)<EOL>print(\"<STR_LIT>\" % (test_perplexity, time.time() - start_time))<EOL>print(<EOL>\"<STR_LIT>\"<EOL>)<EOL>", "docstring": "The core of the model consists of an LSTM cell that processes one word at\na time and computes probabilities of the possible continuations of the\nsentence. The memory state of the network is initialized with a vector\nof zeros and gets updated after reading each word. Also, for computational\nreasons, we will process data in mini-batches of size batch_size.", "id": "f11077:m0"}
{"signature": "def main_restore_embedding_layer():", "body": "<EOL>vocabulary_size = <NUM_LIT><EOL>embedding_size = <NUM_LIT><EOL>model_file_name = \"<STR_LIT>\"<EOL>batch_size = None<EOL>print(\"<STR_LIT>\")<EOL>all_var = tl.files.load_npy_to_any(name=model_file_name + '<STR_LIT>')<EOL>data = all_var['<STR_LIT:data>']<EOL>count = all_var['<STR_LIT:count>']<EOL>dictionary = all_var['<STR_LIT>']<EOL>reverse_dictionary = all_var['<STR_LIT>']<EOL>tl.nlp.save_vocab(count, name='<STR_LIT>' + model_file_name + '<STR_LIT>')<EOL>del all_var, data, count<EOL>load_params = tl.files.load_npz(name=model_file_name + '<STR_LIT>')<EOL>x = tf.placeholder(tf.int32, shape=[batch_size])<EOL>emb_net = tl.layers.EmbeddingInputlayer(x, vocabulary_size, embedding_size, name='<STR_LIT>')<EOL>sess.run(tf.global_variables_initializer())<EOL>tl.files.assign_params(sess, [load_params[<NUM_LIT:0>]], emb_net)<EOL>emb_net.print_params()<EOL>emb_net.print_layers()<EOL>word = b'<STR_LIT:hello>'<EOL>word_id = dictionary[word]<EOL>print('<STR_LIT>', word_id)<EOL>words = [b'<STR_LIT:i>', b'<STR_LIT>', b'<STR_LIT>', b'<STR_LIT>']<EOL>word_ids = tl.nlp.words_to_word_ids(words, dictionary, _UNK)<EOL>context = tl.nlp.word_ids_to_words(word_ids, reverse_dictionary)<EOL>print('<STR_LIT>', word_ids)<EOL>print('<STR_LIT>', context)<EOL>vector = sess.run(emb_net.outputs, feed_dict={x: [word_id]})<EOL>print('<STR_LIT>', vector.shape)<EOL>vectors = sess.run(emb_net.outputs, feed_dict={x: word_ids})<EOL>print('<STR_LIT>', vectors.shape)<EOL>", "docstring": "How to use Embedding layer, and how to convert IDs to vector,\n    IDs to words, etc.", "id": "f11079:m3"}
{"signature": "def main_lstm_generate_text():", "body": "<EOL>init_scale = <NUM_LIT:0.1><EOL>learning_rate = <NUM_LIT:1.0><EOL>max_grad_norm = <NUM_LIT:5><EOL>sequence_length = <NUM_LIT:20><EOL>hidden_size = <NUM_LIT:200><EOL>max_epoch = <NUM_LIT:4><EOL>max_max_epoch = <NUM_LIT:100><EOL>lr_decay = <NUM_LIT><EOL>batch_size = <NUM_LIT:20><EOL>top_k_list = [<NUM_LIT:1>, <NUM_LIT:3>, <NUM_LIT:5>, <NUM_LIT:10>]<EOL>print_length = <NUM_LIT:30><EOL>model_file_name = \"<STR_LIT>\"<EOL>words = customized_read_words(input_fpath=\"<STR_LIT>\")<EOL>vocab = tl.nlp.create_vocab([words], word_counts_output_file='<STR_LIT>', min_word_count=<NUM_LIT:1>)<EOL>vocab = tl.nlp.Vocabulary('<STR_LIT>', unk_word=\"<STR_LIT>\")<EOL>vocab_size = vocab.unk_id + <NUM_LIT:1><EOL>train_data = [vocab.word_to_id(word) for word in words]<EOL>seed = \"<STR_LIT>\"<EOL>seed = nltk.tokenize.word_tokenize(seed)<EOL>print('<STR_LIT>' % seed)<EOL>sess = tf.InteractiveSession()<EOL>input_data = tf.placeholder(tf.int32, [batch_size, sequence_length])<EOL>targets = tf.placeholder(tf.int32, [batch_size, sequence_length])<EOL>input_data_test = tf.placeholder(tf.int32, [<NUM_LIT:1>, <NUM_LIT:1>])<EOL>def inference(x, is_train, sequence_length, reuse=None):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>print(\"<STR_LIT>\" % (sequence_length, is_train, reuse))<EOL>rnn_init = tf.random_uniform_initializer(-init_scale, init_scale)<EOL>with tf.variable_scope(\"<STR_LIT>\", reuse=reuse):<EOL><INDENT>network = EmbeddingInputlayer(x, vocab_size, hidden_size, rnn_init, name='<STR_LIT>')<EOL>network = RNNLayer(<EOL>network, cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={<EOL>'<STR_LIT>': <NUM_LIT:0.0>,<EOL>'<STR_LIT>': True<EOL>}, n_hidden=hidden_size, initializer=rnn_init, n_steps=sequence_length, return_last=False,<EOL>return_seq_2d=True, name='<STR_LIT>'<EOL>)<EOL>lstm1 = network<EOL>network = DenseLayer(network, vocab_size, W_init=rnn_init, b_init=rnn_init, act=None, name='<STR_LIT>')<EOL><DEDENT>return network, 
lstm1<EOL><DEDENT>network, lstm1 = inference(input_data, is_train=True, sequence_length=sequence_length, reuse=None)<EOL>network_test, lstm1_test = inference(input_data_test, is_train=False, sequence_length=<NUM_LIT:1>, reuse=True)<EOL>y_linear = network_test.outputs<EOL>y_soft = tf.nn.softmax(y_linear)<EOL>def loss_fn(outputs, targets, batch_size, sequence_length):<EOL><INDENT>loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(<EOL>[outputs], [tf.reshape(targets, [-<NUM_LIT:1>])], [tf.ones([batch_size * sequence_length])]<EOL>)<EOL>cost = tf.reduce_sum(loss) / batch_size<EOL>return cost<EOL><DEDENT>cost = loss_fn(network.outputs, targets, batch_size, sequence_length)<EOL>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>lr = tf.Variable(<NUM_LIT:0.0>, trainable=False)<EOL><DEDENT>tvars = network.all_params<EOL>grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)<EOL>optimizer = tf.train.GradientDescentOptimizer(lr)<EOL>train_op = optimizer.apply_gradients(zip(grads, tvars))<EOL>sess.run(tf.global_variables_initializer())<EOL>print(\"<STR_LIT>\")<EOL>for i in range(max_max_epoch):<EOL><INDENT>new_lr_decay = lr_decay**max(i - max_epoch, <NUM_LIT:0.0>)<EOL>sess.run(tf.assign(lr, learning_rate * new_lr_decay))<EOL>print(\"<STR_LIT>\" % (i + <NUM_LIT:1>, max_max_epoch, sess.run(lr)))<EOL>epoch_size = ((len(train_data) // batch_size) - <NUM_LIT:1>) // sequence_length<EOL>start_time = time.time()<EOL>costs = <NUM_LIT:0.0><EOL>iters = <NUM_LIT:0><EOL>state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)<EOL>for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, sequence_length)):<EOL><INDENT>_cost, state1, _ = sess.run(<EOL>[cost, lstm1.final_state, train_op], feed_dict={<EOL>input_data: x,<EOL>targets: y,<EOL>lstm1.initial_state: state1<EOL>}<EOL>)<EOL>costs += _cost<EOL>iters += sequence_length<EOL>if step % (epoch_size // <NUM_LIT:10>) == <NUM_LIT:1>:<EOL><INDENT>print(<EOL>\"<STR_LIT>\" %<EOL>(step * 
<NUM_LIT:1.0> / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time))<EOL>)<EOL><DEDENT><DEDENT>train_perplexity = np.exp(costs / iters)<EOL>print(\"<STR_LIT>\" % (i + <NUM_LIT:1>, max_max_epoch, train_perplexity))<EOL>for top_k in top_k_list:<EOL><INDENT>state1 = tl.layers.initialize_rnn_state(lstm1_test.initial_state)<EOL>outs_id = [vocab.word_to_id(w) for w in seed]<EOL>for ids in outs_id[:-<NUM_LIT:1>]:<EOL><INDENT>a_id = np.asarray(ids).reshape(<NUM_LIT:1>, <NUM_LIT:1>)<EOL>state1 = sess.run(<EOL>[lstm1_test.final_state], feed_dict={<EOL>input_data_test: a_id,<EOL>lstm1_test.initial_state: state1<EOL>}<EOL>)<EOL><DEDENT>a_id = outs_id[-<NUM_LIT:1>]<EOL>for _ in range(print_length):<EOL><INDENT>a_id = np.asarray(a_id).reshape(<NUM_LIT:1>, <NUM_LIT:1>)<EOL>out, state1 = sess.run(<EOL>[y_soft, lstm1_test.final_state], feed_dict={<EOL>input_data_test: a_id,<EOL>lstm1_test.initial_state: state1<EOL>}<EOL>)<EOL>a_id = tl.nlp.sample_top(out[<NUM_LIT:0>], top_k=top_k)<EOL>outs_id.append(a_id)<EOL><DEDENT>sentence = [vocab.id_to_word(w) for w in outs_id]<EOL>sentence = \"<STR_LIT:U+0020>\".join(sentence)<EOL>print(top_k, '<STR_LIT::>', sentence)<EOL><DEDENT><DEDENT>print(\"<STR_LIT>\")<EOL>tl.files.save_npz(network_test.all_params, name=model_file_name)<EOL>", "docstring": "Generate text by Synced sequence input and output.", "id": "f11079:m4"}
{"signature": "def data_to_tfrecord(images, labels, filename):", "body": "if os.path.isfile(filename):<EOL><INDENT>print(\"<STR_LIT>\" % filename)<EOL>return<EOL><DEDENT>print(\"<STR_LIT>\" % filename)<EOL>writer = tf.python_io.TFRecordWriter(filename)<EOL>for index, img in enumerate(images):<EOL><INDENT>img_raw = img.tobytes()<EOL>label = int(labels[index])<EOL>example = tf.train.Example(<EOL>features=tf.train.Features(<EOL>feature={<EOL>\"<STR_LIT:label>\": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),<EOL>'<STR_LIT>': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),<EOL>}<EOL>)<EOL>)<EOL>writer.write(example.SerializeToString())  <EOL><DEDENT>writer.close()<EOL>", "docstring": "Save data into TFRecord.", "id": "f11089:m0"}
{"signature": "def read_and_decode(filename, is_train=None):", "body": "filename_queue = tf.train.string_input_producer([filename])<EOL>reader = tf.TFRecordReader()<EOL>_, serialized_example = reader.read(filename_queue)<EOL>features = tf.parse_single_example(<EOL>serialized_example, features={<EOL>'<STR_LIT:label>': tf.FixedLenFeature([], tf.int64),<EOL>'<STR_LIT>': tf.FixedLenFeature([], tf.string),<EOL>}<EOL>)<EOL>img = tf.decode_raw(features['<STR_LIT>'], tf.float32)<EOL>img = tf.reshape(img, [<NUM_LIT:32>, <NUM_LIT:32>, <NUM_LIT:3>])<EOL>if is_train ==True:<EOL><INDENT>img = tf.random_crop(img, [<NUM_LIT>, <NUM_LIT>, <NUM_LIT:3>])<EOL>img = tf.image.random_flip_left_right(img)<EOL>img = tf.image.random_brightness(img, max_delta=<NUM_LIT>)<EOL>img = tf.image.random_contrast(img, lower=<NUM_LIT>, upper=<NUM_LIT>)<EOL>img = tf.image.per_image_standardization(img)<EOL><DEDENT>elif is_train == False:<EOL><INDENT>img = tf.image.resize_image_with_crop_or_pad(img, <NUM_LIT>, <NUM_LIT>)<EOL>img = tf.image.per_image_standardization(img)<EOL><DEDENT>elif is_train == None:<EOL><INDENT>img = img<EOL><DEDENT>label = tf.cast(features['<STR_LIT:label>'], tf.int32)<EOL>return img, label<EOL>", "docstring": "Return tensor to read from TFRecord.", "id": "f11089:m1"}
{"signature": "def read_and_decode(filename, is_train=None):", "body": "filename_queue = tf.train.string_input_producer([filename])<EOL>reader = tf.TFRecordReader()<EOL>_, serialized_example = reader.read(filename_queue)<EOL>features = tf.parse_single_example(<EOL>serialized_example, features={<EOL>'<STR_LIT:label>': tf.FixedLenFeature([], tf.int64),<EOL>'<STR_LIT>': tf.FixedLenFeature([], tf.string),<EOL>}<EOL>)<EOL>img = tf.decode_raw(features['<STR_LIT>'], tf.float32)<EOL>img = tf.reshape(img, [<NUM_LIT:32>, <NUM_LIT:32>, <NUM_LIT:3>])<EOL>if is_train ==True:<EOL><INDENT>img = tf.random_crop(img, [<NUM_LIT>, <NUM_LIT>, <NUM_LIT:3>])<EOL>img = tf.image.random_flip_left_right(img)<EOL>img = tf.image.random_brightness(img, max_delta=<NUM_LIT>)<EOL>img = tf.image.random_contrast(img, lower=<NUM_LIT>, upper=<NUM_LIT>)<EOL>img = tf.image.per_image_standardization(img)<EOL><DEDENT>elif is_train == False:<EOL><INDENT>img = tf.image.resize_image_with_crop_or_pad(img, <NUM_LIT>, <NUM_LIT>)<EOL>img = tf.image.per_image_standardization(img)<EOL><DEDENT>elif is_train == None:<EOL><INDENT>img = img<EOL><DEDENT>label = tf.cast(features['<STR_LIT:label>'], tf.int32)<EOL>return img, label<EOL>", "docstring": "Return tensor to read from TFRecord.", "id": "f11095:m1"}
{"signature": "def data_to_tfrecord(images, labels, filename):", "body": "if os.path.isfile(filename):<EOL><INDENT>print(\"<STR_LIT>\" % filename)<EOL>return<EOL><DEDENT>print(\"<STR_LIT>\" % filename)<EOL>writer = tf.python_io.TFRecordWriter(filename)<EOL>for index, img in enumerate(images):<EOL><INDENT>img_raw = img.tobytes()<EOL>label = int(labels[index])<EOL>example = tf.train.Example(<EOL>features=tf.train.Features(<EOL>feature={<EOL>\"<STR_LIT:label>\": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),<EOL>'<STR_LIT>': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),<EOL>}<EOL>)<EOL>)<EOL>writer.write(example.SerializeToString())  <EOL><DEDENT>writer.close()<EOL>", "docstring": "Save data into TFRecord.", "id": "f11096:m0"}
{"signature": "def data_to_tfrecord(images, labels, filename):", "body": "if os.path.isfile(filename):<EOL><INDENT>print(\"<STR_LIT>\" % filename)<EOL>return<EOL><DEDENT>print(\"<STR_LIT>\" % filename)<EOL>writer = tf.python_io.TFRecordWriter(filename)<EOL>for index, img in enumerate(images):<EOL><INDENT>img_raw = img.tobytes()<EOL>label = int(labels[index])<EOL>example = tf.train.Example(<EOL>features=tf.train.Features(<EOL>feature={<EOL>\"<STR_LIT:label>\": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),<EOL>'<STR_LIT>': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),<EOL>}<EOL>)<EOL>)<EOL>writer.write(example.SerializeToString())  <EOL><DEDENT>writer.close()<EOL>", "docstring": "Save data into TFRecord.", "id": "f11099:m0"}
{"signature": "def distort_image(image, thread_id):", "body": "<EOL>with tf.name_scope(\"<STR_LIT>\"):  <EOL><INDENT>image = tf.image.random_flip_left_right(image)<EOL><DEDENT>color_ordering = thread_id % <NUM_LIT:2><EOL>with tf.name_scope(\"<STR_LIT>\"):  <EOL><INDENT>if color_ordering == <NUM_LIT:0>:<EOL><INDENT>image = tf.image.random_brightness(image, max_delta=<NUM_LIT> / <NUM_LIT>)<EOL>image = tf.image.random_saturation(image, lower=<NUM_LIT:0.5>, upper=<NUM_LIT>)<EOL>image = tf.image.random_hue(image, max_delta=<NUM_LIT>)<EOL>image = tf.image.random_contrast(image, lower=<NUM_LIT:0.5>, upper=<NUM_LIT>)<EOL><DEDENT>elif color_ordering == <NUM_LIT:1>:<EOL><INDENT>image = tf.image.random_brightness(image, max_delta=<NUM_LIT> / <NUM_LIT>)<EOL>image = tf.image.random_contrast(image, lower=<NUM_LIT:0.5>, upper=<NUM_LIT>)<EOL>image = tf.image.random_saturation(image, lower=<NUM_LIT:0.5>, upper=<NUM_LIT>)<EOL>image = tf.image.random_hue(image, max_delta=<NUM_LIT>)<EOL><DEDENT>image = tf.clip_by_value(image, <NUM_LIT:0.0>, <NUM_LIT:1.0>)<EOL><DEDENT>return image<EOL>", "docstring": "Perform random distortions on an image.\n    Args:\n        image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).\n        thread_id: Preprocessing thread id used to select the ordering of color\n        distortions. There should be a multiple of 2 preprocessing threads.\n    Returns:````\n        distorted_image: A float32 Tensor of shape [height, width, 3] with values in\n        [0, 1].", "id": "f11102:m4"}
{"signature": "def _bytes_feature(value):", "body": "<EOL>return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))<EOL>", "docstring": "Wrapper for inserting a bytes Feature into a SequenceExample proto,\n    e.g, an image in byte", "id": "f11102:m1"}
{"signature": "def prefetch_input_data(<EOL>reader, file_pattern, is_training, batch_size, values_per_shard, input_queue_capacity_factor=<NUM_LIT:16>,<EOL>num_reader_threads=<NUM_LIT:1>, shard_queue_name=\"<STR_LIT>\", value_queue_name=\"<STR_LIT>\"<EOL>):", "body": "data_files = []<EOL>for pattern in file_pattern.split(\"<STR_LIT:U+002C>\"):<EOL><INDENT>data_files.extend(tf.gfile.Glob(pattern))<EOL><DEDENT>if not data_files:<EOL><INDENT>tl.logging.fatal(\"<STR_LIT>\", file_pattern)<EOL><DEDENT>else:<EOL><INDENT>tl.logging.info(\"<STR_LIT>\", len(data_files), file_pattern)<EOL><DEDENT>if is_training:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>filename_queue = tf.train.string_input_producer(data_files, shuffle=True, capacity=<NUM_LIT:16>, name=shard_queue_name)<EOL>min_queue_examples = values_per_shard * input_queue_capacity_factor<EOL>capacity = min_queue_examples + <NUM_LIT:100> * batch_size<EOL>values_queue = tf.RandomShuffleQueue(<EOL>capacity=capacity, min_after_dequeue=min_queue_examples, dtypes=[tf.string],<EOL>name=\"<STR_LIT>\" + value_queue_name<EOL>)<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>filename_queue = tf.train.string_input_producer(data_files, shuffle=False, capacity=<NUM_LIT:1>, name=shard_queue_name)<EOL>capacity = values_per_shard + <NUM_LIT:3> * batch_size<EOL>values_queue = tf.FIFOQueue(capacity=capacity, dtypes=[tf.string], name=\"<STR_LIT>\" + value_queue_name)<EOL><DEDENT>enqueue_ops = []<EOL>for _ in range(num_reader_threads):<EOL><INDENT>_, value = reader.read(filename_queue)<EOL>enqueue_ops.append(values_queue.enqueue([value]))<EOL><DEDENT>tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(values_queue, enqueue_ops))<EOL>tf.summary.scalar(<EOL>\"<STR_LIT>\" % (values_queue.name, capacity),<EOL>tf.cast(values_queue.size(), tf.float32) * (<NUM_LIT:1.> / capacity)<EOL>)<EOL>return values_queue<EOL>", "docstring": "Prefetches string values from disk into an input queue.\n\n    In training the capacity of the 
queue is important because a larger queue\n    means better mixing of training examples between shards. The minimum number of\n    values kept in the queue is values_per_shard * input_queue_capacity_factor,\n    where input_queue_memory factor should be chosen to trade-off better mixing\n    with memory usage.\n\n    Args:\n        reader: Instance of tf.ReaderBase.\n        file_pattern: Comma-separated list of file patterns (e.g.\n            /tmp/train_data-?????-of-00100).\n        is_training: Boolean; whether prefetching for training or eval.\n        batch_size: Model batch size used to determine queue capacity.\n        values_per_shard: Approximate number of values per shard.\n        input_queue_capacity_factor: Minimum number of values to keep in the queue\n        in multiples of values_per_shard. See comments above.\n        num_reader_threads: Number of reader threads to fill the queue.\n        shard_queue_name: Name for the shards filename queue.\n        value_queue_name: Name for the values input queue.\n\n    Returns:\n        A Queue containing prefetched string values.", "id": "f11102:m5"}
{"signature": "def example4():", "body": "transform_matrix = create_transformation_matrix()<EOL>result = tl.prepro.affine_transform_cv2(image, transform_matrix)  <EOL>coords = [[(<NUM_LIT:50>, <NUM_LIT:100>), (<NUM_LIT:100>, <NUM_LIT:100>), (<NUM_LIT:100>, <NUM_LIT:50>), (<NUM_LIT:200>, <NUM_LIT:200>)], [(<NUM_LIT>, <NUM_LIT:50>), (<NUM_LIT:200>, <NUM_LIT:50>), (<NUM_LIT:200>, <NUM_LIT:100>)]]<EOL>coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix)<EOL>def imwrite(image, coords_list, name):<EOL><INDENT>coords_list_ = []<EOL>for coords in coords_list:<EOL><INDENT>coords = np.array(coords, np.int32)<EOL>coords = coords.reshape((-<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:2>))<EOL>coords_list_.append(coords)<EOL><DEDENT>image = cv2.polylines(image, coords_list_, True, (<NUM_LIT:0>, <NUM_LIT:255>, <NUM_LIT:255>), <NUM_LIT:3>)<EOL>cv2.imwrite(name, image[..., ::-<NUM_LIT:1>])<EOL><DEDENT>imwrite(image, coords, '<STR_LIT>')<EOL>imwrite(result, coords_result, '<STR_LIT>')<EOL>", "docstring": "Example 4: Transforming coordinates using affine matrix.", "id": "f11105:m4"}
{"signature": "def convert_onnx_to_model(onnx_input_path):", "body": "model = onnx.load(onnx_input_path)<EOL>tf_rep = prepare(model)<EOL>img = np.load(\"<STR_LIT>\")<EOL>output = tf_rep.run(img.reshape([<NUM_LIT:1>, <NUM_LIT>]))<EOL>print(\"<STR_LIT>\", np.argmax(output))<EOL>", "docstring": "Reimplementation of the TensorFlow-onnx official tutorial convert the onnx file to specific: model\n\n    Parameters\n    -----------\n    onnx_input_path : string\n    the path where you save the onnx file.\n\n    References\n    -----------\n    - `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>`__", "id": "f11112:m3"}
{"signature": "def freeze_graph(graph_path, checkpoint_path, output_path, end_node_names, is_binary_graph):", "body": "_freeze_graph(<EOL>input_graph=graph_path, input_saver='<STR_LIT>', input_binary=is_binary_graph, input_checkpoint=checkpoint_path,<EOL>output_graph=output_path, output_node_names=end_node_names, restore_op_name='<STR_LIT>',<EOL>filename_tensor_name='<STR_LIT>', clear_devices=True, initializer_nodes=None<EOL>)<EOL>", "docstring": "Reimplementation of the TensorFlow official freeze_graph function to freeze the graph and checkpoint together:\n\n    Parameters\n    -----------\n    graph_path : string\n        the path where your graph file save.\n    checkpoint_output_path : string\n        the path where your checkpoint save.\n    output_path : string\n        the path where you want to save the output proto buff\n    end_node_names : string\n        the name of the end node in your graph you want to get in your proto buff\n    is_binary_graph : boolean\n        declare your file whether is a binary graph\n\n    References\n    ----------\n    - `onnx-tf exporting tutorial <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxTensorflowExport.ipynb>`__\n    - `tensorflow freeze_graph <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py>`", "id": "f11112:m1"}
{"signature": "def google2_log_prefix(level, timestamp=None, file_and_line=None):", "body": "<EOL>global _level_names<EOL>now = timestamp or _time.time()<EOL>now_tuple = _time.localtime(now)<EOL>now_microsecond = int(<NUM_LIT> * (now % <NUM_LIT:1.0>))<EOL>(filename, line) = file_and_line or _GetFileAndLine()<EOL>basename = _os.path.basename(filename)<EOL>severity = '<STR_LIT:I>'<EOL>if level in _level_names:<EOL><INDENT>severity = _level_names[level][<NUM_LIT:0>]<EOL><DEDENT>s = '<STR_LIT>' % (<EOL>severity,<EOL>now_tuple[<NUM_LIT:1>],  <EOL>now_tuple[<NUM_LIT:2>],  <EOL>now_tuple[<NUM_LIT:3>],  <EOL>now_tuple[<NUM_LIT:4>],  <EOL>now_tuple[<NUM_LIT:5>],  <EOL>now_microsecond,<EOL>_get_thread_id(),<EOL>basename,<EOL>line<EOL>)<EOL>return s<EOL>", "docstring": "Assemble a logline prefix using the google2 format.", "id": "f11116:m16"}
{"signature": "def log_if(level, msg, condition, *args):", "body": "if condition:<EOL><INDENT>vlog(level, msg, *args)<EOL><DEDENT>", "docstring": "Log 'msg % args' at level 'level' only if condition is fulfilled.", "id": "f11116:m14"}
{"signature": "def set_verbosity(v):", "body": "_get_logger().setLevel(v)<EOL>", "docstring": "Sets the threshold for what messages will be logged.", "id": "f11116:m18"}
{"signature": "def _GetFileAndLine():", "body": "<EOL>f = _sys._getframe()<EOL>our_file = f.f_code.co_filename<EOL>f = f.f_back<EOL>while f:<EOL><INDENT>code = f.f_code<EOL>if code.co_filename != our_file:<EOL><INDENT>return (code.co_filename, f.f_lineno)<EOL><DEDENT>f = f.f_back<EOL><DEDENT>return ('<STR_LIT>', <NUM_LIT:0>)<EOL>", "docstring": "Returns (filename, linenumber) for the stack frame.", "id": "f11116:m15"}
{"signature": "def deconv2d_bilinear_upsampling_initializer(shape):", "body": "if shape[<NUM_LIT:0>] != shape[<NUM_LIT:1>]:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if shape[<NUM_LIT:3>] < shape[<NUM_LIT:2>]:<EOL><INDENT>raise Exception(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>filter_size = shape[<NUM_LIT:0>]<EOL>num_out_channels = shape[<NUM_LIT:2>]<EOL>num_in_channels = shape[<NUM_LIT:3>]<EOL>bilinear_kernel = np.zeros([filter_size, filter_size], dtype=np.float32)<EOL>scale_factor = (filter_size + <NUM_LIT:1>) // <NUM_LIT:2><EOL>if filter_size % <NUM_LIT:2> == <NUM_LIT:1>:<EOL><INDENT>center = scale_factor - <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>center = scale_factor - <NUM_LIT:0.5><EOL><DEDENT>for x in range(filter_size):<EOL><INDENT>for y in range(filter_size):<EOL><INDENT>bilinear_kernel[x, y] = (<NUM_LIT:1> - abs(x - center) / scale_factor) * (<NUM_LIT:1> - abs(y - center) / scale_factor)<EOL><DEDENT><DEDENT>weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels))<EOL>for i in range(num_out_channels):<EOL><INDENT>weights[:, :, i, i] = bilinear_kernel<EOL><DEDENT>return tf.constant_initializer(value=weights, dtype=LayersConfig.tf_dtype)<EOL>", "docstring": "Returns the initializer that can be passed to DeConv2dLayer for initializing the\n    weights in correspondence to channel-wise bilinear up-sampling.\n    Used in segmentation approaches such as [FCN](https://arxiv.org/abs/1605.06211)\n\n    Parameters\n    ----------\n    shape : tuple of int\n        The shape of the filters, [height, width, output_channels, in_channels].\n        It must match the shape passed to DeConv2dLayer.\n\n    Returns\n    -------\n    ``tf.constant_initializer``\n        A constant initializer with weights set to correspond to per channel bilinear upsampling\n        when passed as W_int in DeConv2dLayer\n\n    Examples\n    --------\n    - Upsampling by a factor of 2, ie e.g 100->200\n    >>> import tensorflow as tf\n    >>> import tensorlayer 
as tl\n    >>> rescale_factor = 2\n    >>> imsize = 128\n    >>> num_channels = 3\n    >>> filter_shape = (5, 5)\n    >>> filter_size = (2 * rescale_factor - rescale_factor % 2) #Corresponding bilinear filter size\n    >>> num_in_channels = 3\n    >>> num_out_channels = 3\n    >>> deconv_filter_shape = (filter_size, filter_size, num_out_channels, num_in_channels)\n    >>> x = tf.placeholder(tf.float32, (1, imsize, imsize, num_channels))\n    >>> net = tl.layers.InputLayer(x, name='input_layer')\n    >>> bilinear_init = deconv2d_bilinear_upsampling_initializer(shape=filter_shape)\n    >>> net = tl.layers.DeConv2dLayer(net,\n    ...                    shape=filter_shape,\n    ...                    output_shape=(1, imsize*rescale_factor, imsize*rescale_factor, num_out_channels),\n    ...                    strides=(1, rescale_factor, rescale_factor, 1),\n    ...                    W_init=bilinear_init,\n    ...                    padding='SAME',\n    ...                    act=None, name='g/h1/decon2d')", "id": "f11119:m0"}
{"signature": "def load_ptb_dataset(path='<STR_LIT:data>'):", "body": "path = os.path.join(path, '<STR_LIT>')<EOL>logging.info(\"<STR_LIT>\".format(path))<EOL>filename = '<STR_LIT>'<EOL>url = '<STR_LIT>'<EOL>maybe_download_and_extract(filename, path, url, extract=True)<EOL>data_path = os.path.join(path, '<STR_LIT>', '<STR_LIT:data>')<EOL>train_path = os.path.join(data_path, \"<STR_LIT>\")<EOL>valid_path = os.path.join(data_path, \"<STR_LIT>\")<EOL>test_path = os.path.join(data_path, \"<STR_LIT>\")<EOL>word_to_id = nlp.build_vocab(nlp.read_words(train_path))<EOL>train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)<EOL>valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)<EOL>test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)<EOL>vocab_size = len(word_to_id)<EOL>return train_data, valid_data, test_data, vocab_size<EOL>", "docstring": "Load Penn TreeBank (PTB) dataset.\n\n    It is used in many LANGUAGE MODELING papers,\n    including \"Empirical Evaluation and Combination of Advanced Language\n    Modeling Techniques\", \"Recurrent Neural Network Regularization\".\n    It consists of 929k training words, 73k validation words, and 82k test\n    words. It has 10k words in its vocabulary.\n\n    Parameters\n    ----------\n    path : str\n        The path that the data is downloaded to, defaults is ``data/ptb/``.\n\n    Returns\n    --------\n    train_data, valid_data, test_data : list of int\n        The training, validating and testing data in integer format.\n    vocab_size : int\n        The vocabulary size.\n\n    Examples\n    --------\n    >>> train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()\n\n    References\n    ---------------\n    - ``tensorflow.models.rnn.ptb import reader``\n    - `Manual download <http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz>`__\n\n    Notes\n    ------\n    - If you want to get the raw data, see the source code.", "id": "f11121:m0"}
{"signature": "def load_flickr1M_dataset(tag='<STR_LIT>', size=<NUM_LIT:10>, path=\"<STR_LIT:data>\", n_threads=<NUM_LIT:50>, printable=False):", "body": "import shutil<EOL>path = os.path.join(path, '<STR_LIT>')<EOL>logging.info(\"<STR_LIT>\".format(size * <NUM_LIT:10>, size * <NUM_LIT>))<EOL>images_zip = [<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'<EOL>]<EOL>tag_zip = '<STR_LIT>'<EOL>url = '<STR_LIT>'<EOL>for image_zip in images_zip[<NUM_LIT:0>:size]:<EOL><INDENT>image_folder = image_zip.split(\"<STR_LIT:.>\")[<NUM_LIT:0>]<EOL>if folder_exists(os.path.join(path, image_folder)) is False:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(image_folder, path))<EOL>maybe_download_and_extract(image_zip, path, url, extract=True)<EOL>del_file(os.path.join(path, image_zip))<EOL>shutil.move(os.path.join(path, '<STR_LIT>'), os.path.join(path, image_folder))<EOL><DEDENT>else:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(image_folder, path))<EOL><DEDENT><DEDENT>if folder_exists(os.path.join(path, \"<STR_LIT>\")) is False:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(path))<EOL>maybe_download_and_extract(tag_zip, path, url, extract=True)<EOL>del_file(os.path.join(path, tag_zip))<EOL><DEDENT>else:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(path))<EOL><DEDENT>images_list = []<EOL>images_folder_list = []<EOL>for i in range(<NUM_LIT:0>, size):<EOL><INDENT>images_folder_list += load_folder_list(path=os.path.join(path, '<STR_LIT>' % i))<EOL><DEDENT>images_folder_list.sort(key=lambda s: int(s.split('<STR_LIT:/>')[-<NUM_LIT:1>]))  <EOL>for folder in images_folder_list[<NUM_LIT:0>:size * <NUM_LIT:10>]:<EOL><INDENT>tmp = load_file_list(path=folder, regx='<STR_LIT>', printable=False)<EOL>tmp.sort(key=lambda s: int(s.split('<STR_LIT:.>')[-<NUM_LIT:2>]))  <EOL>images_list.extend([os.path.join(folder, x) for x in tmp])<EOL><DEDENT>tag_list = []<EOL>tag_folder_list = 
load_folder_list(os.path.join(path, \"<STR_LIT>\"))<EOL>tag_folder_list.sort(key=lambda s: int(os.path.basename(s)))<EOL>for folder in tag_folder_list[<NUM_LIT:0>:size * <NUM_LIT:10>]:<EOL><INDENT>tmp = load_file_list(path=folder, regx='<STR_LIT>', printable=False)<EOL>tmp.sort(key=lambda s: int(s.split('<STR_LIT:.>')[-<NUM_LIT:2>]))  <EOL>tmp = [os.path.join(folder, s) for s in tmp]<EOL>tag_list += tmp<EOL><DEDENT>logging.info(\"<STR_LIT>\".format(tag))<EOL>select_images_list = []<EOL>for idx, _val in enumerate(tag_list):<EOL><INDENT>tags = read_file(tag_list[idx]).split('<STR_LIT:\\n>')<EOL>if tag in tags:<EOL><INDENT>select_images_list.append(images_list[idx])<EOL><DEDENT><DEDENT>logging.info(\"<STR_LIT>\".format(tag))<EOL>images = visualize.read_images(select_images_list, '<STR_LIT>', n_threads=n_threads, printable=printable)<EOL>return images<EOL>", "docstring": "Load Flick1M dataset.\n\n    Returns a list of images by a given tag from Flickr1M dataset,\n    it will download Flickr1M from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`__\n    at the first time you use it.\n\n    Parameters\n    ------------\n    tag : str or None\n        What images to return.\n            - If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search <https://www.flickr.com/search/>`__.\n            - If you want to get all images, set to ``None``.\n\n    size : int\n        integer between 1 to 10. 1 means 100k images ... 5 means 500k images, 10 means all 1 million images. 
Default is 10.\n    path : str\n        The path that the data is downloaded to, defaults is ``data/flickr25k/``.\n    n_threads : int\n        The number of thread to read image.\n    printable : boolean\n        Whether to print infomation when reading images, default is ``False``.\n\n    Examples\n    ----------\n    Use 200k images\n\n    >>> images = tl.files.load_flickr1M_dataset(tag='zebra', size=2)\n\n    Use 1 Million images\n\n    >>> images = tl.files.load_flickr1M_dataset(tag='zebra')", "id": "f11122:m0"}
{"signature": "def load_celebA_dataset(path='<STR_LIT:data>'):", "body": "data_dir = '<STR_LIT>'<EOL>filename, drive_id = \"<STR_LIT>\", \"<STR_LIT>\"<EOL>save_path = os.path.join(path, filename)<EOL>image_path = os.path.join(path, data_dir)<EOL>if os.path.exists(image_path):<EOL><INDENT>logging.info('<STR_LIT>'.format(save_path))<EOL><DEDENT>else:<EOL><INDENT>exists_or_mkdir(path)<EOL>download_file_from_google_drive(drive_id, save_path)<EOL>zip_dir = '<STR_LIT>'<EOL>with zipfile.ZipFile(save_path) as zf:<EOL><INDENT>zip_dir = zf.namelist()[<NUM_LIT:0>]<EOL>zf.extractall(path)<EOL><DEDENT>os.remove(save_path)<EOL>os.rename(os.path.join(path, zip_dir), image_path)<EOL><DEDENT>data_files = load_file_list(path=image_path, regx='<STR_LIT>', printable=False)<EOL>for i, _v in enumerate(data_files):<EOL><INDENT>data_files[i] = os.path.join(image_path, data_files[i])<EOL><DEDENT>return data_files<EOL>", "docstring": "Load CelebA dataset\n\n    Return a list of image path.\n\n    Parameters\n    -----------\n    path : str\n        The path that the data is downloaded to, defaults is ``data/celebA/``.", "id": "f11126:m0"}
{"signature": "def load_wmt_en_fr_dataset(path='<STR_LIT:data>'):", "body": "path = os.path.join(path, '<STR_LIT>')<EOL>_WMT_ENFR_TRAIN_URL = \"<STR_LIT>\"<EOL>_WMT_ENFR_DEV_URL = \"<STR_LIT>\"<EOL>def gunzip_file(gz_path, new_path):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>logging.info(\"<STR_LIT>\" % (gz_path, new_path))<EOL>with gzip.open(gz_path, \"<STR_LIT:rb>\") as gz_file:<EOL><INDENT>with open(new_path, \"<STR_LIT:wb>\") as new_file:<EOL><INDENT>for line in gz_file:<EOL><INDENT>new_file.write(line)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>def get_wmt_enfr_train_set(path):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>filename = \"<STR_LIT>\"<EOL>maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL, extract=True)<EOL>train_path = os.path.join(path, \"<STR_LIT>\")<EOL>gunzip_file(train_path + \"<STR_LIT>\", train_path + \"<STR_LIT>\")<EOL>gunzip_file(train_path + \"<STR_LIT>\", train_path + \"<STR_LIT>\")<EOL>return train_path<EOL><DEDENT>def get_wmt_enfr_dev_set(path):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>filename = \"<STR_LIT>\"<EOL>dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL, extract=False)<EOL>dev_name = \"<STR_LIT>\"<EOL>dev_path = os.path.join(path, \"<STR_LIT>\")<EOL>if not (gfile.Exists(dev_path + \"<STR_LIT>\") and gfile.Exists(dev_path + \"<STR_LIT>\")):<EOL><INDENT>logging.info(\"<STR_LIT>\" % dev_file)<EOL>with tarfile.open(dev_file, \"<STR_LIT>\") as dev_tar:<EOL><INDENT>fr_dev_file = dev_tar.getmember(\"<STR_LIT>\" + dev_name + \"<STR_LIT>\")<EOL>en_dev_file = dev_tar.getmember(\"<STR_LIT>\" + dev_name + \"<STR_LIT>\")<EOL>fr_dev_file.name = dev_name + \"<STR_LIT>\"  <EOL>en_dev_file.name = dev_name + \"<STR_LIT>\"<EOL>dev_tar.extract(fr_dev_file, path)<EOL>dev_tar.extract(en_dev_file, path)<EOL><DEDENT><DEDENT>return dev_path<EOL><DEDENT>logging.info(\"<STR_LIT>\".format(path))<EOL>train_path = get_wmt_enfr_train_set(path)<EOL>dev_path = get_wmt_enfr_dev_set(path)<EOL>return train_path, dev_path<EOL>", "docstring": "Load 
WMT'15 English-to-French translation dataset.\n\n    It will download the data from the WMT'15 Website (10^9-French-English corpus), and the 2013 news test from the same site as development set.\n    Returns the directories of training data and test data.\n\n    Parameters\n    ----------\n    path : str\n        The path that the data is downloaded to, defaults is ``data/wmt_en_fr/``.\n\n    References\n    ----------\n    - Code modified from /tensorflow/models/rnn/translation/data_utils.py\n\n    Notes\n    -----\n    Usually, it will take a long time to download this dataset.", "id": "f11128:m0"}
{"signature": "def load_voc_dataset(path='<STR_LIT:data>', dataset='<STR_LIT>', contain_classes_in_person=False):", "body": "path = os.path.join(path, '<STR_LIT>')<EOL>def _recursive_parse_xml_to_dict(xml):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if xml is not None:<EOL><INDENT>return {xml.tag: xml.text}<EOL><DEDENT>result = {}<EOL>for child in xml:<EOL><INDENT>child_result = _recursive_parse_xml_to_dict(child)<EOL>if child.tag != '<STR_LIT:object>':<EOL><INDENT>result[child.tag] = child_result[child.tag]<EOL><DEDENT>else:<EOL><INDENT>if child.tag not in result:<EOL><INDENT>result[child.tag] = []<EOL><DEDENT>result[child.tag].append(child_result[child.tag])<EOL><DEDENT><DEDENT>return {xml.tag: result}<EOL><DEDENT>import xml.etree.ElementTree as ET<EOL>if dataset == \"<STR_LIT>\":<EOL><INDENT>url = \"<STR_LIT>\"<EOL>tar_filename = \"<STR_LIT>\"<EOL>extracted_filename = \"<STR_LIT>\"  <EOL>logging.info(\"<STR_LIT>\")<EOL><DEDENT>elif dataset == \"<STR_LIT>\":<EOL><INDENT>extracted_filename = \"<STR_LIT>\"  <EOL>logging.info(\"<STR_LIT>\")<EOL>logging.info(<EOL>\"<STR_LIT>\"<EOL>)<EOL>import time<EOL>time.sleep(<NUM_LIT:3>)<EOL>if os.path.isdir(os.path.join(path, extracted_filename)) is False:<EOL><INDENT>logging.info(\"<STR_LIT>\")<EOL>logging.info(<EOL>\"<STR_LIT>\"<EOL>)<EOL>logging.info(\"<STR_LIT>\" % path)<EOL>exit()<EOL><DEDENT><DEDENT>elif dataset == \"<STR_LIT>\":<EOL><INDENT>url = \"<STR_LIT>\"<EOL>tar_filename = \"<STR_LIT>\"<EOL>extracted_filename = \"<STR_LIT>\"<EOL>logging.info(\"<STR_LIT>\")<EOL><DEDENT>elif dataset == \"<STR_LIT>\":<EOL><INDENT>url = \"<STR_LIT>\"<EOL>tar_filename = \"<STR_LIT>\"<EOL>extracted_filename = \"<STR_LIT>\"<EOL>logging.info(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>if dataset != \"<STR_LIT>\":<EOL><INDENT>from sys import platform as _platform<EOL>if folder_exists(os.path.join(path, extracted_filename)) is False:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(extracted_filename, 
path))<EOL>maybe_download_and_extract(tar_filename, path, url, extract=True)<EOL>del_file(os.path.join(path, tar_filename))<EOL>if dataset == \"<STR_LIT>\":<EOL><INDENT>if _platform == \"<STR_LIT:win32>\":<EOL><INDENT>os.system(\"<STR_LIT>\".format(path, path))<EOL><DEDENT>else:<EOL><INDENT>os.system(\"<STR_LIT>\".format(path, path))<EOL><DEDENT><DEDENT>elif dataset == \"<STR_LIT>\":<EOL><INDENT>if _platform == \"<STR_LIT:win32>\":<EOL><INDENT>os.system(\"<STR_LIT>\".format(path, path))<EOL><DEDENT>else:<EOL><INDENT>os.system(\"<STR_LIT>\".format(path, path))<EOL><DEDENT><DEDENT>elif dataset == \"<STR_LIT>\":<EOL><INDENT>if _platform == \"<STR_LIT:win32>\":<EOL><INDENT>os.system(\"<STR_LIT>\".format(path, path))<EOL><DEDENT>else:<EOL><INDENT>os.system(\"<STR_LIT>\".format(path, path))<EOL><DEDENT><DEDENT>del_folder(os.path.join(path, '<STR_LIT>'))<EOL><DEDENT><DEDENT>classes = [<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:train>\", \"<STR_LIT>\"<EOL>]<EOL>if contain_classes_in_person:<EOL><INDENT>classes_in_person = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>classes_in_person = []<EOL><DEDENT>classes += classes_in_person  <EOL>classes_dict = utils.list_string_to_dict(classes)<EOL>logging.info(\"<STR_LIT>\".format(classes_dict))<EOL>folder_imgs = os.path.join(path, extracted_filename, \"<STR_LIT>\")<EOL>imgs_file_list = load_file_list(path=folder_imgs, regx='<STR_LIT>', printable=False)<EOL>logging.info(\"<STR_LIT>\".format(len(imgs_file_list)))<EOL>imgs_file_list.sort(<EOL>key=lambda s: int(s.replace('<STR_LIT:.>', '<STR_LIT:U+0020>').replace('<STR_LIT:_>', '<STR_LIT>').split('<STR_LIT:U+0020>')[-<NUM_LIT:2>])<EOL>)  <EOL>imgs_file_list = [os.path.join(folder_imgs, s) for s in 
imgs_file_list]<EOL>if dataset != \"<STR_LIT>\":<EOL><INDENT>folder_semseg = os.path.join(path, extracted_filename, \"<STR_LIT>\")<EOL>imgs_semseg_file_list = load_file_list(path=folder_semseg, regx='<STR_LIT>', printable=False)<EOL>logging.info(\"<STR_LIT>\".format(len(imgs_semseg_file_list)))<EOL>imgs_semseg_file_list.sort(<EOL>key=lambda s: int(s.replace('<STR_LIT:.>', '<STR_LIT:U+0020>').replace('<STR_LIT:_>', '<STR_LIT>').split('<STR_LIT:U+0020>')[-<NUM_LIT:2>])<EOL>)  <EOL>imgs_semseg_file_list = [os.path.join(folder_semseg, s) for s in imgs_semseg_file_list]<EOL>folder_insseg = os.path.join(path, extracted_filename, \"<STR_LIT>\")<EOL>imgs_insseg_file_list = load_file_list(path=folder_insseg, regx='<STR_LIT>', printable=False)<EOL>logging.info(\"<STR_LIT>\".format(len(imgs_semseg_file_list)))<EOL>imgs_insseg_file_list.sort(<EOL>key=lambda s: int(s.replace('<STR_LIT:.>', '<STR_LIT:U+0020>').replace('<STR_LIT:_>', '<STR_LIT>').split('<STR_LIT:U+0020>')[-<NUM_LIT:2>])<EOL>)  <EOL>imgs_insseg_file_list = [os.path.join(folder_insseg, s) for s in imgs_insseg_file_list]<EOL><DEDENT>else:<EOL><INDENT>imgs_semseg_file_list = []<EOL>imgs_insseg_file_list = []<EOL><DEDENT>folder_ann = os.path.join(path, extracted_filename, \"<STR_LIT>\")<EOL>imgs_ann_file_list = load_file_list(path=folder_ann, regx='<STR_LIT>', printable=False)<EOL>logging.info(<EOL>\"<STR_LIT>\".format(len(imgs_ann_file_list))<EOL>)<EOL>imgs_ann_file_list.sort(<EOL>key=lambda s: int(s.replace('<STR_LIT:.>', '<STR_LIT:U+0020>').replace('<STR_LIT:_>', '<STR_LIT>').split('<STR_LIT:U+0020>')[-<NUM_LIT:2>])<EOL>)  <EOL>imgs_ann_file_list = [os.path.join(folder_ann, s) for s in imgs_ann_file_list]<EOL>if dataset == \"<STR_LIT>\":  <EOL><INDENT>imgs_file_list_new = []<EOL>for ann in imgs_ann_file_list:<EOL><INDENT>ann = os.path.split(ann)[-<NUM_LIT:1>].split('<STR_LIT:.>')[<NUM_LIT:0>]<EOL>for im in imgs_file_list:<EOL><INDENT>if ann in 
im:<EOL><INDENT>imgs_file_list_new.append(im)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>imgs_file_list = imgs_file_list_new<EOL>logging.info(\"<STR_LIT>\" % len(imgs_file_list_new))<EOL><DEDENT>def convert(size, box):<EOL><INDENT>dw = <NUM_LIT:1.> / size[<NUM_LIT:0>]<EOL>dh = <NUM_LIT:1.> / size[<NUM_LIT:1>]<EOL>x = (box[<NUM_LIT:0>] + box[<NUM_LIT:1>]) / <NUM_LIT><EOL>y = (box[<NUM_LIT:2>] + box[<NUM_LIT:3>]) / <NUM_LIT><EOL>w = box[<NUM_LIT:1>] - box[<NUM_LIT:0>]<EOL>h = box[<NUM_LIT:3>] - box[<NUM_LIT:2>]<EOL>x = x * dw<EOL>w = w * dw<EOL>y = y * dh<EOL>h = h * dh<EOL>return x, y, w, h<EOL><DEDENT>def convert_annotation(file_name):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>in_file = open(file_name)<EOL>out_file = \"<STR_LIT>\"<EOL>tree = ET.parse(in_file)<EOL>root = tree.getroot()<EOL>size = root.find('<STR_LIT:size>')<EOL>w = int(size.find('<STR_LIT:width>').text)<EOL>h = int(size.find('<STR_LIT>').text)<EOL>n_objs = <NUM_LIT:0><EOL>for obj in root.iter('<STR_LIT:object>'):<EOL><INDENT>if dataset != \"<STR_LIT>\":<EOL><INDENT>difficult = obj.find('<STR_LIT>').text<EOL>cls = obj.find('<STR_LIT:name>').text<EOL>if cls not in classes or int(difficult) == <NUM_LIT:1>:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>else:<EOL><INDENT>cls = obj.find('<STR_LIT:name>').text<EOL>if cls not in classes:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>cls_id = classes.index(cls)<EOL>xmlbox = obj.find('<STR_LIT>')<EOL>b = (<EOL>float(xmlbox.find('<STR_LIT>').text), float(xmlbox.find('<STR_LIT>').text), float(xmlbox.find('<STR_LIT>').text),<EOL>float(xmlbox.find('<STR_LIT>').text)<EOL>)<EOL>bb = convert((w, h), b)<EOL>out_file += str(cls_id) + \"<STR_LIT:U+0020>\" + \"<STR_LIT:U+0020>\".join([str(a) for a in bb]) + '<STR_LIT:\\n>'<EOL>n_objs += <NUM_LIT:1><EOL>if cls in \"<STR_LIT>\":<EOL><INDENT>for part in obj.iter('<STR_LIT>'):<EOL><INDENT>cls = part.find('<STR_LIT:name>').text<EOL>if cls not in classes_in_person:<EOL><INDENT>continue<EOL><DEDENT>cls_id = classes.index(cls)<EOL>xmlbox = 
part.find('<STR_LIT>')<EOL>b = (<EOL>float(xmlbox.find('<STR_LIT>').text), float(xmlbox.find('<STR_LIT>').text),<EOL>float(xmlbox.find('<STR_LIT>').text), float(xmlbox.find('<STR_LIT>').text)<EOL>)<EOL>bb = convert((w, h), b)<EOL>out_file += str(cls_id) + \"<STR_LIT:U+0020>\" + \"<STR_LIT:U+0020>\".join([str(a) for a in bb]) + '<STR_LIT:\\n>'<EOL>n_objs += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>in_file.close()<EOL>return n_objs, out_file<EOL><DEDENT>logging.info(\"<STR_LIT>\")<EOL>n_objs_list = []<EOL>objs_info_list = []  <EOL>objs_info_dicts = {}<EOL>for idx, ann_file in enumerate(imgs_ann_file_list):<EOL><INDENT>n_objs, objs_info = convert_annotation(ann_file)<EOL>n_objs_list.append(n_objs)<EOL>objs_info_list.append(objs_info)<EOL>with tf.gfile.GFile(ann_file, '<STR_LIT:r>') as fid:<EOL><INDENT>xml_str = fid.read()<EOL><DEDENT>xml = etree.fromstring(xml_str)<EOL>data = _recursive_parse_xml_to_dict(xml)['<STR_LIT>']<EOL>objs_info_dicts.update({imgs_file_list[idx]: data})<EOL><DEDENT>return imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, classes, classes_in_person, classes_dict, n_objs_list, objs_info_list, objs_info_dicts<EOL>", "docstring": "Pascal VOC 2007/2012 Dataset.\n\n    It has 20 objects:\n    aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow, diningtable, dog, horse, motorbike, person, pottedplant, sheep, sofa, train, tvmonitor\n    and additional 3 classes : head, hand, foot for person.\n\n    Parameters\n    -----------\n    path : str\n        The path that the data is downloaded to, defaults is ``data/VOC``.\n    dataset : str\n        The VOC dataset version, `2012`, `2007`, `2007test` or `2012test`. 
We usually train model on `2007+2012` and test it on `2007test`.\n    contain_classes_in_person : boolean\n        Whether include head, hand and foot annotation, default is False.\n\n    Returns\n    ---------\n    imgs_file_list : list of str\n        Full paths of all images.\n    imgs_semseg_file_list : list of str\n        Full paths of all maps for semantic segmentation. Note that not all images have this map!\n    imgs_insseg_file_list : list of str\n        Full paths of all maps for instance segmentation. Note that not all images have this map!\n    imgs_ann_file_list : list of str\n        Full paths of all annotations for bounding box and object class, all images have this annotations.\n    classes : list of str\n        Classes in order.\n    classes_in_person : list of str\n        Classes in person.\n    classes_dict : dictionary\n        Class label to integer.\n    n_objs_list : list of int\n        Number of objects in all images in ``imgs_file_list`` in order.\n    objs_info_list : list of str\n        Darknet format for the annotation of all images in ``imgs_file_list`` in order. 
``[class_id x_centre y_centre width height]`` in ratio format.\n    objs_info_dicts : dictionary\n        The annotation of all images in ``imgs_file_list``, ``{imgs_file_list : dictionary for annotation}``,\n        format from `TensorFlow/Models/object-detection <https://github.com/tensorflow/models/blob/master/object_detection/create_pascal_tf_record.py>`__.\n\n    Examples\n    ----------\n    >>> imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list,\n    >>>     classes, classes_in_person, classes_dict,\n    >>>     n_objs_list, objs_info_list, objs_info_dicts = tl.files.load_voc_dataset(dataset=\"2012\", contain_classes_in_person=False)\n    >>> idx = 26\n    >>> print(classes)\n    ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']\n    >>> print(classes_dict)\n    {'sheep': 16, 'horse': 12, 'bicycle': 1, 'bottle': 4, 'cow': 9, 'sofa': 17, 'car': 6, 'dog': 11, 'cat': 7, 'person': 14, 'train': 18, 'diningtable': 10, 'aeroplane': 0, 'bus': 5, 'pottedplant': 15, 'tvmonitor': 19, 'chair': 8, 'bird': 2, 'boat': 3, 'motorbike': 13}\n    >>> print(imgs_file_list[idx])\n    data/VOC/VOC2012/JPEGImages/2007_000423.jpg\n    >>> print(n_objs_list[idx])\n    2\n    >>> print(imgs_ann_file_list[idx])\n    data/VOC/VOC2012/Annotations/2007_000423.xml\n    >>> print(objs_info_list[idx])\n    14 0.173 0.461333333333 0.142 0.496\n    14 0.828 0.542666666667 0.188 0.594666666667\n    >>> ann = tl.prepro.parse_darknet_ann_str_to_list(objs_info_list[idx])\n    >>> print(ann)\n    [[14, 0.173, 0.461333333333, 0.142, 0.496], [14, 0.828, 0.542666666667, 0.188, 0.594666666667]]\n    >>> c, b = tl.prepro.parse_darknet_ann_list_to_cls_box(ann)\n    >>> print(c, b)\n    [14, 14] [[0.173, 0.461333333333, 0.142, 0.496], [0.828, 0.542666666667, 0.188, 0.594666666667]]\n\n    References\n    -------------\n    - 
`Pascal VOC2012 Website <https://pjreddie.com/projects/pascal-voc-dataset-mirror/>`__.\n    - `Pascal VOC2007 Website <https://pjreddie.com/projects/pascal-voc-dataset-mirror/>`__.", "id": "f11134:m0"}
{"signature": "def load_cropped_svhn(path='<STR_LIT:data>', include_extra=True):", "body": "start_time = time.time()<EOL>path = os.path.join(path, '<STR_LIT>')<EOL>logging.info(\"<STR_LIT>\".format(path, include_extra))<EOL>url = \"<STR_LIT>\"<EOL>np_file = os.path.join(path, \"<STR_LIT>\")<EOL>if file_exists(np_file) is False:<EOL><INDENT>filename = \"<STR_LIT>\"<EOL>filepath = maybe_download_and_extract(filename, path, url)<EOL>mat = sio.loadmat(filepath)<EOL>X_train = mat['<STR_LIT:X>'] / <NUM_LIT>  <EOL>X_train = np.transpose(X_train, (<NUM_LIT:3>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>))<EOL>y_train = np.squeeze(mat['<STR_LIT:y>'], axis=<NUM_LIT:1>)<EOL>y_train[y_train == <NUM_LIT:10>] = <NUM_LIT:0>  <EOL>np.savez(np_file, X=X_train, y=y_train)<EOL>del_file(filepath)<EOL><DEDENT>else:<EOL><INDENT>v = np.load(np_file)<EOL>X_train = v['<STR_LIT:X>']<EOL>y_train = v['<STR_LIT:y>']<EOL><DEDENT>logging.info(\"<STR_LIT>\".format(len(y_train)))<EOL>np_file = os.path.join(path, \"<STR_LIT>\")<EOL>if file_exists(np_file) is False:<EOL><INDENT>filename = \"<STR_LIT>\"<EOL>filepath = maybe_download_and_extract(filename, path, url)<EOL>mat = sio.loadmat(filepath)<EOL>X_test = mat['<STR_LIT:X>'] / <NUM_LIT><EOL>X_test = np.transpose(X_test, (<NUM_LIT:3>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>))<EOL>y_test = np.squeeze(mat['<STR_LIT:y>'], axis=<NUM_LIT:1>)<EOL>y_test[y_test == <NUM_LIT:10>] = <NUM_LIT:0><EOL>np.savez(np_file, X=X_test, y=y_test)<EOL>del_file(filepath)<EOL><DEDENT>else:<EOL><INDENT>v = np.load(np_file)<EOL>X_test = v['<STR_LIT:X>']<EOL>y_test = v['<STR_LIT:y>']<EOL><DEDENT>logging.info(\"<STR_LIT>\".format(len(y_test)))<EOL>if include_extra:<EOL><INDENT>logging.info(\"<STR_LIT>\")<EOL>np_file = os.path.join(path, \"<STR_LIT>\")<EOL>if file_exists(np_file) is False:<EOL><INDENT>logging.info(\"<STR_LIT>\")<EOL>filename = \"<STR_LIT>\"<EOL>filepath = maybe_download_and_extract(filename, path, url)<EOL>mat = sio.loadmat(filepath)<EOL>X_extra = mat['<STR_LIT:X>'] / 
<NUM_LIT><EOL>X_extra = np.transpose(X_extra, (<NUM_LIT:3>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>))<EOL>y_extra = np.squeeze(mat['<STR_LIT:y>'], axis=<NUM_LIT:1>)<EOL>y_extra[y_extra == <NUM_LIT:10>] = <NUM_LIT:0><EOL>np.savez(np_file, X=X_extra, y=y_extra)<EOL>del_file(filepath)<EOL><DEDENT>else:<EOL><INDENT>v = np.load(np_file)<EOL>X_extra = v['<STR_LIT:X>']<EOL>y_extra = v['<STR_LIT:y>']<EOL><DEDENT>logging.info(\"<STR_LIT>\".format(len(y_extra), len(y_train)))<EOL>t = time.time()<EOL>X_train = np.concatenate((X_train, X_extra), <NUM_LIT:0>)<EOL>y_train = np.concatenate((y_train, y_extra), <NUM_LIT:0>)<EOL>logging.info(\"<STR_LIT>\".format(len(y_extra), len(y_train), time.time() - t))<EOL><DEDENT>else:<EOL><INDENT>logging.info(\"<STR_LIT>\")<EOL><DEDENT>logging.info(\"<STR_LIT>\" % (str(X_train.shape[<NUM_LIT:1>:<NUM_LIT:4>]), len(y_train), len(y_test)))<EOL>logging.info(\"<STR_LIT>\".format(int(time.time() - start_time)))<EOL>return X_train, y_train, X_test, y_test<EOL>", "docstring": "Load Cropped SVHN.\n\n    The Cropped Street View House Numbers (SVHN) Dataset contains 32x32x3 RGB images.\n    Digit '1' has label 1, '9' has label 9 and '0' has label 0 (the original dataset uses 10 to represent '0'), see `ufldl website <http://ufldl.stanford.edu/housenumbers/>`__.\n\n    Parameters\n    ----------\n    path : str\n        The path that the data is downloaded to.\n    include_extra : boolean\n        If True (default), add extra images to the training set.\n\n    Returns\n    -------\n    X_train, y_train, X_test, y_test: tuple\n        Return splitted training/test set respectively.\n\n    Examples\n    ---------\n    >>> X_train, y_train, X_test, y_test = tl.files.load_cropped_svhn(include_extra=False)\n    >>> tl.vis.save_images(X_train[0:100], [10, 10], 'svhn.png')", "id": "f11137:m4"}
{"signature": "def _load_mnist_dataset(shape, path, name='<STR_LIT>', url='<STR_LIT>'):", "body": "path = os.path.join(path, name)<EOL>def load_mnist_images(path, filename):<EOL><INDENT>filepath = maybe_download_and_extract(filename, path, url)<EOL>logging.info(filepath)<EOL>with gzip.open(filepath, '<STR_LIT:rb>') as f:<EOL><INDENT>data = np.frombuffer(f.read(), np.uint8, offset=<NUM_LIT:16>)<EOL><DEDENT>data = data.reshape(shape)<EOL>return data / np.float32(<NUM_LIT>)<EOL><DEDENT>def load_mnist_labels(path, filename):<EOL><INDENT>filepath = maybe_download_and_extract(filename, path, url)<EOL>with gzip.open(filepath, '<STR_LIT:rb>') as f:<EOL><INDENT>data = np.frombuffer(f.read(), np.uint8, offset=<NUM_LIT:8>)<EOL><DEDENT>return data<EOL><DEDENT>logging.info(\"<STR_LIT>\".format(name.upper(), path))<EOL>X_train = load_mnist_images(path, '<STR_LIT>')<EOL>y_train = load_mnist_labels(path, '<STR_LIT>')<EOL>X_test = load_mnist_images(path, '<STR_LIT>')<EOL>y_test = load_mnist_labels(path, '<STR_LIT>')<EOL>X_train, X_val = X_train[:-<NUM_LIT>], X_train[-<NUM_LIT>:]<EOL>y_train, y_val = y_train[:-<NUM_LIT>], y_train[-<NUM_LIT>:]<EOL>X_train = np.asarray(X_train, dtype=np.float32)<EOL>y_train = np.asarray(y_train, dtype=np.int32)<EOL>X_val = np.asarray(X_val, dtype=np.float32)<EOL>y_val = np.asarray(y_val, dtype=np.int32)<EOL>X_test = np.asarray(X_test, dtype=np.float32)<EOL>y_test = np.asarray(y_test, dtype=np.int32)<EOL>return X_train, y_train, X_val, y_val, X_test, y_test<EOL>", "docstring": "A generic function to load mnist-like dataset.\n\n    Parameters:\n    ----------\n    shape : tuple\n        The shape of digit images.\n    path : str\n        The path that the data is downloaded to.\n    name : str\n        The dataset name you want to use(the default is 'mnist').\n    url : str\n        The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/').", "id": "f11137:m2"}
{"signature": "def load_wmt_en_fr_dataset(path='<STR_LIT:data>'):", "body": "path = os.path.join(path, '<STR_LIT>')<EOL>_WMT_ENFR_TRAIN_URL = \"<STR_LIT>\"<EOL>_WMT_ENFR_DEV_URL = \"<STR_LIT>\"<EOL>def gunzip_file(gz_path, new_path):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>logging.info(\"<STR_LIT>\" % (gz_path, new_path))<EOL>with gzip.open(gz_path, \"<STR_LIT:rb>\") as gz_file:<EOL><INDENT>with open(new_path, \"<STR_LIT:wb>\") as new_file:<EOL><INDENT>for line in gz_file:<EOL><INDENT>new_file.write(line)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>def get_wmt_enfr_train_set(path):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>filename = \"<STR_LIT>\"<EOL>maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL, extract=True)<EOL>train_path = os.path.join(path, \"<STR_LIT>\")<EOL>gunzip_file(train_path + \"<STR_LIT>\", train_path + \"<STR_LIT>\")<EOL>gunzip_file(train_path + \"<STR_LIT>\", train_path + \"<STR_LIT>\")<EOL>return train_path<EOL><DEDENT>def get_wmt_enfr_dev_set(path):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>filename = \"<STR_LIT>\"<EOL>dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL, extract=False)<EOL>dev_name = \"<STR_LIT>\"<EOL>dev_path = os.path.join(path, \"<STR_LIT>\")<EOL>if not (gfile.Exists(dev_path + \"<STR_LIT>\") and gfile.Exists(dev_path + \"<STR_LIT>\")):<EOL><INDENT>logging.info(\"<STR_LIT>\" % dev_file)<EOL>with tarfile.open(dev_file, \"<STR_LIT>\") as dev_tar:<EOL><INDENT>fr_dev_file = dev_tar.getmember(\"<STR_LIT>\" + dev_name + \"<STR_LIT>\")<EOL>en_dev_file = dev_tar.getmember(\"<STR_LIT>\" + dev_name + \"<STR_LIT>\")<EOL>fr_dev_file.name = dev_name + \"<STR_LIT>\"  <EOL>en_dev_file.name = dev_name + \"<STR_LIT>\"<EOL>dev_tar.extract(fr_dev_file, path)<EOL>dev_tar.extract(en_dev_file, path)<EOL><DEDENT><DEDENT>return dev_path<EOL><DEDENT>logging.info(\"<STR_LIT>\".format(path))<EOL>train_path = get_wmt_enfr_train_set(path)<EOL>dev_path = get_wmt_enfr_dev_set(path)<EOL>return train_path, dev_path<EOL>", "docstring": "Load 
WMT'15 English-to-French translation dataset.\n\n    It will download the data from the WMT'15 Website (10^9-French-English corpus), and the 2013 news test from the same site as development set.\n    Returns the directories of training data and test data.\n\n    Parameters\n    ----------\n    path : str\n        The path that the data is downloaded to, defaults is ``data/wmt_en_fr/``.\n\n    References\n    ----------\n    - Code modified from /tensorflow/models/rnn/translation/data_utils.py\n\n    Notes\n    -----\n    Usually, it will take a long time to download this dataset.", "id": "f11137:m9"}
{"signature": "def del_file(filepath):", "body": "os.remove(filepath)<EOL>", "docstring": "Delete a file by given file path.", "id": "f11137:m29"}
{"signature": "def del_folder(folderpath):", "body": "shutil.rmtree(folderpath)<EOL>", "docstring": "Delete a folder by given folder path.", "id": "f11137:m30"}
{"signature": "def load_matt_mahoney_text8_dataset(path='<STR_LIT:data>'):", "body": "path = os.path.join(path, '<STR_LIT>')<EOL>logging.info(\"<STR_LIT>\".format(path))<EOL>filename = '<STR_LIT>'<EOL>url = '<STR_LIT>'<EOL>maybe_download_and_extract(filename, path, url, expected_bytes=<NUM_LIT>)<EOL>with zipfile.ZipFile(os.path.join(path, filename)) as f:<EOL><INDENT>word_list = f.read(f.namelist()[<NUM_LIT:0>]).split()<EOL>for idx, _ in enumerate(word_list):<EOL><INDENT>word_list[idx] = word_list[idx].decode()<EOL><DEDENT><DEDENT>return word_list<EOL>", "docstring": "Load Matt Mahoney's dataset.\n\n    Download a text file from Matt Mahoney's website\n    if not present, and make sure it's the right size.\n    Extract the first file enclosed in a zip file as a list of words.\n    This dataset can be used for Word Embedding.\n\n    Parameters\n    ----------\n    path : str\n        The path that the data is downloaded to, defaults is ``data/mm_test8/``.\n\n    Returns\n    --------\n    list of str\n        The raw text data e.g. [.... 'their', 'families', 'who', 'were', 'expelled', 'from', 'jerusalem', ...]\n\n    Examples\n    --------\n    >>> words = tl.files.load_matt_mahoney_text8_dataset()\n    >>> print('Data size', len(words))", "id": "f11137:m6"}
{"signature": "def load_fashion_mnist_dataset(shape=(-<NUM_LIT:1>, <NUM_LIT>), path='<STR_LIT:data>'):", "body": "return _load_mnist_dataset(<EOL>shape, path, name='<STR_LIT>', url='<STR_LIT>'<EOL>)<EOL>", "docstring": "Load the fashion mnist.\n\n    Automatically download fashion-MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 fashion images respectively, `examples <http://marubon-ds.blogspot.co.uk/2017/09/fashion-mnist-exploring.html>`__.\n\n    Parameters\n    ----------\n    shape : tuple\n        The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)).\n    path : str\n        The path that the data is downloaded to.\n\n    Returns\n    -------\n    X_train, y_train, X_val, y_val, X_test, y_test: tuple\n        Return splitted training/validation/test set respectively.\n\n    Examples\n    --------\n    >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1,784), path='datasets')\n    >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1, 28, 28, 1))", "id": "f11137:m1"}
{"signature": "def exists_or_mkdir(path, verbose=True):", "body": "if not os.path.exists(path):<EOL><INDENT>if verbose:<EOL><INDENT>logging.info(\"<STR_LIT>\" % path)<EOL><DEDENT>os.makedirs(path)<EOL>return False<EOL><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>logging.info(\"<STR_LIT>\" % path)<EOL><DEDENT>return True<EOL><DEDENT>", "docstring": "Check a folder by given name, if not exist, create the folder and return False,\n    if directory exists, return True.\n\n    Parameters\n    ----------\n    path : str\n        A folder path.\n    verbose : boolean\n        If True (default), prints results.\n\n    Returns\n    --------\n    boolean\n        True if folder already exist, otherwise, returns False and create the folder.\n\n    Examples\n    --------\n    >>> tl.files.exists_or_mkdir(\"checkpoints/train\")", "id": "f11137:m34"}
{"signature": "def load_mpii_pose_dataset(path='<STR_LIT:data>', is_16_pos_only=False):", "body": "path = os.path.join(path, '<STR_LIT>')<EOL>logging.info(\"<STR_LIT>\".format(path))<EOL>url = \"<STR_LIT>\"<EOL>tar_filename = \"<STR_LIT>\"<EOL>extracted_filename = \"<STR_LIT>\"<EOL>if folder_exists(os.path.join(path, extracted_filename)) is False:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(extracted_filename, path))<EOL>maybe_download_and_extract(tar_filename, path, url, extract=True)<EOL>del_file(os.path.join(path, tar_filename))<EOL><DEDENT>url = \"<STR_LIT>\"<EOL>tar_filename = \"<STR_LIT>\"<EOL>extracted_filename2 = \"<STR_LIT>\"<EOL>if folder_exists(os.path.join(path, extracted_filename2)) is False:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(extracted_filename, path))<EOL>maybe_download_and_extract(tar_filename, path, url, extract=True)<EOL>del_file(os.path.join(path, tar_filename))<EOL><DEDENT>logging.info(\"<STR_LIT>\")<EOL>ann_train_list = []<EOL>ann_test_list = []<EOL>img_train_list = []<EOL>img_test_list = []<EOL>def save_joints():<EOL><INDENT>mat = sio.loadmat(os.path.join(path, extracted_filename, \"<STR_LIT>\"))<EOL>for _, (anno, train_flag) in enumerate(  <EOL>zip(mat['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>, <NUM_LIT:0>][<NUM_LIT:0>], mat['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>, <NUM_LIT:0>][<NUM_LIT:0>])):<EOL><INDENT>img_fn = anno['<STR_LIT:image>']['<STR_LIT:name>'][<NUM_LIT:0>, <NUM_LIT:0>][<NUM_LIT:0>]<EOL>train_flag = int(train_flag)<EOL>if train_flag:<EOL><INDENT>img_train_list.append(img_fn)<EOL>ann_train_list.append([])<EOL><DEDENT>else:<EOL><INDENT>img_test_list.append(img_fn)<EOL>ann_test_list.append([])<EOL><DEDENT>head_rect = []<EOL>if '<STR_LIT>' in str(anno['<STR_LIT>'].dtype):<EOL><INDENT>head_rect = zip(<EOL>[x1[<NUM_LIT:0>, <NUM_LIT:0>] for x1 in anno['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]], [y1[<NUM_LIT:0>, <NUM_LIT:0>] for y1 in anno['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]],<EOL>[x2[<NUM_LIT:0>, <NUM_LIT:0>] for x2 in 
anno['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]], [y2[<NUM_LIT:0>, <NUM_LIT:0>] for y2 in anno['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]]<EOL>)<EOL><DEDENT>else:<EOL><INDENT>head_rect = []  <EOL><DEDENT>if '<STR_LIT>' in str(anno['<STR_LIT>'].dtype):<EOL><INDENT>annopoints = anno['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]<EOL>head_x1s = anno['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]<EOL>head_y1s = anno['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]<EOL>head_x2s = anno['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]<EOL>head_y2s = anno['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]<EOL>for annopoint, head_x1, head_y1, head_x2, head_y2 in zip(annopoints, head_x1s, head_y1s, head_x2s,<EOL>head_y2s):<EOL><INDENT>if annopoint.size:<EOL><INDENT>head_rect = [<EOL>float(head_x1[<NUM_LIT:0>, <NUM_LIT:0>]),<EOL>float(head_y1[<NUM_LIT:0>, <NUM_LIT:0>]),<EOL>float(head_x2[<NUM_LIT:0>, <NUM_LIT:0>]),<EOL>float(head_y2[<NUM_LIT:0>, <NUM_LIT:0>])<EOL>]<EOL>annopoint = annopoint['<STR_LIT>'][<NUM_LIT:0>, <NUM_LIT:0>]<EOL>j_id = [str(j_i[<NUM_LIT:0>, <NUM_LIT:0>]) for j_i in annopoint['<STR_LIT:id>'][<NUM_LIT:0>]]<EOL>x = [x[<NUM_LIT:0>, <NUM_LIT:0>] for x in annopoint['<STR_LIT:x>'][<NUM_LIT:0>]]<EOL>y = [y[<NUM_LIT:0>, <NUM_LIT:0>] for y in annopoint['<STR_LIT:y>'][<NUM_LIT:0>]]<EOL>joint_pos = {}<EOL>for _j_id, (_x, _y) in zip(j_id, zip(x, y)):<EOL><INDENT>joint_pos[int(_j_id)] = [float(_x), float(_y)]<EOL><DEDENT>if '<STR_LIT>' in str(annopoint.dtype):<EOL><INDENT>vis = [v[<NUM_LIT:0>] if v.size > <NUM_LIT:0> else [<NUM_LIT:0>] for v in annopoint['<STR_LIT>'][<NUM_LIT:0>]]<EOL>vis = dict([(k, int(v[<NUM_LIT:0>])) if len(v) > <NUM_LIT:0> else v for k, v in zip(j_id, vis)])<EOL><DEDENT>else:<EOL><INDENT>vis = None<EOL><DEDENT>if ((is_16_pos_only ==True) and (len(joint_pos) == <NUM_LIT:16>)) or (is_16_pos_only == False):<EOL><INDENT>data = {<EOL>'<STR_LIT:filename>': img_fn,<EOL>'<STR_LIT:train>': train_flag,<EOL>'<STR_LIT>': head_rect,<EOL>'<STR_LIT>': vis,<EOL>'<STR_LIT>': joint_pos<EOL>}<EOL>if 
train_flag:<EOL><INDENT>ann_train_list[-<NUM_LIT:1>].append(data)<EOL><DEDENT>else:<EOL><INDENT>ann_test_list[-<NUM_LIT:1>].append(data)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>save_joints()<EOL>logging.info(\"<STR_LIT>\")<EOL>img_dir = os.path.join(path, extracted_filename2)<EOL>_img_list = load_file_list(path=os.path.join(path, extracted_filename2), regx='<STR_LIT>', printable=False)<EOL>for i, im in enumerate(img_train_list):<EOL><INDENT>if im not in _img_list:<EOL><INDENT>print('<STR_LIT>'.format(im, img_dir))<EOL>del img_train_list[i]<EOL>del ann_train_list[i]<EOL><DEDENT><DEDENT>for i, im in enumerate(img_test_list):<EOL><INDENT>if im not in _img_list:<EOL><INDENT>print('<STR_LIT>'.format(im, img_dir))<EOL>del img_train_list[i]<EOL>del ann_train_list[i]<EOL><DEDENT><DEDENT>n_train_images = len(img_train_list)<EOL>n_test_images = len(img_test_list)<EOL>n_images = n_train_images + n_test_images<EOL>logging.info(\"<STR_LIT>\".format(n_images, n_train_images, n_test_images))<EOL>n_train_ann = len(ann_train_list)<EOL>n_test_ann = len(ann_test_list)<EOL>n_ann = n_train_ann + n_test_ann<EOL>logging.info(\"<STR_LIT>\".format(n_ann, n_train_ann, n_test_ann))<EOL>n_train_people = len(sum(ann_train_list, []))<EOL>n_test_people = len(sum(ann_test_list, []))<EOL>n_people = n_train_people + n_test_people<EOL>logging.info(\"<STR_LIT>\".format(n_people, n_train_people, n_test_people))<EOL>for i, value in enumerate(img_train_list):<EOL><INDENT>img_train_list[i] = os.path.join(img_dir, value)<EOL><DEDENT>for i, value in enumerate(img_test_list):<EOL><INDENT>img_test_list[i] = os.path.join(img_dir, value)<EOL><DEDENT>return img_train_list, ann_train_list, img_test_list, ann_test_list<EOL>", "docstring": "Load MPII Human Pose Dataset.\n\n    Parameters\n    -----------\n    path : str\n        The path that the data is downloaded to.\n    is_16_pos_only : boolean\n        If True, only return the peoples contain 16 pose keypoints. 
(Usually be used for single person pose estimation)\n\n    Returns\n    ----------\n    img_train_list : list of str\n        The image directories of training data.\n    ann_train_list : list of dict\n        The annotations of training data.\n    img_test_list : list of str\n        The image directories of testing data.\n    ann_test_list : list of dict\n        The annotations of testing data.\n\n    Examples\n    --------\n    >>> import pprint\n    >>> import tensorlayer as tl\n    >>> img_train_list, ann_train_list, img_test_list, ann_test_list = tl.files.load_mpii_pose_dataset()\n    >>> image = tl.vis.read_image(img_train_list[0])\n    >>> tl.vis.draw_mpii_pose_to_image(image, ann_train_list[0], 'image.png')\n    >>> pprint.pprint(ann_train_list[0])\n\n    References\n    -----------\n    - `MPII Human Pose Dataset. CVPR 14 <http://human-pose.mpi-inf.mpg.de>`__\n    - `MPII Human Pose Models. CVPR 16 <http://pose.mpi-inf.mpg.de>`__\n    - `MPII Human Shape, Poselet Conditioned Pictorial Structures and etc <http://pose.mpi-inf.mpg.de/#related>`__\n    - `MPII Keyponts and ID <http://human-pose.mpi-inf.mpg.de/#download>`__", "id": "f11137:m16"}
{"signature": "def load_mnist_dataset(shape=(-<NUM_LIT:1>, <NUM_LIT>), path='<STR_LIT:data>'):", "body": "return _load_mnist_dataset(shape, path, name='<STR_LIT>', url='<STR_LIT>')<EOL>", "docstring": "Load the original mnist.\n\n    Automatically download MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 digit images respectively.\n\n    Parameters\n    ----------\n    shape : tuple\n        The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)).\n    path : str\n        The path that the data is downloaded to.\n\n    Returns\n    -------\n    X_train, y_train, X_val, y_val, X_test, y_test: tuple\n        Return splitted training/validation/test set respectively.\n\n    Examples\n    --------\n    >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784), path='datasets')\n    >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1))", "id": "f11137:m0"}
{"signature": "def load_ptb_dataset(path='<STR_LIT:data>'):", "body": "path = os.path.join(path, '<STR_LIT>')<EOL>logging.info(\"<STR_LIT>\".format(path))<EOL>filename = '<STR_LIT>'<EOL>url = '<STR_LIT>'<EOL>maybe_download_and_extract(filename, path, url, extract=True)<EOL>data_path = os.path.join(path, '<STR_LIT>', '<STR_LIT:data>')<EOL>train_path = os.path.join(data_path, \"<STR_LIT>\")<EOL>valid_path = os.path.join(data_path, \"<STR_LIT>\")<EOL>test_path = os.path.join(data_path, \"<STR_LIT>\")<EOL>word_to_id = nlp.build_vocab(nlp.read_words(train_path))<EOL>train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)<EOL>valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)<EOL>test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)<EOL>vocab_size = len(word_to_id)<EOL>return train_data, valid_data, test_data, vocab_size<EOL>", "docstring": "Load Penn TreeBank (PTB) dataset.\n\n    It is used in many LANGUAGE MODELING papers,\n    including \"Empirical Evaluation and Combination of Advanced Language\n    Modeling Techniques\", \"Recurrent Neural Network Regularization\".\n    It consists of 929k training words, 73k validation words, and 82k test\n    words. It has 10k words in its vocabulary.\n\n    Parameters\n    ----------\n    path : str\n        The path that the data is downloaded to, defaults is ``data/ptb/``.\n\n    Returns\n    --------\n    train_data, valid_data, test_data : list of int\n        The training, validating and testing data in integer format.\n    vocab_size : int\n        The vocabulary size.\n\n    Examples\n    --------\n    >>> train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset()\n\n    References\n    ---------------\n    - ``tensorflow.models.rnn.ptb import reader``\n    - `Manual download <http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz>`__\n\n    Notes\n    ------\n    - If you want to get the raw data, see the source code.", "id": "f11137:m5"}
{"signature": "def natural_keys(text):", "body": "<EOL>def atoi(text):<EOL><INDENT>return int(text) if text.isdigit() else text<EOL><DEDENT>return [atoi(c) for c in re.split('<STR_LIT>', text)]<EOL>", "docstring": "Sort list of string with number in human order.\n\n    Examples\n    ----------\n    >>> l = ['im1.jpg', 'im31.jpg', 'im11.jpg', 'im21.jpg', 'im03.jpg', 'im05.jpg']\n    >>> l.sort(key=tl.files.natural_keys)\n    ['im1.jpg', 'im03.jpg', 'im05', 'im11.jpg', 'im21.jpg', 'im31.jpg']\n    >>> l.sort() # that is what we dont want\n    ['im03.jpg', 'im05', 'im1.jpg', 'im11.jpg', 'im21.jpg', 'im31.jpg']\n\n    References\n    ----------\n    - `link <http://nedbatchelder.com/blog/200712/human_sorting.html>`__", "id": "f11137:m36"}
{"signature": "def load_and_assign_npz(sess=None, name=None, network=None):", "body": "if network is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if sess is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not os.path.exists(name):<EOL><INDENT>logging.error(\"<STR_LIT>\".format(name))<EOL>return False<EOL><DEDENT>else:<EOL><INDENT>params = load_npz(name=name)<EOL>assign_params(sess, params, network)<EOL>logging.info(\"<STR_LIT>\".format(name))<EOL>return network<EOL><DEDENT>", "docstring": "Load model from npz and assign to a network.\n\n    Parameters\n    -------------\n    sess : Session\n        TensorFlow Session.\n    name : str\n        The name of the `.npz` file.\n    network : :class:`Layer`\n        The network to be assigned.\n\n    Returns\n    --------\n    False or network\n        Returns False, if the model is not exist.\n\n    Examples\n    --------\n    - See ``tl.files.save_npz``", "id": "f11137:m20"}
{"signature": "def load_npz(path='<STR_LIT>', name='<STR_LIT>'):", "body": "d = np.load(os.path.join(path, name))<EOL>return d['<STR_LIT>']<EOL>", "docstring": "Load the parameters of a Model saved by tl.files.save_npz().\n\n    Parameters\n    ----------\n    path : str\n        Folder path to `.npz` file.\n    name : str\n        The name of the `.npz` file.\n\n    Returns\n    --------\n    list of array\n        A list of parameters in order.\n\n    Examples\n    --------\n    - See ``tl.files.save_npz``\n\n    References\n    ----------\n    - `Saving dictionary using numpy <http://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez>`__", "id": "f11137:m18"}
{"signature": "def load_cifar10_dataset(shape=(-<NUM_LIT:1>, <NUM_LIT:32>, <NUM_LIT:32>, <NUM_LIT:3>), path='<STR_LIT:data>', plotable=False):", "body": "path = os.path.join(path, '<STR_LIT>')<EOL>logging.info(\"<STR_LIT>\".format(path))<EOL>def unpickle(file):<EOL><INDENT>fp = open(file, '<STR_LIT:rb>')<EOL>if sys.version_info.major == <NUM_LIT:2>:<EOL><INDENT>data = pickle.load(fp)<EOL><DEDENT>elif sys.version_info.major == <NUM_LIT:3>:<EOL><INDENT>data = pickle.load(fp, encoding='<STR_LIT>')<EOL><DEDENT>fp.close()<EOL>return data<EOL><DEDENT>filename = '<STR_LIT>'<EOL>url = '<STR_LIT>'<EOL>maybe_download_and_extract(filename, path, url, extract=True)<EOL>X_train = None<EOL>y_train = []<EOL>for i in range(<NUM_LIT:1>, <NUM_LIT:6>):<EOL><INDENT>data_dic = unpickle(os.path.join(path, '<STR_LIT>', \"<STR_LIT>\".format(i)))<EOL>if i == <NUM_LIT:1>:<EOL><INDENT>X_train = data_dic['<STR_LIT:data>']<EOL><DEDENT>else:<EOL><INDENT>X_train = np.vstack((X_train, data_dic['<STR_LIT:data>']))<EOL><DEDENT>y_train += data_dic['<STR_LIT>']<EOL><DEDENT>test_data_dic = unpickle(os.path.join(path, '<STR_LIT>', \"<STR_LIT>\"))<EOL>X_test = test_data_dic['<STR_LIT:data>']<EOL>y_test = np.array(test_data_dic['<STR_LIT>'])<EOL>if shape == (-<NUM_LIT:1>, <NUM_LIT:3>, <NUM_LIT:32>, <NUM_LIT:32>):<EOL><INDENT>X_test = X_test.reshape(shape)<EOL>X_train = X_train.reshape(shape)<EOL><DEDENT>elif shape == (-<NUM_LIT:1>, <NUM_LIT:32>, <NUM_LIT:32>, <NUM_LIT:3>):<EOL><INDENT>X_test = X_test.reshape(shape, order='<STR_LIT:F>')<EOL>X_train = X_train.reshape(shape, order='<STR_LIT:F>')<EOL>X_test = np.transpose(X_test, (<NUM_LIT:0>, <NUM_LIT:2>, <NUM_LIT:1>, <NUM_LIT:3>))<EOL>X_train = np.transpose(X_train, (<NUM_LIT:0>, <NUM_LIT:2>, <NUM_LIT:1>, <NUM_LIT:3>))<EOL><DEDENT>else:<EOL><INDENT>X_test = X_test.reshape(shape)<EOL>X_train = X_train.reshape(shape)<EOL><DEDENT>y_train = np.array(y_train)<EOL>if plotable:<EOL><INDENT>logging.info('<STR_LIT>')<EOL>fig = 
plt.figure(<NUM_LIT:1>)<EOL>logging.info('<STR_LIT>' % X_train[<NUM_LIT:0>].shape)<EOL>plt.ion()  <EOL>count = <NUM_LIT:1><EOL>for _ in range(<NUM_LIT:10>):  <EOL><INDENT>for _ in range(<NUM_LIT:10>):  <EOL><INDENT>_ = fig.add_subplot(<NUM_LIT:10>, <NUM_LIT:10>, count)<EOL>if shape == (-<NUM_LIT:1>, <NUM_LIT:3>, <NUM_LIT:32>, <NUM_LIT:32>):<EOL><INDENT>plt.imshow(np.transpose(X_train[count - <NUM_LIT:1>], (<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:0>)), interpolation='<STR_LIT>')<EOL><DEDENT>elif shape == (-<NUM_LIT:1>, <NUM_LIT:32>, <NUM_LIT:32>, <NUM_LIT:3>):<EOL><INDENT>plt.imshow(X_train[count - <NUM_LIT:1>], interpolation='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>plt.gca().xaxis.set_major_locator(plt.NullLocator())  <EOL>plt.gca().yaxis.set_major_locator(plt.NullLocator())<EOL>count = count + <NUM_LIT:1><EOL><DEDENT><DEDENT>plt.draw()  <EOL>plt.pause(<NUM_LIT:3>)  <EOL>logging.info(\"<STR_LIT>\" % X_train.shape)<EOL>logging.info(\"<STR_LIT>\" % y_train.shape)<EOL>logging.info(\"<STR_LIT>\" % X_test.shape)<EOL>logging.info(\"<STR_LIT>\" % y_test.shape)<EOL><DEDENT>X_train = np.asarray(X_train, dtype=np.float32)<EOL>X_test = np.asarray(X_test, dtype=np.float32)<EOL>y_train = np.asarray(y_train, dtype=np.int32)<EOL>y_test = np.asarray(y_test, dtype=np.int32)<EOL>return X_train, y_train, X_test, y_test<EOL>", "docstring": "Load CIFAR-10 dataset.\n\n    It consists of 60000 32x32 colour images in 10 classes, with\n    6000 images per class. There are 50000 training images and 10000 test images.\n\n    The dataset is divided into five training batches and one test batch, each with\n    10000 images. The test batch contains exactly 1000 randomly-selected images from\n    each class. 
The training batches contain the remaining images in random order,\n    but some training batches may contain more images from one class than another.\n    Between them, the training batches contain exactly 5000 images from each class.\n\n    Parameters\n    ----------\n    shape : tupe\n        The shape of digit images e.g. (-1, 3, 32, 32) and (-1, 32, 32, 3).\n    path : str\n        The path that the data is downloaded to, defaults is ``data/cifar10/``.\n    plotable : boolean\n        Whether to plot some image examples, False as default.\n\n    Examples\n    --------\n    >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))\n\n    References\n    ----------\n    - `CIFAR website <https://www.cs.toronto.edu/~kriz/cifar.html>`__\n    - `Data download link <https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz>`__\n    - `<https://teratail.com/questions/28932>`__", "id": "f11137:m3"}
{"signature": "def load_and_assign_npz_dict(name='<STR_LIT>', sess=None):", "body": "if sess is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not os.path.exists(name):<EOL><INDENT>logging.error(\"<STR_LIT>\".format(name))<EOL>return False<EOL><DEDENT>params = np.load(name)<EOL>if len(params.keys()) != len(set(params.keys())):<EOL><INDENT>raise Exception(\"<STR_LIT>\" % name)<EOL><DEDENT>ops = list()<EOL>for key in params.keys():<EOL><INDENT>try:<EOL><INDENT>varlist = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=key)<EOL>if len(varlist) > <NUM_LIT:1>:<EOL><INDENT>raise Exception(\"<STR_LIT>\" % key)<EOL><DEDENT>elif len(varlist) == <NUM_LIT:0>:<EOL><INDENT>raise KeyError<EOL><DEDENT>else:<EOL><INDENT>ops.append(varlist[<NUM_LIT:0>].assign(params[key]))<EOL>logging.info(\"<STR_LIT>\" % key)<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>logging.info(\"<STR_LIT>\" % key)<EOL><DEDENT><DEDENT>sess.run(ops)<EOL>logging.info(\"<STR_LIT>\" % name)<EOL>", "docstring": "Restore the parameters saved by ``tl.files.save_npz_dict()``.\n\n    Parameters\n    ----------\n    name : str\n        The name of the `.npz` file.\n    sess : Session\n        TensorFlow Session.", "id": "f11137:m22"}
{"signature": "def file_exists(filepath):", "body": "return os.path.isfile(filepath)<EOL>", "docstring": "Check whether a file exists by given file path.", "id": "f11137:m27"}
{"signature": "def save_ckpt(<EOL>sess=None, mode_name='<STR_LIT>', save_dir='<STR_LIT>', var_list=None, global_step=None, printable=False<EOL>):", "body": "if sess is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if var_list is None:<EOL><INDENT>var_list = []<EOL><DEDENT>ckpt_file = os.path.join(save_dir, mode_name)<EOL>if var_list == []:<EOL><INDENT>var_list = tf.global_variables()<EOL><DEDENT>logging.info(\"<STR_LIT>\" % (ckpt_file, len(var_list)))<EOL>if printable:<EOL><INDENT>for idx, v in enumerate(var_list):<EOL><INDENT>logging.info(\"<STR_LIT>\".format(idx, v.name, str(v.get_shape())))<EOL><DEDENT><DEDENT>saver = tf.train.Saver(var_list)<EOL>saver.save(sess, ckpt_file, global_step=global_step)<EOL>", "docstring": "Save parameters into `ckpt` file.\n\n    Parameters\n    ------------\n    sess : Session\n        TensorFlow Session.\n    mode_name : str\n        The name of the model, default is ``model.ckpt``.\n    save_dir : str\n        The path / file directory to the `ckpt`, default is ``checkpoint``.\n    var_list : list of tensor\n        The parameters / variables (tensor) to be saved. If empty, save all global variables (default).\n    global_step : int or None\n        Step number.\n    printable : boolean\n        Whether to print all parameters information.\n\n    See Also\n    --------\n    load_ckpt", "id": "f11137:m23"}
{"signature": "def load_file_list(path=None, regx='<STR_LIT>', printable=True, keep_prefix=False):", "body": "if path is None:<EOL><INDENT>path = os.getcwd()<EOL><DEDENT>file_list = os.listdir(path)<EOL>return_list = []<EOL>for _, f in enumerate(file_list):<EOL><INDENT>if re.search(regx, f):<EOL><INDENT>return_list.append(f)<EOL><DEDENT><DEDENT>if keep_prefix:<EOL><INDENT>for i, f in enumerate(return_list):<EOL><INDENT>return_list[i] = os.path.join(path, f)<EOL><DEDENT><DEDENT>if printable:<EOL><INDENT>logging.info('<STR_LIT>' % return_list)<EOL>logging.info('<STR_LIT>' % len(return_list))<EOL><DEDENT>return return_list<EOL>", "docstring": "r\"\"\"Return a file list in a folder by given a path and regular expression.\n\n    Parameters\n    ----------\n    path : str or None\n        A folder path, if `None`, use the current directory.\n    regx : str\n        The regx of file name.\n    printable : boolean\n        Whether to print the files infomation.\n    keep_prefix : boolean\n        Whether to keep path in the file name.\n\n    Examples\n    ----------\n    >>> file_list = tl.files.load_file_list(path=None, regx='w1pre_[0-9]+\\.(npz)')", "id": "f11137:m32"}
{"signature": "def load_flickr1M_dataset(tag='<STR_LIT>', size=<NUM_LIT:10>, path=\"<STR_LIT:data>\", n_threads=<NUM_LIT:50>, printable=False):", "body": "path = os.path.join(path, '<STR_LIT>')<EOL>logging.info(\"<STR_LIT>\".format(size * <NUM_LIT:10>, size * <NUM_LIT>))<EOL>images_zip = [<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'<EOL>]<EOL>tag_zip = '<STR_LIT>'<EOL>url = '<STR_LIT>'<EOL>for image_zip in images_zip[<NUM_LIT:0>:size]:<EOL><INDENT>image_folder = image_zip.split(\"<STR_LIT:.>\")[<NUM_LIT:0>]<EOL>if folder_exists(os.path.join(path, image_folder)) is False:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(image_folder, path))<EOL>maybe_download_and_extract(image_zip, path, url, extract=True)<EOL>del_file(os.path.join(path, image_zip))<EOL>shutil.move(os.path.join(path, '<STR_LIT>'), os.path.join(path, image_folder))<EOL><DEDENT>else:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(image_folder, path))<EOL><DEDENT><DEDENT>if folder_exists(os.path.join(path, \"<STR_LIT>\")) is False:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(path))<EOL>maybe_download_and_extract(tag_zip, path, url, extract=True)<EOL>del_file(os.path.join(path, tag_zip))<EOL><DEDENT>else:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(path))<EOL><DEDENT>images_list = []<EOL>images_folder_list = []<EOL>for i in range(<NUM_LIT:0>, size):<EOL><INDENT>images_folder_list += load_folder_list(path=os.path.join(path, '<STR_LIT>' % i))<EOL><DEDENT>images_folder_list.sort(key=lambda s: int(s.split('<STR_LIT:/>')[-<NUM_LIT:1>]))  <EOL>for folder in images_folder_list[<NUM_LIT:0>:size * <NUM_LIT:10>]:<EOL><INDENT>tmp = load_file_list(path=folder, regx='<STR_LIT>', printable=False)<EOL>tmp.sort(key=lambda s: int(s.split('<STR_LIT:.>')[-<NUM_LIT:2>]))  <EOL>images_list.extend([os.path.join(folder, x) for x in tmp])<EOL><DEDENT>tag_list = []<EOL>tag_folder_list = load_folder_list(os.path.join(path, 
\"<STR_LIT>\"))<EOL>tag_folder_list.sort(key=lambda s: int(os.path.basename(s)))<EOL>for folder in tag_folder_list[<NUM_LIT:0>:size * <NUM_LIT:10>]:<EOL><INDENT>tmp = load_file_list(path=folder, regx='<STR_LIT>', printable=False)<EOL>tmp.sort(key=lambda s: int(s.split('<STR_LIT:.>')[-<NUM_LIT:2>]))  <EOL>tmp = [os.path.join(folder, s) for s in tmp]<EOL>tag_list += tmp<EOL><DEDENT>logging.info(\"<STR_LIT>\".format(tag))<EOL>select_images_list = []<EOL>for idx, _val in enumerate(tag_list):<EOL><INDENT>tags = read_file(tag_list[idx]).split('<STR_LIT:\\n>')<EOL>if tag in tags:<EOL><INDENT>select_images_list.append(images_list[idx])<EOL><DEDENT><DEDENT>logging.info(\"<STR_LIT>\".format(tag))<EOL>images = visualize.read_images(select_images_list, '<STR_LIT>', n_threads=n_threads, printable=printable)<EOL>return images<EOL>", "docstring": "Load Flick1M dataset.\n\n    Returns a list of images by a given tag from Flickr1M dataset,\n    it will download Flickr1M from `the official website <http://press.liacs.nl/mirflickr/mirdownload.html>`__\n    at the first time you use it.\n\n    Parameters\n    ------------\n    tag : str or None\n        What images to return.\n            - If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search <https://www.flickr.com/search/>`__.\n            - If you want to get all images, set to ``None``.\n\n    size : int\n        integer between 1 to 10. 1 means 100k images ... 5 means 500k images, 10 means all 1 million images. 
Default is 10.\n    path : str\n        The path that the data is downloaded to, defaults is ``data/flickr25k/``.\n    n_threads : int\n        The number of thread to read image.\n    printable : boolean\n        Whether to print infomation when reading images, default is ``False``.\n\n    Examples\n    ----------\n    Use 200k images\n\n    >>> images = tl.files.load_flickr1M_dataset(tag='zebra', size=2)\n\n    Use 1 Million images\n\n    >>> images = tl.files.load_flickr1M_dataset(tag='zebra')", "id": "f11137:m11"}
{"signature": "def folder_exists(folderpath):", "body": "return os.path.isdir(folderpath)<EOL>", "docstring": "Check whether a folder exists by given folder path.", "id": "f11137:m28"}
{"signature": "def save_npz(save_list=None, name='<STR_LIT>', sess=None):", "body": "logging.info(\"<STR_LIT>\" % name)<EOL>if save_list is None:<EOL><INDENT>save_list = []<EOL><DEDENT>save_list_var = []<EOL>if sess:<EOL><INDENT>save_list_var = sess.run(save_list)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>save_list_var.extend([v.eval() for v in save_list])<EOL><DEDENT>except Exception:<EOL><INDENT>logging.info(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT><DEDENT>np.savez(name, params=save_list_var)<EOL>save_list_var = None<EOL>del save_list_var<EOL>logging.info(\"<STR_LIT>\")<EOL>", "docstring": "Input parameters and the file name, save parameters into .npz file. Use tl.utils.load_npz() to restore.\n\n    Parameters\n    ----------\n    save_list : list of tensor\n        A list of parameters (tensor) to be saved.\n    name : str\n        The name of the `.npz` file.\n    sess : None or Session\n        Session may be required in some case.\n\n    Examples\n    --------\n    Save model to npz\n\n    >>> tl.files.save_npz(network.all_params, name='model.npz', sess=sess)\n\n    Load model from npz (Method 1)\n\n    >>> load_params = tl.files.load_npz(name='model.npz')\n    >>> tl.files.assign_params(sess, load_params, network)\n\n    Load model from npz (Method 2)\n\n    >>> tl.files.load_and_assign_npz(sess=sess, name='model.npz', network=network)\n\n    Notes\n    -----\n    If you got session issues, you can change the value.eval() to value.eval(session=sess)\n\n    References\n    ----------\n    `Saving dictionary using numpy <http://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez>`__", "id": "f11137:m17"}
{"signature": "def roi_pooling(input, rois, pool_height, pool_width):", "body": "<EOL>out = roi_pooling_module.roi_pooling(input, rois, pool_height=pool_height, pool_width=pool_width)<EOL>output, argmax_output = out[<NUM_LIT:0>], out[<NUM_LIT:1>]<EOL>return output<EOL>", "docstring": "returns a tensorflow operation for computing the Region of Interest Pooling\n\n@arg input: feature maps on which to perform the pooling operation\n@arg rois: list of regions of interest in the format (feature map index, upper left, bottom right)\n@arg pool_width: size of the pooling sections", "id": "f11142:m0"}
{"signature": "@private_method<EOL><INDENT>def _to_b_h_w_n_c(self, x, x_shape):<DEDENT>", "body": "x = tf.reshape(x, (-<NUM_LIT:1>, x_shape[<NUM_LIT:4>], x_shape[<NUM_LIT:1>], x_shape[<NUM_LIT:2>], x_shape[<NUM_LIT:3>]))<EOL>x = tf.transpose(x, [<NUM_LIT:0>, <NUM_LIT:2>, <NUM_LIT:3>, <NUM_LIT:4>, <NUM_LIT:1>])<EOL>return x<EOL>", "docstring": "(b*c, h, w, n) -> (b, h, w, n, c)", "id": "f11162:c0:m2"}
{"signature": "@private_method<EOL><INDENT>def tf_flatten(self, a):<DEDENT>", "body": "return tf.reshape(a, [-<NUM_LIT:1>])<EOL>", "docstring": "Flatten tensor", "id": "f11162:c0:m3"}
{"signature": "@private_method<EOL><INDENT>def _to_bc_h_w(self, x, x_shape):<DEDENT>", "body": "x = tf.transpose(x, [<NUM_LIT:0>, <NUM_LIT:3>, <NUM_LIT:1>, <NUM_LIT:2>])<EOL>x = tf.reshape(x, (-<NUM_LIT:1>, x_shape[<NUM_LIT:1>], x_shape[<NUM_LIT:2>]))<EOL>return x<EOL>", "docstring": "(b, h, w, c) -> (b*c, h, w)", "id": "f11162:c0:m1"}
{"signature": "def _bias_scale(x, b, data_format):", "body": "if data_format == '<STR_LIT>':<EOL><INDENT>return x * b<EOL><DEDENT>elif data_format == '<STR_LIT>':<EOL><INDENT>return x * _to_channel_first_bias(b)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % data_format)<EOL><DEDENT>", "docstring": "The multiplication counter part of tf.nn.bias_add.", "id": "f11170:m1"}
{"signature": "def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, data_format, name=None):", "body": "with ops.name_scope(name, '<STR_LIT>', [x, mean, variance, scale, offset]):<EOL><INDENT>inv = math_ops.rsqrt(variance + variance_epsilon)<EOL>if scale is not None:<EOL><INDENT>inv *= scale<EOL><DEDENT>a = math_ops.cast(inv, x.dtype)<EOL>b = math_ops.cast(offset - mean * inv if offset is not None else -mean * inv, x.dtype)<EOL>df = {'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>'}<EOL>return _bias_add(_bias_scale(x, a, df[data_format]), b, df[data_format])<EOL><DEDENT>", "docstring": "Data Format aware version of tf.nn.batch_normalization.", "id": "f11170:m3"}
{"signature": "def _bias_add(x, b, data_format):", "body": "if data_format == '<STR_LIT>':<EOL><INDENT>return tf.add(x, b)<EOL><DEDENT>elif data_format == '<STR_LIT>':<EOL><INDENT>return tf.add(x, _to_channel_first_bias(b))<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % data_format)<EOL><DEDENT>", "docstring": "Alternative implementation of tf.nn.bias_add which is compatiable with tensorRT.", "id": "f11170:m2"}
{"signature": "def print_layers(self):", "body": "for i, layer in enumerate(self.all_layers):<EOL><INDENT>logging.info(<EOL>\"<STR_LIT>\".format(i, layer.name, str(layer.get_shape()), layer.dtype.name)<EOL>)<EOL><DEDENT>", "docstring": "Print all info of layers in the network.", "id": "f11189:c1:m2"}
{"signature": "def print_params(self, details=True, session=None):", "body": "for i, p in enumerate(self.all_params):<EOL><INDENT>if details:<EOL><INDENT>try:<EOL><INDENT>val = p.eval(session=session)<EOL>logging.info(<EOL>\"<STR_LIT>\".<EOL>format(i, p.name, str(val.shape), p.dtype.name, val.mean(), np.median(val), val.std())<EOL>)<EOL><DEDENT>except Exception as e:<EOL><INDENT>logging.info(str(e))<EOL>raise Exception(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(i, p.name, str(p.get_shape()), p.dtype.name))<EOL><DEDENT><DEDENT>logging.info(\"<STR_LIT>\" % self.count_params())<EOL>", "docstring": "Print all info of parameters in the network", "id": "f11189:c1:m1"}
{"signature": "@protected_method<EOL><INDENT>def _get_init_args(self, skip=<NUM_LIT:4>):<DEDENT>", "body": "stack = inspect.stack()<EOL>if len(stack) < skip + <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>args, _, _, values = inspect.getargvalues(stack[skip][<NUM_LIT:0>])<EOL>params = {}<EOL>for arg in args:<EOL><INDENT>if values[arg] is not None and arg not in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>val = values[arg]<EOL>if inspect.isfunction(val):<EOL><INDENT>params[arg] = {\"<STR_LIT>\": val.__module__, \"<STR_LIT>\": val.__name__}<EOL><DEDENT>elif arg.endswith('<STR_LIT>'):<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>params[arg] = val<EOL><DEDENT><DEDENT><DEDENT>return params<EOL>", "docstring": "Get all arguments of current layer for saving the graph.", "id": "f11189:c1:m11"}
{"signature": "def transformer(U, theta, out_size, name='<STR_LIT>'):", "body": "def _repeat(x, n_repeats):<EOL><INDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>rep = tf.transpose(tf.expand_dims(tf.ones(shape=tf.stack([<EOL>n_repeats,<EOL>])), <NUM_LIT:1>), [<NUM_LIT:1>, <NUM_LIT:0>])<EOL>rep = tf.cast(rep, '<STR_LIT>')<EOL>x = tf.matmul(tf.reshape(x, (-<NUM_LIT:1>, <NUM_LIT:1>)), rep)<EOL>return tf.reshape(x, [-<NUM_LIT:1>])<EOL><DEDENT><DEDENT>def _interpolate(im, x, y, out_size):<EOL><INDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>num_batch = tf.shape(im)[<NUM_LIT:0>]<EOL>height = tf.shape(im)[<NUM_LIT:1>]<EOL>width = tf.shape(im)[<NUM_LIT:2>]<EOL>channels = tf.shape(im)[<NUM_LIT:3>]<EOL>x = tf.cast(x, '<STR_LIT>')<EOL>y = tf.cast(y, '<STR_LIT>')<EOL>height_f = tf.cast(height, '<STR_LIT>')<EOL>width_f = tf.cast(width, '<STR_LIT>')<EOL>out_height = out_size[<NUM_LIT:0>]<EOL>out_width = out_size[<NUM_LIT:1>]<EOL>zero = tf.zeros([], dtype='<STR_LIT>')<EOL>max_y = tf.cast(tf.shape(im)[<NUM_LIT:1>] - <NUM_LIT:1>, '<STR_LIT>')<EOL>max_x = tf.cast(tf.shape(im)[<NUM_LIT:2>] - <NUM_LIT:1>, '<STR_LIT>')<EOL>x = (x + <NUM_LIT:1.0>) * (width_f) / <NUM_LIT><EOL>y = (y + <NUM_LIT:1.0>) * (height_f) / <NUM_LIT><EOL>x0 = tf.cast(tf.floor(x), '<STR_LIT>')<EOL>x1 = x0 + <NUM_LIT:1><EOL>y0 = tf.cast(tf.floor(y), '<STR_LIT>')<EOL>y1 = y0 + <NUM_LIT:1><EOL>x0 = tf.clip_by_value(x0, zero, max_x)<EOL>x1 = tf.clip_by_value(x1, zero, max_x)<EOL>y0 = tf.clip_by_value(y0, zero, max_y)<EOL>y1 = tf.clip_by_value(y1, zero, max_y)<EOL>dim2 = width<EOL>dim1 = width * height<EOL>base = _repeat(tf.range(num_batch) * dim1, out_height * out_width)<EOL>base_y0 = base + y0 * dim2<EOL>base_y1 = base + y1 * dim2<EOL>idx_a = base_y0 + x0<EOL>idx_b = base_y1 + x0<EOL>idx_c = base_y0 + x1<EOL>idx_d = base_y1 + x1<EOL>im_flat = tf.reshape(im, tf.stack([-<NUM_LIT:1>, channels]))<EOL>im_flat = tf.cast(im_flat, '<STR_LIT>')<EOL>Ia = tf.gather(im_flat, idx_a)<EOL>Ib = tf.gather(im_flat, 
idx_b)<EOL>Ic = tf.gather(im_flat, idx_c)<EOL>Id = tf.gather(im_flat, idx_d)<EOL>x0_f = tf.cast(x0, '<STR_LIT>')<EOL>x1_f = tf.cast(x1, '<STR_LIT>')<EOL>y0_f = tf.cast(y0, '<STR_LIT>')<EOL>y1_f = tf.cast(y1, '<STR_LIT>')<EOL>wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), <NUM_LIT:1>)<EOL>wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), <NUM_LIT:1>)<EOL>wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), <NUM_LIT:1>)<EOL>wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), <NUM_LIT:1>)<EOL>output = tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id])<EOL>return output<EOL><DEDENT><DEDENT>def _meshgrid(height, width):<EOL><INDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>x_t = tf.matmul(<EOL>tf.ones(shape=tf.stack([height, <NUM_LIT:1>])),<EOL>tf.transpose(tf.expand_dims(tf.linspace(-<NUM_LIT:1.0>, <NUM_LIT:1.0>, width), <NUM_LIT:1>), [<NUM_LIT:1>, <NUM_LIT:0>])<EOL>)<EOL>y_t = tf.matmul(tf.expand_dims(tf.linspace(-<NUM_LIT:1.0>, <NUM_LIT:1.0>, height), <NUM_LIT:1>), tf.ones(shape=tf.stack([<NUM_LIT:1>, width])))<EOL>x_t_flat = tf.reshape(x_t, (<NUM_LIT:1>, -<NUM_LIT:1>))<EOL>y_t_flat = tf.reshape(y_t, (<NUM_LIT:1>, -<NUM_LIT:1>))<EOL>ones = tf.ones_like(x_t_flat)<EOL>grid = tf.concat(axis=<NUM_LIT:0>, values=[x_t_flat, y_t_flat, ones])<EOL>return grid<EOL><DEDENT><DEDENT>def _transform(theta, input_dim, out_size):<EOL><INDENT>with tf.variable_scope('<STR_LIT>'):<EOL><INDENT>num_batch = tf.shape(input_dim)[<NUM_LIT:0>]<EOL>num_channels = tf.shape(input_dim)[<NUM_LIT:3>]<EOL>theta = tf.reshape(theta, (-<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>))<EOL>theta = tf.cast(theta, '<STR_LIT>')<EOL>out_height = out_size[<NUM_LIT:0>]<EOL>out_width = out_size[<NUM_LIT:1>]<EOL>grid = _meshgrid(out_height, out_width)<EOL>grid = tf.expand_dims(grid, <NUM_LIT:0>)<EOL>grid = tf.reshape(grid, [-<NUM_LIT:1>])<EOL>grid = tf.tile(grid, tf.stack([num_batch]))<EOL>grid = tf.reshape(grid, tf.stack([num_batch, <NUM_LIT:3>, -<NUM_LIT:1>]))<EOL>T_g = tf.matmul(theta, grid)<EOL>x_s = tf.slice(T_g, [<NUM_LIT:0>, 
<NUM_LIT:0>, <NUM_LIT:0>], [-<NUM_LIT:1>, <NUM_LIT:1>, -<NUM_LIT:1>])<EOL>y_s = tf.slice(T_g, [<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>], [-<NUM_LIT:1>, <NUM_LIT:1>, -<NUM_LIT:1>])<EOL>x_s_flat = tf.reshape(x_s, [-<NUM_LIT:1>])<EOL>y_s_flat = tf.reshape(y_s, [-<NUM_LIT:1>])<EOL>input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, out_size)<EOL>output = tf.reshape(input_transformed, tf.stack([num_batch, out_height, out_width, num_channels]))<EOL>return output<EOL><DEDENT><DEDENT>with tf.variable_scope(name):<EOL><INDENT>output = _transform(theta, U, out_size)<EOL>return output<EOL><DEDENT>", "docstring": "Spatial Transformer Layer for `2D Affine Transformation <https://en.wikipedia.org/wiki/Affine_transformation>`__\n    , see :class:`SpatialTransformer2dAffineLayer` class.\n\n    Parameters\n    ----------\n    U : list of float\n        The output of a convolutional net should have the\n        shape [num_batch, height, width, num_channels].\n    theta: float\n        The output of the localisation network should be [num_batch, 6], value range should be [0, 1] (via tanh).\n    out_size: tuple of int\n        The size of the output of the network (height, width)\n    name: str\n        Optional function name\n\n    Returns\n    -------\n    Tensor\n        The transformed tensor.\n\n    References\n    ----------\n    - `Spatial Transformer Networks <https://arxiv.org/abs/1506.02025>`__\n    - `TensorFlow/Models <https://github.com/tensorflow/models/tree/master/transformer>`__\n\n    Notes\n    -----\n    To initialize the network to the identity transform init.\n\n    >>> import tensorflow as tf\n    >>> # ``theta`` to\n    >>> identity = np.array([[1., 0., 0.], [0., 1., 0.]])\n    >>> identity = identity.flatten()\n    >>> theta = tf.Variable(initial_value=identity)", "id": "f11193:m0"}
{"signature": "@deprecated_alias(printable='<STR_LIT>', end_support_version=<NUM_LIT>)  <EOL>def get_variables_with_name(name=None, train_only=True, verbose=False):", "body": "if name is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>logging.info(\"<STR_LIT>\" % name)<EOL>if train_only:<EOL><INDENT>t_vars = tf.trainable_variables()<EOL><DEDENT>else:<EOL><INDENT>t_vars = tf.global_variables()<EOL><DEDENT>d_vars = [var for var in t_vars if name in var.name]<EOL>if verbose:<EOL><INDENT>for idx, v in enumerate(d_vars):<EOL><INDENT>logging.info(\"<STR_LIT>\".format(idx, v.name, str(v.get_shape())))<EOL><DEDENT><DEDENT>return d_vars<EOL>", "docstring": "Get a list of TensorFlow variables by a given name scope.\n\n    Parameters\n    ----------\n    name : str\n        Get the variables that contain this name.\n    train_only : boolean\n        If Ture, only get the trainable variables.\n    verbose : boolean\n        If True, print the information of all variables.\n\n    Returns\n    -------\n    list of Tensor\n        A list of TensorFlow variables\n\n    Examples\n    --------\n    >>> import tensorlayer as tl\n    >>> dense_vars = tl.layers.get_variables_with_name('dense', True, True)", "id": "f11194:m6"}
{"signature": "@tf.RegisterGradient(\"<STR_LIT>\")<EOL>def _quantize_grad(op, grad):", "body": "return tf.clip_by_value(grad, -<NUM_LIT:1>, <NUM_LIT:1>)<EOL>", "docstring": "Clip and binarize tensor using the straight through estimator (STE) for the gradient.", "id": "f11194:m19"}
{"signature": "def __call__(self, inputs, state, scope=None):", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Run this RNN cell on inputs, starting from the given state.", "id": "f11199:c2:m0"}
{"signature": "def target_mask_op(data, pad_val=<NUM_LIT:0>):  ", "body": "data_shape_size = data.get_shape().ndims<EOL>if data_shape_size == <NUM_LIT:3>:<EOL><INDENT>return tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=<NUM_LIT:2>), dtype=tf.int32)<EOL><DEDENT>elif data_shape_size == <NUM_LIT:2>:<EOL><INDENT>return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32)<EOL><DEDENT>elif data_shape_size == <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (data_shape_size))<EOL><DEDENT>", "docstring": "Return tensor for mask, if input is ``tf.string``.", "id": "f11199:m5"}
{"signature": "def retrieve_seq_length_op2(data):", "body": "return tf.reduce_sum(tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), <NUM_LIT:1>)<EOL>", "docstring": "An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)],\n    it can be used when the features of padding (on right hand side) are all zeros.\n\n    Parameters\n    -----------\n    data : tensor\n        [batch_size, n_step(max)] with zero padding on right hand side.\n\n    Examples\n    --------\n    >>> data = [[1,2,0,0,0],\n    ...         [1,2,3,0,0],\n    ...         [1,2,6,1,0]]\n    >>> o = retrieve_seq_length_op2(data)\n    >>> sess = tf.InteractiveSession()\n    >>> tl.layers.initialize_global_variables(sess)\n    >>> print(o.eval())\n    [2 3 4]", "id": "f11199:m3"}
{"signature": "def __init__(<EOL>self, shape, filter_size, num_features, forget_bias=<NUM_LIT:1.0>, input_size=None, state_is_tuple=False,<EOL>act=tf.nn.tanh<EOL>):", "body": "<EOL>if input_size is not None:<EOL><INDENT>logging.warn(\"<STR_LIT>\", self)<EOL><DEDENT>self.shape = shape<EOL>self.filter_size = filter_size<EOL>self.num_features = num_features<EOL>self._forget_bias = forget_bias<EOL>self._state_is_tuple = state_is_tuple<EOL>self._activation = act<EOL>", "docstring": "Initialize the basic Conv LSTM cell.", "id": "f11199:c3:m0"}
{"signature": "@property<EOL><INDENT>def output_size(self):<DEDENT>", "body": "return self._num_units<EOL>", "docstring": "Number of units in outputs.", "id": "f11199:c3:m2"}
{"signature": "def binary_dilation(x, radius=<NUM_LIT:3>):", "body": "mask = disk(radius)<EOL>x = _binary_dilation(x, selem=mask)<EOL>return x<EOL>", "docstring": "Return fast binary morphological dilation of an image.\n    see `skimage.morphology.binary_dilation <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_dilation>`__.\n\n    Parameters\n    -----------\n    x : 2D array\n        A binary image.\n    radius : int\n        For the radius of mask.\n\n    Returns\n    -------\n    numpy.array\n        A processed binary image.", "id": "f11202:m50"}
{"signature": "def affine_vertical_flip_matrix(prob=<NUM_LIT:0.5>):", "body": "factor = np.random.uniform(<NUM_LIT:0>, <NUM_LIT:1>)<EOL>if prob >= factor:<EOL><INDENT>filp_matrix = np.array([[ <NUM_LIT:1.> , <NUM_LIT:0.>, <NUM_LIT:0.> ],[ <NUM_LIT:0.>, -<NUM_LIT:1.>, <NUM_LIT:0.> ],[ <NUM_LIT:0.>, <NUM_LIT:0.>, <NUM_LIT:1.> ]])<EOL>return filp_matrix<EOL><DEDENT>else:<EOL><INDENT>filp_matrix = np.array([[ <NUM_LIT:1.> , <NUM_LIT:0.>, <NUM_LIT:0.> ],[ <NUM_LIT:0.>, <NUM_LIT:1.>, <NUM_LIT:0.> ],[ <NUM_LIT:0.>, <NUM_LIT:0.>, <NUM_LIT:1.> ]])<EOL>return filp_matrix<EOL><DEDENT>", "docstring": "Create an affine transformation for image vertical flipping.\n    NOTE: In OpenCV, x is width and y is height.\n\n    Parameters\n    ----------\n    prob : float\n        Probability to flip the image. 1.0 means always flip.\n\n    Returns\n    -------\n    numpy.array\n        An affine transform matrix.", "id": "f11202:m3"}
{"signature": "def shift(<EOL>x, wrg=<NUM_LIT:0.1>, hrg=<NUM_LIT:0.1>, is_random=False, row_index=<NUM_LIT:0>, col_index=<NUM_LIT:1>, channel_index=<NUM_LIT:2>, fill_mode='<STR_LIT>', cval=<NUM_LIT:0.>,<EOL>order=<NUM_LIT:1><EOL>):", "body": "h, w = x.shape[row_index], x.shape[col_index]<EOL>if is_random:<EOL><INDENT>tx = np.random.uniform(-hrg, hrg) * h<EOL>ty = np.random.uniform(-wrg, wrg) * w<EOL><DEDENT>else:<EOL><INDENT>tx, ty = hrg * h, wrg * w<EOL><DEDENT>translation_matrix = np.array([[<NUM_LIT:1>, <NUM_LIT:0>, tx], [<NUM_LIT:0>, <NUM_LIT:1>, ty], [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>]])<EOL>transform_matrix = translation_matrix  <EOL>x = affine_transform(x, transform_matrix, channel_index, fill_mode, cval, order)<EOL>return x<EOL>", "docstring": "Shift an image randomly or non-randomly.\n\n    Parameters\n    -----------\n    x : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    wrg : float\n        Percentage of shift in axis x, usually -0.25 ~ 0.25.\n    hrg : float\n        Percentage of shift in axis y, usually -0.25 ~ 0.25.\n    is_random : boolean\n        If True, randomly shift. Default is False.\n    row_index col_index and channel_index : int\n        Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).\n    fill_mode : str\n        Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__\n    cval : float\n        Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.\n    order : int\n        The order of interpolation. The order has to be in the range 0-5. 
See ``tl.prepro.affine_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__\n\n    Returns\n    -------\n    numpy.array\n        A processed image.", "id": "f11202:m19"}
{"signature": "def affine_zoom_matrix(zoom_range=(<NUM_LIT>, <NUM_LIT>)):", "body": "if isinstance(zoom_range, (float, int)):<EOL><INDENT>scale = zoom_range<EOL><DEDENT>elif isinstance(zoom_range, tuple):<EOL><INDENT>scale = np.random.uniform(zoom_range[<NUM_LIT:0>], zoom_range[<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>zoom_matrix = np.array([[scale, <NUM_LIT:0>, <NUM_LIT:0>],[<NUM_LIT:0>, scale, <NUM_LIT:0>],[<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>]])<EOL>return zoom_matrix<EOL>", "docstring": "Create an affine transform matrix for zooming/scaling an image's height and width.\n    OpenCV format, x is width.\n\n    Parameters\n    -----------\n    x : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    zoom_range : float or tuple of 2 floats\n        The zooming/scaling ratio, greater than 1 means larger.\n            - float, a fixed ratio.\n            - tuple of 2 floats, randomly sample a value as the ratio between these 2 values.\n\n    Returns\n    -------\n    numpy.array\n        An affine transform matrix.", "id": "f11202:m6"}
{"signature": "def affine_transform_keypoints(coords_list, transform_matrix):", "body": "coords_result_list = []<EOL>for coords in coords_list:<EOL><INDENT>coords = np.asarray(coords)<EOL>coords = coords.transpose([<NUM_LIT:1>, <NUM_LIT:0>])<EOL>coords = np.insert(coords, <NUM_LIT:2>, <NUM_LIT:1>, axis=<NUM_LIT:0>)<EOL>coords_result = np.matmul(transform_matrix, coords)<EOL>coords_result = coords_result[<NUM_LIT:0>:<NUM_LIT:2>, :].transpose([<NUM_LIT:1>, <NUM_LIT:0>])<EOL>coords_result_list.append(coords_result)<EOL><DEDENT>return coords_result_list<EOL>", "docstring": "Transform keypoint coordinates according to a given affine transform matrix.\n    OpenCV format, x is width.\n\n    Note that, for pose estimation task, flipping requires maintaining the left and right body information.\n    We should not flip the left and right body, so please use ``tl.prepro.keypoint_random_flip``.\n\n    Parameters\n    -----------\n    coords_list : list of list of tuple/list\n        The coordinates\n        e.g., the keypoint coordinates of every person in an image.\n    transform_matrix : numpy.array\n        Transform matrix, OpenCV format.\n\n    Examples\n    ---------\n    >>> # 1. get all affine transform matrices\n    >>> M_rotate = tl.prepro.affine_rotation_matrix(angle=20)\n    >>> M_flip = tl.prepro.affine_horizontal_flip_matrix(prob=1)\n    >>> # 2. combine all affine transform matrices to one matrix\n    >>> M_combined = dot(M_flip).dot(M_rotate)\n    >>> # 3. transfrom the matrix from Cartesian coordinate (the origin in the middle of image)\n    >>> # to Image coordinate (the origin on the top-left of image)\n    >>> transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, x=w, y=h)\n    >>> # 4. then we can transfrom the image once for all transformations\n    >>> result = tl.prepro.affine_transform_cv2(image, transform_matrix)  # 76 times faster\n    >>> # 5. 
transform keypoint coordinates\n    >>> coords = [[(50, 100), (100, 100), (100, 50), (200, 200)], [(250, 50), (200, 50), (200, 100)]]\n    >>> coords_result = tl.prepro.affine_transform_keypoints(coords, transform_matrix)", "id": "f11202:m11"}
{"signature": "def obj_box_coords_rescale(coords=None, shape=None):", "body": "if coords is None:<EOL><INDENT>coords = []<EOL><DEDENT>if shape is None:<EOL><INDENT>shape = [<NUM_LIT:100>, <NUM_LIT:200>]<EOL><DEDENT>imh, imw = shape[<NUM_LIT:0>], shape[<NUM_LIT:1>]<EOL>imh = imh * <NUM_LIT:1.0>  <EOL>imw = imw * <NUM_LIT:1.0><EOL>coords_new = list()<EOL>for coord in coords:<EOL><INDENT>if len(coord) != <NUM_LIT:4>:<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>x = coord[<NUM_LIT:0>] / imw<EOL>y = coord[<NUM_LIT:1>] / imh<EOL>w = coord[<NUM_LIT:2>] / imw<EOL>h = coord[<NUM_LIT:3>] / imh<EOL>coords_new.append([x, y, w, h])<EOL><DEDENT>return coords_new<EOL>", "docstring": "Scale down a list of coordinates from pixel unit to the ratio of image size i.e. in the range of [0, 1].\n\n    Parameters\n    ------------\n    coords : list of list of 4 ints or None\n        For coordinates of more than one images .e.g.[[x, y, w, h], [x, y, w, h], ...].\n    shape : list of 2 int or None\n        \u3010height, width].\n\n    Returns\n    -------\n    list of list of 4 numbers\n        A list of new bounding boxes.\n\n\n    Examples\n    ---------\n    >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100])\n    >>> print(coords)\n      [[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]]\n    >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100])\n    >>> print(coords)\n      [[0.3, 0.8, 0.5, 1.0]]\n    >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])\n    >>> print(coords)\n      [[0.15, 0.4, 0.25, 0.5]]\n\n    Returns\n    -------\n    list of 4 numbers\n        New coordinates.", "id": "f11202:m54"}
{"signature": "def parse_darknet_ann_str_to_list(annotations):", "body": "annotations = annotations.split(\"<STR_LIT:\\n>\")<EOL>ann = []<EOL>for a in annotations:<EOL><INDENT>a = a.split()<EOL>if len(a) == <NUM_LIT:5>:<EOL><INDENT>for i, _v in enumerate(a):<EOL><INDENT>if i == <NUM_LIT:0>:<EOL><INDENT>a[i] = int(a[i])<EOL><DEDENT>else:<EOL><INDENT>a[i] = float(a[i])<EOL><DEDENT><DEDENT>ann.append(a)<EOL><DEDENT><DEDENT>return ann<EOL>", "docstring": "r\"\"\"Input string format of class, x, y, w, h, return list of list format.\n\n    Parameters\n    -----------\n    annotations : str\n        The annotations in darkent format \"class, x, y, w, h ....\" seperated by \"\\\\n\".\n\n    Returns\n    -------\n    list of list of 4 numbers\n        List of bounding box.", "id": "f11202:m61"}
{"signature": "def obj_box_horizontal_flip(im, coords=None, is_rescale=False, is_center=False, is_random=False):", "body": "if coords is None:<EOL><INDENT>coords = []<EOL><DEDENT>def _flip(im, coords):<EOL><INDENT>im = flip_axis(im, axis=<NUM_LIT:1>, is_random=False)<EOL>coords_new = list()<EOL>for coord in coords:<EOL><INDENT>if len(coord) != <NUM_LIT:4>:<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>if is_rescale:<EOL><INDENT>if is_center:<EOL><INDENT>x = <NUM_LIT:1.> - coord[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>x = <NUM_LIT:1.> - coord[<NUM_LIT:0>] - coord[<NUM_LIT:2>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if is_center:<EOL><INDENT>x = im.shape[<NUM_LIT:1>] - coord[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>x = im.shape[<NUM_LIT:1>] - coord[<NUM_LIT:0>] - coord[<NUM_LIT:2>]<EOL><DEDENT><DEDENT>coords_new.append([x, coord[<NUM_LIT:1>], coord[<NUM_LIT:2>], coord[<NUM_LIT:3>]])<EOL><DEDENT>return im, coords_new<EOL><DEDENT>if is_random:<EOL><INDENT>factor = np.random.uniform(-<NUM_LIT:1>, <NUM_LIT:1>)<EOL>if factor > <NUM_LIT:0>:<EOL><INDENT>return _flip(im, coords)<EOL><DEDENT>else:<EOL><INDENT>return im, coords<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return _flip(im, coords)<EOL><DEDENT>", "docstring": "Left-right flip the image and coordinates for object detection.\n\n    Parameters\n    ----------\n    im : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    coords : list of list of 4 int/float or None\n        Coordinates [[x, y, w, h], [x, y, w, h], ...].\n    is_rescale : boolean\n        Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.\n    is_center : boolean\n        Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False.\n    is_random : boolean\n        If True, randomly flip. 
Default is False.\n\n    Returns\n    -------\n    numpy.array\n        A processed image\n    list of list of 4 numbers\n        A list of new bounding boxes.\n\n    Examples\n    --------\n    >>> im = np.zeros([80, 100])    # as an image with shape width=100, height=80\n    >>> im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3], [0.1, 0.5, 0.2, 0.3]], is_rescale=True, is_center=True, is_random=False)\n    >>> print(coords)\n      [[0.8, 0.4, 0.3, 0.3], [0.9, 0.5, 0.2, 0.3]]\n    >>> im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3]], is_rescale=True, is_center=False, is_random=False)\n    >>> print(coords)\n      [[0.5, 0.4, 0.3, 0.3]]\n    >>> im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=True, is_random=False)\n    >>> print(coords)\n      [[80, 40, 30, 30]]\n    >>> im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=False, is_random=False)\n    >>> print(coords)\n      [[50, 40, 30, 30]]", "id": "f11202:m63"}
{"signature": "def obj_box_shift(<EOL>im, classes=None, coords=None, wrg=<NUM_LIT:0.1>, hrg=<NUM_LIT:0.1>, row_index=<NUM_LIT:0>, col_index=<NUM_LIT:1>, channel_index=<NUM_LIT:2>, fill_mode='<STR_LIT>',<EOL>cval=<NUM_LIT:0.>, order=<NUM_LIT:1>, is_rescale=False, is_center=False, is_random=False, thresh_wh=<NUM_LIT>, thresh_wh2=<NUM_LIT><EOL>):", "body": "if classes is None:<EOL><INDENT>classes = []<EOL><DEDENT>if coords is None:<EOL><INDENT>coords = []<EOL><DEDENT>imh, imw = im.shape[row_index], im.shape[col_index]<EOL>if (hrg >= <NUM_LIT:1.0>) and (hrg <= <NUM_LIT:0.>) and (wrg >= <NUM_LIT:1.0>) and (wrg <= <NUM_LIT:0.>):<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>if is_random:<EOL><INDENT>tx = np.random.uniform(-hrg, hrg) * imh<EOL>ty = np.random.uniform(-wrg, wrg) * imw<EOL><DEDENT>else:<EOL><INDENT>tx, ty = hrg * imh, wrg * imw<EOL><DEDENT>translation_matrix = np.array([[<NUM_LIT:1>, <NUM_LIT:0>, tx], [<NUM_LIT:0>, <NUM_LIT:1>, ty], [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>]])<EOL>transform_matrix = translation_matrix  <EOL>im_new = affine_transform(im, transform_matrix, channel_index, fill_mode, cval, order)<EOL>def _get_coord(coord):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if is_center:<EOL><INDENT>coord = obj_box_coord_centroid_to_upleft(coord)<EOL><DEDENT>x = coord[<NUM_LIT:0>] - ty  <EOL>y = coord[<NUM_LIT:1>] - tx  <EOL>w = coord[<NUM_LIT:2>]<EOL>h = coord[<NUM_LIT:3>]<EOL>if x < <NUM_LIT:0>:<EOL><INDENT>if x + w <= <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>w = w + x<EOL>x = <NUM_LIT:0><EOL><DEDENT>elif x > im_new.shape[<NUM_LIT:1>]:  <EOL><INDENT>return None<EOL><DEDENT>if y < <NUM_LIT:0>:<EOL><INDENT>if y + h <= <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>h = h + y<EOL>y = <NUM_LIT:0><EOL><DEDENT>elif y > im_new.shape[<NUM_LIT:0>]:  <EOL><INDENT>return None<EOL><DEDENT>if (x is not None) and (x + w > im_new.shape[<NUM_LIT:1>]):  <EOL><INDENT>w = im_new.shape[<NUM_LIT:1>] - x<EOL><DEDENT>if (y is not None) and (y + h > 
im_new.shape[<NUM_LIT:0>]):  <EOL><INDENT>h = im_new.shape[<NUM_LIT:0>] - y<EOL><DEDENT>if (w / (h + <NUM_LIT:1.>) > thresh_wh2) or (h / (w + <NUM_LIT:1.>) > thresh_wh2):  <EOL><INDENT>return None<EOL><DEDENT>if (w / (im_new.shape[<NUM_LIT:1>] * <NUM_LIT:1.>) < thresh_wh) or (h / (im_new.shape[<NUM_LIT:0>] * <NUM_LIT:1.>) <<EOL>thresh_wh):  <EOL><INDENT>return None<EOL><DEDENT>coord = [x, y, w, h]<EOL>if is_center:<EOL><INDENT>coord = obj_box_coord_upleft_to_centroid(coord)<EOL><DEDENT>return coord<EOL><DEDENT>coords_new = list()<EOL>classes_new = list()<EOL>for i, _ in enumerate(coords):<EOL><INDENT>coord = coords[i]<EOL>if len(coord) != <NUM_LIT:4>:<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>if is_rescale:<EOL><INDENT>coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)<EOL>coord = _get_coord(coord)<EOL>if coord is not None:<EOL><INDENT>coord = obj_box_coord_rescale(coord, im_new.shape)<EOL>coords_new.append(coord)<EOL>classes_new.append(classes[i])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>coord = _get_coord(coord)<EOL>if coord is not None:<EOL><INDENT>coords_new.append(coord)<EOL>classes_new.append(classes[i])<EOL><DEDENT><DEDENT><DEDENT>return im_new, classes_new, coords_new<EOL>", "docstring": "Shift an image randomly or non-randomly, and compute the new bounding box coordinates.\n    Objects outside the cropped image will be removed.\n\n    Parameters\n    -----------\n    im : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    classes : list of int or None\n        Class IDs.\n    coords : list of list of 4 int/float or None\n        Coordinates [[x, y, w, h], [x, y, w, h], ...]\n    wrg, hrg row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.shift``.\n    is_rescale : boolean\n        Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.\n    is_center : boolean\n        Set to True, if the x and y of coordinates are the centroid (i.e. 
darknet format). Default is False.\n    thresh_wh : float\n        Threshold, remove the box if its ratio of width(height) to image size less than the threshold.\n    thresh_wh2 : float\n        Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold.\n\n\n    Returns\n    -------\n    numpy.array\n        A processed image\n    list of int\n        A list of classes\n    list of list of 4 numbers\n        A list of new bounding boxes.", "id": "f11202:m66"}
{"signature": "def respective_zoom(x, h_range=(<NUM_LIT>, <NUM_LIT>), w_range=(<NUM_LIT>, <NUM_LIT>), flags=None, border_mode='<STR_LIT>'):", "body": "zoom_matrix = affine_respective_zoom_matrix(h_range=h_range, w_range=w_range)<EOL>h, w = x.shape[<NUM_LIT:0>], x.shape[<NUM_LIT:1>]<EOL>transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)<EOL>x = affine_transform_cv2(<EOL>x, transform_matrix, flags=flags, border_mode=border_mode<EOL>)  <EOL>return x<EOL>", "docstring": "Zooming/Scaling a single image that height and width are changed independently.\n\n    Parameters\n    -----------\n    x : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    h_range : float or tuple of 2 floats\n        The zooming/scaling ratio of height, greater than 1 means larger.\n            - float, a fixed ratio.\n            - tuple of 2 floats, randomly sample a value as the ratio between 2 values.\n    w_range : float or tuple of 2 floats\n        The zooming/scaling ratio of width, greater than 1 means larger.\n            - float, a fixed ratio.\n            - tuple of 2 floats, randomly sample a value as the ratio between 2 values.\n    border_mode : str\n        - `constant`, pad the image with a constant value (i.e. black or 0)\n        - `replicate`, the row or column at the very edge of the original is replicated to the extra border.\n\n    Returns\n    -------\n    numpy.array\n        A processed image.", "id": "f11202:m30"}
{"signature": "def shear_multi2(<EOL>x, shear=(<NUM_LIT:0.1>, <NUM_LIT:0.1>), is_random=False, row_index=<NUM_LIT:0>, col_index=<NUM_LIT:1>, channel_index=<NUM_LIT:2>, fill_mode='<STR_LIT>', cval=<NUM_LIT:0.>,<EOL>order=<NUM_LIT:1><EOL>):", "body": "if len(shear) != <NUM_LIT:2>:<EOL><INDENT>raise AssertionError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if isinstance(shear, tuple):<EOL><INDENT>shear = list(shear)<EOL><DEDENT>if is_random:<EOL><INDENT>shear[<NUM_LIT:0>] = np.random.uniform(-shear[<NUM_LIT:0>], shear[<NUM_LIT:0>])<EOL>shear[<NUM_LIT:1>] = np.random.uniform(-shear[<NUM_LIT:1>], shear[<NUM_LIT:1>])<EOL><DEDENT>shear_matrix = np.array([[<NUM_LIT:1>, shear[<NUM_LIT:0>], <NUM_LIT:0>], [shear[<NUM_LIT:1>], <NUM_LIT:1>, <NUM_LIT:0>], [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>]])<EOL>h, w = x[<NUM_LIT:0>].shape[row_index], x[<NUM_LIT:0>].shape[col_index]<EOL>transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)<EOL>results = []<EOL>for data in x:<EOL><INDENT>results.append(affine_transform(data, transform_matrix, channel_index, fill_mode, cval, order))<EOL><DEDENT>return np.asarray(results)<EOL>", "docstring": "Shear images with the same arguments, randomly or non-randomly.\n    Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n    Parameters\n    -----------\n    x : list of numpy.array\n        List of images with dimension of [n_images, row, col, channel] (default).\n    others : args\n        See ``tl.prepro.shear2``.\n\n    Returns\n    -------\n    numpy.array\n        A list of processed images.", "id": "f11202:m24"}
{"signature": "def channel_shift_multi(x, intensity, is_random=False, channel_index=<NUM_LIT:2>):", "body": "if is_random:<EOL><INDENT>factor = np.random.uniform(-intensity, intensity)<EOL><DEDENT>else:<EOL><INDENT>factor = intensity<EOL><DEDENT>results = []<EOL>for data in x:<EOL><INDENT>data = np.rollaxis(data, channel_index, <NUM_LIT:0>)<EOL>min_x, max_x = np.min(data), np.max(data)<EOL>channel_images = [np.clip(x_channel + factor, min_x, max_x) for x_channel in x]<EOL>data = np.stack(channel_images, axis=<NUM_LIT:0>)<EOL>data = np.rollaxis(x, <NUM_LIT:0>, channel_index + <NUM_LIT:1>)<EOL>results.append(data)<EOL><DEDENT>return np.asarray(results)<EOL>", "docstring": "Shift the channels of images with the same arguments, randomly or non-randomly, see `numpy.rollaxis <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rollaxis.html>`__.\n    Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n    Parameters\n    -----------\n    x : list of numpy.array\n        List of images with dimension of [n_images, row, col, channel] (default).\n    others : args\n        See ``tl.prepro.channel_shift``.\n\n    Returns\n    -------\n    numpy.array\n        A list of processed images.", "id": "f11202:m45"}
{"signature": "def remove_pad_sequences(sequences, pad_id=<NUM_LIT:0>):", "body": "sequences_out = copy.deepcopy(sequences)<EOL>for i, _ in enumerate(sequences):<EOL><INDENT>for j in range(<NUM_LIT:1>, len(sequences[i])):<EOL><INDENT>if sequences[i][-j] != pad_id:<EOL><INDENT>sequences_out[i] = sequences_out[i][<NUM_LIT:0>:-j + <NUM_LIT:1>]<EOL>break<EOL><DEDENT><DEDENT><DEDENT>return sequences_out<EOL>", "docstring": "Remove padding.\n\n    Parameters\n    -----------\n    sequences : list of list of int\n        All sequences where each row is a sequence.\n    pad_id : int\n        The pad ID.\n\n    Returns\n    ----------\n    list of list of int\n        The processed sequences.\n\n    Examples\n    ----------\n    >>> sequences = [[2,3,4,0,0], [5,1,2,3,4,0,0,0], [4,5,0,2,4,0,0,0]]\n    >>> print(remove_pad_sequences(sequences, pad_id=0))\n    [[2, 3, 4], [5, 1, 2, 3, 4], [4, 5, 0, 2, 4]]", "id": "f11202:m69"}
{"signature": "def obj_box_coord_upleft_butright_to_centroid(coord):", "body": "if len(coord) != <NUM_LIT:4>:<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>x1, y1, x2, y2 = coord<EOL>w = x2 - x1<EOL>h = y2 - y1<EOL>x_c = x1 + w / <NUM_LIT><EOL>y_c = y1 + h / <NUM_LIT><EOL>return [x_c, y_c, w, h]<EOL>", "docstring": "Convert one coordinate [x1, y1, x2, y2] to [x_center, y_center, w, h].\n    It is the reverse process of ``obj_box_coord_centroid_to_upleft_butright``.\n\n    Parameters\n    ------------\n    coord : list of 4 int/float\n        One coordinate.\n\n    Returns\n    -------\n    list of 4 numbers\n        New bounding box.", "id": "f11202:m58"}
{"signature": "def crop(x, wrg, hrg, is_random=False, row_index=<NUM_LIT:0>, col_index=<NUM_LIT:1>):", "body": "h, w = x.shape[row_index], x.shape[col_index]<EOL>if (h < hrg) or (w < wrg):<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>if is_random:<EOL><INDENT>h_offset = int(np.random.uniform(<NUM_LIT:0>, h - hrg))<EOL>w_offset = int(np.random.uniform(<NUM_LIT:0>, w - wrg))<EOL>return x[h_offset:hrg + h_offset, w_offset:wrg + w_offset]<EOL><DEDENT>else:  <EOL><INDENT>h_offset = int(np.floor((h - hrg) / <NUM_LIT>))<EOL>w_offset = int(np.floor((w - wrg) / <NUM_LIT>))<EOL>h_end = h_offset + hrg<EOL>w_end = w_offset + wrg<EOL>return x[h_offset:h_end, w_offset:w_end]<EOL><DEDENT>", "docstring": "Randomly or centrally crop an image.\n\n    Parameters\n    ----------\n    x : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    wrg : int\n        Size of width.\n    hrg : int\n        Size of height.\n    is_random : boolean,\n        If True, randomly crop, else central crop. Default is False.\n    row_index: int\n        index of row.\n    col_index: int\n        index of column.\n\n    Returns\n    -------\n    numpy.array\n        A processed image.", "id": "f11202:m15"}
{"signature": "def swirl_multi(<EOL>x, center=None, strength=<NUM_LIT:1>, radius=<NUM_LIT:100>, rotation=<NUM_LIT:0>, output_shape=None, order=<NUM_LIT:1>, mode='<STR_LIT>', cval=<NUM_LIT:0>,<EOL>clip=True, preserve_range=False, is_random=False<EOL>):", "body": "if radius == <NUM_LIT:0>:<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>rotation = np.pi / <NUM_LIT> * rotation<EOL>if is_random:<EOL><INDENT>center_h = int(np.random.uniform(<NUM_LIT:0>, x[<NUM_LIT:0>].shape[<NUM_LIT:0>]))<EOL>center_w = int(np.random.uniform(<NUM_LIT:0>, x[<NUM_LIT:0>].shape[<NUM_LIT:1>]))<EOL>center = (center_h, center_w)<EOL>strength = np.random.uniform(<NUM_LIT:0>, strength)<EOL>radius = np.random.uniform(<NUM_LIT>, radius)<EOL>rotation = np.random.uniform(-rotation, rotation)<EOL><DEDENT>results = []<EOL>for data in x:<EOL><INDENT>max_v = np.max(data)<EOL>if max_v > <NUM_LIT:1>:  <EOL><INDENT>data = data / max_v<EOL><DEDENT>swirled = skimage.transform.swirl(<EOL>data, center=center, strength=strength, radius=radius, rotation=rotation, output_shape=output_shape,<EOL>order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range<EOL>)<EOL>if max_v > <NUM_LIT:1>:<EOL><INDENT>swirled = swirled * max_v<EOL><DEDENT>results.append(swirled)<EOL><DEDENT>return np.asarray(results)<EOL>", "docstring": "Swirl multiple images with the same arguments, randomly or non-randomly.\n    Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n    Parameters\n    -----------\n    x : list of numpy.array\n        List of images with dimension of [n_images, row, col, channel] (default).\n    others : args\n        See ``tl.prepro.swirl``.\n\n    Returns\n    -------\n    numpy.array\n        A list of processed images.", "id": "f11202:m26"}
{"signature": "def affine_shift_matrix(wrg=(-<NUM_LIT:0.1>, <NUM_LIT:0.1>), hrg=(-<NUM_LIT:0.1>, <NUM_LIT:0.1>), w=<NUM_LIT:200>, h=<NUM_LIT:200>):", "body": "if isinstance(wrg, tuple):<EOL><INDENT>tx = np.random.uniform(wrg[<NUM_LIT:0>], wrg[<NUM_LIT:1>]) * w<EOL><DEDENT>else:<EOL><INDENT>tx = wrg * w<EOL><DEDENT>if isinstance(hrg, tuple):<EOL><INDENT>ty = np.random.uniform(hrg[<NUM_LIT:0>], hrg[<NUM_LIT:1>]) * h<EOL><DEDENT>else:<EOL><INDENT>ty = hrg * h<EOL><DEDENT>shift_matrix = np.array([[<NUM_LIT:1>, <NUM_LIT:0>, tx],[<NUM_LIT:0>, <NUM_LIT:1>, ty],[<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>]])<EOL>return shift_matrix<EOL>", "docstring": "Create an affine transform matrix for image shifting.\n    NOTE: In OpenCV, x is width and y is height.\n\n    Parameters\n    -----------\n    wrg : float or tuple of floats\n        Range to shift on width axis, -1 ~ 1.\n            - float, a fixed distance.\n            - tuple of 2 floats, randomly sample a value as the distance between these 2 values.\n    hrg : float or tuple of floats\n        Range to shift on height axis, -1 ~ 1.\n            - float, a fixed distance.\n            - tuple of 2 floats, randomly sample a value as the distance between these 2 values.\n    w, h : int\n        The width and height of the image.\n\n    Returns\n    -------\n    numpy.array\n        An affine transform matrix.", "id": "f11202:m4"}
{"signature": "def obj_box_imresize(im, coords=None, size=None, interp='<STR_LIT>', mode=None, is_rescale=False):", "body": "if coords is None:<EOL><INDENT>coords = []<EOL><DEDENT>if size is None:<EOL><INDENT>size = [<NUM_LIT:100>, <NUM_LIT:100>]<EOL><DEDENT>imh, imw = im.shape[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>imh = imh * <NUM_LIT:1.0>  <EOL>imw = imw * <NUM_LIT:1.0><EOL>im = imresize(im, size=size, interp=interp, mode=mode)<EOL>if is_rescale is False:<EOL><INDENT>coords_new = list()<EOL>for coord in coords:<EOL><INDENT>if len(coord) != <NUM_LIT:4>:<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>x = int(coord[<NUM_LIT:0>] * (size[<NUM_LIT:1>] / imw))<EOL>y = int(coord[<NUM_LIT:1>] * (size[<NUM_LIT:0>] / imh))<EOL>w = int(coord[<NUM_LIT:2>] * (size[<NUM_LIT:1>] / imw))<EOL>h = int(coord[<NUM_LIT:3>] * (size[<NUM_LIT:0>] / imh))<EOL>coords_new.append([x, y, w, h])<EOL><DEDENT>return im, coords_new<EOL><DEDENT>else:<EOL><INDENT>return im, coords<EOL><DEDENT>", "docstring": "Resize an image, and compute the new bounding box coordinates.\n\n    Parameters\n    -------------\n    im : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    coords : list of list of 4 int/float or None\n        Coordinates [[x, y, w, h], [x, y, w, h], ...]\n    size interp and mode : args\n        See ``tl.prepro.imresize``.\n    is_rescale : boolean\n        Set to True, if the input coordinates are rescaled to [0, 1], then return the original coordinates. 
Default is False.\n\n    Returns\n    -------\n    numpy.array\n        A processed image\n    list of list of 4 numbers\n        A list of new bounding boxes.\n\n    Examples\n    --------\n    >>> im = np.zeros([80, 100, 3])    # as an image with shape width=100, height=80\n    >>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30], [10, 20, 20, 20]], size=[160, 200], is_rescale=False)\n    >>> print(coords)\n      [[40, 80, 60, 60], [20, 40, 40, 40]]\n    >>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[40, 100], is_rescale=False)\n    >>> print(coords)\n      [[20, 20, 30, 15]]\n    >>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[60, 150], is_rescale=False)\n    >>> print(coords)\n      [[30, 30, 45, 22]]\n    >>> im2, coords = obj_box_imresize(im, coords=[[0.2, 0.4, 0.3, 0.3]], size=[160, 200], is_rescale=True)\n    >>> print(coords, im2.shape)\n      [[0.2, 0.4, 0.3, 0.3]] (160, 200, 3)", "id": "f11202:m64"}
{"signature": "def rgb_to_hsv(rgb):", "body": "<EOL>rgb = rgb.astype('<STR_LIT:float>')<EOL>hsv = np.zeros_like(rgb)<EOL>hsv[..., <NUM_LIT:3>:] = rgb[..., <NUM_LIT:3>:]<EOL>r, g, b = rgb[..., <NUM_LIT:0>], rgb[..., <NUM_LIT:1>], rgb[..., <NUM_LIT:2>]<EOL>maxc = np.max(rgb[..., :<NUM_LIT:3>], axis=-<NUM_LIT:1>)<EOL>minc = np.min(rgb[..., :<NUM_LIT:3>], axis=-<NUM_LIT:1>)<EOL>hsv[..., <NUM_LIT:2>] = maxc<EOL>mask = maxc != minc<EOL>hsv[mask, <NUM_LIT:1>] = (maxc - minc)[mask] / maxc[mask]<EOL>rc = np.zeros_like(r)<EOL>gc = np.zeros_like(g)<EOL>bc = np.zeros_like(b)<EOL>rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]<EOL>gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]<EOL>bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]<EOL>hsv[..., <NUM_LIT:0>] = np.select([r == maxc, g == maxc], [bc - gc, <NUM_LIT> + rc - bc], default=<NUM_LIT> + gc - rc)<EOL>hsv[..., <NUM_LIT:0>] = (hsv[..., <NUM_LIT:0>] / <NUM_LIT>) % <NUM_LIT:1.0><EOL>return hsv<EOL>", "docstring": "Input RGB image [0~255] return HSV image [0~1].\n\n    Parameters\n    ------------\n    rgb : numpy.array\n        An image with values between 0 and 255.\n\n    Returns\n    -------\n    numpy.array\n        A processed image.", "id": "f11202:m35"}
{"signature": "def affine_horizontal_flip_matrix(prob=<NUM_LIT:0.5>):", "body": "factor = np.random.uniform(<NUM_LIT:0>, <NUM_LIT:1>)<EOL>if prob >= factor:<EOL><INDENT>filp_matrix = np.array([[ -<NUM_LIT:1.> , <NUM_LIT:0.>, <NUM_LIT:0.> ],[ <NUM_LIT:0.>, <NUM_LIT:1.>, <NUM_LIT:0.> ],[ <NUM_LIT:0.>, <NUM_LIT:0.>, <NUM_LIT:1.> ]])<EOL>return filp_matrix<EOL><DEDENT>else:<EOL><INDENT>filp_matrix = np.array([[ <NUM_LIT:1.> , <NUM_LIT:0.>, <NUM_LIT:0.> ],[ <NUM_LIT:0.>, <NUM_LIT:1.>, <NUM_LIT:0.> ],[ <NUM_LIT:0.>, <NUM_LIT:0.>, <NUM_LIT:1.> ]])<EOL>return filp_matrix<EOL><DEDENT>", "docstring": "Create an affine transformation matrix for image horizontal flipping.\n    NOTE: In OpenCV, x is width and y is height.\n\n    Parameters\n    ----------\n    prob : float\n        Probability to flip the image. 1.0 means always flip.\n\n    Returns\n    -------\n    numpy.array\n        An affine transform matrix.", "id": "f11202:m2"}
{"signature": "def keypoint_random_rotate(image, annos, mask=None, rg=<NUM_LIT>):", "body": "def _rotate_coord(shape, newxy, point, angle):<EOL><INDENT>angle = -<NUM_LIT:1> * angle / <NUM_LIT> * math.pi<EOL>ox, oy = shape<EOL>px, py = point<EOL>ox /= <NUM_LIT:2><EOL>oy /= <NUM_LIT:2><EOL>qx = math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)<EOL>qy = math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)<EOL>new_x, new_y = newxy<EOL>qx += ox - new_x<EOL>qy += oy - new_y<EOL>return int(qx + <NUM_LIT:0.5>), int(qy + <NUM_LIT:0.5>)<EOL><DEDENT>def _largest_rotated_rect(w, h, angle):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>angle = angle / <NUM_LIT> * math.pi<EOL>if w <= <NUM_LIT:0> or h <= <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0>, <NUM_LIT:0><EOL><DEDENT>width_is_longer = w >= h<EOL>side_long, side_short = (w, h) if width_is_longer else (h, w)<EOL>sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))<EOL>if side_short <= <NUM_LIT> * sin_a * cos_a * side_long:<EOL><INDENT>x = <NUM_LIT:0.5> * side_short<EOL>wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)<EOL><DEDENT>else:<EOL><INDENT>cos_2a = cos_a * cos_a - sin_a * sin_a<EOL>wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a<EOL><DEDENT>return int(np.round(wr)), int(np.round(hr))<EOL><DEDENT>img_shape = np.shape(image)<EOL>height = img_shape[<NUM_LIT:0>]<EOL>width = img_shape[<NUM_LIT:1>]<EOL>deg = np.random.uniform(-rg, rg)<EOL>img = image<EOL>center = (img.shape[<NUM_LIT:1>] * <NUM_LIT:0.5>, img.shape[<NUM_LIT:0>] * <NUM_LIT:0.5>)  <EOL>rot_m = cv2.getRotationMatrix2D((int(center[<NUM_LIT:0>]), int(center[<NUM_LIT:1>])), deg, <NUM_LIT:1>)<EOL>ret = cv2.warpAffine(img, rot_m, img.shape[<NUM_LIT:1>::-<NUM_LIT:1>], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)<EOL>if img.ndim == <NUM_LIT:3> and ret.ndim == <NUM_LIT:2>:<EOL><INDENT>ret = ret[:, :, np.newaxis]<EOL><DEDENT>neww, newh = _largest_rotated_rect(ret.shape[<NUM_LIT:1>], 
ret.shape[<NUM_LIT:0>], deg)<EOL>neww = min(neww, ret.shape[<NUM_LIT:1>])<EOL>newh = min(newh, ret.shape[<NUM_LIT:0>])<EOL>newx = int(center[<NUM_LIT:0>] - neww * <NUM_LIT:0.5>)<EOL>newy = int(center[<NUM_LIT:1>] - newh * <NUM_LIT:0.5>)<EOL>img = ret[newy:newy + newh, newx:newx + neww]<EOL>adjust_joint_list = []<EOL>for joint in annos:  <EOL><INDENT>adjust_joint = []<EOL>for point in joint:<EOL><INDENT>if point[<NUM_LIT:0>] < -<NUM_LIT:100> or point[<NUM_LIT:1>] < -<NUM_LIT:100>:<EOL><INDENT>adjust_joint.append((-<NUM_LIT:1000>, -<NUM_LIT:1000>))<EOL>continue<EOL><DEDENT>x, y = _rotate_coord((width, height), (newx, newy), point, deg)<EOL>if x > neww - <NUM_LIT:1> or y > newh - <NUM_LIT:1>:<EOL><INDENT>adjust_joint.append((-<NUM_LIT:1000>, -<NUM_LIT:1000>))<EOL>continue<EOL><DEDENT>if x < <NUM_LIT:0> or y < <NUM_LIT:0>:<EOL><INDENT>adjust_joint.append((-<NUM_LIT:1000>, -<NUM_LIT:1000>))<EOL>continue<EOL><DEDENT>adjust_joint.append((x, y))<EOL><DEDENT>adjust_joint_list.append(adjust_joint)<EOL><DEDENT>joint_list = adjust_joint_list<EOL>if mask is not None:<EOL><INDENT>msk = mask<EOL>center = (msk.shape[<NUM_LIT:1>] * <NUM_LIT:0.5>, msk.shape[<NUM_LIT:0>] * <NUM_LIT:0.5>)  <EOL>rot_m = cv2.getRotationMatrix2D((int(center[<NUM_LIT:0>]), int(center[<NUM_LIT:1>])), deg, <NUM_LIT:1>)<EOL>ret = cv2.warpAffine(msk, rot_m, msk.shape[<NUM_LIT:1>::-<NUM_LIT:1>], flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)<EOL>if msk.ndim == <NUM_LIT:3> and msk.ndim == <NUM_LIT:2>:<EOL><INDENT>ret = ret[:, :, np.newaxis]<EOL><DEDENT>neww, newh = _largest_rotated_rect(ret.shape[<NUM_LIT:1>], ret.shape[<NUM_LIT:0>], deg)<EOL>neww = min(neww, ret.shape[<NUM_LIT:1>])<EOL>newh = min(newh, ret.shape[<NUM_LIT:0>])<EOL>newx = int(center[<NUM_LIT:0>] - neww * <NUM_LIT:0.5>)<EOL>newy = int(center[<NUM_LIT:1>] - newh * <NUM_LIT:0.5>)<EOL>msk = ret[newy:newy + newh, newx:newx + neww]<EOL>return img, joint_list, msk<EOL><DEDENT>else:<EOL><INDENT>return img, joint_list, None<EOL><DEDENT>", 
"docstring": "Rotate an image and corresponding keypoints.\n\n    Parameters\n    -----------\n    image : 3 channel image\n        The given image for augmentation.\n    annos : list of list of floats\n        The keypoints annotation of people.\n    mask : single channel image or None\n        The mask if available.\n    rg : int or float\n        Degree to rotate, usually 0 ~ 180.\n\n    Returns\n    ----------\n    preprocessed image, annos, mask", "id": "f11202:m77"}
{"signature": "def flip_axis_multi(x, axis, is_random=False):", "body": "if is_random:<EOL><INDENT>factor = np.random.uniform(-<NUM_LIT:1>, <NUM_LIT:1>)<EOL>if factor > <NUM_LIT:0>:<EOL><INDENT>results = []<EOL>for data in x:<EOL><INDENT>data = np.asarray(data).swapaxes(axis, <NUM_LIT:0>)<EOL>data = data[::-<NUM_LIT:1>, ...]<EOL>data = data.swapaxes(<NUM_LIT:0>, axis)<EOL>results.append(data)<EOL><DEDENT>return np.asarray(results)<EOL><DEDENT>else:<EOL><INDENT>return np.asarray(x)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>results = []<EOL>for data in x:<EOL><INDENT>data = np.asarray(data).swapaxes(axis, <NUM_LIT:0>)<EOL>data = data[::-<NUM_LIT:1>, ...]<EOL>data = data.swapaxes(<NUM_LIT:0>, axis)<EOL>results.append(data)<EOL><DEDENT>return np.asarray(results)<EOL><DEDENT>", "docstring": "Flip the axises of multiple images together, such as flip left and right, up and down, randomly or non-randomly,\n\n    Parameters\n    -----------\n    x : list of numpy.array\n        List of images with dimension of [n_images, row, col, channel] (default).\n    others : args\n        See ``tl.prepro.flip_axis``.\n\n    Returns\n    -------\n    numpy.array\n        A list of processed images.", "id": "f11202:m18"}
{"signature": "def shear_multi(<EOL>x, intensity=<NUM_LIT:0.1>, is_random=False, row_index=<NUM_LIT:0>, col_index=<NUM_LIT:1>, channel_index=<NUM_LIT:2>, fill_mode='<STR_LIT>', cval=<NUM_LIT:0.>,<EOL>order=<NUM_LIT:1><EOL>):", "body": "if is_random:<EOL><INDENT>shear = np.random.uniform(-intensity, intensity)<EOL><DEDENT>else:<EOL><INDENT>shear = intensity<EOL><DEDENT>shear_matrix = np.array([[<NUM_LIT:1>, -np.sin(shear), <NUM_LIT:0>], [<NUM_LIT:0>, np.cos(shear), <NUM_LIT:0>], [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>]])<EOL>h, w = x[<NUM_LIT:0>].shape[row_index], x[<NUM_LIT:0>].shape[col_index]<EOL>transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)<EOL>results = []<EOL>for data in x:<EOL><INDENT>results.append(affine_transform(data, transform_matrix, channel_index, fill_mode, cval, order))<EOL><DEDENT>return np.asarray(results)<EOL>", "docstring": "Shear images with the same arguments, randomly or non-randomly.\n    Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n    Parameters\n    -----------\n    x : list of numpy.array\n        List of images with dimension of [n_images, row, col, channel] (default).\n    others : args\n        See ``tl.prepro.shear``.\n\n    Returns\n    -------\n    numpy.array\n        A list of processed images.", "id": "f11202:m22"}
{"signature": "def shear(<EOL>x, intensity=<NUM_LIT:0.1>, is_random=False, row_index=<NUM_LIT:0>, col_index=<NUM_LIT:1>, channel_index=<NUM_LIT:2>, fill_mode='<STR_LIT>', cval=<NUM_LIT:0.>,<EOL>order=<NUM_LIT:1><EOL>):", "body": "if is_random:<EOL><INDENT>shear = np.random.uniform(-intensity, intensity)<EOL><DEDENT>else:<EOL><INDENT>shear = intensity<EOL><DEDENT>shear_matrix = np.array([[<NUM_LIT:1>, -np.sin(shear), <NUM_LIT:0>], [<NUM_LIT:0>, np.cos(shear), <NUM_LIT:0>], [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>]])<EOL>h, w = x.shape[row_index], x.shape[col_index]<EOL>transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)<EOL>x = affine_transform(x, transform_matrix, channel_index, fill_mode, cval, order)<EOL>return x<EOL>", "docstring": "Shear an image randomly or non-randomly.\n\n    Parameters\n    -----------\n    x : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    intensity : float\n        Percentage of shear, usually -0.5 ~ 0.5 (is_random==True), 0 ~ 0.5 (is_random==False),\n        you can have a quick try by shear(X, 1).\n    is_random : boolean\n        If True, randomly shear. Default is False.\n    row_index col_index and channel_index : int\n        Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).\n    fill_mode : str\n        Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__\n    cval : float\n        Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.\n    order : int\n        The order of interpolation. The order has to be in the range 0-5. 
See ``tl.prepro.affine_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__\n\n    Returns\n    -------\n    numpy.array\n        A processed image.\n\n    References\n    -----------\n    - `Affine transformation <https://uk.mathworks.com/discovery/affine-transformation.html>`__", "id": "f11202:m21"}
{"signature": "def obj_box_coord_upleft_to_centroid(coord):", "body": "if len(coord) != <NUM_LIT:4>:<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>x, y, w, h = coord<EOL>x_center = x + w / <NUM_LIT><EOL>y_center = y + h / <NUM_LIT><EOL>return [x_center, y_center, w, h]<EOL>", "docstring": "Convert one coordinate [x, y, w, h] to [x_center, y_center, w, h].\n    It is the reverse process of ``obj_box_coord_centroid_to_upleft``.\n\n    Parameters\n    ------------\n    coord : list of 4 int/float\n        One coordinate.\n\n    Returns\n    -------\n    list of 4 numbers\n        New bounding box.", "id": "f11202:m60"}
{"signature": "def rotation_multi(<EOL>x, rg=<NUM_LIT:20>, is_random=False, row_index=<NUM_LIT:0>, col_index=<NUM_LIT:1>, channel_index=<NUM_LIT:2>, fill_mode='<STR_LIT>', cval=<NUM_LIT:0.>, order=<NUM_LIT:1><EOL>):", "body": "if is_random:<EOL><INDENT>theta = np.pi / <NUM_LIT> * np.random.uniform(-rg, rg)<EOL><DEDENT>else:<EOL><INDENT>theta = np.pi / <NUM_LIT> * rg<EOL><DEDENT>rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), <NUM_LIT:0>], [np.sin(theta), np.cos(theta), <NUM_LIT:0>], [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>]])<EOL>h, w = x[<NUM_LIT:0>].shape[row_index], x[<NUM_LIT:0>].shape[col_index]<EOL>transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)<EOL>results = []<EOL>for data in x:<EOL><INDENT>results.append(affine_transform(data, transform_matrix, channel_index, fill_mode, cval, order))<EOL><DEDENT>return np.asarray(results)<EOL>", "docstring": "Rotate multiple images with the same arguments, randomly or non-randomly.\n    Usually be used for image segmentation which x=[X, Y], X and Y should be matched.\n\n    Parameters\n    -----------\n    x : list of numpy.array\n        List of images with dimension of [n_images, row, col, channel] (default).\n    others : args\n        See ``tl.prepro.rotation``.\n\n    Returns\n    -------\n    numpy.array\n        A list of processed images.\n\n    Examples\n    --------\n    >>> x, y --> [row, col, 1]  greyscale\n    >>> x, y = tl.prepro.rotation_multi([x, y], rg=90, is_random=False)", "id": "f11202:m14"}
{"signature": "def erosion(x, radius=<NUM_LIT:3>):", "body": "mask = disk(radius)<EOL>x = _erosion(x, selem=mask)<EOL>return x<EOL>", "docstring": "Return greyscale morphological erosion of an image,\n    see `skimage.morphology.erosion <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.erosion>`__.\n\n    Parameters\n    -----------\n    x : 2D array\n        A greyscale image.\n    radius : int\n        For the radius of mask.\n\n    Returns\n    -------\n    numpy.array\n        A processed greyscale image.", "id": "f11202:m53"}
{"signature": "def affine_transform_cv2(x, transform_matrix, flags=None, border_mode='<STR_LIT>'):", "body": "rows, cols = x.shape[<NUM_LIT:0>], x.shape[<NUM_LIT:1>]<EOL>if flags is None:<EOL><INDENT>flags = cv2.INTER_AREA<EOL><DEDENT>if border_mode is '<STR_LIT>':<EOL><INDENT>border_mode = cv2.BORDER_CONSTANT<EOL><DEDENT>elif border_mode is '<STR_LIT>':<EOL><INDENT>border_mode = cv2.BORDER_REPLICATE<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>return cv2.warpAffine(x, transform_matrix[<NUM_LIT:0>:<NUM_LIT:2>,:],(cols,rows), flags=flags, borderMode=border_mode)<EOL>", "docstring": "Return transformed images by given an affine matrix in OpenCV format (x is width). (Powered by OpenCV2, faster than ``tl.prepro.affine_transform``)\n\n    Parameters\n    ----------\n    x : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    transform_matrix : numpy.array\n        A transform matrix, OpenCV format.\n    border_mode : str\n        - `constant`, pad the image with a constant value (i.e. black or 0)\n        - `replicate`, the row or column at the very edge of the original is replicated to the extra border.\n\n    Examples\n    --------\n    >>> M_shear = tl.prepro.affine_shear_matrix(intensity=0.2, is_random=False)\n    >>> M_zoom = tl.prepro.affine_zoom_matrix(zoom_range=0.8)\n    >>> M_combined = M_shear.dot(M_zoom)\n    >>> result = tl.prepro.affine_transform_cv2(image, M_combined)", "id": "f11202:m10"}
{"signature": "def rotation(<EOL>x, rg=<NUM_LIT:20>, is_random=False, row_index=<NUM_LIT:0>, col_index=<NUM_LIT:1>, channel_index=<NUM_LIT:2>, fill_mode='<STR_LIT>', cval=<NUM_LIT:0.>, order=<NUM_LIT:1><EOL>):", "body": "if is_random:<EOL><INDENT>theta = np.pi / <NUM_LIT> * np.random.uniform(-rg, rg)<EOL><DEDENT>else:<EOL><INDENT>theta = np.pi / <NUM_LIT> * rg<EOL><DEDENT>rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), <NUM_LIT:0>], [np.sin(theta), np.cos(theta), <NUM_LIT:0>], [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>]])<EOL>h, w = x.shape[row_index], x.shape[col_index]<EOL>transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)<EOL>x = affine_transform(x, transform_matrix, channel_index, fill_mode, cval, order)<EOL>return x<EOL>", "docstring": "Rotate an image randomly or non-randomly.\n\n    Parameters\n    -----------\n    x : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    rg : int or float\n        Degree to rotate, usually 0 ~ 180.\n    is_random : boolean\n        If True, randomly rotate. Default is False\n    row_index col_index and channel_index : int\n        Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).\n    fill_mode : str\n        Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__\n    cval : float\n        Value used for points outside the boundaries of the input if mode=`constant`. Default is 0.0\n    order : int\n        The order of interpolation. The order has to be in the range 0-5. 
See ``tl.prepro.affine_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__\n\n    Returns\n    -------\n    numpy.array\n        A processed image.\n\n    Examples\n    ---------\n    >>> x --> [row, col, 1]\n    >>> x = tl.prepro.rotation(x, rg=40, is_random=False)\n    >>> tl.vis.save_image(x, 'im.png')", "id": "f11202:m13"}
{"signature": "def imresize(x, size=None, interp='<STR_LIT>', mode=None):", "body": "if size is None:<EOL><INDENT>size = [<NUM_LIT:100>, <NUM_LIT:100>]<EOL><DEDENT>if x.shape[-<NUM_LIT:1>] == <NUM_LIT:1>:<EOL><INDENT>x = scipy.misc.imresize(x[:, :, <NUM_LIT:0>], size, interp=interp, mode=mode)<EOL>return x[:, :, np.newaxis]<EOL><DEDENT>else:<EOL><INDENT>return scipy.misc.imresize(x, size, interp=interp, mode=mode)<EOL><DEDENT>", "docstring": "Resize an image by given output size and method.\n\n    Warning, this function will rescale the value to [0, 255].\n\n    Parameters\n    -----------\n    x : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    size : list of 2 int or None\n        For height and width.\n    interp : str\n        Interpolation method for re-sizing (`nearest`, `lanczos`, `bilinear`, `bicubic` (default) or `cubic`).\n    mode : str\n        The PIL image mode (`P`, `L`, etc.) to convert image before resizing.\n\n    Returns\n    -------\n    numpy.array\n        A processed image.\n\n    References\n    ------------\n    - `scipy.misc.imresize <https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.imresize.html>`__", "id": "f11202:m38"}
{"signature": "def array_to_img(x, dim_ordering=(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>), scale=True):", "body": "<EOL>x = x.transpose(dim_ordering)<EOL>if scale:<EOL><INDENT>x += max(-np.min(x), <NUM_LIT:0>)<EOL>x_max = np.max(x)<EOL>if x_max != <NUM_LIT:0>:<EOL><INDENT>x = x / x_max<EOL><DEDENT>x *= <NUM_LIT:255><EOL><DEDENT>if x.shape[<NUM_LIT:2>] == <NUM_LIT:3>:<EOL><INDENT>return PIL.Image.fromarray(x.astype('<STR_LIT>'), '<STR_LIT>')<EOL><DEDENT>elif x.shape[<NUM_LIT:2>] == <NUM_LIT:1>:<EOL><INDENT>return PIL.Image.fromarray(x[:, :, <NUM_LIT:0>].astype('<STR_LIT>'), '<STR_LIT:L>')<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>', x.shape[<NUM_LIT:2>])<EOL><DEDENT>", "docstring": "Converts a numpy array to PIL image object (uint8 format).\n\n    Parameters\n    ----------\n    x : numpy.array\n        An image with dimension of 3 and channels of 1 or 3.\n    dim_ordering : tuple of 3 int\n        Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).\n    scale : boolean\n        If True, converts image to [0, 255] from any range of value like [-1, 2]. Default is True.\n\n    Returns\n    -------\n    PIL.image\n        An image.\n\n    References\n    -----------\n    `PIL Image.fromarray <http://pillow.readthedocs.io/en/3.1.x/reference/Image.html?highlight=fromarray>`__", "id": "f11202:m47"}
{"signature": "def brightness(x, gamma=<NUM_LIT:1>, gain=<NUM_LIT:1>, is_random=False):", "body": "if is_random:<EOL><INDENT>gamma = np.random.uniform(<NUM_LIT:1> - gamma, <NUM_LIT:1> + gamma)<EOL><DEDENT>x = exposure.adjust_gamma(x, gamma, gain)<EOL>return x<EOL>", "docstring": "Change the brightness of a single image, randomly or non-randomly.\n\n    Parameters\n    -----------\n    x : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    gamma : float\n        Non negative real number. Default value is 1.\n            - Small than 1 means brighter.\n            - If `is_random` is True, gamma in a range of (1-gamma, 1+gamma).\n    gain : float\n        The constant multiplier. Default value is 1.\n    is_random : boolean\n        If True, randomly change brightness. Default is False.\n\n    Returns\n    -------\n    numpy.array\n        A processed image.\n\n    References\n    -----------\n    - `skimage.exposure.adjust_gamma <http://scikit-image.org/docs/dev/api/skimage.exposure.html>`__\n    - `chinese blog <http://www.cnblogs.com/denny402/p/5124402.html>`__", "id": "f11202:m32"}
{"signature": "def drop(x, keep=<NUM_LIT:0.5>):", "body": "if len(x.shape) == <NUM_LIT:3>:<EOL><INDENT>if x.shape[-<NUM_LIT:1>] == <NUM_LIT:3>:  <EOL><INDENT>img_size = x.shape<EOL>mask = np.random.binomial(n=<NUM_LIT:1>, p=keep, size=x.shape[:-<NUM_LIT:1>])<EOL>for i in range(<NUM_LIT:3>):<EOL><INDENT>x[:, :, i] = np.multiply(x[:, :, i], mask)<EOL><DEDENT><DEDENT>elif x.shape[-<NUM_LIT:1>] == <NUM_LIT:1>:  <EOL><INDENT>img_size = x.shape<EOL>x = np.multiply(x, np.random.binomial(n=<NUM_LIT:1>, p=keep, size=img_size))<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\".format(x.shape))<EOL><DEDENT><DEDENT>elif len(x.shape) == <NUM_LIT:2> or <NUM_LIT:1>:  <EOL><INDENT>img_size = x.shape<EOL>x = np.multiply(x, np.random.binomial(n=<NUM_LIT:1>, p=keep, size=img_size))<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\".format(x.shape))<EOL><DEDENT>return x<EOL>", "docstring": "Randomly set some pixels to zero by a given keeping probability.\n\n    Parameters\n    -----------\n    x : numpy.array\n        An image with dimension of [row, col, channel] or [row, col].\n    keep : float\n        The keeping probability (0, 1), the lower more values will be set to zero.\n\n    Returns\n    -------\n    numpy.array\n        A processed image.", "id": "f11202:m46"}
{"signature": "def keypoint_resize_random_crop(image, annos, mask=None, size=(<NUM_LIT>, <NUM_LIT>)):", "body": "if len(np.shape(image)) == <NUM_LIT:2>:<EOL><INDENT>image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)<EOL><DEDENT>def resize_image(image, annos, mask, target_width, target_height):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>y, x, _ = np.shape(image)<EOL>ratio_y = target_height / y<EOL>ratio_x = target_width / x<EOL>new_joints = []<EOL>for people in annos:<EOL><INDENT>new_keypoints = []<EOL>for keypoints in people:<EOL><INDENT>if keypoints[<NUM_LIT:0>] < <NUM_LIT:0> or keypoints[<NUM_LIT:1>] < <NUM_LIT:0>:<EOL><INDENT>new_keypoints.append((-<NUM_LIT:1000>, -<NUM_LIT:1000>))<EOL>continue<EOL><DEDENT>pts = (int(keypoints[<NUM_LIT:0>] * ratio_x + <NUM_LIT:0.5>), int(keypoints[<NUM_LIT:1>] * ratio_y + <NUM_LIT:0.5>))<EOL>if pts[<NUM_LIT:0>] > target_width - <NUM_LIT:1> or pts[<NUM_LIT:1>] > target_height - <NUM_LIT:1>:<EOL><INDENT>new_keypoints.append((-<NUM_LIT:1000>, -<NUM_LIT:1000>))<EOL>continue<EOL><DEDENT>new_keypoints.append(pts)<EOL><DEDENT>new_joints.append(new_keypoints)<EOL><DEDENT>annos = new_joints<EOL>new_image = cv2.resize(image, (target_width, target_height), interpolation=cv2.INTER_AREA)<EOL>if mask is not None:<EOL><INDENT>new_mask = cv2.resize(mask, (target_width, target_height), interpolation=cv2.INTER_AREA)<EOL>return new_image, annos, new_mask<EOL><DEDENT>else:<EOL><INDENT>return new_image, annos, None<EOL><DEDENT><DEDENT>_target_height = size[<NUM_LIT:0>]<EOL>_target_width = size[<NUM_LIT:1>]<EOL>if len(np.shape(image)) == <NUM_LIT:2>:<EOL><INDENT>image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)<EOL><DEDENT>height, width, _ = np.shape(image)<EOL>if height <= width:<EOL><INDENT>ratio = _target_height / height<EOL>new_width = int(ratio * width)<EOL>if height == width:<EOL><INDENT>new_width = _target_height<EOL><DEDENT>image, annos, mask = resize_image(image, annos, mask, new_width, _target_height)<EOL>if new_width > 
_target_width:<EOL><INDENT>crop_range_x = np.random.randint(<NUM_LIT:0>, new_width - _target_width)<EOL><DEDENT>else:<EOL><INDENT>crop_range_x = <NUM_LIT:0><EOL><DEDENT>image = image[:, crop_range_x:crop_range_x + _target_width, :]<EOL>if mask is not None:<EOL><INDENT>mask = mask[:, crop_range_x:crop_range_x + _target_width]<EOL><DEDENT>new_joints = []<EOL>for people in annos:<EOL><INDENT>new_keypoints = []<EOL>for keypoints in people:<EOL><INDENT>if keypoints[<NUM_LIT:0>] < -<NUM_LIT:10> or keypoints[<NUM_LIT:1>] < -<NUM_LIT:10>:<EOL><INDENT>new_keypoints.append((-<NUM_LIT:1000>, -<NUM_LIT:1000>))<EOL>continue<EOL><DEDENT>top = crop_range_x + _target_width - <NUM_LIT:1><EOL>if keypoints[<NUM_LIT:0>] >= crop_range_x and keypoints[<NUM_LIT:0>] <= top:<EOL><INDENT>pts = (int(keypoints[<NUM_LIT:0>] - crop_range_x), int(keypoints[<NUM_LIT:1>]))<EOL><DEDENT>else:<EOL><INDENT>pts = (-<NUM_LIT:1000>, -<NUM_LIT:1000>)<EOL><DEDENT>new_keypoints.append(pts)<EOL><DEDENT>new_joints.append(new_keypoints)<EOL><DEDENT>annos = new_joints<EOL><DEDENT>if height > width:<EOL><INDENT>ratio = _target_width / width<EOL>new_height = int(ratio * height)<EOL>image, annos, mask = resize_image(image, annos, mask, _target_width, new_height)<EOL>if new_height > _target_height:<EOL><INDENT>crop_range_y = np.random.randint(<NUM_LIT:0>, new_height - _target_height)<EOL><DEDENT>else:<EOL><INDENT>crop_range_y = <NUM_LIT:0><EOL><DEDENT>image = image[crop_range_y:crop_range_y + _target_width, :, :]<EOL>if mask is not None:<EOL><INDENT>mask = mask[crop_range_y:crop_range_y + _target_width, :]<EOL><DEDENT>new_joints = []<EOL>for people in annos:  <EOL><INDENT>new_keypoints = []<EOL>for keypoints in people:<EOL><INDENT>if keypoints[<NUM_LIT:0>] < <NUM_LIT:0> or keypoints[<NUM_LIT:1>] < <NUM_LIT:0>:<EOL><INDENT>new_keypoints.append((-<NUM_LIT:1000>, -<NUM_LIT:1000>))<EOL>continue<EOL><DEDENT>bot = crop_range_y + _target_height - <NUM_LIT:1><EOL>if keypoints[<NUM_LIT:1>] >= crop_range_y and 
keypoints[<NUM_LIT:1>] <= bot:<EOL><INDENT>pts = (int(keypoints[<NUM_LIT:0>]), int(keypoints[<NUM_LIT:1>] - crop_range_y))<EOL><DEDENT>else:<EOL><INDENT>pts = (-<NUM_LIT:1000>, -<NUM_LIT:1000>)<EOL><DEDENT>new_keypoints.append(pts)<EOL><DEDENT>new_joints.append(new_keypoints)<EOL><DEDENT>annos = new_joints<EOL><DEDENT>if mask is not None:<EOL><INDENT>return image, annos, mask<EOL><DEDENT>else:<EOL><INDENT>return image, annos, None<EOL><DEDENT>", "docstring": "Reszie the image to make either its width or height equals to the given sizes.\n    Then randomly crop image without influence scales.\n    Resize the image match with the minimum size before cropping, this API will change the zoom scale of object.\n\n    Parameters\n    -----------\n    image : 3 channel image\n        The given image for augmentation.\n    annos : list of list of floats\n        The keypoints annotation of people.\n    mask : single channel image or None\n        The mask if available.\n    size : tuple of int\n        The size (height, width) of returned image.\n\n    Returns\n    ----------\n    preprocessed image, annos, mask", "id": "f11202:m76"}
{"signature": "def shear2(<EOL>x, shear=(<NUM_LIT:0.1>, <NUM_LIT:0.1>), is_random=False, row_index=<NUM_LIT:0>, col_index=<NUM_LIT:1>, channel_index=<NUM_LIT:2>, fill_mode='<STR_LIT>', cval=<NUM_LIT:0.>,<EOL>order=<NUM_LIT:1><EOL>):", "body": "if len(shear) != <NUM_LIT:2>:<EOL><INDENT>raise AssertionError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if isinstance(shear, tuple):<EOL><INDENT>shear = list(shear)<EOL><DEDENT>if is_random:<EOL><INDENT>shear[<NUM_LIT:0>] = np.random.uniform(-shear[<NUM_LIT:0>], shear[<NUM_LIT:0>])<EOL>shear[<NUM_LIT:1>] = np.random.uniform(-shear[<NUM_LIT:1>], shear[<NUM_LIT:1>])<EOL><DEDENT>shear_matrix = np.array([[<NUM_LIT:1>, shear[<NUM_LIT:0>], <NUM_LIT:0>],[shear[<NUM_LIT:1>], <NUM_LIT:1>, <NUM_LIT:0>],[<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>]])<EOL>h, w = x.shape[row_index], x.shape[col_index]<EOL>transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)<EOL>x = affine_transform(x, transform_matrix, channel_index, fill_mode, cval, order)<EOL>return x<EOL>", "docstring": "Shear an image randomly or non-randomly.\n\n    Parameters\n    -----------\n    x : numpy.array\n        An image with dimension of [row, col, channel] (default).\n    shear : tuple of two floats\n        Percentage of shear for height and width direction (0, 1).\n    is_random : boolean\n        If True, randomly shear. Default is False.\n    row_index col_index and channel_index : int\n        Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).\n    fill_mode : str\n        Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__\n    cval : float\n        Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.\n    order : int\n        The order of interpolation. The order has to be in the range 0-5. 
See ``tl.prepro.affine_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__\n\n    Returns\n    -------\n    numpy.array\n        A processed image.\n\n    References\n    -----------\n    - `Affine transformation <https://uk.mathworks.com/discovery/affine-transformation.html>`__", "id": "f11202:m23"}
{"signature": "def save_model(self, network=None, model_name='<STR_LIT>', **kwargs):", "body": "kwargs.update({'<STR_LIT>': model_name})<EOL>self._fill_project_info(kwargs)  <EOL>params = network.get_all_params()<EOL>s = time.time()<EOL>kwargs.update({'<STR_LIT>': network.all_graphs, '<STR_LIT:time>': datetime.utcnow()})<EOL>try:<EOL><INDENT>params_id = self.model_fs.put(self._serialization(params))<EOL>kwargs.update({'<STR_LIT>': params_id, '<STR_LIT:time>': datetime.utcnow()})<EOL>self.db.Model.insert_one(kwargs)<EOL>print(\"<STR_LIT>\".format(round(time.time() - s, <NUM_LIT:2>)))<EOL>return True<EOL><DEDENT>except Exception as e:<EOL><INDENT>exc_type, exc_obj, exc_tb = sys.exc_info()<EOL>fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[<NUM_LIT:1>]<EOL>logging.info(\"<STR_LIT>\".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))<EOL>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>", "docstring": "Save model architecture and parameters into database, timestamp will be added automatically.\n\n        Parameters\n        ----------\n        network : TensorLayer layer\n            TensorLayer layer instance.\n        model_name : str\n            The name/key of model.\n        kwargs : other events\n            Other events, such as name, accuracy, loss, step number and etc (optinal).\n\n        Examples\n        ---------\n        Save model architecture and parameters into database.\n        >>> db.save_model(net, accuracy=0.8, loss=2.3, name='second_model')\n\n        Load one model with parameters from database (run this in other script)\n        >>> net = db.find_top_model(sess=sess, accuracy=0.8, loss=2.3)\n\n        Find and load the latest model.\n        >>> net = db.find_top_model(sess=sess, sort=[(\"time\", pymongo.DESCENDING)])\n        >>> net = db.find_top_model(sess=sess, sort=[(\"time\", -1)])\n\n        Find and load the oldest model.\n        >>> net = db.find_top_model(sess=sess, sort=[(\"time\", pymongo.ASCENDING)])\n        >>> 
net = db.find_top_model(sess=sess, sort=[(\"time\", 1)])\n\n        Get model information\n        >>> net._accuracy\n        ... 0.8\n\n        Returns\n        ---------\n        boolean : True for success, False for fail.", "id": "f11203:c0:m5"}
{"signature": "def __str__(self):", "body": "return self._s<EOL>", "docstring": "Print information of databset.", "id": "f11203:c0:m1"}
{"signature": "def _fill_project_info(self, args):", "body": "return args.update({'<STR_LIT>': self.project_name})<EOL>", "docstring": "Fill in project_name for all studies, architectures and parameters.", "id": "f11203:c0:m2"}
{"signature": "def run_top_task(self, task_name=None, sort=None, **kwargs):", "body": "if not isinstance(task_name, str):  <EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>self._fill_project_info(kwargs)<EOL>kwargs.update({'<STR_LIT:status>': '<STR_LIT>'})<EOL>task = self.db.Task.find_one_and_update(kwargs, {'<STR_LIT>': {'<STR_LIT:status>': '<STR_LIT>'}}, sort=sort)<EOL>try:<EOL><INDENT>if task is None:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(task_name, sort))<EOL>return False<EOL><DEDENT>else:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(task_name, sort))<EOL><DEDENT>_datetime = task['<STR_LIT:time>']<EOL>_script = task['<STR_LIT>']<EOL>_id = task['<STR_LIT>']<EOL>_hyper_parameters = task['<STR_LIT>']<EOL>_saved_result_keys = task['<STR_LIT>']<EOL>logging.info(\"<STR_LIT>\")<EOL>for key in _hyper_parameters:<EOL><INDENT>globals()[key] = _hyper_parameters[key]<EOL>logging.info(\"<STR_LIT>\".format(key, _hyper_parameters[key]))<EOL><DEDENT>s = time.time()<EOL>logging.info(\"<STR_LIT>\".format(task_name, sort, _datetime))<EOL>_script = _script.decode('<STR_LIT:utf-8>')<EOL>with tf.Graph().as_default():  <EOL><INDENT>exec(_script, globals())<EOL><DEDENT>_ = self.db.Task.find_one_and_update({'<STR_LIT>': _id}, {'<STR_LIT>': {'<STR_LIT:status>': '<STR_LIT>'}})<EOL>__result = {}<EOL>for _key in _saved_result_keys:<EOL><INDENT>logging.info(\"<STR_LIT>\".format(_key, globals()[_key], type(globals()[_key])))<EOL>__result.update({\"<STR_LIT:%s>\" % _key: globals()[_key]})<EOL><DEDENT>_ = self.db.Task.find_one_and_update(<EOL>{<EOL>'<STR_LIT>': _id<EOL>}, {'<STR_LIT>': {<EOL>'<STR_LIT:result>': __result<EOL>}}, return_document=pymongo.ReturnDocument.AFTER<EOL>)<EOL>logging.info(<EOL>\"<STR_LIT>\".<EOL>format(task_name, sort, _datetime,<EOL>time.time() - s)<EOL>)<EOL>return True<EOL><DEDENT>except Exception as e:<EOL><INDENT>exc_type, exc_obj, exc_tb = sys.exc_info()<EOL>fname = 
os.path.split(exc_tb.tb_frame.f_code.co_filename)[<NUM_LIT:1>]<EOL>logging.info(\"<STR_LIT>\".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))<EOL>logging.info(\"<STR_LIT>\")<EOL>_ = self.db.Task.find_one_and_update({'<STR_LIT>': _id}, {'<STR_LIT>': {'<STR_LIT:status>': '<STR_LIT>'}})<EOL>return False<EOL><DEDENT>", "docstring": "Finds and runs a pending task that in the first of the sorting list.\n\n        Parameters\n        -----------\n        task_name : str\n            The task name.\n        sort : List of tuple\n            PyMongo sort comment, search \"PyMongo find one sorting\" and `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details.\n        kwargs : other parameters\n            Users customized parameters such as description, version number.\n\n        Examples\n        ---------\n        Monitors the database and pull tasks to run\n        >>> while True:\n        >>>     print(\"waiting task from distributor\")\n        >>>     db.run_top_task(task_name='mnist', sort=[(\"time\", -1)])\n        >>>     time.sleep(1)\n\n        Returns\n        --------\n        boolean : True for success, False for fail.", "id": "f11203:c0:m19"}
{"signature": "def delete_datasets(self, **kwargs):", "body": "self._fill_project_info(kwargs)<EOL>self.db.Dataset.delete_many(kwargs)<EOL>logging.info(\"<STR_LIT>\")<EOL>", "docstring": "Delete datasets.\n\n        Parameters\n        -----------\n        kwargs : logging information\n            Find items to delete, leave it empty to delete all log.", "id": "f11203:c0:m11"}
{"signature": "def save_training_log(self, **kwargs):", "body": "self._fill_project_info(kwargs)<EOL>kwargs.update({'<STR_LIT:time>': datetime.utcnow()})<EOL>_result = self.db.TrainLog.insert_one(kwargs)<EOL>_log = self._print_dict(kwargs)<EOL>logging.info(\"<STR_LIT>\" + _log)<EOL>", "docstring": "Saves the training log, timestamp will be added automatically.\n\n        Parameters\n        -----------\n        kwargs : logging information\n            Events, such as accuracy, loss, step number and etc.\n\n        Examples\n        ---------\n        >>> db.save_training_log(accuracy=0.33, loss=0.98)", "id": "f11203:c0:m12"}
{"signature": "@staticmethod<EOL><INDENT>def _deserialization(ps):<DEDENT>", "body": "return pickle.loads(ps)<EOL>", "docstring": "Deseralize data.", "id": "f11203:c0:m4"}
{"signature": "def save_dataset(self, dataset=None, dataset_name=None, **kwargs):", "body": "self._fill_project_info(kwargs)<EOL>if dataset_name is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>kwargs.update({'<STR_LIT>': dataset_name})<EOL>s = time.time()<EOL>try:<EOL><INDENT>dataset_id = self.dataset_fs.put(self._serialization(dataset))<EOL>kwargs.update({'<STR_LIT>': dataset_id, '<STR_LIT:time>': datetime.utcnow()})<EOL>self.db.Dataset.insert_one(kwargs)<EOL>print(\"<STR_LIT>\".format(round(time.time() - s, <NUM_LIT:2>)))<EOL>return True<EOL><DEDENT>except Exception as e:<EOL><INDENT>exc_type, exc_obj, exc_tb = sys.exc_info()<EOL>fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[<NUM_LIT:1>]<EOL>logging.info(\"<STR_LIT>\".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))<EOL>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>", "docstring": "Saves one dataset into database, timestamp will be added automatically.\n\n        Parameters\n        ----------\n        dataset : any type\n            The dataset you want to store.\n        dataset_name : str\n            The name of dataset.\n        kwargs : other events\n            Other events, such as description, author and etc (optinal).\n\n        Examples\n        ----------\n        Save dataset\n        >>> db.save_dataset([X_train, y_train, X_test, y_test], 'mnist', description='this is a tutorial')\n\n        Get dataset\n        >>> dataset = db.find_top_dataset('mnist')\n\n        Returns\n        ---------\n        boolean : Return True if save success, otherwise, return False.", "id": "f11203:c0:m8"}
{"signature": "def create_task(self, task_name=None, script=None, hyper_parameters=None, saved_result_keys=None, **kwargs):", "body": "if not isinstance(task_name, str):  <EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(script, str):  <EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>if hyper_parameters is None:<EOL><INDENT>hyper_parameters = {}<EOL><DEDENT>if saved_result_keys is None:<EOL><INDENT>saved_result_keys = []<EOL><DEDENT>self._fill_project_info(kwargs)<EOL>kwargs.update({'<STR_LIT:time>': datetime.utcnow()})<EOL>kwargs.update({'<STR_LIT>': hyper_parameters})<EOL>kwargs.update({'<STR_LIT>': saved_result_keys})<EOL>_script = open(script, '<STR_LIT:rb>').read()<EOL>kwargs.update({'<STR_LIT:status>': '<STR_LIT>', '<STR_LIT>': _script, '<STR_LIT:result>': {}})<EOL>self.db.Task.insert_one(kwargs)<EOL>logging.info(\"<STR_LIT>\".format(task_name, script))<EOL>", "docstring": "Uploads a task to the database, timestamp will be added automatically.\n\n        Parameters\n        -----------\n        task_name : str\n            The task name.\n        script : str\n            File name of the python script.\n        hyper_parameters : dictionary\n            The hyper parameters pass into the script.\n        saved_result_keys : list of str\n            The keys of the task results to keep in the database when the task finishes.\n        kwargs : other parameters\n            Users customized parameters such as description, version number.\n\n        Examples\n        -----------\n        Uploads a task\n        >>> db.create_task(task_name='mnist', script='example/tutorial_mnist_simple.py', description='simple tutorial')\n\n        Finds and runs the latest task\n        >>> db.run_top_task(sess=sess, sort=[(\"time\", pymongo.DESCENDING)])\n        >>> db.run_top_task(sess=sess, sort=[(\"time\", -1)])\n\n        Finds and runs the oldest task\n        >>> db.run_top_task(sess=sess, sort=[(\"time\", pymongo.ASCENDING)])\n        >>> 
db.run_top_task(sess=sess, sort=[(\"time\", 1)])", "id": "f11203:c0:m18"}
{"signature": "def sign(x):", "body": "with tf.get_default_graph().gradient_override_map({\"<STR_LIT>\": \"<STR_LIT>\"}):<EOL><INDENT>return tf.sign(x, name='<STR_LIT>')<EOL><DEDENT>", "docstring": "Sign function.\n\n    Clip and binarize tensor using the straight through estimator (STE) for the gradient, usually be used for\n    quantizing values in `Binarized Neural Networks`: https://arxiv.org/abs/1602.02830.\n\n    Parameters\n    ----------\n    x : Tensor\n        input.\n\n    Examples\n    --------\n    >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense')\n\n    Returns\n    -------\n    Tensor\n        A ``Tensor`` in the same type as ``x``.\n\n    References\n    ----------\n    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. (2013)`\n       http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf\n\n    - `BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1, Courbariaux et al. (2016)`\n       https://arxiv.org/abs/1602.02830", "id": "f11204:m6"}
{"signature": "@deprecated(date=\"<STR_LIT>\", instructions=\"<STR_LIT>\")<EOL>def leaky_relu(x, alpha=<NUM_LIT>, name=\"<STR_LIT>\"):", "body": "if not (<NUM_LIT:0> < alpha <= <NUM_LIT:1>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>with tf.name_scope(name, \"<STR_LIT>\") as name_scope:<EOL><INDENT>x = tf.convert_to_tensor(x, name=\"<STR_LIT>\")<EOL>return tf.maximum(x, alpha * x, name=name_scope)<EOL><DEDENT>", "docstring": "leaky_relu can be used through its shortcut: :func:`tl.act.lrelu`.\n\n    This function is a modified version of ReLU, introducing a nonzero gradient for negative input. Introduced by the paper:\n    `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__\n\n    The function return the following results:\n      - When x < 0: ``f(x) = alpha_low * x``.\n      - When x >= 0: ``f(x) = x``.\n\n    Parameters\n    ----------\n    x : Tensor\n        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.\n    alpha : float\n        Slope.\n    name : str\n        The function name (optional).\n\n    Examples\n    --------\n    >>> import tensorlayer as tl\n    >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense')\n\n    Returns\n    -------\n    Tensor\n        A ``Tensor`` in the same type as ``x``.\n\n    References\n    ----------\n    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__", "id": "f11204:m1"}
{"signature": "def leaky_twice_relu6(x, alpha_low=<NUM_LIT>, alpha_high=<NUM_LIT>, name=\"<STR_LIT>\"):", "body": "if not isinstance(alpha_high, tf.Tensor) and not (<NUM_LIT:0> < alpha_high <= <NUM_LIT:1>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(alpha_low, tf.Tensor) and not (<NUM_LIT:0> < alpha_low <= <NUM_LIT:1>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>with tf.name_scope(name, \"<STR_LIT>\") as name_scope:<EOL><INDENT>x = tf.convert_to_tensor(x, name=\"<STR_LIT>\")<EOL>x_is_above_0 = tf.minimum(x, <NUM_LIT:6> * (<NUM_LIT:1> - alpha_high) + alpha_high * x)<EOL>x_is_below_0 = tf.minimum(alpha_low * x, <NUM_LIT:0>)<EOL>return tf.maximum(x_is_above_0, x_is_below_0, name=name_scope)<EOL><DEDENT>", "docstring": ":func:`leaky_twice_relu6` can be used through its shortcut: :func:`:func:`tl.act.ltrelu6`.\n\n    This activation function is a modified version :func:`leaky_relu` introduced by the following paper:\n    `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__\n\n    This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:\n    `Convolutional Deep Belief Networks on CIFAR-10 [A. 
Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__\n\n    This function push further the logic by adding `leaky` behaviour both below zero and above six.\n\n    The function return the following results:\n      - When x < 0: ``f(x) = alpha_low * x``.\n      - When x in [0, 6]: ``f(x) = x``.\n      - When x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.\n\n    Parameters\n    ----------\n    x : Tensor\n        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.\n    alpha_low : float\n        Slope for x < 0: ``f(x) = alpha_low * x``.\n    alpha_high : float\n        Slope for x < 6: ``f(x) = 6 (alpha_high * (x-6))``.\n    name : str\n        The function name (optional).\n\n    Examples\n    --------\n    >>> import tensorlayer as tl\n    >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_twice_relu6(x, 0.2, 0.2), name='dense')\n\n    Returns\n    -------\n    Tensor\n        A ``Tensor`` in the same type as ``x``.\n\n    References\n    ----------\n    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__\n    - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__", "id": "f11204:m3"}
{"signature": "def discount_episode_rewards(rewards=None, gamma=<NUM_LIT>, mode=<NUM_LIT:0>):", "body": "if rewards is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>discounted_r = np.zeros_like(rewards, dtype=np.float32)<EOL>running_add = <NUM_LIT:0><EOL>for t in reversed(xrange(<NUM_LIT:0>, rewards.size)):<EOL><INDENT>if mode == <NUM_LIT:0>:<EOL><INDENT>if rewards[t] != <NUM_LIT:0>: running_add = <NUM_LIT:0><EOL><DEDENT>running_add = running_add * gamma + rewards[t]<EOL>discounted_r[t] = running_add<EOL><DEDENT>return discounted_r<EOL>", "docstring": "Take 1D float array of rewards and compute discounted rewards for an\n    episode. When encount a non-zero value, consider as the end a of an episode.\n\n    Parameters\n    ----------\n    rewards : list\n        List of rewards\n    gamma : float\n        Discounted factor\n    mode : int\n        Mode for computing the discount rewards.\n            - If mode == 0, reset the discount process when encount a non-zero reward (Ping-pong game).\n            - If mode == 1, would not reset the discount process.\n\n    Returns\n    --------\n    list of float\n        The discounted rewards.\n\n    Examples\n    ----------\n    >>> rewards = np.asarray([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1])\n    >>> gamma = 0.9\n    >>> discount_rewards = tl.rein.discount_episode_rewards(rewards, gamma)\n    >>> print(discount_rewards)\n    [ 0.72899997  0.81        0.89999998  1.          0.72899997  0.81\n    0.89999998  1.          0.72899997  0.81        0.89999998  1.        ]\n    >>> discount_rewards = tl.rein.discount_episode_rewards(rewards, gamma, mode=1)\n    >>> print(discount_rewards)\n    [ 1.52110755  1.69011939  1.87791049  2.08656716  1.20729685  1.34144104\n    1.49048996  1.65610003  0.72899997  0.81        0.89999998  1.        ]", "id": "f11205:m0"}
{"signature": "def choice_action_by_probs(probs=(<NUM_LIT:0.5>, <NUM_LIT:0.5>), action_list=None):", "body": "if action_list is None:<EOL><INDENT>n_action = len(probs)<EOL>action_list = np.arange(n_action)<EOL><DEDENT>else:<EOL><INDENT>if len(action_list) != len(probs):<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return np.random.choice(action_list, p=probs)<EOL>", "docstring": "Choice and return an an action by given the action probability distribution.\n\n    Parameters\n    ------------\n    probs : list of float.\n        The probability distribution of all actions.\n    action_list : None or a list of int or others\n        A list of action in integer, string or others. If None, returns an integer range between 0 and len(probs)-1.\n\n    Returns\n    --------\n    float int or str\n        The chosen action.\n\n    Examples\n    ----------\n    >>> for _ in range(5):\n    >>>     a = choice_action_by_probs([0.2, 0.4, 0.4])\n    >>>     print(a)\n    0\n    1\n    1\n    2\n    1\n    >>> for _ in range(3):\n    >>>     a = choice_action_by_probs([0.5, 0.5], ['a', 'b'])\n    >>>     print(a)\n    a\n    b\n    b", "id": "f11205:m3"}
{"signature": "def draw_boxes_and_labels_to_image(<EOL>image, classes, coords, scores, classes_list, is_center=True, is_rescale=True, save_name=None<EOL>):", "body": "if len(coords) != len(classes):<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>if len(scores) > <NUM_LIT:0> and len(scores) != len(classes):<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>image = image.copy()<EOL>imh, imw = image.shape[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>thick = int((imh + imw) // <NUM_LIT>)<EOL>for i, _v in enumerate(coords):<EOL><INDENT>if is_center:<EOL><INDENT>x, y, x2, y2 = tl.prepro.obj_box_coord_centroid_to_upleft_butright(coords[i])<EOL><DEDENT>else:<EOL><INDENT>x, y, x2, y2 = coords[i]<EOL><DEDENT>if is_rescale:  <EOL><INDENT>x, y, x2, y2 = tl.prepro.obj_box_coord_scale_to_pixelunit([x, y, x2, y2], (imh, imw))<EOL><DEDENT>cv2.rectangle(<EOL>image,<EOL>(int(x), int(y)),<EOL>(int(x2), int(y2)),  <EOL>[<NUM_LIT:0>, <NUM_LIT:255>, <NUM_LIT:0>],<EOL>thick<EOL>)<EOL>cv2.putText(<EOL>image,<EOL>classes_list[classes[i]] + ((\"<STR_LIT>\" % (scores[i])) if (len(scores) != <NUM_LIT:0>) else \"<STR_LIT:U+0020>\"),<EOL>(int(x), int(y)),  <EOL><NUM_LIT:0>,<EOL><NUM_LIT> * imh,  <EOL>[<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT>],  <EOL>int(thick / <NUM_LIT:2>) + <NUM_LIT:1><EOL>)  <EOL><DEDENT>if save_name is not None:<EOL><INDENT>save_image(image, save_name)<EOL><DEDENT>return image<EOL>", "docstring": "Draw bboxes and class labels on image. Return or save the image with bboxes, example in the docs of ``tl.prepro``.\n\n    Parameters\n    -----------\n    image : numpy.array\n        The RGB image [height, width, channel].\n    classes : list of int\n        A list of class ID (int).\n    coords : list of int\n        A list of list for coordinates.\n            - Should be [x, y, x2, y2] (up-left and botton-right format)\n            - If [x_center, y_center, w, h] (set is_center to True).\n    scores : list of float\n        A list of score (float). 
(Optional)\n    classes_list : list of str\n        for converting ID to string on image.\n    is_center : boolean\n        Whether the coordinates is [x_center, y_center, w, h]\n            - If coordinates are [x_center, y_center, w, h], set it to True for converting it to [x, y, x2, y2] (up-left and botton-right) internally.\n            - If coordinates are [x1, x2, y1, y2], set it to False.\n    is_rescale : boolean\n        Whether to rescale the coordinates from pixel-unit format to ratio format.\n            - If True, the input coordinates are the portion of width and high, this API will scale the coordinates to pixel unit internally.\n            - If False, feed the coordinates with pixel unit format.\n    save_name : None or str\n        The name of image file (i.e. image.png), if None, not to save image.\n\n    Returns\n    -------\n    numpy.array\n        The saved image.\n\n    References\n    -----------\n    - OpenCV rectangle and putText.\n    - `scikit-image <http://scikit-image.org/docs/dev/api/skimage.draw.html#skimage.draw.rectangle>`__.", "id": "f11206:m4"}
{"signature": "def draw_weights(W=None, second=<NUM_LIT:10>, saveable=True, shape=None, name='<STR_LIT>', fig_idx=<NUM_LIT>):", "body": "if shape is None:<EOL><INDENT>shape = [<NUM_LIT>, <NUM_LIT>]<EOL><DEDENT>import matplotlib.pyplot as plt<EOL>if saveable is False:<EOL><INDENT>plt.ion()<EOL><DEDENT>fig = plt.figure(fig_idx)  <EOL>n_units = W.shape[<NUM_LIT:1>]<EOL>num_r = int(np.sqrt(n_units))  <EOL>num_c = int(np.ceil(n_units / num_r))<EOL>count = int(<NUM_LIT:1>)<EOL>for _row in range(<NUM_LIT:1>, num_r + <NUM_LIT:1>):<EOL><INDENT>for _col in range(<NUM_LIT:1>, num_c + <NUM_LIT:1>):<EOL><INDENT>if count > n_units:<EOL><INDENT>break<EOL><DEDENT>fig.add_subplot(num_r, num_c, count)<EOL>feature = W[:, count - <NUM_LIT:1>] / np.sqrt((W[:, count - <NUM_LIT:1>]**<NUM_LIT:2>).sum())<EOL>plt.imshow(<EOL>np.reshape(feature, (shape[<NUM_LIT:0>], shape[<NUM_LIT:1>])), cmap='<STR_LIT>', interpolation=\"<STR_LIT>\"<EOL>)  <EOL>plt.gca().xaxis.set_major_locator(plt.NullLocator())  <EOL>plt.gca().yaxis.set_major_locator(plt.NullLocator())<EOL>count = count + <NUM_LIT:1><EOL><DEDENT><DEDENT>if saveable:<EOL><INDENT>plt.savefig(name + '<STR_LIT>', format='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.draw()<EOL>plt.pause(second)<EOL><DEDENT>", "docstring": "Visualize every columns of the weight matrix to a group of Greyscale img.\n\n    Parameters\n    ----------\n    W : numpy.array\n        The weight matrix\n    second : int\n        The display second(s) for the image(s), if saveable is False.\n    saveable : boolean\n        Save or plot the figure.\n    shape : a list with 2 int or None\n        The shape of feature image, MNIST is [28, 80].\n    name : a string\n        A name to save the image, if saveable is True.\n    fig_idx : int\n        matplotlib figure index.\n\n    Examples\n    --------\n    >>> tl.visualize.draw_weights(network.all_params[0].eval(), second=10, saveable=True, name='weight_of_1st_layer', fig_idx=2012)", "id": "f11206:m10"}
{"signature": "def save_images(images, size, image_path='<STR_LIT>'):", "body": "if len(images.shape) == <NUM_LIT:3>:  <EOL><INDENT>images = images[:, :, :, np.newaxis]<EOL><DEDENT>def merge(images, size):<EOL><INDENT>h, w = images.shape[<NUM_LIT:1>], images.shape[<NUM_LIT:2>]<EOL>img = np.zeros((h * size[<NUM_LIT:0>], w * size[<NUM_LIT:1>], <NUM_LIT:3>), dtype=images.dtype)<EOL>for idx, image in enumerate(images):<EOL><INDENT>i = idx % size[<NUM_LIT:1>]<EOL>j = idx // size[<NUM_LIT:1>]<EOL>img[j * h:j * h + h, i * w:i * w + w, :] = image<EOL><DEDENT>return img<EOL><DEDENT>def imsave(images, size, path):<EOL><INDENT>if np.max(images) <= <NUM_LIT:1> and (-<NUM_LIT:1> <= np.min(images) < <NUM_LIT:0>):<EOL><INDENT>images = ((images + <NUM_LIT:1>) * <NUM_LIT>).astype(np.uint8)<EOL><DEDENT>elif np.max(images) <= <NUM_LIT:1> and np.min(images) >= <NUM_LIT:0>:<EOL><INDENT>images = (images * <NUM_LIT:255>).astype(np.uint8)<EOL><DEDENT>return imageio.imwrite(path, merge(images, size))<EOL><DEDENT>if len(images) > size[<NUM_LIT:0>] * size[<NUM_LIT:1>]:<EOL><INDENT>raise AssertionError(\"<STR_LIT>\".format(len(images)))<EOL><DEDENT>return imsave(images, size, image_path)<EOL>", "docstring": "Save multiple images into one single image.\n\n    Parameters\n    -----------\n    images : numpy array\n        (batch, w, h, c)\n    size : list of 2 ints\n        row and column number.\n        number of images should be equal or less than size[0] * size[1]\n    image_path : str\n        save path\n\n    Examples\n    ---------\n    >>> import numpy as np\n    >>> import tensorlayer as tl\n    >>> images = np.random.rand(64, 100, 100, 3)\n    >>> tl.visualize.save_images(images, [8, 8], 'temp.png')", "id": "f11206:m3"}
{"signature": "def tsne_embedding(embeddings, reverse_dictionary, plot_only=<NUM_LIT>, second=<NUM_LIT:5>, saveable=False, name='<STR_LIT>', fig_idx=<NUM_LIT>):", "body": "import matplotlib.pyplot as plt<EOL>def plot_with_labels(low_dim_embs, labels, figsize=(<NUM_LIT>, <NUM_LIT>), second=<NUM_LIT:5>, saveable=True, name='<STR_LIT>', fig_idx=<NUM_LIT>):<EOL><INDENT>if low_dim_embs.shape[<NUM_LIT:0>] < len(labels):<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>if saveable is False:<EOL><INDENT>plt.ion()<EOL>plt.figure(fig_idx)<EOL><DEDENT>plt.figure(figsize=figsize)  <EOL>for i, label in enumerate(labels):<EOL><INDENT>x, y = low_dim_embs[i, :]<EOL>plt.scatter(x, y)<EOL>plt.annotate(label, xy=(x, y), xytext=(<NUM_LIT:5>, <NUM_LIT:2>), textcoords='<STR_LIT>', ha='<STR_LIT:right>', va='<STR_LIT>')<EOL><DEDENT>if saveable:<EOL><INDENT>plt.savefig(name + '<STR_LIT>', format='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.draw()<EOL>plt.pause(second)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>from sklearn.manifold import TSNE<EOL>from six.moves import xrange<EOL>tsne = TSNE(perplexity=<NUM_LIT:30>, n_components=<NUM_LIT:2>, init='<STR_LIT>', n_iter=<NUM_LIT>)<EOL>low_dim_embs = tsne.fit_transform(embeddings[:plot_only, :])<EOL>labels = [reverse_dictionary[i] for i in xrange(plot_only)]<EOL>plot_with_labels(low_dim_embs, labels, second=second, saveable=saveable, name=name, fig_idx=fig_idx)<EOL><DEDENT>except ImportError:<EOL><INDENT>_err = \"<STR_LIT>\"<EOL>tl.logging.error(_err)<EOL>raise ImportError(_err)<EOL><DEDENT>", "docstring": "Visualize the embeddings by using t-SNE.\n\n    Parameters\n    ----------\n    embeddings : numpy.array\n        The embedding matrix.\n    reverse_dictionary : dictionary\n        id_to_word, mapping id to unique word.\n    plot_only : int\n        The number of examples to plot, choice the most common words.\n    second : int\n        The display second(s) for the image(s), if saveable is False.\n    saveable : boolean\n        
Save or plot the figure.\n    name : str\n        A name to save the image, if saveable is True.\n    fig_idx : int\n        matplotlib figure index.\n\n    Examples\n    --------\n    >>> see 'tutorial_word2vec_basic.py'\n    >>> final_embeddings = normalized_embeddings.eval()\n    >>> tl.visualize.tsne_embedding(final_embeddings, labels, reverse_dictionary,\n    ...                   plot_only=500, second=5, saveable=False, name='tsne')", "id": "f11206:m9"}
{"signature": "def CNN2d(CNN=None, second=<NUM_LIT:10>, saveable=True, name='<STR_LIT>', fig_idx=<NUM_LIT>):", "body": "import matplotlib.pyplot as plt<EOL>n_mask = CNN.shape[<NUM_LIT:3>]<EOL>n_row = CNN.shape[<NUM_LIT:0>]<EOL>n_col = CNN.shape[<NUM_LIT:1>]<EOL>n_color = CNN.shape[<NUM_LIT:2>]<EOL>row = int(np.sqrt(n_mask))<EOL>col = int(np.ceil(n_mask / row))<EOL>plt.ion()  <EOL>fig = plt.figure(fig_idx)<EOL>count = <NUM_LIT:1><EOL>for _ir in range(<NUM_LIT:1>, row + <NUM_LIT:1>):<EOL><INDENT>for _ic in range(<NUM_LIT:1>, col + <NUM_LIT:1>):<EOL><INDENT>if count > n_mask:<EOL><INDENT>break<EOL><DEDENT>fig.add_subplot(col, row, count)<EOL>if n_color == <NUM_LIT:1>:<EOL><INDENT>plt.imshow(np.reshape(CNN[:, :, :, count - <NUM_LIT:1>], (n_row, n_col)), cmap='<STR_LIT>', interpolation=\"<STR_LIT>\")<EOL><DEDENT>elif n_color == <NUM_LIT:3>:<EOL><INDENT>plt.imshow(<EOL>np.reshape(CNN[:, :, :, count - <NUM_LIT:1>], (n_row, n_col, n_color)), cmap='<STR_LIT>', interpolation=\"<STR_LIT>\"<EOL>)<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>plt.gca().xaxis.set_major_locator(plt.NullLocator())  <EOL>plt.gca().yaxis.set_major_locator(plt.NullLocator())<EOL>count = count + <NUM_LIT:1><EOL><DEDENT><DEDENT>if saveable:<EOL><INDENT>plt.savefig(name + '<STR_LIT>', format='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.draw()<EOL>plt.pause(second)<EOL><DEDENT>", "docstring": "Display a group of RGB or Greyscale CNN masks.\n\n    Parameters\n    ----------\n    CNN : numpy.array\n        The image. 
e.g: 64 5x5 RGB images can be (5, 5, 3, 64).\n    second : int\n        The display second(s) for the image(s), if saveable is False.\n    saveable : boolean\n        Save or plot the figure.\n    name : str\n        A name to save the image, if saveable is True.\n    fig_idx : int\n        The matplotlib figure index.\n\n    Examples\n    --------\n    >>> tl.visualize.CNN2d(network.all_params[0].eval(), second=10, saveable=True, name='cnn1_mnist', fig_idx=2012)", "id": "f11206:m7"}
{"signature": "def build_words_dataset(words=None, vocabulary_size=<NUM_LIT>, printable=True, unk_key='<STR_LIT>'):", "body": "if words is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>count = [[unk_key, -<NUM_LIT:1>]]<EOL>count.extend(collections.Counter(words).most_common(vocabulary_size - <NUM_LIT:1>))<EOL>dictionary = dict()<EOL>for word, _ in count:<EOL><INDENT>dictionary[word] = len(dictionary)<EOL><DEDENT>data = list()<EOL>unk_count = <NUM_LIT:0><EOL>for word in words:<EOL><INDENT>if word in dictionary:<EOL><INDENT>index = dictionary[word]<EOL><DEDENT>else:<EOL><INDENT>index = <NUM_LIT:0>  <EOL>unk_count += <NUM_LIT:1><EOL><DEDENT>data.append(index)<EOL><DEDENT>count[<NUM_LIT:0>][<NUM_LIT:1>] = unk_count<EOL>reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))<EOL>if printable:<EOL><INDENT>tl.logging.info('<STR_LIT>' % len(collections.Counter(words).keys()))<EOL>tl.logging.info('<STR_LIT>'.format(vocabulary_size))<EOL><DEDENT>if len(collections.Counter(words).keys()) < vocabulary_size:<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>return data, count, dictionary, reverse_dictionary<EOL>", "docstring": "Build the words dictionary and replace rare words with 'UNK' token.\n    The most common word has the smallest integer id.\n\n    Parameters\n    ----------\n    words : list of str or byte\n        The context in list format. You may need to do preprocessing on the words, such as lower case, remove marks etc.\n    vocabulary_size : int\n        The maximum vocabulary size, limiting the vocabulary size. 
Then the script replaces rare words with 'UNK' token.\n    printable : boolean\n        Whether to print the read vocabulary size of the given words.\n    unk_key : str\n        Represent the unknown words.\n\n    Returns\n    --------\n    data : list of int\n        The context in a list of ID.\n    count : list of tuple and list\n        Pair words and IDs.\n            - count[0] is a list : the number of rare words\n            - count[1:] are tuples : the number of occurrence of each word\n            - e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]\n    dictionary : dictionary\n        It is `word_to_id` that maps word to ID.\n    reverse_dictionary : a dictionary\n        It is `id_to_word` that maps ID to word.\n\n    Examples\n    --------\n    >>> words = tl.files.load_matt_mahoney_text8_dataset()\n    >>> vocabulary_size = 50000\n    >>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size)\n\n    References\n    -----------------\n    - `tensorflow/examples/tutorials/word2vec/word2vec_basic.py <https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/word2vec/word2vec_basic.py>`__", "id": "f11210:m10"}
{"signature": "def id_to_word(self, word_id):", "body": "if word_id >= len(self.reverse_vocab):<EOL><INDENT>return self.reverse_vocab[self.unk_id]<EOL><DEDENT>else:<EOL><INDENT>return self.reverse_vocab[word_id]<EOL><DEDENT>", "docstring": "Returns the word string of an integer word id.", "id": "f11210:c1:m2"}
{"signature": "def initialize_vocabulary(vocabulary_path):", "body": "if gfile.Exists(vocabulary_path):<EOL><INDENT>rev_vocab = []<EOL>with gfile.GFile(vocabulary_path, mode=\"<STR_LIT:rb>\") as f:<EOL><INDENT>rev_vocab.extend(f.readlines())<EOL><DEDENT>rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]<EOL>vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])<EOL>return vocab, rev_vocab<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\", vocabulary_path)<EOL><DEDENT>", "docstring": "Initialize vocabulary from file, return the `word_to_id` (dictionary)\n    and `id_to_word` (list).\n\n    We assume the vocabulary is stored one-item-per-line, so a file will result in a vocabulary {\"dog\": 0, \"cat\": 1}, and this function will also return the reversed-vocabulary [\"dog\", \"cat\"].\n\n    Parameters\n    -----------\n    vocabulary_path : str\n        Path to the file containing the vocabulary.\n\n    Returns\n    --------\n    vocab : dictionary\n        a dictionary that maps word to ID.\n    rev_vocab : list of int\n        a list that maps ID to word.\n\n    Examples\n    ---------\n    >>> Assume 'test' contains\n    dog\n    cat\n    bird\n    >>> vocab, rev_vocab = tl.nlp.initialize_vocabulary(\"test\")\n    >>> print(vocab)\n    >>> {b'cat': 1, b'dog': 0, b'bird': 2}\n    >>> print(rev_vocab)\n    >>> [b'dog', b'cat', b'bird']\n\n    Raises\n    -------\n    ValueError : if the provided vocabulary_path does not exist.", "id": "f11210:m16"}
{"signature": "def data_to_token_ids(<EOL>data_path, target_path, vocabulary_path, tokenizer=None, normalize_digits=True, UNK_ID=<NUM_LIT:3>,<EOL>_DIGIT_RE=re.compile(br\"<STR_LIT>\")<EOL>):", "body": "if not gfile.Exists(target_path):<EOL><INDENT>tl.logging.info(\"<STR_LIT>\" % data_path)<EOL>vocab, _ = initialize_vocabulary(vocabulary_path)<EOL>with gfile.GFile(data_path, mode=\"<STR_LIT:rb>\") as data_file:<EOL><INDENT>with gfile.GFile(target_path, mode=\"<STR_LIT:w>\") as tokens_file:<EOL><INDENT>counter = <NUM_LIT:0><EOL>for line in data_file:<EOL><INDENT>counter += <NUM_LIT:1><EOL>if counter % <NUM_LIT> == <NUM_LIT:0>:<EOL><INDENT>tl.logging.info(\"<STR_LIT>\" % counter)<EOL><DEDENT>token_ids = sentence_to_token_ids(<EOL>line, vocab, tokenizer, normalize_digits, UNK_ID=UNK_ID, _DIGIT_RE=_DIGIT_RE<EOL>)<EOL>tokens_file.write(\"<STR_LIT:U+0020>\".join([str(tok) for tok in token_ids]) + \"<STR_LIT:\\n>\")<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>tl.logging.info(\"<STR_LIT>\" % target_path)<EOL><DEDENT>", "docstring": "Tokenize data file and turn into token-ids using given vocabulary file.\n\n    This function loads data line-by-line from data_path, calls the above\n    sentence_to_token_ids, and saves the result to target_path. See comment\n    for sentence_to_token_ids on the details of token-ids format.\n\n    Parameters\n    -----------\n    data_path : str\n        Path to the data file in one-sentence-per-line format.\n    target_path : str\n        Path where the file with token-ids will be created.\n    vocabulary_path : str\n        Path to the vocabulary file.\n    tokenizer : function\n        A function to use to tokenize each sentence. If None, ``basic_tokenizer`` will be used.\n    normalize_digits : boolean\n        If true, all digits are replaced by 0.\n\n    References\n    ----------\n    - Code from ``/tensorflow/models/rnn/translation/data_utils.py``", "id": "f11210:m18"}
{"signature": "def sample(a=None, temperature=<NUM_LIT:1.0>):", "body": "if a is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>b = np.copy(a)<EOL>try:<EOL><INDENT>if temperature == <NUM_LIT:1>:<EOL><INDENT>return np.argmax(np.random.multinomial(<NUM_LIT:1>, a, <NUM_LIT:1>))<EOL><DEDENT>if temperature is None:<EOL><INDENT>return np.argmax(a)<EOL><DEDENT>else:<EOL><INDENT>a = np.log(a) / temperature<EOL>a = np.exp(a) / np.sum(np.exp(a))<EOL>return np.argmax(np.random.multinomial(<NUM_LIT:1>, a, <NUM_LIT:1>))<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>message = \"<STR_LIT>\"<EOL>warnings.warn(message, Warning)<EOL>return np.argmax(np.random.multinomial(<NUM_LIT:1>, b, <NUM_LIT:1>))<EOL><DEDENT>", "docstring": "Sample an index from a probability array.\n\n    Parameters\n    ----------\n    a : list of float\n        List of probabilities.\n    temperature : float or None\n        The higher the more uniform. When a = [0.1, 0.2, 0.7],\n            - temperature = 0.7, the distribution will be sharpen [0.05048273,  0.13588945,  0.81362782]\n            - temperature = 1.0, the distribution will be the same [0.1,    0.2,    0.7]\n            - temperature = 1.5, the distribution will be filtered [0.16008435,  0.25411807,  0.58579758]\n            - If None, it will be ``np.argmax(a)``\n\n    Notes\n    ------\n    - No matter what is the temperature and input list, the sum of all probabilities will be one. Even if input list = [1, 100, 200], the sum of all probabilities will still be one.\n    - For large vocabulary size, choice a higher temperature or ``tl.nlp.sample_top`` to avoid error.", "id": "f11210:m1"}
{"signature": "def build_vocab(data):", "body": "<EOL>counter = collections.Counter(data)<EOL>count_pairs = sorted(counter.items(), key=lambda x: (-x[<NUM_LIT:1>], x[<NUM_LIT:0>]))<EOL>words, _ = list(zip(*count_pairs))<EOL>word_to_id = dict(zip(words, range(len(words))))<EOL>return word_to_id<EOL>", "docstring": "Build vocabulary.\n\n    Given the context in list format.\n    Return the vocabulary, which is a dictionary for word to id.\n    e.g. {'campbell': 2587, 'atlantic': 2247, 'aoun': 6746 .... }\n\n    Parameters\n    ----------\n    data : list of str\n        The context in list format\n\n    Returns\n    --------\n    dictionary\n        that maps word to unique ID. e.g. {'campbell': 2587, 'atlantic': 2247, 'aoun': 6746 .... }\n\n    References\n    ---------------\n    - `tensorflow.models.rnn.ptb.reader <https://github.com/tensorflow/tensorflow/tree/master/tensorflow/models/rnn/ptb>`_\n\n    Examples\n    --------\n    >>> data_path = os.getcwd() + '/simple-examples/data'\n    >>> train_path = os.path.join(data_path, \"ptb.train.txt\")\n    >>> word_to_id = build_vocab(read_txt_words(train_path))", "id": "f11210:m8"}
{"signature": "def words_to_word_ids(data=None, word_to_id=None, unk_key='<STR_LIT>'):", "body": "if data is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>if word_to_id is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>word_ids = []<EOL>for word in data:<EOL><INDENT>if word_to_id.get(word) is not None:<EOL><INDENT>word_ids.append(word_to_id[word])<EOL><DEDENT>else:<EOL><INDENT>word_ids.append(word_to_id[unk_key])<EOL><DEDENT><DEDENT>return word_ids<EOL>", "docstring": "Convert a list of string (words) to IDs.\n\n    Parameters\n    ----------\n    data : list of string or byte\n        The context in list format\n    word_to_id : a dictionary\n        that maps word to ID.\n    unk_key : str\n        Represent the unknown words.\n\n    Returns\n    --------\n    list of int\n        A list of IDs to represent the context.\n\n    Examples\n    --------\n    >>> words = tl.files.load_matt_mahoney_text8_dataset()\n    >>> vocabulary_size = 50000\n    >>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True)\n    >>> context = [b'hello', b'how', b'are', b'you']\n    >>> ids = tl.nlp.words_to_word_ids(words, dictionary)\n    >>> context = tl.nlp.word_ids_to_words(ids, reverse_dictionary)\n    >>> print(ids)\n    [6434, 311, 26, 207]\n    >>> print(context)\n    [b'hello', b'how', b'are', b'you']\n\n    References\n    ---------------\n    - `tensorflow.models.rnn.ptb.reader <https://github.com/tensorflow/tensorflow/tree/master/tensorflow/models/rnn/ptb>`__", "id": "f11210:m11"}
{"signature": "def sentence_to_token_ids(<EOL>sentence, vocabulary, tokenizer=None, normalize_digits=True, UNK_ID=<NUM_LIT:3>, _DIGIT_RE=re.compile(br\"<STR_LIT>\")<EOL>):", "body": "if tokenizer:<EOL><INDENT>words = tokenizer(sentence)<EOL><DEDENT>else:<EOL><INDENT>words = basic_tokenizer(sentence)<EOL><DEDENT>if not normalize_digits:<EOL><INDENT>return [vocabulary.get(w, UNK_ID) for w in words]<EOL><DEDENT>return [vocabulary.get(re.sub(_DIGIT_RE, b\"<STR_LIT:0>\", w), UNK_ID) for w in words]<EOL>", "docstring": "Convert a string to list of integers representing token-ids.\n\n    For example, a sentence \"I have a dog\" may become tokenized into\n    [\"I\", \"have\", \"a\", \"dog\"] and with vocabulary {\"I\": 1, \"have\": 2,\n    \"a\": 4, \"dog\": 7\"} this function will return [1, 2, 4, 7].\n\n    Parameters\n    -----------\n    sentence : tensorflow.python.platform.gfile.GFile Object\n        The sentence in bytes format to convert to token-ids, see ``basic_tokenizer()`` and ``data_to_token_ids()``.\n    vocabulary : dictionary\n        Mmapping tokens to integers.\n    tokenizer : function\n        A function to use to tokenize each sentence. If None, ``basic_tokenizer`` will be used.\n    normalize_digits : boolean\n        If true, all digits are replaced by 0.\n\n    Returns\n    --------\n    list of int\n        The token-ids for the sentence.", "id": "f11210:m17"}
{"signature": "def simple_read_words(filename=\"<STR_LIT>\"):", "body": "with open(filename, \"<STR_LIT:r>\") as f:<EOL><INDENT>words = f.read()<EOL>return words<EOL><DEDENT>", "docstring": "Read context from file without any preprocessing.\n\n    Parameters\n    ----------\n    filename : str\n        A file path (like .txt file)\n\n    Returns\n    --------\n    str\n        The context in a string.", "id": "f11210:m5"}
{"signature": "def __init__(self, vocab, unk_id):", "body": "self._vocab = vocab<EOL>self._unk_id = unk_id<EOL>", "docstring": "Initialize the vocabulary.", "id": "f11210:c0:m0"}
{"signature": "def word_ids_to_words(data, id_to_word):", "body": "return [id_to_word[i] for i in data]<EOL>", "docstring": "Convert a list of integer to strings (words).\n\n    Parameters\n    ----------\n    data : list of int\n        The context in list format.\n    id_to_word : dictionary\n        a dictionary that maps ID to word.\n\n    Returns\n    --------\n    list of str\n        A list of string or byte to represent the context.\n\n    Examples\n    ---------\n    >>> see ``tl.nlp.words_to_word_ids``", "id": "f11210:m12"}
{"signature": "def create_vocabulary(<EOL>vocabulary_path, data_path, max_vocabulary_size, tokenizer=None, normalize_digits=True,<EOL>_DIGIT_RE=re.compile(br\"<STR_LIT>\"), _START_VOCAB=None<EOL>):", "body": "if _START_VOCAB is None:<EOL><INDENT>_START_VOCAB = [b\"<STR_LIT>\", b\"<STR_LIT>\", b\"<STR_LIT>\", b\"<STR_LIT>\"]<EOL><DEDENT>if not gfile.Exists(vocabulary_path):<EOL><INDENT>tl.logging.info(\"<STR_LIT>\" % (vocabulary_path, data_path))<EOL>vocab = {}<EOL>with gfile.GFile(data_path, mode=\"<STR_LIT:rb>\") as f:<EOL><INDENT>counter = <NUM_LIT:0><EOL>for line in f:<EOL><INDENT>counter += <NUM_LIT:1><EOL>if counter % <NUM_LIT> == <NUM_LIT:0>:<EOL><INDENT>tl.logging.info(\"<STR_LIT>\" % counter)<EOL><DEDENT>tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)<EOL>for w in tokens:<EOL><INDENT>word = re.sub(_DIGIT_RE, b\"<STR_LIT:0>\", w) if normalize_digits else w<EOL>if word in vocab:<EOL><INDENT>vocab[word] += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>vocab[word] = <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)<EOL>if len(vocab_list) > max_vocabulary_size:<EOL><INDENT>vocab_list = vocab_list[:max_vocabulary_size]<EOL><DEDENT>with gfile.GFile(vocabulary_path, mode=\"<STR_LIT:wb>\") as vocab_file:<EOL><INDENT>for w in vocab_list:<EOL><INDENT>vocab_file.write(w + b\"<STR_LIT:\\n>\")<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>tl.logging.info(\"<STR_LIT>\" % (vocabulary_path, data_path))<EOL><DEDENT>", "docstring": "r\"\"\"Create vocabulary file (if it does not exist yet) from data file.\n\n    Data file is assumed to contain one sentence per line. 
Each sentence is\n    tokenized and digits are normalized (if normalize_digits is set).\n    Vocabulary contains the most-frequent tokens up to max_vocabulary_size.\n    We write it to vocabulary_path in a one-token-per-line format, so that later\n    token in the first line gets id=0, second line gets id=1, and so on.\n\n    Parameters\n    -----------\n    vocabulary_path : str\n        Path where the vocabulary will be created.\n    data_path : str\n        Data file that will be used to create vocabulary.\n    max_vocabulary_size : int\n        Limit on the size of the created vocabulary.\n    tokenizer : function\n        A function to use to tokenize each data sentence. If None, basic_tokenizer will be used.\n    normalize_digits : boolean\n        If true, all digits are replaced by `0`.\n    _DIGIT_RE : regular expression function\n        Default is ``re.compile(br\"\\d\")``.\n    _START_VOCAB : list of str\n        The pad, go, eos and unk token, default is ``[b\"_PAD\", b\"_GO\", b\"_EOS\", b\"_UNK\"]``.\n\n    References\n    ----------\n    - Code from ``/tensorflow/models/rnn/translation/data_utils.py``", "id": "f11210:m15"}
{"signature": "def _normalize_docstring(docstring):", "body": "if not docstring:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>lines = docstring.expandtabs().splitlines()<EOL>indent = sys.maxsize<EOL>for line in lines[<NUM_LIT:1>:]:<EOL><INDENT>stripped = line.lstrip()<EOL>if stripped:<EOL><INDENT>indent = min(indent, len(line) - len(stripped))<EOL><DEDENT><DEDENT>trimmed = [lines[<NUM_LIT:0>].strip()]<EOL>if indent < sys.maxsize:<EOL><INDENT>for line in lines[<NUM_LIT:1>:]:<EOL><INDENT>trimmed.append(line[indent:].rstrip())<EOL><DEDENT><DEDENT>while trimmed and not trimmed[-<NUM_LIT:1>]:<EOL><INDENT>trimmed.pop()<EOL><DEDENT>while trimmed and not trimmed[<NUM_LIT:0>]:<EOL><INDENT>trimmed.pop(<NUM_LIT:0>)<EOL><DEDENT>return '<STR_LIT:\\n>'.join(trimmed)<EOL>", "docstring": "Normalizes the docstring.\n\n    Replaces tabs with spaces, removes leading and trailing blanks lines, and\n    removes any indentation.\n\n    Copied from PEP-257:\n    https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation\n\n    Args:\n        docstring: the docstring to normalize\n\n    Returns:\n        The normalized docstring", "id": "f11215:m5"}
{"signature": "def _add_notice_to_docstring(doc, no_doc_str, notice):", "body": "if not doc:<EOL><INDENT>lines = [no_doc_str]<EOL><DEDENT>else:<EOL><INDENT>lines = _normalize_docstring(doc).splitlines()<EOL><DEDENT>notice = ['<STR_LIT>'] + notice<EOL>if len(lines) > <NUM_LIT:1>:<EOL><INDENT>if lines[<NUM_LIT:1>].strip():<EOL><INDENT>notice.append('<STR_LIT>')<EOL><DEDENT>lines[<NUM_LIT:1>:<NUM_LIT:1>] = notice<EOL><DEDENT>else:<EOL><INDENT>lines += notice<EOL><DEDENT>return '<STR_LIT:\\n>'.join(lines)<EOL>", "docstring": "Adds a deprecation notice to a docstring.", "id": "f11215:m4"}
{"signature": "def _add_deprecated_function_notice_to_docstring(doc, date, instructions):", "body": "if instructions:<EOL><INDENT>deprecation_message = \"\"\"<STR_LIT>\"\"\" % (('<STR_LIT>' if date is None else ('<STR_LIT>' % date)), instructions)<EOL><DEDENT>else:<EOL><INDENT>deprecation_message = \"\"\"<STR_LIT>\"\"\" % (('<STR_LIT>' if date is None else ('<STR_LIT>' % date)))<EOL><DEDENT>main_text = [deprecation_message]<EOL>return _add_notice_to_docstring(doc, '<STR_LIT>', main_text)<EOL>", "docstring": "Adds a deprecation notice to a docstring for deprecated functions.", "id": "f11215:m3"}
{"signature": "def seq_minibatches2(inputs, targets, batch_size, num_steps):", "body": "if len(inputs) != len(targets):<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>data_len = len(inputs)<EOL>batch_len = data_len // batch_size<EOL>data = np.zeros((batch_size, batch_len) + inputs.shape[<NUM_LIT:1>:], dtype=inputs.dtype)<EOL>data2 = np.zeros([batch_size, batch_len])<EOL>for i in range(batch_size):<EOL><INDENT>data[i] = inputs[batch_len * i:batch_len * (i + <NUM_LIT:1>)]<EOL>data2[i] = targets[batch_len * i:batch_len * (i + <NUM_LIT:1>)]<EOL><DEDENT>epoch_size = (batch_len - <NUM_LIT:1>) // num_steps<EOL>if epoch_size == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>for i in range(epoch_size):<EOL><INDENT>x = data[:, i * num_steps:(i + <NUM_LIT:1>) * num_steps]<EOL>x2 = data2[:, i * num_steps:(i + <NUM_LIT:1>) * num_steps]<EOL>yield (x, x2)<EOL><DEDENT>", "docstring": "Generate a generator that iterates on two list of words. Yields (Returns) the source contexts and\n    the target context by the given batch_size and num_steps (sequence_length).\n    In TensorFlow's tutorial, this generates the `batch_size` pointers into the raw PTB data, and allows minibatch iteration along these pointers.\n\n    Parameters\n    ----------\n    inputs : list of data\n        The context in list format; note that context usually be represented by splitting by space, and then convert to unique word IDs.\n    targets : list of data\n        The context in list format; note that context usually be represented by splitting by space, and then convert to unique word IDs.\n    batch_size : int\n        The batch size.\n    num_steps : int\n        The number of unrolls. i.e. 
sequence length\n\n    Yields\n    ------\n    Pairs of the batched data, each a matrix of shape [batch_size, num_steps].\n\n    Raises\n    ------\n    ValueError : if batch_size or num_steps are too high.\n\n    Examples\n    --------\n    >>> X = [i for i in range(20)]\n    >>> Y = [i for i in range(20,40)]\n    >>> for batch in tl.iterate.seq_minibatches2(X, Y, batch_size=2, num_steps=3):\n    ...     x, y = batch\n    ...     print(x, y)\n\n    [[  0.   1.   2.]\n    [ 10.  11.  12.]]\n    [[ 20.  21.  22.]\n    [ 30.  31.  32.]]\n\n    [[  3.   4.   5.]\n    [ 13.  14.  15.]]\n    [[ 23.  24.  25.]\n    [ 33.  34.  35.]]\n\n    [[  6.   7.   8.]\n    [ 16.  17.  18.]]\n    [[ 26.  27.  28.]\n    [ 36.  37.  38.]]\n\n    Notes\n    -----\n    - Hint, if the input data are images, you can modify the source code `data = np.zeros([batch_size, batch_len)` to `data = np.zeros([batch_size, batch_len, inputs.shape[1], inputs.shape[2], inputs.shape[3]])`.", "id": "f11216:m2"}
{"signature": "def minibatches(inputs=None, targets=None, batch_size=None, allow_dynamic_batch_size=False, shuffle=False):", "body": "if len(inputs) != len(targets):<EOL><INDENT>raise AssertionError(\"<STR_LIT>\")<EOL><DEDENT>if shuffle:<EOL><INDENT>indices = np.arange(len(inputs))<EOL>np.random.shuffle(indices)<EOL><DEDENT>for start_idx in range(<NUM_LIT:0>, len(inputs), batch_size):<EOL><INDENT>end_idx = start_idx + batch_size<EOL>if end_idx > len(inputs):<EOL><INDENT>if allow_dynamic_batch_size:<EOL><INDENT>end_idx = len(inputs)<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if shuffle:<EOL><INDENT>excerpt = indices[start_idx:end_idx]<EOL><DEDENT>else:<EOL><INDENT>excerpt = slice(start_idx, end_idx)<EOL><DEDENT>if (isinstance(inputs, list) or isinstance(targets, list)) and (shuffle ==True):<EOL><INDENT>yield [inputs[i] for i in excerpt], [targets[i] for i in excerpt]<EOL><DEDENT>else:<EOL><INDENT>yield inputs[excerpt], targets[excerpt]<EOL><DEDENT><DEDENT>", "docstring": "Generate a generator that input a group of example in numpy.array and\n    their labels, return the examples and labels by the given batch size.\n\n    Parameters\n    ----------\n    inputs : numpy.array\n        The input features, every row is a example.\n    targets : numpy.array\n        The labels of inputs, every row is a example.\n    batch_size : int\n        The batch size.\n    allow_dynamic_batch_size: boolean\n        Allow the use of the last data batch in case the number of examples is not a multiple of batch_size, this may result in unexpected behaviour if other functions expect a fixed-sized batch-size.\n    shuffle : boolean\n        Indicating whether to use a shuffling queue, shuffle the dataset before return.\n\n    Examples\n    --------\n    >>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])\n    >>> y = np.asarray([0,1,2,3,4,5])\n    >>> for batch in tl.iterate.minibatches(inputs=X, targets=y, batch_size=2, shuffle=False):\n   
 >>>     print(batch)\n    (array([['a', 'a'], ['b', 'b']], dtype='<U1'), array([0, 1]))\n    (array([['c', 'c'], ['d', 'd']], dtype='<U1'), array([2, 3]))\n    (array([['e', 'e'], ['f', 'f']], dtype='<U1'), array([4, 5]))\n\n    Notes\n    -----\n    If you have two inputs and one label and want to shuffle them together, e.g. X1 (1000, 100), X2 (1000, 80) and Y (1000, 1), you can stack them together (`np.hstack((X1, X2))`)\n    into (1000, 180) and feed to ``inputs``. After getting a batch, you can split it back into X1 and X2.", "id": "f11216:m0"}
{"signature": "def alphas(shape, alpha_value, name=None):", "body": "with ops.name_scope(name, \"<STR_LIT>\", [shape]) as name:<EOL><INDENT>alpha_tensor = convert_to_tensor(alpha_value)<EOL>alpha_dtype = dtypes.as_dtype(alpha_tensor.dtype).base_dtype<EOL>if not isinstance(shape, ops.Tensor):<EOL><INDENT>try:<EOL><INDENT>shape = constant_op._tensor_shape_tensor_conversion_function(tensor_shape.TensorShape(shape))<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)<EOL><DEDENT><DEDENT>if not shape._shape_tuple():<EOL><INDENT>shape = reshape(shape, [-<NUM_LIT:1>])  <EOL><DEDENT>try:<EOL><INDENT>output = constant(alpha_value, shape=shape, dtype=alpha_dtype, name=name)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>output = fill(shape, constant(alpha_value, dtype=alpha_dtype), name=name)<EOL><DEDENT>if output.dtype.base_dtype != alpha_dtype:<EOL><INDENT>raise AssertionError(\"<STR_LIT>\" % (output.dtype.base_dtype, alpha_dtype))<EOL><DEDENT>return output<EOL><DEDENT>", "docstring": "Creates a tensor with all elements set to `alpha_value`.\n    This operation returns a tensor of type `dtype` with shape `shape` and all\n    elements set to alpha.\n\n    Parameters\n    ----------\n    shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type `int32`.\n        The shape of the desired tensor\n    alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, int32`, `int64`\n        The value used to fill the resulting `Tensor`.\n    name: str\n        A name for the operation (optional).\n\n    Returns\n    -------\n    A `Tensor` with all elements set to alpha.\n\n    Examples\n    --------\n    >>> tl.alphas([2, 3], tf.int32)  # [[alpha, alpha, alpha], [alpha, alpha, alpha]]", "id": "f11217:m0"}
{"signature": "def cross_entropy(output, target, name=None):", "body": "if name is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output), name=name)<EOL>", "docstring": "Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions,\n    it implements softmax internally. See ``tf.nn.sparse_softmax_cross_entropy_with_logits``.\n\n    Parameters\n    ----------\n    output : Tensor\n        A batch of distribution with shape: [batch_size, num of classes].\n    target : Tensor\n        A batch of index with shape: [batch_size, ].\n    name : string\n        Name of this loss.\n\n    Examples\n    --------\n    >>> ce = tl.cost.cross_entropy(y_logits, y_target_logits, 'my_loss')\n\n    References\n    -----------\n    - About cross-entropy: `<https://en.wikipedia.org/wiki/Cross_entropy>`__.\n    - The code is borrowed from: `<https://en.wikipedia.org/wiki/Cross_entropy>`__.", "id": "f11218:m0"}
{"signature": "def li_regularizer(scale, scope=None):", "body": "if isinstance(scale, numbers.Integral):<EOL><INDENT>raise ValueError('<STR_LIT>' % scale)<EOL><DEDENT>if isinstance(scale, numbers.Real):<EOL><INDENT>if scale < <NUM_LIT:0.>:<EOL><INDENT>raise ValueError('<STR_LIT>' % scale)<EOL><DEDENT>if scale >= <NUM_LIT:1.>:<EOL><INDENT>raise ValueError('<STR_LIT>' % scale)<EOL><DEDENT>if scale == <NUM_LIT:0.>:<EOL><INDENT>tl.logging.info('<STR_LIT>')<EOL>return lambda _, name=None: None<EOL><DEDENT><DEDENT>def li(weights):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>with tf.name_scope('<STR_LIT>') as scope:<EOL><INDENT>my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='<STR_LIT>')<EOL>standard_ops_fn = standard_ops.multiply<EOL>return standard_ops_fn(<EOL>my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), <NUM_LIT:1>))),<EOL>name=scope<EOL>)<EOL><DEDENT><DEDENT>return li<EOL>", "docstring": "Li regularization removes the neurons of previous layer. The `i` represents `inputs`.\n    Returns a function that can be used to apply group li regularization to weights.\n    The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.\n\n    Parameters\n    ----------\n    scale : float\n        A scalar multiplier `Tensor`. 0.0 disables the regularizer.\n    scope: str\n        An optional scope name for this function.\n\n    Returns\n    --------\n    A function with signature `li(weights, name=None)` that apply Li regularization.\n\n    Raises\n    ------\n    ValueError : if scale is outside of the range [0.0, 1.0] or if scale is not a float.", "id": "f11218:m12"}
{"signature": "def cross_entropy_seq(logits, target_seqs, batch_size=None):  ", "body": "sequence_loss_by_example_fn = tf.contrib.legacy_seq2seq.sequence_loss_by_example<EOL>loss = sequence_loss_by_example_fn(<EOL>[logits], [tf.reshape(target_seqs, [-<NUM_LIT:1>])], [tf.ones_like(tf.reshape(target_seqs, [-<NUM_LIT:1>]), dtype=tf.float32)]<EOL>)<EOL>cost = tf.reduce_sum(loss)  <EOL>if batch_size is not None:<EOL><INDENT>cost = cost / batch_size<EOL><DEDENT>return cost<EOL>", "docstring": "Returns the expression of cross-entropy of two sequences, implement\n    softmax internally. Normally be used for fixed length RNN outputs, see `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm_state_is_tuple.py>`__.\n\n    Parameters\n    ----------\n    logits : Tensor\n        2D tensor with shape of `[batch_size * n_steps, n_classes]`.\n    target_seqs : Tensor\n        The target sequence, 2D tensor `[batch_size, n_steps]`, if the number of step is dynamic, please use ``tl.cost.cross_entropy_seq_with_mask`` instead.\n    batch_size : None or int.\n        Whether to divide the cost by batch size.\n            - If integer, the return cost will be divided by `batch_size`.\n            - If None (default), the return cost will not be divided by anything.\n\n    Examples\n    --------\n    >>> see `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm_state_is_tuple.py>`__.for more details\n    >>> input_data = tf.placeholder(tf.int32, [batch_size, n_steps])\n    >>> targets = tf.placeholder(tf.int32, [batch_size, n_steps])\n    >>> # build the network\n    >>> print(net.outputs)\n    (batch_size * n_steps, n_classes)\n    >>> cost = tl.cost.cross_entropy_seq(network.outputs, targets)", "id": "f11218:m9"}
{"signature": "def normalized_mean_square_error(output, target, name=\"<STR_LIT>\"):", "body": "<EOL>if output.get_shape().ndims == <NUM_LIT:2>:  <EOL><INDENT>nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=<NUM_LIT:1>))<EOL>nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=<NUM_LIT:1>))<EOL><DEDENT>elif output.get_shape().ndims == <NUM_LIT:3>:  <EOL><INDENT>nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[<NUM_LIT:1>, <NUM_LIT:2>]))<EOL>nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[<NUM_LIT:1>, <NUM_LIT:2>]))<EOL><DEDENT>elif output.get_shape().ndims == <NUM_LIT:4>:  <EOL><INDENT>nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>]))<EOL>nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>]))<EOL><DEDENT>nmse = tf.reduce_mean(nmse_a / nmse_b, name=name)<EOL>return nmse<EOL>", "docstring": "Return the TensorFlow expression of normalized mean-square-error of two distributions.\n\n    Parameters\n    ----------\n    output : Tensor\n        2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel].\n    target : Tensor\n        The target distribution, format the same with `output`.\n    name : str\n        An optional name to attach to this function.", "id": "f11218:m4"}
{"signature": "def maxnorm_o_regularizer(scale):", "body": "if isinstance(scale, numbers.Integral):<EOL><INDENT>raise ValueError('<STR_LIT>' % scale)<EOL><DEDENT>if isinstance(scale, numbers.Real):<EOL><INDENT>if scale < <NUM_LIT:0.>:<EOL><INDENT>raise ValueError('<STR_LIT>' % scale)<EOL><DEDENT>if scale == <NUM_LIT:0.>:<EOL><INDENT>tl.logging.info('<STR_LIT>')<EOL>return lambda _, name=None: None<EOL><DEDENT><DEDENT>def mn_o(weights, name='<STR_LIT>'):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>with tf.name_scope(name) as scope:<EOL><INDENT>my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='<STR_LIT>')<EOL>if tf.__version__ <= '<STR_LIT>':<EOL><INDENT>standard_ops_fn = standard_ops.mul<EOL><DEDENT>else:<EOL><INDENT>standard_ops_fn = standard_ops.multiply<EOL><DEDENT>return standard_ops_fn(<EOL>my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), <NUM_LIT:0>)), name=scope<EOL>)<EOL><DEDENT><DEDENT>return mn_o<EOL>", "docstring": "Max-norm output regularization removes the neurons of current layer.\n    Returns a function that can be used to apply max-norm regularization to each column of weight matrix.\n    The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.\n\n    Parameters\n    ----------\n    scale : float\n        A scalar multiplier `Tensor`. 0.0 disables the regularizer.\n\n    Returns\n    ---------\n    A function with signature `mn_o(weights, name=None)` that apply Lo regularization.\n\n    Raises\n    ---------\n    ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.", "id": "f11218:m15"}
{"signature": "def lo_regularizer(scale):", "body": "if isinstance(scale, numbers.Integral):<EOL><INDENT>raise ValueError('<STR_LIT>' % scale)<EOL><DEDENT>if isinstance(scale, numbers.Real):<EOL><INDENT>if scale < <NUM_LIT:0.>:<EOL><INDENT>raise ValueError('<STR_LIT>' % scale)<EOL><DEDENT>if scale >= <NUM_LIT:1.>:<EOL><INDENT>raise ValueError('<STR_LIT>' % scale)<EOL><DEDENT>if scale == <NUM_LIT:0.>:<EOL><INDENT>tl.logging.info('<STR_LIT>')<EOL>return lambda _, name=None: None<EOL><DEDENT><DEDENT>def lo(weights, name='<STR_LIT>'):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>with tf.name_scope(name) as scope:<EOL><INDENT>my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='<STR_LIT>')<EOL>standard_ops_fn = standard_ops.multiply<EOL>return standard_ops_fn(<EOL>my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), <NUM_LIT:0>))),<EOL>name=scope<EOL>)<EOL><DEDENT><DEDENT>return lo<EOL>", "docstring": "Lo regularization removes the neurons of current layer. The `o` represents `outputs`\n    Returns a function that can be used to apply group lo regularization to weights.\n    The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.\n\n    Parameters\n    ----------\n    scale : float\n        A scalar multiplier `Tensor`. 0.0 disables the regularizer.\n\n    Returns\n    -------\n    A function with signature `lo(weights, name=None)` that apply Lo regularization.\n\n    Raises\n    ------\n    ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.", "id": "f11218:m13"}
{"signature": "def sigmoid_cross_entropy(output, target, name=None):", "body": "return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output), name=name)<EOL>", "docstring": "Sigmoid cross-entropy operation, see ``tf.nn.sigmoid_cross_entropy_with_logits``.\n\n    Parameters\n    ----------\n    output : Tensor\n        A batch of distribution with shape: [batch_size, num of classes].\n    target : Tensor\n        A batch of index with shape: [batch_size, ].\n    name : string\n        Name of this loss.", "id": "f11218:m1"}
{"signature": "def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None):", "body": "targets = tf.reshape(target_seqs, [-<NUM_LIT:1>])  <EOL>weights = tf.to_float(tf.reshape(input_mask, [-<NUM_LIT:1>]))  <EOL>losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights<EOL>loss = tf.divide(<EOL>tf.reduce_sum(losses),  <EOL>tf.reduce_sum(weights),<EOL>name=\"<STR_LIT>\"<EOL>)<EOL>if return_details:<EOL><INDENT>return loss, losses, weights, targets<EOL><DEDENT>else:<EOL><INDENT>return loss<EOL><DEDENT>", "docstring": "Returns the expression of cross-entropy of two sequences, implement\n    softmax internally. Normally be used for Dynamic RNN with Synced sequence input and output.\n\n    Parameters\n    -----------\n    logits : Tensor\n        2D tensor with shape of [batch_size * ?, n_classes], `?` means dynamic IDs for each example.\n        - Can be get from `DynamicRNNLayer` by setting ``return_seq_2d`` to `True`.\n    target_seqs : Tensor\n        int of tensor, like word ID. [batch_size, ?], `?` means dynamic IDs for each example.\n    input_mask : Tensor\n        The mask to compute loss, it has the same size with `target_seqs`, normally 0 or 1.\n    return_details : boolean\n        Whether to return detailed losses.\n            - If False (default), only returns the loss.\n            - If True, returns the loss, losses, weights and targets (see source code).\n\n    Examples\n    --------\n    >>> batch_size = 64\n    >>> vocab_size = 10000\n    >>> embedding_size = 256\n    >>> input_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name=\"input\")\n    >>> target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name=\"target\")\n    >>> input_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name=\"mask\")\n    >>> net = tl.layers.EmbeddingInputlayer(\n    ...         inputs = input_seqs,\n    ...         
vocabulary_size = vocab_size,\n    ...         embedding_size = embedding_size,\n    ...         name = 'seq_embedding')\n    >>> net = tl.layers.DynamicRNNLayer(net,\n    ...         cell_fn = tf.contrib.rnn.BasicLSTMCell,\n    ...         n_hidden = embedding_size,\n    ...         dropout = (0.7 if is_train else None),\n    ...         sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs),\n    ...         return_seq_2d = True,\n    ...         name = 'dynamicrnn')\n    >>> print(net.outputs)\n    (?, 256)\n    >>> net = tl.layers.DenseLayer(net, n_units=vocab_size, name=\"output\")\n    >>> print(net.outputs)\n    (?, 10000)\n    >>> loss = tl.cost.cross_entropy_seq_with_mask(net.outputs, target_seqs, input_mask)", "id": "f11218:m10"}
{"signature": "def maxnorm_i_regularizer(scale):", "body": "if isinstance(scale, numbers.Integral):<EOL><INDENT>raise ValueError('<STR_LIT>' % scale)<EOL><DEDENT>if isinstance(scale, numbers.Real):<EOL><INDENT>if scale < <NUM_LIT:0.>:<EOL><INDENT>raise ValueError('<STR_LIT>' % scale)<EOL><DEDENT>if scale == <NUM_LIT:0.>:<EOL><INDENT>tl.logging.info('<STR_LIT>')<EOL>return lambda _, name=None: None<EOL><DEDENT><DEDENT>def mn_i(weights, name='<STR_LIT>'):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>with tf.name_scope(name) as scope:<EOL><INDENT>my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='<STR_LIT>')<EOL>if tf.__version__ <= '<STR_LIT>':<EOL><INDENT>standard_ops_fn = standard_ops.mul<EOL><DEDENT>else:<EOL><INDENT>standard_ops_fn = standard_ops.multiply<EOL><DEDENT>return standard_ops_fn(<EOL>my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), <NUM_LIT:1>)), name=scope<EOL>)<EOL><DEDENT><DEDENT>return mn_i<EOL>", "docstring": "Max-norm input regularization removes the neurons of previous layer.\n    Returns a function that can be used to apply max-norm regularization to each row of weight matrix.\n    The implementation follows `TensorFlow contrib <https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/layers/python/layers/regularizers.py>`__.\n\n    Parameters\n    ----------\n    scale : float\n        A scalar multiplier `Tensor`. 0.0 disables the regularizer.\n\n    Returns\n    ---------\n    A function with signature `mn_i(weights, name=None)` that apply Lo regularization.\n\n    Raises\n    ---------\n    ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float.", "id": "f11218:m16"}
{"signature": "def list_string_to_dict(string):", "body": "dictionary = {}<EOL>for idx, c in enumerate(string):<EOL><INDENT>dictionary.update({c: idx})<EOL><DEDENT>return dictionary<EOL>", "docstring": "Inputs ``['a', 'b', 'c']``, returns ``{'a': 0, 'b': 1, 'c': 2}``.", "id": "f11219:m8"}
{"signature": "def dict_to_one(dp_dict):", "body": "return {x: <NUM_LIT:1> for x in dp_dict}<EOL>", "docstring": "Input a dictionary, return a dictionary that all items are set to one.\n\n    Used for disable dropout, dropconnect layer and so on.\n\n    Parameters\n    ----------\n    dp_dict : dictionary\n        The dictionary contains key and number, e.g. keeping probabilities.\n\n    Examples\n    --------\n    >>> dp_dict = dict_to_one( network.all_drop )\n    >>> dp_dict = dict_to_one( network.all_drop )\n    >>> feed_dict.update(dp_dict)", "id": "f11219:m4"}
{"signature": "def open_tensorboard(log_dir='<STR_LIT>', port=<NUM_LIT>):", "body": "text = \"<STR_LIT>\" + str(port) + \"<STR_LIT>\"<EOL>text2 = \"<STR_LIT>\"<EOL>if not tl.files.exists_or_mkdir(log_dir, verbose=False):<EOL><INDENT>tl.logging.info(\"<STR_LIT>\" % log_dir)<EOL><DEDENT>if _platform == \"<STR_LIT>\" or _platform == \"<STR_LIT>\":<EOL><INDENT>raise NotImplementedError()<EOL><DEDENT>elif _platform == \"<STR_LIT>\":<EOL><INDENT>tl.logging.info('<STR_LIT>' % text)<EOL>subprocess.Popen(<EOL>sys.prefix + \"<STR_LIT>\" + log_dir + \"<STR_LIT>\" + str(port), shell=True<EOL>)  <EOL><DEDENT>elif _platform == \"<STR_LIT:win32>\":<EOL><INDENT>raise NotImplementedError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>tl.logging.info(_platform + text2)<EOL><DEDENT>", "docstring": "Open Tensorboard.\n\n    Parameters\n    ----------\n    log_dir : str\n        Directory where your tensorboard logs are saved\n    port : int\n        TensorBoard port you want to open, 6006 is tensorboard default", "id": "f11219:m10"}
{"signature": "def is_ps(self):", "body": "return self.type == '<STR_LIT>'<EOL>", "docstring": "Returns true if this server is a parameter server", "id": "f11220:c1:m1"}
{"signature": "def train_and_validate_to_end(self, validate_step_size=<NUM_LIT:50>):", "body": "while not self._sess.should_stop():<EOL><INDENT>self.train_on_batch()  <EOL>if self.global_step % validate_step_size == <NUM_LIT:0>:<EOL><INDENT>log_str = '<STR_LIT>' % self.global_step<EOL>for n, m in self.validation_metrics:<EOL><INDENT>log_str += '<STR_LIT>' % (n.name, m)<EOL><DEDENT>logging.info(log_str)<EOL><DEDENT><DEDENT>", "docstring": "A helper function that shows how to train and validate a model at the same time.\n\n        Parameters\n        ----------\n        validate_step_size : int\n            Validate the training network every N steps.", "id": "f11220:c0:m6"}
{"signature": "@deprecated(date=\"<STR_LIT>\", instructions=\"<STR_LIT>\")<EOL>def create_distributed_session(<EOL>task_spec=None, checkpoint_dir=None, scaffold=None, hooks=None, chief_only_hooks=None, save_checkpoint_secs=<NUM_LIT>,<EOL>save_summaries_steps=object(), save_summaries_secs=object(), config=None, stop_grace_period_secs=<NUM_LIT>,<EOL>log_step_count_steps=<NUM_LIT:100><EOL>):", "body": "target = task_spec.target() if task_spec is not None else None<EOL>is_chief = task_spec.is_master() if task_spec is not None else True<EOL>return tf.train.MonitoredTrainingSession(<EOL>master=target, is_chief=is_chief, checkpoint_dir=checkpoint_dir, scaffold=scaffold,<EOL>save_checkpoint_secs=save_checkpoint_secs, save_summaries_steps=save_summaries_steps,<EOL>save_summaries_secs=save_summaries_secs, log_step_count_steps=log_step_count_steps,<EOL>stop_grace_period_secs=stop_grace_period_secs, config=config, hooks=hooks, chief_only_hooks=chief_only_hooks<EOL>)<EOL>", "docstring": "Creates a distributed session.\n\n    It calls `MonitoredTrainingSession` to create a :class:`MonitoredSession` for distributed training.\n\n    Parameters\n    ----------\n    task_spec : :class:`TaskSpecDef`.\n        The task spec definition from create_task_spec_def()\n    checkpoint_dir : str.\n        Optional path to a directory where to restore variables.\n    scaffold : ``Scaffold``\n        A `Scaffold` used for gathering or building supportive ops.\n        If not specified, a default one is created. It's used to finalize the graph.\n    hooks : list of ``SessionRunHook`` objects.\n        Optional\n    chief_only_hooks : list of ``SessionRunHook`` objects.\n        Activate these hooks if `is_chief==True`, ignore otherwise.\n    save_checkpoint_secs : int\n        The frequency, in seconds, that a checkpoint is saved\n        using a default checkpoint saver. 
If `save_checkpoint_secs` is set to\n        `None`, then the default checkpoint saver isn't used.\n    save_summaries_steps : int\n        The frequency, in number of global steps, that the\n        summaries are written to disk using a default summary saver. If both\n        `save_summaries_steps` and `save_summaries_secs` are set to `None`, then\n        the default summary saver isn't used. Default 100.\n    save_summaries_secs : int\n        The frequency, in secs, that the summaries are written\n        to disk using a default summary saver.  If both `save_summaries_steps` and\n        `save_summaries_secs` are set to `None`, then the default summary saver\n        isn't used. Default not enabled.\n    config : ``tf.ConfigProto``\n        an instance of `tf.ConfigProto` proto used to configure the session.\n        It's the `config` argument of constructor of `tf.Session`.\n    stop_grace_period_secs : int\n        Number of seconds given to threads to stop after\n        `close()` has been called.\n    log_step_count_steps : int\n        The frequency, in number of global steps, that the\n        global step/sec is logged.\n\n    Examples\n    --------\n    A simple example for distributed training where all the workers use the same dataset:\n\n    >>> task_spec = TaskSpec()\n    >>> with tf.device(task_spec.device_fn()):\n    >>>      tensors = create_graph()\n    >>> with tl.DistributedSession(task_spec=task_spec,\n    ...                            
checkpoint_dir='/tmp/ckpt') as session:\n    >>>      while not session.should_stop():\n    >>>           session.run(tensors)\n\n    An example where the dataset is shared among the workers\n    (see https://www.tensorflow.org/programmers_guide/datasets):\n\n    >>> task_spec = TaskSpec()\n    >>> # dataset is a :class:`tf.data.Dataset` with the raw data\n    >>> dataset = create_dataset()\n    >>> if task_spec is not None:\n    >>>     dataset = dataset.shard(task_spec.num_workers, task_spec.shard_index)\n    >>> # shuffle or apply a map function to the new sharded dataset, for example:\n    >>> dataset = dataset.shuffle(buffer_size=10000)\n    >>> dataset = dataset.batch(batch_size)\n    >>> dataset = dataset.repeat(num_epochs)\n    >>> # create the iterator for the dataset and the input tensor\n    >>> iterator = dataset.make_one_shot_iterator()\n    >>> next_element = iterator.get_next()\n    >>> with tf.device(task_spec.device_fn()):\n    >>>      # next_element is the input for the graph\n    >>>      tensors = create_graph(next_element)\n    >>> with tl.DistributedSession(task_spec=task_spec,\n    ...                            checkpoint_dir='/tmp/ckpt') as session:\n    >>>      while not session.should_stop():\n    >>>           session.run(tensors)\n\n    References\n    ----------\n    - `MonitoredTrainingSession <https://www.tensorflow.org/api_docs/python/tf/train/MonitoredTrainingSession>`__", "id": "f11220:m1"}
{"signature": "def expand_filepaths(base_dir, rel_paths):", "body": "return [os.path.join(base_dir, os.path.normpath(rp)) for rp in rel_paths]<EOL>", "docstring": "Expand a list of relative paths to a give base directory.\n\n    Parameters\n    ----------\n    base_dir : str\n        The target base directory\n\n    rel_paths : list (or list-like)\n        Collection of relative path strings\n\n    Returns\n    -------\n    expanded_paths : list\n        `rel_paths` rooted at `base_dir`\n\n    Examples\n    --------\n    >>> jams.util.expand_filepaths('/data', ['audio', 'beat', 'seglab'])\n    ['/data/audio', '/data/beat', '/data/seglab']", "id": "f11232:m1"}
{"signature": "def find_with_extension(in_dir, ext, depth=<NUM_LIT:3>, sort=True):", "body": "assert depth >= <NUM_LIT:1><EOL>ext = ext.strip(os.extsep)<EOL>match = list()<EOL>for n in range(<NUM_LIT:1>, depth+<NUM_LIT:1>):<EOL><INDENT>wildcard = os.path.sep.join([\"<STR_LIT:*>\"]*n)<EOL>search_path = os.path.join(in_dir, os.extsep.join([wildcard, ext]))<EOL>match += glob.glob(search_path)<EOL><DEDENT>if sort:<EOL><INDENT>match.sort()<EOL><DEDENT>return match<EOL>", "docstring": "Naive depth-search into a directory for files with a given extension.\n\n    Parameters\n    ----------\n    in_dir : str\n        Path to search.\n    ext : str\n        File extension to match.\n    depth : int\n        Depth of directories to search.\n    sort : bool\n        Sort the list alphabetically\n\n    Returns\n    -------\n    matched : list\n        Collection of matching file paths.\n\n    Examples\n    --------\n    >>> jams.util.find_with_extension('Audio', 'wav')\n    ['Audio/LizNelson_Rainfall/LizNelson_Rainfall_MIX.wav',\n     'Audio/LizNelson_Rainfall/LizNelson_Rainfall_RAW/LizNelson_Rainfall_RAW_01_01.wav',\n     'Audio/LizNelson_Rainfall/LizNelson_Rainfall_RAW/LizNelson_Rainfall_RAW_02_01.wav',\n     ...\n     'Audio/Phoenix_ScotchMorris/Phoenix_ScotchMorris_STEMS/Phoenix_ScotchMorris_STEM_02.wav',\n     'Audio/Phoenix_ScotchMorris/Phoenix_ScotchMorris_STEMS/Phoenix_ScotchMorris_STEM_03.wav',\n    'Audio/Phoenix_ScotchMorris/Phoenix_ScotchMorris_STEMS/Phoenix_ScotchMorris_STEM_04.wav']", "id": "f11232:m4"}
{"signature": "def filebase(filepath):", "body": "return os.path.splitext(os.path.basename(filepath))[<NUM_LIT:0>]<EOL>", "docstring": "Return the extension-less basename of a file path.\n\n    Parameters\n    ----------\n    filepath : str\n        Path to a file\n\n    Returns\n    -------\n    base : str\n        The name of the file, with directory and extension removed\n\n    Examples\n    --------\n    >>> jams.util.filebase('my_song.mp3')\n    'my_song'", "id": "f11232:m3"}
{"signature": "def smkdirs(dpath, mode=<NUM_LIT>):", "body": "if not os.path.exists(dpath):<EOL><INDENT>os.makedirs(dpath, mode=mode)<EOL><DEDENT>", "docstring": "Safely make a full directory path if it doesn't exist.\n\n    Parameters\n    ----------\n    dpath : str\n        Path of directory/directories to create\n\n    mode : int [default=0777]\n        Permissions for the new directories\n\n    See also\n    --------\n    os.makedirs", "id": "f11232:m2"}
{"signature": "def namespace(ns_key):", "body": "if ns_key not in __NAMESPACE__:<EOL><INDENT>raise NamespaceError('<STR_LIT>'.format(ns_key))<EOL><DEDENT>sch = copy.deepcopy(JAMS_SCHEMA['<STR_LIT>']['<STR_LIT>'])<EOL>for key in ['<STR_LIT:value>', '<STR_LIT>']:<EOL><INDENT>try:<EOL><INDENT>sch['<STR_LIT>'][key] = __NAMESPACE__[ns_key][key]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return sch<EOL>", "docstring": "Construct a validation schema for a given namespace.\n\n    Parameters\n    ----------\n    ns_key : str\n        Namespace key identifier (eg, 'beat' or 'segment_tut')\n\n    Returns\n    -------\n    schema : dict\n        JSON schema of `namespace`", "id": "f11234:m1"}
{"signature": "def is_dense(ns_key):", "body": "if ns_key not in __NAMESPACE__:<EOL><INDENT>raise NamespaceError('<STR_LIT>'.format(ns_key))<EOL><DEDENT>return __NAMESPACE__[ns_key]['<STR_LIT>']<EOL>", "docstring": "Determine whether a namespace has dense formatting.\n\n    Parameters\n    ----------\n    ns_key : str\n        Namespace key identifier\n\n    Returns\n    -------\n    dense : bool\n        True if `ns_key` has a dense packing\n        False otherwise.", "id": "f11234:m3"}
{"signature": "def __get_dtype(typespec):", "body": "if '<STR_LIT:type>' in typespec:<EOL><INDENT>return __TYPE_MAP__.get(typespec['<STR_LIT:type>'], np.object_)<EOL><DEDENT>elif '<STR_LIT>' in typespec:<EOL><INDENT>return np.object_<EOL><DEDENT>elif '<STR_LIT>' in typespec:<EOL><INDENT>types = [__get_dtype(v) for v in typespec['<STR_LIT>']]<EOL>if all([t == types[<NUM_LIT:0>] for t in types]):<EOL><INDENT>return types[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>return np.object_<EOL>", "docstring": "Get the dtype associated with a jsonschema type definition\n\n    Parameters\n    ----------\n    typespec : dict\n        The schema definition\n\n    Returns\n    -------\n    dtype : numpy.dtype\n        The associated dtype", "id": "f11234:m7"}
{"signature": "def namespace_array(ns_key):", "body": "obs_sch = namespace(ns_key)<EOL>obs_sch['<STR_LIT:title>'] = '<STR_LIT>'<EOL>sch = copy.deepcopy(JAMS_SCHEMA['<STR_LIT>']['<STR_LIT>'])<EOL>sch['<STR_LIT>'] = obs_sch<EOL>return sch<EOL>", "docstring": "Construct a validation schema for arrays of a given namespace.\n\n    Parameters\n    ----------\n    ns_key : str\n        Namespace key identifier\n\n    Returns\n    -------\n    schema : dict\n        JSON schema of `namespace` observation arrays", "id": "f11234:m2"}
{"signature": "def display_multi(annotations, fig_kw=None, meta=True, **kwargs):", "body": "if fig_kw is None:<EOL><INDENT>fig_kw = dict()<EOL><DEDENT>fig_kw.setdefault('<STR_LIT>', True)<EOL>fig_kw.setdefault('<STR_LIT>', True)<EOL>display_annotations = []<EOL>for ann in annotations:<EOL><INDENT>for namespace in VIZ_MAPPING:<EOL><INDENT>if can_convert(ann, namespace):<EOL><INDENT>display_annotations.append(ann)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>if not len(display_annotations):<EOL><INDENT>raise ParameterError('<STR_LIT>')<EOL><DEDENT>fig, axs = plt.subplots(nrows=len(display_annotations), ncols=<NUM_LIT:1>, **fig_kw)<EOL>if len(display_annotations) == <NUM_LIT:1>:<EOL><INDENT>axs = [axs]<EOL><DEDENT>for ann, ax in zip(display_annotations, axs):<EOL><INDENT>kwargs['<STR_LIT>'] = ax<EOL>display(ann, meta=meta, **kwargs)<EOL><DEDENT>return fig, axs<EOL>", "docstring": "Display multiple annotations with shared axes\n\n    Parameters\n    ----------\n    annotations : jams.AnnotationArray\n        A collection of annotations to display\n\n    fig_kw : dict\n        Keyword arguments to `plt.figure`\n\n    meta : bool\n        If `True`, display annotation metadata for each annotation\n\n    kwargs\n        Additional keyword arguments to the `mir_eval.display` routines\n\n    Returns\n    -------\n    fig\n        The created figure\n    axs\n        List of subplot axes corresponding to each displayed annotation", "id": "f11235:m8"}
{"signature": "def piano_roll(annotation, **kwargs):", "body": "times, midi = annotation.to_interval_values()<EOL>return mir_eval.display.piano_roll(times, midi=midi, **kwargs)<EOL>", "docstring": "Plotting wrapper for piano rolls", "id": "f11235:m6"}
{"signature": "def event(annotation, **kwargs):", "body": "times, values = annotation.to_interval_values()<EOL>if any(values):<EOL><INDENT>labels = values<EOL><DEDENT>else:<EOL><INDENT>labels = None<EOL><DEDENT>return mir_eval.display.events(times, labels=labels, **kwargs)<EOL>", "docstring": "Plotting wrapper for events", "id": "f11235:m4"}
{"signature": "def intervals(annotation, **kwargs):", "body": "times, labels = annotation.to_interval_values()<EOL>return mir_eval.display.labeled_intervals(times, labels, **kwargs)<EOL>", "docstring": "Plotting wrapper for labeled intervals", "id": "f11235:m1"}
{"signature": "def beat_position(annotation, **kwargs):", "body": "times, values = annotation.to_interval_values()<EOL>labels = [_['<STR_LIT>'] for _ in values]<EOL>return mir_eval.display.events(times, labels=labels, **kwargs)<EOL>", "docstring": "Plotting wrapper for beat-position data", "id": "f11235:m5"}
{"signature": "def pitch_contour(annotation, **kwargs):", "body": "ax = kwargs.pop('<STR_LIT>', None)<EOL>ax = mir_eval.display.__get_axes(ax=ax)[<NUM_LIT:0>]<EOL>times, values = annotation.to_interval_values()<EOL>indices = np.unique([v['<STR_LIT:index>'] for v in values])<EOL>for idx in indices:<EOL><INDENT>rows = [i for (i, v) in enumerate(values) if v['<STR_LIT:index>'] == idx]<EOL>freqs = np.asarray([values[r]['<STR_LIT>'] for r in rows])<EOL>unvoiced = ~np.asarray([values[r]['<STR_LIT>'] for r in rows])<EOL>freqs[unvoiced] *= -<NUM_LIT:1><EOL>ax = mir_eval.display.pitch(times[rows, <NUM_LIT:0>], freqs, unvoiced=True,<EOL>ax=ax,<EOL>**kwargs)<EOL><DEDENT>return ax<EOL>", "docstring": "Plotting wrapper for pitch contours", "id": "f11235:m3"}
{"signature": "def pattern_to_mireval(ann):", "body": "<EOL>patterns = defaultdict(lambda: defaultdict(list))<EOL>for time, observation in zip(*ann.to_event_values()):<EOL><INDENT>pattern_id = observation['<STR_LIT>']<EOL>occurrence_id = observation['<STR_LIT>']<EOL>obs = (time, observation['<STR_LIT>'])<EOL>patterns[pattern_id][occurrence_id].append(obs)<EOL><DEDENT>return [list(_.values()) for _ in six.itervalues(patterns)]<EOL>", "docstring": "Convert a pattern_jku annotation object to mir_eval format.\n\n    Parameters\n    ----------\n    ann : jams.Annotation\n        Must have `namespace='pattern_jku'`\n\n    Returns\n    -------\n    patterns : list of list of tuples\n        - `patterns[x]` is a list containing all occurrences of pattern x\n\n        - `patterns[x][y]` is a list containing all notes for\n           occurrence y of pattern x\n\n        - `patterns[x][y][z]` contains a time-note tuple\n          `(time, midi note)`", "id": "f11236:m9"}
{"signature": "def transcription(ref, est, **kwargs):", "body": "namespace = '<STR_LIT>'<EOL>ref = coerce_annotation(ref, namespace)<EOL>est = coerce_annotation(est, namespace)<EOL>ref_intervals, ref_p = ref.to_interval_values()<EOL>est_intervals, est_p = est.to_interval_values()<EOL>ref_pitches = np.asarray([p['<STR_LIT>'] * (-<NUM_LIT:1>)**(~p['<STR_LIT>']) for p in ref_p])<EOL>est_pitches = np.asarray([p['<STR_LIT>'] * (-<NUM_LIT:1>)**(~p['<STR_LIT>']) for p in est_p])<EOL>return mir_eval.transcription.evaluate(<EOL>ref_intervals, ref_pitches, est_intervals, est_pitches, **kwargs)<EOL>", "docstring": "r'''Note transcription evaluation\n\n    Parameters\n    ----------\n    ref : jams.Annotation\n        Reference annotation object\n    est : jams.Annotation\n        Estimated annotation object\n    kwargs\n        Additional keyword arguments\n\n    Returns\n    -------\n    scores : dict\n        Dictionary of scores, where the key is the metric name (str) and\n        the value is the (float) score achieved.\n\n    See Also\n    --------\n    mir_eval.transcription.evaluate\n\n    Examples\n    --------\n    >>> # Load in the JAMS objects\n    >>> ref_jam = jams.load('reference.jams')\n    >>> est_jam = jams.load('estimated.jams')\n    >>> # Select the first relevant annotations. You can use any annotation\n    >>> # type that can be converted to pitch_contour (such as pitch_midi)\n    >>> ref_ann = ref_jam.search(namespace='pitch_contour')[0]\n    >>> est_ann = est_jam.search(namespace='note_hz')[0]\n    >>> scores = jams.eval.transcription(ref_ann, est_ann)", "id": "f11236:m11"}
{"signature": "def coerce_annotation(ann, namespace):", "body": "ann = convert(ann, namespace)<EOL>ann.validate(strict=True)<EOL>return ann<EOL>", "docstring": "Validate that the annotation has the correct namespace,\n    and is well-formed.\n\n    If the annotation is not of the correct namespace, automatic conversion\n    is attempted.\n\n    Parameters\n    ----------\n    ann : jams.Annotation\n        The annotation object in question\n\n    namespace : str\n        The namespace pattern to match `ann` against\n\n    Returns\n    -------\n    ann_coerced: jams.Annotation\n        The annotation coerced to the target namespace\n\n    Raises\n    ------\n    NamespaceError\n        If `ann` does not match the proper namespace\n\n    SchemaError\n        If `ann` fails schema validation\n\n    See Also\n    --------\n    jams.nsconvert.convert", "id": "f11236:m0"}
{"signature": "def beat(ref, est, **kwargs):", "body": "namespace = '<STR_LIT>'<EOL>ref = coerce_annotation(ref, namespace)<EOL>est = coerce_annotation(est, namespace)<EOL>ref_times, _ = ref.to_event_values()<EOL>est_times, _ = est.to_event_values()<EOL>return mir_eval.beat.evaluate(ref_times, est_times, **kwargs)<EOL>", "docstring": "r'''Beat tracking evaluation\n\n    Parameters\n    ----------\n    ref : jams.Annotation\n        Reference annotation object\n    est : jams.Annotation\n        Estimated annotation object\n    kwargs\n        Additional keyword arguments\n\n    Returns\n    -------\n    scores : dict\n        Dictionary of scores, where the key is the metric name (str) and\n        the value is the (float) score achieved.\n\n    See Also\n    --------\n    mir_eval.beat.evaluate\n\n    Examples\n    --------\n    >>> # Load in the JAMS objects\n    >>> ref_jam = jams.load('reference.jams')\n    >>> est_jam = jams.load('estimated.jams')\n    >>> # Select the first relevant annotations\n    >>> ref_ann = ref_jam.search(namespace='beat')[0]\n    >>> est_ann = est_jam.search(namespace='beat')[0]\n    >>> scores = jams.eval.beat(ref_ann, est_ann)", "id": "f11236:m1"}
{"signature": "def hierarchy(ref, est, **kwargs):", "body": "namespace = '<STR_LIT>'<EOL>ref = coerce_annotation(ref, namespace)<EOL>est = coerce_annotation(est, namespace)<EOL>ref_hier, ref_hier_lab = hierarchy_flatten(ref)<EOL>est_hier, est_hier_lab = hierarchy_flatten(est)<EOL>return mir_eval.hierarchy.evaluate(ref_hier, ref_hier_lab,<EOL>est_hier, est_hier_lab,<EOL>**kwargs)<EOL>", "docstring": "r'''Multi-level segmentation evaluation\n\n    Parameters\n    ----------\n    ref : jams.Annotation\n        Reference annotation object\n    est : jams.Annotation\n        Estimated annotation object\n    kwargs\n        Additional keyword arguments\n\n    Returns\n    -------\n    scores : dict\n        Dictionary of scores, where the key is the metric name (str) and\n        the value is the (float) score achieved.\n\n    See Also\n    --------\n    mir_eval.hierarchy.evaluate\n\n    Examples\n    --------\n    >>> # Load in the JAMS objects\n    >>> ref_jam = jams.load('reference.jams')\n    >>> est_jam = jams.load('estimated.jams')\n    >>> # Select the first relevant annotations\n    >>> ref_ann = ref_jam.search(namespace='multi_segment')[0]\n    >>> est_ann = est_jam.search(namespace='multi_segment')[0]\n    >>> scores = jams.eval.hierarchy(ref_ann, est_ann)", "id": "f11236:m6"}
{"signature": "def tempo(ref, est, **kwargs):", "body": "ref = coerce_annotation(ref, '<STR_LIT>')<EOL>est = coerce_annotation(est, '<STR_LIT>')<EOL>ref_tempi = np.asarray([o.value for o in ref])<EOL>ref_weight = ref.data[<NUM_LIT:0>].confidence<EOL>est_tempi = np.asarray([o.value for o in est])<EOL>return mir_eval.tempo.evaluate(ref_tempi, ref_weight, est_tempi, **kwargs)<EOL>", "docstring": "r'''Tempo evaluation\n\n    Parameters\n    ----------\n    ref : jams.Annotation\n        Reference annotation object\n    est : jams.Annotation\n        Estimated annotation object\n    kwargs\n        Additional keyword arguments\n\n    Returns\n    -------\n    scores : dict\n        Dictionary of scores, where the key is the metric name (str) and\n        the value is the (float) score achieved.\n\n    See Also\n    --------\n    mir_eval.tempo.evaluate\n\n    Examples\n    --------\n    >>> # Load in the JAMS objects\n    >>> ref_jam = jams.load('reference.jams')\n    >>> est_jam = jams.load('estimated.jams')\n    >>> # Select the first relevant annotations\n    >>> ref_ann = ref_jam.search(namespace='tempo')[0]\n    >>> est_ann = est_jam.search(namespace='tempo')[0]\n    >>> scores = jams.eval.tempo(ref_ann, est_ann)", "id": "f11236:m7"}
{"signature": "@_conversion('<STR_LIT>', '<STR_LIT>')<EOL>def pitch_hz_to_midi(annotation):", "body": "annotation.namespace = '<STR_LIT>'<EOL>data = annotation.pop_data()<EOL>for obs in data:<EOL><INDENT>annotation.append(time=obs.time, duration=obs.duration,<EOL>confidence=obs.confidence,<EOL>value=<NUM_LIT:12> * (np.log2(obs.value) - np.log2(<NUM_LIT>)) + <NUM_LIT>)<EOL><DEDENT>return annotation<EOL>", "docstring": "Convert a pitch_hz annotation to pitch_midi", "id": "f11238:m8"}
{"signature": "@_conversion('<STR_LIT>', '<STR_LIT>')<EOL>def segment_to_open(annotation):", "body": "annotation.namespace = '<STR_LIT>'<EOL>return annotation<EOL>", "docstring": "Convert any segmentation to open label space", "id": "f11238:m9"}
{"signature": "@_conversion('<STR_LIT>', '<STR_LIT>')<EOL>def beat_position(annotation):", "body": "annotation.namespace = '<STR_LIT>'<EOL>data = annotation.pop_data()<EOL>for obs in data:<EOL><INDENT>annotation.append(time=obs.time, duration=obs.duration,<EOL>confidence=obs.confidence,<EOL>value=obs.value['<STR_LIT>'])<EOL><DEDENT>return annotation<EOL>", "docstring": "Convert beat_position to beat", "id": "f11238:m12"}
{"signature": "@_conversion('<STR_LIT>', '<STR_LIT>')<EOL>def pitch_hz_to_contour(annotation):", "body": "annotation.namespace = '<STR_LIT>'<EOL>data = annotation.pop_data()<EOL>for obs in data:<EOL><INDENT>annotation.append(time=obs.time, duration=obs.duration,<EOL>confidence=obs.confidence,<EOL>value=dict(index=<NUM_LIT:0>,<EOL>frequency=np.abs(obs.value),<EOL>voiced=obs.value > <NUM_LIT:0>))<EOL><DEDENT>return annotation<EOL>", "docstring": "Convert a pitch_hz annotation to a contour", "id": "f11238:m3"}
{"signature": "@_conversion('<STR_LIT>', '<STR_LIT>')<EOL>def note_hz_to_midi(annotation):", "body": "annotation.namespace = '<STR_LIT>'<EOL>data = annotation.pop_data()<EOL>for obs in data:<EOL><INDENT>annotation.append(time=obs.time, duration=obs.duration,<EOL>confidence=obs.confidence,<EOL>value=<NUM_LIT:12> * (np.log2(obs.value) - np.log2(<NUM_LIT>)) + <NUM_LIT>)<EOL><DEDENT>return annotation<EOL>", "docstring": "Convert a pitch_hz annotation to pitch_midi", "id": "f11238:m6"}
{"signature": "@_conversion('<STR_LIT>', '<STR_LIT>')<EOL>def chordh_to_chord(annotation):", "body": "annotation.namespace = '<STR_LIT>'<EOL>return annotation<EOL>", "docstring": "Convert Harte annotation to chord", "id": "f11238:m13"}
{"signature": "def _conversion(target, source):", "body": "def register(func):<EOL><INDENT>'''<STR_LIT>'''<EOL>__CONVERSION__[target][source] = func<EOL>return func<EOL><DEDENT>return register<EOL>", "docstring": "A decorator to register namespace conversions.\n\n    Usage\n    -----\n    >>> @conversion('tag_open', 'tag_.*')\n    ... def tag_to_open(annotation):\n    ...     annotation.namespace = 'tag_open'\n    ...     return annotation", "id": "f11238:m0"}
{"signature": "def append_records(self, records):", "body": "for obs in records:<EOL><INDENT>if isinstance(obs, Observation):<EOL><INDENT>self.append(**obs._asdict())<EOL><DEDENT>else:<EOL><INDENT>self.append(**obs)<EOL><DEDENT><DEDENT>", "docstring": "Add observations from row-major storage.\n\n        This is primarily useful for deserializing sparsely packed data.\n\n        Parameters\n        ----------\n        records : iterable of dicts or Observations\n            Each element of `records` corresponds to one observation.", "id": "f11239:c2:m3"}
{"signature": "def pop_data(self):", "body": "data = self.data<EOL>self.data = SortedKeyList(key=self._key)<EOL>return data<EOL>", "docstring": "Replace this observation's data with a fresh container.\n\n        Returns\n        -------\n        annotation_data : SortedKeyList\n            The original annotation data container", "id": "f11239:c2:m8"}
{"signature": "def append(self, time=None, duration=None, value=None, confidence=None):", "body": "self.data.add(Observation(time=float(time),<EOL>duration=float(duration),<EOL>value=value,<EOL>confidence=confidence))<EOL>", "docstring": "Append an observation to the data field\n\n        Parameters\n        ----------\n        time : float >= 0\n        duration : float >= 0\n            The time and duration of the new observation, in seconds\n        value\n        confidence\n            The value and confidence of the new observations.\n\n            Types and values should conform to the namespace of the\n            Annotation object.\n\n        Examples\n        --------\n        >>> ann = jams.Annotation(namespace='chord')\n        >>> ann.append(time=3, duration=2, value='E#')", "id": "f11239:c2:m2"}
{"signature": "def deprecated(version, version_removed):", "body": "def __wrapper(func, *args, **kwargs):<EOL><INDENT>'''<STR_LIT>'''<EOL>code = six.get_function_code(func)<EOL>warnings.warn_explicit(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>.format(func.__module__, func.__name__,<EOL>version, version_removed),<EOL>category=DeprecationWarning,<EOL>filename=code.co_filename,<EOL>lineno=code.co_firstlineno + <NUM_LIT:1><EOL>)<EOL>return func(*args, **kwargs)<EOL><DEDENT>return decorator(__wrapper)<EOL>", "docstring": "This is a decorator which can be used to mark functions\n    as deprecated.\n\n    It will result in a warning being emitted when the function is used.", "id": "f11239:m0"}
{"signature": "def slice(self, start_time, end_time, strict=False):", "body": "sliced_array = AnnotationArray()<EOL>for ann in self:<EOL><INDENT>sliced_array.append(ann.slice(start_time, end_time, strict=strict))<EOL><DEDENT>return sliced_array<EOL>", "docstring": "Slice every annotation contained in the annotation array using\n`Annotation.slice`\nand return as a new AnnotationArray\n\nSee `Annotation.slice` for details about slicing. This function does\nnot modify the annotations in the original annotation array.\n\nParameters\n----------\nstart_time : float\n    The desired start time for slicing in seconds.\nend_time\n    The desired end time for slicing in seconds. Must be greater than\n    ``start_time``.\nstrict : bool\n    When ``False`` (default) observations that lie at the boundaries of\n    the slicing range (see `Annotation.slice` for details) will have\n    their time and/or duration adjusted such that only the part of the\n    observation that lies within the trim range is kept. When ``True``\n    such observations are discarded and not included in the sliced\n    annotation.\n\nReturns\n-------\nsliced_array : AnnotationArray\n    An annotation array where every annotation has been sliced.", "id": "f11239:c6:m5"}
{"signature": "@property<EOL><INDENT>def type(self):<DEDENT>", "body": "return self.__class__.__name__<EOL>", "docstring": "The type (class name) of a derived JObject type", "id": "f11239:c0:m18"}
{"signature": "def __repr__(self):", "body": "indent = len(self.type) + <NUM_LIT:2><EOL>jstr = '<STR_LIT>' + '<STR_LIT:U+0020>' * indent<EOL>props = self._display_properties()<EOL>params = jstr.join('<STR_LIT>'.format(p, summary(self[p],<EOL>indent=indent))<EOL>for (p, dp) in props)<EOL>return '<STR_LIT>'.format(self.type, params)<EOL>", "docstring": "Render the object alongside its attributes.", "id": "f11239:c0:m10"}
{"signature": "@classmethod<EOL><INDENT>def __json_init__(cls, **kwargs):<DEDENT>", "body": "return cls(**kwargs)<EOL>", "docstring": "Initialize the object from a dictionary of values", "id": "f11239:c0:m3"}
{"signature": "def to_samples(self, times, confidence=False):", "body": "times = np.asarray(times)<EOL>if times.ndim != <NUM_LIT:1> or np.any(times < <NUM_LIT:0>):<EOL><INDENT>raise ParameterError('<STR_LIT>')<EOL><DEDENT>idx = np.argsort(times)<EOL>samples = times[idx]<EOL>values = [list() for _ in samples]<EOL>confidences = [list() for _ in samples]<EOL>for obs in self.data:<EOL><INDENT>start = np.searchsorted(samples, obs.time)<EOL>end = np.searchsorted(samples, obs.time + obs.duration, side='<STR_LIT:right>')<EOL>for i in range(start, end):<EOL><INDENT>values[idx[i]].append(obs.value)<EOL>confidences[idx[i]].append(obs.confidence)<EOL><DEDENT><DEDENT>if confidence:<EOL><INDENT>return values, confidences<EOL><DEDENT>else:<EOL><INDENT>return values<EOL><DEDENT>", "docstring": "Sample the annotation at specified times.\n\n        Parameters\n        ----------\n        times : np.ndarray, non-negative, ndim=1\n            The times (in seconds) to sample the annotation\n\n        confidence : bool\n            If `True`, return both values and confidences.\n            If `False` (default) only return values.\n\n        Returns\n        -------\n        values : list\n            `values[i]` is a list of observation values for intervals\n            that cover `times[i]`.\n\n        confidence : list (optional)\n            `confidence` values corresponding to `values`", "id": "f11239:c2:m12"}
{"signature": "@property<EOL><INDENT>def __json_light__(self):<DEDENT>", "body": "filtered_dict = dict()<EOL>for k, item in six.iteritems(self.__dict__):<EOL><INDENT>if k.startswith('<STR_LIT:_>') or k == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>if hasattr(item, '<STR_LIT>'):<EOL><INDENT>filtered_dict[k] = item.__json__<EOL><DEDENT>else:<EOL><INDENT>filtered_dict[k] = serialize_obj(item)<EOL><DEDENT><DEDENT>return filtered_dict<EOL>", "docstring": "r\"\"\"Return the JObject as a set of native data types for serialization.\n\n        Note: attributes beginning with underscores are suppressed.\n\n        This also skips the `annotations` field, which will be validated separately.", "id": "f11239:c7:m9"}
{"signature": "def query_pop(query, prefix, sep='<STR_LIT:.>'):", "body": "terms = query.split(sep)<EOL>if terms[<NUM_LIT:0>] == prefix:<EOL><INDENT>terms = terms[<NUM_LIT:1>:]<EOL><DEDENT>return sep.join(terms)<EOL>", "docstring": "Pop a prefix from a query string.\n\n\n    Parameters\n    ----------\n    query : str\n        The query string\n\n    prefix : str\n        The prefix string to pop, if it exists\n\n    sep : str\n        The string to separate fields\n\n    Returns\n    -------\n    popped : str\n        `query` with a `prefix` removed from the front (if found)\n        or `query` if the prefix was not found\n\n    Examples\n    --------\n    >>> query_pop('Annotation.namespace', 'Annotation')\n    'namespace'\n    >>> query_pop('namespace', 'Annotation')\n    'namespace'", "id": "f11239:m3"}
{"signature": "def search(self, **kwargs):", "body": "match = False<EOL>r_query = {}<EOL>myself = self.__class__.__name__<EOL>for k, value in six.iteritems(kwargs):<EOL><INDENT>k_pop = query_pop(k, myself)<EOL>if k_pop:<EOL><INDENT>r_query[k_pop] = value<EOL><DEDENT><DEDENT>if not r_query:<EOL><INDENT>return False<EOL><DEDENT>for key in r_query:<EOL><INDENT>if hasattr(self, key):<EOL><INDENT>match |= match_query(getattr(self, key), r_query[key])<EOL><DEDENT><DEDENT>if not match:<EOL><INDENT>for attr in dir(self):<EOL><INDENT>obj = getattr(self, attr)<EOL>if isinstance(obj, JObject):<EOL><INDENT>match |= obj.search(**r_query)<EOL><DEDENT><DEDENT><DEDENT>return match<EOL>", "docstring": "Query this object (and its descendants).\n\n        Parameters\n        ----------\n        kwargs\n            Each `(key, value)` pair encodes a search field in `key`\n            and a target value in `value`.\n\n            `key` must be a string, and should correspond to a property in\n            the JAMS object hierarchy, e.g., 'Annotation.namespace` or `email`\n\n            `value` must be either an object (tested for equality), a\n            string describing a search pattern (regular expression), or a\n            lambda function which evaluates to `True` if the candidate\n            object matches the search criteria and `False` otherwise.\n\n        Returns\n        -------\n        match : bool\n            `True` if any of the search keys match the specified value,\n            `False` otherwise, or if the search keys do not exist\n            within the object.\n\n        Examples\n        --------\n        >>> J = jams.JObject(foo=5, needle='quick brown fox')\n        >>> J.search(needle='.*brown.*')\n        True\n        >>> J.search(needle='.*orange.*')\n        False\n        >>> J.search(badger='.*brown.*')\n        False\n        >>> J.search(foo=5)\n        True\n        >>> J.search(foo=10)\n        False\n        >>> J.search(foo=lambda x: x < 10)\n        
True\n        >>> J.search(foo=lambda x: x > 10)\n        False", "id": "f11239:c0:m20"}
{"signature": "@classmethod<EOL><INDENT>def _key(cls, obs):<DEDENT>", "body": "if not isinstance(obs, Observation):<EOL><INDENT>raise JamsError('<STR_LIT>'.format(obs))<EOL><DEDENT>return obs.time<EOL>", "docstring": "Provides sorting index for Observation objects", "id": "f11239:c2:m20"}
{"signature": "def to_dataframe(self):", "body": "return pd.DataFrame.from_records(list(self.data),<EOL>columns=['<STR_LIT:time>', '<STR_LIT>',<EOL>'<STR_LIT:value>', '<STR_LIT>'])<EOL>", "docstring": "Convert this annotation to a pandas dataframe.\n\n        Returns\n        -------\n        df : pd.DataFrame\n            Columns are `time, duration, value, confidence`.\n            Each row is an observation, and rows are sorted by\n            ascending `time`.", "id": "f11239:c2:m11"}
{"signature": "def __getitem__(self, key):", "body": "return self.__dict__[key]<EOL>", "docstring": "Dict-style interface", "id": "f11239:c0:m6"}
{"signature": "def to_html(self, max_rows=None):", "body": "n = len(self.data)<EOL>div_id = _get_divid(self)<EOL>out = r'''<STR_LIT>'''.format(div_id, self.namespace, n)<EOL>out += r'''<STR_LIT>'''.format(div_id)<EOL>out += r'''<STR_LIT>'''.format(self.annotation_metadata._repr_html_())<EOL>out += r'''<STR_LIT>'''.format(self.sandbox._repr_html_())<EOL>out += r'''<STR_LIT>'''.format(self.namespace, n)<EOL>out += r'''<STR_LIT>'''<EOL>if max_rows is None or n <= max_rows:<EOL><INDENT>out += self._fmt_rows(<NUM_LIT:0>, n)<EOL><DEDENT>else:<EOL><INDENT>out += self._fmt_rows(<NUM_LIT:0>, max_rows//<NUM_LIT:2>)<EOL>out += r'''<STR_LIT>'''<EOL>out += self._fmt_rows(n-max_rows//<NUM_LIT:2>, n)<EOL><DEDENT>out += r'''<STR_LIT>'''<EOL>out += r'''<STR_LIT>'''<EOL>out += r'''<STR_LIT>'''<EOL>return out<EOL>", "docstring": "Render this annotation list in HTML\n\n        Returns\n        -------\n        rendered : str\n            An HTML table containing this annotation's data.", "id": "f11239:c2:m14"}
{"signature": "def slice(self, start_time, end_time, strict=False):", "body": "<EOL>if self.file_metadata.duration is None:<EOL><INDENT>raise JamsError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if (start_time < <NUM_LIT:0> or<EOL>start_time > float(self.file_metadata.duration) or<EOL>end_time < start_time or<EOL>end_time > float(self.file_metadata.duration)):<EOL><INDENT>raise ParameterError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(float(self.file_metadata.duration)))<EOL><DEDENT>jam_sliced = JAMS(annotations=None,<EOL>file_metadata=self.file_metadata,<EOL>sandbox=self.sandbox)<EOL>jam_sliced.annotations = self.annotations.slice(<EOL>start_time, end_time, strict=strict)<EOL>jam_sliced.file_metadata.duration = end_time - start_time<EOL>if '<STR_LIT>' not in jam_sliced.sandbox.keys():<EOL><INDENT>jam_sliced.sandbox.update(<EOL>slice=[{'<STR_LIT>': start_time, '<STR_LIT>': end_time}])<EOL><DEDENT>else:<EOL><INDENT>jam_sliced.sandbox.slice.append(<EOL>{'<STR_LIT>': start_time, '<STR_LIT>': end_time})<EOL><DEDENT>return jam_sliced<EOL>", "docstring": "Slice all the annotations inside the jam and return as a new `JAMS`\nobject.\n\nSee `Annotation.slice` for details about how the annotations\nare sliced.\n\nThis operation is also documented in the jam-level sandbox\nwith a list keyed by ``JAMS.sandbox.slice`` containing a tuple for each\njam-level slice of the form ``(start_time, end_time)``.\n\nSince slicing is implemented using trimming, the operation will also be\ndocumented in ``JAMS.sandbox.trim`` as described in `JAMS.trim`.\n\nThis function also copies over all of the file metadata from the\noriginal jam.\n\nNote: slicing will affect the duration of the jam, i.e. the new value\nof ``JAMS.file_metadata.duration`` will be ``end_time - start_time``.\n\nParameters\n----------\nstart_time : float\n    The desired start time for slicing in seconds.\nend_time\n    The desired end time for slicing in seconds. 
Must be greater than\n    ``start_time``.\nstrict : bool\n    When ``False`` (default) observations that lie at the boundaries of\n    the slicing range (see `Annotation.slice` for details), will have\n    their time and/or duration adjusted such that only the part of the\n    observation that lies within the slice range is kept. When ``True``\n    such observations are discarded and not included in the sliced\n    annotation.\n\nReturns\n-------\njam_sliced: JAMS\n    The sliced jam with sliced annotations, returned as a new\n    JAMS object.", "id": "f11239:c7:m8"}
{"signature": "@property<EOL><INDENT>def __json_data__(self):<DEDENT>", "body": "if schema.is_dense(self.namespace):<EOL><INDENT>dense_records = dict()<EOL>for field in Observation._fields:<EOL><INDENT>dense_records[field] = []<EOL><DEDENT>for obs in self.data:<EOL><INDENT>for key, val in six.iteritems(obs._asdict()):<EOL><INDENT>dense_records[key].append(serialize_obj(val))<EOL><DEDENT><DEDENT>return dense_records<EOL><DEDENT>else:<EOL><INDENT>return [serialize_obj(_) for _ in self.data]<EOL><DEDENT>", "docstring": "r\"\"\"JSON-serialize the observation sequence.", "id": "f11239:c2:m19"}
{"signature": "def __init__(self, annotations=None):", "body": "super(AnnotationArray, self).__init__()<EOL>if annotations is None:<EOL><INDENT>annotations = list()<EOL><DEDENT>self.extend([Annotation(**obj) for obj in annotations])<EOL>", "docstring": "Create an AnnotationArray.\n\n        Parameters\n        ----------\n        annotations: list\n            List of Annotations, or appropriately formated dicts\n            is consistent with Annotation.", "id": "f11239:c6:m0"}
{"signature": "def __init__(self, curator=None, version='<STR_LIT>', corpus='<STR_LIT>', annotator=None,<EOL>annotation_tools='<STR_LIT>', annotation_rules='<STR_LIT>', validation='<STR_LIT>',<EOL>data_source='<STR_LIT>'):", "body": "super(AnnotationMetadata, self).__init__()<EOL>if curator is None:<EOL><INDENT>curator = Curator()<EOL><DEDENT>if annotator is None:<EOL><INDENT>annotator = JObject()<EOL><DEDENT>self.curator = Curator(**curator)<EOL>self.annotator = JObject(**annotator)<EOL>self.version = version<EOL>self.corpus = corpus<EOL>self.annotation_tools = annotation_tools<EOL>self.annotation_rules = annotation_rules<EOL>self.validation = validation<EOL>self.data_source = data_source<EOL>", "docstring": "Create an AnnotationMetadata object.\n\n        Parameters\n        ----------\n        curator: Curator, default=None\n            Object documenting a name and email address for the person of\n            correspondence.\n\n        version: string, default=''\n            Version of this annotation.\n\n        annotator: dict, default=None\n            Sandbox for information about the specific annotator, such as\n            musical experience, skill level, principal instrument, etc.\n\n        corpus: str, default=''\n            Collection assignment.\n\n        annotation_tools: str, default=''\n            Description of the tools used to create the annotation.\n\n        annotation_rules: str, default=''\n            Description of the rules provided to the annotator.\n\n        validation: str, default=''\n            Methods for validating the integrity of the data.\n\n        data_source: str, default=''\n            Description of where the data originated, e.g. 'Manual Annotation'.", "id": "f11239:c4:m0"}
{"signature": "def update(self, **kwargs):", "body": "for name, value in six.iteritems(kwargs):<EOL><INDENT>setattr(self, name, value)<EOL><DEDENT>", "docstring": "Update the attributes of a JObject.\n\n        Parameters\n        ----------\n        kwargs\n            Keyword arguments of the form `attribute=new_value`\n\n        Examples\n        --------\n        >>> J = jams.JObject(foo=5)\n        >>> J.dumps()\n        '{\"foo\": 5}'\n        >>> J.update(bar='baz')\n        >>> J.dumps()\n        '{\"foo\": 5, \"bar\": \"baz\"}'", "id": "f11239:c0:m17"}
{"signature": "def summary(obj, indent=<NUM_LIT:0>):", "body": "if hasattr(obj, '<STR_LIT>'):<EOL><INDENT>rep = obj.__summary__()<EOL><DEDENT>elif isinstance(obj, SortedKeyList):<EOL><INDENT>rep = '<STR_LIT>'.format(len(obj))<EOL><DEDENT>else:<EOL><INDENT>rep = repr(obj)<EOL><DEDENT>return rep.replace('<STR_LIT:\\n>', '<STR_LIT:\\n>' + '<STR_LIT:U+0020>' * indent)<EOL>", "docstring": "Helper function to format repr strings for JObjects and friends.\n\n    Parameters\n    ----------\n    obj\n        The object to repr\n\n    indent : int >= 0\n        indent each new line by `indent` spaces\n\n    Returns\n    -------\n    r : str\n        If `obj` has a `__summary__` method, it is used.\n\n        If `obj` is a `SortedKeyList`, then it returns a description\n        of the length of the list.\n\n        Otherwise, `repr(obj)`.", "id": "f11239:m6"}
{"signature": "def __init__(self, title='<STR_LIT>', artist='<STR_LIT>', release='<STR_LIT>', duration=None,<EOL>identifiers=None, jams_version=None):", "body": "super(FileMetadata, self).__init__()<EOL>if jams_version is None:<EOL><INDENT>jams_version = __VERSION__<EOL><DEDENT>if identifiers is None:<EOL><INDENT>identifiers = Sandbox()<EOL><DEDENT>self.title = title<EOL>self.artist = artist<EOL>self.release = release<EOL>self.duration = duration<EOL>self.identifiers = Sandbox(**identifiers)<EOL>self.jams_version = jams_version<EOL>", "docstring": "Create a file-level Metadata object.\n\n        Parameters\n        ----------\n        title: str\n            Name of the recording.\n\n        artist: str\n            Name of the artist / musician.\n\n        release: str\n            Name of the release\n\n        duration: number >= 0\n            Time duration of the file, in seconds.\n\n        identifiers : jams.Sandbox\n            Sandbox of identifier keys (eg, musicbrainz ids)\n\n        jams_version: str\n            Version of the JAMS Schema.", "id": "f11239:c5:m0"}
{"signature": "def append_columns(self, columns):", "body": "self.append_records([dict(time=t, duration=d, value=v, confidence=c)<EOL>for (t, d, v, c)<EOL>in six.moves.zip(columns['<STR_LIT:time>'],<EOL>columns['<STR_LIT>'],<EOL>columns['<STR_LIT:value>'],<EOL>columns['<STR_LIT>'])])<EOL>", "docstring": "Add observations from column-major storage.\n\n        This is primarily used for deserializing densely packed data.\n\n        Parameters\n        ----------\n        columns : dict of lists\n            Keys must be `time, duration, value, confidence`,\n            and each much be a list of equal length.", "id": "f11239:c2:m4"}
{"signature": "def __init__(self, **kwargs):", "body": "super(JObject, self).__init__()<EOL>for name, value in six.iteritems(kwargs):<EOL><INDENT>setattr(self, name, value)<EOL><DEDENT>", "docstring": "Construct a new JObject\n\n        Parameters\n        ----------\n        kwargs\n            Each keyword argument becomes an attribute with the specified value\n\n        Examples\n        --------\n        >>> J = jams.JObject(foo=5)\n        >>> J.foo\n        5\n        >>> dict(J)\n        {'foo': 5}", "id": "f11239:c0:m0"}
{"signature": "def process_arguments(args):", "body": "parser = argparse.ArgumentParser(description='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>',<EOL>action='<STR_LIT:store>',<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>',<EOL>action='<STR_LIT:store>',<EOL>nargs='<STR_LIT:+>',<EOL>help='<STR_LIT>')<EOL>return vars(parser.parse_args(args))<EOL>", "docstring": "Argument parser", "id": "f11240:m0"}
{"signature": "def validate(schema_file=None, jams_files=None):", "body": "schema = load_json(schema_file)<EOL>for jams_file in jams_files:<EOL><INDENT>try:<EOL><INDENT>jams = load_json(jams_file)<EOL>jsonschema.validate(jams, schema)<EOL>print('<STR_LIT>'.format(jams_file))<EOL><DEDENT>except jsonschema.ValidationError as exc:<EOL><INDENT>print('<STR_LIT>'.format(jams_file))<EOL>print(exc)<EOL><DEDENT><DEDENT>", "docstring": "Validate a jams file against a schema", "id": "f11240:m2"}
{"signature": "def load_json(filename):", "body": "with open(filename, '<STR_LIT:r>') as fdesc:<EOL><INDENT>return json.load(fdesc)<EOL><DEDENT>", "docstring": "Load a json file", "id": "f11240:m1"}
{"signature": "def mkclick(freq, sr=<NUM_LIT>, duration=<NUM_LIT:0.1>):", "body": "times = np.arange(int(sr * duration))<EOL>click = np.sin(<NUM_LIT:2> * np.pi * times * freq / float(sr))<EOL>click *= np.exp(- times / (<NUM_LIT> * sr))<EOL>return click<EOL>", "docstring": "Generate a click sample.\n\n    This replicates functionality from mir_eval.sonify.clicks,\n    but exposes the target frequency and duration.", "id": "f11242:m0"}
{"signature": "def piano_roll(annotation, sr=<NUM_LIT>, length=None, **kwargs):", "body": "intervals, pitches = annotation.to_interval_values()<EOL>pitch_map = {f: idx for idx, f in enumerate(np.unique(pitches))}<EOL>gram = np.zeros((len(pitch_map), len(intervals)))<EOL>for col, f in enumerate(pitches):<EOL><INDENT>gram[pitch_map[f], col] = <NUM_LIT:1><EOL><DEDENT>return filter_kwargs(mir_eval.sonify.time_frequency,<EOL>gram, pitches, intervals,<EOL>sr, length=length, **kwargs)<EOL>", "docstring": "Sonify a piano-roll\n\n    This uses mir_eval.sonify.time_frequency, and is appropriate\n    for sparse transcription data, e.g., annotations in the `note_midi`\n    namespace.", "id": "f11242:m6"}
{"signature": "def sonify(annotation, sr=<NUM_LIT>, duration=None, **kwargs):", "body": "length = None<EOL>if duration is None:<EOL><INDENT>duration = annotation.duration<EOL><DEDENT>if duration is not None:<EOL><INDENT>length = int(duration * sr)<EOL><DEDENT>if annotation.namespace in SONIFY_MAPPING:<EOL><INDENT>ann = coerce_annotation(annotation, annotation.namespace)<EOL>return SONIFY_MAPPING[annotation.namespace](ann,<EOL>sr=sr,<EOL>length=length,<EOL>**kwargs)<EOL><DEDENT>for namespace, func in six.iteritems(SONIFY_MAPPING):<EOL><INDENT>try:<EOL><INDENT>ann = coerce_annotation(annotation, namespace)<EOL>return func(ann, sr=sr, length=length, **kwargs)<EOL><DEDENT>except NamespaceError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>raise NamespaceError('<STR_LIT>'<EOL>.format(annotation.namespace))<EOL>", "docstring": "Sonify a jams annotation through mir_eval\n\n    Parameters\n    ----------\n    annotation : jams.Annotation\n        The annotation to sonify\n\n    sr = : positive number\n        The sampling rate of the output waveform\n\n    duration : float (optional)\n        Optional length (in seconds) of the output waveform\n\n    kwargs\n        Additional keyword arguments to mir_eval.sonify functions\n\n    Returns\n    -------\n    y_sonified : np.ndarray\n        The waveform of the sonified annotation\n\n    Raises\n    ------\n    NamespaceError\n        If the annotation has an un-sonifiable namespace", "id": "f11242:m7"}
{"signature": "def clicks(annotation, sr=<NUM_LIT>, length=None, **kwargs):", "body": "interval, _ = annotation.to_interval_values()<EOL>return filter_kwargs(mir_eval.sonify.clicks, interval[:, <NUM_LIT:0>],<EOL>fs=sr, length=length, **kwargs)<EOL>", "docstring": "Sonify events with clicks.\n\n    This uses mir_eval.sonify.clicks, and is appropriate for instantaneous\n    events such as beats or segment boundaries.", "id": "f11242:m1"}
{"signature": "def chord(annotation, sr=<NUM_LIT>, length=None, **kwargs):", "body": "intervals, chords = annotation.to_interval_values()<EOL>return filter_kwargs(mir_eval.sonify.chords,<EOL>chords, intervals,<EOL>fs=sr, length=length,<EOL>**kwargs)<EOL>", "docstring": "Sonify chords\n\n    This uses mir_eval.sonify.chords.", "id": "f11242:m4"}
{"signature": "def parse_arguments(args):", "body": "parser = argparse.ArgumentParser(description='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT:-c>',<EOL>'<STR_LIT>',<EOL>dest='<STR_LIT>',<EOL>action='<STR_LIT:store_true>',<EOL>default=False,<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', dest='<STR_LIT>', type=str, default='<STR_LIT:#>',<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>dest='<STR_LIT>',<EOL>nargs='<STR_LIT:+>',<EOL>default=['<STR_LIT>'],<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>')<EOL>return vars(parser.parse_args(args))<EOL>", "docstring": "Parse arguments from the command line", "id": "f11244:m4"}
{"signature": "def get_comments(jam, ann):", "body": "jam_comments = jam.file_metadata.__json__<EOL>ann_comments = ann.annotation_metadata.__json__<EOL>return json.dumps({'<STR_LIT>': jam_comments,<EOL>'<STR_LIT>': ann_comments},<EOL>indent=<NUM_LIT:2>)<EOL>", "docstring": "Get the metadata from a jam and an annotation, combined as a string.\n\n    Parameters\n    ----------\n    jam : JAMS\n        The jams object\n\n    ann : Annotation\n        An annotation object\n\n    Returns\n    -------\n    comments : str\n        The jam.file_metadata and ann.annotation_metadata, combined and serialized", "id": "f11244:m1"}
{"signature": "def convert_jams(jams_file, output_prefix, csv=False, comment_char='<STR_LIT:#>', namespaces=None):", "body": "if namespaces is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>jam = jams.load(jams_file)<EOL>counter = collections.Counter()<EOL>annotations = []<EOL>for query in namespaces:<EOL><INDENT>annotations.extend(jam.search(namespace=query))<EOL><DEDENT>if csv:<EOL><INDENT>suffix = '<STR_LIT>'<EOL>sep = '<STR_LIT:U+002C>'<EOL><DEDENT>else:<EOL><INDENT>suffix = '<STR_LIT>'<EOL>sep = '<STR_LIT:\\t>'<EOL><DEDENT>for ann in annotations:<EOL><INDENT>index = counter[ann.namespace]<EOL>counter[ann.namespace] += <NUM_LIT:1><EOL>filename = os.path.extsep.join([get_output_name(output_prefix,<EOL>ann.namespace,<EOL>index),<EOL>suffix])<EOL>comment = get_comments(jam, ann)<EOL>lab_dump(ann, comment, filename, sep, comment_char)<EOL><DEDENT>", "docstring": "Convert jams to labs.\n\n    Parameters\n    ----------\n    jams_file : str\n        The path on disk to the jams file in question\n\n    output_prefix : str\n        The file path prefix of the outputs\n\n    csv : bool\n        Whether to output in csv (True) or lab (False) format\n\n    comment_char : str\n        The character used to denote comments\n\n    namespaces : list-like\n        The set of namespace patterns to match for output", "id": "f11244:m3"}
{"signature": "def get_output_name(output_prefix, namespace, index):", "body": "return '<STR_LIT>'.format(output_prefix, namespace, index)<EOL>", "docstring": "Get the output name (prefix)\n\n    Parameters\n    ----------\n    output_prefix : str\n        The path prefix of the target filename\n\n    namespace : str\n        The namespace of the annotation in question\n\n    index : int\n        The index number of this annotation within the namespace\n\n    Returns\n    -------\n    output_name : str\n        \"output_prefix__namespace__index\"", "id": "f11244:m0"}
{"signature": "def pprint(arr, columns=('<STR_LIT>', '<STR_LIT>'),<EOL>names=('<STR_LIT>', '<STR_LIT>'),<EOL>max_rows=<NUM_LIT:32>, precision=<NUM_LIT:2>):", "body": "if max_rows is True:<EOL><INDENT>pd.set_option('<STR_LIT>', <NUM_LIT:1000>)<EOL><DEDENT>elif type(max_rows) is int:<EOL><INDENT>pd.set_option('<STR_LIT>', max_rows)<EOL><DEDENT>pd.set_option('<STR_LIT>', precision)<EOL>df = pd.DataFrame(arr.flatten(), index=arr['<STR_LIT:id>'].flatten(),<EOL>columns=columns)<EOL>df.columns = names<EOL>return df.style.format({names[<NUM_LIT:0>]: '<STR_LIT>',<EOL>names[<NUM_LIT:1>]: '<STR_LIT>'})<EOL>", "docstring": "Create a pandas DataFrame from a numpy ndarray.\n\nBy default use temp and lum with max rows of 32 and precision of 2.\n\narr - An numpy.ndarray.\ncolumns - The columns to include in the pandas DataFrame. Defaults to\n          temperature and luminosity.\nnames - The column names for the pandas DataFrame. Defaults to\n        Temperature and Luminosity.\nmax_rows - If max_rows is an integer then set the pandas\n           display.max_rows option to that value. If max_rows\n           is True then set display.max_rows option  to 1000.\nprecision - An integer to set the pandas precision option.", "id": "f11253:m1"}
{"signature": "def __init__(self, query=None, table=None):", "body": "if table is None:<EOL><INDENT>if query:<EOL><INDENT>super().__init__(query=query)<EOL><DEDENT>else:<EOL><INDENT>super().__init__(self.query)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>super().__init__(table=table)<EOL><DEDENT>", "docstring": "Initialize the data using the default query for Berkeley 20,\nor a provided query xor table.", "id": "f11253:c4:m0"}
{"signature": "def setup_notebook(debug=False):", "body": "output_notebook(INLINE, hide_banner=True)<EOL>if debug:<EOL><INDENT>_setup_logging(logging.DEBUG)<EOL>logging.debug('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>_setup_logging(logging.WARNING)<EOL><DEDENT>if '<STR_LIT>' not in os.environ:<EOL><INDENT>global jupyter_proxy_url<EOL>jupyter_proxy_url = '<STR_LIT>'<EOL>logging.info('<STR_LIT>')<EOL><DEDENT>", "docstring": "Called at the start of notebook execution to setup the environment.\n\n    This will configure bokeh, and setup the logging library to be\n    reasonable.", "id": "f11260:m2"}
{"signature": "def hr_diagram_figure(cluster):", "body": "temps, lums = round_teff_luminosity(cluster)<EOL>x, y = temps, lums<EOL>colors, color_mapper = hr_diagram_color_helper(temps)<EOL>x_range = [max(x) + max(x) * <NUM_LIT>, min(x) - min(x) * <NUM_LIT>]<EOL>source = ColumnDataSource(data=dict(x=x, y=y, color=colors))<EOL>pf = figure(y_axis_type='<STR_LIT>', x_range=x_range, name='<STR_LIT>',<EOL>tools='<STR_LIT>',<EOL>title='<STR_LIT>'.format(cluster.name))<EOL>pf.select(BoxSelectTool).select_every_mousemove = False<EOL>pf.select(LassoSelectTool).select_every_mousemove = False<EOL>hover = pf.select(HoverTool)[<NUM_LIT:0>]<EOL>hover.tooltips = [(\"<STR_LIT>\", \"<STR_LIT>\"),<EOL>(\"<STR_LIT>\", \"<STR_LIT>\")]<EOL>_diagram(source=source, plot_figure=pf, name='<STR_LIT>',<EOL>color={'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': color_mapper},<EOL>xaxis_label='<STR_LIT>',<EOL>yaxis_label='<STR_LIT>')<EOL>return pf<EOL>", "docstring": "Given a cluster create a Bokeh plot figure creating an\nH-R diagram.", "id": "f11263:m9"}
{"signature": "def hr_diagram_selection(cluster_name):", "body": "cluster = get_hr_data(cluster_name)<EOL>temps, lums = round_teff_luminosity(cluster)<EOL>x, y = temps, lums<EOL>colors, color_mapper = hr_diagram_color_helper(temps)<EOL>x_range = [max(x) + max(x) * <NUM_LIT>, min(x) - min(x) * <NUM_LIT>]<EOL>source = ColumnDataSource(data=dict(x=x, y=y, color=colors), name='<STR_LIT>')<EOL>source_selected = ColumnDataSource(data=dict(x=[], y=[], color=[]),<EOL>name='<STR_LIT>')<EOL>pf = figure(y_axis_type='<STR_LIT>', x_range=x_range,<EOL>tools='<STR_LIT>',<EOL>title='<STR_LIT>'.format(cluster.name))<EOL>_diagram(source=source, plot_figure=pf, name='<STR_LIT>', color={'<STR_LIT>':<EOL>'<STR_LIT>', '<STR_LIT>': color_mapper},<EOL>xaxis_label='<STR_LIT>',<EOL>yaxis_label='<STR_LIT>')<EOL>pf_selected = figure(y_axis_type='<STR_LIT>', y_range=pf.y_range,<EOL>x_range=x_range,<EOL>tools='<STR_LIT>',<EOL>title='<STR_LIT>'.format(cluster.name))<EOL>_diagram(source=source_selected, plot_figure=pf_selected, name='<STR_LIT>',<EOL>color={'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': color_mapper},<EOL>xaxis_label='<STR_LIT>',<EOL>yaxis_label='<STR_LIT>')<EOL>source.callback = CustomJS(args=dict(source_selected=source_selected),<EOL>code=\"\"\"<STR_LIT>\"\"\")<EOL>show_with_bokeh_server(row(pf, pf_selected))<EOL>", "docstring": "Given a cluster create two Bokeh plot based H-R diagrams.\nThe Selection in the left H-R diagram will show up on the\nright one.", "id": "f11263:m18"}
{"signature": "def hr_diagram(cluster_name, output=None):", "body": "cluster = get_hr_data(cluster_name)<EOL>pf = hr_diagram_figure(cluster)<EOL>show_with_bokeh_server(pf)<EOL>", "docstring": "Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R\n    diagram using the cluster_name; then show it.\n\n    Re", "id": "f11263:m6"}
{"signature": "def round_teff_luminosity(cluster):", "body": "temps = [round(t, -<NUM_LIT:1>) for t in teff(cluster)]<EOL>lums = [round(l, <NUM_LIT:3>) for l in luminosity(cluster)]<EOL>return temps, lums<EOL>", "docstring": "Returns rounded teff and luminosity lists.", "id": "f11263:m8"}
{"signature": "def cc_diagram(cluster_name):", "body": "x, y = get_hr_data(cluster_name)<EOL>y_range = [max(y) + <NUM_LIT:0.5>, min(y) - <NUM_LIT>]<EOL>pf = figure(y_range=y_range, title=cluster_name)<EOL>_diagram(x, y, pf)<EOL>show_with_bokeh_server(pf)<EOL>", "docstring": "Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R\n    diagram using the cluster_name; then show it.", "id": "f11263:m2"}
{"signature": "def color(teffs):", "body": "colors = []<EOL>for t in teffs:<EOL><INDENT>if t >= <NUM_LIT>:<EOL><INDENT>colors.append('<STR_LIT>')  <EOL><DEDENT>elif t >= <NUM_LIT>:<EOL><INDENT>colors.append('<STR_LIT>')  <EOL><DEDENT>elif t >= <NUM_LIT>:<EOL><INDENT>colors.append('<STR_LIT>')  <EOL><DEDENT>elif t >= <NUM_LIT>:<EOL><INDENT>colors.append('<STR_LIT>')  <EOL><DEDENT>else:<EOL><INDENT>colors.append('<STR_LIT>')  <EOL><DEDENT><DEDENT>return colors<EOL>", "docstring": "Conventional color descriptions of stars.\nSource: https://en.wikipedia.org/wiki/Stellar_classification", "id": "f11264:m5"}
{"signature": "def round_arr_teff_luminosity(arr):", "body": "arr['<STR_LIT>'] = np.around(arr['<STR_LIT>'], -<NUM_LIT:1>)<EOL>arr['<STR_LIT>'] = np.around(arr['<STR_LIT>'], <NUM_LIT:3>)<EOL>return arr<EOL>", "docstring": "Return the numpy array with rounded teff and luminosity columns.", "id": "f11264:m7"}
{"signature": "def teff(cluster):", "body": "b_vs, _ = cluster.stars()<EOL>teffs = []<EOL>for b_v in b_vs:<EOL><INDENT>b_v -= cluster.eb_v<EOL>if b_v > -<NUM_LIT>:<EOL><INDENT>x = (<NUM_LIT> - b_v) / <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>x = (<NUM_LIT> - math.sqrt(<NUM_LIT> + <NUM_LIT> * b_v)) / <NUM_LIT><EOL><DEDENT>teffs.append(math.pow(<NUM_LIT:10>, x))<EOL><DEDENT>return teffs<EOL>", "docstring": "Calculate Teff for main sequence stars ranging from Teff 3500K - 8000K. Use\n[Fe/H] of the cluster, if available.\n\nReturns a list of Teff values.", "id": "f11264:m2"}
{"signature": "def table(cluster):", "body": "teffs = teff(cluster)<EOL>lums = luminosity(cluster)<EOL>arr = cluster.to_array()<EOL>i = <NUM_LIT:0><EOL>for row in arr:<EOL><INDENT>row['<STR_LIT>'][<NUM_LIT:0>] = np.array([lums[i]], dtype='<STR_LIT:f>')<EOL>row['<STR_LIT>'][<NUM_LIT:0>] = np.array([teffs[i]], dtype='<STR_LIT:f>')<EOL>i += <NUM_LIT:1><EOL><DEDENT>arr = round_arr_teff_luminosity(arr)<EOL>return arr<EOL>", "docstring": "Create a numpy.ndarray with all observed fields and\ncomputed teff and luminosity values.", "id": "f11264:m6"}
{"signature": "def get_order_book(self):", "body": "return self.book<EOL>", "docstring": "Returns the Real-Time Order Book.\n\n        :returns: the Real-Time Order Book\n        :rtype: dict", "id": "f11266:c0:m14"}
{"signature": "def resume(self):", "body": "self.pause = False<EOL>", "docstring": "Resumes real-time updates.", "id": "f11266:c0:m13"}
{"signature": "def _post(self, *args, **kwargs):", "body": "return self._request('<STR_LIT>', *args, **kwargs)<EOL>", "docstring": "Performs HTTP POST requests.\n\n        :param args: arguments\n        :param kwargs: argument keywords\n        :returns: requested data\n        :raises APIError: for non-2xx responses", "id": "f11267:c1:m6"}
{"signature": "def _handle_response(self, response):", "body": "if not str(response.status_code).startswith('<STR_LIT:2>'):<EOL><INDENT>raise get_api_error(response)<EOL><DEDENT>return response<EOL>", "docstring": "Returns the given response or raises an APIError for non-2xx responses.\n\n        :param requests.Response response: HTTP response\n        :returns: requested data\n        :rtype: requests.Response\n        :raises APIError: for non-2xx responses", "id": "f11267:c1:m3"}
{"signature": "def _format_iso_time(self, time):", "body": "if isinstance(time, str):<EOL><INDENT>return time<EOL><DEDENT>elif isinstance(time, datetime):<EOL><INDENT>return time.strftime('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Makes sure we have proper ISO 8601 time.\n\n        :param time: either already ISO 8601 a string or datetime.datetime\n        :returns: ISO 8601 time\n        :rtype: str", "id": "f11267:c1:m2"}
{"signature": "def is_before(self):", "body": "return self.before<EOL>", "docstring": "Returns True if the direction is set to before.\n\n        :returns: True if the direction is set to before, otherwise False\n        :rtype: bool", "id": "f11267:c2:m5"}
{"signature": "def _get(self, *args, **kwargs):", "body": "return self._request('<STR_LIT>', *args, **kwargs)<EOL>", "docstring": "Performs HTTP GET requests.\n\n        :param args: arguments\n        :param kwargs: argument keywords\n        :returns: requested data\n        :raises APIError: for non-2xx responses", "id": "f11267:c1:m5"}
{"signature": "def get_after_cursor(self):", "body": "return self.after_cursor<EOL>", "docstring": "Acquires the after cursor.\n\n        :returns: the after cursor", "id": "f11267:c2:m11"}
{"signature": "def __next__(self):", "body": "next = self.endpoint()<EOL>if next is not None:<EOL><INDENT>return next<EOL><DEDENT>raise StopIteration<EOL>", "docstring": "Iterator function for Python 3.\n\n        :returns: the next message in the sequence\n        :raises StopIteration: if there are no more messages", "id": "f11267:c2:m2"}
{"signature": "def set_before(self):", "body": "self.before = True<EOL>", "docstring": "Sets the direction to before.", "id": "f11267:c2:m7"}
{"signature": "def set_after(self):", "body": "self.before = False<EOL>", "docstring": "Sets the direction to after.", "id": "f11267:c2:m8"}
{"signature": "def _request(self, method, *relative_path_parts, **kwargs):", "body": "raise NotImplementedError<EOL>", "docstring": "Abstract method - must be overriden.", "id": "f11267:c1:m4"}
{"signature": "def withdraw(self, amount, coinbase_account_id):", "body": "return self._deposit_withdraw('<STR_LIT>', amount, coinbase_account_id)<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#depositwithdraw>`_", "id": "f11268:c1:m16"}
{"signature": "def new_accounts_report(self,<EOL>start_date,<EOL>end_date,<EOL>account_id,<EOL>product_id='<STR_LIT>',<EOL>format=None,<EOL>email=None):", "body": "return self._new_report(start_date,<EOL>'<STR_LIT>',<EOL>end_date,<EOL>product_id,<EOL>account_id,<EOL>format,<EOL>email)<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#create-a-new-report>`_", "id": "f11268:c1:m19"}
{"signature": "def get_holds(self, account_id):", "body": "return self._get('<STR_LIT>', account_id, '<STR_LIT>')<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#get-holds>`_", "id": "f11268:c1:m5"}
{"signature": "def _new_report(self,<EOL>type,<EOL>start_date,<EOL>end_date,<EOL>product_id='<STR_LIT>',<EOL>account_id=None,<EOL>format=None,<EOL>email=None):", "body": "data = {<EOL>'<STR_LIT:type>':type,<EOL>'<STR_LIT>':self._format_iso_time(start_date),<EOL>'<STR_LIT>':self._format_iso_time(end_date),<EOL>'<STR_LIT>':product_id,<EOL>'<STR_LIT>':account_id,<EOL>'<STR_LIT>':format,<EOL>'<STR_LIT:email>':email<EOL>}<EOL>return self._post('<STR_LIT>', data=data)<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#create-a-new-report>`_", "id": "f11268:c1:m17"}
{"signature": "def cancel_order(self, order_id):", "body": "return self._delete('<STR_LIT>', order_id)<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#cancel-an-order>`_", "id": "f11268:c1:m9"}
{"signature": "def get_order(self, order_id):", "body": "return self._get('<STR_LIT>', order_id)<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#get-an-order>`_", "id": "f11268:c1:m12"}
{"signature": "def _request(self, method, *relative_path_parts, **kwargs):", "body": "uri = self._create_api_uri(*relative_path_parts)<EOL>if method == '<STR_LIT>':<EOL><INDENT>response = get(uri, auth=self.auth, params=kwargs.get('<STR_LIT>', None))<EOL><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>response = post(uri, auth=self.auth, json=kwargs.get('<STR_LIT:data>', None))<EOL><DEDENT>else:<EOL><INDENT>response = delete(uri, auth=self.auth, json=kwargs.get('<STR_LIT:data>', None))<EOL><DEDENT>return self._handle_response(response).json()<EOL>", "docstring": "Sends an HTTP request to the REST API and receives the requested data.\n\n        :param str method: HTTP method name\n        :param relative_path_parts: the relative paths for the request URI\n        :param kwargs: argument keywords\n        :returns: requested data\n        :raises APIError: for non-2xx responses", "id": "f11268:c1:m1"}
{"signature": "def list_orders(self, status=None):", "body": "return self._get('<STR_LIT>', params={'<STR_LIT:status>':status})<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#list-orders>`_", "id": "f11268:c1:m11"}
{"signature": "def place_limit_order(self,<EOL>side,<EOL>price,<EOL>size,<EOL>product_id='<STR_LIT>',<EOL>client_oid=None,<EOL>stp=None,<EOL>time_in_force=None,<EOL>cancel_after=None,<EOL>post_only=None):", "body": "return self._place_order(side,<EOL>product_id=product_id,<EOL>client_oid=client_oid,<EOL>type='<STR_LIT>',<EOL>stp=stp,<EOL>price=price,<EOL>size=size,<EOL>time_in_force=time_in_force,<EOL>cancel_after=cancel_after,<EOL>post_only=post_only)<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#orders>`_", "id": "f11268:c1:m7"}
{"signature": "def get_product_ticker(self, product_id='<STR_LIT>'):", "body": "return self._get('<STR_LIT>', product_id, '<STR_LIT>')<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#get-product-ticker>`_", "id": "f11269:c0:m3"}
{"signature": "def get_historic_trades(self, start, end, granularity, product_id='<STR_LIT>'):", "body": "params = {<EOL>'<STR_LIT:start>':self._format_iso_time(start),<EOL>'<STR_LIT:end>':self._format_iso_time(end),<EOL>'<STR_LIT>':granularity<EOL>}<EOL>return self._get('<STR_LIT>', product_id, '<STR_LIT>', params=params)<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#get-historic-rates>`_\n\n        :param start: either datetime.datetime or str in ISO 8601\n        :param end: either datetime.datetime or str in ISO 8601\n        :pram int granularity: desired timeslice in seconds\n        :returns: desired data", "id": "f11269:c0:m5"}
{"signature": "def get_time(self):", "body": "return self._get('<STR_LIT:time>')<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#time>`_", "id": "f11269:c0:m8"}
{"signature": "def get_trades(self, product_id='<STR_LIT>'):", "body": "return self._get('<STR_LIT>', product_id, '<STR_LIT>')<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#get-trades>`_", "id": "f11269:c0:m4"}
{"signature": "def get_products(self):", "body": "return self._get('<STR_LIT>')<EOL>", "docstring": "`<https://docs.exchange.coinbase.com/#get-products>`_", "id": "f11269:c0:m1"}
{"signature": "def _request(self, method, *relative_path_parts, **kwargs):", "body": "uri = self._create_api_uri(*relative_path_parts)<EOL>response = get(uri, params=kwargs.get('<STR_LIT>', None))<EOL>return self._handle_response(response).json()<EOL>", "docstring": "Sends an HTTP request to the REST API and receives the requested data.\n\n        :param str method: HTTP method name\n        :param relative_path_parts: the relative paths for the request URI\n        :param kwargs: argument keywords\n        :returns: requested data\n        :raises APIError: for non-2xx responses", "id": "f11269:c0:m0"}
{"signature": "def connected(self):", "body": "if self._ws:<EOL><INDENT>return self._ws.connected<EOL><DEDENT>return False<EOL>", "docstring": "Checks if we are connected to the WebSocket Feed.\n\n        :returns: True if connected, otherwise False\n        :rtype: bool", "id": "f11270:c0:m10"}
{"signature": "def receive(self):", "body": "if self.connected():<EOL><INDENT>return self._format_message(self._ws.recv())<EOL><DEDENT>return None<EOL>", "docstring": "Receive the next message in the sequence.\n\n        :returns: the next message in the sequence, None if not connected\n        :rtype: dict", "id": "f11270:c0:m9"}
{"signature": "def connect(self):", "body": "if not self.connected():<EOL><INDENT>self._ws = create_connection(self.WS_URI)<EOL>message = {<EOL>'<STR_LIT:type>':self.WS_TYPE,<EOL>'<STR_LIT>':self.WS_PRODUCT_ID<EOL>}<EOL>self._ws.send(dumps(message))<EOL>with self._lock:<EOL><INDENT>if not self._thread:<EOL><INDENT>thread = Thread(target=self._keep_alive_thread, args=[])<EOL>thread.start()<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Connects and subscribes to the WebSocket Feed.", "id": "f11270:c0:m7"}
{"signature": "def disconnect(self):", "body": "if self.connected():<EOL><INDENT>self._ws.close()<EOL>self._ws = None<EOL><DEDENT>", "docstring": "Disconnects from the WebSocket Feed.", "id": "f11270:c0:m8"}
{"signature": "@property<EOL><INDENT>def loaded(self):<DEDENT>", "body": "return True<EOL>", "docstring": "Loaded state of the page.\n\n        By default the driver will try to wait for any page loads to be\n        complete, however it's not uncommon for it to return early. To address\n        this you can override :py:attr:`loaded` to return ``True`` when the\n        page has finished loading.\n\n        :return: ``True`` if page is loaded, else ``False``.\n        :rtype: bool\n\n        Usage (Selenium)::\n\n          from pypom import Page\n          from selenium.webdriver.common.by import By\n\n          class Mozilla(Page):\n\n              @property\n              def loaded(self):\n                  body = self.find_element(By.TAG_NAME, 'body')\n                  return 'loaded' in body.get_attribute('class')\n\n        Usage (Splinter)::\n\n          from pypom import Page\n\n          class Mozilla(Page):\n\n              def loaded(self):\n                  body = self.find_element('tag', 'body')\n                  return 'loaded' in body['class']\n\n        Examples::\n\n            # wait for the seed_url value to be in the current URL\n            self.seed_url in self.selenium.current_url", "id": "f11285:c0:m4"}
{"signature": "def find_elements(self, strategy, locator):", "body": "return self.driver_adapter.find_elements(strategy, locator)<EOL>", "docstring": "Finds elements on the page.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n        :param locator: Location of target elements.\n        :type strategy: str\n        :type locator: str\n        :return: List of :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.element_list.ElementList`\n        :rtype: list", "id": "f11286:c0:m3"}
{"signature": "@property<EOL><INDENT>def selenium(self):<DEDENT>", "body": "warn(\"<STR_LIT>\", DeprecationWarning, stacklevel=<NUM_LIT:2>)<EOL>return self.driver<EOL>", "docstring": "Backwards compatibility attribute", "id": "f11286:c0:m1"}
{"signature": "def is_element_displayed(self, strategy, locator):", "body": "return self.driver_adapter.is_element_displayed(strategy, locator)<EOL>", "docstring": "Checks whether an element is displayed.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n        :param locator: Location of target element.\n        :type strategy: str\n        :type locator: str\n        :return: ``True`` if element is displayed, else ``False``.\n        :rtype: bool", "id": "f11286:c0:m5"}
{"signature": "def is_element_present(self, strategy, locator):", "body": "return self.driver_adapter.is_element_present(strategy, locator)<EOL>", "docstring": "Checks whether an element is present.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n        :param locator: Location of target element.\n        :type strategy: str\n        :type locator: str\n        :return: ``True`` if element is present, else ``False``.\n        :rtype: bool", "id": "f11286:c0:m4"}
{"signature": "@property<EOL><INDENT>def loaded(self):<DEDENT>", "body": "return True<EOL>", "docstring": "Loaded state of the page region.\n\n        You may need to initialise your page region before it's ready for you\n        to interact with it. If this is the case, you can override\n        :py:attr:`loaded` to return ``True`` when the region has finished\n        loading.\n\n        :return: ``True`` if page is loaded, else ``False``.\n        :rtype: bool\n\n        Usage (Selenium)::\n\n          from pypom import Page, Region\n          from selenium.webdriver.common.by import By\n\n          class Mozilla(Page):\n              URL_TEMPLATE = 'https://www.mozilla.org/'\n\n              @property\n              def newsletter(self):\n                  return Newsletter(self)\n\n              class Newsletter(Region):\n                  _root_locator = (By.ID, 'newsletter-form')\n\n                  @property\n                  def loaded(self):\n                      return 'loaded' in self.root.get_attribute('class')\n\n        Usage (Splinter)::\n\n          from pypom import Page, Region\n\n          class Mozilla(Page):\n              URL_TEMPLATE = 'https://www.mozilla.org/'\n\n              @property\n              def newsletter(self):\n                  return Newsletter(self)\n\n              class Newsletter(Region):\n                  _root_locator = ('id', 'newsletter-form')\n\n                  @property\n                  def loaded(self):\n                      return 'loaded' in self.root['class']", "id": "f11287:c0:m7"}
{"signature": "def is_element_displayed(self, strategy, locator):", "body": "return self.driver_adapter.is_element_displayed(<EOL>strategy, locator, root=self.root<EOL>)<EOL>", "docstring": "Checks whether an element is displayed.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n        :param locator: Location of target element.\n        :type strategy: str\n        :type locator: str\n        :return: ``True`` if element is displayed, else ``False``.\n        :rtype: bool", "id": "f11287:c0:m6"}
{"signature": "def is_element_present(self, strategy, locator):", "body": "return self.driver_adapter.is_element_present(strategy, locator, root=self.root)<EOL>", "docstring": "Checks whether an element is present.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n        :param locator: Location of target element.\n        :type strategy: str\n        :type locator: str\n        :return: ``True`` if element is present, else ``False``.\n        :rtype: bool", "id": "f11287:c0:m5"}
{"signature": "def find_element(self, strategy, locator):", "body": "return self.driver_adapter.find_element(strategy, locator, root=self.root)<EOL>", "docstring": "Finds an element on the page.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n        :param locator: Location of target element.\n        :type strategy: str\n        :type locator: str\n        :return: An element.\n        :rytpe: :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.driver.webdriver.WebDriverElement`", "id": "f11287:c0:m3"}
{"signature": "def find_elements(self, strategy, locator):", "body": "return self.driver_adapter.find_elements(strategy, locator, root=self.root)<EOL>", "docstring": "Finds elements on the page.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` or :py:attr:`~pypom.splinter_driver.ALLOWED_STRATEGIES`.\n        :param locator: Location of target elements.\n        :type strategy: str\n        :type locator: str\n        :return: List of :py:class:`~selenium.webdriver.remote.webelement.WebElement` or :py:class:`~splinter.element_list.ElementList`\n        :rtype: list", "id": "f11287:c0:m4"}
{"signature": "@hookspec<EOL>def pypom_after_wait_for_region_to_load(region):", "body": "", "docstring": "Called after waiting for the region to load", "id": "f11288:m1"}
{"signature": "def find_element(strategy, locator, root=None):", "body": "", "docstring": "Finds an element on the page.\n\n        :param strategy: Location strategy to use (type depends on the driver implementation)\n        :param locator: Location of target element.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: web element object or None.\n        :return: web element object\n        :rtype: it depends on the driver implementation", "id": "f11290:c1:m2"}
{"signature": "def wait_factory(timeout):", "body": "", "docstring": "Returns a WebDriverWait like property for a given timeout.\n\n        :param timeout: Timeout used by WebDriverWait like calls\n        :type timeout: int", "id": "f11290:c1:m0"}
{"signature": "def find_elements(strategy, locator, root=None):", "body": "", "docstring": "Finds elements on the page.\n\n        :param strategy: Location strategy to use (type depends on the driver implementation)\n        :param locator: Location of target elements.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: web element object or None.\n        :return: iterable of web element objects\n        :rtype: iterable (if depends on the driver implementation)", "id": "f11290:c1:m3"}
{"signature": "def is_element_present(strategy, locator, root=None):", "body": "", "docstring": "Checks whether an element is present.\n\n        :param strategy: Location strategy to use (type depends on the driver implementation)\n        :param locator: Location of target element.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: web element object or None.\n        :return: ``True`` if element is present, else ``False``.\n        :rtype: bool", "id": "f11290:c1:m4"}
{"signature": "def open(url):", "body": "", "docstring": "Open the page.\n        Navigates to :py:attr:`url`", "id": "f11290:c1:m1"}
{"signature": "def find_elements(self, strategy, locator, root=None):", "body": "if root is not None:<EOL><INDENT>return root.find_elements(strategy, locator)<EOL><DEDENT>return self.driver.find_elements(strategy, locator)<EOL>", "docstring": "Finds elements on the page.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` for valid values.\n        :param locator: Location of target elements.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: str :py:class:`~selenium.webdriver.remote.webelement.WebElement` object or None.\n        :return: List of :py:class:`~selenium.webdriver.remote.webelement.WebElement` objects.\n        :rtype: list", "id": "f11293:c1:m4"}
{"signature": "def wait_factory(self, timeout):", "body": "return WebDriverWait(self.driver, timeout)<EOL>", "docstring": "Returns a WebDriverWait like property for a given timeout.\n\n        :param timeout: Timeout used by WebDriverWait calls\n        :type timeout: int", "id": "f11293:c1:m1"}
{"signature": "def is_element_present(self, strategy, locator, root=None):", "body": "try:<EOL><INDENT>return self.find_element(strategy, locator, root=root)<EOL><DEDENT>except NoSuchElementException:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Checks whether an element is present.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` for valid values.\n        :param locator: Location of target element.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: str :py:class:`~selenium.webdriver.remote.webelement.WebElement` object or None.\n        :return: ``True`` if element is present, else ``False``.\n        :rtype: bool", "id": "f11293:c1:m5"}
{"signature": "def is_element_displayed(self, strategy, locator, root=None):", "body": "try:<EOL><INDENT>return self.find_element(strategy, locator, root=root).is_displayed()<EOL><DEDENT>except NoSuchElementException:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Checks whether an element is displayed.\n\n        :param strategy: Location strategy to use. See :py:class:`~selenium.webdriver.common.by.By` for valid values.\n        :param locator: Location of target element.\n        :param root: (optional) root node.\n        :type strategy: str\n        :type locator: str\n        :type root: str :py:class:`~selenium.webdriver.remote.webelement.WebElement` object or None.\n        :return: ``True`` if element is displayed, else ``False``.\n        :rtype: bool", "id": "f11293:c1:m6"}
{"signature": "def find_elements(self, strategy, locator, root=None):", "body": "node = root or self.driver<EOL>if strategy in ALLOWED_STRATEGIES:<EOL><INDENT>return getattr(node, \"<STR_LIT>\" + strategy)(locator)<EOL><DEDENT>raise UsageError(\"<STR_LIT>\")<EOL>", "docstring": "Finds elements on the page.\n\n        :param strategy: Location strategy to use. See pypom.splinter_driver.ALLOWED_STRATEGIES for valid values.\n        :param locator: Location of target elements.\n        :type strategy: str\n        :type locator: str\n        :return: List of :py:class:`~splinter.driver.webdriver.WebDriverElement`\n        :rtype: :py:class:`splinter.element_list.ElementList`", "id": "f11295:c1:m3"}
{"signature": "def get(self, query, responseformat=\"<STR_LIT>\", verbosity=\"<STR_LIT:body>\", build=True):", "body": "<EOL>if build:<EOL><INDENT>full_query = self._construct_ql_query(<EOL>query, responseformat=responseformat, verbosity=verbosity<EOL>)<EOL><DEDENT>else:<EOL><INDENT>full_query = query<EOL><DEDENT>if self.debug:<EOL><INDENT>logging.getLogger().info(query)<EOL><DEDENT>r = self._get_from_overpass(full_query)<EOL>content_type = r.headers.get(\"<STR_LIT>\")<EOL>if self.debug:<EOL><INDENT>print(content_type)<EOL><DEDENT>if content_type == \"<STR_LIT>\":<EOL><INDENT>result = []<EOL>reader = csv.reader(StringIO(r.text), delimiter=\"<STR_LIT:\\t>\")<EOL>for row in reader:<EOL><INDENT>result.append(row)<EOL><DEDENT>return result<EOL><DEDENT>elif content_type in (\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>return r.text<EOL><DEDENT>elif content_type == \"<STR_LIT:application/json>\":<EOL><INDENT>response = json.loads(r.text)<EOL><DEDENT>if not build:<EOL><INDENT>return response<EOL><DEDENT>if \"<STR_LIT>\" not in response:<EOL><INDENT>raise UnknownOverpassError(\"<STR_LIT>\")<EOL><DEDENT>overpass_remark = response.get(\"<STR_LIT>\", None)<EOL>if overpass_remark and overpass_remark.startswith(\"<STR_LIT>\"):<EOL><INDENT>raise ServerRuntimeError(overpass_remark)<EOL><DEDENT>if responseformat is not \"<STR_LIT>\":<EOL><INDENT>return response<EOL><DEDENT>return self._as_geojson(response[\"<STR_LIT>\"])<EOL>", "docstring": "Pass in an Overpass query in Overpass QL.", "id": "f11305:c0:m1"}
{"signature": "def search(self, feature_type, regex=False):", "body": "raise NotImplementedError()<EOL>", "docstring": "Search for something.", "id": "f11305:c0:m2"}
{"signature": "def lookup_offset(self, sensor):", "body": "if self._device.product == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT:4><EOL><DEDENT>return (sensor + <NUM_LIT:1>) * <NUM_LIT:2><EOL>", "docstring": "Lookup the number of sensors on the device by product name.", "id": "f11313:c0:m2"}
{"signature": "def get_ports(self):", "body": "if self._ports:<EOL><INDENT>return self._ports<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Get device USB ports.", "id": "f11313:c0:m7"}
{"signature": "def _interrupt_read(self):", "body": "data = self._device.read(ENDPOINT, REQ_INT_LEN, timeout=TIMEOUT)<EOL>LOGGER.debug('<STR_LIT>', data)<EOL>return data<EOL>", "docstring": "Read data from device.", "id": "f11313:c0:m14"}
{"signature": "def lookup_sensor_count(self):", "body": "if (self._device.product == '<STR_LIT>') or(self._device.product == '<STR_LIT>'):<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>return <NUM_LIT:2><EOL>", "docstring": "Lookup the number of sensors on the device by product name.", "id": "f11313:c0:m4"}
{"signature": "def find_ports(device):", "body": "bus_id = device.bus<EOL>dev_id = device.address<EOL>for dirent in os.listdir(USB_SYS_PREFIX):<EOL><INDENT>matches = re.match(USB_PORTS_STR + '<STR_LIT:$>', dirent)<EOL>if matches:<EOL><INDENT>bus_str = readattr(dirent, '<STR_LIT>')<EOL>if bus_str:<EOL><INDENT>busnum = float(bus_str)<EOL><DEDENT>else:<EOL><INDENT>busnum = None<EOL><DEDENT>dev_str = readattr(dirent, '<STR_LIT>')<EOL>if dev_str:<EOL><INDENT>devnum = float(dev_str)<EOL><DEDENT>else:<EOL><INDENT>devnum = None<EOL><DEDENT>if busnum == bus_id and devnum == dev_id:<EOL><INDENT>return str(matches.groups()[<NUM_LIT:1>])<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Find the port chain a device is plugged on.\n\nThis is done by searching sysfs for a device that matches the device\nbus/address combination.\n\nUseful when the underlying usb lib does not return device.port_number for\nwhatever reason.", "id": "f11313:m1"}
{"signature": "def get_data(self, reset_device=False):", "body": "try:<EOL><INDENT>if reset_device:<EOL><INDENT>self._device.reset()<EOL><DEDENT>for interface in [<NUM_LIT:0>,<NUM_LIT:1>]:<EOL><INDENT>if self._device.is_kernel_driver_active(interface):<EOL><INDENT>LOGGER.debug('<STR_LIT>'<EOL>'<STR_LIT>', interface, self._device, self._ports)<EOL>self._device.detach_kernel_driver(interface)<EOL><DEDENT><DEDENT>self._device.set_configuration()<EOL>usb.util.claim_interface(self._device, INTERFACE)<EOL>self._control_transfer(COMMANDS['<STR_LIT>'])<EOL>self._interrupt_read()<EOL>self._control_transfer(COMMANDS['<STR_LIT>'])<EOL>temp_data = self._interrupt_read()<EOL>if self._device.product == '<STR_LIT>':<EOL><INDENT>humidity_data = temp_data<EOL><DEDENT>else:<EOL><INDENT>humidity_data = None<EOL><DEDENT>data = {'<STR_LIT>': temp_data, '<STR_LIT>': humidity_data}<EOL>usb.util.dispose_resources(self._device)<EOL>return data<EOL><DEDENT>except usb.USBError as err:<EOL><INDENT>if not reset_device:<EOL><INDENT>LOGGER.warning(\"<STR_LIT>\", err, self._device)<EOL>return self.get_data(True)<EOL><DEDENT>if \"<STR_LIT>\" in str(err):<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>LOGGER.error(err)<EOL>raise<EOL><DEDENT><DEDENT>", "docstring": "Get data from the USB device.", "id": "f11313:c0:m9"}
{"signature": "def get_devices(self):", "body": "return self._devices<EOL>", "docstring": "Get a list of all devices attached to this handler", "id": "f11313:c1:m1"}
{"signature": "def get_temperature(self, format='<STR_LIT>', sensor=<NUM_LIT:0>):", "body": "results = self.get_temperatures(sensors=[sensor,])<EOL>if format == '<STR_LIT>':<EOL><INDENT>return results[sensor]['<STR_LIT>']<EOL><DEDENT>elif format == '<STR_LIT>':<EOL><INDENT>return results[sensor]['<STR_LIT>']<EOL><DEDENT>elif format == '<STR_LIT>':<EOL><INDENT>return results[sensor]['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Get device temperature reading.", "id": "f11313:c0:m10"}
{"signature": "def get_bus(self):", "body": "if self._bus:<EOL><INDENT>return self._bus<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Get device USB bus.", "id": "f11313:c0:m8"}
{"signature": "def hello_jp():", "body": "return u\"<STR_LIT>\"<EOL>", "docstring": "Return 'Hello' in Japanese ('\u3053\u3093\u306b\u3061\u306f').", "id": "f11318:m0"}
{"signature": "def parse_skip_comment(self):", "body": "skipped_error_codes = \"<STR_LIT>\"<EOL>if self.current.kind == tk.COMMENT:<EOL><INDENT>if \"<STR_LIT>\" in self.current.value:<EOL><INDENT>skipped_error_codes = \"<STR_LIT>\".join(self.current.value.split(\"<STR_LIT>\")[<NUM_LIT:1>:])<EOL><DEDENT>elif self.current.value.startswith(\"<STR_LIT>\"):<EOL><INDENT>skipped_error_codes = \"<STR_LIT:all>\"<EOL><DEDENT><DEDENT>return skipped_error_codes<EOL>", "docstring": "Parse a definition comment for noqa skips.", "id": "f11326:c14:m10"}
{"signature": "def __iter__(self):", "body": "return chain([self], *self.children)<EOL>", "docstring": "Iterate.", "id": "f11326:c1:m0"}
{"signature": "@property<EOL><INDENT>def is_public(self):<DEDENT>", "body": "<EOL>for decorator in self.decorators:<EOL><INDENT>if re.compile(r\"<STR_LIT>\".format(self.name)).match(decorator.name):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>name_is_public = (<EOL>not self.name.startswith(\"<STR_LIT:_>\")<EOL>or self.name in VARIADIC_MAGIC_METHODS<EOL>or self.is_magic<EOL>)<EOL>return self.parent.is_public and name_is_public<EOL>", "docstring": "Return True iff this method should be considered public.", "id": "f11326:c6:m1"}
{"signature": "def code_mapping(level, msg, default=<NUM_LIT>):", "body": "try:<EOL><INDENT>return code_mappings_by_level[level][msg]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>if msg.count('<STR_LIT:\">') == <NUM_LIT:2> and '<STR_LIT>' in msg and msg.endswith('<STR_LIT>'):<EOL><INDENT>txt = msg[: msg.index('<STR_LIT>')]<EOL>return code_mappings_by_level[level].get(txt, default)<EOL><DEDENT>return default<EOL>", "docstring": "Return an error code between 0 and 99.", "id": "f11326:m0"}
{"signature": "def consume(self, kind):", "body": "next_token = self.stream.move()<EOL>assert next_token.kind == kind<EOL>", "docstring": "Consume one token and verify it is of the expected kind.", "id": "f11326:c14:m2"}
{"signature": "def humanize(string):", "body": "return re.compile(r\"<STR_LIT>\").sub(r\"<STR_LIT>\", string).lower()<EOL>", "docstring": "Make a string human readable.", "id": "f11326:m3"}
{"signature": "def parse_all(self):", "body": "assert self.current.value == \"<STR_LIT>\"<EOL>self.consume(tk.NAME)<EOL>if self.current.value != \"<STR_LIT:=>\":<EOL><INDENT>raise AllError(\"<STR_LIT>\")<EOL><DEDENT>self.consume(tk.OP)<EOL>if self.current.value not in \"<STR_LIT>\":<EOL><INDENT>raise AllError(\"<STR_LIT>\")<EOL><DEDENT>self.consume(tk.OP)<EOL>self.all = []<EOL>all_content = \"<STR_LIT:(>\"<EOL>while self.current.kind != tk.OP or self.current.value not in \"<STR_LIT>\":<EOL><INDENT>if self.current.kind in (tk.NL, tk.COMMENT):<EOL><INDENT>pass<EOL><DEDENT>elif self.current.kind == tk.STRING or self.current.value == \"<STR_LIT:U+002C>\":<EOL><INDENT>all_content += self.current.value<EOL><DEDENT>else:<EOL><INDENT>raise AllError(<EOL>\"<STR_LIT>\".format(<EOL>self.current.kind<EOL>)<EOL>)<EOL><DEDENT>self.stream.move()<EOL><DEDENT>self.consume(tk.OP)<EOL>all_content += \"<STR_LIT:)>\"<EOL>try:<EOL><INDENT>self.all = eval(all_content, {})<EOL><DEDENT>except BaseException as e:<EOL><INDENT>raise AllError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(all_content, e)<EOL>)<EOL><DEDENT>", "docstring": "Parse the __all__ definition in a module.", "id": "f11326:c14:m7"}
{"signature": "def run(self):", "body": "<EOL>if self.err is not None:<EOL><INDENT>assert self.source is None<EOL>msg = \"<STR_LIT>\" % (<EOL>rst_prefix,<EOL>rst_fail_load,<EOL>\"<STR_LIT>\" % self.err,<EOL>)<EOL>yield <NUM_LIT:0>, <NUM_LIT:0>, msg, type(self)<EOL>module = []<EOL><DEDENT>try:<EOL><INDENT>module = parse(StringIO(self.source), self.filename)<EOL><DEDENT>except SyntaxError as err:<EOL><INDENT>msg = \"<STR_LIT>\" % (<EOL>rst_prefix,<EOL>rst_fail_parse,<EOL>\"<STR_LIT>\" % err,<EOL>)<EOL>yield <NUM_LIT:0>, <NUM_LIT:0>, msg, type(self)<EOL>module = []<EOL><DEDENT>except AllError:<EOL><INDENT>msg = \"<STR_LIT>\" % (<EOL>rst_prefix,<EOL>rst_fail_all,<EOL>\"<STR_LIT>\",<EOL>)<EOL>yield <NUM_LIT:0>, <NUM_LIT:0>, msg, type(self)<EOL>module = []<EOL><DEDENT>for definition in module:<EOL><INDENT>if not definition.docstring:<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>unindented = trim(dequote_docstring(definition.docstring))<EOL>rst_errors = list(rst_lint.lint(unindented))<EOL><DEDENT>except Exception as err:<EOL><INDENT>msg = \"<STR_LIT>\" % (<EOL>rst_prefix,<EOL>rst_fail_lint,<EOL>\"<STR_LIT>\" % (definition.name, err),<EOL>)<EOL>yield definition.start, <NUM_LIT:0>, msg, type(self)<EOL>continue<EOL><DEDENT>for rst_error in rst_errors:<EOL><INDENT>if rst_error.level <= <NUM_LIT:1>:<EOL><INDENT>continue<EOL><DEDENT>msg = rst_error.message.split(\"<STR_LIT:\\n>\", <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>code = code_mapping(rst_error.level, msg)<EOL>assert code < <NUM_LIT:100>, code<EOL>code += <NUM_LIT:100> * rst_error.level<EOL>msg = \"<STR_LIT>\" % (rst_prefix, code, msg)<EOL>yield definition.start + rst_error.line, <NUM_LIT:0>, msg, type(self)<EOL><DEDENT><DEDENT>", "docstring": "Use docutils to check docstrings are valid RST.", "id": "f11326:c15:m1"}
{"signature": "def parse_definition(self, class_):", "body": "start = self.line<EOL>self.consume(tk.NAME)<EOL>name = self.current.value<EOL>self.log.debug(\"<STR_LIT>\", class_.__name__, name)<EOL>self.stream.move()<EOL>if self.current.kind == tk.OP and self.current.value == \"<STR_LIT:(>\":<EOL><INDENT>parenthesis_level = <NUM_LIT:0><EOL>while True:<EOL><INDENT>if self.current.kind == tk.OP:<EOL><INDENT>if self.current.value == \"<STR_LIT:(>\":<EOL><INDENT>parenthesis_level += <NUM_LIT:1><EOL><DEDENT>elif self.current.value == \"<STR_LIT:)>\":<EOL><INDENT>parenthesis_level -= <NUM_LIT:1><EOL>if parenthesis_level == <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>self.stream.move()<EOL><DEDENT><DEDENT>if self.current.kind != tk.OP or self.current.value != \"<STR_LIT::>\":<EOL><INDENT>self.leapfrog(tk.OP, value=\"<STR_LIT::>\")<EOL><DEDENT>else:<EOL><INDENT>self.consume(tk.OP)<EOL><DEDENT>if self.current.kind in (tk.NEWLINE, tk.COMMENT):<EOL><INDENT>skipped_error_codes = self.parse_skip_comment()<EOL>self.leapfrog(tk.INDENT)<EOL>assert self.current.kind != tk.INDENT<EOL>docstring = self.parse_docstring()<EOL>decorators = self._accumulated_decorators<EOL>self.log.debug(\"<STR_LIT>\", decorators)<EOL>self._accumulated_decorators = []<EOL>self.log.debug(\"<STR_LIT>\")<EOL>children = list(self.parse_definitions(class_))<EOL>self.log.debug(\"<STR_LIT>\", name)<EOL>end = self.line - <NUM_LIT:1><EOL><DEDENT>else:  <EOL><INDENT>skipped_error_codes = \"<STR_LIT>\"<EOL>docstring = self.parse_docstring()<EOL>decorators = []  <EOL>children = []<EOL>end = self.line<EOL>self.leapfrog(tk.NEWLINE)<EOL><DEDENT>definition = class_(<EOL>name,<EOL>self.source,<EOL>start,<EOL>end,<EOL>decorators,<EOL>docstring,<EOL>children,<EOL>None,<EOL>skipped_error_codes,<EOL>)<EOL>for child in definition.children:<EOL><INDENT>child.parent = 
definition<EOL><DEDENT>self.log.debug(<EOL>\"<STR_LIT>\",<EOL>class_.__name__,<EOL>name,<EOL>self.current.kind,<EOL>self.current.value,<EOL>)<EOL>return definition<EOL>", "docstring": "Parse a definition and return its value in a `class_` object.", "id": "f11326:c14:m9"}
{"signature": "def __init__(self, filelike):", "body": "self._generator = tk.generate_tokens(filelike.readline)<EOL>self.current = Token(*next(self._generator, None))<EOL>self.line = self.current.start[<NUM_LIT:0>]<EOL>self.log = log<EOL>self.got_logical_newline = True<EOL>", "docstring": "Initialize.", "id": "f11326:c11:m0"}
{"signature": "@property<EOL><INDENT>def is_public(self):<DEDENT>", "body": "return (<EOL>not self.name.startswith(\"<STR_LIT:_>\")<EOL>and self.parent.is_class<EOL>and self.parent.is_public<EOL>)<EOL>", "docstring": "Return True iff this class should be considered public.", "id": "f11326:c8:m0"}
{"signature": "def __init__(self, *args):", "body": "super(Token, self).__init__(*args)<EOL>self.kind = TokenKind(self.kind)<EOL>", "docstring": "Initialize.", "id": "f11326:c13:m0"}
{"signature": "def __init__(self, message):", "body": "Exception.__init__(<EOL>self,<EOL>message<EOL>+ textwrap.dedent(<EOL>\"\"\"<STR_LIT>\"\"\"<EOL>),<EOL>)<EOL>", "docstring": "Initialize the error with a more specific message.", "id": "f11326:c10:m0"}
{"signature": "def parse_docstring(self):", "body": "self.log.debug(<EOL>\"<STR_LIT>\", self.current.kind, self.current.value<EOL>)<EOL>while self.current.kind in (tk.COMMENT, tk.NEWLINE, tk.NL):<EOL><INDENT>self.stream.move()<EOL>self.log.debug(<EOL>\"<STR_LIT>\",<EOL>self.current.kind,<EOL>self.current.value,<EOL>)<EOL><DEDENT>if self.current.kind == tk.STRING:<EOL><INDENT>docstring = self.current.value<EOL>self.stream.move()<EOL>return docstring<EOL><DEDENT>return None<EOL>", "docstring": "Parse a single docstring and return its value.", "id": "f11326:c14:m4"}
{"signature": "def parse_definitions(self, class_, all=False):", "body": "while self.current is not None:<EOL><INDENT>self.log.debug(<EOL>\"<STR_LIT>\",<EOL>self.current.kind,<EOL>self.current.value,<EOL>)<EOL>self.log.debug(\"<STR_LIT>\", self.stream.got_logical_newline)<EOL>if all and self.current.value == \"<STR_LIT>\":<EOL><INDENT>self.parse_all()<EOL><DEDENT>elif (<EOL>self.current.kind == tk.OP<EOL>and self.current.value == \"<STR_LIT:@>\"<EOL>and self.stream.got_logical_newline<EOL>):<EOL><INDENT>self.consume(tk.OP)<EOL>self.parse_decorators()<EOL><DEDENT>elif self.current.value in [\"<STR_LIT>\", \"<STR_LIT:class>\"]:<EOL><INDENT>yield self.parse_definition(class_._nest(self.current.value))<EOL><DEDENT>elif self.current.kind == tk.INDENT:<EOL><INDENT>self.consume(tk.INDENT)<EOL>for definition in self.parse_definitions(class_):<EOL><INDENT>yield definition<EOL><DEDENT><DEDENT>elif self.current.kind == tk.DEDENT:<EOL><INDENT>self.consume(tk.DEDENT)<EOL>return<EOL><DEDENT>elif self.current.value == \"<STR_LIT>\":<EOL><INDENT>self.parse_from_import_statement()<EOL><DEDENT>else:<EOL><INDENT>self.stream.move()<EOL><DEDENT><DEDENT>", "docstring": "Parse multiple definitions and yield them.", "id": "f11326:c14:m6"}
{"signature": "def parse_from_import_statement(self):", "body": "self.log.debug(\"<STR_LIT>\")<EOL>is_future_import = self._parse_from_import_source()<EOL>self._parse_from_import_names(is_future_import)<EOL>", "docstring": "Parse a 'from x import y' statement.\n\n        The purpose is to find __future__ statements.", "id": "f11326:c14:m12"}
{"signature": "def __iter__(self):", "body": "while True:<EOL><INDENT>if self.current is not None:<EOL><INDENT>yield self.current<EOL><DEDENT>else:<EOL><INDENT>return<EOL><DEDENT>self.move()<EOL><DEDENT>", "docstring": "Iterate.", "id": "f11326:c11:m3"}
{"signature": "@property<EOL><INDENT>def is_public(self):<DEDENT>", "body": "return not self.name.startswith(\"<STR_LIT:_>\") or self.name.startswith(\"<STR_LIT>\")<EOL>", "docstring": "Is the module public.", "id": "f11326:c2:m0"}
{"signature": "def _parse_from_import_source(self):", "body": "assert self.current.value == \"<STR_LIT>\", self.current.value<EOL>self.stream.move()<EOL>is_future_import = self.current.value == \"<STR_LIT>\"<EOL>self.stream.move()<EOL>while (<EOL>self.current is not None<EOL>and self.current.kind in (tk.DOT, tk.NAME, tk.OP)<EOL>and self.current.value != \"<STR_LIT>\"<EOL>):<EOL><INDENT>self.stream.move()<EOL><DEDENT>if self.current is None or self.current.value != \"<STR_LIT>\":<EOL><INDENT>return False<EOL><DEDENT>self.check_current(value=\"<STR_LIT>\")<EOL>assert self.current.value == \"<STR_LIT>\", self.current.value<EOL>self.stream.move()<EOL>return is_future_import<EOL>", "docstring": "Parse the 'from x import' part in a 'from x import y' statement.\n\n        Return true iff `x` is __future__.", "id": "f11326:c14:m13"}
{"signature": "def parse_decorators(self):  ", "body": "name = []<EOL>arguments = []<EOL>at_arguments = False<EOL>while self.current is not None:<EOL><INDENT>self.log.debug(<EOL>\"<STR_LIT>\",<EOL>self.current.kind,<EOL>self.current.value,<EOL>)<EOL>if self.current.kind == tk.NAME and self.current.value in [\"<STR_LIT>\", \"<STR_LIT:class>\"]:<EOL><INDENT>break<EOL><DEDENT>elif self.current.kind == tk.OP and self.current.value == \"<STR_LIT:@>\":<EOL><INDENT>self._accumulated_decorators.append(<EOL>Decorator(\"<STR_LIT>\".join(name), \"<STR_LIT>\".join(arguments))<EOL>)<EOL>name = []<EOL>arguments = []<EOL>at_arguments = False<EOL><DEDENT>elif self.current.kind == tk.OP and self.current.value == \"<STR_LIT:(>\":<EOL><INDENT>at_arguments = True<EOL><DEDENT>elif self.current.kind == tk.OP and self.current.value == \"<STR_LIT:)>\":<EOL><INDENT>pass<EOL><DEDENT>elif self.current.kind == tk.NEWLINE or self.current.kind == tk.NL:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if not at_arguments:<EOL><INDENT>name.append(self.current.value)<EOL><DEDENT>else:<EOL><INDENT>arguments.append(self.current.value)<EOL><DEDENT><DEDENT>self.stream.move()<EOL><DEDENT>self._accumulated_decorators.append(<EOL>Decorator(\"<STR_LIT>\".join(name), \"<STR_LIT>\".join(arguments))<EOL>)<EOL>", "docstring": "Called after first @ is found.\n\n        Parse decorators into self._accumulated_decorators.\n        Continue to do so until encountering the 'def' or 'class' start token.", "id": "f11326:c14:m5"}
{"signature": "def _ordinal_metric(_v1, _v2, i1, i2, n_v):", "body": "if i1 > i2:<EOL><INDENT>i1, i2 = i2, i1<EOL><DEDENT>return (np.sum(n_v[i1:(i2 + <NUM_LIT:1>)]) - (n_v[i1] + n_v[i2]) / <NUM_LIT:2>) ** <NUM_LIT:2><EOL>", "docstring": "Metric for ordinal data.", "id": "f11329:m1"}
{"signature": "def _reliability_data_to_value_counts(reliability_data, value_domain):", "body": "return np.array([[sum(<NUM_LIT:1> for rate in unit if rate == v) for v in value_domain] for unit in reliability_data.T])<EOL>", "docstring": "Return the value counts given the reliability data.\n\n    Parameters\n    ----------\n    reliability_data : ndarray, with shape (M, N)\n        Reliability data matrix which has the rate the i coder gave to the j unit, where M is the number of raters\n        and N is the unit count.\n        Missing rates are represented with `np.nan`.\n\n    value_domain : array_like, with shape (V,)\n        Possible values the units can take.\n\n    Returns\n    -------\n    value_counts : ndarray, with shape (N, V)\n        Number of coders that assigned a certain value to a determined unit, where N is the number of units\n        and V is the value count.", "id": "f11329:m9"}
{"signature": "def alpha(reliability_data=None, value_counts=None, value_domain=None, level_of_measurement='<STR_LIT>',<EOL>dtype=np.float64):", "body": "if (reliability_data is None) == (value_counts is None):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if value_counts is None:<EOL><INDENT>if type(reliability_data) is not np.ndarray:<EOL><INDENT>reliability_data = np.array(reliability_data)<EOL><DEDENT>value_domain = value_domain or np.unique(reliability_data[~np.isnan(reliability_data)])<EOL>value_counts = _reliability_data_to_value_counts(reliability_data, value_domain)<EOL><DEDENT>else:  <EOL><INDENT>if value_domain:<EOL><INDENT>assert value_counts.shape[<NUM_LIT:1>] == len(value_domain),\"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>value_domain = tuple(range(value_counts.shape[<NUM_LIT:1>]))<EOL><DEDENT><DEDENT>distance_metric = _distance_metric(level_of_measurement)<EOL>o = _coincidences(value_counts, value_domain, dtype=dtype)<EOL>n_v = np.sum(o, axis=<NUM_LIT:0>)<EOL>n = np.sum(n_v)<EOL>e = _random_coincidences(value_domain, n, n_v)<EOL>d = _distances(value_domain, distance_metric, n_v)<EOL>return <NUM_LIT:1> - np.sum(o * d) / np.sum(e * d)<EOL>", "docstring": "Compute Krippendorff's alpha.\n\n    See https://en.wikipedia.org/wiki/Krippendorff%27s_alpha for more information.\n\n    Parameters\n    ----------\n    reliability_data : array_like, with shape (M, N)\n        Reliability data matrix which has the rate the i coder gave to the j unit, where M is the number of raters\n        and N is the unit count.\n        Missing rates are represented with `np.nan`.\n        If it's provided then `value_counts` must not be provided.\n\n    value_counts : ndarray, with shape (N, V)\n        Number of coders that assigned a certain value to a determined unit, where N is the number of units\n        and V is the value count.\n        If it's provided then `reliability_data` must not be provided.\n\n    value_domain : array_like, with shape (V,)\n        
Possible values the units can take.\n        If the level of measurement is not nominal, it must be ordered.\n        If `reliability_data` is provided, then the default value is the ordered list of unique rates that appear.\n        Else, the default value is `list(range(V))`.\n\n    level_of_measurement : string or callable\n        Steven's level of measurement of the variable.\n        It must be one of 'nominal', 'ordinal', 'interval', 'ratio' or a callable.\n\n    dtype : data-type\n        Result and computation data-type.\n\n    Returns\n    -------\n    alpha : `dtype`\n        Scalar value of Krippendorff's alpha of type `dtype`.\n\n    Examples\n    --------\n    >>> reliability_data = [[np.nan, np.nan, np.nan, np.nan, np.nan, 3, 4, 1, 2, 1, 1, 3, 3, np.nan, 3],\n    ...                     [1, np.nan, 2, 1, 3, 3, 4, 3, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],\n    ...                     [np.nan, np.nan, 2, 1, 3, 4, 4, np.nan, 2, 1, 1, 3, 3, np.nan, 4]]\n    >>> print(round(alpha(reliability_data=reliability_data, level_of_measurement='nominal'), 6))\n    0.691358\n    >>> print(round(alpha(reliability_data=reliability_data, level_of_measurement='interval'), 6))\n    0.810845\n    >>> value_counts = np.array([[1, 0, 0, 0],\n    ...                          [0, 0, 0, 0],\n    ...                          [0, 2, 0, 0],\n    ...                          [2, 0, 0, 0],\n    ...                          [0, 0, 2, 0],\n    ...                          [0, 0, 2, 1],\n    ...                          [0, 0, 0, 3],\n    ...                          [1, 0, 1, 0],\n    ...                          [0, 2, 0, 0],\n    ...                          [2, 0, 0, 0],\n    ...                          [2, 0, 0, 0],\n    ...                          [0, 0, 2, 0],\n    ...                          [0, 0, 2, 0],\n    ...                          [0, 0, 0, 0],\n    ...                          
[0, 0, 1, 1]])\n    >>> print(round(alpha(value_counts=value_counts, level_of_measurement='nominal'), 6))\n    0.691358\n    >>> # The following examples were extracted from\n    >>> # https://www.statisticshowto.datasciencecentral.com/wp-content/uploads/2016/07/fulltext.pdf, page 8.\n    >>> reliability_data = [[1, 2, 3, 3, 2, 1, 4, 1, 2, np.nan, np.nan, np.nan],\n    ...                     [1, 2, 3, 3, 2, 2, 4, 1, 2, 5, np.nan, 3.],\n    ...                     [np.nan, 3, 3, 3, 2, 3, 4, 2, 2, 5, 1, np.nan],\n    ...                     [1, 2, 3, 3, 2, 4, 4, 1, 2, 5, 1, np.nan]]\n    >>> print(round(alpha(reliability_data, level_of_measurement='ordinal'), 3))\n    0.815\n    >>> print(round(alpha(reliability_data, level_of_measurement='ratio'), 3))\n    0.797", "id": "f11329:m10"}
{"signature": "def _interval_metric(v1, v2, **_kwargs):", "body": "return (v1 - v2) ** <NUM_LIT:2><EOL>", "docstring": "Metric for interval data.", "id": "f11329:m2"}
{"signature": "def _distances(value_domain, distance_metric, n_v):", "body": "return np.array([[distance_metric(v1, v2, i1=i1, i2=i2, n_v=n_v)<EOL>for i2, v2 in enumerate(value_domain)]<EOL>for i1, v1 in enumerate(value_domain)])<EOL>", "docstring": "Distances of the different possible values.\n\n    Parameters\n    ----------\n    value_domain : array_like, with shape (V,)\n        Possible values V the units can take.\n        If the level of measurement is not nominal, it must be ordered.\n\n    distance_metric : callable\n        Callable that return the distance of two given values.\n\n    n_v : ndarray, with shape (V,)\n        Number of pairable elements for each value.\n\n    Returns\n    -------\n    d : ndarray, with shape (V, V)\n        Distance matrix for each value pair.", "id": "f11329:m6"}
{"signature": "def loadResults(resultsFile):", "body": "with open(resultsFile) as f:<EOL><INDENT>raw=f.read().split(\"<STR_LIT:\\n>\")<EOL><DEDENT>foldersByDay={}<EOL>for line in raw:<EOL><INDENT>folder=line.split('<STR_LIT:\">')[<NUM_LIT:1>]+\"<STR_LIT:\\\\>\"<EOL>line=[]+line.split('<STR_LIT:\">')[<NUM_LIT:2>].split(\"<STR_LIT:U+002CU+0020>\")<EOL>for day in line[<NUM_LIT:1>:]:<EOL><INDENT>if not day in foldersByDay:<EOL><INDENT>foldersByDay[day]=[]<EOL><DEDENT>foldersByDay[day]=foldersByDay[day]+[folder]<EOL><DEDENT><DEDENT>nActiveDays=len(foldersByDay)<EOL>dayFirst=sorted(foldersByDay.keys())[<NUM_LIT:0>]<EOL>dayLast=sorted(foldersByDay.keys())[-<NUM_LIT:1>]<EOL>dayFirst=datetime.datetime.strptime(dayFirst, \"<STR_LIT>\" )<EOL>dayLast=datetime.datetime.strptime(dayLast, \"<STR_LIT>\" )<EOL>nDays = (dayLast - dayFirst).days + <NUM_LIT:1><EOL>emptyDays=<NUM_LIT:0><EOL>for deltaDays in range(nDays):<EOL><INDENT>day=dayFirst+datetime.timedelta(days=deltaDays)<EOL>stamp=datetime.datetime.strftime(day, \"<STR_LIT>\" )<EOL>if not stamp in foldersByDay:<EOL><INDENT>foldersByDay[stamp]=[]<EOL>emptyDays+=<NUM_LIT:1><EOL><DEDENT><DEDENT>percActive=nActiveDays/nDays*<NUM_LIT:100><EOL>print(\"<STR_LIT>\"%(nActiveDays,nDays,percActive))<EOL>return foldersByDay<EOL>", "docstring": "returns a dict of active folders with days as keys.", "id": "f11339:m2"}
{"signature": "def fileModifiedTimestamp(fname):", "body": "modifiedTime=os.path.getmtime(fname)<EOL>stamp=time.strftime('<STR_LIT>', time.localtime(modifiedTime))<EOL>return stamp<EOL>", "docstring": "return \"YYYY-MM-DD\" when the file was modified.", "id": "f11339:m0"}
{"signature": "def clampfit_rename(path,char):", "body": "assert len(char)==<NUM_LIT:1> and type(char)==str, \"<STR_LIT>\"<EOL>assert os.path.exists(path), \"<STR_LIT>\"<EOL>files = sorted(os.listdir(path))<EOL>files = [x for x in files if len(x)><NUM_LIT> and x[<NUM_LIT:4>]+x[<NUM_LIT:7>]+x[<NUM_LIT:10>]=='<STR_LIT>']<EOL>for fname in files:<EOL><INDENT>fname2 = list(fname)<EOL>fname2[<NUM_LIT:11>]=char<EOL>fname2=\"<STR_LIT>\".join(fname2)<EOL>if fname==fname2:<EOL><INDENT>print(fname, \"<STR_LIT>\", fname2)<EOL><DEDENT>else:<EOL><INDENT>print(fname, \"<STR_LIT>\", fname2)<EOL><INDENT>fname=os.path.join(path,fname)<EOL>fname2=os.path.join(path,fname2)<EOL>if not os.path.exists(fname2):<EOL><INDENT>os.rename(fname,fname2)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return<EOL>", "docstring": "Given ABFs and TIFs formatted long style, rename each of them to prefix their number with a different number.\n\nExample: 2017_10_11_0011.abf\nBecomes: 2017_10_11_?011.abf\nwhere ? can be any character.", "id": "f11340:m0"}
{"signature": "def comments(self,minutes=False):", "body": "if self.comments==<NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>self.log.debug(\"<STR_LIT>\")<EOL>for i,t in enumerate(self.abf.comment_times):<EOL><INDENT>if minutes:<EOL><INDENT>t/=<NUM_LIT><EOL><DEDENT>plt.axvline(t,color='<STR_LIT:r>',ls='<STR_LIT::>')<EOL>X1,X2,Y1,Y2=plt.axis()<EOL>Y2=Y2-abs(Y2-Y1)*<NUM_LIT><EOL>plt.text(t,Y2,self.abf.comment_tags[i],color='<STR_LIT:r>',rotation='<STR_LIT>',<EOL>ha='<STR_LIT:right>',va='<STR_LIT>',weight='<STR_LIT>',alpha=<NUM_LIT>,size=<NUM_LIT:8>,)<EOL><DEDENT>", "docstring": "Add comment lines/text to an existing plot. Defaults to seconds.\nCall after a plot has been made, and after margins have been set.", "id": "f11342:c0:m7"}
{"signature": "def figure_protocols(self):", "body": "self.log.debug(\"<STR_LIT>\")<EOL>self.figure()<EOL>for sweep in range(self.abf.sweeps):<EOL><INDENT>self.abf.setsweep(sweep)<EOL>plt.plot(self.abf.protoX,self.abf.protoY,color='<STR_LIT:r>')<EOL><DEDENT>self.marginX=<NUM_LIT:0><EOL>self.decorate(protocol=True)<EOL>", "docstring": "plot the protocol of all sweeps.", "id": "f11342:c0:m13"}
{"signature": "def __init__(self,abf):", "body": "self.log = logging.getLogger(\"<STR_LIT>\")<EOL>self.log.setLevel(swhlab.loglevel)<EOL>self.close(True) <EOL>if type(abf) is str:<EOL><INDENT>self.log.debug(\"<STR_LIT>\")<EOL>abf=ABF(abf)<EOL><DEDENT>self.abf=abf<EOL>self.figure_width=<NUM_LIT:10><EOL>self.figure_height=<NUM_LIT:5><EOL>self.figure_dpi=<NUM_LIT><EOL>self.subplot=False <EOL>self.gridAlpha=<NUM_LIT><EOL>self.title=os.path.basename(abf.filename)<EOL>self.traceColor='<STR_LIT:b>'<EOL>self.kwargs={\"<STR_LIT>\":<NUM_LIT>}<EOL>self.rainbow=True<EOL>self.colormap=\"<STR_LIT>\"<EOL>self.marginX,self.marginY=<NUM_LIT:0>,<NUM_LIT><EOL>self.log.debug(\"<STR_LIT>\")<EOL>", "docstring": "Load an ABF and get ready to plot stuff.", "id": "f11342:c0:m0"}
{"signature": "def figure_chronological(self):", "body": "self.log.debug(\"<STR_LIT>\")<EOL>self.figure()<EOL>for sweep in range(self.abf.sweeps):<EOL><INDENT>self.abf.setsweep(sweep)<EOL>self.setColorBySweep()<EOL>if self.abf.derivative:<EOL><INDENT>plt.plot(self.abf.sweepX,self.abf.sweepD,**self.kwargs)<EOL><DEDENT>else:<EOL><INDENT>plt.plot(self.abf.sweepX,self.abf.sweepY,**self.kwargs)<EOL><DEDENT><DEDENT>self.comments()<EOL>self.decorate()<EOL>", "docstring": "plot every sweep of an ABF file (with comments).", "id": "f11342:c0:m9"}
{"signature": "def convertImages(self):", "body": "<EOL>exts=['<STR_LIT>','<STR_LIT>']<EOL>for fname in [x for x in self.files1 if cm.ext(x) in exts]:<EOL><INDENT>ID=\"<STR_LIT>\"<EOL>if len(fname)><NUM_LIT:8> and fname[:<NUM_LIT:8>] in self.IDs:<EOL><INDENT>ID=fname[:<NUM_LIT:8>]<EOL><DEDENT>fname2=ID+\"<STR_LIT>\"+fname<EOL>if not fname2 in self.files2:<EOL><INDENT>self.log.info(\"<STR_LIT>\"%fname2)<EOL>shutil.copy(os.path.join(self.folder1,fname),os.path.join(self.folder2,fname2))<EOL><DEDENT>if not fname[:<NUM_LIT:8>]+\"<STR_LIT>\" in self.files1:<EOL><INDENT>self.log.error(\"<STR_LIT>\",fname)<EOL><DEDENT><DEDENT>exts=['<STR_LIT>','<STR_LIT>']<EOL>for fname in [x for x in self.files1 if cm.ext(x) in exts]:<EOL><INDENT>ID=\"<STR_LIT>\"<EOL>if len(fname)><NUM_LIT:8> and fname[:<NUM_LIT:8>] in self.IDs:<EOL><INDENT>ID=fname[:<NUM_LIT:8>]<EOL><DEDENT>fname2=ID+\"<STR_LIT>\"+fname+\"<STR_LIT>\"<EOL>if not fname2 in self.files2:<EOL><INDENT>self.log.info(\"<STR_LIT>\"%fname2)<EOL>imaging.TIF_to_jpg(os.path.join(self.folder1,fname),saveAs=os.path.join(self.folder2,fname2))<EOL><DEDENT>if not fname[:<NUM_LIT:8>]+\"<STR_LIT>\" in self.files1:<EOL><INDENT>self.log.error(\"<STR_LIT>\",fname)<EOL><DEDENT><DEDENT>", "docstring": "run this to turn all folder1 TIFs and JPGs into folder2 data.\nTIFs will be treated as micrographs and converted to JPG with enhanced\ncontrast. JPGs will simply be copied over.", "id": "f11343:c0:m2"}
{"signature": "def analyzeAll(self):", "body": "searchableData=str(self.files2)<EOL>self.log.debug(\"<STR_LIT>\",len(self.IDs))<EOL>for ID in self.IDs:<EOL><INDENT>if not ID+\"<STR_LIT:_>\" in searchableData:<EOL><INDENT>self.log.debug(\"<STR_LIT>\",ID)<EOL>try:<EOL><INDENT>self.analyzeABF(ID)<EOL><DEDENT>except:<EOL><INDENT>print(\"<STR_LIT>\"*<NUM_LIT:100>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.log.debug(\"<STR_LIT>\",ID)<EOL><DEDENT><DEDENT>self.log.debug(\"<STR_LIT>\",len(self.IDs))<EOL>", "docstring": "analyze every unanalyzed ABF in the folder.", "id": "f11343:c0:m3"}
{"signature": "def analyzeSingle(abfFname):", "body": "assert os.path.exists(abfFname) and abfFname.endswith(\"<STR_LIT>\")<EOL>ABFfolder,ABFfname=os.path.split(abfFname)<EOL>abfID=os.path.splitext(ABFfname)[<NUM_LIT:0>]<EOL>IN=INDEX(ABFfolder)<EOL>IN.analyzeABF(abfID)<EOL>IN.scan()<EOL>IN.html_single_basic([abfID],overwrite=True)<EOL>IN.html_single_plot([abfID],overwrite=True)<EOL>IN.scan()<EOL>IN.html_index()<EOL>return<EOL>", "docstring": "Reanalyze data for a single ABF. Also remakes child and parent html.", "id": "f11343:m1"}
{"signature": "def html_single_basic(self,abfID,launch=False,overwrite=False):", "body": "if type(abfID) is str:<EOL><INDENT>abfID=[abfID]<EOL><DEDENT>for thisABFid in cm.abfSort(abfID):<EOL><INDENT>parentID=cm.parent(self.groups,thisABFid)<EOL>saveAs=os.path.abspath(\"<STR_LIT>\"%(self.folder2,parentID))<EOL>if overwrite is False and os.path.basename(saveAs) in self.files2:<EOL><INDENT>continue<EOL><DEDENT>filesByType=cm.filesByType(self.groupFiles[parentID])<EOL>html=\"<STR_LIT>\"<EOL>html+='<STR_LIT>'<EOL>html+='<STR_LIT>'%parentID<EOL>html+='<STR_LIT>'%os.path.abspath(self.folder1+\"<STR_LIT:/>\"+parentID+\"<STR_LIT>\")<EOL>html+='<STR_LIT>'<EOL>catOrder=[\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\"]<EOL>categories=cm.list_order_by(filesByType.keys(),catOrder)<EOL>for category in [x for x in categories if len(filesByType[x])]:<EOL><INDENT>if category=='<STR_LIT>':<EOL><INDENT>html+=\"<STR_LIT>\"<EOL><DEDENT>elif category=='<STR_LIT>':<EOL><INDENT>html+=\"<STR_LIT>\"<EOL><DEDENT>elif category=='<STR_LIT>':<EOL><INDENT>html+=\"<STR_LIT>\"<EOL><DEDENT>elif category=='<STR_LIT>':<EOL><INDENT>html+=\"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>html+=\"<STR_LIT>\"<EOL><DEDENT>for fname in filesByType[category]:<EOL><INDENT>html+=self.htmlFor(fname)<EOL><DEDENT>html+='<STR_LIT>'*<NUM_LIT:3><EOL><DEDENT>print(\"<STR_LIT>\",saveAs,'<STR_LIT>')<EOL>style.save(html,saveAs,launch=launch)<EOL><DEDENT>", "docstring": "generate a generic flat file html for an ABF parent. You could give\nthis a single ABF ID, its parent ID, or a list of ABF IDs.\nIf a child ABF is given, the parent will automatically be used.", "id": "f11343:c0:m6"}
{"signature": "def makePics(self):", "body": "rescanNeeded=False<EOL>for fname in smartSort(self.fnames):<EOL><INDENT>if fname in self.fnames2:<EOL><INDENT>continue<EOL><DEDENT>ext=os.path.splitext(fname)[<NUM_LIT:1>].lower()<EOL>if ext in [\"<STR_LIT>\",\"<STR_LIT>\"]:<EOL><INDENT>if not fname in self.abfFolder2:<EOL><INDENT>self.log.debug(\"<STR_LIT>\",fname)<EOL>shutil.copy(os.path.join(self.abfFolder,fname),os.path.join(self.abfFolder2,fname))<EOL>rescanNeeded=True<EOL><DEDENT><DEDENT>if ext in [\"<STR_LIT>\",\"<STR_LIT>\"]:<EOL><INDENT>if not fname+\"<STR_LIT>\" in self.fnames2:<EOL><INDENT>self.log.debug(\"<STR_LIT>\",fname)<EOL>swhlab.swh_image.TIF_to_jpg(os.path.join(self.abfFolder,fname),saveAs=os.path.join(self.abfFolder2,fname+\"<STR_LIT>\"))<EOL>rescanNeeded=True<EOL><DEDENT><DEDENT><DEDENT>if rescanNeeded:<EOL><INDENT>self.log.debug(\"<STR_LIT>\")<EOL>self.log.debug(\"<STR_LIT>\")<EOL>self.folderScan()<EOL><DEDENT>", "docstring": "convert every .image we find to a ./swhlab/ image", "id": "f11344:c0:m7"}
{"signature": "def html_single_basic(self,ID):", "body": "if not ID in self.cells:<EOL><INDENT>self.log.error(\"<STR_LIT>\",ID)<EOL>return<EOL><DEDENT>htmlFname=os.path.abspath(self.abfFolder2+\"<STR_LIT:/>\"+ID+\"<STR_LIT>\")<EOL>html=\"<STR_LIT>\"%ID<EOL>npics=<NUM_LIT:0><EOL>for childID in [os.path.splitext(x)[<NUM_LIT:0>] for x in self.fnamesByCell[ID]]:<EOL><INDENT>pics=[x for x in self.fnames2 if x.startswith(childID) and os.path.splitext(x)[<NUM_LIT:1>].lower() in [\"<STR_LIT>\",\"<STR_LIT>\"]]<EOL>html+=\"<STR_LIT>\"%(os.path.abspath(self.abfFolder+'<STR_LIT:/>'+childID+\"<STR_LIT>\"))<EOL>for i,pic in enumerate(pics):<EOL><INDENT>html+='<STR_LIT>'%(pic,pic)<EOL>npics+=<NUM_LIT:1><EOL><DEDENT>html+=\"<STR_LIT>\"<EOL><DEDENT>style.save(html,htmlFname)<EOL>self.log.info(\"<STR_LIT>\",htmlFname,npics)<EOL>", "docstring": "generate ./swhlab/xxyxxzzz.html for a single given abf.\nInput can be an ABF file path of ABF ID.", "id": "f11344:c0:m4"}
{"signature": "def html_single_fixed(self,ID):", "body": "return<EOL>", "docstring": "Single page generator designed for easy gruop comparisons.", "id": "f11344:c0:m5"}
{"signature": "def filesByExtension(fnames):", "body": "byExt={\"<STR_LIT>\":[],\"<STR_LIT>\":[],\"<STR_LIT>\":[]} <EOL>for fname in fnames:<EOL><INDENT>ext = os.path.splitext(fname)[<NUM_LIT:1>].replace(\"<STR_LIT:.>\",'<STR_LIT>').lower()<EOL>if not ext in byExt.keys():<EOL><INDENT>byExt[ext]=[]<EOL><DEDENT>byExt[ext]=byExt[ext]+[fname]<EOL><DEDENT>return byExt<EOL>", "docstring": "given a list of files, return a dict organized by extension.", "id": "f11344:m0"}
{"signature": "def html_index_splash(self):", "body": "html=\"\"\"<STR_LIT>\"\"\"%version.__version__<EOL>for parent in smartSort(self.fnamesByCell.keys()):<EOL><INDENT>html+='<STR_LIT>'%(parent,parent)<EOL>for child in self.fnamesByCell[parent]:<EOL><INDENT>fullpath=os.path.join(self.abfFolder,child)<EOL>protocol = swhlab.swh_abf.abfProtocol(fullpath)<EOL>html+='<STR_LIT>'%(fullpath,protocol)<EOL><DEDENT><DEDENT>style.save(html,self.abfFolder2+\"<STR_LIT>\")<EOL>return<EOL>", "docstring": "generate landing page.", "id": "f11344:c0:m3"}
{"signature": "def html_index(self,launch=False,showChildren=False):", "body": "self.makePics() <EOL>html='<STR_LIT>'%os.path.basename(self.abfFolder)<EOL>for ID in smartSort(self.fnamesByCell.keys()):<EOL><INDENT>link='<STR_LIT>'<EOL>if ID+\"<STR_LIT>\" in self.fnames2:<EOL><INDENT>link='<STR_LIT>'%ID<EOL><DEDENT>html+=('<STR_LIT>'%(link,ID)) <EOL>if showChildren:<EOL><INDENT>for fname in self.fnamesByCell[ID]:<EOL><INDENT>thisID=os.path.splitext(fname)[<NUM_LIT:0>]<EOL>files2=[x for x in self.fnames2 if x.startswith(thisID) and not x.endswith(\"<STR_LIT>\")]<EOL>html+='<STR_LIT>'%thisID <EOL>if len(files2):<EOL><INDENT>html+='<STR_LIT>'%len(files2) <EOL><DEDENT>html+='<STR_LIT>'<EOL><DEDENT>html+=\"<STR_LIT>\"<EOL><DEDENT><DEDENT>style.save(html,self.abfFolder2+\"<STR_LIT>\")<EOL>self.html_index_splash() <EOL>style.frames(self.abfFolder2+\"<STR_LIT>\",launch=launch)<EOL>", "docstring": "generate list of cells with links. keep this simple.\nautomatically generates splash page and regnerates frames.", "id": "f11344:c0:m2"}
{"signature": "def filesByCell(fnames,cells):", "body": "byCell={}<EOL>fnames=smartSort(fnames)<EOL>days = list(set([elem[:<NUM_LIT:5>] for elem in fnames if elem.endswith(\"<STR_LIT>\")])) <EOL>for day in smartSort(days):<EOL><INDENT>parent=None<EOL>for i,fname in enumerate([elem for elem in fnames if elem.startswith(day) and elem.endswith(\"<STR_LIT>\")]):<EOL><INDENT>ID=os.path.splitext(fname)[<NUM_LIT:0>]<EOL>if len([x for x in fnames if x.startswith(ID)])-<NUM_LIT:1>:<EOL><INDENT>parent=ID<EOL><DEDENT>if not parent in byCell:<EOL><INDENT>byCell[parent]=[]<EOL><DEDENT>byCell[parent]=byCell[parent]+[fname]<EOL><DEDENT><DEDENT>return byCell<EOL>", "docstring": "given files and cells, return a dict of files grouped by cell.", "id": "f11344:m2"}
{"signature": "def save(html,fname=None,launch=False):", "body": "html=html_top+html+html_bot<EOL>html=html.replace(\"<STR_LIT>\",swhlab.common.datetimeToString())<EOL>if fname is None:<EOL><INDENT>fname = tempfile.gettempdir()+\"<STR_LIT>\"<EOL>launch=True<EOL><DEDENT>fname=os.path.abspath(fname)<EOL>with open(fname,'<STR_LIT:w>') as f:<EOL><INDENT>f.write(html)<EOL><DEDENT>global stylesheetSaved<EOL>stylesheetPath=os.path.join(os.path.dirname(fname),\"<STR_LIT>\")<EOL>if not os.path.exists(stylesheetPath) or stylesheetSaved is False:<EOL><INDENT>with open(stylesheetPath,'<STR_LIT:w>') as f:<EOL><INDENT>f.write(stylesheet)<EOL>stylesheetSaved=True<EOL><DEDENT><DEDENT>if launch:<EOL><INDENT>webbrowser.open(fname)<EOL><DEDENT>", "docstring": "wrap HTML in a top and bottom (with css) and save to disk.", "id": "f11345:m1"}
{"signature": "def kernel_gaussian(size=<NUM_LIT:100>, sigma=None, forwardOnly=False):", "body": "if sigma is None:<EOL><INDENT>sigma=size/<NUM_LIT:10><EOL><DEDENT>size=int(size)<EOL>points=np.exp(-np.power(np.arange(size)-size/<NUM_LIT:2>,<NUM_LIT:2>)/(<NUM_LIT:2>*np.power(sigma,<NUM_LIT:2>)))<EOL>if forwardOnly:<EOL><INDENT>points[:int(len(points)/<NUM_LIT:2>)]=<NUM_LIT:0><EOL><DEDENT>return points/sum(points)<EOL>", "docstring": "return a 1d gassuan array of a given size and sigma.\nIf sigma isn't given, it will be 1/10 of the size, which is usually good.\nNote that this is fully numpy, and doesn't use scipy.", "id": "f11348:m1"}
{"signature": "def timeit(timer=None):", "body": "if timer is None:<EOL><INDENT>return time.time()<EOL><DEDENT>else:<EOL><INDENT>took=time.time()-timer<EOL>if took<<NUM_LIT:1>:<EOL><INDENT>return \"<STR_LIT>\"%(took*<NUM_LIT>)<EOL><DEDENT>elif took<<NUM_LIT>:<EOL><INDENT>return \"<STR_LIT>\"%(took)<EOL><DEDENT>else:<EOL><INDENT>return \"<STR_LIT>\"%(took/<NUM_LIT>)<EOL><DEDENT><DEDENT>", "docstring": "simple timer. returns a time object, or a string.", "id": "f11348:m8"}
{"signature": "def abfSort(IDs):", "body": "IDs=list(IDs)<EOL>monO=[]<EOL>monN=[]<EOL>monD=[]<EOL>good=[]<EOL>for ID in IDs:<EOL><INDENT>if ID is None:<EOL><INDENT>continue<EOL><DEDENT>if '<STR_LIT:o>' in ID:<EOL><INDENT>monO.append(ID)<EOL><DEDENT>elif '<STR_LIT:n>' in ID:<EOL><INDENT>monN.append(ID)<EOL><DEDENT>elif '<STR_LIT:d>' in ID:<EOL><INDENT>monD.append(ID)<EOL><DEDENT>else:<EOL><INDENT>good.append(ID)<EOL><DEDENT><DEDENT>return sorted(good)+sorted(monO)+sorted(monN)+sorted(monD)<EOL>", "docstring": "given a list of goofy ABF names, return it sorted intelligently.\nThis places things like 16o01001 after 16901001.", "id": "f11348:m17"}
{"signature": "def abfFname_Load():", "body": "fname=userFolder()+\"<STR_LIT>\"<EOL>if os.path.exists(fname):<EOL><INDENT>abfFname=open(fname).read().strip()<EOL>if os.path.exists(abfFname) or abfFname.endswith(\"<STR_LIT>\"):<EOL><INDENT>return abfFname<EOL><DEDENT><DEDENT>return os.path.abspath(os.sep)<EOL>", "docstring": "return the path of the last loaded ABF.", "id": "f11348:m23"}
{"signature": "def convolve(signal,kernel):", "body": "pad=np.ones(len(kernel)/<NUM_LIT:2>)<EOL>signal=np.concatenate((pad*signal[<NUM_LIT:0>],signal,pad*signal[-<NUM_LIT:1>]))<EOL>signal=np.convolve(signal,kernel,mode='<STR_LIT>')<EOL>signal=signal[len(pad):-len(pad)]<EOL>return signal<EOL>", "docstring": "This applies a kernel to a signal through convolution and returns the result.\n\nSome magic is done at the edges so the result doesn't apprach zero:\n    1. extend the signal's edges with len(kernel)/2 duplicated values\n    2. perform the convolution ('same' mode)\n    3. slice-off the ends we added\n    4. return the same number of points as the original", "id": "f11348:m3"}
{"signature": "def list_move_to_back(l,value='<STR_LIT>'):", "body": "l=list(l)<EOL>if value in l:<EOL><INDENT>l.remove(value)<EOL>l.append(value)<EOL><DEDENT>return l<EOL>", "docstring": "if the value is in the list, move it to the back and return it.", "id": "f11348:m13"}
{"signature": "def ext(fname):", "body": "if \"<STR_LIT:.>\" in fname:<EOL><INDENT>return os.path.splitext(fname)[<NUM_LIT:1>]<EOL><DEDENT>return fname<EOL>", "docstring": "return the extension of a filename.", "id": "f11348:m16"}
{"signature": "def pause():", "body": "input(\"<STR_LIT>\")<EOL>", "docstring": "halt everything until user input. Use this sparingly.", "id": "f11348:m5"}
{"signature": "def list_to_lowercase(l):", "body": "return [x.lower() for x in l if type(x) is str]<EOL>", "docstring": "given a list of strings, make them all lowercase.", "id": "f11348:m15"}
{"signature": "def abfFname_Save(abfFname):", "body": "fname=userFolder()+\"<STR_LIT>\"<EOL>with open(fname,'<STR_LIT:w>') as f:<EOL><INDENT>f.write(os.path.abspath(abfFname))<EOL><DEDENT>return<EOL>", "docstring": "return the path of the last loaded ABF.", "id": "f11348:m24"}
{"signature": "def kernel_gaussian(self, sizeMS, sigmaMS=None, forwardOnly=False):", "body": "sigmaMS=sizeMS/<NUM_LIT:10> if sigmaMS is None else sigmaMS<EOL>size,sigma=sizeMS*self.pointsPerMs,sigmaMS*self.pointsPerMs<EOL>self.kernel=swhlab.common.kernel_gaussian(size,sigma,forwardOnly)<EOL>return self.kernel<EOL>", "docstring": "create kernel based on this ABF info.", "id": "f11350:c0:m12"}
{"signature": "def get_protocol(self,sweep):", "body": "self.setsweep(sweep)<EOL>return list(self.protoX),list(self.protoY)<EOL>", "docstring": "given a sweep, return the protocol as [Xs,Ys].\nThis is good for plotting/recreating the protocol trace.\nThere may be duplicate numbers.", "id": "f11350:c0:m6"}
{"signature": "def generate_protocol(self):", "body": "<EOL>self.offsetX = int(self.sweepSize/<NUM_LIT:64>)<EOL>if not len(self.header['<STR_LIT>']):<EOL><INDENT>self.log.debug(\"<STR_LIT>\")<EOL>self.protoX,self.protoY=[<NUM_LIT:0>,self.sweepX[-<NUM_LIT:1>]],[self.holding,self.holding]<EOL>self.protoSeqX,self.protoSeqY=[<NUM_LIT:0>],[self.holding]<EOL>return<EOL><DEDENT>proto=self.header['<STR_LIT>'][self.channel]<EOL>self.protoX,self.protoY=[] ,[]<EOL>self.protoX.append(<NUM_LIT:0>)<EOL>self.protoY.append(self.holding) <EOL>for step in proto:<EOL><INDENT>dX = proto[step]['<STR_LIT>']<EOL>Y = proto[step]['<STR_LIT>']+proto[step]['<STR_LIT>']*self.sweep<EOL>self.protoX.append(self.protoX[-<NUM_LIT:1>])<EOL>self.protoY.append(Y)<EOL>self.protoX.append(self.protoX[-<NUM_LIT:1>]+dX)<EOL>self.protoY.append(Y)<EOL><DEDENT>finalVal=self.holding <EOL>if self.header['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT>']:<EOL><INDENT>finalVal=self.protoY[-<NUM_LIT:1>]<EOL><DEDENT>self.protoX.append(self.protoX[-<NUM_LIT:1>])<EOL>self.protoY.append(finalVal)<EOL>self.protoX.append(self.sweepSize)<EOL>self.protoY.append(finalVal)<EOL>for i in range(<NUM_LIT:1>,len(self.protoX)-<NUM_LIT:1>): <EOL><INDENT>self.protoX[i]=self.protoX[i]+self.offsetX<EOL><DEDENT>self.protoSeqY=[self.protoY[<NUM_LIT:0>]]<EOL>self.protoSeqX=[self.protoX[<NUM_LIT:0>]]<EOL>for i in range(<NUM_LIT:1>,len(self.protoY)):<EOL><INDENT>if not self.protoY[i]==self.protoY[i-<NUM_LIT:1>]:<EOL><INDENT>self.protoSeqY.append(self.protoY[i])<EOL>self.protoSeqX.append(self.protoX[i])<EOL><DEDENT><DEDENT>if 
self.protoY[<NUM_LIT:0>]!=self.protoY[<NUM_LIT:1>]:<EOL><INDENT>self.protoY.insert(<NUM_LIT:1>,self.protoY[<NUM_LIT:0>])<EOL>self.protoX.insert(<NUM_LIT:1>,self.protoX[<NUM_LIT:1>])<EOL>self.protoY.insert(<NUM_LIT:1>,self.protoY[<NUM_LIT:0>])<EOL>self.protoX.insert(<NUM_LIT:1>,self.protoX[<NUM_LIT:0>]+self.offsetX/<NUM_LIT:2>)<EOL><DEDENT>self.protoSeqY.append(finalVal)<EOL>self.protoSeqX.append(self.sweepSize)<EOL>self.protoX=np.array(self.protoX)/self.pointsPerSec<EOL>self.protoY=np.array(self.protoY)<EOL>", "docstring": "Recreate the command stimulus (protocol) for the current sweep.\nIt's not stored point by point (that's a waste of time and memory!)\nInstead it's stored as a few (x,y) points which can be easily graphed.\n\nTODO: THIS\nfor segment in abf.ABFreader.read_protocol():\n    for analogsignal in segment.analogsignals:\n        print(analogsignal)\n        plt.plot(analogsignal)\n        plt.show()\n        plt.close('all')", "id": "f11350:c0:m5"}
{"signature": "def abfIDfromFname(fname):", "body": "fname=os.path.abspath(fname)<EOL>basename=os.path.basename(fname)<EOL>return os.path.splitext(basename)[<NUM_LIT:0>]<EOL>", "docstring": "given a filename, return the ABFs ID string.", "id": "f11350:m0"}
{"signature": "def sweepYfiltered(self):", "body": "assert self.kernel is not None<EOL>return swhlab.common.convolve(self.sweepY,self.kernel)<EOL>", "docstring": "Get the filtered sweepY of the current sweep.\nOnly works if self.kernel has been generated.", "id": "f11350:c0:m13"}
{"signature": "def averageSweep(self,sweepFirst=<NUM_LIT:0>,sweepLast=None):", "body": "if sweepLast is None:<EOL><INDENT>sweepLast=self.sweeps-<NUM_LIT:1><EOL><DEDENT>nSweeps=sweepLast-sweepFirst+<NUM_LIT:1><EOL>runningSum=np.zeros(len(self.sweepY))<EOL>self.log.debug(\"<STR_LIT>\",sweepFirst,sweepLast)<EOL>for sweep in np.arange(nSweeps)+sweepFirst:<EOL><INDENT>self.setsweep(sweep)<EOL>runningSum+=self.sweepY.flatten()<EOL><DEDENT>average=runningSum/nSweeps<EOL>return average<EOL>", "docstring": "Return a sweep which is the average of multiple sweeps.\nFor now, standard deviation is lost.", "id": "f11350:c0:m11"}
{"signature": "def average(self,t1=<NUM_LIT:0>,t2=None,setsweep=False):", "body": "if setsweep:<EOL><INDENT>self.setsweep(setsweep)<EOL><DEDENT>if t2 is None or t2>self.sweepLength:<EOL><INDENT>t2=self.sweepLength<EOL>self.log.debug(\"<STR_LIT>\",t2)<EOL><DEDENT>t1=max(t1,<NUM_LIT:0>)<EOL>if t1>t2:<EOL><INDENT>self.log.error(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>I1,I2=int(t1*self.pointsPerSec),int(t2*self.pointsPerSec)<EOL>if I1==I2:<EOL><INDENT>return np.nan<EOL><DEDENT>return np.average(self.sweepY[I1:I2])<EOL>", "docstring": "return the average of part of the current sweep.", "id": "f11350:c0:m10"}
{"signature": "def get_protocol_sequence(self,sweep):", "body": "self.setsweep(sweep)<EOL>return list(self.protoSeqX),list(self.protoSeqY)<EOL>", "docstring": "given a sweep, return the protocol as condensed sequence.\nThis is better for comparing similarities and determining steps.\nThere should be no duplicate numbers.", "id": "f11350:c0:m7"}
{"signature": "def abfProtocol(fname):", "body": "f=open(fname,'<STR_LIT:rb>')<EOL>raw=f.read(<NUM_LIT:30>*<NUM_LIT:1000>) <EOL>f.close()<EOL>raw=raw.decode(\"<STR_LIT:utf-8>\",\"<STR_LIT:ignore>\")<EOL>raw=raw.split(\"<STR_LIT>\")[<NUM_LIT:1>].split(\"<STR_LIT>\")[<NUM_LIT:0>]<EOL>protocol = os.path.basename(raw) <EOL>protocolID = protocol.split(\"<STR_LIT:U+0020>\")[<NUM_LIT:0>] <EOL>return protocolID<EOL>", "docstring": "Determine the protocol used to record an ABF file", "id": "f11350:m1"}
{"signature": "def sweepYsmartbase(self):", "body": "return self.sweepY-self.sweepYfiltered()<EOL>", "docstring": "return the sweep with sweepYfiltered subtracted from it.", "id": "f11350:c0:m14"}
{"signature": "def headerHTML(header,fname):", "body": "html=\"<STR_LIT>\"<EOL>html+=\"<STR_LIT>\"%(fname)<EOL>html+=pprint.pformat(header, indent=<NUM_LIT:1>)<EOL>html=html.replace(\"<STR_LIT:\\n>\",'<STR_LIT>').replace(\"<STR_LIT:U+0020>\",\"<STR_LIT>\")<EOL>html=html.replace(r\"<STR_LIT:\\x00>\",\"<STR_LIT>\")<EOL>html+=\"<STR_LIT>\"<EOL>print(\"<STR_LIT>\",fname)<EOL>f=open(fname,'<STR_LIT:w>')<EOL>f.write(html)<EOL>f.close()<EOL>webbrowser.open(fname)<EOL>", "docstring": "given the bytestring ABF header, make and launch HTML.", "id": "f11350:m2"}
{"signature": "def setsweeps(self):", "body": "for sweep in range(self.sweeps):<EOL><INDENT>self.setsweep(sweep)<EOL>yield self.sweep<EOL><DEDENT>", "docstring": "iterate over every sweep", "id": "f11350:c0:m3"}
{"signature": "def inspect(self):", "body": "<INDENT>webinspect.blacklist=[] <EOL>webinspect.launch(self.ABFblock.segments[<NUM_LIT:0>].eventarrays[<NUM_LIT:0>],'<STR_LIT>')<EOL>webinspect.blacklist=['<STR_LIT>'] <EOL>webinspect.launch(self.ABFblock.segments[<NUM_LIT:0>].analogsignals[<NUM_LIT:0>],'<STR_LIT>')<EOL>webinspect.blacklist=['<STR_LIT>','<STR_LIT>'] <EOL>webinspect.launch(self.ABFblock.segments[<NUM_LIT:0>],'<STR_LIT>')<EOL>webinspect.blacklist=[] <EOL>webinspect.launch(self.ABFblock,'<STR_LIT>')<EOL>webinspect.blacklist=[] <EOL>webinspect.launch(self.ABFreader,'<STR_LIT>')<EOL><DEDENT>headerFile=r\"<STR_LIT>\"<EOL>headerHTML(self.header,headerFile)<EOL>", "docstring": "Generate HTML containing information about NeoIO objects.\nThis is useful when trying to figure out how to extract data from ABFs.", "id": "f11350:c0:m18"}
{"signature": "def proto_0201(theABF):", "body": "abf=ABF(theABF)<EOL>abf.log.info(\"<STR_LIT>\")<EOL>plot=ABFplot(abf)<EOL>plot.figure_height,plot.figure_width=SQUARESIZE/<NUM_LIT:2>,SQUARESIZE/<NUM_LIT:2><EOL>plot.figure_sweeps()<EOL>plt.tight_layout()<EOL>frameAndSave(abf,\"<STR_LIT>\")<EOL>plt.close('<STR_LIT:all>')<EOL>", "docstring": "protocol: membrane test.", "id": "f11351:m7"}
{"signature": "def proto_0303(theABF):", "body": "abf=ABF(theABF)<EOL>abf.log.info(\"<STR_LIT>\")<EOL>proto_avgRange(theABF,<NUM_LIT>,<NUM_LIT>)<EOL>plt.close('<STR_LIT:all>')<EOL>plt.figure(figsize=(<NUM_LIT:8>,<NUM_LIT:8>))<EOL>for sweep in abf.setsweeps():<EOL><INDENT>color='<STR_LIT:b>'<EOL>if sweep in np.array(abf.comment_sweeps,dtype=int):<EOL><INDENT>color='<STR_LIT:r>'<EOL><DEDENT>plt.plot(abf.sweepX2,abf.sweepY+<NUM_LIT:100>*sweep,color=color,alpha=<NUM_LIT>)<EOL><DEDENT>plt.margins(<NUM_LIT:0>,<NUM_LIT>)<EOL>plt.tight_layout()<EOL>frameAndSave(abf,\"<STR_LIT>\")<EOL>plt.close('<STR_LIT:all>')<EOL>ap=AP(abf)<EOL>ap.detect_time1=<NUM_LIT><EOL>ap.detect_time2=<NUM_LIT><EOL>ap.detect()<EOL>apCount=[]<EOL>apSweepTimes=[]<EOL>for sweepNumber,times in enumerate(ap.get_bySweep(\"<STR_LIT>\")):<EOL><INDENT>apCount.append(len(times))<EOL>if len(times):<EOL><INDENT>apSweepTimes.append(times[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>apSweepTimes.append(<NUM_LIT:0>)<EOL><DEDENT><DEDENT>plt.figure(figsize=(<NUM_LIT:8>,<NUM_LIT:8>))<EOL>ax1=plt.subplot(<NUM_LIT>)<EOL>plt.grid(alpha=<NUM_LIT>,ls='<STR_LIT>')<EOL>plt.plot(np.arange(len(apCount))*abf.sweepLength/<NUM_LIT>,apCount,'<STR_LIT>',ms=<NUM_LIT:15>)<EOL>comment_lines(abf)<EOL>plt.ylabel(\"<STR_LIT>\")<EOL>plt.subplot(<NUM_LIT>,sharex=ax1)<EOL>plt.grid(alpha=<NUM_LIT>,ls='<STR_LIT>')<EOL>plt.plot(np.arange(len(apCount))*abf.sweepLength/<NUM_LIT>,apSweepTimes,'<STR_LIT>',ms=<NUM_LIT:15>)<EOL>comment_lines(abf)<EOL>plt.ylabel(\"<STR_LIT>\")<EOL>plt.xlabel(\"<STR_LIT>\")<EOL>plt.tight_layout()<EOL>frameAndSave(abf,\"<STR_LIT>\")<EOL>plt.close('<STR_LIT:all>')<EOL>", "docstring": "protocol: repeated IC ramps.", "id": "f11351:m13"}
{"signature": "def proto_0222(theABF):", "body": "abf=ABF(theABF)<EOL>abf.log.info(\"<STR_LIT>\")<EOL>plot=ABFplot(abf)<EOL>plot.figure_height,plot.figure_width=SQUARESIZE/<NUM_LIT:2>,SQUARESIZE/<NUM_LIT:2><EOL>plot.figure_sweeps()<EOL>plt.tight_layout()<EOL>frameAndSave(abf,\"<STR_LIT>\")<EOL>plt.close('<STR_LIT:all>')<EOL>", "docstring": "protocol: VC sine sweep.", "id": "f11351:m11"}
{"signature": "def proto_avgRange(theABF,m1=None,m2=None):", "body": "abf=ABF(theABF)<EOL>abf.log.info(\"<STR_LIT>\")<EOL>if m1 is None:<EOL><INDENT>m1=abf.sweepLength<EOL><DEDENT>if m2 is None:<EOL><INDENT>m2=abf.sweepLength<EOL><DEDENT>I1=int(abf.pointsPerSec*m1)<EOL>I2=int(abf.pointsPerSec*m2)<EOL>Ts=np.arange(abf.sweeps)*abf.sweepInterval<EOL>Yav=np.empty(abf.sweeps)*np.nan <EOL>Ysd=np.empty(abf.sweeps)*np.nan <EOL>for sweep in abf.setsweeps():<EOL><INDENT>Yav[sweep]=np.average(abf.sweepY[I1:I2])<EOL>Ysd[sweep]=np.std(abf.sweepY[I1:I2])<EOL><DEDENT>plot=ABFplot(abf)<EOL>plt.figure(figsize=(SQUARESIZE*<NUM_LIT:2>,SQUARESIZE/<NUM_LIT:2>))<EOL>plt.subplot(<NUM_LIT>)<EOL>plot.title=\"<STR_LIT>\"<EOL>plot.figure_sweep(<NUM_LIT:0>)<EOL>plt.title(\"<STR_LIT>\")<EOL>plt.axvspan(m1,m2,color='<STR_LIT:r>',ec=None,alpha=<NUM_LIT>)<EOL>plt.subplot(<NUM_LIT>)<EOL>plt.grid(alpha=<NUM_LIT>)<EOL>for i,t in enumerate(abf.comment_times):<EOL><INDENT>plt.axvline(t/<NUM_LIT>,color='<STR_LIT:r>',alpha=<NUM_LIT>,lw=<NUM_LIT:2>,ls='<STR_LIT>')<EOL><DEDENT>plt.plot(Ts/<NUM_LIT>,Yav,'<STR_LIT:.>',alpha=<NUM_LIT>)<EOL>plt.title(\"<STR_LIT>\"%(\"<STR_LIT:U+002CU+0020>\".join(abf.comment_tags)))<EOL>plt.ylabel(abf.units2)<EOL>plt.xlabel(\"<STR_LIT>\")<EOL>plt.margins(<NUM_LIT:0>,<NUM_LIT>)<EOL>plt.subplot(<NUM_LIT>)<EOL>plt.grid(alpha=<NUM_LIT>)<EOL>for i,t in enumerate(abf.comment_times):<EOL><INDENT>plt.axvline(t/<NUM_LIT>,color='<STR_LIT:r>',alpha=<NUM_LIT>,lw=<NUM_LIT:2>,ls='<STR_LIT>')<EOL><DEDENT>plt.plot(Ts/<NUM_LIT>,Ysd,'<STR_LIT:.>',alpha=<NUM_LIT>,color='<STR_LIT:g>',ms=<NUM_LIT:15>,mew=<NUM_LIT:0>)<EOL>plt.title(\"<STR_LIT>\"%(\"<STR_LIT:U+002CU+0020>\".join(abf.comment_tags)))<EOL>plt.ylabel(abf.units2)<EOL>plt.xlabel(\"<STR_LIT>\")<EOL>plt.margins(<NUM_LIT:0>,<NUM_LIT>)<EOL>plt.axis([None,None,<NUM_LIT:0>,np.percentile(Ysd,<NUM_LIT>)*<NUM_LIT>])<EOL>plt.tight_layout()<EOL>frameAndSave(abf,\"<STR_LIT>\",\"<STR_LIT>\")<EOL>plt.close('<STR_LIT:all>')<EOL>", "docstring": 
"experiment: generic VC time course experiment.", "id": "f11351:m27"}
{"signature": "def detect(self):", "body": "self.log.info(\"<STR_LIT>\")<EOL>t1=cm.timeit()<EOL>for sweep in range(self.abf.sweeps):<EOL><INDENT>self.detectSweep(sweep)<EOL><DEDENT>self.log.info(\"<STR_LIT>\",<EOL>self.abf.sweeps,len(self.APs),cm.timeit(t1))<EOL>", "docstring": "runs AP detection on every sweep.", "id": "f11352:c0:m3"}
{"signature": "def get_bySweep(self,feature=\"<STR_LIT>\"):", "body": "self.ensureDetection()<EOL>bySweepTimes=[[]]*self.abf.sweeps<EOL>for sweep in range(self.abf.sweeps):<EOL><INDENT>sweepTimes=[]<EOL>for ap in self.APs:<EOL><INDENT>if ap[\"<STR_LIT>\"]==sweep:<EOL><INDENT>sweepTimes.append(ap[\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>bySweepTimes[sweep]=sweepTimes<EOL><DEDENT>bySweepFreqs=[[]]*self.abf.sweeps<EOL>for i,times in enumerate(bySweepTimes):<EOL><INDENT>if len(times)<<NUM_LIT:2>:<EOL><INDENT>continue<EOL><DEDENT>diffs=np.array(times[<NUM_LIT:1>:])-np.array(times[:-<NUM_LIT:1>])<EOL>bySweepFreqs[i]=np.array(<NUM_LIT:1>/diffs).tolist()<EOL><DEDENT>if feature == \"<STR_LIT>\":<EOL><INDENT>return bySweepFreqs<EOL><DEDENT>elif feature == \"<STR_LIT>\":<EOL><INDENT>result=np.zeros(self.abf.sweeps) <EOL>for i,freqs in enumerate(bySweepFreqs):<EOL><INDENT>if len(freqs):<EOL><INDENT>result[i]=freqs[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>return result<EOL><DEDENT>elif feature == \"<STR_LIT>\":<EOL><INDENT>return bySweepTimes<EOL><DEDENT>elif feature == \"<STR_LIT:count>\":<EOL><INDENT>result=np.zeros(self.abf.sweeps) <EOL>for i,times in enumerate(bySweepTimes):<EOL><INDENT>result[i]=len(bySweepTimes[i])<EOL><DEDENT>return result<EOL><DEDENT>elif feature == \"<STR_LIT>\":<EOL><INDENT>result=np.zeros(self.abf.sweeps) <EOL>for i,freqs in enumerate(bySweepFreqs):<EOL><INDENT>if len(freqs):<EOL><INDENT>result[i]=np.nanmean(freqs)<EOL><DEDENT><DEDENT>return result<EOL><DEDENT>elif feature == \"<STR_LIT>\":<EOL><INDENT>result=np.zeros(self.abf.sweeps) <EOL>for i,freqs in enumerate(bySweepFreqs):<EOL><INDENT>if len(freqs):<EOL><INDENT>result[i]=np.nanmedian(freqs)<EOL><DEDENT><DEDENT>return result<EOL><DEDENT>else:<EOL><INDENT>self.log.error(\"<STR_LIT>\",feature)<EOL>return None<EOL><DEDENT>", "docstring": "returns AP info by sweep arranged as a list (by sweep).\n\nfeature:\n    * \"freqs\" - list of instantaneous frequencies by sweep.\n    * \"firsts\" - list of first 
instantaneous frequency by sweep.\n    * \"times\" - list of times of each AP in the sweep.\n    * \"count\" - numer of APs per sweep.\n    * \"average\" - average instanteous frequency per sweep.\n    * \"median\" - median instanteous frequency per sweep.", "id": "f11352:c0:m6"}
{"signature": "def get_times(self):", "body": "self.ensureDetection()<EOL>times=[]<EOL>for ap in self.APs:<EOL><INDENT>times.append(ap[\"<STR_LIT:T>\"])<EOL><DEDENT>return np.array(sorted(times))<EOL>", "docstring": "return an array of times (in sec) of all APs.", "id": "f11352:c0:m5"}
{"signature": "def __init__(self,abf):", "body": "self.log = logging.getLogger(\"<STR_LIT>\")<EOL>self.log.setLevel(swhlab.loglevel)<EOL>if abf in [None,False,'<STR_LIT>']:<EOL><INDENT>self.log.error(\"<STR_LIT>\",str(abf))<EOL>return<EOL><DEDENT>if type(abf) is str:<EOL><INDENT>self.log.debug(\"<STR_LIT>\")<EOL>abf=ABF(abf)<EOL><DEDENT>self.abf=abf<EOL>self.detect_over = <NUM_LIT:50> <EOL>self.detect_time1 = <NUM_LIT:0> <EOL>self.detect_time2 = abf.sweepLength <EOL>self.APs=False<EOL>", "docstring": "Load an ABF and get ready to do AP detection.\nAfter detect(), all AP data is stored as a list of dicts in AP.APs", "id": "f11352:c0:m0"}
{"signature": "def detectSweep(self,sweep=<NUM_LIT:0>):", "body": "if self.APs is False: <EOL><INDENT>self.APs=[] <EOL><DEDENT>for i,ap in enumerate(self.APs):<EOL><INDENT>if ap[\"<STR_LIT>\"]==sweep:<EOL><INDENT>self.APs[i]=None<EOL><DEDENT><DEDENT>if self.APs.count(None):<EOL><INDENT>self.log.debug(\"<STR_LIT>\",self.APs.count(None))<EOL>while None in self.APs:<EOL><INDENT>self.APs.remove(None)<EOL><DEDENT><DEDENT>self.log.debug(\"<STR_LIT>\",len(self.APs))<EOL>self.abf.derivative=True<EOL>self.abf.setsweep(sweep)<EOL>Is = cm.where_cross(self.abf.sweepD,self.detect_over)<EOL>self.log.debug(\"<STR_LIT>\"%len(Is))<EOL>for i,I in enumerate(Is):<EOL><INDENT>if np.min(self.abf.sweepD[I:I+<NUM_LIT:2>*self.abf.pointsPerMs])>-<NUM_LIT:10>:<EOL><INDENT>Is[i]=<NUM_LIT:0><EOL><DEDENT><DEDENT>Is=Is[np.nonzero(Is)]<EOL>self.log.debug(\"<STR_LIT>\"%len(Is))<EOL>for i,I in enumerate(Is):<EOL><INDENT>stepBack=<NUM_LIT:0><EOL>while(self.abf.sweepD[I-stepBack])><NUM_LIT:10> and stepBack/self.abf.pointsPerMs<<NUM_LIT:1>: <EOL><INDENT>stepBack+=<NUM_LIT:1><EOL><DEDENT>Is[i]-=stepBack<EOL><DEDENT>sweepAPs=[]<EOL>for i,I in enumerate(Is):<EOL><INDENT>try:<EOL><INDENT>timeInSweep=I/self.abf.pointsPerSec<EOL>if timeInSweep<self.detect_time1 or timeInSweep>self.detect_time2:<EOL><INDENT>continue <EOL><DEDENT>ap={} <EOL>ap[\"<STR_LIT>\"]=sweep <EOL>ap[\"<STR_LIT:I>\"]=I <EOL>ap[\"<STR_LIT>\"]=I/self.abf.pointsPerSec <EOL>ap[\"<STR_LIT:T>\"]=ap[\"<STR_LIT>\"]+self.abf.sweepInterval*sweep <EOL>ap[\"<STR_LIT>\"]=self.abf.sweepY[I] <EOL>chunk=self.abf.sweepD[I:I+<NUM_LIT:5>*self.abf.pointsPerMs] <EOL>I_toNegTen=np.where(chunk<-<NUM_LIT:10>)[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>chunk=self.abf.sweepD[I+I_toNegTen:I+I_toNegTen+<NUM_LIT:10>*self.abf.pointsPerMs] <EOL>if not max(chunk)>-<NUM_LIT:10>:<EOL><INDENT>self.log.debug(\"<STR_LIT>\"%ap[\"<STR_LIT:T>\"])<EOL>self.log.error(\"<STR_LIT>\")<EOL>continue <EOL><DEDENT>I_recover=np.where(chunk>-<NUM_LIT:10>)[<NUM_LIT:0>][<NUM_LIT:0>]+I_toNegTen+I 
<EOL>ap[\"<STR_LIT>\"]=[I,I_recover] <EOL>ap[\"<STR_LIT>\"]=(I_recover-I)/self.abf.pointsPerMs <EOL>chunk=self.abf.sweepD[ap[\"<STR_LIT>\"][<NUM_LIT:0>]:ap[\"<STR_LIT>\"][<NUM_LIT:1>]]<EOL>ap[\"<STR_LIT>\"]=np.max(chunk)<EOL>ap[\"<STR_LIT>\"]=np.where(chunk==ap[\"<STR_LIT>\"])[<NUM_LIT:0>][<NUM_LIT:0>]+I<EOL>ap[\"<STR_LIT>\"]=np.min(chunk)<EOL>ap[\"<STR_LIT>\"]=np.where(chunk==ap[\"<STR_LIT>\"])[<NUM_LIT:0>][<NUM_LIT:0>]+I<EOL>if ap[\"<STR_LIT>\"]<<NUM_LIT:10> or ap[\"<STR_LIT>\"]>-<NUM_LIT:10>:<EOL><INDENT>self.log.debug(\"<STR_LIT>\")<EOL>self.log.error(\"<STR_LIT>\")<EOL>continue<EOL><DEDENT>chunkSize=self.abf.pointsPerMs*<NUM_LIT:10> <EOL>if len(Is)-<NUM_LIT:1>>i and Is[i+<NUM_LIT:1>]<(I+chunkSize): <EOL><INDENT>chunkSize=Is[i+<NUM_LIT:1>]-I <EOL><DEDENT>if chunkSize<(self.abf.pointsPerMs*<NUM_LIT:2>):<EOL><INDENT>continue <EOL><DEDENT>ap[\"<STR_LIT>\"]=[I,I+chunkSize] <EOL>chunk=self.abf.sweepY[I:I+chunkSize]<EOL>ap[\"<STR_LIT>\"]=np.max(chunk)<EOL>ap[\"<STR_LIT>\"]=np.where(chunk==ap[\"<STR_LIT>\"])[<NUM_LIT:0>][<NUM_LIT:0>]+I<EOL>chunkForMin=np.copy(chunk) <EOL>chunkForMin[:ap[\"<STR_LIT>\"]-I]=np.inf <EOL>ap[\"<STR_LIT>\"]=np.min(chunkForMin) <EOL>ap[\"<STR_LIT>\"]=np.where(chunkForMin==ap[\"<STR_LIT>\"])[<NUM_LIT:0>][<NUM_LIT:0>]+I<EOL>if ap[\"<STR_LIT>\"]<ap[\"<STR_LIT>\"]:<EOL><INDENT>self.log.error(\"<STR_LIT>\")<EOL>self.log.error(\"<STR_LIT>\") <EOL>self.log.error(\"<STR_LIT>\")<EOL><DEDENT>if (len(chunk))-((I+len(chunk))-ap[\"<STR_LIT>\"])<<NUM_LIT:10>:<EOL><INDENT>self.log.error(\"<STR_LIT>\")<EOL>self.log.error(\"<STR_LIT>\")<EOL>self.log.error(\"<STR_LIT>\")<EOL><DEDENT>ap[\"<STR_LIT>\"]=(ap[\"<STR_LIT>\"]-I)/self.abf.pointsPerMs <EOL>ap[\"<STR_LIT>\"]=(ap[\"<STR_LIT>\"]-ap[\"<STR_LIT>\"])/self.abf.pointsPerMs <EOL>ap[\"<STR_LIT>\"]=np.average([ap[\"<STR_LIT>\"],ap[\"<STR_LIT>\"]]) <EOL>ap[\"<STR_LIT>\"]=cm.where_cross(chunk,ap[\"<STR_LIT>\"])[<NUM_LIT:0>]+I <EOL>ap[\"<STR_LIT>\"]=cm.where_cross(-chunk,-ap[\"<STR_LIT>\"])[<NUM_LIT:1>]+I 
<EOL>ap[\"<STR_LIT>\"]=(ap[\"<STR_LIT>\"]-ap[\"<STR_LIT>\"])/self.abf.pointsPerMs <EOL>sweepAPs.extend([ap])<EOL><DEDENT>except Exception as e:<EOL><INDENT>self.log.error(\"<STR_LIT>\",i,len(Is))<EOL>self.log.error(cm.exceptionToString(e))<EOL><DEDENT><DEDENT>self.log.debug(\"<STR_LIT>\",len(sweepAPs))<EOL>self.APs.extend(sweepAPs)<EOL>self.abf.derivative=False<EOL>", "docstring": "perform AP detection on current sweep.", "id": "f11352:c0:m4"}
{"signature": "def ensureDetection(self):", "body": "if self.APs==False:<EOL><INDENT>self.log.debug(\"<STR_LIT>\")<EOL>self.detect()<EOL><DEDENT>", "docstring": "run this before analysis. Checks if event detection occured.\nIf not, runs AP detection on all sweeps.", "id": "f11352:c0:m2"}
{"signature": "def processFolder(abfFolder):", "body": "if not type(abfFolder) is str or not len(abfFolder)><NUM_LIT:3>:<EOL><INDENT>return<EOL><DEDENT>files=sorted(glob.glob(abfFolder+\"<STR_LIT>\"))<EOL>for i,fname in enumerate(files):<EOL><INDENT>print(\"<STR_LIT>\".format(i,len(files)),os.path.basename(fname))<EOL>processAbf(fname,show=False)<EOL><DEDENT>plt.show()<EOL>return<EOL>", "docstring": "call processAbf() for every ABF in a folder.", "id": "f11353:m0"}
{"signature": "def processAbf(abfFname,saveAs=False,dpi=<NUM_LIT:100>,show=True):", "body": "if not type(abfFname) is str or not len(abfFname)><NUM_LIT:3>:<EOL><INDENT>return<EOL><DEDENT>abf=swhlab.ABF(abfFname)<EOL>plot=swhlab.plotting.ABFplot(abf)<EOL>plot.figure_height=<NUM_LIT:6><EOL>plot.figure_width=<NUM_LIT:10><EOL>plot.subplot=False<EOL>plot.figure(True)<EOL>if abf.get_protocol_sequence(<NUM_LIT:0>)==abf.get_protocol_sequence(<NUM_LIT:1>) or abf.sweeps<<NUM_LIT:2>:<EOL><INDENT>if abf.lengthMinutes<<NUM_LIT:2>:<EOL><INDENT>ax1=plt.subplot(<NUM_LIT>)<EOL>plot.figure_sweeps()<EOL>plt.title(\"<STR_LIT>\".format(abf.ID,abf.sweeps))<EOL>plt.gca().get_xaxis().set_visible(False)<EOL>plt.subplot(<NUM_LIT>,sharex=ax1)<EOL>plot.figure_protocol()<EOL>plt.title(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>plot.figure_chronological()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>plots=[<NUM_LIT>,<NUM_LIT>] <EOL>if abf.units=='<STR_LIT>': <EOL><INDENT>ap=swhlab.AP(abf) <EOL>ap.detect() <EOL>if len(ap.APs): <EOL><INDENT>plots=[<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>] <EOL><DEDENT><DEDENT>ax1=plt.subplot(plots[<NUM_LIT:0>])<EOL>plot.figure_sweeps()<EOL>plt.title(\"<STR_LIT>\".format(abf.ID,abf.sweeps))<EOL>plt.gca().get_xaxis().set_visible(False)<EOL>plt.subplot(plots[<NUM_LIT:1>],sharex=ax1)<EOL>plot.figure_protocols()<EOL>plt.title(\"<STR_LIT>\")<EOL>if len(plots)><NUM_LIT:2>:<EOL><INDENT>ax2=plt.subplot(plots[<NUM_LIT:2>])<EOL>plot.rainbow=False<EOL>plot.kwargs[\"<STR_LIT>\"]='<STR_LIT:b>'<EOL>plot.figure_chronological()<EOL>plt.gca().get_xaxis().set_visible(False)<EOL>plt.title(\"<STR_LIT>\")<EOL>plt.subplot(plots[<NUM_LIT:3>],sharex=ax2)<EOL>plot.abf.derivative=True<EOL>plot.rainbow=False<EOL>plot.traceColor='<STR_LIT:r>'<EOL>plot.figure_chronological()<EOL>plt.axis([ap.APs[<NUM_LIT:0>][\"<STR_LIT:T>\"]-<NUM_LIT>,ap.APs[<NUM_LIT:0>][\"<STR_LIT:T>\"]+<NUM_LIT>,None,None])<EOL>plt.title(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if 
saveAs:<EOL><INDENT>print(\"<STR_LIT>\",os.path.abspath(saveAs))<EOL>plt.savefig(os.path.abspath(saveAs),dpi=dpi)<EOL>return<EOL><DEDENT>if show:<EOL><INDENT>plot.show()<EOL><DEDENT>", "docstring": "automatically generate a single representative image for an ABF.\nIf saveAs is given (full path of a jpg of png file), the image will be saved.\nOtherwise, the image will pop up in a matplotlib window.", "id": "f11353:m1"}
{"signature": "def analyzeSweep(abf,sweep,m1=None,m2=None,plotToo=False):", "body": "abf.setsweep(sweep)<EOL>if m1 is None: m1=<NUM_LIT:0><EOL>else: m1=m1*abf.pointsPerSec<EOL>if m2 is None: m2=-<NUM_LIT:1><EOL>else: m2=m2*abf.pointsPerSec<EOL>Yorig=abf.sweepY[int(m1):int(m2)]<EOL>X=np.arange(len(Yorig))/abf.pointsPerSec<EOL><INDENT>Klpf=kernel_gaussian(size=abf.pointsPerMs*<NUM_LIT:10>,forwardOnly=True)<EOL>Ylpf=np.convolve(Yorig,Klpf,mode='<STR_LIT>')<EOL>Y=Ylpf <EOL><DEDENT>Kmb=kernel_gaussian(size=abf.pointsPerMs*<NUM_LIT:10>,forwardOnly=True)<EOL>Ymb=np.convolve(Yorig,Kmb,mode='<STR_LIT>')<EOL>Y=Yorig-Ymb <EOL>thresh=<NUM_LIT:5> <EOL>hitPos=np.where(Y>thresh)[<NUM_LIT:0>] <EOL>hitNeg=np.where(Y<-thresh)[<NUM_LIT:0>] <EOL>hitPos=np.concatenate((hitPos,[len(Y)-<NUM_LIT:1>])) <EOL>hitNeg=np.concatenate((hitNeg,[len(Y)-<NUM_LIT:1>])) <EOL>hitsPos=hitPos[np.where(np.abs(np.diff(hitPos))><NUM_LIT:10>)[<NUM_LIT:0>]] <EOL>hitsNeg=hitNeg[np.where(np.abs(np.diff(hitNeg))><NUM_LIT:10>)[<NUM_LIT:0>]] <EOL>hitsNeg=hitsNeg[<NUM_LIT:1>:] <EOL>if plotToo:<EOL><INDENT>plt.figure(figsize=(<NUM_LIT:10>,<NUM_LIT:5>))<EOL>ax1=plt.subplot(<NUM_LIT>)<EOL>plt.title(\"<STR_LIT>\"%(sweep,len(hitsPos),len(hitsNeg)))<EOL>plt.ylabel(\"<STR_LIT>\")<EOL>plt.grid()<EOL>plt.plot(X,Yorig,color='<STR_LIT:k>',alpha=<NUM_LIT>)<EOL>for hit in hitsPos:<EOL><INDENT>plt.plot(X[hit],Yorig[hit]+<NUM_LIT:20>,'<STR_LIT>',ms=<NUM_LIT:20>,alpha=<NUM_LIT>)<EOL><DEDENT>for hit in 
hitsNeg:<EOL><INDENT>plt.plot(X[hit],Yorig[hit]-<NUM_LIT:20>,'<STR_LIT>',ms=<NUM_LIT:20>,alpha=<NUM_LIT>)<EOL><DEDENT>plt.margins(<NUM_LIT:0>,<NUM_LIT>)<EOL>plt.subplot(<NUM_LIT>,sharex=ax1)<EOL>plt.title(\"<STR_LIT>\")<EOL>plt.ylabel(\"<STR_LIT>\")<EOL>plt.grid()<EOL>plt.axhline(thresh,color='<STR_LIT:r>',ls='<STR_LIT>',alpha=<NUM_LIT>,lw=<NUM_LIT:3>)<EOL>plt.axhline(-thresh,color='<STR_LIT:r>',ls='<STR_LIT>',alpha=<NUM_LIT>,lw=<NUM_LIT:3>)<EOL>plt.plot(X,Y,color='<STR_LIT:b>',alpha=<NUM_LIT>)<EOL>plt.axis([X[<NUM_LIT:0>],X[-<NUM_LIT:1>],-thresh*<NUM_LIT>,thresh*<NUM_LIT>])<EOL>plt.tight_layout()<EOL>if type(plotToo) is str and os.path.isdir(plotToo):<EOL><INDENT>print('<STR_LIT>'%(plotToo,sweep))<EOL>plt.savefig(plotToo+\"<STR_LIT>\"%sweep)<EOL><DEDENT>else:<EOL><INDENT>plt.show()<EOL><DEDENT>plt.close('<STR_LIT:all>')<EOL><DEDENT>return [len(hitsPos),len(hitsNeg)]<EOL>", "docstring": "m1 and m2, if given, are in seconds.\nreturns [# EPSCs, # IPSCs]", "id": "f11366:m7"}
{"signature": "def phasicTonic(self,m1=None,m2=None,chunkMs=<NUM_LIT:50>,<EOL>quietPercentile=<NUM_LIT:10>,histResolution=<NUM_LIT:1>):", "body": "<EOL>m1=<NUM_LIT:0> if m1 is None else m1*self.pointsPerSec<EOL>m2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSec<EOL>m1,m2=int(m1),int(m2)<EOL>padding=<NUM_LIT:200> <EOL>chunkPoints=int(chunkMs*self.pointsPerMs)<EOL>histBins=int((padding*<NUM_LIT:2>)/histResolution)<EOL>Y=self.sweepY[m1:m2]<EOL>hist,bins=np.histogram(Y,bins=<NUM_LIT:2>*padding)<EOL>Yoffset=bins[np.where(hist==max(hist))[<NUM_LIT:0>][<NUM_LIT:0>]]<EOL>Y=Y-Yoffset <EOL>nChunks=int(len(Y)/chunkPoints)<EOL>hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))<EOL>chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))<EOL>variances=np.ptp(chunks,axis=<NUM_LIT:1>)<EOL>percentiles=np.empty(len(variances))<EOL>for i,variance in enumerate(variances):<EOL><INDENT>percentiles[i]=sorted(variances).index(variance)/len(variances)*<NUM_LIT:100><EOL><DEDENT>blData=chunks[np.where(percentiles<=quietPercentile)[<NUM_LIT:0>]].flatten()<EOL>blHist,blBins=np.histogram(blData,bins=histBins,range=(-padding,padding))<EOL>blHist=blHist/max(blHist)*max(hist)<EOL>diff=hist-blHist<EOL>return diff/abf.pointsPerSec<EOL>", "docstring": "chunkMs should be ~50 ms or greater.\nbin sizes must be equal to or multiples of the data resolution.\ntransients smaller than the expected RMS will be silenced.", "id": "f11378:c0:m0"}
{"signature": "def quietParts(data,percentile=<NUM_LIT:10>):", "body": "nChunks=int(len(Y)/CHUNK_POINTS)<EOL>chunks=np.reshape(Y[:nChunks*CHUNK_POINTS],(nChunks,CHUNK_POINTS))<EOL>variances=np.var(chunks,axis=<NUM_LIT:1>)<EOL>percentiles=np.empty(len(variances))<EOL>for i,variance in enumerate(variances):<EOL><INDENT>percentiles[i]=sorted(variances).index(variance)/len(variances)*<NUM_LIT:100><EOL><DEDENT>selected=chunks[np.where(percentiles<=percentile)[<NUM_LIT:0>]].flatten()<EOL>return selected<EOL>", "docstring": "Given some data (Y) break it into chunks and return just the quiet ones.\nReturns data where the variance for its chunk size is below the given percentile.\nCHUNK_POINTS should be adjusted so it's about 10ms of data.", "id": "f11379:m0"}
{"signature": "def quietParts():", "body": "", "docstring": "Given some data (Y) break it into chunks and return just the quiet ones.", "id": "f11382:m3"}
{"signature": "def plot_shaded_data(X,Y,variances,varianceX):", "body": "plt.plot(X,Y,color='<STR_LIT:k>',lw=<NUM_LIT:2>)<EOL>nChunks=int(len(Y)/CHUNK_POINTS)<EOL>for i in range(<NUM_LIT:0>,<NUM_LIT:100>,PERCENT_STEP):<EOL><INDENT>varLimitLow=np.percentile(variances,i)<EOL>varLimitHigh=np.percentile(variances,i+PERCENT_STEP)<EOL>varianceIsAboveMin=np.where(variances>=varLimitLow)[<NUM_LIT:0>]<EOL>varianceIsBelowMax=np.where(variances<=varLimitHigh)[<NUM_LIT:0>]<EOL>varianceIsRange=[chunkNumber for chunkNumber in range(nChunks)if chunkNumber in varianceIsAboveMinand chunkNumber in varianceIsBelowMax]<EOL>for chunkNumber in varianceIsRange:<EOL><INDENT>t1=chunkNumber*CHUNK_POINTS/POINTS_PER_SEC<EOL>t2=t1+CHUNK_POINTS/POINTS_PER_SEC<EOL>plt.axvspan(t1,t2,alpha=<NUM_LIT>,color=COLORMAP(i/<NUM_LIT:100>),lw=<NUM_LIT:0>)<EOL><DEDENT><DEDENT>", "docstring": "plot X and Y data, then shade its background by variance.", "id": "f11382:m0"}
{"signature": "def phasicTonic(self,m1=None,m2=None,chunkMs=<NUM_LIT:50>,quietPercentile=<NUM_LIT:10>,<EOL>histResolution=<NUM_LIT>,plotToo=False,rmsExpected=<NUM_LIT:5>):", "body": "<EOL>m1=<NUM_LIT:0> if m1 is None else m1*self.pointsPerSec<EOL>m2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSec<EOL>m1,m2=int(m1),int(m2)<EOL>padding=<NUM_LIT:200> <EOL>chunkPoints=int(chunkMs*self.pointsPerMs)<EOL>histBins=int((padding*<NUM_LIT:2>)/histResolution)<EOL>Y=self.sweepY[m1:m2]<EOL>hist,bins=np.histogram(Y,bins=<NUM_LIT:2>*padding)<EOL>Yoffset=bins[np.where(hist==max(hist))[<NUM_LIT:0>][<NUM_LIT:0>]]<EOL>Y=Y-Yoffset <EOL>nChunks=int(len(Y)/chunkPoints)<EOL>hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))<EOL>hist=hist/len(Y) <EOL>Xs=bins[<NUM_LIT:1>:]<EOL>chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))<EOL>variances=np.var(chunks,axis=<NUM_LIT:1>)<EOL>percentiles=np.empty(len(variances))<EOL>for i,variance in enumerate(variances):<EOL><INDENT>percentiles[i]=sorted(variances).index(variance)/len(variances)*<NUM_LIT:100><EOL><DEDENT>blData=chunks[np.where(percentiles<=quietPercentile)[<NUM_LIT:0>]].flatten()<EOL>sigma=np.sqrt(np.var(blData))<EOL>center=np.average(blData)+histResolution/<NUM_LIT:2><EOL>blCurve=mlab.normpdf(Xs,center,sigma)<EOL>blCurve=blCurve*max(hist)/max(blCurve)<EOL>diff=hist-blCurve<EOL>ignrCenter=len(Xs)/<NUM_LIT:2><EOL>ignrPad=rmsExpected/histResolution<EOL>ignr1,ignt2=int(ignrCenter-ignrPad),int(ignrCenter+ignrPad)<EOL>diff[ignr1:ignt2]=<NUM_LIT:0><EOL>return diff/len(Y)*abf.pointsPerSec<EOL>", "docstring": "chunkMs should be ~50 ms or greater.\nbin sizes must be equal to or multiples of the data resolution.\ntransients smaller than the expected RMS will be silenced.", "id": "f11387:c0:m0"}
{"signature": "def quietParts(data,percentile=<NUM_LIT:10>):", "body": "nChunks=int(len(Y)/CHUNK_POINTS)<EOL>chunks=np.reshape(Y[:nChunks*CHUNK_POINTS],(nChunks,CHUNK_POINTS))<EOL>variances=np.var(chunks,axis=<NUM_LIT:1>)<EOL>percentiles=np.empty(len(variances))<EOL>for i,variance in enumerate(variances):<EOL><INDENT>percentiles[i]=sorted(variances).index(variance)/len(variances)*<NUM_LIT:100><EOL><DEDENT>selected=chunks[np.where(percentiles<=percentile)[<NUM_LIT:0>]].flatten()<EOL>return selected<EOL>", "docstring": "Given some data (Y) break it into chunks and return just the quiet ones.\nReturns data where the variance for its chunk size is below the given percentile.\nCHUNK_POINTS should be adjusted so it's about 10ms of data.", "id": "f11388:m0"}
{"signature": "def phasicTonic(self,m1=None,m2=None,chunkMs=<NUM_LIT:50>,quietPercentile=<NUM_LIT:10>,<EOL>histResolution=<NUM_LIT>,plotToo=False):", "body": "<EOL>m1=<NUM_LIT:0> if m1 is None else m1*self.pointsPerSec<EOL>m2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSec<EOL>m1,m2=int(m1),int(m2)<EOL>padding=<NUM_LIT:200> <EOL>chunkPoints=int(chunkMs*self.pointsPerMs)<EOL>histBins=int((padding*<NUM_LIT:2>)/histResolution)<EOL>Y=self.sweepY[m1:m2]<EOL>hist,bins=np.histogram(Y,bins=<NUM_LIT:2>*padding)<EOL>Yoffset=bins[np.where(hist==max(hist))[<NUM_LIT:0>][<NUM_LIT:0>]]<EOL>Y=Y-Yoffset <EOL>nChunks=int(len(Y)/chunkPoints)<EOL>hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))<EOL>Xs=bins[<NUM_LIT:1>:]<EOL>chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))<EOL>variances=np.var(chunks,axis=<NUM_LIT:1>)<EOL>percentiles=np.empty(len(variances))<EOL>for i,variance in enumerate(variances):<EOL><INDENT>percentiles[i]=sorted(variances).index(variance)/len(variances)*<NUM_LIT:100><EOL><DEDENT>blData=chunks[np.where(percentiles<=quietPercentile)[<NUM_LIT:0>]].flatten()<EOL>sigma=np.sqrt(np.var(blData))<EOL>center=np.average(blData)+histResolution/<NUM_LIT:2><EOL>blCurve=mlab.normpdf(Xs,center,sigma)<EOL>blCurve=blCurve*max(hist)/max(blCurve)<EOL>diff=hist-blCurve<EOL>if 
plotToo:<EOL><INDENT>plt.figure(figsize=(<NUM_LIT:15>,<NUM_LIT:5>))<EOL>plt.plot(Y)<EOL>plt.figure(figsize=(<NUM_LIT:7>,<NUM_LIT:7>))<EOL>ax1=plt.subplot(<NUM_LIT>)<EOL>plt.title(abf.ID+\"<STR_LIT>\")<EOL>plt.ylabel(\"<STR_LIT>\")<EOL>plt.plot(Xs,hist,'<STR_LIT:->',alpha=<NUM_LIT>,color='<STR_LIT:b>',lw=<NUM_LIT:3>)<EOL>plt.plot(Xs,blCurve,lw=<NUM_LIT:3>,alpha=<NUM_LIT>,color='<STR_LIT:r>')<EOL>plt.margins(<NUM_LIT:0>,<NUM_LIT>)<EOL>plt.subplot(<NUM_LIT>,sharex=ax1)<EOL>plt.title(\"<STR_LIT>\")<EOL>plt.ylabel(\"<STR_LIT>\")<EOL>plt.xlabel(\"<STR_LIT>\"%abf.units)<EOL>plt.plot(Xs,diff,'<STR_LIT:->',alpha=<NUM_LIT>,color='<STR_LIT:b>',lw=<NUM_LIT:3>)<EOL>plt.axhline(<NUM_LIT:0>,lw=<NUM_LIT:3>,alpha=<NUM_LIT>,color='<STR_LIT:r>')<EOL>plt.axvline(<NUM_LIT:0>,lw=<NUM_LIT:3>,alpha=<NUM_LIT>,color='<STR_LIT:k>')<EOL>plt.margins(<NUM_LIT:0>,<NUM_LIT>)<EOL>plt.axis([-<NUM_LIT:50>,<NUM_LIT:50>,None,None])<EOL>plt.tight_layout()<EOL>plt.show()<EOL><DEDENT>return [Xs,diff]<EOL>", "docstring": "let's keep the chunkMs as high as we reasonably can. 50ms is good.\nThings get flakey at lower numbers like 10ms.\n\nIMPORTANT! for this to work, prevent 0s from averaging in, so keep\nbin sizes well above the data resolution.", "id": "f11390:c0:m0"}
{"signature": "def quietParts(data,percentile=<NUM_LIT:10>):", "body": "nChunks=int(len(Y)/CHUNK_POINTS)<EOL>chunks=np.reshape(Y[:nChunks*CHUNK_POINTS],(nChunks,CHUNK_POINTS))<EOL>variances=np.var(chunks,axis=<NUM_LIT:1>)<EOL>percentiles=np.empty(len(variances))<EOL>for i,variance in enumerate(variances):<EOL><INDENT>percentiles[i]=sorted(variances).index(variance)/len(variances)*<NUM_LIT:100><EOL><DEDENT>selected=chunks[np.where(percentiles<=percentile)[<NUM_LIT:0>]].flatten()<EOL>return selected<EOL>", "docstring": "Given some data (Y) break it into chunks and return just the quiet ones.\nReturns data where the variance for its chunk size is below the given percentile.\nCHUNK_POINTS should be adjusted so it's about 10ms of data.", "id": "f11391:m0"}
{"signature": "def ndist(data,Xs):", "body": "sigma=np.sqrt(np.var(data))<EOL>center=np.average(data)<EOL>curve=mlab.normpdf(Xs,center,sigma)<EOL>curve*=len(data)*HIST_RESOLUTION<EOL>return curve<EOL>", "docstring": "given some data and a list of X posistions, return the normal\ndistribution curve as a Y point at each of those Xs.", "id": "f11391:m1"}
{"signature": "def ndist(data,Xs):", "body": "sigma=np.sqrt(np.var(data))<EOL>center=np.average(data)<EOL>curve=mlab.normpdf(Xs,center,sigma)<EOL>curve*=len(data)*HIST_RESOLUTION<EOL>return curve<EOL>", "docstring": "given some data and a list of X posistions, return the normal\ndistribution curve as a Y point at each of those Xs.", "id": "f11392:m1"}
{"signature": "def phasicTonic(self,m1=None,m2=None,chunkMs=<NUM_LIT:50>,<EOL>quietPercentile=<NUM_LIT:10>,histResolution=<NUM_LIT:1>):", "body": "<EOL>m1=<NUM_LIT:0> if m1 is None else m1*self.pointsPerSec<EOL>m2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSec<EOL>m1,m2=int(m1),int(m2)<EOL>padding=<NUM_LIT:200> <EOL>chunkPoints=int(chunkMs*self.pointsPerMs)<EOL>histBins=int((padding*<NUM_LIT:2>)/histResolution)<EOL>Y=self.sweepYfilteredHisto()[m1:m2]<EOL>hist,bins=np.histogram(Y,bins=<NUM_LIT:2>*padding)<EOL>nChunks=int(len(Y)/chunkPoints)<EOL>hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))<EOL>chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))<EOL>variances=np.ptp(chunks,axis=<NUM_LIT:1>)<EOL>percentiles=np.empty(len(variances))<EOL>for i,variance in enumerate(variances):<EOL><INDENT>percentiles[i]=sorted(variances).index(variance)/len(variances)*<NUM_LIT:100><EOL><DEDENT>blData=chunks[np.where(percentiles<=quietPercentile)[<NUM_LIT:0>]].flatten()<EOL>blHist,blBins=np.histogram(blData,bins=histBins,range=(-padding,padding))<EOL>blHist=blHist/max(blHist)*max(hist)<EOL>diff=hist-blHist<EOL>return diff/abf.pointsPerSec<EOL>", "docstring": "chunkMs should be ~50 ms or greater.\nbin sizes must be equal to or multiples of the data resolution.\ntransients smaller than the expected RMS will be silenced.", "id": "f11395:c0:m1"}
{"signature": "def phasicTonic(self,m1=None,m2=None,chunkMs=<NUM_LIT:10>,<EOL>quietPercentile=<NUM_LIT:10>,histResolution=<NUM_LIT>,<EOL>plotToo=False):", "body": "<EOL>m1=<NUM_LIT:0> if m1 is None else m1*self.pointsPerSec<EOL>m2=len(abf.sweepY) if m2 is None else m2*self.pointsPerSec<EOL>m1,m2=int(m1),int(m2)<EOL>self.kernel=self.kernel_gaussian(<NUM_LIT>)<EOL>Y=self.sweepYsmartbase()<EOL>padding=<NUM_LIT:50> <EOL>chunkPoints=int(chunkMs*self.pointsPerMs)<EOL>nChunks=int(len(Y)/chunkPoints)<EOL>histBins=int((padding*<NUM_LIT:2>)/histResolution)<EOL>hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))<EOL>hist=hist.astype(np.float)<EOL>chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))<EOL>variances=np.var(chunks,axis=<NUM_LIT:1>)<EOL>percentiles=np.empty(len(variances))<EOL>for i,variance in enumerate(variances):<EOL><INDENT>percentiles[i]=sorted(variances).index(variance)/len(variances)*<NUM_LIT:100><EOL><DEDENT>blData=chunks[np.where(percentiles<=quietPercentile)[<NUM_LIT:0>]].flatten()<EOL>sigma=np.sqrt(np.var(blData))<EOL>center=np.average(blData)<EOL>blCurve=mlab.normpdf(bins[:-<NUM_LIT:1>],center,sigma)<EOL>blCurve=blCurve/max(blCurve)<EOL>blCurve=blCurve*max(hist)<EOL>blankPoints=int(<NUM_LIT:2>*sigma/histResolution)<EOL>centerI=int(len(hist)/<NUM_LIT:2>)<EOL>for i in range(blankPoints):<EOL><INDENT>hist[centerI-i]=np.nan<EOL>hist[centerI+i]=np.nan<EOL><DEDENT>diff=hist-blCurve<EOL>if 
plotToo:<EOL><INDENT>plt.figure(figsize=(<NUM_LIT:7>,<NUM_LIT:7>))<EOL>plt.subplot(<NUM_LIT>)<EOL>plt.title(abf.ID+\"<STR_LIT>\")<EOL>plt.ylabel(\"<STR_LIT>\")<EOL>plt.plot(bins[:-<NUM_LIT:1>],hist,'<STR_LIT:.>',alpha=<NUM_LIT>)<EOL>plt.plot(bins[:-<NUM_LIT:1>],blCurve,lw=<NUM_LIT:3>,alpha=<NUM_LIT>,color='<STR_LIT:r>')<EOL>plt.margins(<NUM_LIT:0>,<NUM_LIT>)<EOL>plt.subplot(<NUM_LIT>)<EOL>plt.title(\"<STR_LIT>\")<EOL>plt.ylabel(\"<STR_LIT>\")<EOL>plt.xlabel(\"<STR_LIT>\"%abf.units)<EOL>plt.plot(bins[:-<NUM_LIT:1>],diff,'<STR_LIT:.>',alpha=<NUM_LIT>,color='<STR_LIT:b>')<EOL>plt.axhline(<NUM_LIT:0>,lw=<NUM_LIT:3>,alpha=<NUM_LIT>,color='<STR_LIT:r>')<EOL>plt.axvline(<NUM_LIT:0>,lw=<NUM_LIT:3>,alpha=<NUM_LIT>,color='<STR_LIT:r>')<EOL>plt.margins(<NUM_LIT:0>,<NUM_LIT>)<EOL>plt.tight_layout()<EOL>plt.show()<EOL><DEDENT>", "docstring": "IMPORTANT: do this first!!\nself.kernel=self.kernel_gaussian(250)", "id": "f11397:c0:m0"}
{"signature": "def abf_read_header(fname, saveHeader=True):", "body": "<EOL>header={} <EOL>sections={} <EOL>strings=[] <EOL>protocol = {} <EOL>tags=[] <EOL>adcs=[] <EOL>dacs=[] <EOL>digitalOutputs=[] <EOL>config={} <EOL>f=open(fname,'<STR_LIT:rb>')<EOL>config[\"<STR_LIT>\"]=os.path.abspath(fname) <EOL>config[\"<STR_LIT>\"]=os.path.basename(fname)[:-<NUM_LIT:4>] <EOL>for key, byte_location, fmt in headerDescriptionV2:<EOL><INDENT>header[key]=fread(f,byte_location,fmt)<EOL><DEDENT>header['<STR_LIT>']=header['<STR_LIT>'].decode()<EOL>for sectionNumber, sectionName in enumerate(sectionNames):<EOL><INDENT>uBlockIndex, uBytes, llNumEntries = fread(f,<NUM_LIT>+sectionNumber*<NUM_LIT:16>,\"<STR_LIT>\")<EOL>sections[sectionName] = [uBlockIndex,uBytes,llNumEntries]<EOL><DEDENT>config[\"<STR_LIT>\"]=header['<STR_LIT>']/<NUM_LIT:1000>/<NUM_LIT:1000><EOL>byte_location = sections['<STR_LIT>'][<NUM_LIT:0>]*BLOCKSIZE<EOL>string_size = sections['<STR_LIT>'][<NUM_LIT:1>]<EOL>strings_data = fread(f,byte_location,structFormat=None,nBytes=string_size)<EOL>for key in [b'<STR_LIT>', b'<STR_LIT>', b'<STR_LIT>', b'<STR_LIT>', b'<STR_LIT>']:<EOL><INDENT>if key in strings_data:<EOL><INDENT>for line in strings_data.split(key)[<NUM_LIT:1>].split(b'<STR_LIT:\\x00>')[<NUM_LIT:1>:-<NUM_LIT:1>]:<EOL><INDENT>strings.append(line.decode())<EOL><DEDENT>config[\"<STR_LIT>\"]=strings[<NUM_LIT:0>]<EOL>config[\"<STR_LIT>\"]=strings[<NUM_LIT:1>]<EOL>config[\"<STR_LIT>\"]=strings[<NUM_LIT:2>::<NUM_LIT:2>]<EOL>config[\"<STR_LIT>\"]=strings[<NUM_LIT:3>::<NUM_LIT:2>]<EOL>break<EOL><DEDENT><DEDENT>for ADCsection in range(sections['<STR_LIT>'][<NUM_LIT:2>]):<EOL><INDENT>thisADC={}<EOL>byte_location=sections['<STR_LIT>'][<NUM_LIT:0>]*BLOCKSIZE+sections['<STR_LIT>'][<NUM_LIT:1>]*ADCsection<EOL>for key, fmt in 
ADCInfoDescription:<EOL><INDENT>thisADC[key]=fread(f,byte_location,fmt)<EOL>byte_location+=struct.calcsize(fmt)<EOL><DEDENT>adcs.append(thisADC)<EOL><DEDENT>byte_location=sections['<STR_LIT>'][<NUM_LIT:0>]*BLOCKSIZE<EOL>for key, fmt in protocolInfoDescription:<EOL><INDENT>protocol[key]=fread(f,byte_location,fmt)<EOL>byte_location+=struct.calcsize(fmt)<EOL><DEDENT>protocol.pop('<STR_LIT>', None) <EOL>byte_location=sections['<STR_LIT>'][<NUM_LIT:0>]*BLOCKSIZE<EOL>for i in range(sections['<STR_LIT>'][<NUM_LIT:2>]):<EOL><INDENT>thisTag=[]<EOL>for key, fmt in TagInfoDescription:<EOL><INDENT>val=fread(f,byte_location,fmt)<EOL>if type(val) is bytes:<EOL><INDENT>val=val.decode().strip()<EOL><DEDENT>thisTag.append(val)<EOL>byte_location+=struct.calcsize(fmt)<EOL><DEDENT>tags.append(thisTag)<EOL><DEDENT>for dacNumber in range(sections['<STR_LIT>'][<NUM_LIT:2>]):<EOL><INDENT>thisDAC={}<EOL>byte_location=sections['<STR_LIT>'][<NUM_LIT:0>]*BLOCKSIZE+sections['<STR_LIT>'][<NUM_LIT:1>]*dacNumber                              <EOL>for key, fmt in DACInfoDescription:<EOL><INDENT>thisDAC[key]=fread(f,byte_location,fmt)<EOL>byte_location+=struct.calcsize(fmt)<EOL><DEDENT>thisDAC.pop('<STR_LIT>', None) <EOL>if thisDAC['<STR_LIT>']==<NUM_LIT:0>: continue <EOL>dacs.append(thisDAC)<EOL><DEDENT>epochs=[]<EOL>for epochNumber in range(sections['<STR_LIT>'][<NUM_LIT:2>]):<EOL><INDENT>thisEpoch={}<EOL>byte_location=sections['<STR_LIT>'][<NUM_LIT:0>]*BLOCKSIZE+sections['<STR_LIT>'][<NUM_LIT:1>]*epochNumber<EOL>for key, fmt in EpochInfoPerDACDescription:<EOL><INDENT>thisEpoch[key]=fread(f,byte_location,fmt)<EOL>byte_location+=struct.calcsize(fmt)<EOL><DEDENT>thisEpoch.pop('<STR_LIT>', None) <EOL>epochs.append(thisEpoch)<EOL><DEDENT>byte_location=sections['<STR_LIT>'][<NUM_LIT:0>]*BLOCKSIZE<EOL>for epochNumber in range(sections['<STR_LIT>'][<NUM_LIT:0>]):<EOL><INDENT>if epochNumber>=len(epochs):<EOL><INDENT>break <EOL><DEDENT>thisEpoch=epochs[epochNumber]<EOL>for key, fmt in 
EpochSectionDescription:<EOL><INDENT>val=fread(f,byte_location,fmt)<EOL>if key=='<STR_LIT>':<EOL><INDENT>val=format(val, '<STR_LIT:b>').rjust(<NUM_LIT:8>,'<STR_LIT:0>') <EOL><DEDENT>thisEpoch[key]=val<EOL>byte_location+=struct.calcsize(fmt)<EOL><DEDENT>thisEpoch.pop('<STR_LIT>', None) <EOL>epochs[epochNumber]=thisEpoch<EOL><DEDENT>f.close()<EOL>config[\"<STR_LIT>\"]=float(\"<STR_LIT>\".join([str(x) for x in header['<STR_LIT>']]))/<NUM_LIT:100> <EOL>config['<STR_LIT>']=config['<STR_LIT>'][:len(adcs)] <EOL>config['<STR_LIT>']=config['<STR_LIT>'][:len(adcs)] <EOL>config['<STR_LIT>']=[x[:<NUM_LIT:2>] for x in tags]<EOL>config['<STR_LIT>']=sections['<STR_LIT>'][<NUM_LIT:2>]<EOL>YY = int(header['<STR_LIT>'] / <NUM_LIT>)<EOL>MM = int((header['<STR_LIT>'] - YY * <NUM_LIT>) / <NUM_LIT:100>)<EOL>DD = int(header['<STR_LIT>'] - YY * <NUM_LIT> - MM * <NUM_LIT:100>)<EOL>hh = int(header['<STR_LIT>'] / <NUM_LIT> / <NUM_LIT>)<EOL>mm = int((header['<STR_LIT>'] / <NUM_LIT> - hh * <NUM_LIT>) / <NUM_LIT>)<EOL>ss = header['<STR_LIT>'] / <NUM_LIT> - hh * <NUM_LIT> - mm * <NUM_LIT><EOL>ms = int((ss%<NUM_LIT:1>)*<NUM_LIT>)<EOL>ss = int(ss)<EOL>config['<STR_LIT>'] = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)<EOL>", "docstring": "Practice pulling data straight out of an ABF's binary header. Support only ABF2 (ClampEx an ClampFit 10).\nUse only native python libraries. Strive for simplicity and readability (to promote language portability).\nThis was made by Scott Harden after a line-by-line analysis of axonrawio.py from the neo io library.\nUnlike NeoIO's format, I'm going to try to prevent nested dictionaries to keep things simple.", "id": "f11403:m1"}
{"signature": "def detect(abf,sweep=None,threshold_upslope=<NUM_LIT:50>,dT=<NUM_LIT>,saveToo=True):", "body": "if type(sweep) is int:<EOL><INDENT>sweeps=[sweep]<EOL><DEDENT>else:<EOL><INDENT>sweeps=list(range(abf.sweeps))<EOL><DEDENT>timeStart=time.clock()<EOL>abf.APs=[None]*abf.sweeps<EOL>abf.SAP=[None]*abf.sweeps<EOL>for sweep in sweeps:<EOL><INDENT>abf.setSweep(sweep)<EOL>Y=abf.dataY<EOL>dI = int(dT/<NUM_LIT:1000>*abf.rate) <EOL>dY = (Y[dI:]-Y[:-dI])*(abf.rate/<NUM_LIT:1000>/dI) <EOL>Is = cm.where_cross(dY,threshold_upslope) <EOL>abf.APs[sweep]=[]<EOL>for i in range(len(Is)): <EOL><INDENT>try:<EOL><INDENT>AP=analyzeAP(Y,dY,Is[i],abf.rate) <EOL>if AP:<EOL><INDENT>AP[\"<STR_LIT>\"]=sweep<EOL>AP[\"<STR_LIT>\"]=sweep*abf.sweepInterval*abf.rate*+AP[\"<STR_LIT>\"]<EOL>AP[\"<STR_LIT>\"]=sweep*abf.sweepInterval+AP[\"<STR_LIT>\"]<EOL>AP[\"<STR_LIT>\"]=np.nan <EOL>if len(abf.APs[sweep]):<EOL><INDENT>AP[\"<STR_LIT>\"]=<NUM_LIT:1>/(AP[\"<STR_LIT>\"]-abf.APs[sweep][-<NUM_LIT:1>][\"<STR_LIT>\"])<EOL><DEDENT>if AP[\"<STR_LIT>\"] is np.nan or AP[\"<STR_LIT>\"]<<NUM_LIT>: <EOL><INDENT>abf.APs[sweep].append(AP)<EOL><DEDENT><DEDENT><DEDENT>except:<EOL><INDENT>print(\"<STR_LIT>\"%(i+<NUM_LIT:1>,len(Is)))<EOL><DEDENT><DEDENT>analyzeAPgroup(abf) <EOL><DEDENT>abf.APs=cm.matrixfromDicts(abf.APs)<EOL>abf.SAP=cm.matrixfromDicts(abf.SAP)<EOL>print(\"<STR_LIT>\"%(len(cm.dictFlat(abf.APs)),(time.clock()-timeStart)*<NUM_LIT:1000>))<EOL>if saveToo:<EOL><INDENT>abf.saveThing(abf.APs,\"<STR_LIT>\")<EOL>abf.saveThing(abf.SAP,\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "An AP will be detected by a upslope that exceeds 50V/s. Analyzed too.\n    if type(sweep) is int, graph int(sweep)\n    if sweep==None, process all sweeps sweep.", "id": "f11404:m0"}
{"signature": "def check_sweep(abf,sweep=None,dT=<NUM_LIT>):", "body": "if abf.APs is None:<EOL><INDENT>APs=[]<EOL><DEDENT>else:<EOL><INDENT>APs=cm.matrixToDicts(abf.APs)<EOL><DEDENT>if sweep is None or len(sweep)==<NUM_LIT:0>: <EOL><INDENT>for sweepNum in range(abf.sweeps):<EOL><INDENT>foundInThisSweep=<NUM_LIT:0><EOL>for AP in APs:<EOL><INDENT>if AP[\"<STR_LIT>\"]==sweepNum:<EOL><INDENT>foundInThisSweep+=<NUM_LIT:1><EOL><DEDENT>if foundInThisSweep>=<NUM_LIT:5>:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>sweep=sweepNum<EOL><DEDENT>abf.setSweep(sweep)<EOL>Y=abf.dataY<EOL>dI = int(dT/<NUM_LIT:1000>*abf.rate) <EOL>dY = (Y[dI:]-Y[:-dI])*(abf.rate/<NUM_LIT:1000>/dI) <EOL>pylab.figure(figsize=(<NUM_LIT:12>,<NUM_LIT:6>))<EOL>ax=pylab.subplot(<NUM_LIT>)<EOL>pylab.title(\"<STR_LIT>\"%abf.currentSweep)<EOL>pylab.ylabel(\"<STR_LIT>\")<EOL>pylab.plot(Y,'<STR_LIT:->',alpha=<NUM_LIT>)<EOL>for AP in APs:<EOL><INDENT>if not AP[\"<STR_LIT>\"]==sweep:<EOL><INDENT>continue<EOL><DEDENT>pylab.axvline(AP[\"<STR_LIT>\"],alpha=<NUM_LIT>,color='<STR_LIT:r>')<EOL>pylab.plot(AP[\"<STR_LIT>\"],AP[\"<STR_LIT>\"],'<STR_LIT:.>',alpha=<NUM_LIT>,ms=<NUM_LIT:20>,color='<STR_LIT:r>')<EOL>pylab.plot(AP[\"<STR_LIT>\"],AP[\"<STR_LIT>\"],'<STR_LIT:.>',alpha=<NUM_LIT>,ms=<NUM_LIT:20>,color='<STR_LIT:c>')<EOL>pylab.plot([AP[\"<STR_LIT>\"],AP[\"<STR_LIT>\"]],<EOL>[AP[\"<STR_LIT>\"],AP[\"<STR_LIT>\"]],<EOL>'<STR_LIT:->',alpha=<NUM_LIT>,ms=<NUM_LIT:20>,color='<STR_LIT:b>',lw=<NUM_LIT:7>)<EOL>pylab.plot([AP[\"<STR_LIT>\"],AP[\"<STR_LIT>\"]],<EOL>[AP[\"<STR_LIT>\"],AP[\"<STR_LIT>\"]],<EOL>'<STR_LIT:->',lw=<NUM_LIT:5>,alpha=<NUM_LIT>,color='<STR_LIT:g>')<EOL><DEDENT>pylab.subplot(<NUM_LIT>,sharex=ax)<EOL>pylab.ylabel(\"<STR_LIT>\")<EOL>pylab.xlabel(\"<STR_LIT>\"%(abf.rate/<NUM_LIT:1000>))<EOL>pylab.plot(dY,'<STR_LIT:->',alpha=<NUM_LIT>)<EOL>pylab.margins(<NUM_LIT:0>,<NUM_LIT>)<EOL>for AP in APs:<EOL><INDENT>if not 
AP[\"<STR_LIT>\"]==sweep:<EOL><INDENT>continue<EOL><DEDENT>pylab.axvline(AP[\"<STR_LIT>\"],alpha=<NUM_LIT>,color='<STR_LIT:r>')<EOL>pylab.plot(AP[\"<STR_LIT>\"],AP[\"<STR_LIT>\"],'<STR_LIT:.>',alpha=<NUM_LIT>,ms=<NUM_LIT:20>,color='<STR_LIT:g>')<EOL>pylab.plot(AP[\"<STR_LIT>\"],AP[\"<STR_LIT>\"],'<STR_LIT:.>',alpha=<NUM_LIT>,ms=<NUM_LIT:20>,color='<STR_LIT:g>')<EOL>pylab.axis([APs[<NUM_LIT:0>][\"<STR_LIT>\"]-<NUM_LIT:1000>,APs[-<NUM_LIT:1>][\"<STR_LIT>\"]+<NUM_LIT:1000>,None,None])<EOL><DEDENT>", "docstring": "Plotting for an eyeball check of AP detection in the given sweep.", "id": "f11404:m4"}
{"signature": "def analyzeAPgroup(abf=exampleABF,T1=None,T2=None,plotToo=False):", "body": "if T1 is None or T2 is None:<EOL><INDENT>if len(abf.protoSeqX)><NUM_LIT:2>:<EOL><INDENT>T1=abf.protoSeqX[<NUM_LIT:1>]/abf.rate<EOL>T2=abf.protoSeqX[<NUM_LIT:2>]/abf.rate<EOL><DEDENT>else:<EOL><INDENT>T1=<NUM_LIT:0><EOL>T2=abf.sweepLength<EOL><DEDENT><DEDENT>s={} <EOL>s[\"<STR_LIT>\"]=abf.currentSweep<EOL>s[\"<STR_LIT>\"]=abf.protoSeqY[<NUM_LIT:1>]<EOL>APs=[]<EOL>for key in ['<STR_LIT>','<STR_LIT>']:<EOL><INDENT>s[key]=<NUM_LIT:0><EOL><DEDENT>for AP in abf.APs[abf.currentSweep]:<EOL><INDENT>if T1<AP[\"<STR_LIT>\"]<T2:<EOL><INDENT>APs.append(AP)<EOL><DEDENT><DEDENT>s[\"<STR_LIT>\"]=len(APs) <EOL>apTimes=cm.dictVals(APs,'<STR_LIT>')<EOL>if len(APs)><NUM_LIT:1>: <EOL><INDENT>s[\"<STR_LIT>\"]=np.average(apTimes)-T1 <EOL>s[\"<STR_LIT>\"]=s[\"<STR_LIT>\"]/(T2-T1)*<NUM_LIT:100> <EOL>s[\"<STR_LIT>\"]=np.average(apTimes)-APs[<NUM_LIT:0>][\"<STR_LIT>\"] <EOL>s[\"<STR_LIT>\"]=s[\"<STR_LIT>\"]/(APs[-<NUM_LIT:1>][\"<STR_LIT>\"]-APs[<NUM_LIT:0>][\"<STR_LIT>\"])*<NUM_LIT:100> <EOL>s[\"<STR_LIT>\"]=(APs[<NUM_LIT:0>][\"<STR_LIT>\"]-T1)*<NUM_LIT:1000> <EOL>s[\"<STR_LIT>\"]=APs[<NUM_LIT:1>]['<STR_LIT>'] <EOL>s[\"<STR_LIT>\"]=cm.dictAvg(APs[<NUM_LIT:1>:<NUM_LIT:6>],'<STR_LIT>')[<NUM_LIT:0>] <EOL>s[\"<STR_LIT>\"]=APs[-<NUM_LIT:1>]['<STR_LIT>'] <EOL>s[\"<STR_LIT>\"]=cm.dictAvg(APs,'<STR_LIT>')[<NUM_LIT:0>] <EOL>s[\"<STR_LIT>\"]=len(APs)/(T2-T1) <EOL>s[\"<STR_LIT>\"]=cm.dictAvg(APs[-int(len(APs)*<NUM_LIT>):],'<STR_LIT>')[<NUM_LIT:0>] <EOL>s[\"<STR_LIT>\"]=s[\"<STR_LIT>\"]/s[\"<STR_LIT>\"] <EOL>s[\"<STR_LIT>\"]=s[\"<STR_LIT>\"]/s[\"<STR_LIT>\"] <EOL>s[\"<STR_LIT>\"]=s[\"<STR_LIT>\"]/s[\"<STR_LIT>\"] <EOL>s[\"<STR_LIT>\"]=s[\"<STR_LIT>\"]/s[\"<STR_LIT>\"] <EOL>s[\"<STR_LIT>\"]=cm.dictAvg(APs,'<STR_LIT>')[<NUM_LIT:1>]/cm.dictAvg(APs,'<STR_LIT>')[<NUM_LIT:0>] <EOL>s[\"<STR_LIT>\"]=T1<EOL>s[\"<STR_LIT>\"]=T2<EOL><DEDENT>abf.SAP[abf.currentSweep]=s<EOL>", "docstring": "On the current (setSweep()) sweep, 
calculate things like accomodation.\nOnly call directly just for demonstrating how it works by making a graph.\nOr call this if you want really custom T1 and T2 (multiple per sweep)\n  This is called by default with default T1 and T2.\n  Manually call it again for custom.", "id": "f11404:m1"}
{"signature": "def check_AP_group(abf=exampleABF,sweep=<NUM_LIT:0>):", "body": "abf.setSweep(sweep)<EOL>swhlab.plot.new(abf,title=\"<STR_LIT>\"%(abf.currentSweep,abf.protoSeqY[<NUM_LIT:1>]))<EOL>swhlab.plot.sweep(abf)<EOL>SAP=cm.matrixToDicts(abf.SAP[sweep])<EOL>if \"<STR_LIT>\" in SAP.keys():<EOL><INDENT>T1=SAP[\"<STR_LIT>\"]<EOL>T2=SAP[\"<STR_LIT>\"]<EOL>pylab.axvspan(T1/abf.rate,T2/abf.rate,color='<STR_LIT:r>',alpha=<NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>T1=<NUM_LIT:0><EOL>T2=abf.sweepLength<EOL><DEDENT>swhlab.plot.annotate(abf)<EOL>pylab.tight_layout()<EOL>pylab.subplots_adjust(right=<NUM_LIT>)<EOL>pylab.annotate(cm.msgDict(SAP),(<NUM_LIT>,<NUM_LIT>),ha='<STR_LIT:left>',va='<STR_LIT>',<EOL>weight='<STR_LIT>',family='<STR_LIT>',<EOL>xycoords='<STR_LIT>',size=<NUM_LIT:12>,color='<STR_LIT:g>')<EOL>pylab.axis([T1-<NUM_LIT>,T2+<NUM_LIT>,None,None])<EOL>", "docstring": "after running detect() and abf.SAP is populated, this checks it.", "id": "f11404:m2"}
{"signature": "def analyzeAP(Y,dY,I,rate,verbose=False):", "body": "Ims = int(rate/<NUM_LIT:1000>) <EOL>IsToLook=<NUM_LIT:5>*Ims <EOL>upslope=np.max(dY[I:I+IsToLook]) <EOL>upslopeI=np.where(dY[I:I+IsToLook]==upslope)[<NUM_LIT:0>][<NUM_LIT:0>]+I<EOL>I=upslopeI <EOL>downslope=np.min(dY[I:I+IsToLook]) <EOL>downslopeI=np.where(dY[I:I+IsToLook]==downslope)[<NUM_LIT:0>][<NUM_LIT:0>]+I<EOL>peak=np.max(Y[I:I+IsToLook]) <EOL>peakI=np.where(Y[I:I+IsToLook]==peak)[<NUM_LIT:0>][<NUM_LIT:0>]+I <EOL>thresholdI=I-np.where(dY[I:I+IsToLook:--<NUM_LIT:1>]<<NUM_LIT:10>)[<NUM_LIT:0>] <EOL>if not len(thresholdI):<EOL><INDENT>return False<EOL><DEDENT>thresholdI=thresholdI[<NUM_LIT:0>]<EOL>threshold=Y[thresholdI] <EOL>height=peak-threshold <EOL>halfwidthPoint=np.average((threshold,peak))<EOL>halfwidth=np.where(Y[I-IsToLook:I+IsToLook]>halfwidthPoint)[<NUM_LIT:0>]<EOL>if not len(halfwidth):<EOL><INDENT>return False <EOL><DEDENT>halfwidthI1=halfwidth[<NUM_LIT:0>]+I-IsToLook<EOL>halfwidthI2=halfwidth[-<NUM_LIT:1>]+I-IsToLook<EOL>if Y[halfwidthI1-<NUM_LIT:1>]>halfwidthPoint or Y[halfwidthI2+<NUM_LIT:1>]>halfwidthPoint:<EOL><INDENT>return False <EOL><DEDENT>halfwidth=len(halfwidth)/rate*<NUM_LIT:1000> <EOL>riseTime=(peakI-thresholdI)*<NUM_LIT:1000>/rate <EOL>IsToLook=<NUM_LIT:100>*Ims <EOL>AHPchunk=np.diff(Y[downslopeI:downslopeI+IsToLook]) <EOL>AHPI=np.where(AHPchunk><NUM_LIT:0>)[<NUM_LIT:0>]<EOL>if len(AHPI)==<NUM_LIT:0>:<EOL><INDENT>AHPI=np.nan<EOL><DEDENT>else:<EOL><INDENT>AHPI=AHPI[<NUM_LIT:0>]+downslopeI<EOL>AHPchunk=Y[AHPI:AHPI+IsToLook]<EOL>if max(AHPchunk)>threshold: <EOL><INDENT>AHPchunk=AHPchunk[:np.where(AHPchunk>threshold)[<NUM_LIT:0>][<NUM_LIT:0>]]<EOL><DEDENT>if len(AHPchunk):<EOL><INDENT>AHP=np.nanmin(AHPchunk)<EOL>AHPI=np.where(AHPchunk==AHP)[<NUM_LIT:0>][<NUM_LIT:0>]+AHPI<EOL>AHPheight=threshold-AHP <EOL>IsToLook=<NUM_LIT>*Ims <EOL>AHPreturn=np.average((AHP,threshold)) <EOL>AHPreturnI=np.where(Y[AHPI:AHPI+IsToLook]>AHPreturn)[<NUM_LIT:0>]<EOL>if len(AHPreturnI): 
<EOL><INDENT>AHPreturnI=AHPreturnI[<NUM_LIT:0>]+AHPI<EOL>AHPrisetime=(AHPreturnI-AHPI)*<NUM_LIT:2>/rate*<NUM_LIT:1000> <EOL>AHPupslope=AHPheight/AHPrisetime <EOL>AHPreturnFullI=(AHPreturnI-AHPI)*<NUM_LIT:2>+AHPI<EOL><DEDENT>else: <EOL><INDENT>AHPreturnI,AHPrisetime,AHPupslope=np.nan,np.nan,np.nan<EOL>downslope=np.nan<EOL><DEDENT><DEDENT><DEDENT>sweepI,sweepT=I,I/rate <EOL>del IsToLook,I, Y, dY, Ims, AHPchunk, verbose <EOL>return locals()<EOL>", "docstring": "given a sweep and a time point, return the AP array for that AP.\nAPs will be centered in time by their maximum upslope.", "id": "f11404:m3"}
{"signature": "def getAvgBySweep(abf,feature,T0=None,T1=None):", "body": "if T1 is None:<EOL><INDENT>T1=abf.sweepLength<EOL><DEDENT>if T0 is None:<EOL><INDENT>T0=<NUM_LIT:0><EOL><DEDENT>data = [np.empty((<NUM_LIT:0>))]*abf.sweeps<EOL>for AP in cm.dictFlat(cm.matrixToDicts(abf.APs)):<EOL><INDENT>if T0<AP['<STR_LIT>']<T1:<EOL><INDENT>val=AP[feature]<EOL>data[int(AP['<STR_LIT>'])]=np.concatenate((data[int(AP['<STR_LIT>'])],[val]))<EOL><DEDENT><DEDENT>for sweep in range(abf.sweeps):<EOL><INDENT>if len(data[sweep])><NUM_LIT:1> and np.any(data[sweep]):<EOL><INDENT>data[sweep]=np.nanmean(data[sweep])<EOL><DEDENT>elif len(data[sweep])==<NUM_LIT:1>:<EOL><INDENT>data[sweep]=data[sweep][<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>data[sweep]=np.nan<EOL><DEDENT><DEDENT>return data<EOL>", "docstring": "return average of a feature divided by sweep.", "id": "f11404:m12"}
{"signature": "def stats_first(abf):", "body": "msg=\"<STR_LIT>\"<EOL>for sweep in range(abf.sweeps):<EOL><INDENT>for AP in abf.APs[sweep]:<EOL><INDENT>for key in sorted(AP.keys()):<EOL><INDENT>if key[-<NUM_LIT:1>] is \"<STR_LIT:I>\" or key[-<NUM_LIT:2>:] in [\"<STR_LIT>\",\"<STR_LIT>\"]:<EOL><INDENT>continue<EOL><DEDENT>msg+=\"<STR_LIT>\"%(key,AP[key])<EOL><DEDENT>return msg<EOL><DEDENT><DEDENT>", "docstring": "provide all stats on the first AP.", "id": "f11404:m9"}
{"signature": "def gain(abf):", "body": "Ys=np.nan_to_num(swhlab.ap.getAvgBySweep(abf,'<STR_LIT>'))<EOL>Xs=abf.clampValues(abf.dataX[int(abf.protoSeqX[<NUM_LIT:1>]+<NUM_LIT>)])<EOL>swhlab.plot.new(abf,title=\"<STR_LIT>\",xlabel=\"<STR_LIT>\",<EOL>ylabel=\"<STR_LIT>\")<EOL>pylab.plot(Xs,Ys,'<STR_LIT>',ms=<NUM_LIT:20>,alpha=<NUM_LIT>,color='<STR_LIT:b>')<EOL>pylab.axhline(<NUM_LIT:0>,alpha=<NUM_LIT>,lw=<NUM_LIT:2>,color='<STR_LIT:r>',ls=\"<STR_LIT>\")<EOL>pylab.margins(<NUM_LIT>,<NUM_LIT>)<EOL>", "docstring": "easy way to plot a gain function.", "id": "f11405:m2"}
{"signature": "def sweep(ABF,sweep=None,rainbow=True,alpha=None,protocol=False,color='<STR_LIT:b>',<EOL>continuous=False,offsetX=<NUM_LIT:0>,offsetY=<NUM_LIT:0>,minutes=False,<EOL>decimate=None,newFigure=False):", "body": "if len(pylab.get_fignums())==<NUM_LIT:0> or newFigure:<EOL><INDENT>new(ABF,True)<EOL><DEDENT>if offsetY><NUM_LIT:0>:<EOL><INDENT>pylab.grid(None)<EOL><DEDENT>if sweep is None:<EOL><INDENT>sweeps=[ABF.currentSweep]<EOL>if not ABF.currentSweep:<EOL><INDENT>sweeps=[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>elif sweep==\"<STR_LIT:all>\":<EOL><INDENT>sweeps=range(<NUM_LIT:0>,ABF.sweeps)<EOL><DEDENT>elif type(sweep) in [int,float]:<EOL><INDENT>sweeps=[int(sweep)]<EOL><DEDENT>elif type(sweep) is list:<EOL><INDENT>sweeps=sweep<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\",type(sweep),sweep)<EOL><DEDENT>if continuous:<EOL><INDENT>offsetX=ABF.sweepInterval<EOL><DEDENT>colors=[color]*len(sweeps) <EOL>if rainbow and len(sweeps)><NUM_LIT:1>:<EOL><INDENT>for i in range(len(sweeps)):<EOL><INDENT>colors[i]=ABF.colormap[i]<EOL><DEDENT><DEDENT>if alpha is None and len(sweeps)==<NUM_LIT:1>:<EOL><INDENT>alpha=<NUM_LIT:1><EOL><DEDENT>if rainbow and alpha is None:<EOL><INDENT>alpha=<NUM_LIT><EOL><DEDENT>if alpha is None:<EOL><INDENT>alpha=<NUM_LIT:1><EOL><DEDENT>if minutes == False:<EOL><INDENT>minutes=<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>minutes=<NUM_LIT><EOL>pylab.xlabel(\"<STR_LIT>\")<EOL><DEDENT>ABF.decimateMethod=decimate<EOL>for i in range(len(sweeps)):<EOL><INDENT>ABF.setSweep(sweeps[i])<EOL>if protocol:<EOL><INDENT>pylab.plot((np.array(ABF.protoX)/ABF.rate+offsetX*i)/minutes,<EOL>ABF.protoY+offsetY*i,<EOL>alpha=alpha,color=colors[i])<EOL><DEDENT>else:<EOL><INDENT>pylab.plot((ABF.dataX+offsetX*i)/minutes,<EOL>ABF.dataY+offsetY*i,alpha=alpha,color=colors[i])<EOL><DEDENT><DEDENT>ABF.decimateMethod=None<EOL>pylab.margins(<NUM_LIT:0>,<NUM_LIT>)<EOL>", "docstring": "Load a particular sweep then plot it.\nIf sweep is None or False, just plot current dataX/dataY.\nIf 
rainbow, it'll make it color coded prettily.", "id": "f11405:m6"}
{"signature": "def show(abf):", "body": "save(abf)<EOL>", "docstring": "showing is the same as saving without a filename.", "id": "f11405:m9"}
{"signature": "def dual(ABF):", "body": "new(ABF)<EOL>pylab.subplot(<NUM_LIT>)<EOL>pylab.title(\"<STR_LIT>\")<EOL>ABF.channel=<NUM_LIT:0><EOL>sweep(ABF)<EOL>pylab.subplot(<NUM_LIT>)<EOL>pylab.title(\"<STR_LIT>\")<EOL>ABF.channel=<NUM_LIT:1><EOL>sweep(ABF)<EOL>", "docstring": "Plot two channels of current sweep (top/bottom).", "id": "f11405:m5"}
{"signature": "def inspectABF(abf=exampleABF,saveToo=False,justPlot=False):", "body": "pylab.close('<STR_LIT:all>')<EOL>print(\"<STR_LIT>\")<EOL>if type(abf) is str:<EOL><INDENT>abf=swhlab.ABF(abf)<EOL><DEDENT>swhlab.plot.new(abf,forceNewFigure=True)<EOL>if abf.sweepInterval*abf.sweeps<<NUM_LIT>*<NUM_LIT:5>: <EOL><INDENT>pylab.subplot(<NUM_LIT>)<EOL>pylab.title(\"<STR_LIT>\"%(abf.ID,abf.protoComment))<EOL>swhlab.plot.sweep(abf,'<STR_LIT:all>')<EOL>pylab.subplot(<NUM_LIT>)<EOL>swhlab.plot.sweep(abf,'<STR_LIT:all>',continuous=True)<EOL>swhlab.plot.comments(abf)<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>swhlab.plot.sweep(abf,'<STR_LIT:all>',continuous=True,minutes=True)<EOL>swhlab.plot.comments(abf,minutes=True)<EOL>pylab.title(\"<STR_LIT>\"%(abf.ID,abf.protoComment))<EOL><DEDENT>swhlab.plot.annotate(abf)<EOL>if justPlot:<EOL><INDENT>return<EOL><DEDENT>if saveToo:<EOL><INDENT>path=os.path.split(abf.fname)[<NUM_LIT:0>]<EOL>basename=os.path.basename(abf.fname)<EOL>pylab.savefig(os.path.join(path,\"<STR_LIT:_>\"+basename.replace(\"<STR_LIT>\",\"<STR_LIT>\")))<EOL><DEDENT>pylab.show()<EOL>return<EOL>", "docstring": "May be given an ABF object or filename.", "id": "f11406:m44"}
{"signature": "def ftp_login(folder=None):", "body": "pwDir=os.path.realpath(__file__)<EOL>for i in range(<NUM_LIT:3>):<EOL><INDENT>pwDir=os.path.dirname(pwDir)<EOL><DEDENT>pwFile = os.path.join(pwDir,\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\"%pwFile)<EOL>try:<EOL><INDENT>with open(pwFile) as f:<EOL><INDENT>lines=f.readlines()<EOL><DEDENT>username=lines[<NUM_LIT:0>].strip()<EOL>password=lines[<NUM_LIT:1>].strip()<EOL>print(\"<STR_LIT>\")<EOL><DEDENT>except:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>username=TK_askPassword(\"<STR_LIT>\",\"<STR_LIT>\")<EOL>password=TK_askPassword(\"<STR_LIT>\",\"<STR_LIT>\"%username)<EOL>if not username or not password:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return<EOL><DEDENT><DEDENT>print(\"<STR_LIT>\",username)<EOL>print(\"<STR_LIT>\",\"<STR_LIT:*>\"*(len(password)))<EOL>print(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>ftp = ftplib.FTP(\"<STR_LIT>\")<EOL>ftp.login(username, password)<EOL>if folder:<EOL><INDENT>ftp.cwd(folder)<EOL><DEDENT>return ftp<EOL><DEDENT>except:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>", "docstring": "return an \"FTP\" object after logging in.", "id": "f11406:m47"}
{"signature": "def XMLfromPython(xmlObj,saveAs=False):", "body": "return<EOL>", "docstring": "given a an XML object, return XML string.\noptionally, save it to disk.", "id": "f11406:m16"}
{"signature": "def pickle_load(fname):", "body": "thing = pickle.load(open(fname,\"<STR_LIT:rb>\"))<EOL>return thing<EOL>", "docstring": "return the contents of a pickle file", "id": "f11406:m25"}
{"signature": "def dictAvg(listOfDicts,key,stdErr=False):", "body": "vals=dictVals(listOfDicts,key)<EOL>if len(vals) and np.any(vals):<EOL><INDENT>av=np.nanmean(vals)<EOL>er=np.nanstd(vals)<EOL>if stdErr:<EOL><INDENT>er=er/np.sqrt(np.count_nonzero(~np.isnan(er)))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>av,er=np.nan,np.nan<EOL><DEDENT>return av,er<EOL>", "docstring": "Given a list (l) of dicts (d), return AV and SD.", "id": "f11406:m5"}
{"signature": "def getABFgroups(files):", "body": "children=[]<EOL>groups={}<EOL>for fname in sorted(files):<EOL><INDENT>if fname.endswith(\"<STR_LIT>\"):<EOL><INDENT>if fname.replace(\"<STR_LIT>\",\"<STR_LIT>\") in files: <EOL><INDENT>if len(children):<EOL><INDENT>groups[children[<NUM_LIT:0>]]=children<EOL><DEDENT>children=[os.path.basename(fname)[:-<NUM_LIT:4>]]<EOL><DEDENT>else:<EOL><INDENT>children.append(os.path.basename(fname)[:-<NUM_LIT:4>])<EOL><DEDENT><DEDENT><DEDENT>groups[children[<NUM_LIT:0>]]=children<EOL>return groups<EOL>", "docstring": "given a list of ALL files (not just ABFs), return a dict[ID]=[ID,ID,ID].\nParents are determined if a .abf matches a .TIF.\nThis is made to assign children files to parent ABF IDs.", "id": "f11406:m41"}
{"signature": "def getPkl(fname): ", "body": "thing = pickle.load(open(fname,\"<STR_LIT:rb>\"))<EOL>return thing<EOL>", "docstring": "return the contents of a pickle file", "id": "f11406:m27"}
{"signature": "def forwardSlash(listOfFiles):", "body": "for i,fname in enumerate(listOfFiles):<EOL><INDENT>listOfFiles[i]=fname.replace(\"<STR_LIT:\\\\>\",\"<STR_LIT:/>\")<EOL><DEDENT>return listOfFiles<EOL>", "docstring": "convert silly C:\\\\names\\\\like\\\\this.txt to c:/names/like/this.txt", "id": "f11406:m36"}
{"signature": "def dictVals(l,key):", "body": "dicts=dictFlat(l)<EOL>vals=np.empty(len(dicts))*np.nan<EOL>for i in range(len(dicts)):<EOL><INDENT>if key in dicts[i]:<EOL><INDENT>vals[i]=dicts[i][key]<EOL><DEDENT><DEDENT>return vals<EOL>", "docstring": "Return all 'key' from a list of dicts. (or list of list of dicts)", "id": "f11406:m4"}
{"signature": "def matrixToHTML(data,names=None,units=None,bookName=None,sheetName=None,xCol=None):", "body": "if not names:<EOL><INDENT>names=[\"<STR_LIT>\"]*len(data[<NUM_LIT:0>])<EOL>if data.dtype.names:<EOL><INDENT>names=list(data.dtype.names)<EOL><DEDENT><DEDENT>if not units:<EOL><INDENT>units=[\"<STR_LIT>\"]*len(data[<NUM_LIT:0>])<EOL>for i in range(len(units)):<EOL><INDENT>if names[i] in UNITS.keys():<EOL><INDENT>units[i]=UNITS[names[i]]<EOL><DEDENT><DEDENT><DEDENT>if '<STR_LIT>' in str(type(data)): <EOL><INDENT>data=data.view(float).reshape(data.shape + (-<NUM_LIT:1>,))<EOL><DEDENT>if xCol and xCol in names:<EOL><INDENT>xCol=names.index(xCol)<EOL>names.insert(<NUM_LIT:0>,names[xCol])<EOL>units.insert(<NUM_LIT:0>,units[xCol])<EOL>data=np.insert(data,<NUM_LIT:0>,data[:,xCol],<NUM_LIT:1>)<EOL><DEDENT>htmlFname = tempfile.gettempdir()+\"<STR_LIT>\"%(bookName,sheetName)<EOL>html=\"\"\"<STR_LIT>\"\"\"<EOL>html+=\"<STR_LIT>\"<EOL>if bookName or sheetName:<EOL><INDENT>html+='<STR_LIT>'%(bookName,sheetName)<EOL><DEDENT>html+=\"<STR_LIT>\"<EOL>colNames=['<STR_LIT>']<EOL>for i in range(len(units)):<EOL><INDENT>label=\"<STR_LIT>\"%(chr(i+ord('<STR_LIT:A>')),i)<EOL>colNames.append(label)<EOL><DEDENT>html+=htmlListToTR(colNames,'<STR_LIT>','<STR_LIT>')<EOL>html+=htmlListToTR(['<STR_LIT>']+list(names),'<STR_LIT:name>',td1Class='<STR_LIT>')<EOL>html+=htmlListToTR(['<STR_LIT>']+list(units),'<STR_LIT>',td1Class='<STR_LIT>')<EOL>cutOff=False<EOL>for y in range(len(data)):<EOL><INDENT>html+=htmlListToTR([y+<NUM_LIT:1>]+list(data[y]),trClass='<STR_LIT>'%(y%<NUM_LIT:2>),td1Class='<STR_LIT>')<EOL>if y>=<NUM_LIT:200>:<EOL><INDENT>cutOff=True<EOL>break<EOL><DEDENT><DEDENT>html+=\"<STR_LIT>\"<EOL>html=html.replace(\"<STR_LIT>\",\"<STR_LIT>\")<EOL>html=html.replace(\"<STR_LIT>\",\"<STR_LIT>\")<EOL>if cutOff:<EOL><INDENT>html+=\"<STR_LIT>\"%(y,len(data))<EOL><DEDENT>html+=\"<STR_LIT>\"<EOL>with open(htmlFname,'<STR_LIT:w>') as 
f:<EOL><INDENT>f.write(html)<EOL><DEDENT>webbrowser.open(htmlFname)<EOL>return<EOL>", "docstring": "Put 2d numpy data into a temporary HTML file.", "id": "f11406:m14"}
{"signature": "def dummyListOfDicts(size=<NUM_LIT:100>):", "body": "titles=\"<STR_LIT>\".split(\"<STR_LIT:U+002C>\")<EOL>ld=[] <EOL>for i in range(size):<EOL><INDENT>d={}<EOL>for t in titles:<EOL><INDENT>if int(np.random.random(<NUM_LIT:1>)*<NUM_LIT:100>)><NUM_LIT:5>: <EOL><INDENT>d[t]=float(np.random.random(<NUM_LIT:1>)*<NUM_LIT:100>) <EOL><DEDENT>if t==\"<STR_LIT>\" and \"<STR_LIT>\" in d.keys():<EOL><INDENT>d[t]=int(d[t])<EOL><DEDENT><DEDENT>ld.append(d)<EOL><DEDENT>return ld<EOL>", "docstring": "returns a list (of the given size) of dicts with fake data.\nsome dictionary keys are missing for some of the items.", "id": "f11406:m6"}
{"signature": "def pickle_save(thing,fname):", "body": "pickle.dump(thing, open(fname,\"<STR_LIT:wb>\"),pickle.HIGHEST_PROTOCOL)<EOL>return thing<EOL>", "docstring": "save something to a pickle file", "id": "f11406:m26"}
{"signature": "def getIDsFromFiles(files):", "body": "if type(files) is str:<EOL><INDENT>files=glob.glob(files+\"<STR_LIT>\")<EOL><DEDENT>IDs=[]<EOL>for fname in files:<EOL><INDENT>if fname[-<NUM_LIT:4>:].lower()=='<STR_LIT>':<EOL><INDENT>ext=fname.split('<STR_LIT:.>')[-<NUM_LIT:1>]<EOL>IDs.append(os.path.basename(fname).replace('<STR_LIT:.>'+ext,'<STR_LIT>'))<EOL><DEDENT><DEDENT>return sorted(IDs)<EOL>", "docstring": "given a path or list of files, return ABF IDs.", "id": "f11406:m43"}
{"signature": "def checkOut(thing,html=True):", "body": "msg=\"<STR_LIT>\"<EOL>for name in sorted(dir(thing)):<EOL><INDENT>if not \"<STR_LIT>\" in name:<EOL><INDENT>msg+=\"<STR_LIT>\"%name<EOL>try:<EOL><INDENT>msg+=\"<STR_LIT>\"%getattr(thing,name)()<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>if html:<EOL><INDENT>html='<STR_LIT>'+msg+'<STR_LIT>'<EOL>html=html.replace(\"<STR_LIT:U+0020>\",\"<STR_LIT>\").replace(\"<STR_LIT:\\n>\",\"<STR_LIT>\")<EOL>fname = tempfile.gettempdir()+\"<STR_LIT>\"<EOL>with open(fname,'<STR_LIT:w>') as f:<EOL><INDENT>f.write(html)<EOL><DEDENT>webbrowser.open(fname)<EOL><DEDENT>print(msg.replace('<STR_LIT>','<STR_LIT>').replace('<STR_LIT>','<STR_LIT>'))<EOL>", "docstring": "show everything we can about an object's projects and methods.", "id": "f11406:m12"}
{"signature": "def msgDict(d,matching=None,sep1=\"<STR_LIT:=>\",sep2=\"<STR_LIT:\\n>\",sort=True,cantEndWith=None):", "body": "msg=\"<STR_LIT>\"<EOL>if \"<STR_LIT>\" in str(type(d)):<EOL><INDENT>keys=d.dtype.names<EOL><DEDENT>else:<EOL><INDENT>keys=d.keys()<EOL><DEDENT>if sort:<EOL><INDENT>keys=sorted(keys)<EOL><DEDENT>for key in keys:<EOL><INDENT>if key[<NUM_LIT:0>]==\"<STR_LIT:_>\":<EOL><INDENT>continue<EOL><DEDENT>if matching:<EOL><INDENT>if not key in matching:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>if cantEndWith and key[-len(cantEndWith)]==cantEndWith:<EOL><INDENT>continue<EOL><DEDENT>if '<STR_LIT:float>' in str(type(d[key])):<EOL><INDENT>s=\"<STR_LIT>\"%d[key]<EOL><DEDENT>else:<EOL><INDENT>s=str(d[key])<EOL><DEDENT>if \"<STR_LIT:object>\" in s:<EOL><INDENT>s='<STR_LIT>'<EOL><DEDENT>msg+=key+sep1+s+sep2<EOL><DEDENT>return msg.strip()<EOL>", "docstring": "convert a dictionary to a pretty formatted string.", "id": "f11406:m28"}
{"signature": "def matrixToDicts(data):", "body": "<EOL>if \"<STR_LIT:float>\" in str(type(data[<NUM_LIT:0>])):<EOL><INDENT>d={}<EOL>for x in range(len(data)):<EOL><INDENT>d[data.dtype.names[x]]=data[x]<EOL><DEDENT>return d<EOL><DEDENT>l=[]<EOL>for y in range(len(data)):<EOL><INDENT>d={}<EOL>for x in range(len(data[y])):<EOL><INDENT>d[data.dtype.names[x]]=data[y][x]<EOL><DEDENT>l.append(d)<EOL><DEDENT>return l<EOL>", "docstring": "given a recarray, return it as a list of dicts.", "id": "f11406:m8"}
{"signature": "def TK_ask(title,msg):", "body": "root = tkinter.Tk()<EOL>root.attributes(\"<STR_LIT>\", True) <EOL>root.withdraw() <EOL>result=tkinter.messagebox.askyesno(title,msg)<EOL>root.destroy()<EOL>return result<EOL>", "docstring": "use the GUI to ask YES or NO.", "id": "f11406:m53"}
{"signature": "def originFormat(thing):", "body": "if type(thing) is list and type(thing[<NUM_LIT:0>]) is dict:<EOL><INDENT>return originFormat_listOfDicts(thing)<EOL><DEDENT>if type(thing) is list and type(thing[<NUM_LIT:0>]) is list:<EOL><INDENT>return originFormat_listOfDicts(dictFlat(thing))<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(thing)<EOL><DEDENT>", "docstring": "Try to format anything as a 2D matrix with column names.", "id": "f11406:m24"}
{"signature": "def TK_message(title,msg):", "body": "root = tkinter.Tk()<EOL>root.withdraw() <EOL>root.attributes(\"<STR_LIT>\", True) <EOL>root.lift() <EOL>tkinter.messagebox.showwarning(title, msg)<EOL>root.destroy()<EOL>", "docstring": "use the GUI to pop up a message.", "id": "f11406:m52"}
{"signature": "def XMLtoPython(xmlStr=r\"<STR_LIT>\"):", "body": "<EOL>if os.path.exists(xmlStr):<EOL><INDENT>with open(xmlStr) as f:<EOL><INDENT>xmlStr=f.read()<EOL><DEDENT><DEDENT>print(xmlStr)<EOL>print(\"<STR_LIT>\")<EOL>return<EOL>", "docstring": "given a string or a path to an XML file, return an XML object.", "id": "f11406:m15"}
{"signature": "def getUnit(name):", "body": "for key in UNITS:<EOL><INDENT>if name in key:<EOL><INDENT>return UNITS[key]<EOL><DEDENT><DEDENT>return \"<STR_LIT>\"<EOL>", "docstring": "given a column name, return the best guess unit.", "id": "f11406:m0"}
{"signature": "def image_convert(fname,saveAs=True,showToo=False):", "body": "<EOL>im=scipy.ndimage.imread(fname) <EOL>im=np.array(im,dtype=float) <EOL>cutoffLow=np.percentile(im,<NUM_LIT>)<EOL>cutoffHigh=np.percentile(im,<NUM_LIT>)<EOL>im[np.where(im<cutoffLow)]=cutoffLow<EOL>im[np.where(im>cutoffHigh)]=cutoffHigh<EOL>im-=np.min(im) <EOL>im/=np.max(im) <EOL>im*=<NUM_LIT:255> <EOL>im = Image.fromarray(im)<EOL>msg=\"<STR_LIT>\"%os.path.basename(fname)<EOL>timestamp = datetime.datetime.fromtimestamp(os.path.getctime(fname))<EOL>msg+=\"<STR_LIT>\"%timestamp.strftime('<STR_LIT>')<EOL>d = ImageDraw.Draw(im)<EOL>fnt = ImageFont.truetype(\"<STR_LIT>\", <NUM_LIT:20>)<EOL>d.text((<NUM_LIT:6>,<NUM_LIT:6>),msg,font=fnt,fill=<NUM_LIT:0>)<EOL>d.text((<NUM_LIT:4>,<NUM_LIT:4>),msg,font=fnt,fill=<NUM_LIT:255>)<EOL>if showToo:<EOL><INDENT>im.show()<EOL><DEDENT>if saveAs is False:<EOL><INDENT>return<EOL><DEDENT>if saveAs is True:<EOL><INDENT>saveAs=fname+\"<STR_LIT>\"<EOL><DEDENT>im.convert('<STR_LIT>').save(saveAs)<EOL>", "docstring": "Convert weird TIF files into web-friendly versions.\nAuto contrast is applied (saturating lower and upper 0.1%).\n    make saveAs True to save as .TIF.png\n    make saveAs False and it won't save at all\n    make saveAs \"someFile.jpg\" to save it as a different path/format", "id": "f11406:m54"}
{"signature": "def html_temp_launch(html):", "body": "fname = tempfile.gettempdir()+\"<STR_LIT>\"<EOL>with open(fname,'<STR_LIT:w>') as f:<EOL><INDENT>f.write(html)<EOL><DEDENT>webbrowser.open(fname)<EOL>", "docstring": "given text, make it a temporary HTML file and launch it.", "id": "f11406:m11"}
{"signature": "def abfinfo(self,printToo=False,returnDict=False):", "body": "info=\"<STR_LIT>\"<EOL>d={}<EOL>for thingName in sorted(dir(self)):<EOL><INDENT>if thingName in ['<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>']:<EOL><INDENT>continue<EOL><DEDENT>if \"<STR_LIT:_>\" in thingName:<EOL><INDENT>continue<EOL><DEDENT>thing=getattr(self,thingName)<EOL>if type(thing) is list and len(thing)><NUM_LIT:5>:<EOL><INDENT>continue<EOL><DEDENT>thingType=str(type(thing)).split(\"<STR_LIT:'>\")[<NUM_LIT:1>]<EOL>if \"<STR_LIT>\" in thingType or \"<STR_LIT>\" in thingType:<EOL><INDENT>continue<EOL><DEDENT>if thingName in [\"<STR_LIT>\",\"<STR_LIT>\"]:<EOL><INDENT>continue<EOL><DEDENT>info+=\"<STR_LIT>\"%(thingName,thingType,thing)<EOL>d[thingName]=thing<EOL><DEDENT>if printToo:<EOL><INDENT>print()<EOL>for line in info.split(\"<STR_LIT:\\n>\"):<EOL><INDENT>if len(line)<<NUM_LIT:3>:<EOL><INDENT>continue<EOL><DEDENT>print(\"<STR_LIT:U+0020>\",line)<EOL><DEDENT>print()<EOL><DEDENT>if returnDict:<EOL><INDENT>return d<EOL><DEDENT>return info<EOL>", "docstring": "show basic info about ABF class variables.", "id": "f11408:c0:m1"}
{"signature": "def headerHTML(self,fname=None):", "body": "if fname is None:<EOL><INDENT>fname = self.fname.replace(\"<STR_LIT>\",\"<STR_LIT>\")<EOL><DEDENT>html=\"<STR_LIT>\"<EOL>html+=\"<STR_LIT>\"%self.ID<EOL>html+=self.abfinfo().replace(\"<STR_LIT:<>\",\"<STR_LIT>\").replace(\"<STR_LIT:>>\",\"<STR_LIT>\").replace(\"<STR_LIT:\\n>\",\"<STR_LIT>\")<EOL>html+=\"<STR_LIT>\"%self.ID<EOL>html+=pprint.pformat(self.header, indent=<NUM_LIT:1>)<EOL>html=html.replace(\"<STR_LIT:\\n>\",'<STR_LIT>').replace(\"<STR_LIT:U+0020>\",\"<STR_LIT>\")<EOL>html=html.replace(r\"<STR_LIT:\\x00>\",\"<STR_LIT>\")<EOL>html+=\"<STR_LIT>\"<EOL>print(\"<STR_LIT>\")<EOL>print(fname)<EOL>f=open(fname,'<STR_LIT:w>')<EOL>f.write(html)<EOL>f.close()<EOL>", "docstring": "read the ABF header and save it HTML formatted.", "id": "f11408:c0:m2"}
{"signature": "def generate_colormap(self,colormap=None,reverse=False):", "body": "if colormap is None:<EOL><INDENT>colormap = pylab.cm.Dark2<EOL><DEDENT>self.cm=colormap<EOL>self.colormap=[]<EOL>for i in range(self.sweeps): <EOL><INDENT>self.colormap.append(colormap(i/self.sweeps))<EOL><DEDENT>if reverse:<EOL><INDENT>self.colormap.reverse()<EOL><DEDENT>", "docstring": "use 1 colormap for the whole abf. You can change it!.", "id": "f11408:c0:m3"}
{"signature": "def clampValues(self,timePoint=<NUM_LIT:0>):", "body": "Cs=np.zeros(self.sweeps)<EOL>for i in range(self.sweeps):<EOL><INDENT>self.setSweep(i) <EOL>for j in range(len(self.protoSeqX)):<EOL><INDENT>if self.protoSeqX[j]<=timePoint*self.rate:<EOL><INDENT>Cs[i]=self.protoSeqY[j]<EOL><DEDENT><DEDENT><DEDENT>return Cs<EOL>", "docstring": "return an array of command values at a time point (in sec).\nUseful for things like generating I/V curves.", "id": "f11408:c0:m9"}
{"signature": "def saveThing(self,thing,fname,overwrite=True,ext=\"<STR_LIT>\"):", "body": "if not os.path.exists(os.path.dirname(self.outpre)):<EOL><INDENT>os.mkdir(os.path.dirname(self.outpre))<EOL><DEDENT>if ext and not ext in fname:<EOL><INDENT>fname+=ext<EOL><DEDENT>fname=self.outpre+fname<EOL>if overwrite is False:<EOL><INDENT>if os.path.exists(fname):<EOL><INDENT>print(\"<STR_LIT>\"%os.path.basename(fname))<EOL>return<EOL><DEDENT><DEDENT>time1=cm.timethis()<EOL>pickle.dump(thing, open(fname,\"<STR_LIT:wb>\"),pickle.HIGHEST_PROTOCOL)<EOL>print(\"<STR_LIT>\"%(os.path.basename(fname),str(type(thing)),<EOL>sys.getsizeof(pickle.dumps(thing, -<NUM_LIT:1>))/<NUM_LIT>,<EOL>cm.timethis(time1)))<EOL>", "docstring": "save any object as /swhlab4/ID_[fname].pkl", "id": "f11408:c0:m14"}
{"signature": "def __init__(self,ABFfname=None,debugLevel=<NUM_LIT:0>,saveInfo=False):", "body": "self.valid=False<EOL>if ABFfname is None:<EOL><INDENT>return <EOL><DEDENT>if not type(ABFfname) is str:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if not ABFfname.lower().endswith(\"<STR_LIT>\"):<EOL><INDENT>raise Exception('<STR_LIT>'%ABFfname)<EOL><DEDENT>ABFfname=os.path.abspath(ABFfname)<EOL>if not os.path.exists(ABFfname):<EOL><INDENT>raise Exception('<STR_LIT>'%ABFfname)<EOL><DEDENT>if saveInfo:<EOL><INDENT>print(\"<STR_LIT>\",os.path.basename(ABFfname))<EOL><DEDENT>self.fname = os.path.abspath(ABFfname)<EOL>self.ID = os.path.basename(self.fname).replace(\"<STR_LIT>\",\"<STR_LIT>\")<EOL>self.outpath = os.path.join(os.path.split(self.fname)[<NUM_LIT:0>],\"<STR_LIT>\")<EOL>self.outpre = os.path.abspath(self.outpath+\"<STR_LIT:/>\"+self.ID+\"<STR_LIT:_>\") <EOL>self.reader = neo.io.AxonIO(ABFfname)<EOL>self.valid=False<EOL>try:<EOL><INDENT>self.header = self.reader.read_header()<EOL>self.block = self.reader.read_block(lazy=False, cascade=True) <EOL><DEDENT>except:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>self.valid=True<EOL>startDay=time.strptime(str(self.header[\"<STR_LIT>\"]), '<STR_LIT>')<EOL>self.timestamp=time.mktime(startDay)+self.header[\"<STR_LIT>\"]/<NUM_LIT> <EOL>self.units = self.header['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT>'].decode('<STR_LIT:utf8>') <EOL>self.unitsCommand = self.header['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT>'].decode('<STR_LIT:utf8>') <EOL>self.holding = self.header['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT>'] <EOL>self.rate = int(<NUM_LIT>/self.header['<STR_LIT>']['<STR_LIT>']) <EOL>self.timebase = self.header['<STR_LIT>']['<STR_LIT>'] <EOL>self.nADC = self.header['<STR_LIT>']['<STR_LIT>']['<STR_LIT>'] <EOL>self.sweeps = self.header['<STR_LIT>']<EOL>self.gapFree=(self.sweeps==<NUM_LIT:0>)<EOL>if self.gapFree:<EOL><INDENT>self.sweeps=<NUM_LIT:1><EOL><DEDENT>self.sweepSize = 
self.header['<STR_LIT>']['<STR_LIT>']/self.nADC<EOL>self.sweepLength = self.sweepSize/self.rate <EOL>self.sweepInterval = self.header['<STR_LIT>']['<STR_LIT>'] <EOL>if self.sweepInterval==<NUM_LIT:0>:<EOL><INDENT>self.sweepInterval=self.sweepLength<EOL><DEDENT>self.commentTags = self.block.segments[<NUM_LIT:0>].eventarrays[<NUM_LIT:0>].annotations['<STR_LIT>']<EOL>self.commentTags = [x.decode('<STR_LIT:utf8>') for x in self.commentTags]<EOL>self.commentTimes = np.array(self.block.segments[<NUM_LIT:0>].eventarrays[<NUM_LIT:0>].times)/self.nADC<EOL>self.commentTimes = self.commentTimes/<NUM_LIT:4> <EOL>self.commentSweeps = np.array(self.commentTimes/self.sweepInterval,dtype=int)<EOL>self.baseline=[None,None] <EOL>self.decimateMethod=None <EOL>self.decimateBy=<NUM_LIT:100> <EOL>self.offsetX = int(self.sweepSize/<NUM_LIT:64>) <EOL>self.offsetY = <NUM_LIT:0> <EOL>self.dataY = None <EOL>self.dataX = None <EOL>self.dataStart = None <EOL>self.currentSweep = None <EOL>self.channel = <NUM_LIT:0> <EOL>self.generate_colormap()<EOL>self.generate_protocol()<EOL>self.protoComment=cm.determineProtocol(self.fname)<EOL>if saveInfo:<EOL><INDENT>self.saveThing(self.abfinfo(returnDict=True),'<STR_LIT:info>',overwrite=False)<EOL><DEDENT>", "docstring": "SWHLab4 ABF class.\n        Basic usage:\n            1.) call this with an ABF filename\n            2.) select a sweet with setSweep()\n            3.) reference data by ABF.dataX and ABF.dataY", "id": "f11408:c0:m0"}
{"signature": "def filter_gaussian(self,sigmaMs=<NUM_LIT:100>,applyFiltered=False,applyBaseline=False):", "body": "if sigmaMs==<NUM_LIT:0>:<EOL><INDENT>return self.dataY<EOL><DEDENT>filtered=cm.filter_gaussian(self.dataY,sigmaMs)<EOL>if applyBaseline:<EOL><INDENT>self.dataY=self.dataY-filtered<EOL><DEDENT>elif applyFiltered:<EOL><INDENT>self.dataY=filtered<EOL><DEDENT>else:<EOL><INDENT>return filtered<EOL><DEDENT>", "docstring": "RETURNS filtered trace. Desn't filter it in place.", "id": "f11408:c0:m13"}
{"signature": "def htmlABF(ID,group,d,folder,overwrite=False):", "body": "fname=folder+\"<STR_LIT>\"%ID<EOL>if overwrite is False and os.path.exists(fname):<EOL><INDENT>return<EOL><DEDENT>html=TEMPLATES['<STR_LIT>']<EOL>html=html.replace(\"<STR_LIT>\",ID)<EOL>html=html.replace(\"<STR_LIT>\",htmlABFcontent(ID,group,d))<EOL>print(\"<STR_LIT>\"%os.path.basename(fname))<EOL>with open(fname,'<STR_LIT:w>') as f:<EOL><INDENT>f.write(html)<EOL><DEDENT>return<EOL>", "docstring": "given an ID and the dict of files, generate a static html for that abf.", "id": "f11412:m2"}
{"signature": "def htmlABFcontent(ID,group,d):", "body": "html=\"<STR_LIT>\"<EOL>files=[]<EOL>for abfID in group:<EOL><INDENT>files.extend(d[abfID])<EOL><DEDENT>files=sorted(files)<EOL>html+=\"<STR_LIT>\"<EOL>for fname in files:<EOL><INDENT>if \"<STR_LIT>\" in fname.lower() and not \"<STR_LIT>\" in fname:<EOL><INDENT>fname=\"<STR_LIT>\"+os.path.basename(fname)<EOL>html+='<STR_LIT>'%(fname,fname)<EOL><DEDENT><DEDENT>html+=\"<STR_LIT>\"<EOL>lastID='<STR_LIT>'<EOL>for fname in sorted(files):<EOL><INDENT>if not \"<STR_LIT>\" in fname:<EOL><INDENT>continue<EOL><DEDENT>ID=os.path.basename(fname).split(\"<STR_LIT:_>\")[<NUM_LIT:0>]<EOL>if not ID==lastID:<EOL><INDENT>lastID=ID<EOL>html+=\"<STR_LIT>\"%os.path.basename(fname).split(\"<STR_LIT:_>\")[<NUM_LIT:0>]<EOL><DEDENT>if \"<STR_LIT>\" in fname.lower():<EOL><INDENT>fname=os.path.basename(fname)<EOL>html+='<STR_LIT>'%(fname,fname)<EOL>continue<EOL><DEDENT><DEDENT>html+=\"<STR_LIT>\"<EOL>for fname in files:<EOL><INDENT>if not \"<STR_LIT>\" in fname:<EOL><INDENT>continue<EOL><DEDENT>if \"<STR_LIT>\" in fname:<EOL><INDENT>callit=os.path.basename(fname)<EOL>thing=cm.getPkl(fname)<EOL>if \"<STR_LIT>\" in fname:<EOL><INDENT>callit+=\"<STR_LIT>\"<EOL>thing=cm.dictFlat(thing)<EOL>if len(thing):<EOL><INDENT>thing=thing[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>elif \"<STR_LIT>\" in fname:<EOL><INDENT>if type(thing) == dict:<EOL><INDENT>callit+=\"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>callit+=\"<STR_LIT>\"<EOL>thing=thing[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>elif \"<STR_LIT>\" in fname:<EOL><INDENT>continue <EOL><DEDENT>elif \"<STR_LIT>\" in fname or \"<STR_LIT>\" in fname:<EOL><INDENT>pass <EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\"%os.path.basename(fname))<EOL>continue<EOL><DEDENT>if type(thing) is dict:<EOL><INDENT>thing=cm.msgDict(thing)<EOL><DEDENT>if type(thing) is list:<EOL><INDENT>out='<STR_LIT>'<EOL>for item in thing:<EOL><INDENT>out+=str(item)+\"<STR_LIT:\\n>\"<EOL><DEDENT>thing=out<EOL><DEDENT>thing=str(thing) 
<EOL>thing=\"<STR_LIT>\"%os.path.basename(fname)+thing<EOL>html+=\"<STR_LIT>\"%(os.path.basename(fname),thing)<EOL><DEDENT><DEDENT>return html<EOL>", "docstring": "generate text to go inside <body> for single ABF page.", "id": "f11412:m1"}
{"signature": "def genIndex(folder,forceIDs=[]):", "body": "if not os.path.exists(folder+\"<STR_LIT>\"):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>timestart=cm.timethis()<EOL>files=glob.glob(folder+\"<STR_LIT>\") <EOL>files.extend(glob.glob(folder+\"<STR_LIT>\"))<EOL>print(\"<STR_LIT>\"%(cm.timethis(timestart)*<NUM_LIT:1000>))<EOL>files.extend(genPNGs(folder,files))<EOL>files=sorted(files)<EOL>timestart=cm.timethis()<EOL>d=cm.getIDfileDict(files) <EOL>print(\"<STR_LIT>\",len(d))<EOL>print(\"<STR_LIT>\"%(cm.timethis(timestart)*<NUM_LIT:1000>))<EOL>groups=cm.getABFgroups(files)<EOL>print(\"<STR_LIT>\",len(groups))<EOL>for ID in sorted(list(groups.keys())):<EOL><INDENT>overwrite=False<EOL>for abfID in groups[ID]:<EOL><INDENT>if abfID in forceIDs:<EOL><INDENT>overwrite=True<EOL><DEDENT><DEDENT>try:<EOL><INDENT>htmlABF(ID,groups[ID],d,folder,overwrite)<EOL><DEDENT>except:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>menu=expMenu(groups,folder)<EOL>makeSplash(menu,folder)<EOL>makeMenu(menu,folder)<EOL>htmlFrames(d,folder)<EOL>makeMenu(menu,folder)<EOL>makeSplash(menu,folder)<EOL>", "docstring": "expects a folder of ABFs.", "id": "f11412:m8"}
{"signature": "def genPNGs(folder,files=None):", "body": "if files is None:<EOL><INDENT>files=glob.glob(folder+\"<STR_LIT>\")<EOL><DEDENT>new=[]<EOL>for fname in files:<EOL><INDENT>ext=os.path.basename(fname).split(\"<STR_LIT:.>\")[-<NUM_LIT:1>].lower()<EOL>if ext in ['<STR_LIT>','<STR_LIT>']:<EOL><INDENT>if not os.path.exists(fname+\"<STR_LIT>\"):<EOL><INDENT>print(\"<STR_LIT>\"%os.path.basename(fname))<EOL>cm.image_convert(fname)<EOL>new.append(fname) <EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>return new<EOL>", "docstring": "Convert each TIF to PNG. Return filenames of new PNGs.", "id": "f11412:m0"}
{"signature": "def expMenu(groups,folder):", "body": "<EOL>orphans = sorted(list(groups.keys()))<EOL>menu=[]<EOL>if os.path.exists(folder+'<STR_LIT>'):<EOL><INDENT>with open(folder+'<STR_LIT>') as f:<EOL><INDENT>raw=f.read()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raw=\"<STR_LIT>\"<EOL><DEDENT>for line in raw.split(\"<STR_LIT:\\n>\"):<EOL><INDENT>item={}<EOL>if len(line)==<NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>if line.startswith(\"<STR_LIT>\"):<EOL><INDENT>line=line[<NUM_LIT:1>:].split(\"<STR_LIT:U+0020>\",<NUM_LIT:2>)<EOL>item[\"<STR_LIT>\"]=line[<NUM_LIT:0>]<EOL>item[\"<STR_LIT>\"]='<STR_LIT>'<EOL>if len(line)><NUM_LIT:1>:<EOL><INDENT>item[\"<STR_LIT>\"]=line[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>item[\"<STR_LIT>\"]=\"<STR_LIT>\"<EOL><DEDENT>if len(line)><NUM_LIT:2> and len(line[<NUM_LIT:2>]):<EOL><INDENT>item[\"<STR_LIT>\"]=line[<NUM_LIT:2>]<EOL>if item[\"<STR_LIT>\"][<NUM_LIT:0>]==\"<STR_LIT:*>\":<EOL><INDENT>item[\"<STR_LIT>\"]='<STR_LIT:*>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>item[\"<STR_LIT>\"]='<STR_LIT>'<EOL><DEDENT>if item[\"<STR_LIT>\"] in orphans:<EOL><INDENT>orphans.remove(item[\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>elif line.startswith(\"<STR_LIT>\"):<EOL><INDENT>line=line[<NUM_LIT:3>:].strip().split(\"<STR_LIT:U+0020>\",<NUM_LIT:1>)<EOL>item[\"<STR_LIT:title>\"]=line[<NUM_LIT:0>]<EOL>item[\"<STR_LIT>\"]='<STR_LIT>'<EOL>if len(line)><NUM_LIT:1>:<EOL><INDENT>if line[<NUM_LIT:1>].startswith(\"<STR_LIT>\"):<EOL><INDENT>line[<NUM_LIT:1>]=line[<NUM_LIT:1>][<NUM_LIT:2>:]<EOL><DEDENT>item[\"<STR_LIT>\"]=line[<NUM_LIT:1>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>item[\"<STR_LIT>\"]=line<EOL><DEDENT>menu.append(item)<EOL><DEDENT>menu.append({\"<STR_LIT:title>\":\"<STR_LIT>\",\"<STR_LIT>\":\"<STR_LIT>\"})<EOL>for ophan in orphans:<EOL><INDENT>menu.append({\"<STR_LIT>\":ophan,\"<STR_LIT>\":ophan,\"<STR_LIT>\":'<STR_LIT>',\"<STR_LIT>\":'<STR_LIT>',\"<STR_LIT>\":'<STR_LIT>'})<EOL><DEDENT>return menu<EOL>", "docstring": "read experiment.txt and return a dict 
with [firstOfNewExp, color, star, comments].", "id": "f11412:m4"}
{"signature": "def proto_VC_50_MT_IV(abf=exampleABF):", "body": "swhlab.memtest.memtest(abf) <EOL>swhlab.memtest.checkSweep(abf) <EOL>swhlab.plot.save(abf,tag='<STR_LIT>',resize=False)<EOL>av1,sd1=swhlab.plot.IV(abf,<NUM_LIT>,<NUM_LIT>,True,'<STR_LIT:b>')<EOL>swhlab.plot.save(abf,tag='<STR_LIT>')<EOL>Xs=abf.clampValues(<NUM_LIT>) <EOL>abf.saveThing([Xs,av1],'<STR_LIT>')<EOL>", "docstring": "combination of membrane test and IV steps.", "id": "f11413:m15"}
{"signature": "def proto_01_01_HP010(abf=exampleABF):", "body": "swhlab.memtest.memtest(abf) <EOL>swhlab.memtest.checkSweep(abf) <EOL>swhlab.plot.save(abf,tag=\"<STR_LIT>\")<EOL>", "docstring": "hyperpolarization step. Use to calculate tau and stuff.", "id": "f11413:m5"}
{"signature": "def proto_03_01_0s2(abf=exampleABF):", "body": "standard_inspect(abf)<EOL>", "docstring": "repeated membrane tests, likely with drug added. Maybe IPSCs.", "id": "f11413:m13"}
{"signature": "def proto_02_02_IVdual(abf=exampleABF):", "body": "av1,sd1=swhlab.plot.IV(abf,<NUM_LIT>,<NUM_LIT:1>,True,'<STR_LIT:b>')<EOL>swhlab.plot.save(abf,tag='<STR_LIT>')<EOL>a2v,sd2=swhlab.plot.IV(abf,<NUM_LIT>,<NUM_LIT>,True,'<STR_LIT:r>')<EOL>swhlab.plot.save(abf,tag='<STR_LIT>')<EOL>swhlab.plot.sweep(abf,'<STR_LIT:all>')<EOL>pylab.axis([None,None,min(av1)-<NUM_LIT:50>,max(av1)+<NUM_LIT:50>])<EOL>swhlab.plot.save(abf,tag='<STR_LIT>')<EOL>", "docstring": "dual I/V steps in VC mode, one from -70 and one -50.", "id": "f11413:m11"}
{"signature": "def proto_SHIV4(abf=exampleABF):", "body": "standard_inspect(abf)<EOL>swhlab.ap.detect(abf)<EOL>standard_groupingForInj(abf,<NUM_LIT:200>)<EOL>swhlab.ap.check_AP_raw(abf) <EOL>swhlab.plot.save(abf,tag=\"<STR_LIT>\",resize=False)<EOL>swhlab.ap.check_AP_deriv(abf) <EOL>swhlab.plot.save(abf,tag=\"<STR_LIT>\")<EOL>swhlab.ap.check_AP_phase(abf) <EOL>swhlab.plot.save(abf,tag=\"<STR_LIT>\")<EOL>swhlab.ap.plot_values(abf,'<STR_LIT>',continuous=True) <EOL>pylab.subplot(<NUM_LIT>)<EOL>pylab.axhline(<NUM_LIT>,color='<STR_LIT:r>',lw=<NUM_LIT:2>,ls=\"<STR_LIT>\",alpha=<NUM_LIT>)<EOL>swhlab.plot.save(abf,tag='<STR_LIT>')<EOL>swhlab.ap.plot_values(abf,'<STR_LIT>',continuous=False) <EOL>pylab.subplot(<NUM_LIT>)<EOL>pylab.axhline(<NUM_LIT>,color='<STR_LIT:r>',lw=<NUM_LIT:2>,ls=\"<STR_LIT>\",alpha=<NUM_LIT>)<EOL>swhlab.plot.save(abf,tag='<STR_LIT>')<EOL>swhlab.plot.gain(abf) <EOL>swhlab.plot.save(abf,tag='<STR_LIT>')<EOL>", "docstring": "increasing ramps in (?) pA steps.", "id": "f11413:m17"}
{"signature": "def proto_04_01_MTmon70s2(abf=exampleABF):", "body": "standard_inspect(abf)<EOL>swhlab.memtest.memtest(abf)<EOL>swhlab.memtest.checkSweep(abf)<EOL>swhlab.plot.save(abf,tag='<STR_LIT>',resize=False)<EOL>swhlab.memtest.plot_standard4(abf)<EOL>swhlab.plot.save(abf,tag='<STR_LIT>')<EOL>", "docstring": "repeated membrane tests, likely with drug added. Maybe IPSCs.", "id": "f11413:m14"}
{"signature": "def handleNewABF(fname):", "body": "waitTillCopied(fname)<EOL>standard.autoABF(fname)<EOL>", "docstring": "we see a brand new ABF. now what?", "id": "f11414:m1"}
{"signature": "def newVersion():", "body": "version=None<EOL>fname='<STR_LIT>'<EOL>with open(fname) as f:<EOL><INDENT>raw=f.read().split(\"<STR_LIT:\\n>\")<EOL>for i,line in enumerate(raw):<EOL><INDENT>if line.startswith(\"<STR_LIT>\"):<EOL><INDENT>if version is None:<EOL><INDENT>version = int(line.split(\"<STR_LIT:=>\")[<NUM_LIT:1>])<EOL><DEDENT>raw[i]=\"<STR_LIT>\"%(version+<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>with open(fname,'<STR_LIT:w>') as f:<EOL><INDENT>f.write(\"<STR_LIT:\\n>\".join(raw))<EOL><DEDENT>print(\"<STR_LIT>\"%(version,version+<NUM_LIT:1>))<EOL>", "docstring": "increments version counter in swhlab/version.py", "id": "f11417:m0"}
{"signature": "def __init__(self, api_key, password):", "body": "self.api_key = api_key<EOL>self.password = password<EOL>", "docstring": "Initialise the AmbientSMS class\n\nExpects:\n - api_key - your AmbientSMS Central username\n - password - your AmbientSMS Central password", "id": "f11418:c2:m0"}
{"signature": "def dictFromXml(xmlString):", "body": "dom = minidom.parseString(xmlString)<EOL>return nodeToDic(dom.childNodes[<NUM_LIT:0>])<EOL>", "docstring": "Returns the a dictionary from the XML string.", "id": "f11418:m0"}
{"signature": "def getTextFromNode(node):", "body": "t = \"<STR_LIT>\"<EOL>for n in node.childNodes:<EOL><INDENT>if n.nodeType == n.TEXT_NODE:<EOL><INDENT>t += n.nodeValue<EOL><DEDENT>else:<EOL><INDENT>raise NotTextNodeError<EOL><DEDENT><DEDENT>return t<EOL>", "docstring": "Scans through all children of node and gathers the\ntext. If node has non-text child-nodes then\nNotTextNodeError is raised.", "id": "f11418:m1"}
{"signature": "def curl(self, url, post):", "body": "try:<EOL><INDENT>req = urllib.request.Request(url)<EOL>req.add_header(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>data = urllib.request.urlopen(req, post.encode('<STR_LIT:utf-8>')).read()<EOL><DEDENT>except urllib.error.URLError as v:<EOL><INDENT>raise AmbientSMSError(v)<EOL><DEDENT>return dictFromXml(data)<EOL>", "docstring": "Inteface for sending web requests to the AmbientSMS API Server", "id": "f11418:c2:m3"}
{"signature": "def p_objectitem_1(self, p):", "body": "if DEBUG:<EOL><INDENT>self.print_p(p)<EOL><DEDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "objectitem : block", "id": "f11423:c0:m10"}
{"signature": "def p_blockId(self, p):", "body": "if DEBUG:<EOL><INDENT>self.print_p(p)<EOL><DEDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "blockId : IDENTIFIER\n        | STRING", "id": "f11423:c0:m13"}
{"signature": "def p_listitems_1(self, p):", "body": "if DEBUG:<EOL><INDENT>self.print_p(p)<EOL><DEDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>] + [p[<NUM_LIT:3>]]<EOL>", "docstring": "listitems : listitems COMMA listitem", "id": "f11423:c0:m17"}
{"signature": "def p_list_1(self, p):", "body": "if DEBUG:<EOL><INDENT>self.print_p(p)<EOL><DEDENT>p[<NUM_LIT:0>] = []<EOL>", "docstring": "list : LEFTBRACKET RIGHTBRACKET", "id": "f11423:c0:m15"}
{"signature": "def p_object_0(self, p):", "body": "if DEBUG:<EOL><INDENT>self.print_p(p)<EOL><DEDENT>p[<NUM_LIT:0>] = self.objectlist_flat(p[<NUM_LIT:2>], False)<EOL>", "docstring": "object : LEFTBRACE objectlist RIGHTBRACE", "id": "f11423:c0:m5"}
{"signature": "def p_listitems_0(self, p):", "body": "if DEBUG:<EOL><INDENT>self.print_p(p)<EOL><DEDENT>p[<NUM_LIT:0>] = [p[<NUM_LIT:1>]]<EOL>", "docstring": "listitems : listitem", "id": "f11423:c0:m16"}
{"signature": "def p_float_1(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "float : FLOAT", "id": "f11423:c0:m26"}
{"signature": "def p_objectlist_2(self, p):", "body": "if DEBUG:<EOL><INDENT>self.print_p(p)<EOL><DEDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>] + [p[<NUM_LIT:3>]]<EOL>", "docstring": "objectlist : objectlist COMMA objectitem", "id": "f11423:c0:m4"}
{"signature": "def p_objectlist_1(self, p):", "body": "if DEBUG:<EOL><INDENT>self.print_p(p)<EOL><DEDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>] + [p[<NUM_LIT:2>]]<EOL>", "docstring": "objectlist : objectlist objectitem", "id": "f11423:c0:m3"}
{"signature": "def p_int_0(self, p):", "body": "if DEBUG:<EOL><INDENT>self.print_p(p)<EOL><DEDENT>p[<NUM_LIT:0>] = -p[<NUM_LIT:2>]<EOL>", "docstring": "int : MINUS int", "id": "f11423:c0:m23"}
{"signature": "def p_listitem(self, p):", "body": "if DEBUG:<EOL><INDENT>self.print_p(p)<EOL><DEDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "listitem : number\n         | object\n         | STRING", "id": "f11423:c0:m18"}
{"signature": "def p_top(self, p):", "body": "if DEBUG:<EOL><INDENT>self.print_p(p)<EOL><DEDENT>p[<NUM_LIT:0>] = self.objectlist_flat(p[<NUM_LIT:1>], True)<EOL>", "docstring": "top : objectlist", "id": "f11423:c0:m1"}
{"signature": "def p_list_0(self, p):", "body": "if DEBUG:<EOL><INDENT>self.print_p(p)<EOL><DEDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:2>]<EOL>", "docstring": "list : LEFTBRACKET listitems RIGHTBRACKET\n     | LEFTBRACKET listitems COMMA RIGHTBRACKET", "id": "f11423:c0:m14"}
{"signature": "def t_string_STRING(self, t):<EOL>", "body": "t.value = (<EOL>t.lexer.string_value + t.lexer.lexdata[t.lexer.rel_pos : t.lexer.lexpos - <NUM_LIT:1>]<EOL>)<EOL>t.lexer.lineno += t.lexer.lexdata[t.lexer.abs_start : t.lexer.lexpos - <NUM_LIT:1>].count(<EOL>'<STR_LIT:\\n>'<EOL>)<EOL>t.lexer.begin('<STR_LIT>')<EOL>return t<EOL>", "docstring": "r'\\", "id": "f11424:c0:m13"}
{"signature": "def t_heredoc_STRING(self, t):", "body": "return self._end_heredoc(t)<EOL>", "docstring": "r'^.+?(?=\\r?$)", "id": "f11424:c0:m24"}
{"signature": "def t_tabbedheredoc_STRING(self, t):", "body": "return self._end_heredoc(t)<EOL>", "docstring": "r'^\\t*.+?(?=\\r?$)", "id": "f11424:c0:m23"}
{"signature": "def t_stringdollar_rbrace(self, t):", "body": "t.lexer.braces -= <NUM_LIT:1><EOL>if t.lexer.braces == <NUM_LIT:0>:<EOL><INDENT>t.lexer.begin('<STR_LIT:string>')<EOL><DEDENT>", "docstring": "r'\\}", "id": "f11424:c0:m17"}
{"signature": "def t_string_escapedchar(self, t):<EOL>", "body": "t.lexer.string_value += (<EOL>t.lexer.lexdata[t.lexer.rel_pos : t.lexer.lexpos - <NUM_LIT:2>] + t.value<EOL>)<EOL>t.lexer.rel_pos = t.lexer.lexpos<EOL>pass<EOL>", "docstring": "r'(?<=\\\\)(\\\"|\\\\)", "id": "f11424:c0:m10"}
{"signature": "def t_COMMENT(self, t):", "body": "pass<EOL>", "docstring": "r'(\\#|(//)).*", "id": "f11424:c0:m27"}
{"signature": "def t_stringdollar_lbrace(self, t):", "body": "t.lexer.braces += <NUM_LIT:1><EOL>", "docstring": "r'\\{", "id": "f11424:c0:m16"}
{"signature": "def t_BOOL(self, t):", "body": "t.value = t.value == '<STR_LIT:true>'<EOL>return t<EOL>", "docstring": "r'(true)|(false)", "id": "f11424:c0:m0"}
{"signature": "def t_MULTICOMMENT(self, t):", "body": "t.lexer.lineno += t.value.count('<STR_LIT:\\n>')<EOL>pass<EOL>", "docstring": "r'/\\*(.|\\n)*?(\\*/)", "id": "f11424:c0:m28"}
{"signature": "def t_intnumber(self, t):", "body": "t.value = int(t.value)<EOL>t.type = '<STR_LIT>'<EOL>return t<EOL>", "docstring": "r'-?\\d+", "id": "f11424:c0:m5"}
{"signature": "def t_string(self, t):<EOL>", "body": "<EOL>t.lexer.abs_start = t.lexer.lexpos<EOL>t.lexer.rel_pos = t.lexer.lexpos<EOL>t.lexer.string_value = u'<STR_LIT>'<EOL>t.lexer.begin('<STR_LIT:string>')<EOL>", "docstring": "r'\\", "id": "f11424:c0:m9"}
{"signature": "def t_FLOAT(self, t):", "body": "t.value = float(t.value)<EOL>return t<EOL>", "docstring": "r'-?((\\d+\\.\\d*)|(\\d*\\.\\d+))", "id": "f11424:c0:m3"}
{"signature": "def t_string_stringdollar(self, t):<EOL>", "body": "t.lexer.braces = <NUM_LIT:1><EOL>t.lexer.begin('<STR_LIT>')<EOL>", "docstring": "r'(?<=\\$)\\{", "id": "f11424:c0:m11"}
{"signature": "def t_COMMA(self, t):", "body": "return t<EOL>", "docstring": "r',", "id": "f11424:c0:m7"}
{"signature": "def t_newline(self, t):", "body": "t.lexer.lineno += len(t.value)<EOL>", "docstring": "r'\\n+", "id": "f11424:c0:m29"}
{"signature": "def t_tabbedheredoc(self, t):", "body": "t.lexer.is_tabbed = True<EOL>self._init_heredoc(t)<EOL>t.lexer.begin('<STR_LIT>')<EOL>", "docstring": "r'<<-\\S+\\r?\\n", "id": "f11424:c0:m20"}
{"signature": "def dumps(*args, **kwargs):", "body": "return json.dumps(*args, **kwargs)<EOL>", "docstring": "Turns a dictionary into JSON, passthru to json.dumps", "id": "f11426:m3"}
{"signature": "def all_info_files(self) :", "body": "try :<EOL><INDENT>for info_file in list_files_in_dir(self.info_dir):<EOL><INDENT>if not os.path.basename(info_file).endswith('<STR_LIT>') :<EOL><INDENT>self.on_non_trashinfo_found()<EOL><DEDENT>else :<EOL><INDENT>yield info_file<EOL><DEDENT><DEDENT><DEDENT>except OSError: <EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Returns a generator of \"Path\"s", "id": "f11431:c0:m1"}
{"signature": "def persist_trash_info(self, basename, content, logger):", "body": "self.ensure_dir(self.info_dir, <NUM_LIT>)<EOL>index = <NUM_LIT:0><EOL>while True :<EOL><INDENT>if index == <NUM_LIT:0> :<EOL><INDENT>suffix = \"<STR_LIT>\"<EOL><DEDENT>elif index < <NUM_LIT:100>:<EOL><INDENT>suffix = \"<STR_LIT>\" % index<EOL><DEDENT>else :<EOL><INDENT>import random<EOL>suffix = \"<STR_LIT>\" % random.randint(<NUM_LIT:0>, <NUM_LIT>)<EOL><DEDENT>base_id = basename<EOL>trash_id = base_id + suffix<EOL>trash_info_basename = trash_id+\"<STR_LIT>\"<EOL>dest = os.path.join(self.info_dir, trash_info_basename)<EOL>try :<EOL><INDENT>self.atomic_write(dest, content)<EOL>logger.debug(\"<STR_LIT>\" % dest)<EOL>return dest<EOL><DEDENT>except OSError:<EOL><INDENT>logger.debug(\"<STR_LIT>\" % dest)<EOL><DEDENT>index += <NUM_LIT:1><EOL><DEDENT>raise IOError()<EOL>", "docstring": "Create a .trashinfo file in the $trash/info directory.\nreturns the created TrashInfoFile.", "id": "f11438:c6:m4"}
{"signature": "def _format_text(self, text) :", "body": "return text<EOL>", "docstring": "[Does not] format a text, return the text as it is.", "id": "f11438:c3:m0"}
{"signature": "def trash(self, file) :", "body": "if self._should_skipped_by_specs(file):<EOL><INDENT>self.reporter.unable_to_trash_dot_entries(file)<EOL>return<EOL><DEDENT>volume_of_file_to_be_trashed = self.volume_of_parent(file)<EOL>self.reporter.volume_of_file(volume_of_file_to_be_trashed)<EOL>candidates = self._possible_trash_directories_for(<EOL>volume_of_file_to_be_trashed)<EOL>self.try_trash_file_using_candidates(file,<EOL>volume_of_file_to_be_trashed,<EOL>candidates)<EOL>", "docstring": "Trash a file in the appropriate trash directory.\nIf the file belong to the same volume of the trash home directory it\nwill be trashed in the home trash directory.\nOtherwise it will be trashed in one of the relevant volume trash\ndirectories.\n\nEach volume can have two trash directories, they are\n    - $volume/.Trash/$uid\n    - $volume/.Trash-$uid\n\nFirstly the software attempt to trash the file in the first directory\nthen try to trash in the second trash directory.", "id": "f11438:c0:m4"}
{"signature": "def copytree(src, dst, symlinks=False, ignore=None):", "body": "<EOL>if not os.path.exists(dst):<EOL><INDENT>os.makedirs(dst)<EOL>shutil.copystat(src, dst)<EOL><DEDENT>lst = os.listdir(src)<EOL>if ignore:<EOL><INDENT>excl = ignore(src, lst)<EOL>lst = [x for x in lst if x not in excl]<EOL><DEDENT>for item in lst:<EOL><INDENT>s = os.path.join(src, item)<EOL>d = os.path.join(dst, item)<EOL>if symlinks and os.path.islink(s):<EOL><INDENT>if os.path.lexists(d):<EOL><INDENT>os.remove(d)<EOL><DEDENT>os.symlink(os.readlink(s), d)<EOL>try:<EOL><INDENT>st = os.lstat(s)<EOL>mode = stat.S_IMODE(st.st_mode)<EOL>os.lchmod(d, mode)<EOL><DEDENT>except:<EOL><INDENT>pass  <EOL><DEDENT><DEDENT>elif os.path.isdir(s):<EOL><INDENT>copytree(s, d, symlinks, ignore)<EOL><DEDENT>else:<EOL><INDENT>shutil.copy2(s, d)<EOL><DEDENT><DEDENT>", "docstring": "copytree that works even if folder already exists", "id": "f11486:m1"}
{"signature": "def get_source(self, doc):", "body": "start_iter = doc.get_start_iter()<EOL>end_iter = doc.get_end_iter()<EOL>source = doc.get_text(start_iter, end_iter, False)<EOL>return source<EOL>", "docstring": "Grab contents of 'doc' and return it\n\n:param doc: The active document\n:return:", "id": "f11487:c0:m9"}
{"signature": "def get_source(self, doc):", "body": "start_iter = doc.get_start_iter()<EOL>end_iter = doc.get_end_iter()<EOL>source = doc.get_text(start_iter, end_iter, False)<EOL>return source<EOL>", "docstring": "Grab contents of 'doc' and return it\n\n:param doc: The active document\n:return:", "id": "f11490:c0:m9"}
{"signature": "def get_source(self, doc):", "body": "start_iter = doc.get_start_iter()<EOL>end_iter = doc.get_end_iter()<EOL>source = doc.get_text(start_iter, end_iter, False)<EOL>return source<EOL>", "docstring": "Grab contents of 'doc' and return it\n\n:param doc: The active document\n:return:", "id": "f11492:c0:m9"}
{"signature": "def find_example_dir():", "body": "<EOL>code_stub = textwrap.dedent(\"\"\"<STR_LIT>\"\"\")<EOL>code = code_stub % '<STR_LIT>'<EOL>cmd = [\"<STR_LIT>\", \"<STR_LIT:-c>\", code]<EOL>p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)<EOL>output, errors = p.communicate()<EOL>if errors:<EOL><INDENT>print('<STR_LIT>')<EOL>print('<STR_LIT>'.format(errors.decode('<STR_LIT:utf-8>')))<EOL>return None<EOL><DEDENT>else:<EOL><INDENT>examples_dir = output.decode('<STR_LIT:utf-8>').strip()<EOL>if os.path.isdir(examples_dir):<EOL><INDENT>return examples_dir<EOL><DEDENT>code = code_stub % '<STR_LIT>'<EOL>cmd = [\"<STR_LIT>\", \"<STR_LIT:-c>\", code]<EOL>p = subprocess.Popen(cmd, stdout=subprocess.PIPE)<EOL>output, errors = p.communicate()<EOL>examples_dir = output.decode('<STR_LIT:utf-8>').strip()<EOL>if os.path.isdir(examples_dir):<EOL><INDENT>return examples_dir<EOL><DEDENT>if examples_dir:<EOL><INDENT>print('<STR_LIT>'.format(examples_dir))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Find examples dir .. a little bit ugly..", "id": "f11493:m1"}
{"signature": "def get_command_responses(self):", "body": "if not self.response_queue.empty():<EOL><INDENT>yield None<EOL><DEDENT>while not self.response_queue.empty():<EOL><INDENT>line = self.response_queue.get()<EOL>if line is not None:<EOL><INDENT>yield line<EOL><DEDENT><DEDENT>", "docstring": "Get responses to commands sent", "id": "f11493:c2:m7"}
{"signature": "def eof(self):", "body": "return (not self.is_alive()) and self._queue.empty() or self._fd.closed<EOL>", "docstring": "Check whether there is no more content to expect.", "id": "f11493:c0:m2"}
{"signature": "def get_output(self):", "body": "if self.process.poll() is not None:<EOL><INDENT>self.close()<EOL>yield None, None<EOL><DEDENT>while not (self.stdout_queue.empty() and self.stderr_queue.empty()):<EOL><INDENT>if not self.stdout_queue.empty():<EOL><INDENT>line = self.stdout_queue.get().decode('<STR_LIT:utf-8>')<EOL>yield line, None<EOL><DEDENT>if not self.stderr_queue.empty():<EOL><INDENT>line = self.stderr_queue.get().decode('<STR_LIT:utf-8>')<EOL>yield None, line<EOL><DEDENT><DEDENT>", "docstring": ":yield: stdout_line, stderr_line, running\n\nGenerator that outputs lines captured from stdout and stderr\n\nThese can be consumed to output on a widget in an IDE", "id": "f11493:c2:m6"}
{"signature": "def live_source_load(self, source):", "body": "source = source.rstrip('<STR_LIT:\\n>')<EOL>if source != self.source:<EOL><INDENT>self.source = source<EOL>b64_source = base64.b64encode(bytes(bytearray(source, \"<STR_LIT:ascii>\")))<EOL>self.send_command(CMD_LOAD_BASE64, b64_source)<EOL><DEDENT>", "docstring": "Send new source code to the bot\n\n:param source:\n:param good_cb: callback called if code was good\n:param bad_cb: callback called if code was bad (will get contents of exception)\n:return:", "id": "f11493:c2:m2"}
{"signature": "def sbot_executable():", "body": "gsettings=load_gsettings()<EOL>venv = gsettings.get_string('<STR_LIT>')<EOL>if venv == '<STR_LIT>':<EOL><INDENT>sbot = which('<STR_LIT>')<EOL><DEDENT>elif venv == '<STR_LIT>':<EOL><INDENT>env_venv = os.environ.get('<STR_LIT>')<EOL>if not env_venv:<EOL><INDENT>return which('<STR_LIT>')<EOL><DEDENT>for p in os.environ['<STR_LIT>'].split(os.path.pathsep):<EOL><INDENT>sbot='<STR_LIT>' % p<EOL>if not p.startswith(env_venv) and os.path.isfile(sbot):<EOL><INDENT>return sbot<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>sbot = os.path.join(venv, '<STR_LIT>')<EOL>if not os.path.isfile(sbot):<EOL><INDENT>print('<STR_LIT>')<EOL>sbot = which('<STR_LIT>')<EOL><DEDENT><DEDENT>return os.path.realpath(sbot)<EOL>", "docstring": "Find shoebot executable", "id": "f11494:m9"}
{"signature": "def mk_examples_menu(text, root_dir=None, depth=<NUM_LIT:0>):", "body": "<EOL>examples_dir = ide_utils.get_example_dir()<EOL>if not examples_dir:<EOL><INDENT>return None, []<EOL><DEDENT>root_dir = root_dir or examples_dir    <EOL>file_actions = []<EOL>menu = Gio.Menu.new()<EOL>base_item = Gio.MenuItem.new_submenu(text, menu)<EOL>for fn in sorted(os.listdir(root_dir)):<EOL><INDENT>path = os.path.join(root_dir, fn)<EOL>rel_path = path[len(examples_dir):]<EOL>if os.path.isdir(path):<EOL><INDENT>label = fn.capitalize()<EOL>item, sm_file_actions = mk_examples_menu(label, os.path.join(root_dir, fn))<EOL>menu.append_item(item)<EOL>file_actions.extend(sm_file_actions)<EOL><DEDENT>elif os.path.splitext(path)[<NUM_LIT:1>] in ['<STR_LIT>', '<STR_LIT>'] and not fn.startswith('<STR_LIT:_>'):<EOL><INDENT>label = ide_utils.make_readable_filename(fn)<EOL>action_name = \"<STR_LIT>\" % encode_relpath(rel_path)<EOL>menu.append(label, action_name)<EOL>file_actions.append(rel_path)<EOL><DEDENT><DEDENT>return base_item, file_actions<EOL>", "docstring": ":return: base_item, rel_paths", "id": "f11494:m2"}
{"signature": "def examples_menu(root_dir=None, depth=<NUM_LIT:0>):", "body": "<EOL>examples_dir = ide_utils.get_example_dir()<EOL>if not examples_dir:<EOL><INDENT>return \"<STR_LIT>\", [], []<EOL><DEDENT>root_dir = root_dir or examples_dir<EOL>file_tmpl = '<STR_LIT>'<EOL>dir_tmpl = '<STR_LIT>'<EOL>file_actions = []<EOL>submenu_actions = []<EOL>xml = \"<STR_LIT>\"<EOL>for fn in sorted(os.listdir(root_dir)):<EOL><INDENT>path = os.path.join(root_dir, fn)<EOL>rel_path = path[len(examples_dir):]<EOL>if os.path.isdir(path):<EOL><INDENT>action = '<STR_LIT>'.format(rel_path)<EOL>label = fn.capitalize()<EOL>sm_xml, sm_file_actions, sm_menu_actions = examples_menu(os.path.join(root_dir, fn), depth+<NUM_LIT:1>)<EOL>submenu_actions.extend(sm_menu_actions)<EOL>file_actions.extend(sm_file_actions)<EOL>submenu_actions.append((action, label))<EOL>xml += dir_tmpl.format(name=fn, action=action, menu=sm_xml)<EOL><DEDENT>elif os.path.splitext(path)[<NUM_LIT:1>] in ['<STR_LIT>', '<STR_LIT>'] and not fn.startswith('<STR_LIT:_>'):<EOL><INDENT>action = '<STR_LIT>'.format(rel_path)<EOL>label = ide_utils.make_readable_filename(fn)<EOL>xml += file_tmpl.format(name=fn, action=action)<EOL>file_actions.append((action, label))<EOL><DEDENT><DEDENT>return xml, file_actions, submenu_actions<EOL>", "docstring": ":return: xml for menu, [(bot_action, label), ...], [(menu_action, label), ...]", "id": "f11494:m0"}
{"signature": "def get_child_by_name(parent, name):", "body": "<EOL>def iterate_children(widget, name):<EOL><INDENT>if widget.get_name() == name:<EOL><INDENT>return widget<EOL><DEDENT>try:<EOL><INDENT>for w in widget.get_children():<EOL><INDENT>result = iterate_children(w, name)<EOL>if result is not None:<EOL><INDENT>return result<EOL><DEDENT>else:<EOL><INDENT>continue<EOL><DEDENT><DEDENT><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return iterate_children(parent, name)<EOL>", "docstring": "Iterate through a gtk container, `parent`,\nand return the widget with the name `name`.", "id": "f11494:m4"}
{"signature": "def examples_menu(root_dir=None, depth=<NUM_LIT:0>):", "body": "examples_dir = ide_utils.get_example_dir()<EOL>if not examples_dir:<EOL><INDENT>return \"<STR_LIT>\", [], []<EOL><DEDENT>root_dir = root_dir or examples_dir<EOL>file_tmpl = '<STR_LIT>'<EOL>dir_tmpl = '<STR_LIT>'<EOL>file_actions = []<EOL>submenu_actions = []<EOL>xml = \"<STR_LIT>\"<EOL>for fn in sorted(os.listdir(root_dir)):<EOL><INDENT>path = os.path.join(root_dir, fn)<EOL>rel_path = path[len(examples_dir):]<EOL>if os.path.isdir(path):<EOL><INDENT>action = '<STR_LIT>'.format(rel_path)<EOL>label = fn.capitalize()<EOL>sm_xml, sm_file_actions, sm_menu_actions = examples_menu(os.path.join(root_dir, fn), depth+<NUM_LIT:1>)<EOL>submenu_actions.extend(sm_menu_actions)<EOL>file_actions.extend(sm_file_actions)<EOL>submenu_actions.append((action, label))<EOL>xml += dir_tmpl.format(name=fn, action=action, menu=sm_xml)<EOL><DEDENT>elif os.path.splitext(path)[<NUM_LIT:1>] in ['<STR_LIT>', '<STR_LIT>'] and not fn.startswith('<STR_LIT:_>'):<EOL><INDENT>action = '<STR_LIT>'.format(rel_path)<EOL>label = ide_utils.make_readable_filename(fn)<EOL>xml += file_tmpl.format(name=fn, action=action)<EOL>file_actions.append((action, label))<EOL><DEDENT><DEDENT>return xml, file_actions, submenu_actions<EOL>", "docstring": ":return: xml for menu, [(bot_action, label), ...], [(menu_action, label), ...]", "id": "f11495:m0"}
{"signature": "def gedit2_menu(xml):", "body": "return MENU_UI.format(xml)<EOL>", "docstring": "Build XML for GEDIT3 Menus.\n\nPass in the xml returned by example_menu", "id": "f11495:m1"}
{"signature": "def __init__(self, bot, host, port):", "body": "create_listening_socket(host, port, self.listener)<EOL>self.shell = None<EOL>self.bot = bot<EOL>", "docstring": "Initialize server and start listening.", "id": "f11496:c0:m0"}
{"signature": "def listener(self, sock, *args):", "body": "conn, addr = sock.accept()<EOL>f = conn.makefile(conn)<EOL>self.shell = ShoebotCmd(self.bot, stdin=f, stdout=f, intro=INTRO)<EOL>print(_(\"<STR_LIT>\"))<EOL>GObject.io_add_watch(conn, GObject.IO_IN, self.handler)<EOL>if self.shell.intro:<EOL><INDENT>self.shell.stdout.write(str(self.shell.intro)+\"<STR_LIT:\\n>\")<EOL>self.shell.stdout.flush()<EOL><DEDENT>return True<EOL>", "docstring": "Asynchronous connection listener. Starts a handler for each connection.", "id": "f11496:c0:m1"}
{"signature": "def handler(self, conn, *args):", "body": "<EOL>self.shell.stdout.write(self.shell.prompt)<EOL>line = self.shell.stdin.readline()<EOL>if not len(line):<EOL><INDENT>line = '<STR_LIT>'<EOL>return False<EOL><DEDENT>else:<EOL><INDENT>line = line.rstrip('<STR_LIT:\\r\\n>')<EOL>line = self.shell.precmd(line)<EOL>stop = self.shell.onecmd(line)<EOL>stop = self.shell.postcmd(stop, line)<EOL>self.shell.stdout.flush()<EOL>self.shell.postloop()<EOL>if stop:<EOL><INDENT>self.shell = None<EOL>conn.close()<EOL><DEDENT>return not stop<EOL><DEDENT>", "docstring": "Asynchronous connection handler. Processes each line from the socket.", "id": "f11496:c0:m2"}
{"signature": "def do_rewind(self, line):", "body": "self.print_response(\"<STR_LIT>\" % self.bot._frame)<EOL>self.bot._frame = <NUM_LIT:0><EOL>", "docstring": "rewind", "id": "f11497:c0:m11"}
{"signature": "def do_speed(self, speed):", "body": "if speed:<EOL><INDENT>try:<EOL><INDENT>self.bot._speed = float(speed)<EOL><DEDENT>except Exception as e:<EOL><INDENT>self.print_response('<STR_LIT>' % speed)<EOL>return<EOL><DEDENT><DEDENT>self.print_response('<STR_LIT>' % self.bot._speed)<EOL>", "docstring": "rewind", "id": "f11497:c0:m6"}
{"signature": "def do_set(self, line):", "body": "try:<EOL><INDENT>name, value = [part.strip() for part in line.split('<STR_LIT:=>')]<EOL>if name not in self.bot._vars:<EOL><INDENT>self.print_response('<STR_LIT>' % name)<EOL>return<EOL><DEDENT>variable = self.bot._vars[name]<EOL>variable.value = variable.sanitize(value.strip('<STR_LIT:;>'))<EOL>success, msg = self.bot.canvas.sink.var_changed(name, variable.value)<EOL>if success:<EOL><INDENT>print('<STR_LIT>'.format(name, variable.value), file=self.stdout)<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>'.format(msg), file=self.stdout)<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>print('<STR_LIT>', e)<EOL>return<EOL><DEDENT>", "docstring": "Set a variable.", "id": "f11497:c0:m21"}
{"signature": "@trusted_cmd<EOL><INDENT>def do_load_base64(self, line):<DEDENT>", "body": "cookie = self.cookie<EOL>executor = self.bot._executor<EOL>def source_good():<EOL><INDENT>self.print_response(status=RESPONSE_CODE_OK, cookie=cookie)<EOL>executor.clear_callbacks()<EOL><DEDENT>def source_bad(tb):<EOL><INDENT>if called_good:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.print_response(status=RESPONSE_REVERTED, keep=True, cookie=cookie)<EOL>self.print_response(tb.replace('<STR_LIT:\\n>', '<STR_LIT>'), cookie=cookie)<EOL>executor.clear_callbacks()<EOL><DEDENT>called_good = False<EOL>source = str(base64.b64decode(line))<EOL>publish_event(SOURCE_CHANGED_EVENT, data=source, extra_channels=\"<STR_LIT>\")<EOL>self.bot._executor.load_edited_source(source, good_cb=source_good, bad_cb=source_bad)<EOL>", "docstring": "load filename=(file)\nload base64=(base64 encoded)\n\nSend new code to shoebot.\n\nIf it does not run successfully shoebot will attempt to role back.\n\nEditors can enable livecoding by sending new code as it is edited.", "id": "f11497:c0:m13"}
{"signature": "def do_help(self, arg):", "body": "print(self.response_prompt, file=self.stdout)<EOL>return cmd.Cmd.do_help(self, arg)<EOL>", "docstring": "Show help on all commands.", "id": "f11497:c0:m20"}
{"signature": "def __init__(self, bot, intro=None, trusted=False, **kwargs):", "body": "cmd.Cmd.__init__(self, **kwargs)<EOL>self.bot = bot<EOL>self.pause_speed = None<EOL>self.intro = intro or INTRO<EOL>self.prompt = PROMPT<EOL>self.response_prompt = '<STR_LIT>'<EOL>self.use_rawinput = False<EOL>self.cookie = None<EOL>self.escape_nl = False<EOL>self.live_prefix = '<STR_LIT>'<EOL>self.trusted = trusted<EOL>", "docstring": ":param bot:\n:param intro:\n:param trusted: Only running from the commandline is trusted, not from sockets\n                untrusted can only change variables\n:param kwargs:\n:return:", "id": "f11497:c0:m0"}
{"signature": "def do_escape_nl(self, arg):", "body": "if arg.lower() == '<STR_LIT>':<EOL><INDENT>self.escape_nl = False<EOL><DEDENT>else:<EOL><INDENT>self.escape_nl = True<EOL><DEDENT>", "docstring": "Escape newlines in any responses", "id": "f11497:c0:m3"}
{"signature": "def do_goto(self, line):", "body": "self.print_response(\"<STR_LIT>\" % line)<EOL>self.bot._frame = int(line)<EOL>", "docstring": "Go to specific frame\n:param line:\n:return:", "id": "f11497:c0:m10"}
{"signature": "def textmetrics(self, txt, width=None, height=None, **kwargs):", "body": "<EOL>txt = self.Text(txt, <NUM_LIT:0>, <NUM_LIT:0>, width, height, enableRendering=False, **kwargs)<EOL>return txt.metrics<EOL>", "docstring": "Returns the width and height of a string of text as a tuple\n        (according to current font settings).", "id": "f11499:c0:m41"}
{"signature": "def rect(self, x, y, width, height, roundness=<NUM_LIT:0.0>, draw=True, **kwargs):", "body": "path = self.BezierPath(**kwargs)<EOL>path.rect(x, y, width, height, roundness, self.rectmode)<EOL>if draw:<EOL><INDENT>path.draw()<EOL><DEDENT>return path<EOL>", "docstring": "Draws a rectangle with top left corner at (x,y)\n\n        The roundness variable sets rounded corners.", "id": "f11499:c0:m1"}
{"signature": "def rellineto(self, x, y):", "body": "if self._path is None:<EOL><INDENT>raise ShoebotError(_(\"<STR_LIT>\"))<EOL><DEDENT>self._path.rellineto(x,y)<EOL>", "docstring": "Draw a line using relative coordinates.", "id": "f11499:c0:m16"}
{"signature": "def strokewidth(self, w=None):", "body": "if w is not None:<EOL><INDENT>self._strokewidth = w<EOL><DEDENT>else:<EOL><INDENT>return self._strokewidth<EOL><DEDENT>", "docstring": "Set the stroke width.", "id": "f11499:c0:m35"}
{"signature": "def ellipse(self, x, y, width, height, draw=True, **kwargs):", "body": "path = self.BezierPath(**kwargs)<EOL>path.ellipse(x,y,width,height)<EOL>if draw:<EOL><INDENT>path.draw()<EOL><DEDENT>return path<EOL>", "docstring": "Draws an ellipse starting from (x,y)", "id": "f11499:c0:m4"}
{"signature": "def colormode(self, mode=None, crange=None):", "body": "if mode is not None:<EOL><INDENT>if mode == \"<STR_LIT>\":<EOL><INDENT>self.color_mode = Bot.RGB<EOL><DEDENT>elif mode == \"<STR_LIT>\":<EOL><INDENT>self.color_mode = Bot.HSB<EOL><DEDENT>else:<EOL><INDENT>raise NameError(_(\"<STR_LIT>\"))<EOL><DEDENT><DEDENT>if crange is not None:<EOL><INDENT>self.color_range = crange<EOL><DEDENT>return self.color_mode<EOL>", "docstring": "Sets the current colormode (can be RGB or HSB) and eventually\n        the color range.\n\n        If called without arguments, it returns the current colormode.", "id": "f11499:c0:m29"}
{"signature": "def textwidth(self, txt, width=None):", "body": "w = width<EOL>return self.textmetrics(txt, width=w)[<NUM_LIT:0>]<EOL>", "docstring": "Returns the width of a string of text according to the current\n        font settings.", "id": "f11499:c0:m42"}
{"signature": "def line(self, x1, y1, x2, y2, draw=True):", "body": "p = self._path<EOL>self.newpath()<EOL>self.moveto(x1,y1)<EOL>self.lineto(x2,y2)<EOL>self.endpath(draw=draw)<EOL>self._path = p<EOL>return p<EOL>", "docstring": "Draws a line from (x1,y1) to (x2,y2)", "id": "f11499:c0:m6"}
{"signature": "def stroke(self,*args):", "body": "self._strokecolor = self.color(*args)<EOL>return self._strokecolor<EOL>", "docstring": "Set a stroke color, applying it to new paths.", "id": "f11499:c0:m33"}
{"signature": "def __init__(self, canvas, namespace=None, vars=None):", "body": "Grammar.__init__(self, canvas, namespace=namespace, vars=vars)<EOL>canvas.set_bot(self)<EOL>self._autoclosepath = True<EOL>self._path = None<EOL>if self._input_device:<EOL><INDENT>for key_name, value in self._input_device.get_key_map().items():<EOL><INDENT>self._namespace[key_name] = value<EOL>setattr(self, key_name, value)<EOL><DEDENT><DEDENT>self._canvas.size = None<EOL>self._frame = <NUM_LIT:1><EOL>self._set_initial_defaults()<EOL>", "docstring": ":param canvas: Canvas implementation for output.\n:param namespace: Optionally specify a dict to inject as namespace\n:param vars: Optional dict containing initial values for variables", "id": "f11500:c0:m0"}
{"signature": "def grid(self, cols, rows, colSize=<NUM_LIT:1>, rowSize=<NUM_LIT:1>, shuffled=False):", "body": "<EOL>from random import shuffle<EOL>rowRange = range(int(rows))<EOL>colRange = range(int(cols))<EOL>if (shuffled):<EOL><INDENT>shuffle(rowRange)<EOL>shuffle(colRange)<EOL><DEDENT>for y in rowRange:<EOL><INDENT>for x in colRange:<EOL><INDENT>yield (x * colSize, y * rowSize)<EOL><DEDENT><DEDENT>", "docstring": "Returns an iterator that contains coordinate tuples.\n        The grid can be used to quickly create grid-like structures.\n        A common way to use them is:\n            for x, y in grid(10,10,12,12):\n                rect(x,y, 10,10)", "id": "f11500:c0:m24"}
{"signature": "def _mouse_button_down(self, button):", "body": "self._namespace['<STR_LIT>'] = True<EOL>", "docstring": "GUI callback for mouse button down", "id": "f11500:c0:m3"}
{"signature": "def _key_pressed(self, key, keycode):", "body": "self._namespace['<STR_LIT:key>'] = key<EOL>self._namespace['<STR_LIT>'] = keycode<EOL>self._namespace['<STR_LIT>'] = True<EOL>", "docstring": "GUI callback for key pressed", "id": "f11500:c0:m6"}
{"signature": "def _mouse_button_up(self, button):", "body": "self._namespace['<STR_LIT>'] = self._input_device.mouse_down<EOL>", "docstring": "GUI callback for mouse button up", "id": "f11500:c0:m4"}
{"signature": "def _key_released(self, key, keycode):", "body": "self._namespace['<STR_LIT>'] = self._input_device.key_down<EOL>", "docstring": "GUI callback for key released", "id": "f11500:c0:m7"}
{"signature": "def _set_initial_defaults(self):", "body": "DEFAULT_WIDTH, DEFAULT_HEIGHT = self._canvas.DEFAULT_SIZE<EOL>self.WIDTH = self._namespace.get('<STR_LIT>', DEFAULT_WIDTH)<EOL>self.HEIGHT = self._namespace.get('<STR_LIT>', DEFAULT_WIDTH)<EOL>if '<STR_LIT>' in self._namespace or '<STR_LIT>' in self._namespace:<EOL><INDENT>self.size(w=self._namespace.get('<STR_LIT>'), h=self._namespace.get('<STR_LIT>'))<EOL><DEDENT>self._transformmode = Bot.CENTER<EOL>self._canvas.settings(<EOL>fontfile=\"<STR_LIT>\",<EOL>fontsize=<NUM_LIT:16>,<EOL>align=Bot.LEFT,<EOL>lineheight=<NUM_LIT:1>,<EOL>fillcolor=self.color(<NUM_LIT>),<EOL>strokecolor=None,<EOL>strokewidth=<NUM_LIT:1.0>,<EOL>background=self.color(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>))<EOL>", "docstring": "Set the default values. Called at __init__ and at the end of run(),\n        do that new draw loop iterations don't take up values left over by the\n        previous one.", "id": "f11500:c0:m1"}
{"signature": "def _makeInstance(self, clazz, args, kwargs):", "body": "inst = clazz(self, *args, **kwargs)<EOL>return inst<EOL>", "docstring": "Creates an instance of a class defined in this document.\n           This method sets the context of the object to the current context.", "id": "f11500:c0:m10"}
{"signature": "def speed(self, framerate=None):", "body": "if framerate is not None:<EOL><INDENT>self._speed = framerate<EOL>self._dynamic = True<EOL><DEDENT>else:<EOL><INDENT>return self._speed<EOL><DEDENT>", "docstring": "Set animation framerate.\n\n        :param framerate: Frames per second to run bot.\n        :return: Current framerate of animation.", "id": "f11500:c0:m30"}
{"signature": "@property<EOL><INDENT>def is_edited(self):<DEDENT>", "body": "return self.edited_source is not None<EOL>", "docstring": ":return: True if source has been edited", "id": "f11501:c0:m1"}
{"signature": "def run_tenuous(self):", "body": "with LiveExecution.lock:<EOL><INDENT>ns_snapshot = copy.copy(self.ns)<EOL>try:<EOL><INDENT>source = self.edited_source<EOL>self.edited_source = None<EOL>self.do_exec(source, ns_snapshot)<EOL>self.known_good = source<EOL>self.call_good_cb()<EOL>return True, None<EOL><DEDENT>except Exception as ex:<EOL><INDENT>tb = traceback.format_exc()<EOL>self.call_bad_cb(tb)<EOL>self.ns.clear()<EOL>self.ns.update(ns_snapshot)<EOL>return False, ex<EOL><DEDENT><DEDENT>", "docstring": "Run edited source, if no exceptions occur then it\ngraduates to known good.", "id": "f11501:c0:m5"}
{"signature": "def do_exec(self, source, ns):", "body": "exec(source, ns)<EOL>", "docstring": "Override if you want to do something other than exec in ns\n\ntenuous is True if the source has just been edited and may fail", "id": "f11501:c0:m4"}
{"signature": "def call_good_cb(self):", "body": "with LiveExecution.lock:<EOL><INDENT>if self.good_cb and not self.good_cb():<EOL><INDENT>self.good_cb = None<EOL><DEDENT><DEDENT>", "docstring": "If good_cb returns True then keep it\n:return:", "id": "f11501:c0:m9"}
{"signature": "def _addvar(self, v):", "body": "oldvar = self._oldvars.get(v.name)<EOL>if oldvar is not None:<EOL><INDENT>if isinstance(oldvar, Variable):<EOL><INDENT>if oldvar.compliesTo(v):<EOL><INDENT>v.value = oldvar.value<EOL><DEDENT><DEDENT>else:<EOL><INDENT>v.value = v.sanitize(oldvar)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for listener in VarListener.listeners:<EOL><INDENT>listener.var_added(v)<EOL><DEDENT><DEDENT>self._vars[v.name] = v<EOL>self._namespace[v.name] = v.value<EOL>self._oldvars[v.name] = v<EOL>return v<EOL>", "docstring": "Sets a new accessible variable.\n\n        :param v: Variable.", "id": "f11502:c0:m7"}
{"signature": "def nofill(self):", "body": "self._canvas.fillcolor = None<EOL>", "docstring": "Stop applying fills to new paths.", "id": "f11505:c0:m40"}
{"signature": "def textwidth(self, txt, width=None):", "body": "w = width<EOL>return self.textmetrics(txt, width=w)[<NUM_LIT:0>]<EOL>", "docstring": ":return: the width of a string of text according to the current\nfont settings.", "id": "f11505:c0:m50"}
{"signature": "def transform(self, mode=None):", "body": "if mode:<EOL><INDENT>self._canvas.mode = mode<EOL><DEDENT>return self._canvas.mode<EOL>", "docstring": "Set the current transform mode.\n\n:param mode: CENTER or CORNER", "id": "f11505:c0:m28"}
{"signature": "def arrow(self, x, y, width, type=NORMAL, draw=True, **kwargs):", "body": "<EOL>path = self.BezierPath(**kwargs)<EOL>if type == self.NORMAL:<EOL><INDENT>head = width * <NUM_LIT><EOL>tail = width * <NUM_LIT><EOL>path.moveto(x, y)<EOL>path.lineto(x - head, y + head)<EOL>path.lineto(x - head, y + tail)<EOL>path.lineto(x - width, y + tail)<EOL>path.lineto(x - width, y - tail)<EOL>path.lineto(x - head, y - tail)<EOL>path.lineto(x - head, y - head)<EOL>path.lineto(x, y)<EOL><DEDENT>elif type == self.FORTYFIVE:<EOL><INDENT>head = <NUM_LIT><EOL>tail = <NUM_LIT:1> + head<EOL>path.moveto(x, y)<EOL>path.lineto(x, y + width * (<NUM_LIT:1> - head))<EOL>path.lineto(x - width * head, y + width)<EOL>path.lineto(x - width * head, y + width * tail * <NUM_LIT>)<EOL>path.lineto(x - width * tail * <NUM_LIT>, y + width)<EOL>path.lineto(x - width, y + width * tail * <NUM_LIT>)<EOL>path.lineto(x - width * tail * <NUM_LIT>, y + width * head)<EOL>path.lineto(x - width, y + width * head)<EOL>path.lineto(x - width * (<NUM_LIT:1> - head), y)<EOL>path.lineto(x, y)<EOL><DEDENT>else:<EOL><INDENT>raise NameError(_(\"<STR_LIT>\"))<EOL><DEDENT>if draw:<EOL><INDENT>path.draw()<EOL><DEDENT>return path<EOL>", "docstring": "Draw an arrow.\n\n        Arrows can be two types: NORMAL or FORTYFIVE.\n\n        :param x: top left x-coordinate\n        :param y: top left y-coordinate\n        :param width: width of arrow\n        :param type:  NORMAL or FORTYFIVE\n        :draw:  If True draws arrow immediately\n\n        :return: Path object representing the arrow.", "id": "f11505:c0:m10"}
{"signature": "@property<EOL><INDENT>def canvas(self):<DEDENT>", "body": "return self._canvas<EOL>", "docstring": "Not entirely sure compatible the Shoebot 'canvas' is with Nodebox\nbut there you go.\n:return:", "id": "f11505:c0:m56"}
{"signature": "def line(self, x1, y1, x2, y2, draw=True):", "body": "p = self._path<EOL>self.beginpath()<EOL>self.moveto(x1, y1)<EOL>self.lineto(x2, y2)<EOL>self.endpath(draw=draw)<EOL>self._path = p<EOL>return p<EOL>", "docstring": "Draw a line from (x1,y1) to (x2,y2)\n        :param x1: start x-coordinate\n        :param y1: start y-coordinate\n        :param x2: end x-coordinate\n        :param y2: end y-coordinate", "id": "f11505:c0:m9"}
{"signature": "def textpath(self, txt, x, y, width=None, height=<NUM_LIT>, draw=False, **kwargs):", "body": "txt = self.Text(txt, x, y, width, height, enableRendering=False, **kwargs)<EOL>path = txt.path<EOL>if draw:<EOL><INDENT>path.draw()<EOL><DEDENT>return path<EOL>", "docstring": "Generates an outlined path of the input text.\n\n:param txt: Text to output\n:param x: x-coordinate of the top left corner\n:param y: y-coordinate of the top left corner\n:param width: text width\n:param height: text height\n:param draw: Set to False to inhibit immediate drawing (defaults to False)\n:return: Path object representing the text.", "id": "f11505:c0:m48"}
{"signature": "def translate(self, xt, yt, mode=None):", "body": "self._canvas.translate(xt, yt)<EOL>if mode:<EOL><INDENT>self._canvas.mode = mode<EOL><DEDENT>", "docstring": "Translate the current position by (xt, yt) and\noptionally set the transform mode.\n\n:param xt: Amount to move horizontally\n:param yt: Amount to move vertically\n:mode: Set the transform mode to CENTER or CORNER", "id": "f11505:c0:m29"}
{"signature": "def circle(self, x, y, diameter, draw=True, **kwargs):", "body": "return self.ellipse(x, y, diameter, diameter, draw, **kwargs)<EOL>", "docstring": "Draw a circle\n        :param x: x-coordinate of the top left corner\n        :param y: y-coordinate of the top left corner\n        :param diameter: Diameter of circle.\n        :param draw: Draw immediately (defaults to True, set to False to inhibit drawing)\n        :return: Path object representing circle", "id": "f11505:c0:m8"}
{"signature": "def font(self, fontpath=None, fontsize=None):", "body": "if fontpath is not None:<EOL><INDENT>self._canvas.fontfile = fontpath<EOL><DEDENT>else:<EOL><INDENT>return self._canvas.fontfile<EOL><DEDENT>if fontsize is not None:<EOL><INDENT>self._canvas.fontsize = fontsize<EOL><DEDENT>", "docstring": "Set the font to be used with new text instances.\n\n        :param fontpath: path to truetype or opentype font.\n        :param fontsize: size of font\n\n        :return: current current fontpath (if fontpath param not set)\n        Accepts TrueType and OpenType files. Depends on FreeType being\n        installed.", "id": "f11505:c0:m45"}
{"signature": "def fill(self, *args):", "body": "if args is not None:<EOL><INDENT>self._canvas.fillcolor = self.color(*args)<EOL><DEDENT>return self._canvas.fillcolor<EOL>", "docstring": "Sets a fill color, applying it to new paths.\n\n        :param args: color in supported format", "id": "f11505:c0:m39"}
{"signature": "def fontsize(self, fontsize=None):", "body": "if fontsize is not None:<EOL><INDENT>self._canvas.fontsize = fontsize<EOL><DEDENT>else:<EOL><INDENT>return self._canvas.fontsize<EOL><DEDENT>", "docstring": "Set or return size of current font.\n\n:param fontsize: Size of font.\n:return: Size of font (if fontsize was not specified)", "id": "f11505:c0:m46"}
{"signature": "def background(self, *args):", "body": "self._canvas.background = self.color(*args)<EOL>", "docstring": "Set the background color.\n\n        :param color: See color() function for supported color formats.", "id": "f11505:c0:m44"}
{"signature": "def text(self, txt, x, y, width=None, height=<NUM_LIT>, outline=False, draw=True, **kwargs):", "body": "txt = self.Text(txt, x, y, width, height, outline=outline, ctx=None, **kwargs)<EOL>if outline:<EOL><INDENT>path = txt.path<EOL>if draw:<EOL><INDENT>path.draw()<EOL><DEDENT>return path<EOL><DEDENT>else:<EOL><INDENT>return txt<EOL><DEDENT>", "docstring": "Draws a string of text according to current font settings.\n\n:param txt: Text to output\n:param x: x-coordinate of the top left corner\n:param y: y-coordinate of the top left corner\n:param width: text width\n:param height: text height\n:param outline: If True draws outline text (defaults to False)\n:param draw: Set to False to inhibit immediate drawing (defaults to True)\n:return: Path object representing the text.", "id": "f11505:c0:m47"}
{"signature": "def rect(self, x, y, width, height, roundness=<NUM_LIT:0.0>, draw=True, **kwargs):", "body": "path = self.BezierPath(**kwargs)<EOL>path.rect(x, y, width, height, roundness, self.rectmode)<EOL>if draw:<EOL><INDENT>path.draw()<EOL><DEDENT>return path<EOL>", "docstring": "Draw a rectangle from x, y of width, height.\n\n:param startx: top left x-coordinate\n:param starty: top left y-coordinate\n\n:param width: height  Size of rectangle.\n:roundness: Corner roundness defaults to 0.0 (a right-angle).\n:draw: If True draws immediately.\n:fill: Optionally pass a fill color.\n\n:return: path representing the rectangle.", "id": "f11505:c0:m3"}
{"signature": "def outputmode(self):", "body": "raise NotImplementedError(_(\"<STR_LIT>\"))<EOL>", "docstring": "NOT IMPLEMENTED", "id": "f11505:c0:m36"}
{"signature": "def textmetrics(self, txt, width=None, height=None, **kwargs):", "body": "<EOL>txt = self.Text(txt, <NUM_LIT:0>, <NUM_LIT:0>, width, height, enableRendering=False, **kwargs)<EOL>return txt.metrics<EOL>", "docstring": ":return: the width and height of a string of text as a tuple\naccording to current font settings.", "id": "f11505:c0:m49"}
{"signature": "def stroke(self, *args):", "body": "if args is not None:<EOL><INDENT>self._canvas.strokecolor = self.color(*args)<EOL><DEDENT>return self._canvas.strokecolor<EOL>", "docstring": "Set a stroke color, applying it to new paths.\n\n        :param args: color in supported format", "id": "f11505:c0:m41"}
{"signature": "def ellipse(self, x, y, width, height, draw=True, **kwargs):", "body": "path = self.BezierPath(**kwargs)<EOL>path.ellipse(x, y, width, height, self.ellipsemode)<EOL>if draw:<EOL><INDENT>path.draw()<EOL><DEDENT>return path<EOL>", "docstring": "Draw an ellipse starting from (x,y)", "id": "f11505:c0:m7"}
{"signature": "def rotate(self, degrees=<NUM_LIT:0>, radians=<NUM_LIT:0>):", "body": "<EOL>if radians:<EOL><INDENT>angle = radians<EOL><DEDENT>else:<EOL><INDENT>angle = deg2rad(degrees)<EOL><DEDENT>self._canvas.rotate(-angle)<EOL>", "docstring": "Set the current rotation in degrees or radians.\n\n:param degrees: Degrees to rotate\n:param radians: Radians to rotate", "id": "f11505:c0:m30"}
{"signature": "def colorrange(self, crange):", "body": "self.color_range = float(crange)<EOL>", "docstring": "By default colors range from 0.0 - 1.0 using colorrange\n        other defaults can be used, e.g. 0.0 - 255.0\n\n        :param crange: Color range of 0.0 - 255:\n        >>> colorrange(256)", "id": "f11505:c0:m38"}
{"signature": "def refresh(self):", "body": "self.reset()<EOL>self.parse(self.source)<EOL>return self.output()<EOL>", "docstring": "reset output buffer, re-parse entire source file, and return output\n\n        Since parsing involves a good deal of randomness, this is an\n        easy way to get new output without having to reload a grammar file\n        each time.", "id": "f11511:c1:m6"}
{"signature": "def reset(self):", "body": "self.pieces = []<EOL>self.capitalizeNextWord = <NUM_LIT:0><EOL>", "docstring": "reset parser", "id": "f11511:c1:m5"}
{"signature": "def parse_Element(self, node):", "body": "handlerMethod = getattr(self, \"<STR_LIT>\" % node.tagName)<EOL>handlerMethod(node)<EOL>", "docstring": "parse an element\n\n        An XML element corresponds to an actual tag in the source:\n        <xref id='...'>, <p chance='...'>, <choice>, etc.\n        Each element type is handled in its own method.  Like we did in\n        parse(), we construct a method name based on the name of the\n        element (\"do_xref\" for an <xref> tag, etc.) and\n        call the method.", "id": "f11511:c1:m12"}
{"signature": "def openAnything(source, searchpaths=None):", "body": "if hasattr(source, \"<STR_LIT>\"):<EOL><INDENT>return source<EOL><DEDENT>if source == \"<STR_LIT:->\":<EOL><INDENT>import sys<EOL>return sys.stdin<EOL><DEDENT>import urllib.request, urllib.parse, urllib.error<EOL>try:<EOL><INDENT>return urllib.request.urlopen(source)<EOL><DEDENT>except (IOError, OSError):<EOL><INDENT>pass<EOL><DEDENT>for path in searchpaths or ['<STR_LIT:.>']:<EOL><INDENT>try:<EOL><INDENT>return open(os.path.join(path, source))<EOL><DEDENT>except (IOError, OSError):<EOL><INDENT>pass<EOL><DEDENT><DEDENT>import io<EOL>return io.StringIO(str(source))<EOL>", "docstring": "URI, filename, or string --> stream\n\n    This function lets you define parsers that take any input source\n    (URL, pathname to local or network file, or actual data as a string)\n    and deal with it in a uniform manner.  Returned object is guaranteed\n    to have all the basic stdio read methods (read, readline, readlines).\n    Just .close() the object when you're done with it.\n\n    Examples:\n    >>> from xml.dom import minidom\n    >>> sock = openAnything(\"http://localhost/kant.xml\")\n    >>> doc = minidom.parse(sock)\n    >>> sock.close()\n    >>> sock = openAnything(\"c:\\\\inetpub\\\\wwwroot\\\\kant.xml\")\n    >>> doc = minidom.parse(sock)\n    >>> sock.close()\n    >>> sock = openAnything(\"<ref id='conjunction'><text>and</text><text>or</text></ref>\")\n    >>> doc = minidom.parse(sock)\n    >>> sock.close()", "id": "f11511:m0"}
{"signature": "def do_choice(self, node):", "body": "self.parse(self.randomChildElement(node))<EOL>", "docstring": "handle <choice> tag\n\n        A <choice> tag contains one or more <p> tags.  One <p> tag\n        is chosen at random and evaluated; the rest are ignored.", "id": "f11511:c1:m16"}
{"signature": "def do_xref(self, node):", "body": "id = node.attributes[\"<STR_LIT:id>\"].value<EOL>self.parse(self.randomChildElement(self.refs[id]))<EOL>", "docstring": "handle <xref id='...'> tag\n\n        An <xref id='...'> tag is a cross-reference to a <ref id='...'>\n        tag.  <xref id='sentence'/> evaluates to a randomly chosen child of\n        <ref id='sentence'>.", "id": "f11511:c1:m14"}
{"signature": "def do_p(self, node):", "body": "keys = list(node.attributes.keys())<EOL>if \"<STR_LIT:class>\" in keys:<EOL><INDENT>if node.attributes[\"<STR_LIT:class>\"].value == \"<STR_LIT>\":<EOL><INDENT>self.capitalizeNextWord = <NUM_LIT:1><EOL><DEDENT><DEDENT>if \"<STR_LIT>\" in keys:<EOL><INDENT>chance = int(node.attributes[\"<STR_LIT>\"].value)<EOL>doit = (chance > random.randrange(<NUM_LIT:100>))<EOL><DEDENT>else:<EOL><INDENT>doit = <NUM_LIT:1><EOL><DEDENT>if doit:<EOL><INDENT>for child in node.childNodes: self.parse(child)<EOL><DEDENT>", "docstring": "handle <p> tag\n\n        The <p> tag is the core of the grammar.  It can contain almost\n        anything: freeform text, <choice> tags, <xref> tags, even other\n        <p> tags.  If a \"class='sentence'\" attribute is found, a flag\n        is set and the next word will be capitalized.  If a \"chance='X'\"\n        attribute is found, there is an X% chance that the tag will be\n        evaluated (and therefore a (100-X)% chance that it will be\n        completely ignored)", "id": "f11511:c1:m15"}
{"signature": "def get_key_down(self):", "body": "return bool(self.keys_pressed)<EOL>", "docstring": "Return True if any key is pressed", "id": "f11513:c0:m2"}
{"signature": "def get_mouse_down(self):", "body": "return bool(self.mouse_buttons_down)<EOL>", "docstring": "Return True if any mouse button is pressed", "id": "f11513:c0:m3"}
{"signature": "def _output_file(self, frame):", "body": "if self.buff:<EOL><INDENT>return self.buff<EOL><DEDENT>elif self.multifile:<EOL><INDENT>return self.file_root + \"<STR_LIT>\" % frame + self.file_ext<EOL><DEDENT>else:<EOL><INDENT>return self.filename<EOL><DEDENT>", "docstring": "If filename was used output a filename, along with multifile\nnumbered filenames will be used.\n\nIf buff was specified it is returned.\n\n:return: Output buff or filename.", "id": "f11514:c0:m1"}
{"signature": "def create_rcontext(self, size, frame):", "body": "if self.format == '<STR_LIT>':<EOL><INDENT>surface = cairo.PDFSurface(self._output_file(frame), *size)<EOL><DEDENT>elif self.format in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>surface = cairo.PSSurface(self._output_file(frame), *size)<EOL><DEDENT>elif self.format == '<STR_LIT>':<EOL><INDENT>surface = cairo.SVGSurface(self._output_file(frame), *size)<EOL><DEDENT>elif self.format == '<STR_LIT>':<EOL><INDENT>surface = self.target<EOL><DEDENT>else:<EOL><INDENT>surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, *size)<EOL><DEDENT>return cairo.Context(surface)<EOL>", "docstring": "Called when CairoCanvas needs a cairo context to draw on", "id": "f11514:c0:m2"}
{"signature": "def rendering_finished(self, size, frame, cairo_ctx):", "body": "surface = cairo_ctx.get_target()<EOL>if self.format == '<STR_LIT>':<EOL><INDENT>surface.write_to_png(self._output_file(frame))<EOL><DEDENT>surface.finish()<EOL>surface.flush()<EOL>", "docstring": "Called when CairoCanvas has rendered a bot", "id": "f11514:c0:m3"}
{"signature": "def append(self, render_func):", "body": "self.render_funcs.append(render_func)<EOL>", "docstring": "Add a render function to the queue.", "id": "f11517:c0:m2"}
{"signature": "def settings(self, **kwargs):", "body": "for k, v in kwargs.items():<EOL><INDENT>setattr(self, k, v)<EOL><DEDENT>", "docstring": "Pass a load of settings into the canvas", "id": "f11518:c0:m8"}
{"signature": "def initial_drawqueue(self):", "body": "return DrawQueue()<EOL>", "docstring": "Override to create use special kinds of draw queue", "id": "f11518:c0:m3"}
{"signature": "def get_input_device(self):", "body": "return None<EOL>", "docstring": "Overrides can return actual input device", "id": "f11518:c0:m2"}
{"signature": "def flush(self, frame):", "body": "self.sink.render(self.size_or_default(), frame, self._drawqueue)<EOL>self.reset_drawqueue()<EOL>", "docstring": "Passes the drawqueue to the sink for rendering", "id": "f11518:c0:m14"}
{"signature": "def deferred_render(self, render_func):", "body": "self._drawqueue.append(render_func)<EOL>", "docstring": "Add a render function to the queue for rendering later", "id": "f11518:c0:m15"}
{"signature": "def initial_transform(self):", "body": "pass<EOL>", "docstring": "Must be overriden to create initial transform matrix", "id": "f11518:c0:m4"}
{"signature": "def render(self, size, frame, drawqueue):", "body": "r_context = self.create_rcontext(size, frame)<EOL>drawqueue.render(r_context)<EOL>self.rendering_finished(size, frame, r_context)<EOL>return r_context<EOL>", "docstring": "Calls implmentation to get a render context,\npasses it to the drawqueues render function\nthen calls self.rendering_finished", "id": "f11519:c0:m1"}
{"signature": "def create_rcontext(self, size, frame):", "body": "pass<EOL>", "docstring": "Returns a cairo context for drawing this\nframe of the bot", "id": "f11519:c0:m2"}
{"signature": "def event_is(event, event_t):", "body": "return event is not None and event.type == event_t<EOL>", "docstring": "Check if event type\n:param event:   event to compare\n:param event_t: event type\n:return: bool", "id": "f11520:m2"}
{"signature": "def publish_event(event_t, data=None, extra_channels=None, wait=None):", "body": "event = Event(event_t, data)<EOL>pubsub.publish(\"<STR_LIT>\", event)<EOL>for channel_name in extra_channels or []:<EOL><INDENT>pubsub.publish(channel_name, event)<EOL><DEDENT>if wait is not None:<EOL><INDENT>channel = pubsub.subscribe(wait)<EOL>channel.listen(wait)<EOL><DEDENT>", "docstring": "Publish an event ot any subscribers.\n\n:param event_t:  event type\n:param data:     event data\n:param extra_channels:\n:param wait:\n:return:", "id": "f11520:m3"}
{"signature": "def get_driver_options():", "body": "options = os.environ.get(\"<STR_LIT>\")<EOL>if not options:<EOL><INDENT>return {}<EOL><DEDENT>try:<EOL><INDENT>return dict([kv.split('<STR_LIT:=>') for kv in options.split()])<EOL><DEDENT>except ValueError:<EOL><INDENT>sys.stderr.write(\"<STR_LIT>\")<EOL>sys.stderr.write(\"<STR_LIT>\")<EOL>sys.stderr.write(\"<STR_LIT>\")<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Interpret env var as key=value\n:return:", "id": "f11521:m1"}
{"signature": "@property<EOL><INDENT>def sbot(self):<DEDENT>", "body": "self.bot_ready.wait()<EOL>return self._sbot<EOL>", "docstring": ":return: bot instance for communication", "id": "f11525:c1:m2"}
{"signature": "def __init__(self, create_args, create_kwargs,<EOL>run_args, run_kwargs,<EOL>send_sigint=False):", "body": "super(ShoebotThread, self).__init__()<EOL>self.bot_ready = threading.Event()<EOL>self.create_args = create_args<EOL>self.create_kwargs = create_kwargs<EOL>self.run_args = run_args<EOL>self.run_kwargs = run_kwargs<EOL>self.send_sigint = send_sigint<EOL>self._sbot = None<EOL>", "docstring": ":param create_args: passed to create_bot\n:param create_kwargs: passed to create_bot\n:param run_args: passed to bot.run\n:param run_kwargs: passed to bot.run\n:param send_sigint: if True then SIGINT will be sent on bot completion\n                    so the main thread can terminate", "id": "f11525:c1:m0"}
{"signature": "def error(message):", "body": "global parser<EOL>print (_(\"<STR_LIT>\") + message)<EOL>print ()<EOL>parser.print_help()<EOL>sys.exit()<EOL>", "docstring": "Prints an error message, the help message and quits", "id": "f11526:m1"}
{"signature": "def warn(message):", "body": "print (_(\"<STR_LIT>\") + message)<EOL>", "docstring": "Print a warning message", "id": "f11526:m2"}
{"signature": "def get_key_map(self):", "body": "kdict = {}<EOL>for gdk_name in dir(Gdk):<EOL><INDENT>nb_name = gdk_name.upper()<EOL>kdict[nb_name] = getattr(Gdk, gdk_name)<EOL><DEDENT>return kdict<EOL>", "docstring": "Return a dict in the form of\n\nSHOEBOT_KEY_NAME, GTK_VALUE\n\nShoebot key names look like KEY_LEFT, whereas Gdk uses KEY_Left\n- Shoebot key names are derived from Nodebox 1, which was a mac\n  app.", "id": "f11528:c0:m8"}
{"signature": "def scale_context_and_center(self, cr):", "body": "bot_width, bot_height = self.bot_size<EOL>if self.width != bot_width or self.height != bot_height:<EOL><INDENT>if self.width < self.height:<EOL><INDENT>scale_x = float(self.width) / float(bot_width)<EOL>scale_y = scale_x<EOL>cr.translate(<NUM_LIT:0>, (self.height - (bot_height * scale_y)) / <NUM_LIT>)<EOL><DEDENT>elif self.width > self.height:<EOL><INDENT>scale_y = float(self.height) / float(bot_height)<EOL>scale_x = scale_y<EOL>cr.translate((self.width - (bot_width * scale_x)) / <NUM_LIT>, <NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>scale_x = <NUM_LIT:1.0><EOL>scale_y = <NUM_LIT:1.0><EOL><DEDENT>cr.scale(scale_x, scale_y)<EOL>self.input_device.scale_x = scale_y<EOL>self.input_device.scale_y = scale_y<EOL><DEDENT>", "docstring": "Scale context based on difference between bot size and widget", "id": "f11529:c1:m3"}
{"signature": "def do_drawing(self, size, frame, cairo_ctx):", "body": "if self.get_window() and not self.bot_size:<EOL><INDENT>self.set_size_request(*size)<EOL><DEDENT>self.bot_size = size<EOL>self.backing_store = BackingStore.get_backingstore(self.width, self.height)<EOL>cr = pycairo.Context(self.backing_store.surface)<EOL>if self.scale_fit:<EOL><INDENT>self.scale_context_and_center(cr)<EOL><DEDENT>cairo_ctx = driver.ensure_pycairo_context(cairo_ctx)<EOL>cr.set_source_surface(cairo_ctx.get_target())<EOL>cr.set_operator(cairo.OPERATOR_SOURCE)<EOL>cr.paint()<EOL>self.queue_draw()<EOL>while Gtk.events_pending():<EOL><INDENT>Gtk.main_iteration_do(False)<EOL><DEDENT>", "docstring": "Update the backing store from a cairo context and\nschedule a redraw (expose event)\n\n:param size: width, height in pixels of bot\n:param frame: frame # thar was drawn\n:param cairo_ctx: cairo context the bot was drawn on", "id": "f11529:c1:m6"}
{"signature": "def create_rcontext(self, size, frame):", "body": "self.frame = frame<EOL>width, height = size<EOL>meta_surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, (<NUM_LIT:0>, <NUM_LIT:0>, width, height))<EOL>ctx = cairo.Context(meta_surface)<EOL>return ctx<EOL>", "docstring": "Creates a recording surface for the bot to draw on\n\n:param size: The width and height of bot", "id": "f11529:c1:m5"}
{"signature": "def trigger_fullscreen_action(self, fullscreen):", "body": "action = self.action_group.get_action('<STR_LIT>')<EOL>action.set_active(fullscreen)<EOL>", "docstring": "Toggle fullscreen from outside the GUI,\ncauses the GUI to updated and run all its actions.", "id": "f11530:c0:m13"}
{"signature": "def var_window_closed(self, widget):", "body": "<EOL>self.action_group.get_action('<STR_LIT>').set_active(False)<EOL>self.show_vars = False<EOL>self.var_window = None<EOL>", "docstring": "Called if user clicked close button on var window\n:param widget:\n:return:", "id": "f11530:c0:m6"}
{"signature": "def do_window_close(self, widget, data=None):", "body": "publish_event(QUIT_EVENT)<EOL>if self.has_server:<EOL><INDENT>self.sock.close()<EOL><DEDENT>self.hide_variables_window()<EOL>self.destroy()<EOL>self.window_open = False<EOL>", "docstring": "Widget Action to Close the window, triggering the quit event.", "id": "f11530:c0:m16"}
{"signature": "def create_rcontext(self, size, frame):", "body": "return self.sb_widget.create_rcontext(size, frame)<EOL>", "docstring": "Delegates to the sb_widget", "id": "f11530:c0:m3"}
{"signature": "def do_fullscreen(self, widget):", "body": "self.fullscreen()<EOL>self.is_fullscreen = True<EOL>while Gtk.events_pending():<EOL><INDENT>Gtk.main_iteration()<EOL><DEDENT>self.bot._screen_width = Gdk.Screen.width()<EOL>self.bot._screen_height = Gdk.Screen.height()<EOL>self.bot._screen_ratio = self.bot._screen_width / self.bot._screen_height<EOL>", "docstring": "Widget Action to Make the window fullscreen and update the bot.", "id": "f11530:c0:m14"}
{"signature": "def do_toggle_variables(self, action):", "body": "self.show_vars = action.get_active()<EOL>if self.show_vars:<EOL><INDENT>self.show_variables_window()<EOL><DEDENT>else:<EOL><INDENT>self.hide_variables_window()<EOL><DEDENT>", "docstring": "Widget Action to toggle showing the variables window.", "id": "f11530:c0:m19"}
{"signature": "def schedule_snapshot(self, format):", "body": "bot = self.bot<EOL>canvas = self.bot.canvas<EOL>script = bot._namespace['<STR_LIT>']<EOL>if script:<EOL><INDENT>filename = os.path.splitext(script)[<NUM_LIT:0>] + '<STR_LIT:.>' + format<EOL><DEDENT>else:<EOL><INDENT>filename = '<STR_LIT>' + format<EOL><DEDENT>f = canvas.output_closure(filename, self.bot._frame)<EOL>self.scheduled_snapshots.append(f)<EOL>", "docstring": "Tell the canvas to perform a snapshot when it's finished rendering\n:param format:\n:return:", "id": "f11530:c0:m8"}
{"signature": "def var_added(self, v):", "body": "self.add_variable(v)<EOL>self.window.set_size_request(<NUM_LIT>, <NUM_LIT> * len(self.widgets.keys()))<EOL>self.window.show_all()<EOL>", "docstring": "var was added in the bot while it ran, possibly\nby livecoding\n\n:param v:\n:return:", "id": "f11532:c0:m11"}
{"signature": "def widget_changed(self, widget, v):", "body": "<EOL>if v.type is NUMBER:<EOL><INDENT>self.bot._namespace[v.name] = widget.get_value()<EOL>self.bot._vars[v.name].value = widget.get_value()  <EOL>publish_event(VARIABLE_UPDATED_EVENT, v)  <EOL><DEDENT>elif v.type is BOOLEAN:<EOL><INDENT>self.bot._namespace[v.name] = widget.get_active()<EOL>self.bot._vars[v.name].value = widget.get_active()  <EOL>publish_event(VARIABLE_UPDATED_EVENT, v)  <EOL><DEDENT>elif v.type is TEXT:<EOL><INDENT>self.bot._namespace[v.name] = widget.get_text()<EOL>self.bot._vars[v.name].value = widget.get_text()  <EOL>publish_event(VARIABLE_UPDATED_EVENT, v)<EOL><DEDENT>", "docstring": "Called when a slider is adjusted.", "id": "f11532:c0:m10"}
{"signature": "def add_variables(self):", "body": "for k, v in self.bot._vars.items():<EOL><INDENT>if not hasattr(v, '<STR_LIT:type>'):<EOL><INDENT>raise AttributeError(<EOL>'<STR_LIT>' % k)<EOL><DEDENT>self.add_variable(v)<EOL><DEDENT>", "docstring": "Add all widgets to specified vbox\n:param container:\n:return:", "id": "f11532:c0:m1"}
{"signature": "def update_var(self, name, value):", "body": "widget = self.widgets.get(name)<EOL>if widget is None:<EOL><INDENT>return False, '<STR_LIT>'.format(name)<EOL><DEDENT>try:<EOL><INDENT>if isinstance(widget, Gtk.CheckButton):<EOL><INDENT>widget.set_active(value)<EOL>return True, widget.get_active()<EOL><DEDENT>elif isinstance(widget, Gtk.Entry):<EOL><INDENT>widget.set_text(value)<EOL>return True, widget.get_text()<EOL><DEDENT>else:<EOL><INDENT>widget.set_value(value)<EOL>return True, widget.get_value()<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>return False, str(e)<EOL><DEDENT>", "docstring": ":return: success, err_msg_if_failed", "id": "f11532:c0:m9"}
{"signature": "def save_as(self):", "body": "chooser = ShoebotFileChooserDialog(_('<STR_LIT>'), None, Gtk.FileChooserAction.SAVE,<EOL>(Gtk.STOCK_SAVE, Gtk.ResponseType.ACCEPT,<EOL>Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL))<EOL>chooser.set_do_overwrite_confirmation(True)<EOL>chooser.set_transient_for(self)<EOL>saved = chooser.run() == Gtk.ResponseType.ACCEPT<EOL>if saved:<EOL><INDENT>old_filename = self.filename<EOL>self.source_buffer.filename = chooser.get_filename()<EOL>if not self.save():<EOL><INDENT>self.filename = old_filename<EOL><DEDENT><DEDENT>chooser.destroy()<EOL>return saved<EOL>", "docstring": "Return True if the buffer was saved", "id": "f11533:c4:m29"}
{"signature": "def pangocairo_create_context(cr):", "body": "<EOL>try:<EOL><INDENT>return PangoCairo.create_context(cr)<EOL><DEDENT>except KeyError as e:<EOL><INDENT>if e.args == ('<STR_LIT>',):<EOL><INDENT>raise ShoebotInstallError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>", "docstring": "If python-gi-cairo is not installed, using PangoCairo.create_context\ndies with an unhelpful KeyError, check for that and output somethig\nuseful.", "id": "f11534:m0"}
{"signature": "def _deferred_render(self, render_func=None):", "body": "self._canvas.deferred_render(render_func or self._render)<EOL>", "docstring": "Pass a function to the canvas for deferred rendering,\ndefaults to self._render", "id": "f11535:c0:m7"}
{"signature": "def inheritFromContext(self, ignore=()):", "body": "for canvas_attr, grob_attr in STATES.items():<EOL><INDENT>if canvas_attr in ignore:<EOL><INDENT>continue<EOL><DEDENT>setattr(self, grob_attr, getattr(self._bot._canvas, canvas_attr))<EOL><DEDENT>", "docstring": "Doesn't store exactly the same items as Nodebox for ease of implementation,\nit has enough to get the Nodebox Dentrite example working.", "id": "f11535:c0:m9"}
{"signature": "def _center_transform(self, transform):", "body": "dx, dy = self._get_center()<EOL>t = cairo.Matrix()<EOL>t.translate(dx, dy)<EOL>t = transform * t<EOL>t.translate(-dx, -dy)<EOL>return t<EOL>", "docstring": "Works like setupTransform of a version of java nodebox\nhttp://dev.nodebox.net/browser/nodebox-java/branches/rewrite/src/java/net/nodebox/graphics/Grob.java", "id": "f11535:c0:m5"}
{"signature": "def _set_mode(self, mode):", "body": "if mode == CENTER:<EOL><INDENT>self._call_transform_mode = self._center_transform<EOL><DEDENT>elif mode == CORNER:<EOL><INDENT>self._call_transform_mode = self._corner_transform<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Sets call_transform_mode to point to the\ncenter_transform or corner_transform", "id": "f11535:c0:m1"}
{"signature": "def _get_center(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Implementations must return the x, y of their center", "id": "f11535:c0:m3"}
{"signature": "def _call_transform_mode(self):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "This should never get called:\nset mode, changes the value of this to point\n\ncorner_transform or center_transform", "id": "f11535:c0:m4"}
{"signature": "def _get_pathmode(self):", "body": "if self._pathmode is not None:<EOL><INDENT>return self._pathmode<EOL><DEDENT>else:<EOL><INDENT>return self._canvas.pathmode<EOL><DEDENT>", "docstring": "Return pathmode or get it from self._canvas", "id": "f11535:c0:m2"}
{"signature": "def lab_to_rgb(l, a, b):", "body": "y = (l + <NUM_LIT:16>) / <NUM_LIT><EOL>x = a / <NUM_LIT> + y<EOL>z = y - b / <NUM_LIT><EOL>v = [x, y, z]<EOL>for i in _range(<NUM_LIT:3>):<EOL><INDENT>if pow(v[i], <NUM_LIT:3>) > <NUM_LIT>:<EOL><INDENT>v[i] = pow(v[i], <NUM_LIT:3>)<EOL><DEDENT>else:<EOL><INDENT>v[i] = (v[i] - <NUM_LIT:16> / <NUM_LIT>) / <NUM_LIT><EOL><DEDENT><DEDENT>x = v[<NUM_LIT:0>] * <NUM_LIT> / <NUM_LIT:100><EOL>y = v[<NUM_LIT:1>] * <NUM_LIT> / <NUM_LIT:100><EOL>z = v[<NUM_LIT:2>] * <NUM_LIT> / <NUM_LIT:100><EOL>r = x * <NUM_LIT> + y * -<NUM_LIT> + z * -<NUM_LIT><EOL>g = x * -<NUM_LIT> + y * <NUM_LIT> + z * <NUM_LIT><EOL>b = x * <NUM_LIT> + y * -<NUM_LIT> + z * <NUM_LIT><EOL>v = [r, g, b]<EOL>for i in _range(<NUM_LIT:3>):<EOL><INDENT>if v[i] > <NUM_LIT>:<EOL><INDENT>v[i] = <NUM_LIT> * pow(v[i], <NUM_LIT:1> / <NUM_LIT>) - <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>v[i] = <NUM_LIT> * v[i]<EOL><DEDENT><DEDENT>r, g, b = v[<NUM_LIT:0>], v[<NUM_LIT:1>], v[<NUM_LIT:2>]<EOL>return r, g, b<EOL>", "docstring": "Converts CIE Lab to RGB components.\n\n    First we have to convert to XYZ color space.\n    Conversion involves using a white point,\n    in this case D65 which represents daylight illumination.\n\n    Algorithm adopted from:\n    http://www.easyrgb.com/math.php", "id": "f11536:m4"}
{"signature": "def __getattr__(self, a):", "body": "if a in self.__dict__:<EOL><INDENT>return a<EOL><DEDENT>elif a == \"<STR_LIT>\":<EOL><INDENT>return self.__dict__[\"<STR_LIT>\"]<EOL><DEDENT>elif a == \"<STR_LIT>\":<EOL><INDENT>return self.__dict__[\"<STR_LIT>\"]<EOL><DEDENT>elif a in [\"<STR_LIT:a>\", \"<STR_LIT>\",<EOL>\"<STR_LIT:r>\", \"<STR_LIT:g>\", \"<STR_LIT:b>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT:h>\", \"<STR_LIT:s>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT:c>\", \"<STR_LIT:m>\", \"<STR_LIT:y>\", \"<STR_LIT:k>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>return self.__dict__[\"<STR_LIT>\" + a[<NUM_LIT:0>]]<EOL><DEDENT>elif a in [\"<STR_LIT:a>\", \"<STR_LIT>\",<EOL>\"<STR_LIT:r>\", \"<STR_LIT:g>\", \"<STR_LIT:b>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT:h>\", \"<STR_LIT:s>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>return self.__dict__[\"<STR_LIT>\" + a[<NUM_LIT:0>]]<EOL><DEDENT>raise AttributeError(\"<STR_LIT:'>\" + str(self.__class__) + \"<STR_LIT>\" + a + \"<STR_LIT:'>\")<EOL>", "docstring": "Available properties:\n        r, g, b, a or red, green, blue, alpha\n        c, m, y, k or cyan, magenta, yellow, black,\n        h, s or hue, saturation, brightness", "id": "f11536:c0:m16"}
{"signature": "def invert(self):", "body": "m = self.matrix<EOL>d = m[<NUM_LIT:0>] * m[<NUM_LIT:4>] - m[<NUM_LIT:1>] * m[<NUM_LIT:3>]<EOL>self.matrix = [<EOL>m[<NUM_LIT:4>] / d, -m[<NUM_LIT:1>] / d, <NUM_LIT:0>,<EOL>-m[<NUM_LIT:3>] / d,  m[<NUM_LIT:0>] / d, <NUM_LIT:0>,<EOL>(m[<NUM_LIT:3>] * m[<NUM_LIT:7>] - m[<NUM_LIT:4>] * m[<NUM_LIT:6>]) / d,<EOL>-(m[<NUM_LIT:0>] * m[<NUM_LIT:7>] - m[<NUM_LIT:1>] * m[<NUM_LIT:6>]) / d,<EOL><NUM_LIT:1><EOL>]<EOL>", "docstring": "Multiplying a matrix by its inverse produces the identity matrix.", "id": "f11538:c0:m5"}
{"signature": "def lerp(a, b, t):", "body": "if t < <NUM_LIT:0.0>:<EOL><INDENT>return a<EOL><DEDENT>if t > <NUM_LIT:1.0>:<EOL><INDENT>return b<EOL><DEDENT>return a + (b - a) * t<EOL>", "docstring": "Returns the linear interpolation between a and b for time t between 0.0-1.0.\n        For example: lerp(100, 200, 0.5) => 150.", "id": "f11538:m5"}
{"signature": "def __init__(self, x, y, width, height):", "body": "<EOL>if width == None: width = INFINITE<EOL>if height == None: height = INFINITE<EOL>if width < <NUM_LIT:0>: x, width = x+width,  -width<EOL>if height < <NUM_LIT:0>: y, height = y+height, -height<EOL>self.x = x<EOL>self.y = y<EOL>self.width = width<EOL>self.height = height<EOL>", "docstring": "Creates a bounding box.\n            The bounding box is an untransformed rectangle that encompasses a shape or group of shapes.", "id": "f11538:c2:m0"}
{"signature": "def contains(self, *a):", "body": "if len(a) == <NUM_LIT:2>: a = [Point(a[<NUM_LIT:0>], a[<NUM_LIT:1>])]<EOL>if len(a) == <NUM_LIT:1>:<EOL><INDENT>a = a[<NUM_LIT:0>]<EOL>if isinstance(a, Point):<EOL><INDENT>return a.x >= self.x and a.x <= self.x+self.widthand a.y >= self.y and a.y <= self.y+self.height<EOL><DEDENT>if isinstance(a, Bounds):<EOL><INDENT>return a.x >= self.x and a.x+a.width <= self.x+self.widthand a.y >= self.y and a.y+a.height <= self.y+self.height<EOL><DEDENT><DEDENT>", "docstring": "Returns True if the given point or rectangle falls within the bounds.", "id": "f11538:c2:m6"}
{"signature": "def _mmult(self, a, b):", "body": "<EOL>return [<EOL>a[<NUM_LIT:0>] * b[<NUM_LIT:0>] + a[<NUM_LIT:1>] * b[<NUM_LIT:3>],<EOL>a[<NUM_LIT:0>] * b[<NUM_LIT:1>] + a[<NUM_LIT:1>] * b[<NUM_LIT:4>],<EOL><NUM_LIT:0>,<EOL>a[<NUM_LIT:3>] * b[<NUM_LIT:0>] + a[<NUM_LIT:4>] * b[<NUM_LIT:3>],<EOL>a[<NUM_LIT:3>] * b[<NUM_LIT:1>] + a[<NUM_LIT:4>] * b[<NUM_LIT:4>],<EOL><NUM_LIT:0>,<EOL>a[<NUM_LIT:6>] * b[<NUM_LIT:0>] + a[<NUM_LIT:7>] * b[<NUM_LIT:3>] + b[<NUM_LIT:6>],<EOL>a[<NUM_LIT:6>] * b[<NUM_LIT:1>] + a[<NUM_LIT:7>] * b[<NUM_LIT:4>] + b[<NUM_LIT:7>],<EOL><NUM_LIT:1><EOL>]<EOL>", "docstring": "Returns the 3x3 matrix multiplication of A and B.\n            Note that scale(), translate(), rotate() work with premultiplication,\n            e.g. the matrix A followed by B = BA and not AB.", "id": "f11538:c0:m4"}
{"signature": "def rotate(x, y, x0, y0, angle):", "body": "x, y = x - x0, y - y0<EOL>a, b = cos(radians(angle)), sin(radians(angle))<EOL>return (x * a - y * b + x0,<EOL>y * a + x * b + y0)<EOL>", "docstring": "Returns the coordinates of (x,y) rotated around origin (x0,y0).", "id": "f11538:m3"}
{"signature": "def __init__(self, name, type, **kwargs):", "body": "self.name = name<EOL>if not isinstance(name, basestring):<EOL><INDENT>raise AttributeError(\"<STR_LIT>\")<EOL><DEDENT>if kwargs.get(\"<STR_LIT>\") and kwargs.get(\"<STR_LIT>\"):<EOL><INDENT>raise AttributeError(\"<STR_LIT>\")  <EOL><DEDENT>self.type = type or NUMBER<EOL>self.min = None<EOL>self.max = None<EOL>self.step = None or kwargs.get(\"<STR_LIT>\")<EOL>self.steps = kwargs.get(\"<STR_LIT>\", DEFAULT_STEPS)<EOL>if self.type == NUMBER:<EOL><INDENT>self.min = kwargs.get(\"<STR_LIT>\", <NUM_LIT:0.0>)<EOL>self.max = kwargs.get(\"<STR_LIT>\", <NUM_LIT>)<EOL>if self.step is None:<EOL><INDENT>diff = max(self.min, self.max) - min(self.min, self.max)<EOL>self.step = (diff / float(self.steps))<EOL><DEDENT>self.default = kwargs.get(\"<STR_LIT:default>\")<EOL>if self.default is None:<EOL><INDENT>self.default = self.min<EOL><DEDENT><DEDENT>elif self.type == TEXT:<EOL><INDENT>self.default = kwargs.get(\"<STR_LIT:default>\", \"<STR_LIT>\")<EOL><DEDENT>elif self.type == BOOLEAN:<EOL><INDENT>self.default = kwargs.get(\"<STR_LIT:default>\", True)<EOL><DEDENT>elif self.type == BUTTON:<EOL><INDENT>self.default = kwargs.get(\"<STR_LIT:default>\", self.name)<EOL><DEDENT>else:<EOL><INDENT>raise AttributeError(\"<STR_LIT>\")<EOL><DEDENT>self.value = kwargs.get(\"<STR_LIT:value>\", self.default)<EOL>if self.value is None and self.default is not None:<EOL><INDENT>self.value = self.default<EOL><DEDENT>", "docstring": ":param name:     Name of variable\n:param type:     NUMBER | TEXT | BOOLEAN | BUTTON\n:param default:  default value\n:param min:      min value if number\n:param max:      max value if number\n:param value:    value\n:param step:     step between values - cannot specify at same time as step\n:param steps:    total steps\n:return:", "id": "f11541:c0:m0"}
{"signature": "def _append_element(self, render_func, pe):", "body": "self._render_funcs.append(render_func)<EOL>self._elements.append(pe)<EOL>", "docstring": "Append a render function and the parameters to pass\nan equivilent PathElement, or the PathElement itself.", "id": "f11542:c0:m1"}
{"signature": "def _curvelength(self, x0, y0, x1, y1, x2, y2, x3, y3, n=<NUM_LIT:20>):", "body": "<EOL>length = <NUM_LIT:0><EOL>xi = x0<EOL>yi = y0<EOL>for i in range(n):<EOL><INDENT>t = <NUM_LIT:1.0> * (i + <NUM_LIT:1>) / n<EOL>pt_x, pt_y, pt_c1x, pt_c1y, pt_c2x, pt_c2y =self._curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3)<EOL>c = sqrt(pow(abs(xi - pt_x), <NUM_LIT:2>) + pow(abs(yi - pt_y), <NUM_LIT:2>))<EOL>length += c<EOL>xi = pt_x<EOL>yi = pt_y<EOL><DEDENT>return length<EOL>", "docstring": "Returns the length of the spline.\n            Integrates the estimated length of the cubic bezier spline defined by x0, y0, ... x3, y3,\n            by adding the lengths of lineair lines between points at t.\n            The number of points is defined by n\n            (n=10 would add the lengths of lines between 0.0 and 0.1, between 0.1 and 0.2, and so on).\n            The default n=20 is fine for most cases, usually resulting in a deviation of less than 0.01.", "id": "f11542:c0:m27"}
{"signature": "def _render_closure(self):", "body": "fillcolor = self.fill<EOL>strokecolor = self.stroke<EOL>strokewidth = self.strokewidth<EOL>def _render(cairo_ctx):<EOL><INDENT>'''<STR_LIT>'''<EOL>transform = self._call_transform_mode(self._transform)<EOL>if fillcolor is None and strokecolor is None:<EOL><INDENT>return<EOL><DEDENT>cairo_ctx.set_matrix(transform)<EOL>self._traverse(cairo_ctx)<EOL>cairo_ctx.set_matrix(cairo.Matrix())<EOL>if fillcolor is not None and strokecolor is not None:<EOL><INDENT>if strokecolor[<NUM_LIT:3>] < <NUM_LIT:1>:<EOL><INDENT>cairo_ctx.push_group()<EOL>cairo_ctx.set_source_rgba(*fillcolor)<EOL>cairo_ctx.fill_preserve()<EOL>e = cairo_ctx.stroke_extents()<EOL>cairo_ctx.set_source_rgba(*strokecolor)<EOL>cairo_ctx.set_operator(cairo.OPERATOR_SOURCE)<EOL>cairo_ctx.set_line_width(strokewidth)<EOL>cairo_ctx.stroke()<EOL>cairo_ctx.pop_group_to_source()<EOL>cairo_ctx.paint()<EOL><DEDENT>else:<EOL><INDENT>cairo_ctx.set_source_rgba(*fillcolor)<EOL>cairo_ctx.fill_preserve()<EOL>cairo_ctx.set_source_rgba(*strokecolor)<EOL>cairo_ctx.set_line_width(strokewidth)<EOL>cairo_ctx.stroke()<EOL><DEDENT><DEDENT>elif fillcolor is not None:<EOL><INDENT>cairo_ctx.set_source_rgba(*fillcolor)<EOL>cairo_ctx.fill()<EOL><DEDENT>elif strokecolor is not None:<EOL><INDENT>cairo_ctx.set_source_rgba(*strokecolor)<EOL>cairo_ctx.set_line_width(strokewidth)<EOL>cairo_ctx.stroke()<EOL><DEDENT><DEDENT>return _render<EOL>", "docstring": "Use a closure so that draw attributes can be saved", "id": "f11542:c0:m18"}
{"signature": "def _get_contours(self):", "body": "<EOL>contours = []<EOL>current_contour = None<EOL>empty = True<EOL>for i, el in enumerate(self._get_elements()):<EOL><INDENT>if el.cmd == MOVETO:<EOL><INDENT>if not empty:<EOL><INDENT>contours.append(current_contour)<EOL><DEDENT>current_contour = BezierPath(self._bot)<EOL>current_contour.moveto(el.x, el.y)<EOL>empty = True<EOL><DEDENT>elif el.cmd == LINETO:<EOL><INDENT>empty = False<EOL>current_contour.lineto(el.x, el.y)<EOL><DEDENT>elif el.cmd == CURVETO:<EOL><INDENT>empty = False<EOL>current_contour.curveto(el.c1x, el.c1y, el.c2x, el.c2y, el.x, el.y)<EOL><DEDENT>elif el.cmd == CLOSE:<EOL><INDENT>current_contour.closepath()<EOL><DEDENT><DEDENT>if not empty:<EOL><INDENT>contours.append(current_contour)<EOL><DEDENT>return contours<EOL>", "docstring": "Returns a list of contours in the path, as BezierPath objects.\nA contour is a sequence of lines and curves separated from the next contour by a MOVETO.\nFor example, the glyph \"o\" has two contours: the inner circle and the outer circle.", "id": "f11542:c0:m20"}
{"signature": "def _get_elements(self):", "body": "for index, el in enumerate(self._elements):<EOL><INDENT>if isinstance(el, tuple):<EOL><INDENT>el = PathElement(*el)<EOL>self._elements[index] = el<EOL><DEDENT>yield el<EOL><DEDENT>", "docstring": "Yields all elements as PathElements", "id": "f11542:c0:m30"}
{"signature": "def _linepoint(self, t, x0, y0, x1, y1):", "body": "<EOL>out_x = x0 + t * (x1 - x0)<EOL>out_y = y0 + t * (y1 - y0)<EOL>return (out_x, out_y)<EOL>", "docstring": "Returns coordinates for point at t on the line.\n            Calculates the coordinates of x and y for a point at t on a straight line.\n            The t parameter is a number between 0.0 and 1.0,\n            x0 and y0 define the starting point of the line,\n            x1 and y1 the ending point of the line.", "id": "f11542:c0:m24"}
{"signature": "def contains(self, x, y):", "body": "if self._bounds:<EOL><INDENT>return self._bounds<EOL><DEDENT>record_surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, (-<NUM_LIT:1>, -<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>))<EOL>dummy_ctx = cairo.Context(record_surface)<EOL>self._traverse(dummy_ctx)<EOL>in_fill = dummy_ctx.in_fill(x, y)<EOL>return in_fill<EOL>", "docstring": "Return cached bounds of this Grob.\nIf bounds are not cached, render to a meta surface, and\nkeep the meta surface and bounds cached.", "id": "f11542:c0:m16"}
{"signature": "def point(self, t, segments=None):", "body": "<EOL>if len(self._elements) == <NUM_LIT:0>:<EOL><INDENT>raise PathError(\"<STR_LIT>\")<EOL><DEDENT>if self._segments is None:<EOL><INDENT>self._segments = self._get_length(segmented=True, precision=<NUM_LIT:10>)<EOL><DEDENT>i, t, closeto = self._locate(t, segments=self._segments)<EOL>x0, y0 = self[i].x, self[i].y<EOL>p1 = self[i + <NUM_LIT:1>]<EOL>if p1.cmd == CLOSE:<EOL><INDENT>x, y = self._linepoint(t, x0, y0, closeto.x, closeto.y)<EOL>return PathElement(LINETO, x, y)<EOL><DEDENT>elif p1.cmd in (LINETO, MOVETO):<EOL><INDENT>x1, y1 = p1.x, p1.y<EOL>x, y = self._linepoint(t, x0, y0, x1, y1)<EOL>return PathElement(LINETO, x, y)<EOL><DEDENT>elif p1.cmd == CURVETO:<EOL><INDENT>x3, y3, x1, y1, x2, y2 = p1.x, p1.y, p1.ctrl1.x, p1.ctrl1.y, p1.ctrl2.x, p1.ctrl2.y<EOL>x, y, c1x, c1y, c2x, c2y = self._curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3)<EOL>return PathElement(CURVETO, c1x, c1y, c2x, c2y, x, y)<EOL><DEDENT>else:<EOL><INDENT>raise PathError(\"<STR_LIT>\" % (p1.cmd, p1))<EOL><DEDENT>", "docstring": "Returns the PathElement at time t (0.0-1.0) on the path.\n\nReturns coordinates for point at t on the path.\nGets the length of the path, based on the length of each curve and line in the path.\nDetermines in what segment t falls. Gets the point on that segment.\nWhen you supply the list of segment lengths yourself, as returned from length(path, segmented=True),\npoint() works about thirty times faster in a for-loop since it doesn't need to recalculate\nthe length during each iteration.", "id": "f11542:c0:m22"}
{"signature": "def drawdaisy(x, y, color='<STR_LIT>'):", "body": "<EOL>_ctx.push()<EOL>_fill =_ctx.fill()<EOL>_stroke = _ctx.stroke()<EOL>sc = (<NUM_LIT:1.0> / _ctx.HEIGHT) * float(y * <NUM_LIT:0.5>) * <NUM_LIT><EOL>_ctx.strokewidth(sc * <NUM_LIT>)<EOL>_ctx.stroke('<STR_LIT>')<EOL>_ctx.line(x + (sin(x * <NUM_LIT:0.1>) * <NUM_LIT>), y + <NUM_LIT>, x + sin(_ctx.FRAME * <NUM_LIT:0.1>), y)<EOL>_ctx.translate(-<NUM_LIT:20>, <NUM_LIT:0>)<EOL>_ctx.scale(sc)<EOL>_ctx.fill(color)<EOL>_ctx.nostroke()<EOL>for angle in xrange(<NUM_LIT:0>, <NUM_LIT>, <NUM_LIT>):<EOL><INDENT>_ctx.rotate(degrees=<NUM_LIT>)<EOL>_ctx.rect(x, y, <NUM_LIT>, <NUM_LIT:8>, <NUM_LIT:1>)<EOL><DEDENT>_ctx.fill('<STR_LIT>')<EOL>_ctx.ellipse(x + <NUM_LIT:15>, y, <NUM_LIT:10>, <NUM_LIT:10>)<EOL>_ctx.fill(_fill)<EOL>_ctx.stroke(_stroke)<EOL>_ctx.pop()<EOL>", "docstring": "Draw a daisy at x, y", "id": "f11545:m0"}
{"signature": "def flatten_fft(scale=<NUM_LIT:1.0>):", "body": "_len = len(audio.spectrogram)<EOL>for i, v in enumerate(audio.spectrogram):<EOL><INDENT>yield scale * (i * v) / _len<EOL><DEDENT>", "docstring": "Produces a nicer graph, I'm not sure if this is correct", "id": "f11546:m1"}
{"signature": "def quit(self):", "body": "if self.running:<EOL><INDENT>self.running = False<EOL>self.join()<EOL><DEDENT>", "docstring": "Shutdown the audio thread", "id": "f11546:c1:m4"}
{"signature": "def scaled_fft(fft, scale=<NUM_LIT:1.0>):", "body": "data = np.zeros(len(fft))<EOL>for i, v in enumerate(fft):<EOL><INDENT>data[i] = scale * (i * v) / NUM_SAMPLES<EOL><DEDENT>return data<EOL>", "docstring": "Produces a nicer graph, I'm not sure if this is correct", "id": "f11546:m2"}
{"signature": "def fft_bandpassfilter(data, fs, lowcut, highcut):", "body": "fft = np.fft.fft(data)<EOL>bp = fft.copy()<EOL>bp *= fft.dot(fft) / bp.dot(bp)<EOL>ibp = <NUM_LIT:12> * np.fft.ifft(bp)<EOL>return ibp<EOL>", "docstring": "http://www.swharden.com/blog/2009-01-21-signal-filtering-with-python/#comment-16801", "id": "f11546:m0"}
{"signature": "def draw_cornu_bezier(x0, y0, t0, t1, s0, c0, flip, cs, ss, cmd, scale, rot):", "body": "s = None<EOL>for j in range(<NUM_LIT:0>, <NUM_LIT:5>):<EOL><INDENT>t = j * <NUM_LIT><EOL>t2 = t+ <NUM_LIT><EOL>curvetime = t0 + t * (t1 - t0)<EOL>curvetime2 = t0 + t2 * (t1 - t0)<EOL>Dt = (curvetime2 - curvetime) * scale<EOL>if not s:<EOL><INDENT>s, c = eval_cornu(curvetime)<EOL>s *= flip<EOL>s -= s0<EOL>c -= c0<EOL>dx1 =  cos(pow(curvetime, <NUM_LIT:2>) + (flip * rot))  <EOL>dy1 =  flip * sin(pow(curvetime, <NUM_LIT:2>) + (flip *rot))<EOL>x = ((c * cs - s * ss) +x0)<EOL>y = ((s * cs + c * ss) + y0)<EOL><DEDENT>s2,c2 = eval_cornu(curvetime2) <EOL>s2 *= flip<EOL>s2 -= s0<EOL>c2 -= c0<EOL>dx2 = cos(pow(curvetime2, <NUM_LIT:2>) + (flip * rot)) <EOL>dy2 = flip * sin(pow(curvetime2, <NUM_LIT:2>) + (flip * rot))<EOL>x3 = ((c2 * cs - s2 * ss)+x0)<EOL>y3 = ((s2 * cs + c2 * ss)+y0)<EOL>x1 = (x + ((Dt/<NUM_LIT>) * dx1))<EOL>y1 = (y + ((Dt/<NUM_LIT>) * dy1))   <EOL>x2 = (x3 - ((Dt/<NUM_LIT>) * dx2))<EOL>y2 = (y3 - ((Dt/<NUM_LIT>) * dy2))<EOL>if cmd == '<STR_LIT>':<EOL><INDENT>print_pt(x, y, cmd)<EOL>cmd = '<STR_LIT>'<EOL><DEDENT>print_crv(x1, y1, x2, y2, x3, y3)<EOL>dx1, dy1 = dx2, dy2<EOL>x,y = x3, y3<EOL><DEDENT>return cmd<EOL>", "docstring": "Mark Meyer's code draws elegant CURVETO segments.", "id": "f11548:m12"}
{"signature": "def draw_cornu_flat(x0, y0, t0, t1, s0, c0, flip, cs, ss, cmd):", "body": "for j in range(<NUM_LIT:0>, <NUM_LIT:100>):<EOL><INDENT>t = j * <NUM_LIT><EOL>s, c = eval_cornu(t0 + t * (t1 - t0))<EOL>s *= flip<EOL>s -= s0<EOL>c -= c0<EOL>x = c * cs - s * ss<EOL>y = s * cs + c * ss<EOL>print_pt(x0 + x, y0 + y, cmd)<EOL>cmd = '<STR_LIT>'<EOL><DEDENT>return cmd<EOL>", "docstring": "Raph Levien's code draws fast LINETO segments.", "id": "f11548:m11"}
{"signature": "def overlap(self, x1, y1, x2, y2, r=<NUM_LIT:5>):", "body": "if abs(x2-x1) < r and abs(y2-y1) < r:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Returns True when point 1 and point 2 overlap.\n\n        There is an r treshold in which point 1 and point 2\n        are considered to overlap.", "id": "f11549:c1:m1"}
{"signature": "def angle(self, x0, y0, x1, y1):", "body": "a = degrees( atan((y1-y0) / (x1-x0+<NUM_LIT>)) ) + <NUM_LIT><EOL>if x1-x0 < <NUM_LIT:0>: a += <NUM_LIT><EOL>return a<EOL>", "docstring": "Calculates the angle between two points.", "id": "f11549:c1:m3"}
{"signature": "def update(self):", "body": "x, y = mouse()<EOL>if self.show_grid:<EOL><INDENT>x, y = self.grid.snap(x, y)<EOL><DEDENT>if _ctx._ns[\"<STR_LIT>\"]and not self.freehand:<EOL><INDENT>self._dirty = True<EOL>if self.edit != Noneand not self.drag_pointand not self.drag_handle1and not self.drag_handle2:<EOL><INDENT>pt = self._points[self.edit]<EOL>dx = pt.x+self.btn_x<EOL>dy = pt.y+self.btn_y<EOL>if self.overlap(dx, dy, x, y, r=self.btn_r):<EOL><INDENT>self.delete = self.edit<EOL>return<EOL><DEDENT>dx += self.btn_r*<NUM_LIT:2> + <NUM_LIT:2><EOL>if self.edit == len(self._points) -<NUM_LIT:1> andself.overlap(dx, dy, x, y, r=self.btn_r):<EOL><INDENT>self.moveto = self.edit<EOL>return<EOL><DEDENT><DEDENT>if self.insert:<EOL><INDENT>self.inserting = True<EOL>return<EOL><DEDENT>if not self.drag_point andnot self.drag_handle1 andnot self.drag_handle2:<EOL><INDENT>self.editing = False<EOL>indices = range(len(self._points))<EOL>indices.reverse()<EOL>for i in indices:<EOL><INDENT>pt = self._points[i]<EOL>if pt != self.newand self.overlap(x, y, pt.x, pt.y)and self.new == None:<EOL><INDENT>if self.edit == i+<NUM_LIT:1>and self.overlap(self._points[i+<NUM_LIT:1>].ctrl1.x,<EOL>self._points[i+<NUM_LIT:1>].ctrl1.y, x, y):<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>self.edit = i<EOL>self.editing = True<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if not self.editing:<EOL><INDENT>if self.edit != None:<EOL><INDENT>pt = self._points[self.edit]<EOL>if self.overlap(pt.ctrl1.x, pt.ctrl1.y, x, y) orself.overlap(pt.ctrl2.x, pt.ctrl2.y, x, y):<EOL><INDENT>self.editing = True<EOL><DEDENT>else:<EOL><INDENT>self.edit = None<EOL><DEDENT><DEDENT><DEDENT>if self.edit == None:<EOL><INDENT>if self.new == None:<EOL><INDENT>self.new = PathElement()<EOL>if self.moveto == Trueor len(self._points) == <NUM_LIT:0>:<EOL><INDENT>cmd = MOVETO<EOL>self.moveto = None<EOL>self.last_moveto = self.new<EOL><DEDENT>else:<EOL><INDENT>cmd = CURVETO<EOL><DEDENT>self.new.cmd = cmd<EOL>self.new.x = 
x<EOL>self.new.y = y<EOL>self.new.ctrl1 = Point(x, y)<EOL>self.new.ctrl2 = Point(x, y)<EOL>self.new.freehand = False<EOL>if len(self._points) > <NUM_LIT:0>:<EOL><INDENT>prev = self._points[-<NUM_LIT:1>]<EOL>rx, ry = self.reflect(prev.x, prev.y, prev.ctrl2.x, prev.ctrl2.y)<EOL>self.new.ctrl1 = Point(rx, ry)<EOL><DEDENT>self._points.append(self.new)<EOL><DEDENT>else:<EOL><INDENT>rx, ry = self.reflect(self.new.x, self.new.y, x, y)<EOL>self.new.ctrl2 = Point(rx, ry)<EOL><DEDENT><DEDENT>elif self.new == None:<EOL><INDENT>pt = self._points[self.edit]<EOL>if self.overlap(pt.x, pt.y, x, y)and not self.drag_handle1and not self.drag_handle2and not self.new != None:<EOL><INDENT>self.drag_point = True<EOL>self.drag_handle1 = False<EOL>self.drag_handle2 = False<EOL><DEDENT>if self.overlap(pt.ctrl1.x, pt.ctrl1.y, x, y)and pt.cmd == CURVETOand not self.drag_pointand not self.drag_handle2:<EOL><INDENT>self.drag_point = False<EOL>self.drag_handle1 = True<EOL>self.drag_handle2 = False<EOL><DEDENT>if self.overlap(pt.ctrl2.x, pt.ctrl2.y, x, y)and pt.cmd == CURVETOand not self.drag_pointand not self.drag_handle1:<EOL><INDENT>self.drag_point = False<EOL>self.drag_handle1 = False<EOL>self.drag_handle2 = True<EOL><DEDENT>if self.drag_point == True:<EOL><INDENT>dx = x - pt.x<EOL>dy = y - pt.y<EOL>pt.x = x<EOL>pt.y = y<EOL>pt.ctrl2.x += dx<EOL>pt.ctrl2.y += dy<EOL>if self.edit < len(self._points)-<NUM_LIT:1>:<EOL><INDENT>rx, ry = self.reflect(pt.x, pt.y, x, y)<EOL>next = self._points[self.edit+<NUM_LIT:1>]<EOL>next.ctrl1.x += dx<EOL>next.ctrl1.y += dy<EOL><DEDENT><DEDENT>if self.drag_handle1 == True:<EOL><INDENT>pt.ctrl1 = Point(x, y)<EOL>if self.edit > <NUM_LIT:0>and self.last_key != \"<STR_LIT:x>\":<EOL><INDENT>prev = self._points[self.edit-<NUM_LIT:1>]<EOL>d = self.distance(prev.x, prev.y, prev.ctrl2.x, prev.ctrl2.y)<EOL>a = self.angle(prev.x, prev.y, pt.ctrl1.x, pt.ctrl1.y)<EOL>prev.ctrl2 = self.coordinates(prev.x, prev.y, d, a+<NUM_LIT>)                        <EOL><DEDENT><DEDENT>if 
self.drag_handle2 == True:   <EOL><INDENT>pt.ctrl2 = Point(x, y)<EOL>if self.edit < len(self._points)-<NUM_LIT:1>and self.last_key != \"<STR_LIT:x>\":<EOL><INDENT>next = self._points[self.edit+<NUM_LIT:1>]<EOL>d = self.distance(pt.x, pt.y, next.ctrl1.x, next.ctrl1.y)<EOL>a = self.angle(pt.x, pt.y, pt.ctrl2.x, pt.ctrl2.y)<EOL>next.ctrl1 = self.coordinates(pt.x, pt.y, d, a+<NUM_LIT>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif not self.freehand:<EOL><INDENT>self.new = None<EOL>self.drag_point = False<EOL>self.drag_handle1 = False<EOL>self.drag_handle2 = False<EOL>if self.delete != None and len(self._points) > <NUM_LIT:0>:<EOL><INDENT>i = self.delete<EOL>cmd = self._points[i].cmd<EOL>del self._points[i]<EOL>if <NUM_LIT:0> < i < len(self._points):<EOL><INDENT>prev = self._points[i-<NUM_LIT:1>]<EOL>rx, ry = self.reflect(prev.x, prev.y, prev.ctrl2.x, prev.ctrl2.y)<EOL>self._points[i].ctrl1 = Point(rx, ry)<EOL><DEDENT>start_i = i<EOL>while i > <NUM_LIT:1>:<EOL><INDENT>i -= <NUM_LIT:1><EOL>pt = self._points[i]<EOL>if pt.freehand:<EOL><INDENT>del self._points[i]<EOL><DEDENT>elif i < start_i-<NUM_LIT:1> and pt.freehand == False:<EOL><INDENT>if pt.cmd == MOVETO:<EOL><INDENT>del self._points[i]<EOL><DEDENT>break<EOL><DEDENT><DEDENT>if len(self._points) > <NUM_LIT:0>and (cmd == MOVETO or i == <NUM_LIT:0>):<EOL><INDENT>self.last_moveto = self._points[<NUM_LIT:0>]<EOL>for pt in self._points:<EOL><INDENT>if pt.cmd == MOVETO:<EOL><INDENT>self.last_moveto = pt<EOL><DEDENT><DEDENT><DEDENT>self.delete = None<EOL>self.edit = None<EOL><DEDENT>elif isinstance(self.moveto, int):<EOL><INDENT>self.moveto = True<EOL>self.edit = None<EOL><DEDENT>elif self.edit == Noneand self.contains_point(x, y, d=<NUM_LIT:2>):<EOL><INDENT>self.insert = True<EOL><DEDENT>else:<EOL><INDENT>self.insert = False<EOL><DEDENT>if self.insertingand self.contains_point(x, y, d=<NUM_LIT:2>): <EOL><INDENT>self.insert_point(x, y)<EOL>self.insert = False<EOL><DEDENT>self.inserting = False<EOL>if self._dirty == 
True:<EOL><INDENT>self.export_svg()<EOL>self._dirty = False<EOL><DEDENT><DEDENT>if _ctx._ns[\"<STR_LIT>\"]:<EOL><INDENT>self.last_key = _ctx._ns[\"<STR_LIT:key>\"]<EOL>self.last_keycode = _ctx._ns[\"<STR_LIT>\"]<EOL><DEDENT>if not _ctx._ns[\"<STR_LIT>\"] and self.last_key != None:<EOL><INDENT>if self.last_keycode == KEY_TAB:<EOL><INDENT>self.show_grid = not self.show_grid<EOL><DEDENT>if self.last_key == \"<STR_LIT:f>\":<EOL><INDENT>self.edit = None<EOL>self.freehand = not self.freehand<EOL>if self.freehand:<EOL><INDENT>self.msg = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>self.msg = \"<STR_LIT>\"<EOL><DEDENT><DEDENT>if self.last_keycode == KEY_ESC:<EOL><INDENT>self.edit = None<EOL><DEDENT>if self.last_keycode == _ctx.KEY_BACKSPACEand self.edit != None:<EOL><INDENT>self.delete = self.edit<EOL><DEDENT>self.last_key = None<EOL>self.last_code = None<EOL><DEDENT>if _ctx._ns[\"<STR_LIT>\"]:<EOL><INDENT>dx = <NUM_LIT:0><EOL>dy = <NUM_LIT:0><EOL>keycode = _ctx._ns[\"<STR_LIT>\"]<EOL>if keycode == _ctx.KEY_LEFT:<EOL><INDENT>dx = -<NUM_LIT:10><EOL><DEDENT>elif keycode == _ctx.KEY_RIGHT:<EOL><INDENT>dx = <NUM_LIT:10><EOL><DEDENT>if keycode == _ctx.KEY_UP:<EOL><INDENT>dy = -<NUM_LIT:10><EOL><DEDENT>elif keycode == _ctx.KEY_DOWN:<EOL><INDENT>dy = <NUM_LIT:10><EOL><DEDENT>if dx != <NUM_LIT:0> or dy != <NUM_LIT:0>:<EOL><INDENT>for pt in self._points:<EOL><INDENT>pt.x += dx<EOL>pt.y += dy<EOL>pt.ctrl1.x += dx<EOL>pt.ctrl1.y += dy<EOL>pt.ctrl2.x += dx<EOL>pt.ctrl2.y += dy<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Update runs each frame to check for mouse interaction.\n\n        Alters the path by allowing the user to add new points,\n        drag point handles and move their location.\n        Updates are automatically stored as SVG\n        in the given filename.", "id": "f11549:c1:m8"}
{"signature": "def draw(self):", "body": "<EOL>self.update()<EOL>x, y = mouse()<EOL>if self.show_grid:<EOL><INDENT>self.grid.draw()<EOL>x, y = self.grid.snap(x, y)<EOL><DEDENT>_ctx.strokewidth(self.strokewidth)<EOL>if self.freehand:<EOL><INDENT>self.draw_freehand()<EOL><DEDENT>r = <NUM_LIT:4><EOL>_ctx.nofill()<EOL>if len(self._points) > <NUM_LIT:0>:<EOL><INDENT>first = True            <EOL>for i in range(len(self._points)):<EOL><INDENT>pt = self._points[i]<EOL>if first:<EOL><INDENT>_ctx.beginpath(pt.x, pt.y)<EOL>first = False<EOL><DEDENT>else:<EOL><INDENT>if pt.cmd == CLOSE:<EOL><INDENT>_ctx.closepath()<EOL><DEDENT>elif pt.cmd == MOVETO:<EOL><INDENT>_ctx.moveto(pt.x, pt.y)<EOL><DEDENT>elif pt.cmd == LINETO:<EOL><INDENT>_ctx.lineto(pt.x, pt.y)<EOL><DEDENT>elif pt.cmd == CURVETO:<EOL><INDENT>_ctx.curveto(pt.ctrl1.x, pt.ctrl1.y, <EOL>pt.ctrl2.x, pt.ctrl2.y, <EOL>pt.x, pt.y)<EOL><DEDENT><DEDENT>if ((i == self.edit and self.new == None)or pt == self.new)and pt.cmd == CURVETOand not pt.freehand:<EOL><INDENT>_ctx.stroke(self.handle_color)<EOL>_ctx.nofill()<EOL>_ctx.oval(pt.x-r, pt.y-r, r*<NUM_LIT:2>, r*<NUM_LIT:2>)<EOL>_ctx.stroke(self.handle_color)<EOL>_ctx.line(pt.ctrl2.x, pt.ctrl2.y, pt.x, pt.y)<EOL>_ctx.fill(self.handle_color)<EOL><DEDENT>if pt == self.newand not pt.freehand:<EOL><INDENT>rx, ry = self.reflect(pt.x, pt.y, pt.ctrl2.x, pt.ctrl2.y)<EOL>_ctx.stroke(self.handle_color)<EOL>_ctx.line(rx, ry, pt.x, pt.y)<EOL>_ctx.nostroke()<EOL>_ctx.fill(self.handle_color)<EOL>_ctx.oval(rx-r/<NUM_LIT:2>, ry-r/<NUM_LIT:2>, r, r)<EOL><DEDENT>if i == self.editand self.new == Noneand pt.cmd == CURVETOand not pt.freehand:<EOL><INDENT>_ctx.oval(pt.ctrl2.x-r/<NUM_LIT:2>, pt.ctrl2.y-r/<NUM_LIT:2>, r, r)<EOL>if i > <NUM_LIT:0>:<EOL><INDENT>prev = self._points[i-<NUM_LIT:1>]<EOL>_ctx.line(pt.ctrl1.x, pt.ctrl1.y, prev.x, prev.y)<EOL>_ctx.oval(pt.ctrl1.x-r/<NUM_LIT:2>, pt.ctrl1.y-r/<NUM_LIT:2>, r, r)<EOL><DEDENT>if i > <NUM_LIT:0> and self._points[i-<NUM_LIT:1>].cmd != 
MOVETO:<EOL><INDENT>_ctx.line(prev.ctrl2.x, prev.ctrl2.y, prev.x, prev.y)<EOL><DEDENT>if i < len(self._points)-<NUM_LIT:1>:<EOL><INDENT>next = self._points[i+<NUM_LIT:1>]<EOL>if next.cmd == CURVETO:<EOL><INDENT>_ctx.line(next.ctrl1.x, next.ctrl1.y, pt.x, pt.y)<EOL><DEDENT><DEDENT><DEDENT>elif self.overlap(x, y, pt.x, pt.y)and not pt.freehand:<EOL><INDENT>self.insert = False <EOL>_ctx.nofill()<EOL>_ctx.stroke(self.handle_color)<EOL>_ctx.oval(pt.x-r, pt.y-r, r*<NUM_LIT:2>, r*<NUM_LIT:2>)<EOL><DEDENT>_ctx.fontsize(<NUM_LIT:9>)<EOL>_ctx.fill(self.handle_color)<EOL>txt = \"<STR_LIT>\"+str(int(pt.x))+\"<STR_LIT:U+002CU+0020>\"+str(int(pt.y))+\"<STR_LIT:)>\"<EOL>if (i == self.edit and self.new == None)or pt == self.newand not pt.freehand:<EOL><INDENT>_ctx.text(txt, pt.x+r, pt.y+<NUM_LIT:2>)                                       <EOL><DEDENT>elif self.overlap(x, y, pt.x, pt.y)and not pt.freehand:<EOL><INDENT>_ctx.text(txt, pt.x+r, pt.y+<NUM_LIT:2>)<EOL><DEDENT>if not pt.freehand:<EOL><INDENT>if pt.cmd != MOVETO:<EOL><INDENT>_ctx.fill(self.path_color)<EOL>_ctx.nostroke()<EOL><DEDENT>else:<EOL><INDENT>_ctx.stroke(self.path_color)<EOL>_ctx.nofill()<EOL><DEDENT>_ctx.oval(pt.x-r/<NUM_LIT:2>, pt.y-r/<NUM_LIT:2>, r, r)<EOL><DEDENT><DEDENT>_ctx.stroke(self.path_color)<EOL>_ctx.fill(self.path_fill)<EOL>_ctx.autoclosepath(False)    <EOL>p = _ctx.endpath()<EOL>self.path = p<EOL>if self.insert:<EOL><INDENT>_ctx.stroke(self.handle_color)<EOL>_ctx.nofill()<EOL>_ctx.oval(x-r*<NUM_LIT>, y-r*<NUM_LIT>, r*<NUM_LIT>, r*<NUM_LIT>)<EOL><DEDENT>if self.edit == Noneand self.new == Noneand self.moveto != Trueand not self.freehand:<EOL><INDENT>_ctx.nofill()<EOL>_ctx.stroke(self.new_color)<EOL>rx, ry = self.reflect(pt.x, pt.y, pt.ctrl2.x, pt.ctrl2.y)<EOL>_ctx.beginpath(pt.x, pt.y)<EOL>_ctx.curveto(rx, ry, x, y, x, y)<EOL>_ctx.endpath()<EOL>if self.last_moveto != None:<EOL><INDENT>start = self.last_moveto<EOL><DEDENT>else:<EOL><INDENT>start = self._points[<NUM_LIT:0>]<EOL><DEDENT>p = _ctx.line(x, y, 
start.x, start.y, draw=False)<EOL>try: p._nsBezierPath.setLineDash_count_phase_([<NUM_LIT:2>,<NUM_LIT:4>], <NUM_LIT:2>, <NUM_LIT:50>)<EOL>except:<EOL><INDENT>pass<EOL><DEDENT>_ctx.drawpath(p)<EOL><DEDENT>elif self.edit == Noneand self.new == Noneand self.moveto != None:<EOL><INDENT>_ctx.stroke(self.new_color)<EOL>_ctx.nofill()<EOL>_ctx.oval(x-r*<NUM_LIT>, y-r*<NUM_LIT>, r*<NUM_LIT>, r*<NUM_LIT>)<EOL><DEDENT>if self.edit != None:<EOL><INDENT>pt = self._points[self.edit]<EOL>x = pt.x + self.btn_x<EOL>y = pt.y + self.btn_y<EOL>r = self.btn_r<EOL>_ctx.nostroke()<EOL>_ctx.fill(<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT>)<EOL>_ctx.fill(self.handle_color)<EOL>_ctx.oval(x-r, y-r, r*<NUM_LIT:2>, r*<NUM_LIT:2>)<EOL>_ctx.fill(<NUM_LIT:1>)<EOL>_ctx.rotate(<NUM_LIT>)<EOL>_ctx.rect(x-r+<NUM_LIT:2>, y-<NUM_LIT>, r+<NUM_LIT:1>, <NUM_LIT>)<EOL>_ctx.rotate(-<NUM_LIT>)<EOL>_ctx.rect(x-r+<NUM_LIT:2>, y-<NUM_LIT>, r+<NUM_LIT:1>, <NUM_LIT>)<EOL>_ctx.reset()<EOL>if self.edit == len(self._points)-<NUM_LIT:1>:<EOL><INDENT>_ctx.fill(self.handle_color)<EOL>_ctx.oval(x+r*<NUM_LIT:2>+<NUM_LIT:2>-r, y-r, r*<NUM_LIT:2>, r*<NUM_LIT:2>)<EOL>_ctx.fill(<NUM_LIT:1>)<EOL>_ctx.rect(x+r*<NUM_LIT:2>+<NUM_LIT:2>-<NUM_LIT>, y-r+<NUM_LIT:3>, <NUM_LIT>, r-<NUM_LIT:1>)<EOL>_ctx.rect(x+r*<NUM_LIT:2>+<NUM_LIT:2>+<NUM_LIT>, y-r+<NUM_LIT:3>, <NUM_LIT>, r-<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>if self.msg != \"<STR_LIT>\":<EOL><INDENT>self.msg_alpha -= <NUM_LIT:0.1><EOL>_ctx.nostroke()<EOL>_ctx.fill(<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:0>, self.msg_alpha)<EOL>_ctx.fontsize(<NUM_LIT>)<EOL>_ctx.lineheight(<NUM_LIT:1>)<EOL>w = _ctx.textwidth(self.msg)<EOL>_ctx.rect(_ctx.WIDTH/<NUM_LIT:2>-w/<NUM_LIT:2>-<NUM_LIT:9>, _ctx.HEIGHT/<NUM_LIT:2>-<NUM_LIT>, w+<NUM_LIT>, <NUM_LIT>, roundness=<NUM_LIT>)<EOL>_ctx.fill(<NUM_LIT:1>,<NUM_LIT:1>,<NUM_LIT:1>, <NUM_LIT>)<EOL>_ctx.align(CENTER) <EOL>_ctx.text(self.msg, <NUM_LIT:0>, _ctx.HEIGHT/<NUM_LIT:2>, width=_ctx.WIDTH)<EOL><DEDENT>if self.msg_alpha <= 
<NUM_LIT:0.0>:<EOL><INDENT>self.msg = \"<STR_LIT>\"<EOL>self.msg_alpha = <NUM_LIT:1.0><EOL><DEDENT>", "docstring": "Draws the editable path and interface elements.", "id": "f11549:c1:m9"}
{"signature": "def contains_point(self, x, y, d=<NUM_LIT:2>):", "body": "if self.path != None and len(self.path) > <NUM_LIT:1>and self.path.contains(x, y):<EOL><INDENT>if not self.path.contains(x+d, y)or not self.path.contains(x, y+d)or not self.path.contains(x-d, y)or not self.path.contains(x, y-d)or not self.path.contains(x+d, y+d)or not self.path.contains(x-d, y-d)or not self.path.contains(x+d, y-d)or not self.path.contains(x-d, y+d):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Returns true when x, y is on the path stroke outline.", "id": "f11549:c1:m6"}
{"signature": "def distance(self, x0, y0, x1, y1):", "body": "return sqrt(pow(x1-x0, <NUM_LIT:2>) + pow(y1-y0, <NUM_LIT:2>))<EOL>", "docstring": "Calculates the distance between two points.", "id": "f11549:c1:m4"}
{"signature": "def reflect(self, x0, y0, x, y):", "body": "rx = x0 - (x-x0)<EOL>ry = y0 - (y-y0)<EOL>return rx, ry<EOL>", "docstring": "Reflects the point x, y through origin x0, y0.", "id": "f11549:c1:m2"}
{"signature": "def draw_freehand(self):", "body": "if _ctx._ns[\"<STR_LIT>\"]:<EOL><INDENT>x, y = mouse()<EOL>if self.show_grid:<EOL><INDENT>x, y = self.grid.snap(x, y)<EOL><DEDENT>if self.freehand_move == True:<EOL><INDENT>cmd = MOVETO<EOL>self.freehand_move = False<EOL><DEDENT>else:<EOL><INDENT>cmd = LINETO<EOL><DEDENT>pt = PathElement()<EOL>if cmd != MOVETO:<EOL><INDENT>pt.freehand = True <EOL><DEDENT>else:<EOL><INDENT>pt.freehand = False<EOL><DEDENT>pt.cmd = cmd<EOL>pt.x = x<EOL>pt.y = y<EOL>pt.ctrl1 = Point(x,y)<EOL>pt.ctrl2 = Point(x,y)<EOL>self._points.append(pt)<EOL>r = <NUM_LIT:4><EOL>_ctx.nofill()<EOL>_ctx.stroke(self.handle_color)<EOL>_ctx.oval(pt.x-r, pt.y-r, r*<NUM_LIT:2>, r*<NUM_LIT:2>)<EOL>_ctx.fontsize(<NUM_LIT:9>)<EOL>_ctx.fill(self.handle_color)<EOL>_ctx.text(\"<STR_LIT>\"+str(int(pt.x))+\"<STR_LIT:U+002CU+0020>\"+str(int(pt.y))+\"<STR_LIT:)>\", pt.x+r, pt.y)<EOL>self._dirty = True<EOL><DEDENT>else:<EOL><INDENT>self.freehand_move = True<EOL>if self._dirty:<EOL><INDENT>self._points[-<NUM_LIT:1>].freehand = False<EOL>self.export_svg()<EOL>self._dirty = False<EOL><DEDENT><DEDENT>", "docstring": "Freehand sketching.", "id": "f11549:c1:m10"}
{"signature": "def color(self, img1, img2):", "body": "import colorsys<EOL>p1 = list(img1.getdata())<EOL>p2 = list(img2.getdata())<EOL>for i in range(len(p1)):<EOL><INDENT>r1, g1, b1, a1 = p1[i]<EOL>r1 = r1 / <NUM_LIT><EOL>g1 = g1 / <NUM_LIT><EOL>b1 = b1 / <NUM_LIT><EOL>h1, s1, v1 = colorsys.rgb_to_hsv(r1, g1, b1)<EOL>r2, g2, b2, a2 = p2[i]<EOL>r2 = r2 / <NUM_LIT><EOL>g2 = g2 / <NUM_LIT><EOL>b2 = b2 / <NUM_LIT><EOL>h2, s2, v2 = colorsys.rgb_to_hsv(r2, g2, b2)<EOL>r3, g3, b3 = colorsys.hsv_to_rgb(h2, s2, v1)<EOL>r3 = int(r3*<NUM_LIT:255>)<EOL>g3 = int(g3*<NUM_LIT:255>)<EOL>b3 = int(b3*<NUM_LIT:255>)<EOL>p1[i] = (r3, g3, b3, a1)<EOL><DEDENT>img = Image.new(\"<STR_LIT>\", img1.size, <NUM_LIT:255>)<EOL>img.putdata(p1)<EOL>return img<EOL>", "docstring": "Applies the color blend mode.\n\n        Colorize image img1 with image img2.\n        The color filter replaces the hue and saturation of pixels in img1\n        with the hue and saturation of pixels in img2.\n        Returns a composite image with the alpha channel retained.", "id": "f11554:c3:m2"}
{"signature": "def preferences(interpolation=BILINEAR):", "body": "self. interpolation = interpolation<EOL>", "docstring": "Settings that influence image manipulation.\n\n        Currently, only defines the image interpolation,\n        which can be set to NEAREST, BICUBIC or BILINEAR.", "id": "f11554:c0:m8"}
{"signature": "def translate(self, x, y):", "body": "self.x = x<EOL>self.y = y<EOL>", "docstring": "Positions the layer at the given coordinates.\n\n        The x and y parameters define where to position \n        the top left corner of the layer,\n        measured from the top left of the canvas.", "id": "f11554:c2:m20"}
{"signature": "def merge(self, layers):", "body": "layers.sort()<EOL>if layers[<NUM_LIT:0>] == <NUM_LIT:0>: del layers[<NUM_LIT:0>]<EOL>self.flatten(layers)<EOL>", "docstring": "Flattens the given layers on the canvas.\n\n        Merges the given layers with the indices in the list\n        on the bottom layer in the list.\n        The other layers are discarded.", "id": "f11554:c0:m4"}
{"signature": "def overlay(self, img1, img2):", "body": "p1 = list(img1.getdata())<EOL>p2 = list(img2.getdata())<EOL>for i in range(len(p1)):<EOL><INDENT>p3 = ()<EOL>for j in range(len(p1[i])):<EOL><INDENT>a = p1[i][j] / <NUM_LIT><EOL>b = p2[i][j] / <NUM_LIT><EOL>if j == <NUM_LIT:3>:<EOL><INDENT>d = min(a,b)<EOL><DEDENT>elif a > <NUM_LIT:0.5>: <EOL><INDENT>d = <NUM_LIT:2>*(a+b-a*b)-<NUM_LIT:1><EOL><DEDENT>else: <EOL><INDENT>d = <NUM_LIT:2>*a*b            <EOL><DEDENT>p3 += (int(d*<NUM_LIT:255>),)<EOL><DEDENT>p1[i] = p3<EOL><DEDENT>img = Image.new(\"<STR_LIT>\", img1.size, <NUM_LIT:255>)<EOL>img.putdata(p1)<EOL>return img<EOL>", "docstring": "Applies the overlay blend mode.\n\n        Overlays image img2 on image img1.\n        The overlay pixel combines multiply and screen:\n        it multiplies dark pixels values and screen light values.\n        Returns a composite image with the alpha channel retained.", "id": "f11554:c3:m0"}
{"signature": "def export(self, filename):", "body": "self.flatten()<EOL>self.layers[<NUM_LIT:1>].img.save(filename)<EOL>return filename<EOL>", "docstring": "Exports the flattened canvas.\n\n        Flattens the canvas.\n        PNG retains the alpha channel information.\n        Other possibilities are JPEG and GIF.", "id": "f11554:c0:m6"}
{"signature": "def duplicate(self):", "body": "i = self.canvas.layer(self.img.copy(), self.x, self.y, self.name)<EOL>clone = self.canvas.layers[i]<EOL>clone.alpha = self.alpha<EOL>clone.blend = self.blend<EOL>", "docstring": "Creates a copy of the current layer.\n\n        This copy becomes the top layer on the canvas.", "id": "f11554:c2:m9"}
{"signature": "def distort(self, x1=<NUM_LIT:0>,y1=<NUM_LIT:0>, x2=<NUM_LIT:0>,y2=<NUM_LIT:0>, x3=<NUM_LIT:0>,y3=<NUM_LIT:0>, x4=<NUM_LIT:0>,y4=<NUM_LIT:0>):", "body": "w, h = self.img.size<EOL>quad = (-x1,-y1, -x4,h-y4, w-x3,w-y3, w-x2,-y2)<EOL>self.img = self.img.transform(self.img.size, Image.QUAD, quad, INTERPOLATION)<EOL>", "docstring": "Distorts the layer.\n\n        Distorts the layer by translating \n        the four corners of its bounding box to the given coordinates:\n        upper left (x1,y1), upper right(x2,y2),\n        lower right (x3,y3) and lower left (x4,y4).", "id": "f11554:c2:m22"}
{"signature": "def invert(self):", "body": "alpha = self.img.split()[<NUM_LIT:3>]<EOL>self.img = self.img.convert(\"<STR_LIT>\")<EOL>self.img = ImageOps.invert(self.img)<EOL>self.img = self.img.convert(\"<STR_LIT>\")<EOL>self.img.putalpha(alpha)<EOL>", "docstring": "Inverts the layer.", "id": "f11554:c2:m19"}
{"signature": "def blur(self):", "body": "self.img = self.img.filter(ImageFilter.BLUR)<EOL>", "docstring": "Blurs the layer.", "id": "f11554:c2:m25"}
{"signature": "def delete(self):", "body": "i = self.index()<EOL>if i != None: del self.canvas.layers[i]<EOL>", "docstring": "Removes this layer from the canvas.", "id": "f11554:c2:m3"}
{"signature": "def desaturate(self):", "body": "alpha = self.img.split()[<NUM_LIT:3>]<EOL>self.img = self.img.convert(\"<STR_LIT:L>\")<EOL>self.img = self.img.convert(\"<STR_LIT>\")<EOL>self.img.putalpha(alpha)<EOL>", "docstring": "Desaturates the layer, making it grayscale.\n\n        Instantly removes all color information from the layer,\n        while maintaing its alpha channel.", "id": "f11554:c2:m18"}
{"signature": "def gradient(self, style=LINEAR, w=<NUM_LIT:1.0>, h=<NUM_LIT:1.0>, name=\"<STR_LIT>\"):", "body": "from types import FloatType<EOL>w0 = self.w <EOL>h0 = self.h<EOL>if type(w) == FloatType: w *= w0<EOL>if type(h) == FloatType: h *= h0<EOL>img = Image.new(\"<STR_LIT:L>\", (int(w),int(h)), <NUM_LIT:255>)<EOL>draw = ImageDraw.Draw(img)<EOL>if style == LINEAR:<EOL><INDENT>for i in range(int(w)):<EOL><INDENT>k = <NUM_LIT> * i/w<EOL>draw.rectangle((i, <NUM_LIT:0>, i, h), fill=int(k))<EOL><DEDENT><DEDENT>if style == RADIAL:<EOL><INDENT>r = min(w,h)/<NUM_LIT:2><EOL>for i in range(int(r)):<EOL><INDENT>k = <NUM_LIT:255> - <NUM_LIT> * i/r<EOL>draw.ellipse((w/<NUM_LIT:2>-r+i, h/<NUM_LIT:2>-r+i, w/<NUM_LIT:2>+r-i, h/<NUM_LIT:2>+r-i), fill=int(k))<EOL><DEDENT><DEDENT>if style == DIAMOND:<EOL><INDENT>r = max(w,h)<EOL>for i in range(int(r)):<EOL><INDENT>x = int(i*w/r*<NUM_LIT:0.5>)<EOL>y = int(i*h/r*<NUM_LIT:0.5>)<EOL>k = <NUM_LIT> * i/r<EOL>draw.rectangle((x, y, w-x, h-y), outline=int(k))<EOL><DEDENT><DEDENT>img = img.convert(\"<STR_LIT>\")<EOL>self.layer(img, <NUM_LIT:0>, <NUM_LIT:0>, name=\"<STR_LIT>\")<EOL>", "docstring": "Creates a gradient layer.\n\n        Creates a gradient layer, that is usually used\n        together with the mask() function.\n\n        All the image functions work on gradients,\n        so they can easily be flipped, rotated, scaled, inverted,\n        made brighter or darker, ...\n\n        Styles for gradients are LINEAR, RADIAL and DIAMOND.", "id": "f11554:c0:m3"}
{"signature": "def __init__(self, w, h):", "body": "self.interpolation = BILINEAR<EOL>self.layers = Layers()<EOL>self.w = w<EOL>self.h = h<EOL>img = Image.new(\"<STR_LIT>\", (w,h), (<NUM_LIT:255>,<NUM_LIT:255>,<NUM_LIT:255>,<NUM_LIT:0>))<EOL>self.layer(img, name=\"<STR_LIT>\")<EOL>", "docstring": "Creates a new canvas.\n\n        Creates the working area on which to blend layers.\n        The canvas background is transparent,\n        but a background color could be set using the fill() function.", "id": "f11554:c0:m0"}
{"signature": "def up(self):", "body": "i = self.index()<EOL>if i != None:<EOL><INDENT>del self.canvas.layers[i]<EOL>i = min(len(self.canvas.layers), i+<NUM_LIT:1>)<EOL>self.canvas.layers.insert(i, self)<EOL><DEDENT>", "docstring": "Moves the layer up in the stacking order.", "id": "f11554:c2:m4"}
{"signature": "def flip(self, axis=HORIZONTAL):", "body": "if axis == HORIZONTAL:<EOL><INDENT>self.img = self.img.transpose(Image.FLIP_LEFT_RIGHT)<EOL><DEDENT>if axis == VERTICAL:<EOL><INDENT>self.img = self.img.transpose(Image.FLIP_TOP_BOTTOM)<EOL><DEDENT>", "docstring": "Flips the layer, either HORIZONTAL or VERTICAL.", "id": "f11554:c2:m24"}
{"signature": "def hue(self, img1, img2):", "body": "import colorsys<EOL>p1 = list(img1.getdata())<EOL>p2 = list(img2.getdata())<EOL>for i in range(len(p1)):<EOL><INDENT>r1, g1, b1, a1 = p1[i]<EOL>r1 = r1 / <NUM_LIT><EOL>g1 = g1 / <NUM_LIT><EOL>b1 = b1 / <NUM_LIT><EOL>h1, s1, v1 = colorsys.rgb_to_hsv(r1, g1, b1)<EOL>r2, g2, b2, a2 = p2[i]<EOL>r2 = r2 / <NUM_LIT><EOL>g2 = g2 / <NUM_LIT><EOL>b2 = b2 / <NUM_LIT><EOL>h2, s2, v2 = colorsys.rgb_to_hsv(r2, g2, b2)<EOL>r3, g3, b3 = colorsys.hsv_to_rgb(h2, s1, v1)<EOL>r3 = int(r3*<NUM_LIT:255>)<EOL>g3 = int(g3*<NUM_LIT:255>)<EOL>b3 = int(b3*<NUM_LIT:255>)<EOL>p1[i] = (r3, g3, b3, a1)<EOL><DEDENT>img = Image.new(\"<STR_LIT>\", img1.size, <NUM_LIT:255>)<EOL>img.putdata(p1)<EOL>return img<EOL>", "docstring": "Applies the hue blend mode.\n\n        Hues image img1 with image img2.\n        The hue filter replaces the hues of pixels in img1\n        with the hues of pixels in img2.\n        Returns a composite image with the alpha channel retained.", "id": "f11554:c3:m1"}
{"signature": "def index(self):", "body": "for i in range(len(self.canvas.layers)):<EOL><INDENT>if self.canvas.layers[i] == self: break<EOL><DEDENT>if self.canvas.layers[i] == self: <EOL><INDENT>return i<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Returns this layer's index in the canvas.layers[].\n\n        Searches the position of this layer in the canvas'\n        layers list, return None when not found.", "id": "f11554:c2:m1"}
{"signature": "def create(self, name, overwrite=True):", "body": "self._name = name.rstrip(\"<STR_LIT>\")<EOL>from os import unlink<EOL>if overwrite: <EOL><INDENT>try: unlink(self._name + \"<STR_LIT>\")<EOL>except: pass       <EOL><DEDENT>self._con = sqlite.connect(self._name + \"<STR_LIT>\")<EOL>self._cur = self._con.cursor()<EOL>", "docstring": "Creates an SQLite database file.\n\n        Creates an SQLite database with the given name.\n        The .box file extension is added automatically.\n        Overwrites any existing database by default.", "id": "f11555:c0:m2"}
{"signature": "def commit(self, each=<NUM_LIT:0>):", "body": "self._commit = each<EOL>self._con.commit()<EOL>", "docstring": "Sets the commit frequency.\n\n        Modifications to the database,\n        e.g. row insertions are commited in batch,\n        specified by the given number.\n        A number that is reasonably high allows for faster transactions.\n        Commits anything still pending.", "id": "f11555:c0:m8"}
{"signature": "def __len__(self):", "body": "sql = \"<STR_LIT>\"+self._key+\"<STR_LIT>\"+self._name<EOL>self._db._cur.execute(sql)<EOL>i = <NUM_LIT:0><EOL>for r in self._db._cur: i += <NUM_LIT:1><EOL>return i<EOL>", "docstring": "The row count of the table. This should be optimized.", "id": "f11555:c1:m2"}
{"signature": "def create_index(self, table, field, unique=False, ascending=True):", "body": "if unique: u = \"<STR_LIT>\"<EOL>else: u = \"<STR_LIT>\"<EOL>if ascending: a = \"<STR_LIT>\"<EOL>else: a = \"<STR_LIT>\"<EOL>sql  = \"<STR_LIT>\"+u+\"<STR_LIT>\"+table+\"<STR_LIT:_>\"+field+\"<STR_LIT:U+0020>\"<EOL>sql += \"<STR_LIT>\"+table+\"<STR_LIT:(>\"+field+\"<STR_LIT:U+0020>\"+a+\"<STR_LIT:)>\"<EOL>self._cur.execute(sql)<EOL>self._con.commit()<EOL>", "docstring": "Creates a table index.\n\n        Creates an index on the given table,\n        on the given field with unique values enforced or not,\n        in ascending or descending order.", "id": "f11555:c0:m7"}
{"signature": "def connect(self, name):", "body": "self._name = name.rstrip(\"<STR_LIT>\")<EOL>self._con = sqlite.connect(self._name + \"<STR_LIT>\")<EOL>self._cur = self._con.cursor()<EOL>self._tables = []<EOL>self._cur.execute(\"<STR_LIT>\")<EOL>for r in self._cur: self._tables.append(r[<NUM_LIT:0>])<EOL>self._indices = []<EOL>self._cur.execute(\"<STR_LIT>\")<EOL>for r in self._cur: self._indices.append(r[<NUM_LIT:0>])<EOL>for t in self._tables:<EOL><INDENT>self._cur.execute(\"<STR_LIT>\"+t+\"<STR_LIT:)>\")<EOL>fields = []<EOL>key = \"<STR_LIT>\"<EOL>for r in self._cur:<EOL><INDENT>fields.append(r[<NUM_LIT:1>])<EOL>if r[<NUM_LIT:2>] == \"<STR_LIT>\": key = r[<NUM_LIT:1>]<EOL><DEDENT>setattr(self, t, Table(self, t, key, fields))<EOL><DEDENT>", "docstring": "Generic database.\n\n        Opens the SQLite database with the given name.\n        The .db extension is automatically appended to the name.\n        For each table in the database an attribute is created,\n        and assigned a Table object.\n\n        You can do: database.table or database[table].", "id": "f11555:c0:m1"}
{"signature": "def all(self):", "body": "return self.find(\"<STR_LIT:*>\")<EOL>", "docstring": "Returns all the rows in the table.", "id": "f11555:c1:m4"}
{"signature": "def close(self):", "body": "self._con.commit()<EOL>self._cur.close()<EOL>self._con.close()<EOL>", "docstring": "Commits any pending transactions and closes the database.", "id": "f11555:c0:m9"}
{"signature": "def segments(self, generation, time=None):", "body": "if not time:<EOL><INDENT>time = maxint<EOL><DEDENT>_ctx.push()<EOL>self._reset()<EOL>self._grow(generation, self.root, self.angle, self.d, time, draw=False)<EOL>_ctx.pop()<EOL>return self._segments<EOL>", "docstring": "Returns the number of segments drawn for a number of generations.\n\n        The number of segments that are drawn to the screen\n        depends of the number of generations and the amount of time.\n        Each F command has a cost that depletes time.\n        Segments will stop being drawn if generation reaches 0,\n        when there is no time left \n        or when the segment length falls below LSystem.threshold.", "id": "f11556:c0:m7"}
{"signature": "def _reset(self):", "body": "self._segments = <NUM_LIT:0><EOL>self._duration = <NUM_LIT:0><EOL>", "docstring": "Resets the number of drawn segments and the duration.\n\n        To calculate the number of segments or the total time needed,\n        we need to recurse through LSystem._grow().\n        Before that, the draw(), segments() and duration() command\n        will reset both tally variables.", "id": "f11556:c0:m3"}
{"signature": "def draw(self, x, y, generation, time=None, ease=None):", "body": "angle = self.angle<EOL>if time is not None and ease:<EOL><INDENT>angle = min(self.angle, self.angle * time / ease)<EOL><DEDENT>self._timed = True<EOL>if not time:<EOL><INDENT>self._timed = False<EOL>time = maxint<EOL><DEDENT>mode = _ctx.transform()<EOL>_ctx.transform(CORNER)<EOL>_ctx.push()<EOL>_ctx.translate(x, y)<EOL>self._reset()<EOL>self._grow(generation, self.root, angle, self.d, time, draw=True)<EOL>_ctx.pop()<EOL>_ctx.transform(mode)<EOL>", "docstring": "Draws a number of generations at the given position.\n\n        The time parameter can be used to progress the system in an animatiom.\n        As time nears LSystem.duration(generation), more segments will be drawn.\n\n        The ease parameter can be used to gradually increase the branching angle\n        as more segments are drawn.", "id": "f11556:c0:m6"}
{"signature": "def _grow(self, generation, rule, angle, length, time=maxint, draw=True):", "body": "if generation == <NUM_LIT:0>:<EOL><INDENT>self._duration = <NUM_LIT:1> + maxint-time<EOL><DEDENT>if length <= self.threshold:<EOL><INDENT>self._duration = <NUM_LIT:1> + maxint-time<EOL>return<EOL><DEDENT>if rule in self.commands:<EOL><INDENT>self.commands[rule](self, generation, rule, angle, length, time)<EOL><DEDENT>if draw:<EOL><INDENT>if rule == \"<STR_LIT:f>\":<EOL><INDENT>_ctx.translate(<NUM_LIT:0>, -min(length, length*time))<EOL><DEDENT>elif rule == \"<STR_LIT:->\":<EOL><INDENT>_ctx.rotate(max(-angle, -angle*time))<EOL><DEDENT>elif rule == \"<STR_LIT:+>\":<EOL><INDENT>_ctx.rotate(min(+angle, +angle*time))<EOL><DEDENT>elif rule == \"<STR_LIT:|>\":<EOL><INDENT>_ctx.rotate(<NUM_LIT>)<EOL><DEDENT>elif rule == \"<STR_LIT:[>\":<EOL><INDENT>_ctx.push()<EOL><DEDENT>elif rule == \"<STR_LIT:]>\":<EOL><INDENT>_ctx.pop()<EOL><DEDENT><DEDENT>if rule in self.rulesand generation > <NUM_LIT:0>and time > <NUM_LIT:0>:<EOL><INDENT>for cmd in self.rules[rule]:<EOL><INDENT>if cmd == \"<STR_LIT:F>\":<EOL><INDENT>time -= self.cost<EOL><DEDENT>elif cmd == \"<STR_LIT:!>\":<EOL><INDENT>angle = -angle<EOL><DEDENT>elif cmd == \"<STR_LIT:(>\":<EOL><INDENT>angle *= <NUM_LIT><EOL><DEDENT>elif cmd == \"<STR_LIT:)>\":<EOL><INDENT>angle *= <NUM_LIT><EOL><DEDENT>elif cmd == \"<STR_LIT:<>\":<EOL><INDENT>length *= <NUM_LIT><EOL><DEDENT>elif cmd == \"<STR_LIT:>>\":<EOL><INDENT>length *= <NUM_LIT><EOL><DEDENT>self._grow(<EOL>generation-<NUM_LIT:1>,<EOL>cmd,<EOL>angle,<EOL>length*self.decrease,<EOL>time,<EOL>draw<EOL>)<EOL><DEDENT><DEDENT>elif rule == \"<STR_LIT:F>\"or (rule in self.rules and self.rules[rule] == \"<STR_LIT>\"):<EOL><INDENT>self._segments += <NUM_LIT:1><EOL>if draw and time > <NUM_LIT:0>:<EOL><INDENT>length = min(length, length*time)<EOL>if self._timed:<EOL><INDENT>self.segment(length, generation, time, id=self._segments)<EOL><DEDENT>else:<EOL><INDENT>self.segment(length, generation, 
None, id=self._segments)<EOL><DEDENT>_ctx.translate(<NUM_LIT:0>, -length)<EOL><DEDENT><DEDENT>", "docstring": "Recurse through the system.\n\n        When a segment is drawn, the LSsytem.segment() method will be called.\n        You can customize this method to create your own visualizations.\n        It takes an optional time parameter. \n\n        If you divide this parameter by LSsytem.duration() you get \n        a number between 0.0 and 1.0 you can use as an alpha value for example.\n\n        The method also has an id parameter which is a unique number \n        between 0 and LSystem.segments.", "id": "f11556:c0:m4"}
{"signature": "def update(self):", "body": "raise NotImplementedError<EOL>", "docstring": "The method which gets executed when a new state of the object was\nreceived.", "id": "f11557:c1:m2"}
{"signature": "def close_socket(self):", "body": "self.socket.close()<EOL>", "docstring": "Closes the socket connection", "id": "f11560:c1:m2"}
{"signature": "def get_profile(self, profile):", "body": "return self.profiles.get(profile, None)<EOL>", "docstring": "Returns a specific profile from the profile list and otherwise None", "id": "f11560:c1:m5"}
{"signature": "def open_socket(self):", "body": "self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)<EOL>self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, <NUM_LIT:1>)<EOL>self.socket.setblocking(<NUM_LIT:0>)<EOL>self.socket.bind((self.host, self.port))<EOL>", "docstring": "Opens the socket and binds to the given host and port. Uses\nSO_REUSEADDR to be as robust as possible.", "id": "f11560:c1:m1"}
{"signature": "def callback(self, *incoming):", "body": "message = incoming[<NUM_LIT:0>]<EOL>if message:<EOL><INDENT>address, command = message[<NUM_LIT:0>], message[<NUM_LIT:2>]<EOL>profile = self.get_profile(address)<EOL>if profile is not None:<EOL><INDENT>try:<EOL><INDENT>getattr(profile, command)(self, message)<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Gets called by the CallbackManager if a new message was received", "id": "f11560:c1:m8"}
{"signature": "def dispatch(self, message, source = None):", "body": "msgtype = \"<STR_LIT>\"<EOL>try:<EOL><INDENT>if type(message[<NUM_LIT:0>]) == str:<EOL><INDENT>address = message[<NUM_LIT:0>]<EOL>self.callbacks[address](message)<EOL><DEDENT>elif type(message[<NUM_LIT:0>]) == list:<EOL><INDENT>for msg in message:<EOL><INDENT>self.dispatch(msg)<EOL><DEDENT><DEDENT><DEDENT>except KeyError as key:<EOL><INDENT>print('<STR_LIT>' % (address, key, message))<EOL>pprint.pprint(message)<EOL><DEDENT>except IndexError as e:<EOL><INDENT>print('<STR_LIT>' % (e, message))<EOL>pass<EOL><DEDENT>except None as e:<EOL><INDENT>print(\"<STR_LIT>\", address, \"<STR_LIT>\", e)<EOL><DEDENT>return<EOL>", "docstring": "Sends decoded OSC data to an appropriate calback", "id": "f11561:c1:m2"}
{"signature": "def add(self, callback, name):", "body": "if callback == None:<EOL><INDENT>del self.callbacks[name]<EOL><DEDENT>else:<EOL><INDENT>self.callbacks[name] = callback<EOL><DEDENT>", "docstring": "Adds a callback to our set of callbacks,\n        or removes the callback with name if callback\n        is None.", "id": "f11561:c1:m3"}
{"signature": "def hexDump(bytes):", "body": "for i in range(len(bytes)):<EOL><INDENT>sys.stdout.write(\"<STR_LIT>\" % (ord(bytes[i])))<EOL>if (i+<NUM_LIT:1>) % <NUM_LIT:8> == <NUM_LIT:0>:<EOL><INDENT>print(repr(bytes[i-<NUM_LIT:7>:i+<NUM_LIT:1>]))<EOL><DEDENT><DEDENT>if(len(bytes) % <NUM_LIT:8> != <NUM_LIT:0>):<EOL><INDENT>print(string.rjust(\"<STR_LIT>\", <NUM_LIT:11>), repr(bytes[i-len(bytes)%<NUM_LIT:8>:i+<NUM_LIT:1>]))<EOL><DEDENT>", "docstring": "Useful utility; prints the string in hexadecimal", "id": "f11561:m0"}
{"signature": "def rawAppend(self, data):", "body": "self.message = self.message + data<EOL>", "docstring": "Appends raw data to the message.  Use append().", "id": "f11561:c0:m7"}
{"signature": "def decodeOSC(data):", "body": "table = {\"<STR_LIT:i>\":readInt, \"<STR_LIT:f>\":readFloat, \"<STR_LIT:s>\":readString, \"<STR_LIT:b>\":readBlob}<EOL>decoded = []<EOL>address,  rest = readString(data)<EOL>typetags = \"<STR_LIT>\"<EOL>if address == \"<STR_LIT>\":<EOL><INDENT>time, rest = readLong(rest)<EOL>decoded.append(address)<EOL>decoded.append(time)<EOL>while len(rest)><NUM_LIT:0>:<EOL><INDENT>length, rest = readInt(rest)<EOL>decoded.append(decodeOSC(rest[:length]))<EOL>rest = rest[length:]<EOL><DEDENT><DEDENT>elif len(rest) > <NUM_LIT:0>:<EOL><INDENT>typetags, rest = readString(rest)<EOL>decoded.append(address)<EOL>decoded.append(typetags)<EOL>if typetags[<NUM_LIT:0>] == \"<STR_LIT:U+002C>\":<EOL><INDENT>for tag in typetags[<NUM_LIT:1>:]:<EOL><INDENT>value, rest = table[tag](rest)<EOL>decoded.append(value)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return decoded<EOL>", "docstring": "Converts a typetagged OSC message to a Python list.", "id": "f11561:m9"}
{"signature": "def alive(self, client, message):", "body": "raise NotImplementedError<EOL>", "docstring": "The 'alive' message contains the session ids of all alive fiducials\nknown to reacTIVision.", "id": "f11562:c0:m2"}
{"signature": "def blend(self, clr, factor=<NUM_LIT:0.5>):", "body": "r = self.r * (<NUM_LIT:1> - factor) + clr.r * factor<EOL>g = self.g * (<NUM_LIT:1> - factor) + clr.g * factor<EOL>b = self.b * (<NUM_LIT:1> - factor) + clr.b * factor<EOL>a = self.a * (<NUM_LIT:1> - factor) + clr.a * factor<EOL>return Color(r, g, b, a, mode=\"<STR_LIT>\")<EOL>", "docstring": "Returns a mix of two colors.", "id": "f11564:c1:m21"}
{"signature": "def hsv_to_rgb(h, s, v):", "body": "if s == <NUM_LIT:0>: return v, v, v<EOL>h = h / (<NUM_LIT> / <NUM_LIT>)<EOL>i = floor(h)<EOL>f = h - i<EOL>p = v * (<NUM_LIT:1> - s)<EOL>q = v * (<NUM_LIT:1> - s * f)<EOL>t = v * (<NUM_LIT:1> - s * (<NUM_LIT:1> - f))<EOL>if i == <NUM_LIT:0>:<EOL><INDENT>r = v;<EOL>g = t;<EOL>b = p<EOL><DEDENT>elif i == <NUM_LIT:1>:<EOL><INDENT>r = q;<EOL>g = v;<EOL>b = p<EOL><DEDENT>elif i == <NUM_LIT:2>:<EOL><INDENT>r = p;<EOL>g = v;<EOL>b = t<EOL><DEDENT>elif i == <NUM_LIT:3>:<EOL><INDENT>r = p;<EOL>g = q;<EOL>b = v<EOL><DEDENT>elif i == <NUM_LIT:4>:<EOL><INDENT>r = t;<EOL>g = p;<EOL>b = v<EOL><DEDENT>else:<EOL><INDENT>r = v;<EOL>g = p;<EOL>b = q<EOL><DEDENT>return r, g, b<EOL>", "docstring": "Hue, saturation, brightness to red, green, blue.\n    http://www.koders.com/python/fidB2FE963F658FE74D9BF74EB93EFD44DCAE45E10E.aspx\n    Results will differ from the way NSColor converts color spaces.", "id": "f11564:m5"}
{"signature": "def swatch(self, x, y, w=<NUM_LIT>, h=<NUM_LIT>, padding=<NUM_LIT:4>, roundness=<NUM_LIT:0>, n=<NUM_LIT:12>, d=<NUM_LIT>, grouped=None):", "body": "if grouped is None:  <EOL><INDENT>grouped = self.group_swatches<EOL><DEDENT>if not grouped:<EOL><INDENT>s = sum([wgt for clr, rng, wgt in self.ranges])<EOL>for clr, rng, wgt in self.ranges:<EOL><INDENT>cols = max(<NUM_LIT:1>, int(wgt / s * n))<EOL>for i in _range(cols):<EOL><INDENT>rng.colors(clr, n=n, d=d).swatch(x, y, w, h, padding=padding, roundness=roundness)<EOL>x += w + padding<EOL><DEDENT><DEDENT>return x, y + n * (h + padding)<EOL><DEDENT>grouped = self._weight_by_hue()<EOL>for total_weight, normalized_weight, hue, ranges in grouped:<EOL><INDENT>dy = y<EOL>rc = <NUM_LIT:0><EOL>for clr, rng, weight in ranges:<EOL><INDENT>dx = x<EOL>cols = int(normalized_weight * n)<EOL>cols = max(<NUM_LIT:1>, min(cols, n - len(grouped)))<EOL>if clr.name == \"<STR_LIT>\": rng = rng.black<EOL>if clr.name == \"<STR_LIT>\": rng = rng.white<EOL>for i in _range(cols):<EOL><INDENT>rows = int(weight / total_weight * n)<EOL>rows = max(<NUM_LIT:1>, rows)<EOL>if (clr, rng, weight) == ranges[-<NUM_LIT:1>] and rc + rows < n: rows += <NUM_LIT:1><EOL>rng.colors(clr, n=rows, d=d).swatch(dx, dy, w, h, padding=padding, roundness=roundness)<EOL>dx += w + padding<EOL><DEDENT>dy += (w + padding) * rows  <EOL>rc = rows<EOL><DEDENT>x += (w + padding) * cols + padding<EOL><DEDENT>return x, dy<EOL>", "docstring": "Draws a weighted swatch with approximately n columns and rows.\n\nWhen the grouped parameter is True, colors are grouped in blocks of the same hue\n(also see the _weight_by_hue() method).", "id": "f11564:c7:m21"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "_list.__init__(self)<EOL>self.name = \"<STR_LIT>\"<EOL>self.tags = []<EOL>for arg in args:<EOL><INDENT>if arg.__class__ == Color:<EOL><INDENT>self.append(arg)<EOL><DEDENT>if arg.__class__ == BaseColor:<EOL><INDENT>self.append(color(arg.r, arg.g, arg.b, mode=\"<STR_LIT>\"))<EOL><DEDENT>try:<EOL><INDENT>self.name = arg.label<EOL>for r, g, b in arg:<EOL><INDENT>self.append(color(r, g, b, mode=\"<STR_LIT>\"))<EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>if isinstance(arg, _list)or isinstance(arg, tuple):<EOL><INDENT>for clr in arg:<EOL><INDENT>if clr.__class__ == Color:<EOL><INDENT>self.append(clr)<EOL><DEDENT>if clr.__class__ == BaseColor:<EOL><INDENT>self.append(color(clr))<EOL><DEDENT><DEDENT><DEDENT>if isinstance(arg, (str, unicode)):<EOL><INDENT>if os.path.exists(arg):<EOL><INDENT>n = <NUM_LIT:10><EOL>if \"<STR_LIT:n>\" in kwargs.keys(): n = kwargs[\"<STR_LIT:n>\"]<EOL>self.image_to_rgb(arg, n)<EOL><DEDENT>else:<EOL><INDENT>clr = Color(arg)<EOL>if not clr.is_transparent:<EOL><INDENT>self.append(clr)<EOL>self.name = arg<EOL><DEDENT>else:<EOL><INDENT>self.extend(self.context_to_rgb(arg))<EOL>self.tags = arg<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if \"<STR_LIT:name>\" in kwargs.keys():<EOL><INDENT>self.name = kwargs[\"<STR_LIT:name>\"]<EOL><DEDENT>if \"<STR_LIT>\" in kwargs.keys():<EOL><INDENT>self.tags = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>", "docstring": "Construct a list of colors.\n\nColors can be supplied as individual arguments,\nor in a list or tuple:\nColorList(clr1, clr2)\nColorList([clr1, clr2])\nColorList((clr1, clr2))\n\nYou can also supply an object from inside a\nweb.kuler.search() or web.colr.search() list.\n\nOr a string with a named color, a descriptive feel,\nor the pathname of an image.\n\nColorList furthermore takes two named parameters,\na name and a list of tags.", "id": "f11564:c2:m0"}
{"signature": "def _xml(self):", "body": "grouped = self._weight_by_hue()<EOL>xml = \"<STR_LIT>\" + self.name + \"<STR_LIT>\" + \"<STR_LIT:U+002CU+0020>\".join(self.tags) + \"<STR_LIT>\"<EOL>for total_weight, normalized_weight, hue, ranges in grouped:<EOL><INDENT>if hue == self.blue: hue = \"<STR_LIT>\"<EOL>clr = color(hue)<EOL>xml += \"<STR_LIT>\" + clr.name + \"<STR_LIT>\" + str(normalized_weight) + \"<STR_LIT>\"<EOL>xml += \"<STR_LIT>\" + str(clr.r) + \"<STR_LIT>\" + str(clr.g) + \"<STR_LIT>\"<EOL>xml += \"<STR_LIT>\" + str(clr.b) + \"<STR_LIT>\" + str(clr.a) + \"<STR_LIT>\"<EOL>for clr, rng, wgt in ranges:<EOL><INDENT>xml += \"<STR_LIT>\" + str(rng) + \"<STR_LIT>\" + str(wgt / total_weight) + \"<STR_LIT>\"<EOL><DEDENT>xml = xml.rstrip(\"<STR_LIT:U+0020>\") + \"<STR_LIT>\"<EOL><DEDENT>xml += \"<STR_LIT>\"<EOL>return xml<EOL>", "docstring": "Returns the color information as XML.\n\nThe XML has the following structure:\n<colors query=\"\">\n    <color name=\"\" weight=\"\" />\n        <rgb r=\"\" g=\"\" b=\"\" />\n        <shade name=\"\" weight=\"\" />\n    </color>\n</colors>\n\nNotice that ranges are stored by name and retrieved in the _load()\nmethod with the shade() command - and are thus expected to be\nshades (e.g. intense, warm, ...) unless the shade() command would\nreturn any custom ranges as well. This can be done by appending custom\nranges to the shades list.", "id": "f11564:c7:m4"}
{"signature": "def context_to_rgb(self, str):", "body": "matches = []<EOL>for clr in context:<EOL><INDENT>tags = context[clr]<EOL>for tag in tags:<EOL><INDENT>if tag.startswith(str)or str.startswith(tag):<EOL><INDENT>matches.append(clr)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>matches = [color(name) for name in matches]<EOL>return matches<EOL>", "docstring": "Returns the colors that have the given word in their context.\n\n        For example, the word \"anger\" appears\n        in black, orange and red contexts,\n        so the list will contain those three colors.", "id": "f11564:c2:m2"}
{"signature": "def _average(self):", "body": "r, g, b, a = <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0><EOL>for clr in self:<EOL><INDENT>r += clr.r<EOL>g += clr.g<EOL>b += clr.b<EOL>a += clr.alpha<EOL><DEDENT>r /= len(self)<EOL>g /= len(self)<EOL>b /= len(self)<EOL>a /= len(self)<EOL>return color(r, g, b, a, mode=\"<STR_LIT>\")<EOL>", "docstring": "Returns one average color for the colors in the list.", "id": "f11564:c2:m7"}
{"signature": "def outline(path, colors, precision=<NUM_LIT>, continuous=True):", "body": "<EOL>def _point_count(path, precision):<EOL><INDENT>return max(int(path.length * precision * <NUM_LIT:0.5>), <NUM_LIT:10>)<EOL><DEDENT>n = sum([_point_count(contour, precision) for contour in path.contours])<EOL>contour_i = <NUM_LIT:0><EOL>contour_n = len(path.contours) - <NUM_LIT:1><EOL>if contour_n == <NUM_LIT:0>: continuous = False<EOL>i = <NUM_LIT:0><EOL>for contour in path.contours:<EOL><INDENT>if not continuous: i = <NUM_LIT:0><EOL>j = _point_count(contour, precision)<EOL>first = True<EOL>for pt in contour.points(j):<EOL><INDENT>if first:<EOL><INDENT>first = False<EOL><DEDENT>else:<EOL><INDENT>if not continuous:<EOL><INDENT>clr = float(i) / j * len(colors)<EOL><DEDENT>else:<EOL><INDENT>clr = float(i) / n * len(colors) - <NUM_LIT:1> * contour_i / contour_n<EOL><DEDENT>_ctx.stroke(colors[int(clr)])<EOL>_ctx.line(x0, y0, pt.x, pt.y)<EOL><DEDENT>x0 = pt.x<EOL>y0 = pt.y<EOL>i += <NUM_LIT:1><EOL><DEDENT>pt = contour.point(<NUM_LIT>)  <EOL>_ctx.line(x0, y0, pt.x, pt.y)<EOL>contour_i += <NUM_LIT:1><EOL><DEDENT>", "docstring": "Outlines each contour in a path with the colors in the list.\n\nEach contour starts with the first color in the list,\nand ends with the last color in the list.\n\nBecause each line segment is drawn separately,\nworks only with corner-mode transforms.", "id": "f11564:m28"}
{"signature": "def copy(self):", "body": "return ColorList(<EOL>[color(clr.r, clr.g, clr.b, clr.a, mode=\"<STR_LIT>\") for clr in self],<EOL>name=self.name,<EOL>tags=self.tags<EOL>)<EOL>", "docstring": "Returns a deep copy of the list.", "id": "f11564:c2:m4"}
{"signature": "def recombine(self, other, d=<NUM_LIT>):", "body": "a, b = self, other<EOL>d1 = max(<NUM_LIT:0>, min(d, <NUM_LIT:1>))<EOL>d2 = d1<EOL>c = ColorTheme(<EOL>name=a.name[:int(len(a.name) * d1)] +<EOL>b.name[int(len(b.name) * d2):],<EOL>ranges=a.ranges[:int(len(a.ranges) * d1)] +<EOL>b.ranges[int(len(b.ranges) * d2):],<EOL>top=a.top,<EOL>cache=os.path.join(DEFAULT_CACHE, \"<STR_LIT>\"),<EOL>blue=a.blue,<EOL>length=a.length * d1 + b.length * d2<EOL>)<EOL>c.tags = a.tags[:int(len(a.tags) * d1)]<EOL>c.tags += b.tags[int(len(b.tags) * d2):]<EOL>return c<EOL>", "docstring": "Genetic recombination of two themes using cut and splice technique.", "id": "f11564:c7:m20"}
{"signature": "def complement(clr):", "body": "clr = color(clr)<EOL>colors = colorlist(clr)<EOL>colors.append(clr.complement)<EOL>return colors<EOL>", "docstring": "Returns the color and its complement in a list.", "id": "f11564:m16"}
{"signature": "def __add__(self, colorrange):", "body": "<EOL>if isinstance(colorrange, Color):<EOL><INDENT>colorrange = ColorList(colorrange)<EOL><DEDENT>if isinstance(colorrange, ColorList)and not isinstance(colorrange, ColorRange):<EOL><INDENT>colorrange = ColorRange([(clr.h, clr.h) for clr in colorrange], [], [])<EOL><DEDENT>hsba = [[], [], [], []]<EOL>for r in [self, colorrange]:<EOL><INDENT>for i in _range(<NUM_LIT:4>):<EOL><INDENT>v = [r.h, r.s, r.b, r.a][i]<EOL>if isinstance(v, _list):<EOL><INDENT>hsba[i].extend(v)<EOL><DEDENT>elif isinstance(v, tuple):<EOL><INDENT>hsba[i].append(v)<EOL><DEDENT>else:<EOL><INDENT>hsba[i].append((v, v))<EOL><DEDENT><DEDENT><DEDENT>r = ColorRange(*hsba)<EOL>return r<EOL>", "docstring": "Combines two ColorRange objects into one.\n\nFor example, if you merge a dark green range and a light red range,\nyou get a range returning dark and light variations of green and red.", "id": "f11564:c5:m9"}
{"signature": "def triad(clr, angle=<NUM_LIT>):", "body": "clr = color(clr)<EOL>colors = colorlist(clr)<EOL>colors.append(clr.rotate_ryb(angle).lighten(<NUM_LIT:0.1>))<EOL>colors.append(clr.rotate_ryb(-angle).lighten(<NUM_LIT:0.1>))<EOL>return colors<EOL>", "docstring": "Returns a triad of colors.\n\nThe triad is made up of this color and two other colors\nthat together make up an equilateral triangle on\nthe artistic color wheel.", "id": "f11564:m23"}
{"signature": "def image_to_rgb(self, path, n=<NUM_LIT:10>):", "body": "from PIL import Image<EOL>img = Image.open(path)<EOL>p = img.getdata()<EOL>f = lambda p: choice(p)<EOL>for i in _range(n):<EOL><INDENT>rgba = f(p)<EOL>rgba = _list(rgba)<EOL>if len(rgba) == <NUM_LIT:3>:<EOL><INDENT>rgba.append(<NUM_LIT:255>)<EOL><DEDENT>r, g, b, a = [v / <NUM_LIT> for v in rgba]<EOL>clr = color(r, g, b, a, mode=\"<STR_LIT>\")<EOL>self.append(clr)<EOL><DEDENT>", "docstring": "Returns a list of colors based on pixel values in the image.\n\nThe Core Image library must be present to determine pixel colors.\nF. Albers: http://nodebox.net/code/index.php/shared_2007-06-11-11-37-05", "id": "f11564:c2:m1"}
{"signature": "def __contains__(self, clr):", "body": "for clr2 in self:<EOL><INDENT>if clr.r == clr2.r andclr.g == clr2.g andclr.b == clr2.b:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Returns True if clr's RGB values match a color in the list.", "id": "f11564:c2:m26"}
{"signature": "def hex_to_rgb(hex):", "body": "hex = hex.lstrip(\"<STR_LIT:#>\")<EOL>if len(hex) < <NUM_LIT:6>:<EOL><INDENT>hex += hex[-<NUM_LIT:1>] * (<NUM_LIT:6> - len(hex))<EOL><DEDENT>r, g, b = hex[<NUM_LIT:0>:<NUM_LIT:2>], hex[<NUM_LIT:2>:<NUM_LIT:4>], hex[<NUM_LIT:4>:]<EOL>r, g, b = [int(n, <NUM_LIT:16>) / <NUM_LIT> for n in (r, g, b)]<EOL>return r, g, b<EOL>", "docstring": "Returns RGB values for a hex color string.", "id": "f11564:m1"}
{"signature": "def __init__(self, h=(<NUM_LIT:0.0>, <NUM_LIT:1.0>), s=(<NUM_LIT:0.0>, <NUM_LIT:1.0>), b=(<NUM_LIT:0.0>, <NUM_LIT:1.0>), a=(<NUM_LIT:1.0>, <NUM_LIT:1.0>),<EOL>grayscale=False, name=\"<STR_LIT>\", length=<NUM_LIT:100>):", "body": "ColorList.__init__(self)<EOL>self.name = name<EOL>self.h = h<EOL>self.s = s<EOL>self.b = b<EOL>self.a = a<EOL>self.grayscale = grayscale<EOL>if not grayscale:<EOL><INDENT>self.black = ColorRange((<NUM_LIT:0>, <NUM_LIT:1>), <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, True, name)<EOL>self.white = ColorRange((<NUM_LIT:0>, <NUM_LIT:1>), <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, True, name)<EOL><DEDENT>self.length = length<EOL>", "docstring": "A stateless list of colors whose HSB values are confined to a range.\n\nHue, saturation and brightness are confined to a (min, max) tuple,\nor a list of (min, max) tuples for discontinuous ranges, or to a single value.\nThis way you can describe concepts such as \"light\", \"dark\", etc.\n\nWith stateless we mean that you are never sure which colors are\nin the ColorRange, different colors that fall within the ranges\nare returned each time when calling color() or colors().\n\nColorRange has all the ColorList transformations (such as darken()),\nthese return ColorList objects. It's like a snapshot of the original\nstateless ColorRange.", "id": "f11564:c5:m0"}
{"signature": "def contains(self, clr):", "body": "if not isinstance(clr, Color):<EOL><INDENT>return False<EOL><DEDENT>if not isinstance(clr, _list):<EOL><INDENT>clr = [clr]<EOL><DEDENT>for clr in clr:<EOL><INDENT>if clr.is_grey and not self.grayscale:<EOL><INDENT>return (self.black.contains(clr) orself.white.contains(clr))<EOL><DEDENT>for r, v in [(self.h, clr.h), (self.s, clr.s), (self.b, clr.brightness), (self.a, clr.a)]:<EOL><INDENT>if isinstance(r, _list):<EOL><INDENT>pass<EOL><DEDENT>elif isinstance(r, tuple):<EOL><INDENT>r = [r]<EOL><DEDENT>else:<EOL><INDENT>r = [(r, r)]<EOL><DEDENT>for min, max in r:<EOL><INDENT>if not (min <= v <= max):<EOL><INDENT>return False<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return True<EOL>", "docstring": "Returns True if the given color is part of this color range.\n\nCheck whether each h, s, b, a component of the color\nfalls within the defined range for that component.\n\nIf the given color is grayscale,\nchecks against the definitions for black and white.", "id": "f11564:c5:m8"}
{"signature": "def morguefile(query, n=<NUM_LIT:10>, top=<NUM_LIT:10>):", "body": "from web import morguefile<EOL>images = morguefile.search(query)[:top]<EOL>path = choice(images).download(thumbnail=True, wait=<NUM_LIT:10>)<EOL>return ColorList(path, n, name=query)<EOL>", "docstring": "Returns a list of colors drawn from a morgueFile image.\n\nWith the Web library installed,\ndownloads a thumbnail from morgueFile and retrieves pixel colors.", "id": "f11564:m39"}
{"signature": "def cmyk_to_rgb(c, m, y, k):", "body": "r = <NUM_LIT:1.0> - min(<NUM_LIT:1.0>, c + k)<EOL>g = <NUM_LIT:1.0> - min(<NUM_LIT:1.0>, m + k)<EOL>b = <NUM_LIT:1.0> - min(<NUM_LIT:1.0>, y + k)<EOL>return r, g, b<EOL>", "docstring": "Cyan, magenta, yellow, black to red, green, blue.\n    ReportLab, http://www.koders.com/python/fid5C006F554616848C01AC7CB96C21426B69D2E5A9.aspx\n    Results will differ from the way NSColor converts color spaces.", "id": "f11564:m3"}
{"signature": "def _load(self, top=<NUM_LIT:5>, blue=\"<STR_LIT>\", archive=None, member=None):", "body": "if archive is None:<EOL><INDENT>path = os.path.join(self.cache, self.name + \"<STR_LIT>\")<EOL>xml = open(path).read()<EOL><DEDENT>else:<EOL><INDENT>assert member is not None<EOL>xml = archive.read(member)<EOL><DEDENT>dom = parseString(xml).documentElement<EOL>attr = lambda e, a: e.attributes[a].value<EOL>for e in dom.getElementsByTagName(\"<STR_LIT>\")[:top]:<EOL><INDENT>w = float(attr(e, \"<STR_LIT>\"))<EOL>try:<EOL><INDENT>rgb = e.getElementsByTagName(\"<STR_LIT>\")[<NUM_LIT:0>]<EOL>clr = color(<EOL>float(attr(rgb, \"<STR_LIT:r>\")),<EOL>float(attr(rgb, \"<STR_LIT:g>\")),<EOL>float(attr(rgb, \"<STR_LIT:b>\")),<EOL>float(attr(rgb, \"<STR_LIT:a>\")),<EOL>mode=\"<STR_LIT>\"<EOL>)<EOL>try:<EOL><INDENT>clr.name = attr(e, \"<STR_LIT:name>\")<EOL>if clr.name == \"<STR_LIT>\": clr = color(blue)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>except:<EOL><INDENT>name = attr(e, \"<STR_LIT:name>\")<EOL>if name == \"<STR_LIT>\": name = blue<EOL>clr = color(name)<EOL><DEDENT>for s in e.getElementsByTagName(\"<STR_LIT>\"):<EOL><INDENT>self.ranges.append((<EOL>clr,<EOL>shade(attr(s, \"<STR_LIT:name>\")),<EOL>w * float(attr(s, \"<STR_LIT>\"))<EOL>))<EOL><DEDENT><DEDENT>", "docstring": "Loads a theme from aggregated web data.\n\nThe data must be old-style Prism XML: <color>s consisting of <shade>s.\nColors named \"blue\" will be overridden with the blue parameter.\n\narchive can be a file like object (e.g. a ZipFile)\nand will be used along with 'member' if specified.", "id": "f11564:c7:m6"}
{"signature": "def right_complement(clr):", "body": "right = split_complementary(clr)[<NUM_LIT:2>]<EOL>colors = complementary(clr)<EOL>colors[<NUM_LIT:3>].h = right.h<EOL>colors[<NUM_LIT:4>].h = right.h<EOL>colors[<NUM_LIT:5>].h = right.h<EOL>colors = colorlist(<EOL>colors[<NUM_LIT:0>], colors[<NUM_LIT:2>], colors[<NUM_LIT:1>], colors[<NUM_LIT:5>], colors[<NUM_LIT:4>], colors[<NUM_LIT:3>]<EOL>)<EOL>return colors<EOL>", "docstring": "Returns the right half of the split complement.", "id": "f11564:m20"}
{"signature": "def colors(self, n=<NUM_LIT:10>, d=<NUM_LIT>):", "body": "s = sum([w for clr, rng, w in self.ranges])<EOL>colors = colorlist()<EOL>for i in _range(n):<EOL><INDENT>r = random()<EOL>for clr, rng, weight in self.ranges:<EOL><INDENT>if weight / s >= r: break<EOL>r -= weight / s<EOL><DEDENT>colors.append(rng(clr, d))<EOL><DEDENT>return colors<EOL>", "docstring": "Returns a number of random colors from the theme.", "id": "f11564:c7:m8"}
{"signature": "def _darkest(self):", "body": "min, n = (<NUM_LIT:1.0>, <NUM_LIT:1.0>, <NUM_LIT:1.0>), <NUM_LIT><EOL>for clr in self:<EOL><INDENT>if clr.r + clr.g + clr.b < n:<EOL><INDENT>min, n = clr, clr.r + clr.g + clr.b<EOL><DEDENT><DEDENT>return min<EOL>", "docstring": "Returns the darkest color from the list.\n\nKnowing the contrast between a light and a dark swatch\ncan help us decide how to display readable typography.", "id": "f11564:c2:m5"}
{"signature": "def swatch(self, x, y, w=<NUM_LIT>, h=<NUM_LIT>, padding=<NUM_LIT:0>, roundness=<NUM_LIT:0>):", "body": "for clr in self:<EOL><INDENT>clr.swatch(x, y, w, h, roundness)<EOL>y += h + padding<EOL><DEDENT>", "docstring": "Rectangle swatches for all the colors in the list.", "id": "f11564:c2:m37"}
{"signature": "def distance(self, clr):", "body": "coord = lambda a, d: (cos(radians(a)) * d, sin(radians(a)) * d)<EOL>x0, y0 = coord(self.h * <NUM_LIT>, self.s)<EOL>x1, y1 = coord(clr.h * <NUM_LIT>, clr.s)<EOL>z0 = self.brightness<EOL>z1 = clr.brightness<EOL>d = sqrt((x1 - x0) ** <NUM_LIT:2> + (y1 - y0) ** <NUM_LIT:2> + (z1 - z0) ** <NUM_LIT:2>)<EOL>return d<EOL>", "docstring": "Returns the Euclidean distance between two colors (0.0-1.0).\n\nConsider colors arranged on the color wheel:\n- hue is the angle of a color along the center\n- saturation is the distance of a color from the center\n- brightness is the elevation of a color from the center\n  (i.e. we're on color a sphere)", "id": "f11564:c1:m22"}
{"signature": "def __getattr__(self, q):", "body": "if q is None:<EOL><INDENT>return self<EOL><DEDENT>candidate = None<EOL>if _favorites.data.has_key(q):<EOL><INDENT>candidate = q<EOL><DEDENT>for name, (tags, colors) in _favorites.data.iteritems():<EOL><INDENT>if q in tags:<EOL><INDENT>candidate = name<EOL><DEDENT><DEDENT>if candidate:<EOL><INDENT>tags, colors = _favorites.data[candidate]<EOL>colors = ColorList([color(r, g, b, a) for r, g, b, a in colors], name=candidate)<EOL>colors.tags = tags.split(\"<STR_LIT:U+0020>\")<EOL>return colors<EOL><DEDENT>return None<EOL>", "docstring": "Returns the favorite colors list which name/tags matches q.", "id": "f11564:c4:m0"}
{"signature": "def monochrome(clr):", "body": "def _wrap(x, min, threshold, plus):<EOL><INDENT>if x - min < threshold:<EOL><INDENT>return x + plus<EOL><DEDENT>else:<EOL><INDENT>return x - min<EOL><DEDENT><DEDENT>colors = colorlist(clr)<EOL>c = clr.copy()<EOL>c.brightness = _wrap(clr.brightness, <NUM_LIT:0.5>, <NUM_LIT>, <NUM_LIT>)<EOL>c.saturation = _wrap(clr.saturation, <NUM_LIT>, <NUM_LIT:0.1>, <NUM_LIT>)<EOL>colors.append(c)<EOL>c = clr.copy()<EOL>c.brightness = _wrap(clr.brightness, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>)<EOL>colors.append(c)<EOL>c = clr.copy()<EOL>c.brightness = max(<NUM_LIT>, clr.brightness + (<NUM_LIT:1> - clr.brightness) * <NUM_LIT>)<EOL>c.saturation = _wrap(clr.saturation, <NUM_LIT>, <NUM_LIT:0.1>, <NUM_LIT>)<EOL>colors.append(c)<EOL>c = clr.copy()<EOL>c.brightness = _wrap(clr.brightness, <NUM_LIT:0.5>, <NUM_LIT>, <NUM_LIT>)<EOL>colors.append(c)<EOL>return colors<EOL>", "docstring": "Returns colors in the same hue with varying brightness/saturation.", "id": "f11564:m22"}
{"signature": "def __init__(self, *colors, **kwargs):", "body": "if len(colors) == <NUM_LIT:1>:<EOL><INDENT>if isinstance(colors[<NUM_LIT:0>], _list)or isinstance(colors[<NUM_LIT:0>], tuple):<EOL><INDENT>self._colors = _list(colors[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>self._colors = [colors[<NUM_LIT:0>]]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._colors = _list(colors)<EOL><DEDENT>self._colors = [color(clr) for clr in self._colors]<EOL>self._steps = <NUM_LIT:100><EOL>if kwargs.has_key(\"<STR_LIT>\"):<EOL><INDENT>self._steps = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>if kwargs.has_key(\"<STR_LIT>\"):<EOL><INDENT>self._steps = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>self._spread = <NUM_LIT:0.5><EOL>if kwargs.has_key(\"<STR_LIT>\"):<EOL><INDENT>self._spread = kwargs[\"<STR_LIT>\"]<EOL><DEDENT>self._cache()<EOL>", "docstring": "Creates a list of gradient colors based on a few given base colors.\n\nThe colors can be supplied as a list or tuple of colors,\nor simply an enumeration of color parameters.\n\nThe steps named parameter defining how many colors are in the list.\nThe spread named parameter controls the midpoint of the gradient", "id": "f11564:c3:m0"}
{"signature": "def cluster_sort(self, cmp1=\"<STR_LIT>\", cmp2=\"<STR_LIT>\", reversed=False, n=<NUM_LIT:12>):", "body": "sorted = self.sort(cmp1)<EOL>clusters = ColorList()<EOL>d = <NUM_LIT:1.0><EOL>i = <NUM_LIT:0><EOL>for j in _range(len(sorted)):<EOL><INDENT>if getattr(sorted[j], cmp1) < d:<EOL><INDENT>clusters.extend(sorted[i:j].sort(cmp2))<EOL>d -= <NUM_LIT:1.0> / n<EOL>i = j<EOL><DEDENT><DEDENT>clusters.extend(sorted[i:].sort(cmp2))<EOL>if reversed: _list.reverse(clusters)<EOL>return clusters<EOL>", "docstring": "Sorts the list by cmp1, then cuts it into n pieces which are sorted by cmp2.\n\nIf you want to cluster by hue, use n=12 (since there are 12 primary/secondary hues).\nThe resulting list will not contain n even slices:\nn is used rather to slice up the cmp1 property of the colors,\ne.g. cmp1=brightness and n=3 will cluster colors by brightness >= 0.66, 0.33, 0.0", "id": "f11564:c2:m23"}
{"signature": "def left_complement(clr):", "body": "left = split_complementary(clr)[<NUM_LIT:1>]<EOL>colors = complementary(clr)<EOL>colors[<NUM_LIT:3>].h = left.h<EOL>colors[<NUM_LIT:4>].h = left.h<EOL>colors[<NUM_LIT:5>].h = left.h<EOL>colors = colorlist(<EOL>colors[<NUM_LIT:0>], colors[<NUM_LIT:2>], colors[<NUM_LIT:1>], colors[<NUM_LIT:3>], colors[<NUM_LIT:4>], colors[<NUM_LIT:5>]<EOL>)<EOL>return colors<EOL>", "docstring": "Returns the left half of the split complement.\n\nA list is returned with the same darker and softer colors\nas in the complementary list, but using the hue of the\nleft split complement instead of the complement itself.", "id": "f11564:m19"}
{"signature": "def _weight_by_hue(self):", "body": "grouped = {}<EOL>weights = []<EOL>for clr, rng, weight in self.ranges:<EOL><INDENT>h = clr.nearest_hue(primary=False)<EOL>if grouped.has_key(h):<EOL><INDENT>ranges, total_weight = grouped[h]<EOL>ranges.append((clr, rng, weight))<EOL>total_weight += weight<EOL>grouped[h] = (ranges, total_weight)<EOL><DEDENT>else:<EOL><INDENT>grouped[h] = ([(clr, rng, weight)], weight)<EOL><DEDENT><DEDENT>s = <NUM_LIT:1.0> * sum([w for r, w in grouped.values()])<EOL>grouped = [(grouped[h][<NUM_LIT:1>], grouped[h][<NUM_LIT:1>] / s, h, grouped[h][<NUM_LIT:0>]) for h in grouped]<EOL>grouped.sort()<EOL>grouped.reverse()<EOL>return grouped<EOL>", "docstring": "Returns a list of (hue, ranges, total weight, normalized total weight)-tuples.\n\nColorTheme is made up out of (color, range, weight) tuples.\nFor consistency with XML-output in the old Prism format\n(i.e. <color>s made up of <shade>s) we need a group\nweight per different hue.\n\nThe same is true for the swatch() draw method.\nHues are grouped as a single unit (e.g. dark red, intense red, weak red)\nafter which the dimensions (rows/columns) is determined.", "id": "f11564:c7:m3"}
{"signature": "def __init__(self, dx=<NUM_LIT:10>, dy=<NUM_LIT:10>, alpha=<NUM_LIT>, blur=<NUM_LIT>, clr=None):", "body": "Grob.__init__(self, _ctx)<EOL>if clr is None:<EOL><INDENT>clr = color(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, alpha, mode=\"<STR_LIT>\")<EOL><DEDENT>self.dx = dx<EOL>self.dy = dy<EOL>self.blur = blur<EOL>self.clr = clr.copy()<EOL>self.clr.alpha = alpha<EOL>global _shadow<EOL>_shadow = self<EOL>", "docstring": "Sets the dropshadow for all onscreen elements.\n\nBoth the fill and stroke of a path get a dropshadow.\n\nTODO - Implement shadow, could work as a postprocessing effect", "id": "f11564:c8:m0"}
{"signature": "def guess_name(clr):", "body": "clr = Color(clr)<EOL>if clr.is_transparent: return \"<STR_LIT>\"<EOL>if clr.is_black: return \"<STR_LIT>\"<EOL>if clr.is_white: return \"<STR_LIT>\"<EOL>if clr.is_black: return \"<STR_LIT>\"<EOL>for name in named_colors:<EOL><INDENT>try:<EOL><INDENT>r, g, b = named_colors[name]<EOL><DEDENT>except:<EOL><INDENT>continue<EOL><DEDENT>if r == clr.r and g == clr.g and b == clr.b:<EOL><INDENT>return name<EOL><DEDENT><DEDENT>for shade in shades:<EOL><INDENT>if clr in shade:<EOL><INDENT>return shade.name + \"<STR_LIT:U+0020>\" + clr.nearest_hue()<EOL>break<EOL><DEDENT><DEDENT>return clr.nearest_hue()<EOL>", "docstring": "Guesses the shade and hue name of a color.\n\nIf the given color is named in the named_colors list, return that name.\nOtherwise guess its nearest hue and shade range.", "id": "f11564:m32"}
{"signature": "def _context(self):", "body": "tags1 = None<EOL>for clr in self:<EOL><INDENT>overlap = []<EOL>if clr.is_black:<EOL><INDENT>name = \"<STR_LIT>\"<EOL><DEDENT>elif clr.is_white:<EOL><INDENT>name = \"<STR_LIT>\"<EOL><DEDENT>elif clr.is_grey:<EOL><INDENT>name = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>name = clr.nearest_hue(primary=True)<EOL><DEDENT>if name == \"<STR_LIT>\" and clr.brightness < <NUM_LIT>:<EOL><INDENT>name = \"<STR_LIT>\"<EOL><DEDENT>tags2 = context[name]<EOL>if tags1 is None:<EOL><INDENT>tags1 = tags2<EOL><DEDENT>else:<EOL><INDENT>for tag in tags2:<EOL><INDENT>if tag in tags1:<EOL><INDENT>if tag not in overlap:<EOL><INDENT>overlap.append(tag)<EOL><DEDENT><DEDENT><DEDENT>tags1 = overlap<EOL><DEDENT><DEDENT>overlap.sort()<EOL>return overlap<EOL>", "docstring": "Returns the intersection of each color's context.\n\nGet the nearest named hue of each color,\nand finds overlapping tags in each hue's colors.\nFor example, a list containing yellow, deeppink and olive\nyields: femininity, friendship, happiness, joy.", "id": "f11564:c2:m3"}
{"signature": "def swatch(self, x, y, w=<NUM_LIT>, h=<NUM_LIT>, roundness=<NUM_LIT:0>):", "body": "_ctx.fill(self)<EOL>_ctx.rect(x, y, w, h, roundness)<EOL>", "docstring": "Rectangle swatch for this color.", "id": "f11564:c1:m23"}
{"signature": "def parse(svg, cached=False, _copy=True):", "body": "if not cached:<EOL><INDENT>dom = parser.parseString(svg)<EOL>paths = parse_node(dom, [])<EOL><DEDENT>else:<EOL><INDENT>id = _cache.id(svg)<EOL>if not _cache.has_key(id):<EOL><INDENT>dom = parser.parseString(svg)<EOL>_cache.save(id, parse_node(dom, []))<EOL><DEDENT>paths = _cache.load(id, _copy)<EOL><DEDENT>return paths<EOL>", "docstring": "Returns cached copies unless otherwise specified.", "id": "f11566:m0"}
{"signature": "def get_attribute(element, attribute, default=<NUM_LIT:0>):", "body": "a = element.getAttribute(attribute)<EOL>if a == \"<STR_LIT>\": <EOL><INDENT>return default<EOL><DEDENT>return a<EOL>", "docstring": "Returns XML element's attribute, or default if none.", "id": "f11566:m2"}
{"signature": "def transform_from_local(xp, yp, cphi, sphi, mx, my):", "body": "x = xp * cphi - yp * sphi + mx<EOL>y = xp * sphi + yp * cphi + my<EOL>return (x,y)<EOL>", "docstring": "Transform from the local frame to absolute space.", "id": "f11567:m2"}
{"signature": "def bezier_arc(x1, y1, x2, y2, start_angle=<NUM_LIT:0>, extent=<NUM_LIT>):", "body": "x1,y1, x2,y2 = min(x1,x2), max(y1,y2), max(x1,x2), min(y1,y2)<EOL>if abs(extent) <= <NUM_LIT>:<EOL><INDENT>frag_angle = float(extent)<EOL>nfrag = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>nfrag = int(ceil(abs(extent)/<NUM_LIT>))<EOL>if nfrag == <NUM_LIT:0>:<EOL><INDENT>warnings.warn('<STR_LIT>' % extent)<EOL>return []<EOL><DEDENT>frag_angle = float(extent) / nfrag<EOL><DEDENT>x_cen = (x1+x2)/<NUM_LIT><EOL>y_cen = (y1+y2)/<NUM_LIT><EOL>rx = (x2-x1)/<NUM_LIT><EOL>ry = (y2-y1)/<NUM_LIT><EOL>half_angle = radians(frag_angle) / <NUM_LIT:2><EOL>kappa = abs(<NUM_LIT> / <NUM_LIT> * (<NUM_LIT:1.> - cos(half_angle)) / sin(half_angle))<EOL>if frag_angle < <NUM_LIT:0>:<EOL><INDENT>sign = -<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>sign = <NUM_LIT:1><EOL><DEDENT>point_list = []<EOL>for i in range(nfrag):<EOL><INDENT>theta0 = radians(start_angle + i*frag_angle)<EOL>theta1 = radians(start_angle + (i+<NUM_LIT:1>)*frag_angle)<EOL>c0 = cos(theta0)<EOL>c1 = cos(theta1)<EOL>s0 = sin(theta0)<EOL>s1 = sin(theta1)<EOL>if frag_angle > <NUM_LIT:0>:<EOL><INDENT>signed_kappa = -kappa<EOL><DEDENT>else:<EOL><INDENT>signed_kappa = kappa<EOL><DEDENT>point_list.append((x_cen + rx * c0,<EOL>y_cen - ry * s0,<EOL>x_cen + rx * (c0 + signed_kappa * s0),<EOL>y_cen - ry * (s0 - signed_kappa * c0),<EOL>x_cen + rx * (c1 - signed_kappa * s1),<EOL>y_cen - ry * (s1 + signed_kappa * c1),<EOL>x_cen + rx * c1,<EOL>y_cen - ry * s1))<EOL><DEDENT>return point_list<EOL>", "docstring": "Compute a cubic Bezier approximation of an elliptical arc.\n\n    (x1, y1) and (x2, y2) are the corners of the enclosing rectangle.\n    The coordinate system has coordinates that increase to the right and down.\n    Angles, measured in degress, start with 0 to the right (the positive X axis) \n    and increase counter-clockwise.\n    The arc extends from start_angle to start_angle+extent.\n    I.e. 
start_angle=0 and extent=180 yields an openside-down semi-circle.\n\n    The resulting coordinates are of the form (x1,y1, x2,y2, x3,y3, x4,y4)\n    such that the curve goes from (x1, y1) to (x4, y4) with (x2, y2) and\n    (x3, y3) as their respective Bezier control points.", "id": "f11567:m0"}
{"signature": "def elliptical_arc_to(x1, y1, rx, ry, phi, large_arc_flag, sweep_flag, x2, y2):", "body": "<EOL>rx = abs(rx)<EOL>ry = abs(ry)<EOL>phi = phi % <NUM_LIT><EOL>if x1==x2 and y1==y2:<EOL><INDENT>return []<EOL><DEDENT>if rx == <NUM_LIT:0> or ry == <NUM_LIT:0>:<EOL><INDENT>return [(x2,y2)]<EOL><DEDENT>rphi = radians(phi)<EOL>cphi = cos(rphi)<EOL>sphi = sin(rphi)<EOL>dx = <NUM_LIT:0.5>*(x1 - x2)<EOL>dy = <NUM_LIT:0.5>*(y1 - y2)<EOL>x1p =  cphi * dx + sphi * dy<EOL>y1p = -sphi * dx + cphi * dy<EOL>lam = (x1p/rx)**<NUM_LIT:2> + (y1p/ry)**<NUM_LIT:2><EOL>if lam > <NUM_LIT:1.0>:<EOL><INDENT>scale = sqrt(lam)<EOL>rx *= scale<EOL>ry *= scale<EOL><DEDENT>num = max((rx*ry)**<NUM_LIT:2> - (rx*y1p)**<NUM_LIT:2> - (ry*x1p)**<NUM_LIT:2>, <NUM_LIT:0.0>)<EOL>den = ((rx*y1p)**<NUM_LIT:2> + (ry*x1p)**<NUM_LIT:2>)<EOL>a = sqrt(num / den)<EOL>cxp = a * rx*y1p/ry<EOL>cyp = -a * ry*x1p/rx<EOL>if large_arc_flag == sweep_flag:<EOL><INDENT>cxp = -cxp<EOL>cyp = -cyp<EOL><DEDENT>mx = <NUM_LIT:0.5>*(x1+x2)<EOL>my = <NUM_LIT:0.5>*(y1+y2)<EOL>dx = (x1p-cxp) / rx<EOL>dy = (y1p-cyp) / ry<EOL>dx2 = (-x1p-cxp) / rx<EOL>dy2 = (-y1p-cyp) / ry<EOL>theta1 = angle(<NUM_LIT:1>,<NUM_LIT:0>,dx,dy)<EOL>dtheta = angle(dx,dy,dx2,dy2)<EOL>if not sweep_flag and dtheta > <NUM_LIT:0>:<EOL><INDENT>dtheta -= <NUM_LIT><EOL><DEDENT>elif sweep_flag and dtheta < <NUM_LIT:0>:<EOL><INDENT>dtheta += <NUM_LIT><EOL><DEDENT>p = []<EOL>control_points = bezier_arc(cxp-rx,cyp-ry,cxp+rx,cyp+ry, theta1, dtheta)<EOL>for x1p,y1p, x2p,y2p, x3p,y3p, x4p,y4p in control_points:<EOL><INDENT>p.append((<EOL>transform_from_local(x2p,y2p,cphi,sphi,mx,my) +<EOL>transform_from_local(x3p,y3p,cphi,sphi,mx,my) +<EOL>transform_from_local(x4p,y4p,cphi,sphi,mx,my)<EOL>))<EOL><DEDENT>return p<EOL>", "docstring": "An elliptical arc approximated with Bezier curves or a line segment.\nAlgorithm taken from the SVG 1.1 Implementation Notes:\nhttp://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes", "id": "f11567:m3"}
{"signature": "def alignment(self, d=<NUM_LIT:5>):", "body": "vx = vy = vz = <NUM_LIT:0><EOL>for b in self.boids:<EOL><INDENT>if b != self:<EOL><INDENT>vx, vy, vz = vx+b.vx, vy+b.vy, vz+b.vz<EOL><DEDENT><DEDENT>n = len(self.boids)-<NUM_LIT:1><EOL>vx, vy, vz = vx/n, vy/n, vz/n<EOL>return (vx-self.vx)/d, (vy-self.vy)/d, (vz-self.vz)/d<EOL>", "docstring": "Boids match velocity with other boids.", "id": "f11568:c0:m4"}
{"signature": "def separation(self, r=<NUM_LIT:10>):", "body": "vx = vy = vz = <NUM_LIT:0><EOL>for b in self.boids:<EOL><INDENT>if b != self:<EOL><INDENT>if abs(self.x-b.x) < r: vx += (self.x-b.x)<EOL>if abs(self.y-b.y) < r: vy += (self.y-b.y)<EOL>if abs(self.z-b.z) < r: vz += (self.z-b.z)<EOL><DEDENT><DEDENT>return vx, vy, vz<EOL>", "docstring": "Boids keep a small distance from other boids.\n\n        Ensures that boids don't collide into each other,\n        in a smoothly accelerated motion.", "id": "f11568:c0:m3"}
{"signature": "def constrain(self):", "body": "dx = self.w * <NUM_LIT:0.1><EOL>dy = self.h * <NUM_LIT:0.1> <EOL>for b in self:<EOL><INDENT>if b.x < self.x-dx: b.vx += _ctx.random(dx)<EOL>if b.y < self.y-dy: b.vy += _ctx.random(dy)<EOL>if b.x > self.x+self.w+dx: b.vx -= _ctx.random(dx)<EOL>if b.y > self.y+self.h+dy: b.vy -= _ctx.random(dy)<EOL>if b.z < <NUM_LIT:0>: b.vz += <NUM_LIT:10><EOL>if b.z > <NUM_LIT:100>: b.vz -= <NUM_LIT:10><EOL>if b.y > self._perch_y and _ctx.random() < self._perch:<EOL><INDENT>b.y = self._perch_y<EOL>b.vy = -abs(b.vy) * <NUM_LIT><EOL>b.is_perching = True<EOL>try:<EOL><INDENT>b._perch_t = self._perch_t()<EOL><DEDENT>except:<EOL><INDENT>b._perch_t = self._perch_t<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Cages the flock inside the x, y, w, h area.\n\n        The actual cage is a bit larger,\n        so boids don't seem to bounce of invisible walls\n        (they are rather \"encouraged\" to stay in the area).\n\n        If a boid touches the ground level,\n        it may decide to perch there for a while.", "id": "f11568:c1:m8"}
{"signature": "def _title(self):", "body": "return self.find(\"<STR_LIT:title>\").string<EOL>", "docstring": "Returns the page title.", "id": "f11569:c2:m2"}
{"signature": "def links(self, external=True):", "body": "domain = URLParser(self.url).domain<EOL>links = []<EOL>for a in self(\"<STR_LIT:a>\"):<EOL><INDENT>for attribute, value in a.attrs:<EOL><INDENT>if attribute == \"<STR_LIT>\":<EOL><INDENT>if not externalor (value.startswith(\"<STR_LIT>\") and value.find(\"<STR_LIT>\"+domain) < <NUM_LIT:0>):<EOL><INDENT>links.append(value)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return links<EOL>", "docstring": "Retrieves links in the page.\n\n        Returns a list of URL's.\n        By default, only external URL's are returned.\n        External URL's starts with http:// and point to another\n        domain than the domain the page is on.", "id": "f11569:c2:m5"}
{"signature": "def hash(self, id):", "body": "h = md5(id).hexdigest()<EOL>return os.path.join(self.path, h+self.type)<EOL>", "docstring": "Creates a unique filename in the cache for the id.", "id": "f11570:c0:m1"}
{"signature": "def age(self, id):", "body": "path = self.hash(id)<EOL>if os.path.exists(path):<EOL><INDENT>modified = datetime.datetime.fromtimestamp(os.stat(path)[<NUM_LIT:8>])<EOL>age = datetime.datetime.today() - modified<EOL>return age.days<EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "Returns the age of the cache entry, in days.", "id": "f11570:c0:m5"}
{"signature": "def search_images(q, start=<NUM_LIT:0>, size=\"<STR_LIT>\", wait=<NUM_LIT:10>, asynchronous=False, cached=False):", "body": "service = GOOGLE_IMAGES<EOL>return GoogleSearch(q, start, service, size, wait, asynchronous, cached)<EOL>", "docstring": "Returns a Google images query formatted as a GoogleSearch list object.", "id": "f11571:m5"}
{"signature": "def format_data(s):", "body": "return s.encode(\"<STR_LIT:utf-8>\")<EOL>", "docstring": "Google library returns Unicode strings.", "id": "f11571:m2"}
{"signature": "def search(q, start=<NUM_LIT:0>, wait=<NUM_LIT:10>, asynchronous=False, cached=False):", "body": "service = GOOGLE_SEARCH<EOL>return GoogleSearch(q, start, service, \"<STR_LIT>\", wait, asynchronous, cached)<EOL>", "docstring": "Returns a Google web query formatted as a GoogleSearch list object.", "id": "f11571:m4"}
{"signature": "def search_news(q, start=<NUM_LIT:0>, wait=<NUM_LIT:10>, asynchronous=False, cached=False):", "body": "service = GOOGLE_NEWS<EOL>return GoogleSearch(q, start, service, \"<STR_LIT>\", wait, asynchronous, cached)<EOL>", "docstring": "Returns a Google news query formatted as a GoogleSearch list object.", "id": "f11571:m6"}
{"signature": "def __cmp__(self, other):", "body": "if self.total > other.total:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>elif self.total < other.total: <EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "Compares with another GoogleSearch based on the number of results.", "id": "f11571:c2:m2"}
{"signature": "def sort(words, context=\"<STR_LIT>\", strict=True, relative=True, service=GOOGLE_SEARCH,<EOL>wait=<NUM_LIT:10>, asynchronous=False, cached=False):", "body": "results = []<EOL>for word in words:<EOL><INDENT>q = word + \"<STR_LIT:U+0020>\" + context<EOL>q.strip()<EOL>if strict: q = \"<STR_LIT>\"+q+\"<STR_LIT>\"<EOL>r = GoogleSearch(q, <NUM_LIT:1>, service, \"<STR_LIT>\", wait, asynchronous, cached)<EOL>results.append(r)<EOL><DEDENT>results.sort(GoogleResults.__cmp__)<EOL>results.reverse()<EOL>if relative and len(results) > <NUM_LIT:0>:<EOL><INDENT>sum = <NUM_LIT><EOL>for r in results: sum += r.total<EOL>for r in results: r.total /= float(sum)<EOL><DEDENT>results = [(r.query, r.total) for r in results]<EOL>return results<EOL>", "docstring": "Performs a Google sort on the given list.\n\n    Sorts the items in the list according to \n    the result count Google yields on an item.\n\n    Setting a context sorts the items according\n    to their relation to this context;\n    for example sorting [red, green, blue] by \"love\"\n    yields red as the highest results,\n    likely because red is the color commonly associated with love.", "id": "f11571:m8"}
{"signature": "def search(q, start=<NUM_LIT:1>, count=<NUM_LIT:10>, context=None, wait=<NUM_LIT:10>, asynchronous=False, cached=False):", "body": "service = YAHOO_SEARCH<EOL>return YahooSearch(q, start, count, service, context, wait, asynchronous, cached)<EOL>", "docstring": "Returns a Yahoo web query formatted as a YahooSearch list object.", "id": "f11572:m3"}
{"signature": "def __init__(self, q, start=<NUM_LIT:1>, count=<NUM_LIT:10>, service=YAHOO_SEARCH, context=None, <EOL>wait=<NUM_LIT:10>, asynchronous=False, cached=True):", "body": "self.query = q<EOL>self.service = service<EOL>if cached:<EOL><INDENT>cache = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>cache = None<EOL><DEDENT>url = \"<STR_LIT>\"<EOL>if service == YAHOO_SEARCH and context == None : url += \"<STR_LIT>\"<EOL>if service == YAHOO_SEARCH and context != None : url += \"<STR_LIT>\"<EOL>if service == YAHOO_IMAGES   :  url += \"<STR_LIT>\"<EOL>if service == YAHOO_NEWS     :  url += \"<STR_LIT>\"<EOL>if service == YAHOO_SPELLING :  url += \"<STR_LIT>\"<EOL>arg = urllib.urlencode(((\"<STR_LIT>\", YAHOO_ID), <EOL>(\"<STR_LIT>\", q),<EOL>(\"<STR_LIT:start>\", start),<EOL>(\"<STR_LIT>\", count),<EOL>(\"<STR_LIT>\", unicode(context))))<EOL>url += arg<EOL>URLAccumulator.__init__(self, url, wait, asynchronous, cache, \"<STR_LIT>\")<EOL>", "docstring": "Searches Yahoo for the given query.\n\n        By default, return cached results whenever possible.\n        Otherwise, go online and update the local cache.\n        The number of results is limited to count and starts at the given index.\n\n        The returned results depend on the service used: \n        web pages, images, news, spelling suggestion or contextual links.", "id": "f11572:c4:m0"}
{"signature": "def encode_basestring(s):", "body": "def replace(match):<EOL><INDENT>return ESCAPE_DCT[match.group(<NUM_LIT:0>)]<EOL><DEDENT>return '<STR_LIT:\">' + ESCAPE.sub(replace, s) + '<STR_LIT:\">'<EOL>", "docstring": "Return a JSON representation of a Python string", "id": "f11589:m1"}
{"signature": "def encode(self, o):", "body": "<EOL>if isinstance(o, basestring):<EOL><INDENT>if isinstance(o, str):<EOL><INDENT>_encoding = self.encoding<EOL>if (_encoding is not None <EOL>and not (_encoding == '<STR_LIT:utf-8>')):<EOL><INDENT>o = o.decode(_encoding)<EOL><DEDENT><DEDENT>if self.ensure_ascii:<EOL><INDENT>return encode_basestring_ascii(o)<EOL><DEDENT>else:<EOL><INDENT>return encode_basestring(o)<EOL><DEDENT><DEDENT>chunks = list(self.iterencode(o))<EOL>return '<STR_LIT>'.join(chunks)<EOL>", "docstring": "Return a JSON string representation of a Python data structure.\n\n>>> JSONEncoder().encode({\"foo\": [\"bar\", \"baz\"]})\n'{\"foo\": [\"bar\", \"baz\"]}'", "id": "f11589:c0:m7"}
{"signature": "def default(self, o):", "body": "raise TypeError(\"<STR_LIT>\" % (o,))<EOL>", "docstring": "Implement this method in a subclass such that it returns\na serializable object for ``o``, or calls the base implementation\n(to raise a ``TypeError``).\n\nFor example, to support arbitrary iterators, you could\nimplement default like this::\n\n    def default(self, o):\n        try:\n            iterable = iter(o)\n        except TypeError:\n            pass\n        else:\n            return list(iterable)\n        return JSONEncoder.default(self, o)", "id": "f11589:c0:m6"}
{"signature": "def __init__(self, skipkeys=False, ensure_ascii=True,<EOL>check_circular=True, allow_nan=True, sort_keys=False,<EOL>indent=None, separators=None, encoding='<STR_LIT:utf-8>', default=None):", "body": "self.skipkeys = skipkeys<EOL>self.ensure_ascii = ensure_ascii<EOL>self.check_circular = check_circular<EOL>self.allow_nan = allow_nan<EOL>self.sort_keys = sort_keys<EOL>self.indent = indent<EOL>self.current_indent_level = <NUM_LIT:0><EOL>if separators is not None:<EOL><INDENT>self.item_separator, self.key_separator = separators<EOL><DEDENT>if default is not None:<EOL><INDENT>self.default = default<EOL><DEDENT>self.encoding = encoding<EOL>", "docstring": "Constructor for JSONEncoder, with sensible defaults.\n\nIf skipkeys is False, then it is a TypeError to attempt\nencoding of keys that are not str, int, long, float or None.  If\nskipkeys is True, such items are simply skipped.\n\nIf ensure_ascii is True, the output is guaranteed to be str\nobjects with all incoming unicode characters escaped.  If\nensure_ascii is false, the output will be unicode object.\n\nIf check_circular is True, then lists, dicts, and custom encoded\nobjects will be checked for circular references during encoding to\nprevent an infinite recursion (which would cause an OverflowError).\nOtherwise, no such check takes place.\n\nIf allow_nan is True, then NaN, Infinity, and -Infinity will be\nencoded as such.  This behavior is not JSON specification compliant,\nbut is consistent with most JavaScript based encoders and decoders.\nOtherwise, it will be a ValueError to encode such floats.\n\nIf sort_keys is True, then the output of dictionaries will be\nsorted by key; this is useful for regression tests to ensure\nthat JSON serializations can be compared on a day-to-day basis.\n\nIf indent is a non-negative integer, then JSON array\nelements and object members will be pretty-printed with that\nindent level.  
An indent level of 0 will only insert newlines.\nNone is the most compact representation.\n\nIf specified, separators should be a (item_separator, key_separator)\ntuple.  The default is (', ', ': ').  To get the most compact JSON\nrepresentation you should specify (',', ':') to eliminate whitespace.\n\nIf specified, default is a function that gets called for objects\nthat can't otherwise be serialized.  It should return a JSON encodable\nversion of the object or raise a ``TypeError``.\n\nIf encoding is not None, then all input strings will be\ntransformed into unicode using that encoding prior to JSON-encoding.\nThe default is UTF-8.", "id": "f11589:c0:m0"}
{"signature": "def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,<EOL>allow_nan=True, cls=None, indent=None, separators=None,<EOL>encoding='<STR_LIT:utf-8>', default=None, **kw):", "body": "<EOL>if (skipkeys is False and ensure_ascii is True and<EOL>check_circular is True and allow_nan is True and<EOL>cls is None and indent is None and separators is None and<EOL>encoding == '<STR_LIT:utf-8>' and default is None and not kw):<EOL><INDENT>return _default_encoder.encode(obj)<EOL><DEDENT>if cls is None:<EOL><INDENT>cls = JSONEncoder<EOL><DEDENT>return cls(<EOL>skipkeys=skipkeys, ensure_ascii=ensure_ascii,<EOL>check_circular=check_circular, allow_nan=allow_nan, indent=indent,<EOL>separators=separators, encoding=encoding, default=default,<EOL>**kw).encode(obj)<EOL>", "docstring": "Serialize ``obj`` to a JSON formatted ``str``.\n\nIf ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types\n(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) \nwill be skipped instead of raising a ``TypeError``.\n\nIf ``ensure_ascii`` is ``False``, then the return value will be a\n``unicode`` instance subject to normal Python ``str`` to ``unicode``\ncoercion rules instead of being escaped to an ASCII ``str``.\n\nIf ``check_circular`` is ``False``, then the circular reference check\nfor container types will be skipped and a circular reference will\nresult in an ``OverflowError`` (or worse).\n\nIf ``allow_nan`` is ``False``, then it will be a ``ValueError`` to\nserialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in\nstrict compliance of the JSON specification, instead of using the\nJavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).\n\nIf ``indent`` is a non-negative integer, then JSON array elements and\nobject members will be pretty-printed with that indent level. An indent\nlevel of 0 will only insert newlines. 
``None`` is the most compact\nrepresentation.\n\nIf ``separators`` is an ``(item_separator, dict_separator)`` tuple\nthen it will be used instead of the default ``(', ', ': ')`` separators.\n``(',', ':')`` is the most compact JSON representation.\n\n``encoding`` is the character encoding for str instances, default is UTF-8.\n\n``default(obj)`` is a function that should return a serializable version\nof obj or raise TypeError. The default simply raises TypeError.\n\nTo use a custom ``JSONEncoder`` subclass (e.g. one that overrides the\n``.default()`` method to serialize additional types), specify it with\nthe ``cls`` kwarg.", "id": "f11590:m1"}
{"signature": "def write(obj):", "body": "import warnings<EOL>warnings.warn(\"<STR_LIT>\",<EOL>DeprecationWarning)<EOL>return dumps(obj)<EOL>", "docstring": "jsonlib, JsonUtils, python-json, json-py API compatibility hook.\nUse dumps(s) instead.", "id": "f11590:m7"}
{"signature": "def decode(s):", "body": "import warnings<EOL>warnings.warn(\"<STR_LIT>\",<EOL>DeprecationWarning)<EOL>return loads(s)<EOL>", "docstring": "demjson, python-cjson API compatibility hook. Use loads(s) instead.", "id": "f11590:m4"}
{"signature": "def __init__(self, encoding=None, object_hook=None, parse_float=None,<EOL>parse_int=None, parse_constant=None, strict=True):", "body": "self.encoding = encoding<EOL>self.object_hook = object_hook<EOL>self.parse_float = parse_float<EOL>self.parse_int = parse_int<EOL>self.parse_constant = parse_constant<EOL>self.strict = strict<EOL>", "docstring": "``encoding`` determines the encoding used to interpret any ``str``\nobjects decoded by this instance (utf-8 by default).  It has no\neffect when decoding ``unicode`` objects.\n\nNote that currently only encodings that are a superset of ASCII work,\nstrings of other encodings should be passed in as ``unicode``.\n\n``object_hook``, if specified, will be called with the result\nof every JSON object decoded and its return value will be used in\nplace of the given ``dict``.  This can be used to provide custom\ndeserializations (e.g. to support JSON-RPC class hinting).\n\n``parse_float``, if specified, will be called with the string\nof every JSON float to be decoded. By default this is equivalent to\nfloat(num_str). This can be used to use another datatype or parser\nfor JSON floats (e.g. decimal.Decimal).\n\n``parse_int``, if specified, will be called with the string\nof every JSON int to be decoded. By default this is equivalent to\nint(num_str). This can be used to use another datatype or parser\nfor JSON integers (e.g. float).\n\n``parse_constant``, if specified, will be called with one of the\nfollowing strings: -Infinity, Infinity, NaN, null, true, false.\nThis can be used to raise an exception if invalid JSON numbers\nare encountered.", "id": "f11591:c0:m0"}
{"signature": "def iterscan(self, string, idx=<NUM_LIT:0>, context=None):", "body": "match = self.scanner.scanner(string, idx).match<EOL>actions = self.actions<EOL>lastend = idx<EOL>end = len(string)<EOL>while True:<EOL><INDENT>m = match()<EOL>if m is None:<EOL><INDENT>break<EOL><DEDENT>matchbegin, matchend = m.span()<EOL>if lastend == matchend:<EOL><INDENT>break<EOL><DEDENT>action = actions[m.lastindex]<EOL>if action is not None:<EOL><INDENT>rval, next_pos = action(m, context)<EOL>if next_pos is not None and next_pos != matchend:<EOL><INDENT>matchend = next_pos<EOL>match = self.scanner.scanner(string, matchend).match<EOL><DEDENT>yield rval, matchend<EOL><DEDENT>lastend = matchend<EOL><DEDENT>", "docstring": "Yield match, end_idx for each match", "id": "f11592:c0:m1"}
{"signature": "def parse_translations(self, markup):", "body": "global languages<EOL>translations = {}<EOL>m = re.findall(self.re[\"<STR_LIT>\"], markup)<EOL>for language, translation in m:<EOL><INDENT>if language in languages:<EOL><INDENT>translations[language] = translation<EOL><DEDENT><DEDENT>return translations<EOL>", "docstring": "Returns a dictionary of translations for the page title.\n\n        A Wikipedia language link looks like: [[af:Rekenaar]].\n        The parser will also fetch links like \"user:\" and \"media:\"\n        but these are stripped against the dictionary of\n        Wikipedia languages.\n\n        You can get a translated page by searching Wikipedia\n        with the appropriate language code and supplying\n        the translated title as query.", "id": "f11594:c8:m24"}
{"signature": "def draw_table(table, x, y, w, padding=<NUM_LIT:5>):", "body": "try: from web import _ctx<EOL>except: pass<EOL>f = _ctx.fill()<EOL>_ctx.stroke(f)<EOL>h = _ctx.textheight(\"<STR_LIT:U+0020>\") + padding*<NUM_LIT:2><EOL>row_y = y<EOL>if table.title != \"<STR_LIT>\":<EOL><INDENT>_ctx.fill(f)<EOL>_ctx.rect(x, row_y, w, h)<EOL>_ctx.fill(<NUM_LIT:1>)<EOL>_ctx.text(table.title, x+padding, row_y+_ctx.fontsize()+ padding)<EOL>row_y += h<EOL><DEDENT>rowspans = [<NUM_LIT:1> for i in range(<NUM_LIT:10>)]<EOL>previous_cell_w = <NUM_LIT:0><EOL>for row in table:<EOL><INDENT>cell_x = x<EOL>cell_w  = <NUM_LIT:1.0> * w<EOL>cell_w -= previous_cell_w * len([n for n in rowspans if n > <NUM_LIT:1>])<EOL>cell_w /= len(row)<EOL>cell_h = <NUM_LIT:0><EOL>for cell in row:<EOL><INDENT>this_h = _ctx.textheight(cell, width=cell_w-padding*<NUM_LIT:2>) + padding*<NUM_LIT:2><EOL>cell_h = max(cell_h, this_h)<EOL><DEDENT>i = <NUM_LIT:0><EOL>for cell in row:<EOL><INDENT>if rowspans[i] > <NUM_LIT:1>:<EOL><INDENT>rowspans[i] -= <NUM_LIT:1><EOL>cell_x += previous_cell_w<EOL>i += <NUM_LIT:1><EOL><DEDENT>m = re.search(\"<STR_LIT>\", cell.properties)<EOL>if m:<EOL><INDENT>rowspan = int(m.group(<NUM_LIT:1>))<EOL>rowspans[i] = rowspan<EOL><DEDENT>else:<EOL><INDENT>rowspan = <NUM_LIT:1><EOL><DEDENT>_ctx.fill(f)<EOL>_ctx.text(cell, cell_x+padding, row_y+_ctx.fontsize()+padding, cell_w-padding*<NUM_LIT:2>)<EOL>_ctx.line(cell_x, row_y, cell_x+cell_w, row_y)<EOL>if cell_x > x:<EOL><INDENT>_ctx.nofill()<EOL>_ctx.line(cell_x, row_y, cell_x, row_y+cell_h)<EOL><DEDENT>cell_x += cell_w<EOL>i += <NUM_LIT:1><EOL><DEDENT>row_y += cell_h<EOL>previous_cell_w = cell_w<EOL><DEDENT>_ctx.nofill()<EOL>_ctx.rect(x, y, w, row_y-y)<EOL>", "docstring": "This is a very poor algorithm to draw Wikipedia tables in NodeBox.", "id": "f11594:m7"}
{"signature": "def parse_paragraphs(self, markup):", "body": "<EOL>refs = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]<EOL>exclude = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]<EOL>exclude.extend(refs)<EOL>paragraphs = []<EOL>paragraph = WikipediaParagraph(self.title)<EOL>paragraph_data = \"<STR_LIT>\"<EOL>for chunk in markup.split(\"<STR_LIT:\\n>\"):<EOL><INDENT>if not chunk.startswith(\"<STR_LIT:U+0020>\"):<EOL><INDENT>chunk = chunk.strip()<EOL><DEDENT>if chunk.startswith(\"<STR_LIT:=>\"):<EOL><INDENT>if paragraph.title.lower() in refs or (paragraph.parent and paragraph.parent.title.lower() in refs):<EOL><INDENT>self.parse_paragraph_references(paragraph_data)<EOL><DEDENT>paragraph.extend(self.parse_paragraph(paragraph_data))<EOL>paragraphs.append(paragraph)<EOL>title = chunk.strip().strip(\"<STR_LIT:=>\")<EOL>title = self.plain(title)<EOL>paragraph = WikipediaParagraph(title)<EOL>paragraph.depth = self.parse_paragraph_heading_depth(chunk)<EOL>if paragraph.title.lower() not in exclude:<EOL><INDENT>paragraph = self.connect_paragraph(paragraph, paragraphs)<EOL><DEDENT>paragraph_data = \"<STR_LIT>\"<EOL><DEDENT>elif re.search(re.compile(\"<STR_LIT>\", re.I), chunk):<EOL><INDENT>paragraph.main = [link.strip(\"<STR_LIT>\") for link in chunk.split(\"<STR_LIT:|>\")[<NUM_LIT:1>:]]<EOL>paragraph.main = [re.sub(re.compile(\"<STR_LIT>\", re.I), \"<STR_LIT>\", link) <EOL>for link in paragraph.main]<EOL><DEDENT>elif re.search(re.compile(\"<STR_LIT>\", re.I), chunk):<EOL><INDENT>paragraph.related = [link.strip(\"<STR_LIT>\") for link in chunk.split(\"<STR_LIT:|>\")[<NUM_LIT:1>:]]<EOL><DEDENT>else:<EOL><INDENT>paragraph_data += chunk +\"<STR_LIT:\\n>\"<EOL><DEDENT><DEDENT>if paragraph.title.lower() in refs or (paragraph.parent and paragraph.parent.title.lower() in refs):<EOL><INDENT>self.parse_paragraph_references(paragraph_data)<EOL><DEDENT>paragraph.extend(self.parse_paragraph(paragraph_data))<EOL>paragraphs.append(paragraph)<EOL>paragraphs_exclude = []<EOL>for paragraph in paragraphs:<EOL><INDENT>if paragraph.title.lower() not in exclude and not (paragraph.parent and paragraph.parent.title.lower() in exclude):<EOL><INDENT>paragraphs_exclude.append(paragraph)<EOL><DEDENT><DEDENT>if len(paragraphs_exclude) == <NUM_LIT:1> and len(paragraphs_exclude[<NUM_LIT:0>]) == <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>return paragraphs_exclude<EOL>", "docstring": "Returns a list of paragraphs in the markup.\n\n        A paragraph has a title and multiple lines of plain text.\n        A paragraph might have parent and child paragraphs,\n        denoting subtitles or bigger chapters.\n\n        A paragraph might have links to additional articles.\n\n        Formats numbered lists by replacing # by 1.\n        Formats bulleted sublists like ** or *** with indentation.", "id": "f11594:c8:m18"}
{"signature": "def parse_important(self, markup):", "body": "important = []<EOL>table_titles = [table.title for table in self.tables]<EOL>m = re.findall(self.re[\"<STR_LIT>\"], markup)<EOL>for bold in m:<EOL><INDENT>bold = self.plain(bold)<EOL>if not bold in table_titles:<EOL><INDENT>important.append(bold.lower())<EOL><DEDENT><DEDENT>return important<EOL>", "docstring": "Returns a list of words that appear in bold in the article.\n\n        Things like table titles are not added to the list,\n        these are probably bold because it makes the layout nice,\n        not necessarily because they are important.", "id": "f11594:c8:m26"}
{"signature": "def __init__(self, title, markup, light=False, full_strip=True):", "body": "self.title = title<EOL>self.markup = markup<EOL>self.full_strip = full_strip<EOL>self.disambiguation = []<EOL>self.categories = []<EOL>self.links = []<EOL>self.paragraphs = []<EOL>self.images = []<EOL>self.tables = []<EOL>self.references = []<EOL>self.translations = {}<EOL>self.important = []<EOL>self.re = {<EOL>\"<STR_LIT>\" : r\"<STR_LIT>\",<EOL>\"<STR_LIT>\"       : r\"<STR_LIT>\",<EOL>\"<STR_LIT>\"           : r\"<STR_LIT>\",<EOL>\"<STR_LIT:image>\"          : re.compile(r\"<STR_LIT>\", re.I),<EOL>\"<STR_LIT>\"        : re.compile(\"<STR_LIT>\", re.DOTALL),<EOL>\"<STR_LIT>\"          : re.compile(r\"<STR_LIT>\", re.DOTALL),<EOL>\"<STR_LIT>\"     : re.compile(r\"<STR_LIT>\", re.DOTALL),<EOL>\"<STR_LIT>\"      : re.compile(r\"<STR_LIT>\", re.DOTALL),<EOL>\"<STR_LIT>\"       : re.compile(r\"<STR_LIT>\", re.DOTALL),<EOL>\"<STR_LIT:url>\"            : r\"<STR_LIT>\",            <EOL>\"<STR_LIT>\"   : re.compile(r\"<STR_LIT>\", re.DOTALL),<EOL>\"<STR_LIT>\"    : r\"<STR_LIT>\",<EOL>\"<STR_LIT>\"           : r\"<STR_LIT>\",<EOL>\"<STR_LIT>\"        : re.compile(r\"<STR_LIT>\", re.DOTALL),       <EOL>}<EOL>self.ref = \"<STR_LIT>\"<EOL>self.parse(light)<EOL>", "docstring": "Wikipedia page parser.\n\n        The expected markup is the stuff in Wikipedia's edit textarea.\n        With light=True, it will only parse links to other articles (which is faster).\n        With full_strip=False, it will preserve some HTML markup (links, bold, italic).", "id": "f11594:c8:m0"}
{"signature": "def is_preformatted(str):", "body": "for chunk in str.split(\"<STR_LIT:\\n>\"):<EOL><INDENT>if  not chunk.startswith(\"<STR_LIT:U+0020>\"):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Determines if an item in a paragraph is preformatted.\n\n    If all of the lines in the markup start with a \" \"\n    this indicates preformatted text.\n    Preformatted is usually used for programming code.", "id": "f11594:m1"}
{"signature": "def is_list(str):", "body": "for chunk in str.split(\"<STR_LIT:\\n>\"):<EOL><INDENT>chunk = chunk.replace(\"<STR_LIT:\\t>\", \"<STR_LIT>\")<EOL>if  not chunk.lstrip().startswith(\"<STR_LIT:*>\") and not re.search(r\"<STR_LIT>\", chunk.lstrip()):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Determines if an item in a paragraph is a list.\n\n    If all of the lines in the markup start with a \"*\" or \"1.\" \n    this indicates a list as parsed by parse_paragraphs().\n    It can be drawn with draw_list().", "id": "f11594:m2"}
{"signature": "def plain(self, markup):", "body": "<EOL>if self.full_strip:<EOL><INDENT>markup = markup.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>markup = markup.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>markup = re.sub(\"<STR_LIT>\", \"<STR_LIT>\", markup)<EOL>markup = re.sub(\"<STR_LIT>\", \"<STR_LIT>\", markup)<EOL><DEDENT>markup = re.sub(self.re[\"<STR_LIT>\"], \"<STR_LIT>\", markup)<EOL>markup = re.sub(self.re[\"<STR_LIT>\"], \"<STR_LIT>\", markup)<EOL>markup = markup.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>markup = markup.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>if self.full_strip:<EOL><INDENT>markup = re.sub(r\"<STR_LIT>\", \"<STR_LIT>\", markup)<EOL><DEDENT>else:<EOL><INDENT>markup = re.sub(r\"<STR_LIT>\", '<STR_LIT>', markup)<EOL>markup = re.sub(r\"<STR_LIT>\", '<STR_LIT>', markup)    <EOL><DEDENT>markup = re.sub(self.re[\"<STR_LIT>\"], \"<STR_LIT>\", markup)<EOL>markup = markup.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>markup = markup.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>markup = markup.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>markup = markup.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>markup = re.sub(\"<STR_LIT>\", \"<STR_LIT>\", markup)<EOL>markup = re.sub(\"<STR_LIT>\", \"<STR_LIT>\", markup)<EOL>markup = markup.replace(\"<STR_LIT:[>\", \"<STR_LIT>\")<EOL>markup = markup.replace(\"<STR_LIT:]>\", \"<STR_LIT>\")<EOL>markup = markup.replace(\"<STR_LIT>\", \"<STR_LIT:[>\")<EOL>markup = markup.replace(\"<STR_LIT>\", \"<STR_LIT:]>\")<EOL>markup = re.sub(self.re[\"<STR_LIT>\"], \"<STR_LIT>\", markup)                  <EOL>markup = re.sub(\"<STR_LIT>\", \"<STR_LIT>\", markup)                      <EOL>markup = re.sub(\"<STR_LIT>\", \"<STR_LIT>\", markup)           <EOL>markup = re.sub(self.ref+\"<STR_LIT>\", \"<STR_LIT>\", markup)        <EOL>markup = re.sub(\"<STR_LIT>\", \"<STR_LIT>\", markup)                 <EOL>markup = re.sub(self.re[\"<STR_LIT>\"], \"<STR_LIT>\", markup)                    <EOL>markup = 
re.sub(\"<STR_LIT>\", \"<STR_LIT>\", markup)            <EOL>markup = re.sub(\"<STR_LIT>\", \"<STR_LIT>\", markup)                   <EOL>markup = re.sub(\"<STR_LIT>\", \"<STR_LIT>\", markup)         <EOL>markup = re.sub(re.compile(\"<STR_LIT>\", re.DOTALL), \"<STR_LIT>\", markup)  <EOL>markup = markup.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>markup = re.sub(\"<STR_LIT>\", \"<STR_LIT:U+0020>\", markup)<EOL>markup = markup.split(\"<STR_LIT:\\n>\")<EOL>for i in range(len(markup)):<EOL><INDENT>if not markup[i].startswith(\"<STR_LIT:U+0020>\"):<EOL><INDENT>markup[i] = re.sub(r\"<STR_LIT>\", \"<STR_LIT:U+0020>\", markup[i])<EOL><DEDENT><DEDENT>markup = \"<STR_LIT:\\n>\".join(markup)<EOL>markup = markup.replace(\"<STR_LIT>\", \"<STR_LIT:.>\")<EOL>if self.full_strip:<EOL><INDENT>markup = strip_tags(markup, exclude=[\"<STR_LIT>\"], linebreaks=True)<EOL><DEDENT>markup = markup.strip()<EOL>return markup<EOL>", "docstring": "Strips Wikipedia markup from given text.\n\n        This creates a \"plain\" version of the markup,\n        stripping images and references and the like.\n        Does some commonsense maintenance as well,\n        like collapsing multiple spaces.\n        If you specified full_strip=False for WikipediaPage instance,\n        some markup is preserved as HTML (links, bold, italic).", "id": "f11594:c8:m5"}
{"signature": "def parse_tables(self, markup):", "body": "tables = []<EOL>m = re.findall(self.re[\"<STR_LIT>\"], markup)<EOL>for chunk in m:<EOL><INDENT>table = WikipediaTable()<EOL>table.properties = chunk.split(\"<STR_LIT:\\n>\")[<NUM_LIT:0>].strip(\"<STR_LIT>\").strip()<EOL>self.connect_table(table, chunk, markup)<EOL>row = None<EOL>for chunk in chunk.split(\"<STR_LIT:\\n>\"):<EOL><INDENT>chunk = chunk.strip()<EOL>if chunk.startswith(\"<STR_LIT>\"):<EOL><INDENT>title = self.plain(chunk.strip(\"<STR_LIT>\"))<EOL>table.title = title<EOL><DEDENT>elif chunk.startswith(\"<STR_LIT>\"):<EOL><INDENT>if row: <EOL><INDENT>row.properties = chunk.strip(\"<STR_LIT>\").strip()<EOL>table.append(row)<EOL><DEDENT>row = None<EOL><DEDENT>elif chunk.startswith(\"<STR_LIT>\"):<EOL><INDENT>pass<EOL><DEDENT>elif chunk.startswith(\"<STR_LIT:|>\") or chunk.startswith(\"<STR_LIT:!>\"):<EOL><INDENT>row = self.parse_table_row(chunk, row)<EOL><DEDENT><DEDENT>if row: table.append(row)<EOL>if len(table) > <NUM_LIT:0>:<EOL><INDENT>tables.append(table)<EOL><DEDENT><DEDENT>return tables<EOL>", "docstring": "Returns a list of tables in the markup.\n\n        A Wikipedia table looks like:\n        {| border=\"1\"\n        |-\n        |Cell 1 (no modifier - not aligned)\n        |-\n        |align=\"right\" |Cell 2 (right aligned)\n        |-\n        |}", "id": "f11594:c8:m21"}
{"signature": "def parse_disambiguation(self, markup):", "body": "m = re.search(self.re[\"<STR_LIT>\"], markup)<EOL>if m:<EOL><INDENT>return self.parse_links(m.group(<NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>return []<EOL><DEDENT>", "docstring": "Gets the Wikipedia disambiguation page for this article.\n\n        A Wikipedia disambiguation link refers to other pages\n        with the same title but of smaller significance,\n        e.g. {{dablink|For the IEEE magazine see [[Computer (magazine)]].}}", "id": "f11594:c8:m25"}
{"signature": "def parse_paragraph_heading_depth(self, markup):", "body": "return markup.count(\"<STR_LIT:=>\")/<NUM_LIT:2> - <NUM_LIT:1><EOL>", "docstring": "Returns the depth of a heading.\n\n        The depth determines parent and child relations,\n        which headings (and hence which paragraphs) are a child to a heading higher up.\n        Returns 0 for <h1> =, 1 for <h2> ==, etc.\n\n        Called from parse_paragraphs() method.", "id": "f11594:c8:m15"}
{"signature": "def parse_categories(self, markup):", "body": "categories = []<EOL>m = re.findall(self.re[\"<STR_LIT>\"], markup)<EOL>for category in m:<EOL><INDENT>category = category.split(\"<STR_LIT:|>\")<EOL>page = category[<NUM_LIT:0>].strip()<EOL>display = u\"<STR_LIT>\"<EOL>if len(category) > <NUM_LIT:1>: <EOL><INDENT>display = category[<NUM_LIT:1>].strip()<EOL><DEDENT>if not page in categories:<EOL><INDENT>categories.append(page)<EOL><DEDENT><DEDENT>return categories<EOL>", "docstring": "Returns a list of categories the page belongs to.\n\n        # A Wikipedia category link looks like:\n        # [[Category:Computing]]\n        # This indicates the page is included in the given category.\n        # If \"Category\" is preceded by \":\" this indicates a link to a category.", "id": "f11594:c8:m23"}
{"signature": "def connect_table(self, table, chunk, markup):", "body": "k = markup.find(chunk)<EOL>i = markup.rfind(\"<STR_LIT>\", <NUM_LIT:0>, k)<EOL>j = markup.find(\"<STR_LIT:\\n>\", i+<NUM_LIT:1>)<EOL>paragraph_title = markup[i:j].strip().strip(\"<STR_LIT>\")<EOL>for paragraph in self.paragraphs:<EOL><INDENT>if paragraph.title == paragraph_title:<EOL><INDENT>paragraph.tables.append(table)<EOL>table.paragraph = paragraph<EOL><DEDENT><DEDENT>", "docstring": "Creates a link from the table to paragraph and vice versa.\n\n        Finds the first heading above the table in the markup.\n        This is the title of the paragraph the table belongs to.", "id": "f11594:c8:m20"}
{"signature": "def convert_table(self, markup):", "body": "for table in re.findall(self.re[\"<STR_LIT>\"], markup):<EOL><INDENT>wiki = table<EOL>wiki = re.sub(r\"<STR_LIT>\", \"<STR_LIT>\", wiki)<EOL>wiki = re.sub(r\"<STR_LIT>\", \"<STR_LIT>\", wiki)<EOL>wiki = re.sub(r\"<STR_LIT>\", \"<STR_LIT>\", wiki)<EOL>wiki = wiki.replace(\"<STR_LIT>\", \"<STR_LIT:\\n>\")<EOL>wiki = wiki.replace(\"<STR_LIT>\", \"<STR_LIT:\\n>\")<EOL>wiki = wiki.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>markup = markup.replace(table, wiki)<EOL><DEDENT>return markup<EOL>", "docstring": "Substitutes <table> content to Wikipedia markup.", "id": "f11594:c8:m8"}
{"signature": "def findAllNext(self, name=None, attrs={}, text=None, limit=None,<EOL>**kwargs):", "body": "return self._findAll(name, attrs, text, limit, self.nextGenerator,<EOL>**kwargs)<EOL>", "docstring": "Returns all items that match the given criteria and appear\n        after this Tag in the document.", "id": "f11596:c0:m7"}
{"signature": "def findPrevious(self, name=None, attrs={}, text=None, **kwargs):", "body": "return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)<EOL>", "docstring": "Returns the first item that matches the given criteria and\n        appears before this Tag in the document.", "id": "f11596:c0:m10"}
{"signature": "def findPreviousSiblings(self, name=None, attrs={}, text=None,<EOL>limit=None, **kwargs):", "body": "return self._findAll(name, attrs, text, limit,<EOL>self.previousSiblingGenerator, **kwargs)<EOL>", "docstring": "Returns the siblings of this Tag that match the given\n        criteria and appear before this Tag in the document.", "id": "f11596:c0:m13"}
{"signature": "def start_meta(self, attrs):", "body": "httpEquiv = None<EOL>contentType = None<EOL>contentTypeIndex = None<EOL>tagNeedsEncodingSubstitution = False<EOL>for i in range(<NUM_LIT:0>, len(attrs)):<EOL><INDENT>key, value = attrs[i]<EOL>key = key.lower()<EOL>if key == '<STR_LIT>':<EOL><INDENT>httpEquiv = value<EOL><DEDENT>elif key == '<STR_LIT:content>':<EOL><INDENT>contentType = value<EOL>contentTypeIndex = i<EOL><DEDENT><DEDENT>if httpEquiv and contentType: <EOL><INDENT>match = self.CHARSET_RE.search(contentType)<EOL>if match:<EOL><INDENT>if (self.declaredHTMLEncoding is not None or<EOL>self.originalEncoding == self.fromEncoding):<EOL><INDENT>def rewrite(match):<EOL><INDENT>return match.group(<NUM_LIT:1>) + \"<STR_LIT>\"<EOL><DEDENT>newAttr = self.CHARSET_RE.sub(rewrite, contentType)<EOL>attrs[contentTypeIndex] = (attrs[contentTypeIndex][<NUM_LIT:0>],<EOL>newAttr)<EOL>tagNeedsEncodingSubstitution = True<EOL><DEDENT>else:<EOL><INDENT>newCharset = match.group(<NUM_LIT:3>)<EOL>if newCharset and newCharset != self.originalEncoding:<EOL><INDENT>self.declaredHTMLEncoding = newCharset<EOL>self._feed(self.declaredHTMLEncoding)<EOL>raise StopParsing<EOL><DEDENT>pass<EOL><DEDENT><DEDENT><DEDENT>tag = self.unknown_starttag(\"<STR_LIT>\", attrs)<EOL>if tag and tagNeedsEncodingSubstitution:<EOL><INDENT>tag.containsSubstitutions = True<EOL><DEDENT>", "docstring": "Beautiful Soup can detect a charset included in a META tag,\n        try to convert the document to that charset, and re-parse the\n        document from the beginning.", "id": "f11596:c10:m1"}
{"signature": "def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,<EOL>prettyPrint=False, indentLevel=<NUM_LIT:0>):", "body": "encodedName = self.toEncoding(self.name, encoding)<EOL>attrs = []<EOL>if self.attrs:<EOL><INDENT>for key, val in self.attrs:<EOL><INDENT>fmt = '<STR_LIT>'<EOL>if isString(val):<EOL><INDENT>if self.containsSubstitutions and '<STR_LIT>' in val:<EOL><INDENT>val = self.substituteEncoding(val, encoding)<EOL><DEDENT>if '<STR_LIT:\">' in val:<EOL><INDENT>fmt = \"<STR_LIT>\"<EOL>if \"<STR_LIT:'>\" in val:<EOL><INDENT>val = val.replace(\"<STR_LIT:'>\", \"<STR_LIT>\")<EOL><DEDENT><DEDENT>val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)<EOL><DEDENT>attrs.append(fmt % (self.toEncoding(key, encoding),<EOL>self.toEncoding(val, encoding)))<EOL><DEDENT><DEDENT>close = '<STR_LIT>'<EOL>closeTag = '<STR_LIT>'<EOL>if self.isSelfClosing:<EOL><INDENT>close = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>closeTag = '<STR_LIT>' % encodedName<EOL><DEDENT>indentTag, indentContents = <NUM_LIT:0>, <NUM_LIT:0><EOL>if prettyPrint:<EOL><INDENT>indentTag = indentLevel<EOL>space = ('<STR_LIT:U+0020>' * (indentTag-<NUM_LIT:1>))<EOL>indentContents = indentTag + <NUM_LIT:1><EOL><DEDENT>contents = self.renderContents(encoding, prettyPrint, indentContents)<EOL>if self.hidden:<EOL><INDENT>s = contents<EOL><DEDENT>else:<EOL><INDENT>s = []<EOL>attributeString = '<STR_LIT>'<EOL>if attrs:<EOL><INDENT>attributeString = '<STR_LIT:U+0020>' + '<STR_LIT:U+0020>'.join(attrs)<EOL><DEDENT>if prettyPrint:<EOL><INDENT>s.append(space)<EOL><DEDENT>s.append('<STR_LIT>' % (encodedName, attributeString, close))<EOL>if prettyPrint:<EOL><INDENT>s.append(\"<STR_LIT:\\n>\")<EOL><DEDENT>s.append(contents)<EOL>if prettyPrint and contents and contents[-<NUM_LIT:1>] != \"<STR_LIT:\\n>\":<EOL><INDENT>s.append(\"<STR_LIT:\\n>\")<EOL><DEDENT>if prettyPrint and closeTag:<EOL><INDENT>s.append(space)<EOL><DEDENT>s.append(closeTag)<EOL>if prettyPrint and closeTag and 
self.nextSibling:<EOL><INDENT>s.append(\"<STR_LIT:\\n>\")<EOL><DEDENT>s = '<STR_LIT>'.join(s)<EOL><DEDENT>return s<EOL>", "docstring": "Returns a string or Unicode representation of this tag and\n        its contents. To get Unicode, pass None for encoding.\n\n        NOTE: since Python's HTML parser consumes whitespace, this\n        method is not certain to reproduce the whitespace present in\n        the original string.", "id": "f11596:c6:m19"}
{"signature": "def convert_charref(self, name):", "body": "try:<EOL><INDENT>n = int(name)<EOL><DEDENT>except ValueError:<EOL><INDENT>return<EOL><DEDENT>if not <NUM_LIT:0> <= n <= <NUM_LIT> : <EOL><INDENT>return<EOL><DEDENT>return self.convert_codepoint(n)<EOL>", "docstring": "This method fixes a bug in Python's SGMLParser.", "id": "f11596:c9:m1"}
{"signature": "def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,<EOL>**kwargs):", "body": "return self._findAll(name, attrs, text, limit, self.previousGenerator,<EOL>**kwargs)<EOL>", "docstring": "Returns all items that match the given criteria and appear\n        before this Tag in the document.", "id": "f11596:c0:m11"}
{"signature": "def __init__(self, parser, name, attrs=None, parent=None,<EOL>previous=None):", "body": "<EOL>self.parserClass = parser.__class__<EOL>self.isSelfClosing = parser.isSelfClosingTag(name)<EOL>self.name = name<EOL>if attrs == None:<EOL><INDENT>attrs = []<EOL><DEDENT>self.attrs = attrs<EOL>self.contents = []<EOL>self.setup(parent, previous)<EOL>self.hidden = False<EOL>self.containsSubstitutions = False<EOL>self.convertHTMLEntities = parser.convertHTMLEntities<EOL>self.convertXMLEntities = parser.convertXMLEntities<EOL>self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities<EOL>convert = lambda k_val: (k_val[<NUM_LIT:0>],<EOL>re.sub(\"<STR_LIT>\",<EOL>self._convertEntities,<EOL>k_val[<NUM_LIT:1>]))<EOL>self.attrs = list(map(convert, self.attrs))<EOL>", "docstring": "Basic constructor.", "id": "f11596:c6:m2"}
{"signature": "def parse_declaration(self, i):", "body": "j = None<EOL>if self.rawdata[i:i+<NUM_LIT:9>] == '<STR_LIT>':<EOL><INDENT>k = self.rawdata.find('<STR_LIT>', i)<EOL>if k == -<NUM_LIT:1>:<EOL><INDENT>k = len(self.rawdata)<EOL><DEDENT>data = self.rawdata[i+<NUM_LIT:9>:k]<EOL>j = k+<NUM_LIT:3><EOL>self._toStringSubclass(data, CData)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>j = SGMLParser.parse_declaration(self, i)<EOL><DEDENT>except SGMLParseError:<EOL><INDENT>toHandle = self.rawdata[i:]<EOL>self.handle_data(toHandle)<EOL>j = i + len(toHandle)<EOL><DEDENT><DEDENT>return j<EOL>", "docstring": "Treat a bogus SGML declaration as raw data. Treat a CDATA\n        declaration as a CData object.", "id": "f11596:c9:m20"}
{"signature": "def _getAttrMap(self):", "body": "if not getattr(self, '<STR_LIT>'):<EOL><INDENT>self.attrMap = {}<EOL>for (key, value) in self.attrs:<EOL><INDENT>self.attrMap[key] = value<EOL><DEDENT><DEDENT>return self.attrMap<EOL>", "docstring": "Initializes a map representation of this tag's attributes,\n        if not already initialized.", "id": "f11596:c6:m27"}
{"signature": "def __getitem__(self, key):", "body": "return self._getAttrMap()[key]<EOL>", "docstring": "tag[key] returns the value of the 'key' attribute for the tag,\n        and throws an exception if it's not there.", "id": "f11596:c6:m5"}
{"signature": "def handle_decl(self, data):", "body": "self._toStringSubclass(data, Declaration)<EOL>", "docstring": "Handle DOCTYPEs and the like as Declaration objects.", "id": "f11596:c9:m19"}
{"signature": "def _popToTag(self, name, inclusivePop=True):", "body": "<EOL>if name == self.ROOT_TAG_NAME:<EOL><INDENT>return<EOL><DEDENT>numPops = <NUM_LIT:0><EOL>mostRecentTag = None<EOL>for i in range(len(self.tagStack)-<NUM_LIT:1>, <NUM_LIT:0>, -<NUM_LIT:1>):<EOL><INDENT>if name == self.tagStack[i].name:<EOL><INDENT>numPops = len(self.tagStack)-i<EOL>break<EOL><DEDENT><DEDENT>if not inclusivePop:<EOL><INDENT>numPops = numPops - <NUM_LIT:1><EOL><DEDENT>for i in range(<NUM_LIT:0>, numPops):<EOL><INDENT>mostRecentTag = self.popTag()<EOL><DEDENT>return mostRecentTag<EOL>", "docstring": "Pops the tag stack up to and including the most recent\n        instance of the given tag. If inclusivePop is false, pops the tag\n        stack up to but *not* including the most recent instqance of\n        the given tag.", "id": "f11596:c9:m9"}
{"signature": "def _smartPop(self, name):", "body": "nestingResetTriggers = self.NESTABLE_TAGS.get(name)<EOL>isNestable = nestingResetTriggers != None<EOL>isResetNesting = name in self.RESET_NESTING_TAGS<EOL>popTo = None<EOL>inclusive = True<EOL>for i in range(len(self.tagStack)-<NUM_LIT:1>, <NUM_LIT:0>, -<NUM_LIT:1>):<EOL><INDENT>p = self.tagStack[i]<EOL>if (not p or p.name == name) and not isNestable:<EOL><INDENT>popTo = name<EOL>break<EOL><DEDENT>if (nestingResetTriggers != None<EOL>and p.name in nestingResetTriggers)or (nestingResetTriggers == None and isResetNesting<EOL>and p.name in self.RESET_NESTING_TAGS):<EOL><INDENT>popTo = p.name<EOL>inclusive = False<EOL>break<EOL><DEDENT>p = p.parent<EOL><DEDENT>if popTo:<EOL><INDENT>self._popToTag(popTo, inclusive)<EOL><DEDENT>", "docstring": "We need to pop up to the previous tag of this type, unless\n        one of this tag's nesting reset triggers comes between this\n        tag and the previous tag of this type, OR unless this tag is a\n        generic nesting trigger and another generic nesting trigger\n        comes between this tag and the previous tag of this type.\n\n        Examples:\n         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.\n         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.\n         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.\n\n         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.\n         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'\n         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'", "id": "f11596:c9:m10"}
{"signature": "def toEncoding(self, s, encoding=None):", "body": "if isinstance(s, str):<EOL><INDENT>if encoding:<EOL><INDENT>s = s.encode(encoding)<EOL><DEDENT><DEDENT>elif isinstance(s, str):<EOL><INDENT>if encoding:<EOL><INDENT>s = s.encode(encoding)<EOL><DEDENT>else:<EOL><INDENT>s = str(s)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if encoding:<EOL><INDENT>s  = self.toEncoding(str(s), encoding)<EOL><DEDENT>else:<EOL><INDENT>s = str(s)<EOL><DEDENT><DEDENT>return s<EOL>", "docstring": "Encodes an object to a string in some encoding, or to Unicode.\n        .", "id": "f11596:c0:m24"}
{"signature": "def isList(l):", "body": "return hasattr(l, '<STR_LIT>')or (type(l) in (list, tuple))<EOL>", "docstring": "Convenience method that works with all 2.x versions of Python\n    to determine whether or not something is listlike.", "id": "f11596:m0"}
{"signature": "def handle_charref(self, ref):", "body": "if self.convertEntities:<EOL><INDENT>data = chr(int(ref))<EOL><DEDENT>else:<EOL><INDENT>data = '<STR_LIT>' % ref<EOL><DEDENT>self.handle_data(data)<EOL>", "docstring": "Handle character references as data.", "id": "f11596:c9:m17"}
{"signature": "def _detectEncoding(self, xml_data, isHTML=False):", "body": "xml_encoding = sniffed_xml_encoding = None<EOL>try:<EOL><INDENT>if xml_data[:<NUM_LIT:4>] == '<STR_LIT>':<EOL><INDENT>xml_data = self._ebcdic_to_ascii(xml_data)<EOL><DEDENT>elif xml_data[:<NUM_LIT:4>] == '<STR_LIT>':<EOL><INDENT>sniffed_xml_encoding = '<STR_LIT>'<EOL>xml_data = str(xml_data, '<STR_LIT>').encode('<STR_LIT:utf-8>')<EOL><DEDENT>elif (len(xml_data) >= <NUM_LIT:4>) and (xml_data[:<NUM_LIT:2>] == '<STR_LIT>')and (xml_data[<NUM_LIT:2>:<NUM_LIT:4>] != '<STR_LIT>'):<EOL><INDENT>sniffed_xml_encoding = '<STR_LIT>'<EOL>xml_data = str(xml_data[<NUM_LIT:2>:], '<STR_LIT>').encode('<STR_LIT:utf-8>')<EOL><DEDENT>elif xml_data[:<NUM_LIT:4>] == '<STR_LIT>':<EOL><INDENT>sniffed_xml_encoding = '<STR_LIT>'<EOL>xml_data = str(xml_data, '<STR_LIT>').encode('<STR_LIT:utf-8>')<EOL><DEDENT>elif (len(xml_data) >= <NUM_LIT:4>) and (xml_data[:<NUM_LIT:2>] == '<STR_LIT>') and(xml_data[<NUM_LIT:2>:<NUM_LIT:4>] != '<STR_LIT>'):<EOL><INDENT>sniffed_xml_encoding = '<STR_LIT>'<EOL>xml_data = str(xml_data[<NUM_LIT:2>:], '<STR_LIT>').encode('<STR_LIT:utf-8>')<EOL><DEDENT>elif xml_data[:<NUM_LIT:4>] == '<STR_LIT>':<EOL><INDENT>sniffed_xml_encoding = '<STR_LIT>'<EOL>xml_data = str(xml_data, '<STR_LIT>').encode('<STR_LIT:utf-8>')<EOL><DEDENT>elif xml_data[:<NUM_LIT:4>] == '<STR_LIT>':<EOL><INDENT>sniffed_xml_encoding = '<STR_LIT>'<EOL>xml_data = str(xml_data, '<STR_LIT>').encode('<STR_LIT:utf-8>')<EOL><DEDENT>elif xml_data[:<NUM_LIT:4>] == '<STR_LIT>':<EOL><INDENT>sniffed_xml_encoding = '<STR_LIT>'<EOL>xml_data = str(xml_data[<NUM_LIT:4>:], '<STR_LIT>').encode('<STR_LIT:utf-8>')<EOL><DEDENT>elif xml_data[:<NUM_LIT:4>] == '<STR_LIT>':<EOL><INDENT>sniffed_xml_encoding = '<STR_LIT>'<EOL>xml_data = str(xml_data[<NUM_LIT:4>:], '<STR_LIT>').encode('<STR_LIT:utf-8>')<EOL><DEDENT>elif xml_data[:<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>sniffed_xml_encoding = '<STR_LIT:utf-8>'<EOL>xml_data = str(xml_data[<NUM_LIT:3>:], 
'<STR_LIT:utf-8>').encode('<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>sniffed_xml_encoding = '<STR_LIT:ascii>'<EOL>pass<EOL><DEDENT><DEDENT>except:<EOL><INDENT>xml_encoding_match = None<EOL><DEDENT>xml_encoding_match = re.compile(<EOL>'<STR_LIT>').match(xml_data)<EOL>if not xml_encoding_match and isHTML:<EOL><INDENT>regexp = re.compile('<STR_LIT>', re.I)<EOL>xml_encoding_match = regexp.search(xml_data)<EOL><DEDENT>if xml_encoding_match is not None:<EOL><INDENT>xml_encoding = xml_encoding_match.groups()[<NUM_LIT:0>].lower()<EOL>if isHTML:<EOL><INDENT>self.declaredHTMLEncoding = xml_encoding<EOL><DEDENT>if sniffed_xml_encoding and(xml_encoding in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>')):<EOL><INDENT>xml_encoding = sniffed_xml_encoding<EOL><DEDENT><DEDENT>return xml_data, xml_encoding, sniffed_xml_encoding<EOL>", "docstring": "Given a document, tries to detect its XML encoding.", "id": "f11596:c20:m4"}
{"signature": "def __getattr__(self, attr):", "body": "if attr == '<STR_LIT:string>':<EOL><INDENT>return self<EOL><DEDENT>else:<EOL><INDENT>raise AttributeError(\"<STR_LIT>\" % (self.__class__.__name__, attr))<EOL><DEDENT>", "docstring": "text.string gives you text. This is for backwards\n        compatibility for Navigable*String, but for CData* it lets you\n        get the string without the CData wrapper.", "id": "f11596:c1:m2"}
{"signature": "def __iter__(self):", "body": "return iter(self.contents)<EOL>", "docstring": "Iterating over a tag iterates over its contents.", "id": "f11596:c6:m6"}
{"signature": "def findNext(self, name=None, attrs={}, text=None, **kwargs):", "body": "return self._findOne(self.findAllNext, name, attrs, text, **kwargs)<EOL>", "docstring": "Returns the first item that matches the given criteria and\n        appears after this Tag in the document.", "id": "f11596:c0:m6"}
{"signature": "def __getattr__(self, methodName):", "body": "<EOL>if methodName.find('<STR_LIT>') == <NUM_LIT:0> or methodName.find('<STR_LIT>') == <NUM_LIT:0>or methodName.find('<STR_LIT>') == <NUM_LIT:0>:<EOL><INDENT>return SGMLParser.__getattr__(self, methodName)<EOL><DEDENT>elif methodName.find('<STR_LIT>') != <NUM_LIT:0>:<EOL><INDENT>return Tag.__getattr__(self, methodName)<EOL><DEDENT>else:<EOL><INDENT>raise AttributeError<EOL><DEDENT>", "docstring": "This method routes method call requests to either the SGMLParser\n        superclass or the Tag superclass, depending on the method name.", "id": "f11596:c9:m3"}
{"signature": "def _convertEntities(self, match):", "body": "x = match.group(<NUM_LIT:1>)<EOL>if self.convertHTMLEntities and x in name2codepoint:<EOL><INDENT>return chr(name2codepoint[x])<EOL><DEDENT>elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:<EOL><INDENT>if self.convertXMLEntities:<EOL><INDENT>return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>' % x<EOL><DEDENT><DEDENT>elif len(x) > <NUM_LIT:0> and x[<NUM_LIT:0>] == '<STR_LIT:#>':<EOL><INDENT>if len(x) > <NUM_LIT:1> and x[<NUM_LIT:1>] == '<STR_LIT:x>':<EOL><INDENT>return chr(int(x[<NUM_LIT:2>:], <NUM_LIT:16>))<EOL><DEDENT>else:<EOL><INDENT>return chr(int(x[<NUM_LIT:1>:]))<EOL><DEDENT><DEDENT>elif self.escapeUnrecognizedEntities:<EOL><INDENT>return '<STR_LIT>' % x<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>' % x<EOL><DEDENT>", "docstring": "Used in a call to re.sub to replace HTML, XML, and numeric\n        entities with the appropriate Unicode characters. If HTML\n        entities are being converted, any unrecognized entities are\n        escaped.", "id": "f11596:c6:m1"}
{"signature": "def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,<EOL>**kwargs):", "body": "return self._findAll(name, attrs, text, limit,<EOL>self.nextSiblingGenerator, **kwargs)<EOL>", "docstring": "Returns the siblings of this Tag that match the given\n        criteria and appear after this Tag in the document.", "id": "f11596:c0:m9"}
{"signature": "def get(self, key, default=None):", "body": "return self._getAttrMap().get(key, default)<EOL>", "docstring": "Returns the value of the 'key' attribute for the tag, or\n        the value given for 'default' if it doesn't have that\n        attribute.", "id": "f11596:c6:m3"}
{"signature": "def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,<EOL>prettyPrint=False, indentLevel=<NUM_LIT:0>):", "body": "s=[]<EOL>for c in self:<EOL><INDENT>text = None<EOL>if isinstance(c, NavigableString):<EOL><INDENT>text = c.__str__(encoding)<EOL><DEDENT>elif isinstance(c, Tag):<EOL><INDENT>s.append(c.__str__(encoding, prettyPrint, indentLevel))<EOL><DEDENT>if text and prettyPrint:<EOL><INDENT>text = text.strip()<EOL><DEDENT>if text:<EOL><INDENT>if prettyPrint:<EOL><INDENT>s.append(\"<STR_LIT:U+0020>\" * (indentLevel-<NUM_LIT:1>))<EOL><DEDENT>s.append(text)<EOL>if prettyPrint:<EOL><INDENT>s.append(\"<STR_LIT:\\n>\")<EOL><DEDENT><DEDENT><DEDENT>return '<STR_LIT>'.join(s)<EOL>", "docstring": "Renders the contents of this tag as a string in the given\n        encoding. If encoding is None, returns a Unicode string..", "id": "f11596:c6:m22"}
{"signature": "def __eq__(self, other):", "body": "if not hasattr(other, '<STR_LIT:name>') or not hasattr(other, '<STR_LIT>') or not hasattr(other, '<STR_LIT>') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):<EOL><INDENT>return False<EOL><DEDENT>for i in range(<NUM_LIT:0>, len(self.contents)):<EOL><INDENT>if self.contents[i] != other.contents[i]:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Returns true iff this tag has the same name, the same attributes,\n        and the same contents (recursively) as the given tag.\n\n        NOTE: right now this will return false if two tags have the\n        same attributes in a different order. Should this be fixed?", "id": "f11596:c6:m14"}
{"signature": "def append(self, tag):", "body": "self.insert(len(self.contents), tag)<EOL>", "docstring": "Appends the given tag to the contents of this tag.", "id": "f11596:c0:m5"}
{"signature": "def buildTagMap(default, *args):", "body": "built = {}<EOL>for portion in args:<EOL><INDENT>if hasattr(portion, '<STR_LIT>'):<EOL><INDENT>for k,v in list(portion.items()):<EOL><INDENT>built[k] = v<EOL><DEDENT><DEDENT>elif isList(portion):<EOL><INDENT>for k in portion:<EOL><INDENT>built[k] = default<EOL><DEDENT><DEDENT>else:<EOL><INDENT>built[portion] = default<EOL><DEDENT><DEDENT>return built<EOL>", "docstring": "Turns a list of maps, lists, or scalars into a single map.\n    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and\n    NESTING_RESET_TAGS maps out of lists and partial maps.", "id": "f11596:m2"}
{"signature": "def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):", "body": "return self._findOne(self.findNextSiblings, name, attrs, text,<EOL>**kwargs)<EOL>", "docstring": "Returns the closest sibling to this Tag that matches the\n        given criteria and appears after this Tag in the document.", "id": "f11596:c0:m8"}
{"signature": "def replace_entities(ustring, placeholder=\"<STR_LIT:U+0020>\"):", "body": "def _repl_func(match):<EOL><INDENT>try:<EOL><INDENT>if match.group(<NUM_LIT:1>): <EOL><INDENT>return unichr( int(match.group(<NUM_LIT:2>)) ) <EOL><DEDENT>else:<EOL><INDENT>try: return cp1252[ unichr(int(match.group(<NUM_LIT:3>))) ].strip()<EOL>except: return unichr( name2codepoint[match.group(<NUM_LIT:3>)] )<EOL><DEDENT><DEDENT>except:<EOL><INDENT>return placeholder<EOL><DEDENT><DEDENT>if not isinstance(ustring, unicode):<EOL><INDENT>ustring = UnicodeDammit(ustring).unicode<EOL><DEDENT>ustring = ustring.replace(\"<STR_LIT>\", \"<STR_LIT:U+0020>\")<EOL>_entity_re = re.compile(r'<STR_LIT>') <EOL>return _entity_re.sub(_repl_func, ustring)<EOL>", "docstring": "Replaces HTML special characters by readable characters.\n\n    As taken from Leif K-Brooks algorithm on:\n    http://groups-beta.google.com/group/comp.lang.python", "id": "f11598:m1"}
{"signature": "def _darkest(self):", "body": "rgb, n = (<NUM_LIT:1.0>, <NUM_LIT:1.0>, <NUM_LIT:1.0>), <NUM_LIT><EOL>for r,g,b in self:<EOL><INDENT>if r+g+b < n:<EOL><INDENT>rgb, n = (r,g,b), r+g+b<EOL><DEDENT><DEDENT>return rgb<EOL>", "docstring": "Returns the darkest swatch.\n\n        Knowing the contract between a light and a dark swatch\n        can help us decide how to display readable typography.", "id": "f11600:c0:m1"}
{"signature": "def __init__(self, q, page=<NUM_LIT:0>, wait=<NUM_LIT:10>, asynchronous=False, cached=True):", "body": "if cached: <EOL><INDENT>cache = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>cache = None<EOL><DEDENT>url  = \"<STR_LIT>\"<EOL>if isinstance(q, int):<EOL><INDENT>url += \"<STR_LIT>\" + str(q)  <EOL><DEDENT>elif q in [\"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>url += \"<STR_LIT>\" + q<EOL><DEDENT>else:<EOL><INDENT>url += \"<STR_LIT>\" + quote(q)<EOL><DEDENT>if q == \"<STR_LIT>\":<EOL><INDENT>if cached and Cache(cache).age(url) > <NUM_LIT:0>:<EOL><INDENT>Cache(cache).remove(url)<EOL><DEDENT><DEDENT>if q == \"<STR_LIT>\":<EOL><INDENT>Cache(cache).remove(url)<EOL><DEDENT>URLAccumulator.__init__(self, url, wait, asynchronous, cache, type=\"<STR_LIT>\", throttle=<NUM_LIT:3>)<EOL>", "docstring": "Parses color themes from Adobe Kuler.\n\n        Valid queries are \"popular\", \"rating\", \n        a theme id as an integer, or a search string.", "id": "f11600:c1:m0"}
{"signature": "def _darkest(self):", "body": "rgb, n = (<NUM_LIT:1.0>, <NUM_LIT:1.0>, <NUM_LIT:1.0>), <NUM_LIT><EOL>for r,g,b in self:<EOL><INDENT>if r+g+b < n:<EOL><INDENT>rgb, n = (r,g,b), r+g+b<EOL><DEDENT><DEDENT>return rgb<EOL>", "docstring": "Returns the darkest swatch.\n\n        Knowing the contract between a light and a dark swatch\n        can help us decide how to display readable typography.", "id": "f11601:c0:m1"}
{"signature": "def __init__(self, q, page=<NUM_LIT:0>, wait=<NUM_LIT:10>, asynchronous=False, cached=True):", "body": "if cached: <EOL><INDENT>cache = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>cache = None<EOL><DEDENT>url  = \"<STR_LIT>\"<EOL>self.id_string = url + \"<STR_LIT>\"<EOL>if isinstance(q, int):<EOL><INDENT>url  = self.id_string + str(q)  <EOL><DEDENT>elif q in [\"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>url += \"<STR_LIT>\"+q<EOL>url += \"<STR_LIT>\"+str(page*<NUM_LIT:30>)+\"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>url += \"<STR_LIT>\"+quote(q)<EOL>url += \"<STR_LIT>\"+str(page*<NUM_LIT:30>)+\"<STR_LIT>\"<EOL><DEDENT>if q in [\"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>if cached and Cache(cache).age(url) > <NUM_LIT:0>:<EOL><INDENT>Cache(cache).remove(url)<EOL><DEDENT><DEDENT>URLAccumulator.__init__(self, url, wait, asynchronous, cache, type=\"<STR_LIT>\", throttle=<NUM_LIT:3>)<EOL>", "docstring": "Parses color themes from Adobe Kuler.\n\n        Valid queries are \"popular\", \"rating\", \n        a theme id as an integer, or a search string.", "id": "f11601:c1:m0"}
{"signature": "def is_type(url, types=[], wait=<NUM_LIT:10>):", "body": "<EOL>if isinstance(types, str):<EOL><INDENT>types = [types]<EOL><DEDENT>try: connection = open(url, wait)<EOL>except:<EOL><INDENT>return False<EOL><DEDENT>type = connection.info()[\"<STR_LIT:Content-Type>\"]<EOL>for t in types:<EOL><INDENT>if type.startswith(t): return True<EOL><DEDENT>return False<EOL>", "docstring": "Determine the MIME-type of the document behind the url.\n\n    MIME is more reliable than simply checking the document extension.\n    Returns True when the MIME-type starts with anything in the list of types.", "id": "f11604:m6"}
{"signature": "def __init__(self, url=\"<STR_LIT>\", method=\"<STR_LIT>\"):", "body": "<EOL>is_post_urlparser = False<EOL>if isinstance(url, URLParser) and url.method == \"<STR_LIT>\":<EOL><INDENT>is_post_urlparser = True<EOL>url.method = \"<STR_LIT>\"<EOL><DEDENT>urlstr = str(url)<EOL>if is_post_urlparser: url.method = \"<STR_LIT>\"<EOL>url = urlstr<EOL>url = urllib.parse.urlsplit(url)<EOL>self.protocol = url[<NUM_LIT:0>]<EOL>self.domain = url[<NUM_LIT:1>]<EOL>self.username = \"<STR_LIT>\"<EOL>self.password = \"<STR_LIT>\"<EOL>if self.domain.find(\"<STR_LIT:@>\") >= <NUM_LIT:0>:<EOL><INDENT>login = self.domain.split(\"<STR_LIT:@>\")[<NUM_LIT:0>]<EOL>if login.find(\"<STR_LIT::>\") >= <NUM_LIT:0>:<EOL><INDENT>self.username = login.split(\"<STR_LIT::>\")[<NUM_LIT:0>]<EOL>self.password = login.split(\"<STR_LIT::>\")[<NUM_LIT:1>]<EOL><DEDENT>self.domain = self.domain.split(\"<STR_LIT:@>\")[<NUM_LIT:1>]<EOL><DEDENT>self.port = \"<STR_LIT>\"<EOL>if self.domain.find(\"<STR_LIT::>\") >= <NUM_LIT:0>:<EOL><INDENT>p = self.domain.split(\"<STR_LIT::>\")<EOL>if p[<NUM_LIT:1>].isdigit():<EOL><INDENT>self.port = p[<NUM_LIT:1>]<EOL>self.domain = p[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>self.path = url[<NUM_LIT:2>]<EOL>self.page = \"<STR_LIT>\"<EOL>if not self.path.endswith(\"<STR_LIT:/>\"):<EOL><INDENT>if self.path.find(\"<STR_LIT:/>\") >= <NUM_LIT:0>:<EOL><INDENT>self.page = self.path.split(\"<STR_LIT:/>\")[-<NUM_LIT:1>]<EOL>self.path = self.path[:-len(self.page)]<EOL><DEDENT>else:<EOL><INDENT>self.page = self.path<EOL>self.path = \"<STR_LIT>\"<EOL><DEDENT><DEDENT>self.filename = self.page<EOL>self.query = {}<EOL>self.method = method<EOL>if url[<NUM_LIT:3>] != \"<STR_LIT>\":<EOL><INDENT>self.method = \"<STR_LIT>\"<EOL><DEDENT>if is_post_urlparser:<EOL><INDENT>self.method = \"<STR_LIT>\"<EOL><DEDENT>for param in url[<NUM_LIT:3>].split(\"<STR_LIT:&>\"):<EOL><INDENT>key, value = \"<STR_LIT>\", \"<STR_LIT>\"<EOL>if param.find(\"<STR_LIT:=>\") >= <NUM_LIT:0>:<EOL><INDENT>try: (key, value) = 
param.split(\"<STR_LIT:=>\")<EOL>except:<EOL><INDENT>key = param<EOL><DEDENT><DEDENT>else:<EOL><INDENT>key = param<EOL><DEDENT>if key != \"<STR_LIT>\":<EOL><INDENT>self.query[key] = value<EOL><DEDENT><DEDENT>self.anchor = url[<NUM_LIT:4>]<EOL>", "docstring": "Splits an url string into different parts.\n\n        The parts are:\n        protocol, domain, login, username, password, port, path, page, query, anchor.\n\n        The method defaults to get when the url has a query part.\n        Setting it to post will submit the query by POST\n        when opening the url.", "id": "f11604:c6:m0"}
{"signature": "def not_found(url, wait=<NUM_LIT:10>):", "body": "try: connection = open(url, wait)<EOL>except HTTP404NotFound:<EOL><INDENT>return True<EOL><DEDENT>except:<EOL><INDENT>return False<EOL><DEDENT>return False<EOL>", "docstring": "Returns True when the url generates a \"404 Not Found\" error.", "id": "f11604:m5"}
{"signature": "def __init__(self, url, wait=<NUM_LIT>, asynchronous=False, cache=None, type=\"<STR_LIT>\", throttle=<NUM_LIT:0>):", "body": "self.url = url<EOL>self.data = None<EOL>self.redirect = None<EOL>self.error = None<EOL>if cache != None:<EOL><INDENT>self.cached = True<EOL>self._cache = Cache(cache, type)<EOL><DEDENT>else:<EOL><INDENT>self.cached = False<EOL>self._cache = None<EOL><DEDENT>self._domain = URLParser(self.url).domain<EOL>self._throttle = throttle<EOL>global urlaccumulator_throttle<EOL>if not self._domain in urlaccumulator_throttle:<EOL><INDENT>urlaccumulator_throttle[self._domain] = time.time() - self._throttle<EOL><DEDENT>self._start = time.time()<EOL>self._wait = wait<EOL>self._busy = True<EOL>self._loaded = False<EOL>_thread.start_new_thread(self._retrieve, (self.url,))<EOL>if not asynchronous:<EOL><INDENT>while not self._done():<EOL><INDENT>time.sleep(<NUM_LIT:0.1>)<EOL><DEDENT><DEDENT>", "docstring": "Creates a threaded connection to a url and reads data.\n\n        URLAccumulator can run asynchronously which is useful for animations.\n        The done property is set to True when downloading is complete.\n        The error attribute contains a URLError exception when no data is found.\n\n        URLAccumulator data can be cached.\n        Downloads that resulted in an error will write an empty file to the cache,\n        the data property will be an empty string but no error is logged\n        when the data is read from the cache in later calls.\n\n        URLAccumulator can be throttled.\n        This ensures only a certain amount of requests to a domain\n        will happen in a given period of time.\n\n        URLAccumulator data is loaded.\n        It has a load() method that is called once when done.", "id": "f11604:c7:m0"}
{"signature": "def __setattr__(self, a, v):", "body": "if a == \"<STR_LIT>\":<EOL><INDENT>self.__dict__[\"<STR_LIT>\"] = v<EOL><DEDENT>elif len(self) > <NUM_LIT:0> and a in list(self.values())[<NUM_LIT:0>].__dict__:<EOL><INDENT>for style in list(self.values()):<EOL><INDENT>style.__dict__[a] = v<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise AttributeError(\"<STR_LIT>\"+a+\"<STR_LIT:'>\")<EOL><DEDENT>", "docstring": "Setting an attribute is like setting it in all of the contained styles.", "id": "f11607:c0:m5"}
{"signature": "def path(s, graph, path):", "body": "def end(n):<EOL><INDENT>r = n.r * <NUM_LIT><EOL>s._ctx.oval(n.x-r, n.y-r, r*<NUM_LIT:2>, r*<NUM_LIT:2>)<EOL><DEDENT>if path and len(path) > <NUM_LIT:1> and s.stroke:<EOL><INDENT>s._ctx.nofill()<EOL>s._ctx.stroke(<EOL>s.stroke.r,<EOL>s.stroke.g,<EOL>s.stroke.b,<EOL>s.stroke.a<EOL>)<EOL>if s.name != DEFAULT:<EOL><INDENT>s._ctx.strokewidth(s.strokewidth)<EOL><DEDENT>else:<EOL><INDENT>s._ctx.strokewidth(s.strokewidth*<NUM_LIT:2>)<EOL><DEDENT>first = True<EOL>for id in path:<EOL><INDENT>n = graph[id]<EOL>if first:<EOL><INDENT>first = False<EOL>s._ctx.beginpath(n.x, n.y)<EOL>end(n)<EOL><DEDENT>else:<EOL><INDENT>s._ctx.lineto(n.x, n.y)<EOL><DEDENT><DEDENT>s._ctx.endpath()<EOL>end(n)<EOL><DEDENT>", "docstring": "Visualization of a shortest path between two nodes.", "id": "f11607:m8"}
{"signature": "def __init__(self, name, _ctx, **kwargs):", "body": "self.name = name<EOL>self._ctx = _ctx<EOL>if not _ctx:<EOL><INDENT>return<EOL><DEDENT>self.background = _ctx.color(<NUM_LIT>, <NUM_LIT>, <NUM_LIT:0.5>, <NUM_LIT>)<EOL>self.traffic = _ctx.color(<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>)<EOL>self.fill = _ctx.color(<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>)<EOL>self.stroke = _ctx.color(<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>)<EOL>self.strokewidth = <NUM_LIT:0.5><EOL>self.text = _ctx.color(<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>)<EOL>self.font = \"<STR_LIT>\"<EOL>self.fontsize = <NUM_LIT:10><EOL>self.textwidth = <NUM_LIT:100><EOL>self.align = <NUM_LIT:1><EOL>self.depth = True<EOL>self.graph_background = graph_background<EOL>self.graph_traffic = graph_traffic<EOL>self.node = node<EOL>self.node_label = node_label<EOL>self.edges = edges<EOL>self.edge = edge<EOL>self.edge_arrow = edge_arrow<EOL>self.edge_label = edge_label<EOL>self.path = path<EOL>for attr in kwargs:<EOL><INDENT>if attr in self.__dict__:<EOL><INDENT>self.__dict__[attr] = kwargs[attr]<EOL><DEDENT><DEDENT>if self.depth:<EOL><INDENT>try:<EOL><INDENT>global colors<EOL>colors = _ctx.ximport(\"<STR_LIT>\")<EOL><DEDENT>except:<EOL><INDENT>self.depth = False<EOL><DEDENT><DEDENT>", "docstring": "Graph styling. \n        The default style is used for edges.\n        When text is set to None, no id label is displayed.", "id": "f11607:c2:m0"}
{"signature": "def edge_label(s, edge, alpha=<NUM_LIT:1.0>):", "body": "if s.text and edge.label != \"<STR_LIT>\":<EOL><INDENT>s._ctx.nostroke()<EOL>s._ctx.fill(<EOL>s.text.r,<EOL>s.text.g,<EOL>s.text.b,<EOL>s.text.a * alpha*<NUM_LIT><EOL>)<EOL>s._ctx.lineheight(<NUM_LIT:1>)<EOL>s._ctx.font(s.font)<EOL>s._ctx.fontsize(s.fontsize*<NUM_LIT>)<EOL>try:<EOL><INDENT>p = edge._textpath<EOL><DEDENT>except:<EOL><INDENT>try:<EOL><INDENT>txt = str(edge.label)<EOL><DEDENT>except:<EOL><INDENT>try:<EOL><INDENT>txt = edge.label.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>edge._textpath = s._ctx.textpath(<EOL>txt, s._ctx.textwidth(\"<STR_LIT:U+0020>\"), <NUM_LIT:0>, width=s.textwidth)<EOL>p = edge._textpath<EOL><DEDENT>a = degrees(atan2(edge.node2.y-edge.node1.y,<EOL>edge.node2.x-edge.node1.x))<EOL>d = sqrt((edge.node2.x-edge.node1.x)**<NUM_LIT:2> +<EOL>(edge.node2.y-edge.node1.y)**<NUM_LIT:2>)<EOL>d = abs(d-s._ctx.textwidth(edge.label)) * <NUM_LIT:0.5><EOL>s._ctx.push()<EOL>s._ctx.transform(CORNER)<EOL>s._ctx.translate(edge.node1.x, edge.node1.y)<EOL>s._ctx.rotate(-a)<EOL>s._ctx.translate(d, s.fontsize*<NUM_LIT:1.0>)<EOL>s._ctx.scale(alpha)<EOL>if <NUM_LIT> < a % <NUM_LIT> < <NUM_LIT>:<EOL><INDENT>s._ctx.translate(s._ctx.textwidth(edge.label), -s.fontsize*<NUM_LIT>)<EOL>s._ctx.transform(CENTER)<EOL>s._ctx.rotate(<NUM_LIT>)<EOL>s._ctx.transform(CORNER)<EOL><DEDENT>s._ctx.drawpath(p.copy())<EOL>s._ctx.pop()<EOL><DEDENT>", "docstring": "Visualization of the label accompanying an edge.", "id": "f11607:m7"}
{"signature": "def copy(self, graph):", "body": "s = styles(graph)<EOL>s.guide = self.guide.copy(graph)<EOL>dict.__init__(s, [(v.name, v.copy()) for v in list(self.values())])<EOL>return s<EOL>", "docstring": "Returns a copy of all styles and a copy of the styleguide.", "id": "f11607:c0:m6"}
{"signature": "def graph_background(s):", "body": "if s.background == None:<EOL><INDENT>s._ctx.background(None)<EOL><DEDENT>else:<EOL><INDENT>s._ctx.background(s.background)<EOL><DEDENT>if s.depth:<EOL><INDENT>try:<EOL><INDENT>clr = colors.color(s.background).darker(<NUM_LIT>)<EOL>p = s._ctx.rect(<NUM_LIT:0>, <NUM_LIT:0>, s._ctx.WIDTH, s._ctx.HEIGHT, draw=False)<EOL>colors.gradientfill(p, clr, clr.lighter(<NUM_LIT>))<EOL>colors.shadow(dx=<NUM_LIT:0>, dy=<NUM_LIT:0>, blur=<NUM_LIT:2>, alpha=<NUM_LIT>, clr=s.background)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>", "docstring": "Graph background color.", "id": "f11607:m0"}
{"signature": "def __getattr__(self, a):", "body": "if a in self:<EOL><INDENT>return self[a]<EOL><DEDENT>raise AttributeError(\"<STR_LIT>\"+a+\"<STR_LIT:'>\")<EOL>", "docstring": "Keys in the dictionaries are accessible as attributes.", "id": "f11607:c0:m4"}
{"signature": "def copy(self, graph):", "body": "g = styleguide(graph)<EOL>g.order = self.order<EOL>dict.__init__(g, [(k, v) for k, v in self.items()])<EOL>return g<EOL>", "docstring": "Returns a copy of the styleguide for the given graph.", "id": "f11607:c1:m4"}
{"signature": "def click(self, node):", "body": "if not self.has_node(node.id): return<EOL>if node == self.root: return<EOL>self._dx, self._dy = self.offset(node)<EOL>self.previous = self.root.id<EOL>self.load(node.id)<EOL>", "docstring": "Callback from graph.events when a node is clicked.", "id": "f11608:c4:m5"}
{"signature": "def solve(self):", "body": "self.layout.solve()<EOL>self.alpha = <NUM_LIT:1.0><EOL>", "docstring": "Iterates the graph layout until done.", "id": "f11608:c3:m14"}
{"signature": "def draw(self, dx=<NUM_LIT:0>, dy=<NUM_LIT:0>, weighted=False, directed=False, highlight=[], traffic=None):", "body": "self.update()<EOL>s = self.styles.default<EOL>s.graph_background(s)<EOL>_ctx.push()<EOL>_ctx.translate(self.x+dx, self.y+dy)<EOL>if traffic:<EOL><INDENT>if isinstance(traffic, bool): <EOL><INDENT>traffic = <NUM_LIT:5><EOL><DEDENT>for n in self.nodes_by_betweenness()[:traffic]:<EOL><INDENT>try: s = self.styles[n.style]<EOL>except: s = self.styles.default<EOL>if s.graph_traffic:<EOL><INDENT>s.graph_traffic(s, n, self.alpha)        <EOL><DEDENT><DEDENT><DEDENT>s = self.styles.default<EOL>if s.edges:<EOL><INDENT>s.edges(s, self.edges, self.alpha, weighted, directed)<EOL><DEDENT>for n in self.nodes:<EOL><INDENT>try:  s = self.styles[n.style]<EOL>except: s = self.styles.default<EOL>if s.node:<EOL><INDENT>s.node(s, n, self.alpha)<EOL><DEDENT><DEDENT>try: s = self.styles.highlight<EOL>except: s = self.styles.default<EOL>if s.path:<EOL><INDENT>s.path(s, self, highlight)<EOL><DEDENT>for n in self.nodes:<EOL><INDENT>try:  s = self.styles[n.style]<EOL>except: s = self.styles.default<EOL>if s.node_label:<EOL><INDENT>s.node_label(s, n, self.alpha)<EOL><DEDENT><DEDENT>_ctx.pop()<EOL>", "docstring": "Layout the graph incrementally.\n\n        The graph is drawn at the center of the canvas.\n        The weighted and directed parameters visualize edge weight and direction.\n        The highlight specifies list of connected nodes. \n        The path will be colored according to the \"highlight\" style.\n        Clicking and dragging events are monitored.", "id": "f11608:c3:m17"}
{"signature": "def betweenness_centrality(self, normalized=True):", "body": "bc = proximity.brandes_betweenness_centrality(self, normalized)<EOL>for id, w in bc.items(): self[id]._betweenness = w<EOL>return bc<EOL>", "docstring": "Calculates betweenness centrality and returns an node id -> weight dictionary.\n        Node betweenness weights are updated in the process.", "id": "f11608:c3:m20"}
{"signature": "def remove_edge(self, id1, id2):", "body": "for e in list(self.edges):<EOL><INDENT>if id1 in (e.node1.id, e.node2.id) andid2 in (e.node1.id, e.node2.id):<EOL><INDENT>e.node1.links.remove(e.node2)<EOL>e.node2.links.remove(e.node1)<EOL>self.edges.remove(e)<EOL><DEDENT><DEDENT>", "docstring": "Remove edges between nodes with given id's.", "id": "f11608:c3:m9"}
{"signature": "def _leaves(self):", "body": "return [node for node in self.nodes if node.is_leaf]<EOL>", "docstring": "Returns a list of nodes that have only one connection.", "id": "f11608:c3:m25"}
{"signature": "def nodes_by_betweenness(self, treshold=<NUM_LIT:0.0>):", "body": "nodes = [(n.betweenness, n) for n in self.nodes if n.betweenness > treshold]<EOL>nodes.sort(); nodes.reverse()<EOL>return [n for w, n in nodes]<EOL>", "docstring": "Returns nodes sorted by betweenness centrality.\n        Nodes with a lot of passing traffic will be at the front of the list.", "id": "f11608:c3:m22"}
{"signature": "def copy(self, empty=False):", "body": "g = graph(self.layout.n, self.distance, self.layout.type)<EOL>g.layout = self.layout.copy(g)<EOL>g.styles = self.styles.copy(g)<EOL>g.events = self.events.copy(g)<EOL>if not empty:<EOL><INDENT>for n in self.nodes:<EOL><INDENT>g.add_node(n.id, n.r, n.style, n.category, n.label, (n == self.root), n.__dict__)<EOL><DEDENT>for e in self.edges:<EOL><INDENT>g.add_edge(e.node1.id, e.node2.id, e.weight, e.length, e.label, e.__dict__)<EOL><DEDENT><DEDENT>return g<EOL>", "docstring": "Create a copy of the graph (by default with nodes and edges).", "id": "f11608:c3:m3"}
{"signature": "def nodes_by_category(self, category):", "body": "return [n for n in self.nodes if n.category == category]<EOL>", "docstring": "Returns nodes with the given category attribute.", "id": "f11608:c3:m24"}
{"signature": "def prune(self, depth=<NUM_LIT:0>):", "body": "for n in list(self.nodes):<EOL><INDENT>if len(n.links) <= depth:<EOL><INDENT>self.remove_node(n.id)<EOL><DEDENT><DEDENT>", "docstring": "Removes all nodes with less or equal links than depth.", "id": "f11608:c3:m18"}
{"signature": "def crown(self, depth=<NUM_LIT:2>):", "body": "nodes = []<EOL>for node in self.leaves: nodes += node.flatten(depth-<NUM_LIT:1>)<EOL>return cluster.unique(nodes)<EOL>", "docstring": "Returns a list of leaves, nodes connected to leaves, etc.", "id": "f11608:c3:m26"}
{"signature": "def __getattr__(self, a):", "body": "if a in self: <EOL><INDENT>return self[a]<EOL><DEDENT>raise AttributeError(\"<STR_LIT>\"+str(a)+\"<STR_LIT:'>\")<EOL>", "docstring": "Returns the node in the graph associated with the given id.", "id": "f11608:c3:m12"}
{"signature": "def _density(self):", "body": "return <NUM_LIT>*len(self.edges) / (len(self.nodes) * (len(self.nodes)-<NUM_LIT:1>))<EOL>", "docstring": "The number of edges in relation to the total number of possible edges.", "id": "f11608:c3:m27"}
{"signature": "def create(iterations=<NUM_LIT:1000>, distance=<NUM_LIT:1.0>, layout=LAYOUT_SPRING, depth=True):", "body": "<EOL>_ctx.colormode(_ctx.RGB)<EOL>g = graph(iterations, distance, layout)<EOL>s = style.style<EOL>g.styles.append(s(style.LIGHT    , _ctx, fill   = _ctx.color(<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT>)))<EOL>g.styles.append(s(style.DARK     , _ctx, fill   = _ctx.color(<NUM_LIT>, <NUM_LIT:0.5>, <NUM_LIT>, <NUM_LIT>)))<EOL>g.styles.append(s(style.BACK     , _ctx, fill   = _ctx.color(<NUM_LIT:0.5>, <NUM_LIT>, <NUM_LIT:0.0>, <NUM_LIT>)))<EOL>g.styles.append(s(style.IMPORTANT, _ctx, fill   = _ctx.color(<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>)))<EOL>g.styles.append(s(style.HIGHLIGHT, _ctx, stroke = _ctx.color(<NUM_LIT:1.0>, <NUM_LIT:0.0>, <NUM_LIT:0.5>), strokewidth=<NUM_LIT>))<EOL>g.styles.append(s(style.MARKED   , _ctx))<EOL>g.styles.append(s(style.ROOT     , _ctx, text   = _ctx.color(<NUM_LIT:1.0>, <NUM_LIT:0.0>, <NUM_LIT>, <NUM_LIT>), <EOL>stroke = _ctx.color(<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>),<EOL>strokewidth = <NUM_LIT>, <EOL>fontsize    = <NUM_LIT:16>, <EOL>textwidth   = <NUM_LIT>))<EOL>def important_node(s, node, alpha=<NUM_LIT:1.0>):<EOL><INDENT>style.style(None, _ctx).node(s, node, alpha)<EOL>r = node.r * <NUM_LIT><EOL>_ctx.nofill()<EOL>_ctx.oval(node.x-r, node.y-r, r*<NUM_LIT:2>, r*<NUM_LIT:2>)  <EOL><DEDENT>def marked_node(s, node, alpha=<NUM_LIT:1.0>):<EOL><INDENT>style.style(None, _ctx).node(s, node, alpha)<EOL>r = node.r * <NUM_LIT><EOL>_ctx.fill(s.stroke)<EOL>_ctx.oval(node.x-r, node.y-r, r*<NUM_LIT:2>, r*<NUM_LIT:2>)<EOL><DEDENT>g.styles.important.node = important_node<EOL>g.styles.marked.node = marked_node <EOL>g.styles.depth = depth<EOL>g.styles.guide.append(style.LIGHT     , lambda graph, node: graph.root in node.links)<EOL>g.styles.guide.append(style.DARK      , lambda graph, node: len(node.links) > <NUM_LIT:4>)<EOL>g.styles.guide.append(style.IMPORTANT , lambda graph, node: node.weight > 
<NUM_LIT>)<EOL>g.styles.guide.append(style.ROOT      , lambda graph, node: node == graph.root)<EOL>g.styles.guide.append(style.BACK      , lambda graph, node: node == graph.events.clicked)<EOL>def balance(graph, node): <EOL><INDENT>node.r = node.r*<NUM_LIT> + node.r*node.weight*<NUM_LIT><EOL><DEDENT>g.styles.guide.append(\"<STR_LIT>\", balance)<EOL>def cluster(graph, node):<EOL><INDENT>if len(node.links) == <NUM_LIT:1>: <EOL><INDENT>node.links.edge(node.links[<NUM_LIT:0>]).length *= <NUM_LIT:0.5><EOL><DEDENT><DEDENT>g.styles.guide.append(\"<STR_LIT>\", cluster)<EOL>g.styles.guide.order = [<EOL>style.LIGHT, style.DARK, style.IMPORTANT, style.ROOT, style.BACK, \"<STR_LIT>\", \"<STR_LIT>\"<EOL>]<EOL>return g<EOL>", "docstring": "Returns a new graph with predefined styling.", "id": "f11608:m0"}
{"signature": "def edge(self, id1, id2):", "body": "if id1 in self andid2 in self andself[id2] in self[id1].links:<EOL><INDENT>return self[id1].links.edge(id2)<EOL><DEDENT>return None<EOL>", "docstring": "Returns the edge between the nodes with given id1 and id2.", "id": "f11608:c3:m11"}
{"signature": "def add_node(self, id, radius=<NUM_LIT:8>, style=style.DEFAULT, category=\"<STR_LIT>\", label=None, root=False,<EOL>properties={}):", "body": "if id in self: <EOL><INDENT>return self[id]<EOL><DEDENT>if not isinstance(style, str) and style.__dict__.has_key[\"<STR_LIT:name>\"]:<EOL><INDENT>style = style.name<EOL><DEDENT>n = node(self, id, radius, style, category, label, properties)<EOL>self[n.id] = n<EOL>self.nodes.append(n)<EOL>if root: self.root = n<EOL>return n<EOL>", "docstring": "Add node from id and return the node object.", "id": "f11608:c3:m5"}
{"signature": "def eigenvector_centrality(graph, normalized=True, reversed=True, rating={},<EOL>start=None, iterations=<NUM_LIT:100>, tolerance=<NUM_LIT>):", "body": "G = graph.keys()     <EOL>W = adjacency (graph, directed=True, reversed=reversed)<EOL>def _normalize(x):<EOL><INDENT>s = sum(x.values())<EOL>if s != <NUM_LIT:0>: s = <NUM_LIT:1.0> / s<EOL>for k in x: <EOL><INDENT>x[k] *= s<EOL><DEDENT><DEDENT>x = start<EOL>if x is None:<EOL><INDENT>x = dict([(n, random()) for n in G])<EOL><DEDENT>_normalize(x)<EOL>for i in range(iterations):<EOL><INDENT>x0 = x<EOL>x = dict.fromkeys(x0.keys(), <NUM_LIT:0>)<EOL>for n in x:<EOL><INDENT>for nbr in W[n]:<EOL><INDENT>r = <NUM_LIT:1><EOL>if rating.has_key(n): r = rating[n]<EOL>x[n] += <NUM_LIT> + x0[nbr] * W[n][nbr] * r<EOL><DEDENT><DEDENT>_normalize(x)          <EOL>e = sum([abs(x[n]-x0[n]) for n in x])<EOL>if e < len(graph.nodes) * tolerance:<EOL><INDENT>if normalized:<EOL><INDENT>m = max(x.values())<EOL>if m == <NUM_LIT:0>: m = <NUM_LIT:1><EOL>x = dict([(id, w/m) for id, w in x.iteritems()])<EOL><DEDENT>return x<EOL><DEDENT><DEDENT>warn(\"<STR_LIT>\", Warning)<EOL>return dict([(n, <NUM_LIT:0>) for n in G])<EOL>", "docstring": "Eigenvector centrality for nodes in the graph (like Google's PageRank).\n\n    Eigenvector centrality is a measure of the importance of a node in a directed network. 
\n    It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.\n    Nodes with no incoming connections have a score of zero.\n    If you want to measure outgoing connections, reversed should be False.\n\n    The eigenvector calculation is done by the power iteration method.\n    It has no guarantee of convergence.\n    A starting vector for the power iteration can be given in the start dict.\n\n    You can adjust the importance of a node with the rating dictionary,\n    which links node id's to a score.\n\n    The algorithm is adapted from NetworkX, Aric Hagberg (hagberg@lanl.gov):\n    https://networkx.lanl.gov/attachment/ticket/119/eigenvector_centrality.py", "id": "f11609:m4"}
{"signature": "def depth_first_search(root, visit=lambda node: False, traversable=lambda node, edge: True):", "body": "stop = visit(root)<EOL>root._visited = True<EOL>for node in root.links:<EOL><INDENT>if stop: return True<EOL>if not traversable(root, root.links.edge(node)): continue<EOL>if not node._visited:<EOL><INDENT>stop = depth_first_search(node, visit, traversable)<EOL><DEDENT><DEDENT>return stop<EOL>", "docstring": "Simple, multi-purpose depth-first search.\n\n    Visits all the nodes connected to the root, depth-first.\n    The visit function is called on each node.\n    Recursion will stop if it returns True, and ubsequently dfs() will return True.\n    The traversable function takes the current node and edge,\n    and returns True if we are allowed to follow this connection to the next node.\n    For example, the traversable for directed edges is follows:\n    lambda node, edge: node == edge.node1\n\n    Note: node._visited is expected to be False for all nodes.", "id": "f11609:m0"}
{"signature": "def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):", "body": "v = {}<EOL>for n in graph.nodes:<EOL><INDENT>v[n.id] = {}<EOL><DEDENT>for e in graph.edges:<EOL><INDENT>id1 = e.node1.id<EOL>id2 = e.node2.id<EOL>if reversed:<EOL><INDENT>id1, id2 = id2, id1<EOL><DEDENT>v[id1][id2] = <NUM_LIT:1.0> - e.weight*<NUM_LIT:0.5><EOL>if heuristic:<EOL><INDENT>v[id1][id2] += heuristic(id1, id2)<EOL><DEDENT>if not directed: <EOL><INDENT>v[id2][id1] = v[id1][id2]<EOL><DEDENT><DEDENT>if stochastic:<EOL><INDENT>for id1 in v:<EOL><INDENT>d = sum(v[id1].values())<EOL>for id2 in v[id1]: <EOL><INDENT>v[id1][id2] /= d<EOL><DEDENT><DEDENT><DEDENT>return v<EOL>", "docstring": "An edge weight map indexed by node id's.\n\n    A dictionary indexed by node id1's in which each value is a\n    dictionary of connected node id2's linking to the edge weight.\n    If directed, edges go from id1 to id2, but not the other way.\n    If stochastic, all the weights for the neighbors of a given node sum to 1.\n    A heuristic can be a function that takes two node id's and returns\n    and additional cost for movement between the two nodes.", "id": "f11609:m1"}
{"signature": "def drag(self, node):", "body": "dx = self.mouse.x - self.graph.x<EOL>dy = self.mouse.y - self.graph.y<EOL>s = self.graph.styles.default<EOL>self._ctx.nofill()<EOL>self._ctx.nostroke()<EOL>if s.stroke: <EOL><INDENT>self._ctx.strokewidth(s.strokewidth)<EOL>self._ctx.stroke(<EOL>s.stroke.r, <EOL>s.stroke.g, <EOL>s.stroke.g, <EOL><NUM_LIT><EOL>)<EOL><DEDENT>p = self._ctx.line(node.x, node.y, dx, dy, draw=False)<EOL>try: p._nsBezierPath.setLineDash_count_phase_([<NUM_LIT:2>,<NUM_LIT:4>], <NUM_LIT:2>, <NUM_LIT:50>)<EOL>except:<EOL><INDENT>pass<EOL><DEDENT>self._ctx.drawpath(p)<EOL>r = node.__class__(None).r * <NUM_LIT><EOL>self._ctx.oval(dx-r/<NUM_LIT:2>, dy-r/<NUM_LIT:2>, r, r)<EOL>node.vx = dx / self.graph.d<EOL>node.vy = dy / self.graph.d<EOL>", "docstring": "Drags given node to mouse location.", "id": "f11610:c1:m5"}
{"signature": "def update(self):", "body": "if self.mousedown:<EOL><INDENT>if not self.pressed and not self.dragged:<EOL><INDENT>for n in self.graph.nodes:<EOL><INDENT>if self.mouse in n:<EOL><INDENT>self.pressed = n<EOL>break<EOL><DEDENT><DEDENT><DEDENT>elif self.pressed and not self.mouse in self.pressed:<EOL><INDENT>self.dragged = self.pressed<EOL>self.pressed = None<EOL><DEDENT>elif self.dragged and self.graph.layout.type == \"<STR_LIT>\":<EOL><INDENT>self.drag(self.dragged)<EOL>self.graph.layout.i = min(<NUM_LIT:100>, max(<NUM_LIT:2>, self.graph.layout.n-<NUM_LIT:100>))<EOL><DEDENT><DEDENT>elif self.pressed and self.mouse in self.pressed:<EOL><INDENT>self.clicked = self.pressed<EOL>self.pressed = None<EOL>self.graph.layout.i = <NUM_LIT:2><EOL>self.click(self.clicked)<EOL><DEDENT>else:<EOL><INDENT>self.hovered = None<EOL>self.pressed = None<EOL>self.dragged = None<EOL>for n in self.graph.nodes:<EOL><INDENT>if self.mouse in n:<EOL><INDENT>self.hovered = n<EOL>self.hover(n)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Interacts with the graph by clicking or dragging nodes.\n        Hovering a node fires the callback function events.hover().\n        Clicking a node fires the callback function events.click().", "id": "f11610:c1:m4"}
{"signature": "def copy(self, graph):", "body": "e = events(graph, self._ctx)<EOL>e.clicked = self.clicked<EOL>return e<EOL>", "docstring": "Returns a copy of the event handler, remembering the last node clicked.", "id": "f11610:c1:m1"}
{"signature": "def copy(self, graph):", "body": "l = self.__class__(graph, self.n)<EOL>l.i = <NUM_LIT:0><EOL>return l<EOL>", "docstring": "Returns a copy of the layout for the given graph.", "id": "f11611:c1:m1"}
{"signature": "def intersection(a, b):", "body": "return filter(lambda x: x in a, b)<EOL>", "docstring": "Returns the intersection of lists.\n    a & b -> elements that appear in a as well as in b.", "id": "f11612:m3"}
{"signature": "def is_clique(graph):", "body": "<EOL>if graph.density < <NUM_LIT:1.0>: <EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "A clique is a set of nodes in which each node is connected to all other nodes.", "id": "f11612:m7"}
{"signature": "def cliques(graph, threshold=<NUM_LIT:3>):", "body": "cliques = []<EOL>for n in graph.nodes:<EOL><INDENT>c = clique(graph, n.id)<EOL>if len(c) >= threshold: <EOL><INDENT>c.sort()<EOL>if c not in cliques:<EOL><INDENT>cliques.append(c)<EOL><DEDENT><DEDENT><DEDENT>return cliques<EOL>", "docstring": "Returns all the cliques in the graph of at least the given size.", "id": "f11612:m9"}
{"signature": "def clique(graph, id):", "body": "clique = [id]<EOL>for n in graph.nodes:<EOL><INDENT>friend = True<EOL>for id in clique:<EOL><INDENT>if n.id == id or graph.edge(n.id, id) == None:<EOL><INDENT>friend = False<EOL>break<EOL><DEDENT><DEDENT>if friend:<EOL><INDENT>clique.append(n.id)<EOL><DEDENT><DEDENT>return clique<EOL>", "docstring": "Returns the largest possible clique for the node with given id.", "id": "f11612:m8"}
{"signature": "def sorted(list, cmp=None, reversed=False):", "body": "list = [x for x in list]<EOL>list.sort(cmp)<EOL>if reversed: list.reverse()<EOL>return list<EOL>", "docstring": "Returns a sorted copy of the list.", "id": "f11612:m0"}
{"signature": "def unique(list):", "body": "unique = []; [unique.append(x) for x in list if x not in unique]<EOL>return unique<EOL>", "docstring": "Returns a copy of the list without duplicates.", "id": "f11612:m1"}
{"signature": "def subgraph(graph, id, distance=<NUM_LIT:1>):", "body": "g = graph.copy(empty=True)<EOL>if isinstance(id, (FunctionType, LambdaType)):<EOL><INDENT>id = [node.id for node in filter(id, graph.nodes)]<EOL><DEDENT>if not isinstance(id, (list, tuple)):<EOL><INDENT>id = [id]<EOL><DEDENT>for id in id:<EOL><INDENT>for n in flatten(graph[id], distance):<EOL><INDENT>g.add_node(n.id, n.r, n.style, n.category, n.label, (n==graph.root), n.__dict__)<EOL><DEDENT><DEDENT>for e in graph.edges:<EOL><INDENT>if g.has_key(e.node1.id) andg.has_key(e.node2.id):<EOL><INDENT>g.add_edge(e.node1.id, e.node2.id, e.weight, e.length, e.label, e.__dict__)<EOL><DEDENT><DEDENT>return g<EOL>", "docstring": "Creates the subgraph of the flattened node with given id (or list of id's).\n    Finds all the edges between the nodes that make up the subgraph.", "id": "f11612:m6"}
{"signature": "def add(self, foreign_currency, foreign_curve=None, fx_spot=<NUM_LIT:1.0>):", "body": "assert isinstance(foreign_currency, type(self.currency))<EOL>assert isinstance(foreign_curve, curve.RateCurve)<EOL>assert isinstance(fx_spot, float)<EOL>self[self.currency, foreign_currency] = FxCurve.cast(fx_spot, self.domestic_curve, foreign_curve)<EOL>self[foreign_currency, self.currency] = FxCurve.cast(<NUM_LIT:1> / fx_spot, foreign_curve, self.domestic_curve)<EOL>f = foreign_currency<EOL>new = dict()<EOL>for d, s in self:<EOL><INDENT>if s is self.currency and d is not foreign_currency:<EOL><INDENT>triangulated = self[d, s](self.domestic_curve.origin) * fx_spot<EOL>if (d, f) in self:<EOL><INDENT>self[d, f].foreign_curve = foreign_curve<EOL>self[d, f].fx_spot = triangulated<EOL>self[f, d].domestic_curve = foreign_curve<EOL>self[f, d].fx_spot = <NUM_LIT:1> / triangulated<EOL><DEDENT>else:<EOL><INDENT>new[d, f] = FxCurve.cast(triangulated, self[d, s].domestic_curve, foreign_curve)<EOL>new[f, d] = FxCurve.cast(<NUM_LIT:1> / triangulated, foreign_curve, self[d, s].domestic_curve)<EOL><DEDENT><DEDENT><DEDENT>self.update(new)<EOL>", "docstring": "adds contents to FxShelf.\nIf curve is FxCurve or FxDict, spot should turn curve.currency into self.currency,\nelse spot should turn currency into self.currency by\nN in EUR * spot = N in USD for currency = EUR and self.currency = USD", "id": "f11614:c1:m3"}
{"signature": "def __init__(self, currency, domestic_curve):", "body": "super(FxContainer, self).__init__()<EOL>self.currency = currency<EOL>self.domestic_curve = domestic_curve<EOL>self.add(currency, domestic_curve)<EOL>", "docstring": ":param currency: base currency of FxContainer\n:param RateCurve domestic_curve: base curve of FxContainer for discounting", "id": "f11614:c1:m0"}
{"signature": "def __init__(self, x_list=list(), y_list=list(), boundary_condition=None):", "body": "super(spline, self).__init__(x_list, y_list)  <EOL>self.intervals = list()<EOL>self.interpolation_coefficients = list()<EOL>self.boundary_condition = boundary_condition<EOL>for i in range(<NUM_LIT:0>, len(self.x_list) - <NUM_LIT:1>):<EOL><INDENT>self.intervals.append([self.x_list[i], self.x_list[i + <NUM_LIT:1>]])<EOL><DEDENT>if self.y_list:<EOL><INDENT>self.set_interpolation_coefficients()<EOL><DEDENT>", "docstring": ":param x_list: data\n:param y_list: data\n:param boundary_condition: Either a tuple (l, r) of values for the slope or None.\n    If the argument is not specified then None will be taken as boundary conditions, which\n    leads to the so called not-a-knot method for splines. Not-a-knot will determine the boundary conditions by also\n    requiring that the third derivatives of the two most left and the two most right interpolation polynomials agree.\n    The boundary condition (0,0) will lead to the so called natural spline", "id": "f11615:c9:m0"}
{"signature": "def __init__(self, x_list=None, y_list=None, y_inter=None):", "body": "if not y_inter:<EOL><INDENT>y_inter = interpolation.linear()<EOL><DEDENT>y_left, y_mid, y_right = interpolation.constant(), interpolation.linear(), interpolation.constant()<EOL>if isinstance(y_inter, (tuple, list)):<EOL><INDENT>if len(y_inter) == <NUM_LIT:3>:<EOL><INDENT>y_left, y_mid, y_right = y_inter<EOL><DEDENT>elif len(y_inter) == <NUM_LIT:2>:<EOL><INDENT>y_mid, y_right = y_inter<EOL>y_left = y_right<EOL><DEDENT>elif len(y_inter) == <NUM_LIT:1>:<EOL><INDENT>y_mid = y_inter[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>raise ValueError<EOL><DEDENT><DEDENT>elif isinstance(y_inter, interpolation.base_interpolation):<EOL><INDENT>y_mid = y_inter<EOL><DEDENT>else:<EOL><INDENT>raise AttributeError<EOL><DEDENT>assert len(x_list) == len(y_list)<EOL>assert len(x_list) == len(set(x_list))<EOL>self._y_mid = type(y_mid)(x_list, y_list)<EOL>self._y_right = type(y_right)(x_list, y_list)<EOL>self._y_left = type(y_left)(x_list, y_list)<EOL>", "docstring": "r\"\"\"\n        Curve object to build function\n\n        :param list(float) x_list: source values\n        :param list(float) y_list: target values\n        :param list(interpolation.interpolation) y_inter: interpolation function on x_list (optional)\n            or triple of (left, mid, right) interpolation functions with\n            left for x < x_list[0] (as default triple.right is used)\n            right for x > x_list][-1] (as default interpolation.constant is used)\n            mid else (as default interpolation.linear is used)\n\n        Curve object to build function :math:`f:R \\rightarrow R, x \\mapsto y`\n        from finite point vectors :math:`x` and :math:`y`\n        using piecewise various interpolation functions.", "id": "f11617:c0:m0"}
{"signature": "def _frange(start, stop=None, step=None):", "body": "if stop is None:<EOL><INDENT>stop = start<EOL>start = <NUM_LIT:0.0><EOL><DEDENT>if step is None:<EOL><INDENT>step = <NUM_LIT:1.0><EOL><DEDENT>r = start<EOL>while r < stop:<EOL><INDENT>yield r<EOL>r += step<EOL><DEDENT>", "docstring": "_frange range like function for float inputs\n:param start:\n:type start:\n:param stop:\n:type stop:\n:param step:\n:type step:\n:return:\n:rtype:", "id": "f11618:m0"}
{"signature": "def key_for_name(name):", "body": "return '<STR_LIT>' % name<EOL>", "docstring": "Return the key name used to store the given queue name in Redis.", "id": "f11623:m0"}
{"signature": "def put(self, *msgs):", "body": "if self.serializer is not None:<EOL><INDENT>msgs = map(self.serializer.dumps, msgs)<EOL><DEDENT>self.__redis.rpush(self.key, *msgs)<EOL>", "docstring": "Put one or more messages onto the queue. Example:\n\n        >>> queue.put(\"my message\")\n        >>> queue.put(\"another message\")\n\n        To put messages onto the queue in bulk, which can be significantly\n        faster if you have a large number of messages:\n\n        >>> queue.put(\"my message\", \"another message\", \"third message\")", "id": "f11623:c0:m6"}
{"signature": "def _patch_file(path, content):", "body": "existing_content = open(path).read()<EOL>if existing_content == content:<EOL><INDENT>log.warn('<STR_LIT>')<EOL>return False<EOL><DEDENT>log.warn('<STR_LIT>')<EOL>_rename_path(path)<EOL>f = open(path, '<STR_LIT:w>')<EOL>try:<EOL><INDENT>f.write(content)<EOL><DEDENT>finally:<EOL><INDENT>f.close()<EOL><DEDENT>return True<EOL>", "docstring": "Will backup the file then patch it", "id": "f11626:m6"}
{"signature": "def _extractall(self, path=\"<STR_LIT:.>\", members=None):", "body": "import copy<EOL>import operator<EOL>from tarfile import ExtractError<EOL>directories = []<EOL>if members is None:<EOL><INDENT>members = self<EOL><DEDENT>for tarinfo in members:<EOL><INDENT>if tarinfo.isdir():<EOL><INDENT>directories.append(tarinfo)<EOL>tarinfo = copy.copy(tarinfo)<EOL>tarinfo.mode = <NUM_LIT> <EOL><DEDENT>self.extract(tarinfo, path)<EOL><DEDENT>if sys.version_info < (<NUM_LIT:2>, <NUM_LIT:4>):<EOL><INDENT>def sorter(dir1, dir2):<EOL><INDENT>return cmp(dir1.name, dir2.name)<EOL><DEDENT>directories.sort(sorter)<EOL>directories.reverse()<EOL><DEDENT>else:<EOL><INDENT>directories.sort(key=operator.attrgetter('<STR_LIT:name>'), reverse=True)<EOL><DEDENT>for tarinfo in directories:<EOL><INDENT>dirpath = os.path.join(path, tarinfo.name)<EOL>try:<EOL><INDENT>self.chown(tarinfo, dirpath)<EOL>self.utime(tarinfo, dirpath)<EOL>self.chmod(tarinfo, dirpath)<EOL><DEDENT>except ExtractError:<EOL><INDENT>e = sys.exc_info()[<NUM_LIT:1>]<EOL>if self.errorlevel > <NUM_LIT:1>:<EOL><INDENT>raise<EOL><DEDENT>else:<EOL><INDENT>self._dbg(<NUM_LIT:1>, \"<STR_LIT>\" % e)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Extract all members from the archive to the current working\n       directory and set owner, modification time and permissions on\n       directories afterwards. `path' specifies a different directory\n       to extract to. `members' is optional and must be a subset of the\n       list returned by getmembers().", "id": "f11626:m17"}
{"signature": "def _adjust_delay(self, slot, response):", "body": "if response.status in self.retry_http_codes:<EOL><INDENT>new_delay = max(slot.delay, <NUM_LIT:1>) * <NUM_LIT:4><EOL>new_delay = max(new_delay, self.mindelay)<EOL>new_delay = min(new_delay, self.maxdelay)<EOL>slot.delay = new_delay<EOL>self.stats.inc_value('<STR_LIT>')<EOL><DEDENT>elif response.status == <NUM_LIT:200>:<EOL><INDENT>new_delay = max(slot.delay / <NUM_LIT:2>, self.mindelay)<EOL>if new_delay < <NUM_LIT>:<EOL><INDENT>new_delay = <NUM_LIT:0><EOL><DEDENT>slot.delay = new_delay<EOL><DEDENT>", "docstring": "Define delay adjustment policy", "id": "f11636:c0:m8"}
{"signature": "def _dispatch(self, event, listener, *args, **kwargs):", "body": "if (<EOL>asyncio.iscoroutinefunction(listener) or<EOL>isinstance(listener, functools.partial) and<EOL>asyncio.iscoroutinefunction(listener.func)<EOL>):<EOL><INDENT>return self._dispatch_coroutine(event, listener, *args, **kwargs)<EOL><DEDENT>return self._dispatch_function(event, listener, *args, **kwargs)<EOL>", "docstring": "Dispatch an event to a listener.\n\n        Args:\n            event (str): The name of the event that triggered this call.\n            listener (def or async def): The listener to trigger.\n            *args: Any number of positional arguments.\n            **kwargs: Any number of keyword arguments.\n\n        This method inspects the listener. If it is a def it dispatches the\n        listener to a method that will execute that def. If it is an async def\n        it dispatches it to a method that will schedule the resulting coro with\n        the event loop.", "id": "f11647:c0:m11"}
{"signature": "def add_listener(self, event, listener):", "body": "self.emit('<STR_LIT>', event, listener)<EOL>self._listeners[event].append(listener)<EOL>self._check_limit(event)<EOL>return self<EOL>", "docstring": "Bind a listener to a particular event.\n\n        Args:\n            event (str): The name of the event to listen for. This may be any\n                string value.\n            listener (def or async def): The callback to execute when the event\n                fires. This may be a sync or async function.", "id": "f11647:c0:m2"}
{"signature": "async def _try_catch_coro(emitter, event, listener, coro):", "body": "try:<EOL><INDENT>await coro<EOL><DEDENT>except Exception as exc:<EOL><INDENT>if event == emitter.LISTENER_ERROR_EVENT:<EOL><INDENT>raise<EOL><DEDENT>emitter.emit(emitter.LISTENER_ERROR_EVENT, event, listener, exc)<EOL><DEDENT>", "docstring": "Coroutine wrapper to catch errors after async scheduling.\n\n    Args:\n        emitter (EventEmitter): The event emitter that is attempting to\n            call a listener.\n        event (str): The event that triggered the emitter.\n        listener (async def): The async def that was used to generate the coro.\n        coro (coroutine): The coroutine that should be tried.\n\n    If an exception is caught the function will use the emitter to emit the\n    failure event. If, however, the current event _is_ the failure event then\n    the method reraises. The reraised exception may show in debug mode for the\n    event loop but is otherwise silently dropped.", "id": "f11647:m0"}
{"signature": "def remove_listener(self, event, listener):", "body": "with contextlib.suppress(ValueError):<EOL><INDENT>self._listeners[event].remove(listener)<EOL>return True<EOL><DEDENT>with contextlib.suppress(ValueError):<EOL><INDENT>self._once[event].remove(listener)<EOL>return True<EOL><DEDENT>return False<EOL>", "docstring": "Remove a listener from the emitter.\n\n        Args:\n            event (str): The event name on which the listener is bound.\n            listener: A reference to the same object given to add_listener.\n\n        Returns:\n            bool: True if a listener was removed else False.\n\n        This method only removes one listener at a time. If a listener is\n        attached multiple times then this method must be called repeatedly.\n        Additionally, this method removes listeners first from the those\n        registered with 'on' or 'add_listener'. If none are found it continue\n        to remove afterwards from those added with 'once'.", "id": "f11647:c0:m4"}
{"signature": "@max_listeners.setter<EOL><INDENT>def max_listeners(self, value):<DEDENT>", "body": "self._max_listeners = value<EOL>", "docstring": "Set the max number of listeners before warning.", "id": "f11647:c0:m7"}
{"signature": "def _dispatch_function(self, event, listener, *args, **kwargs):", "body": "try:<EOL><INDENT>return listener(*args, **kwargs)<EOL><DEDENT>except Exception as exc:<EOL><INDENT>if event == self.LISTENER_ERROR_EVENT:<EOL><INDENT>raise<EOL><DEDENT>return self.emit(self.LISTENER_ERROR_EVENT, event, listener, exc)<EOL><DEDENT>", "docstring": "Execute a sync function.\n\n        Args:\n            event (str): The name of the event that triggered this call.\n            listener (def): The def that needs to be executed.\n            *args: Any number of positional arguments.\n            **kwargs: Any number of keyword arguments.\n\n        The values of *args and **kwargs are passed, unaltered, to the def\n        when exceuting. If there is an exception executing the def, such as the\n        wrong number of arguments, the emitter's error event is triggered. If\n        the triggering event _is_ the emitter's error event then the exception\n        is reraised. The reraised exception may show in debug mode for the\n        event loop but is otherwise silently dropped.", "id": "f11647:c0:m10"}
{"signature": "def listeners(self, event):", "body": "return self._listeners[event][:] + self._once[event][:]<EOL>", "docstring": "Get an iterable of all listeners for the given event.\n\n        Args:\n            event (str): The name of the event for which to generate an\n                iterable of listeners.\n\n        The resulting iterable contains all listeners regardless of whether\n        they were registered with 'on'/'add_listener' or 'once'.", "id": "f11647:c0:m8"}
{"signature": "def _check_limit(self, event):", "body": "if self.count(event) > self.max_listeners:<EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>'.format(event),<EOL>ResourceWarning,<EOL>)<EOL><DEDENT>", "docstring": "Check if the listener limit is hit and warn if needed.", "id": "f11647:c0:m1"}
{"signature": "@property<EOL><INDENT>def max_listeners(self):<DEDENT>", "body": "return self._max_listeners<EOL>", "docstring": "Get the max number of listeners before warning.", "id": "f11647:c0:m6"}
{"signature": "def _dispatch_coroutine(self, event, listener, *args, **kwargs):", "body": "try:<EOL><INDENT>coro = listener(*args, **kwargs)<EOL><DEDENT>except Exception as exc:<EOL><INDENT>if event == self.LISTENER_ERROR_EVENT:<EOL><INDENT>raise<EOL><DEDENT>return self.emit(self.LISTENER_ERROR_EVENT, event, listener, exc)<EOL><DEDENT>asyncio.ensure_future(<EOL>_try_catch_coro(self, event, listener, coro),<EOL>loop=self._loop,<EOL>)<EOL>", "docstring": "Schedule a coroutine for execution.\n\n        Args:\n            event (str): The name of the event that triggered this call.\n            listener (async def): The async def that needs to be executed.\n            *args: Any number of positional arguments.\n            **kwargs: Any number of keyword arguments.\n\n        The values of *args and **kwargs are passed, unaltered, to the async\n        def when generating the coro. If there is an exception generating the\n        coro, such as the wrong number of arguments, the emitter's error event\n        is triggered. If the triggering event _is_ the emitter's error event\n        then the exception is reraised. The reraised exception may show in\n        debug mode for the event loop but is otherwise silently dropped.", "id": "f11647:c0:m9"}
{"signature": "def __init__(self, emitter, event):", "body": "self._emitter = emitter<EOL>self._event = event<EOL>", "docstring": "Initialize the iterable with an emitter and target event.", "id": "f11648:c1:m0"}
{"signature": "async def _push(self, *args, **kwargs):", "body": "self._data.append((args, kwargs))<EOL>if self._future is not None:<EOL><INDENT>future, self._future = self._future, None<EOL>future.set_result(True)<EOL><DEDENT>", "docstring": "Push new data into the buffer. Resume looping if paused.", "id": "f11648:c0:m1"}
{"signature": "def bracket_split(source, brackets=('<STR_LIT>', '<STR_LIT:{}>', '<STR_LIT>'), strip=False):", "body": "starts = [e[<NUM_LIT:0>] for e in brackets]<EOL>in_bracket = <NUM_LIT:0><EOL>n = <NUM_LIT:0><EOL>last = <NUM_LIT:0><EOL>while n < len(source):<EOL><INDENT>e = source[n]<EOL>if not in_bracket and e in starts:<EOL><INDENT>in_bracket = <NUM_LIT:1><EOL>start = n<EOL>b_start, b_end = brackets[starts.index(e)]<EOL><DEDENT>elif in_bracket:<EOL><INDENT>if e == b_start:<EOL><INDENT>in_bracket += <NUM_LIT:1><EOL><DEDENT>elif e == b_end:<EOL><INDENT>in_bracket -= <NUM_LIT:1><EOL>if not in_bracket:<EOL><INDENT>if source[last:start]:<EOL><INDENT>yield source[last:start]<EOL><DEDENT>last = n + <NUM_LIT:1><EOL>yield source[start + strip:n + <NUM_LIT:1> - strip]<EOL><DEDENT><DEDENT><DEDENT>n += <NUM_LIT:1><EOL><DEDENT>if source[last:]:<EOL><INDENT>yield source[last:]<EOL><DEDENT>", "docstring": "DOES NOT RETURN EMPTY STRINGS (can only return empty bracket content if strip=True)", "id": "f11658:m3"}
{"signature": "def except_token(source, start, token, throw=True):", "body": "start = pass_white(source, start)<EOL>if start < len(source) and source[start] == token:<EOL><INDENT>return start + <NUM_LIT:1><EOL><DEDENT>if throw:<EOL><INDENT>raise SyntaxError('<STR_LIT>' % token)<EOL><DEDENT>return None<EOL>", "docstring": "Token can be only a single char. Returns position after token if found. Otherwise raises syntax error if throw\n    otherwise returns None", "id": "f11658:m8"}
{"signature": "def except_keyword(source, start, keyword):", "body": "start = pass_white(source, start)<EOL>kl = len(keyword)  <EOL>if kl + start > len(source):<EOL><INDENT>return None<EOL><DEDENT>if source[start:start + kl] != keyword:<EOL><INDENT>return None<EOL><DEDENT>if kl + start < len(source) and source[start + kl] in IDENTIFIER_PART:<EOL><INDENT>return None<EOL><DEDENT>return start + kl<EOL>", "docstring": "Returns position after keyword if found else None\n        Note: skips white space", "id": "f11658:m9"}
{"signature": "def pass_bracket(source, start, bracket='<STR_LIT>'):", "body": "e = bracket_split(source[start:], [bracket], False)<EOL>try:<EOL><INDENT>cand = next(e)<EOL><DEDENT>except StopIteration:<EOL><INDENT>return None, None<EOL><DEDENT>if not cand.strip():  <EOL><INDENT>try:<EOL><INDENT>res = next(e)<EOL>return res, start + len(cand) + len(res)<EOL><DEDENT>except StopIteration:<EOL><INDENT>return None, None<EOL><DEDENT><DEDENT>elif cand[-<NUM_LIT:1>] == bracket[<NUM_LIT:1>]:<EOL><INDENT>return cand, start + len(cand)<EOL><DEDENT>else:<EOL><INDENT>return None, None<EOL><DEDENT>", "docstring": "Returns content of brackets with brackets and first pos after brackets\n     if source[start] is followed by some optional white space and brackets.\n     Otherwise None", "id": "f11658:m4"}
{"signature": "def split_at_single(text, sep, not_before=[], not_after=[]):", "body": "n = <NUM_LIT:0><EOL>lt, s = len(text), len(sep)<EOL>last = <NUM_LIT:0><EOL>while n < lt:<EOL><INDENT>if not s + n > lt:<EOL><INDENT>if sep == text[n:n + s]:<EOL><INDENT>if any(text[last:n].endswith(e) for e in not_before):<EOL><INDENT>pass<EOL><DEDENT>elif any(text[n + s:].startswith(e) for e in not_after):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>yield text[last:n]<EOL>last = n + s<EOL>n += s - <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>n += <NUM_LIT:1><EOL><DEDENT>yield text[last:]<EOL>", "docstring": "Works like text.split(sep) but separated fragments\n    cant end with not_before or start with not_after", "id": "f11658:m14"}
{"signature": "def do_statement(source, start):", "body": "start = pass_white(source, start)<EOL>if not start < len(source):  <EOL><INDENT>return None, start<EOL><DEDENT>if any(startswith_keyword(source[start:], e) for e in {'<STR_LIT>', '<STR_LIT:default>'}):<EOL><INDENT>return None, start<EOL><DEDENT>rest = source[start:]<EOL>for key, meth in KEYWORD_METHODS.items(<EOL>):  <EOL><INDENT>if rest.startswith(key):<EOL><INDENT>if len(key) == len(rest) or rest[len(key)] not in IDENTIFIER_PART:<EOL><INDENT>return meth(source, start)<EOL><DEDENT><DEDENT><DEDENT>if rest[<NUM_LIT:0>] == '<STR_LIT:{>':  <EOL><INDENT>return do_block(source, start)<EOL><DEDENT>cand = parse_identifier(source, start, False)<EOL>if cand is not None:  <EOL><INDENT>label, cand_start = cand<EOL>cand_start = pass_white(source, cand_start)<EOL>if source[cand_start] == '<STR_LIT::>':<EOL><INDENT>return do_label(source, start)<EOL><DEDENT><DEDENT>return do_expression(source, start)<EOL>", "docstring": "returns none if not found other functions that begin with 'do_' raise\n    also this do_ type function passes white space", "id": "f11659:m5"}
{"signature": "def translate_flow(source):", "body": "global TO_REGISTER<EOL>TO_REGISTER = []<EOL>return do_block('<STR_LIT>' % source, <NUM_LIT:0>)[<NUM_LIT:0>], TO_REGISTER<EOL>", "docstring": "Source cant have arrays, object, constant or function literals.\n       Returns PySource and variables to register", "id": "f11659:m23"}
{"signature": "def translate_func(name, block, args):", "body": "inline = name.startswith('<STR_LIT>')<EOL>real_name = '<STR_LIT>'<EOL>if inline:<EOL><INDENT>name, real_name = name.split('<STR_LIT:@>')<EOL><DEDENT>arglist = '<STR_LIT:U+002CU+0020>'.join(args) + '<STR_LIT:U+002CU+0020>' if args else '<STR_LIT>'<EOL>code = '<STR_LIT>' % (name, arglist)<EOL>scope = \"<STR_LIT>\"  <EOL>for arg in args:<EOL><INDENT>scope += '<STR_LIT>' % (repr(arg), arg)<EOL><DEDENT>if real_name:<EOL><INDENT>scope += '<STR_LIT>' % (repr(real_name), name)<EOL><DEDENT>code += indent('<STR_LIT>' % scope)<EOL>block, nested_hoisted, nested_inline = remove_functions(block)<EOL>py_code, to_register = translate_flow(block)<EOL>to_register += list(nested_hoisted.keys())<EOL>if to_register:<EOL><INDENT>code += indent('<STR_LIT>' % str(to_register))<EOL><DEDENT>for nested_name, info in nested_hoisted.items():<EOL><INDENT>nested_block, nested_args = info<EOL>new_code = translate_func('<STR_LIT>', nested_block,<EOL>nested_args)<EOL>code += indent(new_code)<EOL>code += indent(<EOL>'<STR_LIT>' % repr(nested_name))<EOL>code += indent(<EOL>'<STR_LIT>' % repr(nested_name))<EOL><DEDENT>for nested_name, info in nested_inline.items():<EOL><INDENT>nested_block, nested_args = info<EOL>new_code = translate_func(nested_name, nested_block, nested_args)<EOL>py_code = inject_before_lval(py_code,<EOL>nested_name.split('<STR_LIT:@>')[<NUM_LIT:0>], new_code)<EOL><DEDENT>if py_code.strip():<EOL><INDENT>code += indent(py_code)<EOL><DEDENT>return code<EOL>", "docstring": "Translates functions and all nested functions to Python code.\n       name -  name of that function (global functions will be available under var while\n            inline will be available directly under this name )\n       block - code of the function (*with* brackets {} )\n       args - arguments that this function takes", "id": "f11661:m1"}
{"signature": "def is_object(n, last):", "body": "if is_empty_object(n, last):<EOL><INDENT>return True<EOL><DEDENT>if not n.strip():<EOL><INDENT>return False<EOL><DEDENT>if len(argsplit(n, '<STR_LIT:;>')) > <NUM_LIT:1>:<EOL><INDENT>return False<EOL><DEDENT>cands = argsplit(n, '<STR_LIT:U+002C>')<EOL>if not cands[-<NUM_LIT:1>].strip():<EOL><INDENT>return True  <EOL><DEDENT>for cand in cands:<EOL><INDENT>cand = cand.strip()<EOL>kv = argsplit(cand, '<STR_LIT::>')<EOL>if len(<EOL>kv<EOL>) > <NUM_LIT:2>:  <EOL><INDENT>kv = kv[<NUM_LIT:0>], '<STR_LIT::>'.join(kv[<NUM_LIT:1>:])<EOL><DEDENT>if len(kv) == <NUM_LIT:2>:<EOL><INDENT>k, v = kv<EOL>if not is_lval(k.strip()):<EOL><INDENT>return False<EOL><DEDENT>v = v.strip()<EOL>if v.startswith('<STR_LIT>'):<EOL><INDENT>continue<EOL><DEDENT>if v[<NUM_LIT:0>] == '<STR_LIT:{>':  <EOL><INDENT>return False<EOL><DEDENT>for e in KEYWORD_METHODS:<EOL><INDENT>if v.startswith(e) and len(e) < len(v) and v[len(<EOL>e)] not in IDENTIFIER_PART:<EOL><INDENT>return False<EOL><DEDENT><DEDENT><DEDENT>elif not (cand.startswith('<STR_LIT>') or cand.startswith('<STR_LIT>')):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "n may be the inside of block or object.\n       last is the code before object", "id": "f11662:m3"}
{"signature": "def remove_objects(code, count=<NUM_LIT:1>):", "body": "replacements = {}  <EOL>br = bracket_split(code, ['<STR_LIT:{}>', '<STR_LIT>'])<EOL>res = '<STR_LIT>'<EOL>last = '<STR_LIT>'<EOL>for e in br:<EOL><INDENT>if e[<NUM_LIT:0>] == '<STR_LIT:{>':<EOL><INDENT>n, temp_rep, cand_count = remove_objects(e[<NUM_LIT:1>:-<NUM_LIT:1>], count)<EOL>if is_object(n, last):<EOL><INDENT>res += '<STR_LIT:U+0020>' + OBJECT_LVAL % count<EOL>replacements[OBJECT_LVAL % count] = e<EOL>count += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>res += '<STR_LIT>' % n<EOL>count = cand_count<EOL>replacements.update(temp_rep)<EOL><DEDENT><DEDENT>elif e[<NUM_LIT:0>] == '<STR_LIT:[>':<EOL><INDENT>if is_array(last):<EOL><INDENT>res += e  <EOL><DEDENT>else:  <EOL><INDENT>n, rep, count = remove_objects(e[<NUM_LIT:1>:-<NUM_LIT:1>], count)<EOL>res += '<STR_LIT>' % n<EOL>replacements.update(rep)<EOL><DEDENT><DEDENT>else:  <EOL><INDENT>res += e<EOL><DEDENT>last = e  <EOL><DEDENT>return res, replacements, count<EOL>", "docstring": "This function replaces objects with OBJECTS_LVALS, returns new code, replacement dict and count.\n        count arg is the number that should be added to the LVAL of the first replaced object", "id": "f11662:m5"}
{"signature": "def translate_array(array, lval, obj_count=<NUM_LIT:1>, arr_count=<NUM_LIT:1>):", "body": "array = array[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL>array, obj_rep, obj_count = remove_objects(array, obj_count)<EOL>array, arr_rep, arr_count = remove_arrays(array, arr_count)<EOL>array, hoisted, inline = functions.remove_functions(array, all_inline=True)<EOL>assert not hoisted<EOL>arr = []<EOL>for e in argsplit(array, '<STR_LIT:U+002C>'):<EOL><INDENT>e = exp_translator(e.replace('<STR_LIT:\\n>', '<STR_LIT>'))<EOL>arr.append(e if e else '<STR_LIT:None>')<EOL><DEDENT>arr = '<STR_LIT>' % (lval, '<STR_LIT:U+002C>'.join(arr))<EOL>for nested_name, nested_info in inline.items():<EOL><INDENT>nested_block, nested_args = nested_info<EOL>new_def = FUNC_TRANSLATOR(nested_name, nested_block, nested_args)<EOL>arr = new_def + arr<EOL><DEDENT>for lval, obj in obj_rep.items():<EOL><INDENT>new_def, obj_count, arr_count = translate_object(<EOL>obj, lval, obj_count, arr_count)<EOL>arr = new_def + arr<EOL><DEDENT>for lval, obj in arr_rep.items():<EOL><INDENT>new_def, obj_count, arr_count = translate_array(<EOL>obj, lval, obj_count, arr_count)<EOL>arr = new_def + arr<EOL><DEDENT>return arr, obj_count, arr_count<EOL>", "docstring": "array has to be any js array for example [1,2,3]\n       lval has to be name of this array.\n       Returns python code that adds lval to the PY scope it should be put before lval", "id": "f11662:m10"}
{"signature": "def remove_arrays(code, count=<NUM_LIT:1>):", "body": "res = '<STR_LIT>'<EOL>last = '<STR_LIT>'<EOL>replacements = {}<EOL>for e in bracket_split(code, ['<STR_LIT>']):<EOL><INDENT>if e[<NUM_LIT:0>] == '<STR_LIT:[>':<EOL><INDENT>if is_array(last):<EOL><INDENT>name = ARRAY_LVAL % count<EOL>res += '<STR_LIT:U+0020>' + name<EOL>replacements[name] = e<EOL>count += <NUM_LIT:1><EOL><DEDENT>else:  <EOL><INDENT>cand, new_replacements, count = remove_arrays(e[<NUM_LIT:1>:-<NUM_LIT:1>], count)<EOL>res += '<STR_LIT>' % cand<EOL>replacements.update(new_replacements)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>res += e<EOL><DEDENT>last = e<EOL><DEDENT>return res, replacements, count<EOL>", "docstring": "removes arrays and replaces them with ARRAY_LVALS\n       returns new code and replacement dict\n       *NOTE* has to be called AFTER remove objects", "id": "f11662:m6"}
{"signature": "def recover_constants(py_source,<EOL>replacements):  ", "body": "for identifier, value in replacements.items():<EOL><INDENT>if identifier.startswith('<STR_LIT>'):<EOL><INDENT>py_source = py_source.replace(identifier,<EOL>'<STR_LIT>' % repr(value))<EOL><DEDENT>elif identifier.startswith('<STR_LIT>'):<EOL><INDENT>py_source = py_source.replace(<EOL>identifier, '<STR_LIT>' % unify_string_literals(value))<EOL><DEDENT>else:<EOL><INDENT>py_source = py_source.replace(identifier, '<STR_LIT>' % value)<EOL><DEDENT><DEDENT>return py_source<EOL>", "docstring": "Converts identifiers representing Js constants to the PyJs constants\n    PyJsNumberConst_1_ which has the true value of 5 will be converted to PyJsNumber(5)", "id": "f11663:m5"}
{"signature": "def parse_num(source, start, charset):", "body": "while start < len(source) and source[start] in charset:<EOL><INDENT>start += <NUM_LIT:1><EOL><DEDENT>return start<EOL>", "docstring": "Returns a first index>=start of chat not in charset", "id": "f11663:m2"}
{"signature": "def do_escape(source, n):", "body": "if not n + <NUM_LIT:1> < len(source):<EOL><INDENT>return '<STR_LIT>'  <EOL><DEDENT>if source[n + <NUM_LIT:1>] in LINE_TERMINATOR:<EOL><INDENT>if source[n + <NUM_LIT:1>] == CR and n + <NUM_LIT:2> < len(source) and source[n + <NUM_LIT:2>] == LF:<EOL><INDENT>return source[n:n + <NUM_LIT:3>], n + <NUM_LIT:3><EOL><DEDENT>return source[n:n + <NUM_LIT:2>], n + <NUM_LIT:2><EOL><DEDENT>if source[n + <NUM_LIT:1>] in ESCAPE_CHARS:<EOL><INDENT>return source[n:n + <NUM_LIT:2>], n + <NUM_LIT:2><EOL><DEDENT>if source[n + <NUM_LIT:1>] in {'<STR_LIT:x>', '<STR_LIT:u>'}:<EOL><INDENT>char, length = ('<STR_LIT:u>', <NUM_LIT:4>) if source[n + <NUM_LIT:1>] == '<STR_LIT:u>' else ('<STR_LIT:x>', <NUM_LIT:2>)<EOL>n += <NUM_LIT:2><EOL>end = parse_num(source, n, HEX)<EOL>if end - n < length:<EOL><INDENT>raise SyntaxError('<STR_LIT>')<EOL><DEDENT>return source[n - <NUM_LIT:2>:n + length], n + length<EOL><DEDENT>if source[n + <NUM_LIT:1>] in OCTAL:<EOL><INDENT>n += <NUM_LIT:1><EOL>end = parse_num(source, n, OCTAL)<EOL>end = min(end, n + <NUM_LIT:3>)  <EOL>max_num = <NUM_LIT:255><EOL>num = <NUM_LIT:0><EOL>len_parsed = <NUM_LIT:0><EOL>for e in source[n:end]:<EOL><INDENT>cand = <NUM_LIT:8> * num + int(e)<EOL>if cand > max_num:<EOL><INDENT>break<EOL><DEDENT>num = cand<EOL>len_parsed += <NUM_LIT:1><EOL><DEDENT>return '<STR_LIT:\\\\>' + hex(num)[<NUM_LIT:1>:], n + len_parsed<EOL><DEDENT>return source[n + <NUM_LIT:1>], n + <NUM_LIT:2><EOL>", "docstring": "Its actually quite complicated to cover every case :)\n       http://www.javascriptkit.com/jsref/escapesequence.shtml", "id": "f11663:m8"}
{"signature": "def remove_constants(source):", "body": "source = '<STR_LIT:U+0020>' + source + '<STR_LIT:\\n>'<EOL>comments = []<EOL>inside_comment, single_comment = False, False<EOL>inside_single, inside_double = False, False<EOL>inside_regexp = False<EOL>regexp_class_count = <NUM_LIT:0><EOL>n = <NUM_LIT:0><EOL>while n < len(source):<EOL><INDENT>char = source[n]<EOL>if char == '<STR_LIT:\">' and not (inside_comment or inside_single<EOL>or inside_regexp):<EOL><INDENT>if not _is_cancelled(source, n):<EOL><INDENT>if inside_double:<EOL><INDENT>inside_double[<NUM_LIT:1>] = n + <NUM_LIT:1><EOL>comments.append(inside_double)<EOL>inside_double = False<EOL><DEDENT>else:<EOL><INDENT>inside_double = [n, None, <NUM_LIT:0>]<EOL><DEDENT><DEDENT><DEDENT>elif char == \"<STR_LIT:'>\" and not (inside_comment or inside_double<EOL>or inside_regexp):<EOL><INDENT>if not _is_cancelled(source, n):<EOL><INDENT>if inside_single:<EOL><INDENT>inside_single[<NUM_LIT:1>] = n + <NUM_LIT:1><EOL>comments.append(inside_single)<EOL>inside_single = False<EOL><DEDENT>else:<EOL><INDENT>inside_single = [n, None, <NUM_LIT:0>]<EOL><DEDENT><DEDENT><DEDENT>elif (inside_single or inside_double):<EOL><INDENT>if char in LINE_TERMINATOR:<EOL><INDENT>if _is_cancelled(source, n):<EOL><INDENT>if char == CR and source[n + <NUM_LIT:1>] == LF:<EOL><INDENT>n += <NUM_LIT:1><EOL><DEDENT>n += <NUM_LIT:1><EOL>continue<EOL><DEDENT>else:<EOL><INDENT>raise SyntaxError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if inside_comment:<EOL><INDENT>if single_comment:<EOL><INDENT>if char in LINE_TERMINATOR:<EOL><INDENT>inside_comment[<NUM_LIT:1>] = n<EOL>comments.append(inside_comment)<EOL>inside_comment = False<EOL>single_comment = False<EOL><DEDENT><DEDENT>else:  <EOL><INDENT>if char == '<STR_LIT:/>' and source[n - <NUM_LIT:1>] == '<STR_LIT:*>':<EOL><INDENT>inside_comment[<NUM_LIT:1>] = n + <NUM_LIT:1><EOL>comments.append(inside_comment)<EOL>inside_comment = False<EOL><DEDENT><DEDENT><DEDENT>elif 
inside_regexp:<EOL><INDENT>if not quiting_regexp:<EOL><INDENT>if char in LINE_TERMINATOR:<EOL><INDENT>raise SyntaxError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>if _is_cancelled(source, n):<EOL><INDENT>n += <NUM_LIT:1><EOL>continue<EOL><DEDENT>if char == '<STR_LIT:[>':<EOL><INDENT>regexp_class_count += <NUM_LIT:1><EOL><DEDENT>elif char == '<STR_LIT:]>':<EOL><INDENT>regexp_class_count = max(regexp_class_count - <NUM_LIT:1>, <NUM_LIT:0>)<EOL><DEDENT>elif char == '<STR_LIT:/>' and not regexp_class_count:<EOL><INDENT>quiting_regexp = True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if char not in IDENTIFIER_START:<EOL><INDENT>inside_regexp[<NUM_LIT:1>] = n<EOL>comments.append(inside_regexp)<EOL>inside_regexp = False<EOL><DEDENT><DEDENT><DEDENT>elif char == '<STR_LIT:/>' and source[n - <NUM_LIT:1>] == '<STR_LIT:/>':<EOL><INDENT>single_comment = True<EOL>inside_comment = [n - <NUM_LIT:1>, None, <NUM_LIT:1>]<EOL><DEDENT>elif char == '<STR_LIT:*>' and source[n - <NUM_LIT:1>] == '<STR_LIT:/>':<EOL><INDENT>inside_comment = [n - <NUM_LIT:1>, None, <NUM_LIT:1>]<EOL><DEDENT>elif char == '<STR_LIT:/>' and source[n + <NUM_LIT:1>] not in ('<STR_LIT:/>', '<STR_LIT:*>'):<EOL><INDENT>if not _ensure_regexp(source, n):  <EOL><INDENT>n += <NUM_LIT:1><EOL>continue  <EOL><DEDENT>quiting_regexp = False<EOL>inside_regexp = [n, None, <NUM_LIT:2>]<EOL><DEDENT>elif not (inside_comment or inside_regexp):<EOL><INDENT>if (char in NUMS and<EOL>source[n - <NUM_LIT:1>] not in IDENTIFIER_PART) or char == '<STR_LIT:.>':<EOL><INDENT>if char == '<STR_LIT:.>':<EOL><INDENT>k = parse_num(source, n + <NUM_LIT:1>, NUMS)<EOL>if k == n + <NUM_LIT:1>:  <EOL><INDENT>n += <NUM_LIT:1><EOL>continue<EOL><DEDENT>k = parse_exponent(source, k)<EOL><DEDENT>elif char == '<STR_LIT:0>' and source[n + <NUM_LIT:1>] in {<EOL>'<STR_LIT:x>', '<STR_LIT:X>'<EOL>}:  <EOL><INDENT>k = parse_num(source, n + <NUM_LIT:2>, HEX)<EOL>if k == n + <NUM_LIT:2> or source[k] in IDENTIFIER_PART:<EOL><INDENT>raise 
SyntaxError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:  <EOL><INDENT>k = parse_num(source, n + <NUM_LIT:1>, NUMS)<EOL>if source[k] == '<STR_LIT:.>':<EOL><INDENT>k = parse_num(source, k + <NUM_LIT:1>, NUMS)<EOL><DEDENT>k = parse_exponent(source, k)<EOL><DEDENT>comments.append((n, k, <NUM_LIT:3>))<EOL>n = k<EOL>continue<EOL><DEDENT><DEDENT><DEDENT>n += <NUM_LIT:1><EOL><DEDENT>res = '<STR_LIT>'<EOL>start = <NUM_LIT:0><EOL>count = <NUM_LIT:0><EOL>constants = {}<EOL>for end, next_start, typ in comments:<EOL><INDENT>res += source[start:end]<EOL>start = next_start<EOL>if typ == <NUM_LIT:0>:  <EOL><INDENT>name = StringName<EOL><DEDENT>elif typ == <NUM_LIT:1>:  <EOL><INDENT>continue<EOL><DEDENT>elif typ == <NUM_LIT:2>:  <EOL><INDENT>name = RegExpName<EOL><DEDENT>elif typ == <NUM_LIT:3>:  <EOL><INDENT>name = NumberName<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError()<EOL><DEDENT>res += '<STR_LIT:U+0020>' + name % count + '<STR_LIT:U+0020>'<EOL>constants[name % count] = source[end:next_start]<EOL>count += <NUM_LIT:1><EOL><DEDENT>res += source[start:]<EOL>for e in WHITE:<EOL><INDENT>res = res.replace(e, '<STR_LIT:U+0020>')<EOL><DEDENT>res = res.replace(CR + LF, '<STR_LIT:\\n>')<EOL>for e in LINE_TERMINATOR:<EOL><INDENT>res = res.replace(e, '<STR_LIT:\\n>')<EOL><DEDENT>return res.strip(), constants<EOL>", "docstring": "Replaces Strings and Regexp literals in the source code with\n       identifiers and *removes comments*. Identifier is of the format:\n\n       PyJsStringConst(String const number)_ - for Strings\n       PyJsRegExpConst(RegExp const number)_ - for RegExps\n\n       Returns dict which relates identifier and replaced constant.\n\n       Removes single line and multiline comments from JavaScript source code\n       Pseudo comments (inside strings) will not be removed.\n\n       For example this line:\n       var x = \"/*PSEUDO COMMENT*/ TEXT //ANOTHER PSEUDO COMMENT\"\n       will be unaltered", "id": "f11663:m4"}
{"signature": "def _ensure_regexp(source, n):  ", "body": "markers = '<STR_LIT>'<EOL>k = <NUM_LIT:0><EOL>while True:<EOL><INDENT>k += <NUM_LIT:1><EOL>if n - k < <NUM_LIT:0>:<EOL><INDENT>return True<EOL><DEDENT>char = source[n - k]<EOL>if char in markers:<EOL><INDENT>return True<EOL><DEDENT>if char != '<STR_LIT:U+0020>' and char != '<STR_LIT:\\n>':<EOL><INDENT>break<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "returns True if regexp starts at n else returns False\n      checks whether it is not a division", "id": "f11663:m1"}
{"signature": "def lr(self, lis, op):", "body": "it = iter(lis)<EOL>res = trans(next(it))<EOL>for e in it:<EOL><INDENT>e = trans(e)<EOL>res = op(res, e)<EOL><DEDENT>return res<EOL>", "docstring": "performs this operation on a list from *left to right*\n        op must take 2 args\n        a,b,c  => op(op(a, b), c)", "id": "f11665:c0:m2"}
{"signature": "def is_valid_lval(t):", "body": "if not is_internal(t) and is_lval(t) and t not in RESERVED_NAMES:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Checks whether t is valid JS identifier name (no keyword like var, function, if etc)\n    Also returns false on internal", "id": "f11668:m1"}
{"signature": "def is_lval(t):", "body": "if not t:<EOL><INDENT>return False<EOL><DEDENT>i = iter(t)<EOL>if i.next() not in IDENTIFIER_START:<EOL><INDENT>return False<EOL><DEDENT>return all(e in IDENTIFIER_PART for e in i)<EOL>", "docstring": "Does not chceck whether t is not resticted or internal", "id": "f11668:m0"}
{"signature": "def translate_js6(js):", "body": "return translate_js(js6_to_js5(js))<EOL>", "docstring": "Just like translate_js but with experimental support for js6 via babel.", "id": "f11669:m9"}
{"signature": "def console(self):", "body": "while True:<EOL><INDENT>if six.PY2:<EOL><INDENT>code = raw_input('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>code = input('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>print(self.eval(code))<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>break<EOL><DEDENT>except Exception as e:<EOL><INDENT>import traceback<EOL>if DEBUG:<EOL><INDENT>sys.stderr.write(traceback.format_exc())<EOL><DEDENT>else:<EOL><INDENT>sys.stderr.write('<STR_LIT>' + str(e) + '<STR_LIT:\\n>')<EOL><DEDENT>time.sleep(<NUM_LIT>)<EOL><DEDENT><DEDENT>", "docstring": "starts to interact (starts interactive console) Something like code.InteractiveConsole", "id": "f11669:c0:m9"}
{"signature": "def translate_file(input_path, output_path):", "body": "js = get_file_contents(input_path)<EOL>py_code = translate_js(js)<EOL>lib_name = os.path.basename(output_path).split('<STR_LIT:.>')[<NUM_LIT:0>]<EOL>head = '<STR_LIT>' % repr(<EOL>lib_name)<EOL>tail = '<STR_LIT>' % lib_name<EOL>out = head + py_code + tail<EOL>write_file_contents(output_path, out)<EOL>", "docstring": "Translates input JS file to python and saves the it to the output path.\nIt appends some convenience code at the end so that it is easy to import JS objects.\n\nFor example we have a file 'example.js' with:   var a = function(x) {return x}\ntranslate_file('example.js', 'example.py')\n\nNow example.py can be easily importend and used:\n>>> from example import example\n>>> example.a(30)\n30", "id": "f11669:m5"}
{"signature": "def execute_debug(self, js):", "body": "code = translate_js(js, '<STR_LIT>')<EOL>filename = '<STR_LIT>' + os.sep + '<STR_LIT:_>' + hashlib.md5(<EOL>code.encode(\"<STR_LIT:utf-8>\")).hexdigest() + '<STR_LIT>'<EOL>try:<EOL><INDENT>with open(filename, mode='<STR_LIT:w>') as f:<EOL><INDENT>f.write(code)<EOL><DEDENT>with open(filename, \"<STR_LIT:r>\") as f:<EOL><INDENT>pyCode = compile(f.read(), filename, '<STR_LIT>')<EOL>exec(pyCode, self._context)<EOL><DEDENT><DEDENT>except Exception as err:<EOL><INDENT>raise err<EOL><DEDENT>finally:<EOL><INDENT>os.remove(filename)<EOL>try:<EOL><INDENT>os.remove(filename + '<STR_LIT:c>')<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>", "docstring": "executes javascript js in current context\n        as opposed to the (faster) self.execute method, you can use your regular debugger\n        to set breakpoints and inspect the generated python code", "id": "f11669:c0:m3"}
{"signature": "def eval(self, expression, use_compilation_plan=False):", "body": "code = '<STR_LIT>' % json.dumps(expression)<EOL>self.execute(code, use_compilation_plan=use_compilation_plan)<EOL>return self['<STR_LIT>']<EOL>", "docstring": "evaluates expression in current context and returns its value", "id": "f11669:c0:m2"}
{"signature": "def in_op(self, other):", "body": "if not is_object(other):<EOL><INDENT>raise MakeError(<EOL>'<STR_LIT>',<EOL>\"<STR_LIT>\")<EOL><DEDENT>return other.has_property(to_string(self))<EOL>", "docstring": "checks if self is in other", "id": "f11670:m27"}
{"signature": "def abstract_equality_op(self, other):", "body": "tx, ty = Type(self), Type(other)<EOL>if tx == ty:<EOL><INDENT>if tx == '<STR_LIT>' or tx == '<STR_LIT>':<EOL><INDENT>return True<EOL><DEDENT>if tx == '<STR_LIT>' or tx == '<STR_LIT>' or tx == '<STR_LIT>':<EOL><INDENT>return self == other<EOL><DEDENT>return self is other  <EOL><DEDENT>elif (tx == '<STR_LIT>' and ty == '<STR_LIT>') or (ty == '<STR_LIT>'<EOL>and tx == '<STR_LIT>'):<EOL><INDENT>return True<EOL><DEDENT>elif tx == '<STR_LIT>' and ty == '<STR_LIT>':<EOL><INDENT>return abstract_equality_op(self, to_number(other))<EOL><DEDENT>elif tx == '<STR_LIT>' and ty == '<STR_LIT>':<EOL><INDENT>return abstract_equality_op(to_number(self), other)<EOL><DEDENT>elif tx == '<STR_LIT>':<EOL><INDENT>return abstract_equality_op(to_number(self), other)<EOL><DEDENT>elif ty == '<STR_LIT>':<EOL><INDENT>return abstract_equality_op(self, to_number(other))<EOL><DEDENT>elif (tx == '<STR_LIT>' or tx == '<STR_LIT>') and is_object(other):<EOL><INDENT>return abstract_equality_op(self, to_primitive(other))<EOL><DEDENT>elif (ty == '<STR_LIT>' or ty == '<STR_LIT>') and is_object(self):<EOL><INDENT>return abstract_equality_op(to_primitive(self), other)<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "returns the result of JS == compare.\n       result is PyJs type: bool", "id": "f11670:m22"}
{"signature": "def abstract_relational_comparison(self, other,<EOL>self_first=True):  ", "body": "px = to_primitive(self, '<STR_LIT>')<EOL>py = to_primitive(other, '<STR_LIT>')<EOL>if not self_first:  <EOL><INDENT>px, py = py, px<EOL><DEDENT>if not (Type(px) == '<STR_LIT>' and Type(py) == '<STR_LIT>'):<EOL><INDENT>px, py = to_number(px), to_number(py)<EOL>if is_nan(px) or is_nan(py):<EOL><INDENT>return None  <EOL><DEDENT>return px < py  <EOL><DEDENT>else:<EOL><INDENT>return px < py<EOL><DEDENT>", "docstring": "self<other if self_first else other<self.\n       Returns the result of the question: is self smaller than other?\n       in case self_first is false it returns the answer of:\n                                           is other smaller than self.\n       result is PyJs type: bool or undefined", "id": "f11670:m17"}
{"signature": "def instanceof_op(self, other):", "body": "if not hasattr(other, '<STR_LIT>'):<EOL><INDENT>return False<EOL><DEDENT>return other.has_instance(self)<EOL>", "docstring": "checks if self is instance of other", "id": "f11670:m26"}
{"signature": "def ConstructArray(self, py_arr):", "body": "arr = self.NewArray(len(py_arr))<EOL>arr._init(py_arr)<EOL>return arr<EOL>", "docstring": "note py_arr elems are NOT converted to PyJs types!", "id": "f11671:c0:m9"}
{"signature": "def ConstructObject(self, py_obj):", "body": "obj = self.NewObject()<EOL>for k, v in py_obj.items():<EOL><INDENT>obj.put(unicode(k), v)<EOL><DEDENT>return obj<EOL>", "docstring": "note py_obj items are NOT converted to PyJs types!", "id": "f11671:c0:m10"}
{"signature": "def match(self, string, pos):", "body": "return self.pat.match(string, int(pos))<EOL>", "docstring": "string is of course a py string", "id": "f11686:c3:m1"}
{"signature": "def cok(self):", "body": "if type(self) in (UNDEFINED_TYPE, NULL_TYPE):<EOL><INDENT>raise MakeError('<STR_LIT>',<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Check object coercible", "id": "f11687:m10"}
{"signature": "def pad(num, n=<NUM_LIT:2>, sign=False):", "body": "s = unicode(abs(num))<EOL>if len(s) < n:<EOL><INDENT>s = '<STR_LIT:0>' * (n - len(s)) + s<EOL><DEDENT>if not sign:<EOL><INDENT>return s<EOL><DEDENT>if num >= <NUM_LIT:0>:<EOL><INDENT>return '<STR_LIT:+>' + s<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT:->' + s<EOL><DEDENT>", "docstring": "returns n digit string representation of the num", "id": "f11697:m10"}
{"signature": "def emit(self, op_code, *args):", "body": "self.tape.append(OP_CODES[op_code](*args))<EOL>", "docstring": "Adds op_code with specified args to tape", "id": "f11703:c0:m2"}
{"signature": "def compile(self, start_loc=<NUM_LIT:0>):", "body": "self.label_locs = {} if self.label_locs is None else self.label_locs<EOL>loc = start_loc<EOL>while loc < len(self.tape):<EOL><INDENT>if type(self.tape[loc]) == LABEL:<EOL><INDENT>self.label_locs[self.tape[loc].num] = loc<EOL>del self.tape[loc]<EOL>continue<EOL><DEDENT>loc += <NUM_LIT:1><EOL><DEDENT>self.compiled = True<EOL>", "docstring": "Records locations of labels and compiles the code", "id": "f11703:c0:m3"}
{"signature": "def to_arr(this):", "body": "return [this.get(str(e)) for e in xrange(len(this))]<EOL>", "docstring": "Returns Python array from Js array", "id": "f11710:m0"}
{"signature": "def MakeError(name, message):", "body": "return JsToPyException(ERRORS[name](Js(message)))<EOL>", "docstring": "Returns PyJsException with PyJsError inside", "id": "f11719:m1"}
{"signature": "def put(self, prop, val, op=None):  ", "body": "if self.Class == '<STR_LIT>' or self.Class == '<STR_LIT>':<EOL><INDENT>raise MakeError('<STR_LIT>',<EOL>'<STR_LIT>')<EOL><DEDENT>if not isinstance(prop, basestring):<EOL><INDENT>prop = prop.to_string().value<EOL><DEDENT>if NUMPY_AVAILABLE and prop.isdigit():<EOL><INDENT>if self.Class == '<STR_LIT>':<EOL><INDENT>val = Js(numpy.int8(val.to_number().value))<EOL><DEDENT>elif self.Class == '<STR_LIT>':<EOL><INDENT>val = Js(numpy.uint8(val.to_number().value))<EOL><DEDENT>elif self.Class == '<STR_LIT>':<EOL><INDENT>if val < Js(numpy.uint8(<NUM_LIT:0>)):<EOL><INDENT>val = Js(numpy.uint8(<NUM_LIT:0>))<EOL><DEDENT>elif val > Js(numpy.uint8(<NUM_LIT:255>)):<EOL><INDENT>val = Js(numpy.uint8(<NUM_LIT:255>))<EOL><DEDENT>else:<EOL><INDENT>val = Js(numpy.uint8(val.to_number().value))<EOL><DEDENT><DEDENT>elif self.Class == '<STR_LIT>':<EOL><INDENT>val = Js(numpy.int16(val.to_number().value))<EOL><DEDENT>elif self.Class == '<STR_LIT>':<EOL><INDENT>val = Js(numpy.uint16(val.to_number().value))<EOL><DEDENT>elif self.Class == '<STR_LIT>':<EOL><INDENT>val = Js(numpy.int32(val.to_number().value))<EOL><DEDENT>elif self.Class == '<STR_LIT>':<EOL><INDENT>val = Js(numpy.uint32(val.to_number().value))<EOL><DEDENT>elif self.Class == '<STR_LIT>':<EOL><INDENT>val = Js(numpy.float32(val.to_number().value))<EOL><DEDENT>elif self.Class == '<STR_LIT>':<EOL><INDENT>val = Js(numpy.float64(val.to_number().value))<EOL><DEDENT>if isinstance(self.buff, numpy.ndarray):<EOL><INDENT>self.buff[int(prop)] = int(val.to_number().value)<EOL><DEDENT><DEDENT>if op is not None:<EOL><INDENT>val = getattr(self.get(prop), OP_METHODS[op])(val)<EOL><DEDENT>if not self.can_put(prop):<EOL><INDENT>return val<EOL><DEDENT>own_desc = self.get_own_property(prop)<EOL>if is_data_descriptor(own_desc):<EOL><INDENT>if self.Class in [<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', 
'<STR_LIT>'<EOL>]:<EOL><INDENT>self.define_own_property(prop, {'<STR_LIT:value>': val})<EOL><DEDENT>else:<EOL><INDENT>self.own[prop]['<STR_LIT:value>'] = val<EOL><DEDENT>return val<EOL><DEDENT>desc = self.get_property(prop)<EOL>if is_accessor_descriptor(desc):<EOL><INDENT>desc['<STR_LIT>'].call(self, (val, ))<EOL><DEDENT>else:<EOL><INDENT>new = {<EOL>'<STR_LIT:value>': val,<EOL>'<STR_LIT>': True,<EOL>'<STR_LIT>': True,<EOL>'<STR_LIT>': True<EOL>}<EOL>if self.Class in [<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'<EOL>]:<EOL><INDENT>self.define_own_property(prop, new)<EOL><DEDENT>else:<EOL><INDENT>self.own[prop] = new<EOL><DEDENT><DEDENT>return val<EOL>", "docstring": "Just like in js: self.prop op= val\n           for example when op is '+' it will be self.prop+=val\n           op can be either None for simple assignment or one of:\n           * / % + - << >> & ^ |", "id": "f11719:c0:m12"}
{"signature": "def _set_name(self, name):", "body": "if self.own.get('<STR_LIT:name>'):<EOL><INDENT>self.func_name = name<EOL>self.own['<STR_LIT:name>']['<STR_LIT:value>'] = Js(name)<EOL><DEDENT>", "docstring": "name is py type", "id": "f11719:c7:m1"}
{"signature": "def callprop(self, prop, *args):", "body": "if not isinstance(prop, basestring):<EOL><INDENT>prop = prop.to_string().value<EOL><DEDENT>cand = self.get(prop)<EOL>if not cand.is_callable():<EOL><INDENT>raise MakeError('<STR_LIT>',<EOL>'<STR_LIT>' % cand.typeof())<EOL><DEDENT>return cand.call(self, args)<EOL>", "docstring": "Call a property prop as a method (this will be self).\n\n        NOTE: dont pass this and arguments here, these will be added\n        automatically!", "id": "f11719:c0:m70"}
{"signature": "def instanceof(self, other):", "body": "if not hasattr(other, '<STR_LIT>'):<EOL><INDENT>return false<EOL><DEDENT>return other.has_instance(self)<EOL>", "docstring": "checks if self is instance of other", "id": "f11719:c0:m62"}
{"signature": "def call(self, this, args=()):", "body": "if not hasattr(args, '<STR_LIT>'):  <EOL><INDENT>args = (args, )<EOL><DEDENT>args = tuple(Js(e) for e in args)  <EOL>arguments = PyJsArguments(<EOL>args, self)  <EOL>arglen = self.argcount  <EOL>if len(args) > arglen:<EOL><INDENT>args = args[<NUM_LIT:0>:arglen]<EOL><DEDENT>elif len(args) < arglen:<EOL><INDENT>args += (undefined, ) * (arglen - len(args))<EOL><DEDENT>args += this, arguments  <EOL>try:<EOL><INDENT>return Js(self.code(*args))<EOL><DEDENT>except NotImplementedError:<EOL><INDENT>raise<EOL><DEDENT>except RuntimeError as e:  <EOL><INDENT>raise MakeError(<EOL>'<STR_LIT>', e.message if<EOL>not isinstance(e, NotImplementedError) else '<STR_LIT>')<EOL><DEDENT>", "docstring": "Calls this function and returns a result\n        (converted to PyJs type so func can return python types)\n\n        this must be a PyJs object and args must be a python tuple of PyJs objects.\n\n        arguments object is passed automatically and will be equal to Js(args)\n        (tuple converted to arguments object).You dont need to worry about number\n        of arguments you provide if you supply less then missing ones will be set\n        to undefined (but not present in arguments object).\n        And if you supply too much then excess will not be passed\n        (but they will be present in arguments object).", "id": "f11719:c7:m3"}
{"signature": "def PyJsStrictNeq(a, b):", "body": "return PyJsStrictEq(a, b).neg()<EOL>", "docstring": "a!==b", "id": "f11719:m12"}
{"signature": "def to_python(self):", "body": "return to_python(self)<EOL>", "docstring": "returns equivalent python object.\n         for example if this object is javascript array then this method will return equivalent python array", "id": "f11719:c0:m71"}
{"signature": "def Js(val, Clamped=False):", "body": "if isinstance(val, PyJs):<EOL><INDENT>return val<EOL><DEDENT>elif val is None:<EOL><INDENT>return undefined<EOL><DEDENT>elif isinstance(val, basestring):<EOL><INDENT>return PyJsString(val, StringPrototype)<EOL><DEDENT>elif isinstance(val, bool):<EOL><INDENT>return true if val else false<EOL><DEDENT>elif isinstance(val, float) or isinstance(val, int) or isinstance(<EOL>val, long) or (NUMPY_AVAILABLE and isinstance(<EOL>val,<EOL>(numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,<EOL>numpy.int32, numpy.uint32, numpy.float32, numpy.float64))):<EOL><INDENT>if val in NUM_BANK:<EOL><INDENT>return NUM_BANK[val]<EOL><DEDENT>return PyJsNumber(float(val), NumberPrototype)<EOL><DEDENT>elif isinstance(val, FunctionType):<EOL><INDENT>return PyJsFunction(val, FunctionPrototype)<EOL><DEDENT>elif isinstance(val, dict):  <EOL><INDENT>temp = PyJsObject({}, ObjectPrototype)<EOL>for k, v in six.iteritems(val):<EOL><INDENT>temp.put(Js(k), Js(v))<EOL><DEDENT>return temp<EOL><DEDENT>elif isinstance(val, (list, tuple)):  <EOL><INDENT>return PyJsArray(val, ArrayPrototype)<EOL><DEDENT>elif isinstance(val, JsObjectWrapper):<EOL><INDENT>return val.__dict__['<STR_LIT>']<EOL><DEDENT>elif NUMPY_AVAILABLE and isinstance(val, numpy.ndarray):<EOL><INDENT>if val.dtype == numpy.int8:<EOL><INDENT>return PyJsInt8Array(val, Int8ArrayPrototype)<EOL><DEDENT>elif val.dtype == numpy.uint8 and not Clamped:<EOL><INDENT>return PyJsUint8Array(val, Uint8ArrayPrototype)<EOL><DEDENT>elif val.dtype == numpy.uint8 and Clamped:<EOL><INDENT>return PyJsUint8ClampedArray(val, Uint8ClampedArrayPrototype)<EOL><DEDENT>elif val.dtype == numpy.int16:<EOL><INDENT>return PyJsInt16Array(val, Int16ArrayPrototype)<EOL><DEDENT>elif val.dtype == numpy.uint16:<EOL><INDENT>return PyJsUint16Array(val, Uint16ArrayPrototype)<EOL><DEDENT>elif val.dtype == numpy.int32:<EOL><INDENT>return PyJsInt32Array(val, Int32ArrayPrototype)<EOL><DEDENT>elif val.dtype == 
numpy.uint32:<EOL><INDENT>return PyJsUint16Array(val, Uint32ArrayPrototype)<EOL><DEDENT>elif val.dtype == numpy.float32:<EOL><INDENT>return PyJsFloat32Array(val, Float32ArrayPrototype)<EOL><DEDENT>elif val.dtype == numpy.float64:<EOL><INDENT>return PyJsFloat64Array(val, Float64ArrayPrototype)<EOL><DEDENT><DEDENT>else:  <EOL><INDENT>return py_wrap(val)<EOL><DEDENT>", "docstring": "Converts Py type to PyJs type", "id": "f11719:m6"}
{"signature": "def abstract_equality_comparison(self, other):", "body": "tx, ty = self.TYPE, other.TYPE<EOL>if tx == ty:<EOL><INDENT>if tx == '<STR_LIT>' or tx == '<STR_LIT>':<EOL><INDENT>return true<EOL><DEDENT>if tx == '<STR_LIT>' or tx == '<STR_LIT>' or tx == '<STR_LIT>':<EOL><INDENT>return Js(self.value == other.value)<EOL><DEDENT>return Js(self is other)  <EOL><DEDENT>elif (tx == '<STR_LIT>' and ty == '<STR_LIT>') or (ty == '<STR_LIT>'<EOL>and tx == '<STR_LIT>'):<EOL><INDENT>return true<EOL><DEDENT>elif tx == '<STR_LIT>' and ty == '<STR_LIT>':<EOL><INDENT>return self.abstract_equality_comparison(other.to_number())<EOL><DEDENT>elif tx == '<STR_LIT>' and ty == '<STR_LIT>':<EOL><INDENT>return self.to_number().abstract_equality_comparison(other)<EOL><DEDENT>elif tx == '<STR_LIT>':<EOL><INDENT>return self.to_number().abstract_equality_comparison(other)<EOL><DEDENT>elif ty == '<STR_LIT>':<EOL><INDENT>return self.abstract_equality_comparison(other.to_number())<EOL><DEDENT>elif (tx == '<STR_LIT>' or tx == '<STR_LIT>') and other.is_object():<EOL><INDENT>return self.abstract_equality_comparison(other.to_primitive())<EOL><DEDENT>elif (ty == '<STR_LIT>' or ty == '<STR_LIT>') and self.is_object():<EOL><INDENT>return self.to_primitive().abstract_equality_comparison(other)<EOL><DEDENT>else:<EOL><INDENT>return false<EOL><DEDENT>", "docstring": "returns the result of JS == compare.\n           result is PyJs type: bool", "id": "f11719:c0:m59"}
{"signature": "def __init__(self, scope, closure=None):", "body": "self.prototype = closure<EOL>if closure is None:<EOL><INDENT>self.own = {}<EOL>for k, v in six.iteritems(scope):<EOL><INDENT>self.define_own_property(<EOL>k, {<EOL>'<STR_LIT:value>': v,<EOL>'<STR_LIT>': False,<EOL>'<STR_LIT>': False,<EOL>'<STR_LIT>': False<EOL>})<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.own = scope<EOL><DEDENT>", "docstring": "Doc", "id": "f11719:c2:m0"}
{"signature": "def translate_js(js, HEADER=DEFAULT_HEADER, use_compilation_plan=False):", "body": "if use_compilation_plan and not '<STR_LIT>' in js and not '<STR_LIT>' in js:<EOL><INDENT>return translate_js_with_compilation_plan(js, HEADER=HEADER)<EOL><DEDENT>parser = pyjsparser.PyJsParser()<EOL>parsed = parser.parse(js)  <EOL>translating_nodes.clean_stacks()<EOL>return HEADER + translating_nodes.trans(<EOL>parsed)<EOL>", "docstring": "js has to be a javascript source code.\n       returns equivalent python code.", "id": "f11745:m1"}
{"signature": "def translate_js_with_compilation_plan(js, HEADER=DEFAULT_HEADER):", "body": "match_increaser_str, match_increaser_num, compilation_plan = get_compilation_plan(<EOL>js)<EOL>cp_hash = hashlib.md5(compilation_plan.encode('<STR_LIT:utf-8>')).digest()<EOL>try:<EOL><INDENT>python_code = cache[cp_hash]['<STR_LIT>']<EOL><DEDENT>except:<EOL><INDENT>parser = pyjsparser.PyJsParser()<EOL>parsed = parser.parse(compilation_plan)  <EOL>translating_nodes.clean_stacks()<EOL>python_code = translating_nodes.trans(<EOL>parsed)  <EOL>cache[cp_hash] = {<EOL>'<STR_LIT>': compilation_plan,<EOL>'<STR_LIT>': python_code,<EOL>}<EOL><DEDENT>python_code = match_increaser_str.wrap_up(python_code)<EOL>python_code = match_increaser_num.wrap_up(python_code)<EOL>return HEADER + python_code<EOL>", "docstring": "js has to be a javascript source code.\n       returns equivalent python code.\n\n       compile plans only work with the following restrictions:\n       - only enabled for oneliner expressions\n       - when there are comments in the js code string substitution is disabled\n       - when there nested escaped quotes string substitution is disabled, so\n\n       cacheable:\n       Q1 == 1 && name == 'harry'\n\n       not cacheable:\n       Q1 == 1 && name == 'harry' // some comment\n\n       not cacheable:\n       Q1 == 1 && name == 'o\\'Reilly'\n\n       not cacheable:\n       Q1 == 1 && name /* some comment */ == 'o\\'Reilly'", "id": "f11745:m3"}
{"signature": "def dbg(x):", "body": "return '<STR_LIT>'<EOL>", "docstring": "does nothing, legacy dummy function", "id": "f11745:m0"}
{"signature": "def to_key(literal_or_identifier):", "body": "if literal_or_identifier['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>return literal_or_identifier['<STR_LIT:name>']<EOL><DEDENT>elif literal_or_identifier['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>k = literal_or_identifier['<STR_LIT:value>']<EOL>if isinstance(k, float):<EOL><INDENT>return unicode(float_repr(k))<EOL><DEDENT>elif '<STR_LIT>' in literal_or_identifier:<EOL><INDENT>return compose_regex(k)<EOL><DEDENT>elif isinstance(k, bool):<EOL><INDENT>return '<STR_LIT:true>' if k else '<STR_LIT:false>'<EOL><DEDENT>elif k is None:<EOL><INDENT>return '<STR_LIT:null>'<EOL><DEDENT>else:<EOL><INDENT>return unicode(k)<EOL><DEDENT><DEDENT>", "docstring": "returns string representation of this object", "id": "f11746:m1"}
{"signature": "def limited(func):", "body": "def f(standard=False, **args):<EOL><INDENT>insert_pos = len(<EOL>inline_stack.names<EOL>)  <EOL>res = func(**args)<EOL>if len(res) > LINE_LEN_LIMIT:<EOL><INDENT>name = inline_stack.require('<STR_LIT>')<EOL>inline_stack.names.pop()<EOL>inline_stack.names.insert(insert_pos, name)<EOL>res = '<STR_LIT>' % (name, res)<EOL>inline_stack.define(name, res)<EOL>return name + '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>return res<EOL><DEDENT><DEDENT>f.__dict__['<STR_LIT>'] = func<EOL>return f<EOL>", "docstring": "Decorator limiting resulting line length in order to avoid python parser stack overflow -\n      If expression longer than LINE_LEN_LIMIT characters then it will be moved to upper line\n     USE ONLY ON EXPRESSIONS!!!", "id": "f11746:m3"}
{"signature": "def trans(ele, standard=False):", "body": "try:<EOL><INDENT>node = globals().get(ele['<STR_LIT:type>'])<EOL>if not node:<EOL><INDENT>raise NotImplementedError('<STR_LIT>' % ele['<STR_LIT:type>'])<EOL><DEDENT>if standard:<EOL><INDENT>node = node.__dict__[<EOL>'<STR_LIT>'] if '<STR_LIT>' in node.__dict__ else node<EOL><DEDENT>return node(**ele)<EOL><DEDENT>except:<EOL><INDENT>raise<EOL><DEDENT>", "docstring": "Translates esprima syntax tree to python by delegating to appropriate translating node", "id": "f11746:m2"}
{"signature": "def argsplit(args, sep='<STR_LIT:U+002C>'):", "body": "parsed_len = <NUM_LIT:0><EOL>last = <NUM_LIT:0><EOL>splits = []<EOL>for e in bracket_split(args, brackets=['<STR_LIT>', '<STR_LIT>', '<STR_LIT:{}>']):<EOL><INDENT>if e[<NUM_LIT:0>] not in ('<STR_LIT:(>', '<STR_LIT:[>', '<STR_LIT:{>'):<EOL><INDENT>for i, char in enumerate(e):<EOL><INDENT>if char == sep:<EOL><INDENT>splits.append(args[last:parsed_len + i])<EOL>last = parsed_len + i + <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>parsed_len += len(e)<EOL><DEDENT>splits.append(args[last:])<EOL>return splits<EOL>", "docstring": "used to split JS args (it is not that simple as it seems because\n       sep can be inside brackets).\n\n       pass args *without* brackets!\n\n       Used also to parse array and object elements, and more", "id": "f11749:m9"}
{"signature": "def index(request):", "body": "return {}<EOL>", "docstring": "Base view to load our template", "id": "f11763:m0"}
{"signature": "def simple_route(config, name, url, fn):", "body": "config.add_route(name, url)<EOL>config.add_view(fn, route_name=name,<EOL>renderer=\"<STR_LIT>\" % name)<EOL>", "docstring": "Function to simplify creating routes in pyramid \n        Takes the pyramid configuration, name of the route, url, and view\n        function", "id": "f11764:m0"}
{"signature": "def includeme(config):", "body": "config.scan('<STR_LIT>')<EOL>", "docstring": "Pyramid configuration", "id": "f11765:m0"}
{"signature": "def leave(self, room):", "body": "self.socket.rooms.remove(self._get_room_name(room))<EOL>", "docstring": "Lets a user leave a room on a specific Namespace.", "id": "f11770:c0:m2"}
{"signature": "def join(self, room):", "body": "self.socket.rooms.add(self._get_room_name(room))<EOL>", "docstring": "Lets a user join a room on a specific Namespace.", "id": "f11770:c0:m1"}
{"signature": "def simple_route(config, name, url, fn):", "body": "config.add_route(name, url)<EOL>config.add_view(fn, route_name=name,<EOL>renderer=\"<STR_LIT>\" % name)<EOL>", "docstring": "Function to simplify creating routes in pyramid\nTakes the pyramid configuration, name of the route, url, and view\nfunction.", "id": "f11771:m0"}
{"signature": "def includeme(config):", "body": "config.scan('<STR_LIT>')<EOL>", "docstring": "Pyramid configuration", "id": "f11772:m0"}
{"signature": "def index(request):", "body": "return {}<EOL>", "docstring": "Base view to load our template", "id": "f11781:m0"}
{"signature": "def index(request):", "body": "return {}<EOL>", "docstring": "Base view to load our template", "id": "f11788:m0"}
{"signature": "def rooms(request, template=\"<STR_LIT>\"):", "body": "context = {\"<STR_LIT>\": ChatRoom.objects.all()}<EOL>return render(request, template, context)<EOL>", "docstring": "Homepage - lists all rooms.", "id": "f11797:m0"}
{"signature": "def room(request, slug, template=\"<STR_LIT>\"):", "body": "context = {\"<STR_LIT>\": get_object_or_404(ChatRoom, slug=slug)}<EOL>return render(request, template, context)<EOL>", "docstring": "Show a room.", "id": "f11797:m1"}
{"signature": "def leave(self, room):", "body": "self.session['<STR_LIT>'].remove(self._get_room_name(room))<EOL>", "docstring": "Lets a user leave a room on a specific Namespace.", "id": "f11805:c0:m2"}
{"signature": "def broadcast_event_not_me(self, event, *args):", "body": "pkt = dict(type=\"<STR_LIT>\",<EOL>name=event,<EOL>args=args,<EOL>endpoint=self.ns_name)<EOL>for sessid, socket in six.iteritems(self.socket.server.sockets):<EOL><INDENT>if socket is not self.socket:<EOL><INDENT>socket.send_packet(pkt)<EOL><DEDENT><DEDENT>", "docstring": "This is sent to all in the sockets in this particular Namespace,\nexcept itself.", "id": "f11805:c1:m1"}
{"signature": "def encode(data, json_dumps=default_json_dumps):", "body": "payload = '<STR_LIT>'<EOL>msg = str(MSG_TYPES[data['<STR_LIT:type>']])<EOL>if msg in ['<STR_LIT:0>', '<STR_LIT:1>']:<EOL><INDENT>msg += '<STR_LIT>' + data['<STR_LIT>']<EOL>if '<STR_LIT>' in data and data['<STR_LIT>'] != '<STR_LIT>':<EOL><INDENT>msg += '<STR_LIT::>' + data['<STR_LIT>']<EOL><DEDENT><DEDENT>elif msg == '<STR_LIT:2>':<EOL><INDENT>msg += '<STR_LIT>'<EOL><DEDENT>elif msg in ['<STR_LIT:3>', '<STR_LIT:4>', '<STR_LIT:5>']:<EOL><INDENT>if msg == '<STR_LIT:3>':<EOL><INDENT>payload = data['<STR_LIT:data>']<EOL><DEDENT>if msg == '<STR_LIT:4>':<EOL><INDENT>payload = json_dumps(data['<STR_LIT:data>'])<EOL><DEDENT>if msg == '<STR_LIT:5>':<EOL><INDENT>d = {}<EOL>d['<STR_LIT:name>'] = data['<STR_LIT:name>']<EOL>if '<STR_LIT:args>' in data and data['<STR_LIT:args>'] != []:<EOL><INDENT>d['<STR_LIT:args>'] = data['<STR_LIT:args>']<EOL><DEDENT>payload = json_dumps(d)<EOL><DEDENT>if '<STR_LIT:id>' in data:<EOL><INDENT>msg += '<STR_LIT::>' + str(data['<STR_LIT:id>'])<EOL>if data['<STR_LIT>'] == '<STR_LIT:data>':<EOL><INDENT>msg += '<STR_LIT:+>'<EOL><DEDENT>msg += '<STR_LIT::>'<EOL><DEDENT>else:<EOL><INDENT>msg += '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT>' not in data:<EOL><INDENT>data['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if payload != '<STR_LIT>':<EOL><INDENT>msg += data['<STR_LIT>'] + '<STR_LIT::>' + payload<EOL><DEDENT>else:<EOL><INDENT>msg += data['<STR_LIT>']<EOL><DEDENT><DEDENT>elif msg == '<STR_LIT>':<EOL><INDENT>msg += '<STR_LIT>' + data.get('<STR_LIT>', '<STR_LIT>') + '<STR_LIT::>' + str(data['<STR_LIT>'])<EOL>if '<STR_LIT:args>' in data and data['<STR_LIT:args>'] != []:<EOL><INDENT>msg += '<STR_LIT:+>' + json_dumps(data['<STR_LIT:args>'])<EOL><DEDENT><DEDENT>elif msg == '<STR_LIT>':<EOL><INDENT>msg += '<STR_LIT>'<EOL>if '<STR_LIT>' in data and data['<STR_LIT>'] != '<STR_LIT>':<EOL><INDENT>msg += str(ERROR_REASONS[data['<STR_LIT>']])<EOL><DEDENT>if '<STR_LIT>' in data and data['<STR_LIT>'] != 
'<STR_LIT>':<EOL><INDENT>msg += '<STR_LIT:+>' + str(ERROR_ADVICES[data['<STR_LIT>']])<EOL><DEDENT>msg += data['<STR_LIT>']<EOL><DEDENT>elif msg == '<STR_LIT>':<EOL><INDENT>msg += '<STR_LIT>'<EOL><DEDENT>return msg<EOL>", "docstring": "Encode an attribute dict into a byte string.", "id": "f11806:m0"}
{"signature": "def __hasitem__(self, key):", "body": "return key in self.active_ns<EOL>", "docstring": "Verifies if the namespace is active (was initialized)", "id": "f11809:c0:m12"}
{"signature": "def _get_next_msgid(self):", "body": "self.ack_counter += <NUM_LIT:1><EOL>return self.ack_counter<EOL>", "docstring": "This retrieves the next value for the 'id' field when sending\n        an 'event' or 'message' or 'json' that asks the remote client\n        to 'ack' back, so that we trigger the local callback.", "id": "f11809:c0:m7"}
{"signature": "def _set_namespaces(self, namespaces):", "body": "self.namespaces = namespaces<EOL>", "docstring": "This is a mapping (dict) of the different '/namespaces' to their\n        BaseNamespace object derivative.\n\n        This is called by socketio_manage().", "id": "f11809:c0:m1"}
{"signature": "def _set_request(self, request):", "body": "self.request = request<EOL>", "docstring": "Saves the request object for future use by the different Namespaces.\n\n        This is called by socketio_manage().", "id": "f11809:c0:m2"}
{"signature": "def _set_environ(self, environ):", "body": "self.environ = environ<EOL>", "docstring": "Save the WSGI environ, for future use.\n\n        This is called by socketio_manage().", "id": "f11809:c0:m3"}
{"signature": "def remove_namespace(self, namespace):", "body": "if namespace in self.active_ns:<EOL><INDENT>del self.active_ns[namespace]<EOL><DEDENT>if len(self.active_ns) == <NUM_LIT:0> and self.connected:<EOL><INDENT>self.kill(detach=True)<EOL><DEDENT>", "docstring": "This removes a Namespace object from the socket.\n\n        This is usually called by\n        :meth:`~socketio.namespace.BaseNamespace.disconnect`.", "id": "f11809:c0:m25"}
{"signature": "def get_client_msg(self, **kwargs):", "body": "return self.client_queue.get(**kwargs)<EOL>", "docstring": "Grab a message to send it to the browser", "id": "f11809:c0:m20"}
{"signature": "def __getitem__(self, key):", "body": "return self.active_ns[key]<EOL>", "docstring": "This will get the nested Namespace using its '/chat' reference.\n\n        Using this, you can go from one Namespace to the other (to emit, add\n        ACLs, etc..) with:\n\n          adminnamespace.socket['/chat'].add_acl_method('kick-ban')", "id": "f11809:c0:m11"}
{"signature": "def send_packet(self, pkt):", "body": "self.put_client_msg(packet.encode(pkt, self.json_dumps))<EOL>", "docstring": "Low-level interface to queue a packet on the wire (encoded as wire\n        protocol", "id": "f11809:c0:m26"}
{"signature": "@property<EOL><INDENT>def connected(self):<DEDENT>", "body": "return self.state == self.STATE_CONNECTED<EOL>", "docstring": "Returns whether the state is CONNECTED or not.", "id": "f11809:c0:m13"}
{"signature": "def put_client_msg(self, msg):", "body": "self.client_queue.put_nowait(msg)<EOL>", "docstring": "Writes to the client's pipe, to end up in the browser", "id": "f11809:c0:m19"}
{"signature": "def get_multiple_client_msgs(self, **kwargs):", "body": "client_queue = self.client_queue<EOL>msgs = [client_queue.get(**kwargs)]<EOL>while client_queue.qsize():<EOL><INDENT>msgs.append(client_queue.get())<EOL><DEDENT>return msgs<EOL>", "docstring": "Get multiple messages, in case we're going through the various\n        XHR-polling methods, on which we can pack more than one message if the\n        rate is high, and encode the payload for the HTTP channel.", "id": "f11809:c0:m22"}
{"signature": "def _spawn_watcher(self):", "body": "job = gevent.spawn(self._watcher)<EOL>return job<EOL>", "docstring": "This one is not waited for with joinall(socket.jobs), as it\n        is an external watcher, to clean up when everything is done.", "id": "f11809:c0:m31"}
{"signature": "def serve_paste(app, global_conf, **kw):", "body": "serve(app, **kw)<EOL>return <NUM_LIT:0><EOL>", "docstring": "pserve / paster serve / waitress replacement / integration\n\n    You can pass as parameters:\n\n    transports = websockets, xhr-multipart, xhr-longpolling, etc...\n    policy_server = True", "id": "f11810:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "self.sockets = {}<EOL>if '<STR_LIT>' in kwargs:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>self.resource = kwargs.pop('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.resource = kwargs.pop('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>self.transports = kwargs.pop('<STR_LIT>', None)<EOL>if kwargs.pop('<STR_LIT>', True):<EOL><INDENT>try:<EOL><INDENT>address = args[<NUM_LIT:0>][<NUM_LIT:0>]<EOL><DEDENT>except TypeError:<EOL><INDENT>try:<EOL><INDENT>address = args[<NUM_LIT:0>].address[<NUM_LIT:0>]<EOL><DEDENT>except AttributeError:<EOL><INDENT>address = args[<NUM_LIT:0>].cfg_addr[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>policylistener = kwargs.pop('<STR_LIT>', (address, <NUM_LIT>))<EOL>self.policy_server = FlashPolicyServer(policylistener)<EOL><DEDENT>else:<EOL><INDENT>self.policy_server = None<EOL><DEDENT>self.config = {<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>}<EOL>for f in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if f in kwargs:<EOL><INDENT>self.config[f] = int(kwargs.pop(f))<EOL><DEDENT><DEDENT>if not '<STR_LIT>' in kwargs:<EOL><INDENT>kwargs['<STR_LIT>'] = SocketIOHandler<EOL><DEDENT>if not '<STR_LIT>' in kwargs:<EOL><INDENT>self.ws_handler_class = WebSocketHandler<EOL><DEDENT>else:<EOL><INDENT>self.ws_handler_class = kwargs.pop('<STR_LIT>')<EOL><DEDENT>log_file = kwargs.pop('<STR_LIT>', None)<EOL>if log_file:<EOL><INDENT>kwargs['<STR_LIT>'] = open(log_file, '<STR_LIT:a>')<EOL><DEDENT>super(SocketIOServer, self).__init__(*args, **kwargs)<EOL>", "docstring": "This is just like the standard WSGIServer __init__, except with a\n        few additional ``kwargs``:\n\n        :param resource: The URL which has to be identified as a\n            socket.io request.  Defaults to the /socket.io/ URL.\n\n        :param transports: Optional list of transports to allow. 
List of\n            strings, each string should be one of\n            handler.SocketIOHandler.handler_types.\n\n        :param policy_server: Boolean describing whether or not to use the\n            Flash policy server.  Default True.\n\n        :param policy_listener: A tuple containing (host, port) for the\n            policy server.  This is optional and used only if policy server\n            is set to true.  The default value is 0.0.0.0:843\n\n        :param heartbeat_interval: int The timeout for the server, we\n            should receive a heartbeat from the client within this\n            interval. This should be less than the\n            ``heartbeat_timeout``.\n\n        :param heartbeat_timeout: int The timeout for the client when\n            it should send a new heartbeat to the server. This value\n            is sent to the client after a successful handshake.\n\n        :param close_timeout: int The timeout for the client, when it\n            closes the connection it still X amounts of seconds to do\n            re open of the connection. This value is sent to the\n            client after a successful handshake.\n\n        :param log_file: str The file in which you want the PyWSGI\n            server to write its access log.  If not specified, it\n            is sent to `stderr` (with gevent 0.13).", "id": "f11810:c0:m0"}
{"signature": "def get_socket(self, sessid='<STR_LIT>'):", "body": "socket = self.sockets.get(sessid)<EOL>if sessid and not socket:<EOL><INDENT>return None  <EOL><DEDENT>if socket is None:<EOL><INDENT>socket = Socket(self, self.config)<EOL>self.sockets[socket.sessid] = socket<EOL><DEDENT>else:<EOL><INDENT>socket.incr_hits()<EOL><DEDENT>return socket<EOL>", "docstring": "Return an existing or new client Socket.", "id": "f11810:c0:m4"}
{"signature": "def get_messages_payload(self, socket, timeout=None):", "body": "try:<EOL><INDENT>msgs = socket.get_multiple_client_msgs(timeout=timeout)<EOL>data = self.encode_payload(msgs)<EOL><DEDENT>except Empty:<EOL><INDENT>data = \"<STR_LIT>\"<EOL><DEDENT>return data<EOL>", "docstring": "This will fetch the messages from the Socket's queue, and if\n        there are many messes, pack multiple messages in one payload and return", "id": "f11811:c1:m5"}
{"signature": "def encode_payload(self, messages):", "body": "if not messages or messages[<NUM_LIT:0>] is None:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if len(messages) == <NUM_LIT:1>:<EOL><INDENT>return messages[<NUM_LIT:0>].encode('<STR_LIT:utf-8>')<EOL><DEDENT>payload = u'<STR_LIT>'.join([(u'<STR_LIT>' % (len(p), p))<EOL>for p in messages if p is not None])<EOL>return payload.encode('<STR_LIT:utf-8>')<EOL>", "docstring": "Encode list of messages. Expects messages to be unicode.\n\n        ``messages`` - List of raw messages to encode, if necessary", "id": "f11811:c1:m6"}
{"signature": "def __init__(self, handler, config, **kwargs):", "body": "self.content_type = (\"<STR_LIT:Content-Type>\", \"<STR_LIT>\")<EOL>self.headers = [<EOL>(\"<STR_LIT>\", \"<STR_LIT:*>\"),<EOL>(\"<STR_LIT>\", \"<STR_LIT:true>\"),<EOL>(\"<STR_LIT>\", \"<STR_LIT>\"),<EOL>(\"<STR_LIT>\", \"<STR_LIT>\"),<EOL>]<EOL>self.handler = handler<EOL>self.config = config<EOL>", "docstring": "Base transport class.\n\n        :param config: dict Should contain the config keys, like\n          ``heartbeat_interval``, ``heartbeat_timeout`` and\n          ``close_timeout``.", "id": "f11811:c0:m0"}
{"signature": "def socketio_manage(environ, namespaces, request=None, error_handler=None,<EOL>json_loads=None, json_dumps=None):", "body": "socket = environ['<STR_LIT>']<EOL>socket._set_environ(environ)<EOL>socket._set_namespaces(namespaces)<EOL>if request:<EOL><INDENT>socket._set_request(request)<EOL><DEDENT>if error_handler:<EOL><INDENT>socket._set_error_handler(error_handler)<EOL><DEDENT>if json_loads:<EOL><INDENT>socket._set_json_loads(json_loads)<EOL><DEDENT>if json_dumps:<EOL><INDENT>socket._set_json_dumps(json_dumps)<EOL><DEDENT>receiver_loop = socket._spawn_receiver_loop()<EOL>gevent.joinall([receiver_loop])<EOL>return<EOL>", "docstring": "Main SocketIO management function, call from within your Framework of\n    choice's view.\n\n    The ``environ`` variable is the WSGI ``environ``.  It is used to extract\n    Socket object from the underlying server (as the 'socketio' key), and will\n    be attached to both the ``Socket`` and ``Namespace`` objects.\n\n    The ``namespaces`` parameter is a dictionary of the namespace string\n    representation as key, and the BaseNamespace namespace class descendant as\n    a value.  The empty string ('') namespace is the global namespace.  You can\n    use Socket.GLOBAL_NS to be more explicit. So it would look like:\n\n    .. code-block:: python\n\n      namespaces={'': GlobalNamespace,\n                  '/chat': ChatNamespace}\n\n    The ``request`` object is not required, but will probably be useful to pass\n    framework-specific things into your Socket and Namespace functions. 
It will\n    simply be attached to the Socket and Namespace object (accessible through\n    ``self.request`` in both cases), and it is not accessed in any case by the\n    ``gevent-socketio`` library.\n\n    Pass in an ``error_handler`` if you want to override the default\n    error_handler (which is :func:`socketio.virtsocket.default_error_handler`.\n    The callable you pass in should have the same signature as the default\n    error handler.\n\n    The ``json_loads`` and ``json_dumps`` are overrides for the default\n    ``json.loads`` and ``json.dumps`` function calls.  Override these at\n    the top-most level here.  This will affect all sockets created by this\n    socketio manager, and all namespaces inside.\n\n    This function will block the current \"view\" or \"controller\" in your\n    framework to do the recv/send on the socket, and dispatch incoming messages\n    to your namespaces.\n\n    This is a simple example using Pyramid:\n\n    .. code-block:: python\n\n      def my_view(request):\n          socketio_manage(request.environ, {'': GlobalNamespace}, request)\n\n    NOTE: You must understand that this function is going to be called\n    *only once* per socket opening, *even though* you are using a long\n    polling mechanism.  The subsequent calls (for long polling) will\n    be hooked directly at the server-level, to interact with the\n    active ``Socket`` instance.  This means you will *not* get access\n    to the future ``request`` or ``environ`` objects.  This is of\n    particular importance regarding sessions (like Beaker).  The\n    session will be opened once at the opening of the Socket, and not\n    closed until the socket is closed.  You are responsible for\n    opening and closing the cookie-based session yourself if you want\n    to keep its data in sync with the rest of your GET/POST calls.", "id": "f11812:m0"}
{"signature": "def handle_one_response(self):", "body": "path = self.environ.get('<STR_LIT>')<EOL>if not path.lstrip('<STR_LIT:/>').startswith(self.server.resource + '<STR_LIT:/>'):<EOL><INDENT>return super(SocketIOHandler, self).handle_one_response()<EOL><DEDENT>self.status = None<EOL>self.headers_sent = False<EOL>self.result = None<EOL>self.response_length = <NUM_LIT:0><EOL>self.response_use_chunked = False<EOL>request_method = self.environ.get(\"<STR_LIT>\")<EOL>request_tokens = self.RE_REQUEST_URL.match(path)<EOL>handshake_tokens = self.RE_HANDSHAKE_URL.match(path)<EOL>disconnect_tokens = self.RE_DISCONNECT_URL.match(path)<EOL>if handshake_tokens:<EOL><INDENT>return self._do_handshake(handshake_tokens.groupdict())<EOL><DEDENT>elif disconnect_tokens:<EOL><INDENT>tokens = disconnect_tokens.groupdict()<EOL><DEDENT>elif request_tokens:<EOL><INDENT>tokens = request_tokens.groupdict()<EOL><DEDENT>else:<EOL><INDENT>return super(SocketIOHandler, self).handle_one_response()<EOL><DEDENT>sessid = tokens[\"<STR_LIT>\"]<EOL>socket = self.server.get_socket(sessid)<EOL>if not socket:<EOL><INDENT>self.handle_bad_request()<EOL>return []  <EOL><DEDENT>if self.environ['<STR_LIT>'].startswith('<STR_LIT>'):<EOL><INDENT>socket.disconnect()<EOL>self.handle_disconnect_request()<EOL>return []<EOL><DEDENT>transport = self.handler_types.get(tokens[\"<STR_LIT>\"])<EOL>old_class = None<EOL>if issubclass(transport, (transports.WebsocketTransport,<EOL>transports.FlashSocketTransport)):<EOL><INDENT>old_class = self.__class__<EOL>self.__class__ = self.server.ws_handler_class<EOL>self.prevent_wsgi_call = True  <EOL>self.handle_one_response()  <EOL><DEDENT>self.environ['<STR_LIT>'] = socket<EOL>self.transport = transport(self, self.config)<EOL>self.transport.do_exchange(socket, request_method)<EOL>if not socket.connection_established:<EOL><INDENT>socket.connection_established = True<EOL>socket.state = 
socket.STATE_CONNECTED<EOL>socket._spawn_heartbeat()<EOL>socket._spawn_watcher()<EOL>try:<EOL><INDENT>if socket.wsgi_app_greenlet is None:<EOL><INDENT>start_response = lambda status, headers, exc=None: None<EOL>socket.wsgi_app_greenlet = gevent.spawn(self.application,<EOL>self.environ,<EOL>start_response)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>self.handle_error(*sys.exc_info())<EOL><DEDENT><DEDENT>if tokens['<STR_LIT>'] in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>gevent.joinall(socket.jobs)<EOL><DEDENT>if old_class:<EOL><INDENT>self.__class__ = old_class<EOL><DEDENT>if hasattr(self, '<STR_LIT>') and self.websocket:<EOL><INDENT>if hasattr(self.websocket, '<STR_LIT>'):<EOL><INDENT>del self.websocket.environ<EOL><DEDENT>del self.websocket<EOL><DEDENT>if self.environ:<EOL><INDENT>del self.environ<EOL><DEDENT>", "docstring": "This function deals with *ONE INCOMING REQUEST* from the web.\n\n        It will wire and exchange message to the queues for long-polling\n        methods, otherwise, will stay alive for websockets.", "id": "f11815:c0:m5"}
{"signature": "def initialize(self):", "body": "pass<EOL>", "docstring": "This is called right after ``__init__``, on the initial\n        creation of a namespace so you may handle any setup job you\n        need.\n\n        Namespaces are created only when some packets arrive that ask\n        for the namespace.  They are not created altogether when a new\n        :class:`~socketio.virtsocket.Socket` connection is established,\n        so you can have many many namespaces assigned (when calling\n        :func:`~socketio.socketio_manage`) without clogging the\n        memory.\n\n        If you override this method, you probably want to initialize\n        the variables you're going to use in the events handled by this\n        namespace, setup ACLs, etc..\n\n        This method is called on all base classes following the _`method resolution order <http://docs.python.org/library/stdtypes.html?highlight=mro#class.__mro__>`\n        so you don't need to call super() to initialize the mixins or\n        other derived classes.", "id": "f11816:c0:m11"}
{"signature": "def error(self, error_name, error_message, msg_id=None, quiet=False):", "body": "self.socket.error(error_name, error_message, endpoint=self.ns_name,<EOL>msg_id=msg_id, quiet=quiet)<EOL>", "docstring": "Use this to use the configured ``error_handler`` yield an\n        error message to your application.\n\n        :param error_name: is a short string, to associate messages to recovery\n                           methods\n        :param error_message: is some human-readable text, describing the error\n        :param msg_id: is used to associate with a request\n        :param quiet: specific to error_handlers. The default doesn't send a\n                      message to the user, but shows a debug message on the\n                      developer console.", "id": "f11816:c0:m17"}
{"signature": "def call_method(self, method_name, packet, *args):", "body": "method = getattr(self, method_name, None)<EOL>if method is None:<EOL><INDENT>self.error('<STR_LIT>',<EOL>'<STR_LIT>' % method_name)<EOL>return<EOL><DEDENT>specs = inspect.getargspec(method)<EOL>func_args = specs.args<EOL>if not len(func_args) or func_args[<NUM_LIT:0>] != '<STR_LIT>':<EOL><INDENT>self.error(\"<STR_LIT>\",<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if hasattr(self, '<STR_LIT>'):<EOL><INDENT>method = self.exception_handler_decorator(method)<EOL><DEDENT>if len(func_args) == <NUM_LIT:2> and func_args[<NUM_LIT:1>] == '<STR_LIT>':<EOL><INDENT>return method(packet)<EOL><DEDENT>else:<EOL><INDENT>return method(*args)<EOL><DEDENT>", "docstring": "This function is used to implement the two behaviors on dispatched\n        ``on_*()`` and ``recv_*()`` method calls.\n\n        Those are the two behaviors:\n\n        * If there is only one parameter on the dispatched method and\n          it is named ``packet``, then pass in the packet dict as the\n          sole parameter.\n\n        * Otherwise, pass in the arguments as specified by the\n          different ``recv_*()`` methods args specs, or the\n          :meth:`process_event` documentation.\n\n        This method will also consider the\n        ``exception_handler_decorator``.  See Namespace documentation\n        for details and examples.", "id": "f11816:c0:m10"}
{"signature": "def del_acl_method(self, method_name):", "body": "if self.allowed_methods is None:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>)<EOL><DEDENT>self.allowed_methods.remove(method_name)<EOL>", "docstring": "ACL system: ensure the user will not have access to that method.", "id": "f11816:c0:m3"}
{"signature": "def add_acl_method(self, method_name):", "body": "if isinstance(self.allowed_methods, set):<EOL><INDENT>self.allowed_methods.add(method_name)<EOL><DEDENT>else:<EOL><INDENT>self.allowed_methods = set([method_name])<EOL><DEDENT>", "docstring": "ACL system: make the method_name accessible to the current socket", "id": "f11816:c0:m2"}
{"signature": "def send(self, message, json=False, callback=None):", "body": "pkt = dict(type=\"<STR_LIT:message>\", data=message, endpoint=self.ns_name)<EOL>if json:<EOL><INDENT>pkt['<STR_LIT:type>'] = \"<STR_LIT>\"<EOL><DEDENT>if callback:<EOL><INDENT>pkt['<STR_LIT>'] = True<EOL>pkt['<STR_LIT:id>'] = msgid = self.socket._get_next_msgid()<EOL>self.socket._save_ack_callback(msgid, callback)<EOL><DEDENT>self.socket.send_packet(pkt)<EOL>", "docstring": "Use send to send a simple string message.\n\n        If ``json`` is True, the message will be encoded as a JSON object\n        on the wire, and decoded on the other side.\n\n        This is mostly for backwards compatibility.  ``emit()`` is more fun.\n\n        :param callback: This is a callback function that will be\n                         called automatically by the client upon\n                         reception.  It does not verify that the\n                         listener over there was completed with\n                         success.  It just tells you that the browser\n                         got a hold of the packet.\n        :type callback: callable", "id": "f11816:c0:m18"}
{"signature": "def reset_acl(self):", "body": "self.allowed_methods = self.get_initial_acl()<EOL>", "docstring": "Resets ACL to its initial value (calling\n        :meth:`get_initial_acl`` and applying that again).", "id": "f11816:c0:m6"}
{"signature": "def get_initial_acl(self):", "body": "return None<EOL>", "docstring": "ACL system: If you define this function, you must return\n        all the 'event' names that you want your User (the established\n        virtual Socket) to have access to.\n\n        If you do not define this function, the user will have free\n        access to all of the ``on_*()`` and ``recv_*()`` functions,\n        etc.. methods.\n\n        Return something like: ``set(['recv_connect', 'on_public_method'])``\n\n        You can later modify this list dynamically (inside\n        ``on_connect()`` for example) using:\n\n        .. code-block:: python\n\n           self.add_acl_method('on_secure_method')\n\n        ``self.request`` is available in here, if you're already ready to\n        do some auth. check.\n\n        The ACLs are checked by the :meth:`process_packet` and/or\n        :meth:`process_event` default implementations, before calling\n        the class's methods.\n\n        **Beware**, returning ``None`` leaves the namespace completely\n        accessible.\n\n        The methods that are open are stored in the ``allowed_methods``\n        attribute of the ``Namespace`` instance.", "id": "f11816:c0:m5"}
{"signature": "def process_packet(self, packet):", "body": "packet_type = packet['<STR_LIT:type>']<EOL>if packet_type == '<STR_LIT>':<EOL><INDENT>return self.process_event(packet)<EOL><DEDENT>elif packet_type == '<STR_LIT:message>':<EOL><INDENT>return self.call_method_with_acl('<STR_LIT>', packet,<EOL>packet['<STR_LIT:data>'])<EOL><DEDENT>elif packet_type == '<STR_LIT>':<EOL><INDENT>return self.call_method_with_acl('<STR_LIT>', packet,<EOL>packet['<STR_LIT:data>'])<EOL><DEDENT>elif packet_type == '<STR_LIT>':<EOL><INDENT>self.socket.send_packet(packet)<EOL>return self.call_method_with_acl('<STR_LIT>', packet)<EOL><DEDENT>elif packet_type == '<STR_LIT:error>':<EOL><INDENT>return self.call_method_with_acl('<STR_LIT>', packet)<EOL><DEDENT>elif packet_type == '<STR_LIT>':<EOL><INDENT>callback = self.socket._pop_ack_callback(packet['<STR_LIT>'])<EOL>if not callback:<EOL><INDENT>print(\"<STR_LIT>\" % packet['<STR_LIT>'])<EOL>return<EOL><DEDENT>return callback(*(packet['<STR_LIT:args>']))<EOL><DEDENT>elif packet_type == '<STR_LIT>':<EOL><INDENT>return self.call_method_with_acl('<STR_LIT>', packet)<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\", packet)<EOL><DEDENT>", "docstring": "If you override this, NONE of the functions in this class\n        will be called.  It is responsible for dispatching to\n        :meth:`process_event` (which in turn calls ``on_*()`` and\n        ``recv_*()`` methods).\n\n        If the packet arrived here, it is because it belongs to this endpoint.\n\n        For each packet arriving, the only possible path of execution, that is,\n        the only methods that *can* be called are the following:\n\n        * recv_connect()\n        * recv_message()\n        * recv_json()\n        * recv_error()\n        * recv_disconnect()\n        * on_*()", "id": "f11816:c0:m7"}
{"signature": "def call_method_with_acl(self, method_name, packet, *args):", "body": "if not self.is_method_allowed(method_name):<EOL><INDENT>self.error('<STR_LIT>',<EOL>'<STR_LIT>' % method_name)<EOL>return<EOL><DEDENT>return self.call_method(method_name, packet, *args)<EOL>", "docstring": "You should always use this function to call the methods,\n        as it checks if the user is allowed according to the ACLs.\n\n        If you override :meth:`process_packet` or\n        :meth:`process_event`, you should definitely want to use this\n        instead of ``getattr(self, 'my_method')()``", "id": "f11816:c0:m9"}
{"signature": "def disconnect(self, silent=False):", "body": "if not silent:<EOL><INDENT>packet = {\"<STR_LIT:type>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": self.ns_name}<EOL>self.socket.send_packet(packet)<EOL><DEDENT>try:<EOL><INDENT>self.socket.remove_namespace(self.ns_name)<EOL><DEDENT>finally:<EOL><INDENT>self.kill_local_jobs()<EOL><DEDENT>", "docstring": "Send a 'disconnect' packet, so that the user knows it has been\n        disconnected (booted actually).  This will trigger an onDisconnect()\n        call on the client side.\n\n        Over here, we will kill all ``spawn``ed processes and remove the\n        namespace from the Socket object.\n\n        :param silent: do not actually send the packet (if they asked for a\n                       disconnect for example), but just kill all jobs spawned\n                       by this Namespace, and remove it from the Socket.", "id": "f11816:c0:m21"}
{"signature": "def spawn(self, fn, *args, **kwargs):", "body": "<EOL>if hasattr(self, '<STR_LIT>'):<EOL><INDENT>fn = self.exception_handler_decorator(fn)<EOL><DEDENT>new = gevent.spawn(fn, *args, **kwargs)<EOL>self.jobs.append(new)<EOL>return new<EOL>", "docstring": "Spawn a new process, attached to this Namespace.\n\n        It will be monitored by the \"watcher\" process in the Socket. If the\n        socket disconnects, all these greenlets are going to be killed, after\n        calling BaseNamespace.disconnect()\n\n        This method uses the ``exception_handler_decorator``.  See\n        Namespace documentation for more information.", "id": "f11816:c0:m20"}
{"signature": "def kill_local_jobs(self):", "body": "gevent.killall(self.jobs)<EOL>self.jobs = []<EOL>", "docstring": "Kills all the jobs spawned with BaseNamespace.spawn() on a namespace\n        object.\n\n        This will be called automatically if the ``watcher`` process detects\n        that the Socket was closed.", "id": "f11816:c0:m22"}
{"signature": "def process_event(self, packet):", "body": "args = packet['<STR_LIT:args>']<EOL>name = packet['<STR_LIT:name>']<EOL>if not allowed_event_name_regex.match(name):<EOL><INDENT>self.error(\"<STR_LIT>\",<EOL>\"<STR_LIT>\")<EOL>return<EOL><DEDENT>method_name = '<STR_LIT>' + name.replace('<STR_LIT:U+0020>', '<STR_LIT:_>')<EOL>return self.call_method_with_acl(method_name, packet, *args)<EOL>", "docstring": "This function dispatches ``event`` messages to the correct\n        functions. You should override this method only if you are not\n        satisfied with the automatic dispatching to\n        ``on_``-prefixed methods.  You could then implement your own dispatch.\n        See the source code for inspiration.\n\n        There are two ways to deal with callbacks from the client side\n        (meaning, the browser has a callback waiting for data that this\n        server will be sending back):\n\n        The first one is simply to return an object.  If the incoming\n        packet requested has an 'ack' field set, meaning the browser is\n        waiting for callback data, it will automatically be packaged\n        and sent, associated with the 'ackId' from the browser. The\n        return value must be a *sequence* of elements, that will be\n        mapped to the positional parameters of the callback function\n        on the browser side.\n\n        If you want to *know* that you're dealing with a packet\n        that requires a return value, you can do those things manually\n        by inspecting the ``ack`` and ``id`` keys from the ``packet``\n        object.  Your callback will behave specially if the name of\n        the argument to your method is ``packet``.  It will fill it\n        with the unprocessed ``packet`` object for your inspection,\n        like this:\n\n        .. 
code-block:: python\n\n          def on_my_callback(self, packet):\n              if 'ack' in packet:\n                  self.emit('go_back', 'param1', id=packet['id'])", "id": "f11816:c0:m8"}
{"signature": "def report(self, linenumber, filename, severity, message, rulename, char):", "body": "if self._print_filename is not None:<EOL><INDENT>print(\"<STR_LIT>\" + self._print_filename)<EOL>self._print_filename = None<EOL><DEDENT>if severity in (WARNING, ERROR):<EOL><INDENT>self.counts[severity] += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>self.counts[\"<STR_LIT>\"] += <NUM_LIT:1><EOL><DEDENT>print(self.args.format.format(linenumber=linenumber, filename=filename,<EOL>severity=severity, message=message.encode('<STR_LIT:utf-8>'),<EOL>rulename=rulename, char=char))<EOL>", "docstring": "Report a rule violation", "id": "f11825:c0:m14"}
{"signature": "def report(self, obj, message, linenum, char_offset=<NUM_LIT:0>):", "body": "self.controller.report(linenumber=linenum, filename=obj.path,<EOL>severity=self.severity, message=message,<EOL>rulename = self.__class__.__name__,<EOL>char=char_offset)<EOL>", "docstring": "Report an error or warning", "id": "f11826:c0:m3"}
{"signature": "@property<EOL><INDENT>def variables(self):<DEDENT>", "body": "for table in self.tables:<EOL><INDENT>if isinstance(table, VariableTable):<EOL><INDENT>for statement in table.rows:<EOL><INDENT>if statement[<NUM_LIT:0>] != \"<STR_LIT>\":<EOL><INDENT>yield statement<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Generator which returns all of the statements in all of the variables tables", "id": "f11834:c2:m2"}
{"signature": "@property<EOL><INDENT>def settings(self):<DEDENT>", "body": "for table in self.tables:<EOL><INDENT>if isinstance(table, SettingTable):<EOL><INDENT>for statement in table.statements:<EOL><INDENT>yield statement<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Generator which returns all of the statements in all of the settings tables", "id": "f11834:c2:m1"}
{"signature": "def walk(self, *types):", "body": "requested = types if len(types) > <NUM_LIT:0> else [SuiteFile, ResourceFile, SuiteFolder, Testcase, Keyword]<EOL>for thing in self.robot_files:<EOL><INDENT>if thing.__class__ in requested:<EOL><INDENT>yield thing<EOL><DEDENT>if isinstance(thing, SuiteFolder):<EOL><INDENT>for child in thing.walk():<EOL><INDENT>if child.__class__ in requested:<EOL><INDENT>yield child<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for child in thing.walk(*types):<EOL><INDENT>yield child<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Iterator which visits all suites and suite files,\nyielding test cases and keywords", "id": "f11834:c0:m1"}
{"signature": "@property<EOL><INDENT>def type(self):<DEDENT>", "body": "robot_tables = [table for table in self.tables if not isinstance(table, UnknownTable)]<EOL>if len(robot_tables) == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>for table in self.tables:<EOL><INDENT>if isinstance(table, TestcaseTable):<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT><DEDENT>return \"<STR_LIT>\"<EOL>", "docstring": "Return 'suite' or 'resource' or None\n\n        This will return 'suite' if a testcase table is found;\n        It will return 'resource' if at least one robot table\n        is found. If no tables are found it will return None", "id": "f11834:c1:m4"}
{"signature": "@property<EOL><INDENT>def settings(self):<DEDENT>", "body": "for table in self.tables:<EOL><INDENT>if isinstance(table, SettingTable):<EOL><INDENT>for statement in table.statements:<EOL><INDENT>yield statement<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Generator which returns all of the statements in all of the settings tables", "id": "f11834:c3:m1"}
{"signature": "def dump(self):", "body": "for table in self.tables:<EOL><INDENT>print(\"<STR_LIT>\" % table.name)<EOL>table.dump()<EOL><DEDENT>", "docstring": "Regurgitate the tables and rows", "id": "f11834:c1:m7"}
{"signature": "@property<EOL><INDENT>def keywords(self):<DEDENT>", "body": "for table in self.tables:<EOL><INDENT>if isinstance(table, KeywordTable):<EOL><INDENT>for keyword in table.keywords:<EOL><INDENT>yield keyword<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Generator which returns all keywords in the suite", "id": "f11834:c1:m5"}
{"signature": "@property<EOL><INDENT>def statements(self):<DEDENT>", "body": "<EOL>if len(self.rows) == <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>current_statement = Statement(self.rows[<NUM_LIT:0>])<EOL>current_statement.startline = self.rows[<NUM_LIT:0>].linenumber<EOL>current_statement.endline = self.rows[<NUM_LIT:0>].linenumber<EOL>statements = []<EOL>for row in self.rows[<NUM_LIT:1>:]:<EOL><INDENT>if len(row) > <NUM_LIT:0> and row[<NUM_LIT:0>] == \"<STR_LIT>\":<EOL><INDENT>current_statement += row[<NUM_LIT:1>:]<EOL>current_statement.endline = row.linenumber<EOL><DEDENT>else:<EOL><INDENT>if len(current_statement) > <NUM_LIT:0>:<EOL><INDENT>statements.append(current_statement)<EOL><DEDENT>current_statement = Statement(row)<EOL>current_statement.startline = row.linenumber<EOL>current_statement.endline = row.linenumber<EOL><DEDENT><DEDENT>if len(current_statement) > <NUM_LIT:0>:<EOL><INDENT>statements.append(current_statement)<EOL><DEDENT>while (len(statements[-<NUM_LIT:1>]) == <NUM_LIT:0> or <EOL>((len(statements[-<NUM_LIT:1>]) == <NUM_LIT:1>) and len(statements[-<NUM_LIT:1>][<NUM_LIT:0>]) == <NUM_LIT:0>)):<EOL><INDENT>statements.pop()<EOL><DEDENT>return statements<EOL>", "docstring": "Return a list of statements\n\n        This is done by joining together any rows that\n        have continuations", "id": "f11835:c1:m0"}
{"signature": "@property<EOL><INDENT>def is_templated(self):<DEDENT>", "body": "for table in self.parent.tables:<EOL><INDENT>if isinstance(table, SettingTable):<EOL><INDENT>for row in table.rows:<EOL><INDENT>if row[<NUM_LIT:0>].lower() == \"<STR_LIT>\":<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return False<EOL>", "docstring": "Return True if the test is part of a suite that uses a Test Template", "id": "f11836:c0:m1"}
{"signature": "def is_comment(self):", "body": "for cell in self[:]:<EOL><INDENT>if cell == \"<STR_LIT>\":<EOL><INDENT>continue<EOL><DEDENT>if cell.lstrip().startswith(\"<STR_LIT:#>\"):<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Return True if the first non-empty cell starts with \"#", "id": "f11837:c3:m1"}
{"signature": "def reset(self, settings=None):", "body": "self.__init__(settings=settings if settings is not None else SETTINGS)<EOL>", "docstring": "Reinitializes the Algolia engine and its client.\n        :param settings: settings to use instead of the default django.conf.settings.algolia", "id": "f11854:c2:m13"}
{"signature": "def __pre_delete_receiver(self, instance, **kwargs):", "body": "logger.debug('<STR_LIT>', instance.__class__)<EOL>self.delete_record(instance)<EOL>", "docstring": "Signal handler for when a registered model has been deleted.", "id": "f11854:c2:m15"}
{"signature": "def is_registered(self, model):", "body": "return model in self.__registered_models<EOL>", "docstring": "Checks whether the given models is registered with Algolia engine", "id": "f11854:c2:m1"}
{"signature": "def reindex_all(self, model, batch_size=<NUM_LIT:1000>):", "body": "adapter = self.get_adapter(model)<EOL>return adapter.reindex_all(batch_size)<EOL>", "docstring": "Reindex all the records.\n\nBy default, this method use Model.objects.all() but you can implement\na method `get_queryset` in your subclass. This can be used to optimize\nthe performance (for example with select_related or prefetch_related).", "id": "f11854:c2:m12"}
{"signature": "def get_adapter_from_instance(self, instance):", "body": "model = instance.__class__<EOL>return self.get_adapter(model)<EOL>", "docstring": "Returns the adapter associated with the given instance.", "id": "f11854:c2:m6"}
{"signature": "def handle(self, *args, **options):", "body": "batch_size = options.get('<STR_LIT>', None)<EOL>if not batch_size:<EOL><INDENT>batch_size = <NUM_LIT:1000><EOL><DEDENT>self.stdout.write('<STR_LIT>')<EOL>for model in get_registered_model():<EOL><INDENT>if options.get('<STR_LIT>', None) and not (model.__name__ in<EOL>options['<STR_LIT>']):<EOL><INDENT>continue<EOL><DEDENT>counts = reindex_all(model, batch_size=batch_size)<EOL>self.stdout.write('<STR_LIT>'.format(model.__name__, counts))<EOL><DEDENT>", "docstring": "Run the management command.", "id": "f11857:c0:m1"}
{"signature": "def handle(self, *args, **options):", "body": "self.stdout.write('<STR_LIT>')<EOL>for model in get_registered_model():<EOL><INDENT>if options.get('<STR_LIT>', None) and not (model.__name__ in<EOL>options['<STR_LIT>']):<EOL><INDENT>continue<EOL><DEDENT>clear_index(model)<EOL>self.stdout.write('<STR_LIT>'.format(model.__name__))<EOL><DEDENT>", "docstring": "Run the management command.", "id": "f11859:c0:m1"}
{"signature": "def raw_search(self, query='<STR_LIT>', params=None):", "body": "if params is None:<EOL><INDENT>params = {}<EOL><DEDENT>try:<EOL><INDENT>return self.__index.search(query, params)<EOL><DEDENT>except AlgoliaException as e:<EOL><INDENT>if DEBUG:<EOL><INDENT>raise e<EOL><DEDENT>else:<EOL><INDENT>logger.warning('<STR_LIT>', self.index_name, e)<EOL><DEDENT><DEDENT>", "docstring": "Performs a search query and returns the parsed JSON.", "id": "f11860:c1:m10"}
{"signature": "def clear_index(self):", "body": "try:<EOL><INDENT>self.__index.clear_index()<EOL>logger.info('<STR_LIT>', self.index_name)<EOL><DEDENT>except AlgoliaException as e:<EOL><INDENT>if DEBUG:<EOL><INDENT>raise e<EOL><DEDENT>else:<EOL><INDENT>logger.warning('<STR_LIT>', self.model, e)<EOL><DEDENT><DEDENT>", "docstring": "Clears the index.", "id": "f11860:c1:m13"}
{"signature": "def delete_record(self, instance):", "body": "objectID = self.objectID(instance)<EOL>try:<EOL><INDENT>self.__index.delete_object(objectID)<EOL>logger.info('<STR_LIT>', objectID, self.model)<EOL><DEDENT>except AlgoliaException as e:<EOL><INDENT>if DEBUG:<EOL><INDENT>raise e<EOL><DEDENT>else:<EOL><INDENT>logger.warning('<STR_LIT>', objectID,<EOL>self.model, e)<EOL><DEDENT><DEDENT>", "docstring": "Deletes the record.", "id": "f11860:c1:m8"}
{"signature": "@staticmethod<EOL><INDENT>def _validate_geolocation(geolocation):<DEDENT>", "body": "if set(geolocation) != {'<STR_LIT>', '<STR_LIT>'}:<EOL><INDENT>raise AlgoliaIndexError(<EOL>'<STR_LIT>'.format(<EOL>geolocation<EOL>)<EOL>)<EOL><DEDENT>", "docstring": "Make sure we have the proper geolocation format.", "id": "f11860:c1:m2"}
{"signature": "def get_raw_record(self, instance, update_fields=None):", "body": "tmp = {'<STR_LIT>': self.objectID(instance)}<EOL>if update_fields:<EOL><INDENT>if isinstance(update_fields, str):<EOL><INDENT>update_fields = (update_fields,)<EOL><DEDENT>for elt in update_fields:<EOL><INDENT>key = self.__translate_fields.get(elt, None)<EOL>if key:<EOL><INDENT>tmp[key] = self.__named_fields[key](instance)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for key, value in self.__named_fields.items():<EOL><INDENT>tmp[key] = value(instance)<EOL><DEDENT>if self.geo_field:<EOL><INDENT>loc = self.geo_field(instance)<EOL>if isinstance(loc, tuple):<EOL><INDENT>tmp['<STR_LIT>'] = {'<STR_LIT>': loc[<NUM_LIT:0>], '<STR_LIT>': loc[<NUM_LIT:1>]}<EOL><DEDENT>elif isinstance(loc, dict):<EOL><INDENT>self._validate_geolocation(loc)<EOL>tmp['<STR_LIT>'] = loc<EOL><DEDENT>elif isinstance(loc, list):<EOL><INDENT>[self._validate_geolocation(geo) for geo in loc]<EOL>tmp['<STR_LIT>'] = loc<EOL><DEDENT><DEDENT>if self.tags:<EOL><INDENT>if callable(self.tags):<EOL><INDENT>tmp['<STR_LIT>'] = self.tags(instance)<EOL><DEDENT>if not isinstance(tmp['<STR_LIT>'], list):<EOL><INDENT>tmp['<STR_LIT>'] = list(tmp['<STR_LIT>'])<EOL><DEDENT><DEDENT><DEDENT>logger.debug('<STR_LIT>', tmp['<STR_LIT>'], self.model)<EOL>return tmp<EOL>", "docstring": "Gets the raw record.\n\nIf `update_fields` is set, the raw record will be build with only\nthe objectID and the given fields. Also, `_geoloc` and `_tags` will\nnot be included.", "id": "f11860:c1:m3"}
{"signature": "def _has_should_index(self):", "body": "return self.should_index is not None<EOL>", "docstring": "Return True if this AlgoliaIndex has a should_index method or attribute", "id": "f11860:c1:m4"}
{"signature": "def _should_really_index(self, instance):", "body": "if self._should_index_is_method:<EOL><INDENT>is_method = inspect.ismethod(self.should_index)<EOL>try:<EOL><INDENT>count_args = len(inspect.signature(self.should_index).parameters)<EOL><DEDENT>except AttributeError:<EOL><INDENT>count_args = len(inspect.getargspec(self.should_index).args)<EOL><DEDENT>if is_method or count_args is <NUM_LIT:1>:<EOL><INDENT>return self.should_index(instance)<EOL><DEDENT>else:<EOL><INDENT>return self.should_index()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>attr_type = type(self.should_index)<EOL>if attr_type is DeferredAttribute:<EOL><INDENT>attr_value = self.should_index.__get__(instance, None)<EOL><DEDENT>elif attr_type is str:<EOL><INDENT>attr_value = getattr(instance, self.should_index)<EOL><DEDENT>elif attr_type is property:<EOL><INDENT>attr_value = self.should_index.__get__(instance)<EOL><DEDENT>else:<EOL><INDENT>raise AlgoliaIndexError('<STR_LIT>'.format(<EOL>self.should_index))<EOL><DEDENT>if type(attr_value) is not bool:<EOL><INDENT>raise AlgoliaIndexError(\"<STR_LIT>\" % (<EOL>instance.__class__.__name__, self.should_index))<EOL><DEDENT>return attr_value<EOL><DEDENT>", "docstring": "Return True if according to should_index the object should be indexed.", "id": "f11860:c1:m6"}
{"signature": "def update_records(self, qs, batch_size=<NUM_LIT:1000>, **kwargs):", "body": "tmp = {}<EOL>for key, value in kwargs.items():<EOL><INDENT>name = self.__translate_fields.get(key, None)<EOL>if name:<EOL><INDENT>tmp[name] = value<EOL><DEDENT><DEDENT>batch = []<EOL>objectsIDs = qs.only(self.custom_objectID).values_list(<EOL>self.custom_objectID, flat=True)<EOL>for elt in objectsIDs:<EOL><INDENT>tmp['<STR_LIT>'] = elt<EOL>batch.append(dict(tmp))<EOL>if len(batch) >= batch_size:<EOL><INDENT>self.__index.partial_update_objects(batch)<EOL>batch = []<EOL><DEDENT><DEDENT>if len(batch) > <NUM_LIT:0>:<EOL><INDENT>self.__index.partial_update_objects(batch)<EOL><DEDENT>", "docstring": "Updates multiple records.\n\nThis method is optimized for speed. It takes a QuerySet and the same\narguments as QuerySet.update(). Optionnaly, you can specify the size\nof the batch send to Algolia with batch_size (default to 1000).\n\n>>> from algoliasearch_django import update_records\n>>> qs = MyModel.objects.filter(myField=False)\n>>> update_records(MyModel, qs, myField=True)\n>>> qs.update(myField=True)", "id": "f11860:c1:m9"}
{"signature": "@classmethod<EOL><INDENT>def auto_client(cls, host, server, ca_path=None, ca_contents=None,<EOL>cert_path=None, cert_contents=None, key_path=None,<EOL>key_contents=None):<DEDENT>", "body": "<EOL>client = {<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": True<EOL>}<EOL>port = server.get('<STR_LIT:port>') or <NUM_LIT><EOL>client['<STR_LIT>'] = [{'<STR_LIT:host>': host, '<STR_LIT:port>': port}]<EOL>if server.get('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>client['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>client['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT>' in server or '<STR_LIT>' in server:<EOL><INDENT>client['<STR_LIT>'] = True<EOL><DEDENT>if '<STR_LIT>' not in server or not server['<STR_LIT>']:<EOL><INDENT>client['<STR_LIT>'] = False<EOL><DEDENT>ns_cert_type = {None: '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'}<EOL>client['<STR_LIT>'] = ns_cert_type[server.get('<STR_LIT>')]<EOL>remote_cert_tls = {None: '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'}<EOL>client['<STR_LIT>'] = remote_cert_tls[server.get('<STR_LIT>')]<EOL>copy_keys = ['<STR_LIT:name>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:key>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT:user>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT:float>', '<STR_LIT>', '<STR_LIT>']<EOL>for key in copy_keys:<EOL><INDENT>if key in server:<EOL><INDENT>client[key] = server[key]<EOL><DEDENT><DEDENT>files = cls._auto_client_files(client, ca_path, ca_contents,<EOL>cert_path, cert_contents,<EOL>key_path, key_contents)<EOL>return {<EOL>'<STR_LIT>': [client],<EOL>'<STR_LIT>': files<EOL>}<EOL>", "docstring": "Returns a configuration dictionary representing 
an OpenVPN client configuration\nthat is compatible with the passed server configuration.\n\n:param host: remote VPN server\n:param server: dictionary representing a single OpenVPN server configuration\n:param ca_path: optional string representing path to CA, will consequently add\n                a file in the resulting configuration dictionary\n:param ca_contents: optional string representing contents of CA file\n:param cert_path: optional string representing path to certificate, will consequently add\n                a file in the resulting configuration dictionary\n:param cert_contents: optional string representing contents of cert file\n:param key_path: optional string representing path to key, will consequently add\n                a file in the resulting configuration dictionary\n:param key_contents: optional string representing contents of key file\n:returns: dictionary representing a single OpenVPN client configuration", "id": "f11891:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def _auto_client_files(cls, client, ca_path=None, ca_contents=None, cert_path=None,<EOL>cert_contents=None, key_path=None, key_contents=None):<DEDENT>", "body": "files = []<EOL>if ca_path and ca_contents:<EOL><INDENT>client['<STR_LIT>'] = ca_path<EOL>files.append(dict(path=ca_path,<EOL>contents=ca_contents,<EOL>mode=DEFAULT_FILE_MODE))<EOL><DEDENT>if cert_path and cert_contents:<EOL><INDENT>client['<STR_LIT>'] = cert_path<EOL>files.append(dict(path=cert_path,<EOL>contents=cert_contents,<EOL>mode=DEFAULT_FILE_MODE))<EOL><DEDENT>if key_path and key_contents:<EOL><INDENT>client['<STR_LIT:key>'] = key_path<EOL>files.append(dict(path=key_path,<EOL>contents=key_contents,<EOL>mode=DEFAULT_FILE_MODE,))<EOL><DEDENT>return files<EOL>", "docstring": "returns a list of NetJSON extra files for automatically generated clients\nproduces side effects in ``client`` dictionary", "id": "f11891:c0:m2"}
{"signature": "def _generate_contents(self, tar):", "body": "text = self.render(files=False)<EOL>vpn_instances = vpn_pattern.split(text)<EOL>if '<STR_LIT>' in vpn_instances:<EOL><INDENT>vpn_instances.remove('<STR_LIT>')<EOL><DEDENT>for vpn in vpn_instances:<EOL><INDENT>lines = vpn.split('<STR_LIT:\\n>')<EOL>vpn_name = lines[<NUM_LIT:0>]<EOL>text_contents = '<STR_LIT:\\n>'.join(lines[<NUM_LIT:2>:])<EOL>if text_contents.endswith('<STR_LIT>'):<EOL><INDENT>text_contents = text_contents[<NUM_LIT:0>:-<NUM_LIT:1>]<EOL><DEDENT>self._add_file(tar=tar,<EOL>name='<STR_LIT>'.format(vpn_name, config_suffix),<EOL>contents=text_contents)<EOL><DEDENT>", "docstring": "Adds configuration files to tarfile instance.\n\n:param tar: tarfile instance\n:returns: None", "id": "f11891:c0:m0"}
{"signature": "def cleanup(self, output):", "body": "return output<EOL>", "docstring": "Performs cleanup of output (indentation, new lines)\n\n:param output: string representation of the client configuration", "id": "f11892:c0:m4"}
{"signature": "@classmethod<EOL><INDENT>def get_name(cls):<DEDENT>", "body": "return str(cls.__name__).replace('<STR_LIT>', '<STR_LIT>').lower()<EOL>", "docstring": "Returns the name of the render class without its prefix", "id": "f11892:c0:m3"}
{"signature": "def _render_files(self):", "body": "output = '<STR_LIT>'<EOL>files = self.config.get('<STR_LIT>', [])<EOL>if files:<EOL><INDENT>output += '<STR_LIT>'.format(self.FILE_SECTION_DELIMITER)<EOL><DEDENT>for f in files:<EOL><INDENT>mode = f.get('<STR_LIT>', DEFAULT_FILE_MODE)<EOL>file_output = '<STR_LIT>''<STR_LIT>''<STR_LIT>'.format(f['<STR_LIT:path>'], mode, f['<STR_LIT>'])<EOL>output += file_output<EOL><DEDENT>return output<EOL>", "docstring": "Renders additional files specified in ``self.config['files']``", "id": "f11894:c0:m4"}
{"signature": "def to_intermediate(self):", "body": "self.validate()<EOL>self.intermediate_data = OrderedDict()<EOL>for converter_class in self.converters:<EOL><INDENT>if not converter_class.should_run_forward(self.config):<EOL><INDENT>continue<EOL><DEDENT>converter = converter_class(self)<EOL>value = converter.to_intermediate()<EOL>if value and isinstance(value, (tuple, list)):  <EOL><INDENT>value = OrderedDict(value)<EOL><DEDENT>if value:<EOL><INDENT>self.intermediate_data = merge_config(self.intermediate_data,<EOL>value,<EOL>list_identifiers=['<STR_LIT>'])<EOL><DEDENT><DEDENT>", "docstring": "Converts the NetJSON configuration dictionary (self.config)\nto the intermediate data structure (self.intermediate_data) that will\nbe then used by the renderer class to generate the router configuration", "id": "f11894:c0:m13"}
{"signature": "def generate(self):", "body": "tar_bytes = BytesIO()<EOL>tar = tarfile.open(fileobj=tar_bytes, mode='<STR_LIT:w>')<EOL>self._generate_contents(tar)<EOL>self._process_files(tar)<EOL>tar.close()<EOL>tar_bytes.seek(<NUM_LIT:0>)  <EOL>gzip_bytes = BytesIO()<EOL>gz = gzip.GzipFile(fileobj=gzip_bytes, mode='<STR_LIT:wb>', mtime=<NUM_LIT:0>)<EOL>gz.write(tar_bytes.getvalue())<EOL>gz.close()<EOL>gzip_bytes.seek(<NUM_LIT:0>)  <EOL>return gzip_bytes<EOL>", "docstring": "Returns a ``BytesIO`` instance representing an in-memory tar.gz archive\ncontaining the native router configuration.\n\n:returns: in-memory tar.gz archive, instance of ``BytesIO``", "id": "f11894:c0:m8"}
{"signature": "def _process_files(self, tar):", "body": "<EOL>for file_item in self.config.get('<STR_LIT>', []):<EOL><INDENT>path = file_item['<STR_LIT:path>']<EOL>if path.startswith('<STR_LIT:/>'):<EOL><INDENT>path = path[<NUM_LIT:1>:]<EOL><DEDENT>self._add_file(tar=tar,<EOL>name=path,<EOL>contents=file_item['<STR_LIT>'],<EOL>mode=file_item.get('<STR_LIT>', DEFAULT_FILE_MODE))<EOL><DEDENT>", "docstring": "Adds files specified in self.config['files'] to tarfile instance.\n\n:param tar: tarfile instance\n:returns: None", "id": "f11894:c0:m11"}
{"signature": "def _load(self, config):", "body": "if isinstance(config, six.string_types):<EOL><INDENT>try:<EOL><INDENT>config = json.loads(config)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if not isinstance(config, dict):<EOL><INDENT>raise TypeError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>return config<EOL>", "docstring": "Loads config from string or dict", "id": "f11894:c0:m1"}
{"signature": "def _merge_config(self, config, templates):", "body": "if not templates:<EOL><INDENT>return config<EOL><DEDENT>if not isinstance(templates, list):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>result = {}<EOL>config_list = templates + [config]<EOL>for merging in config_list:<EOL><INDENT>result = merge_config(result, self._load(merging), self.list_identifiers)<EOL><DEDENT>return result<EOL>", "docstring": "Merges config with templates", "id": "f11894:c0:m2"}
{"signature": "def _add_file(self, tar, name, contents, mode=DEFAULT_FILE_MODE):", "body": "byte_contents = BytesIO(contents.encode('<STR_LIT:utf8>'))<EOL>info = tarfile.TarInfo(name=name)<EOL>info.size = len(contents)<EOL>info.mtime = <NUM_LIT:0><EOL>info.type = tarfile.REGTYPE<EOL>info.mode = int(mode, <NUM_LIT:8>)  <EOL>tar.addfile(tarinfo=info, fileobj=byte_contents)<EOL>", "docstring": "Adds a single file in tarfile instance.\n\n:param tar: tarfile instance\n:param name: string representing filename or path\n:param contents: string representing file contents\n:param mode: string representing file mode, defaults to 644\n:returns: None", "id": "f11894:c0:m12"}
{"signature": "def render(self, files=True):", "body": "self.validate()<EOL>if self.intermediate_data is None:<EOL><INDENT>self.to_intermediate()<EOL><DEDENT>renderers = getattr(self, '<STR_LIT>', None) or [self.renderer]<EOL>output = '<STR_LIT>'<EOL>for renderer_class in renderers:<EOL><INDENT>renderer = renderer_class(self)<EOL>output += renderer.render()<EOL>del renderer<EOL><DEDENT>if files:<EOL><INDENT>files_output = self._render_files()<EOL>if files_output:<EOL><INDENT>output += files_output.replace('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT><DEDENT>return output<EOL>", "docstring": "Converts the configuration dictionary into the corresponding configuration format\n\n:param files: whether to include \"additional files\" in the output or not;\n              defaults to ``True``\n:returns: string with output", "id": "f11894:c0:m6"}
{"signature": "def parse(self, native):", "body": "if not hasattr(self, '<STR_LIT>') or not self.parser:<EOL><INDENT>raise NotImplementedError('<STR_LIT>')<EOL><DEDENT>parser = self.parser(native)<EOL>self.intermediate_data = parser.intermediate_data<EOL>del parser<EOL>self.to_netjson()<EOL>", "docstring": "Parses a native configuration and converts\nit to a NetJSON configuration dictionary", "id": "f11894:c0:m14"}
{"signature": "def json(self, validate=True, *args, **kwargs):", "body": "if validate:<EOL><INDENT>self.validate()<EOL><DEDENT>config = deepcopy(self.config)<EOL>config.update({'<STR_LIT:type>': '<STR_LIT>'})<EOL>return json.dumps(config, *args, **kwargs)<EOL>", "docstring": "returns a string formatted as **NetJSON DeviceConfiguration**;\nperforms validation before returning output;\n\n``*args`` and ``*kwargs`` will be passed to ``json.dumps``;\n\n:returns: string", "id": "f11894:c0:m7"}
{"signature": "def to_netjson(self, remove_block=True):", "body": "result = OrderedDict()<EOL>intermediate_data = list(self.intermediate_data[self.intermediate_key])<EOL>for index, block in enumerate(intermediate_data):<EOL><INDENT>if self.should_skip_block(block):<EOL><INDENT>continue<EOL><DEDENT>if remove_block:<EOL><INDENT>self.intermediate_data[self.intermediate_key].remove(block)<EOL><DEDENT>result = self.to_netjson_loop(block, result, index + <NUM_LIT:1>)<EOL><DEDENT>return result<EOL>", "docstring": "Converts the intermediate data structure (``self.intermediate_datra``)\nto a NetJSON configuration dictionary (``self.config``)", "id": "f11895:c0:m8"}
{"signature": "def to_intermediate_loop(self, block, result, index=None):  ", "body": "raise NotImplementedError()<EOL>", "docstring": "Utility method called in the loop of ``to_intermediate``", "id": "f11895:c0:m7"}
{"signature": "def _sanitize_radios(self):", "body": "for radio in self.config.get('<STR_LIT>', []):<EOL><INDENT>radio.setdefault('<STR_LIT>', False)<EOL><DEDENT>", "docstring": "OpenWisp 1.x requires the following explicit entry\nin the radio sections of /uci/wireless.conf:\n    option disabled '0'", "id": "f11897:c0:m1"}
{"signature": "def _add_tc_script(self):", "body": "<EOL>context = dict(tc_options=self.config.get('<STR_LIT>', []))<EOL>contents = self._render_template('<STR_LIT>', context)<EOL>self.config.setdefault('<STR_LIT>', [])  <EOL>self._add_unique_file({<EOL>\"<STR_LIT:path>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": contents,<EOL>\"<STR_LIT>\": \"<STR_LIT>\"<EOL>})<EOL>", "docstring": "generates tc_script.sh and adds it to included files", "id": "f11897:c0:m8"}
{"signature": "def _get_install_context(self):", "body": "config = self.config<EOL>l2vpn = []<EOL>for vpn in self.config.get('<STR_LIT>', []):<EOL><INDENT>if vpn.get('<STR_LIT>') != '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>tap = vpn.copy()<EOL>l2vpn.append(tap)<EOL><DEDENT>bridges = []<EOL>for interface in self.config.get('<STR_LIT>', []):<EOL><INDENT>if interface['<STR_LIT:type>'] != '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>bridge = interface.copy()<EOL>if bridge.get('<STR_LIT>'):<EOL><INDENT>bridge['<STR_LIT>'] = interface['<STR_LIT>'][<NUM_LIT:0>].get('<STR_LIT>')<EOL>bridge['<STR_LIT>'] = interface['<STR_LIT>'][<NUM_LIT:0>].get('<STR_LIT:address>')<EOL><DEDENT>bridges.append(bridge)<EOL><DEDENT>cron = False<EOL>for _file in config.get('<STR_LIT>', []):<EOL><INDENT>path = _file['<STR_LIT:path>']<EOL>if path.startswith('<STR_LIT>') or path.startswith('<STR_LIT>'):<EOL><INDENT>cron = True<EOL>break<EOL><DEDENT><DEDENT>return dict(hostname=config['<STR_LIT>']['<STR_LIT>'],  <EOL>l2vpn=l2vpn,<EOL>bridges=bridges,<EOL>radios=config.get('<STR_LIT>', []),  <EOL>cron=cron)<EOL>", "docstring": "returns the template context for install.sh and uninstall.sh", "id": "f11897:c0:m4"}
{"signature": "def _add_install(self, context):", "body": "contents = self._render_template('<STR_LIT>', context)<EOL>self.config.setdefault('<STR_LIT>', [])  <EOL>self._add_unique_file({<EOL>\"<STR_LIT:path>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": contents,<EOL>\"<STR_LIT>\": \"<STR_LIT>\"<EOL>})<EOL>", "docstring": "generates install.sh and adds it to included files", "id": "f11897:c0:m5"}
{"signature": "@classmethod<EOL><INDENT>def should_run_forward(cls, config):<DEDENT>", "body": "return True<EOL>", "docstring": "Always runs", "id": "f11904:c0:m0"}
{"signature": "def __intermediate_proto(self, interface, address):", "body": "<EOL>address_proto = address.pop('<STR_LIT>', '<STR_LIT>')<EOL>if '<STR_LIT>' not in interface:<EOL><INDENT>return address_proto<EOL><DEDENT>else:<EOL><INDENT>return interface.pop('<STR_LIT>')<EOL><DEDENT>", "docstring": "determines UCI interface \"proto\" option", "id": "f11905:c0:m5"}
{"signature": "def __intermediate_dns_servers(self, uci, address):", "body": "<EOL>if '<STR_LIT>' in uci:<EOL><INDENT>return uci['<STR_LIT>']<EOL><DEDENT>if address['<STR_LIT>'] in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT:none>']:<EOL><INDENT>return None<EOL><DEDENT>dns = self.netjson.get('<STR_LIT>', None)<EOL>if dns:<EOL><INDENT>return '<STR_LIT:U+0020>'.join(dns)<EOL><DEDENT>", "docstring": "determines UCI interface \"dns\" option", "id": "f11905:c0:m6"}
{"signature": "def __intermediate_interface(self, interface, uci_name):", "body": "interface.update({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': uci_name,<EOL>'<STR_LIT>': interface.pop('<STR_LIT:name>')<EOL>})<EOL>if '<STR_LIT>' in interface:<EOL><INDENT>del interface['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in interface:<EOL><INDENT>if interface.get('<STR_LIT:type>') != '<STR_LIT>':<EOL><INDENT>interface['<STR_LIT>'] = interface['<STR_LIT>']<EOL><DEDENT>del interface['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in interface:<EOL><INDENT>interface['<STR_LIT>'] = interface['<STR_LIT>']<EOL>del interface['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in interface:<EOL><INDENT>interface['<STR_LIT>'] = not interface['<STR_LIT>']<EOL>del interface['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in interface:<EOL><INDENT>del interface['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in interface:<EOL><INDENT>del interface['<STR_LIT>']<EOL><DEDENT>return interface<EOL>", "docstring": "converts NetJSON interface to\nUCI intermediate data structure", "id": "f11905:c0:m2"}
{"signature": "def __intermediate_bridge(self, interface, i):", "body": "<EOL>if interface['<STR_LIT:type>'] == '<STR_LIT>' and i < <NUM_LIT:2>:<EOL><INDENT>bridge_members = '<STR_LIT:U+0020>'.join(interface.pop('<STR_LIT>'))<EOL>if bridge_members:<EOL><INDENT>interface['<STR_LIT>'] = bridge_members<EOL><DEDENT>else:<EOL><INDENT>interface['<STR_LIT>'] = True<EOL>del interface['<STR_LIT>']<EOL><DEDENT><DEDENT>elif interface['<STR_LIT:type>'] == '<STR_LIT>' and i >= <NUM_LIT:2>:<EOL><INDENT>if '<STR_LIT>' not in interface['<STR_LIT>']:<EOL><INDENT>interface['<STR_LIT>'] = '<STR_LIT>'.format(**interface)<EOL><DEDENT>for attr in ['<STR_LIT:type>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if attr in interface:<EOL><INDENT>del interface[attr]<EOL><DEDENT><DEDENT><DEDENT>elif interface['<STR_LIT:type>'] != '<STR_LIT>':<EOL><INDENT>del interface['<STR_LIT:type>']<EOL><DEDENT>return interface<EOL>", "docstring": "converts NetJSON bridge to\nUCI intermediate data structure", "id": "f11905:c0:m4"}
{"signature": "def __intermediate_hwmode(self, radio):", "body": "protocol = radio['<STR_LIT>']<EOL>if protocol in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>return protocol[<NUM_LIT:4>:]<EOL><DEDENT>if radio['<STR_LIT>'] is <NUM_LIT:0>:<EOL><INDENT>return radio.get('<STR_LIT>')<EOL><DEDENT>elif radio['<STR_LIT>'] <= <NUM_LIT>:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>", "docstring": "possible return values are: 11a, 11b, 11g", "id": "f11914:c0:m2"}
{"signature": "def _generate_contents(self, tar):", "body": "uci = self.render(files=False)<EOL>packages = packages_pattern.split(uci)<EOL>if '<STR_LIT>' in packages:<EOL><INDENT>packages.remove('<STR_LIT>')<EOL><DEDENT>for package in packages:<EOL><INDENT>lines = package.split('<STR_LIT:\\n>')<EOL>package_name = lines[<NUM_LIT:0>]<EOL>text_contents = '<STR_LIT:\\n>'.join(lines[<NUM_LIT:2>:])<EOL>self._add_file(tar=tar,<EOL>name='<STR_LIT>'.format(config_path, package_name),<EOL>contents=text_contents)<EOL><DEDENT>", "docstring": "Adds configuration files to tarfile instance.\n\n:param tar: tarfile instance\n:returns: None", "id": "f11915:c0:m0"}
{"signature": "def merge_config(template, config, list_identifiers=None):", "body": "result = template.copy()<EOL>for key, value in config.items():<EOL><INDENT>if isinstance(value, dict):<EOL><INDENT>node = result.get(key, OrderedDict())<EOL>result[key] = merge_config(node, value)<EOL><DEDENT>elif isinstance(value, list) and isinstance(result.get(key), list):<EOL><INDENT>result[key] = merge_list(result[key], value, list_identifiers)<EOL><DEDENT>else:<EOL><INDENT>result[key] = value<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Merges ``config`` on top of ``template``.\n\nConflicting keys are handled in the following way:\n\n* simple values (eg: ``str``, ``int``, ``float``, ecc) in ``config`` will\n  overwrite the ones in ``template``\n* values of type ``list`` in both ``config`` and ``template`` will be\n  merged using to the ``merge_list`` function\n* values of type ``dict`` will be merged recursively\n\n:param template: template ``dict``\n:param config: config ``dict``\n:param list_identifiers: ``list`` or ``None``\n:returns: merged ``dict``", "id": "f11916:m0"}
{"signature": "def _list_errors(e):", "body": "error_list = []<EOL>for value, error in zip(e.validator_value, e.context):<EOL><INDENT>error_list.append((value, error.message))<EOL>if error.context:<EOL><INDENT>error_list += _list_errors(error)<EOL><DEDENT><DEDENT>return error_list<EOL>", "docstring": "Returns a list of violated schema fragments and related error messages\n:param e: ``jsonschema.exceptions.ValidationError`` instance", "id": "f11918:m0"}
{"signature": "def get_install_requires():", "body": "requirements = []<EOL>for line in open('<STR_LIT>').readlines():<EOL><INDENT>if line.startswith('<STR_LIT:#>') or line == '<STR_LIT>' or line.startswith('<STR_LIT:http>') or line.startswith('<STR_LIT>'):<EOL><INDENT>continue<EOL><DEDENT>requirements.append(line.replace('<STR_LIT:\\n>', '<STR_LIT>'))<EOL><DEDENT>if sys.version_info.major < <NUM_LIT:3>:<EOL><INDENT>requirements.append('<STR_LIT>')<EOL><DEDENT>return requirements<EOL>", "docstring": "parse requirements.txt, ignore links, exclude comments", "id": "f11920:m0"}
{"signature": "def xor_fault(a, b, out, fault):", "body": "if (a != b) == out:<EOL><INDENT>return fault == <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>return fault == <NUM_LIT:1><EOL><DEDENT>", "docstring": "Returns True if XOR(a, b) == out and fault == 0 or XOR(a, b) != out and fault == 1.", "id": "f11931:m0"}
{"signature": "def irreducible_components(constraint):", "body": "<EOL>return _irreducible_components(constraint.configurations, constraint.variables)<EOL>", "docstring": "Determine the sets of variables that are irreducible.\n\n    Let V(C) denote the variables of constraint C. For a configuration x, let x|A denote the\n    restriction of the configuration to the variables of A. Constraint C is reducible if there\n    is a partition of V(C) into nonempty subsets A and B, and two constraints C_A and C_B, with\n    V(C_A) = A and C_B V(C_B) = B, such that a configuration x is feasible in C if and only if x|A\n    is feasible in C_A and x|B is feasible in C_B. A constraint is irreducible if it is not\n    reducible.\n\n    Args:\n        constraint (:obj:`.Constraint`):\n            Constraint to attempt to reduce.\n\n    Returns:\n        list[tuple]: List of tuples in which each tuple is a set of variables that is irreducible.\n\n    Examples:\n        This example reduces a constraint, created by specifying its valid configurations, to two\n        constraints. The original constraint, that valid configurations for a,b,c are 0,0,1 and\n        1,1,1, can be represented by two reduced constraints, for example, (c=1) & (a=b). For\n        comparison, an attempt to reduce a constraint representing an AND gate fails to find a\n        valid reduction.\n\n        >>> import dwavebinarycsp\n        >>> const = dwavebinarycsp.Constraint.from_configurations([(0, 0, 1), (1, 1, 1)],\n        ...                                                       ['a', 'b', 'c'], dwavebinarycsp.BINARY)\n        >>> dwavebinarycsp.irreducible_components(const)\n        [('c',), ('a', 'b')]\n        >>> const_and = dwavebinarycsp.Constraint.from_configurations([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 1)],\n        ...                                                           
['a', 'b', 'c'], dwavebinarycsp.BINARY)\n        >>> dwavebinarycsp.irreducible_components(const_and)\n        [('a', 'b', 'c')]", "id": "f11932:m0"}
{"signature": "@dimod.decorators.vartype_argument('<STR_LIT>')<EOL>def halfadder_gate(variables, vartype=dimod.BINARY, name='<STR_LIT>'):", "body": "variables = tuple(variables)<EOL>if vartype is dimod.BINARY:<EOL><INDENT>configs = frozenset([(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>),<EOL>(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>),<EOL>(<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>),<EOL>(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>)])<EOL><DEDENT>else:<EOL><INDENT>configs = frozenset([(-<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>),<EOL>(-<NUM_LIT:1>, +<NUM_LIT:1>, +<NUM_LIT:1>, -<NUM_LIT:1>),<EOL>(+<NUM_LIT:1>, -<NUM_LIT:1>, +<NUM_LIT:1>, -<NUM_LIT:1>),<EOL>(+<NUM_LIT:1>, +<NUM_LIT:1>, -<NUM_LIT:1>, +<NUM_LIT:1>)])<EOL><DEDENT>def func(augend, addend, sum_, carry):<EOL><INDENT>total = (augend > <NUM_LIT:0>) + (addend > <NUM_LIT:0>)<EOL>if total == <NUM_LIT:0>:<EOL><INDENT>return (sum_ <= <NUM_LIT:0>) and (carry <= <NUM_LIT:0>)<EOL><DEDENT>elif total == <NUM_LIT:1>:<EOL><INDENT>return (sum_ > <NUM_LIT:0>) and (carry <= <NUM_LIT:0>)<EOL><DEDENT>elif total == <NUM_LIT:2>:<EOL><INDENT>return (sum_ <= <NUM_LIT:0>) and (carry > <NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return Constraint(func, configs, variables, vartype=vartype, name=name)<EOL>", "docstring": "Half adder.\n\n    Args:\n        variables (list): Variable labels for the and gate as `[in1, in2, sum, carry]`,\n            where `in1, in2` are inputs to be added and `sum` and 'carry' the resultant\n            outputs.\n        vartype (Vartype, optional, default='BINARY'): Variable type. 
Accepted\n            input values:\n\n            * Vartype.SPIN, 'SPIN', {-1, 1}\n            * Vartype.BINARY, 'BINARY', {0, 1}\n        name (str, optional, default='HALF_ADDER'): Name for the constraint.\n\n    Returns:\n        Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n        assigned values that match the valid states of a Boolean half adder.\n\n    Examples:\n        >>> import dwavebinarycsp\n        >>> import dwavebinarycsp.factories.constraint.gates as gates\n        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n        >>> csp.add_constraint(gates.halfadder_gate(['a', 'b', 'total', 'carry'], name='HA1'))\n        >>> csp.check({'a': 1, 'b': 1, 'total': 0, 'carry': 1})\n        True", "id": "f11933:m3"}
{"signature": "@dimod.decorators.vartype_argument('<STR_LIT>')<EOL>def and_gate(variables, vartype=dimod.BINARY, name='<STR_LIT>'):", "body": "variables = tuple(variables)<EOL>if vartype is dimod.BINARY:<EOL><INDENT>configurations = frozenset([(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>),<EOL>(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>),<EOL>(<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>),<EOL>(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>)])<EOL>def func(in1, in2, out): return (in1 and in2) == out<EOL><DEDENT>else:<EOL><INDENT>configurations = frozenset([(-<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>),<EOL>(-<NUM_LIT:1>, +<NUM_LIT:1>, -<NUM_LIT:1>),<EOL>(+<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>),<EOL>(+<NUM_LIT:1>, +<NUM_LIT:1>, +<NUM_LIT:1>)])<EOL>def func(in1, in2, out): return ((in1 > <NUM_LIT:0>) and (in2 > <NUM_LIT:0>)) == (out > <NUM_LIT:0>)<EOL><DEDENT>return Constraint(func, configurations, variables, vartype=vartype, name=name)<EOL>", "docstring": "AND gate.\n\n    Args:\n        variables (list): Variable labels for the and gate as `[in1, in2, out]`,\n            where `in1, in2` are inputs and `out` the gate's output.\n        vartype (Vartype, optional, default='BINARY'): Variable type. Accepted\n            input values:\n\n            * Vartype.SPIN, 'SPIN', {-1, 1}\n            * Vartype.BINARY, 'BINARY', {0, 1}\n        name (str, optional, default='AND'): Name for the constraint.\n\n    Returns:\n        Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n        assigned values that match the valid states of an AND gate.\n\n    Examples:\n        >>> import dwavebinarycsp\n        >>> import dwavebinarycsp.factories.constraint.gates as gates\n        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n        >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c'], name='AND1'))\n        >>> csp.check({'a': 1, 'b': 0, 'c': 0})\n        True", "id": "f11933:m0"}
{"signature": "@dimod.decorators.vartype_argument('<STR_LIT>')<EOL>def sat2in4(pos, neg=tuple(), vartype=dimod.BINARY, name='<STR_LIT>'):", "body": "pos = tuple(pos)<EOL>neg = tuple(neg)<EOL>variables = pos + neg<EOL>if len(variables) != <NUM_LIT:4>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if neg and (len(neg) < <NUM_LIT:4>):<EOL><INDENT>const = sat2in4(pos=variables, vartype=vartype)  <EOL>for v in neg:<EOL><INDENT>const.flip_variable(v)<EOL>const.name = name  <EOL><DEDENT>return const<EOL><DEDENT>if vartype is dimod.BINARY:<EOL><INDENT>configurations = frozenset([(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>),<EOL>(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>),<EOL>(<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>),<EOL>(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>),<EOL>(<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>),<EOL>(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>)])<EOL><DEDENT>else:<EOL><INDENT>configurations = frozenset([(-<NUM_LIT:1>, -<NUM_LIT:1>, +<NUM_LIT:1>, +<NUM_LIT:1>),<EOL>(-<NUM_LIT:1>, +<NUM_LIT:1>, -<NUM_LIT:1>, +<NUM_LIT:1>),<EOL>(+<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>, +<NUM_LIT:1>),<EOL>(-<NUM_LIT:1>, +<NUM_LIT:1>, +<NUM_LIT:1>, -<NUM_LIT:1>),<EOL>(+<NUM_LIT:1>, -<NUM_LIT:1>, +<NUM_LIT:1>, -<NUM_LIT:1>),<EOL>(+<NUM_LIT:1>, +<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>)])<EOL><DEDENT>def func(a, b, c, d):<EOL><INDENT>if a == b:<EOL><INDENT>return (b != c) and (c == d)<EOL><DEDENT>elif a == c:<EOL><INDENT>return b == d<EOL><DEDENT>else:<EOL><INDENT>return a == d<EOL><DEDENT><DEDENT>return Constraint(func, configurations, variables, vartype=vartype, name=name)<EOL>", "docstring": "Two-in-four (2-in-4) satisfiability.\n\n    Args:\n        pos (iterable):\n            Variable labels, as an iterable, for non-negated variables of the constraint.\n            Exactly four variables are specified by `pos` and `neg` together.\n        neg (tuple):\n            Variable labels, as an iterable, for negated 
variables of the constraint.\n            Exactly four variables are specified by `pos` and `neg` together.\n        vartype (Vartype, optional, default='BINARY'): Variable type. Accepted\n            input values:\n\n            * Vartype.SPIN, 'SPIN', {-1, 1}\n            * Vartype.BINARY, 'BINARY', {0, 1}\n        name (str, optional, default='2-in-4'): Name for the constraint.\n\n    Returns:\n        Constraint(:obj:`.Constraint`): Constraint that is satisfied when its variables are\n        assigned values that satisfy a two-in-four satisfiability problem.\n\n    Examples:\n        >>> import dwavebinarycsp\n        >>> import dwavebinarycsp.factories.constraint.sat as sat\n        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n        >>> csp.add_constraint(sat.sat2in4(['w', 'x', 'y', 'z'], vartype='BINARY', name='sat1'))\n        >>> csp.check({'w': 1, 'x': 1, 'y': 0, 'z': 0})\n        True", "id": "f11935:m0"}
{"signature": "def random_2in4sat(num_variables, num_clauses, vartype=dimod.BINARY, satisfiable=True):", "body": "if num_variables < <NUM_LIT:4>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if num_clauses > <NUM_LIT:16> * _nchoosek(num_variables, <NUM_LIT:4>):  <EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>csp = ConstraintSatisfactionProblem(vartype)<EOL>variables = list(range(num_variables))<EOL>constraints = set()<EOL>if satisfiable:<EOL><INDENT>values = tuple(vartype.value)<EOL>planted_solution = {v: choice(values) for v in variables}<EOL>configurations = [(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>), (<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>), (<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>),<EOL>(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>), (<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>), (<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>)]<EOL>while len(constraints) < num_clauses:<EOL><INDENT>constraint_variables = sorted(sample(variables, <NUM_LIT:4>))<EOL>config = choice(configurations)<EOL>pos = tuple(v for idx, v in enumerate(constraint_variables) if config[idx] == (planted_solution[v] > <NUM_LIT:0>))<EOL>neg = tuple(v for idx, v in enumerate(constraint_variables) if config[idx] != (planted_solution[v] > <NUM_LIT:0>))<EOL>const = sat2in4(pos=pos, neg=neg, vartype=vartype)<EOL>assert const.check(planted_solution)<EOL>constraints.add(const)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>while len(constraints) < num_clauses:<EOL><INDENT>constraint_variables = sorted(sample(variables, <NUM_LIT:4>))<EOL>pos = tuple(v for v in constraint_variables if random() > <NUM_LIT>)<EOL>neg = tuple(v for v in constraint_variables if v not in pos)<EOL>const = sat2in4(pos=pos, neg=neg, vartype=vartype)<EOL>constraints.add(const)<EOL><DEDENT><DEDENT>for const in constraints:<EOL><INDENT>csp.add_constraint(const)<EOL><DEDENT>for v in variables:<EOL><INDENT>csp.add_variable(v)<EOL><DEDENT>return csp<EOL>", "docstring": "Random 
two-in-four (2-in-4) constraint satisfaction problem.\n\n    Args:\n        num_variables (integer): Number of variables (at least four).\n        num_clauses (integer): Number of constraints that together constitute the\n            constraint satisfaction problem.\n        vartype (Vartype, optional, default='BINARY'): Variable type. Accepted\n            input values:\n\n            * Vartype.SPIN, 'SPIN', {-1, 1}\n            * Vartype.BINARY, 'BINARY', {0, 1}\n        satisfiable (bool, optional, default=True): True if the CSP can be satisfied.\n\n    Returns:\n        CSP (:obj:`.ConstraintSatisfactionProblem`): CSP that is satisfied when its variables\n        are assigned values that satisfy a two-in-four satisfiability problem.\n\n    Examples:\n        This example creates a CSP with 6 variables and two random constraints and checks\n        whether a particular assignment of variables satisifies it.\n\n        >>> import dwavebinarycsp\n        >>> import dwavebinarycsp.factories as sat\n        >>> csp = sat.random_2in4sat(6, 2)\n        >>> csp.constraints    # doctest: +SKIP\n        [Constraint.from_configurations(frozenset({(1, 0, 1, 0), (1, 0, 0, 1), (1, 1, 1, 1), (0, 1, 1, 0), (0, 0, 0, 0),\n         (0, 1, 0, 1)}), (2, 4, 0, 1), Vartype.BINARY, name='2-in-4'),\n         Constraint.from_configurations(frozenset({(1, 0, 1, 1), (1, 1, 0, 1), (1, 1, 1, 0), (0, 0, 0, 1),\n         (0, 1, 0, 0), (0, 0, 1, 0)}), (1, 2, 4, 5), Vartype.BINARY, name='2-in-4')]\n        >>> csp.check({0: 1, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0})       # doctest: +SKIP\n        True", "id": "f11938:m0"}
{"signature": "def random_xorsat(num_variables, num_clauses, vartype=dimod.BINARY, satisfiable=True):", "body": "if num_variables < <NUM_LIT:3>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if num_clauses > <NUM_LIT:8> * _nchoosek(num_variables, <NUM_LIT:3>):  <EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>csp = ConstraintSatisfactionProblem(vartype)<EOL>variables = list(range(num_variables))<EOL>constraints = set()<EOL>if satisfiable:<EOL><INDENT>values = tuple(vartype.value)<EOL>planted_solution = {v: choice(values) for v in variables}<EOL>configurations = [(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>), (<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>), (<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>), (<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>)]<EOL>while len(constraints) < num_clauses:<EOL><INDENT>x, y, z = sample(variables, <NUM_LIT:3>)<EOL>if y > x:<EOL><INDENT>x, y = y, x<EOL><DEDENT>const = xor_gate([x, y, z], vartype=vartype)<EOL>config = choice(configurations)<EOL>for idx, v in enumerate(const.variables):<EOL><INDENT>if config[idx] != (planted_solution[v] > <NUM_LIT:0>):<EOL><INDENT>const.flip_variable(v)<EOL><DEDENT><DEDENT>assert const.check(planted_solution)<EOL>constraints.add(const)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>while len(constraints) < num_clauses:<EOL><INDENT>x, y, z = sample(variables, <NUM_LIT:3>)<EOL>if y > x:<EOL><INDENT>x, y = y, x<EOL><DEDENT>const = xor_gate([x, y, z], vartype=vartype)<EOL>for idx, v in enumerate(const.variables):<EOL><INDENT>if random() > <NUM_LIT>:<EOL><INDENT>const.flip_variable(v)<EOL><DEDENT><DEDENT>assert const.check(planted_solution)<EOL>constraints.add(const)<EOL><DEDENT><DEDENT>for const in constraints:<EOL><INDENT>csp.add_constraint(const)<EOL><DEDENT>for v in variables:<EOL><INDENT>csp.add_variable(v)<EOL><DEDENT>return csp<EOL>", "docstring": "Random XOR constraint satisfaction problem.\n\n    Args:\n        num_variables (integer): Number of variables (at least three).\n        num_clauses (integer): Number of 
constraints that together constitute the\n            constraint satisfaction problem.\n        vartype (Vartype, optional, default='BINARY'): Variable type. Accepted\n            input values:\n\n            * Vartype.SPIN, 'SPIN', {-1, 1}\n            * Vartype.BINARY, 'BINARY', {0, 1}\n        satisfiable (bool, optional, default=True): True if the CSP can be satisfied.\n\n    Returns:\n        CSP (:obj:`.ConstraintSatisfactionProblem`): CSP that is satisfied when its variables\n        are assigned values that satisfy a XOR satisfiability problem.\n\n    Examples:\n        This example creates a CSP with 5 variables and two random constraints and checks\n        whether a particular assignment of variables satisifies it.\n\n        >>> import dwavebinarycsp\n        >>> import dwavebinarycsp.factories as sat\n        >>> csp = sat.random_xorsat(5, 2)\n        >>> csp.constraints    # doctest: +SKIP\n        [Constraint.from_configurations(frozenset({(1, 0, 0), (1, 1, 1), (0, 1, 0), (0, 0, 1)}), (4, 3, 0),\n         Vartype.BINARY, name='XOR (0 flipped)'),\n         Constraint.from_configurations(frozenset({(1, 1, 0), (0, 1, 1), (0, 0, 0), (1, 0, 1)}), (2, 0, 4),\n         Vartype.BINARY, name='XOR (2 flipped) (0 flipped)')]\n        >>> csp.check({0: 1, 1: 0, 2: 0, 3: 1, 4: 1})       # doctest: +SKIP\n        True", "id": "f11938:m1"}
{"signature": "def multiplication_circuit(nbit, vartype=dimod.BINARY):", "body": "if nbit < <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>num_multiplier_bits = num_multiplicand_bits = nbit<EOL>csp = ConstraintSatisfactionProblem(vartype)<EOL>a = {i: '<STR_LIT>' % i for i in range(nbit)}<EOL>b = {j: '<STR_LIT>' % j for j in range(nbit)}<EOL>p = {k: '<STR_LIT>' % k for k in range(nbit + nbit)}<EOL>AND = defaultdict(dict)  <EOL>SUM = defaultdict(dict)  <EOL>CARRY = defaultdict(dict)  <EOL>for i in range(num_multiplier_bits):<EOL><INDENT>for j in range(num_multiplicand_bits):<EOL><INDENT>ai = a[i]<EOL>bj = b[j]<EOL>if i == <NUM_LIT:0> and j == <NUM_LIT:0>:<EOL><INDENT>andij = AND[i][j] = p[<NUM_LIT:0>]<EOL>gate = and_gate([ai, bj, andij], vartype=vartype, name='<STR_LIT>' % (ai, bj, andij))<EOL>csp.add_constraint(gate)<EOL>continue<EOL><DEDENT>andij = AND[i][j] = '<STR_LIT>' % (i, j)<EOL>gate = and_gate([ai, bj, andij], vartype=vartype, name='<STR_LIT>' % (ai, bj, andij))<EOL>csp.add_constraint(gate)<EOL>inputs = [andij]<EOL>if i - <NUM_LIT:1> in CARRY and j in CARRY[i - <NUM_LIT:1>]:<EOL><INDENT>inputs.append(CARRY[i - <NUM_LIT:1>][j])<EOL><DEDENT>if i - <NUM_LIT:1> in SUM and j + <NUM_LIT:1> in SUM[i - <NUM_LIT:1>]:<EOL><INDENT>inputs.append(SUM[i - <NUM_LIT:1>][j + <NUM_LIT:1>])<EOL><DEDENT>if len(inputs) == <NUM_LIT:1>:<EOL><INDENT>SUM[i][j] = andij<EOL><DEDENT>elif len(inputs) == <NUM_LIT:2>:<EOL><INDENT>if j == <NUM_LIT:0>:<EOL><INDENT>sumij = SUM[i][j] = p[i]<EOL><DEDENT>else:<EOL><INDENT>sumij = SUM[i][j] = '<STR_LIT>' % (i, j)<EOL><DEDENT>carryij = CARRY[i][j] = '<STR_LIT>' % (i, j)<EOL>name = '<STR_LIT>' % (inputs[<NUM_LIT:0>], inputs[<NUM_LIT:1>], sumij, carryij)<EOL>gate = halfadder_gate([inputs[<NUM_LIT:0>], inputs[<NUM_LIT:1>], sumij, carryij], vartype=vartype, name=name)<EOL>csp.add_constraint(gate)<EOL><DEDENT>else:<EOL><INDENT>assert len(inputs) == <NUM_LIT:3>, '<STR_LIT>'<EOL>if j == <NUM_LIT:0>:<EOL><INDENT>sumij = SUM[i][j] = 
p[i]<EOL><DEDENT>else:<EOL><INDENT>sumij = SUM[i][j] = '<STR_LIT>' % (i, j)<EOL><DEDENT>carryij = CARRY[i][j] = '<STR_LIT>' % (i, j)<EOL>name = '<STR_LIT>' % (inputs[<NUM_LIT:0>], inputs[<NUM_LIT:1>], inputs[<NUM_LIT:2>], sumij, carryij)<EOL>gate = fulladder_gate([inputs[<NUM_LIT:0>], inputs[<NUM_LIT:1>], inputs[<NUM_LIT:2>], sumij, carryij], vartype=vartype, name=name)<EOL>csp.add_constraint(gate)<EOL><DEDENT><DEDENT><DEDENT>for col in range(nbit - <NUM_LIT:1>):<EOL><INDENT>inputs = [CARRY[nbit - <NUM_LIT:1>][col], SUM[nbit - <NUM_LIT:1>][col + <NUM_LIT:1>]]<EOL>if col == <NUM_LIT:0>:<EOL><INDENT>sumout = p[nbit + col]<EOL>carryout = CARRY[nbit][col] = '<STR_LIT>' % (nbit, col)<EOL>name = '<STR_LIT>' % (inputs[<NUM_LIT:0>], inputs[<NUM_LIT:1>], sumout, carryout)<EOL>gate = halfadder_gate([inputs[<NUM_LIT:0>], inputs[<NUM_LIT:1>], sumout, carryout], vartype=vartype, name=name)<EOL>csp.add_constraint(gate)<EOL>continue<EOL><DEDENT>inputs.append(CARRY[nbit][col - <NUM_LIT:1>])<EOL>sumout = p[nbit + col]<EOL>if col < nbit - <NUM_LIT:2>:<EOL><INDENT>carryout = CARRY[nbit][col] = '<STR_LIT>' % (nbit, col)<EOL><DEDENT>else:<EOL><INDENT>carryout = p[<NUM_LIT:2> * nbit - <NUM_LIT:1>]<EOL><DEDENT>name = '<STR_LIT>' % (inputs[<NUM_LIT:0>], inputs[<NUM_LIT:1>], inputs[<NUM_LIT:2>], sumout, carryout)<EOL>gate = fulladder_gate([inputs[<NUM_LIT:0>], inputs[<NUM_LIT:1>], inputs[<NUM_LIT:2>], sumout, carryout], vartype=vartype, name=name)<EOL>csp.add_constraint(gate)<EOL><DEDENT>return csp<EOL>", "docstring": "Multiplication circuit constraint satisfaction problem.\n\n    A constraint satisfaction problem that represents the binary multiplication :math:`ab=p`,\n    where the multiplicands are binary variables of length `nbit`; for example,\n    :math:`a_0 + 2a_1 + 4a_2 +... 
+2^ma_{nbit}`.\n\n    The square below shows a graphic representation of the circuit::\n\n      ________________________________________________________________________________\n      |                                         and20         and10         and00    |\n      |                                           |             |             |      |\n      |                           and21         add11\u2500\u2500and11  add01\u2500\u2500and01    |      |\n      |                             |\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518|\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518|             |      |\n      |             and22         add12\u2500\u2500and12  add02\u2500\u2500and02    |             |      |\n      |               |\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518|\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518|             |             |      |\n      |             add13\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500add03           |             |             |      |\n      |  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518|             |             |             |             |      |\n      | p5            p4            p3            p2            p1            p0     |\n      --------------------------------------------------------------------------------\n\n    Args:\n        nbit (int): Number of bits in the multiplicands.\n        vartype (Vartype, optional, default='BINARY'): Variable type. 
Accepted\n            input values:\n\n            * Vartype.SPIN, 'SPIN', {-1, 1}\n            * Vartype.BINARY, 'BINARY', {0, 1}\n\n    Returns:\n        CSP (:obj:`.ConstraintSatisfactionProblem`): CSP that is satisfied when variables\n        :math:`a,b,p` are assigned values that correctly solve binary multiplication :math:`ab=p`.\n\n    Examples:\n        This example creates a multiplication circuit CSP that multiplies two 3-bit numbers,\n        which is then formulated as a binary quadratic model (BQM). It fixes the multiplacands\n        as :math:`a=5, b=6` (:math:`101` and :math:`110`) and uses a simulated annealing sampler\n        to find the product, :math:`p=30` (:math:`111100`).\n\n        >>> import dwavebinarycsp\n        >>> from dwavebinarycsp.factories.csp.circuits import multiplication_circuit\n        >>> import neal\n        >>> csp = multiplication_circuit(3)\n        >>> bqm = dwavebinarycsp.stitch(csp)\n        >>> bqm.fix_variable('a0', 1); bqm.fix_variable('a1', 0); bqm.fix_variable('a2', 1)\n        >>> bqm.fix_variable('b0', 1); bqm.fix_variable('b1', 1); bqm.fix_variable('b2', 0)\n        >>> sampler = neal.SimulatedAnnealingSampler()\n        >>> response = sampler.sample(bqm)\n        >>> p = next(response.samples(n=1, sorted_by='energy'))\n        >>> print(p['p0'], p['p1'], p['p2'], p['p3'], p['p4'], p['p5'])    # doctest: +SKIP\n        1 1 1 1 0 0", "id": "f11939:m0"}
{"signature": "def projection(self, variables):", "body": "<EOL>variables = set(variables)<EOL>if not variables.issubset(self.variables):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>idxs = [i for i, v in enumerate(self.variables) if v in variables]<EOL>configurations = frozenset(tuple(config[i] for i in idxs) for config in self.configurations)<EOL>variables = tuple(self.variables[i] for i in idxs)<EOL>return self.from_configurations(configurations, variables, self.vartype)<EOL>", "docstring": "Create a new constraint that is the projection onto a subset of the variables.\n\n        Args:\n            variables (iterable):\n                Subset of the constraint's variables.\n\n        Returns:\n            :obj:`.Constraint`: A new constraint over a subset of the variables.\n\n        Examples:\n\n            >>> import dwavebinarycsp\n            ...\n            >>> const = dwavebinarycsp.Constraint.from_configurations([(0, 0), (0, 1)],\n            ...                                                       ['a', 'b'],\n            ...                                                       dwavebinarycsp.BINARY)\n            >>> proj = const.projection(['a'])\n            >>> proj.variables\n            ['a']\n            >>> proj.configurations\n            {(0,)}", "id": "f11942:c0:m14"}
{"signature": "def __len__(self):", "body": "return self.variables.__len__()<EOL>", "docstring": "The number of variables.", "id": "f11942:c0:m3"}
{"signature": "def fix_variable(self, v, value):", "body": "variables = self.variables<EOL>try:<EOL><INDENT>idx = variables.index(v)<EOL><DEDENT>except ValueError:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(v))<EOL><DEDENT>if value not in self.vartype.value:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(self.vartype.value, value))<EOL><DEDENT>configurations = frozenset(config[:idx] + config[idx + <NUM_LIT:1>:]  <EOL>for config in self.configurations<EOL>if config[idx] == value)<EOL>if not configurations:<EOL><INDENT>raise UnsatError(\"<STR_LIT>\".format(v, value))<EOL><DEDENT>variables = variables[:idx] + variables[idx + <NUM_LIT:1>:]<EOL>self.configurations = configurations<EOL>self.variables = variables<EOL>def func(*args): return args in configurations<EOL>self.func = func<EOL>self.name = '<STR_LIT>'.format(self.name, v, value)<EOL>", "docstring": "Fix the value of a variable and remove it from the constraint.\n\n        Args:\n            v (variable):\n                Variable in the constraint to be set to a constant value.\n\n            val (int):\n                Value assigned to the variable. Values must match the :class:`.Vartype` of the\n                constraint.\n\n        Examples:\n            This example creates a constraint that :math:`a \\\\ne b` on binary variables,\n            fixes variable a to 0, and tests two candidate solutions.\n\n            >>> import dwavebinarycsp\n            >>> const = dwavebinarycsp.Constraint.from_func(operator.ne,\n            ...             ['a', 'b'], dwavebinarycsp.BINARY)\n            >>> const.fix_variable('a', 0)\n            >>> const.check({'b': 1})\n            True\n            >>> const.check({'b': 0})\n            False", "id": "f11942:c0:m11"}
{"signature": "@classmethod<EOL><INDENT>@dimod.decorators.vartype_argument('<STR_LIT>')<EOL>def from_func(cls, func, variables, vartype, name=None):<DEDENT>", "body": "variables = tuple(variables)<EOL>configurations = frozenset(config<EOL>for config in itertools.product(vartype.value, repeat=len(variables))<EOL>if func(*config))<EOL>return cls(func, configurations, variables, vartype, name)<EOL>", "docstring": "Construct a constraint from a validation function.\n\n        Args:\n            func (function):\n                Function that evaluates True when the variables satisfy the constraint.\n\n            variables (iterable):\n                Iterable of variable labels.\n\n            vartype (:class:`~dimod.Vartype`/str/set):\n                Variable type for the constraint. Accepted input values:\n\n                * :attr:`~dimod.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``\n                * :attr:`~dimod.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``\n\n            name (string, optional, default='Constraint'):\n                Name for the constraint.\n\n        Examples:\n            This example creates a constraint that binary variables `a` and `b`\n            are not equal.\n\n            >>> import dwavebinarycsp\n            >>> import operator\n            >>> const = dwavebinarycsp.Constraint.from_func(operator.ne, ['a', 'b'], 'BINARY')\n            >>> print(const.name)\n            Constraint\n            >>> (0, 1) in const.configurations\n            True\n\n            This example creates a constraint that :math:`out = NOT(x)`\n            for spin variables.\n\n            >>> import dwavebinarycsp\n            >>> def not_(y, x):  # y=NOT(x) for spin variables\n            ...     return (y == -x)\n            ...\n            >>> const = dwavebinarycsp.Constraint.from_func(\n            ...               not_,\n            ...               ['out', 'in'],\n            ...               {1, -1},\n            ...               
name='not_spin')\n            >>> print(const.name)\n            not_spin\n            >>> (1, -1) in const.configurations\n            True", "id": "f11942:c0:m1"}
{"signature": "def check(self, solution):", "body": "return all(constraint.check(solution) for constraint in self.constraints)<EOL>", "docstring": "Check that a solution satisfies all of the constraints.\n\n        Args:\n            solution (container):\n                An assignment of values for the variables in the constraint satisfaction problem.\n\n        Returns:\n            bool: True if the solution satisfies all of the constraints; False otherwise.\n\n        Examples:\n            This example creates a binary-valued constraint satisfaction problem, adds\n            two logic gates implementing Boolean constraints, :math:`c = a \\wedge b`\n            and :math:`d = a \\oplus c`, and verifies that the combined problem is satisfied\n            for a given assignment.\n\n            >>> import dwavebinarycsp\n            >>> import dwavebinarycsp.factories.constraint.gates as gates\n            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n            >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c']))  # add an AND gate\n            >>> csp.add_constraint(gates.xor_gate(['a', 'c', 'd']))  # add an XOR gate\n            >>> csp.check({'a': 1, 'b': 0, 'c': 0, 'd': 1})\n            True", "id": "f11943:c0:m4"}
{"signature": "def add_variable(self, v):", "body": "self.variables[v]<EOL>", "docstring": "Add a variable.\n\n        Args:\n            v (variable):\n                Variable in the constraint satisfaction problem. May be of any type that\n                can be a dict key.\n\n        Examples:\n            This example adds two variables, one of which is already used in a constraint\n            of the constraint satisfaction problem.\n\n            >>> import dwavebinarycsp\n            >>> import operator\n            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)\n            >>> csp.add_constraint(operator.eq, ['a', 'b'])\n            >>> csp.add_variable('a')  # does nothing, already added as part of the constraint\n            >>> csp.add_variable('c')\n            >>> csp.check({'a': -1, 'b': -1, 'c': 1})\n            True\n            >>> csp.check({'a': -1, 'b': -1, 'c': -1})\n            True", "id": "f11943:c0:m3"}
{"signature": "def add_constraint(self, constraint, variables=tuple()):", "body": "if isinstance(constraint, Constraint):<EOL><INDENT>if variables and (tuple(variables) != constraint.variables):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>elif isinstance(constraint, Callable):<EOL><INDENT>constraint = Constraint.from_func(constraint, variables, self.vartype)<EOL><DEDENT>elif isinstance(constraint, Iterable):<EOL><INDENT>constraint = Constraint.from_configurations(constraint, variables, self.vartype)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>self.constraints.append(constraint)<EOL>for v in constraint.variables:<EOL><INDENT>self.variables[v].append(constraint)<EOL><DEDENT>", "docstring": "Add a constraint.\n\n        Args:\n            constraint (function/iterable/:obj:`.Constraint`):\n                Constraint definition in one of the supported formats:\n\n                1. Function, with input arguments matching the order and\n                   :attr:`~.ConstraintSatisfactionProblem.vartype` type of the `variables`\n                   argument, that evaluates True when the constraint is satisfied.\n                2. List explicitly specifying each allowed configuration as a tuple.\n                3. :obj:`.Constraint` object built either explicitly or by :mod:`dwavebinarycsp.factories`.\n\n            variables(iterable):\n                Variables associated with the constraint. 
Not required when `constraint` is\n                a :obj:`.Constraint` object.\n\n        Examples:\n            This example defines a function that evaluates True when the constraint is satisfied.\n            The function's input arguments match the order and type of the `variables` argument.\n\n            >>> import dwavebinarycsp\n            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n            >>> def all_equal(a, b, c):  # works for both dwavebinarycsp.BINARY and dwavebinarycsp.SPIN\n            ...     return (a == b) and (b == c)\n            >>> csp.add_constraint(all_equal, ['a', 'b', 'c'])\n            >>> csp.check({'a': 0, 'b': 0, 'c': 0})\n            True\n            >>> csp.check({'a': 0, 'b': 0, 'c': 1})\n            False\n\n            This example explicitly lists allowed configurations.\n\n            >>> import dwavebinarycsp\n            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)\n            >>> eq_configurations = {(-1, -1), (1, 1)}\n            >>> csp.add_constraint(eq_configurations, ['v0', 'v1'])\n            >>> csp.check({'v0': -1, 'v1': +1})\n            False\n            >>> csp.check({'v0': -1, 'v1': -1})\n            True\n\n            This example uses a :obj:`.Constraint` object built by :mod:`dwavebinarycsp.factories`.\n\n            >>> import dwavebinarycsp\n            >>> import dwavebinarycsp.factories.constraint.gates as gates\n            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)\n            >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c']))  # add an AND gate\n            >>> csp.add_constraint(gates.xor_gate(['a', 'c', 'd']))  # add an XOR gate\n            >>> csp.check({'a': 1, 'b': 0, 'c': 0, 'd': 1})\n            True", "id": "f11943:c0:m2"}
{"signature": "def fix_variable(self, v, value):", "body": "if v not in self.variables:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(v))<EOL><DEDENT>for constraint in self.variables[v]:<EOL><INDENT>constraint.fix_variable(v, value)<EOL><DEDENT>del self.variables[v]<EOL>", "docstring": "Fix the value of a variable and remove it from the constraint satisfaction problem.\n\n        Args:\n            v (variable):\n                Variable to be fixed in the constraint satisfaction problem.\n\n            value (int):\n                Value assigned to the variable. Values must match the\n                :attr:`~.ConstraintSatisfactionProblem.vartype` of the constraint\n                satisfaction problem.\n\n        Examples:\n            This example creates a spin-valued constraint satisfaction problem, adds two constraints,\n            :math:`a = b` and :math:`b \\\\ne c`, and fixes variable b to +1.\n\n            >>> import dwavebinarycsp\n            >>> import operator\n            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)\n            >>> csp.add_constraint(operator.eq, ['a', 'b'])\n            >>> csp.add_constraint(operator.ne, ['b', 'c'])\n            >>> csp.check({'a': +1, 'b': +1, 'c': -1})\n            True\n            >>> csp.check({'a': -1, 'b': -1, 'c': +1})\n            True\n            >>> csp.fix_variable('b', +1)\n            >>> csp.check({'a': +1, 'b': +1, 'c': -1})  # 'b' is ignored\n            True\n            >>> csp.check({'a': -1, 'b': -1, 'c': +1})\n            False\n            >>> csp.check({'a': +1, 'c': -1})\n            True\n            >>> csp.check({'a': -1, 'c': +1})\n            False", "id": "f11943:c0:m5"}
{"signature": "def _bqm_from_1sat(constraint):", "body": "configurations = constraint.configurations<EOL>num_configurations = len(configurations)<EOL>bqm = dimod.BinaryQuadraticModel.empty(constraint.vartype)<EOL>if num_configurations == <NUM_LIT:1>:<EOL><INDENT>val, = next(iter(configurations))<EOL>v, = constraint.variables<EOL>bqm.add_variable(v, -<NUM_LIT:1> if val > <NUM_LIT:0> else +<NUM_LIT:1>, vartype=dimod.SPIN)<EOL><DEDENT>else:<EOL><INDENT>bqm.add_variables_from((v, <NUM_LIT:0.0>) for v in constraint.variables)<EOL><DEDENT>return bqm<EOL>", "docstring": "create a bqm for a constraint with only one variable\n\n    bqm will have exactly classical gap 2.", "id": "f11947:m1"}
{"signature": "@property<EOL><INDENT>def options(self):<DEDENT>", "body": "return list(self._results.keys())<EOL>", "docstring": "Returns a list of strings to represent the options for the poll, in\n        the order they were given when the poll was created.", "id": "f11956:c0:m4"}
{"signature": "def result_at(self, index):", "body": "return list(self._results.items())[index]<EOL>", "docstring": "Returns a tuple containing a string representing the option and an\n        int which specify the current votes for that option.\n\n        :param int index: The index of the wanted option in the options list.", "id": "f11956:c0:m9"}
{"signature": "def submit_poll(self, poll, *, request_policy=None):", "body": "if poll.id is not None:<EOL><INDENT>raise ExistingPoll()<EOL><DEDENT>options = poll.options<EOL>data = {<EOL>'<STR_LIT:title>': poll.title,<EOL>'<STR_LIT>': options,<EOL>'<STR_LIT>': poll.multi,<EOL>'<STR_LIT>': poll.dupcheck,<EOL>'<STR_LIT>': poll.captcha<EOL>}<EOL>return self._http_client.post(self._POLLS,<EOL>data=data,<EOL>request_policy=request_policy,<EOL>cls=strawpoll.Poll)<EOL>", "docstring": "Submits a poll on strawpoll.\n\n        :param poll: The poll to submit.\n        :type poll: :class:`Poll`\n        :param request_policy: Overrides :attr:`API.requests_policy` for that \\\n        request.\n        :type request_policy: Optional[:class:`RequestsPolicy`]\n\n        :raises ExistingPoll: This poll instance has already been submitted.\n        :raises HTTPException: The submission failed.\n\n        :returns: The given poll updated with the data sent back from the submission.\n        :rtype: :class:`Poll`\n\n        .. note::\n            Only polls that have a non empty title and between 2 and 30 options\n            can be submitted.", "id": "f11961:c0:m4"}
{"signature": "def _get_digest(self):", "body": "return hmac.new(<EOL>self._secret, request.data, hashlib.sha1).hexdigest() if self._secret else None<EOL>", "docstring": "Return message digest if a secret key was provided", "id": "f11965:c0:m2"}
{"signature": "def main():", "body": "usage = \"<STR_LIT>\"<EOL>parser = optparse.OptionParser(usage=usage)<EOL>parser.add_option(<EOL>\"<STR_LIT>\", \"<STR_LIT>\",<EOL>action=\"<STR_LIT:store_true>\", dest=\"<STR_LIT>\", default=False,<EOL>help=\"<STR_LIT>\")<EOL>parser.add_option(<EOL>\"<STR_LIT>\", \"<STR_LIT>\",<EOL>action=\"<STR_LIT:store>\", type=\"<STR_LIT:string>\", dest=\"<STR_LIT>\",<EOL>default='<STR_LIT>',<EOL>help=\"<STR_LIT>\")<EOL>parser.add_option(<EOL>\"<STR_LIT>\", \"<STR_LIT>\",<EOL>action=\"<STR_LIT:store>\", type=\"<STR_LIT:string>\", dest=\"<STR_LIT>\",<EOL>default='<STR_LIT>',<EOL>help=(\"<STR_LIT>\" +<EOL>\"<STR_LIT>\"))<EOL>(options, args) = parser.parse_args()<EOL>if options.verbose:<EOL><INDENT>log_level = logging.DEBUG<EOL><DEDENT>else:<EOL><INDENT>log_level = logging.INFO<EOL><DEDENT>logging.basicConfig(level=log_level,<EOL>format=\"<STR_LIT>\")<EOL>curdir = os.getcwd()<EOL>testbinary = os.path.join(curdir, '<STR_LIT>', '<STR_LIT:test>')<EOL>if not os.path.exists(testbinary):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % testbinary)<EOL><DEDENT>coveragebinary = os.path.join(curdir, '<STR_LIT>', '<STR_LIT>')<EOL>if not os.path.exists(coveragebinary):<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>coveragebinary = '<STR_LIT>'<EOL><DEDENT>logger.info(\"<STR_LIT>\")<EOL>parts = [coveragebinary, '<STR_LIT>', testbinary]<EOL>if options.test_args:<EOL><INDENT>parts.append(options.test_args)<EOL><DEDENT>system(\"<STR_LIT:U+0020>\".join(parts))<EOL>logger.debug(\"<STR_LIT>\")<EOL>if options.output_dir:<EOL><INDENT>coverage_dir = options.output_dir<EOL>open_in_browser = False<EOL><DEDENT>else:<EOL><INDENT>coverage_dir = '<STR_LIT>'  <EOL>open_in_browser = True<EOL><DEDENT>system(\"<STR_LIT>\" % (coveragebinary, coverage_dir))<EOL>logger.info(\"<STR_LIT>\", coverage_dir)<EOL>if open_in_browser:<EOL><INDENT>index_file = os.path.abspath(<EOL>os.path.join(coverage_dir, '<STR_LIT>'))<EOL>logger.debug(\"<STR_LIT>\", 
index_file)<EOL>webbrowser.open('<STR_LIT>' + index_file)<EOL>logger.info(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Create coverage reports and open them in the browser.", "id": "f11970:m1"}
{"signature": "def system(command, input=None):", "body": "logger.debug(\"<STR_LIT>\", command)<EOL>p = subprocess.Popen(command,<EOL>shell=True,<EOL>stdin=subprocess.PIPE,<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE,<EOL>close_fds=MUST_CLOSE_FDS)<EOL>stdoutdata, stderrdata = p.communicate(input=input)<EOL>result = stdoutdata + stderrdata<EOL>if p.returncode:<EOL><INDENT>logger.error(\"<STR_LIT>\",<EOL>command)<EOL>logger.error(\"<STR_LIT>\", p.returncode)<EOL>logger.error(\"<STR_LIT>\")<EOL>logger.error(result)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>logger.info(result)<EOL>", "docstring": "commands.getoutput() replacement that also works on windows.\n\n    Code mostly copied from zc.buildout.", "id": "f11970:m0"}
{"signature": "def after_scenario(context, scenario):", "body": "context.log.debug(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% scenario.name)<EOL>", "docstring": "Add an entry to the debug log after scenario was finished", "id": "f11975:m1"}
{"signature": "def before_scenario(context, scenario):", "body": "context.log.debug(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% scenario.name)<EOL>context.params = {}<EOL>context.exception = None<EOL>context.result = None<EOL>", "docstring": "Add entry to the debug log befere starting the scenario", "id": "f11975:m0"}
{"signature": "def url_quote(string):", "body": "return quote(string.encode('<STR_LIT:utf8>'), safe='<STR_LIT>')<EOL>", "docstring": "Percent encode a string as ASCII so that it is a valid part of a URI\n\n    :param string: str\n    :returns: ASCII string", "id": "f11977:m5"}
{"signature": "def match_part(string, part):", "body": "if not string or not re.match('<STR_LIT>' + PARTS[part] + '<STR_LIT>', string):<EOL><INDENT>raise ValueError('<STR_LIT>'.format(part, PARTS[part]))<EOL><DEDENT>", "docstring": "Raise an exception if string doesn't match a part's regex\n\n    :param string: str\n    :param part: a key in the PARTS dict\n    :raises: ValueError, TypeError", "id": "f11977:m3"}
{"signature": "def is_hub_key(value):", "body": "try:<EOL><INDENT>parse_hub_key(value)<EOL>return True<EOL><DEDENT>except (ValueError, TypeError):<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Test if a value could be a hub key\n    :param value: the value to test if it is a hub key\n    :returns: True if it is a hub key", "id": "f11977:m2"}
{"signature": "def generate_hub_key(resolver_id, hub_id, repository_id, entity_type, entity_id=None):", "body": "parsed = urlparse(resolver_id)<EOL>if not parsed.scheme:<EOL><INDENT>parsed = parsed._replace(scheme=PROTOCOL, netloc=idna_encode(parsed.path.lower()), path=u'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>parsed = parsed._replace(netloc=idna_encode(parsed.netloc.lower()))<EOL><DEDENT>resolver_id = urlunparse(parsed)<EOL>hub_id = url_quote(hub_id.lower())<EOL>if not entity_id:<EOL><INDENT>entity_id = str(uuid.uuid4()).replace('<STR_LIT:->', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>match_part(entity_id, '<STR_LIT>')<EOL><DEDENT>match_part(resolver_id, '<STR_LIT>')<EOL>match_part(hub_id, '<STR_LIT>')<EOL>match_part(repository_id, '<STR_LIT>')<EOL>match_part(entity_type, '<STR_LIT>')<EOL>hub_key = SEPARATOR.join(<EOL>[resolver_id, SCHEMA, hub_id, repository_id, entity_type, entity_id])<EOL>return hub_key<EOL>", "docstring": "Create and return an array of hub keys\n    :param resolver_id: the service that can resolve this key\n    :param hub_id: the unique id of the hub\n    :param repository_id: the type of id that the provider recognises\n    :param entity_type: the type of the entity to which the key refers.\n    :param entity_id: ID of entity (UUID)\n    :returns: a hub key\n    :raises:\n    :AttributeError: if a parameter has a bad value\n    :TypeError: if a parameter has a bad value\n    :ValueError: if a parameter has a bad value", "id": "f11977:m6"}
{"signature": "def press_key(self, key, mode=<NUM_LIT:0>):", "body": "if isinstance(key, str):<EOL><INDENT>assert key in KEYS, '<STR_LIT>'.format(key)<EOL>key = KEYS[key]<EOL><DEDENT>_LOGGER.info('<STR_LIT>', self.__get_key_name(key))<EOL>return self.rq('<STR_LIT>', OrderedDict([('<STR_LIT:key>', key), ('<STR_LIT>', mode)]))<EOL>", "docstring": "modes:\n    0 -> simple press\n    1 -> long press\n    2 -> release after long press", "id": "f11988:c0:m41"}
{"signature": "def get_refkey(self, obj, referent):", "body": "if isinstance(obj, dict):<EOL><INDENT>for k, v in obj.items():<EOL><INDENT>if v is referent:<EOL><INDENT>return \"<STR_LIT>\" % k<EOL><DEDENT><DEDENT><DEDENT>for k in dir(obj) + ['<STR_LIT>']:<EOL><INDENT>if getattr(obj, k, None) is referent:<EOL><INDENT>return \"<STR_LIT>\" % k<EOL><DEDENT><DEDENT>return \"<STR_LIT>\"<EOL>", "docstring": "Return the dict key or attribute name of obj which refers to\n        referent.", "id": "f11991:c0:m2"}
{"signature": "def get_repr(self, obj, referent=None):", "body": "objtype = type(obj)<EOL>typename = str(objtype.__module__) + \"<STR_LIT:.>\" + objtype.__name__<EOL>prettytype = typename.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>name = getattr(obj, \"<STR_LIT>\", \"<STR_LIT>\")<EOL>if name:<EOL><INDENT>prettytype = \"<STR_LIT>\" % (prettytype, name)<EOL><DEDENT>key = \"<STR_LIT>\"<EOL>if referent:<EOL><INDENT>key = self.get_refkey(obj, referent)<EOL><DEDENT>url = reverse('<STR_LIT>', args=(<EOL>typename,<EOL>id(obj)<EOL>))<EOL>return ('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (url, id(obj), prettytype, key, get_repr(obj, <NUM_LIT:100>))<EOL>)<EOL>", "docstring": "Return an HTML tree block describing the given object.", "id": "f11991:c0:m1"}
{"signature": "def print_tree(self, maxresults=<NUM_LIT:100>, maxdepth=None):", "body": "self.ignore_caller()<EOL>for trail in self.walk(maxresults, maxdepth):<EOL><INDENT>print(trail)<EOL><DEDENT>if self.stops:<EOL><INDENT>print(\"<STR_LIT>\" % self.stops)<EOL><DEDENT>", "docstring": "Walk the object tree, pretty-printing each branch.", "id": "f11993:c3:m2"}
{"signature": "def walk(self, maxresults=<NUM_LIT:100>, maxdepth=None):", "body": "self.stops = <NUM_LIT:0><EOL>self.seen = {}<EOL>self.ignore(self, self.__dict__, self.seen, self._ignore)<EOL>self.ignore_caller()<EOL>self.maxdepth = maxdepth<EOL>count = <NUM_LIT:0><EOL>for result in self._gen(self.obj):<EOL><INDENT>yield result<EOL>count += <NUM_LIT:1><EOL>if maxresults and count >= maxresults:<EOL><INDENT>yield <NUM_LIT:0>, <NUM_LIT:0>, \"<STR_LIT>\"<EOL>return<EOL><DEDENT><DEDENT>", "docstring": "Walk the object tree, showing circular referents.", "id": "f11993:c3:m0"}
{"signature": "def walk(self, maxresults=<NUM_LIT:100>, maxdepth=None):", "body": "log.debug(\"<STR_LIT>\")<EOL>self.seen = {}<EOL>self.ignore(self, self.__dict__, self.obj, self.seen, self._ignore)<EOL>self.ignore_caller()<EOL>self.maxdepth = maxdepth<EOL>count = <NUM_LIT:0><EOL>log.debug(\"<STR_LIT>\")<EOL>for result in self._gen(self.obj):<EOL><INDENT>log.debug(\"<STR_LIT>\")<EOL>yield result<EOL>count += <NUM_LIT:1><EOL>if maxresults and count >= maxresults:<EOL><INDENT>yield <NUM_LIT:0>, <NUM_LIT:0>, \"<STR_LIT>\"<EOL>return<EOL><DEDENT><DEDENT>", "docstring": "Walk the object tree, ignoring duplicates and circular refs.", "id": "f11993:c0:m3"}
{"signature": "def decode_timestamp(data: str) -> datetime.datetime:", "body": "year = <NUM_LIT> + int(data[<NUM_LIT:0>:<NUM_LIT:2>])<EOL>month = int(data[<NUM_LIT:2>:<NUM_LIT:4>])<EOL>day = int(data[<NUM_LIT:4>:<NUM_LIT:6>])<EOL>hour = int(data[<NUM_LIT:6>:<NUM_LIT:8>])<EOL>minute = int(data[<NUM_LIT:8>:<NUM_LIT:10>])<EOL>second = int(data[<NUM_LIT:10>:<NUM_LIT:12>])<EOL>if minute == <NUM_LIT>:<EOL><INDENT>minute = <NUM_LIT:0><EOL>hour += <NUM_LIT:1><EOL><DEDENT>return datetime.datetime(year=year, month=month, day=day, hour=hour,<EOL>minute=minute, second=second)<EOL>", "docstring": "Decode timestamp using bespoke decoder.\nCannot use simple strptime since the ness panel contains a bug\nthat P199E zone and state updates emitted on the hour cause a minute\nvalue of `60` to be sent, causing strptime to fail. This decoder handles\nthis edge case.", "id": "f12003:m4"}
{"signature": "@classmethod<EOL><INDENT>def decode(cls, _data: str) -> '<STR_LIT>':<DEDENT>", "body": "<EOL>data = DataIterator(_data)<EOL>_LOGGER.debug(\"<STR_LIT>\", _data)<EOL>start = data.take_hex()<EOL>address = None<EOL>if has_address(start, len(_data)):<EOL><INDENT>address = data.take_hex(half=is_user_interface_req(start))<EOL><DEDENT>length = data.take_hex()<EOL>data_length = length & <NUM_LIT><EOL>seq = length >> <NUM_LIT:7><EOL>command = CommandType(data.take_hex())<EOL>msg_data = data.take_bytes(data_length,<EOL>half=is_user_interface_req(start))<EOL>timestamp = None<EOL>if has_timestamp(start):<EOL><INDENT>timestamp = decode_timestamp(data.take_bytes(<NUM_LIT:6>))<EOL><DEDENT>checksum = data.take_hex()  <EOL>if not data.is_consumed():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return Packet(<EOL>is_user_interface_resp=(is_user_interface_resp(start) and<EOL>command == CommandType.USER_INTERFACE),<EOL>address=address,<EOL>seq=seq,<EOL>command=command,<EOL>data=msg_data,<EOL>timestamp=timestamp,<EOL>)<EOL>", "docstring": "Packets are ASCII encoded data. Packet layout is as follows:\n\n+---------------------------------------------------------------------------+\n| start | address | length | command | data | timestamp | checksum | finish |\n| hex   | hex     | hex    | hex     | str  | dec       | hex      | crlf   |\n| 1     | 1       | 1      | 1       | n    | 6         | 1        |        |\n+---------------------------------------------------------------------------+\n\nTimestamp:\n    Timestamps are formatted in the following format, where each field is\n    decimal encoded:\n\n    YY MM DD HH MM SS\n\nChecksum:\n    Calculated by...?\n\nSince data is ASCII encoded, each byte uses 2 ASCII character to be\nrepresented. However, we cannot simply do a hex decode on the entire\nmessage, since the timestamp and data fields are represented using a\nnon-hex representation and therefore must be manually decoded.", "id": "f12003:c1:m5"}
{"signature": "async def update(self) -> None:", "body": "_LOGGER.debug(\"<STR_LIT>\")<EOL>await asyncio.gather(<EOL>self.send_command('<STR_LIT>'),<EOL>self.send_command('<STR_LIT>'),<EOL>)<EOL>", "docstring": "Force update of alarm status and zones", "id": "f12005:c0:m6"}
{"signature": "async def _update_loop(self) -> None:", "body": "await asyncio.sleep(self._update_interval)<EOL>while not self._closed:<EOL><INDENT>await self.update()<EOL>await asyncio.sleep(self._update_interval)<EOL><DEDENT>", "docstring": "Schedule a state update to keep the connection alive", "id": "f12005:c0:m11"}
{"signature": "def _handle_system_status_event(self, event: SystemStatusEvent) -> None:", "body": "if event.type == SystemStatusEvent.EventType.UNSEALED:<EOL><INDENT>return self._update_zone(event.zone, True)<EOL><DEDENT>elif event.type == SystemStatusEvent.EventType.SEALED:<EOL><INDENT>return self._update_zone(event.zone, False)<EOL><DEDENT>elif event.type == SystemStatusEvent.EventType.ALARM:<EOL><INDENT>return self._update_arming_state(ArmingState.TRIGGERED)<EOL><DEDENT>elif event.type == SystemStatusEvent.EventType.ALARM_RESTORE:<EOL><INDENT>if self.arming_state != ArmingState.DISARMED:<EOL><INDENT>return self._update_arming_state(ArmingState.ARMED)<EOL><DEDENT><DEDENT>elif event.type == SystemStatusEvent.EventType.ENTRY_DELAY_START:<EOL><INDENT>return self._update_arming_state(ArmingState.ENTRY_DELAY)<EOL><DEDENT>elif event.type == SystemStatusEvent.EventType.ENTRY_DELAY_END:<EOL><INDENT>pass<EOL><DEDENT>elif event.type == SystemStatusEvent.EventType.EXIT_DELAY_START:<EOL><INDENT>return self._update_arming_state(ArmingState.EXIT_DELAY)<EOL><DEDENT>elif event.type == SystemStatusEvent.EventType.EXIT_DELAY_END:<EOL><INDENT>if self.arming_state == ArmingState.EXIT_DELAY:<EOL><INDENT>return self._update_arming_state(ArmingState.ARMED)<EOL><DEDENT><DEDENT>elif event.type in Alarm.ARM_EVENTS:<EOL><INDENT>return self._update_arming_state(ArmingState.ARMING)<EOL><DEDENT>elif event.type == SystemStatusEvent.EventType.DISARMED:<EOL><INDENT>return self._update_arming_state(ArmingState.DISARMED)<EOL><DEDENT>elif event.type == SystemStatusEvent.EventType.ARMING_DELAYED:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "DISARMED -> ARMED_AWAY -> EXIT_DELAY_START -> EXIT_DELAY_END\n (trip): -> ALARM -> OUTPUT_ON -> ALARM_RESTORE\n    (disarm): -> DISARMED -> OUTPUT_OFF\n (disarm): -> DISARMED\n (disarm before EXIT_DELAY_END): -> DISARMED -> EXIT_DELAY_END\n\nTODO(NW): Check ALARM_RESTORE state transition to move back into ARMED_AWAY state", "id": "f12006:c1:m4"}
{"signature": "@cli.command()<EOL>def version():", "body": "print(get_version())<EOL>", "docstring": "Print installed package version.", "id": "f12009:m1"}
{"signature": "def listening_ports():", "body": "ports = []<EOL>if not os.path.exists(PROC_TCP):<EOL><INDENT>return ports<EOL><DEDENT>with open(PROC_TCP) as fh:<EOL><INDENT>for line in fh:<EOL><INDENT>if '<STR_LIT>' not in line:<EOL><INDENT>continue<EOL><DEDENT>parts = line.lstrip('<STR_LIT:U+0020>').split('<STR_LIT:U+0020>')<EOL>if parts[<NUM_LIT:2>] != '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>local_port = parts[<NUM_LIT:1>].split('<STR_LIT::>')[<NUM_LIT:1>]<EOL>local_port = int('<STR_LIT>' + local_port, base=<NUM_LIT:16>)<EOL>ports.append(local_port)<EOL><DEDENT><DEDENT>return ports<EOL>", "docstring": "Reads listening ports from /proc/net/tcp", "id": "f12018:m0"}
{"signature": "def __iter__(self):", "body": "idx = self._padding<EOL>while idx < len(self._data):<EOL><INDENT>tobject, hole = self._read_next(idx, len(self._data))<EOL>if tobject is None:<EOL><INDENT>return<EOL><DEDENT>if hole:<EOL><INDENT>idx += hole[<NUM_LIT:1>]  <EOL><DEDENT>idx += tobject.bytes_length + self._padding<EOL>yield tobject, hole<EOL><DEDENT>", "docstring": "An iterator that yields a tuple of (thrift object, hole), where\nhole is a tuple of (start, skipped) bytes.", "id": "f12019:c0:m2"}
{"signature": "@classmethod<EOL><INDENT>def read(cls, data,<EOL>protocol=None,<EOL>fallback_protocol=TBinaryProtocol,<EOL>finagle_thrift=False,<EOL>max_fields=MAX_FIELDS,<EOL>max_list_size=MAX_LIST_SIZE,<EOL>max_map_size=MAX_MAP_SIZE,<EOL>max_set_size=MAX_SET_SIZE,<EOL>read_values=False):<DEDENT>", "body": "<EOL>if len(data) < cls.MIN_MESSAGE_SIZE:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if protocol is None:<EOL><INDENT>protocol = cls.detect_protocol(data, fallback_protocol)<EOL><DEDENT>trans = TTransport.TMemoryBuffer(data)<EOL>proto = protocol(trans)<EOL>header = None<EOL>if finagle_thrift:<EOL><INDENT>try:<EOL><INDENT>header = ThriftStruct.read(<EOL>proto,<EOL>max_fields,<EOL>max_list_size,<EOL>max_map_size,<EOL>max_set_size,<EOL>read_values)<EOL><DEDENT>except:<EOL><INDENT>trans = TTransport.TMemoryBuffer(data)<EOL>proto = protocol(trans)<EOL><DEDENT><DEDENT>method, mtype, seqid = proto.readMessageBegin()<EOL>mtype = cls.message_type_to_str(mtype)<EOL>if len(method) == <NUM_LIT:0> or method.isspace() or method.startswith('<STR_LIT:U+0020>'):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if len(method) > cls.MAX_METHOD_LENGTH:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>valid = range(<NUM_LIT>, <NUM_LIT>)<EOL>if any(ord(char) not in valid for char in method):<EOL><INDENT>raise ValueError('<STR_LIT>' % method)<EOL><DEDENT>args = ThriftStruct.read(<EOL>proto,<EOL>max_fields,<EOL>max_list_size,<EOL>max_map_size,<EOL>max_set_size,<EOL>read_values)<EOL>proto.readMessageEnd()<EOL>msglen = trans._buffer.tell()<EOL>return cls(method, mtype, seqid, args, header, msglen), msglen<EOL>", "docstring": "tries to deserialize a message, might fail if data is missing", "id": "f12030:c0:m10"}
{"signature": "def pop_data(self, nbytes):", "body": "last_timestamp = <NUM_LIT:0><EOL>data = []<EOL>for packet in self.pop(nbytes):<EOL><INDENT>last_timestamp = packet.timestamp<EOL>data.append(packet.data.data)<EOL><DEDENT>return '<STR_LIT>'.join(data), last_timestamp<EOL>", "docstring": "similar to pop, but returns payload + last timestamp", "id": "f12034:c0:m7"}
{"signature": "def is_isomorphic_to(self, other):", "body": "return (isinstance(other, self.__class__)<EOL>and<EOL>len(self.fields) == len(other.fields)<EOL>and<EOL>all(a.is_isomorphic_to(b) for a, b in zip(self.fields,<EOL>other.fields)))<EOL>", "docstring": "Returns true if all fields of other struct are isomorphic to this\nstruct's fields", "id": "f12036:c2:m4"}
{"signature": "def __len__(self):", "body": "return len(self._fields)<EOL>", "docstring": "number of fields, NOT number of bytes", "id": "f12036:c2:m6"}
{"signature": "@classmethod<EOL><INDENT>def of_structs(cls, a, b):<DEDENT>", "body": "t_diff = ThriftDiff(a, b)<EOL>t_diff._do_diff()<EOL>return t_diff<EOL>", "docstring": "Diff two thrift structs and return the result as a ThriftDiff instance", "id": "f12038:c0:m1"}
{"signature": "@property<EOL><INDENT>def field_with_different_value(self):<DEDENT>", "body": "return self._fields_with_different_value<EOL>", "docstring": "List of isomorphically equivalent field pairs for which value is NOT\nequal", "id": "f12038:c0:m10"}
{"signature": "@property<EOL><INDENT>def fields_with_same_value(self):<DEDENT>", "body": "return self._fields_with_same_value<EOL>", "docstring": "List of isomorphically equivalent fields for which value is also equal\nNote: this doesn't return a list of 'pairs'", "id": "f12038:c0:m9"}
{"signature": "@property<EOL><INDENT>def fields_only_in_b(self):<DEDENT>", "body": "return self._fields_only_in_b<EOL>", "docstring": "List of fields exclusive to second struct", "id": "f12038:c0:m8"}
{"signature": "@property<EOL><INDENT>def common_fields(self):<DEDENT>", "body": "return self._common_fields<EOL>", "docstring": "List of isomorphically equivalent field pairs which may or may not have\nsame value", "id": "f12038:c0:m6"}
{"signature": "def capture(<EOL>target_url,<EOL>user_agent=\"<STR_LIT>\",<EOL>proxies={}<EOL>):", "body": "<EOL>domain = \"<STR_LIT>\"<EOL>save_url = urljoin(domain, \"<STR_LIT>\")<EOL>headers = {<EOL>'<STR_LIT>': user_agent,<EOL>\"<STR_LIT:host>\": \"<STR_LIT>\",<EOL>}<EOL>logger.debug(\"<STR_LIT>\".format(domain + \"<STR_LIT:/>\"))<EOL>get_kwargs = dict(<EOL>timeout=<NUM_LIT>,<EOL>allow_redirects=True,<EOL>headers=headers,<EOL>)<EOL>if proxies:<EOL><INDENT>get_kwargs['<STR_LIT>'] = proxies<EOL><DEDENT>response = requests.get(domain + \"<STR_LIT:/>\", **get_kwargs)<EOL>response.raise_for_status()<EOL>html = str(response.content)<EOL>try:<EOL><INDENT>unique_id = html.split('<STR_LIT>', <NUM_LIT:1>)[<NUM_LIT:1>].split('<STR_LIT>', <NUM_LIT:1>)[<NUM_LIT:1>].split('<STR_LIT:\">', <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>logger.debug(\"<STR_LIT>\".format(unique_id))<EOL><DEDENT>except IndexError:<EOL><INDENT>logger.warn(\"<STR_LIT>\")<EOL>unique_id = None<EOL><DEDENT>data = {<EOL>\"<STR_LIT:url>\": target_url,<EOL>\"<STR_LIT>\": <NUM_LIT:1>,<EOL>}<EOL>if unique_id:<EOL><INDENT>data.update({\"<STR_LIT>\": unique_id})<EOL><DEDENT>post_kwargs = dict(<EOL>timeout=<NUM_LIT>,<EOL>allow_redirects=True,<EOL>headers=headers,<EOL>data=data<EOL>)<EOL>if proxies:<EOL><INDENT>post_kwargs['<STR_LIT>'] = proxies<EOL><DEDENT>logger.debug(\"<STR_LIT>\".format(save_url))<EOL>response = requests.post(save_url, **post_kwargs)<EOL>response.raise_for_status()<EOL>if '<STR_LIT>' in response.headers:<EOL><INDENT>memento = str(response.headers['<STR_LIT>']).split('<STR_LIT>')[<NUM_LIT:1>]<EOL>logger.debug(\"<STR_LIT>\".format(memento))<EOL>return memento<EOL><DEDENT>if '<STR_LIT>' in response.headers:<EOL><INDENT>memento = response.headers['<STR_LIT>']<EOL>logger.debug(\"<STR_LIT>\".format(memento))<EOL>return memento<EOL><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>for i, r in enumerate(response.history):<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(i))<EOL>logger.debug(r.headers)<EOL>if '<STR_LIT>' in 
r.headers:<EOL><INDENT>memento = r.headers['<STR_LIT>']<EOL>logger.debug(\"<STR_LIT>\".format(i+<NUM_LIT:1>, memento))<EOL>return memento<EOL><DEDENT><DEDENT>logger.error(\"<STR_LIT>\")<EOL>logger.error(\"<STR_LIT>\".format(response.status_code))<EOL>logger.error(response.headers)<EOL>logger.error(response.text)<EOL>raise Exception(\"<STR_LIT>\")<EOL>", "docstring": "Archives the provided URL using archive.is\n\nReturns the URL where the capture is stored.", "id": "f12041:m0"}
{"signature": "def read(*paths):", "body": "with open(os.path.join(*paths), '<STR_LIT:r>') as f:<EOL><INDENT>return f.read()<EOL><DEDENT>", "docstring": "Build a file path from *paths* and return the contents.", "id": "f12045:m0"}
{"signature": "def frexp10(x):", "body": "expon = _np.int(_np.floor(_np.log10(_np.abs(x))))<EOL>mant = x/_np.power(<NUM_LIT:10>, expon)<EOL>return (mant, expon)<EOL>", "docstring": "Finds the mantissa and exponent of a number :math:`x` such that :math:`x = m 10^e`.\n\nParameters\n----------\n\nx : float\n    Number :math:`x` such that :math:`x = m 10^e`.\n\nReturns\n-------\n\nmantissa : float\n    Number :math:`m` such that :math:`x = m 10^e`.\nexponent : float\n    Number :math:`e` such that :math:`x = m 10^e`.", "id": "f12058:m0"}
{"signature": "def linspacestep(start, stop, step=<NUM_LIT:1>):", "body": "<EOL>numsteps = _np.int((stop-start)/step)<EOL>return _np.linspace(start, start+step*numsteps, numsteps+<NUM_LIT:1>)<EOL>", "docstring": "Create a vector of values over an interval with a specified step size.\n\nParameters\n----------\n\nstart : float\n    The beginning of the interval.\nstop : float\n    The end of the interval.\nstep : float\n    The step size.\n\nReturns\n-------\nvector : :class:`numpy.ndarray`\n    The vector of values.", "id": "f12059:m0"}
{"signature": "def get(f, key, default=None):", "body": "if key in f.keys():<EOL><INDENT>val = f[key].value<EOL>if default is None:<EOL><INDENT>return val<EOL><DEDENT>else:<EOL><INDENT>if _np.shape(val) == _np.shape(default):<EOL><INDENT>return val<EOL><DEDENT><DEDENT><DEDENT>return default<EOL>", "docstring": "Gets an array from datasets.\n\n.. versionadded:: 1.4", "id": "f12065:m1"}
{"signature": "def curve_fit_unscaled(*args, **kwargs):", "body": "<EOL>verbose = kwargs.pop('<STR_LIT>', False)<EOL>popt, pcov = _spopt.curve_fit(*args, **kwargs)<EOL>func = args[<NUM_LIT:0>]<EOL>x    = args[<NUM_LIT:1>]<EOL>y    = args[<NUM_LIT:2>]<EOL>ddof = len(popt)<EOL>try:<EOL><INDENT>sigma = kwargs['<STR_LIT>']<EOL>if sigma is None:<EOL><INDENT>sigma = _np.ones(len(y))<EOL><DEDENT>y_expect = func(x, *popt)<EOL>chisq_red = _chisquare(y, y_expect, sigma, ddof, verbose=verbose)<EOL>pcov = pcov / chisq_red<EOL>return popt, pcov, chisq_red<EOL><DEDENT>except ValueError:<EOL><INDENT>print('<STR_LIT:hello>')<EOL><DEDENT>", "docstring": "Use the reduced chi square to unscale :mod:`scipy`'s scaled :func:`scipy.optimize.curve_fit`. *\\*args* and *\\*\\*kwargs* are passed through to :func:`scipy.optimize.curve_fit`. The tuple *popt, pcov, chisq_red* is returned, where *popt* is the optimal values for the parameters, *pcov* is the estimated covariance of *popt*, and *chisq_red* is the reduced chi square. See http://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.optimize.curve_fit.html.", "id": "f12066:m0"}
{"signature": "@property<EOL><INDENT>def chisq_red(self):<DEDENT>", "body": "return self._chisq_red<EOL>", "docstring": "The reduced chi square.", "id": "f12070:c0:m3"}
{"signature": "@property<EOL><INDENT>def y(self):<DEDENT>", "body": "if self._y is None:<EOL><INDENT>self._y = self.y_unweighted/self.y_error<EOL><DEDENT>return self._y<EOL>", "docstring": "The :math:`X` weighted properly by the errors from *y_error*", "id": "f12071:c0:m9"}
{"signature": "@property<EOL><INDENT>def beta(self):<DEDENT>", "body": "if self._beta is None:<EOL><INDENT>self._beta = _np.dot(_np.linalg.pinv(self.X) , self.y)<EOL><DEDENT>return self._beta<EOL>", "docstring": "The result :math:`\\\\beta` of the linear least squares", "id": "f12071:c0:m11"}
{"signature": "@property<EOL><INDENT>def emit(self):<DEDENT>", "body": "return self._emit<EOL>", "docstring": "Emittance of the beam, :math:`\\\\epsilon = \\\\sqrt{ \\\\langle x^2 \\\\rangle \\\\langle {x'}^2 \\\\rangle - \\\\langle x x' \\\\rangle^2 }`", "id": "f12073:c0:m5"}
{"signature": "@property<EOL><INDENT>def gamma(self):<DEDENT>", "body": "return _sltr.GeV2gamma(self.E)<EOL>", "docstring": "Relativistic :math:`\\\\gamma` of beam", "id": "f12073:c1:m1"}
{"signature": "@property<EOL><INDENT>def sigma_prime(self):<DEDENT>", "body": "return _np.sqrt(self.emit/self.beta(self.E))<EOL>", "docstring": "Divergence of matched beam", "id": "f12073:c0:m8"}
{"signature": "@property<EOL><INDENT>def phi(self):<DEDENT>", "body": "return self._phi<EOL>", "docstring": "Particle phases :math:`\\\\phi`.", "id": "f12074:c0:m2"}
{"signature": "@property<EOL><INDENT>def divsq(self):<DEDENT>", "body": "return _np.mean(self.xp**<NUM_LIT:2>, axis=<NUM_LIT:0>)<EOL>", "docstring": "The beam divergence :math:`\\\\langle x'^2 \\\\rangle`.", "id": "f12074:c0:m10"}
{"signature": "@property<EOL><INDENT>def spotsq(self):<DEDENT>", "body": "return _np.mean(self.x**<NUM_LIT:2>, axis=<NUM_LIT:0>)<EOL>", "docstring": "The beam variance :math:`\\\\langle x^2 \\\\rangle`.", "id": "f12074:c0:m9"}
{"signature": "@property<EOL><INDENT>def x(self):<DEDENT>", "body": "return self._x<EOL>", "docstring": "Coordinates of beam (:math:`x`).", "id": "f12074:c0:m6"}
{"signature": "@property<EOL><INDENT>def k_xi(self):<DEDENT>", "body": "return self._k_xi<EOL>", "docstring": "Ion focusing wavenumber :math:`k_\\\\xi`.", "id": "f12074:c0:m1"}
{"signature": "@property<EOL><INDENT>def s(self):<DEDENT>", "body": "return self._s<EOL>", "docstring": "Coordinates of beam (:math:`s`).", "id": "f12074:c0:m5"}
{"signature": "@property<EOL><INDENT>def xxp(self):<DEDENT>", "body": "return _np.mean(self.x*self.xp, axis=<NUM_LIT:0>)<EOL>", "docstring": "The beam correlation :math:`\\\\langle x x' \\\\rangle`.", "id": "f12074:c0:m11"}
{"signature": "@property<EOL><INDENT>def nb0(self):<DEDENT>", "body": "return self.N_e / ( (<NUM_LIT:2>*_np.pi)**(<NUM_LIT:3>/<NUM_LIT:2>) * self.sig_r**<NUM_LIT:2> * self.sig_xi)<EOL>", "docstring": "On-axis beam density :math:`n_{b,0}`.", "id": "f12075:c0:m2"}
{"signature": "@property<EOL><INDENT>def sig_r(self):<DEDENT>", "body": "return self._sig_r<EOL>", "docstring": "Transverse R.M.S. width", "id": "f12075:c0:m8"}
{"signature": "def r_small(self, x, r0):", "body": "return r0*_np.cos(_np.sqrt(self.k_small) * x)<EOL>", "docstring": "Approximate trajectory function for small (:math:`r_0 < \\\\sigma_r`) oscillations.", "id": "f12075:c0:m4"}
{"signature": "def q(self, x, q0):", "body": "y1_0 = q0<EOL>y0_0 = <NUM_LIT:0><EOL>y0   = [y0_0, y1_0]<EOL>y = _sp.integrate.odeint(self._func, y0, x, Dfun=self._gradient, rtol=self.rtol, atol=self.atol)<EOL>return y[:, <NUM_LIT:1>]<EOL>", "docstring": "Numerically solved trajectory function for initial conditons :math:`q(0) = q_0` and :math:`q'(0) = 0`.", "id": "f12076:c0:m11"}
{"signature": "@property<EOL><INDENT>def lambda_small(self):<DEDENT>", "body": "return <NUM_LIT:2>*_np.pi/_np.sqrt(self.k_small)<EOL>", "docstring": "The wavelength for small (:math:`r_0 < \\\\sigma_r`) oscillations.", "id": "f12076:c0:m5"}
{"signature": "@property<EOL><INDENT>def A(self):<DEDENT>", "body": "return self.species.mass<EOL>", "docstring": "Ion mass in units of AMU.", "id": "f12076:c0:m8"}
{"signature": "@property<EOL><INDENT>def m(self):<DEDENT>", "body": "return amu * self.species.mass<EOL>", "docstring": "Ion mass.", "id": "f12076:c0:m7"}
{"signature": "@property<EOL><INDENT>def E(self):<DEDENT>", "body": "return self._E<EOL>", "docstring": "Beam energy in GeV.", "id": "f12078:c0:m3"}
{"signature": "@property<EOL><INDENT>def alpha(self):<DEDENT>", "body": "alpha = -self.sxxp/self.emit<EOL>return alpha<EOL>", "docstring": "Courant-Snyder parameter :math:`\\\\alpha`.", "id": "f12078:c2:m7"}
{"signature": "@property<EOL><INDENT>def sxxp(self):<DEDENT>", "body": "return self._sxxp<EOL>", "docstring": "Beam moment where :math:`\\\\text{sxxp} = \\\\langle x x' \\\\rangle`.", "id": "f12078:c2:m4"}
{"signature": "@property<EOL><INDENT>def sxp(self):<DEDENT>", "body": "return self._sxp<EOL>", "docstring": "Beam moment where :math:`\\\\text{sxp}^2 = \\\\langle x'^2 \\\\rangle`.", "id": "f12078:c2:m3"}
{"signature": "@property<EOL><INDENT>def emit_n(self):<DEDENT>", "body": "return self._emit*_sltr.GeV2gamma(self.E)<EOL>", "docstring": "Normalized beam emittance :math:`\\\\gamma \\\\epsilon`.", "id": "f12078:c0:m6"}
{"signature": "@property<EOL><INDENT>def dE(self):<DEDENT>", "body": "return self._dE<EOL>", "docstring": "Beam energy spread.", "id": "f12078:c0:m4"}
{"signature": "@property<EOL><INDENT>def sig_x(self):<DEDENT>", "body": "return self._sig_x<EOL>", "docstring": "The R.M.S. width :math:`\\\\sigma_x`.", "id": "f12079:c0:m3"}
{"signature": "@property<EOL><INDENT>def sig_y(self):<DEDENT>", "body": "return self._sig_y<EOL>", "docstring": "The R.M.S. width :math:`\\\\sigma_y`.", "id": "f12079:c0:m4"}
{"signature": "@property<EOL><INDENT>def nb0(self):<DEDENT>", "body": "return self.N_e / (<NUM_LIT:4>*_np.sqrt(<NUM_LIT:3>) * _np.pi * self.sig_x * self.sig_y * self.sig_xi)<EOL>", "docstring": "On-axis beam density :math:`n_{b,0}`.", "id": "f12079:c0:m5"}
{"signature": "@property<EOL><INDENT>def n_p(self):<DEDENT>", "body": "return self._n_p<EOL>", "docstring": "Plasma density in SI units.", "id": "f12081:c0:m4"}
{"signature": "@property<EOL><INDENT>def beta(self):<DEDENT>", "body": "return self._beta<EOL>", "docstring": "Beam beta :math:`\\\\beta`.", "id": "f12082:c0:m12"}
{"signature": "@property<EOL><INDENT>def xp(self):<DEDENT>", "body": "return self._xp<EOL>", "docstring": "Particle coordinates :math:`x'`.", "id": "f12082:c0:m7"}
{"signature": "@property<EOL><INDENT>def emit(self):<DEDENT>", "body": "return self._emit<EOL>", "docstring": "Beam emittance :math:`\\\\epsilon`.", "id": "f12082:c0:m9"}
{"signature": "@property<EOL><INDENT>def sig_delta(self):<DEDENT>", "body": "return self._sig_delta<EOL>", "docstring": "Beam energy spread :math:`\\\\sigma_\\\\delta`.", "id": "f12082:c0:m11"}
{"signature": "@property<EOL><INDENT>def delta(self):<DEDENT>", "body": "return self._delta<EOL>", "docstring": "Particle coordinates :math:`\\\\delta`.", "id": "f12082:c0:m8"}
{"signature": "@property<EOL><INDENT>def mean(self):<DEDENT>", "body": "return self._mean<EOL>", "docstring": "Mean for x, x'.", "id": "f12082:c0:m3"}
{"signature": "@property<EOL><INDENT>def step(self):<DEDENT>", "body": "return self._step<EOL>", "docstring": "The current step.", "id": "f12083:c0:m1"}
{"signature": "def githubtunnel(user1, server1, user2, server2, port, verbose, stanford=False):", "body": "if stanford:<EOL><INDENT>port_shift = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>port_shift = <NUM_LIT:0><EOL><DEDENT>command1 = '<STR_LIT>'.format(port-<NUM_LIT:1>-port_shift, server2, user1, server1)<EOL>command2 = '<STR_LIT>'.format(port-port_shift, port-port_shift-<NUM_LIT:1>, user2)<EOL>command3 = '<STR_LIT>'.format(port, port-<NUM_LIT:1>, user2)<EOL>if verbose:<EOL><INDENT>print(command1)<EOL>if stanford:<EOL><INDENT>print(command2)<EOL><DEDENT>print(command3)<EOL><DEDENT>try:<EOL><INDENT>call(shlex.split(command1))<EOL>if stanford:<EOL><INDENT>call(shlex.split(command2))<EOL><DEDENT>call(shlex.split(command3))<EOL><DEDENT>except:<EOL><INDENT>print('<STR_LIT>')<EOL>pass<EOL><DEDENT>", "docstring": "Opens a nested tunnel, first to *user1*@*server1*, then to *user2*@*server2*, for accessing on *port*.\n\nIf *verbose* is true, prints various ssh commands.\n\nIf *stanford* is true, shifts ports up by 1.\n\nAttempts to get *user1*, *user2* from environment variable ``USER_NAME`` if called from the command line.", "id": "f12085:m0"}
{"signature": "def pdf2png(file_in, file_out):", "body": "command = '<STR_LIT>'.format(file_in, file_out)<EOL>_subprocess.call(_shlex.split(command))<EOL>", "docstring": "Uses `ImageMagick <http://www.imagemagick.org/>`_ to convert an input *file_in* pdf to a *file_out* png. (Untested with other formats.)\n\nParameters\n----------\n\nfile_in : str\n    The path to the pdf file to be converted.\nfile_out : str\n    The path to the png file to be written.", "id": "f12088:m0"}
{"signature": "def K2BDES(K, quad_length, energy):", "body": "<EOL>K           = _np.float_(K)<EOL>quad_length = _np.float_(quad_length)<EOL>energy      = _np.float_(energy)<EOL>Brho = energy/_np.float_(<NUM_LIT>)<EOL>BDES = K*Brho*quad_length<EOL>logger.log(level=loggerlevel, msg='<STR_LIT>'.format(<EOL>bdes        = BDES        ,<EOL>quad_length = quad_length ,<EOL>energy      = energy      ,<EOL>K           = K<EOL>)<EOL>)<EOL>return BDES<EOL>", "docstring": "Returns the BDES for a quadrupole with geometric strength *K* and length *quad_length* for a beam with a given *energy*.\nConverts a geometric focusing strength :math:`K` into a quadrupole :math:`B_des`.\n\nParameters\n----------\nK : float\n    The geometric focusing strength :math:`K`.\nquad_length : float\n    The length of the quadrupole in meters.\nenergy : float\n    The design energy of the beam in GeV.\n\nReturns\n-------\nbdes : float\n    The magnet value of :math:`B_des`.", "id": "f12090:m1"}
{"signature": "def fitimageslice(img, res_x, res_y, xslice, yslice, avg_e_func=None, h5file=None, plot=False):", "body": "<EOL>x_start = xslice[<NUM_LIT:0>]<EOL>x_end   = xslice[<NUM_LIT:1>]<EOL>y_start = yslice[<NUM_LIT:0>]<EOL>y_end   = yslice[<NUM_LIT:1>]<EOL>y_low = _np.round(y_start-<NUM_LIT:0.5>) + <NUM_LIT:0.5><EOL>y_high = _np.round(y_end-<NUM_LIT:0.5>) + <NUM_LIT:0.5><EOL>y_px = linspacestep(<NUM_LIT:1>, img.shape[<NUM_LIT:0>])<EOL>y_bool = _np.logical_and(y_low < y_px, y_px < y_high)<EOL>strip = img[y_bool, x_start:x_end]<EOL>histdata = _np.sum(strip, <NUM_LIT:0>)<EOL>xbins = len(histdata)<EOL>x = _np.linspace(<NUM_LIT:1>, xbins, xbins)*res_x<EOL>varbool  = True<EOL>gaussout = _sp.GaussResults(<EOL>x,<EOL>histdata,<EOL>sigma_y    = _np.ones(xbins),<EOL>variance   = varbool,<EOL>background = True,<EOL>p0         = [<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT:0>]<EOL>)<EOL>if avg_e_func is not None:<EOL><INDENT>relcounts = _np.sum(strip, <NUM_LIT:1>) / _np.float(_np.sum(strip))<EOL>Eavg = <NUM_LIT:0><EOL>for i, val in enumerate(linspacestep(y_low, y_high-<NUM_LIT:1>)):<EOL><INDENT>Eavg = Eavg + avg_e_func(val, val+<NUM_LIT:1>, h5file, res_y)*relcounts[i]<EOL><DEDENT>return Eavg, gaussout<EOL><DEDENT>else:<EOL><INDENT>return gaussout<EOL><DEDENT>", "docstring": "Fits a gaussian to a slice of an image *img* specified by *xslice* x-coordinates and *yslice* y-coordinates. *res_x* and *res_y* specify image resolution in x and y. *avg_e_func* is a function that returns the energy of the image as a function of x. 
It should have the form:\n\n*avg_e_func(x_1, x_2, h5file, res_y)*\n\nFits a gaussian to a slice of an image.\n\nParameters\n----------\n\nimg : array\n    Image to be fitted.\nres_x : int\n    Image resolution in :math:`x`.\nres_y : int\n    Image resolution in :math:`y`.\nxslice : (int, int)\n    Slice coordinates in :math:`x`\nyslice : (int, int)\n    Slice coordinates in :math:`y`\navg_e_func : function\n    Of the form *avg_e_func(x_1, x_2, h5file, res_y)*, returns the energy of the image as a function of :math:`x`.\nh5file : h5file\n    Instance from dataset.\nplot : boolean\n    Whether to plot or not.", "id": "f12094:m0"}
{"signature": "def pcolor_axes(array, px_to_units=px_to_units):", "body": "<EOL>x_size = array.shape[<NUM_LIT:0>]+<NUM_LIT:1><EOL>y_size = array.shape[<NUM_LIT:1>]+<NUM_LIT:1><EOL>x = _np.empty((x_size, y_size))<EOL>y = _np.empty((x_size, y_size))<EOL>for i in range(x_size):<EOL><INDENT>for j in range(y_size):<EOL><INDENT>x[i, j], y[i, j] = px_to_units(i-<NUM_LIT:0.5>, j-<NUM_LIT:0.5>)<EOL><DEDENT><DEDENT>return x, y<EOL>", "docstring": "Return axes :code:`x, y` for *array* to be used with :func:`matplotlib.pyplot.color`.\n\n*px_to_units* is a function to convert pixels to units. By default, returns pixels.", "id": "f12096:m1"}
{"signature": "@property<EOL><INDENT>def AxesImage(self):<DEDENT>", "body": "return self._AxesImage<EOL>", "docstring": "The :class:`matplotlib.image.AxesImage` from :meth:`matplotlib.axes.Axes.imshow`.", "id": "f12102:c0:m3"}
{"signature": "@property<EOL><INDENT>def clim_min(self):<DEDENT>", "body": "return self.minslider.val<EOL>", "docstring": "Slider value for minimum", "id": "f12102:c0:m8"}
{"signature": "@property<EOL><INDENT>def ax(self):<DEDENT>", "body": "return self._ax_img<EOL>", "docstring": "The :class:`matplotlib.axes.Axes` used for :meth:`matplotlib.axes.Axes.imshow`.", "id": "f12102:c0:m4"}
{"signature": "def set_cmap(self, cmap):", "body": "self.AxesImage.set_cmap(cmap)<EOL>", "docstring": "Sets color map to *cmap*.", "id": "f12102:c0:m2"}
{"signature": "@property<EOL><INDENT>def image(self):<DEDENT>", "body": "return self._image<EOL>", "docstring": "The image loaded.", "id": "f12102:c0:m12"}
{"signature": "@property<EOL><INDENT>def imgmax(self):<DEDENT>", "body": "return _np.max(self.image)<EOL>", "docstring": "Highest value of input image.", "id": "f12102:c0:m5"}
{"signature": "def tile():", "body": "figs = plt.get_fignums()<EOL>x       = <NUM_LIT:0><EOL>y       = <NUM_LIT:0><EOL>toppad  = <NUM_LIT><EOL>size = np.array([<NUM_LIT:0>, <NUM_LIT:0>])<EOL>if ( len(figs) != <NUM_LIT:0> ):<EOL><INDENT>fig     = plt.figure(figs[<NUM_LIT:0>])<EOL>screen  = fig.canvas.window.get_screen()<EOL>screenx = screen.get_monitor_geometry(screen.get_primary_monitor())<EOL>screenx = screenx[<NUM_LIT:2>]<EOL>fig = plt.figure(figs[<NUM_LIT:0>])<EOL>fig.canvas.manager.window.move(x, y)<EOL>maxy = np.array(fig.canvas.manager.window.get_position())[<NUM_LIT:1>]<EOL>size = np.array(fig.canvas.manager.window.get_size())<EOL>y    = maxy<EOL>x += size[<NUM_LIT:0>]+<NUM_LIT:1><EOL>for fig in figs[<NUM_LIT:1>:]:<EOL><INDENT>fig  = plt.figure(fig)<EOL>size = np.array(fig.canvas.manager.window.get_size())<EOL>if ( x+size[<NUM_LIT:0>] > screenx ):<EOL><INDENT>x    = <NUM_LIT:0><EOL>y    = maxy<EOL>maxy = y+size[<NUM_LIT:1>]+toppad<EOL><DEDENT>else:<EOL><INDENT>maxy = max(maxy, y+size[<NUM_LIT:1>]+toppad)<EOL><DEDENT>fig.canvas.manager.window.move(x, y)<EOL>x += size[<NUM_LIT:0>] + <NUM_LIT:1><EOL><DEDENT><DEDENT>", "docstring": "Tiles open figures.", "id": "f12103:m0"}
{"signature": "def set_cmap(self, cmap):", "body": "self.AxesImage.set_cmap(cmap)<EOL>", "docstring": "Sets color map to *cmap*.", "id": "f12104:c0:m5"}
{"signature": "@property<EOL><INDENT>def ax(self):<DEDENT>", "body": "return self._ax_img<EOL>", "docstring": "The :class:`matplotlib.axes.Axes` used for :meth:`matplotlib.axes.Axes.imshow`.", "id": "f12104:c0:m8"}
{"signature": "@property<EOL><INDENT>def image(self):<DEDENT>", "body": "return self._images[self._image_ind]<EOL>", "docstring": "The image loaded.", "id": "f12104:c0:m17"}
{"signature": "@property<EOL><INDENT>def imgmax(self):<DEDENT>", "body": "if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>imgmax = _np.max(self.images[<NUM_LIT:0>])<EOL>for img in self.images:<EOL><INDENT>imax = _np.max(img)<EOL>if imax > imgmax:<EOL><INDENT>imgmax = imax<EOL><DEDENT><DEDENT>self._imgmax = imgmax<EOL><DEDENT>return self._imgmax<EOL>", "docstring": "Highest value of input image.", "id": "f12104:c0:m9"}
{"signature": "def imshow_batch(images, cbar=True, show=True, pdf=None, figsize=(<NUM_LIT:16>, <NUM_LIT:12>), rows=<NUM_LIT:2>, columns=<NUM_LIT:2>, cmap=None, **kwargs):", "body": "<EOL>images = _np.array(images)<EOL>gs = _gridspec.GridSpec(rows, columns)<EOL>num_imgs = images.shape[<NUM_LIT:0>]<EOL>max_ind = num_imgs-<NUM_LIT:1><EOL>per_page = rows*columns<EOL>num_pages = _np.int(_np.ceil(num_imgs/per_page))<EOL>fig_array = _np.empty(shape=num_pages, dtype=object)<EOL>if num_pages > <NUM_LIT:1>:<EOL><INDENT>logger.info('<STR_LIT>')<EOL><DEDENT>if pdf is not None:<EOL><INDENT>f = _PdfPages(pdf)<EOL><DEDENT>for p in range(num_pages):<EOL><INDENT>fig_array[p] = _plt.figure(figsize=figsize)<EOL>pg_max_ind = _np.min( [(p+<NUM_LIT:1>) * per_page - <NUM_LIT:1>, max_ind] )<EOL>num_rows = _np.int(_np.ceil((pg_max_ind+<NUM_LIT:1> - p * per_page) / columns))<EOL>for i in range(num_rows):<EOL><INDENT>i_min_ind = p * per_page + i * columns<EOL>col_max_ind = _np.min([i_min_ind + columns - <NUM_LIT:1>, max_ind])<EOL>for j, image in enumerate(images[i_min_ind:col_max_ind+<NUM_LIT:1>]):<EOL><INDENT>ax = fig_array[p].add_subplot(gs[i, j])<EOL>try:<EOL><INDENT>if _np.issubdtype(image.dtype, _np.integer):<EOL><INDENT>image = _np.array(image, dtype=float)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>plot = ax.imshow(image, **kwargs)<EOL>if cmap is not None:<EOL><INDENT>plot.set_cmap(cmap)<EOL><DEDENT>if cbar:<EOL><INDENT>fig_array[p].colorbar(plot)<EOL><DEDENT><DEDENT><DEDENT>fig_array[p].tight_layout()<EOL>if pdf is not None:<EOL><INDENT>f.savefig(fig_array[p])<EOL><DEDENT>if not show:<EOL><INDENT>_plt.close(fig_array[p])<EOL><DEDENT><DEDENT>if pdf is not None:<EOL><INDENT>f.close()<EOL><DEDENT>return fig_array<EOL>", "docstring": "Plots an array of *images* to a single window of size *figsize* with *rows* and *columns*.\n\n* *cmap*: Specifies color map\n* *cbar*: Add color bars\n* *show*: If false, dismisses each window after it is created and optionally saved\n* *pdf*: 
Save to a pdf of filename *pdf*\n* *\\*\\*kwargs* passed to :class:`matplotlib.axis.imshow`", "id": "f12112:m0"}
{"signature": "@property<EOL><INDENT>def erelease(self):<DEDENT>", "body": "if self._erelease is None:<EOL><INDENT>raise IOError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return self._erelease<EOL><DEDENT>", "docstring": "The ending mouse click from :class:`RectangleSelector <scisalt.matplotlib.RectangleSelector.RectangleSelector>`.", "id": "f12117:c0:m12"}
{"signature": "@property<EOL><INDENT>def RectangleSelector(self):<DEDENT>", "body": "return self._RectangleSelector<EOL>", "docstring": "The instance of :class:`matplotlib.widgets.RectangleSelector`.", "id": "f12117:c0:m7"}
{"signature": "@property<EOL><INDENT>def width(self):<DEDENT>", "body": "return self.x1-self.x0<EOL>", "docstring": "Width of rectangle.", "id": "f12117:c0:m25"}
{"signature": "@property<EOL><INDENT>def yslice(self):<DEDENT>", "body": "return [self.y0, self.y1]<EOL>", "docstring": "A list `[y0, y1]`.", "id": "f12117:c0:m23"}
{"signature": "@property<EOL><INDENT>def selfunc_results(self):<DEDENT>", "body": "if self.selfunc is not None:<EOL><INDENT>return self.selfunc(self)<EOL><DEDENT>", "docstring": "The results of :func:`selfunc(instance) <scisalt.matplotlib.RectangleSelector.selfunc>` where *instance* is this class.", "id": "f12117:c0:m10"}
{"signature": "@property<EOL><INDENT>def x1(self):<DEDENT>", "body": "return self._x1<EOL>", "docstring": "Maximum x coordinate of rectangle.", "id": "f12117:c0:m15"}
{"signature": "@property<EOL><INDENT>def y0(self):<DEDENT>", "body": "return self._y0<EOL>", "docstring": "Minimum y coordinate of rectangle.", "id": "f12117:c0:m17"}
{"signature": "@property<EOL><INDENT>def y1(self):<DEDENT>", "body": "return self._y1<EOL>", "docstring": "Maximum y coordinate of rectangle.", "id": "f12117:c0:m19"}
{"signature": "@property<EOL><INDENT>def xslice(self):<DEDENT>", "body": "return [self.x0, self.x1]<EOL>", "docstring": "A list `[x0, x1]`.", "id": "f12117:c0:m21"}
{"signature": "@property<EOL><INDENT>def x0(self):<DEDENT>", "body": "return self._x0<EOL>", "docstring": "Minimum x coordinate of rectangle.", "id": "f12117:c0:m13"}
{"signature": "def hist2d(x, y, bins=<NUM_LIT:10>, labels=None, aspect=\"<STR_LIT>\", plot=True, fig=None, ax=None, interpolation='<STR_LIT:none>', cbar=True, **kwargs):", "body": "h_range   = kwargs.pop('<STR_LIT>', None)<EOL>h_normed  = kwargs.pop('<STR_LIT>', None)<EOL>h_weights = kwargs.pop('<STR_LIT>', None)<EOL>h, xe, ye = _np.histogram2d(x, y, bins=bins, range=h_range, normed=h_normed, weights=h_weights)<EOL>extent    = [xe[<NUM_LIT:0>], xe[-<NUM_LIT:1>], ye[<NUM_LIT:0>], ye[-<NUM_LIT:1>]]<EOL>if plot:<EOL><INDENT>if ax is None:<EOL><INDENT>if fig is None:<EOL><INDENT>fig = _figure('<STR_LIT>')<EOL><DEDENT>ax = fig.gca()<EOL>ax.clear()<EOL><DEDENT>img = ax.imshow(h.transpose(), extent=extent, interpolation=interpolation, aspect=aspect, **kwargs)<EOL>if cbar:<EOL><INDENT>_colorbar(ax=ax, im=img)<EOL><DEDENT>if labels is not None:<EOL><INDENT>_addlabel(labels[<NUM_LIT:0>], labels[<NUM_LIT:1>], labels[<NUM_LIT:2>])<EOL><DEDENT><DEDENT>return h, extent<EOL>", "docstring": "Creates a 2-D histogram of data *x*, *y* with *bins*, *labels* = :code:`[title, xlabel, ylabel]`, aspect ration *aspect*. Attempts to use axis *ax* first, then the current axis of *fig*, then the last axis, to use an already-created window.\n\nPlotting (*plot*) is on by default, setting false doesn't attempt to create a figure.\n\n*interpolation* sets the interpolation type of :meth:`matplotlib.axis.imshow`.\n\nReturns a handle and extent as :code:`h, extent`", "id": "f12118:m0"}
{"signature": "@property<EOL><INDENT>def y0(self):<DEDENT>", "body": "return self._sorted_y[<NUM_LIT:0>]<EOL>", "docstring": "The smaller x coordinate.", "id": "f12127:c0:m7"}
{"signature": "@property<EOL><INDENT>def x0(self):<DEDENT>", "body": "return self._sorted_x[<NUM_LIT:0>]<EOL>", "docstring": "The smaller x coordinate.", "id": "f12127:c0:m3"}
{"signature": "@property<EOL><INDENT>def y1(self):<DEDENT>", "body": "return self._sorted_y[<NUM_LIT:1>]<EOL>", "docstring": "The larger x coordinate.", "id": "f12127:c0:m8"}
{"signature": "def print2elog(author='<STR_LIT>', title='<STR_LIT>', text='<STR_LIT>', link=None, file=None, now=None):", "body": "<EOL>if now is None:<EOL><INDENT>now  = _dt.datetime.now()<EOL><DEDENT>fulltime = now.strftime('<STR_LIT>')<EOL>if not ((link is None) ^ (file is None)):<EOL><INDENT>link_copied = _copy_file(link, fulltime)<EOL>file_copied = _copy_file(file, fulltime)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>loader = _jj.PackageLoader('<STR_LIT>', '<STR_LIT>')<EOL>env = _jj.Environment(loader=loader, trim_blocks=True)<EOL>template = env.get_template('<STR_LIT>')<EOL>stream = template.stream(author=author, title=title, text=text, link=link_copied, file=file_copied, now=now)<EOL>with _tempfile.TemporaryDirectory() as dirname:<EOL><INDENT>filename = '<STR_LIT>'.format(fulltime)<EOL>filepath = _os.path.join(dirname, filename)<EOL>with open(filepath, '<STR_LIT>') as fid:<EOL><INDENT>stream.dump(fid)<EOL><DEDENT>finalpath = _os.path.join(basedir, filename)<EOL>_shutil.copyfile(filepath, finalpath)<EOL><DEDENT>", "docstring": "Prints to the elog.\n\nParameters\n----------\n\nauthor : str, optional\n    Author of the elog.\ntitle : str, optional\n    Title of the elog.\nlink : str, optional\n    Path to a thumbnail.\nfile : str, optional\n    Path to a file.\nnow : :class:`datetime.datetime`\n    Time of the elog.", "id": "f12128:m1"}
{"signature": "def submitEntry(self):", "body": "<EOL>mcclogs, physlogs = self.selectedLogs()<EOL>success = True<EOL>if mcclogs != []:<EOL><INDENT>if not self.acceptedUser(\"<STR_LIT>\"):<EOL><INDENT>QMessageBox().warning(self, \"<STR_LIT>\", \"<STR_LIT>\")<EOL>return<EOL><DEDENT>fileName = self.xmlSetup(\"<STR_LIT>\", mcclogs)<EOL>if fileName is None:<EOL><INDENT>return<EOL><DEDENT>if not self.imagePixmap.isNull():<EOL><INDENT>self.prepareImages(fileName, \"<STR_LIT>\")<EOL><DEDENT>success = self.sendToLogbook(fileName, \"<STR_LIT>\")<EOL><DEDENT>if physlogs != []:<EOL><INDENT>for i in range(len(physlogs)):<EOL><INDENT>fileName = self.xmlSetup(\"<STR_LIT>\", physlogs[i])<EOL>if fileName is None:<EOL><INDENT>return<EOL><DEDENT>if not self.imagePixmap.isNull():<EOL><INDENT>self.prepareImages(fileName, \"<STR_LIT>\")<EOL><DEDENT>success_phys = self.sendToLogbook(fileName, \"<STR_LIT>\", physlogs[i])<EOL>success = success and success_phys<EOL><DEDENT><DEDENT>self.done(success)<EOL>", "docstring": "Process user inputs and subit logbook entry when user clicks Submit button", "id": "f12131:c0:m13"}
{"signature": "def selectedLogs(self):", "body": "mcclogs = []<EOL>physlogs = []<EOL>for i in range(len(self.logMenus)):<EOL><INDENT>logType = self.logMenus[i].selectedType()<EOL>log = self.logMenus[i].selectedProgram()<EOL>if logType == \"<STR_LIT>\":<EOL><INDENT>if log not in mcclogs:<EOL><INDENT>mcclogs.append(log)<EOL><DEDENT><DEDENT>elif logType == \"<STR_LIT>\":<EOL><INDENT>if log not in physlogs:<EOL><INDENT>physlogs.append(log)<EOL><DEDENT><DEDENT><DEDENT>return mcclogs, physlogs<EOL>", "docstring": "Return selected log books by type.", "id": "f12131:c0:m8"}
{"signature": "def xmlSetup(self, logType, logList):", "body": "from xml.etree.ElementTree import Element, SubElement, ElementTree<EOL>from datetime import datetime<EOL>curr_time = datetime.now()<EOL>if logType == \"<STR_LIT>\":<EOL><INDENT>log_entry = Element('<STR_LIT>')<EOL>title     = SubElement(log_entry, '<STR_LIT:title>')<EOL>program   = SubElement(log_entry, '<STR_LIT>')<EOL>timestamp = SubElement(log_entry, '<STR_LIT>')<EOL>priority  = SubElement(log_entry, '<STR_LIT>')<EOL>os_user   = SubElement(log_entry, '<STR_LIT>')<EOL>hostname  = SubElement(log_entry, '<STR_LIT>')<EOL>text      = SubElement(log_entry, '<STR_LIT:text>')<EOL>log_user  = SubElement(log_entry, '<STR_LIT>')<EOL>logbook = []<EOL>for i in range(len(logList)):<EOL><INDENT>logbook.append(SubElement(log_entry, '<STR_LIT>'))<EOL>logbook[i].text = logList[i].lower()<EOL><DEDENT>log_entry.attrib['<STR_LIT:type>'] = \"<STR_LIT>\"<EOL>program.text = \"<STR_LIT>\"<EOL>priority.text = \"<STR_LIT>\"<EOL>os_user.text = \"<STR_LIT>\"<EOL>hostname.text = \"<STR_LIT>\"<EOL>text.attrib['<STR_LIT:type>'] = \"<STR_LIT>\"<EOL>if not self.imagePixmap.isNull():<EOL><INDENT>attachment = SubElement(log_entry, '<STR_LIT>')<EOL>attachment.attrib['<STR_LIT:name>'] = \"<STR_LIT>\"<EOL>attachment.attrib['<STR_LIT:type>'] = \"<STR_LIT>\" + self.imageType<EOL>attachment.text = curr_time.strftime(\"<STR_LIT>\") + str(curr_time.microsecond) + \"<STR_LIT:.>\" + self.imageType<EOL><DEDENT>timestamp.text = curr_time.strftime(\"<STR_LIT>\")<EOL>fileName = \"<STR_LIT>\" + curr_time.strftime(\"<STR_LIT>\") + str(curr_time.microsecond) + \"<STR_LIT>\"<EOL><DEDENT>else:  <EOL><INDENT>timeString = curr_time.strftime(\"<STR_LIT>\")<EOL>log_entry = Element(None)<EOL>severity  = SubElement(log_entry, '<STR_LIT>')<EOL>location  = SubElement(log_entry, '<STR_LIT:location>')<EOL>keywords  = SubElement(log_entry, '<STR_LIT>')<EOL>time      = SubElement(log_entry, '<STR_LIT:time>')<EOL>isodate   = SubElement(log_entry, 
'<STR_LIT>')<EOL>log_user  = SubElement(log_entry, '<STR_LIT>')<EOL>category  = SubElement(log_entry, '<STR_LIT>')<EOL>title     = SubElement(log_entry, '<STR_LIT:title>')<EOL>metainfo  = SubElement(log_entry, '<STR_LIT>')<EOL>if not self.imagePixmap.isNull():<EOL><INDENT>imageFile = SubElement(log_entry, '<STR_LIT>')<EOL>imageFile.text = timeString + \"<STR_LIT>\" + self.imageType<EOL>thumbnail = SubElement(log_entry, '<STR_LIT:file>')<EOL>thumbnail.text = timeString + \"<STR_LIT>\"<EOL><DEDENT>text      = SubElement(log_entry, '<STR_LIT:text>')  <EOL>log_entry.attrib['<STR_LIT:type>'] = \"<STR_LIT>\"<EOL>category.text = \"<STR_LIT>\"<EOL>location.text = \"<STR_LIT>\"<EOL>severity.text = \"<STR_LIT>\"<EOL>keywords.text = \"<STR_LIT:none>\"<EOL>time.text = curr_time.strftime(\"<STR_LIT>\")<EOL>isodate.text = curr_time.strftime(\"<STR_LIT>\")<EOL>metainfo.text = timeString + \"<STR_LIT>\"<EOL>fileName = \"<STR_LIT>\" + metainfo.text<EOL><DEDENT>log_user.text = str(self.logui.userName.text())<EOL>title.text = str(self.logui.titleEntry.text())<EOL>if title.text == \"<STR_LIT>\":<EOL><INDENT>QMessageBox().warning(self, \"<STR_LIT>\", \"<STR_LIT>\")<EOL>return None<EOL><DEDENT>text.text = str(self.logui.textEntry.toPlainText())<EOL>if text.text == \"<STR_LIT>\":<EOL><INDENT>text.text = \"<STR_LIT:U+0020>\"<EOL><DEDENT>xmlFile = open(fileName, \"<STR_LIT:w>\")<EOL>if logType == \"<STR_LIT>\":<EOL><INDENT>ElementTree(log_entry).write(xmlFile)<EOL><DEDENT>else:<EOL><INDENT>xmlString = self.prettify(log_entry)<EOL>xmlFile.write(xmlString)<EOL><DEDENT>xmlFile.write(\"<STR_LIT:\\n>\")  <EOL>xmlFile.close()<EOL>return fileName.rstrip(\"<STR_LIT>\")<EOL>", "docstring": "Create xml file with fields from logbook form.", "id": "f12131:c0:m10"}
{"signature": "def setupUI(self):", "body": "labelSizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)<EOL>labelSizePolicy.setHorizontalStretch(<NUM_LIT:0>)<EOL>labelSizePolicy.setVerticalStretch(<NUM_LIT:0>)<EOL>menuSizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)<EOL>menuSizePolicy.setHorizontalStretch(<NUM_LIT:0>)<EOL>menuSizePolicy.setVerticalStretch(<NUM_LIT:0>)<EOL>logTypeLayout = QHBoxLayout()<EOL>logTypeLayout.setSpacing(<NUM_LIT:0>)<EOL>typeLabel = QLabel(\"<STR_LIT>\")<EOL>typeLabel.setMinimumSize(QSize(<NUM_LIT>, <NUM_LIT:0>))<EOL>typeLabel.setMaximumSize(QSize(<NUM_LIT>, <NUM_LIT>))<EOL>typeLabel.setSizePolicy(labelSizePolicy)<EOL>logTypeLayout.addWidget(typeLabel)<EOL>self.logType = QComboBox(self)<EOL>self.logType.setMinimumSize(QSize(<NUM_LIT:100>, <NUM_LIT:0>))<EOL>self.logType.setMaximumSize(QSize(<NUM_LIT>, <NUM_LIT>))<EOL>menuSizePolicy.setHeightForWidth(self.logType.sizePolicy().hasHeightForWidth())<EOL>self.logType.setSizePolicy(menuSizePolicy)<EOL>logTypeLayout.addWidget(self.logType)<EOL>logTypeLayout.setStretch(<NUM_LIT:1>, <NUM_LIT:6>)<EOL>programLayout = QHBoxLayout()<EOL>programLayout.setSpacing(<NUM_LIT:0>)<EOL>programLabel = QLabel(\"<STR_LIT>\")<EOL>programLabel.setMinimumSize(QSize(<NUM_LIT>, <NUM_LIT:0>))<EOL>programLabel.setMaximumSize(QSize(<NUM_LIT>, <NUM_LIT>))<EOL>programLabel.setSizePolicy(labelSizePolicy)<EOL>programLayout.addWidget(programLabel)<EOL>self.programName = QComboBox(self)<EOL>self.programName.setMinimumSize(QSize(<NUM_LIT:100>, <NUM_LIT:0>))<EOL>self.programName.setMaximumSize(QSize(<NUM_LIT>, <NUM_LIT>))<EOL>menuSizePolicy.setHeightForWidth(self.programName.sizePolicy().hasHeightForWidth())<EOL>self.programName.setSizePolicy(menuSizePolicy)<EOL>programLayout.addWidget(self.programName)<EOL>programLayout.setStretch(<NUM_LIT:1>, <NUM_LIT:6>)<EOL>if self.initialInstance:<EOL><INDENT>self.logButton = QPushButton(\"<STR_LIT:+>\", 
self)<EOL>self.logButton.setToolTip(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>self.logButton = QPushButton(\"<STR_LIT:->\")<EOL>self.logButton.setToolTip(\"<STR_LIT>\")<EOL><DEDENT>self.logButton.setMinimumSize(QSize(<NUM_LIT:16>, <NUM_LIT:16>))  <EOL>self.logButton.setMaximumSize(QSize(<NUM_LIT:16>, <NUM_LIT:16>))  <EOL>self.logButton.setObjectName(\"<STR_LIT>\")<EOL>self.logButton.setStyleSheet(\"<STR_LIT>\")<EOL>self._logSelectLayout = QHBoxLayout()<EOL>self._logSelectLayout.setSpacing(<NUM_LIT:6>)<EOL>self._logSelectLayout.addLayout(logTypeLayout)<EOL>self._logSelectLayout.addLayout(programLayout)<EOL>self._logSelectLayout.addWidget(self.logButton)<EOL>self._logSelectLayout.setStretch(<NUM_LIT:0>, <NUM_LIT:6>)<EOL>self._logSelectLayout.setStretch(<NUM_LIT:1>, <NUM_LIT:6>)<EOL>", "docstring": "Create graphical objects for menus.", "id": "f12131:c1:m1"}
{"signature": "def _connectSlots(self):", "body": "<EOL>QObject.connect(self.logType, SIGNAL(\"<STR_LIT>\"), self.changeLogType)<EOL>", "docstring": "Connect menu change signals.", "id": "f12131:c1:m2"}
{"signature": "def removeLogbook(self, menu=None):", "body": "if self.logMenuCount > <NUM_LIT:1> and menu is not None:<EOL><INDENT>menu.removeMenu()<EOL>self.logMenus.remove(menu)<EOL>self.logMenuCount -= <NUM_LIT:1><EOL><DEDENT>", "docstring": "Remove logbook menu set.", "id": "f12131:c0:m7"}
{"signature": "def prettify(self, elem):", "body": "from xml.etree import ElementTree<EOL>from re import sub<EOL>rawString = ElementTree.tostring(elem, '<STR_LIT:utf-8>')<EOL>parsedString = sub(r'<STR_LIT>', '<STR_LIT:\\n>', rawString)  <EOL>return parsedString[<NUM_LIT:1>:]<EOL>", "docstring": "Parse xml elements for pretty printing", "id": "f12131:c0:m11"}
{"signature": "def prepareImages(self, fileName, logType):", "body": "import subprocess<EOL>if self.imageType == \"<STR_LIT>\":<EOL><INDENT>self.imagePixmap.save(fileName + \"<STR_LIT>\", \"<STR_LIT>\", -<NUM_LIT:1>)<EOL>if logType == \"<STR_LIT>\":<EOL><INDENT>makePostScript = \"<STR_LIT>\" + fileName + \"<STR_LIT>\" + fileName + \"<STR_LIT>\"<EOL>process = subprocess.Popen(makePostScript, shell=True)<EOL>process.wait()<EOL>thumbnailPixmap = self.imagePixmap.scaled(<NUM_LIT>, <NUM_LIT>, Qt.KeepAspectRatio)<EOL>thumbnailPixmap.save(fileName + \"<STR_LIT>\", \"<STR_LIT>\", -<NUM_LIT:1>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>renameImage = \"<STR_LIT>\" + self.image + \"<STR_LIT:U+0020>\" + fileName + \"<STR_LIT>\"<EOL>process = subprocess.Popen(renameImage, shell=True)<EOL>process.wait()<EOL>if logType == \"<STR_LIT>\":<EOL><INDENT>thumbnailPixmap = self.imagePixmap.scaled(<NUM_LIT>, <NUM_LIT>, Qt.KeepAspectRatio)<EOL>thumbnailPixmap.save(fileName + \"<STR_LIT>\", \"<STR_LIT>\", -<NUM_LIT:1>)<EOL><DEDENT><DEDENT>", "docstring": "Convert supplied QPixmap object to image file.", "id": "f12131:c0:m12"}
{"signature": "def acceptedUser(self, logType):", "body": "from urllib2 import urlopen, URLError, HTTPError<EOL>import json<EOL>isApproved = False<EOL>userName = str(self.logui.userName.text())<EOL>if userName == \"<STR_LIT>\":<EOL><INDENT>return False  <EOL><DEDENT>if logType == \"<STR_LIT>\":<EOL><INDENT>networkFault = False<EOL>data = []<EOL>log_url = \"<STR_LIT>\" + userName<EOL>try:<EOL><INDENT>data = urlopen(log_url, None, <NUM_LIT:5>).read()<EOL>data = json.loads(data)<EOL><DEDENT>except URLError as error:<EOL><INDENT>print(\"<STR_LIT>\" + str(error.reason))<EOL>networkFault = True<EOL><DEDENT>except HTTPError as error:<EOL><INDENT>print(\"<STR_LIT>\" + str(error.reason))<EOL>networkFault = True<EOL><DEDENT>if networkFault:<EOL><INDENT>msgBox = QMessageBox()<EOL>msgBox.setText(\"<STR_LIT>\")<EOL>msgBox.setInformativeText(\"<STR_LIT>\")<EOL>msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)<EOL>msgBox.setDefaultButton(QMessageBox.Ok)<EOL>if msgBox.exec_() == QMessageBox.Ok:<EOL><INDENT>isApproved = True<EOL><DEDENT><DEDENT>if data != [] and (data is not None):<EOL><INDENT>isApproved = True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>isApproved = True<EOL><DEDENT>return isApproved<EOL>", "docstring": "Verify enetered user name is on accepted MCC logbook list.", "id": "f12131:c0:m9"}
{"signature": "def addLogbooks(self, type=None, logs=[], default=\"<STR_LIT>\"):", "body": "if type is not None and len(logs) != <NUM_LIT:0>:<EOL><INDENT>if type in self.logList:<EOL><INDENT>for logbook in logs:<EOL><INDENT>if logbook not in self.logList.get(type)[<NUM_LIT:0>]:<EOL><INDENT>self.logList.get(type)[<NUM_LIT:0>].append(logbook)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self.logList[type] = []<EOL>self.logList[type].append(logs)<EOL><DEDENT>if len(self.logList[type]) > <NUM_LIT:1> and default != \"<STR_LIT>\":<EOL><INDENT>self.logList.get(type)[<NUM_LIT:1>] == default<EOL><DEDENT>else:<EOL><INDENT>self.logList.get(type).append(default)<EOL><DEDENT>self.logType.clear()<EOL>self.logType.addItems(list(self.logList.keys()))<EOL>self.changeLogType()<EOL><DEDENT>", "docstring": "Add or change list of logbooks.", "id": "f12131:c1:m6"}
{"signature": "def clearForm(self):", "body": "self.logui.titleEntry.clear()<EOL>self.logui.textEntry.clear()<EOL>while self.logMenuCount > <NUM_LIT:1>:<EOL><INDENT>self.removeLogbook(self.logMenus[-<NUM_LIT:1>])<EOL><DEDENT>", "docstring": "Clear all form fields (except author).", "id": "f12131:c0:m15"}
{"signature": "def t_TRANSPORT_KWD(t):", "body": "return t<EOL>", "docstring": "r'(tcp|udp)", "id": "f12143:m7"}
{"signature": "def t_START_KWD(t):", "body": "return t<EOL>", "docstring": "r'start", "id": "f12143:m3"}
{"signature": "def p_action_blocks2(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "action_blocks : action_block", "id": "f12143:m24"}
{"signature": "def t_CONNECTION_KWD(t):", "body": "return t<EOL>", "docstring": "r'connection", "id": "f12143:m0"}
{"signature": "def p_action_blocks(p):", "body": "if isinstance(p[<NUM_LIT:1>], list):<EOL><INDENT>if isinstance(p[<NUM_LIT:1>][<NUM_LIT:0>], list):<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>][<NUM_LIT:0>] + [p[<NUM_LIT:2>]]<EOL><DEDENT>else:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>] + p[<NUM_LIT:2>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>p[<NUM_LIT:0>] = [p[<NUM_LIT:1>], p[<NUM_LIT:2>]]<EOL><DEDENT>", "docstring": "action_blocks : action_blocks action_block", "id": "f12143:m23"}
{"signature": "def p_actions(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>] + [p[<NUM_LIT:2>]]<EOL>", "docstring": "actions : actions action", "id": "f12143:m26"}
{"signature": "def t_CLIENT_KWD(t):", "body": "return t<EOL>", "docstring": "r'client", "id": "f12143:m1"}
{"signature": "def t_FLOAT(t):", "body": "t.value = float(t.value)<EOL>return t<EOL>", "docstring": "r'([-]?(\\d+)(\\.\\d+)(e(\\+|-)?(\\d+))? | (\\d+)e(\\+|-)?(\\d+))([lL]|[fF])?", "id": "f12143:m11"}
{"signature": "def t_SERVER_KWD(t):", "body": "return t<EOL>", "docstring": "r'server", "id": "f12143:m2"}
{"signature": "def t_INTEGER(t):", "body": "t.value = int(t.value)<EOL>return t<EOL>", "docstring": "r'[-]?\\d+([uU]|[lL]|[uU][lL]|[lL][uU])?", "id": "f12143:m12"}
{"signature": "def p_port(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "port : KEY\nport : p_integer_arg", "id": "f12143:m19"}
{"signature": "def t_REGEX_MATCH_INCOMING_KWD(t):", "body": "return t<EOL>", "docstring": "r\"regex_match_incoming", "id": "f12143:m8"}
{"signature": "def long_to_bytes(N, blocksize=<NUM_LIT:1>):", "body": "bytestring = hex(N)<EOL>bytestring = bytestring[<NUM_LIT:2>:] if bytestring.startswith('<STR_LIT>') else bytestring<EOL>bytestring = bytestring[:-<NUM_LIT:1>] if bytestring.endswith('<STR_LIT:L>') else bytestring<EOL>bytestring = '<STR_LIT:0>' + bytestring if (len(bytestring) % <NUM_LIT:2>) != <NUM_LIT:0> else bytestring<EOL>bytestring = binascii.unhexlify(bytestring)<EOL>if blocksize > <NUM_LIT:0> and len(bytestring) % blocksize != <NUM_LIT:0>:<EOL><INDENT>bytestring = '<STR_LIT:\\x00>' *(blocksize - (len(bytestring) % blocksize)) + bytestring<EOL><DEDENT>return bytestring<EOL>", "docstring": "Given an input integer ``N``, ``long_to_bytes`` returns the representation of ``N`` in bytes.\n    If ``blocksize`` is greater than ``1`` then the output string will be right justified and then padded with zero-bytes,\n    such that the return values length is a multiple of ``blocksize``.", "id": "f12146:m1"}
{"signature": "def _save_without_sending(self, *args, **kwargs):", "body": "self.do_not_send = True<EOL>super(MailerMessage, self).save(*args, **kwargs)<EOL>", "docstring": "Saves the MailerMessage instance without sending the e-mail. This ensures\nother models (e.g. `Attachment`) have something to relate to in the database.", "id": "f12195:c1:m2"}
{"signature": "def clear_sent_messages(self, offset=None):", "body": "if offset is None:<EOL><INDENT>offset = getattr(settings, '<STR_LIT>', defaults.MAILQUEUE_CLEAR_OFFSET)<EOL><DEDENT>if type(offset) is int:<EOL><INDENT>offset = datetime.timedelta(hours=offset)<EOL><DEDENT>delete_before = timezone.now() - offset<EOL>self.filter(sent=True, last_attempt__lte=delete_before).delete()<EOL>", "docstring": "Deletes sent MailerMessage records", "id": "f12195:c0:m1"}
{"signature": "def compute_volume_exposures(shares_held, volumes, percentile):", "body": "shares_held = shares_held.replace(<NUM_LIT:0>, np.nan)<EOL>shares_longed = shares_held[shares_held > <NUM_LIT:0>]<EOL>shares_shorted = -<NUM_LIT:1> * shares_held[shares_held < <NUM_LIT:0>]<EOL>shares_grossed = shares_held.abs()<EOL>longed_frac = shares_longed.divide(volumes)<EOL>shorted_frac = shares_shorted.divide(volumes)<EOL>grossed_frac = shares_grossed.divide(volumes)<EOL>longed_threshold = <NUM_LIT:100> * longed_frac.apply(<EOL>partial(np.nanpercentile, q=<NUM_LIT:100> * percentile),<EOL>axis='<STR_LIT>',<EOL>)<EOL>shorted_threshold = <NUM_LIT:100> * shorted_frac.apply(<EOL>partial(np.nanpercentile, q=<NUM_LIT:100> * percentile),<EOL>axis='<STR_LIT>',<EOL>)<EOL>grossed_threshold = <NUM_LIT:100> * grossed_frac.apply(<EOL>partial(np.nanpercentile, q=<NUM_LIT:100> * percentile),<EOL>axis='<STR_LIT>',<EOL>)<EOL>return longed_threshold, shorted_threshold, grossed_threshold<EOL>", "docstring": "Returns arrays of pth percentile of long, short and gross volume exposures\nof an algorithm's held shares\n\nParameters\n----------\nshares_held : pd.DataFrame\n    Daily number of shares held by an algorithm.\n    - See full explanation in create_risk_tear_sheet\n\nvolume : pd.DataFrame\n    Daily volume per asset\n    - See full explanation in create_risk_tear_sheet\n\npercentile : float\n    Percentile to use when computing and plotting volume exposures\n    - See full explanation in create_risk_tear_sheet", "id": "f12198:m10"}
{"signature": "def compute_style_factor_exposures(positions, risk_factor):", "body": "positions_wo_cash = positions.drop('<STR_LIT>', axis='<STR_LIT>')<EOL>gross_exposure = positions_wo_cash.abs().sum(axis='<STR_LIT>')<EOL>style_factor_exposure = positions_wo_cash.multiply(risk_factor).divide(gross_exposure, axis='<STR_LIT:index>')<EOL>tot_style_factor_exposure = style_factor_exposure.sum(axis='<STR_LIT>',<EOL>skipna=True)<EOL>return tot_style_factor_exposure<EOL>", "docstring": "Returns style factor exposure of an algorithm's positions\n\nParameters\n----------\npositions : pd.DataFrame\n    Daily equity positions of algorithm, in dollars.\n    - See full explanation in create_risk_tear_sheet\n\nrisk_factor : pd.DataFrame\n    Daily risk factor per asset.\n    - DataFrame with dates as index and equities as columns\n    - Example:\n                     Equity(24   Equity(62\n                       [AAPL])      [ABT])\n    2017-04-03\t  -0.51284     1.39173\n    2017-04-04\t  -0.73381     0.98149\n    2017-04-05\t  -0.90132     1.13981", "id": "f12198:m0"}
{"signature": "def plot_cap_exposures_gross(gross_exposures, ax=None):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>color_list = plt.cm.gist_rainbow(np.linspace(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:5>))<EOL>ax.stackplot(gross_exposures[<NUM_LIT:0>].index, gross_exposures,<EOL>labels=CAP_BUCKETS.keys(), colors=color_list, alpha=<NUM_LIT>,<EOL>baseline='<STR_LIT>')<EOL>ax.axhline(<NUM_LIT:0>, color='<STR_LIT:k>', linestyle='<STR_LIT:->')<EOL>ax.set(title='<STR_LIT>',<EOL>ylabel='<STR_LIT>')<EOL>return ax<EOL>", "docstring": "Plots outputs of compute_cap_exposures as area charts\n\nParameters\n----------\ngross_exposures : array\n    Arrays of gross market cap exposures (output of compute_cap_exposures).", "id": "f12198:m8"}
{"signature": "def plot_sector_exposures_net(net_exposures, sector_dict=None, ax=None):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>if sector_dict is None:<EOL><INDENT>sector_names = SECTORS.values()<EOL><DEDENT>else:<EOL><INDENT>sector_names = sector_dict.values()<EOL><DEDENT>color_list = plt.cm.gist_rainbow(np.linspace(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:11>))<EOL>for i in range(len(net_exposures)):<EOL><INDENT>ax.plot(net_exposures[i], color=color_list[i], alpha=<NUM_LIT>,<EOL>label=sector_names[i])<EOL><DEDENT>ax.set(title='<STR_LIT>',<EOL>ylabel='<STR_LIT>')<EOL>return ax<EOL>", "docstring": "Plots output of compute_sector_exposures as line graphs\n\nParameters\n----------\nnet_exposures : arrays\n    Arrays of net sector exposures (output of compute_sector_exposures).\n\nsector_dict : dict or OrderedDict\n    Dictionary of all sectors\n    - See full description in compute_sector_exposures", "id": "f12198:m5"}
{"signature": "def plot_volume_exposures_gross(grossed_threshold, percentile, ax=None):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>ax.plot(grossed_threshold.index, grossed_threshold,<EOL>color='<STR_LIT:b>', label='<STR_LIT>')<EOL>ax.axhline(<NUM_LIT:0>, color='<STR_LIT:k>')<EOL>ax.set(title='<STR_LIT>',<EOL>ylabel='<STR_LIT>'<EOL>.format(<NUM_LIT:100> * percentile))<EOL>ax.legend(frameon=True, framealpha=<NUM_LIT:0.5>)<EOL>return ax<EOL>", "docstring": "Plots outputs of compute_volume_exposures as line graphs\n\nParameters\n----------\ngrossed_threshold : pd.Series\n    Series of grossed volume exposures (output of\n    compute_volume_exposures).\n\npercentile : float\n    Percentile to use when computing and plotting volume exposures\n    - See full explanation in create_risk_tear_sheet", "id": "f12198:m12"}
{"signature": "def plot_sector_exposures_longshort(long_exposures, short_exposures,<EOL>sector_dict=SECTORS, ax=None):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>if sector_dict is None:<EOL><INDENT>sector_names = SECTORS.values()<EOL><DEDENT>else:<EOL><INDENT>sector_names = sector_dict.values()<EOL><DEDENT>color_list = plt.cm.gist_rainbow(np.linspace(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:11>))<EOL>ax.stackplot(long_exposures[<NUM_LIT:0>].index, long_exposures,<EOL>labels=sector_names, colors=color_list, alpha=<NUM_LIT>,<EOL>baseline='<STR_LIT>')<EOL>ax.stackplot(long_exposures[<NUM_LIT:0>].index, short_exposures,<EOL>colors=color_list, alpha=<NUM_LIT>, baseline='<STR_LIT>')<EOL>ax.axhline(<NUM_LIT:0>, color='<STR_LIT:k>', linestyle='<STR_LIT:->')<EOL>ax.set(title='<STR_LIT>',<EOL>ylabel='<STR_LIT>')<EOL>ax.legend(loc='<STR_LIT>', frameon=True, framealpha=<NUM_LIT:0.5>)<EOL>return ax<EOL>", "docstring": "Plots outputs of compute_sector_exposures as area charts\n\nParameters\n----------\nlong_exposures, short_exposures : arrays\n    Arrays of long and short sector exposures (output of\n    compute_sector_exposures).\n\nsector_dict : dict or OrderedDict\n    Dictionary of all sectors\n    - See full description in compute_sector_exposures", "id": "f12198:m3"}
{"signature": "def plot_cap_exposures_net(net_exposures, ax=None):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>color_list = plt.cm.gist_rainbow(np.linspace(<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:5>))<EOL>cap_names = CAP_BUCKETS.keys()<EOL>for i in range(len(net_exposures)):<EOL><INDENT>ax.plot(net_exposures[i], color=color_list[i], alpha=<NUM_LIT>,<EOL>label=cap_names[i])<EOL><DEDENT>ax.axhline(<NUM_LIT:0>, color='<STR_LIT:k>', linestyle='<STR_LIT:->')<EOL>ax.set(title='<STR_LIT>',<EOL>ylabel='<STR_LIT>')<EOL>return ax<EOL>", "docstring": "Plots outputs of compute_cap_exposures as line graphs\n\nParameters\n----------\nnet_exposures : array\n    Arrays of gross market cap exposures (output of compute_cap_exposures).", "id": "f12198:m9"}
{"signature": "def compute_sector_exposures(positions, sectors, sector_dict=SECTORS):", "body": "sector_ids = sector_dict.keys()<EOL>long_exposures = []<EOL>short_exposures = []<EOL>gross_exposures = []<EOL>net_exposures = []<EOL>positions_wo_cash = positions.drop('<STR_LIT>', axis='<STR_LIT>')<EOL>long_exposure = positions_wo_cash[positions_wo_cash > <NUM_LIT:0>].sum(axis='<STR_LIT>')<EOL>short_exposure = positions_wo_cash[positions_wo_cash < <NUM_LIT:0>].abs().sum(axis='<STR_LIT>')<EOL>gross_exposure = positions_wo_cash.abs().sum(axis='<STR_LIT>')<EOL>for sector_id in sector_ids:<EOL><INDENT>in_sector = positions_wo_cash[sectors == sector_id]<EOL>long_sector = in_sector[in_sector > <NUM_LIT:0>].sum(axis='<STR_LIT>').divide(long_exposure)<EOL>short_sector = in_sector[in_sector < <NUM_LIT:0>].sum(axis='<STR_LIT>').divide(short_exposure)<EOL>gross_sector = in_sector.abs().sum(axis='<STR_LIT>').divide(gross_exposure)<EOL>net_sector = long_sector.subtract(short_sector)<EOL>long_exposures.append(long_sector)<EOL>short_exposures.append(short_sector)<EOL>gross_exposures.append(gross_sector)<EOL>net_exposures.append(net_sector)<EOL><DEDENT>return long_exposures, short_exposures, gross_exposures, net_exposures<EOL>", "docstring": "Returns arrays of long, short and gross sector exposures of an algorithm's\npositions\n\nParameters\n----------\npositions : pd.DataFrame\n    Daily equity positions of algorithm, in dollars.\n    - See full explanation in compute_style_factor_exposures.\n\nsectors : pd.DataFrame\n    Daily Morningstar sector code per asset\n    - See full explanation in create_risk_tear_sheet\n\nsector_dict : dict or OrderedDict\n    Dictionary of all sectors\n    - Keys are sector codes (e.g. ints or strings) and values are sector\n      names (which must be strings)\n    - Defaults to Morningstar sectors", "id": "f12198:m2"}
{"signature": "@plotting.customize<EOL>def create_bayesian_tear_sheet(returns, benchmark_rets=None,<EOL>live_start_date=None, samples=<NUM_LIT>,<EOL>return_fig=False, stoch_vol=False,<EOL>progressbar=True):", "body": "if not have_bayesian:<EOL><INDENT>raise NotImplementedError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if live_start_date is None:<EOL><INDENT>raise NotImplementedError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>live_start_date = ep.utils.get_utc_timestamp(live_start_date)<EOL>df_train = returns.loc[returns.index < live_start_date]<EOL>df_test = returns.loc[returns.index >= live_start_date]<EOL>print(\"<STR_LIT>\")<EOL>previous_time = time()<EOL>start_time = previous_time<EOL>trace_t, ppc_t = bayesian.run_model('<STR_LIT:t>', df_train,<EOL>returns_test=df_test,<EOL>samples=samples, ppc=True,<EOL>progressbar=progressbar)<EOL>previous_time = timer(\"<STR_LIT>\", previous_time)<EOL>print(\"<STR_LIT>\")<EOL>trace_best = bayesian.run_model('<STR_LIT>', df_train,<EOL>returns_test=df_test,<EOL>samples=samples,<EOL>progressbar=progressbar)<EOL>previous_time = timer(\"<STR_LIT>\", previous_time)<EOL>fig = plt.figure(figsize=(<NUM_LIT>, <NUM_LIT:10> * <NUM_LIT:2>))<EOL>gs = gridspec.GridSpec(<NUM_LIT:9>, <NUM_LIT:2>, wspace=<NUM_LIT>, hspace=<NUM_LIT>)<EOL>axs = []<EOL>row = <NUM_LIT:0><EOL>ax_cone = plt.subplot(gs[row, :])<EOL>bayesian.plot_bayes_cone(df_train, df_test, ppc_t, ax=ax_cone)<EOL>previous_time = timer(\"<STR_LIT>\", previous_time)<EOL>row += <NUM_LIT:1><EOL>axs.append(plt.subplot(gs[row, <NUM_LIT:0>]))<EOL>axs.append(plt.subplot(gs[row, <NUM_LIT:1>]))<EOL>row += <NUM_LIT:1><EOL>axs.append(plt.subplot(gs[row, <NUM_LIT:0>]))<EOL>axs.append(plt.subplot(gs[row, <NUM_LIT:1>]))<EOL>row += <NUM_LIT:1><EOL>axs.append(plt.subplot(gs[row, <NUM_LIT:0>]))<EOL>axs.append(plt.subplot(gs[row, <NUM_LIT:1>]))<EOL>row += <NUM_LIT:1><EOL>axs.append(plt.subplot(gs[row, :]))<EOL>bayesian.plot_best(trace=trace_best, axs=axs)<EOL>previous_time = 
timer(\"<STR_LIT>\", previous_time)<EOL>row += <NUM_LIT:1><EOL>ax_ret_pred_day = plt.subplot(gs[row, <NUM_LIT:0>])<EOL>ax_ret_pred_week = plt.subplot(gs[row, <NUM_LIT:1>])<EOL>day_pred = ppc_t[:, <NUM_LIT:0>]<EOL>p5 = scipy.stats.scoreatpercentile(day_pred, <NUM_LIT:5>)<EOL>sns.distplot(day_pred,<EOL>ax=ax_ret_pred_day<EOL>)<EOL>ax_ret_pred_day.axvline(p5, linestyle='<STR_LIT>', linewidth=<NUM_LIT>)<EOL>ax_ret_pred_day.set_xlabel('<STR_LIT>')<EOL>ax_ret_pred_day.set_ylabel('<STR_LIT>')<EOL>ax_ret_pred_day.text(<NUM_LIT>, <NUM_LIT>, '<STR_LIT>' % p5,<EOL>verticalalignment='<STR_LIT>',<EOL>horizontalalignment='<STR_LIT:right>',<EOL>transform=ax_ret_pred_day.transAxes)<EOL>previous_time = timer(\"<STR_LIT>\", previous_time)<EOL>week_pred = (<EOL>np.cumprod(ppc_t[:, :<NUM_LIT:5>] + <NUM_LIT:1>, <NUM_LIT:1>) - <NUM_LIT:1>)[:, -<NUM_LIT:1>]<EOL>p5 = scipy.stats.scoreatpercentile(week_pred, <NUM_LIT:5>)<EOL>sns.distplot(week_pred,<EOL>ax=ax_ret_pred_week<EOL>)<EOL>ax_ret_pred_week.axvline(p5, linestyle='<STR_LIT>', linewidth=<NUM_LIT>)<EOL>ax_ret_pred_week.set_xlabel('<STR_LIT>')<EOL>ax_ret_pred_week.set_ylabel('<STR_LIT>')<EOL>ax_ret_pred_week.text(<NUM_LIT>, <NUM_LIT>, '<STR_LIT>' % p5,<EOL>verticalalignment='<STR_LIT>',<EOL>horizontalalignment='<STR_LIT:right>',<EOL>transform=ax_ret_pred_week.transAxes)<EOL>previous_time = timer(\"<STR_LIT>\", previous_time)<EOL>if benchmark_rets is not None:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>benchmark_rets = benchmark_rets.loc[df_train.index]<EOL>trace_alpha_beta = bayesian.run_model('<STR_LIT>', df_train,<EOL>bmark=benchmark_rets,<EOL>samples=samples,<EOL>progressbar=progressbar)<EOL>previous_time = timer(\"<STR_LIT>\", previous_time)<EOL>row += <NUM_LIT:1><EOL>ax_alpha = plt.subplot(gs[row, <NUM_LIT:0>])<EOL>ax_beta = plt.subplot(gs[row, <NUM_LIT:1>])<EOL>sns.distplot((<NUM_LIT:1> + trace_alpha_beta['<STR_LIT>'][<NUM_LIT:100>:])**<NUM_LIT> - 
<NUM_LIT:1>,<EOL>ax=ax_alpha)<EOL>sns.distplot(trace_alpha_beta['<STR_LIT>'][<NUM_LIT:100>:], ax=ax_beta)<EOL>ax_alpha.set_xlabel('<STR_LIT>')<EOL>ax_alpha.set_ylabel('<STR_LIT>')<EOL>ax_beta.set_xlabel('<STR_LIT>')<EOL>ax_beta.set_ylabel('<STR_LIT>')<EOL>previous_time = timer(\"<STR_LIT>\", previous_time)<EOL><DEDENT>if stoch_vol:<EOL><INDENT>returns_cutoff = <NUM_LIT><EOL>print(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(returns_cutoff)<EOL>)<EOL>if df_train.size > returns_cutoff:<EOL><INDENT>df_train_truncated = df_train[-returns_cutoff:]<EOL><DEDENT>_, trace_stoch_vol = bayesian.model_stoch_vol(df_train_truncated)<EOL>previous_time = timer(<EOL>\"<STR_LIT>\", previous_time)<EOL>row += <NUM_LIT:1><EOL>ax_volatility = plt.subplot(gs[row, :])<EOL>bayesian.plot_stoch_vol(<EOL>df_train_truncated, trace=trace_stoch_vol, ax=ax_volatility)<EOL>previous_time = timer(<EOL>\"<STR_LIT>\", previous_time)<EOL><DEDENT>total_time = time() - start_time<EOL>print(\"<STR_LIT>\".format(total_time))<EOL>gs.tight_layout(fig)<EOL>if return_fig:<EOL><INDENT>return fig<EOL><DEDENT>", "docstring": "Generate a number of Bayesian distributions and a Bayesian\ncone plot of returns.\n\nPlots: Sharpe distribution, annual volatility distribution,\nannual alpha distribution, beta distribution, predicted 1 and 5\nday returns distributions, and a cumulative returns cone plot.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in create_full_tear_sheet.\nbenchmark_rets : pd.Series, optional\n    Daily noncumulative returns of the benchmark.\n     - This is in the same style as returns.\nlive_start_date : datetime, optional\n    The point in time when the strategy began live\n    trading, after its backtest period.\nsamples : int, optional\n    Number of posterior samples to draw.\nreturn_fig : boolean, optional\n    If True, returns the figure that was plotted on.\nstoch_vol : boolean, optional\n    If True, run and plot 
the stochastic volatility model\nprogressbar : boolean, optional\n    If True, show a progress bar", "id": "f12208:m9"}
{"signature": "def create_full_tear_sheet(returns,<EOL>positions=None,<EOL>transactions=None,<EOL>market_data=None,<EOL>benchmark_rets=None,<EOL>slippage=None,<EOL>live_start_date=None,<EOL>sector_mappings=None,<EOL>bayesian=False,<EOL>round_trips=False,<EOL>estimate_intraday='<STR_LIT>',<EOL>hide_positions=False,<EOL>cone_std=(<NUM_LIT:1.0>, <NUM_LIT>, <NUM_LIT>),<EOL>bootstrap=False,<EOL>unadjusted_returns=None,<EOL>style_factor_panel=None,<EOL>sectors=None,<EOL>caps=None,<EOL>shares_held=None,<EOL>volumes=None,<EOL>percentile=None,<EOL>turnover_denom='<STR_LIT>',<EOL>set_context=True,<EOL>factor_returns=None,<EOL>factor_loadings=None,<EOL>pos_in_dollars=True,<EOL>header_rows=None,<EOL>factor_partitions=FACTOR_PARTITIONS):", "body": "if (unadjusted_returns is None) and (slippage is not None) and(transactions is not None):<EOL><INDENT>unadjusted_returns = returns.copy()<EOL>returns = txn.adjust_returns_for_slippage(returns, positions,<EOL>transactions, slippage)<EOL><DEDENT>positions = utils.check_intraday(estimate_intraday, returns,<EOL>positions, transactions)<EOL>create_returns_tear_sheet(<EOL>returns,<EOL>positions=positions,<EOL>transactions=transactions,<EOL>live_start_date=live_start_date,<EOL>cone_std=cone_std,<EOL>benchmark_rets=benchmark_rets,<EOL>bootstrap=bootstrap,<EOL>turnover_denom=turnover_denom,<EOL>header_rows=header_rows,<EOL>set_context=set_context)<EOL>create_interesting_times_tear_sheet(returns,<EOL>benchmark_rets=benchmark_rets,<EOL>set_context=set_context)<EOL>if positions is not None:<EOL><INDENT>create_position_tear_sheet(returns, positions,<EOL>hide_positions=hide_positions,<EOL>set_context=set_context,<EOL>sector_mappings=sector_mappings,<EOL>estimate_intraday=False)<EOL>if transactions is not None:<EOL><INDENT>create_txn_tear_sheet(returns, positions, transactions,<EOL>unadjusted_returns=unadjusted_returns,<EOL>estimate_intraday=False,<EOL>set_context=set_context)<EOL>if 
round_trips:<EOL><INDENT>create_round_trip_tear_sheet(<EOL>returns=returns,<EOL>positions=positions,<EOL>transactions=transactions,<EOL>sector_mappings=sector_mappings,<EOL>estimate_intraday=False)<EOL><DEDENT>if market_data is not None:<EOL><INDENT>create_capacity_tear_sheet(returns, positions, transactions,<EOL>market_data,<EOL>liquidation_daily_vol_limit=<NUM_LIT>,<EOL>last_n_days=<NUM_LIT>,<EOL>estimate_intraday=False)<EOL><DEDENT><DEDENT>if style_factor_panel is not None:<EOL><INDENT>create_risk_tear_sheet(positions, style_factor_panel, sectors,<EOL>caps, shares_held, volumes, percentile)<EOL><DEDENT>if factor_returns is not None and factor_loadings is not None:<EOL><INDENT>create_perf_attrib_tear_sheet(returns, positions, factor_returns,<EOL>factor_loadings, transactions,<EOL>pos_in_dollars=pos_in_dollars,<EOL>factor_partitions=factor_partitions)<EOL><DEDENT><DEDENT>if bayesian:<EOL><INDENT>create_bayesian_tear_sheet(returns,<EOL>live_start_date=live_start_date,<EOL>benchmark_rets=benchmark_rets,<EOL>set_context=set_context)<EOL><DEDENT>", "docstring": "Generate a number of tear sheets that are useful\nfor analyzing a strategy's performance.\n\n- Fetches benchmarks if needed.\n- Creates tear sheets for returns, and significant events.\n    If possible, also creates tear sheets for position analysis,\n    transaction analysis, and Bayesian analysis.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - Time series with decimal returns.\n     - Example:\n        2015-07-16    -0.012143\n        2015-07-17    0.045350\n        2015-07-20    0.030957\n        2015-07-21    0.004902\npositions : pd.DataFrame, optional\n    Daily net position values.\n     - Time series of dollar amount invested in each position and cash.\n     - Days where stocks are not held can be represented by 0 or NaN.\n     - Non-working capital is labelled 'cash'\n     - Example:\n        index         'AAPL'         'MSFT'          cash\n 
       2004-01-09    13939.3800     -14012.9930     711.5585\n        2004-01-12    14492.6300     -14624.8700     27.1821\n        2004-01-13    -13853.2800    13653.6400      -43.6375\ntransactions : pd.DataFrame, optional\n    Executed trade volumes and fill prices.\n    - One row per trade.\n    - Trades on different names that occur at the\n      same time will have identical indicies.\n    - Example:\n        index                  amount   price    symbol\n        2004-01-09 12:18:01    483      324.12   'AAPL'\n        2004-01-09 12:18:01    122      83.10    'MSFT'\n        2004-01-13 14:12:23    -75      340.43   'AAPL'\nmarket_data : pd.Panel, optional\n    Panel with items axis of 'price' and 'volume' DataFrames.\n    The major and minor axes should match those of the\n    the passed positions DataFrame (same dates and symbols).\nslippage : int/float, optional\n    Basis points of slippage to apply to returns before generating\n    tearsheet stats and plots.\n    If a value is provided, slippage parameter sweep\n    plots will be generated from the unadjusted returns.\n    Transactions and positions must also be passed.\n    - See txn.adjust_returns_for_slippage for more details.\nlive_start_date : datetime, optional\n    The point in time when the strategy began live trading,\n    after its backtest period. This datetime should be normalized.\nhide_positions : bool, optional\n    If True, will not output any symbol names.\nbayesian: boolean, optional\n    If True, causes the generation of a Bayesian tear sheet.\nround_trips: boolean, optional\n    If True, causes the generation of a round trip tear sheet.\nsector_mappings : dict or pd.Series, optional\n    Security identifier to sector mapping.\n    Security ids as keys, sectors as values.\nestimate_intraday: boolean or str, optional\n    Instead of using the end-of-day positions, use the point in the day\n    where we have the most $ invested. 
This will adjust positions to\n    better approximate and represent how an intraday strategy behaves.\n    By default, this is 'infer', and an attempt will be made to detect\n    an intraday strategy. Specifying this value will prevent detection.\ncone_std : float, or tuple, optional\n    If float, The standard deviation to use for the cone plots.\n    If tuple, Tuple of standard deviation values to use for the cone plots\n     - The cone is a normal distribution with this standard deviation\n         centered around a linear regression.\nbootstrap : boolean (optional)\n    Whether to perform bootstrap analysis for the performance\n    metrics. Takes a few minutes longer.\nturnover_denom : str\n    Either AGB or portfolio_value, default AGB.\n    - See full explanation in txn.get_turnover.\nfactor_returns : pd.Dataframe, optional\n    Returns by factor, with date as index and factors as columns\nfactor_loadings : pd.Dataframe, optional\n    Factor loadings for all days in the date range, with date and\n    ticker as index, and factors as columns.\npos_in_dollars : boolean, optional\n    indicates whether positions is in dollars\nheader_rows : dict or OrderedDict, optional\n    Extra rows to display at the top of the perf stats table.\nset_context : boolean, optional\n    If True, set default plotting style context.\n     - See plotting.context().\nfactor_partitions : dict, optional\n    dict specifying how factors should be separated in perf attrib\n    factor returns and risk exposures plots\n    - See create_perf_attrib_tear_sheet().", "id": "f12208:m1"}
{"signature": "@plotting.customize<EOL>def create_txn_tear_sheet(returns, positions, transactions,<EOL>unadjusted_returns=None, estimate_intraday='<STR_LIT>',<EOL>return_fig=False):", "body": "positions = utils.check_intraday(estimate_intraday, returns,<EOL>positions, transactions)<EOL>vertical_sections = <NUM_LIT:6> if unadjusted_returns is not None else <NUM_LIT:4><EOL>fig = plt.figure(figsize=(<NUM_LIT>, vertical_sections * <NUM_LIT:6>))<EOL>gs = gridspec.GridSpec(vertical_sections, <NUM_LIT:3>, wspace=<NUM_LIT:0.5>, hspace=<NUM_LIT:0.5>)<EOL>ax_turnover = plt.subplot(gs[<NUM_LIT:0>, :])<EOL>ax_daily_volume = plt.subplot(gs[<NUM_LIT:1>, :], sharex=ax_turnover)<EOL>ax_turnover_hist = plt.subplot(gs[<NUM_LIT:2>, :])<EOL>ax_txn_timings = plt.subplot(gs[<NUM_LIT:3>, :])<EOL>plotting.plot_turnover(<EOL>returns,<EOL>transactions,<EOL>positions,<EOL>ax=ax_turnover)<EOL>plotting.plot_daily_volume(returns, transactions, ax=ax_daily_volume)<EOL>try:<EOL><INDENT>plotting.plot_daily_turnover_hist(transactions, positions,<EOL>ax=ax_turnover_hist)<EOL><DEDENT>except ValueError:<EOL><INDENT>warnings.warn('<STR_LIT>', UserWarning)<EOL><DEDENT>plotting.plot_txn_time_hist(transactions, ax=ax_txn_timings)<EOL>if unadjusted_returns is not None:<EOL><INDENT>ax_slippage_sweep = plt.subplot(gs[<NUM_LIT:4>, :])<EOL>plotting.plot_slippage_sweep(unadjusted_returns,<EOL>positions,<EOL>transactions,<EOL>ax=ax_slippage_sweep<EOL>)<EOL>ax_slippage_sensitivity = plt.subplot(gs[<NUM_LIT:5>, :])<EOL>plotting.plot_slippage_sensitivity(unadjusted_returns,<EOL>positions,<EOL>transactions,<EOL>ax=ax_slippage_sensitivity<EOL>)<EOL><DEDENT>for ax in fig.axes:<EOL><INDENT>plt.setp(ax.get_xticklabels(), visible=True)<EOL><DEDENT>if return_fig:<EOL><INDENT>return fig<EOL><DEDENT>", "docstring": "Generate a number of plots for analyzing a strategy's transactions.\n\nPlots: turnover, daily volume, and a histogram of daily volume.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the 
strategy, noncumulative.\n     - See full explanation in create_full_tear_sheet.\npositions : pd.DataFrame\n    Daily net position values.\n     - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame\n    Prices and amounts of executed trades. One row per trade.\n     - See full explanation in create_full_tear_sheet.\nunadjusted_returns : pd.Series, optional\n    Daily unadjusted returns of the strategy, noncumulative.\n    Will plot additional swippage sweep analysis.\n     - See pyfolio.plotting.plot_swippage_sleep and\n       pyfolio.plotting.plot_slippage_sensitivity\nestimate_intraday: boolean or str, optional\n    Approximate returns for intraday strategies.\n    See description in create_full_tear_sheet.\nreturn_fig : boolean, optional\n    If True, returns the figure that was plotted on.", "id": "f12208:m5"}
{"signature": "@plotting.customize<EOL>def create_returns_tear_sheet(returns, positions=None,<EOL>transactions=None,<EOL>live_start_date=None,<EOL>cone_std=(<NUM_LIT:1.0>, <NUM_LIT>, <NUM_LIT>),<EOL>benchmark_rets=None,<EOL>bootstrap=False,<EOL>turnover_denom='<STR_LIT>',<EOL>header_rows=None,<EOL>return_fig=False):", "body": "if benchmark_rets is not None:<EOL><INDENT>returns = utils.clip_returns_to_benchmark(returns, benchmark_rets)<EOL><DEDENT>plotting.show_perf_stats(returns, benchmark_rets,<EOL>positions=positions,<EOL>transactions=transactions,<EOL>turnover_denom=turnover_denom,<EOL>bootstrap=bootstrap,<EOL>live_start_date=live_start_date,<EOL>header_rows=header_rows)<EOL>plotting.show_worst_drawdown_periods(returns)<EOL>vertical_sections = <NUM_LIT:11><EOL>if live_start_date is not None:<EOL><INDENT>vertical_sections += <NUM_LIT:1><EOL>live_start_date = ep.utils.get_utc_timestamp(live_start_date)<EOL><DEDENT>if benchmark_rets is not None:<EOL><INDENT>vertical_sections += <NUM_LIT:1><EOL><DEDENT>if bootstrap:<EOL><INDENT>vertical_sections += <NUM_LIT:1><EOL><DEDENT>fig = plt.figure(figsize=(<NUM_LIT>, vertical_sections * <NUM_LIT:6>))<EOL>gs = gridspec.GridSpec(vertical_sections, <NUM_LIT:3>, wspace=<NUM_LIT:0.5>, hspace=<NUM_LIT:0.5>)<EOL>ax_rolling_returns = plt.subplot(gs[:<NUM_LIT:2>, :])<EOL>i = <NUM_LIT:2><EOL>ax_rolling_returns_vol_match = plt.subplot(gs[i, :],<EOL>sharex=ax_rolling_returns)<EOL>i += <NUM_LIT:1><EOL>ax_rolling_returns_log = plt.subplot(gs[i, :],<EOL>sharex=ax_rolling_returns)<EOL>i += <NUM_LIT:1><EOL>ax_returns = plt.subplot(gs[i, :],<EOL>sharex=ax_rolling_returns)<EOL>i += <NUM_LIT:1><EOL>if benchmark_rets is not None:<EOL><INDENT>ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns)<EOL>i += <NUM_LIT:1><EOL><DEDENT>ax_rolling_volatility = plt.subplot(gs[i, :], sharex=ax_rolling_returns)<EOL>i += <NUM_LIT:1><EOL>ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)<EOL>i += <NUM_LIT:1><EOL>ax_drawdown = 
plt.subplot(gs[i, :], sharex=ax_rolling_returns)<EOL>i += <NUM_LIT:1><EOL>ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)<EOL>i += <NUM_LIT:1><EOL>ax_monthly_heatmap = plt.subplot(gs[i, <NUM_LIT:0>])<EOL>ax_annual_returns = plt.subplot(gs[i, <NUM_LIT:1>])<EOL>ax_monthly_dist = plt.subplot(gs[i, <NUM_LIT:2>])<EOL>i += <NUM_LIT:1><EOL>ax_return_quantiles = plt.subplot(gs[i, :])<EOL>i += <NUM_LIT:1><EOL>plotting.plot_rolling_returns(<EOL>returns,<EOL>factor_returns=benchmark_rets,<EOL>live_start_date=live_start_date,<EOL>cone_std=cone_std,<EOL>ax=ax_rolling_returns)<EOL>ax_rolling_returns.set_title(<EOL>'<STR_LIT>')<EOL>plotting.plot_rolling_returns(<EOL>returns,<EOL>factor_returns=benchmark_rets,<EOL>live_start_date=live_start_date,<EOL>cone_std=None,<EOL>volatility_match=(benchmark_rets is not None),<EOL>legend_loc=None,<EOL>ax=ax_rolling_returns_vol_match)<EOL>ax_rolling_returns_vol_match.set_title(<EOL>'<STR_LIT>')<EOL>plotting.plot_rolling_returns(<EOL>returns,<EOL>factor_returns=benchmark_rets,<EOL>logy=True,<EOL>live_start_date=live_start_date,<EOL>cone_std=cone_std,<EOL>ax=ax_rolling_returns_log)<EOL>ax_rolling_returns_log.set_title(<EOL>'<STR_LIT>')<EOL>plotting.plot_returns(<EOL>returns,<EOL>live_start_date=live_start_date,<EOL>ax=ax_returns,<EOL>)<EOL>ax_returns.set_title(<EOL>'<STR_LIT>')<EOL>if benchmark_rets is not None:<EOL><INDENT>plotting.plot_rolling_beta(<EOL>returns, benchmark_rets, ax=ax_rolling_beta)<EOL><DEDENT>plotting.plot_rolling_volatility(<EOL>returns, factor_returns=benchmark_rets, ax=ax_rolling_volatility)<EOL>plotting.plot_rolling_sharpe(<EOL>returns, ax=ax_rolling_sharpe)<EOL>plotting.plot_drawdown_periods(<EOL>returns, top=<NUM_LIT:5>, ax=ax_drawdown)<EOL>plotting.plot_drawdown_underwater(<EOL>returns=returns, ax=ax_underwater)<EOL>plotting.plot_monthly_returns_heatmap(returns, ax=ax_monthly_heatmap)<EOL>plotting.plot_annual_returns(returns, ax=ax_annual_returns)<EOL>plotting.plot_monthly_returns_dist(returns, 
ax=ax_monthly_dist)<EOL>plotting.plot_return_quantiles(<EOL>returns,<EOL>live_start_date=live_start_date,<EOL>ax=ax_return_quantiles)<EOL>if bootstrap and (benchmark_rets is not None):<EOL><INDENT>ax_bootstrap = plt.subplot(gs[i, :])<EOL>plotting.plot_perf_stats(returns, benchmark_rets,<EOL>ax=ax_bootstrap)<EOL><DEDENT>elif bootstrap:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>for ax in fig.axes:<EOL><INDENT>plt.setp(ax.get_xticklabels(), visible=True)<EOL><DEDENT>if return_fig:<EOL><INDENT>return fig<EOL><DEDENT>", "docstring": "Generate a number of plots for analyzing a strategy's returns.\n\n- Fetches benchmarks, then creates the plots on a single figure.\n- Plots: rolling returns (with cone), rolling beta, rolling sharpe,\n    rolling Fama-French risk factors, drawdowns, underwater plot, monthly\n    and annual return plots, daily similarity plots,\n    and return quantile box plot.\n- Will also print the start and end dates of the strategy,\n    performance statistics, drawdown periods, and the return range.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in create_full_tear_sheet.\npositions : pd.DataFrame, optional\n    Daily net position values.\n     - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame, optional\n    Executed trade volumes and fill prices.\n    - See full explanation in create_full_tear_sheet.\nlive_start_date : datetime, optional\n    The point in time when the strategy began live trading,\n    after its backtest period.\ncone_std : float, or tuple, optional\n    If float, The standard deviation to use for the cone plots.\n    If tuple, Tuple of standard deviation values to use for the cone plots\n     - The cone is a normal distribution with this standard deviation\n         centered around a linear regression.\nbenchmark_rets : pd.Series, optional\n    Daily noncumulative returns of the benchmark.\n     - This is in the 
same style as returns.\nbootstrap : boolean, optional\n    Whether to perform bootstrap analysis for the performance\n    metrics. Takes a few minutes longer.\nturnover_denom : str, optional\n    Either AGB or portfolio_value, default AGB.\n    - See full explanation in txn.get_turnover.\nheader_rows : dict or OrderedDict, optional\n    Extra rows to display at the top of the perf stats table.\nreturn_fig : boolean, optional\n    If True, returns the figure that was plotted on.", "id": "f12208:m3"}
{"signature": "@plotting.customize<EOL>def create_perf_attrib_tear_sheet(returns,<EOL>positions,<EOL>factor_returns,<EOL>factor_loadings,<EOL>transactions=None,<EOL>pos_in_dollars=True,<EOL>return_fig=False,<EOL>factor_partitions=FACTOR_PARTITIONS):", "body": "portfolio_exposures, perf_attrib_data = perf_attrib.perf_attrib(<EOL>returns, positions, factor_returns, factor_loadings, transactions,<EOL>pos_in_dollars=pos_in_dollars<EOL>)<EOL>display(Markdown(\"<STR_LIT>\"))<EOL>perf_attrib.show_perf_attrib_stats(returns, positions, factor_returns,<EOL>factor_loadings, transactions,<EOL>pos_in_dollars)<EOL>vertical_sections = <NUM_LIT:1> + <NUM_LIT:2> * max(len(factor_partitions), <NUM_LIT:1>)<EOL>current_section = <NUM_LIT:0><EOL>fig = plt.figure(figsize=[<NUM_LIT>, vertical_sections * <NUM_LIT:6>])<EOL>gs = gridspec.GridSpec(vertical_sections, <NUM_LIT:1>,<EOL>wspace=<NUM_LIT:0.5>, hspace=<NUM_LIT:0.5>)<EOL>perf_attrib.plot_returns(perf_attrib_data,<EOL>ax=plt.subplot(gs[current_section]))<EOL>current_section += <NUM_LIT:1><EOL>if factor_partitions is not None:<EOL><INDENT>for factor_type, partitions in factor_partitions.iteritems():<EOL><INDENT>columns_to_select = perf_attrib_data.columns.intersection(<EOL>partitions<EOL>)<EOL>perf_attrib.plot_factor_contribution_to_perf(<EOL>perf_attrib_data[columns_to_select],<EOL>ax=plt.subplot(gs[current_section]),<EOL>title=(<EOL>'<STR_LIT>'<EOL>).format(factor_type)<EOL>)<EOL>current_section += <NUM_LIT:1><EOL><DEDENT>for factor_type, partitions in factor_partitions.iteritems():<EOL><INDENT>perf_attrib.plot_risk_exposures(<EOL>portfolio_exposures[portfolio_exposures.columns<EOL>.intersection(partitions)],<EOL>ax=plt.subplot(gs[current_section]),<EOL>title='<STR_LIT>'.format(factor_type)<EOL>)<EOL>current_section += <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>perf_attrib.plot_factor_contribution_to_perf(<EOL>perf_attrib_data,<EOL>ax=plt.subplot(gs[current_section])<EOL>)<EOL>current_section += 
<NUM_LIT:1><EOL>perf_attrib.plot_risk_exposures(<EOL>portfolio_exposures,<EOL>ax=plt.subplot(gs[current_section])<EOL>)<EOL><DEDENT>gs.tight_layout(fig)<EOL>if return_fig:<EOL><INDENT>return fig<EOL><DEDENT>", "docstring": "Generate plots and tables for analyzing a strategy's performance.\n\nParameters\n----------\nreturns : pd.Series\n    Returns for each day in the date range.\n\npositions: pd.DataFrame\n    Daily holdings (in dollars or percentages), indexed by date.\n    Will be converted to percentages if positions are in dollars.\n    Short positions show up as cash in the 'cash' column.\n\nfactor_returns : pd.DataFrame\n    Returns by factor, with date as index and factors as columns\n\nfactor_loadings : pd.DataFrame\n    Factor loadings for all days in the date range, with date\n    and ticker as index, and factors as columns.\n\ntransactions : pd.DataFrame, optional\n    Prices and amounts of executed trades. One row per trade.\n     - See full explanation in create_full_tear_sheet.\n     - Default is None.\n\npos_in_dollars : boolean, optional\n    Flag indicating whether `positions` are in dollars or percentages\n    If True, positions are in dollars.\n\nreturn_fig : boolean, optional\n    If True, returns the figure that was plotted on.\n\nfactor_partitions : dict\n    dict specifying how factors should be separated in factor returns\n    and risk exposures plots\n    - Example:\n      {'style': ['momentum', 'size', 'value', ...],\n       'sector': ['technology', 'materials', ... ]}", "id": "f12208:m11"}
{"signature": "@plotting.customize<EOL>def create_capacity_tear_sheet(returns, positions, transactions,<EOL>market_data,<EOL>liquidation_daily_vol_limit=<NUM_LIT>,<EOL>trade_daily_vol_limit=<NUM_LIT>,<EOL>last_n_days=utils.APPROX_BDAYS_PER_MONTH * <NUM_LIT:6>,<EOL>days_to_liquidate_limit=<NUM_LIT:1>,<EOL>estimate_intraday='<STR_LIT>'):", "body": "positions = utils.check_intraday(estimate_intraday, returns,<EOL>positions, transactions)<EOL>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>max_days_by_ticker = capacity.get_max_days_to_liquidate_by_ticker(<EOL>positions, market_data,<EOL>max_bar_consumption=liquidation_daily_vol_limit,<EOL>capital_base=<NUM_LIT>,<EOL>mean_volume_window=<NUM_LIT:5>)<EOL>max_days_by_ticker.index = (<EOL>max_days_by_ticker.index.map(utils.format_asset))<EOL>print(\"<STR_LIT>\")<EOL>utils.print_table(<EOL>max_days_by_ticker[max_days_by_ticker.days_to_liquidate ><EOL>days_to_liquidate_limit])<EOL>max_days_by_ticker_lnd = capacity.get_max_days_to_liquidate_by_ticker(<EOL>positions, market_data,<EOL>max_bar_consumption=liquidation_daily_vol_limit,<EOL>capital_base=<NUM_LIT>,<EOL>mean_volume_window=<NUM_LIT:5>,<EOL>last_n_days=last_n_days)<EOL>max_days_by_ticker_lnd.index = (<EOL>max_days_by_ticker_lnd.index.map(utils.format_asset))<EOL>print(\"<STR_LIT>\".format(last_n_days))<EOL>utils.print_table(<EOL>max_days_by_ticker_lnd[max_days_by_ticker_lnd.days_to_liquidate > <NUM_LIT:1>])<EOL>llt = capacity.get_low_liquidity_transactions(transactions, market_data)<EOL>llt.index = llt.index.map(utils.format_asset)<EOL>print('<STR_LIT>'<EOL>'<STR_LIT>'.format(trade_daily_vol_limit * <NUM_LIT:100>))<EOL>utils.print_table(<EOL>llt[llt['<STR_LIT>'] > trade_daily_vol_limit * <NUM_LIT:100>])<EOL>llt = capacity.get_low_liquidity_transactions(<EOL>transactions, market_data, last_n_days=last_n_days)<EOL>print(\"<STR_LIT>\".format(last_n_days))<EOL>utils.print_table(<EOL>llt[llt['<STR_LIT>'] > 
trade_daily_vol_limit * <NUM_LIT:100>])<EOL>bt_starting_capital = positions.iloc[<NUM_LIT:0>].sum() / (<NUM_LIT:1> + returns.iloc[<NUM_LIT:0>])<EOL>fig, ax_capacity_sweep = plt.subplots(figsize=(<NUM_LIT>, <NUM_LIT:6>))<EOL>plotting.plot_capacity_sweep(returns, transactions, market_data,<EOL>bt_starting_capital,<EOL>min_pv=<NUM_LIT>,<EOL>max_pv=<NUM_LIT>,<EOL>step_size=<NUM_LIT>,<EOL>ax=ax_capacity_sweep)<EOL>", "docstring": "Generates a report detailing portfolio size constraints set by\nleast liquid tickers. Plots a \"capacity sweep,\" a curve describing\nprojected sharpe ratio given the slippage penalties that are\napplied at various capital bases.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in create_full_tear_sheet.\npositions : pd.DataFrame\n    Daily net position values.\n     - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame\n    Prices and amounts of executed trades. One row per trade.\n     - See full explanation in create_full_tear_sheet.\nmarket_data : pd.Panel\n    Panel with items axis of 'price' and 'volume' DataFrames.\n    The major and minor axes should match those of the\n    the passed positions DataFrame (same dates and symbols).\nliquidation_daily_vol_limit : float\n    Max proportion of a daily bar that can be consumed in the\n    process of liquidating a position in the\n    \"days to liquidation\" analysis.\ntrade_daily_vol_limit : float\n    Flag daily transaction totals that exceed proportion of\n    daily bar.\nlast_n_days : integer\n    Compute max position allocation and dollar volume for only\n    the last N days of the backtest\ndays_to_liquidate_limit : integer\n    Display all tickers with greater max days to liquidation.\nestimate_intraday: boolean or str, optional\n    Approximate returns for intraday strategies.\n    See description in create_full_tear_sheet.", "id": "f12208:m8"}
{"signature": "def get_max_days_to_liquidate_by_ticker(positions, market_data,<EOL>max_bar_consumption=<NUM_LIT>,<EOL>capital_base=<NUM_LIT>,<EOL>mean_volume_window=<NUM_LIT:5>,<EOL>last_n_days=None):", "body": "dtlp = days_to_liquidate_positions(positions, market_data,<EOL>max_bar_consumption=max_bar_consumption,<EOL>capital_base=capital_base,<EOL>mean_volume_window=mean_volume_window)<EOL>if last_n_days is not None:<EOL><INDENT>dtlp = dtlp.loc[dtlp.index.max() - pd.Timedelta(days=last_n_days):]<EOL><DEDENT>pos_alloc = pos.get_percent_alloc(positions)<EOL>pos_alloc = pos_alloc.drop('<STR_LIT>', axis=<NUM_LIT:1>)<EOL>liq_desc = pd.DataFrame()<EOL>liq_desc['<STR_LIT>'] = dtlp.unstack()<EOL>liq_desc['<STR_LIT>'] = pos_alloc.unstack() * <NUM_LIT:100><EOL>liq_desc.index.levels[<NUM_LIT:0>].name = '<STR_LIT>'<EOL>liq_desc.index.levels[<NUM_LIT:1>].name = '<STR_LIT:date>'<EOL>worst_liq = liq_desc.reset_index().sort_values(<EOL>'<STR_LIT>', ascending=False).groupby('<STR_LIT>').first()<EOL>return worst_liq<EOL>", "docstring": "Finds the longest estimated liquidation time for each traded\nname over the course of backtest (or last n days of the backtest).\n\nParameters\n----------\npositions: pd.DataFrame\n    Contains daily position values including cash\n    - See full explanation in tears.create_full_tear_sheet\nmarket_data : pd.Panel\n    Panel with items axis of 'price' and 'volume' DataFrames.\n    The major and minor axes should match those of the\n    the passed positions DataFrame (same dates and symbols).\nmax_bar_consumption : float\n    Max proportion of a daily bar that can be consumed in the\n    process of liquidating a position.\ncapital_base : integer\n    Capital base multiplied by portfolio allocation to compute\n    position value that needs liquidating.\nmean_volume_window : float\n    Trailing window to use in mean volume calculation.\nlast_n_days : integer\n    Compute for only the last n days of the passed backtest 
data.\n\nReturns\n-------\ndays_to_liquidate : pd.DataFrame\n    Max Number of days required to fully liquidate each traded name.\n    Index of symbols. Columns for days_to_liquidate and the corresponding\n    date and position_alloc on that day.", "id": "f12209:m2"}
{"signature": "def get_low_liquidity_transactions(transactions, market_data,<EOL>last_n_days=None):", "body": "txn_daily_w_bar = daily_txns_with_bar_data(transactions, market_data)<EOL>txn_daily_w_bar.index.name = '<STR_LIT:date>'<EOL>txn_daily_w_bar = txn_daily_w_bar.reset_index()<EOL>if last_n_days is not None:<EOL><INDENT>md = txn_daily_w_bar.date.max() - pd.Timedelta(days=last_n_days)<EOL>txn_daily_w_bar = txn_daily_w_bar[txn_daily_w_bar.date > md]<EOL><DEDENT>bar_consumption = txn_daily_w_bar.assign(<EOL>max_pct_bar_consumed=(<EOL>txn_daily_w_bar.amount/txn_daily_w_bar.volume)*<NUM_LIT:100><EOL>).sort_values('<STR_LIT>', ascending=False)<EOL>max_bar_consumption = bar_consumption.groupby('<STR_LIT>').first()<EOL>return max_bar_consumption[['<STR_LIT:date>', '<STR_LIT>']]<EOL>", "docstring": "For each traded name, find the daily transaction total that consumed\nthe greatest proportion of available daily bar volume.\n\nParameters\n----------\ntransactions : pd.DataFrame\n    Prices and amounts of executed trades. One row per trade.\n     - See full explanation in create_full_tear_sheet.\nmarket_data : pd.Panel\n    Panel with items axis of 'price' and 'volume' DataFrames.\n    The major and minor axes should match those of the\n    the passed positions DataFrame (same dates and symbols).\nlast_n_days : integer\n    Compute for only the last n days of the passed backtest data.", "id": "f12209:m3"}
{"signature": "def show_perf_stats(returns, factor_returns=None, positions=None,<EOL>transactions=None, turnover_denom='<STR_LIT>',<EOL>live_start_date=None, bootstrap=False,<EOL>header_rows=None):", "body": "if bootstrap:<EOL><INDENT>perf_func = timeseries.perf_stats_bootstrap<EOL><DEDENT>else:<EOL><INDENT>perf_func = timeseries.perf_stats<EOL><DEDENT>perf_stats_all = perf_func(<EOL>returns,<EOL>factor_returns=factor_returns,<EOL>positions=positions,<EOL>transactions=transactions,<EOL>turnover_denom=turnover_denom)<EOL>date_rows = OrderedDict()<EOL>if len(returns.index) > <NUM_LIT:0>:<EOL><INDENT>date_rows['<STR_LIT>'] = returns.index[<NUM_LIT:0>].strftime('<STR_LIT>')<EOL>date_rows['<STR_LIT>'] = returns.index[-<NUM_LIT:1>].strftime('<STR_LIT>')<EOL><DEDENT>if live_start_date is not None:<EOL><INDENT>live_start_date = ep.utils.get_utc_timestamp(live_start_date)<EOL>returns_is = returns[returns.index < live_start_date]<EOL>returns_oos = returns[returns.index >= live_start_date]<EOL>positions_is = None<EOL>positions_oos = None<EOL>transactions_is = None<EOL>transactions_oos = None<EOL>if positions is not None:<EOL><INDENT>positions_is = positions[positions.index < live_start_date]<EOL>positions_oos = positions[positions.index >= live_start_date]<EOL>if transactions is not None:<EOL><INDENT>transactions_is = transactions[(transactions.index <<EOL>live_start_date)]<EOL>transactions_oos = transactions[(transactions.index ><EOL>live_start_date)]<EOL><DEDENT><DEDENT>perf_stats_is = perf_func(<EOL>returns_is,<EOL>factor_returns=factor_returns,<EOL>positions=positions_is,<EOL>transactions=transactions_is,<EOL>turnover_denom=turnover_denom)<EOL>perf_stats_oos = perf_func(<EOL>returns_oos,<EOL>factor_returns=factor_returns,<EOL>positions=positions_oos,<EOL>transactions=transactions_oos,<EOL>turnover_denom=turnover_denom)<EOL>if len(returns.index) > <NUM_LIT:0>:<EOL><INDENT>date_rows['<STR_LIT>'] = int(len(returns_is) /<EOL>APPROX_BDAYS_PER_MONTH)<EOL>date_rows['<STR_LIT>'] 
= int(len(returns_oos) /<EOL>APPROX_BDAYS_PER_MONTH)<EOL><DEDENT>perf_stats = pd.concat(OrderedDict([<EOL>('<STR_LIT>', perf_stats_is),<EOL>('<STR_LIT>', perf_stats_oos),<EOL>('<STR_LIT>', perf_stats_all),<EOL>]), axis=<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>if len(returns.index) > <NUM_LIT:0>:<EOL><INDENT>date_rows['<STR_LIT>'] = int(len(returns) /<EOL>APPROX_BDAYS_PER_MONTH)<EOL><DEDENT>perf_stats = pd.DataFrame(perf_stats_all, columns=['<STR_LIT>'])<EOL><DEDENT>for column in perf_stats.columns:<EOL><INDENT>for stat, value in perf_stats[column].iteritems():<EOL><INDENT>if stat in STAT_FUNCS_PCT:<EOL><INDENT>perf_stats.loc[stat, column] = str(np.round(value * <NUM_LIT:100>,<EOL><NUM_LIT:1>)) + '<STR_LIT:%>'<EOL><DEDENT><DEDENT><DEDENT>if header_rows is None:<EOL><INDENT>header_rows = date_rows<EOL><DEDENT>else:<EOL><INDENT>header_rows = OrderedDict(header_rows)<EOL>header_rows.update(date_rows)<EOL><DEDENT>utils.print_table(<EOL>perf_stats,<EOL>float_format='<STR_LIT>'.format,<EOL>header_rows=header_rows,<EOL>)<EOL>", "docstring": "Prints some performance metrics of the strategy.\n\n- Shows amount of time the strategy has been run in backtest and\n  out-of-sample (in live trading).\n\n- Shows Omega ratio, max drawdown, Calmar ratio, annual return,\n  stability, Sharpe ratio, annual volatility, alpha, and beta.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.Series, optional\n    Daily noncumulative returns of the benchmark factor to which betas are\n    computed. Usually a benchmark such as market returns.\n     - This is in the same style as returns.\npositions : pd.DataFrame, optional\n    Daily net position values.\n     - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame, optional\n    Prices and amounts of executed trades. 
One row per trade.\n    - See full explanation in tears.create_full_tear_sheet\nturnover_denom : str, optional\n    Either AGB or portfolio_value, default AGB.\n    - See full explanation in txn.get_turnover.\nlive_start_date : datetime, optional\n    The point in time when the strategy began live trading, after\n    its backtest period.\nbootstrap : boolean, optional\n    Whether to perform bootstrap analysis for the performance\n    metrics.\n     - For more information, see timeseries.perf_stats_bootstrap\nheader_rows : dict or OrderedDict, optional\n    Extra rows to display at the top of the displayed table.", "id": "f12210:m11"}
{"signature": "def plot_round_trip_lifetimes(round_trips, disp_amount=<NUM_LIT:16>, lsize=<NUM_LIT>, ax=None):", "body": "if ax is None:<EOL><INDENT>ax = plt.subplot()<EOL><DEDENT>symbols_sample = round_trips.symbol.unique()<EOL>np.random.seed(<NUM_LIT:1>)<EOL>sample = np.random.choice(round_trips.symbol.unique(), replace=False,<EOL>size=min(disp_amount, len(symbols_sample)))<EOL>sample_round_trips = round_trips[round_trips.symbol.isin(sample)]<EOL>symbol_idx = pd.Series(np.arange(len(sample)), index=sample)<EOL>for symbol, sym_round_trips in sample_round_trips.groupby('<STR_LIT>'):<EOL><INDENT>for _, row in sym_round_trips.iterrows():<EOL><INDENT>c = '<STR_LIT:b>' if row.long else '<STR_LIT:r>'<EOL>y_ix = symbol_idx[symbol] + <NUM_LIT><EOL>ax.plot([row['<STR_LIT>'], row['<STR_LIT>']],<EOL>[y_ix, y_ix], color=c,<EOL>linewidth=lsize, solid_capstyle='<STR_LIT>')<EOL><DEDENT><DEDENT>ax.set_yticks(range(disp_amount))<EOL>ax.set_yticklabels([utils.format_asset(s) for s in sample])<EOL>ax.set_ylim((-<NUM_LIT:0.5>, min(len(sample), disp_amount) - <NUM_LIT:0.5>))<EOL>blue = patches.Rectangle([<NUM_LIT:0>, <NUM_LIT:0>], <NUM_LIT:1>, <NUM_LIT:1>, color='<STR_LIT:b>', label='<STR_LIT>')<EOL>red = patches.Rectangle([<NUM_LIT:0>, <NUM_LIT:0>], <NUM_LIT:1>, <NUM_LIT:1>, color='<STR_LIT:r>', label='<STR_LIT>')<EOL>leg = ax.legend(handles=[blue, red], loc='<STR_LIT>',<EOL>frameon=True, framealpha=<NUM_LIT:0.5>)<EOL>leg.get_frame().set_edgecolor('<STR_LIT>')<EOL>ax.grid(False)<EOL>return ax<EOL>", "docstring": "Plots timespans and directions of a sample of round trip trades.\n\nParameters\n----------\nround_trips : pd.DataFrame\n    DataFrame with one row per round trip trade.\n    - See full explanation in round_trips.extract_round_trips\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m32"}
{"signature": "def plot_gross_leverage(returns, positions, ax=None, **kwargs):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>gl = timeseries.gross_lev(positions)<EOL>gl.plot(lw=<NUM_LIT:0.5>, color='<STR_LIT>', legend=False, ax=ax, **kwargs)<EOL>ax.axhline(gl.mean(), color='<STR_LIT:g>', linestyle='<STR_LIT>', lw=<NUM_LIT:3>)<EOL>ax.set_title('<STR_LIT>')<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_xlabel('<STR_LIT>')<EOL>return ax<EOL>", "docstring": "Plots gross leverage versus date.\n\nGross leverage is the sum of long and short exposure per share\ndivided by net asset value.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\npositions : pd.DataFrame\n    Daily net position values.\n     - See full explanation in create_full_tear_sheet.\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n**kwargs, optional\n    Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m17"}
{"signature": "def show_profit_attribution(round_trips):", "body": "total_pnl = round_trips['<STR_LIT>'].sum()<EOL>pnl_attribution = round_trips.groupby('<STR_LIT>')['<STR_LIT>'].sum() / total_pnl<EOL>pnl_attribution.name = '<STR_LIT>'<EOL>pnl_attribution.index = pnl_attribution.index.map(utils.format_asset)<EOL>utils.print_table(<EOL>pnl_attribution.sort_values(<EOL>inplace=False,<EOL>ascending=False,<EOL>),<EOL>name='<STR_LIT>',<EOL>float_format='<STR_LIT>'.format,<EOL>)<EOL>", "docstring": "Prints the share of total PnL contributed by each\ntraded name.\n\nParameters\n----------\nround_trips : pd.DataFrame\n    DataFrame with one row per round trip trade.\n    - See full explanation in round_trips.extract_round_trips\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m33"}
{"signature": "def plot_monthly_returns_heatmap(returns, ax=None, **kwargs):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>monthly_ret_table = ep.aggregate_returns(returns, '<STR_LIT>')<EOL>monthly_ret_table = monthly_ret_table.unstack().round(<NUM_LIT:3>)<EOL>sns.heatmap(<EOL>monthly_ret_table.fillna(<NUM_LIT:0>) *<EOL><NUM_LIT>,<EOL>annot=True,<EOL>annot_kws={\"<STR_LIT:size>\": <NUM_LIT:9>},<EOL>alpha=<NUM_LIT:1.0>,<EOL>center=<NUM_LIT:0.0>,<EOL>cbar=False,<EOL>cmap=matplotlib.cm.RdYlGn,<EOL>ax=ax, **kwargs)<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_xlabel('<STR_LIT>')<EOL>ax.set_title(\"<STR_LIT>\")<EOL>return ax<EOL>", "docstring": "Plots a heatmap of returns by month.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n**kwargs, optional\n    Passed to seaborn plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m3"}
{"signature": "def plot_rolling_returns(returns,<EOL>factor_returns=None,<EOL>live_start_date=None,<EOL>logy=False,<EOL>cone_std=None,<EOL>legend_loc='<STR_LIT>',<EOL>volatility_match=False,<EOL>cone_function=timeseries.forecast_cone_bootstrap,<EOL>ax=None, **kwargs):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>ax.set_xlabel('<STR_LIT>')<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_yscale('<STR_LIT>' if logy else '<STR_LIT>')<EOL>if volatility_match and factor_returns is None:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>elif volatility_match and factor_returns is not None:<EOL><INDENT>bmark_vol = factor_returns.loc[returns.index].std()<EOL>returns = (returns / returns.std()) * bmark_vol<EOL><DEDENT>cum_rets = ep.cum_returns(returns, <NUM_LIT:1.0>)<EOL>y_axis_formatter = FuncFormatter(utils.two_dec_places)<EOL>ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))<EOL>if factor_returns is not None:<EOL><INDENT>cum_factor_returns = ep.cum_returns(<EOL>factor_returns[cum_rets.index], <NUM_LIT:1.0>)<EOL>cum_factor_returns.plot(lw=<NUM_LIT:2>, color='<STR_LIT>',<EOL>label=factor_returns.name, alpha=<NUM_LIT>,<EOL>ax=ax, **kwargs)<EOL><DEDENT>if live_start_date is not None:<EOL><INDENT>live_start_date = ep.utils.get_utc_timestamp(live_start_date)<EOL>is_cum_returns = cum_rets.loc[cum_rets.index < live_start_date]<EOL>oos_cum_returns = cum_rets.loc[cum_rets.index >= live_start_date]<EOL><DEDENT>else:<EOL><INDENT>is_cum_returns = cum_rets<EOL>oos_cum_returns = pd.Series([])<EOL><DEDENT>is_cum_returns.plot(lw=<NUM_LIT:3>, color='<STR_LIT>', alpha=<NUM_LIT>,<EOL>label='<STR_LIT>', ax=ax, **kwargs)<EOL>if len(oos_cum_returns) > <NUM_LIT:0>:<EOL><INDENT>oos_cum_returns.plot(lw=<NUM_LIT:4>, color='<STR_LIT>', alpha=<NUM_LIT>,<EOL>label='<STR_LIT>', ax=ax, **kwargs)<EOL>if cone_std is not None:<EOL><INDENT>if isinstance(cone_std, (float, int)):<EOL><INDENT>cone_std = [cone_std]<EOL><DEDENT>is_returns = returns.loc[returns.index 
< live_start_date]<EOL>cone_bounds = cone_function(<EOL>is_returns,<EOL>len(oos_cum_returns),<EOL>cone_std=cone_std,<EOL>starting_value=is_cum_returns[-<NUM_LIT:1>])<EOL>cone_bounds = cone_bounds.set_index(oos_cum_returns.index)<EOL>for std in cone_std:<EOL><INDENT>ax.fill_between(cone_bounds.index,<EOL>cone_bounds[float(std)],<EOL>cone_bounds[float(-std)],<EOL>color='<STR_LIT>', alpha=<NUM_LIT:0.5>)<EOL><DEDENT><DEDENT><DEDENT>if legend_loc is not None:<EOL><INDENT>ax.legend(loc=legend_loc, frameon=True, framealpha=<NUM_LIT:0.5>)<EOL><DEDENT>ax.axhline(<NUM_LIT:1.0>, linestyle='<STR_LIT>', color='<STR_LIT>', lw=<NUM_LIT:2>)<EOL>return ax<EOL>", "docstring": "Plots cumulative rolling returns versus some benchmarks'.\n\nBacktest returns are in green, and out-of-sample (live trading)\nreturns are in red.\n\nAdditionally, a non-parametric cone plot may be added to the\nout-of-sample returns region.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.Series, optional\n    Daily noncumulative returns of the benchmark factor to which betas are\n    computed. Usually a benchmark such as market returns.\n     - This is in the same style as returns.\nlive_start_date : datetime, optional\n    The date when the strategy began live trading, after\n    its backtest period. This date should be normalized.\nlogy : bool, optional\n    Whether to log-scale the y-axis.\ncone_std : float, or tuple, optional\n    If float, The standard deviation to use for the cone plots.\n    If tuple, Tuple of standard deviation values to use for the cone plots\n     - See timeseries.forecast_cone_bounds for more details.\nlegend_loc : matplotlib.loc, optional\n    The location of the legend on the plot.\nvolatility_match : bool, optional\n    Whether to normalize the volatility of the returns to those of the\n    benchmark returns. 
This helps compare strategies with different\n    volatilities. Requires passing of benchmark_rets.\ncone_function : function, optional\n    Function to use when generating forecast probability cone.\n    The function signiture must follow the form:\n    def cone(in_sample_returns (pd.Series),\n             days_to_project_forward (int),\n             cone_std= (float, or tuple),\n             starting_value= (int, or float))\n    See timeseries.forecast_cone_bootstrap for an example.\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n**kwargs, optional\n    Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m13"}
{"signature": "def plot_sector_allocations(returns, sector_alloc, ax=None, **kwargs):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>sector_alloc.plot(title='<STR_LIT>',<EOL>alpha=<NUM_LIT:0.5>, ax=ax, **kwargs)<EOL>box = ax.get_position()<EOL>ax.set_position([box.x0, box.y0 + box.height * <NUM_LIT:0.1>,<EOL>box.width, box.height * <NUM_LIT>])<EOL>ax.legend(loc='<STR_LIT>', frameon=True, framealpha=<NUM_LIT:0.5>,<EOL>bbox_to_anchor=(<NUM_LIT:0.5>, -<NUM_LIT>), ncol=<NUM_LIT:5>)<EOL>ax.set_xlim((sector_alloc.index[<NUM_LIT:0>], sector_alloc.index[-<NUM_LIT:1>]))<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_xlabel('<STR_LIT>')<EOL>return ax<EOL>", "docstring": "Plots the sector exposures of the portfolio over time.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nsector_alloc : pd.DataFrame\n    Portfolio allocation of positions. See pos.get_sector_alloc.\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n**kwargs, optional\n    Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m21"}
{"signature": "def plot_return_quantiles(returns, live_start_date=None, ax=None, **kwargs):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>is_returns = returns if live_start_date is Noneelse returns.loc[returns.index < live_start_date]<EOL>is_weekly = ep.aggregate_returns(is_returns, '<STR_LIT>')<EOL>is_monthly = ep.aggregate_returns(is_returns, '<STR_LIT>')<EOL>sns.boxplot(data=[is_returns, is_weekly, is_monthly],<EOL>palette=[\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>ax=ax, **kwargs)<EOL>if live_start_date is not None:<EOL><INDENT>oos_returns = returns.loc[returns.index >= live_start_date]<EOL>oos_weekly = ep.aggregate_returns(oos_returns, '<STR_LIT>')<EOL>oos_monthly = ep.aggregate_returns(oos_returns, '<STR_LIT>')<EOL>sns.swarmplot(data=[oos_returns, oos_weekly, oos_monthly], ax=ax,<EOL>color=\"<STR_LIT>\",<EOL>marker=\"<STR_LIT:d>\", **kwargs)<EOL>red_dots = matplotlib.lines.Line2D([], [], color=\"<STR_LIT>\", marker=\"<STR_LIT:d>\",<EOL>label=\"<STR_LIT>\",<EOL>linestyle='<STR_LIT>')<EOL>ax.legend(handles=[red_dots], frameon=True, framealpha=<NUM_LIT:0.5>)<EOL><DEDENT>ax.set_xticklabels(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>ax.set_title('<STR_LIT>')<EOL>return ax<EOL>", "docstring": "Creates a box plot of daily, weekly, and monthly return\ndistributions.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nlive_start_date : datetime, optional\n    The point in time when the strategy began live trading, after\n    its backtest period.\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n**kwargs, optional\n    Passed to seaborn plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m22"}
{"signature": "def plot_slippage_sensitivity(returns, positions, transactions,<EOL>ax=None, **kwargs):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>avg_returns_given_slippage = pd.Series()<EOL>for bps in range(<NUM_LIT:1>, <NUM_LIT:100>):<EOL><INDENT>adj_returns = txn.adjust_returns_for_slippage(returns, positions,<EOL>transactions, bps)<EOL>avg_returns = ep.annual_return(adj_returns)<EOL>avg_returns_given_slippage.loc[bps] = avg_returns<EOL><DEDENT>avg_returns_given_slippage.plot(alpha=<NUM_LIT:1.0>, lw=<NUM_LIT:2>, ax=ax)<EOL>ax.set_title('<STR_LIT>')<EOL>ax.set_xticks(np.arange(<NUM_LIT:0>, <NUM_LIT:100>, <NUM_LIT:10>))<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_xlabel('<STR_LIT>')<EOL>return ax<EOL>", "docstring": "Plots curve relating per-dollar slippage to average annual returns.\n\nParameters\n----------\nreturns : pd.Series\n    Timeseries of portfolio returns to be adjusted for various\n    degrees of slippage.\npositions : pd.DataFrame\n    Daily net position values.\n     - See full explanation in tears.create_full_tear_sheet.\ntransactions : pd.DataFrame\n    Prices and amounts of executed trades. One row per trade.\n     - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n**kwargs, optional\n    Passed to seaborn plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m25"}
{"signature": "def plot_drawdown_underwater(returns, ax=None, **kwargs):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>y_axis_formatter = FuncFormatter(utils.percentage)<EOL>ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))<EOL>df_cum_rets = ep.cum_returns(returns, starting_value=<NUM_LIT:1.0>)<EOL>running_max = np.maximum.accumulate(df_cum_rets)<EOL>underwater = -<NUM_LIT:100> * ((running_max - df_cum_rets) / running_max)<EOL>(underwater).plot(ax=ax, kind='<STR_LIT>', color='<STR_LIT>', alpha=<NUM_LIT>, **kwargs)<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_title('<STR_LIT>')<EOL>ax.set_xlabel('<STR_LIT>')<EOL>return ax<EOL>", "docstring": "Plots how far underwaterr returns are over time, or plots current\ndrawdown vs. date.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n**kwargs, optional\n    Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m9"}
{"signature": "def plot_monthly_returns_dist(returns, ax=None, **kwargs):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>x_axis_formatter = FuncFormatter(utils.percentage)<EOL>ax.xaxis.set_major_formatter(FuncFormatter(x_axis_formatter))<EOL>ax.tick_params(axis='<STR_LIT:x>', which='<STR_LIT>')<EOL>monthly_ret_table = ep.aggregate_returns(returns, '<STR_LIT>')<EOL>ax.hist(<EOL><NUM_LIT:100> * monthly_ret_table,<EOL>color='<STR_LIT>',<EOL>alpha=<NUM_LIT>,<EOL>bins=<NUM_LIT:20>,<EOL>**kwargs)<EOL>ax.axvline(<EOL><NUM_LIT:100> * monthly_ret_table.mean(),<EOL>color='<STR_LIT>',<EOL>linestyle='<STR_LIT>',<EOL>lw=<NUM_LIT:4>,<EOL>alpha=<NUM_LIT:1.0>)<EOL>ax.axvline(<NUM_LIT:0.0>, color='<STR_LIT>', linestyle='<STR_LIT:->', lw=<NUM_LIT:3>, alpha=<NUM_LIT>)<EOL>ax.legend(['<STR_LIT>'], frameon=True, framealpha=<NUM_LIT:0.5>)<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_xlabel('<STR_LIT>')<EOL>ax.set_title(\"<STR_LIT>\")<EOL>return ax<EOL>", "docstring": "Plots a distribution of monthly returns.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n**kwargs, optional\n    Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m5"}
{"signature": "def plot_rolling_volatility(returns, factor_returns=None,<EOL>rolling_window=APPROX_BDAYS_PER_MONTH * <NUM_LIT:6>,<EOL>legend_loc='<STR_LIT>', ax=None, **kwargs):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>y_axis_formatter = FuncFormatter(utils.two_dec_places)<EOL>ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))<EOL>rolling_vol_ts = timeseries.rolling_volatility(<EOL>returns, rolling_window)<EOL>rolling_vol_ts.plot(alpha=<NUM_LIT>, lw=<NUM_LIT:3>, color='<STR_LIT>', ax=ax,<EOL>**kwargs)<EOL>if factor_returns is not None:<EOL><INDENT>rolling_vol_ts_factor = timeseries.rolling_volatility(<EOL>factor_returns, rolling_window)<EOL>rolling_vol_ts_factor.plot(alpha=<NUM_LIT>, lw=<NUM_LIT:3>, color='<STR_LIT>', ax=ax,<EOL>**kwargs)<EOL><DEDENT>ax.set_title('<STR_LIT>')<EOL>ax.axhline(<EOL>rolling_vol_ts.mean(),<EOL>color='<STR_LIT>',<EOL>linestyle='<STR_LIT>',<EOL>lw=<NUM_LIT:3>)<EOL>ax.axhline(<NUM_LIT:0.0>, color='<STR_LIT>', linestyle='<STR_LIT:->', lw=<NUM_LIT:2>)<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_xlabel('<STR_LIT>')<EOL>if factor_returns is None:<EOL><INDENT>ax.legend(['<STR_LIT>', '<STR_LIT>'],<EOL>loc=legend_loc, frameon=True, framealpha=<NUM_LIT:0.5>)<EOL><DEDENT>else:<EOL><INDENT>ax.legend(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'],<EOL>loc=legend_loc, frameon=True, framealpha=<NUM_LIT:0.5>)<EOL><DEDENT>return ax<EOL>", "docstring": "Plots the rolling volatility versus date.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.Series, optional\n    Daily noncumulative returns of the benchmark factor to which betas are\n    computed. 
Usually a benchmark such as market returns.\n     - This is in the same style as returns.\nrolling_window : int, optional\n    The days window over which to compute the volatility.\nlegend_loc : matplotlib.loc, optional\n    The location of the legend on the plot.\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n**kwargs, optional\n    Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m15"}
{"signature": "def plot_exposures(returns, positions, ax=None, **kwargs):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>pos_no_cash = positions.drop('<STR_LIT>', axis=<NUM_LIT:1>)<EOL>l_exp = pos_no_cash[pos_no_cash > <NUM_LIT:0>].sum(axis=<NUM_LIT:1>) / positions.sum(axis=<NUM_LIT:1>)<EOL>s_exp = pos_no_cash[pos_no_cash < <NUM_LIT:0>].sum(axis=<NUM_LIT:1>) / positions.sum(axis=<NUM_LIT:1>)<EOL>net_exp = pos_no_cash.sum(axis=<NUM_LIT:1>) / positions.sum(axis=<NUM_LIT:1>)<EOL>ax.fill_between(l_exp.index,<EOL><NUM_LIT:0>,<EOL>l_exp.values,<EOL>label='<STR_LIT>', color='<STR_LIT>', alpha=<NUM_LIT:0.5>)<EOL>ax.fill_between(s_exp.index,<EOL><NUM_LIT:0>,<EOL>s_exp.values,<EOL>label='<STR_LIT>', color='<STR_LIT>', alpha=<NUM_LIT:0.5>)<EOL>ax.plot(net_exp.index, net_exp.values,<EOL>label='<STR_LIT>', color='<STR_LIT>', linestyle='<STR_LIT>')<EOL>ax.set_xlim((returns.index[<NUM_LIT:0>], returns.index[-<NUM_LIT:1>]))<EOL>ax.set_title(\"<STR_LIT>\")<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.legend(loc='<STR_LIT>', frameon=True, framealpha=<NUM_LIT:0.5>)<EOL>ax.set_xlabel('<STR_LIT>')<EOL>return ax<EOL>", "docstring": "Plots a cake chart of the long and short exposure.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\npositions_alloc : pd.DataFrame\n    Portfolio allocation of positions. See\n    pos.get_percent_alloc.\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n**kwargs, optional\n    Passed to plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m18"}
{"signature": "def plot_daily_turnover_hist(transactions, positions,<EOL>ax=None, **kwargs):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>turnover = txn.get_turnover(positions, transactions)<EOL>sns.distplot(turnover, ax=ax, **kwargs)<EOL>ax.set_title('<STR_LIT>')<EOL>ax.set_xlabel('<STR_LIT>')<EOL>return ax<EOL>", "docstring": "Plots a histogram of daily turnover rates.\n\nParameters\n----------\ntransactions : pd.DataFrame\n    Prices and amounts of executed trades. One row per trade.\n     - See full explanation in tears.create_full_tear_sheet.\npositions : pd.DataFrame\n    Daily net position values.\n     - See full explanation in tears.create_full_tear_sheet.\nax : matplotlib.Axes, optional\n    Axes upon which to plot.\n**kwargs, optional\n    Passed to seaborn plotting function.\n\nReturns\n-------\nax : matplotlib.Axes\n    The axes that were plotted on.", "id": "f12210:m27"}
{"signature": "def get_long_short_pos(positions):", "body": "pos_wo_cash = positions.drop('<STR_LIT>', axis=<NUM_LIT:1>)<EOL>longs = pos_wo_cash[pos_wo_cash > <NUM_LIT:0>].sum(axis=<NUM_LIT:1>).fillna(<NUM_LIT:0>)<EOL>shorts = pos_wo_cash[pos_wo_cash < <NUM_LIT:0>].sum(axis=<NUM_LIT:1>).fillna(<NUM_LIT:0>)<EOL>cash = positions.cash<EOL>net_liquidation = longs + shorts + cash<EOL>df_pos = pd.DataFrame({'<STR_LIT>': longs.divide(net_liquidation, axis='<STR_LIT:index>'),<EOL>'<STR_LIT>': shorts.divide(net_liquidation,<EOL>axis='<STR_LIT:index>')})<EOL>df_pos['<STR_LIT>'] = df_pos['<STR_LIT>'] + df_pos['<STR_LIT>']<EOL>return df_pos<EOL>", "docstring": "Determines the long and short allocations in a portfolio.\n\nParameters\n----------\npositions : pd.DataFrame\n    The positions that the strategy takes over time.\n\nReturns\n-------\ndf_long_short : pd.DataFrame\n    Long and short allocations as a decimal\n    percentage of the total net liquidation", "id": "f12213:m5"}
{"signature": "def get_sector_exposures(positions, symbol_sector_map):", "body": "cash = positions['<STR_LIT>']<EOL>positions = positions.drop('<STR_LIT>', axis=<NUM_LIT:1>)<EOL>unmapped_pos = np.setdiff1d(positions.columns.values,<EOL>list(symbol_sector_map.keys()))<EOL>if len(unmapped_pos) > <NUM_LIT:0>:<EOL><INDENT>warn_message = \"\"\"<STR_LIT>\"\"\".format(<EOL>\"<STR_LIT:U+002CU+0020>\".join(map(str, unmapped_pos)))<EOL>warnings.warn(warn_message, UserWarning)<EOL><DEDENT>sector_exp = positions.groupby(<EOL>by=symbol_sector_map, axis=<NUM_LIT:1>).sum()<EOL>sector_exp['<STR_LIT>'] = cash<EOL>return sector_exp<EOL>", "docstring": "Sum position exposures by sector.\n\nParameters\n----------\npositions : pd.DataFrame\n    Contains position values or amounts.\n    - Example\n        index         'AAPL'         'MSFT'        'CHK'        cash\n        2004-01-09    13939.380     -15012.993    -403.870      1477.483\n        2004-01-12    14492.630     -18624.870    142.630       3989.610\n        2004-01-13    -13853.280    13653.640     -100.980      100.000\nsymbol_sector_map : dict or pd.Series\n    Security identifier to sector mapping.\n    Security ids as keys/index, sectors as values.\n    - Example:\n        {'AAPL' : 'Technology'\n         'MSFT' : 'Technology'\n         'CHK' : 'Natural Resources'}\n\nReturns\n-------\nsector_exp : pd.DataFrame\n    Sectors and their allocations.\n    - Example:\n        index         'Technology'    'Natural Resources' cash\n        2004-01-09    -1073.613       -403.870            1477.4830\n        2004-01-12    -4132.240       142.630             3989.6100\n        2004-01-13    -199.640        -100.980            100.0000", "id": "f12213:m4"}
{"signature": "def get_max_median_position_concentration(positions):", "body": "expos = get_percent_alloc(positions)<EOL>expos = expos.drop('<STR_LIT>', axis=<NUM_LIT:1>)<EOL>longs = expos.where(expos.applymap(lambda x: x > <NUM_LIT:0>))<EOL>shorts = expos.where(expos.applymap(lambda x: x < <NUM_LIT:0>))<EOL>alloc_summary = pd.DataFrame()<EOL>alloc_summary['<STR_LIT>'] = longs.max(axis=<NUM_LIT:1>)<EOL>alloc_summary['<STR_LIT>'] = longs.median(axis=<NUM_LIT:1>)<EOL>alloc_summary['<STR_LIT>'] = shorts.median(axis=<NUM_LIT:1>)<EOL>alloc_summary['<STR_LIT>'] = shorts.min(axis=<NUM_LIT:1>)<EOL>return alloc_summary<EOL>", "docstring": "Finds the max and median long and short position concentrations\nin each time period specified by the index of positions.\n\nParameters\n----------\npositions : pd.DataFrame\n    The positions that the strategy takes over time.\n\nReturns\n-------\npd.DataFrame\n    Columns are max long, max short, median long, and median short\n    position concentrations. Rows are timeperiods.", "id": "f12213:m2"}
{"signature": "def model_returns_normal(data, samples=<NUM_LIT>, progressbar=True):", "body": "with pm.Model() as model:<EOL><INDENT>mu = pm.Normal('<STR_LIT>', mu=<NUM_LIT:0>, sd=<NUM_LIT>, testval=data.mean())<EOL>sigma = pm.HalfCauchy('<STR_LIT>', beta=<NUM_LIT:1>, testval=data.std())<EOL>returns = pm.Normal('<STR_LIT>', mu=mu, sd=sigma, observed=data)<EOL>pm.Deterministic(<EOL>'<STR_LIT>',<EOL>returns.distribution.variance**<NUM_LIT> *<EOL>np.sqrt(<NUM_LIT>))<EOL>pm.Deterministic(<EOL>'<STR_LIT>',<EOL>returns.distribution.mean /<EOL>returns.distribution.variance**<NUM_LIT> *<EOL>np.sqrt(<NUM_LIT>))<EOL>trace = pm.sample(samples, progressbar=progressbar)<EOL><DEDENT>return model, trace<EOL>", "docstring": "Run Bayesian model assuming returns are normally distributed.\n\nParameters\n----------\nreturns : pandas.Series\n    Series of simple returns of an algorithm or stock.\nsamples : int (optional)\n    Number of posterior samples to draw.\n\nReturns\n-------\nmodel : pymc.Model object\n    PyMC3 model containing all random variables.\ntrace : pymc3.sampling.BaseTrace object\n    A PyMC3 trace object that contains samples for each parameter\n    of the posterior.", "id": "f12216:m1"}
{"signature": "def model_returns_t_alpha_beta(data, bmark, samples=<NUM_LIT>, progressbar=True):", "body": "data_bmark = pd.concat([data, bmark], axis=<NUM_LIT:1>).dropna()<EOL>with pm.Model() as model:<EOL><INDENT>sigma = pm.HalfCauchy(<EOL>'<STR_LIT>',<EOL>beta=<NUM_LIT:1>)<EOL>nu = pm.Exponential('<STR_LIT>', <NUM_LIT:1.> / <NUM_LIT>)<EOL>X = data_bmark.iloc[:, <NUM_LIT:1>]<EOL>y = data_bmark.iloc[:, <NUM_LIT:0>]<EOL>alpha_reg = pm.Normal('<STR_LIT>', mu=<NUM_LIT:0>, sd=<NUM_LIT>)<EOL>beta_reg = pm.Normal('<STR_LIT>', mu=<NUM_LIT:0>, sd=<NUM_LIT:1>)<EOL>mu_reg = alpha_reg + beta_reg * X<EOL>pm.StudentT('<STR_LIT>',<EOL>nu=nu + <NUM_LIT:2>,<EOL>mu=mu_reg,<EOL>sd=sigma,<EOL>observed=y)<EOL>trace = pm.sample(samples, progressbar=progressbar)<EOL><DEDENT>return model, trace<EOL>", "docstring": "Run Bayesian alpha-beta-model with T distributed returns.\n\nThis model estimates intercept (alpha) and slope (beta) of two\nreturn sets. Usually, these will be algorithm returns and\nbenchmark returns (e.g. S&P500). The data is assumed to be T\ndistributed and thus is robust to outliers and takes tail events\ninto account.  
If a pandas.DataFrame is passed as a benchmark, then\nmultiple linear regression is used to estimate alpha and beta.\n\nParameters\n----------\nreturns : pandas.Series\n    Series of simple returns of an algorithm or stock.\nbmark : pandas.DataFrame\n    DataFrame of benchmark returns (e.g., S&P500) or risk factors (e.g.,\n    Fama-French SMB, HML, and UMD).\n    If bmark has more recent returns than returns_train, these dates\n    will be treated as missing values and predictions will be\n    generated for them taking market correlations into account.\nsamples : int (optional)\n    Number of posterior samples to draw.\n\nReturns\n-------\nmodel : pymc.Model object\n    PyMC3 model containing all random variables.\ntrace : pymc3.sampling.BaseTrace object\n    A PyMC3 trace object that contains samples for each parameter\n    of the posterior.", "id": "f12216:m0"}
{"signature": "def plot_stoch_vol(data, trace=None, ax=None):", "body": "if trace is None:<EOL><INDENT>trace = model_stoch_vol(data)<EOL><DEDENT>if ax is None:<EOL><INDENT>fig, ax = plt.subplots(figsize=(<NUM_LIT:15>, <NUM_LIT:8>))<EOL><DEDENT>data.abs().plot(ax=ax)<EOL>ax.plot(data.index, np.exp(trace['<STR_LIT:s>', ::<NUM_LIT:30>].T), '<STR_LIT:r>', alpha=<NUM_LIT>)<EOL>ax.set(title='<STR_LIT>', xlabel='<STR_LIT>', ylabel='<STR_LIT>')<EOL>ax.legend(['<STR_LIT>', '<STR_LIT>'],<EOL>frameon=True, framealpha=<NUM_LIT:0.5>)<EOL>return ax<EOL>", "docstring": "Generate plot for stochastic volatility model.\n\nParameters\n----------\ndata : pandas.Series\n    Returns to model.\ntrace : pymc3.sampling.BaseTrace object, optional\n    trace as returned by model_stoch_vol\n    If not passed, sample from model.\nax : matplotlib.axes object, optional\n    Plot into axes object\n\nReturns\n-------\nax object\n\nSee Also\n--------\nmodel_stoch_vol : run stochastic volatility model", "id": "f12216:m6"}
{"signature": "def model_best(y1, y2, samples=<NUM_LIT:1000>, progressbar=True):", "body": "y = np.concatenate((y1, y2))<EOL>mu_m = np.mean(y)<EOL>mu_p = <NUM_LIT> * <NUM_LIT:1> / np.std(y)**<NUM_LIT:2><EOL>sigma_low = np.std(y) / <NUM_LIT:1000><EOL>sigma_high = np.std(y) * <NUM_LIT:1000><EOL>with pm.Model() as model:<EOL><INDENT>group1_mean = pm.Normal('<STR_LIT>', mu=mu_m, tau=mu_p,<EOL>testval=y1.mean())<EOL>group2_mean = pm.Normal('<STR_LIT>', mu=mu_m, tau=mu_p,<EOL>testval=y2.mean())<EOL>group1_std = pm.Uniform('<STR_LIT>', lower=sigma_low,<EOL>upper=sigma_high, testval=y1.std())<EOL>group2_std = pm.Uniform('<STR_LIT>', lower=sigma_low,<EOL>upper=sigma_high, testval=y2.std())<EOL>nu = pm.Exponential('<STR_LIT>', <NUM_LIT:1> / <NUM_LIT>, testval=<NUM_LIT>) + <NUM_LIT><EOL>returns_group1 = pm.StudentT('<STR_LIT>', nu=nu, mu=group1_mean,<EOL>lam=group1_std**-<NUM_LIT:2>, observed=y1)<EOL>returns_group2 = pm.StudentT('<STR_LIT>', nu=nu, mu=group2_mean,<EOL>lam=group2_std**-<NUM_LIT:2>, observed=y2)<EOL>diff_of_means = pm.Deterministic('<STR_LIT>',<EOL>group2_mean - group1_mean)<EOL>pm.Deterministic('<STR_LIT>',<EOL>group2_std - group1_std)<EOL>pm.Deterministic('<STR_LIT>', diff_of_means /<EOL>pm.math.sqrt((group1_std**<NUM_LIT:2> +<EOL>group2_std**<NUM_LIT:2>) / <NUM_LIT:2>))<EOL>pm.Deterministic('<STR_LIT>',<EOL>returns_group1.distribution.variance**<NUM_LIT> *<EOL>np.sqrt(<NUM_LIT>))<EOL>pm.Deterministic('<STR_LIT>',<EOL>returns_group2.distribution.variance**<NUM_LIT> *<EOL>np.sqrt(<NUM_LIT>))<EOL>pm.Deterministic('<STR_LIT>', returns_group1.distribution.mean /<EOL>returns_group1.distribution.variance**<NUM_LIT> *<EOL>np.sqrt(<NUM_LIT>))<EOL>pm.Deterministic('<STR_LIT>', returns_group2.distribution.mean /<EOL>returns_group2.distribution.variance**<NUM_LIT> *<EOL>np.sqrt(<NUM_LIT>))<EOL>trace = pm.sample(samples, progressbar=progressbar)<EOL><DEDENT>return model, trace<EOL>", "docstring": "Bayesian Estimation Supersedes the T-Test\n\nThis model runs a Bayesian 
hypothesis comparing if y1 and y2 come\nfrom the same distribution. Returns are assumed to be T-distributed.\n\nIn addition, computes annual volatility and Sharpe of in and\nout-of-sample periods.\n\nThis model replicates the example used in:\nKruschke, John. (2012) Bayesian estimation supersedes the t\ntest. Journal of Experimental Psychology: General.\n\nParameters\n----------\ny1 : array-like\n    Array of returns (e.g. in-sample)\ny2 : array-like\n    Array of returns (e.g. out-of-sample)\nsamples : int, optional\n    Number of posterior samples to draw.\n\nReturns\n-------\nmodel : pymc.Model object\n    PyMC3 model containing all random variables.\ntrace : pymc3.sampling.BaseTrace object\n    A PyMC3 trace object that contains samples for each parameter\n    of the posterior.\n\nSee Also\n--------\nplot_stoch_vol : plotting of tochastic volatility model", "id": "f12216:m3"}
{"signature": "def extract_round_trips(transactions,<EOL>portfolio_value=None):", "body": "transactions = _groupby_consecutive(transactions)<EOL>roundtrips = []<EOL>for sym, trans_sym in transactions.groupby('<STR_LIT>'):<EOL><INDENT>trans_sym = trans_sym.sort_index()<EOL>price_stack = deque()<EOL>dt_stack = deque()<EOL>trans_sym['<STR_LIT>'] = trans_sym.price *np.sign(trans_sym.amount)<EOL>trans_sym['<STR_LIT>'] = trans_sym.amount.abs().astype(int)<EOL>for dt, t in trans_sym.iterrows():<EOL><INDENT>if t.price < <NUM_LIT:0>:<EOL><INDENT>warnings.warn('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>continue<EOL><DEDENT>indiv_prices = [t.signed_price] * t.abs_amount<EOL>if (len(price_stack) == <NUM_LIT:0>) or(copysign(<NUM_LIT:1>, price_stack[-<NUM_LIT:1>]) == copysign(<NUM_LIT:1>, t.amount)):<EOL><INDENT>price_stack.extend(indiv_prices)<EOL>dt_stack.extend([dt] * len(indiv_prices))<EOL><DEDENT>else:<EOL><INDENT>pnl = <NUM_LIT:0><EOL>invested = <NUM_LIT:0><EOL>cur_open_dts = []<EOL>for price in indiv_prices:<EOL><INDENT>if len(price_stack) != <NUM_LIT:0> and(copysign(<NUM_LIT:1>, price_stack[-<NUM_LIT:1>]) != copysign(<NUM_LIT:1>, price)):<EOL><INDENT>prev_price = price_stack.popleft()<EOL>prev_dt = dt_stack.popleft()<EOL>pnl += -(price + prev_price)<EOL>cur_open_dts.append(prev_dt)<EOL>invested += abs(prev_price)<EOL><DEDENT>else:<EOL><INDENT>price_stack.append(price)<EOL>dt_stack.append(dt)<EOL><DEDENT><DEDENT>roundtrips.append({'<STR_LIT>': pnl,<EOL>'<STR_LIT>': cur_open_dts[<NUM_LIT:0>],<EOL>'<STR_LIT>': dt,<EOL>'<STR_LIT>': price < <NUM_LIT:0>,<EOL>'<STR_LIT>': pnl / invested,<EOL>'<STR_LIT>': sym,<EOL>})<EOL><DEDENT><DEDENT><DEDENT>roundtrips = pd.DataFrame(roundtrips)<EOL>roundtrips['<STR_LIT>'] = roundtrips['<STR_LIT>'].sub(roundtrips['<STR_LIT>'])<EOL>if portfolio_value is not None:<EOL><INDENT>pv = pd.DataFrame(portfolio_value,<EOL>columns=['<STR_LIT>']).assign(date=portfolio_value.index)<EOL>roundtrips['<STR_LIT:date>'] = roundtrips.close_dt.apply(lambda 
x:<EOL>x.replace(hour=<NUM_LIT:0>,<EOL>minute=<NUM_LIT:0>,<EOL>second=<NUM_LIT:0>))<EOL>tmp = roundtrips.join(pv, on='<STR_LIT:date>', lsuffix='<STR_LIT:_>')<EOL>roundtrips['<STR_LIT>'] = tmp.pnl / tmp.portfolio_value<EOL>roundtrips = roundtrips.drop('<STR_LIT:date>', axis='<STR_LIT>')<EOL><DEDENT>return roundtrips<EOL>", "docstring": "Group transactions into \"round trips\". First, transactions are\n    grouped by day and directionality. Then, long and short\n    transactions are matched to create round-trip round_trips for which\n    PnL, duration and returns are computed. Crossings where a position\n    changes from long to short and vice-versa are handled correctly.\n\n    Under the hood, we reconstruct the individual shares in a\n    portfolio over time and match round_trips in a FIFO-order.\n\n    For example, the following transactions would constitute one round trip:\n    index                  amount   price    symbol\n    2004-01-09 12:18:01    10       50      'AAPL'\n    2004-01-09 15:12:53    10       100      'AAPL'\n    2004-01-13 14:41:23    -10      100      'AAPL'\n    2004-01-13 15:23:34    -10      200       'AAPL'\n\n    First, the first two and last two round_trips will be merged into a two\n    single transactions (computing the price via vwap). Then, during\n    the portfolio reconstruction, the two resulting transactions will\n    be merged and result in 1 round-trip trade with a PnL of\n    (150 * 20) - (75 * 20) = 1500.\n\n    Note, that round trips do not have to close out positions\n    completely. For example, we could have removed the last\n    transaction in the example above and still generated a round-trip\n    over 10 shares with 10 shares left in the portfolio to be matched\n    with a later transaction.\n\n    Parameters\n    ----------\n    transactions : pd.DataFrame\n        Prices and amounts of executed round_trips. 
One row per trade.\n        - See full explanation in tears.create_full_tear_sheet\n\n    portfolio_value : pd.Series (optional)\n        Portfolio value (all net assets including cash) over time.\n        Note that portfolio_value needs to beginning of day, so either\n        use .shift() or positions.sum(axis='columns') / (1+returns).\n\n    Returns\n    -------\n    round_trips : pd.DataFrame\n        DataFrame with one row per round trip.  The returns column\n        contains returns in respect to the portfolio value while\n        rt_returns are the returns in regards to the invested capital\n        into that partiulcar round-trip.", "id": "f12217:m2"}
{"signature": "def _groupby_consecutive(txn, max_delta=pd.Timedelta('<STR_LIT>')):", "body": "def vwap(transaction):<EOL><INDENT>if transaction.amount.sum() == <NUM_LIT:0>:<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL>return np.nan<EOL><DEDENT>return (transaction.amount * transaction.price).sum() /transaction.amount.sum()<EOL><DEDENT>out = []<EOL>for sym, t in txn.groupby('<STR_LIT>'):<EOL><INDENT>t = t.sort_index()<EOL>t.index.name = '<STR_LIT>'<EOL>t = t.reset_index()<EOL>t['<STR_LIT>'] = t.amount > <NUM_LIT:0><EOL>t['<STR_LIT>'] = (t.order_sign.shift(<EOL><NUM_LIT:1>) != t.order_sign).astype(int).cumsum()<EOL>t['<STR_LIT>'] = ((t.dt.sub(t.dt.shift(<NUM_LIT:1>))) ><EOL>max_delta).astype(int).cumsum()<EOL>grouped_price = (t.groupby(('<STR_LIT>',<EOL>'<STR_LIT>'))<EOL>.apply(vwap))<EOL>grouped_price.name = '<STR_LIT>'<EOL>grouped_rest = t.groupby(('<STR_LIT>', '<STR_LIT>')).agg({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'})<EOL>grouped = grouped_rest.join(grouped_price)<EOL>out.append(grouped)<EOL><DEDENT>out = pd.concat(out)<EOL>out = out.set_index('<STR_LIT>')<EOL>return out<EOL>", "docstring": "Merge transactions of the same direction separated by less than\n    max_delta time duration.\n\n    Parameters\n    ----------\n    transactions : pd.DataFrame\n        Prices and amounts of executed round_trips. One row per trade.\n        - See full explanation in tears.create_full_tear_sheet\n\n    max_delta : pandas.Timedelta (optional)\n        Merge transactions in the same direction separated by less\n        than max_delta time duration.\n\n\n    Returns\n    -------\n    transactions : pd.DataFrame", "id": "f12217:m1"}
{"signature": "def _align_and_warn(returns,<EOL>positions,<EOL>factor_returns,<EOL>factor_loadings,<EOL>transactions=None,<EOL>pos_in_dollars=True):", "body": "missing_stocks = positions.columns.difference(<EOL>factor_loadings.index.get_level_values(<NUM_LIT:1>).unique()<EOL>)<EOL>num_stocks = len(positions.columns) - <NUM_LIT:1><EOL>missing_stocks = missing_stocks.drop('<STR_LIT>')<EOL>num_stocks_covered = num_stocks - len(missing_stocks)<EOL>missing_ratio = round(len(missing_stocks) / num_stocks, ndigits=<NUM_LIT:3>)<EOL>if num_stocks_covered == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>if len(missing_stocks) > <NUM_LIT:0>:<EOL><INDENT>if len(missing_stocks) > <NUM_LIT:5>:<EOL><INDENT>missing_stocks_displayed = (<EOL>\"<STR_LIT>\"<EOL>).format(len(missing_stocks),<EOL>'<STR_LIT:U+002CU+0020>'.join(missing_stocks[:<NUM_LIT:5>].map(str)),<EOL>missing_stocks[-<NUM_LIT:1>])<EOL>avg_allocation_msg = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>missing_stocks_displayed = (<EOL>\"<STR_LIT>\"<EOL>).format(list(missing_stocks))<EOL>avg_allocation_msg = \"<STR_LIT>\"<EOL><DEDENT>missing_stocks_warning_msg = (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT:\\n>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT:\\n>\"<EOL>\"<STR_LIT>\"<EOL>).format(<EOL>missing_stocks_displayed,<EOL>missing_ratio,<EOL>avg_allocation_msg,<EOL>positions[missing_stocks[:<NUM_LIT:5>].union(missing_stocks[[-<NUM_LIT:1>]])].mean(),<EOL>)<EOL>warnings.warn(missing_stocks_warning_msg)<EOL>positions = positions.drop(missing_stocks, axis='<STR_LIT>',<EOL>errors='<STR_LIT:ignore>')<EOL><DEDENT>missing_factor_loadings_index = positions.index.difference(<EOL>factor_loadings.index.get_level_values(<NUM_LIT:0>).unique()<EOL>)<EOL>missing_factor_loadings_index = positions.index.difference(<EOL>factor_loadings.index.get_level_values(<NUM_LIT:0>).unique()<EOL>)<EOL>if len(missing_factor_loadings_index) > 
<NUM_LIT:0>:<EOL><INDENT>if len(missing_factor_loadings_index) > <NUM_LIT:5>:<EOL><INDENT>missing_dates_displayed = (<EOL>\"<STR_LIT>\"<EOL>).format(<EOL>missing_factor_loadings_index[<NUM_LIT:0>],<EOL>missing_factor_loadings_index[-<NUM_LIT:1>]<EOL>)<EOL><DEDENT>else:<EOL><INDENT>missing_dates_displayed = list(missing_factor_loadings_index)<EOL><DEDENT>warning_msg = (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>).format(len(missing_factor_loadings_index), missing_dates_displayed)<EOL>warnings.warn(warning_msg)<EOL>positions = positions.drop(missing_factor_loadings_index,<EOL>errors='<STR_LIT:ignore>')<EOL>returns = returns.drop(missing_factor_loadings_index, errors='<STR_LIT:ignore>')<EOL>factor_returns = factor_returns.drop(missing_factor_loadings_index,<EOL>errors='<STR_LIT:ignore>')<EOL><DEDENT>if transactions is not None and pos_in_dollars:<EOL><INDENT>turnover = get_turnover(positions, transactions).mean()<EOL>if turnover > PERF_ATTRIB_TURNOVER_THRESHOLD:<EOL><INDENT>warning_msg = (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT:\\n>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL>warnings.warn(warning_msg)<EOL><DEDENT><DEDENT>return (returns, positions, factor_returns, factor_loadings)<EOL>", "docstring": "Make sure that all inputs have matching dates and tickers,\nand raise warnings if necessary.", "id": "f12219:m8"}
{"signature": "def _cumulative_returns_less_costs(returns, costs):", "body": "if costs is None:<EOL><INDENT>return ep.cum_returns(returns)<EOL><DEDENT>return ep.cum_returns(returns - costs)<EOL>", "docstring": "Compute cumulative returns, less costs.", "id": "f12219:m10"}
{"signature": "def create_perf_attrib_stats(perf_attrib, risk_exposures):", "body": "summary = OrderedDict()<EOL>total_returns = perf_attrib['<STR_LIT>']<EOL>specific_returns = perf_attrib['<STR_LIT>']<EOL>common_returns = perf_attrib['<STR_LIT>']<EOL>summary['<STR_LIT>'] =ep.annual_return(specific_returns)<EOL>summary['<STR_LIT>'] =ep.annual_return(common_returns)<EOL>summary['<STR_LIT>'] =ep.annual_return(total_returns)<EOL>summary['<STR_LIT>'] =ep.sharpe_ratio(specific_returns)<EOL>summary['<STR_LIT>'] =ep.cum_returns_final(specific_returns)<EOL>summary['<STR_LIT>'] =ep.cum_returns_final(common_returns)<EOL>summary['<STR_LIT>'] =ep.cum_returns_final(total_returns)<EOL>summary = pd.Series(summary, name='<STR_LIT>')<EOL>annualized_returns_by_factor = [ep.annual_return(perf_attrib[c])<EOL>for c in risk_exposures.columns]<EOL>cumulative_returns_by_factor = [ep.cum_returns_final(perf_attrib[c])<EOL>for c in risk_exposures.columns]<EOL>risk_exposure_summary = pd.DataFrame(<EOL>data=OrderedDict([<EOL>(<EOL>'<STR_LIT>',<EOL>risk_exposures.mean(axis='<STR_LIT>')<EOL>),<EOL>('<STR_LIT>', annualized_returns_by_factor),<EOL>('<STR_LIT>', cumulative_returns_by_factor),<EOL>]),<EOL>index=risk_exposures.columns,<EOL>)<EOL>return summary, risk_exposure_summary<EOL>", "docstring": "Takes perf attribution data over a period of time and computes annualized\nmultifactor alpha, multifactor sharpe, risk exposures.", "id": "f12219:m2"}
{"signature": "def plot_returns(perf_attrib_data, cost=None, ax=None):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>returns = perf_attrib_data['<STR_LIT>']<EOL>total_returns_label = '<STR_LIT>'<EOL>cumulative_returns_less_costs = _cumulative_returns_less_costs(<EOL>returns,<EOL>cost<EOL>)<EOL>if cost is not None:<EOL><INDENT>total_returns_label += '<STR_LIT>'<EOL><DEDENT>specific_returns = perf_attrib_data['<STR_LIT>']<EOL>common_returns = perf_attrib_data['<STR_LIT>']<EOL>ax.plot(cumulative_returns_less_costs, color='<STR_LIT:b>',<EOL>label=total_returns_label)<EOL>ax.plot(ep.cum_returns(specific_returns), color='<STR_LIT:g>',<EOL>label='<STR_LIT>')<EOL>ax.plot(ep.cum_returns(common_returns), color='<STR_LIT:r>',<EOL>label='<STR_LIT>')<EOL>if cost is not None:<EOL><INDENT>ax.plot(-ep.cum_returns(cost), color='<STR_LIT:k>',<EOL>label='<STR_LIT>')<EOL><DEDENT>ax.set_title('<STR_LIT>')<EOL>ax.set_ylabel('<STR_LIT>')<EOL>configure_legend(ax)<EOL>return ax<EOL>", "docstring": "Plot total, specific, and common returns.\n\nParameters\n----------\nperf_attrib_data : pd.DataFrame\n    df with factors, common returns, and specific returns as columns,\n    and datetimes as index. Assumes the `total_returns` column is NOT\n    cost adjusted.\n    - Example:\n                    momentum  reversal  common_returns  specific_returns\n        dt\n        2017-01-01  0.249087  0.935925        1.185012          1.185012\n        2017-01-02 -0.003194 -0.400786       -0.403980         -0.403980\n\ncost : pd.Series, optional\n    if present, gets subtracted from `perf_attrib_data['total_returns']`,\n    and gets plotted separately\n\nax :  matplotlib.axes.Axes\n    axes on which plots are made. if None, current axes will be used\n\nReturns\n-------\nax :  matplotlib.axes.Axes", "id": "f12219:m4"}
{"signature": "def plot_alpha_returns(alpha_returns, ax=None):", "body": "if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>ax.hist(alpha_returns, color='<STR_LIT:g>', label='<STR_LIT>')<EOL>ax.set_title('<STR_LIT>')<EOL>ax.axvline(<NUM_LIT:0>, color='<STR_LIT:k>', linestyle='<STR_LIT>', label='<STR_LIT>')<EOL>avg = alpha_returns.mean()<EOL>ax.axvline(avg, color='<STR_LIT:b>', label='<STR_LIT>'.format(avg))<EOL>configure_legend(ax)<EOL>return ax<EOL>", "docstring": "Plot histogram of daily multi-factor alpha returns (specific returns).\n\nParameters\n----------\nalpha_returns : pd.Series\n    series of daily alpha returns indexed by datetime\n\nax :  matplotlib.axes.Axes\n    axes on which plots are made. if None, current axes will be used\n\nReturns\n-------\nax :  matplotlib.axes.Axes", "id": "f12219:m5"}
{"signature": "def _stack_positions(positions, pos_in_dollars=True):", "body": "if pos_in_dollars:<EOL><INDENT>positions = get_percent_alloc(positions)<EOL><DEDENT>positions = positions.drop('<STR_LIT>', axis='<STR_LIT>')<EOL>positions = positions.stack()<EOL>positions.index = positions.index.set_names(['<STR_LIT>', '<STR_LIT>'])<EOL>return positions<EOL>", "docstring": "Convert positions to percentages if necessary, and change them\nto long format.\n\nParameters\n----------\npositions: pd.DataFrame\n    Daily holdings (in dollars or percentages), indexed by date.\n    Will be converted to percentages if positions are in dollars.\n    Short positions show up as cash in the 'cash' column.\n\npos_in_dollars : bool\n    Flag indicating whether `positions` are in dollars or percentages\n    If True, positions are in dollars.", "id": "f12219:m9"}
{"signature": "def perf_attrib(returns,<EOL>positions,<EOL>factor_returns,<EOL>factor_loadings,<EOL>transactions=None,<EOL>pos_in_dollars=True):", "body": "(returns,<EOL>positions,<EOL>factor_returns,<EOL>factor_loadings) = _align_and_warn(returns,<EOL>positions,<EOL>factor_returns,<EOL>factor_loadings,<EOL>transactions=transactions,<EOL>pos_in_dollars=pos_in_dollars)<EOL>positions = _stack_positions(positions, pos_in_dollars=pos_in_dollars)<EOL>return ep.perf_attrib(returns, positions, factor_returns, factor_loadings)<EOL>", "docstring": "Attributes the performance of a returns stream to a set of risk factors.\n\nPreprocesses inputs, and then calls empyrical.perf_attrib. See\nempyrical.perf_attrib for more info.\n\nPerformance attribution determines how much each risk factor, e.g.,\nmomentum, the technology sector, etc., contributed to total returns, as\nwell as the daily exposure to each of the risk factors. The returns that\ncan be attributed to one of the given risk factors are the\n`common_returns`, and the returns that _cannot_ be attributed to a risk\nfactor are the `specific_returns`, or the alpha. 
The common_returns and\nspecific_returns summed together will always equal the total returns.\n\nParameters\n----------\nreturns : pd.Series\n    Returns for each day in the date range.\n    - Example:\n        2017-01-01   -0.017098\n        2017-01-02    0.002683\n        2017-01-03   -0.008669\n\npositions: pd.DataFrame\n    Daily holdings (in dollars or percentages), indexed by date.\n    Will be converted to percentages if positions are in dollars.\n    Short positions show up as cash in the 'cash' column.\n    - Examples:\n                    AAPL  TLT  XOM  cash\n        2017-01-01    34   58   10     0\n        2017-01-02    22   77   18     0\n        2017-01-03   -15   27   30    15\n\n                        AAPL       TLT       XOM  cash\n        2017-01-01  0.333333  0.568627  0.098039   0.0\n        2017-01-02  0.188034  0.658120  0.153846   0.0\n        2017-01-03  0.208333  0.375000  0.416667   0.0\n\nfactor_returns : pd.DataFrame\n    Returns by factor, with date as index and factors as columns\n    - Example:\n                    momentum  reversal\n        2017-01-01  0.002779 -0.005453\n        2017-01-02  0.001096  0.010290\n\nfactor_loadings : pd.DataFrame\n    Factor loadings for all days in the date range, with date and ticker as\n    index, and factors as columns.\n    - Example:\n                           momentum  reversal\n        dt         ticker\n        2017-01-01 AAPL   -1.592914  0.852830\n                   TLT     0.184864  0.895534\n                   XOM     0.993160  1.149353\n        2017-01-02 AAPL   -0.140009 -0.524952\n                   TLT    -1.066978  0.185435\n                   XOM    -1.798401  0.761549\n\n\ntransactions : pd.DataFrame, optional\n    Executed trade volumes and fill prices. Used to check the turnover of\n    the algorithm. 
Default is None, in which case the turnover check is\n    skipped.\n\n    - One row per trade.\n    - Trades on different names that occur at the\n      same time will have identical indicies.\n    - Example:\n        index                  amount   price    symbol\n        2004-01-09 12:18:01    483      324.12   'AAPL'\n        2004-01-09 12:18:01    122      83.10    'MSFT'\n        2004-01-13 14:12:23    -75      340.43   'AAPL'\n\npos_in_dollars : bool\n    Flag indicating whether `positions` are in dollars or percentages\n    If True, positions are in dollars.\n\nReturns\n-------\ntuple of (risk_exposures_portfolio, perf_attribution)\n\nrisk_exposures_portfolio : pd.DataFrame\n    df indexed by datetime, with factors as columns\n    - Example:\n                    momentum  reversal\n        dt\n        2017-01-01 -0.238655  0.077123\n        2017-01-02  0.821872  1.520515\n\nperf_attribution : pd.DataFrame\n    df with factors, common returns, and specific returns as columns,\n    and datetimes as index\n    - Example:\n                    momentum  reversal  common_returns  specific_returns\n        dt\n        2017-01-01  0.249087  0.935925        1.185012          1.185012\n        2017-01-02 -0.003194 -0.400786       -0.403980         -0.403980", "id": "f12219:m0"}
{"signature": "def configure_legend(ax, autofmt_xdate=True, change_colors=False,<EOL>rotation=<NUM_LIT:30>, ha='<STR_LIT:right>'):", "body": "chartBox = ax.get_position()<EOL>ax.set_position([chartBox.x0, chartBox.y0,<EOL>chartBox.width * <NUM_LIT>, chartBox.height])<EOL>handles, labels = ax.get_legend_handles_labels()<EOL>handles_and_labels_sorted = sorted(zip(handles, labels),<EOL>key=lambda x: x[<NUM_LIT:0>].get_ydata()[-<NUM_LIT:1>],<EOL>reverse=True)<EOL>handles_sorted = [h[<NUM_LIT:0>] for h in handles_and_labels_sorted]<EOL>labels_sorted = [h[<NUM_LIT:1>] for h in handles_and_labels_sorted]<EOL>if change_colors:<EOL><INDENT>for handle, color in zip(handles_sorted,<EOL>cycle(COLORS)):<EOL><INDENT>handle.set_color(color)<EOL><DEDENT><DEDENT>ax.legend(handles=handles_sorted,<EOL>labels=labels_sorted,<EOL>frameon=True,<EOL>framealpha=<NUM_LIT:0.5>,<EOL>loc='<STR_LIT>',<EOL>bbox_to_anchor=(<NUM_LIT>, <NUM_LIT:1>),<EOL>fontsize='<STR_LIT>')<EOL>if autofmt_xdate:<EOL><INDENT>for label in ax.get_xticklabels():<EOL><INDENT>label.set_ha(ha)<EOL>label.set_rotation(rotation)<EOL><DEDENT><DEDENT>", "docstring": "Format legend for perf attribution plots:\n- put legend to the right of plot instead of overlapping with it\n- make legend order match up with graph lines\n- set colors according to colormap", "id": "f12220:m16"}
{"signature": "def check_intraday(estimate, returns, positions, transactions):", "body": "if estimate == '<STR_LIT>':<EOL><INDENT>if positions is not None and transactions is not None:<EOL><INDENT>if detect_intraday(positions, transactions):<EOL><INDENT>warnings.warn('<STR_LIT>' +<EOL>'<STR_LIT>' +<EOL>'<STR_LIT>')<EOL>return estimate_intraday(returns, positions, transactions)<EOL><DEDENT>else:<EOL><INDENT>return positions<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return positions<EOL><DEDENT><DEDENT>elif estimate:<EOL><INDENT>if positions is not None and transactions is not None:<EOL><INDENT>return estimate_intraday(returns, positions, transactions)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return positions<EOL><DEDENT>", "docstring": "Logic for checking if a strategy is intraday and processing it.\n\nParameters\n----------\nestimate: boolean or str, optional\n    Approximate returns for intraday strategies.\n    See description in tears.create_full_tear_sheet.\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in create_full_tear_sheet.\npositions : pd.DataFrame\n    Daily net position values.\n     - See full explanation in create_full_tear_sheet.\ntransactions : pd.DataFrame\n    Prices and amounts of executed trades. One row per trade.\n     - See full explanation in create_full_tear_sheet.\n\nReturns\n-------\npd.DataFrame\n    Daily net position values, adjusted for intraday movement.", "id": "f12220:m9"}
{"signature": "def two_dec_places(x, pos):", "body": "return '<STR_LIT>' % x<EOL>", "docstring": "Adds 1/100th decimal to plot ticks.", "id": "f12220:m1"}
{"signature": "def register_return_func(func):", "body": "SETTINGS['<STR_LIT>'] = func<EOL>", "docstring": "Registers the 'returns_func' that will be called for\nretrieving returns data.\n\nParameters\n----------\nfunc : function\n    A function that returns a pandas Series of asset returns.\n    The signature of the function must be as follows\n\n    >>> func(symbol)\n\n    Where symbol is an asset identifier\n\nReturns\n-------\nNone", "id": "f12220:m14"}
{"signature": "def to_utc(df):", "body": "try:<EOL><INDENT>df.index = df.index.tz_localize('<STR_LIT>')<EOL><DEDENT>except TypeError:<EOL><INDENT>df.index = df.index.tz_convert('<STR_LIT>')<EOL><DEDENT>return df<EOL>", "docstring": "For use in tests; applied UTC timestamp to DataFrame.", "id": "f12220:m12"}
{"signature": "def to_series(df):", "body": "return df[df.columns[<NUM_LIT:0>]]<EOL>", "docstring": "For use in tests; converts DataFrame's first column to Series.", "id": "f12220:m13"}
{"signature": "def print_table(table,<EOL>name=None,<EOL>float_format=None,<EOL>formatters=None,<EOL>header_rows=None):", "body": "if isinstance(table, pd.Series):<EOL><INDENT>table = pd.DataFrame(table)<EOL><DEDENT>if name is not None:<EOL><INDENT>table.columns.name = name<EOL><DEDENT>html = table.to_html(float_format=float_format, formatters=formatters)<EOL>if header_rows is not None:<EOL><INDENT>n_cols = html.split('<STR_LIT>')[<NUM_LIT:1>].split('<STR_LIT>')[<NUM_LIT:0>].count('<STR_LIT>')<EOL>rows = '<STR_LIT>'<EOL>for name, value in header_rows.items():<EOL><INDENT>rows += ('<STR_LIT>' +<EOL>'<STR_LIT>') % (name, n_cols, value)<EOL><DEDENT>html = html.replace('<STR_LIT>', '<STR_LIT>' + rows)<EOL><DEDENT>display(HTML(html))<EOL>", "docstring": "Pretty print a pandas DataFrame.\n\nUses HTML output if running inside Jupyter Notebook, otherwise\nformatted text output.\n\nParameters\n----------\ntable : pandas.Series or pandas.DataFrame\n    Table to pretty-print.\nname : str, optional\n    Table name to display in upper left corner.\nfloat_format : function, optional\n    Formatter to use for displaying table elements, passed as the\n    `float_format` arg to pd.Dataframe.to_html.\n    E.g. `'{0:.2%}'.format` for displaying 100 as '100.00%'.\nformatters : list or dict, optional\n    Formatters to use by column, passed as the `formatters` arg to\n    pd.Dataframe.to_html.\nheader_rows : dict, optional\n    Extra rows to display at the top of the table.", "id": "f12220:m6"}
{"signature": "def vectorize(func):", "body": "def wrapper(df, *args, **kwargs):<EOL><INDENT>if df.ndim == <NUM_LIT:1>:<EOL><INDENT>return func(df, *args, **kwargs)<EOL><DEDENT>elif df.ndim == <NUM_LIT:2>:<EOL><INDENT>return df.apply(func, *args, **kwargs)<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Decorator so that functions can be written to work on Series but\nmay still be called with DataFrames.", "id": "f12220:m4"}
{"signature": "def rolling_volatility(returns, rolling_vol_window):", "body": "return returns.rolling(rolling_vol_window).std()* np.sqrt(APPROX_BDAYS_PER_YEAR)<EOL>", "docstring": "Determines the rolling volatility of a strategy.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nrolling_vol_window : int\n    Length of rolling window, in days, over which to compute.\n\nReturns\n-------\npd.Series\n    Rolling volatility.", "id": "f12221:m30"}
{"signature": "def perf_stats(returns, factor_returns=None, positions=None,<EOL>transactions=None, turnover_denom='<STR_LIT>'):", "body": "stats = pd.Series()<EOL>for stat_func in SIMPLE_STAT_FUNCS:<EOL><INDENT>stats[STAT_FUNC_NAMES[stat_func.__name__]] = stat_func(returns)<EOL><DEDENT>if positions is not None:<EOL><INDENT>stats['<STR_LIT>'] = gross_lev(positions).mean()<EOL>if transactions is not None:<EOL><INDENT>stats['<STR_LIT>'] = get_turnover(positions,<EOL>transactions,<EOL>turnover_denom).mean()<EOL><DEDENT><DEDENT>if factor_returns is not None:<EOL><INDENT>for stat_func in FACTOR_STAT_FUNCS:<EOL><INDENT>res = stat_func(returns, factor_returns)<EOL>stats[STAT_FUNC_NAMES[stat_func.__name__]] = res<EOL><DEDENT><DEDENT>return stats<EOL>", "docstring": "Calculates various performance metrics of a strategy, for use in\nplotting.show_perf_stats.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.Series, optional\n    Daily noncumulative returns of the benchmark factor to which betas are\n    computed. Usually a benchmark such as market returns.\n     - This is in the same style as returns.\n     - If None, do not compute alpha, beta, and information ratio.\npositions : pd.DataFrame\n    Daily net position values.\n     - See full explanation in tears.create_full_tear_sheet.\ntransactions : pd.DataFrame\n    Prices and amounts of executed trades. One row per trade.\n    - See full explanation in tears.create_full_tear_sheet.\nturnover_denom : str\n    Either AGB or portfolio_value, default AGB.\n    - See full explanation in txn.get_turnover.\n\nReturns\n-------\npd.Series\n    Performance metrics.", "id": "f12221:m22"}
{"signature": "def common_sense_ratio(returns):", "body": "return ep.tail_ratio(returns) *(<NUM_LIT:1> + ep.annual_return(returns))<EOL>", "docstring": "Common sense ratio is the multiplication of the tail ratio and the\nGain-to-Pain-Ratio -- sum(profits) / sum(losses).\n\nSee http://bit.ly/1ORzGBk for more information on motivation of\nthis metric.\n\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\n\nReturns\n-------\nfloat\n    common sense ratio", "id": "f12221:m14"}
{"signature": "def get_max_drawdown_underwater(underwater):", "body": "valley = np.argmin(underwater)  <EOL>peak = underwater[:valley][underwater[:valley] == <NUM_LIT:0>].index[-<NUM_LIT:1>]<EOL>try:<EOL><INDENT>recovery = underwater[valley:][underwater[valley:] == <NUM_LIT:0>].index[<NUM_LIT:0>]<EOL><DEDENT>except IndexError:<EOL><INDENT>recovery = np.nan  <EOL><DEDENT>return peak, valley, recovery<EOL>", "docstring": "Determines peak, valley, and recovery dates given an 'underwater'\nDataFrame.\n\nAn underwater DataFrame is a DataFrame that has precomputed\nrolling drawdown.\n\nParameters\n----------\nunderwater : pd.Series\n   Underwater returns (rolling drawdown) of a strategy.\n\nReturns\n-------\npeak : datetime\n    The maximum drawdown's peak.\nvalley : datetime\n    The maximum drawdown's valley.\nrecovery : datetime\n    The maximum drawdown's recovery.", "id": "f12221:m26"}
{"signature": "def rolling_regression(returns, factor_returns,<EOL>rolling_window=APPROX_BDAYS_PER_MONTH * <NUM_LIT:6>,<EOL>nan_threshold=<NUM_LIT:0.1>):", "body": "<EOL>ret_no_na = returns.dropna()<EOL>columns = ['<STR_LIT>'] + factor_returns.columns.tolist()<EOL>rolling_risk = pd.DataFrame(columns=columns,<EOL>index=ret_no_na.index)<EOL>rolling_risk.index.name = '<STR_LIT>'<EOL>for beg, end in zip(ret_no_na.index[:-rolling_window],<EOL>ret_no_na.index[rolling_window:]):<EOL><INDENT>returns_period = ret_no_na[beg:end]<EOL>factor_returns_period = factor_returns.loc[returns_period.index]<EOL>if np.all(factor_returns_period.isnull().mean()) < nan_threshold:<EOL><INDENT>factor_returns_period_dnan = factor_returns_period.dropna()<EOL>reg = linear_model.LinearRegression(fit_intercept=True).fit(<EOL>factor_returns_period_dnan,<EOL>returns_period.loc[factor_returns_period_dnan.index])<EOL>rolling_risk.loc[end, factor_returns.columns] = reg.coef_<EOL>rolling_risk.loc[end, '<STR_LIT>'] = reg.intercept_<EOL><DEDENT><DEDENT>return rolling_risk<EOL>", "docstring": "Computes rolling factor betas using a multivariate linear regression\n(separate linear regressions is problematic because the factors may be\nconfounded).\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nfactor_returns : pd.DataFrame\n    Daily noncumulative returns of the benchmark factor to which betas are\n    computed. Usually a benchmark such as market returns.\n     - Computes rolling beta for each column.\n     - This is in the same style as returns.\nrolling_window : int, optional\n    The days window over which to compute the beta. 
Defaults to 6 months.\nnan_threshold : float, optional\n    If there are more than this fraction of NaNs, the rolling regression\n    for the given date will be skipped.\n\nReturns\n-------\npandas.DataFrame\n    DataFrame containing rolling beta coefficients to SMB, HML and UMD", "id": "f12221:m19"}
{"signature": "def get_top_drawdowns(returns, top=<NUM_LIT:10>):", "body": "returns = returns.copy()<EOL>df_cum = ep.cum_returns(returns, <NUM_LIT:1.0>)<EOL>running_max = np.maximum.accumulate(df_cum)<EOL>underwater = df_cum / running_max - <NUM_LIT:1><EOL>drawdowns = []<EOL>for t in range(top):<EOL><INDENT>peak, valley, recovery = get_max_drawdown_underwater(underwater)<EOL>if not pd.isnull(recovery):<EOL><INDENT>underwater.drop(underwater[peak: recovery].index[<NUM_LIT:1>:-<NUM_LIT:1>],<EOL>inplace=True)<EOL><DEDENT>else:<EOL><INDENT>underwater = underwater.loc[:peak]<EOL><DEDENT>drawdowns.append((peak, valley, recovery))<EOL>if (len(returns) == <NUM_LIT:0>) or (len(underwater) == <NUM_LIT:0>):<EOL><INDENT>break<EOL><DEDENT><DEDENT>return drawdowns<EOL>", "docstring": "Finds top drawdowns, sorted by drawdown amount.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\ntop : int, optional\n    The amount of top drawdowns to find (default 10).\n\nReturns\n-------\ndrawdowns : list\n    List of drawdown peaks, valleys, and recoveries. See get_max_drawdown.", "id": "f12221:m28"}
{"signature": "def summarize_paths(samples, cone_std=(<NUM_LIT:1.>, <NUM_LIT>, <NUM_LIT>), starting_value=<NUM_LIT:1.>):", "body": "cum_samples = ep.cum_returns(samples.T,<EOL>starting_value=starting_value).T<EOL>cum_mean = cum_samples.mean(axis=<NUM_LIT:0>)<EOL>cum_std = cum_samples.std(axis=<NUM_LIT:0>)<EOL>if isinstance(cone_std, (float, int)):<EOL><INDENT>cone_std = [cone_std]<EOL><DEDENT>cone_bounds = pd.DataFrame(columns=pd.Float64Index([]))<EOL>for num_std in cone_std:<EOL><INDENT>cone_bounds.loc[:, float(num_std)] = cum_mean + cum_std * num_std<EOL>cone_bounds.loc[:, float(-num_std)] = cum_mean - cum_std * num_std<EOL><DEDENT>return cone_bounds<EOL>", "docstring": "Gnerate the upper and lower bounds of an n standard deviation\ncone of forecasted cumulative returns.\n\nParameters\n----------\nsamples : numpy.ndarray\n    Alternative paths, or series of possible outcomes.\ncone_std : list of int/float\n    Number of standard devations to use in the boundaries of\n    the cone. If multiple values are passed, cone bounds will\n    be generated for each value.\n\nReturns\n-------\nsamples : pandas.core.frame.DataFrame", "id": "f12221:m33"}
{"signature": "@deprecated(msg=DEPRECATION_WARNING)<EOL>def beta(returns, factor_returns):", "body": "return ep.beta(returns, factor_returns)<EOL>", "docstring": "Calculates beta.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n    - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nfactor_returns : pd.Series\n    Daily noncumulative returns of the benchmark factor to which betas are\n    computed. Usually a benchmark such as market returns.\n     - This is in the same style as returns.\n\nReturns\n-------\nfloat\n    Beta.", "id": "f12221:m11"}
{"signature": "def gen_drawdown_table(returns, top=<NUM_LIT:10>):", "body": "df_cum = ep.cum_returns(returns, <NUM_LIT:1.0>)<EOL>drawdown_periods = get_top_drawdowns(returns, top=top)<EOL>df_drawdowns = pd.DataFrame(index=list(range(top)),<EOL>columns=['<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'])<EOL>for i, (peak, valley, recovery) in enumerate(drawdown_periods):<EOL><INDENT>if pd.isnull(recovery):<EOL><INDENT>df_drawdowns.loc[i, '<STR_LIT>'] = np.nan<EOL><DEDENT>else:<EOL><INDENT>df_drawdowns.loc[i, '<STR_LIT>'] = len(pd.date_range(peak,<EOL>recovery,<EOL>freq='<STR_LIT:B>'))<EOL><DEDENT>df_drawdowns.loc[i, '<STR_LIT>'] = (peak.to_pydatetime()<EOL>.strftime('<STR_LIT>'))<EOL>df_drawdowns.loc[i, '<STR_LIT>'] = (valley.to_pydatetime()<EOL>.strftime('<STR_LIT>'))<EOL>if isinstance(recovery, float):<EOL><INDENT>df_drawdowns.loc[i, '<STR_LIT>'] = recovery<EOL><DEDENT>else:<EOL><INDENT>df_drawdowns.loc[i, '<STR_LIT>'] = (recovery.to_pydatetime()<EOL>.strftime('<STR_LIT>'))<EOL><DEDENT>df_drawdowns.loc[i, '<STR_LIT>'] = (<EOL>(df_cum.loc[peak] - df_cum.loc[valley]) / df_cum.loc[peak]) * <NUM_LIT:100><EOL><DEDENT>df_drawdowns['<STR_LIT>'] = pd.to_datetime(df_drawdowns['<STR_LIT>'])<EOL>df_drawdowns['<STR_LIT>'] = pd.to_datetime(df_drawdowns['<STR_LIT>'])<EOL>df_drawdowns['<STR_LIT>'] = pd.to_datetime(<EOL>df_drawdowns['<STR_LIT>'])<EOL>return df_drawdowns<EOL>", "docstring": "Places top drawdowns in a table.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\ntop : int, optional\n    The amount of top drawdowns to find (default 10).\n\nReturns\n-------\ndf_drawdowns : pd.DataFrame\n    Information about top drawdowns.", "id": "f12221:m29"}
{"signature": "@deprecated(msg=DEPRECATION_WARNING)<EOL>def sharpe_ratio(returns, risk_free=<NUM_LIT:0>, period=DAILY):", "body": "return ep.sharpe_ratio(returns, risk_free=risk_free, period=period)<EOL>", "docstring": "Determines the Sharpe ratio of a strategy.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n    - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nrisk_free : int, float\n    Constant risk-free return throughout the period.\nperiod : str, optional\n    Defines the periodicity of the 'returns' data for purposes of\n    annualizing. Can be 'monthly', 'weekly', or 'daily'.\n    - Defaults to 'daily'.\n\nReturns\n-------\nfloat\n    Sharpe ratio.\nnp.nan\n    If insufficient length of returns or if if adjusted returns are 0.\n\nNote\n-----\nSee https://en.wikipedia.org/wiki/Sharpe_ratio for more details.", "id": "f12221:m8"}
{"signature": "def var_cov_var_normal(P, c, mu=<NUM_LIT:0>, sigma=<NUM_LIT:1>):", "body": "alpha = sp.stats.norm.ppf(<NUM_LIT:1> - c, mu, sigma)<EOL>return P - P * (alpha + <NUM_LIT:1>)<EOL>", "docstring": "Variance-covariance calculation of daily Value-at-Risk in a\nportfolio.\n\nParameters\n----------\nP : float\n    Portfolio value.\nc : float\n    Confidence level.\nmu : float, optional\n    Mean.\n\nReturns\n-------\nfloat\n    Variance-covariance.", "id": "f12221:m0"}
{"signature": "@deprecated(msg=DEPRECATION_WARNING)<EOL>def stability_of_timeseries(returns):", "body": "return ep.stability_of_timeseries(returns)<EOL>", "docstring": "Determines R-squared of a linear fit to the cumulative\nlog returns. Computes an ordinary least squares linear fit,\nand returns R-squared.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n    - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\n\nReturns\n-------\nfloat\n    R-squared.", "id": "f12221:m12"}
{"signature": "def normalize(returns, starting_value=<NUM_LIT:1>):", "body": "return starting_value * (returns / returns.iloc[<NUM_LIT:0>])<EOL>", "docstring": "Normalizes a returns timeseries based on the first value.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nstarting_value : float, optional\n   The starting returns (default 1).\n\nReturns\n-------\npd.Series\n    Normalized returns.", "id": "f12221:m15"}
{"signature": "@deprecated(msg=DEPRECATION_WARNING)<EOL>def sortino_ratio(returns, required_return=<NUM_LIT:0>, period=DAILY):", "body": "return ep.sortino_ratio(returns, required_return=required_return)<EOL>", "docstring": "Determines the Sortino ratio of a strategy.\n\nParameters\n----------\nreturns : pd.Series or pd.DataFrame\n    Daily returns of the strategy, noncumulative.\n    - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nrequired_return: float / series\n    minimum acceptable return\nperiod : str, optional\n    Defines the periodicity of the 'returns' data for purposes of\n    annualizing. Can be 'monthly', 'weekly', or 'daily'.\n    - Defaults to 'daily'.\n\nReturns\n-------\ndepends on input type\nseries ==> float\nDataFrame ==> np.array\n\n    Annualized Sortino ratio.", "id": "f12221:m6"}
{"signature": "def gross_lev(positions):", "body": "exposure = positions.drop('<STR_LIT>', axis=<NUM_LIT:1>).abs().sum(axis=<NUM_LIT:1>)<EOL>return exposure / positions.sum(axis=<NUM_LIT:1>)<EOL>", "docstring": "Calculates the gross leverage of a strategy.\n\nParameters\n----------\npositions : pd.DataFrame\n    Daily net position values.\n     - See full explanation in tears.create_full_tear_sheet.\n\nReturns\n-------\npd.Series\n    Gross leverage.", "id": "f12221:m20"}
{"signature": "def extract_interesting_date_ranges(returns):", "body": "returns_dupe = returns.copy()<EOL>returns_dupe.index = returns_dupe.index.map(pd.Timestamp)<EOL>ranges = OrderedDict()<EOL>for name, (start, end) in PERIODS.items():<EOL><INDENT>try:<EOL><INDENT>period = returns_dupe.loc[start:end]<EOL>if len(period) == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>ranges[name] = period<EOL><DEDENT>except BaseException:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>return ranges<EOL>", "docstring": "Extracts returns based on interesting events. See\ngen_date_range_interesting.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\n\nReturns\n-------\nranges : OrderedDict\n    Date ranges, with returns, of all valid events.", "id": "f12221:m35"}
{"signature": "def rolling_sharpe(returns, rolling_sharpe_window):", "body": "return returns.rolling(rolling_sharpe_window).mean()/ returns.rolling(rolling_sharpe_window).std()* np.sqrt(APPROX_BDAYS_PER_YEAR)<EOL>", "docstring": "Determines the rolling Sharpe ratio of a strategy.\n\nParameters\n----------\nreturns : pd.Series\n    Daily returns of the strategy, noncumulative.\n     - See full explanation in tears.create_full_tear_sheet.\nrolling_sharpe_window : int\n    Length of rolling window, in days, over which to compute.\n\nReturns\n-------\npd.Series\n    Rolling Sharpe ratio.\n\nNote\n-----\nSee https://en.wikipedia.org/wiki/Sharpe_ratio for more details.", "id": "f12221:m31"}
{"signature": "@deprecated(msg=DEPRECATION_WARNING)<EOL>def downside_risk(returns, required_return=<NUM_LIT:0>, period=DAILY):", "body": "return ep.downside_risk(returns,<EOL>required_return=required_return,<EOL>period=period)<EOL>", "docstring": "Determines the downside deviation below a threshold\n\nParameters\n----------\nreturns : pd.Series or pd.DataFrame\n    Daily returns of the strategy, noncumulative.\n    - See full explanation in :func:`~pyfolio.timeseries.cum_returns`.\nrequired_return: float / series\n    minimum acceptable return\nperiod : str, optional\n    Defines the periodicity of the 'returns' data for purposes of\n    annualizing. Can be 'monthly', 'weekly', or 'daily'.\n    - Defaults to 'daily'.\n\nReturns\n-------\ndepends on input type\nseries ==> float\nDataFrame ==> np.array\n\n    Annualized downside deviation", "id": "f12221:m7"}
{"signature": "def get_txn_vol(transactions):", "body": "txn_norm = transactions.copy()<EOL>txn_norm.index = txn_norm.index.normalize()<EOL>amounts = txn_norm.amount.abs()<EOL>prices = txn_norm.price<EOL>values = amounts * prices<EOL>daily_amounts = amounts.groupby(amounts.index).sum()<EOL>daily_values = values.groupby(values.index).sum()<EOL>daily_amounts.name = \"<STR_LIT>\"<EOL>daily_values.name = \"<STR_LIT>\"<EOL>return pd.concat([daily_values, daily_amounts], axis=<NUM_LIT:1>)<EOL>", "docstring": "Extract daily transaction data from set of transaction objects.\n\nParameters\n----------\ntransactions : pd.DataFrame\n    Time series containing one row per symbol (and potentially\n    duplicate datetime indices) and columns for amount and\n    price.\n\nReturns\n-------\npd.DataFrame\n    Daily transaction volume and number of shares.\n     - See full explanation in tears.create_full_tear_sheet.", "id": "f12223:m2"}
{"signature": "def make_transaction_frame(transactions):", "body": "transaction_list = []<EOL>for dt in transactions.index:<EOL><INDENT>txns = transactions.loc[dt]<EOL>if len(txns) == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>for txn in txns:<EOL><INDENT>txn = map_transaction(txn)<EOL>transaction_list.append(txn)<EOL><DEDENT><DEDENT>df = pd.DataFrame(sorted(transaction_list, key=lambda x: x['<STR_LIT>']))<EOL>df['<STR_LIT>'] = -df['<STR_LIT>'] * df['<STR_LIT>']<EOL>df.index = list(map(pd.Timestamp, df.dt.values))<EOL>return df<EOL>", "docstring": "Formats a transaction DataFrame.\n\nParameters\n----------\ntransactions : pd.DataFrame\n    Contains improperly formatted transactional data.\n\nReturns\n-------\ndf : pd.DataFrame\n    Daily transaction volume and dollar ammount.\n     - See full explanation in tears.create_full_tear_sheet.", "id": "f12223:m1"}
{"signature": "def get_users(session, query):", "body": "<EOL>response = make_get_request(session, '<STR_LIT>', params_data=query)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:result>']<EOL><DEDENT>else:<EOL><INDENT>raise UsersNotFoundException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>'])<EOL><DEDENT>", "docstring": "Get one or more users", "id": "f12288:m6"}
{"signature": "def add_user_jobs(session, job_ids):", "body": "jobs_data = {<EOL>'<STR_LIT>': job_ids<EOL>}<EOL>response = make_post_request(session, '<STR_LIT>', json_data=jobs_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:status>']<EOL><DEDENT>else:<EOL><INDENT>raise UserJobsNotAddedException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>'])<EOL><DEDENT>", "docstring": "Add a list of jobs to the currently authenticated user", "id": "f12288:m3"}
{"signature": "def get_self(session, user_details=None):", "body": "<EOL>if user_details:<EOL><INDENT>user_details['<STR_LIT>'] = True<EOL><DEDENT>response = make_get_request(session, '<STR_LIT>', params_data=user_details)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:result>']<EOL><DEDENT>else:<EOL><INDENT>raise SelfNotRetrievedException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>']<EOL>)<EOL><DEDENT>", "docstring": "Get details about the currently authenticated user", "id": "f12288:m0"}
{"signature": "def get_self_user_id(session):", "body": "response = make_get_request(session, '<STR_LIT>')<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return response.json()['<STR_LIT:result>']['<STR_LIT:id>']<EOL><DEDENT>else:<EOL><INDENT>raise UserIdNotRetrievedException(<EOL>'<STR_LIT>' % response.text, response.text)<EOL><DEDENT>", "docstring": "Get the currently authenticated user ID", "id": "f12288:m2"}
{"signature": "def get_user_by_id(session, user_id, user_details=None):", "body": "if user_details:<EOL><INDENT>user_details['<STR_LIT>'] = True<EOL><DEDENT>response = make_get_request(<EOL>session, '<STR_LIT>'.format(user_id), params_data=user_details)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:result>']<EOL><DEDENT>else:<EOL><INDENT>raise UserNotFoundException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>']<EOL>)<EOL><DEDENT>", "docstring": "Get details about specific user", "id": "f12288:m1"}
{"signature": "def create_project(session, title, description,<EOL>currency, budget, jobs):", "body": "project_data = {'<STR_LIT:title>': title,<EOL>'<STR_LIT:description>': description,<EOL>'<STR_LIT>': currency,<EOL>'<STR_LIT>': budget,<EOL>'<STR_LIT>': jobs<EOL>}<EOL>response = make_post_request(session, '<STR_LIT>', json_data=project_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>project_data = json_data['<STR_LIT:result>']<EOL>p = Project(project_data)<EOL>p.url = urljoin(session.url, '<STR_LIT>' % p.seo_url)<EOL>return p<EOL><DEDENT>else:<EOL><INDENT>raise ProjectNotCreatedException(message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>'],<EOL>)<EOL><DEDENT>", "docstring": "Create a project", "id": "f12290:m0"}
{"signature": "def get_jobs(session, job_ids, seo_details, lang):", "body": "get_jobs_data = {<EOL>'<STR_LIT>': job_ids,<EOL>'<STR_LIT>': seo_details,<EOL>'<STR_LIT>': lang,<EOL>}<EOL>response = make_get_request(session, '<STR_LIT>', params_data=get_jobs_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:result>']<EOL><DEDENT>else:<EOL><INDENT>raise JobsNotFoundException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>'])<EOL><DEDENT>", "docstring": "Get a list of jobs", "id": "f12290:m28"}
{"signature": "def create_hourly_project(session, title, description,<EOL>currency, budget, jobs, hourly_project_info):", "body": "project_data = {'<STR_LIT:title>': title,<EOL>'<STR_LIT:description>': description,<EOL>'<STR_LIT>': currency,<EOL>'<STR_LIT>': budget,<EOL>'<STR_LIT>': jobs,<EOL>'<STR_LIT:type>': '<STR_LIT>',<EOL>'<STR_LIT>': hourly_project_info<EOL>}<EOL>response = make_post_request(session, '<STR_LIT>', json_data=project_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>project_data = json_data['<STR_LIT:result>']<EOL>p = Project(project_data)<EOL>p.url = urljoin(session.url, '<STR_LIT>' % p.seo_url)<EOL>return p<EOL><DEDENT>else:<EOL><INDENT>raise ProjectNotCreatedException(message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>'],<EOL>)<EOL><DEDENT>", "docstring": "Create a fixed project", "id": "f12290:m1"}
{"signature": "def post_track(session, user_id, project_id, latitude, longitude):", "body": "tracking_data = {<EOL>'<STR_LIT>': user_id,<EOL>'<STR_LIT>': project_id,<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': latitude,<EOL>'<STR_LIT>': longitude<EOL>}<EOL>}<EOL>response = make_post_request(session, '<STR_LIT>',<EOL>json_data=tracking_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:result>']<EOL><DEDENT>else:<EOL><INDENT>raise TrackNotCreatedException(message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>'])<EOL><DEDENT>", "docstring": "Start tracking a project by creating a track", "id": "f12290:m17"}
{"signature": "def get_milestones(session, project_ids=[], milestone_ids=[], user_details=None, limit=<NUM_LIT:10>, offset=<NUM_LIT:0>):", "body": "get_milestones_data = {}<EOL>if milestone_ids:<EOL><INDENT>get_milestones_data['<STR_LIT>'] = milestone_ids<EOL><DEDENT>if project_ids:<EOL><INDENT>get_milestones_data['<STR_LIT>'] = project_ids<EOL><DEDENT>get_milestones_data['<STR_LIT>'] = limit<EOL>get_milestones_data['<STR_LIT>'] = offset<EOL>if user_details:<EOL><INDENT>get_milestones_data.update(user_details)<EOL><DEDENT>response = make_get_request(<EOL>session, '<STR_LIT>', params_data=get_milestones_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:result>']<EOL><DEDENT>else:<EOL><INDENT>raise MilestonesNotFoundException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>']<EOL>)<EOL><DEDENT>", "docstring": "Get the list of milestones", "id": "f12290:m9"}
{"signature": "def place_project_bid(session, project_id, bidder_id, description, amount,<EOL>period, milestone_percentage):", "body": "bid_data = {<EOL>'<STR_LIT>': project_id,<EOL>'<STR_LIT>': bidder_id,<EOL>'<STR_LIT:description>': description,<EOL>'<STR_LIT>': amount,<EOL>'<STR_LIT>': period,<EOL>'<STR_LIT>': milestone_percentage,<EOL>}<EOL>response = make_post_request(session, '<STR_LIT>', json_data=bid_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>bid_data = json_data['<STR_LIT:result>']<EOL>return Bid(bid_data)<EOL><DEDENT>else:<EOL><INDENT>raise BidNotPlacedException(message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>'])<EOL><DEDENT>", "docstring": "Place a bid on a project", "id": "f12290:m7"}
{"signature": "def create_local_project(session, title, description,<EOL>currency, budget, jobs, location):", "body": "project_data = {'<STR_LIT:title>': title,<EOL>'<STR_LIT:description>': description,<EOL>'<STR_LIT>': currency,<EOL>'<STR_LIT>': budget,<EOL>'<STR_LIT>': jobs,<EOL>'<STR_LIT>': True,<EOL>'<STR_LIT:location>': location<EOL>}<EOL>response = make_post_request(session, '<STR_LIT>', json_data=project_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>project_data = json_data['<STR_LIT:result>']<EOL>p = Project(project_data)<EOL>p.url = urljoin(session.url, '<STR_LIT>' % p.seo_url)<EOL>return p<EOL><DEDENT>else:<EOL><INDENT>raise ProjectNotCreatedException(message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>'],<EOL>)<EOL><DEDENT>", "docstring": "Create a fixed project", "id": "f12290:m2"}
{"signature": "def award_project_bid(session, bid_id):", "body": "headers = {<EOL>'<STR_LIT:Content-Type>': '<STR_LIT>'<EOL>}<EOL>bid_data = {<EOL>'<STR_LIT:action>': '<STR_LIT>'<EOL>}<EOL>endpoint = '<STR_LIT>'.format(bid_id)<EOL>response = make_put_request(session, endpoint, headers=headers,<EOL>params_data=bid_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:status>']<EOL><DEDENT>else:<EOL><INDENT>json_data = response.json()<EOL>raise BidNotAwardedException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>']<EOL>)<EOL><DEDENT>", "docstring": "Award a bid on a project", "id": "f12290:m11"}
{"signature": "def accept_milestone_request(session, milestone_request_id):", "body": "params_data = {<EOL>'<STR_LIT:action>': '<STR_LIT>',<EOL>}<EOL>endpoint = '<STR_LIT>'.format(milestone_request_id)<EOL>response = make_put_request(session, endpoint, params_data=params_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:status>']<EOL><DEDENT>else:<EOL><INDENT>raise MilestoneRequestNotAcceptedException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>'])<EOL><DEDENT>", "docstring": "Accept a milestone request", "id": "f12290:m24"}
{"signature": "def get_milestone_by_id(session, milestone_id, user_details=None):", "body": "<EOL>endpoint = '<STR_LIT>'.format(milestone_id)<EOL>response = make_get_request(session, endpoint, params_data=user_details)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:result>']<EOL><DEDENT>else:<EOL><INDENT>raise MilestonesNotFoundException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>']<EOL>)<EOL><DEDENT>", "docstring": "Get a specific milestone", "id": "f12290:m10"}
{"signature": "def get_track_by_id(session, track_id, track_point_limit=None, track_point_offset=None):", "body": "tracking_data = {}<EOL>if track_point_limit:<EOL><INDENT>tracking_data['<STR_LIT>'] = track_point_limit<EOL><DEDENT>if track_point_offset:<EOL><INDENT>tracking_data['<STR_LIT>'] = track_point_offset<EOL><DEDENT>response = make_get_request(session, '<STR_LIT>'.format(track_id),<EOL>params_data=tracking_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:result>']<EOL><DEDENT>else:<EOL><INDENT>raise TrackNotFoundException(message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>'])<EOL><DEDENT>", "docstring": "Gets a specific track", "id": "f12290:m19"}
{"signature": "def reject_milestone_request(session, milestone_request_id):", "body": "params_data = {<EOL>'<STR_LIT:action>': '<STR_LIT>',<EOL>}<EOL>endpoint = '<STR_LIT>'.format(milestone_request_id)<EOL>response = make_put_request(session, endpoint, params_data=params_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:status>']<EOL><DEDENT>else:<EOL><INDENT>raise MilestoneRequestNotRejectedException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>'])<EOL><DEDENT>", "docstring": "Reject a milestone request", "id": "f12290:m25"}
{"signature": "def create_thread(session, member_ids, context_type, context, message):", "body": "headers = {<EOL>'<STR_LIT:Content-Type>': '<STR_LIT>'<EOL>}<EOL>thread_data = {<EOL>'<STR_LIT>': member_ids,<EOL>'<STR_LIT>': context_type,<EOL>'<STR_LIT>': context,<EOL>'<STR_LIT:message>': message,<EOL>}<EOL>response = make_post_request(session, '<STR_LIT>', headers,<EOL>form_data=thread_data)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return Thread(json_data['<STR_LIT:result>'])<EOL><DEDENT>else:<EOL><INDENT>raise ThreadNotCreatedException(message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>'])<EOL><DEDENT>", "docstring": "Create a thread", "id": "f12295:m0"}
{"signature": "def get_messages(session, query, limit=<NUM_LIT:10>, offset=<NUM_LIT:0>):", "body": "query['<STR_LIT>'] = limit<EOL>query['<STR_LIT>'] = offset<EOL>response = make_get_request(session, '<STR_LIT>', params_data=query)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:result>']<EOL><DEDENT>else:<EOL><INDENT>raise MessagesNotFoundException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>']<EOL>)<EOL><DEDENT>", "docstring": "Get one or more messages", "id": "f12295:m4"}
{"signature": "def search_messages(session, thread_id, query, limit=<NUM_LIT:20>,<EOL>offset=<NUM_LIT:0>, message_context_details=None,<EOL>window_above=None, window_below=None):", "body": "query = {<EOL>'<STR_LIT>': thread_id,<EOL>'<STR_LIT>': query,<EOL>'<STR_LIT>': limit,<EOL>'<STR_LIT>': offset<EOL>}<EOL>if message_context_details:<EOL><INDENT>query['<STR_LIT>'] = message_context_details<EOL><DEDENT>if window_above:<EOL><INDENT>query['<STR_LIT>'] = window_above<EOL><DEDENT>if window_below:<EOL><INDENT>query['<STR_LIT>'] = window_below<EOL><DEDENT>response = make_get_request(session, '<STR_LIT>', params_data=query)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:result>']<EOL><DEDENT>else:<EOL><INDENT>raise MessagesNotFoundException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>']<EOL>)<EOL><DEDENT>", "docstring": "Search for messages", "id": "f12295:m5"}
{"signature": "def get_threads(session, query):", "body": "<EOL>response = make_get_request(session, '<STR_LIT>', params_data=query)<EOL>json_data = response.json()<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>return json_data['<STR_LIT:result>']<EOL><DEDENT>else:<EOL><INDENT>raise ThreadsNotFoundException(<EOL>message=json_data['<STR_LIT:message>'],<EOL>error_code=json_data['<STR_LIT>'],<EOL>request_id=json_data['<STR_LIT>']<EOL>)<EOL><DEDENT>", "docstring": "Get one or more threads", "id": "f12295:m6"}
{"signature": "def description(description):", "body": "def wrapper(func):<EOL><INDENT>@wraps(func)<EOL>def wrapped(self, *args, **kwargs):<EOL><INDENT>return func(self, *args, **kwargs)<EOL><DEDENT>wrapped.description = description<EOL>return wrapped<EOL><DEDENT>return wrapper<EOL>", "docstring": "Set description to test_method", "id": "f12310:m0"}
{"signature": "def reset_cookie(self):", "body": "self.cookie = CookieDict()<EOL>", "docstring": "Reset cookie data.", "id": "f12310:c0:m1"}
{"signature": "def _connect(self, url, params, ok_codes, rtype, description,<EOL>redirect=False, consumer=None):", "body": "<EOL>if self.cookie:<EOL><INDENT>self.setHeader('<STR_LIT>', self.cookie.render_to_string())<EOL><DEDENT>response = super(TestCase, self)._connect(<EOL>url, params, ok_codes, rtype, description,<EOL>redirect=redirect, consumer=consumer)<EOL>for key, value in response.headers.items():<EOL><INDENT>if key.lower() == '<STR_LIT>':<EOL><INDENT>self.cookie.from_cookie_string(value)<EOL><DEDENT><DEDENT>content_type_header = response.headers.get('<STR_LIT>')<EOL>if content_type_header:<EOL><INDENT>if '<STR_LIT:;>' in content_type_header:<EOL><INDENT>content_type, charset_pair = content_type_header.split('<STR_LIT:;>', <NUM_LIT:1>)<EOL>if '<STR_LIT:=>' in charset_pair:<EOL><INDENT>_key, charset = charset_pair.split('<STR_LIT:=>', <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>charset = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>content_type = content_type_header<EOL>charset = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>content_type = charset = None<EOL><DEDENT>response.content_type_header = content_type_header<EOL>if content_type:<EOL><INDENT>response.content_type = content_type.strip()<EOL><DEDENT>else:<EOL><INDENT>response.content_type = None<EOL><DEDENT>response.charset = charset<EOL>if response.content_type == '<STR_LIT:application/json>':<EOL><INDENT>response.data = json.loads(response.body)<EOL><DEDENT>else:<EOL><INDENT>response.data = None<EOL><DEDENT>return response<EOL>", "docstring": "Override FunkLoadTestCase._connect", "id": "f12310:c0:m2"}
{"signature": "def clear_session_id(self):", "body": "return self.cookie.get(self.session_id_key)<EOL>", "docstring": "Clear session ID", "id": "f12310:c0:m4"}
{"signature": "def render_to_string(self):", "body": "values = '<STR_LIT>'<EOL>for key, value in self.items():<EOL><INDENT>values += '<STR_LIT>'.format(key, value)<EOL><DEDENT>return values<EOL>", "docstring": "Render to cookie strings.", "id": "f12312:c0:m1"}
{"signature": "def __len__(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Many DynamicArgs won't have a length that can be\nprecomputed. Most DynamicArgs objects will have an iteration\nlimit to guarantee eventual termination. If so, the maximum\npossible number of arguments that could be generated should be\nreturned.", "id": "f12318:c0:m9"}
{"signature": "def update(self, tids, info):", "body": "outputs_dir = os.path.join(info['<STR_LIT>'], '<STR_LIT>')<EOL>pattern = '<STR_LIT>' % info['<STR_LIT>']<EOL>flist = os.listdir(outputs_dir)<EOL>try:<EOL><INDENT>outputs = []<EOL>for tid in tids:<EOL><INDENT>matches = fnmatch.filter(flist, pattern.format(tid=tid))<EOL>if len(matches) != <NUM_LIT:1>:<EOL><INDENT>self.warning(\"<STR_LIT>\" % tid)<EOL><DEDENT>contents = open(os.path.join(outputs_dir, matches[<NUM_LIT:0>]),'<STR_LIT:r>').read()<EOL>outputs.append(self.output_extractor(contents))<EOL><DEDENT>self._next_val = self._update_state(outputs)<EOL>self.trace.append((outputs, self._next_val))<EOL><DEDENT>except:<EOL><INDENT>self.warning(\"<STR_LIT>\")<EOL>self._next_val = StopIteration<EOL><DEDENT>", "docstring": "Called to update the state of the iterator.  This methods\nreceives the set of task ids from the previous set of tasks\ntogether with the launch information to allow the output\nvalues to be parsed using the output_extractor. This data is then\nused to determine the next desired point in the parameter\nspace by calling the _update_state method.", "id": "f12318:c0:m4"}
{"signature": "def _trace_summary(self):", "body": "for (i, (val, args)) in enumerate(self.trace):<EOL><INDENT>if args is StopIteration:<EOL><INDENT>info = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>pprint = '<STR_LIT:U+002C>'.join('<STR_LIT:{>' + '<STR_LIT:U+002C>'.join('<STR_LIT>' % (k,v)<EOL>for (k,v) in arg.items()) + '<STR_LIT:}>' for arg in args)<EOL>info = (\"<STR_LIT>\" % pprint )<EOL><DEDENT>if i == <NUM_LIT:0>: print(\"<STR_LIT>\" % (i, info))<EOL>else:      print(\"<STR_LIT>\" % (i, info.capitalize(), val))<EOL><DEDENT>", "docstring": "Summarizes the trace of values used to update the DynamicArgs\nand the arguments subsequently returned. May be used to\nimplement the summary method.", "id": "f12318:c0:m6"}
{"signature": "def __add__(self, other):", "body": "if not other: return self<EOL>dynamic = (isinstance(self, DynamicArgs),  isinstance(other, DynamicArgs))<EOL>if dynamic == (True, True):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>elif (True in dynamic):<EOL><INDENT>return DynamicConcatenate(self,other)<EOL><DEDENT>else:<EOL><INDENT>return Concatenate(self,other)<EOL><DEDENT>", "docstring": "Concatenates two argument specifiers. See Concatenate and\nDynamicConcatenate documentation respectively.", "id": "f12318:c0:m7"}
{"signature": "def cross_check_launchers(self, launchers):", "body": "if len(launchers) == <NUM_LIT:0>: raise Exception('<STR_LIT>')<EOL>timestamps = [launcher.timestamp for launcher in launchers]<EOL>if not all(timestamps[<NUM_LIT:0>] == tstamp for tstamp in timestamps):<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>root_directories = []<EOL>for launcher in launchers:<EOL><INDENT>command = launcher.command<EOL>args = launcher.args<EOL>command.verify(args)<EOL>root_directory = launcher.get_root_directory()<EOL>if os.path.isdir(root_directory):<EOL><INDENT>raise Exception(\"<STR_LIT>\" % root_directory)<EOL><DEDENT>if root_directory in root_directories:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>root_directories.append(root_directory)<EOL><DEDENT>", "docstring": "Performs consistency checks across all the launchers.", "id": "f12319:c6:m1"}
{"signature": "def _append_log(self, specs):", "body": "self._spec_log += specs <EOL>log_path = os.path.join(self.root_directory, (\"<STR_LIT>\" % self.batch_name))<EOL>core.Log.write_log(log_path, [spec for (_, spec) in specs], allow_append=True)<EOL>", "docstring": "The log contains the tids and corresponding specifications\nused during launch with the specifications in JSON format.", "id": "f12319:c3:m2"}
{"signature": "def _launch_process_group(self, process_commands, streams_path):", "body": "processes = {}<EOL>def check_complete_processes(wait=False):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>result = False<EOL>for proc in list(processes):<EOL><INDENT>if wait: proc.wait()<EOL>if proc.poll() is not None:<EOL><INDENT>self.debug(\"<STR_LIT>\"<EOL>% (processes[proc]['<STR_LIT>'], proc.poll()))<EOL>processes[proc]['<STR_LIT>'].close()<EOL>processes[proc]['<STR_LIT>'].close()<EOL>del processes[proc]<EOL>result = True<EOL><DEDENT><DEDENT>return result<EOL><DEDENT>for cmd, tid in process_commands:<EOL><INDENT>self.debug(\"<STR_LIT>\" % tid)<EOL>job_timestamp = time.strftime('<STR_LIT>')<EOL>basename = \"<STR_LIT>\" % (self.batch_name, job_timestamp, tid)<EOL>stdout_handle = open(os.path.join(streams_path, \"<STR_LIT>\"<EOL>% (basename, tid)), \"<STR_LIT:wb>\")<EOL>stderr_handle = open(os.path.join(streams_path, \"<STR_LIT>\"<EOL>% (basename, tid)), \"<STR_LIT:wb>\")<EOL>proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle)<EOL>processes[proc] = { '<STR_LIT>' : tid,<EOL>'<STR_LIT>' : stdout_handle,<EOL>'<STR_LIT>' : stderr_handle }<EOL>if self.max_concurrency:<EOL><INDENT>while len(processes) >= self.max_concurrency:<EOL><INDENT>if not check_complete_processes(len(processes)==<NUM_LIT:1>):<EOL><INDENT>time.sleep(<NUM_LIT:0.1>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>while len(processes) > <NUM_LIT:0>:<EOL><INDENT>if not check_complete_processes(True):<EOL><INDENT>time.sleep(<NUM_LIT:0.1>)<EOL><DEDENT><DEDENT>", "docstring": "Launches processes defined by process_commands, but only\nexecutes max_concurrency processes at a time; if a process\ncompletes and there are still outstanding processes to be\nexecuted, the next processes are run until max_concurrency is\nreached again.", "id": "f12319:c3:m6"}
{"signature": "def get_root_directory(self, timestamp=None):", "body": "if timestamp is None: timestamp = self.timestamp<EOL>if self.timestamp_format is not None:<EOL><INDENT>root_name =  (time.strftime(self.timestamp_format, timestamp)<EOL>+ '<STR_LIT:->' + self.batch_name)<EOL><DEDENT>else:<EOL><INDENT>root_name = self.batch_name<EOL><DEDENT>path = os.path.join(self.output_directory,<EOL>*(self.subdir+[root_name]))<EOL>return os.path.abspath(path)<EOL>", "docstring": "A helper method that supplies the root directory name given a\ntimestamp.", "id": "f12319:c3:m1"}
{"signature": "def _qsub_block(self, output_dir, error_dir, tid_specs):", "body": "processes = []<EOL>job_names = []<EOL>for (tid, spec) in tid_specs:<EOL><INDENT>job_name = \"<STR_LIT>\" % (self.batch_name, self.job_timestamp, tid)<EOL>job_names.append(job_name)<EOL>cmd_args = self.command(<EOL>self.command._formatter(spec),<EOL>tid, self._launchinfo)<EOL>popen_args = self._qsub_args([(\"<STR_LIT>\",error_dir), ('<STR_LIT>',job_name), (\"<STR_LIT>\",output_dir)],<EOL>cmd_args)<EOL>p = subprocess.Popen(popen_args, stdout=subprocess.PIPE)<EOL>(stdout, stderr) = p.communicate()<EOL>self.debug(stdout)<EOL>if p.poll() != <NUM_LIT:0>:<EOL><INDENT>raise EnvironmentError(\"<STR_LIT>\" % p.poll())<EOL><DEDENT>processes.append(p)<EOL><DEDENT>self.message(\"<STR_LIT>\" % len(processes))<EOL>if (self.reduction_fn is not None) or self.dynamic:<EOL><INDENT>self._qsub_collate_and_launch(output_dir, error_dir, job_names)<EOL><DEDENT>", "docstring": "This method handles static argument specifiers and cases where\nthe dynamic specifiers cannot be queued before the arguments\nare known.", "id": "f12319:c4:m5"}
{"signature": "def _qsub_collate_and_launch(self, output_dir, error_dir, job_names):", "body": "job_name = \"<STR_LIT>\" % (self.batch_name,<EOL>self.job_timestamp,<EOL>self.collate_count)<EOL>overrides = [(\"<STR_LIT>\",error_dir), ('<STR_LIT>',job_name), (\"<STR_LIT>\",output_dir),<EOL>('<STR_LIT>','<STR_LIT:U+002C>'.join(job_names))]<EOL>resume_cmds =[\"<STR_LIT>\",<EOL>(\"<STR_LIT>\"<EOL>% self.root_directory),<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\"]<EOL>cmd_args = [self.command.executable,<EOL>'<STR_LIT:-c>', '<STR_LIT:;>'.join(resume_cmds)]<EOL>popen_args = self._qsub_args(overrides, cmd_args)<EOL>p = subprocess.Popen(popen_args, stdout=subprocess.PIPE)<EOL>(stdout, stderr) = p.communicate()<EOL>self.debug(stdout)<EOL>if p.poll() != <NUM_LIT:0>:<EOL><INDENT>raise EnvironmentError(\"<STR_LIT>\" % p.poll())<EOL><DEDENT>self.collate_count += <NUM_LIT:1><EOL>self.message(\"<STR_LIT>\")<EOL>return job_name<EOL>", "docstring": "The method that actually runs qsub to invoke the python\nprocess with the necessary commands to trigger the next\ncollation step and next block of jobs.", "id": "f12319:c4:m4"}
{"signature": "def __call__(self, spec, tid=None, info={}):", "body": "raise NotImplementedError<EOL>", "docstring": "Formats a single argument specification supplied as a\ndictionary of argument name/value pairs. The info dictionary\ncontains launch information as defined in the _setup_launch\nmethod of Launcher.", "id": "f12319:c0:m1"}
{"signature": "def summary(self):", "body": "raise NotImplementedError<EOL>", "docstring": "A succinct summary of the Command configuration.  Unlike the\nrepr, a summary does not have to be complete but must supply\nkey information relevant to the user. Must begin by stating\nthe executable.", "id": "f12319:c0:m6"}
{"signature": "def _review_all(self, launchers):", "body": "<EOL>if self.launch_args is not None:<EOL><INDENT>proceed = self.review_args(self.launch_args,<EOL>show_repr=True,<EOL>heading='<STR_LIT>')<EOL>if not proceed: return False<EOL><DEDENT>reviewers = [self.review_args,<EOL>self.review_command,<EOL>self.review_launcher]<EOL>for (count, launcher) in enumerate(launchers):<EOL><INDENT>if not all(reviewer(launcher) for reviewer in reviewers):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>if len(launchers)!= <NUM_LIT:1> and count < len(launchers)-<NUM_LIT:1>:<EOL><INDENT>skip_remaining = self.input_options(['<STR_LIT:Y>', '<STR_LIT:n>','<STR_LIT>'],<EOL>'<STR_LIT>', default='<STR_LIT:y>')<EOL>if skip_remaining == '<STR_LIT:y>':          break<EOL>elif skip_remaining == '<STR_LIT>':     return False<EOL><DEDENT><DEDENT>if self.input_options(['<STR_LIT:y>','<STR_LIT:N>'], '<STR_LIT>', default='<STR_LIT:n>') != '<STR_LIT:y>':<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return self._launch_all(launchers)<EOL><DEDENT>", "docstring": "Runs the review process for all the launchers.", "id": "f12319:c6:m4"}
{"signature": "def __call__(self):", "body": "launchinfo = self._setup_launch()<EOL>streams_path = self._setup_streams_path()<EOL>self.command.finalize(launchinfo)<EOL>self._record_info(launchinfo)<EOL>last_tid = <NUM_LIT:0><EOL>last_tids = []<EOL>for gid, groupspecs in enumerate(self.args):<EOL><INDENT>tids = list(range(last_tid, last_tid+len(groupspecs)))<EOL>last_tid += len(groupspecs)<EOL>allcommands = [self.command(<EOL>self.command._formatter(spec), tid, launchinfo)for (spec,tid) in zip(groupspecs,tids)]<EOL>self._append_log(list(zip(tids,groupspecs)))<EOL>self.message(\"<STR_LIT>\" % (gid, len(allcommands)))<EOL>self._launch_process_group(zip(allcommands,tids), streams_path)<EOL>last_tids = tids[:]<EOL>if self.dynamic:<EOL><INDENT>self.args.update(last_tids, launchinfo)<EOL><DEDENT><DEDENT>self._record_info()<EOL>if self.reduction_fn is not None:<EOL><INDENT>self.reduction_fn(self._spec_log, self.root_directory)<EOL><DEDENT>", "docstring": "Call to start Launcher execution. Typically invoked by\nreview_and_launch but may be called directly by the user.", "id": "f12319:c3:m7"}
{"signature": "def review_args(self, obj, show_repr=False, heading='<STR_LIT>'):", "body": "args = obj.args if isinstance(obj, Launcher) else obj<EOL>print('<STR_LIT>' % self.summary_heading(heading))<EOL>args.summary()<EOL>if show_repr: print(\"<STR_LIT>\" % args)<EOL>response = self.input_options(['<STR_LIT:y>', '<STR_LIT:N>','<STR_LIT>'],<EOL>'<STR_LIT>', default='<STR_LIT:n>')<EOL>if response == '<STR_LIT>': return False<EOL>if response == '<STR_LIT:y>':  args.show()<EOL>print('<STR_LIT>')<EOL>return True<EOL>", "docstring": "Reviews the given argument specification. Can review the\nmeta-arguments (launch_args) or the arguments themselves.", "id": "f12319:c6:m6"}
{"signature": "def _launch_all(self, launchers):", "body": "for launcher in launchers:<EOL><INDENT>print(\"<STR_LIT>\" % launcher.batch_name)<EOL>launcher()<EOL><DEDENT>return True<EOL>", "docstring": "Launches all available launchers.", "id": "f12319:c6:m3"}
{"signature": "def qdel_batch(self):", "body": "p = subprocess.Popen(['<STR_LIT>', '<STR_LIT>' % (self.batch_name,<EOL>self.job_timestamp)],<EOL>stdout=subprocess.PIPE)<EOL>(stdout, stderr) = p.communicate()<EOL>return p.poll()<EOL>", "docstring": "Runs qdel command to remove all remaining queued jobs using\nthe <batch_name>* pattern . Necessary when StopIteration is\nraised with scheduled jobs left on the queue.\nReturns exit-code of qdel.", "id": "f12319:c4:m6"}
{"signature": "def __call__(self):", "body": "self._launchinfo = self._setup_launch()<EOL>self.command.finalize(self._launchinfo)<EOL>self.job_timestamp = time.strftime('<STR_LIT>')<EOL>streams_path = self._setup_streams_path()<EOL>self.qsub_flag_options['<STR_LIT>'] = streams_path<EOL>self.qsub_flag_options['<STR_LIT>'] = streams_path<EOL>self.collate_and_launch()<EOL>self._record_info(self._launchinfo)<EOL>", "docstring": "Main entry point for the launcher. Collects the static\ninformation about the launch and sets up the stdout and stderr\nstream output directories. Generates the first call to\ncollate_and_launch().", "id": "f12319:c4:m2"}
{"signature": "def input_options(self, options, prompt='<STR_LIT>', default=None):", "body": "check_options = [x.lower() for x in options]<EOL>while True:<EOL><INDENT>response = input('<STR_LIT>' % (prompt, '<STR_LIT:U+002CU+0020>'.join(options))).lower()<EOL>if response in check_options: return response.strip()<EOL>elif response == '<STR_LIT>' and default is not None:<EOL><INDENT>return default.lower().strip()<EOL><DEDENT><DEDENT>", "docstring": "Helper to prompt the user for input on the commandline.", "id": "f12319:c6:m9"}
{"signature": "def _launch_process_group(self, process_commands, streams_path):", "body": "processes = []<EOL>for cmd, tid in process_commands:<EOL><INDENT>job_timestamp = time.strftime('<STR_LIT>')<EOL>basename = \"<STR_LIT>\" % (self.batch_name, job_timestamp, tid)<EOL>stdout_path = os.path.join(streams_path, \"<STR_LIT>\" % (basename, tid))<EOL>stderr_path = os.path.join(streams_path, \"<STR_LIT>\" % (basename, tid))<EOL>process = { '<STR_LIT>' : tid,<EOL>'<STR_LIT>' : cmd,<EOL>'<STR_LIT>' : stdout_path,<EOL>'<STR_LIT>' : stderr_path }<EOL>processes.append(process)<EOL><DEDENT>json_path = os.path.join(self.root_directory, self.json_name % (tid))<EOL>with open(json_path, '<STR_LIT:w>') as json_file:<EOL><INDENT>json.dump(processes, json_file, sort_keys=True, indent=<NUM_LIT:4>)<EOL><DEDENT>p = subprocess.Popen([self.script_path, json_path, self.batch_name,<EOL>str(len(processes)), str(self.max_concurrency)])<EOL>if p.wait() != <NUM_LIT:0>:<EOL><INDENT>raise EnvironmentError(\"<STR_LIT>\" % p.poll())<EOL><DEDENT>", "docstring": "Aggregates all process_commands and the designated output files into a\nlist, and outputs it as JSON, after which the wrapper script is called.", "id": "f12319:c5:m1"}
{"signature": "def verify(self, args):", "body": "return<EOL>", "docstring": "Optional, final check that ensures valid arguments have been\npassed before launch. Allows the constant and varying_keys to\nbe be checked and can inspect the specs attribute if an\ninstance of Args. If invalid, raise an Exception with the\nappropriate error message, otherwise return None.", "id": "f12319:c0:m4"}
{"signature": "def _savepath(self, filename):", "body": "(basename, ext) = os.path.splitext(filename)<EOL>basename = basename if (ext in self.extensions) else filename<EOL>ext = ext if (ext in self.extensions) else self.extensions[<NUM_LIT:0>]<EOL>savepath = os.path.abspath(os.path.join(self.directory,<EOL>'<STR_LIT>' % (basename, ext)))<EOL>return (tempfile.mkstemp(ext, basename + \"<STR_LIT:_>\", self.directory)[<NUM_LIT:1>]<EOL>if self.hash_suffix else savepath)<EOL>", "docstring": "Returns the full path for saving the file, adding an extension\nand making the filename unique as necessary.", "id": "f12320:c0:m5"}
{"signature": "def save(self, filename,  metadata={}, **data):", "body": "intersection = set(metadata.keys()) & set(data.keys())<EOL>if intersection:<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise Exception(msg  % '<STR_LIT:U+002C>'.join(intersection))<EOL><DEDENT>", "docstring": "The implementation in the base class simply checks there is no\nclash between the metadata and data keys.", "id": "f12320:c0:m1"}
{"signature": "def save(self, filename, imdata, **data):", "body": "if isinstance(imdata, numpy.ndarray):<EOL><INDENT>imdata = Image.fromarray(numpy.uint8(imdata))<EOL><DEDENT>elif isinstance(imdata, Image.Image):<EOL><INDENT>imdata.save(self._savepath(filename))<EOL><DEDENT>", "docstring": "Data may be either a PIL Image object or a Numpy array.", "id": "f12320:c6:m2"}
{"signature": "def data(self, filename):", "body": "raise NotImplementedError<EOL>", "docstring": "Data returned as a dictionary.", "id": "f12320:c0:m3"}
{"signature": "def load_ipython_extension(ip):", "body": "global _loaded<EOL>if not _loaded:<EOL><INDENT>_loaded = True<EOL>from lancet import launch<EOL>if sys.version_info[<NUM_LIT:0>] == <NUM_LIT:2>:<EOL><INDENT>launch.input = lambda *args, **kwargs: raw_input(*args, **kwargs)<EOL><DEDENT>plaintext_formatter = ip.display_formatter.formatters['<STR_LIT>']<EOL>plaintext_formatter.for_type(Args, repr_pretty_annotated)<EOL>plaintext_formatter.for_type(Command, repr_pretty_unannotated)<EOL>plaintext_formatter.for_type(Launcher, repr_pretty_unannotated)<EOL>plaintext_formatter.for_type(FileType, repr_pretty_unannotated)<EOL>plaintext_formatter.for_type(review_and_launch, repr_pretty_unannotated)<EOL><DEDENT>", "docstring": "IPython pretty printing support (optional). To load the extension\nyou may execute the following in IPython:\n\n%load_ext lancet", "id": "f12321:m2"}
{"signature": "def __call__(self, paths=[], **params_to_override):", "body": "p=param.ParamOverrides(self, dict(params_to_override, paths=paths))<EOL>if p.paths == []:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>paths = [p.paths] if isinstance(p.paths, str) else p.paths<EOL>def _desc(path, ind):<EOL><INDENT>for vcs in p.commands.keys():<EOL><INDENT>if os.path.exists(os.path.join(path, vcs)):<EOL><INDENT>proc = subprocess.Popen(p.commands[vcs][ind],<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE, cwd=path)<EOL>return str(proc.communicate()[<NUM_LIT:0>].decode()).strip()<EOL><DEDENT><DEDENT><DEDENT>abspaths = [os.path.abspath(path) for path in paths]<EOL>return {'<STR_LIT>' : dict((path, _desc(path,<NUM_LIT:0>)) for path in abspaths),<EOL>'<STR_LIT>':  dict((path, _desc(path,<NUM_LIT:1>)) for path in abspaths),<EOL>'<STR_LIT>':     dict((path, _desc(path,<NUM_LIT:2>)) for path in abspaths)}<EOL>", "docstring": "Takes a single path string or a list of path strings and\nreturns the corresponing version control information.", "id": "f12321:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def spec_formatter(cls, spec):<DEDENT>", "body": "return type(spec)((k, str(v)) for (k,v) in spec.items())<EOL>", "docstring": "Formats the elements of an argument set appropriately", "id": "f12322:c1:m3"}
{"signature": "def load_table(self, table):", "body": "items,  data_keys = [], None<EOL>for key, filename in table.items():<EOL><INDENT>data_dict = self.filetype.data(filename[<NUM_LIT:0>])<EOL>current_keys = tuple(sorted(data_dict.keys()))<EOL>values = [data_dict[k] for k in current_keys]<EOL>if data_keys is None:<EOL><INDENT>data_keys = current_keys<EOL><DEDENT>elif data_keys != current_keys:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>items.append((key, values))<EOL><DEDENT>return Table(items, kdims=table.kdims, vdims=data_keys)<EOL>", "docstring": "Load the file contents into the supplied Table using the\nspecified key and filetype. The input table should have the\nfilenames as values which will be replaced by the loaded\ndata. If data_key is specified, this key will be used to index\nthe loaded data to retrive the specified item.", "id": "f12322:c10:m4"}
{"signature": "@property<EOL><INDENT>def constant_keys(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Returns the list of parameter names whose values are constant\nas the argument specifier is iterated.  Note that the union of\nconstant and varying_keys should partition the entire set of\nkeys in the case where there are no unsortable keys.", "id": "f12322:c1:m4"}
{"signature": "@classmethod<EOL><INDENT>def directory(cls, directory, root=None, extension=None, **kwargs):<DEDENT>", "body": "root = os.getcwd() if root is None else root<EOL>suffix = '<STR_LIT>' if extension is None else '<STR_LIT:.>' + extension.rsplit('<STR_LIT:.>')[-<NUM_LIT:1>]<EOL>pattern = directory + os.sep + '<STR_LIT:*>' + suffix<EOL>key = os.path.join(root, directory,'<STR_LIT:*>').rsplit(os.sep)[-<NUM_LIT:2>]<EOL>format_parse = list(string.Formatter().parse(key))<EOL>if not all([el is None for el in zip(*format_parse)[<NUM_LIT:1>]]):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return cls(key, pattern, root, **kwargs)<EOL>", "docstring": "Load all the files in a given directory selecting only files\nwith the given extension if specified. The given kwargs are\npassed through to the normal constructor.", "id": "f12322:c9:m0"}
{"signature": "def _info(self, source, key, filetype, ignore):", "body": "specs, mdata = [], {}<EOL>mdata_clashes  = set()<EOL>for spec in source.specs:<EOL><INDENT>if key not in spec:<EOL><INDENT>raise Exception(\"<STR_LIT>\" % key)<EOL><DEDENT>mdata = dict((k,v) for (k,v) in filetype.metadata(spec[key]).items()<EOL>if k not in ignore)<EOL>mdata_spec = {}<EOL>mdata_spec.update(spec)<EOL>mdata_spec.update(mdata)<EOL>specs.append(mdata_spec)<EOL>mdata_clashes = mdata_clashes | (set(spec.keys()) & set(mdata.keys()))<EOL><DEDENT>if mdata_clashes:<EOL><INDENT>self.warning(\"<STR_LIT>\")<EOL><DEDENT>return specs<EOL>", "docstring": "Generates the union of the source.specs and the metadata\ndictionary loaded by the filetype object.", "id": "f12322:c10:m6"}
{"signature": "@staticmethod<EOL><INDENT>def extract_log(log_path, dict_type=dict):<DEDENT>", "body": "log_path = (log_path if os.path.isfile(log_path)<EOL>else os.path.join(os.getcwd(), log_path))<EOL>with open(log_path,'<STR_LIT:r>') as log:<EOL><INDENT>splits = (line.split() for line in log)<EOL>uzipped = ((int(split[<NUM_LIT:0>]), json.loads(\"<STR_LIT:U+0020>\".join(split[<NUM_LIT:1>:]))) for split in splits)<EOL>szipped = [(i, dict((str(k),v) for (k,v) in d.items())) for (i,d) in uzipped]<EOL><DEDENT>return dict_type(szipped)<EOL>", "docstring": "Parses the log file generated by a launcher and returns\ndictionary with tid keys and specification values.\n\nOrdering can be maintained by setting dict_type to the\nappropriate constructor (i.e. OrderedDict). Keys are converted\nfrom unicode to strings for kwarg use.", "id": "f12322:c8:m0"}
{"signature": "def pprint_args(self, pos_args, keyword_args, infix_operator=None, extra_params={}):", "body": "if infix_operator and not (len(pos_args)==<NUM_LIT:2> and keyword_args==[]):<EOL><INDENT>raise Exception('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>(kwargs,_,_,_) = self._pprint_args<EOL>self._pprint_args = (keyword_args + kwargs, pos_args, infix_operator, extra_params)<EOL>", "docstring": "Method to define the positional arguments and keyword order\nfor pretty printing.", "id": "f12322:c0:m0"}
{"signature": "def _decompose_pattern(self, pattern):", "body": "sep = '<STR_LIT>'<EOL>float_codes = ['<STR_LIT:e>','<STR_LIT:E>','<STR_LIT:f>', '<STR_LIT:F>','<STR_LIT:g>', '<STR_LIT>', '<STR_LIT:n>']<EOL>typecodes = dict([(k,float) for k in float_codes]<EOL>+ [('<STR_LIT:b>',bin), ('<STR_LIT:d>',int), ('<STR_LIT:o>',oct), ('<STR_LIT:x>',hex)])<EOL>parse = list(string.Formatter().parse(pattern))<EOL>text, fields, codes, _ = zip(*parse)<EOL>types = []<EOL>for (field, code) in zip(fields, codes):<EOL><INDENT>if code in ['<STR_LIT>', None]: continue<EOL>constructor =  typecodes.get(code[-<NUM_LIT:1>], None)<EOL>if constructor: types += [(field, constructor)]<EOL><DEDENT>stars =  ['<STR_LIT>' if not f else '<STR_LIT:*>' for f in fields]<EOL>globpat = '<STR_LIT>'.join(text+star for (text,star) in zip(text,stars))<EOL>refields = ['<STR_LIT>' if not f else sep+('<STR_LIT>'% f)+sep for f in fields]<EOL>parts = '<STR_LIT>'.join(text+group for (text,group) in zip(text, refields)).split(sep)<EOL>for i in range(<NUM_LIT:0>, len(parts), <NUM_LIT:2>): parts[i] = re.escape(parts[i])<EOL>regexp_pattern = '<STR_LIT>'.join(parts).replace('<STR_LIT>','<STR_LIT>')<EOL>fields = list(f for f in fields if f)<EOL>return globpat, regexp_pattern , fields, dict(types)<EOL>", "docstring": "Given a path pattern with format declaration, generates a\nfour-tuple (glob_pattern, regexp pattern, fields, type map)", "id": "f12322:c9:m5"}
{"signature": "@staticmethod<EOL><INDENT>def write_log(log_path, data, allow_append=True):<DEDENT>", "body": "append = os.path.isfile(log_path)<EOL>islist = isinstance(data, list)<EOL>if append and not allow_append:<EOL><INDENT>raise Exception('<STR_LIT>'<EOL>'<STR_LIT>' % log_path)<EOL><DEDENT>if not (islist or isinstance(data, Args)):<EOL><INDENT>raise Exception('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>specs = data if islist else data.specs<EOL>if not all(isinstance(el,dict) for el in specs):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>log_file = open(log_path, '<STR_LIT>') if append else open(log_path, '<STR_LIT:w>')<EOL>start = int(log_file.readlines()[-<NUM_LIT:1>].split()[<NUM_LIT:0>])+<NUM_LIT:1> if append else <NUM_LIT:0><EOL>ascending_indices = range(start, start+len(data))<EOL>log_str = '<STR_LIT:\\n>'.join(['<STR_LIT>' % (tid, json.dumps(el))<EOL>for (tid, el) in zip(ascending_indices,specs)])<EOL>log_file.write(\"<STR_LIT:\\n>\"+log_str if append else log_str)<EOL>log_file.close()<EOL>", "docstring": "Writes the supplied specifications to the log path. The data\nmay be supplied as either as a an Args or as a list of\ndictionaries.\n\nBy default, specifications will be appropriately appended to\nan existing log file. This can be disabled by setting\nallow_append to False.", "id": "f12322:c8:m1"}
{"signature": "def __next__(self):", "body": "raise StopIteration<EOL>", "docstring": "Called to get a list of specifications: dictionaries with\nparameter name keys and string values.", "id": "f12322:c1:m8"}
{"signature": "def show(self, exclude=[]):", "body": "ordering = self.constant_keys + self.varying_keys<EOL>spec_lines = ['<STR_LIT:U+002CU+0020>'.join(['<STR_LIT>' % (k, s[k]) for k in ordering<EOL>if (k in s) and (k not in exclude)])<EOL>for s in self.specs]<EOL>print('<STR_LIT:\\n>'.join(['<STR_LIT>' % (i,l) for (i,l) in enumerate(spec_lines)]))<EOL>", "docstring": "Convenience method to inspect the available argument values in\nhuman-readable format. The ordering of keys is determined by\nhow quickly they vary.\n\nThe exclude list allows specific keys to be excluded for\nreadability (e.g. to hide long, absolute filenames).", "id": "f12322:c3:m5"}
{"signature": "def _build_specs(self, specs, kwargs, fp_precision):", "body": "if specs is None:<EOL><INDENT>overrides = param.ParamOverrides(self, kwargs,<EOL>allow_extra_keywords=True)<EOL>extra_kwargs = overrides.extra_keywords()<EOL>kwargs = dict([(k,v) for (k,v) in kwargs.items()<EOL>if k not in extra_kwargs])<EOL>rounded_specs = list(self.round_floats([extra_kwargs],<EOL>fp_precision))<EOL>if extra_kwargs=={}: return [], kwargs, True<EOL>else:                return rounded_specs, kwargs, False<EOL><DEDENT>return list(self.round_floats(specs, fp_precision)), kwargs, True<EOL>", "docstring": "Returns the specs, the remaining kwargs and whether or not the\nconstructor was called with kwarg or explicit specs.", "id": "f12322:c3:m1"}
{"signature": "def remove_namespace(doc, namespace):", "body": "ns = u'<STR_LIT>' % namespace<EOL>nsl = len(ns)<EOL>for elem in doc.getiterator():<EOL><INDENT>if elem.tag.startswith(ns):<EOL><INDENT>elem.tag = elem.tag[nsl:]<EOL>elem.attrib['<STR_LIT>'] = namespace<EOL><DEDENT><DEDENT>", "docstring": "Remove namespace in the passed document in place.", "id": "f12332:m0"}
{"signature": "def encode(self, xtree):", "body": "raise NotImplementedError()<EOL>", "docstring": "returns a tuple of (contentType, data)", "id": "f12332:c0:m0"}
{"signature": "def mergeItems(self, localItem, remoteItem, changeSpec):", "body": "raise ConflictError('<STR_LIT>')<EOL>", "docstring": "[OPTIONAL] Merges the properties of `remoteItem`, which is an item\nprovided by a remote peer during a synchronization, into the\n`localItem`, which is an item retrieved from this agent either via\n:meth:`getItem` or :meth:`matchItem`. `changeSpec` will represent\nthe changes applied to `localItem` since `remoteItem` was last\nsynchronized, or will be ``None`` when called as a result of a\nslow-sync :meth:`matchItem` call.\n\nThis method should return a new change-spec (see\n:meth:`replaceItem` for details) that represents the changes\napplied to `localItem` from `remoteItem`.\n\nIf the items cannot be merged, then a `pysyncml.ConflictError`\nshould be raised with more descriptive information on what failed\nduring the merge --- in which case pysyncml will revert to the\nconflict resolution policy defined by `store.conflictPolicy` or\n`adapter.conflictPolicy`.\n\nIMPORTANT: if the merge fails, `localItem` and `remoteItem` must\nstay untouched by this call; most importantly, if the merge fails\nwith a ConflictError, then `remoteItem` must be in the identical\nstate as when it entered the call.\n\nThis method by default raises a ConflictError, which means that if\nany changes are made to the same item simultaneously by two\ndifferent peers, they will result in a conflict and will not be\nauto-mergeable.\n\nSee :doc:`../merging` for details.", "id": "f12337:c0:m12"}
{"signature": "def deleteItem(self, itemID):", "body": "raise NotImplementedError()<EOL>", "docstring": "Deletes the local datastore item with ID `itemID`.", "id": "f12337:c0:m10"}
{"signature": "def dumpItem(self, item, stream, contentType=None, version=None):", "body": "raise NotImplementedError()<EOL>", "docstring": "Converts the specified `item` to serialized form (such that it can\ntransported over the wire) and writes it to the provided file-like\n`stream` object. For agents that support multiple content-types,\nthe desired `contentType` and `version` will be specified as a\nparameter. If `contentType` and `version` are None, appropriate\ndefault values should be used. For agents that concurrently use\nmultiple content-types, the return value may be a two-element\ntuple of (contentType, version), thus overriding or enhancing the\nprovided values.", "id": "f12337:c0:m2"}
{"signature": "def loadItem(self, stream, contentType=None, version=None):", "body": "raise NotImplementedError()<EOL>", "docstring": "Reverses the effects of the :meth:`dumpItem` method, and returns\nthe de-serialized Item from the file-like source `stream`.\n\nNote: `version` will typically be ``None``, so it should either be\nauto-determined, or not used. This is an issue in the SyncML\nprotocol, and is only here for symmetry with :meth:`dumpItem`\nand as \"future-proofing\".", "id": "f12337:c0:m4"}
{"signature": "def matchItem(self, item):", "body": "for match in self.getAllItems():<EOL><INDENT>if cmp(match, item) == <NUM_LIT:0>:<EOL><INDENT>return match<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "[OPTIONAL] Attempts to find the specified item and returns an item\nthat describes the same object although it's specific properties\nmay be different. For example, a contact whose name is an\nidentical match, but whose telephone number has changed would\nreturn the matched item. ``None`` should be returned if no match\nis found, otherwise the item that `item` matched should be\nreturned.\n\nThis is used primarily when a slow-sync is invoked and objects\nthat exist in both peers should not be replicated.\n\nNote that **NO** merging of the items' properties should be done;\nthat will be initiated via a separate call to :meth:`mergeItems`.\n\nThis method by default will iterate over all items (by calling\n:meth:`getAllItems`) and compare them using ``cmp()``. This means\nthat if the items managed by this agent implement the ``__eq__``\nor ``__cmp__`` methods, then matching items will be detected and\nreturned. Otherwise, any items that exist in both peers will be\nduplicated on slow-sync.\n\nSub-classes *should* implement a more efficient method of finding\nmatching items.\n\nSee :doc:`../merging` for details.", "id": "f12337:c0:m11"}
{"signature": "def getAllItems(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Returns an iterable of all the items stored in the local datastore.", "id": "f12337:c0:m6"}
{"signature": "def newMerger(self, changeSpec=None):", "body": "raise NotImplementedError()<EOL>", "docstring": "Returns a :class:`Merger` for the specified `changeSpec` which can\nbe ``None`` if the merger is intended to generate a change-spec.", "id": "f12343:c1:m0"}
{"signature": "def getChangeSpec(self):", "body": "return self._changes2spec(self.changes)<EOL>", "docstring": "Returns a string-representation of the changes recorded by this\nChangeTracker that were reported since construction (or calls to\npushChangeSpec()) by calls to :meth:`append` or :meth:`update`.\n\nThis is similar to, but distinct from, :meth:`getFullChangeSpec`.", "id": "f12347:c1:m7"}
{"signature": "def append(self, fieldname, changeType, initialValue=None, isMd5=False):", "body": "raise NotImplementedError()<EOL>", "docstring": "Add a change to this ChangeTracker.\n\n:param fieldname:\n\n  The item attribute that was changed in some way. The type of\n  `fieldname` is dependent on which subclass of ChangeTracker is\n  being used.\n\n:param changeType:\n\n  The type of change that was applied to `fieldname`, which can be\n  one of ``pysyncml.ITEM_ADDED``, ``pysyncml.ITEM_MODIFIED``, or\n  ``pysyncml.ITEM_DELETED``.\n\n:param initialValue:\n\n  For non-ADDED change types, specifies the *initial* value of the\n  field, before the change was applied. Note that if the\n  `initialValue` is very large, an MD5 checksum can be provided\n  instead, in which case `isMd5` should be set to ``True``.\n\n:param isMd5:\n\n  Specifies whether `initialValue` is an MD5 checksum or not. For\n  large values of `initialValue` the ChangeTrackers will\n  automatically convert it to a checksum, but this allows the\n  caller to potentially do some additional optimizations.", "id": "f12347:c1:m3"}
{"signature": "def getFullChangeSpec(self):", "body": "return self._changes2spec(self.allchanges)<EOL>", "docstring": "Returns a string-representation of *all* changes recorded by this\nChangeTracker, including those provided in the constructor and any\ncalls to `pushChangeSpec()`. Note that this is usually *NOT* what\nyou are looking for when reporting changes to the pysyncml\nframework -- for that, see :meth:`getChangeSpec`.", "id": "f12347:c1:m6"}
{"signature": "def __init__(self, changeSpec=None, *args, **kw):", "body": "self.baseline = []<EOL>self.current  = []<EOL>super(ListChangeTracker, self).__init__(changeSpec, *args, **kw)<EOL>", "docstring": "Initializes this ListChangeTracker with the provided `changeSpec`,\nwhich is expected to be in the same format as what would have been\nreturned by a call to ``str()`` on this object. The change-spec\nwill look similar to::\n\n  2:a,1:M68b329d...,1:mh4d9,2:Dba45...,3:a\n\nIf `changeSpec` is not specified, this ListChangeTracker will\nstart assuming no prior changes were made to any content and will\nexpect changes to be reported via :meth:`pushChange`.", "id": "f12347:c3:m0"}
{"signature": "def isChange(self, fieldname, changeType, newValue=None, isMd5=False):", "body": "<EOL>changes = self._collapseChanges(self.baseline, self.current)<EOL>if fieldname not in changes:<EOL><INDENT>return fieldname<EOL><DEDENT>cur = changes[fieldname]<EOL>if changeType == constants.ITEM_DELETED:<EOL><INDENT>if cur.op == constants.ITEM_ADDED or cur.op == constants.ITEM_DELETED:<EOL><INDENT>return None<EOL><DEDENT>raise ConflictError('<STR_LIT>'<EOL>% (fieldname,))<EOL><DEDENT>if isMd5Equal(newValue, isMd5, cur.ival, cur.md5):<EOL><INDENT>return None<EOL><DEDENT>raise ConflictError(<EOL>'<STR_LIT>' % (fieldname,))<EOL>", "docstring": "Implements as specified in :meth:`.ChangeTracker.isChange` where\nthe `changeObject` is simply the fieldname that needs to be\nupdated with the `newValue`. Currently, this is always equal to\n`fieldname`.", "id": "f12347:c2:m6"}
{"signature": "def isChange(self, listIndex, changeType, newValue=None, isMd5=False, token=None):", "body": "<EOL>adjust  = <NUM_LIT:0>               <EOL>token   = token           <EOL>index   = int(listIndex)<EOL>ret     = index<EOL>changes = self._collapseChanges(self.baseline, self.current)<EOL>for cur in changes:<EOL><INDENT>if cur.index > index:<EOL><INDENT>if changeType != constants.ITEM_ADDED:<EOL><INDENT>return (ret, None)<EOL><DEDENT>if token is None or token[<NUM_LIT:0>] != index - adjust:<EOL><INDENT>token = (ret, <NUM_LIT:0>)<EOL><DEDENT>token = (ret, token[<NUM_LIT:1>] + <NUM_LIT:1>)<EOL>return (ret, token)<EOL><DEDENT>if cur.index != index:<EOL><INDENT>if cur.op == constants.ITEM_DELETED:<EOL><INDENT>index  += <NUM_LIT:1><EOL>adjust += <NUM_LIT:1><EOL><DEDENT>continue<EOL><DEDENT>if token is not None and token[<NUM_LIT:0>] == index - adjust:<EOL><INDENT>index += token[<NUM_LIT:1>]<EOL>continue<EOL><DEDENT>if changeType == constants.ITEM_DELETED:<EOL><INDENT>if cur.op == constants.ITEM_ADDED:<EOL><INDENT>return (None, None)<EOL><DEDENT>raise ConflictError(<EOL>'<STR_LIT>' % (index,))<EOL><DEDENT>if changeType == constants.ITEM_ADDED:<EOL><INDENT>if token is None:<EOL><INDENT>token = (ret, <NUM_LIT:0>)<EOL><DEDENT>token = (ret, token[<NUM_LIT:1>] + <NUM_LIT:1>)<EOL>if cur.op == constants.ITEM_DELETED:<EOL><INDENT>if isMd5Equal(newValue, isMd5, cur.ival, cur.md5):<EOL><INDENT>return (None, token)<EOL><DEDENT><DEDENT>return (ret, token)<EOL><DEDENT>if cur.op == constants.ITEM_DELETED:<EOL><INDENT>index  += <NUM_LIT:1><EOL>adjust += <NUM_LIT:1><EOL>continue<EOL><DEDENT>if cur.op == constants.ITEM_ADDED:<EOL><INDENT>raise ConflictError(<EOL>'<STR_LIT>' % (index,))<EOL><DEDENT>if isMd5Equal(newValue, isMd5, cur.ival, cur.md5):<EOL><INDENT>return (None, None)<EOL><DEDENT>raise ConflictError(<EOL>'<STR_LIT>' % (index,))<EOL><DEDENT>if changeType != constants.ITEM_ADDED:<EOL><INDENT>return (ret, None)<EOL><DEDENT>if token is None or token[<NUM_LIT:0>] != index 
- adjust:<EOL><INDENT>token = (ret, <NUM_LIT:0>)<EOL><DEDENT>token = (ret, token[<NUM_LIT:1>] + <NUM_LIT:1>)<EOL>return (ret, token)<EOL>", "docstring": "Implements as specified in :meth:`.ChangeTracker.isChange` where\nthe `changeObject` is a two-element tuple. The first element is\nthe index at which the change should be applied, and the second\nelement is an abstract token that should be passed back into this\nmethod at every iteration.\n\nIMPORTANT: unlike the AttributeChangeTracker, the\nListChangeTracker's `isChange()` method is sensitive to order\n(which is why it uses the `changeObject` and `token`\nmechanisms. Therefore, it is important to call `isChange()`\nsequentially with all changes in the order that they occur in the\nchange list.", "id": "f12347:c3:m8"}
{"signature": "def getAddressSize():", "body": "return int(platform.architecture(bits='<STR_LIT>')[<NUM_LIT:0>].replace('<STR_LIT>', '<STR_LIT>'))<EOL>", "docstring": "Returns the size of a memory address reference on the current\n    platform (e.g. 32 or 64 for respectively 32-bit or 64-bit operating\n    platforms) - defaults to 32 if it cannot be determined.", "id": "f12349:m9"}
{"signature": "def describeStats(stats, stream, title=None, details=True, totals=True, gettext=None):", "body": "from . import state<EOL>modeStringLut = dict((<EOL>(constants.SYNCTYPE_TWO_WAY,             '<STR_LIT>'),<EOL>(constants.SYNCTYPE_SLOW_SYNC,           '<STR_LIT>'),<EOL>(constants.SYNCTYPE_ONE_WAY_FROM_CLIENT, '<STR_LIT>'),<EOL>(constants.SYNCTYPE_REFRESH_FROM_CLIENT, '<STR_LIT>'),<EOL>(constants.SYNCTYPE_ONE_WAY_FROM_SERVER, '<STR_LIT>'),<EOL>(constants.SYNCTYPE_REFRESH_FROM_SERVER, '<STR_LIT>'),<EOL>))<EOL>if gettext is not None:<EOL><INDENT>_ = gettext<EOL><DEDENT>else:<EOL><INDENT>_ = lambda s: s<EOL><DEDENT>wSrc  = len(_('<STR_LIT>'))<EOL>wMode = len(_('<STR_LIT>'))<EOL>wCon  = len(_('<STR_LIT>'))<EOL>wCol  = len(_('<STR_LIT>'))<EOL>wMrg  = len(_('<STR_LIT>'))<EOL>wHereAdd = wPeerAdd = len(_('<STR_LIT>'))<EOL>wHereMod = wPeerMod = len(_('<STR_LIT>'))<EOL>wHereDel = wPeerDel = len(_('<STR_LIT>'))<EOL>wHereErr = wPeerErr = len(_('<STR_LIT>'))<EOL>totLoc = <NUM_LIT:0><EOL>totRem = <NUM_LIT:0><EOL>totErr = <NUM_LIT:0><EOL>totCol = <NUM_LIT:0><EOL>totMrg = <NUM_LIT:0><EOL>for key in stats.keys():<EOL><INDENT>wSrc  = max(wSrc, len(key))<EOL>wMode = max(wMode, len(modeStringLut.get(stats[key].mode)))<EOL>wCol  = max(wCol, len(num2str(stats[key].conflicts)))<EOL>wMrg  = max(wMrg, len(num2str(stats[key].merged)))<EOL>wHereAdd = max(wHereAdd, len(num2str(stats[key].hereAdd)))<EOL>wPeerAdd = max(wPeerAdd, len(num2str(stats[key].peerAdd)))<EOL>wHereMod = max(wHereMod, len(num2str(stats[key].hereMod)))<EOL>wPeerMod = max(wPeerMod, len(num2str(stats[key].peerMod)))<EOL>wHereDel = max(wHereDel, len(num2str(stats[key].hereDel)))<EOL>wPeerDel = max(wPeerDel, len(num2str(stats[key].peerDel)))<EOL>wHereErr = max(wHereErr, len(num2str(stats[key].hereErr)))<EOL>wPeerErr = max(wPeerErr, len(num2str(stats[key].peerErr)))<EOL>totLoc += stats[key].hereAdd + stats[key].hereMod + stats[key].hereDel<EOL>totRem += stats[key].peerAdd + stats[key].peerMod + 
stats[key].peerDel<EOL>totErr += stats[key].hereErr + stats[key].peerErr<EOL>totCol += stats[key].conflicts<EOL>totMrg += stats[key].merged<EOL><DEDENT>if wCon > wCol + <NUM_LIT:3> + wMrg:<EOL><INDENT>diff = wCon - ( wCol + <NUM_LIT:3> + wMrg )<EOL>wCol += diff / <NUM_LIT:2><EOL>wMrg = wCon - <NUM_LIT:3> - wCol<EOL><DEDENT>else:<EOL><INDENT>wCon = wCol + <NUM_LIT:3> + wMrg<EOL><DEDENT>if details:<EOL><INDENT>tWid = ( wSrc + <NUM_LIT:3> + wMode + <NUM_LIT:3><EOL>+ wHereAdd + wHereMod + wHereDel + wHereErr + <NUM_LIT:9> + <NUM_LIT:3><EOL>+ wPeerAdd + wPeerMod + wPeerDel + wPeerErr + <NUM_LIT:9> + <NUM_LIT:3><EOL>+ wCon )<EOL><DEDENT>else:<EOL><INDENT>if title is None:<EOL><INDENT>tWid = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>tWid = len(title)<EOL><DEDENT><DEDENT>if totals:<EOL><INDENT>sumlist = []<EOL>for val, singular, plural in [<EOL>(totLoc, _('<STR_LIT>'), _('<STR_LIT>')),<EOL>(totRem, _('<STR_LIT>'), _('<STR_LIT>')),<EOL>(totErr, _('<STR_LIT:error>'), _('<STR_LIT>')),<EOL>]:<EOL><INDENT>if val == <NUM_LIT:1>:<EOL><INDENT>sumlist.append(num2str(val) + '<STR_LIT:U+0020>' + singular)<EOL><DEDENT>elif val > <NUM_LIT:1>:<EOL><INDENT>sumlist.append(num2str(val) + '<STR_LIT:U+0020>' + plural)<EOL><DEDENT><DEDENT>if len(sumlist) <= <NUM_LIT:0>:<EOL><INDENT>sumlist = _('<STR_LIT>')<EOL><DEDENT>elif len(sumlist) == <NUM_LIT:1>:<EOL><INDENT>sumlist = sumlist[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>sumlist = '<STR_LIT:U+002CU+0020>'.join(sumlist[:-<NUM_LIT:1>]) + '<STR_LIT:U+0020>' + _('<STR_LIT>') + '<STR_LIT:U+0020>' + sumlist[-<NUM_LIT:1>]<EOL><DEDENT>if totMrg > <NUM_LIT:0> or totCol > <NUM_LIT:0>:<EOL><INDENT>sumlist += '<STR_LIT>'<EOL>if totMrg == <NUM_LIT:1>:<EOL><INDENT>sumlist += num2str(totMrg) + '<STR_LIT:U+0020>' + _('<STR_LIT>')<EOL><DEDENT>elif totMrg > <NUM_LIT:1>:<EOL><INDENT>sumlist += num2str(totMrg) + '<STR_LIT:U+0020>' + _('<STR_LIT>')<EOL><DEDENT>if totMrg > <NUM_LIT:0> and totCol > <NUM_LIT:0>:<EOL><INDENT>sumlist += '<STR_LIT:U+0020>' + 
_('<STR_LIT>') + '<STR_LIT:U+0020>'<EOL><DEDENT>if totCol == <NUM_LIT:1>:<EOL><INDENT>sumlist += num2str(totCol) + '<STR_LIT:U+0020>' + _('<STR_LIT>')<EOL><DEDENT>elif totCol > <NUM_LIT:1>:<EOL><INDENT>sumlist += num2str(totCol) + '<STR_LIT:U+0020>' + _('<STR_LIT>')<EOL><DEDENT><DEDENT>sumlist += '<STR_LIT:.>'<EOL>if len(sumlist) > tWid:<EOL><INDENT>wSrc += len(sumlist) - tWid<EOL>tWid = len(sumlist)<EOL><DEDENT><DEDENT>if title is not None:<EOL><INDENT>stream.write('<STR_LIT>' + '<STR_LIT:->' * tWid + '<STR_LIT>')<EOL>stream.write('<STR_LIT>'.format(title, w=tWid))<EOL>stream.write('<STR_LIT>')<EOL><DEDENT>hline = '<STR_LIT>'+ '<STR_LIT:->' * wSrc+ '<STR_LIT>'+ '<STR_LIT:->' * wMode+ '<STR_LIT>'+ '<STR_LIT:->' * ( wHereAdd + wHereMod + wHereDel + wHereErr + <NUM_LIT:9> )+ '<STR_LIT>'+ '<STR_LIT:->' * ( wPeerAdd + wPeerMod + wPeerDel + wPeerErr + <NUM_LIT:9> )+ '<STR_LIT>'+ '<STR_LIT:->' * wCon+ '<STR_LIT>'<EOL>if details:<EOL><INDENT>stream.write(hline)<EOL>stream.write('<STR_LIT>' + '<STR_LIT:U+0020>' * wSrc)<EOL>stream.write('<STR_LIT>' + '<STR_LIT:U+0020>' * wMode)<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=( wHereAdd + wHereMod + wHereDel + wHereErr + <NUM_LIT:9> )))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=( wPeerAdd + wPeerMod + wPeerDel + wPeerErr + <NUM_LIT:9> )))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=wCon))<EOL>stream.write('<STR_LIT>')<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=wSrc))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=wMode))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=wHereAdd))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=wHereMod))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=wHereDel))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=wHereErr))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=wPeerAdd))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=wPeerMod))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), 
w=wPeerDel))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=wPeerErr))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=wCol))<EOL>stream.write('<STR_LIT>'.format(_('<STR_LIT>'), w=wMrg))<EOL>stream.write('<STR_LIT>')<EOL>hsline = '<STR_LIT>' + '<STR_LIT:->' * wSrc+ '<STR_LIT>' + '<STR_LIT:->' * wMode+ '<STR_LIT>' + '<STR_LIT:->' * wHereAdd+ '<STR_LIT>' + '<STR_LIT:->' * wHereMod+ '<STR_LIT>' + '<STR_LIT:->' * wHereDel+ '<STR_LIT>' + '<STR_LIT:->' * wHereErr+ '<STR_LIT>' + '<STR_LIT:->' * wPeerAdd+ '<STR_LIT>' + '<STR_LIT:->' * wPeerMod+ '<STR_LIT>' + '<STR_LIT:->' * wPeerDel+ '<STR_LIT>' + '<STR_LIT:->' * wPeerErr+ '<STR_LIT>' + '<STR_LIT:->' * wCol+ '<STR_LIT>' + '<STR_LIT:->' * wMrg+ '<STR_LIT>'<EOL>stream.write(hsline)<EOL>def numcol(val, wid):<EOL><INDENT>if val == <NUM_LIT:0>:<EOL><INDENT>return '<STR_LIT>'.format('<STR_LIT:->', w=wid)<EOL><DEDENT>return '<STR_LIT>'.format(num2str(val), w=wid)<EOL><DEDENT>for key in sorted(stats.keys(), key=lambda k: str(k).lower()):<EOL><INDENT>stream.write('<STR_LIT>'.format(key, w=wSrc))<EOL>stream.write('<STR_LIT>'.format(modeStringLut.get(stats[key].mode), w=wMode))<EOL>stream.write(numcol(stats[key].hereAdd, wHereAdd))<EOL>stream.write(numcol(stats[key].hereMod, wHereMod))<EOL>stream.write(numcol(stats[key].hereDel, wHereDel))<EOL>stream.write(numcol(stats[key].hereErr, wHereErr))<EOL>stream.write(numcol(stats[key].peerAdd, wPeerAdd))<EOL>stream.write(numcol(stats[key].peerMod, wPeerMod))<EOL>stream.write(numcol(stats[key].peerDel, wPeerDel))<EOL>stream.write(numcol(stats[key].peerErr, wPeerErr))<EOL>stream.write(numcol(stats[key].conflicts, wCol))<EOL>stream.write(numcol(stats[key].merged, wMrg))<EOL>stream.write('<STR_LIT>')<EOL><DEDENT>stream.write(hsline)<EOL><DEDENT>if totals:<EOL><INDENT>if title is None and not details:<EOL><INDENT>stream.write('<STR_LIT>' + '<STR_LIT:->' * tWid + '<STR_LIT>')<EOL><DEDENT>stream.write('<STR_LIT>'.format(sumlist, 
w=tWid))<EOL>stream.write('<STR_LIT>')<EOL>stream.write('<STR_LIT>' + '<STR_LIT:->' * tWid + '<STR_LIT>')<EOL><DEDENT>return<EOL>", "docstring": "Renders an ASCII-table of the synchronization statistics `stats`,\nexample output:\n\n.. code-block::\n\n  +----------------------------------------------------------------------------------+\n  |                                      TITLE                                       |\n  +----------+------+-------------------------+--------------------------+-----------+\n  |          |      |          Local          |          Remote          | Conflicts |\n  |   Source | Mode |  Add  | Mod | Del | Err |   Add  | Mod | Del | Err | Col | Mrg |\n  +----------+------+-------+-----+-----+-----+--------+-----+-----+-----+-----+-----+\n  | contacts |  <=  |   -   |  -  |  -  |  -  | 10,387 |  -  |  -  |  -  |  -  |  -  |\n  |     note |  SS  | 1,308 |  -  |   2 |  -  |    -   |  -  |  -  |  -  |  -  |  -  |\n  +----------+------+-------+-----+-----+-----+--------+-----+-----+-----+-----+-----+\n  |                  1,310 local changes and 10,387 remote changes.                  |\n  +----------------------------------------------------------------------------------+\n\n:Parameters:\n\nstats : dict\n\n  The synchronization stats returned by a call to Adapter.sync().\n\nstream : file-like-object\n\n  An output file-like object that has at least a `write()` method,\n  e.g. 
``sys.stdout`` can be used.\n\ntitle : str, optional, default: null\n\n  A title placed at the top of the table -- if omitted (the default),\n  then no title is rendered.\n\ndetails : bool, optional, default: true\n\n  If truthy, a per-datastore listing of changes will be displayed\n  (as in the above example).\n\ntotals : bool, optional, default: true\n\n  If truthy, a summary of all changes will be displayed (as in the\n  above example).\n\ngettext : callable, optional, @DEPRECATED(0.2.0), default: null\n\n  A `gettext.gettext` compatible callable used for translating\n  localized content (such as number formatting, etc.).\n\n  NOTE: this parameter is deprecated, and will be replaced with\n  a generalized i18n solution.", "id": "f12349:m12"}
{"signature": "@classmethod<EOL><INDENT>def load(cls, stream, contentType=None, version=None):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Reverses the effects of the :meth:`dump` method, and returns the\nde-serialized Item from the file-like source `stream`.\n\nNote: `version` will typically be ``None``, so it should either be\nauto-determined, or not used. This is an issue in the SyncML\nprotocol, and is only here for symmetry with :meth:`dump` and as\n\"future-proofing\".", "id": "f12353:c0:m3"}
{"signature": "def __init__(self, name=None, parent=None,<EOL>created=None, modified=None, accessed=None,<EOL>contentType=None, body=None, size=None,<EOL>hidden=None, system=None, archived=None, delete=None,<EOL>writable=None, readable=None, executable=None,<EOL>*args, **kw):", "body": "super(FileItem, self).__init__(*args, **kw)<EOL>self.name        = name<EOL>self.parent      = parent<EOL>self.created     = created<EOL>self.modified    = modified<EOL>self.accessed    = accessed<EOL>self.contentType = contentType<EOL>self.body        = body<EOL>self.size        = size<EOL>if self.size is None and self.body is not None:<EOL><INDENT>self.size      = len(body)<EOL><DEDENT>self.hidden      = hidden<EOL>self.system      = system<EOL>self.archived    = archived<EOL>self.delete      = delete<EOL>self.writable    = writable<EOL>self.readable    = readable<EOL>self.executable  = executable<EOL>", "docstring": "FileItem constructor which takes the following optional parameters:\n\n:param name:\n\n  the file name (relative to the parent folder).\n\n:param parent:\n\n  the file\\'s containing folder.\n\n:param created:\n\n  the file\\'s creation time, in number of seconds since\n  the epoch.\n\n:param modified:\n\n  the file\\'s last modification time, in number of seconds\n  since the epoch.\n\n:param accessed:\n\n  the file\\'s last accessed time, in number of seconds\n  since the epoch.\n\n:param contentType:\n\n  the file\\'s content-type.\n\n:param body:\n\n  the file\\'s content.\n\n:param size:\n\n  the size of file\\'s content, specified as an integer. 
If not\n  specified and `body` is specified, the size will be taken from\n  the `body` parameter.\n\n:param hidden:\n\n  the file\\'s \"hidden\" boolean attribute.\n\n:param system:\n\n  the file\\'s \"system\" boolean attribute.\n\n:param archived:\n\n  the file\\'s \"archived\" boolean attribute.\n\n:param delete:\n\n  the file\\'s \"delete\" boolean attribute.\n\n:param writable:\n\n  the file\\'s \"writable\" boolean attribute.\n\n:param readable:\n\n  the file\\'s \"readable\" boolean attribute.\n\n:param executable:\n\n  the file\\'s \"executable\" boolean attribute.", "id": "f12357:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def load(cls, stream, contentType=None, version=None):<DEDENT>", "body": "if contentType is None:<EOL><INDENT>contentType = constants.TYPE_OMADS_FILE<EOL><DEDENT>if ctype.getBaseType(contentType) == constants.TYPE_OMADS_FOLDER:<EOL><INDENT>from .folder import FolderItem<EOL>return FolderItem.load(stream, contentType, version)<EOL><DEDENT>if ctype.getBaseType(contentType) != constants.TYPE_OMADS_FILE:<EOL><INDENT>raise common.InvalidContentType('<STR_LIT>' % (contentType,))<EOL><DEDENT>if version is None:<EOL><INDENT>version = '<STR_LIT>'<EOL><DEDENT>if version != '<STR_LIT>':<EOL><INDENT>raise common.InvalidContentType('<STR_LIT>' % (version,))<EOL><DEDENT>ret = FileItem()<EOL>data = stream.read()<EOL>xdoc = ET.fromstring(data)<EOL>if xdoc.tag != '<STR_LIT>':<EOL><INDENT>raise common.InvalidContent('<STR_LIT>'<EOL>% (xdoc.tag,))<EOL><DEDENT>ret.name = xdoc.findtext('<STR_LIT:name>')<EOL>ret.body = xdoc.findtext('<STR_LIT:body>')<EOL>ret.size = xdoc.findtext('<STR_LIT:size>')<EOL>if ret.body is not None:<EOL><INDENT>ret.size = len(ret.body)<EOL><DEDENT>elif ret.size is not None:<EOL><INDENT>ret.size = int(ret.size)<EOL><DEDENT>for attr in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>val = xdoc.findtext(attr)<EOL>if val is not None:<EOL><INDENT>setattr(ret, attr, int(common.parse_ts_iso(val)))<EOL><DEDENT><DEDENT>for attr in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>val = xdoc.findtext('<STR_LIT>' + attr[<NUM_LIT:0>])<EOL>if val is not None:<EOL><INDENT>setattr(ret, attr, val.lower() == '<STR_LIT:true>')<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "Reverses the effects of the :meth:`dump` method, creating a FileItem\nfrom the specified file-like `stream` object.", "id": "f12357:c0:m3"}
{"signature": "def __init__(self, name=None, parent=None,<EOL>created=None, modified=None, accessed=None,<EOL>role=None,<EOL>hidden=None, system=None, archived=None, delete=None,<EOL>writable=None, readable=None, executable=None,<EOL>*args, **kw):", "body": "super(FolderItem, self).__init__(*args, **kw)<EOL>self.name        = name<EOL>self.parent      = parent<EOL>self.created     = created<EOL>self.modified    = modified<EOL>self.accessed    = accessed<EOL>self.role        = role<EOL>self.hidden      = hidden<EOL>self.system      = system<EOL>self.archived    = archived<EOL>self.delete      = delete<EOL>self.writable    = writable<EOL>self.readable    = readable<EOL>self.executable  = executable<EOL>", "docstring": "FolderItem constructor which takes the following optional parameters:\n\n:param name:\n\n  the folder name (relative to the parent folder).\n\n:param parent:\n\n  the folder\\'s containing folder.\n\n:param created:\n\n  the folder\\'s creation time, in number of seconds since\n  the epoch.\n\n:param modified:\n\n  the folder\\'s last modification time, in number of seconds\n  since the epoch.\n\n:param accessed:\n\n  the folder\\'s last accessed time, in number of seconds\n  since the epoch.\n\n:param role:\n\n  the folder\\'s role, primarily used when dealing with collections\n  of emails.\n\n:param hidden:\n\n  the folder\\'s \"hidden\" boolean attribute.\n\n:param system:\n\n  the folder\\'s \"system\" boolean attribute.\n\n:param archived:\n\n  the folder\\'s \"archived\" boolean attribute.\n\n:param delete:\n\n  the folder\\'s \"delete\" boolean attribute.\n\n:param writable:\n\n  the folder\\'s \"writable\" boolean attribute.\n\n:param readable:\n\n  the folder\\'s \"readable\" boolean attribute.\n\n:param executable:\n\n  the folder\\'s \"executable\" boolean attribute.", "id": "f12358:c0:m0"}
{"signature": "def __init__(self, name=None, body=None, *args, **kw):", "body": "super(NoteItem, self).__init__(*args, **kw)<EOL>self.name        = name<EOL>self.body        = body<EOL>", "docstring": "NoteItem constructor which takes attributes `name` and `body`.", "id": "f12359:c0:m0"}
{"signature": "def __init__(self, syncSubdir='<STR_LIT>', defaultDir=None,<EOL>*args, **kw):", "body": "super(DirectorySyncEngine, self).__init__(*args, **kw)<EOL>self.syncSubdir = syncSubdir<EOL>self.defaultDir = defaultDir<EOL>self.rootDir    = None<EOL>", "docstring": "In addition to the :class:`CommandLineSyncEngine` constructor parameters,\nthe `DirectorySyncEngine` accepts the following:\n\n:param syncSubdir:\n\n  Specifies the name of the immediate subdirectory of the base\n  directory that should be created to contain configuration, state\n  and synchronization data. Removal of this directory will reset\n  all client/server states, and synchronization will need to resume\n  via a \"slow-sync\". The application should ignore this directory\n  when manipulating any data. The default is ``\".sync\"``.\n\n:param defaultDir:\n\n  If specified, will allow the user to invoke the application without\n  needing to identify the directory to synchronize, and instead will\n  default to this value. If used, this `CommandLineSyncEngine` begins to\n  resemble how the :class:`LocalUserSyncEngine` operates, but diverges in\n  the fact that the synchronization data is kept in the same directory\n  as the synchronized items.\n\nIn addition to the :class:`CommandLineSyncEngine` attributes,\nthe `DirectorySyncEngine` also provides the following:\n\n:param rootDir:\n\n  The path (potentially either relative or absolute) to the\n  directory under control by this synchronization engine. The path,\n  if valid, ends with a slash (\"/\").", "id": "f12363:c2:m0"}
{"signature": "def _makeAdapter(self):", "body": "self._callHooks('<STR_LIT>')<EOL>context = pysyncml.Context(storage='<STR_LIT>' % (self.dataDir,),<EOL>owner=None, autoCommit=True)<EOL>self._callHooks('<STR_LIT>', context)<EOL>adapter = context.Adapter()<EOL>if hasattr(self, '<STR_LIT>') and self.serverConf.policy is not None:<EOL><INDENT>adapter.conflictPolicy = self.serverConf.policy<EOL><DEDENT>if self.options.name is not None or self.appDisplay is not None:<EOL><INDENT>adapter.name = self.options.name or self.appDisplay<EOL><DEDENT>if adapter.devinfo is None:<EOL><INDENT>log.info('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>if self.options.devid is not None and self.options.devid != adapter.devinfo.devID:<EOL><INDENT>log.info('<STR_LIT>')<EOL>adapter.devinfo = None<EOL><DEDENT><DEDENT>if adapter.devinfo is None:<EOL><INDENT>devinfoParams = dict(<EOL>devID             = self.options.devid or self.defaultDevID,<EOL>devType           = pysyncml.DEVTYPE_SERVER if self.options.server elsepysyncml.DEVTYPE_WORKSTATION,<EOL>manufacturerName  = '<STR_LIT>',<EOL>modelName         = self.appLabel,<EOL>softwareVersion   = pysyncml.version,<EOL>hierarchicalSync  = self.agent.hierarchicalSync if self.agent is not None else False,<EOL>)<EOL>if self.devinfoParams is not None:<EOL><INDENT>devinfoParams.update(self.devinfoParams)<EOL><DEDENT>adapter.devinfo = context.DeviceInfo(**devinfoParams)<EOL><DEDENT>self._callHooks('<STR_LIT>', context, adapter)<EOL>if not self.options.server:<EOL><INDENT>if adapter.peer is None:<EOL><INDENT>if self.options.remote is None:<EOL><INDENT>self.options.remote = input('<STR_LIT>')<EOL>if self.options.username is None:<EOL><INDENT>self.options.username = input('<STR_LIT>')<EOL>if len(self.options.username) <= <NUM_LIT:0>:<EOL><INDENT>self.options.username = None<EOL><DEDENT><DEDENT><DEDENT>log.info('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>if self.options.remote is not None:<EOL><INDENT>if self.options.remote != adapter.peer.urlor 
self.options.username != adapter.peer.usernameor self.options.password != adapter.peer.password:<EOL><INDENT>log.info('<STR_LIT>')<EOL>adapter.peer = None<EOL><DEDENT><DEDENT><DEDENT>if adapter.peer is None:<EOL><INDENT>auth = None<EOL>if self.options.username is not None:<EOL><INDENT>auth = pysyncml.NAMESPACE_AUTH_BASIC<EOL>if self.options.password is None:<EOL><INDENT>self.options.password = getpass.getpass('<STR_LIT>')<EOL><DEDENT><DEDENT>adapter.peer = context.RemoteAdapter(<EOL>url      = self.options.remote,<EOL>auth     = auth,<EOL>username = self.options.username,<EOL>password = self.options.password,<EOL>)<EOL><DEDENT>self._callHooks('<STR_LIT>', context, adapter, adapter.peer)<EOL><DEDENT>uri = self.storeParams.get('<STR_LIT>', self.appLabel)<EOL>if uri in adapter.stores:<EOL><INDENT>store = adapter.stores[uri]<EOL>store.agent = self.agent<EOL><DEDENT>else:<EOL><INDENT>storeParams = dict(<EOL>uri         = uri,<EOL>displayName = self.options.name or self.appDisplay,<EOL>agent       = self.agent,<EOL>maxObjSize  = None)<EOL>if self.storeParams is not None:<EOL><INDENT>storeParams.update(self.storeParams)<EOL><DEDENT>store = adapter.addStore(context.Store(**storeParams))<EOL><DEDENT>self._callHooks('<STR_LIT>', context, adapter, store)<EOL>if self.options.local:<EOL><INDENT>def locprint(msg):<EOL><INDENT>print(msg)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>locprint = log.info<EOL><DEDENT>def showChanges(changes, prefix):<EOL><INDENT>for c in changes:<EOL><INDENT>if c.state != pysyncml.ITEM_DELETED:<EOL><INDENT>item = self.agent.getItem(c.itemID)<EOL><DEDENT>else:<EOL><INDENT>item = '<STR_LIT>' % (c.itemID,)<EOL><DEDENT>locprint('<STR_LIT>' % (prefix, item, pysyncml.state2string(c.state)))<EOL><DEDENT><DEDENT>if self.options.server:<EOL><INDENT>peers = adapter.getKnownPeers()<EOL>if len(peers) > <NUM_LIT:0>:<EOL><INDENT>locprint('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>locprint('<STR_LIT>')<EOL><DEDENT>for peer in peers:<EOL><INDENT>for puri, pstore in 
list(peer.stores.items()):<EOL><INDENT>if pstore.binding is None or pstore.binding.uri != store.uri:<EOL><INDENT>continue<EOL><DEDENT>changes = list(pstore.getRegisteredChanges())<EOL>if len(changes) <= <NUM_LIT:0>:<EOL><INDENT>locprint('<STR_LIT>' % (peer.devID, puri))<EOL><DEDENT>else:<EOL><INDENT>locprint('<STR_LIT>' % (peer.devID, puri))<EOL><DEDENT>showChanges(changes, '<STR_LIT:U+0020>')<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if store.peer is None:<EOL><INDENT>locprint('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>changes = list(store.peer.getRegisteredChanges())<EOL>if len(changes) <= <NUM_LIT:0>:<EOL><INDENT>locprint('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>locprint('<STR_LIT>')<EOL><DEDENT>showChanges(changes, '<STR_LIT>')<EOL><DEDENT><DEDENT>self._callHooks('<STR_LIT>', context, adapter)<EOL>return (context, adapter)<EOL>", "docstring": "Creates a tuple of ( Context, Adapter ) based on the options\nspecified by `self.options`. The Context is the pysyncml.Context created for\nthe storage location specified in `self.options`, and the Adapter is a newly\ncreated Adapter if a previously created one was not found.", "id": "f12363:c1:m10"}
{"signature": "def run(self, stdout=sys.stdout, stderr=sys.stderr):", "body": "if self.options.local or self.options.describe:<EOL><INDENT>context, adapter = self._makeAdapter()<EOL>if self.options.describe:<EOL><INDENT>self.describe(stdout)<EOL>adapter.describe(stdout)<EOL><DEDENT>self.dbsession.rollback()<EOL>return <NUM_LIT:0><EOL><DEDENT>if self.options.server:<EOL><INDENT>return self._runServer(stdout, stderr)<EOL><DEDENT>return self._runClient(stdout, stderr)<EOL>", "docstring": "Runs this SyncEngine by executing one of the following functions\n(as controlled by command-line options or stored parameters):\n\n* Display local pending changes.\n* Describe local configuration.\n* Run an HTTP server and engage server-side mode.\n* Connect to a remote SyncML peer and engage client-side mode.\n\nNOTE: when running in the first two modes, all database interactions\nare rolled back in order to keep the SyncEngine idempotent.", "id": "f12363:c1:m15"}
{"signature": "def __init__(self,<EOL>*args, **kw):", "body": "super(LocalUserSyncEngine, self).__init__(*args, **kw)<EOL>raise NotImplementedError()<EOL>", "docstring": "TODO: document & implement...", "id": "f12363:c3:m0"}
{"signature": "def configure(self, argv=None):", "body": "self._setupOptions()<EOL>self._parseOptions(argv)<EOL>self._setupLogging()<EOL>self._setupModel()<EOL>self.dbsession.commit()<EOL>return self<EOL>", "docstring": "Configures this engine based on the options array passed into\n`argv`. If `argv` is ``None``, then ``sys.argv`` is used instead.\nDuring configuration, the command line options are merged with\npreviously stored values. Then the logging subsystem and the\ndatabase model are initialized, and all storable settings are\nserialized to configurations files.", "id": "f12363:c1:m14"}
{"signature": "def tree2commands(self, adapter, session, lastcmds, xsync):", "body": "<EOL>assert xsync.tag == constants.NODE_SYNCML<EOL>assert len(xsync) == <NUM_LIT:2><EOL>assert xsync[<NUM_LIT:0>].tag == constants.CMD_SYNCHDR<EOL>assert xsync[<NUM_LIT:1>].tag == constants.NODE_SYNCBODY<EOL>version = xsync[<NUM_LIT:0>].findtext('<STR_LIT>')<EOL>if version != constants.SYNCML_VERSION_1_2:<EOL><INDENT>raise common.FeatureNotSupported('<STR_LIT>'% (version, constants.SYNCML_VERSION_1_2))<EOL><DEDENT>verdtd = xsync[<NUM_LIT:0>].findtext('<STR_LIT>')<EOL>if verdtd != constants.SYNCML_DTD_VERSION_1_2:<EOL><INDENT>raise common.FeatureNotSupported('<STR_LIT>'% (verdtd, constants.SYNCML_DTD_VERSION_1_2))<EOL><DEDENT>ret = self.initialize(adapter, session, xsync)<EOL>hdrcmd = ret[<NUM_LIT:0>]<EOL>if session.isServer:<EOL><INDENT>log.debug('<STR_LIT>',<EOL>hdrcmd.target, hdrcmd.sessionID, hdrcmd.msgID)<EOL><DEDENT>else:<EOL><INDENT>log.debug('<STR_LIT>',<EOL>lastcmds[<NUM_LIT:0>].target, lastcmds[<NUM_LIT:0>].sessionID, lastcmds[<NUM_LIT:0>].msgID)<EOL><DEDENT>try:<EOL><INDENT>return self._tree2commands(adapter, session, lastcmds, xsync, ret)<EOL><DEDENT>except Exception as e:<EOL><INDENT>if not session.isServer:<EOL><INDENT>raise<EOL><DEDENT>code = '<STR_LIT>' % (e.__class__.__module__, e.__class__.__name__)<EOL>msg  = '<STR_LIT>'.join(traceback.format_exception_only(type(e), e)).strip()<EOL>log.exception('<STR_LIT>', msg)<EOL>return [<EOL>hdrcmd,<EOL>state.Command(<EOL>name       = constants.CMD_STATUS,<EOL>cmdID      = '<STR_LIT:1>',<EOL>msgRef     = session.pendingMsgID,<EOL>cmdRef     = <NUM_LIT:0>,<EOL>sourceRef  = xsync[<NUM_LIT:0>].findtext('<STR_LIT>'),<EOL>targetRef  = xsync[<NUM_LIT:0>].findtext('<STR_LIT>'),<EOL>statusOf   = constants.CMD_SYNCHDR,<EOL>statusCode = constants.STATUS_COMMAND_FAILED,<EOL>errorCode  = code,<EOL>errorMsg   = msg,<EOL>errorTrace = '<STR_LIT>'.join(traceback.format_exception(type(e), e, 
sys.exc_info()[<NUM_LIT:2>])),<EOL>),<EOL>state.Command(name=constants.CMD_FINAL)]<EOL><DEDENT>", "docstring": "Consumes an ET protocol tree and converts it to state.Command commands", "id": "f12372:c0:m6"}
{"signature": "def page_jump(self, count):", "body": "for i in range(count):<EOL><INDENT>self.get_data()<EOL><DEDENT>", "docstring": "Page through data quickly. Used to resume failed job or jump to another\npage\n:param count: The number of pages to iterate over", "id": "f12376:c1:m4"}
{"signature": "def get_data(self):", "body": "pass<EOL>", "docstring": "Obtain the data to iterate over from the API\n:return:", "id": "f12376:c1:m5"}
{"signature": "def page_posts(self, page_id, after='<STR_LIT>', post_type=\"<STR_LIT>\",<EOL>include_hidden=False, fields=None, **params):", "body": "if fields:<EOL><INDENT>fields = \"<STR_LIT:U+002C>\".join(fields)<EOL><DEDENT>parameters = {\"<STR_LIT>\": self.key,<EOL>\"<STR_LIT>\": after,<EOL>\"<STR_LIT>\": fields,<EOL>\"<STR_LIT>\": include_hidden}<EOL>parameters = self.merge_params(parameters, params)<EOL>return self.api_call('<STR_LIT>' % (page_id, post_type), parameters)<EOL>", "docstring": ":param page_id:\n:param after:\n:param post_type: Can be 'posts', 'feed', 'tagged', 'promotable_posts'\n:param include_hidden:\n:param fields:\n:param params:\n:return:", "id": "f12377:c3:m4"}
{"signature": "def post(self, post_id, fields=None, **params):", "body": "if fields:<EOL><INDENT>fields = \"<STR_LIT:U+002C>\".join(fields)<EOL><DEDENT>parameters = {\"<STR_LIT>\": fields,<EOL>\"<STR_LIT>\": self.key}<EOL>parameters = self.merge_params(parameters, params)<EOL>return self.api_call('<STR_LIT:%s>' % post_id, parameters)<EOL>", "docstring": ":param post_id:\n:param fields:\n:param params:\n:return:", "id": "f12377:c3:m3"}
{"signature": "def get(self, *args, **kwargs):", "body": "try:<EOL><INDENT>req_func = self.session.get if self.session else requests.get<EOL>req = req_func(*args, **kwargs)<EOL>req.raise_for_status()<EOL>self.failed_last = False<EOL>return req<EOL><DEDENT>except requests.exceptions.RequestException as e:<EOL><INDENT>self.log_error(e)<EOL>for i in range(<NUM_LIT:1>, self.num_retries):<EOL><INDENT>sleep_time = self.retry_rate * i<EOL>self.log_function(\"<STR_LIT>\" % sleep_time)<EOL>self._sleep(sleep_time)<EOL>try:<EOL><INDENT>req = requests.get(*args, **kwargs)<EOL>req.raise_for_status()<EOL>self.log_function(\"<STR_LIT>\")<EOL>return req<EOL><DEDENT>except requests.exceptions.RequestException:<EOL><INDENT>self.log_function(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if not self.failed_last:<EOL><INDENT>self.failed_last = True<EOL>raise ApiError(e)<EOL><DEDENT>else:<EOL><INDENT>raise FatalApiError(e)<EOL><DEDENT><DEDENT>", "docstring": "An interface for get requests that handles errors more gracefully to\nprevent data loss", "id": "f12377:c0:m5"}
{"signature": "def to_csv(data, field_names=None, filename='<STR_LIT>',<EOL>overwrite=True,<EOL>write_headers=True, append=False, flat=True,<EOL>primary_fields=None, sort_fields=True):", "body": "<EOL>if not overwrite and path.isfile(filename):<EOL><INDENT>raise FileExistsError('<STR_LIT>')<EOL><DEDENT>write_type = '<STR_LIT:w>' if not append else '<STR_LIT:a>'<EOL>if flat or not field_names:<EOL><INDENT>data = [flatten(datum) for datum in data]<EOL><DEDENT>if not field_names:<EOL><INDENT>field_names, data = fill_gaps(data)<EOL><DEDENT>if sort_fields:<EOL><INDENT>field_names.sort()<EOL><DEDENT>if primary_fields:<EOL><INDENT>for key in primary_fields[::-<NUM_LIT:1>]:<EOL><INDENT>field_names.insert(<NUM_LIT:0>, field_names.pop(field_names.index(key)))<EOL><DEDENT>data = sorted(data, key=lambda k: k[field_names[<NUM_LIT:0>]], reverse=True)<EOL><DEDENT>with open(filename, write_type, encoding='<STR_LIT:utf-8>') as f:<EOL><INDENT>writer = csv.DictWriter(f, fieldnames=field_names, lineterminator='<STR_LIT:\\n>')<EOL>if not append or write_headers:<EOL><INDENT>writer.writeheader()<EOL><DEDENT>for datum in data:<EOL><INDENT>for key in list(datum.keys()):<EOL><INDENT>if key not in field_names:<EOL><INDENT>del datum[key]<EOL><DEDENT>elif type(datum[key]) is str:<EOL><INDENT>datum[key] = datum[key].strip()<EOL><DEDENT>datum[key] = str(datum[key])<EOL><DEDENT>writer.writerow(datum)<EOL><DEDENT><DEDENT>", "docstring": "DEPRECATED    Write a list of dicts to a csv file\n\n:param data: List of dicts\n:param field_names: The list column names\n:param filename: The name of the file\n:param overwrite: Overwrite the file if exists\n:param write_headers: Write the headers to the csv file\n:param append: Write new rows if the file exists\n:param flat: Flatten the dictionary before saving\n:param primary_fields: The first columns of the csv file\n:param sort_fields: Sort the field names alphabetically\n:return: None", "id": "f12379:m2"}
{"signature": "def flatten(dictionary, parent_key=False, separator='<STR_LIT:.>'):", "body": "items = []<EOL>for key, value in dictionary.items():<EOL><INDENT>new_key = str(parent_key) + separator + key if parent_key else key<EOL>if isinstance(value, collections.MutableMapping):<EOL><INDENT>items.extend(flatten(value, new_key, separator).items())<EOL><DEDENT>elif isinstance(value, list):<EOL><INDENT>for k, v in enumerate(value):<EOL><INDENT>items.extend(flatten({str(k): v}, new_key).items())<EOL><DEDENT><DEDENT>else:<EOL><INDENT>items.append((new_key, value))<EOL><DEDENT><DEDENT>return dict(items)<EOL>", "docstring": "Turn a nested dictionary into a flattened dictionary\n\n:param dictionary: The dictionary to flatten\n:param parent_key: The string to prepend to dictionary's keys\n:param separator: The string used to separate flattened keys\n:return: A flattened dictionary", "id": "f12379:m0"}
{"signature": "def to_json(data, filename='<STR_LIT>', indent=<NUM_LIT:4>):", "body": "with open(filename, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(json.dumps(data, indent=indent))<EOL><DEDENT>", "docstring": "Write an object to a json file\n\n:param data: The object\n:param filename: The name of the file\n:param indent: The indentation of the file\n:return: None", "id": "f12379:m3"}
{"signature": "def env_key(key, default):", "body": "env = key.upper().replace('<STR_LIT:.>', '<STR_LIT:_>')<EOL>return os.environ.get(env, default)<EOL>", "docstring": "Try to get `key` from the environment.\n\nThis mutates `key` to replace dots with underscores and makes it all\nuppercase.\n\n    my.database.host => MY_DATABASE_HOST", "id": "f12385:m8"}
{"signature": "def init(self, hosts=None, cacert=None, client_cert=None, client_key=None):", "body": "<EOL>try:<EOL><INDENT>import etcd<EOL>self.module = etcd<EOL><DEDENT>except ImportError:<EOL><INDENT>pass<EOL><DEDENT>if not self.module:<EOL><INDENT>return<EOL><DEDENT>self._parse_jetconfig()<EOL>hosts = env('<STR_LIT>', hosts)<EOL>protocol = env('<STR_LIT>', None)<EOL>cacert = env('<STR_LIT>', cacert)<EOL>client_cert = env('<STR_LIT>', client_cert)<EOL>client_key = env('<STR_LIT>', client_key)<EOL>username = None<EOL>password = None<EOL>auth = env('<STR_LIT>', None)<EOL>if auth:<EOL><INDENT>auth = auth.split('<STR_LIT::>')<EOL>auth.append('<STR_LIT>')<EOL>username = auth[<NUM_LIT:0>]<EOL>password = auth[<NUM_LIT:1>]<EOL><DEDENT>hosts = self._parse_hosts(hosts)<EOL>if hosts is None:<EOL><INDENT>return<EOL><DEDENT>kw = {}<EOL>kw['<STR_LIT>'] = True<EOL>if protocol:<EOL><INDENT>kw['<STR_LIT>'] = protocol<EOL><DEDENT>if username:<EOL><INDENT>kw['<STR_LIT:username>'] = username<EOL><DEDENT>if password:<EOL><INDENT>kw['<STR_LIT:password>'] = password<EOL><DEDENT>if cacert:<EOL><INDENT>kw['<STR_LIT>'] = os.path.abspath(cacert)<EOL><DEDENT>if client_cert and client_key:<EOL><INDENT>kw['<STR_LIT>'] = ((os.path.abspath(client_cert),<EOL>os.path.abspath(client_key)))<EOL><DEDENT>elif client_cert:<EOL><INDENT>kw['<STR_LIT>'] = os.path.abspath(client_cert)<EOL><DEDENT>if cacert or client_cert or client_key:<EOL><INDENT>kw['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>self.client = self.module.Client(hosts, **kw)<EOL>", "docstring": "Handle creating the new etcd client instance and other business.\n\n:param hosts: Host string or list of hosts (default: `'127.0.0.1:2379'`)\n:param cacert: CA cert filename (optional)\n:param client_cert: Client cert filename (optional)\n:param client_key: Client key filename (optional)\n:type ca: str\n:type cert: str\n:type key: str", "id": "f12385:c2:m2"}
{"signature": "def get_watcher(self):", "body": "if not self.watching:<EOL><INDENT>raise StopIteration()<EOL><DEDENT>return self.client.eternal_watch(self.prefix, recursive=True)<EOL>", "docstring": "Return a etcd watching generator which yields events as they happen.", "id": "f12385:c2:m4"}
{"signature": "def reload(self, clear=False):", "body": "log.info(\"<STR_LIT>\")<EOL>self.load(clear)<EOL>", "docstring": "Reloads the configuration.", "id": "f12385:c1:m6"}
{"signature": "def add_reload_hook(self, hook):", "body": "self.reload_hooks.append(hook)<EOL>", "docstring": "Registers a reload hook that's called when :meth:`load` is called.\n\n            :param function hook: Hook to register.", "id": "f12385:c1:m7"}
{"signature": "def setting(name, default=None, allow_default=True):", "body": "return Setting(name, default, allow_default)<EOL>", "docstring": "Shortcut method for getting a setting descriptor.\n\n        See :class:`pyconfig.Setting` for details.", "id": "f12385:m1"}
{"signature": "def reload_hook(func):", "body": "Config().add_reload_hook(func)<EOL>return func<EOL>", "docstring": "Decorator for registering a reload hook.", "id": "f12385:m4"}
{"signature": "def load(self, clear=False):", "body": "if clear:<EOL><INDENT>self.settings = {}<EOL><DEDENT>defer = []<EOL>for conf in pkg_resources.iter_entry_points('<STR_LIT>'):<EOL><INDENT>if conf.attrs:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>mod_name = conf.module_name<EOL>base_name = conf.name if conf.name != '<STR_LIT>' else None<EOL>log.info(\"<STR_LIT>\", mod_name)<EOL>mod_dict = runpy.run_module(mod_name)<EOL>if mod_dict.get('<STR_LIT>', None) is deferred:<EOL><INDENT>log.info(\"<STR_LIT>\", mod_name)<EOL>mod_dict.pop('<STR_LIT>')<EOL>defer.append((mod_name, base_name, mod_dict))<EOL>continue<EOL><DEDENT>self._update(mod_dict, base_name)<EOL><DEDENT>for mod_name, base_name, mod_dict in defer:<EOL><INDENT>log.info(\"<STR_LIT>\", mod_name)<EOL>self._update(mod_dict, base_name)<EOL><DEDENT>if etcd().configured:<EOL><INDENT>mod_dict = etcd().load()<EOL>if mod_dict:<EOL><INDENT>self._update(mod_dict)<EOL><DEDENT><DEDENT>mod_dict = None<EOL>try:<EOL><INDENT>mod_dict = runpy.run_module('<STR_LIT>')<EOL><DEDENT>except ImportError:<EOL><INDENT>pass<EOL><DEDENT>except ValueError as err:<EOL><INDENT>if getattr(err, '<STR_LIT:message>') != '<STR_LIT>':<EOL><INDENT>raise<EOL><DEDENT>mod_name = '<STR_LIT>'<EOL>if sys.version_info < (<NUM_LIT:2>, <NUM_LIT:7>):<EOL><INDENT>loader, code, fname = runpy._get_module_details(mod_name)<EOL><DEDENT>else:<EOL><INDENT>_, loader, code, fname = runpy._get_module_details(mod_name)<EOL><DEDENT>mod_dict = runpy._run_code(code, {}, {}, mod_name, fname, loader,<EOL>pkg_name=None)<EOL><DEDENT>if mod_dict:<EOL><INDENT>log.info(\"<STR_LIT>\")<EOL>self._update(mod_dict)<EOL><DEDENT>self.call_reload_hooks()<EOL>", "docstring": "Loads all the config plugin modules to build a working configuration.\n\nIf there is a ``localconfig`` module on the python path, it will be\nloaded last, overriding other settings.\n\n:param bool clear: Clear out the previous settings before loading", "id": "f12385:c1:m3"}
{"signature": "def clear(self):", "body": "self.settings = {}<EOL>", "docstring": "Clears all the cached configuration.", "id": "f12385:c1:m8"}
{"signature": "def start_watching(self):", "body": "<EOL>if self.watcher and self.watcher.is_alive():<EOL><INDENT>return<EOL><DEDENT>self.watcher = Watcher()<EOL>self.watcher.start()<EOL>", "docstring": "Begins watching etcd for changes.", "id": "f12385:c2:m5"}
{"signature": "def load(self, prefix=None, depth=None):", "body": "prefix = prefix or self.prefix<EOL>prefix = '<STR_LIT:/>' + prefix.strip('<STR_LIT:/>') + '<STR_LIT:/>'<EOL>if depth is None:<EOL><INDENT>depth = self.inherit_depth<EOL><DEDENT>if not self.configured:<EOL><INDENT>log.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if self.watching:<EOL><INDENT>log.info(\"<STR_LIT>\", prefix)<EOL>self.start_watching()<EOL><DEDENT>log.info(\"<STR_LIT>\", prefix)<EOL>try:<EOL><INDENT>result = self.client.get(prefix)<EOL><DEDENT>except self.module.EtcdKeyNotFound:<EOL><INDENT>result = None<EOL><DEDENT>if not result:<EOL><INDENT>log.info(\"<STR_LIT>\")<EOL>return {}<EOL><DEDENT>update = {}<EOL>for item in result.children:<EOL><INDENT>key = item.key<EOL>value = item.value<EOL>try:<EOL><INDENT>value = pytool.json.from_json(value)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>if not self.case_sensitive:<EOL><INDENT>key = key.lower()<EOL><DEDENT>if key.startswith(prefix):<EOL><INDENT>key = key[len(prefix):]<EOL><DEDENT>update[key] = value<EOL><DEDENT>inherited = Config().settings.get(self.inherit_key,<EOL>update.get(self.inherit_key, None))<EOL>if depth > <NUM_LIT:0> and inherited:<EOL><INDENT>log.info(\"<STR_LIT>\")<EOL>inherited = self.load(inherited, depth - <NUM_LIT:1>) or {}<EOL>inherited.update(update)<EOL>update = inherited<EOL><DEDENT>return update<EOL>", "docstring": "Return a dictionary of settings loaded from etcd.", "id": "f12385:c2:m3"}
{"signature": "def env(key, default):", "body": "value = os.environ.get(key, None)<EOL>if value is not None:<EOL><INDENT>log.info('<STR_LIT>', key.lower().replace('<STR_LIT:_>', '<STR_LIT:.>'), value)<EOL>return value<EOL><DEDENT>key = key.lower().replace('<STR_LIT:_>', '<STR_LIT:.>')<EOL>value = get(key)<EOL>if value is not None:<EOL><INDENT>return value<EOL><DEDENT>return default<EOL>", "docstring": "Helper to try to get a setting from the environment, or pyconfig, or\nfinally use a provided default.", "id": "f12385:m7"}
{"signature": "def set(self, name, value):", "body": "if not self.settings.get('<STR_LIT>', False):<EOL><INDENT>name = name.lower()<EOL><DEDENT>log.info(\"<STR_LIT>\", name, repr(value))<EOL>with self.mut_lock:<EOL><INDENT>self.settings[name] = value<EOL><DEDENT>", "docstring": "Changes a setting value.\n\n            This implements a locking mechanism to ensure some level of thread\n            safety.\n\n            :param str name: Setting key name.\n            :param value: Setting value.", "id": "f12385:c1:m1"}
{"signature": "def _default_value_only(self):", "body": "line = self.source[self.col_offset:]<EOL>regex = re.compile('''<STR_LIT>''')<EOL>match = regex.match(line)<EOL>if not match:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return match.group(<NUM_LIT:1>)<EOL>", "docstring": "Return only the default value, if there is one.", "id": "f12386:c2:m7"}
{"signature": "def as_live(self):", "body": "key = self.get_key()<EOL>default = pyconfig.get(key)<EOL>if default:<EOL><INDENT>default = repr(default)<EOL><DEDENT>else:<EOL><INDENT>default = self._default() or NotSet()<EOL><DEDENT>return \"<STR_LIT>\" % (key, default)<EOL>", "docstring": "Return this call as if it were being assigned in a pyconfig namespace,\nbut load the actual value currently available in pyconfig.", "id": "f12386:c2:m2"}
{"signature": "def _output(calls, args):", "body": "<EOL>if args.natural_sort or args.source:<EOL><INDENT>calls = sorted(calls, key=lambda c: (c.filename, c.lineno))<EOL><DEDENT>else:<EOL><INDENT>calls = sorted(calls, key=lambda c: c.key)<EOL><DEDENT>out = []<EOL>if args.only_keys:<EOL><INDENT>keys = set()<EOL>for call in calls:<EOL><INDENT>if call.key in keys:<EOL><INDENT>continue<EOL><DEDENT>out.append(_format_call(call, args))<EOL>keys.add(call.key)<EOL><DEDENT>out = '<STR_LIT:\\n>'.join(out)<EOL>if args.color:<EOL><INDENT>out = _colorize(out)<EOL><DEDENT>print(out, end='<STR_LIT:U+0020>')<EOL>return<EOL><DEDENT>keys = set()<EOL>for call in calls:<EOL><INDENT>if call.default:<EOL><INDENT>keys.add(call.key)<EOL><DEDENT><DEDENT>for call in calls:<EOL><INDENT>if not args.all and not call.default and call.key in keys:<EOL><INDENT>continue<EOL><DEDENT>out.append(_format_call(call, args))<EOL><DEDENT>out = '<STR_LIT:\\n>'.join(out)<EOL>if args.color:<EOL><INDENT>out = _colorize(out)<EOL><DEDENT>print(out, end='<STR_LIT:U+0020>')<EOL>", "docstring": "Outputs `calls`.\n\n:param calls: List of :class:`_PyconfigCall` instances\n:param args: :class:`~argparse.ArgumentParser` instance\n:type calls: list\n:type args: argparse.ArgumentParser", "id": "f12386:m6"}
{"signature": "def _source_call_only(self):", "body": "line = self.source[self.col_offset:]<EOL>regex = re.compile('''<STR_LIT>''')<EOL>match = regex.match(line)<EOL>if not match:<EOL><INDENT>return self.source<EOL><DEDENT>return match.group(<NUM_LIT:1>)<EOL>", "docstring": "Return the source line stripped down to just the pyconfig call.", "id": "f12386:c2:m6"}
{"signature": "def _colorize(output):", "body": "if not pygments:<EOL><INDENT>return output<EOL><DEDENT>return pygments.highlight(output,<EOL>pygments.lexers.PythonLexer(),<EOL>pygments.formatters.Terminal256Formatter(style='<STR_LIT>'))<EOL>", "docstring": "Return `output` colorized with Pygments, if available.", "id": "f12386:m8"}
{"signature": "def annotation(self):", "body": "if not self.source:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>return \"<STR_LIT>\" % (self.filename, self.lineno)<EOL>", "docstring": "Return this call's source annotation.", "id": "f12386:c2:m4"}
{"signature": "def _error(msg, *args):", "body": "print(msg % args, file=sys.stderr)<EOL>sys.exit(<NUM_LIT:1>)<EOL>", "docstring": "Print an error message and exit.\n\n:param msg: A message to print\n:type msg: str", "id": "f12386:m3"}
{"signature": "def _format_call(call, args):", "body": "out = '<STR_LIT>'<EOL>if args.source:<EOL><INDENT>out += call.annotation() + '<STR_LIT:\\n>'<EOL><DEDENT>if args.only_keys:<EOL><INDENT>out += call.get_key()<EOL>return out<EOL><DEDENT>if args.view_call:<EOL><INDENT>out += call.as_call()<EOL><DEDENT>elif args.load_configs:<EOL><INDENT>out += call.as_live()<EOL><DEDENT>else:<EOL><INDENT>out += call.as_namespace()<EOL><DEDENT>return out<EOL>", "docstring": "Return `call` formatted appropriately for `args`.\n\n:param call: A pyconfig call object\n:param args: Arguments from the command\n:type call: :class:`_PyconfigCall`", "id": "f12386:m7"}
{"signature": "def as_namespace(self, namespace=None):", "body": "key = self.key<EOL>if namespace and key.startswith(namespace):<EOL><INDENT>key = key[len(namespace) + <NUM_LIT:1>:]<EOL><DEDENT>return \"<STR_LIT>\" % (self.get_key(), self._default() or NotSet())<EOL>", "docstring": "Return this call as if it were being assigned in a pyconfig namespace.\n\nIf `namespace` is specified and matches the top level of this call's\n:attr:`key`, then that section of the key will be removed.", "id": "f12386:c2:m1"}
{"signature": "def _parse_and_output(filename, args):", "body": "relpath = os.path.dirname(filename)<EOL>if os.path.isfile(filename):<EOL><INDENT>calls = _parse_file(filename, relpath)<EOL><DEDENT>elif os.path.isdir(filename):<EOL><INDENT>calls = _parse_dir(filename, relpath)<EOL><DEDENT>else:<EOL><INDENT>_error(\"<STR_LIT>\", filename)<EOL><DEDENT>if not calls:<EOL><INDENT>_error(\"<STR_LIT>\")<EOL><DEDENT>if args.load_configs:<EOL><INDENT>keys = set()<EOL>for call in calls:<EOL><INDENT>keys.add(call.key)<EOL><DEDENT>conf = pyconfig.Config()<EOL>for key, value in conf.settings.items():<EOL><INDENT>if key in keys:<EOL><INDENT>continue<EOL><DEDENT>calls.append(_PyconfigCall('<STR_LIT>', key, value, [None]*<NUM_LIT:4>))<EOL><DEDENT><DEDENT>_output(calls, args)<EOL>", "docstring": "Parse `filename` appropriately and then output calls according to the\n`args` specified.\n\n:param filename: A file or directory\n:param args: Command arguments\n:type filename: str", "id": "f12386:m5"}
{"signature": "def main():", "body": "parser = argparse.ArgumentParser(description=\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>target_group = parser.add_mutually_exclusive_group()<EOL>target_group.add_argument('<STR_LIT>', '<STR_LIT>',<EOL>help=\"<STR_LIT>\",<EOL>metavar='<STR_LIT:F>')<EOL>target_group.add_argument('<STR_LIT>', '<STR_LIT>',<EOL>help=\"<STR_LIT>\",<EOL>metavar='<STR_LIT:M>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>',<EOL>help=\"<STR_LIT>\",<EOL>action='<STR_LIT:store_true>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>',<EOL>help=\"<STR_LIT>\",<EOL>action='<STR_LIT:store_true>')<EOL>key_group = parser.add_mutually_exclusive_group()<EOL>key_group.add_argument('<STR_LIT>', '<STR_LIT>',<EOL>help=\"<STR_LIT>\",<EOL>action='<STR_LIT:store_true>')<EOL>key_group.add_argument('<STR_LIT>', '<STR_LIT>',<EOL>help=\"<STR_LIT>\",<EOL>action='<STR_LIT:store_true>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>',<EOL>help=\"<STR_LIT>\",<EOL>action='<STR_LIT:store_true>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>',<EOL>help=\"<STR_LIT>\",<EOL>action='<STR_LIT:store_true>')<EOL>parser.add_argument('<STR_LIT:-c>', '<STR_LIT>',<EOL>help=\"<STR_LIT>\" % bool(pygments),<EOL>action='<STR_LIT>', default=bool(pygments),<EOL>const=(not bool(pygments)))<EOL>args = parser.parse_args()<EOL>if args.color and not pygments:<EOL><INDENT>_error(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>if args.module:<EOL><INDENT>_handle_module(args)<EOL><DEDENT>if args.filename:<EOL><INDENT>_handle_file(args)<EOL><DEDENT>", "docstring": "Main script for `pyconfig` command.", "id": "f12386:m0"}
{"signature": "def _receive_until(self, s):", "body": "return self._socket.recv_until(s)<EOL>", "docstring": "Recieve data from the socket until the given substring is observed.\nData in the same datagram as the substring, following the substring,\nwill not be returned and will be cached for future receives.", "id": "f12396:c0:m4"}
{"signature": "def set_parameter(self, key, value):", "body": "if value is None or isinstance(value, (int, float, bool)):<EOL><INDENT>value = str(value)<EOL><DEDENT>if key.endswith('<STR_LIT>'):<EOL><INDENT>value = urlsafe_b64encode(value.encode('<STR_LIT:utf-8>'))<EOL>value = value.replace(b('<STR_LIT:=>'), b('<STR_LIT>'))<EOL><DEDENT>self._parameters[key] = value<EOL>", "docstring": "Set a url parameter.\n\nParameters\n----------\nkey : str\n    If key ends with '64', the value provided will be automatically\n    base64 encoded.", "id": "f12406:c0:m2"}
{"signature": "def create_url(self, path, params={}, opts={}):", "body": "if opts:<EOL><INDENT>warnings.warn('<STR_LIT>',<EOL>DeprecationWarning, stacklevel=<NUM_LIT:2>)<EOL><DEDENT>params = params or opts<EOL>if self._shard_strategy == SHARD_STRATEGY_CRC:<EOL><INDENT>crc = zlib.crc32(path.encode('<STR_LIT:utf-8>')) & <NUM_LIT><EOL>index = crc % len(self._domains)  <EOL>domain = self._domains[index]<EOL><DEDENT>elif self._shard_strategy == SHARD_STRATEGY_CYCLE:<EOL><INDENT>domain = self._domains[self._shard_next_index]<EOL>self._shard_next_index = (<EOL>self._shard_next_index + <NUM_LIT:1>) % len(self._domains)<EOL><DEDENT>else:<EOL><INDENT>domain = self._domains[<NUM_LIT:0>]<EOL><DEDENT>scheme = \"<STR_LIT>\" if self._use_https else \"<STR_LIT:http>\"<EOL>url_obj = UrlHelper(<EOL>domain,<EOL>path,<EOL>scheme,<EOL>sign_key=self._sign_key,<EOL>include_library_param=self._include_library_param,<EOL>params=params)<EOL>return str(url_obj)<EOL>", "docstring": "Create URL with supplied path and `opts` parameters dict.\n\nParameters\n----------\npath : str\nopts : dict\n    Dictionary specifying URL parameters. Non-imgix parameters are\n    added to the URL unprocessed. For a complete list of imgix\n    supported parameters, visit https://docs.imgix.com/apis/url .\n    (default {})\n\nReturns\n-------\nstr\n    imgix URL", "id": "f12407:c0:m2"}
{"signature": "def read_file_header(fd, endian):", "body": "fields = [<EOL>('<STR_LIT:description>', '<STR_LIT:s>', <NUM_LIT>),<EOL>('<STR_LIT>', '<STR_LIT:s>', <NUM_LIT:8>),<EOL>('<STR_LIT:version>', '<STR_LIT:H>', <NUM_LIT:2>),<EOL>('<STR_LIT>', '<STR_LIT:s>', <NUM_LIT:2>)<EOL>]<EOL>hdict = {}<EOL>for name, fmt, num_bytes in fields:<EOL><INDENT>data = fd.read(num_bytes)<EOL>hdict[name] = unpack(endian, fmt, data)<EOL><DEDENT>hdict['<STR_LIT:description>'] = hdict['<STR_LIT:description>'].strip()<EOL>v_major = hdict['<STR_LIT:version>'] >> <NUM_LIT:8><EOL>v_minor = hdict['<STR_LIT:version>'] & <NUM_LIT><EOL>hdict['<STR_LIT>'] = '<STR_LIT>' % (v_major, v_minor)<EOL>return hdict<EOL>", "docstring": "Read mat 5 file header of the file fd.\n    Returns a dict with header values.", "id": "f12409:m2"}
{"signature": "def unpack(endian, fmt, data):", "body": "if fmt == '<STR_LIT:s>':<EOL><INDENT>val = struct.unpack('<STR_LIT>'.join([endian, str(len(data)), '<STR_LIT:s>']),<EOL>data)[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>num = len(data) // struct.calcsize(fmt)<EOL>val = struct.unpack('<STR_LIT>'.join([endian, str(num), fmt]), data)<EOL>if len(val) == <NUM_LIT:1>:<EOL><INDENT>val = val[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>return val<EOL>", "docstring": "Unpack a byte string to the given format. If the byte string\n    contains more bytes than required for the given format, the function\n    returns a tuple of values.", "id": "f12409:m1"}
{"signature": "def read_var_header(fd, endian):", "body": "mtpn, num_bytes = unpack(endian, '<STR_LIT>', fd.read(<NUM_LIT:8>))<EOL>next_pos = fd.tell() + num_bytes<EOL>if mtpn == etypes['<STR_LIT>']['<STR_LIT:n>']:<EOL><INDENT>data = fd.read(num_bytes)<EOL>dcor = zlib.decompressobj()<EOL>fd_var = BytesIO(dcor.decompress(data))<EOL>del data<EOL>fd = fd_var<EOL>if dcor.flush() != b'<STR_LIT>':<EOL><INDENT>raise ParseError('<STR_LIT>')<EOL><DEDENT>mtpn, num_bytes = unpack(endian, '<STR_LIT>', fd.read(<NUM_LIT:8>))<EOL><DEDENT>if mtpn != etypes['<STR_LIT>']['<STR_LIT:n>']:<EOL><INDENT>raise ParseError('<STR_LIT>'<EOL>'<STR_LIT>'.format(etypes['<STR_LIT>']['<STR_LIT:n>'], mtpn))<EOL><DEDENT>header = read_header(fd, endian)<EOL>return header, next_pos, fd<EOL>", "docstring": "Read full header tag.\n\n    Return a dict with the parsed header, the file position of next tag,\n    a file like object for reading the uncompressed element data.", "id": "f12409:m6"}
{"signature": "def read_numeric_array(fd, endian, header, data_etypes):", "body": "if header['<STR_LIT>']:<EOL><INDENT>raise ParseError('<STR_LIT>')<EOL><DEDENT>data = read_elements(fd, endian, data_etypes)<EOL>if not isinstance(data, Sequence):<EOL><INDENT>return data<EOL><DEDENT>rowcount = header['<STR_LIT>'][<NUM_LIT:0>]<EOL>colcount = header['<STR_LIT>'][<NUM_LIT:1>]<EOL>array = [list(data[c * rowcount + r] for c in range(colcount))<EOL>for r in range(rowcount)]<EOL>return squeeze(array)<EOL>", "docstring": "Read a numeric matrix.\n    Returns an array with rows of the numeric matrix.", "id": "f12409:m8"}
{"signature": "def loadmat(filename, meta=False):", "body": "if isinstance(filename, basestring):<EOL><INDENT>fd = open(filename, '<STR_LIT:rb>')<EOL><DEDENT>else:<EOL><INDENT>fd = filename<EOL><DEDENT>fd.seek(<NUM_LIT>)<EOL>tst_str = fd.read(<NUM_LIT:4>)<EOL>little_endian = (tst_str[<NUM_LIT:2>:<NUM_LIT:4>] == b'<STR_LIT>')<EOL>endian = '<STR_LIT>'<EOL>if (sys.byteorder == '<STR_LIT>' and little_endian) or(sys.byteorder == '<STR_LIT>' and not little_endian):<EOL><INDENT>pass<EOL><DEDENT>elif sys.byteorder == '<STR_LIT>':<EOL><INDENT>endian = '<STR_LIT:>>'<EOL><DEDENT>else:<EOL><INDENT>endian = '<STR_LIT:<>'<EOL><DEDENT>maj_ind = int(little_endian)<EOL>maj_val = ord(tst_str[maj_ind]) if ispy2 else tst_str[maj_ind]<EOL>if maj_val != <NUM_LIT:1>:<EOL><INDENT>raise ParseError('<STR_LIT>')<EOL><DEDENT>mdict = {}<EOL>if meta:<EOL><INDENT>fd.seek(<NUM_LIT:0>)<EOL>mdict['<STR_LIT>'] = read_file_header(fd, endian)<EOL>mdict['<STR_LIT>'] = []<EOL><DEDENT>while not eof(fd):<EOL><INDENT>hdr, next_position, fd_var = read_var_header(fd, endian)<EOL>name = hdr['<STR_LIT:name>']<EOL>if name in mdict:<EOL><INDENT>raise ParseError('<STR_LIT>'<EOL>.format(name))<EOL><DEDENT>mdict[name] = read_var_array(fd_var, endian, hdr)<EOL>if meta and hdr['<STR_LIT>']:<EOL><INDENT>mdict['<STR_LIT>'].append(name)<EOL><DEDENT>fd.seek(next_position)<EOL><DEDENT>fd.close()<EOL>return mdict<EOL>", "docstring": "Load data from MAT-file:\n\n    data = loadmat(filename, meta=False)\n\n    The filename argument is either a string with the filename, or\n    a file like object.\n\n    The returned parameter ``data`` is a dict with the variables found\n    in the MAT file.\n\n    Call ``loadmat`` with parameter meta=True to include meta data, such\n    as file header information and list of globals.\n\n    A ``ParseError`` exception is raised if the MAT-file is corrupt or\n    contains a data type that cannot be parsed.", "id": "f12409:m14"}
{"signature": "def read_var_array(fd, endian, header):", "body": "mc = inv_mclasses[header['<STR_LIT>']]<EOL>if mc in numeric_class_etypes:<EOL><INDENT>return read_numeric_array(<EOL>fd, endian, header,<EOL>set(compressed_numeric).union([numeric_class_etypes[mc]])<EOL>)<EOL><DEDENT>elif mc == '<STR_LIT>':<EOL><INDENT>raise ParseError('<STR_LIT>')<EOL><DEDENT>elif mc == '<STR_LIT>':<EOL><INDENT>return read_char_array(fd, endian, header)<EOL><DEDENT>elif mc == '<STR_LIT>':<EOL><INDENT>return read_cell_array(fd, endian, header)<EOL><DEDENT>elif mc == '<STR_LIT>':<EOL><INDENT>return read_struct_array(fd, endian, header)<EOL><DEDENT>elif mc == '<STR_LIT>':<EOL><INDENT>raise ParseError('<STR_LIT>')<EOL><DEDENT>elif mc == '<STR_LIT>':<EOL><INDENT>raise ParseError('<STR_LIT>')<EOL><DEDENT>elif mc == '<STR_LIT>':<EOL><INDENT>raise ParseError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Read variable array (of any supported type).", "id": "f12409:m12"}
{"signature": "def squeeze(array):", "body": "if len(array) == <NUM_LIT:1>:<EOL><INDENT>array = array[<NUM_LIT:0>]<EOL><DEDENT>return array<EOL>", "docstring": "Return array contents if array contains only one element.\n    Otherwise, return the full array.", "id": "f12409:m7"}
{"signature": "def read_header(fd, endian):", "body": "flag_class, nzmax = read_elements(fd, endian, ['<STR_LIT>'])<EOL>header = {<EOL>'<STR_LIT>': flag_class & <NUM_LIT>,<EOL>'<STR_LIT>': (flag_class >> <NUM_LIT:9> & <NUM_LIT:1>) == <NUM_LIT:1>,<EOL>'<STR_LIT>': (flag_class >> <NUM_LIT:10> & <NUM_LIT:1>) == <NUM_LIT:1>,<EOL>'<STR_LIT>': (flag_class >> <NUM_LIT:11> & <NUM_LIT:1>) == <NUM_LIT:1>,<EOL>'<STR_LIT>': nzmax<EOL>}<EOL>header['<STR_LIT>'] = read_elements(fd, endian, ['<STR_LIT>'])<EOL>header['<STR_LIT>'] = len(header['<STR_LIT>'])<EOL>if header['<STR_LIT>'] != <NUM_LIT:2>:<EOL><INDENT>raise ParseError('<STR_LIT>')<EOL><DEDENT>header['<STR_LIT:name>'] = read_elements(fd, endian, ['<STR_LIT>'], is_name=True)<EOL>return header<EOL>", "docstring": "Read and return the matrix header.", "id": "f12409:m5"}
{"signature": "def guess_header(array, name='<STR_LIT>'):", "body": "header = {}<EOL>if isinstance(array, Sequence) and len(array) == <NUM_LIT:1>:<EOL><INDENT>array = array[<NUM_LIT:0>]<EOL><DEDENT>if isinstance(array, basestring):<EOL><INDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': (<NUM_LIT:1> if len(array) > <NUM_LIT:0> else <NUM_LIT:0>, len(array))})<EOL><DEDENT>elif isinstance(array, Sequence) and len(array) == <NUM_LIT:0>:<EOL><INDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>', '<STR_LIT>': (<NUM_LIT:0>, <NUM_LIT:0>)})<EOL><DEDENT>elif isinstance(array, Mapping):<EOL><INDENT>field_types = [type(j) for j in array.values()]<EOL>field_lengths = [<NUM_LIT:1> if isinstance(j, (basestring, int, float))<EOL>else len(j) for j in array.values()]<EOL>if len(field_lengths) == <NUM_LIT:1>:<EOL><INDENT>equal_lengths = True<EOL>equal_types = True<EOL><DEDENT>else:<EOL><INDENT>equal_lengths = not any(diff(field_lengths))<EOL>equal_types = all([field_types[<NUM_LIT:0>] == f for f in field_types])<EOL><DEDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': (<EOL><NUM_LIT:1>,<EOL>field_lengths[<NUM_LIT:0>] if equal_lengths and equal_types else <NUM_LIT:1>)}<EOL>)<EOL><DEDENT>elif isinstance(array, int):<EOL><INDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>', '<STR_LIT>': (<NUM_LIT:1>, <NUM_LIT:1>)})<EOL><DEDENT>elif isinstance(array, float):<EOL><INDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>', '<STR_LIT>': (<NUM_LIT:1>, <NUM_LIT:1>)})<EOL><DEDENT>elif isinstance(array, Sequence):<EOL><INDENT>if isarray(array, lambda i: isinstance(i, int), <NUM_LIT:1>):<EOL><INDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': (<NUM_LIT:1>, len(array))})<EOL><DEDENT>elif isarray(array, lambda i: isinstance(i, (int, float)), <NUM_LIT:1>):<EOL><INDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>', 
'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': (<NUM_LIT:1>, len(array))})<EOL><DEDENT>elif (isarray(array, lambda i: isinstance(i, Sequence), <NUM_LIT:1>) and<EOL>any(diff(len(s) for s in array))):<EOL><INDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': (<NUM_LIT:1>, len(array))<EOL>})<EOL><DEDENT>elif isarray(array, lambda i: isinstance(i, basestring), <NUM_LIT:1>):<EOL><INDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': (len(array), len(array[<NUM_LIT:0>]))})<EOL><DEDENT>elif isarray(array, lambda i: isinstance(i, Sequence), <NUM_LIT:1>):<EOL><INDENT>if any(diff(len(j) for j in array)):<EOL><INDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': (len(array), len(array[<NUM_LIT:0>]))})<EOL><DEDENT>elif isarray(array, lambda i: isinstance(i, int)):<EOL><INDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': (len(array), len(array[<NUM_LIT:0>]))})<EOL><DEDENT>elif isarray(array, lambda i: isinstance(i, (int, float))):<EOL><INDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': (len(array), len(array[<NUM_LIT:0>]))})<EOL><DEDENT><DEDENT>elif isarray(array, lambda i: isinstance(<EOL>i, (int, float, basestring, Sequence, Mapping))):<EOL><INDENT>header.update({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': (<NUM_LIT:1>, len(array))})<EOL><DEDENT><DEDENT>if not header:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>header['<STR_LIT:name>'] = name<EOL>return header, array<EOL>", "docstring": "Guess the array header information.\n    Returns a header dict, with class, data type, and size information.", "id": "f12410:m12"}
{"signature": "def write_var_data(fd, data):", "body": "<EOL>fd.write(struct.pack('<STR_LIT>', etypes['<STR_LIT>']['<STR_LIT:n>'], len(data)))<EOL>fd.write(data)<EOL>", "docstring": "Write variable data to file", "id": "f12410:m4"}
{"signature": "def p_expression_p(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:2>]<EOL>", "docstring": "expression             : t_popen expression t_pclose", "id": "f12427:c2:m76"}
{"signature": "def p_margument_list_aux(self, p):", "body": "p[<NUM_LIT:1>].extend(list(p)[<NUM_LIT:2>:])<EOL>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "mixin_kwarg_arg_list       : mixin_kwarg_arg_list argument", "id": "f12427:c2:m32"}
{"signature": "def p_filter_group_aux(self, p):", "body": "p[<NUM_LIT:1>].extend(p[<NUM_LIT:2>])<EOL>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "filter_group              : filter_group filter", "id": "f12427:c2:m65"}
{"signature": "def p_string_part_list_aux(self, p):", "body": "p[<NUM_LIT:1>].extend([p[<NUM_LIT:2>]])<EOL>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "string_part_list        : string_part_list string_part", "id": "f12427:c2:m81"}
{"signature": "def p_argument_list_aux(self, p):", "body": "p[<NUM_LIT:1>].extend(list(p)[<NUM_LIT:2>:])<EOL>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "argument_list       : argument_list argument\n                                | argument_list t_comma argument", "id": "f12427:c2:m71"}
{"signature": "def p_escaped_string(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:2>]<EOL>", "docstring": "estring                 : t_eopen style_list t_eclose\n                                    | t_eopen identifier_list t_eclose", "id": "f12427:c2:m79"}
{"signature": "def p_factor(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "factor                  : color\n                                    | number\n                                    | variable\n                                    | css_dom\n                                    | fcall", "id": "f12427:c2:m78"}
{"signature": "def p_statement_namespace(self, p):", "body": "p[<NUM_LIT:0>] = Statement(list(p)[<NUM_LIT:1>:], p.lineno(<NUM_LIT:1>))<EOL>p[<NUM_LIT:0>].parse(None)<EOL>", "docstring": "statement            : css_namespace t_ws word css_string t_semicolon", "id": "f12427:c2:m8"}
{"signature": "def p_mixin_args_empty(self, p):", "body": "p[<NUM_LIT:0>] = None<EOL>", "docstring": "mixin_args                : empty", "id": "f12427:c2:m30"}
{"signature": "def p_property_decl(self, p):", "body": "l = len(p)<EOL>p[<NUM_LIT:0>] = Property(list(p)[<NUM_LIT:1>:-<NUM_LIT:1>], p.lineno(l - <NUM_LIT:1>))<EOL>", "docstring": "property_decl           : prop_open style_list t_semicolon\n                                    | prop_open style_list css_important t_semicolon\n                                    | prop_open empty t_semicolon", "id": "f12427:c2:m37"}
{"signature": "def p_mixin_args_aux(self, p):", "body": "p[<NUM_LIT:1>].extend(list(p)[<NUM_LIT:2>:])<EOL>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "mixin_args                : mixin_args argument", "id": "f12427:c2:m28"}
{"signature": "def p_unit(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "unit                     : statement\n                                     | variable_decl\n                                     | block_decl\n                                     | mixin_decl\n                                     | call_mixin\n                                     | import_statement", "id": "f12427:c2:m6"}
{"signature": "def p_unit_list(self, p):", "body": "if isinstance(p[<NUM_LIT:1>], list):<EOL><INDENT>if len(p) >= <NUM_LIT:3>:<EOL><INDENT>if isinstance(p[<NUM_LIT:2>], list):<EOL><INDENT>p[<NUM_LIT:1>].extend(p[<NUM_LIT:2>])<EOL><DEDENT>else:<EOL><INDENT>p[<NUM_LIT:1>].append(p[<NUM_LIT:2>])<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>p[<NUM_LIT:1>] = [p[<NUM_LIT:1>]]<EOL><DEDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "unit_list                : unit_list unit\n                                     | unit", "id": "f12427:c2:m5"}
{"signature": "def p_keyframe_open(self, p):", "body": "p[<NUM_LIT:0>] = KeyframeSelector([p[<NUM_LIT:1>]]).parse(self.scope)<EOL>", "docstring": "block_open                 : css_keyframe_selector brace_open\n                                      | number brace_open", "id": "f12427:c2:m15"}
{"signature": "def p_mixin_args_list_aux(self, p):", "body": "p[<NUM_LIT:1>].extend([p[<NUM_LIT:3>]])<EOL>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "mixin_args_list          : mixin_args_list t_comma mixin_args\n                                     | mixin_args_list t_semicolon mixin_args", "id": "f12427:c2:m26"}
{"signature": "def p_mixin_guard_cond_list(self, p):", "body": "p[<NUM_LIT:0>] = [p[<NUM_LIT:1>]]<EOL>", "docstring": "mixin_guard_cond_list     : mixin_guard_cond", "id": "f12427:c2:m20"}
{"signature": "def p_declaration(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>] if isinstance(p[<NUM_LIT:1>], list) else [p[<NUM_LIT:1>]]<EOL>", "docstring": "declaration                : variable_decl\n                                       | property_decl\n                                       | block_decl\n                                       | mixin_decl\n                                       | call_mixin\n                                       | import_statement", "id": "f12427:c2:m35"}
{"signature": "def p_not(self, p):", "body": "p[<NUM_LIT:0>] = tuple(list(p)[<NUM_LIT:1>:])<EOL>", "docstring": "not                       : t_not t_ws\n                                      | t_not", "id": "f12427:c2:m108"}
{"signature": "def p_media_query_value(self, p):", "body": "if utility.is_variable(p[<NUM_LIT:1>]):<EOL><INDENT>var = self.scope.variables('<STR_LIT>'.join(p[<NUM_LIT:1>]))<EOL>if var:<EOL><INDENT>value = var.value[<NUM_LIT:0>]<EOL>if hasattr(value, '<STR_LIT>'):<EOL><INDENT>p[<NUM_LIT:1>] = value.parse(self.scope)<EOL><DEDENT>else:<EOL><INDENT>p[<NUM_LIT:1>] = value<EOL><DEDENT><DEDENT><DEDENT>if isinstance(p[<NUM_LIT:1>], Expression):<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>].parse(self.scope)<EOL><DEDENT>else:<EOL><INDENT>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL><DEDENT>", "docstring": "media_query_value           : number\n                                        | variable\n                                        | word\n                                        | color\n                                        | expression", "id": "f12427:c2:m61"}
{"signature": "def p_variable_strange(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:2>]<EOL>", "docstring": "variable                : t_popen variable t_pclose", "id": "f12427:c2:m86"}
{"signature": "def p_mixin(self, p):", "body": "self.scope.add_mixin(Mixin(list(p)[<NUM_LIT:1>:], p.lineno(<NUM_LIT:3>)).parse(self.scope))<EOL>self.scope.pop()<EOL>p[<NUM_LIT:0>] = None<EOL>", "docstring": "mixin_decl                : open_mixin declaration_list brace_close", "id": "f12427:c2:m16"}
{"signature": "def p_call_mixin(self, p):", "body": "p[<NUM_LIT:1>].parse(None)<EOL>p[<NUM_LIT:0>] = Deferred(p[<NUM_LIT:1>], p[<NUM_LIT:3>], p.lineno(<NUM_LIT:4>))<EOL>", "docstring": "call_mixin                : identifier t_popen mixin_args_list t_pclose t_semicolon", "id": "f12427:c2:m24"}
{"signature": "def p_selector(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "selector                  : '*'\n                                      | '+'\n                                      | child_selector\n                                      | general_sibling_selector", "id": "f12427:c2:m62"}
{"signature": "def p_media_type(self, p):", "body": "p[<NUM_LIT:0>] = tuple(list(p)[<NUM_LIT:1>:])<EOL>", "docstring": "media_type                : css_media_type\n                                      | css_media_type t_ws", "id": "f12427:c2:m101"}
{"signature": "def p_variable_decl(self, p):", "body": "p[<NUM_LIT:0>] = Variable(list(p)[<NUM_LIT:1>:-<NUM_LIT:1>], p.lineno(<NUM_LIT:4>))<EOL>p[<NUM_LIT:0>].parse(self.scope)<EOL>", "docstring": "variable_decl            : variable t_colon style_list t_semicolon", "id": "f12427:c2:m36"}
{"signature": "def p_class(self, p):", "body": "p[<NUM_LIT:0>] = tuple(list(p)[<NUM_LIT:1>:])<EOL>", "docstring": "class                     : css_class\n                                      | css_class t_ws", "id": "f12427:c2:m92"}
{"signature": "def __init__(self,<EOL>lex_optimize=True,<EOL>yacc_optimize=True,<EOL>tabfile='<STR_LIT>',<EOL>yacc_debug=False,<EOL>scope=None,<EOL>outputdir=tempfile.gettempdir(),<EOL>importlvl=<NUM_LIT:0>,<EOL>verbose=False,<EOL>fail_with_exc=False):", "body": "self.verbose = verbose<EOL>self.importlvl = importlvl<EOL>self.lex = lexer.LessLexer()<EOL>if not tabfile:<EOL><INDENT>tabfile = '<STR_LIT>'<EOL><DEDENT>self.ignored = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>self.tokens = [t for t in self.lex.tokens if t not in self.ignored]<EOL>self.parser = ply.yacc.yacc(<EOL>module=self,<EOL>start='<STR_LIT>',<EOL>debug=yacc_debug,<EOL>optimize=yacc_optimize,<EOL>tabmodule=tabfile,<EOL>outputdir=outputdir)<EOL>self.scope = scope if scope else Scope()<EOL>self.stash = {}<EOL>self.result = None<EOL>self.target = None<EOL>self.fail_with_exc = fail_with_exc<EOL>if fail_with_exc:<EOL><INDENT>self.register = ErrorRegister()<EOL><DEDENT>else:<EOL><INDENT>self.register = PrintErrorRegister()<EOL><DEDENT>", "docstring": "Parser object\n\n            Kwargs:\n                lex_optimize (bool): Optimize lexer\n                yacc_optimize (bool): Optimize parser\n                tabfile (str): Yacc tab filename\n                yacc_debug (bool): yacc debug mode\n                scope (Scope): Inherited scope\n                outputdir (str): Output (debugging)\n                importlvl (int): Import depth\n                verbose (bool): Verbose mode\n                fail_with_exc (bool): Throw exception on syntax error instead\n                                      of printing to stderr", "id": "f12427:c2:m0"}
{"signature": "def p_margument_list(self, p):", "body": "p[<NUM_LIT:0>] = [p[<NUM_LIT:1>]]<EOL>", "docstring": "mixin_kwarg_arg_list      : argument", "id": "f12427:c2:m33"}
{"signature": "def p_number(self, p):", "body": "p[<NUM_LIT:0>] = tuple(list(p)[<NUM_LIT:1>:])<EOL>", "docstring": "number                    : css_number\n                                      | css_number t_ws", "id": "f12427:c2:m89"}
{"signature": "def scopemap(self):", "body": "utility.debug_print(self.result)<EOL>", "docstring": "Output scopemap.", "id": "f12427:c2:m3"}
{"signature": "def p_media_query_list(self, p):", "body": "p[<NUM_LIT:0>] = [p[<NUM_LIT:1>]]<EOL>", "docstring": "media_query_list            : media_query", "id": "f12427:c2:m56"}
{"signature": "def p_mixin_guard(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:2>]<EOL>", "docstring": "mixin_guard               : less_when mixin_guard_cond_list", "id": "f12427:c2:m18"}
{"signature": "def p_block_open_media_query(self, p):", "body": "p[<NUM_LIT:0>] = Identifier(p[<NUM_LIT:1>]).parse(self.scope)<EOL>", "docstring": "block_open                : media_query_decl brace_open", "id": "f12427:c2:m13"}
{"signature": "def p_property_decl_arguments(self, p):", "body": "p[<NUM_LIT:0>] = Property([p[<NUM_LIT:1>], [p[<NUM_LIT:2>]]], p.lineno(<NUM_LIT:3>))<EOL>", "docstring": "property_decl           : prop_open less_arguments t_semicolon", "id": "f12427:c2:m38"}
{"signature": "def p_media_query_expression(self, p):", "body": "p[<NUM_LIT:0>] = list(p)[<NUM_LIT:1>:]<EOL>", "docstring": "media_query_expression      : t_popen css_media_feature t_pclose\n                                        | t_popen css_media_feature t_colon media_query_value t_pclose", "id": "f12427:c2:m60"}
{"signature": "def p_vendor_property(self, p):", "body": "p[<NUM_LIT:0>] = tuple(list(p)[<NUM_LIT:1>:])<EOL>", "docstring": "vendor_property           : css_vendor_property\n                                      | css_vendor_property t_ws", "id": "f12427:c2:m100"}
{"signature": "def p_page(self, p):", "body": "p[<NUM_LIT:0>] = tuple(list(p)[<NUM_LIT:1>:])<EOL>", "docstring": "page                      : css_page\n                                      | css_page t_ws", "id": "f12427:c2:m99"}
{"signature": "def p_argument_list(self, p):", "body": "p[<NUM_LIT:0>] = [p[<NUM_LIT:1>]]<EOL>", "docstring": "argument_list       : argument", "id": "f12427:c2:m72"}
{"signature": "def p_block_replace(self, p):", "body": "m = p[<NUM_LIT:1>].parse(None)<EOL>block = self.scope.blocks(m.raw())<EOL>if block:<EOL><INDENT>p[<NUM_LIT:0>] = block.copy_inner(self.scope)<EOL><DEDENT>else:<EOL><INDENT>p[<NUM_LIT:0>] = Deferred(p[<NUM_LIT:1>], None, p.lineno(<NUM_LIT:2>))<EOL><DEDENT>", "docstring": "block_decl               : identifier t_semicolon", "id": "f12427:c2:m11"}
{"signature": "def p_ident_part(self, p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "ident_part                : iclass\n                                      | id\n                                      | dom\n                                      | combinator\n                                      | color", "id": "f12427:c2:m63"}
{"signature": "def p_identifier_istr(self, p):", "body": "p[<NUM_LIT:0>] = Identifier(Call([p[<NUM_LIT:2>], p[<NUM_LIT:3>]]), <NUM_LIT:0>)<EOL>", "docstring": "identifier                : t_popen estring t_pclose", "id": "f12427:c2:m45"}
{"signature": "def p_empty(self, p):", "body": "pass<EOL>", "docstring": "empty                        :", "id": "f12427:c2:m110"}
{"signature": "def p_tunit(self, p):", "body": "p[<NUM_LIT:0>] = [u for u in p[<NUM_LIT:1>] if u]<EOL>", "docstring": "tunit                    : unit_list", "id": "f12427:c2:m4"}
{"signature": "def p_font_face_open(self, p):", "body": "p[<NUM_LIT:0>] = Identifier([p[<NUM_LIT:1>], p[<NUM_LIT:2>]]).parse(self.scope)<EOL>", "docstring": "block_open                : css_font_face t_ws brace_open", "id": "f12427:c2:m14"}
{"signature": "def p_prop_open(self, p):", "body": "p[<NUM_LIT:0>] = (p[<NUM_LIT:1>][<NUM_LIT:0>], '<STR_LIT>')<EOL>", "docstring": "prop_open               : property t_colon\n                                    | vendor_property t_colon\n                                    | word t_colon", "id": "f12427:c2:m40"}
{"signature": "def p_identifier_list_aux(self, p):", "body": "p[<NUM_LIT:1>].extend([p[<NUM_LIT:2>]])<EOL>p[<NUM_LIT:1>].extend(p[<NUM_LIT:3>])<EOL>p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "identifier_list           : identifier_list t_comma identifier_group", "id": "f12427:c2:m46"}
{"signature": "def p_identifier_list_keyframe(self, p):", "body": "p[<NUM_LIT:0>] = list(p)[<NUM_LIT:1>:]<EOL>", "docstring": "identifier_list           : css_keyframes t_ws css_ident\n                                      | css_keyframes t_ws css_ident t_ws", "id": "f12427:c2:m48"}
{"signature": "def t_iselector_less_variable(self, t):", "body": "return t<EOL>", "docstring": "r'@\\{[^@\\}]+\\}", "id": "f12428:c0:m9"}
{"signature": "def t_css_ms_filter(self, t):", "body": "return t<EOL>", "docstring": "r'(?:progid:|DX\\.)[^;\\(]*", "id": "f12428:c0:m2"}
{"signature": "def t_escapeapostrophe_t_eclose(self, t):", "body": "t.lexer.pop_state()<EOL>return t<EOL>", "docstring": "r'\\", "id": "f12428:c0:m45"}
{"signature": "def token(self):", "body": "if self.next_:<EOL><INDENT>t = self.next_<EOL>self.next_ = None<EOL>return t<EOL><DEDENT>while True:<EOL><INDENT>t = self.lexer.token()<EOL>if not t:<EOL><INDENT>return t<EOL><DEDENT>if t.type == '<STR_LIT>' and (<EOL>self.pretok or<EOL>(self.last and self.last.type not in self.significant_ws)):<EOL><INDENT>continue<EOL><DEDENT>self.pretok = False<EOL>if t.type == '<STR_LIT>' and self.last and self.last.type not in ['<STR_LIT>', '<STR_LIT>'] and self.last.type != '<STR_LIT>'and not (hasattr(t, '<STR_LIT>') and (t.lexer.lexstate == '<STR_LIT>' or t.lexer.lexstate == '<STR_LIT>')):<EOL><INDENT>self.next_ = t<EOL>tok = lex.LexToken()<EOL>tok.type = '<STR_LIT>'<EOL>tok.value = '<STR_LIT:;>'<EOL>tok.lineno = t.lineno<EOL>tok.lexpos = t.lexpos<EOL>self.last = tok<EOL>self.lexer.in_property_decl = False<EOL>return tok<EOL><DEDENT>self.last = t<EOL>break<EOL><DEDENT>return t<EOL>", "docstring": "Token function. Contains 2 hacks:\n    1.  Injects ';' into blocks where the last property\n        leaves out the ;\n    2.  Strips out whitespace from nonsignificant locations\n        to ease parsing.", "id": "f12428:c0:m58"}
{"signature": "def input(self, file):", "body": "if isinstance(file, string_types):<EOL><INDENT>with open(file) as f:<EOL><INDENT>self.lexer.input(f.read())<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.lexer.input(file.read())<EOL><DEDENT>", "docstring": "Load lexer with content from `file` which can be a path or a file\nlike object.", "id": "f12428:c0:m57"}
{"signature": "def t_iselector_t_bopen(self, t):", "body": "t.lexer.pop_state()<EOL>return t<EOL>", "docstring": "r'\\{", "id": "f12428:c0:m14"}
{"signature": "def t_css_comment(self, t):", "body": "t.lexer.lineno += t.value.count('<STR_LIT:\\n>')<EOL>pass<EOL>", "docstring": "r'(/\\*(.|\\n|\\r)*?\\*/)", "id": "f12428:c0:m31"}
{"signature": "def t_mediaquery_t_not(self, t):", "body": "return t<EOL>", "docstring": "r'not", "id": "f12428:c0:m16"}
{"signature": "def t_less_open_format(self, t):", "body": "t.lexer.push_state('<STR_LIT>')<EOL>return t<EOL>", "docstring": "r'%\\(", "id": "f12428:c0:m36"}
{"signature": "def t_istringquotes_css_string(self, t):", "body": "t.lexer.lineno += t.value.count('<STR_LIT:\\n>')<EOL>return t<EOL>", "docstring": "r'[^\"@]+", "id": "f12428:c0:m51"}
{"signature": "def t_import_t_semicolon(self, t):", "body": "t.lexer.pop_state()<EOL>return t<EOL>", "docstring": "r';", "id": "f12428:c0:m25"}
{"signature": "def t_istringapostrophe_css_string(self, t):", "body": "t.lexer.lineno += t.value.count('<STR_LIT:\\n>')<EOL>return t<EOL>", "docstring": "r'[^\\'@]+", "id": "f12428:c0:m50"}
{"signature": "def t_escapeapostrophe_less_variable(self, t):", "body": "return t<EOL>", "docstring": "r'@\\{[^@\\'\\}]+\\}", "id": "f12428:c0:m43"}
{"signature": "def t_t_comma(self, t):", "body": "t.lexer.in_property_decl = False<EOL>return t<EOL>", "docstring": "r',", "id": "f12428:c0:m6"}
{"signature": "def t_t_bclose(self, t):", "body": "return t<EOL>", "docstring": "r'\\}", "id": "f12428:c0:m4"}
{"signature": "def t_istringapostrophe_less_variable(self, t):", "body": "return t<EOL>", "docstring": "r'@\\{[^@\\'\\}]+\\}", "id": "f12428:c0:m49"}
{"signature": "def t_escapequotes_t_eclose(self, t):", "body": "t.lexer.pop_state()<EOL>return t<EOL>", "docstring": "r", "id": "f12428:c0:m44"}
{"signature": "def t_t_ws(self, t):", "body": "t.value = '<STR_LIT:U+0020>'<EOL>return t<EOL>", "docstring": "r'[ \\t\\f\\v]+", "id": "f12428:c0:m34"}
{"signature": "def t_istringquotes_less_variable(self, t):", "body": "return t<EOL>", "docstring": "r'@\\{[^@\"\\}]+\\}", "id": "f12428:c0:m48"}
{"signature": "def t_mediaquery_t_only(self, t):", "body": "return t<EOL>", "docstring": "r'only", "id": "f12428:c0:m17"}
{"signature": "def t_t_semicolon(self, t):", "body": "t.lexer.in_property_decl = False<EOL>return t<EOL>", "docstring": "r';", "id": "f12428:c0:m39"}
{"signature": "def t_t_tilde(self, t):", "body": "return t<EOL>", "docstring": "r'~", "id": "f12428:c0:m41"}
{"signature": "def t_mediaquery_t_popen(self, t):", "body": "<EOL>return t<EOL>", "docstring": "r'\\(", "id": "f12428:c0:m19"}
{"signature": "def t_less_comment(self, t):", "body": "pass<EOL>", "docstring": "r'//.*", "id": "f12428:c0:m32"}
{"signature": "def t_parn_t_pclose(self, t):", "body": "t.lexer.pop_state()<EOL>return t<EOL>", "docstring": "r'\\)", "id": "f12428:c0:m37"}
{"signature": "def is_color(value):", "body": "if not value or not isinstance(value, string_types):<EOL><INDENT>return False<EOL><DEDENT>if value[<NUM_LIT:0>] == '<STR_LIT:#>' and len(value) in [<NUM_LIT:4>, <NUM_LIT:5>, <NUM_LIT:7>, <NUM_LIT:9>]:<EOL><INDENT>try:<EOL><INDENT>int(value[<NUM_LIT:1>:], <NUM_LIT:16>)<EOL>return True<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Is string CSS color\n    args:\n        value (str): string\n    returns:\n        bool", "id": "f12430:m9"}
{"signature": "def away_from_zero_round(value, ndigits=<NUM_LIT:0>):", "body": "if sys.version_info[<NUM_LIT:0>] >= <NUM_LIT:3>:<EOL><INDENT>p = <NUM_LIT:10>**ndigits<EOL>return float(math.floor((value * p) + math.copysign(<NUM_LIT:0.5>, value))) / p<EOL><DEDENT>else:<EOL><INDENT>return round(value, ndigits)<EOL><DEDENT>", "docstring": "Round half-way away from zero.\n\n    Python2's round() method.", "id": "f12430:m14"}
{"signature": "def blocksearch(block, name):", "body": "if hasattr(block, '<STR_LIT>'):<EOL><INDENT>for b in block.tokens[<NUM_LIT:1>]:<EOL><INDENT>b = (b if hasattr(b, '<STR_LIT>') and b.raw() == name else blocksearch(<EOL>b, name))<EOL>if b:<EOL><INDENT>return b<EOL><DEDENT><DEDENT><DEDENT>return False<EOL>", "docstring": "Recursive search for name in block (inner blocks)\n    Args:\n        name (str): search term\n    Returns:\n        Block OR False", "id": "f12430:m3"}
{"signature": "def debug_print(lst, lvl=<NUM_LIT:0>):", "body": "pad = '<STR_LIT>'.join(['<STR_LIT>'] * lvl)<EOL>t = type(lst)<EOL>if t is list:<EOL><INDENT>for p in lst:<EOL><INDENT>debug_print(p, lvl)<EOL><DEDENT><DEDENT>elif hasattr(lst, '<STR_LIT>'):<EOL><INDENT>print(pad, t)<EOL>debug_print(list(flatten(lst.tokens)), lvl + <NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Print scope tree\n    args:\n        lst (list): parse result\n        lvl (int): current nesting level", "id": "f12430:m5"}
{"signature": "def is_int(value):", "body": "try:<EOL><INDENT>int(str(value))<EOL>return True<EOL><DEDENT>except (ValueError, TypeError):<EOL><INDENT>pass<EOL><DEDENT>return False<EOL>", "docstring": "Is value integer\n    args:\n        value (str): string\n    returns:\n        bool", "id": "f12430:m11"}
{"signature": "def with_unit(number, unit=None):", "body": "if isinstance(number, tuple):<EOL><INDENT>number, unit = number<EOL><DEDENT>if number == <NUM_LIT:0>:<EOL><INDENT>return '<STR_LIT:0>'<EOL><DEDENT>if unit:<EOL><INDENT>number = str(number)<EOL>if number.startswith('<STR_LIT:.>'):<EOL><INDENT>number = '<STR_LIT:0>' + number<EOL><DEDENT>return \"<STR_LIT>\" % (number, unit)<EOL><DEDENT>return number if isinstance(number, string_types) else str(number)<EOL>", "docstring": "Return number with unit\n    args:\n        number (mixed): Number\n        unit (str): Unit\n    returns:\n        str", "id": "f12430:m8"}
{"signature": "def swap(self, name):", "body": "if name.startswith('<STR_LIT>'):<EOL><INDENT>var = self.variables(name[<NUM_LIT:1>:])<EOL>if var is False:<EOL><INDENT>raise SyntaxError('<STR_LIT>' % name)<EOL><DEDENT>name = '<STR_LIT:@>' + utility.destring(var.value[<NUM_LIT:0>])<EOL>var = self.variables(name)<EOL>if var is False:<EOL><INDENT>raise SyntaxError('<STR_LIT>' % name)<EOL><DEDENT><DEDENT>elif name.startswith('<STR_LIT>'):<EOL><INDENT>var = self.variables('<STR_LIT:@>' + name[<NUM_LIT:2>:-<NUM_LIT:1>])<EOL>if var is False:<EOL><INDENT>raise SyntaxError('<STR_LIT>' % name)<EOL><DEDENT>if isinstance(var.value[<NUM_LIT:0>], string_types):<EOL><INDENT>var.value[<NUM_LIT:0>] = utility.destring(var.value[<NUM_LIT:0>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>var = self.variables(name)<EOL>if var is False:<EOL><INDENT>raise SyntaxError('<STR_LIT>' % name)<EOL><DEDENT><DEDENT>return var.value<EOL>", "docstring": "Swap variable name for variable value\n        Args:\n            name (str): Variable name\n        Returns:\n            Variable value (Mixed)", "id": "f12432:c0:m15"}
{"signature": "def blocks(self, name):", "body": "b = self._blocks(name)<EOL>if b:<EOL><INDENT>return b<EOL><DEDENT>return self._blocks(name.replace('<STR_LIT>', '<STR_LIT:U+0020>'))<EOL>", "docstring": "Search for defined blocks recursively.\nAllow '>' to be ignored. '.a .b' == '.a > .b'\nArgs:\n    name (string): Search term\nReturns:\n    Block object OR False", "id": "f12432:c0:m12"}
{"signature": "def add_block(self, block):", "body": "self[-<NUM_LIT:1>]['<STR_LIT>'].append(block)<EOL>self[-<NUM_LIT:1>]['<STR_LIT>'].append(block.raw())<EOL>", "docstring": "Add block element to scope\n        Args:\n            block (Block): Block object", "id": "f12432:c0:m5"}
{"signature": "def add_mixin(self, mixin):", "body": "raw = mixin.tokens[<NUM_LIT:0>][<NUM_LIT:0>].raw()<EOL>if raw in self._mixins:<EOL><INDENT>self._mixins[raw].append(mixin)<EOL><DEDENT>else:<EOL><INDENT>self._mixins[raw] = [mixin]<EOL><DEDENT>", "docstring": "Add mixin to scope\n        Args:\n            mixin (Mixin): Mixin object", "id": "f12432:c0:m7"}
{"signature": "def push(self):", "body": "self.append({<EOL>'<STR_LIT>': {},<EOL>'<STR_LIT>': [],<EOL>'<STR_LIT>': [],<EOL>'<STR_LIT>': None<EOL>})<EOL>", "docstring": "Push level on scope", "id": "f12432:c0:m1"}
{"signature": "def mixins(self, name):", "body": "m = self._smixins(name)<EOL>if m:<EOL><INDENT>return m<EOL><DEDENT>return self._smixins(name.replace('<STR_LIT>', '<STR_LIT:U+0020>'))<EOL>", "docstring": "Search mixins for name.\n        Allow '>' to be ignored. '.a .b()' == '.a > .b()'\n        Args:\n            name (string): Search term\n        Returns:\n            Mixin object list OR False", "id": "f12432:c0:m10"}
{"signature": "def add_variable(self, variable):", "body": "self[-<NUM_LIT:1>]['<STR_LIT>'][variable.name] = variable<EOL>", "docstring": "Add variable to scope\n        Args:\n            variable (Variable): Variable object", "id": "f12432:c0:m8"}
{"signature": "def rgb(self, *args):", "body": "if len(args) == <NUM_LIT:4>:<EOL><INDENT>args = args[:<NUM_LIT:3>]<EOL><DEDENT>if len(args) == <NUM_LIT:3>:<EOL><INDENT>try:<EOL><INDENT>return self._rgbatohex(list(map(int, args)))<EOL><DEDENT>except ValueError:<EOL><INDENT>if all((a for a in args<EOL>if a[-<NUM_LIT:1>] == '<STR_LIT:%>' and <NUM_LIT:100> >= int(a[:-<NUM_LIT:1>]) >= <NUM_LIT:0>)):<EOL><INDENT>return self._rgbatohex(<EOL>[int(a[:-<NUM_LIT:1>]) * <NUM_LIT:255> / <NUM_LIT> for a in args])<EOL><DEDENT><DEDENT><DEDENT>raise ValueError('<STR_LIT>')<EOL>", "docstring": "Translate rgb(...) to color string\n        raises:\n            ValueError\n        returns:\n            str", "id": "f12433:c0:m2"}
{"signature": "def darken(self, color, diff, *args):", "body": "if color and diff:<EOL><INDENT>return self._ophsl(color, diff, <NUM_LIT:1>, operator.sub)<EOL><DEDENT>raise ValueError('<STR_LIT>')<EOL>", "docstring": "Darken a color\n        args:\n            color (str): color\n            diff (str): percentage\n        returns:\n            str", "id": "f12433:c0:m12"}
{"signature": "def desaturate(self, color, diff, *args):", "body": "if color and diff:<EOL><INDENT>return self._ophsl(color, diff, <NUM_LIT:2>, operator.sub)<EOL><DEDENT>raise ValueError('<STR_LIT>')<EOL>", "docstring": "Desaturate a color\n        args:\n            color (str): color\n            diff (str): percentage\n        returns:\n            str", "id": "f12433:c0:m14"}
{"signature": "def saturate(self, color, diff, *args):", "body": "if color and diff:<EOL><INDENT>return self._ophsl(color, diff, <NUM_LIT:2>, operator.add)<EOL><DEDENT>raise ValueError('<STR_LIT>')<EOL>", "docstring": "Saturate a color\n        args:\n            color (str): color\n            diff (str): percentage\n        returns:\n            str", "id": "f12433:c0:m13"}
{"signature": "def lighten(self, color, diff, *args):", "body": "if color and diff:<EOL><INDENT>return self._ophsl(color, diff, <NUM_LIT:1>, operator.add)<EOL><DEDENT>raise ValueError('<STR_LIT>')<EOL>", "docstring": "Lighten a color\n        args:\n            color (str): color\n            diff (str): percentage\n        returns:\n            str", "id": "f12433:c0:m11"}
{"signature": "def mix(self, color1, color2, weight=<NUM_LIT:50>, *args):", "body": "if color1 and color2:<EOL><INDENT>if isinstance(weight, string_types):<EOL><INDENT>weight = float(weight.strip('<STR_LIT:%>'))<EOL><DEDENT>weight = ((weight / <NUM_LIT>) * <NUM_LIT:2>) - <NUM_LIT:1><EOL>rgb1 = self._hextorgb(color1)<EOL>rgb2 = self._hextorgb(color2)<EOL>alpha = <NUM_LIT:0><EOL>w1 = (((weight if weight * alpha == -<NUM_LIT:1> else weight + alpha) /<EOL>(<NUM_LIT:1> + weight * alpha)) + <NUM_LIT:1>)<EOL>w1 = w1 / <NUM_LIT><EOL>w2 = <NUM_LIT:1> - w1<EOL>rgb = [<EOL>rgb1[<NUM_LIT:0>] * w1 + rgb2[<NUM_LIT:0>] * w2,<EOL>rgb1[<NUM_LIT:1>] * w1 + rgb2[<NUM_LIT:1>] * w2,<EOL>rgb1[<NUM_LIT:2>] * w1 + rgb2[<NUM_LIT:2>] * w2,<EOL>]<EOL>return self._rgbatohex(rgb)<EOL><DEDENT>raise ValueError('<STR_LIT>')<EOL>", "docstring": "This algorithm factors in both the user-provided weight\n        and the difference between the alpha values of the two colors\n        to decide how to perform the weighted average of the two RGB values.\n\n        It works by first normalizing both parameters to be within [-1, 1],\n        where 1 indicates \"only use color1\", -1 indicates \"only use color 0\",\n        and all values in between indicated a proportionately weighted average.\n\n        Once we have the normalized variables w and a,\n        we apply the formula (w + a)/(1 + w*a)\n        to get the combined weight (in [-1, 1]) of color1.\n        This formula has two especially nice properties:\n\n         * When either w or a are -1 or 1, the combined weight is also that number\n           (cases where w * a == -1 are undefined, and handled as a special case).\n\n         * When a is 0, the combined weight is w, and vice versa\n\n        Finally, the weight of color1 is renormalized to be within [0, 1]\n        and the weight of color2 is given by 1 minus the weight of color1.\n\n        Copyright (c) 2006-2009 Hampton Catlin, Nathan Weizenbaum, and Chris Eppstein\n        
http://sass-lang.com\n        args:\n            color1 (str): first color\n            color2 (str): second color\n            weight (int/str): weight\n        raises:\n            ValueError\n        returns:\n            str", "id": "f12433:c0:m19"}
{"signature": "def hsla(self, *args):", "body": "if len(args) == <NUM_LIT:4>:<EOL><INDENT>h, s, l, a = args<EOL>rgb = colorsys.hls_to_rgb(<EOL>int(h) / <NUM_LIT>, utility.pc_or_float(l), utility.pc_or_float(s))<EOL>color = [float(utility.convergent_round(c * <NUM_LIT:255>)) for c in rgb]<EOL>color.append(utility.pc_or_float(a))<EOL>return \"<STR_LIT>\" % tuple(color)<EOL><DEDENT>raise ValueError('<STR_LIT>')<EOL>", "docstring": "Translate hsla(...) to color string\n        raises:\n            ValueError\n        returns:\n            str", "id": "f12433:c0:m6"}
{"signature": "def process(self, expression):", "body": "a, o, b = expression<EOL>c1 = self._hextorgb(a)<EOL>c2 = self._hextorgb(b)<EOL>r = ['<STR_LIT:#>']<EOL>for i in range(<NUM_LIT:3>):<EOL><INDENT>v = self.operate(c1[i], c2[i], o)<EOL>if v > <NUM_LIT>:<EOL><INDENT>v = <NUM_LIT><EOL><DEDENT>if v < <NUM_LIT:0>:<EOL><INDENT>v = <NUM_LIT:0><EOL><DEDENT>r.append(\"<STR_LIT>\" % int(v))<EOL><DEDENT>return '<STR_LIT>'.join(r)<EOL>", "docstring": "Process color expression\n        args:\n            expression (tuple): color expression\n        returns:\n            str", "id": "f12433:c0:m0"}
{"signature": "def rgba(self, *args):", "body": "if len(args) == <NUM_LIT:4>:<EOL><INDENT>try:<EOL><INDENT>falpha = float(list(args)[<NUM_LIT:3>])<EOL>if falpha > <NUM_LIT:1>:<EOL><INDENT>args = args[:<NUM_LIT:3>]<EOL><DEDENT>if falpha == <NUM_LIT:0>:<EOL><INDENT>values = self._rgbatohex_raw(list(map(int, args)))<EOL>return \"<STR_LIT>\" % '<STR_LIT:U+002C>'.join([str(a) for a in values])<EOL><DEDENT>return self._rgbatohex(list(map(int, args)))<EOL><DEDENT>except ValueError:<EOL><INDENT>if all((a for a in args<EOL>if a[-<NUM_LIT:1>] == '<STR_LIT:%>' and <NUM_LIT:100> >= int(a[:-<NUM_LIT:1>]) >= <NUM_LIT:0>)):<EOL><INDENT>alpha = list(args)[<NUM_LIT:3>]<EOL>if alpha[-<NUM_LIT:1>] == '<STR_LIT:%>' and float(alpha[:-<NUM_LIT:1>]) == <NUM_LIT:0>:<EOL><INDENT>values = self._rgbatohex_raw(<EOL>[int(a[:-<NUM_LIT:1>]) * <NUM_LIT:255> / <NUM_LIT> for a in args])<EOL>return \"<STR_LIT>\" % '<STR_LIT:U+002C>'.join([str(a) for a in values])<EOL><DEDENT>return self._rgbatohex(<EOL>[int(a[:-<NUM_LIT:1>]) * <NUM_LIT:255> / <NUM_LIT> for a in args])<EOL><DEDENT><DEDENT><DEDENT>raise ValueError('<STR_LIT>')<EOL>", "docstring": "Translate rgba(...) to color string\n        raises:\n            ValueError\n        returns:\n            str", "id": "f12433:c0:m3"}
{"signature": "def argb(self, *args):", "body": "if len(args) == <NUM_LIT:1> and type(args[<NUM_LIT:0>]) is str:<EOL><INDENT>match = re.match(r'<STR_LIT>', args[<NUM_LIT:0>])<EOL>if match:<EOL><INDENT>rgb = re.sub(r'<STR_LIT>', '<STR_LIT>', match.group(<NUM_LIT:1>)).split('<STR_LIT:U+002C>')<EOL><DEDENT>else:<EOL><INDENT>rgb = list(self._hextorgb(args[<NUM_LIT:0>]))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rgb = list(args)<EOL><DEDENT>if len(rgb) == <NUM_LIT:3>:<EOL><INDENT>return self._rgbatohex([<NUM_LIT:255>] + list(map(int, rgb)))<EOL><DEDENT>elif len(rgb) == <NUM_LIT:4>:<EOL><INDENT>rgb = [rgb.pop()] + rgb  <EOL>try:<EOL><INDENT>fval = float(list(rgb)[<NUM_LIT:0>])<EOL>if fval > <NUM_LIT:1>:<EOL><INDENT>rgb = [<NUM_LIT:255>] + rgb[<NUM_LIT:1>:]  <EOL><DEDENT>elif <NUM_LIT:1> >= fval >= <NUM_LIT:0>:<EOL><INDENT>rgb = [<EOL>fval * <NUM_LIT><EOL>] + rgb[<NUM_LIT:1>:]  <EOL><DEDENT>else:<EOL><INDENT>rgb = [<NUM_LIT:0>] + rgb[<NUM_LIT:1>:]  <EOL><DEDENT>return self._rgbatohex(list(map(int, rgb)))<EOL><DEDENT>except ValueError:<EOL><INDENT>if all((a for a in rgb<EOL>if a[-<NUM_LIT:1>] == '<STR_LIT:%>' and <NUM_LIT:100> >= int(a[:-<NUM_LIT:1>]) >= <NUM_LIT:0>)):<EOL><INDENT>return self._rgbatohex(<EOL>[int(a[:-<NUM_LIT:1>]) * <NUM_LIT:255> / <NUM_LIT> for a in rgb])<EOL><DEDENT><DEDENT><DEDENT>raise ValueError('<STR_LIT>')<EOL>", "docstring": "Translate argb(...) to color string\n\n        Creates a hex representation of a color in #AARRGGBB format (NOT\n        #RRGGBBAA!). This format is used in Internet Explorer, and .NET\n        and Android development.\n\n        raises:\n            ValueError\n        returns:\n            str", "id": "f12433:c0:m4"}
{"signature": "def parse(self, scope):", "body": "self.keyframe, = [<EOL>e[<NUM_LIT:0>] if isinstance(e, tuple) else e for e in self.tokens<EOL>if str(e).strip()<EOL>]<EOL>self.subparse = False<EOL>return self<EOL>", "docstring": "Parse node.\n        args:\n            scope (Scope): Current scope\n        raises:\n            SyntaxError\n        returns:\n            self", "id": "f12440:c0:m0"}
{"signature": "def call(self, scope, args=[]):", "body": "ret = False<EOL>if args:<EOL><INDENT>args = [[<EOL>a.parse(scope) if isinstance(a, Expression) else a for a in arg<EOL>] if arg else arg for arg in args]<EOL><DEDENT>try:<EOL><INDENT>self.parse_args(args, scope)<EOL><DEDENT>except SyntaxError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if self.parse_guards(scope):<EOL><INDENT>body = self.body.copy()<EOL>ret = body.tokens[<NUM_LIT:1>]<EOL>if ret:<EOL><INDENT>utility.rename(ret, scope, Block)<EOL><DEDENT><DEDENT><DEDENT>return ret<EOL>", "docstring": "Call mixin. Parses a copy of the mixins body\n        in the current scope and returns it.\n        args:\n            scope (Scope): current scope\n            args (list): arguments\n        raises:\n            SyntaxError\n        returns:\n            list or False", "id": "f12441:c0:m5"}
{"signature": "def fmt(self, fills):", "body": "return '<STR_LIT>'.join(self.parsed) + fills['<STR_LIT>']<EOL>", "docstring": "Format node\n        args:\n            fills (dict): replacements\n        returns:\n            str", "id": "f12442:c0:m1"}
{"signature": "def expression(self):", "body": "return utility.flatten(self.tokens)<EOL>", "docstring": "Return str representation of expression\n        returns:\n            str", "id": "f12443:c0:m3"}
{"signature": "def parse(self, scope):", "body": "assert (len(self.tokens) == <NUM_LIT:3>)<EOL>expr = self.process(self.tokens, scope)<EOL>A, O, B = [<EOL>e[<NUM_LIT:0>] if isinstance(e, tuple) else e for e in expr<EOL>if str(e).strip()<EOL>]<EOL>try:<EOL><INDENT>a, ua = utility.analyze_number(A, '<STR_LIT>')<EOL>b, ub = utility.analyze_number(B, '<STR_LIT>')<EOL><DEDENT>except SyntaxError:<EOL><INDENT>return '<STR_LIT:U+0020>'.join([str(A), str(O), str(B)])<EOL><DEDENT>if (a is False or b is False):<EOL><INDENT>return '<STR_LIT:U+0020>'.join([str(A), str(O), str(B)])<EOL><DEDENT>if ua == '<STR_LIT>' or ub == '<STR_LIT>':<EOL><INDENT>return color.Color().process((A, O, B))<EOL><DEDENT>if a == <NUM_LIT:0> and O == '<STR_LIT:/>':<EOL><INDENT>return '<STR_LIT>'.join([str(A), str(O), str(B), '<STR_LIT:U+0020>'])<EOL><DEDENT>out = self.operate(a, b, O)<EOL>if isinstance(out, bool):<EOL><INDENT>return out<EOL><DEDENT>return self.with_units(out, ua, ub)<EOL>", "docstring": "Parse Node\n        args:\n            scope (Scope): Scope object\n        raises:\n            SyntaxError\n        returns:\n            str", "id": "f12443:c0:m0"}
{"signature": "def copy(self):", "body": "return Property([t for t in self.tokens], <NUM_LIT:0>)<EOL>", "docstring": "Return a full copy of self\n        Returns:\n            Property object", "id": "f12444:c0:m3"}
{"signature": "def copy(self):", "body": "return Import([t for t in self.tokens], <NUM_LIT:0>)<EOL>", "docstring": "Return a full copy of self\n        Returns:\n            Import object", "id": "f12448:c0:m2"}
{"signature": "def parse(self, scope):", "body": "self.name, _, self.value = self.tokens<EOL>if isinstance(self.name, tuple):<EOL><INDENT>if len(self.name) > <NUM_LIT:1>:<EOL><INDENT>self.name, pad = self.name<EOL>self.value.append(pad)<EOL><DEDENT>else:<EOL><INDENT>self.name = self.name[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>scope.add_variable(self)<EOL>return self<EOL>", "docstring": "Parse function\n        args:\n            scope (Scope): Scope object\n        returns:\n            self", "id": "f12449:c0:m0"}
{"signature": "def copy(self):", "body": "return Variable([t for t in self.tokens])<EOL>", "docstring": "Return a copy of self\n        Returns:\n            Variable object", "id": "f12449:c0:m1"}
{"signature": "def raw(self, clean=False):", "body": "if clean:<EOL><INDENT>return '<STR_LIT>'.join('<STR_LIT>'.join(p) for p in self.parsed).replace('<STR_LIT:?>', '<STR_LIT:U+0020>')<EOL><DEDENT>return '<STR_LIT:%>'.join('<STR_LIT:%>'.join(p) for p in self.parsed).strip().strip('<STR_LIT:%>')<EOL>", "docstring": "Raw identifier.\n        args:\n            clean (bool): clean name\n        returns:\n            str", "id": "f12450:c0:m2"}
{"signature": "def fmt(self, fills):", "body": "name = '<STR_LIT>'.join('<STR_LIT>'.join(p).strip() for p in self.parsed)<EOL>name = re.sub('<STR_LIT>', '<STR_LIT>', name) % fills<EOL>return name.replace('<STR_LIT>', fills['<STR_LIT>']).replace('<STR_LIT:U+0020>', '<STR_LIT:U+0020>')<EOL>", "docstring": "Format identifier\n        args:\n            fills (dict): replacements\n        returns:\n            str (CSS)", "id": "f12450:c0:m4"}
{"signature": "def parse(self, scope):", "body": "names = []<EOL>name = []<EOL>self._subp = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>')<EOL>if self.tokens and hasattr(self.tokens, '<STR_LIT>'):<EOL><INDENT>self.tokens = list(<EOL>utility.flatten([<EOL>id.split() + ['<STR_LIT:U+002C>']<EOL>for id in self.tokens.parse(scope).split('<STR_LIT:U+002C>')<EOL>]))<EOL>self.tokens.pop()<EOL><DEDENT>if self.tokens and any(hasattr(t, '<STR_LIT>') for t in self.tokens):<EOL><INDENT>tmp_tokens = []<EOL>for t in self.tokens:<EOL><INDENT>if hasattr(t, '<STR_LIT>'):<EOL><INDENT>tmp_tokens.append(t.parse(scope))<EOL><DEDENT>else:<EOL><INDENT>tmp_tokens.append(t)<EOL><DEDENT><DEDENT>self.tokens = list(utility.flatten(tmp_tokens))<EOL><DEDENT>if self.tokens and self.tokens[<NUM_LIT:0>] in self._subp:<EOL><INDENT>name = list(utility.flatten(self.tokens))<EOL>self.subparse = True<EOL><DEDENT>else:<EOL><INDENT>self.subparse = False<EOL>for n in utility.flatten(self.tokens):<EOL><INDENT>if n == '<STR_LIT:*>':<EOL><INDENT>name.append('<STR_LIT>')<EOL><DEDENT>elif n in '<STR_LIT>':<EOL><INDENT>if name and name[-<NUM_LIT:1>] == '<STR_LIT:U+0020>':<EOL><INDENT>name.pop()<EOL><DEDENT>name.append('<STR_LIT>' % n)<EOL><DEDENT>elif n == '<STR_LIT:U+002C>':<EOL><INDENT>names.append(name)<EOL>name = []<EOL><DEDENT>else:<EOL><INDENT>name.append(n)<EOL><DEDENT><DEDENT><DEDENT>names.append(name)<EOL>parsed = self.root(scope, names) if scope else names<EOL>def replace_variables(tokens, scope):<EOL><INDENT>return [<EOL>scope.swap(t)<EOL>if (utility.is_variable(t) and not t in reserved.tokens) else t<EOL>for t in tokens<EOL>]<EOL><DEDENT>parsed = [<EOL>list(utility.flatten(replace_variables(part, scope)))<EOL>for part in parsed<EOL>]<EOL>self.parsed = [[<EOL>i for i, j in utility.pairwise(part)<EOL>if i != '<STR_LIT:U+0020>' or (j and '<STR_LIT:?>' not in j)<EOL>] for part in parsed]<EOL>return self<EOL>", "docstring": "Parse node. 
Block identifiers are stored as\n        strings with spaces replaced with ?\n        args:\n            scope (Scope): Current scope\n        raises:\n            SyntaxError\n        returns:\n            self", "id": "f12450:c0:m0"}
{"signature": "def copy_inner(self, scope):", "body": "if self.tokens[<NUM_LIT:1>]:<EOL><INDENT>tokens = [u.copy() if u else u for u in self.tokens[<NUM_LIT:1>]]<EOL>out = [p for p in tokens if p]<EOL>utility.rename(out, scope, Block)<EOL>return out<EOL><DEDENT>return None<EOL>", "docstring": "Copy block contents (properties, inner blocks).\n        Renames inner block from current scope.\n        Used for mixins.\n        args:\n            scope (Scope): Current scope\n        returns:\n            list (block contents)", "id": "f12451:c0:m4"}
{"signature": "def fmt(self, fills):", "body": "f = \"<STR_LIT>\"<EOL>out = []<EOL>name = self.name.fmt(fills)<EOL>if self.parsed and any(<EOL>p for p in self.parsed<EOL>if str(type(p)) != \"<STR_LIT>\"):<EOL><INDENT>fills.update({<EOL>'<STR_LIT>':<EOL>name,<EOL>'<STR_LIT>':<EOL>'<STR_LIT>'.join([p.fmt(fills) for p in self.parsed if p]),<EOL>})<EOL>out.append(f % fills)<EOL><DEDENT>if hasattr(self, '<STR_LIT>'):<EOL><INDENT>if self.name.subparse and len(self.inner) > <NUM_LIT:0>:  <EOL><INDENT>inner = '<STR_LIT>'.join([p.fmt(fills) for p in self.inner])<EOL>inner = inner.replace(fills['<STR_LIT>'],<EOL>fills['<STR_LIT>'] + fills['<STR_LIT>']).rstrip(<EOL>fills['<STR_LIT>'])<EOL>if not fills['<STR_LIT>']:<EOL><INDENT>inner = inner.strip()<EOL><DEDENT>fills.update({<EOL>'<STR_LIT>': name,<EOL>'<STR_LIT>': fills['<STR_LIT>'] + inner<EOL>})<EOL>out.append(f % fills)<EOL><DEDENT>else:<EOL><INDENT>out.append('<STR_LIT>'.join([p.fmt(fills) for p in self.inner]))<EOL><DEDENT><DEDENT>return '<STR_LIT>'.join(out)<EOL>", "docstring": "Format block (CSS)\n        args:\n            fills (dict): Fill elements\n        returns:\n            str (CSS)", "id": "f12451:c0:m2"}
{"signature": "def copy(self):", "body": "name, inner = self.tokens<EOL>if inner:<EOL><INDENT>inner = [u.copy() if u else u for u in inner]<EOL><DEDENT>if name:<EOL><INDENT>name = name.copy()<EOL><DEDENT>return Block([name, inner], <NUM_LIT:0>)<EOL>", "docstring": "Return a full copy of self\n        returns: Block object", "id": "f12451:c0:m3"}
{"signature": "def fmt(self, fills):", "body": "raise ValueError('<STR_LIT>')<EOL>", "docstring": "Format node\n        args:\n            fills (dict): replacements\n        returns:\n            str", "id": "f12452:c0:m4"}
{"signature": "def replace_variables(self, tokens, scope):", "body": "list = []<EOL>for t in tokens:<EOL><INDENT>if utility.is_variable(t):<EOL><INDENT>list.append(scope.swap(t))<EOL><DEDENT>elif str(type(t)) == \"<STR_LIT>\":<EOL><INDENT>list.append(scope.swap(t.name))<EOL><DEDENT>else:<EOL><INDENT>list.append(t)<EOL><DEDENT><DEDENT>return list<EOL>", "docstring": "Replace variables in tokenlist\n        args:\n            tokens (list): tokenlist\n            scope (Scope): Current scope\n        returns:\n            list", "id": "f12452:c0:m3"}
{"signature": "def __init__(self, tokens, lineno=<NUM_LIT:0>):", "body": "self.tokens = tokens<EOL>self.lineno = lineno<EOL>self.parsed = False<EOL>", "docstring": "Base Node\n        args:\n            tokens (list): tokenlist\n            lineno (int): Line number of node", "id": "f12452:c0:m0"}
{"signature": "def parse(self, scope):", "body": "return self<EOL>", "docstring": "Base parse function\n        args:\n            scope (Scope): Current scope\n        returns:\n            self", "id": "f12452:c0:m1"}
{"signature": "def process(self, tokens, scope):", "body": "while True:<EOL><INDENT>tokens = list(utility.flatten(tokens))<EOL>done = True<EOL>if any(t for t in tokens if hasattr(t, '<STR_LIT>')):<EOL><INDENT>tokens = [<EOL>t.parse(scope) if hasattr(t, '<STR_LIT>') else t<EOL>for t in tokens<EOL>]<EOL>done = False<EOL><DEDENT>if any(<EOL>t for t in tokens<EOL>if (utility.is_variable(t)) or str(type(t)) ==<EOL>\"<STR_LIT>\"):<EOL><INDENT>tokens = self.replace_variables(tokens, scope)<EOL>done = False<EOL><DEDENT>if done:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return tokens<EOL>", "docstring": "Process tokenslist, flattening and parsing it\n        args:\n            tokens (list): tokenlist\n            scope (Scope): Current scope\n        returns:\n            list", "id": "f12452:c0:m2"}
{"signature": "def iscolor(self, string, *args):", "body": "return (string in lessColors)<EOL>", "docstring": "Is color\n        args:\n            string (str): match\n        returns:\n            bool", "id": "f12453:c0:m4"}
{"signature": "def escape(self, string, *args):", "body": "return utility.destring(string.strip('<STR_LIT>'))<EOL>", "docstring": "Less Escape.\n        args:\n            string (str): string to escape\n        returns:\n            str", "id": "f12453:c0:m1"}
{"signature": "def isurl(self, string, *args):", "body": "arg = utility.destring(string)<EOL>regex = re.compile(<EOL>r'<STR_LIT>'  <EOL>r'<STR_LIT>'<EOL>r'<STR_LIT>'  <EOL>r'<STR_LIT>'<EOL>r'<STR_LIT>'  <EOL>r'<STR_LIT>'<EOL>r'<STR_LIT>',<EOL>re.IGNORECASE)<EOL>return regex.match(arg)<EOL>", "docstring": "Is url\n        args:\n            string (str): match\n        returns:\n            bool", "id": "f12453:c0:m5"}
{"signature": "def ceil(self, value, *args):", "body": "n, u = utility.analyze_number(value)<EOL>return utility.with_unit(int(math.ceil(n)), u)<EOL>", "docstring": "Ceil number\n        args:\n            value (str): target\n        returns:\n            str", "id": "f12453:c0:m12"}
{"signature": "def run():", "body": "aparse = argparse.ArgumentParser(<EOL>description='<STR_LIT>', epilog='<STR_LIT>')<EOL>aparse.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>', action='<STR_LIT:version>', version=VERSION_STR)<EOL>aparse.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store>\",<EOL>type=str,<EOL>help=\"<STR_LIT>\")<EOL>aparse.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store_true>\",<EOL>default=False,<EOL>help=\"<STR_LIT>\")<EOL>aparse.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store_true>\",<EOL>default=False,<EOL>help=\"<STR_LIT>\")<EOL>fgroup = aparse.add_argument_group('<STR_LIT>')<EOL>fgroup.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store_true>\",<EOL>default=False,<EOL>help=\"<STR_LIT>\")<EOL>fgroup.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store_true>\",<EOL>default=False,<EOL>help=\"<STR_LIT>\")<EOL>fgroup.add_argument('<STR_LIT>', '<STR_LIT>', help=\"<STR_LIT>\", action=\"<STR_LIT:store_true>\")<EOL>fgroup.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>help=\"<STR_LIT>\",<EOL>default=<NUM_LIT:2>)<EOL>dgroup = aparse.add_argument_group(<EOL>'<STR_LIT>', '<STR_LIT>'<EOL>'<STR_LIT>')<EOL>dgroup.add_argument('<STR_LIT>', '<STR_LIT>', action=\"<STR_LIT:store>\", help=\"<STR_LIT>\")<EOL>dgroup.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store_true>\",<EOL>help=\"<STR_LIT>\")<EOL>dgroup.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store_true>\",<EOL>help=\"<STR_LIT>\")<EOL>dgroup.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store_true>\",<EOL>default=False,<EOL>help=\"<STR_LIT>\")<EOL>dgroup.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store_true>\",<EOL>default=False,<EOL>help=\"<STR_LIT>\")<EOL>group = 
aparse.add_argument_group('<STR_LIT>')<EOL>group.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store_true>\",<EOL>default=False,<EOL>help=\"<STR_LIT>\")<EOL>group.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store_true>\",<EOL>default=False,<EOL>help=\"<STR_LIT>\")<EOL>group.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store_true>\",<EOL>default=False,<EOL>help=\"<STR_LIT>\")<EOL>group.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>action=\"<STR_LIT:store_true>\",<EOL>default=False,<EOL>help=\"<STR_LIT>\")<EOL>aparse.add_argument('<STR_LIT:target>', help=\"<STR_LIT>\")<EOL>aparse.add_argument('<STR_LIT>', nargs='<STR_LIT:?>', help=\"<STR_LIT>\")<EOL>args = aparse.parse_args()<EOL>try:<EOL><INDENT>if args.lex_only:<EOL><INDENT>lex = lexer.LessLexer()<EOL>ll = lex.file(args.target)<EOL>while True:<EOL><INDENT>tok = ll.token()<EOL>if not tok:<EOL><INDENT>break<EOL><DEDENT>if hasattr(tok,<EOL>\"<STR_LIT>\"):  <EOL><INDENT>print(tok, \"<STR_LIT>\", tok.lexer.lexstate)<EOL><DEDENT>else:<EOL><INDENT>print(tok)<EOL><DEDENT><DEDENT>print('<STR_LIT>')<EOL>sys.exit()<EOL><DEDENT>yacctab = '<STR_LIT>' if args.debug else None<EOL>scope = None<EOL>if args.include:<EOL><INDENT>for u in args.include.split('<STR_LIT:U+002C>'):<EOL><INDENT>if os.path.exists(u):<EOL><INDENT>p = parser.LessParser(<EOL>yacc_debug=(args.debug),<EOL>lex_optimize=True,<EOL>yacc_optimize=(not args.debug),<EOL>tabfile=yacctab,<EOL>verbose=args.verbose)<EOL>p.parse(filename=u, debuglevel=args.debug)<EOL>if not scope:<EOL><INDENT>scope = p.scope<EOL><DEDENT>else:<EOL><INDENT>scope.update(p.scope)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>sys.exit('<STR_LIT>' % u)<EOL><DEDENT>sys.stdout.flush()<EOL><DEDENT><DEDENT>p = None<EOL>f = formatter.Formatter(args)<EOL>if not os.path.exists(args.target):<EOL><INDENT>sys.exit(\"<STR_LIT>\" % args.target)<EOL><DEDENT>if os.path.isdir(args.target):<EOL><INDENT>ldirectory(args.target, args.out, 
args, scope)<EOL>if args.dry_run:<EOL><INDENT>print('<STR_LIT>', file=sys.stderr)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>p = parser.LessParser(<EOL>yacc_debug=(args.debug),<EOL>lex_optimize=True,<EOL>yacc_optimize=(not args.debug),<EOL>scope=copy.deepcopy(scope),<EOL>verbose=args.verbose)<EOL>p.parse(filename=args.target, debuglevel=args.debug)<EOL>if args.scopemap:<EOL><INDENT>args.no_css = True<EOL>p.scopemap()<EOL><DEDENT>if not args.no_css and p:<EOL><INDENT>out = f.format(p)<EOL>if args.output:<EOL><INDENT>if not args.dont_create_dirs and not os.path.exists(<EOL>os.path.dirname(args.output)):<EOL><INDENT>try:<EOL><INDENT>os.makedirs(os.path.dirname(args.output))<EOL><DEDENT>except OSError as exc:  <EOL><INDENT>if exc.errno != errno.EEXIST:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT>with open(args.output, \"<STR_LIT:w>\") as f:<EOL><INDENT>f.write(out)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print(out)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>except (KeyboardInterrupt, SystemExit, IOError):<EOL><INDENT>sys.exit('<STR_LIT>')<EOL><DEDENT>", "docstring": "Run compiler", "id": "f12455:m1"}
{"signature": "def ldirectory(inpath, outpath, args, scope):", "body": "yacctab = '<STR_LIT>' if args.debug else None<EOL>if not outpath:<EOL><INDENT>sys.exit(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>if not os.path.isdir(outpath):<EOL><INDENT>if args.verbose:<EOL><INDENT>print(\"<STR_LIT>\" % outpath, file=sys.stderr)<EOL><DEDENT>if not args.dry_run:<EOL><INDENT>os.mkdir(outpath)<EOL><DEDENT><DEDENT><DEDENT>less = glob.glob(os.path.join(inpath, '<STR_LIT>'))<EOL>f = formatter.Formatter(args)<EOL>for lf in less:<EOL><INDENT>outf = os.path.splitext(os.path.basename(lf))<EOL>minx = '<STR_LIT>' if args.min_ending else '<STR_LIT>'<EOL>outf = \"<STR_LIT>\" % (outpath, outf[<NUM_LIT:0>], minx)<EOL>if not args.force and os.path.exists(outf):<EOL><INDENT>recompile = os.path.getmtime(outf) < os.path.getmtime(lf)<EOL><DEDENT>else:<EOL><INDENT>recompile = True<EOL><DEDENT>if recompile:<EOL><INDENT>print('<STR_LIT>' % (lf, outf))<EOL>p = parser.LessParser(<EOL>yacc_debug=(args.debug),<EOL>lex_optimize=True,<EOL>yacc_optimize=(not args.debug),<EOL>scope=scope,<EOL>tabfile=yacctab,<EOL>verbose=args.verbose)<EOL>p.parse(filename=lf, debuglevel=<NUM_LIT:0>)<EOL>css = f.format(p)<EOL>if not args.dry_run:<EOL><INDENT>with open(outf, '<STR_LIT:w>') as outfile:<EOL><INDENT>outfile.write(css)<EOL><DEDENT><DEDENT><DEDENT>elif args.verbose:<EOL><INDENT>print('<STR_LIT>' % lf, file=sys.stderr)<EOL><DEDENT>sys.stdout.flush()<EOL><DEDENT>if args.recurse:<EOL><INDENT>[<EOL>ldirectory(<EOL>os.path.join(inpath, name), os.path.join(outpath, name), args,<EOL>scope) for name in os.listdir(inpath)<EOL>if os.path.isdir(os.path.join(inpath, name))<EOL>and not name.startswith('<STR_LIT:.>') and not name == outpath<EOL>]<EOL><DEDENT>", "docstring": "Compile all *.less files in directory\n    Args:\n        inpath (str): Path to compile\n        outpath (str): Output directory\n        args (object): Argparse Object\n        scope (Scope): Scope object or None", "id": "f12455:m0"}
{"signature": "def get_version():", "body": "return '<STR_LIT:.>'.join(str(i) for i in VERSION[:<NUM_LIT:3>])<EOL>", "docstring": "Returns only digit parts of version.", "id": "f12465:m1"}
{"signature": "def get_user_ip(request):", "body": "ip = get_real_ip(request)<EOL>if ip is None:<EOL><INDENT>ip = get_ip(request)<EOL>if ip is None:<EOL><INDENT>ip = '<STR_LIT:127.0.0.1>'<EOL><DEDENT><DEDENT>return ip<EOL>", "docstring": "Return user ip\n\n    :param request: Django request object\n    :return: user ip", "id": "f12466:m0"}
{"signature": "def __init__(self, **kwargs):", "body": "api_key = kwargs.get('<STR_LIT>', None)<EOL>api_host = kwargs.get('<STR_LIT>', None)<EOL>api_timeout = kwargs.get('<STR_LIT>', None)<EOL>use_cache = kwargs.get('<STR_LIT>', None)<EOL>cache_backend = kwargs.get('<STR_LIT>', None)<EOL>cache_timeout = kwargs.get('<STR_LIT>', None)<EOL>no_api_key = True<EOL>if hasattr(settings, '<STR_LIT>'):<EOL><INDENT>self._api_key = api_key if api_key else settings.CACHED_HTTPBL_API_KEY<EOL>if self._api_key is not None:<EOL><INDENT>no_api_key = False<EOL><DEDENT><DEDENT>if no_api_key:<EOL><INDENT>raise ImproperlyConfigured(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>self._last_result = None<EOL>self._api_host = api_host if api_host else settings.CACHED_HTTPBL_API_HOST<EOL>self._api_timeout = api_timeout if api_timeout else settings.CACHED_HTTPBL_API_TIMEOUT<EOL>self._use_cache = use_cache if use_cache else settings.CACHED_HTTPBL_USE_CACHE<EOL>self._cache_backend = cache_backend if cache_backend else settings.CACHED_HTTPBL_CACHE_BACKEND<EOL>self._cache_timeout = cache_timeout if cache_timeout else settings.CACHED_HTTPBL_CACHE_TIMEOUT<EOL>self._cache_version = <NUM_LIT:1><EOL>if self._use_cache and self._cache_backend is None:<EOL><INDENT>self._cache_backend = '<STR_LIT:default>'<EOL><DEDENT>if self._use_cache:<EOL><INDENT>try:<EOL><INDENT>self._cache = cache.caches[self._cache_backend]<EOL>try:<EOL><INDENT>self._cache_version = int(self._cache.get('<STR_LIT>'.format(self._api_key)))<EOL><DEDENT>except TypeError:<EOL><INDENT>self._cache.set('<STR_LIT>'.format(self._api_key), str(<NUM_LIT:1>))<EOL><DEDENT><DEDENT>except cache.InvalidCacheBackendError:<EOL><INDENT>raise ImproperlyConfigured('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Instantiate the CachedHTTPBL object.\n\n:param kwargs: optional parameters\n:return:", "id": "f12468:c0:m0"}
{"signature": "def handler(self,):", "body": "if self.oauth_version == '<STR_LIT>':<EOL><INDENT>request_token, request_token_secret = self.oauth.get_request_token(params={'<STR_LIT>': self.callback_uri})<EOL>logger.debug(\"<STR_LIT>\".format(request_token, request_token_secret))<EOL>authorize_url = self.oauth.get_authorize_url(request_token)<EOL><DEDENT>else:<EOL><INDENT>authorize_url = self.oauth.get_authorize_url(client_secret=self.consumer_secret, redirect_uri=self.callback_uri, response_type='<STR_LIT:code>')<EOL><DEDENT>logger.debug(\"<STR_LIT>\".format(authorize_url))<EOL>webbrowser.open(authorize_url)<EOL>self.verifier = input(\"<STR_LIT>\")<EOL>self.token_time = time.time()<EOL>credentials = {'<STR_LIT>': self.token_time}<EOL>if self.oauth_version == '<STR_LIT>':<EOL><INDENT>raw_access = self.oauth.get_raw_access_token(request_token, request_token_secret, params={\"<STR_LIT>\": self.verifier})<EOL>parsed_access = parse_utf8_qsl(raw_access.content)<EOL>self.access_token = parsed_access['<STR_LIT>']<EOL>self.access_token_secret = parsed_access['<STR_LIT>']<EOL>self.session_handle = parsed_access['<STR_LIT>']<EOL>self.guid = parsed_access['<STR_LIT>']<EOL>credentials.update({<EOL>'<STR_LIT>': self.access_token,<EOL>'<STR_LIT>': self.access_token_secret,<EOL>'<STR_LIT>': self.session_handle,<EOL>'<STR_LIT>': self.guid<EOL>})<EOL><DEDENT>else:<EOL><INDENT>headers = self.generate_oauth2_headers()<EOL>raw_access = self.oauth.get_raw_access_token(data={\"<STR_LIT:code>\": self.verifier, '<STR_LIT>': self.callback_uri,'<STR_LIT>':'<STR_LIT>'}, headers=headers)<EOL>credentials.update(self.oauth2_access_parser(raw_access))<EOL><DEDENT>return credentials<EOL>", "docstring": "* get request token if OAuth1\n            * Get user authorization\n            * Get access token", "id": "f12475:c0:m1"}
{"signature": "def __init__(self, oauth_version, consumer_key, consumer_secret, **kwargs):", "body": "self.oauth_version = oauth_version<EOL>data = {}<EOL>if kwargs.get('<STR_LIT>'):<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self.from_file = kwargs.get('<STR_LIT>')<EOL>data = get_data(self.from_file)<EOL>vars(self).update(data)<EOL><DEDENT>else:<EOL><INDENT>self.consumer_key = consumer_key<EOL>self.consumer_secret = consumer_secret<EOL><DEDENT>vars(self).update(kwargs)<EOL>self.oauth_version = oauth_version<EOL>self.callback_uri = vars(self).get('<STR_LIT>',CALLBACK_URI)<EOL>if self.oauth_version == '<STR_LIT>':<EOL><INDENT>service_params = {<EOL>'<STR_LIT>': self.consumer_key,<EOL>'<STR_LIT>' : self.consumer_secret,<EOL>'<STR_LIT>': services[self.oauth_version]['<STR_LIT>']<EOL>}<EOL><DEDENT>else:<EOL><INDENT>service_params = {<EOL>'<STR_LIT>': self.consumer_key,<EOL>'<STR_LIT>': self.consumer_secret<EOL>}<EOL><DEDENT>service_params.update({<EOL>'<STR_LIT:name>' : '<STR_LIT>',<EOL>'<STR_LIT>' : services[self.oauth_version]['<STR_LIT>'],<EOL>'<STR_LIT>' : services[self.oauth_version]['<STR_LIT>'],<EOL>'<STR_LIT>': vars(self).get('<STR_LIT>',None)<EOL>})<EOL>self.oauth = services[oauth_version]['<STR_LIT>'](**service_params)<EOL>if vars(self).get('<STR_LIT>') and vars(self).get('<STR_LIT>') and vars(self).get('<STR_LIT>'):<EOL><INDENT>if not self.token_is_valid():<EOL><INDENT>data.update(self.refresh_access_token())<EOL><DEDENT><DEDENT>elif vars(self).get('<STR_LIT>') and vars(self).get('<STR_LIT>') and vars(self).get('<STR_LIT>'):<EOL><INDENT>if not self.token_is_valid():<EOL><INDENT>data.update(self.refresh_access_token())<EOL><DEDENT><DEDENT>else:<EOL><INDENT>data.update(self.handler()) <EOL><DEDENT>if self.oauth_version == '<STR_LIT>':<EOL><INDENT>self.session = self.oauth.get_session((self.access_token, self.access_token_secret))<EOL><DEDENT>else:<EOL><INDENT>self.session = self.oauth.get_session(token=self.access_token)<EOL><DEDENT>write_data(data, 
vars(self).get('<STR_LIT>','<STR_LIT>'))<EOL>", "docstring": "consumer_key : client key\nconsumer_secret : client secret\naccess_token : access token\naccess_token_secret : access token secret\nfrom_file : file containing the credentials\nbase_url : Base url", "id": "f12475:c0:m0"}
{"signature": "def json_write_data(json_data, filename):", "body": "with open(filename, '<STR_LIT:w>') as fp:<EOL><INDENT>json.dump(json_data, fp, indent=<NUM_LIT:4>, sort_keys=True, ensure_ascii=False)<EOL>return True<EOL><DEDENT>return False<EOL>", "docstring": "Write json data into a file", "id": "f12477:m3"}
{"signature": "def get_data(filename):", "body": "name, ext = get_file_extension(filename)<EOL>func = json_get_data if ext == '<STR_LIT>' else yaml_get_data<EOL>return func(filename)<EOL>", "docstring": "Calls right function according to file extension", "id": "f12477:m1"}
{"signature": "def parse_header(head):", "body": "try:<EOL><INDENT>(fromcall, path) = head.split('<STR_LIT:>>', <NUM_LIT:1>)<EOL><DEDENT>except:<EOL><INDENT>raise ParseError(\"<STR_LIT>\")<EOL><DEDENT>if (not <NUM_LIT:1> <= len(fromcall) <= <NUM_LIT:9> or<EOL>not re.findall(r\"<STR_LIT>\", fromcall, re.I)):<EOL><INDENT>raise ParseError(\"<STR_LIT>\")<EOL><DEDENT>path = path.split('<STR_LIT:U+002C>')<EOL>if len(path[<NUM_LIT:0>]) == <NUM_LIT:0>:<EOL><INDENT>raise ParseError(\"<STR_LIT>\")<EOL><DEDENT>tocall = path[<NUM_LIT:0>]<EOL>path = path[<NUM_LIT:1>:]<EOL>validate_callsign(tocall, \"<STR_LIT>\")<EOL>for digi in path:<EOL><INDENT>if not re.findall(r\"<STR_LIT>\", digi, re.I):<EOL><INDENT>raise ParseError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>parsed = {<EOL>'<STR_LIT>': fromcall,<EOL>'<STR_LIT:to>': tocall,<EOL>'<STR_LIT:path>': path,<EOL>}<EOL>viacall = \"<STR_LIT>\"<EOL>if len(path) >= <NUM_LIT:2> and re.match(r\"<STR_LIT>\", path[-<NUM_LIT:2>]):<EOL><INDENT>viacall = path[-<NUM_LIT:1>]<EOL><DEDENT>parsed.update({'<STR_LIT>': viacall})<EOL>return parsed<EOL>", "docstring": "Parses the header part of packet\nReturns a dict", "id": "f12495:m1"}
{"signature": "def parse_comment_telemetry(text):", "body": "parsed = {}<EOL>match = re.findall(r\"<STR_LIT>\", text)<EOL>if match and len(match[<NUM_LIT:0>][<NUM_LIT:1>]) % <NUM_LIT:2> == <NUM_LIT:0>:<EOL><INDENT>text, telemetry, post = match[<NUM_LIT:0>]<EOL>text += post<EOL>temp = [<NUM_LIT:0>] * <NUM_LIT:7><EOL>for i in range(<NUM_LIT:7>):<EOL><INDENT>temp[i] = base91.to_decimal(telemetry[i*<NUM_LIT:2>:i*<NUM_LIT:2>+<NUM_LIT:2>])<EOL><DEDENT>parsed.update({<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': temp[<NUM_LIT:0>],<EOL>'<STR_LIT>': temp[<NUM_LIT:1>:<NUM_LIT:6>]<EOL>}<EOL>})<EOL>if temp[<NUM_LIT:6>] != '<STR_LIT>':<EOL><INDENT>parsed['<STR_LIT>'].update({<EOL>'<STR_LIT>': \"<STR_LIT>\".format(temp[<NUM_LIT:6>] & <NUM_LIT>)[::-<NUM_LIT:1>]<EOL>})<EOL><DEDENT><DEDENT>return (text, parsed)<EOL>", "docstring": "Looks for base91 telemetry found in comment field\nReturns [remaining_text, telemetry]", "id": "f12498:m0"}
{"signature": "def close(self):", "body": "self._connected = False<EOL>self.buf = b'<STR_LIT>'<EOL>if self.sock is not None:<EOL><INDENT>self.sock.close()<EOL><DEDENT>", "docstring": "Closes the socket\nCalled internally when Exceptions are raised", "id": "f12502:c0:m6"}
{"signature": "def connect(self, blocking=False, retry=<NUM_LIT:30>):", "body": "if self._connected:<EOL><INDENT>return<EOL><DEDENT>while True:<EOL><INDENT>try:<EOL><INDENT>self._connect()<EOL>if not self.skip_login:<EOL><INDENT>self._send_login()<EOL><DEDENT>break<EOL><DEDENT>except (LoginError, ConnectionError):<EOL><INDENT>if not blocking:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>self.logger.info(\"<STR_LIT>\" % retry)<EOL>time.sleep(retry)<EOL><DEDENT>", "docstring": "Initiate connection to APRS server and attempt to login\n\nblocking = False     - Should we block until connected and logged-in\nretry = 30           - Retry interval in seconds", "id": "f12502:c0:m5"}
{"signature": "def _connect(self):", "body": "self.logger.info(\"<STR_LIT>\", self.server[<NUM_LIT:0>], self.server[<NUM_LIT:1>])<EOL>try:<EOL><INDENT>self._open_socket()<EOL>peer = self.sock.getpeername()<EOL>self.logger.info(\"<STR_LIT>\", str(peer))<EOL>self.sock.setblocking(<NUM_LIT:1>)<EOL>self.sock.settimeout(<NUM_LIT:5>)<EOL>self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, <NUM_LIT:1>)<EOL>banner = self.sock.recv(<NUM_LIT>)<EOL>if is_py3:<EOL><INDENT>banner = banner.decode('<STR_LIT>')<EOL><DEDENT>if banner[<NUM_LIT:0>] == \"<STR_LIT:#>\":<EOL><INDENT>self.logger.debug(\"<STR_LIT>\", banner.rstrip())<EOL><DEDENT>else:<EOL><INDENT>raise ConnectionError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>except ConnectionError as e:<EOL><INDENT>self.logger.error(str(e))<EOL>self.close()<EOL>raise<EOL><DEDENT>except (socket.error, socket.timeout) as e:<EOL><INDENT>self.close()<EOL>self.logger.error(\"<STR_LIT>\" % str(e))<EOL>if str(e) == \"<STR_LIT>\":<EOL><INDENT>raise ConnectionError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>raise ConnectionError(e)<EOL><DEDENT><DEDENT>self._connected = True<EOL>", "docstring": "Attemps connection to the server", "id": "f12502:c0:m10"}
{"signature": "def _send_login(self):", "body": "login_str = \"<STR_LIT>\"<EOL>login_str = login_str.format(<EOL>self.callsign,<EOL>self.passwd,<EOL>(\"<STR_LIT>\" + self.filter) if self.filter != \"<STR_LIT>\" else \"<STR_LIT>\",<EOL>__version__<EOL>)<EOL>self.logger.info(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>self._sendall(login_str)<EOL>self.sock.settimeout(<NUM_LIT:5>)<EOL>test = self.sock.recv(len(login_str) + <NUM_LIT:100>)<EOL>if is_py3:<EOL><INDENT>test = test.decode('<STR_LIT>')<EOL><DEDENT>test = test.rstrip()<EOL>self.logger.debug(\"<STR_LIT>\", test)<EOL>_, _, callsign, status, _ = test.split('<STR_LIT:U+0020>', <NUM_LIT:4>)<EOL>if callsign == \"<STR_LIT>\":<EOL><INDENT>raise LoginError(\"<STR_LIT>\")<EOL><DEDENT>if callsign != self.callsign:<EOL><INDENT>raise LoginError(\"<STR_LIT>\" % test)<EOL><DEDENT>if status != \"<STR_LIT>\" and self.passwd != \"<STR_LIT>\":<EOL><INDENT>raise LoginError(\"<STR_LIT>\")<EOL><DEDENT>if self.passwd == \"<STR_LIT>\":<EOL><INDENT>self.logger.info(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>self.logger.info(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>except LoginError as e:<EOL><INDENT>self.logger.error(str(e))<EOL>self.close()<EOL>raise<EOL><DEDENT>except:<EOL><INDENT>self.close()<EOL>self.logger.error(\"<STR_LIT>\")<EOL>raise LoginError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Sends login string to server", "id": "f12502:c0:m11"}
{"signature": "def to_decimal(text):", "body": "if not isinstance(text, string_type):<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % type(text))<EOL><DEDENT>if findall(r\"<STR_LIT>\", text):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>text = text.lstrip('<STR_LIT:!>')<EOL>decimal = <NUM_LIT:0><EOL>length = len(text) - <NUM_LIT:1><EOL>for i, char in enumerate(text):<EOL><INDENT>decimal += (ord(char) - <NUM_LIT>) * (<NUM_LIT> ** (length - i))<EOL><DEDENT>return decimal if text != '<STR_LIT>' else <NUM_LIT:0><EOL>", "docstring": "Takes a base91 char string and returns decimal", "id": "f12504:m0"}
{"signature": "def get_params_for_field(self, field_name, sort_type=None):", "body": "if not sort_type:<EOL><INDENT>if self.initial_sort == field_name:<EOL><INDENT>sort_type = '<STR_LIT>' if self.initial_sort_type == '<STR_LIT>' else '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>sort_type = '<STR_LIT>'<EOL><DEDENT><DEDENT>self.initial_params[self.sort_param_name] = self.sort_fields[field_name]<EOL>self.initial_params[self.sort_type_param_name] = sort_type<EOL>return '<STR_LIT>' % self.initial_params.urlencode()<EOL>", "docstring": "If sort_type is None - inverse current sort for field, if no sorted - use asc", "id": "f12511:c1:m2"}
{"signature": "def try_convert_to_date(self, word):", "body": "for frm in self.search_date_formats:<EOL><INDENT>try:<EOL><INDENT>return datetime.datetime.strptime(word, frm).date()<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Tries to convert word to date(datetime) using search_date_formats\nReturn None if word fits no one format", "id": "f12511:c0:m2"}
{"signature": "def get_inline_model(self):", "body": "return self.inline_model<EOL>", "docstring": "Returns the inline model to use with the inline formset", "id": "f12513:c4:m0"}
{"signature": "def get(self, request, *args, **kwargs):", "body": "formset = self.construct_formset()<EOL>return self.render_to_response(self.get_context_data(formset=formset))<EOL>", "docstring": "Handles GET requests and instantiates a blank version of the formset.", "id": "f12513:c7:m0"}
{"signature": "def get_factory_kwargs(self):", "body": "kwargs = super(BaseInlineFormSetFactory, self).get_factory_kwargs()<EOL>kwargs.setdefault('<STR_LIT>', self.fields)<EOL>kwargs.setdefault('<STR_LIT>', self.exclude)<EOL>if self.get_form_class():<EOL><INDENT>kwargs['<STR_LIT>'] = self.get_form_class()<EOL><DEDENT>return kwargs<EOL>", "docstring": "Returns the keyword arguments for calling the formset factory", "id": "f12513:c4:m2"}
{"signature": "def get_formset_class(self):", "body": "return self.formset_class<EOL>", "docstring": "Returns the formset class to use in the formset factory", "id": "f12513:c0:m3"}
{"signature": "def get_formset(self):", "body": "return formset_factory(self.get_form_class(), **self.get_factory_kwargs())<EOL>", "docstring": "Returns the formset class from the formset factory", "id": "f12513:c0:m5"}
{"signature": "def get_factory_kwargs(self):", "body": "<EOL>for attr in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>if hasattr(self, attr):<EOL><INDENT>klass = type(self).__name__<EOL>raise DeprecationWarning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(klass, attr)<EOL>)<EOL><DEDENT><DEDENT>kwargs = self.factory_kwargs.copy()<EOL>if self.get_formset_class():<EOL><INDENT>kwargs['<STR_LIT>'] = self.get_formset_class()<EOL><DEDENT>return kwargs<EOL>", "docstring": "Returns the keyword arguments for calling the formset factory", "id": "f12513:c0:m7"}
{"signature": "def formset_valid(self, formset):", "body": "self.object_list = formset.save()<EOL>return super(ModelFormSetMixin, self).formset_valid(formset)<EOL>", "docstring": "If the formset is valid, save the associated models.", "id": "f12513:c3:m3"}
{"signature": "def post(self, request, *args, **kwargs):", "body": "formset = self.construct_formset()<EOL>if formset.is_valid():<EOL><INDENT>return self.formset_valid(formset)<EOL><DEDENT>else:<EOL><INDENT>return self.formset_invalid(formset)<EOL><DEDENT>", "docstring": "Handles POST requests, instantiating a formset instance with the passed\nPOST variables and then checked for validity.", "id": "f12513:c7:m1"}
{"signature": "def get_form_class(self):", "body": "return self.form_class<EOL>", "docstring": "Returns the form class to use with the formset in this view", "id": "f12513:c0:m4"}
{"signature": "def formset_invalid(self, formset):", "body": "return self.render_to_response(self.get_context_data(formset=formset))<EOL>", "docstring": "If the formset is invalid, re-render the context data with the\ndata-filled formset and errors.", "id": "f12513:c2:m2"}
{"signature": "def get_formset_kwargs(self):", "body": "kwargs = super(ModelFormSetMixin, self).get_formset_kwargs()<EOL>kwargs['<STR_LIT>'] = self.get_queryset()<EOL>return kwargs<EOL>", "docstring": "Returns the keyword arguments for instantiating the formset.", "id": "f12513:c3:m0"}
{"signature": "def daterange(start_date, end_date):", "body": "for n in range(int((end_date - start_date).days + <NUM_LIT:1>)):<EOL><INDENT>yield start_date + datetime.timedelta(n)<EOL><DEDENT>", "docstring": "Returns an iterator of dates between two provided ones", "id": "f12514:m0"}
{"signature": "def get_end_date(self, obj):", "body": "obj_date = getattr(obj, self.get_end_date_field())<EOL>try:<EOL><INDENT>obj_date = obj_date.date()<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>return obj_date<EOL>", "docstring": "Returns the end date for a model instance", "id": "f12514:c0:m4"}
{"signature": "def get_first_of_week(self):", "body": "if self.first_of_week is None:<EOL><INDENT>raise ImproperlyConfigured(\"<STR_LIT>\" % self.__class__.__name__)<EOL><DEDENT>if self.first_of_week not in range(<NUM_LIT:7>):<EOL><INDENT>raise ImproperlyConfigured(\"<STR_LIT>\" % self.__class__.__name__)<EOL><DEDENT>return self.first_of_week<EOL>", "docstring": "Returns an integer representing the first day of the week.\n\n0 represents Monday, 6 represents Sunday.", "id": "f12514:c0:m5"}
{"signature": "def get_queryset(self):", "body": "qs = super(BaseCalendarMonthView, self).get_queryset()<EOL>year = self.get_year()<EOL>month = self.get_month()<EOL>date_field = self.get_date_field()<EOL>end_date_field = self.get_end_date_field()<EOL>date = _date_from_string(year, self.get_year_format(),<EOL>month, self.get_month_format())<EOL>since = date<EOL>until = self.get_next_month(date)<EOL>if since.weekday() != self.get_first_of_week():<EOL><INDENT>diff = math.fabs(since.weekday() - self.get_first_of_week())<EOL>since = since - datetime.timedelta(days=diff)<EOL><DEDENT>if until.weekday() != ((self.get_first_of_week() + <NUM_LIT:6>) % <NUM_LIT:7>):<EOL><INDENT>diff = math.fabs(((self.get_first_of_week() + <NUM_LIT:6>) % <NUM_LIT:7>) - until.weekday())<EOL>until = until + datetime.timedelta(days=diff)<EOL><DEDENT>if end_date_field:<EOL><INDENT>predicate1 = Q(**{<EOL>'<STR_LIT>' % date_field: since,<EOL>end_date_field: None<EOL>})<EOL>predicate2 = Q(**{<EOL>'<STR_LIT>' % date_field: since,<EOL>'<STR_LIT>' % end_date_field: until<EOL>})<EOL>predicate3 = Q(**{<EOL>'<STR_LIT>' % date_field: since,<EOL>'<STR_LIT>' % end_date_field: since,<EOL>'<STR_LIT>' % end_date_field: until<EOL>})<EOL>predicate4 = Q(**{<EOL>'<STR_LIT>' % date_field: since,<EOL>'<STR_LIT>' % date_field: until,<EOL>'<STR_LIT>' % end_date_field: until<EOL>})<EOL>predicate5 = Q(**{<EOL>'<STR_LIT>' % date_field: since,<EOL>'<STR_LIT>' % end_date_field: until<EOL>})<EOL>return qs.filter(predicate1 | predicate2 | predicate3 | predicate4 | predicate5)<EOL><DEDENT>return qs.filter(**{<EOL>'<STR_LIT>' % date_field: since<EOL>})<EOL>", "docstring": "Returns a queryset of models for the month requested", "id": "f12514:c0:m6"}
{"signature": "def get_context_data(self, **kwargs):", "body": "data = super(BaseCalendarMonthView, self).get_context_data(**kwargs)<EOL>year = self.get_year()<EOL>month = self.get_month()<EOL>date = _date_from_string(year, self.get_year_format(),<EOL>month, self.get_month_format())<EOL>cal = Calendar(self.get_first_of_week())<EOL>month_calendar = []<EOL>now = datetime.datetime.utcnow()<EOL>date_lists = defaultdict(list)<EOL>multidate_objs = []<EOL>for obj in data['<STR_LIT>']:<EOL><INDENT>obj_date = self.get_start_date(obj)<EOL>end_date_field = self.get_end_date_field()<EOL>if end_date_field:<EOL><INDENT>end_date = self.get_end_date(obj)<EOL>if end_date and end_date != obj_date:<EOL><INDENT>multidate_objs.append({<EOL>'<STR_LIT>': obj,<EOL>'<STR_LIT>': [x for x in daterange(obj_date, end_date)]<EOL>})<EOL>continue  <EOL><DEDENT><DEDENT>date_lists[obj_date].append(obj)<EOL><DEDENT>for week in cal.monthdatescalendar(date.year, date.month):<EOL><INDENT>week_range = set(daterange(week[<NUM_LIT:0>], week[<NUM_LIT:6>]))<EOL>week_events = []<EOL>for val in multidate_objs:<EOL><INDENT>intersect_length = len(week_range.intersection(val['<STR_LIT>']))<EOL>if intersect_length:<EOL><INDENT>slot = <NUM_LIT:1><EOL>width = intersect_length  <EOL>nowrap_previous = True  <EOL>nowrap_next = True  <EOL>if val['<STR_LIT>'][<NUM_LIT:0>] >= week[<NUM_LIT:0>]:<EOL><INDENT>slot = <NUM_LIT:1> + (val['<STR_LIT>'][<NUM_LIT:0>] - week[<NUM_LIT:0>]).days<EOL><DEDENT>else:<EOL><INDENT>nowrap_previous = False<EOL><DEDENT>if val['<STR_LIT>'][-<NUM_LIT:1>] > week[<NUM_LIT:6>]:<EOL><INDENT>nowrap_next = False<EOL><DEDENT>week_events.append({<EOL>'<STR_LIT>': val['<STR_LIT>'],<EOL>'<STR_LIT>': slot,<EOL>'<STR_LIT:width>': width,<EOL>'<STR_LIT>': nowrap_previous,<EOL>'<STR_LIT>': nowrap_next,<EOL>})<EOL><DEDENT><DEDENT>week_calendar = {<EOL>'<STR_LIT>': week_events,<EOL>'<STR_LIT>': [],<EOL>}<EOL>for day in week:<EOL><INDENT>week_calendar['<STR_LIT>'].append({<EOL>'<STR_LIT>': day,<EOL>'<STR_LIT>': 
date_lists[day],<EOL>'<STR_LIT>': day == now.date(),<EOL>'<STR_LIT>': day.month == date.month,<EOL>})<EOL><DEDENT>month_calendar.append(week_calendar)<EOL><DEDENT>data['<STR_LIT>'] = month_calendar<EOL>data['<STR_LIT>'] = [DAYS[x] for x in cal.iterweekdays()]<EOL>data['<STR_LIT>'] = date<EOL>data['<STR_LIT>'] = self.get_next_month(date)<EOL>data['<STR_LIT>'] = self.get_previous_month(date)<EOL>return data<EOL>", "docstring": "Injects variables necessary for rendering the calendar into the context.\n\nVariables added are: `calendar`, `weekdays`, `month`, `next_month` and `previous_month`.", "id": "f12514:c0:m7"}
{"signature": "def forms_valid(self, form, inlines):", "body": "response = self.form_valid(form)<EOL>for formset in inlines:<EOL><INDENT>formset.save()<EOL><DEDENT>return response<EOL>", "docstring": "If the form and formsets are valid, save the associated models.", "id": "f12515:c2:m1"}
{"signature": "def construct_inlines(self):", "body": "inline_formsets = []<EOL>for inline_class in self.get_inlines():<EOL><INDENT>inline_instance = inline_class(self.model, self.request, self.object, self.kwargs, self)<EOL>inline_formset = inline_instance.construct_formset()<EOL>inline_formsets.append(inline_formset)<EOL><DEDENT>return inline_formsets<EOL>", "docstring": "Returns the inline formset instances", "id": "f12515:c2:m3"}
{"signature": "def construct_formset(self):", "body": "formset = super(InlineFormSetFactory, self).construct_formset()<EOL>formset.model = self.inline_model<EOL>return formset<EOL>", "docstring": "Overrides construct_formset to attach the model class as\nan attribute of the returned formset instance.", "id": "f12515:c0:m1"}
{"signature": "def get_inlines(self):", "body": "return self.inlines<EOL>", "docstring": "Returns the inline formset classes", "id": "f12515:c2:m0"}
{"signature": "def connect(self):", "body": "return self.response<EOL>", "docstring": "Connect to http/https server.", "id": "f12528:c0:m1"}
{"signature": "def execute(self, command, timeout=None):", "body": "try:<EOL><INDENT>self.channel = self.ssh.get_transport().open_session()<EOL><DEDENT>except paramiko.SSHException as e:<EOL><INDENT>self.unknown(\"<STR_LIT>\" % e)<EOL><DEDENT>try:<EOL><INDENT>self.channel.settimeout(self.args.timeout if not timeout else timeout)<EOL><DEDENT>except socket.timeout as e:<EOL><INDENT>self.unknown(\"<STR_LIT>\" % e)<EOL><DEDENT>try:<EOL><INDENT>self.logger.debug(\"<STR_LIT>\".format(command))<EOL>self.channel.exec_command(command)<EOL><DEDENT>except paramiko.SSHException as e:<EOL><INDENT>self.unknown(\"<STR_LIT>\" % e)<EOL><DEDENT>try:<EOL><INDENT>self.stdin = self.channel.makefile('<STR_LIT:wb>', -<NUM_LIT:1>)<EOL>self.stderr = map(string.strip, self.channel.makefile_stderr('<STR_LIT:rb>', -<NUM_LIT:1>).readlines())<EOL>self.stdout = map(string.strip, self.channel.makefile('<STR_LIT:rb>', -<NUM_LIT:1>).readlines())<EOL><DEDENT>except Exception as e:<EOL><INDENT>self.unknown(\"<STR_LIT>\" % e)<EOL><DEDENT>try:<EOL><INDENT>self.status = self.channel.recv_exit_status()<EOL><DEDENT>except paramiko.SSHException as e:<EOL><INDENT>self.unknown(\"<STR_LIT>\" % e)<EOL><DEDENT>else:<EOL><INDENT>if self.status != <NUM_LIT:0>:<EOL><INDENT>self.unknown(\"<STR_LIT>\" % (self.status, self.errors))<EOL><DEDENT>else:<EOL><INDENT>return self.stdout<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>self.logger.debug(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Execute a shell command.", "id": "f12530:c0:m1"}
{"signature": "def close(self):", "body": "try:<EOL><INDENT>self.ssh.close()<EOL>self.logger.debug(\"<STR_LIT>\")<EOL><DEDENT>except paramiko.SSHException as e:<EOL><INDENT>self.unknown(\"<STR_LIT>\" % e)<EOL><DEDENT>", "docstring": "Close and exit the connection.", "id": "f12530:c0:m2"}
{"signature": "def query(self, wql):", "body": "try:<EOL><INDENT>self.__wql = ['<STR_LIT>', '<STR_LIT>',<EOL>self.args.domain + '<STR_LIT:\\\\>' + self.args.user + '<STR_LIT:%>' + self.args.password,<EOL>'<STR_LIT>' + self.args.host,<EOL>'<STR_LIT>', self.args.namespace,<EOL>'<STR_LIT>', self.args.delimiter,<EOL>wql]<EOL>self.logger.debug(\"<STR_LIT>\".format(self.__wql))<EOL>self.__output = subprocess.check_output(self.__wql)<EOL>self.logger.debug(\"<STR_LIT>\".format(self.__output))<EOL>self.logger.debug(\"<STR_LIT>\")<EOL>self.__wmi_output = self.__output.splitlines()[<NUM_LIT:1>:]<EOL>self.logger.debug(\"<STR_LIT>\".format(self.__wmi_output))<EOL>self.__csv_header = csv.DictReader(self.__wmi_output, delimiter='<STR_LIT:|>')<EOL>self.logger.debug(\"<STR_LIT>\".format(self.__csv_header))<EOL>return list(self.__csv_header)<EOL><DEDENT>except subprocess.CalledProcessError as e:<EOL><INDENT>self.unknown(\"<STR_LIT>\" % e)<EOL><DEDENT>", "docstring": "Connect by wmi and run wql.", "id": "f12531:c0:m1"}
{"signature": "def connect(self):", "body": "return self.ftp<EOL>", "docstring": "Connect to ftp server.", "id": "f12532:c0:m1"}
{"signature": "def close(self):", "body": "try:<EOL><INDENT>self.conn.close()<EOL>self.logger.debug(\"<STR_LIT>\")<EOL><DEDENT>except pymysql.Error as e:<EOL><INDENT>self.unknown(\"<STR_LIT>\" % e)<EOL><DEDENT>", "docstring": "Close the connection.", "id": "f12534:c0:m2"}
{"signature": "def command_handle(self):", "body": "self.__results = self.execute(self.args.command)<EOL>self.close()<EOL>self.logger.debug(\"<STR_LIT>\".format(self.__results))<EOL>if not self.__results:<EOL><INDENT>self.unknown(\"<STR_LIT>\".format(self.args.command))<EOL><DEDENT>if len(self.__results) != <NUM_LIT:1>:<EOL><INDENT>self.unknown(<EOL>\"<STR_LIT>\".format(<EOL>self.args.command))<EOL><DEDENT>self.__result = int(self.__results[<NUM_LIT:0>])<EOL>self.logger.debug(\"<STR_LIT>\".format(self.__result))<EOL>if not isinstance(self.__result, (int, long)):<EOL><INDENT>self.unknown(<EOL>\"<STR_LIT>\".format(<EOL>self.args.command))<EOL><DEDENT>status = self.ok<EOL>if self.__result > self.args.warning:<EOL><INDENT>status = self.warning<EOL><DEDENT>if self.__result > self.args.critical:<EOL><INDENT>status = self.critical<EOL><DEDENT>self.shortoutput = \"<STR_LIT>\".format(<EOL>self.args.command, self.__result)<EOL>[self.longoutput.append(line)<EOL>for line in self.__results if self.__results]<EOL>self.perfdata.append(\"<STR_LIT>\".format(<EOL>crit=self.args.critical,<EOL>warn=self.args.warning,<EOL>result=self.__result,<EOL>command=self.args.command))<EOL>status(self.output(long_output_limit=None))<EOL>self.logger.debug(\"<STR_LIT>\")<EOL>", "docstring": "Get the number of the shell command.", "id": "f12538:c0:m2"}
{"signature": "def main():", "body": "plugin = Register()<EOL>if plugin.args.option == '<STR_LIT>':<EOL><INDENT>plugin.command_handle()<EOL><DEDENT>else:<EOL><INDENT>plugin.unknown(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Register your own mode and handle method here.", "id": "f12538:m0"}
{"signature": "def main():", "body": "plugin = Register()<EOL>if plugin.args.option == '<STR_LIT>':<EOL><INDENT>plugin.sql_handle()<EOL><DEDENT>else:<EOL><INDENT>plugin.unknown(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Register your own mode and handle method here.", "id": "f12539:m0"}
{"signature": "def fileage_handle(self):", "body": "self.file_list = []<EOL>self.ok_file = []<EOL>self.warn_file = []<EOL>self.crit_file = []<EOL>status = self.ok<EOL>if self.args.recursion:<EOL><INDENT>self.__file_list = self.__get_folder(self.args.path)<EOL><DEDENT>else:<EOL><INDENT>self.__file_list = self.__get_file(self.args.path)<EOL><DEDENT>self.logger.debug(\"<STR_LIT>\".format(self.__file_list))<EOL>for file_dict in self.__file_list:<EOL><INDENT>self.filename = file_dict.get('<STR_LIT:Name>')<EOL>if self.filename and self.filename != '<STR_LIT:Name>':<EOL><INDENT>self.logger.debug(<EOL>\"<STR_LIT>\".format(<EOL>self.filename))<EOL>self.file_datetime_string = file_dict.get(<EOL>'<STR_LIT>').split('<STR_LIT:.>')[<NUM_LIT:0>]<EOL>self.file_datetime = datetime.datetime.strptime(<EOL>self.file_datetime_string, '<STR_LIT>')<EOL>self.logger.debug(<EOL>\"<STR_LIT>\".format(<EOL>self.file_datetime))<EOL>self.current_datetime = self.__get_current_datetime()<EOL>self.logger.debug(<EOL>\"<STR_LIT>\".format(<EOL>self.current_datetime))<EOL>self.__delta_datetime = self.current_datetime - self.file_datetime<EOL>self.logger.debug(<EOL>\"<STR_LIT>\".format(<EOL>self.__delta_datetime))<EOL>self.logger.debug(<EOL>\"<STR_LIT>\".format(<EOL>datetime.timedelta(<EOL>minutes=self.args.warning)))<EOL>self.logger.debug(<EOL>\"<STR_LIT>\".format(<EOL>datetime.timedelta(<EOL>minutes=self.args.critical)))<EOL>if self.__delta_datetime > datetime.timedelta(<EOL>minutes=self.args.critical):<EOL><INDENT>self.crit_file.append(self.filename)<EOL><DEDENT>elif self.__delta_datetime > datetime.timedelta(minutes=self.args.warning):<EOL><INDENT>self.warn_file.append(self.filename)<EOL><DEDENT>else:<EOL><INDENT>self.ok_file.append(self.filename)<EOL><DEDENT><DEDENT><DEDENT>if self.crit_file:<EOL><INDENT>status = self.critical<EOL><DEDENT>elif self.warn_file:<EOL><INDENT>status = self.warning<EOL><DEDENT>else:<EOL><INDENT>status = self.ok<EOL><DEDENT>self.shortoutput = 
\"<STR_LIT>\".format(<EOL>len(self.crit_file))<EOL>if self.crit_file:<EOL><INDENT>self.longoutput.append(\"<STR_LIT>\")<EOL><DEDENT>[self.longoutput.append(filename)<EOL>for filename in self.crit_file if self.crit_file]<EOL>if self.warn_file:<EOL><INDENT>self.longoutput.append(\"<STR_LIT>\")<EOL><DEDENT>[self.longoutput.append(filename)<EOL>for filename in self.warn_file if self.warn_file]<EOL>if self.ok_file:<EOL><INDENT>self.longoutput.append(\"<STR_LIT>\")<EOL><DEDENT>[self.longoutput.append(filename)<EOL>for filename in self.ok_file if self.ok_file]<EOL>self.perfdata.append(\"<STR_LIT>\".format(<EOL>crit=self.args.critical,<EOL>warn=self.args.warning,<EOL>result=len(self.crit_file),<EOL>path=self.args.drive + self.args.path))<EOL>status(self.output(long_output_limit=None))<EOL>self.logger.debug(\"<STR_LIT>\")<EOL>", "docstring": "Get the number of file in the folder.", "id": "f12540:c1:m5"}
{"signature": "def __get_current_datetime(self):", "body": "self.wql_time = \"<STR_LIT>\"<EOL>self.current_time = self.query(self.wql_time)<EOL>self.current_time_string = str(<EOL>self.current_time[<NUM_LIT:0>].get('<STR_LIT>').split('<STR_LIT:.>')[<NUM_LIT:0>])<EOL>self.current_time_format = datetime.datetime.strptime(<EOL>self.current_time_string, '<STR_LIT>')<EOL>return self.current_time_format<EOL>", "docstring": "Get current datetime for every file.", "id": "f12542:c1:m4"}
{"signature": "def main():", "body": "plugin = Register()<EOL>if plugin.args.option == '<STR_LIT>':<EOL><INDENT>plugin.filenumber_handle()<EOL><DEDENT>else:<EOL><INDENT>plugin.unknown(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Register your own mode and handle method here.", "id": "f12543:m0"}
{"signature": "def main():", "body": "plugin = Register()<EOL>if plugin.args.option == '<STR_LIT>':<EOL><INDENT>plugin.sqlserverlocks_handle()<EOL><DEDENT>else:<EOL><INDENT>plugin.unknown(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Register your own mode and handle method here.", "id": "f12544:m0"}
{"signature": "def main():", "body": "plugin = Register()<EOL>if plugin.args.option == '<STR_LIT>':<EOL><INDENT>plugin.sql_handle()<EOL><DEDENT>elif plugin.args.option == '<STR_LIT>':<EOL><INDENT>plugin.database_used_handle()<EOL><DEDENT>elif plugin.args.option == '<STR_LIT>':<EOL><INDENT>plugin.database_log_used_handle()<EOL><DEDENT>else:<EOL><INDENT>plugin.unknown(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Register your own mode and handle method here.", "id": "f12545:m0"}
{"signature": "@cli.command()<EOL>def serve():", "body": "build_site(dev_mode=True)<EOL>serve_site()<EOL>", "docstring": "Serve the site for local testing and editing.", "id": "f12547:m59"}
{"signature": "@cli.command()<EOL>def scaffold():", "body": "click.echo(\"<STR_LIT>\")<EOL>title = click.prompt(\"<STR_LIT>\")<EOL>url = click.prompt(\"<STR_LIT>\")<EOL>click.echo(\"<STR_LIT>\" % url)<EOL>", "docstring": "Start a new site.", "id": "f12547:m55"}
{"signature": "@cli.command()<EOL>def list():", "body": "click.echo('<STR_LIT>')<EOL>", "docstring": "List all posts", "id": "f12547:m58"}
{"signature": "def pprint(data):", "body": "print(json.dumps(data, sort_keys=True, indent=<NUM_LIT:4>, separators=('<STR_LIT:U+002C>', '<STR_LIT>')))<EOL>", "docstring": "Alternative to `pprint.PrettyPrinter()` that uses `json.dumps()` for\n    sorting and displaying data.  \n\n    :param data: item to print to STDOUT.  The item must be json serializable!", "id": "f12549:m6"}
{"signature": "def time_zone_by_country_and_region(country_code, region_code=None):", "body": "timezone = country_dict.get(country_code)<EOL>if not timezone:<EOL><INDENT>return None<EOL><DEDENT>if isinstance(timezone, str):<EOL><INDENT>return timezone<EOL><DEDENT>return timezone.get(region_code)<EOL>", "docstring": "Returns time zone from country and region code.\n\n:arg country_code: Country code\n:arg region_code: Region code", "id": "f12577:m0"}
{"signature": "def str2fp(data):", "body": "return BytesIO(bytearray(data, const.ENCODING)) if const.PY3 else StringIO(data)<EOL>", "docstring": "Convert bytes data to file handle object (StringIO or BytesIO).\n\n:arg data: String data to transform", "id": "f12578:m1"}
{"signature": "def netspeed_by_addr(self, addr):", "body": "if self._databaseType == const.NETSPEED_EDITION:<EOL><INDENT>return const.NETSPEED_NAMES[self.id_by_addr(addr)]<EOL><DEDENT>elif self._databaseType in (const.NETSPEED_EDITION_REV1,<EOL>const.NETSPEED_EDITION_REV1_V6):<EOL><INDENT>ipnum = util.ip2long(addr)<EOL>return self._get_org(ipnum)<EOL><DEDENT>raise GeoIPError(<EOL>'<STR_LIT>')<EOL>", "docstring": "Returns NetSpeed name from address.\n\n:arg addr: IP address (e.g. 203.0.113.30)", "id": "f12580:c2:m12"}
{"signature": "def region_by_addr(self, addr):", "body": "if self._databaseType not in const.REGION_CITY_EDITIONS:<EOL><INDENT>message = '<STR_LIT>'<EOL>raise GeoIPError(message)<EOL><DEDENT>ipnum = util.ip2long(addr)<EOL>return self._get_region(ipnum)<EOL>", "docstring": "Returns dictionary containing `country_code` and `region_code`.\n\n:arg addr: IP address (e.g. 203.0.113.30)", "id": "f12580:c2:m20"}
{"signature": "def country_code_by_addr(self, addr):", "body": "VALID_EDITIONS = (const.COUNTRY_EDITION, const.COUNTRY_EDITION_V6)<EOL>if self._databaseType in VALID_EDITIONS:<EOL><INDENT>country_id = self.id_by_addr(addr)<EOL>return const.COUNTRY_CODES[country_id]<EOL><DEDENT>elif self._databaseType in const.REGION_CITY_EDITIONS:<EOL><INDENT>return self.region_by_addr(addr).get('<STR_LIT>')<EOL><DEDENT>raise GeoIPError('<STR_LIT>')<EOL>", "docstring": "Returns 2-letter country code (e.g. US) from IP address.\n\n:arg addr: IP address (e.g. 203.0.113.30)", "id": "f12580:c2:m10"}
{"signature": "def id_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)<EOL>return self.id_by_addr(addr)<EOL>", "docstring": "Returns the database ID for specified hostname.\nThe id might be useful as array index. 0 is unknown.\n\n:arg hostname: Hostname to get ID from.", "id": "f12580:c2:m7"}
{"signature": "def country_name_by_addr(self, addr):", "body": "VALID_EDITIONS = (const.COUNTRY_EDITION, const.COUNTRY_EDITION_V6)<EOL>if self._databaseType in VALID_EDITIONS:<EOL><INDENT>country_id = self.id_by_addr(addr)<EOL>return const.COUNTRY_NAMES[country_id]<EOL><DEDENT>elif self._databaseType in const.CITY_EDITIONS:<EOL><INDENT>return self.record_by_addr(addr).get('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>message = '<STR_LIT>'<EOL>raise GeoIPError(message)<EOL><DEDENT>", "docstring": "Returns full country name for specified IP address.\n\n:arg addr: IP address (e.g. 203.0.113.30)", "id": "f12580:c2:m14"}
{"signature": "def _gethostbyname(self, hostname):", "body": "if self._databaseType in const.IPV6_EDITIONS:<EOL><INDENT>response = socket.getaddrinfo(hostname, <NUM_LIT:0>, socket.AF_INET6)<EOL>family, socktype, proto, canonname, sockaddr = response[<NUM_LIT:0>]<EOL>address, port, flow, scope = sockaddr<EOL>return address<EOL><DEDENT>else:<EOL><INDENT>return socket.gethostbyname(hostname)<EOL><DEDENT>", "docstring": "Hostname lookup method, supports both IPv4 and IPv6.", "id": "f12580:c2:m6"}
{"signature": "def country_name_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)<EOL>return self.country_name_by_addr(addr)<EOL>", "docstring": "Returns full country name for specified hostname.\n\n:arg hostname: Hostname (e.g. example.com)", "id": "f12580:c2:m15"}
{"signature": "def record_by_addr(self, addr):", "body": "if self._databaseType not in const.CITY_EDITIONS:<EOL><INDENT>message = '<STR_LIT>'<EOL>raise GeoIPError(message)<EOL><DEDENT>ipnum = util.ip2long(addr)<EOL>rec = self._get_record(ipnum)<EOL>if not rec:<EOL><INDENT>return None<EOL><DEDENT>return rec<EOL>", "docstring": "Returns dictionary with city data containing `country_code`, `country_name`,\n`region`, `city`, `postal_code`, `latitude`, `longitude`, `dma_code`,\n`metro_code`, `area_code`, `region_code` and `time_zone`.\n\n:arg addr: IP address (e.g. 203.0.113.30)", "id": "f12580:c2:m18"}
{"signature": "def last_netmask(self):", "body": "return self._netmask<EOL>", "docstring": "Returns the netmask depth of the last lookup.", "id": "f12580:c2:m9"}
{"signature": "def time_zone_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)<EOL>return self.time_zone_by_addr(addr)<EOL>", "docstring": "Returns time zone in tzdata format (e.g. America/New_York or Europe/Paris)\n\n:arg hostname: Hostname (e.g. example.com)", "id": "f12580:c2:m23"}
{"signature": "def country_code_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)<EOL>return self.country_code_by_addr(addr)<EOL>", "docstring": "Returns 2-letter country code (e.g. US) from hostname.\n\n:arg hostname: Hostname (e.g. example.com)", "id": "f12580:c2:m11"}
{"signature": "def record_by_name(self, hostname):", "body": "addr = self._gethostbyname(hostname)<EOL>return self.record_by_addr(addr)<EOL>", "docstring": "Returns dictionary with city data containing `country_code`, `country_name`,\n`region`, `city`, `postal_code`, `latitude`, `longitude`, `dma_code`,\n`metro_code`, `area_code`, `region_code` and `time_zone`.\n\n:arg hostname: Hostname (e.g. example.com)", "id": "f12580:c2:m19"}
{"signature": "def _get_record(self, ipnum):", "body": "seek_country = self._seek_country(ipnum)<EOL>if seek_country == self._databaseSegments:<EOL><INDENT>return {}<EOL><DEDENT>read_length = (<NUM_LIT:2> * self._recordLength - <NUM_LIT:1>) * self._databaseSegments<EOL>try:<EOL><INDENT>self._lock.acquire()<EOL>self._fp.seek(seek_country + read_length, os.SEEK_SET)<EOL>buf = self._fp.read(const.FULL_RECORD_LENGTH)<EOL><DEDENT>finally:<EOL><INDENT>self._lock.release()<EOL><DEDENT>if PY3 and type(buf) is bytes:<EOL><INDENT>buf = buf.decode(ENCODING)<EOL><DEDENT>record = {<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None<EOL>}<EOL>latitude = <NUM_LIT:0><EOL>longitude = <NUM_LIT:0><EOL>char = ord(buf[<NUM_LIT:0>])<EOL>record['<STR_LIT>'] = const.COUNTRY_CODES[char]<EOL>record['<STR_LIT>'] = const.COUNTRY_CODES3[char]<EOL>record['<STR_LIT>'] = const.COUNTRY_NAMES[char]<EOL>record['<STR_LIT>'] = const.CONTINENT_NAMES[char]<EOL>def read_data(buf, pos):<EOL><INDENT>cur = pos<EOL>while buf[cur] != '<STR_LIT>':<EOL><INDENT>cur += <NUM_LIT:1><EOL><DEDENT>return cur, buf[pos:cur] if cur > pos else None<EOL><DEDENT>offset, record['<STR_LIT>'] = read_data(buf, <NUM_LIT:1>)<EOL>offset, record['<STR_LIT>'] = read_data(buf, offset + <NUM_LIT:1>)<EOL>offset, record['<STR_LIT>'] = read_data(buf, offset + <NUM_LIT:1>)<EOL>offset = offset + <NUM_LIT:1><EOL>for j in range(<NUM_LIT:3>):<EOL><INDENT>latitude += (ord(buf[offset + j]) << (j * <NUM_LIT:8>))<EOL><DEDENT>for j in range(<NUM_LIT:3>):<EOL><INDENT>longitude += (ord(buf[offset + j + <NUM_LIT:3>]) << (j * <NUM_LIT:8>))<EOL><DEDENT>record['<STR_LIT>'] = (latitude / <NUM_LIT>) - <NUM_LIT><EOL>record['<STR_LIT>'] = (longitude / <NUM_LIT>) - <NUM_LIT><EOL>if self._databaseType in (const.CITY_EDITION_REV1, const.CITY_EDITION_REV1_V6):<EOL><INDENT>if record['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>dma_area = <NUM_LIT:0><EOL>for j in range(<NUM_LIT:3>):<EOL><INDENT>dma_area += 
ord(buf[offset + j + <NUM_LIT:6>]) << (j * <NUM_LIT:8>)<EOL><DEDENT>record['<STR_LIT>'] = int(floor(dma_area / <NUM_LIT:1000>))<EOL>record['<STR_LIT>'] = dma_area % <NUM_LIT:1000><EOL>record['<STR_LIT>'] = const.DMA_MAP.get(record['<STR_LIT>'])<EOL><DEDENT><DEDENT>params = (record['<STR_LIT>'], record['<STR_LIT>'])<EOL>record['<STR_LIT>'] = time_zone_by_country_and_region(*params)<EOL>return record<EOL>", "docstring": "Populate location dict for converted IP.\nReturns dict with numerous location properties.\n\n:arg ipnum: Result of ip2long conversion", "id": "f12580:c2:m5"}
{"signature": "def _action(self, action):", "body": "if action <= <NUM_LIT:1>:<EOL><INDENT>self._outA()<EOL><DEDENT>if action <= <NUM_LIT:2>:<EOL><INDENT>self.theA = self.theB<EOL>if self.theA == \"<STR_LIT:'>\" or self.theA == '<STR_LIT:\">':<EOL><INDENT>while <NUM_LIT:1>:<EOL><INDENT>self._outA()<EOL>self.theA = self._get()<EOL>if self.theA == self.theB:<EOL><INDENT>break<EOL><DEDENT>if self.theA <= '<STR_LIT:\\n>':<EOL><INDENT>raise UnterminatedStringLiteral()<EOL><DEDENT>if self.theA == '<STR_LIT:\\\\>':<EOL><INDENT>self._outA()<EOL>self.theA = self._get()<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if action <= <NUM_LIT:3>:<EOL><INDENT>self.theB = self._next()<EOL>if self.theB == '<STR_LIT:/>' and (self.theA == '<STR_LIT:(>' or self.theA == '<STR_LIT:U+002C>' or<EOL>self.theA == '<STR_LIT:=>' or self.theA == '<STR_LIT::>' or<EOL>self.theA == '<STR_LIT:[>' or self.theA == '<STR_LIT:?>' or<EOL>self.theA == '<STR_LIT:!>' or self.theA == '<STR_LIT:&>' or<EOL>self.theA == '<STR_LIT:|>' or self.theA == '<STR_LIT:;>' or<EOL>self.theA == '<STR_LIT:{>' or self.theA == '<STR_LIT:}>' or<EOL>self.theA == '<STR_LIT:\\n>'):<EOL><INDENT>self._outA()<EOL>self._outB()<EOL>while <NUM_LIT:1>:<EOL><INDENT>self.theA = self._get()<EOL>if self.theA == '<STR_LIT:/>':<EOL><INDENT>break<EOL><DEDENT>elif self.theA == '<STR_LIT:\\\\>':<EOL><INDENT>self._outA()<EOL>self.theA = self._get()<EOL><DEDENT>elif self.theA <= '<STR_LIT:\\n>':<EOL><INDENT>raise UnterminatedRegularExpression()<EOL><DEDENT>self._outA()<EOL><DEDENT>self.theB = self._next()<EOL><DEDENT><DEDENT>", "docstring": "do something! What you do is determined by the argument:\n           1   Output A. Copy B to A. Get the next B.\n           2   Copy B to A. Get the next B. (Delete A).\n           3   Get the next B. (Delete B).\n           action treats a string as a single character. Wow!\n           action recognizes a regular expression if it is preceded by ( or , or =.", "id": "f12583:c3:m5"}
{"signature": "def _next(self):", "body": "c = self._get()<EOL>if c == '<STR_LIT:/>' and self.theA != '<STR_LIT:\\\\>':<EOL><INDENT>p = self._peek()<EOL>if p == '<STR_LIT:/>':<EOL><INDENT>c = self._get()<EOL>while c > '<STR_LIT:\\n>':<EOL><INDENT>c = self._get()<EOL><DEDENT>return c<EOL><DEDENT>if p == '<STR_LIT:*>':<EOL><INDENT>c = self._get()<EOL>while <NUM_LIT:1>:<EOL><INDENT>c = self._get()<EOL>if c == '<STR_LIT:*>':<EOL><INDENT>if self._peek() == '<STR_LIT:/>':<EOL><INDENT>self._get()<EOL>return '<STR_LIT:U+0020>'<EOL><DEDENT><DEDENT>if c == '<STR_LIT>':<EOL><INDENT>raise UnterminatedComment()<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return c<EOL>", "docstring": "get the next character, excluding comments. peek() is used to see\n           if an unescaped '/' is followed by a '/' or '*'.", "id": "f12583:c3:m4"}
{"signature": "def minimalize(css, level=NORMAL):", "body": "return CssMinifier(level).minify(css)<EOL>", "docstring": "Compress css using level method and return new css as a string.", "id": "f12584:m0"}
{"signature": "def minify(self, css):", "body": "css = css.replace(\"<STR_LIT:\\r\\n>\", \"<STR_LIT:\\n>\") <EOL>for rule in _REPLACERS[self.level]:<EOL><INDENT>css = re.compile(rule[<NUM_LIT:0>], re.MULTILINE|re.UNICODE|re.DOTALL).sub(rule[<NUM_LIT:1>], css)<EOL><DEDENT>return css<EOL>", "docstring": "Tries to minimize the length of CSS code passed as parameter. Returns string.", "id": "f12584:c0:m1"}
{"signature": "def _solve_pkg(main_globals):", "body": "<EOL>main_dir, search_path = _try_search_paths(main_globals)<EOL>if not search_path:<EOL><INDENT>_log_debug('<STR_LIT>' % main_dir)<EOL>return<EOL><DEDENT>pkg_str = path.relpath(main_dir, search_path).replace(path.sep, '<STR_LIT:.>')<EOL>site_pkgs = '<STR_LIT>'<EOL>if pkg_str.startswith(site_pkgs):<EOL><INDENT>pkg_str = pkg_str[len(site_pkgs):]<EOL><DEDENT>assert pkg_str<EOL>_log_debug('<STR_LIT>' % pkg_str)<EOL>try:<EOL><INDENT>if '<STR_LIT>' in main_globals['<STR_LIT>']:<EOL><INDENT>_log_debug('<STR_LIT>')<EOL>sys.modules[pkg_str] = sys.modules['<STR_LIT:__main__>']<EOL>sys.modules[pkg_str].__path__ = [main_dir]<EOL>parent_pkg_str = '<STR_LIT:.>'.join(pkg_str.split('<STR_LIT:.>')[:-<NUM_LIT:1>])<EOL>if parent_pkg_str:<EOL><INDENT>importlib.import_module(parent_pkg_str)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>_log_debug('<STR_LIT>' % pkg_str)<EOL>importlib.import_module(pkg_str)<EOL><DEDENT>main_globals['<STR_LIT>'] = pkg_str<EOL>return pkg_str<EOL><DEDENT>except ImportError as e:<EOL><INDENT>_print_exc(e)<EOL><DEDENT>", "docstring": "Find parent python path of __main__. From there solve the package\ncontaining __main__, import it and set __package__ variable.\n\n:param main_globals: globals dictionary in __main__", "id": "f12604:m3"}
{"signature": "def _log_error(msg):", "body": "if _log_level <= ERROR:<EOL><INDENT>_log(msg)<EOL><DEDENT>", "docstring": "Log at error level\n:param msg: message to log", "id": "f12604:m6"}
{"signature": "def _log(msg):", "body": "sys.stderr.write(msg + '<STR_LIT:\\n>')<EOL>sys.stderr.flush()<EOL>", "docstring": "Central log function (all levels)\n:param msg: message to log", "id": "f12604:m4"}
{"signature": "def __init__(<EOL>self, access_token=DEMO_TOKEN, timeout=DEFAULT_TIMEOUT, websession=None<EOL>):", "body": "if websession is None:<EOL><INDENT>async def _create_session():<EOL><INDENT>return aiohttp.ClientSession()<EOL><DEDENT>loop = asyncio.get_event_loop()<EOL>self.websession = loop.run_until_complete(_create_session())<EOL><DEDENT>else:<EOL><INDENT>self.websession = websession<EOL><DEDENT>self._timeout = timeout<EOL>self._access_token = access_token<EOL>self._name = None<EOL>self._home_ids = []<EOL>self._all_home_ids = []<EOL>self._homes = {}<EOL>self.sub_manager = None<EOL>", "docstring": "Initialize the Tibber connection.", "id": "f12605:c0:m0"}
{"signature": "async def rt_connect(self, loop):", "body": "if self.sub_manager is not None:<EOL><INDENT>return<EOL><DEDENT>self.sub_manager = SubscriptionManager(<EOL>loop, \"<STR_LIT>\".format(self._access_token), SUB_ENDPOINT<EOL>)<EOL>self.sub_manager.start()<EOL>", "docstring": "Start subscription manager for real time data.", "id": "f12605:c0:m3"}
{"signature": "def sync_update_current_price_info(self):", "body": "loop = asyncio.get_event_loop()<EOL>task = loop.create_task(self.update_current_price_info())<EOL>loop.run_until_complete(task)<EOL>", "docstring": "Update current price info.", "id": "f12605:c1:m3"}
{"signature": "def __init__(self, home_id, tibber_control):", "body": "self._tibber_control = tibber_control<EOL>self._home_id = home_id<EOL>self._current_price_total = None<EOL>self._current_price_info = {}<EOL>self._price_info = {}<EOL>self._level_info = {}<EOL>self.sub_manager = None<EOL>self.info = {}<EOL>self._subscription_id = None<EOL>self._data = None<EOL>", "docstring": "Initialize the Tibber home class.", "id": "f12605:c1:m0"}
{"signature": "async def get_historic_data(self, n_data):", "body": "query = gql(<EOL>\"\"\"<STR_LIT>\"\"\"<EOL>% (self.home_id, n_data)<EOL>)<EOL>data = await self._tibber_control.execute(query)<EOL>if not data:<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>data = data[\"<STR_LIT>\"][\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>if data is None:<EOL><INDENT>self._data = []<EOL>return<EOL><DEDENT>self._data = data[\"<STR_LIT>\"]<EOL>", "docstring": "Get historic data.", "id": "f12605:c1:m22"}
{"signature": "def get_home(self, home_id):", "body": "if home_id not in self._all_home_ids:<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\", home_id)<EOL>return None<EOL><DEDENT>if home_id not in self._homes.keys():<EOL><INDENT>self._homes[home_id] = TibberHome(home_id, self)<EOL><DEDENT>return self._homes[home_id]<EOL>", "docstring": "Retun an instance of TibberHome for given home id.", "id": "f12605:c0:m13"}
{"signature": "@property<EOL><INDENT>def rt_subscription_running(self):<DEDENT>", "body": "return (<EOL>self._tibber_control.sub_manager is not None<EOL>and self._tibber_control.sub_manager.is_running<EOL>and self._subscription_id is not None<EOL>)<EOL>", "docstring": "Is real time subscription running.", "id": "f12605:c1:m21"}
{"signature": "async def rt_unsubscribe(self):", "body": "if self._subscription_id is None:<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>await self._tibber_control.sub_manager.unsubscribe(self._subscription_id)<EOL>", "docstring": "Unsubscribe to Tibber rt subscription.", "id": "f12605:c1:m20"}
{"signature": "async def rt_disconnect(self):", "body": "if self.sub_manager is None:<EOL><INDENT>return<EOL><DEDENT>await self.sub_manager.stop()<EOL>", "docstring": "Stop subscription manager.", "id": "f12605:c0:m4"}
{"signature": "@property<EOL><INDENT>def current_price_info(self):<DEDENT>", "body": "return self._current_price_info<EOL>", "docstring": "Get current price info.", "id": "f12605:c1:m8"}
{"signature": "@property<EOL><INDENT>def currency(self):<DEDENT>", "body": "try:<EOL><INDENT>current_subscription = self.info[\"<STR_LIT>\"][\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>return current_subscription[\"<STR_LIT>\"][\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL><DEDENT>except (KeyError, TypeError, IndexError):<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL><DEDENT>return \"<STR_LIT>\"<EOL>", "docstring": "Return the currency.", "id": "f12605:c1:m16"}
{"signature": "async def update_info(self, *_):", "body": "query = gql(<EOL>\"\"\"<STR_LIT>\"\"\"<EOL>)<EOL>res = await self._execute(query)<EOL>if res is None:<EOL><INDENT>return<EOL><DEDENT>errors = res.get(\"<STR_LIT>\", [])<EOL>if errors:<EOL><INDENT>msg = errors[<NUM_LIT:0>].get(\"<STR_LIT:message>\", \"<STR_LIT>\")<EOL>_LOGGER.error(msg)<EOL>raise InvalidLogin(msg)<EOL><DEDENT>data = res.get(\"<STR_LIT:data>\")<EOL>if not data:<EOL><INDENT>return<EOL><DEDENT>viewer = data.get(\"<STR_LIT>\")<EOL>if not viewer:<EOL><INDENT>return<EOL><DEDENT>self._name = viewer.get(\"<STR_LIT:name>\")<EOL>homes = viewer.get(\"<STR_LIT>\", [])<EOL>self._home_ids = []<EOL>for _home in homes:<EOL><INDENT>home_id = _home.get(\"<STR_LIT:id>\")<EOL>self._all_home_ids += [home_id]<EOL>subs = _home.get(\"<STR_LIT>\")<EOL>if subs:<EOL><INDENT>status = subs[<NUM_LIT:0>].get(\"<STR_LIT:status>\", \"<STR_LIT>\").lower()<EOL>if not home_id or status != \"<STR_LIT>\":<EOL><INDENT>continue<EOL><DEDENT><DEDENT>self._home_ids += [home_id]<EOL><DEDENT>", "docstring": "Update home info async.", "id": "f12605:c0:m8"}
{"signature": "def sync_update_price_info(self):", "body": "loop = asyncio.get_event_loop()<EOL>task = loop.create_task(self.update_price_info())<EOL>loop.run_until_complete(task)<EOL>", "docstring": "Update current price info.", "id": "f12605:c1:m5"}
{"signature": "@property<EOL><INDENT>def address1(self):<DEDENT>", "body": "try:<EOL><INDENT>return self.info[\"<STR_LIT>\"][\"<STR_LIT>\"][\"<STR_LIT:address>\"][\"<STR_LIT>\"]<EOL><DEDENT>except (KeyError, TypeError):<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL><DEDENT>return \"<STR_LIT>\"<EOL>", "docstring": "Return the home adress1.", "id": "f12605:c1:m14"}
{"signature": "@property<EOL><INDENT>def consumption_unit(self):<DEDENT>", "body": "return \"<STR_LIT>\"<EOL>", "docstring": "Return the consumption.", "id": "f12605:c1:m15"}
{"signature": "@property<EOL><INDENT>def has_active_subscription(self):<DEDENT>", "body": "try:<EOL><INDENT>sub = self.info[\"<STR_LIT>\"][\"<STR_LIT>\"][\"<STR_LIT>\"][\"<STR_LIT:status>\"]<EOL><DEDENT>except (KeyError, TypeError):<EOL><INDENT>return False<EOL><DEDENT>return sub == \"<STR_LIT>\"<EOL>", "docstring": "Return home id.", "id": "f12605:c1:m12"}
{"signature": "def get_homes(self, only_active=True):", "body": "return [self.get_home(home_id) for home_id in self.get_home_ids(only_active)]<EOL>", "docstring": "Return list of Tibber homes.", "id": "f12605:c0:m12"}
{"signature": "async def update_price_info(self):", "body": "query = gql(<EOL>\"\"\"<STR_LIT>\"\"\"<EOL>% self.home_id<EOL>)<EOL>price_info_temp = await self._tibber_control.execute(query)<EOL>if not price_info_temp:<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>self._price_info = {}<EOL>self._level_info = {}<EOL>for key in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>try:<EOL><INDENT>home = price_info_temp[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>current_subscription = home[\"<STR_LIT>\"]<EOL>price_info = current_subscription[\"<STR_LIT>\"][key]<EOL><DEDENT>except (KeyError, TypeError):<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\", key)<EOL>continue<EOL><DEDENT>if key == \"<STR_LIT>\":<EOL><INDENT>self._current_price_info = price_info<EOL>continue<EOL><DEDENT>for data in price_info:<EOL><INDENT>self._price_info[data.get(\"<STR_LIT>\")] = data.get(\"<STR_LIT>\")<EOL>self._level_info[data.get(\"<STR_LIT>\")] = data.get(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "Update price info async.", "id": "f12605:c1:m6"}
{"signature": "def get_home_ids(self, only_active=True):", "body": "if only_active:<EOL><INDENT>return self._home_ids<EOL><DEDENT>return self._all_home_ids<EOL>", "docstring": "Return list of home ids.", "id": "f12605:c0:m11"}
{"signature": "@property<EOL><INDENT>def country(self):<DEDENT>", "body": "try:<EOL><INDENT>return self.info[\"<STR_LIT>\"][\"<STR_LIT>\"][\"<STR_LIT:address>\"][\"<STR_LIT>\"]<EOL><DEDENT>except (KeyError, TypeError):<EOL><INDENT>_LOGGER.error(\"<STR_LIT>\")<EOL><DEDENT>return \"<STR_LIT>\"<EOL>", "docstring": "Return the country.", "id": "f12605:c1:m17"}
{"signature": "@property<EOL><INDENT>def price_level(self):<DEDENT>", "body": "return self._level_info<EOL>", "docstring": "Get dictionary with price level, key is date-time.", "id": "f12605:c1:m10"}
{"signature": "def setUp(self):     ", "body": "self.tibber = tibber.Tibber(access_token='<STR_LIT>')        <EOL>self.assertRaises(tibber.InvalidLogin, self.tibber.sync_update_info)<EOL>", "docstring": "things to be run when tests are started.", "id": "f12606:c2:m0"}
{"signature": "def tearDown(self):  ", "body": "self.tibber.sync_close_connection()<EOL>", "docstring": "Stop stuff we started.", "id": "f12606:c3:m1"}
{"signature": "def setUp(self):     ", "body": "self.tibber = tibber.Tibber()<EOL>self.tibber.sync_update_info()<EOL>", "docstring": "things to be run when tests are started.", "id": "f12606:c0:m0"}
{"signature": "def setUp(self):  ", "body": "async def _create_session():<EOL><INDENT>return aiohttp.ClientSession()<EOL><DEDENT>loop = asyncio.get_event_loop()<EOL>self.websession = loop.run_until_complete(_create_session())<EOL>self.tibber = tibber.Tibber(websession=self.websession)<EOL>self.tibber.sync_update_info()<EOL>", "docstring": "things to be run when tests are started.", "id": "f12606:c1:m0"}
{"signature": "def exists(self, server):", "body": "try:<EOL><INDENT>server.get(<EOL>'<STR_LIT>',<EOL>replacements={<EOL>'<STR_LIT>': self.__challenge__.slug,<EOL>'<STR_LIT>': self.identifier})<EOL><DEDENT>except Exception:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Check if a task exists on the server", "id": "f12608:c0:m3"}
{"signature": "@classmethod<EOL><INDENT>def from_payload(cls, payload):<DEDENT>", "body": "return cls(**payload)<EOL>", "docstring": "Create a task from JSON", "id": "f12608:c0:m7"}
{"signature": "@classmethod<EOL><INDENT>def from_server(cls, server, slug, identifier):<DEDENT>", "body": "task = server.get(<EOL>'<STR_LIT>',<EOL>replacements={<EOL>'<STR_LIT>': slug,<EOL>'<STR_LIT>': identifier})<EOL>return cls(**task)<EOL>", "docstring": "Retrieve a task from the server", "id": "f12608:c0:m6"}
{"signature": "def update(self, server):", "body": "return server.put(<EOL>'<STR_LIT>',<EOL>self.as_payload(),<EOL>replacements={<EOL>'<STR_LIT>': self.__challenge__.slug,<EOL>'<STR_LIT>': self.identifier})<EOL>", "docstring": "Update existing task on the server", "id": "f12608:c0:m2"}
{"signature": "def exists(self, server):", "body": "try:<EOL><INDENT>server.get(<EOL>'<STR_LIT>',<EOL>replacements={'<STR_LIT>': self.slug})<EOL><DEDENT>except Exception:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Check if a challenge exists on the server", "id": "f12611:c0:m4"}
{"signature": "def update(self, server):", "body": "for chunk in self.__cut_to_size():<EOL><INDENT>server.put(<EOL>'<STR_LIT>',<EOL>chunk.as_payload(),<EOL>replacements={<EOL>'<STR_LIT>': chunk.challenge.slug})<EOL><DEDENT>", "docstring": "Update existing tasks on the server", "id": "f12612:c0:m2"}
{"signature": "def __create_task_collection(self, challenge):", "body": "task_collection = MapRouletteTaskCollection(challenge)<EOL>i = <NUM_LIT:0><EOL>while i < self.A_TON:<EOL><INDENT>i += <NUM_LIT:1><EOL>task_collection.tasks.append(<EOL>MapRouletteTask(<EOL>challenge=challenge,<EOL>identifier='<STR_LIT>'.format(uuid.uuid4()),<EOL>geometries=self.__random_point()))<EOL><DEDENT>return task_collection<EOL>", "docstring": "Return a collection of A_TON of tasks with random Point geometries", "id": "f12615:c0:m12"}
{"signature": "def update_probability_at_read_level(self, model=<NUM_LIT:3>):", "body": "self.probability.reset()  <EOL>if model == <NUM_LIT:1>:<EOL><INDENT>self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)<EOL>self.probability.normalize_reads(axis=APM.Axis.HAPLOGROUP, grouping_mat=self.t2t_mat)<EOL>haplogroup_sum_mat = self.allelic_expression * self.t2t_mat<EOL>self.probability.multiply(haplogroup_sum_mat, axis=APM.Axis.READ)<EOL>self.probability.normalize_reads(axis=APM.Axis.GROUP, grouping_mat=self.t2t_mat)<EOL>self.probability.multiply(haplogroup_sum_mat.sum(axis=<NUM_LIT:0>), axis=APM.Axis.HAPLOTYPE)<EOL>self.probability.normalize_reads(axis=APM.Axis.READ)<EOL><DEDENT>elif model == <NUM_LIT:2>:<EOL><INDENT>self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)<EOL>self.probability.normalize_reads(axis=APM.Axis.LOCUS)<EOL>self.probability.multiply(self.allelic_expression.sum(axis=<NUM_LIT:0>), axis=APM.Axis.HAPLOTYPE)<EOL>self.probability.normalize_reads(axis=APM.Axis.GROUP, grouping_mat=self.t2t_mat)<EOL>self.probability.multiply((self.allelic_expression * self.t2t_mat).sum(axis=<NUM_LIT:0>), axis=APM.Axis.HAPLOTYPE)<EOL>self.probability.normalize_reads(axis=APM.Axis.READ)<EOL><DEDENT>elif model == <NUM_LIT:3>:<EOL><INDENT>self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)<EOL>self.probability.normalize_reads(axis=APM.Axis.GROUP, grouping_mat=self.t2t_mat)<EOL>self.probability.multiply((self.allelic_expression * self.t2t_mat).sum(axis=<NUM_LIT:0>), axis=APM.Axis.HAPLOTYPE)<EOL>self.probability.normalize_reads(axis=APM.Axis.READ)<EOL><DEDENT>elif model == <NUM_LIT:4>:<EOL><INDENT>self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)<EOL>self.probability.normalize_reads(axis=APM.Axis.READ)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Updates the probability of read origin at read level\n\n:param model: Normalization model (1: 
Gene->Allele->Isoform, 2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)\n:return: Nothing (as it performs in-place operations)", "id": "f12618:c0:m4"}
{"signature": "def export_posterior_probability(self, filename, title=\"<STR_LIT>\"):", "body": "self.probability.save(h5file=filename, title=title)<EOL>", "docstring": "Writes the posterior probability of read origin\n\n:param filename: File name for output\n:param title: The title of the posterior probability matrix\n:return: Nothing but the method writes a file in EMASE format (PyTables)", "id": "f12618:c0:m9"}
{"signature": "def report_depths(self, filename, tpm=True, grp_wise=False, reorder='<STR_LIT>', notes=None):", "body": "if grp_wise:<EOL><INDENT>lname = self.probability.gname<EOL>depths = self.allelic_expression * self.grp_conv_mat<EOL><DEDENT>else:<EOL><INDENT>lname = self.probability.lname<EOL>depths = self.allelic_expression<EOL><DEDENT>if tpm:<EOL><INDENT>depths *= (<NUM_LIT> / depths.sum())<EOL><DEDENT>total_depths = depths.sum(axis=<NUM_LIT:0>)<EOL>if reorder == '<STR_LIT>':<EOL><INDENT>report_order = np.argsort(total_depths.flatten())<EOL>report_order = report_order[::-<NUM_LIT:1>]<EOL><DEDENT>elif reorder == '<STR_LIT>':<EOL><INDENT>report_order = np.argsort(total_depths.flatten())<EOL><DEDENT>elif reorder == '<STR_LIT>':<EOL><INDENT>report_order = np.arange(len(lname))  <EOL><DEDENT>cntdata = np.vstack((depths, total_depths))<EOL>fhout = open(filename, '<STR_LIT:w>')<EOL>fhout.write(\"<STR_LIT>\" + \"<STR_LIT:\\t>\".join(self.probability.hname) + \"<STR_LIT>\")<EOL>if notes is not None:<EOL><INDENT>fhout.write(\"<STR_LIT>\")<EOL><DEDENT>fhout.write(\"<STR_LIT:\\n>\")<EOL>for locus_id in report_order:<EOL><INDENT>lname_cur = lname[locus_id]<EOL>fhout.write(\"<STR_LIT:\\t>\".join([lname_cur] + list(map(str, cntdata[:, locus_id].ravel()))))<EOL>if notes is not None:<EOL><INDENT>fhout.write(\"<STR_LIT>\" % notes[lname_cur])<EOL><DEDENT>fhout.write(\"<STR_LIT:\\n>\")<EOL><DEDENT>fhout.close()<EOL>", "docstring": "Exports expected depths\n\n:param filename: File name for output\n:param grp_wise: whether the report is at isoform level or gene level\n:param reorder: whether the report should be either 'decreasing' or 'increasing' order or just 'as-is'\n:return: Nothing but the method writes a file", "id": "f12618:c0:m8"}
{"signature": "def run(self, model, tol=<NUM_LIT>, max_iters=<NUM_LIT>, verbose=True):", "body": "orig_err_states = np.seterr(all='<STR_LIT>')<EOL>np.seterr(under='<STR_LIT:ignore>')<EOL>if verbose:<EOL><INDENT>print()<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL><DEDENT>num_iters = <NUM_LIT:0><EOL>err_sum = <NUM_LIT><EOL>time0 = time.time()<EOL>target_err = <NUM_LIT> * tol<EOL>while err_sum > target_err and num_iters < max_iters:<EOL><INDENT>prev_isoform_expression = self.get_allelic_expression().sum(axis=<NUM_LIT:0>)<EOL>prev_isoform_expression *= (<NUM_LIT> / prev_isoform_expression.sum())<EOL>self.update_allelic_expression(model=model)<EOL>curr_isoform_expression = self.get_allelic_expression().sum(axis=<NUM_LIT:0>)<EOL>curr_isoform_expression *= (<NUM_LIT> / curr_isoform_expression.sum())<EOL>err = np.abs(curr_isoform_expression - prev_isoform_expression)<EOL>err_sum = err.sum()<EOL>num_iters += <NUM_LIT:1><EOL>if verbose:<EOL><INDENT>time1 = time.time()<EOL>delmin, s = divmod(int(time1 - time0), <NUM_LIT>)<EOL>h, m = divmod(delmin, <NUM_LIT>)<EOL>print(\"<STR_LIT>\" % (num_iters, h, m, s, err_sum))<EOL><DEDENT><DEDENT>", "docstring": "Runs EM iterations\n\n:param model: Normalization model (1: Gene->Allele->Isoform, 2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)\n:param tol: Tolerance for termination\n:param max_iters: Maximum number of iterations until termination\n:param verbose: Display information on how EM is running\n:return: Nothing (as it performs in-place operations)", "id": "f12618:c0:m6"}
{"signature": "def update_allelic_expression(self, model=<NUM_LIT:3>):", "body": "self.update_probability_at_read_level(model)<EOL>self.allelic_expression = self.probability.sum(axis=APM.Axis.READ)<EOL>if self.target_lengths is not None:<EOL><INDENT>self.allelic_expression = np.divide(self.allelic_expression, self.target_lengths)<EOL><DEDENT>", "docstring": "A single EM step: Update probability at read level and then re-estimate allelic specific expression\n\n:param model: Normalization model (1: Gene->Allele->Isoform, 2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)\n:return: Nothing (as it performs in-place operations)", "id": "f12618:c0:m5"}
{"signature": "def report_read_counts(self, filename, grp_wise=False, reorder='<STR_LIT>', notes=None):", "body": "expected_read_counts = self.probability.sum(axis=APM.Axis.READ)<EOL>if grp_wise:<EOL><INDENT>lname = self.probability.gname<EOL>expected_read_counts = expected_read_counts * self.grp_conv_mat<EOL><DEDENT>else:<EOL><INDENT>lname = self.probability.lname<EOL><DEDENT>total_read_counts = expected_read_counts.sum(axis=<NUM_LIT:0>)<EOL>if reorder == '<STR_LIT>':<EOL><INDENT>report_order = np.argsort(total_read_counts.flatten())<EOL>report_order = report_order[::-<NUM_LIT:1>]<EOL><DEDENT>elif reorder == '<STR_LIT>':<EOL><INDENT>report_order = np.argsort(total_read_counts.flatten())<EOL><DEDENT>elif reorder == '<STR_LIT>':<EOL><INDENT>report_order = np.arange(len(lname))  <EOL><DEDENT>cntdata = np.vstack((expected_read_counts, total_read_counts))<EOL>fhout = open(filename, '<STR_LIT:w>')<EOL>fhout.write(\"<STR_LIT>\" + \"<STR_LIT:\\t>\".join(self.probability.hname) + \"<STR_LIT>\")<EOL>if notes is not None:<EOL><INDENT>fhout.write(\"<STR_LIT>\")<EOL><DEDENT>fhout.write(\"<STR_LIT:\\n>\")<EOL>for locus_id in report_order:<EOL><INDENT>lname_cur = lname[locus_id]<EOL>fhout.write(\"<STR_LIT:\\t>\".join([lname_cur] + list(map(str, cntdata[:, locus_id].ravel()))))<EOL>if notes is not None:<EOL><INDENT>fhout.write(\"<STR_LIT>\" % notes[lname_cur])<EOL><DEDENT>fhout.write(\"<STR_LIT:\\n>\")<EOL><DEDENT>fhout.close()<EOL>", "docstring": "Exports expected read counts\n\n:param filename: File name for output\n:param grp_wise: whether the report is at isoform level or gene level\n:param reorder: whether the report should be either 'decreasing' or 'increasing' order or just 'as-is'\n:return: Nothing but the method writes a file", "id": "f12618:c0:m7"}
{"signature": "def get_unique_reads(self, ignore_haplotype=False, shallow=False):", "body": "if self.finalized:<EOL><INDENT>if ignore_haplotype:<EOL><INDENT>summat = self.sum(axis=self.Axis.HAPLOTYPE)<EOL>nnz_per_read = np.diff(summat.tocsr().indptr)<EOL>unique_reads = np.logical_and(nnz_per_read > <NUM_LIT:0>, nnz_per_read < <NUM_LIT:2>)<EOL><DEDENT>else:  <EOL><INDENT>alncnt_per_read = self.sum(axis=self.Axis.LOCUS).sum(axis=self.Axis.HAPLOTYPE)<EOL>unique_reads = np.logical_and(alncnt_per_read > <NUM_LIT:0>, alncnt_per_read < <NUM_LIT:2>)<EOL><DEDENT>return self.pull_alignments_from(unique_reads, shallow=shallow)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Pull out alignments of uniquely-aligning reads\n\n:param ignore_haplotype: whether to regard allelic multiread as uniquely-aligning read\n:param shallow: whether to copy sparse 3D matrix only or not\n:return: a new AlignmentPropertyMatrix object that particular reads are", "id": "f12620:c0:m13"}
{"signature": "def __getitem__(self, name):", "body": "value = self.get(name)<EOL>if value:<EOL><INDENT>return value<EOL><DEDENT>raise KeyError(name)<EOL>", "docstring": "Return the value at key ``name``, raises a KeyError if the key\ndoesn't exist.", "id": "f12629:c0:m5"}
{"signature": "def _rc_sdiffstore(self, dst, src, *args):", "body": "args = list_or_args(src, args)<EOL>result = self.sdiff(*args)<EOL>if result is not set([]):<EOL><INDENT>return self.sadd(dst, *list(result))<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Store the difference of sets ``src``,  ``args`` into a new\nset named ``dest``.  Returns the number of keys in the new set.", "id": "f12629:c0:m11"}
{"signature": "def _rc_mget(self, keys, *args):", "body": "args = list_or_args(keys, args)<EOL>result = []<EOL>for key in args:<EOL><INDENT>result.append(self.get(key))<EOL><DEDENT>return result<EOL>", "docstring": "Returns a list of values ordered identically to ``*args``", "id": "f12629:c0:m19"}
{"signature": "def _rc_sunionstore(self, dst, src, *args):", "body": "args = list_or_args(src, args)<EOL>result = self.sunion(*args)<EOL>if result is not set([]):<EOL><INDENT>return self.sadd(dst, *list(result))<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Store the union of sets ``src``,  ``args`` into a new\nset named ``dest``.  Returns the number of keys in the new set.", "id": "f12629:c0:m16"}
{"signature": "def value_to_none(config_val, evar):", "body": "if not config_val:<EOL><INDENT>return None<EOL><DEDENT>return config_val<EOL>", "docstring": "Given a value that evaluates to a boolean False, return None.\n\n:param str config_val: The env var value.\n:param EnvironmentVariable evar: The EVar object we are validating\n    a value for.\n:rtype: str or None\n:return: Either the non-False value or None.", "id": "f12632:m2"}
{"signature": "def comma_separated_str_to_list(config_val, evar):", "body": "if not config_val:<EOL><INDENT>return []<EOL><DEDENT>return [token.strip() for token in config_val.split('<STR_LIT:U+002C>')]<EOL>", "docstring": "Splits a comma-separated environment variable into a list of strings.\n\n:param str config_val: The env var value.\n:param EnvironmentVariable evar: The EVar object we are validating\n    a value for.\n:rtype: list\n:return: The equivalent list for a comma-separated string.", "id": "f12632:m0"}
{"signature": "def value_to_bool(config_val, evar):", "body": "if not config_val:<EOL><INDENT>return False<EOL><DEDENT>if config_val.strip().lower() == '<STR_LIT:true>':<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Massages the 'true' and 'false' strings to bool equivalents.\n\n:param str config_val: The env var value.\n:param EnvironmentVariable evar: The EVar object we are validating\n    a value for.\n:rtype: bool\n:return: True or False, depending on the value.", "id": "f12632:m4"}
{"signature": "def __init__(self, name, is_required=True, default_val=None,<EOL>filters=None, help_txt=None):", "body": "self.name = name<EOL>self.is_required = is_required<EOL>self.default_val = default_val<EOL>self.filters = filters or []<EOL>self.help_txt = help_txt<EOL>", "docstring": ":param str name: The name of the environment variable. *This is\n    case-sensitive!*\n:keyword bool is_required: If ``True``, this variable must be defined\n    when your Python process starts. If ``False``, the default loaded\n    value will match ``default_val``.\n:keyword default_val: If ``is_required`` is ``False`` and this\n    environment variable is not defined, this value will be loaded.\n:keyword list filters: A list of functions to pass the environment\n    variable's value (or default value) through. Order is\n    significant!\n:keyword str help_txt: Optional help text describing the environment\n    variable.", "id": "f12634:c1:m0"}
{"signature": "def printdir(self):", "body": "print(\"<STR_LIT>\" % (\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"))<EOL>for rarinfo in self.filelist:<EOL><INDENT>date = \"<STR_LIT>\" % rarinfo.date_time[:<NUM_LIT:6>]<EOL>print(\"<STR_LIT>\" % (<EOL>rarinfo.filename, date, rarinfo.file_size))<EOL><DEDENT>", "docstring": "Print a table of contents for the RAR file.", "id": "f12640:c3:m12"}
{"signature": "def _open(self, archive):", "body": "try:<EOL><INDENT>handle = unrarlib.RAROpenArchiveEx(ctypes.byref(archive))<EOL><DEDENT>except unrarlib.UnrarException:<EOL><INDENT>raise BadRarFile(\"<STR_LIT>\")<EOL><DEDENT>return handle<EOL>", "docstring": "Open RAR archive file.", "id": "f12640:c3:m4"}
{"signature": "def open(self, member, pwd=None):", "body": "if isinstance(member, RarInfo):<EOL><INDENT>member = member.filename<EOL><DEDENT>archive = unrarlib.RAROpenArchiveDataEx(<EOL>self.filename, mode=constants.RAR_OM_EXTRACT)<EOL>handle = self._open(archive)<EOL>password = pwd or self.pwd<EOL>if password is not None:<EOL><INDENT>unrarlib.RARSetPassword(handle, b(password))<EOL><DEDENT>data = _ReadIntoMemory()<EOL>c_callback = unrarlib.UNRARCALLBACK(data._callback)<EOL>unrarlib.RARSetCallback(handle, c_callback, <NUM_LIT:0>)<EOL>try:<EOL><INDENT>rarinfo = self._read_header(handle)<EOL>while rarinfo is not None:<EOL><INDENT>if rarinfo.filename == member:<EOL><INDENT>self._process_current(handle, constants.RAR_TEST)<EOL>break<EOL><DEDENT>else:<EOL><INDENT>self._process_current(handle, constants.RAR_SKIP)<EOL><DEDENT>rarinfo = self._read_header(handle)<EOL><DEDENT>if rarinfo is None:<EOL><INDENT>data = None<EOL><DEDENT><DEDENT>except unrarlib.MissingPassword:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>except unrarlib.BadPassword:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>except unrarlib.BadDataError:<EOL><INDENT>if password is not None:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>except unrarlib.UnrarException as e:<EOL><INDENT>raise BadRarFile(\"<STR_LIT>\" % str(e))<EOL><DEDENT>finally:<EOL><INDENT>self._close(handle)<EOL><DEDENT>if data is None:<EOL><INDENT>raise KeyError('<STR_LIT>' % member)<EOL><DEDENT>return data.get_bytes()<EOL>", "docstring": "Return file-like object for 'member'.\n\n           'member' may be a filename or a RarInfo object.", "id": "f12640:c3:m6"}
{"signature": "def _load_metadata(self, handle):", "body": "rarinfo = self._read_header(handle)<EOL>while rarinfo:<EOL><INDENT>self.filelist.append(rarinfo)<EOL>self.NameToInfo[rarinfo.filename] = rarinfo<EOL>self._process_current(handle, constants.RAR_SKIP)<EOL>rarinfo = self._read_header(handle)<EOL><DEDENT>", "docstring": "Load archive members metadata.", "id": "f12640:c3:m3"}
{"signature": "def extract(self, member, path=None, pwd=None):", "body": "if isinstance(member, RarInfo):<EOL><INDENT>member = member.filename<EOL><DEDENT>if path is None:<EOL><INDENT>path = os.getcwd()<EOL><DEDENT>self._extract_members([member], path, pwd)<EOL>return os.path.join(path, member)<EOL>", "docstring": "Extract a member from the archive to the current working directory,\n           using its full name. Its file information is extracted as accurately\n           as possible. `member' may be a filename or a RarInfo object. You can\n           specify a different directory using `path'.", "id": "f12640:c3:m14"}
{"signature": "def infolist(self):", "body": "return self.filelist<EOL>", "docstring": "Return a list of class RarInfo instances for files in the\n        archive.", "id": "f12640:c3:m11"}
{"signature": "def _close(self, handle):", "body": "try:<EOL><INDENT>unrarlib.RARCloseArchive(handle)<EOL><DEDENT>except unrarlib.CloseError:<EOL><INDENT>raise BadRarFile(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Close RAR archive file.", "id": "f12640:c3:m5"}
{"signature": "def setpassword(self, pwd):", "body": "self.pwd = pwd<EOL>", "docstring": "Set default password for encrypted files.", "id": "f12640:c3:m9"}
{"signature": "def __init__(self, filename, mode='<STR_LIT:r>', pwd=None):", "body": "self.filename = filename<EOL>mode = constants.RAR_OM_LIST_INCSPLIT<EOL>archive = unrarlib.RAROpenArchiveDataEx(filename, mode=mode)<EOL>handle = self._open(archive)<EOL>self.pwd = pwd<EOL>if self.pwd is not None:<EOL><INDENT>unrarlib.RARSetPassword(handle, b(self.pwd))<EOL><DEDENT>self.filelist = []<EOL>self.NameToInfo = {}<EOL>if archive.CmtState == constants.RAR_COMMENTS_SUCCESS:<EOL><INDENT>self.comment = archive.CmtBuf.value<EOL><DEDENT>else:<EOL><INDENT>self.comment = None<EOL><DEDENT>self._load_metadata(handle)<EOL>self._close(handle)<EOL>", "docstring": "Load RAR archive file with mode read only \"r\".", "id": "f12640:c3:m0"}
{"signature": "def _process_current(self, handle, op, dest_path=None, dest_name=None):", "body": "unrarlib.RARProcessFileW(handle, op, dest_path, dest_name)<EOL>", "docstring": "Process current member with 'op' operation.", "id": "f12640:c3:m2"}
{"signature": "def serial_ports():", "body": "df_comports = sd.comports()<EOL>df_teensy_comports = df_comports.loc[df_comports.hardware_id.str<EOL>.contains('<STR_LIT>',<EOL>case=False)]<EOL>return df_teensy_comports<EOL>", "docstring": "Returns\n-------\npandas.DataFrame\n    Table of serial ports that match the USB vendor ID and product ID for\n    the `Teensy 3.2`_ board.\n\n.. Teensy 3.2: https://www.pjrc.com/store/teensy32.html", "id": "f12647:m0"}
{"signature": "def get_firmwares():", "body": "return OrderedDict([(board_dir.name, [f.abspath() for f in<EOL>board_dir.walkfiles('<STR_LIT>')])<EOL>for board_dir in<EOL>package_path().joinpath('<STR_LIT>').dirs()])<EOL>", "docstring": "Return compiled Arduino hex file paths.\n\nThis function may be used to locate firmware binaries that are available\nfor flashing to [Arduino][1] boards.\n\n[1]: http://arduino.cc", "id": "f12649:m5"}
{"signature": "def get_sources():", "body": "import base_node_rpc<EOL>return (get_sketch_directory().files('<STR_LIT>') +<EOL>list(get_lib_directory().walkfiles('<STR_LIT>')) +<EOL>base_node_rpc.get_sources())<EOL>", "docstring": "Return Arduino source file paths.  This includes any supplementary source\nfiles that are not contained in Arduino libraries.", "id": "f12649:m4"}
{"signature": "def parse_args(args=None):", "body": "from argparse import ArgumentParser<EOL>if args is None:<EOL><INDENT>args = sys.argv<EOL><DEDENT>parser = ArgumentParser(description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', help='<STR_LIT>'<EOL>'<STR_LIT>')<EOL>args = parser.parse_args()<EOL>return args<EOL>", "docstring": "Parses arguments, returns (options, args).", "id": "f12650:m1"}
{"signature": "def normalize_excludes(rootpath, excludes):", "body": "sep = os.path.sep<EOL>f_excludes = []<EOL>for exclude in excludes:<EOL><INDENT>if not os.path.isabs(exclude) and not exclude.startswith(rootpath):<EOL><INDENT>exclude = os.path.join(rootpath, exclude)<EOL><DEDENT>if not exclude.endswith(sep):<EOL><INDENT>exclude += sep<EOL><DEDENT>f_excludes.append(exclude)<EOL><DEDENT>return f_excludes<EOL>", "docstring": "Normalize the excluded directory list:\n* must be either an absolute path or start with rootpath,\n* otherwise it is joined with rootpath\n* with trailing slash", "id": "f12651:m9"}
{"signature": "def format_directive(module, package=None):", "body": "directive = '<STR_LIT>' % makename(package, module)<EOL>for option in OPTIONS:<EOL><INDENT>directive += '<STR_LIT>' % option<EOL><DEDENT>return directive<EOL>", "docstring": "Create the automodule directive and add the options.", "id": "f12651:m3"}
{"signature": "def create_module_file(package, module, opts):", "body": "text = format_heading(<NUM_LIT:1>, '<STR_LIT>' % module)<EOL>text += format_heading(<NUM_LIT:2>, '<STR_LIT>' % module)<EOL>text += format_directive(module, package)<EOL>write_file(makename(package, module), text, opts)<EOL>", "docstring": "Build the text of the file and write the file.", "id": "f12651:m4"}
{"signature": "def is_excluded(root, excludes):", "body": "sep = os.path.sep<EOL>if not root.endswith(sep):<EOL><INDENT>root += sep<EOL><DEDENT>for exclude in excludes:<EOL><INDENT>if root.startswith(exclude):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Check if the directory is in the exclude list.\n\nNote: by having trailing slashes, we avoid common prefix issues, like\n      e.g. an exlude \"foo\" also accidentally excluding \"foobar\".", "id": "f12651:m10"}
{"signature": "def makename(package, module):", "body": "<EOL>if package:<EOL><INDENT>name = package<EOL>if module:<EOL><INDENT>name += '<STR_LIT:.>' + module<EOL><DEDENT><DEDENT>else:<EOL><INDENT>name = module<EOL><DEDENT>return name<EOL>", "docstring": "Join package and module with a dot.", "id": "f12651:m0"}
{"signature": "def seeded_auth_token(client, service, seed):", "body": "hash_func = hashlib.md5()<EOL>token = '<STR_LIT:U+002C>'.join((client, service, seed)).encode('<STR_LIT:utf-8>')<EOL>hash_func.update(token)<EOL>return hash_func.hexdigest()<EOL>", "docstring": "Return an auth token based on the client+service+seed tuple.", "id": "f12653:m0"}
{"signature": "def clean_sources(self, sources):", "body": "return [tuple(path.rsplit('<STR_LIT:.>', <NUM_LIT:1>)[<NUM_LIT:0>].rsplit('<STR_LIT:/>', <NUM_LIT:2>)[<NUM_LIT:1>:])<EOL>for path in sources]<EOL>", "docstring": "Convert a config_sources result into a create_sources list.\n\n        i.e. the last two path components, minus a file extension", "id": "f12658:c1:m4"}
{"signature": "def write(self, name, contents):", "body": "fn = os.path.join(self.tmpdir, name)<EOL>with open(fn, '<STR_LIT:w>') as f:<EOL><INDENT>f.write(contents)<EOL><DEDENT>return fn<EOL>", "docstring": "Write contents to tmpdir/name. Return full filename.", "id": "f12658:c2:m3"}
{"signature": "def filter(config):", "body": "keys = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>]<EOL>return filter_dict(config, keys)<EOL>", "docstring": "The subset of configuration keys to be made public.", "id": "f12660:m0"}
{"signature": "def filter_config(config, deploy_config):", "body": "if not os.path.isfile(deploy_config):<EOL><INDENT>return DotDict()<EOL><DEDENT>config_module = get_config_module(deploy_config)<EOL>return config_module.filter(config)<EOL>", "docstring": "Return a config subset using the filter defined in the deploy config.", "id": "f12663:m0"}
{"signature": "def _convert_item(self, obj):", "body": "if isinstance(obj, dict) and not isinstance(obj, DotDict):<EOL><INDENT>obj = DotDict(obj)<EOL><DEDENT>elif isinstance(obj, list):<EOL><INDENT>for i, item in enumerate(obj):<EOL><INDENT>if isinstance(item, dict) and not isinstance(item, DotDict):<EOL><INDENT>obj[i] = DotDict(item)<EOL><DEDENT><DEDENT><DEDENT>return obj<EOL>", "docstring": "Convert obj into a DotDict, or list of DotDict.\n\nDirectly nested lists aren't supported.\nReturns the result", "id": "f12664:c0:m6"}
{"signature": "def merge_dicts(d1, d2, _path=None):", "body": "if _path is None:<EOL><INDENT>_path = ()<EOL><DEDENT>if isinstance(d1, dict) and isinstance(d2, dict):<EOL><INDENT>for k, v in d2.items():<EOL><INDENT>if isinstance(v, MissingValue) and v.name is None:<EOL><INDENT>v.name = '<STR_LIT:.>'.join(_path + (k,))<EOL><DEDENT>if isinstance(v, DeletedValue):<EOL><INDENT>d1.pop(k, None)<EOL><DEDENT>elif k not in d1:<EOL><INDENT>if isinstance(v, dict):<EOL><INDENT>d1[k] = merge_dicts({}, v, _path + (k,))<EOL><DEDENT>else:<EOL><INDENT>d1[k] = v<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if isinstance(d1[k], dict) and isinstance(v, dict):<EOL><INDENT>d1[k] = merge_dicts(d1[k], v, _path + (k,))<EOL><DEDENT>elif isinstance(d1[k], list) and isinstance(v, list):<EOL><INDENT>d1[k] += v<EOL><DEDENT>elif isinstance(d1[k], MissingValue):<EOL><INDENT>d1[k] = v<EOL><DEDENT>elif d1[k] is None:<EOL><INDENT>d1[k] = v<EOL><DEDENT>elif type(d1[k]) == type(v):<EOL><INDENT>d1[k] = v<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>'<EOL>% (type(d1[k]), type(v)))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>' % (type(d1), type(d2)))<EOL><DEDENT>return d1<EOL>", "docstring": "Merge dictionary d2 into d1, overriding entries in d1 with values from d2.\n\nd1 is mutated.\n\n_path is for internal, recursive use.", "id": "f12664:m0"}
{"signature": "def available_sources(sources):", "body": "for dirs, name in sources:<EOL><INDENT>for directory in dirs:<EOL><INDENT>fn = os.path.join(directory, name) + '<STR_LIT>'<EOL>if os.path.isfile(fn):<EOL><INDENT>yield fn<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Yield the sources that are present.", "id": "f12665:m1"}
{"signature": "def add_roles(self, *roles):", "body": "for role in roles:<EOL><INDENT>self.add_role(role)<EOL><DEDENT>", "docstring": "Add roles to this user.\n\n        :param roles: Roles to add.", "id": "f12670:c1:m2"}
{"signature": "def add_role(self, role):", "body": "self.roles.add(role)<EOL>", "docstring": "Add a role to this user.\n\n        :param role: Role to add.", "id": "f12670:c1:m1"}
{"signature": "def get_name(self):", "body": "return self.name<EOL>", "docstring": "Return the name of this role", "id": "f12670:c0:m1"}
{"signature": "def add_parent(self, parent):", "body": "parent.children.add(self)<EOL>self.parents.add(parent)<EOL>", "docstring": "Add a parent to this role,\n        and add role itself to the parent's children set.\n        you should override this function if neccessary.\n\n        Example::\n\n            logged_user = RoleMixin('logged_user')\n            student = RoleMixin('student')\n            student.add_parent(logged_user)\n\n        :param parent: Parent role to add in.", "id": "f12670:c0:m2"}
{"signature": "def set_user_loader(self, loader):", "body": "self._user_loader = loader<EOL>", "docstring": "Set user loader, which is used to load current user.\n        An example::\n\n            from flask_login import current_user\n            rbac.set_user_loader(lambda: current_user)\n\n        :param loader: Current user function.", "id": "f12671:c2:m6"}
{"signature": "def allow(self, role, method, resource, with_children=True):", "body": "if with_children:<EOL><INDENT>for r in role.get_children():<EOL><INDENT>permission = (r.get_name(), method, resource)<EOL>if permission not in self._allowed:<EOL><INDENT>self._allowed.append(permission)<EOL><DEDENT><DEDENT><DEDENT>if role == '<STR_LIT>':<EOL><INDENT>permission = (role, method, resource)<EOL><DEDENT>else:<EOL><INDENT>permission = (role.get_name(), method, resource)<EOL><DEDENT>if permission not in self._allowed:<EOL><INDENT>self._allowed.append(permission)<EOL><DEDENT>", "docstring": "Add allowing rules.\n\n        :param role: Role of this rule.\n        :param method: Method to allow in rule, include GET, POST, PUT etc.\n        :param resource: Resource also view function.\n        :param with_children: Allow role's children in rule as well\n                              if with_children is `True`", "id": "f12671:c0:m1"}
{"signature": "def exempt(self, resource):", "body": "if resource not in self._exempt:<EOL><INDENT>self._exempt.append(resource)<EOL><DEDENT>", "docstring": "Exempt a view function from being checked permission\n\n        :param resource: The view function exempt from checking.", "id": "f12671:c0:m3"}
{"signature": "def allow(self, roles, methods, with_children=True):", "body": "def decorator(view_func):<EOL><INDENT>_methods = [m.upper() for m in methods]<EOL>for r, m, v in itertools.product(roles, _methods, [view_func.__name__]):<EOL><INDENT>self.before_acl['<STR_LIT>'].append((r, m, v, with_children))<EOL><DEDENT>return view_func<EOL><DEDENT>return decorator<EOL>", "docstring": "This is a decorator function.\n\n        You can allow roles to access the view func with it.\n\n        An example::\n\n            @app.route('/website/setting', methods=['GET', 'POST'])\n            @rbac.allow(['administrator', 'super_user'], ['GET', 'POST'])\n            def website_setting():\n                return Response('Setting page.')\n\n        :param roles: List, each name of roles. Please note that,\n                      `anonymous` is refered to anonymous.\n                      If you add `anonymous` to the rule,\n                      everyone can access the resource,\n                      unless you deny other roles.\n        :param methods: List, each name of methods.\n                        methods is valid in ['GET', 'POST', 'PUT', 'DELETE']\n        :param with_children: Whether allow children of roles as well.\n                              True by default.", "id": "f12671:c2:m9"}
{"signature": "def get_app(self, reference_app=None):", "body": "if reference_app is not None:<EOL><INDENT>return reference_app<EOL><DEDENT>if self.app is not None:<EOL><INDENT>return self.app<EOL><DEDENT>ctx = connection_stack.top<EOL>if ctx is not None:<EOL><INDENT>return ctx.app<EOL><DEDENT>raise RuntimeError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>", "docstring": "Helper method that implements the logic to look up an application.", "id": "f12671:c2:m12"}
{"signature": "def deny(self, role, method, resource, with_children=False):", "body": "if with_children:<EOL><INDENT>for r in role.get_children():<EOL><INDENT>permission = (r.get_name(), method, resource)<EOL>if permission not in self._denied:<EOL><INDENT>self._denied.append(permission)<EOL><DEDENT><DEDENT><DEDENT>permission = (role.get_name(), method, resource)<EOL>if permission not in self._denied:<EOL><INDENT>self._denied.append(permission)<EOL><DEDENT>", "docstring": "Add denying rules.\n\n        :param role: Role of this rule.\n        :param method: Method to deny in rule, include GET, POST, PUT etc.\n        :param resource: Resource also view function.\n        :param with_children: Deny role's children in rule as well\n                              if with_children is `True`", "id": "f12671:c0:m2"}
{"signature": "def is_denied(self, role, method, resource):", "body": "return (role, method, resource) in self._denied<EOL>", "docstring": "Check wherther role is denied to access resource\n\n        :param role: Role to be checked.\n        :param method: Method to be checked.\n        :param resource: View function to be checked.", "id": "f12671:c0:m5"}
{"signature": "def __init__(self, app=None, **kwargs):", "body": "self.acl = AccessControlList()<EOL>self.before_acl = {'<STR_LIT>': [], '<STR_LIT>': []}<EOL>self._role_model = kwargs.get('<STR_LIT>', RoleMixin)<EOL>self._user_model = kwargs.get('<STR_LIT>', UserMixin)<EOL>self._user_loader = kwargs.get('<STR_LIT>', lambda: current_user)<EOL>self.permission_failed_hook = kwargs.get('<STR_LIT>')<EOL>if app is not None:<EOL><INDENT>self.app = app<EOL>self.init_app(app)<EOL><DEDENT>else:<EOL><INDENT>self.app = None<EOL><DEDENT>", "docstring": "Initialize with app.", "id": "f12671:c2:m0"}
{"signature": "def setEnvironmentalData(self, humidity, temperature):", "body": "'''<STR_LIT>'''<EOL>hum_perc = humidity << <NUM_LIT:1><EOL>parts = math.fmod(temperature)<EOL>fractional = parts[<NUM_LIT:0>]<EOL>temperature = parts[<NUM_LIT:1>]<EOL>temp_high = ((temperature + <NUM_LIT>) << <NUM_LIT:9>)<EOL>temp_low = ((fractional / <NUM_LIT>) & <NUM_LIT>)<EOL>temp_conv = (temp_high | temp_low)<EOL>buf = [hum_perc, <NUM_LIT>,((temp_conv >> <NUM_LIT:8>) & <NUM_LIT>), (temp_conv & <NUM_LIT>)]<EOL>self._device.writeList(CCS811_ENV_DATA, buf)<EOL>", "docstring": "Humidity is stored as an unsigned 16 bits in 1/512%RH. The\n        default value is 50% = 0x64, 0x00. As an example 48.5%\n        humidity would be 0x61, 0x00.", "id": "f12674:c0:m6"}
{"signature": "def get_zip_class():", "body": "class ContextualZipFile(zipfile.ZipFile):<EOL><INDENT>def __enter__(self):<EOL><INDENT>return self<EOL><DEDENT>def __exit__(self, type, value, traceback):<EOL><INDENT>self.close<EOL><DEDENT><DEDENT>return zipfile.ZipFile if hasattr(zipfile.ZipFile, '<STR_LIT>') elseContextualZipFile<EOL>", "docstring": "Supplement ZipFile class to support context manager for Python 2.6", "id": "f12676:m3"}
{"signature": "def download_file_insecure(url, target):", "body": "try:<EOL><INDENT>from urllib.request import urlopen<EOL><DEDENT>except ImportError:<EOL><INDENT>from urllib2 import urlopen<EOL><DEDENT>src = dst = None<EOL>try:<EOL><INDENT>src = urlopen(url)<EOL>data = src.read()<EOL>dst = open(target, \"<STR_LIT:wb>\")<EOL>dst.write(data)<EOL><DEDENT>finally:<EOL><INDENT>if src:<EOL><INDENT>src.close()<EOL><DEDENT>if dst:<EOL><INDENT>dst.close()<EOL><DEDENT><DEDENT>", "docstring": "Use Python to download the file, even though it cannot authenticate the\nconnection.", "id": "f12676:m14"}
{"signature": "def download_file_powershell(url, target):", "body": "target = os.path.abspath(target)<EOL>cmd = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>\"<STR_LIT>\" % vars(),<EOL>]<EOL>_clean_check(cmd, target)<EOL>", "docstring": "Download the file at url to target using Powershell (which will validate\ntrust). Raise an exception if the command cannot complete.", "id": "f12676:m8"}
{"signature": "def get_time_estimate(self, start_latitude, start_longitude, customer_uuid=None, product_id=None):", "body": "endpoint = '<STR_LIT>'<EOL>query_parameters = {<EOL>'<STR_LIT>': start_latitude,<EOL>'<STR_LIT>': start_longitude<EOL>}<EOL>if customer_uuid is not None:<EOL><INDENT>query_parameters['<STR_LIT>'] = customer_uuid<EOL><DEDENT>elif product_id is not None:<EOL><INDENT>query_parameters['<STR_LIT>'] = product_id<EOL><DEDENT>elif customer_uuid is not None and product_id is not None:<EOL><INDENT>query_parameters['<STR_LIT>'] = customer_uuid<EOL>query_parameters['<STR_LIT>'] = product_id<EOL><DEDENT>return self.get_json(endpoint, '<STR_LIT:GET>', query_parameters, None, None)<EOL>", "docstring": "Get the ETA for Uber products.\n:param start_latitude: Starting latitude.\n:param start_longitude: Starting longitude.\n:param customer_uuid: (Optional) Customer unique ID.\n:param product_id: (Optional) If ETA is needed only for a specific product type.\n:return: JSON", "id": "f12685:c0:m3"}
{"signature": "def get_products(self, latitude, longitude):", "body": "endpoint = '<STR_LIT>'<EOL>query_parameters = {<EOL>'<STR_LIT>': latitude,<EOL>'<STR_LIT>': longitude<EOL>}<EOL>return self.get_json(endpoint, '<STR_LIT:GET>', query_parameters, None, None)<EOL>", "docstring": "Get a list of all Uber products based on latitude and longitude coordinates.\n:param latitude: Latitude for which product list is required.\n:param longitude: Longitude for which product list is required.\n:return: JSON", "id": "f12685:c0:m1"}
{"signature": "def get_price_estimate(self, start_latitude, start_longitude, end_latitude, end_longitude):", "body": "endpoint = '<STR_LIT>'<EOL>query_parameters = {<EOL>'<STR_LIT>': start_latitude,<EOL>'<STR_LIT>': start_longitude,<EOL>'<STR_LIT>': end_latitude,<EOL>'<STR_LIT>': end_longitude<EOL>}<EOL>return self.get_json(endpoint, '<STR_LIT:GET>', query_parameters, None, None)<EOL>", "docstring": "Returns the fare estimate based on two sets of coordinates.\n:param start_latitude: Starting latitude or latitude of pickup address.\n:param start_longitude: Starting longitude or longitude of pickup address.\n:param end_latitude: Ending latitude or latitude of destination address.\n:param end_longitude: Ending longitude or longitude of destination address.\n:return: JSON", "id": "f12685:c0:m2"}
{"signature": "def add_credentials(self, query_parameters):", "body": "query_parameters['<STR_LIT>'] = self.server_token<EOL>return query_parameters<EOL>", "docstring": "Adds the Uber server token to the query parameters to make an authorised request.\n:param query_parameters: Query parameters to be sent.\n:return: string", "id": "f12687:c0:m1"}
{"signature": "def get_json(self, uri_path, http_method='<STR_LIT:GET>', query_parameters=None, body=None, headers=None):", "body": "query_parameters = query_parameters or {}<EOL>headers = headers or {}<EOL>query_parameters = self.add_credentials(query_parameters)<EOL>uri = self.build_request(uri_path, query_parameters)<EOL>if http_method in ('<STR_LIT:POST>', '<STR_LIT>', '<STR_LIT>') and '<STR_LIT:Content-Type>' not in headers:<EOL><INDENT>headers['<STR_LIT:Content-Type>'] = '<STR_LIT:application/json>'<EOL><DEDENT>headers['<STR_LIT>'] = '<STR_LIT:application/json>'<EOL>response, content = self.client.request(<EOL>uri=uri,<EOL>method=http_method,<EOL>body=body,<EOL>headers=headers<EOL>)<EOL>self.check_status(content, response)<EOL>return json.loads(content.decode('<STR_LIT:utf-8>'))<EOL>", "docstring": "Fetches the JSON returned, after making the call and checking for errors.\n:param uri_path: Endpoint to be used to make a request.\n:param http_method: HTTP method to be used.\n:param query_parameters: Parameters to be added to the request.\n:param body: Optional body, if required.\n:param headers: Optional headers, if required.\n:return: JSON", "id": "f12687:c0:m5"}
{"signature": "def _parse(self, filename):", "body": "self.names = {}<EOL>with codecs.open(filename, encoding=\"<STR_LIT>\") as f:<EOL><INDENT>for line in f:<EOL><INDENT>if any(map(lambda c: <NUM_LIT> < ord(c) < <NUM_LIT>, line)):<EOL><INDENT>line = line.encode(\"<STR_LIT>\").decode(\"<STR_LIT>\")<EOL><DEDENT>self._eat_name_line(line.strip())<EOL><DEDENT><DEDENT>", "docstring": "Opens data file and for each line, calls _eat_name_line", "id": "f12691:c1:m1"}
{"signature": "def _most_popular_gender(self, name, counter):", "body": "if name not in self.names:<EOL><INDENT>return self.unknown_value<EOL><DEDENT>max_count, max_tie = (<NUM_LIT:0>, <NUM_LIT:0>)<EOL>best = self.names[name].keys()[<NUM_LIT:0>]<EOL>for gender, country_values in self.names[name].items():<EOL><INDENT>count, tie = counter(country_values)<EOL>if count > max_count or (count == max_count and tie > max_tie):<EOL><INDENT>max_count, max_tie, best = count, tie, gender<EOL><DEDENT><DEDENT>return best if max_count > <NUM_LIT:0> else self.unknown_value<EOL>", "docstring": "Finds the most popular gender for the given name counting by given counter", "id": "f12691:c1:m4"}
{"signature": "@groupify<EOL>def sizeClassifier(path, min_size=DEFAULTS['<STR_LIT>']):", "body": "filestat = _stat(path)<EOL>if stat.S_ISLNK(filestat.st_mode):<EOL><INDENT>return  <EOL><DEDENT>if filestat.st_size < min_size:<EOL><INDENT>return  <EOL><DEDENT>return filestat.st_size<EOL>", "docstring": "Sort a file into a group based on on-disk size.\n\n    :param paths: See :func:`fastdupes.groupify`\n\n    :param min_size: Files smaller than this size (in bytes) will be ignored.\n    :type min_size: :class:`__builtins__.int`\n\n    :returns: See :func:`fastdupes.groupify`\n\n    .. todo:: Rework the calling of :func:`~os.stat` to minimize the number of\n        calls. It's a fairly significant percentage of the time taken according\n        to the profiler.", "id": "f12694:m5"}
{"signature": "def find_dupes(paths, exact=False, ignores=None, min_size=<NUM_LIT:0>):", "body": "groups = {'<STR_LIT>': getPaths(paths, ignores)}<EOL>groups = groupBy(groups, sizeClassifier, '<STR_LIT>', min_size=min_size)<EOL>groups = groupBy(groups, hashClassifier, '<STR_LIT>', limit=HEAD_SIZE)<EOL>if exact:<EOL><INDENT>groups = groupBy(groups, groupByContent, fun_desc='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>groups = groupBy(groups, hashClassifier, fun_desc='<STR_LIT>')<EOL><DEDENT>return groups<EOL>", "docstring": "High-level code to walk a set of paths and find duplicate groups.\n\n    :param exact: Whether to compare file contents by hash or by reading\n                  chunks in parallel.\n    :type exact: :class:`~__builtins__.bool`\n\n    :param paths: See :meth:`~fastdupes.getPaths`\n    :param ignores: See :meth:`~fastdupes.getPaths`\n    :param min_size: See :meth:`~fastdupes.sizeClassifier`\n\n    :returns: A list of groups of files with identical contents\n    :rtype: ``[[path, ...], [path, ...]]``", "id": "f12694:m10"}
{"signature": "def groupBy(groups_in, classifier, fun_desc='<STR_LIT:?>', keep_uniques=False,<EOL>*args, **kwargs):", "body": "groups, count, group_count = {}, <NUM_LIT:0>, len(groups_in)<EOL>for pos, paths in enumerate(groups_in.values()):<EOL><INDENT>out.write(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (<EOL>pos + <NUM_LIT:1>, group_count, fun_desc, count, len(paths)<EOL>))<EOL>for key, group in list(classifier(paths, *args, **kwargs).items()):<EOL><INDENT>groups.setdefault(key, set()).update(group)<EOL>count += len(group)<EOL><DEDENT><DEDENT>if not keep_uniques:<EOL><INDENT>groups = dict([(x, groups[x]) for x in groups if len(groups[x]) > <NUM_LIT:1>])<EOL><DEDENT>out.write(\"<STR_LIT>\"<EOL>% (len(groups), fun_desc, count), newline=True)<EOL>return groups<EOL>", "docstring": "Subdivide groups of paths according to a function.\n\n    :param groups_in: Grouped sets of paths.\n    :type groups_in: :class:`~__builtins__.dict` of iterables\n\n    :param classifier: Function to group a list of paths by some attribute.\n    :type classifier: ``function(list, *args, **kwargs) -> str``\n\n    :param fun_desc: Human-readable term for what the classifier operates on.\n        (Used in log messages)\n    :type fun_desc: :class:`~__builtins__.str`\n\n    :param keep_uniques: If ``False``, discard groups with only one member.\n    :type keep_uniques: :class:`~__builtins__.bool`\n\n\n    :returns: A dict mapping classifier keys to groups of matches.\n    :rtype: :class:`~__builtins__.dict`\n\n\n    :attention: Grouping functions generally use a :class:`~__builtins__.set`\n        ``groups`` as extra protection against accidentally counting a given\n        file twice. (Complimentary to use of :func:`os.path.realpath` in\n        :func:`~fastdupes.getPaths`)\n\n    .. todo:: Find some way to bring back the file-by-file status text", "id": "f12694:m3"}
{"signature": "def hashFile(handle, want_hex=False, limit=None, chunk_size=CHUNK_SIZE):", "body": "fhash, read = hashlib.sha1(), <NUM_LIT:0><EOL>if isinstance(handle, str):<EOL><INDENT>handle = file(handle, '<STR_LIT:rb>')<EOL><DEDENT>if limit:<EOL><INDENT>chunk_size = min(chunk_size, limit)<EOL><DEDENT>for block in iter(lambda: handle.read(chunk_size), '<STR_LIT>'):<EOL><INDENT>fhash.update(block)<EOL>read += chunk_size<EOL>if <NUM_LIT:0> < limit <= read:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return want_hex and fhash.hexdigest() or fhash.digest()<EOL>", "docstring": "Generate a hash from a potentially long file.\n    Digesting will obey :const:`CHUNK_SIZE` to conserve memory.\n\n    :param handle: A file-like object or path to hash from.\n    :param want_hex: If ``True``, returned hash will be hex-encoded.\n    :type want_hex: :class:`~__builtins__.bool`\n\n    :param limit: Maximum number of bytes to read (rounded up to a multiple of\n        ``CHUNK_SIZE``)\n    :type limit: :class:`~__builtins__.int`\n\n    :param chunk_size: Size of :meth:`~__builtins__.file.read` operations\n        in bytes.\n    :type chunk_size: :class:`~__builtins__.int`\n\n\n    :rtype: :class:`~__builtins__.str`\n    :returns: A binary or hex-encoded SHA1 hash.\n\n    .. note:: It is your responsibility to close any file-like objects you pass\n        in", "id": "f12694:m1"}
{"signature": "def getPaths(roots, ignores=None):", "body": "paths, count, ignores = [], <NUM_LIT:0>, ignores or []<EOL>ignore_re = multiglob_compile(ignores, prefix=False)<EOL>for root in roots:<EOL><INDENT>root = os.path.realpath(root)<EOL>if os.path.isfile(root):<EOL><INDENT>paths.append(root)<EOL>continue<EOL><DEDENT>for fldr in os.walk(root):<EOL><INDENT>out.write(\"<STR_LIT>\"<EOL>% count)<EOL>for subdir in fldr[<NUM_LIT:1>]:<EOL><INDENT>dirpath = os.path.join(fldr[<NUM_LIT:0>], subdir)<EOL>if ignore_re.match(dirpath):<EOL><INDENT>fldr[<NUM_LIT:1>].remove(subdir)<EOL><DEDENT><DEDENT>for filename in fldr[<NUM_LIT:2>]:<EOL><INDENT>filepath = os.path.join(fldr[<NUM_LIT:0>], filename)<EOL>if ignore_re.match(filepath):<EOL><INDENT>continue  <EOL><DEDENT>paths.append(filepath)<EOL>count += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>out.write(\"<STR_LIT>\" % (len(paths)),<EOL>newline=True)<EOL>return paths<EOL>", "docstring": "Recursively walk a set of paths and return a listing of contained files.\n\n:param roots: Relative or absolute paths to files or folders.\n:type roots: :class:`~__builtins__.list` of :class:`~__builtins__.str`\n\n:param ignores: A list of :py:mod:`fnmatch` globs to avoid walking and\n    omit from results\n:type ignores: :class:`~__builtins__.list` of :class:`~__builtins__.str`\n\n:returns: Absolute paths to only files.\n:rtype: :class:`~__builtins__.list` of :class:`~__builtins__.str`\n\n.. todo:: Try to optimize the ignores matching. Running a regex on every\n   filename is a fairly significant percentage of the time taken according\n   to the profiler.", "id": "f12694:m2"}
{"signature": "def pruneUI(dupeList, mainPos=<NUM_LIT:1>, mainLen=<NUM_LIT:1>):", "body": "dupeList = sorted(dupeList)<EOL>print()<EOL>for pos, val in enumerate(dupeList):<EOL><INDENT>print(\"<STR_LIT>\" % (pos + <NUM_LIT:1>, val))<EOL><DEDENT>while True:<EOL><INDENT>choice = input(\"<STR_LIT>\" % (mainPos, mainLen)).strip()<EOL>if not choice:<EOL><INDENT>print (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>continue<EOL><DEDENT>elif choice.lower() == '<STR_LIT:all>':<EOL><INDENT>return []<EOL><DEDENT>try:<EOL><INDENT>out = [int(x) - <NUM_LIT:1> for x in choice.replace('<STR_LIT:U+002C>', '<STR_LIT:U+0020>').split()]<EOL>return [val for pos, val in enumerate(dupeList) if pos not in out]<EOL><DEDENT>except ValueError:<EOL><INDENT>print(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "Display a list of files and prompt for ones to be kept.\n\n    The user may enter ``all`` or one or more numbers separated by spaces\n    and/or commas.\n\n    .. note:: It is impossible to accidentally choose to keep none of the\n        displayed files.\n\n    :param dupeList: A list duplicate file paths\n    :param mainPos: Used to display \"set X of Y\"\n    :param mainLen: Used to display \"set X of Y\"\n    :type dupeList: :class:`~__builtins__.list`\n    :type mainPos: :class:`~__builtins__.int`\n    :type mainLen: :class:`~__builtins__.int`\n\n    :returns: A list of files to be deleted.\n    :rtype: :class:`~__builtins__.int`", "id": "f12694:m9"}
{"signature": "def __init__(self, fields, target=sys.stdout):", "body": "self.fields = fields<EOL>self.target = target<EOL>", "docstring": "Create a processor that prints the requested fields' values\n\n        This is useful for strings with newlines in them. Keep in mind that the\n        fields will be popped from the event dictionary, so they will not be\n        visible to anything (other processors and the logger itself) after this\n        processor has printed them.\n\n        :param fields: An iterable specifying the fields to print\n        :param target: A file-like object to print to", "id": "f12703:c4:m0"}
{"signature": "def __init__(self, field_map):", "body": "self.lexers = {<EOL>field: get_lexer_by_name(language)<EOL>for field, language in field_map.items()<EOL>}<EOL>", "docstring": "Create a processor that syntax highlights code in the event values\n\n        The syntax highlighting will use with ANSI terminal color codes.\n\n        :param field_map: A mapping with field names mapped to languages, e.g.\n                          ``{'body': 'json': 'soap_response': 'xml'}``", "id": "f12703:c3:m0"}
{"signature": "def strip_minidom_whitespace(node):", "body": "for child in node.childNodes:<EOL><INDENT>if child.nodeType == Node.TEXT_NODE:<EOL><INDENT>if child.nodeValue:<EOL><INDENT>child.nodeValue = child.nodeValue.strip()<EOL><DEDENT><DEDENT>elif child.nodeType == Node.ELEMENT_NODE:<EOL><INDENT>strip_minidom_whitespace(child)<EOL><DEDENT><DEDENT>", "docstring": "Strips all whitespace from a minidom XML node and its children\n\n    This operation is made in-place.", "id": "f12704:m0"}
{"signature": "def _mock_response(<EOL>self,<EOL>status_code=<NUM_LIT:200>,<EOL>content=b'<STR_LIT>',<EOL>raise_for_status=None):", "body": "mock_resp = Mock()<EOL>mock_resp.raise_for_status = Mock()<EOL>mock_resp.status_code = status_code<EOL>if raise_for_status:<EOL><INDENT>mock_resp.raise_for_status.side_effect = raise_for_status<EOL>return mock_resp<EOL><DEDENT>mock_resp.content = content<EOL>mock_resp.iter_content = Mock()<EOL>iter_result =  iter([bytes([b]) for b in content])<EOL>mock_resp.iter_content.return_value = iter_result<EOL>return mock_resp<EOL>", "docstring": "Build a mock for each response, include errors and content data", "id": "f12714:c0:m0"}
{"signature": "def load_game(self, jsonstr):", "body": "logging.debug(\"<STR_LIT>\")<EOL>logging.debug(\"<STR_LIT>\")<EOL>self._g = GameObject()<EOL>logging.debug(\"<STR_LIT>\".format(jsonstr))<EOL>self._g.from_json(jsonstr=jsonstr)<EOL>", "docstring": "load_game() takes a JSON string representing a game object and calls the underlying\ngame object (_g) to load the JSON. The underlying object will handle schema validation\nand transformation.\n\n:param jsonstr: A valid JSON string representing a GameObject (see above)\n\n:return: None", "id": "f12727:c0:m6"}
{"signature": "def new_game(self, mode=None):", "body": "<EOL>self._g = GameObject()<EOL>_mode = mode or \"<STR_LIT>\"<EOL>logging.debug(\"<STR_LIT>\".format(_mode))<EOL>mode_info = self._g.get_game_type(gametype=_mode)<EOL>logging.debug(\"<STR_LIT>\".format(mode_info, type(mode_info)))<EOL>if not mode_info:<EOL><INDENT>self._g = None<EOL>raise ValueError('<STR_LIT>'.format(_mode))<EOL><DEDENT>logging.debug(\"<STR_LIT>\".format(mode_info.digitType))<EOL>dw = DigitWord(wordtype=mode_info.digitType)<EOL>dw.random(mode_info.digits)<EOL>logging.debug(\"<STR_LIT>\".format(dw.word))<EOL>_game = {<EOL>\"<STR_LIT:key>\": str(uuid.uuid4()),<EOL>\"<STR_LIT:status>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": int(time()) + <NUM_LIT>,<EOL>\"<STR_LIT>\": dw.word,<EOL>\"<STR_LIT>\": _mode,<EOL>\"<STR_LIT>\": mode_info.guesses_allowed,<EOL>\"<STR_LIT>\": <NUM_LIT:0><EOL>}<EOL>logging.debug(\"<STR_LIT>\".format(_game))<EOL>self._g.from_json(jsonstr=json.dumps(_game))<EOL>return self._g.to_json()<EOL>", "docstring": "new_game() creates a new game. Docs TBC.\n\n:return: JSON String containing the game object.", "id": "f12727:c0:m5"}
{"signature": "def guess(self, *args):", "body": "logging.debug(\"<STR_LIT>\")<EOL>logging.debug(\"<STR_LIT>\")<EOL>self._validate_game_object(op=\"<STR_LIT>\")<EOL>logging.debug(\"<STR_LIT>\")<EOL>_return_results = {<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": [],<EOL>\"<STR_LIT:status>\": \"<STR_LIT>\"<EOL>}<EOL>logging.debug(\"<STR_LIT>\")<EOL>if self._g.status.lower() == \"<STR_LIT>\":<EOL><INDENT>_return_results[\"<STR_LIT:message>\"] = self._start_again(\"<STR_LIT>\")<EOL><DEDENT>elif self._g.status.lower() == \"<STR_LIT>\":<EOL><INDENT>_return_results[\"<STR_LIT:message>\"] = self._start_again(\"<STR_LIT>\")<EOL><DEDENT>elif self._g.guesses_remaining < <NUM_LIT:1>:<EOL><INDENT>_return_results[\"<STR_LIT:message>\"] = self._start_again(\"<STR_LIT>\")<EOL><DEDENT>elif self._g.ttl < time():<EOL><INDENT>_return_results[\"<STR_LIT:message>\"] = self._start_again(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>logging.debug(\"<STR_LIT>\")<EOL>_wordtype = DigitWord.HEXDIGIT if self._g.mode.lower() == '<STR_LIT>' else DigitWord.DIGIT<EOL>guess = DigitWord(*args, wordtype=_wordtype)<EOL>logging.debug(\"<STR_LIT>\")<EOL>self._g.guesses_remaining -= <NUM_LIT:1><EOL>self._g.guesses_made += <NUM_LIT:1><EOL>logging.debug(\"<STR_LIT>\")<EOL>_return_results[\"<STR_LIT>\"] = []<EOL>_return_results[\"<STR_LIT>\"] = <NUM_LIT:0><EOL>_return_results[\"<STR_LIT>\"] = <NUM_LIT:0><EOL>logging.debug(\"<STR_LIT>\")<EOL>for i in self._g.answer.compare(guess):<EOL><INDENT>logging.debug(\"<STR_LIT>\".format(i.index))<EOL>if i.match is True:<EOL><INDENT>logging.debug(\"<STR_LIT>\")<EOL>_return_results[\"<STR_LIT>\"] += <NUM_LIT:1><EOL><DEDENT>elif i.in_word is True:<EOL><INDENT>logging.debug(\"<STR_LIT>\")<EOL>_return_results[\"<STR_LIT>\"] += <NUM_LIT:1><EOL><DEDENT>logging.debug(\"<STR_LIT>\")<EOL>_return_results[\"<STR_LIT>\"].append(i.get_object())<EOL><DEDENT>logging.debug(\"<STR_LIT>\")<EOL>if _return_results[\"<STR_LIT>\"] == 
len(self._g.answer.word):<EOL><INDENT>logging.debug(\"<STR_LIT>\")<EOL>self._g.status = \"<STR_LIT>\"<EOL>self._g.guesses_remaining = <NUM_LIT:0><EOL>_return_results[\"<STR_LIT:message>\"] = \"<STR_LIT>\"\"<STR_LIT>\".format(self._get_text_answer())<EOL><DEDENT>elif self._g.guesses_remaining < <NUM_LIT:1>:<EOL><INDENT>logging.debug(\"<STR_LIT>\")<EOL>self._g.status = \"<STR_LIT>\"<EOL>_return_results[\"<STR_LIT:message>\"] = \"<STR_LIT>\"\"<STR_LIT:{}>\".format(self._get_text_answer())<EOL><DEDENT>_return_results[\"<STR_LIT:status>\"] = self._g.status<EOL><DEDENT>logging.debug(\"<STR_LIT>\")<EOL>return _return_results<EOL>", "docstring": "guess() allows a guess to be made. Before the guess is made, the method\nchecks to see if the game has been won, lost, or there are no tries\nremaining. It then creates a return object stating the number of bulls\n(direct matches), cows (indirect matches), an analysis of the guess (a\nlist of analysis objects), and a status.\n\n:param args: any number of integers (or string representations of integers)\nto the number of Digits in the answer; i.e. in normal mode, there would be\na DigitWord to guess of 4 digits, so guess would expect guess(1, 2, 3, 4)\nand a shorter (guess(1, 2)) or longer (guess(1, 2, 3, 4, 5)) sequence will\nraise an exception.\n\n:return: a JSON object containing the analysis of the guess:\n\n{\n    \"cows\": {\"type\": \"integer\"},\n    \"bulls\": {\"type\": \"integer\"},\n    \"analysis\": {\"type\": \"array of DigitWordAnalysis\"},\n    \"status\": {\"type\": \"string\"}\n}", "id": "f12727:c0:m8"}
{"signature": "def _validate_game_object(self, op=\"<STR_LIT>\"):", "body": "if self._g is None:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if not isinstance(self._g, GameObject):<EOL><INDENT>raise TypeError(<EOL>\"<STR_LIT>\".format(op)<EOL>)<EOL><DEDENT>", "docstring": "A helper method to provide validation of the game object (_g). If the\ngame object does not exist or if (for any reason) the object is not a GameObject,\nthen an exception will be raised.\n\n:param op: A string describing the operation (e.g. guess, save, etc.) taking place\n:return: Nothing", "id": "f12727:c0:m11"}
{"signature": "def load_modes(self, input_modes=None):", "body": "<EOL>_modes = [<EOL>GameMode(<EOL>mode=\"<STR_LIT>\", priority=<NUM_LIT:2>, digits=<NUM_LIT:4>, digit_type=DigitWord.DIGIT, guesses_allowed=<NUM_LIT:10><EOL>),<EOL>GameMode(<EOL>mode=\"<STR_LIT>\", priority=<NUM_LIT:1>, digits=<NUM_LIT:3>, digit_type=DigitWord.DIGIT, guesses_allowed=<NUM_LIT:6><EOL>),<EOL>GameMode(<EOL>mode=\"<STR_LIT>\", priority=<NUM_LIT:3>, digits=<NUM_LIT:6>, digit_type=DigitWord.DIGIT, guesses_allowed=<NUM_LIT:6><EOL>),<EOL>GameMode(<EOL>mode=\"<STR_LIT>\", priority=<NUM_LIT:4>, digits=<NUM_LIT:4>, digit_type=DigitWord.HEXDIGIT, guesses_allowed=<NUM_LIT:10><EOL>)<EOL>]<EOL>if input_modes is not None:<EOL><INDENT>if not isinstance(input_modes, list):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>for mode in input_modes:<EOL><INDENT>if not isinstance(mode, GameMode):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>_modes.append(mode)<EOL><DEDENT><DEDENT>self._game_modes = copy.deepcopy(_modes)<EOL>", "docstring": "Loads modes (GameMode objects) to be supported by the game object. Four default\nmodes are provided (normal, easy, hard, and hex) but others could be provided\neither by calling load_modes directly or passing a list of GameMode objects to\nthe instantiation call.\n\n:param input_modes: A list of GameMode objects; nb: even if only one new GameMode\nobject is provided, it MUST be passed as a list - for example, passing GameMode gm1\nwould require passing [gm1] NOT gm1.\n\n:return: A list of GameMode objects (both defaults and any added).", "id": "f12730:c0:m6"}
{"signature": "def __init__(self, game_json=None, game_modes=None, mode=None):", "body": "<EOL>self._game_modes = None<EOL>self.load_modes(input_modes=game_modes)<EOL>self.game = None<EOL>self.load(game_json=game_json, mode=mode)<EOL>", "docstring": "Initialize a GameController object to allow the game to be played. The controller\ncreates a game object (see GameObject.py) and allows guesses to be made against\nthe 'hidden' object.\n\n:param game_json: <optional>, if provided is a JSON serialized representation\nof a game; if not provided a new game is instantiated.\n:param game_modes: <optional>, a list of GameMode objects representing game modes.\n:param mode: <optional>, the mode the game should be played in; may be a GameMode\nobject or a str representing the name of a GameMode object already defined (e.g.\npassed via game_modes).", "id": "f12730:c0:m0"}
{"signature": "@property<EOL><INDENT>def digit_type(self):<DEDENT>", "body": "return self._digit_type<EOL>", "docstring": "The digit_type is a flag used to specify the type of digit to be used; for example, a\ndigit (DigitWord.DIGIT) enables a single digit between 0 and 9, while a hex digit\n(DigitWord.HEXDIGIT) enables a single digit between 0 and F.\n\n:return: <int>", "id": "f12731:c0:m9"}
{"signature": "@property<EOL><INDENT>def digits(self):<DEDENT>", "body": "return self._digits<EOL>", "docstring": "The number of digits used by the DigitWord used in this mode; e.g. a value of 3 would\nindicate there are three digits (e.g. 1, 2, and 3), while a value of 5 would indicate\nfive values (e.g. 0, 1, 2, 3, 4).\n\n:return: <int>", "id": "f12731:c0:m7"}
{"signature": "def __init__(<EOL>self,<EOL>mode=None,<EOL>source_game=None<EOL>):", "body": "<EOL>if mode is None:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if not isinstance(mode, GameMode):<EOL><INDENT>raise TypeError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>self._key = None                    <EOL>self._status = None                 <EOL>self._ttl = None                    <EOL>self._answer = None                 <EOL>self._mode = None                   <EOL>self._guesses_remaining = None      <EOL>self._guesses_made = None           <EOL>if source_game:<EOL><INDENT>self.load(source=source_game)<EOL><DEDENT>else:<EOL><INDENT>self.new(mode=mode)<EOL><DEDENT>", "docstring": "Initialize a game object to hold the state, properties, and control of the game.\n\n:param mode: <required> A GameMode object defining the game play mode.\n:param source_game: <optional> A JSON Serialized representation of the game.", "id": "f12733:c0:m0"}
{"signature": "def dump(self):", "body": "return {<EOL>\"<STR_LIT:key>\": self._key,<EOL>\"<STR_LIT:status>\": self._status,<EOL>\"<STR_LIT>\": self._ttl,<EOL>\"<STR_LIT>\": self._answer.word,<EOL>\"<STR_LIT>\": self._mode.dump(),<EOL>\"<STR_LIT>\": self._guesses_made<EOL>}<EOL>", "docstring": "Dump (return) a dict representation of the GameObject. This is a Python\ndict and is NOT serialized. NB: the answer (a DigitWord object) and the\nmode (a GameMode object) are converted to python objects of a list and\ndict respectively.\n\n:return: python <dict> of the GameObject as detailed above.", "id": "f12733:c0:m10"}
{"signature": "def new(self, mode):", "body": "dw = DigitWord(wordtype=mode.digit_type)<EOL>dw.random(mode.digits)<EOL>self._key = str(uuid.uuid4())<EOL>self._status = \"<STR_LIT>\"<EOL>self._ttl = <NUM_LIT><EOL>self._answer = dw<EOL>self._mode = mode<EOL>self._guesses_remaining = mode.guesses_allowed<EOL>self._guesses_made = <NUM_LIT:0><EOL>", "docstring": "Create a new instance of a game. Note, a mode MUST be provided and MUST be of\ntype GameMode.\n\n:param mode: <required>", "id": "f12733:c0:m12"}
{"signature": "def render_pep440_post(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] .\n\n    The \".dev0\" means dirty. Note that .dev0 sorts backwards\n    (a dirty tree will appear \"older\" than the corresponding clean one),\n    but you shouldn't be releasing software with -dirty anyways.\n\n    Exceptions:\n    1: no tags. 0.postDISTANCE[.dev0]", "id": "f12746:m11"}
{"signature": "def render(pieces, style):", "body": "if pieces[\"<STR_LIT:error>\"]:<EOL><INDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": pieces.get(\"<STR_LIT>\"),<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT:error>\": pieces[\"<STR_LIT:error>\"],<EOL>\"<STR_LIT:date>\": None}<EOL><DEDENT>if not style or style == \"<STR_LIT:default>\":<EOL><INDENT>style = \"<STR_LIT>\"  <EOL><DEDENT>if style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_pre(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_post(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_old(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_git_describe(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_git_describe_long(pieces)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % style)<EOL><DEDENT>return {\"<STR_LIT:version>\": rendered, \"<STR_LIT>\": pieces[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": pieces[\"<STR_LIT>\"], \"<STR_LIT:error>\": None,<EOL>\"<STR_LIT:date>\": pieces.get(\"<STR_LIT:date>\")}<EOL>", "docstring": "Render the given version pieces into the requested style.", "id": "f12746:m15"}
{"signature": "def render_git_describe(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL><DEDENT>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[-DISTANCE-gHEX][-dirty].\n\n    Like 'git describe --tags --dirty --always'.\n\n    Exceptions:\n    1: no tags. HEX[-dirty]  (note: no 'g' prefix)", "id": "f12746:m13"}
{"signature": "def render_pep440(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += plus_or_dot(pieces)<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"],<EOL>pieces[\"<STR_LIT>\"])<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>return rendered<EOL>", "docstring": "Build up version string, with post-release \"local version identifier\".\n\n    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n    Exceptions:\n    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]", "id": "f12746:m9"}
{"signature": "def plus_or_dot(pieces):", "body": "if \"<STR_LIT:+>\" in pieces.get(\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>return \"<STR_LIT:.>\"<EOL><DEDENT>return \"<STR_LIT:+>\"<EOL>", "docstring": "Return a + if we don't already have one, else return a .", "id": "f12746:m8"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):", "body": "GITS = [\"<STR_LIT>\"]<EOL>if sys.platform == \"<STR_LIT:win32>\":<EOL><INDENT>GITS = [\"<STR_LIT>\", \"<STR_LIT>\"]<EOL><DEDENT>out, rc = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\"], cwd=root,<EOL>hide_stderr=True)<EOL>if rc != <NUM_LIT:0>:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % root)<EOL><DEDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>describe_out, rc = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\" % tag_prefix],<EOL>cwd=root)<EOL>if describe_out is None:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>describe_out = describe_out.strip()<EOL>full_out, rc = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\"], cwd=root)<EOL>if full_out is None:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>full_out = full_out.strip()<EOL>pieces = {}<EOL>pieces[\"<STR_LIT>\"] = full_out<EOL>pieces[\"<STR_LIT>\"] = full_out[:<NUM_LIT:7>]  <EOL>pieces[\"<STR_LIT:error>\"] = None<EOL>git_describe = describe_out<EOL>dirty = git_describe.endswith(\"<STR_LIT>\")<EOL>pieces[\"<STR_LIT>\"] = dirty<EOL>if dirty:<EOL><INDENT>git_describe = git_describe[:git_describe.rindex(\"<STR_LIT>\")]<EOL><DEDENT>if \"<STR_LIT:->\" in git_describe:<EOL><INDENT>mo = re.search(r'<STR_LIT>', git_describe)<EOL>if not mo:<EOL><INDENT>pieces[\"<STR_LIT:error>\"] = (\"<STR_LIT>\"<EOL>% describe_out)<EOL>return pieces<EOL><DEDENT>full_tag = mo.group(<NUM_LIT:1>)<EOL>if not full_tag.startswith(tag_prefix):<EOL><INDENT>if verbose:<EOL><INDENT>fmt = \"<STR_LIT>\"<EOL>print(fmt % (full_tag, tag_prefix))<EOL><DEDENT>pieces[\"<STR_LIT:error>\"] = (\"<STR_LIT>\"<EOL>% (full_tag, tag_prefix))<EOL>return pieces<EOL><DEDENT>pieces[\"<STR_LIT>\"] = full_tag[len(tag_prefix):]<EOL>pieces[\"<STR_LIT>\"] = 
int(mo.group(<NUM_LIT:2>))<EOL>pieces[\"<STR_LIT>\"] = mo.group(<NUM_LIT:3>)<EOL><DEDENT>else:<EOL><INDENT>pieces[\"<STR_LIT>\"] = None<EOL>count_out, rc = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>cwd=root)<EOL>pieces[\"<STR_LIT>\"] = int(count_out)  <EOL><DEDENT>date = run_command(GITS, [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>cwd=root)[<NUM_LIT:0>].strip()<EOL>pieces[\"<STR_LIT:date>\"] = date.strip().replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:T>\", <NUM_LIT:1>).replace(\"<STR_LIT:U+0020>\", \"<STR_LIT>\", <NUM_LIT:1>)<EOL>return pieces<EOL>", "docstring": "Get version from 'git describe' in the root of the source tree.\n\n    This only gets called if the git-archive 'subst' keywords were *not*\n    expanded, and _version.py hasn't already been rewritten with a short\n    version string, meaning we're inside a checked out source tree.", "id": "f12746:m7"}
{"signature": "def register_vcs_handler(vcs, method):  ", "body": "def decorate(f):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if vcs not in HANDLERS:<EOL><INDENT>HANDLERS[vcs] = {}<EOL><DEDENT>HANDLERS[vcs][method] = f<EOL>return f<EOL><DEDENT>return decorate<EOL>", "docstring": "Decorator to mark a method as the handler for a particular VCS.", "id": "f12746:m2"}
{"signature": "def render_pep440_old(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"] or pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>return rendered<EOL>", "docstring": "TAG[.postDISTANCE[.dev0]] .\n\n    The \".dev0\" means dirty.\n\n    Eexceptions:\n    1: no tags. 0.postDISTANCE[.dev0]", "id": "f12746:m12"}
{"signature": "def versions_from_parentdir(parentdir_prefix, root, verbose):", "body": "rootdirs = []<EOL>for i in range(<NUM_LIT:3>):<EOL><INDENT>dirname = os.path.basename(root)<EOL>if dirname.startswith(parentdir_prefix):<EOL><INDENT>return {\"<STR_LIT:version>\": dirname[len(parentdir_prefix):],<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": None, \"<STR_LIT:date>\": None}<EOL><DEDENT>else:<EOL><INDENT>rootdirs.append(root)<EOL>root = os.path.dirname(root)  <EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" %<EOL>(str(rootdirs), parentdir_prefix))<EOL><DEDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL>", "docstring": "Try to determine the version from the parent directory name.\n\n    Source tarballs conventionally unpack into a directory that includes both\n    the project name and a version string. We will also support searching up\n    two directory levels for an appropriately named parent directory", "id": "f12746:m4"}
{"signature": "def render_git_describe_long(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>rendered += \"<STR_LIT>\" % (pieces[\"<STR_LIT>\"], pieces[\"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL><DEDENT>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\"<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG-DISTANCE-gHEX[-dirty].\n\n    Like 'git describe --tags --dirty --always -long'.\n    The distance/hash is unconditional.\n\n    Exceptions:\n    1: no tags. HEX[-dirty]  (note: no 'g' prefix)", "id": "f12746:m14"}
{"signature": "def _parse_document_id(elm_tree):", "body": "xpath = '<STR_LIT>'<EOL>return [x for x in elm_tree.xpath(xpath, namespaces=COLLECTION_NSMAP)][<NUM_LIT:0>]<EOL>", "docstring": "Given the parsed xml to an `ElementTree`,\n    parse the id from the content.", "id": "f12747:m0"}
{"signature": "def parse_litezip(path):", "body": "struct = [parse_collection(path)]<EOL>struct.extend([parse_module(x) for x in path.iterdir()<EOL>if x.is_dir() and x.name.startswith('<STR_LIT:m>')])<EOL>return tuple(sorted(struct))<EOL>", "docstring": "Parse a litezip file structure to a data structure given the path\n    to the litezip directory.", "id": "f12747:m6"}
{"signature": "def parse_collection(path, excludes=None):", "body": "file = path / COLLECTION_FILENAME<EOL>if not file.exists():<EOL><INDENT>raise MissingFile(file)<EOL><DEDENT>id = _parse_document_id(etree.parse(file.open()))<EOL>excludes = excludes or []<EOL>excludes.extend([<EOL>lambda filepath: filepath.name == COLLECTION_FILENAME,<EOL>lambda filepath: filepath.is_dir(),<EOL>])<EOL>resources_paths = _find_resources(path, excludes=excludes)<EOL>resources = tuple(_resource_from_path(res) for res in resources_paths)<EOL>return Collection(id, file, resources)<EOL>", "docstring": "Parse a file structure to a data structure given the path to\n    a collection directory.", "id": "f12747:m5"}
{"signature": "def validate_content(*objs):", "body": "from .main import Collection, Module<EOL>validator = {<EOL>Collection: cnxml.validate_collxml,<EOL>Module: cnxml.validate_cnxml,<EOL>}[type(objs[<NUM_LIT:0>])]<EOL>return validator(*[obj.file for obj in objs])<EOL>", "docstring": "Runs the correct validator for given `obj`ects. Assumes all same type", "id": "f12752:m1"}
{"signature": "def validate_litezip(struct):", "body": "msgs = []<EOL>def _fmt_err(err):<EOL><INDENT>return (Path(err.filename), \"<STR_LIT>\".format(*(err[<NUM_LIT:1>:])))<EOL><DEDENT>obj_by_type = {}<EOL>for obj in struct:<EOL><INDENT>if not is_valid_identifier(obj.id):<EOL><INDENT>msg = (obj.file.parent,<EOL>\"<STR_LIT>\".format(obj.id),)<EOL>logger.info(\"<STR_LIT>\".format(*msg))<EOL>msgs.append(msg)<EOL><DEDENT>obj_by_type.setdefault(type(obj), []).append(obj)<EOL><DEDENT>for obtype in obj_by_type:<EOL><INDENT>content_msgs = list([_fmt_err(err) for err in<EOL>validate_content(*obj_by_type[obtype])])<EOL>for msg in content_msgs:<EOL><INDENT>logger.info(\"<STR_LIT>\".format(*msg))<EOL><DEDENT>msgs.extend(content_msgs)<EOL><DEDENT>return msgs<EOL>", "docstring": "Validate the given litezip as `struct`.\n    Returns a list of validation messages.", "id": "f12752:m2"}
{"signature": "def convert_completezip(path):", "body": "for filepath in path.glob('<STR_LIT>'):<EOL><INDENT>filepath.rename(filepath.parent / '<STR_LIT>')<EOL>logger.debug('<STR_LIT>'.format(filepath))<EOL><DEDENT>for filepath in path.glob('<STR_LIT>'):<EOL><INDENT>filepath.unlink()<EOL><DEDENT>return parse_litezip(path)<EOL>", "docstring": "Converts a completezip file structure to a litezip file structure.\n    Returns a litezip data structure.", "id": "f12753:m0"}
{"signature": "def configure_logging(config):", "body": "dictConfig(config)<EOL>", "docstring": "Configure logging given a dictified configuration.", "id": "f12755:m0"}
{"signature": "def orthogonal(shape, scale=<NUM_LIT>):", "body": "flat_shape = (shape[<NUM_LIT:0>], np.prod(shape[<NUM_LIT:1>:]))<EOL>a = np.random.normal(<NUM_LIT:0.0>, <NUM_LIT:1.0>, flat_shape)<EOL>u, _, v = np.linalg.svd(a, full_matrices=False)<EOL>q = u if u.shape == flat_shape else v <EOL>q = q.reshape(shape)<EOL>return sharedX(scale * q[:shape[<NUM_LIT:0>], :shape[<NUM_LIT:1>]])<EOL>", "docstring": "benanne lasagne ortho init (faster than qr approach)", "id": "f12769:m2"}
{"signature": "def execute(mp):", "body": "<EOL>with mp.open(\"<STR_LIT>\", resampling=\"<STR_LIT>\") as raster_file:<EOL><INDENT>if raster_file.is_empty():<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>dem = raster_file.read()<EOL><DEDENT>tags = {<EOL><NUM_LIT:1>: {\"<STR_LIT>\": True},<EOL>\"<STR_LIT>\": \"<STR_LIT:value>\"}<EOL>return dem, tags<EOL>", "docstring": "User defined process.", "id": "f12778:m0"}
{"signature": "def execute(mp):", "body": "<EOL>with mp.open(mp.params[\"<STR_LIT:input>\"][\"<STR_LIT>\"]) as vector_file:<EOL><INDENT>return [<EOL>dict(<EOL>geometry=feature[\"<STR_LIT>\"],<EOL>properties=dict(<EOL>name=feature[\"<STR_LIT>\"].get(\"<STR_LIT>\", None),<EOL>id=feature[\"<STR_LIT>\"].get(\"<STR_LIT>\", None),<EOL>area=shape(feature[\"<STR_LIT>\"]).area<EOL>)<EOL>)<EOL>for feature in vector_file.read()<EOL>]<EOL><DEDENT>", "docstring": "User defined process.", "id": "f12782:m0"}
{"signature": "def extract_contours(array, tile, interval=<NUM_LIT:100>, field='<STR_LIT>', base=<NUM_LIT:0>):", "body": "import matplotlib.pyplot as plt<EOL>levels = _get_contour_values(<EOL>array.min(), array.max(), interval=interval, base=base)<EOL>if not levels:<EOL><INDENT>return []<EOL><DEDENT>contours = plt.contour(array, levels)<EOL>index = <NUM_LIT:0><EOL>out_contours = []<EOL>for level in range(len(contours.collections)):<EOL><INDENT>elevation = levels[index]<EOL>index += <NUM_LIT:1><EOL>paths = contours.collections[level].get_paths()<EOL>for path in paths:<EOL><INDENT>out_coords = [<EOL>(<EOL>tile.left + (y * tile.pixel_x_size),<EOL>tile.top - (x * tile.pixel_y_size),<EOL>)<EOL>for x, y in zip(path.vertices[:, <NUM_LIT:1>], path.vertices[:, <NUM_LIT:0>])<EOL>]<EOL>if len(out_coords) >= <NUM_LIT:2>:<EOL><INDENT>out_contours.append(<EOL>dict(<EOL>properties={field: elevation},<EOL>geometry=mapping(LineString(out_coords))<EOL>)<EOL>)<EOL><DEDENT><DEDENT><DEDENT>return out_contours<EOL>", "docstring": "Extract contour lines from an array.\n\nParameters\n----------\narray : array\n    input elevation data\ntile : Tile\n    tile covering the array\ninterval : integer\n    elevation value interval when drawing contour lines\nfield : string\n    output field name containing elevation value\nbase : integer\n    elevation base value the intervals are computed from\n\nReturns\n-------\ncontours : iterable\n    contours as GeoJSON-like pairs of properties and geometry", "id": "f12801:m0"}
{"signature": "def hillshade(elevation, tile, azimuth=<NUM_LIT>, altitude=<NUM_LIT>, z=<NUM_LIT:1.0>, scale=<NUM_LIT:1.0>):", "body": "azimuth = float(azimuth)<EOL>altitude = float(altitude)<EOL>z = float(z)<EOL>scale = float(scale)<EOL>xres = tile.tile.pixel_x_size<EOL>yres = -tile.tile.pixel_y_size<EOL>slope, aspect = calculate_slope_aspect(<EOL>elevation, xres, yres, z=z, scale=scale)<EOL>deg2rad = math.pi / <NUM_LIT><EOL>shaded = np.sin(altitude * deg2rad) * np.sin(slope)+ np.cos(altitude * deg2rad) * np.cos(slope)* np.cos((azimuth - <NUM_LIT>) * deg2rad - aspect)<EOL>shaded = (((shaded+<NUM_LIT:1.0>)/<NUM_LIT:2>)*-<NUM_LIT>).astype(\"<STR_LIT>\")<EOL>return ma.masked_array(<EOL>data=np.pad(shaded, <NUM_LIT:1>, mode='<STR_LIT>'), mask=elevation.mask<EOL>)<EOL>", "docstring": "Return hillshaded numpy array.\n\nParameters\n----------\nelevation : array\n    input elevation data\ntile : Tile\n    tile covering the array\nz : float\n    vertical exaggeration factor\nscale : float\n    scale factor of pixel size units versus height units (insert 112000\n    when having elevation values in meters in a geodetic projection)", "id": "f12802:m1"}
{"signature": "def get_zoom_levels(process_zoom_levels=None, init_zoom_levels=None):", "body": "process_zoom_levels = _validate_zooms(process_zoom_levels)<EOL>if init_zoom_levels is None:<EOL><INDENT>return process_zoom_levels<EOL><DEDENT>else:<EOL><INDENT>init_zoom_levels = _validate_zooms(init_zoom_levels)<EOL>if not set(init_zoom_levels).issubset(set(process_zoom_levels)):<EOL><INDENT>raise MapcheteConfigError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>return init_zoom_levels<EOL><DEDENT>", "docstring": "Validate and return zoom levels.", "id": "f12805:m2"}
{"signature": "def snap_bounds(bounds=None, pyramid=None, zoom=None):", "body": "if not isinstance(bounds, (tuple, list)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if len(bounds) != <NUM_LIT:4>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(pyramid, BufferedTilePyramid):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>bounds = Bounds(*bounds)<EOL>lb = pyramid.tile_from_xy(bounds.left, bounds.bottom, zoom, on_edge_use=\"<STR_LIT>\").bounds<EOL>rt = pyramid.tile_from_xy(bounds.right, bounds.top, zoom, on_edge_use=\"<STR_LIT>\").bounds<EOL>return Bounds(lb.left, lb.bottom, rt.right, rt.top)<EOL>", "docstring": "Snaps bounds to tiles boundaries of specific zoom level.\n\nParameters\n----------\nbounds : bounds to be snapped\npyramid : TilePyramid\nzoom : int\n\nReturns\n-------\nBounds(left, bottom, right, top)", "id": "f12805:m3"}
{"signature": "def bounds_at_zoom(self, zoom=None):", "body": "return () if self.area_at_zoom(zoom).is_empty else Bounds(<EOL>*self.area_at_zoom(zoom).bounds)<EOL>", "docstring": "Return process bounds for zoom level.\n\nParameters\n----------\nzoom : integer or list\n\nReturns\n-------\nprocess bounds : tuple\n    left, bottom, right, top", "id": "f12805:c0:m13"}
{"signature": "def __init__(<EOL>self, input_config, zoom=None, bounds=None, single_input_file=None,<EOL>mode=\"<STR_LIT>\", debug=False<EOL>):", "body": "<EOL>self._raw = _map_to_new_config(_config_to_dict(input_config))<EOL>self._raw[\"<STR_LIT>\"] = zoom<EOL>self._raw[\"<STR_LIT>\"] = bounds<EOL>self._cache_area_at_zoom = {}<EOL>self._cache_full_process_area = None<EOL>try:<EOL><INDENT>validate_values(self._raw, _MANDATORY_PARAMETERS)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise MapcheteConfigError(e)<EOL><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>self.config_dir = self._raw[\"<STR_LIT>\"]<EOL>self.process_name = self._raw[\"<STR_LIT>\"]<EOL>self.process_func<EOL>logger.debug(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>process_metatiling = self._raw[\"<STR_LIT>\"].get(\"<STR_LIT>\", <NUM_LIT:1>)<EOL>output_metatiling = self._raw[\"<STR_LIT>\"].get(<EOL>\"<STR_LIT>\", process_metatiling)<EOL>if output_metatiling > process_metatiling:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>self.process_pyramid = BufferedTilePyramid(<EOL>self._raw[\"<STR_LIT>\"][\"<STR_LIT>\"],<EOL>metatiling=process_metatiling,<EOL>pixelbuffer=self._raw[\"<STR_LIT>\"].get(\"<STR_LIT>\", <NUM_LIT:0>))<EOL>self.output_pyramid = BufferedTilePyramid(<EOL>self._raw[\"<STR_LIT>\"][\"<STR_LIT>\"],<EOL>metatiling=output_metatiling,<EOL>pixelbuffer=self._raw[\"<STR_LIT>\"].get(\"<STR_LIT>\", <NUM_LIT:0>))<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.exception(e)<EOL>raise MapcheteConfigError(e)<EOL><DEDENT>if mode not in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>raise MapcheteConfigError(\"<STR_LIT>\" % mode)<EOL><DEDENT>self.mode = mode<EOL>logger.debug(\"<STR_LIT>\")<EOL>self._params_at_zoom = _raw_at_zoom(self._raw, self.init_zoom_levels)<EOL>logger.debug(\"<STR_LIT>\")<EOL>self.output<EOL>logger.debug(\"<STR_LIT>\")<EOL>self.input<EOL>", "docstring": "Initialize configuration.", "id": "f12805:c0:m0"}
{"signature": "@cached_property<EOL><INDENT>def baselevels(self):<DEDENT>", "body": "if \"<STR_LIT>\" not in self._raw:<EOL><INDENT>return {}<EOL><DEDENT>baselevels = self._raw[\"<STR_LIT>\"]<EOL>minmax = {k: v for k, v in baselevels.items() if k in [\"<STR_LIT>\", \"<STR_LIT>\"]}<EOL>if not minmax:<EOL><INDENT>raise MapcheteConfigError(\"<STR_LIT>\")<EOL><DEDENT>for v in minmax.values():<EOL><INDENT>if not isinstance(v, int) or v < <NUM_LIT:0>:<EOL><INDENT>raise MapcheteConfigError(<EOL>\"<STR_LIT>\" % minmax.values()<EOL>)<EOL><DEDENT><DEDENT>zooms = list(range(<EOL>minmax.get(\"<STR_LIT>\", min(self.zoom_levels)),<EOL>minmax.get(\"<STR_LIT>\", max(self.zoom_levels)) + <NUM_LIT:1>)<EOL>)<EOL>if not set(self.zoom_levels).difference(set(zooms)):<EOL><INDENT>raise MapcheteConfigError(\"<STR_LIT>\")<EOL><DEDENT>return dict(<EOL>zooms=zooms,<EOL>lower=baselevels.get(\"<STR_LIT>\", \"<STR_LIT>\"),<EOL>higher=baselevels.get(\"<STR_LIT>\", \"<STR_LIT>\"),<EOL>tile_pyramid=BufferedTilePyramid(<EOL>self.output_pyramid.grid,<EOL>pixelbuffer=self.output_pyramid.pixelbuffer,<EOL>metatiling=self.process_pyramid.metatiling<EOL>)<EOL>)<EOL>", "docstring": "Optional baselevels configuration.\n\nbaselevels:\n    min: <zoom>\n    max: <zoom>\n    lower: <resampling method>\n    higher: <resampling method>", "id": "f12805:c0:m8"}
{"signature": "@cached_property<EOL><INDENT>def process_file(self):<DEDENT>", "body": "warnings.warn(DeprecationWarning(\"<STR_LIT>\"))<EOL>return os.path.join(self._raw[\"<STR_LIT>\"], self._raw[\"<STR_LIT>\"])<EOL>", "docstring": "Deprecated.", "id": "f12805:c0:m18"}
{"signature": "def at_zoom(self, zoom):", "body": "warnings.warn(DeprecationWarning(\"<STR_LIT>\"))<EOL>return self.params_at_zoom(zoom)<EOL>", "docstring": "Deprecated.", "id": "f12805:c0:m19"}
{"signature": "def clip_bounds(bounds=None, clip=None):", "body": "bounds = Bounds(*bounds)<EOL>clip = Bounds(*clip)<EOL>return Bounds(<EOL>max(bounds.left, clip.left),<EOL>max(bounds.bottom, clip.bottom),<EOL>min(bounds.right, clip.right),<EOL>min(bounds.top, clip.top)<EOL>)<EOL>", "docstring": "Clips bounds by clip.\n\nParameters\n----------\nbounds : bounds to be clipped\nclip : clip bounds\n\nReturns\n-------\nBounds(left, bottom, right, top)", "id": "f12805:m4"}
{"signature": "def _raw_at_zoom(config, zooms):", "body": "params_per_zoom = {}<EOL>for zoom in zooms:<EOL><INDENT>params = {}<EOL>for name, element in config.items():<EOL><INDENT>if name not in _RESERVED_PARAMETERS:<EOL><INDENT>out_element = _element_at_zoom(name, element, zoom)<EOL>if out_element is not None:<EOL><INDENT>params[name] = out_element<EOL><DEDENT><DEDENT><DEDENT>params_per_zoom[zoom] = params<EOL><DEDENT>return params_per_zoom<EOL>", "docstring": "Return parameter dictionary per zoom level.", "id": "f12805:m13"}
{"signature": "def _flatten_tree(tree, old_path=None):", "body": "flat_tree = []<EOL>for key, value in tree.items():<EOL><INDENT>new_path = \"<STR_LIT:/>\".join([old_path, key]) if old_path else key<EOL>if isinstance(value, dict) and \"<STR_LIT>\" not in value:<EOL><INDENT>flat_tree.extend(_flatten_tree(value, old_path=new_path))<EOL><DEDENT>else:<EOL><INDENT>flat_tree.append((new_path, value))<EOL><DEDENT><DEDENT>return flat_tree<EOL>", "docstring": "Flatten dict tree into dictionary where keys are paths of old dict.", "id": "f12805:m17"}
{"signature": "@cached_property<EOL><INDENT>def init_zoom_levels(self):<DEDENT>", "body": "return get_zoom_levels(<EOL>process_zoom_levels=self._raw[\"<STR_LIT>\"],<EOL>init_zoom_levels=self._raw[\"<STR_LIT>\"]<EOL>)<EOL>", "docstring": "Zoom levels this process is currently initialized with.\n\nThis gets triggered by using the ``zoom`` kwarg. If not set, it will\nbe equal to self.zoom_levels.", "id": "f12805:c0:m2"}
{"signature": "def _validate_zoom(zoom):", "body": "if any([not isinstance(zoom, int), zoom < <NUM_LIT:0>]):<EOL><INDENT>raise MapcheteConfigError(\"<STR_LIT>\")<EOL><DEDENT>return zoom<EOL>", "docstring": "Assert zoom value is positive integer.", "id": "f12805:m11"}
{"signature": "def raw_conf(mapchete_file):", "body": "return _map_to_new_config(yaml.load(open(mapchete_file, \"<STR_LIT:r>\").read()))<EOL>", "docstring": "Loads a mapchete_file into a dictionary.\n\nParameters\n----------\nmapchete_file : str\n    Path to a Mapchete file.\n\nReturns\n-------\ndictionary", "id": "f12805:m5"}
{"signature": "def _validate_zooms(zooms):", "body": "if isinstance(zooms, dict):<EOL><INDENT>if any([a not in zooms for a in [\"<STR_LIT>\", \"<STR_LIT>\"]]):<EOL><INDENT>raise MapcheteConfigError(\"<STR_LIT>\")<EOL><DEDENT>zmin = _validate_zoom(zooms[\"<STR_LIT>\"])<EOL>zmax = _validate_zoom(zooms[\"<STR_LIT>\"])<EOL>if zmin > zmax:<EOL><INDENT>raise MapcheteConfigError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>return list(range(zmin, zmax + <NUM_LIT:1>))<EOL><DEDENT>elif isinstance(zooms, list):<EOL><INDENT>if len(zooms) == <NUM_LIT:1>:<EOL><INDENT>return zooms<EOL><DEDENT>elif len(zooms) == <NUM_LIT:2>:<EOL><INDENT>zmin, zmax = sorted([_validate_zoom(z) for z in zooms])<EOL>return list(range(zmin, zmax + <NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>return zooms<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return [_validate_zoom(zooms)]<EOL><DEDENT>", "docstring": "Return a list of zoom levels.\n\nFollowing inputs are converted:\n- int --> [int]\n- dict{min, max} --> range(min, max + 1)\n- [int] --> [int]\n- [int, int] --> range(smaller int, bigger int + 1)", "id": "f12805:m10"}
{"signature": "def empty(self, process_tile):", "body": "raise NotImplementedError<EOL>", "docstring": "Return empty data.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n    must be member of process ``TilePyramid``\n\nReturns\n-------\nempty data : array or list\n    empty array with correct data type for raster data or empty list\n    for vector data", "id": "f12806:c2:m8"}
{"signature": "def open(self, tile, **kwargs):", "body": "raise NotImplementedError<EOL>", "docstring": "Return InputTile object.\n\nParameters\n----------\ntile : ``Tile``\n\nReturns\n-------\ninput tile : ``InputTile``\n    tile view of input data", "id": "f12806:c0:m1"}
{"signature": "def is_empty(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Check if there is data within this tile.\n\nReturns\n-------\nis empty : bool", "id": "f12806:c1:m2"}
{"signature": "def prepare_path(self, tile):", "body": "makedirs(os.path.dirname(self.get_path(tile)))<EOL>", "docstring": "Create directory and subdirectory if necessary.\n\nParameters\n----------\ntile : ``BufferedTile``\n    must be member of output ``TilePyramid``", "id": "f12806:c2:m6"}
{"signature": "def __exit__(self, t, v, tb):", "body": "pass<EOL>", "docstring": "Clean up.", "id": "f12806:c1:m4"}
{"signature": "def __enter__(self):", "body": "return self<EOL>", "docstring": "Required for 'with' statement.", "id": "f12806:c1:m3"}
{"signature": "def is_valid_with_config(self, config):", "body": "raise NotImplementedError<EOL>", "docstring": "Check if output format is valid with other process parameters.\n\nParameters\n----------\nconfig : dictionary\n    output configuration parameters\n\nReturns\n-------\nis_valid : bool", "id": "f12806:c2:m4"}
{"signature": "def extract_subset(self, input_data_tiles=None, out_tile=None):", "body": "if self.METADATA[\"<STR_LIT>\"] == \"<STR_LIT>\":<EOL><INDENT>mosaic = create_mosaic(input_data_tiles)<EOL>return extract_from_array(<EOL>in_raster=prepare_array(<EOL>mosaic.data,<EOL>nodata=self.nodata,<EOL>dtype=self.output_params[\"<STR_LIT>\"]<EOL>),<EOL>in_affine=mosaic.affine,<EOL>out_tile=out_tile<EOL>)<EOL><DEDENT>elif self.METADATA[\"<STR_LIT>\"] == \"<STR_LIT>\":<EOL><INDENT>return [<EOL>feature for feature in list(<EOL>chain.from_iterable([features for _, features in input_data_tiles])<EOL>)<EOL>if shape(feature[\"<STR_LIT>\"]).intersects(out_tile.bbox)<EOL>]<EOL><DEDENT>", "docstring": "Extract subset from multiple tiles.\n\ninput_data_tiles : list of (``Tile``, process data) tuples\nout_tile : ``Tile``\n\nReturns\n-------\nNumPy array or list of features.", "id": "f12806:c2:m11"}
{"signature": "def get_path(self, tile):", "body": "return os.path.join(*[<EOL>self.path,<EOL>str(tile.zoom),<EOL>str(tile.row),<EOL>str(tile.col) + self.file_extension<EOL>])<EOL>", "docstring": "Determine target file path.\n\nParameters\n----------\ntile : ``BufferedTile``\n    must be member of output ``TilePyramid``\n\nReturns\n-------\npath : string", "id": "f12806:c2:m5"}
{"signature": "def tiles_exist(self, process_tile=None, output_tile=None):", "body": "if process_tile and output_tile:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if process_tile:<EOL><INDENT>return any(<EOL>path_exists(self.get_path(tile))<EOL>for tile in self.pyramid.intersecting(process_tile)<EOL>)<EOL><DEDENT>if output_tile:<EOL><INDENT>return path_exists(self.get_path(output_tile))<EOL><DEDENT>", "docstring": "Check whether output tiles of a tile (either process or output) exists.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n    must be member of process ``TilePyramid``\noutput_tile : ``BufferedTile``\n    must be member of output ``TilePyramid``\n\nReturns\n-------\nexists : bool", "id": "f12806:c2:m3"}
{"signature": "def read(self, **kwargs):", "body": "raise NotImplementedError<EOL>", "docstring": "Read reprojected & resampled input data.\n\nReturns\n-------\ndata : array or list\n    NumPy array for raster data or feature list for vector data", "id": "f12806:c1:m1"}
{"signature": "def bbox(self, out_crs=None):", "body": "raise NotImplementedError<EOL>", "docstring": "Return data bounding box.\n\nParameters\n----------\nout_crs : ``rasterio.crs.CRS``\n    rasterio CRS object (default: CRS of process pyramid)\n\nReturns\n-------\nbounding box : geometry\n    Shapely geometry object", "id": "f12806:c0:m2"}
{"signature": "def cleanup(self):", "body": "pass<EOL>", "docstring": "Optional cleanup function called when Mapchete exits.", "id": "f12806:c0:m4"}
{"signature": "def read(self, output_tile):", "body": "raise NotImplementedError<EOL>", "docstring": "Read existing process output.\n\nParameters\n----------\noutput_tile : ``BufferedTile``\n    must be member of output ``TilePyramid``\n\nReturns\n-------\nprocess output : array or list", "id": "f12806:c2:m1"}
{"signature": "def _read_as_tiledir(<EOL>self,<EOL>out_tile=None,<EOL>td_crs=None,<EOL>tiles_paths=None,<EOL>profile=None,<EOL>validity_check=False,<EOL>indexes=None,<EOL>resampling=None,<EOL>dst_nodata=None,<EOL>gdal_opts=None,<EOL>**kwargs<EOL>):", "body": "return _read_as_tiledir(<EOL>data_type=self.METADATA[\"<STR_LIT>\"],<EOL>out_tile=out_tile,<EOL>td_crs=td_crs,<EOL>tiles_paths=tiles_paths,<EOL>profile=profile,<EOL>validity_check=validity_check,<EOL>indexes=indexes,<EOL>resampling=resampling,<EOL>dst_nodata=dst_nodata,<EOL>gdal_opts=gdal_opts,<EOL>**{k: v for k, v in kwargs.items() if k != \"<STR_LIT>\"}<EOL>)<EOL>", "docstring": "Read reprojected & resampled input data.\n\nParameters\n----------\nvalidity_check : bool\n    vector file: also run checks if reprojected geometry is valid,\n    otherwise throw RuntimeError (default: True)\n\nindexes : list or int\n    raster file: a list of band numbers; None will read all.\ndst_nodata : int or float, optional\n    raster file: if not set, the nodata value from the source dataset\n    will be used\ngdal_opts : dict\n    raster file: GDAL options passed on to rasterio.Env()\n\nReturns\n-------\ndata : list for vector files or numpy array for raster files", "id": "f12806:c2:m13"}
{"signature": "def exists(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Check if data or file even exists.\n\nReturns\n-------\nfile exists : bool", "id": "f12806:c0:m3"}
{"signature": "def __init__(self, output_params, readonly=False):", "body": "self.pixelbuffer = output_params[\"<STR_LIT>\"]<EOL>if \"<STR_LIT:type>\" in output_params:<EOL><INDENT>warnings.warn(DeprecationWarning(\"<STR_LIT>\"))<EOL>if \"<STR_LIT>\" not in output_params:<EOL><INDENT>output_params[\"<STR_LIT>\"] = output_params.pop(\"<STR_LIT:type>\")<EOL><DEDENT><DEDENT>self.pyramid = TilePyramid(<EOL>grid=output_params[\"<STR_LIT>\"],<EOL>metatiling=output_params[\"<STR_LIT>\"]<EOL>)<EOL>self.crs = self.pyramid.crs<EOL>self._bucket = None<EOL>if not readonly:<EOL><INDENT>write_output_metadata(output_params)<EOL><DEDENT>", "docstring": "Initialize.", "id": "f12806:c2:m0"}
{"signature": "def output_is_valid(self, process_data):", "body": "if self.METADATA[\"<STR_LIT>\"] == \"<STR_LIT>\":<EOL><INDENT>return (<EOL>is_numpy_or_masked_array(process_data) or<EOL>is_numpy_or_masked_array_with_tags(process_data)<EOL>)<EOL><DEDENT>elif self.METADATA[\"<STR_LIT>\"] == \"<STR_LIT>\":<EOL><INDENT>return is_feature_list(process_data)<EOL><DEDENT>", "docstring": "Check whether process output is allowed with output driver.\n\nParameters\n----------\nprocess_data : raw process output\n\nReturns\n-------\nTrue or False", "id": "f12806:c2:m9"}
{"signature": "def available_output_formats():", "body": "output_formats = []<EOL>for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):<EOL><INDENT>driver_ = v.load()<EOL>if hasattr(driver_, \"<STR_LIT>\") and (<EOL>driver_.METADATA[\"<STR_LIT>\"] in [\"<STR_LIT:w>\", \"<STR_LIT>\"]<EOL>):<EOL><INDENT>output_formats.append(driver_.METADATA[\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>return output_formats<EOL>", "docstring": "Return all available output formats.\n\nReturns\n-------\nformats : list\n    all available output formats", "id": "f12807:m1"}
{"signature": "def write_output_metadata(output_params):", "body": "if \"<STR_LIT:path>\" in output_params:<EOL><INDENT>metadata_path = os.path.join(output_params[\"<STR_LIT:path>\"], \"<STR_LIT>\")<EOL>logger.debug(\"<STR_LIT>\", metadata_path)<EOL>try:<EOL><INDENT>existing_params = read_output_metadata(metadata_path)<EOL>logger.debug(\"<STR_LIT>\", metadata_path)<EOL>logger.debug(\"<STR_LIT>\", pformat(existing_params))<EOL>existing_tp = existing_params[\"<STR_LIT>\"]<EOL>current_params = params_to_dump(output_params)<EOL>logger.debug(\"<STR_LIT>\", pformat(current_params))<EOL>current_tp = BufferedTilePyramid(**current_params[\"<STR_LIT>\"])<EOL>if existing_tp != current_tp:<EOL><INDENT>raise MapcheteConfigError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (existing_tp, current_tp)<EOL>)<EOL><DEDENT>existing_format = existing_params[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>current_format = current_params[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>if existing_format != current_format:<EOL><INDENT>raise MapcheteConfigError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (<EOL>(existing_format, current_format)<EOL>)<EOL>)<EOL><DEDENT><DEDENT>except FileNotFoundError:<EOL><INDENT>logger.debug(\"<STR_LIT>\", metadata_path)<EOL>dump_params = params_to_dump(output_params)<EOL>write_json(metadata_path, dump_params)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Dump output JSON and verify parameters if output metadata exist.", "id": "f12807:m8"}
{"signature": "def open(self, tile, **kwargs):", "body": "return self.process.config.output.open(tile, self.process, **kwargs)<EOL>", "docstring": "Return InputTile object.\n\nParameters\n----------\ntile : ``Tile``\n\nReturns\n-------\ninput tile : ``InputTile``\n    tile view of input data", "id": "f12809:c0:m1"}
{"signature": "def __init__(self, output_params, **kwargs):", "body": "super(OutputData, self).__init__(output_params)<EOL>self.path = output_params[\"<STR_LIT:path>\"]<EOL>self.file_extension = \"<STR_LIT>\"<EOL>self.output_params = output_params<EOL>self.nodata = output_params.get(\"<STR_LIT>\", GTIFF_DEFAULT_PROFILE[\"<STR_LIT>\"])<EOL>self._bucket = self.path.split(\"<STR_LIT:/>\")[<NUM_LIT:2>] if self.path.startswith(\"<STR_LIT>\") else None<EOL>", "docstring": "Initialize.", "id": "f12811:c0:m0"}
{"signature": "def is_empty(self, indexes=None):", "body": "<EOL>return not self.tile.bbox.intersects(self.process.config.area_at_zoom())<EOL>", "docstring": "Check if there is data within this tile.\n\nReturns\n-------\nis empty : bool", "id": "f12811:c1:m2"}
{"signature": "def __init__(self, tile, process, resampling):", "body": "self.tile = tile<EOL>self.process = process<EOL>self.pixelbuffer = None<EOL>self.resampling = resampling<EOL>", "docstring": "Initialize.", "id": "f12811:c1:m0"}
{"signature": "def write(self, process_tile, data):", "body": "if (<EOL>isinstance(data, tuple) and<EOL>len(data) == <NUM_LIT:2> and<EOL>isinstance(data[<NUM_LIT:1>], dict)<EOL>):<EOL><INDENT>data, tags = data<EOL><DEDENT>else:<EOL><INDENT>tags = {}<EOL><DEDENT>data = prepare_array(<EOL>data,<EOL>masked=True,<EOL>nodata=self.nodata,<EOL>dtype=self.profile(process_tile)[\"<STR_LIT>\"]<EOL>)<EOL>if data.mask.all():<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else None<EOL>for tile in self.pyramid.intersecting(process_tile):<EOL><INDENT>out_path = self.get_path(tile)<EOL>self.prepare_path(tile)<EOL>out_tile = BufferedTile(tile, self.pixelbuffer)<EOL>write_raster_window(<EOL>in_tile=process_tile,<EOL>in_data=data,<EOL>out_profile=self.profile(out_tile),<EOL>out_tile=out_tile,<EOL>out_path=out_path,<EOL>tags=tags,<EOL>bucket_resource=bucket_resource<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "Write data from process tiles into GeoTIFF file(s).\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n    must be member of process ``TilePyramid``\ndata : ``np.ndarray``", "id": "f12811:c0:m2"}
{"signature": "def _get_band_indexes(self, indexes=None):", "body": "if indexes:<EOL><INDENT>if isinstance(indexes, list):<EOL><INDENT>return indexes<EOL><DEDENT>else:<EOL><INDENT>return [indexes]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return range(<NUM_LIT:1>, self.process.config.output.profile(self.tile)[\"<STR_LIT:count>\"] + <NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Return valid band indexes.", "id": "f12811:c1:m3"}
{"signature": "def __enter__(self):", "body": "return self<EOL>", "docstring": "Enable context manager.", "id": "f12811:c1:m4"}
{"signature": "def read(self, indexes=None, **kwargs):", "body": "band_indexes = self._get_band_indexes(indexes)<EOL>arr = self.process.get_raw_output(self.tile)<EOL>if len(band_indexes) == <NUM_LIT:1>:<EOL><INDENT>return arr[band_indexes[<NUM_LIT:0>] - <NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>return ma.concatenate([ma.expand_dims(arr[i - <NUM_LIT:1>], <NUM_LIT:0>) for i in band_indexes])<EOL><DEDENT>", "docstring": "Read reprojected & resampled input data.\n\nParameters\n----------\nindexes : integer or list\n    band number or list of band numbers\n\nReturns\n-------\ndata : array", "id": "f12811:c1:m1"}
{"signature": "def open(self, tile, process, **kwargs):", "body": "return InputTile(tile, process, kwargs.get(\"<STR_LIT>\", None))<EOL>", "docstring": "Open process output as input for other process.\n\nParameters\n----------\ntile : ``Tile``\nprocess : ``MapcheteProcess``\nkwargs : keyword arguments", "id": "f12811:c0:m7"}
{"signature": "def bbox(self, out_crs=None):", "body": "return reproject_geometry(<EOL>box(*self._bounds),<EOL>src_crs=self.td_pyramid.crs,<EOL>dst_crs=self.pyramid.crs if out_crs is None else out_crs<EOL>)<EOL>", "docstring": "Return data bounding box.\n\nParameters\n----------\nout_crs : ``rasterio.crs.CRS``\n    rasterio CRS object (default: CRS of process pyramid)\n\nReturns\n-------\nbounding box : geometry\n    Shapely geometry object", "id": "f12812:c0:m2"}
{"signature": "def is_empty(self):", "body": "return len(self._tiles_paths) == <NUM_LIT:0><EOL>", "docstring": "Check if there is data within this tile.\n\nReturns\n-------\nis empty : bool", "id": "f12812:c1:m2"}
{"signature": "def read(<EOL>self,<EOL>validity_check=False,<EOL>indexes=None,<EOL>resampling=None,<EOL>dst_nodata=None,<EOL>gdal_opts=None,<EOL>**kwargs<EOL>):", "body": "return self._read_as_tiledir(<EOL>data_type=self._file_type,<EOL>out_tile=self.tile,<EOL>td_crs=self._td_crs,<EOL>tiles_paths=self._tiles_paths,<EOL>profile=self._profile,<EOL>validity_check=validity_check,<EOL>indexes=indexes,<EOL>resampling=resampling if resampling else self._resampling,<EOL>dst_nodata=dst_nodata,<EOL>gdal_opts=gdal_opts,<EOL>**{k: v for k, v in kwargs.items() if k != \"<STR_LIT>\"}<EOL>)<EOL>", "docstring": "Read reprojected & resampled input data.\n\nParameters\n----------\nvalidity_check : bool\n    vector file: also run checks if reprojected geometry is valid,\n    otherwise throw RuntimeError (default: True)\n\nindexes : list or int\n    raster file: a list of band numbers; None will read all.\ndst_nodata : int or float, optional\n    raster file: if not set, the nodata value from the source dataset\n    will be used\ngdal_opts : dict\n    raster file: GDAL options passed on to rasterio.Env()\n\nReturns\n-------\ndata : list for vector files or numpy array for raster files", "id": "f12812:c1:m1"}
{"signature": "def __init__(self, output_params, **kwargs):", "body": "super(OutputData, self).__init__(output_params)<EOL>self.path = output_params[\"<STR_LIT:path>\"]<EOL>self.file_extension = \"<STR_LIT>\"<EOL>self.output_params = output_params<EOL>self.output_params[\"<STR_LIT>\"] = PNG_DEFAULT_PROFILE[\"<STR_LIT>\"]<EOL>self.nodata = output_params.get(\"<STR_LIT>\", PNG_DEFAULT_PROFILE[\"<STR_LIT>\"])<EOL>self._bucket = self.path.split(\"<STR_LIT:/>\")[<NUM_LIT:2>] if self.path.startswith(\"<STR_LIT>\") else None<EOL>", "docstring": "Initialize.", "id": "f12813:c0:m0"}
{"signature": "def empty(self, process_tile):", "body": "bands = (<EOL>self.output_params[\"<STR_LIT>\"]<EOL>if \"<STR_LIT>\" in self.output_params<EOL>else PNG_DEFAULT_PROFILE[\"<STR_LIT:count>\"]<EOL>)<EOL>return ma.masked_array(<EOL>data=ma.zeros((bands, ) + process_tile.shape),<EOL>mask=ma.zeros((bands, ) + process_tile.shape),<EOL>dtype=PNG_DEFAULT_PROFILE[\"<STR_LIT>\"]<EOL>)<EOL>", "docstring": "Return empty data.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n    must be member of process ``TilePyramid``\n\nReturns\n-------\nempty data : array\n    empty array with data type given in output parameters", "id": "f12813:c0:m6"}
{"signature": "def profile(self, tile=None):", "body": "dst_metadata = PNG_DEFAULT_PROFILE<EOL>dst_metadata.pop(\"<STR_LIT>\", None)<EOL>if tile is not None:<EOL><INDENT>dst_metadata.update(<EOL>width=tile.width, height=tile.height, affine=tile.affine,<EOL>crs=tile.crs)<EOL><DEDENT>try:<EOL><INDENT>dst_metadata.update(count=self.output_params[\"<STR_LIT:count>\"])<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>return dst_metadata<EOL>", "docstring": "Create a metadata dictionary for rasterio.\n\nParameters\n----------\ntile : ``BufferedTile``\n\nReturns\n-------\nmetadata : dictionary\n    output profile dictionary used for rasterio.", "id": "f12813:c0:m4"}
{"signature": "def write(self, process_tile, data):", "body": "rgba = self._prepare_array_for_png(data)<EOL>data = ma.masked_where(rgba == self.nodata, rgba)<EOL>if data.mask.all():<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else None<EOL>for tile in self.pyramid.intersecting(process_tile):<EOL><INDENT>out_path = self.get_path(tile)<EOL>self.prepare_path(tile)<EOL>out_tile = BufferedTile(tile, self.pixelbuffer)<EOL>write_raster_window(<EOL>in_tile=process_tile,<EOL>in_data=data,<EOL>out_profile=self.profile(out_tile),<EOL>out_tile=out_tile,<EOL>out_path=out_path,<EOL>bucket_resource=bucket_resource<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "Write data from one or more process tiles.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n    must be member of process ``TilePyramid``", "id": "f12813:c0:m1"}
{"signature": "def write(self, process_tile, data):", "body": "data = self._prepare_array(data)<EOL>if data.mask.all():<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else None<EOL>for tile in self.pyramid.intersecting(process_tile):<EOL><INDENT>out_path = self.get_path(tile)<EOL>self.prepare_path(tile)<EOL>out_tile = BufferedTile(tile, self.pixelbuffer)<EOL>write_raster_window(<EOL>in_tile=process_tile,<EOL>in_data=data,<EOL>out_profile=self.profile(out_tile),<EOL>out_tile=out_tile,<EOL>out_path=out_path,<EOL>bucket_resource=bucket_resource<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "Write data from process tiles into PNG file(s).\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n    must be member of process ``TilePyramid``", "id": "f12814:c0:m1"}
{"signature": "def empty(self, process_tile):", "body": "return ma.masked_values(np.zeros(process_tile.shape), <NUM_LIT:0>)<EOL>", "docstring": "Return empty data.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n    must be member of process ``TilePyramid``\n\nReturns\n-------\nempty data : array or list\n    empty array with correct data type for raster data or empty list\n    for vector data", "id": "f12814:c0:m6"}
{"signature": "def __init__(self, output_params, **kwargs):", "body": "super(OutputData, self).__init__(output_params)<EOL>self.path = output_params[\"<STR_LIT:path>\"]<EOL>self.file_extension = \"<STR_LIT>\"<EOL>self.output_params = output_params<EOL>self._profile = dict(PNG_DEFAULT_PROFILE)<EOL>self.nodata = self._profile[\"<STR_LIT>\"]<EOL>try:<EOL><INDENT>self.old_band_num = output_params[\"<STR_LIT>\"]<EOL>self._profile.update(count=<NUM_LIT:4>)<EOL><DEDENT>except KeyError:<EOL><INDENT>self.old_band_num = False<EOL><DEDENT>self.output_params.update(dtype=self._profile[\"<STR_LIT>\"])<EOL>self._bucket = self.path.split(\"<STR_LIT:/>\")[<NUM_LIT:2>] if self.path.startswith(\"<STR_LIT>\") else None<EOL>", "docstring": "Initialize.", "id": "f12814:c0:m0"}
{"signature": "def read(self, output_tile, **kwargs):", "body": "try:<EOL><INDENT>return ma.masked_values(<EOL>read_raster_no_crs(<EOL>self.get_path(output_tile), indexes=(<NUM_LIT:4> if self.old_band_num else <NUM_LIT:2>)<EOL>),<EOL><NUM_LIT:0><EOL>)<EOL><DEDENT>except FileNotFoundError:<EOL><INDENT>return self.empty(output_tile)<EOL><DEDENT>", "docstring": "Read existing process output.\n\nParameters\n----------\noutput_tile : ``BufferedTile``\n    must be member of output ``TilePyramid``\n\nReturns\n-------\nprocess output : ``BufferedTile`` with appended data", "id": "f12814:c0:m2"}
{"signature": "def for_web(self, data):", "body": "return (<EOL>memory_file(self._prepare_array(data), self.profile()), \"<STR_LIT>\"<EOL>)<EOL>", "docstring": "Convert data to web output.\n\nParameters\n----------\ndata : array\n\nReturns\n-------\nMemoryFile(), MIME type", "id": "f12814:c0:m5"}
{"signature": "def write(self, process_tile, data):", "body": "if data is None or len(data) == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>if not isinstance(data, (list, types.GeneratorType)):<EOL><INDENT>raise TypeError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>data = list(data)<EOL>if not len(data):<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else None<EOL>for tile in self.pyramid.intersecting(process_tile):<EOL><INDENT>out_path = self.get_path(tile)<EOL>self.prepare_path(tile)<EOL>out_tile = BufferedTile(tile, self.pixelbuffer)<EOL>write_vector_window(<EOL>in_data=data,<EOL>out_schema=self.output_params[\"<STR_LIT>\"],<EOL>out_tile=out_tile,<EOL>out_path=out_path,<EOL>bucket_resource=bucket_resource<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "Write data from process tiles into GeoJSON file(s).\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n    must be member of process ``TilePyramid``", "id": "f12815:c0:m2"}
{"signature": "def empty(self, process_tile=None):", "body": "return []<EOL>", "docstring": "Return empty data.\n\nParameters\n----------\nprocess_tile : ``BufferedTile``\n    must be member of process ``TilePyramid``\n\nReturns\n-------\nempty data : list", "id": "f12815:c0:m4"}
{"signature": "def read(self, validity_check=True, no_neighbors=False, **kwargs):", "body": "if no_neighbors:<EOL><INDENT>raise NotImplementedError()<EOL><DEDENT>return self._from_cache(validity_check=validity_check)<EOL>", "docstring": "Read data from process output.\n\nParameters\n----------\nvalidity_check : bool\n    run geometry validity check (default: True)\nno_neighbors : bool\n    don't include neighbor tiles if there is a pixelbuffer (default:\n    False)\n\nReturns\n-------\nfeatures : list\n    GeoJSON-like list of features", "id": "f12815:c1:m1"}
{"signature": "def open(self, tile, process):", "body": "return InputTile(tile, process)<EOL>", "docstring": "Open process output as input for other process.\n\nParameters\n----------\ntile : ``Tile``\nprocess : ``MapcheteProcess``", "id": "f12815:c0:m6"}
{"signature": "def for_web(self, data):", "body": "return list(data), \"<STR_LIT:application/json>\"<EOL>", "docstring": "Convert data to web output (raster only).\n\nParameters\n----------\ndata : array\n\nReturns\n-------\nweb data : array", "id": "f12815:c0:m5"}
{"signature": "def read(self, output_tile, **kwargs):", "body": "path = self.get_path(output_tile)<EOL>try:<EOL><INDENT>with fiona.open(path, \"<STR_LIT:r>\") as src:<EOL><INDENT>return list(src)<EOL><DEDENT><DEDENT>except DriverError as e:<EOL><INDENT>for i in (\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>if i in str(e):<EOL><INDENT>return self.empty(output_tile)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>", "docstring": "Read existing process output.\n\nParameters\n----------\noutput_tile : ``BufferedTile``\n    must be member of output ``TilePyramid``\n\nReturns\n-------\nprocess output : list", "id": "f12815:c0:m1"}
{"signature": "def __exit__(self, t, v, tb):", "body": "self._cache = {}<EOL>", "docstring": "Clear cache on close.", "id": "f12815:c1:m5"}
{"signature": "def __enter__(self):", "body": "return self<EOL>", "docstring": "Enable context manager.", "id": "f12815:c1:m4"}
{"signature": "def open(self, tile, **kwargs):", "body": "return InputTile(tile, self, **kwargs)<EOL>", "docstring": "Return InputTile object.\n\nParameters\n----------\ntile : ``Tile``\n\nReturns\n-------\ninput tile : ``InputTile``\n    tile view of input data", "id": "f12816:c0:m2"}
{"signature": "def read(self, indexes=None, **kwargs):", "body": "return read_raster_window(<EOL>self.raster_file.path,<EOL>self.tile,<EOL>indexes=self._get_band_indexes(indexes),<EOL>resampling=self.resampling,<EOL>gdal_opts=self.gdal_opts<EOL>)<EOL>", "docstring": "Read reprojected & resampled input data.\n\nReturns\n-------\ndata : array", "id": "f12816:c1:m1"}
{"signature": "def is_empty(self, indexes=None):", "body": "<EOL>return not self.tile.bbox.intersects(<EOL>self.raster_file.bbox(out_crs=self.tile.crs)<EOL>)<EOL>", "docstring": "Check if there is data within this tile.\n\nReturns\n-------\nis empty : bool", "id": "f12816:c1:m2"}
{"signature": "def bbox(self, out_crs=None):", "body": "out_crs = self.pyramid.crs if out_crs is None else out_crs<EOL>with rasterio.open(self.path) as inp:<EOL><INDENT>inp_crs = inp.crs<EOL>out_bbox = bbox = box(*inp.bounds)<EOL><DEDENT>if inp_crs != out_crs:<EOL><INDENT>return reproject_geometry(<EOL>segmentize_geometry(<EOL>bbox, inp.transform[<NUM_LIT:0>] * self.pyramid.tile_size<EOL>),<EOL>src_crs=inp_crs, dst_crs=out_crs<EOL>)<EOL><DEDENT>else:<EOL><INDENT>return out_bbox<EOL><DEDENT>", "docstring": "Return data bounding box.\n\nParameters\n----------\nout_crs : ``rasterio.crs.CRS``\n    rasterio CRS object (default: CRS of process pyramid)\n\nReturns\n-------\nbounding box : geometry\n    Shapely geometry object", "id": "f12816:c0:m3"}
{"signature": "def _get_band_indexes(self, indexes=None):", "body": "if indexes:<EOL><INDENT>if isinstance(indexes, list):<EOL><INDENT>return indexes<EOL><DEDENT>else:<EOL><INDENT>return [indexes]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return range(<NUM_LIT:1>, self.raster_file.profile[\"<STR_LIT:count>\"] + <NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Return valid band indexes.", "id": "f12816:c1:m3"}
{"signature": "def _extract(self, in_tile=None, in_data=None, out_tile=None):", "body": "return self.config.output.extract_subset(<EOL>input_data_tiles=[(in_tile, in_data)],<EOL>out_tile=out_tile<EOL>)<EOL>", "docstring": "Extract data from tile.", "id": "f12819:c0:m12"}
{"signature": "def get_raw_output(self, tile, _baselevel_readonly=False):", "body": "if not isinstance(tile, (BufferedTile, tuple)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if isinstance(tile, tuple):<EOL><INDENT>tile = self.config.output_pyramid.tile(*tile)<EOL><DEDENT>if _baselevel_readonly:<EOL><INDENT>tile = self.config.baselevels[\"<STR_LIT>\"].tile(*tile.id)<EOL><DEDENT>if tile.zoom not in self.config.zoom_levels:<EOL><INDENT>return self.config.output.empty(tile)<EOL><DEDENT>if tile.crs != self.config.process_pyramid.crs:<EOL><INDENT>raise NotImplementedError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if self.config.mode == \"<STR_LIT>\":<EOL><INDENT>process_tile = self.config.process_pyramid.intersecting(tile)[<NUM_LIT:0>]<EOL>return self._extract(<EOL>in_tile=process_tile,<EOL>in_data=self._execute_using_cache(process_tile),<EOL>out_tile=tile<EOL>)<EOL><DEDENT>process_tile = self.config.process_pyramid.intersecting(tile)[<NUM_LIT:0>]<EOL>if tile.pixelbuffer > self.config.output.pixelbuffer:<EOL><INDENT>output_tiles = list(self.config.output_pyramid.tiles_from_bounds(<EOL>tile.bounds, tile.zoom<EOL>))<EOL><DEDENT>else:<EOL><INDENT>output_tiles = self.config.output_pyramid.intersecting(tile)<EOL><DEDENT>if self.config.mode == \"<STR_LIT>\" or _baselevel_readonly:<EOL><INDENT>if self.config.output.tiles_exist(process_tile):<EOL><INDENT>return self._read_existing_output(tile, output_tiles)<EOL><DEDENT>else:<EOL><INDENT>return self.config.output.empty(tile)<EOL><DEDENT><DEDENT>elif self.config.mode == \"<STR_LIT>\" and not _baselevel_readonly:<EOL><INDENT>if self.config.output.tiles_exist(process_tile):<EOL><INDENT>return self._read_existing_output(tile, output_tiles)<EOL><DEDENT>else:<EOL><INDENT>return self._process_and_overwrite_output(tile, process_tile)<EOL><DEDENT><DEDENT>elif self.config.mode == \"<STR_LIT>\" and not _baselevel_readonly:<EOL><INDENT>return self._process_and_overwrite_output(tile, process_tile)<EOL><DEDENT>", "docstring": "Get 
output raw data.\n\nThis function won't work with multiprocessing, as it uses the\n``threading.Lock()`` class.\n\nParameters\n----------\ntile : tuple, Tile or BufferedTile\n    If a tile index is given, a tile from the output pyramid will be\n    assumed. Tile cannot be bigger than process tile!\n\nReturns\n-------\ndata : NumPy array or features\n    process output", "id": "f12819:c0:m8"}
{"signature": "def read(self, output_tile):", "body": "if self.config.mode not in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if isinstance(output_tile, tuple):<EOL><INDENT>output_tile = self.config.output_pyramid.tile(*output_tile)<EOL><DEDENT>elif isinstance(output_tile, BufferedTile):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>return self.config.output.read(output_tile)<EOL>", "docstring": "Read from written process output.\n\nParameters\n----------\noutput_tile : BufferedTile or tile index tuple\n    Member of the output tile pyramid (not necessarily the process\n    pyramid, if output has a different metatiling setting)\n\nReturns\n-------\ndata : NumPy array or features\n    process output", "id": "f12819:c0:m6"}
{"signature": "def contours(<EOL>self, elevation, interval=<NUM_LIT:100>, field='<STR_LIT>', base=<NUM_LIT:0><EOL>):", "body": "return commons_contours.extract_contours(<EOL>elevation, self.tile, interval=interval, field=field, base=base)<EOL>", "docstring": "Extract contour lines from elevation data.\n\nParameters\n----------\nelevation : array\n    input elevation data\ninterval : integer\n    elevation value interval when drawing contour lines\nfield : string\n    output field name containing elevation value\nbase : integer\n    elevation base value the intervals are computed from\n\nReturns\n-------\ncontours : iterable\n    contours as GeoJSON-like pairs of properties and geometry", "id": "f12819:c1:m5"}
{"signature": "def execute(self, process_tile, raise_nodata=False):", "body": "if self.config.mode not in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if isinstance(process_tile, tuple):<EOL><INDENT>process_tile = self.config.process_pyramid.tile(*process_tile)<EOL><DEDENT>elif isinstance(process_tile, BufferedTile):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if process_tile.zoom not in self.config.zoom_levels:<EOL><INDENT>return self.config.output.empty(process_tile)<EOL><DEDENT>return self._execute(process_tile, raise_nodata=raise_nodata)<EOL>", "docstring": "Run the Mapchete process.\n\nExecute, write and return data.\n\nParameters\n----------\nprocess_tile : Tile or tile index tuple\n    Member of the process tile pyramid (not necessarily the output\n    pyramid, if output has a different metatiling setting)\n\nReturns\n-------\ndata : NumPy array or features\n    process output", "id": "f12819:c0:m5"}
{"signature": "def write(self, process_tile, data):", "body": "if isinstance(process_tile, tuple):<EOL><INDENT>process_tile = self.config.process_pyramid.tile(*process_tile)<EOL><DEDENT>elif not isinstance(process_tile, BufferedTile):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % type(process_tile))<EOL><DEDENT>if self.config.mode not in [\"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if self.config.mode == \"<STR_LIT>\" and (<EOL>self.config.output.tiles_exist(process_tile)<EOL>):<EOL><INDENT>message = \"<STR_LIT>\"<EOL>logger.debug((process_tile.id, message))<EOL>return ProcessInfo(<EOL>tile=process_tile,<EOL>processed=False,<EOL>process_msg=None,<EOL>written=False,<EOL>write_msg=message<EOL>)<EOL><DEDENT>elif data is None:<EOL><INDENT>message = \"<STR_LIT>\"<EOL>logger.debug((process_tile.id, message))<EOL>return ProcessInfo(<EOL>tile=process_tile,<EOL>processed=False,<EOL>process_msg=None,<EOL>written=False,<EOL>write_msg=message<EOL>)<EOL><DEDENT>else:<EOL><INDENT>with Timer() as t:<EOL><INDENT>self.config.output.write(process_tile=process_tile, data=data)<EOL><DEDENT>message = \"<STR_LIT>\" % t<EOL>logger.debug((process_tile.id, message))<EOL>return ProcessInfo(<EOL>tile=process_tile,<EOL>processed=False,<EOL>process_msg=None,<EOL>written=True,<EOL>write_msg=message<EOL>)<EOL><DEDENT>", "docstring": "Write data into output format.\n\nParameters\n----------\nprocess_tile : BufferedTile or tile index tuple\n    process tile\ndata : NumPy array or features\n    data to be written", "id": "f12819:c0:m7"}
{"signature": "def open(self, input_id, **kwargs):", "body": "if not isinstance(input_id, str):<EOL><INDENT>return input_id.open(self.tile, **kwargs)<EOL><DEDENT>if input_id not in self.params[\"<STR_LIT:input>\"]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % input_id)<EOL><DEDENT>return self.params[\"<STR_LIT:input>\"][input_id].open(self.tile, **kwargs)<EOL>", "docstring": "Open input data.\n\nParameters\n----------\ninput_id : string\n    input identifier from configuration file or file path\nkwargs : driver specific parameters (e.g. resampling)\n\nReturns\n-------\ntiled input data : InputTile\n    reprojected input data within tile", "id": "f12819:c1:m3"}
{"signature": "def read(self, **kwargs):", "body": "if self.tile.pixelbuffer > self.config.output.pixelbuffer:<EOL><INDENT>output_tiles = list(self.config.output_pyramid.tiles_from_bounds(<EOL>self.tile.bounds, self.tile.zoom<EOL>))<EOL><DEDENT>else:<EOL><INDENT>output_tiles = self.config.output_pyramid.intersecting(self.tile)<EOL><DEDENT>return self.config.output.extract_subset(<EOL>input_data_tiles=[<EOL>(output_tile, self.config.output.read(output_tile))<EOL>for output_tile in output_tiles<EOL>],<EOL>out_tile=self.tile,<EOL>)<EOL>", "docstring": "Read existing output data from a previous run.\n\nReturns\n-------\nprocess output : NumPy array (raster) or feature iterator (vector)", "id": "f12819:c1:m2"}
{"signature": "def _shift_required(tiles):", "body": "if tiles[<NUM_LIT:0>][<NUM_LIT:0>].tile_pyramid.is_global:<EOL><INDENT>tile_cols = sorted(list(set([t[<NUM_LIT:0>].col for t in tiles])))<EOL>if tile_cols == list(range(min(tile_cols), max(tile_cols) + <NUM_LIT:1>)):<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>def gen_groups(items):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>j = items[<NUM_LIT:0>]<EOL>group = [j]<EOL>for i in items[<NUM_LIT:1>:]:<EOL><INDENT>if i == j + <NUM_LIT:1>:<EOL><INDENT>group.append(i)<EOL><DEDENT>else:<EOL><INDENT>yield group<EOL>group = [i]<EOL><DEDENT>j = i<EOL><DEDENT>yield group<EOL><DEDENT>groups = list(gen_groups(tile_cols))<EOL>if len(groups) == <NUM_LIT:1>:<EOL><INDENT>return False<EOL><DEDENT>normal_distance = groups[-<NUM_LIT:1>][-<NUM_LIT:1>] - groups[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>antimeridian_distance = (<EOL>groups[<NUM_LIT:0>][-<NUM_LIT:1>] + tiles[<NUM_LIT:0>][<NUM_LIT:0>].tile_pyramid.matrix_width(tiles[<NUM_LIT:0>][<NUM_LIT:0>].zoom)<EOL>) - groups[-<NUM_LIT:1>][<NUM_LIT:0>]<EOL>return antimeridian_distance < normal_distance<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Determine if distance over antimeridian is shorter than normal distance.", "id": "f12820:m15"}
{"signature": "def create_mosaic(tiles, nodata=<NUM_LIT:0>):", "body": "if isinstance(tiles, GeneratorType):<EOL><INDENT>tiles = list(tiles)<EOL><DEDENT>elif not isinstance(tiles, list):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if not all([isinstance(pair, tuple) for pair in tiles]):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if not all([<EOL>all([isinstance(tile, BufferedTile), isinstance(data, np.ndarray)])<EOL>for tile, data in tiles<EOL>]):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if len(tiles) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>logger.debug(\"<STR_LIT>\", len(tiles))<EOL>if len(tiles) == <NUM_LIT:1>:<EOL><INDENT>tile, data = tiles[<NUM_LIT:0>]<EOL>return ReferencedRaster(<EOL>data=data,<EOL>affine=tile.affine,<EOL>bounds=tile.bounds,<EOL>crs=tile.crs<EOL>)<EOL><DEDENT>pyramid, resolution, dtype = _get_tiles_properties(tiles)<EOL>shift = _shift_required(tiles)<EOL>m_left, m_bottom, m_right, m_top = None, None, None, None<EOL>for tile, data in tiles:<EOL><INDENT>num_bands = data.shape[<NUM_LIT:0>] if data.ndim > <NUM_LIT:2> else <NUM_LIT:1><EOL>left, bottom, right, top = tile.bounds<EOL>if shift:<EOL><INDENT>left += pyramid.x_size / <NUM_LIT:2><EOL>right += pyramid.x_size / <NUM_LIT:2><EOL>if right > pyramid.right:<EOL><INDENT>right -= pyramid.x_size<EOL>left -= pyramid.x_size<EOL><DEDENT><DEDENT>m_left = min([left, m_left]) if m_left is not None else left<EOL>m_bottom = min([bottom, m_bottom]) if m_bottom is not None else bottom<EOL>m_right = max([right, m_right]) if m_right is not None else right<EOL>m_top = max([top, m_top]) if m_top is not None else top<EOL><DEDENT>height = int(round((m_top - m_bottom) / resolution))<EOL>width = int(round((m_right - m_left) / resolution))<EOL>mosaic = ma.MaskedArray(<EOL>data=np.full((num_bands, height, width), dtype=dtype, fill_value=nodata),<EOL>mask=np.ones((num_bands, height, width))<EOL>)<EOL>affine = Affine(resolution, <NUM_LIT:0>, m_left, 
<NUM_LIT:0>, -resolution, m_top)<EOL>for tile, data in tiles:<EOL><INDENT>data = prepare_array(data, nodata=nodata, dtype=dtype)<EOL>t_left, t_bottom, t_right, t_top = tile.bounds<EOL>if shift:<EOL><INDENT>t_left += pyramid.x_size / <NUM_LIT:2><EOL>t_right += pyramid.x_size / <NUM_LIT:2><EOL>if t_right > pyramid.right:<EOL><INDENT>t_right -= pyramid.x_size<EOL>t_left -= pyramid.x_size<EOL><DEDENT><DEDENT>minrow, maxrow, mincol, maxcol = bounds_to_ranges(<EOL>out_bounds=(t_left, t_bottom, t_right, t_top),<EOL>in_affine=affine,<EOL>in_shape=(height, width)<EOL>)<EOL>mosaic[:, minrow:maxrow, mincol:maxcol] = data<EOL>mosaic.mask[:, minrow:maxrow, mincol:maxcol] = data.mask<EOL><DEDENT>if shift:<EOL><INDENT>affine = Affine(resolution, <NUM_LIT:0>, m_left - pyramid.x_size / <NUM_LIT:2>, <NUM_LIT:0>, -resolution, m_top)<EOL><DEDENT>return ReferencedRaster(<EOL>data=mosaic,<EOL>affine=affine,<EOL>bounds=Bounds(m_left, m_bottom, m_right, m_top),<EOL>crs=tile.crs<EOL>)<EOL>", "docstring": "Create a mosaic from tiles. Tiles must be connected (also possible over Antimeridian),\notherwise strange things can happen!\n\nParameters\n----------\ntiles : iterable\n    an iterable containing tuples of a BufferedTile and an array\nnodata : integer or float\n    raster nodata value to initialize the mosaic with (default: 0)\n\nReturns\n-------\nmosaic : ReferencedRaster", "id": "f12820:m11"}
{"signature": "def prepare_array(data, masked=True, nodata=<NUM_LIT:0>, dtype=\"<STR_LIT>\"):", "body": "<EOL>if isinstance(data, (list, tuple)):<EOL><INDENT>return _prepare_iterable(data, masked, nodata, dtype)<EOL><DEDENT>elif isinstance(data, np.ndarray) and data.ndim == <NUM_LIT:2>:<EOL><INDENT>data = ma.expand_dims(data, axis=<NUM_LIT:0>)<EOL><DEDENT>if isinstance(data, ma.MaskedArray):<EOL><INDENT>return _prepare_masked(data, masked, nodata, dtype)<EOL><DEDENT>elif isinstance(data, np.ndarray):<EOL><INDENT>if masked:<EOL><INDENT>return ma.masked_values(data.astype(dtype, copy=False), nodata, copy=False)<EOL><DEDENT>else:<EOL><INDENT>return data.astype(dtype, copy=False)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>", "docstring": "Turn input data into a proper array for further usage.\n\nOutut array is always 3-dimensional with the given data type. If the output\nis masked, the fill_value corresponds to the given nodata value and the\nnodata value will be burned into the data array.\n\nParameters\n----------\ndata : array or iterable\n    array (masked or normal) or iterable containing arrays\nnodata : integer or float\n    nodata value (default: 0) used if input is not a masked array and\n    for output array\nmasked : bool\n    return a NumPy Array or a NumPy MaskedArray (default: True)\ndtype : string\n    data type of output array (default: \"int16\")\n\nReturns\n-------\narray : array", "id": "f12820:m17"}
{"signature": "def _get_warped_array(<EOL>input_file=None,<EOL>indexes=None,<EOL>dst_bounds=None,<EOL>dst_shape=None,<EOL>dst_crs=None,<EOL>resampling=None,<EOL>src_nodata=None,<EOL>dst_nodata=None<EOL>):", "body": "try:<EOL><INDENT>return _rasterio_read(<EOL>input_file=input_file,<EOL>indexes=indexes,<EOL>dst_bounds=dst_bounds,<EOL>dst_shape=dst_shape,<EOL>dst_crs=dst_crs,<EOL>resampling=resampling,<EOL>src_nodata=src_nodata,<EOL>dst_nodata=dst_nodata<EOL>)<EOL><DEDENT>except Exception as e:<EOL><INDENT>logger.exception(\"<STR_LIT>\", input_file, e)<EOL>raise<EOL><DEDENT>", "docstring": "Extract a numpy array from a raster file.", "id": "f12820:m3"}
{"signature": "def __exit__(self, *args):", "body": "self.rio_memfile.close()<EOL>", "docstring": "Make sure MemoryFile is closed.", "id": "f12820:c0:m2"}
{"signature": "def memory_file(data=None, profile=None):", "body": "memfile = MemoryFile()<EOL>profile.update(width=data.shape[-<NUM_LIT:2>], height=data.shape[-<NUM_LIT:1>])<EOL>with memfile.open(**profile) as dataset:<EOL><INDENT>dataset.write(data)<EOL><DEDENT>return memfile<EOL>", "docstring": "Return a rasterio.io.MemoryFile instance from input.\n\nParameters\n----------\ndata : array\n    array to be written\nprofile : dict\n    rasterio profile for MemoryFile", "id": "f12820:m16"}
{"signature": "def write_raster_window(<EOL>in_tile=None, in_data=None, out_profile=None, out_tile=None, out_path=None,<EOL>tags=None, bucket_resource=None<EOL>):", "body": "if not isinstance(out_path, str):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>logger.debug(\"<STR_LIT>\", out_path)<EOL>if out_path == \"<STR_LIT>\":<EOL><INDENT>raise DeprecationWarning(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>out_tile = in_tile if out_tile is None else out_tile<EOL>_validate_write_window_params(in_tile, out_tile, in_data, out_profile)<EOL>window_data = extract_from_array(<EOL>in_raster=in_data,<EOL>in_affine=in_tile.affine,<EOL>out_tile=out_tile<EOL>) if in_tile != out_tile else in_data<EOL>if \"<STR_LIT>\" in out_profile:<EOL><INDENT>out_profile[\"<STR_LIT>\"] = out_profile.pop(\"<STR_LIT>\")<EOL><DEDENT>if window_data.all() is not ma.masked:<EOL><INDENT>try:<EOL><INDENT>if out_path.startswith(\"<STR_LIT>\"):<EOL><INDENT>with RasterWindowMemoryFile(<EOL>in_tile=out_tile,<EOL>in_data=window_data,<EOL>out_profile=out_profile,<EOL>out_tile=out_tile,<EOL>tags=tags<EOL>) as memfile:<EOL><INDENT>logger.debug((out_tile.id, \"<STR_LIT>\", out_path))<EOL>bucket_resource.put_object(<EOL>Key=\"<STR_LIT:/>\".join(out_path.split(\"<STR_LIT:/>\")[<NUM_LIT:3>:]),<EOL>Body=memfile<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>with rasterio.open(out_path, '<STR_LIT:w>', **out_profile) as dst:<EOL><INDENT>logger.debug((out_tile.id, \"<STR_LIT>\", out_path))<EOL>dst.write(window_data.astype(out_profile[\"<STR_LIT>\"], copy=False))<EOL>_write_tags(dst, tags)<EOL><DEDENT><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>logger.exception(\"<STR_LIT>\", out_path, e)<EOL>raise<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.debug((out_tile.id, \"<STR_LIT>\", out_path))<EOL><DEDENT>", "docstring": "Write a window from a numpy array to an output file.\n\nParameters\n----------\nin_tile : ``BufferedTile``\n    ``BufferedTile`` with a data attribute holding NumPy data\nin_data : 
array\nout_profile : dictionary\n    metadata dictionary for rasterio\nout_tile : ``Tile``\n    provides output boundaries; if None, in_tile is used\nout_path : string\n    output path to write to\ntags : optional tags to be added to GeoTIFF file\nbucket_resource : boto3 bucket resource to write to in case of S3 output", "id": "f12820:m6"}
{"signature": "def bounds_to_ranges(out_bounds=None, in_affine=None, in_shape=None):", "body": "return itertools.chain(<EOL>*from_bounds(<EOL>*out_bounds, transform=in_affine, height=in_shape[-<NUM_LIT:2>], width=in_shape[-<NUM_LIT:1>]<EOL>).round_lengths(pixel_precision=<NUM_LIT:0>).round_offsets(pixel_precision=<NUM_LIT:0>).toranges()<EOL>)<EOL>", "docstring": "Return bounds range values from geolocated input.\n\nParameters\n----------\nout_bounds : tuple\n    left, bottom, right, top\nin_affine : Affine\n    input geolocation\nin_shape : tuple\n    input shape\n\nReturns\n-------\nminrow, maxrow, mincol, maxcol", "id": "f12820:m12"}
{"signature": "def resample_from_array(<EOL>in_raster=None,<EOL>in_affine=None,<EOL>out_tile=None,<EOL>in_crs=None,<EOL>resampling=\"<STR_LIT>\",<EOL>nodataval=<NUM_LIT:0><EOL>):", "body": "<EOL>if isinstance(in_raster, ma.MaskedArray):<EOL><INDENT>pass<EOL><DEDENT>if isinstance(in_raster, np.ndarray):<EOL><INDENT>in_raster = ma.MaskedArray(in_raster, mask=in_raster == nodataval)<EOL><DEDENT>elif isinstance(in_raster, ReferencedRaster):<EOL><INDENT>in_affine = in_raster.affine<EOL>in_crs = in_raster.crs<EOL>in_raster = in_raster.data<EOL><DEDENT>elif isinstance(in_raster, tuple):<EOL><INDENT>in_raster = ma.MaskedArray(<EOL>data=np.stack(in_raster),<EOL>mask=np.stack([<EOL>band.mask<EOL>if isinstance(band, ma.masked_array)<EOL>else np.where(band == nodataval, True, False)<EOL>for band in in_raster<EOL>]),<EOL>fill_value=nodataval<EOL>)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % type(in_raster))<EOL><DEDENT>if in_raster.ndim == <NUM_LIT:2>:<EOL><INDENT>in_raster = ma.expand_dims(in_raster, axis=<NUM_LIT:0>)<EOL><DEDENT>elif in_raster.ndim == <NUM_LIT:3>:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if in_raster.fill_value != nodataval:<EOL><INDENT>ma.set_fill_value(in_raster, nodataval)<EOL><DEDENT>out_shape = (in_raster.shape[<NUM_LIT:0>], ) + out_tile.shape<EOL>dst_data = np.empty(out_shape, in_raster.dtype)<EOL>in_raster = ma.masked_array(<EOL>data=in_raster.filled(), mask=in_raster.mask, fill_value=nodataval<EOL>)<EOL>reproject(<EOL>in_raster,<EOL>dst_data,<EOL>src_transform=in_affine,<EOL>src_crs=in_crs if in_crs else out_tile.crs,<EOL>dst_transform=out_tile.affine,<EOL>dst_crs=out_tile.crs,<EOL>resampling=Resampling[resampling]<EOL>)<EOL>return ma.MaskedArray(dst_data, mask=dst_data == nodataval)<EOL>", "docstring": "Extract and resample from array to target tile.\n\nParameters\n----------\nin_raster : array\nin_affine : ``Affine``\nout_tile : ``BufferedTile``\nresampling : string\n    one of 
rasterio's resampling methods (default: nearest)\nnodataval : integer or float\n    raster nodata value (default: 0)\n\nReturns\n-------\nresampled array : array", "id": "f12820:m10"}
{"signature": "def makedirs(path):", "body": "if not path_is_remote(path):<EOL><INDENT>try:<EOL><INDENT>os.makedirs(path)<EOL><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>", "docstring": "Silently create all subdirectories of path if path is local.\n\nParameters:\n-----------\npath : path", "id": "f12821:m7"}
{"signature": "def path_exists(path):", "body": "if path.startswith((\"<STR_LIT>\", \"<STR_LIT>\")):<EOL><INDENT>try:<EOL><INDENT>urlopen(path).info()<EOL>return True<EOL><DEDENT>except HTTPError as e:<EOL><INDENT>if e.code == <NUM_LIT>:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT>elif path.startswith(\"<STR_LIT>\"):<EOL><INDENT>bucket = get_boto3_bucket(path.split(\"<STR_LIT:/>\")[<NUM_LIT:2>])<EOL>key = \"<STR_LIT:/>\".join(path.split(\"<STR_LIT:/>\")[<NUM_LIT:3>:])<EOL>for obj in bucket.objects.filter(Prefix=key):<EOL><INDENT>if obj.key == key:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\", path, os.path.exists(path))<EOL>return os.path.exists(path)<EOL><DEDENT>", "docstring": "Check if file exists either remote or local.\n\nParameters:\n-----------\npath : path to file\n\nReturns:\n--------\nexists : bool", "id": "f12821:m4"}
{"signature": "def get_gdal_options(opts, is_remote=False):", "body": "user_opts = {} if opts is None else dict(**opts)<EOL>if is_remote:<EOL><INDENT>return dict(GDAL_HTTP_OPTS, **user_opts)<EOL><DEDENT>else:<EOL><INDENT>return user_opts<EOL><DEDENT>", "docstring": "Return a merged set of custom and default GDAL/rasterio Env options.\n\nIf is_remote is set to True, the default GDAL_HTTP_OPTS are appended.\n\nParameters\n----------\nopts : dict or None\n    Explicit GDAL options.\nis_remote : bool\n    Indicate whether Env is for a remote file.\n\nReturns\n-------\ndictionary", "id": "f12821:m11"}
{"signature": "def read_json(path):", "body": "if path.startswith((\"<STR_LIT>\", \"<STR_LIT>\")):<EOL><INDENT>try:<EOL><INDENT>return json.loads(urlopen(path).read().decode())<EOL><DEDENT>except HTTPError:<EOL><INDENT>raise FileNotFoundError(\"<STR_LIT>\", path)<EOL><DEDENT><DEDENT>elif path.startswith(\"<STR_LIT>\"):<EOL><INDENT>bucket = get_boto3_bucket(path.split(\"<STR_LIT:/>\")[<NUM_LIT:2>])<EOL>key = \"<STR_LIT:/>\".join(path.split(\"<STR_LIT:/>\")[<NUM_LIT:3>:])<EOL>for obj in bucket.objects.filter(Prefix=key):<EOL><INDENT>if obj.key == key:<EOL><INDENT>return json.loads(obj.get()['<STR_LIT>'].read().decode())<EOL><DEDENT><DEDENT>raise FileNotFoundError(\"<STR_LIT>\", path)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>with open(path, \"<STR_LIT:r>\") as src:<EOL><INDENT>return json.loads(src.read())<EOL><DEDENT><DEDENT>except:<EOL><INDENT>raise FileNotFoundError(\"<STR_LIT>\", path)<EOL><DEDENT><DEDENT>", "docstring": "Read local or remote.", "id": "f12821:m9"}
{"signature": "def path_is_remote(path, s3=True):", "body": "prefixes = (\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>if s3:<EOL><INDENT>prefixes += (\"<STR_LIT>\", \"<STR_LIT>\")<EOL><DEDENT>return path.startswith(prefixes)<EOL>", "docstring": "Determine whether file path is remote or local.\n\nParameters\n----------\npath : path to file\n\nReturns\n-------\nis_remote : bool", "id": "f12821:m3"}
{"signature": "def get_best_zoom_level(input_file, tile_pyramid_type):", "body": "tile_pyramid = BufferedTilePyramid(tile_pyramid_type)<EOL>with rasterio.open(input_file, \"<STR_LIT:r>\") as src:<EOL><INDENT>xmin, ymin, xmax, ymax = reproject_geometry(<EOL>segmentize_geometry(<EOL>box(<EOL>src.bounds.left, src.bounds.bottom, src.bounds.right,<EOL>src.bounds.top<EOL>),<EOL>get_segmentize_value(input_file, tile_pyramid)<EOL>),<EOL>src_crs=src.crs, dst_crs=tile_pyramid.crs<EOL>).bounds<EOL>x_dif = xmax - xmin<EOL>y_dif = ymax - ymin<EOL>size = float(src.width + src.height)<EOL>avg_resolution = (<EOL>(x_dif / float(src.width)) * (float(src.width) / size) +<EOL>(y_dif / float(src.height)) * (float(src.height) / size)<EOL>)<EOL><DEDENT>for zoom in range(<NUM_LIT:0>, <NUM_LIT>):<EOL><INDENT>if tile_pyramid.pixel_x_size(zoom) <= avg_resolution:<EOL><INDENT>return zoom-<NUM_LIT:1><EOL><DEDENT><DEDENT>", "docstring": "Determine the best base zoom level for a raster.\n\n\"Best\" means the maximum zoom level where no oversampling has to be done.\n\nParameters\n----------\ninput_file : path to raster file\ntile_pyramid_type : ``TilePyramid`` projection (``geodetic`` or``mercator``)\n\nReturns\n-------\nzoom : integer", "id": "f12821:m0"}
{"signature": "def __enter__(self):", "body": "self.fio_memfile = MemoryFile()<EOL>with self.fio_memfile.open(<EOL>schema=self.schema,<EOL>driver=self.driver,<EOL>crs=self.tile.crs<EOL>) as dst:<EOL><INDENT>dst.writerecords(self.features)<EOL><DEDENT>return self.fio_memfile<EOL>", "docstring": "Open MemoryFile, write data and return.", "id": "f12822:c0:m1"}
{"signature": "def multipart_to_singleparts(geom):", "body": "if isinstance(geom, base.BaseGeometry):<EOL><INDENT>if hasattr(geom, \"<STR_LIT>\"):<EOL><INDENT>for subgeom in geom:<EOL><INDENT>yield subgeom<EOL><DEDENT><DEDENT>else:<EOL><INDENT>yield geom<EOL><DEDENT><DEDENT>", "docstring": "Yield single part geometries if geom is multipart, otherwise yield geom.\n\nParameters:\n-----------\ngeom : shapely geometry\n\nReturns:\n--------\nshapely single part geometries", "id": "f12822:m10"}
{"signature": "def write_vector_window(<EOL>in_data=None, out_schema=None, out_tile=None, out_path=None, bucket_resource=None<EOL>):", "body": "<EOL>try:<EOL><INDENT>os.remove(out_path)<EOL><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT>out_features = []<EOL>for feature in in_data:<EOL><INDENT>try:<EOL><INDENT>for out_geom in multipart_to_singleparts(<EOL>clean_geometry_type(<EOL>to_shape(feature[\"<STR_LIT>\"]).intersection(out_tile.bbox),<EOL>out_schema[\"<STR_LIT>\"]<EOL>)<EOL>):<EOL><INDENT>out_features.append({<EOL>\"<STR_LIT>\": mapping(out_geom),<EOL>\"<STR_LIT>\": feature[\"<STR_LIT>\"]<EOL>})<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>logger.warning(\"<STR_LIT>\", e)<EOL>continue<EOL><DEDENT><DEDENT>if out_features:<EOL><INDENT>try:<EOL><INDENT>if out_path.startswith(\"<STR_LIT>\"):<EOL><INDENT>with VectorWindowMemoryFile(<EOL>tile=out_tile,<EOL>features=out_features,<EOL>schema=out_schema,<EOL>driver=\"<STR_LIT>\"<EOL>) as memfile:<EOL><INDENT>logger.debug((out_tile.id, \"<STR_LIT>\", out_path))<EOL>bucket_resource.put_object(<EOL>Key=\"<STR_LIT:/>\".join(out_path.split(\"<STR_LIT:/>\")[<NUM_LIT:3>:]),<EOL>Body=memfile<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>with fiona.open(<EOL>out_path, '<STR_LIT:w>', schema=out_schema, driver=\"<STR_LIT>\",<EOL>crs=out_tile.crs.to_dict()<EOL>) as dst:<EOL><INDENT>logger.debug((out_tile.id, \"<STR_LIT>\", out_path))<EOL>dst.writerecords(out_features)<EOL><DEDENT><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>logger.error(\"<STR_LIT>\", out_path, e)<EOL>raise<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.debug((out_tile.id, \"<STR_LIT>\", out_path))<EOL><DEDENT>", "docstring": "Write features to GeoJSON file.\n\nParameters\n----------\nin_data : features\nout_schema : dictionary\n    output schema for fiona\nout_tile : ``BufferedTile``\n    tile used for output extent\nout_path : string\n    output path for GeoJSON file", "id": "f12822:m6"}
{"signature": "def __init__(<EOL>self, tile=None, features=None, schema=None, driver=None<EOL>):", "body": "self.tile = tile<EOL>self.schema = schema<EOL>self.driver = driver<EOL>self.features = features<EOL>", "docstring": "Prepare data & profile.", "id": "f12822:c0:m0"}
{"signature": "def __exit__(self, *args):", "body": "self.fio_memfile.close()<EOL>", "docstring": "Make sure MemoryFile is closed.", "id": "f12822:c0:m2"}
{"signature": "def segmentize_geometry(geometry, segmentize_value):", "body": "if geometry.geom_type != \"<STR_LIT>\":<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>return Polygon(<EOL>LinearRing([<EOL>p<EOL>for l in map(<EOL>lambda x: LineString([x[<NUM_LIT:0>], x[<NUM_LIT:1>]]),<EOL>zip(geometry.exterior.coords[:-<NUM_LIT:1>], geometry.exterior.coords[<NUM_LIT:1>:])<EOL>)<EOL>for p in [<EOL>l.interpolate(segmentize_value * i).coords[<NUM_LIT:0>]<EOL>for i in range(int(l.length / segmentize_value))<EOL>] + [l.coords[<NUM_LIT:1>]]<EOL>])<EOL>)<EOL>", "docstring": "Segmentize Polygon outer ring by segmentize value.\n\nJust Polygon geometry type supported.\n\nParameters\n----------\ngeometry : ``shapely.geometry``\nsegmentize_value: float\n\nReturns\n-------\ngeometry : ``shapely.geometry``", "id": "f12822:m3"}
{"signature": "def reproject_geometry(<EOL>geometry, src_crs=None, dst_crs=None, error_on_clip=False, validity_check=True,<EOL>antimeridian_cutting=False<EOL>):", "body": "src_crs = _validated_crs(src_crs)<EOL>dst_crs = _validated_crs(dst_crs)<EOL>def _reproject_geom(geometry, src_crs, dst_crs):<EOL><INDENT>if geometry.is_empty:<EOL><INDENT>return geometry<EOL><DEDENT>else:<EOL><INDENT>out_geom = to_shape(<EOL>transform_geom(<EOL>src_crs.to_dict(),<EOL>dst_crs.to_dict(),<EOL>mapping(geometry),<EOL>antimeridian_cutting=antimeridian_cutting<EOL>)<EOL>)<EOL>return _repair(out_geom) if validity_check else out_geom<EOL><DEDENT><DEDENT>if src_crs == dst_crs or geometry.is_empty:<EOL><INDENT>return _repair(geometry)<EOL><DEDENT>elif (<EOL>dst_crs.is_epsg_code and               <EOL>dst_crs.get(\"<STR_LIT>\") in CRS_BOUNDS and  <EOL>dst_crs.get(\"<STR_LIT>\") != \"<STR_LIT>\"     <EOL>):<EOL><INDENT>wgs84_crs = CRS().from_epsg(<NUM_LIT>)<EOL>crs_bbox = box(*CRS_BOUNDS[dst_crs.get(\"<STR_LIT>\")])<EOL>geometry_4326 = _reproject_geom(geometry, src_crs, wgs84_crs)<EOL>if error_on_clip and not geometry_4326.within(crs_bbox):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>return _reproject_geom(crs_bbox.intersection(geometry_4326), wgs84_crs, dst_crs)<EOL><DEDENT>else:<EOL><INDENT>return _reproject_geom(geometry, src_crs, dst_crs)<EOL><DEDENT>", "docstring": "Reproject a geometry to target CRS.\n\nAlso, clips geometry if it lies outside the destination CRS boundary.\nSupported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical\nMercator) and 3035 (ETRS89 / ETRS-LAEA).\n\nParameters\n----------\ngeometry : ``shapely.geometry``\nsrc_crs : ``rasterio.crs.CRS`` or EPSG code\n    CRS of source data\ndst_crs : ``rasterio.crs.CRS`` or EPSG code\n    target CRS\nerror_on_clip : bool\n    raises a ``RuntimeError`` if a geometry is outside of CRS bounds\n    (default: False)\nvalidity_check : bool\n    checks if reprojected geometry is valid and throws 
``TopologicalError``\n    if invalid (default: True)\nantimeridian_cutting : bool\n    cut geometry at Antimeridian; can result in a multipart output geometry\n\nReturns\n-------\ngeometry : ``shapely.geometry``", "id": "f12822:m0"}
{"signature": "def zoom_index_gen(<EOL>mp=None,<EOL>out_dir=None,<EOL>zoom=None,<EOL>geojson=False,<EOL>gpkg=False,<EOL>shapefile=False,<EOL>txt=False,<EOL>vrt=False,<EOL>fieldname=\"<STR_LIT:location>\",<EOL>basepath=None,<EOL>for_gdal=True,<EOL>threading=False,<EOL>):", "body": "for zoom in get_zoom_levels(process_zoom_levels=zoom):<EOL><INDENT>with ExitStack() as es:<EOL><INDENT>index_writers = []<EOL>if geojson:<EOL><INDENT>index_writers.append(<EOL>es.enter_context(<EOL>VectorFileWriter(<EOL>driver=\"<STR_LIT>\",<EOL>out_path=_index_file_path(out_dir, zoom, \"<STR_LIT>\"),<EOL>crs=mp.config.output_pyramid.crs,<EOL>fieldname=fieldname<EOL>)<EOL>)<EOL>)<EOL><DEDENT>if gpkg:<EOL><INDENT>index_writers.append(<EOL>es.enter_context(<EOL>VectorFileWriter(<EOL>driver=\"<STR_LIT>\",<EOL>out_path=_index_file_path(out_dir, zoom, \"<STR_LIT>\"),<EOL>crs=mp.config.output_pyramid.crs,<EOL>fieldname=fieldname<EOL>)<EOL>)<EOL>)<EOL><DEDENT>if shapefile:<EOL><INDENT>index_writers.append(<EOL>es.enter_context(<EOL>VectorFileWriter(<EOL>driver=\"<STR_LIT>\",<EOL>out_path=_index_file_path(out_dir, zoom, \"<STR_LIT>\"),<EOL>crs=mp.config.output_pyramid.crs,<EOL>fieldname=fieldname<EOL>)<EOL>)<EOL>)<EOL><DEDENT>if txt:<EOL><INDENT>index_writers.append(<EOL>es.enter_context(<EOL>TextFileWriter(out_path=_index_file_path(out_dir, zoom, \"<STR_LIT>\"))<EOL>)<EOL>)<EOL><DEDENT>if vrt:<EOL><INDENT>index_writers.append(<EOL>es.enter_context(<EOL>VRTFileWriter(<EOL>out_path=_index_file_path(out_dir, zoom, \"<STR_LIT>\"),<EOL>output=mp.config.output,<EOL>out_pyramid=mp.config.output_pyramid<EOL>)<EOL>)<EOL>)<EOL><DEDENT>logger.debug(\"<STR_LIT>\", index_writers)<EOL>def _worker(tile):<EOL><INDENT>tile_path = _tile_path(<EOL>orig_path=mp.config.output.get_path(tile),<EOL>basepath=basepath,<EOL>for_gdal=for_gdal<EOL>)<EOL>indexes = [<EOL>i for i in index_writers<EOL>if not i.entry_exists(tile=tile, path=tile_path)<EOL>]<EOL>if indexes:<EOL><INDENT>output_exists = 
mp.config.output.tiles_exist(output_tile=tile)<EOL><DEDENT>else:<EOL><INDENT>output_exists = None<EOL><DEDENT>return tile, tile_path, indexes, output_exists<EOL><DEDENT>with concurrent.futures.ThreadPoolExecutor() as executor:<EOL><INDENT>for task in concurrent.futures.as_completed(<EOL>(<EOL>executor.submit(_worker, i)<EOL>for i in mp.config.output_pyramid.tiles_from_geom(<EOL>mp.config.area_at_zoom(zoom), zoom<EOL>)<EOL>)<EOL>):<EOL><INDENT>tile, tile_path, indexes, output_exists = task.result()<EOL>if indexes and output_exists:<EOL><INDENT>logger.debug(\"<STR_LIT>\", tile_path)<EOL>logger.debug(\"<STR_LIT>\" % len(indexes))<EOL>for index in indexes:<EOL><INDENT>index.write(tile, tile_path)<EOL><DEDENT><DEDENT>yield tile<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Generate indexes for given zoom level.\n\nParameters\n----------\nmp : Mapchete object\n    process output to be indexed\nout_dir : path\n    optionally override process output directory\nzoom : int\n    zoom level to be processed\ngeojson : bool\n    generate GeoJSON index (default: False)\ngpkg : bool\n    generate GeoPackage index (default: False)\nshapefile : bool\n    generate Shapefile index (default: False)\ntxt : bool\n    generate tile path list textfile (default: False)\nvrt : bool\n    GDAL-style VRT file (default: False)\nfieldname : str\n    field name which contains paths of tiles (default: \"location\")\nbasepath : str\n    if set, use custom base path instead of output path\nfor_gdal : bool\n    use GDAL compatible remote paths, i.e. add \"/vsicurl/\" before path\n    (default: True)", "id": "f12823:m0"}
{"signature": "def __init__(self, tile, pixelbuffer=<NUM_LIT:0>):", "body": "assert not isinstance(tile, BufferedTile)<EOL>Tile.__init__(self, tile.tile_pyramid, tile.zoom, tile.row, tile.col)<EOL>self._tile = tile<EOL>self.pixelbuffer = pixelbuffer<EOL>", "docstring": "Initialize.", "id": "f12824:c1:m0"}
{"signature": "def tile(self, zoom, row, col):", "body": "tile = self.tile_pyramid.tile(zoom, row, col)<EOL>return BufferedTile(tile, pixelbuffer=self.pixelbuffer)<EOL>", "docstring": "Return ``BufferedTile`` object of this ``BufferedTilePyramid``.\n\nParameters\n----------\nzoom : integer\n    zoom level\nrow : integer\n    tile matrix row\ncol : integer\n    tile matrix column\n\nReturns\n-------\nbuffered tile : ``BufferedTile``", "id": "f12824:c0:m1"}
{"signature": "@cached_property<EOL><INDENT>def affine(self):<DEDENT>", "body": "return self._tile.affine(pixelbuffer=self.pixelbuffer)<EOL>", "docstring": "Return buffered Affine.", "id": "f12824:c1:m8"}
{"signature": "def intersecting(self, tile):", "body": "return [<EOL>self.tile(*intersecting_tile.id)<EOL>for intersecting_tile in self.tile_pyramid.intersecting(tile)<EOL>]<EOL>", "docstring": "Return all BufferedTiles intersecting with tile.\n\nParameters\n----------\ntile : ``BufferedTile``\n    another tile", "id": "f12824:c0:m5"}
{"signature": "@cached_property<EOL><INDENT>def width(self):<DEDENT>", "body": "return self._tile.shape(pixelbuffer=self.pixelbuffer).width<EOL>", "docstring": "Return buffered width.", "id": "f12824:c1:m6"}
{"signature": "@cached_property<EOL><INDENT>def shape(self):<DEDENT>", "body": "return self._tile.shape(pixelbuffer=self.pixelbuffer)<EOL>", "docstring": "Return buffered shape.", "id": "f12824:c1:m7"}
{"signature": "def get_parent(self):", "body": "return BufferedTile(self._tile.get_parent(), self.pixelbuffer)<EOL>", "docstring": "Get tile parent (intersecting tile in previous zoom level).\n\nReturns\n-------\nparent : ``BufferedTile``", "id": "f12824:c1:m12"}
{"signature": "def is_on_edge(self):", "body": "return (<EOL>self.left <= self.tile_pyramid.left or      <EOL>self.bottom <= self.tile_pyramid.bottom or  <EOL>self.right >= self.tile_pyramid.right or    <EOL>self.top >= self.tile_pyramid.top           <EOL>)<EOL>", "docstring": "Determine whether tile touches or goes over pyramid edge.", "id": "f12824:c1:m14"}
{"signature": "def user_process_logger(pname):", "body": "warnings.warn(<EOL>DeprecationWarning(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL>)<EOL>return logging.getLogger(\"<STR_LIT>\" + pname)<EOL>", "docstring": "Logger to be used within a user process file.", "id": "f12825:m3"}
{"signature": "def execute(mp):", "body": "<EOL>with mp.open(\"<STR_LIT>\", resampling=\"<STR_LIT>\") as raster_file:<EOL><INDENT>if raster_file.is_empty():<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>dem = raster_file.read()<EOL><DEDENT>return dem<EOL>", "docstring": "Example process for testing.\n\nInputs:\n-------\nfile1\n    raster file\n\nParameters:\n-----------\n\nOutput:\n-------\nnp.ndarray", "id": "f12828:m0"}
{"signature": "@click.command(help=\"<STR_LIT>\")<EOL>@utils.arg_create_mapchete_file<EOL>@utils.arg_process_file<EOL>@utils.arg_out_format<EOL>@utils.opt_out_path<EOL>@utils.opt_pyramid_type<EOL>@utils.opt_force<EOL>def create(<EOL>mapchete_file,<EOL>process_file,<EOL>out_format,<EOL>out_path=None,<EOL>pyramid_type=None,<EOL>force=False<EOL>):", "body": "if os.path.isfile(process_file) or os.path.isfile(mapchete_file):<EOL><INDENT>if not force:<EOL><INDENT>raise IOError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>out_path = out_path if out_path else os.path.join(os.getcwd(), \"<STR_LIT>\")<EOL>process_template = pkg_resources.resource_filename(<EOL>\"<STR_LIT>\", \"<STR_LIT>\"<EOL>)<EOL>process_file = os.path.join(os.getcwd(), process_file)<EOL>copyfile(process_template, process_file)<EOL>mapchete_template = pkg_resources.resource_filename(<EOL>\"<STR_LIT>\", \"<STR_LIT>\"<EOL>)<EOL>output_options = dict(<EOL>format=out_format, path=out_path, **FORMAT_MANDATORY[out_format]<EOL>)<EOL>pyramid_options = {'<STR_LIT>': pyramid_type}<EOL>substitute_elements = {<EOL>'<STR_LIT>': process_file,<EOL>'<STR_LIT>': dump({'<STR_LIT>': output_options}, default_flow_style=False),<EOL>'<STR_LIT>': dump({'<STR_LIT>': pyramid_options}, default_flow_style=False)<EOL>}<EOL>with open(mapchete_template, '<STR_LIT:r>') as config_template:<EOL><INDENT>config = Template(config_template.read())<EOL>customized_config = config.substitute(substitute_elements)<EOL><DEDENT>with open(mapchete_file, '<STR_LIT:w>') as target_config:<EOL><INDENT>target_config.write(customized_config)<EOL><DEDENT>", "docstring": "Create an empty Mapchete and process file in a given directory.", "id": "f12832:m0"}
{"signature": "@click.command(help=\"<STR_LIT>\")<EOL>@utils.arg_mapchete_files<EOL>@utils.opt_zoom<EOL>@utils.opt_bounds<EOL>@utils.opt_point<EOL>@utils.opt_wkt_geometry<EOL>@utils.opt_tile<EOL>@utils.opt_overwrite<EOL>@utils.opt_multi<EOL>@utils.opt_input_file<EOL>@utils.opt_logfile<EOL>@utils.opt_verbose<EOL>@utils.opt_no_pbar<EOL>@utils.opt_debug<EOL>@utils.opt_max_chunksize<EOL>@utils.opt_vrt<EOL>@utils.opt_idx_out_dir<EOL>def execute(<EOL>mapchete_files,<EOL>zoom=None,<EOL>bounds=None,<EOL>point=None,<EOL>wkt_geometry=None,<EOL>tile=None,<EOL>overwrite=False,<EOL>multi=None,<EOL>input_file=None,<EOL>logfile=None,<EOL>verbose=False,<EOL>no_pbar=False,<EOL>debug=False,<EOL>max_chunksize=None,<EOL>vrt=False,<EOL>idx_out_dir=None<EOL>):", "body": "multi = multi if multi else cpu_count()<EOL>mode = \"<STR_LIT>\" if overwrite else \"<STR_LIT>\"<EOL>if debug or not verbose:<EOL><INDENT>verbose_dst = open(os.devnull, '<STR_LIT:w>')<EOL><DEDENT>else:<EOL><INDENT>verbose_dst = sys.stdout<EOL><DEDENT>for mapchete_file in mapchete_files:<EOL><INDENT>tqdm.tqdm.write(\"<STR_LIT>\" % mapchete_file, file=verbose_dst)<EOL>with click_spinner.spinner(disable=debug) as spinner:<EOL><INDENT>if tile:<EOL><INDENT>tile = raw_conf_process_pyramid(raw_conf(mapchete_file)).tile(*tile)<EOL>with mapchete.open(<EOL>mapchete_file,<EOL>mode=mode,<EOL>bounds=tile.bounds,<EOL>zoom=tile.zoom,<EOL>single_input_file=input_file<EOL>) as mp:<EOL><INDENT>spinner.stop()<EOL>tqdm.tqdm.write(\"<STR_LIT>\", file=verbose_dst)<EOL>for result in mp.batch_processor(tile=tile):<EOL><INDENT>utils.write_verbose_msg(result, dst=verbose_dst)<EOL><DEDENT>tqdm.tqdm.write(<EOL>\"<STR_LIT>\" % mapchete_file, file=verbose_dst<EOL>)<EOL>if vrt:<EOL><INDENT>tqdm.tqdm.write(\"<STR_LIT>\", file=verbose_dst)<EOL>for tile in tqdm.tqdm(<EOL>zoom_index_gen(<EOL>mp=mp,<EOL>zoom=tile.zoom,<EOL>out_dir=(<EOL>idx_out_dir if idx_out_dir else mp.config.output.path<EOL>),<EOL>vrt=vrt,<EOL>),<EOL>total=mp.count_tiles(tile.zoom, 
tile.zoom),<EOL>unit=\"<STR_LIT>\",<EOL>disable=debug or no_pbar<EOL>):<EOL><INDENT>logger.debug(\"<STR_LIT>\", tile)<EOL><DEDENT>tqdm.tqdm.write(<EOL>\"<STR_LIT>\" % mapchete_file,<EOL>file=verbose_dst<EOL>)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>with mapchete.open(<EOL>mapchete_file,<EOL>mode=mode,<EOL>zoom=zoom,<EOL>bounds=bounds_from_opts(<EOL>wkt_geometry=wkt_geometry,<EOL>point=point,<EOL>bounds=bounds,<EOL>raw_conf=raw_conf(mapchete_file)<EOL>),<EOL>single_input_file=input_file<EOL>) as mp:<EOL><INDENT>spinner.stop()<EOL>tiles_count = mp.count_tiles(<EOL>min(mp.config.init_zoom_levels),<EOL>max(mp.config.init_zoom_levels)<EOL>)<EOL>tqdm.tqdm.write(<EOL>\"<STR_LIT>\" % (tiles_count, multi),<EOL>file=verbose_dst<EOL>)<EOL>for process_info in tqdm.tqdm(<EOL>mp.batch_processor(<EOL>multi=multi, zoom=zoom, max_chunksize=max_chunksize<EOL>),<EOL>total=tiles_count,<EOL>unit=\"<STR_LIT>\",<EOL>disable=debug or no_pbar<EOL>):<EOL><INDENT>utils.write_verbose_msg(process_info, dst=verbose_dst)<EOL><DEDENT>tqdm.tqdm.write(<EOL>\"<STR_LIT>\" % mapchete_file, file=verbose_dst<EOL>)<EOL>if vrt:<EOL><INDENT>tqdm.tqdm.write(\"<STR_LIT>\", file=verbose_dst)<EOL>for tile in tqdm.tqdm(<EOL>zoom_index_gen(<EOL>mp=mp,<EOL>zoom=mp.config.init_zoom_levels,<EOL>out_dir=(<EOL>idx_out_dir if idx_out_dir<EOL>else mp.config.output.path<EOL>),<EOL>vrt=vrt<EOL>),<EOL>total=mp.count_tiles(<EOL>min(mp.config.init_zoom_levels),<EOL>max(mp.config.init_zoom_levels)<EOL>),<EOL>unit=\"<STR_LIT>\",<EOL>disable=debug or no_pbar<EOL>):<EOL><INDENT>logger.debug(\"<STR_LIT>\", tile)<EOL><DEDENT>tqdm.tqdm.write(<EOL>\"<STR_LIT>\" % mapchete_file,<EOL>file=verbose_dst<EOL>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Execute a Mapchete process.", "id": "f12834:m0"}
{"signature": "def _get_zoom(zoom, input_raster, pyramid_type):", "body": "if not zoom:<EOL><INDENT>minzoom = <NUM_LIT:1><EOL>maxzoom = get_best_zoom_level(input_raster, pyramid_type)<EOL><DEDENT>elif len(zoom) == <NUM_LIT:1>:<EOL><INDENT>minzoom = zoom[<NUM_LIT:0>]<EOL>maxzoom = zoom[<NUM_LIT:0>]<EOL><DEDENT>elif len(zoom) == <NUM_LIT:2>:<EOL><INDENT>if zoom[<NUM_LIT:0>] < zoom[<NUM_LIT:1>]:<EOL><INDENT>minzoom = zoom[<NUM_LIT:0>]<EOL>maxzoom = zoom[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>minzoom = zoom[<NUM_LIT:1>]<EOL>maxzoom = zoom[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>return minzoom, maxzoom<EOL>", "docstring": "Determine minimum and maximum zoomlevel.", "id": "f12838:m2"}
{"signature": "def wgs84_distance(lat1, lon1, lat2, lon2):", "body": "dLat = math.radians(lat2 - lat1)<EOL>dLon = math.radians(lon2 - lon1)<EOL>a = (math.sin(dLat / <NUM_LIT:2>) * math.sin(dLat / <NUM_LIT:2>) +<EOL>math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *<EOL>math.sin(dLon / <NUM_LIT:2>) * math.sin(dLon / <NUM_LIT:2>))<EOL>c = <NUM_LIT:2> * math.atan2(math.sqrt(a), math.sqrt(<NUM_LIT:1> - a))<EOL>d = EARTH_RADIUS * c<EOL>return d<EOL>", "docstring": "Distance (in meters) between two points in WGS84 coord system.", "id": "f12841:m1"}
{"signature": "def draw_net_using_node_coords(net):", "body": "import matplotlib.pyplot as plt<EOL>fig = plt.figure()<EOL>node_coords = {}<EOL>for node, data in net.nodes(data=True):<EOL><INDENT>node_coords[node] = (data['<STR_LIT>'], data['<STR_LIT>'])<EOL><DEDENT>ax = fig.add_subplot(<NUM_LIT>)<EOL>networkx.draw(net, pos=node_coords, ax=ax, node_size=<NUM_LIT:50>)<EOL>return fig<EOL>", "docstring": "Plot a networkx.Graph by using the lat and lon attributes of nodes.\nParameters\n----------\nnet : networkx.Graph\nReturns\n-------\nfig : matplotlib.figure\n    the figure object where the network is plotted", "id": "f12841:m16"}
{"signature": "def difference_of_pandas_dfs(df_self, df_other, col_names=None):", "body": "df = pd.concat([df_self, df_other])<EOL>df = df.reset_index(drop=True)<EOL>df_gpby = df.groupby(col_names)<EOL>idx = [x[<NUM_LIT:0>] for x in list(df_gpby.groups.values()) if len(x) == <NUM_LIT:1>]<EOL>df_sym_diff = df.reindex(idx)<EOL>df_diff = pd.concat([df_other, df_sym_diff])<EOL>df_diff = df_diff.reset_index(drop=True)<EOL>df_gpby = df_diff.groupby(col_names)<EOL>idx = [x[<NUM_LIT:0>] for x in list(df_gpby.groups.values()) if len(x) == <NUM_LIT:2>]<EOL>df_diff = df_diff.reindex(idx)<EOL>return df_diff<EOL>", "docstring": "Returns a dataframe with all of df_other that are not in df_self, when considering the columns specified in col_names\n:param df_self: pandas Dataframe\n:param df_other: pandas Dataframe\n:param col_names: list of column names\n:return:", "id": "f12841:m18"}
{"signature": "def str_time_to_day_seconds(time):", "body": "t = str(time).split('<STR_LIT::>')<EOL>seconds = int(t[<NUM_LIT:0>]) * <NUM_LIT> + int(t[<NUM_LIT:1>]) * <NUM_LIT> + int(t[<NUM_LIT:2>])<EOL>return seconds<EOL>", "docstring": "Converts time strings to integer seconds\n:param time: %H:%M:%S string\n:return: integer seconds", "id": "f12841:m8"}
{"signature": "@contextlib.contextmanager<EOL>def create_file(fname=None, fname_tmp=None, tmpdir=None,<EOL>save_tmpfile=False, keepext=False):", "body": "<EOL>if fname == '<STR_LIT>':<EOL><INDENT>yield fname<EOL>return<EOL><DEDENT>if fname_tmp is None:<EOL><INDENT>basename = os.path.basename(fname)<EOL>root, ext = os.path.splitext(basename)<EOL>dir_ = this_dir = os.path.dirname(fname)<EOL>if not keepext:<EOL><INDENT>root = root + ext<EOL>ext = '<STR_LIT>'<EOL><DEDENT>if tmpdir:<EOL><INDENT>if tmpdir is True:<EOL><INDENT>for dir__ in possible_tmpdirs:<EOL><INDENT>if os.access(dir__, os.F_OK):<EOL><INDENT>dir_ = dir__<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>tmpfile = tempfile.NamedTemporaryFile(<EOL>prefix='<STR_LIT>' + root + '<STR_LIT:->', suffix=ext, dir=dir_, delete=False)<EOL>fname_tmp = tmpfile.name<EOL><DEDENT>try:<EOL><INDENT>yield fname_tmp<EOL><DEDENT>except Exception as e:<EOL><INDENT>if save_tmpfile:<EOL><INDENT>print(\"<STR_LIT>\" % fname_tmp)<EOL><DEDENT>else:<EOL><INDENT>os.unlink(fname_tmp)<EOL><DEDENT>raise<EOL><DEDENT>try:<EOL><INDENT>os.rename(fname_tmp, fname)<EOL>os.chmod(fname, <NUM_LIT> & ~current_umask)<EOL><DEDENT>except OSError as e:<EOL><INDENT>tmpfile2 = tempfile.NamedTemporaryFile(<EOL>prefix='<STR_LIT>' + root + '<STR_LIT:->', suffix=ext, dir=this_dir, delete=False)<EOL>shutil.copy(fname_tmp, tmpfile2.name)<EOL>os.rename(tmpfile2.name, fname)<EOL>os.chmod(fname, <NUM_LIT> & ~current_umask)<EOL>os.unlink(fname_tmp)<EOL><DEDENT>", "docstring": "Context manager for making files with possibility of failure.\n\n    If you are creating a file, it is possible that the code will fail\n    and leave a corrupt intermediate file.  This is especially damaging\n    if this is used as automatic input to another process.  
This context\n    manager helps by creating a temporary filename, your code runs and\n    creates that temporary file, and then if no exceptions are raised,\n    the context manager will move the temporary file to the original\n    filename you intended to open.\n\n    Parameters\n    ----------\n    fname : str\n        Target filename, this file will be created if all goes well\n    fname_tmp : str\n        If given, this is used as the temporary filename.\n    tmpdir : str or bool\n        If given, put temporary files in this directory.  If `True`,\n        then find a good tmpdir that is not on local filesystem.\n    save_tmpfile : bool\n        If true, the temporary file is not deleteted if an exception\n        is raised.\n    keepext : bool, default False\n            If true, have tmpfile have same extension as final file.\n\n    Returns (as context manager value)\n    ----------------------------------\n     fname_tmp: str\n        Temporary filename to be used.  Same as `fname_tmp`\n        if given as an argument.\n\n    Raises\n    ------\n    Re-raises any except occuring during the context block.", "id": "f12841:m4"}
{"signature": "def set_process_timezone(TZ):", "body": "try:<EOL><INDENT>prev_timezone = os.environ['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>prev_timezone = None<EOL><DEDENT>os.environ['<STR_LIT>'] = TZ<EOL>time.tzset()  <EOL>return prev_timezone<EOL>", "docstring": "Parameters\n----------\nTZ: string", "id": "f12841:m0"}
{"signature": "def makedirs(path):", "body": "if not os.path.isdir(path):<EOL><INDENT>os.makedirs(path)<EOL><DEDENT>return path<EOL>", "docstring": "Create directories if they do not exist, otherwise do nothing.\n\nReturn path for convenience", "id": "f12841:m10"}
{"signature": "def get_segments(self):", "body": "cur = self._gtfs.get_cursor()<EOL>cur.execute('''<STR_LIT>''')<EOL>", "docstring": "Get segment\n\nReturns\n-------\nsegments: list[Segment]", "id": "f12842:c0:m1"}
{"signature": "def calculate_trip_shape_breakpoints(conn):", "body": "from gtfspy import shapes<EOL>cur = conn.cursor()<EOL>breakpoints_cache = {}<EOL>count_bad_shape_ordering = <NUM_LIT:0><EOL>count_bad_shape_fit = <NUM_LIT:0><EOL>count_no_shape_fit = <NUM_LIT:0><EOL>trip_Is = [x[<NUM_LIT:0>] for x in<EOL>cur.execute('<STR_LIT>').fetchall()]<EOL>for trip_I in trip_Is:<EOL><INDENT>row = cur.execute('''<STR_LIT>''', (trip_I,)).fetchone()<EOL>if row is None:<EOL><INDENT>continue<EOL><DEDENT>shape_id = row[<NUM_LIT:0>]<EOL>if shape_id is None or shape_id == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>cur.execute('''<STR_LIT>''',<EOL>(trip_I,))<EOL>stop_points = [dict(seq=row[<NUM_LIT:0>],<EOL>lat=row[<NUM_LIT:1>],<EOL>lon=row[<NUM_LIT:2>],<EOL>stop_I=row[<NUM_LIT:3>])<EOL>for row in cur if row[<NUM_LIT:1>] and row[<NUM_LIT:2>]]<EOL>cache_key = (shape_id, tuple(x['<STR_LIT>'] for x in stop_points))<EOL>if cache_key in breakpoints_cache:<EOL><INDENT>breakpoints = breakpoints_cache[cache_key]<EOL><DEDENT>else:<EOL><INDENT>shape_points = shapes.get_shape_points(cur, shape_id)<EOL>breakpoints, badness= shapes.find_segments(stop_points, shape_points)<EOL>if breakpoints != sorted(breakpoints):<EOL><INDENT>count_bad_shape_ordering += <NUM_LIT:1><EOL>breakpoints_cache[cache_key] = None<EOL>continue  <EOL><DEDENT>breakpoints_cache[cache_key] = breakpoints<EOL>if badness > <NUM_LIT:30> * len(breakpoints):<EOL><INDENT>count_bad_shape_fit += <NUM_LIT:1><EOL><DEDENT><DEDENT>if breakpoints is None:<EOL><INDENT>continue<EOL><DEDENT>if len(breakpoints) == <NUM_LIT:0>:<EOL><INDENT>count_no_shape_fit += <NUM_LIT:1><EOL>continue<EOL><DEDENT>assert len(breakpoints) == len(stop_points)<EOL>cur.executemany('<STR_LIT>'<EOL>'<STR_LIT>',<EOL>((int(bkpt), int(trip_I), int(stpt['<STR_LIT>']))<EOL>for bkpt, stpt in zip(breakpoints, stop_points)))<EOL><DEDENT>if count_bad_shape_fit > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\" % count_bad_shape_fit)<EOL><DEDENT>if count_bad_shape_ordering > 
<NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\" % count_bad_shape_ordering)<EOL><DEDENT>if count_no_shape_fit > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\" % count_no_shape_fit)<EOL><DEDENT>conn.commit()<EOL>", "docstring": "Pre-compute the shape points corresponding to each trip's stop.\n\n    Depends: shapes", "id": "f12850:m1"}
{"signature": "def __init__(self, gtfssource=None, print_progress=True):", "body": "if isinstance(gtfssource, string_types + (dict,)):<EOL><INDENT>_gtfs_sources = [gtfssource]<EOL><DEDENT>else:<EOL><INDENT>assert isinstance(gtfssource, list)<EOL>_gtfs_sources = gtfssource<EOL><DEDENT>self.print_progress = print_progress<EOL>self.gtfs_sources = []<EOL>for source in _gtfs_sources:<EOL><INDENT>if isinstance(source, dict):<EOL><INDENT>self.gtfs_sources.append(source)<EOL><DEDENT>elif isinstance(source, string_types):<EOL><INDENT>if os.path.isdir(source):<EOL><INDENT>self.gtfs_sources.append(source)<EOL><DEDENT>else:<EOL><INDENT>z = zipfile.ZipFile(source, mode='<STR_LIT:r>')<EOL>zip_commonprefix = os.path.commonprefix(z.namelist())<EOL>zip_source_datum = {<EOL>\"<STR_LIT>\": source,<EOL>\"<STR_LIT>\": zip_commonprefix<EOL>}<EOL>self.gtfs_sources.append(zip_source_datum)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Parameters\n----------\ngtfssource: str, dict, list\n    str: path to GTFS directory or zipfile\n    dict:\n        dictionary of files to use to as the GTFS files.  This\n        is mainly useful for testing, not for any normal use of\n        GTFS.  For example, to provide an agency.txt file,\n        do this:\n            d = {'agency.txt':\n                     'agency_id, agency_name, agency_timezone,agency_url\\n' \\\n                     '555,CompNet,Europe/Lala,http://x'\n                  }\n        Of course you probably wouldn't want all the string data\n        inline like that. You can provide the data as a string or\n        a file-like object (with a .read() attribute and line\n        iteration).\n    list: a list of the above elements to import (i.e. \"merge\") multiple GTFS feeds to the same database\n\nprint_progress: boolean\n    whether to print progress of the", "id": "f12857:c0:m0"}
{"signature": "def import_(self, conn):", "body": "if self.print_progress:<EOL><INDENT>print('<STR_LIT>', self.__class__.__name__)<EOL><DEDENT>self._conn = conn<EOL>self.create_table(conn)<EOL>if self.mode in ('<STR_LIT:all>', '<STR_LIT>') and self.fname and self.exists() and self.table not in ignore_tables:<EOL><INDENT>self.insert_data(conn)<EOL><DEDENT>if self.mode in ('<STR_LIT:all>', '<STR_LIT:index>') and hasattr(self, '<STR_LIT:index>'):<EOL><INDENT>self.create_index(conn)<EOL><DEDENT>if self.mode in ('<STR_LIT:all>', '<STR_LIT>') and hasattr(self, '<STR_LIT>'):<EOL><INDENT>self.run_post_import(conn)<EOL><DEDENT>conn.commit()<EOL>", "docstring": "Do the actual import. Copy data and store in connection object.\n\n        This function:\n        - Creates the tables\n        - Imports data (using self.gen_rows)\n        - Run any post_import hooks.\n        - Creates any indexs\n        - Does *not* run self.make_views - those must be done\n          after all tables are loaded.", "id": "f12857:c0:m11"}
{"signature": "def gen_rows0(self):", "body": "return self.gen_rows(*(self._get_csv_reader_generators()))<EOL>", "docstring": "Iterate through all rows in all files file.\n\n        The file is specified by the class - there is one class per\n        table.  This opens the file, does basic sanitaition, and\n        iterates over all rows as dictionaries, converted by\n        csv.DictReader.  This function opens files from both .zip and\n        raw directories.  The actual logic of converting all data to\n        Python is done in the .gen_rows() method which must be\n        defined in each subclass.", "id": "f12857:c0:m4"}
{"signature": "def get_trip_points(cur, route_id, offset=<NUM_LIT:0>, tripid_glob='<STR_LIT>'):", "body": "extra_where = '<STR_LIT>'<EOL>if tripid_glob:<EOL><INDENT>extra_where = \"<STR_LIT>\" % tripid_glob<EOL><DEDENT>cur.execute('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % extra_where, (route_id, offset))<EOL>stop_points = [dict(seq=row[<NUM_LIT:0>], lat=row[<NUM_LIT:1>], lon=row[<NUM_LIT:2>]) for row in cur]<EOL>return stop_points<EOL>", "docstring": "Get all scheduled stops on a particular route_id.\n\n    Given a route_id, return the trip-stop-list with\n    latitude/longitudes.  This is a bit more tricky than it seems,\n    because we have to go from table route->trips->stop_times.  This\n    functions finds an arbitrary trip (in trip table) with this route ID\n    and, and then returns all stop points for that trip.\n\n    Parameters\n    ----------\n    cur : sqlite3.Cursor\n        cursor to sqlite3 DB containing GTFS\n    route_id : string or any\n        route_id to get stop points of\n    offset : int\n        LIMIT offset if you don't want the first trip returned.\n    tripid_glob : string\n        If given, allows you to limit tripids which can be selected.\n        Mainly useful in debugging.\n\n    Returns\n    -------\n    stop-list\n        List of stops in stop-seq format.", "id": "f12861:m9"}
{"signature": "def print_coords(rows, prefix='<STR_LIT>'):", "body": "lat = [row['<STR_LIT>'] for row in rows]<EOL>lon = [row['<STR_LIT>'] for row in rows]<EOL>print('<STR_LIT>'+'<STR_LIT:->' * <NUM_LIT:5>)<EOL>print(\"<STR_LIT>\" % (prefix, prefix, lat, lon))<EOL>print('<STR_LIT:->'*<NUM_LIT:5>)<EOL>", "docstring": "Print coordinates within a sequence.\n\n    This is only used for debugging.  Printed in a form that can be\n    pasted into Python for visualization.", "id": "f12861:m0"}
{"signature": "def get_shape_points(cur, shape_id):", "body": "cur.execute('''<STR_LIT>''', (shape_id,))<EOL>shape_points = [dict(seq=row[<NUM_LIT:0>], lat=row[<NUM_LIT:1>], lon=row[<NUM_LIT:2>], d=row[<NUM_LIT:3>])<EOL>for row in cur]<EOL>return shape_points<EOL>", "docstring": "Given a shape_id, return its shape-sequence.\n\nParameters\n----------\ncur: sqlite3.Cursor\n    cursor to a GTFS database\nshape_id: str\n    id of the route\n\nReturns\n-------\nshape_points: list\n    elements are dictionaries containing the 'seq', 'lat', and 'lon' of the shape", "id": "f12861:m5"}
{"signature": "def return_segments(shape, break_points):", "body": "<EOL>segs = []<EOL>bp = <NUM_LIT:0> <EOL>bp2 = <NUM_LIT:0><EOL>for i in range(len(break_points)-<NUM_LIT:1>):<EOL><INDENT>bp = break_points[i] if break_points[i] is not None else bp2<EOL>bp2 = break_points[i+<NUM_LIT:1>] if break_points[i+<NUM_LIT:1>] is not None else bp<EOL>segs.append(shape[bp:bp2+<NUM_LIT:1>])<EOL><DEDENT>segs.append([])<EOL>return segs<EOL>", "docstring": "Break a shape into segments between stops using break_points.\n\n    This function can use the `break_points` outputs from\n    `find_segments`, and cuts the shape-sequence into pieces\n    corresponding to each stop.", "id": "f12861:m3"}
{"signature": "def find_best_segments(cur, stops, shape_ids, route_id=None,<EOL>breakpoints_cache=None):", "body": "cache_key = None<EOL>if breakpoints_cache is not None:<EOL><INDENT>cache_key = (route_id, tuple(x['<STR_LIT>'] for x in stops))<EOL>if cache_key in breakpoints_cache:<EOL><INDENT>print('<STR_LIT>')<EOL>return breakpoints_cache[cache_key]<EOL><DEDENT><DEDENT>if route_id is not None:<EOL><INDENT>cur.execute('''<STR_LIT>''',<EOL>(route_id,))<EOL>data = cur.fetchall()<EOL>if not data:<EOL><INDENT>print(\"<STR_LIT>\" % route_id)<EOL>return [], None, None, None<EOL><DEDENT>shape_ids = zip(*data)[<NUM_LIT:0>]<EOL><DEDENT>results = []<EOL>for shape_id in shape_ids:<EOL><INDENT>shape = get_shape_points(cur, shape_id)<EOL>breakpoints, badness = find_segments(stops, shape)<EOL>results.append([badness, breakpoints, shape, shape_id])<EOL>if len(stops) > <NUM_LIT:5> and badness < <NUM_LIT:5>*(len(stops)):<EOL><INDENT>break<EOL><DEDENT><DEDENT>best = np.argmin(zip(*results)[<NUM_LIT:0>])<EOL>badness = results[best][<NUM_LIT:0>]<EOL>breakpoints = results[best][<NUM_LIT:1>]<EOL>shape = results[best][<NUM_LIT:2>]<EOL>shape_id = results[best][<NUM_LIT:3>]<EOL>if breakpoints_cache is not None:<EOL><INDENT>print(\"<STR_LIT>\", cache_key[<NUM_LIT:0>], hash(cache_key[<NUM_LIT:1>:]))<EOL>breakpoints_cache[cache_key] = breakpoints, badness, shape, shape_id<EOL><DEDENT>return breakpoints, badness, shape, shape_id<EOL>", "docstring": "Finds the best shape_id for a stop-sequence.\n\n    This is used in cases like when you have GPS data with a route\n    name, but you don't know the route direction.  
It tries shapes\n    going both directions and returns the shape that best matches.\n    Could be used in other cases as well.\n\n    Parameters\n    ----------\n    cur : sqlite3.Cursor\n        database cursor\n    stops : list\n    shape_ids : list of shape_id:s\n    route_id : route_id to search for stops\n    breakpoints_cache : dict\n        If given, use this to cache results from this function.", "id": "f12861:m2"}
{"signature": "def get_shape_points2(cur, shape_id):", "body": "cur.execute('''<STR_LIT>''', (shape_id,))<EOL>shape_points = {'<STR_LIT>': [], '<STR_LIT>':  [], '<STR_LIT>': [], '<STR_LIT:d>': []}<EOL>for row in cur:<EOL><INDENT>shape_points['<STR_LIT>'].append(row[<NUM_LIT:0>])<EOL>shape_points['<STR_LIT>'].append(row[<NUM_LIT:1>])<EOL>shape_points['<STR_LIT>'].append(row[<NUM_LIT:2>])<EOL>shape_points['<STR_LIT:d>'].append(row[<NUM_LIT:3>])<EOL><DEDENT>return shape_points<EOL>", "docstring": "Given a shape_id, return its shape-sequence (as a dict of lists).\nget_shape_points function returns them as a list of dicts\n\nParameters\n----------\ncur: sqlite3.Cursor\n    cursor to a GTFS database\nshape_id: str\n    id of the route\n\nReturns\n-------\nshape_points: dict of lists\n    dict contains keys 'seq', 'lat', 'lon', and 'd'(istance) of the shape", "id": "f12861:m6"}
{"signature": "def get_convex_hull_coordinates(gtfs):", "body": "lons, lats = _get_stop_lat_lons(gtfs)<EOL>lon_lats = list(zip(lons, lats))<EOL>polygon = MultiPoint(lon_lats).convex_hull<EOL>hull_lons, hull_lats= polygon.exterior.coords.xy<EOL>return hull_lats, hull_lons<EOL>", "docstring": "Parameters\n----------\ngtfs: gtfs.GTFS\n\nReturns\n-------\nlons: list\n    of floats\nlats: list\n    of floats", "id": "f12863:m0"}
{"signature": "def __init__(self, gtfs, start_time_ut, lat, lon, max_duration_ut, min_transfer_time=<NUM_LIT:30>,<EOL>shapes=True, walk_speed=<NUM_LIT:0.5>):", "body": "self.gtfs = gtfs<EOL>self.start_time_ut = start_time_ut<EOL>self.lat = lat<EOL>self.lon = lon<EOL>self.max_duration_ut = max_duration_ut<EOL>self.min_transfer_time = min_transfer_time<EOL>self.shapes = shapes<EOL>self.event_heap = None<EOL>self.walk_speed = walk_speed<EOL>self._uninfected_stops = None<EOL>self._stop_I_to_spreading_stop = None<EOL>self._initialized = False<EOL>self._has_run = False<EOL>", "docstring": "Parameters\n----------\ngtfs: GTFS\n    the underlying GTFS (database) connection for getting data\nstart_time_ut: number\n    Start time of the spreading.\nlat: float\n    latitude of the spreading seed location\nlon: float\n    longitude of the spreading seed location\nmax_duration_ut: int\n    maximum duration of the spreading process (in seconds)\nmin_transfer_time : int\n    minimum transfer time in seconds\nshapes : bool\n    whether to include shapes", "id": "f12864:c0:m0"}
{"signature": "def get_min_visit_time(self):", "body": "if not self.visit_events:<EOL><INDENT>return float('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return min(self.visit_events, key=lambda event: event.arr_time_ut).arr_time_ut<EOL><DEDENT>", "docstring": "Get the earliest visit time of the stop.", "id": "f12865:c0:m1"}
{"signature": "def __init__(self, pd_df=None):", "body": "self.heap = []<EOL>keys = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>n = len(pd_df)<EOL>key_to_j = {}<EOL>for j, key in enumerate(pd_df.columns.values):<EOL><INDENT>key_to_j[key] = j<EOL><DEDENT>pd_df_values = pd_df.values<EOL>for i in range(n):<EOL><INDENT>vals = []<EOL>for key in keys:<EOL><INDENT>j = key_to_j[key]<EOL>vals.append(pd_df_values[i, j])<EOL><DEDENT>e = Event(*vals)<EOL>self.add_event(e)<EOL><DEDENT>", "docstring": "Parameters\n----------\npd_df : Pandas.Dataframe\n    Initial list of", "id": "f12866:c0:m0"}
{"signature": "def timeit(method):", "body": "def timed(*args, **kw):<EOL><INDENT>time_start = time.time()<EOL>result = method(*args, **kw)<EOL>time_end = time.time()<EOL>print('<STR_LIT>' % (method.__name__, time_end-time_start, str(args)[:<NUM_LIT:20>], kw))<EOL>return result<EOL><DEDENT>return timed<EOL>", "docstring": "A Python decorator for printing out the execution time for a function.\n\nAdapted from:\nwww.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods", "id": "f12868:m0"}
{"signature": "def median_temporal_distances(self, min_n_boardings=None, max_n_boardings=None):", "body": "if min_n_boardings is None:<EOL><INDENT>min_n_boardings = <NUM_LIT:0><EOL><DEDENT>if max_n_boardings is None:<EOL><INDENT>max_n_boardings = self.max_trip_n_boardings()<EOL>if max_n_boardings is None:<EOL><INDENT>max_n_boardings = <NUM_LIT:0><EOL><DEDENT><DEDENT>median_temporal_distances = [float('<STR_LIT>') for _ in range(min_n_boardings, max_n_boardings + <NUM_LIT:1>)]<EOL>for n_boardings in range(min_n_boardings, max_n_boardings + <NUM_LIT:1>):<EOL><INDENT>simple_analyzer = self.get_time_profile_analyzer(n_boardings)<EOL>median_temporal_distances[n_boardings] = simple_analyzer.median_temporal_distance()<EOL><DEDENT>return median_temporal_distances<EOL>", "docstring": "Returns\n-------\nmean_temporal_distances: list\n    list indices encode the number of vehicle legs each element\n    in the list tells gets the mean temporal distance", "id": "f12871:c0:m26"}
{"signature": "def __init__(self, labels, walk_to_target_duration, start_time_dep, end_time_dep):", "body": "self._node_profile_final_labels = labels<EOL>self.start_time_dep = start_time_dep<EOL>self.end_time_dep = end_time_dep<EOL>self.all_labels = [label for label in self._node_profile_final_labels if<EOL>(start_time_dep <= label.departure_time <= end_time_dep)]<EOL>after_label_candidates = [label for label in self._node_profile_final_labels if<EOL>(label.departure_time > self.end_time_dep)]<EOL>after_label_candidates.sort(key=lambda el: (el.arrival_time_target, el.n_boardings))<EOL>min_n_boardings_observed = float('<STR_LIT>')<EOL>after_labels = []<EOL>for candidate_after_label in after_label_candidates:<EOL><INDENT>if candidate_after_label.n_boardings < min_n_boardings_observed:<EOL><INDENT>after_labels.append(candidate_after_label)<EOL>min_n_boardings_observed = candidate_after_label.n_boardings<EOL><DEDENT><DEDENT>self.all_labels.extend(after_labels)<EOL>if len(after_labels) is <NUM_LIT:0>:<EOL><INDENT>self._labels_within_time_frame = self.all_labels<EOL><DEDENT>else:<EOL><INDENT>self._labels_within_time_frame = self.all_labels[:-len(after_labels)]<EOL><DEDENT>self._walk_to_target_duration = walk_to_target_duration<EOL>self._n_boardings_to_simple_time_analyzers = {}<EOL>self._transfers_on_fastest_paths_analyzer = self._get_transfers_on_fastest_path_analyzer()<EOL>", "docstring": "Initialize the data structures required by\n\nParameters\n----------\nnode_profile: NodeProfileMultiObjective", "id": "f12871:c0:m1"}
{"signature": "def n_pareto_optimal_trips(self):", "body": "return float(len(self._labels_within_time_frame))<EOL>", "docstring": "Get number of pareto-optimal trips\n\nReturns\n-------\nn_trips: float", "id": "f12871:c0:m36"}
{"signature": "def _truncate_colormap(cmap, minval=<NUM_LIT:0.0>, maxval=<NUM_LIT:1.0>, n=<NUM_LIT:100>):", "body": "new_cmap = LinearSegmentedColormap.from_list(<EOL>'<STR_LIT>'.format(n=cmap.name, a=minval, b=maxval),<EOL>cmap(numpy.linspace(minval, maxval, n))<EOL>)<EOL>return new_cmap<EOL>", "docstring": "Truncates a colormap to use.\nCode originall from http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib", "id": "f12871:m2"}
{"signature": "def read_data_as_dataframe(self,<EOL>travel_impedance_measure,<EOL>from_stop_I=None,<EOL>to_stop_I=None,<EOL>statistic=None):", "body": "to_select = []<EOL>where_clauses = []<EOL>to_select.append(\"<STR_LIT>\")<EOL>to_select.append(\"<STR_LIT>\")<EOL>if from_stop_I is not None:<EOL><INDENT>where_clauses.append(\"<STR_LIT>\" + str(int(from_stop_I)))<EOL><DEDENT>if to_stop_I is not None:<EOL><INDENT>where_clauses.append(\"<STR_LIT>\" + str(int(to_stop_I)))<EOL><DEDENT>where_clause = \"<STR_LIT>\"<EOL>if len(where_clauses) > <NUM_LIT:0>:<EOL><INDENT>where_clause = \"<STR_LIT>\" + \"<STR_LIT>\".join(where_clauses)<EOL><DEDENT>if not statistic:<EOL><INDENT>to_select.extend([\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>to_select.append(statistic)<EOL><DEDENT>to_select_clause = \"<STR_LIT:U+002C>\".join(to_select)<EOL>if not to_select_clause:<EOL><INDENT>to_select_clause = \"<STR_LIT:*>\"<EOL><DEDENT>sql = \"<STR_LIT>\" + to_select_clause + \"<STR_LIT>\" + travel_impedance_measure + where_clause + \"<STR_LIT:;>\"<EOL>df = pd.read_sql(sql, self.conn)<EOL>return df<EOL>", "docstring": "Recover pre-computed travel_impedance between od-pairs from the database.\n\nReturns\n-------\nvalues: number | Pandas DataFrame", "id": "f12874:c0:m1"}
{"signature": "def get_arrival_times(self):", "body": "assert self._has_run<EOL>return self.__stop_labels<EOL>", "docstring": "Returns\n-------\narrival_times: dict[int, float]\n    maps integer stop_ids to floats", "id": "f12875:c0:m1"}
{"signature": "def get_walk_to_target_duration(self):", "body": "return self._walk_to_target_duration<EOL>", "docstring": "Get walking distance to target node.\n\nReturns\n-------\nwalk_to_target_duration: float", "id": "f12877:c0:m2"}
{"signature": "def __init__(self,<EOL>dep_times=None,<EOL>walk_to_target_duration=float('<STR_LIT>'),<EOL>label_class=LabelTimeWithBoardingsCount,<EOL>transit_connection_dep_times=None,<EOL>closest_target=None,<EOL>node_id=None):", "body": "if dep_times is None:<EOL><INDENT>dep_times = []<EOL><DEDENT>n_dep_times = len(dep_times)<EOL>assert n_dep_times == len(set(dep_times)), \"<STR_LIT>\"<EOL>self._departure_times = list(reversed(sorted(dep_times)))<EOL>self.dep_times_to_index = dict(zip(self._departure_times, range(len(self._departure_times))))<EOL>self._label_bags = [[]] * len(self._departure_times)<EOL>self._walk_to_target_duration = walk_to_target_duration<EOL>self._min_dep_time = float('<STR_LIT>')<EOL>self.label_class = label_class<EOL>self.closest_target = closest_target<EOL>if self.label_class == LabelTimeBoardingsAndRoute and self._walk_to_target_duration < float('<STR_LIT>'):<EOL><INDENT>assert (self.closest_target is not None)<EOL><DEDENT>if transit_connection_dep_times is not None:<EOL><INDENT>self._connection_dep_times = transit_connection_dep_times<EOL><DEDENT>else:<EOL><INDENT>self._connection_dep_times = dep_times<EOL><DEDENT>assert (isinstance(self._connection_dep_times, (list, numpy.ndarray)))<EOL>self._closed = False<EOL>self._finalized = False<EOL>self._final_pareto_optimal_labels = None<EOL>self._real_connection_labels = None<EOL>self.node_id = node_id<EOL>", "docstring": "Parameters\n----------\ndep_times\nwalk_to_target_duration\nlabel_class: label class to be used\ntransit_connection_dep_times:\n    if not given, all connections are assumed to be real connections\nclosest_target: int, optional\n    stop_I of the closest target if within walking distance (and Routes are recorded)", "id": "f12877:c0:m0"}
{"signature": "def largest_finite_distance(self):", "body": "block_start_distances = [block.distance_start for block in self._profile_blocks if<EOL>block.distance_start < float('<STR_LIT>')]<EOL>block_end_distances = [block.distance_end for block in self._profile_blocks if<EOL>block.distance_end < float('<STR_LIT>')]<EOL>distances = block_start_distances + block_end_distances<EOL>if len(distances) > <NUM_LIT:0>:<EOL><INDENT>return max(distances)<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Compute the maximum temporal distance.\n\nReturns\n-------\nmax_temporal_distance : float", "id": "f12878:c0:m6"}
{"signature": "def get_journey_time_per_mode(self, modes=None):", "body": "pass<EOL>", "docstring": ":param modes: return these\n:return:", "id": "f12892:c0:m22"}
{"signature": "def get_upstream_stops_ratio(self, target, trough_stops, ratio):", "body": "if isinstance(trough_stops, list):<EOL><INDENT>trough_stops = \"<STR_LIT:U+002C>\".join(trough_stops)<EOL><DEDENT>query = \"\"\"<STR_LIT>\"\"\".format(target=target, trough_stops=trough_stops, ratio=ratio)<EOL>df = read_sql_query(query, self.conn)<EOL>return df<EOL>", "docstring": "Selects the stops for which the ratio or higher proportion of trips to the target passes trough a set of trough stops\n:param target: target of trips\n:param trough_stops: stops where the selected trips are passing trough\n:param ratio: threshold for inclusion\n:return:", "id": "f12892:c0:m10"}
{"signature": "def passing_journeys_per_stop(self):", "body": "pass<EOL>", "docstring": ":return:", "id": "f12892:c0:m11"}
{"signature": "def _insert_journeys_into_db_no_route(self, stop_profiles, target_stop=None):<EOL>", "body": "print(\"<STR_LIT>\")<EOL>journey_id = <NUM_LIT:1><EOL>journey_list = []<EOL>tot = len(stop_profiles)<EOL>for i, (origin_stop, labels) in enumerate(stop_profiles.items(), start=<NUM_LIT:1>):<EOL><INDENT>for label in labels:<EOL><INDENT>assert (isinstance(label, LabelTimeWithBoardingsCount))<EOL>if self.multitarget_routing:<EOL><INDENT>target_stop = None<EOL><DEDENT>else:<EOL><INDENT>target_stop = int(target_stop)<EOL><DEDENT>values = [int(journey_id),<EOL>int(origin_stop),<EOL>target_stop,<EOL>int(label.departure_time),<EOL>int(label.arrival_time_target),<EOL>int(label.n_boardings)]<EOL>journey_list.append(values)<EOL>journey_id += <NUM_LIT:1><EOL><DEDENT><DEDENT>print(\"<STR_LIT>\")<EOL>insert_journeys_stmt = '''<STR_LIT>''' % (\"<STR_LIT:U+002CU+0020>\".join([\"<STR_LIT:?>\" for x in range(<NUM_LIT:6>)]))<EOL>self._executemany_exclusive(insert_journeys_stmt, journey_list)<EOL>self.conn.commit()<EOL>", "docstring": "con.isolation_level = 'EXCLUSIVE'\ncon.execute('BEGIN EXCLUSIVE')\n#exclusive access starts here. Nothing else can r/w the db, do your magic here.\ncon.commit()", "id": "f12893:c0:m5"}
{"signature": "def __init__(self, gtfs_path, journey_db_path, routing_params=None, multitarget_routing=False,<EOL>track_vehicle_legs=True, track_route=False):", "body": "self.multitarget_routing = multitarget_routing<EOL>self.track_route = track_route<EOL>self.track_vehicle_legs = track_vehicle_legs<EOL>self.gtfs_path = gtfs_path<EOL>self.gtfs = GTFS(self.gtfs_path)<EOL>self.gtfs_meta = self.gtfs.meta<EOL>self.gtfs._dont_close = True<EOL>self.od_pairs = None<EOL>self._targets = None<EOL>self._origins = None<EOL>self.diff_conn = None<EOL>if not routing_params:<EOL><INDENT>routing_params = dict()<EOL><DEDENT>self.routing_params_input = routing_params<EOL>assert os.path.exists(journey_db_path) or routing_params is not None<EOL>journey_db_pre_exists = os.path.isfile(journey_db_path)<EOL>timeout = <NUM_LIT:1000><EOL>self.conn = sqlite3.connect(journey_db_path, timeout)<EOL>if not journey_db_pre_exists:<EOL><INDENT>self.initialize_database()<EOL><DEDENT>self.routing_parameters = Parameters(self.conn)<EOL>self._assert_journey_computation_paramaters_match()<EOL>self.journey_properties = {\"<STR_LIT>\": (_T_WALK_STR, _T_WALK_STR)}<EOL>if routing_params.get('<STR_LIT>', False) orself.routing_parameters.get('<STR_LIT>', False):<EOL><INDENT>self.journey_properties[\"<STR_LIT>\"] = (float(\"<STR_LIT>\"), <NUM_LIT:0>)<EOL><DEDENT>if self.track_route:<EOL><INDENT>additional_journey_parameters = {<EOL>\"<STR_LIT>\": (float('<STR_LIT>'), <NUM_LIT:0>),<EOL>\"<STR_LIT>\": (float('<STR_LIT>'), <NUM_LIT:0>),<EOL>\"<STR_LIT>\": (_T_WALK_STR, _T_WALK_STR),<EOL>\"<STR_LIT>\": (float('<STR_LIT>'), <NUM_LIT:0>)<EOL>}<EOL>self.journey_properties.update(additional_journey_parameters)<EOL><DEDENT>self.travel_impedance_measure_names = list(self.journey_properties.keys())<EOL>self.travel_impedance_measure_names += [\"<STR_LIT>\"]<EOL>", "docstring": ":param gtfs: GTFS object\n:param list_of_stop_profiles: dict of NodeProfileMultiObjective\n:param multitarget_routing: bool", "id": "f12893:c0:m0"}
{"signature": "def update_pareto_optimal_tuples(self, new_label):", "body": "assert (isinstance(new_label, LabelTime))<EOL>if self._labels:<EOL><INDENT>assert (new_label.departure_time <= self._labels[-<NUM_LIT:1>].departure_time)<EOL>best_later_departing_arrival_time = self._labels[-<NUM_LIT:1>].arrival_time_target<EOL><DEDENT>else:<EOL><INDENT>best_later_departing_arrival_time = float('<STR_LIT>')<EOL><DEDENT>walk_to_target_arrival_time = new_label.departure_time + self._walk_to_target_duration<EOL>best_arrival_time = min(walk_to_target_arrival_time,<EOL>best_later_departing_arrival_time,<EOL>new_label.arrival_time_target)<EOL>if (new_label.arrival_time_target < walk_to_target_arrival_time and<EOL>new_label.arrival_time_target < best_later_departing_arrival_time):<EOL><INDENT>self._labels.append(LabelTime(new_label.departure_time, best_arrival_time))<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Parameters\n----------\nnew_label: LabelTime\n\nReturns\n-------\nupdated: bool", "id": "f12896:c0:m2"}
{"signature": "def evaluate_earliest_arrival_time_at_target(self, dep_time, transfer_margin):", "body": "minimum = dep_time + self._walk_to_target_duration<EOL>for label in self._labels[::-<NUM_LIT:1>]:<EOL><INDENT>if label.departure_time >= dep_time + transfer_margin:<EOL><INDENT>minimum = min(minimum, label.arrival_time_target)<EOL>break<EOL><DEDENT><DEDENT>return float(minimum)<EOL>", "docstring": "Get the earliest arrival time at the target, given a departure time.\n\nParameters\n----------\ndep_time : float, int\n    time in unix seconds\ntransfer_margin: float, int\n    transfer margin in seconds\n\nReturns\n-------\narrival_time : float\n    Arrival time in the given time unit (seconds after unix epoch).", "id": "f12896:c0:m3"}
{"signature": "def __init__(self, legs=None):", "body": "self.legs = []<EOL>self.departure_time = None<EOL>self.arrival_time = None<EOL>self.trip_ids = set()<EOL>self.n_boardings = <NUM_LIT:0><EOL>if legs is not None:<EOL><INDENT>for leg in legs:<EOL><INDENT>self.add_leg(leg)<EOL><DEDENT><DEDENT>", "docstring": "Parameters\n----------\nlegs: list[Connection]", "id": "f12897:c0:m0"}
{"signature": "def get_transfer_stop_pairs(self):", "body": "transfer_stop_pairs = []<EOL>previous_arrival_stop = None<EOL>current_trip_id = None<EOL>for leg in self.legs:<EOL><INDENT>if leg.trip_id is not None and leg.trip_id != current_trip_id and previous_arrival_stop is not None:<EOL><INDENT>transfer_stop_pair = (previous_arrival_stop, leg.departure_stop)<EOL>transfer_stop_pairs.append(transfer_stop_pair)<EOL><DEDENT>previous_arrival_stop = leg.arrival_stop<EOL>current_trip_id = leg.trip_id<EOL><DEDENT>return transfer_stop_pairs<EOL>", "docstring": "Get stop pairs through which transfers take place\n\nReturns\n-------\ntransfer_stop_pairs: list", "id": "f12897:c0:m6"}
{"signature": "def _scan_footpaths_to_departure_stop(self, connection_dep_stop, connection_dep_time, arrival_time_target):", "body": "for _, neighbor, data in self._walk_network.edges_iter(nbunch=[connection_dep_stop],<EOL>data=True):<EOL><INDENT>d_walk = data['<STR_LIT>']<EOL>neighbor_dep_time = connection_dep_time - d_walk / self._walk_speed<EOL>pt = LabelTimeSimple(departure_time=neighbor_dep_time, arrival_time_target=arrival_time_target)<EOL>self._stop_profiles[neighbor].update_pareto_optimal_tuples(pt)<EOL><DEDENT>", "docstring": "A helper method for scanning the footpaths. Updates self._stop_profiles accordingly", "id": "f12898:c0:m2"}
{"signature": "def __init__(self,<EOL>transit_events,<EOL>target_stop,<EOL>start_time=None,<EOL>end_time=None,<EOL>transfer_margin=<NUM_LIT:0>,<EOL>walk_network=None,<EOL>walk_speed=<NUM_LIT>,<EOL>verbose=False):", "body": "AbstractRoutingAlgorithm.__init__(self)<EOL>self._target = target_stop<EOL>self._connections = transit_events<EOL>if start_time is None:<EOL><INDENT>start_time = transit_events[-<NUM_LIT:1>].departure_time<EOL><DEDENT>if end_time is None:<EOL><INDENT>end_time = transit_events[<NUM_LIT:0>].departure_time<EOL><DEDENT>self._start_time = start_time<EOL>self._end_time = end_time<EOL>self._transfer_margin = transfer_margin<EOL>if walk_network is None:<EOL><INDENT>walk_network = networkx.Graph()<EOL><DEDENT>self._walk_network = walk_network<EOL>self._walk_speed = float(walk_speed)<EOL>self._verbose = verbose<EOL>self.__trip_min_arrival_time = defaultdict(lambda: float(\"<STR_LIT>\"))<EOL>self._stop_profiles = defaultdict(lambda: NodeProfileSimple())<EOL>self._stop_profiles[self._target] = NodeProfileSimple(<NUM_LIT:0>)<EOL>if target_stop in walk_network.nodes():<EOL><INDENT>for target_neighbor in walk_network.neighbors(target_stop):<EOL><INDENT>edge_data = walk_network.get_edge_data(target_neighbor, target_stop)<EOL>walk_duration = edge_data[\"<STR_LIT>\"] / self._walk_speed<EOL>self._stop_profiles[target_neighbor] = NodeProfileSimple(walk_duration)<EOL><DEDENT><DEDENT>", "docstring": "Parameters\n----------\ntransit_events: list[Connection]\n    events are assumed to be ordered in DECREASING departure_time (!)\ntarget_stop: int\n    index of the target stop\nstart_time : int, optional\n    start time in unixtime seconds\nend_time: int, optional\n    end time in unixtime seconds (no connections will be scanned after this time)\ntransfer_margin: int, optional\n    required extra margin required for transfers in seconds\nwalk_speed: float, optional\n    walking speed between stops in meters / second.\nwalk_network: networkx.Graph, optional\n    each edge 
should have the walking distance as a data attribute (\"distance_shape\") expressed in meters\nverbose: boolean, optional\n    whether to print out progress", "id": "f12898:c0:m0"}
{"signature": "@property<EOL><INDENT>def stop_profiles(self):<DEDENT>", "body": "assert self._has_run<EOL>return self._stop_profiles<EOL>", "docstring": "Returns\n-------\n_stop_profiles : dict[int, NodeProfileSimple]\n    The pareto tuples necessary.", "id": "f12898:c0:m3"}
{"signature": "@_if_no_trips_return_inf<EOL><INDENT>def median_trip_duration(self):<DEDENT>", "body": "return numpy.median(self.trip_durations)<EOL>", "docstring": "Get average travel time to destination.\n\nReturns\n-------\nfloat: max_trip_duration\n    float('inf') if no trips take place", "id": "f12899:c0:m6"}
{"signature": "def n_pareto_optimal_trips(self):", "body": "return float(len(self.trip_durations))<EOL>", "docstring": "Get number of pareto-optimal trips\n\nReturns\n-------\nn_trips: float", "id": "f12899:c0:m2"}
{"signature": "@_if_no_trips_return_inf<EOL><INDENT>def max_trip_duration(self):<DEDENT>", "body": "return numpy.max(self.trip_durations)<EOL>", "docstring": "Get minimum travel time to destination.\n\nReturns\n-------\nfloat: max_trip_duration\n    float('inf') if no trips take place", "id": "f12899:c0:m4"}
{"signature": "@_if_no_trips_return_inf<EOL><INDENT>def mean_trip_duration(self):<DEDENT>", "body": "return numpy.mean(self.trip_durations)<EOL>", "docstring": "Get average travel time to destination.\n\nReturns\n-------\nfloat: max_trip_duration\n    float('inf') if no trips take place", "id": "f12899:c0:m5"}
{"signature": "def mean_temporal_distance(self):", "body": "total_width = self.end_time_dep - self.start_time_dep<EOL>total_area = sum([block.area() for block in self._profile_blocks])<EOL>return total_area / total_width<EOL>", "docstring": "Get mean temporal distance (in seconds) to the target.\n\nReturns\n-------\nmean_temporal_distance : float", "id": "f12899:c0:m7"}
{"signature": "def largest_finite_temporal_distance(self):", "body": "return self.profile_block_analyzer.largest_finite_distance()<EOL>", "docstring": "Compute the maximum temporal distance.\n\nReturns\n-------\nmax_temporal_distance : float", "id": "f12899:c0:m11"}
{"signature": "def __init__(self, labels, walk_time_to_target, start_time_dep, end_time_dep):", "body": "self.start_time_dep = start_time_dep<EOL>self.end_time_dep = end_time_dep<EOL>all_pareto_optimal_tuples = [pt for pt in labels if<EOL>(start_time_dep < pt.departure_time < end_time_dep)]<EOL>labels_after_dep_time = [label for label in labels if label.departure_time >= self.end_time_dep]<EOL>if labels_after_dep_time:<EOL><INDENT>next_label_after_end_time = min(labels_after_dep_time, key=lambda el: el.arrival_time_target)<EOL>all_pareto_optimal_tuples.append(next_label_after_end_time)<EOL><DEDENT>all_pareto_optimal_tuples = sorted(all_pareto_optimal_tuples, key=lambda ptuple: ptuple.departure_time)<EOL>arrival_time_target_at_end_time = end_time_dep + walk_time_to_target<EOL>previous_trip = None<EOL>for trip_tuple in all_pareto_optimal_tuples:<EOL><INDENT>if previous_trip:<EOL><INDENT>assert(trip_tuple.arrival_time_target > previous_trip.arrival_time_target)<EOL><DEDENT>if trip_tuple.departure_time > self.end_time_depand trip_tuple.arrival_time_target < arrival_time_target_at_end_time:<EOL><INDENT>arrival_time_target_at_end_time = trip_tuple.arrival_time_target<EOL><DEDENT>previous_trip = trip_tuple<EOL><DEDENT>self._walk_time_to_target = walk_time_to_target<EOL>self._profile_blocks = []<EOL>previous_departure_time = start_time_dep<EOL>self.trip_durations = []<EOL>self.trip_departure_times = []<EOL>for trip_pareto_tuple in all_pareto_optimal_tuples:<EOL><INDENT>if trip_pareto_tuple.departure_time > self.end_time_dep:<EOL><INDENT>continue<EOL><DEDENT>if self._walk_time_to_target <= trip_pareto_tuple.duration():<EOL><INDENT>print(self._walk_time_to_target, trip_pareto_tuple.duration())<EOL>assert(self._walk_time_to_target > trip_pareto_tuple.duration())<EOL><DEDENT>effective_trip_previous_departure_time = max(<EOL>previous_departure_time,<EOL>trip_pareto_tuple.departure_time - (self._walk_time_to_target - trip_pareto_tuple.duration())<EOL>)<EOL>if 
effective_trip_previous_departure_time > previous_departure_time:<EOL><INDENT>walk_block = ProfileBlock(start_time=previous_departure_time,<EOL>end_time=effective_trip_previous_departure_time,<EOL>distance_start=self._walk_time_to_target,<EOL>distance_end=self._walk_time_to_target<EOL>)<EOL>self._profile_blocks.append(walk_block)<EOL><DEDENT>trip_waiting_time = trip_pareto_tuple.departure_time - effective_trip_previous_departure_time<EOL>trip_block = ProfileBlock(end_time=trip_pareto_tuple.departure_time,<EOL>start_time=effective_trip_previous_departure_time,<EOL>distance_start=trip_pareto_tuple.duration() + trip_waiting_time,<EOL>distance_end=trip_pareto_tuple.duration())<EOL>self.trip_durations.append(trip_pareto_tuple.duration())<EOL>self.trip_departure_times.append(trip_pareto_tuple.departure_time)<EOL>self._profile_blocks.append(trip_block)<EOL>previous_departure_time = trip_pareto_tuple.departure_time<EOL><DEDENT>if not self._profile_blocks or self._profile_blocks[-<NUM_LIT:1>].end_time < end_time_dep:<EOL><INDENT>if len(self._profile_blocks) > <NUM_LIT:0>:<EOL><INDENT>dep_previous = self._profile_blocks[-<NUM_LIT:1>].end_time<EOL><DEDENT>else:<EOL><INDENT>dep_previous = start_time_dep<EOL><DEDENT>waiting_time = end_time_dep - dep_previous<EOL>distance_end_trip = arrival_time_target_at_end_time - end_time_dep<EOL>walking_wait_time = min(end_time_dep - dep_previous,<EOL>waiting_time - (self._walk_time_to_target - distance_end_trip))<EOL>walking_wait_time = max(<NUM_LIT:0>, walking_wait_time)<EOL>if walking_wait_time > <NUM_LIT:0>:<EOL><INDENT>walk_block = ProfileBlock(start_time=dep_previous,<EOL>end_time=dep_previous + walking_wait_time,<EOL>distance_start=self._walk_time_to_target,<EOL>distance_end=self._walk_time_to_target<EOL>)<EOL>assert (walk_block.start_time <= walk_block.end_time)<EOL>assert (walk_block.distance_end <= walk_block.distance_start)<EOL>self._profile_blocks.append(walk_block)<EOL><DEDENT>trip_waiting_time = waiting_time - 
walking_wait_time<EOL>if trip_waiting_time > <NUM_LIT:0>:<EOL><INDENT>try:<EOL><INDENT>trip_block = ProfileBlock(start_time=dep_previous + walking_wait_time,<EOL>end_time=dep_previous + walking_wait_time + trip_waiting_time,<EOL>distance_start=distance_end_trip + trip_waiting_time,<EOL>distance_end=distance_end_trip)<EOL>assert (trip_block.start_time <= trip_block.end_time)<EOL>assert (trip_block.distance_end <= trip_block.distance_start)<EOL>self._profile_blocks.append(trip_block)<EOL><DEDENT>except AssertionError as e:<EOL><INDENT>assert(trip_waiting_time < <NUM_LIT:10>**-<NUM_LIT:5>)<EOL><DEDENT><DEDENT><DEDENT>self.profile_block_analyzer = ProfileBlockAnalyzer(profile_blocks=self._profile_blocks)<EOL>", "docstring": "Initialize the data structures required by\n\nParameters\n----------\nnode_profile: NodeProfileSimple", "id": "f12899:c0:m1"}
{"signature": "def plot_temporal_distance_pdf_horizontal(self, use_minutes=True,<EOL>color=\"<STR_LIT>\",<EOL>ax=None,<EOL>duration_divider=<NUM_LIT>,<EOL>legend_font_size=None,<EOL>legend_loc=None):", "body": "from matplotlib import pyplot as plt<EOL>plt.rc('<STR_LIT:text>', usetex=True)<EOL>if ax is None:<EOL><INDENT>fig = plt.figure()<EOL>ax = fig.add_subplot(<NUM_LIT>)<EOL><DEDENT>temporal_distance_split_points_ordered, densities, delta_peaks = self._temporal_distance_pdf()<EOL>xs = []<EOL>for i, x in enumerate(temporal_distance_split_points_ordered):<EOL><INDENT>xs.append(x)<EOL>xs.append(x)<EOL><DEDENT>xs = numpy.array(xs)<EOL>ys = [<NUM_LIT:0>]<EOL>for y in densities:<EOL><INDENT>ys.append(y)<EOL>ys.append(y)<EOL><DEDENT>ys.append(<NUM_LIT:0>)<EOL>ys = numpy.array(ys)<EOL>xlabel = \"<STR_LIT>\"<EOL>ylabel = \"<STR_LIT>\"<EOL>if use_minutes:<EOL><INDENT>xs /= duration_divider<EOL>ys *= duration_divider<EOL>xlabel = \"<STR_LIT>\"<EOL>delta_peaks = {peak / <NUM_LIT>: mass for peak, mass in delta_peaks.items()}<EOL><DEDENT>if delta_peaks:<EOL><INDENT>peak_height = max(ys) * <NUM_LIT><EOL>max_x = max(xs)<EOL>min_x = min(xs)<EOL>now_max_x = max(xs) + <NUM_LIT> * (max_x - min_x)<EOL>now_min_x = min_x - <NUM_LIT:0.1> * (max_x - min_x)<EOL>text_x_offset = <NUM_LIT:0.1> * (now_max_x - max_x)<EOL>for loc, mass in delta_peaks.items():<EOL><INDENT>text = \"<STR_LIT>\" + (\"<STR_LIT>\" % (mass))<EOL>ax.plot([<NUM_LIT:0>, peak_height], [loc, loc], color=color, lw=<NUM_LIT:5>, label=text)<EOL><DEDENT><DEDENT>ax.plot(ys, xs, \"<STR_LIT>\")<EOL>if delta_peaks:<EOL><INDENT>tot_delta_peak_mass = sum(delta_peaks.values())<EOL>fill_label = \"<STR_LIT>\" % (<NUM_LIT:1>-tot_delta_peak_mass)<EOL><DEDENT>else:<EOL><INDENT>fill_label = None<EOL><DEDENT>ax.fill_betweenx(xs, ys, color=color, alpha=<NUM_LIT>, label=fill_label)<EOL>ax.set_ylabel(xlabel)<EOL>ax.set_xlabel(ylabel)<EOL>ax.set_xlim(left=<NUM_LIT:0>, right=max(ys) * <NUM_LIT>)<EOL>if delta_peaks:<EOL><INDENT>if 
legend_font_size is None:<EOL><INDENT>legend_font_size = <NUM_LIT:12><EOL><DEDENT>if legend_loc is None:<EOL><INDENT>legend_loc = \"<STR_LIT>\"<EOL><DEDENT>ax.legend(loc=legend_loc, prop={'<STR_LIT:size>': legend_font_size})<EOL><DEDENT>if True:<EOL><INDENT>line_tyles = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:->\"][::-<NUM_LIT:1>]<EOL>to_plot_funcs = [self.max_temporal_distance, self.mean_temporal_distance, self.min_temporal_distance]<EOL>xmin, xmax = ax.get_xlim()<EOL>for to_plot_func, ls in zip(to_plot_funcs, line_tyles):<EOL><INDENT>y = to_plot_func() / duration_divider<EOL>assert y < float('<STR_LIT>')<EOL>ax.plot([xmin, xmax*<NUM_LIT:10>], [y, y], color=\"<STR_LIT>\", ls=ls, lw=<NUM_LIT:1>)<EOL><DEDENT><DEDENT>return ax.figure<EOL>", "docstring": "Plot the temporal distance probability density function.\n\nReturns\n-------\nfig: matplotlib.Figure", "id": "f12899:c0:m14"}
{"signature": "def get_prop_analyzer_flat(self, property, value_no_next_journey, value_cutoff):", "body": "kwargs = self.kwargs<EOL>fp_blocks = self.get_fastest_path_temporal_distance_blocks()<EOL>prop_blocks = []<EOL>for b in fp_blocks:<EOL><INDENT>if b.is_flat():<EOL><INDENT>if b.distance_end == self.walk_duration and b.distance_end != float('<STR_LIT>'):<EOL><INDENT>prop_value = value_cutoff<EOL><DEDENT>else:<EOL><INDENT>prop_value = value_no_next_journey<EOL><DEDENT><DEDENT>else:<EOL><INDENT>prop_value = b[property]<EOL><DEDENT>prop_block = ProfileBlock(b.start_time, b.end_time, prop_value, prop_value)<EOL>prop_blocks.append(prop_block)<EOL><DEDENT>return ProfileBlockAnalyzer(prop_blocks, **kwargs)<EOL>", "docstring": "Get a journey property analyzer, where each journey is weighted by the number of.\n\nParameters\n----------\nproperty: string\n    Name of the property, needs to be one of label_props given on initialization.\nvalue_no_next_journey:\n    Value of the profile, when there is no next journey available.\nvalue_cutoff: number\n    default value of the property when cutoff is applied\n\nReturns\n-------\nProfileBlockAnalyzer", "id": "f12901:c0:m9"}
{"signature": "def __init__(self, labels, start_time_dep, end_time_dep, walk_duration=float('<STR_LIT>'), label_props_to_consider=None, **kwargs):", "body": "for label in labels:<EOL><INDENT>assert (hasattr(label, \"<STR_LIT>\"))<EOL>assert (hasattr(label, \"<STR_LIT>\"))<EOL><DEDENT>self.start_time_dep = start_time_dep<EOL>self.end_time_dep = end_time_dep<EOL>self.walk_duration = walk_duration<EOL>if label_props_to_consider is None:<EOL><INDENT>self.label_props = []<EOL><DEDENT>else:<EOL><INDENT>self.label_props = label_props_to_consider<EOL><DEDENT>self._fastest_path_labels = self._compute_fastest_path_labels(labels)<EOL>for label in self._fastest_path_labels:<EOL><INDENT>for prop in self.label_props:<EOL><INDENT>assert (hasattr(label, prop))<EOL><DEDENT><DEDENT>self.kwargs = kwargs<EOL>", "docstring": "Parameters\n----------\nlabels: list\n    List of labels (each label should at least have attributes \"departure_time\" and \"arrival_time\")\nwalk_duration: float\n    What is the maximum duration for a journey to be considered.\nlabel_props_to_consider: list", "id": "f12901:c0:m0"}
{"signature": "def match_stops_to_nodes(gtfs, walk_network):", "body": "network_nodes = walk_network.nodes(data=\"<STR_LIT:true>\")<EOL>stop_Is = set(gtfs.get_straight_line_transfer_distances()['<STR_LIT>'])<EOL>stops_df = gtfs.stops()<EOL>geo_index = GeoGridIndex(precision=<NUM_LIT:6>)<EOL>for net_node, data in network_nodes:<EOL><INDENT>geo_index.add_point(GeoPoint(data['<STR_LIT>'], data['<STR_LIT>'], ref=net_node))<EOL><DEDENT>stop_I_to_node = {}<EOL>stop_I_to_dist = {}<EOL>for stop_I in stop_Is:<EOL><INDENT>stop_lat = float(stops_df[stops_df.stop_I == stop_I].lat)<EOL>stop_lon = float(stops_df[stops_df.stop_I == stop_I].lon)<EOL>geo_point = GeoPoint(stop_lat, stop_lon)<EOL>min_dist = float('<STR_LIT>')<EOL>min_dist_node = None<EOL>search_distances_m = [<NUM_LIT>, <NUM_LIT>]<EOL>for search_distance_m in search_distances_m:<EOL><INDENT>for point, distance in geo_index.get_nearest_points(geo_point, search_distance_m, \"<STR_LIT>\"):<EOL><INDENT>if distance < min_dist:<EOL><INDENT>min_dist = distance * <NUM_LIT:1000><EOL>min_dist_node = point.ref<EOL><DEDENT><DEDENT>if min_dist_node is not None:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if min_dist_node is None:<EOL><INDENT>warn(\"<STR_LIT>\" + str(stops_df[stops_df.stop_I == stop_I]))<EOL><DEDENT>stop_I_to_node[stop_I] = min_dist_node<EOL>stop_I_to_dist[stop_I] = min_dist<EOL><DEDENT>return stop_I_to_node, stop_I_to_dist<EOL>", "docstring": "Parameters\n----------\ngtfs : a GTFS object\nwalk_network : networkx.Graph\n\nReturns\n-------\nstop_I_to_node: dict\n    maps stop_I to closest walk_network node\nstop_I_to_dist: dict\n    maps stop_I to the distance to the closest walk_network node", "id": "f12903:m1"}
{"signature": "def add_walk_distances_to_db_python(gtfs, osm_path, cutoff_distance_m=<NUM_LIT:1000>):", "body": "if isinstance(gtfs, str):<EOL><INDENT>gtfs = GTFS(gtfs)<EOL><DEDENT>assert (isinstance(gtfs, GTFS))<EOL>print(\"<STR_LIT>\")<EOL>walk_network = create_walk_network_from_osm(osm_path)<EOL>print(\"<STR_LIT>\")<EOL>stop_I_to_nearest_osm_node, stop_I_to_nearest_osm_node_distance = match_stops_to_nodes(gtfs, walk_network)<EOL>transfers = gtfs.get_straight_line_transfer_distances()<EOL>from_I_to_to_stop_Is = {stop_I: set() for stop_I in stop_I_to_nearest_osm_node}<EOL>for transfer_tuple in transfers.itertuples():<EOL><INDENT>from_I = transfer_tuple.from_stop_I<EOL>to_I = transfer_tuple.to_stop_I<EOL>from_I_to_to_stop_Is[from_I].add(to_I)<EOL><DEDENT>print(\"<STR_LIT>\")<EOL>for from_I, to_stop_Is in from_I_to_to_stop_Is.items():<EOL><INDENT>from_node = stop_I_to_nearest_osm_node[from_I]<EOL>from_dist = stop_I_to_nearest_osm_node_distance[from_I]<EOL>shortest_paths = networkx.single_source_dijkstra_path_length(walk_network,<EOL>from_node,<EOL>cutoff=cutoff_distance_m - from_dist,<EOL>weight=\"<STR_LIT>\")<EOL>for to_I in to_stop_Is:<EOL><INDENT>to_distance = stop_I_to_nearest_osm_node_distance[to_I]<EOL>to_node = stop_I_to_nearest_osm_node[to_I]<EOL>osm_distance = shortest_paths.get(to_node, float('<STR_LIT>'))<EOL>total_distance = from_dist + osm_distance + to_distance<EOL>from_stop_I_transfers = transfers[transfers['<STR_LIT>'] == from_I]<EOL>straigth_distance = from_stop_I_transfers[from_stop_I_transfers[\"<STR_LIT>\"] == to_I][\"<STR_LIT:d>\"].values[<NUM_LIT:0>]<EOL>assert (straigth_distance < total_distance + <NUM_LIT:2>)  <EOL>if total_distance <= cutoff_distance_m:<EOL><INDENT>gtfs.conn.execute(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" + str(int(total_distance)) +<EOL>\"<STR_LIT>\" + str(from_I) + \"<STR_LIT>\" + str(to_I))<EOL><DEDENT><DEDENT><DEDENT>gtfs.conn.commit()<EOL>", "docstring": "Computes the walk paths between stops, and updates these to the gtfs 
database.\n\nParameters\n----------\ngtfs: gtfspy.GTFS or str\n    A GTFS object or a string representation.\nosm_path: str\n    path to the OpenStreetMap file\ncutoff_distance_m: number\n    maximum allowed distance in meters\n\nReturns\n-------\nNone\n\nSee Also\n--------\ngtfspy.calc_transfers\ncompute_walk_paths_java", "id": "f12903:m0"}
{"signature": "def setUp(self):", "body": "self.gtfs_source_dir = self.__class__.gtfs_source_dir<EOL>self.gtfs = self.__class__.G<EOL>self.extract_output_dir = os.path.join(self.gtfs_source_dir, \"<STR_LIT>\", \"<STR_LIT>\")<EOL>if not os.path.exists(self.extract_output_dir):<EOL><INDENT>makedirs(self.extract_output_dir)<EOL><DEDENT>", "docstring": "This method is run once before _each_ test method is executed", "id": "f12904:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def setUpClass(cls):<DEDENT>", "body": "cls.gtfs_source_dir = os.path.join(os.path.dirname(__file__), \"<STR_LIT>\")<EOL>cls.G = GTFS.from_directory_as_inmemory_db(cls.gtfs_source_dir)<EOL>", "docstring": "This method is run once before executing any tests", "id": "f12904:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def setUpClass(cls):<DEDENT>", "body": "cls.gtfs_source_dir = os.path.join(os.path.dirname(__file__), \"<STR_LIT>\")<EOL>cls.G = GTFS.from_directory_as_inmemory_db(cls.gtfs_source_dir)<EOL>", "docstring": "This method is run once before executing any tests", "id": "f12905:c0:m0"}
{"signature": "def setUp(self):", "body": "self.conn = sqlite3.connect('<STR_LIT>')<EOL>self.agencyText ='<STR_LIT>''<STR_LIT>'<EOL>self.stopsText ='<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'<EOL>self.calendarText ='<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'<EOL>self.calendarDatesText ='<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'<EOL>self.tripText =\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"<EOL>self.routesText =\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"<EOL>self.shapeText =\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"<EOL>self.stopTimesText =\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"<EOL>self.frequenciesText =\"<STR_LIT>\"\"<STR_LIT>\"<EOL>self.transfersText =\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"<EOL>self.feedInfoText =\"<STR_LIT>\"\"<STR_LIT>\"<EOL>self.fdict = {<EOL>'<STR_LIT>':           self.agencyText,<EOL>'<STR_LIT>':            self.stopsText,<EOL>'<STR_LIT>':         self.calendarText,<EOL>'<STR_LIT>':   self.calendarDatesText,<EOL>'<STR_LIT>':            self.tripText,<EOL>'<STR_LIT>':           self.routesText,<EOL>'<STR_LIT>':           self.shapeText,<EOL>'<STR_LIT>':       self.stopTimesText,<EOL>'<STR_LIT>':      self.frequenciesText,<EOL>'<STR_LIT>':        self.transfersText,<EOL>'<STR_LIT>':        self.feedInfoText<EOL>}<EOL>self.orig_row_factory = self.conn.row_factory<EOL>", "docstring": "This method is run once before _each_ test method is executed", "id": "f12907:c0:m3"}
{"signature": "def printTable(self, table_name):", "body": "prev_row_factory = self.setRowConn()<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\" + table_name)<EOL>print(\"<STR_LIT>\")<EOL>cur = self.conn.execute(\"<STR_LIT>\" % table_name)<EOL>names = [d[<NUM_LIT:0>] for d in cur.description]<EOL>for name in names:<EOL><INDENT>print(name + '<STR_LIT:U+002CU+0020>', end=\"<STR_LIT>\")<EOL><DEDENT>print(\"<STR_LIT>\")<EOL>for row in cur:<EOL><INDENT>print(row)<EOL><DEDENT>self.conn.row_factory = prev_row_factory<EOL>", "docstring": "Pretty prints a table with name table_name.\n\nParameters\n----------\ntable_name : str\n    name of the table", "id": "f12907:c0:m6"}
{"signature": "def tearDown(self):", "body": "pass<EOL>", "docstring": "This method is run once after _each_ test method is executed", "id": "f12911:c0:m2"}
{"signature": "def setUp(self):", "body": "self.gtfs_source_dir = self.__class__.gtfs_source_dir<EOL>self.gtfs = self.__class__.G<EOL>", "docstring": "This method is run once before _each_ test method is executed", "id": "f12911:c0:m1"}
{"signature": "def remove_all_trips_fully_outside_buffer(db_conn, center_lat, center_lon, buffer_km, update_secondary_data=True):", "body": "distance_function_str = add_wgs84_distance_function_to_db(db_conn)<EOL>stops_within_buffer_query_sql = \"<STR_LIT>\" + distance_function_str +\"<STR_LIT>\".format(lat=float(center_lat), lon=float(center_lon), d_m=int(<NUM_LIT:1000>*buffer_km))<EOL>select_all_trip_Is_where_stop_I_is_within_buffer_sql = \"<STR_LIT>\" + stops_within_buffer_query_sql + \"<STR_LIT:)>\"<EOL>trip_Is_to_remove_sql = \"<STR_LIT>\" + select_all_trip_Is_where_stop_I_is_within_buffer_sql + \"<STR_LIT:)>\"<EOL>trip_Is_to_remove = pandas.read_sql(trip_Is_to_remove_sql, db_conn)[\"<STR_LIT>\"].values<EOL>trip_Is_to_remove_string = \"<STR_LIT:U+002C>\".join([str(trip_I) for trip_I in trip_Is_to_remove])<EOL>remove_all_trips_fully_outside_buffer_sql = \"<STR_LIT>\" + trip_Is_to_remove_string + \"<STR_LIT:)>\"<EOL>remove_all_stop_times_where_trip_I_fully_outside_buffer_sql = \"<STR_LIT>\" + trip_Is_to_remove_string  + \"<STR_LIT:)>\"<EOL>db_conn.execute(remove_all_trips_fully_outside_buffer_sql)<EOL>db_conn.execute(remove_all_stop_times_where_trip_I_fully_outside_buffer_sql)<EOL>delete_stops_not_in_stop_times_and_not_as_parent_stop(db_conn)<EOL>db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)<EOL>db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)<EOL>db_conn.execute(DELETE_DAYS_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)<EOL>db_conn.execute(DELETE_DAY_TRIPS2_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)<EOL>db_conn.execute(DELETE_CALENDAR_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)<EOL>db_conn.execute(DELETE_CALENDAR_DATES_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)<EOL>db_conn.execute(DELETE_FREQUENCIES_ENTRIES_NOT_PRESENT_IN_TRIPS)<EOL>db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)<EOL>if update_secondary_data:<EOL><INDENT>update_secondary_data_copies(db_conn)<EOL><DEDENT>", "docstring": "Not used in the regular filter process for the time 
being.\n\nParameters\n----------\ndb_conn: sqlite3.Connection\n    connection to the GTFS object\ncenter_lat: float\ncenter_lon: float\nbuffer_km: float", "id": "f12919:m2"}
{"signature": "def _filter_by_agency(self):", "body": "if self.agency_ids_to_preserve is not None:<EOL><INDENT>logging.info(\"<STR_LIT>\")<EOL>agency_ids_to_preserve = list(self.agency_ids_to_preserve)<EOL>agencies = pandas.read_sql(\"<STR_LIT>\", self.copy_db_conn)<EOL>agencies_to_remove = []<EOL>for idx, row in agencies.iterrows():<EOL><INDENT>if row['<STR_LIT>'] not in agency_ids_to_preserve:<EOL><INDENT>agencies_to_remove.append(row['<STR_LIT>'])<EOL><DEDENT><DEDENT>for agency_id in agencies_to_remove:<EOL><INDENT>self.copy_db_conn.execute('<STR_LIT>', (agency_id,))<EOL><DEDENT>self.copy_db_conn.execute('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>self.copy_db_conn.execute('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>self.copy_db_conn.execute('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>self.copy_db_conn.execute('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>self.copy_db_conn.execute('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>self.copy_db_conn.execute('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>self.copy_db_conn.execute('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>self.copy_db_conn.execute('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>self.copy_db_conn.execute('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>self.copy_db_conn.commit()<EOL>return FILTERED<EOL><DEDENT>else:<EOL><INDENT>return NOT_FILTERED<EOL><DEDENT>", "docstring": "filter by agency ids\n:param copy_db_conn:\n:param agency_ids_to_preserve:\n:return:", "id": "f12919:c0:m5"}
{"signature": "def _delete_rows_by_start_and_end_date(self):", "body": "<EOL>if (self.start_date is not None) and (self.end_date is not None):<EOL><INDENT>start_date_ut = self.gtfs.get_day_start_ut(self.start_date)<EOL>end_date_ut = self.gtfs.get_day_start_ut(self.end_date)<EOL>if self.copy_db_conn.execute(\"<STR_LIT>\"<EOL>\"<STR_LIT>\").fetchone() != (<NUM_LIT:0>,):<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>logging.info(\"<STR_LIT>\")<EOL>table_to_preserve_map = {<EOL>\"<STR_LIT>\": \"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\": '<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>\"<STR_LIT>\": \"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>}<EOL>table_to_remove_map = {key: \"<STR_LIT>\" + to_preserve + \"<STR_LIT>\"<EOL>for key, to_preserve in table_to_preserve_map.items() }<EOL>GTFS(self.copy_db_conn).set_current_process_time_zone()<EOL>for table, query_template in table_to_remove_map.items():<EOL><INDENT>param_dict = {\"<STR_LIT>\": str(start_date_ut),<EOL>\"<STR_LIT>\": str(end_date_ut)}<EOL>query = \"<STR_LIT>\" + table + \"<STR_LIT:U+0020>\" +query_template.format(**param_dict)<EOL>self.copy_db_conn.execute(query)<EOL>self.copy_db_conn.commit()<EOL><DEDENT>return FILTERED<EOL><DEDENT>else:<EOL><INDENT>return NOT_FILTERED<EOL><DEDENT>", "docstring": "Removes rows from the sqlite database copy that are out of the time span defined by start_date and end_date\n:param gtfs: GTFS object\n:param copy_db_conn: sqlite database connection\n:param start_date:\n:param end_date:\n:return:", "id": "f12919:c0:m2"}
{"signature": "def __init__(self,<EOL>G,<EOL>copy_db_path,<EOL>buffer_distance_km=None,<EOL>buffer_lat=None,<EOL>buffer_lon=None,<EOL>update_metadata=True,<EOL>start_date=None,<EOL>end_date=None,<EOL>agency_ids_to_preserve=None,<EOL>agency_distance=None):", "body": "if start_date and end_date:<EOL><INDENT>if isinstance(start_date, (datetime.datetime, datetime.date)):<EOL><INDENT>self.start_date = start_date.strftime(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>self.start_date = start_date<EOL><DEDENT>if isinstance(end_date, (datetime.datetime, datetime.date)):<EOL><INDENT>end_date_dt = end_date<EOL>self.end_date = end_date.strftime(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>self.end_date = end_date<EOL>end_date_dt = datetime.datetime.strptime(self.end_date, \"<STR_LIT>\")<EOL><DEDENT>end_date_to_include = end_date_dt - datetime.timedelta(days=<NUM_LIT:1>)<EOL>self.end_date_to_include_str = end_date_to_include.strftime(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>self.start_date = None<EOL>self.end_date = None<EOL><DEDENT>self.copy_db_conn = None<EOL>self.copy_db_path = copy_db_path<EOL>self.agency_ids_to_preserve = agency_ids_to_preserve<EOL>self.gtfs = G<EOL>self.buffer_lat = buffer_lat<EOL>self.buffer_lon = buffer_lon<EOL>self.buffer_distance_km = buffer_distance_km<EOL>self.update_metadata = update_metadata<EOL>if agency_distance is not None:<EOL><INDENT>raise NotImplementedError<EOL><DEDENT>self.this_db_path = self.gtfs.get_main_database_path()<EOL>assert os.path.exists(self.this_db_path), \"<STR_LIT>\"<EOL>assert os.path.exists(os.path.dirname(os.path.abspath(copy_db_path))),\"<STR_LIT>\"<EOL>assert not os.path.exists(copy_db_path), \"<STR_LIT>\" % copy_db_path<EOL>", "docstring": "Copy a database, and then based on various filters.\nOnly method `create_filtered_copy` is provided as we do not want to take the risk of\nlosing the data stored in the original database.\n\nG: gtfspy.gtfs.GTFS\n    the original database\ncopy_db_path : str\n    path to another 
database database\nupdate_metadata : boolean, optional\n    whether to update metadata of the feed, defaulting to true\n    (this option is mainly available for testing purposes)\nstart_date : str, or datetime.datetime\n    filter out all data taking place before end_date (the start_time_ut of the end date)\n    Date format \"YYYY-MM-DD\"\n    (end_date_ut is not included after filtering)\nend_date : str, or datetime.datetime\n    Filter out all data taking place after end_date\n    The end_date is not included after filtering.\nagency_ids_to_preserve : iterable\n    List of agency_ids to retain (str) (e.g. 'HSL' for Helsinki)\n    Only routes by the listed agencies are then considered\nagency_distance : float\n    Only evaluated in combination with agency filter.\n    Distance (in km) to the other near-by stops that should be included in addition to\n    the ones defined by the agencies.\n    All vehicle trips going through at least two such stops would then be included in the\n    export. Note that this should not be a recursive thing.\n    Or should it be? :)\nbuffer_lat : float\n    Latitude of the buffer zone center\nbuffer_lon : float\n    Longitude of the buffer zone center\nbuffer_distance : float\n    Distance from the buffer zone center (in kilometers)\n\nReturns\n-------\nNone", "id": "f12919:c0:m0"}
{"signature": "def trips_frequencies(gtfs):", "body": "query = (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>return(gtfs.execute_custom_query_pandas(query))<EOL>", "docstring": "Get the frequency of trip_I in a particular day", "id": "f12921:m18"}
{"signature": "def hourly_frequencies(gtfs, st, et, route_type):", "body": "timeframe = et-st<EOL>hours = timeframe/ <NUM_LIT><EOL>day = gtfs.get_suitable_date_for_daily_extract()<EOL>stops = gtfs.get_stops_for_route_type(route_type).T.drop_duplicates().T<EOL>query = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(h=hours, st=st, et=et, day=day))<EOL>try:<EOL><INDENT>trips_frequency = gtfs.execute_custom_query_pandas(query).T.drop_duplicates().T<EOL>df = pd.merge(stops[['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']], trips_frequency[['<STR_LIT>', '<STR_LIT>']],<EOL>on='<STR_LIT>', how='<STR_LIT>')<EOL>return df.apply(pd.to_numeric)<EOL><DEDENT>except:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Return all the number of vehicles (i.e. busses,trams,etc) that pass hourly through a stop in a time frame.\n\nParameters\n----------\ngtfs: GTFS\nst : int\n    start time of the time framein unix time\net : int\n    end time of the time frame in unix time\nroute_type: int\n\nReturns\n-------\nnumeric pandas.DataFrame with columns\n    stop_I, lat, lon, frequency", "id": "f12921:m14"}
{"signature": "def update_stats(gtfs):", "body": "stats = get_stats(gtfs)<EOL>gtfs.update_stats(stats)<EOL>", "docstring": "Computes stats AND stores them into the underlying gtfs object (i.e. database).\n\nParameters\n----------\ngtfs: GTFS", "id": "f12921:m10"}
{"signature": "def get_spatial_bounds(gtfs, as_dict=False):", "body": "stats = get_stats(gtfs)<EOL>lon_min = stats['<STR_LIT>']<EOL>lon_max = stats['<STR_LIT>']<EOL>lat_min = stats['<STR_LIT>']<EOL>lat_max = stats['<STR_LIT>']<EOL>if as_dict:<EOL><INDENT>return {'<STR_LIT>': lon_min, '<STR_LIT>': lon_max, '<STR_LIT>': lat_min, '<STR_LIT>': lat_max}<EOL><DEDENT>else:<EOL><INDENT>return lon_min, lon_max, lat_min, lat_max<EOL><DEDENT>", "docstring": "Parameters\n----------\ngtfs\n\nReturns\n-------\nmin_lon: float\nmax_lon: float\nmin_lat: float\nmax_lat: float", "id": "f12921:m0"}
{"signature": "def trip_stats(gtfs, results_by_mode=False):", "body": "conn = gtfs.conn<EOL>conn.create_function(\"<STR_LIT>\", <NUM_LIT:4>, wgs84_distance)<EOL>cur = conn.cursor()<EOL>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'<EOL>q_result = pd.read_sql_query(query, conn)<EOL>q_result['<STR_LIT>'] = <NUM_LIT> * q_result['<STR_LIT>'] / q_result['<STR_LIT>']<EOL>q_result['<STR_LIT>'] = q_result['<STR_LIT>'] / <NUM_LIT:1000><EOL>q_result['<STR_LIT>'] = q_result['<STR_LIT>'] / <NUM_LIT><EOL>q_result = q_result.loc[q_result['<STR_LIT>'] != float(\"<STR_LIT>\")]<EOL>if results_by_mode:<EOL><INDENT>q_results = {}<EOL>for type in q_result['<STR_LIT:type>'].unique().tolist():<EOL><INDENT>q_results[type] = q_result.loc[q_result['<STR_LIT:type>'] == type]<EOL><DEDENT>return q_results<EOL><DEDENT>else:<EOL><INDENT>return q_result<EOL><DEDENT>", "docstring": "Parameters\n----------\ngtfs: GTFS\nresults_by_mode: bool\n\nReturns\n-------\nif results_by_mode is False:\n    q_result: pandas.DataFrame\nif results_by_mode is True:\n    q_results: dict\n        a dict with the following keys:\n            [ADD HERE]", "id": "f12921:m11"}
{"signature": "def _feed_calendar_span(gtfs, stats):", "body": "n_feeds = _n_gtfs_sources(gtfs)[<NUM_LIT:0>]<EOL>max_start = None<EOL>min_end = None<EOL>if n_feeds > <NUM_LIT:1>:<EOL><INDENT>for i in range(n_feeds):<EOL><INDENT>feed_key = \"<STR_LIT>\" + str(i) + \"<STR_LIT:_>\"<EOL>start_key = feed_key + \"<STR_LIT>\"<EOL>end_key = feed_key + \"<STR_LIT>\"<EOL>calendar_span = gtfs.conn.cursor().execute(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>', (feed_key + '<STR_LIT:%>',)).fetchone()<EOL>stats[start_key] = calendar_span[<NUM_LIT:0>]<EOL>stats[end_key] = calendar_span[<NUM_LIT:1>]<EOL>if calendar_span[<NUM_LIT:0>] is not None and calendar_span[<NUM_LIT:1>] is not None:<EOL><INDENT>if not max_start and not min_end:<EOL><INDENT>max_start = calendar_span[<NUM_LIT:0>]<EOL>min_end = calendar_span[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>if gtfs.get_day_start_ut(calendar_span[<NUM_LIT:0>]) > gtfs.get_day_start_ut(max_start):<EOL><INDENT>max_start = calendar_span[<NUM_LIT:0>]<EOL><DEDENT>if gtfs.get_day_start_ut(calendar_span[<NUM_LIT:1>]) < gtfs.get_day_start_ut(min_end):<EOL><INDENT>min_end = calendar_span[<NUM_LIT:1>]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>stats[\"<STR_LIT>\"] = max_start<EOL>stats[\"<STR_LIT>\"] = min_end<EOL><DEDENT>else:<EOL><INDENT>stats[\"<STR_LIT>\"] = stats[\"<STR_LIT>\"]<EOL>stats[\"<STR_LIT>\"] = stats[\"<STR_LIT>\"]<EOL><DEDENT>return stats<EOL>", "docstring": "Computes the temporal coverage of each source feed\n\nParameters\n----------\ngtfs: gtfspy.GTFS object\nstats: dict\n    where to append the stats\n\nReturns\n-------\nstats: dict", "id": "f12921:m9"}
{"signature": "def __init__(self, fname_or_conn):", "body": "if isinstance(fname_or_conn, string_types):<EOL><INDENT>if os.path.isfile(fname_or_conn):<EOL><INDENT>self.conn = sqlite3.connect(fname_or_conn)<EOL>self.fname = fname_or_conn<EOL>self.conn.execute('<STR_LIT>')<EOL>self.conn.execute('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise FileNotFoundError(\"<STR_LIT>\" + fname_or_conn + \"<STR_LIT>\")<EOL><DEDENT><DEDENT>elif isinstance(fname_or_conn, sqlite3.Connection):<EOL><INDENT>self.conn = fname_or_conn<EOL>self._dont_close = True<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError(<EOL>\"<STR_LIT>\" + str(type(fname_or_conn)) + \"<STR_LIT>\")<EOL><DEDENT>assert self.conn.execute(\"<STR_LIT>\").fetchone() is not None<EOL>self.meta = GTFSMetadata(self.conn)<EOL>self.conn.create_function(\"<STR_LIT>\", <NUM_LIT:4>, wgs84_distance)<EOL>self._timezone = pytz.timezone(self.get_timezone_name())<EOL>", "docstring": "Open a GTFS object\n\n        Parameters\n        ----------\n        fname_or_conn: str | sqlite3.Connection\n            path to the preprocessed gtfs database or a connection to a gtfs database", "id": "f12922:c0:m0"}
{"signature": "def get_trip_counts_per_day(self):", "body": "query = \"<STR_LIT>\"<EOL>trip_counts_per_day = pd.read_sql_query(query, self.conn, index_col=\"<STR_LIT:date>\")<EOL>max_day = trip_counts_per_day.index.max()<EOL>min_day = trip_counts_per_day.index.min()<EOL>min_date = datetime.datetime.strptime(min_day, '<STR_LIT>')<EOL>max_date = datetime.datetime.strptime(max_day, '<STR_LIT>')<EOL>num_days = (max_date - min_date).days<EOL>dates = [min_date + datetime.timedelta(days=x) for x in range(num_days + <NUM_LIT:1>)]<EOL>trip_counts = []<EOL>date_strings = []<EOL>for date in dates:<EOL><INDENT>date_string = date.strftime(\"<STR_LIT>\")<EOL>date_strings.append(date_string)<EOL>try:<EOL><INDENT>value = trip_counts_per_day.loc[date_string, '<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>value = <NUM_LIT:0><EOL><DEDENT>trip_counts.append(value)<EOL><DEDENT>for date_string in trip_counts_per_day.index:<EOL><INDENT>assert date_string in date_strings<EOL><DEDENT>data = {\"<STR_LIT:date>\": dates, \"<STR_LIT>\": date_strings, \"<STR_LIT>\": trip_counts}<EOL>return pd.DataFrame(data)<EOL>", "docstring": "Get trip counts per day between the start and end day of the feed.\n\nReturns\n-------\ntrip_counts : pandas.DataFrame\n    Has columns \"date_str\" (dtype str) \"trip_counts\" (dtype int)", "id": "f12922:c0:m25"}
{"signature": "def get_transit_events(self, start_time_ut=None, end_time_ut=None, route_type=None):", "body": "table_name = self._get_day_trips_table_name()<EOL>event_query = \"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\" + table_name + \"<STR_LIT:U+0020>\"\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"<EOL>where_clauses = []<EOL>if end_time_ut:<EOL><INDENT>where_clauses.append(table_name + \"<STR_LIT>\".format(end_time_ut=end_time_ut))<EOL>where_clauses.append(\"<STR_LIT>\".format(end_time_ut=end_time_ut))<EOL><DEDENT>if start_time_ut:<EOL><INDENT>where_clauses.append(table_name + \"<STR_LIT>\".format(start_time_ut=start_time_ut))<EOL>where_clauses.append(\"<STR_LIT>\".format(start_time_ut=start_time_ut))<EOL><DEDENT>if route_type is not None:<EOL><INDENT>assert route_type in ALL_ROUTE_TYPES<EOL>where_clauses.append(\"<STR_LIT>\".format(route_type=route_type))<EOL><DEDENT>if len(where_clauses) > <NUM_LIT:0>:<EOL><INDENT>event_query += \"<STR_LIT>\"<EOL>for i, where_clause in enumerate(where_clauses):<EOL><INDENT>if i is not <NUM_LIT:0>:<EOL><INDENT>event_query += \"<STR_LIT>\"<EOL><DEDENT>event_query += where_clause<EOL><DEDENT><DEDENT>event_query += \"<STR_LIT>\"<EOL>events_result = pd.read_sql_query(event_query, self.conn)<EOL>from_indices = numpy.nonzero(<EOL>(events_result['<STR_LIT>'][:-<NUM_LIT:1>].values == events_result['<STR_LIT>'][<NUM_LIT:1>:].values) *<EOL>(events_result['<STR_LIT>'][:-<NUM_LIT:1>].values < events_result['<STR_LIT>'][<NUM_LIT:1>:].values)<EOL>)[<NUM_LIT:0>]<EOL>to_indices = from_indices + <NUM_LIT:1><EOL>assert (events_result['<STR_LIT>'][from_indices].values == events_result['<STR_LIT>'][to_indices].values).all()<EOL>trip_Is = events_result['<STR_LIT>'][from_indices]<EOL>from_stops = events_result['<STR_LIT>'][from_indices]<EOL>to_stops = events_result['<STR_LIT>'][to_indices]<EOL>shape_ids = events_result['<STR_LIT>'][from_indices]<EOL>dep_times = events_result['<STR_LIT>'][from_indices]<EOL>arr_times = 
events_result['<STR_LIT>'][to_indices]<EOL>route_types = events_result['<STR_LIT>'][from_indices]<EOL>route_ids = events_result['<STR_LIT>'][from_indices]<EOL>route_Is = events_result['<STR_LIT>'][from_indices]<EOL>durations = arr_times.values - dep_times.values<EOL>assert (durations >= <NUM_LIT:0>).all()<EOL>from_seqs = events_result['<STR_LIT>'][from_indices]<EOL>to_seqs = events_result['<STR_LIT>'][to_indices]<EOL>data_tuples = zip(from_stops, to_stops, dep_times, arr_times,<EOL>shape_ids, route_types, route_ids, trip_Is,<EOL>durations, from_seqs, to_seqs, route_Is)<EOL>columns = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]<EOL>df = pd.DataFrame.from_records(data_tuples, columns=columns)<EOL>return df<EOL>", "docstring": "Obtain a list of events that take place during a time interval.\nEach event needs to be only partially overlap the given time interval.\nDoes not include walking events.\n\nParameters\n----------\nstart_time_ut : int\n    start of the time interval in unix time (seconds)\nend_time_ut: int\n    end of the time interval in unix time (seconds)\nroute_type: int\n    consider only events for this route_type\n\nReturns\n-------\nevents: pandas.DataFrame\n    with the following columns and types\n        dep_time_ut: int\n        arr_time_ut: int\n        from_stop_I: int\n        to_stop_I: int\n        trip_I : int\n        shape_id : int\n        route_type : int\n\nSee also\n--------\nget_transit_events_in_time_span : an older version of the same thing", "id": "f12922:c0:m50"}
{"signature": "def get_route_name_and_type(self, route_I):", "body": "cur = self.conn.cursor()<EOL>results = cur.execute(\"<STR_LIT>\", (route_I,))<EOL>name, rtype = results.fetchone()<EOL>return name, int(rtype)<EOL>", "docstring": "Get route short name and type\n\nParameters\n----------\nroute_I: int\n    route index (database specific)\n\nReturns\n-------\nname: str\n    short name of the route, eg. 195N\ntype: int\n    route_type according to the GTFS standard", "id": "f12922:c0:m33"}
{"signature": "def _get_possible_day_starts(self, start_ut, end_ut, max_time_overnight=None):", "body": "if max_time_overnight is None:<EOL><INDENT>max_time_overnight = <NUM_LIT:7> * <NUM_LIT> * <NUM_LIT><EOL><DEDENT>assert start_ut < end_ut<EOL>start_day_ut = self.day_start_ut(start_ut)<EOL>start_day_ds = start_ut - start_day_ut<EOL>end_day_ut = self.day_start_ut(end_ut)<EOL>if start_day_ds < max_time_overnight:<EOL><INDENT>start_day_ut = self.increment_day_start_ut(start_day_ut, n_days=-<NUM_LIT:1>)<EOL><DEDENT>day_start_times_ut = [start_day_ut]<EOL>while day_start_times_ut[-<NUM_LIT:1>] < end_day_ut:<EOL><INDENT>day_start_times_ut.append(self.increment_day_start_ut(day_start_times_ut[-<NUM_LIT:1>]))<EOL><DEDENT>start_times_ds = []<EOL>end_times_ds = []<EOL>for dsut in day_start_times_ut:<EOL><INDENT>day_start_ut = max(<NUM_LIT:0>, start_ut - dsut)<EOL>start_times_ds.append(day_start_ut)<EOL>day_end_ut = end_ut - dsut<EOL>end_times_ds.append(day_end_ut)<EOL><DEDENT>return day_start_times_ut, start_times_ds, end_times_ds<EOL>", "docstring": "Get all possible day start times between start_ut and end_ut\nCurrently this function is used only by get_tripIs_within_range_by_dsut\n\nParameters\n----------\nstart_ut : list<int>\n    start time in unix time\nend_ut : list<int>\n    end time in unix time\nmax_time_overnight : list<int>\n    the maximum length of time that a trip can take place on\n    during the next day (i.e. after midnight run times like 25:35)\n\nReturns\n-------\nday_start_times_ut : list\n    list of ints (unix times in seconds) for returning all possible day\n    start times\nstart_times_ds : list\n    list of ints (unix times in seconds) stating the valid start time in\n    day seconds\nend_times_ds : list\n    list of ints (unix times in seconds) stating the valid end times in\n    day_seconds", "id": "f12922:c0:m40"}
{"signature": "def get_stop_count_data(self, start_ut, end_ut):", "body": "<EOL>trips_df = self.get_tripIs_active_in_range(start_ut, end_ut)<EOL>stop_counts = Counter()<EOL>for row in trips_df.itertuples():<EOL><INDENT>stops_seq = self.get_trip_stop_time_data(row.trip_I, row.day_start_ut)<EOL>for stop_time_row in stops_seq.itertuples(index=False):<EOL><INDENT>if (stop_time_row.dep_time_ut >= start_ut) and (stop_time_row.dep_time_ut <= end_ut):<EOL><INDENT>stop_counts[stop_time_row.stop_I] += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>all_stop_data = self.stops()<EOL>counts = [stop_counts[stop_I] for stop_I in all_stop_data[\"<STR_LIT>\"].values]<EOL>all_stop_data.loc[:, \"<STR_LIT:count>\"] = pd.Series(counts, index=all_stop_data.index)<EOL>return all_stop_data<EOL>", "docstring": "Get stop count data.\n\nParameters\n----------\nstart_ut : int\n    start time in unixtime\nend_ut : int\n    end time in unixtime\n\nReturns\n-------\nstopData : pandas.DataFrame\n    each row in the stopData dataFrame is a dictionary with the following elements\n        stop_I, count, lat, lon, name\n    with data types\n        (int, int, float, float, str)", "id": "f12922:c0:m21"}
{"signature": "def get_route_name_and_type_of_tripI(self, trip_I):", "body": "cur = self.conn.cursor()<EOL>results = cur.execute(\"<STR_LIT>\"<EOL>.format(trip_I=trip_I))<EOL>name, rtype = results.fetchone()<EOL>return u\"<STR_LIT:%s>\" % str(name), int(rtype)<EOL>", "docstring": "Get route short name and type\n\nParameters\n----------\ntrip_I: int\n    short trip index created when creating the database\n\nReturns\n-------\nname: str\n    short name of the route, eg. 195N\ntype: int\n    route_type according to the GTFS standard", "id": "f12922:c0:m32"}
{"signature": "def get_events_by_tripI_and_dsut(self, trip_I, day_start_ut,<EOL>start_ut=None, end_ut=None):", "body": "<EOL>assert day_start_ut <= start_ut<EOL>assert day_start_ut <= end_ut<EOL>assert start_ut <= end_ut<EOL>events = []<EOL>if not self.tripI_takes_place_on_dsut(trip_I, day_start_ut):<EOL><INDENT>return events<EOL><DEDENT>query = \"\"\"<STR_LIT>\"\"\"<EOL>params = [day_start_ut, day_start_ut,<EOL>trip_I]<EOL>if start_ut:<EOL><INDENT>query += \"<STR_LIT>\"<EOL>params += [start_ut, day_start_ut]<EOL><DEDENT>if end_ut:<EOL><INDENT>query += \"<STR_LIT>\"<EOL>params += [end_ut, day_start_ut]<EOL><DEDENT>query += \"<STR_LIT>\"<EOL>cur = self.conn.cursor()<EOL>rows = cur.execute(query, params)<EOL>stop_data = list(rows)<EOL>for i in range(len(stop_data) - <NUM_LIT:1>):<EOL><INDENT>event = {<EOL>\"<STR_LIT>\": stop_data[i][<NUM_LIT:0>],<EOL>\"<STR_LIT>\": stop_data[i + <NUM_LIT:1>][<NUM_LIT:0>],<EOL>\"<STR_LIT>\": stop_data[i][<NUM_LIT:2>],<EOL>\"<STR_LIT>\": stop_data[i + <NUM_LIT:1>][<NUM_LIT:1>]<EOL>}<EOL>events.append(event)<EOL><DEDENT>return events<EOL>", "docstring": "Get trip data as a list of events (i.e. dicts).\n\nParameters\n----------\ntrip_I : int\n    shorthand index of the trip.\nday_start_ut : int\n    the start time of the day in unix time (seconds)\nstart_ut : int, optional\n    consider only events that start after this time\n    If not specified, this filtering is not applied.\nend_ut : int, optional\n    Consider only events that end before this time\n    If not specified, this filtering is not applied.\n\nReturns\n-------\nevents: list of dicts\n    each element contains the following data:\n        from_stop: int (stop_I)\n        to_stop: int (stop_I)\n        dep_time_ut: int (in unix time)\n        arr_time_ut: int (in unix time)", "id": "f12922:c0:m36"}
{"signature": "def stops(self):", "body": "return self.get_table(\"<STR_LIT>\")<EOL>", "docstring": "Get all stop data as a pandas DataFrame\n\nReturns\n-------\ndf: pandas.DataFrame", "id": "f12922:c0:m42"}
{"signature": "def get_table(self, table_name):", "body": "return pd.read_sql(\"<STR_LIT>\" + table_name, self.conn)<EOL>", "docstring": "Return a pandas.DataFrame object corresponding to the sql table\n\nParameters\n----------\ntable_name: str\n    name of the table in the database\n\nReturns\n-------\ndf : pandas.DataFrame", "id": "f12922:c0:m10"}
{"signature": "def get_route_difference_with_other_db(self, other_gtfs, start_time, end_time, uniqueness_threshold=None,<EOL>uniqueness_ratio=None):", "body": "from gtfspy.stats import frequencies_by_generated_route<EOL>this_df = frequencies_by_generated_route(self, start_time, end_time)<EOL>other_df = frequencies_by_generated_route(other_gtfs, start_time, end_time)<EOL>this_routes = {x: set(x.split('<STR_LIT:U+002C>')) for x in this_df[\"<STR_LIT>\"]}<EOL>other_routes = {x: set(x.split('<STR_LIT:U+002C>')) for x in other_df[\"<STR_LIT>\"]}<EOL>this_uniques = list(this_routes.keys())<EOL>other_uniques = list(other_routes.keys())<EOL>print(\"<STR_LIT>\", len(this_uniques))<EOL>print(\"<STR_LIT>\", len(other_uniques))<EOL>for i_key, i in this_routes.items():<EOL><INDENT>for j_key, j in other_routes.items():<EOL><INDENT>union = i | j<EOL>intersection = i & j<EOL>symmetric_difference = i ^ j<EOL>if uniqueness_ratio:<EOL><INDENT>if len(intersection) / len(union) >= uniqueness_ratio:<EOL><INDENT>try:<EOL><INDENT>this_uniques.remove(i_key)<EOL>this_df = this_df[this_df[\"<STR_LIT>\"] != i_key]<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>other_uniques.remove(j_key)<EOL>other_df = other_df[other_df[\"<STR_LIT>\"] != j_key]<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>print(\"<STR_LIT>\", len(this_df))<EOL>print(\"<STR_LIT>\", len(other_df))<EOL>return this_df, other_df<EOL>", "docstring": "Compares the routes based on stops in the schedule with the routes in another db and returns the ones without match.\nUniqueness thresholds or ratio can be used to allow small differences\n:param uniqueness_threshold:\n:param uniqueness_ratio:\n:return:", "id": "f12922:c0:m51"}
{"signature": "def get_shape_distance_between_stops(self, trip_I, from_stop_seq, to_stop_seq):", "body": "query_template = \"<STR_LIT>\"<EOL>stop_seqs = [from_stop_seq, to_stop_seq]<EOL>shape_breaks = []<EOL>for seq in stop_seqs:<EOL><INDENT>q = query_template.format(seq=seq, trip_I=trip_I)<EOL>shape_breaks.append(self.conn.execute(q).fetchone())<EOL><DEDENT>query_template = \"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"<EOL>distance_query = query_template.format(trip_I=trip_I, from_stop_seq=from_stop_seq, to_stop_seq=to_stop_seq)<EOL>return self.conn.execute(distance_query).fetchone()[<NUM_LIT:0>]<EOL>", "docstring": "Get the distance along a shape between stops\n\nParameters\n----------\ntrip_I : int\n    trip_ID along which we travel\nfrom_stop_seq : int\n    the sequence number of the 'origin' stop\nto_stop_seq : int\n    the sequence number of the 'destination' stop\n\nReturns\n-------\ndistance : float, None\n    If the shape calculation succeeded, return a float, otherwise return None\n    (i.e. in the case where the shapes table is empty)", "id": "f12922:c0:m5"}
{"signature": "def generate_routable_transit_events(self, start_time_ut=None, end_time_ut=None, route_type=None):", "body": "from gtfspy.networks import temporal_network<EOL>df = temporal_network(self, start_time_ut=start_time_ut, end_time_ut=end_time_ut, route_type=route_type)<EOL>df.sort_values(\"<STR_LIT>\", ascending=False, inplace=True)<EOL>for row in df.itertuples():<EOL><INDENT>yield row<EOL><DEDENT>", "docstring": "Generates events that take place during a time interval [start_time_ut, end_time_ut].\nEach event needs to be only partially overlap the given time interval.\nDoes not include walking events.\nThis is just a quick and dirty implementation to get a way of quickly get a\nmethod for generating events compatible with the routing algorithm\n\nParameters\n----------\nstart_time_ut: int\nend_time_ut: int\nroute_type: ?\n\nYields\n------\nevent: namedtuple\n    containing:\n        dep_time_ut: int\n        arr_time_ut: int\n        from_stop_I: int\n        to_stop_I: int\n        trip_I : int\n        route_type : int\n        seq: int", "id": "f12922:c0:m49"}
{"signature": "def get_table_names(self):", "body": "return list(pd.read_sql(\"<STR_LIT>\", self.conn)[\"<STR_LIT:name>\"])<EOL>", "docstring": "Return a list of the underlying tables in the database.\n\nReturns\n-------\ntable_names: list[str]", "id": "f12922:c0:m12"}
{"signature": "@classmethod<EOL><INDENT>def from_directory_as_inmemory_db(cls, gtfs_directory):<DEDENT>", "body": "<EOL>from gtfspy.import_gtfs import import_gtfs<EOL>conn = sqlite3.connect(\"<STR_LIT>\")<EOL>import_gtfs(gtfs_directory,<EOL>conn,<EOL>preserve_connection=True,<EOL>print_progress=False)<EOL>return cls(conn)<EOL>", "docstring": "Instantiate a GTFS object by computing\n\nParameters\n----------\ngtfs_directory: str\n    path to the directory for importing the database", "id": "f12922:c0:m2"}
{"signature": "def update_stop_coordinates(self, stop_updates):", "body": "cur = self.conn.cursor()<EOL>stop_values = [(values.lat, values.lon, values.stop_id) for values in stop_updates.itertuples()]<EOL>cur.executemany(\"\"\"<STR_LIT>\"\"\", stop_values)<EOL>self.conn.commit()<EOL>", "docstring": ":param stop_updates: DataFrame\n:return:", "id": "f12922:c0:m71"}
{"signature": "def print_validation_warnings(self):", "body": "from .timetable_validator import TimetableValidator<EOL>validator = TimetableValidator(self)<EOL>return validator.validate_and_get_warnings()<EOL>", "docstring": "See Validator.validate for more information.\n\nReturns\n-------\nwarnings_container: validator.TimetableValidationWarningsContainer", "id": "f12922:c0:m59"}
{"signature": "def get_trip_stop_time_data(self, trip_I, day_start_ut):", "body": "to_select = \"<STR_LIT>\" + str(day_start_ut) + \"<STR_LIT>\"<EOL>str_to_run = \"<STR_LIT>\" + to_select + \"\"\"<STR_LIT>\"\"\"<EOL>str_to_run = str_to_run.format(trip_I=trip_I)<EOL>return pd.read_sql_query(str_to_run, self.conn)<EOL>", "docstring": "Obtain from the (standard) GTFS database, trip stop data\n(departure time in ut, lat, lon, seq, shape_break) as a pandas DataFrame\n\nSome filtering could be applied here, if only e.g. departure times\ncorresponding within some time interval should be considered.\n\nParameters\n----------\ntrip_I : int\n    integer index of the trip\nday_start_ut : int\n    the start time of the day in unix time (seconds)\n\nReturns\n-------\ndf: pandas.DataFrame\n    df has the following columns\n    'departure_time_ut, lat, lon, seq, shape_break'", "id": "f12922:c0:m35"}
{"signature": "def get_stops_for_route_type(self, route_type):", "body": "if route_type is WALK:<EOL><INDENT>return self.stops()<EOL><DEDENT>else:<EOL><INDENT>return pd.read_sql_query(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\", self.conn, params=(route_type,))<EOL><DEDENT>", "docstring": "Parameters\n----------\nroute_type: int\n\nReturns\n-------\nstops: pandas.DataFrame", "id": "f12922:c0:m47"}
{"signature": "def unixtime_seconds_to_gtfs_datetime(self, unixtime):", "body": "return datetime.datetime.fromtimestamp(unixtime, self._timezone)<EOL>", "docstring": "Convert unixtime to localized datetime\n\nParameters\n----------\nunixtime : int\n\nReturns\n-------\ngtfs_datetime: datetime.datetime\n    time localized to gtfs_datetime's timezone", "id": "f12922:c0:m17"}
{"signature": "def get_weekly_extract_start_date(self, ut=False, weekdays_at_least_of_max=<NUM_LIT>,<EOL>verbose=False, download_date_override=None):", "body": "daily_trip_counts = self.get_trip_counts_per_day()<EOL>if isinstance(download_date_override, str):<EOL><INDENT>search_start_date = datetime.datetime.strptime(download_date_override, \"<STR_LIT>\")<EOL><DEDENT>elif isinstance(download_date_override, datetime.datetime):<EOL><INDENT>search_start_date = download_date_override<EOL><DEDENT>else:<EOL><INDENT>assert download_date_override is None<EOL>download_date_str = self.meta['<STR_LIT>']<EOL>if download_date_str == \"<STR_LIT>\":<EOL><INDENT>warnings.warn(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" + self.get_weekly_extract_start_date.__name__ +<EOL>\"<STR_LIT>\")<EOL>search_start_date = daily_trip_counts['<STR_LIT:date>'].min()<EOL><DEDENT>else:<EOL><INDENT>search_start_date = datetime.datetime.strptime(download_date_str, \"<STR_LIT>\")<EOL><DEDENT><DEDENT>feed_min_date = daily_trip_counts['<STR_LIT:date>'].min()<EOL>feed_max_date = daily_trip_counts['<STR_LIT:date>'].max()<EOL>assert (feed_max_date - feed_min_date >= datetime.timedelta(days=<NUM_LIT:7>)),\"<STR_LIT>\"<EOL>next_monday_from_search_start_date = search_start_date + timedelta(days=(<NUM_LIT:7> - search_start_date.weekday()))<EOL>if not (feed_min_date <= next_monday_from_search_start_date <= feed_max_date):<EOL><INDENT>warnings.warn(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>next_monday_from_search_start_date = feed_min_date + timedelta(days=(<NUM_LIT:7> - feed_min_date.weekday()))<EOL><DEDENT>max_trip_count = daily_trip_counts['<STR_LIT>'].quantile(<NUM_LIT>)<EOL>threshold = weekdays_at_least_of_max * max_trip_count<EOL>threshold_fulfilling_days = daily_trip_counts['<STR_LIT>'] > threshold<EOL>search_start_monday_index = daily_trip_counts[daily_trip_counts['<STR_LIT:date>'] == next_monday_from_search_start_date].index[<NUM_LIT:0>]<EOL>while_loop_monday_index = search_start_monday_index<EOL>while 
len(daily_trip_counts.index) >= while_loop_monday_index + <NUM_LIT:7>:<EOL><INDENT>if all(threshold_fulfilling_days[while_loop_monday_index:while_loop_monday_index + <NUM_LIT:5>]):<EOL><INDENT>row = daily_trip_counts.iloc[while_loop_monday_index]<EOL>if ut:<EOL><INDENT>return self.get_day_start_ut(row.date_str)<EOL><DEDENT>else:<EOL><INDENT>return row['<STR_LIT:date>']<EOL><DEDENT><DEDENT>while_loop_monday_index += <NUM_LIT:7><EOL><DEDENT>while_loop_monday_index = search_start_monday_index - <NUM_LIT:7><EOL>while while_loop_monday_index >= <NUM_LIT:0>:<EOL><INDENT>if all(threshold_fulfilling_days[while_loop_monday_index:while_loop_monday_index + <NUM_LIT:5>]):<EOL><INDENT>row = daily_trip_counts.iloc[while_loop_monday_index]<EOL>if ut:<EOL><INDENT>return self.get_day_start_ut(row.date_str)<EOL><DEDENT>else:<EOL><INDENT>return row['<STR_LIT:date>']<EOL><DEDENT><DEDENT>while_loop_monday_index -= <NUM_LIT:7><EOL><DEDENT>raise RuntimeError(\"<STR_LIT>\")<EOL>", "docstring": "Find a suitable weekly extract start date (monday).\nThe goal is to obtain as 'usual' week as possible.\nThe weekdays of the weekly extract week should contain\nat least 0.9 of the total maximum of trips.\n\nParameters\n----------\nut: return unixtime?\nweekdays_at_least_of_max: float\n\ndownload_date_override: str, semi-optional\n    Download-date in format %Y-%m-%d, weeks close to this.\n    Overrides the (possibly) recorded downloaded date in the database\n\nReturns\n-------\ndate: int or str\n\nRaises\n------\nerror: RuntimeError\n    If no download date could be found.", "id": "f12922:c0:m27"}
{"signature": "def get_main_database_path(self):", "body": "cur = self.conn.cursor()<EOL>cur.execute(\"<STR_LIT>\")<EOL>rows = cur.fetchall()<EOL>for row in rows:<EOL><INDENT>if row[<NUM_LIT:1>] == str(\"<STR_LIT>\"):<EOL><INDENT>return row[<NUM_LIT:2>]<EOL><DEDENT><DEDENT>", "docstring": "Should return the path to the database\n\nReturns\n-------\npath : unicode\n    path to the database, empty string for in-memory databases", "id": "f12922:c0:m3"}
{"signature": "def set_current_process_time_zone(self):", "body": "TZ = self.conn.execute('<STR_LIT>').fetchall()[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>return set_process_timezone(TZ)<EOL>", "docstring": "This function queries a GTFS connection, finds the timezone of this\ndatabase, and sets it in the TZ environment variable.  This is a\nprocess-global configuration, by the nature of the C library!\n\nReturns\n-------\nNone\n\nAlters os.environ['TZ']", "id": "f12922:c0:m13"}
{"signature": "def increment_day_start_ut(self, day_start_ut, n_days=<NUM_LIT:1>):", "body": "old_tz = self.set_current_process_time_zone()<EOL>day0 = time.localtime(day_start_ut + <NUM_LIT>)  <EOL>dayN = time.mktime(day0[:<NUM_LIT:2>] +  <EOL>(day0[<NUM_LIT:2>] + n_days,) +  <EOL>(<NUM_LIT:12>, <NUM_LIT>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, -<NUM_LIT:1>)) - <NUM_LIT>  <EOL>set_process_timezone(old_tz)<EOL>return dayN<EOL>", "docstring": "Increment the GTFS-definition of \"day start\".\n\n        Parameters\n        ----------\n        day_start_ut : int\n            unixtime of the previous start of day.  If this time is between\n            12:00 or greater, there *will* be bugs.  To solve this, run the\n            input through day_start_ut first.\n        n_days: int\n            number of days to increment", "id": "f12922:c0:m39"}
{"signature": "def get_approximate_schedule_time_span_in_ut(self):", "body": "first_day_start_ut, last_day_start_ut = self.get_day_start_ut_span()<EOL>return first_day_start_ut, last_day_start_ut + <NUM_LIT> * <NUM_LIT><EOL>", "docstring": "Return conservative estimates of start_time_ut and end_time_uts.\nAll trips, events etc. should start after start_time_ut_conservative and end before end_time_ut_conservative\n\nReturns\n-------\nstart_time_ut_conservative : int\nend_time_ut_conservative : int", "id": "f12922:c0:m55"}
{"signature": "def stop(self, stop_I):", "body": "return pd.read_sql_query(\"<STR_LIT>\".format(stop_I=stop_I), self.conn)<EOL>", "docstring": "Get all stop data as a pandas DataFrame for all stops, or an individual stop'\n\nParameters\n----------\nstop_I : int\n    stop index\n\nReturns\n-------\nstop: pandas.DataFrame", "id": "f12922:c0:m43"}
{"signature": "def unlocalized_datetime_to_ut_seconds(self, unlocalized_datetime):", "body": "loc_dt = self._timezone.localize(unlocalized_datetime)<EOL>unixtime_seconds = calendar.timegm(loc_dt.utctimetuple())<EOL>return unixtime_seconds<EOL>", "docstring": "Convert datetime (in GTFS timezone) to unixtime\n\nParameters\n----------\nunlocalized_datetime : datetime.datetime\n    (tz coerced to GTFS timezone, should NOT be UTC.)\n\nReturns\n-------\noutput : int (unixtime)", "id": "f12922:c0:m18"}
{"signature": "def get_directly_accessible_stops_within_distance(self, stop, distance):", "body": "query = \"\"\"<STR_LIT>\"\"\" % (stop, distance)<EOL>return pd.read_sql_query(query, self.conn)<EOL>", "docstring": "Returns stops that are accessible without transfer from the stops that are within a specific walking distance\n:param stop: int\n:param distance: int\n:return:", "id": "f12922:c0:m8"}
{"signature": "def stop_to_stop_networks_by_type(gtfs):", "body": "route_type_to_network = dict()<EOL>for route_type in route_types.ALL_ROUTE_TYPES:<EOL><INDENT>if route_type == route_types.WALK:<EOL><INDENT>net = walk_transfer_stop_to_stop_network(gtfs)<EOL><DEDENT>else:<EOL><INDENT>net = stop_to_stop_network_for_route_type(gtfs, route_type)<EOL><DEDENT>route_type_to_network[route_type] = net<EOL><DEDENT>assert len(route_type_to_network) == len(route_types.ALL_ROUTE_TYPES)<EOL>return route_type_to_network<EOL>", "docstring": "Compute stop-to-stop networks for all travel modes (route_types).\n\nParameters\n----------\ngtfs: gtfspy.GTFS\n\nReturns\n-------\ndict: dict[int, networkx.DiGraph]\n    keys should be one of route_types.ALL_ROUTE_TYPES (i.e. GTFS route_types)", "id": "f12923:m2"}
{"signature": "def _add_stops_to_net(net, stops):", "body": "for stop in stops.itertuples():<EOL><INDENT>data = {<EOL>\"<STR_LIT>\": stop.lat,<EOL>\"<STR_LIT>\": stop.lon,<EOL>\"<STR_LIT:name>\": stop.name<EOL>}<EOL>net.add_node(stop.stop_I, data)<EOL><DEDENT>", "docstring": "Add nodes to the network from the pandas dataframe describing (a part of the) stops table in the GTFS database.\n\nParameters\n----------\nnet: networkx.Graph\nstops: pandas.DataFrame", "id": "f12923:m4"}
{"signature": "def get_warning_counter(self):", "body": "return self._warnings_counter<EOL>", "docstring": "Returns\n-------\ncounter: collections.Counter", "id": "f12924:c0:m4"}
{"signature": "def _write_stop_to_stop_network_edges(net, file_name, data=True, fmt=None):", "body": "if fmt is None:<EOL><INDENT>fmt = \"<STR_LIT>\"<EOL><DEDENT>if fmt == \"<STR_LIT>\":<EOL><INDENT>if data:<EOL><INDENT>networkx.write_edgelist(net, file_name, data=True)<EOL><DEDENT>else:<EOL><INDENT>networkx.write_edgelist(net, file_name)<EOL><DEDENT><DEDENT>elif fmt == \"<STR_LIT>\":<EOL><INDENT>with open(file_name, '<STR_LIT:w>') as f:<EOL><INDENT>edge_iter = net.edges_iter(data=True)<EOL>_, _, edg_data = next(edge_iter)<EOL>edg_data_keys = list(sorted(edg_data.keys()))<EOL>header = \"<STR_LIT:;>\".join([\"<STR_LIT>\", \"<STR_LIT>\"] + edg_data_keys)<EOL>f.write(header)<EOL>for from_node_I, to_node_I, data in net.edges_iter(data=True):<EOL><INDENT>f.write(\"<STR_LIT:\\n>\")<EOL>values = [str(from_node_I), str(to_node_I)]<EOL>data_values = []<EOL>for key in edg_data_keys:<EOL><INDENT>if key == \"<STR_LIT>\":<EOL><INDENT>route_I_counts_string = str(data[key]).replace(\"<STR_LIT:U+0020>\", \"<STR_LIT>\")[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL>data_values.append(route_I_counts_string)<EOL><DEDENT>else:<EOL><INDENT>data_values.append(str(data[key]))<EOL><DEDENT><DEDENT>all_values = values + data_values<EOL>f.write(\"<STR_LIT:;>\".join(all_values))<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Write out a network\n\nParameters\n----------\nnet: networkx.DiGraph\nbase_name: str\n    path to the filename (without extension)\ndata: bool, optional\n    whether or not to write out any edge data present\nfmt: str, optional\n    If \"csv\" write out the network in csv format.", "id": "f12925:m8"}
{"signature": "def write_walk_transfer_edges(gtfs, output_file_name):", "body": "transfers = gtfs.get_table(\"<STR_LIT>\")<EOL>transfers.drop([u\"<STR_LIT>\", u\"<STR_LIT>\"], <NUM_LIT:1>, inplace=True)<EOL>with util.create_file(output_file_name, tmpdir=True, keepext=True) as tmpfile:<EOL><INDENT>transfers.to_csv(tmpfile, encoding='<STR_LIT:utf-8>', index=False)<EOL><DEDENT>", "docstring": "Parameters\n----------\ngtfs: gtfspy.GTFS\noutput_file_name: str", "id": "f12925:m0"}
{"signature": "def write_nodes(gtfs, output, fields=None):", "body": "nodes = gtfs.get_table(\"<STR_LIT>\")<EOL>if fields is not None:<EOL><INDENT>nodes = nodes[fields]<EOL><DEDENT>with util.create_file(output, tmpdir=True, keepext=True) as tmpfile:<EOL><INDENT>nodes.to_csv(tmpfile, encoding='<STR_LIT:utf-8>', index=False, sep=\"<STR_LIT:;>\")<EOL><DEDENT>", "docstring": "Parameters\n----------\ngtfs: gtfspy.GTFS\noutput: str\n    Path to the output file\nfields: list, optional\n    which pieces of information to provide", "id": "f12925:m1"}
{"signature": "def write_static_networks(gtfs, output_dir, fmt=None):", "body": "if fmt is None:<EOL><INDENT>fmt = \"<STR_LIT>\"<EOL><DEDENT>single_layer_networks = stop_to_stop_networks_by_type(gtfs)<EOL>util.makedirs(output_dir)<EOL>for route_type, net in single_layer_networks.items():<EOL><INDENT>tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[route_type]<EOL>file_name = os.path.join(output_dir, \"<STR_LIT>\" + tag + \"<STR_LIT:.>\" + fmt)<EOL>if len(net.edges()) > <NUM_LIT:0>:<EOL><INDENT>_write_stop_to_stop_network_edges(net, file_name, fmt=fmt)<EOL><DEDENT><DEDENT>", "docstring": "Parameters\n----------\ngtfs: gtfspy.GTFS\noutput_dir: (str, unicode)\n    a path where to write\nfmt: None, optional\n    defaulting to \"edg\" and writing results as \".edg\" files\n     If \"csv\" csv files are produced instead", "id": "f12925:m5"}
{"signature": "def write_temporal_network(gtfs, output_filename, start_time_ut=None, end_time_ut=None):", "body": "util.makedirs(os.path.dirname(os.path.abspath(output_filename)))<EOL>pandas_data_frame = temporal_network(gtfs, start_time_ut=start_time_ut, end_time_ut=end_time_ut)<EOL>pandas_data_frame.to_csv(output_filename, encoding='<STR_LIT:utf-8>', index=False)<EOL>", "docstring": "Parameters\n----------\ngtfs : gtfspy.GTFS\noutput_filename : str\n    path to the directory where to store the extracts\nstart_time_ut: int | None\n    start time of the extract in unixtime (seconds after epoch)\nend_time_ut: int | None\n    end time of the extract in unixtime (seconds after epoch)", "id": "f12925:m7"}
{"signature": "def write_temporal_networks_by_route_type(gtfs, extract_output_dir):", "body": "util.makedirs(extract_output_dir)<EOL>for route_type in route_types.TRANSIT_ROUTE_TYPES:<EOL><INDENT>pandas_data_frame = temporal_network(gtfs, start_time_ut=None, end_time_ut=None, route_type=route_type)<EOL>tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[route_type]<EOL>out_file_name = os.path.join(extract_output_dir, tag + \"<STR_LIT>\")<EOL>pandas_data_frame.to_csv(out_file_name, encoding='<STR_LIT:utf-8>', index=False)<EOL><DEDENT>", "docstring": "Write temporal networks by route type to disk.\n\nParameters\n----------\ngtfs: gtfspy.GTFS\nextract_output_dir: str", "id": "f12925:m6"}
{"signature": "def _frequency_generated_trips_rows(self, gtfs_soure_path, return_df_freq=False):", "body": "df_freq = source_csv_to_pandas(gtfs_soure_path, '<STR_LIT>')<EOL>df_trips = source_csv_to_pandas(gtfs_soure_path, \"<STR_LIT>\")<EOL>df_freq['<STR_LIT>'] = df_freq.apply(lambda row: len(range(str_time_to_day_seconds(row['<STR_LIT>']),<EOL>str_time_to_day_seconds(row['<STR_LIT>']),<EOL>row['<STR_LIT>'])), axis=<NUM_LIT:1>)<EOL>df_trips_freq = pd.merge(df_freq, df_trips, how='<STR_LIT>', on='<STR_LIT>')<EOL>n_freq_generated_trips = int(df_trips_freq['<STR_LIT>'].fillna(<NUM_LIT:1>).sum(axis=<NUM_LIT:0>))<EOL>if return_df_freq:<EOL><INDENT>return df_trips_freq<EOL><DEDENT>else:<EOL><INDENT>return n_freq_generated_trips<EOL><DEDENT>", "docstring": "This function calculates the equivalent rowcounts for trips when\ntaking into account the generated rows in the gtfs object\nParameters\n----------\ngtfs_soure_path: path to the source file\nparam txt: txt file in question\n:return: sum of all trips", "id": "f12927:c0:m5"}
{"signature": "def _validate_danglers(self):", "body": "for query, warning in zip(DANGLER_QUERIES, DANGLER_WARNINGS):<EOL><INDENT>dangler_count = self.gtfs.execute_custom_query(query).fetchone()[<NUM_LIT:0>]<EOL>if dangler_count > <NUM_LIT:0>:<EOL><INDENT>if self.verbose:<EOL><INDENT>print(str(dangler_count) + \"<STR_LIT:U+0020>\" + warning)<EOL><DEDENT>self.warnings_container.add_warning(warning, self.location, count=dangler_count)<EOL><DEDENT><DEDENT>", "docstring": "Checks for rows that are not referenced in the the tables that should be linked\n\nstops <> stop_times using stop_I\nstop_times <> trips <> days, using trip_I\ntrips <> routes, using route_I\n:return:", "id": "f12927:c0:m4"}
{"signature": "def _validate_table_row_counts(self):", "body": "for db_table_name in DB_TABLE_NAME_TO_SOURCE_FILE.keys():<EOL><INDENT>table_name_source_file = DB_TABLE_NAME_TO_SOURCE_FILE[db_table_name]<EOL>row_warning_str = DB_TABLE_NAME_TO_ROWS_MISSING_WARNING[db_table_name]<EOL>database_row_count = self.gtfs.get_row_count(db_table_name)<EOL>source_row_count = <NUM_LIT:0><EOL>for gtfs_source in self.gtfs_sources:<EOL><INDENT>frequencies_in_source = source_csv_to_pandas(gtfs_source, '<STR_LIT>')<EOL>try:<EOL><INDENT>if table_name_source_file == '<STR_LIT>' and not frequencies_in_source.empty:<EOL><INDENT>source_row_count += self._frequency_generated_trips_rows(gtfs_source)<EOL><DEDENT>elif table_name_source_file == '<STR_LIT>' and not frequencies_in_source.empty:<EOL><INDENT>source_row_count += self._compute_number_of_frequency_generated_stop_times(gtfs_source)<EOL><DEDENT>else:<EOL><INDENT>df = source_csv_to_pandas(gtfs_source, table_name_source_file)<EOL>source_row_count += len(df.index)<EOL><DEDENT><DEDENT>except IOError as e:<EOL><INDENT>if hasattr(e, \"<STR_LIT:filename>\") and db_table_name in e.filename:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise e<EOL><DEDENT><DEDENT><DEDENT>if source_row_count == database_row_count and self.verbose:<EOL><INDENT>print(\"<STR_LIT>\" + table_name_source_file + \"<STR_LIT>\"<EOL>+ str(database_row_count) + \"<STR_LIT:)>\")<EOL><DEDENT>else:<EOL><INDENT>difference = database_row_count - source_row_count<EOL>('<STR_LIT>' + str(table_name_source_file) + '<STR_LIT>' + str(source_row_count) +<EOL>'<STR_LIT>' + str(database_row_count) + \"<STR_LIT:)>\")<EOL>if table_name_source_file == \"<STR_LIT>\" and difference > <NUM_LIT:0>:<EOL><INDENT>query = \"<STR_LIT>\"+ str(int(difference)) +\"<STR_LIT>\"<EOL>number_of_entries_added_by_calendar_dates_loader = self.gtfs.execute_custom_query(query).fetchone()[<EOL><NUM_LIT:0>]<EOL>if number_of_entries_added_by_calendar_dates_loader == difference and 
self.verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>if self.verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>self.warnings_container.add_warning(row_warning_str, self.location, difference)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.warnings_container.add_warning(row_warning_str, self.location, difference)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Imports source .txt files, checks row counts and then compares the rowcounts with the gtfsobject\n:return:", "id": "f12927:c0:m2"}
{"signature": "def validate_day_start_ut(conn):", "body": "G = GTFS(conn)<EOL>cur = conn.execute('<STR_LIT>')<EOL>for date, day_start_ut in cur:<EOL><INDENT>assert day_start_ut == G.get_day_start_ut(date)<EOL><DEDENT>", "docstring": "This validates the day_start_ut of the days table.", "id": "f12928:m1"}
{"signature": "def _expand_spatial_bounds_to_fit_axes(bounds, ax_width, ax_height):", "body": "b = bounds<EOL>height_meters = util.wgs84_distance(b['<STR_LIT>'], b['<STR_LIT>'], b['<STR_LIT>'], b['<STR_LIT>'])<EOL>width_meters = util.wgs84_distance(b['<STR_LIT>'], b['<STR_LIT>'], b['<STR_LIT>'], b['<STR_LIT>'])<EOL>x_per_y_meters = width_meters / height_meters<EOL>x_per_y_axes = ax_width / ax_height<EOL>if x_per_y_axes > x_per_y_meters:  <EOL><INDENT>width_meters_new = (height_meters * x_per_y_axes)<EOL>d_lon_new = ((b['<STR_LIT>'] - b['<STR_LIT>']) / width_meters) * width_meters_new<EOL>mean_lon = (b['<STR_LIT>'] + b['<STR_LIT>'])/<NUM_LIT><EOL>lon_min = mean_lon - d_lon_new / <NUM_LIT><EOL>lon_max = mean_lon + d_lon_new / <NUM_LIT><EOL>spatial_bounds = {<EOL>\"<STR_LIT>\": lon_min,<EOL>\"<STR_LIT>\": lon_max,<EOL>\"<STR_LIT>\": b['<STR_LIT>'],<EOL>\"<STR_LIT>\": b['<STR_LIT>']<EOL>}<EOL><DEDENT>else:<EOL><INDENT>height_meters_new = (width_meters / x_per_y_axes)<EOL>d_lat_new = ((b['<STR_LIT>'] - b['<STR_LIT>']) / height_meters) * height_meters_new<EOL>mean_lat = (b['<STR_LIT>'] + b['<STR_LIT>']) / <NUM_LIT><EOL>lat_min = mean_lat - d_lat_new / <NUM_LIT><EOL>lat_max = mean_lat + d_lat_new / <NUM_LIT><EOL>spatial_bounds = {<EOL>\"<STR_LIT>\": b['<STR_LIT>'],<EOL>\"<STR_LIT>\": b['<STR_LIT>'],<EOL>\"<STR_LIT>\": lat_min,<EOL>\"<STR_LIT>\": lat_max<EOL>}<EOL><DEDENT>return spatial_bounds<EOL>", "docstring": "Parameters\n----------\nbounds: dict\nax_width: float\nax_height: float\n\nReturns\n-------\nspatial_bounds", "id": "f12929:m5"}
{"signature": "def plot_route_network_from_gtfs(g, ax=None, spatial_bounds=None, map_alpha=<NUM_LIT>, scalebar=True, legend=True,<EOL>return_smopy_map=False, map_style=None):", "body": "assert(isinstance(g, GTFS))<EOL>route_shapes = g.get_all_route_shapes()<EOL>if spatial_bounds is None:<EOL><INDENT>spatial_bounds = get_spatial_bounds(g, as_dict=True)<EOL><DEDENT>if ax is not None:<EOL><INDENT>bbox = ax.get_window_extent().transformed(ax.figure.dpi_scale_trans.inverted())<EOL>width, height = bbox.width, bbox.height<EOL>spatial_bounds = _expand_spatial_bounds_to_fit_axes(spatial_bounds, width, height)<EOL><DEDENT>return plot_as_routes(route_shapes,<EOL>ax=ax,<EOL>spatial_bounds=spatial_bounds,<EOL>map_alpha=map_alpha,<EOL>plot_scalebar=scalebar,<EOL>legend=legend,<EOL>return_smopy_map=return_smopy_map,<EOL>map_style=map_style)<EOL>", "docstring": "Parameters\n----------\ng: A gtfspy.gtfs.GTFS object\n    Where to get the data from?\nax: matplotlib.Axes object, optional\n    If None, a new figure and an axis is created\nspatial_bounds: dict, optional\n    with str keys: lon_min, lon_max, lat_min, lat_max\nreturn_smopy_map: bool, optional\n    defaulting to false\n\nReturns\n-------\nax: matplotlib.axes.Axes", "id": "f12929:m1"}
{"signature": "def plot_all_stops(g, ax=None, scalebar=False):", "body": "assert(isinstance(g, GTFS))<EOL>lon_min, lon_max, lat_min, lat_max = get_spatial_bounds(g)<EOL>smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)<EOL>if ax is None:<EOL><INDENT>fig = plt.figure()<EOL>ax = fig.add_subplot(<NUM_LIT>)<EOL><DEDENT>ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=<NUM_LIT>)<EOL>stops = g.stops()<EOL>lats = numpy.array(stops['<STR_LIT>'])<EOL>lons = numpy.array(stops['<STR_LIT>'])<EOL>xs, ys = smopy_map.to_pixels(lats, lons)<EOL>ax.scatter(xs, ys, color=\"<STR_LIT>\", s=<NUM_LIT:10>)<EOL>ax.set_xlim(min(xs), max(xs))<EOL>ax.set_ylim(max(ys), min(ys))<EOL>return ax<EOL>", "docstring": "Parameters\n----------\ng: A gtfspy.gtfs.GTFS object\nax: matplotlib.Axes object, optional\n    If None, a new figure and an axis is created, otherwise results are plotted on the axis.\nscalebar: bool, optional\n    Whether to include a scalebar to the plot.\n\nReturns\n-------\nax: matplotlib.Axes", "id": "f12929:m9"}
{"signature": "def plot_as_routes(route_shapes, ax=None, spatial_bounds=None, map_alpha=<NUM_LIT>, plot_scalebar=True, legend=True,<EOL>return_smopy_map=False, line_width_attribute=None, line_width_scale=<NUM_LIT:1.0>, map_style=None):", "body": "lon_min = spatial_bounds['<STR_LIT>']<EOL>lon_max = spatial_bounds['<STR_LIT>']<EOL>lat_min = spatial_bounds['<STR_LIT>']<EOL>lat_max = spatial_bounds['<STR_LIT>']<EOL>if ax is None:<EOL><INDENT>fig = plt.figure()<EOL>ax = fig.add_subplot(<NUM_LIT>)<EOL><DEDENT>smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max, map_style=map_style)<EOL>ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=map_alpha)<EOL>bound_pixel_xs, bound_pixel_ys = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),<EOL>numpy.array([lon_min, lon_max]))<EOL>route_types_to_lines = {}<EOL>for shape in route_shapes:<EOL><INDENT>route_type = ROUTE_TYPE_CONVERSION[shape['<STR_LIT:type>']]<EOL>lats = numpy.array(shape['<STR_LIT>'])<EOL>lons = numpy.array(shape['<STR_LIT>'])<EOL>if line_width_attribute:<EOL><INDENT>line_width = line_width_scale * shape[line_width_attribute]<EOL><DEDENT>else:<EOL><INDENT>line_width = <NUM_LIT:1><EOL><DEDENT>xs, ys = smopy_map.to_pixels(lats, lons)<EOL>line, = ax.plot(xs, ys, linewidth=line_width, color=ROUTE_TYPE_TO_COLOR[route_type], zorder=ROUTE_TYPE_TO_ZORDER[route_type])<EOL>route_types_to_lines[route_type] = line<EOL><DEDENT>if legend:<EOL><INDENT>lines = list(route_types_to_lines.values())<EOL>labels = [ROUTE_TYPE_TO_SHORT_DESCRIPTION[route_type] for route_type in route_types_to_lines.keys()]<EOL>ax.legend(lines, labels, loc=\"<STR_LIT>\")<EOL><DEDENT>if plot_scalebar:<EOL><INDENT>_add_scale_bar(ax, lat_max, lon_min, lon_max, bound_pixel_xs.max() - bound_pixel_xs.min())<EOL><DEDENT>ax.set_xticks([])<EOL>ax.set_yticks([])<EOL>ax.set_xlim(bound_pixel_xs.min(), bound_pixel_xs.max())<EOL>ax.set_ylim(bound_pixel_ys.max(), bound_pixel_ys.min())<EOL>if return_smopy_map:<EOL><INDENT>return ax, 
smopy_map<EOL><DEDENT>else:<EOL><INDENT>return ax<EOL><DEDENT>", "docstring": "Parameters\n----------\nroute_shapes: list of dicts that should have the following keys\n        name, type, agency, lats, lons\n        with types\n        list, list, str, list, list\nax: axis object\nspatial_bounds: dict\nmap_alpha:\nplot_scalebar: bool\nlegend:\nreturn_smopy_map:\nline_width_attribute:\nline_width_scale:\n\nReturns\n-------\nax: matplotlib.axes object", "id": "f12929:m2"}
{"signature": "def validate_and_get_warnings(self):", "body": "self.warnings_container.clear()<EOL>self._validate_stops_with_same_stop_time()<EOL>self._validate_speeds_and_trip_times()<EOL>self._validate_stop_spacings()<EOL>self._validate_stop_sequence()<EOL>self._validate_misplaced_stops()<EOL>return self.warnings_container<EOL>", "docstring": "Validates/checks a given GTFS feed with respect to a number of different issues.\n\nThe set of warnings that are checked for, can be found in the gtfs_validator.ALL_WARNINGS\n\nReturns\n-------\nwarnings: WarningsContainer", "id": "f12930:c0:m1"}
{"signature": "def get_next_photo(self):", "body": "try:<EOL><INDENT>next_photo = Photo.objects.filter(<EOL>gallery=self.gallery,<EOL>created__gt=self.created,<EOL>)[<NUM_LIT:0>]<EOL><DEDENT>except IndexError:<EOL><INDENT>next_photo = Photo.objects.filter(gallery=self.gallery)[<NUM_LIT:0>]<EOL><DEDENT>return next_photo<EOL>", "docstring": "Returns next photo from the same gallery (in chronological order).\n\nWraps around from last photo in the gallery to the first one.", "id": "f12958:c3:m3"}
{"signature": "def __get_rev(self, key, version, **kwa):", "body": "if '<STR_LIT>' in kwa:<EOL><INDENT>doc = kwa['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>if type(version) is int:<EOL><INDENT>if version == <NUM_LIT:0>:<EOL><INDENT>order = pymongo.ASCENDING<EOL><DEDENT>elif version == -<NUM_LIT:1>:<EOL><INDENT>order = pymongo.DESCENDING<EOL><DEDENT>doc = self._collection.find_one({'<STR_LIT:k>': key}, sort=[['<STR_LIT:d>', order]])<EOL><DEDENT>elif type(version) is datetime:<EOL><INDENT>ver = self.__round_time(version)<EOL>doc = self._collection.find_one({'<STR_LIT:k>': key, '<STR_LIT:d>': ver})<EOL><DEDENT><DEDENT>if doc is None:<EOL><INDENT>raise KeyError('<STR_LIT>'<EOL>.format(key, str(version)))<EOL><DEDENT>coded_val = doc['<STR_LIT:v>']<EOL>return pickle.loads(coded_val)<EOL>", "docstring": "Obtain particular version of the doc at key.", "id": "f12967:c0:m1"}
{"signature": "@DictProperty('<STR_LIT>', '<STR_LIT>', read_only=True)<EOL><INDENT>def POST(self):<DEDENT>", "body": "post = FormsDict()<EOL>if not self.content_type.startswith('<STR_LIT>'):<EOL><INDENT>pairs = _parse_qsl(tonat(self._get_body_string(), '<STR_LIT>'))<EOL>for key, value in pairs:<EOL><INDENT>post[key] = value<EOL><DEDENT>return post<EOL><DEDENT>safe_env = {'<STR_LIT>': '<STR_LIT>'}  <EOL>for key in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if key in self.environ: safe_env[key] = self.environ[key]<EOL><DEDENT>args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)<EOL>if py31:<EOL><INDENT>args['<STR_LIT>'] = NCTextIOWrapper(args['<STR_LIT>'],<EOL>encoding='<STR_LIT:utf8>',<EOL>newline='<STR_LIT:\\n>')<EOL><DEDENT>elif py3k:<EOL><INDENT>args['<STR_LIT>'] = '<STR_LIT:utf8>'<EOL><DEDENT>data = cgi.FieldStorage(**args)<EOL>self['<STR_LIT>'] = data  <EOL>data = data.list or []<EOL>for item in data:<EOL><INDENT>if item.filename:<EOL><INDENT>post[item.name] = FileUpload(item.file, item.name,<EOL>item.filename, item.headers)<EOL><DEDENT>else:<EOL><INDENT>post[item.name] = item.value<EOL><DEDENT><DEDENT>return post<EOL>", "docstring": "The values of :attr:`forms` and :attr:`files` combined into a single\n            :class:`FormsDict`. Values are either strings (form values) or\n            instances of :class:`cgi.FieldStorage` (file uploads).", "id": "f12971:c12:m21"}
{"signature": "def static_file(filename, root,<EOL>mimetype='<STR_LIT>',<EOL>download=False,<EOL>charset='<STR_LIT>'):", "body": "root = os.path.abspath(root) + os.sep<EOL>filename = os.path.abspath(os.path.join(root, filename.strip('<STR_LIT>')))<EOL>headers = dict()<EOL>if not filename.startswith(root):<EOL><INDENT>return HTTPError(<NUM_LIT>, \"<STR_LIT>\")<EOL><DEDENT>if not os.path.exists(filename) or not os.path.isfile(filename):<EOL><INDENT>return HTTPError(<NUM_LIT>, \"<STR_LIT>\")<EOL><DEDENT>if not os.access(filename, os.R_OK):<EOL><INDENT>return HTTPError(<NUM_LIT>, \"<STR_LIT>\")<EOL><DEDENT>if mimetype == '<STR_LIT>':<EOL><INDENT>if download and download != True:<EOL><INDENT>mimetype, encoding = mimetypes.guess_type(download)<EOL><DEDENT>else:<EOL><INDENT>mimetype, encoding = mimetypes.guess_type(filename)<EOL><DEDENT>if encoding: headers['<STR_LIT>'] = encoding<EOL><DEDENT>if mimetype:<EOL><INDENT>if mimetype[:<NUM_LIT:5>] == '<STR_LIT>' and charset and '<STR_LIT>' not in mimetype:<EOL><INDENT>mimetype += '<STR_LIT>' % charset<EOL><DEDENT>headers['<STR_LIT:Content-Type>'] = mimetype<EOL><DEDENT>if download:<EOL><INDENT>download = os.path.basename(filename if download == True else download)<EOL>headers['<STR_LIT>'] = '<STR_LIT>' % download<EOL><DEDENT>stats = os.stat(filename)<EOL>headers['<STR_LIT>'] = clen = stats.st_size<EOL>lm = time.strftime(\"<STR_LIT>\", time.gmtime(stats.st_mtime))<EOL>headers['<STR_LIT>'] = lm<EOL>ims = request.environ.get('<STR_LIT>')<EOL>if ims:<EOL><INDENT>ims = parse_date(ims.split(\"<STR_LIT:;>\")[<NUM_LIT:0>].strip())<EOL><DEDENT>if ims is not None and ims >= int(stats.st_mtime):<EOL><INDENT>headers['<STR_LIT>'] = time.strftime(\"<STR_LIT>\",<EOL>time.gmtime())<EOL>return HTTPResponse(status=<NUM_LIT>, **headers)<EOL><DEDENT>body = '<STR_LIT>' if request.method == '<STR_LIT>' else open(filename, '<STR_LIT:rb>')<EOL>headers[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>ranges = request.environ.get('<STR_LIT>')<EOL>if '<STR_LIT>' in 
request.environ:<EOL><INDENT>ranges = list(parse_range_header(request.environ['<STR_LIT>'], clen))<EOL>if not ranges:<EOL><INDENT>return HTTPError(<NUM_LIT>, \"<STR_LIT>\")<EOL><DEDENT>offset, end = ranges[<NUM_LIT:0>]<EOL>headers[\"<STR_LIT>\"] = \"<STR_LIT>\" % (offset, end - <NUM_LIT:1>, clen)<EOL>headers[\"<STR_LIT>\"] = str(end - offset)<EOL>if body: body = _file_iter_range(body, offset, end - offset)<EOL>return HTTPResponse(body, status=<NUM_LIT>, **headers)<EOL><DEDENT>return HTTPResponse(body, **headers)<EOL>", "docstring": "Open a file in a safe way and return :exc:`HTTPResponse` with status\n        code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``,\n        ``Content-Length`` and ``Last-Modified`` headers are set if possible.\n        Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``\n        requests.\n\n        :param filename: Name or path of the file to send.\n        :param root: Root path for file lookups. Should be an absolute directory\n            path.\n        :param mimetype: Defines the content-type header (default: guess from\n            file extension)\n        :param download: If True, ask the browser to open a `Save as...` dialog\n            instead of opening the file with the associated program. You can\n            specify a custom filename as a string. If not specified, the\n            original filename is used (default: False).\n        :param charset: The charset to use for files with a ``text/*``\n            mime-type. (default: UTF-8)", "id": "f12971:m12"}
{"signature": "@cached_property<EOL><INDENT>def filename(self):<DEDENT>", "body": "fname = self.raw_filename<EOL>if not isinstance(fname, unicode):<EOL><INDENT>fname = fname.decode('<STR_LIT:utf8>', '<STR_LIT:ignore>')<EOL><DEDENT>fname = normalize('<STR_LIT>', fname)<EOL>fname = fname.encode('<STR_LIT>', '<STR_LIT:ignore>').decode('<STR_LIT>')<EOL>fname = os.path.basename(fname.replace('<STR_LIT:\\\\>', os.path.sep))<EOL>fname = re.sub(r'<STR_LIT>', '<STR_LIT>', fname).strip()<EOL>fname = re.sub(r'<STR_LIT>', '<STR_LIT:->', fname).strip('<STR_LIT>')<EOL>return fname[:<NUM_LIT:255>] or '<STR_LIT>'<EOL>", "docstring": "Name of the file on the client file system, but normalized to ensure\n            file system compatibility. An empty filename is returned as 'empty'.\n\n            Only ASCII letters, digits, dashes, underscores and dots are\n            allowed in the final filename. Accents are removed, if possible.\n            Whitespace is replaced by a single dash. Leading or tailing dots\n            or dashes are removed. The filename is limited to 255 characters.", "id": "f12971:c32:m1"}
{"signature": "def __getattr__(self, name):", "body": "try:<EOL><INDENT>var = self.environ['<STR_LIT>' % name]<EOL>return var.__get__(self) if hasattr(var, '<STR_LIT>') else var<EOL><DEDENT>except KeyError:<EOL><INDENT>raise AttributeError('<STR_LIT>' % name)<EOL><DEDENT>", "docstring": "Search in self.environ for additional user defined attributes.", "id": "f12971:c12:m44"}
{"signature": "@property<EOL><INDENT>def status_code(self):<DEDENT>", "body": "return self._status_code<EOL>", "docstring": "The HTTP status code as an integer (e.g. 404).", "id": "f12971:c14:m5"}
{"signature": "def get(self, path=None, method='<STR_LIT:GET>', **options):", "body": "return self.route(path, method, **options)<EOL>", "docstring": "Equals :meth:`route`.", "id": "f12971:c11:m17"}
{"signature": "def get_syntax(self):", "body": "return self._syntax<EOL>", "docstring": "Tokens as a space separated string (default: <% %> % {{ }})", "id": "f12971:c62:m1"}
{"signature": "@property<EOL><INDENT>def charset(self, default='<STR_LIT>'):<DEDENT>", "body": "if '<STR_LIT>' in self.content_type:<EOL><INDENT>return self.content_type.split('<STR_LIT>')[-<NUM_LIT:1>].split('<STR_LIT:;>')[<NUM_LIT:0>].strip()<EOL><DEDENT>return default<EOL>", "docstring": "Return the charset specified in the content-type header (default: utf8).", "id": "f12971:c14:m18"}
{"signature": "def get_cookie(self, key, default=None, secret=None):", "body": "value = self.cookies.get(key)<EOL>if secret and value:<EOL><INDENT>dec = cookie_decode(value, secret)  <EOL>return dec[<NUM_LIT:1>] if dec and dec[<NUM_LIT:0>] == key else default<EOL><DEDENT>return value or default<EOL>", "docstring": "Return the content of a cookie. To read a `Signed Cookie`, the\n            `secret` must match the one used to create the cookie (see\n            :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing\n            cookie or wrong signature), return a default value.", "id": "f12971:c12:m9"}
{"signature": "def replace(self, key, value):", "body": "self.dict[key] = [value]<EOL>", "docstring": "Replace the list of values with a single value.", "id": "f12971:c23:m10"}
{"signature": "def __init__(self, fileobj, name, filename, headers=None):", "body": "<EOL>self.file = fileobj<EOL>self.name = name<EOL>self.raw_filename = filename<EOL>self.headers = HeaderDict(headers) if headers else HeaderDict()<EOL>", "docstring": "Wrapper for file uploads.", "id": "f12971:c32:m0"}
{"signature": "def add(self, rule, method, target, name=None):", "body": "anons = <NUM_LIT:0>  <EOL>keys = []  <EOL>pattern = '<STR_LIT>'  <EOL>filters = []  <EOL>builder = []  <EOL>is_static = True<EOL>for key, mode, conf in self._itertokens(rule):<EOL><INDENT>if mode:<EOL><INDENT>is_static = False<EOL>if mode == '<STR_LIT:default>': mode = self.default_filter<EOL>mask, in_filter, out_filter = self.filters[mode](conf)<EOL>if not key:<EOL><INDENT>pattern += '<STR_LIT>' % mask<EOL>key = '<STR_LIT>' % anons<EOL>anons += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>pattern += '<STR_LIT>' % (key, mask)<EOL>keys.append(key)<EOL><DEDENT>if in_filter: filters.append((key, in_filter))<EOL>builder.append((key, out_filter or str))<EOL><DEDENT>elif key:<EOL><INDENT>pattern += re.escape(key)<EOL>builder.append((None, key))<EOL><DEDENT><DEDENT>self.builder[rule] = builder<EOL>if name: self.builder[name] = builder<EOL>if is_static and not self.strict_order:<EOL><INDENT>self.static.setdefault(method, {})<EOL>self.static[method][self.build(rule)] = (target, None)<EOL>return<EOL><DEDENT>try:<EOL><INDENT>re_pattern = re.compile('<STR_LIT>' % pattern)<EOL>re_match = re_pattern.match<EOL><DEDENT>except re.error:<EOL><INDENT>raise RouteSyntaxError(\"<STR_LIT>\" %<EOL>(rule, _e()))<EOL><DEDENT>if filters:<EOL><INDENT>def getargs(path):<EOL><INDENT>url_args = re_match(path).groupdict()<EOL>for name, wildcard_filter in filters:<EOL><INDENT>try:<EOL><INDENT>url_args[name] = wildcard_filter(url_args[name])<EOL><DEDENT>except ValueError:<EOL><INDENT>raise HTTPError(<NUM_LIT>, '<STR_LIT>')<EOL><DEDENT><DEDENT>return url_args<EOL><DEDENT><DEDENT>elif re_pattern.groupindex:<EOL><INDENT>def getargs(path):<EOL><INDENT>return re_match(path).groupdict()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>getargs = None<EOL><DEDENT>flatpat = _re_flatten(pattern)<EOL>whole_rule = (rule, flatpat, target, getargs)<EOL>if (flatpat, method) in self._groups:<EOL><INDENT>if DEBUG:<EOL><INDENT>msg = 
'<STR_LIT>'<EOL>warnings.warn(msg % (method, rule), RuntimeWarning)<EOL><DEDENT>self.dyna_routes[method][<EOL>self._groups[flatpat, method]] = whole_rule<EOL><DEDENT>else:<EOL><INDENT>self.dyna_routes.setdefault(method, []).append(whole_rule)<EOL>self._groups[flatpat, method] = len(self.dyna_routes[method]) - <NUM_LIT:1><EOL><DEDENT>self._compile(method)<EOL>", "docstring": "Add a new rule or replace the target for an existing rule.", "id": "f12971:c9:m3"}
{"signature": "def _cast(self, out, peek=None):", "body": "<EOL>if not out:<EOL><INDENT>if '<STR_LIT>' not in response:<EOL><INDENT>response['<STR_LIT>'] = <NUM_LIT:0><EOL><DEDENT>return []<EOL><DEDENT>if isinstance(out, (tuple, list))and isinstance(out[<NUM_LIT:0>], (bytes, unicode)):<EOL><INDENT>out = out[<NUM_LIT:0>][<NUM_LIT:0>:<NUM_LIT:0>].join(out)  <EOL><DEDENT>if isinstance(out, unicode):<EOL><INDENT>out = out.encode(response.charset)<EOL><DEDENT>if isinstance(out, bytes):<EOL><INDENT>if '<STR_LIT>' not in response:<EOL><INDENT>response['<STR_LIT>'] = len(out)<EOL><DEDENT>return [out]<EOL><DEDENT>if isinstance(out, HTTPError):<EOL><INDENT>out.apply(response)<EOL>out = self.error_handler.get(out.status_code,<EOL>self.default_error_handler)(out)<EOL>return self._cast(out)<EOL><DEDENT>if isinstance(out, HTTPResponse):<EOL><INDENT>out.apply(response)<EOL>return self._cast(out.body)<EOL><DEDENT>if hasattr(out, '<STR_LIT>'):<EOL><INDENT>if '<STR_LIT>' in request.environ:<EOL><INDENT>return request.environ['<STR_LIT>'](out)<EOL><DEDENT>elif hasattr(out, '<STR_LIT>') or not hasattr(out, '<STR_LIT>'):<EOL><INDENT>return WSGIFileWrapper(out)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>iout = iter(out)<EOL>first = next(iout)<EOL>while not first:<EOL><INDENT>first = next(iout)<EOL><DEDENT><DEDENT>except StopIteration:<EOL><INDENT>return self._cast('<STR_LIT>')<EOL><DEDENT>except HTTPResponse:<EOL><INDENT>first = _e()<EOL><DEDENT>except (KeyboardInterrupt, SystemExit, MemoryError):<EOL><INDENT>raise<EOL><DEDENT>except:<EOL><INDENT>if not self.catchall: raise<EOL>first = HTTPError(<NUM_LIT>, '<STR_LIT>', _e(), format_exc())<EOL><DEDENT>if isinstance(first, HTTPResponse):<EOL><INDENT>return self._cast(first)<EOL><DEDENT>elif isinstance(first, bytes):<EOL><INDENT>new_iter = itertools.chain([first], iout)<EOL><DEDENT>elif isinstance(first, unicode):<EOL><INDENT>encoder = lambda x: x.encode(response.charset)<EOL>new_iter = imap(encoder, itertools.chain([first], 
iout))<EOL><DEDENT>else:<EOL><INDENT>msg = '<STR_LIT>' % type(first)<EOL>return self._cast(HTTPError(<NUM_LIT>, msg))<EOL><DEDENT>if hasattr(out, '<STR_LIT>'):<EOL><INDENT>new_iter = _closeiter(new_iter, out.close)<EOL><DEDENT>return new_iter<EOL>", "docstring": "Try to convert the parameter into something WSGI compatible and set\n        correct HTTP headers when possible.\n        Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,\n        iterable of strings and iterable of unicodes", "id": "f12971:c11:m25"}
{"signature": "def push(self, value=None):", "body": "if not isinstance(value, Bottle):<EOL><INDENT>value = Bottle()<EOL><DEDENT>self.append(value)<EOL>return value<EOL>", "docstring": "Add a new :class:`Bottle` instance to the stack", "id": "f12971:c28:m1"}
{"signature": "@cached_property<EOL><INDENT>def call(self):<DEDENT>", "body": "return self._make_callback()<EOL>", "docstring": "The route callback with all plugins applied. This property is\n            created on demand and then cached to speed up subsequent requests.", "id": "f12971:c10:m1"}
{"signature": "def meta_set(self, key, metafield, value):", "body": "self._meta.setdefault(key, {})[metafield] = value<EOL>if key in self:<EOL><INDENT>self[key] = self[key]<EOL><DEDENT>", "docstring": "Set the meta field for a key to a new value. This triggers the\n            on-change handler for existing keys.", "id": "f12971:c27:m8"}
{"signature": "@property<EOL><INDENT>def status_line(self):<DEDENT>", "body": "return self._status_line<EOL>", "docstring": "The HTTP status line as a string (e.g. ``404 Not Found``).", "id": "f12971:c14:m4"}
{"signature": "def decode(self, encoding=None):", "body": "copy = FormsDict()<EOL>enc = copy.input_encoding = encoding or self.input_encoding<EOL>copy.recode_unicode = False<EOL>for key, value in self.allitems():<EOL><INDENT>copy.append(self._fix(key, enc), self._fix(value, enc))<EOL><DEDENT>return copy<EOL>", "docstring": "Returns a copy with all keys and values de- or recoded to match\n            :attr:`input_encoding`. Some libraries (e.g. WTForms) want a\n            unicode dictionary.", "id": "f12971:c24:m1"}
{"signature": "def get_header(self, name, default=None):", "body": "return self._headers.get(_hkey(name), [default])[-<NUM_LIT:1>]<EOL>", "docstring": "Return the value of a previously defined header. If there is no\n            header with that name, return a default value.", "id": "f12971:c14:m13"}
{"signature": "def raw(self, key, default=None):", "body": "return self.environ.get(self._ekey(key), default)<EOL>", "docstring": "Return the header value as is (may be bytes or unicode).", "id": "f12971:c26:m2"}
{"signature": "@DictProperty('<STR_LIT>', '<STR_LIT>', read_only=True)<EOL><INDENT>def query(self):<DEDENT>", "body": "get = self.environ['<STR_LIT>'] = FormsDict()<EOL>pairs = _parse_qsl(self.environ.get('<STR_LIT>', '<STR_LIT>'))<EOL>for key, value in pairs:<EOL><INDENT>get[key] = value<EOL><DEDENT>return get<EOL>", "docstring": "The :attr:`query_string` parsed into a :class:`FormsDict`. These\n            values are sometimes called \"URL arguments\" or \"GET parameters\", but\n            not to be confused with \"URL wildcards\" as they are provided by the\n            :class:`Router`.", "id": "f12971:c12:m10"}
{"signature": "def match(self, environ):", "body": "verb = environ['<STR_LIT>'].upper()<EOL>path = environ['<STR_LIT>'] or '<STR_LIT:/>'<EOL>if verb == '<STR_LIT>':<EOL><INDENT>methods = ['<STR_LIT>', verb, '<STR_LIT:GET>', '<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>methods = ['<STR_LIT>', verb, '<STR_LIT>']<EOL><DEDENT>for method in methods:<EOL><INDENT>if method in self.static and path in self.static[method]:<EOL><INDENT>target, getargs = self.static[method][path]<EOL>return target, getargs(path) if getargs else {}<EOL><DEDENT>elif method in self.dyna_regexes:<EOL><INDENT>for combined, rules in self.dyna_regexes[method]:<EOL><INDENT>match = combined(path)<EOL>if match:<EOL><INDENT>target, getargs = rules[match.lastindex - <NUM_LIT:1>]<EOL>return target, getargs(path) if getargs else {}<EOL><DEDENT><DEDENT><DEDENT><DEDENT>allowed = set([])<EOL>nocheck = set(methods)<EOL>for method in set(self.static) - nocheck:<EOL><INDENT>if path in self.static[method]:<EOL><INDENT>allowed.add(verb)<EOL><DEDENT><DEDENT>for method in set(self.dyna_regexes) - allowed - nocheck:<EOL><INDENT>for combined, rules in self.dyna_regexes[method]:<EOL><INDENT>match = combined(path)<EOL>if match:<EOL><INDENT>allowed.add(method)<EOL><DEDENT><DEDENT><DEDENT>if allowed:<EOL><INDENT>allow_header = \"<STR_LIT:U+002C>\".join(sorted(allowed))<EOL>raise HTTPError(<NUM_LIT>, \"<STR_LIT>\", Allow=allow_header)<EOL><DEDENT>raise HTTPError(<NUM_LIT>, \"<STR_LIT>\" + repr(path))<EOL>", "docstring": "Return a (target, url_args) tuple or raise HTTPError(400/404/405).", "id": "f12971:c9:m6"}
{"signature": "def run(self, **kwargs):", "body": "run(self, **kwargs)<EOL>", "docstring": "Calls :func:`run` with the same parameters.", "id": "f12971:c11:m12"}
{"signature": "def append(self, key, value):", "body": "self.dict.setdefault(key, []).append(value)<EOL>", "docstring": "Add a new value to the list of values for this key.", "id": "f12971:c23:m9"}
{"signature": "def __init__(self,<EOL>source=None,<EOL>name=None,<EOL>lookup=None,<EOL>encoding='<STR_LIT:utf8>', **settings):", "body": "self.name = name<EOL>self.source = source.read() if hasattr(source, '<STR_LIT>') else source<EOL>self.filename = source.filename if hasattr(source, '<STR_LIT:filename>') else None<EOL>self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []<EOL>self.encoding = encoding<EOL>self.settings = self.settings.copy()  <EOL>self.settings.update(settings)  <EOL>if not self.source and self.name:<EOL><INDENT>self.filename = self.search(self.name, self.lookup)<EOL>if not self.filename:<EOL><INDENT>raise TemplateError('<STR_LIT>' % repr(name))<EOL><DEDENT><DEDENT>if not self.source and not self.filename:<EOL><INDENT>raise TemplateError('<STR_LIT>')<EOL><DEDENT>self.prepare(**self.settings)<EOL>", "docstring": "Create a new template.\n        If the source parameter (str or buffer) is missing, the name argument\n        is used to guess a template filename. Subclasses can assume that\n        self.source and/or self.filename are set. Both are strings.\n        The lookup, encoding and settings parameters are stored as instance\n        variables.\n        The lookup parameter stores a list containing directory paths.\n        The encoding parameter should be used to decode byte strings or files.\n        The settings parameter contains a dict for engine-specific settings.", "id": "f12971:c56:m0"}
{"signature": "@DictProperty('<STR_LIT>', '<STR_LIT>', read_only=True)<EOL><INDENT>def params(self):<DEDENT>", "body": "params = FormsDict()<EOL>for key, value in self.query.allitems():<EOL><INDENT>params[key] = value<EOL><DEDENT>for key, value in self.forms.allitems():<EOL><INDENT>params[key] = value<EOL><DEDENT>return params<EOL>", "docstring": "A :class:`FormsDict` with the combined values of :attr:`query` and\n            :attr:`forms`. File uploads are stored in :attr:`files`.", "id": "f12971:c12:m12"}
{"signature": "def _lscmp(a, b):", "body": "return not sum(<NUM_LIT:0> if x == y else <NUM_LIT:1><EOL>for x, y in zip(a, b)) and len(a) == len(b)<EOL>", "docstring": "Compares two strings in a cryptographically safe way:\n        Runtime is not affected by length of common prefix.", "id": "f12971:m19"}
{"signature": "def error(self, code=<NUM_LIT>):", "body": "def wrapper(handler):<EOL><INDENT>self.error_handler[int(code)] = handler<EOL>return handler<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorator: Register an output handler for a HTTP error code", "id": "f12971:c11:m22"}
{"signature": "@classmethod<EOL><INDENT>def global_config(cls, key, *args):<DEDENT>", "body": "if args:<EOL><INDENT>cls.settings = cls.settings.copy()  <EOL>cls.settings[key] = args[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return cls.settings[key]<EOL><DEDENT>", "docstring": "This reads or sets the global settings stored in class.settings.", "id": "f12971:c56:m2"}
{"signature": "@property<EOL><INDENT>def headers(self):<DEDENT>", "body": "hdict = HeaderDict()<EOL>hdict.dict = self._headers<EOL>return hdict<EOL>", "docstring": "An instance of :class:`HeaderDict`, a case-insensitive dict-like\n            view on the response headers.", "id": "f12971:c14:m8"}
{"signature": "def abort(code=<NUM_LIT>, text='<STR_LIT>'):", "body": "raise HTTPError(code, text)<EOL>", "docstring": "Aborts execution and causes a HTTP error.", "id": "f12971:m9"}
{"signature": "def open(self, name, mode='<STR_LIT:r>', *args, **kwargs):", "body": "fname = self.lookup(name)<EOL>if not fname: raise IOError(\"<STR_LIT>\" % name)<EOL>return self.opener(fname, mode=mode, *args, **kwargs)<EOL>", "docstring": "Find a resource and return a file object, or raise IOError.", "id": "f12971:c31:m4"}
{"signature": "def uninstall(self, plugin):", "body": "removed, remove = [], plugin<EOL>for i, plugin in list(enumerate(self.plugins))[::-<NUM_LIT:1>]:<EOL><INDENT>if remove is True or remove is plugin or remove is type(plugin)or getattr(plugin, '<STR_LIT:name>', True) == remove:<EOL><INDENT>removed.append(plugin)<EOL>del self.plugins[i]<EOL>if hasattr(plugin, '<STR_LIT>'): plugin.close()<EOL><DEDENT><DEDENT>if removed: self.reset()<EOL>return removed<EOL>", "docstring": "Uninstall plugins. Pass an instance to remove a specific plugin, a type\n            object to remove all plugins that match that type, a string to remove\n            all plugins with a matching ``name`` attribute or ``True`` to remove all\n            plugins. Return the list of removed plugins.", "id": "f12971:c11:m9"}
{"signature": "def get_header(self, name, default=None):", "body": "return self.headers.get(name, default)<EOL>", "docstring": "Return the value of a request header, or a given default value.", "id": "f12971:c12:m7"}
{"signature": "def copy(self):", "body": "return Request(self.environ.copy())<EOL>", "docstring": "Return a new :class:`Request` with a shallow :attr:`environ` copy.", "id": "f12971:c12:m35"}
{"signature": "def __setitem__(self, key, value):", "body": "if self.environ.get('<STR_LIT>'):<EOL><INDENT>raise KeyError('<STR_LIT>')<EOL><DEDENT>self.environ[key] = value<EOL>todelete = ()<EOL>if key == '<STR_LIT>':<EOL><INDENT>todelete = ('<STR_LIT:body>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>elif key == '<STR_LIT>':<EOL><INDENT>todelete = ('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>elif key.startswith('<STR_LIT>'):<EOL><INDENT>todelete = ('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>for key in todelete:<EOL><INDENT>self.environ.pop('<STR_LIT>' + key, None)<EOL><DEDENT>", "docstring": "Change an environ value and clear all caches that depend on it.", "id": "f12971:c12:m42"}
{"signature": "def make_default_app_wrapper(name):", "body": "@functools.wraps(getattr(Bottle, name))<EOL>def wrapper(*a, **ka):<EOL><INDENT>return getattr(app(), name)(*a, **ka)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Return a callable that relays calls to the current default app.", "id": "f12971:m28"}
{"signature": "@property<EOL><INDENT>def url(self):<DEDENT>", "body": "return self.urlparts.geturl()<EOL>", "docstring": "The full request URI including hostname and scheme. If your app\n            lives behind a reverse proxy or load balancer and you get confusing\n            results, make sure that the ``X-Forwarded-Host`` header is set\n            correctly.", "id": "f12971:c12:m22"}
{"signature": "def render(self, *args, **kwargs):", "body": "env = {}<EOL>stdout = []<EOL>for dictarg in args:<EOL><INDENT>env.update(dictarg)<EOL><DEDENT>env.update(kwargs)<EOL>self.execute(stdout, env)<EOL>return '<STR_LIT>'.join(stdout)<EOL>", "docstring": "Render the template using keyword arguments as local variables.", "id": "f12971:c60:m6"}
{"signature": "def yieldroutes(func):", "body": "path = '<STR_LIT:/>' + func.__name__.replace('<STR_LIT>', '<STR_LIT:/>').lstrip('<STR_LIT:/>')<EOL>spec = getargspec(func)<EOL>argc = len(spec[<NUM_LIT:0>]) - len(spec[<NUM_LIT:3>] or [])<EOL>path += ('<STR_LIT>' * argc) % tuple(spec[<NUM_LIT:0>][:argc])<EOL>yield path<EOL>for arg in spec[<NUM_LIT:0>][argc:]:<EOL><INDENT>path += '<STR_LIT>' % arg<EOL>yield path<EOL><DEDENT>", "docstring": "Return a generator for routes that match the signature (name, args)\n    of the func parameter. This may yield more than one route if the function\n    takes optional keyword arguments. The output is best described by example::\n\n        a()         -> '/a'\n        b(x, y)     -> '/b/<x>/<y>'\n        c(x, y=5)   -> '/c/<x>' and '/c/<x>/<y>'\n        d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'", "id": "f12971:m25"}
{"signature": "def path_shift(script_name, path_info, shift=<NUM_LIT:1>):", "body": "if shift == <NUM_LIT:0>: return script_name, path_info<EOL>pathlist = path_info.strip('<STR_LIT:/>').split('<STR_LIT:/>')<EOL>scriptlist = script_name.strip('<STR_LIT:/>').split('<STR_LIT:/>')<EOL>if pathlist and pathlist[<NUM_LIT:0>] == '<STR_LIT>': pathlist = []<EOL>if scriptlist and scriptlist[<NUM_LIT:0>] == '<STR_LIT>': scriptlist = []<EOL>if <NUM_LIT:0> < shift <= len(pathlist):<EOL><INDENT>moved = pathlist[:shift]<EOL>scriptlist = scriptlist + moved<EOL>pathlist = pathlist[shift:]<EOL><DEDENT>elif <NUM_LIT:0> > shift >= -len(scriptlist):<EOL><INDENT>moved = scriptlist[shift:]<EOL>pathlist = moved + pathlist<EOL>scriptlist = scriptlist[:shift]<EOL><DEDENT>else:<EOL><INDENT>empty = '<STR_LIT>' if shift < <NUM_LIT:0> else '<STR_LIT>'<EOL>raise AssertionError(\"<STR_LIT>\" % empty)<EOL><DEDENT>new_script_name = '<STR_LIT:/>' + '<STR_LIT:/>'.join(scriptlist)<EOL>new_path_info = '<STR_LIT:/>' + '<STR_LIT:/>'.join(pathlist)<EOL>if path_info.endswith('<STR_LIT:/>') and pathlist: new_path_info += '<STR_LIT:/>'<EOL>return new_script_name, new_path_info<EOL>", "docstring": "Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.\n\n        :return: The modified paths.\n        :param script_name: The SCRIPT_NAME path.\n        :param script_name: The PATH_INFO path.\n        :param shift: The number of path fragments to shift. May be negative to\n          change the shift direction. (default: 1)", "id": "f12971:m26"}
{"signature": "def get_undecorated_callback(self):", "body": "func = self.callback<EOL>func = getattr(func, '<STR_LIT>' if py3k else '<STR_LIT>', func)<EOL>closure_attr = '<STR_LIT>' if py3k else '<STR_LIT>'<EOL>while hasattr(func, closure_attr) and getattr(func, closure_attr):<EOL><INDENT>attributes = getattr(func, closure_attr)<EOL>func = attributes[<NUM_LIT:0>].cell_contents<EOL>if not isinstance(func, FunctionType):<EOL><INDENT>func = filter(lambda x: isinstance(x, FunctionType),<EOL>map(lambda x: x.cell_contents, attributes))<EOL>func = list(func)[<NUM_LIT:0>]  <EOL><DEDENT><DEDENT>return func<EOL>", "docstring": "Return the callback. If the callback is a decorated function, try to\n            recover the original function.", "id": "f12971:c10:m6"}
{"signature": "@property<EOL><INDENT>def script_name(self):<DEDENT>", "body": "script_name = self.environ.get('<STR_LIT>', '<STR_LIT>').strip('<STR_LIT:/>')<EOL>return '<STR_LIT:/>' + script_name + '<STR_LIT:/>' if script_name else '<STR_LIT:/>'<EOL>", "docstring": "The initial portion of the URL's `path` that was removed by a higher\n            level (server or routing middleware) before the application was\n            called. This script path is returned with leading and tailing\n            slashes.", "id": "f12971:c12:m26"}
{"signature": "def _get_body_string(self):", "body": "clen = self.content_length<EOL>if clen > self.MEMFILE_MAX:<EOL><INDENT>raise HTTPError(<NUM_LIT>, '<STR_LIT>')<EOL><DEDENT>if clen < <NUM_LIT:0>: clen = self.MEMFILE_MAX + <NUM_LIT:1><EOL>data = self.body.read(clen)<EOL>if len(data) > self.MEMFILE_MAX:  <EOL><INDENT>raise HTTPError(<NUM_LIT>, '<STR_LIT>')<EOL><DEDENT>return data<EOL>", "docstring": "read body until content-length or MEMFILE_MAX into a string. Raise\n            HTTPError(413) on requests that are to large.", "id": "f12971:c12:m18"}
{"signature": "def delete(self, path=None, method='<STR_LIT>', **options):", "body": "return self.route(path, method, **options)<EOL>", "docstring": "Equals :meth:`route` with a ``DELETE`` method parameter.", "id": "f12971:c11:m20"}
{"signature": "def parse_date(ims):", "body": "try:<EOL><INDENT>ts = email.utils.parsedate_tz(ims)<EOL>return time.mktime(ts[:<NUM_LIT:8>] + (<NUM_LIT:0>, )) - (ts[<NUM_LIT:9>] or <NUM_LIT:0>) - time.timezone<EOL><DEDENT>except (TypeError, ValueError, IndexError, OverflowError):<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch.", "id": "f12971:m15"}
{"signature": "def reset(self, route=None):", "body": "if route is None: routes = self.routes<EOL>elif isinstance(route, Route): routes = [route]<EOL>else: routes = [self.routes[route]]<EOL>for route in routes:<EOL><INDENT>route.reset()<EOL><DEDENT>if DEBUG:<EOL><INDENT>for route in routes:<EOL><INDENT>route.prepare()<EOL><DEDENT><DEDENT>self.trigger_hook('<STR_LIT>')<EOL>", "docstring": "Reset all routes (force plugins to be re-applied) and clear all\n            caches. If an ID or route object is given, only that specific route\n            is affected.", "id": "f12971:c11:m10"}
{"signature": "def template(*args, **kwargs):", "body": "tpl = args[<NUM_LIT:0>] if args else None<EOL>adapter = kwargs.pop('<STR_LIT>', SimpleTemplate)<EOL>lookup = kwargs.pop('<STR_LIT>', TEMPLATE_PATH)<EOL>tplid = (id(lookup), tpl)<EOL>if tplid not in TEMPLATES or DEBUG:<EOL><INDENT>settings = kwargs.pop('<STR_LIT>', {})<EOL>if isinstance(tpl, adapter):<EOL><INDENT>TEMPLATES[tplid] = tpl<EOL>if settings: TEMPLATES[tplid].prepare(**settings)<EOL><DEDENT>elif \"<STR_LIT:\\n>\" in tpl or \"<STR_LIT:{>\" in tpl or \"<STR_LIT:%>\" in tpl or '<STR_LIT:$>' in tpl:<EOL><INDENT>TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)<EOL><DEDENT>else:<EOL><INDENT>TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)<EOL><DEDENT><DEDENT>if not TEMPLATES[tplid]:<EOL><INDENT>abort(<NUM_LIT>, '<STR_LIT>' % tpl)<EOL><DEDENT>for dictarg in args[<NUM_LIT:1>:]:<EOL><INDENT>kwargs.update(dictarg)<EOL><DEDENT>return TEMPLATES[tplid].render(kwargs)<EOL>", "docstring": "Get a rendered template as a string iterator.\nYou can use a name, a filename or a template string as first parameter.\nTemplate rendering arguments can be passed as dictionaries\nor directly (as keyword arguments).", "id": "f12971:m32"}
{"signature": "def get_config(self, key, default=None):", "body": "for conf in (self.config, self.app.config):<EOL><INDENT>if key in conf: return conf[key]<EOL><DEDENT>return default<EOL>", "docstring": "Lookup a config field and return its value, first checking the\n            route.config, then route.app.config.", "id": "f12971:c10:m8"}
{"signature": "def __enter__(self):", "body": "default_app.push(self)<EOL>return self<EOL>", "docstring": "Use this application as default for all module-level shortcuts.", "id": "f12971:c11:m28"}
{"signature": "def close(self):", "body": "for plugin in self.plugins:<EOL><INDENT>if hasattr(plugin, '<STR_LIT>'): plugin.close()<EOL><DEDENT>", "docstring": "Close the application and all installed plugins.", "id": "f12971:c11:m11"}
{"signature": "def hook(self, name):", "body": "def decorator(func):<EOL><INDENT>self.add_hook(name, func)<EOL>return func<EOL><DEDENT>return decorator<EOL>", "docstring": "Return a decorator that attaches a callback to a hook. See\n            :meth:`add_hook` for details.", "id": "f12971:c11:m5"}
{"signature": "def getunicode(self, name, default=None, encoding=None):", "body": "try:<EOL><INDENT>return self._fix(self[name], encoding)<EOL><DEDENT>except (UnicodeError, KeyError):<EOL><INDENT>return default<EOL><DEDENT>", "docstring": "Return the value as a unicode string, or the default.", "id": "f12971:c24:m2"}
{"signature": "def prepare(self):", "body": "self.call<EOL>", "docstring": "Do all on-demand work immediately (useful for debugging).", "id": "f12971:c10:m3"}
{"signature": "@DictProperty('<STR_LIT>', '<STR_LIT>', read_only=True)<EOL><INDENT>def app(self):<DEDENT>", "body": "raise RuntimeError('<STR_LIT>')<EOL>", "docstring": "Bottle application handling this request.", "id": "f12971:c12:m1"}
{"signature": "def __call__(self):", "body": "return self[-<NUM_LIT:1>]<EOL>", "docstring": "Return the current default application.", "id": "f12971:c28:m0"}
{"signature": "@DictProperty('<STR_LIT>', '<STR_LIT>', read_only=True)<EOL><INDENT>def forms(self):<DEDENT>", "body": "forms = FormsDict()<EOL>for name, item in self.POST.allitems():<EOL><INDENT>if not isinstance(item, FileUpload):<EOL><INDENT>forms[name] = item<EOL><DEDENT><DEDENT>return forms<EOL>", "docstring": "Form values parsed from an `url-encoded` or `multipart/form-data`\n            encoded POST or PUT request body. The result is returned as a\n            :class:`FormsDict`. All keys and values are strings. File uploads\n            are stored separately in :attr:`files`.", "id": "f12971:c12:m11"}
{"signature": "def html_quote(string):", "body": "return '<STR_LIT>' % html_escape(string).replace('<STR_LIT:\\n>', '<STR_LIT>').replace('<STR_LIT:\\r>', '<STR_LIT>').replace('<STR_LIT:\\t>', '<STR_LIT>')<EOL>", "docstring": "Escape and quote a string to be used as an HTTP attribute.", "id": "f12971:m24"}
{"signature": "def _re_flatten(p):", "body": "if '<STR_LIT:(>' not in p:<EOL><INDENT>return p<EOL><DEDENT>return re.sub(r'<STR_LIT>', lambda m: m.group(<NUM_LIT:0>) if<EOL>len(m.group(<NUM_LIT:1>)) % <NUM_LIT:2> else m.group(<NUM_LIT:1>) + '<STR_LIT>', p)<EOL>", "docstring": "Turn all capturing groups in a regular expression pattern into\n        non-capturing groups.", "id": "f12971:m6"}
{"signature": "def cookie_encode(data, key):", "body": "msg = base64.b64encode(pickle.dumps(data, -<NUM_LIT:1>))<EOL>sig = base64.b64encode(hmac.new(tob(key), msg).digest())<EOL>return tob('<STR_LIT:!>') + sig + tob('<STR_LIT:?>') + msg<EOL>", "docstring": "Encode and sign a pickle-able object. Return a (byte) string", "id": "f12971:m20"}
{"signature": "def install(self, plugin):", "body": "if hasattr(plugin, '<STR_LIT>'): plugin.setup(self)<EOL>if not callable(plugin) and not hasattr(plugin, '<STR_LIT>'):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>self.plugins.append(plugin)<EOL>self.reset()<EOL>return plugin<EOL>", "docstring": "Add a plugin to the list of plugins and prepare it for being\n            applied to all routes of this application. A plugin may be a simple\n            decorator or an object that implements the :class:`Plugin` API.", "id": "f12971:c11:m8"}
{"signature": "def getall(self, key):", "body": "return self.dict.get(key) or []<EOL>", "docstring": "Return a (possibly empty) list of values for a key.", "id": "f12971:c23:m11"}
{"signature": "def build(self, _name, *anons, **query):", "body": "builder = self.builder.get(_name)<EOL>if not builder:<EOL><INDENT>raise RouteBuildError(\"<STR_LIT>\", _name)<EOL><DEDENT>try:<EOL><INDENT>for i, value in enumerate(anons):<EOL><INDENT>query['<STR_LIT>' % i] = value<EOL><DEDENT>url = '<STR_LIT>'.join([f(query.pop(n)) if n else f for (n, f) in builder])<EOL>return url if not query else url + '<STR_LIT:?>' + urlencode(query)<EOL><DEDENT>except KeyError:<EOL><INDENT>raise RouteBuildError('<STR_LIT>' % _e().args[<NUM_LIT:0>])<EOL><DEDENT>", "docstring": "Build an URL by filling the wildcards in a rule.", "id": "f12971:c9:m5"}
{"signature": "def prepare(self, **options):", "body": "raise NotImplementedError<EOL>", "docstring": "Run preparations (parsing, caching, ...).\n        It should be possible to call this again to refresh a template or to\n        update settings.", "id": "f12971:c56:m3"}
{"signature": "def cookie_decode(data, key):", "body": "data = tob(data)<EOL>if cookie_is_encoded(data):<EOL><INDENT>sig, msg = data.split(tob('<STR_LIT:?>'), <NUM_LIT:1>)<EOL>if _lscmp(sig[<NUM_LIT:1>:], base64.b64encode(hmac.new(tob(key), msg).digest())):<EOL><INDENT>return pickle.loads(base64.b64decode(msg))<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Verify and decode an encoded string. Return an object or None.", "id": "f12971:m21"}
{"signature": "def load_config(self, filename):", "body": "conf = ConfigParser()<EOL>conf.read(filename)<EOL>for section in conf.sections():<EOL><INDENT>for key, value in conf.items(section):<EOL><INDENT>if section not in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>key = section + '<STR_LIT:.>' + key<EOL><DEDENT>self[key] = value<EOL><DEDENT><DEDENT>return self<EOL>", "docstring": "Load values from an ``*.ini`` style config file.\n\n            If the config file contains sections, their names are used as\n            namespaces for the values within. The two special sections\n            ``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).", "id": "f12971:c27:m1"}
{"signature": "def reset(self):", "body": "self.__dict__.pop('<STR_LIT>', None)<EOL>", "docstring": "Forget any cached values. The next time :attr:`call` is accessed,\n            all plugins are re-applied.", "id": "f12971:c10:m2"}
{"signature": "def view(tpl_name, **defaults):", "body": "def decorator(func):<EOL><INDENT>@functools.wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>result = func(*args, **kwargs)<EOL>if isinstance(result, (dict, DictMixin)):<EOL><INDENT>tplvars = defaults.copy()<EOL>tplvars.update(result)<EOL>return template(tpl_name, **tplvars)<EOL><DEDENT>elif result is None:<EOL><INDENT>return template(tpl_name, defaults)<EOL><DEDENT>return result<EOL><DEDENT>return wrapper<EOL><DEDENT>return decorator<EOL>", "docstring": "Decorator: renders a template for a handler.\n        The handler can control its behavior like that:\n\n          - return a dict of template vars to fill out the template\n          - return something other than a dict and the view decorator will not\n            process the template, but return the handler result as is.\n            This includes returning a HTTPResponse(dict) to get,\n            for instance, JSON with autojson or other castfilters.", "id": "f12971:m33"}
{"signature": "@DictProperty('<STR_LIT>', '<STR_LIT>', read_only=True)<EOL><INDENT>def json(self):<DEDENT>", "body": "ctype = self.environ.get('<STR_LIT>', '<STR_LIT>').lower().split('<STR_LIT:;>')[<NUM_LIT:0>]<EOL>if ctype == '<STR_LIT:application/json>':<EOL><INDENT>b = self._get_body_string()<EOL>if not b:<EOL><INDENT>return None<EOL><DEDENT>return json_loads(b)<EOL><DEDENT>return None<EOL>", "docstring": "If the ``Content-Type`` header is ``application/json``, this\n            property holds the parsed content of the request body. Only requests\n            smaller than :attr:`MEMFILE_MAX` are processed to avoid memory\n            exhaustion.", "id": "f12971:c12:m14"}
{"signature": "def all_plugins(self):", "body": "unique = set()<EOL>for p in reversed(self.app.plugins + self.plugins):<EOL><INDENT>if True in self.skiplist: break<EOL>name = getattr(p, '<STR_LIT:name>', False)<EOL>if name and (name in self.skiplist or name in unique): continue<EOL>if p in self.skiplist or type(p) in self.skiplist: continue<EOL>if name: unique.add(name)<EOL>yield p<EOL><DEDENT>", "docstring": "Yield all Plugins affecting this route.", "id": "f12971:c10:m4"}
{"signature": "def redirect(url, code=None):", "body": "if not code:<EOL><INDENT>code = <NUM_LIT> if request.get('<STR_LIT>') == \"<STR_LIT>\" else <NUM_LIT><EOL><DEDENT>res = response.copy(cls=HTTPResponse)<EOL>res.status = code<EOL>res.body = \"<STR_LIT>\"<EOL>res.set_header('<STR_LIT>', urljoin(request.url, url))<EOL>raise res<EOL>", "docstring": "Aborts execution and causes a 303 or 302 redirect, depending on\n        the HTTP protocol version.", "id": "f12971:m10"}
{"signature": "@property<EOL><INDENT>def content_type(self):<DEDENT>", "body": "return self.environ.get('<STR_LIT>', '<STR_LIT>').lower()<EOL>", "docstring": "The Content-Type header as a lowercase-string (default: empty).", "id": "f12971:c12:m29"}
{"signature": "def get_url(self, routename, **kargs):", "body": "scriptname = request.environ.get('<STR_LIT>', '<STR_LIT>').strip('<STR_LIT:/>') + '<STR_LIT:/>'<EOL>location = self.router.build(routename, **kargs).lstrip('<STR_LIT:/>')<EOL>return urljoin(urljoin('<STR_LIT:/>', scriptname), location)<EOL>", "docstring": "Return a string that matches a named route", "id": "f12971:c11:m14"}
{"signature": "def get(self, key, default=None, index=-<NUM_LIT:1>, type=None):", "body": "try:<EOL><INDENT>val = self.dict[key][index]<EOL>return type(val) if type else val<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT>return default<EOL>", "docstring": "Return the most recent value for a key.\n\n            :param default: The default value to be returned if the key is not\n                   present or the type conversion fails.\n            :param index: An index for the list of available values.\n            :param type: If defined, this callable is used to cast the value\n                    into a specific type. Exception are suppressed and result in\n                    the default value to be returned.", "id": "f12971:c23:m8"}
{"signature": "@property<EOL><INDENT>def chunked(self):<DEDENT>", "body": "return '<STR_LIT>' in self.environ.get(<EOL>'<STR_LIT>', '<STR_LIT>').lower()<EOL>", "docstring": "True if Chunked transfer encoding was.", "id": "f12971:c12:m20"}
{"signature": "def route(self,<EOL>path=None,<EOL>method='<STR_LIT:GET>',<EOL>callback=None,<EOL>name=None,<EOL>apply=None,<EOL>skip=None, **config):", "body": "if callable(path): path, callback = None, path<EOL>plugins = makelist(apply)<EOL>skiplist = makelist(skip)<EOL>def decorator(callback):<EOL><INDENT>if isinstance(callback, basestring): callback = load(callback)<EOL>for rule in makelist(path) or yieldroutes(callback):<EOL><INDENT>for verb in makelist(method):<EOL><INDENT>verb = verb.upper()<EOL>route = Route(self, rule, verb, callback,<EOL>name=name,<EOL>plugins=plugins,<EOL>skiplist=skiplist, **config)<EOL>self.add_route(route)<EOL><DEDENT><DEDENT>return callback<EOL><DEDENT>return decorator(callback) if callback else decorator<EOL>", "docstring": "A decorator to bind a function to a request URL. Example::\n\n                @app.route('/hello/<name>')\n                def hello(name):\n                    return 'Hello %s' % name\n\n            The ``:name`` part is a wildcard. See :class:`Router` for syntax\n            details.\n\n            :param path: Request path or a list of paths to listen to. If no\n              path is specified, it is automatically generated from the\n              signature of the function.\n            :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of\n              methods to listen to. (default: `GET`)\n            :param callback: An optional shortcut to avoid the decorator\n              syntax. ``route(..., callback=func)`` equals ``route(...)(func)``\n            :param name: The name for this route. (default: None)\n            :param apply: A decorator or plugin or a list of plugins. These are\n              applied to the route callback in addition to installed plugins.\n            :param skip: A list of plugins, plugin classes or names. Matching\n              plugins are not installed to this route. 
``True`` skips all.\n\n            Any additional keyword arguments are stored as route-specific\n            configuration and passed to plugins (see :meth:`Plugin.apply`).", "id": "f12971:c11:m16"}
{"signature": "def exists(self, path):", "body": "", "docstring": "TODO", "id": "f12972:c2:m1"}
{"signature": "def generate_dirlist_html(FS, filepath):", "body": "yield '<STR_LIT>'<EOL>if filepath == '<STR_LIT:/>':<EOL><INDENT>filepath = '<STR_LIT>'<EOL><DEDENT>for name in FS.listdir(filepath):<EOL><INDENT>full_path = pathjoin(filepath, name)<EOL>if FS.isdir(full_path):<EOL><INDENT>full_path = full_path + '<STR_LIT:/>'<EOL><DEDENT>yield u'<STR_LIT>'.format(<EOL>cgi.escape(full_path))  <EOL><DEDENT>yield '<STR_LIT>'<EOL>", "docstring": "Generate directory listing HTML\n\nArguments:\n    FS (FS): filesystem object to read files from\n    filepath (str): path to generate directory listings for\n\nKeyword Arguments:\n    list_dir (callable: list[str]): list file names in a directory\n    isdir (callable: bool): os.path.isdir\n\nYields:\n    str: lines of an HTML table", "id": "f12972:m7"}
{"signature": "def pathjoin(*args, **kwargs):", "body": "log.debug('<STR_LIT>' % list(args))<EOL>def _pathjoin(*args, **kwargs):<EOL><INDENT>len_ = len(args) - <NUM_LIT:1><EOL>if len_ < <NUM_LIT:0>:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>elif len_ == <NUM_LIT:0>:<EOL><INDENT>if not isinstance(args, basestring):<EOL><INDENT>if hasattr(args, '<STR_LIT>'):<EOL><INDENT>_args = args<EOL>_args<EOL>args = args[<NUM_LIT:0>]<EOL><DEDENT><DEDENT><DEDENT>for i, arg in enumerate(args):<EOL><INDENT>if not i:<EOL><INDENT>yield arg.rstrip('<STR_LIT:/>')<EOL><DEDENT>elif i == len_:<EOL><INDENT>yield arg.lstrip('<STR_LIT:/>')<EOL><DEDENT>else:<EOL><INDENT>yield arg.strip('<STR_LIT:/>')<EOL><DEDENT><DEDENT><DEDENT>joined_path = u'<STR_LIT:/>'.join(_pathjoin(*args))<EOL>return sanitize_path(joined_path)<EOL>", "docstring": "Arguments:\n    args (list): *args list of paths\n        if len(args) == 1, args[0] is not a string, and args[0] is iterable,\n        set args to args[0].\n\nBasically::\n\n    joined_path = u'/'.join(\n        [args[0].rstrip('/')] +\n        [a.strip('/') for a in args[1:-1]] +\n        [args[-1].lstrip('/')])", "id": "f12972:m0"}
{"signature": "def get_user(self, username):", "body": "if hasattr(self._bot, '<STR_LIT>'):<EOL><INDENT>user = self._bot.user_manager.get_by_username(username)<EOL>if user:<EOL><INDENT>return user<EOL><DEDENT>user = SlackUser.get_user(self._bot.sc, username)<EOL>self._bot.user_manager.set(user)<EOL>return user<EOL><DEDENT>return SlackUser.get_user(self._bot.sc, username)<EOL>", "docstring": "Utility function to query slack for a particular user\n\n:param username: The username of the user to lookup\n:return: SlackUser object or None", "id": "f12990:c0:m8"}
{"signature": "def send_message(self, channel, text):", "body": "if isinstance(channel, SlackIM) or isinstance(channel, SlackUser):<EOL><INDENT>self._bot.send_im(channel, text)<EOL><DEDENT>elif isinstance(channel, SlackRoom):<EOL><INDENT>self._bot.send_message(channel, text)<EOL><DEDENT>elif isinstance(channel, basestring):<EOL><INDENT>if channel[<NUM_LIT:0>] == '<STR_LIT:@>':<EOL><INDENT>self._bot.send_im(channel[<NUM_LIT:1>:], text)<EOL><DEDENT>elif channel[<NUM_LIT:0>] == '<STR_LIT:#>':<EOL><INDENT>self._bot.send_message(channel[<NUM_LIT:1>:], text)<EOL><DEDENT>else:<EOL><INDENT>self._bot.send_message(channel, text)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._bot.send_message(channel, text)<EOL><DEDENT>", "docstring": "Used to send a message to the specified channel.\n\n* channel - can be a channel or user\n* text - message to send", "id": "f12990:c0:m4"}
{"signature": "def get_channel(self, channel):", "body": "return SlackChannel.get_channel(self._bot.sc, channel)<EOL>", "docstring": "Utility function to query slack for a particular channel\n\n:param channel: The channel name or id of the channel to lookup\n:return: SlackChannel object or None", "id": "f12990:c0:m9"}
{"signature": "def cmd(admin_only=False, acl='<STR_LIT:*>', aliases=None, while_ignored=False, *args, **kwargs):", "body": "def wrapper(func):<EOL><INDENT>func.is_cmd = True<EOL>func.is_subcmd = len(func.__name__.split('<STR_LIT:_>')) > <NUM_LIT:1><EOL>func.cmd_name = func.__name__.replace('<STR_LIT:_>', '<STR_LIT:U+0020>')<EOL>func.admin_only = admin_only<EOL>func.acl = acl<EOL>func.aliases = aliases<EOL>func.while_ignored = while_ignored<EOL>return func<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorator to mark plugin functions as commands in the form of !<cmd_name>\n\n* admin_only - indicates only users in bot_admin are allowed to execute (only used if AuthManager is loaded)\n* acl - indicates which ACL to perform permission checks against (only used if AuthManager is loaded)\n* aliases - register function with additional commands (i.e. !alias1, !alias2, etc)\n* while_ignored - allows a command to be run, even if channel has been !sleep", "id": "f12991:m0"}
{"signature": "def run(self, start=True):", "body": "<EOL>if not self.is_setup:<EOL><INDENT>raise NotSetupError<EOL><DEDENT>self.webserver.start()<EOL>first_connect = True<EOL>try:<EOL><INDENT>while self.runnable:<EOL><INDENT>if self.reconnect_needed:<EOL><INDENT>if not self.sc.rtm_connect(with_team_state=start):<EOL><INDENT>return False<EOL><DEDENT>self.reconnect_needed = False<EOL>if first_connect:<EOL><INDENT>first_connect = False<EOL>self.plugins.connect()<EOL><DEDENT><DEDENT>try:<EOL><INDENT>events = self.sc.rtm_read()<EOL><DEDENT>except AttributeError:<EOL><INDENT>self.log.exception('<STR_LIT>')<EOL>self.runnable = False<EOL>events = []<EOL><DEDENT>except:<EOL><INDENT>self.log.exception('<STR_LIT>')<EOL>self.reconnect_needed = True<EOL>events = []<EOL><DEDENT>for e in events:<EOL><INDENT>try:<EOL><INDENT>self._handle_event(e)<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>self.runnable = False<EOL><DEDENT>except:<EOL><INDENT>self.log.exception('<STR_LIT>')<EOL><DEDENT><DEDENT>sleep(<NUM_LIT:0.1>)<EOL><DEDENT><DEDENT>except KeyboardInterrupt:<EOL><INDENT>pass<EOL><DEDENT>except:<EOL><INDENT>self.log.exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "Connects to slack and enters the main loop.\n\n* start - If True, rtm.start API is used. Else rtm.connect API is used\n\nFor more info, refer to\nhttps://python-slackclient.readthedocs.io/en/latest/real_time_messaging.html#rtm-start-vs-rtm-connect", "id": "f12992:c0:m3"}
{"signature": "def stop(self):", "body": "if self.webserver is not None:<EOL><INDENT>self.webserver.stop()<EOL><DEDENT>if not self.test_mode:<EOL><INDENT>self.plugins.save_state()<EOL><DEDENT>", "docstring": "Does cleanup of bot and plugins.", "id": "f12992:c0:m4"}
{"signature": "def eventhandler(*args, **kwargs):", "body": "def wrapper(func):<EOL><INDENT>if isinstance(kwargs['<STR_LIT>'], basestring):<EOL><INDENT>kwargs['<STR_LIT>'] = [kwargs['<STR_LIT>']]<EOL><DEDENT>func.is_eventhandler = True<EOL>func.events = kwargs['<STR_LIT>']<EOL>return func<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorator.  Marks a function as a receiver for the specified slack event(s).\n\n* events - String or list of events to handle", "id": "f12992:m0"}
{"signature": "def send_im(self, user, text):", "body": "if isinstance(user, SlackUser):<EOL><INDENT>user = user.id<EOL>channelid = self._find_im_channel(user)<EOL><DEDENT>else:<EOL><INDENT>channelid = user.id<EOL><DEDENT>self.send_message(channelid, text)<EOL>", "docstring": "Sends a message to a user as an IM\n\n* user - The user to send to.  This can be a SlackUser object, a user id, or the username (without the @)\n* text - String to send", "id": "f12992:c0:m6"}
{"signature": "def _ignore_event(self, message):", "body": "if hasattr(message, '<STR_LIT>') and message.subtype in self.ignored_events:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "message_replied event is not truly a message event and does not have a message.text\ndon't process such events\n\ncommands may not be idempotent, so ignore message_changed events.", "id": "f12993:c3:m2"}
{"signature": "@cmd()<EOL><INDENT>def alert(self, msg, args):<DEDENT>", "body": "self.send_message(self.config['<STR_LIT>'], '<STR_LIT>')<EOL>return None<EOL>", "docstring": "Alert everyone.", "id": "f12997:c0:m2"}
{"signature": "@cmd()<EOL><INDENT>def shortsleep(self, msg, args):<DEDENT>", "body": "self.start_timer(<NUM_LIT:5>, self._sleep_func)<EOL>", "docstring": "Sleep for a bit, then print a message.", "id": "f12997:c0:m4"}
{"signature": "@cmd()<EOL><INDENT>def shortsleep2(self, msg, args):<DEDENT>", "body": "self.start_timer(<NUM_LIT:5>, self._sleep_func2, msg.channel, '<STR_LIT:U+0020>'.join(args))<EOL>", "docstring": "Sleep for a bit, then echo the message back", "id": "f12997:c0:m5"}
{"signature": "@cmd()<EOL><INDENT>def xyzzy(self, msg, args):<DEDENT>", "body": "return \"<STR_LIT>\" % msg.user<EOL>", "docstring": "Nothing happens.", "id": "f12997:c0:m1"}
{"signature": "def set(self, user):", "body": "self.log.info(\"<STR_LIT>\", user.id, user.username)<EOL>self.load_user_info(user)<EOL>self.log.info(\"<STR_LIT>\", user.id, user.username)<EOL>self.load_user_rights(user)<EOL>self.log.info(\"<STR_LIT>\", user.id, user.username)<EOL>self._add_user_to_cache(user)<EOL>return user<EOL>", "docstring": "Adds a user object to the user manager\n\nuser - a SlackUser object", "id": "f12999:c0:m3"}
{"signature": "def load_user_rights(self, user):", "body": "if user.username in self.admins:<EOL><INDENT>user.is_admin = True<EOL><DEDENT>elif not hasattr(user, '<STR_LIT>'):<EOL><INDENT>user.is_admin = False<EOL><DEDENT>", "docstring": "Sets permissions on user object", "id": "f12999:c0:m6"}
{"signature": "def load_user_info(self, user):", "body": "<EOL>pass<EOL>", "docstring": "Loads additional user information and stores in user object", "id": "f12999:c0:m5"}
{"signature": "def get_by_username(self, username):", "body": "res = filter(lambda x: x.username == username, self.users.values())<EOL>if len(res) > <NUM_LIT:0>:<EOL><INDENT>return res[<NUM_LIT:0>]<EOL><DEDENT>return None<EOL>", "docstring": "Retrieve user by username", "id": "f12999:c0:m2"}
{"signature": "@cmd()<EOL><INDENT>@channel_wrapper<EOL>def sleep(self, channel):<DEDENT>", "body": "self.log.info('<STR_LIT>', channel)<EOL>self._bot.dispatcher.ignore(channel)<EOL>self.send_message(channel, '<STR_LIT>')<EOL>", "docstring": "Causes the bot to ignore all messages from the channel.\n\n        Usage:\n        !sleep [channel name] - ignore the specified channel (or current if none specified)", "id": "f13000:c0:m7"}
{"signature": "@cmd()<EOL><INDENT>def whoami(self, msg, args):<DEDENT>", "body": "output = [\"<STR_LIT>\" % msg.user]<EOL>if hasattr(self._bot.dispatcher, '<STR_LIT>') and msg.user.is_admin is True:<EOL><INDENT>output.append(\"<STR_LIT>\")<EOL><DEDENT>output.append(\"<STR_LIT>\" % (self._bot.version, self._bot.commit))<EOL>return '<STR_LIT:\\n>'.join(output)<EOL>", "docstring": "Prints information about the user and bot version.", "id": "f13000:c0:m6"}
{"signature": "def remove_user_from_acl(self, name, user):", "body": "if name not in self._acl:<EOL><INDENT>return False<EOL><DEDENT>if user in self._acl[name]['<STR_LIT>']:<EOL><INDENT>self._acl[name]['<STR_LIT>'].remove(user)<EOL><DEDENT>if user in self._acl[name]['<STR_LIT>']:<EOL><INDENT>self._acl[name]['<STR_LIT>'].remove(user)<EOL><DEDENT>return True<EOL>", "docstring": "Remove a user from the given acl (both allow and deny).", "id": "f13001:c0:m10"}
{"signature": "def create_acl(self, name):", "body": "if name in self._acl:<EOL><INDENT>return False<EOL><DEDENT>self._acl[name] = {<EOL>'<STR_LIT>': [],<EOL>'<STR_LIT>': []<EOL>}<EOL>return True<EOL>", "docstring": "Create a new acl.", "id": "f13001:c0:m11"}
{"signature": "def _list_networks():", "body": "output = core.run(\"<STR_LIT>\")<EOL>networks = {}<EOL>net_lines = [n.strip() for n in output.splitlines()[<NUM_LIT:2>:]]<EOL>for line in net_lines:<EOL><INDENT>if not line:<EOL><INDENT>continue<EOL><DEDENT>name, state, auto = line.split()<EOL>networks[name] = state == \"<STR_LIT>\"<EOL><DEDENT>return networks<EOL>", "docstring": "Return a dictionary of network name to active status bools.\n\n        Sample virsh net-list output::\n\n    Name                 State      Autostart\n    -----------------------------------------\n    default              active     yes\n    juju-test            inactive   no\n    foobar               inactive   no\n\n    Parsing the above would return::\n    {\"default\": True, \"juju-test\": False, \"foobar\": False}\n\n    See: http://goo.gl/kXwfC", "id": "f13057:m3"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def get_friendly_name(self):<DEDENT>", "body": "return (yield from self.handle_text(self.API.get('<STR_LIT>')))<EOL>", "docstring": "Get the friendly name of the device.", "id": "f13081:c0:m12"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def get_mode_list(self):<DEDENT>", "body": "self.__modes = yield from self.get_modes()<EOL>return (yield from self.collect_labels(self.__modes))<EOL>", "docstring": "Get the label list of the supported modes.", "id": "f13081:c0:m17"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def get_sleep(self):<DEDENT>", "body": "return (yield from self.handle_long(self.API.get('<STR_LIT>')))<EOL>", "docstring": "Check when and if the device is going to sleep.", "id": "f13081:c0:m39"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def get_fsapi_endpoint(self):<DEDENT>", "body": "endpoint = yield from self.__session.get(self.fsapi_device_url, timeout = self.timeout)<EOL>text = yield from endpoint.text(encoding='<STR_LIT:utf-8>')<EOL>doc = objectify.fromstring(text)<EOL>return doc.webfsapi.text<EOL>", "docstring": "Parse the fsapi endpoint from the device url.", "id": "f13081:c0:m2"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def get_play_status(self):<DEDENT>", "body": "status = yield from self.handle_int(self.API.get('<STR_LIT:status>'))<EOL>return self.PLAY_STATES.get(status)<EOL>", "docstring": "Get the play status of the device.", "id": "f13081:c0:m25"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def set_mute(self, value=False):<DEDENT>", "body": "mute = (yield from self.handle_set(self.API.get('<STR_LIT>'), int(value)))<EOL>return bool(mute)<EOL>", "docstring": "Mute or unmute the device.", "id": "f13081:c0:m24"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def get_power(self):<DEDENT>", "body": "power = (yield from self.handle_int(self.API.get('<STR_LIT>')))<EOL>return bool(power)<EOL>", "docstring": "Check if the device is on.", "id": "f13081:c0:m14"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def rewind(self):<DEDENT>", "body": "return (yield from self.play_control(<NUM_LIT:4>))<EOL>", "docstring": "Previous media.", "id": "f13081:c0:m36"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def get_volume(self):<DEDENT>", "body": "return (yield from self.handle_int(self.API.get('<STR_LIT>')))<EOL>", "docstring": "Read the volume level of the device.", "id": "f13081:c0:m21"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def handle_list(self, item):<DEDENT>", "body": "doc = yield from self.call('<STR_LIT>'+item+'<STR_LIT>', dict(<EOL>maxItems=<NUM_LIT:100>,<EOL>))<EOL>if doc is None:<EOL><INDENT>return []<EOL><DEDENT>if not doc.status == '<STR_LIT>':<EOL><INDENT>return []<EOL><DEDENT>ret = list()<EOL>for index, item in enumerate(list(doc.iterchildren('<STR_LIT>'))):<EOL><INDENT>temp = dict(band=index)<EOL>for field in list(item.iterchildren()):<EOL><INDENT>temp[field.get('<STR_LIT:name>')] = list(field.iterchildren()).pop()<EOL><DEDENT>ret.append(temp)<EOL><DEDENT>return ret<EOL>", "docstring": "Helper method for fetching a list(map) value.", "id": "f13081:c0:m10"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def call(self, path, extra=None):<DEDENT>", "body": "try:<EOL><INDENT>if not self.__webfsapi:<EOL><INDENT>self.__webfsapi = yield from self.get_fsapi_endpoint()<EOL><DEDENT>if not self.sid:<EOL><INDENT>self.sid = yield from self.create_session()<EOL><DEDENT>if not isinstance(extra, dict):<EOL><INDENT>extra = dict()<EOL><DEDENT>params = dict(pin=self.pin, sid=self.sid)<EOL>params.update(**extra)<EOL>req_url = ('<STR_LIT>' % (self.__webfsapi, path))<EOL>result = yield from self.__session.get(req_url, params=params,<EOL>timeout = self.timeout)<EOL>if result.status == <NUM_LIT:200>:<EOL><INDENT>text = yield from result.text(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>else:<EOL><INDENT>self.sid = yield from self.create_session()<EOL>params = dict(pin=self.pin, sid=self.sid)<EOL>params.update(**extra)<EOL>result = yield from self.__session.get(req_url, params=params,<EOL>timeout = self.timeout)<EOL>text = yield from result.text(encoding='<STR_LIT:utf-8>')<EOL><DEDENT>return objectify.fromstring(text)<EOL><DEDENT>except Exception as e:<EOL><INDENT>logging.info('<STR_LIT>' +traceback.format_exc())<EOL><DEDENT>return None<EOL>", "docstring": "Execute a frontier silicon API call.", "id": "f13081:c0:m4"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def pause(self):<DEDENT>", "body": "return (yield from self.play_control(<NUM_LIT:2>))<EOL>", "docstring": "Pause playing.", "id": "f13081:c0:m34"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def get_equaliser_list(self):<DEDENT>", "body": "self.__equalisers = yield from self.get_equalisers()<EOL>return (yield from self.collect_labels(self.__equalisers))<EOL>", "docstring": "Get the label list of the supported modes.", "id": "f13081:c0:m38"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def get_play_text(self):<DEDENT>", "body": "return (yield from self.handle_text(self.API.get('<STR_LIT:text>')))<EOL>", "docstring": "Get the text associated with the played media.", "id": "f13081:c0:m27"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def set_friendly_name(self, value):<DEDENT>", "body": "return (yield from self.handle_set(<EOL>self.API.get('<STR_LIT>'), value))<EOL>", "docstring": "Set the friendly name of the device.", "id": "f13081:c0:m13"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def forward(self):<DEDENT>", "body": "return (yield from self.play_control(<NUM_LIT:3>))<EOL>", "docstring": "Next media.", "id": "f13081:c0:m35"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def handle_int(self, item):<DEDENT>", "body": "doc = yield from self.handle_get(item)<EOL>if doc is None:<EOL><INDENT>return None<EOL><DEDENT>return int(doc.value.u8.text) or None<EOL>", "docstring": "Helper method for fetching a integer value.", "id": "f13081:c0:m8"}
{"signature": "@asyncio.coroutine<EOL><INDENT>def get_play_name(self):<DEDENT>", "body": "return (yield from self.handle_text(self.API.get('<STR_LIT:name>')))<EOL>", "docstring": "Get the name of the played item.", "id": "f13081:c0:m26"}
{"signature": "def cross_origin(*args, **kwargs):", "body": "_options = kwargs<EOL>def decorator(f):<EOL><INDENT>LOG.debug(\"<STR_LIT>\", f, _options)<EOL>if _options.get('<STR_LIT>', True):<EOL><INDENT>f.required_methods = getattr(f, '<STR_LIT>', set())<EOL>f.required_methods.add('<STR_LIT>')<EOL>f.provide_automatic_options = False<EOL><DEDENT>def wrapped_function(*args, **kwargs):<EOL><INDENT>options = get_cors_options(current_app, _options)<EOL>if options.get('<STR_LIT>') and request.method == '<STR_LIT>':<EOL><INDENT>resp = current_app.make_default_options_response()<EOL><DEDENT>else:<EOL><INDENT>resp = make_response(f(*args, **kwargs))<EOL><DEDENT>set_cors_headers(resp, options)<EOL>setattr(resp, FLASK_CORS_EVALUATED, True)<EOL>return resp<EOL><DEDENT>return update_wrapper(wrapped_function, f)<EOL><DEDENT>return decorator<EOL>", "docstring": "This function is the decorator which is used to wrap a Flask route with.\nIn the simplest case, simply use the default parameters to allow all\norigins in what is the most permissive configuration. 
If this method\nmodifies state or performs authentication which may be brute-forced, you\nshould add some degree of protection, such as Cross Site Forgery\nRequest protection.\n\n:param origins:\n    The origin, or list of origins to allow requests from.\n    The origin(s) may be regular expressions, case-sensitive strings,\n    or else an asterisk\n\n    Default : '*'\n:type origins: list, string or regex\n\n:param methods:\n    The method or list of methods which the allowed origins are allowed to\n    access for non-simple requests.\n\n    Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE]\n:type methods: list or string\n\n:param expose_headers:\n    The header or list which are safe to expose to the API of a CORS API\n    specification.\n\n    Default : None\n:type expose_headers: list or string\n\n:param allow_headers:\n    The header or list of header field names which can be used when this\n    resource is accessed by allowed origins. The header(s) may be regular\n    expressions, case-sensitive strings, or else an asterisk.\n\n    Default : '*', allow all headers\n:type allow_headers: list, string or regex\n\n:param supports_credentials:\n    Allows users to make authenticated requests. If true, injects the\n    `Access-Control-Allow-Credentials` header in responses. This allows\n    cookies and credentials to be submitted across domains.\n\n    :note: This option cannot be used in conjuction with a '*' origin\n\n    Default : False\n:type supports_credentials: bool\n\n:param max_age:\n    The maximum time for which this CORS request maybe cached. 
This value\n    is set as the `Access-Control-Max-Age` header.\n\n    Default : None\n:type max_age: timedelta, integer, string or None\n\n:param send_wildcard: If True, and the origins parameter is `*`, a wildcard\n    `Access-Control-Allow-Origin` header is sent, rather than the\n    request's `Origin` header.\n\n    Default : False\n:type send_wildcard: bool\n\n:param vary_header:\n    If True, the header Vary: Origin will be returned as per the W3\n    implementation guidelines.\n\n    Setting this header when the `Access-Control-Allow-Origin` is\n    dynamically generated (e.g. when there is more than one allowed\n    origin, and an Origin than '*' is returned) informs CDNs and other\n    caches that the CORS headers are dynamic, and cannot be cached.\n\n    If False, the Vary header will never be injected or altered.\n\n    Default : True\n:type vary_header: bool\n\n:param automatic_options:\n    Only applies to the `cross_origin` decorator. If True, Flask-CORS will\n    override Flask's default OPTIONS handling to return CORS headers for\n    OPTIONS requests.\n\n    Default : True\n:type automatic_options: bool", "id": "f13102:m0"}
{"signature": "def get_cors_options(appInstance, *dicts):", "body": "options = DEFAULT_OPTIONS.copy()<EOL>options.update(get_app_kwarg_dict(appInstance))<EOL>if dicts:<EOL><INDENT>for d in dicts:<EOL><INDENT>options.update(d)<EOL><DEDENT><DEDENT>return serialize_options(options)<EOL>", "docstring": "Compute CORS options for an application by combining the DEFAULT_OPTIONS,\nthe app's configuration-specified options and any dictionaries passed. The\nlast specified option wins.", "id": "f13104:m10"}
{"signature": "def ensure_iterable(inst):", "body": "if isinstance(inst, string_types):<EOL><INDENT>return [inst]<EOL><DEDENT>elif not isinstance(inst, collections.Iterable):<EOL><INDENT>return [inst]<EOL><DEDENT>else:<EOL><INDENT>return inst<EOL><DEDENT>", "docstring": "Wraps scalars or string types as a list, or returns the iterable instance.", "id": "f13104:m14"}
{"signature": "def serialize_options(opts):", "body": "options = (opts or {}).copy()<EOL>for key in opts.keys():<EOL><INDENT>if key not in DEFAULT_OPTIONS:<EOL><INDENT>LOG.warning(\"<STR_LIT>\", key)<EOL><DEDENT><DEDENT>options['<STR_LIT>'] = sanitize_regex_param(options.get('<STR_LIT>'))<EOL>options['<STR_LIT>'] = sanitize_regex_param(options.get('<STR_LIT>'))<EOL>if r'<STR_LIT>' in options['<STR_LIT>'] and options['<STR_LIT>'] and options['<STR_LIT>']:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>serialize_option(options, '<STR_LIT>')<EOL>serialize_option(options, '<STR_LIT>', upper=True)<EOL>if isinstance(options.get('<STR_LIT>'), timedelta):<EOL><INDENT>options['<STR_LIT>'] = str(int(options['<STR_LIT>'].total_seconds()))<EOL><DEDENT>return options<EOL>", "docstring": "A helper method to serialize and processes the options dictionary.", "id": "f13104:m16"}
{"signature": "def re_fix(reg):", "body": "return r'<STR_LIT>' if reg == r'<STR_LIT:*>' else reg<EOL>", "docstring": "Replace the invalid regex r'*' with the valid, wildcard regex r'/.*' to\nenable the CORS app extension to have a more user friendly api.", "id": "f13104:m7"}
{"signature": "def set_cors_headers(resp, options):", "body": "<EOL>if hasattr(resp, FLASK_CORS_EVALUATED):<EOL><INDENT>LOG.debug('<STR_LIT>')<EOL>return resp<EOL><DEDENT>if (not isinstance(resp.headers, Headers)<EOL>and not isinstance(resp.headers, MultiDict)):<EOL><INDENT>resp.headers = MultiDict(resp.headers)<EOL><DEDENT>headers_to_set = get_cors_headers(options, request.headers, request.method)<EOL>LOG.debug('<STR_LIT>', str(headers_to_set))<EOL>for k, v in headers_to_set.items():<EOL><INDENT>resp.headers.add(k, v)<EOL><DEDENT>return resp<EOL>", "docstring": "Performs the actual evaluation of Flas-CORS options and actually\nmodifies the response object.\n\nThis function is used both in the decorator and the after_request\ncallback", "id": "f13104:m5"}
{"signature": "def try_match(request_origin, maybe_regex):", "body": "if isinstance(maybe_regex, RegexObject):<EOL><INDENT>return re.match(maybe_regex, request_origin)<EOL><DEDENT>elif probably_regex(maybe_regex):<EOL><INDENT>return re.match(maybe_regex, request_origin, flags=re.IGNORECASE)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>return request_origin.lower() == maybe_regex.lower()<EOL><DEDENT>except AttributeError:<EOL><INDENT>return request_origin == maybe_regex<EOL><DEDENT><DEDENT>", "docstring": "Safely attempts to match a pattern or string to a request origin.", "id": "f13104:m9"}
{"signature": "@app.route(\"<STR_LIT:/>\")<EOL>def helloWorld():", "body": "return", "docstring": "Since the path '/' does not match the regular expression r'/api/*',\nthis route does not have CORS headers set.", "id": "f13107:m0"}
{"signature": "@app.route(\"<STR_LIT>\", methods=['<STR_LIT:POST>'])<EOL>def create_user():", "body": "return jsonify(success=True)<EOL>", "docstring": "Since the path matches the regular expression r'/api/*', this resource\nautomatically has CORS headers set.\n\nBrowsers will first make a preflight request to verify that the resource\nallows cross-origin POSTs with a JSON Content-Type, which can be simulated\nas:\n$ curl --include -X OPTIONS http://127.0.0.1:5000/api/v1/users/create \\\n    --header Access-Control-Request-Method:POST \\\n    --header Access-Control-Request-Headers:Content-Type \\\n    --header Origin:www.examplesite.com\n>> HTTP/1.0 200 OK\nContent-Type: text/html; charset=utf-8\nAllow: POST, OPTIONS\nAccess-Control-Allow-Origin: *\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT\nContent-Length: 0\nServer: Werkzeug/0.9.6 Python/2.7.9\nDate: Sat, 31 Jan 2015 22:25:22 GMT\n\n\n$ curl --include -X POST http://127.0.0.1:5000/api/v1/users/create \\\n    --header Content-Type:application/json \\\n    --header Origin:www.examplesite.com\n\n\n>> HTTP/1.0 200 OK\nContent-Type: application/json\nContent-Length: 21\nAccess-Control-Allow-Origin: *\nServer: Werkzeug/0.9.6 Python/2.7.9\nDate: Sat, 31 Jan 2015 22:25:04 GMT\n\n{\n  \"success\": true\n}", "id": "f13107:m2"}
{"signature": "@app.route(\"<STR_LIT>\")<EOL>def get_exception():", "body": "raise Exception(\"<STR_LIT>\")<EOL>", "docstring": "Since the path matches the regular expression r'/api/*', this resource\nautomatically has CORS headers set.\n\nBrowsers will first make a preflight request to verify that the resource\nallows cross-origin POSTs with a JSON Content-Type, which can be simulated\nas:\n$ curl --include -X OPTIONS http://127.0.0.1:5000/exception \\\n    --header Access-Control-Request-Method:POST \\\n    --header Access-Control-Request-Headers:Content-Type \\\n    --header Origin:www.examplesite.com\n>> HTTP/1.0 200 OK\nContent-Type: text/html; charset=utf-8\nAllow: POST, OPTIONS\nAccess-Control-Allow-Origin: *\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT\nContent-Length: 0\nServer: Werkzeug/0.9.6 Python/2.7.9\nDate: Sat, 31 Jan 2015 22:25:22 GMT", "id": "f13107:m3"}
{"signature": "@app.route(\"<STR_LIT>\")<EOL>def list_users():", "body": "return jsonify(user=\"<STR_LIT>\")<EOL>", "docstring": "Since the path matches the regular expression r'/api/*', this resource\nautomatically has CORS headers set. The expected result is as follows:\n\n$ curl --include -X GET http://127.0.0.1:5000/api/v1/users/ \\\n    --header Origin:www.examplesite.com\nHTTP/1.0 200 OK\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Origin: *\nContent-Length: 21\nContent-Type: application/json\nDate: Sat, 09 Aug 2014 00:26:41 GMT\nServer: Werkzeug/0.9.4 Python/2.7.8\n\n{\n    \"success\": true\n}", "id": "f13107:m1"}
{"signature": "@public_routes.route(\"<STR_LIT:/>\")<EOL>def helloWorld():", "body": "return '''<STR_LIT>'''<EOL>", "docstring": "Since the path '/' does not match the regular expression r'/api/*',\nthis route does not have CORS headers set.", "id": "f13108:m2"}
{"signature": "@api_v1.route(\"<STR_LIT>\", methods=['<STR_LIT:POST>'])<EOL>def create_user():", "body": "return jsonify(success=True)<EOL>", "docstring": "Since the path matches the regular expression r'/api/*', this resource\nautomatically has CORS headers set.\n\nBrowsers will first make a preflight request to verify that the resource\nallows cross-origin POSTs with a JSON Content-Type, which can be simulated\nas:\n$ curl --include -X OPTIONS http://127.0.0.1:5000/api/v1/users/create \\\n    --header Access-Control-Request-Method:POST \\\n    --header Access-Control-Request-Headers:Content-Type \\\n    --header Origin:www.examplesite.com\n>> HTTP/1.0 200 OK\nContent-Type: text/html; charset=utf-8\nAllow: POST, OPTIONS\nAccess-Control-Allow-Origin: *\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT\nContent-Length: 0\nServer: Werkzeug/0.9.6 Python/2.7.9\nDate: Sat, 31 Jan 2015 22:25:22 GMT\n\n\n$ curl --include -X POST http://127.0.0.1:5000/api/v1/users/create \\\n    --header Content-Type:application/json \\\n    --header Origin:www.examplesite.com\n\n\n>> HTTP/1.0 200 OK\nContent-Type: application/json\nContent-Length: 21\nAccess-Control-Allow-Origin: *\nServer: Werkzeug/0.9.6 Python/2.7.9\nDate: Sat, 31 Jan 2015 22:25:04 GMT\n\n{\n  \"success\": true\n}", "id": "f13108:m1"}
{"signature": "def extract_data_from_file(file_location: str, parser: Callable[[str], Any]=lambda data: data, separator: str=None)-> List[Any]:", "body": "with open(file_location, '<STR_LIT:r>') as file:<EOL><INDENT>contents = file.read()<EOL><DEDENT>if separator is not None:<EOL><INDENT>raw_data = contents.split(separator)<EOL><DEDENT>else:<EOL><INDENT>raw_data = [contents]<EOL><DEDENT>extracted = []<EOL>for item in raw_data:<EOL><INDENT>parsed = parser(item)<EOL>extracted.append(parsed)<EOL><DEDENT>return extracted<EOL>", "docstring": "Extracts data from the file at the given location, using the given parser.\n:param file_location: the location of the file to read data from\n:param parser: the parser to extract data from the file\n:param separator: (optional) separator for data in the file\n:return: the extracted data", "id": "f13111:m2"}
{"signature": "def block_until_synchronised_files_data_source_started(source: SynchronisedFilesDataSource):", "body": "blocked = True<EOL>def unblock(*args):<EOL><INDENT>nonlocal blocked<EOL>blocked = False<EOL><DEDENT>event_handler = FileSystemEventHandler()<EOL>event_handler.on_modified = unblock<EOL>source._observer.schedule(event_handler, source._directory_location, recursive=True)<EOL>temp_file_name = \"<STR_LIT>\" % block_until_synchronised_files_data_source_started.__name__<EOL>temp_file_path = os.path.join(source._directory_location, temp_file_name)<EOL>i = <NUM_LIT:0><EOL>while blocked:<EOL><INDENT>with open(temp_file_path, '<STR_LIT:a>') as file:<EOL><INDENT>file.write(str(i))<EOL><DEDENT>sleep(<NUM_LIT:10> / <NUM_LIT:1000>)<EOL>i += <NUM_LIT:1><EOL><DEDENT>", "docstring": "Blocks until the given synchronised files data source has started to notice changes in the file system (may be a few\nmilliseconds after it has been started).\n:param source: the synchronised files data source that has been started", "id": "f13113:m0"}
{"signature": "def _add_more_data_in_nested_directory(self, number_of_extra_files: int=<NUM_LIT:1>) -> Tuple[str, List[int]]:", "body": "nested_directory_path = os.path.join(self.temp_directory, \"<STR_LIT>\")<EOL>os.makedirs(nested_directory_path)<EOL>more_data = [i for i in range(<NUM_LIT:50>)]<EOL>write_data_to_files_in_temp_directory(more_data, number_of_extra_files, dir=nested_directory_path,<EOL>file_prefix=TestSynchronisedFilesDataSource._FILE_PREFIX)<EOL>return (nested_directory_path, more_data)<EOL>", "docstring": "Adds more data in a directory nested inside the temp directory.\n:param number_of_extra_files: (optional) the number of files to put the new data in inside the nested directory\n:return: a tuple where the first value is the path to the new nested directory and the second is the new data", "id": "f13116:c1:m11"}
{"signature": "def rename(self, key: Any, new_key: Any):", "body": "if new_key == key:<EOL><INDENT>return<EOL><DEDENT>required_locks = [self._key_locks[key], self._key_locks[new_key]]<EOL>ordered_required_locks = sorted(required_locks, key=lambda x: id(x))<EOL>for lock in ordered_required_locks:<EOL><INDENT>lock.acquire()<EOL><DEDENT>try:<EOL><INDENT>if key not in self._data:<EOL><INDENT>raise KeyError(\"<STR_LIT>\" % key)<EOL><DEDENT>self._data[new_key] = self[key]<EOL>del self._data[key]<EOL><DEDENT>finally:<EOL><INDENT>for lock in required_locks:<EOL><INDENT>lock.release()<EOL><DEDENT><DEDENT>", "docstring": "Renames an item in this collection as a transaction.\n\nWill override if new key name already exists.\n:param key: the current name of the item\n:param new_key: the new name that the item should have", "id": "f13127:c1:m1"}
{"signature": "def __init__(self, seq=()):", "body": "self._data = dict(seq)<EOL>self._key_locks = ThreadSafeDefaultdict(Lock)<EOL>", "docstring": "Constructor.\n:param seq: initial metadata items", "id": "f13127:c1:m0"}
{"signature": "def __init__(self, directory_location: str, data_type: type):", "body": "super().__init__(directory_location)<EOL>self._data_type = data_type<EOL>", "docstring": "Constructor.\n:param directory_location: the location of the directory\n:param data_type: the type of data that is loaded from files in the given directory", "id": "f13128:c0:m0"}
{"signature": "def unregister(registerable: Any):", "body": "listenable = registration_event_listenable_map[type(registerable)]<EOL>event = RegistrationEvent(registerable, RegistrationEvent.Type.UNREGISTERED)<EOL>listenable.notify_listeners(event)<EOL>", "docstring": "Unregisters an object, notifying any listeners that may be interested in it.\n:param registerable: the object to unregister", "id": "f13128:m1"}
{"signature": "def _on_file_moved(self, event: FileSystemMovedEvent):", "body": "if not event.is_directory and self.is_data_file(event.src_path):<EOL><INDENT>delete_event = FileSystemEvent(event.src_path)<EOL>delete_event.event_type = EVENT_TYPE_DELETED<EOL>self._on_file_deleted(delete_event)<EOL>create_event = FileSystemEvent(event.dest_path)<EOL>create_event.event_type = EVENT_TYPE_CREATED<EOL>self._on_file_created(create_event)<EOL><DEDENT>", "docstring": "Called when a file in the monitored directory has been moved.\n\nBreaks move down into a delete and a create (which it is sometimes detected as!).\n:param event: the file system event", "id": "f13132:c2:m7"}
{"signature": "def _on_file_modified(self, event: FileSystemEvent):", "body": "if not event.is_directory and self.is_data_file(event.src_path):<EOL><INDENT>assert event.src_path in self._origin_mapped_data<EOL>self._origin_mapped_data[event.src_path] = self.no_error_extract_data_from_file(event.src_path)<EOL>self.notify_listeners(FileSystemChange.MODIFY)<EOL><DEDENT>", "docstring": "Called when a file in the monitored directory has been modified.\n:param event: the file system event", "id": "f13132:c2:m5"}
{"signature": "def stop(self):", "body": "with self._status_lock:<EOL><INDENT>if self._running:<EOL><INDENT>assert self._observer is not None<EOL>self._observer.stop()<EOL>self._running = False<EOL>self._origin_mapped_data = dict()<EOL><DEDENT><DEDENT>", "docstring": "Stops monitoring the predefined directory.", "id": "f13132:c2:m3"}
{"signature": "def _on_file_deleted(self, event: FileSystemEvent):", "body": "if not event.is_directory and self.is_data_file(event.src_path):<EOL><INDENT>assert event.src_path in self._origin_mapped_data<EOL>del(self._origin_mapped_data[event.src_path])<EOL>self.notify_listeners(FileSystemChange.DELETE)<EOL><DEDENT>", "docstring": "Called when a file in the monitored directory has been deleted.\n:param event: the file system event", "id": "f13132:c2:m6"}
{"signature": "@abstractmethod<EOL><INDENT>def extract_data_from_file(self, file_path: str) -> Iterable[DataSourceType]:<DEDENT>", "body": "", "docstring": "Extracts data from the file at the given file path.\n:param file_path: the path to the file to extract data from\n:return: the extracted data", "id": "f13132:c0:m1"}
{"signature": "def _create_client(base_url: str, tls: TLSConfig=False) -> Optional[APIClient]:", "body": "try:<EOL><INDENT>client = APIClient(base_url=base_url, tls=tls, version=\"<STR_LIT>\")<EOL>return client if client.ping() else None<EOL><DEDENT>except:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Creates a Docker client with the given details.\n:param base_url: the base URL of the Docker daemon\n:param tls: the Docker daemon's TLS config (if any)\n:return: the created client else None if unable to connect the client to the daemon", "id": "f13133:m0"}
{"signature": "def create_client() -> APIClient:", "body": "global _client<EOL>client = _client()<EOL>if client is None:<EOL><INDENT>docker_environment = kwargs_from_env(assert_hostname=False)<EOL>if \"<STR_LIT>\" in docker_environment:<EOL><INDENT>client = _create_client(docker_environment.get(\"<STR_LIT>\"), docker_environment.get(\"<STR_LIT>\"))<EOL>if client is None:<EOL><INDENT>raise ConnectionError(<EOL>\"<STR_LIT>\"<EOL>% docker_environment)<EOL><DEDENT>else:<EOL><INDENT>logging.info(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>client = _create_client(\"<STR_LIT>\")<EOL>if client is not None:<EOL><INDENT>logging.info(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>raise ConnectionError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT><DEDENT>_client = weakref.ref(client)<EOL><DEDENT>assert isinstance(client, APIClient)<EOL>return client<EOL>", "docstring": "Clients a Docker client.\n\nWill raise a `ConnectionError` if the Docker daemon is not accessible.\n:return: the Docker client", "id": "f13133:m1"}
{"signature": "def create_temp_directory(self, **mkdtemp_kwargs) -> str:", "body": "kwargs = {**self.default_mkdtemp_kwargs, **mkdtemp_kwargs}<EOL>location = tempfile.mkdtemp(**kwargs)<EOL>self._temp_directories.add(location)<EOL>return location<EOL>", "docstring": "Creates a temp directory.\n:param mkdtemp_kwargs: named arguments to be passed to `tempfile.mkdtemp`\n:return: the location of the temp directory", "id": "f13135:c0:m2"}
{"signature": "def __init__(self, default_mkdtemp_kwargs: dict=None, default_mkstemp_kwargs: dict=None):", "body": "self.default_mkdtemp_kwargs = default_mkdtemp_kwargs if default_mkdtemp_kwargs is not None else {}<EOL>self.default_mkstemp_kwargs = default_mkstemp_kwargs if default_mkstemp_kwargs is not None else {}<EOL>self._temp_directories = set()  <EOL>self._temp_files = set()    <EOL>atexit.register(self.tear_down)<EOL>", "docstring": "Constructor.\n:param default_mkdtemp_kwargs:\n:param default_mkstemp_kwargs:", "id": "f13135:c0:m0"}
{"signature": "def extract_version_number(string: str) -> str:", "body": "matched = _EXTRACT_VERSION_PATTERN.search(string)<EOL>if matched is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return matched.group().replace(\"<STR_LIT:_>\", \"<STR_LIT:.>\")<EOL>", "docstring": "Extracts a version from a string in the form: `.*[0-9]+(_[0-9]+)*.*`, e.g. Irods4_1_9CompatibleController.\n\nIf the string contains multiple version numbers, the first (from left) is extracted.\n\nWill raise a `ValueError` if there is no version number in the given string.\n:param string: the string containing the version number\n:return: the extracted version", "id": "f13138:m2"}
{"signature": "def create_random_string(postfix: str= \"<STR_LIT>\", prefix: str=\"<STR_LIT>\") -> str:", "body": "return \"<STR_LIT>\" % (prefix, uuid4(), postfix)<EOL>", "docstring": "Creates a random string.\n:param postfix: optional postfix\n:param prefix: optional prefix\n:return: created string", "id": "f13138:m0"}
{"signature": "def get_open_port() -> int:", "body": "free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)<EOL>free_socket.bind((\"<STR_LIT>\", <NUM_LIT:0>))<EOL>free_socket.listen(<NUM_LIT:1>)<EOL>port = free_socket.getsockname()[<NUM_LIT:1>]<EOL>free_socket.close()<EOL>return port<EOL>", "docstring": "Gets a PORT that will (probably) be available on the machine.\nIt is possible that in-between the time in which the open PORT of found and when it is used, another process may\nbind to it instead.\n:return: the (probably) available PORT", "id": "f13138:m1"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "self._lock = Lock(*args, **kwargs)<EOL>self._stat_lock = Lock()<EOL>self._waiting = <NUM_LIT:0><EOL>self._locked = False  <EOL>self._last_released = datetime.now()<EOL>", "docstring": "Wraps Lock constructor", "id": "f13142:c0:m0"}
{"signature": "def acquire(self, *args, **kwargs):", "body": "with self._stat_lock:<EOL><INDENT>self._waiting += <NUM_LIT:1><EOL><DEDENT>self._lock.acquire(*args, **kwargs)<EOL>with self._stat_lock:<EOL><INDENT>self._locked = True<EOL>self._waiting -= <NUM_LIT:1><EOL><DEDENT>", "docstring": "Wraps Lock.acquire", "id": "f13142:c0:m1"}
{"signature": "def release(self):", "body": "self._lock.release()<EOL>with self._stat_lock:<EOL><INDENT>self._locked = False<EOL>self._last_released = datetime.now()<EOL><DEDENT>", "docstring": "Wraps Lock.release", "id": "f13142:c0:m2"}
{"signature": "def is_locked(self) -> bool:", "body": "with self._stat_lock:<EOL><INDENT>return self._locked<EOL><DEDENT>", "docstring": "Is the lock currently acquired", "id": "f13142:c0:m4"}
{"signature": "def __init__(self, target: _RegistrationTarget, event_type: Type):", "body": "self.target = target<EOL>self.event_type = event_type<EOL>", "docstring": "Constructor.\n:param target: the object the event refers to\n:param event_type: the type of update event", "id": "f13143:c2:m0"}
{"signature": "def remove_listener(self, listener: Callable[[_ListenableDataType], None]):", "body": "self._listeners.remove(listener)<EOL>", "docstring": "Removes a listener.\n:param listener: the event listener to remove", "id": "f13145:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def get_lower_priority_value(value: int):<DEDENT>", "body": "if value == Priority.MIN_PRIORITY:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return value + <NUM_LIT:1><EOL>", "docstring": "Gets a lower priority value than that given.\n\nWill raise a `ValueError` if already lowest priority value.\n:param value: gets a lower priority value than this\n:return: the lower priority value", "id": "f13146:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def get_higher_priority_value(value: int):<DEDENT>", "body": "if value == Priority.MAX_PRIORITY:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return value - <NUM_LIT:1><EOL>", "docstring": "Gets a higher priority value than that given.\n\nWill raise a `ValueError` if already highest priority value.\n:param value: gets a higher priority value than this\n:return: the higher priority value", "id": "f13146:c0:m0"}
{"signature": "@register.filter(is_safe=False)<EOL>def to_data_string_with_default(value, arg='<STR_LIT>'):", "body": "if isinstance(value, bool):<EOL><INDENT>if value:<EOL><INDENT>return '<STR_LIT:true>'<EOL><DEDENT>return '<STR_LIT:false>'<EOL><DEDENT>return value or arg<EOL>", "docstring": "Given a Python boolean value converts it to string representation so\n    we can use it in HTML data attributes. If value is None use given default\n    or '' if default is not provided.\n\n    -----       ------\n    Value       Output\n    -----       ------\n    True        \"true\"\n    False       \"false\"\n    None        arg", "id": "f13148:m1"}
{"signature": "@register.tag<EOL>def social_widget_render(parser, token):", "body": "bits = token.split_contents()<EOL>tag_name = bits[<NUM_LIT:0>]<EOL>if len(bits) < <NUM_LIT:2>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" %<EOL>tag_name)<EOL><DEDENT>args = []<EOL>kwargs = {}<EOL>bits = bits[<NUM_LIT:1>:]<EOL>if len(bits):<EOL><INDENT>for bit in bits:<EOL><INDENT>match = kwarg_re.match(bit)<EOL>if not match:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" %<EOL>tag_name)<EOL><DEDENT>name, value = match.groups()<EOL>if name:<EOL><INDENT>name = name.replace('<STR_LIT:->', '<STR_LIT:_>')<EOL>kwargs[name] = parser.compile_filter(value)<EOL><DEDENT>else:<EOL><INDENT>args.append(parser.compile_filter(value))<EOL><DEDENT><DEDENT><DEDENT>return SocialWidgetNode(args, kwargs)<EOL>", "docstring": "Renders the selected social widget. You can specify optional settings\n    that will be passed  to widget template.\n\n    Sample usage:\n    {% social_widget_render widget_template ke1=val1 key2=val2 %}\n\n    For example to render Twitter follow button you can use code like this:\n    {% social_widget_render 'twitter/follow_button.html' username=\"ev\" %}", "id": "f13148:m2"}
{"signature": "def _subscribe_mock(self, signal_name, weak):", "body": "callback = mock.Mock()<EOL>decorator = self.model.subscribe(signal_name, weak)<EOL>return decorator(callback)<EOL>", "docstring": "Create a mock callback function and subscribe it to the specific\n        signal on ``self.model``.", "id": "f13173:c0:m4"}
{"signature": "@task<EOL>def mongo(daemon=False, port=<NUM_LIT>):", "body": "cmd = \"<STR_LIT>\".format(port)<EOL>if daemon:<EOL><INDENT>cmd += \"<STR_LIT>\"<EOL><DEDENT>run(cmd)<EOL>", "docstring": "Run the mongod process.", "id": "f13190:m0"}
{"signature": "def find_version(fname):", "body": "version = '<STR_LIT>'<EOL>with open(fname, '<STR_LIT:r>') as fp:<EOL><INDENT>reg = re.compile(r'<STR_LIT>')<EOL>for line in fp:<EOL><INDENT>m = reg.match(line)<EOL>if m:<EOL><INDENT>version = m.group(<NUM_LIT:1>)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>if not version:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>return version<EOL>", "docstring": "Attempts to find the version number in the file names fname.\n    Raises RuntimeError if not found.", "id": "f13191:m1"}
{"signature": "def freeze(value):", "body": "if isinstance(value, list):<EOL><INDENT>return FrozenList(*value)<EOL><DEDENT>if isinstance(value, dict):<EOL><INDENT>return FrozenDict(**value)<EOL><DEDENT>return value<EOL>", "docstring": "Cast value to its frozen counterpart.", "id": "f13196:m0"}
{"signature": "def with_proxies(proxy_map, get_key):", "body": "def wrapper(cls):<EOL><INDENT>for label, ProxiedClass in six.iteritems(proxy_map):<EOL><INDENT>proxy = proxy_factory(cls, label, ProxiedClass, get_key)<EOL>setattr(cls, label, proxy)<EOL><DEDENT>return cls<EOL><DEDENT>return wrapper<EOL>", "docstring": "Class decorator factory; adds proxy class variables to target class.\n\n    :param dict proxy_map: Mapping between class variable labels and proxied\n        classes\n    :param function get_key: Extension-specific key function; may return e.g.\n        the current Flask request", "id": "f13200:m1"}
{"signature": "def proxy_factory(BaseSchema, label, ProxiedClass, get_key):", "body": "def local():<EOL><INDENT>key = get_key()<EOL>try:<EOL><INDENT>return proxies[BaseSchema][label][key]<EOL><DEDENT>except KeyError:<EOL><INDENT>proxies[BaseSchema][label][key] = ProxiedClass()<EOL>return proxies[BaseSchema][label][key]<EOL><DEDENT><DEDENT>return LocalProxy(local)<EOL>", "docstring": "Create a proxy to a class instance stored in ``proxies``.\n\n    :param class BaseSchema: Base schema (e.g. ``StoredObject``)\n    :param str label: Name of class variable to set\n    :param class ProxiedClass: Class to get or create\n    :param function get_key: Extension-specific key function; may return e.g.\n        the current Flask request", "id": "f13200:m0"}
{"signature": "def _to_primary_key(self, value):", "body": "if value is None:<EOL><INDENT>return None<EOL><DEDENT>if isinstance(value, self.base_class):<EOL><INDENT>if not value._is_loaded:<EOL><INDENT>raise exceptions.DatabaseError('<STR_LIT>')<EOL><DEDENT>return value._primary_key<EOL><DEDENT>return self.base_class._to_primary_key(value)<EOL>", "docstring": "Return primary key; if value is StoredObject, verify\nthat it is loaded.", "id": "f13209:c0:m4"}
{"signature": "@classmethod<EOL><INDENT>def delegate(cls, method, conflict=None, *args, **kwargs):<DEDENT>", "body": "if cls.queue.active:<EOL><INDENT>action = WriteAction(method, *args, **kwargs)<EOL>if conflict:<EOL><INDENT>logger.warn('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(action))<EOL><DEDENT>cls.queue.push(action)<EOL><DEDENT>else:<EOL><INDENT>method(*args, **kwargs)<EOL><DEDENT>", "docstring": "Execute or queue a database action. Variable positional and keyword\n        arguments are passed to the provided method.\n\n        :param function method: Method to execute or queue\n        :param bool conflict: Potential conflict between cache_sandbox and backend,\n            e.g., in the event of bulk updates or removes that bypass the\n            cache_sandbox", "id": "f13215:c2:m51"}
{"signature": "def warn_if_detached(func):", "body": "@wraps(func)<EOL>def wrapped(this, *args, **kwargs):<EOL><INDENT>if '<STR_LIT>' in this.__dict__ and this._detached:<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL><DEDENT>return func(this, *args, **kwargs)<EOL><DEDENT>return wrapped<EOL>", "docstring": "Warn if self / cls is detached.", "id": "f13215:m3"}
{"signature": "@classmethod<EOL><INDENT>@has_storage<EOL>def remove_one(cls, which, rm=True):<DEDENT>", "body": "<EOL>obj = cls._which_to_obj(which)<EOL>rm_fwd_refs(obj)<EOL>rm_back_refs(obj)<EOL>cls._clear_caches(obj._storage_key)<EOL>if rm:<EOL><INDENT>cls.delegate(<EOL>cls._storage[<NUM_LIT:0>].remove,<EOL>False,<EOL>RawQuery(obj._primary_name, '<STR_LIT>', obj._storage_key)<EOL>)<EOL><DEDENT>obj._detached = True<EOL>", "docstring": "Remove an object, along with its references and back-references.\n        Remove the object from the cache_sandbox and sets its _detached flag to True.\n\n        :param which: Object selector: Query, StoredObject, or primary key\n        :param rm: Remove data from backend", "id": "f13215:c2:m64"}
{"signature": "@classmethod<EOL><INDENT>@has_storage<EOL>@log_storage<EOL>def load(cls, key=None, data=None, _is_loaded=True):<DEDENT>", "body": "<EOL>signals.load.send(<EOL>cls,<EOL>key=key,<EOL>data=data,<EOL>)<EOL>if key is not None:<EOL><INDENT>key = cls._check_pk_type(key)<EOL>cached_object = cls._load_from_cache(key)<EOL>if cached_object is not None:<EOL><INDENT>return cached_object<EOL><DEDENT><DEDENT>if data is None:<EOL><INDENT>data = cls._storage[<NUM_LIT:0>].get(cls._primary_name, cls._pk_to_storage(key))<EOL><DEDENT>if data is None:<EOL><INDENT>return None<EOL><DEDENT>data = cls.from_storage(data)<EOL>if cls._version_of and '<STR_LIT>' in data and data['<STR_LIT>'] != cls._version:<EOL><INDENT>old_object = cls._version_of.load(data=data)<EOL>new_object = cls(_is_loaded=_is_loaded)<EOL>cls.migrate(old_object, new_object)<EOL>new_object._stored_key = new_object._primary_key<EOL>return new_object<EOL><DEDENT>ret = cls(_is_loaded=_is_loaded, **data)<EOL>ret._stored_key = ret._primary_key<EOL>return ret<EOL>", "docstring": "Get a record by its primary key.", "id": "f13215:c2:m33"}
{"signature": "@classmethod<EOL><INDENT>def subscribe(cls, signal_name, weak=True):<DEDENT>", "body": "try:<EOL><INDENT>signal = getattr(signals, signal_name)<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'.format(signal_name)<EOL>)<EOL><DEDENT>sender = None if cls._is_root else cls<EOL>return signal.connect_via(sender, weak)<EOL>", "docstring": ":param str signal_name: Name of signal to subscribe to; must be found\n    in ``signals.py``.\n:param bool weak: Create weak reference to callback\n:returns: Decorator created by ``Signal::connect_via``\n:raises: ValueError if signal is not found\n\nExample usage: ::\n\n    >>> @Schema.subscribe('before_save')\n    ... def listener(cls, instance):\n    ...     instance.value += 1", "id": "f13215:c2:m56"}
{"signature": "@classmethod<EOL><INDENT>@has_storage<EOL>def remove(cls, query=None):<DEDENT>", "body": "objs = cls.find(query)<EOL>for obj in objs:<EOL><INDENT>cls.remove_one(obj, rm=False)<EOL><DEDENT>cls.delegate(<EOL>cls._storage[<NUM_LIT:0>].remove,<EOL>False,<EOL>query<EOL>)<EOL>", "docstring": "Remove objects by query.\n\n        :param query: Query object", "id": "f13215:c2:m65"}
{"signature": "@classmethod<EOL><INDENT>def migrate(cls, old, new, verbose=True, dry_run=False, rm_refs=True):<DEDENT>", "body": "if verbose:<EOL><INDENT>logging.basicConfig(format='<STR_LIT>',<EOL>level=logging.DEBUG)<EOL><DEDENT>deleted_fields = [field for field in old._fields if field not in new._fields]<EOL>added_fields = [field for field in new._fields if field not in old._fields]<EOL>logging.info('<STR_LIT>'.format(deleted_fields))<EOL>logging.info('<STR_LIT>'.format(added_fields))<EOL>if old._primary_name != new._primary_name:<EOL><INDENT>logging.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>.format(old_name=old._primary_name,<EOL>old_field=old._fields[old._primary_name],<EOL>new_name=new._primary_name,<EOL>new_field=new._fields[new._primary_name]))<EOL><DEDENT>for field in old._fields:<EOL><INDENT>if field not in cls._fields:<EOL><INDENT>if rm_refs:<EOL><INDENT>logging.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(name=field,<EOL>field=old._fields[field]))<EOL>if not dry_run:<EOL><INDENT>rm_fwd_refs(old)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logging.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(name=field,<EOL>field=old._fields[field]))<EOL><DEDENT>continue<EOL><DEDENT>old_field_obj = old._fields[field]<EOL>new_field_obj = new._fields[field]<EOL>if old_field_obj != new_field_obj:<EOL><INDENT>if not old_field_obj._required and new_field_obj._required:<EOL><INDENT>logging.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>.format(name=field))<EOL><DEDENT>else:<EOL><INDENT>logging.info(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(name=field,<EOL>old_field=old_field_obj,<EOL>new_field=new_field_obj)<EOL>)<EOL><DEDENT>continue<EOL><DEDENT>if not dry_run:<EOL><INDENT>field_object = 
cls._fields[field]<EOL>field_object.__set__(<EOL>new,<EOL>getattr(old, field),<EOL>safe=True<EOL>)<EOL><DEDENT><DEDENT>if not dry_run:<EOL><INDENT>new.__backrefs = old.__backrefs<EOL><DEDENT>if not dry_run:<EOL><INDENT>cls._migrate(old, new)<EOL><DEDENT>", "docstring": "Migrate record to new schema.\n\n        :param old: Record from original schema\n        :param new: Record from new schema\n        :param verbose: Print detailed info\n        :param dry_run: Dry run; make no changes if true\n        :param rm_refs: Remove references on deleted fields", "id": "f13215:c2:m35"}
{"signature": "def _optimistic_insert(self, primary_name, value, n=<NUM_LIT:5>):", "body": "while True:<EOL><INDENT>try:<EOL><INDENT>key = self._generate_random_id(n)<EOL>value[primary_name] = key<EOL>self.insert(primary_name, key, value)<EOL>break<EOL><DEDENT>except KeyExistsException:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return key<EOL>", "docstring": "Attempt to insert with randomly generated key until insert\n        is successful.\n\n        :param str primary_name: The name of the primary key.\n        :param dict value: The dictionary representation of the record.\n        :param n: Number of characters in random key", "id": "f13217:c3:m2"}
{"signature": "def _generate_random_id(self, n=<NUM_LIT:5>):", "body": "alphabet = '<STR_LIT>'<EOL>return '<STR_LIT>'.join(random.sample(alphabet, n))<EOL>", "docstring": "Generated random alphanumeric key.\n\n        :param n: Number of characters in random key", "id": "f13217:c3:m1"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def get(self, primary_name, key):<DEDENT>", "body": "pass<EOL>", "docstring": "Get a single record.\n\n        :param str primary_name: The name of the primary key.\n        :param key: The value of the primary key.", "id": "f13217:c3:m5"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def update(self, query, data):<DEDENT>", "body": "pass<EOL>", "docstring": "Update multiple records with new data.\n\n        :param query: A query object.\n        :param dict data: Dictionary of key:value pairs.", "id": "f13217:c3:m4"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def flush(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Flush the database.", "id": "f13217:c3:m7"}
{"signature": "def __call__(self):", "body": "while self.pos < self.max_pos:<EOL><INDENT>match = self.find_match()<EOL>self.pos = match.end()<EOL><DEDENT>return self.root()<EOL>", "docstring": "Parse the given string data and sequentually update the current\n        cursor position until the end is reached.\n\n        Return the Root object if successful.", "id": "f13238:c0:m16"}
{"signature": "@log_callback<EOL><INDENT>def parse_dashes(self, match):<DEDENT>", "body": "raise IgnoredMatchException<EOL>", "docstring": "Ignore lines that contain three dash symbols.", "id": "f13238:c0:m5"}
{"signature": "@log_callback<EOL><INDENT>def parse_comment(self, match):<DEDENT>", "body": "raise IgnoredMatchException<EOL>", "docstring": "Ignore line comments.", "id": "f13238:c0:m3"}
{"signature": "def assert_command_id(self, request, target_id):<EOL>", "body": "<EOL>command_id = request.ams_header.command_id<EOL>command_id = struct.unpack('<STR_LIT>', command_id)[<NUM_LIT:0>]<EOL>self.assertEqual(command_id, target_id)<EOL>", "docstring": "Assert command_id and target_id.", "id": "f13247:c0:m3"}
{"signature": "@classmethod<EOL><INDENT>def setUpClass(cls):<EOL><DEDENT>", "body": "cls.test_server = AdsTestServer(logging=True)<EOL>cls.test_server.start()<EOL>time.sleep(<NUM_LIT:1>)<EOL>", "docstring": "Setup the ADS testserver.", "id": "f13247:c0:m0"}
{"signature": "def close(self):<EOL>", "body": "self.stop()<EOL>", "docstring": "Close the server thread.", "id": "f13251:c0:m4"}
{"signature": "def __exit__(self, exc_type, exc_value, traceback):<EOL>", "body": "self.close()<EOL>", "docstring": "Exit context.", "id": "f13251:c0:m2"}
{"signature": "def stop(self):<EOL>", "body": "if self._run:<EOL><INDENT>logger.info(<EOL>\"<STR_LIT>\".format(*self.client_address)<EOL>)<EOL>self._run = False<EOL><DEDENT>self.join()<EOL>", "docstring": "Stop the client thread.", "id": "f13251:c1:m1"}
{"signature": "def run(self):<EOL>", "body": "self._run = True<EOL>self.server.listen(<NUM_LIT:5>)<EOL>logger.info(<EOL>\"<STR_LIT>\".format(<EOL>self.ip_address or \"<STR_LIT:localhost>\", self.port<EOL>)<EOL>)<EOL>while self._run:<EOL><INDENT>ready, _, _ = select.select([self.server], [], [], <NUM_LIT:0.1>)<EOL>if ready:<EOL><INDENT>try:<EOL><INDENT>client, address = self.server.accept()<EOL><DEDENT>except:<EOL><INDENT>continue<EOL><DEDENT>logger.info(\"<STR_LIT>\".format(*address))<EOL>client_thread = AdsClientConnection(<EOL>handler=self.handler, client=client, address=address, server=self<EOL>)<EOL>client_thread.daemon = True<EOL>client_thread.start()<EOL>self.clients.append(client_thread)<EOL><DEDENT><DEDENT>", "docstring": "Listen for incoming connections from clients.", "id": "f13251:c0:m5"}
{"signature": "def construct_request(self, request_bytes):<EOL>", "body": "data = request_bytes  <EOL>tcp_header = AmsTcpHeader(data[<NUM_LIT:2>:<NUM_LIT:6>])<EOL>ams_header = AmsHeader(<EOL>data[<NUM_LIT:6>:<NUM_LIT:12>],<EOL>data[<NUM_LIT:12>:<NUM_LIT>],<EOL>data[<NUM_LIT>:<NUM_LIT:20>],<EOL>data[<NUM_LIT:20>:<NUM_LIT>],<EOL>data[<NUM_LIT>:<NUM_LIT>],<EOL>data[<NUM_LIT>:<NUM_LIT>],<EOL>data[<NUM_LIT>:<NUM_LIT:30>],<EOL>data[<NUM_LIT:30>:<NUM_LIT>],<EOL>data[<NUM_LIT>:<NUM_LIT>],<EOL>data[<NUM_LIT>:],<EOL>)<EOL>return AmsPacket(tcp_header, ams_header)<EOL>", "docstring": "Unpack an AMS packet from binary data.\n\n        :param bytes request_bytes: The raw request data\n        :rtype AmsPacket:\n        :return: AmsPacket with fields populated from the binary data", "id": "f13251:c1:m5"}
{"signature": "def handle_request(self, request):<EOL>", "body": "<EOL>command_id_bytes = request.ams_header.command_id<EOL>command_id = struct.unpack(\"<STR_LIT>\", command_id_bytes)[<NUM_LIT:0>]<EOL>state = struct.unpack(\"<STR_LIT>\", request.ams_header.state_flags)[<NUM_LIT:0>]<EOL>state = state | <NUM_LIT>  <EOL>state = struct.pack(\"<STR_LIT>\", state)<EOL>def handle_read_device_info():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>logger.info(\"<STR_LIT>\")<EOL>major_version = \"<STR_LIT>\".encode(\"<STR_LIT:utf-8>\")<EOL>minor_version = \"<STR_LIT>\".encode(\"<STR_LIT:utf-8>\")<EOL>version_build = \"<STR_LIT>\".encode(\"<STR_LIT:utf-8>\")<EOL>device_name = \"<STR_LIT>\".encode(\"<STR_LIT:utf-8>\")<EOL>response_content = (<EOL>major_version + minor_version + version_build + device_name<EOL>)<EOL>return response_content<EOL><DEDENT>def handle_read():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>data = request.ams_header.data<EOL>index_group = struct.unpack(\"<STR_LIT>\", data[:<NUM_LIT:4>])[<NUM_LIT:0>]<EOL>index_offset = struct.unpack(\"<STR_LIT>\", data[<NUM_LIT:4>:<NUM_LIT:8>])[<NUM_LIT:0>]<EOL>plc_datatype = struct.unpack(\"<STR_LIT>\", data[<NUM_LIT:8>:<NUM_LIT:12>])[<NUM_LIT:0>]<EOL>logger.info(<EOL>(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>).format(index_group, index_offset, plc_datatype)<EOL>)<EOL>if index_group == constants.ADSIGRP_SYM_VALBYHND:<EOL><INDENT>response_value = self._named_data[index_offset].value<EOL><DEDENT>else:<EOL><INDENT>response_value = self._data[(index_group, index_offset)][:plc_datatype]<EOL><DEDENT>return struct.pack(\"<STR_LIT>\", len(response_value)) + response_value<EOL><DEDENT>def handle_write():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>data = request.ams_header.data<EOL>index_group = struct.unpack(\"<STR_LIT>\", data[:<NUM_LIT:4>])[<NUM_LIT:0>]<EOL>index_offset = struct.unpack(\"<STR_LIT>\", data[<NUM_LIT:4>:<NUM_LIT:8>])[<NUM_LIT:0>]<EOL>plc_datatype = struct.unpack(\"<STR_LIT>\", data[<NUM_LIT:8>:<NUM_LIT:12>])[<NUM_LIT:0>]<EOL>value = 
data[<NUM_LIT:12>:(<NUM_LIT:12> + plc_datatype)]<EOL>logger.info(<EOL>(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>).format(index_group, index_offset, plc_datatype, value)<EOL>)<EOL>if index_group == constants.ADSIGRP_SYM_RELEASEHND:<EOL><INDENT>return b\"<STR_LIT>\"<EOL><DEDENT>elif index_group == constants.ADSIGRP_SYM_VALBYHND:<EOL><INDENT>self._named_data[index_offset].value = value<EOL>return b\"<STR_LIT>\"<EOL><DEDENT>self._data[(index_group, index_offset)] = value<EOL>return b\"<STR_LIT>\"<EOL><DEDENT>def handle_read_write():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>data = request.ams_header.data<EOL>index_group = struct.unpack(\"<STR_LIT>\", data[:<NUM_LIT:4>])[<NUM_LIT:0>]<EOL>index_offset = struct.unpack(\"<STR_LIT>\", data[<NUM_LIT:4>:<NUM_LIT:8>])[<NUM_LIT:0>]<EOL>read_length = struct.unpack(\"<STR_LIT>\", data[<NUM_LIT:8>:<NUM_LIT:12>])[<NUM_LIT:0>]<EOL>write_length = struct.unpack(\"<STR_LIT>\", data[<NUM_LIT:12>:<NUM_LIT:16>])[<NUM_LIT:0>]<EOL>write_data = data[<NUM_LIT:16>:(<NUM_LIT:16> + write_length)]<EOL>logger.info(<EOL>(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>).format(<EOL>index_group, index_offset, read_length, write_length, write_data<EOL>)<EOL>)<EOL>if index_group == constants.ADSIGRP_SYM_HNDBYNAME:<EOL><INDENT>var_name = write_data.decode()<EOL>names = [x.name for x in self._named_data]<EOL>try:<EOL><INDENT>handle = names.index(var_name)<EOL><DEDENT>except ValueError:<EOL><INDENT>self._named_data.append(PLCVariable(name=var_name, value=bytes(<NUM_LIT:16>)))<EOL>handle = len(self._named_data) - <NUM_LIT:1><EOL><DEDENT>read_data = struct.pack(\"<STR_LIT>\", handle)<EOL><DEDENT>else:<EOL><INDENT>read_data = self._data[(index_group, index_offset)][:read_length]<EOL>self._data[(index_group, index_offset)] = write_data<EOL><DEDENT>return struct.pack(\"<STR_LIT>\", len(read_data)) + read_data<EOL><DEDENT>def handle_read_state():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>logger.info(\"<STR_LIT>\")<EOL>ads_state = struct.pack(\"<STR_LIT>\", 
constants.ADSSTATE_RUN)<EOL>device_state = struct.pack(\"<STR_LIT>\", <NUM_LIT:0>)<EOL>return ads_state + device_state<EOL><DEDENT>def handle_writectrl():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>logger.info(\"<STR_LIT>\")<EOL>return b\"<STR_LIT>\"<EOL><DEDENT>def handle_add_devicenote():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>logger.info(\"<STR_LIT>\")<EOL>handle = (\"<STR_LIT>\" * <NUM_LIT:4>).encode(\"<STR_LIT:utf-8>\")<EOL>return handle<EOL><DEDENT>def handle_delete_devicenote():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>logger.info(\"<STR_LIT>\")<EOL>return b\"<STR_LIT>\"<EOL><DEDENT>def handle_devicenote():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>logger.info(\"<STR_LIT>\")<EOL>return b\"<STR_LIT>\"<EOL><DEDENT>function_map = {<EOL>constants.ADSCOMMAND_READDEVICEINFO: handle_read_device_info,<EOL>constants.ADSCOMMAND_READ: handle_read,<EOL>constants.ADSCOMMAND_WRITE: handle_write,<EOL>constants.ADSCOMMAND_READWRITE: handle_read_write,<EOL>constants.ADSCOMMAND_READSTATE: handle_read_state,<EOL>constants.ADSCOMMAND_WRITECTRL: handle_writectrl,<EOL>constants.ADSCOMMAND_ADDDEVICENOTE: handle_add_devicenote,<EOL>constants.ADSCOMMAND_DELDEVICENOTE: handle_delete_devicenote,<EOL>constants.ADSCOMMAND_DEVICENOTE: handle_devicenote,<EOL>}<EOL>try:<EOL><INDENT>response_content = function_map[command_id]()<EOL><DEDENT>except KeyError:<EOL><INDENT>logger.info(\"<STR_LIT>\".format(hex(command_id)))<EOL>error_code = \"<STR_LIT>\".encode(\"<STR_LIT:utf-8>\")<EOL>return AmsResponseData(state, error_code, \"<STR_LIT>\".encode(\"<STR_LIT:utf-8>\"))<EOL><DEDENT>error_code = (\"<STR_LIT:\\x00>\" * <NUM_LIT:4>).encode(\"<STR_LIT:utf-8>\")<EOL>response_data = error_code + response_content<EOL>return AmsResponseData(state, request.ams_header.error_code, response_data)<EOL>", "docstring": "Handle incoming requests and create a response.", "id": "f13251:c5:m1"}
{"signature": "def PLCTYPE_ARR_INT(n):<EOL>", "body": "return c_int16 * n<EOL>", "docstring": "Return an array with n int16 values.", "id": "f13253:m2"}
{"signature": "def PLCTYPE_ARR_SHORT(n):<EOL>", "body": "return c_short * n<EOL>", "docstring": "Return an array with n short values.", "id": "f13253:m4"}
{"signature": "def PLCTYPE_ARR_REAL(n):<EOL>", "body": "return c_float * n<EOL>", "docstring": "Return an array with n float values.", "id": "f13253:m0"}
{"signature": "def PLCTYPE_ARR_LREAL(n):<EOL>", "body": "return c_double * n<EOL>", "docstring": "Return an array with n double values.", "id": "f13253:m1"}
{"signature": "def read_by_name(self, data_name, plc_datatype, return_ctypes=False):<EOL>", "body": "if self._port:<EOL><INDENT>return adsSyncReadByNameEx(self._port, self._adr, data_name, plc_datatype, return_ctypes)<EOL><DEDENT>return None<EOL>", "docstring": "Read data synchronous from an ADS-device from data name.\n\n        :param string data_name: data name\n        :param int plc_datatype: type of the data given to the PLC, according\n            to PLCTYPE constants\n            :return: value: **value**\n        :param bool return_ctypes: return ctypes instead of python types if True\n            (default: False)", "id": "f13258:c0:m12"}
{"signature": "def set_local_address(ams_netid):<EOL>", "body": "if isinstance(ams_netid, str):<EOL><INDENT>ams_netid_st = _parse_ams_netid(ams_netid)<EOL><DEDENT>else:<EOL><INDENT>ams_netid_st = ams_netid<EOL><DEDENT>assert isinstance(ams_netid_st, SAmsNetId)<EOL>if linux:<EOL><INDENT>return adsSetLocalAddress(ams_netid_st)<EOL><DEDENT>else:<EOL><INDENT>raise ADSError(<EOL>text=\"<STR_LIT>\"<EOL>)<EOL><DEDENT>", "docstring": "Set the local NetID (**Linux only**).\n\n    :param str: new AmsNetID\n    :rtype: None\n\n    **Usage:**\n\n        >>> import pyads\n        >>> pyads.open_port()\n        >>> pyads.set_local_address('0.0.0.0.1.1')", "id": "f13258:m4"}
{"signature": "def notification(self, plc_datatype=None):<EOL>", "body": "def notification_decorator(func):<EOL><INDENT>def func_wrapper(notification, data_name):<EOL><INDENT>contents = notification.contents<EOL>data = contents.data<EOL>data_size = contents.cbSampleSize<EOL>datatype_map = {<EOL>PLCTYPE_BOOL: \"<STR_LIT>\",<EOL>PLCTYPE_BYTE: \"<STR_LIT>\",<EOL>PLCTYPE_DINT: \"<STR_LIT>\",<EOL>PLCTYPE_DWORD: \"<STR_LIT>\",<EOL>PLCTYPE_INT: \"<STR_LIT>\",<EOL>PLCTYPE_LREAL: \"<STR_LIT>\",<EOL>PLCTYPE_REAL: \"<STR_LIT>\",<EOL>PLCTYPE_SINT: \"<STR_LIT>\",<EOL>PLCTYPE_UDINT: \"<STR_LIT>\",<EOL>PLCTYPE_UINT: \"<STR_LIT>\",<EOL>PLCTYPE_USINT: \"<STR_LIT>\",<EOL>PLCTYPE_WORD: \"<STR_LIT>\",<EOL>}  <EOL>if plc_datatype == PLCTYPE_STRING:<EOL><INDENT>dest = (c_ubyte * data_size)()<EOL>memmove(addressof(dest), addressof(data), data_size)<EOL>value = bytearray(dest).split(b\"<STR_LIT>\", <NUM_LIT:1>)[<NUM_LIT:0>].decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>elif issubclass(plc_datatype, Structure):<EOL><INDENT>value = plc_datatype()<EOL>fit_size = min(data_size, sizeof(value))<EOL>memmove(addressof(value), addressof(data), fit_size)<EOL><DEDENT>elif plc_datatype not in datatype_map:<EOL><INDENT>value = data<EOL><DEDENT>else:<EOL><INDENT>value = struct.unpack(<EOL>datatype_map[plc_datatype], bytearray(data)[:data_size]<EOL>)[<NUM_LIT:0>]<EOL><DEDENT>dt = filetime_to_dt(contents.nTimeStamp)<EOL>return func(contents.hNotification, data_name, dt, value)<EOL><DEDENT>return func_wrapper<EOL><DEDENT>return notification_decorator<EOL>", "docstring": "Decorate a callback function.\n\n        **Decorator**.\n\n        A decorator that can be used for callback functions in order to\n        convert the data of the NotificationHeader into the fitting\n        Python type.\n\n        :param plc_datatype: The PLC datatype that needs to be converted. 
This can\n        be any basic PLC datatype or a `ctypes.Structure`.\n\n        The callback functions need to be of the following type:\n\n        >>> def callback(handle, name, timestamp, value)\n\n        * `handle`: the notification handle\n        * `name`: the variable name\n        * `timestamp`: the timestamp as datetime value\n        * `value`: the converted value of the variable\n\n        **Usage**:\n\n            >>> import pyads\n            >>>\n            >>> plc = pyads.Connection('172.18.3.25.1.1', 851)\n            >>>\n            >>>\n            >>> @plc.notification(pyads.PLCTYPE_STRING)\n            >>> def callback(handle, name, timestamp, value):\n            >>>     print(handle, name, timestamp, value)\n            >>>\n            >>>\n            >>> with plc:\n            >>>    attr = pyads.NotificationAttrib(20,\n            >>>                                    pyads.ADSTRANS_SERVERCYCLE)\n            >>>    handles = plc.add_device_notification('GVL.test', attr,\n            >>>                                          callback)\n            >>>    while True:\n            >>>        pass", "id": "f13258:c0:m18"}
{"signature": "def set_timeout(ms):<EOL>", "body": "if port is not None:<EOL><INDENT>return adsSyncSetTimeoutEx(port, ms)<EOL><DEDENT>", "docstring": "Set timeout.", "id": "f13258:m17"}
{"signature": "def write_control(self, ads_state, device_state, data, plc_datatype):<EOL>", "body": "if self._port is not None:<EOL><INDENT>return adsSyncWriteControlReqEx(<EOL>self._port, self._adr, ads_state, device_state, data, plc_datatype<EOL>)<EOL><DEDENT>", "docstring": "Change the ADS state and the machine-state of the ADS-server.\n\n        :param int ads_state: new ADS-state, according to ADSTATE constants\n        :param int device_state: new machine-state\n        :param data: additional data\n        :param int plc_datatype: datatype, according to PLCTYPE constants\n\n        :note: Despite changing the ADS-state and the machine-state it is\n            possible to send additional data to the ADS-server. For current\n            ADS-devices additional data is not progressed.\n            Every ADS-device is able to communicate its current state to other\n            devices. There is a difference between the device-state and the\n            state of the ADS-interface (AdsState). The possible states of an\n            ADS-interface are defined in the ADS-specification.", "id": "f13258:c0:m7"}
{"signature": "def del_device_notification(adr, notification_handle, user_handle):<EOL>", "body": "if port is not None:<EOL><INDENT>return adsSyncDelDeviceNotificationReqEx(<EOL>port, adr, notification_handle, user_handle<EOL>)<EOL><DEDENT>", "docstring": "Remove a device notification.\n\n    :param pyads.structs.AmsAddr adr: AMS Address associated with the routing\n        entry which is to be removed from the router.\n    :param notification_handle: address of the variable that contains\n        the handle of the notification\n    :param user_handle: user handle", "id": "f13258:m16"}
{"signature": "def __exit__(self, _type, _val, _traceback):<EOL>", "body": "self.close()<EOL>", "docstring": "Close on leaving with-block.", "id": "f13258:c0:m2"}
{"signature": "def add_device_notification(adr, data_name, attr, callback, user_handle=None):<EOL>", "body": "if port is not None:<EOL><INDENT>return adsSyncAddDeviceNotificationReqEx(<EOL>port, adr, data_name, attr, callback, user_handle<EOL>)<EOL><DEDENT>return None<EOL>", "docstring": "Add a device notification.\n\n    :param pyads.structs.AmsAddr adr: AMS Address associated with the routing\n        entry which is to be removed from the router.\n    :param str data_name: PLC storage address\n    :param pyads.structs.NotificationAttrib attr: object that contains\n        all the attributes for the definition of a notification\n    :param callback: callback function that gets executed on in the event\n        of a notification\n\n    :rtype: (int, int)\n    :returns: notification handle, user handle\n\n    Save the notification handle and the user handle on creating a\n    notification if you want to be able to remove the notification\n    later in your code.", "id": "f13258:m15"}
{"signature": "def __enter__(self):<EOL>", "body": "self.open()<EOL>return self<EOL>", "docstring": "Open on entering with-block.", "id": "f13258:c0:m1"}
{"signature": "def read_device_info(self):<EOL>", "body": "if self._port is not None:<EOL><INDENT>return adsSyncReadDeviceInfoReqEx(self._port, self._adr)<EOL><DEDENT>return None<EOL>", "docstring": "Read the name and the version number of the ADS-server.\n\n        :rtype: string, AdsVersion\n        :return: device name, version", "id": "f13258:c0:m8"}
{"signature": "def get_local_address():<EOL>", "body": "if port is not None:<EOL><INDENT>return adsGetLocalAddressEx(port)<EOL><DEDENT>return None<EOL>", "docstring": "Return the local AMS-address and the port number.\n\n    :rtype: AmsAddr", "id": "f13258:m3"}
{"signature": "def delete_route(adr):<EOL>", "body": "return adsDelRoute(adr.netIdStruct())<EOL>", "docstring": "Remove existing route from the AMS Router (Linux Only).\n\n    :param pyads.structs.AmsAddr adr: AMS Address associated with the routing\n        entry which is to be removed from the router.", "id": "f13258:m14"}
{"signature": "@property<EOL><INDENT>def is_open(self):<EOL><DEDENT>", "body": "return self._open<EOL>", "docstring": "Show the current connection state.\n\n        :return: True if connection is open", "id": "f13258:c0:m16"}
{"signature": "def close_port():<EOL>", "body": "global port<EOL>if port is not None:<EOL><INDENT>adsPortCloseEx(port)<EOL>port = None<EOL><DEDENT>", "docstring": "Close the connection to the TwinCAT message router.", "id": "f13258:m2"}
{"signature": "def get_local_address(self):<EOL>", "body": "if self._port is not None:<EOL><INDENT>return adsGetLocalAddressEx(self._port)<EOL><DEDENT>return None<EOL>", "docstring": "Return the local AMS-address and the port number.\n\n        :rtype: AmsAddr", "id": "f13258:c0:m5"}
{"signature": "def read(self, index_group, index_offset, plc_datatype, return_ctypes=False):<EOL>", "body": "if self._port is not None:<EOL><INDENT>return adsSyncReadReqEx2(<EOL>self._port, self._adr, index_group, index_offset, plc_datatype, return_ctypes<EOL>)<EOL><DEDENT>return None<EOL>", "docstring": "Read data synchronous from an ADS-device.\n\n        :param int index_group: PLC storage area, according to the INDEXGROUP\n            constants\n        :param int index_offset: PLC storage address\n        :param int plc_datatype: type of the data given to the PLC, according\n            to PLCTYPE constants\n            :return: value: **value**\n        :param bool return_ctypes: return ctypes instead of python types if True\n            (default: False)", "id": "f13258:c0:m11"}
{"signature": "def read_write(<EOL>adr,<EOL>index_group,<EOL>index_offset,<EOL>plc_read_datatype,<EOL>value,<EOL>plc_write_datatype,<EOL>return_ctypes=False,<EOL>):<EOL>", "body": "if port is not None:<EOL><INDENT>return adsSyncReadWriteReqEx2(<EOL>port,<EOL>adr,<EOL>index_group,<EOL>index_offset,<EOL>plc_read_datatype,<EOL>value,<EOL>plc_write_datatype,<EOL>return_ctypes,<EOL>)<EOL><DEDENT>return None<EOL>", "docstring": "Read and write data synchronous from/to an ADS-device.\n\n    :param AmsAddr adr: local or remote AmsAddr\n    :param int index_group: PLC storage area, according to the INDEXGROUP\n        constants\n    :param int index_offset: PLC storage address\n    :param Type plc_read_datatype: type of the data given to the PLC to respond\n            to, according to PLCTYPE constants\n        :param value: value to write to the storage address of the PLC\n    :param Type plc_write_datatype: type of the data given to the PLC, according to\n        PLCTYPE constants\n    :param bool return_ctypes: return ctypes instead of python types if True\n        (default: False)\n    :rtype: PLCTYPE\n    :return: value: **value**", "id": "f13258:m9"}
{"signature": "def read_by_name(adr, data_name, plc_datatype, return_ctypes=False):<EOL>", "body": "if port is not None:<EOL><INDENT>return adsSyncReadByNameEx(port, adr, data_name, plc_datatype, return_ctypes)<EOL><DEDENT>return None<EOL>", "docstring": "Read data synchronous from an ADS-device from data name.\n\n    :param AmsAddr adr: local or remote AmsAddr\n    :param string data_name: data name\n    :param int plc_datatype: type of the data given to the PLC, according to\n        PLCTYPE constants\n    :param bool return_ctypes: return ctypes instead of python types if True\n        (default: False)\n    :return: value: **value**", "id": "f13258:m11"}
{"signature": "@property<EOL><INDENT>def port(self):<EOL><DEDENT>", "body": "return self._ams_addr.port<EOL>", "docstring": "Port of the AmsAddress object.", "id": "f13260:c4:m4"}
{"signature": "def __repr__(self):<EOL>", "body": "return '<STR_LIT>'.format(self.netid, self.port)<EOL>", "docstring": "Return object name.", "id": "f13260:c4:m9"}
{"signature": "def __init__(self, netid=None, port=None):<EOL>", "body": "self._ams_addr = SAmsAddr()<EOL>if netid is not None:<EOL><INDENT>self.netid = netid<EOL><DEDENT>if port is not None:<EOL><INDENT>self.port = port<EOL><DEDENT>", "docstring": "Create a new AmsAddr object by a given netid and port.\n\n        :param netid: NetId of an ADS device\n        :param port: port of an ADS device", "id": "f13260:c4:m0"}
{"signature": "@property<EOL><INDENT>def length(self):<EOL><DEDENT>", "body": "return self._attrib.cbLength<EOL>", "docstring": "Notification data length.", "id": "f13260:c5:m2"}
{"signature": "def __repr__(self):<EOL>", "body": "return ('<STR_LIT>'<EOL>.format(self.length, self.trans_mode, self.max_delay,<EOL>self.cycle_time))<EOL>", "docstring": "Return object name.", "id": "f13260:c5:m10"}
{"signature": "def __init__(self, length, trans_mode=ADSTRANS_SERVERONCHA,<EOL>max_delay=<NUM_LIT>, cycle_time=<NUM_LIT>):<EOL>", "body": "self._attrib = SAdsNotificationAttrib()<EOL>if length:<EOL><INDENT>self._attrib.cbLength = length<EOL><DEDENT>if trans_mode:<EOL><INDENT>self._attrib.nTransMode = trans_mode<EOL><DEDENT>if max_delay:<EOL><INDENT>self._attrib.nMaxDelay = int(max_delay * <NUM_LIT>)<EOL><DEDENT>if cycle_time:<EOL><INDENT>self._attrib.nCycleTime = int(cycle_time * <NUM_LIT>)<EOL><DEDENT>", "docstring": "Create a new NotificationAttrib object.\n\n        :param int length: length of the data\n        :param int trans_mode: transmission mode\n        :param float max_delay: maximum delay in ms\n        :param float cycle_time: cycle time in ms", "id": "f13260:c5:m0"}
{"signature": "def netIdStruct(self):<EOL>", "body": "return self._ams_addr.netId<EOL>", "docstring": "Return the c-types structure SAmsNetId.", "id": "f13260:c4:m7"}
{"signature": "def notificationAttribStruct(self):<EOL>", "body": "return self._attrib<EOL>", "docstring": "Return the raw struct.", "id": "f13260:c5:m1"}
{"signature": "def toString(self):<EOL>", "body": "return self.netid + \"<STR_LIT>\" + str(self._ams_addr.port)<EOL>", "docstring": "Textual representation of the AMS address.\n\n        :rtype: string\n        :return:  textual representation of the AMS adress", "id": "f13260:c4:m1"}
{"signature": "def adsSyncWriteByNameEx(port, address, data_name, value, data_type):<EOL>", "body": "<EOL>handle = adsSyncReadWriteReqEx2(<EOL>port,<EOL>address,<EOL>ADSIGRP_SYM_HNDBYNAME,<EOL><NUM_LIT>,<EOL>PLCTYPE_UDINT,<EOL>data_name,<EOL>PLCTYPE_STRING,<EOL>)<EOL>adsSyncWriteReqEx(port, address, ADSIGRP_SYM_VALBYHND, handle, value, data_type)<EOL>adsSyncWriteReqEx(port, address, ADSIGRP_SYM_RELEASEHND, <NUM_LIT:0>, handle, PLCTYPE_UDINT)<EOL>", "docstring": "Send data synchronous to an ADS-device from data name.\n\n    :param int port: local AMS port as returned by adsPortOpenEx()\n    :param pyads.structs.AmsAddr address: local or remote AmsAddr\n    :param string data_name: PLC storage address\n    :param value: value to write to the storage address of the PLC\n    :param Type data_type: type of the data given to the PLC,\n        according to PLCTYPE constants", "id": "f13261:m14"}
{"signature": "def adsSyncWriteControlReqEx(<EOL>port, address, ads_state, device_state, data, plc_data_type<EOL>):<EOL>", "body": "sync_write_control_request = _adsDLL.AdsSyncWriteControlReqEx<EOL>ams_address_pointer = ctypes.pointer(address.amsAddrStruct())<EOL>ads_state_c = ctypes.c_ulong(ads_state)<EOL>device_state_c = ctypes.c_ulong(device_state)<EOL>if plc_data_type == PLCTYPE_STRING:<EOL><INDENT>data = ctypes.c_char_p(data.encode(\"<STR_LIT:utf-8>\"))<EOL>data_pointer = data<EOL>data_length = len(data_pointer.value) + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>data = plc_data_type(data)<EOL>data_pointer = ctypes.pointer(data)<EOL>data_length = ctypes.sizeof(data)<EOL><DEDENT>error_code = sync_write_control_request(<EOL>port,<EOL>ams_address_pointer,<EOL>ads_state_c,<EOL>device_state_c,<EOL>data_length,<EOL>data_pointer,<EOL>)<EOL>if error_code:<EOL><INDENT>raise ADSError(error_code)<EOL><DEDENT>", "docstring": "Change the ADS state and the machine-state of the ADS-server.\n\n    :param int port: local AMS port as returned by adsPortOpenEx()\n    :param pyads.structs.AmsAddr adr: local or remote AmsAddr\n    :param int ads_state: new ADS-state, according to ADSTATE constants\n    :param int device_state: new machine-state\n    :param data: additional data\n    :param int plc_data_type: plc datatype, according to PLCTYPE constants", "id": "f13261:m9"}
{"signature": "@router_function<EOL>def adsDelRoute(net_id):<EOL>", "body": "delete_route = _adsDLL.AdsDelRoute<EOL>delete_route(net_id)<EOL>", "docstring": "Remove existing route from the AMS Router.\n\n    :param pyads.structs.SAmsNetId net_id: net id associated with the routing\n        entry which is to be removed from the router.", "id": "f13261:m2"}
{"signature": "def adsPortOpenEx():<EOL>", "body": "port_open_ex = _adsDLL.AdsPortOpenEx<EOL>port_open_ex.restype = ctypes.c_long<EOL>port = port_open_ex()<EOL>if port == <NUM_LIT:0>:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>return port<EOL>", "docstring": "Connect to the TwinCAT message router.\n\n    :rtype: int\n    :return: port number", "id": "f13261:m3"}
{"signature": "def adsSyncReadReqEx2(<EOL>port, address, index_group, index_offset, data_type, return_ctypes=False<EOL>):<EOL>", "body": "sync_read_request = _adsDLL.AdsSyncReadReqEx2<EOL>ams_address_pointer = ctypes.pointer(address.amsAddrStruct())<EOL>index_group_c = ctypes.c_ulong(index_group)<EOL>index_offset_c = ctypes.c_ulong(index_offset)<EOL>if data_type == PLCTYPE_STRING:<EOL><INDENT>data = (STRING_BUFFER * PLCTYPE_STRING)()<EOL><DEDENT>else:<EOL><INDENT>data = data_type()<EOL><DEDENT>data_pointer = ctypes.pointer(data)<EOL>data_length = ctypes.c_ulong(ctypes.sizeof(data))<EOL>bytes_read = ctypes.c_ulong()<EOL>bytes_read_pointer = ctypes.pointer(bytes_read)<EOL>error_code = sync_read_request(<EOL>port,<EOL>ams_address_pointer,<EOL>index_group_c,<EOL>index_offset_c,<EOL>data_length,<EOL>data_pointer,<EOL>bytes_read_pointer,<EOL>)<EOL>if error_code:<EOL><INDENT>raise ADSError(error_code)<EOL><DEDENT>if data_type != PLCTYPE_STRING and bytes_read.value != data_length.value:<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\".format(<EOL>data_length.value, bytes_read.value<EOL>)<EOL>)<EOL><DEDENT>if return_ctypes:<EOL><INDENT>return data<EOL><DEDENT>if data_type == PLCTYPE_STRING:<EOL><INDENT>return data.value.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>if type(data_type).__name__ == \"<STR_LIT>\":<EOL><INDENT>return [i for i in data]<EOL><DEDENT>if hasattr(data, \"<STR_LIT:value>\"):<EOL><INDENT>return data.value<EOL><DEDENT>return data<EOL>", "docstring": "Read data synchronous from an ADS-device.\n\n    :param int port: local AMS port as returned by adsPortOpenEx()\n    :param pyads.structs.AmsAddr address: local or remote AmsAddr\n    :param int index_group: PLC storage area, according to the INDEXGROUP\n        constants\n    :param int index_offset: PLC storage address\n    :param Type data_type: type of the data given to the PLC, according to\n        PLCTYPE constants\n    :param bool return_ctypes: return ctypes instead of python types if True\n        
(default: False)\n    :rtype: data_type\n    :return: value: **value**", "id": "f13261:m12"}
{"signature": "def adsSyncAddDeviceNotificationReqEx(<EOL>port, adr, data_name, pNoteAttrib, callback, user_handle=None<EOL>):<EOL>", "body": "global callback_store<EOL>if NOTEFUNC is None:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>adsSyncAddDeviceNotificationReqFct = _adsDLL.AdsSyncAddDeviceNotificationReqEx<EOL>pAmsAddr = ctypes.pointer(adr.amsAddrStruct())<EOL>hnl = adsSyncReadWriteReqEx2(<EOL>port, adr, ADSIGRP_SYM_HNDBYNAME, <NUM_LIT>, PLCTYPE_UDINT, data_name, PLCTYPE_STRING<EOL>)<EOL>nIndexGroup = ctypes.c_ulong(ADSIGRP_SYM_VALBYHND)<EOL>nIndexOffset = ctypes.c_ulong(hnl)<EOL>attrib = pNoteAttrib.notificationAttribStruct()<EOL>pNotification = ctypes.c_ulong()<EOL>nHUser = ctypes.c_ulong(hnl)<EOL>if user_handle is not None:<EOL><INDENT>nHUser = ctypes.c_ulong(user_handle)<EOL><DEDENT>adsSyncAddDeviceNotificationReqFct.argtypes = [<EOL>ctypes.c_ulong,<EOL>ctypes.POINTER(SAmsAddr),<EOL>ctypes.c_ulong,<EOL>ctypes.c_ulong,<EOL>ctypes.POINTER(SAdsNotificationAttrib),<EOL>NOTEFUNC,<EOL>ctypes.c_ulong,<EOL>ctypes.POINTER(ctypes.c_ulong),<EOL>]<EOL>adsSyncAddDeviceNotificationReqFct.restype = ctypes.c_long<EOL>def wrapper(addr, notification, user):<EOL><INDENT>return callback(notification, data_name)<EOL><DEDENT>c_callback = NOTEFUNC(wrapper)<EOL>err_code = adsSyncAddDeviceNotificationReqFct(<EOL>port,<EOL>pAmsAddr,<EOL>nIndexGroup,<EOL>nIndexOffset,<EOL>ctypes.byref(attrib),<EOL>c_callback,<EOL>nHUser,<EOL>ctypes.byref(pNotification),<EOL>)<EOL>if err_code:<EOL><INDENT>raise ADSError(err_code)<EOL><DEDENT>callback_store[pNotification.value] = c_callback<EOL>return (pNotification.value, hnl)<EOL>", "docstring": "Add a device notification.\n\n    :param int port: local AMS port as returned by adsPortOpenEx()\n    :param pyads.structs.AmsAddr adr: local or remote AmsAddr\n    :param string data_name: PLC storage address\n    :param pyads.structs.NotificationAttrib pNoteAttrib: notification attributes\n    :param callback: Callback function to handle 
notification\n    :param user_handle: User Handle\n    :rtype: (int, int)\n    :returns: notification handle, user handle", "id": "f13261:m15"}
{"signature": "def adsSyncReadByNameEx(port, address, data_name, data_type, return_ctypes=False):<EOL>", "body": "<EOL>handle = adsSyncReadWriteReqEx2(<EOL>port,<EOL>address,<EOL>ADSIGRP_SYM_HNDBYNAME,<EOL><NUM_LIT>,<EOL>PLCTYPE_UDINT,<EOL>data_name,<EOL>PLCTYPE_STRING,<EOL>)<EOL>value = adsSyncReadReqEx2(<EOL>port, address, ADSIGRP_SYM_VALBYHND, handle, data_type, return_ctypes<EOL>)<EOL>adsSyncWriteReqEx(port, address, ADSIGRP_SYM_RELEASEHND, <NUM_LIT:0>, handle, PLCTYPE_UDINT)<EOL>return value<EOL>", "docstring": "Read data synchronous from an ADS-device from data name.\n\n    :param int port: local AMS port as returned by adsPortOpenEx()\n    :param pyads.structs.AmsAddr address: local or remote AmsAddr\n    :param string data_name: data name\n    :param Type data_type: type of the data given to the PLC, according to\n        PLCTYPE constants\n    :param bool return_ctypes: return ctypes instead of python types if True\n        (default: False)\n    :rtype: data_type\n    :return: value: **value**", "id": "f13261:m13"}
{"signature": "def adsGetLocalAddressEx(port):<EOL>", "body": "get_local_address_ex = _adsDLL.AdsGetLocalAddressEx<EOL>ams_address_struct = SAmsAddr()<EOL>error_code = get_local_address_ex(port, ctypes.pointer(ams_address_struct))<EOL>if error_code:<EOL><INDENT>raise ADSError(error_code)<EOL><DEDENT>local_ams_address = AmsAddr()<EOL>local_ams_address._ams_addr = ams_address_struct<EOL>return local_ams_address<EOL>", "docstring": "Return the local AMS-address and the port number.\n\n    :rtype: pyads.structs.AmsAddr\n    :return: AMS-address", "id": "f13261:m5"}
{"signature": "def adsPortCloseEx(port):<EOL>", "body": "port_close_ex = _adsDLL.AdsPortCloseEx<EOL>port_close_ex.restype = ctypes.c_long<EOL>error_code = port_close_ex(port)<EOL>if error_code:<EOL><INDENT>raise ADSError(error_code)<EOL><DEDENT>", "docstring": "Close the connection to the TwinCAT message router.", "id": "f13261:m4"}
{"signature": "def router_function(fn):<EOL>", "body": "@wraps(fn)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>if platform_is_windows():  <EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>return fn(*args, **kwargs)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Raise a runtime error if on Win32 systems.\n\n    Decorator.\n\n    Decorator for functions that interact with the router for the Linux\n    implementation of the ADS library.\n\n    Unlike the Windows implementation which uses a separate router daemon,\n    the Linux library manages AMS routing in-process. As such, routing must be\n    configured programatically via. the provided API. These endpoints are\n    invalid on Win32 systems, so an exception will be raised.", "id": "f13261:m0"}
{"signature": "def adsSyncWriteReqEx(port, address, index_group, index_offset, value, plc_data_type):<EOL>", "body": "sync_write_request = _adsDLL.AdsSyncWriteReqEx<EOL>ams_address_pointer = ctypes.pointer(address.amsAddrStruct())<EOL>index_group_c = ctypes.c_ulong(index_group)<EOL>index_offset_c = ctypes.c_ulong(index_offset)<EOL>if plc_data_type == PLCTYPE_STRING:<EOL><INDENT>data = ctypes.c_char_p(value.encode(\"<STR_LIT:utf-8>\"))<EOL>data_pointer = data  <EOL>data_length = len(data_pointer.value) + <NUM_LIT:1>  <EOL><DEDENT>else:<EOL><INDENT>if type(plc_data_type).__name__ == \"<STR_LIT>\":<EOL><INDENT>data = plc_data_type(*value)<EOL><DEDENT>else:<EOL><INDENT>data = plc_data_type(value)<EOL><DEDENT>data_pointer = ctypes.pointer(data)<EOL>data_length = ctypes.sizeof(data)<EOL><DEDENT>error_code = sync_write_request(<EOL>port,<EOL>ams_address_pointer,<EOL>index_group_c,<EOL>index_offset_c,<EOL>data_length,<EOL>data_pointer,<EOL>)<EOL>if error_code:<EOL><INDENT>raise ADSError(error_code)<EOL><DEDENT>", "docstring": "Send data synchronous to an ADS-device.\n\n    :param int port: local AMS port as returned by adsPortOpenEx()\n    :param pyads.structs.AmsAddr address: local or remote AmsAddr\n    :param int indexGroup: PLC storage area, according to the INDEXGROUP\n        constants\n    :param int index_offset: PLC storage address\n    :param value: value to write to the storage address of the PLC\n    :param int plc_data_type: type of the data given to the PLC,\n        according to PLCTYPE constants", "id": "f13261:m10"}
{"signature": "def tzname(self, dt):<EOL>", "body": "return \"<STR_LIT>\"<EOL>", "docstring": "Return name of the timezone.", "id": "f13262:c0:m1"}
{"signature": "def utcoffset(self, dt):<EOL>", "body": "return ZERO<EOL>", "docstring": "Return offset of localtime from UTC time.", "id": "f13262:c0:m0"}
{"signature": "def render_tag(self, context, name, nodelist):", "body": "<EOL>settings = self.setting_model.objects.filter(name=name).as_dict()<EOL>try:<EOL><INDENT>value = settings[name]<EOL><DEDENT>except KeyError:<EOL><INDENT>value = settings[name] = nodelist.render(context)<EOL><DEDENT>return value<EOL>", "docstring": "Returns the value of the named setting.", "id": "f13269:c2:m0"}
{"signature": "def get_value(self, context, default):", "body": "if default is None:<EOL><INDENT>settings = self.setting_model.objects.as_dict()<EOL><DEDENT>else:<EOL><INDENT>settings = self.setting_model.objects.as_dict(default=default)<EOL><DEDENT>return settings<EOL>", "docstring": "Returns a ``SettingDict`` object.", "id": "f13269:c0:m0"}
{"signature": "def __setitem__(self, key, value):", "body": "<EOL>if self.empty_cache:<EOL><INDENT>self.refresh()<EOL><DEDENT>with transaction.atomic():<EOL><INDENT>self.model.objects.filter(name=key).delete()<EOL>self.model.objects.create(name=key, value=value)<EOL><DEDENT>super(SettingDict, self).__setitem__(key, value)<EOL>", "docstring": "Tries to delete and then creates a setting, in case the value type has\nchanged. Otherwise, we would need to get, update (if same type), or\ndelete and create (if not same type).", "id": "f13278:c0:m3"}
{"signature": "def create(self, name, value):", "body": "if value is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>model = Setting.get_model_for_value(value)<EOL>obj = super(SettingQuerySet, model.objects.all()).create(name=name, value=value)<EOL>return obj<EOL>", "docstring": "Creates and returns an object of the appropriate type for ``value``.", "id": "f13279:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def is_compatible(cls, value):<DEDENT>", "body": "if not hasattr(cls, '<STR_LIT>'):<EOL><INDENT>raise NotImplementedError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>return isinstance(value, cls.value_type)<EOL>", "docstring": "Returns ``True`` if this model should be used to store ``value``.\n\nChecks if ``value`` is an instance of ``value_type``. Override this\nmethod if you need more advanced behaviour. For example, to distinguish\nbetween single and multi-line text.", "id": "f13279:c3:m0"}
{"signature": "def as_dict(self, default=None):", "body": "settings = SettingDict(queryset=self, default=default)<EOL>return settings<EOL>", "docstring": "Returns a ``SettingDict`` object for this queryset.", "id": "f13279:c0:m0"}
{"signature": "def on_message(self, websocket, message):", "body": "waiter = self._waiter<EOL>self._waiter = None<EOL>encoded = json.loads(message)<EOL>event = encoded.get('<STR_LIT>')<EOL>channel = encoded.get('<STR_LIT>')<EOL>data = json.loads(encoded.get('<STR_LIT:data>'))<EOL>try:<EOL><INDENT>if event == PUSHER_ERROR:<EOL><INDENT>raise PusherError(data['<STR_LIT:message>'], data['<STR_LIT:code>'])<EOL><DEDENT>elif event == PUSHER_CONNECTION:<EOL><INDENT>self.socket_id = data.get('<STR_LIT>')<EOL>self.logger.info('<STR_LIT>',<EOL>self.socket_id)<EOL>waiter.set_result(self.socket_id)<EOL><DEDENT>elif event == PUSHER_SUBSCRIBED:<EOL><INDENT>self.logger.info('<STR_LIT>',<EOL>encoded.get('<STR_LIT>'))<EOL><DEDENT>elif channel:<EOL><INDENT>self[channel]._event(event, data)<EOL><DEDENT><DEDENT>except Exception as exc:<EOL><INDENT>if waiter:<EOL><INDENT>waiter.set_exception(exc)<EOL><DEDENT>else:<EOL><INDENT>self.logger.exception('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Handle websocket incoming messages", "id": "f13281:c2:m5"}
{"signature": "async def connect(self):", "body": "if not self._consumer:<EOL><INDENT>waiter = self._waiter = asyncio.Future()<EOL>try:<EOL><INDENT>address = self._websocket_host()<EOL>self.logger.info('<STR_LIT>', address)<EOL>self._consumer = await self.http.get(address)<EOL>if self._consumer.status_code != <NUM_LIT>:<EOL><INDENT>raise PusherError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>except Exception as exc:<EOL><INDENT>waiter.set_exception(exc)<EOL>raise<EOL><DEDENT>else:<EOL><INDENT>await waiter<EOL><DEDENT><DEDENT>return self._consumer<EOL>", "docstring": "Connect to a Pusher websocket", "id": "f13281:c2:m2"}
{"signature": "@property<EOL><INDENT>def http_session(self):<DEDENT>", "body": "return self.endpoint.http_session<EOL>", "docstring": "HTTP session object", "id": "f13282:c0:m3"}
{"signature": "def get_git_changeset(filename=None):", "body": "dirname = os.path.dirname(filename or __file__)<EOL>git_show = sh('<STR_LIT>',<EOL>cwd=dirname)<EOL>timestamp = git_show.partition('<STR_LIT:\\n>')[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))<EOL><DEDENT>except ValueError:<EOL><INDENT>return None<EOL><DEDENT>return timestamp.strftime('<STR_LIT>')<EOL>", "docstring": "Returns a numeric identifier of the latest git changeset.\n\n    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.\n    This value isn't guaranteed to be unique, but collisions are very unlikely,\n    so it's sufficient for generating the development version numbers.", "id": "f13283:m2"}
{"signature": "async def upload_file(self, bucket, file, uploadpath=None, key=None,<EOL>ContentType=None, **kw):", "body": "is_filename = False<EOL>if hasattr(file, '<STR_LIT>'):<EOL><INDENT>if hasattr(file, '<STR_LIT>'):<EOL><INDENT>file.seek(<NUM_LIT:0>)<EOL><DEDENT>file = file.read()<EOL>size = len(file)<EOL><DEDENT>elif key:<EOL><INDENT>size = len(file)<EOL><DEDENT>else:<EOL><INDENT>is_filename = True<EOL>size = os.stat(file).st_size<EOL>key = os.path.basename(file)<EOL><DEDENT>assert key, '<STR_LIT>'<EOL>if not ContentType:<EOL><INDENT>ContentType, _ = mimetypes.guess_type(key)<EOL><DEDENT>if uploadpath:<EOL><INDENT>if not uploadpath.endswith('<STR_LIT:/>'):<EOL><INDENT>uploadpath = '<STR_LIT>' % uploadpath<EOL><DEDENT>key = '<STR_LIT>' % (uploadpath, key)<EOL><DEDENT>params = dict(Bucket=bucket, Key=key)<EOL>if not ContentType:<EOL><INDENT>ContentType = '<STR_LIT>'<EOL><DEDENT>params['<STR_LIT>'] = ContentType<EOL>if size > MULTI_PART_SIZE and is_filename:<EOL><INDENT>resp = await _multipart(self, file, params)<EOL><DEDENT>elif is_filename:<EOL><INDENT>with open(file, '<STR_LIT:rb>') as fp:<EOL><INDENT>params['<STR_LIT>'] = fp.read()<EOL><DEDENT>resp = await self.put_object(**params)<EOL><DEDENT>else:<EOL><INDENT>params['<STR_LIT>'] = file<EOL>resp = await self.put_object(**params)<EOL><DEDENT>if '<STR_LIT>' not in resp:<EOL><INDENT>resp['<STR_LIT>'] = key<EOL><DEDENT>if '<STR_LIT>' not in resp:<EOL><INDENT>resp['<STR_LIT>'] = bucket<EOL><DEDENT>return resp<EOL>", "docstring": "Upload a file to S3 possibly using the multi-part uploader\n        Return the key uploaded", "id": "f13284:c0:m0"}
{"signature": "def upload_folder(self, bucket, folder, key=None, skip=None,<EOL>content_types=None):", "body": "uploader = FolderUploader(self, bucket, folder, key, skip,<EOL>content_types)<EOL>return uploader.start()<EOL>", "docstring": "Recursively upload a ``folder`` into a backet.\n\n        :param bucket: bucket where to upload the folder to\n        :param folder: the folder location in the local file system\n        :param key: Optional key where the folder is uploaded\n        :param skip: Optional list of files to skip\n        :param content_types: Optional dictionary mapping suffixes to\n            content types\n        :return: a coroutine", "id": "f13284:c0:m2"}
{"signature": "async def copy_storage_object(self, source_bucket, source_key,<EOL>bucket, key):", "body": "info = await self.head_object(Bucket=source_bucket, Key=source_key)<EOL>size = info['<STR_LIT>']<EOL>if size > MULTI_PART_SIZE:<EOL><INDENT>result = await _multipart_copy(self, source_bucket, source_key,<EOL>bucket, key, size)<EOL><DEDENT>else:<EOL><INDENT>result = await self.copy_object(<EOL>Bucket=bucket, Key=key,<EOL>CopySource=_source_string(source_bucket, source_key)<EOL>)<EOL><DEDENT>return result<EOL>", "docstring": "Copy a file from one bucket into another", "id": "f13284:c0:m1"}
{"signature": "def get_paginator(self, operation_name):", "body": "if not self.can_paginate(operation_name):<EOL><INDENT>raise OperationNotPageableError(operation_name=operation_name)<EOL><DEDENT>else:<EOL><INDENT>actual_operation_name = self._PY_TO_OP_NAME[operation_name]<EOL>Paginator.PAGE_ITERATOR_CLS = AsyncPageIterator<EOL>paginator = Paginator(<EOL>getattr(self, operation_name),<EOL>self._cache['<STR_LIT>'][actual_operation_name])<EOL>return paginator<EOL><DEDENT>", "docstring": "Create a paginator for an operation.\n        :type operation_name: string\n        :param operation_name: The operation name.  This is the same name\n            as the method name on the client.  For example, if the\n            method name is ``create_foo``, and you'd normally invoke the\n            operation as ``client.create_foo(**kwargs)``, if the\n            ``create_foo`` operation can be paginated, you can use the\n            call ``client.get_paginator(\"create_foo\")``.\n        :raise OperationNotPageableError: Raised if the operation is not\n            pageable.  You can use the ``client.can_paginate`` method to\n            check if an operation is pageable.\n        :rtype: L{botocore.paginate.Paginator}\n        :return: A paginator object.", "id": "f13289:c1:m3"}
{"signature": "def cache_result(func):", "body": "def cache_set(key, value):<EOL><INDENT>cache.set(key, value, AVATAR_CACHE_TIMEOUT)<EOL>return value<EOL><DEDENT>def cached_func(user, size):<EOL><INDENT>prefix = func.__name__<EOL>cached_funcs.add(prefix)<EOL>key = get_cache_key(user, size, prefix=prefix)<EOL>return cache.get(key) or cache_set(key, func(user, size))<EOL><DEDENT>return cached_func<EOL>", "docstring": "Decorator to cache the result of functions that take a ``user`` and a\n``size`` value.", "id": "f13303:m1"}
{"signature": "def send(self, data):", "body": "while len(self.senders) >= self.window:<EOL><INDENT>pass<EOL><DEDENT>self.senders[self.new_seq_no] = self.Sender(<EOL>self.write,<EOL>self.send_lock,<EOL>data,<EOL>self.new_seq_no,<EOL>timeout=self.sending_timeout,<EOL>callback=self.send_callback,<EOL>)<EOL>self.senders[self.new_seq_no].start()<EOL>self.new_seq_no = (self.new_seq_no + <NUM_LIT:1>) % HDLController.MAX_SEQ_NO<EOL>", "docstring": "Sends a new data frame.\n\nThis method will block until a new room is available for\na new sender. This limit is determined by the size of the window.", "id": "f13317:c0:m7"}
{"signature": "def set_send_callback(self, callback):", "body": "if not hasattr(callback, '<STR_LIT>'):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>self.send_callback = callback<EOL>", "docstring": "Sets the send callback function.\n\nIf the HDLC controller has already been started, the new\ncallback function will be taken into account for the next\ndata frames to be sent.", "id": "f13317:c0:m3"}
{"signature": "def get_senders_number(self):", "body": "return len(self.senders)<EOL>", "docstring": "Returns the number of active senders.", "id": "f13317:c0:m6"}
{"signature": "def set_sending_timeout(self, sending_timeout):", "body": "if sending_timeout >= HDLController.MIN_SENDING_TIMEOUT:<EOL><INDENT>self.sending_timeout = sending_timeout<EOL><DEDENT>", "docstring": "Sets the sending timeout.", "id": "f13317:c0:m5"}
{"signature": "def set_receive_callback(self, callback):", "body": "if not hasattr(callback, '<STR_LIT>'):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>self.receive_callback = callback<EOL>", "docstring": "Sets the receive callback function.\n\nThis method has to be called before starting the\nHDLC controller.", "id": "f13317:c0:m4"}
{"signature": "def get_result(self, errors=STRICT, **params):", "body": "service_url = self.create_session(**params)<EOL>return self.poll(service_url, errors=errors)<EOL>", "docstring": "Get all results, no filtering,\netc. by creating and polling the session.", "id": "f13321:c0:m3"}
{"signature": "def make_request(self, url, method='<STR_LIT>', headers=None, data=None,<EOL>callback=None, errors=STRICT, verify=False, timeout=None, **params):", "body": "error_modes = (STRICT, GRACEFUL, IGNORE)<EOL>error_mode = errors or GRACEFUL<EOL>if error_mode.lower() not in error_modes:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>% '<STR_LIT:U+002C>'.join(error_modes))<EOL><DEDENT>if callback is None:<EOL><INDENT>callback = self._default_resp_callback<EOL><DEDENT>request = getattr(requests, method.lower())<EOL>log.debug('<STR_LIT>' % url)<EOL>log.debug('<STR_LIT>' % method)<EOL>log.debug('<STR_LIT>' % params)<EOL>log.debug('<STR_LIT>' % headers)<EOL>log.debug('<STR_LIT>' % timeout)<EOL>r = request(<EOL>url, headers=headers, data=data, verify=verify, timeout=timeout, params=params)<EOL>log.debug('<STR_LIT>' % r.url)<EOL>try:<EOL><INDENT>r.raise_for_status()<EOL>return callback(r)<EOL><DEDENT>except Exception as e:<EOL><INDENT>return self._with_error_handling(r, e,<EOL>error_mode, self.response_format)<EOL><DEDENT>", "docstring": "Reusable method for performing requests.\n:param url - URL to request\n:param method - request method, default is 'get'\n:param headers - request headers\n:param data - post data\n:param callback - callback to be applied to response,\n                  default callback will parse response as json object.\n:param errors - specifies communication errors handling mode, possible\n                values are:\n                 * strict (default) - throw an error as soon as one\n                    occurred\n                 * graceful - ignore certain errors, e.g. 
EmptyResponse\n                 * ignore - ignore all errors and return a result in\n                            any case.\n                            NOTE that it DOES NOT mean that no\n                            exceptions can be\n                            raised from this method, it mostly ignores\n                            communication\n                            related errors.\n                 * None or empty string equals to default\n:param verify - whether or not to verify SSL cert, default to False\n:param timeout - the timeout of the request in second, default to None\n:param params - additional query parameters for request", "id": "f13322:c5:m2"}
{"signature": "def get_sizeof_descriptor_table(version=\"<STR_LIT>\"):", "body": "if version == \"<STR_LIT>\":<EOL><INDENT>return sizeof(DescriptorTableDenali)<EOL><DEDENT>elif version == \"<STR_LIT>\":<EOL><INDENT>return sizeof(DescriptorTableSpec20)<EOL><DEDENT>elif version == \"<STR_LIT>\":<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Get sizeof DescriptorTable", "id": "f13328:m1"}
{"signature": "def cat_file(path):", "body": "cmd = [\"<STR_LIT>\", path]<EOL>status, stdout, _ = cij.ssh.command(cmd, shell=True, echo=True)<EOL>if status:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % path)<EOL><DEDENT>return stdout.strip()<EOL>", "docstring": "Cat file and return content", "id": "f13330:m0"}
{"signature": "def exists():", "body": "if env():<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>nvme = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)<EOL>cmd = [\"<STR_LIT>\", \"<STR_LIT>\", nvme[\"<STR_LIT>\"], \"<STR_LIT>\"]<EOL>rcode, _, _ = cij.ssh.command(cmd, shell=True, echo=False)<EOL>if rcode:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Verify that the ENV defined NVMe device exists", "id": "f13330:m3"}
{"signature": "def comp_meta(file_bef, file_aft, mode=\"<STR_LIT>\"):", "body": "if env():<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>nvme = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)<EOL>num_chk = int(nvme[\"<STR_LIT>\"])<EOL>meta_bef = cij.bin.Buffer(types=get_descriptor_table(nvme['<STR_LIT>']), length=num_chk)<EOL>meta_aft = cij.bin.Buffer(types=get_descriptor_table(nvme['<STR_LIT>']), length=num_chk)<EOL>meta_bef.read(file_bef)<EOL>meta_aft.read(file_aft)<EOL>for chk in range(num_chk):<EOL><INDENT>ignore = [\"<STR_LIT>\", \"<STR_LIT>\"]<EOL>if mode == \"<STR_LIT>\" and meta_bef[chk].CS == <NUM_LIT:4>:<EOL><INDENT>ignore.append(\"<STR_LIT>\")<EOL><DEDENT>if meta_bef.compare(meta_aft, chk, ignore=ignore):<EOL><INDENT>cij.warn(\"<STR_LIT>\" % chk)<EOL>meta_bef.dump(chk)<EOL>cij.warn(\"<STR_LIT>\" % chk)<EOL>meta_aft.dump(chk)<EOL>cij.err(\"<STR_LIT>\" % chk)<EOL>return <NUM_LIT:1><EOL><DEDENT><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Compare chunk meta, mode=[pfail, power, reboot]", "id": "f13330:m5"}
{"signature": "def dev_get_rprt(dev_name, pugrp=None, punit=None):", "body": "cmd = [\"<STR_LIT>\", \"<STR_LIT>\", dev_name]<EOL>if not (pugrp is None and punit is None):<EOL><INDENT>cmd = [\"<STR_LIT>\", \"<STR_LIT>\", dev_name, str(pugrp), str(punit)]<EOL><DEDENT>_, _, _, struct = cij.test.command_to_struct(cmd)<EOL>if not struct:<EOL><INDENT>return None<EOL><DEDENT>return struct[\"<STR_LIT>\"]<EOL>", "docstring": "Get-log-page chunk information\n\nIf the pugrp and punit is set, then provide report only for that pugrp/punit\n\n@returns the first chunk in the given state if one exists, None otherwise", "id": "f13331:m2"}
{"signature": "def s20_to_gen(self, pugrp, punit, chunk, sectr):", "body": "cmd = [\"<STR_LIT>\", self.envs,<EOL>\"<STR_LIT>\" % (pugrp, punit, chunk, sectr)]<EOL>status, stdout, _ = cij.ssh.command(cmd, shell=True)<EOL>if status:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>return int(re.findall(r\"<STR_LIT>\", stdout)[<NUM_LIT:0>], <NUM_LIT:16>)<EOL>", "docstring": "S20 unit to gen address", "id": "f13332:c0:m8"}
{"signature": "def dev_to_gen(self, address):", "body": "cmd = [\"<STR_LIT>\", self.envs, \"<STR_LIT>\".format(address)]<EOL>status, stdout, _ = cij.ssh.command(cmd, shell=True)<EOL>if status:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>return int(re.findall(r\"<STR_LIT>\", stdout)[<NUM_LIT:0>], <NUM_LIT:16>)<EOL>", "docstring": "Generic address to device address", "id": "f13332:c0:m10"}
{"signature": "def is_closed_chunk(self, chk):", "body": "cs = self.get_chunk_status(chk)<EOL>if cs & <NUM_LIT> != <NUM_LIT:0>:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Check the chunk is free or not", "id": "f13332:c0:m6"}
{"signature": "def slc_erase(self, address, BE_ID=<NUM_LIT>, PMODE=<NUM_LIT>):", "body": "cmd = [\"<STR_LIT>\" % BE_ID, \"<STR_LIT>\" % PMODE, \"<STR_LIT>\", self.envs, \"<STR_LIT>\" % address]<EOL>status, _, _ = cij.ssh.command(cmd, shell=True)<EOL>return status<EOL>", "docstring": "slc erase", "id": "f13332:c0:m23"}
{"signature": "def get_chunk_information(self, chk, lun, chunk_name):", "body": "cmd = [\"<STR_LIT>\", self.envs,<EOL>\"<STR_LIT>\" % (chk, lun, chunk_name)]<EOL>status, _, _ = cij.ssh.command(cmd, shell=True)<EOL>return status<EOL>", "docstring": "Get chunk information", "id": "f13332:c0:m3"}
{"signature": "def vblk_write(self, address, file_name=None):", "body": "cmd = [\"<STR_LIT>\", self.envs, \"<STR_LIT>\" % address]<EOL>if file_name:<EOL><INDENT>cmd += [\"<STR_LIT>\".format(file_name)]<EOL><DEDENT>status, _, _ = cij.ssh.command(cmd, shell=True)<EOL>return status<EOL>", "docstring": "nvm_vblk write", "id": "f13332:c0:m12"}
{"signature": "def vblk_erase(self, address):", "body": "cmd = [\"<STR_LIT>\", self.envs, \"<STR_LIT>\" % address]<EOL>status, _, _ = cij.ssh.command(cmd, shell=True)<EOL>return status<EOL>", "docstring": "nvm_vblk erase", "id": "f13332:c0:m11"}
{"signature": "def scalar_read(self, address, block_count, data_file, meta_file):", "body": "cmd = [\"<STR_LIT>\", \"<STR_LIT>\", self.envs, \"<STR_LIT>\".format(address),<EOL>\"<STR_LIT>\".format(block_count - <NUM_LIT:1>), \"<STR_LIT>\".format(data_file),<EOL>\"<STR_LIT>\".format(meta_file),<EOL>\"<STR_LIT>\".format(block_count * self.get_env(\"<STR_LIT>\", \"<STR_LIT>\")),<EOL>\"<STR_LIT>\".format(block_count * self.get_env(\"<STR_LIT>\", \"<STR_LIT>\"))]<EOL>status, _, _ = cij.ssh.command(cmd, shell=True)<EOL>return status<EOL>", "docstring": "nvme read", "id": "f13332:c0:m18"}
{"signature": "def is_free_chunk(self, chk):", "body": "cs = self.get_chunk_status(chk)<EOL>if cs & <NUM_LIT> != <NUM_LIT:0>:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Check the chunk is free or not", "id": "f13332:c0:m5"}
{"signature": "def gen_to_dev(self, address):", "body": "cmd = [\"<STR_LIT>\", self.envs[\"<STR_LIT>\"], \"<STR_LIT>\".format(address)]<EOL>status, stdout, _ = cij.ssh.command(cmd, shell=True)<EOL>if status:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>return int(re.findall(r\"<STR_LIT>\", stdout)[<NUM_LIT:0>], <NUM_LIT:16>)<EOL>", "docstring": "Generic address to device address", "id": "f13334:c0:m9"}
{"signature": "def vblk_read(self, address, meta=False):", "body": "cmd = list()<EOL>if meta:<EOL><INDENT>cmd.append(\"<STR_LIT>\")<EOL><DEDENT>cmd += [\"<STR_LIT>\", self.envs[\"<STR_LIT>\"], \"<STR_LIT>\" % address]<EOL>status, _, _ = cij.ssh.command(cmd, shell=True)<EOL>return status<EOL>", "docstring": "nvm_vblk read", "id": "f13334:c0:m12"}
{"signature": "def is_free_chunk(self, chunk_meta, grp, pug, chk):", "body": "meta = self.get_chunk_meta_item(chunk_meta, grp, pug, chk)<EOL>if meta.CS & <NUM_LIT> != <NUM_LIT:0>:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Check the chunk is free or not", "id": "f13334:c0:m4"}
{"signature": "def get_chunk_meta(self, meta_file):", "body": "chunks = self.envs[\"<STR_LIT>\"]<EOL>if cij.nvme.get_meta(<NUM_LIT:0>, chunks * self.envs[\"<STR_LIT>\"], meta_file):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>chunk_meta = cij.bin.Buffer(types=self.envs[\"<STR_LIT>\"], length=chunks)<EOL>chunk_meta.read(meta_file)<EOL>return chunk_meta<EOL>", "docstring": "Get chunk meta table", "id": "f13334:c0:m2"}
{"signature": "def scalar_write(self, address, block_count, data_file, meta_file):", "body": "cmd = [\"<STR_LIT>\", \"<STR_LIT>\", self.envs[\"<STR_LIT>\"], \"<STR_LIT>\".format(address),<EOL>\"<STR_LIT>\".format(block_count-<NUM_LIT:1>), \"<STR_LIT>\".format(data_file), \"<STR_LIT>\".format(meta_file),<EOL>\"<STR_LIT>\".format(block_count * self.envs[\"<STR_LIT>\"]),<EOL>\"<STR_LIT>\".format(block_count * self.envs[\"<STR_LIT>\"])]<EOL>status, _, _ = cij.ssh.command(cmd, shell=True)<EOL>return status<EOL>", "docstring": "nvme write", "id": "f13334:c0:m16"}
{"signature": "def get_envs(self, key):", "body": "return self.envs[key]<EOL>", "docstring": "Get environment of liblightnvm", "id": "f13334:c0:m1"}
{"signature": "def scalar_read(self, address, block_count, data_file, meta_file):", "body": "cmd = [\"<STR_LIT>\", \"<STR_LIT>\", self.envs[\"<STR_LIT>\"], \"<STR_LIT>\".format(address),<EOL>\"<STR_LIT>\".format(block_count - <NUM_LIT:1>), \"<STR_LIT>\".format(data_file),<EOL>\"<STR_LIT>\".format(meta_file),<EOL>\"<STR_LIT>\".format(block_count * self.envs[\"<STR_LIT>\"]),<EOL>\"<STR_LIT>\".format(block_count * self.envs[\"<STR_LIT>\"])]<EOL>status, _, _ = cij.ssh.command(cmd, shell=True)<EOL>return status<EOL>", "docstring": "nvme read", "id": "f13334:c0:m17"}
{"signature": "def read(*parts):", "body": "here = os.path.abspath(os.path.dirname(__file__))<EOL>with codecs.open(os.path.join(here, *parts), '<STR_LIT:r>') as pfp:<EOL><INDENT>return pfp.read()<EOL><DEDENT>", "docstring": "Read parts to use a e.g. long_description", "id": "f13335:m0"}
{"signature": "def regex_find(pattern, content):", "body": "find = re.findall(pattern, content)<EOL>if not find:<EOL><INDENT>cij.err(\"<STR_LIT>\" % pattern)<EOL>cij.err(\"<STR_LIT>\" % content)<EOL>return '<STR_LIT>'<EOL><DEDENT>if len(find) >= <NUM_LIT:2>:<EOL><INDENT>cij.err(\"<STR_LIT>\" % pattern)<EOL>cij.err(\"<STR_LIT>\" % content)<EOL>return '<STR_LIT>'<EOL><DEDENT>return find[<NUM_LIT:0>]<EOL>", "docstring": "Find the given 'pattern' in 'content", "id": "f13336:m1"}
{"signature": "def env():", "body": "if cij.ssh.env():<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>board = cij.env_to_dict(PREFIX, REQUIRED)   <EOL>if board is None:<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>board[\"<STR_LIT>\"] = \"<STR_LIT:_>\".join([board[r] for r in REQUIRED[:-<NUM_LIT:1>]])<EOL>board[\"<STR_LIT>\"] = \"<STR_LIT:->\".join([board[\"<STR_LIT>\"], board[\"<STR_LIT>\"]])<EOL>cij.env_export(PREFIX, EXPORTED, board)     <EOL>return <NUM_LIT:0><EOL>", "docstring": "Verify BOARD variables and construct exported variables", "id": "f13337:m0"}
{"signature": "def generate_steady_rt_pic(process_data, para_meter, scale, steady_time):", "body": "pic_path_steady = para_meter['<STR_LIT:filename>'] + '<STR_LIT>'<EOL>plt.figure(figsize=(<NUM_LIT:4> * scale, <NUM_LIT> * scale))<EOL>for key in process_data.keys():<EOL><INDENT>if len(process_data[key]) < steady_time:<EOL><INDENT>steady_time = len(process_data[key])<EOL><DEDENT>plt.scatter(process_data[key][-<NUM_LIT:1> * steady_time:, <NUM_LIT:0>],<EOL>process_data[key][-<NUM_LIT:1> * steady_time:, <NUM_LIT:1>], label=str(key), s=<NUM_LIT:10>)<EOL>steady_value = np.mean(process_data[key][-<NUM_LIT:1> * steady_time:, <NUM_LIT:1>])<EOL>steady_value_5 = steady_value * (<NUM_LIT:1> + <NUM_LIT>)<EOL>steady_value_10 = steady_value * (<NUM_LIT:1> + <NUM_LIT:0.1>)<EOL>steady_value_ng_5 = steady_value * (<NUM_LIT:1> - <NUM_LIT>)<EOL>steady_value_ng_10 = steady_value * (<NUM_LIT:1> - <NUM_LIT:0.1>)<EOL>plt.plot(process_data[key][-<NUM_LIT:1> * steady_time:, <NUM_LIT:0>], [steady_value] * steady_time, '<STR_LIT:b>')<EOL>plt.plot(process_data[key][-<NUM_LIT:1> * steady_time:, <NUM_LIT:0>], [steady_value_5] * steady_time, '<STR_LIT:g>')<EOL>plt.plot(process_data[key][-<NUM_LIT:1> * steady_time:, <NUM_LIT:0>],<EOL>[steady_value_ng_5] * steady_time, '<STR_LIT:g>')<EOL>plt.plot(process_data[key][-<NUM_LIT:1> * steady_time:, <NUM_LIT:0>], [steady_value_10] * steady_time, '<STR_LIT:r>')<EOL>plt.plot(process_data[key][-<NUM_LIT:1> * steady_time:, <NUM_LIT:0>],<EOL>[steady_value_ng_10] * steady_time, '<STR_LIT:r>')<EOL><DEDENT>plt.title(para_meter['<STR_LIT:title>'] + '<STR_LIT>')<EOL>plt.xlabel(para_meter['<STR_LIT>'] + '<STR_LIT>')<EOL>plt.ylabel(para_meter['<STR_LIT>'] + '<STR_LIT>')<EOL>plt.legend(loc='<STR_LIT>')<EOL>plt.savefig(pic_path_steady)<EOL>return pic_path_steady<EOL>", "docstring": "generate rate steady", "id": "f13338:m1"}
{"signature": "def import_source(self, sheet, source, delimiter=\"<STR_LIT:U+002C>\"):", "body": "<EOL>if '<STR_LIT:U+0020>' in sheet:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % sheet)<EOL><DEDENT>if not source.endswith(\"<STR_LIT>\") and not source.endswith(\"<STR_LIT>\"):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % source)<EOL><DEDENT>self.source_sheet = sheet<EOL>source_data = np.loadtxt(source, dtype=str, delimiter=delimiter)<EOL>self.source_data = {\"<STR_LIT:title>\": source_data[<NUM_LIT:0>].tolist(),<EOL>\"<STR_LIT:data>\": source_data[<NUM_LIT:1>:]}<EOL>cell_format_title = self.workbook.add_format({'<STR_LIT>': True,<EOL>'<STR_LIT>': u'<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': <NUM_LIT>})<EOL>cell_format = self.workbook.add_format({'<STR_LIT>': False,<EOL>'<STR_LIT>': u'<STR_LIT>',<EOL>'<STR_LIT>': <NUM_LIT:0>})<EOL>worksheet = self.workbook.add_worksheet(sheet)<EOL>worksheet.write_row('<STR_LIT>', self.source_data['<STR_LIT:title>'], cell_format_title)<EOL>_, col_num = self.source_data['<STR_LIT:data>'].shape<EOL>for i in range(col_num):<EOL><INDENT>try:<EOL><INDENT>data_array = self.source_data['<STR_LIT:data>'][:, i].astype(float)<EOL><DEDENT>except ValueError:<EOL><INDENT>data_array = self.source_data['<STR_LIT:data>'][:, i]<EOL><DEDENT>worksheet.write_column(<NUM_LIT:1>, i, data_array.tolist(), cell_format)<EOL><DEDENT>", "docstring": "Function:\n    Save original data into specific sheet, and try to translate data to float type\nInput:\n    sheet: Must be a non exists sheet\n    source: File path of source", "id": "f13338:c0:m1"}
{"signature": "def gen_data_sheet(self, datafile, para_meter, scale=<NUM_LIT>, steady_time=<NUM_LIT>):", "body": "filename = os.path.splitext(os.path.split(datafile)[<NUM_LIT:1>])[<NUM_LIT:0>][:-<NUM_LIT:5>]<EOL>para_meter['<STR_LIT:filename>'] = filename<EOL>source_data = np.loadtxt(datafile, dtype=int, delimiter='<STR_LIT:U+002C>')[:, :<NUM_LIT:3>]<EOL>is_bw = '<STR_LIT>'in para_meter['<STR_LIT:title>'].lower()<EOL>file_data = process_rt_data(source_data, is_bw)<EOL>pic_path = generate_rt_pic(file_data, para_meter, scale)<EOL>pic_path_steady = generate_steady_rt_pic(file_data, para_meter, scale, steady_time)<EOL>if para_meter['<STR_LIT>'] in self.sheetname_dict.keys():<EOL><INDENT>self.sheetname_dict[para_meter['<STR_LIT>']] =self.sheetname_dict[para_meter['<STR_LIT>']] + <NUM_LIT:1><EOL>chart_sheet = self.workbook.get_worksheet_by_name(para_meter['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>self.sheetname_dict[para_meter['<STR_LIT>']] = <NUM_LIT:1><EOL>chart_sheet = self.workbook.add_worksheet(para_meter['<STR_LIT>'])<EOL><DEDENT>chart_sheet.insert_image('<STR_LIT>' %<EOL>(<NUM_LIT:5> + (self.sheetname_dict[para_meter['<STR_LIT>']] - <NUM_LIT:1>) * <NUM_LIT:30>),<EOL>pic_path)<EOL>chart_sheet.insert_image('<STR_LIT>' %<EOL>(<NUM_LIT:5> + (self.sheetname_dict[para_meter['<STR_LIT>']] - <NUM_LIT:1>) * <NUM_LIT:30>),<EOL>pic_path_steady)<EOL>self.__insert_value(chart_sheet, file_data,<EOL><NUM_LIT:5> + (self.sheetname_dict[para_meter['<STR_LIT>']] - <NUM_LIT:1>) * <NUM_LIT:30>,<EOL>steady_time)<EOL>self.pic_list.append(pic_path)<EOL>self.pic_list.append(pic_path_steady)<EOL>", "docstring": "datafile, sheetname, x_axis_name, y_axis_name, title,\nFunction:\n    Turn realtime bw data into picture, and save into specific sheet\nInput:\n    sheetname: If already exists, new chart will be added continuely.\n               Otherwise, it would create new sheet;\n    x_axis_name: x_axis name;\n    y_axis_name: y_axis name;\n    title: picure name;\n    scale\uff1b size of 
picture.", "id": "f13338:c1:m2"}
{"signature": "def close(self):", "body": "self.workbook.close()<EOL>", "docstring": "Close work book", "id": "f13338:c0:m6"}
{"signature": "def sum_data(filter_data, is_bw):", "body": "for index in range(len(filter_data) - <NUM_LIT:1>):<EOL><INDENT>if filter_data[index][<NUM_LIT:0>] > filter_data[index + <NUM_LIT:1>][<NUM_LIT:0>]:<EOL><INDENT>max_index = index + <NUM_LIT:1><EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>max_index = len(filter_data)<EOL><DEDENT>print(\"<STR_LIT>\", max_index + <NUM_LIT:1>)<EOL>num_jobs = int(round(len(filter_data) * <NUM_LIT:1.0> / max_index))<EOL>print(\"<STR_LIT>\", num_jobs)<EOL>dict_time = Counter(filter_data[:, <NUM_LIT:0>])<EOL>list_sum = []<EOL>for time_index in range(<NUM_LIT:1>, max_index + <NUM_LIT:1>):<EOL><INDENT>if dict_time.get(time_index * <NUM_LIT:1000>, <NUM_LIT:0>) != num_jobs:<EOL><INDENT>print(\"<STR_LIT>\" % (<EOL>time_index * <NUM_LIT:1000>, dict_time.get(time_index * <NUM_LIT:1000>, <NUM_LIT:0>), num_jobs<EOL>))<EOL>continue<EOL><DEDENT>filter_mask = (filter_data[:, <NUM_LIT:0>] == time_index * <NUM_LIT:1000>)<EOL>sum_rst = np.sum(filter_data[filter_mask][:, <NUM_LIT:1>])<EOL>if is_bw:<EOL><INDENT>sum_rst = sum_rst / <NUM_LIT><EOL><DEDENT>list_sum.append([time_index, sum_rst])<EOL><DEDENT>return np.array(list_sum)<EOL>", "docstring": "caculate sum", "id": "f13338:m3"}
{"signature": "def generate_rt_pic(process_data, para_meter, scale):", "body": "pic_path = para_meter['<STR_LIT:filename>'] + '<STR_LIT>'<EOL>plt.figure(figsize=(<NUM_LIT> * scale, <NUM_LIT> * scale))<EOL>for key in process_data.keys():<EOL><INDENT>plt.plot(process_data[key][:, <NUM_LIT:0>], process_data[key][:, <NUM_LIT:1>], label=str(key))<EOL><DEDENT>plt.title(para_meter['<STR_LIT:title>'])<EOL>plt.xlabel(para_meter['<STR_LIT>'])<EOL>plt.ylabel(para_meter['<STR_LIT>'])<EOL>plt.legend(loc='<STR_LIT>')<EOL>plt.savefig(pic_path)<EOL>return pic_path<EOL>", "docstring": "generate rater pic", "id": "f13338:m0"}
{"signature": "def generate_chart(self, properties):", "body": "<EOL>if not {'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'}.issubset(set(properties.keys())):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % properties.keys())<EOL><DEDENT>mask = self.__filter_data(properties['<STR_LIT>'])<EOL>chart = self.__generate_chart(mask, properties)<EOL>sheet = properties['<STR_LIT>']<EOL>if sheet in self.sheet_dict.keys():<EOL><INDENT>self.sheet_dict[sheet] += <NUM_LIT:1><EOL>worksheet = self.workbook.get_worksheet_by_name(sheet)<EOL><DEDENT>else:<EOL><INDENT>self.sheet_dict[sheet] = <NUM_LIT:1><EOL>worksheet = self.workbook.add_worksheet(sheet)<EOL><DEDENT>worksheet.insert_chart('<STR_LIT>' % (<NUM_LIT:5> + (self.sheet_dict[sheet] - <NUM_LIT:1>) * <NUM_LIT>), chart)<EOL>", "docstring": "Function:\n    Generate and save chart to specific sheet.\nInput:\n    sheet: If already exists, new chart will be added below.\n           Otherwise, it would create a new sheet;\n    x_axis: Specify x axis;\n    y_axis: Specify y axis;\n    series: Specify series;\n    filters: dict type, use to filter useful data from original data;\n    title: if None, the chart will create without title;\n    x_axis_name: if None, use x_axis instead;\n    y_axis_name: if None, use y_axis instead;", "id": "f13338:c0:m2"}
{"signature": "def round_data(filter_data):", "body": "for index, _ in enumerate(filter_data):<EOL><INDENT>filter_data[index][<NUM_LIT:0>] = round(filter_data[index][<NUM_LIT:0>] / <NUM_LIT>) * <NUM_LIT><EOL><DEDENT>return filter_data<EOL>", "docstring": "round the data", "id": "f13338:m4"}
{"signature": "def process_tsuite(tsuite):", "body": "<EOL>tsuite[\"<STR_LIT>\"] = runlogs_to_html(tsuite[\"<STR_LIT>\"])<EOL>tsuite[\"<STR_LIT>\"] = aux_listing(tsuite[\"<STR_LIT>\"])<EOL>tsuite[\"<STR_LIT>\"] = extract_hook_names(tsuite)<EOL>return True<EOL>", "docstring": "Goes through the tsuite and processes \"*.log", "id": "f13339:m6"}
{"signature": "def src_to_html(fpath):", "body": "if not os.path.exists(fpath):<EOL><INDENT>return \"<STR_LIT>\" % fpath<EOL><DEDENT>return open(fpath, \"<STR_LIT:r>\").read()<EOL>", "docstring": "Returns content of the given 'fpath' with HTML annotations for syntax\nhighlighting", "id": "f13339:m4"}
{"signature": "def postprocess(trun):", "body": "plog = []<EOL>plog.append((\"<STR_LIT>\", process_trun(trun)))<EOL>for tsuite in trun[\"<STR_LIT>\"]:<EOL><INDENT>plog.append((\"<STR_LIT>\", process_tsuite(tsuite)))<EOL>for tcase in tsuite[\"<STR_LIT>\"]:<EOL><INDENT>plog.append((\"<STR_LIT>\", process_tcase(tcase)))<EOL><DEDENT><DEDENT>for task, success in plog:<EOL><INDENT>if not success:<EOL><INDENT>cij.err(\"<STR_LIT>\" % task)<EOL><DEDENT><DEDENT>return sum((success for task, success in plog))<EOL>", "docstring": "Perform postprocessing of the given test run", "id": "f13339:m9"}
{"signature": "def tcase_parse_descr(tcase):", "body": "descr_short = \"<STR_LIT>\"<EOL>descr_long = \"<STR_LIT>\"<EOL>try:<EOL><INDENT>comment = tcase_comment(tcase)<EOL><DEDENT>except (IOError, OSError, ValueError) as exc:<EOL><INDENT>comment = []<EOL>cij.err(\"<STR_LIT>\" % (exc, tcase))<EOL><DEDENT>comment = [l for l in comment if l.strip()]     <EOL>for line_number, line in enumerate(comment):<EOL><INDENT>if line.startswith(\"<STR_LIT:#>\"):<EOL><INDENT>comment[line_number] = line[<NUM_LIT:1>:]<EOL><DEDENT><DEDENT>if comment:<EOL><INDENT>descr_short = comment[<NUM_LIT:0>]<EOL><DEDENT>if len(comment) > <NUM_LIT:1>:<EOL><INDENT>descr_long = \"<STR_LIT:\\n>\".join(comment[<NUM_LIT:1>:])<EOL><DEDENT>return descr_short, descr_long<EOL>", "docstring": "Parse descriptions from the the given tcase", "id": "f13339:m2"}
{"signature": "def dset_to_html(dset, tmpl_fpath):", "body": "def stamp_to_datetime(stamp):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return datetime.datetime.fromtimestamp(int(stamp))<EOL><DEDENT>def strftime(dtime, fmt):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return dtime.strftime(fmt)<EOL><DEDENT>def ansi_to_html(ansi):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>conv = ansi2html.Ansi2HTMLConverter(<EOL>scheme=\"<STR_LIT>\",<EOL>inline=True<EOL>)<EOL>html = conv.convert(ansi, full=False)<EOL>with open(\"<STR_LIT>\", \"<STR_LIT:w>\") as html_file:<EOL><INDENT>html_file.write(html)<EOL><DEDENT>return html<EOL><DEDENT>tmpl_dpath = os.path.dirname(tmpl_fpath)<EOL>tmpl_fname = os.path.basename(tmpl_fpath)<EOL>env = jinja2.Environment(<EOL>autoescape=True,<EOL>loader=jinja2.FileSystemLoader(tmpl_dpath)<EOL>)<EOL>env.filters['<STR_LIT>'] = stamp_to_datetime<EOL>env.filters['<STR_LIT>'] = strftime<EOL>env.filters['<STR_LIT>'] = ansi_to_html<EOL>tmpl = env.get_template(tmpl_fname)<EOL>return tmpl.render(dset=dset)<EOL>", "docstring": "@returns A HTML representation of the given 'dset' using the template at\n'tmpl_fpath'", "id": "f13339:m10"}
{"signature": "def aux_listing(aux_root):", "body": "listing = []<EOL>for root, _, fnames in os.walk(aux_root):<EOL><INDENT>count = len(aux_root.split(os.sep))<EOL>prefix = root.split(os.sep)[count:]<EOL>for fname in fnames:<EOL><INDENT>listing.append(os.sep.join(prefix + [fname]))<EOL><DEDENT><DEDENT>return listing<EOL>", "docstring": "Listing", "id": "f13339:m5"}
{"signature": "def extract_hook_names(ent):", "body": "hnames = []<EOL>for hook in ent[\"<STR_LIT>\"][\"<STR_LIT>\"] + ent[\"<STR_LIT>\"][\"<STR_LIT>\"]:<EOL><INDENT>hname = os.path.basename(hook[\"<STR_LIT>\"])<EOL>hname = os.path.splitext(hname)[<NUM_LIT:0>]<EOL>hname = hname.strip()<EOL>hname = hname.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>hname = hname.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>if hname in hnames:<EOL><INDENT>continue<EOL><DEDENT>hnames.append(hname)<EOL><DEDENT>hnames.sort()<EOL>return hnames<EOL>", "docstring": "Extract hook names from the given entity", "id": "f13339:m0"}
{"signature": "def envs():", "body": "variables = {}<EOL>for req in REQS:<EOL><INDENT>prefix = req.upper()<EOL>variables[prefix] = cij.env_to_dict(<EOL>prefix, getattr(cij, req).REQUIRED + getattr(cij, req).EXPORTED<EOL>)<EOL><DEDENT>return variables<EOL>", "docstring": "Return variables defined by modules required by test", "id": "f13340:m1"}
{"signature": "def tindex(spath=None):", "body": "spath = spath if spath else os.environ.get(\"<STR_LIT>\", None)<EOL>if spath is None:<EOL><INDENT>return None<EOL><DEDENT>tests = []                          <EOL>for root, _, files in os.walk(spath):<EOL><INDENT>if root != spath:<EOL><INDENT>continue<EOL><DEDENT>tests += [f for f in files if f[-<NUM_LIT:3>:] in [\"<STR_LIT>\", \"<STR_LIT>\"]]<EOL><DEDENT>return tests<EOL>", "docstring": "Lists tindex in CIJ_TESTCASES\n\n@Returns On success, a list of filenames is returned. On error, None is\nreturned", "id": "f13340:m0"}
{"signature": "def texit(msg=None, rcode=<NUM_LIT:1>):", "body": "msg = \"<STR_LIT>\" % msg if msg else \"<STR_LIT>\"<EOL>if rcode:<EOL><INDENT>cij.err(\"<STR_LIT>\" % msg)<EOL><DEDENT>else:<EOL><INDENT>cij.good(\"<STR_LIT>\" % msg)<EOL><DEDENT>sys.exit(rcode)<EOL>", "docstring": "Exit the test", "id": "f13340:m4"}
{"signature": "def remove():", "body": "if env():<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>lnvm = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)<EOL>cij.emph(\"<STR_LIT>\" % lnvm[\"<STR_LIT>\"])<EOL>cmd = [\"<STR_LIT>\" % (lnvm[\"<STR_LIT>\"])]<EOL>rcode, _, _ = cij.ssh.command(cmd, shell=True)<EOL>if rcode:<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Remove LNVM device", "id": "f13341:m3"}
{"signature": "def create():", "body": "if env():<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>nvme = cij.env_to_dict(\"<STR_LIT>\", [\"<STR_LIT>\"])<EOL>lnvm = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)<EOL>cij.emph(\"<STR_LIT>\" % lnvm[\"<STR_LIT>\"])<EOL>cmd = [\"<STR_LIT>\" % (<EOL>nvme[\"<STR_LIT>\"], lnvm[\"<STR_LIT>\"], lnvm[\"<STR_LIT>\"], lnvm[\"<STR_LIT>\"], lnvm[\"<STR_LIT>\"])]<EOL>rcode, _, _ = cij.ssh.command(cmd, shell=True)<EOL>if rcode:<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Create LNVM device", "id": "f13341:m1"}
{"signature": "def recover():", "body": "if env():<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>nvme = cij.env_to_dict(\"<STR_LIT>\", [\"<STR_LIT>\"])<EOL>lnvm = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)<EOL>cij.emph(\"<STR_LIT>\" % lnvm[\"<STR_LIT>\"])<EOL>cmd = [\"<STR_LIT>\" % (<EOL>nvme[\"<STR_LIT>\"], lnvm[\"<STR_LIT>\"], lnvm[\"<STR_LIT>\"], lnvm[\"<STR_LIT>\"], lnvm[\"<STR_LIT>\"])]<EOL>rcode, _, _ = cij.ssh.command(cmd, shell=True)<EOL>if rcode:<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Recover LNVM device", "id": "f13341:m2"}
{"signature": "def start(self):", "body": "self.__thread = Thread(target=self.__run, args=(True, False))<EOL>self.__thread.setDaemon(True)<EOL>self.__thread.start()<EOL>", "docstring": "Start DMESG job in thread", "id": "f13342:c0:m2"}
{"signature": "def terminate(self):", "body": "if self.__thread:<EOL><INDENT>cmd = [\"<STR_LIT>\"]<EOL>status, output, _ = cij.util.execute(cmd, shell=True, echo=True)<EOL>if status:<EOL><INDENT>cij.warn(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>tty = output.split()[<NUM_LIT:1>]<EOL>cmd = [\"<STR_LIT>\".format(\"<STR_LIT:U+0020>\".join(self.__prefix), tty)]<EOL>status, _, _ = cij.util.execute(cmd, shell=True, echo=True)<EOL>if status:<EOL><INDENT>cij.warn(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>self.__thread.join()<EOL>self.__thread = None<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Terminate DMESG job", "id": "f13342:c0:m3"}
{"signature": "def index(search_path, ext=None):", "body": "if ext is None:<EOL><INDENT>ext = \"<STR_LIT>\"<EOL><DEDENT>fnames = set([])<EOL>for _, _, files in os.walk(search_path):<EOL><INDENT>for fname in files:<EOL><INDENT>if os.path.splitext(fname)[-<NUM_LIT:1>] in EXTS[ext]:<EOL><INDENT>fnames.add(fname)<EOL><DEDENT><DEDENT><DEDENT>return fnames<EOL>", "docstring": "@returns a set of filenames with extension 'ext' in 'search_path", "id": "f13343:m0"}
{"signature": "def env_export(prefix, exported, env):", "body": "for exp in exported:<EOL><INDENT>ENV[\"<STR_LIT:_>\".join([prefix, exp])] = env[exp]<EOL><DEDENT>", "docstring": "Define the list of 'exported' variables with 'prefix' with values from 'env'", "id": "f13343:m9"}
{"signature": "def paths_from_env(prefix=None, names=None):", "body": "def expand_path(path):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))<EOL><DEDENT>if prefix is None:<EOL><INDENT>prefix = \"<STR_LIT>\"<EOL><DEDENT>if names is None:<EOL><INDENT>names = [<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\"<EOL>]<EOL><DEDENT>conf = {v: os.environ.get(\"<STR_LIT:_>\".join([prefix, v])) for v in names}<EOL>for env in (e for e in conf.keys() if e[:len(prefix)] in names and conf[e]):<EOL><INDENT>conf[env] = expand_path(conf[env])<EOL>if not os.path.exists(conf[env]):<EOL><INDENT>err(\"<STR_LIT>\" % (prefix, env, conf[env]))<EOL><DEDENT><DEDENT>return conf<EOL>", "docstring": "Construct dict of paths from environment variables", "id": "f13343:m7"}
{"signature": "def length(self):", "body": "return self.m_len<EOL>", "docstring": "Get length of types", "id": "f13344:c0:m3"}
{"signature": "def types(self):", "body": "return self.m_types<EOL>", "docstring": "Get types of buffer", "id": "f13344:c0:m5"}
{"signature": "def compare(self, buf, offset=<NUM_LIT:0>, length=<NUM_LIT:1>, ignore=\"<STR_LIT>\"):", "body": "for i in range(offset, offset + length):<EOL><INDENT>if isinstance(self.m_types, (type(Union), type(Structure))):<EOL><INDENT>if compare(self.m_buf[i], buf[i], ignore=ignore):<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>elif self.m_buf[i] != buf[i]:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Compare buffer", "id": "f13344:c0:m10"}
{"signature": "def dump(buf, indent=<NUM_LIT:0>, skip=\"<STR_LIT>\"):", "body": "if not isinstance(type(buf), (type(Union), type(Structure))):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % type(buf))<EOL><DEDENT>for field in getattr(buf, '<STR_LIT>'):<EOL><INDENT>name, types = field[<NUM_LIT:0>], field[<NUM_LIT:1>]<EOL>if name in skip:<EOL><INDENT>return<EOL><DEDENT>value = getattr(buf, name)<EOL>if isinstance(types, (type(Union), type(Structure))):<EOL><INDENT>cij.info(\"<STR_LIT>\" % (\"<STR_LIT:U+0020>\" * indent, name))<EOL>dump(value, indent+<NUM_LIT:2>, skip)<EOL><DEDENT>elif isinstance(types, type(Array)):<EOL><INDENT>for i, item in enumerate(value):<EOL><INDENT>name_index = \"<STR_LIT>\" % (name, i)<EOL>if isinstance(types, (type(Union), type(Structure))):<EOL><INDENT>cij.info(\"<STR_LIT>\" % (\"<STR_LIT:U+0020>\" * indent, name_index))<EOL>dump(item, indent + <NUM_LIT:2>, skip)<EOL><DEDENT>else:<EOL><INDENT>cij.info(\"<STR_LIT>\" % (\"<STR_LIT:U+0020>\" * indent, name_index, item))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>cij.info(\"<STR_LIT>\" % (\"<STR_LIT:U+0020>\" * indent, name, value))<EOL><DEDENT><DEDENT>", "docstring": "Dump UnionType/StructType to STDOUT", "id": "f13344:m0"}
{"signature": "def read(self, path):", "body": "with open(path, \"<STR_LIT:rb>\") as fout:<EOL><INDENT>memmove(self.m_buf, fout.read(self.m_size), self.m_size)<EOL><DEDENT>", "docstring": "Read file to buffer", "id": "f13344:c0:m8"}
{"signature": "def power_off(self, interval=<NUM_LIT:200>):", "body": "if self.__power_off_port is None:<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>return self.__press(self.__power_off_port, interval=interval)<EOL>", "docstring": "230v power off", "id": "f13345:c0:m6"}
{"signature": "def run(self, shell=True, cmdline=False, echo=True):", "body": "if env():<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>cmd = [\"<STR_LIT>\"] + self.__parse_parms()<EOL>if cmdline:<EOL><INDENT>cij.emph(\"<STR_LIT>\" % (shell, cmd))<EOL><DEDENT>return cij.ssh.command(cmd, shell, echo)<EOL>", "docstring": "Run FIO job", "id": "f13347:c1:m8"}
{"signature": "def pkill():", "body": "if env():<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>cmd = [\"<STR_LIT>\"]<EOL>status, _, _ = cij.ssh.command(cmd, shell=True, echo=False)<EOL>if not status:<EOL><INDENT>status, _, _ = cij.ssh.command([\"<STR_LIT>\"], shell=True)<EOL>if status:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Kill all of FIO processes", "id": "f13347:m1"}
{"signature": "def get_parm(self, key):", "body": "if key in self.__parm.keys():<EOL><INDENT>return self.__parm[key]<EOL><DEDENT>return None<EOL>", "docstring": "Get parameter of FIO", "id": "f13347:c1:m4"}
{"signature": "def __parse_parms(self):", "body": "args = list()<EOL>for key, val in self.__parm.items():<EOL><INDENT>key = key.replace(\"<STR_LIT>\", \"<STR_LIT>\").lower()<EOL>if key == \"<STR_LIT>\":<EOL><INDENT>args.append(\"<STR_LIT>\")<EOL><DEDENT>if val is None:<EOL><INDENT>args.append(\"<STR_LIT>\" % key)<EOL><DEDENT>else:<EOL><INDENT>args.append(\"<STR_LIT>\" % (key, val))<EOL><DEDENT><DEDENT>return args<EOL>", "docstring": "Translate dict parameters to string", "id": "f13347:c1:m1"}
{"signature": "def result(self):", "body": "return self.output<EOL>", "docstring": "Get result of thread", "id": "f13347:c0:m2"}
{"signature": "def run(self):", "body": "self.output = self.target(*self.args)<EOL>", "docstring": "Start run thread", "id": "f13347:c0:m1"}
{"signature": "def pwr_reset():", "body": "cmd(\"<STR_LIT>\")<EOL>", "docstring": "Target reset", "id": "f13348:m4"}
{"signature": "def env():", "body": "ipmi = cij.env_to_dict(PREFIX, REQUIRED)<EOL>if ipmi is None:<EOL><INDENT>ipmi[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>ipmi[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>ipmi[\"<STR_LIT>\"] = \"<STR_LIT:localhost>\"<EOL>ipmi[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>cij.info(\"<STR_LIT>\" % (<EOL>ipmi[\"<STR_LIT>\"], ipmi[\"<STR_LIT>\"], ipmi[\"<STR_LIT>\"], ipmi[\"<STR_LIT>\"]<EOL>))<EOL><DEDENT>cij.env_export(PREFIX, EXPORTED, ipmi)<EOL>return <NUM_LIT:0><EOL>", "docstring": "Verify IPMI environment", "id": "f13348:m0"}
{"signature": "def pwr_off():", "body": "cmd(\"<STR_LIT>\")<EOL>", "docstring": "Target Off", "id": "f13348:m3"}
{"signature": "def tcase_setup(trun, parent, tcase_fname):", "body": "<EOL>case = copy.deepcopy(TESTCASE)<EOL>case[\"<STR_LIT>\"] = tcase_fname<EOL>case[\"<STR_LIT>\"] = os.sep.join([trun[\"<STR_LIT>\"][\"<STR_LIT>\"], case[\"<STR_LIT>\"]])<EOL>if not os.path.exists(case[\"<STR_LIT>\"]):<EOL><INDENT>cij.err('<STR_LIT>' % case[\"<STR_LIT>\"])<EOL>return None<EOL><DEDENT>case[\"<STR_LIT:name>\"] = os.path.splitext(case[\"<STR_LIT>\"])[<NUM_LIT:0>]<EOL>case[\"<STR_LIT>\"] = \"<STR_LIT:/>\".join([parent[\"<STR_LIT>\"], case[\"<STR_LIT>\"]])<EOL>case[\"<STR_LIT>\"] = os.sep.join([parent[\"<STR_LIT>\"], case[\"<STR_LIT>\"]])<EOL>case[\"<STR_LIT>\"] = os.sep.join([case[\"<STR_LIT>\"], \"<STR_LIT>\"])<EOL>case[\"<STR_LIT>\"] = os.sep.join([case[\"<STR_LIT>\"], \"<STR_LIT>\"])<EOL>case[\"<STR_LIT>\"] = os.sep.join([case[\"<STR_LIT>\"], case[\"<STR_LIT>\"]])<EOL>case[\"<STR_LIT>\"].update(copy.deepcopy(parent[\"<STR_LIT>\"]))<EOL>os.makedirs(case[\"<STR_LIT>\"])                       <EOL>os.makedirs(case[\"<STR_LIT>\"])<EOL>shutil.copyfile(case[\"<STR_LIT>\"], case[\"<STR_LIT>\"])  <EOL>case[\"<STR_LIT>\"] = hooks_setup(trun, case, parent.get(\"<STR_LIT>\"))<EOL>return case<EOL>", "docstring": "Create and initialize a testcase", "id": "f13350:m7"}
{"signature": "def yml_fpath(output_path):", "body": "return os.sep.join([output_path, \"<STR_LIT>\"])<EOL>", "docstring": "Returns the path to the trun-file", "id": "f13350:m0"}
{"signature": "def tsuite_setup(trun, declr, enum):", "body": "suite = copy.deepcopy(TESTSUITE)  <EOL>suite[\"<STR_LIT:name>\"] = declr.get(\"<STR_LIT:name>\")<EOL>if suite[\"<STR_LIT:name>\"] is None:<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>suite[\"<STR_LIT>\"] = declr.get(\"<STR_LIT>\")<EOL>suite[\"<STR_LIT>\"] = \"<STR_LIT>\" % (suite[\"<STR_LIT:name>\"], enum)<EOL>suite[\"<STR_LIT>\"] = os.sep.join([trun[\"<STR_LIT>\"][\"<STR_LIT>\"], suite[\"<STR_LIT>\"]])<EOL>suite[\"<STR_LIT>\"] = os.sep.join([suite[\"<STR_LIT>\"], \"<STR_LIT>\"])<EOL>suite[\"<STR_LIT>\"].update(copy.deepcopy(trun[\"<STR_LIT>\"]))<EOL>suite[\"<STR_LIT>\"].update(copy.deepcopy(declr.get(\"<STR_LIT>\", {})))<EOL>os.makedirs(suite[\"<STR_LIT>\"])<EOL>os.makedirs(suite[\"<STR_LIT>\"])<EOL>suite[\"<STR_LIT>\"] = hooks_setup(trun, suite, declr.get(\"<STR_LIT>\"))<EOL>suite[\"<STR_LIT>\"] = declr.get(\"<STR_LIT>\", [])<EOL>suite[\"<STR_LIT>\"] = \"<STR_LIT>\" % suite[\"<STR_LIT:name>\"]<EOL>suite[\"<STR_LIT>\"] = os.sep.join([trun[\"<STR_LIT>\"][\"<STR_LIT>\"], suite[\"<STR_LIT>\"]])<EOL>tcase_fpaths = []                               <EOL>if os.path.exists(suite[\"<STR_LIT>\"]):              <EOL><INDENT>suite_lines = (<EOL>l.strip() for l in open(suite[\"<STR_LIT>\"]).read().splitlines()<EOL>)<EOL>tcase_fpaths.extend(<EOL>(l for l in suite_lines if len(l) > <NUM_LIT:1> and l[<NUM_LIT:0>] != \"<STR_LIT:#>\")<EOL>)<EOL><DEDENT>else:                                           <EOL><INDENT>tcase_fpaths.extend(declr.get(\"<STR_LIT>\", []))<EOL><DEDENT>if len(set(tcase_fpaths)) != len(tcase_fpaths):<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>for tcase_fname in tcase_fpaths:                <EOL><INDENT>tcase = tcase_setup(trun, suite, tcase_fname)<EOL>if not tcase:<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>suite[\"<STR_LIT>\"].append(tcase)<EOL><DEDENT>return suite<EOL>", "docstring": "Creates and initialized a TESTSUITE struct and 
site-effects such as creating\noutput directories and forwarding initialization of testcases", "id": "f13350:m10"}
{"signature": "def tsuite_enter(trun, tsuite):", "body": "if trun[\"<STR_LIT>\"][\"<STR_LIT>\"]:<EOL><INDENT>cij.emph(\"<STR_LIT>\" % tsuite[\"<STR_LIT:name>\"])<EOL><DEDENT>rcode = <NUM_LIT:0><EOL>for hook in tsuite[\"<STR_LIT>\"][\"<STR_LIT>\"]:     <EOL><INDENT>rcode = script_run(trun, hook)<EOL>if rcode:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if trun[\"<STR_LIT>\"][\"<STR_LIT>\"]:<EOL><INDENT>cij.emph(\"<STR_LIT>\" % rcode, rcode)<EOL><DEDENT>return rcode<EOL>", "docstring": "Triggers when entering the given testsuite", "id": "f13350:m9"}
{"signature": "def main(conf):", "body": "fpath = yml_fpath(conf[\"<STR_LIT>\"])<EOL>if os.path.exists(fpath):   <EOL><INDENT>cij.err(\"<STR_LIT>\" % fpath)<EOL>return <NUM_LIT:1><EOL><DEDENT>trun = trun_setup(conf)         <EOL>if not trun:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>trun_to_file(trun)              <EOL>trun_emph(trun)                 <EOL>tr_err = <NUM_LIT:0><EOL>tr_ent_err = trun_enter(trun)<EOL>for tsuite in (ts for ts in trun[\"<STR_LIT>\"] if not tr_ent_err):<EOL><INDENT>ts_err = <NUM_LIT:0><EOL>ts_ent_err = tsuite_enter(trun, tsuite)<EOL>for tcase in (tc for tc in tsuite[\"<STR_LIT>\"] if not ts_ent_err):<EOL><INDENT>tc_err = tcase_enter(trun, tsuite, tcase)<EOL>if not tc_err:<EOL><INDENT>tc_err += script_run(trun, tcase)<EOL>tc_err += tcase_exit(trun, tsuite, tcase)<EOL><DEDENT>tcase[\"<STR_LIT:status>\"] = \"<STR_LIT>\" if tc_err else \"<STR_LIT>\"<EOL>trun[\"<STR_LIT>\"][tcase[\"<STR_LIT:status>\"]] += <NUM_LIT:1>  <EOL>trun[\"<STR_LIT>\"][\"<STR_LIT>\"] -= <NUM_LIT:1><EOL>ts_err += tc_err                        <EOL>trun_to_file(trun)                      <EOL><DEDENT>if not ts_ent_err:<EOL><INDENT>ts_err += tsuite_exit(trun, tsuite)<EOL><DEDENT>ts_err += ts_ent_err                        <EOL>tr_err += ts_err<EOL>tsuite[\"<STR_LIT:status>\"] = \"<STR_LIT>\" if ts_err else \"<STR_LIT>\"<EOL>cij.emph(\"<STR_LIT>\" % tsuite[\"<STR_LIT:status>\"], tsuite[\"<STR_LIT:status>\"] != \"<STR_LIT>\")<EOL><DEDENT>if not tr_ent_err:<EOL><INDENT>trun_exit(trun)<EOL><DEDENT>tr_err += tr_ent_err<EOL>trun[\"<STR_LIT:status>\"] = \"<STR_LIT>\" if tr_err else \"<STR_LIT>\"<EOL>trun[\"<STR_LIT>\"][\"<STR_LIT:end>\"] = int(time.time()) + <NUM_LIT:1>         <EOL>trun_to_file(trun)                                  <EOL>cij.emph(\"<STR_LIT>\" % trun[\"<STR_LIT>\"])<EOL>cij.emph(\"<STR_LIT>\" % trun[\"<STR_LIT:status>\"], trun[\"<STR_LIT:status>\"] != \"<STR_LIT>\")<EOL>return trun[\"<STR_LIT>\"][\"<STR_LIT>\"] + trun[\"<STR_LIT>\"][\"<STR_LIT>\"]<EOL>", 
"docstring": "CIJ Test Runner main entry point", "id": "f13350:m16"}
{"signature": "def hook_setup(parent, hook_fpath):", "body": "hook = copy.deepcopy(HOOK)<EOL>hook[\"<STR_LIT:name>\"] = os.path.splitext(os.path.basename(hook_fpath))[<NUM_LIT:0>]<EOL>hook[\"<STR_LIT:name>\"] = hook[\"<STR_LIT:name>\"].replace(\"<STR_LIT>\", \"<STR_LIT>\").replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>hook[\"<STR_LIT>\"] = parent[\"<STR_LIT>\"]<EOL>hook[\"<STR_LIT>\"] = hook_fpath<EOL>hook[\"<STR_LIT>\"] = \"<STR_LIT>\" % os.path.basename(hook[\"<STR_LIT>\"])<EOL>hook[\"<STR_LIT>\"] = os.sep.join([hook[\"<STR_LIT>\"], hook[\"<STR_LIT>\"]])<EOL>hook[\"<STR_LIT>\"] = os.sep.join([<EOL>hook[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\" % hook[\"<STR_LIT>\"]<EOL>])<EOL>hook[\"<STR_LIT>\"].update(copy.deepcopy(parent[\"<STR_LIT>\"]))<EOL>shutil.copyfile(hook[\"<STR_LIT>\"], hook[\"<STR_LIT>\"])<EOL>return hook<EOL>", "docstring": "Setup hook", "id": "f13350:m2"}
{"signature": "def tcase_exit(trun, tsuite, tcase):", "body": "<EOL>if trun[\"<STR_LIT>\"][\"<STR_LIT>\"]:<EOL><INDENT>cij.emph(\"<STR_LIT>\" % tcase[\"<STR_LIT>\"])<EOL><DEDENT>rcode = <NUM_LIT:0><EOL>for hook in reversed(tcase[\"<STR_LIT>\"][\"<STR_LIT>\"]):    <EOL><INDENT>rcode = script_run(trun, hook)<EOL>if rcode:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if trun[\"<STR_LIT>\"][\"<STR_LIT>\"]:<EOL><INDENT>cij.emph(\"<STR_LIT>\" % rcode, rcode)<EOL><DEDENT>return rcode<EOL>", "docstring": "...", "id": "f13350:m11"}
{"signature": "def pull(src, dst, folder=False):", "body": "if env():<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>args = []<EOL>if cij.ENV.get(\"<STR_LIT>\"):<EOL><INDENT>args.append(\"<STR_LIT>\")<EOL>args.append(cij.ENV.get(\"<STR_LIT>\"))<EOL><DEDENT>if cij.ENV.get(\"<STR_LIT>\"):<EOL><INDENT>args.append(\"<STR_LIT>\")<EOL>args.append(cij.ENV.get(\"<STR_LIT>\"))<EOL><DEDENT>if folder:<EOL><INDENT>args.append(\"<STR_LIT>\")<EOL><DEDENT>target = \"<STR_LIT>\" % (\"<STR_LIT:@>\".join([cij.ENV.get(\"<STR_LIT>\"), cij.ENV.get(\"<STR_LIT>\")]), src)<EOL>wrapped = [\"<STR_LIT>\", \"<STR_LIT:U+0020>\".join(args), target, dst]<EOL>return cij.util.execute(wrapped, shell=True, echo=True)<EOL>", "docstring": "SSH: pull data from remote linux", "id": "f13351:m3"}
{"signature": "def push(src, dst, folder=False):", "body": "if env():<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>args = []<EOL>if cij.ENV.get(\"<STR_LIT>\"):<EOL><INDENT>args.append(\"<STR_LIT>\")<EOL>args.append(cij.ENV.get(\"<STR_LIT>\"))<EOL><DEDENT>if cij.ENV.get(\"<STR_LIT>\"):<EOL><INDENT>args.append(\"<STR_LIT>\")<EOL>args.append(cij.ENV.get(\"<STR_LIT>\"))<EOL><DEDENT>if folder:<EOL><INDENT>args.append(\"<STR_LIT>\")<EOL><DEDENT>target = \"<STR_LIT>\" % (\"<STR_LIT:@>\".join([cij.ENV.get(\"<STR_LIT>\"), cij.ENV.get(\"<STR_LIT>\")]), dst)<EOL>wrapped = [\"<STR_LIT>\", \"<STR_LIT:U+0020>\".join(args), src, target]<EOL>return cij.util.execute(wrapped, shell=True, echo=True)<EOL>", "docstring": "SSH: push data to remote linux", "id": "f13351:m2"}
{"signature": "def command(cmd, shell=True, echo=True, suffix=None):", "body": "if env():<EOL><INDENT>cij.err(\"<STR_LIT>\")<EOL>return <NUM_LIT:1><EOL><DEDENT>prefix = []<EOL>if cij.ENV.get(\"<STR_LIT>\") == \"<STR_LIT:1>\":<EOL><INDENT>prefix.append(\"<STR_LIT>\")<EOL><DEDENT>if cij.ENV.get(\"<STR_LIT>\"):<EOL><INDENT>prefix.append(\"<STR_LIT>\")<EOL>prefix.append(cij.ENV.get(\"<STR_LIT>\"))<EOL><DEDENT>prefix.append(\"<STR_LIT>\")<EOL>args = []<EOL>if cij.ENV.get(\"<STR_LIT>\"):<EOL><INDENT>args.append(\"<STR_LIT>\")<EOL>args.append(cij.ENV.get(\"<STR_LIT>\"))<EOL><DEDENT>if cij.ENV.get(\"<STR_LIT>\"):<EOL><INDENT>args.append(\"<STR_LIT>\")<EOL>args.append(cij.ENV.get(\"<STR_LIT>\"))<EOL><DEDENT>args.append(\"<STR_LIT:@>\".join([cij.ENV.get(\"<STR_LIT>\"), cij.ENV.get(\"<STR_LIT>\")]))<EOL>wrapped = prefix + args + [\"<STR_LIT>\" % \"<STR_LIT:U+0020>\".join(cmd)]<EOL>if suffix:<EOL><INDENT>wrapped += suffix<EOL><DEDENT>return cij.util.execute(wrapped, shell, echo)<EOL>", "docstring": "SSH: Run the given command over SSH as defined in environment", "id": "f13351:m1"}
{"signature": "def env():", "body": "ssh = cij.env_to_dict(PREFIX, REQUIRED)<EOL>if \"<STR_LIT>\" in ssh:<EOL><INDENT>ssh[\"<STR_LIT>\"] = cij.util.expand_path(ssh[\"<STR_LIT>\"])<EOL><DEDENT>if cij.ENV.get(\"<STR_LIT>\") is None:<EOL><INDENT>cij.ENV[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>cij.warn(\"<STR_LIT>\" % (<EOL>cij.ENV.get(\"<STR_LIT>\")<EOL>))<EOL><DEDENT>if cij.ENV.get(\"<STR_LIT>\") is None:<EOL><INDENT>cij.ENV[\"<STR_LIT>\"] = \"<STR_LIT:1>\"<EOL>cij.warn(\"<STR_LIT>\" % (<EOL>cij.ENV.get(\"<STR_LIT>\")<EOL>))<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Verify SSH variables and construct exported variables", "id": "f13351:m0"}
{"signature": "def get_field_value_from_context(field_name, context_list):", "body": "field_path = field_name.split('<STR_LIT:.>')<EOL>if field_path[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>context_index = <NUM_LIT:0><EOL>field_path.pop(<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>context_index = -<NUM_LIT:1><EOL>while field_path[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>context_index -= <NUM_LIT:1><EOL>field_path.pop(<NUM_LIT:0>)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>field_value = context_list[context_index]<EOL>while len(field_path):<EOL><INDENT>field = field_path.pop(<NUM_LIT:0>)<EOL>if isinstance(field_value, (list, tuple, ListModel)):<EOL><INDENT>if field.isdigit():<EOL><INDENT>field = int(field)<EOL><DEDENT>field_value = field_value[field]<EOL><DEDENT>elif isinstance(field_value, dict):<EOL><INDENT>try:<EOL><INDENT>field_value = field_value[field]<EOL><DEDENT>except KeyError:<EOL><INDENT>if field.isdigit():<EOL><INDENT>field = int(field)<EOL>field_value = field_value[field]<EOL><DEDENT>else:<EOL><INDENT>field_value = None<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>field_value = getattr(field_value, field)<EOL><DEDENT><DEDENT>return field_value<EOL><DEDENT>except (IndexError, AttributeError, KeyError, TypeError):<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Helper to get field value from string path.\nString '<context>' is used to go up on context stack. It just\ncan be used at the beginning of path: <context>.<context>.field_name_1\nOn the other hand, '<root>' is used to start lookup from first item on context.", "id": "f13359:m0"}
{"signature": "def __init__(self, comp_value=None, *args, **kwargs):", "body": "super(NotEqualTo, self).__init__(*args, **kwargs)<EOL>self.comp_value = comp_value<EOL>self.message_values.update({'<STR_LIT>': self.comp_value})<EOL>", "docstring": ":param comp_value: Static value to use on check", "id": "f13360:c3:m0"}
{"signature": "def error(self, error_code, value, **kwargs):", "body": "code = self.error_code_map.get(error_code, error_code)<EOL>try:<EOL><INDENT>message = Template(self.error_messages[code])<EOL><DEDENT>except KeyError:<EOL><INDENT>message = Template(self.error_messages[error_code])<EOL><DEDENT>placeholders = {\"<STR_LIT:value>\": self.hidden_value if self.hidden else value}<EOL>placeholders.update(kwargs)<EOL>placeholders.update(self.message_values)<EOL>self.messages[code] = message.safe_substitute(placeholders)<EOL>", "docstring": "Helper to add error to messages field. It fills placeholder with extra call parameters\nor values from message_value map.\n\n:param error_code: Error code to use\n:rparam error_code: str\n:param value: Value checked\n:param kwargs: Map of values to use in placeholders", "id": "f13360:c1:m1"}
{"signature": "def date_proc(func):", "body": "@wraps(func)<EOL>def wrapped(request, *args, **kwargs):<EOL><INDENT>if '<STR_LIT:date>' in request.GET and request.GET['<STR_LIT:date>'] == '<STR_LIT>':<EOL><INDENT>raise Http404(\"<STR_LIT>\")<EOL><DEDENT>elif '<STR_LIT:date>' not in request.GET:<EOL><INDENT>date = datetime.today()<EOL>return func(request, date)<EOL><DEDENT>else:<EOL><INDENT>date = tuple(int(intValue) for intValue in request.GET['<STR_LIT:date>'].split('<STR_LIT:->'))<EOL>if len(date) == <NUM_LIT:3>:<EOL><INDENT>date = datetime(*date)<EOL><DEDENT>elif len(date) == <NUM_LIT:2>:<EOL><INDENT>date = datetime(*date, day = <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>date = datetime(*date, month = <NUM_LIT:1>, day = <NUM_LIT:1>)<EOL><DEDENT>return func(request, date)<EOL><DEDENT><DEDENT>return wrapped<EOL>", "docstring": "An decorator checking whether date parameter is passing in or not. If not, default date value is all PTT data.\n            Else, return PTT data with right date.\n    Args:\n            func: function you want to decorate.\n            request: WSGI request parameter getten from django.\n\n    Returns:\n            date:\n                    a datetime variable, you can only give year, year + month or year + month + day, three type.\n                    The missing part would be assigned default value 1 (for month is Jan, for day is 1).", "id": "f13365:m0"}
{"signature": "def queryString_required(strList):", "body": "def _dec(function):<EOL><INDENT>@wraps(function)<EOL>def _wrap(request, *args, **kwargs):<EOL><INDENT>for i in strList:<EOL><INDENT>if i not in request.GET:<EOL><INDENT>raise Http404(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return function(request, *args, **kwargs)<EOL><DEDENT>return _wrap<EOL><DEDENT>return _dec<EOL>", "docstring": "An decorator checking whether queryString key is valid or not\n    Args:\n            str: allowed queryString key\n\n    Returns:\n            if contains invalid queryString key, it will raise exception.", "id": "f13365:m1"}
{"signature": "def unpack(self, buffer):", "body": "fields = self._all_fields()<EOL>ctx = UnpackContext(self, fields, buffer)<EOL>for field in fields:<EOL><INDENT>try:<EOL><INDENT>if field.unpack_if.deref(ctx):<EOL><INDENT>for prev_field in field.unpack_after:<EOL><INDENT>prev_field.unpack_value_ref.deref(ctx)<EOL><DEDENT>field.unpack_value_ref.deref(ctx)<EOL><DEDENT>else:<EOL><INDENT>setattr(self, field.attr_name(), None)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>raise chain_exceptions(InstructBufferError(\"<STR_LIT>\", ctx, type(self), field.attr_name()))<EOL><DEDENT><DEDENT>return self.calc_byte_size(ctx)<EOL>", "docstring": "Unpacks the object's fields from buffer.", "id": "f13400:c2:m2"}
{"signature": "def pack(self):", "body": "fields = self._all_fields()<EOL>ctx = PackContext(self, fields)<EOL>for field in fields:<EOL><INDENT>if field.pack_if.deref(ctx):<EOL><INDENT>try:<EOL><INDENT>ctx.output_buffer.set(field.pack_ref.deref(ctx), field.pack_absolute_position_ref.deref(ctx))<EOL><DEDENT>except:<EOL><INDENT>raise chain_exceptions(InstructBufferError(\"<STR_LIT>\", ctx, type(self),<EOL>field.attr_name()))<EOL><DEDENT><DEDENT><DEDENT>result = bytearray(ctx.output_buffer.get())<EOL>static_byte_size = type(self).byte_size<EOL>if static_byte_size:<EOL><INDENT>static_byte_size = int(math.ceil(static_byte_size))<EOL>assert len(result) <= static_byte_size,(\"<STR_LIT>\" +<EOL>\"<STR_LIT>\").format(type(self), len(result),<EOL>static_byte_size)<EOL>if len(result) < static_byte_size:<EOL><INDENT>result += bytearray(static_byte_size - len(result))<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Packs the object and returns a buffer representing the packed object.", "id": "f13400:c2:m1"}
{"signature": "def to_slice(self):", "body": "return slice(self.start, self.stop, <NUM_LIT:1>)<EOL>", "docstring": ":returns: a slice object from start to stop\n:rtype: slice", "id": "f13401:c1:m5"}
{"signature": "def byte_offset(self, bytes):", "body": "remaining_bytes = bytes<EOL>for r in self:<EOL><INDENT>if r.is_open() or r.byte_length() >= remaining_bytes:<EOL><INDENT>return r.start + remaining_bytes<EOL><DEDENT>else:<EOL><INDENT>remaining_bytes -= r.byte_length()<EOL><DEDENT><DEDENT>assert False, \"<STR_LIT>\".format(bytes, self)<EOL>", "docstring": "Maps `bytes` length to a sequence's offset. For example, if we do byte_offset(5) and our list of sequences is\n[(0, 2), (10, 11), (40, 45)] then the returned value will be 42.\nNote that `bytes` must be <= byte_length().\n:returns: actual offset in one of the sequences in the range for request byte length.\n:rtype: int or float", "id": "f13401:c2:m6"}
{"signature": "def contains(self, point):", "body": "return self.start <= point and (self.stop is None or self.stop > point)<EOL>", "docstring": ":param point: point to check if contained in range\n:type point: int or float\n:returns: True if the range contains `point`\n:rtype: bool", "id": "f13401:c1:m7"}
{"signature": "def max_stop(self):", "body": "m = <NUM_LIT:0><EOL>for r in self:<EOL><INDENT>if r.is_open():<EOL><INDENT>return None<EOL><DEDENT>m = max(m, r.stop)<EOL><DEDENT>return m<EOL>", "docstring": ":returns: maximum stop in list or None if there's at least one open range\n:type: int, float or None", "id": "f13401:c2:m4"}
{"signature": "def create_from_string(self, string, context=EMPTY_CONTEXT, *args, **kwargs):", "body": "if not PY2 and not isinstance(string, bytes):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>io = StringIO(string)<EOL>instance = self.create_from_stream(io, context, *args, **kwargs)<EOL>io.close()<EOL>return instance<EOL>", "docstring": "Deserializes a new instance from a string.\nThis is a convenience method that creates a StringIO object and calls create_instance_from_stream().", "id": "f13418:c4:m3"}
{"signature": "def safe_repr(obj):", "body": "try:<EOL><INDENT>obj_repr = repr(obj)<EOL><DEDENT>except:<EOL><INDENT>obj_repr = \"<STR_LIT>\".format(type(obj), id(obj))<EOL><DEDENT>return obj_repr<EOL>", "docstring": "Returns a repr of an object and falls back to a minimal representation of type and ID if the call to repr raised\n    an error.\n\n    :param obj: object to safe repr\n    :returns: repr string or '(type<id> repr error)' string\n    :rtype: str", "id": "f13419:m0"}
{"signature": "def keep_kwargs_partial(func, *args, **keywords):", "body": "def newfunc(*fargs, **fkeywords):<EOL><INDENT>newkeywords = fkeywords.copy()<EOL>newkeywords.update(keywords)<EOL>return func(*(args + fargs), **newkeywords)<EOL><DEDENT>newfunc.func = func<EOL>newfunc.args = args<EOL>newfunc.keywords = keywords<EOL>return newfunc<EOL>", "docstring": "Like functools.partial but instead of using the new kwargs, keeps the old ones.", "id": "f13421:m0"}
{"signature": "def get_size(file_path):", "body": "try:<EOL><INDENT>im = _read_image(file_path)<EOL><DEDENT>except (IOError, IndexError, TypeError, AttributeError) as e:<EOL><INDENT>logger = logging.getLogger(__name__)<EOL>logger.error(\"<STR_LIT>\", file_path, e)<EOL><DEDENT>else:<EOL><INDENT>width, height = im.size<EOL>return {<EOL>'<STR_LIT:width>': width,<EOL>'<STR_LIT>': height<EOL>}<EOL><DEDENT>", "docstring": "Return image size (width and height).", "id": "f13446:m5"}
{"signature": "def get_exif_tags(data, datetime_format='<STR_LIT>'):", "body": "logger = logging.getLogger(__name__)<EOL>simple = {}<EOL>for tag in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if tag in data:<EOL><INDENT>if isinstance(data[tag], tuple):<EOL><INDENT>simple[tag] = data[tag][<NUM_LIT:0>].strip()<EOL><DEDENT>else:<EOL><INDENT>simple[tag] = data[tag].strip()<EOL><DEDENT><DEDENT><DEDENT>if '<STR_LIT>' in data:<EOL><INDENT>fnumber = data['<STR_LIT>']<EOL>try:<EOL><INDENT>simple['<STR_LIT>'] = float(fnumber[<NUM_LIT:0>]) / fnumber[<NUM_LIT:1>]<EOL><DEDENT>except Exception:<EOL><INDENT>logger.debug('<STR_LIT>', fnumber, exc_info=True)<EOL><DEDENT><DEDENT>if '<STR_LIT>' in data:<EOL><INDENT>focal = data['<STR_LIT>']<EOL>try:<EOL><INDENT>simple['<STR_LIT>'] = round(float(focal[<NUM_LIT:0>]) / focal[<NUM_LIT:1>])<EOL><DEDENT>except Exception:<EOL><INDENT>logger.debug('<STR_LIT>', focal,<EOL>exc_info=True)<EOL><DEDENT><DEDENT>if '<STR_LIT>' in data:<EOL><INDENT>exptime = data['<STR_LIT>']<EOL>if isinstance(exptime, tuple):<EOL><INDENT>try:<EOL><INDENT>simple['<STR_LIT>'] = str(fractions.Fraction(exptime[<NUM_LIT:0>],<EOL>exptime[<NUM_LIT:1>]))<EOL><DEDENT>except ZeroDivisionError:<EOL><INDENT>logger.info('<STR_LIT>', exptime)<EOL><DEDENT><DEDENT>elif isinstance(exptime, int):<EOL><INDENT>simple['<STR_LIT>'] = str(exptime)<EOL><DEDENT>else:<EOL><INDENT>logger.info('<STR_LIT>', exptime)<EOL><DEDENT><DEDENT>if data.get('<STR_LIT>'):<EOL><INDENT>simple['<STR_LIT>'] = data['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in data:<EOL><INDENT>date = data['<STR_LIT>'].rsplit('<STR_LIT:\\x00>')[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>simple['<STR_LIT>'] = datetime.strptime(date, '<STR_LIT>')<EOL>simple['<STR_LIT>'] = simple['<STR_LIT>'].strftime(datetime_format)<EOL><DEDENT>except (ValueError, TypeError) as e:<EOL><INDENT>logger.info('<STR_LIT>', e)<EOL><DEDENT><DEDENT>if '<STR_LIT>' in data:<EOL><INDENT>info = data['<STR_LIT>']<EOL>lat_info = 
info.get('<STR_LIT>')<EOL>lon_info = info.get('<STR_LIT>')<EOL>lat_ref_info = info.get('<STR_LIT>')<EOL>lon_ref_info = info.get('<STR_LIT>')<EOL>if lat_info and lon_info and lat_ref_info and lon_ref_info:<EOL><INDENT>try:<EOL><INDENT>lat = dms_to_degrees(lat_info)<EOL>lon = dms_to_degrees(lon_info)<EOL><DEDENT>except (ZeroDivisionError, ValueError, TypeError):<EOL><INDENT>logger.info('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>simple['<STR_LIT>'] = {<EOL>'<STR_LIT>': - lat if lat_ref_info != '<STR_LIT:N>' else lat,<EOL>'<STR_LIT>': - lon if lon_ref_info != '<STR_LIT:E>' else lon,<EOL>}<EOL><DEDENT><DEDENT><DEDENT>return simple<EOL>", "docstring": "Make a simplified version with common tags from raw EXIF data.", "id": "f13446:m9"}
{"signature": "def get_exif_data(filename):", "body": "logger = logging.getLogger(__name__)<EOL>img = _read_image(filename)<EOL>try:<EOL><INDENT>exif = img._getexif() or {}<EOL><DEDENT>except ZeroDivisionError:<EOL><INDENT>logger.warning('<STR_LIT>')<EOL>return None<EOL><DEDENT>data = {TAGS.get(tag, tag): value for tag, value in exif.items()}<EOL>if '<STR_LIT>' in data:<EOL><INDENT>try:<EOL><INDENT>data['<STR_LIT>'] = {GPSTAGS.get(tag, tag): value<EOL>for tag, value in data['<STR_LIT>'].items()}<EOL><DEDENT>except AttributeError:<EOL><INDENT>logger = logging.getLogger(__name__)<EOL>logger.info('<STR_LIT>')<EOL>del data['<STR_LIT>']<EOL><DEDENT><DEDENT>return data<EOL>", "docstring": "Return a dict with the raw EXIF data.", "id": "f13446:m6"}
{"signature": "def init_logging(name, level=logging.INFO):", "body": "logger = logging.getLogger(name)<EOL>logger.setLevel(level)<EOL>try:<EOL><INDENT>if os.isatty(sys.stdout.fileno()) andnot sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>formatter = ColoredFormatter()<EOL><DEDENT>elif level == logging.DEBUG:<EOL><INDENT>formatter = Formatter('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>formatter = Formatter('<STR_LIT>')<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>formatter = Formatter('<STR_LIT>')<EOL><DEDENT>handler = logging.StreamHandler()<EOL>handler.setFormatter(formatter)<EOL>logger.addHandler(handler)<EOL>", "docstring": "Logging config\n\n    Set the level and create a more detailed formatter for debug mode.", "id": "f13448:m1"}
{"signature": "def load_exif(album):", "body": "if not hasattr(album.gallery, \"<STR_LIT>\"):<EOL><INDENT>_restore_cache(album.gallery)<EOL><DEDENT>cache = album.gallery.exifCache<EOL>for media in album.medias:<EOL><INDENT>if media.type == \"<STR_LIT:image>\":<EOL><INDENT>key = os.path.join(media.path, media.filename)<EOL>if key in cache:<EOL><INDENT>media.exif = cache[key]<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Loads the exif data of all images in an album from cache", "id": "f13450:m0"}
{"signature": "def filter_nomedia(album, settings=None):", "body": "nomediapath = os.path.join(album.src_path, \"<STR_LIT>\")<EOL>if os.path.isfile(nomediapath):<EOL><INDENT>if os.path.getsize(nomediapath) == <NUM_LIT:0>:<EOL><INDENT>logger.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\", album.name)<EOL>_remove_albums_with_subdirs(album.gallery.albums, [album.path])<EOL>try:<EOL><INDENT>os.rmdir(album.dst_path)<EOL><DEDENT>except OSError as e:<EOL><INDENT>pass<EOL><DEDENT>album.subdirs = []<EOL>album.medias = []<EOL><DEDENT>else:<EOL><INDENT>with open(nomediapath, \"<STR_LIT:r>\") as nomediaFile:<EOL><INDENT>logger.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\", album.name)<EOL>ignored = nomediaFile.read().split(\"<STR_LIT:\\n>\")<EOL>album.medias = [media for media in album.medias<EOL>if media.src_filename not in ignored]<EOL>album.subdirs = [dirname for dirname in album.subdirs<EOL>if dirname not in ignored]<EOL>_remove_albums_with_subdirs(album.gallery.albums,<EOL>ignored, album.path + os.path.sep)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Removes all filtered Media and subdirs from an Album", "id": "f13451:m1"}
{"signature": "def reduce_opacity(im, opacity):", "body": "assert opacity >= <NUM_LIT:0> and opacity <= <NUM_LIT:1><EOL>if im.mode != '<STR_LIT>':<EOL><INDENT>im = im.convert('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>im = im.copy()<EOL><DEDENT>alpha = im.split()[<NUM_LIT:3>]<EOL>alpha = ImageEnhance.Brightness(alpha).enhance(opacity)<EOL>im.putalpha(alpha)<EOL>return im<EOL>", "docstring": "Returns an image with reduced opacity.", "id": "f13456:m0"}
{"signature": "@main.command()<EOL>@argument('<STR_LIT>', default='<STR_LIT>')<EOL>@option('<STR_LIT>', '<STR_LIT>', help=\"<STR_LIT>\", default=<NUM_LIT>)<EOL>@option('<STR_LIT:-c>', '<STR_LIT>', default=_DEFAULT_CONFIG_FILE,<EOL>show_default=True, help='<STR_LIT>')<EOL>def serve(destination, port, config):", "body": "if os.path.exists(destination):<EOL><INDENT>pass<EOL><DEDENT>elif os.path.exists(config):<EOL><INDENT>settings = read_settings(config)<EOL>destination = settings.get('<STR_LIT>')<EOL>if not os.path.exists(destination):<EOL><INDENT>sys.stderr.write(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(destination))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>sys.stderr.write(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>.format(destination=destination, config=config))<EOL>sys.exit(<NUM_LIT:2>)<EOL><DEDENT>print('<STR_LIT>'.format(destination))<EOL>os.chdir(destination)<EOL>Handler = server.SimpleHTTPRequestHandler<EOL>httpd = socketserver.TCPServer((\"<STR_LIT>\", port), Handler, False)<EOL>print(\"<STR_LIT>\".format(port))<EOL>try:<EOL><INDENT>httpd.allow_reuse_address = True<EOL>httpd.server_bind()<EOL>httpd.server_activate()<EOL>httpd.serve_forever()<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>", "docstring": "Run a simple web server.", "id": "f13459:m4"}
{"signature": "def create_settings(**kwargs):", "body": "settings = _DEFAULT_CONFIG.copy()<EOL>settings.update(kwargs)<EOL>return settings<EOL>", "docstring": "Create a new default setting copy and initialize it with kwargs.", "id": "f13460:m2"}
{"signature": "def read_settings(filename=None):", "body": "logger = logging.getLogger(__name__)<EOL>logger.info(\"<STR_LIT>\")<EOL>settings = _DEFAULT_CONFIG.copy()<EOL>if filename:<EOL><INDENT>logger.debug(\"<STR_LIT>\", filename)<EOL>settings_path = os.path.dirname(filename)<EOL>tempdict = {}<EOL>with open(filename) as f:<EOL><INDENT>code = compile(f.read(), filename, '<STR_LIT>')<EOL>exec(code, tempdict)<EOL><DEDENT>settings.update((k, v) for k, v in tempdict.items()<EOL>if k not in ['<STR_LIT>'])<EOL>paths = ['<STR_LIT:source>', '<STR_LIT>', '<STR_LIT>']<EOL>if os.path.isdir(join(settings_path, settings['<STR_LIT>'])) andos.path.isdir(join(settings_path, settings['<STR_LIT>'],<EOL>'<STR_LIT>')):<EOL><INDENT>paths.append('<STR_LIT>')<EOL><DEDENT>for p in paths:<EOL><INDENT>path = settings[p]<EOL>if path and not isabs(path):<EOL><INDENT>settings[p] = abspath(normpath(join(settings_path, path)))<EOL>logger.debug(\"<STR_LIT>\", p, path, settings[p])<EOL><DEDENT><DEDENT><DEDENT>for key in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>w, h = settings[key]<EOL>if h > w:<EOL><INDENT>settings[key] = (h, w)<EOL>logger.warning(\"<STR_LIT>\"<EOL>\"<STR_LIT>\", key)<EOL><DEDENT><DEDENT>if not settings['<STR_LIT>']:<EOL><INDENT>logger.info('<STR_LIT>')<EOL><DEDENT>logger.debug('<STR_LIT>', pformat(settings, width=<NUM_LIT>))<EOL>return settings<EOL>", "docstring": "Read settings from a config file in the source_dir root.", "id": "f13460:m1"}
{"signature": "def write(self, album):", "body": "page = self.template.render(**self.generate_context(album))<EOL>output_file = os.path.join(album.dst_path, album.output_file)<EOL>with open(output_file, '<STR_LIT:w>', encoding='<STR_LIT:utf-8>') as f:<EOL><INDENT>f.write(page)<EOL><DEDENT>", "docstring": "Generate the HTML page and save it.", "id": "f13461:c0:m2"}
{"signature": "def generate_context(self, album):", "body": "from . import __url__ as sigal_link<EOL>self.logger.info(\"<STR_LIT>\", album)<EOL>return {<EOL>'<STR_LIT>': album,<EOL>'<STR_LIT>': self.index_title,<EOL>'<STR_LIT>': self.settings,<EOL>'<STR_LIT>': sigal_link,<EOL>'<STR_LIT>': {'<STR_LIT:name>': os.path.basename(self.theme),<EOL>'<STR_LIT:url>': url_from_path(os.path.relpath(self.theme_path,<EOL>album.dst_path))},<EOL>}<EOL>", "docstring": "Generate the context dict for the given path.", "id": "f13461:c0:m1"}
{"signature": "def url_from_path(path):", "body": "if os.sep != '<STR_LIT:/>':<EOL><INDENT>path = '<STR_LIT:/>'.join(path.split(os.sep))<EOL><DEDENT>return quote(path)<EOL>", "docstring": "Transform path to url, converting backslashes to slashes if needed.", "id": "f13462:m2"}
{"signature": "def is_valid_html5_video(ext):", "body": "return ext in VIDEO_MIMES.keys()<EOL>", "docstring": "Checks if ext is a supported HTML5 video.", "id": "f13462:m4"}
{"signature": "def check_subprocess(cmd, source, outname):", "body": "logger = logging.getLogger(__name__)<EOL>try:<EOL><INDENT>res = subprocess.run(cmd, stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE)<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>logger.debug('<STR_LIT>', outname)<EOL>if os.path.isfile(outname):<EOL><INDENT>os.remove(outname)<EOL><DEDENT>raise<EOL><DEDENT>if res.returncode:<EOL><INDENT>logger.debug('<STR_LIT>', res.stdout.decode('<STR_LIT:utf8>'))<EOL>logger.debug('<STR_LIT>', res.stderr.decode('<STR_LIT:utf8>'))<EOL>if os.path.isfile(outname):<EOL><INDENT>logger.debug('<STR_LIT>', outname)<EOL>os.remove(outname)<EOL><DEDENT>raise SubprocessException('<STR_LIT>' + source)<EOL><DEDENT>", "docstring": "Run the command to resize the video and remove the output file if the\n    processing fails.", "id": "f13463:m0"}
{"signature": "def video_size(source, converter='<STR_LIT>'):", "body": "res = subprocess.run([converter, '<STR_LIT>', source], stderr=subprocess.PIPE)<EOL>stderr = res.stderr.decode('<STR_LIT:utf8>')<EOL>pattern = re.compile(r'<STR_LIT>')<EOL>match = pattern.search(stderr)<EOL>rot_pattern = re.compile(r'<STR_LIT>')<EOL>rot_match = rot_pattern.search(stderr)<EOL>if match:<EOL><INDENT>x, y = int(match.groups()[<NUM_LIT:0>]), int(match.groups()[<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>x = y = <NUM_LIT:0><EOL><DEDENT>if rot_match:<EOL><INDENT>x, y = y, x<EOL><DEDENT>return x, y<EOL>", "docstring": "Returns the dimensions of the video.", "id": "f13463:m1"}
{"signature": "def generate_thumbnail(source, outname, box, delay, fit=True, options=None,<EOL>converter='<STR_LIT>'):", "body": "logger = logging.getLogger(__name__)<EOL>tmpfile = outname + \"<STR_LIT>\"<EOL>cmd = [converter, '<STR_LIT>', source, '<STR_LIT>', '<STR_LIT>', '<STR_LIT:1>',<EOL>'<STR_LIT>', delay, '<STR_LIT>', '<STR_LIT:1>', '<STR_LIT>', tmpfile]<EOL>logger.debug('<STR_LIT>', '<STR_LIT:U+0020>'.join(cmd))<EOL>check_subprocess(cmd, source, outname)<EOL>image.generate_thumbnail(tmpfile, outname, box, fit=fit, options=options)<EOL>os.unlink(tmpfile)<EOL>", "docstring": "Create a thumbnail image for the video source, based on ffmpeg.", "id": "f13463:m3"}
{"signature": "@property<EOL><INDENT>def images(self):<DEDENT>", "body": "for media in self.medias:<EOL><INDENT>if media.type == '<STR_LIT:image>':<EOL><INDENT>yield media<EOL><DEDENT><DEDENT>", "docstring": "List of images (:class:`~sigal.gallery.Image`).", "id": "f13464:c3:m9"}
{"signature": "@property<EOL><INDENT>def videos(self):<DEDENT>", "body": "for media in self.medias:<EOL><INDENT>if media.type == '<STR_LIT>':<EOL><INDENT>yield media<EOL><DEDENT><DEDENT>", "docstring": "List of videos (:class:`~sigal.gallery.Video`).", "id": "f13464:c3:m10"}
{"signature": "@property<EOL><INDENT>def thumbnail(self):<DEDENT>", "body": "if self._thumbnail:<EOL><INDENT>return self._thumbnail<EOL><DEDENT>thumbnail = self.meta.get('<STR_LIT>', ['<STR_LIT>'])[<NUM_LIT:0>]<EOL>if thumbnail and isfile(join(self.src_path, thumbnail)):<EOL><INDENT>self._thumbnail = url_from_path(join(<EOL>self.name, get_thumb(self.settings, thumbnail)))<EOL>self.logger.debug(\"<STR_LIT>\", self, self._thumbnail)<EOL>return self._thumbnail<EOL><DEDENT>else:<EOL><INDENT>for f in self.medias:<EOL><INDENT>ext = splitext(f.filename)[<NUM_LIT:1>]<EOL>if ext.lower() in self.settings['<STR_LIT>']:<EOL><INDENT>size = f.size<EOL>if size is None:<EOL><INDENT>size = get_size(f.src_path)<EOL><DEDENT>if size['<STR_LIT:width>'] > size['<STR_LIT>']:<EOL><INDENT>self._thumbnail = (url_quote(self.name) + '<STR_LIT:/>' +<EOL>f.thumbnail)<EOL>self.logger.debug(<EOL>\"<STR_LIT>\",<EOL>self, self._thumbnail)<EOL>return self._thumbnail<EOL><DEDENT><DEDENT><DEDENT>if not self._thumbnail and self.medias:<EOL><INDENT>for media in self.medias:<EOL><INDENT>if media.thumbnail is not None:<EOL><INDENT>self._thumbnail = (url_quote(self.name) + '<STR_LIT:/>' +<EOL>media.thumbnail)<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.logger.warning(\"<STR_LIT>\", self)<EOL>return None<EOL><DEDENT>self.logger.debug(\"<STR_LIT>\",<EOL>self, self._thumbnail)<EOL>return self._thumbnail<EOL><DEDENT>if not self._thumbnail:<EOL><INDENT>for path, album in self.gallery.get_albums(self.path):<EOL><INDENT>if album.thumbnail:<EOL><INDENT>self._thumbnail = (url_quote(self.name) + '<STR_LIT:/>' +<EOL>album.thumbnail)<EOL>self.logger.debug(<EOL>\"<STR_LIT>\",<EOL>self, self._thumbnail)<EOL>return self._thumbnail<EOL><DEDENT><DEDENT><DEDENT><DEDENT>self.logger.error('<STR_LIT>', self)<EOL>return None<EOL>", "docstring": "Path to the thumbnail of the album.", "id": "f13464:c3:m13"}
{"signature": "@property<EOL><INDENT>def albums(self):<DEDENT>", "body": "root_path = self.path if self.path != '<STR_LIT:.>' else '<STR_LIT>'<EOL>return [self.gallery.albums[join(root_path, path)]<EOL>for path in self.subdirs]<EOL>", "docstring": "List of :class:`~sigal.gallery.Album` objects for each\n        sub-directory.", "id": "f13464:c3:m11"}
{"signature": "@property<EOL><INDENT>def show_map(self):<DEDENT>", "body": "return any(image.has_location() for image in self.images)<EOL>", "docstring": "Check if we have at least one photo with GPS location in the album", "id": "f13464:c3:m16"}
{"signature": "def _get_metadata(self):", "body": "self.description = '<STR_LIT>'<EOL>self.meta = {}<EOL>self.title = '<STR_LIT>'<EOL>descfile = splitext(self.src_path)[<NUM_LIT:0>] + '<STR_LIT>'<EOL>if isfile(descfile):<EOL><INDENT>meta = read_markdown(descfile)<EOL>for key, val in meta.items():<EOL><INDENT>setattr(self, key, val)<EOL><DEDENT><DEDENT>", "docstring": "Get image metadata from filename.md: title, description, meta.", "id": "f13464:c0:m7"}
{"signature": "@property<EOL><INDENT>def big_url(self):<DEDENT>", "body": "if self.big is not None:<EOL><INDENT>return url_from_path(self.big)<EOL><DEDENT>", "docstring": "URL of the original media.", "id": "f13464:c0:m5"}
{"signature": "@property<EOL><INDENT>def breadcrumb(self):<DEDENT>", "body": "if self.path == '<STR_LIT:.>':<EOL><INDENT>return []<EOL><DEDENT>path = self.path<EOL>breadcrumb = [((self.url_ext or '<STR_LIT:.>'), self.title)]<EOL>while True:<EOL><INDENT>path = os.path.normpath(os.path.join(path, '<STR_LIT:..>'))<EOL>if path == '<STR_LIT:.>':<EOL><INDENT>break<EOL><DEDENT>url = (url_from_path(os.path.relpath(path, self.path)) + '<STR_LIT:/>' +<EOL>self.url_ext)<EOL>breadcrumb.append((url, self.gallery.albums[path].title))<EOL><DEDENT>breadcrumb.reverse()<EOL>return breadcrumb<EOL>", "docstring": "List of ``(url, title)`` tuples defining the current breadcrumb\n        path.", "id": "f13464:c3:m15"}
{"signature": "@property<EOL><INDENT>def big(self):<DEDENT>", "body": "if self.settings['<STR_LIT>']:<EOL><INDENT>s = self.settings<EOL>if s['<STR_LIT>']:<EOL><INDENT>return self.filename<EOL><DEDENT>orig_path = join(s['<STR_LIT>'], self.path, s['<STR_LIT>'])<EOL>check_or_create_dir(orig_path)<EOL>big_path = join(orig_path, self.src_filename)<EOL>if not isfile(big_path):<EOL><INDENT>copy(self.src_path, big_path, symlink=s['<STR_LIT>'],<EOL>rellink=self.settings['<STR_LIT>'])<EOL><DEDENT>return join(s['<STR_LIT>'], self.src_filename)<EOL><DEDENT>", "docstring": "Path to the original image, if ``keep_orig`` is set (relative to the\n        album directory). Copy the file if needed.", "id": "f13464:c0:m4"}
{"signature": "def displayOutdated(modules, dependency_specs, use_colours):", "body": "if use_colours:<EOL><INDENT>DIM    = colorama.Style.DIM       <EOL>NORMAL = colorama.Style.NORMAL    <EOL>BRIGHT = colorama.Style.BRIGHT    <EOL>YELLOW = colorama.Fore.YELLOW     <EOL>RED    = colorama.Fore.RED        <EOL>GREEN  = colorama.Fore.GREEN      <EOL>RESET  = colorama.Style.RESET_ALL <EOL><DEDENT>else:<EOL><INDENT>DIM = BRIGHT = YELLOW = RED = GREEN = RESET = u'<STR_LIT>'<EOL><DEDENT>status = <NUM_LIT:0><EOL>from yotta.lib import access<EOL>from yotta.lib import access_common<EOL>from yotta.lib import sourceparse<EOL>for name, m in modules.items():<EOL><INDENT>if m.isTestDependency():<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>latest_v = access.latestSuitableVersion(name, '<STR_LIT:*>', registry='<STR_LIT>', quiet=True)<EOL><DEDENT>except access_common.Unavailable as e:<EOL><INDENT>latest_v = None<EOL><DEDENT>if not m:<EOL><INDENT>m_version = u'<STR_LIT:U+0020>' + RESET + BRIGHT + RED + u\"<STR_LIT>\" + RESET<EOL><DEDENT>else:<EOL><INDENT>m_version = DIM + u'<STR_LIT>' % (m.version)<EOL><DEDENT>if not latest_v:<EOL><INDENT>print(u'<STR_LIT>' % (RED, name, m_version, NORMAL, RESET))<EOL>status = <NUM_LIT:2><EOL>continue<EOL><DEDENT>elif not m or m.version < latest_v:<EOL><INDENT>update_prevented_by = '<STR_LIT>'<EOL>if m:<EOL><INDENT>specs_preventing_update = [<EOL>x for x in dependency_specs<EOL>if x.name == name and not<EOL>sourceparse.parseSourceURL(x.nonShrinkwrappedVersionReq()).semanticSpecMatches(latest_v)<EOL>]<EOL>shrinkwrap_prevents_update = [<EOL>x for x in dependency_specs<EOL>if x.name == name and x.isShrinkwrapped() and not<EOL>sourceparse.parseSourceURL(x.versionReq()).semanticSpecMatches(latest_v)<EOL>]<EOL>if len(specs_preventing_update):<EOL><INDENT>update_prevented_by = '<STR_LIT>' % (<EOL>'<STR_LIT:U+002CU+0020>'.join(['<STR_LIT>' % (x.version_req, x.specifying_module) for x in specs_preventing_update])<EOL>)<EOL><DEDENT>if 
len(shrinkwrap_prevents_update):<EOL><INDENT>update_prevented_by += '<STR_LIT>'<EOL><DEDENT>if m.version.major() < latest_v.major():<EOL><INDENT>colour = GREEN<EOL><DEDENT>elif m.version.minor() < latest_v.minor():<EOL><INDENT>colour = YELLOW<EOL><DEDENT>else:<EOL><INDENT>colour = RED<EOL><DEDENT><DEDENT>else:<EOL><INDENT>colour = RED<EOL><DEDENT>print(u'<STR_LIT>' % (name, m_version, RESET, colour, latest_v.version, update_prevented_by, RESET))<EOL>if not status:<EOL><INDENT>status = <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return status<EOL>", "docstring": "print information about outdated modules,\n        return 0 if there is nothing to be done and nonzero otherwise", "id": "f13517:m2"}
{"signature": "def isValidSpec(spec_or_source_url):", "body": "try:<EOL><INDENT>parseSourceURL(spec_or_source_url)<EOL>return True<EOL><DEDENT>except InvalidVersionSpec:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Check if the specified version source URL (or version spec), can be\n        parsed successfully.", "id": "f13533:m2"}
{"signature": "def parseSourceURL(source_url):", "body": "name, spec = _getNonRegistryRef(source_url)<EOL>if spec:<EOL><INDENT>return spec<EOL><DEDENT>try:<EOL><INDENT>url_is_spec = version.Spec(source_url)<EOL><DEDENT>except ValueError:<EOL><INDENT>url_is_spec = None<EOL><DEDENT>if url_is_spec is not None:<EOL><INDENT>return VersionSource('<STR_LIT>', '<STR_LIT>', source_url)<EOL><DEDENT>raise InvalidVersionSpec(\"<STR_LIT>\" % (source_url))<EOL>", "docstring": "Parse the specified version source URL (or version spec), and return an\n        instance of VersionSource", "id": "f13533:m1"}
{"signature": "def __init__(self, version_string, url=None):", "body": "super(Version, self).__init__()<EOL>self.url = url<EOL>version_string = str(version_string.strip())<EOL>self.version = None<EOL>if version_string.startswith('<STR_LIT:v>') or version_string.startswith('<STR_LIT:=>'):<EOL><INDENT>self.version = semantic_version.Version(version_string[<NUM_LIT:1>:], partial=False)<EOL><DEDENT>elif not version_string:<EOL><INDENT>self.version = TipVersion()<EOL><DEDENT>else:<EOL><INDENT>self.version = semantic_version.Version(version_string, partial=False)<EOL><DEDENT>self.url = url<EOL>", "docstring": "Wrap the semantic_version Version class so that we can represent\n            'tip' versions as well as specific versions, and store an optional\n            URL that can represent the location from which we can retrieve this\n            version.\n\n            Also add some useful methods for manipulating versions.", "id": "f13534:c1:m0"}
{"signature": "def which(program):", "body": "<EOL>if os.path.split(program)[<NUM_LIT:0>]:<EOL><INDENT>if os.path.exists(program) and os.access(program, os.X_OK):<EOL><INDENT>return program<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for path in os.environ['<STR_LIT>'].split(os.pathsep):<EOL><INDENT>progpath = os.path.join(path, program)<EOL>if os.path.exists(progpath) and os.access(progpath, os.X_OK):<EOL><INDENT>return progpath<EOL><DEDENT><DEDENT><DEDENT>return None<EOL>", "docstring": "look for \"program\" in PATH, and return the path to it, or None if it\n        was not found", "id": "f13539:m8"}
{"signature": "def dropRootPrivs(fn):", "body": "def wrapped_fn(*args, **kwargs):<EOL><INDENT>q = multiprocessing.Queue()<EOL>p = multiprocessing.Process(target=_dropPrivsReturnViaQueue, args=(q, fn, args, kwargs))<EOL>p.start()<EOL>r = None<EOL>e = None<EOL>while True:<EOL><INDENT>msg = q.get()<EOL>if msg[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>r = msg[<NUM_LIT:1>]<EOL><DEDENT>if msg[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>e = msg[<NUM_LIT:1>](msg[<NUM_LIT:2>])<EOL><DEDENT>if msg[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>if e is not None:<EOL><INDENT>raise e <EOL><DEDENT>return r<EOL><DEDENT><DEDENT><DEDENT>return wrapped_fn<EOL>", "docstring": "decorator to drop su/sudo privilages before running a function on\n        unix/linux.\n        The *real* uid is modified, so privileges are permanently dropped for\n        the process. (i.e. make sure you don't need to do\n\n        If there is a SUDO_UID environment variable, then we drop to that,\n        otherwise we drop to nobody.", "id": "f13539:m2"}
{"signature": "def satisfyVersionByInstalling(name, version_required, working_directory, type='<STR_LIT>', inherit_shrinkwrap=None):", "body": "v = latestSuitableVersion(name, version_required, _registryNamespaceForType(type))<EOL>install_into = os.path.join(working_directory, name)<EOL>return _satisfyVersionByInstallingVersion(<EOL>name, version_required, install_into, v, type=type, inherit_shrinkwrap = inherit_shrinkwrap<EOL>)<EOL>", "docstring": "installs and returns a Component/Target for the specified name+version\n        requirement, into a subdirectory of `working_directory'", "id": "f13541:m8"}
{"signature": "def _satisfyVersionByInstallingVersion(name, version_required, working_directory, version, type='<STR_LIT>', inherit_shrinkwrap=None):", "body": "assert(version)<EOL>logger.info('<STR_LIT>', version)<EOL>version.unpackInto(working_directory)<EOL>r = _clsForType(type)(working_directory, inherit_shrinkwrap = inherit_shrinkwrap)<EOL>if not r:<EOL><INDENT>raise Exception(<EOL>'<STR_LIT>' % (name, version_required, type)<EOL>)<EOL><DEDENT>if name != r.getName():<EOL><INDENT>raise Exception('<STR_LIT>' % (<EOL>type, name, version_required, r.getName()<EOL>))<EOL><DEDENT>r.runScript('<STR_LIT>')<EOL>return r<EOL>", "docstring": "installs and returns a Component/Target for the specified version requirement into\n        'working_directory' using the provided remote version object.\n        This function is not normally called via `satisfyVersionByInstalling',\n        which looks up a suitable remote version object.", "id": "f13541:m9"}
{"signature": "def satisfyVersionFromSearchPaths(name, version_required, search_paths, update=False, type='<STR_LIT>', inherit_shrinkwrap=None):", "body": "<EOL>from yotta.lib import pack<EOL>v = None<EOL>try:<EOL><INDENT>sv = sourceparse.parseSourceURL(version_required)<EOL><DEDENT>except ValueError as e:<EOL><INDENT>logging.error(e)<EOL>return None<EOL><DEDENT>try:<EOL><INDENT>local_version = searchPathsFor(<EOL>name,<EOL>sv.semanticSpec(),<EOL>search_paths,<EOL>type,<EOL>inherit_shrinkwrap = inherit_shrinkwrap<EOL>)<EOL><DEDENT>except pack.InvalidDescription as e:<EOL><INDENT>logger.error(e)<EOL>return None<EOL><DEDENT>logger.debug(\"<STR_LIT>\" % (('<STR_LIT>', '<STR_LIT>')[not local_version], name))<EOL>if local_version:<EOL><INDENT>if update and not local_version.installedLinked():<EOL><INDENT>v = latestSuitableVersion(name, version_required, registry=_registryNamespaceForType(type))<EOL>if local_version:<EOL><INDENT>local_version.setLatestAvailable(v)<EOL><DEDENT><DEDENT>if local_version.installedLinked() or not local_version.outdated():<EOL><INDENT>logger.debug(\"<STR_LIT>\" % local_version.path)<EOL>if name != local_version.getName():<EOL><INDENT>raise Exception('<STR_LIT>' % (<EOL>local_version.getName(), name, local_version.path<EOL>))<EOL><DEDENT>return local_version<EOL><DEDENT>logger.info('<STR_LIT>' % (<EOL>name,<EOL>local_version.getVersion(),<EOL>v<EOL>))<EOL>fsutils.rmRf(local_version.path)<EOL>return _satisfyVersionByInstallingVersion(<EOL>name, version_required, local_version.path, v, type=type, inherit_shrinkwrap=inherit_shrinkwrap<EOL>)<EOL><DEDENT>return None<EOL>", "docstring": "returns a Component/Target for the specified version, if found in the\n        list of search paths. If `update' is True, then also check for newer\n        versions of the found component, and update it in-place (unless it was\n        installed via a symlink).", "id": "f13541:m7"}
{"signature": "@_swallowRequestExceptions(fail_return=None)<EOL>@_retryConnectionErrors<EOL>@_friendlyAuthError<EOL>@_handleAuth<EOL>def listOwners(namespace, name, registry=None):", "body": "registry = registry or Registry_Base_URL<EOL>url = '<STR_LIT>' % (<EOL>registry,<EOL>namespace,<EOL>name<EOL>)<EOL>request_headers = _headersForRegistry(registry)<EOL>response = requests.get(url, headers=request_headers)<EOL>if response.status_code == <NUM_LIT>:<EOL><INDENT>logger.error('<STR_LIT>' % (namespace[:-<NUM_LIT:1>], name))<EOL>return None<EOL><DEDENT>response.raise_for_status()<EOL>return ordered_json.loads(response.text)<EOL>", "docstring": "List the owners of a module or target (owners are the people with\n        permission to publish versions and add/remove the owners).", "id": "f13542:m25"}
{"signature": "def getPublicKey(registry=None):", "body": "registry = registry or Registry_Base_URL<EOL>pubkey_pem = None<EOL>if _isPublicRegistry(registry):<EOL><INDENT>pubkey_pem = settings.getProperty('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>for s in _getSources():<EOL><INDENT>if _sourceMatches(s, registry):<EOL><INDENT>if '<STR_LIT>' in s and s['<STR_LIT>'] and '<STR_LIT>' in s['<STR_LIT>']:<EOL><INDENT>pubkey_pem = s['<STR_LIT>']['<STR_LIT>']<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if not pubkey_pem:<EOL><INDENT>pubkey_pem, privatekey_pem = _generateAndSaveKeys()<EOL><DEDENT>else:<EOL><INDENT>pubkey_pem = pubkey_pem.encode('<STR_LIT:ascii>')<EOL><DEDENT>if b'<STR_LIT>' in pubkey_pem:<EOL><INDENT>pubkey = serialization.load_pem_public_key(pubkey_pem, default_backend())<EOL><DEDENT>else:<EOL><INDENT>pubkey_der = binascii.unhexlify(pubkey_pem)<EOL>pubkey = serialization.load_der_public_key(pubkey_der, default_backend())<EOL><DEDENT>return _pubkeyWireFormat(pubkey)<EOL>", "docstring": "Return the user's public key (generating and saving a new key pair if necessary)", "id": "f13542:m32"}
{"signature": "def availableVersions(self):", "body": "return _listVersions(self.namespace, self.name)<EOL>", "docstring": "return a list of Version objects, each able to retrieve a tarball", "id": "f13542:c2:m3"}
{"signature": "@_swallowRequestExceptions(fail_return=\"<STR_LIT>\")<EOL>@_retryConnectionErrors<EOL>@_friendlyAuthError<EOL>@_handleAuth<EOL>def publish(namespace, name, version, description_file, tar_file, readme_file,<EOL>readme_file_ext, registry=None):", "body": "registry = registry or Registry_Base_URL<EOL>url = '<STR_LIT>' % (<EOL>registry,<EOL>namespace,<EOL>name,<EOL>version<EOL>)<EOL>if readme_file_ext == '<STR_LIT>':<EOL><INDENT>readme_section_name = '<STR_LIT>'<EOL><DEDENT>elif readme_file_ext == '<STR_LIT>':<EOL><INDENT>readme_section_name = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % readme_file_ext)<EOL><DEDENT>body = OrderedDict([('<STR_LIT>', (None, description_file.read(),'<STR_LIT:application/json>')),<EOL>('<STR_LIT>',('<STR_LIT>', tar_file)),<EOL>(readme_section_name, (readme_section_name, readme_file))])<EOL>headers = _headersForRegistry(registry)<EOL>response = requests.put(url, headers=headers, files=body)<EOL>response.raise_for_status()<EOL>return None<EOL>", "docstring": "Publish a tarblob to the registry, if the request fails, an exception\n        is raised, which either triggers re-authentication, or is turned into a\n        return value by the decorators. (If successful, the decorated function\n        returns None)", "id": "f13542:m23"}
{"signature": "def setAPIKey(registry, api_key):", "body": "if (registry is None) or (registry == Registry_Base_URL):<EOL><INDENT>return<EOL><DEDENT>sources = _getSources()<EOL>source = None<EOL>for s in sources:<EOL><INDENT>if _sourceMatches(s, registry):<EOL><INDENT>source = s<EOL><DEDENT><DEDENT>if source is None:<EOL><INDENT>source = {<EOL>'<STR_LIT:type>':'<STR_LIT>',<EOL>'<STR_LIT:url>':registry,<EOL>}<EOL>sources.append(source)<EOL><DEDENT>source['<STR_LIT>'] = api_key<EOL>settings.set('<STR_LIT>', sources)<EOL>", "docstring": "Set the api key for accessing a registry. This is only necessary for\n        development/test registries.", "id": "f13542:m31"}
{"signature": "def _raiseUnavailableFor401(message):", "body": "def __raiseUnavailableFor401(fn):<EOL><INDENT>def wrapped(*args, **kwargs):<EOL><INDENT>try:<EOL><INDENT>return fn(*args, **kwargs)<EOL><DEDENT>except requests.exceptions.HTTPError as e:<EOL><INDENT>if e.response.status_code == requests.codes.unauthorized:<EOL><INDENT>raise access_common.Unavailable(message)<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT>return wrapped<EOL><DEDENT>return __raiseUnavailableFor401<EOL>", "docstring": "Returns a decorator to swallow a requests exception for modules that\n        are not accessible without logging in, and turn it into an Unavailable\n        exception.", "id": "f13542:m7"}
{"signature": "@_swallowRequestExceptions(fail_return=None)<EOL>@_retryConnectionErrors<EOL>@_friendlyAuthError<EOL>@_handleAuth<EOL>def addOwner(namespace, name, owner, registry=None):", "body": "registry = registry or Registry_Base_URL<EOL>url = '<STR_LIT>' % (<EOL>registry,<EOL>namespace,<EOL>name,<EOL>owner<EOL>)<EOL>request_headers = _headersForRegistry(registry)<EOL>response = requests.put(url, headers=request_headers)<EOL>if response.status_code == <NUM_LIT>:<EOL><INDENT>logger.error('<STR_LIT>' % (namespace[:-<NUM_LIT:1>], name))<EOL>return<EOL><DEDENT>response.raise_for_status()<EOL>return True<EOL>", "docstring": "Add an owner for a module or target (owners are the people with\n        permission to publish versions and add/remove the owners).", "id": "f13542:m26"}
{"signature": "@_swallowRequestExceptions(fail_return=\"<STR_LIT>\")<EOL>@_retryConnectionErrors<EOL>@_friendlyAuthError<EOL>@_handleAuth<EOL>def unpublish(namespace, name, version, registry=None):", "body": "registry = registry or Registry_Base_URL<EOL>url = '<STR_LIT>' % (<EOL>registry,<EOL>namespace,<EOL>name,<EOL>version<EOL>)<EOL>headers = _headersForRegistry(registry)<EOL>response = requests.delete(url, headers=headers)<EOL>response.raise_for_status()<EOL>return None<EOL>", "docstring": "Try to unpublish a recently published version. Return any errors that\n        occur.", "id": "f13542:m24"}
{"signature": "def _handleAuth(fn):", "body": "@functools.wraps(fn)<EOL>def wrapped(*args, **kwargs):<EOL><INDENT>from yotta.lib import auth<EOL>interactive = globalconf.get('<STR_LIT>')<EOL>try:<EOL><INDENT>return fn(*args, **kwargs)<EOL><DEDENT>except requests.exceptions.HTTPError as e:<EOL><INDENT>if e.response.status_code == requests.codes.unauthorized: <EOL><INDENT>logger.debug('<STR_LIT>', fn)<EOL>auth.authorizeUser(provider=None, interactive=interactive)<EOL>if interactive:<EOL><INDENT>logger.debug('<STR_LIT>')<EOL>return fn(*args, **kwargs)<EOL><DEDENT><DEDENT>raise<EOL><DEDENT><DEDENT>return wrapped<EOL>", "docstring": "Decorator to re-try API calls after asking the user for authentication.", "id": "f13542:m5"}
{"signature": "def _loadConfig(self):", "body": "config_dicts = [self.additional_config, self.app_config] + [t.getConfig() for t in self.hierarchy]<EOL>config_blame = [<EOL>_mirrorStructure(self.additional_config, '<STR_LIT>'),<EOL>_mirrorStructure(self.app_config, '<STR_LIT>'),<EOL>] + [<EOL>_mirrorStructure(t.getConfig(), t.getName()) for t in self.hierarchy<EOL>]<EOL>self.config = _mergeDictionaries(*config_dicts)<EOL>self.config_blame = _mergeDictionaries(*config_blame)<EOL>", "docstring": "load the configuration information from the target hierarchy", "id": "f13543:c1:m4"}
{"signature": "def getScript(self, scriptname):", "body": "for t in self.hierarchy:<EOL><INDENT>s = t.getScript(scriptname)<EOL>if s:<EOL><INDENT>return s<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "return the specified script if one exists (possibly inherited from\n            a base target)", "id": "f13543:c1:m3"}
{"signature": "def exec_helper(self, cmd, builddir):", "body": "try:<EOL><INDENT>child = subprocess.Popen(cmd, cwd=builddir)<EOL>child.wait()<EOL><DEDENT>except OSError as e:<EOL><INDENT>if e.errno == errno.ENOENT:<EOL><INDENT>if cmd[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>' % (cmd[<NUM_LIT:0>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return '<STR_LIT>' % (cmd)<EOL><DEDENT><DEDENT>if child.returncode:<EOL><INDENT>return '<STR_LIT>' % (cmd)<EOL><DEDENT>", "docstring": "Execute the given command, returning an error message if an error occured\n            or None if the command was succesful.", "id": "f13543:c1:m17"}
{"signature": "def debug(self, builddir, program):", "body": "try:<EOL><INDENT>signal.signal(signal.SIGINT, _ignoreSignal);<EOL>if self.getScript('<STR_LIT>') is not None:<EOL><INDENT>return self._debugWithScript(builddir, program)<EOL><DEDENT>elif '<STR_LIT>' in self.description:<EOL><INDENT>logger.warning(<EOL>'<STR_LIT>'+<EOL>'<STR_LIT>', self.getName()<EOL>)<EOL>return self._debugDeprecated(builddir, program)<EOL><DEDENT>else:<EOL><INDENT>return \"<STR_LIT>\" % self<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>signal.signal(signal.SIGINT, signal.SIG_DFL);<EOL><DEDENT>", "docstring": "Launch a debugger for the specified program. Uses the `debug`\n            script if specified by the target, falls back to the `debug` and\n            `debugServer` commands if not. `program` is inserted into the\n            $program variable in commands.", "id": "f13543:c1:m22"}
{"signature": "def __init__(self, leaf_target, base_targets, app_config, additional_config):", "body": "<EOL>super(DerivedTarget, self).__init__(<EOL>path = leaf_target.path,<EOL>installed_linked = leaf_target.installed_linked,<EOL>latest_suitable_version = leaf_target.latest_suitable_version<EOL>)<EOL>self.hierarchy = [leaf_target] + base_targets[:]<EOL>self.config = None<EOL>self.config_blame = None<EOL>self.app_config = app_config<EOL>self.additional_config = additional_config or {}<EOL>", "docstring": "Initialise a DerivedTarget (representing an inheritance hierarchy of\n            Targets.), given the most-derived Target description, and a set of\n            available Targets to compose the rest of the lineage from.\n\n            DerivedTarget provides build & debug commands, and access to the\n            derived target config info (merged with the application config\n            info from config.json, if any).\n\n            It's possible to update the application config for an existing\n            DerivedTarget instance.\n\n            DerivedTarget can also be used as a stand-in for the most-derived\n            (leaf) target in the inheritance hierarchy.", "id": "f13543:c1:m0"}
{"signature": "@fsutils.dropRootPrivs<EOL><INDENT>def build(self, builddir, component, args, release_build=False, build_args=None, targets=None,<EOL>release_no_debug_info_build=False):<DEDENT>", "body": "if build_args is None:<EOL><INDENT>build_args = []<EOL><DEDENT>if targets is None:<EOL><INDENT>targets = []<EOL><DEDENT>if release_no_debug_info_build:<EOL><INDENT>build_type = '<STR_LIT>'<EOL><DEDENT>elif release_build:<EOL><INDENT>build_type = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>build_type = '<STR_LIT>'<EOL><DEDENT>cmd = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>' % build_type, '<STR_LIT>', args.cmake_generator, '<STR_LIT:.>']<EOL>res = self.exec_helper(cmd, builddir)<EOL>if res is not None:<EOL><INDENT>return res<EOL><DEDENT>from yotta.lib import cmake_fixups<EOL>cmake_fixups.applyFixupsForFenerator(args.cmake_generator, builddir, component)<EOL>build_command = self.overrideBuildCommand(args.cmake_generator, targets=targets)<EOL>if build_command:<EOL><INDENT>cmd = build_command + build_args<EOL><DEDENT>else:<EOL><INDENT>cmd = ['<STR_LIT>', '<STR_LIT>', builddir]<EOL>if len(targets):<EOL><INDENT>cmd += ['<STR_LIT>', targets[<NUM_LIT:0>]]<EOL><DEDENT>cmd += build_args<EOL><DEDENT>res = self.exec_helper(cmd, builddir)<EOL>if res is not None:<EOL><INDENT>return res<EOL><DEDENT>hint = self.hintForCMakeGenerator(args.cmake_generator, component)<EOL>if hint:<EOL><INDENT>logger.info(hint)<EOL><DEDENT>", "docstring": "Execute the commands necessary to build this component, and all of\n            its dependencies.", "id": "f13543:c1:m18"}
{"signature": "def getAdditionalIncludes(self):", "body": "return reversed([<EOL>os.path.join(t.path, include_file)<EOL>for t in self.hierarchy<EOL>for include_file in t.description.get('<STR_LIT>', [])<EOL>])<EOL>", "docstring": "Return the list of cmake files which are to be included by yotta in\n            every module built. The list is returned in the order they should\n            be included (most-derived last).", "id": "f13543:c1:m11"}
{"signature": "def availableTags(self):", "body": "return [<EOL>GithubComponentVersion(<EOL>'<STR_LIT>', t[<NUM_LIT:0>], t[<NUM_LIT:1>], self.name, cache_key=_createCacheKey('<STR_LIT>', t[<NUM_LIT:0>], t[<NUM_LIT:1>], self.name)<EOL>) for t in self._getTags()<EOL>]<EOL>", "docstring": "return a list of GithubComponentVersion objects for all tags", "id": "f13545:c1:m6"}
{"signature": "@classmethod<EOL><INDENT>def createFromSource(cls, vs, name=None):<DEDENT>", "body": "return GithubComponent(vs.location, vs.spec, vs.semantic_spec, name)<EOL>", "docstring": "returns a github component for any github url (including\n            git+ssh:// git+http:// etc. or None if this is not a Github URL.\n            For all of these we use the github api to grab a tarball, because\n            that's faster.\n\n            Normally version will be empty, unless the original url was of the\n            form: 'owner/repo @version' or 'url://...#version', which can be used\n            to grab a particular tagged version.\n\n            (Note that for github components we ignore the component name - it\n             doesn't have to match the github module name)", "id": "f13545:c1:m1"}
{"signature": "def availableBranches(self):", "body": "return [<EOL>GithubComponentVersion(<EOL>'<STR_LIT>', b[<NUM_LIT:0>], b[<NUM_LIT:1>], self.name, cache_key=None<EOL>) for b in _getBranchHeads(self.repo).items()<EOL>]<EOL>", "docstring": "return a list of GithubComponentVersion objects for the tip of each branch", "id": "f13545:c1:m7"}
{"signature": "@_handleAuth<EOL>def _getTarball(url, into_directory, cache_key, origin_info=None):", "body": "try:<EOL><INDENT>access_common.unpackFromCache(cache_key, into_directory)<EOL><DEDENT>except KeyError as e:<EOL><INDENT>tok = settings.getProperty('<STR_LIT>', '<STR_LIT>')<EOL>headers = {}<EOL>if tok is not None:<EOL><INDENT>headers['<STR_LIT>'] = '<STR_LIT>' + str(tok)<EOL><DEDENT>logger.debug('<STR_LIT>', url)<EOL>response = requests.get(url, allow_redirects=True, stream=True, headers=headers)<EOL>response.raise_for_status()<EOL>logger.debug('<STR_LIT>', url)<EOL>logger.debug('<STR_LIT>', response.headers)<EOL>response.raise_for_status()<EOL>access_common.unpackTarballStream(<EOL>stream = response,<EOL>into_directory = into_directory,<EOL>hash = {},<EOL>cache_key = cache_key,<EOL>origin_info = origin_info<EOL>)<EOL><DEDENT>", "docstring": "unpack the specified tarball url into the specified directory", "id": "f13545:m8"}
{"signature": "@_handleAuth<EOL>def _getTags(repo):", "body": "logger.debug('<STR_LIT>', repo)<EOL>g = Github(settings.getProperty('<STR_LIT>', '<STR_LIT>'))<EOL>repo = g.get_repo(repo)<EOL>tags = repo.get_tags()<EOL>logger.debug('<STR_LIT>', repo, [t.name for t in tags])<EOL>return {t.name: _ensureDomainPrefixed(t.tarball_url) for t in tags}<EOL>", "docstring": "return a dictionary of {tag: tarball_url}", "id": "f13545:m3"}
{"signature": "def _handleAuth(fn):", "body": "@functools.wraps(fn)<EOL>def wrapped(*args, **kwargs):<EOL><INDENT>interactive = globalconf.get('<STR_LIT>')<EOL>def retryWithAuthOrRaise(original_exception):<EOL><INDENT>auth.authorizeUser(provider='<STR_LIT>', interactive=interactive)<EOL>if not interactive:<EOL><INDENT>raise original_exception<EOL><DEDENT>else:<EOL><INDENT>logger.debug('<STR_LIT>', settings.getProperty('<STR_LIT>', '<STR_LIT>'))<EOL>return fn(*args, **kwargs)<EOL><DEDENT><DEDENT>def handleRateLimitExceeded(original_exception):<EOL><INDENT>if not _userAuthedWithGithub():<EOL><INDENT>logger.warning('<STR_LIT>')<EOL>return retryWithAuthOrRaise(original_exception)<EOL><DEDENT>else:<EOL><INDENT>raise original_exception<EOL><DEDENT><DEDENT>try:<EOL><INDENT>return fn(*args, **kwargs)<EOL><DEDENT>except requests.exceptions.HTTPError as e:<EOL><INDENT>if e.response.status_code == <NUM_LIT>:<EOL><INDENT>return handleRateLimitExceeded(e)<EOL><DEDENT>if e.response.status_code == <NUM_LIT>:<EOL><INDENT>return retryWithAuthOrRaise(e)<EOL><DEDENT>raise<EOL><DEDENT>except github.BadCredentialsException as e:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return retryWithAuthOrRaise(e)<EOL><DEDENT>except github.UnknownObjectException as e:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>if not _userAuthedWithGithub():<EOL><INDENT>logger.info('<STR_LIT>')<EOL>return retryWithAuthOrRaise(e)<EOL><DEDENT>raise<EOL><DEDENT>except github.RateLimitExceededException as e:<EOL><INDENT>return handleRateLimitExceeded(e)<EOL><DEDENT>except github.GithubException as e:<EOL><INDENT>if e.status == <NUM_LIT>:<EOL><INDENT>return handleRateLimitExceeded(e)<EOL><DEDENT>raise<EOL><DEDENT><DEDENT>return wrapped<EOL>", "docstring": "Decorator to re-try API calls after asking the user for authentication.", "id": "f13545:m2"}
{"signature": "def licenses(self):", "body": "if '<STR_LIT>' in self.description:<EOL><INDENT>return [self.description['<STR_LIT>']]<EOL><DEDENT>else:<EOL><INDENT>return [x['<STR_LIT:type>'] for x in self.description['<STR_LIT>']]<EOL><DEDENT>", "docstring": "Return a list of licenses that apply to this module. (Strings,\n            which may be SPDX identifiers)", "id": "f13549:c0:m18"}
{"signature": "def getLibs(self, explicit_only=False):", "body": "if '<STR_LIT>' in self.description:<EOL><INDENT>return {os.path.normpath(self.description['<STR_LIT>']): self.getName()}<EOL><DEDENT>elif '<STR_LIT>' not in self.description and not explicit_only:<EOL><INDENT>return {'<STR_LIT:source>': self.getName()}<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT>", "docstring": "Return a dictionary of libraries to compile: {\"dirname\":\"libname\"},\n            this is used when automatically generating CMakeLists.\n\n            If explicit_only is not set, then in the absence of both 'lib' and\n            'bin' sections in the module.json file, the \"source\" directory\n            will be returned.\n\n            Note that currently modules may define only a single executable\n            binary or library to be built by the automatic build system, by\n            specifying `\"bin\": \"dir-to-be-built-into-binary\"`, or `\"lib\":\n            \"dir-to-be-built-into-library\"`, and the bin/lib will always have\n            the same name as the module. The default behaviour if nothing is\n            specified is for the 'source' directory to be built into a library.\n\n            The module.json syntax may allow for other combinations in the\n            future (and callers of this function should not rely on it\n            returning only a single item). For example, a \"bin\": {\"dirname\":\n            \"exename\"} syntax might be supported, however currently more\n            complex builds must be controlled by custom CMakeLists.", "id": "f13549:c0:m17"}
{"signature": "def getBinaries(self):", "body": "<EOL>if '<STR_LIT>' in self.description:<EOL><INDENT>return {os.path.normpath(self.description['<STR_LIT>']): self.getName()}<EOL><DEDENT>else:<EOL><INDENT>return {}<EOL><DEDENT>", "docstring": "Return a dictionary of binaries to compile: {\"dirname\":\"exename\"},\n            this is used when automatically generating CMakeLists\n\n            Note that currently modules may define only a single executable\n            binary or library to be built by the automatic build system, by\n            specifying `\"bin\": \"dir-to-be-built-into-binary\"`, or `\"lib\":\n            \"dir-to-be-built-into-library\"`, and the bin/lib will always have\n            the same name as the module. The default behaviour if nothing is\n            specified is for the 'source' directory to be built into a library.\n\n            The module.json syntax may allow for other combinations in the\n            future (and callers of this function should not rely on it\n            returning only a single item). For example, a \"bin\": {\"dirname\":\n            \"exename\"} syntax might be supported, however currently more\n            complex builds must be controlled by custom CMakeLists.", "id": "f13549:c0:m16"}
{"signature": "def isApplication(self):", "body": "return bool(len(self.getBinaries()))<EOL>", "docstring": "Return true if this module is an application instead of a reusable\n            library", "id": "f13549:c0:m15"}
{"signature": "def installedDependencies(self):", "body": "return self.installed_dependencies<EOL>", "docstring": "Return true if satisfyDependencies has been called.\n\n            Note that this is slightly different to when all of the\n            dependencies are actually satisfied, but can be used as if it means\n            that.", "id": "f13549:c0:m14"}
{"signature": "def getTarget(self, target_name_and_version, additional_config=None):", "body": "derived_target, errors = self.satisfyTarget(<EOL>target_name_and_version,<EOL>additional_config = additional_config,<EOL>install_missing = False<EOL>)<EOL>if len(errors):<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return derived_target<EOL><DEDENT>", "docstring": "Return a derived target object representing the selected target: if\n            the target is not installed, or is invalid then the returned object\n            will test false in a boolean context.\n\n            Returns derived_target\n\n            Errors are not displayed.", "id": "f13549:c0:m13"}
{"signature": "def satisfyTarget(self, target_name_and_version, update_installed=False, additional_config=None, install_missing=True):", "body": "<EOL>from yotta.lib import target<EOL>application_dir = None<EOL>if self.isApplication():<EOL><INDENT>application_dir = self.path<EOL><DEDENT>return target.getDerivedTarget(<EOL>target_name_and_version,<EOL>self.targetsPath(),<EOL>install_missing = install_missing,<EOL>application_dir = application_dir,<EOL>update_installed = update_installed,<EOL>additional_config = additional_config,<EOL>shrinkwrap = self.getShrinkwrap()<EOL>)<EOL>", "docstring": "Ensure that the specified target name (and optionally version,\n            github ref or URL) is installed in the targets directory of the\n            current component\n\n            returns (derived_target, errors)", "id": "f13549:c0:m12"}
{"signature": "def getDependencySpecs(self, target=None):", "body": "deps = []<EOL>def specForDependency(name, version_spec, istest):<EOL><INDENT>shrinkwrap = self.getShrinkwrapMapping()<EOL>shrinkwrap_version_req = None<EOL>if name in shrinkwrap:<EOL><INDENT>shrinkwrap_version_req = shrinkwrap[name]<EOL>logger.debug(<EOL>'<STR_LIT>', self.getName(), shrinkwrap_version_req, name<EOL>)<EOL><DEDENT>return pack.DependencySpec(<EOL>name,<EOL>version_spec,<EOL>istest,<EOL>shrinkwrap_version_req = shrinkwrap_version_req,<EOL>specifying_module = self.getName()<EOL>)<EOL><DEDENT>deps += [specForDependency(x[<NUM_LIT:0>], x[<NUM_LIT:1>], False) for x in self.description.get('<STR_LIT>', {}).items()]<EOL>target_deps = self.description.get('<STR_LIT>', {})<EOL>if target is not None:<EOL><INDENT>for conf_key, target_conf_deps in target_deps.items():<EOL><INDENT>if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated():<EOL><INDENT>logger.debug(<EOL>'<STR_LIT>' %<EOL>(conf_key, self.getName())<EOL>)<EOL>deps += [specForDependency(x[<NUM_LIT:0>], x[<NUM_LIT:1>], False) for x in target_conf_deps.items()]<EOL><DEDENT><DEDENT><DEDENT>deps += [specForDependency(x[<NUM_LIT:0>], x[<NUM_LIT:1>], True) for x in self.description.get('<STR_LIT>', {}).items()]<EOL>target_deps = self.description.get('<STR_LIT>', {})<EOL>if target is not None:<EOL><INDENT>for conf_key, target_conf_deps in target_deps.items():<EOL><INDENT>if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated():<EOL><INDENT>logger.debug(<EOL>'<STR_LIT>' %<EOL>(conf_key, self.getName())<EOL>)<EOL>deps += [specForDependency(x[<NUM_LIT:0>], x[<NUM_LIT:1>], True) for x in target_conf_deps.items()]<EOL><DEDENT><DEDENT><DEDENT>seen = set()<EOL>r = []<EOL>for dep in deps:<EOL><INDENT>if not dep.name in seen:<EOL><INDENT>r.append(dep)<EOL>seen.add(dep.name)<EOL><DEDENT><DEDENT>return r<EOL>", "docstring": "Returns [DependencySpec]\n\n            
These are returned in the order that they are listed in the\n            component description file: this is so that dependency resolution\n            proceeds in a predictable way.", "id": "f13549:c0:m1"}
{"signature": "def get(self, path):", "body": "path = _splitPath(path)<EOL>for config in self.configs.values():<EOL><INDENT>cur = config<EOL>for el in path:<EOL><INDENT>if el in cur:<EOL><INDENT>cur = cur[el]<EOL><DEDENT>else:<EOL><INDENT>cur = None<EOL>break<EOL><DEDENT><DEDENT>if cur is not None:<EOL><INDENT>return cur<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "return a configuration value\n\n            usage:\n                get('section.property')\n\n            Note that currently array indexes are not supported. You must\n            get the whole array.\n\n            returns None if any path element or the property is missing", "id": "f13552:c0:m2"}
{"signature": "def unpublish(self, registry=None):", "body": "return registry_access.unpublish(<EOL>self.getRegistryNamespace(),<EOL>self.getName(),<EOL>self.getVersion(),<EOL>registry=registry<EOL>)<EOL>", "docstring": "Try to un-publish the current version. Return a description of any\n            errors that occured, or None if successful.", "id": "f13553:c3:m25"}
{"signature": "def getError(self):", "body": "return self.error<EOL>", "docstring": "If this isn't a valid component/target, return some sort of\n            explanation about why that is.", "id": "f13553:c3:m6"}
{"signature": "def nonShrinkwrappedVersionReq(self):", "body": "return self.version_req<EOL>", "docstring": "return the dependency specification ignoring any shrinkwrap", "id": "f13553:c2:m2"}
{"signature": "def origin(self):", "body": "if self.origin_info is None:<EOL><INDENT>self.origin_info = {}<EOL>try:<EOL><INDENT>self.origin_info = ordered_json.load(os.path.join(self.path, Origin_Info_Fname))<EOL><DEDENT>except IOError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return self.origin_info.get('<STR_LIT:url>', None)<EOL>", "docstring": "Read the .yotta_origin.json file (if present), and return the value\n            of the 'url' property", "id": "f13553:c3:m3"}
{"signature": "def getVersion(self):", "body": "return self.version<EOL>", "docstring": "Return the version as specified by the package file.\n            This will always be a real version: 1.2.3, not a hash or a URL.\n\n            Note that a component installed through a URL still provides a real\n            version - so if the first component to depend on some component C\n            depends on it via a URI, and a second component depends on a\n            specific version 1.2.3, dependency resolution will only succeed if\n            the version of C obtained from the URL happens to be 1.2.3", "id": "f13553:c3:m14"}
{"signature": "def getScript(self, scriptname):", "body": "script = self.description.get('<STR_LIT>', {}).get(scriptname, None)<EOL>if script is not None:<EOL><INDENT>if isinstance(script, str) or isinstance(script, type(u'<STR_LIT>')):<EOL><INDENT>import shlex<EOL>script = shlex.split(script)<EOL><DEDENT>if len(script) and script[<NUM_LIT:0>].lower().endswith('<STR_LIT>'):<EOL><INDENT>if not os.path.isabs(script[<NUM_LIT:0>]):<EOL><INDENT>absscript = os.path.abspath(os.path.join(self.path, script[<NUM_LIT:0>]))<EOL>logger.debug('<STR_LIT>', script[<NUM_LIT:0>], absscript)<EOL>script[<NUM_LIT:0>] = absscript<EOL><DEDENT>import sys<EOL>script = [sys.executable] + script<EOL><DEDENT><DEDENT>return script<EOL>", "docstring": "Return the specified script command. If the first part of the\n            command is a .py file, then the current python interpreter is\n            prepended.\n\n            If the script is a single string, rather than an array, it is\n            shlex-split.", "id": "f13553:c3:m26"}
{"signature": "def which(program):", "body": "<EOL>if os.path.exists(program) and os.access(program, os.X_OK):<EOL><INDENT>return program<EOL><DEDENT>for path in os.environ['<STR_LIT>'].split(os.pathsep):<EOL><INDENT>path = path.strip('<STR_LIT:\">')<EOL>for ext in os.environ.get('<STR_LIT>', '<STR_LIT>').split(os.pathsep):<EOL><INDENT>progpath = os.path.join(path, program + ext)<EOL>if os.path.exists(progpath) and os.access(progpath, os.X_OK):<EOL><INDENT>return progpath<EOL><DEDENT><DEDENT><DEDENT>return None<EOL>", "docstring": "look for \"program\" in PATH (respecting PATHEXT), and return the path to\n        it, or None if it was not found", "id": "f13555:m6"}
{"signature": "def generateRecursive(self, component, all_components, builddir=None, modbuilddir=None, processed_components=None, application=None):", "body": "assert(self.configured)<EOL>if builddir is None:<EOL><INDENT>builddir = self.buildroot<EOL><DEDENT>if modbuilddir is None:<EOL><INDENT>modbuilddir = os.path.join(builddir, '<STR_LIT>')<EOL><DEDENT>if processed_components is None:<EOL><INDENT>processed_components = dict()<EOL><DEDENT>if not self.target:<EOL><INDENT>yield '<STR_LIT>' % self.target<EOL><DEDENT>toplevel = not len(processed_components)<EOL>logger.debug('<STR_LIT>' % (component, self.target))<EOL>recursive_deps = component.getDependenciesRecursive(<EOL>available_components = all_components,<EOL>target = self.target,<EOL>available_only = True,<EOL>test = True<EOL>)<EOL>dependencies = component.getDependencies(<EOL>all_components,<EOL>target = self.target,<EOL>available_only = True,<EOL>test = True<EOL>)<EOL>for name, dep in dependencies.items():<EOL><INDENT>if not dep:<EOL><INDENT>if dep.isTestDependency():<EOL><INDENT>logger.debug('<STR_LIT>' % (name, component))<EOL><DEDENT>else:<EOL><INDENT>yield '<STR_LIT>' % (name, component)<EOL><DEDENT><DEDENT><DEDENT>processed_components[component.getName()] = component<EOL>new_dependencies = OrderedDict([(name,c) for name,c in dependencies.items() if c and not name in processed_components])<EOL>self.generate(builddir, modbuilddir, component, new_dependencies, dependencies, recursive_deps, application, toplevel)<EOL>logger.debug('<STR_LIT>' % component)<EOL>for d in recursive_deps.values():<EOL><INDENT>logger.debug('<STR_LIT>' % d)<EOL><DEDENT>processed_components.update(new_dependencies)<EOL>for name, c in new_dependencies.items():<EOL><INDENT>for error in self.generateRecursive(<EOL>c, all_components, os.path.join(modbuilddir, name), modbuilddir, processed_components, application=application<EOL>):<EOL><INDENT>yield error<EOL><DEDENT><DEDENT>", "docstring": "generate top-level CMakeLists for this 
component and its\n            dependencies: the CMakeLists are all generated in self.buildroot,\n            which MUST be out-of-source\n\n            !!! NOTE: experimenting with a slightly different way of doing\n            things here, this function is a generator that yields any errors\n            produced, so the correct use is:\n\n            for error in gen.generateRecursive(...):\n                print(error)", "id": "f13557:c1:m3"}
{"signature": "def configure(self, component, all_dependencies):", "body": "r = {}<EOL>builddir = self.buildroot<EOL>available_dependencies = OrderedDict((k, v) for k, v in all_dependencies.items() if v)<EOL>self.set_toplevel_definitions = '<STR_LIT>'<EOL>if self.build_info_include_file is None:<EOL><INDENT>self.build_info_include_file, build_info_definitions = self.getBuildInfo(component.path, builddir)<EOL>self.set_toplevel_definitions += build_info_definitions<EOL><DEDENT>if self.config_include_file is None:<EOL><INDENT>self.config_include_file, config_definitions, self.config_json_file = self._getConfigData(available_dependencies, component, builddir, self.build_info_include_file)<EOL>self.set_toplevel_definitions += config_definitions<EOL><DEDENT>self.configured = True<EOL>return {<EOL>'<STR_LIT>': self.config_include_file,<EOL>'<STR_LIT>': self.config_json_file,<EOL>'<STR_LIT>': self.build_info_include_file<EOL>}<EOL>", "docstring": "Ensure all config-time files have been generated. Return a\n            dictionary of generated items.", "id": "f13557:c1:m2"}
{"signature": "def generate(<EOL>self, builddir, modbuilddir, component, active_dependencies, immediate_dependencies, all_dependencies, application, toplevel<EOL>):", "body": "include_root_dirs = '<STR_LIT>'<EOL>if application is not None and component is not application:<EOL><INDENT>include_root_dirs += '<STR_LIT>' % replaceBackslashes(application.path)<EOL><DEDENT>include_sys_dirs = '<STR_LIT>'<EOL>include_other_dirs = '<STR_LIT>'<EOL>for name, c in itertools.chain(((component.getName(), component),), all_dependencies.items()):<EOL><INDENT>if c is not component and c.isTestDependency():<EOL><INDENT>continue<EOL><DEDENT>include_root_dirs += '<STR_LIT>' % replaceBackslashes(c.path)<EOL>dep_sys_include_dirs = c.getExtraSysIncludes()<EOL>for d in dep_sys_include_dirs:<EOL><INDENT>include_sys_dirs += '<STR_LIT>' % replaceBackslashes(os.path.join(c.path, d))<EOL><DEDENT>dep_extra_include_dirs = c.getExtraIncludes()<EOL>for d in dep_extra_include_dirs:<EOL><INDENT>include_other_dirs += '<STR_LIT>' % replaceBackslashes(os.path.join(c.path, d))<EOL><DEDENT><DEDENT>add_depend_subdirs = '<STR_LIT>'<EOL>for name, c in active_dependencies.items():<EOL><INDENT>depend_subdir = replaceBackslashes(os.path.join(modbuilddir, name))<EOL>relpath = replaceBackslashes(os.path.relpath(depend_subdir, self.buildroot))<EOL>add_depend_subdirs +='<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'% (depend_subdir, relpath)<EOL><DEDENT>delegate_to_existing = None<EOL>delegate_build_dir = None<EOL>module_is_empty = False<EOL>if os.path.isfile(os.path.join(component.path, '<STR_LIT>')) and not component.ignores('<STR_LIT>'):<EOL><INDENT>delegate_to_existing = component.path<EOL>add_own_subdirs = []<EOL>logger.debug(\"<STR_LIT>\", builddir)<EOL>delegate_build_dir = os.path.join(builddir, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self._validateListedSubdirsExist(component)<EOL>subdirs = self._listSubDirectories(component, toplevel)<EOL>manual_subdirs      = subdirs['<STR_LIT>']<EOL>autogen_subdirs     = 
subdirs['<STR_LIT>']<EOL>binary_subdirs      = subdirs['<STR_LIT>']<EOL>lib_subdirs         = subdirs['<STR_LIT>']<EOL>test_subdirs        = subdirs['<STR_LIT:test>']<EOL>resource_subdirs    = subdirs['<STR_LIT>']<EOL>header_subdirs      = subdirs['<STR_LIT>']<EOL>logger.debug(\"<STR_LIT>\", component, lib_subdirs, binary_subdirs)<EOL>add_own_subdirs = []<EOL>for f in manual_subdirs:<EOL><INDENT>if os.path.isfile(os.path.join(component.path, f, '<STR_LIT>')):<EOL><INDENT>if f in test_subdirs and component.isTestDependency():<EOL><INDENT>continue<EOL><DEDENT>add_own_subdirs.append(<EOL>(os.path.join(component.path, f), f)<EOL>)<EOL><DEDENT><DEDENT>all_subdirs = manual_subdirs + [x[<NUM_LIT:0>] for x in autogen_subdirs]<EOL>if component.isTestDependency():<EOL><INDENT>if len(autogen_subdirs) + len(add_own_subdirs) == <NUM_LIT:0>:<EOL><INDENT>module_is_empty = True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if len(autogen_subdirs) + len(add_own_subdirs) <= len(test_subdirs):<EOL><INDENT>module_is_empty = True<EOL><DEDENT><DEDENT>for f, source_files in autogen_subdirs:<EOL><INDENT>if f in test_subdirs:<EOL><INDENT>if component.isTestDependency():<EOL><INDENT>continue<EOL><DEDENT>self.generateTestDirList(<EOL>builddir, f, source_files, component, immediate_dependencies, toplevel=toplevel, module_is_empty=module_is_empty<EOL>)<EOL><DEDENT>else:<EOL><INDENT>if f in binary_subdirs:<EOL><INDENT>is_executable = True<EOL>object_name = binary_subdirs[f]<EOL><DEDENT>else:<EOL><INDENT>assert(f in lib_subdirs)<EOL>object_name = lib_subdirs[f]<EOL><DEDENT>for header_dir, header_files in header_subdirs:<EOL><INDENT>source_files.extend(header_files)<EOL><DEDENT>self.generateSubDirList(<EOL>builddir = builddir,<EOL>dirname = f,<EOL>source_files = source_files,<EOL>component = component,<EOL>all_subdirs = all_subdirs,<EOL>immediate_dependencies = immediate_dependencies,<EOL>object_name = object_name,<EOL>resource_subdirs = resource_subdirs,<EOL>is_executable = (f in 
binary_subdirs)<EOL>)<EOL><DEDENT>add_own_subdirs.append(<EOL>(os.path.join(builddir, f), f)<EOL>)<EOL><DEDENT>if component.isTestDependency():<EOL><INDENT>test_subdirs = []<EOL><DEDENT>if module_is_empty:<EOL><INDENT>if len(binary_subdirs):<EOL><INDENT>logger.warning('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>add_own_subdirs.append(self.createDummyLib(<EOL>component, builddir, [x[<NUM_LIT:0>] for x in immediate_dependencies.items() if not x[<NUM_LIT:1>].isTestDependency()]<EOL>))<EOL><DEDENT><DEDENT><DEDENT>toolchain_file_path = os.path.join(builddir, '<STR_LIT>')<EOL>if toplevel:<EOL><INDENT>template = jinja_environment.get_template('<STR_LIT>')<EOL>file_contents = template.render({  <EOL>\"<STR_LIT>\": self.target.getToolchainFiles()<EOL>})<EOL>self._writeFile(toolchain_file_path, file_contents)<EOL><DEDENT>template = jinja_environment.get_template('<STR_LIT>')<EOL>relpath = os.path.relpath(builddir, self.buildroot)<EOL>file_contents = template.render({ <EOL>\"<STR_LIT>\": toplevel,<EOL>\"<STR_LIT>\": self.target.getName(),<EOL>\"<STR_LIT>\": self.set_toplevel_definitions,<EOL>\"<STR_LIT>\": toolchain_file_path,<EOL>\"<STR_LIT>\": component,<EOL>\"<STR_LIT>\": relpath,<EOL>\"<STR_LIT>\": include_root_dirs,<EOL>\"<STR_LIT>\": include_sys_dirs,<EOL>\"<STR_LIT>\": include_other_dirs,<EOL>\"<STR_LIT>\": add_depend_subdirs,<EOL>\"<STR_LIT>\": add_own_subdirs,<EOL>\"<STR_LIT>\": self.config_include_file,<EOL>\"<STR_LIT>\": delegate_to_existing,<EOL>\"<STR_LIT>\": delegate_build_dir,<EOL>\"<STR_LIT>\": active_dependencies,<EOL>\"<STR_LIT>\": module_is_empty,<EOL>\"<STR_LIT>\": self.target.getAdditionalIncludes()<EOL>})<EOL>self._writeFile(os.path.join(builddir, '<STR_LIT>'), file_contents)<EOL>", "docstring": "active_dependencies is the dictionary of components that need to be\n            built for this component, but will not already have been built for\n            another component.", "id": "f13557:c1:m10"}
{"signature": "def long_to_bytes(n, blocksize=<NUM_LIT:0>):", "body": "<EOL>s = b'<STR_LIT>'<EOL>n = int(n)<EOL>pack = struct.pack<EOL>while n > <NUM_LIT:0>:<EOL><INDENT>s = pack('<STR_LIT>', n & <NUM_LIT>) + s<EOL>n = n >> <NUM_LIT:32><EOL><DEDENT>for i in range(len(s)):<EOL><INDENT>if s[i] != '<STR_LIT>':<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>s = '<STR_LIT>'<EOL>i = <NUM_LIT:0><EOL><DEDENT>s = s[i:]<EOL>if blocksize > <NUM_LIT:0> and len(s) % blocksize:<EOL><INDENT>s = (blocksize - len(s) % blocksize) * '<STR_LIT>' + s<EOL><DEDENT>return s<EOL>", "docstring": "long_to_bytes(n:long, blocksize:int) : string\n    Convert a long integer to a byte string.\n\n    If optional blocksize is given and greater than zero, pad the front of the\n    byte string with binary zeros so that the length is a multiple of\n    blocksize.", "id": "f13558:m0"}
{"signature": "def unpackFromCache(cache_key, to_directory):", "body": "if cache_key is None:<EOL><INDENT>raise NotInCache('<STR_LIT>')<EOL><DEDENT>cache_key = _encodeCacheKey(cache_key)<EOL>cache_dir = folders.cacheDirectory()<EOL>fsutils.mkDirP(cache_dir)<EOL>path = os.path.join(cache_dir, cache_key)<EOL>logger.debug('<STR_LIT>', path, to_directory)<EOL>try:<EOL><INDENT>unpackFrom(path, to_directory)<EOL>try:<EOL><INDENT>shutil.copy(path + '<STR_LIT>', os.path.join(to_directory, '<STR_LIT>'))<EOL><DEDENT>except IOError as e:<EOL><INDENT>if e.errno == errno.ENOENT:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>cache_logger.debug('<STR_LIT>', cache_key, to_directory)<EOL>return<EOL><DEDENT>except IOError as e:<EOL><INDENT>if e.errno == errno.ENOENT:<EOL><INDENT>cache_logger.debug('<STR_LIT>', cache_key)<EOL>raise NotInCache('<STR_LIT>')<EOL><DEDENT><DEDENT>except OSError as e:<EOL><INDENT>if e.errno == errno.ENOTEMPTY:<EOL><INDENT>logger.error('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>", "docstring": "If the specified cache key exists, unpack the tarball into the\n        specified directory, otherwise raise NotInCache (a KeyError subclass).", "id": "f13560:m6"}
{"signature": "def _downloadToCache(stream, hashinfo={}, origin_info=dict()):", "body": "hash_name  = None<EOL>hash_value = None<EOL>m = None<EOL>if len(hashinfo):<EOL><INDENT>for h in ('<STR_LIT>',):<EOL><INDENT>if h in hashinfo:<EOL><INDENT>hash_name  = h<EOL>hash_value = hashinfo[h]<EOL>m = getattr(hashlib, h)()<EOL>break<EOL><DEDENT><DEDENT>if not hash_name:<EOL><INDENT>logger.warning('<STR_LIT>', hashinfo)<EOL><DEDENT><DEDENT>cache_dir = folders.cacheDirectory()<EOL>fsutils.mkDirP(cache_dir)<EOL>file_size = <NUM_LIT:0><EOL>(download_file, download_fname) = tempfile.mkstemp(dir=cache_dir, suffix='<STR_LIT>')<EOL>with os.fdopen(download_file, '<STR_LIT:wb>') as f:<EOL><INDENT>f.seek(<NUM_LIT:0>)<EOL>for chunk in stream.iter_content(<NUM_LIT>):<EOL><INDENT>f.write(chunk)<EOL>if hash_name:<EOL><INDENT>m.update(chunk)<EOL><DEDENT><DEDENT>if hash_name:<EOL><INDENT>calculated_hash = m.hexdigest()<EOL>logger.debug(<EOL>'<STR_LIT>' % (<EOL>hash_name, calculated_hash, hash_value<EOL>)<EOL>)<EOL>if hash_value and (hash_value != calculated_hash):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>file_size = f.tell()<EOL>logger.debug('<STR_LIT>', file_size, download_fname)<EOL>f.truncate()<EOL><DEDENT>extended_origin_info = {<EOL>'<STR_LIT>': hashinfo,<EOL>'<STR_LIT:size>': file_size<EOL>}<EOL>extended_origin_info.update(origin_info)<EOL>ordered_json.dump(download_fname + '<STR_LIT>', extended_origin_info)<EOL>return os.path.basename(download_fname)<EOL>", "docstring": "Download the specified stream to a temporary cache directory, and\n        returns a cache key that can be used to access/remove the file.\n        You should use either removeFromCache(cache_key) or _moveCachedFile to\n        move the downloaded file to a known key after downloading.", "id": "f13560:m7"}
{"signature": "@sometimesPruneCache(<NUM_LIT>)<EOL>def unpackTarballStream(stream, into_directory, hash={}, cache_key=None, origin_info=dict()):", "body": "cache_key = _encodeCacheKey(cache_key)<EOL>if getMaxCachedModules() == <NUM_LIT:0>:<EOL><INDENT>cache_key = None<EOL><DEDENT>new_cache_key = _downloadToCache(stream, hash, origin_info)<EOL>unpackFromCache(new_cache_key, into_directory)<EOL>if cache_key is None:<EOL><INDENT>removeFromCache(new_cache_key)<EOL><DEDENT>else:<EOL><INDENT>_moveCachedFile(new_cache_key, cache_key)<EOL><DEDENT>", "docstring": "Unpack a responses stream that contains a tarball into a directory. If\n        a hash is provided, then it will be used as a cache key (for future\n        requests you can try to retrieve the key value from the cache first,\n        before making the request)", "id": "f13560:m9"}
{"signature": "@classmethod<EOL><INDENT>def createFromSource(cls, vs, name=None):<DEDENT>", "body": "return GitComponent(vs.location, vs.spec, vs.semantic_spec)<EOL>", "docstring": "returns a git component for any git:// url, or None if this is not\n            a git component.\n\n            Normally version will be empty, unless the original url was of the\n            form 'git://...#version', which can be used to grab a particular\n            tag or branch, or ...#>=1.2.3, which can be used to specify\n            semantic version specifications on tags.", "id": "f13561:c2:m1"}
{"signature": "def availableTags(self):", "body": "return [GitCloneVersion('<STR_LIT>', t, self) for t in self.vcs.tags()]<EOL>", "docstring": "return a list of GitCloneVersion objects for all tags", "id": "f13561:c1:m3"}
{"signature": "def availableBranches(self):", "body": "return [GitCloneVersion('<STR_LIT>', b, self) for b in self.vcs.branches()]<EOL>", "docstring": "return a list of GitCloneVersion objects for the tip of each branch", "id": "f13561:c1:m4"}
{"signature": "def checkDependenciesForShrinkwrap(dependency_list):", "body": "<EOL>from yotta.lib import sourceparse<EOL>errors = []<EOL>available_versions = {}<EOL>for mod in dependency_list.get('<STR_LIT>', []):<EOL><INDENT>available_versions[mod['<STR_LIT:name>']] = mod['<STR_LIT:version>']<EOL><DEDENT>for mod in dependency_list.get('<STR_LIT>', []):<EOL><INDENT>for spec_info in mod.get('<STR_LIT>', []):<EOL><INDENT>name = spec_info['<STR_LIT:name>']<EOL>spec = spec_info['<STR_LIT:version>']<EOL>if spec_info.get('<STR_LIT>', False):<EOL><INDENT>continue<EOL><DEDENT>if not name in available_versions:<EOL><INDENT>errors.append('<STR_LIT>' % (<EOL>name, mod['<STR_LIT:name>']<EOL>))<EOL><DEDENT>else:<EOL><INDENT>available_version = available_versions[name]<EOL>parsed_spec = sourceparse.parseSourceURL(spec)<EOL>if not parsed_spec.semanticSpecMatches(available_version):<EOL><INDENT>errors.append('<STR_LIT>' % (<EOL>name, available_version, parsed_spec.semanticSpec(), mod['<STR_LIT:name>']<EOL>))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return errors<EOL>", "docstring": "return a list of errors encountered (e.g. dependency missing or\n        specification not met", "id": "f13566:m2"}
{"signature": "def poisson_sample(<EOL>offset,<EOL>G,<EOL>heritability=<NUM_LIT:0.5>,<EOL>causal_variants=None,<EOL>causal_variance=<NUM_LIT:0>,<EOL>random_state=None,<EOL>):", "body": "mean, cov = _mean_cov(<EOL>offset, G, heritability, causal_variants, causal_variance, random_state<EOL>)<EOL>link = LogLink()<EOL>lik = PoissonProdLik(link)<EOL>sampler = GGPSampler(lik, mean, cov)<EOL>return sampler.sample(random_state)<EOL>", "docstring": "Poisson likelihood sampling.\n\n    Parameters\n    ----------\n    random_state : random_state\n        Set the initial random state.\n\n    Example\n    -------\n\n    .. doctest::\n\n        >>> from glimix_core.random import poisson_sample\n        >>> from numpy.random import RandomState\n        >>> offset = -0.5\n        >>> G = [[0.5, -1], [2, 1]]\n        >>> poisson_sample(offset, G, random_state=RandomState(0))\n        array([0, 6])", "id": "f13578:m2"}
{"signature": "def binomial_sample(<EOL>ntrials,<EOL>offset,<EOL>G,<EOL>heritability=<NUM_LIT:0.5>,<EOL>causal_variants=None,<EOL>causal_variance=<NUM_LIT:0>,<EOL>random_state=None,<EOL>):", "body": "link = LogitLink()<EOL>mean, cov = _mean_cov(<EOL>offset, G, heritability, causal_variants, causal_variance, random_state<EOL>)<EOL>lik = BinomialProdLik(ntrials, link)<EOL>sampler = GGPSampler(lik, mean, cov)<EOL>return sampler.sample(random_state)<EOL>", "docstring": "Binomial likelihood sampling.\n\n    Parameters\n    ----------\n    random_state : random_state\n        Set the initial random state.\n\n    Example\n    -------\n\n    .. doctest::\n\n        >>> from glimix_core.random import binomial_sample\n        >>> from numpy.random import RandomState\n        >>> ntrials = [5, 15]\n        >>> offset = 0.5\n        >>> G = [[1, -1], [2, 1]]\n        >>> binomial_sample(ntrials, offset, G, random_state=RandomState(0))\n        array([ 2., 14.])", "id": "f13578:m1"}
{"signature": "@property<EOL><INDENT>def B(self):<DEDENT>", "body": "return unvec(self._vecB.value, (self.X.shape[<NUM_LIT:1>], self.A.shape[<NUM_LIT:0>]))<EOL>", "docstring": "Effect-sizes parameter, B.", "id": "f13588:c0:m7"}
{"signature": "def __init__(self, A, X):", "body": "self._A = asarray(A, float)<EOL>self._X = asarray(X, float)<EOL>vecB = zeros((X.shape[<NUM_LIT:1>], A.shape[<NUM_LIT:0>])).ravel()<EOL>self._vecB = Vector(vecB)<EOL>self._nparams = vecB.size<EOL>Function.__init__(self, \"<STR_LIT>\", vecB=self._vecB)<EOL>", "docstring": "Constructor.\n\nParameters\n----------\nA : array_like\n    p\u00d7p array.\nX : array_like\n    n\u00d7c array.", "id": "f13588:c0:m0"}
{"signature": "@property<EOL><INDENT>def X(self):<DEDENT>", "body": "return self._X<EOL>", "docstring": "Matrix X.", "id": "f13588:c0:m3"}
{"signature": "@property<EOL><INDENT>def nparams(self):<DEDENT>", "body": "return self._nparams<EOL>", "docstring": "Number of parameters.", "id": "f13588:c0:m1"}
{"signature": "@property<EOL><INDENT>def A(self):<DEDENT>", "body": "return self._A<EOL>", "docstring": "Matrix A.", "id": "f13588:c0:m2"}
{"signature": "def value(self):", "body": "return add.reduce([mean.value() for mean in self._means])<EOL>", "docstring": "Sum of mean vectors, \ud835\udc1f\u2080 + \ud835\udc1f\u2081 + \u2026.\n\nReturns\n-------\n\ud835\udc26 : ndarray\n    \ud835\udc1f\u2080 + \ud835\udc1f\u2081 + \u2026.", "id": "f13593:c0:m1"}
{"signature": "def gradient(self):", "body": "grad = {}<EOL>for i, f in enumerate(self._means):<EOL><INDENT>for varname, g in f.gradient().items():<EOL><INDENT>grad[f\"<STR_LIT>\"] = g<EOL><DEDENT><DEDENT>return grad<EOL>", "docstring": "Sum of mean function derivatives.\n\nReturns\n-------\n\u2202\ud835\udc26 : dict\n    \u2202\ud835\udc1f\u2080 + \u2202\ud835\udc1f\u2081 + \u2026.", "id": "f13593:c0:m2"}
{"signature": "def __init__(self, n):", "body": "self._offset = Scalar(<NUM_LIT:0.0>)<EOL>self._offset.bounds = (-<NUM_LIT>, +<NUM_LIT:200>)<EOL>self._n = n<EOL>Function.__init__(self, \"<STR_LIT>\", offset=self._offset)<EOL>", "docstring": "Constructor.\n\nParameters\n----------\nn : int\n    Size of the \ud835\udfcf array.", "id": "f13595:c0:m0"}
{"signature": "def gradient(self):", "body": "return dict(offset=ones(self._n))<EOL>", "docstring": "Gradient of the offset function.\n\nReturns\n-------\noffset : (n,) ndarray\n    Vector \ud835\udfcf.", "id": "f13595:c0:m4"}
{"signature": "def value(self):", "body": "return self._X @ self._effsizes<EOL>", "docstring": "Linear mean function.\n\nReturns\n-------\n\ud835\udc26 : (n,) ndarray\n    X\ud835\udf36.", "id": "f13596:c0:m2"}
{"signature": "def __init__(self, X):", "body": "X = asarray(X, float)<EOL>m = X.shape[<NUM_LIT:1>]<EOL>self._effsizes = Vector(zeros(m))<EOL>self._effsizes.bounds = [(-<NUM_LIT>, +<NUM_LIT:200>)] * m<EOL>self._X = X<EOL>Function.__init__(self, \"<STR_LIT>\", effsizes=self._effsizes)<EOL>", "docstring": "Constructor.\n\nParameters\n----------\nX : array_like\n    Covariates X, from X\ud835\udf36.", "id": "f13596:c0:m0"}
{"signature": "@property<EOL><INDENT>def dim(self):<DEDENT>", "body": "return self._I.shape[<NUM_LIT:0>]<EOL>", "docstring": "Dimension of the matrix, d.\n\nIt corresponds to the number of rows and to the number of columns.", "id": "f13597:c0:m3"}
{"signature": "def value(self):", "body": "return self.scale * self._I<EOL>", "docstring": "Covariance matrix.\n\nReturns\n-------\nK : ndarray\n    s\u22c5I, for scale s and a d\u00d7d identity matrix I.", "id": "f13597:c0:m4"}
{"signature": "def __init__(self, dim):", "body": "self._dim = dim<EOL>self._I = eye(dim)<EOL>self._logscale = Scalar(<NUM_LIT:0.0>)<EOL>Function.__init__(self, \"<STR_LIT>\", logscale=self._logscale)<EOL>self._logscale.bounds = (-<NUM_LIT>, +<NUM_LIT:10>)<EOL>", "docstring": "Constructor.\n\nParameters\n----------\ndim : int\n    Matrix dimension, d.", "id": "f13597:c0:m0"}
{"signature": "def value(self):", "body": "return self.scale * self._K0<EOL>", "docstring": "Covariance matrix, s\u22c5K\u2080.\n\nReturns\n-------\nK : ndarray\n    s\u22c5K\u2080.", "id": "f13598:c0:m3"}
{"signature": "@property<EOL><INDENT>def scale(self):<DEDENT>", "body": "return float(exp(self._logscale.value))<EOL>", "docstring": "Scale parameter, s.", "id": "f13598:c0:m1"}
{"signature": "@property<EOL><INDENT>def shape(self):<DEDENT>", "body": "n = self._L.shape[<NUM_LIT:0>]<EOL>return (n, n)<EOL>", "docstring": "Array shape.", "id": "f13599:c0:m4"}
{"signature": "def listen(self, func):", "body": "self._Lu.listen(func)<EOL>", "docstring": "Listen to parameters change.\n\nParameters\n----------\nfunc : callable\n    Function to be called when a parameter changes.", "id": "f13599:c0:m3"}
{"signature": "def unfix(self):", "body": "self._Lu.unfix()<EOL>", "docstring": "Enable parameter optimisation.", "id": "f13599:c0:m6"}
{"signature": "@property<EOL><INDENT>def Lu(self):<DEDENT>", "body": "return self._Lu.value<EOL>", "docstring": "Lower-triangular, flat part of L.", "id": "f13599:c0:m8"}
{"signature": "@property<EOL><INDENT>def L(self):<DEDENT>", "body": "m = len(self._tril1[<NUM_LIT:0>])<EOL>self._L[self._tril1] = self._Lu.value[:m]<EOL>self._L[self._diag] = exp(self._Lu.value[m:])<EOL>return self._L<EOL>", "docstring": "Lower-triangular matrix L such that K = LL\u1d40 + \u03f5I.\n\nReturns\n-------\nL : (d, d) ndarray\n    Lower-triangular matrix.", "id": "f13599:c0:m10"}
{"signature": "def gradient(self):", "body": "L = self.L<EOL>self._grad_Lu[:] = <NUM_LIT:0><EOL>for i in range(len(self._tril1[<NUM_LIT:0>])):<EOL><INDENT>row = self._tril1[<NUM_LIT:0>][i]<EOL>col = self._tril1[<NUM_LIT:1>][i]<EOL>self._grad_Lu[row, :, i] = L[:, col]<EOL>self._grad_Lu[:, row, i] += L[:, col]<EOL><DEDENT>m = len(self._tril1[<NUM_LIT:0>])<EOL>for i in range(len(self._diag[<NUM_LIT:0>])):<EOL><INDENT>row = self._diag[<NUM_LIT:0>][i]<EOL>col = self._diag[<NUM_LIT:1>][i]<EOL>self._grad_Lu[row, :, m + i] = L[row, col] * L[:, col]<EOL>self._grad_Lu[:, row, m + i] += L[row, col] * L[:, col]<EOL><DEDENT>return {\"<STR_LIT>\": self._grad_Lu}<EOL>", "docstring": "Derivative of the covariance matrix over the parameters of L.\n\nReturns\n-------\nLu : ndarray\n    Derivative of K over the lower triangular part of L.", "id": "f13599:c0:m14"}
{"signature": "def fix(self):", "body": "self._Lu.fix()<EOL>", "docstring": "Disable parameter optimisation.", "id": "f13599:c0:m5"}
{"signature": "def value(self):", "body": "K = dot(self.L, self.L.T)<EOL>return K + self._epsilon * eye(K.shape[<NUM_LIT:0>])<EOL>", "docstring": "Covariance matrix.\n\nReturns\n-------\nK : ndarray\n    Matrix K = LL\u1d40 + \u03f5I, for a very small positive number \u03f5.", "id": "f13599:c0:m13"}
{"signature": "def gradient(self):", "body": "grad = {}<EOL>for i, f in enumerate(self._covariances):<EOL><INDENT>for varname, g in f.gradient().items():<EOL><INDENT>grad[f\"<STR_LIT>\"] = g<EOL><DEDENT><DEDENT>return grad<EOL>", "docstring": "Sum of covariance function derivatives.\n\nReturns\n-------\ndict\n    \u2202K\u2080 + \u2202K\u2081 + \u22ef", "id": "f13607:c0:m2"}
{"signature": "def value(self):", "body": "return add.reduce([cov.value() for cov in self._covariances])<EOL>", "docstring": "r\"\"\"\n        Sum of covariance matrices.\n\n        Returns\n        -------\n        K : ndarray\n            K\u2080 + K\u2081 + \u22ef", "id": "f13607:c0:m1"}
{"signature": "def __init__(self, covariances):", "body": "self._covariances = [c for c in covariances]<EOL>Function.__init__(self, \"<STR_LIT>\", composite=self._covariances)<EOL>", "docstring": "Constructor.\n\nParameters\n----------\ncovariances : list\n    List of covariance functions.", "id": "f13607:c0:m0"}
{"signature": "def __init__(self, n, m):", "body": "self._L = ones((n, m))<EOL>self._Lu = Vector(self._L.ravel())<EOL>Function.__init__(self, \"<STR_LIT>\", Lu=self._Lu)<EOL>", "docstring": "Constructor.\n\nParameters\n----------\nn : int\n    Covariance dimension.\nm : int\n    Upper limit of the covariance matrix rank.", "id": "f13608:c0:m0"}
{"signature": "@property<EOL><INDENT>def L(self):<DEDENT>", "body": "return self._L<EOL>", "docstring": "Matrix L from K = LL\u1d40.\n\nReturns\n-------\nL : (n, m) ndarray\n    Parametric matrix.", "id": "f13608:c0:m7"}
{"signature": "def gradient(self):", "body": "L = self.L<EOL>n = self.L.shape[<NUM_LIT:0>]<EOL>grad = {\"<STR_LIT>\": zeros((n, n, n * self._L.shape[<NUM_LIT:1>]))}<EOL>for ii in range(self._L.shape[<NUM_LIT:0>] * self._L.shape[<NUM_LIT:1>]):<EOL><INDENT>row = ii // self._L.shape[<NUM_LIT:1>]<EOL>col = ii % self._L.shape[<NUM_LIT:1>]<EOL>grad[\"<STR_LIT>\"][row, :, ii] = L[:, col]<EOL>grad[\"<STR_LIT>\"][:, row, ii] += L[:, col]<EOL><DEDENT>return grad<EOL>", "docstring": "Derivative of the covariance matrix over the lower triangular, flat part of L.\n\nIt is equal to\n\n    \u2202K/\u2202L\u1d62\u2c7c = AL\u1d40 + LA\u1d40,\n\nwhere A\u1d62\u2c7c is an n\u00d7m matrix of zeros except at [A\u1d62\u2c7c]\u1d62\u2c7c=1.\n\nReturns\n-------\nLu : ndarray\n    Derivative of K over the lower-triangular, flat part of L.", "id": "f13608:c0:m11"}
{"signature": "def value(self):", "body": "return dot(self.L, self.L.T)<EOL>", "docstring": "Covariance matrix.\n\nReturns\n-------\nK : (n, n) ndarray\n    K = LL\u1d40.", "id": "f13608:c0:m10"}
{"signature": "def fix(self):", "body": "self._Lu.fix()<EOL>", "docstring": "Disable parameter optimisation.", "id": "f13608:c0:m3"}
{"signature": "def listen(self, func):", "body": "self._Lu.listen(func)<EOL>", "docstring": "Listen to parameters change.\n\nParameters\n----------\nfunc : callable\n    Function to be called when a parameter changes.", "id": "f13608:c0:m2"}
{"signature": "def unfix(self):", "body": "self._Lu.unfix()<EOL>", "docstring": "Enable parameter optimisation.", "id": "f13608:c0:m4"}
{"signature": "@property<EOL><INDENT>def shape(self):<DEDENT>", "body": "n = self._L.shape[<NUM_LIT:0>]<EOL>return (n, n)<EOL>", "docstring": "Array shape.", "id": "f13608:c0:m9"}
{"signature": "def __init__(self, G, dim, rank):", "body": "self._cache = {\"<STR_LIT>\": None}<EOL>self._C0 = LRFreeFormCov(dim, rank)<EOL>self._C0.name = \"<STR_LIT>\"<EOL>self._C1 = FreeFormCov(dim)<EOL>self._C1.name = \"<STR_LIT>\"<EOL>G = atleast_2d(asarray(G, float))<EOL>self._G = G<EOL>self._Sxe = None<EOL>self._Sx = None<EOL>self._Lx = None<EOL>self._LxG = None<EOL>self._diag_LxGGLx = None<EOL>self._Lxe = None<EOL>self._LxGe = None<EOL>self._diag_LxGGLxe = None<EOL>Function.__init__(<EOL>self, \"<STR_LIT>\", composite=[(\"<STR_LIT>\", self._C0), (\"<STR_LIT>\", self._C1)]<EOL>)<EOL>self._C0.listen(self._parameters_update)<EOL>self._C1.listen(self._parameters_update)<EOL>", "docstring": "Constructor.\n\nParameters\n----------\ndim : int\n    Dimension d for the square matrices C\u2080 and C\u2081.\nrank : int\n    Maximum rank of the C\u2081 matrix.", "id": "f13610:c0:m0"}
{"signature": "@property<EOL><INDENT>def C1(self):<DEDENT>", "body": "return self._C1<EOL>", "docstring": "Definite positive matrix C\u2081.", "id": "f13610:c0:m16"}
{"signature": "@property<EOL><INDENT>@lru_cache(maxsize=None)<EOL>def Ge(self):<DEDENT>", "body": "from scipy.linalg import svd<EOL>from numpy_sugar.linalg import ddot<EOL>U, S, _ = svd(self._G, full_matrices=False, check_finite=False)<EOL>if U.shape[<NUM_LIT:1>] < self._G.shape[<NUM_LIT:1>]:<EOL><INDENT>return ddot(U, S)<EOL><DEDENT>return self._G<EOL>", "docstring": "Result of US from the SVD decomposition G = USV\u1d40.", "id": "f13610:c0:m3"}
{"signature": "def listen(self, func):", "body": "self._C0.listen(func)<EOL>self._C1.listen(func)<EOL>", "docstring": "Listen to parameters change.\n\nParameters\n----------\nfunc : callable\n    Function to be called when a parameter changes.", "id": "f13610:c0:m7"}
{"signature": "def logdet(self):", "body": "self._init_svd()<EOL>return -log(self._De).sum() + self.G.shape[<NUM_LIT:0>] * self.C1.logdet()<EOL>", "docstring": "Implements log|K| = - log|D| + n\u22c5log|C\u2081|.\n\nReturns\n-------\nlogdet : float\n    Log-determinant of K.", "id": "f13610:c0:m21"}
{"signature": "def gradient(self):", "body": "return dict(logscale=self.value())<EOL>", "docstring": "Derivative of the covariance matrix over log(s).\n\nReturns\n-------\nlogscale : ndarray\n    s\u22c5XX\u1d40.", "id": "f13611:c0:m7"}
{"signature": "def __init__(self, X):", "body": "self._logscale = Scalar(<NUM_LIT:0.0>)<EOL>self._X = X<EOL>Function.__init__(self, \"<STR_LIT>\", logscale=self._logscale)<EOL>self._logscale.bounds = (-<NUM_LIT>, +<NUM_LIT:10>)<EOL>", "docstring": "Constructor.\n\nParameters\n----------\nX : array_like\n    Matrix X from K = s\u22c5XX\u1d40.", "id": "f13611:c0:m0"}
{"signature": "@property<EOL><INDENT>@cache<EOL>def null_beta_se(self):<DEDENT>", "body": "return sqrt(self.null_beta_covariance.diagonal())<EOL>", "docstring": "Standard errors of the optimal \ud835\udf37.\n\nReturns\n-------\nbeta_se : ndarray\n    Square root of the diagonal of the beta covariance.", "id": "f13612:c0:m4"}
{"signature": "def _bstar_1effect(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM):", "body": "from numpy_sugar import epsilon<EOL>from numpy_sugar.linalg import dotd<EOL>from numpy import sum<EOL>r = full(MTBM[<NUM_LIT:0>].shape[<NUM_LIT:0>], yTBy)<EOL>r -= <NUM_LIT:2> * add.reduce([dot(i, beta) for i in yTBX])<EOL>r -= <NUM_LIT:2> * add.reduce([i * alpha for i in yTBM])<EOL>r += add.reduce([dotd(beta.T, dot(i, beta)) for i in XTBX])<EOL>r += add.reduce([dotd(beta.T, i * alpha) for i in XTBM])<EOL>r += add.reduce([sum(alpha * i * beta, axis=<NUM_LIT:0>) for i in XTBM])<EOL>r += add.reduce([alpha * i.ravel() * alpha for i in MTBM])<EOL>return clip(r, epsilon.tiny, inf)<EOL>", "docstring": "Same as :func:`_bstar_set` but for single-effect.", "id": "f13612:m0"}
{"signature": "def _bstar_set(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM):", "body": "from numpy_sugar import epsilon<EOL>r = yTBy<EOL>r -= <NUM_LIT:2> * add.reduce([i @ beta for i in yTBX])<EOL>r -= <NUM_LIT:2> * add.reduce([i @ alpha for i in yTBM])<EOL>r += add.reduce([beta.T @ i @ beta for i in XTBX])<EOL>r += <NUM_LIT:2> * add.reduce([beta.T @ i @ alpha for i in XTBM])<EOL>r += add.reduce([alpha.T @ i @ alpha for i in MTBM])<EOL>return clip(r, epsilon.tiny, inf)<EOL>", "docstring": "Compute -2\ud835\udc32\u1d40BE\u2c7c\ud835\udc1b\u2c7c + (\ud835\udc1b\u2c7cE\u2c7c)\u1d40BE\u2c7c\ud835\udc1b\u2c7c.\n\nFor \ud835\udc1b\u2c7c = [\ud835\udf37\u2c7c\u1d40 \ud835\udf36\u2c7c\u1d40]\u1d40.", "id": "f13612:m1"}
{"signature": "def scan(self, M):", "body": "from numpy_sugar.linalg import ddot<EOL>from numpy_sugar import is_all_finite<EOL>M = asarray(M, float)<EOL>if M.shape[<NUM_LIT:1>] == <NUM_LIT:0>:<EOL><INDENT>return {<EOL>\"<STR_LIT>\": self.null_lml(),<EOL>\"<STR_LIT>\": self.null_beta,<EOL>\"<STR_LIT>\": self.null_beta_se,<EOL>\"<STR_LIT>\": empty((<NUM_LIT:0>)),<EOL>\"<STR_LIT>\": empty((<NUM_LIT:0>)),<EOL>\"<STR_LIT>\": self.null_scale,<EOL>}<EOL><DEDENT>if not is_all_finite(M):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>MTQ = [dot(M.T, Q) for Q in self._QS[<NUM_LIT:0>] if Q.size > <NUM_LIT:0>]<EOL>yTBM = [dot(i, j.T) for (i, j) in zip(self._yTQDi, MTQ)]<EOL>XTBM = [dot(i, j.T) for (i, j) in zip(self._XTQDi, MTQ)]<EOL>D = self._D<EOL>MTBM = [ddot(i, <NUM_LIT:1> / j) @ i.T for i, j in zip(MTQ, D) if j.min() > <NUM_LIT:0>]<EOL>return self._multicovariate_set(yTBM, XTBM, MTBM)<EOL>", "docstring": "LML, fixed-effect sizes, and scale of the candidate set.\n\nParameters\n----------\nM : array_like\n    Fixed-effects set.\n\nReturns\n-------\nlml : float\n    Log of the marginal likelihood.\neffsizes0 : ndarray\n    Covariates fixed-effect sizes.\neffsizes0_se : ndarray\n    Covariates fixed-effect size standard errors.\neffsizes1 : ndarray\n    Candidate set fixed-effect sizes.\neffsizes1_se : ndarray\n    Candidate fixed-effect size standard errors.\nscale : ndarray\n    Optimal scale.", "id": "f13612:c0:m7"}
{"signature": "def __init__(self, Y, A, X, G, terms):", "body": "self._Y = asarray(Y, float)<EOL>self._A = asarray(A, float)<EOL>self._X = asarray(X, float)<EOL>self._G = asarray(G, float)<EOL>self._H = terms[\"<STR_LIT:H>\"]<EOL>self._logdetK = terms[\"<STR_LIT>\"]<EOL>self._W = terms[\"<STR_LIT>\"]<EOL>self._yKiy = terms[\"<STR_LIT>\"]<EOL>self._WA = terms[\"<STR_LIT>\"]<EOL>self._WL0 = terms[\"<STR_LIT>\"]<EOL>self._Lz = terms[\"<STR_LIT>\"]<EOL>self._XRiM = terms[\"<STR_LIT>\"]<EOL>self._ZiXRiy = terms[\"<STR_LIT>\"]<EOL>self._ZiXRiM = terms[\"<STR_LIT>\"]<EOL>self._MRiM = terms[\"<STR_LIT>\"]<EOL>self._MRiXZiXRiM = terms[\"<STR_LIT>\"]<EOL>self._MRiy = terms[\"<STR_LIT>\"]<EOL>self._MRiXZiXRiy = terms[\"<STR_LIT>\"]<EOL>", "docstring": "Constructor.\n\nParameters\n----------\nY : (n, p) array_like\n    Outcome matrix.\nA : (n, n) array_like\n    Trait-by-trait design matrix.\nX : (n, c) array_like\n    Covariates design matrix.\nG : (n, r) array_like\n    Matrix G from the GG\u1d40 term.\nterms : dict\n    Pre-computed terms.", "id": "f13613:c0:m0"}
{"signature": "@property<EOL><INDENT>@cache<EOL>def null_beta(self):<DEDENT>", "body": "return rsolve(self._MKiM, self._MKiy)<EOL>", "docstring": "Optimal \ud835\udec3 according to the marginal likelihood.\n\nIt is compute by solving the equation ::\n\n    M\u1d40K\u207b\u00b9M\ud835\udec3 = M\u1d40K\u207b\u00b9\ud835\udc32,\n\nfor \ud835\udc32 = vec(Y) and M = (A \u2297 X)vec(\ud835\udea9).\n\nReturns\n-------\neffsizes : ndarray\n    Optimal \ud835\udec3.", "id": "f13613:c0:m2"}
{"signature": "@property<EOL><INDENT>@cache<EOL>def null_beta_se(self):<DEDENT>", "body": "return sqrt(self.null_beta_covariance.diagonal())<EOL>", "docstring": "Standard errors of the optimal \ud835\udec3.\n\nReturns\n-------\nbeta_se : ndarray\n    Square root of the diagonal of the beta covariance.", "id": "f13613:c0:m4"}
{"signature": "def scan(self, A1, X1):", "body": "from numpy import empty<EOL>from numpy.linalg import multi_dot<EOL>from numpy_sugar import epsilon, is_all_finite<EOL>from scipy.linalg import cho_solve<EOL>A1 = asarray(A1, float)<EOL>X1 = asarray(X1, float)<EOL>if not is_all_finite(A1):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not is_all_finite(X1):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if A1.shape[<NUM_LIT:1>] == <NUM_LIT:0>:<EOL><INDENT>beta_se = sqrt(self.null_beta_covariance.diagonal())<EOL>return {<EOL>\"<STR_LIT>\": self.null_lml(),<EOL>\"<STR_LIT>\": unvec(self.null_beta, (self._ncovariates, -<NUM_LIT:1>)),<EOL>\"<STR_LIT>\": unvec(beta_se, (self._ncovariates, -<NUM_LIT:1>)),<EOL>\"<STR_LIT>\": empty((<NUM_LIT:0>,)),<EOL>\"<STR_LIT>\": empty((<NUM_LIT:0>,)),<EOL>\"<STR_LIT>\": self.null_scale,<EOL>}<EOL><DEDENT>X1X1 = X1.T @ X1<EOL>XX1 = self._X.T @ X1<EOL>AWA1 = self._WA.T @ A1<EOL>A1W = A1.T @ self._W<EOL>GX1 = self._G.T @ X1<EOL>MRiM1 = kron(AWA1, XX1)<EOL>M1RiM1 = kron(A1W @ A1, X1X1)<EOL>M1Riy = vec(multi_dot([X1.T, self._Y, A1W.T]))<EOL>XRiM1 = kron(self._WL0.T @ A1, GX1)<EOL>ZiXRiM1 = cho_solve(self._Lz, XRiM1)<EOL>MRiXZiXRiM1 = self._XRiM.T @ ZiXRiM1<EOL>M1RiXZiXRiM1 = XRiM1.T @ ZiXRiM1<EOL>M1RiXZiXRiy = XRiM1.T @ self._ZiXRiy<EOL>T0 = [[self._MRiM, MRiM1], [MRiM1.T, M1RiM1]]<EOL>T1 = [[self._MRiXZiXRiM, MRiXZiXRiM1], [MRiXZiXRiM1.T, M1RiXZiXRiM1]]<EOL>T2 = [self._MRiy, M1Riy]<EOL>T3 = [self._MRiXZiXRiy, M1RiXZiXRiy]<EOL>MKiM = block(T0) - block(T1)<EOL>MKiy = block(T2) - block(T3)<EOL>beta = rsolve(MKiM, MKiy)<EOL>mKiy = beta.T @ MKiy<EOL>cp = self._ntraits * self._ncovariates<EOL>effsizes0 = unvec(beta[:cp], (self._ncovariates, self._ntraits))<EOL>effsizes1 = unvec(beta[cp:], (X1.shape[<NUM_LIT:1>], A1.shape[<NUM_LIT:1>]))<EOL>np = self._nsamples * self._ntraits<EOL>sqrtdot = self._yKiy - mKiy<EOL>scale = clip(sqrtdot / np, epsilon.tiny, inf)<EOL>lml = self._static_lml() / <NUM_LIT:2> - np * safe_log(scale) / 
<NUM_LIT:2> - np / <NUM_LIT:2><EOL>effsizes_se = sqrt(clip(scale * pinv(MKiM).diagonal(), epsilon.tiny, inf))<EOL>effsizes0_se = unvec(effsizes_se[:cp], (self._ncovariates, self._ntraits))<EOL>effsizes1_se = unvec(effsizes_se[cp:], (X1.shape[<NUM_LIT:1>], A1.shape[<NUM_LIT:1>]))<EOL>return {<EOL>\"<STR_LIT>\": lml,<EOL>\"<STR_LIT>\": effsizes0,<EOL>\"<STR_LIT>\": effsizes1,<EOL>\"<STR_LIT>\": scale,<EOL>\"<STR_LIT>\": effsizes0_se,<EOL>\"<STR_LIT>\": effsizes1_se,<EOL>}<EOL>", "docstring": "LML, fixed-effect sizes, and scale of the candidate set.\n\nParameters\n----------\nA1 : (p, e) array_like\n    Trait-by-environments design matrix.\nX1 : (n, m) array_like\n    Variants set matrix.\n\nReturns\n-------\nlml : float\n    Log of the marginal likelihood for the set.\neffsizes0 : (c, p) ndarray\n    Fixed-effect sizes for the covariates.\neffsizes0_se : (c, p) ndarray\n    Fixed-effect size standard errors for the covariates.\neffsizes1 : (m, e) ndarray\n    Fixed-effect sizes for the candidates.\neffsizes1_se : (m, e) ndarray\n    Fixed-effect size standard errors for the candidates.\nscale : float\n    Optimal scale.", "id": "f13613:c0:m6"}
{"signature": "@cache<EOL><INDENT>def null_lml(self):<DEDENT>", "body": "np = self._nsamples * self._ntraits<EOL>scale = self.null_scale<EOL>return self._static_lml() / <NUM_LIT:2> - np * safe_log(scale) / <NUM_LIT:2> - np / <NUM_LIT:2><EOL>", "docstring": "Log of the marginal likelihood for the null hypothesis.\n\nIt is implemented as ::\n\n    2\u00b7log(p(Y)) = -n\u00b7p\u00b7log(2\ud835\udf0bs) - log\uff5cK\uff5c - n\u00b7p,\n\nfor which s and \ud835\udea9 are optimal.\n\nReturns\n-------\nlml : float\n    Log of the marginal likelihood.", "id": "f13613:c0:m1"}
{"signature": "@property<EOL><INDENT>@cache<EOL>def null_beta_covariance(self):<DEDENT>", "body": "return self.null_scale * pinv(self._H)<EOL>", "docstring": "Covariance of the optimal \ud835\udec3 according to the marginal likelihood.\n\nReturns\n-------\neffsizes-covariance : ndarray\n    s(M\u1d40K\u207b\u00b9M)\u207b\u00b9.", "id": "f13613:c0:m3"}
{"signature": "@property<EOL><INDENT>def ncovariates(self):<DEDENT>", "body": "return self._X[\"<STR_LIT:X>\"].shape[<NUM_LIT:1>]<EOL>", "docstring": "Number of covariates, c.", "id": "f13620:c0:m13"}
{"signature": "def unfix(self, param):", "body": "if param == \"<STR_LIT>\":<EOL><INDENT>self._unfix(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>self._fix[param] = False<EOL><DEDENT>", "docstring": "Enable parameter optimization.\n\nParameters\n----------\nparam : str\n    Possible values are ``\"delta\"``, ``\"beta\"``, and ``\"scale\"``.", "id": "f13620:c0:m5"}
{"signature": "def value(self):", "body": "if not self._fix[\"<STR_LIT>\"]:<EOL><INDENT>self._update_beta()<EOL><DEDENT>if not self._fix[\"<STR_LIT>\"]:<EOL><INDENT>self._update_scale()<EOL><DEDENT>return self.lml()<EOL>", "docstring": "Internal use only.", "id": "f13620:c0:m10"}
{"signature": "@property<EOL><INDENT>def nsamples(self):<DEDENT>", "body": "return len(self._y)<EOL>", "docstring": "Number of samples, n.", "id": "f13620:c0:m12"}
{"signature": "def _lml_arbitrary_scale(self):", "body": "s = self.scale<EOL>D = self._D<EOL>n = len(self._y)<EOL>lml = -self._df * log2pi - n * log(s)<EOL>lml -= sum(npsum(log(d)) for d in D)<EOL>d = (mTQ - yTQ for (mTQ, yTQ) in zip(self._mTQ, self._yTQ))<EOL>lml -= sum((i / j) @ i for (i, j) in zip(d, D)) / s<EOL>return lml / <NUM_LIT:2><EOL>", "docstring": "Log of the marginal likelihood for arbitrary scale.\n\nReturns\n-------\nlml : float\n    Log of the marginal likelihood.", "id": "f13620:c0:m26"}
{"signature": "@property<EOL><INDENT>def beta(self):<DEDENT>", "body": "from numpy_sugar.linalg import rsolve<EOL>return rsolve(self._X[\"<STR_LIT>\"], rsolve(self._X[\"<STR_LIT>\"], self.mean()))<EOL>", "docstring": "Fixed-effect sizes.\n\nReturns\n-------\neffect-sizes : numpy.ndarray\n    Optimal fixed-effect sizes.\n\nNotes\n-----\nSetting the derivative of log(p(\ud835\udc32)) over effect sizes equal\nto zero leads to solutions \ud835\udf37 from equation ::\n\n    (Q\u1d40X)\u1d40D\u207b\u00b9(Q\u1d40X)\ud835\udf37 = (Q\u1d40X)\u1d40D\u207b\u00b9(Q\u1d40\ud835\udc32).", "id": "f13620:c0:m1"}
{"signature": "@property<EOL><INDENT>def v1(self):<DEDENT>", "body": "return self.scale * self.delta<EOL>", "docstring": "Second variance.\n\nReturns\n-------\nv1 : float\n    s\ud835\udeff.", "id": "f13620:c0:m7"}
{"signature": "def _lml_optimal_scale(self):", "body": "assert self._optimal[\"<STR_LIT>\"]<EOL>n = len(self._y)<EOL>lml = -self._df * log2pi - self._df - n * log(self.scale)<EOL>lml -= sum(npsum(log(D)) for D in self._D)<EOL>return lml / <NUM_LIT:2><EOL>", "docstring": "Log of the marginal likelihood for optimal scale.\n\nImplementation for unrestricted LML::\n\nReturns\n-------\nlml : float\n    Log of the marginal likelihood.", "id": "f13620:c0:m25"}
{"signature": "@property<EOL><INDENT>def A(self):<DEDENT>", "body": "return self._mean.A<EOL>", "docstring": "A from the equation \ud835\udc26 = (A \u2297 X) vec(B).\n\nReturns\n-------\nA : ndarray\n    A.", "id": "f13621:c0:m3"}
{"signature": "@property<EOL><INDENT>def beta_covariance(self):<DEDENT>", "body": "H = self._terms[\"<STR_LIT:H>\"]<EOL>return inv(H)<EOL>", "docstring": "Estimates the covariance-matrix of the optimal beta.\n\nReturns\n-------\nbeta-covariance : ndarray\n    (M\u1d40K\u207b\u00b9M)\u207b\u00b9.\n\nReferences\n----------\n.. Rencher, A. C., & Schaalje, G. B. (2008). Linear models in statistics. John\n   Wiley & Sons.", "id": "f13621:c0:m1"}
{"signature": "def covariance(self):", "body": "return self._cov.value()<EOL>", "docstring": "Covariance K = C\u2080 \u2297 GG\u1d40 + C\u2081 \u2297 I.\n\nReturns\n-------\ncovariance : ndarray\n    K.", "id": "f13621:c0:m9"}
{"signature": "@property<EOL><INDENT>def B(self):<DEDENT>", "body": "self._terms<EOL>return asarray(self._mean.B, float)<EOL>", "docstring": "Fixed-effect sizes B from \ud835\udc26 = (A \u2297 X) vec(B).\n\nReturns\n-------\nfixed-effects : ndarray\n    B from \ud835\udc26 = (A \u2297 X) vec(B).", "id": "f13621:c0:m4"}
{"signature": "def _lml_gradient(self):", "body": "from scipy.linalg import cho_solve<EOL>terms = self._terms<EOL>dC0 = self._cov.C0.gradient()[\"<STR_LIT>\"]<EOL>dC1 = self._cov.C1.gradient()[\"<STR_LIT>\"]<EOL>b = terms[\"<STR_LIT:b>\"]<EOL>W = terms[\"<STR_LIT>\"]<EOL>Lh = terms[\"<STR_LIT>\"]<EOL>Lz = terms[\"<STR_LIT>\"]<EOL>WA = terms[\"<STR_LIT>\"]<EOL>WL0 = terms[\"<STR_LIT>\"]<EOL>YW = terms[\"<STR_LIT>\"]<EOL>MRiM = terms[\"<STR_LIT>\"]<EOL>MRiy = terms[\"<STR_LIT>\"]<EOL>XRiM = terms[\"<STR_LIT>\"]<EOL>XRiy = terms[\"<STR_LIT>\"]<EOL>ZiXRiM = terms[\"<STR_LIT>\"]<EOL>ZiXRiy = terms[\"<STR_LIT>\"]<EOL>WdC0 = _mdot(W, dC0)<EOL>WdC1 = _mdot(W, dC1)<EOL>AWdC0 = _mdot(WA.T, dC0)<EOL>AWdC1 = _mdot(WA.T, dC1)<EOL>MR0M = _mkron(_mdot(AWdC0, WA), self._XGGX)<EOL>MR1M = _mkron(_mdot(AWdC1, WA), self._XX)<EOL>MR0X = _mkron(_mdot(AWdC0, WL0), self._XGGG)<EOL>MR1X = _mkron(_mdot(AWdC1, WL0), self._GX.T)<EOL>MR0y = vec(_mdot(self._XGGY, _mdot(WdC0, WA)))<EOL>MR1y = vec(_mdot(self._XY, WdC1, WA))<EOL>XR0X = _mkron(_mdot(WL0.T, dC0, WL0), self._GGGG)<EOL>XR1X = _mkron(_mdot(WL0.T, dC1, WL0), self._GG)<EOL>XR0y = vec(_mdot(self._GGGY, WdC0, WL0))<EOL>XR1y = vec(_mdot(self._GY, WdC1, WL0))<EOL>yR0y = vec(_mdot(self._GY, WdC0)).T @ vec(self._GY @ W)<EOL>yR1y = (YW.T * _mdot(self._Y, WdC1).T).T.sum(axis=(<NUM_LIT:0>, <NUM_LIT:1>))<EOL>ZiXR0X = cho_solve(Lz, XR0X)<EOL>ZiXR1X = cho_solve(Lz, XR1X)<EOL>ZiXR0y = cho_solve(Lz, XR0y)<EOL>ZiXR1y = cho_solve(Lz, XR1y)<EOL>MK0y = MR0y - _mdot(XRiM.T, ZiXR0y) - _mdot(MR0X, ZiXRiy)<EOL>MK0y += _mdot(XRiM.T, ZiXR0X, ZiXRiy)<EOL>MK1y = MR1y - _mdot(XRiM.T, ZiXR1y) - _mdot(MR1X, ZiXRiy)<EOL>MK1y += _mdot(XRiM.T, ZiXR1X, ZiXRiy)<EOL>yK0y = yR0y - <NUM_LIT:2> * XR0y.T @ ZiXRiy + ZiXRiy.T @ _mdot(XR0X, ZiXRiy)<EOL>yK1y = yR1y - <NUM_LIT:2> * XR1y.T @ ZiXRiy + ZiXRiy.T @ _mdot(XR1X, ZiXRiy)<EOL>MR0XZiXRiM = _mdot(MR0X, ZiXRiM)<EOL>MK0M = MR0M - MR0XZiXRiM - MR0XZiXRiM.transpose([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:2>])<EOL>MK0M += 
_mdot(ZiXRiM.T, XR0X, ZiXRiM)<EOL>MR1XZiXRiM = _mdot(MR1X, ZiXRiM)<EOL>MK1M = MR1M - MR1XZiXRiM - MR1XZiXRiM.transpose([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:2>])<EOL>MK1M += _mdot(ZiXRiM.T, XR1X, ZiXRiM)<EOL>MK0m = _mdot(MK0M, b)<EOL>mK0y = b.T @ MK0y<EOL>mK0m = b.T @ MK0m<EOL>MK1m = _mdot(MK1M, b)<EOL>mK1y = b.T @ MK1y<EOL>mK1m = b.T @ MK1m<EOL>XRim = XRiM @ b<EOL>MRim = MRiM @ b<EOL>db = {\"<STR_LIT>\": cho_solve(Lh, MK0m - MK0y), \"<STR_LIT>\": cho_solve(Lh, MK1m - MK1y)}<EOL>grad = {<EOL>\"<STR_LIT>\": -trace(WdC0) * self._trGG + trace(ZiXR0X),<EOL>\"<STR_LIT>\": -trace(WdC1) * self.nsamples + trace(ZiXR1X),<EOL>}<EOL>if self._restricted:<EOL><INDENT>grad[\"<STR_LIT>\"] += cho_solve(Lh, MK0M).diagonal().sum(<NUM_LIT:1>)<EOL>grad[\"<STR_LIT>\"] += cho_solve(Lh, MK1M).diagonal().sum(<NUM_LIT:1>)<EOL><DEDENT>mKiM = MRim.T - XRim.T @ ZiXRiM<EOL>yKiM = MRiy.T - XRiy.T @ ZiXRiM<EOL>grad[\"<STR_LIT>\"] += yK0y - <NUM_LIT:2> * mK0y + mK0m - <NUM_LIT:2> * _mdot(mKiM, db[\"<STR_LIT>\"])<EOL>grad[\"<STR_LIT>\"] += <NUM_LIT:2> * _mdot(yKiM, db[\"<STR_LIT>\"])<EOL>grad[\"<STR_LIT>\"] += yK1y - <NUM_LIT:2> * mK1y + mK1m - <NUM_LIT:2> * _mdot(mKiM, db[\"<STR_LIT>\"])<EOL>grad[\"<STR_LIT>\"] += <NUM_LIT:2> * _mdot(yKiM, db[\"<STR_LIT>\"])<EOL>grad[\"<STR_LIT>\"] /= <NUM_LIT:2><EOL>grad[\"<STR_LIT>\"] /= <NUM_LIT:2><EOL>return grad<EOL>", "docstring": "Gradient of the log of the marginal likelihood.\n\nLet \ud835\udc32 = vec(Y), \ud835\udd42 = K\u207b\u00b9\u2202(K)K\u207b\u00b9, and H = M\u1d40K\u207b\u00b9M. 
The gradient is given by::\n\n    2\u22c5\u2202log(p(\ud835\udc32)) = -tr(K\u207b\u00b9\u2202K) - tr(H\u207b\u00b9\u2202H) + \ud835\udc32\u1d40\ud835\udd42\ud835\udc32 - \ud835\udc26\u1d40\ud835\udd42(2\u22c5\ud835\udc32-\ud835\udc26)\n        - 2\u22c5(\ud835\udc26-\ud835\udc32)\u1d40K\u207b\u00b9\u2202(\ud835\udc26).\n\nObserve that\n\n    \u2202\ud835\udec3 = -H\u207b\u00b9(\u2202H)\ud835\udec3 - H\u207b\u00b9M\u1d40\ud835\udd42\ud835\udc32 and \u2202H = -M\u1d40\ud835\udd42M.\n\nLet Z = I + X\u1d40R\u207b\u00b9X and \ud835\udce1 = R\u207b\u00b9(\u2202K)R\u207b\u00b9. We use Woodbury matrix identity to\nwrite ::\n\n    \ud835\udc32\u1d40\ud835\udd42\ud835\udc32 = \ud835\udc32\u1d40\ud835\udce1\ud835\udc32 - 2(\ud835\udc32\u1d40\ud835\udce1X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9\ud835\udc32) + (\ud835\udc32\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40\ud835\udce1X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9\ud835\udc32)\n    M\u1d40\ud835\udd42M = M\u1d40\ud835\udce1M - 2(M\u1d40\ud835\udce1X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9M) + (M\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40\ud835\udce1X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9M)\n    M\u1d40\ud835\udd42\ud835\udc32 = M\u1d40\ud835\udce1\ud835\udc32 - (M\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40\ud835\udce1\ud835\udc32) - (M\u1d40\ud835\udce1X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9\ud835\udc32)\n          + (M\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40\ud835\udce1X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9\ud835\udc32)\n    H\u207b\u00b9   = M\u1d40R\u207b\u00b9M - (M\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9M),\n\nwhere we have used parentheses to separate expressions\nthat we will compute separately. 
For example, we have ::\n\n    \ud835\udc32\u1d40\ud835\udce1\ud835\udc32 = \ud835\udc32\u1d40(U\u2081S\u2081\u207b\u00b9U\u2081\u1d40 \u2297 I)(\u2202C\u2080 \u2297 GG\u1d40)(U\u2081S\u2081\u207b\u00b9U\u2081\u1d40 \u2297 I)\ud835\udc32\n          = \ud835\udc32\u1d40(U\u2081S\u2081\u207b\u00b9U\u2081\u1d40\u2202C\u2080 \u2297 G)(U\u2081S\u2081\u207b\u00b9U\u2081\u1d40 \u2297 G\u1d40)\ud835\udc32\n          = vec(G\u1d40YU\u2081S\u2081\u207b\u00b9U\u2081\u1d40\u2202C\u2080)\u1d40vec(G\u1d40YU\u2081S\u2081\u207b\u00b9U\u2081\u1d40),\n\nwhen the derivative is over the parameters of C\u2080. Otherwise, we have\n\n    \ud835\udc32\u1d40\ud835\udce1\ud835\udc32 = vec(YU\u2081S\u2081\u207b\u00b9U\u2081\u1d40\u2202C\u2081)\u1d40vec(YU\u2081S\u2081\u207b\u00b9U\u2081\u1d40).\n\nThe above equations can be more compactly written as\n\n    \ud835\udc32\u1d40\ud835\udce1\ud835\udc32 = vec(E\u1d62\u1d40YW\u2202C\u1d62)\u1d40vec(E\u1d62\u1d40YW),\n\nwhere W = U\u2081S\u2081\u207b\u00b9U\u2081\u1d40, E\u2080 = G, and E\u2081 = I. 
We will now just state the results for\nthe other instances of the aBc form, which follow similar derivations::\n\n    X\u1d40\ud835\udce1X = (L\u2080\u1d40W\u2202C\u1d62WL\u2080) \u2297 (G\u1d40E\u1d62E\u1d62\u1d40G)\n    M\u1d40\ud835\udce1y = (A\u1d40W\u2202C\u1d62\u2297X\u1d40E\u1d62)vec(E\u1d62\u1d40YW) = vec(X\u1d40E\u1d62E\u1d62\u1d40YW\u2202C\u1d62WA)\n    M\u1d40\ud835\udce1X = A\u1d40W\u2202C\u1d62WL\u2080 \u2297 X\u1d40E\u1d62E\u1d62\u1d40G\n    M\u1d40\ud835\udce1M = A\u1d40W\u2202C\u1d62WA \u2297 X\u1d40E\u1d62E\u1d62\u1d40X\n    X\u1d40\ud835\udce1\ud835\udc32 = G\u1d40E\u1d62E\u1d62\u1d40YW\u2202C\u1d62WL\u2080\n\nFrom Woodbury matrix identity and Kronecker product properties we have ::\n\n    tr(K\u207b\u00b9\u2202K) = tr[W\u2202C\u1d62]tr[E\u1d62E\u1d62\u1d40] - tr[Z\u207b\u00b9(X\u1d40\ud835\udce1X)]\n    tr(H\u207b\u00b9\u2202H) = - tr[(M\u1d40R\u207b\u00b9M)(M\u1d40\ud835\udd42M)] + tr[(M\u1d40R\u207b\u00b9X)Z\u207b\u00b9(X\u1d40R\u207b\u00b9M)(M\u1d40\ud835\udd42M)]\n\nNote also that ::\n\n    \u2202\ud835\udec3 = H\u207b\u00b9M\u1d40\ud835\udd42M\ud835\udec3 - H\u207b\u00b9M\u1d40\ud835\udd42\ud835\udc32.\n\nReturns\n-------\nC0.Lu : ndarray\n    Gradient of the log of the marginal likelihood over C\u2080 parameters.\nC1.Lu : ndarray\n    Gradient of the log of the marginal likelihood over C\u2081 parameters.", "id": "f13621:c0:m32"}
{"signature": "@property<EOL><INDENT>def ncovariates(self):<DEDENT>", "body": "return self._mean.X.shape[<NUM_LIT:1>]<EOL>", "docstring": "Number of covariates, c.", "id": "f13621:c0:m14"}
{"signature": "def L(self):", "body": "from scipy.linalg import cho_factor<EOL>from numpy_sugar.linalg import ddot, sum2diag<EOL>if self._L_cache is not None:<EOL><INDENT>return self._L_cache<EOL><DEDENT>Q = self._cov[\"<STR_LIT>\"][<NUM_LIT:0>][<NUM_LIT:0>]<EOL>S = self._cov[\"<STR_LIT>\"][<NUM_LIT:1>]<EOL>B = dot(Q.T, ddot(self._site.tau, Q, left=True))<EOL>sum2diag(B, <NUM_LIT:1.0> / S, out=B)<EOL>self._L_cache = cho_factor(B, lower=True)[<NUM_LIT:0>]<EOL>return self._L_cache<EOL>", "docstring": "r\"\"\"Cholesky decomposition of :math:`\\mathrm B`.\n\n        .. math::\n\n            \\mathrm B = \\mathrm Q^{\\intercal}\\tilde{\\mathrm{T}}\\mathrm Q\n                + \\mathrm{S}^{-1}", "id": "f13628:c0:m10"}
{"signature": "def _initialize(self):", "body": "if self._mean is None or self._cov is None:<EOL><INDENT>return<EOL><DEDENT>Q = self._cov[\"<STR_LIT>\"][<NUM_LIT:0>][<NUM_LIT:0>]<EOL>S = self._cov[\"<STR_LIT>\"][<NUM_LIT:1>]<EOL>if S.size > <NUM_LIT:0>:<EOL><INDENT>self.tau[:] = <NUM_LIT:1> / npsum((Q * sqrt(S)) ** <NUM_LIT:2>, axis=<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>self.tau[:] = <NUM_LIT:0.0><EOL><DEDENT>self.eta[:] = self._mean<EOL>self.eta[:] *= self.tau<EOL>", "docstring": "r\"\"\"Initialize the mean and covariance of the posterior.\n\n        Given that :math:`\\tilde{\\mathrm T}` is a matrix of zeros right before\n        the first EP iteration, we have\n\n        .. math::\n\n            \\boldsymbol\\mu = \\mathrm K^{-1} \\mathbf m ~\\text{ and }~\n            \\Sigma = \\mathrm K\n\n        as the initial posterior mean and covariance.", "id": "f13628:c0:m5"}
{"signature": "def rsolve(A, y):", "body": "from numpy_sugar.linalg import rsolve as _rsolve<EOL>try:<EOL><INDENT>beta = _rsolve(A, y)<EOL><DEDENT>except LinAlgError:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>msg += \"<STR_LIT>\"<EOL>warnings.warn(msg, RuntimeWarning)<EOL>beta = zeros(A.shape[<NUM_LIT:0>])<EOL><DEDENT>return beta<EOL>", "docstring": "Robust solve Ax=y.", "id": "f13632:m1"}
{"signature": "def hsolve(A00, A01, A11, y0, y1):", "body": "from numpy_sugar import epsilon<EOL>n = _norm(A00, A01)<EOL>u0 = A00 - n<EOL>u1 = A01<EOL>nu = _norm(u0, u1)<EOL>with errstate(invalid=\"<STR_LIT:ignore>\", divide=\"<STR_LIT:ignore>\"):<EOL><INDENT>v0 = nan_to_num(u0 / nu)<EOL>v1 = nan_to_num(u1 / nu)<EOL><DEDENT>B00 = <NUM_LIT:1> - <NUM_LIT:2> * v0 * v0<EOL>B01 = <NUM_LIT:0> - <NUM_LIT:2> * v0 * v1<EOL>B11 = <NUM_LIT:1> - <NUM_LIT:2> * v1 * v1<EOL>D00 = B00 * A00 + B01 * A01<EOL>D01 = B00 * A01 + B01 * A11<EOL>D11 = B01 * A01 + B11 * A11<EOL>b0 = y0 - <NUM_LIT:2> * y0 * v0 * v0 - <NUM_LIT:2> * y1 * v0 * v1<EOL>b1 = y1 - <NUM_LIT:2> * y0 * v1 * v0 - <NUM_LIT:2> * y1 * v1 * v1<EOL>n = _norm(D00, D01)<EOL>u0 = D00 - n<EOL>u1 = D01<EOL>nu = _norm(u0, u1)<EOL>with errstate(invalid=\"<STR_LIT:ignore>\", divide=\"<STR_LIT:ignore>\"):<EOL><INDENT>v0 = nan_to_num(u0 / nu)<EOL>v1 = nan_to_num(u1 / nu)<EOL><DEDENT>E00 = <NUM_LIT:1> - <NUM_LIT:2> * v0 * v0<EOL>E01 = <NUM_LIT:0> - <NUM_LIT:2> * v0 * v1<EOL>E11 = <NUM_LIT:1> - <NUM_LIT:2> * v1 * v1<EOL>F00 = E00 * D00 + E01 * D01<EOL>F01 = E01 * D11<EOL>F11 = E11 * D11<EOL>F11 = (npy_abs(F11) > epsilon.small) * F11<EOL>with errstate(divide=\"<STR_LIT:ignore>\", invalid=\"<STR_LIT:ignore>\"):<EOL><INDENT>Fi00 = nan_to_num(F00 / F00 / F00)<EOL>Fi11 = nan_to_num(F11 / F11 / F11)<EOL>Fi10 = nan_to_num(-(F01 / F00) * Fi11)<EOL><DEDENT>c0 = Fi00 * b0<EOL>c1 = Fi10 * b0 + Fi11 * b1<EOL>x0 = E00 * c0 + E01 * c1<EOL>x1 = E01 * c0 + E11 * c1<EOL>return array([x0, x1])<EOL>", "docstring": "Solver for the linear equations of two variables and equations only.\n\nIt uses Householder reductions to solve A\ud835\udc31 = \ud835\udc32 in a robust manner.\n\nParameters\n----------\nA : array_like\n    Coefficient matrix.\ny : array_like\n    Ordinate values.\n\nReturns\n-------\nndarray\n    Solution \ud835\udc31.", "id": "f13632:m7"}
{"signature": "def value(self):", "body": "from numpy_sugar.linalg import ddot, sum2diag<EOL>if self._cache[\"<STR_LIT:value>\"] is not None:<EOL><INDENT>return self._cache[\"<STR_LIT:value>\"]<EOL><DEDENT>scale = exp(self.logscale)<EOL>delta = <NUM_LIT:1> / (<NUM_LIT:1> + exp(-self.logitdelta))<EOL>v0 = scale * (<NUM_LIT:1> - delta)<EOL>v1 = scale * delta<EOL>mu = self.eta / self.tau<EOL>n = len(mu)<EOL>if self._QS is None:<EOL><INDENT>K = zeros((n, n))<EOL><DEDENT>else:<EOL><INDENT>Q0 = self._QS[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>S0 = self._QS[<NUM_LIT:1>]<EOL>K = dot(ddot(Q0, S0), Q0.T)<EOL><DEDENT>A = sum2diag(sum2diag(v0 * K, v1), <NUM_LIT:1> / self.tau)<EOL>m = mu - self.mean()<EOL>v = -n * log(<NUM_LIT:2> * pi)<EOL>v -= slogdet(A)[<NUM_LIT:1>]<EOL>v -= dot(m, solve(A, m))<EOL>self._cache[\"<STR_LIT:value>\"] = v / <NUM_LIT:2><EOL>return self._cache[\"<STR_LIT:value>\"]<EOL>", "docstring": "r\"\"\"Log of the marginal likelihood.\n\n        Formally,\n\n        .. math::\n\n            - \\frac{n}{2}\\log{2\\pi} - \\frac{1}{2} \\log{\\left|\n                v_0 \\mathrm K + v_1 \\mathrm I + \\tilde{\\Sigma} \\right|}\n                    - \\frac{1}{2}\n                    \\left(\\tilde{\\boldsymbol\\mu} -\n                    \\mathrm X\\boldsymbol\\beta\\right)^{\\intercal}\n                    \\left( v_0 \\mathrm K + v_1 \\mathrm I +\n                    \\tilde{\\Sigma} \\right)^{-1}\n                    \\left(\\tilde{\\boldsymbol\\mu} -\n                    \\mathrm X\\boldsymbol\\beta\\right)\n\n        Returns\n        -------\n        float\n            :math:`\\log{p(\\tilde{\\boldsymbol\\mu})}`", "id": "f13643:c0:m16"}
{"signature": "def posteriori_mean(self):", "body": "from numpy_sugar.linalg import rsolve<EOL>Sigma = self.posteriori_covariance()<EOL>eta = self._ep._posterior.eta<EOL>return dot(Sigma, eta + rsolve(GLMM.covariance(self), self.mean()))<EOL>", "docstring": "r\"\"\" Mean of the estimated posteriori.\n\n        This is also the maximum a posteriori estimation of the latent variable.", "id": "f13646:c0:m15"}
{"signature": "@property<EOL><INDENT>def v1(self):<DEDENT>", "body": "return self.scale * self.delta<EOL>", "docstring": "r\"\"\"Second variance.\n\n        Returns\n        -------\n        float\n            :math:`v_1 = s \\delta`", "id": "f13646:c0:m23"}
{"signature": "def lml(self):", "body": "return self.value()<EOL>", "docstring": "r\"\"\"Log of the marginal likelihood.\n\n        Returns\n        -------\n        float\n            :math:`\\log p(\\mathbf y)`", "id": "f13646:c0:m10"}
{"signature": "def covariance(self):", "body": "from numpy_sugar.linalg import ddot, sum2diag<EOL>Q0 = self._QS[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>S0 = self._QS[<NUM_LIT:1>]<EOL>return sum2diag(dot(ddot(Q0, self.v0 * S0), Q0.T), self.v1)<EOL>", "docstring": "r\"\"\"Covariance of the prior.\n\n        Returns\n        -------\n        :class:`numpy.ndarray`\n            :math:`v_0 \\mathrm K + v_1 \\mathrm I`.", "id": "f13646:c0:m5"}
{"signature": "@property<EOL><INDENT>def beta(self):<DEDENT>", "body": "return asarray(self._variables.get(\"<STR_LIT>\").value, float)<EOL>", "docstring": "r\"\"\"Fixed-effect sizes.\n\n        Returns\n        -------\n        :class:`numpy.ndarray`\n            :math:`\\boldsymbol\\beta`.", "id": "f13646:c0:m2"}
{"signature": "def mean(self):", "body": "return dot(self._X, self.beta)<EOL>", "docstring": "r\"\"\"Mean of the prior.\n\n        Returns\n        -------\n        :class:`numpy.ndarray`\n            :math:`\\mathrm X\\boldsymbol\\beta`.", "id": "f13646:c0:m17"}
{"signature": "def gradient(self):", "body": "self._update_approx()<EOL>g = self._ep.lml_derivatives(self._X)<EOL>ed = exp(-self.logitdelta)<EOL>es = exp(self.logscale)<EOL>grad = dict()<EOL>grad[\"<STR_LIT>\"] = g[\"<STR_LIT>\"] * (ed / (<NUM_LIT:1> + ed)) / (<NUM_LIT:1> + ed)<EOL>grad[\"<STR_LIT>\"] = g[\"<STR_LIT>\"] * es<EOL>grad[\"<STR_LIT>\"] = g[\"<STR_LIT>\"]<EOL>return grad<EOL>", "docstring": "r\"\"\"Gradient of the log of the marginal likelihood.\n\n        Returns\n        -------\n        dict\n            Map between variables to their gradient values.", "id": "f13648:c0:m10"}
{"signature": "def mean(self, x):", "body": "return x<EOL>", "docstring": "r\"\"\"Outcome mean.", "id": "f13654:c0:m4"}
{"signature": "def mean(self, x):", "body": "return self._link.inv(x)<EOL>", "docstring": "r\"\"\"Mean of the number of successful trials.", "id": "f13654:c2:m5"}
{"signature": "@property<EOL><INDENT>def outcome(self):<DEDENT>", "body": "return self._outcome<EOL>", "docstring": "r\"\"\"Get or set an array of outcomes.", "id": "f13654:c0:m2"}
{"signature": "@property<EOL><INDENT>def name(self):<DEDENT>", "body": "return \"<STR_LIT>\"<EOL>", "docstring": "r\"\"\"Get the name of this likelihood.", "id": "f13654:c3:m1"}
{"signature": "@property<EOL><INDENT>def noccurrences(self):<DEDENT>", "body": "return self._noccurrences<EOL>", "docstring": "r\"\"\"Get or set an array of number of occurrences.", "id": "f13654:c3:m2"}
{"signature": "@property<EOL><INDENT>def name(self):<DEDENT>", "body": "return \"<STR_LIT>\"<EOL>", "docstring": "r\"\"\"Get the name of this likelihood.", "id": "f13654:c2:m1"}
{"signature": "@property<EOL><INDENT>def sample_size(self):<DEDENT>", "body": "return len(self.nsuccesses)<EOL>", "docstring": "r\"\"\"Get the number of samples.", "id": "f13654:c2:m7"}
{"signature": "@property<EOL><INDENT>def sample_size(self):<DEDENT>", "body": "return len(self.outcome)<EOL>", "docstring": "r\"\"\"Get the number of samples.", "id": "f13654:c1:m6"}
{"signature": "def _call_multi_fortran_z_attr(self, names, data_types, num_elems,<EOL>entry_nums, attr_nums, var_names,<EOL>input_type_code, func, data_offset=None):", "body": "<EOL>idx, = np.where(data_types == input_type_code)<EOL>if len(idx) > <NUM_LIT:0>:<EOL><INDENT>max_num = num_elems[idx].max()<EOL>sub_num_elems = num_elems[idx]<EOL>sub_names = np.array(names)[idx]<EOL>sub_var_names = np.array(var_names)[idx]<EOL>sub_entry_nums = entry_nums[idx]<EOL>sub_attr_nums = attr_nums[idx]<EOL>status, data = func(self.fname, sub_attr_nums, sub_entry_nums,<EOL>len(sub_attr_nums), max_num, len(self.fname))<EOL>if (status == <NUM_LIT:0>).all():<EOL><INDENT>if data_offset is not None:<EOL><INDENT>data = data.astype(int)<EOL>idx, idy, = np.where(data < <NUM_LIT:0>)<EOL>data[idx, idy] += data_offset<EOL><DEDENT>self._process_return_multi_z_attr(data, sub_names,<EOL>sub_var_names, sub_num_elems)<EOL><DEDENT>else:<EOL><INDENT>idx, = np.where(status != <NUM_LIT:0>)<EOL>raise IOError(fortran_cdf.statusreporter(status[idx][<NUM_LIT:0>]))<EOL><DEDENT><DEDENT>", "docstring": "Calls Fortran function that reads attribute data.\n\n        data_offset translates unsigned into signed.\n        If number read in is negative, offset added.", "id": "f13658:c0:m11"}
{"signature": "def load_all_variables(self):", "body": "self.data = {}<EOL>file_var_names = self.z_variable_info.keys()<EOL>dim_sizes = []<EOL>rec_nums = []<EOL>data_types = []<EOL>names = []<EOL>for i, name in enumerate(file_var_names):<EOL><INDENT>dim_sizes.extend(self.z_variable_info[name]['<STR_LIT>'])<EOL>rec_nums.append(self.z_variable_info[name]['<STR_LIT>'])<EOL>data_types.append(self.z_variable_info[name]['<STR_LIT>'])<EOL>names.append(name.ljust(<NUM_LIT>))<EOL><DEDENT>dim_sizes = np.array(dim_sizes)<EOL>rec_nums = np.array(rec_nums)<EOL>data_types = np.array(data_types)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_real4)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,<EOL>self.cdf_data_types['<STR_LIT:float>'],<EOL>fortran_cdf.get_multi_z_real4)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_real8)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_real8)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_int4)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_int4,<EOL>data_offset=<NUM_LIT:2> ** <NUM_LIT:32>)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_int2)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_int2,<EOL>data_offset=<NUM_LIT:2> ** <NUM_LIT:16>)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, 
dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_int1)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_int1,<EOL>data_offset=<NUM_LIT:2> ** <NUM_LIT:8>)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_int1)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_real8,<EOL>epoch=True)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, <NUM_LIT:2> * dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_epoch16,<EOL>epoch16=True)<EOL>self._call_multi_fortran_z(names, data_types, rec_nums, dim_sizes,<EOL>self.cdf_data_types['<STR_LIT>'],<EOL>fortran_cdf.get_multi_z_tt2000,<EOL>epoch=True)<EOL>self.data_loaded = True<EOL>", "docstring": "Loads all variables from CDF.\n\n        Note this routine is called automatically\n        upon instantiation.", "id": "f13658:c0:m6"}
{"signature": "def _read_all_attribute_info(self):", "body": "num = copy.deepcopy(self._num_attrs)<EOL>fname = copy.deepcopy(self.fname)<EOL>out = fortran_cdf.inquire_all_attr(fname, num, len(fname))<EOL>status = out[<NUM_LIT:0>]<EOL>names = out[<NUM_LIT:1>].astype('<STR_LIT>')<EOL>scopes = out[<NUM_LIT:2>]<EOL>max_gentries = out[<NUM_LIT:3>]<EOL>max_rentries = out[<NUM_LIT:4>]<EOL>max_zentries = out[<NUM_LIT:5>]<EOL>attr_nums = out[<NUM_LIT:6>]<EOL>global_attrs_info = {}<EOL>var_attrs_info = {}<EOL>if status == <NUM_LIT:0>:<EOL><INDENT>for name, scope, gentry, rentry, zentry, num in zip(names, scopes, max_gentries,<EOL>max_rentries, max_zentries,<EOL>attr_nums):<EOL><INDENT>name = '<STR_LIT>'.join(name)<EOL>name = name.rstrip()<EOL>nug = {}<EOL>nug['<STR_LIT>'] = scope<EOL>nug['<STR_LIT>'] = gentry<EOL>nug['<STR_LIT>'] = rentry<EOL>nug['<STR_LIT>'] = zentry<EOL>nug['<STR_LIT>'] = num<EOL>flag = (gentry == <NUM_LIT:0>) & (rentry == <NUM_LIT:0>) & (zentry == <NUM_LIT:0>)<EOL>if not flag:<EOL><INDENT>if scope == <NUM_LIT:1>:<EOL><INDENT>global_attrs_info[name] = nug<EOL><DEDENT>elif scope == <NUM_LIT:2>:<EOL><INDENT>var_attrs_info[name] = nug<EOL><DEDENT><DEDENT><DEDENT>self.global_attrs_info = global_attrs_info<EOL>self.var_attrs_info = var_attrs_info<EOL><DEDENT>else:<EOL><INDENT>raise IOError(fortran_cdf.statusreporter(status))<EOL><DEDENT>", "docstring": "Read all attribute properties, g, r, and z attributes", "id": "f13658:c0:m9"}
{"signature": "def inquire(self):", "body": "name = copy.deepcopy(self.fname)<EOL>stats = fortran_cdf.inquire(name)<EOL>status = stats[<NUM_LIT:0>]<EOL>if status == <NUM_LIT:0>:<EOL><INDENT>self._num_dims = stats[<NUM_LIT:1>]<EOL>self._dim_sizes = stats[<NUM_LIT:2>]<EOL>self._encoding = stats[<NUM_LIT:3>]<EOL>self._majority = stats[<NUM_LIT:4>]<EOL>self._max_rec = stats[<NUM_LIT:5>]<EOL>self._num_r_vars = stats[<NUM_LIT:6>]<EOL>self._num_z_vars = stats[<NUM_LIT:7>]<EOL>self._num_attrs = stats[<NUM_LIT:8>]<EOL><DEDENT>else:<EOL><INDENT>raise IOError(fortran_cdf.statusreporter(status))<EOL><DEDENT>", "docstring": "Maps to fortran CDF_Inquire.\n\n        Assigns parameters returned by CDF_Inquire\n        to pysatCDF instance. Not intended\n        for regular direct use by user.", "id": "f13658:c0:m4"}
{"signature": "def _read_all_z_variable_info(self):", "body": "self.z_variable_info = {}<EOL>self.z_variable_names_by_num = {}<EOL>info = fortran_cdf.z_var_all_inquire(self.fname, self._num_z_vars,<EOL>len(self.fname))<EOL>status = info[<NUM_LIT:0>]<EOL>data_types = info[<NUM_LIT:1>]<EOL>num_elems = info[<NUM_LIT:2>]<EOL>rec_varys = info[<NUM_LIT:3>]<EOL>dim_varys = info[<NUM_LIT:4>]<EOL>num_dims = info[<NUM_LIT:5>]<EOL>dim_sizes = info[<NUM_LIT:6>]<EOL>rec_nums = info[<NUM_LIT:7>]<EOL>var_nums = info[<NUM_LIT:8>]<EOL>var_names = info[<NUM_LIT:9>]<EOL>if status == <NUM_LIT:0>:<EOL><INDENT>for i in np.arange(len(data_types)):<EOL><INDENT>out = {}<EOL>out['<STR_LIT>'] = data_types[i]<EOL>out['<STR_LIT>'] = num_elems[i]<EOL>out['<STR_LIT>'] = rec_varys[i]<EOL>out['<STR_LIT>'] = dim_varys[i]<EOL>out['<STR_LIT>'] = num_dims[i]<EOL>out['<STR_LIT>'] = dim_sizes[i, :<NUM_LIT:1>]<EOL>if out['<STR_LIT>'][<NUM_LIT:0>] == <NUM_LIT:0>:<EOL><INDENT>out['<STR_LIT>'][<NUM_LIT:0>] += <NUM_LIT:1><EOL><DEDENT>out['<STR_LIT>'] = rec_nums[i]<EOL>out['<STR_LIT>'] = var_nums[i]<EOL>var_name = '<STR_LIT>'.join(var_names[i].astype('<STR_LIT>'))<EOL>out['<STR_LIT>'] = var_name.rstrip()<EOL>self.z_variable_info[out['<STR_LIT>']] = out<EOL>self.z_variable_names_by_num[out['<STR_LIT>']] = var_name<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise IOError(fortran_cdf.statusreporter(status))<EOL><DEDENT>", "docstring": "Gets all CDF z-variable information, not data though.\n\n        Maps to calls using var_inquire. Gets information on\n        data type, number of elements, number of dimensions, etc.", "id": "f13658:c0:m5"}
{"signature": "def _process_return_multi_z_attr(self, data, attr_names, var_names, sub_num_elems):", "body": "<EOL>for i, (attr_name, var_name, num_e) in enumerate(zip(attr_names, var_names, sub_num_elems)):<EOL><INDENT>if var_name not in self.meta.keys():<EOL><INDENT>self.meta[var_name] = {}<EOL><DEDENT>if num_e == <NUM_LIT:1>:<EOL><INDENT>self.meta[var_name][attr_name] = data[i, <NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>if data[i].dtype == '<STR_LIT>':<EOL><INDENT>self.meta[var_name][attr_name] = '<STR_LIT>'.join(data[i, <NUM_LIT:0>:num_e].astype('<STR_LIT>')).rstrip()<EOL><DEDENT>else:<EOL><INDENT>self.meta[var_name][attr_name] = data[i, <NUM_LIT:0>:num_e]<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "process and attach data from fortran_cdf.get_multi_*", "id": "f13658:c0:m12"}
{"signature": "def teardown(self):", "body": "<EOL>", "docstring": "Runs after every method to clean up previous testing.", "id": "f13659:c0:m1"}
{"signature": "def setup(self):", "body": "<EOL>", "docstring": "Runs before every method to create a clean testing setup.", "id": "f13659:c0:m0"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "kwargs['<STR_LIT>'] = Bucket<EOL>super(S3Connection, self).__init__(*args, **kwargs)<EOL>", "docstring": "Set the base class for bucket objects created in the connection to\n        the MimicDB bucket class.", "id": "f13661:c0:m0"}
{"signature": "def _get_file_internal(self, *args, **kwargs):", "body": "super(Key, self)._get_file_internal(*args, **kwargs)<EOL>mimicdb.backend.sadd(tpl.bucket % self.bucket.name, self.name)<EOL>mimicdb.backend.hmset(tpl.key % (self.bucket.name, self.name),<EOL>dict(size=self.size, md5=self.md5))<EOL>", "docstring": "Called internally for any type of download. After download finishes,\n        make sure the key is in the bucket set and save the metadata.", "id": "f13662:c0:m5"}
{"signature": "@name.setter<EOL><INDENT>def name(self, value):<DEDENT>", "body": "self._name = value<EOL>if value:<EOL><INDENT>meta = mimicdb.backend.hgetall(tpl.key % (self.bucket.name, value))<EOL>if meta:<EOL><INDENT>mimicdb.backend.sadd(tpl.bucket % self.bucket.name, value)<EOL>self._load_meta(meta['<STR_LIT:size>'], meta['<STR_LIT>'])<EOL><DEDENT><DEDENT>", "docstring": "Key name can be set by Key.key or Key.name. Key.key sets Key.name\n        internally, so just handle this property. When changing the key\n        name, try to load it's metadata from MimicDB. If it's not available,\n        the key hasn't been uploaded, downloaded or synced so don't add it to\n        the bucket set (it also might have just been deleted,\n        see boto.s3.bucket.py#785)", "id": "f13662:c0:m3"}
{"signature": "def _load_meta(self, size, md5):", "body": "if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self.local_hashes = {}<EOL><DEDENT>self.size = int(size)<EOL>if (re.match('<STR_LIT>', md5)):<EOL><INDENT>self.md5 = md5<EOL><DEDENT>", "docstring": "Set key attributes to retrived metadata. Might be extended in the\n        future to support more attributes.", "id": "f13662:c0:m1"}
{"signature": "def list(self, *args, **kwargs):", "body": "if kwargs.pop('<STR_LIT>', None):<EOL><INDENT>headers = kwargs.get('<STR_LIT>', args[<NUM_LIT:4>] if len(args) > <NUM_LIT:4> else None) or dict()<EOL>headers['<STR_LIT>'] = True<EOL>kwargs['<STR_LIT>'] = headers<EOL>for key in super(Bucket, self).list(*args, **kwargs):<EOL><INDENT>yield key<EOL><DEDENT><DEDENT>else:<EOL><INDENT>prefix = kwargs.get('<STR_LIT>', args[<NUM_LIT:0>] if args else '<STR_LIT>')<EOL>for key in mimicdb.backend.smembers(tpl.bucket % self.name):<EOL><INDENT>if key.startswith(prefix):<EOL><INDENT>k = Key(self, key)<EOL>meta = mimicdb.backend.hgetall(tpl.key % (self.name, key))<EOL>if meta:<EOL><INDENT>k._load_meta(meta['<STR_LIT:size>'], meta['<STR_LIT>'])<EOL><DEDENT>yield k<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Return an iterable of keys from MimicDB.\n\n        :param boolean force: If true, API call is forced to S3", "id": "f13663:c0:m7"}
{"signature": "def _get_all(self, *args, **kwargs):", "body": "headers = kwargs.get('<STR_LIT>', args[<NUM_LIT:2>] if len(args) > <NUM_LIT:2> else None) or dict()<EOL>if '<STR_LIT>' in headers:<EOL><INDENT>keys = super(Bucket, self)._get_all(*args, **kwargs)<EOL>for key in keys:<EOL><INDENT>mimicdb.backend.sadd(tpl.bucket % self.name, key.name)<EOL>mimicdb.backend.hmset(tpl.key % (self.name, key.name), dict(size=key.size, md5=key.etag.strip('<STR_LIT:\">')))<EOL>key.name = key.name<EOL><DEDENT>return keys<EOL><DEDENT>prefix = kwargs.get('<STR_LIT>', '<STR_LIT>')<EOL>return list(self.list(prefix=prefix))<EOL>", "docstring": "If 'force' is in the headers, retrieve the list of keys from S3.\n        Otherwise, use the list() function to retrieve the keys from MimicDB.", "id": "f13663:c0:m8"}
{"signature": "def delete_keys(self, *args, **kwargs):", "body": "ikeys = iter(kwargs.get('<STR_LIT>', args[<NUM_LIT:0>] if args else []))<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>key = ikeys.next()<EOL><DEDENT>except StopIteration:<EOL><INDENT>break<EOL><DEDENT>if isinstance(key, basestring):<EOL><INDENT>mimicdb.backend.srem(tpl.bucket % self.name, key)<EOL>mimicdb.backend.delete(tpl.key % (self.name, key))<EOL><DEDENT>elif isinstance(key, BotoKey) or isinstance(key, Key):<EOL><INDENT>mimicdb.backend.srem(tpl.bucket % self.name, key.name)<EOL>mimicdb.backend.delete(tpl.key % (self.name, key.name))<EOL><DEDENT><DEDENT>return super(Bucket, self).delete_keys(*args, **kwargs)<EOL>", "docstring": "Remove each key or key name in an iterable from the bucket set.", "id": "f13663:c0:m5"}
{"signature": "def clean(self):", "body": "self.sessions = []<EOL>", "docstring": "Clear all GenePattern sessions from the sessions list\n:return:", "id": "f13676:c0:m2"}
{"signature": "def _get_index(self, server_url):", "body": "for i in range(len(self.sessions)):<EOL><INDENT>session = self.sessions[i]<EOL>if session.url == server_url:<EOL><INDENT>return i<EOL><DEDENT><DEDENT>return -<NUM_LIT:1><EOL>", "docstring": "Returns a registered GPServer object with a matching GenePattern server url\nReturns -1 if no matching result was found\n:param server_url:\n:return:", "id": "f13676:c0:m3"}
{"signature": "def get(self, server):", "body": "<EOL>if isinstance(server, int):<EOL><INDENT>if server >= len(self.sessions):<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return self.sessions[server]<EOL><DEDENT><DEDENT>index = self._get_index(server)<EOL>if index == -<NUM_LIT:1>:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return self.sessions[index]<EOL><DEDENT>", "docstring": "Returns a registered GPServer object with a matching GenePattern server url or index\nReturns None if no matching result was found\n:param server:\n:return:", "id": "f13676:c0:m1"}
{"signature": "def display(content):", "body": "if isinstance(content, gp.GPServer):<EOL><INDENT>IPython.display.display(GPAuthWidget(content))<EOL><DEDENT>elif isinstance(content, gp.GPTask):<EOL><INDENT>IPython.display.display(GPTaskWidget(content))<EOL><DEDENT>elif isinstance(content, gp.GPJob):<EOL><INDENT>IPython.display.display(GPJobWidget(content))<EOL><DEDENT>else:<EOL><INDENT>IPython.display.display(content)<EOL><DEDENT>", "docstring": "Display a widget, text or other media in a notebook without the need to import IPython at the top level.\n\nAlso handles wrapping GenePattern Python Library content in widgets.\n:param content:\n:return:", "id": "f13676:m2"}
{"signature": "def get_number_of_app_ports(app):", "body": "mode = _get_networking_mode(app)<EOL>ports_list = None<EOL>if mode == '<STR_LIT:host>':<EOL><INDENT>ports_list = _get_port_definitions(app)<EOL><DEDENT>elif mode == '<STR_LIT>':<EOL><INDENT>ports_list = _get_port_definitions(app)<EOL>if ports_list is None:<EOL><INDENT>ports_list = _get_container_port_mappings(app)<EOL><DEDENT><DEDENT>elif mode == '<STR_LIT>':<EOL><INDENT>ports_list = _get_ip_address_discovery_ports(app)<EOL>if not ports_list:<EOL><INDENT>ports_list = _get_container_port_mappings(app)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\".format(mode))<EOL><DEDENT>return len(ports_list)<EOL>", "docstring": "Get the number of ports for the given app JSON. This roughly follows the\nlogic in marathon-lb for finding app IPs/ports, although we are only\ninterested in the quantity of ports an app should have and don't consider\nthe specific IPs/ports of individual tasks:\nhttps://github.com/mesosphere/marathon-lb/blob/v1.10.3/utils.py#L393-L415\n\n:param app: The app JSON from the Marathon API.\n:return: The number of ports for the app.", "id": "f13680:m0"}
{"signature": "def check_signalled_usr1(self):", "body": "was_signalled, self._signalled_usr1 = self._signalled_usr1, False<EOL>return was_signalled<EOL>", "docstring": "Check and reset the ``_signalled_usr1`` flag.", "id": "f13681:c2:m2"}
{"signature": "def check_signalled_hup(self):", "body": "was_signalled, self._signalled_hup = self._signalled_hup, False<EOL>return was_signalled<EOL>", "docstring": "Check and reset the ``_signalled_hup`` flag.", "id": "f13681:c2:m1"}
{"signature": "def get_kv_data(self, path):", "body": "return self._kv_data.get(path)<EOL>", "docstring": "Read KV data at the given path. Returns the data and metadata.", "id": "f13683:c0:m1"}
{"signature": "def read_request_json(request):", "body": "return json.loads(request.content.read().decode('<STR_LIT:utf-8>'))<EOL>", "docstring": "Read the body of a request and decode it as JSON. The counterpart to\n``marathon_acme.server.write_request_json`` but only used in tests.", "id": "f13690:m0"}
{"signature": "def _parse_marathon_event_timestamp(timestamp):", "body": "return datetime.strptime(timestamp, '<STR_LIT>')<EOL>", "docstring": "Parse Marathon's ISO8601-like timestamps into a datetime.", "id": "f13691:m4"}
{"signature": "def IsMarathonEvent(event_type, **kwargs):", "body": "matching_dict = {<EOL>'<STR_LIT>': Equals(event_type),<EOL>'<STR_LIT>': After(_parse_marathon_event_timestamp,<EOL>matches_time_or_just_before(datetime.utcnow()))<EOL>}<EOL>matching_dict.update(kwargs)<EOL>return MatchesDict(matching_dict)<EOL>", "docstring": "Match a dict (deserialized from JSON) as a Marathon event. Matches the\nevent type and checks for a recent timestamp.\n\n:param event_type: The event type ('eventType' field value)\n:param kwargs: Any other matchers to apply to the dict", "id": "f13691:m3"}
{"signature": "def when_finished(self):", "body": "d = Deferred()<EOL>self._waiting.append(d)<EOL>return d<EOL>", "docstring": "Get a deferred that will be fired when the connection is closed.", "id": "f13695:c0:m5"}
{"signature": "def _parse_field_value(line):", "body": "if line.startswith('<STR_LIT::>'):<EOL><INDENT>return None, None<EOL><DEDENT>if '<STR_LIT::>' not in line:<EOL><INDENT>return line, '<STR_LIT>'<EOL><DEDENT>field, value = line.split('<STR_LIT::>', <NUM_LIT:1>)<EOL>value = value[<NUM_LIT:1>:] if value.startswith('<STR_LIT:U+0020>') else value<EOL>return field, value<EOL>", "docstring": "Parse the field and value from a line.", "id": "f13695:m0"}
{"signature": "def __init__(<EOL>self, handler, max_length=MAX_LENGTH, timeout=None, reactor=None):", "body": "self._handler = handler<EOL>self._max_length = max_length<EOL>self._timeout = timeout<EOL>if reactor is None:<EOL><INDENT>from twisted.internet import reactor as _reactor<EOL>reactor = _reactor<EOL><DEDENT>self._reactor = reactor<EOL>self._waiting = []<EOL>self._buffer = b'<STR_LIT>'<EOL>self._reset_event_data()<EOL>", "docstring": ":param handler:\n    A 2-args callable that will be called back with the event and data\n    when a complete message is received.\n:param int max_length:\n    The maximum length in bytes of a single line in an SSE event that\n    will be accepted.\n:param float timeout:\n    Amount of time in seconds to wait for some data to be received\n    before timing out. (Default: None - no timeout).\n:param reactor:\n    Reactor to use to timeout the connection.", "id": "f13695:c0:m0"}
{"signature": "def init_logging(log_level):", "body": "log_level_filter = LogLevelFilterPredicate(<EOL>LogLevel.levelWithName(log_level))<EOL>log_level_filter.setLogLevelForNamespace(<EOL>'<STR_LIT>', LogLevel.warn)<EOL>log_observer = FilteringLogObserver(<EOL>textFileLogObserver(sys.stdout), [log_level_filter])<EOL>globalLogPublisher.addObserver(log_observer)<EOL>", "docstring": "Initialise the logging by adding an observer to the global log publisher.\n\n:param str log_level: The minimum log level to log messages for.", "id": "f13696:m6"}
{"signature": "@app.route('<STR_LIT>', branch=True, methods=['<STR_LIT:GET>'])<EOL><INDENT>def acme_challenge(self, request):<DEDENT>", "body": "return self.responder_resource<EOL>", "docstring": "Respond to ACME challenge validation requests on\n``/.well-known/acme-challenge/`` using the ACME responder resource.", "id": "f13697:c0:m2"}
{"signature": "@app.route('<STR_LIT>', methods=['<STR_LIT:GET>'])<EOL><INDENT>def acme_challenge_ping(self, request):<DEDENT>", "body": "request.setResponseCode(OK)<EOL>write_request_json(request, {'<STR_LIT:message>': '<STR_LIT>'})<EOL>", "docstring": "Respond to requests on ``/.well-known/acme-challenge/ping`` to debug\npath routing issues.", "id": "f13697:c0:m3"}
{"signature": "def get_single_header(headers, key):", "body": "raw_headers = headers.getRawHeaders(key)<EOL>if raw_headers is None:<EOL><INDENT>return None<EOL><DEDENT>header, _ = cgi.parse_header(raw_headers[-<NUM_LIT:1>])<EOL>return header<EOL>", "docstring": "Get a single value for the given key out of the given set of headers.\n\n:param twisted.web.http_headers.Headers headers:\n    The set of headers in which to look for the header value\n:param str key:\n    The header key", "id": "f13698:m0"}
{"signature": "def _compose_url(self, url, kwargs):", "body": "if url is None:<EOL><INDENT>url = self.url<EOL><DEDENT>if url is None:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>split_result = urisplit(url)<EOL>userinfo = split_result.userinfo<EOL>compose_kwargs = {}<EOL>for key in ['<STR_LIT>', '<STR_LIT:host>', '<STR_LIT:port>', '<STR_LIT:path>', '<STR_LIT>']:<EOL><INDENT>if key in kwargs:<EOL><INDENT>compose_kwargs[key] = kwargs.pop(key)<EOL><DEDENT>else:<EOL><INDENT>compose_kwargs[key] = getattr(split_result, key)<EOL><DEDENT><DEDENT>if '<STR_LIT>' in kwargs:<EOL><INDENT>compose_kwargs['<STR_LIT>'] = kwargs.pop('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>compose_kwargs['<STR_LIT>'] = split_result.query<EOL><DEDENT>if '<STR_LIT>' not in kwargs and userinfo is not None:<EOL><INDENT>kwargs['<STR_LIT>'] = tuple(userinfo.split('<STR_LIT::>', <NUM_LIT:2>))<EOL><DEDENT>return uricompose(**compose_kwargs)<EOL>", "docstring": "Compose a URL starting with the given URL (or self.url if that URL is\nNone) and using the values in kwargs.\n\n:param str url:\n    The base URL to use. If None, ``self.url`` will be used instead.\n:param dict kwargs:\n    A dictionary of values to override in the base URL. Relevant keys\n    will be popped from the dictionary.", "id": "f13698:c0:m3"}
{"signature": "def __init__(self, url=None, client=None, timeout=DEFAULT_TIMEOUT,<EOL>reactor=None):", "body": "self.url = url<EOL>self._timeout = timeout<EOL>self._client, self._reactor = default_client(reactor, client)<EOL>", "docstring": "Create a client with the specified default URL.", "id": "f13698:c0:m0"}
{"signature": "def request(self, method, url=None, **kwargs):", "body": "url = self._compose_url(url, kwargs)<EOL>kwargs.setdefault('<STR_LIT>', self._timeout)<EOL>d = self._client.request(method, url, reactor=self._reactor, **kwargs)<EOL>d.addCallback(self._log_request_response, method, url, kwargs)<EOL>d.addErrback(self._log_request_error, url)<EOL>return d<EOL>", "docstring": "Perform a request.\n\n:param: method:\n    The HTTP method to use (example is `GET`).\n:param: url:\n    The URL to use. The default value is the URL this client was\n    created with (`self.url`) (example is `http://localhost:8080`)\n:param: kwargs:\n    Any other parameters that will be passed to `treq.request`, for\n    example headers. Or any URL parameters to override, for example\n    path, query or fragment.", "id": "f13698:c0:m4"}
{"signature": "def HasRequestProperties(method, url, query=None):", "body": "if query is None:<EOL><INDENT>query = {}<EOL><DEDENT>return MatchesStructure(<EOL>method=Equals(method.encode('<STR_LIT:ascii>')),<EOL>path=Equals(url.encode('<STR_LIT:ascii>')),<EOL>uri=After(lambda u: urisplit(u).getquerydict(), Equals(query))<EOL>)<EOL>", "docstring": "Check if a HTTP request object has certain properties.\n\nParses the query dict from the request URI rather than using the request\n\"args\" property as the args do not include query parameters that have no\nvalue.\n\n:param str method:\n    The HTTP method.\n:param str url:\n    The HTTP URL, without any query parameters. Should already be percent\n    encoded.\n:param dict query:\n    A dictionary of HTTP query parameters.", "id": "f13702:m0"}
{"signature": "def add_agent(self, location, agent):", "body": "self.agents[location] = agent<EOL>", "docstring": "Add an agent for URIs with the specified location.\n:param bytes location:\n    The URI authority/location (e.g. b'example.com:80')\n:param agent: The twisted.web.iweb.IAgent to use for the location", "id": "f13704:c1:m1"}
{"signature": "@classmethod<EOL><INDENT>def from_env(cls, reactor=None, env=os.environ):<DEDENT>", "body": "address = env.get('<STR_LIT>', '<STR_LIT>')<EOL>token = env.get('<STR_LIT>', '<STR_LIT>')<EOL>ca_cert = env.get('<STR_LIT>')<EOL>tls_server_name = env.get('<STR_LIT>')<EOL>client_cert = env.get('<STR_LIT>')<EOL>client_key = env.get('<STR_LIT>')<EOL>cf = ClientPolicyForHTTPS.from_pem_files(<EOL>caKey=ca_cert, privateKey=client_key, certKey=client_cert,<EOL>tls_server_name=tls_server_name<EOL>)<EOL>client, reactor = default_client(reactor, contextFactory=cf)<EOL>return cls(address, token, client=client, reactor=reactor)<EOL>", "docstring": "Create a Vault client with configuration from the environment. Supports\na limited number of the available config options:\nhttps://www.vaultproject.io/docs/commands/index.html#environment-variables\nhttps://github.com/hashicorp/vault/blob/v0.11.3/api/client.go#L28-L40\n\nSupported:\n- ``VAULT_ADDR``\n- ``VAULT_CACERT``\n- ``VAULT_CLIENT_CERT``\n- ``VAULT_CLIENT_KEY``\n- ``VAULT_TLS_SERVER_NAME``\n- ``VAULT_TOKEN``\n\nNot currently supported:\n- ``VAULT_CAPATH``\n- ``VAULT_CLIENT_TIMEOUT``\n- ``VAULT_MAX_RETRIES``\n- ``VAULT_MFA``\n- ``VAULT_RATE_LIMIT``\n- ``VAULT_SKIP_VERIFY``\n- ``VAULT_WRAP_TTL``", "id": "f13708:c2:m1"}
{"signature": "def read_kv2(self, path, version=None, mount_path='<STR_LIT>'):", "body": "params = {}<EOL>if version is not None:<EOL><INDENT>params['<STR_LIT:version>'] = version<EOL><DEDENT>read_path = '<STR_LIT>'.format(mount_path, path)<EOL>return self.read(read_path, **params)<EOL>", "docstring": "Read some data from a key/value version 2 secret engine.", "id": "f13708:c2:m7"}
{"signature": "def create_or_update_kv2(self, path, data, cas=None, mount_path='<STR_LIT>'):", "body": "params = {<EOL>'<STR_LIT>': {},<EOL>'<STR_LIT:data>': data<EOL>}<EOL>if cas is not None:<EOL><INDENT>params['<STR_LIT>']['<STR_LIT>'] = cas<EOL><DEDENT>write_path = '<STR_LIT>'.format(mount_path, path)<EOL>return self.write(write_path, **params)<EOL>", "docstring": "Create or update some data in a key/value version 2 secret engine.\n\n:raises CasError:\n    Raises an error if the ``cas`` value, when provided, doesn't match\n    Vault's version for the key.", "id": "f13708:c2:m8"}
{"signature": "def write(self, path, **data):", "body": "d = self.request('<STR_LIT>', '<STR_LIT>' + path, json=data)<EOL>return d.addCallback(self._handle_response, check_cas=True)<EOL>", "docstring": "Write data to Vault. Returns the JSON-decoded response.", "id": "f13708:c2:m6"}
{"signature": "def read(self, path, **params):", "body": "d = self.request('<STR_LIT:GET>', '<STR_LIT>' + path, params=params)<EOL>return d.addCallback(self._handle_response)<EOL>", "docstring": "Read data from Vault. Returns the JSON-decoded response.", "id": "f13708:c2:m5"}
{"signature": "def __init__(self, url, token, *args, **kwargs):", "body": "super(VaultClient, self).__init__(*args, url=url, **kwargs)<EOL>self._token = token<EOL>", "docstring": ":param url: the URL for Vault\n:param token: the Vault auth token", "id": "f13708:c2:m0"}
{"signature": "def get_apps(self):", "body": "return self.get_json_field('<STR_LIT>', path='<STR_LIT>')<EOL>", "docstring": "Get the currently running Marathon apps, returning a list of app\ndefinitions.", "id": "f13710:c0:m6"}
{"signature": "def sse_content(response, handler, **sse_kwargs):", "body": "<EOL>raise_for_not_ok_status(response)<EOL>raise_for_header(response, '<STR_LIT:Content-Type>', '<STR_LIT>')<EOL>finished, _ = _sse_content_with_protocol(response, handler, **sse_kwargs)<EOL>return finished<EOL>", "docstring": "Callback to collect the Server-Sent Events content of a response. Callbacks\npassed will receive event data.\n\n:param response:\n    The response from the SSE request.\n:param handler:\n    The handler for the SSE protocol.", "id": "f13710:m2"}
{"signature": "def raise_for_not_ok_status(response):", "body": "if response.code != OK:<EOL><INDENT>raise HTTPError('<STR_LIT>' % (<EOL>response.code, uridecode(response.request.absoluteURI)))<EOL><DEDENT>return response<EOL>", "docstring": "Raises a `requests.exceptions.HTTPError` if the response has a non-200\nstatus code.", "id": "f13710:m0"}
{"signature": "def _cert_data_to_pem_objects(cert_data):", "body": "pem_objects = []<EOL>for key in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>pem_objects.extend(pem.parse(cert_data[key].encode('<STR_LIT:utf-8>')))<EOL><DEDENT>return pem_objects<EOL>", "docstring": "Given a non-None response from the Vault key/value store, convert the\nkey/values into a list of PEM objects.", "id": "f13712:m3"}
{"signature": "def generate_wildcard_pem_bytes():", "body": "key = generate_private_key(u'<STR_LIT>')<EOL>name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u'<STR_LIT:*>')])<EOL>cert = (<EOL>x509.CertificateBuilder()<EOL>.issuer_name(name)<EOL>.subject_name(name)<EOL>.not_valid_before(datetime.today() - timedelta(days=<NUM_LIT:1>))<EOL>.not_valid_after(datetime.now() + timedelta(days=<NUM_LIT>))<EOL>.serial_number(int(uuid.uuid4()))<EOL>.public_key(key.public_key())<EOL>.sign(<EOL>private_key=key,<EOL>algorithm=hashes.SHA256(),<EOL>backend=default_backend())<EOL>)<EOL>return b'<STR_LIT>'.join((<EOL>key.private_bytes(<EOL>encoding=serialization.Encoding.PEM,<EOL>format=serialization.PrivateFormat.TraditionalOpenSSL,<EOL>encryption_algorithm=serialization.NoEncryption()),<EOL>cert.public_bytes(serialization.Encoding.PEM)<EOL>))<EOL>", "docstring": "Generate a wildcard (subject name '*') self-signed certificate valid for\n10 years.\n\nhttps://cryptography.io/en/latest/x509/tutorial/#creating-a-self-signed-certificate\n\n:return: Bytes representation of the PEM certificate data", "id": "f13714:m5"}
{"signature": "def maybe_key(pem_path):", "body": "acme_key_file = pem_path.child(u'<STR_LIT>')<EOL>if acme_key_file.exists():<EOL><INDENT>key = _load_pem_private_key_bytes(acme_key_file.getContent())<EOL><DEDENT>else:<EOL><INDENT>key = generate_private_key(u'<STR_LIT>')<EOL>acme_key_file.setContent(_dump_pem_private_key_bytes(key))<EOL><DEDENT>return succeed(JWKRSA(key=key))<EOL>", "docstring": "Set up a client key if one does not exist already.\n\nhttps://gist.github.com/glyph/27867a478bb71d8b6046fbfb176e1a33#file-local-certs-py-L32-L50\n\n:type pem_path: twisted.python.filepath.FilePath\n:param pem_path:\n    The path to the certificate directory to use.\n:rtype: twisted.internet.defer.Deferred", "id": "f13714:m2"}
{"signature": "def _issue_cert(self, domain):", "body": "def errback(failure):<EOL><INDENT>failure.trap(txacme_ServerError)<EOL>acme_error = failure.value.message<EOL>if acme_error.code in ['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>self.log.error(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>', code=acme_error.code, domain=domain,<EOL>detail=acme_error.detail)<EOL><DEDENT>else:<EOL><INDENT>return failure<EOL><DEDENT><DEDENT>d = self.txacme_service.issue_cert(domain)<EOL>return d.addErrback(errback)<EOL>", "docstring": "Issue a certificate for the given domain.", "id": "f13715:c0:m11"}
{"signature": "def __init__(self, marathon_client, group, cert_store, mlb_client,<EOL>txacme_client_creator, reactor, email=None,<EOL>allow_multiple_certs=False):", "body": "self.marathon_client = marathon_client<EOL>self.group = group<EOL>self.reactor = reactor<EOL>responder = HTTP01Responder()<EOL>self.server = MarathonAcmeServer(responder.resource)<EOL>mlb_cert_store = MlbCertificateStore(cert_store, mlb_client)<EOL>self.txacme_service = AcmeIssuingService(<EOL>mlb_cert_store, txacme_client_creator, reactor, [responder], email)<EOL>self._allow_multiple_certs = allow_multiple_certs<EOL>self._server_listening = None<EOL>", "docstring": "Create the marathon-acme service.\n\n:param marathon_client: The Marathon API client.\n:param group: The name of the marathon-lb group.\n:param cert_store: The ``ICertificateStore`` instance to use.\n:param mlb_clinet: The marathon-lb API client.\n:param txacme_client_creator: Callable to create the txacme client.\n:param reactor: The reactor to use.\n:param email: The ACME registration email.\n:param allow_multiple_certs:\n    Whether to allow multiple certificates per app port.", "id": "f13715:c0:m0"}
{"signature": "def print_file_info():", "body": "tpl = TableLogger(columns='<STR_LIT>')<EOL>for f in os.listdir('<STR_LIT:.>'):<EOL><INDENT>size = os.stat(f).st_size<EOL>date_created = datetime.fromtimestamp(os.path.getctime(f))<EOL>date_modified = datetime.fromtimestamp(os.path.getmtime(f))<EOL>tpl(f, date_created, date_modified, size)<EOL><DEDENT>", "docstring": "Prints file details in the current directory", "id": "f13717:m2"}
{"signature": "def __call__(self, value):", "body": "fmt = self.fmt(value)<EOL>if len(fmt) > self.col_width:<EOL><INDENT>fmt = fmt[:self.col_width - <NUM_LIT:3>] + '<STR_LIT>'<EOL><DEDENT>fmt = self.just(fmt, self.col_width)<EOL>return fmt<EOL>", "docstring": "Formats a given value\n\nArgs:\n    value: value to format\n\nReturns:\n    str: formatted value", "id": "f13718:c0:m1"}
{"signature": "def csv_format(self, row):", "body": "if PY2:<EOL><INDENT>buf = io.BytesIO()<EOL>csvwriter = csv.writer(buf)<EOL>csvwriter.writerow([c.strip().encode(self.encoding) for c in row])<EOL>csv_line = buf.getvalue().decode(self.encoding).rstrip()<EOL><DEDENT>else:<EOL><INDENT>buf = io.StringIO()<EOL>csvwriter = csv.writer(buf)<EOL>csvwriter.writerow([c.strip() for c in row])<EOL>csv_line = buf.getvalue().rstrip()<EOL><DEDENT>return csv_line<EOL>", "docstring": "Converts row values into a csv line\n\n        Args:\n            row: a list of row cells as unicode\n        Returns:\n            csv_line (unicode)", "id": "f13719:c0:m10"}
{"signature": "def __call__(self, *args):", "body": "if len(self.formatters) == <NUM_LIT:0>:<EOL><INDENT>self.setup(*args)<EOL><DEDENT>row_cells = []<EOL>if self.rownum:<EOL><INDENT>row_cells.append(<NUM_LIT:0>)<EOL><DEDENT>if self.timestamp:<EOL><INDENT>row_cells.append(datetime.datetime.now())<EOL><DEDENT>if self.time_diff:<EOL><INDENT>row_cells.append(<NUM_LIT:0>)<EOL><DEDENT>row_cells.extend(args)<EOL>if len(row_cells) != len(self.formatters):<EOL><INDENT>raise ValueError('<STR_LIT>'.format(<EOL>len(self.formatters), len(row_cells)))<EOL><DEDENT>line = self.format_row(*row_cells)<EOL>self.print_line(line)<EOL>", "docstring": "Prints a formatted row\n\n        Args:\n            args: row cells", "id": "f13719:c0:m1"}
{"signature": "def setup_formatters(self, *args):", "body": "formatters = []<EOL>col_offset = <NUM_LIT:0><EOL>if self.rownum:<EOL><INDENT>formatters.append(fmt.RowNumberFormatter.setup(<NUM_LIT:0>))<EOL>col_offset += <NUM_LIT:1><EOL><DEDENT>if self.timestamp:<EOL><INDENT>formatters.append(fmt.DatetimeFormatter.setup(<EOL>datetime.datetime.now(),<EOL>fmt='<STR_LIT>'.format,<EOL>col_width=<NUM_LIT>))<EOL>col_offset += <NUM_LIT:1><EOL><DEDENT>if self.time_diff:<EOL><INDENT>formatters.append(fmt.TimeDeltaFormatter.setup(<NUM_LIT:0>))<EOL>col_offset += <NUM_LIT:1><EOL><DEDENT>for coli, value in enumerate(args):<EOL><INDENT>fmt_class = type2fmt.get(type(value), fmt.GenericFormatter)<EOL>kwargs = {}<EOL>if self.default_colwidth is not None:<EOL><INDENT>kwargs['<STR_LIT>'] = self.default_colwidth<EOL><DEDENT>if coli in self.column_widths:<EOL><INDENT>kwargs['<STR_LIT>'] = self.column_widths[coli]<EOL><DEDENT>elif self.columns and self.columns[coli + col_offset] in self.column_widths:<EOL><INDENT>kwargs['<STR_LIT>'] = self.column_widths[self.columns[coli + col_offset]]<EOL><DEDENT>if fmt_class == fmt.FloatFormatter and self.float_format is not None:<EOL><INDENT>kwargs['<STR_LIT>'] = self.float_format<EOL><DEDENT>if coli in self.column_formatters:<EOL><INDENT>kwargs['<STR_LIT>'] = self.column_formatters[coli]<EOL><DEDENT>elif self.columns and self.columns[coli + col_offset] in self.column_formatters:<EOL><INDENT>kwargs['<STR_LIT>'] = self.column_formatters[self.columns[coli + col_offset]]<EOL><DEDENT>formatter = fmt_class.setup(value, **kwargs)<EOL>formatters.append(formatter)<EOL><DEDENT>self.formatters = formatters<EOL>", "docstring": "Setup formatters by observing the first row.\n\n        Args:\n            *args: row cells", "id": "f13719:c0:m4"}
{"signature": "def strToBool(val):", "body": "if isinstance(val, str):<EOL><INDENT>val = val.lower()<EOL><DEDENT>return val in ['<STR_LIT:true>', '<STR_LIT>', '<STR_LIT:yes>', True]<EOL>", "docstring": "Helper function to turn a string representation of \"true\" into\nboolean True.", "id": "f13724:m1"}
{"signature": "@register.tag<EOL>def bootstrap_pager(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) < <NUM_LIT:2>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>page = parser.compile_filter(bits[<NUM_LIT:1>])<EOL>kwargs = {}<EOL>bits = bits[<NUM_LIT:2>:]<EOL>kwarg_re = re.compile(r'<STR_LIT>')<EOL>if len(bits):<EOL><INDENT>for bit in bits:<EOL><INDENT>match = kwarg_re.match(bit)<EOL>if not match:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\")<EOL><DEDENT>name, value = match.groups()<EOL>kwargs[name] = parser.compile_filter(value)<EOL><DEDENT><DEDENT>return BootstrapPagerNode(page, kwargs)<EOL>", "docstring": "Renders a Page object as a Twitter Bootstrap styled pager bar.\nCompatible with Bootstrap 2.x and 3.x only.\n\nExample::\n\n    {% bootstrap_pager page_obj %}\n\n\nNamed Parameters::\n\n\n    previous_label - The label to show for the Previous link (defaults to \"Previous Page\")\n\n    next_label - The label to show for the Next link (defualts to \"Next Page\")\n\n    previous_title - The link title for the previous link (defaults to \"Previous Page\")\n\n    next_title - The link title for the next link (defaults to \"Next Page\")\n\n    url_view_name - The named URL to use. Defaults to None. If None, then the\n                    default template simply appends the url parameter as a\n                    relative URL link, eg: <a href=\"?page=1\">1</a>\n\n    url_param_name - The name of the parameter to use in the URL. If\n                     url_view_name is set to None, this string is used as the\n                     parameter name in the relative URL path. 
If a URL\n                     name is specified, this string is used as the\n                     parameter name passed into the reverse() method for\n                     the URL.\n\n    url_extra_args - This is used only in conjunction with url_view_name.\n                    When referencing a URL, additional arguments may be\n                    passed in as a list.\n\n    url_extra_kwargs - This is used only in conjunction with url_view_name.\n                       When referencing a URL, additional named arguments\n                       may be passed in as a dictionary.\n\n    url_get_params - The other get parameters to pass, only the page\n                     number will be overwritten. Use this to preserve\n                     filters.\n\n    url_anchor - The anchor to use in URLs. Defaults to None.\n\n    extra_pager_classes - A space separated list of CSS class names\n                          that will be added to the top level <ul>\n                          HTML element. This could be  used to,\n                          as an example, add a class to prevent\n                          the pager from showing up when printing.", "id": "f13724:m4"}
{"signature": "def get_page_url(page_num, current_app, url_view_name, url_extra_args, url_extra_kwargs, url_param_name, url_get_params, url_anchor):", "body": "if url_view_name is not None:<EOL><INDENT>url_extra_kwargs[url_param_name] = page_num<EOL>try:<EOL><INDENT>url = reverse(url_view_name, args=url_extra_args, kwargs=url_extra_kwargs, current_app=current_app)<EOL><DEDENT>except NoReverseMatch as e:  <EOL><INDENT>if settings.SETTINGS_MODULE:<EOL><INDENT>if django.VERSION < (<NUM_LIT:1>, <NUM_LIT:9>, <NUM_LIT:0>):<EOL><INDENT>separator  = '<STR_LIT:.>'<EOL><DEDENT>else:<EOL><INDENT>separator  = '<STR_LIT::>' <EOL><DEDENT>project_name = settings.SETTINGS_MODULE.split('<STR_LIT:.>')[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>url = reverse(project_name + separator + url_view_name, args=url_extra_args, kwargs=url_extra_kwargs, current_app=current_app)<EOL><DEDENT>except NoReverseMatch:<EOL><INDENT>raise e <EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise e <EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>url = '<STR_LIT>'<EOL>url_get_params = url_get_params or QueryDict(url)<EOL>url_get_params = url_get_params.copy()<EOL>url_get_params[url_param_name] = str(page_num)<EOL><DEDENT>if len(url_get_params) > <NUM_LIT:0>:<EOL><INDENT>if not isinstance(url_get_params, QueryDict):<EOL><INDENT>tmp = QueryDict(mutable=True)<EOL>tmp.update(url_get_params)<EOL>url_get_params = tmp<EOL><DEDENT>url += '<STR_LIT:?>' + url_get_params.urlencode()<EOL><DEDENT>if (url_anchor is not None):<EOL><INDENT>url += '<STR_LIT:#>' + url_anchor<EOL><DEDENT>return url<EOL>", "docstring": "Helper function to return a valid URL string given the template tag parameters", "id": "f13724:m2"}
{"signature": "@register.tag<EOL>def bootstrap_paginate(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) < <NUM_LIT:2>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>page = parser.compile_filter(bits[<NUM_LIT:1>])<EOL>kwargs = {}<EOL>bits = bits[<NUM_LIT:2>:]<EOL>kwarg_re = re.compile(r'<STR_LIT>')<EOL>if len(bits):<EOL><INDENT>for bit in bits:<EOL><INDENT>match = kwarg_re.match(bit)<EOL>if not match:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\")<EOL><DEDENT>name, value = match.groups()<EOL>kwargs[name] = parser.compile_filter(value)<EOL><DEDENT><DEDENT>return BootstrapPaginationNode(page, kwargs)<EOL>", "docstring": "Renders a Page object as a Twitter Bootstrap styled pagination bar.\nCompatible with Bootstrap 3.x and 4.x only.\n\nExample::\n\n    {% bootstrap_paginate page_obj range=10 %}\n\n\nNamed Parameters::\n\n    range - The size of the pagination bar (ie, if set to 10 then, at most,\n            10 page numbers will display at any given time) Defaults to\n            None, which shows all pages.\n\n\n    size - Accepts \"small\", and \"large\". Defaults to\n                None which is the standard size.\n\n    show_prev_next - Accepts \"true\" or \"false\". Determines whether or not\n                    to show the previous and next page links. Defaults to\n                    \"true\"\n\n\n    show_first_last - Accepts \"true\" or \"false\". Determines whether or not\n                      to show the first and last page links. Defaults to\n                      \"false\"\n\n    previous_label - The text to display for the previous page link.\n                     Defaults to \"&larr;\"\n\n    next_label - The text to display for the next page link. Defaults to\n                 \"&rarr;\"\n\n    first_label - The text to display for the first page link. Defaults to\n                  \"&laquo;\"\n\n    last_label - The text to display for the last page link. 
Defaults to\n                 \"&raquo;\"\n\n    url_view_name - The named URL to use. Defaults to None. If None, then the\n                    default template simply appends the url parameter as a\n                    relative URL link, eg: <a href=\"?page=1\">1</a>\n\n    url_param_name - The name of the parameter to use in the URL. If\n                     url_view_name is set to None, this string is used as the\n                     parameter name in the relative URL path. If a URL\n                     name is specified, this string is used as the\n                     parameter name passed into the reverse() method for\n                     the URL.\n\n    url_extra_args - This is used only in conjunction with url_view_name.\n                     When referencing a URL, additional arguments may be\n                     passed in as a list.\n\n    url_extra_kwargs - This is used only in conjunction with url_view_name.\n                       When referencing a URL, additional named arguments\n                       may be passed in as a dictionary.\n\n    url_get_params - The other get parameters to pass, only the page\n                     number will be overwritten. Use this to preserve\n                     filters.\n\n    url_anchor - The anchor to use in URLs. Defaults to None.\n\n    extra_pagination_classes - A space separated list of CSS class names\n                               that will be added to the top level <ul>\n                               HTML element. In particular, this can be\n                               utilized in Bootstrap 4 installatinos  to\n                               add the appropriate alignment classes from\n                               Flexbox utilites, eg:  justify-content-center", "id": "f13724:m3"}
{"signature": "@property<EOL><INDENT>def is_branch(self):<DEDENT>", "body": "return self.semantics in ('<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": "True if the instruction is a jump", "id": "f13733:c4:m32"}
{"signature": "@property<EOL><INDENT>def bytes(self):<DEDENT>", "body": "b = [bytes([self._opcode])]<EOL>for offset in reversed(range(self.operand_size)):<EOL><INDENT>b.append(bytes([(self.operand >> offset * <NUM_LIT:8>) & <NUM_LIT>]))<EOL><DEDENT>return b'<STR_LIT>'.join(b)<EOL>", "docstring": "Encoded instruction", "id": "f13733:c4:m18"}
{"signature": "def assemble_one(asmcode, pc=<NUM_LIT:0>, fork=DEFAULT_FORK):", "body": "try:<EOL><INDENT>instruction_table = instruction_tables[fork]<EOL>asmcode = asmcode.strip().split('<STR_LIT:U+0020>')<EOL>instr = instruction_table[asmcode[<NUM_LIT:0>].upper()]<EOL>if pc:<EOL><INDENT>instr.pc = pc<EOL><DEDENT>if instr.operand_size > <NUM_LIT:0>:<EOL><INDENT>assert len(asmcode) == <NUM_LIT:2><EOL>instr.operand = int(asmcode[<NUM_LIT:1>], <NUM_LIT:0>)<EOL><DEDENT>return instr<EOL><DEDENT>except:<EOL><INDENT>raise AssembleError(\"<STR_LIT>\" % pc)<EOL><DEDENT>", "docstring": "Assemble one EVM instruction from its textual representation.\n\n        :param asmcode: assembly code for one instruction\n        :type asmcode: str\n        :param pc: program counter of the instruction(optional)\n        :type pc: int\n        :param fork: fork name (optional)\n        :type fork: str\n        :return: An Instruction object\n        :rtype: Instruction\n\n        Example use::\n\n            >>> print assemble_one('LT')", "id": "f13733:m0"}
{"signature": "def __eq__(self, other):", "body": "return self._opcode == other._opcode andself._name == other._name andself._operand == other._operand andself._operand_size == other._operand_size andself._pops == other._pops andself._pushes == other._pushes andself._fee == other._fee andself._pc == other._pc andself._description == other._description<EOL>", "docstring": "Instructions are equal if all features match", "id": "f13733:c4:m1"}
{"signature": "def assemble_all(asmcode, pc=<NUM_LIT:0>, fork=DEFAULT_FORK):", "body": "asmcode = asmcode.split('<STR_LIT:\\n>')<EOL>asmcode = iter(asmcode)<EOL>for line in asmcode:<EOL><INDENT>if not line.strip():<EOL><INDENT>continue<EOL><DEDENT>instr = assemble_one(line, pc=pc, fork=fork)<EOL>yield instr<EOL>pc += instr.size<EOL><DEDENT>", "docstring": "Assemble a sequence of textual representation of EVM instructions\n\n        :param asmcode: assembly code for any number of instructions\n        :type asmcode: str\n        :param pc: program counter of the first instruction(optional)\n        :type pc: int\n        :param fork: fork name (optional)\n        :type fork: str\n        :return: An generator of Instruction objects\n        :rtype: generator[Instructions]\n\n        Example use::\n\n            >>> assemble_one('''PUSH1 0x60\\n \\\n                            PUSH1 0x40\\n \\\n                            MSTORE\\n \\\n                            PUSH1 0x2\\n \\\n                            PUSH2 0x108\\n \\\n                            PUSH1 0x0\\n \\\n                            POP\\n \\\n                            SSTORE\\n \\\n                            PUSH1 0x40\\n \\\n                            MLOAD\\n \\\n                            ''')", "id": "f13733:m1"}
{"signature": "@property<EOL><INDENT>def is_endtx(self):<DEDENT>", "body": "return self.semantics in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": "True if the instruction is a transaction terminator", "id": "f13733:c4:m30"}
{"signature": "@property<EOL><INDENT>def is_starttx(self):<DEDENT>", "body": "return self.semantics in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": "True if the instruction is a transaction initiator", "id": "f13733:c4:m31"}
{"signature": "@property<EOL><INDENT>def operand_size(self):<DEDENT>", "body": "return self._operand_size<EOL>", "docstring": "The immediate operand size", "id": "f13733:c4:m8"}
{"signature": "@property<EOL><INDENT>def is_environmental(self):<DEDENT>", "body": "return self.group == '<STR_LIT>'<EOL>", "docstring": "True if the instruction access enviromental data", "id": "f13733:c4:m33"}
{"signature": "@property<EOL><INDENT>def has_operand(self):<DEDENT>", "body": "return self.operand_size > <NUM_LIT:0><EOL>", "docstring": "True if the instruction uses an immediate operand", "id": "f13733:c4:m9"}
{"signature": "@property<EOL><INDENT>def pushes(self):<DEDENT>", "body": "return self._pushes<EOL>", "docstring": "Number words pushed to the stack", "id": "f13733:c4:m13"}
{"signature": "@property<EOL><INDENT>def is_terminator(self):<DEDENT>", "body": "return self.semantics in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": "True if the instruction is a basic block terminator", "id": "f13733:c4:m29"}
{"signature": "@property<EOL><INDENT>def name(self):<DEDENT>", "body": "if self._name == '<STR_LIT>':<EOL><INDENT>return '<STR_LIT>' % self.operand_size<EOL><DEDENT>elif self._name == '<STR_LIT>':<EOL><INDENT>return '<STR_LIT>' % self.pops<EOL><DEDENT>elif self._name == '<STR_LIT>':<EOL><INDENT>return '<STR_LIT>' % (self.pops - <NUM_LIT:1>)<EOL><DEDENT>elif self._name == '<STR_LIT>':<EOL><INDENT>return '<STR_LIT>' % (self.pops - <NUM_LIT:2>)<EOL><DEDENT>return self._name<EOL>", "docstring": "The instruction name/mnemonic", "id": "f13733:c4:m6"}
{"signature": "@property<EOL><INDENT>def opcode(self):<DEDENT>", "body": "return self._opcode<EOL>", "docstring": "The opcode as an integer", "id": "f13733:c4:m4"}
{"signature": "@property<EOL><INDENT>def mnemonic(self):<DEDENT>", "body": "return self.name<EOL>", "docstring": "Alias for name", "id": "f13733:c4:m5"}
{"signature": "@property<EOL><INDENT>def is_arithmetic(self):<DEDENT>", "body": "return self.semantics in (<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": "True if the instruction is an arithmetic operation", "id": "f13733:c4:m36"}
{"signature": "@property<EOL><INDENT>def reads_from_storage(self):<DEDENT>", "body": "return self.semantics in '<STR_LIT>'<EOL>", "docstring": "True if the instruction reads from the storage", "id": "f13733:c4:m28"}
{"signature": "@property<EOL><INDENT>def writes_to_memory(self):<DEDENT>", "body": "return self.semantics in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": "True if the instruction writes to memory", "id": "f13733:c4:m25"}
{"signature": "def parse_operand(self, buf):", "body": "buf = iter(buf)<EOL>try:<EOL><INDENT>operand = <NUM_LIT:0><EOL>for _ in range(self.operand_size):<EOL><INDENT>operand <<= <NUM_LIT:8><EOL>operand |= next(buf)<EOL><DEDENT>self._operand = operand<EOL><DEDENT>except StopIteration:<EOL><INDENT>raise ParseError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Parses an operand from buf\n\n            :param buf: a buffer\n            :type buf: iterator/generator/string", "id": "f13733:c4:m7"}
{"signature": "def assemble_hex(asmcode, pc=<NUM_LIT:0>, fork=DEFAULT_FORK):", "body": "if isinstance(asmcode, list):<EOL><INDENT>return '<STR_LIT>' + hexlify(b'<STR_LIT>'.join([x.bytes for x in asmcode])).decode('<STR_LIT:ascii>')<EOL><DEDENT>return '<STR_LIT>' + hexlify(assemble(asmcode, pc=pc, fork=fork)).decode('<STR_LIT:ascii>')<EOL>", "docstring": "Assemble an EVM program\n\n        :param asmcode: an evm assembler program\n        :type asmcode: str | iterator[Instruction]\n        :param pc: program counter of the first instruction(optional)\n        :type pc: int\n        :param fork: fork name (optional)\n        :type fork: str\n        :return: the hex representation of the bytecode\n        :rtype: str\n\n        Example use::\n\n            >>> assemble_hex('''PUSH1 0x60\\n \\\n                                       BLOCKHASH\\n \\\n                                       MSTORE\\n \\\n                                       PUSH1 0x2\\n \\\n                                       PUSH2 0x100\\n \\\n                                    ''')\n            ...\n            \"0x6060604052600261010\"", "id": "f13733:m7"}
{"signature": "def disassemble_hex(bytecode, pc=<NUM_LIT:0>, fork=DEFAULT_FORK):", "body": "if bytecode.startswith('<STR_LIT>'):<EOL><INDENT>bytecode = bytecode[<NUM_LIT:2>:]<EOL><DEDENT>bytecode = unhexlify(bytecode)<EOL>return disassemble(bytecode, pc=pc, fork=fork)<EOL>", "docstring": "Disassemble an EVM bytecode\n\n        :param bytecode: canonical representation of an evm bytecode (hexadecimal)\n        :type bytecode: str\n        :param pc: program counter of the first instruction(optional)\n        :type pc: int\n        :param fork: fork name (optional)\n        :type fork: str\n        :return: the text representation of the assembler code\n        :rtype: str\n\n        Example use::\n\n            >>> disassemble_hex(\"0x6060604052600261010\")\n            ...\n            PUSH1 0x60\n            BLOCKHASH\n            MSTORE\n            PUSH1 0x2\n            PUSH2 0x100", "id": "f13733:m6"}
{"signature": "@property<EOL><INDENT>def writes_to_stack(self):<DEDENT>", "body": "return self.pushes > <NUM_LIT:0><EOL>", "docstring": "True if the instruction writes to the stack", "id": "f13733:c4:m24"}
{"signature": "def _shell(cmd, check=True, stdin=None, stdout=None, stderr=None):  ", "body": "return subprocess.run(cmd, shell=True, check=check, stdin=stdin, stdout=stdout, stderr=stderr)<EOL>", "docstring": "Runs a subprocess shell with check=True by default", "id": "f13737:m0"}
{"signature": "def _get_current_branch():", "body": "result = temple.utils.shell('<STR_LIT>', stdout=subprocess.PIPE)<EOL>return result.stdout.decode('<STR_LIT:utf8>').strip()<EOL>", "docstring": "Determine the current git branch", "id": "f13738:m0"}
{"signature": "def _apply_template(template, target, *, checkout, extra_context):", "body": "with tempfile.TemporaryDirectory() as tempdir:<EOL><INDENT>repo_dir = cc_main.cookiecutter(<EOL>template,<EOL>checkout=checkout,<EOL>no_input=True,<EOL>output_dir=tempdir,<EOL>extra_context=extra_context)<EOL>for item in os.listdir(repo_dir):<EOL><INDENT>src = os.path.join(repo_dir, item)<EOL>dst = os.path.join(target, item)<EOL>if os.path.isdir(src):<EOL><INDENT>if os.path.exists(dst):<EOL><INDENT>shutil.rmtree(dst)<EOL><DEDENT>shutil.copytree(src, dst)<EOL><DEDENT>else:<EOL><INDENT>if os.path.exists(dst):<EOL><INDENT>os.remove(dst)<EOL><DEDENT>shutil.copy2(src, dst)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Apply a template to a temporary directory and then copy results to target.", "id": "f13748:m4"}
{"signature": "@temple.utils.set_cmd_env_var('<STR_LIT>')<EOL>def up_to_date(version=None):", "body": "temple.check.in_git_repo()<EOL>temple.check.is_temple_project()<EOL>temple_config = temple.utils.read_temple_config()<EOL>old_template_version = temple_config['<STR_LIT>']<EOL>new_template_version = version or _get_latest_template_version(temple_config['<STR_LIT>'])<EOL>return new_template_version == old_template_version<EOL>", "docstring": "Checks if a temple project is up to date with the repo\n\n    Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'update' for the duration of this\n    function.\n\n    Args:\n        version (str, optional): Update against this git SHA or branch of the template\n\n    Returns:\n        boolean: True if up to date with ``version`` (or latest version), False otherwise\n\n    Raises:\n        `NotInGitRepoError`: When running outside of a git repo\n        `InvalidTempleProjectError`: When not inside a valid temple repository", "id": "f13748:m5"}
{"signature": "def _needs_new_cc_config_for_update(old_template, old_version, new_template, new_version):", "body": "if old_template != new_template:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return _cookiecutter_configs_have_changed(new_template,<EOL>old_version,<EOL>new_version)<EOL><DEDENT>", "docstring": "Given two templates and their respective versions, return True if a new cookiecutter\nconfig needs to be obtained from the user", "id": "f13748:m6"}
{"signature": "def _cookiecutter_configs_have_changed(template, old_version, new_version):", "body": "temple.check.is_git_ssh_path(template)<EOL>repo_path = temple.utils.get_repo_path(template)<EOL>github_client = temple.utils.GithubClient()<EOL>api = '<STR_LIT>'.format(repo_path)<EOL>old_config_resp = github_client.get(api, params={'<STR_LIT>': old_version})<EOL>old_config_resp.raise_for_status()<EOL>new_config_resp = github_client.get(api, params={'<STR_LIT>': new_version})<EOL>new_config_resp.raise_for_status()<EOL>return old_config_resp.json()['<STR_LIT:content>'] != new_config_resp.json()['<STR_LIT:content>']<EOL>", "docstring": "Given an old version and new version, check if the cookiecutter.json files have changed\n\n    When the cookiecutter.json files change, it means the user will need to be prompted for\n    new context\n\n    Args:\n        template (str): The git SSH path to the template\n        old_version (str): The git SHA of the old version\n        new_version (str): The git SHA of the new version\n\n    Returns:\n        bool: True if the cookiecutter.json files have been changed in the old and new versions", "id": "f13748:m0"}
{"signature": "@main.command()<EOL>@click.argument('<STR_LIT>', nargs=<NUM_LIT:1>, required=True)<EOL>@click.option('<STR_LIT>', '<STR_LIT>', default=None,<EOL>help='<STR_LIT>')<EOL>def switch(template, version):", "body": "temple.update.update(new_template=template, new_version=version)<EOL>", "docstring": "Switch a project's template to a different template.", "id": "f13749:m5"}
{"signature": "@main.command()<EOL>@click.option('<STR_LIT:-c>', '<STR_LIT>', is_flag=True,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True,<EOL>help='<STR_LIT>')<EOL>@click.option('<STR_LIT>', '<STR_LIT>', default=None,<EOL>help='<STR_LIT>')<EOL>def update(check, enter_parameters, version):", "body": "if check:<EOL><INDENT>if temple.update.up_to_date(version=version):<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>msg = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL>raise temple.exceptions.NotUpToDateWithTemplateError(msg)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>temple.update.update(new_version=version, enter_parameters=enter_parameters)<EOL><DEDENT>", "docstring": "Update package with latest template. Must be inside of the project\nfolder to run.\n\nUsing \"-e\" will prompt for re-entering the template parameters again\neven if the project is up to date.\n\nUse \"-v\" to update to a particular version of a template.\n\nUsing \"-c\" will perform a check that the project is up to date\nwith the latest version of the template (or the version specified by \"-v\").\nNo updating will happen when using this option.", "id": "f13749:m2"}
{"signature": "@main.command()<EOL>def clean():", "body": "temple.clean.clean()<EOL>", "docstring": "Cleans temporary resources created by temple, such as the temple update branch", "id": "f13749:m4"}
{"signature": "@main.command()<EOL>@click.argument('<STR_LIT>', nargs=<NUM_LIT:1>, required=True)<EOL>@click.option('<STR_LIT>', '<STR_LIT>', default=None,<EOL>help='<STR_LIT>')<EOL>def setup(template, version):", "body": "temple.setup.setup(template, version=version)<EOL>", "docstring": "Setup new project. Takes a full git SSH path to the template as returned\nby \"temple ls\". In order to start a project from a\nparticular version (instead of the latest), use the \"-v\" option.", "id": "f13749:m1"}
{"signature": "@main.command()<EOL>@click.argument('<STR_LIT>', nargs=<NUM_LIT:1>, required=True)<EOL>@click.argument('<STR_LIT>', nargs=<NUM_LIT:1>, required=False)<EOL>@click.option('<STR_LIT>', '<STR_LIT>', is_flag=True,<EOL>help='<STR_LIT>')<EOL>def ls(github_user, template, long_format):", "body": "github_urls = temple.ls.ls(github_user, template=template)<EOL>for ssh_path, info in github_urls.items():<EOL><INDENT>if long_format:<EOL><INDENT>print(ssh_path, '<STR_LIT:->', info['<STR_LIT:description>'] or '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>print(ssh_path)<EOL><DEDENT><DEDENT>", "docstring": "List packages created with temple. Enter a github user or\norganization to list all templates under the user or org.\nUsing a template path as the second argument will list all projects\nthat have been started with that template.\n\nUse \"-l\" to print the Github repository descriptions of templates\nor projects.", "id": "f13749:m3"}
{"signature": "def is_git_ssh_path(template_path):", "body": "if not template_path.startswith('<STR_LIT>') or not template_path.endswith('<STR_LIT>'):<EOL><INDENT>raise temple.exceptions.InvalidTemplatePathError(<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "Raises a `InvalidTemplatePathError` if ``template_path`` is not a git SSH url\n\n    Note that the git SSH url must be in the form as provided from Github or from\n    ``temple ls``. For example, ``git@github.com:user/template.git``.", "id": "f13751:m0"}
{"signature": "def not_in_git_repo():", "body": "if _in_git_repo():<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise temple.exceptions.InGitRepoError(msg)<EOL><DEDENT>", "docstring": "Raises `InGitRepoError` if inside of a git repository", "id": "f13751:m3"}
{"signature": "def not_has_branch(branch):", "body": "if _has_branch(branch):<EOL><INDENT>msg = '<STR_LIT>'.format(branch)<EOL>raise temple.exceptions.ExistingBranchError(msg)<EOL><DEDENT>", "docstring": "Raises `ExistingBranchError` if the specified branch exists.", "id": "f13751:m7"}
{"signature": "def in_git_repo():", "body": "if not _in_git_repo():<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise temple.exceptions.NotInGitRepoError(msg)<EOL><DEDENT>", "docstring": "Raises `NotInGitRepoError` if not inside a git repository", "id": "f13751:m2"}
{"signature": "def in_clean_repo():", "body": "if not _in_clean_repo():<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise temple.exceptions.InDirtyRepoError(msg)<EOL><DEDENT>", "docstring": "Raises `InDirtyRepoError` if inside a dirty repository", "id": "f13751:m5"}
{"signature": "def is_temple_project():", "body": "if not os.path.exists(temple.constants.TEMPLE_CONFIG_FILE):<EOL><INDENT>msg = '<STR_LIT>'.format(temple.constants.TEMPLE_CONFIG_FILE)<EOL>raise temple.exceptions.InvalidTempleProjectError(msg)<EOL><DEDENT>", "docstring": "Raises `InvalidTempleProjectError` if repository is not a temple project", "id": "f13751:m9"}
{"signature": "def _in_git_repo():", "body": "ret = temple.utils.shell('<STR_LIT>', stderr=subprocess.DEVNULL, check=False)<EOL>return ret.returncode == <NUM_LIT:0><EOL>", "docstring": "Returns True if inside a git repo, False otherwise", "id": "f13751:m1"}
{"signature": "def _code_search(query, github_user=None):", "body": "github_client = temple.utils.GithubClient()<EOL>headers = {'<STR_LIT>': '<STR_LIT>'}<EOL>resp = github_client.get('<STR_LIT>',<EOL>params={'<STR_LIT:q>': query, '<STR_LIT>': <NUM_LIT:100>},<EOL>headers=headers)<EOL>if resp.status_code == requests.codes.unprocessable_entity and github_user:<EOL><INDENT>raise temple.exceptions.InvalidGithubUserError(<EOL>'<STR_LIT>'.format(github_user))<EOL><DEDENT>resp.raise_for_status()<EOL>resp_data = resp.json()<EOL>repositories = collections.defaultdict(dict)<EOL>while True:<EOL><INDENT>repositories.update({<EOL>'<STR_LIT>'.format(repo['<STR_LIT>']['<STR_LIT>']): repo['<STR_LIT>']<EOL>for repo in resp_data['<STR_LIT>']<EOL>})<EOL>next_url = _parse_link_header(resp.headers).get('<STR_LIT>')<EOL>if next_url:<EOL><INDENT>resp = requests.get(next_url, headers=headers)<EOL>resp.raise_for_status()<EOL>resp_data = resp.json()<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return repositories<EOL>", "docstring": "Performs a Github API code search\n\n    Args:\n        query (str): The query sent to Github's code search\n        github_user (str, optional): The Github user being searched in the query string\n\n    Returns:\n        dict: A dictionary of repository information keyed on the git SSH url\n\n    Raises:\n        `InvalidGithubUserError`: When ``github_user`` is invalid", "id": "f13753:m1"}
{"signature": "def _parse_link_header(headers):", "body": "links = {}<EOL>if '<STR_LIT>' in headers:<EOL><INDENT>link_headers = headers['<STR_LIT>'].split('<STR_LIT:U+002CU+0020>')<EOL>for link_header in link_headers:<EOL><INDENT>(url, rel) = link_header.split('<STR_LIT>')<EOL>url = url[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL>rel = rel[<NUM_LIT:5>:-<NUM_LIT:1>]<EOL>links[rel] = url<EOL><DEDENT><DEDENT>return links<EOL>", "docstring": "Parses Github's link header for pagination.\n\n    TODO eventually use a github client for this", "id": "f13753:m0"}
{"signature": "def write_temple_config(temple_config, template, version):", "body": "with open(temple.constants.TEMPLE_CONFIG_FILE, '<STR_LIT:w>') as temple_config_file:<EOL><INDENT>versioned_config = {<EOL>**temple_config,<EOL>**{'<STR_LIT>': version, '<STR_LIT>': template},<EOL>}<EOL>yaml.dump(versioned_config, temple_config_file, Dumper=yaml.SafeDumper)<EOL><DEDENT>", "docstring": "Writes the temple YAML configuration", "id": "f13754:m4"}
{"signature": "def get(self, url, **request_kwargs):", "body": "return self._call_api('<STR_LIT>', url, **request_kwargs)<EOL>", "docstring": "Github API get", "id": "f13754:c0:m2"}
{"signature": "def set_cmd_env_var(value):", "body": "def func_decorator(function):<EOL><INDENT>@functools.wraps(function)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>previous_cmd_env_var = os.getenv(temple.constants.TEMPLE_ENV_VAR)<EOL>os.environ[temple.constants.TEMPLE_ENV_VAR] = value<EOL>try:<EOL><INDENT>ret_val = function(*args, **kwargs)<EOL><DEDENT>finally:<EOL><INDENT>if previous_cmd_env_var is None:<EOL><INDENT>del os.environ[temple.constants.TEMPLE_ENV_VAR]<EOL><DEDENT>else:<EOL><INDENT>os.environ[temple.constants.TEMPLE_ENV_VAR] = previous_cmd_env_var<EOL><DEDENT><DEDENT>return ret_val<EOL><DEDENT>return wrapper<EOL><DEDENT>return func_decorator<EOL>", "docstring": "Decorator that sets the temple command env var to value", "id": "f13754:m6"}
{"signature": "def _generate_files(repo_dir, config, template, version):", "body": "with unittest.mock.patch('<STR_LIT>', side_effect=_patched_run_hook):<EOL><INDENT>cc_generate.generate_files(repo_dir=repo_dir,<EOL>context={'<STR_LIT>': config,<EOL>'<STR_LIT>': template,<EOL>'<STR_LIT:version>': version},<EOL>overwrite_if_exists=False,<EOL>output_dir='<STR_LIT:.>')<EOL><DEDENT>", "docstring": "Uses cookiecutter to generate files for the project.\n\n    Monkeypatches cookiecutter's \"run_hook\" to ensure that the temple.yaml file is\n    generated before any hooks run. This is important to ensure that hooks can also\n    perform any actions involving temple.yaml", "id": "f13755:m1"}
{"signature": "def _patched_run_hook(hook_name, project_dir, context):", "body": "if hook_name == '<STR_LIT>':<EOL><INDENT>with temple.utils.cd(project_dir):<EOL><INDENT>temple.utils.write_temple_config(context['<STR_LIT>'],<EOL>context['<STR_LIT>'],<EOL>context['<STR_LIT:version>'])<EOL><DEDENT><DEDENT>return cc_hooks.run_hook(hook_name, project_dir, context)<EOL>", "docstring": "Used to patch cookiecutter's ``run_hook`` function.\n\n    This patched version ensures that the temple.yaml file is created before\n    any cookiecutter hooks are executed", "id": "f13755:m0"}
{"signature": "@contextlib.contextmanager<EOL>def _setenv(key, value):", "body": "old_value = os.environ.get(key, None)<EOL>if value is None:<EOL><INDENT>os.environ.pop(key, None)<EOL><DEDENT>else:<EOL><INDENT>os.environ[key] = value<EOL><DEDENT>yield<EOL>if old_value is None:<EOL><INDENT>os.environ.pop(key, None)<EOL><DEDENT>else:<EOL><INDENT>os.environ[key] = value<EOL><DEDENT>", "docstring": "Context manager to set an environment variable temporarily.", "id": "f13759:m0"}
{"signature": "@decorator<EOL>def gzip(f, *args, **kwargs):", "body": "data = f(*args, **kwargs)<EOL>if isinstance(data, Response):<EOL><INDENT>content = data.data<EOL><DEDENT>else:<EOL><INDENT>content = data<EOL><DEDENT>gzip_buffer = BytesIO()<EOL>gzip_file = gzip2.GzipFile(<EOL>mode='<STR_LIT:wb>',<EOL>compresslevel=<NUM_LIT:4>,<EOL>fileobj=gzip_buffer<EOL>)<EOL>gzip_file.write(content)<EOL>gzip_file.close()<EOL>gzip_data = gzip_buffer.getvalue()<EOL>if isinstance(data, Response):<EOL><INDENT>data.data = gzip_data<EOL>data.headers['<STR_LIT>'] = '<STR_LIT>'<EOL>data.headers['<STR_LIT>'] = str(len(data.data))<EOL>return data<EOL><DEDENT>return gzip_data<EOL>", "docstring": "GZip Flask Response Decorator.", "id": "f13762:m1"}
{"signature": "def get_files(request):", "body": "files = dict()<EOL>for k, v in request.files.items():<EOL><INDENT>content_type = (<EOL>request.files[k].content_type or '<STR_LIT>'<EOL>)<EOL>val = json_safe(v.read(), content_type)<EOL>if files.get(k):<EOL><INDENT>if not isinstance(files[k], list):<EOL><INDENT>files[k] = [files[k]]<EOL><DEDENT>files[k].append(val)<EOL><DEDENT>else:<EOL><INDENT>files[k] = val<EOL><DEDENT><DEDENT>return files<EOL>", "docstring": "Returns files dict from request context.", "id": "f13764:m1"}
{"signature": "def semiflatten(multi):", "body": "if multi:<EOL><INDENT>result = multi.to_dict()<EOL>for k, v in result.items():<EOL><INDENT>if len(v) == <NUM_LIT:1>:<EOL><INDENT>result[k] = v[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>return result<EOL><DEDENT>else:<EOL><INDENT>return multi<EOL><DEDENT>", "docstring": "Convert a MutiDict into a regular dict. If there are more than one value\n    for a key, the result will have a list of values for the key. Otherwise it\n    will have the plain value.", "id": "f13764:m3"}
{"signature": "def secure_cookie(request):", "body": "return request.environ['<STR_LIT>'] == '<STR_LIT>'<EOL>", "docstring": "Return true if cookie should have secure attribute", "id": "f13764:m13"}
{"signature": "def __parse_request_range(range_header_text):", "body": "left = None<EOL>right = None<EOL>if not range_header_text:<EOL><INDENT>return left, right<EOL><DEDENT>range_header_text = range_header_text.strip()<EOL>if not range_header_text.startswith('<STR_LIT>'):<EOL><INDENT>return left, right<EOL><DEDENT>components = range_header_text.split('<STR_LIT:=>')<EOL>if len(components) != <NUM_LIT:2>:<EOL><INDENT>return left, right<EOL><DEDENT>components = components[<NUM_LIT:1>].split('<STR_LIT:->')<EOL>try:<EOL><INDENT>right = int(components[<NUM_LIT:1>])<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>left = int(components[<NUM_LIT:0>])<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>return left, right<EOL>", "docstring": "Return a tuple describing the byte range requested in a GET request\n    If the range is open ended on the left or right side, then a value of None\n    will be set.\n    RFC7233:\n      http://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7233.html#header.range\n    Examples:\n      Range : bytes=1024-\n      Range : bytes=10-20\n      Range : bytes=-999", "id": "f13764:m14"}
{"signature": "def status_code(code):", "body": "redirect = dict(headers=dict(location=REDIRECT_LOCATION))<EOL>code_map = {<EOL><NUM_LIT>: redirect,<EOL><NUM_LIT>: redirect,<EOL><NUM_LIT>: redirect,<EOL><NUM_LIT>: dict(data='<STR_LIT>'),<EOL><NUM_LIT>: redirect,<EOL><NUM_LIT>: redirect,<EOL><NUM_LIT>: dict(headers={'<STR_LIT>': '<STR_LIT>'}),<EOL><NUM_LIT>: dict(<EOL>data='<STR_LIT>',<EOL>headers={<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>}<EOL>),<EOL><NUM_LIT>: dict(data=json.dumps({<EOL>'<STR_LIT:message>': '<STR_LIT>',<EOL>'<STR_LIT>': ACCEPTED_MEDIA_TYPES<EOL>}),<EOL>headers={<EOL>'<STR_LIT:Content-Type>': '<STR_LIT:application/json>'<EOL>}),<EOL><NUM_LIT>: dict(headers={'<STR_LIT>': '<STR_LIT>'}),<EOL><NUM_LIT>: dict(  <EOL>data=ASCII_ART,<EOL>headers={<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>}<EOL>),<EOL>}<EOL>r = Response()<EOL>r.status_code = code<EOL>if code in code_map:<EOL><INDENT>m = code_map[code]<EOL>if '<STR_LIT:data>' in m:<EOL><INDENT>r.data = m['<STR_LIT:data>']<EOL><DEDENT>if '<STR_LIT>' in m:<EOL><INDENT>r.headers = m['<STR_LIT>']<EOL><DEDENT><DEDENT>return r<EOL>", "docstring": "Returns response object of given status code.", "id": "f13764:m6"}
{"signature": "def get_headers(request, hide_env=True):", "body": "headers = dict(request.headers.items())<EOL>if hide_env and ('<STR_LIT>' not in request.args):<EOL><INDENT>for key in ENV_HEADERS:<EOL><INDENT>try:<EOL><INDENT>del headers[key]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>return CaseInsensitiveDict(headers.items())<EOL>", "docstring": "Returns headers dict from request context.", "id": "f13764:m2"}
{"signature": "def HA2(credentails, request):", "body": "if credentails.get('<STR_LIT>') == '<STR_LIT>' or credentails.get('<STR_LIT>') is None:<EOL><INDENT>return H(b'<STR_LIT::>'.join([request['<STR_LIT>'].encode('<STR_LIT:utf-8>'),<EOL>request['<STR_LIT>'].encode('<STR_LIT:utf-8>')]))<EOL><DEDENT>elif credentails.get('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>for k in '<STR_LIT>', '<STR_LIT>', '<STR_LIT:body>':<EOL><INDENT>if k not in request:<EOL><INDENT>raise ValueError('<STR_LIT>' % k)<EOL><DEDENT><DEDENT>return H('<STR_LIT>' % (request['<STR_LIT>'],<EOL>request['<STR_LIT>'],<EOL>H(request['<STR_LIT:body>'])))<EOL><DEDENT>raise ValueError<EOL>", "docstring": "Create HA2 md5 hash\n\n    If the qop directive's value is \"auth\" or is unspecified, then HA2:\n        HA2 = md5(A2) = MD5(method:digestURI)\n    If the qop directive's value is \"auth-int\" , then HA2 is\n        HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))", "id": "f13764:m10"}
{"signature": "def get_dict(request, *keys, **extras):", "body": "_keys = ('<STR_LIT:url>', '<STR_LIT:args>', '<STR_LIT>', '<STR_LIT:data>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>')<EOL>assert all(map(_keys.__contains__, keys))<EOL>data = request.data<EOL>form = request.form<EOL>form = semiflatten(request.form)<EOL>try:<EOL><INDENT>_json = json.loads(data.decode('<STR_LIT:utf-8>'))<EOL><DEDENT>except (ValueError, TypeError):<EOL><INDENT>_json = None<EOL><DEDENT>d = dict(<EOL>url=get_url(request),<EOL>args=semiflatten(request.args),<EOL>form=form,<EOL>data=json_safe(data),<EOL>origin=request.headers.get('<STR_LIT>', request.remote_addr),<EOL>headers=get_headers(request),<EOL>files=get_files(request),<EOL>json=_json<EOL>)<EOL>out_d = dict()<EOL>for key in keys:<EOL><INDENT>out_d[key] = d.get(key)<EOL><DEDENT>out_d.update(extras)<EOL>return out_d<EOL>", "docstring": "Returns request dict of given keys.", "id": "f13764:m5"}
{"signature": "@app.route('<STR_LIT>')<EOL>def delete_cookies(request):", "body": "cookies = dict(request.args.items())<EOL>r = app.make_response(redirect(url_for('<STR_LIT>')))<EOL>for key, value in cookies.items():<EOL><INDENT>r.delete_cookie(key=key)<EOL><DEDENT>return r<EOL>", "docstring": "Deletes cookie(s) as provided by the query string\n    and redirects to cookie list.", "id": "f13765:m28"}
{"signature": "@app.route('<STR_LIT>', methods=('<STR_LIT>',))<EOL>def view_patch(request):", "body": "return jsonify(get_dict(request, '<STR_LIT:url>', '<STR_LIT:args>', '<STR_LIT>', '<STR_LIT:data>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'))<EOL>", "docstring": "Returns PATCH Data.", "id": "f13765:m12"}
{"signature": "@app.route('<STR_LIT>')<EOL>def redirect_n_times(request, n):", "body": "n = int(n)<EOL>assert n > <NUM_LIT:0><EOL>absolute = request.args.get('<STR_LIT>', '<STR_LIT:false>').lower() == '<STR_LIT:true>'<EOL>if n == <NUM_LIT:1>:<EOL><INDENT>return redirect(app.url_for('<STR_LIT>', _request=request,<EOL>_external=absolute))<EOL><DEDENT>if absolute:<EOL><INDENT>return _redirect(request, '<STR_LIT>', n, True)<EOL><DEDENT>else:<EOL><INDENT>return _redirect(request, '<STR_LIT>', n, False)<EOL><DEDENT>", "docstring": "302 Redirects n times.", "id": "f13765:m16"}
{"signature": "@app.route('<STR_LIT>')<EOL>def stream_n_messages(request, n):", "body": "n = int(n)<EOL>response = get_dict(request, '<STR_LIT:url>', '<STR_LIT:args>', '<STR_LIT>', '<STR_LIT>')<EOL>n = min(n, <NUM_LIT:100>)<EOL>def generate_stream():<EOL><INDENT>for i in range(n):<EOL><INDENT>response['<STR_LIT:id>'] = i<EOL>yield json.dumps(response, default=json_dumps_default) + '<STR_LIT:\\n>'<EOL><DEDENT><DEDENT>return Response(generate_stream(), headers={<EOL>'<STR_LIT:Content-Type>': '<STR_LIT:application/json>',<EOL>})<EOL>", "docstring": "Stream n JSON messages", "id": "f13765:m21"}
{"signature": "@app.route('<STR_LIT>')<EOL>def link_page(request, n, offset):", "body": "n = int(n)<EOL>offset = int(offset)<EOL>n = min(max(<NUM_LIT:1>, n), <NUM_LIT:200>)  <EOL>link = \"<STR_LIT>\"<EOL>html = ['<STR_LIT>']<EOL>for i in xrange(n):<EOL><INDENT>if i == offset:<EOL><INDENT>html.append('<STR_LIT>'.format(i))<EOL><DEDENT>else:<EOL><INDENT>html.append(link.format(url_for('<STR_LIT>', n=n, offset=i), i))<EOL><DEDENT><DEDENT>html.append('<STR_LIT>')<EOL>return '<STR_LIT>'.join(html)<EOL>", "docstring": "Generate a page containing n links to other pages which do the same.", "id": "f13765:m41"}
{"signature": "@app.route('<STR_LIT>')<EOL>def drip(request):", "body": "args = CaseInsensitiveDict(request.args.items())<EOL>duration = float(args.get('<STR_LIT>', <NUM_LIT:2>))<EOL>numbytes = int(args.get('<STR_LIT>', <NUM_LIT:10>))<EOL>code = int(args.get('<STR_LIT:code>', <NUM_LIT:200>))<EOL>pause = duration / numbytes<EOL>delay = float(args.get('<STR_LIT>', <NUM_LIT:0>))<EOL>if delay > <NUM_LIT:0>:<EOL><INDENT>time.sleep(delay)<EOL><DEDENT>def generate_bytes():<EOL><INDENT>for i in xrange(numbytes):<EOL><INDENT>yield u'<STR_LIT:*>'.encode('<STR_LIT:utf-8>')<EOL>time.sleep(pause)<EOL><DEDENT><DEDENT>response = Response(generate_bytes(), headers={<EOL>'<STR_LIT:Content-Type>': '<STR_LIT>',<EOL>'<STR_LIT>': str(numbytes),<EOL>})<EOL>response.status_code = code<EOL>return response<EOL>", "docstring": "Drips data over a duration after an optional initial delay.", "id": "f13765:m33"}
{"signature": "@app.route('<STR_LIT>')<EOL>def absolute_redirect_n_times(request, n):", "body": "n = int(n)<EOL>assert n > <NUM_LIT:0><EOL>if n == <NUM_LIT:1>:<EOL><INDENT>return redirect(app.url_for('<STR_LIT>', _request=request,<EOL>_external=True))<EOL><DEDENT>return _redirect(request, '<STR_LIT>', n, True)<EOL>", "docstring": "302 Redirects n times.", "id": "f13765:m20"}
{"signature": "@app.route('<STR_LIT>')<EOL>def digest_auth(request, qop=None, user='<STR_LIT:user>', passwd='<STR_LIT>'):", "body": "if qop not in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>qop = None<EOL><DEDENT>if '<STR_LIT>' not in request.headers ornot check_digest_auth(user, passwd) or'<STR_LIT>' not in request.headers:<EOL><INDENT>response = app.make_response('<STR_LIT>')<EOL>response.status_code = <NUM_LIT><EOL>nonce = H(b'<STR_LIT>'.join([<EOL>getattr(request, '<STR_LIT>', u'<STR_LIT>').encode('<STR_LIT:ascii>'),<EOL>b'<STR_LIT::>',<EOL>str(time.time()).encode('<STR_LIT:ascii>'),<EOL>b'<STR_LIT::>',<EOL>os.urandom(<NUM_LIT:10>)<EOL>]))<EOL>opaque = H(os.urandom(<NUM_LIT:10>))<EOL>auth = WWWAuthenticate('<STR_LIT>')<EOL>auth.set_digest('<STR_LIT>', nonce, opaque=opaque,<EOL>qop=('<STR_LIT>', '<STR_LIT>') if qop is None else (qop, ))<EOL>response.headers['<STR_LIT>'] = auth.to_header()<EOL>response.headers['<STR_LIT>'] = '<STR_LIT>'<EOL>return response<EOL><DEDENT>return jsonify(authenticated=True, user=user)<EOL>", "docstring": "Prompts the user for authorization using HTTP Digest auth", "id": "f13765:m31"}
{"signature": "@app.route('<STR_LIT>')<EOL>def view_robots_page(request):", "body": "response = Response()<EOL>response.content = ROBOT_TXT<EOL>response.content_type = '<STR_LIT>'<EOL>return response<EOL>", "docstring": "Simple Html Page", "id": "f13765:m4"}
{"signature": "@app.route('<STR_LIT>')<EOL>def view_user_agent(request):", "body": "headers = get_headers(request)<EOL>return jsonify({'<STR_LIT>': headers['<STR_LIT>']})<EOL>", "docstring": "Returns User-Agent.", "id": "f13765:m8"}
{"signature": "@app.route('<STR_LIT>')<EOL>def image(request):", "body": "headers = get_headers(request)<EOL>if '<STR_LIT>' not in headers:<EOL><INDENT>return image_png(request)  <EOL><DEDENT>accept = headers['<STR_LIT>'].lower()<EOL>if '<STR_LIT>' in accept:<EOL><INDENT>return image_webp(request)<EOL><DEDENT>elif '<STR_LIT>' in accept:<EOL><INDENT>return image_svg(request)<EOL><DEDENT>elif '<STR_LIT>' in accept:<EOL><INDENT>return image_jpeg(request)<EOL><DEDENT>elif '<STR_LIT>' in accept or '<STR_LIT>' in accept:<EOL><INDENT>return image_png(request)<EOL><DEDENT>else:<EOL><INDENT>return status_code(<NUM_LIT>)<EOL><DEDENT>", "docstring": "Returns a simple image of the type suggest by the Accept header.", "id": "f13765:m43"}
{"signature": "@app.route('<STR_LIT>', methods=('<STR_LIT:GET>', '<STR_LIT>'))<EOL>def view_get(request):", "body": "return jsonify(get_dict(request, '<STR_LIT:url>', '<STR_LIT:args>', '<STR_LIT>', '<STR_LIT>'))<EOL>", "docstring": "Returns GET Data.", "id": "f13765:m9"}
{"signature": "@app.route('<STR_LIT>')<EOL>def cache_control(request, value):", "body": "value = int(value)<EOL>response = view_get(request)<EOL>response.headers['<STR_LIT>'] = '<STR_LIT>'.format(value)<EOL>return response<EOL>", "docstring": "Sets a Cache-Control header.", "id": "f13765:m36"}
{"signature": "@app.route('<STR_LIT:/>')<EOL>def view_landing_page(request):", "body": "tracking_enabled = '<STR_LIT>' in os.environ<EOL>return render_template('<STR_LIT>', request=request,<EOL>tracking_enabled=tracking_enabled)<EOL>", "docstring": "Generates Landing Page.", "id": "f13765:m2"}
{"signature": "@app.route('<STR_LIT>')<EOL>def decode_base64(request, value):", "body": "encoded = value.encode('<STR_LIT:utf-8>')  <EOL>return base64.urlsafe_b64decode(encoded).decode('<STR_LIT:utf-8>')<EOL>", "docstring": "Decodes base64url-encoded string", "id": "f13765:m34"}
{"signature": "@app.route('<STR_LIT>', methods=('<STR_LIT:POST>',))<EOL>def view_post(request):", "body": "return jsonify(get_dict(request, '<STR_LIT:url>', '<STR_LIT:args>', '<STR_LIT>', '<STR_LIT:data>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'))<EOL>", "docstring": "Returns POST Data.", "id": "f13765:m10"}
{"signature": "def weighted_choice(choices):", "body": "values, weights = zip(*choices)<EOL>total = <NUM_LIT:0><EOL>cum_weights = []<EOL>for w in weights:<EOL><INDENT>total += w<EOL>cum_weights.append(total)<EOL><DEDENT>x = random.uniform(<NUM_LIT:0>, total)<EOL>i = bisect.bisect(cum_weights, x)<EOL>return values[i]<EOL>", "docstring": "Returns a value from choices chosen by weighted random selection\n\n    choices should be a list of (value, weight) tuples.\n\n    eg. weighted_choice([('val1', 5), ('val2', 0.3), ('val3', 1)])", "id": "f13766:m0"}
{"signature": "def setup_environ(self):", "body": "<EOL>env = self.base_environ = {}<EOL>env['<STR_LIT>'] = self.server_name<EOL>env['<STR_LIT>'] = '<STR_LIT>'<EOL>env['<STR_LIT>'] = str(self.server_port)<EOL>env['<STR_LIT>'] = '<STR_LIT>'<EOL>env['<STR_LIT>'] = '<STR_LIT>'<EOL>env['<STR_LIT>'] = '<STR_LIT>'<EOL>", "docstring": "https://www.python.org/dev/peps/pep-0333/#environ-variables", "id": "f13788:c0:m3"}
{"signature": "@classmethod<EOL><INDENT>def _replace_type_to_regex(cls, match):<DEDENT>", "body": "groupdict = match.groupdict()<EOL>_type = groupdict.get('<STR_LIT:type>')<EOL>type_regex = cls.TYPE_REGEX_MAP.get(_type, '<STR_LIT>')<EOL>name = groupdict.get('<STR_LIT:name>')<EOL>return r'<STR_LIT>'.format(<EOL>name=name, type_regex=type_regex<EOL>)<EOL>", "docstring": "/<int:id>  -> r'(?P<id>\\d+)", "id": "f13789:c1:m3"}
{"signature": "def get_func(self, path):", "body": "for url_match, func_pair in self._urls_regex_map.items():<EOL><INDENT>m = url_match.match(path)<EOL>if m is not None:<EOL><INDENT>return func_pair.func, func_pair.methods, m.groupdict()<EOL><DEDENT><DEDENT>return None, None, None<EOL>", "docstring": ":return: (func, methods)", "id": "f13789:c0:m2"}
{"signature": "def response_status_string(code):", "body": "mean = HTTP_STATUS_CODES.get(code, '<STR_LIT>').upper()<EOL>return '<STR_LIT>'.format(code=code, mean=mean)<EOL>", "docstring": "e.g. ``200 OK``", "id": "f13791:m1"}
{"signature": "def cookie_dump(key, value='<STR_LIT>', max_age=None, expires=None, path='<STR_LIT:/>',<EOL>domain=None, secure=False, httponly=False):", "body": "cookie = SimpleCookie()<EOL>cookie[key] = value<EOL>for attr in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT:path>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>attr_key = attr.replace('<STR_LIT:_>', '<STR_LIT:->')<EOL>attr_value = locals()[attr]<EOL>if attr_value:<EOL><INDENT>cookie[key][attr_key] = attr_value<EOL><DEDENT><DEDENT>return cookie<EOL>", "docstring": ":rtype: ``Cookie.SimpleCookie``", "id": "f13791:m0"}
{"signature": "@property<EOL><INDENT>def cookies(self):<DEDENT>", "body": "http_cookie = self.environ.get('<STR_LIT>', '<STR_LIT>')<EOL>_cookies = {<EOL>k: v.value<EOL>for (k, v) in SimpleCookie(http_cookie).items()<EOL>}<EOL>return _cookies<EOL>", "docstring": "Request cookies\n\n        :rtype: dict", "id": "f13791:c0:m11"}
{"signature": "def cleanup_extra_whitespaces(self, text):", "body": "return re.sub(r'<STR_LIT>', r'<STR_LIT>', text)<EOL>", "docstring": "cleanup extra whitespaces let numbers of whitespaces <=1", "id": "f13792:c1:m15"}
{"signature": "def in_(self, value):", "body": "return '<STR_LIT>'.format(self), value<EOL>", "docstring": ":type value: tuple", "id": "f13797:c1:m14"}
{"signature": "def __call__(self, environ, start_response):", "body": "self.start_response = start_response<EOL>path = environ['<STR_LIT>']<EOL>method = environ['<STR_LIT>']<EOL>func, methods, func_kwargs = self.url_resolve(path)<EOL>try:<EOL><INDENT>if func is None:<EOL><INDENT>self.notfound()<EOL><DEDENT>if method not in methods:<EOL><INDENT>self.abort(<NUM_LIT>)<EOL><DEDENT>request = Request(environ)<EOL>result = self.handle_before_request_hooks(request, view_func=func)<EOL>if isinstance(result, Response):<EOL><INDENT>response = result<EOL><DEDENT>else:<EOL><INDENT>response = self.handle_view(request, func, func_kwargs)<EOL><DEDENT>self.handle_after_request_hooks(request, response, view_func=func)<EOL><DEDENT>except HTTPException as ex:<EOL><INDENT>response = ex.response<EOL><DEDENT>return self._start_response(response)<EOL>", "docstring": "for wsgi server", "id": "f13798:c0:m5"}
{"signature": "def get_metadata(self):", "body": "return self.metadata<EOL>", "docstring": "Get some metadata on the provider or the vocab it represents.\n\n        :rtype: Dict.", "id": "f13799:c0:m6"}
{"signature": "def _get_language(self, **kwargs):", "body": "return kwargs.get(<EOL>'<STR_LIT>',<EOL>self.metadata.get('<STR_LIT>', '<STR_LIT>')<EOL>)<EOL>", "docstring": "Determine what language to render labels in.\n\n        Will first check if there's a language keyword specified in **kwargs.\n        If not, will check the default language of the provider. If there's no\n        default language, will fall back to 'en'.\n\n        :rtype: str", "id": "f13799:c0:m1"}
{"signature": "def get_children_display(self, id, **kwargs):", "body": "", "docstring": "Return a list of concepts or collections that should be displayed\nunder this concept or collection.\n\n:param string language: Optional. If present, it should be a\n    :term:`language-tag`. This language-tag is passed on to the\n    underlying providers and used when selecting the label to display\n    for each concept.\n:param string sort: Optional. If present, it should either be `id`,\n    `label` or `sortlabel`. The `sortlabel` option means the providers should\n    take into account any `sortLabel` if present, if not it will\n    fallback to a regular label to sort on.\n:param string sort_order: Optional. What order to sort in: `asc` or\n    `desc`. Defaults to `asc`\n\n:param str id: A concept or collection id.\n:returns: A :class:`lst` of concepts and collections. Each of these\n    is a dict with the following keys:\n\n    * id: id within the conceptscheme\n    * uri: :term:`uri` of the concept or collection\n    * type: concept or collection\n    * label: A label to represent the concept or collection. It is \\\n        determined by looking at the `language` parameter, the default \\\n        language of the provider and finally falls back to `en`.", "id": "f13799:c0:m14"}
{"signature": "def _include_in_find(self, c, query):", "body": "include = True<EOL>if include and '<STR_LIT:type>' in query:<EOL><INDENT>include = query['<STR_LIT:type>'] == c.type<EOL><DEDENT>if include and '<STR_LIT:label>' in query:<EOL><INDENT>def finder(l, query):<EOL><INDENT>if not self.case_insensitive:<EOL><INDENT>return l.label.find(query['<STR_LIT:label>'])<EOL><DEDENT>else:<EOL><INDENT>return l.label.upper().find(query['<STR_LIT:label>'].upper())<EOL><DEDENT><DEDENT>include = any([finder(l, query) >= <NUM_LIT:0> for l in c.labels])<EOL><DEDENT>if include and '<STR_LIT>' in query:<EOL><INDENT>coll = self.get_by_id(query['<STR_LIT>']['<STR_LIT:id>'])<EOL>if not coll or not isinstance(coll, Collection):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>if '<STR_LIT>' in query['<STR_LIT>'] and query['<STR_LIT>']['<STR_LIT>'] == '<STR_LIT:all>':<EOL><INDENT>members = self.expand(coll.id)<EOL><DEDENT>else:<EOL><INDENT>members = coll.members<EOL><DEDENT>include = any([True for id in members if str(id) == str(c.id)]) <EOL><DEDENT>return include<EOL>", "docstring": ":param c: A :class:`skosprovider.skos.Concept` or\n    :class:`skosprovider.skos.Collection`.\n:param query: A dict that can be used to express a query.\n:rtype: boolean", "id": "f13799:c1:m5"}
{"signature": "def _get_sort(self, **kwargs):", "body": "return kwargs.get('<STR_LIT>', None)<EOL>", "docstring": "Determine on what attribute to sort.\n\n        :rtype: str", "id": "f13799:c0:m2"}
{"signature": "def _sort(self, concepts, sort=None, language='<STR_LIT>', reverse=False):", "body": "sorted = copy.copy(concepts)<EOL>if sort:<EOL><INDENT>sorted.sort(key=methodcaller('<STR_LIT>', sort, language), reverse=reverse)<EOL><DEDENT>return sorted<EOL>", "docstring": "Returns a sorted version of a list of concepts. Will leave the original\nlist unsorted.\n\n:param list concepts: A list of concepts and collections.\n:param string sort: What to sort on: `id`, `label` or `sortlabel`\n:param string language: Language to use when sorting on `label` or\n    `sortlabel`.\n:param boolean reverse: Reverse the sort order?\n:rtype: list", "id": "f13799:c0:m4"}
{"signature": "def __init__(self, metadata, reader, **kwargs):", "body": "super(SimpleCsvProvider, self).__init__(metadata, [], **kwargs)<EOL>self.list = [self._from_row(row) for row in reader]<EOL>", "docstring": ":param metadata: A metadata dictionary.\n:param reader: A csv reader.", "id": "f13799:c3:m0"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def find(self, query, **kwargs):<DEDENT>", "body": "", "docstring": "Find concepts that match a certain query.\n\n        Currently query is expected to be a dict, so that complex queries can\n        be passed. You can use this dict to search for concepts or collections\n        with a certain label, with a certain type and for concepts that belong\n        to a certain collection.\n\n        .. code-block:: python\n\n            # Find anything that has a label of church.\n            provider.find({'label': 'church'})\n\n            # Find all concepts that are a part of collection 5.\n            provider.find({'type': 'concept', 'collection': {'id': 5})\n\n            # Find all concepts, collections or children of these\n            # that belong to collection 5.\n            provider.find({'collection': {'id': 5, 'depth': 'all'})\n\n            # Find anything that has a label of church.\n            # Preferentially display a label in Dutch.\n            provider.find({'label': 'church'}, language='nl')\n\n        :param query: A dict that can be used to express a query. The following\n            keys are permitted:\n\n            * `label`: Search for something with this label value. An empty \\\n                label is equal to searching for all concepts.\n            * `type`: Limit the search to certain SKOS elements. If not \\\n                present or `None`, `all` is assumed:\n\n                * `concept`: Only return :class:`skosprovider.skos.Concept` \\\n                    instances.\n                * `collection`: Only return \\\n                    :class:`skosprovider.skos.Collection` instances.\n                * `all`: Return both :class:`skosprovider.skos.Concept` and \\\n                    :class:`skosprovider.skos.Collection` instances.\n            * `collection`: Search only for concepts belonging to a certain \\\n                collection. 
This argument should be a dict with two keys:\n\n                * `id`: The id of a collection. Required.\n                * `depth`: Can be `members` or `all`. Optional. If not \\\n                    present, `members` is assumed, meaning only concepts or \\\n                    collections that are a direct member of the collection \\\n                    should be considered. When set to `all`, this method \\\n                    should return concepts and collections that are a member \\\n                    of the collection or are a narrower concept of a member \\\n                    of the collection.\n\n        :param string language: Optional. If present, it should be a\n            :term:`language-tag`. This language-tag is passed on to the\n            underlying providers and used when selecting the label to display\n            for each concept.\n        :param string sort: Optional. If present, it should either be `id`,\n            `label` or `sortlabel`. The `sortlabel` option means the providers should\n            take into account any `sortLabel` if present, if not it will\n            fallback to a regular label to sort on.\n        :param string sort_order: Optional. What order to sort in: `asc` or\n            `desc`. Defaults to `asc`\n\n        :returns: A :class:`lst` of concepts and collections. Each of these\n            is a dict with the following keys:\n\n            * id: id within the conceptscheme\n            * uri: :term:`uri` of the concept or collection\n            * type: concept or collection\n            * label: A label to represent the concept or collection. It is \\\n                determined by looking at the `language` parameter, the default \\\n                language of the provider and finally falls back to `en`.", "id": "f13799:c0:m11"}
{"signature": "def label(self, language='<STR_LIT>'):", "body": "return label(self.labels, language)<EOL>", "docstring": "Provide a single label for this conceptscheme.\n\nThis uses the :func:`label` function to determine which label to\nreturn.\n\n:param string language: The preferred language to receive the label in.\n    This should be a valid IANA language tag.\n:rtype: :class:`skosprovider.skos.Label` or False if no labels were found.", "id": "f13800:c3:m1"}
{"signature": "def dict_to_note(dict):", "body": "if isinstance(dict, Note):<EOL><INDENT>return dict<EOL><DEDENT>return Note(<EOL>dict['<STR_LIT>'],<EOL>dict.get('<STR_LIT:type>', '<STR_LIT>'),<EOL>dict.get('<STR_LIT>', '<STR_LIT>'),<EOL>dict.get('<STR_LIT>')<EOL>)<EOL>", "docstring": "Transform a dict with keys `note`, `type` and `language` into a\n:class:`Note`.\n\nOnly the `note` key is mandatory. If `type` is not present, it will\ndefault to `note`. If `language` is not present, it will default to `und`.\nIf `markup` is not present it will default to `None`.\n\nIf the argument passed is already a :class:`Note`, this method just returns\nthe argument.", "id": "f13800:m4"}
{"signature": "def label(labels=[], language='<STR_LIT>', sortLabel=False):", "body": "if not labels:<EOL><INDENT>return None<EOL><DEDENT>if not language:<EOL><INDENT>language = '<STR_LIT>'<EOL><DEDENT>labels = [dict_to_label(l) for l in labels]<EOL>l = False<EOL>if sortLabel:<EOL><INDENT>l = find_best_label_for_type(labels, language, '<STR_LIT>')<EOL><DEDENT>if not l:<EOL><INDENT>l = find_best_label_for_type(labels, language, '<STR_LIT>')<EOL><DEDENT>if not l:<EOL><INDENT>l = find_best_label_for_type(labels, language, '<STR_LIT>')<EOL><DEDENT>if l:<EOL><INDENT>return l<EOL><DEDENT>else:<EOL><INDENT>return label(labels, '<STR_LIT>', sortLabel) if language != '<STR_LIT>' else None<EOL><DEDENT>", "docstring": "Provide a label for a list of labels.\n\nThe items in the list of labels are assumed to be either instances of\n:class:`Label`, or dicts with at least the key `label` in them. These will\nbe passed to the :func:`dict_to_label` function.\n\nThis method tries to find a label by looking if there's\na pref label for the specified language. If there's no pref label,\nit looks for an alt label. It disregards hidden labels.\n\nWhile matching languages, preference will be given to exact matches. But,\nif no exact match is present, an inexact match will be attempted. This might\nbe because a label in language `nl-BE` is being requested, but only `nl` or\neven `nl-NL` is present. 
Similarly, when requesting `nl`, a label with\nlanguage `nl-NL` or even `nl-Latn-NL` will also be considered,\nproviding no label is present that has an exact match with the\nrequested language.\n\nIf language 'any' was specified, all labels will be considered,\nregardless of language.\n\nTo find a label without a specified language, pass `None` as language.\n\nIf a language or None was specified, and no label could be found, this\nmethod will automatically try to find a label in some other language.\n\nFinally, if no label could be found, None is returned.\n\n:param string language: The preferred language to receive the label in. This\n    should be a valid IANA language tag.\n:param boolean sortLabel: Should sortLabels be considered or not? If True,\n    sortLabels will be preferred over prefLabels. Bear in mind that these\n    are still language dependent. So, it's possible to have a different\n    sortLabel per language.\n:rtype: A :class:`Label` or `None` if no label could be found.", "id": "f13800:m0"}
{"signature": "def dict_to_label(dict):", "body": "try:<EOL><INDENT>return Label(<EOL>dict['<STR_LIT:label>'],<EOL>dict.get('<STR_LIT:type>', '<STR_LIT>'),<EOL>dict.get('<STR_LIT>', '<STR_LIT>')<EOL>)<EOL><DEDENT>except (KeyError, AttributeError, TypeError):<EOL><INDENT>return dict<EOL><DEDENT>", "docstring": "Transform a dict with keys `label`, `type` and `language` into a\n:class:`Label`.\n\nOnly the `label` key is mandatory. If `type` is not present, it will\ndefault to `prefLabel`. If `language` is not present, it will default\nto `und`.\n\nIf the argument passed is not a dict, this method just\nreturns the argument.", "id": "f13800:m3"}
{"signature": "def _sortkey(self, key='<STR_LIT:id>', language='<STR_LIT>'):", "body": "if key == '<STR_LIT:id>':<EOL><INDENT>return str(self.id)<EOL><DEDENT>elif key == '<STR_LIT>':<EOL><INDENT>return self.uri if self.uri else '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>l = label(self.labels, language, key == '<STR_LIT>')<EOL>return l.label.lower() if l else '<STR_LIT>'<EOL><DEDENT>", "docstring": "Provide a single sortkey for this collection.\n\n:param string key: Either `id`, `uri`, `label` or `sortlabel`.\n:param string language: The preferred language to receive the label in\n    if key is `label` or `sortlabel`. This should be a valid IANA language tag.\n:rtype: :class:`str`", "id": "f13800:c4:m2"}
{"signature": "@staticmethod<EOL><INDENT>def is_valid_type(type):<DEDENT>", "body": "return type in Label.valid_types<EOL>", "docstring": "Check if the argument is a valid SKOS label type.\n\n:param string type: The type to be checked.", "id": "f13800:c0:m3"}
{"signature": "def find_best_label_for_type(labels, language, labeltype):", "body": "typelabels = [l for l in labels if l.type == labeltype]<EOL>if not typelabels:<EOL><INDENT>return False<EOL><DEDENT>if language == '<STR_LIT>':<EOL><INDENT>return typelabels[<NUM_LIT:0>]<EOL><DEDENT>exact = filter_labels_by_language(typelabels, language)<EOL>if exact:<EOL><INDENT>return exact[<NUM_LIT:0>]<EOL><DEDENT>inexact = filter_labels_by_language(typelabels, language, True)<EOL>if inexact:<EOL><INDENT>return inexact[<NUM_LIT:0>]<EOL><DEDENT>return False<EOL>", "docstring": "Find the best label for a certain labeltype.\n\n:param list labels: A list of :class:`Label`.\n:param str language: An IANA language string, eg. `nl` or `nl-BE`.\n:param str labeltype: Type of label to look for, eg. `prefLabel`.", "id": "f13800:m1"}
{"signature": "def generate(self, **kwargs):", "body": "return (self.pattern % (self.vocabulary_id, kwargs['<STR_LIT:id>'])).lower()<EOL>", "docstring": "Generate a :term:`URI` based on parameters passed.\n\n:param id: The id of the concept or collection.\n:rtype: string", "id": "f13801:c2:m1"}
{"signature": "def generate(self, **kwargs):", "body": "if kwargs['<STR_LIT:type>'] not in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>raise ValueError('<STR_LIT>' % kwargs['<STR_LIT:type>'])<EOL><DEDENT>return (<EOL>self.pattern % (self.vocabulary_id, kwargs['<STR_LIT:type>'], kwargs['<STR_LIT:id>'])<EOL>).lower()<EOL>", "docstring": "Generate a :term:`URI` based on parameters passed.\n\n:param id: The id of the concept or collection.\n:param type: What we're generating a :term:`URI` for: `concept`\n    or `collection`.\n:rtype: string", "id": "f13801:c4:m1"}
{"signature": "def generate(self, **kwargs):", "body": "return self.pattern % kwargs['<STR_LIT:id>']<EOL>", "docstring": "Generate a :term:`URI` based on parameters passed.\n\n:param id: The id of the concept or collection.\n:rtype: string", "id": "f13801:c1:m1"}
{"signature": "def get_provider(self, id):", "body": "if id in self.providers:<EOL><INDENT>return self.providers.get(id, False)<EOL><DEDENT>elif is_uri(id) and id in self.concept_scheme_uri_map:<EOL><INDENT>return self.providers.get(self.concept_scheme_uri_map[id], False)<EOL><DEDENT>return False<EOL>", "docstring": "Get a provider by id or :term:`uri`.\n\n:param str id: The identifier for the provider. This can either be the\n    id with which it was registered or the :term:`uri` of the conceptscheme\n    that the provider services.\n:returns: A :class:`skosprovider.providers.VocabularyProvider`\n    or `False` if the id or uri is unknown.", "id": "f13802:c1:m3"}
{"signature": "def register_provider(self, provider):", "body": "if provider.get_vocabulary_id() in self.providers:<EOL><INDENT>raise RegistryException(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>self.providers[provider.get_vocabulary_id()] = provider<EOL>if provider.concept_scheme.uri in self.concept_scheme_uri_map:<EOL><INDENT>raise RegistryException(<EOL>'<STR_LIT>' % provider.concept_scheme.uri<EOL>)<EOL><DEDENT>self.concept_scheme_uri_map[provider.concept_scheme.uri] = provider.get_vocabulary_id()<EOL>", "docstring": "Register a :class:`skosprovider.providers.VocabularyProvider`.\n\n:param skosprovider.providers.VocabularyProvider provider: The provider\n    to register.\n:raises RegistryException: A provider with this id or uri has already \n    been registered.", "id": "f13802:c1:m1"}
{"signature": "def __init__(self, message):", "body": "self.message = message<EOL>", "docstring": ":param message: More information about the exception.", "id": "f13804:c0:m0"}
{"signature": "def includeme(config):", "body": "settings = config.registry.settings<EOL>if asbool(settings.get('<STR_LIT>', True)):<EOL><INDENT>LOGGER.debug('<STR_LIT>')<EOL>config.include('<STR_LIT>')<EOL>config.include('<STR_LIT>')<EOL>config.include('<STR_LIT>')<EOL>config.include('<STR_LIT>')<EOL>config.add_xmlrpc_endpoint('<STR_LIT>', '<STR_LIT>')<EOL>config.add_xmlrpc_method(RPCInterface, attr='<STR_LIT>', endpoint='<STR_LIT>', method='<STR_LIT>')<EOL>config.add_xmlrpc_method(RPCInterface, attr='<STR_LIT>', endpoint='<STR_LIT>', method='<STR_LIT>')<EOL>config.add_xmlrpc_method(RPCInterface, attr='<STR_LIT>', endpoint='<STR_LIT>', method='<STR_LIT>')<EOL>config.add_xmlrpc_method(RPCInterface, attr='<STR_LIT>', endpoint='<STR_LIT>', method='<STR_LIT>')<EOL>config.add_xmlrpc_method(RPCInterface, attr='<STR_LIT>', endpoint='<STR_LIT>', method='<STR_LIT>')<EOL>config.add_xmlrpc_method(RPCInterface, attr='<STR_LIT>', endpoint='<STR_LIT>', method='<STR_LIT>')<EOL>config.add_xmlrpc_method(RPCInterface, attr='<STR_LIT>', endpoint='<STR_LIT>', method='<STR_LIT>')<EOL>config.add_xmlrpc_method(RPCInterface, attr='<STR_LIT>', endpoint='<STR_LIT>', method='<STR_LIT>')<EOL>config.add_xmlrpc_method(RPCInterface, attr='<STR_LIT>', endpoint='<STR_LIT>', method='<STR_LIT>')<EOL><DEDENT>", "docstring": "The callable makes it possible to include rpcinterface\n    in a Pyramid application.\n\n    Calling ``config.include(twitcher.rpcinterface)`` will result in this\n    callable being called.\n\n    Arguments:\n\n    * ``config``: the ``pyramid.config.Configurator`` object.", "id": "f13833:m0"}
{"signature": "def unregister_service(self, name):", "body": "return self.srvreg.unregister_service(name)<EOL>", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.unregister_service`.", "id": "f13833:c0:m5"}
{"signature": "def get_service_by_name(self, name):", "body": "return self.srvreg.get_service_by_name(name)<EOL>", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.get_service_by_name`.", "id": "f13833:c0:m6"}
{"signature": "def get_service_by_url(self, url):", "body": "return self.srvreg.get_service_by_url(url)<EOL>", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.get_service_by_url`.", "id": "f13833:c0:m7"}
{"signature": "def revoke_all_tokens(self):", "body": "return self.tokenmgr.revoke_all_tokens()<EOL>", "docstring": "Implementation of :meth:`twitcher.api.ITokenManager.revoke_all_tokens`.", "id": "f13833:c0:m3"}
{"signature": "def list_services(self):", "body": "return self.srvreg.list_services()<EOL>", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.list_services`.", "id": "f13833:c0:m8"}
{"signature": "def generate(self):", "body": "return uuid.uuid4().get_hex()<EOL>", "docstring": ":return: A new token", "id": "f13838:c1:m0"}
{"signature": "def ows_security_tween_factory(handler, registry):", "body": "security = owssecurity_factory(registry)<EOL>def ows_security_tween(request):<EOL><INDENT>try:<EOL><INDENT>security.check_request(request)<EOL>return handler(request)<EOL><DEDENT>except OWSException as err:<EOL><INDENT>logger.exception(\"<STR_LIT>\")<EOL>return err<EOL><DEDENT>except Exception as err:<EOL><INDENT>logger.exception(\"<STR_LIT>\")<EOL>return OWSNoApplicableCode(\"<STR_LIT:{}>\".format(err))<EOL><DEDENT><DEDENT>return ows_security_tween<EOL>", "docstring": "A tween factory which produces a tween which raises an exception\n    if access to OWS service is not allowed.", "id": "f13843:m1"}
{"signature": "def localize_datetime(dt, tz_name='<STR_LIT>'):", "body": "tz_aware_dt = dt<EOL>if dt.tzinfo is None:<EOL><INDENT>utc = pytz.timezone('<STR_LIT>')<EOL>aware = utc.localize(dt)<EOL>timezone = pytz.timezone(tz_name)<EOL>tz_aware_dt = aware.astimezone(timezone)<EOL><DEDENT>else:<EOL><INDENT>logger.warn('<STR_LIT>')<EOL><DEDENT>return tz_aware_dt<EOL>", "docstring": "Provide a timzeone-aware object for a given datetime and timezone name", "id": "f13844:m5"}
{"signature": "@property<EOL><INDENT>def url(self):<DEDENT>", "body": "return self['<STR_LIT:url>']<EOL>", "docstring": "Service URL.", "id": "f13845:c0:m1"}
{"signature": "@property<EOL><INDENT>def auth(self):<DEDENT>", "body": "return self.get('<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": "Authentication method: public, token, cert.", "id": "f13845:c0:m7"}
{"signature": "def is_expired(self):", "body": "if self.expires_at is None:<EOL><INDENT>return True<EOL><DEDENT>if self.expires_in > <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Determines if the token has expired.\n:return: `True` if the token has expired. Otherwise `False`.", "id": "f13845:c1:m4"}
{"signature": "@property<EOL><INDENT>def token(self):<DEDENT>", "body": "return self['<STR_LIT>']<EOL>", "docstring": "Access token string.", "id": "f13845:c1:m1"}
{"signature": "def has_purl(self):", "body": "return is_valid_url(self.purl)<EOL>", "docstring": "Return true if we have a valid public URL (purl).", "id": "f13845:c0:m5"}
{"signature": "def save_service(self, service, overwrite=True):", "body": "raise NotImplementedError<EOL>", "docstring": "Stores an OWS service in storage.\n\n:param service: An instance of :class:`twitcher.datatype.Service`.", "id": "f13846:c1:m0"}
{"signature": "def delete_service(self, name):", "body": "raise NotImplementedError<EOL>", "docstring": "Removes service from database.", "id": "f13846:c1:m1"}
{"signature": "def clear_tokens(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Removes all tokens from database.", "id": "f13846:c0:m3"}
{"signature": "def fetch_by_url(self, url):", "body": "raise NotImplementedError<EOL>", "docstring": "Get service for given ``url`` from storage.\n\n:param token: A string containing the service url.\n:return: An instance of :class:`twitcher.datatype.Service`.", "id": "f13846:c1:m4"}
{"signature": "def fetch_by_url(self, url):", "body": "service = self.collection.find_one({'<STR_LIT:url>': url})<EOL>if not service:<EOL><INDENT>raise ServiceNotFound<EOL><DEDENT>return Service(service)<EOL>", "docstring": "Gets service for given ``url`` from mongodb storage.", "id": "f13847:c2:m4"}
{"signature": "def delete_service(self, name):", "body": "self._delete(name=name)<EOL>return True<EOL>", "docstring": "Removes service from registry database.", "id": "f13848:c1:m4"}
{"signature": "def get_service_by_name(self, name):", "body": "raise NotImplementedError<EOL>", "docstring": "Gets service with given ``name`` from service store.", "id": "f13850:c1:m2"}
{"signature": "def clear_services(self):", "body": "try:<EOL><INDENT>self.store.clear_services()<EOL><DEDENT>except Exception:<EOL><INDENT>LOGGER.error('<STR_LIT>')<EOL>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.clear_services`.", "id": "f13850:c3:m6"}
{"signature": "def list_services(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Lists all registred OWS services.", "id": "f13850:c1:m4"}
{"signature": "def unregister_service(self, name):", "body": "raise NotImplementedError<EOL>", "docstring": "Removes OWS service with the given ``name`` from the service store.", "id": "f13850:c1:m1"}
{"signature": "def revoke_token(self, token):", "body": "try:<EOL><INDENT>self.store.delete_token(token)<EOL><DEDENT>except Exception:<EOL><INDENT>LOGGER.exception('<STR_LIT>')<EOL>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Implementation of :meth:`twitcher.api.ITokenManager.revoke_token`.", "id": "f13850:c2:m2"}
{"signature": "def register_service(self, url, data, overwrite):", "body": "raise NotImplementedError<EOL>", "docstring": "Adds an OWS service with the given ``url`` to the service store.\n\n:param data: a dict with additional information like ``name``.", "id": "f13850:c1:m0"}
{"signature": "def register_service(self, url, data=None, overwrite=True):", "body": "data = data or {}<EOL>args = dict(data)<EOL>args['<STR_LIT:url>'] = url<EOL>service = Service(**args)<EOL>service = self.store.save_service(service, overwrite=overwrite)<EOL>return service.params<EOL>", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.register_service`.", "id": "f13850:c3:m1"}
{"signature": "def get_service_by_url(self, url):", "body": "try:<EOL><INDENT>service = self.store.fetch_by_url(url=url)<EOL><DEDENT>except Exception:<EOL><INDENT>LOGGER.error('<STR_LIT>', url)<EOL>return {}<EOL><DEDENT>else:<EOL><INDENT>return service.params<EOL><DEDENT>", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.get_service_by_url`.", "id": "f13850:c3:m4"}
{"signature": "def revoke_all_tokens(self):", "body": "try:<EOL><INDENT>self.store.clear_tokens()<EOL><DEDENT>except Exception:<EOL><INDENT>LOGGER.exception('<STR_LIT>')<EOL>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Implementation of :meth:`twitcher.api.ITokenManager.revoke_all_tokens`.", "id": "f13850:c2:m3"}
{"signature": "def revoke_all_tokens(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Removes all tokens from tokenstore.", "id": "f13850:c0:m2"}
{"signature": "def get_service_by_name(self, name):", "body": "try:<EOL><INDENT>service = self.store.fetch_by_name(name=name)<EOL><DEDENT>except Exception:<EOL><INDENT>LOGGER.error('<STR_LIT>', name)<EOL>return {}<EOL><DEDENT>else:<EOL><INDENT>return service.params<EOL><DEDENT>", "docstring": "Implementation of :meth:`twitcher.api.IRegistry.get_service_by_name`.", "id": "f13850:c3:m3"}
{"signature": "def _get_request_type(self):", "body": "return self._get_param(param=\"<STR_LIT>\", allowed_values=allowed_request_types[self.params['<STR_LIT>']])<EOL>", "docstring": "Find requested request type in GET request.", "id": "f13851:c2:m3"}
{"signature": "def _get_param(self, param, allowed_values=None, optional=False):", "body": "request_params = self._request_params()<EOL>if param in request_params:<EOL><INDENT>value = request_params[param].lower()<EOL>if allowed_values is not None:<EOL><INDENT>if value in allowed_values:<EOL><INDENT>self.params[param] = value<EOL><DEDENT>else:<EOL><INDENT>raise OWSInvalidParameterValue(\"<STR_LIT>\" % (param, value), value=param)<EOL><DEDENT><DEDENT><DEDENT>elif optional:<EOL><INDENT>self.params[param] = None<EOL><DEDENT>else:<EOL><INDENT>raise OWSMissingParameterValue('<STR_LIT>' % param, value=param)<EOL><DEDENT>return self.params[param]<EOL>", "docstring": "Get parameter in GET request.", "id": "f13851:c2:m1"}
{"signature": "def _get_service(self):", "body": "if \"<STR_LIT>\" in self.document.attrib:<EOL><INDENT>value = self.document.attrib[\"<STR_LIT>\"].lower()<EOL>if value in allowed_service_types:<EOL><INDENT>self.params[\"<STR_LIT>\"] = value<EOL><DEDENT>else:<EOL><INDENT>raise OWSInvalidParameterValue(\"<STR_LIT>\" % value, value=\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise OWSMissingParameterValue('<STR_LIT>', value=\"<STR_LIT>\")<EOL><DEDENT>return self.params[\"<STR_LIT>\"]<EOL>", "docstring": "Check mandatory service name parameter in POST request.", "id": "f13851:c3:m1"}
{"signature": "def _get_version(self):", "body": "if \"<STR_LIT:version>\" in self.document.attrib:<EOL><INDENT>value = self.document.attrib[\"<STR_LIT:version>\"].lower()<EOL>if value in allowed_versions[self.params['<STR_LIT>']]:<EOL><INDENT>self.params[\"<STR_LIT:version>\"] = value<EOL><DEDENT>else:<EOL><INDENT>raise OWSInvalidParameterValue(\"<STR_LIT>\" % value, value=\"<STR_LIT:version>\")<EOL><DEDENT><DEDENT>elif self._get_request_type() == \"<STR_LIT>\":<EOL><INDENT>self.params[\"<STR_LIT:version>\"] = None<EOL><DEDENT>else:<EOL><INDENT>raise OWSMissingParameterValue('<STR_LIT>', value=\"<STR_LIT:version>\")<EOL><DEDENT>return self.params[\"<STR_LIT:version>\"]<EOL>", "docstring": "Find requested version in POST request.", "id": "f13851:c3:m3"}
{"signature": "def attrs_sqlalchemy(maybe_cls=None):", "body": "def wrap(cls):<EOL><INDENT>warnings.warn(UserWarning('<STR_LIT>'))<EOL>these = {<EOL>name: attr.ib()<EOL>for name in inspect(cls).columns.keys()<EOL>}<EOL>return attr.s(cls, these=these, init=False)<EOL><DEDENT>if maybe_cls is None:<EOL><INDENT>return wrap<EOL><DEDENT>else:<EOL><INDENT>return wrap(maybe_cls)<EOL><DEDENT>", "docstring": "A class decorator that adds ``__repr__``, ``__eq__``, ``__cmp__``, and\n``__hash__`` methods according to the fields defined on the SQLAlchemy\nmodel class.", "id": "f13868:m0"}
{"signature": "def read(*parts):", "body": "with codecs.open(os.path.join(HERE, *parts), '<STR_LIT:rb>', '<STR_LIT:utf-8>') as f:<EOL><INDENT>return f.read()<EOL><DEDENT>", "docstring": "Build an absolute path from *parts* and and return the contents of the\nresulting file.  Assume UTF-8 encoding.", "id": "f13869:m0"}
{"signature": "def find_meta(meta):", "body": "meta_match = re.search(<EOL>r\"<STR_LIT>\".format(meta=meta),<EOL>META_FILE, re.M<EOL>)<EOL>if meta_match:<EOL><INDENT>return meta_match.group(<NUM_LIT:1>)<EOL><DEDENT>raise RuntimeError('<STR_LIT>'.format(meta=meta))<EOL>", "docstring": "Extract __*meta*__ from META_FILE.", "id": "f13869:m1"}
{"signature": "def remote_file_exists(self, url):", "body": "return requests.head(url).status_code == <NUM_LIT:200><EOL>", "docstring": "Check whether the remote file exists on Storage", "id": "f13874:c1:m2"}
{"signature": "def preprocess_constraints(ml, cl, n):", "body": "<EOL>ml_graph, cl_graph = {}, {}<EOL>for i in range(n):<EOL><INDENT>ml_graph[i] = set()<EOL>cl_graph[i] = set()<EOL><DEDENT>def add_both(d, i, j):<EOL><INDENT>d[i].add(j)<EOL>d[j].add(i)<EOL><DEDENT>for (i, j) in ml:<EOL><INDENT>ml_graph[i].add(j)<EOL>ml_graph[j].add(i)<EOL><DEDENT>for (i, j) in cl:<EOL><INDENT>cl_graph[i].add(j)<EOL>cl_graph[j].add(i)<EOL><DEDENT>def dfs(i, graph, visited, component):<EOL><INDENT>visited[i] = True<EOL>for j in graph[i]:<EOL><INDENT>if not visited[j]:<EOL><INDENT>dfs(j, graph, visited, component)<EOL><DEDENT><DEDENT>component.append(i)<EOL><DEDENT>visited = [False] * n<EOL>neighborhoods = []<EOL>for i in range(n):<EOL><INDENT>if not visited[i] and ml_graph[i]:<EOL><INDENT>component = []<EOL>dfs(i, ml_graph, visited, component)<EOL>for x1 in component:<EOL><INDENT>for x2 in component:<EOL><INDENT>if x1 != x2:<EOL><INDENT>ml_graph[x1].add(x2)<EOL><DEDENT><DEDENT><DEDENT>neighborhoods.append(component)<EOL><DEDENT><DEDENT>for (i, j) in cl:<EOL><INDENT>for x in ml_graph[i]:<EOL><INDENT>add_both(cl_graph, x, j)<EOL><DEDENT>for y in ml_graph[j]:<EOL><INDENT>add_both(cl_graph, i, y)<EOL><DEDENT>for x in ml_graph[i]:<EOL><INDENT>for y in ml_graph[j]:<EOL><INDENT>add_both(cl_graph, x, y)<EOL><DEDENT><DEDENT><DEDENT>for i in ml_graph:<EOL><INDENT>for j in ml_graph[i]:<EOL><INDENT>if j != i and j in cl_graph[i]:<EOL><INDENT>raise InconsistentConstraintsException('<STR_LIT>'.format(i, j))<EOL><DEDENT><DEDENT><DEDENT>return ml_graph, cl_graph, neighborhoods<EOL>", "docstring": "Create a graph of constraints for both must- and cannot-links", "id": "f13878:m0"}
{"signature": "def _dist(self, x, y, A):", "body": "return scipy.spatial.distance.mahalanobis(x, y, A) ** <NUM_LIT:2><EOL>", "docstring": "(x - y)^T A (x - y)", "id": "f13880:c0:m4"}
{"signature": "def _dist(self, x, y, A):", "body": "return scipy.spatial.distance.mahalanobis(x, y, A) ** <NUM_LIT:2><EOL>", "docstring": "(x - y)^T A (x - y)", "id": "f13883:c0:m4"}
{"signature": "def query(self, i, j):", "body": "if self.queries_cnt < self.max_queries_cnt:<EOL><INDENT>self.queries_cnt += <NUM_LIT:1><EOL>return self.labels[i] == self.labels[j]<EOL><DEDENT>else:<EOL><INDENT>raise MaximumQueriesExceeded<EOL><DEDENT>", "docstring": "Query the oracle to find out whether i and j should be must-linked", "id": "f13891:c1:m1"}
{"signature": "def specifier_to_db(db_spec):", "body": "local_match = LOCAL_RE.match(db_spec)<EOL>remote_match = REMOTE_RE.match(db_spec)<EOL>plain_match = PLAIN_RE.match(db_spec)<EOL>if local_match:<EOL><INDENT>return local_match.groupdict()['<STR_LIT>']<EOL><DEDENT>elif remote_match:<EOL><INDENT>hostname, portnum, database = map(remote_match.groupdict().get,<EOL>('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'))<EOL>local_url = settings._('<STR_LIT>', '<STR_LIT>')<EOL>localhost, localport = urlparse.urlparse(local_url)[<NUM_LIT:1>].split('<STR_LIT::>')<EOL>if (localhost == hostname) and (localport == portnum):<EOL><INDENT>return database<EOL><DEDENT>return '<STR_LIT>' % (hostname, portnum, database)<EOL><DEDENT>elif plain_match:<EOL><INDENT>return plain_match.groupdict()['<STR_LIT>']<EOL><DEDENT>raise ValueError('<STR_LIT>' % (db_spec,))<EOL>", "docstring": "Return the database string for a database specifier.\n\nThe database specifier takes a custom format for specifying local and remote\ndatabases. A local database is specified by the following format:\n\n    local:<db_name>\n\nFor example, a database called 'sessions' would be specified by the string\n``'local:sessions'``. Remote databases are specified like this:\n\n    remote:<host>:<port_num>:<db_name>\n\nFor example, a database called 'log' on the server 'dev.example.com' at port\nnumber 5984 would be specified by ``'remote:dev.example.com:5984:log'``.\n\nThese specifiers are translated into strings acceptable to CouchDB; local\nspecs are turned into the database name alone, and remote specs are turned\ninto ``'http://host:port/db_name'`` URLs.", "id": "f13902:m0"}
{"signature": "def replicate_existing(source_db, target_db):", "body": "<EOL>server = shortcuts.get_server()<EOL>logger = logging.getLogger('<STR_LIT>')<EOL>logger.debug('<STR_LIT>' + urllib.parse.urljoin(server.resource.uri, '<STR_LIT>'))<EOL>source, target = specifier_to_db(source_db), specifier_to_db(target_db)<EOL>logger.debug('<STR_LIT>' % (source,))<EOL>logger.debug('<STR_LIT>' % (target,))<EOL>try:<EOL><INDENT>resp_headers, resp_body = server.resource.post(path='<STR_LIT>',<EOL>content=json.dumps({'<STR_LIT:source>': source, '<STR_LIT:target>': target}))<EOL><DEDENT>except couchdb.client.ServerError as exc:<EOL><INDENT>logger.error('<STR_LIT>')<EOL>raise ReplicationError(exc.args)<EOL><DEDENT>result = resp_body['<STR_LIT>'][<NUM_LIT:0>]<EOL>if resp_body['<STR_LIT>']:<EOL><INDENT>logger.info('<STR_LIT>' % (<EOL>resp_body['<STR_LIT>'][:<NUM_LIT:6>],))<EOL>logger.info('<STR_LIT>' + result['<STR_LIT>'])<EOL>logger.info('<STR_LIT>' + result['<STR_LIT>'])<EOL>result['<STR_LIT>'] = datetime.datetime.strptime(result['<STR_LIT>'],<EOL>'<STR_LIT>')<EOL>result['<STR_LIT>'] = datetime.datetime.strptime(result['<STR_LIT>'],<EOL>'<STR_LIT>')<EOL>timedelta = result['<STR_LIT>'] - result['<STR_LIT>']<EOL>if timedelta.days:<EOL><INDENT>logger.info('<STR_LIT>' % (<EOL>timedelta.days,<EOL>timedelta.seconds + (timedelta.microseconds * (<NUM_LIT>))))<EOL><DEDENT>else:<EOL><INDENT>logger.info('<STR_LIT>' % (<EOL>timedelta.seconds + (timedelta.microseconds * (<NUM_LIT>))))<EOL><DEDENT>result['<STR_LIT>'] = resp_body['<STR_LIT>']<EOL>result['<STR_LIT>'] = resp_body['<STR_LIT>']<EOL>result['<STR_LIT>'] = resp_body['<STR_LIT>']<EOL>if result['<STR_LIT>'] == <NUM_LIT:1>:<EOL><INDENT>docs_read = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>docs_read = '<STR_LIT>' % (result['<STR_LIT>'],)<EOL><DEDENT>if result['<STR_LIT>'] == <NUM_LIT:1>:<EOL><INDENT>docs_written = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>docs_written = '<STR_LIT>' % (result['<STR_LIT>'],)<EOL><DEDENT>if 
result['<STR_LIT>'] == <NUM_LIT:1>:<EOL><INDENT>missing_checked = '<STR_LIT>' % (<EOL>result['<STR_LIT>'],)<EOL><DEDENT>else:<EOL><INDENT>missing_checked = '<STR_LIT>' % (<EOL>result['<STR_LIT>'], result['<STR_LIT>'],)<EOL><DEDENT>logging.info('<STR_LIT>' % (docs_read, docs_written))<EOL>logging.info(missing_checked)<EOL>return result<EOL><DEDENT>else:<EOL><INDENT>logger.error('<STR_LIT>' % (<EOL>resp_body['<STR_LIT>'][:<NUM_LIT:6>],))<EOL>result['<STR_LIT>'] = resp_body['<STR_LIT>']<EOL>result['<STR_LIT>'] = resp_body['<STR_LIT>']<EOL>result['<STR_LIT>'] = resp_body['<STR_LIT>']<EOL>raise ReplicationFailure(resp_headers, result)<EOL><DEDENT>", "docstring": "Replicate an existing database to another existing database.", "id": "f13903:m0"}
{"signature": "def get_doc(doc_id, db_name, server_url='<STR_LIT>', rev=None):", "body": "db = get_server(server_url)[db_name]<EOL>if rev:<EOL><INDENT>headers, response = db.resource.get(doc_id, rev=rev)<EOL>return couchdb.client.Document(response)<EOL><DEDENT>return db[doc_id]<EOL>", "docstring": "Return a CouchDB document, given its ID, revision and database name.", "id": "f13904:m2"}
{"signature": "def get_db(db_name, server_url='<STR_LIT>'):", "body": "return get_server(server_url)[db_name]<EOL>", "docstring": "Return a CouchDB database instance, given its name.", "id": "f13904:m1"}
{"signature": "def get_server(server_url='<STR_LIT>'):", "body": "return couchdb.client.Server(<EOL>server_url if server_url else settings._('<STR_LIT>'))<EOL>", "docstring": "Return a CouchDB server instance based on Django project settings.", "id": "f13904:m0"}
{"signature": "@utils.generator_to_list<EOL><INDENT>def handle_map_doc(self, document):<DEDENT>", "body": "<EOL>for function in sorted(list(self.functions.values()), key=lambda x: x[<NUM_LIT:0>]):<EOL><INDENT>try:<EOL><INDENT>yield [list(function(document))]<EOL><DEDENT>except Exception as exc:<EOL><INDENT>yield []<EOL>self.log(repr(exc))<EOL><DEDENT><DEDENT>", "docstring": "Return the mapping of a document according to the function list.", "id": "f13910:c2:m2"}
{"signature": "def get_function(function_name):", "body": "module, basename = str(function_name).rsplit('<STR_LIT:.>', <NUM_LIT:1>)<EOL>try:<EOL><INDENT>return getattr(__import__(module, fromlist=[basename]), basename)<EOL><DEDENT>except (ImportError, AttributeError):<EOL><INDENT>raise FunctionNotFound(function_name)<EOL><DEDENT>", "docstring": "Given a Python function name, return the function it refers to.", "id": "f13910:m0"}
{"signature": "def log(self, string):", "body": "self.wfile.write(json.dumps({'<STR_LIT>': string}) + NEWLINE)<EOL>", "docstring": "Log an event on the CouchDB server.", "id": "f13910:c2:m7"}
{"signature": "def handle_reset(self):", "body": "self.functions.clear()<EOL>self.function_counter = <NUM_LIT:0><EOL>", "docstring": "Reset the current function list.", "id": "f13910:c2:m0"}
{"signature": "def js_error(exc):", "body": "<EOL>return json.dumps({<EOL>'<STR_LIT:error>': type(exc).__name__,<EOL>'<STR_LIT>': str(exc)})<EOL>", "docstring": "Transform a Python exception into a CouchDB JSON error.", "id": "f13910:m2"}
{"signature": "def handle_reduce(self, reduce_function_names, mapped_docs):", "body": "reduce_functions = []<EOL>for reduce_function_name in reduce_function_names:<EOL><INDENT>try:<EOL><INDENT>reduce_function = get_function(reduce_function_name)<EOL>if getattr(reduce_function, '<STR_LIT>', None):<EOL><INDENT>reduce_function = reduce_function(self.log)<EOL><DEDENT>reduce_functions.append(reduce_function)<EOL><DEDENT>except Exception as exc:<EOL><INDENT>self.log(repr(exc))<EOL>reduce_functions.append(lambda *args, **kwargs: None)<EOL><DEDENT><DEDENT>keys, values = list(zip(<EOL>(key, value) for ((key, doc_id), value) in mapped_docs))<EOL>results = []<EOL>for reduce_function in reduce_functions:<EOL><INDENT>try:<EOL><INDENT>results.append(reduce_function(keys, values, rereduce=False))<EOL><DEDENT>except Exception as exc:<EOL><INDENT>self.log(repr(exc))<EOL>results.append(None)<EOL><DEDENT><DEDENT>return [True, results]<EOL>", "docstring": "Reduce several mapped documents by several reduction functions.", "id": "f13910:c2:m3"}
{"signature": "def handle_validate(self, function_name, new_doc, old_doc, user_ctx):", "body": "try:<EOL><INDENT>function = get_function(function_name)<EOL><DEDENT>except Exception as exc:<EOL><INDENT>self.log(repr(exc))<EOL>return False<EOL><DEDENT>try:<EOL><INDENT>return function(new_doc, old_doc, user_ctx)<EOL><DEDENT>except Exception as exc:<EOL><INDENT>self.log(repr(exc))<EOL>return repr(exc)<EOL><DEDENT>", "docstring": "Validate...this function is undocumented, but still in CouchDB.", "id": "f13910:c2:m5"}
{"signature": "@classmethod<EOL><INDENT>def from_uniform(<EOL>cls, z, origin=(<NUM_LIT:0>, <NUM_LIT:0>), step=(<NUM_LIT:1>, <NUM_LIT:1>), formatter=numpy_formatter):<DEDENT>", "body": "z = np.ma.asarray(z, dtype=np.float64)<EOL>if z.ndim != <NUM_LIT:2>:<EOL><INDENT>raise TypeError(<EOL>\"<STR_LIT>\".format(z.ndim))<EOL><DEDENT>if len(origin) != <NUM_LIT:2>:<EOL><INDENT>raise TypeError(<EOL>\"<STR_LIT>\".format(<EOL>len(origin)))<EOL><DEDENT>if len(step) != <NUM_LIT:2>:<EOL><INDENT>raise TypeError(<EOL>\"<STR_LIT>\".format(<EOL>len(step)))<EOL><DEDENT>if any(s == <NUM_LIT:0> for s in step):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\".format(<EOL>str(step)))<EOL><DEDENT>y, x = np.mgrid[<EOL>origin[<NUM_LIT:0>]:(origin[<NUM_LIT:0>]+step[<NUM_LIT:0>]*z.shape[<NUM_LIT:0>]):step[<NUM_LIT:0>],<EOL>origin[<NUM_LIT:1>]:(origin[<NUM_LIT:1>]+step[<NUM_LIT:1>]*z.shape[<NUM_LIT:1>]):step[<NUM_LIT:1>]]<EOL>return cls(x, y, z, formatter)<EOL>", "docstring": "Construct a contour generator from a uniform grid.\n\n        NOTE\n        ----\n        The default `origin` and `step` values is equivalent to calling\n        :meth:`matplotlib.axes.Axes.contour` with only the `z` argument.\n\n        Parameters\n        ----------\n        z : array_like\n            The 2-dimensional uniform grid of data to compute contours for.\n            Masked arrays are supported.\n        origin : (number.Number, number.Number)\n            The (x, y) coordinate of data point `z[0,0]`.\n        step :  (number.Number, number.Number)\n            The (x, y) distance between data points in `z`.\n        formatter : callable\n            A conversion function to convert from the internal `Matplotlib`_\n            contour format to an external format.  See :ref:`formatters` for\n            more information.\n\n        Returns\n        -------\n        : :class:`QuadContourGenerator`\n            Initialized contour generator.", "id": "f13913:c0:m3"}
{"signature": "@classmethod<EOL><INDENT>def from_curvilinear(cls, x, y, z, formatter=numpy_formatter):<DEDENT>", "body": "return cls(x, y, z, formatter)<EOL>", "docstring": "Construct a contour generator from a curvilinear grid.\n\n        Note\n        ----\n        This is an alias for the default constructor.\n\n        Parameters\n        ----------\n        x : array_like\n            x coordinates of each point in `z`.  Must be the same size as `z`.\n        y : array_like\n            y coordinates of each point in `z`.  Must be the same size as `z`.\n        z : array_like\n            The 2-dimensional curvilinear grid of data to compute\n            contours for.  Masked arrays are supported.\n        formatter : callable\n            A conversion function to convert from the internal `Matplotlib`_\n            contour format to an external format.  See :ref:`formatters` for\n            more information.\n\n        Returns\n        -------\n        : :class:`QuadContourGenerator`\n            Initialized contour generator.", "id": "f13913:c0:m1"}
{"signature": "def __init__(self, formatter=numpy_formatter, *args, **kwargs):", "body": "<EOL>self.formatter = formatter<EOL>", "docstring": "Initialize a :class:`ContourMixin`, see class docstring.", "id": "f13915:c1:m0"}
{"signature": "def contour(self, level):", "body": "if not isinstance(level, numbers.Number):<EOL><INDENT>raise TypeError(<EOL>(\"<STR_LIT>\"<EOL>\"<STR_LIT>\").format(type(level)))<EOL><DEDENT>vertices = self._contour_generator.create_contour(level)<EOL>return self.formatter(level, vertices)<EOL>", "docstring": "Get contour lines at the given level.\n\n        Parameters\n        ----------\n        level : numbers.Number\n            The data level to calculate the contour lines for.\n\n        Returns\n        -------\n        :\n            The result of the :attr:`formatter` called on the contour at the\n            given `level`.", "id": "f13915:c1:m1"}
{"signature": "def filled_contour(self, min=None, max=None):", "body": "<EOL>if min is None:<EOL><INDENT>min = np.finfo(np.float64).min<EOL><DEDENT>if max is None:<EOL><INDENT>max = np.finfo(np.float64).max<EOL><DEDENT>vertices, codes = (<EOL>self._contour_generator.create_filled_contour(min, max))<EOL>return self.formatter((min, max), vertices, codes)<EOL>", "docstring": "Get contour polygons between the given levels.\n\n        Parameters\n        ----------\n        min : numbers.Number or None\n            The minimum data level of the contour polygon.  If :obj:`None`,\n            ``numpy.finfo(numpy.float64).min`` will be used.\n        max : numbers.Number or None\n            The maximum data level of the contour polygon.  If :obj:`None`,\n            ``numpy.finfo(numpy.float64).max`` will be used.\n\n        Returns\n        -------\n        :\n            The result of the :attr:`formatter` called on the filled contour\n            between `min` and `max`.", "id": "f13915:c1:m2"}
{"signature": "def null_formatter(level, vertices, codes=None):", "body": "return level, vertices, codes<EOL>", "docstring": "Null formatter that passes through the raw vertices and codes.", "id": "f13915:m0"}
{"signature": "def create_repository(self, path, info=None, description=None, replace=True, allowNoneEmpty=True, raiseError=True):", "body": "assert isinstance(raiseError, bool), \"<STR_LIT>\"<EOL>assert isinstance(allowNoneEmpty, bool), \"<STR_LIT>\"<EOL>assert isinstance(replace, bool), \"<STR_LIT>\"<EOL>assert isinstance(path, basestring), \"<STR_LIT>\"<EOL>if info is None:<EOL><INDENT>info = '<STR_LIT>'<EOL><DEDENT>try:<EOL><INDENT>pickle.dumps(info)<EOL><DEDENT>except Exception as err:<EOL><INDENT>raise Exception(\"<STR_LIT>\"%str(err))<EOL><DEDENT>if description is None:<EOL><INDENT>description = '<STR_LIT>'<EOL><DEDENT>assert isinstance(description, basestring), \"<STR_LIT>\"<EOL>if path.strip() in ('<STR_LIT>','<STR_LIT:.>'):<EOL><INDENT>path = os.getcwd()<EOL><DEDENT>realPath = os.path.realpath( os.path.expanduser(path) )<EOL>message = []<EOL>if self.is_repository(realPath):<EOL><INDENT>if not replace:<EOL><INDENT>message.append(\"<STR_LIT>\"%path)<EOL>return False, message<EOL><DEDENT>else:<EOL><INDENT>message.append(\"<STR_LIT>\"%path)<EOL>try:<EOL><INDENT>for _df in os.listdir(realPath):<EOL><INDENT>_p = os.path.join(realPath, _df)<EOL>if os.path.isdir(_p):<EOL><INDENT>shutil.rmtree( _p )<EOL><DEDENT>else:<EOL><INDENT>os.remove(_p)<EOL><DEDENT><DEDENT><DEDENT>except Exception as err:<EOL><INDENT>message.append(\"<STR_LIT>\"%(str(err)))<EOL>return False, '<STR_LIT:\\n>'.join(message)<EOL><DEDENT><DEDENT><DEDENT>if not os.path.isdir(realPath):<EOL><INDENT>os.makedirs(realPath)<EOL><DEDENT>elif len(os.listdir(realPath)) and not allowNoneEmpty:<EOL><INDENT>return False, \"<STR_LIT>\"<EOL><DEDENT>oldRepo = self.__repo<EOL>self.reset()<EOL>self.__path = realPath.rstrip(os.sep)<EOL>self.__repo['<STR_LIT>'] = info<EOL>saved = self.save(description=description)<EOL>if not saved:<EOL><INDENT>self.__repo = oldRepo<EOL>message.append(\"<STR_LIT>\")<EOL>return False, '<STR_LIT:\\n>'.join(message)<EOL><DEDENT>return True, '<STR_LIT:\\n>'.join(message)<EOL>", 
"docstring": "create a repository in a directory. This method insures the creation of\nthe directory in the system if it is missing.\\n\n\n**N.B. If replace is True and existing repository is found in path, create_repository erases all existing files and directories in path.**\n\n:Parameters:\n    #. path (string): The real absolute path where to create the Repository.\n       If '.' or an empty string is passed, the current working directory will be used.\n    #. description (None, str): Repository main directory information.\n    #. info (None, object): Repository information. It can\n       be None or any pickle writable type of data.\n    #. replace (boolean): Whether to replace existing repository.\n    #. allowNoneEmpty (boolean): Allow creating repository in none-empty\n       directory.\n    #. raiseError (boolean): Whether to raise encountered error instead\n       of returning failure.\n\n:Returns:\n    #. success (boolean): Whether creating repository was successful\n    #. message (None, str): Any returned message.", "id": "f13917:c1:m19"}
{"signature": "@property<EOL><INDENT>def info(self):<DEDENT>", "body": "return self.__repo['<STR_LIT>']<EOL>", "docstring": "Get repository information", "id": "f13917:c1:m12"}
{"signature": "@path_required<EOL><INDENT>def get_repository_state(self, relaPath=None):<DEDENT>", "body": "state = []<EOL>def _walk_dir(relaPath, dirList):<EOL><INDENT>dirDict = {'<STR_LIT:type>':'<STR_LIT>',<EOL>'<STR_LIT>':os.path.isdir(os.path.join(self.__path,relaPath)),<EOL>'<STR_LIT>':os.path.isfile(os.path.join(self.__path,relaPath,self.__dirInfo)),<EOL>}<EOL>state.append({relaPath:dirDict})<EOL>for fname in sorted([f for f in dirList if isinstance(f, basestring)]):<EOL><INDENT>relaFilePath = os.path.join(relaPath,fname)<EOL>realFilePath = os.path.join(self.__path,relaFilePath)<EOL>fileDict = {'<STR_LIT:type>':'<STR_LIT:file>',<EOL>'<STR_LIT>':os.path.isfile(realFilePath),<EOL>'<STR_LIT>':os.path.isfile(os.path.join(self.__path,relaPath,self.__fileInfo%fname)),<EOL>}<EOL>state.append({relaFilePath:fileDict})<EOL><DEDENT>for ddict in sorted([d for d in dirList if isinstance(d, dict)], key=lambda k: list(k)[<NUM_LIT:0>]):<EOL><INDENT>dirname = list(ddict)[<NUM_LIT:0>]<EOL>_walk_dir(relaPath=os.path.join(relaPath,dirname), dirList=ddict[dirname])<EOL><DEDENT><DEDENT>if relaPath is None:<EOL><INDENT>_walk_dir(relaPath='<STR_LIT>', dirList=self.__repo['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>assert isinstance(relaPath, basestring), \"<STR_LIT>\"<EOL>relaPath = self.to_repo_relative_path(path=relaPath, split=False)<EOL>spath    = relaPath.split(os.sep)<EOL>dirList  = self.__repo['<STR_LIT>']<EOL>while len(spath):<EOL><INDENT>dirname = spath.pop(<NUM_LIT:0>)<EOL>dList   = [d for d in dirList if isinstance(d, dict)]<EOL>if not len(dList):<EOL><INDENT>dirList = None<EOL>break<EOL><DEDENT>cDict = [d for d in dList if dirname in d]<EOL>if not len(cDict):<EOL><INDENT>dirList = None<EOL>break<EOL><DEDENT>dirList = cDict[<NUM_LIT:0>][dirname]<EOL><DEDENT>if dirList is not None:<EOL><INDENT>_walk_dir(relaPath=relaPath, dirList=dirList)<EOL><DEDENT><DEDENT>return state<EOL>", "docstring": "Get a list representation of repository state along with useful\ninformation. 
List state is ordered relativeley to directories level\n\n:Parameters:\n    #. relaPath (None, str): relative directory path from where to\n       start. If None all repository representation is returned.\n\n:Returns:\n    #. state (list): List representation of the repository.\n       List items are all dictionaries. Every dictionary has a single\n       key which is the file or the directory name and the value is a\n       dictionary of information including:\n\n           * 'type': the type of the tracked whether it's file, dir, or objectdir\n           * 'exists': whether file or directory actually exists on disk\n           * 'pyrepfileinfo': In case of a file or an objectdir whether .%s_pyrepfileinfo exists\n           * 'pyrepdirinfo': In case of a directory whether .pyrepdirinfo exists", "id": "f13917:c1:m24"}
{"signature": "def walk_directories_info(self, relativePath=\"<STR_LIT>\", fullPath=False, recursive=False):", "body": "assert isinstance(fullPath, bool), \"<STR_LIT>\"<EOL>assert isinstance(recursive, bool), \"<STR_LIT>\"<EOL>relativePath = self.to_repo_relative_path(path=relativePath, split=False)<EOL>for dpath in self.walk_directories_path(relativePath=relativePath, fullPath=False, recursive=recursive):<EOL><INDENT>dirInfoPath = os.path.join(self.__path,dpath,self.__dirInfo)<EOL>if os.path.isfile(dirInfoPath):<EOL><INDENT>with open(dirInfoPath, '<STR_LIT:rb>') as fd:<EOL><INDENT>info = pickle.load(fd)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>info = None<EOL><DEDENT>if fullPath:<EOL><INDENT>yield (os.path.join(self.__path, dpath), info)<EOL><DEDENT>else:<EOL><INDENT>yield (dpath, info)<EOL><DEDENT><DEDENT>", "docstring": "Walk the repository relative path and yield tuple of two items where\nfirst item is directory relative/full path and second item is directory\ninfo. If directory file info is not found on disk, second item will be None.\n\n:parameters:\n    #. relativePath (string): The relative path from which start the walk.\n    #. fullPath (boolean): Whether to return full or relative path.\n    #. recursive (boolean): Whether walk all directories files recursively.", "id": "f13917:c1:m32"}
{"signature": "def update(self, *args, **kwargs):", "body": "return self.update_file(*args, **kwargs)<EOL>", "docstring": "Alias to update_file", "id": "f13917:c1:m43"}
{"signature": "def get_dump_method(dump, protocol=-<NUM_LIT:1>):", "body": "if dump is None:<EOL><INDENT>dump = '<STR_LIT>'<EOL><DEDENT>if dump.startswith('<STR_LIT>'):<EOL><INDENT>if dump == '<STR_LIT>':<EOL><INDENT>proto = protocol<EOL><DEDENT>else:<EOL><INDENT>proto = dump.strip('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>proto = int(proto)<EOL>assert proto>=-<NUM_LIT:1><EOL><DEDENT>except:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>code = \"\"\"<STR_LIT>\"\"\"<EOL><DEDENT>rt dill<EOL><INDENT>open('<STR_LIT>', '<STR_LIT:wb>') as fd:<EOL><DEDENT>dill.dump( value, fd, protocol=%i )<EOL>fd.flush()<EOL>os.fsync(fd.fileno())<EOL>proto<EOL>elif dump == '<STR_LIT>':<EOL><INDENT>code = \"\"\"<STR_LIT>\"\"\"<EOL><DEDENT>rt numpy<EOL><INDENT>open('<STR_LIT>', '<STR_LIT:wb>') as fd:<EOL><DEDENT>numpy.save(file=fd, arr=value)<EOL>fd.flush()<EOL>os.fsync(fd.fileno())<EOL>elif dump == '<STR_LIT>':<EOL><INDENT>code =", "docstring": "Get dump function code string", "id": "f13917:m1"}
{"signature": "def pull(self, *args, **kwargs):", "body": "return self.pull_file(*args, **kwargs)<EOL>", "docstring": "Alias to pull_file", "id": "f13917:c1:m45"}
{"signature": "@path_required<EOL><INDENT>def create_package(self, path=None, name=None, mode=None):<DEDENT>", "body": "<EOL>assert mode in (None, '<STR_LIT:w>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'), '<STR_LIT>'%str(mode)<EOL>if mode is None:<EOL><INDENT>mode = '<STR_LIT>'<EOL><DEDENT>if path is None:<EOL><INDENT>root = os.path.split(self.__path)[<NUM_LIT:0>]<EOL><DEDENT>elif path.strip() in ('<STR_LIT>','<STR_LIT:.>'):<EOL><INDENT>root = os.getcwd()<EOL><DEDENT>else:<EOL><INDENT>root = os.path.realpath( os.path.expanduser(path) )<EOL><DEDENT>assert os.path.isdir(root), '<STR_LIT>'%path<EOL>if name is None:<EOL><INDENT>ext = mode.split(\"<STR_LIT::>\")<EOL>if len(ext) == <NUM_LIT:2>:<EOL><INDENT>if len(ext[<NUM_LIT:1>]):<EOL><INDENT>ext = \"<STR_LIT:.>\"+ext[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>ext = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>ext = '<STR_LIT>'<EOL><DEDENT>name = os.path.split(self.__path)[<NUM_LIT:1>]+ext<EOL><DEDENT>tarfilePath = os.path.join(root, name)<EOL>try:<EOL><INDENT>tarHandler = tarfile.TarFile.open(tarfilePath, mode=mode)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise Exception(\"<STR_LIT>\"%e)<EOL><DEDENT>for dpath in sorted(list(self.walk_directories_path(recursive=True))):<EOL><INDENT>t = tarfile.TarInfo( dpath )<EOL>t.type = tarfile.DIRTYPE<EOL>tarHandler.addfile(t)<EOL>tarHandler.add(os.path.join(self.__path,dpath,self.__dirInfo), arcname=self.__dirInfo)<EOL><DEDENT>for fpath in self.walk_files_path(recursive=True):<EOL><INDENT>relaPath, fname = os.path.split(fpath)<EOL>tarHandler.add(os.path.join(self.__path,fpath), arcname=fname)<EOL>tarHandler.add(os.path.join(self.__path,relaPath,self.__fileInfo%fname), arcname=self.__fileInfo%fname)<EOL>tarHandler.add(os.path.join(self.__path,relaPath,self.__fileClass%fname), arcname=self.__fileClass%fname)<EOL><DEDENT>tarHandler.add(os.path.join(self.__path,self.__repoFile), arcname=\"<STR_LIT>\")<EOL>tarHandler.close()<EOL>", "docstring": "Create a tar file package of all 
the repository files and directories.\nOnly files and directories that are tracked in the repository\nare stored in the package tar file.\n\n**N.B. On some systems packaging requires root permissions.**\n\n:Parameters:\n    #. path (None, string): The real absolute path where to create the\n       package. If None, it will be created in the same directory as\n       the repository. If '.' or an empty string is passed, the current\n       working directory will be used.\n    #. name (None, string): The name to give to the package file\n       If None, the package directory name will be used with the\n       appropriate extension added.\n    #. mode (None, string): The writing mode of the tarfile.\n       If None, automatically the best compression mode will be chose.\n       Available modes are ('w', 'w:', 'w:gz', 'w:bz2')", "id": "f13917:c1:m33"}
{"signature": "def reset(self):", "body": "self.__path   = None<EOL>self.__repo   = {'<STR_LIT>': str(uuid.uuid1()),<EOL>'<STR_LIT>': time.time(),<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': str(__version__),<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': []}<EOL>", "docstring": "Reset repository instance.", "id": "f13917:c1:m16"}
{"signature": "def walk_files_info(self, relativePath=\"<STR_LIT>\", fullPath=False, recursive=False):", "body": "assert isinstance(fullPath, bool), \"<STR_LIT>\"<EOL>assert isinstance(recursive, bool), \"<STR_LIT>\"<EOL>relativePath = self.to_repo_relative_path(path=relativePath, split=False)<EOL>for relaPath in self.walk_files_path(relativePath=relativePath, fullPath=False, recursive=recursive):<EOL><INDENT>fpath, fname = os.path.split(relaPath)<EOL>fileInfoPath = os.path.join(self.__path,fpath,self.__fileInfo%fname)<EOL>if os.path.isfile(fileInfoPath):<EOL><INDENT>with open(fileInfoPath, '<STR_LIT:rb>') as fd:<EOL><INDENT>info = pickle.load(fd)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>info = None<EOL><DEDENT>if fullPath:<EOL><INDENT>yield (os.path.join(self.__path, relaPath), info)<EOL><DEDENT>else:<EOL><INDENT>yield (relaPath, info)<EOL><DEDENT><DEDENT>", "docstring": "Walk the repository relative path and yield tuple of two items where\nfirst item is file relative/full path and second item is file info.\nIf file info is not found on disk, second item will be None.\n\n:parameters:\n    #. relativePath (string): The relative path from which start the walk.\n    #. fullPath (boolean): Whether to return full or relative path.\n    #. recursive (boolean): Whether walk all directories files recursively", "id": "f13917:c1:m30"}
{"signature": "@path_required<EOL><INDENT>def remove_directory(self, relativePath, clean=False, raiseError=True, ntrials=<NUM_LIT:3>):<DEDENT>", "body": "assert isinstance(raiseError, bool), \"<STR_LIT>\"<EOL>assert isinstance(clean, bool), \"<STR_LIT>\"<EOL>assert isinstance(ntrials, int), \"<STR_LIT>\"<EOL>assert ntrials><NUM_LIT:0>, \"<STR_LIT>\"<EOL>relativePath = self.to_repo_relative_path(path=relativePath, split=False)<EOL>parentPath, dirName = os.path.split(relativePath)<EOL>if relativePath == '<STR_LIT>':<EOL><INDENT>return False, \"<STR_LIT>\"<EOL><DEDENT>if not self.is_repository_directory(relativePath):<EOL><INDENT>return False, \"<STR_LIT>\"%relativePath<EOL><DEDENT>realPath = os.path.join(self.__path,relativePath)<EOL>if not os.path.isdir(realPath):<EOL><INDENT>error = \"<STR_LIT>\"<EOL>assert not raiseError, error<EOL>return False, error<EOL><DEDENT>LD =  Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path,parentPath,self.__dirLock))<EOL>acquired, code = LD.acquire_lock()<EOL>if not acquired:<EOL><INDENT>error = \"<STR_LIT>\"%(code,realPath)<EOL>assert not raiseError, error<EOL>return False, error<EOL><DEDENT>LR =  Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path, self.__repoLock))<EOL>acquired, code = LR.acquire_lock()<EOL>if not acquired:<EOL><INDENT>LD.release_lock()<EOL>m = \"<STR_LIT>\"%(code,)<EOL>assert raiseError,  Exception(m)<EOL>return False,m<EOL><DEDENT>for _trial in range(ntrials):<EOL><INDENT>error = None<EOL>try:<EOL><INDENT>dirList = self.__get_repository_parent_directory(relativePath=relativePath)<EOL>assert dirList is not None, \"<STR_LIT>\"%(relativePath,)<EOL>stateBefore = self.get_repository_state(relaPath=parentPath)<EOL>_files = [f for f in dirList if isinstance(f, basestring)]<EOL>_dirs  = [d for d in dirList if isinstance(d, dict)]<EOL>_dirs  = [d for d in dirList if dirName not in d]<EOL>_ = [dirList.pop(<NUM_LIT:0>) for _ in 
range(len(dirList))]<EOL>dirList.extend(_files)<EOL>dirList.extend(_dirs)<EOL>if clean:<EOL><INDENT>shutil.rmtree(realPath)<EOL><DEDENT>else:<EOL><INDENT>stateAfter = self.get_repository_state(relaPath=parentPath)<EOL>success, errors = self.__clean_before_after(stateBefore=stateBefore, stateAfter=stateAfter, keepNoneEmptyDirectory=True)<EOL>assert success, \"<STR_LIT:\\n>\".join(errors)<EOL><DEDENT><DEDENT>except Exception as err:<EOL><INDENT>error = str(err)<EOL>if self.DEBUG_PRINT_FAILED_TRIALS: print(\"<STR_LIT>\"%(_trial, inspect.stack()[<NUM_LIT:1>][<NUM_LIT:3>], str(error)))<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if error is None:<EOL><INDENT>_, error = self.__save_repository_pickle_file(lockFirst=False, raiseError=False)<EOL><DEDENT>LD.release_lock()<EOL>LR.release_lock()<EOL>assert error is None or not raiseError, \"<STR_LIT>\"%(relativePath, ntrials, error,)<EOL>return error is None, error<EOL>", "docstring": "Remove directory from repository tracking.\n\n:Parameters:\n    #. relativePath (string): The relative to the repository path of the\n       directory to remove from the repository.\n    #. clean (boolean): Whether to os remove directory. If False only\n       tracked files will be removed along with left empty directories.\n    #. raiseError (boolean): Whether to raise encountered error instead\n       of returning failure.\n    #. ntrials (int): After aquiring all locks, ntrials is the maximum\n       number of trials allowed before failing.\n       In rare cases, when multiple processes\n       are accessing the same repository components, different processes\n       can alter repository components between successive lock releases\n       of some other process. Bigger number of trials lowers the\n       likelyhood of failure due to multiple processes same time\n       alteration.\n\n:Returns:\n    #. success (boolean): Whether removing the directory was successful.\n    #. 
reason (None, string): Reason why directory was not removed.", "id": "f13917:c1:m36"}
{"signature": "def remove_repository(self, path=None, removeEmptyDirs=True):", "body": "assert isinstance(removeEmptyDirs, bool), \"<STR_LIT>\"<EOL>if path is not None:<EOL><INDENT>if path != self.__path:<EOL><INDENT>repo = Repository()<EOL>repo.load_repository(path)<EOL><DEDENT>else:<EOL><INDENT>repo = self<EOL><DEDENT><DEDENT>else:<EOL><INDENT>repo = self<EOL><DEDENT>assert repo.path is not None, \"<STR_LIT>\"<EOL>for fdict in reversed(repo.get_repository_state()):<EOL><INDENT>relaPath   = list(fdict)[<NUM_LIT:0>]<EOL>realPath   = os.path.join(repo.path, relaPath)<EOL>path, name = os.path.split(realPath)<EOL>if fdict[relaPath]['<STR_LIT:type>'] == '<STR_LIT:file>':<EOL><INDENT>if os.path.isfile(realPath):<EOL><INDENT>os.remove(realPath)<EOL><DEDENT>if os.path.isfile(os.path.join(repo.path,path,self.__fileInfo%name)):<EOL><INDENT>os.remove(os.path.join(repo.path,path,self.__fileInfo%name))<EOL><DEDENT>if os.path.isfile(os.path.join(repo.path,path,self.__fileLock%name)):<EOL><INDENT>os.remove(os.path.join(repo.path,path,self.__fileLock%name))<EOL><DEDENT>if os.path.isfile(os.path.join(repo.path,path,self.__fileClass%name)):<EOL><INDENT>os.remove(os.path.join(repo.path,path,self.__fileClass%name))<EOL><DEDENT><DEDENT>elif fdict[relaPath]['<STR_LIT:type>'] == '<STR_LIT>':<EOL><INDENT>if os.path.isfile(os.path.join(realPath,self.__dirInfo)):<EOL><INDENT>os.remove(os.path.join(realPath,self.__dirInfo))<EOL><DEDENT>if os.path.isfile(os.path.join(realPath,self.__dirLock)):<EOL><INDENT>os.remove(os.path.join(realPath,self.__dirLock))<EOL><DEDENT>if not len(os.listdir(realPath)) and removeEmptyDirs:<EOL><INDENT>shutil.rmtree( realPath )<EOL><DEDENT><DEDENT><DEDENT>if os.path.isfile(os.path.join(repo.path,self.__repoFile)):<EOL><INDENT>os.remove(os.path.join(repo.path,self.__repoFile))<EOL><DEDENT>if os.path.isfile(os.path.join(repo.path,self.__repoLock)):<EOL><INDENT>os.remove(os.path.join(repo.path,self.__repoLock))<EOL><DEDENT>", "docstring": "Remove all repository from 
path along with all repository tracked files.\n\n:Parameters:\n    #. path (None, string): The path the repository to remove.\n    #. removeEmptyDirs (boolean): Whether to remove remaining empty\n       directories.", "id": "f13917:c1:m20"}
{"signature": "def load_repository(self, path, verbose=True, ntrials=<NUM_LIT:3>):", "body": "assert isinstance(ntrials, int), \"<STR_LIT>\"<EOL>assert ntrials><NUM_LIT:0>, \"<STR_LIT>\"<EOL>repo = None<EOL>for _trial in range(ntrials):<EOL><INDENT>try:<EOL><INDENT>self.__load_repository(path=path, verbose=True)<EOL><DEDENT>except Exception as err1:<EOL><INDENT>try:<EOL><INDENT>from .OldRepository import Repository<EOL>REP = Repository(path)<EOL><DEDENT>except Exception as err2:<EOL><INDENT>error = \"<STR_LIT>\"%(err1, err2)<EOL>if self.DEBUG_PRINT_FAILED_TRIALS: print(\"<STR_LIT>\"%(_trial, inspect.stack()[<NUM_LIT:1>][<NUM_LIT:3>], str(error)))<EOL><DEDENT>else:<EOL><INDENT>error = None<EOL>repo  = REP<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>error = None<EOL>repo  = self<EOL>break<EOL><DEDENT><DEDENT>assert error is None, error<EOL>return repo<EOL>", "docstring": "Load repository from a directory path and update the current instance.\nFirst, new repository still will be loaded. If failed, then old\nstyle repository load will be tried.\n\n:Parameters:\n    #. path (string): The path of the directory from where to load\n       the repository from. If '.' or an empty string is passed,\n       the current working directory will be used.\n    #. verbose (boolean): Whether to be verbose about abnormalities\n    #. ntrials (int): After aquiring all locks, ntrials is the maximum\n       number of trials allowed before failing.\n       In rare cases, when multiple processes\n       are accessing the same repository components, different processes\n       can alter repository components between successive lock releases\n       of some other process. Bigger number of trials lowers the\n       likelyhood of failure due to multiple processes same time\n       alteration.\n\n:Returns:\n     #. repository (pyrep.Repository): returns self repository with loaded data.", "id": "f13917:c1:m18"}
{"signature": "def get_repository_directory(self, relativePath):", "body": "return copy.deepcopy(self.__get_repository_directory(relativePath))<EOL>", "docstring": "Get repository directory list copy.\n\n:Parameters:\n    #. relativePath (string): The relative to the repository path .\n\n:Returns:\n    #. dirList (None, list): List of directories and files in repository\n       directory. If directory is not tracked in repository None is\n       returned", "id": "f13917:c1:m25"}
{"signature": "@path_required<EOL><INDENT>def rename_directory(self, relativePath, newName, raiseError=True, ntrials=<NUM_LIT:3>):<DEDENT>", "body": "assert isinstance(raiseError, bool), \"<STR_LIT>\"<EOL>assert isinstance(ntrials, int), \"<STR_LIT>\"<EOL>assert ntrials><NUM_LIT:0>, \"<STR_LIT>\"<EOL>relativePath = self.to_repo_relative_path(path=relativePath, split=False)<EOL>parentPath, dirName = os.path.split(relativePath)<EOL>if relativePath == '<STR_LIT>':<EOL><INDENT>error = \"<STR_LIT>\"<EOL>assert not raiseError, error<EOL>return False, error<EOL><DEDENT>realPath = os.path.join(self.__path,relativePath)<EOL>newRealPath = os.path.join(os.path.dirname(realPath), newName)<EOL>if os.path.isdir(newRealPath):<EOL><INDENT>error = \"<STR_LIT>\"%(newRealPath,)<EOL>assert not raiseError, error<EOL>return False, error<EOL><DEDENT>LD =  Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path,parentPath, self.__dirLock))<EOL>acquired, code = LD.acquire_lock()<EOL>if not acquired:<EOL><INDENT>error = \"<STR_LIT>\"%(code,dirPath)<EOL>assert not raiseError, error<EOL>return False, error<EOL><DEDENT>error = None<EOL>LR =  Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path, self.__repoLock))<EOL>acquired, code = LR.acquire_lock()<EOL>if not acquired:<EOL><INDENT>LD.release_lock()<EOL>m = \"<STR_LIT>\"%(code,dirPath)<EOL>assert raiseError,  Exception(m)<EOL>return False,m<EOL><DEDENT>for _trial in range(ntrials):<EOL><INDENT>try:<EOL><INDENT>repo = self.__load_repository_pickle_file(os.path.join(self.__path, self.__repoFile))<EOL>self.__repo['<STR_LIT>'] = repo['<STR_LIT>']<EOL><DEDENT>except Exception as err:<EOL><INDENT>error = str(err)<EOL>if self.DEBUG_PRINT_FAILED_TRIALS: print(\"<STR_LIT>\"%(_trial, inspect.stack()[<NUM_LIT:1>][<NUM_LIT:3>], str(error)))<EOL><DEDENT>else:<EOL><INDENT>error = None<EOL>break<EOL><DEDENT><DEDENT>if error is not None:<EOL><INDENT>LD.release_lock()<EOL>LR.release_lock()<EOL>assert 
not raiseError, Exception(error)<EOL>return False, error<EOL><DEDENT>for _trial in range(ntrials):<EOL><INDENT>error = None<EOL>try:<EOL><INDENT>dirList = self.__get_repository_parent_directory(relativePath=relativePath)<EOL>assert dirList is not None, \"<STR_LIT>\"%(relativePath,)<EOL>_dirDict = [nd for nd in dirList  if isinstance(nd,dict)]<EOL>_dirDict = [nd for nd in _dirDict if dirName in nd]<EOL>assert len(_dirDict) == <NUM_LIT:1>, \"<STR_LIT>\"<EOL>os.rename(realPath, newRealPath)<EOL>_dirDict[<NUM_LIT:0>][newName] = _dirDict[<NUM_LIT:0>][dirName]<EOL>_dirDict[<NUM_LIT:0>].pop(dirName)<EOL>self.__save_dirinfo(description=None, dirInfoPath=parentPath, create=False)<EOL><DEDENT>except Exception as err:<EOL><INDENT>error = str(err)<EOL>if self.DEBUG_PRINT_FAILED_TRIALS: print(\"<STR_LIT>\"%(_trial, inspect.stack()[<NUM_LIT:1>][<NUM_LIT:3>], str(error)))<EOL><DEDENT>else:<EOL><INDENT>error = None<EOL>break<EOL><DEDENT><DEDENT>if error is None:<EOL><INDENT>_, error = self.__save_repository_pickle_file(lockFirst=False, raiseError=False)<EOL><DEDENT>LR.release_lock()<EOL>LD.release_lock()<EOL>assert error is None or not raiseError, \"<STR_LIT>\"%(relativePath, newName, ntrials, error,)<EOL>return error is None, error<EOL>", "docstring": "Rename a directory in the repository. It insures renaming the directory in the system.\n\n:Parameters:\n    #. relativePath (string): The relative to the repository path of\n       the directory to be renamed.\n    #. newName (string): The new directory name.\n    #. raiseError (boolean): Whether to raise encountered error instead\n       of returning failure.\n    #. ntrials (int): After aquiring all locks, ntrials is the maximum\n       number of trials allowed before failing.\n       In rare cases, when multiple processes\n       are accessing the same repository components, different processes\n       can alter repository components between successive lock releases\n       of some other process. 
Bigger number of trials lowers the\n       likelyhood of failure due to multiple processes same time\n       alteration.\n\n:Returns:\n    #. success (boolean): Whether renaming the directory was successful.\n    #. message (None, string): Some explanatory message or error reason\n       why directory was not renamed.", "id": "f13917:c1:m37"}
{"signature": "@path_required<EOL><INDENT>def pull_file(self, relativePath, pull=None, update=True, ntrials=<NUM_LIT:3>):<DEDENT>", "body": "assert isinstance(ntrials, int), \"<STR_LIT>\"<EOL>assert ntrials><NUM_LIT:0>, \"<STR_LIT>\"<EOL>relativePath = self.to_repo_relative_path(path=relativePath, split=False)<EOL>realPath     = os.path.join(self.__path,relativePath)<EOL>fPath, fName = os.path.split(realPath)<EOL>isRepoFile,fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)<EOL>if not isRepoFile:<EOL><INDENT>fileOnDisk  = [\"<STR_LIT>\",\"<STR_LIT>\"][fileOnDisk]<EOL>infoOnDisk  = [\"<STR_LIT>\",\"<STR_LIT>\"%self.__fileInfo%fName][infoOnDisk]<EOL>classOnDisk = [\"<STR_LIT>\",\"<STR_LIT>\"%self.__fileClass%fName][classOnDisk]<EOL>assert False, \"<STR_LIT>\"%(relativePath,fileOnDisk,infoOnDisk,classOnDisk)<EOL><DEDENT>assert fileOnDisk, \"<STR_LIT>\"%(relativePath,)<EOL>if not infoOnDisk:<EOL><INDENT>if pull is not None:<EOL><INDENT>warnings.warn(\"<STR_LIT>\"%(self.__fileInfo%fName))<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\"%(relativePath,(self.__fileInfo%fName)))<EOL><DEDENT><DEDENT>LF =  Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath,self.__fileLock%fName))<EOL>acquired, code = LF.acquire_lock()<EOL>if not acquired:<EOL><INDENT>error = \"<STR_LIT>\"%(code,relativePath)<EOL>return False, error<EOL><DEDENT>for _trial in range(ntrials):<EOL><INDENT>error = None<EOL>try:<EOL><INDENT>if pull is not None:<EOL><INDENT>pull = get_pull_method(pull)<EOL><DEDENT>else:<EOL><INDENT>with open(os.path.join(fPath,self.__fileInfo%fName), '<STR_LIT:rb>') as fd:<EOL><INDENT>info = pickle.load(fd)<EOL><DEDENT>pull = info['<STR_LIT>']<EOL><DEDENT>my_exec( pull.replace(\"<STR_LIT>\", str(realPath) ), locals=locals(), globals=globals(), description='<STR_LIT>' )<EOL><DEDENT>except Exception as err:<EOL><INDENT>LF.release_lock()<EOL>m = str(pull).replace(\"<STR_LIT>\", str(realPath) )<EOL>error = 
\"<STR_LIT>\"%(m,err)<EOL>if self.DEBUG_PRINT_FAILED_TRIALS: print(\"<STR_LIT>\"%(_trial, inspect.stack()[<NUM_LIT:1>][<NUM_LIT:3>], str(error)))<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>LF.release_lock()<EOL>assert error is None, \"<STR_LIT>\"%(ntrials, error)<EOL>return locals()['<STR_LIT>']<EOL>", "docstring": "Pull a file's data from the Repository.\n\n:Parameters:\n    #. relativePath (string): The relative to the repository path from\n       where to pull the file.\n    #. pull (None, string): The pulling method.\n       If None, the pull method saved in the file info will be used.\n       If a string is given, the string should include all the necessary\n       imports, a '$FILE_PATH' that replaces the absolute file path when\n       the dumping will be performed and finally a PULLED_DATA variable.\n       e.g \"import numpy as np; PULLED_DATA=np.loadtxt(fname='$FILE_PATH')\"\n    #. update (boolean): If pull is not None, Whether to update the pull\n       method stored in the file info by the given pull method.\n    #. ntrials (int): After aquiring all locks, ntrials is the maximum\n       number of trials allowed before failing.\n       In rare cases, when multiple processes\n       are accessing the same repository components, different processes\n       can alter repository components between successive lock releases\n       of some other process. Bigger number of trials lowers the\n       likelyhood of failure due to multiple processes same time\n       alteration.\n\n:Returns:\n    #. data (object): The pulled data from the file.", "id": "f13917:c1:m44"}
{"signature": "def __clean_before_after(self, stateBefore, stateAfter, keepNoneEmptyDirectory=True):", "body": "<EOL>errors    = []<EOL>afterDict = {}<EOL>[afterDict.setdefault(list(aitem)[<NUM_LIT:0>],[]).append(aitem) for aitem in stateAfter]<EOL>for bitem in reversed(stateBefore):<EOL><INDENT>relaPath = list(bitem)[<NUM_LIT:0>]<EOL>basename = os.path.basename(relaPath)<EOL>btype    = bitem[relaPath]['<STR_LIT:type>']<EOL>alist    = afterDict.get(relaPath, [])<EOL>aitem    = [a for a in alist if a[relaPath]['<STR_LIT:type>']==btype]<EOL>if len(aitem)><NUM_LIT:1>:<EOL><INDENT>errors.append(\"<STR_LIT>\"%(basename,btype,relaPath))<EOL>continue<EOL><DEDENT>if not len(aitem):<EOL><INDENT>removeDirs  = []<EOL>removeFiles = []<EOL>if btype == '<STR_LIT>':<EOL><INDENT>if not len(relaPath):<EOL><INDENT>errors.append(\"<STR_LIT>\")<EOL>continue<EOL><DEDENT>removeDirs.append(os.path.join(self.__path,relaPath))<EOL>removeFiles.append(os.path.join(self.__path,relaPath,self.__dirInfo))<EOL>removeFiles.append(os.path.join(self.__path,relaPath,self.__dirLock))<EOL><DEDENT>elif btype == '<STR_LIT:file>':<EOL><INDENT>removeFiles.append(os.path.join(self.__path,relaPath))<EOL>removeFiles.append(os.path.join(self.__path,relaPath,self.__fileInfo%basename))<EOL>removeFiles.append(os.path.join(self.__path,relaPath,self.__fileLock%basename))<EOL><DEDENT>else:<EOL><INDENT>removeDirs.append(os.path.join(self.__path,relaPath))<EOL>removeFiles.append(os.path.join(self.__path,relaPath,self.__fileInfo%basename))<EOL><DEDENT>for fpath in removeFiles:<EOL><INDENT>if os.path.isfile(fpath):<EOL><INDENT>try:<EOL><INDENT>os.remove(fpath)<EOL><DEDENT>except Exception as err:<EOL><INDENT>errors.append(\"<STR_LIT>\"%(fpath, str(err)))<EOL><DEDENT><DEDENT><DEDENT>for dpath in removeDirs:<EOL><INDENT>if os.path.isdir(dpath):<EOL><INDENT>if keepNoneEmptyDirectory or not len(os.listdir(dpath)):<EOL><INDENT>try:<EOL><INDENT>shutil.rmtree(dpath)<EOL><DEDENT>except Exception as 
err:<EOL><INDENT>errors.append(\"<STR_LIT>\"%(fpath, str(err)))<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return len(errors)==<NUM_LIT:0>, errors<EOL>", "docstring": "clean repository given before and after states", "id": "f13917:c1:m6"}
{"signature": "def walk_directories_path(self, relativePath=\"<STR_LIT>\", fullPath=False, recursive=False):", "body": "assert isinstance(fullPath, bool), \"<STR_LIT>\"<EOL>assert isinstance(recursive, bool), \"<STR_LIT>\"<EOL>relativePath = self.to_repo_relative_path(path=relativePath, split=False)<EOL>dirList      = self.__get_repository_directory(relativePath=relativePath)<EOL>assert dirList is not None, \"<STR_LIT>\"%relativePath<EOL>def _walk(rpath, dlist,recursive):<EOL><INDENT>for ddict in dlist:<EOL><INDENT>if isinstance(ddict, dict):<EOL><INDENT>dname = list(ddict)[<NUM_LIT:0>]<EOL>if fullPath:<EOL><INDENT>yield os.path.join(self.__path, rpath, dname)<EOL><DEDENT>else:<EOL><INDENT>yield os.path.join(rpath, dname)<EOL><DEDENT><DEDENT><DEDENT>if recursive:<EOL><INDENT>for ddict in dlist:<EOL><INDENT>if isinstance(ddict, dict):<EOL><INDENT>dname = list(ddict)[<NUM_LIT:0>]<EOL>for p in _walk(rpath=os.path.join(rpath,dname), dlist=ddict[dname],recursive=recursive):<EOL><INDENT>yield p<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return _walk(rpath=relativePath, dlist=dirList, recursive=recursive)<EOL>", "docstring": "Walk repository relative path and yield directory relative/full path\n\n:parameters:\n    #. relativePath (string): The relative path from which start the walk.\n    #. fullPath (boolean): Whether to return full or relative path.\n    #. recursive (boolean): Whether walk all directories files recursively.", "id": "f13917:c1:m31"}
{"signature": "@path_required<EOL><INDENT>def dump_file(self, value, relativePath,<EOL>description=None,<EOL>dump=None, pull=None,<EOL>replace=False, raiseError=True, ntrials=<NUM_LIT:3>):<DEDENT>", "body": "<EOL>assert isinstance(raiseError, bool), \"<STR_LIT>\"<EOL>assert isinstance(replace, bool), \"<STR_LIT>\"<EOL>assert isinstance(ntrials, int), \"<STR_LIT>\"<EOL>assert ntrials><NUM_LIT:0>, \"<STR_LIT>\"<EOL>if description is None:<EOL><INDENT>description = '<STR_LIT>'<EOL><DEDENT>assert isinstance(description, basestring), \"<STR_LIT>\"<EOL>if pull is None and dump is not None:<EOL><INDENT>if dump.startswith('<STR_LIT>') or dump.startswith('<STR_LIT>') or dump.startswith('<STR_LIT>') or dump =='<STR_LIT>':<EOL><INDENT>pull = dump<EOL><DEDENT><DEDENT>dump = get_dump_method(dump, protocol=self._DEFAULT_PICKLE_PROTOCOL)<EOL>pull = get_pull_method(pull)<EOL>relativePath = self.to_repo_relative_path(path=relativePath, split=False)<EOL>savePath     = os.path.join(self.__path,relativePath)<EOL>fPath, fName = os.path.split(savePath)<EOL>success, reason = self.is_name_allowed(savePath)<EOL>if not success:<EOL><INDENT>assert not raiseError, reason<EOL>return False, reason<EOL><DEDENT>try:<EOL><INDENT>success, reason = self.add_directory(fPath, raiseError=False, ntrials=ntrials)<EOL><DEDENT>except Exception as err:<EOL><INDENT>reason  = \"<STR_LIT>\"%(str(err))<EOL>success = False<EOL><DEDENT>if not success:<EOL><INDENT>assert not raiseError, reason<EOL>return False, reason<EOL><DEDENT>LR =  Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path, self.__repoLock))<EOL>acquired, code = LR.acquire_lock()<EOL>if not acquired:<EOL><INDENT>m = \"<STR_LIT>\"%(code,)<EOL>assert raiseError, Exception(m)<EOL>return False,m<EOL><DEDENT>LF =  Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(fPath,self.__fileLock%fName))<EOL>acquired, code = LF.acquire_lock()<EOL>if not acquired:<EOL><INDENT>LR.release_lock()<EOL>error = 
\"<STR_LIT>\"%(code,relativePath)<EOL>assert not raiseError, error<EOL>return False, error<EOL><DEDENT>for _trial in range(ntrials):<EOL><INDENT>try:<EOL><INDENT>repo = self.__load_repository_pickle_file(os.path.join(self.__path, self.__repoFile))<EOL>self.__repo['<STR_LIT>'] = repo['<STR_LIT>']<EOL><DEDENT>except Exception as err:<EOL><INDENT>error = str(err)<EOL>if self.DEBUG_PRINT_FAILED_TRIALS: print(\"<STR_LIT>\"%(_trial, inspect.stack()[<NUM_LIT:1>][<NUM_LIT:3>], str(error)))<EOL><DEDENT>else:<EOL><INDENT>error = None<EOL>break<EOL><DEDENT><DEDENT>if error is not None:<EOL><INDENT>LR.release_lock()<EOL>LF.release_lock()<EOL>assert not raiseError, Exception(error)<EOL>return False, error<EOL><DEDENT>for _trial in range(ntrials):<EOL><INDENT>error = None<EOL>try:<EOL><INDENT>isRepoFile, fileOnDisk, infoOnDisk, classOnDisk = self.is_repository_file(relativePath)<EOL>if isRepoFile:<EOL><INDENT>assert replace, \"<STR_LIT>\"<EOL><DEDENT>fileInfoPath = os.path.join(self.__path,os.path.dirname(relativePath),self.__fileInfo%fName)<EOL>if isRepoFile and fileOnDisk:<EOL><INDENT>with open(fileInfoPath, '<STR_LIT:rb>') as fd:<EOL><INDENT>info = pickle.load(fd)<EOL><DEDENT>assert info['<STR_LIT>'] == self.__repo['<STR_LIT>'], \"<STR_LIT>\"<EOL>info['<STR_LIT>'] = time.time()<EOL><DEDENT>else:<EOL><INDENT>info = {'<STR_LIT>':self.__repo['<STR_LIT>']}<EOL>info['<STR_LIT>'] = info['<STR_LIT>'] = time.time()<EOL><DEDENT>info['<STR_LIT>'] = dump<EOL>info['<STR_LIT>'] = pull<EOL>info['<STR_LIT:description>'] = description<EOL>if not isRepoFile:<EOL><INDENT>dirList = self.__get_repository_directory(fPath)<EOL><DEDENT>my_exec( dump.replace(\"<STR_LIT>\", str(savePath)), locals=locals(), globals=globals(), description='<STR_LIT>'  )<EOL>with open(fileInfoPath, '<STR_LIT:wb>') as fd:<EOL><INDENT>pickle.dump( info,fd, protocol=self._DEFAULT_PICKLE_PROTOCOL)<EOL>fd.flush()<EOL>os.fsync(fd.fileno())<EOL><DEDENT>fileClassPath = 
os.path.join(self.__path,os.path.dirname(relativePath),self.__fileClass%fName)<EOL>with open(fileClassPath, '<STR_LIT:wb>') as fd:<EOL><INDENT>if value is None:<EOL><INDENT>klass = None<EOL><DEDENT>else:<EOL><INDENT>klass = value.__class__<EOL><DEDENT>pickle.dump(klass , fd, protocol=self._DEFAULT_PICKLE_PROTOCOL )<EOL>fd.flush()<EOL>os.fsync(fd.fileno())<EOL><DEDENT>if not isRepoFile:<EOL><INDENT>dirList.append(fName)<EOL><DEDENT><DEDENT>except Exception as err:<EOL><INDENT>error = \"<STR_LIT>\"%(str(err),)<EOL>try:<EOL><INDENT>if '<STR_LIT>' in dump:<EOL><INDENT>mi = get_pickling_errors(value)<EOL>if mi is not None:<EOL><INDENT>error += '<STR_LIT>'%str(mi)<EOL><DEDENT><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>if self.DEBUG_PRINT_FAILED_TRIALS: print(\"<STR_LIT>\"%(_trial, inspect.stack()[<NUM_LIT:1>][<NUM_LIT:3>], str(error)))<EOL><DEDENT>else:<EOL><INDENT>error = None<EOL>break<EOL><DEDENT><DEDENT>if error is None:<EOL><INDENT>_, error = self.__save_repository_pickle_file(lockFirst=False, raiseError=False)<EOL><DEDENT>LR.release_lock()<EOL>LF.release_lock()<EOL>assert not raiseError or error is None, \"<STR_LIT>\"%(relativePath, ntrials, error,)<EOL>return success, error<EOL>", "docstring": "Dump a file using its value to the system and creates its\nattribute in the Repository with utc timestamp.\n\n:Parameters:\n    #. value (object): The value of a file to dump and add to the\n       repository. It is any python object or file.\n    #. relativePath (str): The relative to the repository path to where\n       to dump the file.\n    #. description (None, string): Any description about the file.\n    #. dump (None, string): The dumping method.\n       If None it will be set automatically to pickle and therefore the\n       object must be pickleable. If a string is given, it can be a\n       keyword ('json','pickle','dill') or a string compileable code to\n       dump the data. 
The string code must include all the necessary\n       imports and a '$FILE_PATH' that replaces the absolute file path\n       when the dumping will be performed.\\n\n       e.g. \"import numpy as np; np.savetxt(fname='$FILE_PATH', X=value, fmt='%.6e')\"\n    #. pull (None, string): The pulling method. If None it will be set\n       automatically to pickle and therefore the object must be\n       pickleable. If a string is given, it can be a keyword\n       ('json','pickle','dill') or a string compileable code to pull\n       the data. The string code must include all the necessary imports,\n       a '$FILE_PATH' that replaces the absolute file path when the\n       dumping will be performed and finally a PULLED_DATA variable.\\n\n       e.g \"import numpy as np; PULLED_DATA=np.loadtxt(fname='$FILE_PATH')\"\n    #. replace (boolean): Whether to replace any existing file.\n    #. raiseError (boolean): Whether to raise encountered error instead\n       of returning failure.\n    #. ntrials (int): After aquiring all locks, ntrials is the maximum\n       number of trials allowed before failing.\n       In rare cases, when multiple processes\n       are accessing the same repository components, different processes\n       can alter repository components between successive lock releases\n       of some other process. Bigger number of trials lowers the\n       likelyhood of failure due to multiple processes same time\n       alteration.\n\n:Returns:\n    #. success (boolean): Whether renaming the directory was successful.\n    #. message (None, string): Some explanatory message or error reason\n       why directory was not dumped.", "id": "f13917:c1:m39"}
{"signature": "@path_required<EOL><INDENT>def save(self, description=None, raiseError=True, ntrials=<NUM_LIT:3>):<DEDENT>", "body": "assert isinstance(raiseError, bool), \"<STR_LIT>\"<EOL>assert isinstance(ntrials, int), \"<STR_LIT>\"<EOL>assert ntrials><NUM_LIT:0>, \"<STR_LIT>\"<EOL>if description is not None:<EOL><INDENT>assert isinstance(description, basestring), \"<STR_LIT>\"<EOL><DEDENT>dirInfoPath = os.path.join(self.__path, self.__dirInfo)<EOL>if description is None and not os.path.isfile(dirInfoPath):<EOL><INDENT>description = '<STR_LIT>'<EOL><DEDENT>LR =  Locker(filePath=None, lockPass=str(uuid.uuid1()), lockPath=os.path.join(self.__path, self.__repoLock))<EOL>acquired, code = LR.acquire_lock()<EOL>m = \"<STR_LIT>\"%(code,)<EOL>if not acquired:<EOL><INDENT>assert not raiseError, Exception(m)<EOL>return False, m<EOL><DEDENT>for _trial in range(ntrials):<EOL><INDENT>try:<EOL><INDENT>repoInfoPath = os.path.join(self.__path, self.__repoFile)<EOL>error = None<EOL>self.__save_dirinfo(description=description, dirInfoPath=dirInfoPath)<EOL>if os.path.isfile(repoInfoPath):<EOL><INDENT>with open(repoInfoPath, '<STR_LIT:rb>') as fd:<EOL><INDENT>repo = self.__load_repository_pickle_file(os.path.join(self.__path, self.__repoFile))<EOL>self.__repo['<STR_LIT>'] = repo['<STR_LIT>']<EOL><DEDENT><DEDENT>with open(repoInfoPath, '<STR_LIT:wb>') as fd:<EOL><INDENT>self.__repo[\"<STR_LIT>\"] = time.time()<EOL>pickle.dump( self.__repo,fd, protocol=self._DEFAULT_PICKLE_PROTOCOL )<EOL>fd.flush()<EOL>os.fsync(fd.fileno())<EOL><DEDENT><DEDENT>except Exception as err:<EOL><INDENT>error = \"<STR_LIT>\"%err<EOL>if self.DEBUG_PRINT_FAILED_TRIALS: print(\"<STR_LIT>\"%(_trial, inspect.stack()[<NUM_LIT:1>][<NUM_LIT:3>], str(error)))<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>LR.release_lock()<EOL>assert error is None or not raiseError, error<EOL>return error is None, error<EOL>", "docstring": "Save repository '.pyreprepo' to disk and create (if missing) or\nupdate (if 
description is not None) '.pyrepdirinfo'.\n\n:Parameters:\n    #. description (None, str): Repository main directory information.\n       If given will be replaced.\n    #. raiseError (boolean): Whether to raise encountered error instead\n       of returning failure.\n    #. ntrials (int): After aquiring all locks, ntrials is the maximum\n       number of trials allowed before failing.\n       In rare cases, when multiple processes\n       are accessing the same repository components, different processes\n       can alter repository components between successive lock releases\n       of some other process. Bigger number of trials lowers the\n       likelyhood of failure due to multiple processes same time\n       alteration.\n\n:Returns:\n    #. success (bool): Whether saving was successful.\n    #. error (None, string): Fail to save repository message in case\n       saving is not successful. If success is True, error will be None.", "id": "f13917:c1:m21"}
{"signature": "def get_file_relative_path_by_name(self, name, skip=<NUM_LIT:0>):", "body": "if skip is None:<EOL><INDENT>paths = []<EOL><DEDENT>else:<EOL><INDENT>paths = None<EOL><DEDENT>for path, info in self.walk_files_info():<EOL><INDENT>_, n = os.path.split(path)<EOL>if n==name:<EOL><INDENT>if skip is None:<EOL><INDENT>paths.append(path)<EOL><DEDENT>elif skip><NUM_LIT:0>:<EOL><INDENT>skip -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>paths = path<EOL>break<EOL><DEDENT><DEDENT><DEDENT>return paths<EOL>", "docstring": "Get file relative path given the file name. If file name is redundant in different\ndirectories in the repository, this method ensures to return all or some of the\nfiles according to skip value.\n\nParameters:\n    #. name (string): The file name.\n    #. skip (None, integer): As file names can be identical, skip determines\n       the number of satisfying files name to skip before returning.\\n\n       If None is given, a list of all files relative path will be returned.\n\n:Returns:\n    #. relativePath (string, list): The file relative path.\n       If None, it means file was not found.\\n\n       If skip is None a list of all found files relative paths will be returned.", "id": "f13923:c0:m41"}
{"signature": "@acquire_lock<EOL><INDENT>@sync_required<EOL>def add_directory(self, relativePath, info=None):<DEDENT>", "body": "path = os.path.normpath(relativePath)<EOL>currentDir  = self.path<EOL>currentDict = self<EOL>if path in (\"<STR_LIT>\",\"<STR_LIT:.>\"):<EOL><INDENT>return currentDict<EOL><DEDENT>save = False<EOL>for dir in path.split(os.sep):<EOL><INDENT>dirPath = os.path.join(currentDir, dir)<EOL>if not os.path.exists(dirPath):<EOL><INDENT>os.mkdir(dirPath)<EOL><DEDENT>currentDict = dict.__getitem__(currentDict, \"<STR_LIT>\")<EOL>if currentDict.get(dir, None) is None:<EOL><INDENT>save = True<EOL>currentDict[dir] = {\"<STR_LIT>\":{}, \"<STR_LIT>\":{},<EOL>\"<STR_LIT>\":datetime.utcnow(),<EOL>\"<STR_LIT:id>\":str(uuid.uuid1()),<EOL>\"<STR_LIT:info>\": info} <EOL><DEDENT>currentDict = currentDict[dir]<EOL>currentDir  = dirPath<EOL><DEDENT>if save:<EOL><INDENT>self.save()<EOL><DEDENT>return currentDict<EOL>", "docstring": "Adds a directory in the repository and creates its\nattribute in the Repository with utc timestamp.\nIt insures adding all the missing directories in the path.\n\n:Parameters:\n    #. relativePath (string): The relative to the repository path of the directory to add in the repository.\n    #. info (None, string, pickable object): Any random info about the folder.\n\n:Returns:\n    #. info (dict): The directory info dict.", "id": "f13923:c0:m43"}
{"signature": "@acquire_lock<EOL><INDENT>@sync_required<EOL>def move_directory(self, relativePath, relativeDestination, replace=False, verbose=True):<DEDENT>", "body": "<EOL>relativePath    = os.path.normpath(relativePath)<EOL>relativeDestination = os.path.normpath(relativeDestination)<EOL>filesInfo = list( self.walk_files_info(relativePath=relativePath) )<EOL>dirsPath  = list( self.walk_directories_relative_path(relativePath=relativePath) )<EOL>dirInfoDict, errorMessage = self.get_directory_info(relativePath)<EOL>assert dirInfoDict is not None, errorMessage<EOL>self.remove_directory(relativePath=relativePath, removeFromSystem=False)<EOL>self.add_directory(relativeDestination)<EOL>for RP, info in filesInfo:<EOL><INDENT>source      = os.path.join(self.__path, relativePath, RP)<EOL>destination = os.path.join(self.__path, relativeDestination, RP)<EOL>newDirRP, fileName = os.path.split(os.path.join(relativeDestination, RP))<EOL>dirInfoDict = self.add_directory( newDirRP )<EOL>if os.path.isfile(destination):<EOL><INDENT>if replace:<EOL><INDENT>os.remove(destination)<EOL>if verbose:<EOL><INDENT>warnings.warn(\"<STR_LIT>\"%(fileName, newDirRP))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>warnings.warn(\"<STR_LIT>\"%(fileName,destination))<EOL><DEDENT>continue<EOL><DEDENT><DEDENT>os.rename(source, destination)<EOL>dict.__getitem__(dirInfoDict, \"<STR_LIT>\")[fileName] = info<EOL><DEDENT>self.save()<EOL>", "docstring": "Move a directory in the repository from one place to another. It insures moving all the\nfiles and subdirectories in the system.\n\n:Parameters:\n    #. relativePath (string): The relative to the repository path of the directory to be moved.\n    #. relativeDestination (string): The new relative to the repository path of the directory.\n    #. replace (boolean): Whether to replace existing files with the same name in the new created directory.\n    #. 
verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m45"}
{"signature": "@acquire_lock<EOL><INDENT>def synchronize(self, verbose=False):<DEDENT>", "body": "if self.__path is None:<EOL><INDENT>return<EOL><DEDENT>for dirPath in sorted(list(self.walk_directories_relative_path())):<EOL><INDENT>realPath = os.path.join(self.__path, dirPath)<EOL>if os.path.isdir(realPath):<EOL><INDENT>continue<EOL><DEDENT>if verbose: warnings.warn(\"<STR_LIT>\"%realPath)<EOL>keys = dirPath.split(os.sep)<EOL>dirInfoDict = self<EOL>for idx in range(len(keys)-<NUM_LIT:1>):<EOL><INDENT>dirs = dict.get(dirInfoDict, '<STR_LIT>', None)<EOL>if dirs is None: break<EOL>dirInfoDict = dict.get(dirs, keys[idx], None)<EOL>if dirInfoDict is None: break<EOL><DEDENT>if dirInfoDict is not None:<EOL><INDENT>dirs = dict.get(dirInfoDict, '<STR_LIT>', None)<EOL>if dirs is not None:<EOL><INDENT>dict.pop( dirs, keys[-<NUM_LIT:1>], None )<EOL><DEDENT><DEDENT><DEDENT>for filePath in sorted(list(self.walk_files_relative_path())):<EOL><INDENT>realPath = os.path.join(self.__path, filePath)<EOL>if os.path.isfile( realPath ):<EOL><INDENT>continue<EOL><DEDENT>if verbose: warnings.warn(\"<STR_LIT>\"%realPath)<EOL>keys = filePath.split(os.sep)<EOL>dirInfoDict = self<EOL>for idx in range(len(keys)-<NUM_LIT:1>):<EOL><INDENT>dirs = dict.get(dirInfoDict, '<STR_LIT>', None)<EOL>if dirs is None: break<EOL>dirInfoDict = dict.get(dirs, keys[idx], None)<EOL>if dirInfoDict is None: break<EOL><DEDENT>if dirInfoDict is not None:<EOL><INDENT>files = dict.get(dirInfoDict, '<STR_LIT>', None)<EOL>if files is not None:<EOL><INDENT>dict.pop( files, keys[-<NUM_LIT:1>], None )<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Synchronizes the Repository information with the directory.\nAll registered but missing files and directories in the directory,\nwill be automatically removed from the Repository.\n\n:parameters:\n    #. verbose (boolean): Whether to be warn and inform about any abnormalities.", "id": "f13923:c0:m26"}
{"signature": "@path_required<EOL><INDENT>def create_package(self, path=None, name=None, mode=None):<DEDENT>", "body": "<EOL>assert mode in (None, '<STR_LIT:w>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'), '<STR_LIT>'%str(mode)<EOL>if mode is None:<EOL><INDENT>mode = '<STR_LIT>'<EOL>mode = '<STR_LIT>'<EOL><DEDENT>if path is None:<EOL><INDENT>root = os.path.split(self.__path)[<NUM_LIT:0>]<EOL><DEDENT>elif path.strip() in ('<STR_LIT>','<STR_LIT:.>'):<EOL><INDENT>root = os.getcwd()<EOL><DEDENT>else:<EOL><INDENT>root = os.path.realpath( os.path.expanduser(path) )<EOL><DEDENT>assert os.path.isdir(root), '<STR_LIT>'%path<EOL>if name is None:<EOL><INDENT>ext = mode.split(\"<STR_LIT::>\")<EOL>if len(ext) == <NUM_LIT:2>:<EOL><INDENT>if len(ext[<NUM_LIT:1>]):<EOL><INDENT>ext = \"<STR_LIT:.>\"+ext[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>ext = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>ext = '<STR_LIT>'<EOL><DEDENT>name = os.path.split(self.__path)[<NUM_LIT:1>]+ext<EOL><DEDENT>self.save()<EOL>tarfilePath = os.path.join(root, name)<EOL>try:<EOL><INDENT>tarHandler = tarfile.TarFile.open(tarfilePath, mode=mode)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise Exception(\"<STR_LIT>\"%e)<EOL><DEDENT>for directory in sorted(list(self.walk_directories_relative_path())):<EOL><INDENT>t = tarfile.TarInfo( directory )<EOL>t.type = tarfile.DIRTYPE<EOL>tarHandler.addfile(t)<EOL><DEDENT>for file in self.walk_files_relative_path():<EOL><INDENT>tarHandler.add(os.path.join(self.__path,file), arcname=file)<EOL><DEDENT>tarHandler.add(os.path.join(self.__path,\"<STR_LIT>\"), arcname=\"<STR_LIT>\")<EOL>tarHandler.close()<EOL>", "docstring": "Create a tar file package of all the repository files and directories.\nOnly files and directories that are stored in the repository info\nare stored in the package tar file.\n\n**N.B. On some systems packaging requires root permissions.**\n\n:Parameters:\n    #. 
path (None, string): The real absolute path where to create the package.\n       If None, it will be created in the same directory as the repository\n       If '.' or an empty string is passed, the current working directory will be used.\n    #. name (None, string): The name to give to the package file\n       If None, the package directory name will be used with the appropriate extension added.\n    #. mode (None, string): The writing mode of the tarfile.\n       If None, automatically the best compression mode will be chose.\n       Available modes are ('w', 'w:', 'w:gz', 'w:bz2')", "id": "f13923:c0:m33"}
{"signature": "def get_repository(self, path, info=None, verbose=True):", "body": "<EOL>if path.strip() in ('<STR_LIT>','<STR_LIT:.>'):<EOL><INDENT>path = os.getcwd()<EOL><DEDENT>realPath = os.path.realpath( os.path.expanduser(path) )<EOL>if not os.path.isdir(realPath):<EOL><INDENT>os.makedirs(realPath)<EOL><DEDENT>if not self.is_repository(realPath):<EOL><INDENT>self.create_repository(realPath, info=info, verbose=verbose)<EOL><DEDENT>else:<EOL><INDENT>self.load_repository(realPath)<EOL><DEDENT>", "docstring": "Create a repository at given real path or load any existing one.\nThis method insures the creation of the directory in the system if it is missing.\\n\nUnlike create_repository, this method doesn't erase any existing repository\nin the path but loads it instead.\n\n**N.B. On some systems and some paths, creating a directory may requires root permissions.**\n\n:Parameters:\n    #. path (string): The real absolute path where to create the Repository.\n       If '.' or an empty string is passed, the current working directory will be used.\n    #. info (None, object): Any information that can identify the repository.\n    #. verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m30"}
{"signature": "def pull_file(self, relativePath, name=None, pull=None, update=True):", "body": "<EOL>relativePath = os.path.normpath(relativePath)<EOL>if relativePath == '<STR_LIT:.>':<EOL><INDENT>relativePath = '<STR_LIT>'<EOL>assert name != '<STR_LIT>', \"<STR_LIT>\"<EOL>assert name != '<STR_LIT>', \"<STR_LIT>\"<EOL>assert name != '<STR_LIT>', \"<STR_LIT>\"<EOL><DEDENT>if name is None:<EOL><INDENT>assert len(relativePath), \"<STR_LIT>\"<EOL>relativePath,name = os.path.split(relativePath)<EOL><DEDENT>fileInfo, errorMessage = self.get_file_info(relativePath, name)<EOL>assert fileInfo is not None, errorMessage<EOL>realPath = os.path.join(self.__path, relativePath)<EOL>assert os.path.exists(realPath), \"<STR_LIT>\"%(relativePath, self.__path)<EOL>fileAbsPath = os.path.join(realPath, name)<EOL>assert os.path.isfile(fileAbsPath), \"<STR_LIT>\"%(name,realPath)<EOL>if pull is None:<EOL><INDENT>pull = fileInfo[\"<STR_LIT>\"]<EOL><DEDENT>try:<EOL><INDENT>namespace = {}<EOL>namespace.update( globals() )<EOL>exec( pull.replace(\"<STR_LIT>\", str(os.path.join(realPath,name)) ), namespace )<EOL><DEDENT>except Exception as e:<EOL><INDENT>m = pull.replace(\"<STR_LIT>\", str(os.path.join(realPath,name)) )<EOL>raise Exception( \"<STR_LIT>\"%(m,e) )<EOL><DEDENT>if update:<EOL><INDENT>fileInfo[\"<STR_LIT>\"] = pull<EOL><DEDENT>return namespace['<STR_LIT>']<EOL>", "docstring": "Pull a file's data from the Repository.\n\n:Parameters:\n    #. relativePath (string): The relative to the repository path of the directory where the file should be pulled.\n    #. name (string): The file name.\n       If None is given, name will be split from relativePath.\n    #. 
pull (None, string): The pulling method.\n       If None, the pull method saved in the file info will be used.\n       If a string is given, the string should include all the necessary imports,\n       a '$FILE_PATH' that replaces the absolute file path when the dumping will be performed\n       and finally a PULLED_DATA variable.\n       e.g \"import numpy as np; PULLED_DATA=np.loadtxt(fname='$FILE_PATH')\"\n    #. update (boolean): If pull is not None, Whether to update the pull method stored in the file info by the given pull method.\n\n:Returns:\n    #. data (object): The pulled data from the file.", "id": "f13923:c0:m54"}
{"signature": "@property<EOL><INDENT>def state(self):<DEDENT>", "body": "return self.__state<EOL>", "docstring": "Repository state.", "id": "f13923:c0:m10"}
{"signature": "def path_required(func):", "body": "@wraps(func)<EOL>def wrapper(self, *args, **kwargs):<EOL><INDENT>if self.path is None:<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL>return<EOL><DEDENT>return func(self, *args, **kwargs)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorate methods when repository path is required.", "id": "f13923:m0"}
{"signature": "@property<EOL><INDENT>def path(self):<DEDENT>", "body": "return self.__path<EOL>", "docstring": "The repository instance path which points to the folder and\n        directory where .pyrepinfo is.", "id": "f13923:c0:m12"}
{"signature": "def acquire_lock(func):", "body": "@wraps(func)<EOL>def wrapper(self, *args, **kwargs):<EOL><INDENT>with self.locker as r:<EOL><INDENT>acquired, code, _  = r<EOL>if acquired:<EOL><INDENT>try:<EOL><INDENT>r = func(self, *args, **kwargs)<EOL><DEDENT>except Exception as err:<EOL><INDENT>e = str(err)<EOL><DEDENT>else:<EOL><INDENT>e = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>warnings.warn(\"<STR_LIT>\"%(code,func.__name__) )<EOL>e = None<EOL>r = None<EOL><DEDENT><DEDENT>if e is not None:<EOL><INDENT>traceback.print_stack()<EOL>raise Exception(e)<EOL><DEDENT>return r<EOL><DEDENT>return wrapper<EOL>", "docstring": "Decorate methods when locking repository is required.", "id": "f13923:m1"}
{"signature": "def walk_directories_relative_path(self, relativePath=\"<STR_LIT>\"):", "body": "def walk_directories(directory, relativePath):<EOL><INDENT>directories = dict.__getitem__(directory, '<STR_LIT>')<EOL>dirNames = dict.keys(directories)<EOL>for d in sorted(dirNames):<EOL><INDENT>yield os.path.join(relativePath, d)<EOL><DEDENT>for k in sorted(dict.keys(directories)):<EOL><INDENT>path = os.path.join(relativePath, k)<EOL>dir  = dict.__getitem__(directories, k)<EOL>for e in walk_directories(dir, path):<EOL><INDENT>yield e<EOL><DEDENT><DEDENT><DEDENT>dir, errorMessage = self.get_directory_info(relativePath)<EOL>assert dir is not None, errorMessage<EOL>return walk_directories(dir, relativePath='<STR_LIT>')<EOL>", "docstring": "Walk repository and yield all found directories relative path\n\n:parameters:\n    #. relativePath (str): The relative path from which start the walk.", "id": "f13923:c0:m20"}
{"signature": "@acquire_lock<EOL><INDENT>@sync_required<EOL>def dump_file(self, value, relativePath, name=None,<EOL>description=None, klass=None,<EOL>dump=None, pull=None,<EOL>replace=False, ACID=None, verbose=False):<DEDENT>", "body": "<EOL>if ACID is None:<EOL><INDENT>ACID = self.__ACID<EOL><DEDENT>assert isinstance(ACID, bool), \"<STR_LIT>\"<EOL>relativePath = os.path.normpath(relativePath)<EOL>if relativePath == '<STR_LIT:.>':<EOL><INDENT>relativePath = '<STR_LIT>'<EOL>assert name != '<STR_LIT>', \"<STR_LIT>\"<EOL>assert name != '<STR_LIT>', \"<STR_LIT>\"<EOL>assert name != '<STR_LIT>', \"<STR_LIT>\"<EOL><DEDENT>if name is None:<EOL><INDENT>assert len(relativePath), \"<STR_LIT>\"<EOL>relativePath,name = os.path.split(relativePath)<EOL><DEDENT>self.add_directory(relativePath)<EOL>realPath = os.path.join(self.__path, relativePath)<EOL>dirInfoDict, errorMessage = self.get_directory_info(relativePath)<EOL>assert dirInfoDict is not None, errorMessage<EOL>if name in dict.__getitem__(dirInfoDict, \"<STR_LIT>\"):<EOL><INDENT>if not replace:<EOL><INDENT>if verbose:<EOL><INDENT>warnings.warn(\"<STR_LIT>\"%(name))<EOL><DEDENT>return<EOL><DEDENT><DEDENT>if dump is None:<EOL><INDENT>dump=DEFAULT_DUMP<EOL><DEDENT>if pull is None:<EOL><INDENT>pull=DEFAULT_PULL<EOL><DEDENT>if ACID:<EOL><INDENT>savePath = os.path.join(tempfile.gettempdir(), str(uuid.uuid1()))<EOL><DEDENT>else:<EOL><INDENT>savePath = os.path.join(realPath,name)<EOL><DEDENT>try:<EOL><INDENT>exec( dump.replace(\"<STR_LIT>\", str(savePath)) )<EOL><DEDENT>except Exception as e:<EOL><INDENT>message = \"<STR_LIT>\"%e<EOL>if '<STR_LIT>' in dump:<EOL><INDENT>message += '<STR_LIT>'%str(get_pickling_errors(value))<EOL><DEDENT>raise Exception( message )<EOL><DEDENT>if ACID:<EOL><INDENT>try:<EOL><INDENT>shutil.copyfile(savePath, os.path.join(realPath,name))<EOL><DEDENT>except Exception as e:<EOL><INDENT>os.remove(savePath)<EOL>if 
verbose:<EOL><INDENT>warnings.warn(e)<EOL><DEDENT>return<EOL><DEDENT>os.remove(savePath)<EOL><DEDENT>if klass is None and value is not None:<EOL><INDENT>klass = value.__class__<EOL><DEDENT>if klass is not None:<EOL><INDENT>assert inspect.isclass(klass), \"<STR_LIT>\"<EOL><DEDENT>dict.__getitem__(dirInfoDict, \"<STR_LIT>\")[name] = {\"<STR_LIT>\":dump,<EOL>\"<STR_LIT>\":pull,<EOL>\"<STR_LIT>\":datetime.utcnow(),<EOL>\"<STR_LIT:id>\":str(uuid.uuid1()),<EOL>\"<STR_LIT:class>\": klass,<EOL>\"<STR_LIT:description>\":description}<EOL>self.save()<EOL>", "docstring": "Dump a file using its value to the system and creates its\nattribute in the Repository with utc timestamp.\n\n:Parameters:\n    #. value (object): The value of a file to dump and add to the repository. It is any python object or file.\n    #. relativePath (str): The relative to the repository path of the directory where the file should be dumped.\n       If relativePath does not exist, it will be created automatically.\n    #. name (string): The file name.\n       If None is given, name will be split from relativePath.\n    #. description (None, string, pickable object): Any random description about the file.\n    #. klass (None, class): The dumped object class. If None is given\n       klass will be automatically set to the following value.__class__\n    #. dump (None, string): The dumping method.\n       If None it will be set automatically to pickle and therefore the object must be pickleable.\n       If a string is given, the string should include all the necessary imports\n       and a '$FILE_PATH' that replaces the absolute file path when the dumping will be performed.\\n\n       e.g. \"import numpy as np; np.savetxt(fname='$FILE_PATH', X=value, fmt='%.6e')\"\n    #. 
pull (None, string): The pulling method.\n       If None it will be set automatically to pickle and therefore the object must be pickleable.\n       If a string is given, the string should include all the necessary imports,\n       a '$FILE_PATH' that replaces the absolute file path when the dumping will be performed\n       and finally a PULLED_DATA variable.\\n\n       e.g \"import numpy as np; PULLED_DATA=np.loadtxt(fname='$FILE_PATH')\"\n    #. replace (boolean): Whether to replace any existing file with the same name if existing.\n    #. ACID (None, boolean): Whether to ensure the ACID (Atomicity, Consistency, Isolation, Durability)\n       properties of the repository upon dumping a file. This is ensured by dumping the file in\n       a temporary path first and then moving it to the desired path.\n       If None is given, repository ACID property will be used.\n    #. verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m50"}
{"signature": "def walk_directory_directories_relative_path(self, relativePath=\"<STR_LIT>\"):", "body": "<EOL>errorMessage = \"<STR_LIT>\"<EOL>relativePath = os.path.normpath(relativePath)<EOL>dirInfoDict, errorMessage = self.get_directory_info(relativePath)<EOL>assert dirInfoDict is not None, errorMessage<EOL>for dname in dict.__getitem__(dirInfoDict, \"<STR_LIT>\"):<EOL><INDENT>yield os.path.join(relativePath, dname)<EOL><DEDENT>", "docstring": "Walk a certain directory in repository and yield all found directories relative path.\n\n:parameters:\n    #. relativePath (str): The relative path of the directory.", "id": "f13923:c0:m24"}
{"signature": "@property<EOL><INDENT>def locker(self):<DEDENT>", "body": "return self.__locker<EOL>", "docstring": "Repository locker manager.", "id": "f13923:c0:m11"}
{"signature": "@acquire_lock<EOL><INDENT>@sync_required<EOL>def update_file(self, value, relativePath, name=None,<EOL>description=False, klass=False,<EOL>dump=False, pull=False,<EOL>ACID=None, verbose=False):<DEDENT>", "body": "<EOL>if ACID is None:<EOL><INDENT>ACID = self.__ACID<EOL><DEDENT>assert isinstance(ACID, bool), \"<STR_LIT>\"<EOL>relativePath = os.path.normpath(relativePath)<EOL>if relativePath == '<STR_LIT:.>':<EOL><INDENT>relativePath = '<STR_LIT>'<EOL>assert name != '<STR_LIT>', \"<STR_LIT>\"<EOL>assert name != '<STR_LIT>', \"<STR_LIT>\"<EOL>assert name != '<STR_LIT>', \"<STR_LIT>\"<EOL><DEDENT>if name is None:<EOL><INDENT>assert len(relativePath), \"<STR_LIT>\"<EOL>relativePath,name = os.path.split(relativePath)<EOL><DEDENT>fileInfoDict, errorMessage = self.get_file_info(relativePath, name)<EOL>assert fileInfoDict is not None, errorMessage<EOL>realPath = os.path.join(self.__path, relativePath)<EOL>if verbose:<EOL><INDENT>if not os.path.isfile( os.path.join(realPath, name) ):<EOL><INDENT>warnings.warn(\"<STR_LIT>\"%os.path.join(realPath, name))<EOL><DEDENT><DEDENT>if not dump:<EOL><INDENT>dump = fileInfoDict[\"<STR_LIT>\"]<EOL><DEDENT>if not pull:<EOL><INDENT>pull = fileInfoDict[\"<STR_LIT>\"]<EOL><DEDENT>if ACID:<EOL><INDENT>savePath = os.path.join(tempfile.gettempdir(), name)<EOL><DEDENT>else:<EOL><INDENT>savePath = os.path.join(realPath,name)<EOL><DEDENT>try:<EOL><INDENT>exec( dump.replace(\"<STR_LIT>\", str(savePath)) )<EOL><DEDENT>except Exception as e:<EOL><INDENT>message = \"<STR_LIT>\"%e<EOL>if '<STR_LIT>' in dump:<EOL><INDENT>message += '<STR_LIT>'%str(get_pickling_errors(value))<EOL><DEDENT>raise Exception( message )<EOL><DEDENT>if ACID:<EOL><INDENT>try:<EOL><INDENT>shutil.copyfile(savePath, os.path.join(realPath,name))<EOL><DEDENT>except Exception as e:<EOL><INDENT>os.remove(savePath)<EOL>if verbose:<EOL><INDENT>warnings.warn(e)<EOL><DEDENT>return<EOL><DEDENT>os.remove(savePath)<EOL><DEDENT>fileInfoDict[\"<STR_LIT>\"] = 
datetime.utcnow()<EOL>if description is not False:<EOL><INDENT>fileInfoDict[\"<STR_LIT:description>\"] = description<EOL><DEDENT>if klass is not False:<EOL><INDENT>assert inspect.isclass(klass), \"<STR_LIT>\"<EOL>fileInfoDict[\"<STR_LIT:class>\"] = klass<EOL><DEDENT>self.save()<EOL>", "docstring": "Update the value and the utc timestamp of a file that is already in the Repository.\\n\nIf file is not registered in repository, and error will be thrown.\\n\nIf file is missing in the system, it will be regenerated as dump method is called.\n\n:Parameters:\n    #. value (object): The value of the file to update. It is any python object or a file.\n    #. relativePath (str): The relative to the repository path of the directory where the file should be dumped.\n    #. name (None, string): The file name.\n       If None is given, name will be split from relativePath.\n    #. description (False, string, pickable object): Any random description about the file.\n       If False is given, the description info won't be updated,\n       otherwise it will be update to what description argument value is.\n    #. klass (False, class): The dumped object class. If False is given,\n       the class info won't be updated, otherwise it will be update to what klass argument value is.\n    #. dump (False, string): The new dump method. If False is given, the old one will be used.\n    #. pull (False, string): The new pull method. If False is given, the old one will be used.\n    #. ACID (boolean): Whether to ensure the ACID (Atomicity, Consistency, Isolation, Durability)\n       properties of the repository upon dumping a file. This is ensured by dumping the file in\n       a temporary path first and then moving it to the desired path.\n       If None is given, repository ACID property will be used.\n    #. verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m52"}
{"signature": "def walk_directory_directories_info(self, relativePath=\"<STR_LIT>\"):", "body": "<EOL>relativePath = os.path.normpath(relativePath)<EOL>dirInfoDict, errorMessage = self.get_directory_info(relativePath)<EOL>assert dirInfoDict is not None, errorMessage<EOL>for fname in dict.__getitem__(dirInfoDict, \"<STR_LIT>\"):<EOL><INDENT>yield os.path.join(relativePath, fname), dict.__getitem__(dirInfoDict, \"<STR_LIT>\")[fname]<EOL><DEDENT>", "docstring": "Walk a certain directory in repository and yield tuples as the following:\\n\n(relative path joined with directory name, file info dict).\n\n:parameters:\n    #. relativePath (str): The relative path of the directory.", "id": "f13923:c0:m25"}
{"signature": "@acquire_lock<EOL><INDENT>@sync_required<EOL>def dump_copy(self, path, relativePath, name=None,<EOL>description=None,<EOL>replace=False, verbose=False):<DEDENT>", "body": "relativePath = os.path.normpath(relativePath)<EOL>if relativePath == '<STR_LIT:.>':<EOL><INDENT>relativePath = '<STR_LIT>'<EOL><DEDENT>if name is None:<EOL><INDENT>_,name = os.path.split(path)<EOL><DEDENT>self.add_directory(relativePath)<EOL>realPath = os.path.join(self.__path, relativePath)<EOL>dirInfoDict, errorMessage = self.get_directory_info(relativePath)<EOL>assert dirInfoDict is not None, errorMessage<EOL>if name in dict.__getitem__(dirInfoDict, \"<STR_LIT>\"):<EOL><INDENT>if not replace:<EOL><INDENT>if verbose:<EOL><INDENT>warnings.warn(\"<STR_LIT>\"%(name))<EOL><DEDENT>return<EOL><DEDENT><DEDENT>dump = \"<STR_LIT>\"<EOL>pull = \"<STR_LIT>\"<EOL>try:<EOL><INDENT>shutil.copyfile(path, os.path.join(realPath,name))<EOL><DEDENT>except Exception as e:<EOL><INDENT>if verbose:<EOL><INDENT>warnings.warn(e)<EOL><DEDENT>return<EOL><DEDENT>klass = None<EOL>dict.__getitem__(dirInfoDict, \"<STR_LIT>\")[name] = {\"<STR_LIT>\":dump,<EOL>\"<STR_LIT>\":pull,<EOL>\"<STR_LIT>\":datetime.utcnow(),<EOL>\"<STR_LIT:id>\":str(uuid.uuid1()),<EOL>\"<STR_LIT:class>\": klass,<EOL>\"<STR_LIT:description>\":description}<EOL>self.save()<EOL>", "docstring": "Copy an exisitng system file to the repository.\nattribute in the Repository with utc timestamp.\n\n:Parameters:\n    #. path (str): The full path of the file to copy into the repository.\n    #. relativePath (str): The relative to the repository path of the directory where the file should be dumped.\n       If relativePath does not exist, it will be created automatically.\n    #. name (string): The file name.\n       If None is given, name will be split from path.\n    #. description (None, string, pickable object): Any random description about the file.\n    #. 
replace (boolean): Whether to replace any existing file with the same name if existing.\n    #. verbose (boolean): Whether to be warn and informed about any abnormalities.", "id": "f13923:c0:m49"}
{"signature": "def get_directory_info(self, relativePath):", "body": "relativePath = os.path.normpath(relativePath)<EOL>if relativePath in ('<STR_LIT>','<STR_LIT:.>'):<EOL><INDENT>return self, \"<STR_LIT>\"<EOL><DEDENT>currentDir  = self.__path<EOL>dirInfoDict = self<EOL>for dir in relativePath.split(os.sep):<EOL><INDENT>dirInfoDict = dict.__getitem__(dirInfoDict, \"<STR_LIT>\")<EOL>currentDir = os.path.join(currentDir, dir)<EOL>if not os.path.exists(currentDir):<EOL><INDENT>return None,  \"<STR_LIT>\"%currentDir<EOL><DEDENT>val = dirInfoDict.get(dir, None)<EOL>if val is None:<EOL><INDENT>return None,  \"<STR_LIT>\"%currentDir<EOL><DEDENT>dirInfoDict = val<EOL><DEDENT>return dirInfoDict, \"<STR_LIT>\"<EOL>", "docstring": "get directory info from the Repository.\n\n:Parameters:\n    #. relativePath (string): The relative to the repository path of the directory.\n\n:Returns:\n    #. info (None, dictionary): The directory information dictionary.\n       If None, it means an error has occurred.\n    #. error (string): The error message if any error occurred.", "id": "f13923:c0:m35"}
{"signature": "@acquire_lock<EOL><INDENT>@sync_required<EOL>def remove_directory(self, relativePath, removeFromSystem=False):<DEDENT>", "body": "<EOL>relativePath = os.path.normpath(relativePath)<EOL>parentDirInfoDict, errorMessage = self.get_parent_directory_info(relativePath)<EOL>assert parentDirInfoDict is not None, errorMessage<EOL>path, name = os.path.split(relativePath)<EOL>if dict.__getitem__(parentDirInfoDict, '<STR_LIT>').get(name, None) is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\"%(name, path))<EOL><DEDENT>if removeFromSystem:<EOL><INDENT>for rp in self.walk_files_relative_path(relativePath=relativePath):<EOL><INDENT>ap = os.path.join(self.__path, relativePath, rp)<EOL>if not os.path.isfile(ap):<EOL><INDENT>continue<EOL><DEDENT>if not os.path.exists(ap):<EOL><INDENT>continue<EOL><DEDENT>if os.path.isfile(ap):<EOL><INDENT>os.remove( ap )<EOL><DEDENT><DEDENT>for rp in self.walk_directories_relative_path(relativePath=relativePath):<EOL><INDENT>ap = os.path.join(self.__path, relativePath, rp)<EOL>if not os.path.isdir(ap):<EOL><INDENT>continue<EOL><DEDENT>if not os.path.exists(ap):<EOL><INDENT>continue<EOL><DEDENT>if not len(os.listdir(ap)):<EOL><INDENT>os.rmdir(ap)<EOL><DEDENT><DEDENT><DEDENT>dict.__getitem__(parentDirInfoDict, '<STR_LIT>').pop(name, None)<EOL>ap = os.path.join(self.__path, relativePath)<EOL>if not os.path.isdir(ap):<EOL><INDENT>if not len(os.listdir(ap)):<EOL><INDENT>os.rmdir(ap)<EOL><DEDENT><DEDENT>self.save()<EOL>", "docstring": "Remove directory from repository.\n\n:Parameters:\n    #. relativePath (string): The relative to the repository path of the directory to remove from the repository.\n    #. removeFromSystem (boolean): Whether to also remove directory and all files from the system.\\n\n       Only files saved in the repository will be removed and empty left directories.", "id": "f13923:c0:m44"}
{"signature": "def is_repository(self, path):", "body": "realPath = os.path.realpath( os.path.expanduser(path) )<EOL>if not os.path.isdir(realPath):<EOL><INDENT>return False<EOL><DEDENT>if \"<STR_LIT>\" not in os.listdir(realPath):<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Check if there is a Repository in path.\n\n:Parameters:\n    #. path (string): The real path of the directory where to check if there is a repository.\n\n:Returns:\n    #. result (boolean): Whether its a repository or not.", "id": "f13923:c0:m34"}
{"signature": "def set_ACID(self, ACID):", "body": "assert isinstance(ACID, bool), \"<STR_LIT>\"<EOL>self.__ACID = ACID<EOL>", "docstring": "Set the gobal ACID poperty of the repository.\n\n:parameters:\n    #. ACID (boolean): Whether to ensure the ACID (Atomicity, Consistency, Isolation, Durability)\n       properties of the repository upon dumping a file. This is ensured by dumping the file in\n       a temporary path first and then moving it to the desired path.", "id": "f13923:c0:m16"}
{"signature": "def walk_files_relative_path(self, relativePath=\"<STR_LIT>\"):", "body": "def walk_files(directory, relativePath):<EOL><INDENT>directories = dict.__getitem__(directory, '<STR_LIT>')<EOL>files       = dict.__getitem__(directory, '<STR_LIT>')<EOL>for f in sorted(files):<EOL><INDENT>yield os.path.join(relativePath, f)<EOL><DEDENT>for k in sorted(dict.keys(directories)):<EOL><INDENT>path = os.path.join(relativePath, k)<EOL>dir  = directories.__getitem__(k)<EOL>for e in walk_files(dir, path):<EOL><INDENT>yield e<EOL><DEDENT><DEDENT><DEDENT>dir, errorMessage = self.get_directory_info(relativePath)<EOL>assert dir is not None, errorMessage<EOL>return walk_files(dir, relativePath='<STR_LIT>')<EOL>", "docstring": "Walk the repository and yield all found files relative path joined with file name.\n\n:parameters:\n    #. relativePath (str): The relative path from which start the walk.", "id": "f13923:c0:m18"}
{"signature": "def get_doc():", "body": "return __onlinedoc__<EOL>", "docstring": "Get pyrep's official online documentation link.", "id": "f13924:m3"}
{"signature": "def create_new_version(<EOL>self,<EOL>name,<EOL>subject,<EOL>text='<STR_LIT>',<EOL>template_id=None,<EOL>html=None,<EOL>locale=None,<EOL>timeout=None<EOL>):", "body": "if(html):<EOL><INDENT>payload = {<EOL>'<STR_LIT:name>': name,<EOL>'<STR_LIT>': subject,<EOL>'<STR_LIT:html>': html,<EOL>'<STR_LIT:text>': text<EOL>}<EOL><DEDENT>else:<EOL><INDENT>payload = {<EOL>'<STR_LIT:name>': name,<EOL>'<STR_LIT>': subject,<EOL>'<STR_LIT:text>': text<EOL>}<EOL><DEDENT>if locale:<EOL><INDENT>url = self.TEMPLATES_SPECIFIC_LOCALE_VERSIONS_ENDPOINT % (<EOL>template_id,<EOL>locale<EOL>)<EOL><DEDENT>else:<EOL><INDENT>url = self.TEMPLATES_NEW_VERSION_ENDPOINT % template_id<EOL><DEDENT>return self._api_request(<EOL>url,<EOL>self.HTTP_POST,<EOL>payload=payload,<EOL>timeout=timeout<EOL>)<EOL>", "docstring": "API call to create a new version of a template", "id": "f13929:c0:m15"}
{"signature": "def create_new_locale(<EOL>self,<EOL>template_id,<EOL>locale,<EOL>version_name,<EOL>subject,<EOL>text='<STR_LIT>',<EOL>html='<STR_LIT>',<EOL>timeout=None<EOL>):", "body": "payload = {<EOL>'<STR_LIT>': locale,<EOL>'<STR_LIT:name>': version_name,<EOL>'<STR_LIT>': subject<EOL>}<EOL>if html:<EOL><INDENT>payload['<STR_LIT:html>'] = html<EOL><DEDENT>if text:<EOL><INDENT>payload['<STR_LIT:text>'] = text<EOL><DEDENT>return self._api_request(<EOL>self.TEMPLATES_LOCALES_ENDPOINT % template_id,<EOL>self.HTTP_POST,<EOL>payload=payload,<EOL>timeout=timeout<EOL>)<EOL>", "docstring": "API call to create a new locale and version of a template", "id": "f13929:c0:m14"}
{"signature": "def _parse_response(self, response):", "body": "if not self._raise_errors:<EOL><INDENT>return response<EOL><DEDENT>is_4xx_error = str(response.status_code)[<NUM_LIT:0>] == '<STR_LIT:4>'<EOL>is_5xx_error = str(response.status_code)[<NUM_LIT:0>] == '<STR_LIT:5>'<EOL>content = response.content<EOL>if response.status_code == <NUM_LIT>:<EOL><INDENT>raise AuthenticationError(content)<EOL><DEDENT>elif is_4xx_error:<EOL><INDENT>raise APIError(content)<EOL><DEDENT>elif is_5xx_error:<EOL><INDENT>raise ServerError(content)<EOL><DEDENT>return response<EOL>", "docstring": "Parses the API response and raises appropriate errors if\n        raise_errors was set to True", "id": "f13929:c0:m5"}
{"signature": "def send(<EOL>self,<EOL>email_id,<EOL>recipient,<EOL>email_data=None,<EOL>sender=None,<EOL>cc=None,<EOL>bcc=None,<EOL>tags=[],<EOL>headers={},<EOL>esp_account=None,<EOL>locale=None,<EOL>email_version_name=None,<EOL>inline=None,<EOL>files=[],<EOL>timeout=None<EOL>):", "body": "if not email_data:<EOL><INDENT>email_data = {}<EOL><DEDENT>if isinstance(recipient, string_types):<EOL><INDENT>warnings.warn(<EOL>\"<STR_LIT>\",<EOL>DeprecationWarning)<EOL>recipient = {'<STR_LIT:address>': recipient}<EOL><DEDENT>payload = {<EOL>'<STR_LIT>': email_id,<EOL>'<STR_LIT>': recipient,<EOL>'<STR_LIT>': email_data<EOL>}<EOL>if sender:<EOL><INDENT>payload['<STR_LIT>'] = sender<EOL><DEDENT>if cc:<EOL><INDENT>if not type(cc) == list:<EOL><INDENT>logger.error(<EOL>'<STR_LIT>' % type(cc))<EOL><DEDENT>payload['<STR_LIT>'] = cc<EOL><DEDENT>if bcc:<EOL><INDENT>if not type(bcc) == list:<EOL><INDENT>logger.error(<EOL>'<STR_LIT>' % type(bcc))<EOL><DEDENT>payload['<STR_LIT>'] = bcc<EOL><DEDENT>if tags:<EOL><INDENT>if not type(tags) == list:<EOL><INDENT>logger.error(<EOL>'<STR_LIT>' % (type(tags)))<EOL><DEDENT>payload['<STR_LIT>'] = tags<EOL><DEDENT>if headers:<EOL><INDENT>if not type(headers) == dict:<EOL><INDENT>logger.error(<EOL>'<STR_LIT>' % (<EOL>type(headers)<EOL>)<EOL>)<EOL><DEDENT>payload['<STR_LIT>'] = headers<EOL><DEDENT>if esp_account:<EOL><INDENT>if not isinstance(esp_account, string_types):<EOL><INDENT>logger.error(<EOL>'<STR_LIT>' % (<EOL>type(esp_account)<EOL>)<EOL>)<EOL><DEDENT>payload['<STR_LIT>'] = esp_account<EOL><DEDENT>if locale:<EOL><INDENT>if not isinstance(locale, string_types):<EOL><INDENT>logger.error(<EOL>'<STR_LIT>' % (type(locale))<EOL>)<EOL><DEDENT>payload['<STR_LIT>'] = locale<EOL><DEDENT>if email_version_name:<EOL><INDENT>if not isinstance(email_version_name, string_types):<EOL><INDENT>logger.error(<EOL>'<STR_LIT>' % (<EOL>type(email_version_name)))<EOL><DEDENT>payload['<STR_LIT>'] = email_version_name<EOL><DEDENT>if 
inline:<EOL><INDENT>payload['<STR_LIT>'] = self._make_file_dict(inline)<EOL><DEDENT>if files:<EOL><INDENT>payload['<STR_LIT>'] = [self._make_file_dict(f) for f in files]<EOL><DEDENT>return self._api_request(<EOL>self.SEND_ENDPOINT,<EOL>self.HTTP_POST,<EOL>payload=payload,<EOL>timeout=timeout<EOL>)<EOL>", "docstring": "API call to send an email", "id": "f13929:c0:m25"}
{"signature": "def templates(self, timeout=None):", "body": "return self._api_request(<EOL>self.TEMPLATES_ENDPOINT,<EOL>self.HTTP_GET,<EOL>timeout=timeout<EOL>)<EOL>", "docstring": "API call to get a list of templates", "id": "f13929:c0:m10"}
{"signature": "def _api_request(self, endpoint, http_method, *args, **kwargs):", "body": "logger.debug('<STR_LIT>' % endpoint)<EOL>path = self._build_request_path(endpoint, absolute=False)<EOL>logger.debug('<STR_LIT>' % path)<EOL>data = None<EOL>if '<STR_LIT>' in kwargs:<EOL><INDENT>data = kwargs['<STR_LIT>']<EOL><DEDENT>logger.debug('<STR_LIT>' % data)<EOL>command = {<EOL>\"<STR_LIT:path>\": path,<EOL>\"<STR_LIT>\": http_method<EOL>}<EOL>if data:<EOL><INDENT>command['<STR_LIT:body>'] = data<EOL><DEDENT>self._commands.append(command)<EOL>", "docstring": "Private method for api requests", "id": "f13929:c1:m1"}
{"signature": "def __init__(<EOL>self,<EOL>api_key=None,<EOL>json_encoder=SendwithusJSONEncoder,<EOL>raise_errors=False,<EOL>default_timeout=None,<EOL>**kwargs<EOL>):", "body": "if not api_key:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>self.API_KEY = api_key<EOL>self.DEFAULT_TIMEOUT = default_timeout<EOL>self._json_encoder = json_encoder<EOL>self._raise_errors = raise_errors<EOL>if '<STR_LIT>' in kwargs:<EOL><INDENT>self.API_HOST = kwargs['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in kwargs:<EOL><INDENT>self.API_PROTO = kwargs['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in kwargs:<EOL><INDENT>self.API_PORT = kwargs['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in kwargs:<EOL><INDENT>self.API_VERSION = kwargs['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in kwargs:<EOL><INDENT>self.DEBUG = kwargs['<STR_LIT>']<EOL><DEDENT>if self.DEBUG:<EOL><INDENT>logging.basicConfig(format=LOGGER_FORMAT, level=logging.DEBUG)<EOL>logger.debug('<STR_LIT>')<EOL>logger.propagate = True<EOL><DEDENT>", "docstring": "Constructor, expects api key", "id": "f13929:c0:m0"}
{"signature": "def create_snippet(self, name, body, timeout=None):", "body": "payload = {<EOL>'<STR_LIT:name>': name,<EOL>'<STR_LIT:body>': body<EOL>}<EOL>return self._api_request(<EOL>self.SNIPPETS_ENDPOINT,<EOL>self.HTTP_POST,<EOL>payload=payload,<EOL>timeout=timeout<EOL>)<EOL>", "docstring": "API call to create a Snippet", "id": "f13929:c0:m19"}
{"signature": "def get_template(self, template_id, version=None, timeout=None):", "body": "if (version):<EOL><INDENT>return self._api_request(<EOL>self.TEMPLATES_VERSION_ENDPOINT % (template_id, version),<EOL>self.HTTP_GET,<EOL>timeout=timeout<EOL>)<EOL><DEDENT>else:<EOL><INDENT>return self._api_request(<EOL>self.TEMPLATES_SPECIFIC_ENDPOINT % template_id,<EOL>self.HTTP_GET,<EOL>timeout=timeout<EOL>)<EOL><DEDENT>", "docstring": "API call to get a specific template", "id": "f13929:c0:m11"}
{"signature": "def execute(self, timeout=None):", "body": "logger.debug('<STR_LIT>' % len(self._commands))<EOL>auth = self._build_http_auth()<EOL>headers = self._build_request_headers()<EOL>logger.debug('<STR_LIT>' % headers)<EOL>logger.debug('<STR_LIT>' % len(self._commands))<EOL>path = self._build_request_path(self.BATCH_ENDPOINT)<EOL>data = json.dumps(self._commands, cls=self._json_encoder)<EOL>r = requests.post(<EOL>path,<EOL>auth=auth,<EOL>headers=headers,<EOL>data=data,<EOL>timeout=(self.DEFAULT_TIMEOUT if timeout is None else timeout)<EOL>)<EOL>self._commands = []<EOL>logger.debug('<STR_LIT>' % r.status_code)<EOL>try:<EOL><INDENT>logger.debug('<STR_LIT>' % r.json())<EOL><DEDENT>except:<EOL><INDENT>logger.debug('<STR_LIT>' % r.content)<EOL><DEDENT>return r<EOL>", "docstring": "Execute all currently queued batch commands", "id": "f13929:c1:m2"}
{"signature": "def create_template(<EOL>self,<EOL>name,<EOL>subject,<EOL>html,<EOL>text='<STR_LIT>',<EOL>timeout=None<EOL>):", "body": "payload = {<EOL>'<STR_LIT:name>': name,<EOL>'<STR_LIT>': subject,<EOL>'<STR_LIT:html>': html,<EOL>'<STR_LIT:text>': text<EOL>}<EOL>return self._api_request(<EOL>self.TEMPLATES_ENDPOINT,<EOL>self.HTTP_POST,<EOL>payload=payload,<EOL>timeout=timeout<EOL>)<EOL>", "docstring": "API call to create a template", "id": "f13929:c0:m13"}
{"signature": "def get_log(self, log_id, timeout=None):", "body": "return self._api_request(<EOL>self.GET_LOG_ENDPOINT % log_id,<EOL>self.HTTP_GET,<EOL>timeout=timeout<EOL>)<EOL>", "docstring": "API call to get a specific log entry", "id": "f13929:c0:m7"}
{"signature": "def _make_file_dict(self, f):", "body": "if isinstance(f, dict):<EOL><INDENT>file_obj = f['<STR_LIT:file>']<EOL>if '<STR_LIT:filename>' in f:<EOL><INDENT>file_name = f['<STR_LIT:filename>']<EOL><DEDENT>else:<EOL><INDENT>file_name = file_obj.name<EOL><DEDENT><DEDENT>else:<EOL><INDENT>file_obj = f<EOL>file_name = f.name<EOL><DEDENT>b64_data = base64.b64encode(file_obj.read())<EOL>return {<EOL>'<STR_LIT:id>': file_name,<EOL>'<STR_LIT:data>': b64_data.decode() if six.PY3 else b64_data,<EOL>}<EOL>", "docstring": "Make a dictionary with filename and base64 file data", "id": "f13929:c0:m24"}
{"signature": "def _api_request(self, endpoint, http_method, *args, **kwargs):", "body": "logger.debug('<STR_LIT>' % endpoint)<EOL>auth = self._build_http_auth()<EOL>headers = self._build_request_headers(kwargs.get('<STR_LIT>'))<EOL>logger.debug('<STR_LIT>' % headers)<EOL>path = self._build_request_path(endpoint)<EOL>logger.debug('<STR_LIT>' % path)<EOL>data = self._build_payload(kwargs.get('<STR_LIT>'))<EOL>if not data:<EOL><INDENT>data = kwargs.get('<STR_LIT:data>')<EOL><DEDENT>logger.debug('<STR_LIT>' % data)<EOL>req_kw = dict(<EOL>auth=auth,<EOL>headers=headers,<EOL>timeout=kwargs.get('<STR_LIT>', self.DEFAULT_TIMEOUT)<EOL>)<EOL>if (http_method == self.HTTP_POST):<EOL><INDENT>if (data):<EOL><INDENT>r = requests.post(path, data=data, **req_kw)<EOL><DEDENT>else:<EOL><INDENT>r = requests.post(path, **req_kw)<EOL><DEDENT><DEDENT>elif http_method == self.HTTP_PUT:<EOL><INDENT>if (data):<EOL><INDENT>r = requests.put(path, data=data, **req_kw)<EOL><DEDENT>else:<EOL><INDENT>r = requests.put(path, **req_kw)<EOL><DEDENT><DEDENT>elif http_method == self.HTTP_DELETE:<EOL><INDENT>r = requests.delete(path, **req_kw)<EOL><DEDENT>else:<EOL><INDENT>r = requests.get(path, **req_kw)<EOL><DEDENT>logger.debug('<STR_LIT>' % r.status_code)<EOL>try:<EOL><INDENT>logger.debug('<STR_LIT>' % r.json())<EOL><DEDENT>except:<EOL><INDENT>logger.debug('<STR_LIT>' % r.content)<EOL><DEDENT>return self._parse_response(r)<EOL>", "docstring": "Private method for api requests", "id": "f13929:c0:m6"}
{"signature": "def foo(n):", "body": "if n == <NUM_LIT:0>:<EOL><INDENT>return n<EOL><DEDENT>else:<EOL><INDENT>return foo(n - <NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Return 0 for all n >= 0", "id": "f13971:m0"}
{"signature": "def _get_namedrange(book, rangename, sheetname=None):", "body": "def cond(namedef):<EOL><INDENT>if namedef.type.upper() == \"<STR_LIT>\":<EOL><INDENT>if namedef.name.upper() == rangename.upper():<EOL><INDENT>if sheetname is None:<EOL><INDENT>if not namedef.localSheetId:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>else:  <EOL><INDENT>sheet_id = [sht.upper() for sht in book.sheetnames].index(<EOL>sheetname.upper()<EOL>)<EOL>if namedef.localSheetId == sheet_id:<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return False<EOL><DEDENT>def get_destinations(name_def):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>from openpyxl.formula import Tokenizer<EOL>from openpyxl.utils.cell import SHEETRANGE_RE<EOL>if name_def.type == \"<STR_LIT>\":<EOL><INDENT>tok = Tokenizer(\"<STR_LIT:=>\" + name_def.value)<EOL>for part in tok.items:<EOL><INDENT>if part.subtype == \"<STR_LIT>\":<EOL><INDENT>m = SHEETRANGE_RE.match(part.value)<EOL>if m.group(\"<STR_LIT>\"):<EOL><INDENT>sheet_name = m.group(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>sheet_name = m.group(\"<STR_LIT>\")<EOL><DEDENT>yield sheet_name, m.group(\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT><DEDENT>namedef = next(<EOL>(item for item in book.defined_names.definedName if cond(item)), None<EOL>)<EOL>if namedef is None:<EOL><INDENT>return None<EOL><DEDENT>dests = get_destinations(namedef)<EOL>xlranges = []<EOL>sheetnames_upper = [name.upper() for name in book.sheetnames]<EOL>for sht, addr in dests:<EOL><INDENT>if sheetname:<EOL><INDENT>sht = sheetname<EOL><DEDENT>index = sheetnames_upper.index(sht.upper())<EOL>xlranges.append(book.worksheets[index][addr])<EOL><DEDENT>if len(xlranges) == <NUM_LIT:1>:<EOL><INDENT>return xlranges[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return xlranges<EOL><DEDENT>", "docstring": "Get range from a workbook.\n\n    A workbook can contain multiple definitions for a single name,\n    as a name can be defined for the entire book or for\n    a particular sheet.\n\n    If sheet is 
None, the book-wide def is searched,\n    otherwise sheet-local def is looked up.\n\n    Args:\n        book: An openpyxl workbook object.\n        rangename (str): Range expression, such as \"A1\", \"$G4:$K10\",\n            named range \"NamedRange1\".\n        sheetname (str, optional): None for book-wide name def,\n            sheet name for sheet-local named range.\n\n    Returns:\n        Range object specified by the name.", "id": "f13974:m4"}
{"signature": "def read_range(filepath, range_expr, sheet=None, dict_generator=None):", "body": "def default_generator(cells):<EOL><INDENT>for row_ind, row in enumerate(cells):<EOL><INDENT>for col_ind, cell in enumerate(row):<EOL><INDENT>yield (row_ind, col_ind), cell.value<EOL><DEDENT><DEDENT><DEDENT>book = opxl.load_workbook(filepath, data_only=True)<EOL>if _is_range_address(range_expr):<EOL><INDENT>sheet_names = [name.upper() for name in book.sheetnames]<EOL>index = sheet_names.index(sheet.upper())<EOL>cells = book.worksheets[index][range_expr]<EOL><DEDENT>else:<EOL><INDENT>cells = _get_namedrange(book, range_expr, sheet)<EOL><DEDENT>if isinstance(cells, opxl.cell.Cell):<EOL><INDENT>return cells.value<EOL><DEDENT>if dict_generator is None:<EOL><INDENT>dict_generator = default_generator<EOL><DEDENT>gen = dict_generator(cells)<EOL>return {keyval[<NUM_LIT:0>]: keyval[<NUM_LIT:1>] for keyval in gen}<EOL>", "docstring": "Read values from an Excel range into a dictionary.\n\n    `range_expr` ie either a range address string, such as \"A1\", \"$C$3:$E$5\",\n    or a defined name string for a range, such as \"NamedRange1\".\n    If a range address is provided, `sheet` argument must also be provided.\n    If a named range is provided and `sheet` is not, book level defined name\n    is searched. If `sheet` is also provided, sheet level defined name for the\n    specified `sheet` is searched.\n    If range_expr points to a single cell, its value is returned.\n\n    `dictgenerator` is a generator function that yields keys and values of \n    the returned dictionary. 
the excel range, as a nested tuple of openpyxl's\n    Cell objects, is passed to the generator function as its single argument.\n    If not specified, default generator is used, which maps tuples of row and\n    column indexes, both starting with 0, to their values.\n\n    Args:\n        filepath (str): Path to an Excel file.\n        range_epxr (str): Range expression, such as \"A1\", \"$G4:$K10\", \n            or named range \"NamedRange1\"\n        sheet (str): Sheet name (case ignored).\n            None if book level defined range name is passed as `range_epxr`.\n        dict_generator: A generator function taking a nested tuple of cells \n            as a single parameter.\n\n    Returns:\n        Nested list containing range values.", "id": "f13974:m3"}
{"signature": "def _get_col_index(name):", "body": "index = string.ascii_uppercase.index<EOL>col = <NUM_LIT:0><EOL>for c in name.upper():<EOL><INDENT>col = col * <NUM_LIT> + index(c) + <NUM_LIT:1><EOL><DEDENT>return col<EOL>", "docstring": "Convert column name to index.", "id": "f13974:m0"}
{"signature": "def restore_state(self, system):", "body": "super().restore_state(system)<EOL>BaseSpaceContainerImpl.restore_state(self, system)<EOL>for cells in self._cells.values():<EOL><INDENT>cells.restore_state(system)<EOL><DEDENT>", "docstring": "Called after unpickling to restore some attributes manually.", "id": "f13977:c9:m38"}
{"signature": "def _is_derived(self):", "body": "return self._impl.is_derived<EOL>", "docstring": "True if the space is a derived space, False otherwise.", "id": "f13977:c7:m7"}
{"signature": "@property<EOL><INDENT>def cells(self):<DEDENT>", "body": "return self._impl.cells.interfaces<EOL>", "docstring": "A mapping of cells names to the cells objects in the space.", "id": "f13977:c7:m11"}
{"signature": "def get_dynspace(self, args, kwargs=None):", "body": "node = get_node(self, *convert_args(args, kwargs))<EOL>key = node[KEY]<EOL>if key in self.param_spaces:<EOL><INDENT>return self.param_spaces[key]<EOL><DEDENT>else:<EOL><INDENT>last_self = self.system.self<EOL>self.system.self = self<EOL>try:<EOL><INDENT>space_args = self.eval_formula(node)<EOL><DEDENT>finally:<EOL><INDENT>self.system.self = last_self<EOL><DEDENT>if space_args is None:<EOL><INDENT>space_args = {\"<STR_LIT>\": [self]}  <EOL><DEDENT>else:<EOL><INDENT>if \"<STR_LIT>\" in space_args:<EOL><INDENT>bases = get_impls(space_args[\"<STR_LIT>\"])<EOL>if isinstance(bases, StaticSpaceImpl):<EOL><INDENT>space_args[\"<STR_LIT>\"] = [bases]<EOL><DEDENT>elif bases is None:<EOL><INDENT>space_args[\"<STR_LIT>\"] = [self]  <EOL><DEDENT>else:<EOL><INDENT>space_args[\"<STR_LIT>\"] = bases<EOL><DEDENT><DEDENT>else:<EOL><INDENT>space_args[\"<STR_LIT>\"] = [self]<EOL><DEDENT><DEDENT>space_args[\"<STR_LIT>\"] = node_get_args(node)<EOL>space = self._new_dynspace(**space_args)<EOL>self.param_spaces[key] = space<EOL>space.inherit(clear_value=False)<EOL>return space<EOL><DEDENT>", "docstring": "Create a dynamic root space\n\n        Called from interface methods", "id": "f13977:c9:m32"}
{"signature": "@property<EOL><INDENT>def evalrepr(self):<DEDENT>", "body": "args = [repr(arg) for arg in get_interfaces(self.argvalues)]<EOL>param = \"<STR_LIT:U+002CU+0020>\".join(args)<EOL>return \"<STR_LIT>\" % (self.parent.evalrepr, param)<EOL>", "docstring": "Evaluable repr", "id": "f13977:c13:m8"}
{"signature": "def _is_root(self):", "body": "return isinstance(self._impl, RootDynamicSpaceImpl)<EOL>", "docstring": "True if ths space is a dynamic space, False otherwise.", "id": "f13977:c7:m9"}
{"signature": "@property<EOL><INDENT>def formula(self):<DEDENT>", "body": "return self._impl.formula<EOL>", "docstring": "Property to get, set, delete formula.", "id": "f13977:c7:m24"}
{"signature": "def _is_base(self, other):", "body": "return self._impl.is_base(other._impl)<EOL>", "docstring": "True if the space is a base space of ``other``, False otherwise.", "id": "f13977:c7:m4"}
{"signature": "@property<EOL><INDENT>def _derived_spaces(self):<DEDENT>", "body": "return self._impl.derived_spaces.interfaces<EOL>", "docstring": "A mapping associating names to derived spaces.", "id": "f13977:c7:m19"}
{"signature": "def add_bases(self, *bases):", "body": "return self._impl.add_bases(get_impls(bases))<EOL>", "docstring": "Add base spaces.", "id": "f13977:c8:m1"}
{"signature": "def __contains__(self, item):", "body": "if isinstance(item, str):<EOL><INDENT>return item in self._impl.namespace<EOL><DEDENT>elif isinstance(item, Cells):<EOL><INDENT>return item._impl in self._impl.cells.values()<EOL><DEDENT>elif isinstance(item, StaticSpace):<EOL><INDENT>return item._impl in self._impl.spaces.values()<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Check if item is in the space.\n\n        item can be either a cells or space.\n\n        Args:\n            item: a cells or space to check.\n\n        Returns:\n            True if item is a direct child of the space, False otherwise.", "id": "f13977:c8:m7"}
{"signature": "def new_cells_from_module(self, module):", "body": "<EOL>newcells = self._impl.new_cells_from_module(module)<EOL>return get_interfaces(newcells)<EOL>", "docstring": "Create a cells from a module.\n\n        Alias to :py:meth:`import_funcs`.", "id": "f13977:c8:m4"}
{"signature": "@property<EOL><INDENT>def _derived_cells(self):<DEDENT>", "body": "return self._impl.derived_cells.interfaces<EOL>", "docstring": "A mapping associating names to derived cells.", "id": "f13977:c7:m13"}
{"signature": "@property<EOL><INDENT>def bases(self):<DEDENT>", "body": "return get_interfaces(self._impl.bases)<EOL>", "docstring": "List of base classes.", "id": "f13977:c7:m2"}
{"signature": "def get_object(self, name):", "body": "parts = name.split(\"<STR_LIT:.>\")<EOL>child = parts.pop(<NUM_LIT:0>)<EOL>if parts:<EOL><INDENT>return self.spaces[child].get_object(\"<STR_LIT:.>\".join(parts))<EOL><DEDENT>else:<EOL><INDENT>return self._namespace_impl[child]<EOL><DEDENT>", "docstring": "Retrieve an object by a dotted name relative to the space.", "id": "f13977:c9:m27"}
{"signature": "def remove_bases(self, *bases):", "body": "return self._impl.remove_bases(bases)<EOL>", "docstring": "Remove base spaces.", "id": "f13977:c8:m2"}
{"signature": "def has_params(self):", "body": "<EOL>return bool(self._impl.formula)<EOL>", "docstring": "Check if the parameter function is set.", "id": "f13977:c7:m25"}
{"signature": "def remove_decorator(source: str):", "body": "lines = source.splitlines()<EOL>atok = asttokens.ASTTokens(source, parse=True)<EOL>for node in ast.walk(atok.tree):<EOL><INDENT>if isinstance(node, ast.FunctionDef):<EOL><INDENT>break<EOL><DEDENT><DEDENT>if node.decorator_list:<EOL><INDENT>deco_first = node.decorator_list[<NUM_LIT:0>]<EOL>deco_last = node.decorator_list[-<NUM_LIT:1>]<EOL>line_first = atok.tokens[deco_first.first_token.index - <NUM_LIT:1>].start[<NUM_LIT:0>]<EOL>line_last = atok.tokens[deco_last.last_token.index + <NUM_LIT:1>].start[<NUM_LIT:0>]<EOL>lines = lines[:line_first - <NUM_LIT:1>] + lines[line_last:]<EOL><DEDENT>return \"<STR_LIT:\\n>\".join(lines) + \"<STR_LIT:\\n>\"<EOL>", "docstring": "Remove decorators from function definition", "id": "f13978:m6"}
{"signature": "def find_funcdef(source):", "body": "try:<EOL><INDENT>module_node = compile(<EOL>source, \"<STR_LIT>\", mode=\"<STR_LIT>\", flags=ast.PyCF_ONLY_AST<EOL>)<EOL><DEDENT>except SyntaxError:<EOL><INDENT>return find_funcdef(fix_lamdaline(source))<EOL><DEDENT>for node in ast.walk(module_node):<EOL><INDENT>if isinstance(node, ast.FunctionDef) or isinstance(node, ast.Lambda):<EOL><INDENT>return node<EOL><DEDENT><DEDENT>raise ValueError(\"<STR_LIT>\")<EOL>", "docstring": "Find the first FuncDef ast object in source", "id": "f13978:m1"}
{"signature": "def __getstate__(self):", "body": "return {\"<STR_LIT:source>\": self.source, \"<STR_LIT>\": self.module}<EOL>", "docstring": "Specify members to pickle.", "id": "f13978:c1:m7"}
{"signature": "def _reload(self, module=None):", "body": "if self.module is None:<EOL><INDENT>raise RuntimeError<EOL><DEDENT>elif module is None:<EOL><INDENT>import importlib<EOL>module = ModuleSource(importlib.reload(module))<EOL><DEDENT>elif module.name != self.module:<EOL><INDENT>raise RuntimeError<EOL><DEDENT>if self.name in module.funcs:<EOL><INDENT>func = module.funcs[self.name]<EOL>self.__init__(func=func)<EOL><DEDENT>else:<EOL><INDENT>self.__init__(func=NULL_FORMULA)<EOL><DEDENT>return self<EOL>", "docstring": "Reload the source function from the source module.\n\n        **Internal use only**\n        Update the source function of the formula.\n        This method is used to updated the underlying formula\n        when the source code of the module in which the source function\n        is read from is modified.\n\n        If the formula was not created from a module, an error is raised.\n        If ``module_`` is not given, the source module of the formula is\n        reloaded. If ``module_`` is given and matches the source module,\n        then the module_ is used without being reloaded.\n        If ``module_`` is given and does not match the source module of\n        the formula, an error is raised.\n\n        Args:\n            module_: A ``ModuleSource`` object\n\n        Returns:\n            self", "id": "f13978:c1:m10"}
{"signature": "def extract_params(source):", "body": "funcdef = find_funcdef(source)<EOL>params = []<EOL>for node in ast.walk(funcdef.args):<EOL><INDENT>if isinstance(node, ast.arg):<EOL><INDENT>if node.arg not in params:<EOL><INDENT>params.append(node.arg)<EOL><DEDENT><DEDENT><DEDENT>return params<EOL>", "docstring": "Extract parameters from a function definition", "id": "f13978:m2"}
{"signature": "def clear_descendants(self, source, clear_source=True):", "body": "desc = nx.descendants(self, source)<EOL>if clear_source:<EOL><INDENT>desc.add(source)<EOL><DEDENT>self.remove_nodes_from(desc)<EOL>return desc<EOL>", "docstring": "Remove all descendants of(reachable from) `source`.\n\n        Args:\n            source: Node descendants\n            clear_source(bool): Remove origin too if True.\n        Returns:\n            set: The removed nodes.", "id": "f13980:c0:m0"}
{"signature": "def rename(self, name):", "body": "self._impl.system.rename_model(new_name=name, old_name=self.name)<EOL>", "docstring": "Rename the model itself", "id": "f13980:c1:m0"}
{"signature": "def check_mro(self, bases):", "body": "try:<EOL><INDENT>self.add_node(\"<STR_LIT>\")<EOL>for base in bases:<EOL><INDENT>nx.DiGraph.add_edge(self, base, \"<STR_LIT>\")<EOL><DEDENT>result = self.get_mro(\"<STR_LIT>\")[<NUM_LIT:1>:]<EOL><DEDENT>finally:<EOL><INDENT>self.remove_node(\"<STR_LIT>\")<EOL><DEDENT>return result<EOL>", "docstring": "Check if C3 MRO is possible with given bases", "id": "f13980:c3:m4"}
{"signature": "def clear_obj(self, obj):", "body": "obj_nodes = self.get_nodes_with(obj)<EOL>removed = set()<EOL>for node in obj_nodes:<EOL><INDENT>if self.has_node(node):<EOL><INDENT>removed.update(self.clear_descendants(node))<EOL><DEDENT><DEDENT>return removed<EOL>", "docstring": "Remove all nodes with `obj` and their descendants.", "id": "f13980:c0:m1"}
{"signature": "def get_object(self, name):", "body": "parts = name.split(\"<STR_LIT:.>\")<EOL>space = self.spaces[parts.pop(<NUM_LIT:0>)]<EOL>if parts:<EOL><INDENT>return space.get_object(\"<STR_LIT:.>\".join(parts))<EOL><DEDENT>else:<EOL><INDENT>return space<EOL><DEDENT>", "docstring": "Retrieve an object by a dotted name relative to the model.", "id": "f13980:c2:m12"}
{"signature": "def clear_descendants(self, source, clear_source=True):", "body": "removed = self.cellgraph.clear_descendants(source, clear_source)<EOL>for node in removed:<EOL><INDENT>del node[OBJ].data[node[KEY]]<EOL><DEDENT>", "docstring": "Clear values and nodes calculated from `source`.", "id": "f13980:c2:m2"}
{"signature": "@property<EOL><INDENT>def refs(self):<DEDENT>", "body": "return self._impl.global_refs.interfaces<EOL>", "docstring": "Return a mapping of global references.", "id": "f13980:c1:m8"}
{"signature": "def close(self):", "body": "self._impl.close()<EOL>", "docstring": "Close the model.", "id": "f13980:c1:m2"}
{"signature": "def configure_python(self):", "body": "sys.setrecursionlimit(<NUM_LIT:10>**<NUM_LIT:6>)<EOL>warnings.showwarning = custom_showwarning<EOL>threading.stack_size(<NUM_LIT>)<EOL>", "docstring": "Configure Python settings for modelx\n\n        The error handler is configured later.", "id": "f13981:c2:m3"}
{"signature": "def excepthook(self, except_type, exception, traceback):", "body": "if except_type is DeepReferenceError:<EOL><INDENT>print(exception.msg)<EOL><DEDENT>else:<EOL><INDENT>self.default_excepthook(except_type, exception, traceback)<EOL><DEDENT>", "docstring": "Not Used: Custom exception hook to replace sys.excepthook\n\n    This is for CPython's default shell. IPython does not use sys.exepthook.\n\n    https://stackoverflow.com/questions/27674602/hide-traceback-unless-a-debug-flag-is-set", "id": "f13981:m3"}
{"signature": "def get_object(self, name):", "body": "parts = name.split(\"<STR_LIT:.>\")<EOL>model_name = parts.pop(<NUM_LIT:0>)<EOL>return self.models[model_name].get_object(\"<STR_LIT:.>\".join(parts))<EOL>", "docstring": "Retrieve an object by its absolute name.", "id": "f13981:c2:m14"}
{"signature": "def custom_showwarning(<EOL>message, category, filename=\"<STR_LIT>\", lineno=-<NUM_LIT:1>, file=None, line=None<EOL>):", "body": "if file is None:<EOL><INDENT>file = sys.stderr<EOL>if file is None:<EOL><INDENT>return<EOL><DEDENT><DEDENT>text = \"<STR_LIT>\" % (category.__name__, message)<EOL>try:<EOL><INDENT>file.write(text)<EOL><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Hook to override default showwarning.\n\n    https://stackoverflow.com/questions/2187269/python-print-only-the-message-on-warnings", "id": "f13981:m0"}
{"signature": "def tracemessage(self, maxlen=<NUM_LIT:6>):", "body": "result = \"<STR_LIT>\"<EOL>for i, value in enumerate(self):<EOL><INDENT>result += \"<STR_LIT>\".format(i, get_node_repr(value))<EOL><DEDENT>result = result.strip(\"<STR_LIT:\\n>\")<EOL>lines = result.split(\"<STR_LIT:\\n>\")<EOL>if maxlen and len(lines) > maxlen:<EOL><INDENT>i = int(maxlen / <NUM_LIT:2>)<EOL>lines = lines[:i] + [\"<STR_LIT>\"] + lines[-(maxlen - i) :]<EOL>result = \"<STR_LIT:\\n>\".join(lines)<EOL><DEDENT>return result<EOL>", "docstring": "if maxlen > 0, the message is shortened to maxlen traces.", "id": "f13981:c1:m4"}
{"signature": "def custom_showtraceback(<EOL>self,<EOL>exc_tuple=None,<EOL>filename=None,<EOL>tb_offset=None,<EOL>exception_only=False,<EOL>running_compiled_code=False,<EOL>):", "body": "self.default_showtraceback(<EOL>exc_tuple,<EOL>filename,<EOL>tb_offset,<EOL>exception_only=True,<EOL>running_compiled_code=running_compiled_code,<EOL>)<EOL>", "docstring": "Custom showtraceback for monkey-patching IPython's InteractiveShell\n\n    https://stackoverflow.com/questions/1261668/cannot-override-sys-excepthook", "id": "f13981:m2"}
{"signature": "def __sub__(self, other):", "body": "return self + -other<EOL>", "docstring": "self - other", "id": "f13982:c1:m15"}
{"signature": "@property<EOL><INDENT>def value(self):<DEDENT>", "body": "if self.has_value:<EOL><INDENT>return self._impl[OBJ].get_value(self._impl[KEY])<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Return the value of the cells.", "id": "f13982:c3:m4"}
{"signature": "def __radd__(self, other):", "body": "return self.__add__(other)<EOL>", "docstring": "other + self", "id": "f13982:c1:m12"}
{"signature": "def clear(self, *args, **kwargs):", "body": "return self._impl.clear_value(*args, **kwargs)<EOL>", "docstring": "Clear all the values.", "id": "f13982:c1:m9"}
{"signature": "def __mul__(self, other):", "body": "return self._impl.single_value * other<EOL>", "docstring": "self * other", "id": "f13982:c1:m17"}
{"signature": "def succs(self, *args, **kwargs):", "body": "return self._impl.successors(args, kwargs)<EOL>", "docstring": "Return a list of successors of a cell.\n\n        This method returns a list of CellNode objects, whose elements are\n        successors of (i.e. referencing in their formulas)\n        the cell specified by the given arguments.", "id": "f13982:c1:m44"}
{"signature": "def __ge__(self, other):", "body": "return self.__eq__(other) or self.__gt__(other)<EOL>", "docstring": "self >= other", "id": "f13982:c1:m28"}
{"signature": "def node(self, *args, **kwargs):", "body": "return CellNode(get_node(self._impl, *convert_args(args, kwargs)))<EOL>", "docstring": "Return a :class:`CellNode` object for the given arguments.", "id": "f13982:c1:m42"}
{"signature": "@property<EOL><INDENT>def has_value(self):<DEDENT>", "body": "return self._impl[OBJ].has_cell(self._impl[KEY])<EOL>", "docstring": "Return ``True`` if the cell has a value.", "id": "f13982:c3:m3"}
{"signature": "def __rsub__(self, other):", "body": "return -self + other<EOL>", "docstring": "other - self", "id": "f13982:c1:m16"}
{"signature": "@property<EOL><INDENT>def frame(self):<DEDENT>", "body": "return self._impl.to_frame(())<EOL>", "docstring": "Alias of ``to_frame()``.", "id": "f13982:c1:m32"}
{"signature": "def __gt__(self, other):", "body": "return self._impl.single_value > other<EOL>", "docstring": "self > other", "id": "f13982:c1:m27"}
{"signature": "def __truediv__(self, other):", "body": "return self._impl.single_value / other<EOL>", "docstring": "self / other: Should promote to float when necessary.", "id": "f13982:c1:m19"}
{"signature": "def __abs__(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Returns the Real distance from 0. Called for abs(self).", "id": "f13982:c1:m23"}
{"signature": "def __rtruediv__(self, other):", "body": "return other / self._impl.single_value<EOL>", "docstring": "other / self", "id": "f13982:c1:m20"}
{"signature": "@property<EOL><INDENT>def formula(self):<DEDENT>", "body": "return self._impl.formula<EOL>", "docstring": "Property to get, set, delete formula.", "id": "f13982:c1:m33"}
{"signature": "def __le__(self, other):", "body": "return self.__eq__(other) or self.__lt__(other)<EOL>", "docstring": "self <= other", "id": "f13982:c1:m26"}
{"signature": "@property<EOL><INDENT>def args(self):<DEDENT>", "body": "return self._impl[KEY]<EOL>", "docstring": "Return a tuple of the cells' arguments.", "id": "f13982:c3:m2"}
{"signature": "def match(self, *args, **kwargs):", "body": "return self._impl.find_match(args, kwargs)<EOL>", "docstring": "Returns the best matching args and their value.\n\n        If the cells returns None for the given arguments,\n        continue to get a value by passing arguments\n        masking the given arguments with Nones.\n        The search of non-None value starts from the given arguments\n        to the all None arguments in the lexicographical order.\n        The masked arguments that returns non-None value\n        first is returned with the value.", "id": "f13982:c1:m3"}
{"signature": "def __setitem__(self, key, value):", "body": "self._impl.set_value(tuplize_key(self, key), value)<EOL>", "docstring": "Set value of a particular cell", "id": "f13982:c1:m5"}
{"signature": "def restore_state(self, system):", "body": "self.system = system<EOL>", "docstring": "Called after unpickling to restore some attributes manually.", "id": "f13983:c0:m5"}
{"signature": "@property<EOL><INDENT>def parent(self):<DEDENT>", "body": "if self._impl.parent is None:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return self._impl.parent.interface<EOL><DEDENT>", "docstring": "The parent of this object. None for models.\n\n        The parent object of a cells is a space that contains the cells.\n        The parent object of a space is either a model or another space\n        that contains the space.", "id": "f13983:c5:m3"}
{"signature": "def update_lazyevals(self):", "body": "if self.lazy_evals is None:<EOL><INDENT>return<EOL><DEDENT>elif isinstance(self.lazy_evals, LazyEval):<EOL><INDENT>self.lazy_evals.get_updated()<EOL><DEDENT>else:<EOL><INDENT>for lz in self.lazy_evals:<EOL><INDENT>lz.get_updated()<EOL><DEDENT><DEDENT>", "docstring": "Update all LazyEvals in self\n\n        self.lzy_evals must be set to LazyEval object(s) enough to\n        update all owned LazyEval objects.", "id": "f13983:c0:m2"}
{"signature": "@property<EOL><INDENT>def _baseattrs(self):<DEDENT>", "body": "result = {<EOL>\"<STR_LIT:type>\": type(self).__name__,<EOL>\"<STR_LIT:id>\": id(self),<EOL>\"<STR_LIT:name>\": self.name,<EOL>\"<STR_LIT>\": self.fullname,<EOL>\"<STR_LIT>\": self._get_repr(),<EOL>}<EOL>return result<EOL>", "docstring": "A dict of members expressed in literals", "id": "f13983:c5:m11"}
{"signature": "def _update_data(self):", "body": "func = self.owner.formula.func<EOL>codeobj = func.__code__<EOL>name = func.__name__  <EOL>namespace_impl = self.owner._namespace_impl.get_updated()<EOL>namespace = namespace_impl.interfaces<EOL>selfnode = get_node(self.owner, None, None)<EOL>for name in self.owner.formula.srcnames:<EOL><INDENT>if name in namespace_impl and isinstance(<EOL>namespace_impl[name], ReferenceImpl<EOL>):<EOL><INDENT>refnode = get_node(namespace_impl[name], None, None)<EOL>self.owner.model.lexdep.add_path([selfnode, refnode])<EOL><DEDENT><DEDENT>closure = func.__closure__  <EOL>if closure is not None:  <EOL><INDENT>closure = create_closure(self.owner.interface)<EOL><DEDENT>self.altfunc = FunctionType(<EOL>codeobj, namespace, name=name, closure=closure<EOL>)<EOL>", "docstring": "Update altfunc", "id": "f13983:c16:m1"}
{"signature": "@property<EOL><INDENT>def _baseattrs(self):<DEDENT>", "body": "result = {\"<STR_LIT:type>\": type(self).__name__}<EOL>try:<EOL><INDENT>result[\"<STR_LIT>\"] = {<EOL>name: item._baseattrs<EOL>for name, item in self.items()<EOL>if name[<NUM_LIT:0>] != \"<STR_LIT:_>\"<EOL>}<EOL><DEDENT>except:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % self)<EOL><DEDENT>return result<EOL>", "docstring": "A dict of members expressed in literals", "id": "f13983:c14:m6"}
{"signature": "def _to_attrdict(self, attrs=None):", "body": "result = self._baseattrs<EOL>for attr in attrs:<EOL><INDENT>if hasattr(self, attr):<EOL><INDENT>result[attr] = getattr(self, attr)._to_attrdict(attrs)<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Get extra attributes", "id": "f13983:c5:m12"}
{"signature": "def new_space(<EOL>self,<EOL>name=None,<EOL>bases=None,<EOL>formula=None,<EOL>*,<EOL>refs=None,<EOL>source=None,<EOL>is_derived=False,<EOL>prefix=\"<STR_LIT>\"<EOL>):", "body": "from modelx.core.space import StaticSpaceImpl<EOL>if name is None:<EOL><INDENT>name = self.spacenamer.get_next(self.namespace, prefix)<EOL><DEDENT>if name in self.namespace:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % name)<EOL><DEDENT>if not prefix and not is_valid_name(name):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % name)<EOL><DEDENT>space = self._new_space(<EOL>name=name,<EOL>formula=formula,<EOL>refs=refs,<EOL>source=source,<EOL>is_derived=is_derived,<EOL>)<EOL>self._set_space(space)<EOL>self.model.spacegraph.add_space(space)<EOL>if bases is not None:<EOL><INDENT>if isinstance(bases, StaticSpaceImpl):<EOL><INDENT>bases = [bases]<EOL><DEDENT>space.add_bases(bases)<EOL><DEDENT>return space<EOL>", "docstring": "Create a new child space.\n\n        Args:\n            name (str): Name of the space. If omitted, the space is\n                created automatically.\n            bases: If specified, the new space becomes a derived space of\n                the `base` space.\n            formula: Function whose parameters used to set space parameters.\n            refs: a mapping of refs to be added.\n            arguments: ordered dict of space parameter names to their values.\n            source: A source module from which cell definitions are read.\n            prefix: Prefix to the autogenerated name when name is None.", "id": "f13984:c3:m0"}
{"signature": "def restore_state(self, system):", "body": "for space in self._spaces.values():<EOL><INDENT>space.restore_state(system)<EOL><DEDENT>", "docstring": "Called after unpickling to restore some attributes manually.", "id": "f13984:c2:m3"}
{"signature": "def new_space_from_excel(<EOL>self,<EOL>book,<EOL>range_,<EOL>sheet=None,<EOL>name=None,<EOL>names_row=None,<EOL>param_cols=None,<EOL>space_param_order=None,<EOL>cells_param_order=None,<EOL>transpose=False,<EOL>names_col=None,<EOL>param_rows=None,<EOL>):", "body": "space = self._impl.new_space_from_excel(<EOL>book,<EOL>range_,<EOL>sheet,<EOL>name,<EOL>names_row,<EOL>param_cols,<EOL>space_param_order,<EOL>cells_param_order,<EOL>transpose,<EOL>names_col,<EOL>param_rows,<EOL>)<EOL>return get_interfaces(space)<EOL>", "docstring": "Create a child space from an Excel range.\n\n        To use this method, ``openpyxl`` package must be installed.\n\n        Args:\n            book (str): Path to an Excel file.\n            range_ (str): Range expression, such as \"A1\", \"$G4:$K10\",\n                or named range \"NamedRange1\".\n            sheet (str): Sheet name (case ignored).\n            name (str, optional): Name of the space. Defaults to ``SpaceN``,\n                where ``N`` is a number determined automatically.\n            names_row (optional): an index number indicating\n                what row contains the names of cells and parameters.\n                Defaults to the top row (0).\n            param_cols (optional): a sequence of index numbers\n                indicating parameter columns.\n                Defaults to only the leftmost column ([0]).\n            names_col (optional): an index number, starting from 0,\n                indicating what column contains additional parameters.\n            param_rows (optional): a sequence of index numbers, starting from\n                0, indicating rows of additional parameters, in case cells are\n                defined in two dimensions.\n            transpose (optional): Defaults to ``False``.\n                If set to ``True``, \"row(s)\" and \"col(s)\" in the parameter\n                names are interpreted inversely, i.e.\n                all indexes passed to \"row(s)\" parameters are 
interpreted\n                as column indexes,\n                and all indexes passed to \"col(s)\" parameters as row indexes.\n            space_param_order: a sequence to specify space parameters and\n                their orders. The elements of the sequence denote the indexes\n                of ``param_cols`` elements, and optionally the index of\n                ``param_rows`` elements shifted by the length of\n                ``param_cols``. The elements of this parameter and\n                ``cell_param_order`` must not overlap.\n            cell_param_order (optional): a sequence to reorder the parameters.\n                The elements of the sequence denote the indexes of\n                ``param_cols`` elements, and optionally the index of\n                ``param_rows`` elements shifted by the length of\n                ``param_cols``. The elements of this parameter and\n                ``cell_space_order`` must not overlap.\n\n        Returns:\n            The new child space created from the Excel range.", "id": "f13984:c1:m3"}
{"signature": "@property<EOL><INDENT>def _baseattrs(self):<DEDENT>", "body": "result = super()._baseattrs<EOL>result[\"<STR_LIT>\"] = self.spaces._baseattrs<EOL>return result<EOL>", "docstring": "A dict of members expressed in literals", "id": "f13984:c0:m2"}
{"signature": "def new_space(self, name=None, bases=None, formula=None, refs=None):", "body": "space = self._impl.model.currentspace = self._impl.new_space(<EOL>name=name, bases=get_impls(bases), formula=formula, refs=refs<EOL>)<EOL>return space.interface<EOL>", "docstring": "Create a child space.\n\n        Args:\n            name (str, optional): Name of the space. Defaults to ``SpaceN``,\n                where ``N`` is a number determined automatically.\n            bases (optional): A space or a sequence of spaces to be the base\n                space(s) of the created space.\n            formula (optional): Function to specify the parameters of\n                dynamic child spaces. The signature of this function is used\n                for setting parameters for dynamic child spaces.\n                This function should return a mapping of keyword arguments\n                to be passed to this method when the dynamic child spaces\n                are created.\n\n        Returns:\n            The new child space.", "id": "f13984:c1:m0"}
{"signature": "def new_space(name=None, bases=None, formula=None):", "body": "return cur_model().new_space(name, bases, formula)<EOL>", "docstring": "Create and return a new space in the current model.\n\n    The ``currentspace`` of the current model is set to the created model.\n\n    Args:\n        name (:obj:`str`, optional): The name of the space to create.\n            Defaults to ``SpaceN``, with ``N``\n            being an automatically assigned integer.\n\n    Returns:\n        The new space.", "id": "f13986:m6"}
{"signature": "def _get_node(name: str, args: str):", "body": "obj = get_object(name)<EOL>args = ast.literal_eval(args)<EOL>if not isinstance(args, tuple):<EOL><INDENT>args = (args,)<EOL><DEDENT>return obj.node(*args)<EOL>", "docstring": "Get node from object name and arg string\n\n    Not Used. Left for future reference purpose.", "id": "f13986:m10"}
{"signature": "def configure_python():", "body": "_system.configure_python()<EOL>", "docstring": "Configure Python ``sys`` settings for modelx.\n\n    This function is called implicitly when importing modelx.\n    To restore the Python settings, call :py:func:`restore_python`", "id": "f13986:m0"}
{"signature": "def get_object(name: str):", "body": "<EOL>elms = name.split(\"<STR_LIT:.>\")<EOL>parent = get_models()[elms.pop(<NUM_LIT:0>)]<EOL>while len(elms) > <NUM_LIT:0>:<EOL><INDENT>obj = elms.pop(<NUM_LIT:0>)<EOL>parent = getattr(parent, obj)<EOL><DEDENT>return parent<EOL>", "docstring": "Get a modelx object from its full name.", "id": "f13986:m9"}
{"signature": "def setup_ipython():", "body": "_system.setup_ipython()<EOL>", "docstring": "Set up IPython shell for modelx.\n\n    Suppress IPython's default traceback messages upon error.", "id": "f13986:m2"}
{"signature": "def get_models():", "body": "return _get_interfaces(_system.models)<EOL>", "docstring": "Returns a dict that maps model names to models.", "id": "f13986:m8"}
{"signature": "def restore_ipython():", "body": "_system.restore_ipython()<EOL>", "docstring": "Restore IPython' default error message.\n\n    Bring back IPython's default traceback message upon error for debugging.", "id": "f13986:m3"}
{"signature": "def tuplize_key(obj, key, remove_extra=False):", "body": "paramlen = len(obj.formula.parameters)<EOL>if isinstance(key, str):<EOL><INDENT>key = (key,)<EOL><DEDENT>elif not isinstance(key, Sequence):<EOL><INDENT>key = (key,)<EOL><DEDENT>if not remove_extra:<EOL><INDENT>return key<EOL><DEDENT>else:<EOL><INDENT>arglen = len(key)<EOL>if arglen:<EOL><INDENT>return key[: min(arglen, paramlen)]<EOL><DEDENT>else:<EOL><INDENT>return key<EOL><DEDENT><DEDENT>", "docstring": "Args", "id": "f13987:m3"}
{"signature": "def node_get_args(node):", "body": "obj = node[OBJ]<EOL>key = node[KEY]<EOL>boundargs = obj.formula.signature.bind(*key)<EOL>boundargs.apply_defaults()<EOL>return boundargs.arguments<EOL>", "docstring": "Return an ordered mapping from params to args", "id": "f13987:m2"}
{"signature": "def show_tree(model=None):", "body": "if model is None:<EOL><INDENT>model = mx.cur_model()<EOL><DEDENT>view = get_modeltree(model)<EOL>app = QApplication.instance()<EOL>if not app:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>view.show()<EOL>app.exec_()<EOL>", "docstring": "Display the model tree window.\n\n    Args:\n        model: :class:`Model <modelx.core.model.Model>` object.\n            Defaults to the current model.\n\n    Warnings:\n        For this function to work with Spyder, *Graphics backend* option\n        of Spyder must be set to *inline*.", "id": "f13991:m2"}
{"signature": "def get_tree(model=None):", "body": "if model is None:<EOL><INDENT>model = mx.cur_model()<EOL><DEDENT>treemodel = ModelTreeModel(model._baseattrs)<EOL>view = QTreeView()<EOL>view.setModel(treemodel)<EOL>view.setWindowTitle(\"<STR_LIT>\" % model.name)<EOL>view.setAlternatingRowColors(True)<EOL>return view<EOL>", "docstring": "Get QTreeView object containing the model tree.\n\n    Args:\n        model: :class:`Model <modelx.core.model.Model>` object.\n            Defaults to the current model.", "id": "f13991:m1"}
{"signature": "def get_bases(self, node):", "body": "return self.predecessors(node)<EOL>", "docstring": "Direct Bases iterator", "id": "f13995:c0:m1"}
{"signature": "def main():", "body": "print(\"<STR_LIT>\")<EOL>", "docstring": "Entry point for the application script", "id": "f14007:m0"}
{"signature": "def pv_payments(n):", "body": "if n == <NUM_LIT:1>:<EOL><INDENT>return <NUM_LIT:1> / (<NUM_LIT:1> + interest_rate)<EOL><DEDENT>else:<EOL><INDENT>return pv_payments(n - <NUM_LIT:1>) + <NUM_LIT:1> / (<NUM_LIT:1> + interest_rate) ** n<EOL><DEDENT>", "docstring": "Present value of repayments of 1 for n years.", "id": "f14009:m1"}
{"signature": "def __init__(self, hostname: str = '<STR_LIT>',<EOL>port: int = <NUM_LIT>, retries: int = <NUM_LIT:10>,<EOL>log_errors: bool = False) -> None:", "body": "self.hostname = hostname<EOL>self.port = port<EOL>self.retries = retries<EOL>self.log_errors = log_errors<EOL>self._log_fp = Path(tempfile.gettempdir(), '<STR_LIT>')<EOL>if self._log_fp.is_file():<EOL><INDENT>self._log_fp.open('<STR_LIT:w>').close()<EOL><DEDENT>", "docstring": ":param hostname: The IP address of the TweeboParser API server.\n:param port: The Port that the TweeboParser API server is attached to.\n:param retries: Number of times to retry json decoding the\n                returned data.", "id": "f14017:c0:m0"}
{"signature": "def log_error(self, text: str) -> None:", "body": "if self.log_errors:<EOL><INDENT>with self._log_fp.open('<STR_LIT>') as log_file:<EOL><INDENT>log_file.write(f'<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Given some error text it will log the text if self.log_errors is True\n\n:param text: Error text to log", "id": "f14017:c0:m1"}
{"signature": "def _hyphens_to_dashes(self):", "body": "problematic_hyphens = [(r'<STR_LIT>', r'<STR_LIT>'),<EOL>(r'<STR_LIT>', '<STR_LIT>'),<EOL>(r'<STR_LIT>', '<STR_LIT>')]<EOL>for problem_case in problematic_hyphens:<EOL><INDENT>self._regex_replacement(*problem_case)<EOL><DEDENT>", "docstring": "Transform hyphens to various kinds of dashes", "id": "f14040:c0:m5"}
{"signature": "def _sentence_to_interstitial_spacing(self):", "body": "not_sentence_end_chars = ['<STR_LIT:U+0020>']<EOL>abbreviations = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>']<EOL>titles = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>for abbrev in abbreviations:<EOL><INDENT>for x in not_sentence_end_chars:<EOL><INDENT>self._str_replacement(abbrev + x, abbrev + '<STR_LIT>')<EOL><DEDENT><DEDENT>for title in titles:<EOL><INDENT>for x in not_sentence_end_chars:<EOL><INDENT>self._str_replacement(title + x, title + '<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Fix common spacing errors caused by LaTeX's habit\n        of using an inter-sentence space after any full stop.", "id": "f14040:c0:m2"}
{"signature": "def tostring(self):", "body": "return self.data<EOL>", "docstring": "Return self as instance of str()", "id": "f14040:c0:m1"}
{"signature": "def LatexText(*args, **kwargs):", "body": "return LatexFixer(*args, **kwargs).tostring()<EOL>", "docstring": "Transform a unicode string into another more compatible with latex,\n    fixing some common typographical errors", "id": "f14040:m0"}
{"signature": "def _interstitial_to_sentence_spacing(self):", "body": "pass<EOL>", "docstring": "Fix errors where inter-sentence spacing\n        is not used after after a word ending with a capital letter.", "id": "f14040:c0:m3"}
{"signature": "def _process_events(self, events):", "body": "for f, callback, transferred, key, ov in events:<EOL><INDENT>try:<EOL><INDENT>self._logger.debug('<STR_LIT>'.format(callback))<EOL>value = callback(transferred, key, ov)<EOL><DEDENT>except OSError:<EOL><INDENT>self._logger.warning('<STR_LIT>', exc_info=sys.exc_info())<EOL><DEDENT>else:<EOL><INDENT>f.set_result(value)<EOL><DEDENT><DEDENT>", "docstring": "Process events from proactor.", "id": "f14045:c0:m1"}
{"signature": "def select(self, *args, **kwargs):", "body": "raise NotImplementedError<EOL>", "docstring": "Implement abstract method even though we don't need it.", "id": "f14047:c1:m1"}
{"signature": "def _fileobj_to_fd(fileobj):", "body": "if isinstance(fileobj, int):<EOL><INDENT>fd = fileobj<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>fd = int(fileobj.fileno())<EOL><DEDENT>except (AttributeError, TypeError, ValueError) as ex:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(fileobj)) from ex<EOL><DEDENT><DEDENT>if fd < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(fd))<EOL><DEDENT>return fd<EOL>", "docstring": "Return a file descriptor from a file object.\n\nParameters:\nfileobj -- file object or file descriptor\n\nReturns:\ncorresponding file descriptor\n\nRaises:\nValueError if the object is invalid", "id": "f14047:m0"}
{"signature": "def _fileobj_lookup(self, fileobj):", "body": "try:<EOL><INDENT>return _fileobj_to_fd(fileobj)<EOL><DEDENT>except ValueError:<EOL><INDENT>for key in self._fd_to_key.values():<EOL><INDENT>if key.fileobj is fileobj:<EOL><INDENT>return key.fd<EOL><DEDENT><DEDENT>raise<EOL><DEDENT>", "docstring": "Return a file descriptor from a file object.\n\n        This wraps _fileobj_to_fd() to do an exhaustive search in case\n        the object is invalid but we still have it in our map.  This\n        is used by unregister() so we can unregister an object that\n        was previously registered even if it is closed.  It is also\n        used by _SelectorMapping.", "id": "f14047:c1:m2"}
{"signature": "def _process_event(self, key, mask):", "body": "self._logger.debug('<STR_LIT>'.format(key, mask))<EOL>fileobj, (reader, writer) = key.fileobj, key.data<EOL>if mask & selectors.EVENT_READ and reader is not None:<EOL><INDENT>if reader._cancelled:<EOL><INDENT>self.remove_reader(fileobj)<EOL><DEDENT>else:<EOL><INDENT>self._logger.debug('<STR_LIT>'.format(reader))<EOL>reader._run()<EOL><DEDENT><DEDENT>if mask & selectors.EVENT_WRITE and writer is not None:<EOL><INDENT>if writer._cancelled:<EOL><INDENT>self.remove_writer(fileobj)<EOL><DEDENT>else:<EOL><INDENT>self._logger.debug('<STR_LIT>'.format(writer))<EOL>writer._run()<EOL><DEDENT><DEDENT>", "docstring": "Selector has delivered us an event.", "id": "f14047:c2:m3"}
{"signature": "def default_exception_handler(self, context):", "body": "self._logger.debug('<STR_LIT>')<EOL>message = context.get('<STR_LIT:message>')<EOL>if not message:<EOL><INDENT>message = '<STR_LIT>'<EOL><DEDENT>try:<EOL><INDENT>exception = context['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>exc_info = False<EOL><DEDENT>else:<EOL><INDENT>exc_info = (type(exception), exception, exception.__traceback__)<EOL><DEDENT>log_lines = [message]<EOL>for key in [k for k in sorted(context) if k not in {'<STR_LIT:message>', '<STR_LIT>'}]:<EOL><INDENT>log_lines.append('<STR_LIT>'.format(key, context[key]))<EOL><DEDENT>self.__log_error('<STR_LIT:\\n>'.join(log_lines), exc_info=exc_info)<EOL>", "docstring": "Handle exceptions.\n\n        This is the default exception handler.\n\n        This is called when an exception occurs and no exception\n        handler is set, and can be called by a custom exception\n        handler that wants to defer to the default behavior.\n\n        context parameter has the same meaning as in\n        `call_exception_handler()`.", "id": "f14048:c3:m21"}
{"signature": "def run_forever(self):", "body": "self.__is_running = True<EOL>self._before_run_forever()<EOL>try:<EOL><INDENT>self._logger.debug('<STR_LIT>')<EOL>rslt = self.__app.exec_()<EOL>self._logger.debug('<STR_LIT>'.format(rslt))<EOL>return rslt<EOL><DEDENT>finally:<EOL><INDENT>self._after_run_forever()<EOL>self.__is_running = False<EOL><DEDENT>", "docstring": "Run eventloop forever.", "id": "f14048:c3:m1"}
{"signature": "def call_at(self, when, callback, *args, context=None):", "body": "return self.call_later(when - self.time(), callback, *args, context=context)<EOL>", "docstring": "Register callback to be invoked at a certain time.", "id": "f14048:c3:m9"}
{"signature": "def add_writer(self, fd, callback, *args):", "body": "self._check_closed()<EOL>try:<EOL><INDENT>existing = self._write_notifiers[fd]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>existing.setEnabled(False)<EOL>existing.activated.disconnect()<EOL><DEDENT>notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Write)<EOL>notifier.setEnabled(True)<EOL>self._logger.debug('<STR_LIT>'.format(fd))<EOL>notifier.activated.connect(<EOL>lambda: self.__on_notifier_ready(<EOL>self._write_notifiers, notifier, fd, callback, args)  <EOL>)<EOL>self._write_notifiers[fd] = notifier<EOL>", "docstring": "Register a callback for when a file descriptor is ready for writing.", "id": "f14048:c3:m13"}
{"signature": "def remove_writer(self, fd):", "body": "if self.is_closed():<EOL><INDENT>return<EOL><DEDENT>self._logger.debug('<STR_LIT>'.format(fd))<EOL>try:<EOL><INDENT>notifier = self._write_notifiers.pop(fd)<EOL><DEDENT>except KeyError:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>notifier.setEnabled(False)<EOL>return True<EOL><DEDENT>", "docstring": "Remove writer callback.", "id": "f14048:c3:m14"}
{"signature": "def is_running(self):", "body": "return self.__is_running<EOL>", "docstring": "Return True if the event loop is running, False otherwise.", "id": "f14048:c3:m4"}
{"signature": "def remove_reader(self, fd):", "body": "if self.is_closed():<EOL><INDENT>return<EOL><DEDENT>self._logger.debug('<STR_LIT>'.format(fd))<EOL>try:<EOL><INDENT>notifier = self._read_notifiers.pop(fd)<EOL><DEDENT>except KeyError:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>notifier.setEnabled(False)<EOL>return True<EOL><DEDENT>", "docstring": "Remove reader callback.", "id": "f14048:c3:m12"}
{"signature": "def close(self):", "body": "if self.is_running():<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>if self.is_closed():<EOL><INDENT>return<EOL><DEDENT>self._logger.debug('<STR_LIT>')<EOL>if self.__default_executor is not None:<EOL><INDENT>self.__default_executor.shutdown()<EOL><DEDENT>super().close()<EOL>self._timer.stop()<EOL>self.__app = None<EOL>for notifier in itertools.chain(self._read_notifiers.values(), self._write_notifiers.values()):<EOL><INDENT>notifier.setEnabled(False)<EOL><DEDENT>self._read_notifiers = None<EOL>self._write_notifiers = None<EOL>", "docstring": "Release all resources used by the event loop.\n\nThe loop cannot be restarted after it has been closed.", "id": "f14048:c3:m5"}
{"signature": "def asyncClose(fn):", "body": "@functools.wraps(fn)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>f = asyncio.ensure_future(fn(*args, **kwargs))<EOL>while not f.done():<EOL><INDENT>QApplication.instance().processEvents()<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Allow to run async code before application is closed.", "id": "f14048:m1"}
{"signature": "def get_video(self, node):", "body": "video = Video()<EOL>video._embed_code = self.get_embed_code(node)<EOL>video._embed_type = self.get_embed_type(node)<EOL>video._width = self.get_width(node)<EOL>video._height = self.get_height(node)<EOL>video._src = self.get_src(node)<EOL>video._provider = self.get_provider(video.src)<EOL>return video<EOL>", "docstring": "Create a video object from a video embed", "id": "f14073:c0:m7"}
{"signature": "def update_score(self, node, add_to_score):", "body": "current_score = <NUM_LIT:0><EOL>score_string = self.parser.getAttribute(node, '<STR_LIT>')<EOL>if score_string:<EOL><INDENT>current_score = int(score_string)<EOL><DEDENT>new_score = current_score + int(add_to_score)<EOL>self.parser.setAttribute(node, \"<STR_LIT>\", str(new_score))<EOL>", "docstring": "adds a score to the gravityScore Attribute we put on divs\nwe'll get the current score then add the score\nwe're passing in to the current", "id": "f14074:c0:m9"}
{"signature": "def update_node_count(self, node, add_to_count):", "body": "current_score = <NUM_LIT:0><EOL>count_string = self.parser.getAttribute(node, '<STR_LIT>')<EOL>if count_string:<EOL><INDENT>current_score = int(count_string)<EOL><DEDENT>new_score = current_score + add_to_count<EOL>self.parser.setAttribute(node, \"<STR_LIT>\", str(new_score))<EOL>", "docstring": "\\\n        stores how many decent nodes are under a parent node", "id": "f14074:c0:m10"}
{"signature": "def get_siblings_score(self, top_node):", "body": "base = <NUM_LIT><EOL>paragraphs_number = <NUM_LIT:0><EOL>paragraphs_score = <NUM_LIT:0><EOL>nodes_to_check = self.parser.getElementsByTag(top_node, tag='<STR_LIT:p>')<EOL>for node in nodes_to_check:<EOL><INDENT>text_node = self.parser.getText(node)<EOL>word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node)<EOL>high_link_density = self.is_highlink_density(node)<EOL>if word_stats.get_stopword_count() > <NUM_LIT:2> and not high_link_density:<EOL><INDENT>paragraphs_number += <NUM_LIT:1><EOL>paragraphs_score += word_stats.get_stopword_count()<EOL><DEDENT><DEDENT>if paragraphs_number > <NUM_LIT:0>:<EOL><INDENT>base = paragraphs_score // paragraphs_number<EOL><DEDENT>return base<EOL>", "docstring": "we could have long articles that have tons of paragraphs\nso if we tried to calculate the base score against\nthe total text score of those paragraphs it would be unfair.\nSo we need to normalize the score based on the average scoring\nof the paragraphs within the top node.\nFor example if our total score of 10 paragraphs was 1000\nbut each had an average value of 100 then 100 should be our base.", "id": "f14074:c0:m8"}
{"signature": "def get_score(self, node):", "body": "return self.get_node_gravity_score(node) or <NUM_LIT:0><EOL>", "docstring": "returns the gravityScore as an integer from this node", "id": "f14074:c0:m12"}
{"signature": "def is_highlink_density(self, element):", "body": "links = self.parser.getElementsByTag(element, tag='<STR_LIT:a>')<EOL>if not links:<EOL><INDENT>return False<EOL><DEDENT>text = self.parser.getText(element)<EOL>words = text.split('<STR_LIT:U+0020>')<EOL>words_number = float(len(words))<EOL>link_text_parts = []<EOL>for link in links:<EOL><INDENT>link_text_parts.append(self.parser.getText(link))<EOL><DEDENT>link_text = '<STR_LIT>'.join(link_text_parts)<EOL>link_words = link_text.split('<STR_LIT:U+0020>')<EOL>number_of_link_words = float(len(link_words))<EOL>number_of_links = float(len(links))<EOL>link_divisor = float(number_of_link_words / words_number)<EOL>score = float(link_divisor * number_of_links)<EOL>if score >= <NUM_LIT:1.0>:<EOL><INDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "checks the density of links within a node,\nis there not much text and most of it contains linky shit?\nif so it's no good", "id": "f14074:c0:m11"}
{"signature": "def get_language(self):", "body": "<EOL>if self.config.use_meta_language:<EOL><INDENT>if self.article.meta_lang:<EOL><INDENT>return self.article.meta_lang[:<NUM_LIT:2>]<EOL><DEDENT><DEDENT>return self.config.target_language<EOL>", "docstring": "Returns the language is by the article or\nthe configuration language", "id": "f14074:c0:m0"}
{"signature": "def is_boostable(self, node):", "body": "para = \"<STR_LIT:p>\"<EOL>steps_away = <NUM_LIT:0><EOL>minimum_stopword_count = <NUM_LIT:5><EOL>max_stepsaway_from_node = <NUM_LIT:3><EOL>nodes = self.walk_siblings(node)<EOL>for current_node in nodes:<EOL><INDENT>current_node_tag = self.parser.getTag(current_node)<EOL>if current_node_tag == para:<EOL><INDENT>if steps_away >= max_stepsaway_from_node:<EOL><INDENT>return False<EOL><DEDENT>para_text = self.parser.getText(current_node)<EOL>word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(para_text)<EOL>if word_stats.get_stopword_count() > minimum_stopword_count:<EOL><INDENT>return True<EOL><DEDENT>steps_away += <NUM_LIT:1><EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "\\\n        alot of times the first paragraph might be the caption under an image\n        so we'll want to make sure if we're going to boost a parent node that\n        it should be connected to other paragraphs,\n        at least for the first n paragraphs so we'll want to make sure that\n        the next sibling is a paragraph and has at\n        least some substatial weight to it", "id": "f14074:c0:m4"}
{"signature": "def check_known_schemas(self):", "body": "if '<STR_LIT:image>' in self.article.opengraph:<EOL><INDENT>return self.get_image(self.article.opengraph[\"<STR_LIT:image>\"],<EOL>extraction_type='<STR_LIT>')<EOL><DEDENT>elif (self.article.schema and '<STR_LIT:image>' in self.article.schema and<EOL>\"<STR_LIT:url>\" in self.article.schema[\"<STR_LIT:image>\"]):<EOL><INDENT>return self.get_image(self.article.schema[\"<STR_LIT:image>\"][\"<STR_LIT:url>\"],<EOL>extraction_type='<STR_LIT>')<EOL><DEDENT>return None<EOL>", "docstring": "\\\n        checks to see if we were able to find the image via known schemas:\n\n        Supported Schemas\n         - Open Graph\n         - schema.org", "id": "f14075:c1:m15"}
{"signature": "def is_valid_filename(self, image_node):", "body": "src = self.parser.getAttribute(image_node, attr='<STR_LIT:src>')<EOL>if not src:<EOL><INDENT>return False<EOL><DEDENT>if self.badimages_names_re.search(src):<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "\\\n        will check the image src against a list\n        of bad image files we know of like buttons, etc...", "id": "f14075:c1:m10"}
{"signature": "def build_image_path(self, src):", "body": "o = urlparse(src)<EOL>if o.netloc != '<STR_LIT>':<EOL><INDENT>return o.geturl()<EOL><DEDENT>return urljoin(self.article.final_url, src)<EOL>", "docstring": "\\\n        This method will take an image path and build\n        out the absolute path to that image\n        * using the initial url we crawled\n          so we can find a link to the image\n          if they use relative urls like ../myimage.jpg", "id": "f14075:c1:m19"}
{"signature": "def check_link_tag(self):", "body": "node = self.article.raw_doc<EOL>meta = self.parser.getElementsByTag(node, tag='<STR_LIT>', attr='<STR_LIT>', value='<STR_LIT>')<EOL>for item in meta:<EOL><INDENT>src = self.parser.getAttribute(item, attr='<STR_LIT>')<EOL>if src:<EOL><INDENT>return self.get_image(src, extraction_type='<STR_LIT>')<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "\\\n        checks to see if we were able to\n        find open link_src on this page", "id": "f14075:c1:m14"}
{"signature": "def filter_bad_names(self, images):", "body": "good_images = []<EOL>for image in images:<EOL><INDENT>if self.is_valid_filename(image):<EOL><INDENT>good_images.append(image)<EOL><DEDENT><DEDENT>return good_images if len(good_images) > <NUM_LIT:0> else None<EOL>", "docstring": "\\\n        takes a list of image elements\n        and filters out the ones with bad names", "id": "f14075:c1:m9"}
{"signature": "def get_local_image(self, src):", "body": "return ImageUtils.store_image(self.fetcher, self.article.link_hash, src, self.config)<EOL>", "docstring": "\\\n        returns the bytes of the image file on disk", "id": "f14075:c1:m16"}
{"signature": "def get_favicon(self):", "body": "kwargs = {'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>', '<STR_LIT:value>': '<STR_LIT>'}<EOL>meta = self.parser.getElementsByTag(self.article.doc, **kwargs)<EOL>if meta:<EOL><INDENT>favicon = self.parser.getAttribute(meta[<NUM_LIT:0>], '<STR_LIT>')<EOL>return favicon<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Extract the favicon from a website\nhttp://en.wikipedia.org/wiki/Favicon\n<link rel=\"shortcut icon\" type=\"image/png\" href=\"favicon.png\" />\n<link rel=\"icon\" type=\"image/png\" href=\"favicon.png\" />", "id": "f14082:c0:m1"}
{"signature": "def get_meta_keywords(self):", "body": "return self.get_meta_content(\"<STR_LIT>\")<EOL>", "docstring": "if the article has meta keywords set in the source, use that", "id": "f14082:c0:m6"}
{"signature": "def get_meta_content(self, meta_name):", "body": "meta = self.parser.css_select(self.article.doc, meta_name)<EOL>content = None<EOL>if meta is not None and len(meta) > <NUM_LIT:0>:<EOL><INDENT>content = self.parser.getAttribute(meta[<NUM_LIT:0>], '<STR_LIT:content>')<EOL><DEDENT>if content:<EOL><INDENT>return content.strip()<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Extract a given meta content form document", "id": "f14082:c0:m4"}
{"signature": "@property<EOL><INDENT>def extraction_type(self):<DEDENT>", "body": "return self._extraction_type<EOL>", "docstring": "str: The extraction type used\n\n            Note:\n                Read only", "id": "f14085:c0:m6"}
{"signature": "def remove_fewwords_paragraphs(self):", "body": "all_nodes = self.parser.getElementsByTags(self.get_top_node(), ['<STR_LIT:*>'])<EOL>all_nodes.reverse()<EOL>for elm in all_nodes:<EOL><INDENT>tag = self.parser.getTag(elm)<EOL>text = self.parser.getText(elm)<EOL>stop_words = self.stopwords_class(language=self.get_language()).get_stopword_count(text)<EOL>if ((tag != '<STR_LIT>' or text != '<STR_LIT>') and stop_words.get_stopword_count() < <NUM_LIT:3> and<EOL>len(self.parser.getElementsByTag(elm, tag='<STR_LIT:object>')) == <NUM_LIT:0> and<EOL>len(self.parser.getElementsByTag(elm, tag='<STR_LIT>')) == <NUM_LIT:0>):<EOL><INDENT>self.parser.remove(elm)<EOL><DEDENT>else:<EOL><INDENT>trimmed = self.parser.getText(elm)<EOL>if trimmed.startswith(\"<STR_LIT:(>\") and trimmed.endswith(\"<STR_LIT:)>\"):<EOL><INDENT>self.parser.remove(elm)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "\\\n        remove paragraphs that have less than x number of words,\n        would indicate that it's some sort of link", "id": "f14086:c0:m10"}
{"signature": "def links_to_text(self):", "body": "self.parser.stripTags(self.get_top_node(), '<STR_LIT:a>')<EOL>", "docstring": "\\\n        cleans up and converts any nodes that\n        should be considered text into text", "id": "f14086:c0:m6"}
{"signature": "def make_list_elms_pretty(self):", "body": "for elm in self.parser.getElementsByTag(self.top_node, tag='<STR_LIT>'):<EOL><INDENT>elm.text = r'<STR_LIT>'.format(elm.text)<EOL><DEDENT>", "docstring": "make any list element read like a list", "id": "f14086:c0:m7"}
{"signature": "def smart_unicode(string, encoding='<STR_LIT:utf-8>', strings_only=False, errors='<STR_LIT:strict>'):", "body": "<EOL>return force_unicode(string, encoding, strings_only, errors)<EOL>", "docstring": "Returns a unicode object representing 's'. Treats bytestrings using the\n'encoding' codec.\n\nIf strings_only is True, don't convert (some) non-string-like objects.", "id": "f14090:m0"}
{"signature": "@property<EOL><INDENT>def top_image(self):<DEDENT>", "body": "return self._top_image<EOL>", "docstring": "Image: The top image object that likely represents the article\n\n            Returns:\n                Image: See more information on the goose3.Image class\n            Note:\n                Read only", "id": "f14091:c0:m11"}
{"signature": "@property<EOL><INDENT>def meta_lang(self):<DEDENT>", "body": "return self._meta_lang<EOL>", "docstring": "str: Contents of the meta-lang field from the HTML source\n\n            Note:\n                Read only", "id": "f14091:c0:m4"}
{"signature": "@property<EOL><INDENT>def authors(self):<DEDENT>", "body": "return self._authors<EOL>", "docstring": "list(str): A listing of authors as parsed from the meta tags\n\n            Note:\n                Read only", "id": "f14091:c0:m17"}
{"signature": "@property<EOL><INDENT>def opengraph(self):<DEDENT>", "body": "return self._opengraph<EOL>", "docstring": "dict: All opengraph tag data\n\n            Note:\n                Read only", "id": "f14091:c0:m13"}
{"signature": "@property<EOL><INDENT>def meta_encoding(self):<DEDENT>", "body": "return self._meta_encoding<EOL>", "docstring": "str: Contents of the encoding/charset field from the HTML source\n\n            Note:\n                Read only", "id": "f14091:c0:m7"}
{"signature": "@property<EOL><INDENT>def domain(self):<DEDENT>", "body": "return self._domain<EOL>", "docstring": "str: Domain of the article parsed\n\n            Note:\n                Read only", "id": "f14091:c0:m9"}
{"signature": "@property<EOL><INDENT>def links(self):<DEDENT>", "body": "return self._links<EOL>", "docstring": "list(str): A listing of URL links within the article\n\n            Note:\n                Read only", "id": "f14091:c0:m16"}
{"signature": "@property<EOL><INDENT>def link_hash(self):<DEDENT>", "body": "return self._link_hash<EOL>", "docstring": "str: The MD5 of the final url to be used for various identification tasks\n\n            Note:\n                Read only", "id": "f14091:c0:m19"}
{"signature": "@property<EOL><INDENT>def infos(self):<DEDENT>", "body": "data = {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT:description>\": self.meta_description,<EOL>\"<STR_LIT>\": self.meta_lang,<EOL>\"<STR_LIT>\": self.meta_keywords,<EOL>\"<STR_LIT>\": self.meta_favicon,<EOL>\"<STR_LIT>\": self.canonical_link,<EOL>\"<STR_LIT>\": self.meta_encoding<EOL>},<EOL>\"<STR_LIT:image>\": None,<EOL>\"<STR_LIT>\": self.domain,<EOL>\"<STR_LIT:title>\": self.title,<EOL>\"<STR_LIT>\": self.cleaned_text,<EOL>\"<STR_LIT>\": self.opengraph,<EOL>\"<STR_LIT>\": self.tags,<EOL>\"<STR_LIT>\": self.tweets,<EOL>\"<STR_LIT>\": [],<EOL>\"<STR_LIT>\": self.links,<EOL>\"<STR_LIT>\": self.authors,<EOL>\"<STR_LIT>\": self.publish_date<EOL>}<EOL>if self.top_image is not None:<EOL><INDENT>data['<STR_LIT:image>'] = {<EOL>'<STR_LIT:url>': self.top_image.src,<EOL>'<STR_LIT:width>': self.top_image.width,<EOL>'<STR_LIT>': self.top_image.height,<EOL>'<STR_LIT:type>': '<STR_LIT:image>'<EOL>}<EOL><DEDENT>for movie in self.movies:<EOL><INDENT>data['<STR_LIT>'].append({<EOL>'<STR_LIT>': movie.embed_type,<EOL>'<STR_LIT>': movie.provider,<EOL>'<STR_LIT:width>': movie.width,<EOL>'<STR_LIT>': movie.height,<EOL>'<STR_LIT>': movie.embed_code,<EOL>'<STR_LIT:src>': movie.src,<EOL>})<EOL><DEDENT>return data<EOL>", "docstring": "dict: The summation of all data available about the extracted article\n\n            Note:\n                Read only", "id": "f14091:c0:m27"}
{"signature": "@property<EOL><INDENT>def schema(self):<DEDENT>", "body": "return self._schema<EOL>", "docstring": "dict: All schema tag data\n\n            Note:\n                Read only", "id": "f14091:c0:m23"}
{"signature": "@property<EOL><INDENT>def meta_description(self):<DEDENT>", "body": "return self._meta_description<EOL>", "docstring": "str: Contents of the meta-description field from the HTML source\n\n            Note:\n                Read only", "id": "f14091:c0:m3"}
{"signature": "@property<EOL><INDENT>def doc(self):<DEDENT>", "body": "return self._doc<EOL>", "docstring": "etree: lxml document that is being processed\n\n            Note:\n                Read only", "id": "f14091:c0:m21"}
{"signature": "@property<EOL><INDENT>def tweets(self):<DEDENT>", "body": "return self._tweets<EOL>", "docstring": "list(str): A listing of embeded tweets in the article\n\n            Note:\n                Read only", "id": "f14091:c0:m14"}
{"signature": "@property<EOL><INDENT>def top_node(self):<DEDENT>", "body": "return self._top_node<EOL>", "docstring": "etree: The top Element that is a candidate for the main body of the article\n\n            Note:\n                Read only", "id": "f14091:c0:m10"}
{"signature": "@property<EOL><INDENT>def meta_favicon(self):<DEDENT>", "body": "return self._meta_favicon<EOL>", "docstring": "str: Contents of the meta-favicon field from the HTML source\n\n            Note:\n                Read only", "id": "f14091:c0:m5"}
{"signature": "@property<EOL><INDENT>def final_url(self):<DEDENT>", "body": "return self._final_url<EOL>", "docstring": "str: The URL that was used to pull and parsed; `None` if raw_html was used\n            and no url element was found.\n\n            Note:\n                Read only", "id": "f14091:c0:m18"}
{"signature": "@property<EOL><INDENT>def http_timeout(self):<DEDENT>", "body": "return self._http_timeout<EOL>", "docstring": "float: The time delay to pass to `requests` to wait for the response\n            in seconds\n\n            Note:\n                Defaults to 30.0", "id": "f14092:c3:m9"}
{"signature": "@property<EOL><INDENT>def http_headers(self):<DEDENT>", "body": "return self._http_headers<EOL>", "docstring": "dict: Custom headers to pass directly to the supporting `requests` object\n\n            See Also:\n                `Requests Custom Headers <http://docs.python-requests.org/en/master/user/quickstart/#custom-headers>`__", "id": "f14092:c3:m22"}
{"signature": "@property<EOL><INDENT>def parser_class(self):<DEDENT>", "body": "return self._parser_class<EOL>", "docstring": "str: The key of the parser to use\n\n            Note:\n                Defaults to `lxml`", "id": "f14092:c3:m15"}
{"signature": "@property<EOL><INDENT>def parse_headers(self):<DEDENT>", "body": "return self._parse_headers<EOL>", "docstring": "bool: Specify if headers should be pulled or not in the cleaned_text\n            output\n\n            Note:\n                Defaults to `True`", "id": "f14092:c3:m44"}
{"signature": "@browser_user_agent.setter<EOL><INDENT>def browser_user_agent(self, val):<DEDENT>", "body": "self._browser_user_agent = val<EOL>", "docstring": "set the browser user agent string", "id": "f14092:c3:m25"}
{"signature": "@images_min_bytes.setter<EOL><INDENT>def images_min_bytes(self, val):<DEDENT>", "body": "self._images_min_bytes = int(val)<EOL>", "docstring": "set the images_min_bytes property", "id": "f14092:c3:m39"}
{"signature": "@parse_headers.setter<EOL><INDENT>def parse_headers(self, val):<DEDENT>", "body": "self._parse_headers = bool(val)<EOL>", "docstring": "set if headers should be parsed", "id": "f14092:c3:m45"}
{"signature": "@property<EOL><INDENT>def images_min_bytes(self):<DEDENT>", "body": "return self._images_min_bytes<EOL>", "docstring": "int: Minimum number of bytes for an image to be evaluated to be the\n            main image of the site\n\n            Note:\n                Defaults to 4500 bytes", "id": "f14092:c3:m38"}
{"signature": "@debug.setter<EOL><INDENT>def debug(self, val):<DEDENT>", "body": "self._debug = bool(val)<EOL>", "docstring": "set the debug property", "id": "f14092:c3:m14"}
{"signature": "@property<EOL><INDENT>def use_meta_language(self):<DEDENT>", "body": "return self._use_meta_language<EOL>", "docstring": "bool: Determine if language should be extracted from the meta tags\n            or not. If this is set to `False` then the target_language will be\n            used. Also, if extraction fails then the target_language will be\n            utilized.\n\n            Note:\n                Defaults to `True`", "id": "f14092:c3:m34"}
{"signature": "@local_storage_path.setter<EOL><INDENT>def local_storage_path(self, val):<DEDENT>", "body": "self._local_storage_path = val<EOL>", "docstring": "set the local_storage_path property", "id": "f14092:c3:m12"}
{"signature": "@property<EOL><INDENT>def http_auth(self):<DEDENT>", "body": "return self._http_auth<EOL>", "docstring": "tuple: Authentication class and information to pass to the requests\n            library\n\n            See Also:\n                `Requests Authentication <http://docs.python-requests.org/en/master/user/authentication/>`__", "id": "f14092:c3:m18"}
{"signature": "@http_proxies.setter<EOL><INDENT>def http_proxies(self, val):<DEDENT>", "body": "self._http_proxies = val<EOL>", "docstring": "set the http_proxies property", "id": "f14092:c3:m21"}
{"signature": "@known_author_patterns.setter<EOL><INDENT>def known_author_patterns(self, val):<DEDENT>", "body": "def create_pat_from_dict(val):<EOL><INDENT>'''<STR_LIT>'''<EOL>if \"<STR_LIT>\" in val:<EOL><INDENT>pat = AuthorPattern(tag=val[\"<STR_LIT>\"])<EOL>if \"<STR_LIT>\" in val:<EOL><INDENT>pat.attr = val[\"<STR_LIT>\"]<EOL>pat.value = val[\"<STR_LIT:value>\"]<EOL><DEDENT><DEDENT>elif \"<STR_LIT>\" in val:<EOL><INDENT>pat = AuthorPattern(attr=val[\"<STR_LIT>\"], value=val[\"<STR_LIT:value>\"],<EOL>content=val[\"<STR_LIT:content>\"])<EOL><DEDENT>if \"<STR_LIT>\" in val:<EOL><INDENT>pat.subpattern = create_pat_from_dict(val[\"<STR_LIT>\"])<EOL><DEDENT>return pat<EOL><DEDENT>if isinstance(val, list):<EOL><INDENT>self._known_author_patterns = [<EOL>x if isinstance(x, AuthorPattern) else create_pat_from_dict(x)<EOL>for x in val<EOL>] + self.known_author_patterns<EOL><DEDENT>elif isinstance(val, AuthorPattern):<EOL><INDENT>self._known_author_patterns.insert(<NUM_LIT:0>, val)<EOL><DEDENT>elif isinstance(val, dict):<EOL><INDENT>self._known_author_patterns.insert(<NUM_LIT:0>, create_pat_from_dict(val))<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\".format(type(val)))<EOL><DEDENT>", "docstring": "val must be a dictionary or list of dictionaries\n            e.g., {'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'}\n                or [{'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'},\n                    {'attrribute': 'property', 'value': 'pub_time', 'content': 'content'}]", "id": "f14092:c3:m6"}
{"signature": "def extract(self, url=None, raw_html=None):", "body": "crawl_candidate = CrawlCandidate(self.config, url, raw_html)<EOL>return self.__crawl(crawl_candidate)<EOL>", "docstring": "Extract the most likely article content from the html page\n\n            Args:\n                url (str): URL to pull and parse\n                raw_html (str): String representation of the HTML page\n            Returns:\n                Article: Representation of the article contents \\\n                including other parsed and extracted metadata", "id": "f14093:c0:m4"}
{"signature": "def close(self):", "body": "if self.fetcher is not None:<EOL><INDENT>self.shutdown_network()<EOL><DEDENT>self.finalizer.atexit = False<EOL>", "docstring": "Close the network connection and perform any other required cleanup\n\n            Note:\n                Auto closed when using goose as a context manager or when garbage collected", "id": "f14093:c0:m3"}
{"signature": "def __exit__(self, exc_type, exc_val, exc_tb):", "body": "self.close()<EOL>", "docstring": "Define what to do when the context manager exits", "id": "f14093:c0:m2"}
{"signature": "def __enter__(self):", "body": "return self<EOL>", "docstring": "Setup the context manager", "id": "f14093:c0:m1"}
{"signature": "def shutdown_network(self):", "body": "self.fetcher.close()<EOL>self.fetcher = None<EOL>", "docstring": "Close the network connection\n\n            Note:\n                Auto closed when using goose as a context manager or when garbage collected", "id": "f14093:c0:m5"}
{"signature": "@property<EOL><INDENT>def embed_code(self):<DEDENT>", "body": "return self._embed_code<EOL>", "docstring": "str: The embed code of the video\n\n            Note:\n                Read only", "id": "f14095:c0:m5"}
{"signature": "@property<EOL><INDENT>def src(self):<DEDENT>", "body": "return self._src<EOL>", "docstring": "str: The URL source of the video\n\n            Note:\n                Read only", "id": "f14095:c0:m6"}
{"signature": "@property<EOL><INDENT>def embed_type(self):<DEDENT>", "body": "return self._embed_type<EOL>", "docstring": "str: The type of embeding such as embed, object, or iframe\n\n            Note:\n                Read only", "id": "f14095:c0:m1"}
{"signature": "def read_file(filepath):", "body": "with open(filepath, '<STR_LIT:r>') as filepointer:<EOL><INDENT>res = filepointer.read()<EOL><DEDENT>return res<EOL>", "docstring": "read the file", "id": "f14099:m0"}
{"signature": "def get_context(request, model=None):", "body": "<EOL>param_values = get_param_values(request, model=model)<EOL>context = param_values.pop('<STR_LIT>', {})<EOL>if isinstance(context, (unicode, str)):<EOL><INDENT>context = projex.rest.unjsonify(context)<EOL><DEDENT>has_limit = '<STR_LIT>' in context or '<STR_LIT>' in param_values<EOL>orb_context = orb.Context(**context)<EOL>used = set()<EOL>query_context = {}<EOL>for key in orb.Context.Defaults:<EOL><INDENT>if key in param_values:<EOL><INDENT>used.add(key)<EOL>query_context[key] = param_values.get(key)<EOL><DEDENT><DEDENT>schema_values = {}<EOL>if model:<EOL><INDENT>for key, value in request.matchdict.items():<EOL><INDENT>if model.schema().column(key, raise_=False):<EOL><INDENT>schema_values[key] = value<EOL><DEDENT><DEDENT>for key, value in param_values.items():<EOL><INDENT>root_key = key.split('<STR_LIT:.>')[<NUM_LIT:0>]<EOL>schema_object = model.schema().column(root_key, raise_=False) or model.schema().collector(root_key)<EOL>if schema_object:<EOL><INDENT>value = param_values.pop(key)<EOL>if isinstance(schema_object, orb.Collector) and type(value) not in (tuple, list):<EOL><INDENT>value = [value]<EOL><DEDENT>schema_values[key] = value<EOL><DEDENT><DEDENT><DEDENT>query_context['<STR_LIT>'] = {<EOL>'<STR_LIT>': request<EOL>}<EOL>try:<EOL><INDENT>default_context = request.orb_default_context<EOL><DEDENT>except AttributeError:<EOL><INDENT>try:<EOL><INDENT>query_context['<STR_LIT>'].update(request.orb_scope)<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if '<STR_LIT>' in default_context:<EOL><INDENT>query_context['<STR_LIT>'].update(default_context.pop('<STR_LIT>'))<EOL><DEDENT>for k, v in default_context.items():<EOL><INDENT>query_context.setdefault(k, v)<EOL><DEDENT><DEDENT>orb_context.update(query_context)<EOL>return schema_values, orb_context<EOL>", "docstring": "Extracts ORB context information from the request.\n\n:param request: 
<pyramid.request.Request>\n:param model: <orb.Model> || None\n\n:return: {<str> key: <variant> value} values, <orb.Context>", "id": "f14110:m1"}
{"signature": "def data_for_target(self, do_roll=True, ignore_bright=<NUM_LIT:0>):", "body": "self.ignore_bright = ignore_bright<EOL>self.calc_fluxes()<EOL>self.roll_best = np.zeros((<NUM_LIT:4>,<NUM_LIT:2>))<EOL>if do_roll == True:<EOL><INDENT>for i in range(<NUM_LIT:4>):<EOL><INDENT>g = np.where(self.qs == i)[<NUM_LIT:0>]<EOL>wh = np.where(self.times[g] > <NUM_LIT>)<EOL>self.roll_best[i] = self.do_rolltest(g, wh)<EOL><DEDENT><DEDENT>self.do_photometry()<EOL>", "docstring": "Determine the normalized photometry, accounting for effects shared by reference stars. Does not provide\nthe opportunity to adjust the aperture\n\nArgs: \n    image_region: If ``True`` allow the aperture to be shifted up to one pixel in both the x and y\n        directions to account for differential velocity aberration (default: ``True``)\n    ignore_bright: The number of brightest stars to be ignored in the determination of the flux from \n        reference stars. If there is reason to believe (for example) that saturated stars may behave\n        differently than the target star, they can be avoided with this flag (default: ``0``)", "id": "f14117:c0:m9"}
{"signature": "def generate_panel(self, img):", "body": "plt.figure(figsize=(<NUM_LIT>,<NUM_LIT:6>))<EOL>ax = plt.gca()<EOL>fig = plt.gcf()<EOL>plt.subplot(<NUM_LIT>)<EOL>data_save = np.zeros_like(self.postcard)<EOL>self.roll_best = np.zeros((<NUM_LIT:4>,<NUM_LIT:2>))<EOL>for i in range(<NUM_LIT:4>):<EOL><INDENT>g = np.where(self.qs == i)[<NUM_LIT:0>]<EOL>wh = np.where(self.times[g] > <NUM_LIT>)<EOL>self.roll_best[i] = self.do_rolltest(g, wh)<EOL><DEDENT>self.do_photometry()<EOL>for i in range(<NUM_LIT:4>):<EOL><INDENT>g = np.where(self.qs == i)[<NUM_LIT:0>]<EOL>plt.errorbar(self.times[g], self.obs_flux[g], yerr=self.flux_uncert[i], fmt=fmt[i])<EOL><DEDENT>plt.xlabel('<STR_LIT>', fontsize=<NUM_LIT:20>)<EOL>plt.ylabel('<STR_LIT>', fontsize=<NUM_LIT:20>)<EOL>plt.subplot(<NUM_LIT>)<EOL>implot = plt.imshow(img, interpolation='<STR_LIT>', cmap='<STR_LIT>', vmin=<NUM_LIT>*<NUM_LIT>, vmax=<NUM_LIT>*<NUM_LIT>)<EOL>cid = fig.canvas.mpl_connect('<STR_LIT>', self.onclick)<EOL>plt.show(block=True)<EOL>", "docstring": "Creates the figure shown in ``adjust_aperture`` for visualization purposes. Called by other functions\nand generally not called by the user directly.\n\nArgs: \n    img: The data frame to be passed through to be plotted. A cutout of the ``integrated_postcard``", "id": "f14117:c0:m7"}
{"signature": "def calc_centroids(self):", "body": "self.cm = np.zeros((len(self.postcard), <NUM_LIT:2>))<EOL>for i in range(len(self.postcard)):<EOL><INDENT>target = self.postcard[i]<EOL>target[self.targets != <NUM_LIT:1>] = <NUM_LIT:0.0><EOL>self.cm[i] = center_of_mass(target)<EOL><DEDENT>", "docstring": "Identify the centroid positions for the target star at all epochs. Useful for verifying that there is\nno correlation between flux and position, as might be expected for high proper motion stars.", "id": "f14117:c0:m11"}
{"signature": "def calc_fluxes(self, min_flux = <NUM_LIT>, outlier_iterations=<NUM_LIT:5>,<EOL>max_outlier_obs=<NUM_LIT:4>, outlier_limit=<NUM_LIT>):", "body": "jj, ii = self.center<EOL>numer = np.zeros(len(self.times))<EOL>denom = np.zeros(len(self.times))<EOL>factr = np.zeros(len(self.times))<EOL>numer_pix = self.postcard[:,self.targets == <NUM_LIT:1>]<EOL>numer = np.sum(numer_pix, axis=<NUM_LIT:1>)<EOL>tar_vals = np.zeros((len(self.times), int(np.max(self.targets)+<NUM_LIT:1>-<NUM_LIT:2>-self.ignore_bright)))<EOL>for i in range(<NUM_LIT:2>+self.ignore_bright,int(np.max(self.targets)+<NUM_LIT:1>)):<EOL><INDENT>tval = np.sum(self.postcard[:,self.targets == i], axis=<NUM_LIT:1>)<EOL>tar_vals[:,i-<NUM_LIT:2>-self.ignore_bright] = tval <EOL><DEDENT>for i in range(len(self.obs_filenames)):<EOL><INDENT>if np.max(tar_vals[i]) < min_flux:<EOL><INDENT>tar_vals[self.qs == self.qs[i]] = <NUM_LIT:0.0><EOL><DEDENT><DEDENT>all_tar = np.zeros((len(self.times), int(np.max(self.targets)-self.ignore_bright)))<EOL>all_tar[:,<NUM_LIT:0>] = numer<EOL>all_tar[:,<NUM_LIT:1>:] = tar_vals<EOL>self.photometry_array = all_tar<EOL>for i in range(len(tar_vals[<NUM_LIT:0>])):<EOL><INDENT>for j in range(<NUM_LIT:4>):<EOL><INDENT>g = np.where(self.qs == j)[<NUM_LIT:0>]  <EOL>tar_vals[g,i] /= (np.median(tar_vals[g,i])+<NUM_LIT>)<EOL><DEDENT><DEDENT>tar_vals_old = tar_vals + <NUM_LIT:0.0><EOL>for i in range(outlier_iterations):<EOL><INDENT>nonzeros = np.where(tar_vals[<NUM_LIT:0>,:] != <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>med = np.median(tar_vals[:,nonzeros], axis=<NUM_LIT:1>)<EOL>std = np.std(tar_vals[:,nonzeros], axis=<NUM_LIT:1>)<EOL>if np.sum(tar_vals) != <NUM_LIT:0.0>:<EOL><INDENT>tar_vals_old = tar_vals + <NUM_LIT:0.0><EOL><DEDENT>for k in range(len(tar_vals[<NUM_LIT:0>])):<EOL><INDENT>h = np.where((np.abs(med-tar_vals[:,k])/std) > outlier_limit)[<NUM_LIT:0>]<EOL>if len(h) >= max_outlier_obs:<EOL><INDENT>tar_vals[:,k] = <NUM_LIT:0><EOL><DEDENT><DEDENT><DEDENT>if np.sum(tar_vals) == 
<NUM_LIT:0.0>:<EOL><INDENT>tar_vals = tar_vals_old + <NUM_LIT:0.0><EOL><DEDENT>denom = np.sum(tar_vals, axis=<NUM_LIT:1>)<EOL>self.target_flux_pixels = numer_pix<EOL>self.reference_flux = denom<EOL>", "docstring": "Determine the suitable reference stars, and then the total flux in those stars and \nin the target star in each epoch\n\nArgs: \n    min_flux: The size of the region around the target star to be plotted. Images will be a square \n        with side length ``image_region`` (default: ``5000``)\n    outlier_iterations: The number of iterations to remove outliers from the reference star sample\n        (stars at epochs with more than ``max_outlier_obs`` observations more than ``outlier_limit`` standard\n        deviations from the median value for all stars after normalization) (default: ``5``)\n    max_outlier_obs: The maximum number of epochs at which a star is allowed to be more than ``outlier_limit``\n        standard deviations from the median value for all stars before it is removed as a suitable\n        reference star (default: ``4``)\n    outlier_limit: The level of deviation (measured in standard deviations) which a target is allowed\n        to be discrepant from the median. If it is this discrepant at more than ``max_outlier_obs``\n        epochs, it is removed from consideration (default: ``1.7``)", "id": "f14117:c0:m10"}
{"signature": "def model_uncert(self):", "body": "Y = self.photometry_array.T<EOL>Y /= np.median(Y, axis=<NUM_LIT:1>)[:, None]<EOL>C = np.median(Y, axis=<NUM_LIT:0>)<EOL>nstars, nobs = np.shape(Y)<EOL>Z = np.empty((nstars, <NUM_LIT:4>))<EOL>qs = self.qs.astype(int)<EOL>for s in range(<NUM_LIT:4>):<EOL><INDENT>Z[:, s] = np.median((Y / C)[:, qs == s], axis=<NUM_LIT:1>)<EOL><DEDENT>resid2 = (Y - Z[:, qs] * C)**<NUM_LIT:2><EOL>z = Z[:, qs]<EOL>trend = z * C[None, :]<EOL>lnS = np.log(np.nanmedian(resid2, axis=<NUM_LIT:0>))<EOL>jitter = np.log(<NUM_LIT:0.1>*np.nanmedian(np.abs(np.diff(Y, axis=<NUM_LIT:1>))))<EOL>cal_ferr = np.sqrt(np.exp(<NUM_LIT:2>*(jitter/trend))+z**<NUM_LIT:2>*np.exp(lnS)[None, :])<EOL>self.modeled_uncert = cal_ferr<EOL>self.target_uncert = cal_ferr[<NUM_LIT:0>]<EOL>", "docstring": "Estimate the photometric uncertainties on each data point following Equation A.2 of The Paper.\nBased on the kepcal package of Dan Foreman-Mackey.", "id": "f14117:c0:m13"}
{"signature": "def de(output_basename, parameter_names, transform, loglikelihood, prior, nsteps=<NUM_LIT>, vizfunc=None, printfunc=None, **problem):", "body": "import json<EOL>import inspyred<EOL>import random<EOL>prng = random.Random()<EOL>if '<STR_LIT>' in problem:<EOL><INDENT>prng.seed(problem['<STR_LIT>'])<EOL><DEDENT>n_params = len(parameter_names)<EOL>seeds = problem.get('<STR_LIT>', [])<EOL>if '<STR_LIT:start>' in problem:<EOL><INDENT>seeds.append(problem['<STR_LIT:start>'])<EOL><DEDENT>prefix = output_basename<EOL>def viz(candidate, args):<EOL><INDENT>if vizfunc is not None:<EOL><INDENT>vizfunc(candidate)<EOL><DEDENT><DEDENT>def print_candidate(candidate, l, args):<EOL><INDENT>if printfunc is not None:<EOL><INDENT>printfunc(cube=candidate, loglikelihood=l)<EOL><DEDENT>else:<EOL><INDENT>print(l, candidate)<EOL><DEDENT><DEDENT>def eval_candidate(candidate):<EOL><INDENT>params = transform(candidate)<EOL>l = loglikelihood(params)<EOL>p = prior(params)<EOL>if numpy.isinf(p) and p < <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>')<EOL>return -<NUM_LIT><EOL><DEDENT>if numpy.isnan(l):<EOL><INDENT>return -<NUM_LIT><EOL><DEDENT>return l, p<EOL><DEDENT>@inspyred.ec.utilities.memoize<EOL>@inspyred.ec.evaluators.evaluator<EOL>def fitness(candidate, args):<EOL><INDENT>l, p = eval_candidate(candidate)<EOL>return (l + p)<EOL><DEDENT>cutoff_store = <NUM_LIT:10><EOL>def solution_archiver(random, population, archive, args):<EOL><INDENT>psize = len(population)<EOL>population.sort(reverse=True)<EOL>best = population[<NUM_LIT:0>].fitness<EOL>all_candidates = sorted(population + archive, reverse=True)<EOL>all_fitness = numpy.array([c.fitness for c in all_candidates])<EOL>mask = best - all_fitness > cutoff_store / <NUM_LIT:3><EOL>if mask.sum() < <NUM_LIT:20>:<EOL><INDENT>mask = best - all_fitness > cutoff_store<EOL><DEDENT>newarchive = [c for i, c in enumerate(all_candidates) if i == <NUM_LIT:0> or all_fitness[i - <NUM_LIT:1>] != c.fitness]<EOL>print('<STR_LIT>', len(archive), 
len(newarchive))<EOL>json.dump([{'<STR_LIT>': [float(f) for f in c.candidate], '<STR_LIT>':c.fitness} for c in newarchive], <EOL>open(prefix + '<STR_LIT>', '<STR_LIT:w>'), indent=<NUM_LIT:4>)<EOL>return newarchive<EOL><DEDENT>def observer(population, num_generations, num_evaluations, args):<EOL><INDENT>population.sort(reverse=True)<EOL>candidate = population[<NUM_LIT:0>]<EOL>print(('<STR_LIT>'.format(num_evaluations)), '<STR_LIT>', end='<STR_LIT:U+0020>') <EOL>print_candidate(candidate.candidate, candidate.fitness, args)<EOL>if num_evaluations % len(population) == <NUM_LIT:0> or num_evaluations < len(population) or args.get('<STR_LIT>', False):<EOL><INDENT>viz(candidate.candidate, args)<EOL><DEDENT><DEDENT>def generator(random, args): <EOL><INDENT>u = [random.uniform(<NUM_LIT:0>, <NUM_LIT:1>) for _ in range(n_params)]<EOL>u = [random.gauss(<NUM_LIT:0.5>, <NUM_LIT:0.1>) for _ in range(n_params)]<EOL>return bounder(u, args)<EOL><DEDENT>ea = inspyred.ec.DEA(prng)<EOL>ea.terminator = inspyred.ec.terminators.evaluation_termination<EOL>ea.archiver = solution_archiver<EOL>bounder = inspyred.ec.Bounder(lower_bound=<NUM_LIT>, upper_bound=<NUM_LIT:1>-<NUM_LIT>)<EOL>import copy<EOL>from math import log<EOL>@inspyred.ec.variators.mutator<EOL>def double_exponential_mutation(random, candidate, args):<EOL><INDENT>mut_rate = args.setdefault('<STR_LIT>', <NUM_LIT:0.1>)<EOL>mean = args.setdefault('<STR_LIT>', <NUM_LIT:0.0>)<EOL>stdev = args.setdefault('<STR_LIT>', <NUM_LIT:1.0>)<EOL>scale = log(<NUM_LIT:0.5>) / - (stdev)<EOL>bounder = args['<STR_LIT>'].bounder<EOL>mutant = copy.copy(candidate)<EOL>for i, m in enumerate(mutant):<EOL><INDENT>dice = random.random()<EOL>if dice < mut_rate:<EOL><INDENT>sign = (dice < mut_rate / <NUM_LIT:2>) * <NUM_LIT:2> - <NUM_LIT:1><EOL>delta = -log(random.random()) / scale<EOL>mutant[i] += delta * sign<EOL><DEDENT>mutant = bounder(mutant, args)<EOL><DEDENT>return mutant<EOL><DEDENT>def minute_gaussian_mutation(random, candidates, 
args):<EOL><INDENT>args = dict(args)<EOL>args['<STR_LIT>'] = <NUM_LIT:1><EOL>args['<STR_LIT>'] = <NUM_LIT><EOL>return inspyred.ec.variators.gaussian_mutation(random, candidates, args)<EOL><DEDENT>ea.variator = [inspyred.ec.variators.n_point_crossover, inspyred.ec.variators.gaussian_mutation, minute_gaussian_mutation]<EOL>ea.replacer = inspyred.ec.replacers.steady_state_replacement<EOL>ea.observer = observer<EOL>pop_size = <NUM_LIT:20><EOL>final_pop = ea.evolve(pop_size=pop_size, <EOL>max_evaluations=nsteps, maximize=True, seeds=seeds, <EOL>gaussian_stdev=<NUM_LIT>, <EOL>bounder=bounder, generator=generator, evaluator=fitness,<EOL>)<EOL>best = max(final_pop)<EOL>seeds = [c.candidate for c in ea.archive]<EOL>print('<STR_LIT>', best)<EOL>return {'<STR_LIT:start>': best.candidate, '<STR_LIT:value>': best.fitness,<EOL>'<STR_LIT>': seeds, '<STR_LIT>': '<STR_LIT>'}<EOL>", "docstring": "**Differential evolution**\n\nvia `inspyred <http://inspyred.github.io/>`_\n\nspecially tuned. steady state replacement, n-point crossover, \n        pop size 20, gaussian mutation noise 0.01 & 1e-6.\nstores intermediate results (can be used for resume, see seeds)\n\n:param start: start point\n:param seeds: list of start points\n:param vizfunc: callback to do visualization of current best solution\n:param printfunc: callback to summarize current best solution\n:param seed: RNG initialization (if set)", "id": "f14123:m0"}
{"signature": "def mcmc_advance(start, stdevs, logp, nsteps = <NUM_LIT>, adapt=True, callback=None):", "body": "import scipy<EOL>from numpy import log<EOL>import progressbar<EOL>prob = logp(start)<EOL>chain = [start]<EOL>accepts = [True]<EOL>probs = [prob]<EOL>assert not numpy.isnan(start).any()<EOL>assert not numpy.isnan(stdevs).any()<EOL>i = <NUM_LIT:0><EOL>widgets=['<STR_LIT>', progressbar.Percentage(), progressbar.Counter('<STR_LIT>'),<EOL>progressbar.Bar(), progressbar.ETA()]<EOL>pbar = progressbar.ProgressBar(widgets=widgets,<EOL>maxval=nsteps).start()<EOL>prev = start<EOL>prev_prob = prob<EOL>print('<STR_LIT>', prob)<EOL>stepchange = <NUM_LIT:0.1><EOL>while len(chain) < nsteps:<EOL><INDENT>i = i + <NUM_LIT:1><EOL>next = scipy.random.normal(prev, stdevs)<EOL>next[next > <NUM_LIT:1>] = <NUM_LIT:1><EOL>next[next < <NUM_LIT:0>] = <NUM_LIT:0><EOL>next_prob = logp(next)<EOL>assert not numpy.isnan(next).any()<EOL>assert not numpy.isnan(next_prob).any()<EOL>delta = next_prob - prev_prob<EOL>dice = log(scipy.random.uniform(<NUM_LIT:0>, <NUM_LIT:1>))<EOL>accept = delta > dice<EOL>if accept:<EOL><INDENT>prev = next<EOL>prev_prob = next_prob<EOL>if adapt: stdevs *= (<NUM_LIT:1> + stepchange)<EOL><DEDENT>else:<EOL><INDENT>if adapt: stdevs *= (<NUM_LIT:1> + stepchange)**(-<NUM_LIT>) <EOL><DEDENT>if callback: callback(prev_prob, prev, accept)<EOL>chain.append(prev)<EOL>accepts.append(accept)<EOL>probs.append(prev_prob)<EOL>if adapt: stepchange = min(<NUM_LIT:0.1>, <NUM_LIT> / i)<EOL>widgets[<NUM_LIT:0>] = '<STR_LIT>' % numpy.mean(numpy.array(accepts[len(accepts)/<NUM_LIT:3>:])+<NUM_LIT:0>)<EOL>pbar.update(pbar.currval + <NUM_LIT:1>)<EOL><DEDENT>pbar.finish()<EOL>return chain, probs, accepts, stdevs<EOL>", "docstring": "Generic Metropolis MCMC. Advances the chain by nsteps.\nCalled by :func:`mcmc`\n\n:param adapt: enables adaptive stepwidth alteration (converges).", "id": "f14125:m0"}
{"signature": "def mcmc(transform, loglikelihood, parameter_names, nsteps=<NUM_LIT>, nburn=<NUM_LIT>, <EOL>stdevs=<NUM_LIT:0.1>, start = <NUM_LIT:0.5>, **problem):", "body": "if '<STR_LIT>' in problem:<EOL><INDENT>numpy.random.seed(problem['<STR_LIT>'])<EOL><DEDENT>n_params = len(parameter_names)<EOL>def like(cube):<EOL><INDENT>cube = numpy.array(cube)<EOL>if (cube <= <NUM_LIT>).any() or (cube >= <NUM_LIT:1>-<NUM_LIT>).any():<EOL><INDENT>return -<NUM_LIT><EOL><DEDENT>params = transform(cube)<EOL>return loglikelihood(params)<EOL><DEDENT>start = start + numpy.zeros(n_params)<EOL>stdevs = stdevs + numpy.zeros(n_params)<EOL>def compute_stepwidths(chain):<EOL><INDENT>return numpy.std(chain, axis=<NUM_LIT:0>) / <NUM_LIT:3><EOL><DEDENT>import matplotlib.pyplot as plt<EOL>plt.figure(figsize=(<NUM_LIT:7>, <NUM_LIT:7>))<EOL>steps = numpy.array([<NUM_LIT:0.1>]*(n_params))<EOL>print('<STR_LIT>')<EOL>chain, prob, _, steps_ = mcmc_advance(start, steps, like, nsteps=nburn / <NUM_LIT:2>, adapt=True)<EOL>steps = compute_stepwidths(chain)<EOL>print('<STR_LIT>')<EOL>chain, prob, _, steps_ = mcmc_advance(chain[-<NUM_LIT:1>], steps, like, nsteps=nburn / <NUM_LIT:2>, adapt=True)<EOL>steps = compute_stepwidths(chain)<EOL>print('<STR_LIT>')<EOL>chain, prob, _, steps_ = mcmc_advance(chain[-<NUM_LIT:1>], steps, like, nsteps=nsteps)<EOL>chain = numpy.array(chain)<EOL>i = numpy.argmax(prob)<EOL>final = chain[-<NUM_LIT:1>]<EOL>print('<STR_LIT>')<EOL>chain = numpy.array([transform(params) for params in chain])<EOL>return dict(start=chain[-<NUM_LIT:1>], maximum=chain[i], seeds=[final, chain[i]], chain=chain, method='<STR_LIT>')<EOL>", "docstring": "**Metropolis Hastings MCMC**\n\nwith automatic step width adaption. \nBurnin period is also used to guess steps.\n\n:param nburn: number of burnin steps\n:param stdevs: step widths to start with", "id": "f14125:m1"}
{"signature": "def onebyone(transform, loglikelihood, parameter_names, prior, start = <NUM_LIT:0.5>, ftol=<NUM_LIT:0.1>, disp=<NUM_LIT:0>, nsteps=<NUM_LIT>,<EOL>parallel=False, find_uncertainties=False, **args):", "body": "def minfunc(cube):<EOL><INDENT>cube = numpy.array(cube)<EOL>if (cube <= <NUM_LIT>).any() or (cube >= <NUM_LIT:1>-<NUM_LIT>).any():<EOL><INDENT>return <NUM_LIT><EOL><DEDENT>params = transform(cube)<EOL>l = loglikelihood(params)<EOL>p = prior(params)<EOL>if numpy.isinf(p) and p < <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>')<EOL>return -<NUM_LIT><EOL><DEDENT>if numpy.isnan(l):<EOL><INDENT>return -<NUM_LIT><EOL><DEDENT>return -l - p<EOL><DEDENT>if parallel:<EOL><INDENT>func = opt_grid_parallel<EOL><DEDENT>else:<EOL><INDENT>func = opt_grid<EOL><DEDENT>n_params = len(parameter_names)<EOL>start = start + numpy.zeros(n_params)<EOL>ret = func(start, minfunc, [(<NUM_LIT>, <NUM_LIT:1>-<NUM_LIT>)] * n_params, ftol=ftol, disp=disp, compute_errors=find_uncertainties)<EOL>if find_uncertainties:<EOL><INDENT>c0 = ret[<NUM_LIT:0>]<EOL>p0 = transform(c0)<EOL>stdev = numpy.zeros(n_params)<EOL>lower = numpy.zeros(n_params)<EOL>upper = numpy.zeros(n_params)<EOL>for i, (lo, hi) in enumerate(ret[<NUM_LIT:1>]):<EOL><INDENT>c1 = numpy.copy(c0)<EOL>c1[i] = lo<EOL>c2 = numpy.copy(c0)<EOL>c2[i] = hi<EOL>p1 = transform(c1)<EOL>p2 = transform(c2)<EOL>stdev[i] = numpy.abs(p2[i] - p1[i]) / <NUM_LIT:2><EOL>lower[i] = min(p2[i], p1[i])<EOL>upper[i] = max(p2[i], p1[i])<EOL><DEDENT>return dict(start=ret[<NUM_LIT:0>], maximum=p0,<EOL>stdev=stdev, upper=upper, lower=lower,<EOL>method='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return dict(start=ret, maximum=transform(ret), method='<STR_LIT>')<EOL><DEDENT>", "docstring": "**Convex optimization based on Brent's method**\n\nA strict assumption of one optimum between the parameter limits is used.\nThe bounds are narrowed until it is found, i.e. 
the likelihood function is flat\nwithin the bounds.\n* If optimum outside bracket, expands bracket until contained.\n* Thus guaranteed to return local optimum.\n* Supports parallelization (multiple parameters are treated independently)\n* Supports finding ML uncertainties (Delta-Chi^2=1)\n\nVery useful for 1-3d problems.\nOtherwise useful, reproducible/deterministic algorithm for finding the minimum in \nwell-behaved likelihoods, where the parameters are weakly independent,\nor to find a good starting point. \nOptimizes each parameter in order, assuming they are largely independent.\n\nFor 1-dimensional algorithm used, see :func:`jbopt.opt_grid`\n\n:param ftol: difference in values at which the function can be considered flat\n:param compute_errors: compute standard deviation of gaussian around optimum", "id": "f14126:m1"}
{"signature": "def opt_grid(params, func, limits, ftol=<NUM_LIT>, disp=<NUM_LIT:0>, compute_errors=True):", "body": "caches = [[] for p in params]<EOL>newparams = numpy.copy(params)<EOL>errors = [[] for p in params]<EOL>for i, p in enumerate(params):<EOL><INDENT>cache = []<EOL>def func1(x0):<EOL><INDENT>newparams[i] = x0<EOL>v = func(newparams)<EOL>cache.append([x0, v])<EOL>return v<EOL><DEDENT>lo, hi = limits[i]<EOL>bestval = optimize(func1, x0=p,<EOL>cons=[lambda x: x - lo, lambda x: hi - x],<EOL>ftol=ftol, disp=disp - <NUM_LIT:1>)<EOL>beststat = func1(bestval)<EOL>if compute_errors:<EOL><INDENT>errors[i] = cache2errors(func1, cache, disp=disp - <NUM_LIT:1>)<EOL><DEDENT>newparams[i] = bestval<EOL>caches[i] = cache<EOL>if disp > <NUM_LIT:0>:<EOL><INDENT>if compute_errors:<EOL><INDENT>print('<STR_LIT>' % (<EOL>i, bestval, errors[i][<NUM_LIT:0>], errors[i][<NUM_LIT:1>], beststat))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>' %<EOL>(i, bestval, beststat))<EOL><DEDENT><DEDENT><DEDENT>beststat = func(newparams)<EOL>if disp > <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>' % (beststat))<EOL><DEDENT>if compute_errors:<EOL><INDENT>return newparams, errors<EOL><DEDENT>else:<EOL><INDENT>return newparams<EOL><DEDENT>", "docstring": "see :func:`optimize1d.optimize`, considers each parameter in order\n\n:param ftol: \n        difference in values at which the function can be considered flat\n:param compute_errors:\n        compute standard deviation of gaussian around optimum", "id": "f14127:m1"}
{"signature": "def opt_normalizations(params, func, limits, abandon_threshold=<NUM_LIT:100>, noimprovement_threshold=<NUM_LIT>,<EOL>disp=<NUM_LIT:0>):", "body": "newparams = numpy.copy(params)<EOL>lower = [lo for lo, hi in limits]<EOL>upper = [hi for lo, hi in limits]<EOL>for i, p in enumerate(params):<EOL><INDENT>startval = p<EOL>beststat = func(newparams)<EOL>bestval = startval<EOL>if disp > <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>' % (startval, beststat))<EOL><DEDENT>go_up = True<EOL>go_down = True<EOL>for n in list(<NUM_LIT>**numpy.arange(<NUM_LIT:1>, <NUM_LIT:20>)) + [None] + list(<NUM_LIT>**numpy.arange(<NUM_LIT:1>, <NUM_LIT>)):<EOL><INDENT>if n is None:<EOL><INDENT>startval = bestval<EOL>if disp > <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>' % (startval))<EOL><DEDENT>go_up = True<EOL>go_down = True<EOL>continue<EOL><DEDENT>if go_up and startval * n > upper[i]:<EOL><INDENT>if disp > <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>' %<EOL>(startval, n, upper[i]))<EOL><DEDENT>go_up = False<EOL><DEDENT>if go_down and startval / n < lower[i]:<EOL><INDENT>if disp > <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>' %<EOL>(startval, n, lower[i]))<EOL><DEDENT>go_down = False<EOL><DEDENT>if go_up:<EOL><INDENT>if disp > <NUM_LIT:1>:<EOL><INDENT>print('<STR_LIT>' % (startval * n))<EOL><DEDENT>newparams[i] = startval * n<EOL>newstat = func(newparams)<EOL>if disp > <NUM_LIT:1>:<EOL><INDENT>print('<STR_LIT>' % (newparams[i], newstat))<EOL><DEDENT>if newstat <= beststat:<EOL><INDENT>bestval = newparams[i]<EOL>beststat = newstat<EOL>if disp > <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>' % newparams[i])<EOL><DEDENT><DEDENT>if newstat > beststat + abandon_threshold:<EOL><INDENT>go_up = False<EOL><DEDENT><DEDENT>if go_down:<EOL><INDENT>if disp > <NUM_LIT:1>:<EOL><INDENT>print('<STR_LIT>' % (startval / n))<EOL><DEDENT>newparams[i] = startval / n<EOL>newstat = func(newparams)<EOL>if disp > <NUM_LIT:1>:<EOL><INDENT>print('<STR_LIT>' % (newparams[i], newstat))<EOL><DEDENT>if newstat + 
noimprovement_threshold < beststat:  <EOL><INDENT>bestval = newparams[i]<EOL>beststat = newstat<EOL>if disp > <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>' % newparams[i])<EOL><DEDENT><DEDENT>if newstat > beststat + abandon_threshold:<EOL><INDENT>go_down = False<EOL><DEDENT><DEDENT><DEDENT>newparams[i] = bestval<EOL>print('<STR_LIT>' % (i, newparams[i]))<EOL><DEDENT>print('<STR_LIT>' % (beststat))<EOL>return newparams<EOL>", "docstring": "**optimization algorithm for scale variables (positive value of unknown magnitude)**\n\nEach parameter is a normalization of a feature, and its value is sought.\nThe parameters are handled in order (assumed to be independent), \nbut a second round can be run.\nVarious magnitudes of the normalization are tried. If the normalization converges\nto zero, the largest value yielding a comparable value is used.\n\nOptimizes each normalization parameter in rough steps \nusing multiples of 3 of start point\nto find reasonable starting values for another algorithm.\n\nparameters, minimization function, parameter space definition [(lo, hi) for i in params]\n\n:param abandon_threshold:\n        if in one direction the function increases by this much over the best value, \n        abort search in this direction\n:param noimprovement_threshold:\n        when decreasing the normalization, if the function increases by less than \n        this amount, abort search in this direction\n:param disp:\n        verbosity", "id": "f14127:m0"}
{"signature": "def reproduce_sexually(self, egg_donor, sperm_donor):", "body": "egg_word = random.choice(egg_donor.genome)<EOL>egg = self.generate_gamete(egg_word)<EOL>sperm_word = random.choice(sperm_donor.genome)<EOL>sperm = self.generate_gamete(sperm_word)<EOL>self.genome = list(set(egg + sperm)) <EOL>self.parents = [egg_donor.name, sperm_donor.name]<EOL>self.generation = max(egg_donor.generation, sperm_donor.generation) + <NUM_LIT:1><EOL>sum_ = egg_donor.divinity + sperm_donor.divinity<EOL>self.divinity = int(npchoice(divinities, <NUM_LIT:1>, p=p_divinity[sum_])[<NUM_LIT:0>])<EOL>", "docstring": "Produce two gametes, an egg and a sperm, from input Gods. Combine\n        them to produce a genome a la sexual reproduction. Assign divinity\n        according to probabilities in p_divinity. The more divine the parents,\n        the more divine their offspring.", "id": "f14134:c0:m5"}
{"signature": "def set_chromosomes(self, chromosomes=None):", "body": "if chromosomes and chromosomes in valid_chromosomes:<EOL><INDENT>self.chromosomes = chromosomes<EOL><DEDENT>else:<EOL><INDENT>self.chromosomes = random.choice([XX, XY])<EOL><DEDENT>", "docstring": "This model uses the XY sex-determination system. Sex != gender.\n        Assign either XX or XY randomly with a 50/50 chance of each, unless\n        <chromosomes> are passed as an argument.", "id": "f14134:c0:m1"}
{"signature": "def generate_gamete(self, egg_or_sperm_word):", "body": "p_rate_of_mutation = [<NUM_LIT>, <NUM_LIT:0.1>]<EOL>should_use_mutant_pool = (npchoice([<NUM_LIT:0>,<NUM_LIT:1>], <NUM_LIT:1>, p=p_rate_of_mutation)[<NUM_LIT:0>] == <NUM_LIT:1>)<EOL>if should_use_mutant_pool:<EOL><INDENT>pool = tokens.secondary_tokens<EOL><DEDENT>else:<EOL><INDENT>pool = tokens.primary_tokens<EOL><DEDENT>return get_matches(egg_or_sperm_word, pool, <NUM_LIT>)<EOL>", "docstring": "Extract 23 'chromosomes' aka words from 'gene pool' aka list of tokens\n        by searching the list of tokens for words that are related to the given\n        egg_or_sperm_word.", "id": "f14134:c0:m8"}
{"signature": "def set_epithet(self):", "body": "if self.divinity == human:<EOL><INDENT>obsession = random.choice(self.genome)<EOL>if self.gender == female:<EOL><INDENT>self.epithet = '<STR_LIT>'<EOL><DEDENT>elif self.gender == male:<EOL><INDENT>self.epithet = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>self.epithet = '<STR_LIT>'<EOL><DEDENT>self.epithet += '<STR_LIT>' + obsession<EOL>return <EOL><DEDENT>if self.gender == female:<EOL><INDENT>title = '<STR_LIT>'<EOL><DEDENT>elif self.gender == male:<EOL><INDENT>title = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>title = '<STR_LIT>'<EOL><DEDENT>if self.divinity == demi_god:<EOL><INDENT>title = '<STR_LIT>' + title if self.gender == non_binary else '<STR_LIT>' + title<EOL><DEDENT>num_domains = npchoice([<NUM_LIT:1>,<NUM_LIT:2>,<NUM_LIT:3>,<NUM_LIT:4>], <NUM_LIT:1>, p=[<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>])[<NUM_LIT:0>]<EOL>if num_domains == <NUM_LIT:1>:<EOL><INDENT>template = '<STR_LIT>'<EOL><DEDENT>if num_domains == <NUM_LIT:2>:<EOL><INDENT>template = '<STR_LIT>'<EOL><DEDENT>elif num_domains == <NUM_LIT:3>:<EOL><INDENT>template = '<STR_LIT>' <EOL><DEDENT>elif num_domains == <NUM_LIT:4>:<EOL><INDENT>template = '<STR_LIT>'<EOL><DEDENT>self.domains = [d.title() for d in random.sample(self.genome, num_domains)]<EOL>self.epithet = template % (title, *self.domains)<EOL>", "docstring": "Divine an appropriate epithet for this God. (See what I did there?)", "id": "f14134:c0:m7"}
{"signature": "def print_parents(self):", "body": "if self.gender == female:<EOL><INDENT>title = '<STR_LIT>'<EOL><DEDENT>elif self.gender == male:<EOL><INDENT>title = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>title = '<STR_LIT>'<EOL><DEDENT>p1 = self.parents[<NUM_LIT:0>]<EOL>p2 = self.parents[<NUM_LIT:1>]<EOL>template = '<STR_LIT>'<EOL>print(template % (title, p1.name, p1.epithet, p2.name, p2.epithet))<EOL>", "docstring": "Print parents' names and epithets.", "id": "f14134:c0:m9"}
{"signature": "def breed(self, egg_donor, sperm_donor):", "body": "offspring = []<EOL>try:<EOL><INDENT>num_children = npchoice([<NUM_LIT:1>,<NUM_LIT:2>], <NUM_LIT:1>, p=[<NUM_LIT>, <NUM_LIT>])[<NUM_LIT:0>] <EOL>for _ in range(num_children):<EOL><INDENT>child = God(egg_donor, sperm_donor)<EOL>offspring.append(child)<EOL>send_birth_announcement(egg_donor, sperm_donor, child)<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return offspring<EOL>", "docstring": "Get it on.", "id": "f14135:c0:m4"}
{"signature": "def spawn(self, generations):", "body": "egg_donors = [god for god in self.gods.values() if god.chromosomes == '<STR_LIT>']<EOL>sperm_donors = [god for god in self.gods.values() if god.chromosomes == '<STR_LIT>']<EOL>for i in range(generations):<EOL><INDENT>print(\"<STR_LIT>\" % (i+<NUM_LIT:1>))<EOL>gen_xx = []<EOL>gen_xy = []<EOL>for egg_donor in egg_donors:<EOL><INDENT>sperm_donor = random.choice(sperm_donors)<EOL>brood = self.breed(egg_donor, sperm_donor)<EOL>for child in brood:<EOL><INDENT>if child.divinity > human:<EOL><INDENT>self.add_god(child)<EOL><DEDENT>if child.chromosomes == '<STR_LIT>':<EOL><INDENT>gen_xx.append(child)<EOL><DEDENT>else:<EOL><INDENT>gen_xy.append(child)<EOL><DEDENT><DEDENT><DEDENT>egg_donors = [ed for ed in egg_donors if ed.generation > (i-<NUM_LIT:2>)]<EOL>sperm_donors = [sd for sd in sperm_donors if sd.generation > (i-<NUM_LIT:3>)]<EOL>egg_donors += gen_xx<EOL>sperm_donors += gen_xy<EOL><DEDENT>", "docstring": "Grow this Pantheon by multiplying Gods.", "id": "f14135:c0:m3"}
{"signature": "def get_god(self, name_of_god):", "body": "try:<EOL><INDENT>return self.gods[name_of_god]<EOL><DEDENT>except:<EOL><INDENT>print('<STR_LIT>' % name_of_god)<EOL><DEDENT>", "docstring": "Retrieve a god from this Pantheon's gods dictionary.", "id": "f14135:c0:m2"}
{"signature": "def sequence_genes():", "body": "tokenize_corpora()<EOL>", "docstring": "An alias.", "id": "f14136:m0"}
{"signature": "def define_gene_pool(pool, individuals):", "body": "make_tokens_dir(pool, individuals)<EOL>", "docstring": "An alias.", "id": "f14136:m2"}
{"signature": "def filter_genes(pool, filters):", "body": "save_tokens_to_dir(pool, filters)<EOL>", "docstring": "An alias.", "id": "f14136:m1"}
{"signature": "def list_gene_pools():", "body": "get_tokens_dirs()<EOL>", "docstring": "An alias.", "id": "f14136:m4"}
{"signature": "def make_tokens_dir(dir_, sources):", "body": "os.mkdir(tokens_dir + dir_)<EOL>for source in sources:<EOL><INDENT>if not os.path.isfile(corpora_dir + source):<EOL><INDENT>print('<STR_LIT>' + source)<EOL>return<EOL><DEDENT><DEDENT>with open(tokens_dir + dir_ + '<STR_LIT>', '<STR_LIT:w>') as outjson:<EOL><INDENT>json.dump(sources, outjson)<EOL><DEDENT>", "docstring": "Create a new directory named <dir_>. Create a new file within it called\n    sources.json. The input <sources> is a list of names of tokenized texts.\n    Write <sources> into sources.json.", "id": "f14136:m7"}
{"signature": "def closest(tokens, search_vec, limit, offset=<NUM_LIT:0>):", "body": "return sorted(tokens,<EOL>key=lambda x: cosine(search_vec, word_vec(x)),<EOL>reverse=True)[offset:offset+limit]<EOL>", "docstring": "Return the <limit> words from <tokens> whose vectors most closely\n    resemble the search_vec. Skip the first <offset> results.", "id": "f14137:m3"}
{"signature": "def get_matches(word, tokens, limit, offset=<NUM_LIT:0>):", "body": "return closest(tokens, word_vec(word), limit, offset)<EOL>", "docstring": "Return words from <tokens> that are most closely related to <word>.", "id": "f14137:m0"}
{"signature": "def word_vec(word):", "body": "return nlp.vocab[word].vector<EOL>", "docstring": "Return spaCy's vector for <word>.", "id": "f14137:m1"}
{"signature": "def set_name_lists(ethnicity=None):", "body": "if not ethnicity: ethnicity = random.choice(get_ethnicities())<EOL>print(\"<STR_LIT>\" + ethnicity)<EOL>filename = names_dir + ethnicity + '<STR_LIT>'<EOL>try:<EOL><INDENT>with open(filename, '<STR_LIT:r>') as injson:<EOL><INDENT>data = json.load(injson)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>return '<STR_LIT>' + filename<EOL><DEDENT>else:<EOL><INDENT>names = [ tuple(name.split('<STR_LIT:U+002C>')) for name in data ]<EOL>random.shuffle(names)<EOL>global female_names<EOL>female_names = [name for name,gender,*desc in names if gender == '<STR_LIT>']<EOL>global male_names<EOL>male_names = [name for name,gender,*desc in names if gender == '<STR_LIT>']<EOL>global nb_names<EOL>nb_names = [name for name,gender,*desc in names if gender == '<STR_LIT>']<EOL><DEDENT>", "docstring": "Set three globally available lists of names.", "id": "f14138:m0"}
{"signature": "def get_ethnicities():", "body": "ethnicities = [ fname.split('<STR_LIT:.>')[<NUM_LIT:0>] for fname in os.listdir(names_dir) ]<EOL>return ethnicities<EOL>", "docstring": "Retrieve a list of the ethnicities for which name data was scraped.\n    Exclude the file extension for human friendliness.", "id": "f14138:m1"}
{"signature": "def list(self, ignore_patterns):", "body": "for prefix, root in self.locations:<EOL><INDENT>storage = self.storages[root]<EOL>for path in utils.get_files(storage, ignore_patterns):<EOL><INDENT>yield path, storage<EOL><DEDENT><DEDENT>", "docstring": "List all files in all locations.", "id": "f14149:c0:m3"}
{"signature": "def find(self, path, all=False):", "body": "matches = []<EOL>for app in self.apps:<EOL><INDENT>app_location = self.storages[app].location<EOL>if app_location not in searched_locations:<EOL><INDENT>searched_locations.append(app_location)<EOL><DEDENT>match = self.find_in_app(app, path)<EOL>if match:<EOL><INDENT>if not all:<EOL><INDENT>return match<EOL><DEDENT>matches.append(match)<EOL><DEDENT><DEDENT>return matches<EOL>", "docstring": "Looks for files in the app directories.", "id": "f14149:c1:m2"}
{"signature": "def find(self, path, all=False):", "body": "matches = []<EOL>for prefix, root in self.locations:<EOL><INDENT>if root not in searched_locations:<EOL><INDENT>searched_locations.append(root)<EOL><DEDENT>matched_path = self.find_location(root, path, prefix)<EOL>if matched_path:<EOL><INDENT>if not all:<EOL><INDENT>return matched_path<EOL><DEDENT>matches.append(matched_path)<EOL><DEDENT><DEDENT>return matches<EOL>", "docstring": "Looks for files in the extra locations\nas defined in ``MEDIA_FIXTURES_FILES_DIRS``.", "id": "f14149:c0:m1"}
{"signature": "@lru_cache.lru_cache(maxsize=None)<EOL>def get_finder(import_path):", "body": "Finder = import_string(import_path)<EOL>if not issubclass(Finder, BaseFinder):<EOL><INDENT>raise ImproperlyConfigured('<STR_LIT>' %<EOL>(Finder, BaseFinder))<EOL><DEDENT>return Finder()<EOL>", "docstring": "Imports the media fixtures files finder class described by import_path, where\nimport_path is the full Python path to the class.", "id": "f14149:m1"}
{"signature": "def find_in_app(self, app, path):", "body": "storage = self.storages.get(app, None)<EOL>if storage:<EOL><INDENT>if storage.exists(path):<EOL><INDENT>matched_path = storage.path(path)<EOL>if matched_path:<EOL><INDENT>return matched_path<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Find a requested media file in an app's media fixtures locations.", "id": "f14149:c1:m3"}
{"signature": "def find_location(self, root, path, prefix=None):", "body": "if prefix:<EOL><INDENT>prefix = '<STR_LIT>' % (prefix, os.sep)<EOL>if not path.startswith(prefix):<EOL><INDENT>return None<EOL><DEDENT>path = path[len(prefix):]<EOL><DEDENT>path = safe_join(root, path)<EOL>if os.path.exists(path):<EOL><INDENT>return path<EOL><DEDENT>", "docstring": "Finds a requested media file in a location, returning the found\nabsolute path (or ``None`` if no match).", "id": "f14149:c0:m2"}
{"signature": "def collect(self):", "body": "if self.symlink and not self.local:<EOL><INDENT>raise CommandError(\"<STR_LIT>\")<EOL><DEDENT>if self.clear:<EOL><INDENT>self.clear_dir('<STR_LIT>')<EOL><DEDENT>if self.symlink:<EOL><INDENT>handler = self.link_file<EOL><DEDENT>else:<EOL><INDENT>handler = self.copy_file<EOL><DEDENT>found_files = OrderedDict()<EOL>for finder in get_finders():<EOL><INDENT>for path, storage in finder.list(self.ignore_patterns):<EOL><INDENT>if getattr(storage, '<STR_LIT>', None):<EOL><INDENT>prefixed_path = os.path.join(storage.prefix, path)<EOL><DEDENT>else:<EOL><INDENT>prefixed_path = path<EOL><DEDENT>if prefixed_path not in found_files:<EOL><INDENT>found_files[prefixed_path] = (storage, path)<EOL>handler(path, prefixed_path, storage)<EOL><DEDENT><DEDENT><DEDENT>if self.post_process and hasattr(self.storage, '<STR_LIT>'):<EOL><INDENT>processor = self.storage.post_process(found_files,<EOL>dry_run=self.dry_run)<EOL>for original_path, processed_path, processed in processor:<EOL><INDENT>if isinstance(processed, Exception):<EOL><INDENT>self.stderr.write(\"<STR_LIT>\" % original_path)<EOL>self.stderr.write(\"<STR_LIT>\")<EOL>raise processed<EOL><DEDENT>if processed:<EOL><INDENT>self.log(\"<STR_LIT>\" %<EOL>(original_path, processed_path), level=<NUM_LIT:1>)<EOL>self.post_processed_files.append(original_path)<EOL><DEDENT>else:<EOL><INDENT>self.log(\"<STR_LIT>\" % original_path)<EOL><DEDENT><DEDENT><DEDENT>return {<EOL>'<STR_LIT>': self.copied_files + self.symlinked_files,<EOL>'<STR_LIT>': self.unmodified_files,<EOL>'<STR_LIT>': self.post_processed_files,<EOL>}<EOL>", "docstring": "Perform the bulk of the work of collectmedia.\n\nSplit off from handle() to facilitate testing.", "id": "f14150:c0:m3"}
{"signature": "def generate_local_url(self, js_name):", "body": "host = self._settings['<STR_LIT>'].format(**self._host_context).rstrip('<STR_LIT:/>')<EOL>return '<STR_LIT>'.format(host, js_name)<EOL>", "docstring": "Generate the local url for a js file.\n:param js_name:\n:return:", "id": "f14158:c0:m6"}
{"signature": "@staticmethod<EOL><INDENT>def as_object(data):<DEDENT>", "body": "return json.dumps(data, indent=<NUM_LIT:4>, default=json_encoder)<EOL>", "docstring": "Dump object to multiple-line javascript object.\n:param data:\n:return:", "id": "f14160:c0:m0"}
{"signature": "def to_css_length(l):", "body": "if isinstance(l, (int, float)):<EOL><INDENT>return '<STR_LIT>'.format(l)<EOL><DEDENT>else:<EOL><INDENT>return l<EOL><DEDENT>", "docstring": "Return the standard length string of css.\nIt's compatible with number values in old versions.\n:param l: source css length.\n:return: A string.", "id": "f14160:m0"}
{"signature": "def ifetch_single(iterable, key, default=EMPTY, getter=None):", "body": "def _getter(item):<EOL><INDENT>if getter:<EOL><INDENT>custom_getter = partial(getter, key=key)<EOL>return custom_getter(item)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>attrgetter = operator.attrgetter(key)<EOL>return attrgetter(item)<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>itemgetter = operator.itemgetter(key)<EOL>return itemgetter(item)<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>if default is not EMPTY:<EOL><INDENT>return default<EOL><DEDENT>raise ValueError('<STR_LIT>' % (item, key))<EOL><DEDENT><DEDENT>return map(_getter, iterable)<EOL>", "docstring": "getter() g(item, key):pass", "id": "f14165:m0"}
{"signature": "def post(self, endpoint, data, url_data=None, parameters=None):", "body": "return self.request_handler.request(<EOL>self._url(endpoint, url_data, parameters),<EOL>method=Api._method['<STR_LIT>'],<EOL>body=urllib.urlencode(data)<EOL>)<EOL>", "docstring": "Returns the response and body for a post request\n            endpoints = 'users'  # resource to access\n            data = {'username': 'blah, 'password': blah}  # POST body\n            url_data = {}, ()  # Used to modularize endpoints, see __init__\n            parameters = {}, ((),()) # URL paramters, ex: google.com?q=a&f=b", "id": "f14195:c0:m4"}
{"signature": "def _url(self, endpoint, url_data=None, parameters=None):", "body": "try:<EOL><INDENT>url = '<STR_LIT>' % (self.base_url, self.endpoints[endpoint])<EOL><DEDENT>except KeyError:<EOL><INDENT>raise EndPointDoesNotExist(endpoint)<EOL><DEDENT>if url_data:<EOL><INDENT>url = url % url_data<EOL><DEDENT>if parameters:<EOL><INDENT>url = '<STR_LIT>' % (url, urllib.urlencode(parameters, True))<EOL><DEDENT>return url<EOL>", "docstring": "Generate URL on the modularized endpoints and url parameters", "id": "f14195:c0:m8"}
{"signature": "def update_endpoints(self, endpoints):", "body": "self.endpoints.update(endpoints)<EOL>", "docstring": "Adds to the endpoints collection\n    endpoints = {\n        'get_users': 'users',\n        'classes': 'crazyURL/withExtraStuff',\n        'get_user': 'user/%(id)s'  # requires url_data\n        'get_me': 'user/3'  # Don't be afraid to hard code!\n    }", "id": "f14195:c0:m1"}
{"signature": "def get(self, endpoint, url_data=None, parameters=None):", "body": "return self.request_handler.request(<EOL>self._url(endpoint, url_data, parameters),<EOL>method=Api._method['<STR_LIT>'],<EOL>)<EOL>", "docstring": "Returns the response and body for a get request\n            endpoints = 'users'  # resource to access\n            url_data = {}, ()  # Used to modularize endpoints, see __init__\n            parameters = {}, ((),()) # URL parameters: google.com?q=a&f=b", "id": "f14195:c0:m3"}
{"signature": "def clear_endpoints(self):", "body": "self.endpoints = {}<EOL>", "docstring": "Clears all stored endpoints", "id": "f14195:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def _httplib2_init(username, password):<DEDENT>", "body": "obj = httplib2.Http()<EOL>if username and password:<EOL><INDENT>obj.add_credentials(username, password)<EOL><DEDENT>return obj<EOL>", "docstring": "Used to instantiate a regular HTTP request object", "id": "f14195:c0:m9"}
{"signature": "def delete(self, endpoint, data, url_data=None, parameters=None):", "body": "return self.request_handler.request(<EOL>self._url(endpoint, url_data, parameters),<EOL>method=Api._method['<STR_LIT>'],<EOL>body=urllib.urlencode(data)<EOL>)<EOL>", "docstring": "Returns the response and body for a delete request\n            endpoints = 'users'  # resource to access\n            data = {'username': 'blah, 'password': blah}  # DELETE body\n            url_data = {}, ()  # Used to modularize endpoints, see __init__\n            parameters = {}, ((),()) # URL paramters, ex: google.com?q=a&f=b", "id": "f14195:c0:m6"}
{"signature": "def divide_timedelta(td1, td2):", "body": "try:<EOL><INDENT>return td1 / td2<EOL><DEDENT>except TypeError:<EOL><INDENT>return td1.total_seconds() / td2.total_seconds()<EOL><DEDENT>", "docstring": "Get the ratio of two timedeltas\n\n>>> one_day = datetime.timedelta(days=1)\n>>> one_hour = datetime.timedelta(hours=1)\n>>> divide_timedelta(one_hour, one_day) == 1 / 24\nTrue", "id": "f14200:m12"}
{"signature": "@classmethod<EOL><INDENT>def construct_datetime(cls, *args, **kwargs):<DEDENT>", "body": "if len(args) == <NUM_LIT:1>:<EOL><INDENT>arg = args[<NUM_LIT:0>]<EOL>method = cls.__get_dt_constructor(<EOL>type(arg).__module__,<EOL>type(arg).__name__,<EOL>)<EOL>result = method(arg)<EOL>try:<EOL><INDENT>result = result.replace(tzinfo=kwargs.pop('<STR_LIT>'))<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>if kwargs:<EOL><INDENT>first_key = kwargs.keys()[<NUM_LIT:0>]<EOL>tmpl = (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL>raise TypeError(tmpl.format(**locals()))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>result = datetime.datetime(*args, **kwargs)<EOL><DEDENT>return result<EOL>", "docstring": "Construct a datetime.datetime from a number of different time\n        types found in python and pythonwin", "id": "f14200:c1:m0"}
{"signature": "def datetime_mod(dt, period, start=None):", "body": "if start is None:<EOL><INDENT>start = datetime.datetime.combine(dt.date(), datetime.time())<EOL><DEDENT>delta = dt - start<EOL>def get_time_delta_microseconds(td):<EOL><INDENT>return (td.days * seconds_per_day + td.seconds) * <NUM_LIT> + td.microseconds<EOL><DEDENT>delta, period = map(get_time_delta_microseconds, (delta, period))<EOL>offset = datetime.timedelta(microseconds=delta % period)<EOL>result = dt - offset<EOL>return result<EOL>", "docstring": "Find the time which is the specified date/time truncated to the time delta\nrelative to the start date/time.\nBy default, the start time is midnight of the same day as the specified\ndate/time.\n\n>>> datetime_mod(datetime.datetime(2004, 1, 2, 3),\n...     datetime.timedelta(days = 1.5),\n...     start = datetime.datetime(2004, 1, 1))\ndatetime.datetime(2004, 1, 1, 0, 0)\n>>> datetime_mod(datetime.datetime(2004, 1, 2, 13),\n...     datetime.timedelta(days = 1.5),\n...     start = datetime.datetime(2004, 1, 1))\ndatetime.datetime(2004, 1, 2, 12, 0)\n>>> datetime_mod(datetime.datetime(2004, 1, 2, 13),\n...     datetime.timedelta(days = 7),\n...     start = datetime.datetime(2004, 1, 1))\ndatetime.datetime(2004, 1, 1, 0, 0)\n>>> datetime_mod(datetime.datetime(2004, 1, 10, 13),\n...     datetime.timedelta(days = 7),\n...     start = datetime.datetime(2004, 1, 1))\ndatetime.datetime(2004, 1, 8, 0, 0)", "id": "f14200:m2"}
{"signature": "def datetime_round(dt, period, start=None):", "body": "result = datetime_mod(dt, period, start)<EOL>if abs(dt - result) >= period // <NUM_LIT:2>:<EOL><INDENT>result += period<EOL><DEDENT>return result<EOL>", "docstring": "Find the nearest even period for the specified date/time.\n\n>>> datetime_round(datetime.datetime(2004, 11, 13, 8, 11, 13),\n...     datetime.timedelta(hours = 1))\ndatetime.datetime(2004, 11, 13, 8, 0)\n>>> datetime_round(datetime.datetime(2004, 11, 13, 8, 31, 13),\n...     datetime.timedelta(hours = 1))\ndatetime.datetime(2004, 11, 13, 9, 0)\n>>> datetime_round(datetime.datetime(2004, 11, 13, 8, 30),\n...     datetime.timedelta(hours = 1))\ndatetime.datetime(2004, 11, 13, 9, 0)", "id": "f14200:m3"}
{"signature": "def parse_timedelta(str):", "body": "deltas = (_parse_timedelta_part(part.strip()) for part in str.split('<STR_LIT:U+002C>'))<EOL>return sum(deltas, datetime.timedelta())<EOL>", "docstring": "Take a string representing a span of time and parse it to a time delta.\nAccepts any string of comma-separated numbers each with a unit indicator.\n\n>>> parse_timedelta('1 day')\ndatetime.timedelta(days=1)\n\n>>> parse_timedelta('1 day, 30 seconds')\ndatetime.timedelta(days=1, seconds=30)\n\n>>> parse_timedelta('47.32 days, 20 minutes, 15.4 milliseconds')\ndatetime.timedelta(days=47, seconds=28848, microseconds=15400)\n\nSupports weeks, months, years\n\n>>> parse_timedelta('1 week')\ndatetime.timedelta(days=7)\n\n>>> parse_timedelta('1 year, 1 month')\ndatetime.timedelta(days=395, seconds=58685)\n\nNote that months and years strict intervals, not aligned\nto a calendar:\n\n>>> now = datetime.datetime.now()\n>>> later = now + parse_timedelta('1 year')\n>>> diff = later.replace(year=now.year) - now\n>>> diff.seconds\n20940", "id": "f14200:m10"}
{"signature": "@staticmethod<EOL><INDENT>def _from_timestamp(input):<DEDENT>", "body": "if not isinstance(input, numbers.Real):<EOL><INDENT>return input<EOL><DEDENT>return from_timestamp(input)<EOL>", "docstring": "If input is a real number, interpret it as a Unix timestamp\n(seconds sinc Epoch in UTC) and return a timezone-aware\ndatetime object. Otherwise return input unchanged.", "id": "f14201:c0:m2"}
{"signature": "def _next_time(self):", "body": "return self._localize(self + self.delay)<EOL>", "docstring": "Add delay to self, localized", "id": "f14201:c1:m0"}
{"signature": "@classmethod<EOL><INDENT>def at_time(cls, at, target):<DEDENT>", "body": "at = cls._from_timestamp(at)<EOL>cmd = cls.from_datetime(at)<EOL>cmd.delay = at - now()<EOL>cmd.target = target<EOL>return cmd<EOL>", "docstring": "Construct a DelayedCommand to come due at `at`, where `at` may be\na datetime or timestamp.", "id": "f14201:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def exec_func_with_sys_argv(func_exec, custom_argv, *args_func_exec, **kwargs_func_exec):<DEDENT>", "body": "<EOL>with patch.object(sys, '<STR_LIT>', custom_argv):<EOL><INDENT>print('<STR_LIT>'.format(sys.argv))<EOL>func_exec(*args_func_exec, **kwargs_func_exec)<EOL><DEDENT>", "docstring": "Exec a CLI function patching sys.argv.\nFor test CLI main functions with argparse\n\n:param func_exec:\n:param custom_argv:\n:param kwargs_func_exec:", "id": "f14211:c0:m0"}
{"signature": "def perfiles_consumo_en_intervalo(t0, tf):", "body": "t_ini = pd.Timestamp(t0)<EOL>t_fin = pd.Timestamp(tf)<EOL>assert (t_fin > t_ini)<EOL>marca_fin = '<STR_LIT>'.format(t_fin)<EOL>marca_ini = '<STR_LIT>'.format(t_ini)<EOL>if marca_ini == marca_fin:<EOL><INDENT>perfiles = get_data_perfiles_finales_mes(t_ini)<EOL><DEDENT>else:<EOL><INDENT>dates = pd.DatetimeIndex(start=t_ini.replace(day=<NUM_LIT:1>),<EOL>end=t_fin.replace(day=<NUM_LIT:1>), freq='<STR_LIT>')<EOL>perfiles = pd.concat([get_data_perfiles_finales_mes(t) for t in dates])<EOL><DEDENT>return perfiles.loc[t_ini:t_fin].iloc[:-<NUM_LIT:1>]<EOL>", "docstring": "Descarga de perfiles horarios para un intervalo dado\n    Con objeto de calcular el precio medio ponderado de aplicaci\u00f3n para dicho intervalo.\n    :return: perfiles_intervalo\n    :rtype: pd.Dataframe", "id": "f14216:m3"}
{"signature": "def print_green(x):", "body": "cprint(x, '<STR_LIT>')<EOL>", "docstring": "Prints in green", "id": "f14217:m13"}
{"signature": "def print_blue(x):", "body": "cprint(x, '<STR_LIT>')<EOL>", "docstring": "Prints in blue", "id": "f14217:m15"}
{"signature": "def print_secc(x):", "body": "cprint('<STR_LIT>' + x, '<STR_LIT>', attrs=['<STR_LIT>', '<STR_LIT>'])<EOL>", "docstring": "Prints in bold + blue + underline & starts with ' ==>", "id": "f14217:m3"}
{"signature": "def print_err(x):", "body": "cprint('<STR_LIT>' + str(x), on_color='<STR_LIT>', attrs=['<STR_LIT>'])<EOL>", "docstring": "Prints in bold + red background & starts with 'ERROR:", "id": "f14217:m4"}
{"signature": "def print_white(x):", "body": "cprint(x, '<STR_LIT>')<EOL>", "docstring": "Prints in white", "id": "f14217:m18"}
{"signature": "def print_cyan(x):", "body": "cprint(x, '<STR_LIT>')<EOL>", "docstring": "Prints in cyan", "id": "f14217:m17"}
{"signature": "def print_ok(x):", "body": "cprint(x, '<STR_LIT>', attrs=['<STR_LIT>'])<EOL>", "docstring": "Prints in bold + green", "id": "f14217:m2"}
{"signature": "def print_infob(x):", "body": "cprint(x, '<STR_LIT>', attrs=['<STR_LIT>'])<EOL>", "docstring": "Prints in bold + blue", "id": "f14217:m1"}
{"signature": "def print_yellowb(x):", "body": "cprint(x, '<STR_LIT>', attrs=['<STR_LIT>'])<EOL>", "docstring": "Prints in yellow + underline", "id": "f14217:m8"}
{"signature": "def generacion_csv_oficial_consumo_horario(self, save_pathdir=None):", "body": "df_csv = pd.DataFrame(self._consumo_horario)<EOL>columns = '<STR_LIT>'.split('<STR_LIT:;>')<EOL>date_fmt = '<STR_LIT>'<EOL>time_fmt = '<STR_LIT>'<EOL>metodo_obt = '<STR_LIT:R>'<EOL>df_csv[columns[<NUM_LIT:0>]] = self._cups<EOL>df_csv[columns[<NUM_LIT:1>]] = [date_fmt.format(x) for x in df_csv.index]<EOL>df_csv[columns[<NUM_LIT:2>]] = [int(time_fmt.format(x)) + <NUM_LIT:1> for x in df_csv.index]<EOL>df_csv[columns[<NUM_LIT:3>]] = df_csv[self._consumo_horario.name].round(<NUM_LIT:3>)<EOL>df_csv[columns[<NUM_LIT:4>]] = metodo_obt<EOL>df_csv.drop(self._consumo_horario.name, axis=<NUM_LIT:1>, inplace=True)<EOL>if save_pathdir is not None:  <EOL><INDENT>params_csv = dict(index=False, sep='<STR_LIT:;>', decimal='<STR_LIT:U+002C>', float_format='<STR_LIT>')<EOL>path_csv = os.path.join(os.path.expanduser(save_pathdir),<EOL>'<STR_LIT>'.format(pd.Timestamp(self._t0), self._tf))<EOL>df_csv.to_csv(path_csv, **params_csv)<EOL><DEDENT>return df_csv<EOL>", "docstring": "Genera o graba el consumo horario en CSV, con el formato utilizado por las distribuidoras (oficial de facto),\npara su importaci\u00f3n en otras herramientas, como la existente en: https://facturaluz2.cnmc.es/facturaluz2.html\n\nEl formato es el siguiente:\n```\n    CUPS;Fecha;Hora;Consumo_kWh;Metodo_obtencion\n    ES00XXXXXXXXXXXXXXDB;06/09/2015;1;0,267;R\n    ES00XXXXXXXXXXXXXXDB;06/09/2015;2;0,143;R\n    ES00XXXXXXXXXXXXXXDB;06/09/2015;3;0,118;R\n    ...\n```\n:param save_pathdir: (OPC) path_str de destino para grabar el CSV.\n:return: dataframe de consumo con 'formato oficial'\n:rtype: pd.dataframe", "id": "f14218:c0:m27"}
{"signature": "@property<EOL><INDENT>def impuesto_electrico_general(self):<DEDENT>", "body": "return self._termino_impuesto_electrico<EOL>", "docstring": "Calcula el importe del impuesto el\u00e9ctrico, antes de IVA o equivalente, en \u20ac.\n        :return impuesto\n        :rtype float", "id": "f14218:c0:m24"}
{"signature": "@property<EOL><INDENT>def coste_total(self):<DEDENT>", "body": "return self._total_factura<EOL>", "docstring": "Calcula el importe total de la factura el\u00e9ctrica, en \u20ac.\n        :return coste\n        :rtype float", "id": "f14218:c0:m26"}
{"signature": "def _calcula_iva_y_total(self):", "body": "<EOL>subt_fijo_var = self._termino_fijo_total + self._termino_variable_total<EOL>subt_fijo_var += self._termino_impuesto_electrico + self._descuento_bono_social<EOL>_, impuesto_gen, impuesto_medida = DATOS_ZONAS_IMPUESTOS[self._zona_impuestos]<EOL>self._terminos_iva = (subt_fijo_var * impuesto_gen, self._termino_equipo_medida * impuesto_medida)<EOL>self._termino_iva_total = self._round(self._terminos_iva[<NUM_LIT:0>] + self._terminos_iva[<NUM_LIT:1>])<EOL>subt_fijo_var += self._termino_equipo_medida + self._termino_iva_total<EOL>self._total_factura = self._round(subt_fijo_var)<EOL>", "docstring": "A\u00f1ade el IVA y obtiene el total.", "id": "f14218:c0:m10"}
{"signature": "@property<EOL><INDENT>def coste_termino_consumo(self):<DEDENT>", "body": "return self._termino_variable_total<EOL>", "docstring": "Calcula el coste asociado al consumo de energ\u00eda en el periodo facturado, antes de impuestos, en \u20ac.\n        :return gasto_energia\n        :rtype float", "id": "f14218:c0:m22"}
{"signature": "@property<EOL><INDENT>def tipo_peaje(self):<DEDENT>", "body": "return self._tipo_peaje<EOL>", "docstring": "Devuelve el tipo de tarifa asociada a la factura el\u00e9ctrica.", "id": "f14218:c0:m15"}
{"signature": "@property<EOL><INDENT>def num_dias_factura(self):<DEDENT>", "body": "return self._num_dias_factura<EOL>", "docstring": "Devuelve el # de d\u00edas del periodo facturado.", "id": "f14218:c0:m14"}
{"signature": "def plot_consumo_diario(self, ax=None):", "body": "p_params = dict(figsize=(<NUM_LIT:16>, <NUM_LIT:9>)) if ax is None else dict(ax=ax)<EOL>consumo_diario = self._consumo_horario.groupby(pd.TimeGrouper('<STR_LIT:D>')).sum()<EOL>ax = consumo_diario.plot(color='<STR_LIT>', lw=<NUM_LIT:2>, **p_params)<EOL>params_lines = dict(lw=<NUM_LIT:1>, linestyle='<STR_LIT::>', alpha=<NUM_LIT>)<EOL>xlim = consumo_diario[<NUM_LIT:0>], consumo_diario.index[-<NUM_LIT:1>]<EOL>ax.hlines([consumo_diario.mean()], *xlim, color='<STR_LIT>', **params_lines)<EOL>ax.hlines([consumo_diario.max()], *xlim, color='<STR_LIT>', **params_lines)<EOL>ax.hlines([consumo_diario.min()], *xlim, color='<STR_LIT>', **params_lines)<EOL>ax.set_title('<STR_LIT>'.format(self.consumo_total))<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_xlabel('<STR_LIT>')<EOL>ax.set_ylim((<NUM_LIT:0>, consumo_diario.max() * <NUM_LIT>))<EOL>ax.grid('<STR_LIT>', axis='<STR_LIT:x>')<EOL>return ax<EOL>", "docstring": "Gr\u00e1fica del consumo diario en el intervalo.\n        :param ax: optional matplotlib axes\n        :return: matplotlib axes", "id": "f14218:c0:m29"}
{"signature": "def plot_patron_semanal_consumo(self, ax=None):", "body": "consumo_diario = self._consumo_horario.groupby(pd.TimeGrouper('<STR_LIT:D>')).sum()<EOL>media_semanal = consumo_diario.groupby(lambda x: x.weekday).mean().round(<NUM_LIT:1>)<EOL>d\u00edas_semana = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>media_semanal.columns = d\u00edas_semana<EOL>p_params = dict(figsize=(<NUM_LIT:16>, <NUM_LIT:9>)) if ax is None else dict(ax=ax)<EOL>ax = media_semanal.T.plot(kind='<STR_LIT:bar>', color='<STR_LIT>', legend=False, **p_params)<EOL>ax.set_xticklabels(d\u00edas_semana, rotation=<NUM_LIT:0>)<EOL>ax.set_title('<STR_LIT>')<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.grid('<STR_LIT>', axis='<STR_LIT:y>')<EOL>ax.hlines([consumo_diario.mean()], -<NUM_LIT:1>, <NUM_LIT:7>, lw=<NUM_LIT:3>, color='<STR_LIT>', linestyle='<STR_LIT::>')<EOL>return ax<EOL>", "docstring": "Gr\u00e1fica de consumo medio por d\u00eda de la semana (patr\u00f3n semanal de consumo).\n        :param ax: optional matplotlib axes\n        :return: matplotlib axes", "id": "f14218:c0:m30"}
{"signature": "@property<EOL><INDENT>def coste_iva(self):<DEDENT>", "body": "return self._termino_iva_total<EOL>", "docstring": "Calcula el importe del IVA o equivalente, en \u20ac.\n        :return impuesto\n        :rtype float", "id": "f14218:c0:m25"}
{"signature": "@gasto_equipo_medida.setter<EOL><INDENT>def gasto_equipo_medida(self, nuevo_gasto):<DEDENT>", "body": "self.alquiler_euros = nuevo_gasto<EOL>self._termino_equipo_medida = self._round(nuevo_gasto)<EOL>self._calcula_iva_y_total()<EOL>", "docstring": "Establece el gasto relativo al alquiler de equipos de medida antes de impuestos, en \u20ac, de forma absoluta.\n        :param nuevo_gasto: Gasto en \u20ac", "id": "f14218:c0:m20"}
{"signature": "def reparto_coste(self, detallado=False):", "body": "<EOL>cod_tarifa = DATOS_TIPO_PEAJE[self._tipo_peaje][<NUM_LIT:1>]<EOL>_, impuesto_gen, impuesto_medida = DATOS_ZONAS_IMPUESTOS[self._zona_impuestos]<EOL>coste_horario = pd.DataFrame(self._consumo_horario)<EOL>for (ndias, ndias_a\u00f1o, a\u00f1o), (coste, coef_p) in zip(self._periodos_fact, self._termino_fijo):<EOL><INDENT>b_period_f = coste_horario.index.year == a\u00f1o<EOL>coste_horario.loc[b_period_f, '<STR_LIT>'] = coef_p * self._potencia_contratada / (<NUM_LIT> * ndias_a\u00f1o)<EOL>coefs_ener = TERM_ENER_PEAJE_ACCESO_EUR_KWH_TEA[a\u00f1o][self._tipo_peaje]<EOL>tcu = self._pvpc_horario['<STR_LIT>'.format(cod_tarifa)].loc[b_period_f]<EOL>hay_discr, cons_discr = self._asigna_periodos_discr_horaria(coste_horario.loc[b_period_f, COL_CONSUMO])<EOL>if hay_discr:<EOL><INDENT>for c, coef in zip(cons_discr.columns[<NUM_LIT:1>:], coefs_ener):<EOL><INDENT>sub_pd = cons_discr[cons_discr[c]]<EOL>coste_horario.loc[sub_pd.index, '<STR_LIT>'] = coste_horario.loc[sub_pd.index, COL_CONSUMO] * coef<EOL><DEDENT><DEDENT>else:<EOL><INDENT>coste_horario.loc[b_period_f, '<STR_LIT>'] = coste_horario.loc[b_period_f, COL_CONSUMO] * coefs_ener[<NUM_LIT:0>]<EOL><DEDENT>coste_horario.loc[b_period_f, '<STR_LIT>'] += tcu * coste_horario.loc[b_period_f, COL_CONSUMO]<EOL><DEDENT>cols_suma = ['<STR_LIT>', '<STR_LIT>']<EOL>if self._con_bono_social:<EOL><INDENT>coste_horario['<STR_LIT>'] = coste_horario[cols_suma].sum(axis=<NUM_LIT:1>) * -<NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>coste_horario['<STR_LIT>'] = <NUM_LIT:0.><EOL><DEDENT>cols_suma += ['<STR_LIT>']<EOL>coste_horario['<STR_LIT>'] = coste_horario[cols_suma].sum(axis=<NUM_LIT:1>) * self._impuesto_electrico_general<EOL>coste_horario['<STR_LIT>'] = self.gasto_equipo_medida / len(self._consumo_horario)<EOL>cols_suma += ['<STR_LIT>']<EOL>coste_horario['<STR_LIT>'] = coste_horario[cols_suma].sum(axis=<NUM_LIT:1>) * impuesto_gen<EOL>coste_horario['<STR_LIT>'] += 
coste_horario['<STR_LIT>'] * impuesto_medida<EOL>cols_suma += ['<STR_LIT>', '<STR_LIT>']<EOL>if detallado:<EOL><INDENT>return coste_horario<EOL><DEDENT>else:<EOL><INDENT>return coste_horario[cols_suma].sum(axis=<NUM_LIT:1>).rename('<STR_LIT>')<EOL><DEDENT>", "docstring": "Devuelve un pd.DataFrame o pd.Series con el coste facturado repartido por horas, conforme al consumo y precio\n        de cada hora.\n        Dado que el suministrador aplica convenientes redondeos por cada t\u00e9rmino facturado, se observar\u00e1 que la suma de\n        la serie de coste no coincide necesariamente con el importe total de la factura, admitiendo cierto error m\u00ednimo.\n        La mejor aproximaci\u00f3n se consigue r\u00e1pidamente obteniendo el df de costes y haciendo la suma:\n          `df_coste.drop(COL_CONSUMO, axis=1).sum(axis=0).round(2).sum()`\n        Aunque \u00e9sta tampoco es exactamente igual al proceso seguido por el c\u00e1lculo de la factura.\n\n        :param detallado: Si `True`, devuelve el pd.Dataframe con coste repartido en por columnas,\n        coste total como pd.Series por defecto.\n        :return: coste_horario", "id": "f14218:c0:m28"}
{"signature": "@tipo_peaje.setter<EOL><INDENT>def tipo_peaje(self, tarifa):<DEDENT>", "body": "peajes = [TIPO_PEAJE_GEN, TIPO_PEAJE_NOC, TIPO_PEAJE_VHC]<EOL>codes_peajes = [DATOS_TIPO_PEAJE[p][<NUM_LIT:1>] for p in peajes]<EOL>if type(tarifa) is int:<EOL><INDENT>self._tipo_peaje = peajes[tarifa - <NUM_LIT:1>]<EOL><DEDENT>elif tarifa in peajes:<EOL><INDENT>self._tipo_peaje = peajes[peajes.index(tarifa)]<EOL><DEDENT>elif tarifa in codes_peajes:<EOL><INDENT>self._tipo_peaje = peajes[codes_peajes.index(tarifa)]<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>'<EOL>.format(tarifa, '<STR_LIT:|>'.join(peajes), '<STR_LIT:|>'.join(codes_peajes)))<EOL><DEDENT>self._calcula_factura()<EOL>", "docstring": "Establece el tipo de tarifa asociada a la factura el\u00e9ctrica.\n        :param tarifa: int 1|2|3 \u00f3 str GEN|NOC|VHC \u00f3 str 2.0A|2.0DHA|2.0DHS", "id": "f14218:c0:m16"}
{"signature": "@property<EOL><INDENT>def coste_termino_fijo(self):<DEDENT>", "body": "return self._termino_fijo_total<EOL>", "docstring": "Calcula el coste del t\u00e9rmino de potencia antes de impuestos, en \u20ac.\n        :return gasto_potencia\n        :rtype float", "id": "f14218:c0:m21"}
{"signature": "def _calcula_factura(self):", "body": "<EOL>self._dict_repr = None<EOL>self._str_repr = None<EOL>self._html_repr = None<EOL>self._html_repr_completa = None<EOL>cod_tarifa = DATOS_TIPO_PEAJE[self._tipo_peaje][<NUM_LIT:1>]<EOL>year = self._t0.year<EOL>year_f = self._tf.year<EOL>self._num_dias_factura = (self._tf - self._t0).days  <EOL>if year_f > year:<EOL><INDENT>ts_limit = pd.Timestamp('<STR_LIT>'.format(year))<EOL>days_1 = (ts_limit - self._t0).days<EOL>days_2 = (self._tf - ts_limit).days<EOL>n_days_y1 = (pd.Timestamp('<STR_LIT>'.format(year + <NUM_LIT:1>)) - pd.Timestamp('<STR_LIT>'.format(year))).days<EOL>n_days_y2 = (pd.Timestamp('<STR_LIT>'.format(year_f + <NUM_LIT:1>)) - pd.Timestamp('<STR_LIT>'.format(year_f))).days<EOL>self._periodos_fact = ((days_1, n_days_y1, year), (days_2, n_days_y2, year_f))<EOL><DEDENT>else:<EOL><INDENT>n_days_y = (pd.Timestamp('<STR_LIT>'.format(year + <NUM_LIT:1>)) - pd.Timestamp('<STR_LIT>'.format(year))).days<EOL>self._periodos_fact = ((self._num_dias_factura, n_days_y, year),)<EOL><DEDENT>if self._pvpc_data is None:<EOL><INDENT>pvpc_data = PVPC(update=True, verbose=False)<EOL>self._pvpc_data = pvpc_calc_tcu_cp_feu_d(pvpc_data.data['<STR_LIT:data>'], verbose=False, convert_kwh=True)<EOL><DEDENT>cols_tarifa = list(filter(lambda x: cod_tarifa in x, self._pvpc_data.columns))<EOL>pvpc_t_ini, pvpc_t_fin = self._t0 + pd.Timedelta('<STR_LIT>'), self._tf + pd.Timedelta('<STR_LIT>')<EOL>self._pvpc_horario = self._pvpc_data[cols_tarifa].loc[pvpc_t_ini:pvpc_t_fin].iloc[:-<NUM_LIT:1>]<EOL>self._termino_fijo, self._termino_fijo_total = [], <NUM_LIT:0><EOL>for (days_fac, days_year, year) in self._periodos_fact:<EOL><INDENT>coef_potencia = MARGEN_COMERCIALIZACI\u00d3N_EUR_KW_A\u00d1O_MCF + TERM_POT_PEAJE_ACCESO_EUR_KW_A\u00d1O_TPA[year]<EOL>coste = self._potencia_contratada * days_fac * coef_potencia / days_year<EOL>self._termino_fijo.append((coste, coef_potencia))<EOL>self._termino_fijo_total += 
coste<EOL><DEDENT>self._termino_fijo_total = self._round(self._termino_fijo_total)<EOL>if self._consumo is not None:<EOL><INDENT>son_totales, consumo_calc = self._consumo_numerico()<EOL>if son_totales:<EOL><INDENT>c_coef = '<STR_LIT>'.format(cod_tarifa)<EOL>hay_discr, perfs_interv = self._asigna_periodos_discr_horaria(self._pvpc_horario[c_coef])<EOL>if not hay_discr:<EOL><INDENT>self._consumo_horario = (perfs_interv * consumo_calc[<NUM_LIT:0>] / perfs_interv.sum()).rename(COL_CONSUMO)<EOL><DEDENT>else:<EOL><INDENT>consumos_horarios_periodos = []<EOL>for i, cons_periodo_i in enumerate(consumo_calc):<EOL><INDENT>c = '<STR_LIT>'.format(i + <NUM_LIT:1>)<EOL>idx = perfs_interv[perfs_interv[c]].index<EOL>consumos_horarios_periodos.append(perfs_interv.loc[idx, c_coef] * cons_periodo_i<EOL>/ perfs_interv.loc[idx, c_coef].sum())<EOL><DEDENT>self._consumo_horario = pd.Series(pd.concat(consumos_horarios_periodos)<EOL>).rename(COL_CONSUMO).sort_index()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._consumo_horario = self._check_hourly_data(consumo_calc)<EOL>t0, tf = self._consumo_horario.index[<NUM_LIT:0>], self._consumo_horario.index[-<NUM_LIT:1>]<EOL>self._pvpc_horario = self._pvpc_horario.loc[t0:tf]<EOL><DEDENT>col_tcu = '<STR_LIT>'.format(cod_tarifa)<EOL>if len(self._periodos_fact) > <NUM_LIT:1>:<EOL><INDENT>ts_limit = pd.Timestamp('<STR_LIT>'.format(self._tf.year)).tz_localize(self._consumo_horario.index.tz)<EOL>consumo_1 = self._consumo_horario.loc[:ts_limit].iloc[:-<NUM_LIT:1>]<EOL>consumo_2 = self._consumo_horario.loc[ts_limit:]<EOL>pvpc_1 = self._pvpc_horario.loc[:ts_limit].iloc[:-<NUM_LIT:1>]<EOL>pvpc_2 = self._pvpc_horario.loc[ts_limit:]<EOL>tea_y1, tcu_y1, cons_tot_y1 = list(zip(*self._coste_tea_tcu(consumo_1, pvpc_1[col_tcu], self._t0.year)))<EOL>tea_y2, tcu_y2, cons_tot_y2 = list(zip(*self._coste_tea_tcu(consumo_2, pvpc_2[col_tcu], self._tf.year)))<EOL>self._coste_peaje_acceso_tea = (tea_y1, tea_y2)<EOL>self._coste_ponderado_energia_tcu = (tcu_y1, 
tcu_y2)<EOL>self._consumos_totales_por_periodo = (cons_tot_y1, cons_tot_y2)<EOL>coste_variable_tot = sum([sum(x) for x in self._coste_peaje_acceso_tea])<EOL>coste_variable_tot += sum([sum(x) for x in self._coste_ponderado_energia_tcu])<EOL><DEDENT>else:<EOL><INDENT>tea, tcu, cons_tot = list(zip(*self._coste_tea_tcu(self._consumo_horario, self._pvpc_horario[col_tcu],<EOL>self._t0.year)))<EOL>self._coste_peaje_acceso_tea = tea<EOL>self._coste_ponderado_energia_tcu = tcu<EOL>self._consumos_totales_por_periodo = cons_tot<EOL>coste_variable_tot = self._round_sum(self._coste_peaje_acceso_tea)<EOL>coste_variable_tot += self._round_sum(self._coste_ponderado_energia_tcu)<EOL><DEDENT>self._consumo = self._consumo_horario<EOL><DEDENT>else:<EOL><INDENT>self._coste_peaje_acceso_tea = (<NUM_LIT:0.>,)<EOL>self._coste_ponderado_energia_tcu = (<NUM_LIT:0.>,)<EOL>self._consumos_totales_por_periodo = (<NUM_LIT:0.>,)<EOL>coste_variable_tot = <NUM_LIT:0.><EOL><DEDENT>self._termino_variable_total = self._round(coste_variable_tot)<EOL>subt_fijo_var = self._termino_fijo_total + self._termino_variable_total<EOL>self._descuento_bono_social = <NUM_LIT:0.><EOL>if self._con_bono_social:<EOL><INDENT>self._descuento_bono_social = self._round(-<NUM_LIT> * self._round(subt_fijo_var))<EOL>subt_fijo_var += self._descuento_bono_social<EOL><DEDENT>self._termino_impuesto_electrico = self._round(self._impuesto_electrico_general * subt_fijo_var)<EOL>subt_fijo_var += self._termino_impuesto_electrico<EOL>if self.alquiler_euros is not None:<EOL><INDENT>self._termino_equipo_medida = self._round(self.alquiler_euros)<EOL><DEDENT>else:<EOL><INDENT>frac_a\u00f1o = sum([nd / dy for nd, dy, _ in self._periodos_fact])<EOL>self._termino_equipo_medida = self._round(frac_a\u00f1o * self.alquiler_euros_a\u00f1o)<EOL><DEDENT>self._calcula_iva_y_total()<EOL>", "docstring": "M\u00e9todo para regenerar el c\u00e1lculo de la factura el\u00e9ctrica.", "id": "f14218:c0:m11"}
{"signature": "@consumo_total.setter<EOL><INDENT>def consumo_total(self, nuevo_consumo):<DEDENT>", "body": "self._consumo = nuevo_consumo<EOL>self._calcula_factura()<EOL>", "docstring": "Establece el consumo energ\u00e9tico del periodo de facturaci\u00f3n en kWh.\n        :param nuevo_consumo: Consumo en kWh, bien como float, bien como lista de 1, 2 o 3 elementos,\n        bien como time series de datos horarios.", "id": "f14218:c0:m18"}
{"signature": "def main_cli():", "body": "def _get_parser_args():<EOL><INDENT>p = argparse.ArgumentParser(description='<STR_LIT>')<EOL>p.add_argument('<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>', help='<STR_LIT>')<EOL>p.add_argument('<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store>', nargs='<STR_LIT:*>',<EOL>help=\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>p.add_argument('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>',<EOL>help=\"<STR_LIT>\")<EOL>p.add_argument('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>',<EOL>help=\"<STR_LIT>\")<EOL>p.add_argument('<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>', help=\"<STR_LIT>\")<EOL>p.add_argument('<STR_LIT>', '<STR_LIT>', action='<STR_LIT:store_true>', help='<STR_LIT>')<EOL>arguments = p.parse_args()<EOL>return arguments, p<EOL><DEDENT>def _parse_date(string, columns):<EOL><INDENT>try:<EOL><INDENT>ts = pd.Timestamp(string)<EOL>print_cyan('<STR_LIT>'.format(string, ts, ts.date()))<EOL>columns.remove(string)<EOL>return ts.date().isoformat()<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>args, parser = _get_parser_args()<EOL>print_secc('<STR_LIT>')<EOL>if args.dem:<EOL><INDENT>db_web = DatosREE(update=args.update, force_update=args.forceupdate, verbose=args.verbose)<EOL><DEDENT>else:<EOL><INDENT>db_web = PVPC(update=args.update, force_update=args.forceupdate, verbose=args.verbose)<EOL><DEDENT>data = db_web.data['<STR_LIT:data>']<EOL>if args.info is not None:<EOL><INDENT>if len(args.info) > <NUM_LIT:0>:<EOL><INDENT>cols = args.info.copy()<EOL>dates = [d for d in [_parse_date(s, cols) for s in args.info] if d]<EOL>if len(dates) == <NUM_LIT:2>:<EOL><INDENT>data = data.loc[dates[<NUM_LIT:0>]:dates[<NUM_LIT:1>]]<EOL><DEDENT>elif len(dates) == <NUM_LIT:1>:<EOL><INDENT>data = data.loc[dates[<NUM_LIT:0>]]<EOL><DEDENT>if len(cols) > <NUM_LIT:0>:<EOL><INDENT>try:<EOL><INDENT>data = data[[c.upper() for c in cols]]<EOL><DEDENT>except KeyError as 
e:<EOL><INDENT>print_red('<STR_LIT>'<EOL>.format(e, data.columns))<EOL><DEDENT><DEDENT>print_info(data)<EOL><DEDENT>else:<EOL><INDENT>print_secc('<STR_LIT>')<EOL>print_info(data.iloc[-<NUM_LIT>:])<EOL>print_cyan(data.columns)<EOL><DEDENT><DEDENT>if args.plot:<EOL><INDENT>if args.dem:<EOL><INDENT>from esiosdata.pvpcplot import pvpcplot_tarifas_hora, pvpcplot_grid_hora<EOL>print_red('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>from esiosdata.pvpcplot import pvpcplot_tarifas_hora, pvpcplot_grid_hora<EOL>if len(data) < <NUM_LIT>:<EOL><INDENT>pvpcplot_grid_hora(data)<EOL><DEDENT>else:<EOL><INDENT>print_red('<STR_LIT>'.<EOL>format(len(data), data.index[<NUM_LIT:0>], data.index[-<NUM_LIT:1>]))<EOL>pvpcplot_grid_hora(db_web.data['<STR_LIT:data>'].iloc[-<NUM_LIT>:])<EOL>pvpcplot_tarifas_hora(db_web.data['<STR_LIT:data>'].iloc[-<NUM_LIT>:])<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Actualiza la base de datos de PVPC/DEMANDA almacenados como dataframe en local,\ncreando una nueva si no existe o hubiere alg\u00fan problema. Los datos registrados se guardan en HDF5", "id": "f14219:m0"}
{"signature": "def post_update_data(self):", "body": "if self.data is not None:<EOL><INDENT>self.data['<STR_LIT>'] = self.busca_errores_data()<EOL><DEDENT>", "docstring": "Definici\u00f3n opcional para analizar la informaci\u00f3n descargada en busca de errores,\nque quedan almacenados en `self.data['errores']`.", "id": "f14221:c1:m3"}
{"signature": "def procesa_data_dia(self, str_dia, datos_para_procesar):", "body": "return dem_procesa_datos_dia(str_dia, datos_para_procesar)<EOL>", "docstring": "Procesa los datos descargados correspondientes a un d\u00eda `key_dia`.", "id": "f14221:c1:m2"}
{"signature": "def url_data_dia(self, key_dia):", "body": "return dem_url_dia(key_dia)<EOL>", "docstring": "Devuelve la url de descarga de datos para `key_dia`.", "id": "f14221:c1:m1"}
{"signature": "def busca_errores_data(self):", "body": "data_busqueda = self.append_delta_index(TS_DATA_DEM, data_delta=self.data[self.masterkey].copy())<EOL>idx_desconex = (((data_busqueda.index < '<STR_LIT>') & (data_busqueda.index >= self.DATE_INI)) &<EOL>((data_busqueda.delta_T > <NUM_LIT:1>) | data_busqueda['<STR_LIT>'].isnull() |<EOL>data_busqueda['<STR_LIT>'].isnull() | data_busqueda['<STR_LIT>'].isnull()))<EOL>sosp = data_busqueda[idx_desconex].copy()<EOL>assert len(sosp) == <NUM_LIT:0><EOL>return pd.DataFrame()<EOL>", "docstring": "Busca errores o inconsistencias en los datos adquiridos\n:return: Dataframe de errores encontrados", "id": "f14221:c1:m6"}
{"signature": "def pvpc_data_dia(str_dia, str_dia_fin=None):", "body": "params = {'<STR_LIT>': DATE_FMT, '<STR_LIT>': False,<EOL>'<STR_LIT>': pvpc_procesa_datos_dia, '<STR_LIT>': pvpc_url_dia,<EOL>'<STR_LIT>': {'<STR_LIT>': True, '<STR_LIT>': HEADERS}}<EOL>if str_dia_fin is not None:<EOL><INDENT>params['<STR_LIT>'] = True<EOL>data, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia_fin, **params)<EOL><DEDENT>else:<EOL><INDENT>data, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia, **params)<EOL><DEDENT>if not hay_errores:<EOL><INDENT>return data<EOL><DEDENT>else:<EOL><INDENT>return str_import<EOL><DEDENT>", "docstring": "Obtiene datos de PVPC en un d\u00eda concreto o un intervalo, accediendo directamente a la web.", "id": "f14223:m4"}
{"signature": "def pvpc_procesa_datos_dia(_, response, verbose=True):", "body": "try:<EOL><INDENT>d_data = response['<STR_LIT>']<EOL>df = _process_json_pvpc_hourly_data(pd.DataFrame(d_data))<EOL>return df, <NUM_LIT:0><EOL><DEDENT>except Exception as e:<EOL><INDENT>if verbose:<EOL><INDENT>print('<STR_LIT>'.format(e))<EOL><DEDENT>return None, -<NUM_LIT:2><EOL><DEDENT>", "docstring": "Procesa la informaci\u00f3n JSON descargada y forma el dataframe de los datos de un d\u00eda.", "id": "f14223:m3"}
{"signature": "def seed(self, seed): ", "body": "if seed is None:<EOL><INDENT>self.env.seed = round(time.time())<EOL><DEDENT>else:<EOL><INDENT>self.env.seed = seed<EOL><DEDENT>return self.env.seed<EOL>", "docstring": "Sets the random seed of the environment to the given value (current time, if seed=None).\nNaturally deterministic Environments (e.g. ALE or some gym Envs) don't have to implement this method.\n\nArgs:\n    seed (int): The seed to use for initializing the pseudo-random number generator (default=epoch time in sec).\nReturns: The actual seed (int) used OR None if Environment did not override this method (no seeding supported).", "id": "f14224:c0:m3"}
{"signature": "def reset(self):", "body": "self.env.reset_game()<EOL>return self.env.getScreenRGB()<EOL>", "docstring": "Reset environment and setup for new episode.\n\nReturns:\n    initial state of reset environment.", "id": "f14224:c0:m4"}
{"signature": "def available_actions(self):", "body": "return [action for action in range(<NUM_LIT:4>) if self.is_action_available(action)]<EOL>", "docstring": "Computes the set of actions that are available.", "id": "f14226:c0:m9"}
{"signature": "def game_over(self):", "body": "for action in range(<NUM_LIT:4>):<EOL><INDENT>if self.is_action_available(action):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Whether the game is over.", "id": "f14226:c0:m8"}
{"signature": "def score(self):", "body": "return self._score<EOL>", "docstring": "Return current score.", "id": "f14226:c0:m17"}
{"signature": "def _is_action_available_left(self, state):", "body": "<EOL>for row in range(<NUM_LIT:4>):<EOL><INDENT>has_empty = False<EOL>for col in range(<NUM_LIT:4>):<EOL><INDENT>has_empty |= state[row, col] == <NUM_LIT:0><EOL>if state[row, col] != <NUM_LIT:0> and has_empty:<EOL><INDENT>return True<EOL><DEDENT>if (state[row, col] != <NUM_LIT:0> and col > <NUM_LIT:0> and<EOL>state[row, col] == state[row, col - <NUM_LIT:1>]):<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT>return False<EOL>", "docstring": "Determines whether action 'Left' is available.", "id": "f14226:c0:m11"}
{"signature": "def __init__(self, state=None, initial_score=<NUM_LIT:0>):", "body": "self._score = initial_score<EOL>if state is None:<EOL><INDENT>self._state = np.zeros((<NUM_LIT:4>, <NUM_LIT:4>), dtype=np.int)<EOL>self.add_random_tile()<EOL>self.add_random_tile()<EOL><DEDENT>else:<EOL><INDENT>self._state = state<EOL><DEDENT>", "docstring": "Init the Game object.\n        Args:\n        state: Shape (4, 4) numpy array to initialize the state with. If None,\n            the state will be initialized with with two random tiles (as done\n            in the original game).\n        initial_score: Score to initialize the Game with.", "id": "f14226:c0:m6"}
{"signature": "def _do_action_left(self, state):", "body": "reward = <NUM_LIT:0><EOL>for row in range(<NUM_LIT:4>):<EOL><INDENT>merge_candidate = -<NUM_LIT:1><EOL>merged = np.zeros((<NUM_LIT:4>,), dtype=np.bool)<EOL>for col in range(<NUM_LIT:4>):<EOL><INDENT>if state[row, col] == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>if (merge_candidate != -<NUM_LIT:1> and<EOL>not merged[merge_candidate] and<EOL>state[row, merge_candidate] == state[row, col]):<EOL><INDENT>state[row, col] = <NUM_LIT:0><EOL>merged[merge_candidate] = True<EOL>state[row, merge_candidate] += <NUM_LIT:1><EOL>reward += <NUM_LIT:2> ** state[row, merge_candidate]<EOL><DEDENT>else:<EOL><INDENT>merge_candidate += <NUM_LIT:1><EOL>if col != merge_candidate:<EOL><INDENT>state[row, merge_candidate] = state[row, col]<EOL>state[row, col] = <NUM_LIT:0><EOL><DEDENT><DEDENT><DEDENT><DEDENT>return reward<EOL>", "docstring": "Executes action 'Left'.", "id": "f14226:c0:m13"}
{"signature": "def state(self):", "body": "return self._state<EOL>", "docstring": "Return current state.", "id": "f14226:c0:m16"}
{"signature": "def print_state(self):", "body": "def tile_string(value):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if value > <NUM_LIT:0>:<EOL><INDENT>return '<STR_LIT>' % (<NUM_LIT:2> ** value,)<EOL><DEDENT>return \"<STR_LIT:U+0020>\"<EOL><DEDENT>separator_line = '<STR_LIT:->' * <NUM_LIT><EOL>print(separator_line)<EOL>for row in range(<NUM_LIT:4>):<EOL><INDENT>print(\"<STR_LIT:|>\" + \"<STR_LIT:|>\".join([tile_string(v) for v in self._state[row, :]]) + \"<STR_LIT:|>\")<EOL>print(separator_line)<EOL><DEDENT>", "docstring": "Prints the current state.", "id": "f14226:c0:m15"}
{"signature": "def recv(self, socket_, encoding=None):", "body": "unpacker = msgpack.Unpacker(encoding=encoding)<EOL>response = socket_.recv(<NUM_LIT:8>)  <EOL>if response == b\"<STR_LIT>\":<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\" +<EOL>\"<STR_LIT>\")<EOL><DEDENT>orig_len = int(response)<EOL>received_len = <NUM_LIT:0><EOL>while True:<EOL><INDENT>data = socket_.recv(min(orig_len - received_len, self.max_msg_len))<EOL>if not data:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\".<EOL>format(orig_len - received_len))<EOL><DEDENT>data_len = len(data)<EOL>received_len += data_len<EOL>unpacker.feed(data)<EOL>if received_len == orig_len:<EOL><INDENT>break<EOL><DEDENT><DEDENT>for message in unpacker:<EOL><INDENT>sts = message.get(\"<STR_LIT:status>\", message.get(b\"<STR_LIT:status>\"))<EOL>if sts:<EOL><INDENT>if sts == \"<STR_LIT>\" or sts == b\"<STR_LIT>\":<EOL><INDENT>return message<EOL><DEDENT>else:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\".<EOL>format(message.get(\"<STR_LIT:message>\", \"<STR_LIT>\")))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>raise TensorForceError(\"<STR_LIT>\".<EOL>format(orig_len))<EOL>", "docstring": "Receives a message as msgpack-numpy encoded byte-string from the given socket object.\nBlocks until something was received.\n\nArgs:\n    socket_: The python socket object to use.\n    encoding (str): The encoding to use for unpacking messages from the socket.\nReturns: The decoded (as dict) message received.", "id": "f14227:c1:m2"}
{"signature": "def connect(self, timeout=<NUM_LIT>):", "body": "<EOL>if self.socket:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\" +<EOL>\"<STR_LIT>\".format(self.host, self.port))<EOL><DEDENT>self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)<EOL>if timeout < <NUM_LIT:5> or timeout is None:<EOL><INDENT>timeout = <NUM_LIT:5><EOL><DEDENT>err = <NUM_LIT:0><EOL>start_time = time.time()<EOL>while time.time() - start_time < timeout:<EOL><INDENT>self.socket.settimeout(<NUM_LIT:5>)<EOL>err = self.socket.connect_ex((self.host, self.port))<EOL>if err == <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>time.sleep(<NUM_LIT:1>)<EOL><DEDENT>if err != <NUM_LIT:0>:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\".<EOL>format(self.host, self.port, err, errno.errorcode[err], os.strerror(err)))<EOL><DEDENT>", "docstring": "Starts the server tcp connection on the given host:port.\n\nArgs:\n    timeout (int): The time (in seconds) for which we will attempt a connection to the remote\n        (every 5sec). After that (or if timeout is None or 0), an error is raised.", "id": "f14227:c0:m3"}
{"signature": "def __init__(self, host=\"<STR_LIT:localhost>\", port=<NUM_LIT>):", "body": "Environment.__init__(self)<EOL>self.port = int(port) or <NUM_LIT><EOL>self.host = host or \"<STR_LIT:localhost>\"<EOL>self.socket = None<EOL>self.buffer_size = <NUM_LIT><EOL>self.last_observation = None<EOL>", "docstring": "A remote Environment that one can connect to through tcp.\nImplements a simple msgpack protocol to get the step/reset/etc.. commands to the\nremote server and simply waits (blocks) for a response.\n\nArgs:\n        host (str): The hostname to connect to.\n        port (int): The port to connect to.", "id": "f14227:c0:m0"}
{"signature": "def disconnect(self):", "body": "<EOL>if not self.socket:<EOL><INDENT>logging.warning(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>self.socket.close()<EOL>self.socket = None<EOL>", "docstring": "Ends our server tcp connection.", "id": "f14227:c0:m4"}
{"signature": "def execute(self, action):", "body": "action_mappings, axis_mappings = [], []<EOL>if self.discretize_actions:<EOL><INDENT>combination = self.discretized_actions[action]<EOL>for key, value in combination:<EOL><INDENT>if isinstance(value, bool):<EOL><INDENT>action_mappings.append((key, value))<EOL><DEDENT>else:<EOL><INDENT>axis_mappings.append((key, value))<EOL><DEDENT><DEDENT><DEDENT>elif action:<EOL><INDENT>try:<EOL><INDENT>action_mappings, axis_mappings = self.translate_abstract_actions_to_keys(action)<EOL><DEDENT>except KeyError as e:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\".<EOL>format(e))<EOL><DEDENT><DEDENT>message = dict(<EOL>cmd=\"<STR_LIT>\",<EOL>delta_time=self.delta_time,<EOL>num_ticks=self.num_ticks,<EOL>actions=action_mappings,<EOL>axes=axis_mappings<EOL>)<EOL>self.protocol.send(message, self.socket)<EOL>response = self.protocol.recv(self.socket)<EOL>r = response.pop(b\"<STR_LIT>\", <NUM_LIT:0.0>)<EOL>is_terminal = response.pop(b\"<STR_LIT>\", False)<EOL>obs = self.extract_observation(response)<EOL>self.last_observation = obs<EOL>return obs, is_terminal, r<EOL>", "docstring": "Executes a single step in the UE4 game. This step may be comprised of one or more actual game ticks for all of\nwhich the same given\naction- and axis-inputs (or action number in case of discretized actions) are repeated.\nUE4 distinguishes between action-mappings, which are boolean actions (e.g. jump or dont-jump) and axis-mappings,\nwhich are continuous actions\nlike MoveForward with values between -1.0 (run backwards) and 1.0 (run forwards), 0.0 would mean: stop.", "id": "f14228:c0:m6"}
{"signature": "def __init__(<EOL>self,<EOL>host=\"<STR_LIT:localhost>\",<EOL>port=<NUM_LIT>,<EOL>connect=True,<EOL>discretize_actions=False,<EOL>delta_time=<NUM_LIT:1>/<NUM_LIT>,<EOL>num_ticks=<NUM_LIT:4><EOL>):", "body": "RemoteEnvironment.__init__(self, host, port)<EOL>self.game_name = None<EOL>self.action_space_desc = None<EOL>self.observation_space_desc = None<EOL>self.discretize_actions = discretize_actions<EOL>self.discretized_actions = None<EOL>self.delta_time = delta_time<EOL>self.num_ticks = num_ticks<EOL>self.protocol = MsgPackNumpyProtocol()<EOL>if connect:<EOL><INDENT>self.connect()<EOL><DEDENT>", "docstring": "Args:\n    host (str): The hostname to connect to.\n    port (int): The port to connect to.\n    connect (bool): Whether to connect already in this c'tor.\n    discretize_actions (bool): Whether to treat axis-mappings defined in UE4 game as discrete actions.\n        This would be necessary e.g. for agents that use q-networks where the output are q-values per discrete\n        state-action pair.\n    delta_time (float): The fake delta time to use for each single game tick.\n    num_ticks (int): The number of ticks to be executed in a single act call (each tick will\n        repeat the same given actions).", "id": "f14228:c0:m0"}
{"signature": "def discretize_action_space_desc(self):", "body": "<EOL>unique_list = []<EOL>for nice, record in self.action_space_desc.items():<EOL><INDENT>list_for_record = []<EOL>if record[\"<STR_LIT:type>\"] == \"<STR_LIT>\":<EOL><INDENT>head_key = record[\"<STR_LIT>\"][<NUM_LIT:0>][<NUM_LIT:0>]<EOL>head_value = record[\"<STR_LIT>\"][<NUM_LIT:0>][<NUM_LIT:1>]<EOL>list_for_record.append((head_key, <NUM_LIT:0.0>))<EOL>set_ = set()<EOL>for key_and_scale in self.action_space_desc[nice][\"<STR_LIT>\"]:<EOL><INDENT>if key_and_scale[<NUM_LIT:1>] not in set_:<EOL><INDENT>list_for_record.append((head_key, key_and_scale[<NUM_LIT:1>] / head_value))<EOL>set_.add(key_and_scale[<NUM_LIT:1>])<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>list_for_record = [(record[\"<STR_LIT>\"][<NUM_LIT:0>], False), (record[\"<STR_LIT>\"][<NUM_LIT:0>], True)]<EOL><DEDENT>unique_list.append(list_for_record)<EOL><DEDENT>def so(in_):<EOL><INDENT>st = \"<STR_LIT>\"<EOL>for i in in_:<EOL><INDENT>st += str(i[<NUM_LIT:1>])<EOL><DEDENT>return st<EOL><DEDENT>combinations = list(itertools.product(*unique_list))<EOL>combinations = list(map(lambda x: sorted(list(x), key=lambda y: y[<NUM_LIT:0>]), combinations))<EOL>combinations = sorted(combinations, key=so)<EOL>self.discretized_actions = combinations<EOL>", "docstring": "Creates a list of discrete action(-combinations) in case we want to learn with a discrete set of actions,\nbut only have action-combinations (maybe even continuous) available from the env.\nE.g. 
the UE4 game has the following action/axis-mappings:\n\n```javascript\n{\n'Fire':\n    {'type': 'action', 'keys': ('SpaceBar',)},\n'MoveRight':\n    {'type': 'axis', 'keys': (('Right', 1.0), ('Left', -1.0), ('A', -1.0), ('D', 1.0))},\n}\n```\n\n-> this method will discretize them into the following 6 discrete actions:\n\n```javascript\n[\n[(Right, 0.0),(SpaceBar, False)],\n[(Right, 0.0),(SpaceBar, True)]\n[(Right, -1.0),(SpaceBar, False)],\n[(Right, -1.0),(SpaceBar, True)],\n[(Right, 1.0),(SpaceBar, False)],\n[(Right, 1.0),(SpaceBar, True)],\n]\n```", "id": "f14228:c0:m10"}
{"signature": "def translate_abstract_actions_to_keys(self, abstract):", "body": "<EOL>if len(abstract) >= <NUM_LIT:2> and not isinstance(abstract[<NUM_LIT:1>], (list, tuple)):<EOL><INDENT>abstract = list((abstract,))<EOL><DEDENT>actions, axes = [], []<EOL>for a in abstract:<EOL><INDENT>first_key = self.action_space_desc[a[<NUM_LIT:0>]][\"<STR_LIT>\"][<NUM_LIT:0>]<EOL>if isinstance(first_key, (bytes, str)):<EOL><INDENT>actions.append((first_key, a[<NUM_LIT:1>]))<EOL><DEDENT>elif isinstance(first_key, tuple):<EOL><INDENT>axes.append((first_key[<NUM_LIT:0>], a[<NUM_LIT:1>] * first_key[<NUM_LIT:1>]))<EOL><DEDENT>else:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\".format(a[<NUM_LIT:0>]))<EOL><DEDENT><DEDENT>return actions, axes<EOL>", "docstring": "Translates a list of tuples ([pretty mapping], [value]) to a list of tuples ([some key], [translated value])\neach single item in abstract will undergo the following translation:\n\nExample1:\nwe want: \"MoveRight\": 5.0\npossible keys for the action are: (\"Right\", 1.0), (\"Left\", -1.0)\nresult: \"Right\": 5.0 * 1.0 = 5.0\n\nExample2:\nwe want: \"MoveRight\": -0.5\npossible keys for the action are: (\"Left\", -1.0), (\"Right\", 1.0)\nresult: \"Left\": -0.5 * -1.0 = 0.5 (same as \"Right\": -0.5)", "id": "f14228:c0:m9"}
{"signature": "def sanity_check_states(states_spec):", "body": "<EOL>states = copy.deepcopy(states_spec)<EOL>is_unique = ('<STR_LIT>' in states)<EOL>if is_unique:<EOL><INDENT>states = dict(state=states)<EOL><DEDENT>for name, state in states.items():<EOL><INDENT>if isinstance(state['<STR_LIT>'], int):<EOL><INDENT>state['<STR_LIT>'] = (state['<STR_LIT>'],)<EOL><DEDENT>if '<STR_LIT:type>' not in state:<EOL><INDENT>state['<STR_LIT:type>'] = '<STR_LIT:float>'<EOL><DEDENT><DEDENT>return states, is_unique<EOL>", "docstring": "Sanity checks a states dict, used to define the state space for an MDP.\nThrows an error or warns if mismatches are found.\n\nArgs:\n    states_spec (Union[None,dict]): The spec-dict to check (or None).\n\nReturns: Tuple of 1) the state space desc and 2) whether there is only one component in the state space.", "id": "f14229:m0"}
{"signature": "def sanity_check_execution_spec(execution_spec):", "body": "<EOL>def_ = dict(type=\"<STR_LIT>\",<EOL>distributed_spec=None,<EOL>session_config=None)<EOL>if execution_spec is None:<EOL><INDENT>return def_<EOL><DEDENT>assert isinstance(execution_spec, dict), \"<STR_LIT>\".format(type(execution_spec).__name__)<EOL>type_ = execution_spec.get(\"<STR_LIT:type>\")<EOL>if type_ == \"<STR_LIT>\":<EOL><INDENT>def_ = dict(job=\"<STR_LIT>\", task_index=<NUM_LIT:0>, cluster_spec={<EOL>\"<STR_LIT>\": [\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": [\"<STR_LIT>\"]<EOL>})<EOL>def_.update(execution_spec.get(\"<STR_LIT>\", {}))<EOL>execution_spec[\"<STR_LIT>\"] = def_<EOL>execution_spec[\"<STR_LIT>\"] = execution_spec.get(\"<STR_LIT>\")<EOL>return execution_spec<EOL><DEDENT>elif type_ == \"<STR_LIT>\":<EOL><INDENT>return execution_spec<EOL><DEDENT>elif type_ == \"<STR_LIT>\":<EOL><INDENT>return execution_spec<EOL><DEDENT>if execution_spec.get('<STR_LIT>') != None:<EOL><INDENT>assert type(execution_spec['<STR_LIT>']) is int, \"<STR_LIT>\".format(type(execution_spec['<STR_LIT>']).__name__)<EOL>assert execution_spec['<STR_LIT>'] > <NUM_LIT:0>, \"<STR_LIT>\".format(execution_spec['<STR_LIT>'])<EOL>return execution_spec<EOL><DEDENT>raise TensorForceError(\"<STR_LIT>\".format(type_))<EOL>", "docstring": "Sanity checks a execution_spec dict, used to define execution logic (distributed vs single, shared memories, etc..)\nand distributed learning behavior of agents/models.\nThrows an error or warns if mismatches are found.\n\nArgs:\n    execution_spec (Union[None,dict]): The spec-dict to check (or None). Dict needs to have the following keys:\n        - type: \"single\", \"distributed\"\n        - distributed_spec: The distributed_spec dict with the following fields:\n            - cluster_spec: TensorFlow ClusterSpec object (required).\n            - job: The tf-job name.\n            - task_index: integer (required).\n            - protocol: communication protocol (default: none, i.e. 
'grpc').\n        - session_config: dict with options for a TensorFlow ConfigProto object (default: None).\n\nReturns: A cleaned-up (in-place) version of the given execution-spec.", "id": "f14229:m2"}
{"signature": "def sanity_check_actions(actions_spec):", "body": "<EOL>actions = copy.deepcopy(actions_spec)<EOL>is_unique = ('<STR_LIT:type>' in actions)<EOL>if is_unique:<EOL><INDENT>actions = dict(action=actions)<EOL><DEDENT>for name, action in actions.items():<EOL><INDENT>if '<STR_LIT:type>' not in action:<EOL><INDENT>action['<STR_LIT:type>'] = '<STR_LIT:int>'<EOL><DEDENT>if action['<STR_LIT:type>'] == '<STR_LIT:int>':<EOL><INDENT>if '<STR_LIT>' not in action:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>elif action['<STR_LIT:type>'] == '<STR_LIT:float>':<EOL><INDENT>if ('<STR_LIT>' in action) != ('<STR_LIT>' in action):<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if '<STR_LIT>' not in action:<EOL><INDENT>action['<STR_LIT>'] = ()<EOL><DEDENT>if isinstance(action['<STR_LIT>'], int):<EOL><INDENT>action['<STR_LIT>'] = (action['<STR_LIT>'],)<EOL><DEDENT><DEDENT>return actions, is_unique<EOL>", "docstring": "Sanity checks an actions dict, used to define the action space for an MDP.\nThrows an error or warns if mismatches are found.\n\nArgs:\n    actions_spec (Union[None,dict]): The spec-dict to check (or None).\n\nReturns: Tuple of 1) the action space desc and 2) whether there is only one component in the action space.", "id": "f14229:m1"}
{"signature": "def reset(self):", "body": "return self.env.reset()<EOL>", "docstring": "Reset environment and setup for new episode.\n\nReturns:\n    initial state of reset environment.", "id": "f14230:c0:m4"}
{"signature": "def close(self):", "body": "self.env = None<EOL>", "docstring": "Close environment. No other method calls possible afterwards.", "id": "f14230:c0:m2"}
{"signature": "def __init__(<EOL>self,<EOL>rom,<EOL>frame_skip=<NUM_LIT:1>,<EOL>repeat_action_probability=<NUM_LIT:0.0>,<EOL>loss_of_life_termination=False,<EOL>loss_of_life_reward=<NUM_LIT:0>,<EOL>display_screen=False,<EOL>seed=np.random.RandomState()<EOL>):", "body": "self.ale = ALEInterface()<EOL>self.rom = rom<EOL>self.ale.setBool(b'<STR_LIT>', display_screen)<EOL>self.ale.setInt(b'<STR_LIT>', seed.randint(<NUM_LIT:0>, <NUM_LIT>))<EOL>self.ale.setFloat(b'<STR_LIT>', repeat_action_probability)<EOL>self.ale.setBool(b'<STR_LIT>', False)<EOL>self.ale.setInt(b'<STR_LIT>', frame_skip)<EOL>self.ale.loadROM(rom.encode())<EOL>width, height = self.ale.getScreenDims()<EOL>self.gamescreen = np.empty((height, width, <NUM_LIT:3>), dtype=np.uint8)<EOL>self.frame_skip = frame_skip<EOL>self.action_inds = self.ale.getMinimalActionSet()<EOL>self.loss_of_life_reward = loss_of_life_reward<EOL>self.cur_lives = self.ale.lives()<EOL>self.loss_of_life_termination = loss_of_life_termination<EOL>self.life_lost = False<EOL>", "docstring": "Initialize ALE.\n\nArgs:\n    rom: Rom filename and directory.\n    frame_skip: Repeat action for n frames. Default 1.\n    repeat_action_probability: Repeats last action with given probability. Default 0.\n    loss_of_life_termination: Signals a terminal state on loss of life. Default False.\n    loss_of_life_reward: Reward/Penalty on loss of life (negative values are a penalty). Default 0.\n    display_screen: Displays the emulator screen. Default False.\n    seed: Random seed", "id": "f14231:c0:m0"}
{"signature": "@property<EOL><INDENT>def num_steps(self):<DEDENT>", "body": "return self.level.num_steps()<EOL>", "docstring": "Number of frames since the last reset() call.", "id": "f14233:c0:m7"}
{"signature": "def __init__(self, gym_id, monitor=None, monitor_safe=False, monitor_video=<NUM_LIT:0>, visualize=False):", "body": "self.gym_id = gym_id<EOL>self.gym = gym.make(gym_id)  <EOL>self.visualize = visualize<EOL>if monitor:<EOL><INDENT>if monitor_video == <NUM_LIT:0>:<EOL><INDENT>video_callable = False<EOL><DEDENT>else:<EOL><INDENT>video_callable = (lambda x: x % monitor_video == <NUM_LIT:0>)<EOL><DEDENT>self.gym = gym.wrappers.Monitor(self.gym, monitor, force=not monitor_safe, video_callable=video_callable)<EOL><DEDENT>self._states = OpenAIGym.state_from_space(space=self.gym.observation_space)<EOL>self._actions = OpenAIGym.action_from_space(space=self.gym.action_space)<EOL>", "docstring": "Initialize OpenAI Gym.\n\nArgs:\n    gym_id: OpenAI Gym environment ID. See https://gym.openai.com/envs\n    monitor: Output directory. Setting this to None disables monitoring.\n    monitor_safe: Setting this to True prevents existing log files to be overwritten. Default False.\n    monitor_video: Save a video every monitor_video steps. Setting this to 0 disables recording of videos.\n    visualize: If set True, the program will visualize the trainings of gym's environment. Note that such\n        visualization is probabily going to slow down the training.", "id": "f14235:c0:m0"}
{"signature": "def tf_dtype(dtype):", "body": "<EOL>if dtype == '<STR_LIT:float>' or dtype == float or dtype == np.float32 or dtype == tf.float32:<EOL><INDENT>return tf.float32<EOL><DEDENT>elif dtype == np.float64 or dtype == tf.float64:<EOL><INDENT>return tf.float64<EOL><DEDENT>elif dtype == np.float16 or dtype == tf.float16:<EOL><INDENT>return tf.float16<EOL><DEDENT>elif dtype == '<STR_LIT:int>' or dtype == int or dtype == np.int32 or dtype == tf.int32:<EOL><INDENT>return tf.int32<EOL><DEDENT>elif dtype == np.int64 or dtype == tf.int64:<EOL><INDENT>return tf.int64<EOL><DEDENT>elif dtype == np.int16 or dtype == tf.int16:<EOL><INDENT>return tf.int16<EOL><DEDENT>elif dtype == '<STR_LIT:bool>' or dtype == bool or dtype == np.bool_ or dtype == tf.bool:<EOL><INDENT>return tf.bool<EOL><DEDENT>else:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\".format(str(dtype)))<EOL><DEDENT>", "docstring": "Translates dtype specifications in configurations to tensorflow data types.\n\n       Args:\n           dtype: String describing a numerical type (e.g. 'float'), numpy data type,\n               or numerical type primitive.\n\n       Returns: TensorFlow data type", "id": "f14239:m4"}
{"signature": "def prod(xs):", "body": "p = <NUM_LIT:1><EOL>for x in xs:<EOL><INDENT>p *= x<EOL><DEDENT>return p<EOL>", "docstring": "Computes the product along the elements in an iterable. Returns 1 for empty iterable.\n\n    Args:\n        xs: Iterable containing numbers.\n\n    Returns: Product along iterable.", "id": "f14239:m0"}
{"signature": "def get_object(obj, predefined_objects=None, default_object=None, kwargs=None):", "body": "args = ()<EOL>kwargs = dict() if kwargs is None else kwargs<EOL>if isinstance(obj, str) and os.path.isfile(obj):<EOL><INDENT>with open(obj, '<STR_LIT:r>') as fp:<EOL><INDENT>obj = json.load(fp=fp)<EOL><DEDENT><DEDENT>if isinstance(obj, dict):<EOL><INDENT>kwargs.update(obj)<EOL>obj = kwargs.pop('<STR_LIT:type>', None)<EOL><DEDENT>if predefined_objects is not None and obj in predefined_objects:<EOL><INDENT>obj = predefined_objects[obj]<EOL><DEDENT>elif isinstance(obj, str):<EOL><INDENT>if obj.find('<STR_LIT:.>') != -<NUM_LIT:1>:<EOL><INDENT>module_name, function_name = obj.rsplit('<STR_LIT:.>', <NUM_LIT:1>)<EOL>module = importlib.import_module(module_name)<EOL>obj = getattr(module, function_name)<EOL><DEDENT>else:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\".format(<EOL>obj,<EOL>list(predefined_objects or ())<EOL>))<EOL><DEDENT><DEDENT>elif callable(obj):<EOL><INDENT>pass<EOL><DEDENT>elif default_object is not None:<EOL><INDENT>args = (obj,)<EOL>obj = default_object<EOL><DEDENT>else:<EOL><INDENT>return obj<EOL><DEDENT>return obj(*args, **kwargs)<EOL>", "docstring": "Utility method to map some kind of object specification to its content,\ne.g. optimizer or baseline specifications to the respective classes.\n\nArgs:\n    obj: A specification dict (value for key 'type' optionally specifies\n            the object, options as follows), a module path (e.g.,\n            my_module.MyClass), a key in predefined_objects, or a callable\n            (e.g., the class type object).\n    predefined_objects: Dict containing predefined set of objects,\n            accessible via their key\n    default_object: Default object is no other is specified\n    kwargs: Arguments for object creation\n\nReturns: The retrieved object", "id": "f14239:m7"}
{"signature": "def register_saver_ops(self):", "body": "variables = self.get_savable_variables()<EOL>if variables is None or len(variables) == <NUM_LIT:0>:<EOL><INDENT>self._saver = None<EOL>return<EOL><DEDENT>base_scope = self._get_base_variable_scope()<EOL>variables_map = {strip_name_scope(v.name, base_scope): v for v in variables}<EOL>self._saver = tf.train.Saver(<EOL>var_list=variables_map,<EOL>reshape=False,<EOL>sharded=False,<EOL>max_to_keep=<NUM_LIT:5>,<EOL>keep_checkpoint_every_n_hours=<NUM_LIT>,<EOL>name=None,<EOL>restore_sequentially=False,<EOL>saver_def=None,<EOL>builder=None,<EOL>defer_build=False,<EOL>allow_empty=True,<EOL>write_version=tf.train.SaverDef.V2,<EOL>pad_step_number=False,<EOL>save_relative_paths=True<EOL>)<EOL>", "docstring": "Registers the saver operations to the graph in context.", "id": "f14239:c0:m0"}
{"signature": "def clone_worker_agent(agent, factor, environment, network, agent_config):", "body": "ret = [agent]<EOL>for i in xrange(factor - <NUM_LIT:1>):<EOL><INDENT>worker = WorkerAgentGenerator(type(agent))(<EOL>states=environment.states,<EOL>actions=environment.actions,<EOL>network=network,<EOL>model=agent.model,<EOL>**agent_config<EOL>)<EOL>ret.append(worker)<EOL><DEDENT>return ret<EOL>", "docstring": "Clones a given Agent (`factor` times) and returns a list of the cloned Agents with the original Agent\nin the first slot.\n\nArgs:\n    agent (Agent): The Agent object to clone.\n    factor (int): The length of the final list.\n    environment (Environment): The Environment to use for all cloned agents.\n    network (LayeredNetwork): The Network to use (or None) for an Agent's Model.\n    agent_config (dict): A dict of Agent specifications passed into the Agent's c'tor as kwargs.\nReturns:\n    The list with `factor` cloned agents (including the original one).", "id": "f14240:m1"}
{"signature": "def reset(self, history=None):", "body": "if not history:<EOL><INDENT>history = dict()<EOL><DEDENT>self.episode_rewards = history.get(\"<STR_LIT>\", list())<EOL>self.episode_timesteps = history.get(\"<STR_LIT>\", list())<EOL>self.episode_times = history.get(\"<STR_LIT>\", list())<EOL>", "docstring": "Resets the Runner's internal stats counters.\nIf history is empty, use default values in history.get().\n\nArgs:\n    history (dict): A dictionary containing an already run experiment's results. Keys should be:\n        episode_rewards (list of rewards), episode_timesteps (lengths of episodes), episode_times (run-times)", "id": "f14241:c0:m1"}
{"signature": "def close(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Should perform clean up operations on Runner's Agent(s) and Environment(s).", "id": "f14241:c0:m2"}
{"signature": "@property<EOL><INDENT>def timestep(self):<DEDENT>", "body": "return self.global_timestep<EOL>", "docstring": "Deprecated property `timestep` -> global_timestep.", "id": "f14241:c0:m5"}
{"signature": "def execute(self, action):", "body": "raise NotImplementedError<EOL>", "docstring": "Executes action, observes next state(s) and reward.\n\nArgs:\n    actions: Actions to execute.\n\nReturns:\n    Tuple of (next state, bool indicating terminal, reward)", "id": "f14270:c0:m4"}
{"signature": "@property<EOL><INDENT>def states(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Return the state space. Might include subdicts if multiple states are \navailable simultaneously.\n\nReturns:\n    States specification, with the following attributes\n        (required):\n        - type: one of 'bool', 'int', 'float' (default: 'float').\n        - shape: integer, or list/tuple of integers (required).", "id": "f14270:c0:m5"}
{"signature": "def create_distributions(self):", "body": "distributions = dict()<EOL>for name in sorted(self.actions_spec):<EOL><INDENT>action = self.actions_spec[name]<EOL>if self.distributions_spec is not None and name in self.distributions_spec:<EOL><INDENT>kwargs = dict(action)<EOL>kwargs['<STR_LIT>'] = name<EOL>kwargs['<STR_LIT>'] = self.summary_labels<EOL>distributions[name] = Distribution.from_spec(<EOL>spec=self.distributions_spec[name],<EOL>kwargs=kwargs<EOL>)<EOL><DEDENT>elif action['<STR_LIT:type>'] == '<STR_LIT:bool>':<EOL><INDENT>distributions[name] = Bernoulli(<EOL>shape=action['<STR_LIT>'],<EOL>scope=name,<EOL>summary_labels=self.summary_labels<EOL>)<EOL><DEDENT>elif action['<STR_LIT:type>'] == '<STR_LIT:int>':<EOL><INDENT>distributions[name] = Categorical(<EOL>shape=action['<STR_LIT>'],<EOL>num_actions=action['<STR_LIT>'],<EOL>scope=name,<EOL>summary_labels=self.summary_labels<EOL>)<EOL><DEDENT>elif action['<STR_LIT:type>'] == '<STR_LIT:float>':<EOL><INDENT>if '<STR_LIT>' in action:<EOL><INDENT>distributions[name] = Beta(<EOL>shape=action['<STR_LIT>'],<EOL>min_value=action['<STR_LIT>'],<EOL>max_value=action['<STR_LIT>'],<EOL>scope=name,<EOL>summary_labels=self.summary_labels<EOL>)<EOL><DEDENT>else:<EOL><INDENT>distributions[name] = Gaussian(<EOL>shape=action['<STR_LIT>'],<EOL>scope=name,<EOL>summary_labels=self.summary_labels<EOL>)<EOL><DEDENT><DEDENT><DEDENT>return distributions<EOL>", "docstring": "Creates and returns the Distribution objects based on self.distributions_spec.\n\nReturns: Dict of distributions according to self.distributions_spec.", "id": "f14274:c0:m2"}
{"signature": "def setup_components_and_tf_funcs(self, custom_getter=None):", "body": "<EOL>self.network = Network.from_spec(<EOL>spec=self.network_spec,<EOL>kwargs=dict(summary_labels=self.summary_labels)<EOL>)<EOL>assert len(self.internals_spec) == <NUM_LIT:0><EOL>self.internals_spec = self.network.internals_spec()<EOL>for name in sorted(self.internals_spec):<EOL><INDENT>internal = self.internals_spec[name]<EOL>self.internals_input[name] = tf.placeholder(<EOL>dtype=util.tf_dtype(internal['<STR_LIT:type>']),<EOL>shape=(None,) + tuple(internal['<STR_LIT>']),<EOL>name=('<STR_LIT>' + name)<EOL>)<EOL>if internal['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>self.internals_init[name] = np.zeros(shape=internal['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>custom_getter = super(DistributionModel, self).setup_components_and_tf_funcs(custom_getter)<EOL>self.distributions = self.create_distributions()<EOL>self.fn_kl_divergence = tf.make_template(<EOL>name_='<STR_LIT>',<EOL>func_=self.tf_kl_divergence,<EOL>custom_getter_=custom_getter<EOL>)<EOL>return custom_getter<EOL>", "docstring": "Creates and stores Network and Distribution objects.\nGenerates and stores all template functions.", "id": "f14274:c0:m1"}
{"signature": "def tf_observe_timestep(self, states, internals, actions, terminal, reward):", "body": "raise NotImplementedError<EOL>", "docstring": "Creates the TensorFlow operations for processing a batch of observations coming in from our buffer (state,\naction, internals) as well as from the agent's python-batch (terminal-signals and rewards from the env).\n\nArgs:\n    states (dict): Dict of state tensors (each key represents one state space component).\n    internals (dict): Dict of prior internal state tensors (each key represents one internal state component).\n    actions (dict): Dict of action tensors (each key represents one action space component).\n    terminal: 1D (bool) tensor of terminal signals.\n    reward: 1D (float) tensor of rewards.\n\nReturns:\n    The observation operation depending on the model type.", "id": "f14275:c0:m16"}
{"signature": "def create_act_operations(self, states, internals, deterministic, independent, index):", "body": "<EOL>operations = list()<EOL>if self.variable_noise is not None and self.variable_noise > <NUM_LIT:0.0>:<EOL><INDENT>self.fn_actions_and_internals(<EOL>states=states,<EOL>internals=internals,<EOL>deterministic=deterministic<EOL>)<EOL>noise_deltas = list()<EOL>for variable in self.get_variables():<EOL><INDENT>noise_delta = tf.random_normal(shape=util.shape(variable), mean=<NUM_LIT:0.0>, stddev=self.variable_noise)<EOL>noise_deltas.append(noise_delta)<EOL>operations.append(variable.assign_add(delta=noise_delta))<EOL><DEDENT><DEDENT>with tf.control_dependencies(control_inputs=operations):<EOL><INDENT>self.actions_output, self.internals_output = self.fn_actions_and_internals(<EOL>states=states,<EOL>internals=internals,<EOL>deterministic=deterministic<EOL>)<EOL><DEDENT>with tf.control_dependencies(control_inputs=[self.actions_output[name] for name in sorted(self.actions_output)]):<EOL><INDENT>operations = list()<EOL>if self.variable_noise is not None and self.variable_noise > <NUM_LIT:0.0>:<EOL><INDENT>for variable, noise_delta in zip(self.get_variables(), noise_deltas):<EOL><INDENT>operations.append(variable.assign_sub(delta=noise_delta))<EOL><DEDENT><DEDENT><DEDENT>with tf.control_dependencies(control_inputs=operations):<EOL><INDENT>for name in sorted(self.actions_exploration):<EOL><INDENT>self.actions_output[name] = tf.cond(<EOL>pred=self.deterministic_input,<EOL>true_fn=(lambda: self.actions_output[name]),<EOL>false_fn=(lambda: self.fn_action_exploration(<EOL>action=self.actions_output[name],<EOL>exploration=self.actions_exploration[name],<EOL>action_spec=self.actions_spec[name]<EOL>))<EOL>)<EOL><DEDENT><DEDENT>def independent_act():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return self.global_timestep<EOL><DEDENT>def normal_act():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>operations = list()<EOL>batch_size = 
tf.shape(input=states[next(iter(sorted(states)))])[<NUM_LIT:0>]<EOL>for name in sorted(states):<EOL><INDENT>operations.append(tf.assign(<EOL>ref=self.list_states_buffer[name][index, self.list_buffer_index[index]: self.list_buffer_index[index] + batch_size],<EOL>value=states[name]<EOL>))<EOL><DEDENT>for name in sorted(internals):<EOL><INDENT>operations.append(tf.assign(<EOL>ref=self.list_internals_buffer[name][index, self.list_buffer_index[index]: self.list_buffer_index[index] + batch_size],<EOL>value=internals[name]<EOL>))<EOL><DEDENT>for name in sorted(self.actions_output):<EOL><INDENT>operations.append(tf.assign(<EOL>ref=self.list_actions_buffer[name][index, self.list_buffer_index[index]: self.list_buffer_index[index] + batch_size],<EOL>value=self.actions_output[name]<EOL>))<EOL><DEDENT>with tf.control_dependencies(control_inputs=operations):<EOL><INDENT>operations = list()<EOL>operations.append(tf.assign(<EOL>ref=self.list_buffer_index[index: index+<NUM_LIT:1>],<EOL>value=tf.add(self.list_buffer_index[index: index+<NUM_LIT:1>], tf.constant([<NUM_LIT:1>]))<EOL>))<EOL>operations.append(tf.assign_add(<EOL>ref=self.timestep,<EOL>value=tf.to_int64(x=batch_size)<EOL>))<EOL>operations.append(tf.assign_add(<EOL>ref=self.global_timestep,<EOL>value=tf.to_int64(x=batch_size)<EOL>))<EOL><DEDENT>with tf.control_dependencies(control_inputs=operations):<EOL><INDENT>return self.global_timestep + <NUM_LIT:0><EOL><DEDENT><DEDENT>self.timestep_output = tf.cond(<EOL>pred=independent,<EOL>true_fn=independent_act,<EOL>false_fn=normal_act<EOL>)<EOL>", "docstring": "Creates and stores tf operations that are fetched when calling act(): actions_output, internals_output and\ntimestep_output.\n\nArgs:\n    states (dict): Dict of state tensors (each key represents one state space component).\n    internals (dict): Dict of prior internal state tensors (each key represents one internal state component).\n    deterministic: 0D (bool) tensor (whether to not use action exploration).\n    
independent (bool): 0D (bool) tensor (whether to store states/internals/action in local buffer).", "id": "f14275:c0:m17"}
{"signature": "def tf_preprocess(self, states, actions, reward):", "body": "<EOL>for name in sorted(self.states_preprocessing):<EOL><INDENT>states[name] = self.states_preprocessing[name].process(tensor=states[name])<EOL><DEDENT>if self.reward_preprocessing is not None:<EOL><INDENT>reward = self.reward_preprocessing.process(tensor=reward)<EOL><DEDENT>return states, actions, reward<EOL>", "docstring": "Applies preprocessing ops to the raw states/action/reward inputs.\n\nArgs:\n    states (dict): Dict of raw state tensors.\n    actions (dict): Dict or raw action tensors.\n    reward: 1D (float) raw rewards tensor.\n\nReturns: The preprocessed versions of the input tensors.", "id": "f14275:c0:m13"}
{"signature": "def get_variables(self, include_submodules=False, include_nontrainable=False):", "body": "if include_nontrainable:<EOL><INDENT>model_variables = [self.all_variables[key] for key in sorted(self.all_variables)]<EOL>states_preprocessing_variables = [<EOL>variable for name in sorted(self.states_preprocessing)<EOL>for variable in self.states_preprocessing[name].get_variables()<EOL>]<EOL>model_variables += states_preprocessing_variables<EOL>actions_exploration_variables = [<EOL>variable for name in sorted(self.actions_exploration)<EOL>for variable in self.actions_exploration[name].get_variables()<EOL>]<EOL>model_variables += actions_exploration_variables<EOL>if self.reward_preprocessing is not None:<EOL><INDENT>reward_preprocessing_variables = self.reward_preprocessing.get_variables()<EOL>model_variables += reward_preprocessing_variables<EOL><DEDENT><DEDENT>else:<EOL><INDENT>model_variables = [self.variables[key] for key in sorted(self.variables)]<EOL><DEDENT>return model_variables<EOL>", "docstring": "Returns the TensorFlow variables used by the model.\n\nArgs:\n    include_submodules: Includes variables of submodules (e.g. baseline, target network)\n        if true.\n    include_nontrainable: Includes non-trainable variables if true.\n\nReturns:\n    List of variables.", "id": "f14275:c0:m21"}
{"signature": "def __init__(<EOL>self,<EOL>states,<EOL>actions,<EOL>scope,<EOL>device,<EOL>saver,<EOL>summarizer,<EOL>execution,<EOL>batching_capacity,<EOL>variable_noise,<EOL>states_preprocessing,<EOL>actions_exploration,<EOL>reward_preprocessing,<EOL>tf_session_dump_dir=\"<STR_LIT>\"<EOL>):", "body": "<EOL>self.network = None<EOL>self.states_spec = states<EOL>self.internals_spec = dict()<EOL>self.actions_spec = actions<EOL>self.scope = scope<EOL>self.device = device<EOL>if saver is None or saver.get('<STR_LIT>') is None:<EOL><INDENT>self.saver_spec = None<EOL><DEDENT>else:<EOL><INDENT>self.saver_spec = saver<EOL><DEDENT>if summarizer is None or summarizer.get('<STR_LIT>') is None:<EOL><INDENT>self.summarizer_spec = None<EOL>self.summary_labels = set()<EOL><DEDENT>else:<EOL><INDENT>self.summarizer_spec = summarizer<EOL>self.summary_labels = set(self.summarizer_spec.get('<STR_LIT>', ()))<EOL><DEDENT>self.summarizer = None<EOL>self.graph_summary = None<EOL>self.summarizer_init_op = None<EOL>self.flush_summarizer = None<EOL>self.execution_spec = execution<EOL>self.execution_type = self.execution_spec[\"<STR_LIT:type>\"]<EOL>self.session_config = self.execution_spec[\"<STR_LIT>\"]<EOL>self.distributed_spec = self.execution_spec[\"<STR_LIT>\"]<EOL>assert batching_capacity is None or (isinstance(batching_capacity, int) and batching_capacity > <NUM_LIT:0>)<EOL>self.batching_capacity = batching_capacity or <NUM_LIT:1>  <EOL>self.tf_session_dump_dir = tf_session_dump_dir<EOL>self.num_parallel = self.execution_spec.get('<STR_LIT>')<EOL>if self.num_parallel is None:<EOL><INDENT>self.num_parallel = <NUM_LIT:1><EOL><DEDENT>self.list_states_buffer = dict()<EOL>self.list_internals_buffer = dict()<EOL>self.list_actions_buffer = dict()<EOL>self.list_buffer_index = [None for _ in range(self.num_parallel)]<EOL>self.episode_output = None<EOL>self.episode_index_input = None<EOL>self.unbuffered_episode_output = None<EOL>assert variable_noise is None or variable_noise > 
<NUM_LIT:0.0><EOL>self.variable_noise = variable_noise<EOL>self.states_preprocessing_spec = states_preprocessing<EOL>self.actions_exploration_spec = actions_exploration<EOL>self.reward_preprocessing_spec = reward_preprocessing<EOL>self.variables = None<EOL>self.all_variables = None<EOL>self.registered_variables = None<EOL>self.list_timestep = [None for _ in range(self.num_parallel)]<EOL>self.episode = None<EOL>self.global_timestep = None<EOL>self.global_episode = None<EOL>self.states_input = dict()<EOL>self.states_preprocessing = dict()<EOL>self.internals_input = dict()<EOL>self.internals_init = dict()<EOL>self.actions_input = dict()<EOL>self.actions_exploration = dict()<EOL>self.terminal_input = None<EOL>self.reward_input = None<EOL>self.reward_preprocessing = None<EOL>self.deterministic_input = None<EOL>self.independent_input = None<EOL>self.update_input = None<EOL>self.fn_initialize = None<EOL>self.fn_preprocess = None<EOL>self.fn_actions_and_internals = None<EOL>self.fn_observe_timestep = None<EOL>self.fn_action_exploration = None<EOL>self.graph = None<EOL>self.global_model = None<EOL>self.is_local_model = True<EOL>self.server = None<EOL>self.summarizer = None<EOL>self.saver = None<EOL>self.saver_directory = None<EOL>self.scaffold = None<EOL>self.session = None<EOL>self.monitored_session = None<EOL>self.actions_output = None<EOL>self.internals_output = None<EOL>self.timestep_output = None<EOL>self.list_buffer_index_reset_op = None<EOL>self.setup()<EOL>", "docstring": "Model.\n\nArgs:\n    states (spec): The state-space description dictionary.\n    actions (spec): The action-space description dictionary.\n    scope (str): The root scope str to use for tf variable scoping.\n    device (str): The name of the device to run the graph of this model on.\n    saver (spec): Dict specifying whether and how to save the model's parameters.\n    summarizer (spec): Dict specifying which tensorboard summaries should be created and added to the graph.\n    execution (spec): 
Dict specifying whether and how to do distributed training on the model's graph.\n    batching_capacity (int): Batching capacity.\n    variable_noise (float): The stddev value of a Normal distribution used for adding random\n        noise to the model's output (for each batch, noise can be toggled and - if active - will be resampled).\n        Use None for not adding any noise.\n    states_preprocessing (spec / dict of specs): Dict specifying whether and how to preprocess state signals\n        (e.g. normalization, greyscale, etc..).\n    actions_exploration (spec / dict of specs): Dict specifying whether and how to add exploration to the model's\n        \"action outputs\" (e.g. epsilon-greedy).\n    reward_preprocessing (spec): Dict specifying whether and how to preprocess rewards coming\n        from the Environment (e.g. reward normalization).\n    tf_session_dump_dir (str): If non-empty string, all session.run calls will be dumped using the tensorflow\n        offline-debug session into the given directory.\n    execution: (dict)\n        - num_parallel: (int) number of parallel episodes", "id": "f14275:c0:m0"}
{"signature": "def setup_scaffold(self):", "body": "if self.execution_type == \"<STR_LIT>\":<EOL><INDENT>global_variables = self.get_variables(include_submodules=True, include_nontrainable=True)<EOL>init_op = tf.variables_initializer(var_list=global_variables)<EOL>if self.summarizer_init_op is not None:<EOL><INDENT>init_op = tf.group(init_op, self.summarizer_init_op)<EOL><DEDENT>if self.graph_summary is None:<EOL><INDENT>ready_op = tf.report_uninitialized_variables(var_list=global_variables)<EOL>ready_for_local_init_op = None<EOL>local_init_op = None<EOL><DEDENT>else:<EOL><INDENT>ready_op = None<EOL>ready_for_local_init_op = tf.report_uninitialized_variables(var_list=global_variables)<EOL>local_init_op = self.graph_summary<EOL><DEDENT><DEDENT>else:<EOL><INDENT>global_variables = self.global_model.get_variables(include_submodules=True, include_nontrainable=True)<EOL>local_variables = self.get_variables(include_submodules=True, include_nontrainable=True)<EOL>init_op = tf.variables_initializer(var_list=global_variables)<EOL>if self.summarizer_init_op is not None:<EOL><INDENT>init_op = tf.group(init_op, self.summarizer_init_op)<EOL><DEDENT>ready_op = tf.report_uninitialized_variables(var_list=(global_variables + local_variables))<EOL>ready_for_local_init_op = tf.report_uninitialized_variables(var_list=global_variables)<EOL>if self.graph_summary is None:<EOL><INDENT>local_init_op = tf.group(<EOL>tf.variables_initializer(var_list=local_variables),<EOL>*(tf.assign(ref=local_var, value=global_var) for local_var, global_var in zip(<EOL>self.get_variables(include_submodules=True),<EOL>self.global_model.get_variables(include_submodules=True)<EOL>))<EOL>)<EOL><DEDENT>else:<EOL><INDENT>local_init_op = tf.group(<EOL>tf.variables_initializer(var_list=local_variables),<EOL>self.graph_summary,<EOL>*(tf.assign(ref=local_var, value=global_var) for local_var, global_var in 
zip(<EOL>self.get_variables(include_submodules=True),<EOL>self.global_model.get_variables(include_submodules=True)<EOL>))<EOL>)<EOL><DEDENT><DEDENT>def init_fn(scaffold, session):<EOL><INDENT>if self.saver_spec is not None and self.saver_spec.get('<STR_LIT>', True):<EOL><INDENT>directory = self.saver_spec['<STR_LIT>']<EOL>file = self.saver_spec.get('<STR_LIT:file>')<EOL>if file is None:<EOL><INDENT>file = tf.train.latest_checkpoint(<EOL>checkpoint_dir=directory,<EOL>latest_filename=None  <EOL>)<EOL><DEDENT>elif not os.path.isfile(file):<EOL><INDENT>file = os.path.join(directory, file)<EOL><DEDENT>if file is not None:<EOL><INDENT>try:<EOL><INDENT>scaffold.saver.restore(sess=session, save_path=file)<EOL>session.run(fetches=self.list_buffer_index_reset_op)<EOL><DEDENT>except tf.errors.NotFoundError:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT><DEDENT>self.scaffold = tf.train.Scaffold(<EOL>init_op=init_op,<EOL>init_feed_dict=None,<EOL>init_fn=init_fn,<EOL>ready_op=ready_op,<EOL>ready_for_local_init_op=ready_for_local_init_op,<EOL>local_init_op=local_init_op,<EOL>summary_op=None,<EOL>saver=self.saver,<EOL>copy_from_scaffold=None<EOL>)<EOL>", "docstring": "Creates the tf.train.Scaffold object and assigns it to self.scaffold.\nOther fields of the Scaffold are generated automatically.", "id": "f14275:c0:m7"}
{"signature": "def act(self, states, internals, deterministic=False, independent=False, fetch_tensors=None, index=<NUM_LIT:0>):", "body": "name = next(iter(states))<EOL>state = np.asarray(states[name])<EOL>batched = (state.ndim != len(self.states_spec[name]['<STR_LIT>']))<EOL>if batched:<EOL><INDENT>assert state.shape[<NUM_LIT:0>] <= self.batching_capacity<EOL><DEDENT>fetches = [self.actions_output, self.internals_output, self.timestep_output]<EOL>if self.network is not None and fetch_tensors is not None:<EOL><INDENT>for name in fetch_tensors:<EOL><INDENT>valid, tensor = self.network.get_named_tensor(name)<EOL>if valid:<EOL><INDENT>fetches.append(tensor)<EOL><DEDENT>else:<EOL><INDENT>keys = self.network.get_list_of_named_tensor()<EOL>raise TensorForceError('<STR_LIT>'.format(name, keys))<EOL><DEDENT><DEDENT><DEDENT>feed_dict = self.get_feed_dict(<EOL>states=states,<EOL>internals=internals,<EOL>deterministic=deterministic,<EOL>independent=independent,<EOL>index=index<EOL>)<EOL>fetch_list = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict)<EOL>actions, internals, timestep = fetch_list[<NUM_LIT:0>:<NUM_LIT:3>]<EOL>if not batched:<EOL><INDENT>actions = {name: actions[name][<NUM_LIT:0>] for name in sorted(actions)}<EOL>internals = {name: internals[name][<NUM_LIT:0>] for name in sorted(internals)}<EOL><DEDENT>if self.network is not None and fetch_tensors is not None:<EOL><INDENT>fetch_dict = dict()<EOL>for index_, tensor in enumerate(fetch_list[<NUM_LIT:3>:]):<EOL><INDENT>name = fetch_tensors[index_]<EOL>fetch_dict[name] = tensor<EOL><DEDENT>return actions, internals, timestep, fetch_dict<EOL><DEDENT>else:<EOL><INDENT>return actions, internals, timestep<EOL><DEDENT>", "docstring": "Does a forward pass through the model to retrieve action (outputs) given inputs for state (and internal\nstate, if applicable (e.g. 
RNNs))\n\nArgs:\n    states (dict): Dict of state values (each key represents one state space component).\n    internals (dict): Dict of internal state values (each key represents one internal state component).\n    deterministic (bool): If True, will not apply exploration after actions are calculated.\n    independent (bool): If true, action is not followed by observe (and hence not included\n        in updates).\n    fetch_tensors (list): List of names of additional tensors (from the model's network) to fetch (and return).\n    index: (int) index of the episode we want to produce the next action\n\nReturns:\n    tuple:\n        - Actual action-outputs (batched if state input is a batch).\n        - Actual values of internal states (if applicable) (batched if state input is a batch).\n        - The timestep (int) after calculating the (batch of) action(s).", "id": "f14275:c0:m24"}
{"signature": "def get_components(self):", "body": "return dict()<EOL>", "docstring": "Returns a dictionary of component name to component of all the components within this model.\n\nReturns:\n    (dict) The mapping of name to component.", "id": "f14275:c0:m29"}
{"signature": "def get_savable_components(self):", "body": "components = self.get_components()<EOL>components = [components[name] for name in sorted(components)]<EOL>return set(filter(lambda x: isinstance(x, util.SavableComponent), components))<EOL>", "docstring": "Returns the list of all of the components this model consists of that can be individually saved and restored.\nFor instance the network or distribution.\n\nReturns:\n    List of util.SavableComponent", "id": "f14275:c0:m30"}
{"signature": "def reset(self):", "body": "fetches = [self.global_episode, self.global_timestep]<EOL>for name in sorted(self.states_preprocessing):<EOL><INDENT>fetch = self.states_preprocessing[name].reset()<EOL>if fetch is not None:<EOL><INDENT>fetches.extend(fetch)<EOL><DEDENT><DEDENT>if self.flush_summarizer is not None:<EOL><INDENT>fetches.append(self.flush_summarizer)<EOL><DEDENT>fetch_list = self.monitored_session.run(fetches=fetches)<EOL>episode, timestep = fetch_list[:<NUM_LIT:2>]<EOL>return episode, timestep, self.internals_init<EOL>", "docstring": "Resets the model to its initial state on episode start. This should also reset all preprocessor(s).\n\nReturns:\n    tuple:\n        Current episode, timestep counter and the shallow-copied list of internal state initialization Tensors.", "id": "f14275:c0:m22"}
{"signature": "def create_observe_operations(self, terminal, reward, index):", "body": "<EOL>num_episodes = tf.count_nonzero(input_tensor=terminal, dtype=util.tf_dtype('<STR_LIT:int>'))<EOL>increment_episode = tf.assign_add(ref=self.episode, value=tf.to_int64(x=num_episodes))<EOL>increment_global_episode = tf.assign_add(ref=self.global_episode, value=tf.to_int64(x=num_episodes))<EOL>with tf.control_dependencies(control_inputs=(increment_episode, increment_global_episode)):<EOL><INDENT>fn = (lambda x: tf.stop_gradient(input=x[:self.list_buffer_index[index]]))<EOL>states = util.map_tensors(fn=fn, tensors=self.list_states_buffer, index=index)<EOL>internals = util.map_tensors(fn=fn, tensors=self.list_internals_buffer, index=index)<EOL>actions = util.map_tensors(fn=fn, tensors=self.list_actions_buffer, index=index)<EOL>terminal = tf.stop_gradient(input=terminal)<EOL>reward = tf.stop_gradient(input=reward)<EOL>observation = self.fn_observe_timestep(<EOL>states=states,<EOL>internals=internals,<EOL>actions=actions,<EOL>terminal=terminal,<EOL>reward=reward<EOL>)<EOL><DEDENT>with tf.control_dependencies(control_inputs=(observation,)):<EOL><INDENT>reset_index = tf.assign(ref=self.list_buffer_index[index], value=<NUM_LIT:0>)<EOL><DEDENT>with tf.control_dependencies(control_inputs=(reset_index,)):<EOL><INDENT>self.episode_output = self.global_episode + <NUM_LIT:0><EOL><DEDENT>self.list_buffer_index_reset_op = tf.group(<EOL>*(tf.assign(ref=self.list_buffer_index[n], value=<NUM_LIT:0>) for n in range(self.num_parallel))<EOL>)<EOL>", "docstring": "Returns the tf op to fetch when an observation batch is passed in (e.g. an episode's rewards and\nterminals). 
Uses the filled tf buffers for states, actions and internals to run\nthe tf_observe_timestep (model-dependent), resets buffer index and increases counters (episodes,\ntimesteps).\n\nArgs:\n    terminal: The 1D tensor (bool) of terminal signals to process (more than one True within that list is ok).\n    reward: The 1D tensor (float) of rewards to process.\n\nReturns: Tf op to fetch when `observe()` is called.", "id": "f14275:c0:m18"}
{"signature": "def setup_saver(self):", "body": "if self.execution_type == \"<STR_LIT>\":<EOL><INDENT>global_variables = self.get_variables(include_submodules=True, include_nontrainable=True)<EOL><DEDENT>else:<EOL><INDENT>global_variables = self.global_model.get_variables(include_submodules=True, include_nontrainable=True)<EOL><DEDENT>for c in self.get_savable_components():<EOL><INDENT>c.register_saver_ops()<EOL><DEDENT>self.saver = tf.train.Saver(<EOL>var_list=global_variables,  <EOL>reshape=False,<EOL>sharded=False,<EOL>max_to_keep=<NUM_LIT:5>,<EOL>keep_checkpoint_every_n_hours=<NUM_LIT>,<EOL>name=None,<EOL>restore_sequentially=False,<EOL>saver_def=None,<EOL>builder=None,<EOL>defer_build=False,<EOL>allow_empty=True,<EOL>write_version=tf.train.SaverDef.V2,<EOL>pad_step_number=False,<EOL>save_relative_paths=True<EOL>)<EOL>", "docstring": "Creates the tf.train.Saver object and stores it in self.saver.", "id": "f14275:c0:m6"}
{"signature": "def setup_components_and_tf_funcs(self, custom_getter=None):", "body": "if custom_getter is None:<EOL><INDENT>def custom_getter(getter, name, registered=False, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if registered:<EOL><INDENT>self.registered_variables.add(name)<EOL><DEDENT>elif name in self.registered_variables:<EOL><INDENT>registered = True<EOL><DEDENT>variable = getter(name=name, **kwargs)<EOL>if registered:<EOL><INDENT>pass<EOL><DEDENT>elif name in self.all_variables:<EOL><INDENT>assert variable is self.all_variables[name]<EOL>if kwargs.get('<STR_LIT>', True):<EOL><INDENT>assert variable is self.variables[name]<EOL>if '<STR_LIT>' in self.summary_labels:<EOL><INDENT>tf.contrib.summary.histogram(name=name, tensor=variable)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self.all_variables[name] = variable<EOL>if kwargs.get('<STR_LIT>', True):<EOL><INDENT>self.variables[name] = variable<EOL>if '<STR_LIT>' in self.summary_labels:<EOL><INDENT>tf.contrib.summary.histogram(name=name, tensor=variable)<EOL><DEDENT><DEDENT><DEDENT>return variable<EOL><DEDENT><DEDENT>self.fn_initialize = tf.make_template(<EOL>name_='<STR_LIT>',<EOL>func_=self.tf_initialize,<EOL>custom_getter_=custom_getter<EOL>)<EOL>self.fn_preprocess = tf.make_template(<EOL>name_='<STR_LIT>',<EOL>func_=self.tf_preprocess,<EOL>custom_getter_=custom_getter<EOL>)<EOL>self.fn_actions_and_internals = tf.make_template(<EOL>name_='<STR_LIT>',<EOL>func_=self.tf_actions_and_internals,<EOL>custom_getter_=custom_getter<EOL>)<EOL>self.fn_observe_timestep = tf.make_template(<EOL>name_='<STR_LIT>',<EOL>func_=self.tf_observe_timestep,<EOL>custom_getter_=custom_getter<EOL>)<EOL>self.fn_action_exploration = tf.make_template(<EOL>name_='<STR_LIT>',<EOL>func_=self.tf_action_exploration,<EOL>custom_getter_=custom_getter<EOL>)<EOL>return custom_getter<EOL>", "docstring": "Allows child models to create model's component objects, such as optimizer(s), memory(s), etc..\nCreates all tensorflow functions via 
tf.make_template calls on all the class' \"tf_\"-methods.\n\nArgs:\n    custom_getter: The `custom_getter_` object to use for `tf.make_template` when creating TensorFlow functions.\n        If None, use a default custom_getter_.\n\nReturns: The custom_getter passed in (or a default one if custom_getter was None).", "id": "f14275:c0:m5"}
{"signature": "def tf_actions_and_internals(self, states, internals, deterministic):", "body": "raise NotImplementedError<EOL>", "docstring": "Creates and returns the TensorFlow operations for retrieving the actions and - if applicable -\nthe posterior internal state Tensors in reaction to the given input states (and prior internal states).\n\nArgs:\n    states (dict): Dict of state tensors (each key represents one state space component).\n    internals (dict): Dict of internal state tensors (each key represents one internal space component).\n    deterministic: Boolean tensor indicating whether action should be chosen\n        deterministically.\n\nReturns:\n    tuple:\n        1) dict of output actions (with or without exploration applied (see `deterministic`))\n        2) list of posterior internal state Tensors (empty for non-internal state models)", "id": "f14275:c0:m15"}
{"signature": "def create_atomic_observe_operations(self, states, actions, internals, terminal, reward, index):", "body": "<EOL>num_episodes = tf.count_nonzero(input_tensor=terminal, dtype=util.tf_dtype('<STR_LIT:int>'))<EOL>increment_episode = tf.assign_add(ref=self.episode, value=tf.to_int64(x=num_episodes))<EOL>increment_global_episode = tf.assign_add(ref=self.global_episode, value=tf.to_int64(x=num_episodes))<EOL>with tf.control_dependencies(control_inputs=(increment_episode, increment_global_episode)):<EOL><INDENT>states = util.map_tensors(fn=tf.stop_gradient, tensors=states)<EOL>internals = util.map_tensors(fn=tf.stop_gradient, tensors=internals)<EOL>actions = util.map_tensors(fn=tf.stop_gradient, tensors=actions)<EOL>terminal = tf.stop_gradient(input=terminal)<EOL>reward = tf.stop_gradient(input=reward)<EOL>observation = self.fn_observe_timestep(<EOL>states=states,<EOL>internals=internals,<EOL>actions=actions,<EOL>terminal=terminal,<EOL>reward=reward<EOL>)<EOL><DEDENT>with tf.control_dependencies(control_inputs=(observation,)):<EOL><INDENT>self.unbuffered_episode_output = self.global_episode + <NUM_LIT:0><EOL><DEDENT>", "docstring": "Returns the tf op to fetch when unbuffered observations are passed in.\n\nArgs:\n    states (any): One state (usually a value tuple) or dict of states if multiple states are expected.\n    actions (any): One action (usually a value tuple) or dict of states if multiple actions are expected.\n    internals (any): Internal list.\n    terminal (bool): boolean indicating if the episode terminated after the observation.\n    reward (float): scalar reward that resulted from executing the action.\n\nReturns: Tf op to fetch when `observe()` is called.", "id": "f14275:c0:m19"}
{"signature": "def tf_optimization(self, states, internals, actions, terminal, reward, next_states=None, next_internals=None):", "body": "arguments = self.optimizer_arguments(<EOL>states=states,<EOL>internals=internals,<EOL>actions=actions,<EOL>terminal=terminal,<EOL>reward=reward,<EOL>next_states=next_states,<EOL>next_internals=next_internals<EOL>)<EOL>return self.optimizer.minimize(**arguments)<EOL>", "docstring": "Creates the TensorFlow operations for performing an optimization update step based\non the given input states and actions batch.\n\nArgs:\n    states: Dict of state tensors.\n    internals: List of prior internal state tensors.\n    actions: Dict of action tensors.\n    terminal: Terminal boolean tensor.\n    reward: Reward tensor.\n    next_states: Dict of successor state tensors.\n    next_internals: List of posterior internal state tensors.\n\nReturns:\n    The optimization operation.", "id": "f14277:c0:m10"}
{"signature": "def tf_import_experience(self, states, internals, actions, terminal, reward):", "body": "return self.memory.store(<EOL>states=states,<EOL>internals=internals,<EOL>actions=actions,<EOL>terminal=terminal,<EOL>reward=reward<EOL>)<EOL>", "docstring": "Imports experiences into the TensorFlow memory structure. Can be used to import\noff-policy data.\n\n:param states: Dict of state values to import with keys as state names and values as values to set.\n:param internals: Internal values to set, can be fetched from agent via agent.current_internals\n    if no values available.\n:param actions: Dict of action values to import with keys as action names and values as values to set.\n:param terminal: Terminal value(s)\n:param reward: Reward value(s)", "id": "f14277:c0:m12"}
{"signature": "def tf_loss(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):", "body": "<EOL>loss_per_instance = self.fn_loss_per_instance(<EOL>states=states,<EOL>internals=internals,<EOL>actions=actions,<EOL>terminal=terminal,<EOL>reward=reward,<EOL>next_states=next_states,<EOL>next_internals=next_internals,<EOL>update=update,<EOL>reference=reference<EOL>)<EOL>updated = self.memory.update_batch(loss_per_instance=loss_per_instance)<EOL>with tf.control_dependencies(control_inputs=(updated,)):<EOL><INDENT>loss = tf.reduce_mean(input_tensor=loss_per_instance, axis=<NUM_LIT:0>)<EOL>if '<STR_LIT>' in self.summary_labels:<EOL><INDENT>tf.contrib.summary.scalar(name='<STR_LIT>', tensor=loss)<EOL><DEDENT>losses = self.fn_regularization_losses(states=states, internals=internals, update=update)<EOL>if len(losses) > <NUM_LIT:0>:<EOL><INDENT>loss += tf.add_n(inputs=[losses[name] for name in sorted(losses)])<EOL>if '<STR_LIT>' in self.summary_labels:<EOL><INDENT>for name in sorted(losses):<EOL><INDENT>tf.contrib.summary.scalar(name=('<STR_LIT>' + name), tensor=losses[name])<EOL><DEDENT><DEDENT><DEDENT>if '<STR_LIT>' in self.summary_labels or '<STR_LIT>' in self.summary_labels:<EOL><INDENT>tf.contrib.summary.scalar(name='<STR_LIT>', tensor=loss)<EOL><DEDENT>return loss<EOL><DEDENT>", "docstring": "Creates the TensorFlow operations for calculating the full loss of a batch.\n\nArgs:\n    states: Dict of state tensors.\n    internals: List of prior internal state tensors.\n    actions: Dict of action tensors.\n    terminal: Terminal boolean tensor.\n    reward: Reward tensor.\n    next_states: Dict of successor state tensors.\n    next_internals: List of posterior internal state tensors.\n    update: Boolean tensor indicating whether this call happens during an update.\n    reference: Optional reference tensor(s), in case of a comparative loss.\n\nReturns:\n    Loss tensor.", "id": "f14277:c0:m8"}
{"signature": "def tf_discounted_cumulative_reward(self, terminal, reward, discount=None, final_reward=<NUM_LIT:0.0>, horizon=<NUM_LIT:0>):", "body": "<EOL>if discount is None:<EOL><INDENT>discount = self.discount<EOL><DEDENT>def cumulate(cumulative, reward_terminal_horizon_subtract):<EOL><INDENT>rew, is_terminal, is_over_horizon, sub = reward_terminal_horizon_subtract<EOL>return tf.where(<EOL>condition=is_terminal,<EOL>x=rew,<EOL>y=tf.where(<EOL>condition=is_over_horizon,<EOL>x=(rew + cumulative * discount - sub),<EOL>y=(rew + cumulative * discount)<EOL>)<EOL>)<EOL><DEDENT>def len_(cumulative, term):<EOL><INDENT>return tf.where(<EOL>condition=term,<EOL>x=tf.ones(shape=(), dtype=tf.int32),<EOL>y=cumulative + <NUM_LIT:1><EOL>)<EOL><DEDENT>reward = tf.reverse(tensor=reward, axis=(<NUM_LIT:0>,))<EOL>terminal = tf.reverse(tensor=terminal, axis=(<NUM_LIT:0>,))<EOL>lengths = tf.scan(fn=len_, elems=terminal, initializer=<NUM_LIT:0>)<EOL>off_horizon = tf.greater(lengths, tf.fill(dims=tf.shape(lengths), value=horizon))<EOL>if horizon > <NUM_LIT:0>:<EOL><INDENT>horizon_subtractions = tf.map_fn(lambda x: (discount ** horizon) * x, reward, dtype=tf.float32)<EOL>horizon_subtractions = tf.concat([np.zeros(shape=(horizon,)), horizon_subtractions], axis=<NUM_LIT:0>)<EOL>horizon_subtractions = tf.slice(horizon_subtractions, begin=(<NUM_LIT:0>,), size=tf.shape(reward))<EOL><DEDENT>else:<EOL><INDENT>horizon_subtractions = tf.zeros(shape=tf.shape(reward))<EOL><DEDENT>reward = tf.scan(<EOL>fn=cumulate,<EOL>elems=(reward, terminal, off_horizon, horizon_subtractions),<EOL>initializer=final_reward if horizon != <NUM_LIT:1> else <NUM_LIT:0.0><EOL>)<EOL>return tf.reverse(tensor=reward, axis=(<NUM_LIT:0>,))<EOL>", "docstring": "Creates and returns the TensorFlow operations for calculating the sequence of discounted cumulative rewards\nfor a given sequence of single rewards.\n\nExample:\nsingle rewards = 2.0 1.0 0.0 0.5 1.0 -1.0\nterminal = False, False, False, False True False\ngamma = 
0.95\nfinal_reward = 100.0 (only matters for last episode (r=-1.0) as this episode has no terminal signal)\nhorizon=3\noutput = 2.95 1.45 1.38 1.45 1.0 94.0\n\nArgs:\n    terminal: Tensor (bool) holding the is-terminal sequence. This sequence may contain more than one\n        True value. If its very last element is False (not terminating), the given `final_reward` value\n        is assumed to follow the last value in the single rewards sequence (see below).\n    reward: Tensor (float) holding the sequence of single rewards. If the last element of `terminal` is False,\n        an assumed last reward of the value of `final_reward` will be used.\n    discount (float): The discount factor (gamma). By default, take the Model's discount factor.\n    final_reward (float): Reward value to use if last episode in sequence does not terminate (terminal sequence\n        ends with False). This value will be ignored if horizon == 1 or discount == 0.0.\n    horizon (int): The length of the horizon (e.g. for n-step cumulative rewards in continuous tasks\n        without terminal signals). Use 0 (default) for an infinite horizon. Note that horizon=1 leads to the\n        exact same results as a discount factor of 0.0.\n\nReturns:\n    Discounted cumulative reward tensor with the same shape as `reward`.", "id": "f14277:c0:m4"}
{"signature": "def __init__(<EOL>self,<EOL>states,<EOL>actions,<EOL>scope,<EOL>device,<EOL>saver,<EOL>summarizer,<EOL>execution,<EOL>batching_capacity,<EOL>variable_noise,<EOL>states_preprocessing,<EOL>actions_exploration,<EOL>reward_preprocessing,<EOL>update_mode,<EOL>memory,<EOL>optimizer,<EOL>discount<EOL>):", "body": "self.update_mode = update_mode<EOL>self.memory_spec = memory<EOL>self.optimizer_spec = optimizer<EOL>assert discount is None or discount >= <NUM_LIT:0.0><EOL>self.discount = discount<EOL>self.memory = None<EOL>self.optimizer = None<EOL>self.fn_discounted_cumulative_reward = None<EOL>self.fn_reference = None<EOL>self.fn_loss_per_instance = None<EOL>self.fn_regularization_losses = None<EOL>self.fn_loss = None<EOL>self.fn_optimization = None<EOL>self.fn_import_experience = None<EOL>super(MemoryModel, self).__init__(<EOL>states=states,<EOL>actions=actions,<EOL>scope=scope,<EOL>device=device,<EOL>saver=saver,<EOL>summarizer=summarizer,<EOL>execution=execution,<EOL>batching_capacity=batching_capacity,<EOL>variable_noise=variable_noise,<EOL>states_preprocessing=states_preprocessing,<EOL>actions_exploration=actions_exploration,<EOL>reward_preprocessing=reward_preprocessing<EOL>)<EOL>", "docstring": "Memory model.\n\nArgs:\n    states (spec): The state-space description dictionary.\n    actions (spec): The action-space description dictionary.\n    scope (str): The root scope str to use for tf variable scoping.\n    device (str): The name of the device to run the graph of this model on.\n    saver (spec): Dict specifying whether and how to save the model's parameters.\n    summarizer (spec): Dict specifying which tensorboard summaries should be created and added to the graph.\n    execution (spec): Dict specifying whether and how to do distributed training on the model's graph.\n    batching_capacity (int): Batching capacity.\n    variable_noise (float): The stddev value of a Normal distribution used for adding random\n        noise to the model's output 
(for each batch, noise can be toggled and - if active - will be resampled).\n        Use None for not adding any noise.\n    states_preprocessing (spec / dict of specs): Dict specifying whether and how to preprocess state signals\n        (e.g. normalization, greyscale, etc..).\n    actions_exploration (spec / dict of specs): Dict specifying whether and how to add exploration to the model's\n        \"action outputs\" (e.g. epsilon-greedy).\n    reward_preprocessing (spec): Dict specifying whether and how to preprocess rewards coming\n        from the Environment (e.g. reward normalization).\n    update_mode (spec): Update mode.\n    memory (spec): Memory.\n    optimizer (spec): Dict specifying the tf optimizer to use for tuning the model's trainable parameters.\n    discount (float): The RL reward discount factor (gamma).", "id": "f14277:c0:m0"}
{"signature": "def tf_initialize(self):", "body": "super(MemoryModel, self).tf_initialize()<EOL>self.memory.initialize()<EOL>", "docstring": "Also initializes our Memory object (self.memory).", "id": "f14277:c0:m3"}
{"signature": "def tf_baseline_loss(self, states, internals, reward, update, reference=None):", "body": "if self.baseline_mode == '<STR_LIT>':<EOL><INDENT>loss = self.baseline.loss(<EOL>states=states,<EOL>internals=internals,<EOL>reward=reward,<EOL>update=update,<EOL>reference=reference<EOL>)<EOL><DEDENT>elif self.baseline_mode == '<STR_LIT>':<EOL><INDENT>loss = self.baseline.loss(<EOL>states=self.network.apply(x=states, internals=internals, update=update),<EOL>internals=internals,<EOL>reward=reward,<EOL>update=update,<EOL>reference=reference<EOL>)<EOL><DEDENT>regularization_loss = self.baseline.regularization_loss()<EOL>if regularization_loss is not None:<EOL><INDENT>loss += regularization_loss<EOL><DEDENT>return loss<EOL>", "docstring": "Creates the TensorFlow operations for calculating the baseline loss of a batch.\n\nArgs:\n    states: Dict of state tensors.\n    internals: List of prior internal state tensors.\n    reward: Reward tensor.\n    update: Boolean tensor indicating whether this call happens during an update.\n    reference: Optional reference tensor(s), in case of a comparative loss.\n\nReturns:\n    Loss tensor.", "id": "f14283:c0:m5"}
{"signature": "def tf_import_demo_experience(self, states, internals, actions, terminal, reward):", "body": "return self.demo_memory.store(<EOL>states=states,<EOL>internals=internals,<EOL>actions=actions,<EOL>terminal=terminal,<EOL>reward=reward<EOL>)<EOL>", "docstring": "Imports a single experience to memory.", "id": "f14284:c0:m3"}
{"signature": "def target_optimizer_arguments(self):", "body": "variables = self.target_network.get_variables() + [<EOL>variable for name in sorted(self.target_distributions)<EOL>for variable in self.target_distributions[name].get_variables()<EOL>]<EOL>source_variables = self.network.get_variables() + [<EOL>variable for name in sorted(self.distributions)<EOL>for variable in self.distributions[name].get_variables()<EOL>]<EOL>arguments = dict(<EOL>time=self.global_timestep,<EOL>variables=variables,<EOL>source_variables=source_variables<EOL>)<EOL>if self.global_model is not None:<EOL><INDENT>arguments['<STR_LIT>'] = self.global_model.target_network.get_variables() + [<EOL>variable for name in sorted(self.global_model.target_distributions)<EOL>for variable in self.global_model.target_distributions[name].get_variables()<EOL>]<EOL><DEDENT>return arguments<EOL>", "docstring": "Returns the target optimizer arguments including the time, the list of variables to  \noptimize, and various functions which the optimizer might require to perform an update  \nstep.\n\nReturns:\n    Target optimizer arguments as dict.", "id": "f14285:c0:m6"}
{"signature": "def __init__(<EOL>self,<EOL>states,<EOL>actions,<EOL>batched_observe=True,<EOL>batching_capacity=<NUM_LIT:1000>,<EOL>scope='<STR_LIT>',<EOL>device=None,<EOL>saver=None,<EOL>summarizer=None,<EOL>execution=None,<EOL>):", "body": "self.scope = scope<EOL>self.device = device<EOL>self.saver = saver<EOL>self.summarizer = summarizer<EOL>self.execution = sanity_check_execution_spec(execution)<EOL>super(RandomAgent, self).__init__(<EOL>states=states,<EOL>actions=actions,<EOL>batched_observe=batched_observe,<EOL>batching_capacity=batching_capacity<EOL>)<EOL>", "docstring": "Initializes the random agent.\n\nArgs:\n    scope (str): TensorFlow scope (default: name of agent).\n    device: TensorFlow device (default: none)\n    saver (spec): Saver specification, with the following attributes (default: none):\n        - directory: model directory.\n        - file: model filename (optional).\n        - seconds or steps: save frequency (default: 600 seconds).\n        - load: specifies whether model is loaded, if existent (default: true).\n        - basename: optional file basename (default: 'model.ckpt').\n    summarizer (spec): Summarizer specification, with the following attributes (default:\n        none):\n        - directory: summaries directory.\n        - seconds or steps: summarize frequency (default: 120 seconds).\n        - labels: list of summary labels to record (default: []).\n        - meta_param_recorder_class: ???.\n    execution (spec): Execution specification (see sanity_check_execution_spec for details).", "id": "f14286:c0:m0"}
{"signature": "def __init__(<EOL>self,<EOL>states,<EOL>actions,<EOL>network,<EOL>batched_observe=True,<EOL>batching_capacity=<NUM_LIT:1000>,<EOL>scope='<STR_LIT>',<EOL>device=None,<EOL>saver=None,<EOL>summarizer=None,<EOL>execution=None,<EOL>variable_noise=None,<EOL>states_preprocessing=None,<EOL>actions_exploration=None,<EOL>reward_preprocessing=None,<EOL>update_mode=None,<EOL>memory=None,<EOL>optimizer=None,<EOL>discount=<NUM_LIT>,<EOL>distributions=None,<EOL>entropy_regularization=None,<EOL>target_sync_frequency=<NUM_LIT>,<EOL>target_update_weight=<NUM_LIT:1.0>,<EOL>double_q_model=False,<EOL>huber_loss=None<EOL>):", "body": "<EOL>if update_mode is None:<EOL><INDENT>update_mode = dict(<EOL>unit='<STR_LIT>',<EOL>batch_size=<NUM_LIT:10><EOL>)<EOL><DEDENT>elif '<STR_LIT>' in update_mode:<EOL><INDENT>assert update_mode['<STR_LIT>'] == '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>update_mode['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if memory is None:<EOL><INDENT>memory = dict(<EOL>type='<STR_LIT>',<EOL>include_next_states=True,<EOL>capacity=(<NUM_LIT:1000> * update_mode['<STR_LIT>'])<EOL>)<EOL><DEDENT>else:<EOL><INDENT>assert memory['<STR_LIT>']<EOL><DEDENT>if optimizer is None:<EOL><INDENT>optimizer = dict(<EOL>type='<STR_LIT>',<EOL>learning_rate=<NUM_LIT><EOL>)<EOL><DEDENT>self.target_sync_frequency = target_sync_frequency<EOL>self.target_update_weight = target_update_weight<EOL>self.double_q_model = double_q_model<EOL>self.huber_loss = huber_loss<EOL>super(DQNNstepAgent, 
self).__init__(<EOL>states=states,<EOL>actions=actions,<EOL>batched_observe=batched_observe,<EOL>batching_capacity=batching_capacity,<EOL>scope=scope,<EOL>device=device,<EOL>saver=saver,<EOL>summarizer=summarizer,<EOL>execution=execution,<EOL>variable_noise=variable_noise,<EOL>states_preprocessing=states_preprocessing,<EOL>actions_exploration=actions_exploration,<EOL>reward_preprocessing=reward_preprocessing,<EOL>update_mode=update_mode,<EOL>memory=memory,<EOL>optimizer=optimizer,<EOL>discount=discount,<EOL>network=network,<EOL>distributions=distributions,<EOL>entropy_regularization=entropy_regularization<EOL>)<EOL>", "docstring": "Initializes the DQN n-step agent.\n\nArgs:\n    update_mode (spec): Update mode specification, with the following attributes:\n        - unit: 'episodes' if given (default: 'episodes').\n        - batch_size: integer (default: 10).\n        - frequency: integer (default: batch_size).\n    memory (spec): Memory specification, see core.memories module for more information\n        (default: {type='latest', include_next_states=true, capacity=1000*batch_size}).\n    optimizer (spec): Optimizer specification, see core.optimizers module for more\n        information (default: {type='adam', learning_rate=1e-3}).\n    target_sync_frequency (int): Target network sync frequency (default: 10000).\n    target_update_weight (float): Target network update weight (default: 1.0).\n    double_q_model (bool): Specifies whether double DQN mode is used (default: false).\n    huber_loss (float): Huber loss clipping (default: none).", "id": "f14289:c0:m0"}
{"signature": "def observe(self, terminal, reward, index=<NUM_LIT:0>):", "body": "self.current_terminal = terminal<EOL>self.current_reward = reward<EOL>if self.batched_observe:<EOL><INDENT>self.observe_terminal[index].append(self.current_terminal)<EOL>self.observe_reward[index].append(self.current_reward)<EOL>if self.current_terminal or len(self.observe_terminal[index]) >= self.batching_capacity:<EOL><INDENT>self.episode = self.model.observe(<EOL>terminal=self.observe_terminal[index],<EOL>reward=self.observe_reward[index],<EOL>index=index<EOL>)<EOL>self.observe_terminal[index] = list()<EOL>self.observe_reward[index] = list()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.episode = self.model.observe(<EOL>terminal=self.current_terminal,<EOL>reward=self.current_reward<EOL>)<EOL><DEDENT>", "docstring": "Observe experience from the environment to learn from. Optionally pre-processes rewards\nChild classes should call super to get the processed reward\nEX: terminal, reward = super()...\n\nArgs:\n    terminal (bool): boolean indicating if the episode terminated after the observation.\n    reward (float): scalar reward that resulted from executing the action.", "id": "f14290:c0:m6"}
{"signature": "def act(self, states, deterministic=False, independent=False, fetch_tensors=None, buffered=True, index=<NUM_LIT:0>):", "body": "self.current_internals = self.next_internals<EOL>if self.unique_state:<EOL><INDENT>self.current_states = dict(state=np.asarray(states))<EOL><DEDENT>else:<EOL><INDENT>self.current_states = {name: np.asarray(states[name]) for name in sorted(states)}<EOL><DEDENT>if fetch_tensors is not None:<EOL><INDENT>self.current_actions, self.next_internals, self.timestep, self.fetched_tensors = self.model.act(<EOL>states=self.current_states,<EOL>internals=self.current_internals,<EOL>deterministic=deterministic,<EOL>independent=independent,<EOL>fetch_tensors=fetch_tensors,<EOL>index=index<EOL>)<EOL>if self.unique_action:<EOL><INDENT>return self.current_actions['<STR_LIT:action>'], self.fetched_tensors<EOL><DEDENT>else:<EOL><INDENT>return self.current_actions, self.fetched_tensors<EOL><DEDENT><DEDENT>self.current_actions, self.next_internals, self.timestep = self.model.act(<EOL>states=self.current_states,<EOL>internals=self.current_internals,<EOL>deterministic=deterministic,<EOL>independent=independent,<EOL>index=index<EOL>)<EOL>if buffered:<EOL><INDENT>if self.unique_action:<EOL><INDENT>return self.current_actions['<STR_LIT:action>']<EOL><DEDENT>else:<EOL><INDENT>return self.current_actions<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if self.unique_action:<EOL><INDENT>return self.current_actions['<STR_LIT:action>'], self.current_states, self.current_internals<EOL><DEDENT>else:<EOL><INDENT>return self.current_actions, self.current_states, self.current_internals<EOL><DEDENT><DEDENT>", "docstring": "Return action(s) for given state(s). 
States preprocessing and exploration are applied if\nconfigured accordingly.\n\nArgs:\n    states (any): One state (usually a value tuple) or dict of states if multiple states are expected.\n    deterministic (bool): If true, no exploration and sampling is applied.\n    independent (bool): If true, action is not followed by observe (and hence not included\n        in updates).\n    fetch_tensors (list): Optional String of named tensors to fetch\n    buffered (bool): If true (default), states and internals are not returned but buffered\n        with observes. Must be false for multi-threaded mode as we need atomic inserts.\nReturns:\n    Scalar value of the action or dict of multiple actions the agent wants to execute.\n    (fetched_tensors) Optional dict() with named tensors fetched", "id": "f14290:c0:m5"}
{"signature": "def restore_model(self, directory=None, file=None):", "body": "self.model.restore(directory=directory, file=file)<EOL>", "docstring": "Restore TensorFlow model. If no checkpoint file is given, the latest checkpoint is\nrestored. If no checkpoint directory is given, the model's default saver directory is\nused (unless file specifies the entire path).\n\nArgs:\n    directory: Optional checkpoint directory.\n    file: Optional checkpoint file, or path if directory not given.", "id": "f14290:c0:m11"}
{"signature": "def __init__(<EOL>self,<EOL>states,<EOL>actions,<EOL>network,<EOL>batched_observe=True,<EOL>batching_capacity=<NUM_LIT:1000>,<EOL>scope='<STR_LIT>',<EOL>device=None,<EOL>saver=None,<EOL>summarizer=None,<EOL>execution=None,<EOL>variable_noise=None,<EOL>states_preprocessing=None,<EOL>actions_exploration=None,<EOL>reward_preprocessing=None,<EOL>update_mode=None,<EOL>memory=None,<EOL>optimizer=None,<EOL>discount=<NUM_LIT>,<EOL>distributions=None,<EOL>entropy_regularization=None,<EOL>baseline_mode=None,<EOL>baseline=None,<EOL>baseline_optimizer=None,<EOL>gae_lambda=None<EOL>):", "body": "<EOL>if update_mode is None:<EOL><INDENT>update_mode = dict(<EOL>unit='<STR_LIT>',<EOL>batch_size=<NUM_LIT:10><EOL>)<EOL><DEDENT>elif '<STR_LIT>' in update_mode:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>update_mode['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if memory is None:<EOL><INDENT>memory = dict(<EOL>type='<STR_LIT>',<EOL>include_next_states=False,<EOL>capacity=(<NUM_LIT:1000> * update_mode['<STR_LIT>'])<EOL>)<EOL><DEDENT>else:<EOL><INDENT>assert not memory['<STR_LIT>']<EOL><DEDENT>if optimizer is None:<EOL><INDENT>optimizer = dict(<EOL>type='<STR_LIT>',<EOL>learning_rate=<NUM_LIT><EOL>)<EOL><DEDENT>self.baseline_mode = baseline_mode<EOL>self.baseline = baseline<EOL>self.baseline_optimizer = baseline_optimizer<EOL>self.gae_lambda = gae_lambda<EOL>super(VPGAgent, 
self).__init__(<EOL>states=states,<EOL>actions=actions,<EOL>batched_observe=batched_observe,<EOL>batching_capacity=batching_capacity,<EOL>scope=scope,<EOL>device=device,<EOL>saver=saver,<EOL>summarizer=summarizer,<EOL>execution=execution,<EOL>variable_noise=variable_noise,<EOL>states_preprocessing=states_preprocessing,<EOL>actions_exploration=actions_exploration,<EOL>reward_preprocessing=reward_preprocessing,<EOL>update_mode=update_mode,<EOL>memory=memory,<EOL>optimizer=optimizer,<EOL>discount=discount,<EOL>network=network,<EOL>distributions=distributions,<EOL>entropy_regularization=entropy_regularization<EOL>)<EOL>", "docstring": "Initializes the VPG agent.\n\nArgs:\n    update_mode (spec): Update mode specification, with the following attributes:\n        - unit: 'episodes' if given (default: 'episodes').\n        - batch_size: integer (default: 10).\n        - frequency: integer (default: batch_size).\n    memory (spec): Memory specification, see core.memories module for more information\n        (default: {type='latest', include_next_states=false, capacity=1000*batch_size}).\n    optimizer (spec): Optimizer specification, see core.optimizers module for more\n        information (default: {type='adam', learning_rate=1e-3}).\n    baseline_mode (str): One of 'states', 'network' (default: none).\n    baseline (spec): Baseline specification, see core.baselines module for more information\n        (default: none).\n    baseline_optimizer (spec): Baseline optimizer specification, see core.optimizers module\n        for more information (default: none).\n    gae_lambda (float): Lambda factor for generalized advantage estimation (default: none).", "id": "f14292:c0:m0"}
{"signature": "def __init__(<EOL>self,<EOL>states,<EOL>actions,<EOL>network,<EOL>batched_observe=True,<EOL>batching_capacity=<NUM_LIT:1000>,<EOL>scope='<STR_LIT>',<EOL>device=None,<EOL>saver=None,<EOL>summarizer=None,<EOL>execution=None,<EOL>variable_noise=None,<EOL>states_preprocessing=None,<EOL>actions_exploration=None,<EOL>reward_preprocessing=None,<EOL>update_mode=None,<EOL>memory=None,<EOL>discount=<NUM_LIT>,<EOL>distributions=None,<EOL>entropy_regularization=None,<EOL>baseline_mode=None,<EOL>baseline=None,<EOL>baseline_optimizer=None,<EOL>gae_lambda=None,<EOL>likelihood_ratio_clipping=<NUM_LIT>,<EOL>step_optimizer=None,<EOL>subsampling_fraction=<NUM_LIT:0.1>,<EOL>optimization_steps=<NUM_LIT:50><EOL>):", "body": "<EOL>if update_mode is None:<EOL><INDENT>update_mode = dict(<EOL>unit='<STR_LIT>',<EOL>batch_size=<NUM_LIT:10><EOL>)<EOL><DEDENT>elif '<STR_LIT>' in update_mode:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>update_mode['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if memory is None:<EOL><INDENT>memory = dict(<EOL>type='<STR_LIT>',<EOL>include_next_states=False,<EOL>capacity=(<NUM_LIT:1000> * update_mode['<STR_LIT>'])<EOL>)<EOL><DEDENT>else:<EOL><INDENT>assert not memory['<STR_LIT>']<EOL><DEDENT>assert (update_mode['<STR_LIT>'] != '<STR_LIT>' or memory['<STR_LIT:type>'] == '<STR_LIT>')<EOL>if step_optimizer is None:<EOL><INDENT>step_optimizer = dict(<EOL>type='<STR_LIT>',<EOL>learning_rate=<NUM_LIT><EOL>)<EOL><DEDENT>optimizer = dict(<EOL>type='<STR_LIT>',<EOL>optimizer=dict(<EOL>type='<STR_LIT>',<EOL>optimizer=step_optimizer,<EOL>fraction=subsampling_fraction<EOL>),<EOL>num_steps=optimization_steps<EOL>)<EOL>self.baseline_mode = baseline_mode<EOL>self.baseline = baseline<EOL>self.baseline_optimizer = baseline_optimizer<EOL>self.gae_lambda = gae_lambda<EOL>self.likelihood_ratio_clipping = likelihood_ratio_clipping<EOL>super(PPOAgent, 
self).__init__(<EOL>states=states,<EOL>actions=actions,<EOL>batched_observe=batched_observe,<EOL>batching_capacity=batching_capacity,<EOL>scope=scope,<EOL>device=device,<EOL>saver=saver,<EOL>summarizer=summarizer,<EOL>execution=execution,<EOL>variable_noise=variable_noise,<EOL>states_preprocessing=states_preprocessing,<EOL>actions_exploration=actions_exploration,<EOL>reward_preprocessing=reward_preprocessing,<EOL>update_mode=update_mode,<EOL>memory=memory,<EOL>optimizer=optimizer,<EOL>discount=discount,<EOL>network=network,<EOL>distributions=distributions,<EOL>entropy_regularization=entropy_regularization<EOL>)<EOL>", "docstring": "Initializes the PPO agent.\n\nArgs:\n    update_mode (spec): Update mode specification, with the following attributes:\n        - unit: 'episodes' if given (default: 'episodes').\n        - batch_size: integer (default: 10).\n        - frequency: integer (default: batch_size).\n    memory (spec): Memory specification, see core.memories module for more information\n        (default: {type='latest', include_next_states=false, capacity=1000*batch_size}).\n    optimizer (spec): PPO agent implicitly defines a multi-step subsampling optimizer.\n    baseline_mode (str): One of 'states', 'network' (default: none).\n    baseline (spec): Baseline specification, see core.baselines module for more information\n        (default: none).\n    baseline_optimizer (spec): Baseline optimizer specification, see core.optimizers module\n        for more information (default: none).\n    gae_lambda (float): Lambda factor for generalized advantage estimation (default: none).\n    likelihood_ratio_clipping (float): Likelihood ratio clipping for policy gradient\n        (default: 0.2).\n    step_optimizer (spec): Step optimizer specification of implicit multi-step subsampling\n        optimizer, see core.optimizers module for more information (default: {type='adam',\n        learning_rate=1e-3}).\n    subsampling_fraction (float): Subsampling fraction of implicit 
subsampling optimizer\n        (default: 0.1).\n    optimization_steps (int): Number of optimization steps for implicit multi-step\n        optimizer (default: 50).", "id": "f14293:c0:m0"}
{"signature": "def __init__(<EOL>self,<EOL>states,<EOL>actions,<EOL>network,<EOL>batched_observe=True,<EOL>batching_capacity=<NUM_LIT:1000>,<EOL>scope='<STR_LIT>',<EOL>device=None,<EOL>saver=None,<EOL>summarizer=None,<EOL>execution=None,<EOL>variable_noise=None,<EOL>states_preprocessing=None,<EOL>actions_exploration=None,<EOL>reward_preprocessing=None,<EOL>update_mode=None,<EOL>memory=None,<EOL>discount=<NUM_LIT>,<EOL>distributions=None,<EOL>entropy_regularization=None,<EOL>baseline_mode=None,<EOL>baseline=None,<EOL>baseline_optimizer=None,<EOL>gae_lambda=None,<EOL>likelihood_ratio_clipping=None,<EOL>learning_rate=<NUM_LIT>,<EOL>cg_max_iterations=<NUM_LIT:10>,<EOL>cg_damping=<NUM_LIT>,<EOL>cg_unroll_loop=True,<EOL>ls_max_iterations=<NUM_LIT:10>,<EOL>ls_accept_ratio=<NUM_LIT>,<EOL>ls_unroll_loop=False<EOL>):", "body": "<EOL>if update_mode is None:<EOL><INDENT>update_mode = dict(<EOL>unit='<STR_LIT>',<EOL>batch_size=<NUM_LIT:10><EOL>)<EOL><DEDENT>elif '<STR_LIT>' in update_mode:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>update_mode['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if memory is None:<EOL><INDENT>memory = dict(<EOL>type='<STR_LIT>',<EOL>include_next_states=False,<EOL>capacity=(<NUM_LIT:1000> * update_mode['<STR_LIT>'])<EOL>)<EOL><DEDENT>else:<EOL><INDENT>assert not memory['<STR_LIT>']<EOL><DEDENT>assert (update_mode['<STR_LIT>'] != '<STR_LIT>' or memory['<STR_LIT:type>'] == '<STR_LIT>')<EOL>optimizer = dict(<EOL>type='<STR_LIT>',<EOL>optimizer=dict(<EOL>type='<STR_LIT>',<EOL>learning_rate=learning_rate,<EOL>cg_max_iterations=cg_max_iterations,<EOL>cg_damping=cg_damping,<EOL>cg_unroll_loop=cg_unroll_loop,<EOL>),<EOL>ls_max_iterations=ls_max_iterations,<EOL>ls_accept_ratio=ls_accept_ratio,<EOL>ls_mode='<STR_LIT>',  <EOL>ls_parameter=<NUM_LIT:0.5>,  <EOL>ls_unroll_loop=ls_unroll_loop<EOL>)<EOL>self.baseline_mode = baseline_mode<EOL>self.baseline = baseline<EOL>self.baseline_optimizer = baseline_optimizer<EOL>self.gae_lambda = 
gae_lambda<EOL>self.likelihood_ratio_clipping = likelihood_ratio_clipping<EOL>super(TRPOAgent, self).__init__(<EOL>states=states,<EOL>actions=actions,<EOL>batched_observe=batched_observe,<EOL>batching_capacity=batching_capacity,<EOL>scope=scope,<EOL>device=device,<EOL>saver=saver,<EOL>summarizer=summarizer,<EOL>execution=execution,<EOL>variable_noise=variable_noise,<EOL>states_preprocessing=states_preprocessing,<EOL>actions_exploration=actions_exploration,<EOL>reward_preprocessing=reward_preprocessing,<EOL>update_mode=update_mode,<EOL>memory=memory,<EOL>optimizer=optimizer,<EOL>discount=discount,<EOL>network=network,<EOL>distributions=distributions,<EOL>entropy_regularization=entropy_regularization<EOL>)<EOL>", "docstring": "Initializes the TRPO agent.\n\nArgs:\n    update_mode (spec): Update mode specification, with the following attributes:\n        - unit: 'episodes' if given (default: 'episodes').\n        - batch_size: integer (default: 10).\n        - frequency: integer (default: batch_size).\n    memory (spec): Memory specification, see core.memories module for more information\n        (default: {type='latest', include_next_states=false, capacity=1000*batch_size}).\n    optimizer (spec): TRPO agent implicitly defines a optimized-step natural-gradient\n        optimizer.\n    baseline_mode (str): One of 'states', 'network' (default: none).\n    baseline (spec): Baseline specification, see core.baselines module for more information\n        (default: none).\n    baseline_optimizer (spec): Baseline optimizer specification, see core.optimizers module\n        for more information (default: none).\n    gae_lambda (float): Lambda factor for generalized advantage estimation (default: none).\n    likelihood_ratio_clipping (float): Likelihood ratio clipping for policy gradient\n        (default: none).\n    learning_rate (float): Learning rate of natural-gradient optimizer (default: 1e-3).\n    cg_max_iterations (int): Conjugate-gradient max iterations (default: 
20).\n    cg_damping (float): Conjugate-gradient damping (default: 1e-3).\n    cg_unroll_loop (bool): Conjugate-gradient unroll loop (default: false).\n    ls_max_iterations (int): Line-search max iterations (default: 10).\n    ls_accept_ratio (float): Line-search accept ratio (default: 0.9).\n    ls_unroll_loop (bool): Line-search unroll loop (default: false).", "id": "f14296:c0:m0"}
{"signature": "def __init__(<EOL>self,<EOL>sigma=<NUM_LIT>,<EOL>mu=<NUM_LIT:0.0>,<EOL>theta=<NUM_LIT>,<EOL>scope='<STR_LIT>',<EOL>summary_labels=()<EOL>):", "body": "self.sigma = sigma<EOL>self.mu = float(mu)  <EOL>self.theta = theta<EOL>super(OrnsteinUhlenbeckProcess, self).__init__(scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Initializes an Ornstein-Uhlenbeck process which is a mean reverting stochastic process\nintroducing time-correlated noise.", "id": "f14304:c0:m0"}
{"signature": "def internals_spec(self):", "body": "return dict()<EOL>", "docstring": "Returns the internal states specification.\n\nReturns:\n    Internal states specification", "id": "f14307:c0:m3"}
{"signature": "def __init__(<EOL>self,<EOL>name,<EOL>named_tensors=None,<EOL>scope='<STR_LIT>',<EOL>summary_labels=()<EOL>):", "body": "self.name = name<EOL>super(Output, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Output layer.\n\nArgs:\n    output: A string that names the tensor, will be added to available inputs", "id": "f14307:c2:m0"}
{"signature": "def tf_regularization_loss(self):", "body": "return None<EOL>", "docstring": "Creates the TensorFlow operations for the layer regularization loss.\n\nReturns:\n    Regularization loss tensor.", "id": "f14307:c0:m2"}
{"signature": "def __init__(self, layer, named_tensors=None, scope='<STR_LIT>', summary_labels=(), **kwargs):", "body": "self.layer_spec = layer<EOL>self.layer = util.get_object(obj=layer, predefined_objects=TFLayer.tf_layers, kwargs=kwargs)<EOL>self.first_scope = None<EOL>super(TFLayer, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Creates a new layer instance of a TensorFlow layer.\n\nArgs:\n    name: The name of the layer, one of 'dense'.\n    **kwargs: Additional arguments passed on to the TensorFlow layer constructor.", "id": "f14307:c3:m0"}
{"signature": "def __init__(self, named_tensors=None, scope='<STR_LIT>', summary_labels=None):", "body": "self.scope = scope<EOL>self.summary_labels = set(summary_labels or ())<EOL>self.named_tensors = named_tensors<EOL>self.variables = dict()<EOL>self.all_variables = dict()<EOL>def custom_getter(getter, name, registered=False, **kwargs):<EOL><INDENT>variable = getter(name=name, registered=True, **kwargs)<EOL>if registered:<EOL><INDENT>pass<EOL><DEDENT>elif name in self.all_variables:<EOL><INDENT>assert variable is self.all_variables[name]<EOL>if kwargs.get('<STR_LIT>', True):<EOL><INDENT>assert variable is self.variables[name]<EOL>if '<STR_LIT>' in self.summary_labels:<EOL><INDENT>tf.contrib.summary.histogram(name=name, tensor=variable)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self.all_variables[name] = variable<EOL>if kwargs.get('<STR_LIT>', True):<EOL><INDENT>self.variables[name] = variable<EOL>if '<STR_LIT>' in self.summary_labels:<EOL><INDENT>tf.contrib.summary.histogram(name=name, tensor=variable)<EOL><DEDENT><DEDENT><DEDENT>return variable<EOL><DEDENT>self.apply = tf.make_template(<EOL>name_=(scope + '<STR_LIT>'),<EOL>func_=self.tf_apply,<EOL>custom_getter_=custom_getter<EOL>)<EOL>self.regularization_loss = tf.make_template(<EOL>name_=(scope + '<STR_LIT>'),<EOL>func_=self.tf_regularization_loss,<EOL>custom_getter_=custom_getter<EOL>)<EOL>", "docstring": "Layer.", "id": "f14307:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def from_spec(spec, kwargs=None):<DEDENT>", "body": "layer = util.get_object(<EOL>obj=spec,<EOL>predefined_objects=tensorforce.core.networks.layers,<EOL>kwargs=kwargs<EOL>)<EOL>assert isinstance(layer, Layer)<EOL>return layer<EOL>", "docstring": "Creates a layer from a specification dict.", "id": "f14307:c0:m5"}
{"signature": "def __init__(self, size, dropout=None, named_tensors=None, scope='<STR_LIT>', summary_labels=(), return_final_state=True):", "body": "self.size = size<EOL>self.dropout = dropout<EOL>self.return_final_state = return_final_state<EOL>super(Lstm, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "LSTM layer.\n\nArgs:\n    size: LSTM size.\n    dropout: Dropout rate.", "id": "f14307:c15:m0"}
{"signature": "def __init__(<EOL>self,<EOL>size,<EOL>bias=False,<EOL>activation='<STR_LIT:none>',<EOL>l2_regularization=<NUM_LIT:0.0>,<EOL>l1_regularization=<NUM_LIT:0.0>,<EOL>output=None,<EOL>named_tensors=None,<EOL>scope='<STR_LIT>',<EOL>summary_labels=()<EOL>):", "body": "<EOL>self.expectation_layer = Linear(<EOL>size=<NUM_LIT:1>, bias=bias,<EOL>l2_regularization=l2_regularization,<EOL>l1_regularization=l1_regularization,<EOL>summary_labels=summary_labels,<EOL>)<EOL>self.advantage_layer = Linear(<EOL>size=size,<EOL>bias=bias,<EOL>l2_regularization=l2_regularization,<EOL>l1_regularization=l1_regularization,<EOL>summary_labels=summary_labels,<EOL>)<EOL>self.output = output<EOL>self.nonlinearity = Nonlinearity(summary_labels=summary_labels, **util.prepare_kwargs(activation))<EOL>super(Dueling, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Dueling layer.\n\n[Dueling Networks] (https://arxiv.org/pdf/1511.06581.pdf)\nImplement Y = Expectation[x] + (Advantage[x] - Mean(Advantage[x]))\n\nArgs:\n    size: Layer size.\n    bias: If true, bias is added.\n    activation: Type of nonlinearity, or dict with name & arguments\n    l2_regularization: L2 regularization weight.\n    l1_regularization: L1 regularization weight.\n    output: None or tuple of output names for ('expectation','advantage','mean_advantage')", "id": "f14307:c11:m0"}
{"signature": "def __init__(<EOL>self,<EOL>indices,<EOL>size,<EOL>l2_regularization=<NUM_LIT:0.0>,<EOL>l1_regularization=<NUM_LIT:0.0>,<EOL>named_tensors=None,<EOL>scope='<STR_LIT>',<EOL>summary_labels=()<EOL>):", "body": "self.indices = indices<EOL>self.size = size<EOL>self.l2_regularization = l2_regularization<EOL>self.l1_regularization = l1_regularization<EOL>super(Embedding, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Embedding layer.\n\nArgs:\n    indices: Number of embedding indices.\n    size: Embedding size.\n    l2_regularization: L2 regularization weight.\n    l1_regularization: L1 regularization weight.", "id": "f14307:c8:m0"}
{"signature": "def __init__(self,<EOL>name='<STR_LIT:relu>',<EOL>alpha=None,<EOL>beta=<NUM_LIT:1.0>,<EOL>max=None,<EOL>min=None,<EOL>named_tensors=None,<EOL>scope='<STR_LIT>',<EOL>summary_labels=()<EOL>):", "body": "self.name = name<EOL>self.alpha = None<EOL>self.max = None<EOL>self.min = None<EOL>self.beta_learn = False<EOL>super(Nonlinearity, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)<EOL>if max is not None:<EOL><INDENT>self.max = float(max)<EOL><DEDENT>if min is not None:<EOL><INDENT>self.min = float(min)<EOL><DEDENT>if alpha is not None:<EOL><INDENT>self.alpha = float(alpha)<EOL><DEDENT>if beta == '<STR_LIT>':<EOL><INDENT>self.beta_learn = True<EOL>self.beta = None<EOL><DEDENT>else:<EOL><INDENT>self.beta = tf.constant(float(beta), dtype=util.tf_dtype('<STR_LIT:float>'))<EOL><DEDENT>", "docstring": "Non-linearity activation layer.\n\nArgs:\n    name: Non-linearity name, one of 'elu', 'relu', 'selu', 'sigmoid', 'swish',\n        'leaky_relu' (or 'lrelu'), 'crelu', 'softmax', 'softplus', 'softsign', 'tanh' or 'none'.\n    alpha: (float|int) Alpha value for leaky Relu\n    beta: (float|int|'learn') Beta value or 'learn' to train value (default 1.0)\n    max: (float|int) maximum (beta * input) value passed to non-linearity function\n    min: (float|int) minimum (beta * input) value passed to non-linearity function\n    summary_labels: Requested summary labels for tensorboard export, add 'beta' to watch beta learning", "id": "f14307:c4:m0"}
{"signature": "@staticmethod<EOL><INDENT>def from_spec(spec, kwargs=None):<DEDENT>", "body": "network = util.get_object(<EOL>obj=spec,<EOL>default_object=LayeredNetwork,<EOL>kwargs=kwargs<EOL>)<EOL>assert isinstance(network, Network)<EOL>return network<EOL>", "docstring": "Creates a network from a specification dict.", "id": "f14309:c0:m8"}
{"signature": "def get_variables(self, include_nontrainable=False):", "body": "if include_nontrainable:<EOL><INDENT>return [self.all_variables[key] for key in sorted(self.all_variables)]<EOL><DEDENT>else:<EOL><INDENT>return [self.variables[key] for key in sorted(self.variables)]<EOL><DEDENT>", "docstring": "Returns the TensorFlow variables used by the network.\n\nReturns:\n    List of variables", "id": "f14309:c0:m4"}
{"signature": "def internals_spec(self):", "body": "return dict()<EOL>", "docstring": "Returns the internal states specification.\n\nReturns:\n    Internal states specification", "id": "f14309:c0:m3"}
{"signature": "def tf_apply(self, x, internals, update, return_internals=False):", "body": "raise NotImplementedError<EOL>", "docstring": "Creates the TensorFlow operations for applying the network to the given input.\n\nArgs:\n    x: Network input tensor or dict of input tensors.\n    internals: List of prior internal state tensors\n    update: Boolean tensor indicating whether this call happens during an update.\n    return_internals: If true, also returns posterior internal state tensors\n\nReturns:\n    Network output tensor, plus optionally list of posterior internal state tensors", "id": "f14309:c0:m1"}
{"signature": "def reset(self):", "body": "fetches = []<EOL>for processor in self.preprocessors:<EOL><INDENT>fetches.extend(processor.reset() or [])<EOL><DEDENT>return fetches<EOL>", "docstring": "Calls `reset` on all our Preprocessor objects.\n\nReturns:\n    A list of tensors to be fetched.", "id": "f14313:c1:m1"}
{"signature": "@staticmethod<EOL><INDENT>def from_spec(spec, kwargs=None):<DEDENT>", "body": "if isinstance(spec, dict):<EOL><INDENT>spec = [spec]<EOL><DEDENT>stack = PreprocessorStack()<EOL>for preprocessor_spec in spec:<EOL><INDENT>preprocessor_kwargs = copy.deepcopy(kwargs)<EOL>preprocessor = util.get_object(<EOL>obj=preprocessor_spec,<EOL>predefined_objects=tensorforce.core.preprocessors.preprocessors,<EOL>kwargs=preprocessor_kwargs<EOL>)<EOL>assert isinstance(preprocessor, Preprocessor)<EOL>stack.preprocessors.append(preprocessor)<EOL><DEDENT>return stack<EOL>", "docstring": "Creates a preprocessing stack from a specification dict.", "id": "f14313:c1:m5"}
{"signature": "def processed_shape(self, shape):", "body": "return shape<EOL>", "docstring": "Shape of preprocessed state given original shape.\n\nArgs:\n    shape (tuple): The original (unprocessed) shape.\n\nReturns: The processed tensor shape.", "id": "f14313:c0:m3"}
{"signature": "def get_variables(self):", "body": "return [self.variables[key] for key in sorted(self.variables)]<EOL>", "docstring": "Returns the TensorFlow variables used by the preprocessor.\n\nReturns:\n    List of variables.", "id": "f14313:c0:m4"}
{"signature": "def __init__(self, conv_sizes, dense_sizes, scope='<STR_LIT>', summary_labels=()):", "body": "network = []<EOL>for size in conv_sizes:<EOL><INDENT>network.append(dict(type='<STR_LIT>', size=size))<EOL><DEDENT>network[<NUM_LIT:0>]['<STR_LIT>'] = <NUM_LIT:5><EOL>network.append(dict(type='<STR_LIT>'))  <EOL>for size in dense_sizes:<EOL><INDENT>network.append(dict(type='<STR_LIT>', size=size))<EOL><DEDENT>super(CNNBaseline, self).__init__(network=network, scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "CNN baseline.\n\nArgs:\n    conv_sizes: List of convolutional layer sizes\n    dense_sizes: List of dense layer sizes", "id": "f14324:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def from_spec(spec, kwargs=None):<DEDENT>", "body": "baseline = util.get_object(<EOL>obj=spec,<EOL>predefined_objects=tensorforce.core.baselines.baselines,<EOL>kwargs=kwargs<EOL>)<EOL>assert isinstance(baseline, Baseline)<EOL>return baseline<EOL>", "docstring": "Creates a baseline from a specification dict.", "id": "f14325:c0:m6"}
{"signature": "def tf_reference(self, states, internals, reward, update):", "body": "return None<EOL>", "docstring": "Creates the TensorFlow operations for obtaining the reference tensor(s), in case of a\ncomparative loss.\n\nArgs:\n    states: Dict of state tensors.\n    internals: List of prior internal state tensors.\n    reward: Reward tensor.\n    update: Boolean tensor indicating whether this call happens during an update.\n\nReturns:\n    Reference tensor(s).", "id": "f14325:c0:m2"}
{"signature": "def __init__(self, sizes, scope='<STR_LIT>', summary_labels=()):", "body": "network = []<EOL>for size in sizes:<EOL><INDENT>network.append(dict(type='<STR_LIT>', size=size))<EOL><DEDENT>super(MLPBaseline, self).__init__(network=network, scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Multi-layer perceptron baseline.\n\nArgs:\n    sizes: List of dense layer sizes", "id": "f14326:c0:m0"}
{"signature": "def __init__(self, shape, min_value, max_value, alpha=<NUM_LIT:0.0>, beta=<NUM_LIT:0.0>, scope='<STR_LIT>', summary_labels=()):", "body": "assert min_value is None or max_value > min_value<EOL>self.shape = shape<EOL>self.min_value = min_value<EOL>self.max_value = max_value<EOL>action_size = util.prod(self.shape)<EOL>self.alpha = Linear(size=action_size, bias=alpha, scope='<STR_LIT>', summary_labels=summary_labels)<EOL>self.beta = Linear(size=action_size, bias=beta, scope='<STR_LIT>', summary_labels=summary_labels)<EOL>super(Beta, self).__init__(shape=shape, scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Beta distribution.\n\nArgs:\n    shape: Action shape.\n    min_value: Minimum value of continuous actions.\n    max_value: Maximum value of continuous actions.\n    alpha: Optional distribution bias for the alpha value.\n    beta: Optional distribution bias for the beta value.", "id": "f14329:c0:m0"}
{"signature": "def tf_kl_divergence(self, distr_params1, distr_params2):", "body": "raise NotImplementedError<EOL>", "docstring": "Creates the TensorFlow operations for calculating the KL divergence between two  \ndistributions.\n\nArgs:\n    distr_params1: tuple of parameter tensors for first distribution.\n    distr_params2: tuple of parameter tensors for second distribution.\n\nReturns:\n    KL divergence tensor.", "id": "f14332:c0:m5"}
{"signature": "def tf_entropy(self, distr_params):", "body": "raise NotImplementedError<EOL>", "docstring": "Creates the TensorFlow operations for calculating the entropy of a distribution.\n\nArgs:\n    distr_params: tuple of distribution parameter tensors.\n\nReturns:\n    Entropy tensor.", "id": "f14332:c0:m4"}
{"signature": "def tf_regularization_loss(self):", "body": "return None<EOL>", "docstring": "Creates the TensorFlow operations for the distribution regularization loss.\n\nReturns:\n    Regularization loss tensor.", "id": "f14332:c0:m6"}
{"signature": "def __init__(self, shape, scope='<STR_LIT>', summary_labels=None):", "body": "self.shape = shape<EOL>self.scope = scope<EOL>self.summary_labels = set(summary_labels or ())<EOL>self.variables = dict()<EOL>self.all_variables = dict()<EOL>def custom_getter(getter, name, registered=False, **kwargs):<EOL><INDENT>variable = getter(name=name, registered=True, **kwargs)<EOL>if registered:<EOL><INDENT>pass<EOL><DEDENT>elif name in self.all_variables:<EOL><INDENT>assert variable is self.all_variables[name]<EOL>if kwargs.get('<STR_LIT>', True):<EOL><INDENT>assert variable is self.variables[name]<EOL>if '<STR_LIT>' in self.summary_labels:<EOL><INDENT>tf.contrib.summary.histogram(name=name, tensor=variable)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self.all_variables[name] = variable<EOL>if kwargs.get('<STR_LIT>', True):<EOL><INDENT>self.variables[name] = variable<EOL>if '<STR_LIT>' in self.summary_labels:<EOL><INDENT>tf.contrib.summary.histogram(name=name, tensor=variable)<EOL><DEDENT><DEDENT><DEDENT>return variable<EOL><DEDENT>self.parameterize = tf.make_template(<EOL>name_=(scope + '<STR_LIT>'),<EOL>func_=self.tf_parameterize,<EOL>custom_getter_=custom_getter<EOL>)<EOL>self.sample = tf.make_template(<EOL>name_=(scope + '<STR_LIT>'),<EOL>func_=self.tf_sample,<EOL>custom_getter_=custom_getter<EOL>)<EOL>self.log_probability = tf.make_template(<EOL>name_=(scope + '<STR_LIT>'),<EOL>func_=self.tf_log_probability,<EOL>custom_getter_=custom_getter<EOL>)<EOL>self.entropy = tf.make_template(<EOL>name_=(scope + '<STR_LIT>'),<EOL>func_=self.tf_entropy,<EOL>custom_getter_=custom_getter<EOL>)<EOL>self.kl_divergence = tf.make_template(<EOL>name_=(scope + '<STR_LIT>'),<EOL>func_=self.tf_kl_divergence,<EOL>custom_getter_=custom_getter<EOL>)<EOL>self.regularization_loss = tf.make_template(<EOL>name_=(scope + '<STR_LIT>'),<EOL>func_=self.tf_regularization_loss,<EOL>custom_getter_=custom_getter<EOL>)<EOL>", "docstring": "Distribution.\n\nArgs:\n    shape: Action shape.", 
"id": "f14332:c0:m0"}
{"signature": "def tf_parameterize(self, x):", "body": "raise NotImplementedError<EOL>", "docstring": "Creates the TensorFlow operations for parameterizing a distribution conditioned on the\ngiven input.\n\nArgs:\n    x: Input tensor which the distribution is conditioned on.\n\nReturns:\n    tuple of distribution parameter tensors.", "id": "f14332:c0:m1"}
{"signature": "def tf_sample(self, distr_params, deterministic):", "body": "raise NotImplementedError<EOL>", "docstring": "Creates the TensorFlow operations for sampling an action based on a distribution.\n\nArgs:\n    distr_params: tuple of distribution parameter tensors.\n    deterministic: Boolean input tensor indicating whether the maximum likelihood action\n        should be returned.\n\nReturns:\n    Sampled action tensor.", "id": "f14332:c0:m2"}
{"signature": "def get_variables(self):", "body": "return [self.variables[key] for key in sorted(self.variables)]<EOL>", "docstring": "Returns the TensorFlow variables used by the optimizer.\n\nReturns:\n    List of variables.", "id": "f14334:c0:m4"}
{"signature": "def apply_step(self, variables, deltas):", "body": "if len(variables) != len(deltas):<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\")<EOL><DEDENT>return tf.group(<EOL>*(tf.assign_add(ref=variable, value=delta) for variable, delta in zip(variables, deltas))<EOL>)<EOL>", "docstring": "Applies the given (and already calculated) step deltas to the variable values.\n\nArgs:\n    variables: List of variables.\n    deltas: List of deltas of same length.\n\nReturns:\n    The step-applied operation. A tf.group of tf.assign_add ops.", "id": "f14334:c0:m2"}
{"signature": "def tf_step(self, time, variables, **kwargs):", "body": "arguments = kwargs[\"<STR_LIT>\"]<EOL>fn_loss = kwargs[\"<STR_LIT>\"]<EOL>loss = fn_loss(**arguments)<EOL>with tf.control_dependencies(control_inputs=(loss,)):<EOL><INDENT>previous_variables = [variable + <NUM_LIT:0.0> for variable in variables]<EOL><DEDENT>with tf.control_dependencies(control_inputs=previous_variables):<EOL><INDENT>applied = self.tf_optimizer.minimize(loss=loss, var_list=variables)  <EOL><DEDENT>with tf.control_dependencies(control_inputs=(applied,)):<EOL><INDENT>return [<EOL>variable - previous_variable<EOL>for variable, previous_variable in zip(variables, previous_variables)<EOL>]<EOL><DEDENT>", "docstring": "Keyword Args:\n    arguments: Dict of arguments for passing to fn_loss as **kwargs.\n    fn_loss: A callable taking arguments as kwargs and returning the loss op of the current model.", "id": "f14336:c0:m1"}
{"signature": "def __init__(self, optimizer, scope=None, summary_labels=(), **kwargs):", "body": "self.tf_optimizer_type = optimizer<EOL>self.tf_optimizer = TFOptimizer.tf_optimizers[optimizer](**kwargs)<EOL>super(TFOptimizer, self).__init__(scope=(scope or optimizer), summary_labels=summary_labels)<EOL>", "docstring": "Creates a new optimizer instance of a TensorFlow optimizer.\n\nArgs:\n    optimizer: The name of the optimizer. Must be one of the keys of the tf_optimizers dict.\n    **kwargs: Arguments passed on to the TensorFlow optimizer constructor as **kwargs.", "id": "f14336:c0:m0"}
{"signature": "def __init__(self, optimizer, fraction=<NUM_LIT:0.1>, scope='<STR_LIT>', summary_labels=()):", "body": "assert isinstance(fraction, float) and fraction > <NUM_LIT:0.0><EOL>self.fraction = fraction<EOL>super(SubsamplingStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Creates a new subsampling-step meta optimizer instance.\n\nArgs:\n    optimizer: The optimizer which is modified by this meta optimizer.\n    fraction: The fraction of instances of the batch to subsample.", "id": "f14337:c0:m0"}
{"signature": "def __init__(self, learning_rate, num_samples=<NUM_LIT:1>, unroll_loop=False, scope='<STR_LIT>', summary_labels=()):", "body": "assert isinstance(learning_rate, float) and learning_rate > <NUM_LIT:0.0><EOL>self.learning_rate = learning_rate<EOL>assert isinstance(num_samples, int) and num_samples > <NUM_LIT:0><EOL>self.num_samples = num_samples<EOL>assert isinstance(unroll_loop, bool)<EOL>self.unroll_loop = unroll_loop<EOL>super(Evolutionary, self).__init__(scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Creates a new evolutionary optimizer instance.\n\nArgs:\n    learning_rate: Learning rate.\n    num_samples: Number of sampled perturbations.", "id": "f14339:c0:m0"}
{"signature": "def __init__(self, sync_frequency=<NUM_LIT:1>, update_weight=<NUM_LIT:1.0>, scope='<STR_LIT>', summary_labels=()):", "body": "assert isinstance(sync_frequency, int) and sync_frequency > <NUM_LIT:0><EOL>self.sync_frequency = sync_frequency<EOL>assert isinstance(update_weight, float) and update_weight > <NUM_LIT:0.0><EOL>self.update_weight = update_weight<EOL>super(Synchronization, self).__init__(scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Creates a new synchronization optimizer instance.\n\nArgs:\n    sync_frequency: The interval between optimization calls actually performing a  \n    synchronization step.\n    update_weight: The update weight, 1.0 meaning a full assignment of the source  \n    variables values.", "id": "f14340:c0:m0"}
{"signature": "def tf_step(self, time, variables, **kwargs):", "body": "deltas = self.optimizer.step(time=time, variables=variables, **kwargs)<EOL>with tf.control_dependencies(control_inputs=deltas):<EOL><INDENT>clipped_deltas = list()<EOL>exceeding_deltas = list()<EOL>for delta in deltas:<EOL><INDENT>clipped_delta = tf.clip_by_value(<EOL>t=delta,<EOL>clip_value_min=-self.clipping_value,<EOL>clip_value_max=self.clipping_value<EOL>)<EOL>clipped_deltas.append(clipped_delta)<EOL>exceeding_deltas.append(clipped_delta - delta)<EOL><DEDENT><DEDENT>applied = self.apply_step(variables=variables, deltas=exceeding_deltas)<EOL>with tf.control_dependencies(control_inputs=(applied,)):<EOL><INDENT>return [delta + <NUM_LIT:0.0> for delta in clipped_deltas]<EOL><DEDENT>", "docstring": "Creates the TensorFlow operations for performing an optimization step.\n\nArgs:\n    time: Time tensor.\n    variables: List of variables to optimize.\n    **kwargs: Additional arguments passed on to the internal optimizer.\n\nReturns:\n    List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14341:c0:m1"}
{"signature": "def __init__(self, optimizer, clipping_value, scope='<STR_LIT>', summary_labels=()):", "body": "assert isinstance(clipping_value, float) and clipping_value > <NUM_LIT:0.0><EOL>self.clipping_value = clipping_value<EOL>super(ClippedStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Creates a new multi-step meta optimizer instance.\n\nArgs:\n    optimizer: The optimizer which is modified by this meta optimizer.\n    clipping_value: Clip deltas at this value.", "id": "f14341:c0:m0"}
{"signature": "def tf_step(self, time, variables, **kwargs):", "body": "global_variables = kwargs[\"<STR_LIT>\"]<EOL>assert all(<EOL>util.shape(global_variable) == util.shape(local_variable)<EOL>for global_variable, local_variable in zip(global_variables, variables)<EOL>)<EOL>local_deltas = self.optimizer.step(time=time, variables=variables, **kwargs)<EOL>with tf.control_dependencies(control_inputs=local_deltas):<EOL><INDENT>applied = self.optimizer.apply_step(variables=global_variables, deltas=local_deltas)<EOL><DEDENT>with tf.control_dependencies(control_inputs=(applied,)):<EOL><INDENT>update_deltas = list()<EOL>for global_variable, local_variable in zip(global_variables, variables):<EOL><INDENT>delta = global_variable - local_variable<EOL>update_deltas.append(delta)<EOL><DEDENT>applied = self.apply_step(variables=variables, deltas=update_deltas)<EOL><DEDENT>with tf.control_dependencies(control_inputs=(applied,)):<EOL><INDENT>return [local_delta + update_delta for local_delta, update_delta in zip(local_deltas, update_deltas)]<EOL><DEDENT>", "docstring": "Keyword Args:\n    global_variables: List of global variables to apply the proposed optimization step to.\n\nReturns:\n    List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14342:c0:m1"}
{"signature": "def __init__(<EOL>self,<EOL>optimizer,<EOL>ls_max_iterations=<NUM_LIT:10>,<EOL>ls_accept_ratio=<NUM_LIT>,<EOL>ls_mode='<STR_LIT>',<EOL>ls_parameter=<NUM_LIT:0.5>,<EOL>ls_unroll_loop=False,<EOL>scope='<STR_LIT>',<EOL>summary_labels=()<EOL>):", "body": "self.solver = LineSearch(<EOL>max_iterations=ls_max_iterations,<EOL>accept_ratio=ls_accept_ratio,<EOL>mode=ls_mode,<EOL>parameter=ls_parameter,<EOL>unroll_loop=ls_unroll_loop<EOL>)<EOL>super(OptimizedStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Creates a new optimized step meta optimizer instance.\n\nArgs:\n    optimizer: The optimizer which is modified by this meta optimizer.\n    ls_max_iterations: Maximum number of line search iterations.\n    ls_accept_ratio: Line search acceptance ratio.\n    ls_mode: Line search mode, see LineSearch solver.\n    ls_parameter: Line search parameter, see LineSearch solver.\n    ls_unroll_loop: Unroll line search loop if true.", "id": "f14345:c0:m0"}
{"signature": "def minimize(self, time, variables, **kwargs):", "body": "loss = kwargs[\"<STR_LIT>\"]<EOL>sampled_loss = kwargs[\"<STR_LIT>\"]<EOL>min_op, _ = self.minimize_(loss, sampled_loss, var_list=variables)<EOL>return min_op<EOL>", "docstring": "Performs an optimization step.\n\nArgs:\n    time: Time tensor. Not used for this optimizer.\n    variables: List of variables to optimize.\n    **kwargs: \n        fn_loss : loss function tensor that is differentiated\n        sampled_loss : the sampled loss from running the model.\n\nReturns:\n    The optimization operation.", "id": "f14347:c0:m17"}
{"signature": "def __init__(<EOL>self,<EOL>learning_rate=<NUM_LIT>,<EOL>momentum=<NUM_LIT>,<EOL>clip_kl=<NUM_LIT>,<EOL>kfac_update=<NUM_LIT:2>,<EOL>stats_accum_iter=<NUM_LIT>,<EOL>full_stats_init=False,<EOL>cold_iter=<NUM_LIT:100>,<EOL>cold_lr=None,<EOL>async_=False,<EOL>async_stats=False,<EOL>epsilon=<NUM_LIT>,<EOL>stats_decay=<NUM_LIT>,<EOL>blockdiag_bias=False,<EOL>channel_fac=False,<EOL>factored_damping=False,<EOL>approxT2=False,<EOL>use_float64=False,<EOL>weight_decay_dict={},<EOL>max_grad_norm=<NUM_LIT:0.5>,<EOL>scope='<STR_LIT>',<EOL>summary_labels=()<EOL>):", "body": "self.max_grad_norm = max_grad_norm<EOL>self._lr = learning_rate<EOL>self._momentum = momentum<EOL>self._clip_kl = clip_kl<EOL>self._channel_fac = channel_fac<EOL>self._kfac_update = kfac_update<EOL>self._async = async_<EOL>self._async_stats = async_stats<EOL>self._epsilon = epsilon<EOL>self._stats_decay = stats_decay<EOL>self._blockdiag_bias = blockdiag_bias<EOL>self._approxT2 = approxT2<EOL>self._use_float64 = use_float64<EOL>self._factored_damping = factored_damping<EOL>self._cold_iter = cold_iter<EOL>if cold_lr == None:<EOL><INDENT>self._cold_lr = self._lr<EOL><DEDENT>else:<EOL><INDENT>self._cold_lr = cold_lr<EOL><DEDENT>self._stats_accum_iter = stats_accum_iter<EOL>self._weight_decay_dict = weight_decay_dict<EOL>self._diag_init_coeff = <NUM_LIT:0.><EOL>self._full_stats_init = full_stats_init<EOL>if not self._full_stats_init:<EOL><INDENT>self._stats_accum_iter = self._cold_iter<EOL><DEDENT>self.sgd_step = tf.Variable(<NUM_LIT:0>, name='<STR_LIT>', trainable=False)<EOL>self.global_step = tf.Variable(<EOL><NUM_LIT:0>, name='<STR_LIT>', trainable=False)<EOL>self.cold_step = tf.Variable(<NUM_LIT:0>, name='<STR_LIT>', trainable=False)<EOL>self.factor_step = tf.Variable(<EOL><NUM_LIT:0>, name='<STR_LIT>', trainable=False)<EOL>self.stats_step = tf.Variable(<EOL><NUM_LIT:0>, name='<STR_LIT>', trainable=False)<EOL>self.vFv = tf.Variable(<NUM_LIT:0.>, name='<STR_LIT>', 
trainable=False)<EOL>self.factors = {}<EOL>self.param_vars = []<EOL>self.stats = {}<EOL>self.stats_eigen = {}<EOL>super(KFAC, self).__init__(scope=scope, summary_labels=summary_labels)<EOL>", "docstring": "Initializes a KFAC optimizer.\n\nFor more information on arguments, see the Kfac Optimization paper https://arxiv.org/pdf/1503.05671.pdf", "id": "f14347:c0:m0"}
{"signature": "def tf_step(self, time, variables, **kwargs):", "body": "fn_loss = kwargs[\"<STR_LIT>\"]<EOL>if variables is None:<EOL><INDENT>variables = tf.trainable_variables<EOL><DEDENT>return tf.gradients(fn_loss, variables)<EOL>", "docstring": "Creates the TensorFlow operations for performing an optimization step on the given variables, including\nactually changing the values of the variables.\n\nArgs:\n    time: Time tensor. Not used for this optimizer.\n    variables: List of variables to optimize.\n    **kwargs: \n        fn_loss : loss function tensor to differentiate.\n\nReturns:\n    List of delta tensors corresponding to the updates for each optimized variable.", "id": "f14347:c0:m15"}
{"signature": "def tf_solve(self, fn_x, x_init, base_value, target_value, estimated_improvement=None):", "body": "return super(LineSearch, self).tf_solve(fn_x, x_init, base_value, target_value, estimated_improvement)<EOL>", "docstring": "Iteratively optimizes $f(x)$ for $x$ on the line between $x'$ and $x_0$.\n\nArgs:\n    fn_x: A callable returning the value $f(x)$ at $x$.\n    x_init: Initial solution guess $x_0$.\n    base_value: Value $f(x')$ at $x = x'$.\n    target_value: Value $f(x_0)$ at $x = x_0$.\n    estimated_improvement: Estimated improvement for $x = x_0$, $f(x')$ if None.\n\nReturns:\n    A solution $x$ to the problem as given by the solver.", "id": "f14348:c0:m1"}
{"signature": "def tf_step(self, x, iteration, deltas, improvement, last_improvement, estimated_improvement):", "body": "x, next_iteration, deltas, improvement, last_improvement, estimated_improvement = super(LineSearch, self).tf_step(<EOL>x, iteration, deltas, improvement, last_improvement, estimated_improvement<EOL>)<EOL>next_x = [t + delta for t, delta in zip(x, deltas)]<EOL>if self.mode == '<STR_LIT>':<EOL><INDENT>next_deltas = deltas<EOL>next_estimated_improvement = estimated_improvement + self.estimated_incr<EOL><DEDENT>elif self.mode == '<STR_LIT>':<EOL><INDENT>next_deltas = [delta * self.parameter for delta in deltas]<EOL>next_estimated_improvement = estimated_improvement * self.parameter<EOL><DEDENT>target_value = self.fn_x(next_deltas)<EOL>next_improvement = tf.divide(<EOL>x=(target_value - self.base_value),<EOL>y=tf.maximum(x=next_estimated_improvement, y=util.epsilon)<EOL>)<EOL>return next_x, next_iteration, next_deltas, next_improvement, improvement, next_estimated_improvement<EOL>", "docstring": "Iteration loop body of the line search algorithm.\n\nArgs:\n    x: Current solution estimate $x_t$.\n    iteration: Current iteration counter $t$.\n    deltas: Current difference $x_t - x'$.\n    improvement: Current improvement $(f(x_t) - f(x')) / v'$.\n    last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$.\n    estimated_improvement: Current estimated value $v'$.\n\nReturns:\n    Updated arguments for next iteration.", "id": "f14348:c0:m3"}
{"signature": "def tf_next_step(self, x, iteration, deltas, improvement, last_improvement, estimated_improvement):", "body": "next_step = super(LineSearch, self).tf_next_step(<EOL>x, iteration, deltas, improvement, last_improvement, estimated_improvement<EOL>)<EOL>def undo_deltas():<EOL><INDENT>value = self.fn_x([-delta for delta in deltas])<EOL>with tf.control_dependencies(control_inputs=(value,)):<EOL><INDENT>return tf.less(x=value, y=value)  <EOL><DEDENT><DEDENT>improved = tf.cond(<EOL>pred=(improvement > last_improvement),<EOL>true_fn=(lambda: True),<EOL>false_fn=undo_deltas<EOL>)<EOL>next_step = tf.logical_and(x=next_step, y=improved)<EOL>next_step = tf.logical_and(x=next_step, y=(improvement < self.accept_ratio))<EOL>return tf.logical_and(x=next_step, y=(estimated_improvement > util.epsilon))<EOL>", "docstring": "Termination condition: max number of iterations, or no improvement for last step, or  \nimprovement less than acceptable ratio, or estimated value not positive.\n\nArgs:\n    x: Current solution estimate $x_t$.\n    iteration: Current iteration counter $t$.\n    deltas: Current difference $x_t - x'$.\n    improvement: Current improvement $(f(x_t) - f(x')) / v'$.\n    last_improvement: Last improvement $(f(x_{t-1}) - f(x')) / v'$.\n    estimated_improvement: Current estimated value $v'$.\n\nReturns:\n    True if another iteration should be performed.", "id": "f14348:c0:m4"}
{"signature": "def __init__(self, max_iterations, unroll_loop=False):", "body": "assert max_iterations >= <NUM_LIT:0><EOL>self.max_iterations = max_iterations<EOL>assert isinstance(unroll_loop, bool)<EOL>self.unroll_loop = unroll_loop<EOL>super(Iterative, self).__init__()<EOL>self.initialize = tf.make_template(name_='<STR_LIT>', func_=self.tf_initialize)<EOL>self.step = tf.make_template(name_='<STR_LIT>', func_=self.tf_step)<EOL>self.next_step = tf.make_template(name_='<STR_LIT>', func_=self.tf_next_step)<EOL>", "docstring": "Creates a new iterative solver instance.\n\nArgs:\n    max_iterations: Maximum number of iterations before termination.\n    unroll_loop: Unrolls the TensorFlow while loop if true.", "id": "f14349:c0:m0"}
{"signature": "def tf_solve(self, fn_x, x_init, *args):", "body": "self.fn_x = fn_x<EOL>args = self.initialize(x_init, *args)<EOL>if self.unroll_loop:<EOL><INDENT>for _ in range(self.max_iterations):<EOL><INDENT>next_step = self.next_step(*args)<EOL>step = (lambda: self.step(*args))<EOL>do_nothing = (lambda: args)<EOL>args = tf.cond(pred=next_step, true_fn=step, false_fn=do_nothing)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>args = tf.while_loop(cond=self.next_step, body=self.step, loop_vars=args)<EOL><DEDENT>return args[<NUM_LIT:0>]<EOL>", "docstring": "Iteratively solves an equation/optimization for $x$ involving an expression $f(x)$.\n\nArgs:\n    fn_x: A callable returning an expression $f(x)$ given $x$.\n    x_init: Initial solution guess $x_0$.\n    *args: Additional solver-specific arguments.\n\nReturns:\n    A solution $x$ to the problem as given by the solver.", "id": "f14349:c0:m1"}
{"signature": "def tf_solve(self, fn_x, *args):", "body": "raise NotImplementedError<EOL>", "docstring": "Solves an equation/optimization for $x$ involving an expression $f(x)$.\n\nArgs:\n    fn_x: A callable returning an expression $f(x)$ given $x$.\n    *args: Additional solver-specific arguments.\n\nReturns:\n    A solution $x$ to the problem as given by the solver.", "id": "f14350:c0:m1"}
{"signature": "def __init__(self):", "body": "<EOL>self.solve = tf.make_template(name_='<STR_LIT>', func_=self.tf_solve)<EOL>", "docstring": "Creates a new solver instance.", "id": "f14350:c0:m0"}
{"signature": "def tf_initialize(self, x_init, b):", "body": "if x_init is None:<EOL><INDENT>x_init = [tf.zeros(shape=util.shape(t)) for t in b]<EOL><DEDENT>initial_args = super(ConjugateGradient, self).tf_initialize(x_init)<EOL>conjugate = residual = [t - fx for t, fx in zip(b, self.fn_x(x_init))]<EOL>squared_residual = tf.add_n(inputs=[tf.reduce_sum(input_tensor=(res * res)) for res in residual])<EOL>return initial_args + (conjugate, residual, squared_residual)<EOL>", "docstring": "Initialization step preparing the arguments for the first iteration of the loop body:  \n$x_0, 0, p_0, r_0, r_0^2$.\n\nArgs:\n    x_init: Initial solution guess $x_0$, zero vector if None.\n    b: The right-hand side $b$ of the system of linear equations.\n\nReturns:\n    Initial arguments for tf_step.", "id": "f14352:c0:m2"}
{"signature": "def tf_next_step(self, x, iteration, conjugate, residual, squared_residual):", "body": "next_step = super(ConjugateGradient, self).tf_next_step(x, iteration, conjugate, residual, squared_residual)<EOL>return tf.logical_and(x=next_step, y=(squared_residual >= util.epsilon))<EOL>", "docstring": "Termination condition: max number of iterations, or residual sufficiently small.\n\nArgs:\n    x: Current solution estimate $x_t$.\n    iteration: Current iteration counter $t$.\n    conjugate: Current conjugate $c_t$.\n    residual: Current residual $r_t$.\n    squared_residual: Current squared residual $r_t^2$.\n\nReturns:\n    True if another iteration should be performed.", "id": "f14352:c0:m4"}
{"signature": "def update_batch(self, loss_per_instance):", "body": "if self.batch_indices is None:<EOL><INDENT>raise TensorForceError(\"<STR_LIT>\")<EOL><DEDENT>for index, loss in zip(self.batch_indices, loss_per_instance):<EOL><INDENT>new_priority = (np.abs(loss) + self.prioritization_constant) ** self.prioritization_weight<EOL>self.observations._move(index, new_priority)<EOL>self.none_priority_index += <NUM_LIT:1><EOL><DEDENT>", "docstring": "Computes priorities according to loss.\n\nArgs:\n    loss_per_instance:", "id": "f14354:c1:m3"}
{"signature": "def _move(self, index, new_priority):", "body": "item, old_priority = self._memory[index]<EOL>old_priority = old_priority or <NUM_LIT:0><EOL>self._memory[index] = _SumRow(item, new_priority)<EOL>self._update_internal_nodes(index, new_priority - old_priority)<EOL>", "docstring": "Change the priority of a leaf node.", "id": "f14354:c0:m3"}
{"signature": "def _next_position_then_increment(self):", "body": "start = self._capacity - <NUM_LIT:1><EOL>position = start + self._position<EOL>self._position = (self._position + <NUM_LIT:1>) % self._capacity<EOL>return position<EOL>", "docstring": "Similar to position++.", "id": "f14354:c0:m6"}
{"signature": "def move(self, external_index, new_priority):", "body": "index = external_index + (self._capacity - <NUM_LIT:1>)<EOL>return self._move(index, new_priority)<EOL>", "docstring": "Change the priority of a leaf node", "id": "f14354:c0:m2"}
{"signature": "def __len__(self):", "body": "return len(self._memory) - (self._capacity - <NUM_LIT:1>)<EOL>", "docstring": "Return the current number of transitions.", "id": "f14354:c0:m9"}
{"signature": "def put(self, item, priority=None):", "body": "if not self._isfull():<EOL><INDENT>self._memory.append(None)<EOL><DEDENT>position = self._next_position_then_increment()<EOL>old_priority = <NUM_LIT:0> if self._memory[position] is None else (self._memory[position].priority or <NUM_LIT:0>)<EOL>row = _SumRow(item, priority)<EOL>self._memory[position] = row<EOL>self._update_internal_nodes(<EOL>position, (row.priority or <NUM_LIT:0>) - old_priority)<EOL>", "docstring": "Stores a transition in replay memory.\n\nIf the memory is full, the oldest entry is replaced.", "id": "f14354:c0:m1"}
{"signature": "def __init__(self, states, internals, actions, include_next_states, capacity, scope='<STR_LIT>', summary_labels=None):", "body": "super(Replay, self).__init__(<EOL>states=states,<EOL>internals=internals,<EOL>actions=actions,<EOL>include_next_states=include_next_states,<EOL>capacity=capacity,<EOL>scope=scope,<EOL>summary_labels=summary_labels<EOL>)<EOL>", "docstring": "Replay memory.\n\nArgs:\n    states (dict): States specification.\n    internals (dict): Internal states specification.\n    actions (dict): Actions specification.\n    include_next_states (bool): Include subsequent state if true.\n    capacity (int): Memory capacity (number of state/internals/action/(next-state)? records).", "id": "f14356:c0:m0"}
{"signature": "def get_variables(self):", "body": "return [self.variables[key] for key in sorted(self.variables)]<EOL>", "docstring": "Returns the TensorFlow variables used by the memory.\n\nReturns:\n    List of variables.", "id": "f14357:c0:m8"}
{"signature": "def tf_store(self, states, internals, actions, terminal, reward):", "body": "raise NotImplementedError<EOL>", "docstring": "Stores experiences, i.e. a batch of timesteps.\n\nArgs:\n    states: Dict of state tensors.\n    internals: List of prior internal state tensors.\n    actions: Dict of action tensors.\n    terminal: Terminal boolean tensor.\n    reward: Reward tensor.", "id": "f14357:c0:m3"}
{"signature": "def __init__(self, states, internals, actions, include_next_states, scope='<STR_LIT>', summary_labels=None):", "body": "self.states_spec = states<EOL>self.internals_spec = internals<EOL>self.actions_spec = actions<EOL>self.include_next_states = include_next_states<EOL>self.scope = scope<EOL>self.summary_labels = set(summary_labels or ())<EOL>self.variables = dict()<EOL>self.initialize = None  <EOL>self.store = None<EOL>self.retrieve_timesteps = None<EOL>self.retrieve_episodes = None<EOL>self.retrieve_sequences = None<EOL>self.update_batch = None<EOL>self.setup_template_funcs()<EOL>", "docstring": "Args:\n    states (dict): States specification.\n    internals (dict): Internal states specification.\n    actions (dict): Actions specification.\n    include_next_states (bool): Include subsequent state if true.\n    scope (str): The tf variable scope to use when creating variables for this memory.\n    summary_labels (list): List of summary labels.", "id": "f14357:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def from_spec(spec, kwargs=None):<DEDENT>", "body": "memory = util.get_object(<EOL>obj=spec,<EOL>predefined_objects=tensorforce.core.memories.memories,<EOL>kwargs=kwargs<EOL>)<EOL>assert isinstance(memory, Memory)<EOL>return memory<EOL>", "docstring": "Creates a memory from a specification dict.", "id": "f14357:c0:m9"}
{"signature": "def tf_retrieve_indices(self, buffer_elements, priority_indices):", "body": "states = dict()<EOL>buffer_start = self.buffer_index - buffer_elements<EOL>buffer_end = self.buffer_index<EOL>for name in sorted(self.states_memory):<EOL><INDENT>buffer_state_memory = self.states_buffer[name]<EOL>buffer_states = buffer_state_memory[buffer_start:buffer_end]<EOL>memory_states = tf.gather(params=self.states_memory[name], indices=priority_indices)<EOL>states[name] = tf.concat(values=(buffer_states, memory_states), axis=<NUM_LIT:0>)<EOL><DEDENT>internals = dict()<EOL>for name in sorted(self.internals_memory):<EOL><INDENT>internal_buffer_memory = self.internals_buffer[name]<EOL>buffer_internals = internal_buffer_memory[buffer_start:buffer_end]<EOL>memory_internals = tf.gather(params=self.internals_memory[name], indices=priority_indices)<EOL>internals[name] = tf.concat(values=(buffer_internals, memory_internals), axis=<NUM_LIT:0>)<EOL><DEDENT>actions = dict()<EOL>for name in sorted(self.actions_memory):<EOL><INDENT>action_buffer_memory = self.actions_buffer[name]<EOL>buffer_action = action_buffer_memory[buffer_start:buffer_end]<EOL>memory_action = tf.gather(params=self.actions_memory[name], indices=priority_indices)<EOL>actions[name] = tf.concat(values=(buffer_action, memory_action), axis=<NUM_LIT:0>)<EOL><DEDENT>buffer_terminal = self.terminal_buffer[buffer_start:buffer_end]<EOL>priority_terminal = tf.gather(params=self.terminal_memory, indices=priority_indices)<EOL>terminal = tf.concat(values=(buffer_terminal, priority_terminal), axis=<NUM_LIT:0>)<EOL>buffer_reward = self.reward_buffer[buffer_start:buffer_end]<EOL>priority_reward = tf.gather(params=self.reward_memory, indices=priority_indices)<EOL>reward = tf.concat(values=(buffer_reward, priority_reward), axis=<NUM_LIT:0>)<EOL>if self.include_next_states:<EOL><INDENT>assert util.rank(priority_indices) == <NUM_LIT:1><EOL>next_priority_indices = (priority_indices + <NUM_LIT:1>) % 
self.capacity<EOL>next_buffer_start = (buffer_start + <NUM_LIT:1>) % self.buffer_size<EOL>next_buffer_end = (buffer_end + <NUM_LIT:1>) % self.buffer_size<EOL>next_states = dict()<EOL>for name in sorted(self.states_memory):<EOL><INDENT>buffer_state_memory = self.states_buffer[name]<EOL>buffer_next_states = buffer_state_memory[next_buffer_start:next_buffer_end]<EOL>memory_next_states = tf.gather(params=self.states_memory[name], indices=next_priority_indices)<EOL>next_states[name] = tf.concat(values=(buffer_next_states, memory_next_states), axis=<NUM_LIT:0>)<EOL><DEDENT>next_internals = dict()<EOL>for name in sorted(self.internals_memory):<EOL><INDENT>buffer_internal_memory = self.internals_buffer[name]<EOL>buffer_next_internals = buffer_internal_memory[next_buffer_start:next_buffer_end]<EOL>memory_next_internals = tf.gather(params=self.internals_memory[name], indices=next_priority_indices)<EOL>next_internals[name] = tf.concat(values=(buffer_next_internals, memory_next_internals), axis=<NUM_LIT:0>)<EOL><DEDENT>return dict(<EOL>states=states,<EOL>internals=internals,<EOL>actions=actions,<EOL>terminal=terminal,<EOL>reward=reward,<EOL>next_states=next_states,<EOL>next_internals=next_internals<EOL>)<EOL><DEDENT>else:<EOL><INDENT>return dict(<EOL>states=states,<EOL>internals=internals,<EOL>actions=actions,<EOL>terminal=terminal,<EOL>reward=reward<EOL>)<EOL><DEDENT>", "docstring": "Fetches experiences for given indices by combining entries from buffer\nwhich have no priorities, and entries from priority memory.\n\nArgs:\n    buffer_elements: Number of buffer elements to retrieve\n    priority_indices: Index tensor for priority memory\n\nReturns: Batch of experiences", "id": "f14360:c0:m5"}
{"signature": "def tf_update_batch(self, loss_per_instance):", "body": "<EOL>mask = tf.not_equal(<EOL>x=self.batch_indices,<EOL>y=tf.zeros(shape=tf.shape(input=self.batch_indices), dtype=tf.int32)<EOL>)<EOL>priority_indices = tf.reshape(tensor=tf.where(condition=mask), shape=[-<NUM_LIT:1>])<EOL>sampled_buffer_batch = self.tf_retrieve_indices(<EOL>buffer_elements=self.last_batch_buffer_elems,<EOL>priority_indices=priority_indices<EOL>)<EOL>states = sampled_buffer_batch['<STR_LIT>']<EOL>internals = sampled_buffer_batch['<STR_LIT>']<EOL>actions = sampled_buffer_batch['<STR_LIT>']<EOL>terminal = sampled_buffer_batch['<STR_LIT>']<EOL>reward = sampled_buffer_batch['<STR_LIT>']<EOL>priorities = loss_per_instance ** self.prioritization_weight<EOL>assignments = list()<EOL>memory_end_index = self.memory_index + self.last_batch_buffer_elems<EOL>memory_insert_indices = tf.range(<EOL>start=self.memory_index,<EOL>limit=memory_end_index<EOL>) % self.capacity<EOL>for name in sorted(states):<EOL><INDENT>assignments.append(tf.scatter_update(<EOL>ref=self.states_memory[name],<EOL>indices=memory_insert_indices,<EOL>updates=states[name][<NUM_LIT:0>:self.last_batch_buffer_elems])<EOL>)<EOL><DEDENT>for name in sorted(internals):<EOL><INDENT>assignments.append(tf.scatter_update(<EOL>ref=self.internals_buffer[name],<EOL>indices=memory_insert_indices,<EOL>updates=internals[name][<NUM_LIT:0>:self.last_batch_buffer_elems]<EOL>))<EOL><DEDENT>assignments.append(tf.scatter_update(<EOL>ref=self.priorities,<EOL>indices=memory_insert_indices,<EOL>updates=priorities[<NUM_LIT:0>:self.last_batch_buffer_elems]<EOL>))<EOL>assignments.append(tf.scatter_update(<EOL>ref=self.terminal_memory,<EOL>indices=memory_insert_indices,<EOL>updates=terminal[<NUM_LIT:0>:self.last_batch_buffer_elems])<EOL>)<EOL>assignments.append(tf.scatter_update(<EOL>ref=self.reward_memory,<EOL>indices=memory_insert_indices,<EOL>updates=reward[<NUM_LIT:0>:self.last_batch_buffer_elems])<EOL>)<EOL>for name in 
sorted(actions):<EOL><INDENT>assignments.append(tf.scatter_update(<EOL>ref=self.actions_memory[name],<EOL>indices=memory_insert_indices,<EOL>updates=actions[name][<NUM_LIT:0>:self.last_batch_buffer_elems]<EOL>))<EOL><DEDENT>main_memory_priorities = priorities[self.last_batch_buffer_elems:]<EOL>main_memory_priorities = main_memory_priorities[<NUM_LIT:0>:tf.shape(priority_indices)[<NUM_LIT:0>]]<EOL>assignments.append(tf.scatter_update(<EOL>ref=self.priorities,<EOL>indices=priority_indices,<EOL>updates=main_memory_priorities<EOL>))<EOL>with tf.control_dependencies(control_inputs=assignments):<EOL><INDENT>assignments = list()<EOL>sorted_priorities, sorted_indices = tf.nn.top_k(<EOL>input=self.priorities,<EOL>k=self.capacity,<EOL>sorted=True<EOL>)<EOL>assignments.append(tf.assign(ref=self.priorities, value=sorted_priorities))<EOL>assignments.append(tf.scatter_update(<EOL>ref=self.terminal_memory,<EOL>indices=sorted_indices,<EOL>updates=self.terminal_memory<EOL>))<EOL>for name in sorted(self.states_memory):<EOL><INDENT>assignments.append(tf.scatter_update(<EOL>ref=self.states_memory[name],<EOL>indices=sorted_indices,<EOL>updates=self.states_memory[name]<EOL>))<EOL><DEDENT>for name in sorted(self.actions_memory):<EOL><INDENT>assignments.append(tf.scatter_update(<EOL>ref=self.actions_memory[name],<EOL>indices=sorted_indices,<EOL>updates=self.actions_memory[name]<EOL>))<EOL><DEDENT>for name in sorted(self.internals_memory):<EOL><INDENT>assignments.append(tf.scatter_update(<EOL>ref=self.internals_memory[name],<EOL>indices=sorted_indices,<EOL>updates=self.internals_memory[name]<EOL>))<EOL><DEDENT>assignments.append(tf.scatter_update(<EOL>ref=self.reward_memory,<EOL>indices=sorted_indices,<EOL>updates=self.reward_memory<EOL>))<EOL><DEDENT>with tf.control_dependencies(control_inputs=assignments):<EOL><INDENT>assignments = list()<EOL>assignments.append(tf.assign_sub(ref=self.buffer_index, value=self.last_batch_buffer_elems))<EOL>total_inserted_elements = self.memory_size + 
self.last_batch_buffer_elems<EOL>assignments.append(tf.assign(<EOL>ref=self.memory_size,<EOL>value=tf.minimum(x=total_inserted_elements, y=self.capacity))<EOL>)<EOL>assignments.append(tf.assign(ref=self.memory_index, value=memory_end_index))<EOL>assignments.append(tf.assign(<EOL>ref=self.batch_indices,<EOL>value=tf.zeros(shape=tf.shape(self.batch_indices), dtype=tf.int32)<EOL>))<EOL><DEDENT>with tf.control_dependencies(control_inputs=assignments):<EOL><INDENT>return tf.no_op()<EOL><DEDENT>", "docstring": "Updates priority memory by performing the following steps:\n\n1. Use saved indices from prior retrieval to reconstruct the batch\nelements which will have their priorities updated.\n2. Compute priorities for these elements.\n3. Insert buffer elements to memory, potentially overwriting existing elements.\n4. Update priorities of existing memory elements\n5. Resort memory.\n6. Update buffer insertion index.\n\nNote that this implementation could be made more efficient by maintaining\na sorted version via sum trees.\n\n:param loss_per_instance: Losses from recent batch to perform priority update", "id": "f14360:c0:m6"}
{"signature": "def _fire(self, layers, the_plot):", "body": "<EOL>if the_plot.get('<STR_LIT>') == the_plot.frame: return<EOL>the_plot['<STR_LIT>'] = the_plot.frame<EOL>col = np.random.choice(np.nonzero(layers['<STR_LIT:X>'].sum(axis=<NUM_LIT:0>))[<NUM_LIT:0>])<EOL>row = np.nonzero(layers['<STR_LIT:X>'][:, col])[<NUM_LIT:0>][-<NUM_LIT:1>] + <NUM_LIT:1><EOL>self._teleport((row, col))<EOL>", "docstring": "Launches a new bolt from a random Marauder.", "id": "f14375:c4:m3"}
{"signature": "def __init__(self, corner, position, character):", "body": "super(PlayerSprite, self).__init__(<EOL>corner, position, character, impassable='<STR_LIT>', confined_to_board=True)<EOL>", "docstring": "Simply indicates to the superclass that we can't walk off the board.", "id": "f14375:c2:m0"}
{"signature": "def _fly(self, board, layers, things, the_plot):", "body": "<EOL>if (self.character in the_plot['<STR_LIT>'] or<EOL>self.character in the_plot['<STR_LIT>']):<EOL><INDENT>return self._teleport((-<NUM_LIT:1>, -<NUM_LIT:1>))<EOL><DEDENT>self._north(board, the_plot)<EOL>", "docstring": "Handles the behaviour of visible bolts flying toward Marauders.", "id": "f14375:c3:m2"}
{"signature": "def _fly(self, board, layers, things, the_plot):", "body": "<EOL>if self.character in the_plot['<STR_LIT>']:<EOL><INDENT>return self._teleport((-<NUM_LIT:1>, -<NUM_LIT:1>))<EOL><DEDENT>if self.position == things['<STR_LIT:P>'].position: the_plot.terminate_episode()<EOL>self._south(board, the_plot)<EOL>", "docstring": "Handles the behaviour of visible bolts flying toward the player.", "id": "f14375:c4:m2"}
{"signature": "def __init__(self, corner, position, character):", "body": "super(UpwardLaserBoltSprite, self).__init__(<EOL>corner, position, character, impassable='<STR_LIT>')<EOL>self._teleport((-<NUM_LIT:1>, -<NUM_LIT:1>))<EOL>", "docstring": "Starts the Sprite in a hidden position off of the board.", "id": "f14375:c3:m0"}
{"signature": "def make_game():", "body": "return ascii_art.ascii_art_to_game(<EOL>GAME_ART, what_lies_beneath='<STR_LIT:U+0020>',<EOL>sprites=dict(<EOL>[('<STR_LIT:P>', PlayerSprite)] +<EOL>[(c, UpwardLaserBoltSprite) for c in UPWARD_BOLT_CHARS] +<EOL>[(c, DownwardLaserBoltSprite) for c in DOWNWARD_BOLT_CHARS]),<EOL>drapes=dict(X=MarauderDrape,<EOL>B=BunkerDrape),<EOL>update_schedule=['<STR_LIT:P>', '<STR_LIT:B>', '<STR_LIT:X>'] + list(_ALL_BOLT_CHARS))<EOL>", "docstring": "Builds and returns an Extraterrestrial Marauders game.", "id": "f14375:m0"}
{"signature": "def list(self, body, ordered=True):", "body": "tag = '<STR_LIT>'<EOL>if ordered:<EOL><INDENT>tag = '<STR_LIT>'<EOL><DEDENT>return '<STR_LIT>' % (tag, body, tag)<EOL>", "docstring": "Rendering list tags like ``<ul>`` and ``<ol>``.\n\n        :param body: body contents of the list.\n        :param ordered: whether this list is ordered or not.", "id": "f14379:c4:m7"}
{"signature": "def text(self, text):", "body": "if self.options.get('<STR_LIT>'):<EOL><INDENT>return text<EOL><DEDENT>return escape(text)<EOL>", "docstring": "Rendering unformatted text.\n\n        :param text: text content.", "id": "f14379:c4:m18"}
{"signature": "def inline_html(self, html):", "body": "if self.options.get('<STR_LIT>'):<EOL><INDENT>return escape(html)<EOL><DEDENT>return html<EOL>", "docstring": "Rendering span level pure html content.\n\n        :param html: text content of the html snippet.", "id": "f14379:c4:m23"}
{"signature": "def codespan(self, text):", "body": "text = escape(text.rstrip(), smart_amp=False)<EOL>return '<STR_LIT>' % text<EOL>", "docstring": "Rendering inline `code` text.\n\n        :param text: text content for inline code.", "id": "f14379:c4:m15"}
{"signature": "def escape(self, text):", "body": "return escape(text)<EOL>", "docstring": "Rendering escape sequence.\n\n        :param text: text content.", "id": "f14379:c4:m19"}
{"signature": "def parse_lheading(self, m):", "body": "self.tokens.append({<EOL>'<STR_LIT:type>': '<STR_LIT>',<EOL>'<STR_LIT>': <NUM_LIT:1> if m.group(<NUM_LIT:2>) == '<STR_LIT:=>' else <NUM_LIT:2>,<EOL>'<STR_LIT:text>': m.group(<NUM_LIT:1>),<EOL>})<EOL>", "docstring": "Parse setext heading.", "id": "f14379:c1:m7"}
{"signature": "def footnote_item(self, key, text):", "body": "back = (<EOL>'<STR_LIT>'<EOL>) % escape(key)<EOL>text = text.rstrip()<EOL>if text.endswith('<STR_LIT>'):<EOL><INDENT>text = re.sub(r'<STR_LIT>', r'<STR_LIT>' % back, text)<EOL><DEDENT>else:<EOL><INDENT>text = '<STR_LIT>' % (text, back)<EOL><DEDENT>html = '<STR_LIT>' % (escape(key), text)<EOL>return html<EOL>", "docstring": "Rendering a footnote item.\n\n        :param key: identity key for the footnote.\n        :param text: text content of the footnote.", "id": "f14379:c4:m26"}
{"signature": "def footnotes(self, text):", "body": "html = '<STR_LIT>'<EOL>return html % (self.hrule(), text)<EOL>", "docstring": "Wrapper for all footnotes.\n\n        :param text: contents of all footnotes.", "id": "f14379:c4:m27"}
{"signature": "def hrule(self):", "body": "if self.options.get('<STR_LIT>'):<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Rendering method for ``<hr>`` tag.", "id": "f14379:c4:m6"}
{"signature": "def image(self, src, title, text):", "body": "src = escape_link(src)<EOL>text = escape(text, quote=True)<EOL>if title:<EOL><INDENT>title = escape(title, quote=True)<EOL>html = '<STR_LIT>' % (src, text, title)<EOL><DEDENT>else:<EOL><INDENT>html = '<STR_LIT>' % (src, text)<EOL><DEDENT>if self.options.get('<STR_LIT>'):<EOL><INDENT>return '<STR_LIT>' % html<EOL><DEDENT>return '<STR_LIT>' % html<EOL>", "docstring": "Rendering a image with title and text.\n\n        :param src: source link of the image.\n        :param title: title text of the image.\n        :param text: alt text of the image.", "id": "f14379:c4:m22"}
{"signature": "def block_quote(self, text):", "body": "return '<STR_LIT>' % text.rstrip('<STR_LIT:\\n>')<EOL>", "docstring": "Rendering <blockquote> with the given text.\n\n        :param text: text content of the blockquote.", "id": "f14379:c4:m3"}
{"signature": "def block_html(self, html):", "body": "if self.options.get('<STR_LIT>') andhtml.lower().startswith('<STR_LIT>'):<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>if self.options.get('<STR_LIT>'):<EOL><INDENT>return escape(html)<EOL><DEDENT>return html<EOL>", "docstring": "Rendering block level pure html content.\n\n        :param html: text content of the html snippet.", "id": "f14379:c4:m4"}
{"signature": "def footnote_ref(self, key, index):", "body": "html = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>) % (escape(key), escape(key), index)<EOL>return html<EOL>", "docstring": "Rendering the ref anchor of a footnote.\n\n        :param key: identity key for the footnote.\n        :param index: the index count of current footnote.", "id": "f14379:c4:m25"}
{"signature": "def linebreak(self):", "body": "if self.options.get('<STR_LIT>'):<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Rendering line break like ``<br>``.", "id": "f14379:c4:m16"}
{"signature": "def placeholder(self):", "body": "return '<STR_LIT>'<EOL>", "docstring": "Returns the default, empty output value for the renderer.\n\n        All renderer methods use the '+=' operator to append to this value.\n        Default is a string so rendering HTML can build up a result string with\n        the rendered Markdown.\n\n        Can be overridden by Renderer subclasses to be types like an empty\n        list, allowing the renderer to create a tree-like structure to\n        represent the document (which can then be reprocessed later into a\n        separate format like docx or pdf).", "id": "f14379:c4:m1"}
{"signature": "def process_docstring(app, what, name, obj, options, lines):", "body": "markdown = \"<STR_LIT:\\n>\".join(lines)<EOL>rest = m2r(markdown)<EOL>rest.replace(\"<STR_LIT:\\r\\n>\", \"<STR_LIT:\\n>\")<EOL>del lines[:]<EOL>lines.extend(rest.split(\"<STR_LIT:\\n>\"))<EOL>", "docstring": "Enable markdown syntax in docstrings", "id": "f14380:m0"}
{"signature": "def output_rest_role(self, m):", "body": "return self.renderer.rest_role(m.group(<NUM_LIT:0>))<EOL>", "docstring": "Pass through rest role.", "id": "f14381:c3:m4"}
{"signature": "def output_image_link(self, m):", "body": "return self.renderer.image_link(<EOL>m.group('<STR_LIT:url>'), m.group('<STR_LIT:target>'), m.group('<STR_LIT>'))<EOL>", "docstring": "Pass through rest role.", "id": "f14381:c3:m3"}
{"signature": "def emphasis(self, text):", "body": "return '<STR_LIT>'.format(text)<EOL>", "docstring": "Rendering *emphasis* text.\n\n        :param text: text content for emphasis.", "id": "f14381:c4:m14"}
{"signature": "def list(self, body, ordered=True):", "body": "mark = '<STR_LIT>' if ordered else '<STR_LIT>'<EOL>lines = body.splitlines()<EOL>for i, line in enumerate(lines):<EOL><INDENT>if line and not line.startswith(self.list_marker):<EOL><INDENT>lines[i] = '<STR_LIT:U+0020>' * len(mark) + line<EOL><DEDENT><DEDENT>return '<STR_LIT>'.format(<EOL>'<STR_LIT:\\n>'.join(lines)).replace(self.list_marker, mark)<EOL>", "docstring": "Rendering list tags like ``<ul>`` and ``<ol>``.\n\n        :param body: body contents of the list.\n        :param ordered: whether this list is ordered or not.", "id": "f14381:c4:m7"}
{"signature": "def run(self):", "body": "if not self.state.document.settings.file_insertion_enabled:<EOL><INDENT>raise self.warning('<STR_LIT>' % self.name)<EOL><DEDENT>source = self.state_machine.input_lines.source(<EOL>self.lineno - self.state_machine.input_offset - <NUM_LIT:1>)<EOL>source_dir = os.path.dirname(os.path.abspath(source))<EOL>path = rst.directives.path(self.arguments[<NUM_LIT:0>])<EOL>path = os.path.normpath(os.path.join(source_dir, path))<EOL>path = utils.relative_path(None, path)<EOL>path = nodes.reprunicode(path)<EOL>encoding = self.options.get(<EOL>'<STR_LIT>', self.state.document.settings.input_encoding)<EOL>e_handler = self.state.document.settings.input_encoding_error_handler<EOL>tab_width = self.options.get(<EOL>'<STR_LIT>', self.state.document.settings.tab_width)<EOL>try:<EOL><INDENT>self.state.document.settings.record_dependencies.add(path)<EOL>include_file = io.FileInput(source_path=path,<EOL>encoding=encoding,<EOL>error_handler=e_handler)<EOL><DEDENT>except UnicodeEncodeError as error:<EOL><INDENT>raise self.severe('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(self.name, SafeString(path)))<EOL><DEDENT>except IOError as error:<EOL><INDENT>raise self.severe('<STR_LIT>' %<EOL>(self.name, ErrorString(error)))<EOL><DEDENT>try:<EOL><INDENT>rawtext = include_file.read()<EOL><DEDENT>except UnicodeError as error:<EOL><INDENT>raise self.severe('<STR_LIT>' %<EOL>(self.name, ErrorString(error)))<EOL><DEDENT>config = self.state.document.settings.env.config<EOL>converter = M2R(no_underscore_emphasis=config.no_underscore_emphasis)<EOL>include_lines = statemachine.string2lines(converter(rawtext),<EOL>tab_width,<EOL>convert_whitespace=True)<EOL>self.state_machine.insert_input(include_lines, path)<EOL>return []<EOL>", "docstring": "Most of this method is from ``docutils.parser.rst.Directive``.\n\n        docutils version: 0.12", "id": "f14381:c7:m0"}
{"signature": "def table(self, header, body):", "body": "table = '<STR_LIT>'<EOL>if header and not header.isspace():<EOL><INDENT>table = (table + self.indent + '<STR_LIT>' +<EOL>self._indent_block(header) + '<STR_LIT:\\n>')<EOL><DEDENT>else:<EOL><INDENT>table = table + '<STR_LIT:\\n>'<EOL><DEDENT>table = table + self._indent_block(body) + '<STR_LIT>'<EOL>return table<EOL>", "docstring": "Rendering table element. Wrap header and body in it.\n\n        :param header: header part of the table.\n        :param body: body part of the table.", "id": "f14381:c4:m10"}
{"signature": "def table_cell(self, content, **flags):", "body": "return '<STR_LIT>' + content + '<STR_LIT:\\n>'<EOL>", "docstring": "Rendering a table cell. Like ``<th>`` ``<td>``.\n\n        :param content: content of current table cell.\n        :param header: whether this is header or not.\n        :param align: align of current table cell.", "id": "f14381:c4:m12"}
{"signature": "def footnote_ref(self, key, index):", "body": "return '<STR_LIT>'.format(key)<EOL>", "docstring": "Rendering the ref anchor of a footnote.\n\n        :param key: identity key for the footnote.\n        :param index: the index count of current footnote.", "id": "f14381:c4:m24"}
{"signature": "def eol_literal_marker(self, marker):", "body": "return marker<EOL>", "docstring": "Extension of recommonmark", "id": "f14381:c4:m31"}
{"signature": "def output_inline_math(self, m):", "body": "return self.renderer.inline_math(m.group(<NUM_LIT:1>))<EOL>", "docstring": "Pass through rest link.", "id": "f14381:c3:m6"}
{"signature": "def hrule(self):", "body": "return '<STR_LIT>'<EOL>", "docstring": "Rendering method for ``<hr>`` tag.", "id": "f14381:c4:m6"}
{"signature": "def block_html(self, html):", "body": "return '<STR_LIT>' + self._indent_block(html) + '<STR_LIT>'<EOL>", "docstring": "Rendering block level pure html content.\n\n        :param html: text content of the html snippet.", "id": "f14381:c4:m4"}
{"signature": "def linebreak(self):", "body": "if self.options.get('<STR_LIT>'):<EOL><INDENT>return self._raw_html('<STR_LIT>') + '<STR_LIT:\\n>'<EOL><DEDENT>return self._raw_html('<STR_LIT>') + '<STR_LIT:\\n>'<EOL>", "docstring": "Rendering line break like ``<br>``.", "id": "f14381:c4:m16"}
{"signature": "def list_item(self, text):", "body": "return '<STR_LIT:\\n>' + self.list_marker + text<EOL>", "docstring": "Rendering list item snippet. Like ``<li>``.", "id": "f14381:c4:m8"}
{"signature": "def setup(app):", "body": "global _is_sphinx<EOL>_is_sphinx = True<EOL>app.add_config_value('<STR_LIT>', False, '<STR_LIT>')<EOL>app.add_source_parser('<STR_LIT>', M2RParser)<EOL>app.add_directive('<STR_LIT>', MdInclude)<EOL>", "docstring": "When used for spinx extension.", "id": "f14381:m1"}
{"signature": "def validate(self, filename, module, classname, ignore):", "body": "with open(os.path.normpath(os.path.join('<STR_LIT>', '<STR_LIT>', filename))) as f:<EOL><INDENT>docs = f.read()<EOL><DEDENT>module = module_loading.import_module(module)<EOL>methods = re.findall(r'<STR_LIT>', docs, flags=re.M)<EOL>attributes = re.findall(r'<STR_LIT>', docs, flags=re.M)<EOL>documented = set(filter(lambda x: x.startswith(classname), [a for a in methods] + attributes))<EOL>implemented = set(classname + '<STR_LIT:.>' + x for x in dir(getattr(module, classname))<EOL>if not x.startswith('<STR_LIT:_>') or x == '<STR_LIT>')<EOL>print(implemented)<EOL>ignored = set(classname + '<STR_LIT:.>' + x for x in ignore)<EOL>self.assertSetEqual(implemented - documented - ignored, set(), msg='<STR_LIT>')<EOL>self.assertSetEqual(documented - implemented - ignored, set(), msg='<STR_LIT>')<EOL>", "docstring": "Finds all automethod and autoattribute statements in an rst file\ncomparing them to the attributes found in the actual class", "id": "f14387:c0:m0"}
{"signature": "def local(path):", "body": "return os.path.join(__name__.split('<STR_LIT:.>')[-<NUM_LIT:2>], path)<EOL>", "docstring": "Prepend the effect package name to a path so resources\ncan still be loaded when copied into a new effect package.", "id": "f14397:m0"}
{"signature": "def local(path):", "body": "return os.path.join(__name__.split('<STR_LIT:.>')[-<NUM_LIT:2>], path)<EOL>", "docstring": "Prepend the effect package name to a path so resources\ncan still be loaded when copied into a new effect package.", "id": "f14399:m0"}
{"signature": "@property<EOL><INDENT>def geometry_vertices(self) -> int:<DEDENT>", "body": "return self.program.geometry_vertices<EOL>", "docstring": "int: The maximum number of vertices that\nthe geometry shader will output.", "id": "f14402:c3:m11"}
{"signature": "@classmethod<EOL><INDENT>def from_single(cls, meta: ProgramDescription, source: str):<DEDENT>", "body": "instance = cls(meta)<EOL>instance.vertex_source = ShaderSource(<EOL>VERTEX_SHADER,<EOL>meta.path or meta.vertex_shader,<EOL>source<EOL>)<EOL>if GEOMETRY_SHADER in source:<EOL><INDENT>instance.geometry_source = ShaderSource(<EOL>GEOMETRY_SHADER,<EOL>meta.path or meta.geometry_shader,<EOL>source,<EOL>)<EOL><DEDENT>if FRAGMENT_SHADER in source:<EOL><INDENT>instance.fragment_source = ShaderSource(<EOL>FRAGMENT_SHADER,<EOL>meta.path or meta.fragment_shader,<EOL>source,<EOL>)<EOL><DEDENT>if TESS_CONTROL_SHADER in source:<EOL><INDENT>instance.tess_control_source = ShaderSource(<EOL>TESS_CONTROL_SHADER,<EOL>meta.path or meta.tess_control_shader,<EOL>source,<EOL>)<EOL><DEDENT>if TESS_EVALUATION_SHADER in source:<EOL><INDENT>instance.tess_evaluation_source = ShaderSource(<EOL>TESS_EVALUATION_SHADER,<EOL>meta.path or meta.tess_evaluation_shader,<EOL>source,<EOL>)<EOL><DEDENT>return instance<EOL>", "docstring": "Initialize a single glsl string containing all shaders", "id": "f14402:c0:m2"}
{"signature": "@property<EOL><INDENT>def subroutines(self) -> Tuple[str, ...]:<DEDENT>", "body": "return self.program.subroutines<EOL>", "docstring": "tuple: The subroutine uniforms.", "id": "f14402:c3:m8"}
{"signature": "def create(self):", "body": "<EOL>out_attribs = []<EOL>if not self.fragment_source:<EOL><INDENT>if self.geometry_source:<EOL><INDENT>out_attribs = self.geometry_source.find_out_attribs()<EOL><DEDENT>else:<EOL><INDENT>out_attribs = self.vertex_source.find_out_attribs()<EOL><DEDENT><DEDENT>program = self.ctx.program(<EOL>vertex_shader=self.vertex_source.source,<EOL>geometry_shader=self.geometry_source.source if self.geometry_source else None,<EOL>fragment_shader=self.fragment_source.source if self.fragment_source else None,<EOL>tess_control_shader=self.tess_control_source.source if self.tess_control_source else None,<EOL>tess_evaluation_shader=self.tess_evaluation_source.source if self.tess_evaluation_source else None,<EOL>varyings=out_attribs,<EOL>)<EOL>program.extra = {'<STR_LIT>': self.meta}<EOL>return program<EOL>", "docstring": "Creates a shader program.\n\nReturns:\n    ModernGL Program instance", "id": "f14402:c0:m4"}
{"signature": "@property<EOL><INDENT>def glo(self) -> int:<DEDENT>", "body": "return self.program.glo<EOL>", "docstring": "int: The internal OpenGL object.\nThis values is provided for debug purposes only.", "id": "f14402:c3:m7"}
{"signature": "def __init__(self, meta: ProgramDescription, program: moderngl.Program):", "body": "self.program = program<EOL>self.meta = meta<EOL>", "docstring": "Create a shader using either a file path or a name\n:param meta: The ProgramMeta\n:param program: The program instance", "id": "f14402:c3:m0"}
{"signature": "@property<EOL><INDENT>def mglo(self):<DEDENT>", "body": "return self.program.mglo<EOL>", "docstring": "The ModernGL Program object", "id": "f14402:c3:m6"}
{"signature": "def render(self, program: moderngl.Program, mode=None, vertices=-<NUM_LIT:1>, first=<NUM_LIT:0>, instances=<NUM_LIT:1>):", "body": "vao = self.instance(program)<EOL>if mode is None:<EOL><INDENT>mode = self.mode<EOL><DEDENT>vao.render(mode, vertices=vertices, first=first, instances=instances)<EOL>", "docstring": "Render the VAO.\n\nArgs:\n    program: The ``moderngl.Program``\n\nKeyword Args:\n    mode: Override the draw mode (``TRIANGLES`` etc)\n    vertices (int): The number of vertices to transform\n    first (int): The index of the first vertex to start with\n    instances (int): The number of instances", "id": "f14403:c1:m1"}
{"signature": "def content(self, attributes: List[str]):", "body": "formats = []<EOL>attrs = []<EOL>for attrib_format, attrib in zip(self.attrib_formats, self.attributes):<EOL><INDENT>if attrib not in attributes:<EOL><INDENT>formats.append(attrib_format.pad_str())<EOL>continue<EOL><DEDENT>formats.append(attrib_format.format)<EOL>attrs.append(attrib)<EOL>attributes.remove(attrib)<EOL><DEDENT>if not attrs:<EOL><INDENT>return None<EOL><DEDENT>return (<EOL>self.buffer,<EOL>\"<STR_LIT>\".format(\"<STR_LIT:U+0020>\".join(formats), '<STR_LIT>' if self.per_instance else '<STR_LIT>'),<EOL>*attrs<EOL>)<EOL>", "docstring": "Build content tuple for the buffer", "id": "f14403:c0:m2"}
{"signature": "def __init__(self, buffer: moderngl.Buffer, buffer_format: str, attributes=None, per_instance=False):", "body": "self.buffer = buffer<EOL>self.attrib_formats = types.parse_attribute_formats(buffer_format)<EOL>self.attributes = attributes<EOL>self.per_instance = per_instance<EOL>if self.buffer.size % self.vertex_size != <NUM_LIT:0>:<EOL><INDENT>raise VAOError(\"<STR_LIT>\".format(<EOL>buffer_format, self.vertex_size, self.buffer.size % self.vertex_size<EOL>))<EOL><DEDENT>self.vertices = self.buffer.size // self.vertex_size<EOL>", "docstring": ":param buffer: The vbo object\n:param format: The format of the buffer", "id": "f14403:c0:m0"}
{"signature": "def __init__(self, name=\"<STR_LIT>\", mode=moderngl.TRIANGLES):", "body": "self.ctx = context.ctx()<EOL>self.name = name<EOL>self.mode = mode<EOL>try:<EOL><INDENT>DRAW_MODES[self.mode]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise VAOError(\"<STR_LIT>\".format(DRAW_MODES.values()))<EOL><DEDENT>self.buffers = []<EOL>self._index_buffer = None<EOL>self._index_element_size = None<EOL>self.vertex_count = <NUM_LIT:0><EOL>self.vaos = {}<EOL>", "docstring": "Create and empty VAO\n\nKeyword Args:\n    name (str): The name for debug purposes\n    mode (int): Default draw mode", "id": "f14403:c1:m0"}
{"signature": "def __init__(self, format_string: str, components: int, bytes_per_component: int):", "body": "self.format = format_string<EOL>self.components = components<EOL>self.bytes_per_component = bytes_per_component<EOL>", "docstring": ":param format_string: moderngl format string\n:param components: components\n:param byte_size: byte per component", "id": "f14404:c0:m0"}
{"signature": "def attribute_format(frmt: str) -> BufferFormat:", "body": "try:<EOL><INDENT>return ATTRIBUTE_FORMATS[frmt]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(<EOL>frmt, ATTRIBUTE_FORMATS.keys()<EOL>))<EOL><DEDENT>", "docstring": "Look up info about an attribute format\n:param frmt: Format of an\n:return: BufferFormat instance", "id": "f14404:m1"}
{"signature": "def update(self, aspect_ratio=None, fov=None, near=None, far=None):", "body": "self.aspect_ratio = aspect_ratio or self.aspect_ratio<EOL>self.fov = fov or self.fov<EOL>self.near = near or self.near<EOL>self.far = far or self.far<EOL>self.matrix = Matrix44.perspective_projection(self.fov, self.aspect_ratio, self.near, self.far)<EOL>", "docstring": "Update the internal projection matrix based on current values\nor values passed in if specified.\n\n:param aspect_ratio: New aspect ratio\n:param fov: New field of view\n:param near: New near value\n:param far: New far value", "id": "f14405:c0:m1"}
{"signature": "def draw(self, texture, pos=(<NUM_LIT:0.0>, <NUM_LIT:0.0>), scale=(<NUM_LIT:1.0>, <NUM_LIT:1.0>)):", "body": "if not self.initialized:<EOL><INDENT>self.init()<EOL><DEDENT>self._texture2d_shader[\"<STR_LIT>\"].value = (pos[<NUM_LIT:0>] - <NUM_LIT:1.0>, pos[<NUM_LIT:1>] - <NUM_LIT:1.0>)<EOL>self._texture2d_shader[\"<STR_LIT>\"].value = (scale[<NUM_LIT:0>], scale[<NUM_LIT:1>])<EOL>texture.use(location=<NUM_LIT:0>)<EOL>self._texture2d_sampler.use(location=<NUM_LIT:0>)<EOL>self._texture2d_shader[\"<STR_LIT>\"].value = <NUM_LIT:0><EOL>self._quad.render(self._texture2d_shader)<EOL>self._texture2d_sampler.clear(location=<NUM_LIT:0>)<EOL>", "docstring": "Draw texture using a fullscreen quad.\nBy default this will conver the entire screen.\n\n:param pos: (tuple) offset x, y\n:param scale: (tuple) scale x, y", "id": "f14406:c0:m3"}
{"signature": "def draw_depth(self, texture, near, far, pos=(<NUM_LIT:0.0>, <NUM_LIT:0.0>), scale=(<NUM_LIT:1.0>, <NUM_LIT:1.0>)):", "body": "if not self.initialized:<EOL><INDENT>self.init()<EOL><DEDENT>self._depth_shader[\"<STR_LIT>\"].value = (pos[<NUM_LIT:0>] - <NUM_LIT:1.0>, pos[<NUM_LIT:1>] - <NUM_LIT:1.0>)<EOL>self._depth_shader[\"<STR_LIT>\"].value = (scale[<NUM_LIT:0>], scale[<NUM_LIT:1>])<EOL>self._depth_shader[\"<STR_LIT>\"].value = near<EOL>self._depth_shader[\"<STR_LIT>\"].value = far<EOL>self._depth_sampler.use(location=<NUM_LIT:0>)<EOL>texture.use(location=<NUM_LIT:0>)<EOL>self._depth_shader[\"<STR_LIT>\"].value = <NUM_LIT:0><EOL>self._quad.render(self._depth_shader)<EOL>self._depth_sampler.clear(location=<NUM_LIT:0>)<EOL>", "docstring": "Draw depth buffer linearized.\nBy default this will draw the texture as a full screen quad.\nA sampler will be used to ensure the right conditions to draw the depth buffer.\n\n:param near: Near plane in projection\n:param far: Far plane in projection\n:param pos: (tuple) offset x, y\n:param scale: (tuple) scale x, y", "id": "f14406:c0:m4"}
{"signature": "def find(self, path: Path):", "body": "<EOL>if getattr(self, '<STR_LIT>', None):<EOL><INDENT>self.paths = getattr(settings, self.settings_attr)<EOL><DEDENT>path_found = None<EOL>for entry in self.paths:<EOL><INDENT>abspath = entry / path<EOL>if abspath.exists():<EOL><INDENT>path_found = abspath<EOL><DEDENT><DEDENT>return path_found<EOL>", "docstring": "Find a file in the path. The file may exist in multiple\npaths. The last found file will be returned.\n\n:param path: The path to find\n:return: The absolute path to the file or None if not found", "id": "f14409:c0:m1"}
{"signature": "def rot_state(self, x, y):", "body": "if self.last_x is None:<EOL><INDENT>self.last_x = x<EOL><DEDENT>if self.last_y is None:<EOL><INDENT>self.last_y = y<EOL><DEDENT>x_offset = self.last_x - x<EOL>y_offset = self.last_y - y<EOL>self.last_x = x<EOL>self.last_y = y<EOL>x_offset *= self.mouse_sensitivity<EOL>y_offset *= self.mouse_sensitivity<EOL>self.yaw -= x_offset<EOL>self.pitch += y_offset<EOL>if self.pitch > <NUM_LIT>:<EOL><INDENT>self.pitch = <NUM_LIT><EOL><DEDENT>if self.pitch < -<NUM_LIT>:<EOL><INDENT>self.pitch = -<NUM_LIT><EOL><DEDENT>self._update_yaw_and_pitch()<EOL>", "docstring": "Set the rotation state of the camera\n\n:param x: viewport x pos\n:param y: viewport y pos", "id": "f14415:c1:m8"}
{"signature": "@property<EOL><INDENT>def view_matrix(self):<DEDENT>", "body": "self._update_yaw_and_pitch()<EOL>return self._gl_look_at(self.position, self.position + self.dir, self._up)<EOL>", "docstring": ":return: The current view matrix for the camera", "id": "f14415:c0:m2"}
{"signature": "@property<EOL><INDENT>def view_matrix(self):<DEDENT>", "body": "<EOL>now = time.time()<EOL>t = max(now - self._last_time, <NUM_LIT:0>)<EOL>self._last_time = now<EOL>if self._xdir == POSITIVE:<EOL><INDENT>self.position += self.right * self.velocity * t<EOL><DEDENT>elif self._xdir == NEGATIVE:<EOL><INDENT>self.position -= self.right * self.velocity * t<EOL><DEDENT>if self._zdir == NEGATIVE:<EOL><INDENT>self.position += self.dir * self.velocity * t<EOL><DEDENT>elif self._zdir == POSITIVE:<EOL><INDENT>self.position -= self.dir * self.velocity * t<EOL><DEDENT>if self._ydir == POSITIVE:<EOL><INDENT>self.position += self.up * self.velocity * t<EOL><DEDENT>elif self._ydir == NEGATIVE:<EOL><INDENT>self.position -= self.up * self.velocity * t<EOL><DEDENT>return self._gl_look_at(self.position, self.position + self.dir, self._up)<EOL>", "docstring": ":return: The current view matrix for the camera", "id": "f14415:c1:m9"}
{"signature": "def _update_yaw_and_pitch(self):", "body": "front = Vector3([<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>])<EOL>front.x = cos(radians(self.yaw)) * cos(radians(self.pitch))<EOL>front.y = sin(radians(self.pitch))<EOL>front.z = sin(radians(self.yaw)) * cos(radians(self.pitch))<EOL>self.dir = vector.normalise(front)<EOL>self.right = vector.normalise(vector3.cross(self.dir, self._up))<EOL>self.up = vector.normalise(vector3.cross(self.right, self.dir))<EOL>", "docstring": "Updates the camera vectors based on the current yaw and pitch", "id": "f14415:c0:m3"}
{"signature": "def look_at(self, vec=None, pos=None):", "body": "if pos is None:<EOL><INDENT>vec = Vector3(pos)<EOL><DEDENT>if vec is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return self._gl_look_at(self.position, vec, self._up)<EOL>", "docstring": "Look at a specific point\n\n:param vec: Vector3 position\n:param pos: python list [x, y, x]\n:return: Camera matrix", "id": "f14415:c0:m4"}
{"signature": "def set_position(self, x, y, z):", "body": "self.position = Vector3([x, y, z])<EOL>", "docstring": "Set the 3D position of the camera\n\n:param x: float\n:param y: float\n:param z: float", "id": "f14415:c0:m1"}
{"signature": "def add_attribute(self, attr_type, name, components):", "body": "self.attributes[attr_type] = {\"<STR_LIT:name>\": name, \"<STR_LIT>\": components}<EOL>", "docstring": "Add metadata about the mesh\n:param attr_type: POSITION, NORMAL etc\n:param name: The attribute name used in the program\n:param components: Number of floats", "id": "f14416:c0:m3"}
{"signature": "def draw(self, projection_matrix=None, view_matrix=None, camera_matrix=None, time=<NUM_LIT:0>):", "body": "if self.mesh_program:<EOL><INDENT>self.mesh_program.draw(<EOL>self,<EOL>projection_matrix=projection_matrix,<EOL>view_matrix=view_matrix,<EOL>camera_matrix=camera_matrix,<EOL>time=time<EOL>)<EOL><DEDENT>", "docstring": "Draw the mesh using the assigned mesh program\n\n:param projection_matrix: projection_matrix (bytes)\n:param view_matrix: view_matrix (bytes)\n:param camera_matrix: camera_matrix (bytes)", "id": "f14416:c0:m1"}
{"signature": "def draw(self, projection_matrix=None, camera_matrix=None, time=<NUM_LIT:0>):", "body": "projection_matrix = projection_matrix.astype('<STR_LIT>').tobytes()<EOL>camera_matrix = camera_matrix.astype('<STR_LIT>').tobytes()<EOL>for node in self.root_nodes:<EOL><INDENT>node.draw(<EOL>projection_matrix=projection_matrix,<EOL>camera_matrix=camera_matrix,<EOL>time=time,<EOL>)<EOL><DEDENT>self.ctx.clear_samplers(<NUM_LIT:0>, <NUM_LIT:4>)<EOL>", "docstring": "Draw all the nodes in the scene\n\n:param projection_matrix: projection matrix (bytes)\n:param camera_matrix: camera_matrix (bytes)\n:param time: The current time", "id": "f14417:c0:m4"}
{"signature": "def __init__(self, name, mesh_programs=None, **kwargs):", "body": "self.name = name<EOL>self.root_nodes = []<EOL>self.nodes = []<EOL>self.materials = []<EOL>self.meshes = []<EOL>self.cameras = []<EOL>self.bbox_min = None<EOL>self.bbox_max = None<EOL>self.diagonal_size = <NUM_LIT:1.0><EOL>self.bbox_vao = geometry.bbox()<EOL>self.bbox_program = programs.load(ProgramDescription(<EOL>label='<STR_LIT>',<EOL>path='<STR_LIT>'))<EOL>self._view_matrix = matrix44.create_identity()<EOL>", "docstring": ":param name: Unique name or path for the scene\n:param mesh_programs: List of MeshPrograms to apply to the scene\n:param loader: Loader class for the scene if relevant", "id": "f14417:c0:m0"}
{"signature": "def destroy(self):", "body": "for mesh in self.meshes:<EOL><INDENT>mesh.vao.release()<EOL><DEDENT>", "docstring": "Destroy the scene data and deallocate buffers", "id": "f14417:c0:m9"}
{"signature": "def stop(self) -> float:", "body": "mixer.music.stop()<EOL>return self.get_time()<EOL>", "docstring": "Stop the music\n\nReturns:\n    The current location in the music", "id": "f14422:c0:m4"}
{"signature": "def pause(self):", "body": "mixer.music.pause()<EOL>self.pause_time = self.get_time()<EOL>self.paused = True<EOL>", "docstring": "Pause the music", "id": "f14422:c0:m2"}
{"signature": "def toggle_pause(self):", "body": "if self.paused:<EOL><INDENT>self.start()<EOL><DEDENT>else:<EOL><INDENT>self.pause()<EOL><DEDENT>", "docstring": "Toggle pause mode", "id": "f14422:c0:m3"}
{"signature": "def set_time(self, value: float):", "body": "if value < <NUM_LIT:0>:<EOL><INDENT>value = <NUM_LIT:0><EOL><DEDENT>self.offset += self.get_time() - value<EOL>", "docstring": "Set the current time. This can be used to jump in the timeline.\n\nArgs:\n    value (float): The new time", "id": "f14423:c0:m6"}
{"signature": "def toggle_pause(self):", "body": "if self.pause_time:<EOL><INDENT>self.start()<EOL><DEDENT>else:<EOL><INDENT>self.pause()<EOL><DEDENT>", "docstring": "Toggle the paused state", "id": "f14423:c0:m3"}
{"signature": "def get_time(self) -> float:", "body": "if self.pause_time is not None:<EOL><INDENT>curr_time = self.pause_time - self.offset - self.start_time<EOL>return curr_time<EOL><DEDENT>curr_time = time.time()<EOL>return curr_time - self.start_time - self.offset<EOL>", "docstring": "Get the current time in seconds\n\nReturns:\n    The current time in seconds", "id": "f14423:c0:m5"}
{"signature": "def stop(self) -> float:", "body": "self.stop_time = time.time()<EOL>return self.stop_time - self.start_time - self.offset<EOL>", "docstring": "Stop the timer\n\nReturns:\n    The time the timer was stopped", "id": "f14423:c0:m4"}
{"signature": "def stop(self) -> float:", "body": "raise NotImplementedError()<EOL>", "docstring": "Stop the timer. Should only be called once when stopping the timer.\n\nReturns:\n    The time the timer was stopped\n\nRaises:\n    NotImplementedError", "id": "f14424:c0:m4"}
{"signature": "def pause(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Pause the timer\n\nRaises:\n    NotImplementedError", "id": "f14424:c0:m2"}
{"signature": "def pause(self):", "body": "self.controller.playing = False<EOL>", "docstring": "Pause the timer", "id": "f14425:c0:m4"}
{"signature": "def toggle_pause(self):", "body": "self.controller.playing = not self.controller.playing<EOL>", "docstring": "Toggle pause mode", "id": "f14425:c0:m5"}
{"signature": "def __init__(self, **kwargs):", "body": "config = getattr(settings, '<STR_LIT>', None)<EOL>if config is None:<EOL><INDENT>config = {}<EOL><DEDENT>self.mode = config.get('<STR_LIT>') or '<STR_LIT>'<EOL>self.files = config.get('<STR_LIT>') or '<STR_LIT>'<EOL>self.project = config.get('<STR_LIT>') or '<STR_LIT>'<EOL>self.rps = config.get('<STR_LIT>', <NUM_LIT>)<EOL>self.start_paused = False<EOL>self.controller = TimeController(self.rps)<EOL>if self.mode == '<STR_LIT>':<EOL><INDENT>self.rocket = Rocket.from_socket(self.controller, track_path=self.files)<EOL>self.start_paused = True<EOL><DEDENT>elif self.mode == '<STR_LIT>':<EOL><INDENT>self.rocket = Rocket.from_project_file(self.controller, self.project)<EOL><DEDENT>elif self.mode == '<STR_LIT>':<EOL><INDENT>self.rocket = Rocket.from_files(self.controller, self.files)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(self.mode))<EOL><DEDENT>for track in tracks.tacks:<EOL><INDENT>self.rocket.tracks.add(track)<EOL><DEDENT>for track in tracks.tacks:<EOL><INDENT>self.rocket.track(track.name)<EOL><DEDENT>self.rocket.update()<EOL>super().__init__(**kwargs)<EOL>", "docstring": "Initialize the rocket timer using values from settings", "id": "f14425:c0:m0"}
{"signature": "def stop(self) -> float:", "body": "self.player.stop()<EOL>return self.get_time()<EOL>", "docstring": "Stop the music\n\nReturns:\n    The current time in seconds", "id": "f14426:c0:m4"}
{"signature": "def start(self):", "body": "self.music.start()<EOL>if not self.start_paused:<EOL><INDENT>self.rocket.start()<EOL><DEDENT>", "docstring": "Start the timer", "id": "f14427:c0:m1"}
{"signature": "def set_time(self, value: float):", "body": "self.music.set_time(value)<EOL>", "docstring": "Set the current time jumping in the timeline\n\nArgs:\n    value (float): The new time value", "id": "f14427:c0:m3"}
{"signature": "def toggle_pause(self):", "body": "self.controller.playing = not self.controller.playing<EOL>self.music.toggle_pause()<EOL>", "docstring": "Toggle pause mode", "id": "f14427:c0:m5"}
{"signature": "def get_time(self) -> float:", "body": "self.rocket.update()<EOL>return self.music.get_time()<EOL>", "docstring": "Get the current time in seconds\n\nReturns:\n    The current time in seconds", "id": "f14427:c0:m2"}
{"signature": "def render_lights_debug(self, camera_matrix, projection):", "body": "self.ctx.enable(moderngl.BLEND)<EOL>self.ctx.blend_func = moderngl.SRC_ALPHA, moderngl.ONE_MINUS_SRC_ALPHA<EOL>for light in self.point_lights:<EOL><INDENT>m_mv = matrix44.multiply(light.matrix, camera_matrix)<EOL>light_size = light.radius<EOL>self.debug_shader[\"<STR_LIT>\"].write(projection.tobytes())<EOL>self.debug_shader[\"<STR_LIT>\"].write(m_mv.astype('<STR_LIT>').tobytes())<EOL>self.debug_shader[\"<STR_LIT:size>\"].value = light_size<EOL>self.unit_cube.render(self.debug_shader, mode=moderngl.LINE_STRIP)<EOL><DEDENT>self.ctx.disable(moderngl.BLEND)<EOL>", "docstring": "Render outlines of light volumes", "id": "f14429:c0:m4"}
{"signature": "@property<EOL><INDENT>def window(self) -> BaseWindow:<DEDENT>", "body": "return self._window<EOL>", "docstring": "The :py:class:`Window`", "id": "f14430:c0:m4"}
{"signature": "def get_scene(self, label: str) -> Scene:", "body": "return self._project.get_scene(label)<EOL>", "docstring": "Get a scene by its label\n\nArgs:\n    label (str): The label for the scene\n\nReturns: The :py:class:`Scene` instance", "id": "f14430:c0:m11"}
{"signature": "def create_projection(self, fov: float = <NUM_LIT>, near: float = <NUM_LIT:1.0>, far: float = <NUM_LIT>, aspect_ratio: float = None):", "body": "return matrix44.create_perspective_projection_matrix(<EOL>fov,<EOL>aspect_ratio or self.window.aspect_ratio,<EOL>near,<EOL>far,<EOL>dtype='<STR_LIT>',<EOL>)<EOL>", "docstring": "Create a projection matrix with the following parameters.\nWhen ``aspect_ratio`` is not provided the configured aspect\nratio for the window will be used.\n\nArgs:\n    fov (float): Field of view (float)\n    near (float): Camera near value\n    far (float): Camrea far value\n\nKeyword Args:\n    aspect_ratio (float): Aspect ratio of the viewport\n\nReturns:\n    The projection matrix as a float32 :py:class:`numpy.array`", "id": "f14430:c0:m15"}
{"signature": "def create_transformation(self, rotation=None, translation=None):", "body": "mat = None<EOL>if rotation is not None:<EOL><INDENT>mat = Matrix44.from_eulers(Vector3(rotation))<EOL><DEDENT>if translation is not None:<EOL><INDENT>trans = matrix44.create_from_translation(Vector3(translation))<EOL>if mat is None:<EOL><INDENT>mat = trans<EOL><DEDENT>else:<EOL><INDENT>mat = matrix44.multiply(mat, trans)<EOL><DEDENT><DEDENT>return mat<EOL>", "docstring": "Creates a transformation matrix woth rotations and translation.\n\nArgs:\n    rotation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`\n    translation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`\n\nReturns:\n    A 4x4 matrix as a :py:class:`numpy.array`", "id": "f14430:c0:m16"}
{"signature": "@property<EOL><INDENT>def sys_camera(self) -> camera.SystemCamera:<DEDENT>", "body": "return self._sys_camera<EOL>", "docstring": "The system camera responding to input", "id": "f14430:c0:m6"}
{"signature": "@property<EOL><INDENT>def name(self) -> str:<DEDENT>", "body": "return self._name<EOL>", "docstring": "Full python path to the effect", "id": "f14430:c0:m2"}
{"signature": "def create_normal_matrix(self, modelview):", "body": "normal_m = Matrix33.from_matrix44(modelview)<EOL>normal_m = normal_m.inverse<EOL>normal_m = normal_m.transpose()<EOL>return normal_m<EOL>", "docstring": "Creates a normal matrix from modelview matrix\n\nArgs:\n    modelview: The modelview matrix\n\nReturns:\n    A 3x3 Normal matrix as a :py:class:`numpy.array`", "id": "f14430:c0:m17"}
{"signature": "def get_program(self, label: str) -> moderngl.Program:", "body": "return self._project.get_program(label)<EOL>", "docstring": "Get a program by its label\n\nArgs:\n    label (str): The label for the program\n\nReturns: py:class:`moderngl.Program` instance", "id": "f14430:c0:m8"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "pass<EOL>", "docstring": "Implement the initialize when extending the class.\nThis method is responsible for fetching or creating resources\nand doing genereal initalization of the effect.\n\nThe effect initializer is called when all resources are loaded\n(with the exception of resources you manually load in the\nthe initializer).\n\nIf your effect requires arguments during initialiation you\nare free to add positional and keyword arguments.\n\nYou **do not** have to call the superclass initializer though ``super()``\n\nExample::\n\n    def __init__(self):\n        # Fetch reference to resource by their label\n        self.program = self.get_program('simple_textured')\n        self.texture = self.get_texture('bricks')\n        # .. create a cube etc ..", "id": "f14430:c0:m0"}
{"signature": "def get_effect(self, label: str) -> '<STR_LIT>':", "body": "return self._project.get_effect(label)<EOL>", "docstring": "Get an effect instance by label.\n\nArgs:\n    label (str): Label for the data file\n\nReturns: The :py:class:`Effect` instance", "id": "f14430:c0:m13"}
{"signature": "def get_track(self, name: str) -> Track:", "body": "return resources.tracks.get(name)<EOL>", "docstring": "Gets or creates a rocket track.\nOnly avaiable when using a Rocket timer.\n\nArgs:\n    name (str): The rocket track name\n\nReturns:\n    The :py:class:`rocket.Track` instance", "id": "f14430:c0:m10"}
{"signature": "def load_effects_classes(self):", "body": "self.effect_classes = []<EOL>for _, cls in inspect.getmembers(self.effect_module):<EOL><INDENT>if inspect.isclass(cls):<EOL><INDENT>if cls == Effect:<EOL><INDENT>continue<EOL><DEDENT>if issubclass(cls, Effect):<EOL><INDENT>self.effect_classes.append(cls)<EOL>self.effect_class_map[cls.__name__] = cls<EOL>cls._name = \"<STR_LIT>\".format(self.effect_module_name, cls.__name__)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Iterate the module attributes picking out effects", "id": "f14432:c1:m8"}
{"signature": "def load_resource_module(self):", "body": "<EOL>try:<EOL><INDENT>name = '<STR_LIT>'.format(self.name, '<STR_LIT>')<EOL>self.dependencies_module = importlib.import_module(name)<EOL><DEDENT>except ModuleNotFoundError as err:<EOL><INDENT>raise EffectError(<EOL>(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>).format(self.name, err))<EOL><DEDENT>try:<EOL><INDENT>self.resources = getattr(self.dependencies_module, '<STR_LIT>')<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise EffectError(\"<STR_LIT>\".format(name))<EOL><DEDENT>if not isinstance(self.resources, list):<EOL><INDENT>raise EffectError(<EOL>\"<STR_LIT>\".format(<EOL>name, type(self.resources)))<EOL><DEDENT>try:<EOL><INDENT>self.effect_packages = getattr(self.dependencies_module, '<STR_LIT>')<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise EffectError(\"<STR_LIT>\".format(name))<EOL><DEDENT>if not isinstance(self.effect_packages, list):<EOL><INDENT>raise EffectError(<EOL>\"<STR_LIT>\".format(<EOL>name, type(self.effects)))<EOL><DEDENT>", "docstring": "Fetch the resource list", "id": "f14432:c1:m9"}
{"signature": "def get_effect_resources(self) -> List[Any]:", "body": "resources = []<EOL>for package in self.packages:<EOL><INDENT>resources.extend(package.resources)<EOL><DEDENT>return resources<EOL>", "docstring": "Get all resources registed in effect packages.\nThese are typically located in ``resources.py``", "id": "f14432:c0:m2"}
{"signature": "def polulate(self, package_list):", "body": "for package in package_list:<EOL><INDENT>self.add_package(package)<EOL><DEDENT>", "docstring": "Polulate the registry with effect packages.\n\n:param module_list: List of effect module paths", "id": "f14432:c0:m3"}
{"signature": "def runnable_effects(self) -> List[Type[Effect]]:", "body": "return [cls for cls in self.effect_classes if cls.runnable]<EOL>", "docstring": "Returns the runnable effect in the package", "id": "f14432:c1:m1"}
{"signature": "def get_package(self, name) -> '<STR_LIT>':", "body": "name, cls_name = parse_package_string(name)<EOL>try:<EOL><INDENT>return self.package_map[name]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise EffectError(\"<STR_LIT>\".format(name))<EOL><DEDENT>", "docstring": "Get a package by python path. Can also contain path to an effect.\n\nArgs:\n    name (str): Path to effect package or effect\n\nReturns:\n    The requested EffectPackage\n\nRaises:\n    EffectError when no package is found", "id": "f14432:c0:m5"}
{"signature": "def add_package(self, name):", "body": "name, cls_name = parse_package_string(name)<EOL>if name in self.package_map:<EOL><INDENT>return<EOL><DEDENT>package = EffectPackage(name)<EOL>package.load()<EOL>self.packages.append(package)<EOL>self.package_map[package.name] = package<EOL>self.polulate(package.effect_packages)<EOL>", "docstring": "Registers a single package\n\n:param name: (str) The effect package to add", "id": "f14432:c0:m4"}
{"signature": "def parse_package_string(path):", "body": "parts = path.split('<STR_LIT:.>')<EOL>if parts[-<NUM_LIT:1>][<NUM_LIT:0>].isupper():<EOL><INDENT>return \"<STR_LIT:.>\".join(parts[:-<NUM_LIT:1>]), parts[-<NUM_LIT:1>]<EOL><DEDENT>return path, \"<STR_LIT>\"<EOL>", "docstring": "Parse the effect package string.\nCan contain the package python path or path to effect class in an effect package.\n\nExamples::\n\n    # Path to effect pacakge\n    examples.cubes\n\n    # Path to effect class\n    examples.cubes.Cubes\n\nArgs:\n    path: python path to effect package. May also include effect class name.\n\nReturns:\n    tuple: (package_path, effect_class)", "id": "f14432:m0"}
{"signature": "def __init__(self, area, text_lines=None, aspect_ratio=<NUM_LIT:1.0>):", "body": "super().__init__()<EOL>self.area = area<EOL>self._text_lines = text_lines<EOL>self._projection_bytes = None<EOL>self._aspect_ratio = <NUM_LIT:1.0><EOL>self.aspect_ratio = aspect_ratio<EOL>self._vao = None<EOL>self._config = self.get_data('<STR_LIT>')<EOL>self._texture = self.get_texture('<STR_LIT>')<EOL>self._program = self.get_program('<STR_LIT>')<EOL>self._string_buffer = None<EOL>self._init(FontMeta(self._config))<EOL>self._string_buffer = self.ctx.buffer(reserve=self.area[<NUM_LIT:0>] * <NUM_LIT:4> * self.area[<NUM_LIT:1>])<EOL>self._string_buffer.clear(chunk=b'<STR_LIT>')<EOL>pos = self.ctx.buffer(data=bytes([<NUM_LIT:0>] * <NUM_LIT:4> * <NUM_LIT:3>))<EOL>self._vao = VAO(\"<STR_LIT>\", mode=moderngl.POINTS)<EOL>self._vao.buffer(pos, '<STR_LIT>', '<STR_LIT>')<EOL>self._vao.buffer(self._string_buffer, '<STR_LIT>', '<STR_LIT>', per_instance=True)<EOL>self.text_lines = self._text_lines<EOL>", "docstring": ":param area: (x, y) Text area size (number of characters)\n:param size: Text size\n:param text: Initial text lines", "id": "f14436:c0:m0"}
{"signature": "def load_shader(self, shader_type: str, path: str):", "body": "if path:<EOL><INDENT>resolved_path = self.find_program(path)<EOL>if not resolved_path:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(shader_type, path))<EOL><DEDENT>print(\"<STR_LIT>\", path)<EOL>with open(resolved_path, '<STR_LIT:r>') as fd:<EOL><INDENT>return fd.read()<EOL><DEDENT><DEDENT>", "docstring": "Load a single shader", "id": "f14442:c0:m1"}
{"signature": "def get_bbox(self, primitive):", "body": "accessor = primitive.attributes.get('<STR_LIT>')<EOL>return accessor.min, accessor.max<EOL>", "docstring": "Get the bounding box for the mesh", "id": "f14444:c3:m4"}
{"signature": "@property<EOL><INDENT>def has_data_uri(self):<DEDENT>", "body": "if not self.uri:<EOL><INDENT>return False<EOL><DEDENT>return self.uri.startswith(\"<STR_LIT>\")<EOL>", "docstring": "Is data embedded in json?", "id": "f14444:c7:m1"}
{"signature": "def prepare_attrib_mapping(self, primitive):", "body": "buffer_info = []<EOL>for name, accessor in primitive.attributes.items():<EOL><INDENT>info = VBOInfo(*accessor.info())<EOL>info.attributes.append((name, info.components))<EOL>if buffer_info and buffer_info[-<NUM_LIT:1>].buffer_view == info.buffer_view:<EOL><INDENT>if buffer_info[-<NUM_LIT:1>].interleaves(info):<EOL><INDENT>buffer_info[-<NUM_LIT:1>].merge(info)<EOL>continue<EOL><DEDENT><DEDENT>buffer_info.append(info)<EOL><DEDENT>return buffer_info<EOL>", "docstring": "Pre-parse buffer mappings for each VBO to detect interleaved data for a primitive", "id": "f14444:c3:m3"}
{"signature": "def check_extensions(self, supported):", "body": "if self.data.get('<STR_LIT>'):<EOL><INDENT>for ext in self.data.get('<STR_LIT>'):<EOL><INDENT>if ext not in supported:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(ext))<EOL><DEDENT><DEDENT><DEDENT>if self.data.get('<STR_LIT>'):<EOL><INDENT>for ext in self.data.get('<STR_LIT>'):<EOL><INDENT>if ext not in supported:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(ext))<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "\"extensionsRequired\": [\"KHR_draco_mesh_compression\"],\n\"extensionsUsed\": [\"KHR_draco_mesh_compression\"]", "id": "f14444:c1:m4"}
{"signature": "@property<EOL><INDENT>def is_resource_node(self):<DEDENT>", "body": "return self.camera is not None or self.mesh is not None<EOL>", "docstring": "Is this just a reference node to a resource?", "id": "f14444:c9:m2"}
{"signature": "@property<EOL><INDENT>def is_separate_file(self):<DEDENT>", "body": "return self.uri is not None and not self.has_data_uri<EOL>", "docstring": "Buffer represents an independent bin file?", "id": "f14444:c7:m2"}
{"signature": "def load(self):", "body": "self.path = self.find_scene(self.meta.path)<EOL>if not self.path:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(self.meta.path))<EOL><DEDENT>self.scene = Scene(self.path)<EOL>if self.path.suffix == '<STR_LIT>':<EOL><INDENT>self.load_gltf()<EOL><DEDENT>if self.path.suffix == '<STR_LIT>':<EOL><INDENT>self.load_glb()<EOL><DEDENT>self.meta.check_version()<EOL>self.meta.check_extensions(self.supported_extensions)<EOL>self.load_images()<EOL>self.load_samplers()<EOL>self.load_textures()<EOL>self.load_materials()<EOL>self.load_meshes()<EOL>self.load_nodes()<EOL>self.scene.calc_scene_bbox()<EOL>self.scene.prepare()<EOL>return self.scene<EOL>", "docstring": "Deferred loading of the scene\n\n:param scene: The scene object\n:param file: Resolved path if changed by finder", "id": "f14444:c0:m1"}
{"signature": "def _link_data(self):", "body": "<EOL>for acc in self.accessors:<EOL><INDENT>acc.bufferView = self.buffer_views[acc.bufferViewId]<EOL><DEDENT>for buffer_view in self.buffer_views:<EOL><INDENT>buffer_view.buffer = self.buffers[buffer_view.bufferId]<EOL><DEDENT>for mesh in self.meshes:<EOL><INDENT>for primitive in mesh.primitives:<EOL><INDENT>if getattr(primitive, \"<STR_LIT>\", None) is not None:<EOL><INDENT>primitive.indices = self.accessors[primitive.indices]<EOL><DEDENT>for name, value in primitive.attributes.items():<EOL><INDENT>primitive.attributes[name] = self.accessors[value]<EOL><DEDENT><DEDENT><DEDENT>for image in self.images:<EOL><INDENT>if image.bufferViewId is not None:<EOL><INDENT>image.bufferView = self.buffer_views[image.bufferViewId]<EOL><DEDENT><DEDENT>", "docstring": "Add references", "id": "f14444:c1:m1"}
{"signature": "def images_exist(self):", "body": "pass<EOL>", "docstring": "checks if the images references in textures exist", "id": "f14444:c1:m6"}
{"signature": "def load_indices(self, primitive):", "body": "if getattr(primitive, \"<STR_LIT>\") is None:<EOL><INDENT>return None, None<EOL><DEDENT>_, component_type, buffer = primitive.indices.read()<EOL>return component_type, buffer<EOL>", "docstring": "Loads the index buffer / polygon list for a primitive", "id": "f14444:c3:m2"}
{"signature": "def read(self):", "body": "<EOL>dtype = NP_COMPONENT_DTYPE[self.componentType.value]<EOL>return ACCESSOR_TYPE[self.type], self.componentType, self.bufferView.read(<EOL>byte_offset=self.byteOffset,<EOL>dtype=dtype,<EOL>count=self.count * ACCESSOR_TYPE[self.type],<EOL>)<EOL>", "docstring": "Reads buffer data\n:return: component count, component type, data", "id": "f14444:c5:m1"}
{"signature": "def create(self):", "body": "dtype = NP_COMPONENT_DTYPE[self.component_type.value]<EOL>data = numpy.frombuffer(<EOL>self.buffer.read(byte_length=self.byte_length, byte_offset=self.byte_offset),<EOL>count=self.count * self.components,<EOL>dtype=dtype,<EOL>)<EOL>return dtype, data<EOL>", "docstring": "Create the VBO", "id": "f14444:c4:m3"}
{"signature": "def load(self) -> Scene:", "body": "raise NotImplementedError()<EOL>", "docstring": "Load the scene", "id": "f14445:c0:m1"}
{"signature": "def _find_last_of(self, path, finders):", "body": "found_path = None<EOL>for finder in finders:<EOL><INDENT>result = finder.find(path)<EOL>if result:<EOL><INDENT>found_path = result<EOL><DEDENT><DEDENT>return found_path<EOL>", "docstring": "Find the last occurance of the file in finders", "id": "f14447:c0:m6"}
{"signature": "def __init__(self, meta):", "body": "self.meta = meta<EOL>", "docstring": ":param meta: ResourceDescription instance", "id": "f14447:c0:m0"}
{"signature": "def image_data(image):", "body": "<EOL>data = image.tobytes()<EOL>components = len(data) // (image.size[<NUM_LIT:0>] * image.size[<NUM_LIT:1>])<EOL>return components, data<EOL>", "docstring": "Get components and bytes for an image", "id": "f14449:m0"}
{"signature": "def load(self):", "body": "self.meta.resolved_path = self.find_data(self.meta.path)<EOL>if not self.meta.resolved_path:<EOL><INDENT>raise ImproperlyConfigured(\"<STR_LIT>\".format(self.meta.path))<EOL><DEDENT>print(\"<STR_LIT>\", self.meta.path)<EOL>with open(self.meta.resolved_path, '<STR_LIT:r>') as fd:<EOL><INDENT>return fd.read()<EOL><DEDENT>", "docstring": "Load a file in text mode", "id": "f14451:c0:m0"}
{"signature": "def init(*args, **kwargs):", "body": "view.init(*args, **kwargs)<EOL>", "docstring": "Initialize and load", "id": "f14454:m1"}
{"signature": "def setup(**kwargs):", "body": "settings.setup()<EOL>settings.update(**kwargs)<EOL>", "docstring": "Configure", "id": "f14454:m0"}
{"signature": "def key_event(self, key, action, mods):", "body": "pass<EOL>", "docstring": "Forwarded key events from the system.\n\n:param key: The key that was pressed or released.\n:param action: ACTION_PRESS, ACTION_RELEASE\n:param mods: Bit field describing which modifier keys were held down.", "id": "f14456:c0:m2"}
{"signature": "def draw(self, time, frametime, target):", "body": "for effect in self.effects:<EOL><INDENT>value = effect.rocket_timeline_track.time_value(time)<EOL>if value > <NUM_LIT:0.5>:<EOL><INDENT>effect.draw(time, frametime, target)<EOL><DEDENT><DEDENT>", "docstring": "Fetch track value for every runnable effect.\nIf the value is > 0.5 we draw it.", "id": "f14457:c0:m1"}
{"signature": "def run_from_argv(self, argv):", "body": "parser = self.create_parser(argv[<NUM_LIT:0>], argv[<NUM_LIT:1>])<EOL>options = parser.parse_args(argv[<NUM_LIT:2>:])<EOL>cmd_options = vars(options)<EOL>args = cmd_options.pop('<STR_LIT:args>', ())<EOL>self.handle(*args, **cmd_options)<EOL>", "docstring": "Called by the system when executing the command from the command line.\nThis should not be overridden.\n\n:param argv: Arguments from command line", "id": "f14459:c1:m3"}
{"signature": "def print_help(self, prog_name, subcommand):", "body": "parser = self.create_parser(prog_name, subcommand)<EOL>parser.print_help()<EOL>", "docstring": "Prints the help text generated by the argument parser defined for this command.\nThis method should not be overridden.\n\n:param prog_name: name of the program that started the command.\n:param subcommand: The subcommand name", "id": "f14459:c1:m4"}
{"signature": "def add_arguments(self, parser):", "body": "pass<EOL>", "docstring": "This method is for adding arguments to a command.\nWhen extending this class we define the arguments\nby adding it to the parser passed in.\n\n:param parser: The parser to add arguments to (standard argparse)", "id": "f14459:c1:m1"}
{"signature": "def handle(self, *args, **options):", "body": "raise NotImplementedError()<EOL>", "docstring": "The actual run logic for the command.\n\n:param args: arguments from the argparser\n:param options: keyword arguments from the argparser", "id": "f14459:c1:m2"}
{"signature": "def create_entrypoint(self):", "body": "with open(os.path.join(self.template_dir, '<STR_LIT>'), '<STR_LIT:r>') as fd:<EOL><INDENT>data = fd.read().format(project_name=self.project_name)<EOL><DEDENT>with open('<STR_LIT>', '<STR_LIT:w>') as fd:<EOL><INDENT>fd.write(data)<EOL><DEDENT>os.chmod('<STR_LIT>', <NUM_LIT>)<EOL>", "docstring": "Write manage.py in the current directory", "id": "f14464:c0:m4"}
{"signature": "def initial_sanity_check(self):", "body": "<EOL>self.try_import(self.project_name)<EOL>self.validate_name(self.project_name)<EOL>if os.path.exists(self.project_name):<EOL><INDENT>print(\"<STR_LIT>\".format(self.project_name))<EOL>return False<EOL><DEDENT>if os.path.exists('<STR_LIT>'):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>return True<EOL>", "docstring": "Checks if we can create the project", "id": "f14464:c0:m2"}
{"signature": "def find_commands(command_dir: str) -> List[str]:", "body": "if not command_dir:<EOL><INDENT>return []<EOL><DEDENT>return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir])<EOL>if not is_pkg and not name.startswith('<STR_LIT:_>')]<EOL>", "docstring": "Get all command names in the a folder\n\n:return: List of commands names", "id": "f14465:m0"}
{"signature": "def execute_from_command_line(argv=None):", "body": "if not argv:<EOL><INDENT>argv = sys.argv<EOL><DEDENT>system_commands = find_commands(system_command_dir())<EOL>project_commands = find_commands(project_command_dir())<EOL>project_package = project_package_name()<EOL>command = argv[<NUM_LIT:1>] if len(argv) > <NUM_LIT:1> else None<EOL>if command in system_commands:<EOL><INDENT>cmd = load_command_class('<STR_LIT>', command)<EOL>cmd.run_from_argv(argv)<EOL><DEDENT>elif command in project_commands:<EOL><INDENT>cmd = load_command_class(project_package, command)<EOL>cmd.run_from_argv(argv)<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>for name in system_commands:<EOL><INDENT>print(\"<STR_LIT>\".format(name))<EOL><DEDENT>for name in project_commands:<EOL><INDENT>print(\"<STR_LIT>\".format(name))<EOL><DEDENT><DEDENT>", "docstring": "Currently the only entrypoint (manage.py, demosys-admin)", "id": "f14465:m6"}
{"signature": "def get_scene(self, label: str) -> Scene:", "body": "return self._get_resource(label, self._scenes, \"<STR_LIT>\")<EOL>", "docstring": "Gets a scene by label\n\nArgs:\n    label (str): The label for the scene to fetch\n\nReturns:\n    Scene instance", "id": "f14467:c0:m12"}
{"signature": "@property<EOL><INDENT>def ctx(self) -> moderngl.Context:<DEDENT>", "body": "return context.ctx()<EOL>", "docstring": "The MondernGL context", "id": "f14467:c0:m18"}
{"signature": "def post_load(self):", "body": "for _, effect in self._effects.items():<EOL><INDENT>effect.post_load()<EOL><DEDENT>", "docstring": "Called after resources are loaded before effects starts rendering.\nIt simply iterates each effect instance calling their ``post_load`` methods.", "id": "f14467:c0:m6"}
{"signature": "def create_effect(self, label: str, name: str, *args, **kwargs) -> Effect:", "body": "effect_cls = effects.find_effect_class(name)<EOL>effect = effect_cls(*args, **kwargs)<EOL>effect._label = label<EOL>if label in self._effects:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(label))<EOL><DEDENT>self._effects[label] = effect<EOL>return effect<EOL>", "docstring": "Create an effect instance adding it to the internal effects dictionary using the label as key.\n\nArgs:\n    label (str): The unique label for the effect instance\n    name (str): Name or full python path to the effect class we want to instantiate\n    args: Positional arguments to the effect initializer\n    kwargs: Keyword arguments to the effect initializer\n\nReturns:\n    The newly created Effect instance", "id": "f14467:c0:m5"}
{"signature": "def create_effect_instances(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Create instances of effects.\nMust be implemented or ``NotImplementedError`` is raised.", "id": "f14467:c0:m4"}
{"signature": "def get_effect(self, label: str) -> Effect:", "body": "return self._get_resource(label, self._effects, \"<STR_LIT>\")<EOL>", "docstring": "Get an effect instance by label\n\nArgs:\n    label (str): The label for the effect instance\n\nReturns:\n    Effect class instance", "id": "f14467:c0:m10"}
{"signature": "def get_effect_class(self, class_name, package_name=None) -> Type[Effect]:", "body": "if package_name:<EOL><INDENT>return effects.find_effect_class(\"<STR_LIT>\".format(package_name, class_name))<EOL><DEDENT>return effects.find_effect_class(class_name)<EOL>", "docstring": "Get an effect class from the effect registry.\n\nArgs:\n    class_name (str): The exact class name of the effect\n\nKeyword Args:\n    package_name (str): The python path to the effect package the effect name is located.\n                        This is optional and can be used to avoid issue with class name collisions.\n\nReturns:\n    Effect class", "id": "f14467:c0:m11"}
{"signature": "def quad_fs() -> VAO:", "body": "return quad_2d(<NUM_LIT>, <NUM_LIT>, <NUM_LIT:0.0>, <NUM_LIT:0.0>)<EOL>", "docstring": "Creates a screen aligned quad using two triangles with normals and texture coordiantes.\n\nReturns:\n    A :py:class:`demosys.opengl.vao.VAO` instance.", "id": "f14469:m0"}
{"signature": "def plane_xz(size=(<NUM_LIT:10>, <NUM_LIT:10>), resolution=(<NUM_LIT:10>, <NUM_LIT:10>)) -> VAO:", "body": "sx, sz = size<EOL>rx, rz = resolution<EOL>dx, dz = sx / rx, sz / rz  <EOL>ox, oz = -sx / <NUM_LIT:2>, -sz / <NUM_LIT:2>  <EOL>def gen_pos():<EOL><INDENT>for z in range(rz):<EOL><INDENT>for x in range(rx):<EOL><INDENT>yield ox + x * dx<EOL>yield <NUM_LIT:0><EOL>yield oz + z * dz<EOL><DEDENT><DEDENT><DEDENT>def gen_uv():<EOL><INDENT>for z in range(rz):<EOL><INDENT>for x in range(rx):<EOL><INDENT>yield x / (rx - <NUM_LIT:1>)<EOL>yield <NUM_LIT:1> - z / (rz - <NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT>def gen_normal():<EOL><INDENT>for _ in range(rx * rz):<EOL><INDENT>yield <NUM_LIT:0.0><EOL>yield <NUM_LIT:1.0><EOL>yield <NUM_LIT:0.0><EOL><DEDENT><DEDENT>def gen_index():<EOL><INDENT>for z in range(rz - <NUM_LIT:1>):<EOL><INDENT>for x in range(rx - <NUM_LIT:1>):<EOL><INDENT>yield z * rz + x + <NUM_LIT:1><EOL>yield z * rz + x<EOL>yield z * rz + x + rx<EOL>yield z * rz + x + <NUM_LIT:1><EOL>yield z * rz + x + rx<EOL>yield z * rz + x + rx + <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>pos_data = numpy.fromiter(gen_pos(), dtype=numpy.float32)<EOL>uv_data = numpy.fromiter(gen_uv(), dtype=numpy.float32)<EOL>normal_data = numpy.fromiter(gen_normal(), dtype=numpy.float32)<EOL>index_data = numpy.fromiter(gen_index(), dtype=numpy.uint32)<EOL>vao = VAO(\"<STR_LIT>\", mode=moderngl.TRIANGLES)<EOL>vao.buffer(pos_data, '<STR_LIT>', ['<STR_LIT>'])<EOL>vao.buffer(uv_data, '<STR_LIT>', ['<STR_LIT>'])<EOL>vao.buffer(normal_data, '<STR_LIT>', ['<STR_LIT>'])<EOL>vao.index_buffer(index_data, index_element_size=<NUM_LIT:4>)<EOL>return vao<EOL>", "docstring": "Generates a plane on the xz axis of a specific size and resolution.\nNormals and texture coordinates are also included.\n\nArgs:\n    size: (x, y) tuple\n    resolution: (x, y) tuple\n\nReturns:\n    A :py:class:`demosys.opengl.vao.VAO` instance", "id": "f14470:m0"}
{"signature": "def points_random_3d(count, range_x=(-<NUM_LIT>, <NUM_LIT>), range_y=(-<NUM_LIT>, <NUM_LIT>), range_z=(-<NUM_LIT>, <NUM_LIT>), seed=None) -> VAO:", "body": "random.seed(seed)<EOL>def gen():<EOL><INDENT>for _ in range(count):<EOL><INDENT>yield random.uniform(*range_x)<EOL>yield random.uniform(*range_y)<EOL>yield random.uniform(*range_z)<EOL><DEDENT><DEDENT>data = numpy.fromiter(gen(), count=count * <NUM_LIT:3>, dtype=numpy.float32)<EOL>vao = VAO(\"<STR_LIT>\", mode=moderngl.POINTS)<EOL>vao.buffer(data, '<STR_LIT>', ['<STR_LIT>'])<EOL>return vao<EOL>", "docstring": "Generates random positions inside a confied box.\n\nArgs:\n    count (int): Number of points to generate\n\nKeyword Args:\n    range_x (tuple): min-max range for x axis: Example (-10.0. 10.0)\n    range_y (tuple): min-max range for y axis: Example (-10.0. 10.0)\n    range_z (tuple): min-max range for z axis: Example (-10.0. 10.0)\n    seed (int): The random seed\n\nReturns:\n    A :py:class:`demosys.opengl.vao.VAO` instance", "id": "f14472:m0"}
{"signature": "def sphere(radius=<NUM_LIT:0.5>, sectors=<NUM_LIT:32>, rings=<NUM_LIT:16>) -> VAO:", "body": "R = <NUM_LIT:1.0> / (rings - <NUM_LIT:1>)<EOL>S = <NUM_LIT:1.0> / (sectors - <NUM_LIT:1>)<EOL>vertices = [<NUM_LIT:0>] * (rings * sectors * <NUM_LIT:3>)<EOL>normals = [<NUM_LIT:0>] * (rings * sectors * <NUM_LIT:3>)<EOL>uvs = [<NUM_LIT:0>] * (rings * sectors * <NUM_LIT:2>)<EOL>v, n, t = <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0><EOL>for r in range(rings):<EOL><INDENT>for s in range(sectors):<EOL><INDENT>y = math.sin(-math.pi / <NUM_LIT:2> + math.pi * r * R)<EOL>x = math.cos(<NUM_LIT:2> * math.pi * s * S) * math.sin(math.pi * r * R)<EOL>z = math.sin(<NUM_LIT:2> * math.pi * s * S) * math.sin(math.pi * r * R)<EOL>uvs[t] = s * S<EOL>uvs[t + <NUM_LIT:1>] = r * R<EOL>vertices[v] = x * radius<EOL>vertices[v + <NUM_LIT:1>] = y * radius<EOL>vertices[v + <NUM_LIT:2>] = z * radius<EOL>normals[n] = x<EOL>normals[n + <NUM_LIT:1>] = y<EOL>normals[n + <NUM_LIT:2>] = z<EOL>t += <NUM_LIT:2><EOL>v += <NUM_LIT:3><EOL>n += <NUM_LIT:3><EOL><DEDENT><DEDENT>indices = [<NUM_LIT:0>] * rings * sectors * <NUM_LIT:6><EOL>i = <NUM_LIT:0><EOL>for r in range(rings - <NUM_LIT:1>):<EOL><INDENT>for s in range(sectors - <NUM_LIT:1>):<EOL><INDENT>indices[i] = r * sectors + s<EOL>indices[i + <NUM_LIT:1>] = (r + <NUM_LIT:1>) * sectors + (s + <NUM_LIT:1>)<EOL>indices[i + <NUM_LIT:2>] = r * sectors + (s + <NUM_LIT:1>)<EOL>indices[i + <NUM_LIT:3>] = r * sectors + s<EOL>indices[i + <NUM_LIT:4>] = (r + <NUM_LIT:1>) * sectors + s<EOL>indices[i + <NUM_LIT:5>] = (r + <NUM_LIT:1>) * sectors + (s + <NUM_LIT:1>)<EOL>i += <NUM_LIT:6><EOL><DEDENT><DEDENT>vbo_vertices = numpy.array(vertices, dtype=numpy.float32)<EOL>vbo_normals = numpy.array(normals, dtype=numpy.float32)<EOL>vbo_uvs = numpy.array(uvs, dtype=numpy.float32)<EOL>vbo_elements = numpy.array(indices, dtype=numpy.uint32)<EOL>vao = VAO(\"<STR_LIT>\", mode=mlg.TRIANGLES)<EOL>vao.buffer(vbo_vertices, '<STR_LIT>', ['<STR_LIT>'])<EOL>vao.buffer(vbo_normals, 
'<STR_LIT>', ['<STR_LIT>'])<EOL>vao.buffer(vbo_uvs, '<STR_LIT>', ['<STR_LIT>'])<EOL>vao.index_buffer(vbo_elements, index_element_size=<NUM_LIT:4>)<EOL>return vao<EOL>", "docstring": "Creates a sphere.\n\nKeyword Args:\n    radius (float): Radius or the sphere\n    rings (int): number or horizontal rings\n    sectors (int): number of vertical segments\n\nReturns:\n    A :py:class:`demosys.opengl.vao.VAO` instance", "id": "f14475:m0"}
{"signature": "def close(self):", "body": "glfw.set_window_should_close(self.window, True)<EOL>", "docstring": "Set the window closing state in glfw", "id": "f14477:c0:m3"}
{"signature": "def swap_buffers(self):", "body": "self.frames += <NUM_LIT:1><EOL>glfw.swap_buffers(self.window)<EOL>self.poll_events()<EOL>", "docstring": "Swaps buffers, incement the framecounter and pull events.", "id": "f14477:c0:m4"}
{"signature": "def check_glfw_version(self):", "body": "print(\"<STR_LIT>\".format(glfw.get_version(), glfw.__version__))<EOL>if glfw.get_version() < self.min_glfw_version:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(self.min_glfw_version))<EOL><DEDENT>", "docstring": "Ensure glfw library  version is compatible", "id": "f14477:c0:m8"}
{"signature": "def resize(self, width, height):", "body": "self.width = width<EOL>self.height = height<EOL>self.buffer_width, self.buffer_height = glfw.get_framebuffer_size(self.window)<EOL>self.set_default_viewport()<EOL>", "docstring": "Sets the new size and buffer size internally", "id": "f14477:c0:m5"}
{"signature": "def on_resize(self, width, height):", "body": "self.width, self.height = width, height<EOL>self.buffer_width, self.buffer_height = width, height<EOL>self.resize(width, height)<EOL>", "docstring": "Pyglet specific callback for window resize events.", "id": "f14480:c0:m4"}
{"signature": "def should_close(self) -> bool:", "body": "return self.window.has_exit<EOL>", "docstring": "returns the ``has_exit`` state in the pyglet window", "id": "f14480:c0:m7"}
{"signature": "def swap_buffers(self):", "body": "if not self.window.context:<EOL><INDENT>return<EOL><DEDENT>self.frames += <NUM_LIT:1><EOL>self.window.flip()<EOL>self.window.dispatch_events()<EOL>", "docstring": "Swap buffers, increment frame counter and pull events", "id": "f14480:c0:m6"}
{"signature": "def __init__(self):", "body": "super().__init__()<EOL>pyglet.options['<STR_LIT>'] = False<EOL>config = pyglet.gl.Config()<EOL>config.double_buffer = True<EOL>config.major_version = self.gl_version.major<EOL>config.minor_version = self.gl_version.minor<EOL>config.forward_compatible = True<EOL>config.sample_buffers = <NUM_LIT:1> if self.samples > <NUM_LIT:1> else <NUM_LIT:0><EOL>config.samples = self.samples<EOL>self.window = PygletWrapper(<EOL>width=self.width, height=self.height,<EOL>caption=self.title,<EOL>resizable=self.resizable,<EOL>vsync=self.vsync,<EOL>fullscreen=self.fullscreen,<EOL>)<EOL>self.window.set_mouse_visible(self.cursor)<EOL>self.window.event(self.on_key_press)<EOL>self.window.event(self.on_key_release)<EOL>self.window.event(self.on_mouse_motion)<EOL>self.window.event(self.on_resize)<EOL>self.ctx = moderngl.create_context(require=self.gl_version.code)<EOL>context.WINDOW = self<EOL>self.fbo = self.ctx.screen<EOL>self.set_default_viewport()<EOL>", "docstring": "Opens a window using pyglet, registers input callbacks\nand creates a moderngl context.", "id": "f14480:c0:m0"}
{"signature": "def on_key_press(self, symbol, modifiers):", "body": "self.keyboard_event(symbol, self.keys.ACTION_PRESS, modifiers)<EOL>", "docstring": "Pyglet specific key press callback.\nForwards and translates the events to :py:func:`keyboard_event`", "id": "f14480:c0:m1"}
{"signature": "def on_mouse_motion(self, x, y, dx, dy):", "body": "<EOL>self.cursor_event(x, self.buffer_height - y, dx, dy)<EOL>", "docstring": "Pyglet specific mouse motion callback.\nForwards and traslates the event to :py:func:`cursor_event`", "id": "f14480:c0:m3"}
{"signature": "def on_key_release(self, symbol, modifiers):", "body": "self.keyboard_event(symbol, self.keys.ACTION_RELEASE, modifiers)<EOL>", "docstring": "Pyglet specific key release callback.\nForwards and translates the events to :py:func:`keyboard_event`", "id": "f14480:c0:m2"}
{"signature": "def __init__(self):", "body": "super().__init__()<EOL>self._closed = False<EOL>gl = QtOpenGL.QGLFormat()<EOL>gl.setVersion(self.gl_version.major, self.gl_version.minor)<EOL>gl.setProfile(QtOpenGL.QGLFormat.CoreProfile)<EOL>gl.setDepthBufferSize(<NUM_LIT>)<EOL>gl.setDoubleBuffer(True)<EOL>gl.setSwapInterval(<NUM_LIT:1> if self.vsync else <NUM_LIT:0>)<EOL>if self.samples > <NUM_LIT:1>:<EOL><INDENT>gl.setSampleBuffers(True)<EOL>gl.setSamples(self.samples)<EOL><DEDENT>self.app = QtWidgets.QApplication([])<EOL>self.widget = QtOpenGL.QGLWidget(gl)<EOL>self.widget.setWindowTitle(self.title)<EOL>if self.fullscreen:<EOL><INDENT>rect = QtWidgets.QDesktopWidget().screenGeometry()<EOL>self.width = rect.width()<EOL>self.height = rect.height()<EOL>self.buffer_width = rect.width() * self.widget.devicePixelRatio()<EOL>self.buffer_height = rect.height() * self.widget.devicePixelRatio()<EOL><DEDENT>if self.resizable:<EOL><INDENT>size_policy = QtWidgets.QSizePolicy(<EOL>QtWidgets.QSizePolicy.Expanding,<EOL>QtWidgets.QSizePolicy.Expanding,<EOL>)<EOL>self.widget.setSizePolicy(size_policy)<EOL>self.widget.resize(self.width, self.height)<EOL><DEDENT>else:<EOL><INDENT>self.widget.setFixedSize(self.width, self.height)<EOL><DEDENT>self.widget.move(QtWidgets.QDesktopWidget().rect().center() - self.widget.rect().center())<EOL>self.widget.resizeGL = self.resize  <EOL>self.widget.show()<EOL>if not self.cursor:<EOL><INDENT>self.widget.setCursor(QtCore.Qt.BlankCursor)<EOL><DEDENT>if self.fullscreen:<EOL><INDENT>self.widget.showFullScreen()<EOL><DEDENT>self.widget.setMouseTracking(True)<EOL>self.widget.keyPressEvent = self.keyPressEvent<EOL>self.widget.keyReleaseEvent = self.keyReleaseEvent<EOL>self.widget.mouseMoveEvent = self.mouseMoveEvent<EOL>self.ctx = moderngl.create_context(require=self.gl_version.code)<EOL>context.WINDOW = self<EOL>self.fbo = self.ctx.screen<EOL>self.buffer_width = self.width * self.widget.devicePixelRatio()<EOL>self.buffer_height = self.height * 
self.widget.devicePixelRatio()<EOL>self.set_default_viewport()<EOL>", "docstring": "Creates a pyqt application and window overriding the\nbuilt in event loop. Sets up keyboard and mouse events\nand creates a ``monderngl.Context``.", "id": "f14482:c0:m0"}
{"signature": "def close(self):", "body": "self._closed = True<EOL>", "docstring": "Set the internal close state", "id": "f14482:c0:m8"}
{"signature": "def keyPressEvent(self, event):", "body": "self.keyboard_event(event.key(), self.keys.ACTION_PRESS, <NUM_LIT:0>)<EOL>", "docstring": "Pyqt specific key press callback function.\nTranslates and forwards events to :py:func:`keyboard_event`.", "id": "f14482:c0:m1"}
{"signature": "def mouseMoveEvent(self, event):", "body": "self.cursor_event(event.x(), event.y(), <NUM_LIT:0>, <NUM_LIT:0>)<EOL>", "docstring": "Pyqt specific mouse event callback\nTranslates and forwards events to :py:func:`cursor_event`.", "id": "f14482:c0:m3"}
{"signature": "def swap_buffers(self):", "body": "self.frames += <NUM_LIT:1><EOL>self.widget.swapBuffers()<EOL>self.app.processEvents()<EOL>", "docstring": "Swaps buffers, increments the frame counter and pulls events", "id": "f14482:c0:m5"}
{"signature": "def keyReleaseEvent(self, event):", "body": "self.keyboard_event(event.key(), self.keys.ACTION_RELEASE, <NUM_LIT:0>)<EOL>", "docstring": "Pyqt specific key release callback function.\nTranslates and forwards events to :py:func:`keyboard_event`.", "id": "f14482:c0:m2"}
{"signature": "def resize(self, width, height):", "body": "self.width = width<EOL>self.height = height<EOL>self.buffer_width, self.buffer_height = self.width, self.height<EOL>self.set_default_viewport()<EOL>", "docstring": "Sets the new size and buffer size internally", "id": "f14484:c0:m3"}
{"signature": "def __init__(self):", "body": "super().__init__()<EOL>self.window_closing = False<EOL>self.tmp_size_x = c_int()<EOL>self.tmp_size_y = c_int()<EOL>print(\"<STR_LIT>\", self.get_library_version())<EOL>if sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO) != <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MAJOR_VERSION, self.gl_version.major)<EOL>sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_MINOR_VERSION, self.gl_version.minor)<EOL>sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_PROFILE_MASK, sdl2.SDL_GL_CONTEXT_PROFILE_CORE)<EOL>sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_FORWARD_COMPATIBLE_FLAG, <NUM_LIT:1>)<EOL>sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_DOUBLEBUFFER, <NUM_LIT:1>)<EOL>sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_DEPTH_SIZE, <NUM_LIT>)<EOL>sdl2.SDL_ShowCursor(sdl2.SDL_ENABLE if self.cursor else sdl2.SDL_DISABLE)<EOL>if self.samples > <NUM_LIT:1>:<EOL><INDENT>sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_MULTISAMPLEBUFFERS, <NUM_LIT:1>)<EOL>sdl2.video.SDL_GL_SetAttribute(sdl2.SDL_GL_MULTISAMPLESAMPLES, self.samples)<EOL><DEDENT>flags = sdl2.SDL_WINDOW_OPENGL<EOL>if self.fullscreen:<EOL><INDENT>flags |= sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP<EOL><DEDENT>else:<EOL><INDENT>if self.resizable:<EOL><INDENT>flags |= sdl2.SDL_WINDOW_RESIZABLE<EOL><DEDENT><DEDENT>self.window = sdl2.SDL_CreateWindow(<EOL>self.title.encode(),<EOL>sdl2.SDL_WINDOWPOS_UNDEFINED,<EOL>sdl2.SDL_WINDOWPOS_UNDEFINED,<EOL>self.width,<EOL>self.height,<EOL>flags<EOL>)<EOL>if not self.window:<EOL><INDENT>raise ValueError(\"<STR_LIT>\", sdl2.SDL_GetError())<EOL><DEDENT>self.context = sdl2.SDL_GL_CreateContext(self.window)<EOL>sdl2.video.SDL_GL_SetSwapInterval(<NUM_LIT:1> if self.vsync else <NUM_LIT:0>)<EOL>self.ctx = moderngl.create_context(require=self.gl_version.code)<EOL>context.WINDOW = self<EOL>self.fbo = self.ctx.screen<EOL>self.set_default_viewport()<EOL>", "docstring": "Initializes sdl2, sets up key and 
mouse events and\ncreates a ``moderngl.Context`` using the context sdl2 createad.\n\nUsing the sdl2 window requires sdl binaries and PySDL2.", "id": "f14484:c0:m0"}
{"signature": "def cursor_event(self, x, y, dx, dy):", "body": "self.sys_camera.rot_state(x, y)<EOL>", "docstring": "The standard mouse movement event method.\nCan be overriden to add new functionality.\nBy default this feeds the system camera with new values.\n\nArgs:\n    x: The current mouse x position\n    y: The current mouse y position\n    dx: Delta x postion (x position difference from the previous event)\n    dy: Delta y postion (y position difference from the previous event)", "id": "f14485:c1:m13"}
{"signature": "def clear(self):", "body": "self.ctx.fbo.clear(<EOL>red=self.clear_color[<NUM_LIT:0>],<EOL>green=self.clear_color[<NUM_LIT:1>],<EOL>blue=self.clear_color[<NUM_LIT:2>],<EOL>alpha=self.clear_color[<NUM_LIT:3>],<EOL>depth=self.clear_depth,<EOL>)<EOL>", "docstring": "Clear the window buffer", "id": "f14485:c1:m4"}
{"signature": "@property<EOL><INDENT>def buffer_size(self) -> Tuple[int, int]:<DEDENT>", "body": "return (self.buffer_width, self.buffer_height)<EOL>", "docstring": "(width, heigh) buffer size of the window.\n\nThis is the actual buffer size of the window\ntaking UI scale into account. A 1920 x 1080\nwindow running in an environment with UI scale 2.0\nwould have a 3840 x 2160 window buffer.", "id": "f14485:c1:m2"}
{"signature": "def set_default_viewport(self):", "body": "<EOL>expected_height = int(self.buffer_width / self.aspect_ratio)<EOL>blank_space = self.buffer_height - expected_height<EOL>self.fbo.viewport = (<NUM_LIT:0>, blank_space // <NUM_LIT:2>, self.buffer_width, expected_height)<EOL>", "docstring": "Calculates the viewport based on the configured aspect ratio in settings.\nWill add black borders if the window do not match the viewport.", "id": "f14485:c1:m15"}
{"signature": "def terminate(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "The actual teardown of the window.\n\nRaises:\n    NotImplementedError", "id": "f14485:c1:m11"}
{"signature": "def close(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Set the window in close state. This doesn't actually close the window,\nbut should make :py:func:`should_close` return ``True`` so the\nmain loop can exit gracefully.\n\nRaises:\n    NotImplementedError", "id": "f14485:c1:m9"}
{"signature": "def should_close(self) -> bool:", "body": "raise NotImplementedError()<EOL>", "docstring": "Check if window should close. This should always be checked in the main draw loop.\n\nRaises:\n    NotImplementedError", "id": "f14485:c1:m10"}
{"signature": "def resize(self, width, height):", "body": "self.set_default_viewport()<EOL>", "docstring": "Resize the window. Should normallty be overriden\nwhen implementing a window as most window libraries need additional logic here.\n\nArgs:\n    width (int): Width of the window\n    height: (int): Height of the window", "id": "f14485:c1:m8"}
{"signature": "def window(raise_on_error=True) -> BaseWindow:", "body": "if not WINDOW and raise_on_error:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>return WINDOW<EOL>", "docstring": "The window instance we are rendering to\n\n:param raise_on_error: Raise an error if the window is not created yet", "id": "f14486:m0"}
{"signature": "def __init__(self):", "body": "super().__init__()<EOL>self.headless_frames = getattr(settings, '<STR_LIT>', <NUM_LIT:0>)<EOL>self.headless_duration = getattr(settings, '<STR_LIT>', <NUM_LIT:0>)<EOL>if not self.headless_frames and not self.headless_duration:<EOL><INDENT>raise ImproperlyConfigured(\"<STR_LIT>\")<EOL><DEDENT>self._close = False<EOL>self.ctx = moderngl.create_standalone_context(require=self.gl_version.code)<EOL>context.WINDOW = self<EOL>self.fbo = self.ctx.framebuffer(<EOL>color_attachments=self.ctx.texture(self.size, <NUM_LIT:4>),<EOL>depth_attachment=self.ctx.depth_texture(self.size),<EOL>)<EOL>self.set_default_viewport()<EOL>self.fbo.use()<EOL>", "docstring": "Creates a standalone ``moderngl.Context``.\nThe headless window currently have no event input from keyboard or mouse.\n\nUsing this window require either ``settings`` values to be present:\n\n* ``HEADLESS_FRAMES``: How many frames should be rendered before closing the window\n* ``HEADLESS_DURATION``: How many seconds rendering should last before the window closes", "id": "f14487:c0:m0"}
{"signature": "def close(self):", "body": "self._close = True<EOL>", "docstring": "Sets the internal close state", "id": "f14487:c0:m4"}
{"signature": "def use(self):", "body": "self.fbo.use()<EOL>", "docstring": "Binds the framebuffer representing this window", "id": "f14487:c0:m2"}
{"signature": "@property<EOL><INDENT>def loader_cls(self) -> Type:<DEDENT>", "body": "return self._kwargs.get('<STR_LIT>')<EOL>", "docstring": "(Type) The loader class for this resource", "id": "f14493:c0:m5"}
{"signature": "@property<EOL><INDENT>def resolved_path(self) -> Path:<DEDENT>", "body": "return self.kwargs.get('<STR_LIT>')<EOL>", "docstring": "(pathlib.Path) The resolved path by a finder", "id": "f14493:c0:m7"}
{"signature": "@property<EOL><INDENT>def kwargs(self) -> Dict[str, str]:<DEDENT>", "body": "return self._kwargs<EOL>", "docstring": "(dict) All keywords arguments passed to the resource", "id": "f14493:c0:m9"}
{"signature": "def add(self, meta):", "body": "self._check_meta(meta)<EOL>self.resolve_loader(meta)<EOL>self._resources.append(meta)<EOL>", "docstring": "Add a resource to this pool.\nThe resource is loaded and returned when ``load_pool()`` is called.\n\n:param meta: The resource description", "id": "f14493:c1:m3"}
{"signature": "@property<EOL><INDENT>def loader(self):<DEDENT>", "body": "return self._kwargs.get('<STR_LIT>') or self.default_loader<EOL>", "docstring": "(str) Name of the loader", "id": "f14493:c0:m3"}
{"signature": "def get_loader(self, meta: ResourceDescription, raise_on_error=False) -> BaseLoader:", "body": "for loader in self._loaders:<EOL><INDENT>if loader.name == meta.loader:<EOL><INDENT>return loader<EOL><DEDENT><DEDENT>if raise_on_error:<EOL><INDENT>raise ImproperlyConfigured(<EOL>\"<STR_LIT>\".format(<EOL>meta.loader, meta, [loader.name for loader in self._loaders]))<EOL><DEDENT>", "docstring": "Attempts to get a loader\n\n:param meta: The resource description instance\n:param raise_on_error: Raise ImproperlyConfigured if the loader cannot be resolved\n:returns: The requested loader class", "id": "f14493:c1:m6"}
{"signature": "@property<EOL><INDENT>def path(self):<DEDENT>", "body": "return self._kwargs.get('<STR_LIT:path>')<EOL>", "docstring": "(str) The path to a resource when a single file is specified", "id": "f14493:c0:m2"}
{"signature": "def resolve_loader(self, meta: ProgramDescription):", "body": "if not meta.loader:<EOL><INDENT>meta.loader = '<STR_LIT>' if meta.path else '<STR_LIT>'<EOL><DEDENT>for loader_cls in self._loaders:<EOL><INDENT>if loader_cls.name == meta.loader:<EOL><INDENT>meta.loader_cls = loader_cls<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ImproperlyConfigured(<EOL>(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>).format(meta.path)<EOL>)<EOL><DEDENT>", "docstring": "Resolve program loader", "id": "f14499:c0:m1"}
{"signature": "def add_program_dir(self, directory):", "body": "dirs = list(self.PROGRAM_DIRS)<EOL>dirs.append(directory)<EOL>self.PROGRAM_DIRS = dirs<EOL>", "docstring": "Hack in program directory", "id": "f14501:c0:m4"}
{"signature": "def update(self, **kwargs):", "body": "for name, value in kwargs.items():<EOL><INDENT>setattr(self, name, value)<EOL><DEDENT>", "docstring": "Override settings values", "id": "f14501:c0:m1"}
{"signature": "def show_type(self, edge_type, **kwargs):", "body": "for v in self.g.nodes():<EOL><INDENT>e = (v, v)<EOL>if self.g.is_edge(e) and self.g.ep(e, '<STR_LIT>') == edge_type:<EOL><INDENT>ei = self.g.edge_index[e]<EOL>self.g.set_vp(v, '<STR_LIT>', self.colors['<STR_LIT>'])<EOL>self.g.set_vp(v, '<STR_LIT>', self.edge2queue[ei].colors['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>self.g.set_vp(v, '<STR_LIT>', self.colors['<STR_LIT>'])<EOL>self.g.set_vp(v, '<STR_LIT>', [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT>])<EOL><DEDENT><DEDENT>for e in self.g.edges():<EOL><INDENT>if self.g.ep(e, '<STR_LIT>') == edge_type:<EOL><INDENT>self.g.set_ep(e, '<STR_LIT>', self.colors['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>self.g.set_ep(e, '<STR_LIT>', self.colors['<STR_LIT>'])<EOL><DEDENT><DEDENT>self.draw(update_colors=False, **kwargs)<EOL>self._update_all_colors()<EOL>", "docstring": "Draws the network, highlighting queues of a certain type.\n\n        The colored vertices represent self loops of type ``edge_type``.\n        Dark edges represent queues of type ``edge_type``.\n\n        Parameters\n        ----------\n        edge_type : int\n            The type of vertices and edges to be shown.\n        **kwargs\n            Any additional parameters to pass to :meth:`.draw`, and\n            :meth:`.QueueNetworkDiGraph.draw_graph`\n\n        Notes\n        -----\n        The colors are defined by the class attribute ``colors``. 
The\n        relevant colors are ``vertex_active``, ``vertex_inactive``,\n        ``vertex_highlight``, ``edge_active``, and ``edge_inactive``.\n\n        Examples\n        --------\n        The following code highlights all edges with edge type ``2``.\n        If the edge is a loop then the vertex is highlighted as well.\n        In this case all edges with edge type ``2`` happen to be loops.\n\n        >>> import queueing_tool as qt\n        >>> g = qt.generate_pagerank_graph(100, seed=13)\n        >>> net = qt.QueueNetwork(g, seed=13)\n        >>> fname = 'edge_type_2.png'\n        >>> net.show_type(2, fname=fname) # doctest: +SKIP\n\n        .. figure:: edge_type_2-1.png\n           :align: center", "id": "f14533:c1:m21"}
{"signature": "def get_agent_data(self, queues=None, edge=None, edge_type=None, return_header=False):", "body": "queues = _get_queues(self.g, queues, edge, edge_type)<EOL>data = {}<EOL>for qid in queues:<EOL><INDENT>for agent_id, dat in self.edge2queue[qid].data.items():<EOL><INDENT>datum = np.zeros((len(dat), <NUM_LIT:6>))<EOL>datum[:, :<NUM_LIT:5>] = np.array(dat)<EOL>datum[:, <NUM_LIT:5>] = qid<EOL>if agent_id in data:<EOL><INDENT>data[agent_id] = np.vstack((data[agent_id], datum))<EOL><DEDENT>else:<EOL><INDENT>data[agent_id] = datum<EOL><DEDENT><DEDENT><DEDENT>dType = [<EOL>('<STR_LIT:a>', float),<EOL>('<STR_LIT:s>', float),<EOL>('<STR_LIT:d>', float),<EOL>('<STR_LIT:q>', float),<EOL>('<STR_LIT:n>', float),<EOL>('<STR_LIT:id>', float)<EOL>]<EOL>for agent_id, dat in data.items():<EOL><INDENT>datum = np.array([tuple(d) for d in dat.tolist()], dtype=dType)<EOL>datum = np.sort(datum, order='<STR_LIT:a>')<EOL>data[agent_id] = np.array([tuple(d) for d in datum])<EOL><DEDENT>if return_header:<EOL><INDENT>return data, '<STR_LIT>'<EOL><DEDENT>return data<EOL>", "docstring": "Gets data from queues and organizes it by agent.\n\n        If none of the parameters are given then data from every\n        :class:`.QueueServer` is retrieved.\n\n        Parameters\n        ----------\n        queues : int or *array_like* (optional)\n            The edge index (or an iterable of edge indices) identifying\n            the :class:`QueueServer(s)<.QueueServer>` whose data will\n            be retrieved.\n        edge : 2-tuple of int or *array_like* (optional)\n            Explicitly specify which queues to retrieve agent data\n            from. 
Must be either:\n\n            * A 2-tuple of the edge's source and target vertex\n              indices, or\n            * An iterable of 2-tuples of the edge's source and\n              target vertex indices.\n\n        edge_type : int or an iterable of int (optional)\n            A integer, or a collection of integers identifying which\n            edge types to retrieve agent data from.\n        return_header : bool (optonal, default: False)\n            Determines whether the column headers are returned.\n\n        Returns\n        -------\n        dict\n            Returns a ``dict`` where the keys are the\n            :class:`Agent's<.Agent>` ``agent_id`` and the values are\n            :class:`ndarrays<~numpy.ndarray>` for that\n            :class:`Agent's<.Agent>` data. The columns of this array\n            are as follows:\n\n            * First: The arrival time of an agent.\n            * Second: The service start time of an agent.\n            * Third: The departure time of an agent.\n            * Fourth: The length of the queue upon the agents arrival.\n            * Fifth: The total number of :class:`Agents<.Agent>` in the\n              :class:`.QueueServer`.\n            * Sixth: the :class:`QueueServer's<.QueueServer>` id\n              (its edge index).\n\n        headers : str (optional)\n            A comma seperated string of the column headers. Returns\n            ``'arrival,service,departure,num_queued,num_total,q_id'``", "id": "f14533:c1:m14"}
{"signature": "def simulate(self, n=<NUM_LIT:1>, t=None):", "body": "if not self._initialized:<EOL><INDENT>msg = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>raise QueueingToolError(msg)<EOL><DEDENT>if t is None:<EOL><INDENT>for dummy in range(n):<EOL><INDENT>self._simulate_next_event(slow=False)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>now = self._t<EOL>while self._t < now + t:<EOL><INDENT>self._simulate_next_event(slow=False)<EOL><DEDENT><DEDENT>", "docstring": "Simulates the network forward.\n\n        Simulates either a specific number of events or for a specified\n        amount of simulation time.\n\n        Parameters\n        ----------\n        n : int (optional, default: 1)\n            The number of events to simulate. If ``t`` is not given\n            then this parameter is used.\n        t : float (optional)\n            The amount of simulation time to simulate forward. If\n            given, ``t`` is used instead of ``n``.\n\n        Raises\n        ------\n        QueueingToolError\n            Will raise a :exc:`.QueueingToolError` if the\n            ``QueueNetwork`` has not been initialized. Call\n            :meth:`.initialize` before calling this method.\n\n        Examples\n        --------\n        Let ``net`` denote your instance of a ``QueueNetwork``. Before\n        you simulate, you need to initialize the network, which allows\n        arrivals from outside the network. 
To initialize with 2 (random\n        chosen) edges accepting arrivals run:\n\n        >>> import queueing_tool as qt\n        >>> g = qt.generate_pagerank_graph(100, seed=50)\n        >>> net = qt.QueueNetwork(g, seed=50)\n        >>> net.initialize(2)\n\n        To simulate the network 50000 events run:\n\n        >>> net.num_events\n        0\n        >>> net.simulate(50000)\n        >>> net.num_events\n        50000\n\n        To simulate the network for at least 75 simulation time units\n        run:\n\n        >>> t0 = net.current_time\n        >>> net.simulate(t=75)\n        >>> t1 = net.current_time\n        >>> t1 - t0 # doctest: +ELLIPSIS\n        75...", "id": "f14533:c1:m22"}
{"signature": "def copy(self):", "body": "net = QueueNetwork(None)<EOL>net.g = self.g.copy()<EOL>net.max_agents = copy.deepcopy(self.max_agents)<EOL>net.nV = copy.deepcopy(self.nV)<EOL>net.nE = copy.deepcopy(self.nE)<EOL>net.num_agents = copy.deepcopy(self.num_agents)<EOL>net.num_events = copy.deepcopy(self.num_events)<EOL>net._t = copy.deepcopy(self._t)<EOL>net._initialized = copy.deepcopy(self._initialized)<EOL>net._prev_edge = copy.deepcopy(self._prev_edge)<EOL>net._blocking = copy.deepcopy(self._blocking)<EOL>net.colors = copy.deepcopy(self.colors)<EOL>net.out_edges = copy.deepcopy(self.out_edges)<EOL>net.in_edges = copy.deepcopy(self.in_edges)<EOL>net.edge2queue = copy.deepcopy(self.edge2queue)<EOL>net._route_probs = copy.deepcopy(self._route_probs)<EOL>if net._initialized:<EOL><INDENT>keys = [q._key() for q in net.edge2queue if q._time < np.infty]<EOL>net._fancy_heap = PriorityQueue(keys, net.nE)<EOL><DEDENT>return net<EOL>", "docstring": "Returns a deep copy of itself.", "id": "f14533:c1:m12"}
{"signature": "def stop_collecting_data(self, queues=None, edge=None, edge_type=None):", "body": "queues = _get_queues(self.g, queues, edge, edge_type)<EOL>for k in queues:<EOL><INDENT>self.edge2queue[k].collect_data = False<EOL><DEDENT>", "docstring": "Tells the queues to stop collecting data on agents.\n\n        If none of the parameters are given then every\n        :class:`.QueueServer` will stop collecting data.\n\n        Parameters\n        ----------\n        queues : int, *array_like* (optional)\n            The edge index (or an iterable of edge indices) identifying\n            the :class:`QueueServer(s)<.QueueServer>` that will stop\n            collecting data.\n        edge : 2-tuple of int or *array_like* (optional)\n            Explicitly specify which queues will stop collecting data.\n            Must be either:\n\n            * A 2-tuple of the edge's source and target vertex\n              indices, or\n            * An iterable of 2-tuples of the edge's source and\n              target vertex indices.\n\n        edge_type : int or an iterable of int (optional)\n            A integer, or a collection of integers identifying which\n            edge types will stop collecting data.", "id": "f14533:c1:m25"}
{"signature": "def transitions(self, return_matrix=True):", "body": "if return_matrix:<EOL><INDENT>mat = np.zeros((self.nV, self.nV))<EOL>for v in self.g.nodes():<EOL><INDENT>ind = [e[<NUM_LIT:1>] for e in sorted(self.g.out_edges(v))]<EOL>mat[v, ind] = self._route_probs[v]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>mat = {<EOL>k: {e[<NUM_LIT:1>]: p for e, p in zip(sorted(self.g.out_edges(k)), value)}<EOL>for k, value in enumerate(self._route_probs)<EOL>}<EOL><DEDENT>return mat<EOL>", "docstring": "Returns the routing probabilities for each vertex in the\n        graph.\n\n        Parameters\n        ----------\n        return_matrix : bool (optional, the default is ``True``)\n            Specifies whether an :class:`~numpy.ndarray` is returned.\n            If ``False``, a dict is returned instead.\n\n        Returns\n        -------\n        out : a dict or :class:`~numpy.ndarray`\n            The transition probabilities for each vertex in the graph.\n            If ``out`` is an :class:`~numpy.ndarray`, then\n            ``out[v, u]`` returns the probability of a transition from\n            vertex ``v`` to vertex ``u``. If ``out`` is a dict\n            then ``out_edge[v][u]`` is the probability of moving from\n            vertex ``v`` to the vertex ``u``.\n\n        Examples\n        --------\n        Lets change the routing probabilities:\n\n        >>> import queueing_tool as qt\n        >>> import networkx as nx\n        >>> g = nx.sedgewick_maze_graph()\n        >>> net = qt.QueueNetwork(g)\n\n        Below is an adjacency list for the graph ``g``.\n\n        >>> ans = qt.graph2dict(g, False)\n        >>> {k: sorted(v) for k, v in ans.items()}\n        ...                         
# doctest: +NORMALIZE_WHITESPACE\n        {0: [2, 5, 7],\n         1: [7],\n         2: [0, 6],\n         3: [4, 5],\n         4: [3, 5, 6, 7],\n         5: [0, 3, 4],\n         6: [2, 4],\n         7: [0, 1, 4]}\n\n        The default transition matrix is every out edge being equally\n        likely:\n\n        >>> net.transitions(False)  # doctest: +ELLIPSIS\n        ...                         # doctest: +NORMALIZE_WHITESPACE\n        {0: {2: 0.333..., 5: 0.333..., 7: 0.333...},\n         1: {7: 1.0},\n         2: {0: 0.5, 6: 0.5},\n         3: {4: 0.5, 5: 0.5},\n         4: {3: 0.25, 5: 0.25, 6: 0.25, 7: 0.25},\n         5: {0: 0.333..., 3: 0.333..., 4: 0.333...},\n         6: {2: 0.5, 4: 0.5},\n         7: {0: 0.333..., 1: 0.333..., 4: 0.333...}}\n\n        Now we will generate a random routing matrix:\n\n        >>> mat = qt.generate_transition_matrix(g, seed=96)\n        >>> net.set_transitions(mat)\n        >>> net.transitions(False)  # doctest: +ELLIPSIS\n        ...                         # doctest: +NORMALIZE_WHITESPACE\n        {0: {2: 0.112..., 5: 0.466..., 7: 0.420...},\n         1: {7: 1.0},\n         2: {0: 0.561..., 6: 0.438...},\n         3: {4: 0.545..., 5: 0.454...},\n         4: {3: 0.374..., 5: 0.381..., 6: 0.026..., 7: 0.217...},\n         5: {0: 0.265..., 3: 0.460..., 4: 0.274...},\n         6: {2: 0.673..., 4: 0.326...},\n         7: {0: 0.033..., 1: 0.336..., 4: 0.630...}}\n\n        What this shows is the following: when an :class:`.Agent` is at\n        vertex ``2`` they will transition to vertex ``0`` with\n        probability ``0.561`` and route to vertex ``6`` probability\n        ``0.438``, when at vertex ``6`` they will transition back to\n        vertex ``2`` with probability ``0.673`` and route vertex ``4``\n        probability ``0.326``, etc.", "id": "f14533:c1:m26"}
{"signature": "def show_active(self, **kwargs):", "body": "g = self.g<EOL>for v in g.nodes():<EOL><INDENT>self.g.set_vp(v, '<STR_LIT>', [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT>])<EOL>is_active = False<EOL>my_iter = g.in_edges(v) if g.is_directed() else g.out_edges(v)<EOL>for e in my_iter:<EOL><INDENT>ei = g.edge_index[e]<EOL>if self.edge2queue[ei]._active:<EOL><INDENT>is_active = True<EOL>break<EOL><DEDENT><DEDENT>if is_active:<EOL><INDENT>self.g.set_vp(v, '<STR_LIT>', self.colors['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>self.g.set_vp(v, '<STR_LIT>', self.colors['<STR_LIT>'])<EOL><DEDENT><DEDENT>for e in g.edges():<EOL><INDENT>ei = g.edge_index[e]<EOL>if self.edge2queue[ei]._active:<EOL><INDENT>self.g.set_ep(e, '<STR_LIT>', self.colors['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>self.g.set_ep(e, '<STR_LIT>', self.colors['<STR_LIT>'])<EOL><DEDENT><DEDENT>self.draw(update_colors=False, **kwargs)<EOL>self._update_all_colors()<EOL>", "docstring": "Draws the network, highlighting active queues.\n\n        The colored vertices represent vertices that have at least one\n        queue on an in-edge that is active. Dark edges represent\n        queues that are active, light edges represent queues that are\n        inactive.\n\n        Parameters\n        ----------\n        **kwargs\n            Any additional parameters to pass to :meth:`.draw`, and\n            :meth:`.QueueNetworkDiGraph.draw_graph`.\n\n        Notes\n        -----\n        Active queues are :class:`QueueServers<.QueueServer>` that\n        accept arrivals from outside the network. The colors are\n        defined by the class attribute ``colors``. The relevant keys\n        are ``vertex_active``, ``vertex_inactive``, ``edge_active``,\n        and ``edge_inactive``.", "id": "f14533:c1:m20"}
{"signature": "def start_collecting_data(self, queues=None, edge=None, edge_type=None):", "body": "queues = _get_queues(self.g, queues, edge, edge_type)<EOL>for k in queues:<EOL><INDENT>self.edge2queue[k].collect_data = True<EOL><DEDENT>", "docstring": "Tells the queues to collect data on agents' arrival, service\n        start, and departure times.\n\n        If none of the parameters are given then every\n        :class:`.QueueServer` will start collecting data.\n\n        Parameters\n        ----------\n        queues : :any:`int`, *array_like* (optional)\n            The edge index (or an iterable of edge indices) identifying\n            the :class:`QueueServer(s)<.QueueServer>` that will start\n            collecting data.\n        edge : 2-tuple of int or *array_like* (optional)\n            Explicitly specify which queues will collect data. Must be\n            either:\n\n            * A 2-tuple of the edge's source and target vertex\n              indices, or\n            * An iterable of 2-tuples of the edge's source and\n              target vertex indices.\n\n        edge_type : int or an iterable of int (optional)\n            A integer, or a collection of integers identifying which\n            edge types will be set active.", "id": "f14533:c1:m24"}
{"signature": "def set_transitions(self, mat):", "body": "if isinstance(mat, dict):<EOL><INDENT>for key, value in mat.items():<EOL><INDENT>probs = list(value.values())<EOL>if key not in self.g.node:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValueError(msg)<EOL><DEDENT>elif len(self.out_edges[key]) > <NUM_LIT:0> and not np.isclose(sum(probs), <NUM_LIT:1>):<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValueError(msg)<EOL><DEDENT>elif (np.array(probs) < <NUM_LIT:0>).any():<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValueError(msg)<EOL><DEDENT>for k, e in enumerate(sorted(self.g.out_edges(key))):<EOL><INDENT>self._route_probs[key][k] = value.get(e[<NUM_LIT:1>], <NUM_LIT:0>)<EOL><DEDENT><DEDENT><DEDENT>elif isinstance(mat, np.ndarray):<EOL><INDENT>non_terminal = np.array([self.g.out_degree(v) > <NUM_LIT:0> for v in self.g.nodes()])<EOL>if mat.shape != (self.nV, self.nV):<EOL><INDENT>msg = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\").format(self.nV, self.nV)<EOL>raise ValueError(msg)<EOL><DEDENT>elif not np.allclose(np.sum(mat[non_terminal, :], axis=<NUM_LIT:1>), <NUM_LIT:1>):<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValueError(msg)<EOL><DEDENT>elif (mat < <NUM_LIT:0>).any():<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>for k in range(self.nV):<EOL><INDENT>for j, e in enumerate(sorted(self.g.out_edges(k))):<EOL><INDENT>self._route_probs[k][j] = mat[k, e[<NUM_LIT:1>]]<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Change the routing transitions probabilities for the\n        network.\n\n        Parameters\n        ----------\n        mat : dict or :class:`~numpy.ndarray`\n            A transition routing matrix or transition dictionary. 
If\n            passed a dictionary, the keys are source vertex indices and\n            the values are dictionaries with target vertex indicies\n            as the keys and the probabilities of routing from the\n            source to the target as the values.\n\n        Raises\n        ------\n        ValueError\n            A :exc:`.ValueError` is raised if: the keys in the dict\n            don't match with a vertex index in the graph; or if the\n            :class:`~numpy.ndarray` is passed with the wrong shape,\n            must be (``num_vertices``, ``num_vertices``); or the values\n            passed are not probabilities (for each vertex they are\n            positive and sum to 1);\n        TypeError\n            A :exc:`.TypeError` is raised if mat is not a dict or\n            :class:`~numpy.ndarray`.\n\n        Examples\n        --------\n        The default transition matrix is every out edge being equally\n        likely:\n\n        >>> import queueing_tool as qt\n        >>> adjacency = {\n        ...     0: [2],\n        ...     1: [2, 3],\n        ...     2: [0, 1, 2, 4],\n        ...     3: [1],\n        ...     4: [2],\n        ... }\n        >>> g = qt.adjacency2graph(adjacency)\n        >>> net = qt.QueueNetwork(g)\n        >>> net.transitions(False)  # doctest: +ELLIPSIS\n        ...                         # doctest: +NORMALIZE_WHITESPACE\n        {0: {2: 1.0},\n         1: {2: 0.5, 3: 0.5},\n         2: {0: 0.25, 1: 0.25, 2: 0.25, 4: 0.25},\n         3: {1: 1.0},\n         4: {2: 1.0}}\n\n        If you want to change only one vertex's transition\n        probabilities, you can do so with the following:\n\n        >>> net.set_transitions({1 : {2: 0.75, 3: 0.25}})\n        >>> net.transitions(False)  # doctest: +ELLIPSIS\n        ...                         
# doctest: +NORMALIZE_WHITESPACE\n        {0: {2: 1.0},\n         1: {2: 0.75, 3: 0.25},\n         2: {0: 0.25, 1: 0.25, 2: 0.25, 4: 0.25},\n         3: {1: 1.0},\n         4: {2: 1.0}}\n\n        One can generate a transition matrix using\n        :func:`.generate_transition_matrix`. You can change all\n        transition probabilities with an :class:`~numpy.ndarray`:\n\n        >>> mat = qt.generate_transition_matrix(g, seed=10)\n        >>> net.set_transitions(mat)\n        >>> net.transitions(False)  # doctest: +ELLIPSIS\n        ...                         # doctest: +NORMALIZE_WHITESPACE\n        {0: {2: 1.0},\n         1: {2: 0.962..., 3: 0.037...},\n         2: {0: 0.301..., 1: 0.353..., 2: 0.235..., 4: 0.108...},\n         3: {1: 1.0},\n         4: {2: 1.0}}\n\n        See Also\n        --------\n        :meth:`.transitions` : Return the current routing\n            probabilities.\n        :func:`.generate_transition_matrix` : Generate a random routing\n            matrix.", "id": "f14533:c1:m19"}
{"signature": "def size(self, s):", "body": "leader = self.find(s)<EOL>return self._size[leader]<EOL>", "docstring": "Returns the number of elements in the set that ``s`` belongs to.\n\n        Parameters\n        ----------\n        s : object\n            An object\n\n        Returns\n        -------\n        out : int\n            The number of elements in the set that ``s`` belongs to.", "id": "f14535:c0:m2"}
{"signature": "def union(self, a, b):", "body": "s1, s2 = self.find(a), self.find(b)<EOL>if s1 != s2:<EOL><INDENT>r1, r2  = self._rank[s1], self._rank[s2]<EOL>if r2 > r1:<EOL><INDENT>r1, r2 = r2, r1<EOL>s1, s2 = s2, s1<EOL><DEDENT>if r1 == r2:<EOL><INDENT>self._rank[s1] += <NUM_LIT:1><EOL><DEDENT>self._leader[s2] = s1<EOL>self._size[s1]  += self._size[s2]<EOL>self.nClusters  -= <NUM_LIT:1><EOL><DEDENT>", "docstring": "Merges the set that contains ``a`` with the set that contains ``b``.\n\n        Parameters\n        ----------\n        a, b : objects\n            Two objects whose sets are to be merged.", "id": "f14535:c0:m4"}
{"signature": "def find(self, s):", "body": "pSet   = [s]<EOL>parent = self._leader[s]<EOL>while parent != self._leader[parent]:<EOL><INDENT>pSet.append(parent)<EOL>parent = self._leader[parent]<EOL><DEDENT>if len(pSet) > <NUM_LIT:1>:<EOL><INDENT>for a in pSet:<EOL><INDENT>self._leader[a] = parent<EOL><DEDENT><DEDENT>return parent<EOL>", "docstring": "Locates the leader of the set to which the element ``s`` belongs.\n\n        Parameters\n        ----------\n        s : object\n            An object that the ``UnionFind`` contains.\n\n        Returns\n        -------\n        object\n            The leader of the set that contains ``s``.", "id": "f14535:c0:m3"}
{"signature": "def add_loss(self, *args, **kwargs):", "body": "self.blocked += <NUM_LIT:1><EOL>", "docstring": "Adds one to the number of times the agent has been blocked\n        from entering a queue.", "id": "f14536:c0:m7"}
{"signature": "def desired_destination(self, network, edge):", "body": "n = len(network.out_edges[edge[<NUM_LIT:1>]])<EOL>if n <= <NUM_LIT:1>:<EOL><INDENT>return network.out_edges[edge[<NUM_LIT:1>]][<NUM_LIT:0>]<EOL><DEDENT>u = uniform()<EOL>pr = network._route_probs[edge[<NUM_LIT:1>]]<EOL>k = _choice(pr, u, n)<EOL>return network.out_edges[edge[<NUM_LIT:1>]][k]<EOL>", "docstring": "Returns the agents next destination given their current\n        location on the network.\n\n        An ``Agent`` chooses one of the out edges at random. The\n        probability that the ``Agent`` will travel along a specific\n        edge is specified in the :class:`QueueNetwork's<.QueueNetwork>`\n        transition matrix.\n\n        Parameters\n        ----------\n        network : :class:`.QueueNetwork`\n            The :class:`.QueueNetwork` where the Agent resides.\n        edge : tuple\n            A 4-tuple indicating which edge this agent is located at.\n            The first two slots indicate the current edge's source and\n            target vertices, while the third slot indicates this edges\n            ``edge_index``. The last slot indicates the edge type of\n            that edge\n\n        Returns\n        -------\n        out : int\n            Returns an the edge index corresponding to the agents next\n            edge to visit in the network.\n\n        See Also\n        --------\n        :meth:`.transitions` : :class:`QueueNetwork's<.QueueNetwork>`\n            method that returns the transition probabilities for each\n            edge in the graph.", "id": "f14536:c0:m8"}
{"signature": "def queue_action(self, queue, *args, **kwargs):", "body": "if isinstance(queue, ResourceQueue):<EOL><INDENT>if self._has_resource:<EOL><INDENT>self._has_resource = False<EOL>self._had_resource = True<EOL><DEDENT>else:<EOL><INDENT>if queue.num_servers > <NUM_LIT:0>:<EOL><INDENT>queue.set_num_servers(queue.num_servers - <NUM_LIT:1>)<EOL>self._has_resource = True<EOL>self._had_resource = False<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Function that specifies the interaction with a\n        :class:`.ResourceQueue` upon departure.\n\n        When departuring from a :class:`.ResourceQueue` (or a\n        :class:`.QueueServer`), this method is called. If the agent\n        does not already have a resource then it decrements the number\n        of servers at :class:`.ResourceQueue` by one. Note that this\n        only applies to :class:`ResourceQueue's<.ResourceQueue>`.\n\n        Parameters\n        ----------\n        queue : :class:`.QueueServer`\n            The instance of the queue that the ``ResourceAgent`` will\n            interact with.", "id": "f14538:c0:m2"}
{"signature": "def number_queued(self):", "body": "return len(self.queue)<EOL>", "docstring": "Returns the number of agents waiting in line to be served.\n\n        Returns\n        -------\n        out : int\n            The number of agents waiting in line to be served.", "id": "f14539:c0:m14"}
{"signature": "def clear(self):", "body": "self.data = {}<EOL>self._num_arrivals = <NUM_LIT:0><EOL>self._oArrivals = <NUM_LIT:0><EOL>self.num_departures = <NUM_LIT:0><EOL>self.num_system = <NUM_LIT:0><EOL>self._num_total = <NUM_LIT:0><EOL>self._current_t = <NUM_LIT:0><EOL>self._time = infty<EOL>self._next_ct = <NUM_LIT:0><EOL>self._active = False<EOL>self.queue = collections.deque()<EOL>inftyAgent = InftyAgent()<EOL>self._arrivals = [inftyAgent]<EOL>self._departures = [inftyAgent]<EOL>", "docstring": "Clears out the queue. Removes all arrivals, departures, and\n        queued agents from the :class:`.QueueServer`, resets\n        ``num_arrivals``, ``num_departures``, ``num_system``, and the clock to\n        zero. It also clears any stored ``data`` and the server is then\n        set to inactive.", "id": "f14539:c0:m8"}
{"signature": "def next_event_description(self):", "body": "if self._departures[<NUM_LIT:0>]._time < self._arrivals[<NUM_LIT:0>]._time:<EOL><INDENT>return <NUM_LIT:2><EOL><DEDENT>elif self._arrivals[<NUM_LIT:0>]._time < infty:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "Returns an integer representing whether the next event is\n        an arrival, a departure, or nothing.\n\n        Returns\n        -------\n        out : int\n            An integer representing whether the next event is an\n            arrival or a departure: ``1`` corresponds to an arrival,\n            ``2`` corresponds to a departure, and ``0`` corresponds to\n            nothing scheduled to occur.", "id": "f14539:c0:m16"}
{"signature": "def delay_service(self, t=None):", "body": "if len(self._departures) > <NUM_LIT:1>:<EOL><INDENT>agent = heappop(self._departures)<EOL>if t is None:<EOL><INDENT>agent._time = self.service_f(agent._time)<EOL><DEDENT>else:<EOL><INDENT>agent._time = t<EOL><DEDENT>heappush(self._departures, agent)<EOL>self._update_time()<EOL><DEDENT>", "docstring": "Adds an extra service time to the next departing\n        :class:`Agent's<.Agent>` service time.\n\n        Parameters\n        ----------\n        t : float (optional)\n            Specifies the departing time for the agent scheduled\n            to depart next. If ``t`` is not given, then an additional\n            service time is added to the next departing agent.", "id": "f14539:c0:m11"}
{"signature": "def set_inactive(self):", "body": "self._active = False<EOL>", "docstring": "Changes the ``active`` attribute to False.", "id": "f14539:c0:m18"}
{"signature": "def poisson_random_measure(t, rate, rate_max):", "body": "scale = <NUM_LIT:1.0> / rate_max<EOL>t = t + exponential(scale)<EOL>while rate_max * uniform() > rate(t):<EOL><INDENT>t = t + exponential(scale)<EOL><DEDENT>return t<EOL>", "docstring": "A function that returns the arrival time of the next arrival for\n    a Poisson random measure.\n\n    Parameters\n    ----------\n    t : float\n        The start time from which to simulate the next arrival time.\n    rate : function\n        The *intensity function* for the measure, where ``rate(t)`` is\n        the expected arrival rate at time ``t``.\n    rate_max : float\n        The maximum value of the ``rate`` function.\n\n    Returns\n    -------\n    out : float\n        The time of the next arrival.\n\n    Notes\n    -----\n    This function returns the time of the next arrival, where the\n    distribution of the number of arrivals between times :math:`t` and\n    :math:`t+s` is Poisson with mean\n\n    .. math::\n\n       \\int_{t}^{t+s} dx \\, r(x)\n\n    where :math:`r(t)` is the supplied ``rate`` function. This function\n    can only simulate processes that have bounded intensity functions.\n    See chapter 6 of [3]_ for more on the mathematics behind Poisson\n    random measures; the book's publisher, Springer, has that chapter\n    available online for free at (`pdf`_\\).\n\n    A Poisson random measure is sometimes called a non-homogeneous\n    Poisson process. A Poisson process is a special type of Poisson\n    random measure.\n\n    .. 
_pdf: http://www.springer.com/cda/content/document/\\\n                cda_downloaddocument/9780387878584-c1.pdf\n\n    Examples\n    --------\n    Suppose you wanted to model the arrival process as a Poisson\n    random measure with rate function :math:`r(t) = 2 + \\sin( 2\\pi t)`.\n    Then you could do so as follows:\n\n    >>> import queueing_tool as qt\n    >>> import numpy as np\n    >>> np.random.seed(10)\n    >>> rate  = lambda t: 2 + np.sin(2 * np.pi * t)\n    >>> arr_f = lambda t: qt.poisson_random_measure(t, rate, 3)\n    >>> arr_f(1)  # doctest: +ELLIPSIS\n    1.491...\n\n    References\n    ----------\n    .. [3] Cinlar, Erhan. *Probability and stochastics*. Graduate Texts in\\\n           Mathematics. Vol. 261. Springer, New York, 2011.\\\n           :doi:`10.1007/978-0-387-87859-1`", "id": "f14539:m0"}
{"signature": "def set_active(self):", "body": "if not self._active:<EOL><INDENT>self._active = True<EOL>self._add_arrival()<EOL><DEDENT>", "docstring": "Changes the ``active`` attribute to True. Agents may now\n        arrive from outside the network.", "id": "f14539:c0:m17"}
{"signature": "def at_capacity(self):", "body": "return False<EOL>", "docstring": "Returns whether the queue is at capacity or not.\n\n        Returns\n        -------\n        bool\n            Always returns ``False``, since the ``QueueServer`` class\n            has infinite capacity.", "id": "f14539:c0:m7"}
{"signature": "def adjacency2graph(adjacency, edge_type=None, adjust=<NUM_LIT:1>, **kwargs):", "body": "if isinstance(adjacency, np.ndarray):<EOL><INDENT>adjacency = _matrix2dict(adjacency)<EOL><DEDENT>elif isinstance(adjacency, dict):<EOL><INDENT>adjacency = _dict2dict(adjacency)<EOL><DEDENT>else:<EOL><INDENT>msg = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>raise TypeError(msg)<EOL><DEDENT>if edge_type is None:<EOL><INDENT>edge_type = {}<EOL><DEDENT>else:<EOL><INDENT>if isinstance(edge_type, np.ndarray):<EOL><INDENT>edge_type = _matrix2dict(edge_type, etype=True)<EOL><DEDENT>elif isinstance(edge_type, dict):<EOL><INDENT>edge_type = _dict2dict(edge_type)<EOL><DEDENT><DEDENT>for u, ty in edge_type.items():<EOL><INDENT>for v, et in ty.items():<EOL><INDENT>adjacency[u][v]['<STR_LIT>'] = et<EOL><DEDENT><DEDENT>g = nx.from_dict_of_dicts(adjacency, create_using=nx.DiGraph())<EOL>adjacency = nx.to_dict_of_dicts(g)<EOL>adjacency = _adjacency_adjust(adjacency, adjust, True)<EOL>return nx.from_dict_of_dicts(adjacency, create_using=nx.DiGraph())<EOL>", "docstring": "Takes an adjacency list, dict, or matrix and returns a graph.\n\n    The purpose of this function is take an adjacency list (or matrix)\n    and return a :class:`.QueueNetworkDiGraph` that can be used with a\n    :class:`.QueueNetwork` instance. The Graph returned has the\n    ``edge_type`` edge property set for each edge. Note that the graph may\n    be altered.\n\n    Parameters\n    ----------\n    adjacency : dict or :class:`~numpy.ndarray`\n        An adjacency list as either a dict, or an adjacency matrix.\n    adjust : int ``{1, 2}`` (optional, default: 1)\n        Specifies what to do when the graph has terminal vertices\n        (nodes with no out-edges). Note that if ``adjust`` is not 2\n        then it is assumed to be 1. 
There are two choices:\n\n        * ``adjust = 1``: A loop is added to each terminal node in the\n          graph, and their ``edge_type`` of that loop is set to 0.\n        * ``adjust = 2``: All edges leading to terminal nodes have\n          their ``edge_type`` set to 0.\n\n    **kwargs :\n        Unused.\n\n    Returns\n    -------\n    out : :any:`networkx.DiGraph`\n        A directed graph with the ``edge_type`` edge property.\n\n    Raises\n    ------\n    TypeError\n        Is raised if ``adjacency`` is not a dict or\n        :class:`~numpy.ndarray`.\n\n    Examples\n    --------\n    If terminal nodes are such that all in-edges have edge type ``0``\n    then nothing is changed. However, if a node is a terminal node then\n    a loop is added with edge type 0.\n\n    >>> import queueing_tool as qt\n    >>> adj = {\n    ...     0: {1: {}},\n    ...     1: {2: {},\n    ...         3: {}},\n    ...     3: {0: {}}}\n    >>> eTy = {0: {1: 1}, 1: {2: 2, 3: 4}, 3: {0: 1}}\n    >>> # A loop will be added to vertex 2\n    >>> g = qt.adjacency2graph(adj, edge_type=eTy)\n    >>> ans = qt.graph2dict(g)\n    >>> sorted(ans.items())     # doctest: +NORMALIZE_WHITESPACE\n    [(0, {1: {'edge_type': 1}}),\n     (1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}), \n     (2, {2: {'edge_type': 0}}),\n     (3, {0: {'edge_type': 1}})]\n\n    You can use a dict of lists to represent the adjacency list.\n\n    >>> adj = {0 : [1], 1: [2, 3], 3: [0]}\n    >>> g = qt.adjacency2graph(adj, edge_type=eTy)\n    >>> ans = qt.graph2dict(g)\n    >>> sorted(ans.items())     # doctest: +NORMALIZE_WHITESPACE\n    [(0, {1: {'edge_type': 1}}),\n     (1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}),\n     (2, {2: {'edge_type': 0}}),\n     (3, {0: {'edge_type': 1}})]\n\n    Alternatively, you could have this function adjust the edges that\n    lead to terminal vertices by changing their edge type to 0:\n\n    >>> # The graph is unaltered\n    >>> g = qt.adjacency2graph(adj, edge_type=eTy, adjust=2)\n    
>>> ans = qt.graph2dict(g)\n    >>> sorted(ans.items())     # doctest: +NORMALIZE_WHITESPACE\n    [(0, {1: {'edge_type': 1}}),\n     (1, {2: {'edge_type': 0}, 3: {'edge_type': 4}}),\n     (2, {}),\n     (3, {0: {'edge_type': 1}})]", "id": "f14541:m3"}
{"signature": "def lines_scatter_args(self, line_kwargs=None, scatter_kwargs=None, pos=None):", "body": "if pos is not None:<EOL><INDENT>self.set_pos(pos)<EOL><DEDENT>elif self.pos is None:<EOL><INDENT>self.set_pos()<EOL><DEDENT>edge_pos = [<NUM_LIT:0> for e in self.edges()]<EOL>for e in self.edges():<EOL><INDENT>ei = self.edge_index[e]<EOL>edge_pos[ei] = (self.pos[e[<NUM_LIT:0>]], self.pos[e[<NUM_LIT:1>]])<EOL><DEDENT>line_collecton_kwargs = {<EOL>'<STR_LIT>': edge_pos,<EOL>'<STR_LIT>': self.edge_color,<EOL>'<STR_LIT>': (<NUM_LIT:1>,),<EOL>'<STR_LIT>': (<NUM_LIT:1>,),<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': plt.cm.ocean_r,<EOL>'<STR_LIT>': <NUM_LIT:5>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': None,<EOL>}<EOL>scatter_kwargs_ = {<EOL>'<STR_LIT:x>': self.pos[:, <NUM_LIT:0>],<EOL>'<STR_LIT:y>': self.pos[:, <NUM_LIT:1>],<EOL>'<STR_LIT:s>': <NUM_LIT:50>,<EOL>'<STR_LIT:c>': self.vertex_fill_color,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': '<STR_LIT:o>',<EOL>'<STR_LIT>': <NUM_LIT:2>,<EOL>'<STR_LIT>': plt.cm.ocean_r,<EOL>'<STR_LIT>': <NUM_LIT:1>,<EOL>'<STR_LIT>': self.vertex_color,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': None,<EOL>}<EOL>line_kwargs = {} if line_kwargs is None else line_kwargs<EOL>scatter_kwargs = {} if scatter_kwargs is None else scatter_kwargs<EOL>for key, value in line_kwargs.items():<EOL><INDENT>if key in line_collecton_kwargs:<EOL><INDENT>line_collecton_kwargs[key] = value<EOL><DEDENT><DEDENT>for key, value in scatter_kwargs.items():<EOL><INDENT>if key in scatter_kwargs_:<EOL><INDENT>scatter_kwargs_[key] = value<EOL><DEDENT><DEDENT>return line_collecton_kwargs, scatter_kwargs_<EOL>", "docstring": "Returns the arguments used when plotting.\n\n        Takes any keyword arguments for\n        
:class:`~matplotlib.collections.LineCollection` and\n        :meth:`~matplotlib.axes.Axes.scatter` and returns two\n        dictionaries with all the defaults set.\n\n        Parameters\n        ----------\n        line_kwargs : dict (optional, default: ``None``)\n            Any keyword arguments accepted by\n            :class:`~matplotlib.collections.LineCollection`.\n        scatter_kwargs : dict (optional, default: ``None``)\n            Any keyword arguments accepted by\n            :meth:`~matplotlib.axes.Axes.scatter`.\n\n        Returns\n        -------\n        tuple\n            A 2-tuple of dicts. The first entry is the keyword\n            arguments for\n            :class:`~matplotlib.collections.LineCollection` and the\n            second is the keyword args for\n            :meth:`~matplotlib.axes.Axes.scatter`.\n\n        Notes\n        -----\n        If a specific keyword argument is not passed then the defaults\n        are used.", "id": "f14541:c0:m16"}
{"signature": "def get_edge_type(self, edge_type):", "body": "edges = []<EOL>for e in self.edges():<EOL><INDENT>if self.adj[e[<NUM_LIT:0>]][e[<NUM_LIT:1>]].get('<STR_LIT>') == edge_type:<EOL><INDENT>edges.append(e)<EOL><DEDENT><DEDENT>return edges<EOL>", "docstring": "Returns all edges with the specified edge type.\n\n        Parameters\n        ----------\n        edge_type : int\n            An integer specifying what type of edges to return.\n\n        Returns\n        -------\n        out : list of 2-tuples\n            A list of 2-tuples representing the edges in the graph\n            with the specified edge type.\n\n        Examples\n        --------\n        Lets get type 2 edges from the following graph\n\n        >>> import queueing_tool as qt\n        >>> adjacency = {\n        ...     0: {1: {'edge_type': 2}},\n        ...     1: {2: {'edge_type': 1},\n        ...         3: {'edge_type': 4}},\n        ...     2: {0: {'edge_type': 2}},\n        ...     3: {3: {'edge_type': 0}}\n        ... }\n        >>> G = qt.QueueNetworkDiGraph(adjacency)\n        >>> ans = G.get_edge_type(2)\n        >>> ans.sort()\n        >>> ans\n        [(0, 1), (2, 0)]", "id": "f14541:c0:m14"}
{"signature": "def _matrix2dict(matrix, etype=False):", "body": "n = len(matrix)<EOL>adj = {k: {} for k in range(n)}<EOL>for k in range(n):<EOL><INDENT>for j in range(n):<EOL><INDENT>if matrix[k, j] != <NUM_LIT:0>:<EOL><INDENT>adj[k][j] = {} if not etype else matrix[k, j]<EOL><DEDENT><DEDENT><DEDENT>return adj<EOL>", "docstring": "Takes an adjacency matrix and returns an adjacency list.", "id": "f14541:m0"}
{"signature": "def _prepare_graph(g, g_colors, q_cls, q_arg, adjust_graph):", "body": "g = _test_graph(g)<EOL>if adjust_graph:<EOL><INDENT>pos = nx.get_node_attributes(g, '<STR_LIT>')<EOL>ans = nx.to_dict_of_dicts(g)<EOL>g = adjacency2graph(ans, adjust=<NUM_LIT:2>, is_directed=g.is_directed())<EOL>g = QueueNetworkDiGraph(g)<EOL>if len(pos) > <NUM_LIT:0>:<EOL><INDENT>g.set_pos(pos)<EOL><DEDENT><DEDENT>g.new_vertex_property('<STR_LIT>')<EOL>g.new_vertex_property('<STR_LIT>')<EOL>g.new_vertex_property('<STR_LIT>')<EOL>g.new_vertex_property('<STR_LIT>')<EOL>g.new_edge_property('<STR_LIT>')<EOL>g.new_edge_property('<STR_LIT>')<EOL>g.new_edge_property('<STR_LIT>')<EOL>g.new_edge_property('<STR_LIT>')<EOL>queues = _set_queues(g, q_cls, q_arg, '<STR_LIT>' in g.vertex_properties())<EOL>if '<STR_LIT>' not in g.vertex_properties():<EOL><INDENT>g.set_pos()<EOL><DEDENT>for k, e in enumerate(g.edges()):<EOL><INDENT>g.set_ep(e, '<STR_LIT>', <NUM_LIT>)<EOL>g.set_ep(e, '<STR_LIT>', <NUM_LIT:8>)<EOL>if e[<NUM_LIT:0>] == e[<NUM_LIT:1>]:<EOL><INDENT>g.set_ep(e, '<STR_LIT>', queues[k].colors['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>g.set_ep(e, '<STR_LIT>', queues[k].colors['<STR_LIT>'])<EOL><DEDENT><DEDENT>for v in g.nodes():<EOL><INDENT>g.set_vp(v, '<STR_LIT>', <NUM_LIT:1>)<EOL>g.set_vp(v, '<STR_LIT>', <NUM_LIT:8>)<EOL>e = (v, v)<EOL>if g.is_edge(e):<EOL><INDENT>g.set_vp(v, '<STR_LIT>', queues[g.edge_index[e]]._current_color(<NUM_LIT:2>))<EOL>g.set_vp(v, '<STR_LIT>', queues[g.edge_index[e]]._current_color())<EOL><DEDENT>else:<EOL><INDENT>g.set_vp(v, '<STR_LIT>', g_colors['<STR_LIT>'])<EOL>g.set_vp(v, '<STR_LIT>', g_colors['<STR_LIT>'])<EOL><DEDENT><DEDENT>return g, queues<EOL>", "docstring": "Prepares a graph for use in :class:`.QueueNetwork`.\n\n    This function is called by ``__init__`` in the\n    :class:`.QueueNetwork` class. 
It creates the :class:`.QueueServer`\n    instances that sit on the edges, and sets various edge and node\n    properties that are used when drawing the graph.\n\n    Parameters\n    ----------\n    g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, \\\n        ``None``,  etc.\n        Any object that networkx can turn into a\n        :any:`DiGraph<networkx.DiGraph>`\n    g_colors : dict\n        A dictionary of colors. The specific keys used are\n        ``vertex_color`` and ``vertex_fill_color`` for vertices that\n        do not have any loops. Set :class:`.QueueNetwork` for the\n        default values passed.\n    q_cls : dict\n        A dictionary where the keys are integers that represent an edge\n        type, and the values are :class:`.QueueServer` classes.\n    q_args : dict\n        A dictionary where the keys are integers that represent an edge\n        type, and the values are the arguments that are used when\n        creating an instance of that :class:`.QueueServer` class.\n    adjust_graph : bool\n        Specifies whether the graph will be adjusted using\n        :func:`.adjacency2graph`.\n\n    Returns\n    -------\n    g : :class:`.QueueNetworkDiGraph`\n    queues : list\n        A list of :class:`QueueServers<.QueueServer>` where\n        ``queues[k]`` is the ``QueueServer`` that sets on the edge with\n        edge index ``k``.\n\n    Notes\n    -----\n    The graph ``g`` should have the ``edge_type`` edge property map.\n    If it does not then an ``edge_type`` edge property is\n    created and set to 1.\n\n    The following properties are set by each queue: ``vertex_color``,\n    ``vertex_fill_color``, ``vertex_fill_color``, ``edge_color``.\n    See :class:`.QueueServer` for more on setting these values.\n\n    The following properties are assigned as a properties to the graph;\n    their default values for each edge or vertex is shown:\n\n        * ``vertex_pen_width``: ``1``,\n        * ``vertex_size``: ``8``,\n        * 
``edge_control_points``: ``[]``\n        * ``edge_marker_size``: ``8``\n        * ``edge_pen_width``: ``1.25``\n\n    Raises\n    ------\n    TypeError\n        Raised when the parameter ``g`` is not of a type that can be\n        made into a :any:`networkx.DiGraph`.", "id": "f14543:m1"}
{"signature": "def _calculate_distance(latlon1, latlon2):", "body": "lat1, lon1 = latlon1<EOL>lat2, lon2 = latlon2<EOL>dlon = lon2 - lon1<EOL>dlat = lat2 - lat1<EOL>R = <NUM_LIT>  <EOL>a = np.sin(dlat / <NUM_LIT:2>)**<NUM_LIT:2> + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon / <NUM_LIT:2>))**<NUM_LIT:2><EOL>c = <NUM_LIT:2> * np.pi * R * np.arctan2(np.sqrt(a), np.sqrt(<NUM_LIT:1> - a)) / <NUM_LIT><EOL>return c<EOL>", "docstring": "Calculates the distance between two points on earth.", "id": "f14544:m0"}
{"signature": "def generate_random_graph(num_vertices=<NUM_LIT>, prob_loop=<NUM_LIT:0.5>, **kwargs):", "body": "g = minimal_random_graph(num_vertices, **kwargs)<EOL>for v in g.nodes():<EOL><INDENT>e = (v, v)<EOL>if not g.is_edge(e):<EOL><INDENT>if np.random.uniform() < prob_loop:<EOL><INDENT>g.add_edge(*e)<EOL><DEDENT><DEDENT><DEDENT>g = set_types_random(g, **kwargs)<EOL>return g<EOL>", "docstring": "Creates a random graph where the edges have different types.\n\n    This method calls :func:`.minimal_random_graph`, and then adds\n    a loop to each vertex with ``prob_loop`` probability. It then\n    calls :func:`.set_types_random` on the resulting graph.\n\n    Parameters\n    ----------\n    num_vertices : int (optional, default: 250)\n        The number of vertices in the graph.\n    prob_loop : float (optional, default: 0.5)\n        The probability that a loop gets added to a vertex.\n    **kwargs :\n        Any parameters to send to :func:`.minimal_random_graph` or\n        :func:`.set_types_random`.\n\n    Returns\n    -------\n    :class:`.QueueNetworkDiGraph`\n        A graph with the position of the vertex set as a property.\n        The position property is called ``pos``. 
Also, the ``edge_type``\n        edge property is set for each edge.\n\n    Examples\n    --------\n    The following generates a directed graph with 50 vertices where half\n    the edges are type 1 and 1/4th are type 2 and 1/4th are type 3:\n\n    >>> import queueing_tool as qt\n    >>> pTypes = {1: 0.5, 2: 0.25, 3: 0.25}\n    >>> g = qt.generate_random_graph(100, proportions=pTypes, seed=17)\n    >>> non_loops = [e for e in g.edges() if e[0] != e[1]]\n    >>> p1 = np.sum([g.ep(e, 'edge_type') == 1 for e in non_loops])\n    >>> float(p1) / len(non_loops) # doctest: +ELLIPSIS\n    0.486...\n    >>> p2 = np.sum([g.ep(e, 'edge_type') == 2 for e in non_loops])\n    >>> float(p2) / len(non_loops) # doctest: +ELLIPSIS\n    0.249...\n    >>> p3 = np.sum([g.ep(e, 'edge_type') == 3 for e in non_loops])\n    >>> float(p3) / len(non_loops) # doctest: +ELLIPSIS\n    0.264...\n\n    To make an undirected graph with 25 vertices where there are 4\n    different edge types with random proportions:\n\n    >>> p = np.random.rand(4)\n    >>> p = p / sum(p)\n    >>> p = {k + 1: p[k] for k in range(4)}\n    >>> g = qt.generate_random_graph(num_vertices=25, is_directed=False, proportions=p)\n\n    Note that none of the edge types in the above example are 0. It is\n    recommended use edge type indices starting at 1, since 0 is\n    typically used for terminal edges.", "id": "f14545:m1"}
{"signature": "def set_types_random(g, proportions=None, loop_proportions=None, seed=None,<EOL>**kwargs):", "body": "g = _test_graph(g)<EOL>if isinstance(seed, numbers.Integral):<EOL><INDENT>np.random.seed(seed)<EOL><DEDENT>if proportions is None:<EOL><INDENT>proportions = {k: <NUM_LIT:1.> / <NUM_LIT:3> for k in range(<NUM_LIT:1>, <NUM_LIT:4>)}<EOL><DEDENT>if loop_proportions is None:<EOL><INDENT>loop_proportions = {k: <NUM_LIT:1.> / <NUM_LIT:4> for k in range(<NUM_LIT:4>)}<EOL><DEDENT>edges = [e for e in g.edges() if e[<NUM_LIT:0>] != e[<NUM_LIT:1>]]<EOL>loops = [e for e in g.edges() if e[<NUM_LIT:0>] == e[<NUM_LIT:1>]]<EOL>props = list(proportions.values())<EOL>lprops = list(loop_proportions.values())<EOL>if not np.isclose(sum(props), <NUM_LIT:1.0>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not np.isclose(sum(lprops), <NUM_LIT:1.0>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>eTypes = {}<EOL>types = list(proportions.keys())<EOL>values = np.random.choice(types, size=len(edges), replace=True, p=props)<EOL>for k, e in enumerate(edges):<EOL><INDENT>eTypes[e] = values[k]<EOL><DEDENT>types = list(loop_proportions.keys())<EOL>values = np.random.choice(types, size=len(loops), replace=True, p=lprops)<EOL>for k, e in enumerate(loops):<EOL><INDENT>eTypes[e] = values[k]<EOL><DEDENT>g.new_edge_property('<STR_LIT>')<EOL>for e in g.edges():<EOL><INDENT>g.set_ep(e, '<STR_LIT>', eTypes[e])<EOL><DEDENT>return g<EOL>", "docstring": "Randomly sets ``edge_type`` (edge type) properties of the graph.\n\n    This function randomly assigns each edge a type. 
The probability of\n    an edge being a specific type is proscribed in the\n    ``proportions``, ``loop_proportions`` variables.\n\n    Parameters\n    ----------\n    g : :any:`networkx.DiGraph`, :class:`numpy.ndarray`, dict, etc.\n        Any object that :any:`DiGraph<networkx.DiGraph>` accepts.\n    proportions : dict (optional, default: ``{k: 0.25 for k in range(1, 4)}``)\n        A dictionary of edge types and proportions, where the keys are\n        the types and the values are the proportion of non-loop edges\n        that are expected to be of that type. The values can must sum\n        to one.\n    loop_proportions : dict (optional, default: ``{k: 0.25 for k in range(4)}``)\n        A dictionary of edge types and proportions, where the keys are\n        the types and the values are the proportion of loop edges\n        that are expected to be of that type. The values can must sum\n        to one.\n    seed : int (optional)\n        An integer used to initialize numpy's psuedorandom number\n        generator.\n    **kwargs :\n        Unused.\n\n    Returns\n    -------\n    :class:`.QueueNetworkDiGraph`\n        Returns the a graph with an ``edge_type`` edge property.\n\n    Raises\n    ------\n    TypeError\n        Raised when the parameter ``g`` is not of a type that can be\n        made into a :any:`networkx.DiGraph`.\n\n    ValueError\n        Raises a :exc:`~ValueError` if the ``pType`` values do not sum\n        to one.\n\n    Notes\n    -----\n    If ``pTypes`` is not explicitly specified in the arguments, then it\n    defaults to four types in the graph (types 0, 1, 2, and 3). It sets\n    non-loop edges to be either 1, 2, or 3 33\\% chance, and loops are\n    types 0, 1, 2, 3 with 25\\% chance.", "id": "f14545:m4"}
{"signature": "def out_of_date(original, derived):", "body": "return (not os.path.exists(derived)<EOL>or os.stat(derived).st_mtime < os.stat(original).st_mtime)<EOL>", "docstring": "Returns True if derivative is out-of-date wrt original,\nboth of which are full file paths.", "id": "f14548:m10"}
{"signature": "def makefig(code, code_path, output_dir, output_base, config):", "body": "<EOL>default_dpi = {'<STR_LIT>': <NUM_LIT>, '<STR_LIT>': <NUM_LIT:200>, '<STR_LIT>': <NUM_LIT:50>}<EOL>formats = []<EOL>for fmt in config.plot_formats:<EOL><INDENT>if isinstance(fmt, str):<EOL><INDENT>formats.append((fmt, default_dpi.get(fmt, <NUM_LIT>)))<EOL><DEDENT>elif type(fmt) in (tuple, list) and len(fmt)==<NUM_LIT:2>:<EOL><INDENT>formats.append((str(fmt[<NUM_LIT:0>]), int(fmt[<NUM_LIT:1>])))<EOL><DEDENT>else:<EOL><INDENT>raise PlotError('<STR_LIT>' % fmt)<EOL><DEDENT><DEDENT>code_pieces = split_code_at_show(code)<EOL>all_exists = True<EOL>img = ImageFile(output_base, output_dir)<EOL>for format, dpi in formats:<EOL><INDENT>if out_of_date(code_path, img.filename(format)):<EOL><INDENT>all_exists = False<EOL>break<EOL><DEDENT>img.formats.append(format)<EOL><DEDENT>if all_exists:<EOL><INDENT>return [(code, [img])]<EOL><DEDENT>results = []<EOL>all_exists = True<EOL>for i, code_piece in enumerate(code_pieces):<EOL><INDENT>images = []<EOL>for j in range(<NUM_LIT:1000>):<EOL><INDENT>img = ImageFile('<STR_LIT>' % (output_base, i, j), output_dir)<EOL>for format, dpi in formats:<EOL><INDENT>if out_of_date(code_path, img.filename(format)):<EOL><INDENT>all_exists = False<EOL>break<EOL><DEDENT>img.formats.append(format)<EOL><DEDENT>if not all_exists:<EOL><INDENT>all_exists = (j > <NUM_LIT:0>)<EOL>break<EOL><DEDENT>images.append(img)<EOL><DEDENT>if not all_exists:<EOL><INDENT>break<EOL><DEDENT>results.append((code_piece, images))<EOL><DEDENT>if all_exists:<EOL><INDENT>return results<EOL><DEDENT>results = []<EOL>ns = {}<EOL>for i, code_piece in enumerate(code_pieces):<EOL><INDENT>plt.close('<STR_LIT:all>')<EOL>run_code(code_piece, code_path, ns)<EOL>images = []<EOL>fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()<EOL>for j, figman in enumerate(fig_managers):<EOL><INDENT>if len(fig_managers) == <NUM_LIT:1> and len(code_pieces) == <NUM_LIT:1>:<EOL><INDENT>img = 
ImageFile(output_base, output_dir)<EOL><DEDENT>else:<EOL><INDENT>img = ImageFile(\"<STR_LIT>\" % (output_base, i, j),<EOL>output_dir)<EOL><DEDENT>images.append(img)<EOL>for format, dpi in formats:<EOL><INDENT>try:<EOL><INDENT>figman.canvas.figure.savefig(img.filename(format), dpi=dpi)<EOL><DEDENT>except exceptions.BaseException as err:<EOL><INDENT>raise PlotError(traceback.format_exc())<EOL><DEDENT>img.formats.append(format)<EOL><DEDENT><DEDENT>results.append((code_piece, images))<EOL><DEDENT>return results<EOL>", "docstring": "Run a pyplot script *code* and save the images under *output_dir*\nwith file names derived from *output_base*", "id": "f14548:m11"}
{"signature": "def split_code_at_show(text):", "body": "parts = []<EOL>is_doctest = contains_doctest(text)<EOL>part = []<EOL>for line in text.split(\"<STR_LIT:\\n>\"):<EOL><INDENT>if (not is_doctest and line.strip() == '<STR_LIT>') or(is_doctest and line.strip() == '<STR_LIT>'):<EOL><INDENT>part.append(line)<EOL>parts.append(\"<STR_LIT:\\n>\".join(part))<EOL>part = []<EOL><DEDENT>else:<EOL><INDENT>part.append(line)<EOL><DEDENT><DEDENT>if \"<STR_LIT:\\n>\".join(part).strip():<EOL><INDENT>parts.append(\"<STR_LIT:\\n>\".join(part))<EOL><DEDENT>return parts<EOL>", "docstring": "Split code at plt.show()", "id": "f14548:m8"}
{"signature": "def _Keyword(self, t):", "body": "self._write(t.name)<EOL>self._write(\"<STR_LIT:=>\")<EOL>self._dispatch(t.expr)<EOL>", "docstring": "Keyword value assignment within function calls and definitions.", "id": "f14555:c0:m29"}
{"signature": "def _From(self, t):", "body": "<EOL>self._fill(\"<STR_LIT>\")<EOL>self._write(t.modname)<EOL>self._write(\"<STR_LIT>\")<EOL>for i, (name,asname) in enumerate(t.names):<EOL><INDENT>if i != <NUM_LIT:0>:<EOL><INDENT>self._write(\"<STR_LIT:U+002CU+0020>\")<EOL><DEDENT>self._write(name)<EOL>if asname is not None:<EOL><INDENT>self._write(\"<STR_LIT>\"+asname)<EOL><DEDENT><DEDENT>", "docstring": "Handle \"from xyz import foo, bar as baz\".", "id": "f14555:c0:m23"}
{"signature": "def _AssAttr(self, t):", "body": "self._dispatch(t.expr)<EOL>self._write('<STR_LIT:.>'+t.attrname)<EOL>", "docstring": "Handle assigning an attribute of an object", "id": "f14555:c0:m8"}
{"signature": "def _Function(self, t):", "body": "if t.decorators is not None:<EOL><INDENT>self._fill(\"<STR_LIT:@>\")<EOL>self._dispatch(t.decorators)<EOL><DEDENT>self._fill(\"<STR_LIT>\"+t.name + \"<STR_LIT:(>\")<EOL>defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)<EOL>for i, arg in enumerate(zip(t.argnames, defaults)):<EOL><INDENT>self._write(arg[<NUM_LIT:0>])<EOL>if arg[<NUM_LIT:1>] is not None:<EOL><INDENT>self._write('<STR_LIT:=>')<EOL>self._dispatch(arg[<NUM_LIT:1>])<EOL><DEDENT>if i < len(t.argnames)-<NUM_LIT:1>:<EOL><INDENT>self._write('<STR_LIT:U+002CU+0020>')<EOL><DEDENT><DEDENT>self._write(\"<STR_LIT:)>\")<EOL>if self._single_func:<EOL><INDENT>self._do_indent = False<EOL><DEDENT>self._enter()<EOL>self._dispatch(t.code)<EOL>self._leave()<EOL>self._do_indent = True<EOL>", "docstring": "Handle function definitions", "id": "f14555:c0:m24"}
{"signature": "def _Import(self, t):", "body": "self._fill(\"<STR_LIT>\")<EOL>for i, (name,asname) in enumerate(t.names):<EOL><INDENT>if i != <NUM_LIT:0>:<EOL><INDENT>self._write(\"<STR_LIT:U+002CU+0020>\")<EOL><DEDENT>self._write(name)<EOL>if asname is not None:<EOL><INDENT>self._write(\"<STR_LIT>\"+asname)<EOL><DEDENT><DEDENT>", "docstring": "Handle \"import xyz.foo\".", "id": "f14555:c0:m28"}
{"signature": "def _fill(self, text = \"<STR_LIT>\"):", "body": "if self._do_indent:<EOL><INDENT>self._write(\"<STR_LIT:\\n>\"+\"<STR_LIT:U+0020>\"*self._indent + text)<EOL><DEDENT>else:<EOL><INDENT>self._write(text)<EOL><DEDENT>", "docstring": "Indent a piece of text, according to the current indentation level", "id": "f14555:c0:m1"}
{"signature": "def _enter(self):", "body": "self._write(\"<STR_LIT>\")<EOL>self._indent += <NUM_LIT:1><EOL>", "docstring": "Print ':', and increase the indentation.", "id": "f14555:c0:m3"}
{"signature": "def _Getattr(self, t):", "body": "if isinstance(t.expr, (Div, Mul, Sub, Add)):<EOL><INDENT>self._write('<STR_LIT:(>')<EOL>self._dispatch(t.expr)<EOL>self._write('<STR_LIT:)>')<EOL><DEDENT>else:<EOL><INDENT>self._dispatch(t.expr)<EOL><DEDENT>self._write('<STR_LIT:.>'+t.attrname)<EOL>", "docstring": "Handle getting an attribute of an object", "id": "f14555:c0:m25"}
{"signature": "def _CallFunc(self, t):", "body": "self._dispatch(t.node)<EOL>self._write(\"<STR_LIT:(>\")<EOL>comma = False<EOL>for e in t.args:<EOL><INDENT>if comma: self._write(\"<STR_LIT:U+002CU+0020>\")<EOL>else: comma = True<EOL>self._dispatch(e)<EOL><DEDENT>if t.star_args:<EOL><INDENT>if comma: self._write(\"<STR_LIT:U+002CU+0020>\")<EOL>else: comma = True<EOL>self._write(\"<STR_LIT:*>\")<EOL>self._dispatch(t.star_args)<EOL><DEDENT>if t.dstar_args:<EOL><INDENT>if comma: self._write(\"<STR_LIT:U+002CU+0020>\")<EOL>else: comma = True<EOL>self._write(\"<STR_LIT>\")<EOL>self._dispatch(t.dstar_args)<EOL><DEDENT>self._write(\"<STR_LIT:)>\")<EOL>", "docstring": "Function call.", "id": "f14555:c0:m15"}
{"signature": "def _parse_index(self, section, content):", "body": "def strip_each_in(lst):<EOL><INDENT>return [s.strip() for s in lst]<EOL><DEDENT>out = {}<EOL>section = section.split('<STR_LIT>')<EOL>if len(section) > <NUM_LIT:1>:<EOL><INDENT>out['<STR_LIT:default>'] = strip_each_in(section[<NUM_LIT:1>].split('<STR_LIT:U+002C>'))[<NUM_LIT:0>]<EOL><DEDENT>for line in content:<EOL><INDENT>line = line.split('<STR_LIT::>')<EOL>if len(line) > <NUM_LIT:2>:<EOL><INDENT>out[line[<NUM_LIT:1>]] = strip_each_in(line[<NUM_LIT:2>].split('<STR_LIT:U+002C>'))<EOL><DEDENT><DEDENT>return out<EOL>", "docstring": ".. index: default\n   :refguide: something, else, and more", "id": "f14557:c1:m9"}
{"signature": "def _parse_summary(self):", "body": "if self._is_at_section():<EOL><INDENT>return<EOL><DEDENT>while True:<EOL><INDENT>summary = self._doc.read_to_next_empty_line()<EOL>summary_str = \"<STR_LIT:U+0020>\".join([s.strip() for s in summary]).strip()<EOL>if re.compile('<STR_LIT>').match(summary_str):<EOL><INDENT>self['<STR_LIT>'] = summary_str<EOL>if not self._is_at_section():<EOL><INDENT>continue<EOL><DEDENT><DEDENT>break<EOL><DEDENT>if summary is not None:<EOL><INDENT>self['<STR_LIT>'] = summary<EOL><DEDENT>if not self._is_at_section():<EOL><INDENT>self['<STR_LIT>'] = self._read_to_next_section()<EOL><DEDENT>", "docstring": "Grab signature (if given) and summary", "id": "f14557:c1:m10"}
{"signature": "def _parse_see_also(self, content):", "body": "items = []<EOL>def parse_item_name(text):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>m = self._name_rgx.match(text)<EOL>if m:<EOL><INDENT>g = m.groups()<EOL>if g[<NUM_LIT:1>] is None:<EOL><INDENT>return g[<NUM_LIT:3>], None<EOL><DEDENT>else:<EOL><INDENT>return g[<NUM_LIT:2>], g[<NUM_LIT:1>]<EOL><DEDENT><DEDENT>raise ValueError(\"<STR_LIT>\" % text)<EOL><DEDENT>def push_item(name, rest):<EOL><INDENT>if not name:<EOL><INDENT>return<EOL><DEDENT>name, role = parse_item_name(name)<EOL>items.append((name, list(rest), role))<EOL>del rest[:]<EOL><DEDENT>current_func = None<EOL>rest = []<EOL>for line in content:<EOL><INDENT>if not line.strip(): continue<EOL>m = self._name_rgx.match(line)<EOL>if m and line[m.end():].strip().startswith('<STR_LIT::>'):<EOL><INDENT>push_item(current_func, rest)<EOL>current_func, line = line[:m.end()], line[m.end():]<EOL>rest = [line.split('<STR_LIT::>', <NUM_LIT:1>)[<NUM_LIT:1>].strip()]<EOL>if not rest[<NUM_LIT:0>]:<EOL><INDENT>rest = []<EOL><DEDENT><DEDENT>elif not line.startswith('<STR_LIT:U+0020>'):<EOL><INDENT>push_item(current_func, rest)<EOL>current_func = None<EOL>if '<STR_LIT:U+002C>' in line:<EOL><INDENT>for func in line.split('<STR_LIT:U+002C>'):<EOL><INDENT>if func.strip():<EOL><INDENT>push_item(func, [])<EOL><DEDENT><DEDENT><DEDENT>elif line.strip():<EOL><INDENT>current_func = line<EOL><DEDENT><DEDENT>elif current_func is not None:<EOL><INDENT>rest.append(line.strip())<EOL><DEDENT><DEDENT>push_item(current_func, rest)<EOL>return items<EOL>", "docstring": "func_name : Descriptive text\n    continued text\nanother_func_name : Descriptive text\nfunc_name1, func_name2, :meth:`func_name`, func_name3", "id": "f14557:c1:m8"}
{"signature": "def add(self, string, start, end, line):", "body": "if string.strip():<EOL><INDENT>self.start_lineno = min(self.start_lineno, start[<NUM_LIT:0>])<EOL>self.end_lineno = max(self.end_lineno, end[<NUM_LIT:0>])<EOL><DEDENT>", "docstring": "Add lines to the block.", "id": "f14562:c1:m1"}
{"signature": "def make_index(self):", "body": "for prev, block in zip(self.blocks[:-<NUM_LIT:1>], self.blocks[<NUM_LIT:1>:]):<EOL><INDENT>if not block.is_comment:<EOL><INDENT>self.index[block.start_lineno] = prev<EOL><DEDENT><DEDENT>", "docstring": "Make the index mapping lines of actual code to their associated\n        prefix comments.", "id": "f14562:c2:m5"}
{"signature": "def get_observations(self):", "body": "if self.empty:<EOL><INDENT>return []<EOL><DEDENT>rows = list(self.tbody)<EOL>observations = []<EOL>for row_observation, row_details in zip(rows[::<NUM_LIT:2>], rows[<NUM_LIT:1>::<NUM_LIT:2>]):<EOL><INDENT>data = {}<EOL>cells = OBSERVATION_XPATH(row_observation)<EOL>data['<STR_LIT:name>'] = _clean_cell(cells[<NUM_LIT:0>])<EOL>data['<STR_LIT:date>'] = _clean_cell(cells[<NUM_LIT:1>])<EOL>data['<STR_LIT>'] = _clean_cell(cells[<NUM_LIT:3>])<EOL>data['<STR_LIT>'] = _clean_cell(cells[<NUM_LIT:6>])<EOL>cells = DETAILS_XPATH(row_details)<EOL>data['<STR_LIT>'] = _clean_cell(cells[<NUM_LIT:0>])<EOL>data['<STR_LIT>'] = _clean_cell(cells[<NUM_LIT:3>]).replace('<STR_LIT:None>', '<STR_LIT>')<EOL>data['<STR_LIT>'] = _clean_cell(cells[<NUM_LIT:4>])<EOL>data['<STR_LIT>'] = _clean_cell(cells[<NUM_LIT:5>])<EOL>observations.append(data)<EOL><DEDENT>return observations<EOL>", "docstring": "Parses the HTML table into a list of dictionaries, each of which\nrepresents a single observation.", "id": "f14570:c0:m1"}
{"signature": "def download_observations(observer_code):", "body": "page_number = <NUM_LIT:1><EOL>observations = []<EOL>while True:<EOL><INDENT>logger.info('<STR_LIT>', page_number)<EOL>response = requests.get(WEBOBS_RESULTS_URL, params={<EOL>'<STR_LIT>': observer_code,<EOL>'<STR_LIT>': <NUM_LIT:200>,<EOL>'<STR_LIT>': '<STR_LIT:all>',<EOL>'<STR_LIT>': page_number,<EOL>})<EOL>logger.debug(response.request.url)<EOL>parser = WebObsResultsParser(response.text)<EOL>observations.extend(parser.get_observations())<EOL>if '<STR_LIT>' not in response.text:<EOL><INDENT>break<EOL><DEDENT>page_number += <NUM_LIT:1><EOL><DEDENT>return observations<EOL>", "docstring": "Downloads all variable star observations by a given observer.\n\nPerforms a series of HTTP requests to AAVSO's WebObs search and\ndownloads the results page by page. Each page is then passed to\n:py:class:`~pyaavso.parsers.webobs.WebObsResultsParser` and parse results\nare added to the final observation list.", "id": "f14571:m0"}
{"signature": "def create_table(self):", "body": "table = self.conn.create_table(<EOL>name=self.get_table_name(),<EOL>schema=self.get_schema(),<EOL>read_units=self.get_read_units(),<EOL>write_units=self.get_write_units(),<EOL>)<EOL>if table.status != '<STR_LIT>':<EOL><INDENT>table.refresh(wait_for_active=True, retry_seconds=<NUM_LIT:1>)<EOL><DEDENT>return table<EOL>", "docstring": "Hook point for overriding how the CounterPool creates a new table\nin DynamooDB", "id": "f14577:c0:m6"}
{"signature": "def __init__(self, aws_access_key=None, aws_secret_key=None, table_name=None, schema=None, read_units=None, write_units=None, auto_create_table=True, ):", "body": "self.conn = self.get_conn(aws_access_key, aws_secret_key)<EOL>self.table_name = table_name or self.table_name<EOL>self.schema = schema or self.schema<EOL>self.read_units = read_units or self.read_units<EOL>self.write_units = write_units or self.write_units<EOL>self.auto_create_table = auto_create_table<EOL>super(CounterPool, self).__init__()<EOL>", "docstring": ":aws_access_key:\n    AWS Acccess Key ID with permissions to use DynamoDB\n:aws_secret_key:\n    AWS Access Secret Key for the given Access Key ID\n:table_name:\n    The DynamoDB table that should be used to store this pool's\n    counters.  See http://bit.ly/DynamoDBModel for details on\n    DynamoDB's data model.\n:schema:\n    The schema that will be used to create a table if one does not\n    already exist.  See the `boto`<http://bit.ly/BotoCreateTable>_\n    docs for details on what's expected for a schema.\n:read_units:\n    Read throughput to be set when a table is created.  See\n    http://bit.ly/DynamoThoughput for details on Dynamo's provisioned\n    throughput system.\n:write_units:\n    Write throughput to be set when a table is created.\n:auto_create_table:\n    Should Albertson create a dynamodb table if the provided\n    `table_name` doesn't exist.", "id": "f14577:c0:m0"}
{"signature": "def get_write_units(self):", "body": "return self.write_units<EOL>", "docstring": "Hook point for overriding how the CounterPool determines the write\nthroughput units to set on a newly created table.", "id": "f14577:c0:m5"}
{"signature": "def get_counter(self, name, start=<NUM_LIT:0>):", "body": "item = self.get_item(hash_key=name, start=start)<EOL>counter = Counter(dynamo_item=item, pool=self)<EOL>return counter<EOL>", "docstring": "Gets the DynamoDB item behind a counter and ties it to a Counter\ninstace.", "id": "f14577:c0:m10"}
{"signature": "def get_item(self, hash_key, start=<NUM_LIT:0>, extra_attrs=None):", "body": "table = self.get_table()<EOL>try:<EOL><INDENT>item = table.get_item(hash_key=hash_key)<EOL><DEDENT>except DynamoDBKeyNotFoundError:<EOL><INDENT>item = None<EOL><DEDENT>if item is None:<EOL><INDENT>item = self.create_item(<EOL>hash_key=hash_key,<EOL>start=start,<EOL>extra_attrs=extra_attrs,<EOL>)<EOL><DEDENT>return item<EOL>", "docstring": "Hook point for overriding how the CouterPool fetches a DynamoDB item\nfor a given counter.", "id": "f14577:c0:m9"}
{"signature": "def get_schema(self):", "body": "if not self.schema:<EOL><INDENT>raise NotImplementedError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>return self.conn.create_schema(**self.schema)<EOL>", "docstring": "Hook point for overriding how the CounterPool determines the schema\nto be used when creating a missing table.", "id": "f14577:c0:m3"}
{"signature": "@property<EOL><INDENT>def task_identifier(self):<DEDENT>", "body": "task_id = self.celery_self.name<EOL>if self.include_args:<EOL><INDENT>merged_args = str(self.args) + str([(k, self.kwargs[k]) for k in sorted(self.kwargs)])<EOL>task_id += '<STR_LIT>'.format(hashlib.md5(merged_args.encode('<STR_LIT:utf-8>')).hexdigest())<EOL><DEDENT>return task_id<EOL>", "docstring": "Return the unique identifier (string) of a task instance.", "id": "f14581:c1:m1"}
{"signature": "def init_app(self, app):", "body": "_state._register_app = self.original_register_app  <EOL>if not hasattr(app, '<STR_LIT>'):<EOL><INDENT>app.extensions = dict()<EOL><DEDENT>if '<STR_LIT>' in app.extensions:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>app.extensions['<STR_LIT>'] = _CeleryState(self, app)<EOL>super(Celery, self).__init__(app.import_name, broker=app.config['<STR_LIT>'])<EOL>if '<STR_LIT>' in app.config:<EOL><INDENT>self._preconf['<STR_LIT>'] = app.config['<STR_LIT>']<EOL><DEDENT>self.conf.update(app.config)<EOL>task_base = self.Task<EOL>class ContextTask(task_base):<EOL><INDENT>def __call__(self, *_args, **_kwargs):<EOL><INDENT>with app.app_context():<EOL><INDENT>return task_base.__call__(self, *_args, **_kwargs)<EOL><DEDENT><DEDENT><DEDENT>setattr(ContextTask, '<STR_LIT>', True)<EOL>setattr(self, '<STR_LIT>', ContextTask)<EOL>", "docstring": "Actual method to read celery settings from app configuration and initialize the celery instance.\n\n        :param app: Flask application instance.", "id": "f14581:c5:m1"}
{"signature": "def reset_lock(self):", "body": "self.delete_group(self.task_identifier)<EOL>", "docstring": "Removed the lock regardless of timeout.", "id": "f14581:c3:m4"}
{"signature": "def register_blueprint(self, _):", "body": "pass<EOL>", "docstring": "Mock register_blueprint method.", "id": "f14582:c0:m0"}
{"signature": "@worker_ready.connect<EOL>def on_worker_ready(**_):", "body": "WORKER_READY.append(True)<EOL>", "docstring": "Called when the Celery worker thread is ready to do work.\n\n    This is to avoid race conditions since everything is in one python process.", "id": "f14584:m0"}
{"signature": "def generate_config():", "body": "config = dict()<EOL>if os.environ.get('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>config['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>elif os.environ.get('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>config['<STR_LIT>'] = '<STR_LIT>'<EOL>config['<STR_LIT>'] = config['<STR_LIT>']<EOL><DEDENT>elif os.environ.get('<STR_LIT>', '<STR_LIT>').startswith('<STR_LIT>'):<EOL><INDENT>config['<STR_LIT>'] = '<STR_LIT>' + os.environ['<STR_LIT>'].split('<STR_LIT:U+002C>', <NUM_LIT:1>)[<NUM_LIT:1>]<EOL>config['<STR_LIT>'] = config['<STR_LIT>']<EOL><DEDENT>elif os.environ.get('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>config['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>elif os.environ.get('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>config['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>elif os.environ.get('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>config['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>elif os.environ.get('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>config['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>if os.environ.get('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>config['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>elif os.environ.get('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>config['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>file_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '<STR_LIT>')<EOL>config['<STR_LIT>'] = '<STR_LIT>' + file_path<EOL><DEDENT>config['<STR_LIT>'] = '<STR_LIT>' + config['<STR_LIT>']<EOL>config['<STR_LIT>'] = '<STR_LIT>' + config['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in config and '<STR_LIT>' not in config:<EOL><INDENT>config['<STR_LIT>'] = config['<STR_LIT>']<EOL><DEDENT>return config<EOL>", "docstring": "Generate a Flask config dict with settings for a specific broker based on an environment variable.\n\n    To be merged into app.config.\n\n    :return: Flask config to be fed into app.config.update().\n    :rtype: dict", "id": "f14587:m0"}
{"signature": "@celery.task(bind=True)<EOL>@single_instance()<EOL>def sub(x, y):", "body": "return x - y<EOL>", "docstring": "Celery task: subtract numbers.", "id": "f14587:m5"}
{"signature": "@celery.task(bind=True)<EOL>@single_instance(include_args=True, lock_timeout=<NUM_LIT:20>)<EOL>def mul(x, y):", "body": "return x * y<EOL>", "docstring": "Celery task: multiply numbers.", "id": "f14587:m4"}
{"signature": "@celery.task(bind=True)<EOL>@single_instance<EOL>def add(x, y):", "body": "return x + y<EOL>", "docstring": "Celery task: add numbers.", "id": "f14587:m3"}
{"signature": "@celery.task(bind=True, soft_time_limit=<NUM_LIT>)<EOL>@single_instance<EOL>def add3(x, y):", "body": "return x + y<EOL>", "docstring": "Celery task: add numbers.", "id": "f14587:m7"}
{"signature": "@classmethod<EOL><INDENT>def run(cls):<DEDENT>", "body": "project = __import__(IMPORT, fromlist=['<STR_LIT>'])<EOL>for expected, var in [('<STR_LIT>', '<STR_LIT>'), (LICENSE, '<STR_LIT>'), (VERSION, '<STR_LIT>')]:<EOL><INDENT>if getattr(project, var) != expected:<EOL><INDENT>raise SystemExit('<STR_LIT>'.format(var))<EOL><DEDENT><DEDENT>if not re.compile(r'<STR_LIT>' % VERSION, re.MULTILINE).search(readme()):<EOL><INDENT>raise SystemExit('<STR_LIT>')<EOL><DEDENT>if INSTALL_REQUIRES:<EOL><INDENT>contents = readme('<STR_LIT>')<EOL>section = re.compile(r'<STR_LIT>', re.DOTALL).findall(contents)<EOL>if not section:<EOL><INDENT>raise SystemExit('<STR_LIT>')<EOL><DEDENT>in_tox = re.findall(r'<STR_LIT>', section[<NUM_LIT:0>])<EOL>if INSTALL_REQUIRES != in_tox:<EOL><INDENT>raise SystemExit('<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Check variables.", "id": "f14588:c0:m2"}
{"signature": "@classmethod<EOL><INDENT>def finalize_options(cls):<DEDENT>", "body": "pass<EOL>", "docstring": "Required by distutils.", "id": "f14588:c0:m1"}
{"signature": "@property<EOL><INDENT>def following(self):<DEDENT>", "body": "following = []<EOL>try:<EOL><INDENT>for (nick, url) in self.cfg.items(\"<STR_LIT>\"):<EOL><INDENT>source = Source(nick, url)<EOL>following.append(source)<EOL><DEDENT><DEDENT>except configparser.NoSectionError as e:<EOL><INDENT>logger.debug(e)<EOL><DEDENT>return following<EOL>", "docstring": "A :class:`list` of all :class:`Source` objects.", "id": "f14593:c0:m5"}
{"signature": "@classmethod<EOL><INDENT>def discover(cls):<DEDENT>", "body": "file = os.path.join(Config.config_dir, Config.config_name)<EOL>return cls.from_file(file)<EOL>", "docstring": "Make a guess about the config file location an try loading it.", "id": "f14593:c0:m2"}
{"signature": "def add_source(self, source):", "body": "if not self.cfg.has_section(\"<STR_LIT>\"):<EOL><INDENT>self.cfg.add_section(\"<STR_LIT>\")<EOL><DEDENT>self.cfg.set(\"<STR_LIT>\", source.nick, source.url)<EOL>self.write_config()<EOL>", "docstring": "Adds a new :class:`Source` to the config\u2019s following section.", "id": "f14593:c0:m25"}
{"signature": "def get_source_by_nick(self, nick):", "body": "url = self.cfg.get(\"<STR_LIT>\", nick, fallback=None)<EOL>return Source(nick, url) if url else None<EOL>", "docstring": "Returns the :class:`Source` of the given nick.\n\n        :param str nick: nickname for which will be searched in the config", "id": "f14593:c0:m26"}
{"signature": "def write_config(self):", "body": "with open(self.config_file, \"<STR_LIT:w>\") as config_file:<EOL><INDENT>self.cfg.write(config_file)<EOL><DEDENT>", "docstring": "Writes `self.cfg` to `self.config_file`.", "id": "f14593:c0:m4"}
{"signature": "def build_default_map(self):", "body": "default_map = {<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": self.check_following,<EOL>\"<STR_LIT>\": self.timeout,<EOL>\"<STR_LIT>\": self.porcelain,<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": self.twtfile,<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": self.use_pager,<EOL>\"<STR_LIT>\": self.use_cache,<EOL>\"<STR_LIT>\": self.limit_timeline,<EOL>\"<STR_LIT>\": self.timeout,<EOL>\"<STR_LIT>\": self.sorting,<EOL>\"<STR_LIT>\": self.porcelain,<EOL>\"<STR_LIT>\": self.twtfile,<EOL>\"<STR_LIT>\": self.timeline_update_interval,<EOL>},<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": self.use_pager,<EOL>\"<STR_LIT>\": self.use_cache,<EOL>\"<STR_LIT>\": self.limit_timeline,<EOL>\"<STR_LIT>\": self.timeout,<EOL>\"<STR_LIT>\": self.sorting,<EOL>\"<STR_LIT>\": self.porcelain,<EOL>\"<STR_LIT>\": self.timeline_update_interval,<EOL>}<EOL>}<EOL>return default_map<EOL>", "docstring": "Maps config options to the default values used by click, returns :class:`dict`.", "id": "f14593:c0:m28"}
{"signature": "def remove_tweets(self, url):", "body": "try:<EOL><INDENT>del self.cache[url]<EOL>self.mark_updated()<EOL>return True<EOL><DEDENT>except KeyError:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Tries to remove cached tweets.", "id": "f14594:c0:m12"}
{"signature": "def __init__(self, cache_file, cache, update_interval):", "body": "self.cache_file = cache_file<EOL>self.cache = cache<EOL>self.update_interval = update_interval<EOL>", "docstring": "Initializes new :class:`Cache` object.\n\n        :param str cache_file: full path to the loaded cache file.\n        :param ~shelve.Shelve cache: a Shelve object, with cache loaded.\n        :param int update_interval: number of seconds the cache is considered to be\n                                    up-to-date without calling any external resources.", "id": "f14594:c0:m0"}
{"signature": "def add_tweets(self, url, last_modified, tweets):", "body": "try:<EOL><INDENT>self.cache[url] = {\"<STR_LIT>\": last_modified, \"<STR_LIT>\": tweets}<EOL>self.mark_updated()<EOL>return True<EOL><DEDENT>except TypeError:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Adds new tweets to the cache.", "id": "f14594:c0:m10"}
{"signature": "def mark_updated(self):", "body": "if not self.is_valid:<EOL><INDENT>self.cache[\"<STR_LIT>\"] = timestamp()<EOL><DEDENT>", "docstring": "Mark cache as updated at current *NIX timestamp", "id": "f14594:c0:m7"}
{"signature": "@classmethod<EOL><INDENT>def discover(cls, *args, **kwargs):<DEDENT>", "body": "file = os.path.join(Cache.cache_dir, Cache.cache_name)<EOL>return cls.from_file(file, *args, **kwargs)<EOL>", "docstring": "Make a guess about the cache file location an try loading it.", "id": "f14594:c0:m4"}
{"signature": "def get_tweets(self, url, limit=None):", "body": "try:<EOL><INDENT>tweets = self.cache[url][\"<STR_LIT>\"]<EOL>self.mark_updated()<EOL>return sorted(tweets, reverse=True)[:limit]<EOL><DEDENT>except KeyError:<EOL><INDENT>return []<EOL><DEDENT>", "docstring": "Retrieves tweets from the cache.", "id": "f14594:c0:m11"}
{"signature": "def close(self):", "body": "try:<EOL><INDENT>self.cache.close()<EOL>return True<EOL><DEDENT>except AttributeError:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Closes Shelve object.", "id": "f14594:c0:m13"}
{"signature": "@property<EOL><INDENT>def last_updated(self):<DEDENT>", "body": "try:<EOL><INDENT>return self.cache[\"<STR_LIT>\"]<EOL><DEDENT>except KeyError:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "Returns *NIX timestamp of last update of the cache.", "id": "f14594:c0:m5"}
{"signature": "def parse_iso8601(string):", "body": "return make_aware(dateutil.parser.parse(string))<EOL>", "docstring": "Parse string using dateutil.parser.", "id": "f14595:m1"}
{"signature": "def parse_tweet(raw_tweet, source, now=None):", "body": "if now is None:<EOL><INDENT>now = datetime.now(timezone.utc)<EOL><DEDENT>raw_created_at, text = raw_tweet.split(\"<STR_LIT:\\t>\", <NUM_LIT:1>)<EOL>created_at = parse_iso8601(raw_created_at)<EOL>if created_at > now:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return Tweet(click.unstyle(text.strip()), created_at, source)<EOL>", "docstring": "Parses a single raw tweet line from a twtxt file\nand returns a :class:`Tweet` object.\n\n:param str raw_tweet: a single raw tweet line\n:param Source source: the source of the given tweet\n:param Datetime now: the current datetime\n\n:returns: the parsed tweet\n:rtype: Tweet", "id": "f14595:m3"}
{"signature": "@cli.command()<EOL>@click.option(\"<STR_LIT>\",<EOL>is_flag=True,<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>type=click.INT,<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>flag_value=\"<STR_LIT>\",<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>flag_value=\"<STR_LIT>\",<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\",<EOL>type=click.FLOAT,<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\",<EOL>is_flag=True,<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\",<EOL>is_flag=True,<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\",<EOL>is_flag=True,<EOL>help=\"<STR_LIT>\")<EOL>@click.argument(\"<STR_LIT:source>\")<EOL>@click.pass_context<EOL>def view(ctx, **kwargs):", "body": "ctx.forward(timeline)<EOL>", "docstring": "Show feed of given source.", "id": "f14597:m3"}
{"signature": "@cli.command()<EOL>@click.argument(\"<STR_LIT>\")<EOL>@click.argument(\"<STR_LIT:url>\")<EOL>@click.option(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>flag_value=True,<EOL>help=\"<STR_LIT>\")<EOL>@click.pass_context<EOL>def follow(ctx, nick, url, force):", "body": "source = Source(nick, url)<EOL>sources = ctx.obj['<STR_LIT>'].following<EOL>if not force:<EOL><INDENT>if source.nick in (source.nick for source in sources):<EOL><INDENT>click.confirm(\"<STR_LIT>\".format(<EOL>click.style(source.nick, bold=True)), default=False, abort=True)<EOL><DEDENT>_, status = get_remote_status([source])[<NUM_LIT:0>]<EOL>if not status or status.status_code != <NUM_LIT:200>:<EOL><INDENT>click.confirm(\"<STR_LIT>\".format(<EOL>click.style(source.nick, bold=True),<EOL>click.style(source.url, bold=True)), default=False, abort=True)<EOL><DEDENT><DEDENT>ctx.obj['<STR_LIT>'].add_source(source)<EOL>click.echo(\"<STR_LIT>\".format(<EOL>click.style(source.nick, bold=True)))<EOL>", "docstring": "Add a new source to your followings.", "id": "f14597:m5"}
{"signature": "@click.group()<EOL>@click.option(\"<STR_LIT>\", \"<STR_LIT:-c>\",<EOL>type=click.Path(exists=True, file_okay=True, readable=True, writable=True, resolve_path=True),<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>is_flag=True, default=False,<EOL>help=\"<STR_LIT>\")<EOL>@click.version_option()<EOL>@click.pass_context<EOL>def cli(ctx, config, verbose):", "body": "init_logging(debug=verbose)<EOL>if ctx.invoked_subcommand == \"<STR_LIT>\":<EOL><INDENT>return  <EOL><DEDENT>try:<EOL><INDENT>if config:<EOL><INDENT>conf = Config.from_file(config)<EOL><DEDENT>else:<EOL><INDENT>conf = Config.discover()<EOL><DEDENT><DEDENT>except ValueError as e:<EOL><INDENT>if \"<STR_LIT>\" in str(e):<EOL><INDENT>click.echo(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>click.echo(\"<STR_LIT>\")<EOL><DEDENT>sys.exit()<EOL><DEDENT>ctx.default_map = conf.build_default_map()<EOL>ctx.obj = {'<STR_LIT>': conf}<EOL>", "docstring": "Decentralised, minimalist microblogging service for hackers.", "id": "f14597:m0"}
{"signature": "@cli.command()<EOL>@click.option(\"<STR_LIT>\",<EOL>is_flag=True,<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\",<EOL>type=click.FLOAT,<EOL>help=\"<STR_LIT>\")<EOL>@click.option(\"<STR_LIT>\",<EOL>is_flag=True,<EOL>help=\"<STR_LIT>\")<EOL>@click.pass_context<EOL>def following(ctx, check, timeout, porcelain):", "body": "sources = ctx.obj['<STR_LIT>'].following<EOL>if check:<EOL><INDENT>sources = get_remote_status(sources, timeout)<EOL>for (source, status) in sources:<EOL><INDENT>click.echo(style_source_with_status(source, status, porcelain))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>sources = sorted(sources, key=lambda source: source.nick)<EOL>for source in sources:<EOL><INDENT>click.echo(style_source(source, porcelain))<EOL><DEDENT><DEDENT>", "docstring": "Return the list of sources you\u2019re following.", "id": "f14597:m4"}
{"signature": "@cli.command()<EOL>@click.argument(\"<STR_LIT>\")<EOL>@click.pass_context<EOL>def unfollow(ctx, nick):", "body": "source = ctx.obj['<STR_LIT>'].get_source_by_nick(nick)<EOL>try:<EOL><INDENT>with Cache.discover() as cache:<EOL><INDENT>cache.remove_tweets(source.url)<EOL><DEDENT><DEDENT>except OSError as e:<EOL><INDENT>logger.debug(e)<EOL><DEDENT>ret_val = ctx.obj['<STR_LIT>'].remove_source_by_nick(nick)<EOL>if ret_val:<EOL><INDENT>click.echo(\"<STR_LIT>\".format(<EOL>click.style(source.nick, bold=True)))<EOL><DEDENT>else:<EOL><INDENT>click.echo(\"<STR_LIT>\".format(<EOL>click.style(nick, bold=True)))<EOL><DEDENT>", "docstring": "Remove an existing source from your followings.", "id": "f14597:m6"}
{"signature": "def format_mentions(text, format_callback=format_mention):", "body": "def handle_mention(match):<EOL><INDENT>name, url = match.groups()<EOL>return format_callback(name, url)<EOL><DEDENT>return mention_re.sub(handle_mention, text)<EOL>", "docstring": "Searches the given text for mentions generated by `expand_mention()` and returns a human-readable form.\n\n    For example:\n    \"@<bob http://example.org/twtxt.txt>\" will result in \"@bob\"\n\n    If you follow a source: source.nick will be bold\n    If you are the mentioned source: source.nick will be bold and coloured\n    If nothing from the above is true: nick will be unstyled\n    If nothing from the above is true and nick is not given: url will be used", "id": "f14600:m4"}
{"signature": "def expand_mentions(text, embed_names=True):", "body": "if embed_names:<EOL><INDENT>mention_format = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>mention_format = \"<STR_LIT>\"<EOL><DEDENT>def handle_mention(match):<EOL><INDENT>source = get_source_by_name(match.group(<NUM_LIT:1>))<EOL>if source is None:<EOL><INDENT>return \"<STR_LIT>\".format(match.group(<NUM_LIT:1>))<EOL><DEDENT>return mention_format.format(<EOL>name=source.nick,<EOL>url=source.url)<EOL><DEDENT>return short_mention_re.sub(handle_mention, text)<EOL>", "docstring": "Searches the given text for mentions and expands them.\n\n    For example:\n    \"@source.nick\" will be expanded to \"@<source.nick source.url>\".", "id": "f14600:m2"}
{"signature": "@property<EOL><INDENT>def relative_datetime(self):<DEDENT>", "body": "now = datetime.now(timezone.utc)<EOL>tense = \"<STR_LIT>\" if self.created_at > now else \"<STR_LIT>\"<EOL>return \"<STR_LIT>\".format(humanize.naturaldelta(now - self.created_at), tense)<EOL>", "docstring": "Return human-readable relative time string.", "id": "f14604:c0:m8"}
{"signature": "def __get_live_version(self):", "body": "try:<EOL><INDENT>import versiontools<EOL><DEDENT>except ImportError:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return str(versiontools.Version.from_expression(self.name))<EOL><DEDENT>", "docstring": "Get a live version string using versiontools", "id": "f14609:c0:m1"}
{"signature": "def fit(self, X, y=None):", "body": "X = check_array(X)<EOL>if self.scale_by_median:<EOL><INDENT>self.median_ = np.median(X[np.triu_indices_from(X, k=<NUM_LIT:1>)],<EOL>overwrite_input=True)<EOL><DEDENT>elif hasattr(self, '<STR_LIT>'):<EOL><INDENT>del self.median_<EOL><DEDENT>return self<EOL>", "docstring": "If scale_by_median, find :attr:`median_`; otherwise, do nothing.\n\nParameters\n----------\nX : array\n    The raw pairwise distances.", "id": "f14611:c3:m1"}
{"signature": "def transform(self, X):", "body": "n = self.flip_.shape[<NUM_LIT:0>]<EOL>if X.ndim != <NUM_LIT:2> or X.shape[<NUM_LIT:1>] != n:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise TypeError(msg.format(self.flip_.shape[<NUM_LIT:0>]))<EOL><DEDENT>return np.dot(X, self.flip_)<EOL>", "docstring": "Transforms X according to the linear transformation corresponding to\nflipping the input eigenvalues.\n\nParameters\n----------\nX : array, shape [n_test, n]\n    The test similarities to training points.\n\nReturns\n-------\nXt : array, shape [n_test, n]\n    The transformed test similarites to training points.", "id": "f14611:c5:m2"}
{"signature": "def fit(self, X, y=None):", "body": "n = X.shape[<NUM_LIT:0>]<EOL>if X.shape != (n, n):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>self.train_ = X<EOL>memory = get_memory(self.memory)<EOL>lo, = memory.cache(scipy.linalg.eigvalsh)(X, eigvals=(<NUM_LIT:0>, <NUM_LIT:0>))<EOL>self.shift_ = max(self.min_eig - lo, <NUM_LIT:0>)<EOL>return self<EOL>", "docstring": "Learn the transformation to shifted eigenvalues. Only depends\non the input dimension.\n\nParameters\n----------\nX : array, shape [n, n]\n    The *symmetric* input similarities.", "id": "f14611:c6:m1"}
{"signature": "def transform(self, X):", "body": "n = self.train_.shape[<NUM_LIT:0>]<EOL>if X.ndim != <NUM_LIT:2> or X.shape[<NUM_LIT:1>] != n:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise TypeError(msg.format(n))<EOL><DEDENT>if self.copy:<EOL><INDENT>X = X.copy()<EOL><DEDENT>if self.shift_ != <NUM_LIT:0> and X is self.train_ or (<EOL>X.shape == self.train_.shape and np.allclose(X, self.train_)):<EOL><INDENT>X[xrange(n), xrange(n)] += self.shift_<EOL><DEDENT>return X<EOL>", "docstring": "Transforms X according to the linear transformation corresponding to\nshifting the input eigenvalues to all be at least ``self.min_eig``.\n\nParameters\n----------\nX : array, shape [n_test, n]\n    The test similarities to training points.\n\nReturns\n-------\nXt : array, shape [n_test, n]\n    The transformed test similarites to training points. Only different\n    from X if X is the training data.", "id": "f14611:c6:m2"}
{"signature": "def fit_transform(self, X, y=None):", "body": "n = X.shape[<NUM_LIT:0>]<EOL>if X.shape != (n, n):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>memory = get_memory(self.memory)<EOL>discard_X = not self.copy and self.negatives_likely<EOL>vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['<STR_LIT>'])(<EOL>X, overwrite_a=discard_X)<EOL>vals = vals[:, None]<EOL>self.clip_ = np.dot(vecs, np.sign(vals) * vecs.T)<EOL>if discard_X or vals[<NUM_LIT:0>, <NUM_LIT:0>] < <NUM_LIT:0>:<EOL><INDENT>del X<EOL>np.abs(vals, out=vals)<EOL>X = np.dot(vecs, vals * vecs.T)<EOL>del vals, vecs<EOL>X = Symmetrize(copy=False).fit_transform(X)<EOL><DEDENT>return X<EOL>", "docstring": "Flips the negative eigenvalues of X.\n\nParameters\n----------\nX : array, shape [n, n]\n    The *symmetric* input similarities. If X is asymmetric, it will be\n    treated as if it were symmetric based on its lower-triangular part.\n\nReturns\n-------\nXt : array, shape [n, n]\n    The transformed training similarities.", "id": "f14611:c5:m3"}
{"signature": "def transform(self, X):", "body": "n = self.clip_.shape[<NUM_LIT:0>]<EOL>if X.ndim != <NUM_LIT:2> or X.shape[<NUM_LIT:1>] != n:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise TypeError(msg.format(self.clip_.shape[<NUM_LIT:0>]))<EOL><DEDENT>return np.dot(X, self.clip_)<EOL>", "docstring": "Transforms X according to the linear transformation corresponding to\nclipping the input eigenvalues.\n\nParameters\n----------\nX : array, shape [n_test, n]\n    The test similarities to training points.\n\nReturns\n-------\nXt : array, shape [n_test, n]\n    The transformed test similarites to training points.", "id": "f14611:c4:m2"}
{"signature": "def fit(self, X, y=None):", "body": "self.features_ = as_features(X, stack=True, bare=True)<EOL>return self<EOL>", "docstring": "Specify the data to which kernel values should be computed.\n\nParameters\n----------\nX : list of arrays or :class:`skl_groups.features.Features`\n    The bags to compute \"to\".", "id": "f14613:c0:m2"}
{"signature": "def transform(self, X):", "body": "X = as_features(X, stack=True, bare=True)<EOL>Y = self.features_<EOL>if X.dim != Y.dim:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>.format(X.dim, Y.dim))<EOL><DEDENT>pointwise = pairwise_kernels(X.stacked_features, Y.stacked_features,<EOL>metric=self.kernel,<EOL>filter_params=True,<EOL>**self._get_kernel_params())<EOL>K = np.empty((len(X), len(Y)))<EOL>for i in range(len(X)):<EOL><INDENT>for j in range(len(Y)):<EOL><INDENT>K[i, j] = pointwise[X._boundaries[i]:X._boundaries[i+<NUM_LIT:1>],<EOL>Y._boundaries[j]:Y._boundaries[j+<NUM_LIT:1>]].mean()<EOL><DEDENT><DEDENT>return K<EOL>", "docstring": "Compute kernels from X to :attr:`features_`.\n\nParameters\n----------\nX : list of arrays or :class:`skl_groups.features.Features`\n    The bags to compute \"from\". Must have same dimension as\n    :attr:`features_`.\n\nReturns\n-------\nK : array of shape ``[len(X), len(features_)]``\n    The kernel evaluations from X to :attr:`features_`.", "id": "f14613:c0:m3"}
{"signature": "def transform(self, X, **params):", "body": "X = as_features(X, stack=True)<EOL>X_new = self.transformer.transform(X.stacked_features, **params)<EOL>return self._gather_outputs(X, X_new)<EOL>", "docstring": "Transform the stacked points.\n\nParameters\n----------\nX : :class:`Features` or list of bag feature arrays\n    New data to transform.\n\nany other keyword argument :\n    Passed on as keyword arguments to the transformer's ``transform()``.\n\nReturns\n-------\nX_new : :class:`Features`\n    Transformed features.", "id": "f14614:c0:m3"}
{"signature": "def transform(self, X):", "body": "X = check_array(X, copy=self.copy)<EOL>X *= self.scale_<EOL>X += self.min_<EOL>if self.truncate:<EOL><INDENT>np.maximum(self.feature_range[<NUM_LIT:0>], X, out=X)<EOL>np.minimum(self.feature_range[<NUM_LIT:1>], X, out=X)<EOL><DEDENT>return X<EOL>", "docstring": "Scaling features of X according to feature_range.\n\n        Parameters\n        ----------\n        X : array-like with shape [n_samples, n_features]\n            Input data that will be transformed.", "id": "f14614:c2:m2"}
{"signature": "def quadratic(Ks, dim, rhos, required=None):", "body": "<EOL>N = rhos.shape[<NUM_LIT:0>]<EOL>Ks = np.asarray(Ks)<EOL>Bs = (Ks - <NUM_LIT:1>) / np.pi ** (dim / <NUM_LIT:2>) * gamma(dim / <NUM_LIT:2> + <NUM_LIT:1>)  <EOL>est = Bs / (N - <NUM_LIT:1>) * np.mean(rhos ** (-dim), axis=<NUM_LIT:0>)<EOL>return est<EOL>", "docstring": "r'''\n    Estimates \\int p^2 based on kNN distances.\n\n    In here because it's used in the l2 distance, above.\n\n    Returns array of shape (num_Ks,).", "id": "f14617:m16"}
{"signature": "def alpha_div(alphas, Ks, dim, num_q, rhos, nus):", "body": "return _get_alpha_div(alphas, Ks, dim)(num_q, rhos, nus)<EOL>", "docstring": "r'''\n    Estimate the alpha divergence between distributions:\n        \\int p^\\alpha q^(1-\\alpha)\n    based on kNN distances.\n\n    Used in Renyi, Hellinger, Bhattacharyya, Tsallis divergences.\n\n    Enforces that estimates are >= 0.\n\n    Returns divergence estimates with shape (num_alphas, num_Ks).", "id": "f14617:m7"}
{"signature": "def renyi(alphas, Ks, dim, required, min_val=np.spacing(<NUM_LIT:1>),<EOL>clamp=True, to_self=False):", "body": "alphas = np.reshape(alphas, (-<NUM_LIT:1>, <NUM_LIT:1>))<EOL>est = required<EOL>est = np.maximum(est, min_val)  <EOL>np.log(est, out=est)<EOL>est /= alphas - <NUM_LIT:1><EOL>if clamp:<EOL><INDENT>np.maximum(est, <NUM_LIT:0>, out=est)<EOL><DEDENT>return est<EOL>", "docstring": "r'''\n    Estimate the Renyi-alpha divergence between distributions, based on kNN\n    distances:  1/(\\alpha-1) \\log \\int p^alpha q^(1-\\alpha)\n\n    If the inner integral is less than min_val (default ``np.spacing(1)``),\n    uses the log of min_val instead.\n\n    If clamp (the default), enforces that the estimates are nonnegative by\n    replacing any negative estimates with 0.\n\n    Returns an array of shape (num_alphas, num_Ks).", "id": "f14617:m13"}
{"signature": "def hellinger(Ks, dim, required, clamp=True, to_self=False):", "body": "bc = required<EOL>est = <NUM_LIT:1> - bc<EOL>np.maximum(est, <NUM_LIT:0>, out=est)<EOL>if clamp:<EOL><INDENT>np.minimum(est, <NUM_LIT:1>, out=est)<EOL><DEDENT>np.sqrt(est, out=est)<EOL>return est<EOL>", "docstring": "r'''\n    Estimate the Hellinger distance between distributions, based on kNN\n    distances:  \\sqrt{1 - \\int \\sqrt{p q}}\n\n    Always enforces 0 <= H, to be able to sqrt; if clamp, also enforces\n    H <= 1.\n\n    Returns a vector: one element for each K.", "id": "f14617:m12"}
{"signature": "def transform(self, X):", "body": "X = as_features(X, stack=True, bare=True)<EOL>Y = self.features_<EOL>Ks = np.asarray(self.Ks)<EOL>if X.dim != Y.dim:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValueError(msg.format(Y.dim, X.dim))<EOL><DEDENT>memory = self.memory<EOL>if isinstance(memory, string_types):<EOL><INDENT>memory = Memory(cachedir=memory, verbose=<NUM_LIT:0>)<EOL><DEDENT>est = memory.cache(_est_divs, ignore=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>output, self.rhos_ = est(<EOL>X, Y, self.indices_, getattr(self, '<STR_LIT>', None),<EOL>self.div_funcs, Ks,<EOL>self.do_sym, self.clamp, self.version, self.min_dist,<EOL>self._flann_args(), self._n_jobs)<EOL>return output<EOL>", "docstring": "r'''\n        Computes the divergences from X to :attr:`features_`.\n\n        Parameters\n        ----------\n        X : list of bag feature arrays or :class:`skl_groups.features.Features`\n            The bags to search \"from\".\n\n        Returns\n        -------\n        divs : array of shape ``[len(div_funcs), len(Ks), len(X), len(features_)] + ([2] if do_sym else [])``\n            The divergences from X to :attr:`features_`.\n            ``divs[d, k, i, j]`` is the ``div_funcs[d]`` divergence\n            from ``X[i]`` to ``fetaures_[j]`` using a K of ``Ks[k]``.\n            If ``do_sym``, ``divs[d, k, i, j, 0]`` is\n            :math:`D_{d,k}( X_i \\| \\texttt{features_}_j)` and\n            ``divs[d, k, i, j, 1]`` is :math:`D_{d,k}(\\texttt{features_}_j \\| X_i)`.", "id": "f14617:c0:m5"}
{"signature": "def _parse_specs(specs, Ks):", "body": "funcs = {}<EOL>metas = {}<EOL>meta_deps = defaultdict(set)<EOL>def add_func(func, alpha=None, pos=None):<EOL><INDENT>needs_alpha = getattr(func, '<STR_LIT>', False)<EOL>is_meta = hasattr(func, '<STR_LIT>')<EOL>d = metas if is_meta else funcs<EOL>if func not in d:<EOL><INDENT>if needs_alpha:<EOL><INDENT>args = {'<STR_LIT>': [alpha], '<STR_LIT>': [pos]}<EOL><DEDENT>else:<EOL><INDENT>args = {'<STR_LIT>': None, '<STR_LIT>': [pos]}<EOL><DEDENT>if not is_meta:<EOL><INDENT>d[func] = _FuncInfo(**args)<EOL><DEDENT>else:<EOL><INDENT>d[func] = _MetaFuncInfo(deps=[], **args)<EOL>for req in func.needs_results:<EOL><INDENT>if callable(req.alpha):<EOL><INDENT>req_alpha = req.alpha(alpha)<EOL><DEDENT>else:<EOL><INDENT>req_alpha = req.alpha<EOL><DEDENT>add_func(req.func, alpha=req_alpha)<EOL>meta_deps[func].add(req.func)<EOL>meta_deps[req.func]  <EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>info = d[func]<EOL>if not needs_alpha:<EOL><INDENT>if pos is not None:<EOL><INDENT>if info.pos != [None]:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValueError(msg.format(func_name))<EOL><DEDENT>info.pos[<NUM_LIT:0>] = pos<EOL><DEDENT><DEDENT>else:  <EOL><INDENT>try:<EOL><INDENT>idx = info.alphas.index(alpha)<EOL><DEDENT>except ValueError:<EOL><INDENT>info.alphas.append(alpha)<EOL>info.pos.append(pos)<EOL>if is_meta:<EOL><INDENT>for req in func.needs_results:<EOL><INDENT>if callable(req.alpha):<EOL><INDENT>req_alpha = req.alpha(alpha)<EOL><DEDENT>else:<EOL><INDENT>req_alpha = req.alpha<EOL><DEDENT>add_func(req.func, alpha=req_alpha)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if pos is not None:<EOL><INDENT>if info.pos[idx] is not None:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValueError(msg.format(func_name, alpha))<EOL><DEDENT>info.pos[idx] = pos<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>for i, spec in enumerate(specs):<EOL><INDENT>func_name, alpha = (spec.split('<STR_LIT::>', <NUM_LIT:1>) + [None])[:<NUM_LIT:2>]<EOL>if alpha is not 
None:<EOL><INDENT>alpha = float(alpha)<EOL><DEDENT>try:<EOL><INDENT>func = func_mapping[func_name]<EOL><DEDENT>except KeyError:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValueError(msg.format(func_name))<EOL><DEDENT>needs_alpha = getattr(func, '<STR_LIT>', False)<EOL>if needs_alpha and alpha is None:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValueError(msg.format(func_name, spec))<EOL><DEDENT>elif not needs_alpha and alpha is not None:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>raise ValueError(msg.format(func_name, spec))<EOL><DEDENT>add_func(func, alpha, i)<EOL><DEDENT>meta_counter = itertools.count(-<NUM_LIT:1>, step=-<NUM_LIT:1>)<EOL>for info in itertools.chain(itervalues(funcs), itervalues(metas)):<EOL><INDENT>for i, pos in enumerate(info.pos):<EOL><INDENT>if pos is None:<EOL><INDENT>info.pos[i] = next(meta_counter)<EOL><DEDENT><DEDENT><DEDENT>for func, info in iteritems(metas):<EOL><INDENT>deps = info.deps<EOL>assert deps == []<EOL>for req in func.needs_results:<EOL><INDENT>f = req.func<EOL>req_info = (metas if hasattr(f, '<STR_LIT>') else funcs)[f]<EOL>if req.alpha is not None:<EOL><INDENT>if callable(req.alpha):<EOL><INDENT>req_alpha = req.alpha(info.alphas)<EOL><DEDENT>else:<EOL><INDENT>req_alpha = req.alpha<EOL><DEDENT>find_alpha = np.vectorize(req_info.alphas.index, otypes=[int])<EOL>pos = np.asarray(req_info.pos)[find_alpha(req_alpha)]<EOL>if np.isscalar(pos):<EOL><INDENT>deps.append(pos[()])<EOL><DEDENT>else:<EOL><INDENT>deps.extend(pos)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pos, = req_info.pos<EOL>deps.append(pos)<EOL><DEDENT><DEDENT><DEDENT>meta_order = topological_sort(meta_deps)<EOL>metas_ordered = OrderedDict(<EOL>(f, metas[f]) for f in meta_order if hasattr(f, '<STR_LIT>'))<EOL>return funcs, metas_ordered, -next(meta_counter) - <NUM_LIT:1><EOL>", "docstring": "Set up the different functions we need to call.\n\nReturns:\n    - a dict mapping base estimator functions to _FuncInfo objects.\n      If the function needs_alpha, then the alphas attribute is an 
array\n      of alpha values and pos is a corresponding array of indices.\n      Otherwise, alphas is None and pos is a list containing a single index.\n      Indices are >= 0 if they correspond to something in a spec,\n      and negative if they're just used for a meta estimator but not\n      directly requested.\n    - an OrderedDict mapping functions to _MetaFuncInfo objects.\n      alphas and pos are like for _FuncInfo; deps is a list of indices\n      which should be passed to the estimator. Note that these might be\n      other meta functions; this list is guaranteed to be in an order\n      such that all dependencies are resolved before calling that function.\n      If no such order is possible, raise ValueError.\n    - the number of meta-only results\n\n# TODO: update doctests for _parse_specs\n\n>>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9'])\n({<function alpha_div at 0x10954f848>:\n        _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3])},\n OrderedDict([\n    (<function hellinger at 0x10954fc80>,\n        _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])),\n    (<function renyi at 0x10954fcf8>,\n        _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3]))\n ]), 3)\n\n>>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2'])\n({<function alpha_div at 0x10954f848>:\n    _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]),\n  <function linear at 0x10954f758>: _FuncInfo(alphas=None, pos=[-4])\n }, OrderedDict([\n    (<function hellinger at 0x10954fc80>,\n        _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])),\n    (<function l2 at 0x10954fde8>,\n        _MetaFuncInfo(alphas=None, pos=[3], deps=[-4])),\n    (<function renyi at 0x10954fcf8>,\n        _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3]))\n ]), 4)\n\n>>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2', 'linear'])\n({<function alpha_div at 0x10954f848>:\n    _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]),\n  <function linear at 0x10954f758>: 
_FuncInfo(alphas=None, pos=[4])\n }, OrderedDict([\n    (<function hellinger at 0x10954fc80>,\n        _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])),\n    (<function l2 at 0x10954fde8>,\n        _MetaFuncInfo(alphas=None, pos=[3], deps=[4])),\n    (<function renyi at 0x10954fcf8>,\n        _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3]))\n ]), 3)", "id": "f14617:m19"}
{"signature": "def l2(Ks, dim, X_rhos, Y_rhos, required, clamp=True, to_self=False):", "body": "n_X = len(X_rhos)<EOL>n_Y = len(Y_rhos)<EOL>linears = required<EOL>assert linears.shape == (<NUM_LIT:1>, Ks.size, n_X, n_Y, <NUM_LIT:2>)<EOL>X_quadratics = np.empty((Ks.size, n_X), dtype=np.float32)<EOL>for i, rho in enumerate(X_rhos):<EOL><INDENT>X_quadratics[:, i] = quadratic(Ks, dim, rho)<EOL><DEDENT>Y_quadratics = np.empty((Ks.size, n_Y), dtype=np.float32)<EOL>for j, rho in enumerate(Y_rhos):<EOL><INDENT>Y_quadratics[:, j] = quadratic(Ks, dim, rho)<EOL><DEDENT>est = -linears.sum(axis=<NUM_LIT:4>)<EOL>est += X_quadratics[None, :, :, None]<EOL>est += Y_quadratics[None, :, None, :]<EOL>np.maximum(est, <NUM_LIT:0>, out=est)<EOL>np.sqrt(est, out=est)<EOL>if to_self:<EOL><INDENT>est[:, :, xrange(n_X), xrange(n_Y)] = <NUM_LIT:0><EOL><DEDENT>return est[:, :, :, :, None]<EOL>", "docstring": "r'''\n    Estimates the L2 distance between distributions, via\n        \\int (p - q)^2 = \\int p^2 - \\int p q - \\int q p + \\int q^2.\n\n    \\int pq and \\int qp are estimated with the linear function (in both\n    directions), while \\int p^2 and \\int q^2 are estimated via the quadratic\n    function below.\n\n    Always clamps negative estimates of l2^2 to 0, because otherwise the sqrt\n    would break.", "id": "f14617:m15"}
{"signature": "def jensen_shannon(Ks, dim, X_rhos, Y_rhos, required,<EOL>clamp=True, to_self=False):", "body": "X_ns = np.array([rho.shape[<NUM_LIT:0>] for rho in X_rhos])<EOL>Y_ns = np.array([rho.shape[<NUM_LIT:0>] for rho in Y_rhos])<EOL>n_X = X_ns.size<EOL>n_Y = Y_ns.size<EOL>cores = required<EOL>assert cores.shape == (<NUM_LIT:1>, Ks.size, n_X, n_Y, <NUM_LIT:2>)<EOL>X_bits = np.empty((Ks.size, n_X), dtype=np.float32)<EOL>for i, rho in enumerate(X_rhos):<EOL><INDENT>X_bits[:, i] = dim * np.mean(np.log(rho), axis=<NUM_LIT:0>)<EOL><DEDENT>X_bits += np.log(X_ns - <NUM_LIT:1>)[np.newaxis, :]<EOL>Y_bits = np.empty((Ks.size, n_Y), dtype=np.float32)<EOL>for j, rho in enumerate(Y_rhos):<EOL><INDENT>Y_bits[:, j] = dim * np.mean(np.log(rho), axis=<NUM_LIT:0>)<EOL><DEDENT>Y_bits += np.log(Y_ns - <NUM_LIT:1>)[np.newaxis, :]<EOL>est = cores.sum(axis=<NUM_LIT:4>)<EOL>est -= X_bits.reshape(<NUM_LIT:1>, Ks.size, n_X, <NUM_LIT:1>)<EOL>est -= Y_bits.reshape(<NUM_LIT:1>, Ks.size, <NUM_LIT:1>, n_Y)<EOL>est /= <NUM_LIT:2><EOL>est += np.log(-<NUM_LIT:1> + X_ns[None, None, :, None] + Y_ns[None, None, None, :])<EOL>est += psi(Ks)[None, :, None, None]<EOL>if to_self:<EOL><INDENT>est[:, :, xrange(n_X), xrange(n_Y)] = <NUM_LIT:0><EOL><DEDENT>if clamp:  <EOL><INDENT>np.maximum(<NUM_LIT:0>, est, out=est)<EOL>np.minimum(np.log(<NUM_LIT:2>), est, out=est)<EOL><DEDENT>return est[:, :, :, :, None]<EOL>", "docstring": "r'''\n    Estimate the difference between the Shannon entropy of an equally-weighted\n    mixture between X and Y and the mixture of the Shannon entropies:\n\n        JS(X, Y) = H[ (X + Y) / 2 ] - (H[X] + H[Y]) / 2\n\n    We use a special case of the Hino-Murata weighted information estimator with\n    a fixed M = n \\alpha, about equivalent to the K-nearest-neighbor approach\n    used for the other estimators:\n\n        Hideitsu Hino and Noboru Murata (2013).\n        Information estimators for weighted observations. 
Neural Networks.\n        http://linkinghub.elsevier.com/retrieve/pii/S0893608013001676\n\n\n    The estimator for JS(X, Y) is:\n\n        log volume of the unit ball - log M + log(n + m - 1) + digamma(M)\n        + 1/2 mean_X( d * log radius of largest ball in X+Y around X_i\n                                with no more than M/(n+m-1) weight\n                                where X points have weight 1 / (2 n - 1)\n                                  and Y points have weight n / (m (2 n - 1))\n                      - digamma(# of neighbors in that ball) )\n        + 1/2 mean_Y( d * log radius of largest ball in X+Y around Y_i\n                                with no more than M/(n+m-1) weight\n                                where X points have weight m / (n (2 m - 1))\n                                  and Y points have weight 1 / (2 m - 1)\n                      - digamma(# of neighbors in that ball) )\n\n        - 1/2 (log volume of the unit ball - log M + log(n - 1) + digamma(M))\n        - 1/2 mean_X( d * log radius of the largest ball in X around X_i\n                                with no more than M/(n-1) weight\n                                where X points have weight 1 / (n - 1))\n                      - digamma(# of neighbors in that ball) )\n\n        - 1/2 (log volume of the unit ball - log M + log(m - 1) + digamma(M))\n        - 1/2 mean_Y( d * log radius of the largest ball in Y around Y_i\n                                with no more than M/(n-1) weight\n                                where X points have weight 1 / (m - 1))\n                      - digamma(# of neighbors in that ball) )\n\n        =\n\n        log(n + m - 1) + digamma(M)\n        + 1/2 mean_X( d * log radius of largest ball in X+Y around X_i\n                                with no more than M/(n+m-1) weight\n                                where X points have weight 1 / (2 n - 1)\n                                  and Y points have weight n / (m (2 n - 1))\n                      
- digamma(# of neighbors in that ball) )\n        + 1/2 mean_Y( d * log radius of largest ball in X+Y around Y_i\n                                with no more than M/(n+m-1) weight\n                                where X points have weight m / (n (2 m - 1))\n                                  and Y points have weight 1 / (2 m - 1)\n                      - digamma(# of neighbors in that ball) )\n        - 1/2 [log(n-1) + mean_X( d * log rho_M(X_i) )]\n        - 1/2 [log(m-1) + mean_Y( d * log rho_M(Y_i) )]", "id": "f14617:m17"}
{"signature": "def linear(Ks, dim, num_q, rhos, nus):", "body": "return _get_linear(Ks, dim)(num_q, rhos, nus)<EOL>", "docstring": "r'''\n    Estimates the linear inner product \\int p q between two distributions,\n    based on kNN distances.", "id": "f14617:m5"}
{"signature": "def jensen_shannon_core(Ks, dim, num_q, rhos, nus):", "body": "ns = np.array([rhos.shape[<NUM_LIT:0>], num_q])<EOL>return _get_jensen_shannon_core(Ks, dim, ns)[<NUM_LIT:0>](num_q, rhos, nus)<EOL>", "docstring": "r'''\n    Estimates\n          1/2 mean_X( d * log radius of largest ball in X+Y around X_i\n                                with no more than M/(n+m-1) weight\n                                where X points have weight 1 / (2 n - 1)\n                                  and Y points have weight n / (m (2 n - 1))\n                      - digamma(# of neighbors in that ball))\n\n    This is the core pairwise component of the estimator of Jensen-Shannon\n    divergence based on the Hino-Murata weighted information estimator. See\n    the docstring for jensen_shannon for an explanation.", "id": "f14617:m9"}
{"signature": "@property<EOL><INDENT>def dim_(self):<DEDENT>", "body": "self._check_fitted()<EOL>return self.inds_.shape[<NUM_LIT:1>]<EOL>", "docstring": "The dimension of the inputs, once fitted.", "id": "f14626:c0:m2"}
{"signature": "def fit(self, X, y=None):", "body": "return self<EOL>", "docstring": "Do nothing; this is a stateless transformer.", "id": "f14627:c0:m1"}
{"signature": "def transform(self, X):", "body": "X = as_features(X)<EOL>return np.vstack([np.mean(bag, axis=<NUM_LIT:0>) for bag in X])<EOL>", "docstring": "Transform a list of bag features into a matrix of its mean features.\n\nParameters\n----------\nX : :class:`skl_groups.features.Features` or list of bag feature arrays\n    Data to transform.\n\nReturns\n-------\nX_new : array, shape ``[len(X), X.dim]``\n    X transformed into its means.", "id": "f14627:c0:m2"}
{"signature": "def transform(self, X):", "body": "self._check_fitted()<EOL>X = as_features(X, stack=True)<EOL>assignments = self.kmeans_fit_.predict(X.stacked_features)<EOL>return self._group_assignments(X, assignments)<EOL>", "docstring": "Transform a list of bag features into its bag-of-words representation.\n\nParameters\n----------\nX : :class:`skl_groups.features.Features` or list of bag feature arrays\n    New data to transform.\n\nReturns\n-------\nX_new : integer array, shape [len(X), kmeans.n_clusters]\n    X transformed into the new space.", "id": "f14628:c0:m6"}
{"signature": "def fit_transform(self, X):", "body": "X = as_features(X, stack=True)<EOL>self.kmeans_fit_ = copy(self.kmeans)<EOL>assignments = self.kmeans_fit_.fit_predict(X.stacked_features) <EOL>return self._group_assignments(X, assignments)<EOL>", "docstring": "Compute clustering and transform a list of bag features into its\nbag-of-words representation. Like calling fit(X) and then transform(X),\nbut more efficient.\n\nParameters\n----------\nX : :class:`skl_groups.features.Features` or list of bag feature arrays\n    New data to transform.\n\nReturns\n-------\nX_new : integer array, shape [len(X), kmeans.n_clusters]\n    X transformed into the new space.", "id": "f14628:c0:m7"}
{"signature": "def fit(self, X, y=None):", "body": "self.kmeans_fit_ = copy(self.kmeans)<EOL>X = as_features(X, stack=True)<EOL>self.kmeans_fit_.fit(X.stacked_features) <EOL>return self<EOL>", "docstring": "Choose the codewords based on a training set.\n\nParameters\n----------\nX : :class:`skl_groups.features.Features` or list of arrays of shape ``[n_samples[i], n_features]``\n    Training set. If a Features object, it will be stacked.", "id": "f14628:c0:m5"}
{"signature": "@property<EOL><INDENT>def codewords_(self):<DEDENT>", "body": "self._check_fitted()<EOL>return self.kmeans_fit_.cluster_centers_<EOL>", "docstring": "The selected codewords; shape [n_codewords, n_features].", "id": "f14628:c0:m3"}
{"signature": "def update(self, idx):", "body": "<EOL>self.logger.info('<STR_LIT>'.format(idx))<EOL>", "docstring": "Update the current state.\n\nParameters\n----------\nidx : int\n    The current state through the process.", "id": "f14629:c1:m2"}
{"signature": "def is_integer(x):", "body": "return np.isscalar(x) and is_integer_type(x)<EOL>", "docstring": "Checks whether the argument is a single integer.", "id": "f14629:m3"}
{"signature": "def finish(self):", "body": "self.logger.info(json.dumps(['<STR_LIT>']))<EOL>", "docstring": "Marks the process as done.", "id": "f14629:c1:m3"}
{"signature": "def is_categorical(x):", "body": "return np.isscalar(x) and is_categorical_type(x)<EOL>", "docstring": "Checks whether the argument is a single integer or boolean.", "id": "f14629:m4"}
{"signature": "@property<EOL><INDENT>def dtype(self):<DEDENT>", "body": "return self.features[<NUM_LIT:0>].dtype<EOL>", "docstring": "The data type of the feature vectors.", "id": "f14630:c0:m4"}
{"signature": "def bare(self):", "body": "if not self.meta:<EOL><INDENT>return self<EOL><DEDENT>elif self.stacked:<EOL><INDENT>return Features(self.stacked_features, self.n_pts, copy=False)<EOL><DEDENT>else:<EOL><INDENT>return Features(self.features, copy=False)<EOL><DEDENT>", "docstring": "Make a Features object with no metadata; points to the same features.", "id": "f14630:c0:m18"}
{"signature": "def make_stacked(self):", "body": "if self.stacked:<EOL><INDENT>return<EOL><DEDENT>self._boundaries = bounds = np.r_[<NUM_LIT:0>, np.cumsum(self.n_pts)]<EOL>self.stacked_features = stacked = np.vstack(self.features)<EOL>self.features = np.array(<EOL>[stacked[bounds[i-<NUM_LIT:1>]:bounds[i]] for i in xrange(<NUM_LIT:1>, len(bounds))],<EOL>dtype=object)<EOL>self.stacked = True<EOL>", "docstring": "If unstacked, convert to stacked. If stacked, do nothing.", "id": "f14630:c0:m1"}
{"signature": "def _lstree(files, dirs):", "body": "for f, sha1 in files:<EOL><INDENT>yield \"<STR_LIT>\".format(sha1, f)<EOL><DEDENT>for d, sha1 in dirs:<EOL><INDENT>yield \"<STR_LIT>\".format(sha1, d)<EOL><DEDENT>", "docstring": "Make git ls-tree like output.", "id": "f14631:m1"}
{"signature": "def hash_dir(path):", "body": "dir_hash = {}<EOL>for root, dirs, files in os.walk(path, topdown=False):<EOL><INDENT>f_hash = ((f, hash_file(join(root, f))) for f in files)<EOL>d_hash = ((d, dir_hash[join(root, d)]) for d in dirs)<EOL>dir_hash[join(*split(root))] = _mktree(f_hash, d_hash)<EOL><DEDENT>return dir_hash[path]<EOL>", "docstring": "Write directory at path to Git index, return its SHA1 as a string.", "id": "f14631:m3"}
{"signature": "def email_subject(words_quantity=<NUM_LIT:4>):", "body": "return lorem_ipsum.title(words_quantity=words_quantity)<EOL>", "docstring": "An alias for lorem_ipsum.title(words_quantity)", "id": "f14649:m4"}
{"signature": "def user_name(with_num=False):", "body": "result = first_name()<EOL>if with_num:<EOL><INDENT>result += str(random.randint(<NUM_LIT>, <NUM_LIT>))<EOL><DEDENT>return result.lower()<EOL>", "docstring": "Return a random user name.\n\n    Basically it's lowercased result of\n    :py:func:`~forgery_py.forgery.name.first_name()` with a number appended\n    if `with_num`.", "id": "f14649:m0"}
{"signature": "def domain_name():", "body": "result = random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>result += '<STR_LIT:.>' + top_level_domain()<EOL>return result.lower()<EOL>", "docstring": "Return a random domain name.\n\n    Lowercased result of :py:func:`~forgery_py.forgery.name.company_name()`\n    plus :py:func:`~top_level_domain()`.", "id": "f14649:m2"}
{"signature": "def _py2_ip_v6():", "body": "<EOL>magnitude = <NUM_LIT:16> ** <NUM_LIT:4><EOL>return \"<STR_LIT::>\".join((\"<STR_LIT>\" % random.randint(<NUM_LIT:0>, magnitude) for _ in range(<NUM_LIT:8>)))<EOL>", "docstring": "Return a random IPv6 address for Python versions prior to 3.3.", "id": "f14649:m7"}
{"signature": "def title(words_quantity=<NUM_LIT:4>):", "body": "result = words(quantity=words_quantity)<EOL>result += random.choice('<STR_LIT>')<EOL>return result.capitalize()<EOL>", "docstring": "Return a random sentence to be used as e.g. an e-mail subject.", "id": "f14650:m2"}
{"signature": "def words(quantity=<NUM_LIT:10>, as_list=False):", "body": "global _words<EOL>if not _words:<EOL><INDENT>_words = '<STR_LIT:U+0020>'.join(get_dictionary('<STR_LIT>')).lower().replace('<STR_LIT:\\n>', '<STR_LIT>')<EOL>_words = re.sub(r'<STR_LIT>', '<STR_LIT>', _words)<EOL>_words = _words.split('<STR_LIT:U+0020>')<EOL><DEDENT>result = random.sample(_words, quantity)<EOL>if as_list:<EOL><INDENT>return result<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT:U+0020>'.join(result)<EOL><DEDENT>", "docstring": "Return random words.", "id": "f14650:m1"}
{"signature": "def _to_lower_alpha_only(s):", "body": "s = re.sub(r'<STR_LIT:\\n>', '<STR_LIT:U+0020>',  s.lower())<EOL>return re.sub(r'<STR_LIT>', '<STR_LIT>', s)<EOL>", "docstring": "Return a lowercased string with non alphabetic chars removed.\n\n    White spaces are not to be removed.", "id": "f14650:m7"}
{"signature": "def paragraphs(quantity=<NUM_LIT:2>, separator='<STR_LIT>', wrap_start='<STR_LIT>', wrap_end='<STR_LIT>',<EOL>html=False, sentences_quantity=<NUM_LIT:3>, as_list=False):", "body": "if html:<EOL><INDENT>wrap_start = '<STR_LIT>'<EOL>wrap_end = '<STR_LIT>'<EOL>separator = '<STR_LIT>'<EOL><DEDENT>result = []<EOL>try:<EOL><INDENT>for _ in xrange(<NUM_LIT:0>, quantity):<EOL><INDENT>result.append(wrap_start +<EOL>sentences(sentences_quantity) +<EOL>wrap_end)<EOL><DEDENT><DEDENT>except NameError:<EOL><INDENT>for _ in range(<NUM_LIT:0>, quantity):<EOL><INDENT>result.append(wrap_start +<EOL>sentences(sentences_quantity) +<EOL>wrap_end)<EOL><DEDENT><DEDENT>if as_list:<EOL><INDENT>return result<EOL><DEDENT>else:<EOL><INDENT>return separator.join(result)<EOL><DEDENT>", "docstring": "Return random paragraphs.", "id": "f14650:m6"}
{"signature": "def paragraph(separator='<STR_LIT>', wrap_start='<STR_LIT>', wrap_end='<STR_LIT>',<EOL>html=False, sentences_quantity=<NUM_LIT:3>):", "body": "return paragraphs(quantity=<NUM_LIT:1>, separator=separator, wrap_start=wrap_start,<EOL>wrap_end=wrap_end, html=html,<EOL>sentences_quantity=sentences_quantity)<EOL>", "docstring": "Return a random paragraph.", "id": "f14650:m5"}
{"signature": "def character():", "body": "return characters(quantity=<NUM_LIT:1>)<EOL>", "docstring": "Return a random character.", "id": "f14650:m9"}
{"signature": "def location():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return a random location name, e.g. ``MI6 Headquarters``.", "id": "f14651:m10"}
{"signature": "def company_name():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return a random company name.", "id": "f14651:m5"}
{"signature": "def job_title():", "body": "result = random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>result = result.replace('<STR_LIT>', job_title_suffix())<EOL>return result<EOL>", "docstring": "Return a random job title.", "id": "f14651:m6"}
{"signature": "def female_first_name():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return a random female first name.", "id": "f14651:m4"}
{"signature": "def suffix():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return a random name suffix, e.g. ``Jr``.", "id": "f14651:m9"}
{"signature": "def male_first_name():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return a random male first name.", "id": "f14651:m3"}
{"signature": "def day(month_length=<NUM_LIT>):", "body": "return random.randint(<NUM_LIT:1>, month_length)<EOL>", "docstring": "Return a random day number in a `month_length` days long month.", "id": "f14652:m4"}
{"signature": "def datetime(past=False, min_delta=<NUM_LIT:0>, max_delta=<NUM_LIT:20>):", "body": "timedelta = dt.timedelta(days=_delta(past, min_delta, max_delta))<EOL>return dt.datetime.today() + timedelta<EOL>", "docstring": "Return a random `dt.dt` object. Delta args are days.", "id": "f14652:m6"}
{"signature": "def year(past=False, min_delta=<NUM_LIT:0>, max_delta=<NUM_LIT:20>):", "body": "return dt.date.today().year + _delta(past, min_delta, max_delta)<EOL>", "docstring": "Return a random year.", "id": "f14652:m3"}
{"signature": "def formatted_money(min=<NUM_LIT:0>, max=<NUM_LIT:10>):", "body": "return \"<STR_LIT>\" % float(money(min=min, max=max))<EOL>", "docstring": "Return a random sum of money with a dollar sign as a prefix.", "id": "f14653:m1"}
{"signature": "def money(min=<NUM_LIT:0>, max=<NUM_LIT:10>):", "body": "value = random.choice(range(min * <NUM_LIT:100>, max * <NUM_LIT:100>))<EOL>return \"<STR_LIT>\" % (float(value) / <NUM_LIT:100>)<EOL>", "docstring": "Return a str of decimal with two digits after a decimal mark.", "id": "f14653:m0"}
{"signature": "def latitude_degrees():", "body": "return random.randint(<NUM_LIT:0>, <NUM_LIT>) - <NUM_LIT><EOL>", "docstring": "Return a random latitude's degrees component in the range [-180, +180].\n\n    Latitude's degree is an int.", "id": "f14654:m1"}
{"signature": "def longitude_minutes():", "body": "return latitude_minutes()<EOL>", "docstring": "Return a random longitude's minutes component in the range [0, 60).\n\n    longitude's minutes is an int.", "id": "f14654:m8"}
{"signature": "def latitude_direction():", "body": "return random.choice(['<STR_LIT:N>', '<STR_LIT:S>'])<EOL>", "docstring": "Return a random a latitude's direction component.\n\n    Latitude's direction is denoted as either \"N\" (north) or \"S\" (south).", "id": "f14654:m2"}
{"signature": "def latitude():", "body": "return random.uniform(<NUM_LIT:0.0>, <NUM_LIT:1.0>) * <NUM_LIT> - <NUM_LIT><EOL>", "docstring": "Return a random latitude in the range of [-90.0, +90.0].\n\n    Latitude is a float.", "id": "f14654:m0"}
{"signature": "def longitude_direction():", "body": "return random.choice(['<STR_LIT:E>', '<STR_LIT>'])<EOL>", "docstring": "Return a random longitude's direction component.\n\n    Longitude's direction is denoted as either \"E\" (east) or \"W\" (west).", "id": "f14654:m7"}
{"signature": "def frequency():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return random frequency rate.\n\n    Frequency rate is taken from the `frequencies` dictionary.", "id": "f14656:m6"}
{"signature": "def encrypt(password='<STR_LIT:password>', salt=None):", "body": "if not salt:<EOL><INDENT>salt = str(datetime.utcnow())<EOL><DEDENT>try:<EOL><INDENT>dk = hashlib.pbkdf2_hmac('<STR_LIT>', password.encode(), salt.encode(), <NUM_LIT>)<EOL>hexdigest = binascii.hexlify(dk).decode('<STR_LIT:utf-8>')<EOL><DEDENT>except AttributeError:<EOL><INDENT>dk = hashlib.sha1()<EOL>dk.update(password.encode() + salt.encode())<EOL>hexdigest = dk.hexdigest()<EOL><DEDENT>return hexdigest<EOL>", "docstring": "Return SHA1 hexdigest of a password (optionally salted with a string).", "id": "f14656:m5"}
{"signature": "def hex_color():", "body": "return '<STR_LIT>'.join(random.sample(HEX_DIGITS, <NUM_LIT:6>))<EOL>", "docstring": "Return random HEX color.", "id": "f14656:m0"}
{"signature": "def hex_color_short():", "body": "return '<STR_LIT>'.join(random.sample(HEX_DIGITS, <NUM_LIT:3>))<EOL>", "docstring": "Return random short HEX color (e.g. `FFF` color).", "id": "f14656:m1"}
{"signature": "def color():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return random color name.", "id": "f14656:m4"}
{"signature": "def password(at_least=<NUM_LIT:6>, at_most=<NUM_LIT:12>, lowercase=True,<EOL>uppercase=True, digits=True, spaces=False, punctuation=False):", "body": "return text(at_least=at_least, at_most=at_most, lowercase=lowercase,<EOL>uppercase=uppercase, digits=digits, spaces=spaces,<EOL>punctuation=punctuation)<EOL>", "docstring": "Return a random string for use as a password.", "id": "f14656:m8"}
{"signature": "def type():", "body": "return random.choice(list(CARDS.keys()))<EOL>", "docstring": "Return a random credit card type.", "id": "f14657:m0"}
{"signature": "def person_ogrn():", "body": "ogrn = \"<STR_LIT>\".join(map(str, [random.randint(<NUM_LIT:1>, <NUM_LIT:9>) for _ in range(<NUM_LIT>)]))<EOL>ogrn += str((int(ogrn) % <NUM_LIT> % <NUM_LIT:10>))<EOL>return ogrn<EOL>", "docstring": "Return a random government registration ID for a person.", "id": "f14659:m7"}
{"signature": "def person_inn():", "body": "mask11 = [<NUM_LIT:7>, <NUM_LIT:2>, <NUM_LIT:4>, <NUM_LIT:10>, <NUM_LIT:3>, <NUM_LIT:5>, <NUM_LIT:9>, <NUM_LIT:4>, <NUM_LIT:6>, <NUM_LIT:8>]<EOL>mask12 = [<NUM_LIT:3>, <NUM_LIT:7>, <NUM_LIT:2>, <NUM_LIT:4>, <NUM_LIT:10>, <NUM_LIT:3>, <NUM_LIT:5>, <NUM_LIT:9>, <NUM_LIT:4>, <NUM_LIT:6>, <NUM_LIT:8>]<EOL>inn = [random.randint(<NUM_LIT:1>, <NUM_LIT:9>) for _ in range(<NUM_LIT:12>)]<EOL>weighted11 = [v * mask11[i] for i, v in enumerate(inn[:-<NUM_LIT:2>])]<EOL>inn[<NUM_LIT:10>] = sum(weighted11) % <NUM_LIT:11> % <NUM_LIT:10><EOL>weighted12 = [v * mask12[i] for i, v in enumerate(inn[:-<NUM_LIT:1>])]<EOL>inn[<NUM_LIT:11>] = sum(weighted12) % <NUM_LIT:11> % <NUM_LIT:10><EOL>return \"<STR_LIT>\".join(map(str, inn))<EOL>", "docstring": "Return a random taxation ID number for a natural person.", "id": "f14659:m6"}
{"signature": "def ogrn(type=\"<STR_LIT>\"):", "body": "if (type in TYPES) and type == '<STR_LIT>':<EOL><INDENT>return person_ogrn()<EOL><DEDENT>else:<EOL><INDENT>return legal_ogrn()<EOL><DEDENT>", "docstring": "Return a random government registration ID for either a person or a company.\n\n    Further information on the topic can be found in [1] (in russian).\n    [1]: https://ru.wikipedia.org/wiki/\u041e\u0441\u043d\u043e\u0432\u043d\u043e\u0439_\u0433\u043e\u0441\u0443\u0434\u0430\u0440\u0441\u0442\u0432\u0435\u043d\u043d\u044b\u0439_\u0440\u0435\u0433\u0438\u0441\u0442\u0440\u0430\u0446\u0438\u043e\u043d\u043d\u044b\u0439_\u043d\u043e\u043c\u0435\u0440", "id": "f14659:m5"}
{"signature": "def inn(type=\"<STR_LIT>\"):", "body": "if (type in TYPES) and type == '<STR_LIT>':<EOL><INDENT>return person_inn()<EOL><DEDENT>else:<EOL><INDENT>return legal_inn()<EOL><DEDENT>", "docstring": "Return a random taxation ID number for either a person or a company.\n\n    Further information on the topic can be found in [1] (in russian).\n    [1]: https://ru.wikipedia.org/wiki/\u0418\u0434\u0435\u043d\u0442\u0438\u0444\u0438\u043a\u0430\u0446\u0438\u043e\u043d\u043d\u044b\u0439_\u043d\u043e\u043c\u0435\u0440_\u043d\u0430\u043b\u043e\u0433\u043e\u043f\u043b\u0430\u0442\u0435\u043b\u044c\u0449\u0438\u043a\u0430", "id": "f14659:m2"}
{"signature": "def legal_inn():", "body": "mask = [<NUM_LIT:2>, <NUM_LIT:4>, <NUM_LIT:10>, <NUM_LIT:3>, <NUM_LIT:5>, <NUM_LIT:9>, <NUM_LIT:4>, <NUM_LIT:6>, <NUM_LIT:8>]<EOL>inn = [random.randint(<NUM_LIT:1>, <NUM_LIT:9>) for _ in range(<NUM_LIT:10>)]<EOL>weighted = [v * mask[i] for i, v in enumerate(inn[:-<NUM_LIT:1>])]<EOL>inn[<NUM_LIT:9>] = sum(weighted) % <NUM_LIT:11> % <NUM_LIT:10><EOL>return \"<STR_LIT>\".join(map(str, inn))<EOL>", "docstring": "Return a random taxation ID number for a company.", "id": "f14659:m3"}
{"signature": "def address(user=None):", "body": "return internet.email_address(user=user)<EOL>", "docstring": "An alias for internet.email_address(user).", "id": "f14660:m0"}
{"signature": "def subject(words_quantity=<NUM_LIT:4>):", "body": "return lorem_ipsum.title(words_quantity=words_quantity)<EOL>", "docstring": "An alias for lorem_ipsum.title(words_quantity)", "id": "f14660:m2"}
{"signature": "def body(quantity=<NUM_LIT:2>, separator='<STR_LIT>', wrap_start='<STR_LIT>', wrap_end='<STR_LIT>',<EOL>html=False, sentences_quantity=<NUM_LIT:3>, as_list=False):", "body": "return lorem_ipsum.paragraphs(quantity=quantity, separator=separator,<EOL>wrap_start=wrap_start, wrap_end=wrap_end,<EOL>html=html,<EOL>sentences_quantity=sentences_quantity,<EOL>as_list=as_list)<EOL>", "docstring": "Return a random email text.", "id": "f14660:m1"}
{"signature": "def language():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return a random language name, e.g. ``Polish``.", "id": "f14662:m4"}
{"signature": "def shirt_size():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return a random shirt size.", "id": "f14662:m2"}
{"signature": "def country():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return a random country name.", "id": "f14663:m11"}
{"signature": "def state_abbrev():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return a random US abbreviated state name.", "id": "f14663:m6"}
{"signature": "def state():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return a random US state name.", "id": "f14663:m5"}
{"signature": "def street_address():", "body": "return '<STR_LIT>' % (street_number(), street_name(), street_suffix())<EOL>", "docstring": "Return a random street address.\n\n    Equivalent of ``street_number() + ' ' +\n    street_name() + ' ' + street_suffix()``.", "id": "f14663:m3"}
{"signature": "def continent():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return a random continent name.", "id": "f14663:m12"}
{"signature": "def province_abbrev():", "body": "return random.choice(get_dictionary('<STR_LIT>')).strip()<EOL>", "docstring": "Return random Canadian province or territory abbreviation.", "id": "f14663:m8"}
{"signature": "def as_list(iterable_of_arrays):", "body": "return [array.tolist() for array in iterable_of_arrays]<EOL>", "docstring": "Converts an iterable of permutation matrices given as NumPy\n    arrays into a list of lists.", "id": "f14668:m0"}
{"signature": "def git_tag(tag):", "body": "print('<STR_LIT>'.format(tag))<EOL>msg = '<STR_LIT>'.format(tag)<EOL>Popen(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', msg, tag]).wait()<EOL>", "docstring": "Tags the current version.", "id": "f14669:m8"}
{"signature": "def git_tags():", "body": "process = Popen(['<STR_LIT>', '<STR_LIT>'], stdout=PIPE)<EOL>return set(process.communicate()[<NUM_LIT:0>].splitlines())<EOL>", "docstring": "Returns a list of the git tags.", "id": "f14669:m5"}
{"signature": "def get_version(filename, pattern):", "body": "with open(filename) as f:<EOL><INDENT>match = re.search(r\"<STR_LIT>\" % pattern, f.read())<EOL><DEDENT>if match:<EOL><INDENT>before, version, after = match.groups()<EOL>return version<EOL><DEDENT>fail('<STR_LIT>'.format(pattern, filename))<EOL>", "docstring": "Gets the current version from the specified file.\n\n    This function assumes the file includes a string of the form::\n\n        <pattern> = <version>", "id": "f14669:m2"}
{"signature": "def git_is_clean():", "body": "return Popen(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']).wait() == <NUM_LIT:0><EOL>", "docstring": "Returns ``True`` if and only if there are no uncommitted changes.", "id": "f14669:m6"}
{"signature": "def bump_version(version, which=None):", "body": "try:<EOL><INDENT>parts = [int(n) for n in version.split('<STR_LIT:.>')]<EOL><DEDENT>except ValueError:<EOL><INDENT>fail('<STR_LIT>')<EOL><DEDENT>if len(parts) != <NUM_LIT:3>:<EOL><INDENT>fail('<STR_LIT>')<EOL><DEDENT>PARTS = {'<STR_LIT>': <NUM_LIT:0>, '<STR_LIT>': <NUM_LIT:1>, '<STR_LIT>': <NUM_LIT:2>}<EOL>index = PARTS[which] if which in PARTS else <NUM_LIT:2><EOL>before, middle, after = parts[:index], parts[index], parts[index + <NUM_LIT:1>:]<EOL>middle += <NUM_LIT:1><EOL>return '<STR_LIT:.>'.join(str(n) for n in before + [middle] + after)<EOL>", "docstring": "Returns the result of incrementing `version`.\n\n    If `which` is not specified, the \"patch\" part of the version number will be\n    incremented.  If `which` is specified, it must be ``'major'``, ``'minor'``,\n    or ``'patch'``. If it is one of these three strings, the corresponding part\n    of the version number will be incremented instead of the patch number.\n\n    Returns a string representing the next version number.\n\n    Example::\n\n        >>> bump_version('2.7.1')\n        '2.7.2'\n        >>> bump_version('2.7.1', 'minor')\n        '2.8.0'\n        >>> bump_version('2.7.1', 'major')\n        '3.0.0'", "id": "f14669:m0"}
{"signature": "def to_pattern_matrix(D):", "body": "result = np.zeros_like(D)<EOL>result[D.nonzero()] = <NUM_LIT:1><EOL>return result<EOL>", "docstring": "Returns the Boolean matrix in the same shape as `D` with ones exactly\n    where there are nonzero entries in `D`.\n\n    `D` must be a NumPy array.", "id": "f14670:m6"}
{"signature": "def hstack(left, right):", "body": "return np.hstack((left, right))<EOL>", "docstring": "Convenience function for ``numpy.hstack((left, right))``.", "id": "f14670:m2"}
{"signature": "def birkhoff_von_neumann_decomposition(D):", "body": "m, n = D.shape<EOL>if m != n:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(m, n))<EOL><DEDENT>indices = list(itertools.product(range(m), range(n)))<EOL>coefficients = []<EOL>permutations = []<EOL>S = D.astype('<STR_LIT:float>')<EOL>while not np.all(S == <NUM_LIT:0>):<EOL><INDENT>W = to_pattern_matrix(S)<EOL>X = to_bipartite_matrix(W)<EOL>G = from_numpy_matrix(X)<EOL>left_nodes = range(n)<EOL>M = maximum_matching(G, left_nodes)<EOL>M = {u: v % n for u, v in M.items() if u < n}<EOL>P = to_permutation_matrix(M)<EOL>q = min(S[i, j] for (i, j) in indices if P[i, j] == <NUM_LIT:1>)<EOL>coefficients.append(q)<EOL>permutations.append(P)<EOL>S -= q * P<EOL>S[np.abs(S) < TOLERANCE] = <NUM_LIT:0.0><EOL><DEDENT>return list(zip(coefficients, permutations))<EOL>", "docstring": "Returns the Birkhoff--von Neumann decomposition of the doubly\n    stochastic matrix `D`.\n\n    The input `D` must be a square NumPy array representing a doubly\n    stochastic matrix (that is, a matrix whose entries are nonnegative\n    reals and whose row sums and column sums are all 1). Each doubly\n    stochastic matrix is a convex combination of at most ``n ** 2``\n    permutation matrices, where ``n`` is the dimension of the input\n    array.\n\n    The returned value is a list of pairs whose length is at most ``n **\n    2``. In each pair, the first element is a real number in the interval **(0,\n    1]** and the second element is a NumPy array representing a permutation\n    matrix. This represents the doubly stochastic matrix as a convex\n    combination of the permutation matrices.\n\n    The input matrix may also be a scalar multiple of a doubly\n    stochastic matrix, in which case the row sums and column sums must\n    each be *c*, for some positive real number *c*. 
This may be useful\n    in avoiding precision issues: given a doubly stochastic matrix that\n    will have many entries close to one, multiply it by a large positive\n    integer. The returned permutation matrices will be the same\n    regardless of whether the given matrix is a doubly stochastic matrix\n    or a scalar multiple of a doubly stochastic matrix, but in the\n    latter case, the coefficients will all be scaled by the appropriate\n    scalar multiple, and their sum will be that scalar instead of one.\n\n    For example::\n\n        >>> import numpy as np\n        >>> from birkhoff import birkhoff_von_neumann_decomposition as decomp\n        >>> D = np.ones((2, 2))\n        >>> zipped_pairs = decomp(D)\n        >>> coefficients, permutations = zip(*zipped_pairs)\n        >>> coefficients\n        (1.0, 1.0)\n        >>> permutations[0]\n        array([[ 1.,  0.],\n               [ 0.,  1.]])\n        >>> permutations[1]\n        array([[ 0.,  1.],\n               [ 1.,  0.]])\n        >>> zipped_pairs = decomp(D / 2)  # halve each value in the matrix\n        >>> coefficients, permutations = zip(*zipped_pairs)\n        >>> coefficients  # will be half as large as before\n        (0.5, 0.5)\n        >>> permutations[0]  # will be the same as before\n        array([[ 1.,  0.],\n               [ 0.,  1.]])\n        >>> permutations[1]\n        array([[ 0.,  1.],\n               [ 1.,  0.]])\n\n    The returned list of pairs is given in the order computed by the algorithm\n    (so in particular they are not sorted in any way).", "id": "f14670:m7"}
{"signature": "def vstack(top, bottom):", "body": "return np.vstack((top, bottom))<EOL>", "docstring": "Convenience function for ``numpy.vstack((top, bottom))``.", "id": "f14670:m3"}
{"signature": "def open_default(self):", "body": "if HAVECONF:<EOL><INDENT>self.open(DBDATA, DBUSER, DBPASS, DBHOST)<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "This opens the database connection using the default database parameters\ngiven in the ~/.astrobase/astrobase.conf file.", "id": "f14679:c0:m2"}
{"signature": "def autocommit(self):", "body": "if len(self.cursors.keys()) == <NUM_LIT:0>:<EOL><INDENT>self.connection.autocommit = True<EOL><DEDENT>else:<EOL><INDENT>raise AttributeError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>", "docstring": "This sets the database connection to autocommit. Must be called before\nany cursors have been instantiated.", "id": "f14679:c0:m3"}
{"signature": "def __init__(self,<EOL>database=None,<EOL>user=None,<EOL>password=None,<EOL>host=None):", "body": "self.connection = None<EOL>self.user = None<EOL>self.database = None<EOL>self.host = None<EOL>self.cursors = {}<EOL>if database and user and password and host:<EOL><INDENT>self.open(database, user, password, host)<EOL><DEDENT>", "docstring": "Constructor for this class.\n\n        Parameters\n        ----------\n\n        database : str\n            Name of the database to connect to.\n\n        user : str\n            User name of the database server user.\n\n        password : str\n            Password for the database server user.\n\n        host : str\n            Database hostname or IP address to connect to.\n\n        Returns\n        -------\n\n        `LCDB` object instance", "id": "f14679:c0:m0"}
{"signature": "def rollback(self):", "body": "if not self.connection.closed:<EOL><INDENT>self.connection.rollback()<EOL><DEDENT>else:<EOL><INDENT>raise AttributeError('<STR_LIT>' %<EOL>self.database)<EOL><DEDENT>", "docstring": "This just calls the connection's commit method.", "id": "f14679:c0:m7"}
{"signature": "def log_posterior_transit(theta, params, model, t, flux, err_flux, priorbounds):", "body": "lp = _log_prior_transit(theta, priorbounds)<EOL>if not np.isfinite(lp):<EOL><INDENT>return -np.inf<EOL><DEDENT>else:<EOL><INDENT>return lp + _log_likelihood_transit(theta, params, model, t, flux,<EOL>err_flux, priorbounds)<EOL><DEDENT>", "docstring": "Evaluate posterior probability given proposed model parameters and\nthe observed flux timeseries.", "id": "f14680:m7"}
{"signature": "def _log_likelihood_transit(theta, params, model, t, flux, err_flux,<EOL>priorbounds):", "body": "u = []<EOL>for ix, key in enumerate(sorted(priorbounds.keys())):<EOL><INDENT>if key == '<STR_LIT>':<EOL><INDENT>params.rp = theta[ix]<EOL><DEDENT>elif key == '<STR_LIT>':<EOL><INDENT>params.t0 = theta[ix]<EOL><DEDENT>elif key == '<STR_LIT>':<EOL><INDENT>params.a = theta[ix]<EOL><DEDENT>elif key == '<STR_LIT>':<EOL><INDENT>params.inc = theta[ix]<EOL><DEDENT>elif key == '<STR_LIT>':<EOL><INDENT>params.per = theta[ix]<EOL><DEDENT>elif key == '<STR_LIT>':<EOL><INDENT>params.per = theta[ix]<EOL><DEDENT>elif key == '<STR_LIT>':<EOL><INDENT>params.w = theta[ix]<EOL><DEDENT>elif key == '<STR_LIT>':<EOL><INDENT>u.append(theta[ix])<EOL><DEDENT>elif key == '<STR_LIT>':<EOL><INDENT>u.append(theta[ix])<EOL>params.u = u<EOL><DEDENT><DEDENT>lc = model.light_curve(params)<EOL>residuals = flux - lc<EOL>log_likelihood = -<NUM_LIT:0.5>*(<EOL>np.sum((residuals/err_flux)**<NUM_LIT:2> + np.log(<NUM_LIT:2>*np.pi*(err_flux)**<NUM_LIT:2>))<EOL>)<EOL>return log_likelihood<EOL>", "docstring": "Given a batman TransitModel and its proposed parameters (theta), update the\nbatman params object with the proposed parameters and evaluate the gaussian\nlikelihood.\n\nNote: the priorbounds are only needed to parse theta.", "id": "f14680:m5"}
{"signature": "def traptransit_fit_magseries(times, mags, errs,<EOL>transitparams,<EOL>sigclip=<NUM_LIT>,<EOL>plotfit=False,<EOL>magsarefluxes=False,<EOL>verbose=True):", "body": "stimes, smags, serrs = sigclip_magseries(times, mags, errs,<EOL>sigclip=sigclip,<EOL>magsarefluxes=magsarefluxes)<EOL>nzind = np.nonzero(serrs)<EOL>stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]<EOL>transitperiod, transitepoch, transitdepth = transitparams[<NUM_LIT:0>:<NUM_LIT:3>]<EOL>if transitepoch is None:<EOL><INDENT>if verbose:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>spfit = spline_fit_magseries(times, mags, errs, transitperiod,<EOL>sigclip=sigclip,<EOL>magsarefluxes=magsarefluxes,<EOL>verbose=verbose)<EOL>transitepoch = spfit['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>except Exception as e:<EOL><INDENT>sgfit = savgol_fit_magseries(times, mags, errs, transitperiod,<EOL>sigclip=sigclip,<EOL>magsarefluxes=magsarefluxes,<EOL>verbose=verbose)<EOL>transitepoch = sgfit['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>finally:<EOL><INDENT>if transitepoch is None:<EOL><INDENT>LOGERROR(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>returndict = {<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':transitparams,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>},<EOL>'<STR_LIT>':np.nan,<EOL>'<STR_LIT>':np.nan,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':magsarefluxes,<EOL>},<EOL>}<EOL>return returndict<EOL><DEDENT>else:<EOL><INDENT>if transitepoch.size > <NUM_LIT:1>:<EOL><INDENT>if verbose:<EOL><INDENT>LOGWARNING(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>transitparams[<NUM_LIT:1>] = transitepoch[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>LOGWARNING(<EOL>'<STR_LIT>'<EOL>% transitepoch<EOL>)<EOL><DEDENT>transitparams[<NUM_LIT:1>] = 
transitepoch.item()<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if magsarefluxes:<EOL><INDENT>if transitdepth < <NUM_LIT:0.0>:<EOL><INDENT>transitparams[<NUM_LIT:2>] = -transitdepth<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if transitdepth > <NUM_LIT:0.0>:<EOL><INDENT>transitparams[<NUM_LIT:2>] = -transitdepth<EOL><DEDENT><DEDENT>try:<EOL><INDENT>leastsqfit = spleastsq(transits.trapezoid_transit_residual,<EOL>transitparams,<EOL>args=(stimes, smags, serrs),<EOL>full_output=True)<EOL><DEDENT>except Exception as e:<EOL><INDENT>leastsqfit = None<EOL><DEDENT>if leastsqfit and leastsqfit[-<NUM_LIT:1>] in (<NUM_LIT:1>,<NUM_LIT:2>,<NUM_LIT:3>,<NUM_LIT:4>):<EOL><INDENT>finalparams = leastsqfit[<NUM_LIT:0>]<EOL>covxmatrix = leastsqfit[<NUM_LIT:1>]<EOL>fitmags, phase, ptimes, pmags, perrs, n_transitpoints = (<EOL>transits.trapezoid_transit_func(<EOL>finalparams,<EOL>stimes, smags, serrs,<EOL>get_ntransitpoints=True<EOL>)<EOL>)<EOL>fitchisq = np.sum(<EOL>((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)<EOL>)<EOL>fitredchisq = fitchisq/(len(pmags) - len(finalparams) - <NUM_LIT:1>)<EOL>residuals = leastsqfit[<NUM_LIT:2>]['<STR_LIT>']<EOL>residualvariance = (<EOL>np.sum(residuals*residuals)/(pmags.size - finalparams.size)<EOL>)<EOL>if covxmatrix is not None:<EOL><INDENT>covmatrix = residualvariance*covxmatrix<EOL>stderrs = np.sqrt(np.diag(covmatrix))<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>stderrs = None<EOL><DEDENT>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>' %<EOL>(fitchisq, fitredchisq)<EOL>)<EOL><DEDENT>fperiod, fepoch = finalparams[:<NUM_LIT:2>]<EOL>returndict = 
{<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':transitparams,<EOL>'<STR_LIT>':finalparams,<EOL>'<STR_LIT>':stderrs,<EOL>'<STR_LIT>':leastsqfit,<EOL>'<STR_LIT>':fitmags,<EOL>'<STR_LIT>':fepoch,<EOL>'<STR_LIT>':n_transitpoints<EOL>},<EOL>'<STR_LIT>':fitchisq,<EOL>'<STR_LIT>':fitredchisq,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':phase,<EOL>'<STR_LIT>':ptimes,<EOL>'<STR_LIT>':pmags,<EOL>'<STR_LIT>':perrs,<EOL>'<STR_LIT>':magsarefluxes,<EOL>},<EOL>}<EOL>if plotfit and isinstance(plotfit, str):<EOL><INDENT>make_fit_plot(phase, pmags, perrs, fitmags,<EOL>fperiod, ptimes.min(), fepoch,<EOL>plotfit,<EOL>magsarefluxes=magsarefluxes)<EOL>returndict['<STR_LIT>'] = plotfit<EOL><DEDENT>return returndict<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>returndict = {<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':transitparams,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':leastsqfit,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':<NUM_LIT:0><EOL>},<EOL>'<STR_LIT>':np.nan,<EOL>'<STR_LIT>':np.nan,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':magsarefluxes,<EOL>},<EOL>}<EOL>return returndict<EOL><DEDENT>", "docstring": "This fits a trapezoid transit model to a magnitude time series.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The input mag/flux time-series to fit a trapezoid planet-transit model\n        to.\n\n    period : float\n        The period to use for the model fit.\n\n    transitparams : list of floats\n        These are initial parameters for the transit model fit. 
A list of the\n        following form is required::\n\n            transitparams = [transitperiod (time),\n                             transitepoch (time),\n                             transitdepth (flux or mags),\n                             transitduration (phase),\n                             ingressduration (phase)]\n\n        - for magnitudes -> `transitdepth` should be < 0\n        - for fluxes     -> `transitdepth` should be > 0\n\n        If `transitepoch` is None, this function will do an initial spline fit\n        to find an approximate minimum of the phased light curve using the given\n        period.\n\n        The `transitdepth` provided is checked against the value of\n        `magsarefluxes`. if `magsarefluxes = True`, the `transitdepth` is forced\n        to be > 0; if `magsarefluxes` = False, the `transitdepth` is forced to\n        be < 0.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    magsarefluxes : bool\n        If True, will treat the input values of `mags` as fluxes for purposes of\n        plotting the fit and sig-clipping.\n\n    plotfit : str or False\n        If this is a string, this function will make a plot for the fit to the\n        mag/flux time-series and writes the plot to the path specified here.\n\n    ignoreinitfail : bool\n        If this is True, ignores the initial failure to find a set of optimized\n        Fourier parameters using the global optimization function and proceeds\n        to do a least-squares fit anyway.\n\n    verbose : bool\n        If True, will indicate progress and warn of any problems.\n\n    Returns\n    -------\n\n    dict\n        This function returns a dict containing the model fit parameters, the\n        minimized chi-sq value and the reduced chi-sq value. 
The form of this\n        dict is mostly standardized across all functions in this module::\n\n            {\n                'fittype':'traptransit',\n                'fitinfo':{\n                    'initialparams':the initial transit params provided,\n                    'finalparams':the final model fit transit params ,\n                    'finalparamerrs':formal errors in the params,\n                    'leastsqfit':the full tuple returned by scipy.leastsq,\n                    'fitmags': the model fit mags,\n                    'fitepoch': the epoch of minimum light for the fit,\n                    'ntransitpoints': the number of LC points in transit phase\n                },\n                'fitchisq': the minimized value of the fit's chi-sq,\n                'fitredchisq':the reduced chi-sq value,\n                'fitplotfile': the output fit plot if fitplot is not None,\n                'magseries':{\n                    'times':input times in phase order of the model,\n                    'phase':the phases of the model mags,\n                    'mags':input mags/fluxes in the phase order of the model,\n                    'errs':errs in the phase order of the model,\n                    'magsarefluxes':input value of magsarefluxes kwarg\n                }\n            }", "id": "f14680:m0"}
{"signature": "def _get_value(quantitystr, fitparams, fixedparams):", "body": "<EOL>fitparamskeys, fixedparamskeys = fitparams.keys(), fixedparams.keys()<EOL>if quantitystr in fitparamskeys:<EOL><INDENT>quantity = fitparams[quantitystr]<EOL><DEDENT>elif quantitystr in fixedparamskeys:<EOL><INDENT>quantity = fixedparams[quantitystr]<EOL><DEDENT>return quantity<EOL>", "docstring": "This decides if a value is to be fit for or is fixed in a model fit.\n\n    When you want to get the value of some parameter, but you're not sure if\n    it's being fit or if it is fixed. then, e.g. for `period`::\n\n        period_value = _get_value('period', fitparams, fixedparams)", "id": "f14680:m1"}
{"signature": "def mandelagol_and_line_fit_magseries(<EOL>times, mags, errs,<EOL>fitparams,<EOL>priorbounds,<EOL>fixedparams,<EOL>trueparams=None,<EOL>burninpercent=<NUM_LIT>,<EOL>plotcorner=False,<EOL>timeoffset=<NUM_LIT:0>,<EOL>samplesavpath=False,<EOL>n_walkers=<NUM_LIT:50>,<EOL>n_mcmc_steps=<NUM_LIT>,<EOL>eps=<NUM_LIT>,<EOL>skipsampling=False,<EOL>overwriteexistingsamples=False,<EOL>mcmcprogressbar=False,<EOL>plotfit=False,<EOL>scatterxdata=None,<EOL>scatteryaxes=None,<EOL>magsarefluxes=True,<EOL>sigclip=<NUM_LIT>,<EOL>verbose=True,<EOL>nworkers=<NUM_LIT:4><EOL>):", "body": "from multiprocessing import Pool<EOL>fittype = '<STR_LIT>'<EOL>if not magsarefluxes:<EOL><INDENT>raise NotImplementedError('<STR_LIT>')<EOL><DEDENT>if not samplesavpath:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>if not mandel_agol_dependencies:<EOL><INDENT>raise ImportError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>stimes, smags, serrs = sigclip_magseries(times, mags, errs,<EOL>sigclip=sigclip,<EOL>magsarefluxes=magsarefluxes)<EOL>nzind = np.nonzero(serrs)<EOL>stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]<EOL>init_period = _get_value('<STR_LIT>', fitparams, fixedparams)<EOL>init_epoch = _get_value('<STR_LIT>', fitparams, fixedparams)<EOL>init_rp = _get_value('<STR_LIT>', fitparams, fixedparams)<EOL>init_sma = _get_value('<STR_LIT>', fitparams, fixedparams)<EOL>init_incl = _get_value('<STR_LIT>', fitparams, fixedparams)<EOL>init_ecc = _get_value('<STR_LIT>', fitparams, fixedparams)<EOL>init_omega = _get_value('<STR_LIT>', fitparams, fixedparams)<EOL>limb_dark = _get_value('<STR_LIT>', fitparams, fixedparams)<EOL>init_u = _get_value('<STR_LIT:u>', fitparams, fixedparams)<EOL>init_poly_order0 = _get_value('<STR_LIT>', fitparams, fixedparams)<EOL>init_poly_order1 = _get_value('<STR_LIT>', fitparams, fixedparams)<EOL>if not limb_dark == '<STR_LIT>':<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>init_params, init_m = _transit_model(<EOL>stimes, 
init_epoch, init_period, init_rp, init_sma, init_incl,<EOL>init_ecc, init_omega, init_u, limb_dark)<EOL>init_flux = (<EOL>init_m.light_curve(init_params) +<EOL>init_poly_order0 + init_poly_order1*stimes<EOL>)<EOL>theta, fitparamnames = [], []<EOL>for k in np.sort(list(fitparams.keys())):<EOL><INDENT>if isinstance(fitparams[k], float) or isinstance(fitparams[k], int):<EOL><INDENT>theta.append(fitparams[k])<EOL>fitparamnames.append(fitparams[k])<EOL><DEDENT>elif isinstance(fitparams[k], list):<EOL><INDENT>if not len(fitparams[k]) == <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>theta.append(fitparams[k][<NUM_LIT:0>])<EOL>theta.append(fitparams[k][<NUM_LIT:1>])<EOL>fitparamnames.append(fitparams[k][<NUM_LIT:0>])<EOL>fitparamnames.append(fitparams[k][<NUM_LIT:1>])<EOL><DEDENT><DEDENT>n_dim = len(theta)<EOL>if not skipsampling:<EOL><INDENT>backend = emcee.backends.HDFBackend(samplesavpath)<EOL>if overwriteexistingsamples:<EOL><INDENT>LOGWARNING(<EOL>'<STR_LIT>'.format(samplesavpath)<EOL>)<EOL>backend.reset(n_walkers, n_dim)<EOL><DEDENT>def nll(*args):<EOL><INDENT>return -_log_likelihood_transit_plus_line(*args)<EOL><DEDENT>soln = spminimize(<EOL>nll, theta, method='<STR_LIT>',<EOL>args=(init_params, init_m, stimes, smags, serrs, priorbounds)<EOL>)<EOL>theta_ml = soln.x<EOL>ml_poly_order0 = theta_ml[<NUM_LIT:0>]<EOL>ml_poly_order1 = theta_ml[<NUM_LIT:1>]<EOL>ml_rp = theta_ml[<NUM_LIT:2>]<EOL>ml_t0 = theta_ml[<NUM_LIT:3>]<EOL>ml_params, ml_m = _transit_model(stimes, ml_t0, init_period,<EOL>ml_rp, init_sma, init_incl,<EOL>init_ecc, init_omega, init_u,<EOL>limb_dark)<EOL>ml_mags = (<EOL>ml_m.light_curve(ml_params) +<EOL>ml_poly_order0 + ml_poly_order1*stimes<EOL>)<EOL>initial_position_vec = [theta_ml + eps*np.random.randn(n_dim)<EOL>for i in range(n_walkers)]<EOL>starting_positions = initial_position_vec<EOL>isfirstrun = True<EOL>if os.path.exists(backend.filename):<EOL><INDENT>if backend.iteration > <NUM_LIT:1>:<EOL><INDENT>starting_positions = 
None<EOL>isfirstrun = False<EOL><DEDENT><DEDENT>if verbose and isfirstrun:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'.<EOL>format(fittype, n_dim, n_mcmc_steps, n_walkers) +<EOL>'<STR_LIT>'.format(nworkers)<EOL>)<EOL><DEDENT>elif verbose and not isfirstrun:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'.<EOL>format(fittype, n_dim, n_mcmc_steps, n_walkers) +<EOL>'<STR_LIT>'.format(nworkers)<EOL>)<EOL><DEDENT>with Pool(nworkers) as pool:<EOL><INDENT>sampler = emcee.EnsembleSampler(<EOL>n_walkers, n_dim, log_posterior_transit_plus_line,<EOL>args=(init_params, init_m, stimes, smags, serrs, priorbounds),<EOL>pool=pool,<EOL>backend=backend<EOL>)<EOL>sampler.run_mcmc(starting_positions, n_mcmc_steps,<EOL>progress=mcmcprogressbar)<EOL><DEDENT>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'.format(<EOL>fittype, n_mcmc_steps, n_walkers<EOL>) + '<STR_LIT>'.format(nworkers)<EOL>)<EOL><DEDENT><DEDENT>reader = emcee.backends.HDFBackend(samplesavpath)<EOL>n_to_discard = int(burninpercent*n_mcmc_steps)<EOL>samples = reader.get_chain(discard=n_to_discard, flat=True)<EOL>log_prob_samples = reader.get_log_prob(discard=n_to_discard, flat=True)<EOL>log_prior_samples = reader.get_blobs(discard=n_to_discard, flat=True)<EOL>fit_statistics = list(<EOL>map(lambda v: (v[<NUM_LIT:1>], v[<NUM_LIT:2>]-v[<NUM_LIT:1>], v[<NUM_LIT:1>]-v[<NUM_LIT:0>]),<EOL>list(zip( *np.percentile(samples, [<NUM_LIT>, <NUM_LIT:50>, <NUM_LIT>], axis=<NUM_LIT:0>))))<EOL>)<EOL>medianparams, std_perrs, std_merrs = {}, {}, {}<EOL>for ix, k in enumerate(np.sort(list(priorbounds.keys()))):<EOL><INDENT>medianparams[k] = fit_statistics[ix][<NUM_LIT:0>]<EOL>std_perrs[k] = fit_statistics[ix][<NUM_LIT:1>]<EOL>std_merrs[k] = fit_statistics[ix][<NUM_LIT:2>]<EOL><DEDENT>stderrs = {'<STR_LIT>':std_perrs, '<STR_LIT>':std_merrs}<EOL>per = _get_value('<STR_LIT>', medianparams, fixedparams)<EOL>t0 = _get_value('<STR_LIT>', medianparams, fixedparams)<EOL>rp = _get_value('<STR_LIT>', medianparams, fixedparams)<EOL>sma = _get_value('<STR_LIT>', 
medianparams, fixedparams)<EOL>incl = _get_value('<STR_LIT>', medianparams, fixedparams)<EOL>ecc = _get_value('<STR_LIT>', medianparams, fixedparams)<EOL>omega = _get_value('<STR_LIT>', medianparams, fixedparams)<EOL>limb_dark = _get_value('<STR_LIT>', medianparams, fixedparams)<EOL>try:<EOL><INDENT>u = fixedparams['<STR_LIT:u>']<EOL><DEDENT>except Exception as e:<EOL><INDENT>u = [medianparams['<STR_LIT>'], medianparams['<STR_LIT>']]<EOL><DEDENT>poly_order0 = _get_value('<STR_LIT>', medianparams, fixedparams)<EOL>poly_order1 = _get_value('<STR_LIT>', medianparams, fixedparams)<EOL>fit_params, fit_m = _transit_model(stimes, t0, per, rp, sma, incl, ecc,<EOL>omega, u, limb_dark)<EOL>fitmags = (<EOL>fit_m.light_curve(fit_params) +<EOL>poly_order0 + poly_order1*stimes<EOL>)<EOL>fepoch = t0<EOL>medianparams['<STR_LIT>'] += timeoffset<EOL>returndict = {<EOL>'<STR_LIT>':fittype,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':fitparams,<EOL>'<STR_LIT>':init_flux,<EOL>'<STR_LIT>':fixedparams,<EOL>'<STR_LIT>':medianparams,<EOL>'<STR_LIT>':stderrs,<EOL>'<STR_LIT>':fitmags,<EOL>'<STR_LIT>':fepoch+timeoffset,<EOL>},<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':stimes+timeoffset,<EOL>'<STR_LIT>':smags,<EOL>'<STR_LIT>':serrs,<EOL>'<STR_LIT>':magsarefluxes,<EOL>},<EOL>}<EOL>if plotcorner:<EOL><INDENT>fig = corner.corner(<EOL>samples,<EOL>labels=['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>'.format(timeoffset)],<EOL>truths=[ml_poly_order0, ml_poly_order1, ml_rp, ml_t0],<EOL>quantiles=[<NUM_LIT>, <NUM_LIT:0.5>, <NUM_LIT>], show_titles=True<EOL>)<EOL>plt.savefig(plotcorner, dpi=<NUM_LIT>)<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'.format(plotcorner))<EOL><DEDENT><DEDENT>if plotfit and isinstance(plotfit, str):<EOL><INDENT>plt.close('<STR_LIT:all>')<EOL>f, (a0, a1) = plt.subplots(nrows=<NUM_LIT:2>, ncols=<NUM_LIT:1>, sharex=True,<EOL>figsize=(<NUM_LIT:8>,<NUM_LIT:5>),<EOL>gridspec_kw={'<STR_LIT>':[<NUM_LIT:3>, <NUM_LIT:1>]})<EOL>a0.scatter(stimes, smags, c='<STR_LIT:k>', 
alpha=<NUM_LIT>, label='<STR_LIT:data>', zorder=<NUM_LIT:1>,<EOL>s=<NUM_LIT:10>, rasterized=True, linewidths=<NUM_LIT:0>)<EOL>DEBUGGING = False<EOL>if DEBUGGING:<EOL><INDENT>a0.scatter(stimes, init_flux, c='<STR_LIT:r>', alpha=<NUM_LIT:1>, s=<NUM_LIT>, zorder=<NUM_LIT:2>,<EOL>rasterized=True, linewidths=<NUM_LIT:0>,<EOL>label='<STR_LIT>')<EOL>a0.scatter(stimes, ml_mags, c='<STR_LIT:g>', alpha=<NUM_LIT:1>, s=<NUM_LIT>, zorder=<NUM_LIT:2>,<EOL>rasterized=True, linewidths=<NUM_LIT:0>, label='<STR_LIT>')<EOL><DEDENT>a0.plot(<EOL>stimes, fitmags, c='<STR_LIT:b>',<EOL>zorder=<NUM_LIT:0>, rasterized=True, lw=<NUM_LIT:2>, alpha=<NUM_LIT>,<EOL>label='<STR_LIT>'.format(fittype, len(fitparamnames))<EOL>)<EOL>a1.scatter(<EOL>stimes, smags-fitmags, c='<STR_LIT:k>', alpha=<NUM_LIT>,<EOL>rasterized=True, s=<NUM_LIT:10>, linewidths=<NUM_LIT:0><EOL>)<EOL>if scatterxdata and scatteryaxes:<EOL><INDENT>import matplotlib.transforms as transforms<EOL>for a in [a0, a1]:<EOL><INDENT>transform = transforms.blended_transform_factory(<EOL>a.transData, a.transAxes<EOL>)<EOL>a.scatter(scatterxdata, scatteryaxes, c='<STR_LIT:r>', alpha=<NUM_LIT>,<EOL>zorder=<NUM_LIT:2>, s=<NUM_LIT:10>, rasterized=True, linewidths=<NUM_LIT:0>,<EOL>marker=\"<STR_LIT>\", transform=transform)<EOL><DEDENT><DEDENT>a1.set_xlabel('<STR_LIT>')<EOL>a0.set_ylabel('<STR_LIT>')<EOL>a1.set_ylabel('<STR_LIT>')<EOL>a0.legend(loc='<STR_LIT>', fontsize='<STR_LIT>')<EOL>for a in [a0, a1]:<EOL><INDENT>a.get_yaxis().set_tick_params(which='<STR_LIT>', direction='<STR_LIT>')<EOL>a.get_xaxis().set_tick_params(which='<STR_LIT>', direction='<STR_LIT>')<EOL><DEDENT>f.tight_layout(h_pad=<NUM_LIT:0>, w_pad=<NUM_LIT:0>)<EOL>f.savefig(plotfit, dpi=<NUM_LIT>, bbox_inches='<STR_LIT>')<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'.format(plotfit))<EOL><DEDENT>returndict['<STR_LIT>'] = plotfit<EOL><DEDENT>return returndict<EOL>", "docstring": "The model fit by this function is: a Mandel & Agol (2002) transit, PLUS a\n    line. 
You can fit and fix whatever parameters you want.\n\n    A typical use case: you want to measure transit times of individual SNR >~\n    50 transits. You fix all the transit parameters except for the mid-time,\n    and also fit for a line locally.\n\n    NOTE: this only works for flux time-series at the moment.\n\n    NOTE: Between the `fitparams`, `priorbounds`, and `fixedparams` dicts, you\n    must specify all of the planetary transit parameters required by BATMAN and\n    the parameters for the line fit: `['t0', 'rp', 'sma', 'incl', 'u', 'rp',\n    'ecc', 'omega', 'period', 'poly_order0', poly_order1']`, or the BATMAN model\n    will fail to initialize.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The input flux time-series to fit a Fourier cosine series to.\n\n    fitparams : dict\n        This is the initial parameter guesses for MCMC, found e.g., by\n        BLS. The key string format must not be changed, but any parameter can be\n        either \"fit\" or \"fixed\". If it is \"fit\", it must have a corresponding\n        prior. For example::\n\n            fitparams = {'t0':1325.9,\n                         'poly_order0':1,\n                         'poly_order1':0.}\n\n        where `t0` is the time of transit-center for a reference transit.\n        `poly_order0` corresponds to the intercept of the line, `poly_order1` is\n        the slope.\n\n    priorbounds : dict\n        This sets the lower & upper bounds on uniform prior, e.g.::\n\n            priorbounds = {'t0':(np.min(time), np.max(time)),\n                            'poly_order0':(0.5,1.5),\n                            'poly_order1':(-0.5,0.5) }\n\n    fixedparams : dict\n        This sets which parameters are fixed, and their values. 
For example::\n\n            fixedparams = {'ecc':0.,\n                           'omega':90.,\n                           'limb_dark':'quadratic',\n                           'period':fitd['period'],\n                           'rp':np.sqrt(fitd['transitdepth']),\n                           'sma':6.17, 'incl':85, 'u':[0.3, 0.2]}\n\n        `limb_dark` must be \"quadratic\".  It's \"fixed\", because once you\n        choose your limb-darkening model, it's fixed.\n\n    trueparams : list of floats\n        The true parameter values you're fitting for, if they're known (e.g., a\n        known planet, or fake data). Only for plotting purposes.\n\n    burninpercent : float\n        The percent of MCMC samples to discard as burn-in.\n\n    plotcorner : str or False\n        If this is a str, points to the path of output corner plot that will be\n        generated for this MCMC run.\n\n    timeoffset : float\n        If input times are offset by some constant, and you want saved pickles\n        to fix that.\n\n    samplesavpath : str\n        This must be provided so `emcee` can save its MCMC samples to disk as\n        HDF5 files. This will set the path of the output HDF5file written.\n\n    n_walkers : int\n        The number of MCMC walkers to use.\n\n    n_mcmc_steps : int\n        The number of MCMC steps to take.\n\n    eps : float\n        The radius of the `n_walkers-dimensional` Gaussian ball used to\n        initialize the MCMC.\n\n    skipsampling : bool\n        If you've already collected MCMC samples, and you do not want any more\n        sampling (e.g., just make the plots), set this to be True.\n\n    overwriteexistingsamples : bool\n        If you've collected samples, but you want to overwrite them, set this to\n        True. 
Usually, it should be False, which appends samples to\n        `samplesavpath` HDF5 file.\n\n    mcmcprogressbar : bool\n        If True, will show a progress bar for the MCMC process.\n\n    plotfit: str or bool\n        If a str, indicates the path of the output fit plot file. If False, no\n        fit plot will be made.\n\n    scatterxdata : np.array or None\n        Use this to overplot x,y scatter points on the output model/data\n        lightcurve (e.g., to highlight bad data, or to indicate an ephemeris),\n        this can take a `np.ndarray` with the same units as `times`.\n\n    scatteryaxes : np.array or None\n        Use this to provide the y-values for scatterxdata, in units of fraction\n        of an axis.\n\n    magsarefluxes : bool\n        This indicates if the input measurements in `mags` are actually fluxes.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    verbose : bool\n        If True, will indicate MCMC progress.\n\n    nworkers : int\n        The number of parallel workers to launch for MCMC.\n\n    Returns\n    -------\n\n    dict\n        This function returns a dict containing the model fit parameters and\n        other fit information. The form of this dict is mostly standardized\n        across all functions in this module::\n\n            {\n                'fittype':'mandelagol_and_line',\n                'fitinfo':{\n                    'initialparams':the initial transit params provided,\n                    'fixedparams':the fixed transit params provided,\n                    'finalparams':the final model fit transit params,\n                    'finalparamerrs':formal errors in the params,\n                    'fitmags': the model fit mags,\n                    'fitepoch': the epoch of minimum light for the fit,\n                },\n                'fitplotfile': the output fit plot if fitplot is not None,\n                'magseries':{\n                    'times':input times in phase order of the model,\n                    'phase':the phases of the model mags,\n                    'mags':input mags/fluxes in the phase order of the model,\n                    'errs':errs in the phase order of the model,\n                    'magsarefluxes':input value of magsarefluxes kwarg\n                }\n            }", "id": "f14680:m10"}
{"signature": "def spline_fit_magseries(times, mags, errs, period,<EOL>knotfraction=<NUM_LIT>,<EOL>maxknots=<NUM_LIT:30>,<EOL>sigclip=<NUM_LIT>,<EOL>plotfit=False,<EOL>ignoreinitfail=False,<EOL>magsarefluxes=False,<EOL>verbose=True):", "body": "<EOL>if errs is None:<EOL><INDENT>errs = npfull_like(mags, <NUM_LIT>)<EOL><DEDENT>stimes, smags, serrs = sigclip_magseries(times, mags, errs,<EOL>sigclip=sigclip,<EOL>magsarefluxes=magsarefluxes)<EOL>nzind = npnonzero(serrs)<EOL>stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]<EOL>phase, pmags, perrs, ptimes, mintime = (<EOL>get_phased_quantities(stimes, smags, serrs, period)<EOL>)<EOL>nobs = len(phase)<EOL>nknots = int(npfloor(knotfraction*nobs))<EOL>nknots = maxknots if nknots > maxknots else nknots<EOL>splineknots = nplinspace(phase[<NUM_LIT:0>] + <NUM_LIT>,<EOL>phase[-<NUM_LIT:1>] - <NUM_LIT>,<EOL>num=nknots)<EOL>phase_diffs_ind = npdiff(phase) > <NUM_LIT:0.0><EOL>incphase_ind = npconcatenate((nparray([True]), phase_diffs_ind))<EOL>phase, pmags, perrs = (phase[incphase_ind],<EOL>pmags[incphase_ind],<EOL>perrs[incphase_ind])<EOL>spl = LSQUnivariateSpline(phase, pmags, t=splineknots, w=<NUM_LIT:1.0>/perrs)<EOL>fitmags = spl(phase)<EOL>fitchisq = npsum(<EOL>((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)<EOL>)<EOL>fitredchisq = fitchisq/(len(pmags) - nknots - <NUM_LIT:1>)<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(nknots, fitchisq, fitredchisq)<EOL>)<EOL><DEDENT>if not magsarefluxes:<EOL><INDENT>fitmagminind = npwhere(fitmags == npmax(fitmags))<EOL><DEDENT>else:<EOL><INDENT>fitmagminind = npwhere(fitmags == npmin(fitmags))<EOL><DEDENT>if len(fitmagminind[<NUM_LIT:0>]) > <NUM_LIT:1>:<EOL><INDENT>fitmagminind = (fitmagminind[<NUM_LIT:0>][<NUM_LIT:0>],)<EOL><DEDENT>magseriesepoch = ptimes[fitmagminind]<EOL>returndict = 
{<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':nknots,<EOL>'<STR_LIT>':fitmags,<EOL>'<STR_LIT>':magseriesepoch<EOL>},<EOL>'<STR_LIT>':fitchisq,<EOL>'<STR_LIT>':fitredchisq,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':ptimes,<EOL>'<STR_LIT>':phase,<EOL>'<STR_LIT>':pmags,<EOL>'<STR_LIT>':perrs,<EOL>'<STR_LIT>':magsarefluxes<EOL>},<EOL>}<EOL>if plotfit and isinstance(plotfit, str):<EOL><INDENT>make_fit_plot(phase, pmags, perrs, fitmags,<EOL>period, mintime, magseriesepoch,<EOL>plotfit,<EOL>magsarefluxes=magsarefluxes)<EOL>returndict['<STR_LIT>'] = plotfit<EOL><DEDENT>return returndict<EOL>", "docstring": "This fits a univariate cubic spline to the phased light curve.\n\n    This fit may be better than the Fourier fit for sharply variable objects,\n    like EBs, so can be used to distinguish them from other types of variables.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The input mag/flux time-series to fit a spline to.\n\n    period : float\n        The period to use for the spline fit.\n\n    knotfraction : float\n        The knot fraction is the number of internal knots to use for the\n        spline. A value of 0.01 (or 1%) of the total number of non-nan\n        observations appears to work quite well, without over-fitting. maxknots\n        controls the maximum number of knots that will be allowed.\n\n    maxknots : int\n        The maximum number of knots that will be used even if `knotfraction`\n        gives a value to use larger than `maxknots`. This helps dealing with\n        over-fitting to short time-scale variations.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. 
The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    magsarefluxes : bool\n        If True, will treat the input values of `mags` as fluxes for purposes of\n        plotting the fit and sig-clipping.\n\n    plotfit : str or False\n        If this is a string, this function will make a plot for the fit to the\n        mag/flux time-series and writes the plot to the path specified here.\n\n    ignoreinitfail : bool\n        If this is True, ignores the initial failure to find a set of optimized\n        Fourier parameters using the global optimization function and proceeds\n        to do a least-squares fit anyway.\n\n    verbose : bool\n        If True, will indicate progress and warn of any problems.\n\n    Returns\n    -------\n\n    dict\n        This function returns a dict containing the model fit parameters, the\n        minimized chi-sq value and the reduced chi-sq value. 
The form of this\n        dict is mostly standardized across all functions in this module::\n\n            {\n                'fittype':'spline',\n                'fitinfo':{\n                    'nknots': the number of knots used for the fit\n                    'fitmags': the model fit mags,\n                    'fitepoch': the epoch of minimum light for the fit,\n                },\n                'fitchisq': the minimized value of the fit's chi-sq,\n                'fitredchisq':the reduced chi-sq value,\n                'fitplotfile': the output fit plot if fitplot is not None,\n                'magseries':{\n                    'times':input times in phase order of the model,\n                    'phase':the phases of the model mags,\n                    'mags':input mags/fluxes in the phase order of the model,\n                    'errs':errs in the phase order of the model,\n                    'magsarefluxes':input value of magsarefluxes kwarg\n                }\n            }", "id": "f14681:m0"}
{"signature": "def fourier_fit_magseries(times, mags, errs, period,<EOL>fourierorder=None,<EOL>fourierparams=None,<EOL>sigclip=<NUM_LIT>,<EOL>magsarefluxes=False,<EOL>plotfit=False,<EOL>ignoreinitfail=True,<EOL>verbose=True):", "body": "stimes, smags, serrs = sigclip_magseries(times, mags, errs,<EOL>sigclip=sigclip,<EOL>magsarefluxes=magsarefluxes)<EOL>nzind = npnonzero(serrs)<EOL>stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]<EOL>phase, pmags, perrs, ptimes, mintime = (<EOL>get_phased_quantities(stimes, smags, serrs, period)<EOL>)<EOL>if fourierorder and fourierorder > <NUM_LIT:0> and not fourierparams:<EOL><INDENT>fourieramps = [<NUM_LIT>] + [<NUM_LIT>]*(fourierorder - <NUM_LIT:1>)<EOL>fourierphas = [<NUM_LIT:0.1>] + [<NUM_LIT:0.1>]*(fourierorder - <NUM_LIT:1>)<EOL>fourierparams = fourieramps + fourierphas<EOL><DEDENT>elif not fourierorder and fourierparams:<EOL><INDENT>fourierorder = int(len(fourierparams)/<NUM_LIT:2>)<EOL><DEDENT>else:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>fourierorder = <NUM_LIT:3><EOL>fourieramps = [<NUM_LIT>] + [<NUM_LIT>]*(fourierorder - <NUM_LIT:1>)<EOL>fourierphas = [<NUM_LIT:0.1>] + [<NUM_LIT:0.1>]*(fourierorder - <NUM_LIT:1>)<EOL>fourierparams = fourieramps + fourierphas<EOL><DEDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % (fourierorder,<EOL>len(phase),<EOL>period,<EOL>mintime))<EOL><DEDENT>initialfit = spminimize(_fourier_chisq,<EOL>fourierparams,<EOL>method='<STR_LIT>',<EOL>args=(phase, pmags, perrs))<EOL>if initialfit.success or ignoreinitfail:<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>')<EOL><DEDENT>leastsqparams = initialfit.x<EOL>try:<EOL><INDENT>leastsqfit = spleastsq(_fourier_residual,<EOL>leastsqparams,<EOL>args=(phase, pmags))<EOL><DEDENT>except Exception as e:<EOL><INDENT>leastsqfit = None<EOL><DEDENT>if leastsqfit and leastsqfit[-<NUM_LIT:1>] in (<NUM_LIT:1>,<NUM_LIT:2>,<NUM_LIT:3>,<NUM_LIT:4>):<EOL><INDENT>finalparams = 
leastsqfit[<NUM_LIT:0>]<EOL>fitmags = _fourier_func(finalparams, phase, pmags)<EOL>fitchisq = npsum(<EOL>((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)<EOL>)<EOL>fitredchisq = fitchisq/(len(pmags) - len(finalparams) - <NUM_LIT:1>)<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>' %<EOL>(fitchisq,fitredchisq)<EOL>)<EOL><DEDENT>if not magsarefluxes:<EOL><INDENT>fitmagminind = npwhere(fitmags == npmax(fitmags))<EOL><DEDENT>else:<EOL><INDENT>fitmagminind = npwhere(fitmags == npmin(fitmags))<EOL><DEDENT>if len(fitmagminind[<NUM_LIT:0>]) > <NUM_LIT:1>:<EOL><INDENT>fitmagminind = (fitmagminind[<NUM_LIT:0>][<NUM_LIT:0>],)<EOL><DEDENT>returndict = {<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':fourierorder,<EOL>'<STR_LIT>':finalparams,<EOL>'<STR_LIT>':initialfit,<EOL>'<STR_LIT>':leastsqfit,<EOL>'<STR_LIT>':fitmags,<EOL>'<STR_LIT>':mintime,<EOL>'<STR_LIT>':ptimes[fitmagminind]<EOL>},<EOL>'<STR_LIT>':fitchisq,<EOL>'<STR_LIT>':fitredchisq,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':ptimes,<EOL>'<STR_LIT>':phase,<EOL>'<STR_LIT>':pmags,<EOL>'<STR_LIT>':perrs,<EOL>'<STR_LIT>':magsarefluxes<EOL>},<EOL>}<EOL>if plotfit and isinstance(plotfit, str):<EOL><INDENT>make_fit_plot(phase, pmags, perrs, fitmags,<EOL>period, mintime, mintime,<EOL>plotfit,<EOL>magsarefluxes=magsarefluxes)<EOL>returndict['<STR_LIT>'] = plotfit<EOL><DEDENT>return returndict<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return {<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':fourierorder,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':initialfit,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None<EOL>},<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':ptimes,<EOL>'<STR_LIT>':phase,<EOL>'<STR_LIT>':pmags,<EOL>'<STR_LIT>':perrs,<EOL>'<STR_LIT>':magsarefluxes<EOL>}<EOL>}<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% initialfit.message)<EOL>return 
{<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':fourierorder,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':initialfit,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None<EOL>},<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':ptimes,<EOL>'<STR_LIT>':phase,<EOL>'<STR_LIT>':pmags,<EOL>'<STR_LIT>':perrs,<EOL>'<STR_LIT>':magsarefluxes<EOL>}<EOL>}<EOL><DEDENT>", "docstring": "This fits a Fourier series to a mag/flux time series.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The input mag/flux time-series to fit a Fourier cosine series to.\n\n    period : float\n        The period to use for the Fourier fit.\n\n    fourierorder : None or int\n        If this is an int, will be interpreted as the Fourier order of the\n        series to fit to the input mag/flux times-series. If this is None and\n        `fourierparams` is specified, `fourierparams` will be used directly to\n        generate the fit Fourier series. If `fourierparams` is also None, this\n        function will try to fit a Fourier cosine series of order 3 to the\n        mag/flux time-series.\n\n    fourierparams : list of floats or None\n        If this is specified as a list of floats, it must be of the form below::\n\n            [fourier_amp1, fourier_amp2, fourier_amp3,...,fourier_ampN,\n             fourier_phase1, fourier_phase2, fourier_phase3,...,fourier_phaseN]\n\n        to specify a Fourier cosine series of order N. If this is None and\n        `fourierorder` is specified, the Fourier order specified there will be\n        used to construct the Fourier cosine series used to fit the input\n        mag/flux time-series. 
If both are None, this function will try to fit a\n        Fourier cosine series of order 3 to the input mag/flux time-series.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    magsarefluxes : bool\n        If True, will treat the input values of `mags` as fluxes for purposes of\n        plotting the fit and sig-clipping.\n\n    plotfit : str or False\n        If this is a string, this function will make a plot for the fit to the\n        mag/flux time-series and writes the plot to the path specified here.\n\n    ignoreinitfail : bool\n        If this is True, ignores the initial failure to find a set of optimized\n        Fourier parameters using the global optimization function and proceeds\n        to do a least-squares fit anyway.\n\n    verbose : bool\n        If True, will indicate progress and warn of any problems.\n\n    Returns\n    -------\n\n    dict\n        This function returns a dict containing the model fit parameters, the\n        minimized chi-sq value 
and the reduced chi-sq value. The form of this\n        dict is mostly standardized across all functions in this module::\n\n            {\n                'fittype':'fourier',\n                'fitinfo':{\n                    'finalparams': the list of final model fit params,\n                    'leastsqfit':the full tuple returned by scipy.leastsq,\n                    'fitmags': the model fit mags,\n                    'fitepoch': the epoch of minimum light for the fit,\n                    ... other fit function specific keys ...\n                },\n                'fitchisq': the minimized value of the fit's chi-sq,\n                'fitredchisq':the reduced chi-sq value,\n                'fitplotfile': the output fit plot if fitplot is not None,\n                'magseries':{\n                    'times':input times in phase order of the model,\n                    'phase':the phases of the model mags,\n                    'mags':input mags/fluxes in the phase order of the model,\n                    'errs':errs in the phase order of the model,\n                    'magsarefluxes':input value of magsarefluxes kwarg\n                }\n            }\n\n        NOTE: the returned value of 'fitepoch' in the 'fitinfo' dict returned by\n        this function is the time value of the first observation since this is\n        where the LC is folded for the fit procedure. To get the actual time of\n        minimum epoch as calculated by a spline fit to the phased LC, use the\n        key 'actual_fitepoch' in the 'fitinfo' dict.", "id": "f14682:m3"}
{"signature": "def _fourier_func(fourierparams, phase, mags):", "body": "<EOL>order = int(len(fourierparams)/<NUM_LIT:2>)<EOL>f_amp = fourierparams[:order]<EOL>f_pha = fourierparams[order:]<EOL>f_orders = [f_amp[x]*npcos(<NUM_LIT>*pi_value*x*phase + f_pha[x])<EOL>for x in range(order)]<EOL>total_f = npmedian(mags)<EOL>for fo in f_orders:<EOL><INDENT>total_f += fo<EOL><DEDENT>return total_f<EOL>", "docstring": "This returns a summed Fourier cosine series.\n\n    Parameters\n    ----------\n\n    fourierparams : list\n        This MUST be a list of the following form like so::\n\n            [period,\n             epoch,\n             [amplitude_1, amplitude_2, amplitude_3, ..., amplitude_X],\n             [phase_1, phase_2, phase_3, ..., phase_X]]\n\n        where X is the Fourier order.\n\n    phase,mags : np.array\n        The input phase and magnitude areas to use as the basis for the cosine\n        series. The phases are used directly to generate the values of the\n        function, while the mags array is used to generate the zeroth order\n        amplitude coefficient.\n\n    Returns\n    -------\n\n    np.array\n        The Fourier cosine series function evaluated over `phase`.", "id": "f14682:m0"}
{"signature": "def get_phased_quantities(stimes, smags, serrs, period):", "body": "<EOL>mintime = np.min(stimes)<EOL>iphase = (stimes - mintime)/period - np.floor((stimes - mintime)/period)<EOL>phasesortind = np.argsort(iphase)<EOL>phase = iphase[phasesortind]<EOL>pmags = smags[phasesortind]<EOL>perrs = serrs[phasesortind]<EOL>ptimes = stimes[phasesortind]<EOL>return phase, pmags, perrs, ptimes, mintime<EOL>", "docstring": "Does phase-folding for the mag/flux time-series given a period.\n\n    Given finite and sigma-clipped times, magnitudes, and errors, along with the\n    period at which to phase-fold the data, perform the phase-folding and\n    return the phase-folded values.\n\n    Parameters\n    ----------\n\n    stimes,smags,serrs : np.array\n        The sigma-clipped and finite input mag/flux time-series arrays to\n        operate on.\n\n    period : float\n        The period to phase the mag/flux time-series at. stimes.min() is used as\n        the epoch value to fold the times-series around.\n\n    Returns\n    -------\n\n    (phase, pmags, perrs, ptimes, mintime) : tuple\n        The tuple returned contains the following items:\n\n        - `phase`: phase-sorted values of phase at each of stimes\n        - `pmags`: phase-sorted magnitudes at each phase\n        - `perrs`: phase-sorted errors\n        - `ptimes`: phase-sorted times\n        - `mintime`: earliest time in stimes.", "id": "f14685:m0"}
{"signature": "def parallel_gen_binnedlc_pkls(binnedpkldir,<EOL>textlcdir,<EOL>timebinsec,<EOL>binnedpklglob='<STR_LIT>',<EOL>textlcglob='<STR_LIT>'):", "body": "binnedpkls = sorted(glob.glob(os.path.join(binnedpkldir, binnedpklglob)))<EOL>textlcs = []<EOL>for bpkl in binnedpkls:<EOL><INDENT>objectid = HATIDREGEX.findall(bpkl)<EOL>if objectid is not None:<EOL><INDENT>objectid = objectid[<NUM_LIT:0>]<EOL><DEDENT>searchpath = os.path.join(textlcdir, '<STR_LIT>' % (objectid, textlcglob))<EOL>textlcf = glob.glob(searchpath)<EOL>if textlcf:<EOL><INDENT>textlcs.append(textlcf)<EOL><DEDENT>else:<EOL><INDENT>textlcs.append(None)<EOL><DEDENT><DEDENT>", "docstring": "This generates the binnedlc pkls for a directory of such files.\n\nFIXME: finish this", "id": "f14686:m11"}
{"signature": "def concat_write_pklc(lcbasedir,<EOL>objectid,<EOL>aperture='<STR_LIT>',<EOL>postfix='<STR_LIT>',<EOL>sortby='<STR_LIT>',<EOL>normalize=True,<EOL>outdir=None,<EOL>recursive=True):", "body": "concatlcd = concatenate_textlcs_for_objectid(lcbasedir,<EOL>objectid,<EOL>aperture=aperture,<EOL>sortby=sortby,<EOL>normalize=normalize,<EOL>recursive=recursive)<EOL>if not outdir:<EOL><INDENT>outdir = '<STR_LIT>'<EOL><DEDENT>if not os.path.exists(outdir):<EOL><INDENT>os.mkdir(outdir)<EOL><DEDENT>outfpath = os.path.join(outdir, '<STR_LIT>' % (concatlcd['<STR_LIT>'],<EOL>aperture))<EOL>pklc = lcdict_to_pickle(concatlcd, outfile=outfpath)<EOL>return pklc<EOL>", "docstring": "This concatenates all text LCs for the given object and writes to a pklc.\n\n    Basically a rollup for the concatenate_textlcs_for_objectid and\n    lcdict_to_pickle functions.", "id": "f14686:m5"}
{"signature": "def parallel_concat_lcdir(lcbasedir,<EOL>objectidlist,<EOL>aperture='<STR_LIT>',<EOL>postfix='<STR_LIT>',<EOL>sortby='<STR_LIT>',<EOL>normalize=True,<EOL>outdir=None,<EOL>recursive=True,<EOL>nworkers=<NUM_LIT:32>,<EOL>maxworkertasks=<NUM_LIT:1000>):", "body": "if not outdir:<EOL><INDENT>outdir = '<STR_LIT>'<EOL><DEDENT>if not os.path.exists(outdir):<EOL><INDENT>os.mkdir(outdir)<EOL><DEDENT>tasks = [(lcbasedir, x, {'<STR_LIT>':aperture,<EOL>'<STR_LIT>':postfix,<EOL>'<STR_LIT>':sortby,<EOL>'<STR_LIT>':normalize,<EOL>'<STR_LIT>':outdir,<EOL>'<STR_LIT>':recursive}) for x in objectidlist]<EOL>pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)<EOL>results = pool.map(parallel_concat_worker, tasks)<EOL>pool.close()<EOL>pool.join()<EOL>return {x:y for (x,y) in zip(objectidlist, results)}<EOL>", "docstring": "This concatenates all text LCs for the given objectidlist.", "id": "f14686:m7"}
{"signature": "def read_hatpi_textlc(lcfile):", "body": "if '<STR_LIT>' in lcfile:<EOL><INDENT>thiscoldefs = COLDEFS + [('<STR_LIT>',float)]<EOL><DEDENT>elif '<STR_LIT>' in lcfile:<EOL><INDENT>thiscoldefs = COLDEFS + [('<STR_LIT>',float)]<EOL><DEDENT>elif '<STR_LIT>' in lcfile:<EOL><INDENT>thiscoldefs = COLDEFS + [('<STR_LIT>',float)]<EOL><DEDENT>LOGINFO('<STR_LIT>' % lcfile)<EOL>if lcfile.endswith('<STR_LIT>'):<EOL><INDENT>infd = gzip.open(lcfile,'<STR_LIT:r>')<EOL><DEDENT>else:<EOL><INDENT>infd = open(lcfile,'<STR_LIT:r>')<EOL><DEDENT>with infd:<EOL><INDENT>lclines = infd.read().decode().split('<STR_LIT:\\n>')<EOL>lclines = [x.split() for x in lclines if ('<STR_LIT:#>' not in x and len(x) > <NUM_LIT:0>)]<EOL>ndet = len(lclines)<EOL>if ndet > <NUM_LIT:0>:<EOL><INDENT>lccols = list(zip(*lclines))<EOL>lcdict = {x[<NUM_LIT:0>]:y for (x,y) in zip(thiscoldefs, lccols)}<EOL>for col in thiscoldefs:<EOL><INDENT>lcdict[col[<NUM_LIT:0>]] = np.array([col[<NUM_LIT:1>](x) for x in lcdict[col[<NUM_LIT:0>]]])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>lcdict = {}<EOL>LOGWARNING('<STR_LIT>' % lcfile)<EOL>for col in thiscoldefs:<EOL><INDENT>lcdict[col[<NUM_LIT:0>]] = np.array([])<EOL><DEDENT><DEDENT>hatid = HATIDREGEX.findall(lcfile)<EOL>lcdict['<STR_LIT>'] = hatid[<NUM_LIT:0>] if hatid else '<STR_LIT>'<EOL>lcdict['<STR_LIT>'] = [x[<NUM_LIT:0>] for x in thiscoldefs]<EOL>lcdict['<STR_LIT>'] = {<EOL>'<STR_LIT>':ndet,<EOL>'<STR_LIT>':hatid[<NUM_LIT:0>] if hatid else '<STR_LIT>',<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>}<EOL>framekeyelems = FRAMEREGEX.findall('<STR_LIT:\\n>'.join(lcdict['<STR_LIT>']))<EOL>lcdict['<STR_LIT>'] = np.array([(int(x[<NUM_LIT:0>]) if x[<NUM_LIT:0>].isdigit() else np.nan)<EOL>for x in framekeyelems])<EOL>lcdict['<STR_LIT>'] = np.array([(int(x[<NUM_LIT:1>]) if x[<NUM_LIT:0>].isdigit() else np.nan)<EOL>for x in framekeyelems])<EOL>lcdict['<STR_LIT>'] = np.array([x[<NUM_LIT:2>] for x in framekeyelems])<EOL>lcdict['<STR_LIT>'] = np.array([(int(x[<NUM_LIT:3>]) if 
x[<NUM_LIT:0>].isdigit() else np.nan)<EOL>for x in framekeyelems])<EOL>lcdict['<STR_LIT>'].extend(['<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = [<EOL>'<STR_LIT>' % x for x in np.unique(lcdict['<STR_LIT>']).tolist()<EOL>]<EOL><DEDENT>return lcdict<EOL>", "docstring": "This reads in a textlc that is complete up to the TFA stage.", "id": "f14686:m0"}
{"signature": "def read_hatpi_pklc(lcfile):", "body": "try:<EOL><INDENT>if lcfile.endswith('<STR_LIT>'):<EOL><INDENT>infd = gzip.open(lcfile,'<STR_LIT:rb>')<EOL><DEDENT>else:<EOL><INDENT>infd = open(lcfile,'<STR_LIT:rb>')<EOL><DEDENT>lcdict = pickle.load(infd)<EOL>infd.close()<EOL>return lcdict<EOL><DEDENT>except UnicodeDecodeError:<EOL><INDENT>if lcfile.endswith('<STR_LIT>'):<EOL><INDENT>infd = gzip.open(lcfile,'<STR_LIT:rb>')<EOL><DEDENT>else:<EOL><INDENT>infd = open(lcfile,'<STR_LIT:rb>')<EOL><DEDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % lcfile)<EOL>lcdict = pickle.load(infd, encoding='<STR_LIT>')<EOL>infd.close()<EOL>return lcdict<EOL><DEDENT>", "docstring": "This just reads a pickle LC. Returns an lcdict.", "id": "f14686:m2"}
{"signature": "def parallel_concat_worker(task):", "body": "lcbasedir, objectid, kwargs = task<EOL>try:<EOL><INDENT>return concat_write_pklc(lcbasedir, objectid, **kwargs)<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>'<EOL>% (objectid, lcbasedir))<EOL>return None<EOL><DEDENT>", "docstring": "This is a worker for the function below.\n\ntask[0] = lcbasedir\ntask[1] = objectid\ntask[2] = {'aperture','postfix','sortby','normalize','outdir','recursive'}", "id": "f14686:m6"}
{"signature": "def read_original_textlc(lcpath):", "body": "LOGINFO('<STR_LIT>'.format(lcpath))<EOL>N_lines_to_parse_comments = <NUM_LIT:50><EOL>with open(lcpath, '<STR_LIT:rb>') as file:<EOL><INDENT>head = [next(file) for ind in range(N_lines_to_parse_comments)]<EOL><DEDENT>N_comment_lines = len([l for l in head if l.decode('<STR_LIT>')[<NUM_LIT:0>] == '<STR_LIT:#>'])<EOL>if N_comment_lines < N_lines_to_parse_comments:<EOL><INDENT>LOGERROR(<EOL>'<STR_LIT>'.format(fpath=lcpath)<EOL>)<EOL>return None<EOL><DEDENT>first_data_line = list(<EOL>filter(None, head[N_comment_lines].decode('<STR_LIT>').split())<EOL>)<EOL>N_cols = len(first_data_line)<EOL>if N_cols == <NUM_LIT>:<EOL><INDENT>colformat = '<STR_LIT>'<EOL><DEDENT>elif N_cols == <NUM_LIT:20>:<EOL><INDENT>colformat = '<STR_LIT>'<EOL><DEDENT>elif N_cols == <NUM_LIT:32>:<EOL><INDENT>colformat = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(fpath=lcpath,<EOL>ncols=N_cols))<EOL>return None<EOL><DEDENT>if colformat == '<STR_LIT>':<EOL><INDENT>col_names = ['<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>']<EOL>col_dtypes = ['<STR_LIT>',float,<EOL>float,float,'<STR_LIT>',<EOL>float,float,'<STR_LIT>',<EOL>float,float,'<STR_LIT>',<EOL>float,float,float,<EOL>float,float,float]<EOL>dtype_pairs = [el for el in zip(col_names, col_dtypes)]<EOL>data = np.genfromtxt(lcpath, names=col_names, dtype=col_dtypes,<EOL>skip_header=N_comment_lines, delimiter=None)<EOL>out = {}<EOL>for ix in range(len(data.dtype.names)):<EOL><INDENT>out[data.dtype.names[ix]] = data[data.dtype.names[ix]]<EOL><DEDENT><DEDENT>elif colformat == '<STR_LIT>':<EOL><INDENT>col_names = 
['<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>']<EOL>col_dtypes = ['<STR_LIT>',float,<EOL>float,float,'<STR_LIT>',<EOL>float,float,'<STR_LIT>',<EOL>float,float,'<STR_LIT>',<EOL>float,float,float,<EOL>float,float,float,<EOL>float,float,float]<EOL>dtype_pairs = [el for el in zip(col_names, col_dtypes)]<EOL>data = np.genfromtxt(lcpath, names=col_names, dtype=col_dtypes,<EOL>skip_header=N_comment_lines, delimiter=None)<EOL>out = {}<EOL>for ix in range(len(data.dtype.names)):<EOL><INDENT>out[data.dtype.names[ix]] = data[data.dtype.names[ix]]<EOL><DEDENT><DEDENT>elif colformat == '<STR_LIT>':<EOL><INDENT>col_names = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>out = astascii.read(lcpath, names=col_names, comment='<STR_LIT:#>')<EOL><DEDENT>return out<EOL>", "docstring": "Read .epdlc, and .tfalc light curves and return a corresponding labelled\ndict (if LC from <2012) or astropy table (if >=2012). Each has different\nkeys that can be accessed via .keys()\n\nInput:\nlcpath: path (string) to light curve data, which is a textfile with HAT\nLC data.\n\nExample:\ndat = read_original_textlc('HAT-115-0003266.epdlc')", "id": "f14688:m0"}
{"signature": "def _parse_csv_header(header):", "body": "<EOL>headerlines = header.split('<STR_LIT:\\n>')<EOL>headerlines = [x.lstrip('<STR_LIT>') for x in headerlines]<EOL>objectstart = headerlines.index('<STR_LIT>')<EOL>metadatastart = headerlines.index('<STR_LIT>')<EOL>camfilterstart = headerlines.index('<STR_LIT>')<EOL>photaperturestart = headerlines.index('<STR_LIT>')<EOL>columnstart = headerlines.index('<STR_LIT>')<EOL>lcstart = headerlines.index('<STR_LIT>')<EOL>objectinfo = headerlines[objectstart+<NUM_LIT:1>:metadatastart-<NUM_LIT:1>]<EOL>metadatainfo = headerlines[metadatastart+<NUM_LIT:1>:camfilterstart-<NUM_LIT:1>]<EOL>camfilterinfo = headerlines[camfilterstart+<NUM_LIT:1>:photaperturestart-<NUM_LIT:1>]<EOL>photapertureinfo = headerlines[photaperturestart+<NUM_LIT:1>:columnstart-<NUM_LIT:1>]<EOL>columninfo = headerlines[columnstart+<NUM_LIT:1>:lcstart-<NUM_LIT:1>]<EOL>metadict = {'<STR_LIT>':{}}<EOL>objectinfo = [x.split('<STR_LIT:;>') for x in objectinfo]<EOL>for elem in objectinfo:<EOL><INDENT>for kvelem in elem:<EOL><INDENT>key, val = kvelem.split('<STR_LIT>',<NUM_LIT:1>)<EOL>metadict['<STR_LIT>'][key.strip()] = (<EOL>_smartcast(val, METAKEYS[key.strip()])<EOL>)<EOL><DEDENT><DEDENT>metadict['<STR_LIT>'] = metadict['<STR_LIT>']['<STR_LIT>'][:]<EOL>del metadict['<STR_LIT>']['<STR_LIT>']<EOL>metadatainfo = [x.split('<STR_LIT:;>') for x in metadatainfo]<EOL>for elem in metadatainfo:<EOL><INDENT>for kvelem in elem:<EOL><INDENT>try:<EOL><INDENT>key, val = kvelem.split('<STR_LIT>',<NUM_LIT:1>)<EOL>if key.strip() == '<STR_LIT>':<EOL><INDENT>val = json.loads(val)<EOL><DEDENT>if key.strip() in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>val = int(val)<EOL><DEDENT>if key.strip() == '<STR_LIT>':<EOL><INDENT>val = float(val)<EOL><DEDENT>metadict[key.strip()] = val<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>' % kvelem)<EOL><DEDENT><DEDENT><DEDENT>metadict['<STR_LIT>'] = []<EOL>for row in camfilterinfo:<EOL><INDENT>filterid, 
filtername, filterdesc = row.split('<STR_LIT>')<EOL>metadict['<STR_LIT>'].append((int(filterid),<EOL>filtername,<EOL>filterdesc))<EOL><DEDENT>metadict['<STR_LIT>'] = {}<EOL>for row in photapertureinfo:<EOL><INDENT>apnum, appix = row.split('<STR_LIT>')<EOL>appix = float(appix.rstrip('<STR_LIT>'))<EOL>metadict['<STR_LIT>'][apnum.strip()] = appix<EOL><DEDENT>metadict['<STR_LIT>'] = []<EOL>for row in columninfo:<EOL><INDENT>colnum, colname, coldesc = row.split('<STR_LIT>')<EOL>metadict['<STR_LIT>'].append(colname)<EOL><DEDENT>return metadict<EOL>", "docstring": "This parses the CSV header from the CSV HAT sqlitecurve.\n\nReturns a dict that can be used to update an existing lcdict with the\nrelevant metadata info needed to form a full LC.", "id": "f14689:m9"}
{"signature": "def read_and_filter_sqlitecurve(lcfile,<EOL>columns=None,<EOL>sqlfilters=None,<EOL>raiseonfail=False,<EOL>returnarrays=True,<EOL>forcerecompress=False,<EOL>quiet=True):", "body": "<EOL>try:<EOL><INDENT>if '<STR_LIT>' in lcfile[-<NUM_LIT:4>:]:<EOL><INDENT>lcf = _uncompress_sqlitecurve(lcfile)<EOL><DEDENT>else:<EOL><INDENT>lcf = lcfile<EOL><DEDENT>db = sql.connect(lcf)<EOL>cur = db.cursor()<EOL>query = (\"<STR_LIT>\")<EOL>cur.execute(query)<EOL>objectinfo = cur.fetchone()<EOL>query = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>cur.execute(query)<EOL>lcinfo = cur.fetchone()<EOL>(lcversion, lcdatarelease, lccols, lcsortcol,<EOL>lcapertures, lcbestaperture,<EOL>objinfocols, objidcol,<EOL>lcunixtime, lcgitrev, lccomment) = lcinfo<EOL>lcapertures = json.loads(lcapertures)<EOL>lcbestaperture = json.loads(lcbestaperture)<EOL>objectinfokeys = objinfocols.split('<STR_LIT:U+002C>')<EOL>objectinfodict = {x:y for (x,y) in zip(objectinfokeys, objectinfo)}<EOL>objectid = objectinfodict[objidcol]<EOL>query = (\"<STR_LIT>\")<EOL>cur.execute(query)<EOL>filterinfo = cur.fetchall()<EOL>if columns and all([x in lccols.split('<STR_LIT:U+002C>') for x in columns]):<EOL><INDENT>LOGINFO('<STR_LIT>' % columns)<EOL>proceed = True<EOL><DEDENT>elif columns is None:<EOL><INDENT>columns = lccols.split('<STR_LIT:U+002C>')<EOL>proceed = True<EOL><DEDENT>else:<EOL><INDENT>proceed = False<EOL><DEDENT>if not proceed:<EOL><INDENT>if '<STR_LIT>' in lcfile[-<NUM_LIT:4>:] and lcf:<EOL><INDENT>_compress_sqlitecurve(lcf, force=forcerecompress)<EOL><DEDENT>LOGERROR('<STR_LIT>')<EOL>return None, \"<STR_LIT>\"<EOL><DEDENT>lcdict = {'<STR_LIT>':objectid,<EOL>'<STR_LIT>':objectinfodict,<EOL>'<STR_LIT>':objectinfokeys,<EOL>'<STR_LIT>':lcversion,<EOL>'<STR_LIT>':lcdatarelease,<EOL>'<STR_LIT>':columns,<EOL>'<STR_LIT>':lcsortcol,<EOL>'<STR_LIT>':lcapertures,<EOL>'<STR_LIT>':lcbestaperture,<EOL>'<STR_LIT>':lcunixtime,<EOL>'<STR_LIT>':lcgitrev,<EOL>'<STR_LIT>':lccomment,<EOL>'<STR_LIT>':filterinfo}<EOL>if 
((sqlfilters is not None) and<EOL>(isinstance(sqlfilters,str) or<EOL>isinstance(sqlfilters, unicode))):<EOL><INDENT>validatedfilters = _validate_sqlitecurve_filters(sqlfilters,<EOL>lccols.split('<STR_LIT:U+002C>'))<EOL>if validatedfilters is not None:<EOL><INDENT>LOGINFO('<STR_LIT>' % validatedfilters)<EOL>filtersok = True<EOL><DEDENT>else:<EOL><INDENT>filtersok = False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>validatedfilters = None<EOL>filtersok = None<EOL><DEDENT>if validatedfilters is not None:<EOL><INDENT>query = (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>).format(<EOL>columns='<STR_LIT:U+002C>'.join(columns),  <EOL>sqlfilter=validatedfilters,<EOL>sortcol=lcsortcol<EOL>)<EOL>lcdict['<STR_LIT>'] = validatedfilters<EOL><DEDENT>else:<EOL><INDENT>query = (\"<STR_LIT>\") % (<EOL>'<STR_LIT:U+002C>'.join(columns),<EOL>lcsortcol<EOL>)<EOL><DEDENT>cur.execute(query)<EOL>lightcurve = cur.fetchall()<EOL>if lightcurve and len(lightcurve) > <NUM_LIT:0>:<EOL><INDENT>lightcurve = list(zip(*lightcurve))<EOL>lcdict.update({x:y for (x,y) in zip(lcdict['<STR_LIT>'],<EOL>lightcurve)})<EOL>lcok = True<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = len(lightcurve[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>LOGWARNING('<STR_LIT>' % lcdict['<STR_LIT>'])<EOL>lcdict.update({x:y for (x,y) in<EOL>zip(lcdict['<STR_LIT>'],<EOL>[[] for x in lcdict['<STR_LIT>']])})<EOL>lcok = False<EOL><DEDENT>if filtersok is True and lcok:<EOL><INDENT>statusmsg = '<STR_LIT>'<EOL><DEDENT>elif filtersok is None and lcok:<EOL><INDENT>statusmsg = '<STR_LIT>'<EOL><DEDENT>elif filtersok is False and lcok:<EOL><INDENT>statusmsg = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>statusmsg = '<STR_LIT>'<EOL><DEDENT>returnval = (lcdict, statusmsg)<EOL>if '<STR_LIT>' in lcfile[-<NUM_LIT:4>:] and lcf:<EOL><INDENT>_compress_sqlitecurve(lcf, force=forcerecompress)<EOL><DEDENT>if returnarrays:<EOL><INDENT>for column in lcdict['<STR_LIT>']:<EOL><INDENT>lcdict[column] = np.array([x if x is not None else np.nan<EOL>for x in 
lcdict[column]])<EOL><DEDENT><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>if not quiet:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' % lcfile)<EOL><DEDENT>returnval = (None, '<STR_LIT>')<EOL>if '<STR_LIT>' in lcfile[-<NUM_LIT:4>:] and lcf:<EOL><INDENT>_compress_sqlitecurve(lcf, force=forcerecompress)<EOL><DEDENT>if raiseonfail:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>return returnval<EOL>", "docstring": "This reads a HAT sqlitecurve and optionally filters it.\n\n    Parameters\n    ----------\n\n    lcfile : str\n        The path to the HAT sqlitecurve file.\n\n    columns : list\n        A list of columns to extract from the ligh curve file. If None, then\n        returns all columns present in the latest `columnlist` in the light\n        curve.\n\n    sqlfilters : list of str\n        If no None, it must be a list of text SQL filters that apply to the\n        columns in the lightcurve.\n\n    raiseonfail : bool\n        If this is True, an Exception when reading the LC will crash the\n        function instead of failing silently and returning None as the result.\n\n    returnarrays : bool\n        If this is True, the output lcdict contains columns as np.arrays instead\n        of lists. You generally want this to be True.\n\n    forcerecompress : bool\n        If True, the sqlitecurve will be recompressed even if a compressed\n        version of it is found. This usually happens when sqlitecurve opening is\n        interrupted by the OS for some reason, leaving behind a gzipped and\n        un-gzipped copy. By default, this function refuses to overwrite the\n        existing gzipped version so if the un-gzipped version is corrupt but\n        that one isn't, it can be safely recovered.\n\n    quiet : bool\n        If True, will not warn about any problems, even if the light curve\n        reading fails (the only clue then will be the return value of\n        None). 
Useful for batch processing of many many light curves.\n\n    Returns\n    -------\n\n    tuple : (lcdict, status_message)\n        A two-element tuple is returned, with the first element being the\n        lcdict.", "id": "f14689:m6"}
{"signature": "def main():", "body": "<EOL>import signal<EOL>signal.signal(signal.SIGPIPE, signal.SIG_DFL)<EOL>import argparse<EOL>aparser = argparse.ArgumentParser(<EOL>description='<STR_LIT>'<EOL>)<EOL>aparser.add_argument(<EOL>'<STR_LIT>',<EOL>action='<STR_LIT:store>',<EOL>type=str,<EOL>help=(\"<STR_LIT>\")<EOL>)<EOL>aparser.add_argument(<EOL>'<STR_LIT>',<EOL>action='<STR_LIT:store_true>',<EOL>default=False,<EOL>help=(\"<STR_LIT>\")<EOL>)<EOL>args = aparser.parse_args()<EOL>filetoread = args.hatlcfile<EOL>if not os.path.exists(filetoread):<EOL><INDENT>LOGERROR(\"<STR_LIT>\" % filetoread)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>filename = os.path.basename(filetoread)<EOL>if filename.endswith('<STR_LIT>') or filename.endswith('<STR_LIT>'):<EOL><INDENT>if args.describe:<EOL><INDENT>describe(read_csvlc(filename))<EOL>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>with gzip.open(filename,'<STR_LIT:rb>') as infd:<EOL><INDENT>for line in infd:<EOL><INDENT>print(line.decode(),end='<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif filename.endswith('<STR_LIT>'):<EOL><INDENT>lcdict, msg = read_and_filter_sqlitecurve(filetoread)<EOL>describe(lcdict, offsetwith='<STR_LIT:#>')<EOL>if args.describe:<EOL><INDENT>sys.exit(<NUM_LIT:0>)<EOL><DEDENT>apertures = sorted(lcdict['<STR_LIT>'].keys())<EOL>for aper in apertures:<EOL><INDENT>COLUMNDEFS.update({'<STR_LIT>' % (x, aper): COLUMNDEFS[x] for x in<EOL>LC_MAG_COLUMNS})<EOL>COLUMNDEFS.update({'<STR_LIT>' % (x, aper): COLUMNDEFS[x] for x in<EOL>LC_ERR_COLUMNS})<EOL>COLUMNDEFS.update({'<STR_LIT>' % (x, aper): COLUMNDEFS[x] for x in<EOL>LC_FLAG_COLUMNS})<EOL><DEDENT>formstr = '<STR_LIT:U+002C>'.join([COLUMNDEFS[x][<NUM_LIT:1>] for x in lcdict['<STR_LIT>']])<EOL>ndet = lcdict['<STR_LIT>']['<STR_LIT>']<EOL>for ind in range(ndet):<EOL><INDENT>line = [lcdict[x][ind] for x in lcdict['<STR_LIT>']]<EOL>formline = formstr % tuple(line)<EOL>print(formline)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>' % 
filetoread)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "This is called when we're executed from the commandline.\n\nThe current usage from the command-line is described below::\n\n    usage: hatlc [-h] [--describe] hatlcfile\n\n    read a HAT LC of any format and output to stdout\n\n    positional arguments:\n      hatlcfile   path to the light curve you want to read and pipe to stdout\n\n    optional arguments:\n      -h, --help  show this help message and exit\n      --describe  don't dump the columns, show only object info and LC metadata", "id": "f14689:m17"}
{"signature": "def read_lcc_csvlc(lcfile):", "body": "<EOL>if '<STR_LIT>' in os.path.basename(lcfile):<EOL><INDENT>infd = gzip.open(lcfile,'<STR_LIT:rb>')<EOL><DEDENT>else:<EOL><INDENT>infd = open(lcfile,'<STR_LIT:rb>')<EOL><DEDENT>lctext = infd.read().decode()<EOL>infd.close()<EOL>lctextlines = lctext.split('<STR_LIT:\\n>')<EOL>lcformat = lctextlines[<NUM_LIT:0>]<EOL>commentchar = lctextlines[<NUM_LIT:1>]<EOL>lcstart = lctextlines.index('<STR_LIT>' % commentchar)<EOL>headerlines = lctextlines[:lcstart+<NUM_LIT:1>]<EOL>lclines = lctextlines[lcstart+<NUM_LIT:1>:]<EOL>metadata, columns, separator = _parse_csv_header_lcc_csv_v1(headerlines)<EOL>objectid = metadata['<STR_LIT>']['<STR_LIT>']<EOL>objectinfo = {key:metadata[key]['<STR_LIT>'] for key in metadata}<EOL>colnames = []<EOL>colnum = []<EOL>coldtypes = []<EOL>for k in columns:<EOL><INDENT>coldef = columns[k]<EOL>colnames.append(k)<EOL>colnum.append(coldef['<STR_LIT>'])<EOL>coldtypes.append(coldef['<STR_LIT>'])<EOL><DEDENT>coldtypes = '<STR_LIT:U+002C>'.join(coldtypes)<EOL>recarr = np.genfromtxt(<EOL>lclines,<EOL>comments=commentchar,<EOL>delimiter=separator,<EOL>usecols=colnum,<EOL>autostrip=True,<EOL>names=colnames,<EOL>dtype=coldtypes<EOL>)<EOL>lcdict = {x:recarr[x] for x in colnames}<EOL>lcdict['<STR_LIT>'] = lcformat<EOL>lcdict['<STR_LIT>'] = objectid<EOL>lcdict['<STR_LIT>'] = objectinfo<EOL>lcdict['<STR_LIT>'] = colnames<EOL>lcdict['<STR_LIT>'] = columns<EOL>lcdict['<STR_LIT>'] = metadata<EOL>return lcdict<EOL>", "docstring": "This reads a CSV LC produced by an `LCC-Server\n    <https://github.com/waqasbhatti/lcc-server>`_ instance.\n\n    Parameters\n    ----------\n\n    lcfile : str\n        The LC file to read.\n\n    Returns\n    -------\n\n    dict\n        Returns an lcdict that's readable by most astrobase functions for\n        further processing.", "id": "f14689:m11"}
{"signature": "def read_csvlc(lcfile):", "body": "<EOL>if '<STR_LIT>' in os.path.basename(lcfile):<EOL><INDENT>LOGINFO('<STR_LIT>' % lcfile)<EOL>infd = gzip.open(lcfile,'<STR_LIT:rb>')<EOL><DEDENT>else:<EOL><INDENT>LOGINFO('<STR_LIT>' % lcfile)<EOL>infd = open(lcfile,'<STR_LIT:rb>')<EOL><DEDENT>lcformat_check = infd.read(<NUM_LIT:12>).decode()<EOL>if '<STR_LIT>' in lcformat_check:<EOL><INDENT>infd.close()<EOL>return read_lcc_csvlc(lcfile)<EOL><DEDENT>else:<EOL><INDENT>infd.seek(<NUM_LIT:0>)<EOL><DEDENT>lctext = infd.read().decode()  <EOL>infd.close()<EOL>lcstart = lctext.index('<STR_LIT>')<EOL>lcheader = lctext[:lcstart+<NUM_LIT:12>]<EOL>lccolumns = lctext[lcstart+<NUM_LIT>:].split('<STR_LIT:\\n>')<EOL>lccolumns = [x for x in lccolumns if len(x) > <NUM_LIT:0>]<EOL>lcdict = _parse_csv_header(lcheader)<EOL>lccolumns = [x.split('<STR_LIT:U+002C>') for x in lccolumns]<EOL>lccolumns = list(zip(*lccolumns))  <EOL>for colind, col in enumerate(lcdict['<STR_LIT>']):<EOL><INDENT>if (col.split('<STR_LIT:_>')[<NUM_LIT:0>] in LC_MAG_COLUMNS or<EOL>col.split('<STR_LIT:_>')[<NUM_LIT:0>] in LC_ERR_COLUMNS or<EOL>col.split('<STR_LIT:_>')[<NUM_LIT:0>] in LC_FLAG_COLUMNS):<EOL><INDENT>lcdict[col] = np.array([_smartcast(x,<EOL>COLUMNDEFS[col.split('<STR_LIT:_>')[<NUM_LIT:0>]][<NUM_LIT:2>])<EOL>for x in lccolumns[colind]])<EOL><DEDENT>elif col in COLUMNDEFS:<EOL><INDENT>lcdict[col] = np.array([_smartcast(x,COLUMNDEFS[col][<NUM_LIT:2>])<EOL>for x in lccolumns[colind]])<EOL><DEDENT>else:<EOL><INDENT>LOGWARNING('<STR_LIT>' % col)<EOL>continue<EOL><DEDENT><DEDENT>return lcdict<EOL>", "docstring": "This reads a HAT data server or LCC-Server produced CSV light curve\n    into an lcdict.\n\n    This will automatically figure out the format of the file\n    provided. Currently, it can read:\n\n    - legacy HAT data server CSV LCs (e.g. 
from\n      https://hatsouth.org/planets/lightcurves.html) with an extension of the\n      form: `.hatlc.csv.gz`.\n    - all LCC-Server produced LCC-CSV-V1 LCs (e.g. from\n      https://data.hatsurveys.org) with an extension of the form: `-csvlc.gz`.\n\n\n    Parameters\n    ----------\n\n    lcfile : str\n        The light curve file to read.\n\n    Returns\n    -------\n\n    dict\n        Returns an lcdict that can be read and used by many astrobase processing\n        functions.", "id": "f14689:m13"}
{"signature": "def _smartcast(castee, caster, subval=None):", "body": "try:<EOL><INDENT>return caster(castee)<EOL><DEDENT>except Exception as e:<EOL><INDENT>if caster is float or caster is int:<EOL><INDENT>return nan<EOL><DEDENT>elif caster is str:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>return subval<EOL><DEDENT><DEDENT>", "docstring": "This just tries to apply the caster function to castee.\n\nReturns None on failure.", "id": "f14689:m8"}
{"signature": "def normalize_lcdict(lcdict,<EOL>timecol='<STR_LIT>',<EOL>magcols='<STR_LIT:all>',<EOL>mingap=<NUM_LIT>,<EOL>normto='<STR_LIT>',<EOL>debugmode=False,<EOL>quiet=False):", "body": "<EOL>if '<STR_LIT>' in lcdict and len(lcdict['<STR_LIT>']) > <NUM_LIT:0>:<EOL><INDENT>if not quiet:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL><DEDENT>return lcdict<EOL><DEDENT>if timecol in lcdict:<EOL><INDENT>times = lcdict[timecol]<EOL><DEDENT>elif '<STR_LIT>' in lcdict:<EOL><INDENT>times = lcdict['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\" %<EOL>lcdict['<STR_LIT>'])<EOL>return lcdict<EOL><DEDENT>ngroups, timegroups = find_lc_timegroups(np.array(times),<EOL>mingap=mingap)<EOL>if '<STR_LIT>' in lcdict:<EOL><INDENT>apertures = sorted(lcdict['<STR_LIT>'].keys())<EOL><DEDENT>elif '<STR_LIT>' in lcdict and '<STR_LIT>' in lcdict['<STR_LIT>']:<EOL><INDENT>apertures = sorted(lcdict['<STR_LIT>']['<STR_LIT>'].keys())<EOL><DEDENT>aimcols = [('<STR_LIT>' % x) for x in apertures if ('<STR_LIT>' % x) in lcdict]<EOL>armcols = [('<STR_LIT>' % x) for x in apertures if ('<STR_LIT>' % x) in lcdict]<EOL>aepcols = [('<STR_LIT>' % x)for x in apertures if ('<STR_LIT>' % x) in lcdict]<EOL>atfcols = [('<STR_LIT>' % x) for x in apertures if ('<STR_LIT>' % x) in lcdict]<EOL>psimcols = [x for x in ['<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>'] if x in lcdict]<EOL>irmcols = [('<STR_LIT>' % x) for x in apertures if ('<STR_LIT>' % x) in lcdict]<EOL>iepcols = [('<STR_LIT>' % x) for x in apertures if ('<STR_LIT>' % x) in lcdict]<EOL>itfcols = [('<STR_LIT>' % x) for x in apertures if ('<STR_LIT>' % x) in lcdict]<EOL>if magcols == '<STR_LIT:all>':<EOL><INDENT>cols_to_normalize = (aimcols + armcols + aepcols + atfcols +<EOL>psimcols + irmcols + iepcols + itfcols)<EOL><DEDENT>elif magcols == '<STR_LIT>':<EOL><INDENT>cols_to_normalize = (irmcols + (['<STR_LIT>'] if '<STR_LIT>' in lcdict else []) +<EOL>irmcols)<EOL><DEDENT>elif magcols == '<STR_LIT>':<EOL><INDENT>cols_to_normalize = 
(aepcols + (['<STR_LIT>'] if '<STR_LIT>' in lcdict else []) +<EOL>iepcols)<EOL><DEDENT>elif magcols == '<STR_LIT>':<EOL><INDENT>cols_to_normalize = (atfcols + (['<STR_LIT>'] if '<STR_LIT>' in lcdict else []) +<EOL>itfcols)<EOL><DEDENT>elif magcols == '<STR_LIT>':<EOL><INDENT>cols_to_normalize = (aepcols + (['<STR_LIT>'] if '<STR_LIT>' in lcdict else []) +<EOL>iepcols + atfcols +<EOL>(['<STR_LIT>'] if '<STR_LIT>' in lcdict else []) +<EOL>itfcols)<EOL><DEDENT>else:<EOL><INDENT>cols_to_normalize = magcols.split('<STR_LIT:U+002C>')<EOL>cols_to_normalize = [x.strip() for x in cols_to_normalize]<EOL><DEDENT>colsnormalized = []<EOL>for col in cols_to_normalize:<EOL><INDENT>if col in lcdict:<EOL><INDENT>mags = lcdict[col]<EOL>mags = [(nan if x is None else x) for x in mags]<EOL>mags = np.array(mags)<EOL>colsnormalized.append(col)<EOL>finite_ind = np.isfinite(mags)<EOL>if any(finite_ind):<EOL><INDENT>global_mag_median = np.median(mags[finite_ind])<EOL>for tgind, tg in enumerate(timegroups):<EOL><INDENT>finite_ind = np.isfinite(mags[tg])<EOL>group_median = np.median((mags[tg])[finite_ind])<EOL>mags[tg] = mags[tg] - group_median<EOL>if debugmode:<EOL><INDENT>LOGDEBUG('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(col, tgind,<EOL>len(mags[tg]),<EOL>len(finite_ind),<EOL>group_median))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>LOGWARNING('<STR_LIT>' % col)<EOL>continue<EOL><DEDENT>if normto == '<STR_LIT>':<EOL><INDENT>mags = mags + global_mag_median<EOL><DEDENT>elif normto in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if (normto in lcdict['<STR_LIT>'] and<EOL>lcdict['<STR_LIT>'][normto] is not None):<EOL><INDENT>mags = mags + lcdict['<STR_LIT>'][normto]<EOL><DEDENT>else:<EOL><INDENT>if not quiet:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>' % normto)<EOL><DEDENT>normto = '<STR_LIT>'<EOL>mags = mags + global_mag_median<EOL><DEDENT><DEDENT>lcdict[col] = mags<EOL><DEDENT>else:<EOL><INDENT>if not 
quiet:<EOL><INDENT>LOGWARNING('<STR_LIT>' % col)<EOL><DEDENT>continue<EOL><DEDENT><DEDENT>lcnormcols = ('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>') % (<EOL>repr(colsnormalized),<EOL>mingap,<EOL>normto<EOL>)<EOL>lcdict['<STR_LIT>'] = lcnormcols<EOL>return lcdict<EOL>", "docstring": "This normalizes magcols in `lcdict` using `timecol` to find timegroups.\n\n    Parameters\n    ----------\n\n    lcdict : dict\n        The input lcdict to process.\n\n    timecol : str\n        The key in the lcdict that is to be used to extract the time column.\n\n    magcols : 'all' or list of str\n        If this is 'all', all of the columns in the lcdict that are indicated to\n        be magnitude measurement columns are normalized. If this is a list of\n        str, must contain the keys of the lcdict specifying which magnitude\n        columns will be normalized.\n\n    mingap : float\n        This defines how much the difference between consecutive measurements is\n        allowed to be to consider them as parts of different timegroups. By\n        default it is set to 4.0 days.\n\n    normto : {'globalmedian', 'zero', 'jmag', 'hmag', 'kmag', 'bmag', 'vmag', 'sdssg', 'sdssr', 'sdssi'}\n        This indicates which column will be the normalization target. If this is\n        'globalmedian', the normalization will be to the global median of each\n        LC column. If this is 'zero', will normalize to 0.0 for each LC\n        column. Otherwise, will normalize to the value of one of the other keys\n        in the lcdict['objectinfo'][magkey], meaning the normalization will be\n        to some form of catalog magnitude.\n\n    debugmode : bool\n        If True, will indicate progress as time-groups are found and processed.\n\n    quiet : bool\n        If True, will not emit any messages when processing.\n\n    Returns\n    -------\n\n    dict\n        Returns the lcdict with the magnitude measurements normalized as\n        specified. 
The normalization happens IN PLACE.", "id": "f14689:m15"}
{"signature": "def read_csv_lightcurve(lcfile):", "body": "<EOL>if '<STR_LIT>' in os.path.basename(lcfile):<EOL><INDENT>LOGINFO('<STR_LIT>' % lcfile)<EOL>infd = gzip.open(lcfile,'<STR_LIT:rb>')<EOL><DEDENT>else:<EOL><INDENT>LOGINFO('<STR_LIT>' % lcfile)<EOL>infd = open(lcfile,'<STR_LIT:rb>')<EOL><DEDENT>lctext = infd.read().decode()<EOL>infd.close()<EOL>lcstart = lctext.index('<STR_LIT>')<EOL>lcheader = lctext[:lcstart+<NUM_LIT:12>]<EOL>lccolumns = lctext[lcstart+<NUM_LIT>:].split('<STR_LIT:\\n>')<EOL>lccolumns = [x.split('<STR_LIT:U+002C>') for x in lccolumns if len(x) > <NUM_LIT:0>]<EOL>lcdict = _parse_csv_header(lcheader)<EOL>lccolumns = list(zip(*lccolumns))<EOL>for colind, col in enumerate(lcdict['<STR_LIT>']):<EOL><INDENT>lcdict[col.lower()] = np.array([COLUMNDEFS[col][<NUM_LIT:2>](x)<EOL>for x in lccolumns[colind]])<EOL><DEDENT>lcdict['<STR_LIT>'] = [x.lower() for x in lcdict['<STR_LIT>']]<EOL>return lcdict<EOL>", "docstring": "This reads in a K2 lightcurve in CSV format. Transparently reads gzipped\nfiles.\n\nParameters\n----------\n\nlcfile : str\n    The light curve file to read.\n\nReturns\n-------\n\ndict\n    Returns an lcdict.", "id": "f14690:m1"}
{"signature": "def add_variability_to_fakelc_collection(simbasedir,<EOL>override_paramdists=None,<EOL>overwrite_existingvar=False):", "body": "<EOL>infof = os.path.join(simbasedir,'<STR_LIT>')<EOL>with open(infof, '<STR_LIT:rb>') as infd:<EOL><INDENT>lcinfo = pickle.load(infd)<EOL><DEDENT>lclist = lcinfo['<STR_LIT>']<EOL>varflag = lcinfo['<STR_LIT>']<EOL>vartypes = lcinfo['<STR_LIT>']<EOL>vartind = <NUM_LIT:0><EOL>varinfo = {}<EOL>for lc, varf, _lcind in zip(lclist, varflag, range(len(lclist))):<EOL><INDENT>if varf:<EOL><INDENT>thisvartype = vartypes[vartind]<EOL>if (override_paramdists and<EOL>isinstance(override_paramdists, dict) and<EOL>thisvartype in override_paramdists and<EOL>isinstance(override_paramdists[thisvartype], dict)):<EOL><INDENT>thisoverride_paramdists = override_paramdists[thisvartype]<EOL><DEDENT>else:<EOL><INDENT>thisoverride_paramdists = None<EOL><DEDENT>varlc = add_fakelc_variability(<EOL>lc, thisvartype,<EOL>override_paramdists=thisoverride_paramdists,<EOL>overwrite=overwrite_existingvar<EOL>)<EOL>varinfo[varlc['<STR_LIT>']] = {'<STR_LIT>': varlc['<STR_LIT>'],<EOL>'<STR_LIT>': varlc['<STR_LIT>']}<EOL>vartind = vartind + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>varlc = add_fakelc_variability(<EOL>lc, None,<EOL>overwrite=overwrite_existingvar<EOL>)<EOL>varinfo[varlc['<STR_LIT>']] = {'<STR_LIT>': varlc['<STR_LIT>'],<EOL>'<STR_LIT>': varlc['<STR_LIT>']}<EOL><DEDENT><DEDENT>lcinfo['<STR_LIT>'] = varinfo<EOL>tempoutf = '<STR_LIT>' % (infof, md5(npr.bytes(<NUM_LIT:4>)).hexdigest()[-<NUM_LIT:8>:])<EOL>with open(tempoutf, '<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(lcinfo, outfd, pickle.HIGHEST_PROTOCOL)<EOL><DEDENT>if os.path.exists(tempoutf):<EOL><INDENT>shutil.copy(tempoutf, infof)<EOL>os.remove(tempoutf)<EOL><DEDENT>else:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' %<EOL>os.path.dirname(tempoutf))<EOL>raise<EOL><DEDENT>return lcinfo<EOL>", "docstring": "This adds variability and noise to all fake LCs in `simbasedir`.\n\n    If an object is marked 
as variable in the `fakelcs-info`.pkl file in\n    `simbasedir`, a variable signal will be added to its light curve based on\n    its selected type, default period and amplitude distribution, the\n    appropriate params, etc. the epochs for each variable object will be chosen\n    uniformly from its time-range (and may not necessarily fall on a actual\n    observed time). Nonvariable objects will only have noise added as determined\n    by their params, but no variable signal will be added.\n\n    Parameters\n    ----------\n\n    simbasedir : str\n        The directory containing the fake LCs to process.\n\n    override_paramdists : dict\n        This can be used to override the stored variable parameters in each fake\n        LC. It should be a dict of the following form::\n\n            {'<vartype1>': {'<param1>: a scipy.stats distribution function or\n                                       the np.random.randint function,\n                            .\n                            .\n                            .\n                            '<paramN>: a scipy.stats distribution function\n                                       or the np.random.randint function}\n\n        for any vartype in VARTYPE_LCGEN_MAP. These are used to override the\n        default parameter distributions for each variable type.\n\n    overwrite_existingvar : bool\n        If this is True, then will overwrite any existing variability in the\n        input fake LCs in `simbasedir`.\n\n    Returns\n    -------\n\n    dict\n        This returns a dict containing the fake LC filenames as keys and\n        variability info for each as values.", "id": "f14691:m15"}
{"signature": "def generate_transit_lightcurve(<EOL>times,<EOL>mags=None,<EOL>errs=None,<EOL>paramdists={'<STR_LIT>':sps.uniform(loc=<NUM_LIT:0.1>,scale=<NUM_LIT>),<EOL>'<STR_LIT>':sps.uniform(loc=<NUM_LIT>,scale=<NUM_LIT>),<EOL>'<STR_LIT>':sps.uniform(loc=<NUM_LIT>,scale=<NUM_LIT>)},<EOL>magsarefluxes=False,<EOL>):", "body": "if mags is None:<EOL><INDENT>mags = np.full_like(times, <NUM_LIT:0.0>)<EOL><DEDENT>if errs is None:<EOL><INDENT>errs = np.full_like(times, <NUM_LIT:0.0>)<EOL><DEDENT>epoch = npr.random()*(times.max() - times.min()) + times.min()<EOL>period = paramdists['<STR_LIT>'].rvs(size=<NUM_LIT:1>)<EOL>depth = paramdists['<STR_LIT>'].rvs(size=<NUM_LIT:1>)<EOL>duration = paramdists['<STR_LIT>'].rvs(size=<NUM_LIT:1>)<EOL>ingduration = npr.random()*(<NUM_LIT:0.5>*duration - <NUM_LIT>*duration) + <NUM_LIT>*duration<EOL>if magsarefluxes and depth < <NUM_LIT:0.0>:<EOL><INDENT>depth = -depth<EOL><DEDENT>elif not magsarefluxes and depth > <NUM_LIT:0.0>:<EOL><INDENT>depth = -depth<EOL><DEDENT>modelmags, phase, ptimes, pmags, perrs = (<EOL>transits.trapezoid_transit_func([period, epoch, depth,<EOL>duration, ingduration],<EOL>times,<EOL>mags,<EOL>errs)<EOL>)<EOL>timeind = np.argsort(ptimes)<EOL>mtimes = ptimes[timeind]<EOL>mmags = modelmags[timeind]<EOL>merrs = perrs[timeind]<EOL>modeldict = {<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{x:np.asscalar(y) for x,y in zip(['<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'],<EOL>[period,<EOL>epoch,<EOL>depth,<EOL>duration,<EOL>ingduration])},<EOL>'<STR_LIT>':mtimes,<EOL>'<STR_LIT>':mmags,<EOL>'<STR_LIT>':merrs,<EOL>'<STR_LIT>':period,<EOL>'<STR_LIT>':depth<EOL>}<EOL>return modeldict<EOL>", "docstring": "This generates fake planet transit light curves.\n\n    Parameters\n    ----------\n\n    times : np.array\n        This is an array of time values that will be used as the time base.\n\n    mags,errs : np.array\n        These arrays will have the model added to them. 
If either is\n        None, `np.full_like(times, 0.0)` will used as a substitute and the model\n        light curve will be centered around 0.0.\n\n    paramdists : dict\n        This is a dict containing parameter distributions to use for the\n        model params, containing the following keys ::\n\n            {'transitperiod', 'transitdepth', 'transitduration'}\n\n        The values of these keys should all be 'frozen' scipy.stats distribution\n        objects, e.g.:\n\n        https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n        The variability epoch will be automatically chosen from a uniform\n        distribution between `times.min()` and `times.max()`.\n\n        The ingress duration will be automatically chosen from a uniform\n        distribution ranging from 0.05 to 0.5 of the transitduration.\n\n        The transitdepth will be flipped automatically as appropriate if\n        `magsarefluxes=True`.\n\n    magsarefluxes : bool\n        If the generated time series is meant to be a flux time-series, set this\n        to True to get the correct sign of variability amplitude.\n\n    Returns\n    -------\n\n    dict\n        A dict of the form below is returned::\n\n            {'vartype': 'planet',\n             'params': {'transitperiod': generated value of period,\n                        'transitepoch': generated value of epoch,\n                        'transitdepth': generated value of transit depth,\n                        'transitduration': generated value of transit duration,\n                        'ingressduration': generated value of transit ingress\n                                           duration},\n             'times': the model times,\n             'mags': the model mags,\n             'errs': the model errs,\n             'varperiod': the generated period of variability == 'transitperiod'\n             'varamplitude': the generated amplitude of\n                             variability == 
'transitdepth'}", "id": "f14691:m1"}
{"signature": "def collection_worker(task):", "body": "lcfile, outdir, kwargs = task<EOL>try:<EOL><INDENT>fakelcresults = make_fakelc(<EOL>lcfile,<EOL>outdir,<EOL>**kwargs<EOL>)<EOL>return fakelcresults<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' % lcfile)<EOL>return None<EOL><DEDENT>", "docstring": "This wraps `process_fakelc` for `make_fakelc_collection` below.\n\nParameters\n----------\n\ntask : tuple\n    This is of the form::\n\n        task[0] = lcfile\n        task[1] = outdir\n        task[2] = magrms\n        task[3] = dict with keys: {'lcformat', 'timecols', 'magcols',\n                                   'errcols', 'randomizeinfo'}\n\nReturns\n-------\n\ntuple\n    This returns a tuple of the form::\n\n        (fakelc_fpath,\n         fakelc_lcdict['columns'],\n         fakelc_lcdict['objectinfo'],\n         fakelc_lcdict['moments'])", "id": "f14691:m12"}
{"signature": "def generate_flare_lightcurve(<EOL>times,<EOL>mags=None,<EOL>errs=None,<EOL>paramdists={<EOL>'<STR_LIT>':sps.uniform(loc=<NUM_LIT>,scale=<NUM_LIT>),<EOL>'<STR_LIT>':[<NUM_LIT:1>,<NUM_LIT:5>],<EOL>'<STR_LIT>':sps.uniform(loc=<NUM_LIT>, scale=<NUM_LIT>),<EOL>'<STR_LIT>':sps.uniform(loc=<NUM_LIT>, scale=<NUM_LIT>)<EOL>},<EOL>magsarefluxes=False,<EOL>):", "body": "if mags is None:<EOL><INDENT>mags = np.full_like(times, <NUM_LIT:0.0>)<EOL><DEDENT>if errs is None:<EOL><INDENT>errs = np.full_like(times, <NUM_LIT:0.0>)<EOL><DEDENT>nflares = npr.randint(paramdists['<STR_LIT>'][<NUM_LIT:0>],<EOL>high=paramdists['<STR_LIT>'][<NUM_LIT:1>])<EOL>flarepeaktimes = (<EOL>npr.random(<EOL>size=nflares<EOL>)*(times.max() - times.min()) + times.min()<EOL>)<EOL>params = {'<STR_LIT>':nflares}<EOL>for flareind, peaktime in zip(range(nflares), flarepeaktimes):<EOL><INDENT>amp = paramdists['<STR_LIT>'].rvs(size=<NUM_LIT:1>)<EOL>risestdev = paramdists['<STR_LIT>'].rvs(size=<NUM_LIT:1>)<EOL>decayconst = paramdists['<STR_LIT>'].rvs(size=<NUM_LIT:1>)<EOL>if magsarefluxes and amp < <NUM_LIT:0.0>:<EOL><INDENT>amp = -amp<EOL><DEDENT>elif not magsarefluxes and amp > <NUM_LIT:0.0>:<EOL><INDENT>amp = -amp<EOL><DEDENT>modelmags, ptimes, pmags, perrs = (<EOL>flares.flare_model(<EOL>[amp, peaktime, risestdev, decayconst],<EOL>times,<EOL>mags,<EOL>errs<EOL>)<EOL>)<EOL>mags = modelmags<EOL>params[flareind] = {'<STR_LIT>':peaktime,<EOL>'<STR_LIT>':amp,<EOL>'<STR_LIT>':risestdev,<EOL>'<STR_LIT>':decayconst}<EOL><DEDENT>modeldict = {<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':params,<EOL>'<STR_LIT>':times,<EOL>'<STR_LIT>':mags,<EOL>'<STR_LIT>':errs,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':[params[x]['<STR_LIT>']<EOL>for x in range(params['<STR_LIT>'])],<EOL>}<EOL>return modeldict<EOL>", "docstring": "This generates fake flare light curves.\n\n    Parameters\n    ----------\n\n    times : np.array\n        This is an array of time values that will be used as the time base.\n\n    mags,errs : 
np.array\n        These arrays will have the model added to them. If either is\n        None, `np.full_like(times, 0.0)` will used as a substitute and the model\n        light curve will be centered around 0.0.\n\n    paramdists : dict\n        This is a dict containing parameter distributions to use for the\n        model params, containing the following keys ::\n\n            {'amplitude', 'nflares', 'risestdev', 'decayconst'}\n\n        The values of these keys should all be 'frozen' scipy.stats distribution\n        objects, e.g.:\n\n        https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions\n        The `flare_peak_time` for each flare will be generated automatically\n        between `times.min()` and `times.max()` using a uniform distribution.\n\n        The `amplitude` will be flipped automatically as appropriate if\n        `magsarefluxes=True`.\n\n    magsarefluxes : bool\n        If the generated time series is meant to be a flux time-series, set this\n        to True to get the correct sign of variability amplitude.\n\n    Returns\n    -------\n\n    dict\n        A dict of the form below is returned::\n\n            {'vartype': 'flare',\n             'params': {'amplitude': generated value of flare amplitudes,\n                        'nflares': generated value of number of flares,\n                        'risestdev': generated value of stdev of rise time,\n                        'decayconst': generated value of decay constant,\n                        'peaktime': generated value of flare peak time},\n             'times': the model times,\n             'mags': the model mags,\n             'errs': the model errs,\n             'varamplitude': the generated amplitude of\n                             variability == 'amplitude'}", "id": "f14691:m3"}
{"signature": "def periodicvar_recovery(fakepfpkl,<EOL>simbasedir,<EOL>period_tolerance=<NUM_LIT>):", "body": "if fakepfpkl.endswith('<STR_LIT>'):<EOL><INDENT>infd = gzip.open(fakepfpkl,'<STR_LIT:rb>')<EOL><DEDENT>else:<EOL><INDENT>infd = open(fakepfpkl,'<STR_LIT:rb>')<EOL><DEDENT>fakepf = pickle.load(infd)<EOL>infd.close()<EOL>objectid, lcfbasename = fakepf['<STR_LIT>'], fakepf['<STR_LIT>']<EOL>lcfpath = os.path.join(simbasedir,'<STR_LIT>',lcfbasename)<EOL>if not os.path.exists(lcfpath):<EOL><INDENT>LOGERROR('<STR_LIT>' % (objectid,<EOL>lcfpath))<EOL>return None<EOL><DEDENT>fakelc = lcproc._read_pklc(lcfpath)<EOL>actual_varparams, actual_varperiod, actual_varamplitude, actual_vartype = (<EOL>fakelc['<STR_LIT>'],<EOL>fakelc['<STR_LIT>'],<EOL>fakelc['<STR_LIT>'],<EOL>fakelc['<STR_LIT>']<EOL>)<EOL>actual_moments = fakelc['<STR_LIT>']<EOL>magcols = fakelc['<STR_LIT>']<EOL>pfres = {<EOL>'<STR_LIT>':objectid,<EOL>'<STR_LIT>':simbasedir,<EOL>'<STR_LIT>':magcols,<EOL>'<STR_LIT>':os.path.abspath(lcfpath),<EOL>'<STR_LIT>':os.path.abspath(fakepfpkl),<EOL>'<STR_LIT>':actual_vartype,<EOL>'<STR_LIT>':actual_varperiod,<EOL>'<STR_LIT>':actual_varamplitude,<EOL>'<STR_LIT>':actual_varparams,<EOL>'<STR_LIT>':actual_moments,<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':[],<EOL>}<EOL>for magcol in magcols:<EOL><INDENT>for pfm in lcproc.PFMETHODS:<EOL><INDENT>if pfm in fakepf[magcol]:<EOL><INDENT>for rpi, rp in enumerate(<EOL>fakepf[magcol][pfm]['<STR_LIT>']<EOL>):<EOL><INDENT>if ((not np.any(np.isclose(<EOL>rp,<EOL>np.array(pfres['<STR_LIT>']),<EOL>rtol=period_tolerance<EOL>))) and np.isfinite(rp)):<EOL><INDENT>pfres['<STR_LIT>'].append(rp)<EOL>pfres['<STR_LIT>'].append(pfm)<EOL>pfres['<STR_LIT>'].append(magcol)<EOL>if pfm == '<STR_LIT>':<EOL><INDENT>this_lspval = (<EOL>np.max(fakepf[magcol][pfm]['<STR_LIT>']) -<EOL>fakepf[magcol][pfm]['<STR_LIT>'][rpi]<EOL>)<EOL><DEDENT>else:<EOL><INDENT>this_lspval = 
(<EOL>fakepf[magcol][pfm]['<STR_LIT>'][rpi] /<EOL>np.max(fakepf[magcol][pfm]['<STR_LIT>'])<EOL>)<EOL><DEDENT>pfres['<STR_LIT>'].append(this_lspval)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>pfres['<STR_LIT>'] = np.array(pfres['<STR_LIT>'])<EOL>pfres['<STR_LIT>'] = np.array(pfres['<STR_LIT>'])<EOL>pfres['<STR_LIT>'] = np.array(pfres['<STR_LIT>'])<EOL>pfres['<STR_LIT>'] = np.array(pfres['<STR_LIT>'])<EOL>if (actual_vartype and<EOL>actual_vartype in PERIODIC_VARTYPES and<EOL>np.isfinite(actual_varperiod)):<EOL><INDENT>if pfres['<STR_LIT>'].size > <NUM_LIT:0>:<EOL><INDENT>for ri in range(pfres['<STR_LIT>'].size):<EOL><INDENT>pfres['<STR_LIT>'].append(pfres['<STR_LIT>'][ri] -<EOL>np.asscalar(actual_varperiod))<EOL>pfres['<STR_LIT>'].append(<EOL>check_periodrec_alias(actual_varperiod,<EOL>pfres['<STR_LIT>'][ri],<EOL>tolerance=period_tolerance)<EOL>)<EOL><DEDENT>pfres['<STR_LIT>'] = np.array(pfres['<STR_LIT>'])<EOL>pfres['<STR_LIT>'] = np.array(pfres['<STR_LIT>'])<EOL>rec_absdiff = np.abs(pfres['<STR_LIT>'])<EOL>best_recp_ind = rec_absdiff == rec_absdiff.min()<EOL>pfres['<STR_LIT>'] = (<EOL>pfres['<STR_LIT>'][best_recp_ind]<EOL>)<EOL>pfres['<STR_LIT>'] = (<EOL>pfres['<STR_LIT>'][best_recp_ind]<EOL>)<EOL>pfres['<STR_LIT>'] = (<EOL>pfres['<STR_LIT>'][best_recp_ind]<EOL>)<EOL>pfres['<STR_LIT>'] = (<EOL>pfres['<STR_LIT>'][best_recp_ind]<EOL>)<EOL>pfres['<STR_LIT>'] = (<EOL>pfres['<STR_LIT>'][best_recp_ind]<EOL>)<EOL><DEDENT>else:<EOL><INDENT>LOGWARNING(<EOL>'<STR_LIT>' %<EOL>fakepfpkl<EOL>)<EOL>pfres['<STR_LIT>'] = np.array(['<STR_LIT>'])<EOL>pfres['<STR_LIT>'] = np.array([np.nan])<EOL>pfres['<STR_LIT>'] = np.array([np.nan])<EOL>pfres['<STR_LIT>'] = np.array([],dtype=np.unicode_)<EOL>pfres['<STR_LIT>'] = np.array([],dtype=np.unicode_)<EOL>pfres['<STR_LIT>'] = np.array([],dtype=np.unicode_)<EOL>pfres['<STR_LIT>'] = np.array([np.nan])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pfres['<STR_LIT>'] = np.array(<EOL>['<STR_LIT>']*pfres['<STR_LIT>'].size<EOL>)<EOL>pfres['<STR_LIT>'] = 
np.zeros(pfres['<STR_LIT>'].size)<EOL>pfres['<STR_LIT>'] = np.array([np.nan])<EOL>pfres['<STR_LIT>'] = np.array([],dtype=np.unicode_)<EOL>pfres['<STR_LIT>'] = np.array([],dtype=np.unicode_)<EOL>pfres['<STR_LIT>'] = np.array(['<STR_LIT>'])<EOL>pfres['<STR_LIT>'] = np.array([np.nan])<EOL><DEDENT>return pfres<EOL>", "docstring": "Recovers the periodic variable status/info for the simulated PF result.\n\n    - Uses simbasedir and the lcfbasename stored in fakepfpkl to figure out\n      where the LC for this object is.\n    - Gets the actual_varparams, actual_varperiod, actual_vartype,\n      actual_varamplitude elements from the LC.\n    - Figures out if the current objectid is a periodic variable (using\n      actual_vartype).\n    - If it is a periodic variable, gets the canonical period assigned to it.\n    - Checks if the period was recovered in any of the five best periods\n      reported by any of the period-finders, checks if the period recovered was\n      a harmonic of the period.\n    - Returns the objectid, actual period and vartype, recovered period, and\n      recovery status.\n\n\n    Parameters\n    ----------\n\n    fakepfpkl : str\n        This is a periodfinding-<objectid>.pkl[.gz] file produced in the\n        `simbasedir/periodfinding` subdirectory after `run_periodfinding` above\n        is done.\n\n    simbasedir : str\n        The base directory where all of the fake LCs and period-finding results\n        are.\n\n    period_tolerance : float\n        The maximum difference that this function will consider between an\n        actual period (or its aliases) and a recovered period to consider it as\n        as a 'recovered' period.\n\n    Returns\n    -------\n\n    dict\n        Returns a dict of period-recovery results.", "id": "f14692:m12"}
{"signature": "def get_recovered_variables_for_magbin(simbasedir,<EOL>magbinmedian,<EOL>stetson_stdev_min=<NUM_LIT>,<EOL>inveta_stdev_min=<NUM_LIT>,<EOL>iqr_stdev_min=<NUM_LIT>,<EOL>statsonly=True):", "body": "<EOL>with open(os.path.join(simbasedir, '<STR_LIT>'),'<STR_LIT:rb>') as infd:<EOL><INDENT>siminfo = pickle.load(infd)<EOL><DEDENT>objectids = siminfo['<STR_LIT>']<EOL>varflags = siminfo['<STR_LIT>']<EOL>sdssr = siminfo['<STR_LIT>']<EOL>timecols = siminfo['<STR_LIT>']<EOL>magcols = siminfo['<STR_LIT>']<EOL>errcols = siminfo['<STR_LIT>']<EOL>fakelc_formatkey = '<STR_LIT>' % siminfo['<STR_LIT>']<EOL>lcproc.register_lcformat(<EOL>fakelc_formatkey,<EOL>'<STR_LIT>',<EOL>timecols,<EOL>magcols,<EOL>errcols,<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>magsarefluxes=siminfo['<STR_LIT>']<EOL>)<EOL>outdir = os.path.join(simbasedir, '<STR_LIT>')<EOL>if not os.path.exists(outdir):<EOL><INDENT>os.mkdir(outdir)<EOL><DEDENT>varfeaturedir = os.path.join(simbasedir, '<STR_LIT>')<EOL>varthreshinfof = os.path.join(<EOL>outdir,<EOL>'<STR_LIT>' % (magbinmedian,<EOL>stetson_stdev_min,<EOL>inveta_stdev_min)<EOL>)<EOL>varthresh = varthreshold.variability_threshold(<EOL>varfeaturedir,<EOL>varthreshinfof,<EOL>lcformat=fakelc_formatkey,<EOL>min_stetj_stdev=stetson_stdev_min,<EOL>min_inveta_stdev=inveta_stdev_min,<EOL>min_iqr_stdev=iqr_stdev_min,<EOL>verbose=False<EOL>)<EOL>magbins = varthresh['<STR_LIT>']<EOL>magbininds = np.digitize(sdssr, magbins)<EOL>binned_objectids = []<EOL>binned_actualvars = []<EOL>binned_actualnotvars = []<EOL>for mbinind, _magi in zip(np.unique(magbininds),<EOL>range(len(magbins)-<NUM_LIT:1>)):<EOL><INDENT>thisbinind = np.where(magbininds == mbinind)<EOL>thisbin_objectids = objectids[thisbinind]<EOL>thisbin_varflags = varflags[thisbinind]<EOL>thisbin_actualvars = thisbin_objectids[thisbin_varflags]<EOL>thisbin_actualnotvars = 
thisbin_objectids[~thisbin_varflags]<EOL>binned_objectids.append(thisbin_objectids)<EOL>binned_actualvars.append(thisbin_actualvars)<EOL>binned_actualnotvars.append(thisbin_actualnotvars)<EOL><DEDENT>recdict = {<EOL>'<STR_LIT>':simbasedir,<EOL>'<STR_LIT>':timecols,<EOL>'<STR_LIT>':magcols,<EOL>'<STR_LIT>':errcols,<EOL>'<STR_LIT>':siminfo['<STR_LIT>'],<EOL>'<STR_LIT>':stetson_stdev_min,<EOL>'<STR_LIT>':inveta_stdev_min,<EOL>'<STR_LIT>':iqr_stdev_min,<EOL>'<STR_LIT>':magbinmedian,<EOL>}<EOL>for magcol in magcols:<EOL><INDENT>magbinind = np.where(<EOL>np.array(varthresh[magcol]['<STR_LIT>']) == magbinmedian<EOL>)<EOL>magbinind = np.asscalar(magbinind[<NUM_LIT:0>])<EOL>thisbin_objectids = binned_objectids[magbinind]<EOL>thisbin_actualvars = binned_actualvars[magbinind]<EOL>thisbin_actualnotvars = binned_actualnotvars[magbinind]<EOL>stet_recoveredvars = varthresh[magcol][<EOL>'<STR_LIT>'<EOL>][magbinind]<EOL>stet_recoverednotvars = np.setdiff1d(thisbin_objectids,<EOL>stet_recoveredvars)<EOL>stet_truepositives = np.intersect1d(stet_recoveredvars,<EOL>thisbin_actualvars)<EOL>stet_falsepositives = np.intersect1d(stet_recoveredvars,<EOL>thisbin_actualnotvars)<EOL>stet_truenegatives = np.intersect1d(stet_recoverednotvars,<EOL>thisbin_actualnotvars)<EOL>stet_falsenegatives = np.intersect1d(stet_recoverednotvars,<EOL>thisbin_actualvars)<EOL>stet_recall = recall(stet_truepositives.size,<EOL>stet_falsenegatives.size)<EOL>stet_precision = precision(stet_truepositives.size,<EOL>stet_falsepositives.size)<EOL>stet_mcc = matthews_correl_coeff(stet_truepositives.size,<EOL>stet_truenegatives.size,<EOL>stet_falsepositives.size,<EOL>stet_falsenegatives.size)<EOL>inveta_recoveredvars = varthresh[magcol][<EOL>'<STR_LIT>'<EOL>][magbinind]<EOL>inveta_recoverednotvars = np.setdiff1d(thisbin_objectids,<EOL>inveta_recoveredvars)<EOL>inveta_truepositives = np.intersect1d(inveta_recoveredvars,<EOL>thisbin_actualvars)<EOL>inveta_falsepositives = 
np.intersect1d(inveta_recoveredvars,<EOL>thisbin_actualnotvars)<EOL>inveta_truenegatives = np.intersect1d(inveta_recoverednotvars,<EOL>thisbin_actualnotvars)<EOL>inveta_falsenegatives = np.intersect1d(inveta_recoverednotvars,<EOL>thisbin_actualvars)<EOL>inveta_recall = recall(inveta_truepositives.size,<EOL>inveta_falsenegatives.size)<EOL>inveta_precision = precision(inveta_truepositives.size,<EOL>inveta_falsepositives.size)<EOL>inveta_mcc = matthews_correl_coeff(inveta_truepositives.size,<EOL>inveta_truenegatives.size,<EOL>inveta_falsepositives.size,<EOL>inveta_falsenegatives.size)<EOL>iqr_recoveredvars = varthresh[magcol][<EOL>'<STR_LIT>'<EOL>][magbinind]<EOL>iqr_recoverednotvars = np.setdiff1d(thisbin_objectids,<EOL>iqr_recoveredvars)<EOL>iqr_truepositives = np.intersect1d(iqr_recoveredvars,<EOL>thisbin_actualvars)<EOL>iqr_falsepositives = np.intersect1d(iqr_recoveredvars,<EOL>thisbin_actualnotvars)<EOL>iqr_truenegatives = np.intersect1d(iqr_recoverednotvars,<EOL>thisbin_actualnotvars)<EOL>iqr_falsenegatives = np.intersect1d(iqr_recoverednotvars,<EOL>thisbin_actualvars)<EOL>iqr_recall = recall(iqr_truepositives.size,<EOL>iqr_falsenegatives.size)<EOL>iqr_precision = precision(iqr_truepositives.size,<EOL>iqr_falsepositives.size)<EOL>iqr_mcc = matthews_correl_coeff(iqr_truepositives.size,<EOL>iqr_truenegatives.size,<EOL>iqr_falsepositives.size,<EOL>iqr_falsenegatives.size)<EOL>stet_missed_inveta_found = np.setdiff1d(inveta_truepositives,<EOL>stet_truepositives)<EOL>stet_missed_iqr_found = np.setdiff1d(iqr_truepositives,<EOL>stet_truepositives)<EOL>inveta_missed_stet_found = np.setdiff1d(stet_truepositives,<EOL>inveta_truepositives)<EOL>inveta_missed_iqr_found = np.setdiff1d(iqr_truepositives,<EOL>inveta_truepositives)<EOL>iqr_missed_stet_found = np.setdiff1d(stet_truepositives,<EOL>iqr_truepositives)<EOL>iqr_missed_inveta_found = np.setdiff1d(inveta_truepositives,<EOL>iqr_truepositives)<EOL>if not statsonly:<EOL><INDENT>recdict[magcol] = 
{<EOL>'<STR_LIT>':stet_recoveredvars,<EOL>'<STR_LIT>':stet_truepositives,<EOL>'<STR_LIT>':stet_falsepositives,<EOL>'<STR_LIT>':stet_truenegatives,<EOL>'<STR_LIT>':stet_falsenegatives,<EOL>'<STR_LIT>':stet_precision,<EOL>'<STR_LIT>':stet_recall,<EOL>'<STR_LIT>':stet_mcc,<EOL>'<STR_LIT>':inveta_recoveredvars,<EOL>'<STR_LIT>':inveta_truepositives,<EOL>'<STR_LIT>':inveta_falsepositives,<EOL>'<STR_LIT>':inveta_truenegatives,<EOL>'<STR_LIT>':inveta_falsenegatives,<EOL>'<STR_LIT>':inveta_precision,<EOL>'<STR_LIT>':inveta_recall,<EOL>'<STR_LIT>':inveta_mcc,<EOL>'<STR_LIT>':iqr_recoveredvars,<EOL>'<STR_LIT>':iqr_truepositives,<EOL>'<STR_LIT>':iqr_falsepositives,<EOL>'<STR_LIT>':iqr_truenegatives,<EOL>'<STR_LIT>':iqr_falsenegatives,<EOL>'<STR_LIT>':iqr_precision,<EOL>'<STR_LIT>':iqr_recall,<EOL>'<STR_LIT>':iqr_mcc,<EOL>'<STR_LIT>':stet_missed_inveta_found,<EOL>'<STR_LIT>':stet_missed_iqr_found,<EOL>'<STR_LIT>':inveta_missed_stet_found,<EOL>'<STR_LIT>':inveta_missed_iqr_found,<EOL>'<STR_LIT>':iqr_missed_stet_found,<EOL>'<STR_LIT>':iqr_missed_inveta_found,<EOL>'<STR_LIT>':thisbin_actualvars,<EOL>'<STR_LIT>':thisbin_actualnotvars,<EOL>'<STR_LIT>':thisbin_objectids,<EOL>'<STR_LIT>':magbinind,<EOL>}<EOL><DEDENT>else:<EOL><INDENT>recdict[magcol] = 
{<EOL>'<STR_LIT>':stet_recoveredvars.size,<EOL>'<STR_LIT>':stet_truepositives.size,<EOL>'<STR_LIT>':stet_falsepositives.size,<EOL>'<STR_LIT>':stet_truenegatives.size,<EOL>'<STR_LIT>':stet_falsenegatives.size,<EOL>'<STR_LIT>':stet_precision,<EOL>'<STR_LIT>':stet_recall,<EOL>'<STR_LIT>':stet_mcc,<EOL>'<STR_LIT>':inveta_recoveredvars.size,<EOL>'<STR_LIT>':inveta_truepositives.size,<EOL>'<STR_LIT>':inveta_falsepositives.size,<EOL>'<STR_LIT>':inveta_truenegatives.size,<EOL>'<STR_LIT>':inveta_falsenegatives.size,<EOL>'<STR_LIT>':inveta_precision,<EOL>'<STR_LIT>':inveta_recall,<EOL>'<STR_LIT>':inveta_mcc,<EOL>'<STR_LIT>':iqr_recoveredvars.size,<EOL>'<STR_LIT>':iqr_truepositives.size,<EOL>'<STR_LIT>':iqr_falsepositives.size,<EOL>'<STR_LIT>':iqr_truenegatives.size,<EOL>'<STR_LIT>':iqr_falsenegatives.size,<EOL>'<STR_LIT>':iqr_precision,<EOL>'<STR_LIT>':iqr_recall,<EOL>'<STR_LIT>':iqr_mcc,<EOL>'<STR_LIT>':stet_missed_inveta_found.size,<EOL>'<STR_LIT>':stet_missed_iqr_found.size,<EOL>'<STR_LIT>':inveta_missed_stet_found.size,<EOL>'<STR_LIT>':inveta_missed_iqr_found.size,<EOL>'<STR_LIT>':iqr_missed_stet_found.size,<EOL>'<STR_LIT>':iqr_missed_inveta_found.size,<EOL>'<STR_LIT>':thisbin_actualvars.size,<EOL>'<STR_LIT>':thisbin_actualnotvars.size,<EOL>'<STR_LIT>':thisbin_objectids.size,<EOL>'<STR_LIT>':magbinind,<EOL>}<EOL><DEDENT><DEDENT>return recdict<EOL>", "docstring": "This runs variability selection for the given magbinmedian.\n\n    To generate a full recovery matrix over all magnitude bins, run this\n    function for each magbin over the specified stetson_stdev_min and\n    inveta_stdev_min grid.\n\n    Parameters\n    ----------\n\n    simbasedir : str\n        The input directory of fake LCs.\n\n    magbinmedian : float\n        The magbin to run the variable recovery for. 
This is an item from the\n        dict from `simbasedir/fakelcs-info.pkl: `fakelcinfo['magrms'][magcol]`\n        list for each magcol and designates which magbin to get the recovery\n        stats for.\n\n    stetson_stdev_min : float\n        The minimum sigma above the trend in the Stetson J variability index\n        distribution for this magbin to use to consider objects as variable.\n\n    inveta_stdev_min : float\n        The minimum sigma above the trend in the 1/eta variability index\n        distribution for this magbin to use to consider objects as variable.\n\n    iqr_stdev_min : float\n        The minimum sigma above the trend in the IQR variability index\n        distribution for this magbin to use to consider objects as variable.\n\n    statsonly : bool\n        If this is True, only the final stats will be returned. If False, the\n        full arrays used to generate the stats will also be returned.\n\n    Returns\n    -------\n\n    dict\n        The returned dict contains statistics for this magbin and if requested,\n        the full arrays used to calculate the statistics.", "id": "f14692:m6"}
{"signature": "def read_fakelc(fakelcfile):", "body": "try:<EOL><INDENT>with open(fakelcfile,'<STR_LIT:rb>') as infd:<EOL><INDENT>lcdict = pickle.load(infd)<EOL><DEDENT><DEDENT>except UnicodeDecodeError:<EOL><INDENT>with open(fakelcfile,'<STR_LIT:rb>') as infd:<EOL><INDENT>lcdict = pickle.load(infd, encoding='<STR_LIT>')<EOL><DEDENT><DEDENT>return lcdict<EOL>", "docstring": "This just reads a pickled fake LC.\n\nParameters\n----------\n\nfakelcfile : str\n    The fake LC file to read.\n\nReturns\n-------\n\ndict\n    This returns an lcdict.", "id": "f14692:m1"}
{"signature": "def get_varfeatures(simbasedir,<EOL>mindet=<NUM_LIT:1000>,<EOL>nworkers=None):", "body": "<EOL>with open(os.path.join(simbasedir, '<STR_LIT>'),'<STR_LIT:rb>') as infd:<EOL><INDENT>siminfo = pickle.load(infd)<EOL><DEDENT>lcfpaths = siminfo['<STR_LIT>']<EOL>varfeaturedir = os.path.join(simbasedir,'<STR_LIT>')<EOL>timecols = siminfo['<STR_LIT>']<EOL>magcols = siminfo['<STR_LIT>']<EOL>errcols = siminfo['<STR_LIT>']<EOL>timecols = siminfo['<STR_LIT>']<EOL>magcols = siminfo['<STR_LIT>']<EOL>errcols = siminfo['<STR_LIT>']<EOL>fakelc_formatkey = '<STR_LIT>' % siminfo['<STR_LIT>']<EOL>lcproc.register_lcformat(<EOL>fakelc_formatkey,<EOL>'<STR_LIT>',<EOL>timecols,<EOL>magcols,<EOL>errcols,<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>magsarefluxes=siminfo['<STR_LIT>']<EOL>)<EOL>varinfo = lcvfeatures.parallel_varfeatures(lcfpaths,<EOL>varfeaturedir,<EOL>lcformat=fakelc_formatkey,<EOL>mindet=mindet,<EOL>nworkers=nworkers)<EOL>with open(os.path.join(simbasedir,'<STR_LIT>'),'<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(varinfo, outfd, pickle.HIGHEST_PROTOCOL)<EOL><DEDENT>return os.path.join(simbasedir,'<STR_LIT>')<EOL>", "docstring": "This runs `lcproc.lcvfeatures.parallel_varfeatures` on fake LCs in\n    `simbasedir`.\n\n    Parameters\n    ----------\n\n    simbasedir : str\n        The directory containing the fake LCs to process.\n\n    mindet : int\n        The minimum number of detections needed to accept an LC and process it.\n\n    nworkers : int or None\n        The number of parallel workers to use when extracting variability\n        features from the input light curves.\n\n    Returns\n    -------\n\n    str\n        The path to the `varfeatures` pickle created after running the\n        `lcproc.lcvfeatures.parallel_varfeatures` function.", "id": "f14692:m2"}
{"signature": "def recall(ntp, nfn):", "body": "if (ntp+nfn) > <NUM_LIT:0>:<EOL><INDENT>return ntp/(ntp+nfn)<EOL><DEDENT>else:<EOL><INDENT>return np.nan<EOL><DEDENT>", "docstring": "This calculates recall.\n\nhttps://en.wikipedia.org/wiki/Precision_and_recall\n\nParameters\n----------\n\nntp : int\n    The number of true positives.\n\nnfn : int\n    The number of false negatives.\n\nReturns\n-------\n\nfloat\n    The precision calculated using `ntp/(ntp + nfn)`.", "id": "f14692:m4"}
{"signature": "def plot_varind_gridsearch_magbin_results(gridsearch_results):", "body": "<EOL>if (isinstance(gridsearch_results, str) and<EOL>os.path.exists(gridsearch_results)):<EOL><INDENT>with open(gridsearch_results,'<STR_LIT:rb>') as infd:<EOL><INDENT>gridresults = pickle.load(infd)<EOL><DEDENT><DEDENT>elif isinstance(gridsearch_results, dict):<EOL><INDENT>gridresults = gridsearch_results<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return None<EOL><DEDENT>plotres = {'<STR_LIT>':gridresults['<STR_LIT>']}<EOL>recgrid = gridresults['<STR_LIT>']<EOL>simbasedir = gridresults['<STR_LIT>']<EOL>for magcol in gridresults['<STR_LIT>']:<EOL><INDENT>plotres[magcol] = {'<STR_LIT>':[],<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':gridresults['<STR_LIT>']}<EOL>for magbinind, magbinmedian in enumerate(gridresults['<STR_LIT>']):<EOL><INDENT>LOGINFO('<STR_LIT>' %<EOL>(magcol, magbinmedian))<EOL>stet_mcc = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[::(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)]<EOL>stet_precision = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[::(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)]<EOL>stet_recall = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[::(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)]<EOL>stet_missed_inveta_found = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[::(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)]<EOL>stet_missed_iqr_found = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[::(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)]<EOL>inveta_mcc = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[:(gridresults['<STR_LIT>'].size 
*<EOL>gridresults['<STR_LIT>'].size)][<EOL>::gridresults['<STR_LIT>'].size<EOL>]<EOL>inveta_precision = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[:(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)][<EOL>::gridresults['<STR_LIT>'].size<EOL>]<EOL>inveta_recall = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[:(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)][<EOL>::gridresults['<STR_LIT>'].size<EOL>]<EOL>inveta_missed_stet_found = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[:(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)][<EOL>::gridresults['<STR_LIT>'].size<EOL>]<EOL>inveta_missed_iqr_found = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[:(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)][<EOL>::gridresults['<STR_LIT>'].size<EOL>]<EOL>iqr_mcc = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[:(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)][<EOL>:gridresults['<STR_LIT>'].size<EOL>]<EOL>iqr_precision = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[:(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)][<EOL>:gridresults['<STR_LIT>'].size<EOL>]<EOL>iqr_recall = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[:(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)][<EOL>:gridresults['<STR_LIT>'].size<EOL>]<EOL>iqr_missed_stet_found = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[:(gridresults['<STR_LIT>'].size *<EOL>gridresults['<STR_LIT>'].size)][<EOL>:gridresults['<STR_LIT>'].size<EOL>]<EOL>iqr_missed_inveta_found = np.array(<EOL>[x[magcol]['<STR_LIT>']<EOL>for x in recgrid[magbinind]]<EOL>)[:(gridresults['<STR_LIT>'].size 
*<EOL>gridresults['<STR_LIT>'].size)][<EOL>:gridresults['<STR_LIT>'].size<EOL>]<EOL>fig = plt.figure(figsize=(<NUM_LIT>*<NUM_LIT:5>, <NUM_LIT>*<NUM_LIT:3>))<EOL>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:1>)<EOL>if np.any(np.isfinite(stet_mcc)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>stet_mcc)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:2>)<EOL>if np.any(np.isfinite(stet_precision)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>stet_precision)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:3>)<EOL>if np.any(np.isfinite(stet_recall)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>stet_recall)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:4>)<EOL>if 
np.any(np.isfinite(stet_missed_inveta_found)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>stet_missed_inveta_found)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:5>)<EOL>if np.any(np.isfinite(stet_missed_iqr_found)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>stet_missed_iqr_found)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:6>)<EOL>if np.any(np.isfinite(inveta_mcc)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>inveta_mcc)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:7>)<EOL>if 
np.any(np.isfinite(inveta_precision)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>inveta_precision)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:8>)<EOL>if np.any(np.isfinite(inveta_recall)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>inveta_recall)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:9>)<EOL>if np.any(np.isfinite(inveta_missed_stet_found)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>inveta_missed_stet_found)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:10>)<EOL>if 
np.any(np.isfinite(inveta_missed_iqr_found)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>inveta_missed_iqr_found)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:11>)<EOL>if np.any(np.isfinite(iqr_mcc)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>iqr_mcc)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:12>)<EOL>if np.any(np.isfinite(iqr_precision)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>iqr_precision)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT>)<EOL>if np.any(np.isfinite(iqr_recall)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>iqr_recall)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT>)<EOL>if 
np.any(np.isfinite(iqr_missed_stet_found)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>iqr_missed_stet_found)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplot(<NUM_LIT:3>,<NUM_LIT:5>,<NUM_LIT:15>)<EOL>if np.any(np.isfinite(iqr_missed_inveta_found)):<EOL><INDENT>plt.plot(gridresults['<STR_LIT>'],<EOL>iqr_missed_inveta_found)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.text(<NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>transform=plt.gca().transAxes,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>plt.xticks([])<EOL>plt.yticks([])<EOL><DEDENT>plt.subplots_adjust(hspace=<NUM_LIT>,wspace=<NUM_LIT>)<EOL>plt.suptitle('<STR_LIT>' % (magcol, magbinmedian))<EOL>plotdir = os.path.join(gridresults['<STR_LIT>'],<EOL>'<STR_LIT>')<EOL>if not os.path.exists(plotdir):<EOL><INDENT>os.mkdir(plotdir)<EOL><DEDENT>gridplotf = os.path.join(<EOL>plotdir,<EOL>'<STR_LIT>' %<EOL>(magcol, magbinmedian)<EOL>)<EOL>plt.savefig(gridplotf,dpi=<NUM_LIT:100>,bbox_inches='<STR_LIT>')<EOL>plt.close('<STR_LIT:all>')<EOL>stet_mcc_maxind = np.where(stet_mcc == np.max(stet_mcc))<EOL>stet_precision_maxind = np.where(<EOL>stet_precision == np.max(stet_precision)<EOL>)<EOL>stet_recall_maxind = np.where(stet_recall == np.max(stet_recall))<EOL>best_stet_mcc = stet_mcc[stet_mcc_maxind]<EOL>best_stet_precision = stet_mcc[stet_precision_maxind]<EOL>best_stet_recall = stet_mcc[stet_recall_maxind]<EOL>stet_with_best_mcc = gridresults['<STR_LIT>'][stet_mcc_maxind]<EOL>stet_with_best_precision = gridresults['<STR_LIT>'][<EOL>stet_precision_maxind<EOL>]<EOL>stet_with_best_recall = 
(<EOL>gridresults['<STR_LIT>'][stet_recall_maxind]<EOL>)<EOL>inveta_mcc_maxind = np.where(inveta_mcc == np.max(inveta_mcc))<EOL>inveta_precision_maxind = np.where(<EOL>inveta_precision == np.max(inveta_precision)<EOL>)<EOL>inveta_recall_maxind = (<EOL>np.where(inveta_recall == np.max(inveta_recall))<EOL>)<EOL>best_inveta_mcc = inveta_mcc[inveta_mcc_maxind]<EOL>best_inveta_precision = inveta_mcc[inveta_precision_maxind]<EOL>best_inveta_recall = inveta_mcc[inveta_recall_maxind]<EOL>inveta_with_best_mcc = gridresults['<STR_LIT>'][inveta_mcc_maxind]<EOL>inveta_with_best_precision = gridresults['<STR_LIT>'][<EOL>inveta_precision_maxind<EOL>]<EOL>inveta_with_best_recall = gridresults['<STR_LIT>'][<EOL>inveta_recall_maxind<EOL>]<EOL>iqr_mcc_maxind = np.where(iqr_mcc == np.max(iqr_mcc))<EOL>iqr_precision_maxind = np.where(<EOL>iqr_precision == np.max(iqr_precision)<EOL>)<EOL>iqr_recall_maxind = (<EOL>np.where(iqr_recall == np.max(iqr_recall))<EOL>)<EOL>best_iqr_mcc = iqr_mcc[iqr_mcc_maxind]<EOL>best_iqr_precision = iqr_mcc[iqr_precision_maxind]<EOL>best_iqr_recall = iqr_mcc[iqr_recall_maxind]<EOL>iqr_with_best_mcc = gridresults['<STR_LIT>'][iqr_mcc_maxind]<EOL>iqr_with_best_precision = gridresults['<STR_LIT>'][<EOL>iqr_precision_maxind<EOL>]<EOL>iqr_with_best_recall = gridresults['<STR_LIT>'][<EOL>iqr_recall_maxind<EOL>]<EOL>plotres[magcol][magbinmedian] = 
{<EOL>'<STR_LIT>':gridresults['<STR_LIT>'],<EOL>'<STR_LIT>':stet_mcc,<EOL>'<STR_LIT>':stet_precision,<EOL>'<STR_LIT>':stet_recall,<EOL>'<STR_LIT>':stet_missed_inveta_found,<EOL>'<STR_LIT>':best_stet_mcc,<EOL>'<STR_LIT>':stet_with_best_mcc,<EOL>'<STR_LIT>':best_stet_precision,<EOL>'<STR_LIT>':stet_with_best_precision,<EOL>'<STR_LIT>':best_stet_recall,<EOL>'<STR_LIT>':stet_with_best_recall,<EOL>'<STR_LIT>':gridresults['<STR_LIT>'],<EOL>'<STR_LIT>':inveta_mcc,<EOL>'<STR_LIT>':inveta_precision,<EOL>'<STR_LIT>':inveta_recall,<EOL>'<STR_LIT>':inveta_missed_stet_found,<EOL>'<STR_LIT>':best_inveta_mcc,<EOL>'<STR_LIT>':inveta_with_best_mcc,<EOL>'<STR_LIT>':best_inveta_precision,<EOL>'<STR_LIT>':inveta_with_best_precision,<EOL>'<STR_LIT>':best_inveta_recall,<EOL>'<STR_LIT>':inveta_with_best_recall,<EOL>'<STR_LIT>':gridresults['<STR_LIT>'],<EOL>'<STR_LIT>':iqr_mcc,<EOL>'<STR_LIT>':iqr_precision,<EOL>'<STR_LIT>':iqr_recall,<EOL>'<STR_LIT>':iqr_missed_stet_found,<EOL>'<STR_LIT>':best_iqr_mcc,<EOL>'<STR_LIT>':iqr_with_best_mcc,<EOL>'<STR_LIT>':best_iqr_precision,<EOL>'<STR_LIT>':iqr_with_best_precision,<EOL>'<STR_LIT>':best_iqr_recall,<EOL>'<STR_LIT>':iqr_with_best_recall,<EOL>'<STR_LIT>':gridplotf<EOL>}<EOL>if stet_with_best_mcc.size > <NUM_LIT:1>:<EOL><INDENT>plotres[magcol]['<STR_LIT>'].append(stet_with_best_mcc[<NUM_LIT:0>])<EOL><DEDENT>elif stet_with_best_mcc.size > <NUM_LIT:0>:<EOL><INDENT>plotres[magcol]['<STR_LIT>'].append(stet_with_best_mcc[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>plotres[magcol]['<STR_LIT>'].append(np.nan)<EOL><DEDENT>if inveta_with_best_mcc.size > <NUM_LIT:1>:<EOL><INDENT>plotres[magcol]['<STR_LIT>'].append(inveta_with_best_mcc[<NUM_LIT:0>])<EOL><DEDENT>elif inveta_with_best_mcc.size > <NUM_LIT:0>:<EOL><INDENT>plotres[magcol]['<STR_LIT>'].append(inveta_with_best_mcc[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>plotres[magcol]['<STR_LIT>'].append(np.nan)<EOL><DEDENT>if iqr_with_best_mcc.size > 
<NUM_LIT:1>:<EOL><INDENT>plotres[magcol]['<STR_LIT>'].append(iqr_with_best_mcc[<NUM_LIT:0>])<EOL><DEDENT>elif iqr_with_best_mcc.size > <NUM_LIT:0>:<EOL><INDENT>plotres[magcol]['<STR_LIT>'].append(iqr_with_best_mcc[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>plotres[magcol]['<STR_LIT>'].append(np.nan)<EOL><DEDENT><DEDENT><DEDENT>plotrespicklef = os.path.join(simbasedir,<EOL>'<STR_LIT>')<EOL>with open(plotrespicklef, '<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(plotres, outfd, pickle.HIGHEST_PROTOCOL)<EOL><DEDENT>for magcol in gridresults['<STR_LIT>']:<EOL><INDENT>LOGINFO('<STR_LIT>' % magcol)<EOL>LOGINFO('<STR_LIT>')<EOL>for magbin, inveta, stet, iqr in zip(<EOL>plotres[magcol]['<STR_LIT>'],<EOL>plotres[magcol]['<STR_LIT>'],<EOL>plotres[magcol]['<STR_LIT>'],<EOL>plotres[magcol]['<STR_LIT>']):<EOL><INDENT>LOGINFO('<STR_LIT>' % (magbin,<EOL>inveta,<EOL>stet,<EOL>iqr))<EOL><DEDENT><DEDENT>return plotres<EOL>", "docstring": "This plots the gridsearch results from `variable_index_gridsearch_magbin`.\n\n    Parameters\n    ----------\n\n    gridsearch_results : dict\n        This is the dict produced by `variable_index_gridsearch_magbin` above.\n\n    Returns\n    -------\n\n    dict\n        The returned dict contains filenames of the recovery rate plots made for\n        each variability index. These include plots of the precision, recall,\n        and Matthews Correlation Coefficient over each magbin and a heatmap of\n        these values over the grid points of the variability index stdev values\n        arrays used.", "id": "f14692:m9"}
{"signature": "def periodrec_worker(task):", "body": "pfpkl, simbasedir, period_tolerance = task<EOL>try:<EOL><INDENT>return periodicvar_recovery(pfpkl,<EOL>simbasedir,<EOL>period_tolerance=period_tolerance)<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' % repr(task))<EOL>return None<EOL><DEDENT>", "docstring": "This is a parallel worker for running period-recovery.\n\n    Parameters\n    ----------\n\n    task : tuple\n        This is used to pass args to the `periodicvar_recovery` function::\n\n            task[0] = period-finding result pickle to work on\n            task[1] = simbasedir\n            task[2] = period_tolerance\n\n    Returns\n    -------\n\n    dict\n        This is the dict produced by the `periodicvar_recovery` function for the\n        input period-finding result pickle.", "id": "f14692:m13"}
{"signature": "def run_periodfinding(simbasedir,<EOL>pfmethods=('<STR_LIT>','<STR_LIT>','<STR_LIT>'),<EOL>pfkwargs=({},{},{'<STR_LIT>':<NUM_LIT:1.0>,'<STR_LIT>':<NUM_LIT>}),<EOL>getblssnr=False,<EOL>sigclip=<NUM_LIT>,<EOL>nperiodworkers=<NUM_LIT:10>,<EOL>ncontrolworkers=<NUM_LIT:4>,<EOL>liststartindex=None,<EOL>listmaxobjects=None):", "body": "<EOL>with open(os.path.join(simbasedir, '<STR_LIT>'),'<STR_LIT:rb>') as infd:<EOL><INDENT>siminfo = pickle.load(infd)<EOL><DEDENT>lcfpaths = siminfo['<STR_LIT>']<EOL>pfdir = os.path.join(simbasedir,'<STR_LIT>')<EOL>timecols = siminfo['<STR_LIT>']<EOL>magcols = siminfo['<STR_LIT>']<EOL>errcols = siminfo['<STR_LIT>']<EOL>fakelc_formatkey = '<STR_LIT>' % siminfo['<STR_LIT>']<EOL>lcproc.register_lcformat(<EOL>fakelc_formatkey,<EOL>'<STR_LIT>',<EOL>timecols,<EOL>magcols,<EOL>errcols,<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>magsarefluxes=siminfo['<STR_LIT>']<EOL>)<EOL>if liststartindex:<EOL><INDENT>lcfpaths = lcfpaths[liststartindex:]<EOL><DEDENT>if listmaxobjects:<EOL><INDENT>lcfpaths = lcfpaths[:listmaxobjects]<EOL><DEDENT>pfinfo = periodsearch.parallel_pf(lcfpaths,<EOL>pfdir,<EOL>lcformat=fakelc_formatkey,<EOL>pfmethods=pfmethods,<EOL>pfkwargs=pfkwargs,<EOL>getblssnr=getblssnr,<EOL>sigclip=sigclip,<EOL>nperiodworkers=nperiodworkers,<EOL>ncontrolworkers=ncontrolworkers)<EOL>with open(os.path.join(simbasedir,<EOL>'<STR_LIT>'),'<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(pfinfo, outfd, pickle.HIGHEST_PROTOCOL)<EOL><DEDENT>return os.path.join(simbasedir,'<STR_LIT>')<EOL>", "docstring": "This runs periodfinding using several period-finders on a collection of\n    fake LCs.\n\n    As a rough benchmark, 25000 fake LCs with 10000--50000 points per LC take\n    about 26 days in total to run on an invocation of this function using\n    GLS+PDM+BLS and 10 periodworkers and 4 controlworkers (so all 40 'cores') on\n    a 2 x Xeon E5-2660v3 machine.\n\n    Parameters\n    ----------\n\n    pfmethods : sequence of str\n        This is used 
to specify which periodfinders to run. These must be in the\n        `lcproc.periodsearch.PFMETHODS` dict.\n\n    pfkwargs : sequence of dict\n        This is used to provide optional kwargs to the period-finders.\n\n    getblssnr : bool\n        If this is True, will run BLS SNR calculations for each object and\n        magcol. This takes a while to run, so it's disabled (False) by default.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    nperiodworkers : int\n        This is the number of parallel period-finding worker processes to use.\n\n    ncontrolworkers : int\n        This is the number of parallel period-finding control workers to\n        use. Each control worker will launch `nperiodworkers` worker processes.\n\n    liststartindex : int\n        The starting index of processing. 
This refers to the filename list\n        generated by running `glob.glob` on the fake LCs in `simbasedir`.\n\n    maxobjects : int\n        The maximum number of objects to process in this run. Use this with\n        `liststartindex` to effectively distribute working on a large list of\n        input light curves over several sessions or machines.\n\n    Returns\n    -------\n\n    str\n        The path to the output summary pickle produced by\n        `lcproc.periodsearch.parallel_pf`", "id": "f14692:m10"}
{"signature": "def variable_index_gridsearch_magbin(simbasedir,<EOL>stetson_stdev_range=(<NUM_LIT:1.0>,<NUM_LIT>),<EOL>inveta_stdev_range=(<NUM_LIT:1.0>,<NUM_LIT>),<EOL>iqr_stdev_range=(<NUM_LIT:1.0>,<NUM_LIT>),<EOL>ngridpoints=<NUM_LIT:32>,<EOL>ngridworkers=None):", "body": "<EOL>outdir = os.path.join(simbasedir,'<STR_LIT>')<EOL>if not os.path.exists(outdir):<EOL><INDENT>os.mkdir(outdir)<EOL><DEDENT>with open(os.path.join(simbasedir, '<STR_LIT>'),'<STR_LIT:rb>') as infd:<EOL><INDENT>siminfo = pickle.load(infd)<EOL><DEDENT>timecols = siminfo['<STR_LIT>']<EOL>magcols = siminfo['<STR_LIT>']<EOL>errcols = siminfo['<STR_LIT>']<EOL>magbinmedians = siminfo['<STR_LIT>'][magcols[<NUM_LIT:0>]]['<STR_LIT>']<EOL>stetson_grid = np.linspace(stetson_stdev_range[<NUM_LIT:0>],<EOL>stetson_stdev_range[<NUM_LIT:1>],<EOL>num=ngridpoints)<EOL>inveta_grid = np.linspace(inveta_stdev_range[<NUM_LIT:0>],<EOL>inveta_stdev_range[<NUM_LIT:1>],<EOL>num=ngridpoints)<EOL>iqr_grid = np.linspace(iqr_stdev_range[<NUM_LIT:0>],<EOL>iqr_stdev_range[<NUM_LIT:1>],<EOL>num=ngridpoints)<EOL>stet_inveta_iqr_grid = []<EOL>for stet in stetson_grid:<EOL><INDENT>for inveta in inveta_grid:<EOL><INDENT>for iqr in iqr_grid:<EOL><INDENT>grid_point = [stet, inveta, iqr]<EOL>stet_inveta_iqr_grid.append(grid_point)<EOL><DEDENT><DEDENT><DEDENT>grid_results = {'<STR_LIT>':stetson_grid,<EOL>'<STR_LIT>':inveta_grid,<EOL>'<STR_LIT>':iqr_grid,<EOL>'<STR_LIT>':stet_inveta_iqr_grid,<EOL>'<STR_LIT>':magbinmedians,<EOL>'<STR_LIT>':timecols,<EOL>'<STR_LIT>':magcols,<EOL>'<STR_LIT>':errcols,<EOL>'<STR_LIT>':os.path.abspath(simbasedir),<EOL>'<STR_LIT>':[]}<EOL>pool = mp.Pool(ngridworkers)<EOL>for magbinmedian in magbinmedians:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' % magbinmedian)<EOL>tasks = [(simbasedir, gp, magbinmedian) for gp in stet_inveta_iqr_grid]<EOL>thisbin_results = pool.map(magbin_varind_gridsearch_worker, 
tasks)<EOL>grid_results['<STR_LIT>'].append(thisbin_results)<EOL><DEDENT>pool.close()<EOL>pool.join()<EOL>LOGINFO('<STR_LIT>')<EOL>with open(os.path.join(simbasedir,<EOL>'<STR_LIT>'),'<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(grid_results,outfd,pickle.HIGHEST_PROTOCOL)<EOL><DEDENT>return grid_results<EOL>", "docstring": "This runs a variable index grid search per magbin.\n\n    For each magbin, this does a grid search using the stetson and inveta ranges\n    provided and tries to optimize the Matthews Correlation Coefficient (best\n    value is +1.0), indicating the best possible separation of variables\n    vs. nonvariables. The thresholds on these two variable indexes that produce\n    the largest coeff for the collection of fake LCs will probably be the ones\n    that work best for actual variable classification on the real LCs.\n\n    https://en.wikipedia.org/wiki/Matthews_correlation_coefficient\n\n    For each grid-point, calculates the true positives, false positives, true\n    negatives, false negatives. Then gets the precision and recall, confusion\n    matrix, and the ROC curve for variable vs. nonvariable.\n\n    Once we've identified the best thresholds to use, we can then calculate\n    variable object numbers:\n\n    - as a function of magnitude\n    - as a function of period\n    - as a function of number of detections\n    - as a function of amplitude of variability\n\n\n    Writes everything back to `simbasedir/fakevar-recovery.pkl`. 
Use the\n    plotting function below to make plots for the results.\n\n    Parameters\n    ----------\n\n    simbasedir : str\n        The directory where the fake LCs are located.\n\n    stetson_stdev_range : sequence of 2 floats\n        The min and max values of the Stetson J variability index to generate a\n        grid over these to test for the values of this index that produce the\n        'best' recovery rate for the injected variable stars.\n\n    inveta_stdev_range : sequence of 2 floats\n        The min and max values of the 1/eta variability index to generate a\n        grid over these to test for the values of this index that produce the\n        'best' recovery rate for the injected variable stars.\n\n    iqr_stdev_range : sequence of 2 floats\n        The min and max values of the IQR variability index to generate a\n        grid over these to test for the values of this index that produce the\n        'best' recovery rate for the injected variable stars.\n\n    ngridpoints : int\n        The number of grid points for each variability index grid. Remember that\n        this function will be searching in 3D and will require lots of time to\n        run if ngridpoints is too large.\n\n        For the default number of grid points and 25000 simulated light curves,\n        this takes about 3 days to run on a 40 (effective) core machine with 2 x\n        Xeon E5-2650v3 CPUs.\n\n    ngridworkers : int or None\n        The number of parallel grid search workers that will be launched.\n\n    Returns\n    -------\n\n    dict\n        The returned dict contains a list of recovery stats for each magbin and\n        each grid point in the variability index grids that were used. This dict\n        can be passed to the plotting function below to plot the results.", "id": "f14692:m8"}
{"signature": "def s3_get_url(url,<EOL>altexts=None,<EOL>client=None,<EOL>raiseonfail=False):", "body": "bucket_item = url.replace('<STR_LIT>','<STR_LIT>')<EOL>bucket_item = bucket_item.split('<STR_LIT:/>')<EOL>bucket = bucket_item[<NUM_LIT:0>]<EOL>filekey = '<STR_LIT:/>'.join(bucket_item[<NUM_LIT:1>:])<EOL>return s3_get_file(bucket,<EOL>filekey,<EOL>bucket_item[-<NUM_LIT:1>],<EOL>altexts=altexts,<EOL>client=client,<EOL>raiseonfail=raiseonfail)<EOL>", "docstring": "This gets a file from an S3 bucket based on its s3:// URL.\n\n    Parameters\n    ----------\n\n    url : str\n        S3 URL to download. This should begin with 's3://'.\n\n    altexts : None or list of str\n        If not None, this is a list of alternate extensions to try for the file\n        other than the one provided in `filename`. For example, to get anything\n        that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to\n        strip the .gz.\n\n    client : boto3.Client or None\n        If None, this function will instantiate a new `boto3.Client` object to\n        use in its operations. Alternatively, pass in an existing `boto3.Client`\n        instance to re-use it here.\n\n    raiseonfail : bool\n        If True, will re-raise whatever Exception caused the operation to fail\n        and break out immediately.\n\n    Returns\n    -------\n\n    str\n        Path to the downloaded filename or None if the download was\n        unsuccessful. The file will be downloaded into the current working\n        directory and will have a filename == basename of the file on S3.", "id": "f14694:m2"}
{"signature": "def s3_delete_file(bucket, filename, client=None, raiseonfail=False):", "body": "if not client:<EOL><INDENT>client = boto3.client('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>resp = client.delete_object(Bucket=bucket, Key=filename)<EOL>if not resp:<EOL><INDENT>LOGERROR('<STR_LIT>' % (filename,<EOL>bucket))<EOL><DEDENT>else:<EOL><INDENT>return resp['<STR_LIT>']<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' % (filename,<EOL>bucket))<EOL>if raiseonfail:<EOL><INDENT>raise<EOL><DEDENT>return None<EOL><DEDENT>", "docstring": "This deletes a file from S3.\n\n    Parameters\n    ----------\n\n    bucket : str\n        The AWS S3 bucket to delete the file from.\n\n    filename : str\n        The full file name of the file to delete, including any prefixes.\n\n    client : boto3.Client or None\n        If None, this function will instantiate a new `boto3.Client` object to\n        use in its operations. Alternatively, pass in an existing `boto3.Client`\n        instance to re-use it here.\n\n    raiseonfail : bool\n        If True, will re-raise whatever Exception caused the operation to fail\n        and break out immediately.\n\n    Returns\n    -------\n\n    str or None\n        If the file was successfully deleted, will return the delete-marker\n        (https://docs.aws.amazon.com/AmazonS3/latest/dev/DeleteMarker.html). If\n        it wasn't, returns None", "id": "f14694:m4"}
{"signature": "def ec2_ssh(ip_address,<EOL>keypem_file,<EOL>username='<STR_LIT>',<EOL>raiseonfail=False):", "body": "c = paramiko.client.SSHClient()<EOL>c.load_system_host_keys()<EOL>c.set_missing_host_key_policy(paramiko.client.AutoAddPolicy)<EOL>privatekey = paramiko.RSAKey.from_private_key_file(keypem_file)<EOL>try:<EOL><INDENT>c.connect(ip_address,<EOL>pkey=privatekey,<EOL>username='<STR_LIT>')<EOL>return c<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(ip_address, keypem_file, username))<EOL>if raiseonfail:<EOL><INDENT>raise<EOL><DEDENT>return None<EOL><DEDENT>", "docstring": "This opens an SSH connection to the EC2 instance at `ip_address`.\n\n    Parameters\n    ----------\n\n    ip_address : str\n        IP address of the AWS EC2 instance to connect to.\n\n    keypem_file : str\n        The path to the keypair PEM file generated by AWS to allow SSH\n        connections.\n\n    username : str\n        The username to use to login to the EC2 instance.\n\n    raiseonfail : bool\n        If True, will re-raise whatever Exception caused the operation to fail\n        and break out immediately.\n\n    Returns\n    -------\n\n    paramiko.SSHClient\n        This has all the usual `paramiko` functionality:\n\n        - Use `SSHClient.exec_command(command, environment=None)` to exec a\n          shell command.\n\n        - Use `SSHClient.open_sftp()` to get a `SFTPClient` for the server. Then\n          call SFTPClient.get() and .put() to copy files from and to the server.", "id": "f14694:m0"}
{"signature": "def make_spot_fleet_cluster(<EOL>security_groupid,<EOL>subnet_id,<EOL>keypair_name,<EOL>iam_instance_profile_arn,<EOL>spot_fleet_iam_role,<EOL>target_capacity=<NUM_LIT:20>,<EOL>spot_price=<NUM_LIT>,<EOL>expires_days=<NUM_LIT:7>,<EOL>allocation_strategy='<STR_LIT>',<EOL>instance_types=SPOT_INSTANCE_TYPES,<EOL>instance_weights=None,<EOL>instance_ami='<STR_LIT>',<EOL>instance_user_data=None,<EOL>instance_ebs_optimized=True,<EOL>wait_until_up=True,<EOL>client=None,<EOL>raiseonfail=False<EOL>):", "body": "fleetconfig = copy.deepcopy(SPOT_FLEET_CONFIG)<EOL>fleetconfig['<STR_LIT>'] = spot_fleet_iam_role<EOL>fleetconfig['<STR_LIT>'] = allocation_strategy<EOL>fleetconfig['<STR_LIT>'] = target_capacity<EOL>fleetconfig['<STR_LIT>'] = str(spot_price)<EOL>fleetconfig['<STR_LIT>'] = (<EOL>datetime.utcnow() + timedelta(days=expires_days)<EOL>).strftime(<EOL>'<STR_LIT>'<EOL>)<EOL>if (isinstance(instance_user_data, str) and<EOL>os.path.exists(instance_user_data)):<EOL><INDENT>with open(instance_user_data,'<STR_LIT:rb>') as infd:<EOL><INDENT>udata = base64.b64encode(infd.read()).decode()<EOL><DEDENT><DEDENT>elif isinstance(instance_user_data, str):<EOL><INDENT>udata = base64.b64encode(instance_user_data.encode()).decode()<EOL><DEDENT>else:<EOL><INDENT>udata = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % datetime.utcnow().isoformat()<EOL>)<EOL>udata = base64.b64encode(udata.encode()).decode()<EOL><DEDENT>for ind, itype in enumerate(instance_types):<EOL><INDENT>thisinstance = SPOT_PERINSTANCE_CONFIG.copy()<EOL>thisinstance['<STR_LIT>'] = itype<EOL>thisinstance['<STR_LIT>'] = instance_ami<EOL>thisinstance['<STR_LIT>'] = subnet_id<EOL>thisinstance['<STR_LIT>'] = keypair_name<EOL>thisinstance['<STR_LIT>']['<STR_LIT>'] = iam_instance_profile_arn<EOL>thisinstance['<STR_LIT>'][<NUM_LIT:0>] = {'<STR_LIT>':security_groupid}<EOL>thisinstance['<STR_LIT>'] = udata<EOL>thisinstance['<STR_LIT>'] = instance_ebs_optimized<EOL>if isinstance(instance_weights, 
list):<EOL><INDENT>thisinstance['<STR_LIT>'] = instance_weights[ind]<EOL><DEDENT>fleetconfig['<STR_LIT>'].append(thisinstance)<EOL><DEDENT>if not client:<EOL><INDENT>client = boto3.client('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>resp = client.request_spot_fleet(<EOL>SpotFleetRequestConfig=fleetconfig,<EOL>)<EOL>if not resp:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return None<EOL><DEDENT>else:<EOL><INDENT>spot_fleet_reqid = resp['<STR_LIT>']<EOL>LOGINFO('<STR_LIT>' %<EOL>spot_fleet_reqid)<EOL>if not wait_until_up:<EOL><INDENT>return spot_fleet_reqid<EOL><DEDENT>else:<EOL><INDENT>ntries = <NUM_LIT:10><EOL>curr_try = <NUM_LIT:0><EOL>while curr_try < ntries:<EOL><INDENT>resp = client.describe_spot_fleet_requests(<EOL>SpotFleetRequestIds=[<EOL>spot_fleet_reqid<EOL>]<EOL>)<EOL>curr_state = resp.get('<STR_LIT>',[])<EOL>if len(curr_state) > <NUM_LIT:0>:<EOL><INDENT>curr_state = curr_state[<NUM_LIT:0>]['<STR_LIT>']<EOL>if curr_state == '<STR_LIT>':<EOL><INDENT>LOGINFO('<STR_LIT>' %<EOL>spot_fleet_reqid)<EOL>break<EOL><DEDENT><DEDENT>LOGINFO(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % (curr_try, ntries)<EOL>)<EOL>curr_try = curr_try + <NUM_LIT:1><EOL>time.sleep(<NUM_LIT>)<EOL><DEDENT>return spot_fleet_reqid<EOL><DEDENT><DEDENT><DEDENT>except ClientError as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>if raiseonfail:<EOL><INDENT>raise<EOL><DEDENT>return None<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>if raiseonfail:<EOL><INDENT>raise<EOL><DEDENT>return None<EOL><DEDENT>", "docstring": "This makes an EC2 spot-fleet cluster.\n\n    This requires a security group ID attached to a VPC config and subnet, a\n    keypair generated beforehand, and an IAM role ARN for the instance. 
See:\n\n    https://docs.aws.amazon.com/cli/latest/userguide/tutorial-ec2-ubuntu.html\n\n    Use `user_data` to launch tasks on instance launch.\n\n    Parameters\n    ----------\n\n    security_groupid : str\n        The security group ID of the AWS VPC where the instances will be\n        launched.\n\n    subnet_id : str\n        The subnet ID of the AWS VPC where the instances will be\n        launched.\n\n    keypair_name : str\n        The name of the keypair to be used to allow SSH access to all instances\n        launched here. This corresponds to an already downloaded AWS keypair PEM\n        file.\n\n    iam_instance_profile_arn : str\n        The ARN string corresponding to the AWS instance profile that describes\n        the permissions the launched instances have to access other AWS\n        resources. Set this up in AWS IAM.\n\n    spot_fleet_iam_role : str\n        This is the name of AWS IAM role that allows the Spot Fleet Manager to\n        scale up and down instances based on demand and instances failing,\n        etc. Set this up in IAM.\n\n    target_capacity : int\n        The number of instances to target in the fleet request. The fleet\n        manager service will attempt to maintain this number over the lifetime\n        of the Spot Fleet Request.\n\n    spot_price : float\n        The bid price in USD for the instances. This is per hour. Keep this at\n        about half the hourly on-demand price of the desired instances to make\n        sure your instances aren't taken away by AWS when it needs capacity.\n\n    expires_days : int\n        The number of days this request is active for. All instances launched by\n        this request will live at least this long and will be terminated\n        automatically after.\n\n    allocation_strategy : {'lowestPrice', 'diversified'}\n        The allocation strategy used by the fleet manager.\n\n    instance_types : list of str\n        List of the instance type to launch. 
See the following URL for a list of\n        IDs: https://aws.amazon.com/ec2/pricing/on-demand/\n\n    instance_weights : list of float or None\n        If `instance_types` is a list of different instance types, this is the\n        relative weight applied towards launching each instance type. This can\n        be used to launch a mix of instances in a defined ratio among their\n        types. Doing this can make the spot fleet more resilient to AWS taking\n        back the instances if it runs out of capacity.\n\n    instance_ami : str\n        The Amazon Machine Image ID that describes the OS the instances will use\n        after launch. The default ID is Amazon Linux 2 in the US East region.\n\n    instance_user_data : str or None\n        This is either the path to a file on disk that contains a shell-script\n        or a string containing a shell-script that will be executed by root\n        right after the instance is launched. Use to automatically set up\n        workers and queues. If None, will not execute anything at instance\n        start up.\n\n    instance_ebs_optimized : bool\n        If True, will enable EBS optimization to speed up IO. This is usually\n        True for all instances made available in the last couple of years.\n\n    wait_until_up : bool\n        If True, will not return from this function until the spot fleet request\n        is acknowledged by AWS.\n\n    client : boto3.Client or None\n        If None, this function will instantiate a new `boto3.Client` object to\n        use in its operations. Alternatively, pass in an existing `boto3.Client`\n        instance to re-use it here.\n\n    raiseonfail : bool\n        If True, will re-raise whatever Exception caused the operation to fail\n        and break out immediately.\n\n    Returns\n    -------\n\n    str or None\n        This is the spot fleet request ID if successful. Otherwise, returns\n        None.", "id": "f14694:m12"}
{"signature": "def delete_spot_fleet_cluster(<EOL>spot_fleet_reqid,<EOL>client=None,<EOL>):", "body": "if not client:<EOL><INDENT>client = boto3.client('<STR_LIT>')<EOL><DEDENT>resp = client.cancel_spot_fleet_requests(<EOL>SpotFleetRequestIds=[spot_fleet_reqid],<EOL>TerminateInstances=True<EOL>)<EOL>return resp<EOL>", "docstring": "This deletes a spot-fleet cluster.\n\nParameters\n----------\n\nspot_fleet_reqid : str\n    The fleet request ID returned by `make_spot_fleet_cluster`.\n\nclient : boto3.Client or None\n    If None, this function will instantiate a new `boto3.Client` object to\n    use in its operations. Alternatively, pass in an existing `boto3.Client`\n    instance to re-use it here.\n\nReturns\n-------\n\nNothing.", "id": "f14694:m13"}
{"signature": "def make_ec2_nodes(<EOL>security_groupid,<EOL>subnet_id,<EOL>keypair_name,<EOL>iam_instance_profile_arn,<EOL>launch_instances=<NUM_LIT:1>,<EOL>ami='<STR_LIT>',<EOL>instance='<STR_LIT>',<EOL>ebs_optimized=True,<EOL>user_data=None,<EOL>wait_until_up=True,<EOL>client=None,<EOL>raiseonfail=False,<EOL>):", "body": "if not client:<EOL><INDENT>client = boto3.client('<STR_LIT>')<EOL><DEDENT>if isinstance(user_data, str) and os.path.exists(user_data):<EOL><INDENT>with open(user_data,'<STR_LIT:r>') as infd:<EOL><INDENT>udata = infd.read()<EOL><DEDENT><DEDENT>elif isinstance(user_data, str):<EOL><INDENT>udata = user_data<EOL><DEDENT>else:<EOL><INDENT>udata = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % datetime.utcnow().isoformat()<EOL>)<EOL><DEDENT>try:<EOL><INDENT>resp = client.run_instances(<EOL>ImageId=ami,<EOL>InstanceType=instance,<EOL>SecurityGroupIds=[<EOL>security_groupid,<EOL>],<EOL>SubnetId=subnet_id,<EOL>UserData=udata,<EOL>IamInstanceProfile={'<STR_LIT>':iam_instance_profile_arn},<EOL>InstanceInitiatedShutdownBehavior='<STR_LIT>',<EOL>KeyName=keypair_name,<EOL>MaxCount=launch_instances,<EOL>MinCount=launch_instances,<EOL>EbsOptimized=ebs_optimized,<EOL>)<EOL>if not resp:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return None<EOL><DEDENT>else:<EOL><INDENT>instance_dict = {}<EOL>instance_list = resp.get('<STR_LIT>',[])<EOL>if len(instance_list) > <NUM_LIT:0>:<EOL><INDENT>for instance in instance_list:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (instance['<STR_LIT>'],<EOL>instance['<STR_LIT>'],<EOL>instance['<STR_LIT>'].isoformat(),<EOL>instance['<STR_LIT>']['<STR_LIT:Name>']))<EOL>instance_dict[instance['<STR_LIT>']] = {<EOL>'<STR_LIT:type>':instance['<STR_LIT>'],<EOL>'<STR_LIT>':instance['<STR_LIT>'],<EOL>'<STR_LIT:state>':instance['<STR_LIT>']['<STR_LIT:Name>'],<EOL>'<STR_LIT:info>':instance<EOL>}<EOL><DEDENT><DEDENT>if wait_until_up:<EOL><INDENT>ready_instances = []<EOL>LOGINFO('<STR_LIT>')<EOL>ntries = <NUM_LIT:5><EOL>curr_try = 
<NUM_LIT:0><EOL>while ( (curr_try < ntries) or<EOL>( len(ready_instances) <<EOL>len(list(instance_dict.keys()))) ):<EOL><INDENT>resp = client.describe_instances(<EOL>InstanceIds=list(instance_dict.keys()),<EOL>)<EOL>if len(resp['<STR_LIT>']) > <NUM_LIT:0>:<EOL><INDENT>for resv in resp['<STR_LIT>']:<EOL><INDENT>if len(resv['<STR_LIT>']) > <NUM_LIT:0>:<EOL><INDENT>for instance in resv['<STR_LIT>']:<EOL><INDENT>if instance['<STR_LIT>']['<STR_LIT:Name>'] == '<STR_LIT>':<EOL><INDENT>ready_instances.append(<EOL>instance['<STR_LIT>']<EOL>)<EOL>instance_dict[<EOL>instance['<STR_LIT>']<EOL>]['<STR_LIT:state>'] = '<STR_LIT>'<EOL>instance_dict[<EOL>instance['<STR_LIT>']<EOL>]['<STR_LIT>'] = instance['<STR_LIT>']<EOL>instance_dict[<EOL>instance['<STR_LIT>']<EOL>]['<STR_LIT:info>'] = instance<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>curr_try = curr_try + <NUM_LIT:1><EOL>time.sleep(<NUM_LIT>)<EOL><DEDENT>if len(ready_instances) == len(list(instance_dict.keys())):<EOL><INDENT>LOGINFO('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>LOGWARNING(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT><DEDENT>return instance_dict<EOL><DEDENT><DEDENT>except ClientError as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>if raiseonfail:<EOL><INDENT>raise<EOL><DEDENT>return None<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>if raiseonfail:<EOL><INDENT>raise<EOL><DEDENT>return None<EOL><DEDENT>", "docstring": "This makes new EC2 worker nodes.\n\n    This requires a security group ID attached to a VPC config and subnet, a\n    keypair generated beforehand, and an IAM role ARN for the instance. 
See:\n\n    https://docs.aws.amazon.com/cli/latest/userguide/tutorial-ec2-ubuntu.html\n\n    Use `user_data` to launch tasks on instance launch.\n\n    Parameters\n    ----------\n\n    security_groupid : str\n        The security group ID of the AWS VPC where the instances will be\n        launched.\n\n    subnet_id : str\n        The subnet ID of the AWS VPC where the instances will be\n        launched.\n\n    keypair_name : str\n        The name of the keypair to be used to allow SSH access to all instances\n        launched here. This corresponds to an already downloaded AWS keypair PEM\n        file.\n\n    iam_instance_profile_arn : str\n        The ARN string corresponding to the AWS instance profile that describes\n        the permissions the launched instances have to access other AWS\n        resources. Set this up in AWS IAM.\n\n    launch_instances : int\n        The number of instances to launch in this request.\n\n    ami : str\n        The Amazon Machine Image ID that describes the OS the instances will use\n        after launch. The default ID is Amazon Linux 2 in the US East region.\n\n    instance : str\n        The instance type to launch. See the following URL for a list of IDs:\n        https://aws.amazon.com/ec2/pricing/on-demand/\n\n    ebs_optimized : bool\n        If True, will enable EBS optimization to speed up IO. This is usually\n        True for all instances made available in the last couple of years.\n\n    user_data : str or None\n        This is either the path to a file on disk that contains a shell-script\n        or a string containing a shell-script that will be executed by root\n        right after the instance is launched. Use to automatically set up\n        workers and queues. 
If None, will not execute anything at instance\n        start up.\n\n    wait_until_up : bool\n        If True, will not return from this function until all launched instances\n        are verified as running by AWS.\n\n    client : boto3.Client or None\n        If None, this function will instantiate a new `boto3.Client` object to\n        use in its operations. Alternatively, pass in an existing `boto3.Client`\n        instance to re-use it here.\n\n    raiseonfail : bool\n        If True, will re-raise whatever Exception caused the operation to fail\n        and break out immediately.\n\n    Returns\n    -------\n\n    dict\n        Returns launched instance info as a dict, keyed by instance ID.", "id": "f14694:m10"}
{"signature": "def plot_periodbase_lsp(lspinfo, outfile=None, plotdpi=<NUM_LIT:100>):", "body": "<EOL>if isinstance(lspinfo,str) and os.path.exists(lspinfo):<EOL><INDENT>LOGINFO('<STR_LIT>' % lspinfo)<EOL>with open(lspinfo,'<STR_LIT:rb>') as infd:<EOL><INDENT>lspinfo = pickle.load(infd)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>periods = lspinfo['<STR_LIT>']<EOL>lspvals = lspinfo['<STR_LIT>']<EOL>bestperiod = lspinfo['<STR_LIT>']<EOL>lspmethod = lspinfo['<STR_LIT>']<EOL>plt.plot(periods, lspvals)<EOL>plt.xscale('<STR_LIT>',basex=<NUM_LIT:10>)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel(PLOTYLABELS[lspmethod])<EOL>plottitle = '<STR_LIT>' % (METHODSHORTLABELS[lspmethod],<EOL>bestperiod)<EOL>plt.title(plottitle)<EOL>for bestperiod, bestpeak in zip(lspinfo['<STR_LIT>'],<EOL>lspinfo['<STR_LIT>']):<EOL><INDENT>plt.annotate('<STR_LIT>' % bestperiod,<EOL>xy=(bestperiod, bestpeak), xycoords='<STR_LIT:data>',<EOL>xytext=(<NUM_LIT:0.0>,<NUM_LIT>), textcoords='<STR_LIT>',<EOL>arrowprops=dict(arrowstyle=\"<STR_LIT>\"),fontsize='<STR_LIT>')<EOL><DEDENT>plt.grid(color='<STR_LIT>',<EOL>alpha=<NUM_LIT>,<EOL>zorder=<NUM_LIT:0>,<EOL>linewidth=<NUM_LIT:1.0>,<EOL>linestyle='<STR_LIT::>')<EOL>if outfile and isinstance(outfile, str):<EOL><INDENT>if outfile.endswith('<STR_LIT>'):<EOL><INDENT>plt.savefig(outfile,bbox_inches='<STR_LIT>',dpi=plotdpi)<EOL><DEDENT>else:<EOL><INDENT>plt.savefig(outfile,bbox_inches='<STR_LIT>')<EOL><DEDENT>plt.close()<EOL>return os.path.abspath(outfile)<EOL><DEDENT>elif dispok:<EOL><INDENT>plt.show()<EOL>plt.close()<EOL>return<EOL><DEDENT>else:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>outfile = '<STR_LIT>'<EOL>plt.savefig(outfile,bbox_inches='<STR_LIT>',dpi=plotdpi)<EOL>plt.close()<EOL>return os.path.abspath(outfile)<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>return<EOL><DEDENT>", "docstring": "Makes a plot of periodograms obtained from `periodbase` functions.\n\n    This takes the output dict produced by any 
`astrobase.periodbase`\n    period-finder function or a pickle filename containing such a dict and makes\n    a periodogram plot.\n\n    Parameters\n    ----------\n\n    lspinfo : dict or str\n        If lspinfo is a dict, it must be a dict produced by an\n        `astrobase.periodbase` period-finder function or a dict from your own\n        period-finder function or routine that is of the form below with at\n        least these keys::\n\n            {'periods': np.array of all periods searched by the period-finder,\n             'lspvals': np.array of periodogram power value for each period,\n             'bestperiod': a float value that is the period with the highest\n                           peak in the periodogram, i.e. the most-likely actual\n                           period,\n             'method': a three-letter code naming the period-finder used; must\n                       be one of the keys in the `METHODLABELS` dict above,\n             'nbestperiods': a list of the periods corresponding to periodogram\n                             peaks (`nbestlspvals` below) to annotate on the\n                             periodogram plot so they can be called out\n                             visually,\n             'nbestlspvals': a list of the power values associated with\n                             periodogram peaks to annotate on the periodogram\n                             plot so they can be called out visually; should be\n                             the same length as `nbestperiods` above}\n\n        If lspinfo is a str, then it must be a path to a pickle file that\n        contains a dict of the form described above.\n\n    outfile : str or None\n        If this is a str, will write the periodogram plot to the file specified\n        by this string. 
If this is None, will write to a file called\n        'lsp-plot.png' in the current working directory.\n\n    plotdpi : int\n        Sets the resolution in DPI of the output periodogram plot PNG file.\n\n    Returns\n    -------\n\n    str\n        Absolute path to the periodogram plot file created.", "id": "f14695:m4"}
{"signature": "def fits_finder_chart(<EOL>fitsfile,<EOL>outfile,<EOL>fitsext=<NUM_LIT:0>,<EOL>wcsfrom=None,<EOL>scale=ZScaleInterval(),<EOL>stretch=LinearStretch(),<EOL>colormap=plt.cm.gray_r,<EOL>findersize=None,<EOL>finder_coordlimits=None,<EOL>overlay_ra=None,<EOL>overlay_decl=None,<EOL>overlay_pltopts={'<STR_LIT>':'<STR_LIT:o>',<EOL>'<STR_LIT>':<NUM_LIT>,<EOL>'<STR_LIT>':'<STR_LIT:none>',<EOL>'<STR_LIT>':<NUM_LIT>,<EOL>'<STR_LIT>':'<STR_LIT>'},<EOL>overlay_zoomcontain=False,<EOL>grid=False,<EOL>gridcolor='<STR_LIT:k>'<EOL>):", "body": "<EOL>if wcsfrom is None:<EOL><INDENT>hdulist = pyfits.open(fitsfile)<EOL>img, hdr = hdulist[fitsext].data, hdulist[fitsext].header<EOL>hdulist.close()<EOL>frameshape = (hdr['<STR_LIT>'], hdr['<STR_LIT>'])<EOL>w = WCS(hdr)<EOL><DEDENT>elif os.path.exists(wcsfrom):<EOL><INDENT>hdulist = pyfits.open(fitsfile)<EOL>img, hdr = hdulist[fitsext].data, hdulist[fitsext].header<EOL>hdulist.close()<EOL>frameshape = (hdr['<STR_LIT>'], hdr['<STR_LIT>'])<EOL>w = WCS(wcsfrom)<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>' %<EOL>fitsfile)<EOL>return None<EOL><DEDENT>if findersize is None:<EOL><INDENT>fig = plt.figure(figsize=(frameshape[<NUM_LIT:0>]/<NUM_LIT>,<EOL>frameshape[<NUM_LIT:1>]/<NUM_LIT>))<EOL><DEDENT>else:<EOL><INDENT>fig = plt.figure(figsize=findersize)<EOL><DEDENT>if (overlay_zoomcontain and<EOL>overlay_ra is not None and<EOL>overlay_decl is not None):<EOL><INDENT>finder_coordlimits = [overlay_ra.min()-<NUM_LIT>/<NUM_LIT>,<EOL>overlay_ra.max()+<NUM_LIT>/<NUM_LIT>,<EOL>overlay_decl.min()-<NUM_LIT>/<NUM_LIT>,<EOL>overlay_decl.max()+<NUM_LIT>/<NUM_LIT>]<EOL><DEDENT>if finder_coordlimits and isinstance(finder_coordlimits, (list,tuple)):<EOL><INDENT>minra, maxra, mindecl, maxdecl = finder_coordlimits<EOL>cntra, cntdecl = (minra + maxra)/<NUM_LIT>, (mindecl + maxdecl)/<NUM_LIT><EOL>pixelcoords = w.all_world2pix([[minra, mindecl],<EOL>[maxra, maxdecl],<EOL>[cntra, cntdecl]],<NUM_LIT:1>)<EOL>x1, y1, x2, y2 = 
(int(pixelcoords[<NUM_LIT:0>,<NUM_LIT:0>]),<EOL>int(pixelcoords[<NUM_LIT:0>,<NUM_LIT:1>]),<EOL>int(pixelcoords[<NUM_LIT:1>,<NUM_LIT:0>]),<EOL>int(pixelcoords[<NUM_LIT:1>,<NUM_LIT:1>]))<EOL>xmin = x1 if x1 < x2 else x2<EOL>xmax = x2 if x2 > x1 else x1<EOL>ymin = y1 if y1 < y2 else y2<EOL>ymax = y2 if y2 > y1 else y1<EOL>whdr = w.to_header()<EOL>whdr['<STR_LIT>'] = (xmax - xmin)/<NUM_LIT:2><EOL>whdr['<STR_LIT>'] = (ymax - ymin)/<NUM_LIT:2><EOL>whdr['<STR_LIT>'] = cntra<EOL>whdr['<STR_LIT>'] = cntdecl<EOL>whdr['<STR_LIT>'] = xmax - xmin<EOL>whdr['<STR_LIT>'] = ymax - ymin<EOL>w = WCS(whdr)<EOL><DEDENT>else:<EOL><INDENT>xmin, xmax, ymin, ymax = <NUM_LIT:0>, hdr['<STR_LIT>'], <NUM_LIT:0>, hdr['<STR_LIT>']<EOL><DEDENT>fig.add_subplot(<NUM_LIT>,projection=w)<EOL>if scale is not None and stretch is not None:<EOL><INDENT>norm = ImageNormalize(img,<EOL>interval=scale,<EOL>stretch=stretch)<EOL>plt.imshow(img[ymin:ymax,xmin:xmax],<EOL>origin='<STR_LIT>',<EOL>cmap=colormap,<EOL>norm=norm)<EOL><DEDENT>else:<EOL><INDENT>plt.imshow(img[ymin:ymax,xmin:xmax],<EOL>origin='<STR_LIT>',<EOL>cmap=colormap)<EOL><DEDENT>if grid:<EOL><INDENT>plt.grid(color=gridcolor,ls='<STR_LIT>',lw=<NUM_LIT:1.0>)<EOL><DEDENT>if overlay_ra is not None and overlay_decl is not None:<EOL><INDENT>our_pltopts = dict(<EOL>transform=plt.gca().get_transform('<STR_LIT>'),<EOL>marker='<STR_LIT:o>',<EOL>markersize=<NUM_LIT>,<EOL>markerfacecolor='<STR_LIT:none>',<EOL>markeredgewidth=<NUM_LIT>,<EOL>markeredgecolor='<STR_LIT>',<EOL>rasterized=True,<EOL>linestyle='<STR_LIT:none>'<EOL>)<EOL>if overlay_pltopts is not None and isinstance(overlay_pltopts,<EOL>dict):<EOL><INDENT>our_pltopts.update(overlay_pltopts)<EOL><DEDENT>plt.gca().set_autoscale_on(False)<EOL>plt.gca().plot(overlay_ra, overlay_decl,<EOL>**our_pltopts)<EOL><DEDENT>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>xax = plt.gca().coords[<NUM_LIT:0>]<EOL>yax = 
plt.gca().coords[<NUM_LIT:1>]<EOL>yax.set_major_formatter('<STR_LIT>')<EOL>xax.set_major_formatter('<STR_LIT>')<EOL>plt.savefig(outfile, dpi=<NUM_LIT>)<EOL>plt.close('<STR_LIT:all>')<EOL>return outfile<EOL>", "docstring": "This makes a finder chart for a given FITS with an optional object\n    position overlay.\n\n    Parameters\n    ----------\n\n    fitsfile : str\n        `fitsfile` is the FITS file to use to make the finder chart.\n\n    outfile : str\n        `outfile` is the name of the output file. This can be a png or pdf or\n        whatever else matplotlib can write given a filename and extension.\n\n    fitsext : int\n        Sets the FITS extension in `fitsfile` to use to extract the image array\n        from.\n\n    wcsfrom : str or None\n        If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will\n        be taken from the FITS header of `fitsfile`. If this is not None, it\n        must be a FITS or similar file that contains a WCS header in its first\n        extension.\n\n    scale : astropy.visualization.Interval object\n        `scale` sets the normalization for the FITS pixel values. This is an\n        astropy.visualization Interval object.\n        See http://docs.astropy.org/en/stable/visualization/normalization.html\n        for details on `scale` and `stretch` objects.\n\n    stretch : astropy.visualization.Stretch object\n        `stretch` sets the stretch function for mapping FITS pixel values to\n        output pixel values. 
This is an astropy.visualization Stretch object.\n        See http://docs.astropy.org/en/stable/visualization/normalization.html\n        for details on `scale` and `stretch` objects.\n\n    colormap : matplotlib Colormap object\n        `colormap` is a matplotlib color map object to use for the output image.\n\n    findersize : None or tuple of two ints\n        If `findersize` is None, the output image size will be set by the NAXIS1\n        and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,\n        `findersize` must be a tuple with the intended x and y size of the image\n        in inches (all output images will use a DPI = 100).\n\n    finder_coordlimits : list of four floats or None\n        If not None, `finder_coordlimits` sets x and y limits for the plot,\n        effectively zooming it in if these are smaller than the dimensions of\n        the FITS image. This should be a list of the form: [minra, maxra,\n        mindecl, maxdecl] all in decimal degrees.\n\n    overlay_ra, overlay_decl : np.array or None\n        `overlay_ra` and `overlay_decl` are ndarrays containing the RA and Dec\n        values to overplot on the image as an overlay. If these are both None,\n        then no overlay will be plotted.\n\n    overlay_pltopts : dict\n        `overlay_pltopts` controls how the overlay points will be plotted. This\n        a dict with standard matplotlib marker, etc. kwargs as key-val pairs,\n        e.g. 'markersize', 'markerfacecolor', etc. The default options make red\n        outline circles at the location of each object in the overlay.\n\n    overlay_zoomcontain : bool\n        `overlay_zoomcontain` controls if the finder chart will be zoomed to\n        just contain the overlayed points. Everything outside the footprint of\n        these points will be discarded.\n\n    grid : bool\n        `grid` sets if a grid will be made on the output image.\n\n    gridcolor : str\n        `gridcolor` sets the color of the grid lines. 
This is a usual matplotib\n        color spec string.\n\n    Returns\n    -------\n\n    str or None\n        The filename of the generated output image if successful. None\n        otherwise.", "id": "f14695:m3"}
{"signature": "def skyview_stamp(ra, decl,<EOL>survey='<STR_LIT>',<EOL>scaling='<STR_LIT>',<EOL>flip=True,<EOL>convolvewith=None,<EOL>forcefetch=False,<EOL>cachedir='<STR_LIT>',<EOL>timeout=<NUM_LIT>,<EOL>retry_failed=False,<EOL>savewcsheader=True,<EOL>verbose=False):", "body": "stampdict = get_stamp(ra, decl,<EOL>survey=survey,<EOL>scaling=scaling,<EOL>forcefetch=forcefetch,<EOL>cachedir=cachedir,<EOL>timeout=timeout,<EOL>retry_failed=retry_failed,<EOL>verbose=verbose)<EOL>if stampdict:<EOL><INDENT>stampfits = pyfits.open(stampdict['<STR_LIT>'])<EOL>header = stampfits[<NUM_LIT:0>].header<EOL>frame = stampfits[<NUM_LIT:0>].data<EOL>stampfits.close()<EOL>if flip:<EOL><INDENT>frame = np.flipud(frame)<EOL><DEDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>% (ra, decl))<EOL><DEDENT>if convolvewith:<EOL><INDENT>convolved = aconv.convolve(frame, convolvewith)<EOL>if savewcsheader:<EOL><INDENT>return convolved, header<EOL><DEDENT>else:<EOL><INDENT>return convolved<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if savewcsheader:<EOL><INDENT>return frame, header<EOL><DEDENT>else:<EOL><INDENT>return frame<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (ra, decl, survey, scaling))<EOL>return None<EOL><DEDENT>", "docstring": "This downloads a DSS FITS stamp centered on the coordinates specified.\n\n    This wraps the function :py:func:`astrobase.services.skyview.get_stamp`,\n    which downloads Digitized Sky Survey stamps in FITS format from the NASA\n    SkyView service:\n\n    https://skyview.gsfc.nasa.gov/current/cgi/query.pl\n\n    Also adds some useful operations on top of the FITS file returned.\n\n    Parameters\n    ----------\n\n    ra,decl : float\n        The center coordinates for the stamp in decimal degrees.\n\n    survey : str\n        The survey name to get the stamp from. This is one of the\n        values in the 'SkyView Surveys' option boxes on the SkyView\n        webpage. 
Currently, we've only tested using 'DSS2 Red' as the value for\n        this kwarg, but the other ones should work in principle.\n\n    scaling : str\n        This is the pixel value scaling function to use.\n\n    flip : bool\n        Will flip the downloaded image top to bottom. This should usually be\n        True because matplotlib and FITS have different image coord origin\n        conventions. Alternatively, set this to False and use the\n        `origin='lower'` in any call to `matplotlib.pyplot.imshow` when plotting\n        this image.\n\n    convolvewith : astropy.convolution Kernel object or None\n        If `convolvewith` is an astropy.convolution Kernel object from:\n\n        http://docs.astropy.org/en/stable/convolution/kernels.html\n\n        then, this function will return the stamp convolved with that\n        kernel. This can be useful to see effects of wide-field telescopes (like\n        the HATNet and HATSouth lenses) degrading the nominal 1 arcsec/px of\n        DSS, causing blending of targets and any variability.\n\n    forcefetch : bool\n        If True, will disregard any existing cached copies of the stamp already\n        downloaded corresponding to the requested center coordinates and\n        redownload the FITS from the SkyView service.\n\n    cachedir : str\n        This is the path to the astrobase cache directory. 
All downloaded FITS\n        stamps are stored here as .fits.gz files so we can immediately respond\n        with the cached copy when a request is made for a coordinate center\n        that's already been downloaded.\n\n    timeout : float\n        Sets the timeout in seconds to wait for a response from the NASA SkyView\n        service.\n\n    retry_failed : bool\n        If the initial request to SkyView fails, and this is True, will retry\n        until it succeeds.\n\n    savewcsheader : bool\n        If this is True, also returns the WCS header of the downloaded FITS\n        stamp in addition to the FITS image itself. Useful for projecting object\n        coordinates onto image xy coordinates for visualization.\n\n    verbose : bool\n        If True, indicates progress.\n\n    Returns\n    -------\n\n    tuple or array or None\n        This returns based on the value of `savewcsheader`:\n\n        - If `savewcsheader=True`, returns a tuple:\n          (FITS stamp image as a numpy array, FITS header)\n        - If `savewcsheader=False`, returns only the FITS stamp image as numpy\n          array.\n        - If the stamp retrieval fails, returns None.", "id": "f14695:m2"}
{"signature": "def angle_wrap(angle, radians=False):", "body": "if radians:<EOL><INDENT>wrapped = angle % (<NUM_LIT>*pi_value)<EOL>if wrapped < <NUM_LIT:0.0>:<EOL><INDENT>wrapped = <NUM_LIT>*pi_value + wrapped<EOL><DEDENT><DEDENT>else:<EOL><INDENT>wrapped = angle % <NUM_LIT><EOL>if wrapped < <NUM_LIT:0.0>:<EOL><INDENT>wrapped = <NUM_LIT> + wrapped<EOL><DEDENT><DEDENT>return wrapped<EOL>", "docstring": "Wraps the input angle to 360.0 degrees.\n\n    Parameters\n    ----------\n\n    angle : float\n        The angle to wrap around 360.0 deg.\n\n    radians : bool\n        If True, will assume that the input is in radians. The output will then\n        also be in radians.\n\n    Returns\n    -------\n\n    float\n        Wrapped angle. If radians is True: input is assumed to be in radians,\n        output is also in radians.", "id": "f14696:m0"}
{"signature": "def decimal_to_dms(decimal_value):", "body": "if decimal_value < <NUM_LIT:0>:<EOL><INDENT>negative = True<EOL>dec_val = fabs(decimal_value)<EOL><DEDENT>else:<EOL><INDENT>negative = False<EOL>dec_val = decimal_value<EOL><DEDENT>degrees = trunc(dec_val)<EOL>minutes_deg = dec_val - degrees<EOL>minutes_mm = minutes_deg * <NUM_LIT><EOL>minutes_out = trunc(minutes_mm)<EOL>seconds = (minutes_mm - minutes_out)*<NUM_LIT><EOL>if negative:<EOL><INDENT>degrees = degrees<EOL>return '<STR_LIT:->', degrees, minutes_out, seconds<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT:+>', degrees, minutes_out, seconds<EOL><DEDENT>", "docstring": "Converts from decimal degrees (for declination coords) to DD:MM:SS.\n\n    Parameters\n    ----------\n\n    decimal_value : float\n        A decimal value to convert to degrees, minutes, seconds sexagesimal\n        format.\n\n    Returns\n    -------\n\n    tuple\n        A four element tuple is returned: (sign, HH, MM, SS.ssss...)", "id": "f14696:m1"}
{"signature": "def hms_str_to_tuple(hms_string):", "body": "if '<STR_LIT::>' in hms_string:<EOL><INDENT>separator = '<STR_LIT::>'<EOL><DEDENT>else:<EOL><INDENT>separator = '<STR_LIT:U+0020>'<EOL><DEDENT>hh, mm, ss = hms_string.split(separator)<EOL>return int(hh), int(mm), float(ss)<EOL>", "docstring": "Converts a string of the form HH:MM:SS or HH MM SS to a tuple of the form\n    (HH, MM, SS).\n\n    Parameters\n    ----------\n\n    hms_string : str\n        A RA coordinate string of the form 'HH:MM:SS.sss' or 'HH MM SS.sss'.\n\n    Returns\n    -------\n\n    tuple\n        A three element tuple is returned (HH, MM, SS.ssss...)", "id": "f14696:m3"}
{"signature": "def xieta_from_radecl(inra, indecl,<EOL>incenterra, incenterdecl,<EOL>deg=True):", "body": "if deg:<EOL><INDENT>ra = np.radians(inra)<EOL>decl = np.radians(indecl)<EOL>centerra = np.radians(incenterra)<EOL>centerdecl = np.radians(incenterdecl)<EOL><DEDENT>else:<EOL><INDENT>ra = inra<EOL>decl = indecl<EOL>centerra = incenterra<EOL>centerdecl = incenterdecl<EOL><DEDENT>cdecc = np.cos(centerdecl)<EOL>sdecc = np.sin(centerdecl)<EOL>crac = np.cos(centerra)<EOL>srac = np.sin(centerra)<EOL>uu = np.cos(decl)*np.cos(ra)<EOL>vv = np.cos(decl)*np.sin(ra)<EOL>ww = np.sin(decl)<EOL>uun = uu*cdecc*crac + vv*cdecc*srac + ww*sdecc<EOL>vvn = -uu*srac + vv*crac<EOL>wwn = -uu*sdecc*crac - vv*sdecc*srac + ww*cdecc<EOL>denom = vvn*vvn + wwn*wwn<EOL>aunn = np.zeros_like(uun)<EOL>aunn[uun >= <NUM_LIT:1.0>] = <NUM_LIT:0.0><EOL>aunn[uun < <NUM_LIT:1.0>] = np.arccos(uun)<EOL>xi, eta = np.zeros_like(aunn), np.zeros_like(aunn)<EOL>xi[(aunn <= <NUM_LIT:0.0>) | (denom <= <NUM_LIT:0.0>)] = <NUM_LIT:0.0><EOL>eta[(aunn <= <NUM_LIT:0.0>) | (denom <= <NUM_LIT:0.0>)] = <NUM_LIT:0.0><EOL>sdenom = np.sqrt(denom)<EOL>xi[(aunn > <NUM_LIT:0.0>) | (denom > <NUM_LIT:0.0>)] = aunn*vvn/sdenom<EOL>eta[(aunn > <NUM_LIT:0.0>) | (denom > <NUM_LIT:0.0>)] = aunn*wwn/sdenom<EOL>if deg:<EOL><INDENT>return np.degrees(xi), np.degrees(eta)<EOL><DEDENT>else:<EOL><INDENT>return xi, eta<EOL><DEDENT>", "docstring": "This returns the image-plane projected xi-eta coords for inra, indecl.\n\n    Parameters\n    ----------\n\n    inra,indecl : array-like\n        The equatorial coordinates to get the xi, eta coordinates for in decimal\n        degrees or radians.\n\n    incenterra,incenterdecl : float\n        The center coordinate values to use to calculate the plane-projected\n        coordinates around.\n\n    deg : bool\n        If this is True, the input angles are assumed to be in degrees and the\n        output is in degrees as well.\n\n    Returns\n    -------\n\n    tuple of np.arrays\n        This is 
the (`xi`, `eta`) coordinate pairs corresponding to the\n        image-plane projected coordinates for each pair of input equatorial\n        coordinates in (`inra`, `indecl`).", "id": "f14696:m19"}
{"signature": "def conesearch_kdtree(kdtree,<EOL>racenter,<EOL>declcenter,<EOL>searchradiusdeg,<EOL>conesearchworkers=<NUM_LIT:1>):", "body": "cosdecl = np.cos(np.radians(declcenter))<EOL>sindecl = np.sin(np.radians(declcenter))<EOL>cosra = np.cos(np.radians(racenter))<EOL>sinra = np.sin(np.radians(racenter))<EOL>xyzdist = <NUM_LIT> * np.sin(np.radians(searchradiusdeg)/<NUM_LIT>)<EOL>kdtindices = kdtree.query_ball_point([cosra*cosdecl,<EOL>sinra*cosdecl,<EOL>sindecl],<EOL>xyzdist,<EOL>n_jobs=conesearchworkers)<EOL>return kdtindices<EOL>", "docstring": "This does a cone-search around (`racenter`, `declcenter`) in `kdtree`.\n\n    Parameters\n    ----------\n\n    kdtree : scipy.spatial.CKDTree\n        This is a kdtree object generated by the `make_kdtree` function.\n\n    racenter,declcenter : float or array-like\n        This is the center coordinate to run the cone-search around in decimal\n        degrees. If this is an np.array, will search for all coordinate pairs in\n        the array.\n\n    searchradiusdeg : float\n        The search radius to use for the cone-search in decimal degrees.\n\n    conesearchworkers : int\n        The number of parallel workers to launch for the cone-search.\n\n    Returns\n    -------\n\n    list or np.array of lists\n        If (`racenter`, `declcenter`) is a single coordinate, this will return a\n        list of the indices of the matching objects in the kdtree. If\n        (`racenter`, `declcenter`) are array-likes, this will return an object\n        array containing lists of matching object indices for each coordinate\n        searched.", "id": "f14696:m13"}
{"signature": "def total_proper_motion(pmra, pmdecl, decl):", "body": "pm = np.sqrt( pmdecl*pmdecl + pmra*pmra*np.cos(np.radians(decl)) *<EOL>np.cos(np.radians(decl)) )<EOL>return pm<EOL>", "docstring": "This calculates the total proper motion of an object.\n\n    Parameters\n    ----------\n\n    pmra : float or array-like\n        The proper motion(s) in right ascension, measured in mas/yr.\n\n    pmdecl : float or array-like\n        The proper motion(s) in declination, measured in mas/yr.\n\n    decl : float or array-like\n        The declination of the object(s) in decimal degrees.\n\n    Returns\n    -------\n\n    float or array-like\n        The total proper motion(s) of the object(s) in mas/yr.", "id": "f14696:m15"}
{"signature": "def xmatch_neighbors(ra1, dec1,<EOL>ra2, dec2,<EOL>match_radius=<NUM_LIT>,<EOL>includeself=False,<EOL>sortresults=True):", "body": "dist = great_circle_dist(ra1,dec1,ra2,dec2)<EOL>if includeself:<EOL><INDENT>match_dist_ind = np.where(dist < match_radius)<EOL><DEDENT>else:<EOL><INDENT>match_dist_ind = np.where((dist < match_radius) & (dist > <NUM_LIT:0.1>))<EOL><DEDENT>if len(match_dist_ind) > <NUM_LIT:0>:<EOL><INDENT>match_dists = dist[match_dist_ind]<EOL>dist_sort_ind = np.argsort(match_dists)<EOL>if sortresults:<EOL><INDENT>match_dist_ind = (match_dist_ind[<NUM_LIT:0>])[dist_sort_ind]<EOL><DEDENT>min_dist = np.min(match_dists)<EOL>return (True,min_dist,match_dist_ind,match_dists[dist_sort_ind])<EOL><DEDENT>else:<EOL><INDENT>return (False,)<EOL><DEDENT>", "docstring": "Finds the closest objects in (`ra2`, `dec2`) to scalar coordinate pair\n    (`ra1`, `dec1`) and returns the indices of the objects that match.\n\n    This is a quick matcher that uses the `great_circle_dist` function to find\n    the closest object in (`ra2`, `dec2`) within `match_radius` arcseconds to\n    (`ra1`, `dec1`). (`ra1`, `dec1`) must be a scalar pair, while\n    (`ra2`, `dec2`) must be array-likes of the same lengths.\n\n    Parameters\n    ----------\n\n    ra1,dec1 : float\n        Coordinate of the object to find matches to. In decimal degrees.\n\n    ra2,dec2 : array-like\n        The coordinates that will be searched for matches. 
In decimal degrees.\n\n    match_radius : float\n        The match radius in arcseconds to use for the match.\n\n    includeself : bool\n        If this is True, the object itself will be included in the match\n        results.\n\n    sortresults : bool\n        If this is True, the match indices will be sorted by distance.\n\n    Returns\n    -------\n\n    tuple\n        A tuple like the following is returned::\n\n            (True -> matches found or False -> no matches found,\n             minimum distance between target and list,\n             np.array of indices where list of coordinates is\n             closer than `match_radius` arcseconds from the target,\n             np.array of distances in arcseconds)", "id": "f14696:m11"}
{"signature": "def make_kdtree(ra, decl):", "body": "<EOL>cosdecl = np.cos(np.radians(decl))<EOL>sindecl = np.sin(np.radians(decl))<EOL>cosra = np.cos(np.radians(ra))<EOL>sinra = np.sin(np.radians(ra))<EOL>xyz = np.column_stack((cosra*cosdecl,sinra*cosdecl, sindecl))<EOL>kdt = sps.cKDTree(xyz,copy_data=True)<EOL>return kdt<EOL>", "docstring": "This makes a `scipy.spatial.CKDTree` on (`ra`, `decl`).\n\n    Parameters\n    ----------\n\n    ra,decl : array-like\n        The right ascension and declination coordinate pairs in decimal degrees.\n\n    Returns\n    -------\n\n    `scipy.spatial.CKDTree`\n        The cKDTRee object generated by this function is returned and can be\n        used to run various spatial queries.", "id": "f14696:m12"}
{"signature": "def serial_starfeatures(lclist,<EOL>outdir,<EOL>lc_catalog_pickle,<EOL>neighbor_radius_arcsec,<EOL>maxobjects=None,<EOL>deredden=True,<EOL>custom_bandpasses=None,<EOL>lcformat='<STR_LIT>',<EOL>lcformatdir=None):", "body": "<EOL>if not os.path.exists(outdir):<EOL><INDENT>os.makedirs(outdir)<EOL><DEDENT>if maxobjects:<EOL><INDENT>lclist = lclist[:maxobjects]<EOL><DEDENT>with open(lc_catalog_pickle, '<STR_LIT:rb>') as infd:<EOL><INDENT>kdt_dict = pickle.load(infd)<EOL><DEDENT>kdt = kdt_dict['<STR_LIT>']<EOL>objlist = kdt_dict['<STR_LIT>']['<STR_LIT>']<EOL>objlcfl = kdt_dict['<STR_LIT>']['<STR_LIT>']<EOL>tasks = [(x, outdir, kdt, objlist, objlcfl,<EOL>neighbor_radius_arcsec,<EOL>deredden, custom_bandpasses,<EOL>lcformat, lcformatdir) for x in lclist]<EOL>for task in tqdm(tasks):<EOL><INDENT>result = _starfeatures_worker(task)<EOL><DEDENT>return result<EOL>", "docstring": "This drives the `get_starfeatures` function for a collection of LCs.\n\n    Parameters\n    ----------\n\n    lclist : list of str\n        The list of light curve file names to process.\n\n    outdir : str\n        The output directory where the results will be placed.\n\n    lc_catalog_pickle : str\n        The path to a catalog containing at a dict with least:\n\n        - an object ID array accessible with `dict['objects']['objectid']`\n\n        - an LC filename array accessible with `dict['objects']['lcfname']`\n\n        - a `scipy.spatial.KDTree` or `cKDTree` object to use for finding\n          neighbors for each object accessible with `dict['kdtree']`\n\n        A catalog pickle of the form needed can be produced using\n        :py:func:`astrobase.lcproc.catalogs.make_lclist` or\n        :py:func:`astrobase.lcproc.catalogs.filter_lclist`.\n\n    neighbor_radius_arcsec : float\n        This indicates the radius in arcsec to search for neighbors for this\n        object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,\n        and in GAIA.\n\n    maxobjects : 
int\n        The number of objects to process from `lclist`.\n\n    deredden : bool\n        This controls if the colors and any color classifications will be\n        dereddened using 2MASS DUST.\n\n    custom_bandpasses : dict or None\n        This is a dict used to define any custom bandpasses in the\n        `in_objectinfo` dict you want to make this function aware of and\n        generate colors for. Use the format below for this dict::\n\n            {\n            '<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>',\n                                'label':'<band_label_1>'\n                                'colors':[['<bandkey1>-<bandkey2>',\n                                           '<BAND1> - <BAND2>'],\n                                          ['<bandkey3>-<bandkey4>',\n                                           '<BAND3> - <BAND4>']]},\n            .\n            ...\n            .\n            '<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>',\n                                'label':'<band_label_N>'\n                                'colors':[['<bandkey1>-<bandkey2>',\n                                           '<BAND1> - <BAND2>'],\n                                          ['<bandkey3>-<bandkey4>',\n                                           '<BAND3> - <BAND4>']]},\n            }\n\n        Where:\n\n        `bandpass_key` is a key to use to refer to this bandpass in the\n        `objectinfo` dict, e.g. 'sdssg' for SDSS g band\n\n        `twomass_dust_key` is the key to use in the 2MASS DUST result table for\n        reddening per band-pass. 
For example, given the following DUST result\n        table (using http://irsa.ipac.caltech.edu/applications/DUST/)::\n\n            |Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|\n            |char       |float  |float             |float  |float           |float|\n            |           |microns|                  |mags   |                |mags |\n             CTIO U       0.3734              4.107   0.209            4.968 0.253\n             CTIO B       0.4309              3.641   0.186            4.325 0.221\n             CTIO V       0.5517              2.682   0.137            3.240 0.165\n            .\n            .\n            ...\n\n        The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to\n        skip DUST lookup and want to pass in a specific reddening magnitude\n        for your bandpass, use a float for the value of\n        `twomass_dust_key`. If you want to skip DUST lookup entirely for\n        this bandpass, use None for the value of `twomass_dust_key`.\n\n        `band_label` is the label to use for this bandpass, e.g. 'W1' for\n        WISE-1 band, 'u' for SDSS u, etc.\n\n        The 'colors' list contains color definitions for all colors you want\n        to generate using this bandpass. this list contains elements of the\n        form::\n\n            ['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>']\n\n        where the the first item is the bandpass keys making up this color,\n        and the second item is the label for this color to be used by the\n        frontends. An example::\n\n            ['sdssu-sdssg','u - g']\n\n    lcformat : str\n        This is the `formatkey` associated with your light curve format, which\n        you previously passed in to the `lcproc.register_lcformat`\n        function. 
This will be used to look up how to find and read the light\n        curves specified in `basedir` or `use_list_of_filenames`.\n\n    lcformatdir : str or None\n        If this is provided, gives the path to a directory when you've stored\n        your lcformat description JSONs, other than the usual directories lcproc\n        knows to search for them in. Use this along with `lcformat` to specify\n        an LC format JSON file that's not currently registered with lcproc.\n\n    Returns\n    -------\n\n    list of str\n        A list of all star features pickles produced.", "id": "f14697:m3"}
{"signature": "def get_starfeatures(lcfile,<EOL>outdir,<EOL>kdtree,<EOL>objlist,<EOL>lcflist,<EOL>neighbor_radius_arcsec,<EOL>deredden=True,<EOL>custom_bandpasses=None,<EOL>lcformat='<STR_LIT>',<EOL>lcformatdir=None):", "body": "try:<EOL><INDENT>formatinfo = get_lcformat(lcformat,<EOL>use_lcformat_dir=lcformatdir)<EOL>if formatinfo:<EOL><INDENT>(dfileglob, readerfunc,<EOL>dtimecols, dmagcols, derrcols,<EOL>magsarefluxes, normfunc) = formatinfo<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>try:<EOL><INDENT>lcdict = readerfunc(lcfile)<EOL>if ( (isinstance(lcdict, (list, tuple))) and<EOL>(isinstance(lcdict[<NUM_LIT:0>], dict)) ):<EOL><INDENT>lcdict = lcdict[<NUM_LIT:0>]<EOL><DEDENT>resultdict = {'<STR_LIT>':lcdict['<STR_LIT>'],<EOL>'<STR_LIT:info>':lcdict['<STR_LIT>'],<EOL>'<STR_LIT>':os.path.basename(lcfile)}<EOL>coordfeat = starfeatures.coord_features(lcdict['<STR_LIT>'])<EOL>colorfeat = starfeatures.color_features(<EOL>lcdict['<STR_LIT>'],<EOL>deredden=deredden,<EOL>custom_bandpasses=custom_bandpasses<EOL>)<EOL>colorclass = starfeatures.color_classification(colorfeat,<EOL>coordfeat)<EOL>nbrfeat = starfeatures.neighbor_gaia_features(lcdict['<STR_LIT>'],<EOL>kdtree,<EOL>neighbor_radius_arcsec)<EOL>if nbrfeat['<STR_LIT>'].size > <NUM_LIT:0>:<EOL><INDENT>nbrfeat['<STR_LIT>'] = objlist[nbrfeat['<STR_LIT>']]<EOL>nbrfeat['<STR_LIT>'] = objlist[<EOL>nbrfeat['<STR_LIT>']<EOL>]<EOL>nbrfeat['<STR_LIT>'] = lcflist[<EOL>nbrfeat['<STR_LIT>']<EOL>]<EOL><DEDENT>else:<EOL><INDENT>nbrfeat['<STR_LIT>'] = np.array([])<EOL>nbrfeat['<STR_LIT>'] = np.array([])<EOL>nbrfeat['<STR_LIT>'] = np.array([])<EOL><DEDENT>resultdict.update(coordfeat)<EOL>resultdict.update(colorfeat)<EOL>resultdict.update(colorclass)<EOL>resultdict.update(nbrfeat)<EOL>outfile = os.path.join(outdir,<EOL>'<STR_LIT>' 
%<EOL>squeeze(resultdict['<STR_LIT>']).replace('<STR_LIT:U+0020>','<STR_LIT:->'))<EOL>with open(outfile, '<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(resultdict, outfd, protocol=<NUM_LIT:4>)<EOL><DEDENT>return outfile<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' %<EOL>(os.path.basename(lcfile), e))<EOL>return None<EOL><DEDENT>", "docstring": "This runs the functions from :py:func:`astrobase.varclass.starfeatures`\n    on a single light curve file.\n\n    Parameters\n    ----------\n\n    lcfile : str\n        This is the LC file to extract star features for.\n\n    outdir : str\n        This is the directory to write the output pickle to.\n\n    kdtree: scipy.spatial.cKDTree\n        This is a `scipy.spatial.KDTree` or `cKDTree` used to calculate neighbor\n        proximity features. This is for the light curve catalog this object is\n        in.\n\n    objlist : np.array\n        This is a Numpy array of object IDs in the same order as the\n        `kdtree.data` np.array. This is for the light curve catalog this object\n        is in.\n\n    lcflist : np.array\n        This is a Numpy array of light curve filenames in the same order as\n        `kdtree.data`. This is for the light curve catalog this object is in.\n\n    neighbor_radius_arcsec : float\n        This indicates the radius in arcsec to search for neighbors for this\n        object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,\n        and in GAIA.\n\n    deredden : bool\n        This controls if the colors and any color classifications will be\n        dereddened using 2MASS DUST.\n\n    custom_bandpasses : dict or None\n        This is a dict used to define any custom bandpasses in the\n        `in_objectinfo` dict you want to make this function aware of and\n        generate colors for. 
Use the format below for this dict::\n\n            {\n            '<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>',\n                                'label':'<band_label_1>'\n                                'colors':[['<bandkey1>-<bandkey2>',\n                                           '<BAND1> - <BAND2>'],\n                                          ['<bandkey3>-<bandkey4>',\n                                           '<BAND3> - <BAND4>']]},\n            .\n            ...\n            .\n            '<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>',\n                                'label':'<band_label_N>'\n                                'colors':[['<bandkey1>-<bandkey2>',\n                                           '<BAND1> - <BAND2>'],\n                                          ['<bandkey3>-<bandkey4>',\n                                           '<BAND3> - <BAND4>']]},\n            }\n\n        Where:\n\n        `bandpass_key` is a key to use to refer to this bandpass in the\n        `objectinfo` dict, e.g. 'sdssg' for SDSS g band\n\n        `twomass_dust_key` is the key to use in the 2MASS DUST result table for\n        reddening per band-pass. For example, given the following DUST result\n        table (using http://irsa.ipac.caltech.edu/applications/DUST/)::\n\n            |Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|\n            |char       |float  |float             |float  |float           |float|\n            |           |microns|                  |mags   |                |mags |\n             CTIO U       0.3734              4.107   0.209            4.968 0.253\n             CTIO B       0.4309              3.641   0.186            4.325 0.221\n             CTIO V       0.5517              2.682   0.137            3.240 0.165\n            .\n            .\n            ...\n\n        The `twomass_dust_key` for 'vmag' would be 'CTIO V'. 
If you want to\n        skip DUST lookup and want to pass in a specific reddening magnitude\n        for your bandpass, use a float for the value of\n        `twomass_dust_key`. If you want to skip DUST lookup entirely for\n        this bandpass, use None for the value of `twomass_dust_key`.\n\n        `band_label` is the label to use for this bandpass, e.g. 'W1' for\n        WISE-1 band, 'u' for SDSS u, etc.\n\n        The 'colors' list contains color definitions for all colors you want\n        to generate using this bandpass. this list contains elements of the\n        form::\n\n            ['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>']\n\n        where the the first item is the bandpass keys making up this color,\n        and the second item is the label for this color to be used by the\n        frontends. An example::\n\n            ['sdssu-sdssg','u - g']\n\n    lcformat : str\n        This is the `formatkey` associated with your light curve format, which\n        you previously passed in to the `lcproc.register_lcformat`\n        function. This will be used to look up how to find and read the light\n        curves specified in `basedir` or `use_list_of_filenames`.\n\n    lcformatdir : str or None\n        If this is provided, gives the path to a directory when you've stored\n        your lcformat description JSONs, other than the usual directories lcproc\n        knows to search for them in. Use this along with `lcformat` to specify\n        an LC format JSON file that's not currently registered with lcproc.\n\n    Returns\n    -------\n\n    str\n        Path to the output pickle containing all of the star features for this\n        object.", "id": "f14697:m1"}
{"signature": "def _starfeatures_worker(task):", "body": "try:<EOL><INDENT>(lcfile, outdir, kdtree, objlist,<EOL>lcflist, neighbor_radius_arcsec,<EOL>deredden, custom_bandpasses, lcformat, lcformatdir) = task<EOL>return get_starfeatures(lcfile, outdir,<EOL>kdtree, objlist, lcflist,<EOL>neighbor_radius_arcsec,<EOL>deredden=deredden,<EOL>custom_bandpasses=custom_bandpasses,<EOL>lcformat=lcformat,<EOL>lcformatdir=lcformatdir)<EOL><DEDENT>except Exception as e:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "This wraps starfeatures.", "id": "f14697:m2"}
{"signature": "def parallel_starfeatures_lcdir(lcdir,<EOL>outdir,<EOL>lc_catalog_pickle,<EOL>neighbor_radius_arcsec,<EOL>fileglob=None,<EOL>maxobjects=None,<EOL>deredden=True,<EOL>custom_bandpasses=None,<EOL>lcformat='<STR_LIT>',<EOL>lcformatdir=None,<EOL>nworkers=NCPUS,<EOL>recursive=True):", "body": "try:<EOL><INDENT>formatinfo = get_lcformat(lcformat,<EOL>use_lcformat_dir=lcformatdir)<EOL>if formatinfo:<EOL><INDENT>(dfileglob, readerfunc,<EOL>dtimecols, dmagcols, derrcols,<EOL>magsarefluxes, normfunc) = formatinfo<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>if not fileglob:<EOL><INDENT>fileglob = dfileglob<EOL><DEDENT>LOGINFO('<STR_LIT>' % (lcformat, lcdir))<EOL>if recursive is False:<EOL><INDENT>matching = glob.glob(os.path.join(lcdir, fileglob))<EOL><DEDENT>else:<EOL><INDENT>if sys.version_info[:<NUM_LIT:2>] > (<NUM_LIT:3>,<NUM_LIT:4>):<EOL><INDENT>matching = glob.glob(os.path.join(lcdir,<EOL>'<STR_LIT>',<EOL>fileglob),recursive=True)<EOL><DEDENT>else:<EOL><INDENT>walker = os.walk(lcdir)<EOL>matching = []<EOL>for root, dirs, _files in walker:<EOL><INDENT>for sdir in dirs:<EOL><INDENT>searchpath = os.path.join(root,<EOL>sdir,<EOL>fileglob)<EOL>foundfiles = glob.glob(searchpath)<EOL>if foundfiles:<EOL><INDENT>matching.extend(foundfiles)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if matching and len(matching) > <NUM_LIT:0>:<EOL><INDENT>LOGINFO('<STR_LIT>' %<EOL>len(matching))<EOL>return parallel_starfeatures(matching,<EOL>outdir,<EOL>lc_catalog_pickle,<EOL>neighbor_radius_arcsec,<EOL>deredden=deredden,<EOL>custom_bandpasses=custom_bandpasses,<EOL>maxobjects=maxobjects,<EOL>lcformat=lcformat,<EOL>lcformatdir=lcformatdir,<EOL>nworkers=nworkers)<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>' % (lcformat,<EOL>lcdir))<EOL>return None<EOL><DEDENT>", "docstring": "This runs parallel star feature extraction for a directory of 
LCs.\n\n    Parameters\n    ----------\n\n    lcdir : list of str\n        The directory to search for light curves.\n\n    outdir : str\n        The output directory where the results will be placed.\n\n    lc_catalog_pickle : str\n        The path to a catalog containing at a dict with least:\n\n        - an object ID array accessible with `dict['objects']['objectid']`\n\n        - an LC filename array accessible with `dict['objects']['lcfname']`\n\n        - a `scipy.spatial.KDTree` or `cKDTree` object to use for finding\n          neighbors for each object accessible with `dict['kdtree']`\n\n        A catalog pickle of the form needed can be produced using\n        :py:func:`astrobase.lcproc.catalogs.make_lclist` or\n        :py:func:`astrobase.lcproc.catalogs.filter_lclist`.\n\n    neighbor_radius_arcsec : float\n        This indicates the radius in arcsec to search for neighbors for this\n        object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,\n        and in GAIA.\n\n    fileglob : str\n        The UNIX file glob to use to search for the light curves in `lcdir`. If\n        None, the default value for the light curve format specified will be\n        used.\n\n    maxobjects : int\n        The number of objects to process from `lclist`.\n\n    deredden : bool\n        This controls if the colors and any color classifications will be\n        dereddened using 2MASS DUST.\n\n    custom_bandpasses : dict or None\n        This is a dict used to define any custom bandpasses in the\n        `in_objectinfo` dict you want to make this function aware of and\n        generate colors for. 
Use the format below for this dict::\n\n            {\n            '<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>',\n                                'label':'<band_label_1>'\n                                'colors':[['<bandkey1>-<bandkey2>',\n                                           '<BAND1> - <BAND2>'],\n                                          ['<bandkey3>-<bandkey4>',\n                                           '<BAND3> - <BAND4>']]},\n            .\n            ...\n            .\n            '<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>',\n                                'label':'<band_label_N>'\n                                'colors':[['<bandkey1>-<bandkey2>',\n                                           '<BAND1> - <BAND2>'],\n                                          ['<bandkey3>-<bandkey4>',\n                                           '<BAND3> - <BAND4>']]},\n            }\n\n        Where:\n\n        `bandpass_key` is a key to use to refer to this bandpass in the\n        `objectinfo` dict, e.g. 'sdssg' for SDSS g band\n\n        `twomass_dust_key` is the key to use in the 2MASS DUST result table for\n        reddening per band-pass. For example, given the following DUST result\n        table (using http://irsa.ipac.caltech.edu/applications/DUST/)::\n\n            |Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|\n            |char       |float  |float             |float  |float           |float|\n            |           |microns|                  |mags   |                |mags |\n             CTIO U       0.3734              4.107   0.209            4.968 0.253\n             CTIO B       0.4309              3.641   0.186            4.325 0.221\n             CTIO V       0.5517              2.682   0.137            3.240 0.165\n            .\n            .\n            ...\n\n        The `twomass_dust_key` for 'vmag' would be 'CTIO V'. 
If you want to\n        skip DUST lookup and want to pass in a specific reddening magnitude\n        for your bandpass, use a float for the value of\n        `twomass_dust_key`. If you want to skip DUST lookup entirely for\n        this bandpass, use None for the value of `twomass_dust_key`.\n\n        `band_label` is the label to use for this bandpass, e.g. 'W1' for\n        WISE-1 band, 'u' for SDSS u, etc.\n\n        The 'colors' list contains color definitions for all colors you want\n        to generate using this bandpass. this list contains elements of the\n        form::\n\n            ['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>']\n\n        where the the first item is the bandpass keys making up this color,\n        and the second item is the label for this color to be used by the\n        frontends. An example::\n\n            ['sdssu-sdssg','u - g']\n\n    lcformat : str\n        This is the `formatkey` associated with your light curve format, which\n        you previously passed in to the `lcproc.register_lcformat`\n        function. This will be used to look up how to find and read the light\n        curves specified in `basedir` or `use_list_of_filenames`.\n\n    lcformatdir : str or None\n        If this is provided, gives the path to a directory when you've stored\n        your lcformat description JSONs, other than the usual directories lcproc\n        knows to search for them in. Use this along with `lcformat` to specify\n        an LC format JSON file that's not currently registered with lcproc.\n\n    nworkers : int\n        The number of parallel workers to launch.\n\n    Returns\n    -------\n\n    dict\n        A dict with key:val pairs of the input light curve filename and the\n        output star features pickle for each LC processed.", "id": "f14697:m5"}
{"signature": "def parallel_periodicfeatures(pfpkl_list,<EOL>lcbasedir,<EOL>outdir,<EOL>starfeaturesdir=None,<EOL>fourierorder=<NUM_LIT:5>,<EOL>transitparams=(-<NUM_LIT>,<NUM_LIT:0.1>,<NUM_LIT:0.1>),<EOL>ebparams=(-<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT:0.5>),<EOL>pdiff_threshold=<NUM_LIT>,<EOL>sidereal_threshold=<NUM_LIT>,<EOL>sampling_peak_multiplier=<NUM_LIT>,<EOL>sampling_startp=None,<EOL>sampling_endp=None,<EOL>timecols=None,<EOL>magcols=None,<EOL>errcols=None,<EOL>lcformat='<STR_LIT>',<EOL>lcformatdir=None,<EOL>sigclip=<NUM_LIT>,<EOL>verbose=False,<EOL>maxobjects=None,<EOL>nworkers=NCPUS):", "body": "<EOL>if not os.path.exists(outdir):<EOL><INDENT>os.makedirs(outdir)<EOL><DEDENT>if maxobjects:<EOL><INDENT>pfpkl_list = pfpkl_list[:maxobjects]<EOL><DEDENT>LOGINFO('<STR_LIT>' % len(pfpkl_list))<EOL>if starfeaturesdir and os.path.exists(starfeaturesdir):<EOL><INDENT>starfeatures_list = []<EOL>LOGINFO('<STR_LIT>')<EOL>for pfpkl in pfpkl_list:<EOL><INDENT>sfpkl1 = os.path.basename(pfpkl).replace('<STR_LIT>',<EOL>'<STR_LIT>')<EOL>sfpkl2 = sfpkl1.replace('<STR_LIT>','<STR_LIT>')<EOL>sfpath1 = os.path.join(starfeaturesdir, sfpkl1)<EOL>sfpath2 = os.path.join(starfeaturesdir, sfpkl2)<EOL>if os.path.exists(sfpath1):<EOL><INDENT>starfeatures_list.append(sfpkl1)<EOL><DEDENT>elif os.path.exists(sfpath2):<EOL><INDENT>starfeatures_list.append(sfpkl2)<EOL><DEDENT>else:<EOL><INDENT>starfeatures_list.append(None)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>starfeatures_list = [None for x in pfpkl_list]<EOL><DEDENT>kwargs = {'<STR_LIT>':fourierorder,<EOL>'<STR_LIT>':transitparams,<EOL>'<STR_LIT>':ebparams,<EOL>'<STR_LIT>':pdiff_threshold,<EOL>'<STR_LIT>':sidereal_threshold,<EOL>'<STR_LIT>':sampling_peak_multiplier,<EOL>'<STR_LIT>':sampling_startp,<EOL>'<STR_LIT>':sampling_endp,<EOL>'<STR_LIT>':timecols,<EOL>'<STR_LIT>':magcols,<EOL>'<STR_LIT>':errcols,<EOL>'<STR_LIT>':lcformat,<EOL>'<STR_LIT>':lcformat,<EOL>'<STR_LIT>':sigclip,<EOL>'<STR_LIT>':verbose}<EOL>tasks = [(x, 
lcbasedir, outdir, y, kwargs) for (x,y) in<EOL>zip(pfpkl_list, starfeatures_list)]<EOL>LOGINFO('<STR_LIT>')<EOL>with ProcessPoolExecutor(max_workers=nworkers) as executor:<EOL><INDENT>resultfutures = executor.map(_periodicfeatures_worker, tasks)<EOL><DEDENT>results = [x for x in resultfutures]<EOL>resdict = {os.path.basename(x):y for (x,y) in zip(pfpkl_list, results)}<EOL>return resdict<EOL>", "docstring": "This runs periodic feature generation in parallel for all periodfinding\n    pickles in the input list.\n\n    Parameters\n    ----------\n\n    pfpkl_list : list of str\n        The list of period-finding pickles to use.\n\n    lcbasedir : str\n        The base directory where the associated light curves are located.\n\n    outdir : str\n        The directory where the results will be written.\n\n    starfeaturesdir : str or None\n        The directory containing the `starfeatures-<objectid>.pkl` files for\n        each object to use calculate neighbor proximity light curve features.\n\n    fourierorder : int\n        The Fourier order to use to generate sinusoidal function and fit that to\n        the phased light curve.\n\n    transitparams : list of floats\n        The transit depth, duration, and ingress duration to use to generate a\n        trapezoid planet transit model fit to the phased light curve. The period\n        used is the one provided in `period`, while the epoch is automatically\n        obtained from a spline fit to the phased light curve.\n\n    ebparams : list of floats\n        The primary eclipse depth, eclipse duration, the primary-secondary depth\n        ratio, and the phase of the secondary eclipse to use to generate an\n        eclipsing binary model fit to the phased light curve. 
The period used is\n        the one provided in `period`, while the epoch is automatically obtained\n        from a spline fit to the phased light curve.\n\n    pdiff_threshold : float\n        This is the max difference between periods to consider them the same.\n\n    sidereal_threshold : float\n        This is the max difference between any of the 'best' periods and the\n        sidereal day periods to consider them the same.\n\n    sampling_peak_multiplier : float\n        This is the minimum multiplicative factor of a 'best' period's\n        normalized periodogram peak over the sampling periodogram peak at the\n        same period required to accept the 'best' period as possibly real.\n\n    sampling_startp, sampling_endp : float\n        If the `pgramlist` doesn't have a time-sampling Lomb-Scargle\n        periodogram, it will be obtained automatically. Use these kwargs to\n        control the minimum and maximum period interval to be searched when\n        generating this periodogram.\n\n    timecols : list of str or None\n        The timecol keys to use from the lcdict in calculating the features.\n\n    magcols : list of str or None\n        The magcol keys to use from the lcdict in calculating the features.\n\n    errcols : list of str or None\n        The errcol keys to use from the lcdict in calculating the features.\n\n    lcformat : str\n        This is the `formatkey` associated with your light curve format, which\n        you previously passed in to the `lcproc.register_lcformat`\n        function. This will be used to look up how to find and read the light\n        curves specified in `basedir` or `use_list_of_filenames`.\n\n    lcformatdir : str or None\n        If this is provided, gives the path to a directory when you've stored\n        your lcformat description JSONs, other than the usual directories lcproc\n        knows to search for them in. 
Use this along with `lcformat` to specify\n        an LC format JSON file that's not currently registered with lcproc.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    verbose : bool\n        If True, will indicate progress while working.\n\n    maxobjects : int\n        The total number of objects to process from `pfpkl_list`.\n\n    nworkers : int\n        The number of parallel workers to launch to process the input.\n\n    Returns\n    -------\n\n    dict\n        A dict containing key: val pairs of the input period-finder result and\n        the output periodic feature result pickles for each input pickle is\n        returned.", "id": "f14698:m4"}
{"signature": "def get_periodicfeatures(<EOL>pfpickle,<EOL>lcbasedir,<EOL>outdir,<EOL>fourierorder=<NUM_LIT:5>,<EOL>transitparams=(-<NUM_LIT>,<NUM_LIT:0.1>,<NUM_LIT:0.1>),<EOL>ebparams=(-<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT:0.5>),<EOL>pdiff_threshold=<NUM_LIT>,<EOL>sidereal_threshold=<NUM_LIT>,<EOL>sampling_peak_multiplier=<NUM_LIT>,<EOL>sampling_startp=None,<EOL>sampling_endp=None,<EOL>starfeatures=None,<EOL>timecols=None,<EOL>magcols=None,<EOL>errcols=None,<EOL>lcformat='<STR_LIT>',<EOL>lcformatdir=None,<EOL>sigclip=<NUM_LIT>,<EOL>verbose=True,<EOL>raiseonfail=False<EOL>):", "body": "try:<EOL><INDENT>formatinfo = get_lcformat(lcformat,<EOL>use_lcformat_dir=lcformatdir)<EOL>if formatinfo:<EOL><INDENT>(fileglob, readerfunc,<EOL>dtimecols, dmagcols, derrcols,<EOL>magsarefluxes, normfunc) = formatinfo<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>if pfpickle.endswith('<STR_LIT>'):<EOL><INDENT>infd = gzip.open(pfpickle)<EOL><DEDENT>else:<EOL><INDENT>infd = open(pfpickle)<EOL><DEDENT>pf = pickle.load(infd)<EOL>infd.close()<EOL>lcfile = os.path.join(lcbasedir, pf['<STR_LIT>'])<EOL>objectid = pf['<STR_LIT>']<EOL>if '<STR_LIT>' in pf:<EOL><INDENT>kwargs = pf['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>kwargs = None<EOL><DEDENT>if kwargs and '<STR_LIT>' in kwargs and timecols is None:<EOL><INDENT>timecols = kwargs['<STR_LIT>']<EOL><DEDENT>elif not kwargs and not timecols:<EOL><INDENT>timecols = dtimecols<EOL><DEDENT>if kwargs and '<STR_LIT>' in kwargs and magcols is None:<EOL><INDENT>magcols = kwargs['<STR_LIT>']<EOL><DEDENT>elif not kwargs and not magcols:<EOL><INDENT>magcols = dmagcols<EOL><DEDENT>if kwargs and '<STR_LIT>' in kwargs and errcols is None:<EOL><INDENT>errcols = kwargs['<STR_LIT>']<EOL><DEDENT>elif not kwargs and not errcols:<EOL><INDENT>errcols = derrcols<EOL><DEDENT>if not 
os.path.exists(lcfile):<EOL><INDENT>LOGERROR(\"<STR_LIT>\" % (lcfile, objectid))<EOL>return None<EOL><DEDENT>if starfeatures is not None and os.path.exists(starfeatures):<EOL><INDENT>with open(starfeatures,'<STR_LIT:rb>') as infd:<EOL><INDENT>starfeat = pickle.load(infd)<EOL><DEDENT>if starfeat['<STR_LIT>'].size > <NUM_LIT:0>:<EOL><INDENT>nbr_full_lcf = starfeat['<STR_LIT>'][<NUM_LIT:0>]<EOL>if os.path.exists(os.path.join(lcbasedir,<EOL>os.path.basename(nbr_full_lcf))):<EOL><INDENT>nbrlcf = os.path.join(lcbasedir,<EOL>os.path.basename(nbr_full_lcf))<EOL><DEDENT>elif os.path.exists(nbr_full_lcf):<EOL><INDENT>nbrlcf = nbr_full_lcf<EOL><DEDENT>else:<EOL><INDENT>LOGWARNING(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" %<EOL>(os.path.basename(nbr_full_lcf),<EOL>os.path.dirname(nbr_full_lcf),<EOL>lcbasedir))<EOL>nbrlcf = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>nbrlcf = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>nbrlcf = None<EOL><DEDENT>try:<EOL><INDENT>lcdict = readerfunc(lcfile)<EOL>if ( (isinstance(lcdict, (list, tuple))) and<EOL>(isinstance(lcdict[<NUM_LIT:0>], dict)) ):<EOL><INDENT>lcdict = lcdict[<NUM_LIT:0>]<EOL><DEDENT>if nbrlcf is not None:<EOL><INDENT>nbrlcdict = readerfunc(nbrlcf)<EOL>if ( (isinstance(nbrlcdict, (list, tuple))) and<EOL>(isinstance(nbrlcdict[<NUM_LIT:0>], dict)) ):<EOL><INDENT>nbrlcdict = nbrlcdict[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>outfile = os.path.join(outdir, '<STR_LIT>' %<EOL>squeeze(objectid).replace('<STR_LIT:U+0020>','<STR_LIT:->'))<EOL>if normfunc is not None:<EOL><INDENT>lcdict = normfunc(lcdict)<EOL>if nbrlcf:<EOL><INDENT>nbrlcdict = normfunc(nbrlcdict)<EOL><DEDENT><DEDENT>resultdict = {}<EOL>for tcol, mcol, ecol in zip(timecols, magcols, errcols):<EOL><INDENT>if '<STR_LIT:.>' in tcol:<EOL><INDENT>tcolget = tcol.split('<STR_LIT:.>')<EOL><DEDENT>else:<EOL><INDENT>tcolget = [tcol]<EOL><DEDENT>times = _dict_get(lcdict, tcolget)<EOL>if nbrlcf:<EOL><INDENT>nbrtimes = _dict_get(nbrlcdict, 
tcolget)<EOL><DEDENT>else:<EOL><INDENT>nbrtimes = None<EOL><DEDENT>if '<STR_LIT:.>' in mcol:<EOL><INDENT>mcolget = mcol.split('<STR_LIT:.>')<EOL><DEDENT>else:<EOL><INDENT>mcolget = [mcol]<EOL><DEDENT>mags = _dict_get(lcdict, mcolget)<EOL>if nbrlcf:<EOL><INDENT>nbrmags = _dict_get(nbrlcdict, mcolget)<EOL><DEDENT>else:<EOL><INDENT>nbrmags = None<EOL><DEDENT>if '<STR_LIT:.>' in ecol:<EOL><INDENT>ecolget = ecol.split('<STR_LIT:.>')<EOL><DEDENT>else:<EOL><INDENT>ecolget = [ecol]<EOL><DEDENT>errs = _dict_get(lcdict, ecolget)<EOL>if nbrlcf:<EOL><INDENT>nbrerrs = _dict_get(nbrlcdict, ecolget)<EOL><DEDENT>else:<EOL><INDENT>nbrerrs = None<EOL><DEDENT>finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)<EOL>ftimes, fmags, ferrs = times[finind], mags[finind], errs[finind]<EOL>if nbrlcf:<EOL><INDENT>nfinind = (np.isfinite(nbrtimes) &<EOL>np.isfinite(nbrmags) &<EOL>np.isfinite(nbrerrs))<EOL>nbrftimes, nbrfmags, nbrferrs = (nbrtimes[nfinind],<EOL>nbrmags[nfinind],<EOL>nbrerrs[nfinind])<EOL><DEDENT>nzind = np.nonzero(ferrs)<EOL>ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]<EOL>if nbrlcf:<EOL><INDENT>nnzind = np.nonzero(nbrferrs)<EOL>nbrftimes, nbrfmags, nbrferrs = (nbrftimes[nnzind],<EOL>nbrfmags[nnzind],<EOL>nbrferrs[nnzind])<EOL><DEDENT>if normfunc is None:<EOL><INDENT>ntimes, nmags = normalize_magseries(<EOL>ftimes, fmags,<EOL>magsarefluxes=magsarefluxes<EOL>)<EOL>times, mags, errs = ntimes, nmags, ferrs<EOL>if nbrlcf:<EOL><INDENT>nbrntimes, nbrnmags = normalize_magseries(<EOL>nbrftimes, nbrfmags,<EOL>magsarefluxes=magsarefluxes<EOL>)<EOL>nbrtimes, nbrmags, nbrerrs = nbrntimes, nbrnmags, nbrferrs<EOL><DEDENT>else:<EOL><INDENT>nbrtimes, nbrmags, nbrerrs = None, None, None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>times, mags, errs = ftimes, fmags, ferrs<EOL><DEDENT>if times.size > <NUM_LIT>:<EOL><INDENT>available_pfmethods = []<EOL>available_pgrams = []<EOL>available_bestperiods = []<EOL>for k in pf[mcol].keys():<EOL><INDENT>if k in 
PFMETHODS:<EOL><INDENT>available_pgrams.append(pf[mcol][k])<EOL>if k != '<STR_LIT>':<EOL><INDENT>available_pfmethods.append(<EOL>pf[mcol][k]['<STR_LIT>']<EOL>)<EOL>available_bestperiods.append(<EOL>pf[mcol][k]['<STR_LIT>']<EOL>)<EOL><DEDENT><DEDENT><DEDENT>featkey = '<STR_LIT>' % mcol<EOL>resultdict[featkey] = {}<EOL>pgramfeat = periodicfeatures.periodogram_features(<EOL>available_pgrams, times, mags, errs,<EOL>sigclip=sigclip,<EOL>pdiff_threshold=pdiff_threshold,<EOL>sidereal_threshold=sidereal_threshold,<EOL>sampling_peak_multiplier=sampling_peak_multiplier,<EOL>sampling_startp=sampling_startp,<EOL>sampling_endp=sampling_endp,<EOL>verbose=verbose<EOL>)<EOL>resultdict[featkey].update(pgramfeat)<EOL>resultdict[featkey]['<STR_LIT>'] = available_pfmethods<EOL>for _ind, pfm, bp in zip(range(len(available_bestperiods)),<EOL>available_pfmethods,<EOL>available_bestperiods):<EOL><INDENT>resultdict[featkey][pfm] = periodicfeatures.lcfit_features(<EOL>times, mags, errs, bp,<EOL>fourierorder=fourierorder,<EOL>transitparams=transitparams,<EOL>ebparams=ebparams,<EOL>sigclip=sigclip,<EOL>magsarefluxes=magsarefluxes,<EOL>verbose=verbose<EOL>)<EOL>phasedlcfeat = periodicfeatures.phasedlc_features(<EOL>times, mags, errs, bp,<EOL>nbrtimes=nbrtimes,<EOL>nbrmags=nbrmags,<EOL>nbrerrs=nbrerrs<EOL>)<EOL>resultdict[featkey][pfm].update(phasedlcfeat)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (mcol, pfpickle))<EOL>featkey = '<STR_LIT>' % mcol<EOL>resultdict[featkey] = None<EOL><DEDENT><DEDENT>outfile = os.path.join(outdir, '<STR_LIT>' %<EOL>squeeze(objectid).replace('<STR_LIT:U+0020>','<STR_LIT:->'))<EOL>with open(outfile,'<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(resultdict, outfd, pickle.HIGHEST_PROTOCOL)<EOL><DEDENT>return outfile<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' %<EOL>(pfpickle, lcfile))<EOL>if raiseonfail:<EOL><INDENT>raise<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>", "docstring": 
"This gets all periodic features for the object.\n\n    Parameters\n    ----------\n\n    pfpickle : str\n        The period-finding result pickle containing period-finder results to use\n        for the calculation of LC fit, periodogram, and phased LC features.\n\n    lcbasedir : str\n        The base directory where the light curve for the current object is\n        located.\n\n    outdir : str\n        The output directory where the results will be written.\n\n    fourierorder : int\n        The Fourier order to use to generate sinusoidal function and fit that to\n        the phased light curve.\n\n    transitparams : list of floats\n        The transit depth, duration, and ingress duration to use to generate a\n        trapezoid planet transit model fit to the phased light curve. The period\n        used is the one provided in `period`, while the epoch is automatically\n        obtained from a spline fit to the phased light curve.\n\n    ebparams : list of floats\n        The primary eclipse depth, eclipse duration, the primary-secondary depth\n        ratio, and the phase of the secondary eclipse to use to generate an\n        eclipsing binary model fit to the phased light curve. 
The period used is\n        the one provided in `period`, while the epoch is automatically obtained\n        from a spline fit to the phased light curve.\n\n    pdiff_threshold : float\n        This is the max difference between periods to consider them the same.\n\n    sidereal_threshold : float\n        This is the max difference between any of the 'best' periods and the\n        sidereal day periods to consider them the same.\n\n    sampling_peak_multiplier : float\n        This is the minimum multiplicative factor of a 'best' period's\n        normalized periodogram peak over the sampling periodogram peak at the\n        same period required to accept the 'best' period as possibly real.\n\n    sampling_startp, sampling_endp : float\n        If the `pgramlist` doesn't have a time-sampling Lomb-Scargle\n        periodogram, it will be obtained automatically. Use these kwargs to\n        control the minimum and maximum period interval to be searched when\n        generating this periodogram.\n\n    starfeatures : str or None\n        If not None, this should be the filename of the\n        `starfeatures-<objectid>.pkl` created by\n        :py:func:`astrobase.lcproc.lcsfeatures.get_starfeatures` for this\n        object. This is used to get the neighbor's light curve and phase it with\n        this object's period to see if this object is blended.\n\n    timecols : list of str or None\n        The timecol keys to use from the lcdict in calculating the features.\n\n    magcols : list of str or None\n        The magcol keys to use from the lcdict in calculating the features.\n\n    errcols : list of str or None\n        The errcol keys to use from the lcdict in calculating the features.\n\n    lcformat : str\n        This is the `formatkey` associated with your light curve format, which\n        you previously passed in to the `lcproc.register_lcformat`\n        function. 
This will be used to look up how to find and read the light\n        curves specified in `basedir` or `use_list_of_filenames`.\n\n    lcformatdir : str or None\n        If this is provided, gives the path to a directory when you've stored\n        your lcformat description JSONs, other than the usual directories lcproc\n        knows to search for them in. Use this along with `lcformat` to specify\n        an LC format JSON file that's not currently registered with lcproc.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    verbose : bool\n        If True, will indicate progress while working.\n\n    raiseonfail : bool\n        If True, will raise an Exception if something goes wrong.\n\n    Returns\n    -------\n\n    str\n        Returns a filename for the output pickle containing all of the periodic\n        features for the input object's LC.", "id": "f14698:m1"}
{"signature": "def parallel_epd_lclist(lclist,<EOL>externalparams,<EOL>timecols=None,<EOL>magcols=None,<EOL>errcols=None,<EOL>lcformat='<STR_LIT>',<EOL>lcformatdir=None,<EOL>epdsmooth_sigclip=<NUM_LIT>,<EOL>epdsmooth_windowsize=<NUM_LIT>,<EOL>epdsmooth_func=smooth_magseries_savgol,<EOL>epdsmooth_extraparams=None,<EOL>nworkers=NCPUS,<EOL>maxworkertasks=<NUM_LIT:1000>):", "body": "try:<EOL><INDENT>formatinfo = get_lcformat(lcformat,<EOL>use_lcformat_dir=lcformatdir)<EOL>if formatinfo:<EOL><INDENT>(fileglob, readerfunc,<EOL>dtimecols, dmagcols, derrcols,<EOL>magsarefluxes, normfunc) = formatinfo<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>if timecols is None:<EOL><INDENT>timecols = dtimecols<EOL><DEDENT>if magcols is None:<EOL><INDENT>magcols = dmagcols<EOL><DEDENT>if errcols is None:<EOL><INDENT>errcols = derrcols<EOL><DEDENT>outdict = {}<EOL>for t, m, e in zip(timecols, magcols, errcols):<EOL><INDENT>tasks = [(x, t, m, e, externalparams, lcformat, lcformatdir,<EOL>epdsmooth_sigclip, epdsmooth_windowsize,<EOL>epdsmooth_func, epdsmooth_extraparams) for<EOL>x in lclist]<EOL>pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)<EOL>results = pool.map(parallel_epd_worker, tasks)<EOL>pool.close()<EOL>pool.join()<EOL>outdict[m] = results<EOL><DEDENT>return outdict<EOL>", "docstring": "This applies EPD in parallel to all LCs in the input list.\n\n    Parameters\n    ----------\n\n    lclist : list of str\n        This is the list of light curve files to run EPD on.\n\n    externalparams : dict or None\n        This is a dict that indicates which keys in the lcdict obtained from the\n        lcfile correspond to the required external parameters. As with timecol,\n        magcol, and errcol, these can be simple keys (e.g. 'rjd') or compound\n        keys ('magaperture1.mags'). 
The dict should look something like::\n\n          {'fsv':'<lcdict key>' array: S values for each observation,\n           'fdv':'<lcdict key>' array: D values for each observation,\n           'fkv':'<lcdict key>' array: K values for each observation,\n           'xcc':'<lcdict key>' array: x coords for each observation,\n           'ycc':'<lcdict key>' array: y coords for each observation,\n           'bgv':'<lcdict key>' array: sky background for each observation,\n           'bge':'<lcdict key>' array: sky background err for each observation,\n           'iha':'<lcdict key>' array: hour angle for each observation,\n           'izd':'<lcdict key>' array: zenith distance for each observation}\n\n        Alternatively, if these exact keys are already present in the lcdict,\n        indicate this by setting externalparams to None.\n\n    timecols,magcols,errcols : lists of str\n        The keys in the lcdict produced by your light curve reader function that\n        correspond to the times, mags/fluxes, and associated measurement errors\n        that will be used as inputs to the EPD process. If these are None, the\n        default values for `timecols`, `magcols`, and `errcols` for your light\n        curve format will be used here.\n\n    lcformat : str\n        This is the `formatkey` associated with your light curve format, which\n        you previously passed in to the `lcproc.register_lcformat`\n        function. This will be used to look up how to find and read the light\n        curve files.\n\n    lcformatdir : str or None\n        If this is provided, gives the path to a directory when you've stored\n        your lcformat description JSONs, other than the usual directories lcproc\n        knows to search for them in. 
Use this along with `lcformat` to specify\n        an LC format JSON file that's not currently registered with lcproc.\n\n    epdsmooth_sigclip : float or int or sequence of two floats/ints or None\n        This specifies how to sigma-clip the input LC before fitting the EPD\n        function to it.\n\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    epdsmooth_windowsize : int\n        This is the number of LC points to smooth over to generate a smoothed\n        light curve that will be used to fit the EPD function.\n\n    epdsmooth_func : Python function\n        This sets the smoothing filter function to use. A Savitsky-Golay filter\n        is used to smooth the light curve by default. The functions that can be\n        used with this kwarg are listed in `varbase.trends`. 
If you want to use\n        your own function, it MUST have the following signature::\n\n                def smoothfunc(mags_array, window_size, **extraparams)\n\n        and return a numpy array of the same size as `mags_array` with the\n        smoothed time-series. Any extra params can be provided using the\n        `extraparams` dict.\n\n    epdsmooth_extraparams : dict\n        This is a dict of any extra filter params to supply to the smoothing\n        function.\n\n    nworkers : int\n        The number of parallel workers to launch when processing the LCs.\n\n    maxworkertasks : int\n        The maximum number of tasks a parallel worker will complete before it is\n        replaced with a new one (sometimes helps with memory-leaks).\n\n    Returns\n    -------\n\n    dict\n        Returns a dict organized by all the keys in the input `magcols` list,\n        containing lists of EPD pickle light curves for that `magcol`.\n\n    Notes\n    -----\n\n    - S -> measure of PSF sharpness (~1/sigma^2 sosmaller S = wider PSF)\n    - D -> measure of PSF ellipticity in xy direction\n    - K -> measure of PSF ellipticity in cross direction\n\n    S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in\n    A. Pal's thesis: https://arxiv.org/abs/0906.3486", "id": "f14699:m3"}
{"signature": "def runcp_worker(task):", "body": "pfpickle, outdir, lcbasedir, kwargs = task<EOL>try:<EOL><INDENT>return runcp(pfpickle, outdir, lcbasedir, **kwargs)<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' % (pfpickle, e))<EOL>return None<EOL><DEDENT>", "docstring": "This is the worker for running checkplots.\n\nParameters\n----------\n\ntask : tuple\n    This is of the form: (pfpickle, outdir, lcbasedir, kwargs).\n\nReturns\n-------\n\nlist of str\n    The list of checkplot pickles returned by the `runcp` function.", "id": "f14700:m3"}
{"signature": "def parallel_cp(<EOL>pfpicklelist,<EOL>outdir,<EOL>lcbasedir,<EOL>fast_mode=False,<EOL>lcfnamelist=None,<EOL>cprenorm=False,<EOL>lclistpkl=None,<EOL>gaia_max_timeout=<NUM_LIT>,<EOL>gaia_mirror=None,<EOL>nbrradiusarcsec=<NUM_LIT>,<EOL>maxnumneighbors=<NUM_LIT:5>,<EOL>makeneighborlcs=True,<EOL>xmatchinfo=None,<EOL>xmatchradiusarcsec=<NUM_LIT>,<EOL>sigclip=<NUM_LIT>,<EOL>minobservations=<NUM_LIT>,<EOL>lcformat='<STR_LIT>',<EOL>lcformatdir=None,<EOL>timecols=None,<EOL>magcols=None,<EOL>errcols=None,<EOL>skipdone=False,<EOL>done_callback=None,<EOL>done_callback_args=None,<EOL>done_callback_kwargs=None,<EOL>liststartindex=None,<EOL>maxobjects=None,<EOL>nworkers=NCPUS,<EOL>):", "body": "<EOL>if sys.platform == '<STR_LIT>':<EOL><INDENT>import requests<EOL>requests.get('<STR_LIT>')<EOL><DEDENT>if not os.path.exists(outdir):<EOL><INDENT>os.mkdir(outdir)<EOL><DEDENT>if (liststartindex is not None) and (maxobjects is None):<EOL><INDENT>pfpicklelist = pfpicklelist[liststartindex:]<EOL>if lcfnamelist is not None:<EOL><INDENT>lcfnamelist = lcfnamelist[liststartindex:]<EOL><DEDENT><DEDENT>elif (liststartindex is None) and (maxobjects is not None):<EOL><INDENT>pfpicklelist = pfpicklelist[:maxobjects]<EOL>if lcfnamelist is not None:<EOL><INDENT>lcfnamelist = lcfnamelist[:maxobjects]<EOL><DEDENT><DEDENT>elif (liststartindex is not None) and (maxobjects is not None):<EOL><INDENT>pfpicklelist = (<EOL>pfpicklelist[liststartindex:liststartindex+maxobjects]<EOL>)<EOL>if lcfnamelist is not None:<EOL><INDENT>lcfnamelist = lcfnamelist[liststartindex:liststartindex+maxobjects]<EOL><DEDENT><DEDENT>if lcfnamelist is None:<EOL><INDENT>lcfnamelist = [None]*len(pfpicklelist)<EOL><DEDENT>tasklist = [(x, outdir, 
lcbasedir,<EOL>{'<STR_LIT>':lcformat,<EOL>'<STR_LIT>':lcformatdir,<EOL>'<STR_LIT>':y,<EOL>'<STR_LIT>':timecols,<EOL>'<STR_LIT>':magcols,<EOL>'<STR_LIT>':errcols,<EOL>'<STR_LIT>':lclistpkl,<EOL>'<STR_LIT>':gaia_max_timeout,<EOL>'<STR_LIT>':gaia_mirror,<EOL>'<STR_LIT>':nbrradiusarcsec,<EOL>'<STR_LIT>':maxnumneighbors,<EOL>'<STR_LIT>':makeneighborlcs,<EOL>'<STR_LIT>':xmatchinfo,<EOL>'<STR_LIT>':xmatchradiusarcsec,<EOL>'<STR_LIT>':sigclip,<EOL>'<STR_LIT>':minobservations,<EOL>'<STR_LIT>':skipdone,<EOL>'<STR_LIT>':cprenorm,<EOL>'<STR_LIT>':fast_mode,<EOL>'<STR_LIT>':done_callback,<EOL>'<STR_LIT>':done_callback_args,<EOL>'<STR_LIT>':done_callback_kwargs}) for<EOL>x,y in zip(pfpicklelist, lcfnamelist)]<EOL>resultfutures = []<EOL>results = []<EOL>with ProcessPoolExecutor(max_workers=nworkers) as executor:<EOL><INDENT>resultfutures = executor.map(runcp_worker, tasklist)<EOL><DEDENT>results = [x for x in resultfutures]<EOL>executor.shutdown()<EOL>return results<EOL>", "docstring": "This drives the parallel execution of `runcp` for a list of periodfinding\n    result pickles.\n\n    Parameters\n    ----------\n\n    pfpicklelist : list of str or list of Nones\n        This is the list of the filenames of the period-finding result pickles\n        to process. To make checkplots using the light curves directly, set this\n        to a list of Nones with the same length as the list of light curve files\n        that you provide in `lcfnamelist`.\n\n    outdir : str\n        The directory the checkplot pickles will be written to.\n\n    lcbasedir : str\n        The base directory that this function will look in to find the light\n        curves pointed to by the period-finding result files. If you're using\n        `lcfnamelist` to provide a list of light curve filenames directly, this\n        arg is ignored.\n\n    lcfnamelist : list of str or None\n        If this is provided, it must be a list of the input light curve\n        filenames to process. 
These can either be associated with each input\n        period-finder result pickle, or can be provided standalone to make\n        checkplots without phased LC plots in them. In the second case, you must\n        set `pfpicklelist` to a list of Nones that matches the length of\n        `lcfnamelist`.\n\n    cprenorm : bool\n        Set this to True if the light curves should be renormalized by\n        `checkplot.checkplot_pickle`. This is set to False by default because we\n        do our own normalization in this function using the light curve's\n        registered normalization function and pass the normalized times, mags,\n        errs to the `checkplot.checkplot_pickle` function.\n\n    lclistpkl : str or dict\n        This is either the filename of a pickle or the actual dict produced by\n        lcproc.make_lclist. This is used to gather neighbor information.\n\n    nbrradiusarcsec : float\n        The radius in arcseconds to use for a search conducted around the\n        coordinates of this object to look for any potential confusion and\n        blending of variability amplitude caused by their proximity.\n\n    maxnumneighbors : int\n        The maximum number of neighbors that will have their light curves and\n        magnitudes noted in this checkplot as potential blends with the target\n        object.\n\n    makeneighborlcs : bool\n        If True, will make light curve and phased light curve plots for all\n        neighbors found in the object collection for each input object.\n\n    fast_mode : bool or float\n        This runs the external catalog operations in a \"fast\" mode, with short\n        timeouts and not trying to hit external catalogs that take a long time\n        to respond.\n\n        If this is set to True, the default settings for the external requests\n        will then become::\n\n                skyview_lookup = False\n                skyview_timeout = 10.0\n                skyview_retry_failed = False\n                
dust_timeout = 10.0\n                gaia_submit_timeout = 7.0\n                gaia_max_timeout = 10.0\n                gaia_submit_tries = 2\n                complete_query_later = False\n                search_simbad = False\n\n        If this is a float, will run in \"fast\" mode with the provided timeout\n        value in seconds and the following settings::\n\n                skyview_lookup = True\n                skyview_timeout = fast_mode\n                skyview_retry_failed = False\n                dust_timeout = fast_mode\n                gaia_submit_timeout = 0.66*fast_mode\n                gaia_max_timeout = fast_mode\n                gaia_submit_tries = 2\n                complete_query_later = False\n                search_simbad = False\n\n    gaia_max_timeout : float\n        Sets the timeout in seconds to use when waiting for the GAIA service to\n        respond to our request for the object's information. Note that if\n        `fast_mode` is set, this is ignored.\n\n    gaia_mirror : str or None\n        This sets the GAIA mirror to use. This is a key in the\n        `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n        mirror.\n\n    xmatchinfo : str or dict\n        This is either the xmatch dict produced by the function\n        `load_xmatch_external_catalogs` above, or the path to the xmatch info\n        pickle file produced by that function.\n\n    xmatchradiusarcsec : float\n        This is the cross-matching radius to use in arcseconds.\n\n    minobservations : int\n        The minimum of observations the input object's mag/flux time-series must\n        have for this function to plot its light curve and phased light\n        curve. 
If the object has less than this number, no light curves will be\n        plotted, but the checkplotdict will still contain all of the other\n        information.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    lcformat : str\n        This is the `formatkey` associated with your light curve format, which\n        you previously passed in to the `lcproc.register_lcformat`\n        function. This will be used to look up how to find and read the light\n        curves specified in `basedir` or `use_list_of_filenames`.\n\n    lcformatdir : str or None\n        If this is provided, gives the path to a directory when you've stored\n        your lcformat description JSONs, other than the usual directories lcproc\n        knows to search for them in. 
Use this along with `lcformat` to specify\n        an LC format JSON file that's not currently registered with lcproc.\n\n    timecols : list of str or None\n        The timecol keys to use from the lcdict in generating this checkplot.\n\n    magcols : list of str or None\n        The magcol keys to use from the lcdict in generating this checkplot.\n\n    errcols : list of str or None\n        The errcol keys to use from the lcdict in generating this checkplot.\n\n    skipdone : bool\n        This indicates if this function will skip creating checkplots that\n        already exist corresponding to the current `objectid` and `magcol`. If\n        `skipdone` is set to True, this will be done.\n\n    done_callback : Python function or None\n        This is used to provide a function to execute after the checkplot\n        pickles are generated. This is useful if you want to stream the results\n        of checkplot making to some other process, e.g. directly running an\n        ingestion into an LCC-Server collection. The function will always get\n        the list of the generated checkplot pickles as its first arg, and all of\n        the kwargs for runcp in the kwargs dict. 
Additional args and kwargs can\n        be provided by giving a list in the `done_callbacks_args` kwarg and a\n        dict in the `done_callbacks_kwargs` kwarg.\n\n        NOTE: the function you pass in here should be pickleable by normal\n        Python if you want to use it with the parallel_cp and parallel_cp_lcdir\n        functions below.\n\n    done_callback_args : tuple or None\n        If not None, contains any args to pass into the `done_callback`\n        function.\n\n    done_callback_kwargs : dict or None\n        If not None, contains any kwargs to pass into the `done_callback`\n        function.\n\n    liststartindex : int\n        The index of the `pfpicklelist` (and `lcfnamelist` if provided) to start\n        working at.\n\n    maxobjects : int\n        The maximum number of objects to process in this run. Use this with\n        `liststartindex` to effectively distribute working on a large list of\n        input period-finding result pickles (and light curves if `lcfnamelist`\n        is also provided) over several sessions or machines.\n\n    nworkers : int\n        The number of parallel workers that will work on the checkplot\n        generation process.\n\n    Returns\n    -------\n\n    dict\n        This returns a dict with keys = input period-finding pickles and vals =\n        list of the corresponding checkplot pickles produced.", "id": "f14700:m4"}
{"signature": "def parallel_cp_pfdir(pfpickledir,<EOL>outdir,<EOL>lcbasedir,<EOL>pfpickleglob='<STR_LIT>',<EOL>lclistpkl=None,<EOL>cprenorm=False,<EOL>nbrradiusarcsec=<NUM_LIT>,<EOL>maxnumneighbors=<NUM_LIT:5>,<EOL>makeneighborlcs=True,<EOL>fast_mode=False,<EOL>gaia_max_timeout=<NUM_LIT>,<EOL>gaia_mirror=None,<EOL>xmatchinfo=None,<EOL>xmatchradiusarcsec=<NUM_LIT>,<EOL>minobservations=<NUM_LIT>,<EOL>sigclip=<NUM_LIT>,<EOL>lcformat='<STR_LIT>',<EOL>lcformatdir=None,<EOL>timecols=None,<EOL>magcols=None,<EOL>errcols=None,<EOL>skipdone=False,<EOL>done_callback=None,<EOL>done_callback_args=None,<EOL>done_callback_kwargs=None,<EOL>maxobjects=None,<EOL>nworkers=<NUM_LIT:32>):", "body": "pfpicklelist = sorted(glob.glob(os.path.join(pfpickledir, pfpickleglob)))<EOL>LOGINFO('<STR_LIT>' %<EOL>len(pfpicklelist))<EOL>return parallel_cp(pfpicklelist,<EOL>outdir,<EOL>lcbasedir,<EOL>fast_mode=fast_mode,<EOL>lclistpkl=lclistpkl,<EOL>nbrradiusarcsec=nbrradiusarcsec,<EOL>gaia_max_timeout=gaia_max_timeout,<EOL>gaia_mirror=gaia_mirror,<EOL>maxnumneighbors=maxnumneighbors,<EOL>makeneighborlcs=makeneighborlcs,<EOL>xmatchinfo=xmatchinfo,<EOL>xmatchradiusarcsec=xmatchradiusarcsec,<EOL>sigclip=sigclip,<EOL>minobservations=minobservations,<EOL>cprenorm=cprenorm,<EOL>maxobjects=maxobjects,<EOL>lcformat=lcformat,<EOL>lcformatdir=lcformatdir,<EOL>timecols=timecols,<EOL>magcols=magcols,<EOL>errcols=errcols,<EOL>skipdone=skipdone,<EOL>nworkers=nworkers,<EOL>done_callback=done_callback,<EOL>done_callback_args=done_callback_args,<EOL>done_callback_kwargs=done_callback_kwargs)<EOL>", "docstring": "This drives the parallel execution of `runcp` for a directory of\n    periodfinding pickles.\n\n    Parameters\n    ----------\n\n    pfpickledir : str\n        This is the directory containing all of the period-finding pickles to\n        process.\n\n    outdir : str\n        The directory the checkplot pickles will be written to.\n\n    lcbasedir : str\n        The base directory that this function will 
look in to find the light\n        curves pointed to by the period-finding result files. If you're using\n        `lcfnamelist` to provide a list of light curve filenames directly, this\n        arg is ignored.\n\n    pkpickleglob : str\n        This is a UNIX file glob to select period-finding result pickles in the\n        specified `pfpickledir`.\n\n    lclistpkl : str or dict\n        This is either the filename of a pickle or the actual dict produced by\n        lcproc.make_lclist. This is used to gather neighbor information.\n\n    cprenorm : bool\n        Set this to True if the light curves should be renormalized by\n        `checkplot.checkplot_pickle`. This is set to False by default because we\n        do our own normalization in this function using the light curve's\n        registered normalization function and pass the normalized times, mags,\n        errs to the `checkplot.checkplot_pickle` function.\n\n    nbrradiusarcsec : float\n        The radius in arcseconds to use for a search conducted around the\n        coordinates of this object to look for any potential confusion and\n        blending of variability amplitude caused by their proximity.\n\n    maxnumneighbors : int\n        The maximum number of neighbors that will have their light curves and\n        magnitudes noted in this checkplot as potential blends with the target\n        object.\n\n    makeneighborlcs : bool\n        If True, will make light curve and phased light curve plots for all\n        neighbors found in the object collection for each input object.\n\n    fast_mode : bool or float\n        This runs the external catalog operations in a \"fast\" mode, with short\n        timeouts and not trying to hit external catalogs that take a long time\n        to respond.\n\n        If this is set to True, the default settings for the external requests\n        will then become::\n\n                skyview_lookup = False\n                skyview_timeout = 10.0\n                
skyview_retry_failed = False\n                dust_timeout = 10.0\n                gaia_submit_timeout = 7.0\n                gaia_max_timeout = 10.0\n                gaia_submit_tries = 2\n                complete_query_later = False\n                search_simbad = False\n\n        If this is a float, will run in \"fast\" mode with the provided timeout\n        value in seconds and the following settings::\n\n                skyview_lookup = True\n                skyview_timeout = fast_mode\n                skyview_retry_failed = False\n                dust_timeout = fast_mode\n                gaia_submit_timeout = 0.66*fast_mode\n                gaia_max_timeout = fast_mode\n                gaia_submit_tries = 2\n                complete_query_later = False\n                search_simbad = False\n\n    gaia_max_timeout : float\n        Sets the timeout in seconds to use when waiting for the GAIA service to\n        respond to our request for the object's information. Note that if\n        `fast_mode` is set, this is ignored.\n\n    gaia_mirror : str or None\n        This sets the GAIA mirror to use. This is a key in the\n        `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n        mirror.\n\n    xmatchinfo : str or dict\n        This is either the xmatch dict produced by the function\n        `load_xmatch_external_catalogs` above, or the path to the xmatch info\n        pickle file produced by that function.\n\n    xmatchradiusarcsec : float\n        This is the cross-matching radius to use in arcseconds.\n\n    minobservations : int\n        The minimum of observations the input object's mag/flux time-series must\n        have for this function to plot its light curve and phased light\n        curve. 
If the object has less than this number, no light curves will be\n        plotted, but the checkplotdict will still contain all of the other\n        information.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    lcformat : str\n        This is the `formatkey` associated with your light curve format, which\n        you previously passed in to the `lcproc.register_lcformat`\n        function. This will be used to look up how to find and read the light\n        curves specified in `basedir` or `use_list_of_filenames`.\n\n    lcformatdir : str or None\n        If this is provided, gives the path to a directory when you've stored\n        your lcformat description JSONs, other than the usual directories lcproc\n        knows to search for them in. 
Use this along with `lcformat` to specify\n        an LC format JSON file that's not currently registered with lcproc.\n\n    timecols : list of str or None\n        The timecol keys to use from the lcdict in generating this checkplot.\n\n    magcols : list of str or None\n        The magcol keys to use from the lcdict in generating this checkplot.\n\n    errcols : list of str or None\n        The errcol keys to use from the lcdict in generating this checkplot.\n\n    skipdone : bool\n        This indicates if this function will skip creating checkplots that\n        already exist corresponding to the current `objectid` and `magcol`. If\n        `skipdone` is set to True, this will be done.\n\n    done_callback : Python function or None\n        This is used to provide a function to execute after the checkplot\n        pickles are generated. This is useful if you want to stream the results\n        of checkplot making to some other process, e.g. directly running an\n        ingestion into an LCC-Server collection. The function will always get\n        the list of the generated checkplot pickles as its first arg, and all of\n        the kwargs for runcp in the kwargs dict. 
Additional args and kwargs can\n        be provided by giving a list in the `done_callbacks_args` kwarg and a\n        dict in the `done_callbacks_kwargs` kwarg.\n\n        NOTE: the function you pass in here should be pickleable by normal\n        Python if you want to use it with the parallel_cp and parallel_cp_lcdir\n        functions below.\n\n    done_callback_args : tuple or None\n        If not None, contains any args to pass into the `done_callback`\n        function.\n\n    done_callback_kwargs : dict or None\n        If not None, contains any kwargs to pass into the `done_callback`\n        function.\n\n    maxobjects : int\n        The maximum number of objects to process in this run.\n\n    nworkers : int\n        The number of parallel workers that will work on the checkplot\n        generation process.\n\n    Returns\n    -------\n\n    dict\n        This returns a dict with keys = input period-finding pickles and vals =\n        list of the corresponding checkplot pickles produced.", "id": "f14700:m5"}
{"signature": "def get_lcformat(formatkey, use_lcformat_dir=None):", "body": "if isinstance(use_lcformat_dir, str):<EOL><INDENT>lcformat_jsonpath = os.path.join(<EOL>use_lcformat_dir,<EOL>'<STR_LIT>' % formatkey<EOL>)<EOL>if not os.path.exists(lcformat_jsonpath):<EOL><INDENT>lcformat_jsonpath = os.path.join(<EOL>os.path.expanduser('<STR_LIT>'),<EOL>'<STR_LIT>' % formatkey<EOL>)<EOL>if not os.path.exists(lcformat_jsonpath):<EOL><INDENT>install_path = os.path.dirname(__file__)<EOL>install_path = os.path.abspath(<EOL>os.path.join(install_path, '<STR_LIT:..>', '<STR_LIT:data>','<STR_LIT>')<EOL>)<EOL>lcformat_jsonpath = os.path.join(<EOL>install_path,<EOL>'<STR_LIT>' % formatkey<EOL>)<EOL>if not os.path.exists(lcformat_jsonpath):<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% formatkey)<EOL>return None<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>lcformat_jsonpath = os.path.join(<EOL>os.path.expanduser('<STR_LIT>'),<EOL>'<STR_LIT>' % formatkey<EOL>)<EOL>if not os.path.exists(lcformat_jsonpath):<EOL><INDENT>install_path = os.path.dirname(__file__)<EOL>install_path = os.path.abspath(<EOL>os.path.join(install_path, '<STR_LIT:..>', '<STR_LIT:data>','<STR_LIT>')<EOL>)<EOL>lcformat_jsonpath = os.path.join(<EOL>install_path,<EOL>'<STR_LIT>' % formatkey<EOL>)<EOL>if not os.path.exists(lcformat_jsonpath):<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% formatkey)<EOL>return None<EOL><DEDENT><DEDENT><DEDENT>with open(lcformat_jsonpath) as infd:<EOL><INDENT>lcformatdict = json.load(infd)<EOL><DEDENT>readerfunc_module = lcformatdict['<STR_LIT>']<EOL>readerfunc = lcformatdict['<STR_LIT>']<EOL>readerfunc_kwargs = lcformatdict['<STR_LIT>']<EOL>normfunc_module = lcformatdict['<STR_LIT>']<EOL>normfunc = lcformatdict['<STR_LIT>']<EOL>normfunc_kwargs = lcformatdict['<STR_LIT>']<EOL>fileglob = lcformatdict['<STR_LIT>']<EOL>timecols = lcformatdict['<STR_LIT>']<EOL>magcols = 
lcformatdict['<STR_LIT>']<EOL>errcols = lcformatdict['<STR_LIT>']<EOL>magsarefluxes = lcformatdict['<STR_LIT>']<EOL>readermodule = _check_extmodule(readerfunc_module, formatkey)<EOL>if not readermodule:<EOL><INDENT>LOGERROR(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" %<EOL>(readerfunc_module, formatkey))<EOL>return None<EOL><DEDENT>try:<EOL><INDENT>readerfunc_in = getattr(readermodule, readerfunc)<EOL><DEDENT>except AttributeError:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (formatkey, readerfunc_module, readerfunc))<EOL>raise<EOL><DEDENT>if normfunc_module:<EOL><INDENT>normmodule = _check_extmodule(normfunc_module, formatkey)<EOL>if not normmodule:<EOL><INDENT>LOGERROR(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" %<EOL>(normfunc_module, formatkey))<EOL>return None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>normmodule = None<EOL><DEDENT>if normfunc_module and normfunc:<EOL><INDENT>try:<EOL><INDENT>normfunc_in = getattr(normmodule, normfunc)<EOL><DEDENT>except AttributeError:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (formatkey, normfunc_module, normfunc))<EOL>raise<EOL><DEDENT><DEDENT>else:<EOL><INDENT>normfunc_in = None<EOL><DEDENT>if isinstance(readerfunc_kwargs, dict):<EOL><INDENT>readerfunc_in = partial(readerfunc_in, **readerfunc_kwargs)<EOL><DEDENT>if normfunc_in is not None:<EOL><INDENT>if isinstance(normfunc_kwargs, dict):<EOL><INDENT>normfunc_in = partial(normfunc_in, **normfunc_kwargs)<EOL><DEDENT><DEDENT>returntuple = (<EOL>fileglob,<EOL>readerfunc_in,<EOL>timecols,<EOL>magcols,<EOL>errcols,<EOL>magsarefluxes,<EOL>normfunc_in,<EOL>)<EOL>return returntuple<EOL>", "docstring": "This loads an LC format description from a previously-saved JSON file.\n\n    Parameters\n    ----------\n\n    formatkey : str\n        The key used to refer to the LC format. This is part of the JSON file's\n        name, e.g. 
the format key 'hat-csv' maps to the format JSON file:\n        '<astrobase install path>/data/lcformats/hat-csv.json'.\n\n    use_lcformat_dir : str or None\n        If provided, must be the path to a directory that contains the\n        corresponding lcformat JSON file for `formatkey`. If this is None, this\n        function will look for lcformat JSON files corresponding to the given\n        `formatkey`:\n\n        - first, in the directory specified in this kwarg,\n        - if not found there, in the home directory: ~/.astrobase/lcformat-jsons\n        - if not found there, in: <astrobase install path>/data/lcformats\n\n    Returns\n    -------\n\n    tuple\n        A tuple of the following form is returned::\n\n            (fileglob       : the file glob of the associated LC files,\n             readerfunc_in  : the imported Python function for reading LCs,\n             timecols       : list of time col keys to get from the lcdict,\n             magcols        : list of mag col keys to get from the lcdict ,\n             errcols        : list of err col keys to get from the lcdict,\n             magsarefluxes  : True if the measurements are fluxes not mags,\n             normfunc_in    : the imported Python function for normalizing LCs)\n\n        All `astrobase.lcproc` functions can then use this tuple to dynamically\n        import your LC reader and normalization functions to work with your LC\n        format transparently.", "id": "f14701:m4"}
{"signature": "def parallel_pf(lclist,<EOL>outdir,<EOL>timecols=None,<EOL>magcols=None,<EOL>errcols=None,<EOL>lcformat='<STR_LIT>',<EOL>lcformatdir=None,<EOL>pfmethods=('<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>'),<EOL>pfkwargs=({},{},{},{}),<EOL>sigclip=<NUM_LIT>,<EOL>getblssnr=False,<EOL>nperiodworkers=NCPUS,<EOL>ncontrolworkers=<NUM_LIT:1>,<EOL>liststartindex=None,<EOL>listmaxobjects=None,<EOL>minobservations=<NUM_LIT>,<EOL>excludeprocessed=True):", "body": "<EOL>if not os.path.exists(outdir):<EOL><INDENT>os.makedirs(outdir)<EOL><DEDENT>if (liststartindex is not None) and (listmaxobjects is None):<EOL><INDENT>lclist = lclist[liststartindex:]<EOL><DEDENT>elif (liststartindex is None) and (listmaxobjects is not None):<EOL><INDENT>lclist = lclist[:listmaxobjects]<EOL><DEDENT>elif (liststartindex is not None) and (listmaxobjects is not None):<EOL><INDENT>lclist = lclist[liststartindex:liststartindex+listmaxobjects]<EOL><DEDENT>tasklist = [(x, outdir, timecols, magcols, errcols, lcformat, lcformatdir,<EOL>pfmethods, pfkwargs, getblssnr, sigclip, nperiodworkers,<EOL>minobservations,<EOL>excludeprocessed)<EOL>for x in lclist]<EOL>with ProcessPoolExecutor(max_workers=ncontrolworkers) as executor:<EOL><INDENT>resultfutures = executor.map(_runpf_worker, tasklist)<EOL><DEDENT>results = [x for x in resultfutures]<EOL>return results<EOL>", "docstring": "This drives the overall parallel period processing for a list of LCs.\n\n    As a rough benchmark, 25000 HATNet light curves with up to 50000 points per\n    LC take about 26 days in total for an invocation of this function using\n    GLS+PDM+BLS, 10 periodworkers, and 4 controlworkers (so all 40 'cores') on a\n    2 x Xeon E5-2660v3 machine.\n\n    Parameters\n    ----------\n\n    lclist : list of str\n        The list of light curve file to process.\n\n    outdir : str\n        The output directory where the period-finding result pickles will go.\n\n    timecols : list of str or None\n        The timecol keys to use 
from the lcdict in calculating the features.\n\n    magcols : list of str or None\n        The magcol keys to use from the lcdict in calculating the features.\n\n    errcols : list of str or None\n        The errcol keys to use from the lcdict in calculating the features.\n\n    lcformat : str\n        This is the `formatkey` associated with your light curve format, which\n        you previously passed in to the `lcproc.register_lcformat`\n        function. This will be used to look up how to find and read the light\n        curves specified in `basedir` or `use_list_of_filenames`.\n\n    lcformatdir : str or None\n        If this is provided, gives the path to a directory when you've stored\n        your lcformat description JSONs, other than the usual directories lcproc\n        knows to search for them in. Use this along with `lcformat` to specify\n        an LC format JSON file that's not currently registered with lcproc.\n\n    pfmethods : list of str\n        This is a list of period finding methods to run. Each element is a\n        string matching the keys of the `PFMETHODS` dict above. By default, this\n        runs GLS, PDM, AoVMH, and the spectral window Lomb-Scargle periodogram.\n\n    pfkwargs : list of dicts\n        This is used to provide any special kwargs as dicts to each\n        period-finding method function specified in `pfmethods`.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. 
For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    getblssnr : bool\n        If this is True and BLS is one of the methods specified in `pfmethods`,\n        will also calculate the stats for each best period in the BLS results:\n        transit depth, duration, ingress duration, refit period and epoch, and\n        the SNR of the transit.\n\n    nperiodworkers : int\n        The number of parallel period-finding workers to launch per object task.\n\n    ncontrolworkers : int\n        The number of controlling processes to launch. This effectively sets how\n        many objects from `lclist` will be processed in parallel.\n\n    liststartindex : int or None\n        This sets the index from where to start in `lclist`.\n\n    listmaxobjects : int or None\n        This sets the maximum number of objects in `lclist` to run\n        period-finding for in this invocation. Together with `liststartindex`,\n        `listmaxobjects` can be used to distribute processing over several\n        independent machines if the number of light curves is very large.\n\n    minobservations : int\n        The minimum number of finite LC points required to process a light\n        curve.\n\n    excludeprocessed : bool\n        If this is True, light curves that have existing period-finding result\n        pickles in `outdir` will not be processed.\n\n        FIXME: currently, this uses a dumb method of excluding already-processed\n        files. 
A smarter way to do this is to (i) generate a SHA512 cachekey\n        based on a repr of `{'lcfile', 'timecols', 'magcols', 'errcols',\n        'lcformat', 'pfmethods', 'sigclip', 'getblssnr', 'pfkwargs'}`, (ii) make\n        sure all list kwargs in the dict are sorted, (iii) check if the output\n        file has the same cachekey in its filename (last 8 chars of cachekey\n        should work), so the result was processed in exactly the same way as\n        specifed in the input to this function, and can therefore be\n        ignored. Will implement this later.\n\n    Returns\n    -------\n\n    list of str\n        A list of the period-finding pickles created for all of input LCs\n        processed.", "id": "f14702:m3"}
{"signature": "def cp_objectinfo_worker(task):", "body": "cpf, cpkwargs = task<EOL>try:<EOL><INDENT>newcpf = update_checkplot_objectinfo(cpf, **cpkwargs)<EOL>return newcpf<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' % cpf)<EOL>return None<EOL><DEDENT>", "docstring": "This is a parallel worker for `parallel_update_cp_objectinfo`.\n\n    Parameters\n    ----------\n\n    task : tuple\n        - task[0] = checkplot pickle file\n        - task[1] = kwargs\n\n    Returns\n    -------\n\n    str\n        The name of the checkplot file that was updated. None if the update\n        fails for some reason.", "id": "f14703:m8"}
{"signature": "def parallel_update_objectinfo_cplist(<EOL>cplist,<EOL>liststartindex=None,<EOL>maxobjects=None,<EOL>nworkers=NCPUS,<EOL>fast_mode=False,<EOL>findercmap='<STR_LIT>',<EOL>finderconvolve=None,<EOL>deredden_object=True,<EOL>custom_bandpasses=None,<EOL>gaia_submit_timeout=<NUM_LIT>,<EOL>gaia_submit_tries=<NUM_LIT:3>,<EOL>gaia_max_timeout=<NUM_LIT>,<EOL>gaia_mirror=None,<EOL>complete_query_later=True,<EOL>lclistpkl=None,<EOL>nbrradiusarcsec=<NUM_LIT>,<EOL>maxnumneighbors=<NUM_LIT:5>,<EOL>plotdpi=<NUM_LIT:100>,<EOL>findercachedir='<STR_LIT>',<EOL>verbose=True<EOL>):", "body": "<EOL>if sys.platform == '<STR_LIT>':<EOL><INDENT>import requests<EOL>requests.get('<STR_LIT>')<EOL><DEDENT>if (liststartindex is not None) and (maxobjects is None):<EOL><INDENT>cplist = cplist[liststartindex:]<EOL><DEDENT>elif (liststartindex is None) and (maxobjects is not None):<EOL><INDENT>cplist = cplist[:maxobjects]<EOL><DEDENT>elif (liststartindex is not None) and (maxobjects is not None):<EOL><INDENT>cplist = (<EOL>cplist[liststartindex:liststartindex+maxobjects]<EOL>)<EOL><DEDENT>tasks = [(x, {'<STR_LIT>':fast_mode,<EOL>'<STR_LIT>':findercmap,<EOL>'<STR_LIT>':finderconvolve,<EOL>'<STR_LIT>':deredden_object,<EOL>'<STR_LIT>':custom_bandpasses,<EOL>'<STR_LIT>':gaia_submit_timeout,<EOL>'<STR_LIT>':gaia_submit_tries,<EOL>'<STR_LIT>':gaia_max_timeout,<EOL>'<STR_LIT>':gaia_mirror,<EOL>'<STR_LIT>':complete_query_later,<EOL>'<STR_LIT>':lclistpkl,<EOL>'<STR_LIT>':nbrradiusarcsec,<EOL>'<STR_LIT>':maxnumneighbors,<EOL>'<STR_LIT>':plotdpi,<EOL>'<STR_LIT>':findercachedir,<EOL>'<STR_LIT>':verbose}) for x in cplist]<EOL>resultfutures = []<EOL>results = []<EOL>with ProcessPoolExecutor(max_workers=nworkers) as executor:<EOL><INDENT>resultfutures = executor.map(cp_objectinfo_worker, tasks)<EOL><DEDENT>results = [x for x in resultfutures]<EOL>executor.shutdown()<EOL>return results<EOL>", "docstring": "This updates objectinfo for a list of checkplots.\n\nUseful in cases where a previous round of 
GAIA/finderchart/external catalog\nacquisition failed. This will preserve the following keys in the checkplots\nif they exist:\n\ncomments\nvarinfo\nobjectinfo.objecttags\n\nParameters\n----------\n\ncplist : list of str\n    A list of checkplot pickle file names to update.\n\nliststartindex : int\n    The index of the input list to start working at.\n\nmaxobjects : int\n    The maximum number of objects to process in this run. Use this with\n    `liststartindex` to effectively distribute working on a large list of\n    input checkplot pickles over several sessions or machines.\n\nnworkers : int\n    The number of parallel workers that will work on the checkplot\n    update process.\n\nfast_mode : bool or float\n    This runs the external catalog operations in a \"fast\" mode, with short\n    timeouts and not trying to hit external catalogs that take a long time\n    to respond. See the docstring for\n    `checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this\n    works. If this is True, will run in \"fast\" mode with default timeouts (5\n    seconds in most cases). 
If this is a float, will run in \"fast\" mode with\n    the provided timeout value in seconds.\n\nfindercmap : str or matplotlib.cm.Colormap object\n\nfindercmap : str or matplotlib.cm.ColorMap object\n    The Colormap object to use for the finder chart image.\n\nfinderconvolve : astropy.convolution.Kernel object or None\n    If not None, the Kernel object to use for convolving the finder image.\n\nderedden_objects : bool\n    If this is True, will use the 2MASS DUST service to get extinction\n    coefficients in various bands, and then try to deredden the magnitudes\n    and colors of the object already present in the checkplot's objectinfo\n    dict.\n\ncustom_bandpasses : dict\n    This is a dict used to provide custom bandpass definitions for any\n    magnitude measurements in the objectinfo dict that are not automatically\n    recognized by the `varclass.starfeatures.color_features` function. See\n    its docstring for details on the required format.\n\ngaia_submit_timeout : float\n    Sets the timeout in seconds to use when submitting a request to look up\n    the object's information to the GAIA service. Note that if `fast_mode`\n    is set, this is ignored.\n\ngaia_submit_tries : int\n    Sets the maximum number of times the GAIA services will be contacted to\n    obtain this object's information. If `fast_mode` is set, this is\n    ignored, and the services will be contacted only once (meaning that a\n    failure to respond will be silently ignored and no GAIA data will be\n    added to the checkplot's objectinfo dict).\n\ngaia_max_timeout : float\n    Sets the timeout in seconds to use when waiting for the GAIA service to\n    respond to our request for the object's information. Note that if\n    `fast_mode` is set, this is ignored.\n\ngaia_mirror : str\n    This sets the GAIA mirror to use. 
This is a key in the\n    `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n    mirror.\n\ncomplete_query_later : bool\n    If this is True, saves the state of GAIA queries that are not yet\n    complete when `gaia_max_timeout` is reached while waiting for the GAIA\n    service to respond to our request. A later call for GAIA info on the\n    same object will attempt to pick up the results from the existing query\n    if it's completed. If `fast_mode` is True, this is ignored.\n\nlclistpkl : dict or str\n    If this is provided, must be a dict resulting from reading a catalog\n    produced by the `lcproc.catalogs.make_lclist` function or a str path\n    pointing to the pickle file produced by that function. This catalog is\n    used to find neighbors of the current object in the current light curve\n    collection. Looking at neighbors of the object within the radius\n    specified by `nbrradiusarcsec` is useful for light curves produced by\n    instruments that have a large pixel scale, so are susceptible to\n    blending of variability and potential confusion of neighbor variability\n    with that of the actual object being looked at. If this is None, no\n    neighbor lookups will be performed.\n\nnbrradiusarcsec : float\n    The radius in arcseconds to use for a search conducted around the\n    coordinates of this object to look for any potential confusion and\n    blending of variability amplitude caused by their proximity.\n\nmaxnumneighbors : int\n    The maximum number of neighbors that will have their light curves and\n    magnitudes noted in this checkplot as potential blends with the target\n    object.\n\nplotdpi : int\n    The resolution in DPI of the plots to generate in this function\n    (e.g. 
the finder chart, etc.)\n\nfindercachedir : str\n    The path to the astrobase cache directory for finder chart downloads\n    from the NASA SkyView service.\n\nverbose : bool\n    If True, will indicate progress and warn about potential problems.\n\nReturns\n-------\n\nlist of str\n    Paths to the updated checkplot pickle file.", "id": "f14703:m9"}
{"signature": "def add_cmds_cplist(cplist, cmdpkl,<EOL>require_cmd_magcolor=True,<EOL>save_cmd_pngs=False):", "body": "<EOL>with open(cmdpkl,'<STR_LIT:rb>') as infd:<EOL><INDENT>cmd = pickle.load(infd)<EOL><DEDENT>for cpf in cplist:<EOL><INDENT>add_cmd_to_checkplot(cpf, cmd,<EOL>require_cmd_magcolor=require_cmd_magcolor,<EOL>save_cmd_pngs=save_cmd_pngs)<EOL><DEDENT>", "docstring": "This adds CMDs for each object in cplist.\n\n    Parameters\n    ----------\n\n    cplist : list of str\n        This is the input list of checkplot pickles to add the CMDs to.\n\n    cmdpkl : str\n        This is the filename of the CMD pickle created previously.\n\n    require_cmd_magcolor : bool\n        If this is True, a CMD plot will not be made if the color and mag keys\n        required by the CMD are not present or are nan in each checkplot's\n        objectinfo dict.\n\n    save_cmd_pngs : bool\n        If this is True, then will save the CMD plots that were generated and\n        added back to the checkplotdict as PNGs to the same directory as\n        `cpx`.\n\n    Returns\n    -------\n\n    Nothing.", "id": "f14703:m6"}
{"signature": "def add_cmd_to_checkplot(<EOL>cpx,<EOL>cmdpkl,<EOL>require_cmd_magcolor=True,<EOL>save_cmd_pngs=False<EOL>):", "body": "<EOL>if isinstance(cpx, str) and os.path.exists(cpx):<EOL><INDENT>cpdict = _read_checkplot_picklefile(cpx)<EOL><DEDENT>elif isinstance(cpx, dict):<EOL><INDENT>cpdict = cpx<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return None<EOL><DEDENT>if isinstance(cmdpkl, str) and os.path.exists(cmdpkl):<EOL><INDENT>with open(cmdpkl, '<STR_LIT:rb>') as infd:<EOL><INDENT>cmd = pickle.load(infd)<EOL><DEDENT><DEDENT>elif isinstance(cmdpkl, dict):<EOL><INDENT>cmd = cmdpkl<EOL><DEDENT>cpdict['<STR_LIT>'] = {}<EOL>cplist_mags = cmd['<STR_LIT>']<EOL>cplist_colors = cmd['<STR_LIT>']<EOL>for c1, c2, ym, ind in zip(cmd['<STR_LIT>'],<EOL>cmd['<STR_LIT>'],<EOL>cmd['<STR_LIT>'],<EOL>range(len(cmd['<STR_LIT>']))):<EOL><INDENT>if (c1 in cpdict['<STR_LIT>'] and<EOL>cpdict['<STR_LIT>'][c1] is not None):<EOL><INDENT>c1mag = cpdict['<STR_LIT>'][c1]<EOL><DEDENT>else:<EOL><INDENT>c1mag = np.nan<EOL><DEDENT>if (c2 in cpdict['<STR_LIT>'] and<EOL>cpdict['<STR_LIT>'][c2] is not None):<EOL><INDENT>c2mag = cpdict['<STR_LIT>'][c2]<EOL><DEDENT>else:<EOL><INDENT>c2mag = np.nan<EOL><DEDENT>if (ym in cpdict['<STR_LIT>'] and<EOL>cpdict['<STR_LIT>'][ym] is not None):<EOL><INDENT>ymmag = cpdict['<STR_LIT>'][ym]<EOL><DEDENT>else:<EOL><INDENT>ymmag = np.nan<EOL><DEDENT>if (require_cmd_magcolor and<EOL>not (np.isfinite(c1mag) and<EOL>np.isfinite(c2mag) and<EOL>np.isfinite(ymmag))):<EOL><INDENT>LOGWARNING(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" %<EOL>(c1, c2, ym, cpdict['<STR_LIT>']))<EOL>continue<EOL><DEDENT>try:<EOL><INDENT>thiscmd_title = r'<STR_LIT>' % (CMD_LABELS[c1],<EOL>CMD_LABELS[c2],<EOL>CMD_LABELS[ym])<EOL>fig = 
plt.figure(figsize=(<NUM_LIT:10>,<NUM_LIT:8>))<EOL>plt.plot(cplist_colors[:,ind],<EOL>cplist_mags[:,ind],<EOL>rasterized=True,<EOL>marker='<STR_LIT:o>',<EOL>linestyle='<STR_LIT:none>',<EOL>mew=<NUM_LIT:0>,<EOL>ms=<NUM_LIT:3>)<EOL>plt.plot([c1mag - c2mag], [ymmag],<EOL>ms=<NUM_LIT:20>,<EOL>color='<STR_LIT>',<EOL>marker='<STR_LIT:*>',<EOL>mew=<NUM_LIT:0>)<EOL>plt.xlabel(r'<STR_LIT>' % (CMD_LABELS[c1], CMD_LABELS[c2]))<EOL>plt.ylabel(r'<STR_LIT>' % CMD_LABELS[ym])<EOL>plt.title('<STR_LIT>' % (cpdict['<STR_LIT>'], thiscmd_title))<EOL>plt.gca().invert_yaxis()<EOL>cmdpng = StrIO()<EOL>plt.savefig(cmdpng, bbox_inches='<STR_LIT>',<EOL>pad_inches=<NUM_LIT:0.0>, format='<STR_LIT>')<EOL>cmdpng.seek(<NUM_LIT:0>)<EOL>cmdb64 = base64.b64encode(cmdpng.read())<EOL>cmdpng.close()<EOL>plt.close('<STR_LIT:all>')<EOL>plt.gcf().clear()<EOL>cpdict['<STR_LIT>']['<STR_LIT>' % (c1,c2,ym)] = cmdb64<EOL>if save_cmd_pngs:<EOL><INDENT>if isinstance(cpx, str):<EOL><INDENT>outpng = os.path.join(os.path.dirname(cpx),<EOL>'<STR_LIT>' %<EOL>(cpdict['<STR_LIT>'],<EOL>c1,c2,ym))<EOL><DEDENT>else:<EOL><INDENT>outpng = '<STR_LIT>' % (cpdict['<STR_LIT>'],<EOL>c1,c2,ym)<EOL><DEDENT>_base64_to_file(cmdb64, outpng)<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' %<EOL>(c1, c2, ym, cmdpkl))<EOL>continue<EOL><DEDENT><DEDENT>if isinstance(cpx, str):<EOL><INDENT>cpf = _write_checkplot_picklefile(cpdict, outfile=cpx, protocol=<NUM_LIT:4>)<EOL>return cpf<EOL><DEDENT>elif isinstance(cpx, dict):<EOL><INDENT>return cpdict<EOL><DEDENT>", "docstring": "This adds CMD figures to a checkplot dict or pickle.\n\n    Looks up the CMDs in `cmdpkl`, adds the object from `cpx` as a gold(-ish)\n    star in the plot, and then saves the figure to a base64 encoded PNG, which\n    can then be read and used by the `checkplotserver`.\n\n    Parameters\n    ----------\n\n    cpx : str or dict\n        This is the input checkplot pickle or dict to add the CMD to.\n\n    cmdpkl : str or dict\n        The 
CMD pickle generated by the `colormagdiagram_cplist` or\n        `colormagdiagram_cpdir` functions above, or the dict produced by reading\n        this pickle in.\n\n    require_cmd_magcolor : bool\n        If this is True, a CMD plot will not be made if the color and mag keys\n        required by the CMD are not present or are nan in this checkplot's\n        objectinfo dict.\n\n    save_cmd_png : bool\n        If this is True, then will save the CMD plots that were generated and\n        added back to the checkplotdict as PNGs to the same directory as\n        `cpx`. If `cpx` is a dict, will save them to the current working\n        directory.\n\n    Returns\n    -------\n\n    str or dict\n        If `cpx` was a str filename of checkplot pickle, this will return that\n        filename to indicate that the CMD was added to the file. If `cpx` was a\n        checkplotdict, this will return the checkplotdict with a new key called\n        'colormagdiagram' containing the base64 encoded PNG binary streams of\n        all CMDs generated.", "id": "f14703:m5"}
{"signature": "def kill_handler(sig, frame):", "body": "raise KeyboardInterrupt<EOL>", "docstring": "This raises a KeyboardInterrupt when a SIGKILL comes in.\n\n    This is a handle for use with the Python `signal.signal` function.", "id": "f14704:m0"}
{"signature": "def runcp_consumer_loop(<EOL>in_queue_url,<EOL>workdir,<EOL>lclist_pkl_s3url,<EOL>lc_altexts=('<STR_LIT>',),<EOL>wait_time_seconds=<NUM_LIT:5>,<EOL>cache_clean_timer_seconds=<NUM_LIT>,<EOL>shutdown_check_timer_seconds=<NUM_LIT>,<EOL>sqs_client=None,<EOL>s3_client=None<EOL>):", "body": "if not sqs_client:<EOL><INDENT>sqs_client = boto3.client('<STR_LIT>')<EOL><DEDENT>if not s3_client:<EOL><INDENT>s3_client = boto3.client('<STR_LIT>')<EOL><DEDENT>lclist_pklf = lclist_pkl_s3url.split('<STR_LIT:/>')[-<NUM_LIT:1>]<EOL>if not os.path.exists(lclist_pklf):<EOL><INDENT>lclist_pklf = awsutils.s3_get_url(<EOL>lclist_pkl_s3url,<EOL>client=s3_client<EOL>)<EOL><DEDENT>with open(lclist_pklf,'<STR_LIT:rb>') as infd:<EOL><INDENT>lclistpkl = pickle.load(infd)<EOL><DEDENT>signal.signal(signal.SIGINT, kill_handler)<EOL>signal.signal(signal.SIGTERM, kill_handler)<EOL>shutdown_last_time = time.monotonic()<EOL>diskspace_last_time = time.monotonic()<EOL>while True:<EOL><INDENT>curr_time = time.monotonic()<EOL>if (curr_time - shutdown_last_time) > shutdown_check_timer_seconds:<EOL><INDENT>shutdown_check = shutdown_check_handler()<EOL>if shutdown_check:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>break<EOL><DEDENT>shutdown_last_time = time.monotonic()<EOL><DEDENT>if (curr_time - diskspace_last_time) > cache_clean_timer_seconds:<EOL><INDENT>cache_clean_handler()<EOL>diskspace_last_time = time.monotonic()<EOL><DEDENT>try:<EOL><INDENT>work = awsutils.sqs_get_item(in_queue_url,<EOL>client=sqs_client,<EOL>raiseonfail=True)<EOL>if work is not None and len(work) > <NUM_LIT:0>:<EOL><INDENT>recv = work[<NUM_LIT:0>]<EOL>action = recv['<STR_LIT>']['<STR_LIT:action>']<EOL>if action != '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>target = recv['<STR_LIT>']['<STR_LIT:target>']<EOL>args = recv['<STR_LIT>']['<STR_LIT:args>']<EOL>kwargs = recv['<STR_LIT>']['<STR_LIT>']<EOL>outbucket = recv['<STR_LIT>']['<STR_LIT>']<EOL>if '<STR_LIT>' in recv['<STR_LIT>']:<EOL><INDENT>out_queue_url = 
recv['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>out_queue_url = None<EOL><DEDENT>receipt = recv['<STR_LIT>']<EOL>try:<EOL><INDENT>lc_filename = awsutils.s3_get_url(<EOL>target,<EOL>altexts=lc_altexts,<EOL>client=s3_client,<EOL>)<EOL>if len(args) > <NUM_LIT:0> and args[<NUM_LIT:0>] is not None:<EOL><INDENT>pf_pickle = awsutils.s3_get_url(<EOL>args[<NUM_LIT:0>],<EOL>client=s3_client<EOL>)<EOL><DEDENT>else:<EOL><INDENT>pf_pickle = None<EOL><DEDENT>cpfs = runcp(<EOL>pf_pickle,<EOL>workdir,<EOL>workdir,<EOL>lcfname=lc_filename,<EOL>lclistpkl=lclistpkl,<EOL>makeneighborlcs=False,<EOL>**kwargs<EOL>)<EOL>if cpfs and all(os.path.exists(x) for x in cpfs):<EOL><INDENT>LOGINFO('<STR_LIT>' %<EOL>(lc_filename, pf_pickle, cpfs))<EOL>resp = s3_client.list_objects_v2(<EOL>Bucket=outbucket,<EOL>MaxKeys=<NUM_LIT:1>,<EOL>Prefix=cpfs[<NUM_LIT:0>]<EOL>)<EOL>outbucket_list = resp.get('<STR_LIT>',[])<EOL>if outbucket_list and len(outbucket_list) > <NUM_LIT:0>:<EOL><INDENT>LOGWARNING(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% target<EOL>)<EOL>awsutils.sqs_delete_item(in_queue_url, receipt)<EOL>continue<EOL><DEDENT>for cpf in cpfs:<EOL><INDENT>put_url = awsutils.s3_put_file(cpf,<EOL>outbucket,<EOL>client=s3_client)<EOL>if put_url is not None:<EOL><INDENT>LOGINFO('<STR_LIT>' % put_url)<EOL>if out_queue_url is not None:<EOL><INDENT>awsutils.sqs_put_item(<EOL>out_queue_url,<EOL>{'<STR_LIT>':put_url,<EOL>'<STR_LIT:target>': target,<EOL>'<STR_LIT>':lc_filename,<EOL>'<STR_LIT>':lclist_pklf,<EOL>'<STR_LIT>':kwargs},<EOL>raiseonfail=True<EOL>)<EOL><DEDENT>os.remove(cpf)<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>' % cpf)<EOL><DEDENT><DEDENT>awsutils.sqs_delete_item(in_queue_url,<EOL>receipt)<EOL>if ( (lc_filename is not None) and<EOL>(os.path.exists(lc_filename)) ):<EOL><INDENT>os.remove(lc_filename)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGWARNING('<STR_LIT>' %<EOL>(lc_filename, pf_pickle))<EOL>with open('<STR_LIT>' %<EOL>lc_filename, '<STR_LIT:wb>') as 
outfd:<EOL><INDENT>pickle.dump(<EOL>{'<STR_LIT>':in_queue_url,<EOL>'<STR_LIT:target>':target,<EOL>'<STR_LIT>':lc_filename,<EOL>'<STR_LIT>':lclist_pklf,<EOL>'<STR_LIT>':kwargs,<EOL>'<STR_LIT>':outbucket,<EOL>'<STR_LIT>':out_queue_url},<EOL>outfd, pickle.HIGHEST_PROTOCOL<EOL>)<EOL><DEDENT>put_url = awsutils.s3_put_file(<EOL>'<STR_LIT>' % lc_filename,<EOL>outbucket,<EOL>client=s3_client<EOL>)<EOL>if out_queue_url is not None:<EOL><INDENT>awsutils.sqs_put_item(<EOL>out_queue_url,<EOL>{'<STR_LIT>':put_url,<EOL>'<STR_LIT>':lc_filename,<EOL>'<STR_LIT>':lclist_pklf,<EOL>'<STR_LIT>':kwargs},<EOL>raiseonfail=True<EOL>)<EOL><DEDENT>awsutils.sqs_delete_item(in_queue_url,<EOL>receipt,<EOL>raiseonfail=True)<EOL>if ( (lc_filename is not None) and<EOL>(os.path.exists(lc_filename)) ):<EOL><INDENT>os.remove(lc_filename)<EOL><DEDENT><DEDENT><DEDENT>except ClientError as e:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>break<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>if '<STR_LIT>' in locals():<EOL><INDENT>with open('<STR_LIT>' %<EOL>lc_filename,'<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(<EOL>{'<STR_LIT>':in_queue_url,<EOL>'<STR_LIT:target>':target,<EOL>'<STR_LIT>':lc_filename,<EOL>'<STR_LIT>':lclist_pklf,<EOL>'<STR_LIT>':kwargs,<EOL>'<STR_LIT>':outbucket,<EOL>'<STR_LIT>':out_queue_url},<EOL>outfd, pickle.HIGHEST_PROTOCOL<EOL>)<EOL><DEDENT>put_url = awsutils.s3_put_file(<EOL>'<STR_LIT>' % lc_filename,<EOL>outbucket,<EOL>client=s3_client<EOL>)<EOL>if out_queue_url is not None:<EOL><INDENT>awsutils.sqs_put_item(<EOL>out_queue_url,<EOL>{'<STR_LIT>':put_url,<EOL>'<STR_LIT>':lc_filename,<EOL>'<STR_LIT>':lclist_pklf,<EOL>'<STR_LIT>':kwargs},<EOL>raiseonfail=True<EOL>)<EOL><DEDENT>if ( (lc_filename is not None) and<EOL>(os.path.exists(lc_filename)) ):<EOL><INDENT>os.remove(lc_filename)<EOL><DEDENT><DEDENT>awsutils.sqs_delete_item(in_queue_url,<EOL>receipt,<EOL>raiseonfail=True)<EOL><DEDENT><DEDENT><DEDENT>except 
KeyboardInterrupt:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>break<EOL><DEDENT>except ClientError as e:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>break<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>if '<STR_LIT>' in locals():<EOL><INDENT>with open('<STR_LIT>' %<EOL>lc_filename,'<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(<EOL>{'<STR_LIT>':in_queue_url,<EOL>'<STR_LIT:target>':target,<EOL>'<STR_LIT>':lclist_pklf,<EOL>'<STR_LIT>':kwargs,<EOL>'<STR_LIT>':outbucket,<EOL>'<STR_LIT>':out_queue_url},<EOL>outfd, pickle.HIGHEST_PROTOCOL<EOL>)<EOL><DEDENT>put_url = awsutils.s3_put_file(<EOL>'<STR_LIT>' % lc_filename,<EOL>outbucket,<EOL>client=s3_client<EOL>)<EOL>if out_queue_url is not None:<EOL><INDENT>awsutils.sqs_put_item(<EOL>out_queue_url,<EOL>{'<STR_LIT>':put_url,<EOL>'<STR_LIT>':lclist_pklf,<EOL>'<STR_LIT>':kwargs},<EOL>raiseonfail=True<EOL>)<EOL><DEDENT>if ( (lc_filename is not None) and<EOL>(os.path.exists(lc_filename)) ):<EOL><INDENT>os.remove(lc_filename)<EOL><DEDENT><DEDENT>awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True)<EOL><DEDENT><DEDENT>", "docstring": "This runs checkplot pickle making in a loop until interrupted.\n\n    Consumes work task items from an input queue set up by `runcp_producer_loop`\n    above. For the moment, we don't generate neighbor light curves since this\n    would require a lot more S3 calls.\n\n    Parameters\n    ----------\n\n    in_queue_url : str\n        The SQS URL of the input queue to listen to for work assignment\n        messages. The task orders will include the input and output S3 bucket\n        names, as well as the URL of the output queue to where this function\n        will report its work-complete or work-failed status.\n\n    workdir : str\n        The directory on the local machine where this worker loop will download\n        the input light curves and associated period-finder results (if any),\n        process them, and produce its output checkplot pickles. 
These will then\n        be uploaded to the specified S3 output bucket and then deleted from the\n        workdir when the upload is confirmed to make it safely to S3.\n\n    lclist_pkl : str\n        S3 URL of a catalog pickle generated by `lcproc.catalogs.make_lclist`\n        that contains objectids and coordinates, as well as a kdtree for all of\n        the objects in the current light curve collection being processed. This\n        is used to look up neighbors for each object being processed.\n\n    lc_altexts : sequence of str\n        If not None, this is a sequence of alternate extensions to try for the\n        input light curve file other than the one provided in the input task\n        order. For example, to get anything that's an .sqlite where .sqlite.gz\n        is expected, use altexts=[''] to strip the .gz.\n\n    wait_time_seconds : int\n        The amount of time to wait in the input SQS queue for an input task\n        order. If this timeout expires and no task has been received, this\n        function goes back to the top of the work loop.\n\n    cache_clean_timer_seconds : float\n        The amount of time in seconds to wait before periodically removing old\n        files (such as finder chart FITS, external service result pickles) from\n        the astrobase cache directory. These accumulate as the work items are\n        processed, and take up significant space, so must be removed\n        periodically.\n\n    shutdown_check_timer_seconds : float\n        The amount of time to wait before checking for a pending EC2 shutdown\n        message for the instance this worker loop is operating on. If a shutdown\n        is noticed, the worker loop is cancelled in preparation for instance\n        shutdown.\n\n    sqs_client : boto3.Client or None\n        If None, this function will instantiate a new `boto3.Client` object to\n        use in its SQS operations. 
Alternatively, pass in an existing\n        `boto3.Client` instance to re-use it here.\n\n    s3_client : boto3.Client or None\n        If None, this function will instantiate a new `boto3.Client` object to\n        use in its S3 operations. Alternatively, pass in an existing\n        `boto3.Client` instance to re-use it here.\n\n    Returns\n    -------\n\n    Nothing.", "id": "f14704:m5"}
{"signature": "def runcp_producer_loop(<EOL>lightcurve_list,<EOL>input_queue,<EOL>input_bucket,<EOL>result_queue,<EOL>result_bucket,<EOL>pfresult_list=None,<EOL>runcp_kwargs=None,<EOL>process_list_slice=None,<EOL>purge_queues_when_done=False,<EOL>delete_queues_when_done=False,<EOL>download_when_done=True,<EOL>save_state_when_done=True,<EOL>s3_client=None,<EOL>sqs_client=None<EOL>):", "body": "if not sqs_client:<EOL><INDENT>sqs_client = boto3.client('<STR_LIT>')<EOL><DEDENT>if not s3_client:<EOL><INDENT>s3_client = boto3.client('<STR_LIT>')<EOL><DEDENT>if isinstance(lightcurve_list, str) and os.path.exists(lightcurve_list):<EOL><INDENT>with open(lightcurve_list, '<STR_LIT:r>') as infd:<EOL><INDENT>lclist = infd.readlines()<EOL><DEDENT>lclist = [x.replace('<STR_LIT:\\n>','<STR_LIT>') for x in lclist if len(x) > <NUM_LIT:0>]<EOL>if process_list_slice is not None:<EOL><INDENT>lclist = lclist[process_list_slice[<NUM_LIT:0>]:process_list_slice[<NUM_LIT:1>]]<EOL><DEDENT>lclist = [x[<NUM_LIT:1>:] for x in lclist if x.startswith('<STR_LIT:/>')]<EOL>lclist = ['<STR_LIT>' % (input_bucket, x) for x in lclist]<EOL><DEDENT>elif isinstance(lightcurve_list, list):<EOL><INDENT>lclist = lightcurve_list<EOL><DEDENT>try:<EOL><INDENT>inq = sqs_client.get_queue_url(QueueName=input_queue)<EOL>inq_url = inq['<STR_LIT>']<EOL>LOGINFO('<STR_LIT>')<EOL><DEDENT>except ClientError as e:<EOL><INDENT>inq = awsutils.sqs_create_queue(input_queue, client=sqs_client)<EOL>inq_url = inq['<STR_LIT:url>']<EOL><DEDENT>try:<EOL><INDENT>outq = sqs_client.get_queue_url(QueueName=result_queue)<EOL>outq_url = outq['<STR_LIT>']<EOL>LOGINFO('<STR_LIT>')<EOL><DEDENT>except ClientError as e:<EOL><INDENT>outq = awsutils.sqs_create_queue(result_queue, client=sqs_client)<EOL>outq_url = outq['<STR_LIT:url>']<EOL><DEDENT>LOGINFO('<STR_LIT>' % inq_url)<EOL>LOGINFO('<STR_LIT>' % outq_url)<EOL>LOGINFO('<STR_LIT>')<EOL>time.sleep(<NUM_LIT>)<EOL>if pfresult_list is None:<EOL><INDENT>pfresult_list = [None for x in 
lclist]<EOL><DEDENT>for lc, pf in zip(lclist, pfresult_list):<EOL><INDENT>this_item = {<EOL>'<STR_LIT:target>': lc,<EOL>'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT:args>': (pf,),<EOL>'<STR_LIT>':runcp_kwargs if runcp_kwargs is not None else {},<EOL>'<STR_LIT>': result_bucket,<EOL>'<STR_LIT>': outq_url<EOL>}<EOL>resp = awsutils.sqs_put_item(inq_url, this_item, client=sqs_client)<EOL>if resp:<EOL><INDENT>LOGINFO('<STR_LIT>' % (lc,inq_url))<EOL><DEDENT><DEDENT>done_objects = {}<EOL>LOGINFO('<STR_LIT>')<EOL>signal.signal(signal.SIGINT, kill_handler)<EOL>signal.signal(signal.SIGTERM, kill_handler)<EOL>while len(list(done_objects.keys())) < len(lclist):<EOL><INDENT>try:<EOL><INDENT>result = awsutils.sqs_get_item(outq_url, client=sqs_client)<EOL>if result is not None and len(result) > <NUM_LIT:0>:<EOL><INDENT>recv = result[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>processed_object = recv['<STR_LIT>']['<STR_LIT:target>']<EOL><DEDENT>except KeyError:<EOL><INDENT>LOGWARNING('<STR_LIT>' % recv)<EOL>processed_object = '<STR_LIT>'<EOL><DEDENT>cpf = recv['<STR_LIT>']['<STR_LIT>']<EOL>receipt = recv['<STR_LIT>']<EOL>if processed_object in lclist:<EOL><INDENT>if processed_object not in done_objects:<EOL><INDENT>done_objects[processed_object] = [cpf]<EOL><DEDENT>else:<EOL><INDENT>done_objects[processed_object].append(cpf)<EOL><DEDENT>LOGINFO('<STR_LIT>' % (processed_object, cpf))<EOL>if download_when_done:<EOL><INDENT>getobj = awsutils.awsutils.s3_get_url(<EOL>cpf,<EOL>client=s3_client<EOL>)<EOL>LOGINFO('<STR_LIT>' % (cpf, getobj))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>awsutils.sqs_delete_item(outq_url, receipt)<EOL><DEDENT><DEDENT>except KeyboardInterrupt as e:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>break<EOL><DEDENT><DEDENT>LOGINFO('<STR_LIT>')<EOL>time.sleep(<NUM_LIT:1.0>)<EOL>if 
purge_queues_when_done:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>sqs_client.purge_queue(QueueUrl=inq_url)<EOL>sqs_client.purge_queue(QueueUrl=outq_url)<EOL>time.sleep(<NUM_LIT>)<EOL><DEDENT>if delete_queues_when_done:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>awsutils.sqs_delete_queue(inq_url)<EOL>awsutils.sqs_delete_queue(outq_url)<EOL><DEDENT>work_state = {<EOL>'<STR_LIT>': done_objects,<EOL>'<STR_LIT>': list(set(lclist) - set(done_objects.keys())),<EOL>'<STR_LIT:args>':((os.path.abspath(lightcurve_list) if<EOL>isinstance(lightcurve_list, str) else lightcurve_list),<EOL>input_queue,<EOL>input_bucket,<EOL>result_queue,<EOL>result_bucket),<EOL>'<STR_LIT>':{'<STR_LIT>':pfresult_list,<EOL>'<STR_LIT>':runcp_kwargs,<EOL>'<STR_LIT>':process_list_slice,<EOL>'<STR_LIT>':download_when_done,<EOL>'<STR_LIT>':purge_queues_when_done,<EOL>'<STR_LIT>':save_state_when_done,<EOL>'<STR_LIT>':delete_queues_when_done}<EOL>}<EOL>if save_state_when_done:<EOL><INDENT>with open('<STR_LIT>','<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(work_state, outfd, pickle.HIGHEST_PROTOCOL)<EOL><DEDENT><DEDENT>return work_state<EOL>", "docstring": "This sends checkplot making tasks to the input queue and monitors the\n    result queue for task completion.\n\n    Parameters\n    ----------\n\n    lightcurve_list : str or list of str\n        This is either a string pointing to a file containing a list of light\n        curves filenames to process or the list itself. The names must\n        correspond to the full filenames of files stored on S3, including all\n        prefixes, but not include the 's3://<bucket name>/' bit (these will be\n        added automatically).\n\n    input_queue : str\n        This is the name of the SQS queue which will receive processing tasks\n        generated by this function. 
The queue URL will automatically be obtained\n        from AWS.\n\n    input_bucket : str\n        The name of the S3 bucket containing the light curve files to process.\n\n    result_queue : str\n        This is the name of the SQS queue that this function will listen to for\n        messages from the workers as they complete processing on their input\n        elements. This function will attempt to match input sent to the\n        `input_queue` with results coming into the `result_queue` so it knows\n        how many objects have been successfully processed. If this function\n        receives task results that aren't in its own input queue, it will\n        acknowledge them so they complete successfully, but not download them\n        automatically. This handles leftover tasks completing from a previous\n        run of this function.\n\n    result_bucket : str\n        The name of the S3 bucket which will receive the results from the\n        workers.\n\n    pfresult_list : list of str or None\n        This is a list of periodfinder result pickle S3 URLs associated with\n        each light curve. If provided, this will be used to add in phased light\n        curve plots to each checkplot pickle. 
If this is None, the worker loop\n        will produce checkplot pickles that only contain object information,\n        neighbor information, and unphased light curves.\n\n    runcp_kwargs : dict\n        This is a dict used to pass any extra keyword arguments to the\n        `lcproc.checkplotgen.runcp` function that will be run by the worker\n        loop.\n\n    process_list_slice : list\n        This is used to index into the input light curve list so a subset of the\n        full list can be processed in this specific run of this function.\n\n        Use None for a slice index elem to emulate single slice spec behavior:\n\n        process_list_slice = [10, None]  -> lightcurve_list[10:]\n        process_list_slice = [None, 500] -> lightcurve_list[:500]\n\n    purge_queues_when_done : bool\n        If this is True, and this function exits (either when all done, or when\n        it is interrupted with a Ctrl+C), all outstanding elements in the\n        input/output queues that have not yet been acknowledged by workers or by\n        this function will be purged. This effectively cancels all outstanding\n        work.\n\n    delete_queues_when_done : bool\n        If this is True, and this function exits (either when all done, or when\n        it is interrupted with a Ctrl+C'), all outstanding work items will be\n        purged from the input/queues and the queues themselves will be deleted.\n\n    download_when_done : bool\n        If this is True, the generated checkplot pickle for each input work item\n        will be downloaded immediately to the current working directory when the\n        worker functions report they're done with it.\n\n    save_state_when_done : bool\n        If this is True, will save the current state of the work item queue and\n        the work items acknowledged as completed to a pickle in the current\n        working directory. 
Call the `runcp_producer_loop_savedstate` function\n        below to resume processing from this saved state later.\n\n    s3_client : boto3.Client or None\n        If None, this function will instantiate a new `boto3.Client` object to\n        use in its S3 download operations. Alternatively, pass in an existing\n        `boto3.Client` instance to re-use it here.\n\n    sqs_client : boto3.Client or None\n        If None, this function will instantiate a new `boto3.Client` object to\n        use in its SQS operations. Alternatively, pass in an existing\n        `boto3.Client` instance to re-use it here.\n\n    Returns\n    -------\n\n    dict or str\n        Returns the current work state as a dict or str path to the generated\n        work state pickle depending on if `save_state_when_done` is True.", "id": "f14704:m3"}
{"signature": "def runpf_consumer_loop(<EOL>in_queue_url,<EOL>workdir,<EOL>lc_altexts=('<STR_LIT>',),<EOL>wait_time_seconds=<NUM_LIT:5>,<EOL>shutdown_check_timer_seconds=<NUM_LIT>,<EOL>sqs_client=None,<EOL>s3_client=None<EOL>):", "body": "if not sqs_client:<EOL><INDENT>sqs_client = boto3.client('<STR_LIT>')<EOL><DEDENT>if not s3_client:<EOL><INDENT>s3_client = boto3.client('<STR_LIT>')<EOL><DEDENT>signal.signal(signal.SIGINT, kill_handler)<EOL>signal.signal(signal.SIGTERM, kill_handler)<EOL>shutdown_last_time = time.monotonic()<EOL>while True:<EOL><INDENT>curr_time = time.monotonic()<EOL>if (curr_time - shutdown_last_time) > shutdown_check_timer_seconds:<EOL><INDENT>shutdown_check = shutdown_check_handler()<EOL>if shutdown_check:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>break<EOL><DEDENT>shutdown_last_time = time.monotonic()<EOL><DEDENT>try:<EOL><INDENT>work = awsutils.sqs_get_item(in_queue_url,<EOL>client=sqs_client,<EOL>raiseonfail=True)<EOL>if work is not None and len(work) > <NUM_LIT:0>:<EOL><INDENT>recv = work[<NUM_LIT:0>]<EOL>action = recv['<STR_LIT>']['<STR_LIT:action>']<EOL>if action != '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>target = recv['<STR_LIT>']['<STR_LIT:target>']<EOL>args = recv['<STR_LIT>']['<STR_LIT:args>']<EOL>kwargs = recv['<STR_LIT>']['<STR_LIT>']<EOL>outbucket = recv['<STR_LIT>']['<STR_LIT>']<EOL>if '<STR_LIT>' in recv['<STR_LIT>']:<EOL><INDENT>out_queue_url = recv['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>out_queue_url = None<EOL><DEDENT>receipt = recv['<STR_LIT>']<EOL>try:<EOL><INDENT>lc_filename = awsutils.s3_get_url(<EOL>target,<EOL>altexts=lc_altexts,<EOL>client=s3_client<EOL>)<EOL>runpf_args = (lc_filename, args[<NUM_LIT:0>])<EOL>pfresult = runpf(<EOL>*runpf_args,<EOL>**kwargs<EOL>)<EOL>if pfresult and os.path.exists(pfresult):<EOL><INDENT>LOGINFO('<STR_LIT>' %<EOL>(lc_filename, pfresult))<EOL>resp = s3_client.list_objects_v2(<EOL>Bucket=outbucket,<EOL>MaxKeys=<NUM_LIT:1>,<EOL>Prefix=pfresult<EOL>)<EOL>outbucket_list 
= resp.get('<STR_LIT>',[])<EOL>if outbucket_list and len(outbucket_list) > <NUM_LIT:0>:<EOL><INDENT>LOGWARNING(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% target<EOL>)<EOL>awsutils.sqs_delete_item(in_queue_url, receipt)<EOL>continue<EOL><DEDENT>put_url = awsutils.s3_put_file(pfresult,<EOL>outbucket,<EOL>client=s3_client)<EOL>if put_url is not None:<EOL><INDENT>LOGINFO('<STR_LIT>' % put_url)<EOL>if out_queue_url is not None:<EOL><INDENT>awsutils.sqs_put_item(<EOL>out_queue_url,<EOL>{'<STR_LIT>':put_url,<EOL>'<STR_LIT:target>': target,<EOL>'<STR_LIT>':lc_filename,<EOL>'<STR_LIT>':kwargs},<EOL>raiseonfail=True<EOL>)<EOL><DEDENT>os.remove(pfresult)<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>' % pfresult)<EOL>os.remove(pfresult)<EOL><DEDENT>awsutils.sqs_delete_item(in_queue_url, receipt)<EOL>if ( (lc_filename is not None) and<EOL>(os.path.exists(lc_filename)) ):<EOL><INDENT>os.remove(lc_filename)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGWARNING('<STR_LIT>' %<EOL>(lc_filename,))<EOL>with open('<STR_LIT>' %<EOL>lc_filename, '<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(<EOL>{'<STR_LIT>':in_queue_url,<EOL>'<STR_LIT:target>':target,<EOL>'<STR_LIT>':lc_filename,<EOL>'<STR_LIT>':kwargs,<EOL>'<STR_LIT>':outbucket,<EOL>'<STR_LIT>':out_queue_url},<EOL>outfd, pickle.HIGHEST_PROTOCOL<EOL>)<EOL><DEDENT>put_url = awsutils.s3_put_file(<EOL>'<STR_LIT>' % lc_filename,<EOL>outbucket,<EOL>client=s3_client<EOL>)<EOL>if out_queue_url is not None:<EOL><INDENT>awsutils.sqs_put_item(<EOL>out_queue_url,<EOL>{'<STR_LIT>':put_url,<EOL>'<STR_LIT>':lc_filename,<EOL>'<STR_LIT>':kwargs},<EOL>raiseonfail=True<EOL>)<EOL><DEDENT>awsutils.sqs_delete_item(in_queue_url,<EOL>receipt,<EOL>raiseonfail=True)<EOL>if ( (lc_filename is not None) and<EOL>(os.path.exists(lc_filename)) ):<EOL><INDENT>os.remove(lc_filename)<EOL><DEDENT><DEDENT><DEDENT>except ClientError as e:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>break<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>if '<STR_LIT>' 
in locals():<EOL><INDENT>with open('<STR_LIT>' %<EOL>lc_filename,'<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(<EOL>{'<STR_LIT>':in_queue_url,<EOL>'<STR_LIT:target>':target,<EOL>'<STR_LIT>':lc_filename,<EOL>'<STR_LIT>':kwargs,<EOL>'<STR_LIT>':outbucket,<EOL>'<STR_LIT>':out_queue_url},<EOL>outfd, pickle.HIGHEST_PROTOCOL<EOL>)<EOL><DEDENT>put_url = awsutils.s3_put_file(<EOL>'<STR_LIT>' % lc_filename,<EOL>outbucket,<EOL>client=s3_client<EOL>)<EOL>if out_queue_url is not None:<EOL><INDENT>awsutils.sqs_put_item(<EOL>out_queue_url,<EOL>{'<STR_LIT>':put_url,<EOL>'<STR_LIT>':lc_filename,<EOL>'<STR_LIT>':kwargs},<EOL>raiseonfail=True<EOL>)<EOL><DEDENT>if ( (lc_filename is not None) and<EOL>(os.path.exists(lc_filename)) ):<EOL><INDENT>os.remove(lc_filename)<EOL><DEDENT><DEDENT>awsutils.sqs_delete_item(in_queue_url,<EOL>receipt,<EOL>raiseonfail=True)<EOL><DEDENT><DEDENT><DEDENT>except KeyboardInterrupt:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>break<EOL><DEDENT>except ClientError as e:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>break<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>if '<STR_LIT>' in locals():<EOL><INDENT>with open('<STR_LIT>' %<EOL>lc_filename,'<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(<EOL>{'<STR_LIT>':in_queue_url,<EOL>'<STR_LIT:target>':target,<EOL>'<STR_LIT>':kwargs,<EOL>'<STR_LIT>':outbucket,<EOL>'<STR_LIT>':out_queue_url},<EOL>outfd, pickle.HIGHEST_PROTOCOL<EOL>)<EOL><DEDENT>put_url = awsutils.s3_put_file(<EOL>'<STR_LIT>' % lc_filename,<EOL>outbucket,<EOL>client=s3_client<EOL>)<EOL>if out_queue_url is not None:<EOL><INDENT>awsutils.sqs_put_item(<EOL>out_queue_url,<EOL>{'<STR_LIT>':put_url,<EOL>'<STR_LIT>':kwargs},<EOL>raiseonfail=True<EOL>)<EOL><DEDENT>if ( (lc_filename is not None) and<EOL>(os.path.exists(lc_filename)) ):<EOL><INDENT>os.remove(lc_filename)<EOL><DEDENT><DEDENT>awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True)<EOL><DEDENT><DEDENT>", "docstring": "This runs period-finding in a loop until 
interrupted.\n\n    Consumes work task items from an input queue set up by `runpf_producer_loop`\n    above.\n\n    Parameters\n    ----------\n\n    in_queue_url : str\n        The SQS URL of the input queue to listen to for work assignment\n        messages. The task orders will include the input and output S3 bucket\n        names, as well as the URL of the output queue to where this function\n        will report its work-complete or work-failed status.\n\n    workdir : str\n        The directory on the local machine where this worker loop will download\n        the input light curves, process them, and produce its output\n        periodfinding result pickles. These will then be uploaded to the\n        specified S3 output bucket, and then deleted from the local disk.\n\n    lc_altexts : sequence of str\n        If not None, this is a sequence of alternate extensions to try for the\n        input light curve file other than the one provided in the input task\n        order. For example, to get anything that's an .sqlite where .sqlite.gz\n        is expected, use altexts=[''] to strip the .gz.\n\n    wait_time_seconds : int\n        The amount of time to wait in the input SQS queue for an input task\n        order. If this timeout expires and no task has been received, this\n        function goes back to the top of the work loop.\n\n    shutdown_check_timer_seconds : float\n        The amount of time to wait before checking for a pending EC2 shutdown\n        message for the instance this worker loop is operating on. If a shutdown\n        is noticed, the worker loop is cancelled in preparation for instance\n        shutdown.\n\n    sqs_client : boto3.Client or None\n        If None, this function will instantiate a new `boto3.Client` object to\n        use in its SQS operations. 
Alternatively, pass in an existing\n        `boto3.Client` instance to re-use it here.\n\n    s3_client : boto3.Client or None\n        If None, this function will instantiate a new `boto3.Client` object to\n        use in its S3 operations. Alternatively, pass in an existing\n        `boto3.Client` instance to re-use it here.\n\n    Returns\n    -------\n\n    Nothing.", "id": "f14704:m7"}
{"signature": "def parallel_timebin_lcdir(lcdir,<EOL>binsizesec,<EOL>maxobjects=None,<EOL>outdir=None,<EOL>lcformat='<STR_LIT>',<EOL>lcformatdir=None,<EOL>timecols=None,<EOL>magcols=None,<EOL>errcols=None,<EOL>minbinelems=<NUM_LIT:7>,<EOL>nworkers=NCPUS,<EOL>maxworkertasks=<NUM_LIT:1000>):", "body": "try:<EOL><INDENT>formatinfo = get_lcformat(lcformat,<EOL>use_lcformat_dir=lcformatdir)<EOL>if formatinfo:<EOL><INDENT>(fileglob, readerfunc,<EOL>dtimecols, dmagcols, derrcols,<EOL>magsarefluxes, normfunc) = formatinfo<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>lclist = sorted(glob.glob(os.path.join(lcdir, fileglob)))<EOL>return parallel_timebin(lclist,<EOL>binsizesec,<EOL>maxobjects=maxobjects,<EOL>outdir=outdir,<EOL>lcformat=lcformat,<EOL>timecols=timecols,<EOL>magcols=magcols,<EOL>errcols=errcols,<EOL>minbinelems=minbinelems,<EOL>nworkers=nworkers,<EOL>maxworkertasks=maxworkertasks)<EOL>", "docstring": "This time bins all the light curves in the specified directory.\n\nParameters\n----------\n\nlcdir : list of str\n    Directory containing the input LCs to process.\n\nbinsizesec : float\n    The time bin size to use in seconds.\n\nmaxobjects : int or None\n    If provided, LC processing will stop at `lclist[maxobjects]`.\n\noutdir : str or None\n    The directory where output LCs will be written. If None, will write to\n    the same directory as the input LCs.\n\nlcformat : str\n    This is the `formatkey` associated with your light curve format, which\n    you previously passed in to the `lcproc.register_lcformat`\n    function. This will be used to look up how to find and read the light\n    curve file.\n\nlcformatdir : str or None\n    If this is provided, gives the path to a directory when you've stored\n    your lcformat description JSONs, other than the usual directories lcproc\n    knows to search for them in. 
Use this along with `lcformat` to specify\n    an LC format JSON file that's not currently registered with lcproc.\n\ntimecols,magcols,errcols : lists of str\n    The keys in the lcdict produced by your light curve reader function that\n    correspond to the times, mags/fluxes, and associated measurement errors\n    that will be used as inputs to the binning process. If these are None,\n    the default values for `timecols`, `magcols`, and `errcols` for your\n    light curve format will be used here.\n\nminbinelems : int\n    The minimum number of time-bin elements required to accept a time-bin as\n    valid for the output binned light curve.\n\nnworkers : int\n    Number of parallel workers to launch.\n\nmaxworkertasks : int\n    The maximum number of tasks a parallel worker will complete before being\n    replaced to guard against memory leaks.\n\nReturns\n-------\n\ndict\n    The returned dict contains keys = input LCs, vals = output LCs.", "id": "f14705:m4"}
{"signature": "def serial_varfeatures(lclist,<EOL>outdir,<EOL>maxobjects=None,<EOL>timecols=None,<EOL>magcols=None,<EOL>errcols=None,<EOL>mindet=<NUM_LIT:1000>,<EOL>lcformat='<STR_LIT>',<EOL>lcformatdir=None):", "body": "if maxobjects:<EOL><INDENT>lclist = lclist[:maxobjects]<EOL><DEDENT>tasks = [(x, outdir, timecols, magcols, errcols,<EOL>mindet, lcformat, lcformatdir)<EOL>for x in lclist]<EOL>for task in tqdm(tasks):<EOL><INDENT>result = _varfeatures_worker(task)<EOL><DEDENT>return result<EOL>", "docstring": "This runs variability feature extraction for a list of LCs.\n\n    Parameters\n    ----------\n\n    lclist : list of str\n        The list of light curve file names to process.\n\n    outdir : str\n        The directory where the output varfeatures pickle files will be written.\n\n    maxobjects : int\n        The number of LCs to process from `lclist`.\n\n    timecols : list of str or None\n        The timecol keys to use from the lcdict in calculating the features.\n\n    magcols : list of str or None\n        The magcol keys to use from the lcdict in calculating the features.\n\n    errcols : list of str or None\n        The errcol keys to use from the lcdict in calculating the features.\n\n    mindet : int\n        The minimum number of LC points required to generate variability\n        features.\n\n    lcformat : str\n        This is the `formatkey` associated with your light curve format, which\n        you previously passed in to the `lcproc.register_lcformat`\n        function. This will be used to look up how to find and read the light\n        curves specified in `basedir` or `use_list_of_filenames`.\n\n    lcformatdir : str or None\n        If this is provided, gives the path to a directory when you've stored\n        your lcformat description JSONs, other than the usual directories lcproc\n        knows to search for them in. 
Use this along with `lcformat` to specify\n        an LC format JSON file that's not currently registered with lcproc.\n\n    Returns\n    -------\n\n    list of str\n        List of the generated variability features pickles for the input LCs,\n        with results for each magcol in the input `magcol` or light curve\n        format's default `magcol` list.", "id": "f14706:m3"}
{"signature": "def apply_tfa_magseries(lcfile,<EOL>timecol,<EOL>magcol,<EOL>errcol,<EOL>templateinfo,<EOL>mintemplatedist_arcmin=<NUM_LIT>,<EOL>lcformat='<STR_LIT>',<EOL>lcformatdir=None,<EOL>interp='<STR_LIT>',<EOL>sigclip=<NUM_LIT>):", "body": "try:<EOL><INDENT>formatinfo = get_lcformat(lcformat,<EOL>use_lcformat_dir=lcformatdir)<EOL>if formatinfo:<EOL><INDENT>(dfileglob, readerfunc,<EOL>dtimecols, dmagcols, derrcols,<EOL>magsarefluxes, normfunc) = formatinfo<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>if isinstance(templateinfo,str) and os.path.exists(templateinfo):<EOL><INDENT>with open(templateinfo,'<STR_LIT:rb>') as infd:<EOL><INDENT>templateinfo = pickle.load(infd)<EOL><DEDENT><DEDENT>lcdict = readerfunc(lcfile)<EOL>if ((isinstance(lcdict, (tuple, list))) and<EOL>isinstance(lcdict[<NUM_LIT:0>], dict)):<EOL><INDENT>lcdict = lcdict[<NUM_LIT:0>]<EOL><DEDENT>objectid = lcdict['<STR_LIT>']<EOL>tmagseries = templateinfo[magcol][<EOL>'<STR_LIT>'<EOL>][::]<EOL>if objectid in templateinfo[magcol]['<STR_LIT>']:<EOL><INDENT>LOGWARNING('<STR_LIT>' %<EOL>objectid)<EOL>templateind = templateinfo[magcol]['<STR_LIT>'] == objectid<EOL>tmagseries = tmagseries[~templateind,:]<EOL><DEDENT>object_matches = coordutils.conesearch_kdtree(<EOL>templateinfo[magcol]['<STR_LIT>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'], lcdict['<STR_LIT>']['<STR_LIT>'],<EOL>mintemplatedist_arcmin/<NUM_LIT><EOL>)<EOL>if len(object_matches) > <NUM_LIT:0>:<EOL><INDENT>LOGWARNING(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" %<EOL>(objectid, mintemplatedist_arcmin, len(object_matches))<EOL>)<EOL>removalind = np.full(<EOL>templateinfo[magcol]['<STR_LIT>'].size,<EOL>False, dtype=np.bool<EOL>)<EOL>removalind[np.array(object_matches)] = True<EOL>tmagseries = tmagseries[~removalind,:]<EOL><DEDENT>normal_matrix = np.dot(tmagseries, tmagseries.T)<EOL>normal_matrix_inverse 
= spla.pinv2(normal_matrix)<EOL>timebase = templateinfo[magcol]['<STR_LIT>']<EOL>reformed_targetlc = _reform_templatelc_for_tfa((<EOL>lcfile,<EOL>lcformat,<EOL>lcformatdir,<EOL>timecol,<EOL>magcol,<EOL>errcol,<EOL>timebase,<EOL>interp,<EOL>sigclip<EOL>))<EOL>scalar_products = np.dot(tmagseries, reformed_targetlc['<STR_LIT>'])<EOL>corrections = np.dot(normal_matrix_inverse, scalar_products)<EOL>corrected_magseries = (<EOL>reformed_targetlc['<STR_LIT>'] -<EOL>np.dot(tmagseries.T, corrections)<EOL>)<EOL>outdict = {<EOL>'<STR_LIT>':timebase,<EOL>'<STR_LIT>':corrected_magseries,<EOL>'<STR_LIT>':reformed_targetlc['<STR_LIT>'],<EOL>'<STR_LIT>':np.median(corrected_magseries),<EOL>'<STR_LIT>': np.median(np.abs(corrected_magseries -<EOL>np.median(corrected_magseries))),<EOL>'<STR_LIT>':{'<STR_LIT>':tmagseries,<EOL>'<STR_LIT>':normal_matrix,<EOL>'<STR_LIT>':normal_matrix_inverse,<EOL>'<STR_LIT>':scalar_products,<EOL>'<STR_LIT>':corrections,<EOL>'<STR_LIT>':reformed_targetlc},<EOL>}<EOL>lcdict['<STR_LIT>'] = outdict<EOL>outfile = os.path.join(<EOL>os.path.dirname(lcfile),<EOL>'<STR_LIT>' % (<EOL>squeeze(objectid).replace('<STR_LIT:U+0020>','<STR_LIT:->'),<EOL>magcol<EOL>)<EOL>)<EOL>with open(outfile,'<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(lcdict, outfd, pickle.HIGHEST_PROTOCOL)<EOL><DEDENT>return outfile<EOL>", "docstring": "This applies the TFA correction to an LC given TFA template information.\n\n    Parameters\n    ----------\n\n    lcfile : str\n        This is the light curve file to apply the TFA correction to.\n\n    timecol,magcol,errcol : str\n        These are the column keys in the lcdict for the LC file to apply the TFA\n        correction to.\n\n    templateinfo : dict or str\n        This is either the dict produced by `tfa_templates_lclist` or the pickle\n        produced by the same function.\n\n    mintemplatedist_arcmin : float\n        This sets the minimum distance required from the target object for\n        objects in the TFA template ensemble. 
Objects closer than this distance\n        will be removed from the ensemble.\n\n    lcformat : str\n        This is the `formatkey` associated with your light curve format, which\n        you previously passed in to the `lcproc.register_lcformat`\n        function. This will be used to look up how to find and read the light\n        curves specified in `basedir` or `use_list_of_filenames`.\n\n    lcformatdir : str or None\n        If this is provided, gives the path to a directory when you've stored\n        your lcformat description JSONs, other than the usual directories lcproc\n        knows to search for them in. Use this along with `lcformat` to specify\n        an LC format JSON file that's not currently registered with lcproc.\n\n    interp : str\n        This is passed to scipy.interpolate.interp1d as the kind of\n        interpolation to use when reforming this light curve to the timebase of\n        the TFA templates.\n\n    sigclip : float or sequence of two floats or None\n        This is the sigma clip to apply to this light curve before running TFA\n        on it.\n\n    Returns\n    -------\n\n    str\n        This returns the filename of the light curve file generated after TFA\n        applications. This is a pickle (that can be read by `lcproc.read_pklc`)\n        in the same directory as `lcfile`. The `magcol` will be encoded in the\n        filename, so each `magcol` in `lcfile` gets its own output file.", "id": "f14707:m4"}
{"signature": "def _reform_templatelc_for_tfa(task):", "body": "try:<EOL><INDENT>(lcfile, lcformat, lcformatdir,<EOL>tcol, mcol, ecol,<EOL>timebase, interpolate_type, sigclip) = task<EOL>try:<EOL><INDENT>formatinfo = get_lcformat(lcformat,<EOL>use_lcformat_dir=lcformatdir)<EOL>if formatinfo:<EOL><INDENT>(dfileglob, readerfunc,<EOL>dtimecols, dmagcols, derrcols,<EOL>magsarefluxes, normfunc) = formatinfo<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>lcdict = readerfunc(lcfile)<EOL>if ( (isinstance(lcdict, (list, tuple))) and<EOL>(isinstance(lcdict[<NUM_LIT:0>], dict)) ):<EOL><INDENT>lcdict = lcdict[<NUM_LIT:0>]<EOL><DEDENT>outdict = {}<EOL>if '<STR_LIT:.>' in tcol:<EOL><INDENT>tcolget = tcol.split('<STR_LIT:.>')<EOL><DEDENT>else:<EOL><INDENT>tcolget = [tcol]<EOL><DEDENT>times = _dict_get(lcdict, tcolget)<EOL>if '<STR_LIT:.>' in mcol:<EOL><INDENT>mcolget = mcol.split('<STR_LIT:.>')<EOL><DEDENT>else:<EOL><INDENT>mcolget = [mcol]<EOL><DEDENT>mags = _dict_get(lcdict, mcolget)<EOL>if '<STR_LIT:.>' in ecol:<EOL><INDENT>ecolget = ecol.split('<STR_LIT:.>')<EOL><DEDENT>else:<EOL><INDENT>ecolget = [ecol]<EOL><DEDENT>errs = _dict_get(lcdict, ecolget)<EOL>if normfunc is None:<EOL><INDENT>ntimes, nmags = normalize_magseries(<EOL>times, mags,<EOL>magsarefluxes=magsarefluxes<EOL>)<EOL><DEDENT>times, mags, errs = ntimes, nmags, errs<EOL>stimes, smags, serrs = sigclip_magseries(times,<EOL>mags,<EOL>errs,<EOL>sigclip=sigclip)<EOL>mags_interpolator = spi.interp1d(stimes, smags,<EOL>kind=interpolate_type,<EOL>fill_value='<STR_LIT>')<EOL>errs_interpolator = spi.interp1d(stimes, serrs,<EOL>kind=interpolate_type,<EOL>fill_value='<STR_LIT>')<EOL>interpolated_mags = mags_interpolator(timebase)<EOL>interpolated_errs = errs_interpolator(timebase)<EOL>magmedian = np.median(interpolated_mags)<EOL>renormed_mags = interpolated_mags - magmedian<EOL>outdict = 
{'<STR_LIT>':renormed_mags,<EOL>'<STR_LIT>':interpolated_errs,<EOL>'<STR_LIT>':interpolated_mags}<EOL>return outdict<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' % repr(task))<EOL>return None<EOL><DEDENT>", "docstring": "This is a parallel worker that reforms light curves for TFA.\n\ntask[0] = lcfile\ntask[1] = lcformat\ntask[2] = lcformatdir\ntask[3] = timecol\ntask[4] = magcol\ntask[5] = errcol\ntask[6] = timebase\ntask[7] = interpolate_type\ntask[8] = sigclip", "id": "f14707:m2"}
{"signature": "def add_cpinfo_to_lclist(<EOL>checkplots,  <EOL>initial_lc_catalog,<EOL>magcol,  <EOL>outfile,<EOL>checkplotglob='<STR_LIT>',<EOL>infokeys=CPINFO_DEFAULTKEYS,<EOL>nworkers=NCPUS<EOL>):", "body": "<EOL>if not isinstance(checkplots, list) and os.path.exists(checkplots):<EOL><INDENT>checkplots = sorted(glob.glob(os.path.join(checkplots, checkplotglob)))<EOL><DEDENT>tasklist = [(cpf, infokeys) for cpf in checkplots]<EOL>with ProcessPoolExecutor(max_workers=nworkers) as executor:<EOL><INDENT>resultfutures = executor.map(_cpinfo_key_worker, tasklist)<EOL><DEDENT>results = [x for x in resultfutures]<EOL>executor.shutdown()<EOL>with open(initial_lc_catalog,'<STR_LIT:rb>') as infd:<EOL><INDENT>lc_catalog = pickle.load(infd)<EOL><DEDENT>catalog_objectids = np.array(lc_catalog['<STR_LIT>']['<STR_LIT>'])<EOL>checkplot_objectids = np.array([x[<NUM_LIT:0>] for x in results])<EOL>extrainfokeys = []<EOL>actualkeys = []<EOL>for keyspec in infokeys:<EOL><INDENT>key, dtype, firstlevel, overwrite_append, nonesub, nansub = keyspec<EOL>if firstlevel:<EOL><INDENT>eik = key<EOL><DEDENT>else:<EOL><INDENT>eik = '<STR_LIT>' % (magcol, key)<EOL><DEDENT>extrainfokeys.append(eik)<EOL>eactual = eik.split('<STR_LIT:.>')<EOL>if not eactual[-<NUM_LIT:1>].isdigit():<EOL><INDENT>if not firstlevel:<EOL><INDENT>eactual = '<STR_LIT:.>'.join([eactual[<NUM_LIT:0>], eactual[-<NUM_LIT:1>]])<EOL><DEDENT>else:<EOL><INDENT>eactual = eactual[-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>elastkey = eactual[-<NUM_LIT:2>]<EOL>if elastkey.endswith('<STR_LIT>'):<EOL><INDENT>elastkey = elastkey[:-<NUM_LIT:2>]<EOL><DEDENT>elif elastkey.endswith('<STR_LIT:s>'):<EOL><INDENT>elastkey = elastkey[:-<NUM_LIT:1>]<EOL><DEDENT>if not firstlevel:<EOL><INDENT>eactual = '<STR_LIT:.>'.join([eactual[<NUM_LIT:0>], elastkey])<EOL><DEDENT>else:<EOL><INDENT>eactual = elastkey<EOL><DEDENT><DEDENT>actualkeys.append(eactual)<EOL>if eactual not in 
lc_catalog['<STR_LIT>']:<EOL><INDENT>lc_catalog['<STR_LIT>'].append(eactual)<EOL><DEDENT>lc_catalog['<STR_LIT>'][eactual] = []<EOL><DEDENT>for catobj in tqdm(catalog_objectids):<EOL><INDENT>cp_objind = np.where(checkplot_objectids == catobj)<EOL>if len(cp_objind[<NUM_LIT:0>]) > <NUM_LIT:0>:<EOL><INDENT>thiscpinfo = results[cp_objind[<NUM_LIT:0>][<NUM_LIT:0>]]<EOL>thiscpinfo = thiscpinfo[<NUM_LIT:1>:]<EOL>for ekind, ek in enumerate(actualkeys):<EOL><INDENT>lc_catalog['<STR_LIT>'][ek].append(<EOL>thiscpinfo[ekind]<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for ekind, ek in enumerate(actualkeys):<EOL><INDENT>thiskeyspec = infokeys[ekind]<EOL>nonesub = thiskeyspec[-<NUM_LIT:2>]<EOL>lc_catalog['<STR_LIT>'][ek].append(<EOL>nonesub<EOL>)<EOL><DEDENT><DEDENT><DEDENT>for ek in actualkeys:<EOL><INDENT>lc_catalog['<STR_LIT>'][ek] = np.array(<EOL>lc_catalog['<STR_LIT>'][ek]<EOL>)<EOL><DEDENT>if '<STR_LIT>' in lc_catalog:<EOL><INDENT>if magcol not in lc_catalog['<STR_LIT>']:<EOL><INDENT>lc_catalog['<STR_LIT>'].append(magcol)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>lc_catalog['<STR_LIT>'] = [magcol]<EOL><DEDENT>with open(outfile, '<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(lc_catalog, outfd, protocol=pickle.HIGHEST_PROTOCOL)<EOL><DEDENT>return outfile<EOL>", "docstring": "This adds checkplot info to the initial light curve catalogs generated by\n    `make_lclist`.\n\n    This is used to incorporate all the extra info checkplots can have for\n    objects back into columns in the light curve catalog produced by\n    `make_lclist`. Objects are matched between the checkplots and the light\n    curve catalog using their `objectid`. This then allows one to search this\n    'augmented' light curve catalog by these extra columns. 
The 'augmented'\n    light curve catalog also forms the basis for search interface provided by\n    the LCC-Server.\n\n    The default list of keys that will be extracted from a checkplot and added\n    as columns in the initial light curve catalog is listed above in the\n    `CPINFO_DEFAULTKEYS` list.\n\n    Parameters\n    ----------\n\n    checkplots : str or list\n        If this is a str, is interpreted as a directory which will be searched\n        for checkplot pickle files using `checkplotglob`. If this is a list, it\n        will be interpreted as a list of checkplot pickle files to process.\n\n    initial_lc_catalog : str\n        This is the path to the light curve catalog pickle made by\n        `make_lclist`.\n\n    magcol : str\n        This is used to indicate the light curve magnitude column to extract\n        magnitude column specific information. For example, Stetson variability\n        indices can be generated using magnitude measurements in separate\n        photometric apertures, which appear in separate `magcols` in the\n        checkplot. To associate each such feature of the object with its\n        specific `magcol`, pass that `magcol` in here. This `magcol` will then\n        be added as a prefix to the resulting column in the 'augmented' LC\n        catalog, e.g. Stetson J will appear as `magcol1_stetsonj` and\n        `magcol2_stetsonj` for two separate magcols.\n\n    outfile : str\n        This is the file name of the output 'augmented' light curve catalog\n        pickle file that will be written.\n\n    infokeys : list of tuples\n\n        This is a list of keys to extract from the checkplot and some info on\n        how this extraction is to be done. 
Each key entry is a six-element\n        tuple of the following form:\n\n        - key name in the checkplot\n        - numpy dtype of the value of this key\n        - False if key is associated with a magcol or True otherwise\n        - False if subsequent updates to the same column name will append to\n          existing key values in the output augmented light curve catalog or\n          True if these will overwrite the existing key value\n        - character to use to substitute a None value of the key in the\n          checkplot in the output light curve catalog column\n        - character to use to substitute a nan value of the key in the\n          checkplot in the output light curve catalog column\n\n        See the `CPFINFO_DEFAULTKEYS` list above for examples.\n\n    nworkers : int\n        The number of parallel workers to launch to extract checkplot\n        information.\n\n    Returns\n    -------\n\n    str\n        Returns the path to the generated 'augmented' light curve catalog pickle\n        file.", "id": "f14709:m5"}
{"signature": "def _cpinfo_key_worker(task):", "body": "cpfile, keyspeclist = task<EOL>keystoget = [x[<NUM_LIT:0>] for x in keyspeclist]<EOL>nonesubs = [x[-<NUM_LIT:2>] for x in keyspeclist]<EOL>nansubs = [x[-<NUM_LIT:1>] for x in keyspeclist]<EOL>for i, k in enumerate(keystoget):<EOL><INDENT>thisk = k.split('<STR_LIT:.>')<EOL>if sys.version_info[:<NUM_LIT:2>] < (<NUM_LIT:3>,<NUM_LIT:4>):<EOL><INDENT>thisk = [(int(x) if x.isdigit() else x) for x in thisk]<EOL><DEDENT>else:<EOL><INDENT>thisk = [(int(x) if x.isdecimal() else x) for x in thisk]<EOL><DEDENT>keystoget[i] = thisk<EOL><DEDENT>keystoget.insert(<NUM_LIT:0>,['<STR_LIT>'])<EOL>nonesubs.insert(<NUM_LIT:0>, '<STR_LIT>')<EOL>nansubs.insert(<NUM_LIT:0>,'<STR_LIT>')<EOL>vals = checkplot_infokey_worker((cpfile, keystoget))<EOL>for val, nonesub, nansub, valind in zip(vals, nonesubs,<EOL>nansubs, range(len(vals))):<EOL><INDENT>if val is None:<EOL><INDENT>outval = nonesub<EOL><DEDENT>elif isinstance(val, float) and not np.isfinite(val):<EOL><INDENT>outval = nansub<EOL><DEDENT>elif isinstance(val, (list, tuple)):<EOL><INDENT>outval = '<STR_LIT:U+002CU+0020>'.join(val)<EOL><DEDENT>else:<EOL><INDENT>outval = val<EOL><DEDENT>vals[valind] = outval<EOL><DEDENT>return vals<EOL>", "docstring": "This wraps `checkplotlist.checkplot_infokey_worker`.\n\n    This is used to get the correct dtype for each element in retrieved results.\n\n    Parameters\n    ----------\n\n    task : tuple\n        task[0] = cpfile\n        task[1] = keyspeclist (infokeys kwarg from `add_cpinfo_to_lclist`)\n\n    Returns\n    -------\n\n    dict\n        All of the requested keys from the checkplot are returned along with\n        their values in a dict.", "id": "f14709:m4"}
{"signature": "def main():", "body": "<EOL>tornado.options.parse_command_line()<EOL>DEBUG = True if options.debugmode == <NUM_LIT:1> else False<EOL>LOGGER = logging.getLogger('<STR_LIT>')<EOL>if DEBUG:<EOL><INDENT>LOGGER.setLevel(logging.DEBUG)<EOL><DEDENT>else:<EOL><INDENT>LOGGER.setLevel(logging.INFO)<EOL><DEDENT>MAXPROCS = options.maxprocs<EOL>ASSETPATH = options.assetpath<EOL>BASEURL = options.baseurl<EOL>EXECUTOR = ProcessPoolExecutor(MAXPROCS)<EOL>if options.standalone:<EOL><INDENT>if ( (not options.sharedsecret) or<EOL>(options.sharedsecret and<EOL>not os.path.exists(options.sharedsecret)) ):<EOL><INDENT>LOGGER.error('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>elif options.sharedsecret and os.path.exists(options.sharedsecret):<EOL><INDENT>fileperm = oct(os.stat(options.sharedsecret)[stat.ST_MODE])<EOL>if fileperm == '<STR_LIT>' or fileperm == '<STR_LIT>':<EOL><INDENT>with open(options.sharedsecret,'<STR_LIT:r>') as infd:<EOL><INDENT>SHAREDSECRET = infd.read().strip('<STR_LIT:\\n>')<EOL>standalonespec = (<EOL>r'<STR_LIT>',<EOL>cphandlers.StandaloneHandler,<EOL>{'<STR_LIT>':EXECUTOR,<EOL>'<STR_LIT>':SHAREDSECRET}<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGGER.error('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGGER.error('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>options.sharedsecret)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>HANDLERS = [standalonespec]<EOL><DEDENT>else:<EOL><INDENT>if not BASEURL.endswith('<STR_LIT:/>'):<EOL><INDENT>BASEURL = BASEURL + '<STR_LIT:/>'<EOL><DEDENT>READONLY = options.readonly<EOL>if READONLY:<EOL><INDENT>LOGGER.warning('<STR_LIT>')<EOL><DEDENT>CURRENTDIR = os.getcwd()<EOL>cplistfile = options.checkplotlist<EOL>if cplistfile and os.path.exists(cplistfile):<EOL><INDENT>with open(cplistfile,'<STR_LIT:r>') as infd:<EOL><INDENT>CHECKPLOTLIST = json.load(infd)<EOL><DEDENT>LOGGER.info('<STR_LIT>' % 
cplistfile)<EOL><DEDENT>elif cplistfile and not os.path.exists(cplistfile):<EOL><INDENT>helpmsg = (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" %<EOL>(cplistfile, os.path.join(modpath,'<STR_LIT>'))<EOL>)<EOL>LOGGER.error(helpmsg)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>LOGGER.warning('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % CURRENTDIR)<EOL>if os.path.exists(<EOL>os.path.join(CURRENTDIR,'<STR_LIT>')<EOL>):<EOL><INDENT>cplistfile = os.path.join(CURRENTDIR,'<STR_LIT>')<EOL>with open(cplistfile,'<STR_LIT:r>') as infd:<EOL><INDENT>CHECKPLOTLIST = json.load(infd)<EOL><DEDENT>LOGGER.info('<STR_LIT>' % cplistfile)<EOL><DEDENT>elif os.path.exists(os.path.join(CURRENTDIR,<EOL>'<STR_LIT>')):<EOL><INDENT>cplistfile = os.path.join(CURRENTDIR,<EOL>'<STR_LIT>')<EOL>with open(cplistfile,'<STR_LIT:r>') as infd:<EOL><INDENT>CHECKPLOTLIST = json.load(infd)<EOL><DEDENT>LOGGER.info('<STR_LIT>' % cplistfile)<EOL><DEDENT>else:<EOL><INDENT>helpmsg = (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL>LOGGER.error(helpmsg)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>HANDLERS = 
[<EOL>(r'<STR_LIT>'.format(baseurl=BASEURL),<EOL>cphandlers.IndexHandler,<EOL>{'<STR_LIT>':CURRENTDIR,<EOL>'<STR_LIT>':ASSETPATH,<EOL>'<STR_LIT>':CHECKPLOTLIST,<EOL>'<STR_LIT>':cplistfile,<EOL>'<STR_LIT>':EXECUTOR,<EOL>'<STR_LIT>':READONLY,<EOL>'<STR_LIT>':BASEURL}),<EOL>(r'<STR_LIT>'.format(baseurl=BASEURL),<EOL>cphandlers.CheckplotHandler,<EOL>{'<STR_LIT>':CURRENTDIR,<EOL>'<STR_LIT>':ASSETPATH,<EOL>'<STR_LIT>':CHECKPLOTLIST,<EOL>'<STR_LIT>':cplistfile,<EOL>'<STR_LIT>':EXECUTOR,<EOL>'<STR_LIT>':READONLY}),<EOL>(r'<STR_LIT>'.format(baseurl=BASEURL),<EOL>cphandlers.CheckplotListHandler,<EOL>{'<STR_LIT>':CURRENTDIR,<EOL>'<STR_LIT>':ASSETPATH,<EOL>'<STR_LIT>':CHECKPLOTLIST,<EOL>'<STR_LIT>':cplistfile,<EOL>'<STR_LIT>':EXECUTOR,<EOL>'<STR_LIT>':READONLY}),<EOL>(r'<STR_LIT>'.format(baseurl=BASEURL),<EOL>cphandlers.LCToolHandler,<EOL>{'<STR_LIT>':CURRENTDIR,<EOL>'<STR_LIT>':ASSETPATH,<EOL>'<STR_LIT>':CHECKPLOTLIST,<EOL>'<STR_LIT>':cplistfile,<EOL>'<STR_LIT>':EXECUTOR,<EOL>'<STR_LIT>':READONLY}),<EOL>(r'<STR_LIT>'.format(baseurl=BASEURL),<EOL>tornado.web.StaticFileHandler, {'<STR_LIT:path>': CURRENTDIR})<EOL>]<EOL><DEDENT>app = tornado.web.Application(<EOL>handlers=HANDLERS,<EOL>static_path=ASSETPATH,<EOL>template_path=ASSETPATH,<EOL>static_url_prefix='<STR_LIT>'.format(baseurl=BASEURL),<EOL>compress_response=True,<EOL>debug=DEBUG,<EOL>)<EOL>http_server = tornado.httpserver.HTTPServer(app, xheaders=True)<EOL>portok = False<EOL>serverport = options.port<EOL>maxtrys = <NUM_LIT:5><EOL>thistry = <NUM_LIT:0><EOL>while not portok and thistry < maxtrys:<EOL><INDENT>try:<EOL><INDENT>http_server.listen(serverport, options.serve)<EOL>portok = True<EOL><DEDENT>except socket.error as e:<EOL><INDENT>LOGGER.warning('<STR_LIT>' %<EOL>(options.serve, serverport, serverport + <NUM_LIT:1>))<EOL>serverport = serverport + <NUM_LIT:1><EOL><DEDENT><DEDENT>if not portok:<EOL><INDENT>LOGGER.error('<STR_LIT>')<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>LOGGER.info('<STR_LIT>' %<EOL>(options.serve, 
serverport, BASEURL))<EOL>signal.signal(signal.SIGINT,_recv_sigint)<EOL>signal.signal(signal.SIGTERM,_recv_sigint)<EOL>try:<EOL><INDENT>tornado.ioloop.IOLoop.instance().start()<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>LOGGER.info('<STR_LIT>')<EOL>tornado.ioloop.IOLoop.instance().stop()<EOL><DEDENT>EXECUTOR.shutdown()<EOL>time.sleep(<NUM_LIT:3>)<EOL>", "docstring": "This launches the server. The current script args are shown below::\n\n  Usage: checkplotserver [OPTIONS]\n\n  Options:\n\n    --help                           show this help information\n\n  checkplotserver.py options:\n\n    --assetpath                      Sets the asset (server images, css, js, DB)\n                                     path for checkplotserver.\n                                     (default <astrobase install dir>\n                                      /astrobase/cpserver/cps-assets)\n    --baseurl                        Set the base URL of the checkplotserver.\n                                     This is useful when you're running\n                                     checkplotserver on a remote machine and are\n                                     reverse-proxying more than one instances of\n                                     it so you can access them using HTTP from\n                                     outside on different base URLs like\n                                     /cpserver1/, /cpserver2/, etc. If this is\n                                     set, all URLs will take the form\n                                     [baseurl]/..., instead of /... (default /)\n    --checkplotlist                  The path to the checkplot-filelist.json file\n                                     listing checkplots to load and serve. 
If\n                                     this is not provided, checkplotserver will\n                                     look for a checkplot-pickle-flist.json in\n                                     the directory that it was started in\n    --debugmode                      start up in debug mode if set to 1. (default\n                                     0)\n    --maxprocs                       Number of background processes to use for\n                                     saving/loading checkplot files and running\n                                     light curves tools (default 2)\n    --port                           Run on the given port. (default 5225)\n    --readonly                       Run the server in readonly mode. This is\n                                     useful for a public-facing instance of\n                                     checkplotserver where you just want to allow\n                                     collaborators to review objects but not edit\n                                     them. (default False)\n    --serve                          Bind to given address and serve content.\n                                     (default 127.0.0.1)\n    --sharedsecret                   a file containing a cryptographically secure\n                                     string that is used to authenticate requests\n                                     that come into the special standalone mode.\n    --standalone                     This starts the server in standalone mode.\n                                     (default 0)\n\n  tornado/log.py options:\n\n    --log-file-max-size              max size of log files before rollover\n                                     (default 100000000)\n    --log-file-num-backups           number of log files to keep (default 10)\n    --log-file-prefix=PATH           Path prefix for log files. 
Note that if you\n                                     are running multiple tornado processes,\n                                     log_file_prefix must be different for each\n                                     of them (e.g. include the port number)\n    --log-rotate-interval            The interval value of timed rotating\n                                     (default 1)\n    --log-rotate-mode                The mode of rotating files(time or size)\n                                     (default size)\n    --log-rotate-when                specify the type of TimedRotatingFileHandler\n                                     interval other options:('S', 'M', 'H', 'D',\n                                     'W0'-'W6') (default midnight)\n    --log-to-stderr                  Send log output to stderr (colorized if\n                                     possible). By default use stderr if\n                                     --log_file_prefix is not set and no other\n                                     logging is configured.\n    --logging=debug|info|warning|error|none\n                                     Set the Python log level. If 'none', tornado\n                                     won't touch the logging configuration.\n                                     (default info)", "id": "f14710:m1"}
{"signature": "def initialize(self, currentdir, assetpath, cplist,<EOL>cplistfile, executor, readonly):", "body": "self.currentdir = currentdir<EOL>self.assetpath = assetpath<EOL>self.currentproject = cplist<EOL>self.cplistfile = cplistfile<EOL>self.executor = executor<EOL>self.readonly = readonly<EOL>", "docstring": "This handles initial setup of the `RequestHandler`.", "id": "f14711:c4:m0"}
{"signature": "def initialize(self, executor, secret):", "body": "self.executor = executor<EOL>self.secret = secret<EOL>", "docstring": "This handles initial setup of the `RequestHandler`.", "id": "f14711:c5:m0"}
{"signature": "def default(self, obj):", "body": "if isinstance(obj, np.ndarray):<EOL><INDENT>return obj.tolist()<EOL><DEDENT>elif isinstance(obj, bytes):<EOL><INDENT>return obj.decode()<EOL><DEDENT>elif isinstance(obj, complex):<EOL><INDENT>return (obj.real, obj.imag)<EOL><DEDENT>elif (isinstance(obj, (float, np.float64, np.float_)) and<EOL>not np.isfinite(obj)):<EOL><INDENT>return None<EOL><DEDENT>elif isinstance(obj, (np.int8, np.int16, np.int32, np.int64)):<EOL><INDENT>return int(obj)<EOL><DEDENT>else:<EOL><INDENT>return json.JSONEncoder.default(self, obj)<EOL><DEDENT>", "docstring": "Overrides the default serializer for `JSONEncoder`.\n\n        This can serialize the following objects in addition to what\n        `JSONEncoder` can already do.\n\n        - `np.array`\n        - `bytes`\n        - `complex`\n        - `np.float64` and other `np.dtype` objects\n\n        Parameters\n        ----------\n\n        obj : object\n            A Python object to serialize to JSON.\n\n        Returns\n        -------\n\n        str\n            A JSON encoded representation of the input object.", "id": "f14711:c0:m0"}
{"signature": "@gen.coroutine<EOL><INDENT>def post(self, cpfile):<DEDENT>", "body": "<EOL>if self.readonly:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':msg,<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':None}<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT>try:<EOL><INDENT>self.cpfile = base64.b64decode(url_unescape(cpfile)).decode()<EOL>cpcontents = self.get_argument('<STR_LIT>', default=None)<EOL>savetopng = self.get_argument('<STR_LIT>', default=None)<EOL>if not self.cpfile or not cpcontents:<EOL><INDENT>msg = \"<STR_LIT>\"<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':msg,<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':None}<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT>cpcontents = json.loads(cpcontents)<EOL>updated = {'<STR_LIT>': cpcontents['<STR_LIT>'],<EOL>'<STR_LIT>':cpcontents['<STR_LIT>'],<EOL>'<STR_LIT>':cpcontents['<STR_LIT>'],<EOL>'<STR_LIT>':cpcontents['<STR_LIT>']}<EOL>cpfpath = os.path.join(<EOL>os.path.abspath(os.path.dirname(self.cplistfile)),<EOL>self.cpfile<EOL>)<EOL>LOGGER.info('<STR_LIT>' % cpfpath)<EOL>if not os.path.exists(cpfpath):<EOL><INDENT>msg = \"<STR_LIT>\" % cpfpath<EOL>LOGGER.error(msg)<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':msg,<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':None}<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT>updated = yield self.executor.submit(checkplot_pickle_update,<EOL>cpfpath, updated)<EOL>if updated:<EOL><INDENT>LOGGER.info('<STR_LIT>' % updated)<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:success>',<EOL>'<STR_LIT:message>':'<STR_LIT>',<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':{'<STR_LIT>':updated,<EOL>'<STR_LIT>':utime.time(),<EOL>'<STR_LIT>':cpcontents,<EOL>'<STR_LIT>': None}}<EOL>if savetopng:<EOL><INDENT>cpfpng = 
os.path.abspath(cpfpath.replace('<STR_LIT>','<STR_LIT>'))<EOL>cpfpng = StrIO()<EOL>pngdone = yield self.executor.submit(<EOL>checkplot_pickle_to_png,<EOL>cpfpath, cpfpng<EOL>)<EOL>if pngdone is not None:<EOL><INDENT>pngdone.seek(<NUM_LIT:0>)<EOL>pngbin = pngdone.read()<EOL>pngb64 = base64.b64encode(pngbin)<EOL>pngdone.close()<EOL>del pngbin<EOL>resultdict['<STR_LIT:result>']['<STR_LIT>'] = pngb64<EOL><DEDENT>else:<EOL><INDENT>resultdict['<STR_LIT:result>']['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT><DEDENT>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT>else:<EOL><INDENT>LOGGER.error('<STR_LIT>' %<EOL>(self.cpfile, cpcontents))<EOL>msg = \"<STR_LIT>\"<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':msg,<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':None}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGGER.exception('<STR_LIT>' %<EOL>(self.cpfile, cpcontents))<EOL>msg = \"<STR_LIT>\"<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':msg,<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':None}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT>", "docstring": "This handles POST requests.\n\n        Also an AJAX endpoint. Updates the persistent checkplot dict using the\n        changes from the UI, and then saves it back to disk. This could\n        definitely be faster by just loading the checkplot into a server-wide\n        shared dict or something.", "id": "f14711:c2:m2"}
{"signature": "def initialize(self, currentdir, assetpath, cplist,<EOL>cplistfile, executor, readonly, baseurl):", "body": "self.currentdir = currentdir<EOL>self.assetpath = assetpath<EOL>self.currentproject = cplist<EOL>self.cplistfile = cplistfile<EOL>self.executor = executor<EOL>self.readonly = readonly<EOL>self.baseurl = baseurl<EOL>", "docstring": "handles initial setup.", "id": "f14711:c1:m0"}
{"signature": "def get(self):", "body": "<EOL>project_checkplots = self.currentproject['<STR_LIT>']<EOL>project_checkplotbasenames = [os.path.basename(x)<EOL>for x in project_checkplots]<EOL>project_checkplotindices = range(len(project_checkplots))<EOL>project_cpsortkey = self.currentproject['<STR_LIT>']<EOL>if self.currentproject['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>project_cpsortorder = '<STR_LIT>'<EOL><DEDENT>elif self.currentproject['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>project_cpsortorder = '<STR_LIT>'<EOL><DEDENT>project_cpfilterstatements = self.currentproject['<STR_LIT>']<EOL>self.render('<STR_LIT>',<EOL>project_checkplots=project_checkplots,<EOL>project_cpsortorder=project_cpsortorder,<EOL>project_cpsortkey=project_cpsortkey,<EOL>project_cpfilterstatements=project_cpfilterstatements,<EOL>project_checkplotbasenames=project_checkplotbasenames,<EOL>project_checkplotindices=project_checkplotindices,<EOL>project_checkplotfile=self.cplistfile,<EOL>readonly=self.readonly,<EOL>baseurl=self.baseurl)<EOL>", "docstring": "This handles GET requests to the index page.\n\n        TODO: provide the correct baseurl from the checkplotserver options dict,\n        so the frontend JS can just read that off immediately.", "id": "f14711:c1:m1"}
{"signature": "def initialize(self, currentdir, assetpath, cplist,<EOL>cplistfile, executor, readonly):", "body": "self.currentdir = currentdir<EOL>self.assetpath = assetpath<EOL>self.currentproject = cplist<EOL>self.cplistfile = cplistfile<EOL>self.executor = executor<EOL>self.readonly = readonly<EOL>", "docstring": "This handles initial setup of the `RequestHandler`.", "id": "f14711:c3:m0"}
{"signature": "@gen.coroutine<EOL><INDENT>def get(self, cpfile):<DEDENT>", "body": "if cpfile:<EOL><INDENT>self.cpfile = (<EOL>xhtml_escape(base64.b64decode(url_unescape(cpfile)))<EOL>)<EOL>if self.cpfile in self.currentproject['<STR_LIT>']:<EOL><INDENT>cpfpath = os.path.join(<EOL>os.path.abspath(os.path.dirname(self.cplistfile)),<EOL>self.cpfile<EOL>)<EOL>if not os.path.exists(cpfpath):<EOL><INDENT>msg = \"<STR_LIT>\" % cpfpath<EOL>LOGGER.error(msg)<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':msg,<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':None}<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT>forcereload = self.get_argument('<STR_LIT>',False)<EOL>if forcereload and xhtml_escape(forcereload):<EOL><INDENT>forcereload = True if forcereload == '<STR_LIT:true>' else False<EOL><DEDENT>cpobjectid = self.get_argument('<STR_LIT>',None)<EOL>lctool = self.get_argument('<STR_LIT>', None)<EOL>resultdict = {'<STR_LIT:status>':None,<EOL>'<STR_LIT:message>':None,<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':None}<EOL>if lctool:<EOL><INDENT>lctool = xhtml_escape(lctool)<EOL>lctoolargs = []<EOL>lctoolkwargs = {}<EOL>if lctool in CPTOOLMAP:<EOL><INDENT>try:<EOL><INDENT>for xkwarg, xkwargtype, xkwargdef in zip(<EOL>CPTOOLMAP[lctool]['<STR_LIT>'],<EOL>CPTOOLMAP[lctool]['<STR_LIT>'],<EOL>CPTOOLMAP[lctool]['<STR_LIT>']<EOL>):<EOL><INDENT>if xkwargtype is list:<EOL><INDENT>wbkwarg = self.get_arguments(xkwarg)<EOL>if len(wbkwarg) > <NUM_LIT:0>:<EOL><INDENT>wbkwarg = [url_unescape(xhtml_escape(x))<EOL>for x in wbkwarg]<EOL><DEDENT>else:<EOL><INDENT>wbkwarg = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>wbkwarg = self.get_argument(xkwarg, None)<EOL>if wbkwarg is not None:<EOL><INDENT>wbkwarg = url_unescape(<EOL>xhtml_escape(wbkwarg)<EOL>)<EOL><DEDENT><DEDENT>LOGGER.info('<STR_LIT>' %<EOL>(xkwarg, repr(wbkwarg)))<EOL>if wbkwarg is None:<EOL><INDENT>wbkwarg = xkwargdef<EOL><DEDENT>else:<EOL><INDENT>if xkwargtype is 
list:<EOL><INDENT>wbkwarg = [float(x) for x in wbkwarg]<EOL><DEDENT>elif xkwargtype is bool:<EOL><INDENT>if wbkwarg == '<STR_LIT:false>':<EOL><INDENT>wbkwarg = False<EOL><DEDENT>elif wbkwarg == '<STR_LIT:true>':<EOL><INDENT>wbkwarg = True<EOL><DEDENT>else:<EOL><INDENT>wbkwarg = xkwargdef<EOL><DEDENT><DEDENT>else:<EOL><INDENT>wbkwarg = xkwargtype(wbkwarg)<EOL><DEDENT><DEDENT>if xkwarg.endswith('<STR_LIT>'):<EOL><INDENT>xkwarg = xkwarg.rstrip('<STR_LIT>')<EOL><DEDENT>lctoolkwargs.update({xkwarg:wbkwarg})<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGGER.exception('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, xkwarg))<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT:error>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, xkwarg)<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {'<STR_LIT>':cpobjectid}<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGGER.error('<STR_LIT>' % lctool)<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT:error>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>' % lctool<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {'<STR_LIT>':cpobjectid}<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGGER.error('<STR_LIT>')<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT:error>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>'<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {'<STR_LIT>':cpobjectid}<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT>LOGGER.info('<STR_LIT>' % cpfpath)<EOL>cpdict = yield self.executor.submit(<EOL>_read_checkplot_picklefile, cpfpath<EOL>)<EOL>tempfpath = cpfpath + '<STR_LIT>'<EOL>if os.path.exists(tempfpath):<EOL><INDENT>tempcpdict = yield self.executor.submit(<EOL>_read_checkplot_picklefile, tempfpath<EOL>)<EOL><DEDENT>else:<EOL><INDENT>tempcpdict = 
{<EOL>'<STR_LIT>':cpdict['<STR_LIT>'],<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':cpdict['<STR_LIT>']['<STR_LIT>'],<EOL>'<STR_LIT>':cpdict['<STR_LIT>']['<STR_LIT>'],<EOL>'<STR_LIT>':cpdict['<STR_LIT>']['<STR_LIT>'],<EOL>}<EOL>}<EOL><DEDENT>if not forcereload:<EOL><INDENT>cptimes, cpmags, cperrs = (<EOL>tempcpdict['<STR_LIT>']['<STR_LIT>'],<EOL>tempcpdict['<STR_LIT>']['<STR_LIT>'],<EOL>tempcpdict['<STR_LIT>']['<STR_LIT>'],<EOL>)<EOL>LOGGER.info('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>cptimes, cpmags, cperrs = (cpdict['<STR_LIT>']['<STR_LIT>'],<EOL>cpdict['<STR_LIT>']['<STR_LIT>'],<EOL>cpdict['<STR_LIT>']['<STR_LIT>'])<EOL>LOGGER.info('<STR_LIT>')<EOL><DEDENT>for xarg, xargtype in zip(CPTOOLMAP[lctool]['<STR_LIT:args>'],<EOL>CPTOOLMAP[lctool]['<STR_LIT>']):<EOL><INDENT>if xarg is None:<EOL><INDENT>lctoolargs.append(None)<EOL><DEDENT>elif xarg == '<STR_LIT>':<EOL><INDENT>lctoolargs.append(cptimes)<EOL><DEDENT>elif xarg == '<STR_LIT>':<EOL><INDENT>lctoolargs.append(cpmags)<EOL><DEDENT>elif xarg == '<STR_LIT>':<EOL><INDENT>lctoolargs.append(cperrs)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>if xargtype is list:<EOL><INDENT>wbarg = self.get_arguments(xarg)<EOL><DEDENT>else:<EOL><INDENT>wbarg = url_unescape(<EOL>xhtml_escape(<EOL>self.get_argument(xarg, None)<EOL>)<EOL>)<EOL><DEDENT>if xargtype is list:<EOL><INDENT>wbarg = [float(x) for x in wbarg]<EOL><DEDENT>elif xargtype is float and xarg == '<STR_LIT>':<EOL><INDENT>try:<EOL><INDENT>wbarg = xargtype(wbarg)<EOL><DEDENT>except Exception as e:<EOL><INDENT>wbarg = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>wbarg = xargtype(wbarg)<EOL><DEDENT>lctoolargs.append(wbarg)<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGGER.exception('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, xarg))<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT:error>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, xarg)<EOL>)<EOL>resultdict['<STR_LIT:result>'] = 
{'<STR_LIT>':cpobjectid}<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT><DEDENT><DEDENT>LOGGER.info(lctool)<EOL>LOGGER.info(lctoolargs)<EOL>LOGGER.info(lctoolkwargs)<EOL>resloc = CPTOOLMAP[lctool]['<STR_LIT>']<EOL>objectid = cpdict['<STR_LIT>']<EOL>if lctool in ('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'):<EOL><INDENT>lspmethod = resloc[<NUM_LIT:0>]<EOL>if (lspmethod in tempcpdict and<EOL>isinstance(tempcpdict[lspmethod], dict) and<EOL>(not forcereload)):<EOL><INDENT>bestperiod = (<EOL>tempcpdict[lspmethod]['<STR_LIT>']<EOL>)<EOL>nbestperiods = (<EOL>tempcpdict[lspmethod]['<STR_LIT>']<EOL>)<EOL>nbestlspvals = (<EOL>tempcpdict[lspmethod]['<STR_LIT>']<EOL>)<EOL>periodogram = (<EOL>tempcpdict[lspmethod]['<STR_LIT>']<EOL>)<EOL>phasedlc0plot = (<EOL>tempcpdict[lspmethod][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>phasedlc0period = float(<EOL>tempcpdict[lspmethod][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>phasedlc0epoch = float(<EOL>tempcpdict[lspmethod][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, tempfpath)<EOL>)<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>lctool<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {<EOL>'<STR_LIT>':objectid,<EOL>lspmethod:{<EOL>'<STR_LIT>':nbestperiods,<EOL>'<STR_LIT>':periodogram,<EOL>'<STR_LIT>':bestperiod,<EOL>'<STR_LIT>':nbestlspvals,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':phasedlc0plot,<EOL>'<STR_LIT>':phasedlc0period,<EOL>'<STR_LIT>':phasedlc0epoch,<EOL>}<EOL>}<EOL>}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT>else:<EOL><INDENT>if lctoolkwargs['<STR_LIT>'] is not None:<EOL><INDENT>wtimes, wmags, werrs = 
lcmath.sigclip_magseries(<EOL>lctoolargs[<NUM_LIT:0>],<EOL>lctoolargs[<NUM_LIT:1>],<EOL>lctoolargs[<NUM_LIT:2>],<EOL>sigclip=lctoolkwargs['<STR_LIT>'],<EOL>magsarefluxes=lctoolkwargs['<STR_LIT>']<EOL>)<EOL>lctoolargs[<NUM_LIT:0>] = wtimes<EOL>lctoolargs[<NUM_LIT:1>] = wmags<EOL>lctoolargs[<NUM_LIT:2>] = werrs<EOL><DEDENT>if lctoolkwargs['<STR_LIT>']:<EOL><INDENT>wtimes, wmags, werrs = (lctoolargs[<NUM_LIT:0>],<EOL>lctoolargs[<NUM_LIT:1>],<EOL>lctoolargs[<NUM_LIT:2>])<EOL>filtermasks = [<EOL>np.full_like(wtimes, False, dtype=np.bool_)<EOL>]<EOL>filterstr = lctoolkwargs['<STR_LIT>']<EOL>filters = filterstr.split('<STR_LIT:U+002C>')<EOL>filters = [<EOL>x.strip().lstrip('<STR_LIT:(>').rstrip('<STR_LIT:)>').strip()<EOL>for x in filters<EOL>]<EOL>for filt in filters:<EOL><INDENT>try:<EOL><INDENT>thisfilt = filt.split('<STR_LIT::>')<EOL>if len(thisfilt) == <NUM_LIT:2>:<EOL><INDENT>filt_lo = float(thisfilt[<NUM_LIT:0>])<EOL>filt_hi = float(thisfilt[<NUM_LIT:1>])<EOL>filtermasks.append(<EOL>((wtimes -<EOL>cptimes.min()) < filt_hi) &<EOL>((wtimes -<EOL>cptimes.min()) > filt_lo)<EOL>)<EOL><DEDENT>elif (len(thisfilt) == <NUM_LIT:3> and<EOL>thisfilt[<NUM_LIT:0>].strip() == '<STR_LIT>'):<EOL><INDENT>filt_lo = float(thisfilt[<NUM_LIT:1>])<EOL>filt_hi = float(thisfilt[<NUM_LIT:2>])<EOL>filtermasks.append(np.logical_not(<EOL>(((wtimes -<EOL>cptimes.min()) < filt_hi) &<EOL>((wtimes -<EOL>cptimes.min()) > filt_lo))<EOL>))<EOL><DEDENT>else:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>if len(filtermasks) > <NUM_LIT:0>:<EOL><INDENT>filterind = np.column_stack(filtermasks)<EOL>filterind = np.any(filterind, axis=<NUM_LIT:1>)<EOL>lctoolargs[<NUM_LIT:0>] = wtimes[filterind]<EOL>lctoolargs[<NUM_LIT:1>] = wmags[filterind]<EOL>lctoolargs[<NUM_LIT:2>] = werrs[filterind]<EOL><DEDENT><DEDENT>if lctoolkwargs['<STR_LIT>']:<EOL><INDENT>wtimes, wmags, werrs = 
(lctoolargs[<NUM_LIT:0>],<EOL>lctoolargs[<NUM_LIT:1>],<EOL>lctoolargs[<NUM_LIT:2>])<EOL>filtermasks = [<EOL>np.full_like(wtimes, False, dtype=np.bool_)<EOL>]<EOL>filterstr = lctoolkwargs['<STR_LIT>']<EOL>filters = filterstr.split('<STR_LIT:U+002C>')<EOL>filters = [<EOL>x.strip().strip()<EOL>for x in filters<EOL>]<EOL>for filt in filters:<EOL><INDENT>try:<EOL><INDENT>thisfilt = filt.split('<STR_LIT::>')<EOL>if len(thisfilt) == <NUM_LIT:2>:<EOL><INDENT>filt_lo = float(thisfilt[<NUM_LIT:0>])<EOL>filt_hi = float(thisfilt[<NUM_LIT:1>])<EOL>filtermasks.append(<EOL>(wmags < filt_hi) &<EOL>(wmags > filt_lo)<EOL>)<EOL><DEDENT>elif (len(thisfilt) == <NUM_LIT:3> and<EOL>thisfilt[<NUM_LIT:0>].strip() == '<STR_LIT>'):<EOL><INDENT>filt_lo = float(thisfilt[<NUM_LIT:1>])<EOL>filt_hi = float(thisfilt[<NUM_LIT:2>])<EOL>filtermasks.append(np.logical_not(<EOL>((wmags < filt_hi) &<EOL>(wmags > filt_lo))<EOL>))<EOL><DEDENT>else:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>if len(filtermasks) > <NUM_LIT:0>:<EOL><INDENT>filterind = np.column_stack(filtermasks)<EOL>filterind = np.any(filterind, axis=<NUM_LIT:1>)<EOL>lctoolargs[<NUM_LIT:0>] = wtimes[filterind]<EOL>lctoolargs[<NUM_LIT:1>] = wmags[filterind]<EOL>lctoolargs[<NUM_LIT:2>] = werrs[filterind]<EOL><DEDENT><DEDENT>del lctoolkwargs['<STR_LIT>']<EOL>del lctoolkwargs['<STR_LIT>']<EOL>lctoolfunction = CPTOOLMAP[lctool]['<STR_LIT>']<EOL>funcresults = yield self.executor.submit(<EOL>lctoolfunction,<EOL>*lctoolargs,<EOL>**lctoolkwargs<EOL>)<EOL>nbestperiods = funcresults['<STR_LIT>']<EOL>nbestlspvals = funcresults['<STR_LIT>']<EOL>bestperiod = funcresults['<STR_LIT>']<EOL>pgramres = yield self.executor.submit(<EOL>_pkl_periodogram,<EOL>funcresults,<EOL>)<EOL>phasedlcargs0 = (None,<EOL>lspmethod,<EOL>-<NUM_LIT:1>,<EOL>lctoolargs[<NUM_LIT:0>],<EOL>lctoolargs[<NUM_LIT:1>],<EOL>lctoolargs[<NUM_LIT:2>],<EOL>nbestperiods[<NUM_LIT:0>],<EOL>'<STR_LIT>')<EOL>if len(nbestperiods) > 
<NUM_LIT:1>:<EOL><INDENT>phasedlcargs1 = (None,<EOL>lspmethod,<EOL>-<NUM_LIT:1>,<EOL>lctoolargs[<NUM_LIT:0>],<EOL>lctoolargs[<NUM_LIT:1>],<EOL>lctoolargs[<NUM_LIT:2>],<EOL>nbestperiods[<NUM_LIT:1>],<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>phasedlcargs1 = None<EOL><DEDENT>if len(nbestperiods) > <NUM_LIT:2>:<EOL><INDENT>phasedlcargs2 = (None,<EOL>lspmethod,<EOL>-<NUM_LIT:1>,<EOL>lctoolargs[<NUM_LIT:0>],<EOL>lctoolargs[<NUM_LIT:1>],<EOL>lctoolargs[<NUM_LIT:2>],<EOL>nbestperiods[<NUM_LIT:2>],<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>phasedlcargs2 = None<EOL><DEDENT>phasedlckwargs = {<EOL>'<STR_LIT>':False,<EOL>'<STR_LIT>':lctoolkwargs['<STR_LIT>'],<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>}<EOL>phasedlc0 = yield self.executor.submit(<EOL>_pkl_phased_magseries_plot,<EOL>*phasedlcargs0,<EOL>**phasedlckwargs<EOL>)<EOL>if phasedlcargs1 is not None:<EOL><INDENT>phasedlc1 = yield self.executor.submit(<EOL>_pkl_phased_magseries_plot,<EOL>*phasedlcargs1,<EOL>**phasedlckwargs<EOL>)<EOL><DEDENT>else:<EOL><INDENT>phasedlc1 = None<EOL><DEDENT>if phasedlcargs2 is not None:<EOL><INDENT>phasedlc2 = yield self.executor.submit(<EOL>_pkl_phased_magseries_plot,<EOL>*phasedlcargs2,<EOL>**phasedlckwargs<EOL>)<EOL><DEDENT>else:<EOL><INDENT>phasedlc2 = None<EOL><DEDENT>if not self.readonly:<EOL><INDENT>tempcpdict[lspmethod] = {<EOL>'<STR_LIT>':funcresults['<STR_LIT>'],<EOL>'<STR_LIT>':funcresults['<STR_LIT>'],<EOL>'<STR_LIT>':funcresults['<STR_LIT>'],<EOL>'<STR_LIT>':funcresults['<STR_LIT>'],<EOL>'<STR_LIT>':funcresults['<STR_LIT>'],<EOL>'<STR_LIT>':(<EOL>pgramres[lspmethod]['<STR_LIT>']<EOL>),<EOL><NUM_LIT:0>:phasedlc0,<EOL>}<EOL>if phasedlc1 is not None:<EOL><INDENT>tempcpdict[lspmethod][<NUM_LIT:1>] = phasedlc1<EOL><DEDENT>if phasedlc2 is not None:<EOL><INDENT>tempcpdict[lspmethod][<NUM_LIT:2>] = phasedlc2<EOL><DEDENT>savekwargs = {<EOL>'<STR_LIT>':tempfpath,<EOL>'<STR_LIT>':pickle.HIGHEST_PROTOCOL<EOL>}<EOL>savedcpf = yield 
self.executor.submit(<EOL>_write_checkplot_picklefile,<EOL>tempcpdict,<EOL>**savekwargs<EOL>)<EOL>LOGGER.info(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, savedcpf)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>periodogram = pgramres[lspmethod]['<STR_LIT>']<EOL>phasedlc0plot = phasedlc0['<STR_LIT>']<EOL>phasedlc0period = float(phasedlc0['<STR_LIT>'])<EOL>phasedlc0epoch = float(phasedlc0['<STR_LIT>'])<EOL>if phasedlc1 is not None:<EOL><INDENT>phasedlc1plot = phasedlc1['<STR_LIT>']<EOL>phasedlc1period = float(phasedlc1['<STR_LIT>'])<EOL>phasedlc1epoch = float(phasedlc1['<STR_LIT>'])<EOL><DEDENT>if phasedlc2 is not None:<EOL><INDENT>phasedlc2plot = phasedlc2['<STR_LIT>']<EOL>phasedlc2period = float(phasedlc2['<STR_LIT>'])<EOL>phasedlc2epoch = float(phasedlc2['<STR_LIT>'])<EOL><DEDENT>resultdict['<STR_LIT:status>'] = '<STR_LIT:success>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>' %<EOL>lctool<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {<EOL>'<STR_LIT>':objectid,<EOL>lspmethod:{<EOL>'<STR_LIT>':nbestperiods,<EOL>'<STR_LIT>':nbestlspvals,<EOL>'<STR_LIT>':periodogram,<EOL>'<STR_LIT>':bestperiod,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':phasedlc0plot,<EOL>'<STR_LIT>':phasedlc0period,<EOL>'<STR_LIT>':phasedlc0epoch,<EOL>},<EOL>}<EOL>}<EOL>if phasedlc1 is not None:<EOL><INDENT>resultdict['<STR_LIT:result>'][lspmethod]['<STR_LIT>'] = {<EOL>'<STR_LIT>':phasedlc1plot,<EOL>'<STR_LIT>':phasedlc1period,<EOL>'<STR_LIT>':phasedlc1epoch,<EOL>}<EOL><DEDENT>if phasedlc2 is not None:<EOL><INDENT>resultdict['<STR_LIT:result>'][lspmethod]['<STR_LIT>'] = {<EOL>'<STR_LIT>':phasedlc2plot,<EOL>'<STR_LIT>':phasedlc2period,<EOL>'<STR_LIT>':phasedlc2epoch,<EOL>}<EOL><DEDENT>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT><DEDENT>elif lctool == '<STR_LIT>':<EOL><INDENT>lspmethod = lctoolargs[<NUM_LIT:1>]<EOL>periodind = lctoolargs[<NUM_LIT:2>]<EOL>if (not forcereload and lspmethod in tempcpdict 
and<EOL>isinstance(tempcpdict[lspmethod], dict) and<EOL>periodind in tempcpdict[lspmethod] and<EOL>isinstance(tempcpdict[lspmethod][periodind], dict)):<EOL><INDENT>phasedlc = tempcpdict[lspmethod][periodind]<EOL>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, tempfpath)<EOL>)<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>lctool<EOL>)<EOL>retkey = '<STR_LIT>' % periodind<EOL>resultdict['<STR_LIT:result>'] = {<EOL>'<STR_LIT>':objectid,<EOL>lspmethod:{<EOL>retkey:phasedlc<EOL>}<EOL>}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT>else:<EOL><INDENT>lctoolkwargs['<STR_LIT>'] = '<STR_LIT>'<EOL>lctoolargs[<NUM_LIT:2>] = -<NUM_LIT:1><EOL>if lctoolkwargs['<STR_LIT>'] is not None:<EOL><INDENT>stimes, smags, serrs = lcmath.sigclip_magseries(<EOL>lctoolargs[<NUM_LIT:3>],<EOL>lctoolargs[<NUM_LIT:4>],<EOL>lctoolargs[<NUM_LIT:5>],<EOL>sigclip=lctoolkwargs['<STR_LIT>'],<EOL>magsarefluxes=lctoolkwargs['<STR_LIT>']<EOL>)<EOL><DEDENT>else:<EOL><INDENT>stimes, smags, serrs = (lctoolargs[<NUM_LIT:3>],<EOL>lctoolargs[<NUM_LIT:4>],<EOL>lctoolargs[<NUM_LIT:5>])<EOL><DEDENT>if lctoolkwargs['<STR_LIT>']:<EOL><INDENT>wtimes, wmags, werrs = stimes, smags, serrs<EOL>filtermasks = [<EOL>np.full_like(wtimes, False, dtype=np.bool_)<EOL>]<EOL>filterstr = lctoolkwargs['<STR_LIT>']<EOL>filters = filterstr.split('<STR_LIT:U+002C>')<EOL>filters = [<EOL>x.strip().lstrip('<STR_LIT:(>').rstrip('<STR_LIT:)>').strip()<EOL>for x in filters<EOL>]<EOL>for filt in filters:<EOL><INDENT>try:<EOL><INDENT>thisfilt = filt.split('<STR_LIT::>')<EOL>if len(thisfilt) == <NUM_LIT:2>:<EOL><INDENT>filt_lo = float(thisfilt[<NUM_LIT:0>])<EOL>filt_hi = float(thisfilt[<NUM_LIT:1>])<EOL>filtermasks.append(<EOL>((wtimes -<EOL>cptimes.min()) < filt_hi) &<EOL>((wtimes -<EOL>cptimes.min()) > filt_lo)<EOL>)<EOL><DEDENT>elif (len(thisfilt) == <NUM_LIT:3> and<EOL>thisfilt[<NUM_LIT:0>].strip() == '<STR_LIT>'):<EOL><INDENT>filt_lo 
= float(thisfilt[<NUM_LIT:1>])<EOL>filt_hi = float(thisfilt[<NUM_LIT:2>])<EOL>filtermasks.append(np.logical_not(<EOL>(((wtimes -<EOL>cptimes.min()) < filt_hi) &<EOL>((wtimes -<EOL>cptimes.min()) > filt_lo))<EOL>))<EOL><DEDENT>else:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>if len(filtermasks) > <NUM_LIT:0>:<EOL><INDENT>filterind = np.column_stack(filtermasks)<EOL>filterind = np.any(filterind, axis=<NUM_LIT:1>)<EOL>stimes = wtimes[filterind]<EOL>smags = wmags[filterind]<EOL>serrs = werrs[filterind]<EOL><DEDENT><DEDENT>if lctoolkwargs['<STR_LIT>']:<EOL><INDENT>wtimes, wmags, werrs = stimes, smags, serrs<EOL>filtermasks = [<EOL>np.full_like(wtimes, False, dtype=np.bool_)<EOL>]<EOL>filterstr = lctoolkwargs['<STR_LIT>']<EOL>filters = filterstr.split('<STR_LIT:U+002C>')<EOL>filters = [<EOL>x.strip().strip()<EOL>for x in filters<EOL>]<EOL>for filt in filters:<EOL><INDENT>try:<EOL><INDENT>thisfilt = filt.split('<STR_LIT::>')<EOL>if len(thisfilt) == <NUM_LIT:2>:<EOL><INDENT>filt_lo = float(thisfilt[<NUM_LIT:0>])<EOL>filt_hi = float(thisfilt[<NUM_LIT:1>])<EOL>filtermasks.append(<EOL>(wmags < filt_hi) &<EOL>(wmags > filt_lo)<EOL>)<EOL><DEDENT>elif (len(thisfilt) == <NUM_LIT:3> and<EOL>thisfilt[<NUM_LIT:0>].strip() == '<STR_LIT>'):<EOL><INDENT>filt_lo = float(thisfilt[<NUM_LIT:1>])<EOL>filt_hi = float(thisfilt[<NUM_LIT:2>])<EOL>filtermasks.append(np.logical_not(<EOL>((wmags < filt_hi) &<EOL>(wmags > filt_lo))<EOL>))<EOL><DEDENT>else:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>if len(filtermasks) > <NUM_LIT:0>:<EOL><INDENT>filterind = np.column_stack(filtermasks)<EOL>filterind = np.any(filterind, axis=<NUM_LIT:1>)<EOL>stimes = wtimes[filterind]<EOL>smags = wmags[filterind]<EOL>serrs = werrs[filterind]<EOL><DEDENT><DEDENT>del lctoolkwargs['<STR_LIT>']<EOL>del lctoolkwargs['<STR_LIT>']<EOL>if lctoolargs[-<NUM_LIT:1>] is 
None:<EOL><INDENT>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL>try:<EOL><INDENT>spfit = lcfit.spline_fit_magseries(<EOL>stimes,         <EOL>smags,          <EOL>serrs,          <EOL>lctoolargs[<NUM_LIT:6>],  <EOL>magsarefluxes=lctoolkwargs['<STR_LIT>'],<EOL>sigclip=None,<EOL>verbose=True<EOL>)<EOL>lctoolargs[-<NUM_LIT:1>] = spfit['<STR_LIT>']['<STR_LIT>']<EOL>if len(spfit['<STR_LIT>']['<STR_LIT>']) != <NUM_LIT:1>:<EOL><INDENT>lctoolargs[-<NUM_LIT:1>] = (<EOL>spfit['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>]<EOL>)<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGGER.exception(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL>lctoolargs[-<NUM_LIT:1>] = np.min(stimes)<EOL><DEDENT><DEDENT>lctoolargs[<NUM_LIT:3>] = stimes<EOL>lctoolargs[<NUM_LIT:4>] = smags<EOL>lctoolargs[<NUM_LIT:5>] = serrs<EOL>del lctoolkwargs['<STR_LIT>']<EOL>lctoolfunction = CPTOOLMAP[lctool]['<STR_LIT>']<EOL>funcresults = yield self.executor.submit(<EOL>lctoolfunction,<EOL>*lctoolargs,<EOL>**lctoolkwargs<EOL>)<EOL>if not self.readonly:<EOL><INDENT>if (lspmethod in tempcpdict and<EOL>isinstance(tempcpdict[lspmethod], dict)):<EOL><INDENT>if periodind in tempcpdict[lspmethod]:<EOL><INDENT>tempcpdict[lspmethod][periodind] = (<EOL>funcresults<EOL>)<EOL><DEDENT>else:<EOL><INDENT>tempcpdict[lspmethod].update(<EOL>{periodind: funcresults}<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>tempcpdict[lspmethod] = {periodind: funcresults}<EOL><DEDENT>savekwargs = {<EOL>'<STR_LIT>':tempfpath,<EOL>'<STR_LIT>':pickle.HIGHEST_PROTOCOL<EOL>}<EOL>savedcpf = yield self.executor.submit(<EOL>_write_checkplot_picklefile,<EOL>tempcpdict,<EOL>**savekwargs<EOL>)<EOL>LOGGER.info(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, savedcpf)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>resultdict['<STR_LIT:status>'] = '<STR_LIT:success>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>' %<EOL>lctool<EOL>)<EOL>retkey = '<STR_LIT>' % 
periodind<EOL>resultdict['<STR_LIT:result>'] = {<EOL>'<STR_LIT>':objectid,<EOL>lspmethod:{<EOL>retkey:funcresults<EOL>}<EOL>}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT><DEDENT>elif lctool == '<STR_LIT>':<EOL><INDENT>if (not forcereload and<EOL>'<STR_LIT>' in tempcpdict and<EOL>isinstance(tempcpdict['<STR_LIT>'], dict) and<EOL>'<STR_LIT>' in tempcpdict['<STR_LIT>'] and<EOL>isinstance(tempcpdict['<STR_LIT>']['<STR_LIT>'], dict)):<EOL><INDENT>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, tempfpath)<EOL>)<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>lctool<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {<EOL>'<STR_LIT>':objectid,<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': (<EOL>tempcpdict['<STR_LIT>']['<STR_LIT>']<EOL>)<EOL>}<EOL>}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT>else:<EOL><INDENT>lctoolfunction = CPTOOLMAP[lctool]['<STR_LIT>']<EOL>funcresults = yield self.executor.submit(<EOL>lctoolfunction,<EOL>*lctoolargs,<EOL>**lctoolkwargs<EOL>)<EOL>if not self.readonly:<EOL><INDENT>if ('<STR_LIT>' in tempcpdict and<EOL>isinstance(tempcpdict['<STR_LIT>'], dict)):<EOL><INDENT>if '<STR_LIT>' in tempcpdict['<STR_LIT>']:<EOL><INDENT>tempcpdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>funcresults<EOL>)<EOL><DEDENT>else:<EOL><INDENT>tempcpdict['<STR_LIT>'].update(<EOL>{'<STR_LIT>': funcresults}<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>tempcpdict['<STR_LIT>'] = {'<STR_LIT>':<EOL>funcresults}<EOL><DEDENT>savekwargs = {<EOL>'<STR_LIT>':tempfpath,<EOL>'<STR_LIT>':pickle.HIGHEST_PROTOCOL<EOL>}<EOL>savedcpf = yield self.executor.submit(<EOL>_write_checkplot_picklefile,<EOL>tempcpdict,<EOL>**savekwargs<EOL>)<EOL>LOGGER.info(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, savedcpf)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>resultdict['<STR_LIT:status>'] = '<STR_LIT:success>'<EOL>resultdict['<STR_LIT:message>'] = 
(<EOL>'<STR_LIT>' %<EOL>lctool<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {<EOL>'<STR_LIT>':objectid,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':funcresults<EOL>}<EOL>}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT><DEDENT>elif lctool in ('<STR_LIT>','<STR_LIT>'):<EOL><INDENT>key1, key2 = resloc<EOL>if (not forcereload and<EOL>key1 in tempcpdict and<EOL>isinstance(tempcpdict[key1], dict) and<EOL>key2 in tempcpdict[key1] and<EOL>isinstance(tempcpdict[key1][key2], dict)):<EOL><INDENT>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, tempfpath)<EOL>)<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>lctool<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {<EOL>'<STR_LIT>':objectid,<EOL>key1: {<EOL>key2: (<EOL>tempcpdict[key1][key2]<EOL>)<EOL>}<EOL>}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT>else:<EOL><INDENT>lctoolfunction = CPTOOLMAP[lctool]['<STR_LIT>']<EOL>lctoolkwargs['<STR_LIT>'] = StrIO()<EOL>funcresults = yield self.executor.submit(<EOL>lctoolfunction,<EOL>*lctoolargs,<EOL>**lctoolkwargs<EOL>)<EOL>fitfd = funcresults['<STR_LIT>']<EOL>fitfd.seek(<NUM_LIT:0>)<EOL>fitbin = fitfd.read()<EOL>fitb64 = base64.b64encode(fitbin)<EOL>fitfd.close()<EOL>funcresults['<STR_LIT>'] = fitb64<EOL>if not self.readonly:<EOL><INDENT>if (key1 in tempcpdict and<EOL>isinstance(tempcpdict[key1], dict)):<EOL><INDENT>if key2 in tempcpdict[key1]:<EOL><INDENT>tempcpdict[key1][key2] = (<EOL>funcresults<EOL>)<EOL><DEDENT>else:<EOL><INDENT>tempcpdict[key1].update(<EOL>{key2: funcresults}<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>tempcpdict[key1] = {key2: funcresults}<EOL><DEDENT>savekwargs = {<EOL>'<STR_LIT>':tempfpath,<EOL>'<STR_LIT>':pickle.HIGHEST_PROTOCOL<EOL>}<EOL>savedcpf = yield self.executor.submit(<EOL>_write_checkplot_picklefile,<EOL>tempcpdict,<EOL>**savekwargs<EOL>)<EOL>LOGGER.info(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, 
savedcpf)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>fitreturndict = {'<STR_LIT>':fitb64}<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT:success>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>' %<EOL>lctool<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {<EOL>'<STR_LIT>':objectid,<EOL>key1:{<EOL>key2:fitreturndict<EOL>}<EOL>}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT><DEDENT>elif lctool in ('<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'):<EOL><INDENT>key1, key2 = resloc<EOL>if (not forcereload and<EOL>key1 in tempcpdict and<EOL>isinstance(tempcpdict[key1], dict) and<EOL>key2 in tempcpdict[key1] and<EOL>isinstance(tempcpdict[key1][key2], dict)):<EOL><INDENT>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, tempfpath)<EOL>)<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>lctool<EOL>)<EOL>phasedfitlc = tempcpdict[key1][key2]<EOL>fitresults = {<EOL>'<STR_LIT>':phasedfitlc['<STR_LIT>']['<STR_LIT>'],<EOL>'<STR_LIT>':phasedfitlc['<STR_LIT>']['<STR_LIT>'],<EOL>'<STR_LIT>':phasedfitlc['<STR_LIT>']['<STR_LIT>'],<EOL>'<STR_LIT>':phasedfitlc['<STR_LIT>'],<EOL>'<STR_LIT>':phasedfitlc['<STR_LIT>'],<EOL>'<STR_LIT>':phasedfitlc['<STR_LIT>'],<EOL>}<EOL>if ('<STR_LIT>' in phasedfitlc['<STR_LIT>']['<STR_LIT>'] and<EOL>phasedfitlc['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']<EOL>is not None):<EOL><INDENT>fitresults['<STR_LIT>'] = (<EOL>phasedfitlc['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']<EOL>)<EOL><DEDENT>resultdict['<STR_LIT:result>'] = {<EOL>'<STR_LIT>':objectid,<EOL>key1: {<EOL>key2: (<EOL>fitresults<EOL>)<EOL>}<EOL>}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT>else:<EOL><INDENT>lctoolfunction = CPTOOLMAP[lctool]['<STR_LIT>']<EOL>funcresults = yield self.executor.submit(<EOL>lctoolfunction,<EOL>*lctoolargs,<EOL>**lctoolkwargs<EOL>)<EOL>phasedlcargs = 
(None,<EOL>'<STR_LIT>',<EOL>-<NUM_LIT:1>,<EOL>cptimes,<EOL>cpmags,<EOL>cperrs,<EOL>lctoolargs[<NUM_LIT:3>],  <EOL>'<STR_LIT>')<EOL>phasedlckwargs = {<EOL>'<STR_LIT>':False,<EOL>'<STR_LIT>':lctoolkwargs['<STR_LIT>'],<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':funcresults<EOL>}<EOL>phasedlc = yield self.executor.submit(<EOL>_pkl_phased_magseries_plot,<EOL>*phasedlcargs,<EOL>**phasedlckwargs<EOL>)<EOL>if not self.readonly:<EOL><INDENT>if (key1 in tempcpdict and<EOL>isinstance(tempcpdict[key1], dict)):<EOL><INDENT>if key2 in tempcpdict[key1]:<EOL><INDENT>tempcpdict[key1][key2] = (<EOL>phasedlc<EOL>)<EOL><DEDENT>else:<EOL><INDENT>tempcpdict[key1].update(<EOL>{key2: phasedlc}<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>tempcpdict[key1] = {key2: phasedlc}<EOL><DEDENT>savekwargs = {<EOL>'<STR_LIT>':tempfpath,<EOL>'<STR_LIT>':pickle.HIGHEST_PROTOCOL<EOL>}<EOL>savedcpf = yield self.executor.submit(<EOL>_write_checkplot_picklefile,<EOL>tempcpdict,<EOL>**savekwargs<EOL>)<EOL>LOGGER.info(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(lctool, savedcpf)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>fitresults = {<EOL>'<STR_LIT>':phasedlc['<STR_LIT>']['<STR_LIT>'],<EOL>'<STR_LIT>':phasedlc['<STR_LIT>']['<STR_LIT>'],<EOL>'<STR_LIT>':phasedlc['<STR_LIT>']['<STR_LIT>'],<EOL>'<STR_LIT>':phasedlc['<STR_LIT>'],<EOL>'<STR_LIT>':phasedlc['<STR_LIT>'],<EOL>'<STR_LIT>':phasedlc['<STR_LIT>'],<EOL>}<EOL>if ('<STR_LIT>' in funcresults['<STR_LIT>'] and<EOL>funcresults['<STR_LIT>']['<STR_LIT>'] is not None):<EOL><INDENT>fitresults['<STR_LIT>'] = (<EOL>funcresults['<STR_LIT>']['<STR_LIT>']<EOL>)<EOL><DEDENT>resultdict['<STR_LIT:status>'] = '<STR_LIT:success>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>' %<EOL>lctool<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {<EOL>'<STR_LIT>':objectid,<EOL>key1:{<EOL>key2:fitresults<EOL>}<EOL>}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT><DEDENT>elif lctool == 
'<STR_LIT>':<EOL><INDENT>fitmethod, periodind = lctoolargs<EOL><DEDENT>elif lctool == '<STR_LIT>':<EOL><INDENT>if os.path.exists(tempfpath):<EOL><INDENT>os.remove(tempfpath)<EOL>LOGGER.warning('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(tempfpath, cpfpath))<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT:success>'<EOL><DEDENT>else:<EOL><INDENT>resultdict['<STR_LIT:status>'] = '<STR_LIT:error>'<EOL>LOGGER.warning('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(tempfpath, cpfpath))<EOL><DEDENT>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>'<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {'<STR_LIT>':cpobjectid}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT>elif lctool == '<STR_LIT>':<EOL><INDENT>target = self.get_argument('<STR_LIT>',None)<EOL>if target is not None:<EOL><INDENT>target = xhtml_escape(target)<EOL>if (target not in CPTOOLMAP or<EOL>target == '<STR_LIT>' or<EOL>target == '<STR_LIT>' or<EOL>target == '<STR_LIT>' or<EOL>target == '<STR_LIT>'):<EOL><INDENT>LOGGER.error(\"<STR_LIT>\" % target)<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT:error>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>\"<STR_LIT>\" % target<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {'<STR_LIT>':cpobjectid}<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT>targetloc = CPTOOLMAP[target]['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGGER.error('<STR_LIT>' % lctool)<EOL>resultdict['<STR_LIT:status>'] = '<STR_LIT:error>'<EOL>resultdict['<STR_LIT:message>'] = (<EOL>'<STR_LIT>' % lctool<EOL>)<EOL>resultdict['<STR_LIT:result>'] = {'<STR_LIT>':cpobjectid}<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGGER.error('<STR_LIT>' % self.cpfile)<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':\"<STR_LIT>\",<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':None}<EOL>self.write(resultdict)<EOL>raise 
tornado.web.Finish()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':'<STR_LIT>',<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':None}<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT>", "docstring": "This handles a GET request to run a specified LC tool.\n\n        Parameters\n        ----------\n\n        cpfile : str\n            This is the checkplot file to run the tool on.\n\n        Returns\n        -------\n\n        str\n            Returns a JSON response.\n\n        Notes\n        -----\n\n        The URI structure is::\n\n            /tools/<cpfile>?[args]\n\n        where args are::\n\n            ?lctool=<lctool>&argkey1=argval1&argkey2=argval2&...\n\n            &forcereload=true <- if this is present, then reload values from\n            original checkplot.\n\n            &objectid=<objectid>\n\n        `lctool` is one of the strings below\n\n        Period search functions::\n\n            psearch-gls: run Lomb-Scargle with given params\n            psearch-bls: run BLS with given params\n            psearch-pdm: run phase dispersion minimization with given params\n            psearch-aov: run analysis-of-variance with given params\n            psearch-mav: run analysis-of-variance (multi-harm) with given params\n            psearch-acf: run ACF period search with given params\n            psearch-win: run spectral window function search with given params\n\n        Arguments recognized by all period-search functions are::\n\n            startp=XX\n            endp=XX\n            magsarefluxes=True|False\n            autofreq=True|False\n            stepsize=XX\n\n        Variability characterization functions::\n\n            var-varfeatures: gets the variability features from the checkplot or\n                             recalculates if they're not present\n\n            var-prewhiten: pre-whitens the light curve with a sinusoidal signal\n\n            
var-masksig: masks a given phase location with given width from the\n                         light curve\n\n        Light curve manipulation functions ::\n\n            phasedlc-newplot: make phased LC with new provided period/epoch\n            lcfit-fourier: fit a Fourier function to the phased LC\n            lcfit-spline: fit a spline function to the phased LC\n            lcfit-legendre: fit a Legendre polynomial to the phased LC\n            lcfit-savgol: fit a Savitsky-Golay polynomial to the phased LC\n\n        FIXME: figure out how to cache the results of these functions\n        temporarily and save them back to the checkplot after we click on save\n        in the frontend.\n\n        TODO: look for a checkplot-blah-blah.pkl-cps-processing file in the same\n        place as the usual pickle file. if this exists and is newer than the pkl\n        file, load it instead. Or have a checkplotdict['cpservertemp'] item.", "id": "f14711:c4:m1"}
{"signature": "def initialize(self, currentdir, assetpath, cplist,<EOL>cplistfile, executor, readonly):", "body": "self.currentdir = currentdir<EOL>self.assetpath = assetpath<EOL>self.currentproject = cplist<EOL>self.cplistfile = cplistfile<EOL>self.executor = executor<EOL>self.readonly = readonly<EOL>", "docstring": "This handles initial setup of this `RequestHandler`.", "id": "f14711:c2:m0"}
{"signature": "@gen.coroutine<EOL><INDENT>def get(self):<DEDENT>", "body": "provided_key = self.get_argument('<STR_LIT:key>',default=None)<EOL>if not provided_key:<EOL><INDENT>LOGGER.error('<STR_LIT>')<EOL>retdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':('<STR_LIT>'<EOL>'<STR_LIT>'),<EOL>'<STR_LIT:result>':None,<EOL>'<STR_LIT>':True}<EOL>self.set_status(<NUM_LIT>)<EOL>self.write(retdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT>else:<EOL><INDENT>provided_key = xhtml_escape(provided_key)<EOL>if not _time_independent_equals(provided_key,<EOL>self.secret):<EOL><INDENT>LOGGER.error('<STR_LIT>')<EOL>retdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':('<STR_LIT>'<EOL>'<STR_LIT>'),<EOL>'<STR_LIT:result>':None,<EOL>'<STR_LIT>':True}<EOL>self.set_status(<NUM_LIT>)<EOL>self.write(retdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT><DEDENT>LOGGER.info('<STR_LIT>')<EOL>checkplotfname = self.get_argument('<STR_LIT>', default=None)<EOL>if checkplotfname:<EOL><INDENT>try:<EOL><INDENT>cpfpath = xhtml_escape(<EOL>base64.b64decode(url_unescape(checkplotfname))<EOL>)<EOL><DEDENT>except Exception as e:<EOL><INDENT>msg = '<STR_LIT>'<EOL>LOGGER.error(msg)<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':msg,<EOL>'<STR_LIT:result>':None,<EOL>'<STR_LIT>':True}<EOL>self.set_status(<NUM_LIT>)<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT>LOGGER.info('<STR_LIT>' % cpfpath)<EOL>if not os.path.exists(cpfpath):<EOL><INDENT>msg = \"<STR_LIT>\" % cpfpath<EOL>LOGGER.error(msg)<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':msg,<EOL>'<STR_LIT:result>':None,<EOL>'<STR_LIT>':True}<EOL>self.set_status(<NUM_LIT>)<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT>cpdict = yield self.executor.submit(<EOL>_read_checkplot_picklefile, cpfpath<EOL>)<EOL>LOGGER.info('<STR_LIT>' % cpfpath)<EOL>objectid = cpdict['<STR_LIT>']<EOL>objectinfo = 
cpdict['<STR_LIT>']<EOL>varinfo = cpdict['<STR_LIT>']<EOL>if '<STR_LIT>' in cpdict:<EOL><INDENT>pfmethods = cpdict['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>pfmethods = []<EOL>for pfm in PFMETHODS:<EOL><INDENT>if pfm in cpdict:<EOL><INDENT>pfmethods.append(pfm)<EOL><DEDENT><DEDENT><DEDENT>neighbors = []<EOL>if ('<STR_LIT>' in cpdict and<EOL>cpdict['<STR_LIT>'] is not None and<EOL>len(cpdict['<STR_LIT>'])) > <NUM_LIT:0>:<EOL><INDENT>nbrlist = cpdict['<STR_LIT>']<EOL>for nbr in nbrlist:<EOL><INDENT>if '<STR_LIT>' in nbr:<EOL><INDENT>nbrmagdiffs = nbr['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>nbrmagdiffs = None<EOL><DEDENT>if '<STR_LIT>' in nbr:<EOL><INDENT>nbrcolordiffs = nbr['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>nbrcolordiffs = None<EOL><DEDENT>thisnbrdict = {<EOL>'<STR_LIT>':nbr['<STR_LIT>'],<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':nbr['<STR_LIT>'],<EOL>'<STR_LIT>':nbr['<STR_LIT>'],<EOL>'<STR_LIT>':nbr['<STR_LIT>'],<EOL>'<STR_LIT>':nbr['<STR_LIT>'],<EOL>'<STR_LIT>':nbr['<STR_LIT>'],<EOL>'<STR_LIT>':nbrmagdiffs,<EOL>'<STR_LIT>':nbrcolordiffs<EOL>}<EOL>}<EOL>try:<EOL><INDENT>nbr_magseries = nbr['<STR_LIT>']['<STR_LIT>']<EOL>thisnbrdict['<STR_LIT>'] = nbr_magseries<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGGER.error(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% (nbr['<STR_LIT>'],<EOL>cpdict['<STR_LIT>'])<EOL>)<EOL><DEDENT>try:<EOL><INDENT>for pfm in pfmethods:<EOL><INDENT>if pfm in nbr:<EOL><INDENT>thisnbrdict[pfm] = {<EOL>'<STR_LIT>':nbr[pfm][<NUM_LIT:0>]['<STR_LIT>'],<EOL>'<STR_LIT>':nbr[pfm][<NUM_LIT:0>]['<STR_LIT>'],<EOL>'<STR_LIT>':nbr[pfm][<NUM_LIT:0>]['<STR_LIT>']<EOL>}<EOL><DEDENT><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGGER.error(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% (nbr['<STR_LIT>'],<EOL>cpdict['<STR_LIT>'])<EOL>)<EOL><DEDENT>neighbors.append(thisnbrdict)<EOL><DEDENT><DEDENT>if '<STR_LIT>' in cpdict:<EOL><INDENT>objectcomments = cpdict['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>objectcomments = None<EOL><DEDENT>if '<STR_LIT>' in 
cpdict:<EOL><INDENT>objectxmatch = cpdict['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>objectxmatch = None<EOL><DEDENT>if '<STR_LIT>' in cpdict:<EOL><INDENT>colormagdiagram = cpdict['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>colormagdiagram = None<EOL><DEDENT>if '<STR_LIT>' in cpdict:<EOL><INDENT>finderchart = cpdict['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>finderchart = None<EOL><DEDENT>if ('<STR_LIT>' in cpdict and<EOL>isinstance(cpdict['<STR_LIT>'], dict) and<EOL>'<STR_LIT>' in cpdict['<STR_LIT>']):<EOL><INDENT>magseries = cpdict['<STR_LIT>']['<STR_LIT>']<EOL>time0 = cpdict['<STR_LIT>']['<STR_LIT>'].min()<EOL>magseries_ndet = cpdict['<STR_LIT>']['<STR_LIT>'].size<EOL><DEDENT>else:<EOL><INDENT>magseries = None<EOL>time0 = <NUM_LIT:0.0><EOL>magseries_ndet = <NUM_LIT:0><EOL>LOGGER.warning(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if '<STR_LIT:status>' in cpdict:<EOL><INDENT>cpstatus = cpdict['<STR_LIT:status>']<EOL><DEDENT>else:<EOL><INDENT>cpstatus = '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT>' in cpdict:<EOL><INDENT>uifilters = cpdict['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>uifilters = {'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None}<EOL><DEDENT>resultdict = {<EOL>'<STR_LIT:status>':'<STR_LIT>',<EOL>'<STR_LIT:message>':'<STR_LIT>' % os.path.basename(cpfpath),<EOL>'<STR_LIT>':True,<EOL>'<STR_LIT:result>':{<EOL>'<STR_LIT>':'<STR_LIT>' % time0,<EOL>'<STR_LIT>':objectid,<EOL>'<STR_LIT>':objectinfo,<EOL>'<STR_LIT>':colormagdiagram,<EOL>'<STR_LIT>':objectcomments,<EOL>'<STR_LIT>':varinfo,<EOL>'<STR_LIT>':uifilters,<EOL>'<STR_LIT>':neighbors,<EOL>'<STR_LIT>':objectxmatch,<EOL>'<STR_LIT>':finderchart,<EOL>'<STR_LIT>':magseries,<EOL>'<STR_LIT>':magseries_ndet,<EOL>'<STR_LIT>':cpstatus,<EOL>'<STR_LIT>':pfmethods<EOL>}<EOL>}<EOL>for key in pfmethods:<EOL><INDENT>periodogram = cpdict[key]['<STR_LIT>']<EOL>if <NUM_LIT:0> in cpdict[key] and isinstance(cpdict[key][<NUM_LIT:0>], dict):<EOL><INDENT>phasedlc0plot = 
cpdict[key][<NUM_LIT:0>]['<STR_LIT>']<EOL>phasedlc0period = float(cpdict[key][<NUM_LIT:0>]['<STR_LIT>'])<EOL>phasedlc0epoch = float(cpdict[key][<NUM_LIT:0>]['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>phasedlc0plot = None<EOL>phasedlc0period = None<EOL>phasedlc0epoch = None<EOL><DEDENT>if (<NUM_LIT:0> in cpdict[key] and<EOL>isinstance(cpdict[key][<NUM_LIT:0>], dict) and<EOL>'<STR_LIT>' in cpdict[key][<NUM_LIT:0>] and<EOL>isinstance(cpdict[key][<NUM_LIT:0>]['<STR_LIT>'], dict)):<EOL><INDENT>phasedlc0fit = {<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:0>][<EOL>'<STR_LIT>'<EOL>]['<STR_LIT>']['<STR_LIT>'] if<EOL>'<STR_LIT>' in<EOL>cpdict[key][<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>'] else None<EOL>)<EOL>}<EOL><DEDENT>else:<EOL><INDENT>phasedlc0fit = None<EOL><DEDENT>if <NUM_LIT:1> in cpdict[key] and isinstance(cpdict[key][<NUM_LIT:1>], dict):<EOL><INDENT>phasedlc1plot = cpdict[key][<NUM_LIT:1>]['<STR_LIT>']<EOL>phasedlc1period = float(cpdict[key][<NUM_LIT:1>]['<STR_LIT>'])<EOL>phasedlc1epoch = float(cpdict[key][<NUM_LIT:1>]['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>phasedlc1plot = None<EOL>phasedlc1period = None<EOL>phasedlc1epoch = None<EOL><DEDENT>if (<NUM_LIT:1> in cpdict[key] and<EOL>isinstance(cpdict[key][<NUM_LIT:1>], dict) and<EOL>'<STR_LIT>' in cpdict[key][<NUM_LIT:1>] and<EOL>isinstance(cpdict[key][<NUM_LIT:1>]['<STR_LIT>'], dict)):<EOL><INDENT>phasedlc1fit = {<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:1>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:1>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:1>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:1>][<EOL>'<STR_LIT>'<EOL>]['<STR_LIT>']['<STR_LIT>'] if<EOL>'<STR_LIT>' 
in<EOL>cpdict[key][<NUM_LIT:1>]['<STR_LIT>']['<STR_LIT>'] else None<EOL>)<EOL>}<EOL><DEDENT>else:<EOL><INDENT>phasedlc1fit = None<EOL><DEDENT>if <NUM_LIT:2> in cpdict[key] and isinstance(cpdict[key][<NUM_LIT:2>], dict):<EOL><INDENT>phasedlc2plot = cpdict[key][<NUM_LIT:2>]['<STR_LIT>']<EOL>phasedlc2period = float(cpdict[key][<NUM_LIT:2>]['<STR_LIT>'])<EOL>phasedlc2epoch = float(cpdict[key][<NUM_LIT:2>]['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>phasedlc2plot = None<EOL>phasedlc2period = None<EOL>phasedlc2epoch = None<EOL><DEDENT>if (<NUM_LIT:2> in cpdict[key] and<EOL>isinstance(cpdict[key][<NUM_LIT:2>], dict) and<EOL>'<STR_LIT>' in cpdict[key][<NUM_LIT:2>] and<EOL>isinstance(cpdict[key][<NUM_LIT:2>]['<STR_LIT>'], dict)):<EOL><INDENT>phasedlc2fit = {<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:2>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:2>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:2>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:2>][<EOL>'<STR_LIT>'<EOL>]['<STR_LIT>']['<STR_LIT>'] if<EOL>'<STR_LIT>' in<EOL>cpdict[key][<NUM_LIT:2>]['<STR_LIT>']['<STR_LIT>'] else None<EOL>)<EOL>}<EOL><DEDENT>else:<EOL><INDENT>phasedlc2fit = None<EOL><DEDENT>resultdict['<STR_LIT:result>'][key] = 
{<EOL>'<STR_LIT>':cpdict[key]['<STR_LIT>'],<EOL>'<STR_LIT>':periodogram,<EOL>'<STR_LIT>':cpdict[key]['<STR_LIT>'],<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':phasedlc0plot,<EOL>'<STR_LIT>':phasedlc0period,<EOL>'<STR_LIT>':phasedlc0epoch,<EOL>'<STR_LIT>':phasedlc0fit,<EOL>},<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':phasedlc1plot,<EOL>'<STR_LIT>':phasedlc1period,<EOL>'<STR_LIT>':phasedlc1epoch,<EOL>'<STR_LIT>':phasedlc1fit,<EOL>},<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':phasedlc2plot,<EOL>'<STR_LIT>':phasedlc2period,<EOL>'<STR_LIT>':phasedlc2epoch,<EOL>'<STR_LIT>':phasedlc2fit,<EOL>},<EOL>}<EOL><DEDENT>self.set_header('<STR_LIT:Content-Type>','<STR_LIT>')<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT>else:<EOL><INDENT>LOGGER.error('<STR_LIT>')<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':\"<STR_LIT>\",<EOL>'<STR_LIT>':True,<EOL>'<STR_LIT:result>':None}<EOL>self.status(<NUM_LIT>)<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT>", "docstring": "This handles GET requests.\n\n        Returns the requested checkplot pickle's information as JSON.\n\n        Requires a pre-shared secret `key` argument for the operation to\n        complete successfully. This is obtained from a command-line argument.", "id": "f14711:c5:m1"}
{"signature": "@gen.coroutine<EOL><INDENT>def get(self, checkplotfname):<DEDENT>", "body": "if checkplotfname:<EOL><INDENT>self.checkplotfname = xhtml_escape(<EOL>base64.b64decode(url_unescape(checkplotfname))<EOL>)<EOL>if self.checkplotfname in self.currentproject['<STR_LIT>']:<EOL><INDENT>cpfpath = os.path.join(<EOL>os.path.abspath(os.path.dirname(self.cplistfile)),<EOL>self.checkplotfname<EOL>)<EOL>LOGGER.info('<STR_LIT>' % cpfpath)<EOL>if not os.path.exists(cpfpath):<EOL><INDENT>msg = \"<STR_LIT>\" % cpfpath<EOL>LOGGER.error(msg)<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':msg,<EOL>'<STR_LIT:result>':None}<EOL>self.write(resultdict)<EOL>raise tornado.web.Finish()<EOL><DEDENT>cpdict = yield self.executor.submit(<EOL>_read_checkplot_picklefile, cpfpath<EOL>)<EOL>LOGGER.info('<STR_LIT>' % cpfpath)<EOL>objectid = cpdict['<STR_LIT>']<EOL>objectinfo = cpdict['<STR_LIT>']<EOL>varinfo = cpdict['<STR_LIT>']<EOL>if '<STR_LIT>' in cpdict:<EOL><INDENT>pfmethods = cpdict['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>pfmethods = []<EOL>for pfm in PFMETHODS:<EOL><INDENT>if pfm in cpdict:<EOL><INDENT>pfmethods.append(pfm)<EOL><DEDENT><DEDENT><DEDENT>neighbors = []<EOL>if ('<STR_LIT>' in cpdict and<EOL>cpdict['<STR_LIT>'] is not None and<EOL>len(cpdict['<STR_LIT>'])) > <NUM_LIT:0>:<EOL><INDENT>nbrlist = cpdict['<STR_LIT>']<EOL>for nbr in nbrlist:<EOL><INDENT>if '<STR_LIT>' in nbr:<EOL><INDENT>nbrmagdiffs = nbr['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>nbrmagdiffs = None<EOL><DEDENT>if '<STR_LIT>' in nbr:<EOL><INDENT>nbrcolordiffs = nbr['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>nbrcolordiffs = None<EOL><DEDENT>thisnbrdict = 
{<EOL>'<STR_LIT>':nbr['<STR_LIT>'],<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':nbr['<STR_LIT>'],<EOL>'<STR_LIT>':nbr['<STR_LIT>'],<EOL>'<STR_LIT>':nbr['<STR_LIT>'],<EOL>'<STR_LIT>':nbr['<STR_LIT>'],<EOL>'<STR_LIT>':nbr['<STR_LIT>'],<EOL>'<STR_LIT>':nbrmagdiffs,<EOL>'<STR_LIT>':nbrcolordiffs<EOL>}<EOL>}<EOL>try:<EOL><INDENT>nbr_magseries = nbr['<STR_LIT>']['<STR_LIT>']<EOL>thisnbrdict['<STR_LIT>'] = nbr_magseries<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGGER.error(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% (nbr['<STR_LIT>'],<EOL>cpdict['<STR_LIT>'])<EOL>)<EOL><DEDENT>try:<EOL><INDENT>for pfm in pfmethods:<EOL><INDENT>if pfm in nbr:<EOL><INDENT>thisnbrdict[pfm] = {<EOL>'<STR_LIT>':nbr[pfm][<NUM_LIT:0>]['<STR_LIT>'],<EOL>'<STR_LIT>':nbr[pfm][<NUM_LIT:0>]['<STR_LIT>'],<EOL>'<STR_LIT>':nbr[pfm][<NUM_LIT:0>]['<STR_LIT>']<EOL>}<EOL><DEDENT><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGGER.error(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% (nbr['<STR_LIT>'],<EOL>cpdict['<STR_LIT>'])<EOL>)<EOL><DEDENT>neighbors.append(thisnbrdict)<EOL><DEDENT><DEDENT>if '<STR_LIT>' in cpdict:<EOL><INDENT>objectcomments = cpdict['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>objectcomments = None<EOL><DEDENT>if '<STR_LIT>' in cpdict:<EOL><INDENT>objectxmatch = cpdict['<STR_LIT>']<EOL>for xmcat in objectxmatch:<EOL><INDENT>if isinstance(objectxmatch[xmcat]['<STR_LIT:info>'], dict):<EOL><INDENT>xminfo = objectxmatch[xmcat]['<STR_LIT:info>']<EOL>for xmek in xminfo:<EOL><INDENT>if (isinstance(xminfo[xmek], float) and<EOL>(not np.isfinite(xminfo[xmek]))):<EOL><INDENT>xminfo[xmek] = None<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>objectxmatch = None<EOL><DEDENT>if '<STR_LIT>' in cpdict:<EOL><INDENT>colormagdiagram = cpdict['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>colormagdiagram = None<EOL><DEDENT>if '<STR_LIT>' in cpdict:<EOL><INDENT>finderchart = cpdict['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>finderchart = None<EOL><DEDENT>if ('<STR_LIT>' in cpdict 
and<EOL>isinstance(cpdict['<STR_LIT>'], dict) and<EOL>'<STR_LIT>' in cpdict['<STR_LIT>']):<EOL><INDENT>magseries = cpdict['<STR_LIT>']['<STR_LIT>']<EOL>time0 = cpdict['<STR_LIT>']['<STR_LIT>'].min()<EOL>magseries_ndet = cpdict['<STR_LIT>']['<STR_LIT>'].size<EOL><DEDENT>else:<EOL><INDENT>magseries = None<EOL>time0 = <NUM_LIT:0.0><EOL>magseries_ndet = <NUM_LIT:0><EOL>LOGGER.warning(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>if '<STR_LIT:status>' in cpdict:<EOL><INDENT>cpstatus = cpdict['<STR_LIT:status>']<EOL><DEDENT>else:<EOL><INDENT>cpstatus = '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT>' in cpdict:<EOL><INDENT>uifilters = cpdict['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>uifilters = {'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None}<EOL><DEDENT>resultdict = {<EOL>'<STR_LIT:status>':'<STR_LIT>',<EOL>'<STR_LIT:message>':'<STR_LIT>' % self.checkplotfname,<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':{<EOL>'<STR_LIT>':'<STR_LIT>' % time0,<EOL>'<STR_LIT>':objectid,<EOL>'<STR_LIT>':objectinfo,<EOL>'<STR_LIT>':colormagdiagram,<EOL>'<STR_LIT>':objectcomments,<EOL>'<STR_LIT>':varinfo,<EOL>'<STR_LIT>':uifilters,<EOL>'<STR_LIT>':neighbors,<EOL>'<STR_LIT>':objectxmatch,<EOL>'<STR_LIT>':finderchart,<EOL>'<STR_LIT>':magseries,<EOL>'<STR_LIT>':magseries_ndet,<EOL>'<STR_LIT>':cpstatus,<EOL>'<STR_LIT>':pfmethods<EOL>}<EOL>}<EOL>for key in resultdict['<STR_LIT:result>']['<STR_LIT>']:<EOL><INDENT>if (isinstance(resultdict['<STR_LIT:result>']['<STR_LIT>'][key],<EOL>(float, np.float64, np.float_)) and<EOL>(not np.isfinite(resultdict['<STR_LIT:result>'][<EOL>'<STR_LIT>'<EOL>][key]))):<EOL><INDENT>resultdict['<STR_LIT:result>']['<STR_LIT>'][key] = None<EOL><DEDENT>elif (isinstance(resultdict['<STR_LIT:result>']['<STR_LIT>'][key],<EOL>ndarray)):<EOL><INDENT>thisval = resultdict['<STR_LIT:result>']['<STR_LIT>'][key]<EOL>thisval = thisval.tolist()<EOL>for i, v in enumerate(thisval):<EOL><INDENT>if (isinstance(v,(float, np.float64, np.float_)) 
and<EOL>(not(np.isfinite(v)))):<EOL><INDENT>thisval[i] = None<EOL><DEDENT><DEDENT>resultdict['<STR_LIT:result>']['<STR_LIT>'][key] = thisval<EOL><DEDENT><DEDENT>for key in resultdict['<STR_LIT:result>']['<STR_LIT>']:<EOL><INDENT>if (isinstance(<EOL>resultdict['<STR_LIT:result>']['<STR_LIT>'][key],<EOL>(float, np.float64, np.float_)) and<EOL>(not np.isfinite(<EOL>resultdict['<STR_LIT:result>']['<STR_LIT>'][key]<EOL>))):<EOL><INDENT>resultdict['<STR_LIT:result>']['<STR_LIT>'][key] = None<EOL><DEDENT>elif (isinstance(<EOL>resultdict['<STR_LIT:result>']['<STR_LIT>'][key],<EOL>ndarray)):<EOL><INDENT>thisval = (<EOL>resultdict['<STR_LIT:result>']['<STR_LIT>'][key]<EOL>)<EOL>thisval = thisval.tolist()<EOL>for i, v in enumerate(thisval):<EOL><INDENT>if (isinstance(v,(float, np.float64, np.float_)) and<EOL>(not(np.isfinite(v)))):<EOL><INDENT>thisval[i] = None<EOL><DEDENT><DEDENT>resultdict['<STR_LIT:result>']['<STR_LIT>'][key] = (<EOL>thisval<EOL>)<EOL><DEDENT><DEDENT>if ('<STR_LIT>' in resultdict['<STR_LIT:result>']['<STR_LIT>'] and<EOL>isinstance(resultdict['<STR_LIT:result>']['<STR_LIT>']['<STR_LIT>'],<EOL>dict)):<EOL><INDENT>for key in resultdict['<STR_LIT:result>']['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>if (isinstance(<EOL>resultdict[<EOL>'<STR_LIT:result>'<EOL>]['<STR_LIT>']['<STR_LIT>'][key],<EOL>(float, np.float64, np.float_)) and<EOL>(not np.isfinite(<EOL>resultdict[<EOL>'<STR_LIT:result>'<EOL>]['<STR_LIT>']['<STR_LIT>'][key]))):<EOL><INDENT>resultdict[<EOL>'<STR_LIT:result>'<EOL>]['<STR_LIT>']['<STR_LIT>'][key] = None<EOL><DEDENT>elif (isinstance(<EOL>resultdict[<EOL>'<STR_LIT:result>'<EOL>]['<STR_LIT>']['<STR_LIT>'][key],<EOL>ndarray)):<EOL><INDENT>thisval = (<EOL>resultdict['<STR_LIT:result>']['<STR_LIT>']['<STR_LIT>'][key]<EOL>)<EOL>thisval = thisval.tolist()<EOL>for i, v in enumerate(thisval):<EOL><INDENT>if (isinstance(v,(float,<EOL>np.float64,<EOL>np.float_)) and<EOL>(not(np.isfinite(v)))):<EOL><INDENT>thisval[i] = 
None<EOL><DEDENT><DEDENT>resultdict['<STR_LIT:result>']['<STR_LIT>']['<STR_LIT>'][key] = (<EOL>thisval<EOL>)<EOL><DEDENT><DEDENT><DEDENT>for key in pfmethods:<EOL><INDENT>periodogram = cpdict[key]['<STR_LIT>']<EOL>if <NUM_LIT:0> in cpdict[key] and isinstance(cpdict[key][<NUM_LIT:0>], dict):<EOL><INDENT>phasedlc0plot = cpdict[key][<NUM_LIT:0>]['<STR_LIT>']<EOL>phasedlc0period = float(cpdict[key][<NUM_LIT:0>]['<STR_LIT>'])<EOL>phasedlc0epoch = float(cpdict[key][<NUM_LIT:0>]['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>phasedlc0plot = None<EOL>phasedlc0period = None<EOL>phasedlc0epoch = None<EOL><DEDENT>if (<NUM_LIT:0> in cpdict[key] and<EOL>isinstance(cpdict[key][<NUM_LIT:0>], dict) and<EOL>'<STR_LIT>' in cpdict[key][<NUM_LIT:0>] and<EOL>isinstance(cpdict[key][<NUM_LIT:0>]['<STR_LIT>'], dict)):<EOL><INDENT>phasedlc0fit = {<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:0>][<EOL>'<STR_LIT>'<EOL>]['<STR_LIT>']['<STR_LIT>'] if<EOL>'<STR_LIT>' in<EOL>cpdict[key][<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>'] else None<EOL>)<EOL>}<EOL><DEDENT>else:<EOL><INDENT>phasedlc0fit = None<EOL><DEDENT>if <NUM_LIT:1> in cpdict[key] and isinstance(cpdict[key][<NUM_LIT:1>], dict):<EOL><INDENT>phasedlc1plot = cpdict[key][<NUM_LIT:1>]['<STR_LIT>']<EOL>phasedlc1period = float(cpdict[key][<NUM_LIT:1>]['<STR_LIT>'])<EOL>phasedlc1epoch = float(cpdict[key][<NUM_LIT:1>]['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>phasedlc1plot = None<EOL>phasedlc1period = None<EOL>phasedlc1epoch = None<EOL><DEDENT>if (<NUM_LIT:1> in cpdict[key] and<EOL>isinstance(cpdict[key][<NUM_LIT:1>], dict) and<EOL>'<STR_LIT>' in cpdict[key][<NUM_LIT:1>] and<EOL>isinstance(cpdict[key][<NUM_LIT:1>]['<STR_LIT>'], dict)):<EOL><INDENT>phasedlc1fit = 
{<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:1>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:1>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:1>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:1>][<EOL>'<STR_LIT>'<EOL>]['<STR_LIT>']['<STR_LIT>'] if<EOL>'<STR_LIT>' in<EOL>cpdict[key][<NUM_LIT:1>]['<STR_LIT>']['<STR_LIT>'] else None<EOL>)<EOL>}<EOL><DEDENT>else:<EOL><INDENT>phasedlc1fit = None<EOL><DEDENT>if <NUM_LIT:2> in cpdict[key] and isinstance(cpdict[key][<NUM_LIT:2>], dict):<EOL><INDENT>phasedlc2plot = cpdict[key][<NUM_LIT:2>]['<STR_LIT>']<EOL>phasedlc2period = float(cpdict[key][<NUM_LIT:2>]['<STR_LIT>'])<EOL>phasedlc2epoch = float(cpdict[key][<NUM_LIT:2>]['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>phasedlc2plot = None<EOL>phasedlc2period = None<EOL>phasedlc2epoch = None<EOL><DEDENT>if (<NUM_LIT:2> in cpdict[key] and<EOL>isinstance(cpdict[key][<NUM_LIT:2>], dict) and<EOL>'<STR_LIT>' in cpdict[key][<NUM_LIT:2>] and<EOL>isinstance(cpdict[key][<NUM_LIT:2>]['<STR_LIT>'], dict)):<EOL><INDENT>phasedlc2fit = {<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:2>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:2>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:2>]['<STR_LIT>']['<STR_LIT>']<EOL>),<EOL>'<STR_LIT>':(<EOL>cpdict[key][<NUM_LIT:2>][<EOL>'<STR_LIT>'<EOL>]['<STR_LIT>']['<STR_LIT>'] if<EOL>'<STR_LIT>' in<EOL>cpdict[key][<NUM_LIT:2>]['<STR_LIT>']['<STR_LIT>'] else None<EOL>)<EOL>}<EOL><DEDENT>else:<EOL><INDENT>phasedlc2fit = None<EOL><DEDENT>resultdict['<STR_LIT:result>'][key] = 
{<EOL>'<STR_LIT>':cpdict[key]['<STR_LIT>'],<EOL>'<STR_LIT>':periodogram,<EOL>'<STR_LIT>':cpdict[key]['<STR_LIT>'],<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':phasedlc0plot,<EOL>'<STR_LIT>':phasedlc0period,<EOL>'<STR_LIT>':phasedlc0epoch,<EOL>'<STR_LIT>':phasedlc0fit,<EOL>},<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':phasedlc1plot,<EOL>'<STR_LIT>':phasedlc1period,<EOL>'<STR_LIT>':phasedlc1epoch,<EOL>'<STR_LIT>':phasedlc1fit,<EOL>},<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':phasedlc2plot,<EOL>'<STR_LIT>':phasedlc2period,<EOL>'<STR_LIT>':phasedlc2epoch,<EOL>'<STR_LIT>':phasedlc2fit,<EOL>},<EOL>}<EOL><DEDENT>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT>else:<EOL><INDENT>LOGGER.error('<STR_LIT>' % self.checkplotfname)<EOL>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':\"<STR_LIT>\",<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':None}<EOL>self.write(resultdict)<EOL>self.finish()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>resultdict = {'<STR_LIT:status>':'<STR_LIT:error>',<EOL>'<STR_LIT:message>':'<STR_LIT>',<EOL>'<STR_LIT>':self.readonly,<EOL>'<STR_LIT:result>':None}<EOL>self.write(resultdict)<EOL><DEDENT>", "docstring": "This handles GET requests to serve a specific checkplot pickle.\n\n        This is an AJAX endpoint; returns JSON that gets converted by the\n        frontend into things to render.", "id": "f14711:c2:m1"}
{"signature": "def checkplot_infokey_worker(task):", "body": "cpf, keys = task<EOL>cpd = _read_checkplot_picklefile(cpf)<EOL>resultkeys = []<EOL>for k in keys:<EOL><INDENT>try:<EOL><INDENT>resultkeys.append(_dict_get(cpd, k))<EOL><DEDENT>except Exception as e:<EOL><INDENT>resultkeys.append(np.nan)<EOL><DEDENT><DEDENT>return resultkeys<EOL>", "docstring": "This gets the required keys from the requested file.\n\n    Parameters\n    ----------\n\n    task : tuple\n        Task is a two element tuple::\n\n        - task[0] is the dict to work on\n\n        - task[1] is a list of lists of str indicating all the key address to\n          extract items from the dict for\n\n    Returns\n    -------\n\n    list\n        This is a list of all of the items at the requested key addresses.", "id": "f14713:m1"}
{"signature": "def collect_nonperiodic_features(<EOL>featuresdir,<EOL>magcol,<EOL>outfile,<EOL>pklglob='<STR_LIT>',<EOL>featurestouse=NONPERIODIC_FEATURES_TO_COLLECT,<EOL>maxobjects=None,<EOL>labeldict=None,<EOL>labeltype='<STR_LIT>',<EOL>):", "body": "<EOL>pklist = glob.glob(os.path.join(featuresdir, pklglob))<EOL>if maxobjects:<EOL><INDENT>pklist = pklist[:maxobjects]<EOL><DEDENT>if TQDM:<EOL><INDENT>listiterator = tqdm(pklist)<EOL><DEDENT>else:<EOL><INDENT>listiterator = pklist<EOL><DEDENT>feature_dict = {'<STR_LIT>':[],'<STR_LIT>':magcol, '<STR_LIT>':[]}<EOL>LOGINFO('<STR_LIT>' % magcol)<EOL>for pkl in listiterator:<EOL><INDENT>with open(pkl,'<STR_LIT:rb>') as infd:<EOL><INDENT>varf = pickle.load(infd)<EOL><DEDENT>objectid = varf['<STR_LIT>']<EOL>if objectid not in feature_dict['<STR_LIT>']:<EOL><INDENT>feature_dict['<STR_LIT>'].append(objectid)<EOL><DEDENT>thisfeatures = varf[magcol]<EOL>if featurestouse and len(featurestouse) > <NUM_LIT:0>:<EOL><INDENT>featurestoget = featurestouse<EOL><DEDENT>else:<EOL><INDENT>featurestoget = NONPERIODIC_FEATURES_TO_COLLECT<EOL><DEDENT>for feature in featurestoget:<EOL><INDENT>if ((feature not in feature_dict['<STR_LIT>']) and<EOL>(feature in thisfeatures)):<EOL><INDENT>feature_dict['<STR_LIT>'].append(feature)<EOL>feature_dict[feature] = []<EOL><DEDENT>if feature in thisfeatures:<EOL><INDENT>feature_dict[feature].append(<EOL>thisfeatures[feature]<EOL>)<EOL><DEDENT><DEDENT><DEDENT>for feat in feature_dict['<STR_LIT>']:<EOL><INDENT>feature_dict[feat] = np.array(feature_dict[feat])<EOL><DEDENT>feature_dict['<STR_LIT>'] = np.array(feature_dict['<STR_LIT>'])<EOL>feature_array = np.column_stack([feature_dict[feat] for feat in<EOL>feature_dict['<STR_LIT>']])<EOL>feature_dict['<STR_LIT>'] = feature_array<EOL>if isinstance(labeldict, dict):<EOL><INDENT>labelarray = np.zeros(feature_dict['<STR_LIT>'].size, dtype=np.int64)<EOL>for ind, objectid in enumerate(feature_dict['<STR_LIT>']):<EOL><INDENT>if objectid in 
labeldict:<EOL><INDENT>if labeltype == '<STR_LIT>':<EOL><INDENT>if labeldict[objectid]:<EOL><INDENT>labelarray[ind] = <NUM_LIT:1><EOL><DEDENT><DEDENT>elif labeltype == '<STR_LIT>':<EOL><INDENT>labelarray[ind] = labeldict[objectid]<EOL><DEDENT><DEDENT><DEDENT>feature_dict['<STR_LIT>'] = labelarray<EOL><DEDENT>feature_dict['<STR_LIT>'] = {'<STR_LIT>':pklglob,<EOL>'<STR_LIT>':featurestouse,<EOL>'<STR_LIT>':maxobjects,<EOL>'<STR_LIT>':labeltype}<EOL>with open(outfile,'<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(feature_dict, outfd, pickle.HIGHEST_PROTOCOL)<EOL><DEDENT>return feature_dict<EOL>", "docstring": "This collects variability features into arrays for use with the classifer.\n\n    Parameters\n    ----------\n\n    featuresdir : str\n        This is the directory where all the varfeatures pickles are. Use\n        `pklglob` to specify the glob to search for. The `varfeatures` pickles\n        contain objectids, a light curve magcol, and features as dict\n        key-vals. The :py:mod:`astrobase.lcproc.lcvfeatures` module can be used\n        to produce these.\n\n    magcol : str\n        This is the key in each varfeatures pickle corresponding to the magcol\n        of the light curve the variability features were extracted from.\n\n    outfile : str\n        This is the filename of the output pickle that will be written\n        containing a dict of all the features extracted into np.arrays.\n\n    pklglob : str\n        This is the UNIX file glob to use to search for varfeatures pickle files\n        in `featuresdir`.\n\n    featurestouse : list of str\n        Each varfeatures pickle can contain any combination of non-periodic,\n        stellar, and periodic features; these must have the same names as\n        elements in the list of strings provided in `featurestouse`.  This tries\n        to get all the features listed in NONPERIODIC_FEATURES_TO_COLLECT by\n        default. 
If `featurestouse` is provided as a list, gets only the\n        features listed in this kwarg instead.\n\n    maxobjects : int or None\n        The controls how many pickles from the featuresdir to process. If None,\n        will process all varfeatures pickles.\n\n    labeldict : dict or None\n        If this is provided, it must be a dict with the following key:val list::\n\n            '<objectid>':<label value>\n\n        for each objectid collected from the varfeatures pickles. This will turn\n        the collected information into a training set for classifiers.\n\n        Example: to carry out non-periodic variable feature collection of fake\n        LCS prepared by :py:mod:`astrobase.fakelcs.generation`, use the value\n        of the 'isvariable' dict elem from the `fakelcs-info.pkl` here, like\n        so::\n\n            labeldict={x:y for x,y in zip(fakelcinfo['objectid'],\n                                          fakelcinfo['isvariable'])}\n\n    labeltype : {'binary', 'classes'}\n        This is either 'binary' or 'classes' for binary/multi-class\n        classification respectively.\n\n    Returns\n    -------\n\n    dict\n        This returns a dict with all of the features collected into np.arrays,\n        ready to use as input to a scikit-learn classifier.", "id": "f14714:m1"}
{"signature": "def apply_rf_classifier(classifier,<EOL>varfeaturesdir,<EOL>outpickle,<EOL>maxobjects=None):", "body": "if isinstance(classifier,str) and os.path.exists(classifier):<EOL><INDENT>with open(classifier,'<STR_LIT:rb>') as infd:<EOL><INDENT>clfdict = pickle.load(infd)<EOL><DEDENT><DEDENT>elif isinstance(classifier, dict):<EOL><INDENT>clfdict = classifier<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>if '<STR_LIT>' not in clfdict:<EOL><INDENT>LOGERROR(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % varfeaturesdir)<EOL>return None<EOL><DEDENT>featurestouse = clfdict['<STR_LIT>']<EOL>pklglob = clfdict['<STR_LIT>']['<STR_LIT>']<EOL>magcol = clfdict['<STR_LIT>']<EOL>featfile = os.path.join(<EOL>os.path.dirname(outpickle),<EOL>'<STR_LIT>'<EOL>)<EOL>features = collect_nonperiodic_features(<EOL>varfeaturesdir,<EOL>magcol,<EOL>featfile,<EOL>pklglob=pklglob,<EOL>featurestouse=featurestouse,<EOL>maxobjects=maxobjects<EOL>)<EOL>bestclf = clfdict['<STR_LIT>']<EOL>predicted_labels = bestclf.predict(features['<STR_LIT>'])<EOL>predicted_label_probs = bestclf.predict_proba(<EOL>features['<STR_LIT>']<EOL>)<EOL>outdict = {<EOL>'<STR_LIT>':features,<EOL>'<STR_LIT>':featfile,<EOL>'<STR_LIT>':clfdict,<EOL>'<STR_LIT>':predicted_labels,<EOL>'<STR_LIT>':predicted_label_probs,<EOL>}<EOL>with open(outpickle,'<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)<EOL><DEDENT>return outdict<EOL>", "docstring": "This applys an RF classifier trained using `train_rf_classifier`\n    to varfeatures pickles in `varfeaturesdir`.\n\n    Parameters\n    ----------\n\n    classifier : dict or str\n        This is the output dict or pickle created by `get_rf_classifier`. 
This\n        will contain a `features_name` key that will be used to collect the same\n        features used to train the classifier from the varfeatures pickles in\n        varfeaturesdir.\n\n    varfeaturesdir : str\n        The directory containing the varfeatures pickles for objects that will\n        be classified by the trained `classifier`.\n\n    outpickle : str\n        This is a filename for the pickle that will be written containing the\n        result dict from this function.\n\n    maxobjects : int\n        This sets the number of objects to process in `varfeaturesdir`.\n\n    Returns\n    -------\n\n    dict\n        The classification results after running the trained `classifier` as\n        returned as a dict. This contains predicted labels and their prediction\n        probabilities.", "id": "f14714:m3"}
{"signature": "def plot_training_results(classifier,<EOL>classlabels,<EOL>outfile):", "body": "if isinstance(classifier,str) and os.path.exists(classifier):<EOL><INDENT>with open(classifier,'<STR_LIT:rb>') as infd:<EOL><INDENT>clfdict = pickle.load(infd)<EOL><DEDENT><DEDENT>elif isinstance(classifier, dict):<EOL><INDENT>clfdict = classifier<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>confmatrix = clfdict['<STR_LIT>']<EOL>overall_feature_importances = clfdict[<EOL>'<STR_LIT>'<EOL>].feature_importances_<EOL>feature_importances_per_tree = np.array([<EOL>tree.feature_importances_<EOL>for tree in clfdict['<STR_LIT>'].estimators_<EOL>])<EOL>stdev_feature_importances = np.std(feature_importances_per_tree,axis=<NUM_LIT:0>)<EOL>feature_names = np.array(clfdict['<STR_LIT>'])<EOL>plt.figure(figsize=(<NUM_LIT>*<NUM_LIT>,<NUM_LIT>))<EOL>plt.subplot(<NUM_LIT>)<EOL>classes = np.array(classlabels)<EOL>plt.imshow(confmatrix, interpolation='<STR_LIT>', cmap=plt.cm.Blues)<EOL>tick_marks = np.arange(len(classes))<EOL>plt.xticks(tick_marks, classes)<EOL>plt.yticks(tick_marks, classes)<EOL>plt.title('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.xlabel('<STR_LIT>')<EOL>thresh = confmatrix.max() / <NUM_LIT><EOL>for i, j in itertools.product(range(confmatrix.shape[<NUM_LIT:0>]),<EOL>range(confmatrix.shape[<NUM_LIT:1>])):<EOL><INDENT>plt.text(j, i, confmatrix[i, j],<EOL>horizontalalignment=\"<STR_LIT>\",<EOL>color=\"<STR_LIT>\" if confmatrix[i, j] > thresh else \"<STR_LIT>\")<EOL><DEDENT>plt.subplot(<NUM_LIT>)<EOL>features = np.array(feature_names)<EOL>sorted_ind = np.argsort(overall_feature_importances)[::-<NUM_LIT:1>]<EOL>features = features[sorted_ind]<EOL>feature_names = feature_names[sorted_ind]<EOL>overall_feature_importances = overall_feature_importances[sorted_ind]<EOL>stdev_feature_importances = 
stdev_feature_importances[sorted_ind]<EOL>plt.bar(np.arange(<NUM_LIT:0>,features.size),<EOL>overall_feature_importances,<EOL>yerr=stdev_feature_importances,<EOL>width=<NUM_LIT>,<EOL>color='<STR_LIT>')<EOL>plt.xticks(np.arange(<NUM_LIT:0>,features.size),<EOL>features,<EOL>rotation=<NUM_LIT>)<EOL>plt.yticks([<NUM_LIT:0.0>,<NUM_LIT:0.1>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT:0.5>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT:1.0>])<EOL>plt.xlim(-<NUM_LIT>, features.size - <NUM_LIT:1.0> + <NUM_LIT>)<EOL>plt.ylim(<NUM_LIT:0.0>,<NUM_LIT>)<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL>plt.subplots_adjust(wspace=<NUM_LIT:0.1>)<EOL>plt.savefig(outfile,<EOL>bbox_inches='<STR_LIT>',<EOL>dpi=<NUM_LIT:100>)<EOL>plt.close('<STR_LIT:all>')<EOL>return outfile<EOL>", "docstring": "This plots the training results from the classifier run on the training\n    set.\n\n    - plots the confusion matrix\n\n    - plots the feature importances\n\n    - FIXME: plot the learning curves too, see:\n      http://scikit-learn.org/stable/modules/learning_curve.html\n\n    Parameters\n    ----------\n\n    classifier : dict or str\n        This is the output dict or pickle created by `get_rf_classifier`\n        containing the trained classifier.\n\n    classlabels : list of str\n        This contains all of the class labels for the current classification\n        problem.\n\n    outfile : str\n        This is the filename where the plots will be written.\n\n    Returns\n    -------\n\n    str\n        The path to the generated plot file.", "id": "f14714:m4"}
{"signature": "def mdwarf_subtype_from_sdsscolor(ri_color, iz_color):", "body": "<EOL>if np.isfinite(ri_color) and np.isfinite(iz_color):<EOL><INDENT>obj_sti = <NUM_LIT>*ri_color + <NUM_LIT>*(iz_color + <NUM_LIT>)<EOL>obj_sts = -<NUM_LIT>*ri_color + <NUM_LIT>*(iz_color + <NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>obj_sti = np.nan<EOL>obj_sts = np.nan<EOL><DEDENT>if (np.isfinite(obj_sti) and np.isfinite(obj_sts) and<EOL>(obj_sti > <NUM_LIT>) and (obj_sti < <NUM_LIT>)):<EOL><INDENT>if ((obj_sti > <NUM_LIT>) and (obj_sti < <NUM_LIT>)):<EOL><INDENT>m_class = '<STR_LIT>'<EOL><DEDENT>if ((obj_sti > <NUM_LIT>) and (obj_sti < <NUM_LIT>)):<EOL><INDENT>m_class = '<STR_LIT>'<EOL><DEDENT>if ((obj_sti > <NUM_LIT>) and (obj_sti < <NUM_LIT>)):<EOL><INDENT>m_class = '<STR_LIT>'<EOL><DEDENT>if ((obj_sti > <NUM_LIT>) and (obj_sti < <NUM_LIT>)):<EOL><INDENT>m_class = '<STR_LIT>'<EOL><DEDENT>if ((obj_sti > <NUM_LIT>) and (obj_sti < <NUM_LIT>)):<EOL><INDENT>m_class = '<STR_LIT>'<EOL><DEDENT>if ((obj_sti > <NUM_LIT>) and (obj_sti < <NUM_LIT>)):<EOL><INDENT>m_class = '<STR_LIT>'<EOL><DEDENT>if ((obj_sti > <NUM_LIT>) and (obj_sti < <NUM_LIT>)):<EOL><INDENT>m_class = '<STR_LIT>'<EOL><DEDENT>if ((obj_sti > <NUM_LIT>) and (obj_sti < <NUM_LIT>)):<EOL><INDENT>m_class = '<STR_LIT>'<EOL><DEDENT>if ((obj_sti > <NUM_LIT>) and (obj_sti < <NUM_LIT>)):<EOL><INDENT>m_class = '<STR_LIT>'<EOL><DEDENT>if ((obj_sti > <NUM_LIT>) and (obj_sti < <NUM_LIT>)):<EOL><INDENT>m_class = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>m_class = None<EOL><DEDENT>return m_class, obj_sti, obj_sts<EOL>", "docstring": "This calculates the M-dwarf subtype given SDSS `r-i` and `i-z` colors.\n\n    Parameters\n    ----------\n\n    ri_color : float\n        The SDSS `r-i` color of the object.\n\n    iz_color : float\n        The SDSS `i-z` color of the object.\n\n    Returns\n    -------\n\n    (subtype, index1, index2) : tuple\n        `subtype`: if the star appears to be an M dwarf, will return an int\n        between 0 
and 9 indicating its subtype, e.g. will return 4 for an M4\n        dwarf. If the object isn't an M dwarf, will return None\n\n        `index1`, `index2`: the M-dwarf color locus value and spread of this\n        object calculated from the `r-i` and `i-z` colors.", "id": "f14717:m2"}
{"signature": "def color_classification(colorfeatures, pmfeatures):", "body": "possible_classes = []<EOL>if not colorfeatures:<EOL><INDENT>return possible_classes<EOL><DEDENT>if not pmfeatures:<EOL><INDENT>return possible_classes<EOL><DEDENT>if ( ('<STR_LIT>' in colorfeatures) and<EOL>(colorfeatures['<STR_LIT>'] is not None) and<EOL>(np.isfinite(colorfeatures['<STR_LIT>'])) ):<EOL><INDENT>u = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>u = np.nan<EOL><DEDENT>if ( ('<STR_LIT>' in colorfeatures) and<EOL>(colorfeatures['<STR_LIT>'] is not None) and<EOL>(np.isfinite(colorfeatures['<STR_LIT>'])) ):<EOL><INDENT>g = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>g = np.nan<EOL><DEDENT>if ( ('<STR_LIT>' in colorfeatures) and<EOL>(colorfeatures['<STR_LIT>'] is not None) and<EOL>(np.isfinite(colorfeatures['<STR_LIT>'])) ):<EOL><INDENT>r = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>r = np.nan<EOL><DEDENT>if ( ('<STR_LIT>' in colorfeatures) and<EOL>(colorfeatures['<STR_LIT>'] is not None) and<EOL>(np.isfinite(colorfeatures['<STR_LIT>'])) ):<EOL><INDENT>i = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>i = np.nan<EOL><DEDENT>if ( ('<STR_LIT>' in colorfeatures) and<EOL>(colorfeatures['<STR_LIT>'] is not None) and<EOL>(np.isfinite(colorfeatures['<STR_LIT>'])) ):<EOL><INDENT>z = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>z = np.nan<EOL><DEDENT>if ( ('<STR_LIT>' in colorfeatures) and<EOL>(colorfeatures['<STR_LIT>'] is not None) and<EOL>(np.isfinite(colorfeatures['<STR_LIT>'])) ):<EOL><INDENT>j = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>j = np.nan<EOL><DEDENT>if ( ('<STR_LIT>' in colorfeatures) and<EOL>(colorfeatures['<STR_LIT>'] is not None) and<EOL>(np.isfinite(colorfeatures['<STR_LIT>'])) ):<EOL><INDENT>h = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>h = np.nan<EOL><DEDENT>if ( ('<STR_LIT>' in colorfeatures) and<EOL>(colorfeatures['<STR_LIT>'] is not None) 
and<EOL>(np.isfinite(colorfeatures['<STR_LIT>'])) ):<EOL><INDENT>k = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>k = np.nan<EOL><DEDENT>if '<STR_LIT>' in colorfeatures and colorfeatures['<STR_LIT>'] is not None:<EOL><INDENT>um = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>um = np.nan<EOL><DEDENT>if '<STR_LIT>' in colorfeatures and colorfeatures['<STR_LIT>'] is not None:<EOL><INDENT>gm = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>gm = np.nan<EOL><DEDENT>if '<STR_LIT>' in colorfeatures and colorfeatures['<STR_LIT>'] is not None:<EOL><INDENT>rm = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>rm = np.nan<EOL><DEDENT>if '<STR_LIT>' in colorfeatures and colorfeatures['<STR_LIT>'] is not None:<EOL><INDENT>im = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>im = np.nan<EOL><DEDENT>if '<STR_LIT>' in colorfeatures and colorfeatures['<STR_LIT>'] is not None:<EOL><INDENT>zm = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>zm = np.nan<EOL><DEDENT>if '<STR_LIT>' in colorfeatures and colorfeatures['<STR_LIT>'] is not None:<EOL><INDENT>jm = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>jm = np.nan<EOL><DEDENT>if '<STR_LIT>' in colorfeatures and colorfeatures['<STR_LIT>'] is not None:<EOL><INDENT>hm = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>hm = np.nan<EOL><DEDENT>if '<STR_LIT>' in colorfeatures and colorfeatures['<STR_LIT>'] is not None:<EOL><INDENT>km = colorfeatures['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>km = np.nan<EOL><DEDENT>rpmj = pmfeatures['<STR_LIT>'] if np.isfinite(pmfeatures['<STR_LIT>']) else None<EOL>if (np.isfinite(u) and np.isfinite(g) and<EOL>np.isfinite(r) and np.isfinite(i) and<EOL>np.isfinite(z)):<EOL><INDENT>v_color = <NUM_LIT>*(u-g)-<NUM_LIT>*(g-r)+<NUM_LIT>*(r-i)+<NUM_LIT>*(i-z)<EOL><DEDENT>else:<EOL><INDENT>v_color = np.nan<EOL><DEDENT>if (np.isfinite(u) and np.isfinite(g) and np.isfinite(r)):<EOL><INDENT>p1_color = 
<NUM_LIT>*(u-g)+<NUM_LIT>*(g-r)-<NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>p1_color = np.nan<EOL><DEDENT>if (np.isfinite(u) and np.isfinite(g) and<EOL>np.isfinite(r) and np.isfinite(i)):<EOL><INDENT>l_color = -<NUM_LIT>*u + <NUM_LIT>*g - <NUM_LIT>*r - <NUM_LIT>*i + <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>l_color = np.nan<EOL><DEDENT>if (np.isfinite(u) and np.isfinite(g) and np.isfinite(r)):<EOL><INDENT>s_color = -<NUM_LIT>*u + <NUM_LIT>*g - <NUM_LIT>*r + <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>s_color = np.nan<EOL><DEDENT>if (np.isfinite(u) and np.isfinite(g) and np.isfinite(r)):<EOL><INDENT>d_ug = (u-g) + <NUM_LIT>*(g-r) - <NUM_LIT><EOL>d_gr = <NUM_LIT>*(u-g) - (g-r) - <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>d_ug, d_gr = np.nan, np.nan<EOL><DEDENT>m_subtype, m_sti, m_sts = mdwarf_subtype_from_sdsscolor(r-i, i-z)<EOL>if m_subtype and rpmj and rpmj > <NUM_LIT:1.0>:<EOL><INDENT>possible_classes.append('<STR_LIT:d>' + m_subtype)<EOL><DEDENT>if ( np.isfinite(u) and np.isfinite(g) and np.isfinite(r) and<EOL>((g-r) < -<NUM_LIT>) and ((g-r) > -<NUM_LIT:1.0>) and<EOL>((u-g) < <NUM_LIT>) and ((u-g) > -<NUM_LIT:1>) and<EOL>((u-g+<NUM_LIT:2>*(g-r)) < -<NUM_LIT:0.1>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( np.isfinite(u) and np.isfinite(g) and np.isfinite(r) and<EOL>((u-g) < <NUM_LIT>) and ((u-g) > <NUM_LIT>) and<EOL>((g-r) < <NUM_LIT>) and ((g-r) > -<NUM_LIT:0.5>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(p1_color) and np.isfinite(p1_color) and<EOL>np.isfinite(u) and np.isfinite(g) and np.isfinite(r) ) and<EOL>(p1_color < -<NUM_LIT>) and (p1_color > -<NUM_LIT>) and<EOL>((u-g) < <NUM_LIT>) and ((u-g) > <NUM_LIT>) and<EOL>((g-r) < <NUM_LIT>) and ((g-r) > <NUM_LIT>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(u) and np.isfinite(g) and np.isfinite(r) and<EOL>np.isfinite(l_color)) and<EOL>((g-r) < <NUM_LIT>) and ((g-r) > -<NUM_LIT:0.5>) and<EOL>((u-g) < <NUM_LIT>) and ((u-g) > 
<NUM_LIT>) and<EOL>(l_color > <NUM_LIT>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(p1_color) and np.isfinite(s_color)) and<EOL>(-<NUM_LIT:0.1> < p1_color < <NUM_LIT>) and (s_color > <NUM_LIT>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(g) and np.isfinite(g) and np.isfinite(r)) and<EOL>((g-r) < <NUM_LIT>) and ((g-r) > <NUM_LIT>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(g) and np.isfinite(r)) and<EOL>((g-r) < <NUM_LIT>) and ((g-r) > <NUM_LIT>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(u) and np.isfinite(g) and<EOL>np.isfinite(r) and np.isfinite(i) and<EOL>np.isfinite(l_color)) and<EOL>((g-r) > <NUM_LIT>) and ((g-r) < <NUM_LIT>) and<EOL>(l_color > <NUM_LIT>) and ((u-g) > <NUM_LIT>) and ((u-g) < <NUM_LIT>) and<EOL>((r-i) > <NUM_LIT>) and ((r-i) < <NUM_LIT>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(u) and np.isfinite(g) and<EOL>np.isfinite(r) and np.isfinite(s_color)) and<EOL>((u-g) < <NUM_LIT>) and ((u-g) > <NUM_LIT>) and<EOL>((g-r) < <NUM_LIT>) and ((g-r) > <NUM_LIT>) and<EOL>(s_color < -<NUM_LIT>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(g) and np.isfinite(r)) and<EOL>((g-r) < <NUM_LIT>) and ((g-r) > <NUM_LIT>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(g) and np.isfinite(r) and np.isfinite(i)) and<EOL>((g-r) > <NUM_LIT>) and ((r-i) < <NUM_LIT>) and ((r-i) > <NUM_LIT>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(j) and np.isfinite(h) and np.isfinite(k) and<EOL>np.isfinite(g) and np.isfinite(i)) and<EOL>((j-k) > <NUM_LIT>) and<EOL>((j-h) < (<NUM_LIT>*(j-k) + <NUM_LIT>)) and<EOL>((j-h) > (<NUM_LIT>*(j-k) + <NUM_LIT>)) and<EOL>((g-i) > (<NUM_LIT>*(i-k) - <NUM_LIT>)) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(um) and 
np.isfinite(gm) and<EOL>np.isfinite(rm) and np.isfinite(im)) and<EOL>((um-gm) < <NUM_LIT>) and ((gm-rm) > -<NUM_LIT>) and<EOL>((gm-rm) < <NUM_LIT>) and ((rm-im) > <NUM_LIT:0.5>) and<EOL>((rm-im) < <NUM_LIT>) and<EOL>((gm-rm) > (-<NUM_LIT>*(rm-im)+<NUM_LIT>)) and<EOL>((gm-rm) < (<NUM_LIT>*(rm-im)+<NUM_LIT:0.5>)) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(um) and np.isfinite(gm) and np.isfinite(rm) and<EOL>np.isfinite(im) and np.isfinite(zm)) and<EOL>(zm < <NUM_LIT>) and (um > <NUM_LIT>) and (gm > <NUM_LIT>) and<EOL>(rm > <NUM_LIT>) and ((im - zm) > <NUM_LIT>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(u) and np.isfinite(g) and np.isfinite(r) and<EOL>np.isfinite(i) and np.isfinite(z) and np.isfinite(d_ug) and<EOL>np.isfinite(d_gr)) and<EOL>((u-g) > <NUM_LIT>) and ((u-g) < <NUM_LIT>) and<EOL>(d_ug > -<NUM_LIT>) and (d_ug < <NUM_LIT>) and<EOL>(d_gr > <NUM_LIT>) and (d_gr < <NUM_LIT>) and<EOL>((r-i) > -<NUM_LIT>) and ((r-i) < <NUM_LIT>) and<EOL>((i-z) > -<NUM_LIT>) and ((i-z) < <NUM_LIT>) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>if ( (np.isfinite(u) and np.isfinite(g) and np.isfinite(r)) and<EOL>( (((u-g) > -<NUM_LIT:0.1>) and ((u-g) < <NUM_LIT>) and<EOL>((g-r) > -<NUM_LIT>) and ((g-r) < <NUM_LIT:0.5>)) or<EOL>((u-g) > (<NUM_LIT>*(g-r) + <NUM_LIT>)) ) ):<EOL><INDENT>possible_classes.append('<STR_LIT>')<EOL><DEDENT>return {'<STR_LIT>':possible_classes,<EOL>'<STR_LIT>':v_color,<EOL>'<STR_LIT>':p1_color,<EOL>'<STR_LIT>':s_color,<EOL>'<STR_LIT>':l_color,<EOL>'<STR_LIT>':d_ug,<EOL>'<STR_LIT>':d_gr,<EOL>'<STR_LIT>':m_sti,<EOL>'<STR_LIT>':m_sts}<EOL>", "docstring": "This calculates rough star type classifications based on star colors\n    in the ugrizJHK bands.\n\n    Uses the output from `color_features` and `coord_features`. 
By default,\n    `color_features` will use dereddened colors, as are expected by most\n    relations here.\n\n    Based on the color cuts from:\n\n    - SDSS SEGUE (Yanny+ 2009)\n    - SDSS QSO catalog (Schneider+ 2007)\n    - SDSS RR Lyrae catalog (Sesar+ 2011)\n    - SDSS M-dwarf catalog (West+ 2008)\n    - Helmi+ 2003\n    - Bochanski+ 2014\n\n    Parameters\n    ----------\n\n    colorfeatures : dict\n        This is the dict produced by the `color_features` function.\n\n    pmfeatures : dict\n        This is the dict produced by the `coord_features` function.\n\n    Returns\n    -------\n\n    dict\n        A dict containing all of the possible classes this object can belong to\n        as a list in the `color_classes` key, and values of the various color\n        indices used to arrive to that conclusion as the other keys.", "id": "f14717:m3"}
{"signature": "def gilliland_cdpp(times, mags, errs,<EOL>windowlength=<NUM_LIT>,<EOL>polyorder=<NUM_LIT:2>,<EOL>binsize=<NUM_LIT>,  <EOL>sigclip=<NUM_LIT>,<EOL>magsarefluxes=False,<EOL>**kwargs):", "body": "<EOL>if errs is None:<EOL><INDENT>errs = <NUM_LIT>*mags<EOL><DEDENT>find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)<EOL>ftimes = times[find]<EOL>fmags = mags[find]<EOL>ferrs = errs[find]<EOL>if ftimes.size < (<NUM_LIT:3>*windowlength):<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return npnan<EOL><DEDENT>smoothed = savgol_filter(fmags, windowlength, polyorder, **kwargs)<EOL>subtracted = fmags - smoothed<EOL>stimes, smags, serrs = sigclip_magseries(ftimes, subtracted, ferrs,<EOL>magsarefluxes=magsarefluxes)<EOL>binned = time_bin_magseries_with_errs(stimes, smags, serrs,<EOL>binsize=binsize,<EOL>minbinelems=<NUM_LIT:7>)<EOL>bmags = binned['<STR_LIT>']<EOL>cdpp = npstd(bmags) * <NUM_LIT><EOL>return cdpp<EOL>", "docstring": "This calculates the CDPP of a timeseries using the method in the paper:\n\n    Gilliland, R. L., Chaplin, W. J., Dunham, E. W., et al. 
2011, ApJS, 197, 6\n    (http://adsabs.harvard.edu/abs/2011ApJS..197....6G)\n\n    The steps are:\n\n    - pass the time-series through a Savitsky-Golay filter.\n\n      - we use `scipy.signal.savgol_filter`, `**kwargs` are passed to this.\n\n      - also see: http://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay.\n\n      - the `windowlength` is the number of LC points to use (Kepler uses 2 days\n        = (1440 minutes/day / 30 minutes/LC point) x 2 days = 96 -> 97 LC\n        points).\n\n      - the `polyorder` is a quadratic by default.\n\n\n    - subtract the smoothed time-series from the actual light curve.\n\n    - sigma clip the remaining LC.\n\n    - get the binned mag series by averaging over 6.5 hour bins, only retaining\n      bins with at least 7 points.\n\n    - the standard deviation of the binned averages is the CDPP.\n\n    - multiply this by 1.168 to correct for over-subtraction of white-noise.\n\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The input mag/flux time-series to calculate CDPP for.\n\n    windowlength : int\n        The smoothing window size to use.\n\n    polyorder : int\n        The polynomial order to use in the Savitsky-Golay smoothing.\n\n    binsize : int\n        The bin size to use for binning the light curve.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. 
For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    magsarefluxes : bool\n        If True, indicates the input time-series is fluxes and not mags.\n\n    kwargs : additional kwargs\n        These are passed directly to `scipy.signal.savgol_filter`.\n\n    Returns\n    -------\n\n    float\n        The calculated CDPP value.", "id": "f14718:m6"}
{"signature": "def lightcurve_ptp_measures(ftimes, fmags, ferrs):", "body": "ndet = len(fmags)<EOL>if ndet > <NUM_LIT:9>:<EOL><INDENT>timediffs = npdiff(ftimes)<EOL>nzind = npnonzero(timediffs)<EOL>ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]<EOL>ndet = ftimes.size<EOL>timediffs = npdiff(ftimes)<EOL>p2p_abs_magdiffs = npabs(npdiff(fmags))<EOL>p2p_squared_magdiffs = npdiff(fmags)*npdiff(fmags)<EOL>robstd = npmedian(npabs(fmags - npmedian(fmags)))*<NUM_LIT><EOL>robvar = robstd*robstd<EOL>eta_robust = npmedian(p2p_abs_magdiffs)/robvar<EOL>eta_robust = eta_robust/(ndet - <NUM_LIT:1.0>)<EOL>eta_normal = npsum(p2p_squared_magdiffs)/npvar(fmags)<EOL>eta_normal = eta_normal/(ndet - <NUM_LIT:1.0>)<EOL>timeweights = <NUM_LIT:1.0>/(timediffs*timediffs)<EOL>eta_uneven_normal = (<EOL>(npsum(timeweights*p2p_squared_magdiffs) /<EOL>(npvar(fmags) * npsum(timeweights)) ) *<EOL>npmean(timeweights) *<EOL>(ftimes.max() - ftimes.min())*(ftimes.max() - ftimes.min())<EOL>)<EOL>eta_uneven_robust = (<EOL>(npsum(timeweights*p2p_abs_magdiffs) /<EOL>(robvar * npsum(timeweights)) ) *<EOL>npmedian(timeweights) *<EOL>(ftimes[-<NUM_LIT:1>] - ftimes[<NUM_LIT:0>])*(ftimes[-<NUM_LIT:1>] - ftimes[<NUM_LIT:0>])<EOL>)<EOL>return {<EOL>'<STR_LIT>':eta_normal,<EOL>'<STR_LIT>':eta_robust,<EOL>'<STR_LIT>':eta_uneven_normal,<EOL>'<STR_LIT>':eta_uneven_robust<EOL>}<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "This calculates various point-to-point measures (`eta` in Kim+ 2014).\n\n    Parameters\n    ----------\n\n    ftimes,fmags,ferrs : np.array\n        The input mag/flux time-series with all non-finite elements removed.\n\n    Returns\n    -------\n\n    dict\n        A dict with values of the point-to-point measures, including the `eta`\n        variability index (often used as its inverse `inveta` to have the same\n        sense as increasing variability index -> more likely a variable star).", "id": "f14718:m4"}
{"signature": "def all_nonperiodic_features(times, mags, errs,<EOL>magsarefluxes=False,<EOL>stetson_weightbytimediff=True):", "body": "<EOL>finiteind = npisfinite(times) & npisfinite(mags) & npisfinite(errs)<EOL>ftimes, fmags, ferrs = times[finiteind], mags[finiteind], errs[finiteind]<EOL>nzind = npnonzero(ferrs)<EOL>ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]<EOL>xfeatures = nonperiodic_lightcurve_features(times, mags, errs,<EOL>magsarefluxes=magsarefluxes)<EOL>stetj = stetson_jindex(ftimes, fmags, ferrs,<EOL>weightbytimediff=stetson_weightbytimediff)<EOL>stetk = stetson_kindex(fmags, ferrs)<EOL>xfeatures.update({'<STR_LIT>':stetj,<EOL>'<STR_LIT>':stetk})<EOL>return xfeatures<EOL>", "docstring": "This rolls up the feature functions above and returns a single dict.\n\n    NOTE: this doesn't calculate the CDPP to save time since binning and\n    smoothing takes a while for dense light curves.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The input mag/flux time-series to calculate CDPP for.\n\n    magsarefluxes : bool\n        If True, indicates `mags` is actually an array of flux values.\n\n    stetson_weightbytimediff : bool\n        If this is True, the Stetson index for any pair of mags will be\n        reweighted by the difference in times between them using the scheme in\n        Fruth+ 2012 and Zhange+ 2003 (as seen in Sokolovsky+ 2017)::\n\n            w_i = exp(- (t_i+1 - t_i)/ delta_t )\n\n    Returns\n    -------\n\n    dict\n        Returns a dict with all of the variability features.", "id": "f14718:m7"}
{"signature": "def lightcurve_moments(ftimes, fmags, ferrs):", "body": "ndet = len(fmags)<EOL>if ndet > <NUM_LIT:9>:<EOL><INDENT>series_median = npmedian(fmags)<EOL>series_wmean = (<EOL>npsum(fmags*(<NUM_LIT:1.0>/(ferrs*ferrs)))/npsum(<NUM_LIT:1.0>/(ferrs*ferrs))<EOL>)<EOL>series_mad = npmedian(npabs(fmags - series_median))<EOL>series_stdev = <NUM_LIT>*series_mad<EOL>series_skew = spskew(fmags)<EOL>series_kurtosis = spkurtosis(fmags)<EOL>series_above1std = len(fmags[fmags > (series_median + series_stdev)])<EOL>series_below1std = len(fmags[fmags < (series_median - series_stdev)])<EOL>series_beyond1std = (series_above1std + series_below1std)/float(ndet)<EOL>series_mag_percentiles = nppercentile(<EOL>fmags,<EOL>[<NUM_LIT>,<NUM_LIT:10>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>]<EOL>)<EOL>return {<EOL>'<STR_LIT>':series_median,<EOL>'<STR_LIT>':series_wmean,<EOL>'<STR_LIT>':series_mad,<EOL>'<STR_LIT>':series_stdev,<EOL>'<STR_LIT>':series_skew,<EOL>'<STR_LIT>':series_kurtosis,<EOL>'<STR_LIT>':series_beyond1std,<EOL>'<STR_LIT>':series_mag_percentiles,<EOL>'<STR_LIT>': series_mag_percentiles[<NUM_LIT:8>] - series_mag_percentiles[<NUM_LIT:3>],<EOL>}<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return None<EOL><DEDENT>", "docstring": "This calculates the weighted mean, stdev, median, MAD, percentiles, skew,\n    kurtosis, fraction of LC beyond 1-stdev, and IQR.\n\n    Parameters\n    ----------\n\n    ftimes,fmags,ferrs : np.array\n        The input mag/flux time-series with all non-finite elements removed.\n\n    Returns\n    -------\n\n    dict\n        A dict with all of the light curve moments calculated.", "id": "f14718:m2"}
{"signature": "def stetson_jindex(ftimes, fmags, ferrs, weightbytimediff=False):", "body": "ndet = len(fmags)<EOL>if ndet > <NUM_LIT:9>:<EOL><INDENT>medmag = npmedian(fmags)<EOL>delta_prefactor = (ndet/(ndet - <NUM_LIT:1>))<EOL>sigma_i = delta_prefactor*(fmags - medmag)/ferrs<EOL>sigma_j = nproll(sigma_i,<NUM_LIT:1>)<EOL>if weightbytimediff:<EOL><INDENT>difft = npdiff(ftimes)<EOL>deltat = npmedian(difft)<EOL>weights_i = npexp(- difft/deltat )<EOL>products = (weights_i*sigma_i[<NUM_LIT:1>:]*sigma_j[<NUM_LIT:1>:])<EOL><DEDENT>else:<EOL><INDENT>products = (sigma_i*sigma_j)[<NUM_LIT:1>:]<EOL><DEDENT>stetsonj = (<EOL>npsum(npsign(products) * npsqrt(npabs(products)))<EOL>) / ndet<EOL>return stetsonj<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return npnan<EOL><DEDENT>", "docstring": "This calculates the Stetson index for the magseries, based on consecutive\n    pairs of observations.\n\n    Based on Nicole Loncke's work for her Planets and Life certificate at\n    Princeton in 2014.\n\n    Parameters\n    ----------\n\n    ftimes,fmags,ferrs : np.array\n        The input mag/flux time-series with all non-finite elements removed.\n\n    weightbytimediff : bool\n        If this is True, the Stetson index for any pair of mags will be\n        reweighted by the difference in times between them using the scheme in\n        Fruth+ 2012 and Zhange+ 2003 (as seen in Sokolovsky+ 2017)::\n\n            w_i = exp(- (t_i+1 - t_i)/ delta_t )\n\n    Returns\n    -------\n\n    float\n        The calculated Stetson J variability index.", "id": "f14718:m0"}
{"signature": "def finalize_checkplot(cpx,<EOL>outdir,<EOL>all_lclistpkl,<EOL>objfits=None):", "body": "", "docstring": "This is used to prevent any further changes to the checkplot.\n\n    TODO: finish this.\n\n    Use this function after all variable classification, period-finding, and\n    object xmatches are done. This function will add a 'final' key to the\n    checkplot, which will contain:\n\n    - a phased LC plot with the period and epoch set after review using the\n      times, mags, errs after any appropriate filtering and sigclip was done in\n      the checkplotserver UI\n\n    - The unphased LC using the times, mags, errs after any appropriate\n      filtering and sigclip was done in the checkplotserver UI\n\n    - the same plots for any LC collection neighbors\n\n    - the survey cutout for the object if objfits is provided and checks out\n\n    - a redone neighbor search using GAIA and all light curves in the collection\n      even if they don't have at least 1000 observations.\n\n    These items will be shown in a special 'Final' tab in the `checkplotserver`\n    webapp (this should be run in `readonly` mode as well). The final tab will\n    also contain downloadable links for the checkplot pickle in pkl and PNG\n    format, as well as the final times, mags, errs as a gzipped CSV with a\n    header containing all of this info.\n\n    Parameters\n    ----------\n\n    cpx : dict or str\n        This is the path to the checkplot dict or pickle file to process.\n\n    outdir : str\n        This is the directory to where the final pickle will be written. If this\n        is set to the same dir as `cpx` and `cpx` is a pickle, the function will\n        return a failure. 
This is meant to keep the in-process checkplots\n        separate from the finalized versions.\n\n    all_lclistpkl : str or dict\n        This is the path to the pickle or the dict created by\n        :py:func:`astrobase.lcproc.catalogs.make_lclist` with no restrictions on\n        the number of observations (so ALL light curves in the collection). This\n        is used to make sure all neighbors of this object in the light curve\n        collection have had their proximity to this object noted.\n\n    objfits : str or None\n        If this is not None, should be a file path to a FITS file containing a\n        WCS header and this object from the instrument that was used to observe\n        it. This will be used to make a stamp cutout of the object using the\n        actual image it was detected on. This will be a useful comparison to the\n        usual DSS POSS-RED2 image used by the checkplots.\n\n    Returns\n    -------\n\n    str\n        The path to the updated checkplot pickle file with a 'final' key added\n        to it , as described above.", "id": "f14719:m1"}
{"signature": "def parallel_finalize_cpdir(cpdir,<EOL>outdir,<EOL>cpfileglob='<STR_LIT>',<EOL>objfits=None):", "body": "", "docstring": "This is a parallel driver for `finalize_checkplot`, operating on a\n    directory of checkplots.\n\n    Parameters\n    ----------\n\n    cpdir : str\n        This is the path to the directory containing all the checkplot pickles\n        to process and run through the finalization process.\n\n    outdir : str\n        The directory to where the finalized checkplot pickles will be written.\n\n    objfits : str\n        Path to the FITS file containing a WCS header and detections of all\n        objects observed by the actual instrument that obtained the light\n        curves. This should generally be a high quality 'reference' frame so\n        that all of the objects whose checkplots we're finalizing (in `cplist`)\n        can be seen on the frame.\n\n    Returns\n    -------\n\n    dict\n        Dict indicating the success/failure (as True/False) of the checkplot\n        finalize operations for each checkplot pickle provided in `cplist`.", "id": "f14719:m3"}
{"signature": "def load_xmatch_external_catalogs(xmatchto, xmatchkeys, outfile=None):", "body": "outdict = {}<EOL>for xc, xk in zip(xmatchto, xmatchkeys):<EOL><INDENT>parsed_catdef = _parse_xmatch_catalog_header(xc, xk)<EOL>if not parsed_catdef:<EOL><INDENT>continue<EOL><DEDENT>(infd, catdefdict,<EOL>catcolinds, catcoldtypes,<EOL>catcolnames, catcolunits) = parsed_catdef<EOL>catarr = np.genfromtxt(infd,<EOL>usecols=catcolinds,<EOL>names=xk,<EOL>dtype='<STR_LIT:U+002C>'.join(catcoldtypes),<EOL>comments='<STR_LIT:#>',<EOL>delimiter='<STR_LIT:|>',<EOL>autostrip=True)<EOL>infd.close()<EOL>catshortname = os.path.splitext(os.path.basename(xc))[<NUM_LIT:0>]<EOL>catshortname = catshortname.replace('<STR_LIT>','<STR_LIT>')<EOL>objra, objdecl = (catarr[catdefdict['<STR_LIT>']],<EOL>catarr[catdefdict['<STR_LIT>']])<EOL>cosdecl = np.cos(np.radians(objdecl))<EOL>sindecl = np.sin(np.radians(objdecl))<EOL>cosra = np.cos(np.radians(objra))<EOL>sinra = np.sin(np.radians(objra))<EOL>xyz = np.column_stack((cosra*cosdecl,sinra*cosdecl, sindecl))<EOL>kdt = cKDTree(xyz,copy_data=True)<EOL>catoutdict = {'<STR_LIT>':kdt,<EOL>'<STR_LIT:data>':catarr,<EOL>'<STR_LIT>':xk,<EOL>'<STR_LIT>':catcolnames,<EOL>'<STR_LIT>':catcolunits,<EOL>'<STR_LIT:name>':catdefdict['<STR_LIT:name>'],<EOL>'<STR_LIT>':catdefdict['<STR_LIT:description>']}<EOL>outdict[catshortname] = catoutdict<EOL><DEDENT>if outfile is not None:<EOL><INDENT>if sys.platform == '<STR_LIT>':<EOL><INDENT>dumpbytes = pickle.dumps(outdict, protocol=pickle.HIGHEST_PROTOCOL)<EOL>max_bytes = <NUM_LIT:2>**<NUM_LIT> - <NUM_LIT:1><EOL>with open(outfile, '<STR_LIT:wb>') as outfd:<EOL><INDENT>for idx in range(<NUM_LIT:0>, len(dumpbytes), max_bytes):<EOL><INDENT>outfd.write(dumpbytes[idx:idx+max_bytes])<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>with open(outfile, '<STR_LIT:wb>') as outfd:<EOL><INDENT>pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)<EOL><DEDENT><DEDENT>return outfile<EOL><DEDENT>else:<EOL><INDENT>return 
outdict<EOL><DEDENT>", "docstring": "This loads the external xmatch catalogs into a dict for use in an xmatch.\n\n    Parameters\n    ----------\n\n    xmatchto : list of str\n        This is a list of paths to all the catalog text files that will be\n        loaded.\n\n        The text files must be 'CSVs' that use the '|' character as the\n        separator betwen columns. These files should all begin with a header in\n        JSON format on lines starting with the '#' character. this header will\n        define the catalog and contains the name of the catalog and the column\n        definitions. Column definitions must have the column name and the numpy\n        dtype of the columns (in the same format as that expected for the\n        numpy.genfromtxt function). Any line that does not begin with '#' is\n        assumed to be part of the columns in the catalog. An example is shown\n        below::\n\n            # {\"name\":\"NSVS catalog of variable stars\",\n            #  \"columns\":[\n            #   {\"key\":\"objectid\", \"dtype\":\"U20\", \"name\":\"Object ID\", \"unit\": null},\n            #   {\"key\":\"ra\", \"dtype\":\"f8\", \"name\":\"RA\", \"unit\":\"deg\"},\n            #   {\"key\":\"decl\",\"dtype\":\"f8\", \"name\": \"Declination\", \"unit\":\"deg\"},\n            #   {\"key\":\"sdssr\",\"dtype\":\"f8\",\"name\":\"SDSS r\", \"unit\":\"mag\"},\n            #   {\"key\":\"vartype\",\"dtype\":\"U20\",\"name\":\"Variable type\", \"unit\":null}\n            #  ],\n            #  \"colra\":\"ra\",\n            #  \"coldec\":\"decl\",\n            #  \"description\":\"Contains variable stars from the NSVS catalog\"}\n            objectid1 | 45.0  | -20.0 | 12.0 | detached EB\n            objectid2 | 145.0 | 23.0  | 10.0 | RRab\n            objectid3 | 12.0  | 11.0  | 14.0 | Cepheid\n            .\n            .\n            .\n\n    xmatchkeys : list of lists\n        This is the list of lists of column names (as str) to get out of each\n        
`xmatchto` catalog. This should be the same length as `xmatchto` and\n        each element here will apply to the respective file in `xmatchto`.\n\n    outfile : str or None\n        If this is not None, set this to the name of the pickle to write the\n        collected xmatch catalogs to. this pickle can then be loaded\n        transparently by the :py:func:`astrobase.checkplot.pkl.checkplot_dict`,\n        :py:func:`astrobase.checkplot.pkl.checkplot_pickle` functions to provide\n        xmatch info to the\n        :py:func:`astrobase.checkplot.pkl_xmatch.xmatch_external_catalogs`\n        function below.\n\n        If this is None, will return the loaded xmatch catalogs directly. This\n        will be a huge dict, so make sure you have enough RAM.\n\n    Returns\n    -------\n\n    str or dict\n        Based on the `outfile` kwarg, will either return the path to a collected\n        xmatch pickle file or the collected xmatch dict.", "id": "f14721:m1"}
{"signature": "def _make_magseries_plot(axes,<EOL>stimes,<EOL>smags,<EOL>serrs,<EOL>magsarefluxes=False,<EOL>ms=<NUM_LIT>):", "body": "scaledplottime = stimes - npmin(stimes)<EOL>axes.plot(scaledplottime,<EOL>smags,<EOL>marker='<STR_LIT:o>',<EOL>ms=ms, ls='<STR_LIT:None>',mew=<NUM_LIT:0>,<EOL>color='<STR_LIT>',<EOL>rasterized=True)<EOL>if not magsarefluxes:<EOL><INDENT>plot_ylim = axes.get_ylim()<EOL>axes.set_ylim((plot_ylim[<NUM_LIT:1>], plot_ylim[<NUM_LIT:0>]))<EOL><DEDENT>axes.set_xlim((npmin(scaledplottime)-<NUM_LIT:1.0>,<EOL>npmax(scaledplottime)+<NUM_LIT:1.0>))<EOL>axes.grid(color='<STR_LIT>',<EOL>alpha=<NUM_LIT>,<EOL>zorder=<NUM_LIT:0>,<EOL>linewidth=<NUM_LIT:1.0>,<EOL>linestyle='<STR_LIT::>')<EOL>plot_xlabel = '<STR_LIT>' % npmin(stimes)<EOL>if magsarefluxes:<EOL><INDENT>plot_ylabel = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>plot_ylabel = '<STR_LIT>'<EOL><DEDENT>axes.set_xlabel(plot_xlabel)<EOL>axes.set_ylabel(plot_ylabel)<EOL>axes.get_yaxis().get_major_formatter().set_useOffset(False)<EOL>axes.get_xaxis().get_major_formatter().set_useOffset(False)<EOL>", "docstring": "Makes the mag-series plot tile for `checkplot_png` and\n    `twolsp_checkplot_png`.\n\n    axes : matplotlib.axes.Axes object\n        The Axes object where the generated plot will go.\n\n    stimes,smags,serrs : np.array\n        The mag/flux time-series arrays along with associated errors. These\n        should all have been run through nan-stripping and sigma-clipping\n        beforehand.\n\n    magsarefluxes : bool\n        If True, indicates the input time-series is fluxes and not mags so the\n        plot y-axis direction and range can be set appropriately.\n\n    ms : float\n        The `markersize` kwarg to use when making the mag-series plot.\n\n    Returns\n    -------\n\n    Does not return anything, works on the input Axes object directly.", "id": "f14722:m1"}
{"signature": "def _make_phased_magseries_plot(axes,<EOL>periodind,<EOL>stimes, smags, serrs,<EOL>varperiod, varepoch,<EOL>phasewrap, phasesort,<EOL>phasebin, minbinelems,<EOL>plotxlim,<EOL>lspmethod,<EOL>lspmethodind=<NUM_LIT:0>,<EOL>xliminsetmode=False,<EOL>twolspmode=False,<EOL>magsarefluxes=False,<EOL>verbose=True,<EOL>phasems=<NUM_LIT>,<EOL>phasebinms=<NUM_LIT>):", "body": "plotvarepoch = None<EOL>if varepoch is None:<EOL><INDENT>plotvarepoch = npmin(stimes)<EOL><DEDENT>elif isinstance(varepoch, str) and varepoch == '<STR_LIT>':<EOL><INDENT>try:<EOL><INDENT>spfit = spline_fit_magseries(stimes,<EOL>smags,<EOL>serrs,<EOL>varperiod,<EOL>magsarefluxes=magsarefluxes,<EOL>sigclip=None,<EOL>verbose=verbose)<EOL>plotvarepoch = spfit['<STR_LIT>']['<STR_LIT>']<EOL>if len(plotvarepoch) != <NUM_LIT:1>:<EOL><INDENT>plotvarepoch = varepoch[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>sgfit = savgol_fit_magseries(stimes,<EOL>smags,<EOL>serrs,<EOL>varperiod,<EOL>sigclip=None,<EOL>magsarefluxes=magsarefluxes,<EOL>verbose=verbose)<EOL>plotvarepoch = sgfit['<STR_LIT>']['<STR_LIT>']<EOL>if len(plotvarepoch) != <NUM_LIT:1>:<EOL><INDENT>plotvarepoch = plotvarepoch[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>if plotvarepoch is None:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>plotvarepoch = npmin(stimes)<EOL><DEDENT><DEDENT><DEDENT>elif isinstance(varepoch, list):<EOL><INDENT>try:<EOL><INDENT>if twolspmode:<EOL><INDENT>thisvarepochlist = varepoch[lspmethodind]<EOL>plotvarepoch = thisvarepochlist[periodind]<EOL><DEDENT>else:<EOL><INDENT>plotvarepoch = varepoch[periodind]<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL>plotvarepoch = npmin(stimes)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>plotvarepoch = varepoch<EOL><DEDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' %<EOL>(varperiod, 
plotvarepoch))<EOL><DEDENT>phasedlc = phase_magseries(stimes,<EOL>smags,<EOL>varperiod,<EOL>plotvarepoch,<EOL>wrap=phasewrap,<EOL>sort=phasesort)<EOL>plotphase = phasedlc['<STR_LIT>']<EOL>plotmags = phasedlc['<STR_LIT>']<EOL>if phasebin:<EOL><INDENT>binphasedlc = phase_bin_magseries(plotphase,<EOL>plotmags,<EOL>binsize=phasebin,<EOL>minbinelems=minbinelems)<EOL>binplotphase = binphasedlc['<STR_LIT>']<EOL>binplotmags = binphasedlc['<STR_LIT>']<EOL><DEDENT>axes.plot(plotphase,<EOL>plotmags,<EOL>marker='<STR_LIT:o>',<EOL>ms=phasems, ls='<STR_LIT:None>',mew=<NUM_LIT:0>,<EOL>color='<STR_LIT>',<EOL>rasterized=True)<EOL>if phasebin:<EOL><INDENT>axes.plot(binplotphase,<EOL>binplotmags,<EOL>marker='<STR_LIT:o>',<EOL>ms=phasebinms, ls='<STR_LIT:None>',mew=<NUM_LIT:0>,<EOL>color='<STR_LIT>',<EOL>rasterized=True)<EOL><DEDENT>if not magsarefluxes:<EOL><INDENT>plot_ylim = axes.get_ylim()<EOL>axes.set_ylim((plot_ylim[<NUM_LIT:1>], plot_ylim[<NUM_LIT:0>]))<EOL><DEDENT>if not plotxlim:<EOL><INDENT>axes.set_xlim((npmin(plotphase)-<NUM_LIT:0.1>,<EOL>npmax(plotphase)+<NUM_LIT:0.1>))<EOL><DEDENT>else:<EOL><INDENT>axes.set_xlim((plotxlim[<NUM_LIT:0>],plotxlim[<NUM_LIT:1>]))<EOL><DEDENT>axes.grid(color='<STR_LIT>',<EOL>alpha=<NUM_LIT>,<EOL>zorder=<NUM_LIT:0>,<EOL>linewidth=<NUM_LIT:1.0>,<EOL>linestyle='<STR_LIT::>')<EOL>plot_xlabel = '<STR_LIT>'<EOL>if magsarefluxes:<EOL><INDENT>plot_ylabel = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>plot_ylabel = '<STR_LIT>'<EOL><DEDENT>axes.set_xlabel(plot_xlabel)<EOL>axes.set_ylabel(plot_ylabel)<EOL>axes.get_yaxis().get_major_formatter().set_useOffset(False)<EOL>axes.get_xaxis().get_major_formatter().set_useOffset(False)<EOL>if periodind == <NUM_LIT:0>:<EOL><INDENT>plottitle = '<STR_LIT>' % (<EOL>METHODSHORTLABELS[lspmethod],<EOL>varperiod,<EOL>plotvarepoch<EOL>)<EOL><DEDENT>elif periodind == <NUM_LIT:1> and not twolspmode:<EOL><INDENT>plottitle = '<STR_LIT>' % (<EOL>METHODSHORTLABELS[lspmethod],<EOL>varperiod,<EOL>plotvarepoch<EOL>)<EOL><DEDENT>elif 
periodind == <NUM_LIT:2> and not twolspmode:<EOL><INDENT>plottitle = '<STR_LIT>' % (<EOL>METHODSHORTLABELS[lspmethod],<EOL>varperiod,<EOL>plotvarepoch<EOL>)<EOL><DEDENT>elif periodind > <NUM_LIT:2> and not twolspmode:<EOL><INDENT>plottitle = '<STR_LIT>' % (<EOL>METHODSHORTLABELS[lspmethod],<EOL>periodind-<NUM_LIT:1>,<EOL>varperiod,<EOL>plotvarepoch<EOL>)<EOL><DEDENT>elif periodind > <NUM_LIT:0>:<EOL><INDENT>plottitle = '<STR_LIT>' % (<EOL>METHODSHORTLABELS[lspmethod],<EOL>periodind+<NUM_LIT:1>,<EOL>varperiod,<EOL>plotvarepoch<EOL>)<EOL><DEDENT>axes.set_title(plottitle)<EOL>if (plotxlim and isinstance(plotxlim, (list,tuple)) and<EOL>len(plotxlim) == <NUM_LIT:2> and xliminsetmode is True):<EOL><INDENT>axesylim = axes.get_ylim()<EOL>if magsarefluxes:<EOL><INDENT>axes.set_ylim(axesylim[<NUM_LIT:0>],<EOL>axesylim[<NUM_LIT:1>] + <NUM_LIT:0.5>*npabs(axesylim[<NUM_LIT:1>]-axesylim[<NUM_LIT:0>]))<EOL><DEDENT>else:<EOL><INDENT>axes.set_ylim(axesylim[<NUM_LIT:0>],<EOL>axesylim[<NUM_LIT:1>] - <NUM_LIT:0.5>*npabs(axesylim[<NUM_LIT:1>]-axesylim[<NUM_LIT:0>]))<EOL><DEDENT>inset = inset_axes(axes, width=\"<STR_LIT>\", height=\"<STR_LIT>\", loc=<NUM_LIT:1>)<EOL>inset.plot(plotphase,<EOL>plotmags,<EOL>marker='<STR_LIT:o>',<EOL>ms=<NUM_LIT>, ls='<STR_LIT:None>',mew=<NUM_LIT:0>,<EOL>color='<STR_LIT>',<EOL>rasterized=True)<EOL>if phasebin:<EOL><INDENT>inset.plot(binplotphase,<EOL>binplotmags,<EOL>marker='<STR_LIT:o>',<EOL>ms=<NUM_LIT>, ls='<STR_LIT:None>',mew=<NUM_LIT:0>,<EOL>color='<STR_LIT>',<EOL>rasterized=True)<EOL><DEDENT>if phasewrap:<EOL><INDENT>inset.set_xlim(-<NUM_LIT>,<NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>inset.set_xlim(-<NUM_LIT:0.1>,<NUM_LIT>)<EOL><DEDENT>if not magsarefluxes:<EOL><INDENT>inset_ylim = inset.get_ylim()<EOL>inset.set_ylim((inset_ylim[<NUM_LIT:1>], 
inset_ylim[<NUM_LIT:0>]))<EOL><DEDENT>inset.text(<NUM_LIT:0.5>,<NUM_LIT:0.1>,'<STR_LIT>',<EOL>ha='<STR_LIT>',va='<STR_LIT>',transform=inset.transAxes)<EOL>inset.set_xticks([])<EOL>inset.set_yticks([])<EOL><DEDENT>", "docstring": "Makes the phased magseries plot tile for the `checkplot_png` and\n    `twolsp_checkplot_png` functions.\n\n    Parameters\n    ----------\n\n    axes : matplotlib.axes.Axes object\n        The Axes object where the generated plot will be written.\n\n    periodind : int\n        The index of the current best period being processed in the lspinfo\n        dict.\n\n    stimes,smags,serrs : np.array\n        The mag/flux time-series arrays along with associated errors. These\n        should all have been run through nan-stripping and sigma-clipping\n        beforehand.\n\n    varperiod : float or None\n        The period to use for this phased light curve plot tile.\n\n    varepoch : 'min' or float or list of lists or None\n        The epoch to use for this phased light curve plot tile. If this is a\n        float, will use the provided value directly. If this is 'min', will\n        automatically figure out the time-of-minimum of the phased light\n        curve. If this is None, will use the mimimum value of `stimes` as the\n        epoch of the phased light curve plot. If this is a list of lists, will\n        use the provided value of `lspmethodind` to look up the current\n        period-finder method and the provided value of `periodind` to look up\n        the epoch associated with that method and the current period. This is\n        mostly only useful when `twolspmode` is True.\n\n    phasewrap : bool\n        If this is True, the phased time-series will be wrapped around\n        phase 0.0.\n\n    phasesort : bool\n        If True, will sort the phased light curve in order of increasing phase.\n\n    phasebin: float\n        The bin size to use to group together measurements closer than this\n        amount in phase. 
This is in units of phase. If this is a float, a\n        phase-binned version of the phased light curve will be overplotted on\n        top of the regular phased light curve.\n\n    minbinelems : int\n        The minimum number of elements required per phase bin to include it in\n        the phased LC plot.\n\n    plotxlim : sequence of two floats or None\n        The x-range (min, max) of the phased light curve plot. If None, will be\n        determined automatically.\n\n    lspmethod : str\n        One of the three-letter keys corresponding to period-finder method names\n        in the `astrobase.plotbase.METHODSHORTLABELS` dict. Used to set the plot\n        title correctly.\n\n    lspmethodind : int\n        If `twolspmode` is set, this will be used to look up the correct epoch\n        associated with the current period-finder method and period.\n\n    xliminsetmode : bool\n        If this is True, the generated phased light curve plot will use the\n        values of `plotxlim` as the main plot x-axis limits (i.e. zoomed-in if\n        `plotxlim` is a range smaller than the full phase range), and will show\n        the full phased light curve plot as an smaller inset. Useful for\n        planetary transit light curves.\n\n    twolspmode : bool\n        If this is True, will use the `lspmethodind` and `periodind` to look up\n        the correct values of epoch, etc. 
in the provided `varepoch` list of\n        lists for plotting purposes.\n\n    magsarefluxes : bool\n        If True, indicates the input time-series is fluxes and not mags so the\n        plot y-axis direction and range can be set appropriately.\n\n    verbose : bool\n        If True, indicates progress.\n\n    phasems : float\n        The marker size to use for the main phased light curve plot symbols.\n\n    phasebinms : float\n        The marker size to use for the binned phased light curve plot symbols.\n\n    Returns\n    -------\n\n    Does not return anything, works on the input Axes object directly.", "id": "f14722:m2"}
{"signature": "def checkplot_png(lspinfo,<EOL>times,<EOL>mags,<EOL>errs,<EOL>varepoch='<STR_LIT>',<EOL>magsarefluxes=False,<EOL>objectinfo=None,<EOL>findercmap='<STR_LIT>',<EOL>finderconvolve=None,<EOL>findercachedir='<STR_LIT>',<EOL>normto='<STR_LIT>',<EOL>normmingap=<NUM_LIT>,<EOL>sigclip=<NUM_LIT>,<EOL>phasewrap=True,<EOL>phasesort=True,<EOL>phasebin=<NUM_LIT>,<EOL>minbinelems=<NUM_LIT:7>,<EOL>plotxlim=(-<NUM_LIT>,<NUM_LIT>),<EOL>xliminsetmode=False,<EOL>bestperiodhighlight=None,<EOL>plotdpi=<NUM_LIT:100>,<EOL>outfile=None,<EOL>verbose=True):", "body": "if not outfile and isinstance(lspinfo,str):<EOL><INDENT>plotfpath = os.path.join(<EOL>os.path.dirname(lspinfo),<EOL>'<STR_LIT>' % (<EOL>os.path.basename(lspinfo),<EOL>)<EOL>)<EOL><DEDENT>elif outfile:<EOL><INDENT>plotfpath = outfile<EOL><DEDENT>else:<EOL><INDENT>plotfpath = '<STR_LIT>'<EOL><DEDENT>if isinstance(lspinfo,str) and os.path.exists(lspinfo):<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % lspinfo)<EOL><DEDENT>if '<STR_LIT>' in lspinfo:<EOL><INDENT>with gzip.open(lspinfo,'<STR_LIT:rb>') as infd:<EOL><INDENT>lspinfo = pickle.load(infd)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>with open(lspinfo,'<STR_LIT:rb>') as infd:<EOL><INDENT>lspinfo = pickle.load(infd)<EOL><DEDENT><DEDENT><DEDENT>if ('<STR_LIT>' in lspinfo and<EOL>'<STR_LIT>' in lspinfo and<EOL>'<STR_LIT>' in lspinfo):<EOL><INDENT>bestperiod = lspinfo['<STR_LIT>']<EOL>nbestperiods = lspinfo['<STR_LIT>']<EOL>lspmethod = lspinfo['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return None<EOL><DEDENT>if not npisfinite(bestperiod):<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>return None<EOL><DEDENT>fig, axes = plt.subplots(<NUM_LIT:3>,<NUM_LIT:3>)<EOL>axes = npravel(axes)<EOL>fig.set_size_inches(<NUM_LIT:30>,<NUM_LIT>)<EOL>_make_periodogram(axes[<NUM_LIT:0>],lspinfo,objectinfo,<EOL>findercmap, finderconvolve,<EOL>verbose=verbose,<EOL>findercachedir=findercachedir)<EOL>stimes, smags, serrs = 
sigclip_magseries(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=magsarefluxes,<EOL>sigclip=sigclip)<EOL>if normto is not False:<EOL><INDENT>stimes, smags = normalize_magseries(stimes, smags,<EOL>normto=normto,<EOL>magsarefluxes=magsarefluxes,<EOL>mingap=normmingap)<EOL><DEDENT>if len(stimes) >= <NUM_LIT:50>:<EOL><INDENT>_make_magseries_plot(axes[<NUM_LIT:1>], stimes, smags, serrs,<EOL>magsarefluxes=magsarefluxes)<EOL>lspbestperiods = nbestperiods[::]<EOL>lspperiodone = lspbestperiods[<NUM_LIT:0>]<EOL>lspbestperiods.insert(<NUM_LIT:1>,lspperiodone*<NUM_LIT>)<EOL>lspbestperiods.insert(<NUM_LIT:1>,lspperiodone*<NUM_LIT:0.5>)<EOL>for periodind, varperiod in enumerate(lspbestperiods):<EOL><INDENT>if periodind == <NUM_LIT:0> and bestperiodhighlight:<EOL><INDENT>if MPLVERSION >= (<NUM_LIT:2>,<NUM_LIT:0>,<NUM_LIT:0>):<EOL><INDENT>axes[periodind+<NUM_LIT:2>].set_facecolor(bestperiodhighlight)<EOL><DEDENT>else:<EOL><INDENT>axes[periodind+<NUM_LIT:2>].set_axis_bgcolor(bestperiodhighlight)<EOL><DEDENT><DEDENT>_make_phased_magseries_plot(axes[periodind+<NUM_LIT:2>],<EOL>periodind,<EOL>stimes, smags, serrs,<EOL>varperiod, varepoch,<EOL>phasewrap, phasesort,<EOL>phasebin, minbinelems,<EOL>plotxlim, lspmethod,<EOL>xliminsetmode=xliminsetmode,<EOL>magsarefluxes=magsarefluxes,<EOL>verbose=verbose)<EOL><DEDENT>fig.set_tight_layout(True)<EOL>if plotfpath.endswith('<STR_LIT>'):<EOL><INDENT>fig.savefig(plotfpath,dpi=plotdpi)<EOL><DEDENT>else:<EOL><INDENT>fig.savefig(plotfpath)<EOL><DEDENT>plt.close('<STR_LIT:all>')<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % plotfpath)<EOL><DEDENT>return plotfpath<EOL><DEDENT>else:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>for periodind in range(<NUM_LIT:5>):<EOL><INDENT>axes[periodind+<NUM_LIT:2>].text(<EOL><NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>('<STR_LIT>'),<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>',<EOL>transform=axes[periodind+<NUM_LIT:2>].transAxes<EOL>)<EOL><DEDENT>fig.set_tight_layout(True)<EOL>if 
plotfpath.endswith('<STR_LIT>'):<EOL><INDENT>fig.savefig(plotfpath, dpi=plotdpi)<EOL><DEDENT>else:<EOL><INDENT>fig.savefig(plotfpath)<EOL><DEDENT>plt.close('<STR_LIT:all>')<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % plotfpath)<EOL><DEDENT>return plotfpath<EOL><DEDENT>", "docstring": "This makes a checkplot PNG using the output from a period-finder routine.\n\n    A checkplot is a 3 x 3 grid of plots like so::\n\n        [periodogram + objectinfo] [     unphased LC     ] [period 1 phased LC]\n        [  period 1 phased LC /2 ] [period 1 phased LC x2] [period 2 phased LC]\n        [   period 3 phased LC   ] [period 4 phased LC   ] [period 5 phased LC]\n\n    This is used to sanity check the five best periods obtained from a\n    period-finder function in `astrobase.periodbase` or from your own\n    period-finder routines if their results can be turned into a dict with the\n    format shown below.\n\n    Parameters\n    ----------\n\n    lspinfo : dict or str\n        If this is a dict, it must be a dict produced by an\n        `astrobase.periodbase` period-finder function or a dict from your own\n        period-finder function or routine that is of the form below with at\n        least these keys::\n\n            {'periods': np.array of all periods searched by the period-finder,\n             'lspvals': np.array of periodogram power value for each period,\n             'bestperiod': a float value that is the period with the highest\n                           peak in the periodogram, i.e. 
the most-likely actual\n                           period,\n             'method': a three-letter code naming the period-finder used; must\n                       be one of the keys in the\n                       `astrobase.periodbase.METHODLABELS` dict,\n             'nbestperiods': a list of the periods corresponding to periodogram\n                             peaks (`nbestlspvals` below) to annotate on the\n                             periodogram plot so they can be called out\n                             visually,\n             'nbestlspvals': a list of the power values associated with\n                             periodogram peaks to annotate on the periodogram\n                             plot so they can be called out visually; should be\n                             the same length as `nbestperiods` above}\n\n        `nbestperiods` and `nbestlspvals` must have at least 5 elements each,\n        e.g. describing the five 'best' (highest power) peaks in the\n        periodogram.\n\n        If lspinfo is a str, then it must be a path to a pickle file (ending\n        with the extension '.pkl' or '.pkl.gz') that contains a dict of the form\n        described above.\n\n    times,mags,errs : np.array\n        The mag/flux time-series arrays to process along with associated errors.\n\n    varepoch : 'min' or float or None or list of lists\n        This sets the time of minimum light finding strategy for the checkplot::\n\n                                                   the epoch used for all phased\n            If `varepoch` is None               -> light curve plots will be\n                                                   `min(times)`.\n\n            If `varepoch='min'`                 -> automatic epoch finding for all\n                                                   periods using light curve fits.\n\n            If varepoch is a single float       -> this epoch will be used for all\n                                                   phased light 
curve plots\n\n            If varepoch is a list of floats        each epoch will be applied to\n            with length = `len(nbestperiods)+2` -> the phased light curve for each\n            from period-finder results             period specifically\n\n        If you use a list for varepoch, it must be of length\n        `len(lspinfo['nbestperiods']) + 2`, because we insert half and twice the\n        period into the best periods list to make those phased LC plots.\n\n    magsarefluxes : bool\n        If True, indicates the input time-series is fluxes and not mags so the\n        plot y-axis direction and range can be set appropriately.\n\n    objectinfo : dict or None\n        If provided, this is a dict containing information on the object whose\n        light curve is being processed. This function will then be able to look\n        up and download a finder chart for this object and write that to the\n        output checkplot PNG image.The `objectinfo` dict must be of the form and\n        contain at least the keys described below::\n\n            {'objectid': the name of the object,\n             'ra': the right ascension of the object in decimal degrees,\n             'decl': the declination of the object in decimal degrees,\n             'ndet': the number of observations of this object}\n\n        You can also provide magnitudes and proper motions of the object using\n        the following keys and the appropriate values in the `objectinfo`\n        dict. These will be used to calculate colors, total and reduced proper\n        motion, etc. 
and display these in the output checkplot PNG.\n\n        - SDSS mag keys: 'sdssu', 'sdssg', 'sdssr', 'sdssi', 'sdssz'\n        - 2MASS mag keys: 'jmag', 'hmag', 'kmag'\n        - Cousins mag keys: 'bmag', 'vmag'\n        - GAIA specific keys: 'gmag', 'teff'\n        - proper motion keys: 'pmra', 'pmdecl'\n\n    findercmap : str or matplotlib.cm.ColorMap object\n        The Colormap object to use for the finder chart image.\n\n    finderconvolve : astropy.convolution.Kernel object or None\n        If not None, the Kernel object to use for convolving the finder image.\n\n    findercachedir : str\n        The directory where the FITS finder images are downloaded and cached.\n\n    normto : {'globalmedian', 'zero'} or a float\n        This sets the normalization target::\n\n            'globalmedian' -> norms each mag to global median of the LC column\n            'zero'         -> norms each mag to zero\n            a float        -> norms each mag to this specified float value.\n\n    normmingap : float\n        This defines how much the difference between consecutive measurements is\n        allowed to be to consider them as parts of different timegroups. By\n        default it is set to 4.0 days.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    phasewrap : bool\n        If this is True, the phased time-series will be wrapped around phase\n        0.0.\n\n    phasesort : bool\n        If this is True, the phased time-series will be sorted in phase.\n\n    phasebin : float or None\n        If this is provided, indicates the bin size to use to group together\n        measurements closer than this amount in phase. This is in units of\n        phase. The binned phased light curve will be overplotted on top of the\n        phased light curve. Useful for when one has many measurement points and\n        needs to pick out a small trend in an otherwise noisy phased light\n        curve.\n\n    minbinelems : int\n        The minimum number of elements in each phase bin.\n\n    plotxlim : sequence of two floats or None\n        The x-axis limits to use when making the phased light curve plot. By\n        default, this is (-0.8, 0.8), which places phase 0.0 at the center of\n        the plot and covers approximately two cycles in phase to make any trends\n        clear.\n\n    xliminsetmode : bool\n        If this is True, the generated phased light curve plot will use the\n        values of `plotxlim` as the main plot x-axis limits (i.e. zoomed-in if\n        `plotxlim` is a range smaller than the full phase range), and will show\n        the full phased light curve plot as an smaller inset. 
Useful for\n        planetary transit light curves.\n\n    bestperiodhighlight : str or None\n        If not None, this is a str with a matplotlib color specification to use\n        as the background color to highlight the phased light curve plot of the\n        'best' period and epoch combination. If None, no highlight will be\n        applied.\n\n    outfile : str or None\n        The file name of the file to save the checkplot to. If this is None,\n        will write to a file called 'checkplot.png' in the current working\n        directory.\n\n    plotdpi : int\n        Sets the resolution in DPI for PNG plots (default = 100).\n\n    verbose : bool\n        If False, turns off many of the informational messages. Useful for\n        when an external function is driving lots of `checkplot_png` calls.\n\n    Returns\n    -------\n\n    str\n        The file path to the generated checkplot PNG file.", "id": "f14722:m3"}
{"signature": "def twolsp_checkplot_png(lspinfo1,<EOL>lspinfo2,<EOL>times,<EOL>mags,<EOL>errs,<EOL>varepoch='<STR_LIT>',<EOL>magsarefluxes=False,<EOL>objectinfo=None,<EOL>findercmap='<STR_LIT>',<EOL>finderconvolve=None,<EOL>findercachedir='<STR_LIT>',<EOL>normto='<STR_LIT>',<EOL>normmingap=<NUM_LIT>,<EOL>sigclip=<NUM_LIT>,<EOL>phasewrap=True,<EOL>phasesort=True,<EOL>phasebin=<NUM_LIT>,<EOL>minbinelems=<NUM_LIT:7>,<EOL>plotxlim=(-<NUM_LIT>,<NUM_LIT>),<EOL>unphasedms=<NUM_LIT>,<EOL>phasems=<NUM_LIT>,<EOL>phasebinms=<NUM_LIT>,<EOL>xliminsetmode=False,<EOL>bestperiodhighlight=None,<EOL>plotdpi=<NUM_LIT:100>,<EOL>outfile=None,<EOL>verbose=True):", "body": "<EOL>if not outfile and isinstance(lspinfo1,str):<EOL><INDENT>plotfpath = os.path.join(<EOL>os.path.dirname(lspinfo1),<EOL>'<STR_LIT>' % (<EOL>os.path.basename(lspinfo1),<EOL>)<EOL>)<EOL><DEDENT>elif outfile:<EOL><INDENT>plotfpath = outfile<EOL><DEDENT>else:<EOL><INDENT>plotfpath = '<STR_LIT>'<EOL><DEDENT>if isinstance(lspinfo1,str) and os.path.exists(lspinfo1):<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % lspinfo1)<EOL><DEDENT>if '<STR_LIT>' in lspinfo1:<EOL><INDENT>with gzip.open(lspinfo1,'<STR_LIT:rb>') as infd:<EOL><INDENT>lspinfo1 = pickle.load(infd)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>with open(lspinfo1,'<STR_LIT:rb>') as infd:<EOL><INDENT>lspinfo1 = pickle.load(infd)<EOL><DEDENT><DEDENT><DEDENT>if isinstance(lspinfo2,str) and os.path.exists(lspinfo2):<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % lspinfo2)<EOL><DEDENT>if '<STR_LIT>' in lspinfo2:<EOL><INDENT>with gzip.open(lspinfo2,'<STR_LIT:rb>') as infd:<EOL><INDENT>lspinfo2 = pickle.load(infd)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>with open(lspinfo2,'<STR_LIT:rb>') as infd:<EOL><INDENT>lspinfo2 = pickle.load(infd)<EOL><DEDENT><DEDENT><DEDENT>if ('<STR_LIT>' in lspinfo1 and '<STR_LIT>' in lspinfo2 and<EOL>'<STR_LIT>' in lspinfo1 and '<STR_LIT>' in lspinfo2 and<EOL>'<STR_LIT>' in lspinfo1 and '<STR_LIT>' in 
lspinfo2):<EOL><INDENT>bestperiod1 = lspinfo1['<STR_LIT>']<EOL>nbestperiods1 = lspinfo1['<STR_LIT>']<EOL>lspmethod1 = lspinfo1['<STR_LIT>']<EOL>bestperiod2 = lspinfo2['<STR_LIT>']<EOL>nbestperiods2 = lspinfo2['<STR_LIT>']<EOL>lspmethod2 = lspinfo2['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return None<EOL><DEDENT>if (not npisfinite(bestperiod1)) or (not npisfinite(bestperiod2)):<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>return None<EOL><DEDENT>fig, axes = plt.subplots(<NUM_LIT:3>,<NUM_LIT:3>)<EOL>axes = npravel(axes)<EOL>fig.set_size_inches(<NUM_LIT:30>,<NUM_LIT>)<EOL>_make_periodogram(axes[<NUM_LIT:0>], lspinfo1, objectinfo,<EOL>findercmap, finderconvolve,<EOL>verbose=verbose,<EOL>findercachedir=findercachedir)<EOL>_make_periodogram(axes[<NUM_LIT:1>], lspinfo2, None,<EOL>findercmap, finderconvolve)<EOL>stimes, smags, serrs = sigclip_magseries(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=magsarefluxes,<EOL>sigclip=sigclip)<EOL>if normto is not False:<EOL><INDENT>stimes, smags = normalize_magseries(stimes, smags,<EOL>normto=normto,<EOL>magsarefluxes=magsarefluxes,<EOL>mingap=normmingap)<EOL><DEDENT>if len(stimes) >= <NUM_LIT:50>:<EOL><INDENT>_make_magseries_plot(axes[<NUM_LIT:2>], stimes, smags, serrs,<EOL>magsarefluxes=magsarefluxes,<EOL>ms=unphasedms)<EOL>lspbestperiods1 = nbestperiods1[::]<EOL>lspbestperiods2 = nbestperiods2[::]<EOL>for periodind, varperiod, plotaxes in zip([<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:2>],<EOL>lspbestperiods1[:<NUM_LIT:3>],<EOL>[axes[<NUM_LIT:3>], axes[<NUM_LIT:4>], axes[<NUM_LIT:5>]]):<EOL><INDENT>if periodind == <NUM_LIT:0> and bestperiodhighlight:<EOL><INDENT>if MPLVERSION >= (<NUM_LIT:2>,<NUM_LIT:0>,<NUM_LIT:0>):<EOL><INDENT>plotaxes.set_facecolor(bestperiodhighlight)<EOL><DEDENT>else:<EOL><INDENT>plotaxes.set_axis_bgcolor(bestperiodhighlight)<EOL><DEDENT><DEDENT>_make_phased_magseries_plot(plotaxes,<EOL>periodind,<EOL>stimes, smags, serrs,<EOL>varperiod, varepoch,<EOL>phasewrap, 
phasesort,<EOL>phasebin, minbinelems,<EOL>plotxlim, lspmethod1,<EOL>lspmethodind=<NUM_LIT:0>,<EOL>twolspmode=True,<EOL>magsarefluxes=magsarefluxes,<EOL>xliminsetmode=xliminsetmode,<EOL>verbose=verbose,<EOL>phasems=phasems,<EOL>phasebinms=phasebinms)<EOL><DEDENT>for periodind, varperiod, plotaxes in zip([<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:2>],<EOL>lspbestperiods2[:<NUM_LIT:3>],<EOL>[axes[<NUM_LIT:6>], axes[<NUM_LIT:7>], axes[<NUM_LIT:8>]]):<EOL><INDENT>if periodind == <NUM_LIT:0> and bestperiodhighlight:<EOL><INDENT>if MPLVERSION >= (<NUM_LIT:2>,<NUM_LIT:0>,<NUM_LIT:0>):<EOL><INDENT>plotaxes.set_facecolor(bestperiodhighlight)<EOL><DEDENT>else:<EOL><INDENT>plotaxes.set_axis_bgcolor(bestperiodhighlight)<EOL><DEDENT><DEDENT>_make_phased_magseries_plot(plotaxes,<EOL>periodind,<EOL>stimes, smags, serrs,<EOL>varperiod, varepoch,<EOL>phasewrap, phasesort,<EOL>phasebin, minbinelems,<EOL>plotxlim, lspmethod2,<EOL>lspmethodind=<NUM_LIT:1>,<EOL>twolspmode=True,<EOL>magsarefluxes=magsarefluxes,<EOL>xliminsetmode=xliminsetmode,<EOL>verbose=verbose,<EOL>phasems=phasems,<EOL>phasebinms=phasebinms)<EOL><DEDENT>fig.set_tight_layout(True)<EOL>if plotfpath.endswith('<STR_LIT>'):<EOL><INDENT>fig.savefig(plotfpath,dpi=plotdpi)<EOL><DEDENT>else:<EOL><INDENT>fig.savefig(plotfpath)<EOL><DEDENT>plt.close()<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % plotfpath)<EOL><DEDENT>return plotfpath<EOL><DEDENT>else:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>for periodind in range(<NUM_LIT:5>):<EOL><INDENT>axes[periodind+<NUM_LIT:2>].text(<EOL><NUM_LIT:0.5>,<NUM_LIT:0.5>,<EOL>('<STR_LIT>'),<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>',<EOL>transform=axes[periodind+<NUM_LIT:2>].transAxes<EOL>)<EOL><DEDENT>fig.set_tight_layout(True)<EOL>if plotfpath.endswith('<STR_LIT>'):<EOL><INDENT>fig.savefig(plotfpath, dpi=plotdpi)<EOL><DEDENT>else:<EOL><INDENT>fig.savefig(plotfpath)<EOL><DEDENT>plt.close()<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % plotfpath)<EOL><DEDENT>return 
plotfpath<EOL><DEDENT>", "docstring": "This makes a checkplot using results from two independent period-finders.\n\n    Adapted from Luke Bouma's implementation of a similar function in his\n    work. This makes a special checkplot that uses two lspinfo dictionaries,\n    from two independent period-finding methods. For EBs, it's probably best to\n    use Stellingwerf PDM or Schwarzenberg-Czerny AoV as one of these, and the\n    Box Least-squared Search method as the other one.\n\n    The checkplot layout in this case is::\n\n        [ pgram1 + objectinfo ] [        pgram2       ] [     unphased LC     ]\n        [ pgram1 P1 phased LC ] [ pgram1 P2 phased LC ] [ pgram1 P3 phased LC ]\n        [ pgram2 P1 phased LC ] [ pgram2 P2 phased LC ] [ pgram2 P3 phased LC ]\n\n    where:\n\n    - pgram1 is the plot for the periodogram in the lspinfo1 dict\n    - pgram1 P1, P2, and P3 are the best three periods from lspinfo1\n    - pgram2 is the plot for the periodogram in the lspinfo2 dict\n    - pgram2 P1, P2, and P3 are the best three periods from lspinfo2\n\n    Note that we take the output file name from lspinfo1 if lspinfo1 is a string\n    filename pointing to a (gzipped) pickle containing the results dict from a\n    period-finding routine similar to those in periodbase.\n\n    Parameters\n    ----------\n\n    lspinfo1,lspinfo2 : dict or str\n        If this is a dict, it must be a dict produced by an\n        `astrobase.periodbase` period-finder function or a dict from your own\n        period-finder function or routine that is of the form below with at\n        least these keys::\n\n            {'periods': np.array of all periods searched by the period-finder,\n             'lspvals': np.array of periodogram power value for each period,\n             'bestperiod': a float value that is the period with the highest\n                           peak in the periodogram, i.e. 
the most-likely actual\n                           period,\n             'method': a three-letter code naming the period-finder used; must\n                       be one of the keys in the\n                       `astrobase.periodbase.METHODLABELS` dict,\n             'nbestperiods': a list of the periods corresponding to periodogram\n                             peaks (`nbestlspvals` below) to annotate on the\n                             periodogram plot so they can be called out\n                             visually,\n             'nbestlspvals': a list of the power values associated with\n                             periodogram peaks to annotate on the periodogram\n                             plot so they can be called out visually; should be\n                             the same length as `nbestperiods` above}\n\n        `nbestperiods` and `nbestlspvals` must have at least 3 elements each,\n        e.g. describing the three 'best' (highest power) peaks in the\n        periodogram.\n\n        If lspinfo is a str, then it must be a path to a pickle file (ending\n        with the extension '.pkl' or '.pkl.gz') that contains a dict of the form\n        described above.\n\n    times,mags,errs : np.array\n        The mag/flux time-series arrays to process along with associated errors.\n\n    varepoch : 'min' or float or None or list of lists\n        This sets the time of minimum light finding strategy for the checkplot::\n\n                                                   the epoch used for all phased\n            If `varepoch` is None               -> light curve plots will be\n                                                   `min(times)`.\n\n            If `varepoch='min'`                 -> automatic epoch finding for all\n                                                   periods using light curve fits.\n\n            If varepoch is a single float       -> this epoch will be used for all\n                                                   phased light 
curve plots\n\n            If varepoch is a list of floats        each epoch will be applied to\n            with length = `len(nbestperiods)` ->   the phased light curve for each\n            from period-finder results             period specifically\n\n        If you use a list for varepoch, it must be of length\n        `len(lspinfo['nbestperiods'])`.\n\n    magsarefluxes : bool\n        If True, indicates the input time-series is fluxes and not mags so the\n        plot y-axis direction and range can be set appropriately/\n\n    objectinfo : dict or None\n        If provided, this is a dict containing information on the object whose\n        light curve is being processed. This function will then be able to look\n        up and download a finder chart for this object and write that to the\n        output checkplot PNG image.The `objectinfo` dict must be of the form and\n        contain at least the keys described below::\n\n            {'objectid': the name of the object,\n             'ra': the right ascension of the object in decimal degrees,\n             'decl': the declination of the object in decimal degrees,\n             'ndet': the number of observations of this object}\n\n        You can also provide magnitudes and proper motions of the object using\n        the following keys and the appropriate values in the `objectinfo`\n        dict. These will be used to calculate colors, total and reduced proper\n        motion, etc. 
and display these in the output checkplot PNG.\n\n        - SDSS mag keys: 'sdssu', 'sdssg', 'sdssr', 'sdssi', 'sdssz'\n        - 2MASS mag keys: 'jmag', 'hmag', 'kmag'\n        - Cousins mag keys: 'bmag', 'vmag'\n        - GAIA specific keys: 'gmag', 'teff'\n        - proper motion keys: 'pmra', 'pmdecl'\n\n    findercmap : str or matplotlib.cm.ColorMap object\n        The Colormap object to use for the finder chart image.\n\n    finderconvolve : astropy.convolution.Kernel object or None\n        If not None, the Kernel object to use for convolving the finder image.\n\n    findercachedir : str\n        The directory where the FITS finder images are downloaded and cached.\n\n    normto : {'globalmedian', 'zero'} or a float\n        This sets the LC normalization target::\n\n            'globalmedian' -> norms each mag to global median of the LC column\n            'zero'         -> norms each mag to zero\n            a float        -> norms each mag to this specified float value.\n\n    normmingap : float\n        This defines how much the difference between consecutive measurements is\n        allowed to be to consider them as parts of different timegroups. By\n        default it is set to 4.0 days.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    phasewrap : bool\n        If this is True, the phased time-series will be wrapped around phase\n        0.0.\n\n    phasesort : bool\n        If this is True, the phased time-series will be sorted in phase.\n\n    phasebin : float or None\n        If this is provided, indicates the bin size to use to group together\n        measurements closer than this amount in phase. This is in units of\n        phase. The binned phased light curve will be overplotted on top of the\n        phased light curve. Useful for when one has many measurement points and\n        needs to pick out a small trend in an otherwise noisy phased light\n        curve.\n\n    minbinelems : int\n        The minimum number of elements in each phase bin.\n\n    plotxlim : sequence of two floats or None\n        The x-axis limits to use when making the phased light curve plot. By\n        default, this is (-0.8, 0.8), which places phase 0.0 at the center of\n        the plot and covers approximately two cycles in phase to make any trends\n        clear.\n\n    unphasedms : float\n        The marker size to use for the main unphased light curve plot symbols.\n\n    phasems : float\n        The marker size to use for the main phased light curve plot symbols.\n\n    phasebinms : float\n        The marker size to use for the binned phased light curve plot symbols.\n\n    xliminsetmode : bool\n        If this is True, the generated phased light curve plot will use the\n        values of `plotxlim` as the main plot x-axis limits (i.e. 
zoomed-in if\n        `plotxlim` is a range smaller than the full phase range), and will show\n        the full phased light curve plot as an smaller inset. Useful for\n        planetary transit light curves.\n\n    bestperiodhighlight : str or None\n        If not None, this is a str with a matplotlib color specification to use\n        as the background color to highlight the phased light curve plot of the\n        'best' period and epoch combination. If None, no highlight will be\n        applied.\n\n    outfile : str or None\n        The file name of the file to save the checkplot to. If this is None,\n        will write to a file called 'checkplot.png' in the current working\n        directory.\n\n    plotdpi : int\n        Sets the resolution in DPI for PNG plots (default = 100).\n\n    verbose : bool\n        If False, turns off many of the informational messages. Useful for\n        when an external function is driving lots of `checkplot_png` calls.\n\n    Returns\n    -------\n\n    str\n        The file path to the generated checkplot PNG file.", "id": "f14722:m4"}
{"signature": "def checkplot_dict(<EOL>lspinfolist,<EOL>times,<EOL>mags,<EOL>errs,<EOL>fast_mode=False,<EOL>magsarefluxes=False,<EOL>nperiodstouse=<NUM_LIT:3>,<EOL>objectinfo=None,<EOL>deredden_object=True,<EOL>custom_bandpasses=None,<EOL>gaia_submit_timeout=<NUM_LIT>,<EOL>gaia_submit_tries=<NUM_LIT:3>,<EOL>gaia_max_timeout=<NUM_LIT>,<EOL>gaia_mirror=None,<EOL>complete_query_later=True,<EOL>varinfo=None,<EOL>getvarfeatures=True,<EOL>lclistpkl=None,<EOL>nbrradiusarcsec=<NUM_LIT>,<EOL>maxnumneighbors=<NUM_LIT:5>,<EOL>xmatchinfo=None,<EOL>xmatchradiusarcsec=<NUM_LIT>,<EOL>lcfitfunc=None,<EOL>lcfitparams=None,<EOL>externalplots=None,<EOL>findercmap='<STR_LIT>',<EOL>finderconvolve=None,<EOL>findercachedir='<STR_LIT>',<EOL>normto='<STR_LIT>',<EOL>normmingap=<NUM_LIT>,<EOL>sigclip=<NUM_LIT>,<EOL>varepoch='<STR_LIT>',<EOL>phasewrap=True,<EOL>phasesort=True,<EOL>phasebin=<NUM_LIT>,<EOL>minbinelems=<NUM_LIT:7>,<EOL>plotxlim=(-<NUM_LIT>,<NUM_LIT>),<EOL>xliminsetmode=False,<EOL>plotdpi=<NUM_LIT:100>,<EOL>bestperiodhighlight=None,<EOL>xgridlines=None,<EOL>mindet=<NUM_LIT>,<EOL>verbose=True<EOL>):", "body": "<EOL>try:<EOL><INDENT>objuuid = hashlib.sha512(times[<NUM_LIT:5>:<NUM_LIT:10>].tostring() +<EOL>mags[<NUM_LIT:5>:<NUM_LIT:10>].tostring()).hexdigest()[:<NUM_LIT:5>]<EOL><DEDENT>except Exception as e:<EOL><INDENT>if verbose:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>objuuid = hashlib.sha512(times.tostring() +<EOL>mags.tostring()).hexdigest()[:<NUM_LIT:5>]<EOL><DEDENT><DEDENT>if (objectinfo is None):<EOL><INDENT>if verbose:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>objectinfo = {'<STR_LIT>':objuuid}<EOL><DEDENT>elif (isinstance(objectinfo, dict) and '<STR_LIT>' in objectinfo):<EOL><INDENT>objectinfo['<STR_LIT>'] = objectinfo['<STR_LIT>']<EOL><DEDENT>elif ((isinstance(objectinfo, dict) and '<STR_LIT>' not in objectinfo) or<EOL>(isinstance(objectinfo, dict) and '<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or objectinfo['<STR_LIT>'] == 
'<STR_LIT>'))):<EOL><INDENT>if verbose:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>objectinfo['<STR_LIT>'] = objuuid<EOL><DEDENT>checkplotdict = _pkl_finder_objectinfo(<EOL>objectinfo,<EOL>varinfo,<EOL>findercmap,<EOL>finderconvolve,<EOL>sigclip,<EOL>normto,<EOL>normmingap,<EOL>deredden_object=deredden_object,<EOL>custom_bandpasses=custom_bandpasses,<EOL>lclistpkl=lclistpkl,<EOL>nbrradiusarcsec=nbrradiusarcsec,<EOL>maxnumneighbors=maxnumneighbors,<EOL>plotdpi=plotdpi,<EOL>verbose=verbose,<EOL>findercachedir=findercachedir,<EOL>gaia_submit_timeout=gaia_submit_timeout,<EOL>gaia_submit_tries=gaia_submit_tries,<EOL>gaia_max_timeout=gaia_max_timeout,<EOL>gaia_mirror=gaia_mirror,<EOL>complete_query_later=complete_query_later,<EOL>fast_mode=fast_mode<EOL>)<EOL>if (objectinfo and isinstance(objectinfo, dict) and<EOL>'<STR_LIT>' in objectinfo and objectinfo['<STR_LIT>']):<EOL><INDENT>checkplotdict['<STR_LIT>'] = objectinfo['<STR_LIT>']<EOL><DEDENT>stimes, smags, serrs = sigclip_magseries(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=magsarefluxes,<EOL>sigclip=sigclip)<EOL>if ((stimes is None) or (smags is None) or (serrs is None) or<EOL>(stimes.size < <NUM_LIT>) or (smags.size < <NUM_LIT>) or (serrs.size < <NUM_LIT>)):<EOL><INDENT>LOGERROR(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" %<EOL>checkplotdict['<STR_LIT>'])<EOL>checkplotdict['<STR_LIT>'] = None<EOL>checkplotdict['<STR_LIT:status>'] = '<STR_LIT>'<EOL>return checkplotdict<EOL><DEDENT>if isinstance(stimes, AstColumn):<EOL><INDENT>stimes = stimes.data<EOL>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if isinstance(smags, AstColumn):<EOL><INDENT>smags = smags.data<EOL>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if isinstance(serrs, AstColumn):<EOL><INDENT>serrs = serrs.data<EOL>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(sigclip, len(times), 
len(stimes)))<EOL><DEDENT>if normto is not False:<EOL><INDENT>stimes, smags = normalize_magseries(stimes, smags,<EOL>normto=normto,<EOL>magsarefluxes=magsarefluxes,<EOL>mingap=normmingap)<EOL><DEDENT>if len(stimes) > mindet:<EOL><INDENT>magseriesdict = _pkl_magseries_plot(stimes, smags, serrs,<EOL>plotdpi=plotdpi,<EOL>magsarefluxes=magsarefluxes)<EOL>checkplotdict.update(magseriesdict)<EOL>checkplot_pfmethods = []<EOL>for lspind, lspinfo in enumerate(lspinfolist):<EOL><INDENT>if isinstance(lspinfo,str) and os.path.exists(lspinfo):<EOL><INDENT>LOGINFO('<STR_LIT>' % lspinfo)<EOL>if '<STR_LIT>' in lspinfo:<EOL><INDENT>with gzip.open(lspinfo,'<STR_LIT:rb>') as infd:<EOL><INDENT>lspinfo = pickle.load(infd)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>with open(lspinfo,'<STR_LIT:rb>') as infd:<EOL><INDENT>lspinfo = pickle.load(infd)<EOL><DEDENT><DEDENT><DEDENT>override_pfmethod = '<STR_LIT>' % (lspind, lspinfo['<STR_LIT>'])<EOL>periodogramdict = _pkl_periodogram(<EOL>lspinfo,<EOL>plotdpi=plotdpi,<EOL>override_pfmethod=override_pfmethod<EOL>)<EOL>checkplotdict.update(periodogramdict)<EOL>for nbpind, nbperiod in enumerate(<EOL>lspinfo['<STR_LIT>'][:nperiodstouse]<EOL>):<EOL><INDENT>if lcfitfunc:<EOL><INDENT>try:<EOL><INDENT>if lcfitparams is None:<EOL><INDENT>lcfitparams = {}<EOL><DEDENT>overplotfit = lcfitfunc(stimes,<EOL>smags,<EOL>serrs,<EOL>nbperiod,<EOL>**lcfitparams)<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>overplotfit = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>overplotfit = None<EOL><DEDENT>if (lspinfo is not None and<EOL>'<STR_LIT>' in lspinfo['<STR_LIT>'] and<EOL>'<STR_LIT>' in lspinfo):<EOL><INDENT>thisvarepoch = lspinfo['<STR_LIT>'][nbpind]<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (thisvarepoch, nbperiod)<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>thisvarepoch = varepoch<EOL><DEDENT>checkplotdict = 
_pkl_phased_magseries_plot(<EOL>checkplotdict,<EOL>lspinfo['<STR_LIT>'],<EOL>nbpind,<EOL>stimes, smags, serrs,<EOL>nbperiod, thisvarepoch,<EOL>lspmethodind=lspind,<EOL>phasewrap=phasewrap,<EOL>phasesort=phasesort,<EOL>phasebin=phasebin,<EOL>minbinelems=minbinelems,<EOL>plotxlim=plotxlim,<EOL>overplotfit=overplotfit,<EOL>plotdpi=plotdpi,<EOL>bestperiodhighlight=bestperiodhighlight,<EOL>magsarefluxes=magsarefluxes,<EOL>xliminsetmode=xliminsetmode,<EOL>xgridlines=xgridlines,<EOL>verbose=verbose,<EOL>override_pfmethod=override_pfmethod,<EOL>)<EOL><DEDENT>if '<STR_LIT>' in lspinfo:<EOL><INDENT>if override_pfmethod in checkplotdict:<EOL><INDENT>checkplotdict[override_pfmethod]['<STR_LIT>'] = (<EOL>lspinfo['<STR_LIT>']<EOL>)<EOL><DEDENT><DEDENT>if '<STR_LIT>' in lspinfo:<EOL><INDENT>if override_pfmethod in checkplotdict:<EOL><INDENT>checkplotdict[override_pfmethod]['<STR_LIT>'] = (<EOL>lspinfo['<STR_LIT>']<EOL>)<EOL><DEDENT><DEDENT>if '<STR_LIT>' in lspinfo:<EOL><INDENT>if override_pfmethod in checkplotdict:<EOL><INDENT>checkplotdict[override_pfmethod]['<STR_LIT>'] = (<EOL>lspinfo['<STR_LIT>']<EOL>)<EOL><DEDENT><DEDENT>checkplot_pfmethods.append(override_pfmethod)<EOL><DEDENT>checkplotdict['<STR_LIT>'] = None<EOL>if getvarfeatures is True:<EOL><INDENT>checkplotdict['<STR_LIT>']['<STR_LIT>'] = all_nonperiodic_features(<EOL>stimes,<EOL>smags,<EOL>serrs,<EOL>magsarefluxes=magsarefluxes,<EOL>)<EOL><DEDENT>checkplotdict['<STR_LIT>'] = {}<EOL>checkplotdict['<STR_LIT>'] = []<EOL>if (externalplots and<EOL>isinstance(externalplots, list) and<EOL>len(externalplots) > <NUM_LIT:0>):<EOL><INDENT>for externalrow in externalplots:<EOL><INDENT>if all(os.path.exists(erowfile) for erowfile in externalrow):<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' %<EOL>repr(externalrow))<EOL><DEDENT>checkplotdict['<STR_LIT>'].append(externalrow)<EOL><DEDENT>else:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% repr(externalrow))<EOL><DEDENT><DEDENT><DEDENT>if xmatchinfo is not 
None:<EOL><INDENT>checkplotdict = xmatch_external_catalogs(<EOL>checkplotdict,<EOL>xmatchinfo,<EOL>xmatchradiusarcsec=xmatchradiusarcsec<EOL>)<EOL><DEDENT>contents = sorted(list(checkplotdict.keys()))<EOL>checkplotdict['<STR_LIT:status>'] = '<STR_LIT>' % contents<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' %<EOL>checkplotdict['<STR_LIT>'])<EOL>LOGINFO('<STR_LIT>' % contents)<EOL><DEDENT>checkplotdict['<STR_LIT>'] = checkplot_pfmethods<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>' %<EOL>(checkplotdict['<STR_LIT>'],len(stimes),mindet))<EOL>checkplotdict['<STR_LIT>'] = None<EOL>checkplotdict['<STR_LIT:status>'] = '<STR_LIT>'<EOL><DEDENT>return checkplotdict<EOL>", "docstring": "This writes a multiple lspinfo checkplot to a dict.\n\n    This function can take input from multiple lspinfo dicts (e.g. a list of\n    output dicts or gzipped pickles of dicts from independent runs of BLS, PDM,\n    AoV, or GLS period-finders in periodbase).\n\n    NOTE: if `lspinfolist` contains more than one lspinfo object with the same\n    lspmethod ('pdm','gls','sls','aov','bls'), the latest one in the list will\n    overwrite the earlier ones.\n\n    The output dict contains all the plots (magseries and phased magseries),\n    periodograms, object information, variability information, light curves, and\n    phased light curves. This can be written to:\n\n    - a pickle with `checkplot_pickle` below\n    - a PNG with `checkplot.pkl_png.checkplot_pickle_to_png`\n\n    Parameters\n    ----------\n\n    lspinfolist : list of dicts\n        This is a list of dicts containing period-finder results ('lspinfo'\n        dicts). These can be from any of the period-finder methods in\n        astrobase.periodbase. 
To incorporate external period-finder results into\n        checkplots, these dicts must be of the form below, including at least\n        the keys indicated here::\n\n            {'periods': np.array of all periods searched by the period-finder,\n             'lspvals': np.array of periodogram power value for each period,\n             'bestperiod': a float value that is the period with the highest\n                           peak in the periodogram, i.e. the most-likely actual\n                           period,\n             'method': a three-letter code naming the period-finder used; must\n                       be one of the keys in the\n                       `astrobase.periodbase.METHODLABELS` dict,\n             'nbestperiods': a list of the periods corresponding to periodogram\n                             peaks (`nbestlspvals` below) to annotate on the\n                             periodogram plot so they can be called out\n                             visually,\n             'nbestlspvals': a list of the power values associated with\n                             periodogram peaks to annotate on the periodogram\n                             plot so they can be called out visually; should be\n                             the same length as `nbestperiods` above}\n\n        `nbestperiods` and `nbestlspvals` in each lspinfo dict must have at\n        least as many elements as the `nperiodstouse` kwarg to this function.\n\n    times,mags,errs : np.arrays\n        The magnitude/flux time-series to process for this checkplot along with\n        their associated measurement errors.\n\n    fast_mode : bool or float\n        This runs the external catalog operations in a \"fast\" mode, with short\n        timeouts and not trying to hit external catalogs that take a long time\n        to respond.\n\n        If this is set to True, the default settings for the external requests\n        will then become::\n\n                skyview_lookup = False\n                
skyview_timeout = 10.0\n                skyview_retry_failed = False\n                dust_timeout = 10.0\n                gaia_submit_timeout = 7.0\n                gaia_max_timeout = 10.0\n                gaia_submit_tries = 2\n                complete_query_later = False\n                search_simbad = False\n\n        If this is a float, will run in \"fast\" mode with the provided timeout\n        value in seconds and the following settings::\n\n                skyview_lookup = True\n                skyview_timeout = fast_mode\n                skyview_retry_failed = False\n                dust_timeout = fast_mode\n                gaia_submit_timeout = 0.66*fast_mode\n                gaia_max_timeout = fast_mode\n                gaia_submit_tries = 2\n                complete_query_later = False\n                search_simbad = False\n\n    magsarefluxes : bool\n        If True, indicates the input time-series is fluxes and not mags so the\n        plot y-axis direction and range can be set appropriately.\n\n    nperiodstouse : int\n        This controls how many 'best' periods to make phased LC plots for. By\n        default, this is the 3 best. If this is set to None, all 'best' periods\n        present in each lspinfo dict's 'nbestperiods' key will be processed for\n        this checkplot.\n\n    objectinfo : dict or None\n        This is a dict containing information on the object whose light\n        curve is being processed. This function will then be able to\n        look up and download a finder chart for this object and write\n        that to the output checkplotdict. External services such as\n        GAIA, SIMBAD, TIC, etc. 
will also be used to look up this object\n        by its coordinates, and will add in information available from\n        those services.\n\n        This dict must be of the form and contain at least the keys described\n        below::\n\n            {'objectid': the name of the object,\n             'ra': the right ascension of the object in decimal degrees,\n             'decl': the declination of the object in decimal degrees,\n             'ndet': the number of observations of this object}\n\n        You can also provide magnitudes and proper motions of the object using\n        the following keys and the appropriate values in the `objectinfo`\n        dict. These will be used to calculate colors, total and reduced proper\n        motion, etc. and display these in the output checkplot PNG::\n\n            'pmra'   -> the proper motion in mas/yr in right ascension,\n            'pmdecl' -> the proper motion in mas/yr in declination,\n            'umag'  -> U mag\t\t -> colors: U-B, U-V, U-g\n            'bmag'  -> B mag\t\t -> colors: U-B, B-V\n            'vmag'  -> V mag\t\t -> colors: U-V, B-V, V-R, V-I, V-K\n            'rmag'  -> R mag\t\t -> colors: V-R, R-I\n            'imag'  -> I mag\t\t -> colors: g-I, V-I, R-I, B-I\n            'jmag'  -> 2MASS J mag\t -> colors: J-H, J-K, g-J, i-J\n            'hmag'  -> 2MASS H mag\t -> colors: J-H, H-K\n            'kmag'  -> 2MASS Ks mag\t -> colors: g-Ks, H-Ks, J-Ks, V-Ks\n            'sdssu' -> SDSS u mag\t -> colors: u-g, u-V\n            'sdssg' -> SDSS g mag\t -> colors: g-r, g-i, g-K, u-g, U-g, g-J\n            'sdssr' -> SDSS r mag\t -> colors: r-i, g-r\n            'sdssi' -> SDSS i mag\t -> colors: r-i, i-z, g-i, i-J, i-W1\n            'sdssz' -> SDSS z mag\t -> colors: i-z, z-W2, g-z\n            'ujmag' -> UKIRT J mag\t -> colors: J-H, H-K, J-K, g-J, i-J\n            'uhmag' -> UKIRT H mag\t -> colors: J-H, H-K\n            'ukmag' -> UKIRT K mag\t -> colors: g-K, H-K, J-K, V-K\n            'irac1' -> 
Spitzer IRAC1 mag -> colors: i-I1, I1-I2\n            'irac2' -> Spitzer IRAC2 mag -> colors: I1-I2, I2-I3\n            'irac3' -> Spitzer IRAC3 mag -> colors: I2-I3\n            'irac4' -> Spitzer IRAC4 mag -> colors: I3-I4\n            'wise1' -> WISE W1 mag\t -> colors: i-W1, W1-W2\n            'wise2' -> WISE W2 mag\t -> colors: W1-W2, W2-W3\n            'wise3' -> WISE W3 mag\t -> colors: W2-W3\n            'wise4' -> WISE W4 mag\t -> colors: W3-W4\n\n        If you have magnitude measurements in other bands, use the\n        `custom_bandpasses` kwarg to pass these in.\n\n        If this is None, no object information will be incorporated into the\n        checkplot (kind of making it effectively useless for anything other than\n        glancing at the phased light curves at various 'best' periods from the\n        period-finder results).\n\n    deredden_object : bool\n        If this is True, will use the 2MASS DUST service to get extinction\n        coefficients in various bands, and then try to deredden the magnitudes\n        and colors of the object already present in the checkplot's objectinfo\n        dict.\n\n    custom_bandpasses : dict\n        This is a dict used to provide custom bandpass definitions for any\n        magnitude measurements in the objectinfo dict that are not automatically\n        recognized by :py:func:`astrobase.varclass.starfeatures.color_features`.\n\n    gaia_submit_timeout : float\n        Sets the timeout in seconds to use when submitting a request to look up\n        the object's information to the GAIA service. Note that if `fast_mode`\n        is set, this is ignored.\n\n    gaia_submit_tries : int\n        Sets the maximum number of times the GAIA services will be contacted to\n        obtain this object's information. 
If `fast_mode` is set, this is\n        ignored, and the services will be contacted only once (meaning that a\n        failure to respond will be silently ignored and no GAIA data will be\n        added to the checkplot's objectinfo dict).\n\n    gaia_max_timeout : float\n        Sets the timeout in seconds to use when waiting for the GAIA service to\n        respond to our request for the object's information. Note that if\n        `fast_mode` is set, this is ignored.\n\n    gaia_mirror : str or None\n        This sets the GAIA mirror to use. This is a key in the\n        `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n        mirror.\n\n    complete_query_later : bool\n        If this is True, saves the state of GAIA queries that are not yet\n        complete when `gaia_max_timeout` is reached while waiting for the GAIA\n        service to respond to our request. A later call for GAIA info on the\n        same object will attempt to pick up the results from the existing query\n        if it's completed. If `fast_mode` is True, this is ignored.\n\n    varinfo : dict\n        If this is None, a blank dict of the form below will be added to the\n        checkplotdict::\n\n            {'objectisvar': None -> variability flag (None indicates unset),\n             'vartags': CSV str containing variability type tags from review,\n             'varisperiodic': None -> periodic variability flag (None -> unset),\n             'varperiod': the period associated with the periodic variability,\n             'varepoch': the epoch associated with the periodic variability}\n\n        If you provide a dict matching this format in this kwarg, this will be\n        passed unchanged to the output checkplotdict produced.\n\n    getvarfeatures : bool\n        If this is True, several light curve variability features for this\n        object will be calculated and added to the output checkpotdict as\n        checkplotdict['varinfo']['features']. 
This uses the function\n        `varclass.varfeatures.all_nonperiodic_features` so see its docstring for\n        the measures that are calculated (e.g. Stetson J indices, dispersion\n        measures, etc.)\n\n    lclistpkl : dict or str\n        If this is provided, must be a dict resulting from reading a catalog\n        produced by the `lcproc.catalogs.make_lclist` function or a str path\n        pointing to the pickle file produced by that function. This catalog is\n        used to find neighbors of the current object in the current light curve\n        collection. Looking at neighbors of the object within the radius\n        specified by `nbrradiusarcsec` is useful for light curves produced by\n        instruments that have a large pixel scale, so are susceptible to\n        blending of variability and potential confusion of neighbor variability\n        with that of the actual object being looked at. If this is None, no\n        neighbor lookups will be performed.\n\n    nbrradiusarcsec : flaot\n        The radius in arcseconds to use for a search conducted around the\n        coordinates of this object to look for any potential confusion and\n        blending of variability amplitude caused by their proximity.\n\n    maxnumneighbors : int\n        The maximum number of neighbors that will have their light curves and\n        magnitudes noted in this checkplot as potential blends with the target\n        object.\n\n    xmatchinfo : str or dict\n        This is either the xmatch dict produced by the function\n        `load_xmatch_external_catalogs` above, or the path to the xmatch info\n        pickle file produced by that function.\n\n    xmatchradiusarcsec : float\n        This is the cross-matching radius to use in arcseconds.\n\n    lcfitfunc : Python function or None\n        If provided, this should be a Python function that is used to fit a\n        model to the light curve. 
This fit is then overplotted for each phased\n        light curve in the checkplot. This function should have the following\n        signature:\n\n        `def lcfitfunc(times, mags, errs, period, **lcfitparams)`\n\n        where `lcfitparams` encapsulates all external parameters (i.e. number of\n        knots for a spline function, the degree of a Legendre polynomial fit,\n        etc., planet transit parameters) This function should return a Python\n        dict with the following structure (similar to the functions in\n        `astrobase.lcfit`) and at least the keys below::\n\n            {'fittype':<str: name of fit method>,\n             'fitchisq':<float: the chi-squared value of the fit>,\n             'fitredchisq':<float: the reduced chi-squared value of the fit>,\n             'fitinfo':{'fitmags':<ndarray: model mags/fluxes from fit func>},\n             'magseries':{'times':<ndarray: times where fitmags are evaluated>}}\n\n        Additional keys in the dict returned from this function can include\n        `fitdict['fitinfo']['finalparams']` for the final model fit parameters\n        (this will be used by the checkplotserver if present),\n        `fitdict['fitinfo']['fitepoch']` for the minimum light epoch returned by\n        the model fit, among others.\n\n        In any case, the output dict of `lcfitfunc` will be copied to the output\n        checkplotdict as::\n\n            checkplotdict[lspmethod][periodind]['lcfit'][<fittype>]\n\n        for each phased light curve.\n\n    lcfitparams : dict\n        A dict containing the LC fit parameters to use when calling the function\n        provided in `lcfitfunc`. This contains key-val pairs corresponding to\n        parameter names and their respective initial values to be used by the\n        fit function.\n\n    externalplots : list of tuples of str\n        If provided, this is a list of 4-element tuples containing:\n\n        1. 
path to PNG of periodogram from an external period-finding method\n        2. path to PNG of best period phased LC from the external period-finder\n        3. path to PNG of 2nd-best phased LC from the external period-finder\n        4. path to PNG of 3rd-best phased LC from the external period-finder\n\n        This can be used to incorporate external period-finding method results\n        into the output checkplot pickle or exported PNG to allow for comparison\n        with astrobase results.\n\n        Example of externalplots::\n\n                [('/path/to/external/bls-periodogram.png',\n                 '/path/to/external/bls-phasedlc-plot-bestpeak.png',\n                 '/path/to/external/bls-phasedlc-plot-peak2.png',\n                 '/path/to/external/bls-phasedlc-plot-peak3.png'),\n                 ('/path/to/external/pdm-periodogram.png',\n                 '/path/to/external/pdm-phasedlc-plot-bestpeak.png',\n                 '/path/to/external/pdm-phasedlc-plot-peak2.png',\n                 '/path/to/external/pdm-phasedlc-plot-peak3.png'),\n                 ...]\n\n        If `externalplots` is provided here, these paths will be stored in the\n        output checkplotdict. 
The `checkplot.pkl_png.checkplot_pickle_to_png`\n        function can then automatically retrieve these plot PNGs and put\n        them into the exported checkplot PNG.\n\n    findercmap : str or matplotlib.cm.ColorMap object\n        The Colormap object to use for the finder chart image.\n\n    finderconvolve : astropy.convolution.Kernel object or None\n        If not None, the Kernel object to use for convolving the finder image.\n\n    findercachedir : str\n        The path to the astrobase cache directory for finder chart downloads\n        from the NASA SkyView service.\n\n    normto : {'globalmedian', 'zero'} or a float\n        These are specified as below:\n        - 'globalmedian' -> norms each mag to the global median of the LC column\n        - 'zero'         -> norms each mag to zero\n        - a float        -> norms each mag to this specified float value.\n\n    normmingap : float\n        This defines how much the difference between consecutive measurements is\n        allowed to be to consider them as parts of different timegroups. By\n        default it is set to 4.0 days.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    varepoch : 'min' or float or list of lists or None\n        The epoch to use for this phased light curve plot tile. If this is a\n        float, will use the provided value directly. If this is 'min', will\n        automatically figure out the time-of-minimum of the phased light\n        curve. If this is None, will use the mimimum value of `stimes` as the\n        epoch of the phased light curve plot. If this is a list of lists, will\n        use the provided value of `lspmethodind` to look up the current\n        period-finder method and the provided value of `periodind` to look up\n        the epoch associated with that method and the current period. This is\n        mostly only useful when `twolspmode` is True.\n\n    phasewrap : bool\n        If this is True, the phased time-series will be wrapped around\n        phase 0.0.\n\n    phasesort : bool\n        If True, will sort the phased light curve in order of increasing phase.\n\n    phasebin: float\n        The bin size to use to group together measurements closer than this\n        amount in phase. This is in units of phase. If this is a float, a\n        phase-binned version of the phased light curve will be overplotted on\n        top of the regular phased light curve.\n\n    minbinelems : int\n        The minimum number of elements required per phase bin to include it in\n        the phased LC plot.\n\n    plotxlim : sequence of two floats or None\n        The x-range (min, max) of the phased light curve plot. 
If None, will be\n        determined automatically.\n\n    xliminsetmode : bool\n        If this is True, the generated phased light curve plot will use the\n        values of `plotxlim` as the main plot x-axis limits (i.e. zoomed-in if\n        `plotxlim` is a range smaller than the full phase range), and will show\n        the full phased light curve plot as an smaller inset. Useful for\n        planetary transit light curves.\n\n    plotdpi : int\n        The resolution of the output plot PNGs in dots per inch.\n\n    bestperiodhighlight : str or None\n        If not None, this is a str with a matplotlib color specification to use\n        as the background color to highlight the phased light curve plot of the\n        'best' period and epoch combination. If None, no highlight will be\n        applied.\n\n    xgridlines : list of floats or None\n        If this is provided, must be a list of floats corresponding to the phase\n        values where to draw vertical dashed lines as a means of highlighting\n        these.\n\n    mindet : int\n        The minimum of observations the input object's mag/flux time-series must\n        have for this function to plot its light curve and phased light\n        curve. If the object has less than this number, no light curves will be\n        plotted, but the checkplotdict will still contain all of the other\n        information.\n\n    verbose : bool\n        If True, will indicate progress and warn about problems.\n\n    Returns\n    -------\n\n    dict\n        Returns a checkplotdict.", "id": "f14725:m0"}
{"signature": "def _pkl_finder_objectinfo(<EOL>objectinfo,<EOL>varinfo,<EOL>findercmap,<EOL>finderconvolve,<EOL>sigclip,<EOL>normto,<EOL>normmingap,<EOL>deredden_object=True,<EOL>custom_bandpasses=None,<EOL>lclistpkl=None,<EOL>nbrradiusarcsec=<NUM_LIT>,<EOL>maxnumneighbors=<NUM_LIT:5>,<EOL>plotdpi=<NUM_LIT:100>,<EOL>findercachedir='<STR_LIT>',<EOL>verbose=True,<EOL>gaia_submit_timeout=<NUM_LIT>,<EOL>gaia_submit_tries=<NUM_LIT:3>,<EOL>gaia_max_timeout=<NUM_LIT>,<EOL>gaia_mirror=None,<EOL>fast_mode=False,<EOL>complete_query_later=True<EOL>):", "body": "<EOL>if fast_mode is True:<EOL><INDENT>skyview_lookup = False<EOL>skyview_timeout = <NUM_LIT><EOL>skyview_retry_failed = False<EOL>dust_timeout = <NUM_LIT><EOL>gaia_submit_timeout = <NUM_LIT><EOL>gaia_max_timeout = <NUM_LIT><EOL>gaia_submit_tries = <NUM_LIT:2><EOL>complete_query_later = False<EOL>search_simbad = False<EOL><DEDENT>elif isinstance(fast_mode, (int, float)) and fast_mode > <NUM_LIT:0.0>:<EOL><INDENT>skyview_lookup = True<EOL>skyview_timeout = fast_mode<EOL>skyview_retry_failed = False<EOL>dust_timeout = fast_mode<EOL>gaia_submit_timeout = <NUM_LIT>*fast_mode<EOL>gaia_max_timeout = fast_mode<EOL>gaia_submit_tries = <NUM_LIT:2><EOL>complete_query_later = False<EOL>search_simbad = False<EOL><DEDENT>else:<EOL><INDENT>skyview_lookup = True<EOL>skyview_timeout = <NUM_LIT><EOL>skyview_retry_failed = True<EOL>dust_timeout = <NUM_LIT><EOL>search_simbad = True<EOL><DEDENT>if (isinstance(objectinfo, dict) and<EOL>('<STR_LIT>' in objectinfo or '<STR_LIT>' in objectinfo) and<EOL>'<STR_LIT>' in objectinfo and '<STR_LIT>' in objectinfo and<EOL>objectinfo['<STR_LIT>'] and objectinfo['<STR_LIT>']):<EOL><INDENT>if '<STR_LIT>' not in objectinfo:<EOL><INDENT>objectid = objectinfo['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>objectid = objectinfo['<STR_LIT>']<EOL><DEDENT>if verbose and skyview_lookup:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(objectid, objectinfo['<STR_LIT>'], 
objectinfo['<STR_LIT>']))<EOL><DEDENT>elif verbose and not skyview_lookup:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(objectid, objectinfo['<STR_LIT>'], objectinfo['<STR_LIT>']))<EOL><DEDENT>try:<EOL><INDENT>if skyview_lookup:<EOL><INDENT>try:<EOL><INDENT>finder, finderheader = skyview_stamp(<EOL>objectinfo['<STR_LIT>'],<EOL>objectinfo['<STR_LIT>'],<EOL>convolvewith=finderconvolve,<EOL>verbose=verbose,<EOL>flip=False,<EOL>cachedir=findercachedir,<EOL>timeout=skyview_timeout,<EOL>retry_failed=skyview_retry_failed,<EOL>)<EOL><DEDENT>except OSError as e:<EOL><INDENT>if not fast_mode:<EOL><INDENT>LOGERROR(<EOL>'<STR_LIT>'<EOL>)<EOL>finder, finderheader = skyview_stamp(<EOL>objectinfo['<STR_LIT>'],<EOL>objectinfo['<STR_LIT>'],<EOL>convolvewith=finderconvolve,<EOL>verbose=verbose,<EOL>flip=False,<EOL>cachedir=findercachedir,<EOL>forcefetch=True,<EOL>timeout=skyview_timeout,<EOL>retry_failed=False  <EOL>)<EOL><DEDENT><DEDENT>finderfig = plt.figure(figsize=(<NUM_LIT:3>,<NUM_LIT:3>),dpi=plotdpi)<EOL>finderwcs = WCS(finderheader)<EOL>ax = finderfig.add_subplot(<NUM_LIT>, frameon=False)<EOL>ax.imshow(finder, cmap=findercmap, origin='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>finder, finderheader, finderfig, finderwcs = (<EOL>None, None, None, None<EOL>)<EOL><DEDENT>if (lclistpkl is not None and<EOL>nbrradiusarcsec is not None and<EOL>nbrradiusarcsec > <NUM_LIT:0.0>):<EOL><INDENT>if isinstance(lclistpkl, str) and os.path.exists(lclistpkl):<EOL><INDENT>if lclistpkl.endswith('<STR_LIT>'):<EOL><INDENT>infd = gzip.open(lclistpkl,'<STR_LIT:rb>')<EOL><DEDENT>else:<EOL><INDENT>infd = open(lclistpkl,'<STR_LIT:rb>')<EOL><DEDENT>lclist = pickle.load(infd)<EOL>infd.close()<EOL><DEDENT>elif isinstance(lclistpkl, dict):<EOL><INDENT>lclist = lclistpkl<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>lclist = dict()<EOL><DEDENT>if '<STR_LIT>' not in lclist:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (objectid, 
lclistpkl))<EOL>neighbors = None<EOL>kdt = None<EOL><DEDENT>else:<EOL><INDENT>kdt = lclist['<STR_LIT>']<EOL>obj_cosdecl = np.cos(np.radians(objectinfo['<STR_LIT>']))<EOL>obj_sindecl = np.sin(np.radians(objectinfo['<STR_LIT>']))<EOL>obj_cosra = np.cos(np.radians(objectinfo['<STR_LIT>']))<EOL>obj_sinra = np.sin(np.radians(objectinfo['<STR_LIT>']))<EOL>obj_xyz = np.column_stack((obj_cosra*obj_cosdecl,<EOL>obj_sinra*obj_cosdecl,<EOL>obj_sindecl))<EOL>match_xyzdist = (<EOL><NUM_LIT> * np.sin(np.radians(nbrradiusarcsec/<NUM_LIT>)/<NUM_LIT>)<EOL>)<EOL>matchdists, matchinds = kdt.query(<EOL>obj_xyz,<EOL>k=maxnumneighbors+<NUM_LIT:1>,  <EOL>distance_upper_bound=match_xyzdist<EOL>)<EOL>mdsorted = np.argsort(matchdists[<NUM_LIT:0>])<EOL>matchdists = matchdists[<NUM_LIT:0>][mdsorted]<EOL>matchinds = matchinds[<NUM_LIT:0>][mdsorted]<EOL>neighbors = []<EOL>nbrind = <NUM_LIT:0><EOL>for md, mi in zip(matchdists, matchinds):<EOL><INDENT>if np.isfinite(md) and md > <NUM_LIT:0.0>:<EOL><INDENT>if skyview_lookup:<EOL><INDENT>pixcoords = finderwcs.all_world2pix(<EOL>np.array([[lclist['<STR_LIT>']['<STR_LIT>'][mi],<EOL>lclist['<STR_LIT>']['<STR_LIT>'][mi]]]),<EOL><NUM_LIT:0><EOL>)<EOL>thisnbr = {<EOL>'<STR_LIT>':(<EOL>lclist['<STR_LIT>']['<STR_LIT>'][mi]<EOL>),<EOL>'<STR_LIT>':lclist['<STR_LIT>']['<STR_LIT>'][mi],<EOL>'<STR_LIT>':lclist['<STR_LIT>']['<STR_LIT>'][mi],<EOL>'<STR_LIT>':pixcoords[<NUM_LIT:0>,<NUM_LIT:0>],<EOL>'<STR_LIT>':<NUM_LIT> - pixcoords[<NUM_LIT:0>,<NUM_LIT:1>],<EOL>'<STR_LIT>':_xyzdist_to_distarcsec(md),<EOL>'<STR_LIT>': lclist['<STR_LIT>']['<STR_LIT>'][mi]<EOL>}<EOL>neighbors.append(thisnbr)<EOL>nbrind = nbrind+<NUM_LIT:1><EOL>annotatex = pixcoords[<NUM_LIT:0>,<NUM_LIT:0>]<EOL>annotatey = pixcoords[<NUM_LIT:0>,<NUM_LIT:1>]<EOL>if ((<NUM_LIT> - annotatex) > <NUM_LIT>):<EOL><INDENT>offx = annotatex + <NUM_LIT><EOL>xha = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>offx = annotatex - <NUM_LIT><EOL>xha = '<STR_LIT>'<EOL><DEDENT>if ((<NUM_LIT> - annotatey) > 
<NUM_LIT>):<EOL><INDENT>offy = annotatey - <NUM_LIT><EOL>yha = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>offy = annotatey + <NUM_LIT><EOL>yha = '<STR_LIT>'<EOL><DEDENT>ax.annotate('<STR_LIT>' % nbrind,<EOL>(annotatex, annotatey),<EOL>xytext=(offx, offy),<EOL>arrowprops={'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT:width>':<NUM_LIT:1.0>,<EOL>'<STR_LIT>':<NUM_LIT:1.0>,<EOL>'<STR_LIT>':<NUM_LIT:0.1>,<EOL>'<STR_LIT>':<NUM_LIT:0.0>},<EOL>color='<STR_LIT>',<EOL>horizontalalignment=xha,<EOL>verticalalignment=yha)<EOL><DEDENT>else:<EOL><INDENT>thisnbr = {<EOL>'<STR_LIT>':(<EOL>lclist['<STR_LIT>']['<STR_LIT>'][mi]<EOL>),<EOL>'<STR_LIT>':lclist['<STR_LIT>']['<STR_LIT>'][mi],<EOL>'<STR_LIT>':lclist['<STR_LIT>']['<STR_LIT>'][mi],<EOL>'<STR_LIT>':<NUM_LIT:0.0>,<EOL>'<STR_LIT>':<NUM_LIT:0.0>,<EOL>'<STR_LIT>':_xyzdist_to_distarcsec(md),<EOL>'<STR_LIT>': lclist['<STR_LIT>']['<STR_LIT>'][mi]<EOL>}<EOL>neighbors.append(thisnbr)<EOL>nbrind = nbrind+<NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>neighbors = None<EOL>kdt = None<EOL><DEDENT>if skyview_lookup:<EOL><INDENT>ax.set_xticks([])<EOL>ax.set_yticks([])<EOL>object_pixcoords = finderwcs.all_world2pix(<EOL>[[objectinfo['<STR_LIT>'],<EOL>objectinfo['<STR_LIT>']]],<EOL><NUM_LIT:0><EOL>)<EOL>ax.axvline(<EOL>x=object_pixcoords[<NUM_LIT:0>,<NUM_LIT:0>],<EOL>ymin=<NUM_LIT>,<EOL>ymax=<NUM_LIT>,<EOL>linewidth=<NUM_LIT:1>,<EOL>color='<STR_LIT:b>'<EOL>)<EOL>ax.axhline(<EOL>y=object_pixcoords[<NUM_LIT:0>,<NUM_LIT:1>],<EOL>xmin=<NUM_LIT>,<EOL>xmax=<NUM_LIT>,<EOL>linewidth=<NUM_LIT:1>,<EOL>color='<STR_LIT:b>'<EOL>)<EOL>ax.set_frame_on(False)<EOL>finderpng = StrIO()<EOL>finderfig.savefig(finderpng,<EOL>bbox_inches='<STR_LIT>',<EOL>pad_inches=<NUM_LIT:0.0>, format='<STR_LIT>')<EOL>plt.close()<EOL>finderpng.seek(<NUM_LIT:0>)<EOL>finderb64 = base64.b64encode(finderpng.read())<EOL>finderpng.close()<EOL><DEDENT>else:<EOL><INDENT>finderb64 = None<EOL><DEDENT><DEDENT>except Exception as 
e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(objectid, objectinfo['<STR_LIT>'], objectinfo['<STR_LIT>']))<EOL>finderb64 = None<EOL>neighbors = None<EOL>kdt = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>finderb64 = None<EOL>neighbors = None<EOL>kdt = None<EOL><DEDENT>if isinstance(objectinfo, dict):<EOL><INDENT>if '<STR_LIT>' not in objectinfo and '<STR_LIT>' in objectinfo:<EOL><INDENT>objectid = objectinfo['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = objectid<EOL><DEDENT>elif '<STR_LIT>' in objectinfo:<EOL><INDENT>objectid = objectinfo['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>objectid = os.urandom(<NUM_LIT:12>).hex()[:<NUM_LIT:7>]<EOL>objectinfo['<STR_LIT>'] = objectid<EOL>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>nbrfeat = neighbor_gaia_features(<EOL>objectinfo,<EOL>kdt,<EOL>nbrradiusarcsec,<EOL>verbose=False,<EOL>gaia_submit_timeout=gaia_submit_timeout,<EOL>gaia_submit_tries=gaia_submit_tries,<EOL>gaia_max_timeout=gaia_max_timeout,<EOL>gaia_mirror=gaia_mirror,<EOL>complete_query_later=complete_query_later,<EOL>search_simbad=search_simbad<EOL>)<EOL>objectinfo.update(nbrfeat)<EOL>if ( ('<STR_LIT>' not in objectinfo) or<EOL>( ('<STR_LIT>' in objectinfo) and<EOL>( (objectinfo['<STR_LIT>'] is None) or<EOL>(not np.isfinite(objectinfo['<STR_LIT>'])) ) ) ):<EOL><INDENT>if '<STR_LIT>' in nbrfeat['<STR_LIT>']:<EOL><INDENT>objectinfo['<STR_LIT>'] = nbrfeat['<STR_LIT>'][<NUM_LIT:0>]<EOL>objectinfo['<STR_LIT>'] = nbrfeat['<STR_LIT>'][<NUM_LIT:0>]<EOL>objectinfo['<STR_LIT>'] = '<STR_LIT>'<EOL>if verbose:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>objectinfo['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if ( ('<STR_LIT>' not in objectinfo) or<EOL>( ('<STR_LIT>' in objectinfo) and<EOL>( (objectinfo['<STR_LIT>'] is None) or<EOL>(not np.isfinite(objectinfo['<STR_LIT>'])) ) ) ):<EOL><INDENT>if '<STR_LIT>' in nbrfeat['<STR_LIT>']:<EOL><INDENT>objectinfo['<STR_LIT>'] = 
nbrfeat['<STR_LIT>'][<NUM_LIT:0>]<EOL>objectinfo['<STR_LIT>'] = nbrfeat['<STR_LIT>'][<NUM_LIT:0>]<EOL>objectinfo['<STR_LIT>'] = '<STR_LIT>'<EOL>if verbose:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>objectinfo['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT>' in objectinfo['<STR_LIT>']:<EOL><INDENT>objectinfo['<STR_LIT>'] = objectinfo['<STR_LIT>'][<NUM_LIT:0>]<EOL>objectinfo['<STR_LIT>'] = objectinfo['<STR_LIT>'][<NUM_LIT:0>]<EOL>objectinfo['<STR_LIT>'] = objectinfo['<STR_LIT>'][<NUM_LIT:0>]<EOL>objectinfo['<STR_LIT>'] = objectinfo['<STR_LIT>'][<NUM_LIT:0>]<EOL>objectinfo['<STR_LIT>'] = (<EOL>objectinfo['<STR_LIT>'][<NUM_LIT:0>]<EOL>)<EOL>objectinfo['<STR_LIT>'] = objectinfo['<STR_LIT>'][<NUM_LIT:0>]<EOL>objectinfo['<STR_LIT>'] = objectinfo['<STR_LIT>'][<NUM_LIT:0>]<EOL>objectinfo['<STR_LIT>'] = objectinfo['<STR_LIT>'][<NUM_LIT:0>]<EOL>objectinfo['<STR_LIT>'] = objectinfo['<STR_LIT>'][<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>objectinfo['<STR_LIT>'] = None<EOL>objectinfo['<STR_LIT>'] = np.nan<EOL>objectinfo['<STR_LIT>'] = np.nan<EOL>objectinfo['<STR_LIT>'] = np.nan<EOL>objectinfo['<STR_LIT>'] = np.nan<EOL>objectinfo['<STR_LIT>'] = np.nan<EOL>objectinfo['<STR_LIT>'] = np.nan<EOL>objectinfo['<STR_LIT>'] = np.nan<EOL>objectinfo['<STR_LIT>'] = np.nan<EOL><DEDENT>if ('<STR_LIT>' in objectinfo and<EOL>objectinfo['<STR_LIT>'] is not None and<EOL>np.isfinite(objectinfo['<STR_LIT>']) and<EOL>'<STR_LIT>' in objectinfo and<EOL>objectinfo['<STR_LIT>'] is not None and<EOL>np.isfinite(objectinfo['<STR_LIT>'])):<EOL><INDENT>try:<EOL><INDENT>ticres = tic_conesearch(objectinfo['<STR_LIT>'],<EOL>objectinfo['<STR_LIT>'],<EOL>radius_arcmin=<NUM_LIT>/<NUM_LIT>,<EOL>verbose=verbose,<EOL>timeout=gaia_max_timeout,<EOL>maxtries=gaia_submit_tries)<EOL>if ticres is not None:<EOL><INDENT>with open(ticres['<STR_LIT>'],'<STR_LIT:r>') as infd:<EOL><INDENT>ticinfo = json.load(infd)<EOL><DEDENT>if ('<STR_LIT:data>' in ticinfo 
and<EOL>len(ticinfo['<STR_LIT:data>']) > <NUM_LIT:0> and<EOL>isinstance(ticinfo['<STR_LIT:data>'][<NUM_LIT:0>], dict)):<EOL><INDENT>objectinfo['<STR_LIT>'] = str(ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>'])<EOL>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT:version>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = 
(<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT:d>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>if (objectinfo['<STR_LIT>'] is not None and<EOL>np.isfinite(objectinfo['<STR_LIT>']) and<EOL>objectinfo['<STR_LIT>'] is not None and<EOL>np.isfinite(objectinfo['<STR_LIT>'])):<EOL><INDENT>objectinfo['<STR_LIT>'] = (<EOL>magnitudes.absolute_gaia_magnitude(<EOL>objectinfo['<STR_LIT>'],<EOL>objectinfo['<STR_LIT>']<EOL>)<EOL>)<EOL><DEDENT><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = 
(<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL>objectinfo['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' 
in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = 
ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT>if ('<STR_LIT>' not in objectinfo or<EOL>('<STR_LIT>' in objectinfo and<EOL>(objectinfo['<STR_LIT>'] is None or<EOL>not np.isfinite(objectinfo['<STR_LIT>'])))):<EOL><INDENT>objectinfo['<STR_LIT>'] = ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>objectinfo['<STR_LIT>'] = (<EOL>ticinfo['<STR_LIT:data>'][<NUM_LIT:0>]['<STR_LIT>']<EOL>)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(objectinfo['<STR_LIT>'],<EOL>objectinfo['<STR_LIT>'],<EOL>objectinfo['<STR_LIT>']))<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(objectinfo['<STR_LIT>'],<EOL>objectinfo['<STR_LIT>'],<EOL>objectinfo['<STR_LIT>']))<EOL><DEDENT><DEDENT>coordfeat = 
coord_features(objectinfo)<EOL>colorfeat = color_features(objectinfo,<EOL>deredden=deredden_object,<EOL>custom_bandpasses=custom_bandpasses,<EOL>dust_timeout=dust_timeout)<EOL>colorclass = color_classification(colorfeat, coordfeat)<EOL>objectinfo.update(colorfeat)<EOL>objectinfo.update(coordfeat)<EOL>objectinfo.update(colorclass)<EOL>checkplotdict = {'<STR_LIT>':objectid,<EOL>'<STR_LIT>':neighbors,<EOL>'<STR_LIT>':objectinfo,<EOL>'<STR_LIT>':finderb64,<EOL>'<STR_LIT>':sigclip,<EOL>'<STR_LIT>':normto,<EOL>'<STR_LIT>':normmingap}<EOL>checkplotdict['<STR_LIT>']['<STR_LIT>'] = None<EOL><DEDENT>else:<EOL><INDENT>checkplotdict = {'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':[],<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None<EOL>},<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':sigclip,<EOL>'<STR_LIT>':normto,<EOL>'<STR_LIT>':normmingap}<EOL><DEDENT>if isinstance(varinfo, dict):<EOL><INDENT>checkplotdict['<STR_LIT>'] = varinfo<EOL><DEDENT>else:<EOL><INDENT>checkplotdict['<STR_LIT>'] = {<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>}<EOL><DEDENT>return checkplotdict<EOL>", "docstring": "This returns the finder chart and object information as a dict.\n\n    Parameters\n    ----------\n\n    objectinfo : dict or None\n        If provided, this is a dict containing information 
on the object whose\n        light curve is being processed. This function will then be able to look\n        up and download a finder chart for this object and write that to the\n        output checkplotdict. External services such as GAIA, SIMBAD, TIC@MAST,\n        etc. will also be used to look up this object by its coordinates, and\n        will add in information available from those services.\n\n        The `objectinfo` dict must be of the form and contain at least the keys\n        described below::\n\n            {'objectid': the name of the object,\n             'ra': the right ascension of the object in decimal degrees,\n             'decl': the declination of the object in decimal degrees,\n             'ndet': the number of observations of this object}\n\n        You can also provide magnitudes and proper motions of the object using\n        the following keys and the appropriate values in the `objectinfo`\n        dict. These will be used to calculate colors, total and reduced proper\n        motion, etc. 
and display these in the output checkplot PNG::\n\n            'pmra' -> the proper motion in mas/yr in right ascension,\n            'pmdecl' -> the proper motion in mas/yr in declination,\n            'umag'  -> U mag\t\t -> colors: U-B, U-V, U-g\n            'bmag'  -> B mag\t\t -> colors: U-B, B-V\n            'vmag'  -> V mag\t\t -> colors: U-V, B-V, V-R, V-I, V-K\n            'rmag'  -> R mag\t\t -> colors: V-R, R-I\n            'imag'  -> I mag\t\t -> colors: g-I, V-I, R-I, B-I\n            'jmag'  -> 2MASS J mag\t -> colors: J-H, J-K, g-J, i-J\n            'hmag'  -> 2MASS H mag\t -> colors: J-H, H-K\n            'kmag'  -> 2MASS Ks mag\t -> colors: g-Ks, H-Ks, J-Ks, V-Ks\n            'sdssu' -> SDSS u mag\t -> colors: u-g, u-V\n            'sdssg' -> SDSS g mag\t -> colors: g-r, g-i, g-K, u-g, U-g, g-J\n            'sdssr' -> SDSS r mag\t -> colors: r-i, g-r\n            'sdssi' -> SDSS i mag\t -> colors: r-i, i-z, g-i, i-J, i-W1\n            'sdssz' -> SDSS z mag\t -> colors: i-z, z-W2, g-z\n            'ujmag' -> UKIRT J mag\t -> colors: J-H, H-K, J-K, g-J, i-J\n            'uhmag' -> UKIRT H mag\t -> colors: J-H, H-K\n            'ukmag' -> UKIRT K mag\t -> colors: g-K, H-K, J-K, V-K\n            'irac1' -> Spitzer IRAC1 mag -> colors: i-I1, I1-I2\n            'irac2' -> Spitzer IRAC2 mag -> colors: I1-I2, I2-I3\n            'irac3' -> Spitzer IRAC3 mag -> colors: I2-I3\n            'irac4' -> Spitzer IRAC4 mag -> colors: I3-I4\n            'wise1' -> WISE W1 mag\t -> colors: i-W1, W1-W2\n            'wise2' -> WISE W2 mag\t -> colors: W1-W2, W2-W3\n            'wise3' -> WISE W3 mag\t -> colors: W2-W3\n            'wise4' -> WISE W4 mag\t -> colors: W3-W4\n\n        If you have magnitude measurements in other bands, use the\n        `custom_bandpasses` kwarg to pass these in.\n\n        If this is None, no object information will be incorporated into the\n        checkplot (kind of making it effectively useless for anything other than\n        glancing 
at the phased light curves at various 'best' periods from the\n        period-finder results).\n\n    varinfo : dict or None\n        If this is None, a blank dict of the form below will be added to the\n        checkplotdict::\n\n            {'objectisvar': None -> variability flag (None indicates unset),\n             'vartags': CSV str containing variability type tags from review,\n             'varisperiodic': None -> periodic variability flag (None -> unset),\n             'varperiod': the period associated with the periodic variability,\n             'varepoch': the epoch associated with the periodic variability}\n\n        If you provide a dict matching this format in this kwarg, this will be\n        passed unchanged to the output checkplotdict produced.\n\n    findercmap : str or matplotlib.cm.ColorMap object\n        The Colormap object to use for the finder chart image.\n\n    finderconvolve : astropy.convolution.Kernel object or None\n        If not None, the Kernel object to use for convolving the finder image.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    normto : {'globalmedian', 'zero'} or a float\n        This is specified as below::\n\n            'globalmedian' -> norms each mag to global median of the LC column\n            'zero'         -> norms each mag to zero\n            a float        -> norms each mag to this specified float value.\n\n    normmingap : float\n        This defines how much the difference between consecutive measurements is\n        allowed to be to consider them as parts of different timegroups. By\n        default it is set to 4.0 days.\n\n    deredden_object : bool\n        If this is True, will use the 2MASS DUST service to get extinction\n        coefficients in various bands, and then try to deredden the magnitudes\n        and colors of the object already present in the checkplot's objectinfo\n        dict.\n\n    custom_bandpasses : dict\n        This is a dict used to provide custom bandpass definitions for any\n        magnitude measurements in the objectinfo dict that are not automatically\n        recognized by :py:func:`astrobase.varclass.starfeatures.color_features`.\n\n    lclistpkl : dict or str\n        If this is provided, must be a dict resulting from reading a catalog\n        produced by the `lcproc.catalogs.make_lclist` function or a str path\n        pointing to the pickle file produced by that function. This catalog is\n        used to find neighbors of the current object in the current light curve\n        collection. 
Looking at neighbors of the object within the radius\n        specified by `nbrradiusarcsec` is useful for light curves produced by\n        instruments that have a large pixel scale, so are susceptible to\n        blending of variability and potential confusion of neighbor variability\n        with that of the actual object being looked at. If this is None, no\n        neighbor lookups will be performed.\n\n    nbrradiusarcsec : float\n        The radius in arcseconds to use for a search conducted around the\n        coordinates of this object to look for any potential confusion and\n        blending of variability amplitude caused by their proximity.\n\n    maxnumneighbors : int\n        The maximum number of neighbors that will have their light curves and\n        magnitudes noted in this checkplot as potential blends with the target\n        object.\n\n    plotdpi : int\n        The resolution in DPI of the plots to generate in this function\n        (e.g. the finder chart, etc.)\n\n    findercachedir : str\n        The path to the astrobase cache directory for finder chart downloads\n        from the NASA SkyView service.\n\n    verbose : bool\n        If True, will indicate progress and warn about potential problems.\n\n    gaia_submit_timeout : float\n        Sets the timeout in seconds to use when submitting a request to look up\n        the object's information to the GAIA service. Note that if `fast_mode`\n        is set, this is ignored.\n\n    gaia_submit_tries : int\n        Sets the maximum number of times the GAIA services will be contacted to\n        obtain this object's information. 
If `fast_mode` is set, this is\n        ignored, and the services will be contacted only once (meaning that a\n        failure to respond will be silently ignored and no GAIA data will be\n        added to the checkplot's objectinfo dict).\n\n    gaia_max_timeout : float\n        Sets the timeout in seconds to use when waiting for the GAIA service to\n        respond to our request for the object's information. Note that if\n        `fast_mode` is set, this is ignored.\n\n    gaia_mirror : str\n        This sets the GAIA mirror to use. This is a key in the\n        `services.gaia.GAIA_URLS` dict which defines the URLs to hit for each\n        mirror.\n\n    fast_mode : bool or float\n        This runs the external catalog operations in a \"fast\" mode, with short\n        timeouts and not trying to hit external catalogs that take a long time\n        to respond.\n\n        If this is set to True, the default settings for the external requests\n        will then become::\n\n            skyview_lookup = False\n            skyview_timeout = 10.0\n            skyview_retry_failed = False\n            dust_timeout = 10.0\n            gaia_submit_timeout = 7.0\n            gaia_max_timeout = 10.0\n            gaia_submit_tries = 2\n            complete_query_later = False\n            search_simbad = False\n\n        If this is a float, will run in \"fast\" mode with the provided timeout\n        value in seconds and the following settings::\n\n            skyview_lookup = True\n            skyview_timeout = fast_mode\n            skyview_retry_failed = False\n            dust_timeout = fast_mode\n            gaia_submit_timeout = 0.66*fast_mode\n            gaia_max_timeout = fast_mode\n            gaia_submit_tries = 2\n            complete_query_later = False\n            search_simbad = False\n\n    complete_query_later : bool\n        If this is True, saves the state of GAIA queries that are not yet\n        complete when `gaia_max_timeout` is reached while waiting 
for the GAIA\n        service to respond to our request. A later call for GAIA info on the\n        same object will attempt to pick up the results from the existing query\n        if it's completed. If `fast_mode` is True, this is ignored.\n\n    Returns\n    -------\n\n    dict\n        A checkplotdict is returned containing the objectinfo and varinfo dicts,\n        ready to use with the functions below to add in light curve plots,\n        phased LC plots, xmatch info, etc.", "id": "f14726:m1"}
{"signature": "def gps_topic_publish():", "body": "", "docstring": "This publishes a JSON message to a topic.\n\nTODO: finish this", "id": "f14728:m8"}
{"signature": "def gcs_get_file(bucketname,<EOL>filename,<EOL>local_file,<EOL>altexts=None,<EOL>client=None,<EOL>service_account_json=None,<EOL>raiseonfail=False):", "body": "if not client:<EOL><INDENT>if (service_account_json is not None and<EOL>os.path.exists(service_account_json)):<EOL><INDENT>client = storage.Client.from_service_account_json(<EOL>service_account_json<EOL>)<EOL><DEDENT>else:<EOL><INDENT>client = storage.Client()<EOL><DEDENT><DEDENT>try:<EOL><INDENT>bucket = client.get_bucket(bucketname)<EOL>blob = bucket.get_blob(filename)<EOL>blob.download_to_filename(local_file)<EOL>return local_file<EOL><DEDENT>except Exception as e:<EOL><INDENT>for alt_extension in altexts:<EOL><INDENT>split_ext = os.path.splitext(filename)<EOL>check_file = split_ext[<NUM_LIT:0>] + alt_extension<EOL>try:<EOL><INDENT>bucket = client.get_bucket(bucket)<EOL>blob = bucket.get_blob(check_file)<EOL>blob.download_to_filename(<EOL>local_file.replace(split_ext[-<NUM_LIT:1>],<EOL>alt_extension)<EOL>)<EOL>return local_file.replace(split_ext[-<NUM_LIT:1>],<EOL>alt_extension)<EOL><DEDENT>except Exception as e:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' % (bucket, filename))<EOL>if raiseonfail:<EOL><INDENT>raise<EOL><DEDENT>return None<EOL><DEDENT>", "docstring": "This gets a single file from a Google Cloud Storage bucket.\n\n    Parameters\n    ----------\n\n    bucketname : str\n        The name of the GCS bucket to download the file from.\n\n    filename : str\n        The full name of the file to download, including all prefixes.\n\n    local_file : str\n        Path to where the downloaded file will be stored.\n\n    altexts : None or list of str\n        If not None, this is a list of alternate extensions to try for the file\n        other than the one provided in `filename`. 
For example, to get anything\n        that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to\n        strip the .gz.\n\n    client : google.cloud.storage.Client instance\n        The instance of the Client to use to perform the download operation. If\n        this is None, a new Client will be used. If this is None and\n        `service_account_json` points to a downloaded JSON file with GCS\n        credentials, a new Client with the provided credentials will be used. If\n        this is not None, the existing Client instance will be used.\n\n    service_account_json : str\n        Path to a downloaded GCS credentials JSON file.\n\n    raiseonfail : bool\n        If True, will re-raise whatever Exception caused the operation to fail\n        and break out immediately.\n\n    Returns\n    -------\n\n    str\n        Path to the downloaded filename or None if the download was\n        unsuccessful.", "id": "f14728:m2"}
{"signature": "def delete_gce_instances():", "body": "", "docstring": "This deletes GCE worker nodes.\n\nTODO: finish this", "id": "f14728:m1"}
{"signature": "def gps_topic_pull():", "body": "", "docstring": "This synchronously pulls a single message from a pubsub topic.\n\nTODO: finish this", "id": "f14728:m7"}
{"signature": "def given_lc_get_out_of_transit_points(<EOL>time, flux, err_flux,<EOL>blsfit_savpath=None,<EOL>trapfit_savpath=None,<EOL>in_out_transit_savpath=None,<EOL>sigclip=None,<EOL>magsarefluxes=True,<EOL>nworkers=<NUM_LIT:1>,<EOL>extra_maskfrac=<NUM_LIT><EOL>):", "body": "tmids_obsd, t_starts, t_ends = (<EOL>given_lc_get_transit_tmids_tstarts_tends(<EOL>time, flux, err_flux, blsfit_savpath=blsfit_savpath,<EOL>trapfit_savpath=trapfit_savpath, magsarefluxes=magsarefluxes,<EOL>nworkers=nworkers, sigclip=sigclip, extra_maskfrac=extra_maskfrac<EOL>)<EOL>)<EOL>in_transit = np.zeros_like(time).astype(bool)<EOL>for t_start, t_end in zip(t_starts, t_ends):<EOL><INDENT>this_transit = ( (time > t_start) & (time < t_end) )<EOL>in_transit |= this_transit<EOL><DEDENT>out_of_transit = ~in_transit<EOL>if in_out_transit_savpath:<EOL><INDENT>_in_out_transit_plot(time, flux, in_transit, out_of_transit,<EOL>in_out_transit_savpath)<EOL><DEDENT>return time[out_of_transit], flux[out_of_transit], err_flux[out_of_transit]<EOL>", "docstring": "This gets the out-of-transit light curve points.\n\n    Relevant during iterative masking of transits for multiple planet system\n    search.\n\n    Parameters\n    ----------\n\n    time,flux,err_flux : np.array\n        The input flux time-series measurements and their associated measurement\n        errors\n\n    blsfit_savpath : str or None\n        If provided as a str, indicates the path of the fit plot to make for a\n        simple BLS model fit to the transit using the obtained period and epoch.\n\n    trapfit_savpath : str or None\n        If provided as a str, indicates the path of the fit plot to make for a\n        trapezoidal transit model fit to the transit using the obtained period\n        and epoch.\n\n    in_out_transit_savpath : str or None\n        If provided as a str, indicates the path of the plot file that will be\n        made for a plot showing the in-transit points and out-of-transit points\n        tagged 
separately.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    magsarefluxes : bool\n        This is by default True for this function, since it works on fluxes only\n        at the moment.\n\n    nworkers : int\n        The number of parallel BLS period-finder workers to use.\n\n    extra_maskfrac : float\n        This is the separation (N) from in-transit points you desire, in units\n        of the transit duration. `extra_maskfrac = 0` if you just want points\n        inside transit, otherwise::\n\n            t_starts = t_Is - N*tdur, t_ends = t_IVs + N*tdur\n\n        Thus setting N=0.03 masks slightly more than the guessed transit\n        duration.\n\n    Returns\n    -------\n\n    (times_oot, fluxes_oot, errs_oot) : tuple of np.array\n        The `times`, `flux`, `err_flux` values from the input at the time values\n        out-of-transit are returned.", "id": "f14730:m6"}
{"signature": "def get_snr_of_dip(times,<EOL>mags,<EOL>modeltimes,<EOL>modelmags,<EOL>atol_normalization=<NUM_LIT>,<EOL>indsforrms=None,<EOL>magsarefluxes=False,<EOL>verbose=True,<EOL>transitdepth=None,<EOL>npoints_in_transit=None):", "body": "if magsarefluxes:<EOL><INDENT>if not np.isclose(np.nanmedian(modelmags), <NUM_LIT:1>, atol=atol_normalization):<EOL><INDENT>raise AssertionError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise NotImplementedError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>if not transitdepth:<EOL><INDENT>transitdepth = np.abs(np.max(modelmags) - np.min(modelmags))<EOL><DEDENT>if not len(mags) == len(modelmags):<EOL><INDENT>from scipy.interpolate import interp1d<EOL>fn = interp1d(modeltimes, modelmags, kind='<STR_LIT>', bounds_error=True,<EOL>fill_value=np.nan)<EOL>modelmags = fn(times)<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>')<EOL><DEDENT><DEDENT>subtractedmags = mags - modelmags<EOL>if isinstance(indsforrms, np.ndarray):<EOL><INDENT>subtractedrms = np.std(subtractedmags[indsforrms])<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>subtractedrms = np.std(subtractedmags)<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>')<EOL><DEDENT><DEDENT>def _get_npoints_in_transit(modelmags):<EOL><INDENT>if np.nanmedian(modelmags) == <NUM_LIT:1>:<EOL><INDENT>return len(modelmags[(modelmags != <NUM_LIT:1>)])<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError<EOL><DEDENT><DEDENT>if not npoints_in_transit:<EOL><INDENT>npoints_in_transit = _get_npoints_in_transit(modelmags)<EOL><DEDENT>snr = np.sqrt(npoints_in_transit) * transitdepth/subtractedrms<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'.format(npoints_in_transit) +<EOL>'<STR_LIT>'.format(transitdepth) +<EOL>'<STR_LIT>'.format(subtractedrms) +<EOL>'<STR_LIT>'.format(snr))<EOL><DEDENT>return snr, transitdepth, subtractedrms<EOL>", "docstring": "Calculate the total SNR of a transit assuming gaussian uncertainties.\n\n    
`modelmags` gets interpolated onto the cadence of `mags`. The noise is\n    calculated as the 1-sigma std deviation of the residual (see below).\n\n    Following Carter et al. 2009::\n\n        Q = sqrt( \u0393 T ) * \u03b4 / \u03c3\n\n    for Q the total SNR of the transit in the r->0 limit, where::\n\n        r = Rp/Rstar,\n        T = transit duration,\n        \u03b4 = transit depth,\n        \u03c3 = RMS of the lightcurve in transit.\n        \u0393 = sampling rate\n\n    Thus \u0393 * T is roughly the number of points obtained during transit.\n    (This doesn't correctly account for the SNR during ingress/egress, but this\n    is a second-order correction).\n\n    Note this is the same total SNR as described by e.g., Kovacs et al. 2002,\n    their Equation 11.\n\n    NOTE: this only works with fluxes at the moment.\n\n    Parameters\n    ----------\n\n    times,mags : np.array\n        The input flux time-series to process.\n\n    modeltimes,modelmags : np.array\n        A transiting planet model, either from BLS, a trapezoid model, or a\n        Mandel-Agol model.\n\n    atol_normalization : float\n        The absolute tolerance to which the median of the passed model fluxes\n        must be equal to 1.\n\n    indsforrms : np.array\n        A array of bools of `len(mags)` used to select points for the RMS\n        measurement. If not passed, the RMS of the entire passed timeseries is\n        used as an approximation. Genearlly, it's best to use out of transit\n        points, so the RMS measurement is not model-dependent.\n\n    magsarefluxes : bool\n        Currently forced to be True because this function only works with\n        fluxes.\n\n    verbose : bool\n        If True, indicates progress and warns about problems.\n\n    transitdepth : float or None\n        If the transit depth is known, pass it in here. 
Otherwise, it is\n        calculated assuming OOT flux is 1.\n\n    npoints_in_transits : int or None\n        If the number of points in transit is known, pass it in here. Otherwise,\n        the function will guess at this value.\n\n    Returns\n    -------\n\n    (snr, transit_depth, noise) : tuple\n        The returned tuple contains the calculated SNR, transit depth, and noise\n        of the residual lightcurve calculated using the relation described\n        above.", "id": "f14730:m1"}
{"signature": "def _epd_function(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd):", "body": "return (coeffs[<NUM_LIT:0>]*fsv*fsv +<EOL>coeffs[<NUM_LIT:1>]*fsv +<EOL>coeffs[<NUM_LIT:2>]*fdv*fdv +<EOL>coeffs[<NUM_LIT:3>]*fdv +<EOL>coeffs[<NUM_LIT:4>]*fkv*fkv +<EOL>coeffs[<NUM_LIT:5>]*fkv +<EOL>coeffs[<NUM_LIT:6>] +<EOL>coeffs[<NUM_LIT:7>]*fsv*fdv +<EOL>coeffs[<NUM_LIT:8>]*fsv*fkv +<EOL>coeffs[<NUM_LIT:9>]*fdv*fkv +<EOL>coeffs[<NUM_LIT:10>]*np.sin(<NUM_LIT:2>*pi_value*xcc) +<EOL>coeffs[<NUM_LIT:11>]*np.cos(<NUM_LIT:2>*pi_value*xcc) +<EOL>coeffs[<NUM_LIT:12>]*np.sin(<NUM_LIT:2>*pi_value*ycc) +<EOL>coeffs[<NUM_LIT>]*np.cos(<NUM_LIT:2>*pi_value*ycc) +<EOL>coeffs[<NUM_LIT>]*np.sin(<NUM_LIT:4>*pi_value*xcc) +<EOL>coeffs[<NUM_LIT:15>]*np.cos(<NUM_LIT:4>*pi_value*xcc) +<EOL>coeffs[<NUM_LIT:16>]*np.sin(<NUM_LIT:4>*pi_value*ycc) +<EOL>coeffs[<NUM_LIT>]*np.cos(<NUM_LIT:4>*pi_value*ycc) +<EOL>coeffs[<NUM_LIT>]*bgv +<EOL>coeffs[<NUM_LIT>]*bge +<EOL>coeffs[<NUM_LIT:20>]*iha +<EOL>coeffs[<NUM_LIT>]*izd)<EOL>", "docstring": "This is the EPD function to fit using a smoothed mag-series.", "id": "f14731:m6"}
{"signature": "def smooth_magseries_savgol(mags, windowsize, polyorder=<NUM_LIT:2>):", "body": "smoothed = savgol_filter(mags, windowsize, polyorder)<EOL>return smoothed<EOL>", "docstring": "This smooths the magseries with a Savitsky-Golay filter.\n\n    Parameters\n    ----------\n\n    mags : np.array\n        The input mags/flux time-series to smooth.\n\n    windowsize : int\n        This is a odd integer containing the smoothing window size.\n\n    polyorder : int\n        This is an integer containing the polynomial degree order to use when\n        generating the Savitsky-Golay filter.\n\n    Returns\n    -------\n\n    np.array\n        The smoothed mag/flux time-series array.", "id": "f14731:m3"}
{"signature": "def epd_magseries(times, mags, errs,<EOL>fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd,<EOL>magsarefluxes=False,<EOL>epdsmooth_sigclip=<NUM_LIT>,<EOL>epdsmooth_windowsize=<NUM_LIT>,<EOL>epdsmooth_func=smooth_magseries_savgol,<EOL>epdsmooth_extraparams=None):", "body": "finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)<EOL>ftimes, fmags, ferrs = times[::][finind], mags[::][finind], errs[::][finind]<EOL>ffsv, ffdv, ffkv, fxcc, fycc, fbgv, fbge, fiha, fizd = (<EOL>fsv[::][finind],<EOL>fdv[::][finind],<EOL>fkv[::][finind],<EOL>xcc[::][finind],<EOL>ycc[::][finind],<EOL>bgv[::][finind],<EOL>bge[::][finind],<EOL>iha[::][finind],<EOL>izd[::][finind],<EOL>)<EOL>stimes, smags, serrs, separams = sigclip_magseries_with_extparams(<EOL>times, mags, errs,<EOL>[fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd],<EOL>sigclip=epdsmooth_sigclip,<EOL>magsarefluxes=magsarefluxes<EOL>)<EOL>sfsv, sfdv, sfkv, sxcc, sycc, sbgv, sbge, siha, sizd = separams<EOL>if isinstance(epdsmooth_extraparams, dict):<EOL><INDENT>smoothedmags = epdsmooth_func(smags,<EOL>epdsmooth_windowsize,<EOL>**epdsmooth_extraparams)<EOL><DEDENT>else:<EOL><INDENT>smoothedmags = epdsmooth_func(smags, epdsmooth_windowsize)<EOL><DEDENT>initcoeffs = np.zeros(<NUM_LIT>)<EOL>leastsqfit = leastsq(_epd_residual,<EOL>initcoeffs,<EOL>args=(smoothedmags,<EOL>sfsv, sfdv, sfkv, sxcc,<EOL>sycc, sbgv, sbge, siha, sizd),<EOL>full_output=True)<EOL>if leastsqfit[-<NUM_LIT:1>] in (<NUM_LIT:1>,<NUM_LIT:2>,<NUM_LIT:3>,<NUM_LIT:4>):<EOL><INDENT>fitcoeffs = leastsqfit[<NUM_LIT:0>]<EOL>epdfit = _epd_function(fitcoeffs,<EOL>ffsv, ffdv, ffkv, fxcc, fycc,<EOL>fbgv, fbge, fiha, fizd)<EOL>epdmags = npmedian(fmags) + fmags - epdfit<EOL>retdict = {'<STR_LIT>':ftimes,<EOL>'<STR_LIT>':epdmags,<EOL>'<STR_LIT>':ferrs,<EOL>'<STR_LIT>':fitcoeffs,<EOL>'<STR_LIT>':leastsqfit,<EOL>'<STR_LIT>':epdfit,<EOL>'<STR_LIT>':npmedian(epdmags),<EOL>'<STR_LIT>':npmedian(npabs(epdmags - npmedian(epdmags)))}<EOL>return 
retdict<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return None<EOL><DEDENT>", "docstring": "Detrends a magnitude series using External Parameter Decorrelation.\n\n    Requires a set of external parameters similar to those present in HAT light\n    curves. At the moment, the HAT light-curve-specific external parameters are:\n\n    - S: the 'fsv' column in light curves,\n    - D: the 'fdv' column in light curves,\n    - K: the 'fkv' column in light curves,\n    - x coords: the 'xcc' column in light curves,\n    - y coords: the 'ycc' column in light curves,\n    - background value: the 'bgv' column in light curves,\n    - background error: the 'bge' column in light curves,\n    - hour angle: the 'iha' column in light curves,\n    - zenith distance: the 'izd' column in light curves\n\n    S, D, and K are defined as follows:\n\n    - S -> measure of PSF sharpness (~1/sigma^2 sosmaller S = wider PSF)\n    - D -> measure of PSF ellipticity in xy direction\n    - K -> measure of PSF ellipticity in cross direction\n\n    S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in\n    A. 
Pal's thesis: https://arxiv.org/abs/0906.3486\n\n    NOTE: The errs are completely ignored and returned unchanged (except for\n    sigclip and finite filtering).\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The input mag/flux time-series to detrend.\n\n    fsv : np.array\n        Array containing the external parameter `S` of the same length as times.\n\n    fdv : np.array\n        Array containing the external parameter `D` of the same length as times.\n\n    fkv : np.array\n        Array containing the external parameter `K` of the same length as times.\n\n    xcc : np.array\n        Array containing the external parameter `x-coords` of the same length as\n        times.\n\n    ycc : np.array\n        Array containing the external parameter `y-coords` of the same length as\n        times.\n\n    bgv : np.array\n        Array containing the external parameter `background value` of the same\n        length as times.\n\n    bge : np.array\n        Array containing the external parameter `background error` of the same\n        length as times.\n\n    iha : np.array\n        Array containing the external parameter `hour angle` of the same length\n        as times.\n\n    izd : np.array\n        Array containing the external parameter `zenith distance` of the same\n        length as times.\n\n    magsarefluxes : bool\n        Set this to True if `mags` actually contains fluxes.\n\n    epdsmooth_sigclip : float or int or sequence of two floats/ints or None\n        This specifies how to sigma-clip the input LC before fitting the EPD\n        function to it.\n\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. 
The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    epdsmooth_windowsize : int\n        This is the number of LC points to smooth over to generate a smoothed\n        light curve that will be used to fit the EPD function.\n\n    epdsmooth_func : Python function\n        This sets the smoothing filter function to use. A Savitsky-Golay filter\n        is used to smooth the light curve by default. The functions that can be\n        used with this kwarg are listed in `varbase.trends`. If you want to use\n        your own function, it MUST have the following signature::\n\n                def smoothfunc(mags_array, window_size, **extraparams)\n\n        and return a numpy array of the same size as `mags_array` with the\n        smoothed time-series. 
Any extra params can be provided using the\n        `extraparams` dict.\n\n    epdsmooth_extraparams : dict\n        This is a dict of any extra filter params to supply to the smoothing\n        function.\n\n    Returns\n    -------\n\n    dict\n        Returns a dict of the following form::\n\n            {'times':the input times after non-finite elems removed,\n             'mags':the EPD detrended mag values (the EPD mags),\n             'errs':the errs after non-finite elems removed,\n             'fitcoeffs':EPD fit coefficient values,\n             'fitinfo':the full tuple returned by scipy.leastsq,\n             'fitmags':the EPD fit function evaluated at times,\n             'mags_median': this is median of the EPD mags,\n             'mags_mad': this is the MAD of EPD mags}", "id": "f14731:m9"}
{"signature": "def smooth_magseries_ndimage_medfilt(mags, windowsize):", "body": "return median_filter(mags, size=windowsize, mode='<STR_LIT>')<EOL>", "docstring": "This smooths the magseries with a median filter that reflects the array\n    at the boundary.\n\n    See https://docs.scipy.org/doc/scipy/reference/tutorial/ndimage.html for\n    details.\n\n    Parameters\n    ----------\n\n    mags : np.array\n        The input mags/flux time-series to smooth.\n\n    windowsize : int\n        This is a odd integer containing the smoothing window size.\n\n    Returns\n    -------\n\n    np.array\n        The smoothed mag/flux time-series array.", "id": "f14731:m0"}
{"signature": "def _epd_residual(coeffs, mags, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd):", "body": "f = _epd_function(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd)<EOL>residual = mags - f<EOL>return residual<EOL>", "docstring": "This is the residual function to minimize using scipy.optimize.leastsq.", "id": "f14731:m7"}
{"signature": "def _epd_residual2(coeffs,<EOL>times, mags, errs,<EOL>fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd):", "body": "f = _epd_function(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd)<EOL>residual = mags - f<EOL>return residual<EOL>", "docstring": "This is the residual function to minimize using\n    scipy.optimize.least_squares.\n\n    This variant is for :py:func:`.epd_magseries_extparams`.", "id": "f14731:m8"}
{"signature": "def smooth_magseries_signal_medfilt(mags, windowsize):", "body": "return medfilt(mags, windowsize)<EOL>", "docstring": "This smooths the magseries with a simple median filter.\n\n    This function pads with zeros near the boundary, see:\n\n    https://stackoverflow.com/questions/24585706/scipy-medfilt-wrong-result\n\n    Typically this is bad.\n\n    Parameters\n    ----------\n\n    mags : np.array\n        The input mags/flux time-series to smooth.\n\n    windowsize : int\n        This is a odd integer containing the smoothing window size.\n\n    Returns\n    -------\n\n    np.array\n        The smoothed mag/flux time-series array.", "id": "f14731:m1"}
{"signature": "def _autocorr_func2(mags, lag, maglen, magmed, magstd):", "body": "lagindex = nparange(<NUM_LIT:0>,maglen-lag)<EOL>products = (mags[lagindex] - magmed) * (mags[lagindex+lag] - magmed)<EOL>autocovarfunc = npsum(products)/lagindex.size<EOL>varfunc = npsum(<EOL>(mags[lagindex] - magmed)*(mags[lagindex] - magmed)<EOL>)/mags.size<EOL>acorr = autocovarfunc/varfunc<EOL>return acorr<EOL>", "docstring": "This is an alternative function to calculate the autocorrelation.\n\nThis version is from (first definition):\n\nhttps://en.wikipedia.org/wiki/Correlogram#Estimation_of_autocorrelations\n\nParameters\n----------\n\nmags : np.array\n    This is the magnitudes array. MUST NOT have any nans.\n\nlag : float\n    The specific lag value to calculate the auto-correlation for. This MUST\n    be less than total number of observations in `mags`.\n\nmaglen : int\n    The number of elements in the `mags` array.\n\nmagmed : float\n    The median of the `mags` array.\n\nmagstd : float\n    The standard deviation of the `mags` array.\n\nReturns\n-------\n\nfloat\n    The auto-correlation at this specific `lag` value.", "id": "f14732:m1"}
{"signature": "def _autocorr_func1(mags, lag, maglen, magmed, magstd):", "body": "lagindex = nparange(<NUM_LIT:1>,maglen-lag)<EOL>products = (mags[lagindex] - magmed) * (mags[lagindex+lag] - magmed)<EOL>acorr = (<NUM_LIT:1.0>/((maglen - lag)*magstd)) * npsum(products)<EOL>return acorr<EOL>", "docstring": "Calculates the autocorr of mag series for specific lag.\n\n    This version of the function is taken from: Kim et al. (`2011\n    <https://dx.doi.org/10.1088/0004-637X/735/2/68>`_)\n\n    Parameters\n    ----------\n\n    mags : np.array\n        This is the magnitudes array. MUST NOT have any nans.\n\n    lag : float\n        The specific lag value to calculate the auto-correlation for. This MUST\n        be less than total number of observations in `mags`.\n\n    maglen : int\n        The number of elements in the `mags` array.\n\n    magmed : float\n        The median of the `mags` array.\n\n    magstd : float\n        The standard deviation of the `mags` array.\n\n    Returns\n    -------\n\n    float\n        The auto-correlation at this specific `lag` value.", "id": "f14732:m0"}
{"signature": "def prewhiten_magseries(times, mags, errs,<EOL>whitenperiod,<EOL>whitenparams,<EOL>sigclip=<NUM_LIT>,<EOL>magsarefluxes=False,<EOL>plotfit=None,<EOL>plotfitphasedlconly=True,<EOL>rescaletomedian=True):", "body": "stimes, smags, serrs = sigclip_magseries(times, mags, errs,<EOL>sigclip=sigclip,<EOL>magsarefluxes=magsarefluxes)<EOL>median_mag = np.median(smags)<EOL>mintime = np.min(stimes)<EOL>iphase = (<EOL>(stimes - mintime)/whitenperiod -<EOL>np.floor((stimes - mintime)/whitenperiod)<EOL>)<EOL>phasesortind = np.argsort(iphase)<EOL>phase = iphase[phasesortind]<EOL>pmags = smags[phasesortind]<EOL>perrs = serrs[phasesortind]<EOL>ptimes = stimes[phasesortind]<EOL>wmags = pmags - _fourier_func(whitenparams, phase, pmags)<EOL>wtimeorder = np.argsort(ptimes)<EOL>wtimes = ptimes[wtimeorder]<EOL>wphase = phase[wtimeorder]<EOL>wmags = wmags[wtimeorder]<EOL>werrs = perrs[wtimeorder]<EOL>if rescaletomedian:<EOL><INDENT>wmags = wmags + median_mag<EOL><DEDENT>returndict = {'<STR_LIT>':wtimes,  <EOL>'<STR_LIT>':wphase,<EOL>'<STR_LIT>':wmags,<EOL>'<STR_LIT>':werrs,<EOL>'<STR_LIT>':whitenparams,<EOL>'<STR_LIT>':whitenperiod}<EOL>if plotfit and (isinstance(plotfit, str) or isinstance(plotfit, Strio)):<EOL><INDENT>if plotfitphasedlconly:<EOL><INDENT>plt.figure(figsize=(<NUM_LIT:10>,<NUM_LIT>))<EOL><DEDENT>else:<EOL><INDENT>plt.figure(figsize=(<NUM_LIT:16>,<NUM_LIT>))<EOL><DEDENT>if plotfitphasedlconly:<EOL><INDENT>plt.subplot(<NUM_LIT>)<EOL>plt.plot(phase,pmags,<EOL>marker='<STR_LIT:.>',<EOL>color='<STR_LIT:k>',<EOL>linestyle='<STR_LIT:None>',<EOL>markersize=<NUM_LIT>,<EOL>markeredgewidth=<NUM_LIT:0>)<EOL>if not 
magsarefluxes:<EOL><INDENT>plt.gca().invert_yaxis()<EOL>plt.ylabel('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.ylabel('<STR_LIT>')<EOL><DEDENT>plt.xlabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL>plt.subplot(<NUM_LIT>)<EOL>plt.plot(wphase,wmags,<EOL>marker='<STR_LIT:.>',<EOL>color='<STR_LIT:g>',<EOL>linestyle='<STR_LIT:None>',<EOL>markersize=<NUM_LIT>,<EOL>markeredgewidth=<NUM_LIT:0>)<EOL>if not magsarefluxes:<EOL><INDENT>plt.gca().invert_yaxis()<EOL>plt.ylabel('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.ylabel('<STR_LIT>')<EOL><DEDENT>plt.xlabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.subplot(<NUM_LIT>)<EOL>plt.plot(stimes,smags,<EOL>marker='<STR_LIT:.>',<EOL>color='<STR_LIT:k>',<EOL>linestyle='<STR_LIT:None>',<EOL>markersize=<NUM_LIT>,<EOL>markeredgewidth=<NUM_LIT:0>)<EOL>if not magsarefluxes:<EOL><INDENT>plt.gca().invert_yaxis()<EOL>plt.ylabel('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.ylabel('<STR_LIT>')<EOL><DEDENT>plt.xlabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL>plt.subplot(<NUM_LIT>)<EOL>plt.plot(wtimes,wmags,<EOL>marker='<STR_LIT:.>',<EOL>color='<STR_LIT:g>',<EOL>linestyle='<STR_LIT:None>',<EOL>markersize=<NUM_LIT>,<EOL>markeredgewidth=<NUM_LIT:0>)<EOL>if not magsarefluxes:<EOL><INDENT>plt.gca().invert_yaxis()<EOL>plt.ylabel('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.ylabel('<STR_LIT>')<EOL><DEDENT>plt.xlabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>' % whitenperiod)<EOL>plt.subplot(<NUM_LIT>)<EOL>plt.plot(phase,pmags,<EOL>marker='<STR_LIT:.>',<EOL>color='<STR_LIT:k>',<EOL>linestyle='<STR_LIT:None>',<EOL>markersize=<NUM_LIT>,<EOL>markeredgewidth=<NUM_LIT:0>)<EOL>if not 
magsarefluxes:<EOL><INDENT>plt.gca().invert_yaxis()<EOL>plt.ylabel('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.ylabel('<STR_LIT>')<EOL><DEDENT>plt.xlabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL>plt.subplot(<NUM_LIT>)<EOL>plt.plot(wphase,wmags,<EOL>marker='<STR_LIT:.>',<EOL>color='<STR_LIT:g>',<EOL>linestyle='<STR_LIT:None>',<EOL>markersize=<NUM_LIT>,<EOL>markeredgewidth=<NUM_LIT:0>)<EOL>if not magsarefluxes:<EOL><INDENT>plt.gca().invert_yaxis()<EOL>plt.ylabel('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.ylabel('<STR_LIT>')<EOL><DEDENT>plt.xlabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>plt.tight_layout()<EOL>plt.savefig(plotfit, format='<STR_LIT>', pad_inches=<NUM_LIT:0.0>)<EOL>plt.close()<EOL>if isinstance(plotfit, str) or isinstance(plotfit, Strio):<EOL><INDENT>returndict['<STR_LIT>'] = plotfit<EOL><DEDENT><DEDENT>return returndict<EOL>", "docstring": "Removes a periodic sinusoidal signal generated using whitenparams from\n    the input magnitude time series.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The input mag/flux time-series to prewhiten.\n\n    whitenperiod : float\n        The period of the sinusoidal signal to remove.\n\n    whitenparams : list of floats\n        This contains the Fourier amplitude and phase coefficients of the\n        sinusoidal signal to remove::\n\n            [ampl_1, ampl_2, ampl_3, ..., ampl_X,\n             pha_1, pha_2, pha_3, ..., pha_X]\n\n        where `X` is the Fourier order. 
These are usually the output of a\n        previous Fourier fit to the light curve (from\n        :py:func:`astrobase.lcfit.sinusoidal.fourier_fit_magseries` for\n        example).\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    magsarefluxes : bool\n        If True, will treat the input values of `mags` as fluxes for purposes of\n        plotting the fit and sig-clipping.\n\n    plotfit : str or False\n        If this is a string, this function will make a plot showing the effect\n        of the pre-whitening on the mag/flux time-series and write the plot to\n        the path specified here.\n\n    plotfitphasedlconly : bool\n        If True, will plot only the phased LC for showing the effect of\n        pre-whitening, and skip plotting the unphased LC.\n\n    rescaletomedian : bool\n        If this is True, then we add back the constant median term of the\n        magnitudes to the final pre-whitened mag series.\n\n    Returns\n    -------\n\n    dict\n        
Returns a dict of the form::\n\n            {'wtimes':times array after pre-whitening,\n             'wphase':phase array after pre-whitening,\n             'wmags':mags array after pre-whitening,\n             'werrs':errs array after pre-whitening,\n             'whitenparams':the input pre-whitening params used,\n             'whitenperiod':the input pre-whitening period used,\n             'fitplotfile':the output plot file if plotfit was set}", "id": "f14733:m0"}
{"signature": "def trapezoid_transit_func(transitparams, times, mags, errs,<EOL>get_ntransitpoints=False):", "body": "(transitperiod,<EOL>transitepoch,<EOL>transitdepth,<EOL>transitduration,<EOL>ingressduration) = transitparams<EOL>iphase = (times - transitepoch)/transitperiod<EOL>iphase = iphase - np.floor(iphase)<EOL>phasesortind = np.argsort(iphase)<EOL>phase = iphase[phasesortind]<EOL>ptimes = times[phasesortind]<EOL>pmags = mags[phasesortind]<EOL>perrs = errs[phasesortind]<EOL>zerolevel = np.median(pmags)<EOL>modelmags = np.full_like(phase, zerolevel)<EOL>halftransitduration = transitduration/<NUM_LIT><EOL>bottomlevel = zerolevel - transitdepth<EOL>slope = transitdepth/ingressduration<EOL>firstcontact = <NUM_LIT:1.0> - halftransitduration<EOL>secondcontact = firstcontact + ingressduration<EOL>thirdcontact = halftransitduration - ingressduration<EOL>fourthcontact = halftransitduration<EOL>ingressind = (phase > firstcontact) & (phase < secondcontact)<EOL>bottomind = (phase > secondcontact) | (phase < thirdcontact)<EOL>egressind = (phase > thirdcontact) & (phase < fourthcontact)<EOL>in_transit_points = ingressind | bottomind | egressind<EOL>n_transit_points = np.sum(in_transit_points)<EOL>modelmags[ingressind] = zerolevel - slope*(phase[ingressind] - firstcontact)<EOL>modelmags[bottomind] = bottomlevel<EOL>modelmags[egressind] = bottomlevel + slope*(phase[egressind] - thirdcontact)<EOL>if get_ntransitpoints:<EOL><INDENT>return modelmags, phase, ptimes, pmags, perrs, n_transit_points<EOL><DEDENT>else:<EOL><INDENT>return modelmags, phase, ptimes, pmags, perrs<EOL><DEDENT>", "docstring": "This returns a trapezoid transit-shaped function.\n\n    Suitable for first order modeling of transit signals.\n\n    Parameters\n    ----------\n\n    transitparams : list of float\n        This contains the transiting planet trapezoid model::\n\n            transitparams = [transitperiod (time),\n                             transitepoch (time),\n                             
transitdepth (flux or mags),\n                             transitduration (phase),\n                             ingressduration (phase)]\n\n        All of these will then have fitted values after the fit is done.\n\n        - for magnitudes -> `transitdepth` should be < 0\n        - for fluxes     -> `transitdepth` should be > 0\n\n    times,mags,errs : np.array\n        The input time-series of measurements and associated errors for which\n        the transit model will be generated. The times will be used to generate\n        model mags, and the input `times`, `mags`, and `errs` will be resorted\n        by model phase and returned.\n\n    Returns\n    -------\n\n    (modelmags, phase, ptimes, pmags, perrs) : tuple\n        Returns the model mags and phase values. Also returns the input `times`,\n        `mags`, and `errs` sorted by the model's phase.", "id": "f14737:m0"}
{"signature": "def trapezoid_transit_residual(transitparams, times, mags, errs):", "body": "modelmags, phase, ptimes, pmags, perrs = (<EOL>trapezoid_transit_func(transitparams, times, mags, errs)<EOL>)<EOL>return (pmags - modelmags)/perrs<EOL>", "docstring": "This returns the residual between the modelmags and the actual mags.\n\nParameters\n----------\n\ntransitparams : list of float\n    This contains the transiting planet trapezoid model::\n\n        transitparams = [transitperiod (time),\n                         transitepoch (time),\n                         transitdepth (flux or mags),\n                         transitduration (phase),\n                         ingressduration (phase)]\n\n    All of these will then have fitted values after the fit is done.\n\n    - for magnitudes -> `transitdepth` should be < 0\n    - for fluxes     -> `transitdepth` should be > 0\n\ntimes,mags,errs : np.array\n    The input time-series of measurements and associated errors for which\n    the transit model will be generated. The times will be used to generate\n    model mags, and the input `times`, `mags`, and `errs` will be resorted\n    by model phase and returned.\n\nReturns\n-------\n\nnp.array\n    The residuals between the input `mags` and generated `modelmags`,\n    weighted by the measurement errors in `errs`.", "id": "f14737:m1"}
{"signature": "def _double_inverted_gaussian(x,<EOL>amp1, loc1, std1,<EOL>amp2, loc2, std2):", "body": "gaussian1 = -_gaussian(x,amp1,loc1,std1)<EOL>gaussian2 = -_gaussian(x,amp2,loc2,std2)<EOL>return gaussian1 + gaussian2<EOL>", "docstring": "This is a double inverted gaussian.\n\n    Parameters\n    ----------\n\n    x : np.array\n        The items at which the Gaussian is evaluated.\n\n    amp1,amp2 : float\n        The amplitude of Gaussian 1 and Gaussian 2.\n\n    loc1,loc2 : float\n        The central value of Gaussian 1 and Gaussian 2.\n\n    std1,std2 : float\n        The standard deviation of Gaussian 1 and Gaussian 2.\n\n    Returns\n    -------\n\n    np.array\n        Returns a double inverted Gaussian function evaluated at the items in\n        `x`, using the provided parameters of `amp`, `loc`, and `std` for two\n        component Gaussians 1 and 2.", "id": "f14739:m1"}
{"signature": "def flare_model_residual(flareparams, times, mags, errs):", "body": "modelmags, _, _, _ = flare_model(flareparams, times, mags, errs)<EOL>return (mags - modelmags)/errs<EOL>", "docstring": "This returns the residual between model mags and the actual mags.\n\nParameters\n----------\n\nflareparams : list of float\n    This defines the flare model::\n\n        [amplitude,\n         flare_peak_time,\n         rise_gaussian_stdev,\n         decay_time_constant]\n\n    where:\n\n    `amplitude`: the maximum flare amplitude in mags or flux. If flux, then\n    amplitude should be positive. If mags, amplitude should be negative.\n\n    `flare_peak_time`: time at which the flare maximum happens.\n\n    `rise_gaussian_stdev`: the stdev of the gaussian describing the rise of\n    the flare.\n\n    `decay_time_constant`: the time constant of the exponential fall of the\n    flare.\n\ntimes,mags,errs : np.array\n    The input time-series of measurements and associated errors for which\n    the model will be generated. The times will be used to generate\n    model mags.\n\nReturns\n-------\n\nnp.array\n    The residuals between the input `mags` and generated `modelmags`,\n    weighted by the measurement errors in `errs`.", "id": "f14740:m1"}
{"signature": "def phase_magseries_with_errs(times, mags, errs, period, epoch,<EOL>wrap=True, sort=True):", "body": "<EOL>finiteind = np.isfinite(mags)<EOL>finite_times = times[finiteind]<EOL>finite_mags = mags[finiteind]<EOL>finite_errs = errs[finiteind]<EOL>magseries_phase = (<EOL>(finite_times - epoch)/period -<EOL>np.floor(((finite_times - epoch)/period))<EOL>)<EOL>outdict = {'<STR_LIT>':magseries_phase,<EOL>'<STR_LIT>':finite_mags,<EOL>'<STR_LIT>':finite_errs,<EOL>'<STR_LIT>':period,<EOL>'<STR_LIT>':epoch}<EOL>if sort:<EOL><INDENT>sortorder = np.argsort(outdict['<STR_LIT>'])<EOL>outdict['<STR_LIT>'] = outdict['<STR_LIT>'][sortorder]<EOL>outdict['<STR_LIT>'] = outdict['<STR_LIT>'][sortorder]<EOL>outdict['<STR_LIT>'] = outdict['<STR_LIT>'][sortorder]<EOL><DEDENT>if wrap:<EOL><INDENT>outdict['<STR_LIT>'] = np.concatenate((outdict['<STR_LIT>']-<NUM_LIT:1.0>,<EOL>outdict['<STR_LIT>']))<EOL>outdict['<STR_LIT>'] = np.concatenate((outdict['<STR_LIT>'],<EOL>outdict['<STR_LIT>']))<EOL>outdict['<STR_LIT>'] = np.concatenate((outdict['<STR_LIT>'],<EOL>outdict['<STR_LIT>']))<EOL><DEDENT>return outdict<EOL>", "docstring": "Phases a magnitude/flux time-series using a given period and epoch.\n\n    The equation used is::\n\n        phase = (times - epoch)/period - floor((times - epoch)/period)\n\n    This phases the given magnitude timeseries using the given period and\n    epoch. If wrap is True, wraps the result around 0.0 (and returns an array\n    that has twice the number of the original elements). If sort is True,\n    returns the magnitude timeseries in phase sorted order.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The magnitude/flux time-series values and associated measurement errors\n        to phase using the provided `period` and `epoch`. Non-fiinite values\n        will be removed.\n\n    period : float\n        The period to use to phase the time-series.\n\n    epoch : float\n        The epoch to phase the time-series. 
This is usually the time-of-minimum\n        or time-of-maximum of some periodic light curve\n        phenomenon. Alternatively, one can use the minimum time value in\n        `times`.\n\n    wrap : bool\n        If this is True, the returned phased time-series will be wrapped around\n        phase 0.0, which is useful for plotting purposes. The arrays returned\n        will have twice the number of input elements because of this wrapping.\n\n    sort : bool\n        If this is True, the returned phased time-series will be sorted in\n        increasing phase order.\n\n    Returns\n    -------\n\n    dict\n        A dict of the following form is returned::\n\n            {'phase': the phase values,\n             'mags': the mags/flux values at each phase,\n             'errs': the err values at each phase,\n             'period': the input `period` used to phase the time-series,\n             'epoch': the input `epoch` used to phase the time-series}", "id": "f14742:m5"}
{"signature": "def phase_magseries(times, mags, period, epoch, wrap=True, sort=True):", "body": "<EOL>finiteind = np.isfinite(mags) & np.isfinite(times)<EOL>finite_times = times[finiteind]<EOL>finite_mags = mags[finiteind]<EOL>magseries_phase = (<EOL>(finite_times - epoch)/period -<EOL>np.floor(((finite_times - epoch)/period))<EOL>)<EOL>outdict = {'<STR_LIT>':magseries_phase,<EOL>'<STR_LIT>':finite_mags,<EOL>'<STR_LIT>':period,<EOL>'<STR_LIT>':epoch}<EOL>if sort:<EOL><INDENT>sortorder = np.argsort(outdict['<STR_LIT>'])<EOL>outdict['<STR_LIT>'] = outdict['<STR_LIT>'][sortorder]<EOL>outdict['<STR_LIT>'] = outdict['<STR_LIT>'][sortorder]<EOL><DEDENT>if wrap:<EOL><INDENT>outdict['<STR_LIT>'] = np.concatenate((outdict['<STR_LIT>']-<NUM_LIT:1.0>,<EOL>outdict['<STR_LIT>']))<EOL>outdict['<STR_LIT>'] = np.concatenate((outdict['<STR_LIT>'],<EOL>outdict['<STR_LIT>']))<EOL><DEDENT>return outdict<EOL>", "docstring": "Phases a magnitude/flux time-series using a given period and epoch.\n\n    The equation used is::\n\n        phase = (times - epoch)/period - floor((times - epoch)/period)\n\n    This phases the given magnitude timeseries using the given period and\n    epoch. If wrap is True, wraps the result around 0.0 (and returns an array\n    that has twice the number of the original elements). If sort is True,\n    returns the magnitude timeseries in phase sorted order.\n\n    Parameters\n    ----------\n\n    times,mags : np.array\n        The magnitude/flux time-series values to phase using the provided\n        `period` and `epoch`. Non-fiinite values will be removed.\n\n    period : float\n        The period to use to phase the time-series.\n\n    epoch : float\n        The epoch to phase the time-series. This is usually the time-of-minimum\n        or time-of-maximum of some periodic light curve\n        phenomenon. 
Alternatively, one can use the minimum time value in\n        `times`.\n\n    wrap : bool\n        If this is True, the returned phased time-series will be wrapped around\n        phase 0.0, which is useful for plotting purposes. The arrays returned\n        will have twice the number of input elements because of this wrapping.\n\n    sort : bool\n        If this is True, the returned phased time-series will be sorted in\n        increasing phase order.\n\n    Returns\n    -------\n\n    dict\n        A dict of the following form is returned::\n\n            {'phase': the phase values,\n             'mags': the mags/flux values at each phase,\n             'period': the input `period` used to phase the time-series,\n             'epoch': the input `epoch` used to phase the time-series}", "id": "f14742:m4"}
{"signature": "def normalize_magseries(times,<EOL>mags,<EOL>mingap=<NUM_LIT>,<EOL>normto='<STR_LIT>',<EOL>magsarefluxes=False,<EOL>debugmode=False):", "body": "ngroups, timegroups = find_lc_timegroups(times,<EOL>mingap=mingap)<EOL>finite_ind = np.isfinite(mags)<EOL>if any(finite_ind):<EOL><INDENT>global_mag_median = np.median(mags[finite_ind])<EOL>for tgind, tg in enumerate(timegroups):<EOL><INDENT>finite_ind = np.isfinite(mags[tg])<EOL>group_median = np.median((mags[tg])[finite_ind])<EOL>if magsarefluxes:<EOL><INDENT>mags[tg] = mags[tg]/group_median<EOL><DEDENT>else:<EOL><INDENT>mags[tg] = mags[tg] - group_median<EOL><DEDENT>if debugmode:<EOL><INDENT>LOGDEBUG('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(tgind,<EOL>len(mags[tg]),<EOL>len(finite_ind),<EOL>group_median))<EOL><DEDENT><DEDENT>if isinstance(normto, str) and normto == '<STR_LIT>':<EOL><INDENT>if magsarefluxes:<EOL><INDENT>mags = mags * global_mag_median<EOL><DEDENT>else:<EOL><INDENT>mags = mags + global_mag_median<EOL><DEDENT><DEDENT>elif isinstance(normto, float):<EOL><INDENT>if magsarefluxes:<EOL><INDENT>mags = mags * normto<EOL><DEDENT>else:<EOL><INDENT>mags = mags + normto<EOL><DEDENT><DEDENT>return times, mags<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return None, None<EOL><DEDENT>", "docstring": "This normalizes the magnitude time-series to a specified value.\n\n    This is used to normalize time series measurements that may have large time\n    gaps and vertical offsets in mag/flux measurement between these\n    'timegroups', either due to instrument changes or different filters.\n\n    NOTE: this works in-place! 
The mags array will be replaced with normalized\n    mags when this function finishes.\n\n    Parameters\n    ----------\n\n    times,mags : array-like\n        The times (assumed to be some form of JD) and mags (or flux)\n        measurements to be normalized.\n\n    mingap : float\n        This defines how much the difference between consecutive measurements is\n        allowed to be to consider them as parts of different timegroups. By\n        default it is set to 4.0 days.\n\n    normto : {'globalmedian', 'zero'} or a float\n        Specifies the normalization type::\n\n          'globalmedian' -> norms each mag to the global median of the LC column\n          'zero'         -> norms each mag to zero\n          a float        -> norms each mag to this specified float value.\n\n    magsarefluxes : bool\n        Indicates if the input `mags` array is actually an array of flux\n        measurements instead of magnitude measurements. If this is set to True,\n        then:\n\n        - if `normto` is 'zero', then the median flux is divided from each\n          observation's flux value to yield normalized fluxes with 1.0 as the\n          global median.\n\n        - if `normto` is 'globalmedian', then the global median flux value\n          across the entire time series is multiplied with each measurement.\n\n        - if `norm` is set to a `float`, then this number is multiplied with the\n          flux value for each measurement.\n\n    debugmode : bool\n        If this is True, will print out verbose info on each timegroup found.\n\n    Returns\n    -------\n\n    times,normalized_mags : np.arrays\n        Normalized magnitude values after normalization. If normalization fails\n        for some reason, `times` and `normalized_mags` will both be None.", "id": "f14742:m1"}
{"signature": "def objectnames_conesearch(racenter,<EOL>declcenter,<EOL>searchradiusarcsec,<EOL>simbad_mirror='<STR_LIT>',<EOL>returnformat='<STR_LIT>',<EOL>forcefetch=False,<EOL>cachedir='<STR_LIT>',<EOL>verbose=True,<EOL>timeout=<NUM_LIT>,<EOL>refresh=<NUM_LIT>,<EOL>maxtimeout=<NUM_LIT>,<EOL>maxtries=<NUM_LIT:1>,<EOL>complete_query_later=True):", "body": "<EOL>query = (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL>formatted_query = query.format(ra_center=racenter,<EOL>decl_center=declcenter,<EOL>search_radius=searchradiusarcsec/<NUM_LIT>)<EOL>return tap_query(formatted_query,<EOL>simbad_mirror=simbad_mirror,<EOL>returnformat=returnformat,<EOL>forcefetch=forcefetch,<EOL>cachedir=cachedir,<EOL>verbose=verbose,<EOL>timeout=timeout,<EOL>refresh=refresh,<EOL>maxtimeout=maxtimeout,<EOL>maxtries=maxtries,<EOL>complete_query_later=complete_query_later)<EOL>", "docstring": "This queries the SIMBAD TAP service for a list of object names near the\n    coords. This is effectively a \"reverse\" name resolver (i.e. this does the\n    opposite of SESAME).\n\n    Parameters\n    ----------\n\n    racenter,declcenter : float\n        The cone-search center coordinates in decimal degrees\n\n    searchradiusarcsec : float\n        The radius in arcseconds to search around the center coordinates.\n\n    simbad_mirror : str\n        This is the key used to select a SIMBAD mirror from the\n        `SIMBAD_URLS` dict above. If set, the specified mirror will be used. 
If\n        None, a random mirror chosen from that dict will be used.\n\n    returnformat : {'csv','votable','json'}\n        The returned file format to request from the GAIA catalog service.\n\n    forcefetch : bool\n        If this is True, the query will be retried even if cached results for\n        it exist.\n\n    cachedir : str\n        This points to the directory where results will be downloaded.\n\n    verbose : bool\n        If True, will indicate progress and warn of any issues.\n\n    timeout : float\n        This sets the amount of time in seconds to wait for the service to\n        respond to our initial request.\n\n    refresh : float\n        This sets the amount of time in seconds to wait before checking if the\n        result file is available. If the results file isn't available after\n        `refresh` seconds have elapsed, the function will wait for `refresh`\n        seconds continuously, until `maxtimeout` is reached or the results file\n        becomes available.\n\n    maxtimeout : float\n        The maximum amount of time in seconds to wait for a result to become\n        available after submitting our query request.\n\n    maxtries : int\n        The maximum number of tries (across all mirrors tried) to make to either\n        submit the request or download the results, before giving up.\n\n    complete_query_later : bool\n        If set to True, a submitted query that does not return a result before\n        `maxtimeout` has passed will be cancelled but its input request\n        parameters and the result URL provided by the service will be saved. If\n        this function is then called later with these same input request\n        parameters, it will check if the query finally finished and a result is\n        available. If so, will download the results instead of submitting a new\n        query. If it's not done yet, will start waiting for results again. 
To\n        force launch a new query with the same request parameters, set the\n        `forcefetch` kwarg to True.\n\n    Returns\n    -------\n\n    dict\n        This returns a dict of the following form::\n\n            {'params':dict of the input params used for the query,\n             'provenance':'cache' or 'new download',\n             'result':path to the file on disk with the downloaded data table}", "id": "f14747:m1"}
{"signature": "def tap_query(querystr,<EOL>simbad_mirror='<STR_LIT>',<EOL>returnformat='<STR_LIT>',<EOL>forcefetch=False,<EOL>cachedir='<STR_LIT>',<EOL>verbose=True,<EOL>timeout=<NUM_LIT>,<EOL>refresh=<NUM_LIT>,<EOL>maxtimeout=<NUM_LIT>,<EOL>maxtries=<NUM_LIT:3>,<EOL>complete_query_later=False,<EOL>jitter=<NUM_LIT>):", "body": "<EOL>inputparams = TAP_PARAMS.copy()<EOL>inputparams['<STR_LIT>'] = querystr[::]<EOL>if returnformat in RETURN_FORMATS:<EOL><INDENT>inputparams['<STR_LIT>'] = returnformat<EOL><DEDENT>else:<EOL><INDENT>LOGWARNING('<STR_LIT>' %<EOL>returnformat)<EOL>inputparams['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT>' in cachedir:<EOL><INDENT>cachedir = os.path.expanduser(cachedir)<EOL><DEDENT>if not os.path.exists(cachedir):<EOL><INDENT>os.makedirs(cachedir)<EOL><DEDENT>xcachekey = '<STR_LIT:->'.join([repr(inputparams[x])<EOL>for x in sorted(inputparams.keys())])<EOL>cachekey = hashlib.sha256(xcachekey.encode()).hexdigest()<EOL>cachefname = os.path.join(<EOL>cachedir,<EOL>'<STR_LIT>' % (cachekey, RETURN_FORMATS[returnformat])<EOL>)<EOL>provenance = '<STR_LIT>'<EOL>incomplete_qpklf = os.path.join(<EOL>cachedir,<EOL>'<STR_LIT>' % cachekey<EOL>)<EOL>if (not forcefetch and<EOL>complete_query_later and<EOL>os.path.exists(incomplete_qpklf)):<EOL><INDENT>with open(incomplete_qpklf, '<STR_LIT:rb>') as infd:<EOL><INDENT>incomplete_qinfo = pickle.load(infd)<EOL><DEDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>waitdone = False<EOL>timeelapsed = <NUM_LIT:0.0><EOL>simbad_mirror = incomplete_qinfo['<STR_LIT>']<EOL>status_url = incomplete_qinfo['<STR_LIT>']<EOL>phasekeyword = incomplete_qinfo['<STR_LIT>']<EOL>resultkeyword = incomplete_qinfo['<STR_LIT>']<EOL>while not waitdone:<EOL><INDENT>if timeelapsed > maxtimeout:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(maxtimeout,<EOL>repr(inputparams),<EOL>status_url))<EOL>return None<EOL><DEDENT>try:<EOL><INDENT>resreq = 
requests.get(status_url,<EOL>timeout=timeout)<EOL>resreq.raise_for_status()<EOL>resxml = parseString(resreq.text)<EOL>jobstatuselem = (<EOL>resxml.getElementsByTagName(phasekeyword)[<NUM_LIT:0>]<EOL>)<EOL>jobstatus = jobstatuselem.firstChild.toxml()<EOL>if jobstatus == '<STR_LIT>':<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>waitdone = True<EOL><DEDENT>elif jobstatus != '<STR_LIT>':<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (timeelapsed, jobstatus, status_url))<EOL><DEDENT>time.sleep(refresh)<EOL>timeelapsed = timeelapsed + refresh<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(status_url,<EOL>resreq.text))<EOL>os.remove(incomplete_qpklf)<EOL>return None<EOL><DEDENT><DEDENT>except requests.exceptions.Timeout as e:<EOL><INDENT>LOGEXCEPTION(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(repr(inputparams), status_url)<EOL>)<EOL>return None<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(repr(inputparams),<EOL>status_url,<EOL>resreq.text)<EOL>)<EOL>os.remove(incomplete_qpklf)<EOL>return None<EOL><DEDENT><DEDENT>LOGINFO('<STR_LIT>')<EOL>result_url_elem = resxml.getElementsByTagName(resultkeyword)[<NUM_LIT:0>]<EOL>result_url = result_url_elem.getAttribute('<STR_LIT>')<EOL>result_nrows = result_url_elem.getAttribute('<STR_LIT>')<EOL>try:<EOL><INDENT>resreq = requests.get(result_url, timeout=timeout)<EOL>resreq.raise_for_status()<EOL>if cachefname.endswith('<STR_LIT>'):<EOL><INDENT>with gzip.open(cachefname,'<STR_LIT:wb>') as outfd:<EOL><INDENT>for chunk in resreq.iter_content(chunk_size=<NUM_LIT>):<EOL><INDENT>outfd.write(chunk)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>with open(cachefname,'<STR_LIT:wb>') as outfd:<EOL><INDENT>for chunk in 
resreq.iter_content(chunk_size=<NUM_LIT>):<EOL><INDENT>outfd.write(chunk)<EOL><DEDENT><DEDENT><DEDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % result_nrows)<EOL><DEDENT>tablefname = cachefname<EOL>provenance = '<STR_LIT>'<EOL>resdict = {'<STR_LIT>':inputparams,<EOL>'<STR_LIT>':provenance,<EOL>'<STR_LIT:result>':tablefname}<EOL>os.remove(incomplete_qpklf)<EOL>return resdict<EOL><DEDENT>except requests.exceptions.Timeout as e:<EOL><INDENT>LOGEXCEPTION(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(repr(inputparams), result_url)<EOL>)<EOL>return None<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(repr(inputparams),<EOL>result_url,<EOL>resreq.status_code)<EOL>)<EOL>os.remove(incomplete_qpklf)<EOL>return None<EOL><DEDENT><DEDENT>if forcefetch or (not os.path.exists(cachefname)):<EOL><INDENT>provenance = '<STR_LIT>'<EOL>time.sleep(random.randint(<NUM_LIT:1>,jitter))<EOL>jobid = '<STR_LIT>' % time.time()<EOL>inputparams['<STR_LIT>'] = jobid<EOL>inputparams['<STR_LIT>'] = '<STR_LIT>'<EOL>try:<EOL><INDENT>waitdone = False<EOL>timeelapsed = <NUM_LIT:0.0><EOL>if simbad_mirror is not None and simbad_mirror in SIMBAD_URLS:<EOL><INDENT>tapurl = SIMBAD_URLS[simbad_mirror]['<STR_LIT:url>']<EOL>resultkeyword = SIMBAD_URLS[simbad_mirror]['<STR_LIT>']<EOL>phasekeyword = SIMBAD_URLS[simbad_mirror]['<STR_LIT>']<EOL>randkey = simbad_mirror<EOL>if '<STR_LIT>' in querystr:<EOL><INDENT>inputparams['<STR_LIT>'] = (<EOL>querystr.format(<EOL>table=SIMBAD_URLS[simbad_mirror]['<STR_LIT>']<EOL>)<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>randkey = random.choice(list(SIMBAD_URLS.keys()))<EOL>tapurl = SIMBAD_URLS[randkey]['<STR_LIT:url>']<EOL>resultkeyword = SIMBAD_URLS[randkey]['<STR_LIT>']<EOL>phasekeyword = SIMBAD_URLS[randkey]['<STR_LIT>']<EOL>if '<STR_LIT>' in querystr:<EOL><INDENT>inputparams['<STR_LIT>'] = 
(<EOL>querystr.format(<EOL>table=SIMBAD_URLS[randkey]['<STR_LIT>']<EOL>)<EOL>)<EOL><DEDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % tapurl)<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'<EOL>% repr(inputparams)<EOL>)<EOL><DEDENT>mirrorok = False<EOL>ntries = <NUM_LIT:1><EOL>while (not mirrorok):<EOL><INDENT>if ntries > maxtries:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>maxtries)<EOL>return None<EOL><DEDENT>try:<EOL><INDENT>req = requests.post(tapurl,<EOL>data=inputparams,<EOL>timeout=timeout)<EOL>resp_status = req.status_code<EOL>req.raise_for_status()<EOL>mirrorok = True<EOL><DEDENT>except requests.exceptions.HTTPError as e:<EOL><INDENT>LOGWARNING(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% tapurl<EOL>)<EOL>mirrorok = False<EOL>remainingmirrors = list(SIMBAD_URLS.keys())<EOL>waittime = random.choice(range(<NUM_LIT:1>,<NUM_LIT:6>))<EOL>time.sleep(waittime)<EOL>randkey = remainingmirrors[<NUM_LIT:0>]<EOL>tapurl = SIMBAD_URLS[randkey]['<STR_LIT:url>']<EOL>resultkeyword = SIMBAD_URLS[randkey]['<STR_LIT>']<EOL>phasekeyword = SIMBAD_URLS[randkey]['<STR_LIT>']<EOL>if '<STR_LIT>' in querystr:<EOL><INDENT>inputparams['<STR_LIT>'] = (<EOL>querystr.format(<EOL>table=SIMBAD_URLS[randkey]['<STR_LIT>']<EOL>)<EOL>)<EOL><DEDENT><DEDENT>except requests.exceptions.Timeout as e:<EOL><INDENT>LOGWARNING(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL>mirrorok = False<EOL>remainingmirrors = list(SIMBAD_URLS.keys())<EOL>waittime = random.choice(range(<NUM_LIT:1>,<NUM_LIT:6>))<EOL>time.sleep(waittime)<EOL>randkey = remainingmirrors[<NUM_LIT:0>]<EOL>tapurl = SIMBAD_URLS[randkey]['<STR_LIT:url>']<EOL>resultkeyword = SIMBAD_URLS[randkey]['<STR_LIT>']<EOL>phasekeyword = SIMBAD_URLS[randkey]['<STR_LIT>']<EOL>if '<STR_LIT>' in querystr:<EOL><INDENT>inputparams['<STR_LIT>'] = (<EOL>querystr.format(<EOL>table=SIMBAD_URLS[randkey]['<STR_LIT>']<EOL>)<EOL>)<EOL><DEDENT><DEDENT>ntries = ntries + <NUM_LIT:1><EOL><DEDENT>status_url = req.url<EOL>resxml = 
parseString(req.text)<EOL>jobstatuselem = resxml.getElementsByTagName(phasekeyword)<EOL>if jobstatuselem:<EOL><INDENT>jobstatuselem = jobstatuselem[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>' % phasekeyword)<EOL>LOGERROR('<STR_LIT:%s>' % req.txt)<EOL>req.close()<EOL>return None<EOL><DEDENT>jobstatus = jobstatuselem.firstChild.toxml()<EOL>if jobstatus == '<STR_LIT>':<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>waitdone = True<EOL><DEDENT><DEDENT>elif jobstatus == '<STR_LIT>':<EOL><INDENT>if verbose:<EOL><INDENT>LOGERROR(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(repr(inputparams),<EOL>status_url,<EOL>req.text)<EOL>)<EOL>return None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % jobstatus<EOL>)<EOL><DEDENT>while not waitdone:<EOL><INDENT>if timeelapsed > maxtimeout:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(maxtimeout,<EOL>repr(inputparams),<EOL>status_url,<EOL>jobstatus))<EOL>if complete_query_later and jobstatus in ('<STR_LIT>',<EOL>'<STR_LIT>'):<EOL><INDENT>incomplete_qpklf = os.path.join(<EOL>cachedir,<EOL>'<STR_LIT>' % cachekey<EOL>)<EOL>with open(incomplete_qpklf, '<STR_LIT:wb>') as outfd:<EOL><INDENT>savedict = inputparams.copy()<EOL>savedict['<STR_LIT>'] = status_url<EOL>savedict['<STR_LIT>'] = jobstatus<EOL>savedict['<STR_LIT>'] = simbad_mirror<EOL>savedict['<STR_LIT>'] = phasekeyword<EOL>savedict['<STR_LIT>'] = resultkeyword<EOL>pickle.dump(savedict,<EOL>outfd,<EOL>pickle.HIGHEST_PROTOCOL)<EOL><DEDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>jobstatus)<EOL><DEDENT>return None<EOL><DEDENT>time.sleep(refresh)<EOL>timeelapsed = timeelapsed + refresh<EOL>try:<EOL><INDENT>resreq = requests.get(status_url, timeout=timeout)<EOL>resreq.raise_for_status()<EOL>resxml = parseString(resreq.text)<EOL>jobstatuselem = 
(<EOL>resxml.getElementsByTagName(phasekeyword)[<NUM_LIT:0>]<EOL>)<EOL>jobstatus = jobstatuselem.firstChild.toxml()<EOL>if jobstatus == '<STR_LIT>':<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>waitdone = True<EOL><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (timeelapsed, jobstatus, status_url))<EOL><DEDENT>continue<EOL><DEDENT><DEDENT>except requests.exceptions.Timeout as e:<EOL><INDENT>LOGEXCEPTION(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(repr(inputparams), status_url)<EOL>)<EOL>return None<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(repr(inputparams),<EOL>status_url,<EOL>resreq.text)<EOL>)<EOL>return None<EOL><DEDENT><DEDENT><DEDENT>result_url_elem = resxml.getElementsByTagName(resultkeyword)[<NUM_LIT:0>]<EOL>result_url = result_url_elem.getAttribute('<STR_LIT>')<EOL>result_nrows = result_url_elem.getAttribute('<STR_LIT>')<EOL>try:<EOL><INDENT>resreq = requests.get(result_url, timeout=timeout)<EOL>resreq.raise_for_status()<EOL>if cachefname.endswith('<STR_LIT>'):<EOL><INDENT>with gzip.open(cachefname,'<STR_LIT:wb>') as outfd:<EOL><INDENT>for chunk in resreq.iter_content(chunk_size=<NUM_LIT>):<EOL><INDENT>outfd.write(chunk)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>with open(cachefname,'<STR_LIT:wb>') as outfd:<EOL><INDENT>for chunk in resreq.iter_content(chunk_size=<NUM_LIT>):<EOL><INDENT>outfd.write(chunk)<EOL><DEDENT><DEDENT><DEDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % result_nrows)<EOL><DEDENT>tablefname = cachefname<EOL><DEDENT>except requests.exceptions.Timeout as e:<EOL><INDENT>LOGEXCEPTION(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(repr(inputparams), result_url)<EOL>)<EOL>return None<EOL><DEDENT>except Exception as 
e:<EOL><INDENT>LOGEXCEPTION(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(repr(inputparams),<EOL>result_url,<EOL>resreq.status_code)<EOL>)<EOL>return None<EOL><DEDENT><DEDENT>except requests.exceptions.HTTPError as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>'<EOL>'<STR_LIT>' % (resp_status,<EOL>repr(inputparams)))<EOL>return None<EOL><DEDENT>except requests.exceptions.Timeout as e:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT:%s>' % repr(inputparams))<EOL>return None<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>'<EOL>'<STR_LIT:%s>' % repr(inputparams))<EOL>if '<STR_LIT>' in locals():<EOL><INDENT>LOGERROR('<STR_LIT>' % req.text)<EOL><DEDENT>return None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(repr(inputparams)))<EOL><DEDENT>tablefname = cachefname<EOL>try:<EOL><INDENT>infd = gzip.open(cachefname,'<STR_LIT:rb>')<EOL>simbad_objectnames = np.genfromtxt(<EOL>infd,<EOL>names=True,<EOL>delimiter='<STR_LIT:U+002C>',<EOL>dtype='<STR_LIT>',<EOL>usecols=(<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:2>,<NUM_LIT:3>,<NUM_LIT:4>,<NUM_LIT:5>,<NUM_LIT:6>,<NUM_LIT:7>,<NUM_LIT:8>),<EOL>comments='<STR_LIT:?>',  <EOL>)<EOL>infd.close()<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>'<EOL>'<STR_LIT>' % cachefname)<EOL>return tap_query(querystr,<EOL>simbad_mirror=simbad_mirror,<EOL>returnformat=returnformat,<EOL>forcefetch=True,<EOL>cachedir=cachedir,<EOL>verbose=verbose,<EOL>timeout=timeout,<EOL>refresh=refresh,<EOL>maxtimeout=maxtimeout)<EOL><DEDENT><DEDENT>resdict = {'<STR_LIT>':inputparams,<EOL>'<STR_LIT>':provenance,<EOL>'<STR_LIT:result>':tablefname}<EOL>return resdict<EOL>", "docstring": "This queries the SIMBAD TAP service using the ADQL query string provided.\n\n    Parameters\n    ----------\n\n    querystr : str\n        This is the ADQL query string. 
See:\n        http://www.ivoa.net/documents/ADQL/2.0 for the specification.\n\n    simbad_mirror : str\n        This is the key used to select a SIMBAD mirror from the\n        `SIMBAD_URLS` dict above. If set, the specified mirror will be used. If\n        None, a random mirror chosen from that dict will be used.\n\n    returnformat : {'csv','votable','json'}\n        The returned file format to request from the GAIA catalog service.\n\n    forcefetch : bool\n        If this is True, the query will be retried even if cached results for\n        it exist.\n\n    cachedir : str\n        This points to the directory where results will be downloaded.\n\n    verbose : bool\n        If True, will indicate progress and warn of any issues.\n\n    timeout : float\n        This sets the amount of time in seconds to wait for the service to\n        respond to our initial request.\n\n    refresh : float\n        This sets the amount of time in seconds to wait before checking if the\n        result file is available. If the results file isn't available after\n        `refresh` seconds have elapsed, the function will wait for `refresh`\n        seconds continuously, until `maxtimeout` is reached or the results file\n        becomes available.\n\n    maxtimeout : float\n        The maximum amount of time in seconds to wait for a result to become\n        available after submitting our query request.\n\n    maxtries : int\n        The maximum number of tries (across all mirrors tried) to make to either\n        submit the request or download the results, before giving up.\n\n    complete_query_later : bool\n        If set to True, a submitted query that does not return a result before\n        `maxtimeout` has passed will be cancelled but its input request\n        parameters and the result URL provided by the service will be saved. 
If\n        this function is then called later with these same input request\n        parameters, it will check if the query finally finished and a result is\n        available. If so, will download the results instead of submitting a new\n        query. If it's not done yet, will start waiting for results again. To\n        force launch a new query with the same request parameters, set the\n        `forcefetch` kwarg to True.\n\n    jitter : float\n        This is used to control the scale of the random wait in seconds before\n        starting the query. Useful in parallelized situations.\n\n    Returns\n    -------\n\n    dict\n        This returns a dict of the following form::\n\n            {'params':dict of the input params used for the query,\n             'provenance':'cache' or 'new download',\n             'result':path to the file on disk with the downloaded data table}", "id": "f14747:m0"}
{"signature": "def query_radecl(ra,<EOL>decl,<EOL>filtersystem='<STR_LIT>',<EOL>field_deg2=<NUM_LIT:1.0>,<EOL>usebinaries=True,<EOL>extinction_sigma=<NUM_LIT:0.1>,<EOL>magnitude_limit=<NUM_LIT>,<EOL>maglim_filtercol=<NUM_LIT:4>,<EOL>trilegal_version=<NUM_LIT>,<EOL>extraparams=None,<EOL>forcefetch=False,<EOL>cachedir='<STR_LIT>',<EOL>verbose=True,<EOL>timeout=<NUM_LIT>,<EOL>refresh=<NUM_LIT>,<EOL>maxtimeout=<NUM_LIT>):", "body": "<EOL>radecl = SkyCoord(ra=ra*u.degree, dec=decl*u.degree)<EOL>gl = radecl.galactic.l.degree<EOL>gb = radecl.galactic.b.degree<EOL>return query_galcoords(gl,<EOL>gb,<EOL>filtersystem=filtersystem,<EOL>field_deg2=field_deg2,<EOL>usebinaries=usebinaries,<EOL>extinction_sigma=extinction_sigma,<EOL>magnitude_limit=magnitude_limit,<EOL>maglim_filtercol=maglim_filtercol,<EOL>trilegal_version=trilegal_version,<EOL>extraparams=extraparams,<EOL>forcefetch=forcefetch,<EOL>cachedir=cachedir,<EOL>verbose=verbose,<EOL>timeout=timeout,<EOL>refresh=refresh,<EOL>maxtimeout=maxtimeout)<EOL>", "docstring": "This runs the TRILEGAL query for decimal equatorial coordinates.\n\n    Parameters\n    ----------\n\n    ra,decl : float\n        These are the center equatorial coordinates in decimal degrees\n\n    filtersystem : str\n        This is a key in the TRILEGAL_FILTER_SYSTEMS dict. Use the function\n        :py:func:`astrobase.services.trilegal.list_trilegal_filtersystems` to\n        see a nicely formatted table with the key and description for each of\n        these.\n\n    field_deg2 : float\n        The area of the simulated field in square degrees. 
This is in the\n        Galactic coordinate system.\n\n    usebinaries : bool\n        If this is True, binaries will be present in the model results.\n\n    extinction_sigma : float\n        This is the applied std dev around the `Av_extinction` value for the\n        galactic coordinates requested.\n\n    magnitude_limit : float\n        This is the limiting magnitude of the simulation in the\n        `maglim_filtercol` band index of the filter system chosen.\n\n    maglim_filtercol : int\n        The index in the filter system list of the magnitude limiting band.\n\n    trilegal_version : float\n        This is the the version of the TRILEGAL form to use. This can usually be\n        left as-is.\n\n    extraparams : dict or None\n        This is a dict that can be used to override parameters of the model\n        other than the basic ones used for input to this function. All\n        parameters are listed in `TRILEGAL_DEFAULT_PARAMS` above. See:\n\n        http://stev.oapd.inaf.it/cgi-bin/trilegal\n\n        for explanations of these parameters.\n\n    forcefetch : bool\n        If this is True, the query will be retried even if cached results for\n        it exist.\n\n    cachedir : str\n        This points to the directory where results will be downloaded.\n\n    verbose : bool\n        If True, will indicate progress and warn of any issues.\n\n    timeout : float\n        This sets the amount of time in seconds to wait for the service to\n        respond to our initial request.\n\n    refresh : float\n        This sets the amount of time in seconds to wait before checking if the\n        result file is available. 
If the results file isn't available after\n        `refresh` seconds have elapsed, the function will wait for `refresh`\n        seconds continuously, until `maxtimeout` is reached or the results file\n        becomes available.\n\n    maxtimeout : float\n        The maximum amount of time in seconds to wait for a result to become\n        available after submitting our query request.\n\n    Returns\n    -------\n\n    dict\n        This returns a dict of the form::\n\n            {'params':the input param dict used,\n             'extraparams':any extra params used,\n             'provenance':'cached' or 'new download',\n             'tablefile':the path on disk to the downloaded model text file}", "id": "f14749:m2"}
{"signature": "def query_galcoords(gal_lon,<EOL>gal_lat,<EOL>filtersystem='<STR_LIT>',<EOL>field_deg2=<NUM_LIT:1.0>,<EOL>usebinaries=True,<EOL>extinction_sigma=<NUM_LIT:0.1>,<EOL>magnitude_limit=<NUM_LIT>,<EOL>maglim_filtercol=<NUM_LIT:4>,<EOL>trilegal_version=<NUM_LIT>,<EOL>extraparams=None,<EOL>forcefetch=False,<EOL>cachedir='<STR_LIT>',<EOL>verbose=True,<EOL>timeout=<NUM_LIT>,<EOL>refresh=<NUM_LIT>,<EOL>maxtimeout=<NUM_LIT>):", "body": "<EOL>inputparams = copy.deepcopy(TRILEGAL_INPUT_PARAMS)<EOL>inputparams['<STR_LIT>'] = '<STR_LIT:1>' if usebinaries else '<STR_LIT:0>'<EOL>inputparams['<STR_LIT>'] = '<STR_LIT>' % extinction_sigma<EOL>inputparams['<STR_LIT>'] = '<STR_LIT>' % field_deg2<EOL>inputparams['<STR_LIT>'] = str(maglim_filtercol)<EOL>inputparams['<STR_LIT>'] = '<STR_LIT>' % magnitude_limit<EOL>inputparams['<STR_LIT>'] = str(trilegal_version)<EOL>inputparams['<STR_LIT>'] = '<STR_LIT>' % gal_lon<EOL>inputparams['<STR_LIT>'] = '<STR_LIT>' % gal_lat<EOL>if field_deg2 > <NUM_LIT>:<EOL><INDENT>LOGERROR(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>extinction_info = dust.extinction_query(gal_lon,<EOL>gal_lat,<EOL>coordtype='<STR_LIT>',<EOL>forcefetch=forcefetch,<EOL>verbose=verbose,<EOL>timeout=timeout)<EOL>try:<EOL><INDENT>Av_infinity = extinction_info['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']<EOL>inputparams['<STR_LIT>'] = '<STR_LIT>' % Av_infinity<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % (gal_lon, gal_lat,<EOL>inputparams['<STR_LIT>'])<EOL>)<EOL><DEDENT>if filtersystem in TRILEGAL_FILTER_SYSTEMS:<EOL><INDENT>inputparams['<STR_LIT>'] = (<EOL>TRILEGAL_FILTER_SYSTEMS[filtersystem]['<STR_LIT>']<EOL>)<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % filtersystem)<EOL>return None<EOL><DEDENT>trilegal_params = copy.deepcopy(TRILEGAL_DEFAULT_PARAMS)<EOL>trilegal_params.update(inputparams)<EOL>if extraparams and isinstance(extraparams, 
dict):<EOL><INDENT>trilegal_params.update(extraparams)<EOL><DEDENT>if '<STR_LIT>' in cachedir:<EOL><INDENT>cachedir = os.path.expanduser(cachedir)<EOL><DEDENT>if not os.path.exists(cachedir):<EOL><INDENT>os.makedirs(cachedir)<EOL><DEDENT>cachekey = repr(inputparams)<EOL>cachekey = hashlib.sha256(cachekey.encode()).hexdigest()<EOL>cachefname = os.path.join(cachedir, '<STR_LIT>' % cachekey)<EOL>provenance = '<STR_LIT>'<EOL>lockfile = os.path.join(cachedir, '<STR_LIT>' % cachekey)<EOL>if forcefetch or (not os.path.exists(cachefname)):<EOL><INDENT>if os.path.exists(lockfile):<EOL><INDENT>with open(lockfile,'<STR_LIT:r>') as infd:<EOL><INDENT>lock_contents = infd.read()<EOL><DEDENT>lock_contents = lock_contents.replace('<STR_LIT:\\n>','<STR_LIT>')<EOL>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>lock_contents)<EOL>return None<EOL><DEDENT>else:<EOL><INDENT>with open(lockfile,'<STR_LIT:w>') as outfd:<EOL><INDENT>outfd.write(datetime.utcnow().isoformat())<EOL><DEDENT><DEDENT>provenance = '<STR_LIT>'<EOL>try:<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>% repr(inputparams))<EOL><DEDENT>posturl = TRILEGAL_POSTURL.format(formversion=trilegal_version)<EOL>req = requests.post(posturl,<EOL>data=trilegal_params,<EOL>timeout=timeout)<EOL>resp = req.text<EOL>resultfile = TRILEGAL_REGEX.search(resp)<EOL>if resultfile:<EOL><INDENT>resultfile = resultfile[<NUM_LIT:0>]<EOL>waitdone = False<EOL>timeelapsed = <NUM_LIT:0.0><EOL>resultfileurl = '<STR_LIT>' % (<EOL>TRILEGAL_BASEURL,<EOL>resultfile.replace('<STR_LIT>','<STR_LIT>')<EOL>)<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>while not waitdone:<EOL><INDENT>if timeelapsed > maxtimeout:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT:%s>' % repr(inputparams))<EOL>if os.path.exists(lockfile):<EOL><INDENT>os.remove(lockfile)<EOL><DEDENT>return None<EOL><DEDENT>time.sleep(refresh)<EOL>timeelapsed = timeelapsed + refresh<EOL>try:<EOL><INDENT>resreq = 
requests.get(resultfileurl)<EOL>resreq.raise_for_status()<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>')<EOL><DEDENT>with gzip.open(cachefname,'<STR_LIT:wb>') as outfd:<EOL><INDENT>for chunk in resreq.iter_content(chunk_size=<NUM_LIT>):<EOL><INDENT>outfd.write(chunk)<EOL><DEDENT><DEDENT>tablefname = cachefname<EOL>waitdone = True<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>')<EOL><DEDENT><DEDENT>except Exception as e:<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (timeelapsed, resultfileurl))<EOL><DEDENT>continue<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>LOGINFO(resp)<EOL>if os.path.exists(lockfile):<EOL><INDENT>os.remove(lockfile)<EOL><DEDENT>return None<EOL><DEDENT><DEDENT>except requests.exceptions.Timeout as e:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT:%s>' % repr(inputparams))<EOL>return None<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>'<EOL>'<STR_LIT:%s>' % repr(inputparams))<EOL>return None<EOL><DEDENT>finally:<EOL><INDENT>if os.path.exists(lockfile):<EOL><INDENT>os.remove(lockfile)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(repr(inputparams)))<EOL><DEDENT>tablefname = cachefname<EOL><DEDENT>resdict = {'<STR_LIT>':inputparams,<EOL>'<STR_LIT>':extraparams,<EOL>'<STR_LIT>':provenance,<EOL>'<STR_LIT>':tablefname}<EOL>return resdict<EOL>", "docstring": "This queries the TRILEGAL model form, downloads results, and parses them.\n\n    Parameters\n    ----------\n\n    gal_lon,gal_lat : float\n        These are the center galactic longitude and latitude in degrees.\n\n    filtersystem : str\n        This is a key in the TRILEGAL_FILTER_SYSTEMS dict. 
Use the function\n        :py:func:`astrobase.services.trilegal.list_trilegal_filtersystems` to\n        see a nicely formatted table with the key and description for each of\n        these.\n\n    field_deg2 : float\n        The area of the simulated field in square degrees.\n\n    usebinaries : bool\n        If this is True, binaries will be present in the model results.\n\n    extinction_sigma : float\n        This is the applied std dev around the `Av_extinction` value for the\n        galactic coordinates requested.\n\n    magnitude_limit : float\n        This is the limiting magnitude of the simulation in the\n        `maglim_filtercol` band index of the filter system chosen.\n\n    maglim_filtercol : int\n        The index in the filter system list of the magnitude limiting band.\n\n    trilegal_version : float\n        This is the the version of the TRILEGAL form to use. This can usually be\n        left as-is.\n\n    extraparams : dict or None\n        This is a dict that can be used to override parameters of the model\n        other than the basic ones used for input to this function. All\n        parameters are listed in `TRILEGAL_DEFAULT_PARAMS` above. See:\n\n        http://stev.oapd.inaf.it/cgi-bin/trilegal\n\n        for explanations of these parameters.\n\n    forcefetch : bool\n        If this is True, the query will be retried even if cached results for\n        it exist.\n\n    cachedir : str\n        This points to the directory where results will be downloaded.\n\n    verbose : bool\n        If True, will indicate progress and warn of any issues.\n\n    timeout : float\n        This sets the amount of time in seconds to wait for the service to\n        respond to our initial request.\n\n    refresh : float\n        This sets the amount of time in seconds to wait before checking if the\n        result file is available. 
If the results file isn't available after\n        `refresh` seconds have elapsed, the function will wait for `refresh`\n        seconds continuously, until `maxtimeout` is reached or the results file\n        becomes available.\n\n    maxtimeout : float\n        The maximum amount of time in seconds to wait for a result to become\n        available after submitting our query request.\n\n    Returns\n    -------\n\n    dict\n        This returns a dict of the form::\n\n            {'params':the input param dict used,\n             'extraparams':any extra params used,\n             'provenance':'cached' or 'new download',\n             'tablefile':the path on disk to the downloaded model text file}", "id": "f14749:m1"}
{"signature": "def read_model_table(modelfile):", "body": "infd = gzip.open(modelfile)<EOL>model = np.genfromtxt(infd,names=True)<EOL>infd.close()<EOL>return model<EOL>", "docstring": "This reads a downloaded TRILEGAL model file.\n\nParameters\n----------\n\nmodelfile : str\n    Path to the downloaded model file to read.\n\nReturns\n-------\n\nnp.recarray\n    Returns the model table as a Numpy record array.", "id": "f14749:m3"}
{"signature": "def tic_xmatch(<EOL>ra,<EOL>decl,<EOL>radius_arcsec=<NUM_LIT>,<EOL>apiversion='<STR_LIT>',<EOL>forcefetch=False,<EOL>cachedir='<STR_LIT>',<EOL>verbose=True,<EOL>timeout=<NUM_LIT>,<EOL>refresh=<NUM_LIT>,<EOL>maxtimeout=<NUM_LIT>,<EOL>maxtries=<NUM_LIT:3>,<EOL>jitter=<NUM_LIT>,<EOL>raiseonfail=False<EOL>):", "body": "service = '<STR_LIT>'<EOL>xmatch_input = {'<STR_LIT>':[{'<STR_LIT:name>':'<STR_LIT>','<STR_LIT:type>':'<STR_LIT:float>'},<EOL>{'<STR_LIT:name>':'<STR_LIT>','<STR_LIT:type>':'<STR_LIT:float>'}]}<EOL>xmatch_input['<STR_LIT:data>'] = [{'<STR_LIT>':x, '<STR_LIT>':y} for (x,y) in zip(ra, decl)]<EOL>params = {'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':radius_arcsec/<NUM_LIT>}<EOL>return mast_query(service,<EOL>params,<EOL>data=xmatch_input,<EOL>jitter=jitter,<EOL>apiversion=apiversion,<EOL>forcefetch=forcefetch,<EOL>cachedir=cachedir,<EOL>verbose=verbose,<EOL>timeout=timeout,<EOL>refresh=refresh,<EOL>maxtimeout=maxtimeout,<EOL>maxtries=maxtries,<EOL>raiseonfail=raiseonfail)<EOL>", "docstring": "This does a cross-match with TIC.\n\n    Parameters\n    ----------\n\n    ra,decl : np.arrays or lists of floats\n        The coordinates that will be cross-matched against the TIC.\n\n    radius_arcsec : float\n        The cross-match radius in arcseconds.\n\n    apiversion : str\n        The API version of the MAST service to use. 
This sets the URL that this\n        function will call, using `apiversion` as key into the `MAST_URLS` dict\n        above.\n\n    forcefetch : bool\n        If this is True, the query will be retried even if cached results for\n        it exist.\n\n    cachedir : str\n        This points to the directory where results will be downloaded.\n\n    verbose : bool\n        If True, will indicate progress and warn of any issues.\n\n    timeout : float\n        This sets the amount of time in seconds to wait for the service to\n        respond to our initial request.\n\n    refresh : float\n        This sets the amount of time in seconds to wait before checking if the\n        result file is available. If the results file isn't available after\n        `refresh` seconds have elapsed, the function will wait for `refresh`\n        seconds continuously, until `maxtimeout` is reached or the results file\n        becomes available.\n\n    maxtimeout : float\n        The maximum amount of time in seconds to wait for a result to become\n        available after submitting our query request.\n\n    maxtries : int\n        The maximum number of tries (across all mirrors tried) to make to either\n        submit the request or download the results, before giving up.\n\n    jitter : float\n        This is used to control the scale of the random wait in seconds before\n        starting the query. Useful in parallelized situations.\n\n    raiseonfail : bool\n        If this is True, the function will raise an Exception if something goes\n        wrong, instead of returning None.\n\n    Returns\n    -------\n\n    dict\n        This returns a dict of the following form::\n\n            {'params':dict of the input params used for the query,\n             'provenance':'cache' or 'new download',\n             'result':path to the file on disk with the downloaded data table}", "id": "f14750:m2"}
{"signature": "def tic_conesearch(<EOL>ra,<EOL>decl,<EOL>radius_arcmin=<NUM_LIT>,<EOL>apiversion='<STR_LIT>',<EOL>forcefetch=False,<EOL>cachedir='<STR_LIT>',<EOL>verbose=True,<EOL>timeout=<NUM_LIT>,<EOL>refresh=<NUM_LIT>,<EOL>maxtimeout=<NUM_LIT>,<EOL>maxtries=<NUM_LIT:3>,<EOL>jitter=<NUM_LIT>,<EOL>raiseonfail=False<EOL>):", "body": "params = {'<STR_LIT>':ra,<EOL>'<STR_LIT>':decl,<EOL>'<STR_LIT>':radius_arcmin/<NUM_LIT>}<EOL>service = '<STR_LIT>'<EOL>return mast_query(service,<EOL>params,<EOL>jitter=jitter,<EOL>apiversion=apiversion,<EOL>forcefetch=forcefetch,<EOL>cachedir=cachedir,<EOL>verbose=verbose,<EOL>timeout=timeout,<EOL>refresh=refresh,<EOL>maxtimeout=maxtimeout,<EOL>maxtries=maxtries,<EOL>raiseonfail=raiseonfail)<EOL>", "docstring": "This runs a TESS Input Catalog cone search on MAST.\n\n    If you use this, please cite the TIC paper (Stassun et al 2018;\n    http://adsabs.harvard.edu/abs/2018AJ....156..102S). Also see the \"living\"\n    TESS input catalog docs:\n\n    https://docs.google.com/document/d/1zdiKMs4Ld4cXZ2DW4lMX-fuxAF6hPHTjqjIwGqnfjqI\n\n    Also see: https://mast.stsci.edu/api/v0/_t_i_cfields.html for the fields\n    returned by the service and present in the result JSON file.\n\n    Parameters\n    ----------\n\n    ra,decl : float\n        The center coordinates of the cone-search in decimal degrees.\n\n    radius_arcmin : float\n        The cone-search radius in arcminutes.\n\n    apiversion : str\n        The API version of the MAST service to use. 
This sets the URL that this\n        function will call, using `apiversion` as key into the `MAST_URLS` dict\n        above.\n\n    forcefetch : bool\n        If this is True, the query will be retried even if cached results for\n        it exist.\n\n    cachedir : str\n        This points to the directory where results will be downloaded.\n\n    verbose : bool\n        If True, will indicate progress and warn of any issues.\n\n    timeout : float\n        This sets the amount of time in seconds to wait for the service to\n        respond to our initial request.\n\n    refresh : float\n        This sets the amount of time in seconds to wait before checking if the\n        result file is available. If the results file isn't available after\n        `refresh` seconds have elapsed, the function will wait for `refresh`\n        seconds continuously, until `maxtimeout` is reached or the results file\n        becomes available.\n\n    maxtimeout : float\n        The maximum amount of time in seconds to wait for a result to become\n        available after submitting our query request.\n\n    maxtries : int\n        The maximum number of tries (across all mirrors tried) to make to either\n        submit the request or download the results, before giving up.\n\n    jitter : float\n        This is used to control the scale of the random wait in seconds before\n        starting the query. Useful in parallelized situations.\n\n    raiseonfail : bool\n        If this is True, the function will raise an Exception if something goes\n        wrong, instead of returning None.\n\n    Returns\n    -------\n\n    dict\n        This returns a dict of the following form::\n\n            {'params':dict of the input params used for the query,\n             'provenance':'cache' or 'new download',\n             'result':path to the file on disk with the downloaded data table}", "id": "f14750:m1"}
{"signature": "def objectlist_radeclbox(radeclbox,<EOL>gaia_mirror=None,<EOL>columns=('<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT:l>','<STR_LIT:b>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>'),<EOL>extra_filter=None,<EOL>returnformat='<STR_LIT>',<EOL>forcefetch=False,<EOL>cachedir='<STR_LIT>',<EOL>verbose=True,<EOL>timeout=<NUM_LIT>,<EOL>refresh=<NUM_LIT>,<EOL>maxtimeout=<NUM_LIT>,<EOL>maxtries=<NUM_LIT:3>,<EOL>complete_query_later=True):", "body": "<EOL>query = (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL>ra_min, ra_max, decl_min, decl_max = radeclbox<EOL>ra_center = (ra_max + ra_min)/<NUM_LIT><EOL>decl_center = (decl_max + decl_min)/<NUM_LIT><EOL>ra_width = ra_max - ra_min<EOL>decl_height = decl_max - decl_min<EOL>if extra_filter is not None:<EOL><INDENT>extra_filter_str = '<STR_LIT>' % extra_filter<EOL><DEDENT>else:<EOL><INDENT>extra_filter_str = '<STR_LIT>'<EOL><DEDENT>formatted_query = query.format(columns='<STR_LIT:U+002CU+0020>'.join(columns),<EOL>extra_filter_str=extra_filter_str,<EOL>ra_center=ra_center,<EOL>decl_center=decl_center,<EOL>ra_width=ra_width,<EOL>decl_height=decl_height)<EOL>return tap_query(formatted_query,<EOL>gaia_mirror=gaia_mirror,<EOL>returnformat=returnformat,<EOL>forcefetch=forcefetch,<EOL>cachedir=cachedir,<EOL>verbose=verbose,<EOL>timeout=timeout,<EOL>refresh=refresh,<EOL>maxtimeout=maxtimeout,<EOL>maxtries=maxtries,<EOL>complete_query_later=complete_query_later)<EOL>", "docstring": "This queries the GAIA TAP service for a list of objects in an equatorial\n    coordinate box.\n\n    Parameters\n    ----------\n\n    radeclbox : sequence of four floats\n        This defines the box to search in::\n\n            [ra_min, ra_max, decl_min, decl_max]\n\n    gaia_mirror : {'gaia','heidelberg','vizier'} or None\n        This is the key used to select a GAIA catalog mirror from the\n        `GAIA_URLS` dict above. 
If set, the specified mirror will be used. If\n        None, a random mirror chosen from that dict will be used.\n\n    columns : sequence of str\n        This indicates which columns from the GAIA table to request for the\n        objects found within the search radius.\n\n    extra_filter: str or None\n        If this is provided, must be a valid ADQL filter string that is used to\n        further filter the cone-search results.\n\n    returnformat : {'csv','votable','json'}\n        The returned file format to request from the GAIA catalog service.\n\n    forcefetch : bool\n        If this is True, the query will be retried even if cached results for\n        it exist.\n\n    cachedir : str\n        This points to the directory where results will be downloaded.\n\n    verbose : bool\n        If True, will indicate progress and warn of any issues.\n\n    timeout : float\n        This sets the amount of time in seconds to wait for the service to\n        respond to our initial request.\n\n    refresh : float\n        This sets the amount of time in seconds to wait before checking if the\n        result file is available. If the results file isn't available after\n        `refresh` seconds have elapsed, the function will wait for `refresh`\n        seconds continuously, until `maxtimeout` is reached or the results file\n        becomes available.\n\n    maxtimeout : float\n        The maximum amount of time in seconds to wait for a result to become\n        available after submitting our query request.\n\n    maxtries : int\n        The maximum number of tries (across all mirrors tried) to make to either\n        submit the request or download the results, before giving up.\n\n    completequerylater : bool\n        If set to True, a submitted query that does not return a result before\n        `maxtimeout` has passed will be cancelled but its input request\n        parameters and the result URL provided by the service will be saved. 
If\n        this function is then called later with these same input request\n        parameters, it will check if the query finally finished and a result is\n        available. If so, will download the results instead of submitting a new\n        query. If it's not done yet, will start waiting for results again. To\n        force launch a new query with the same request parameters, set the\n        `forcefetch` kwarg to True.\n\n    Returns\n    -------\n\n    dict\n        This returns a dict of the following form::\n\n            {'params':dict of the input params used for the query,\n             'provenance':'cache' or 'new download',\n             'result':path to the file on disk with the downloaded data table}", "id": "f14751:m2"}
{"signature": "def get_dataset(lcc_server,<EOL>dataset_id,<EOL>strformat=False,<EOL>page=<NUM_LIT:1>):", "body": "urlparams = {'<STR_LIT>':<NUM_LIT:1> if strformat else <NUM_LIT:0>,<EOL>'<STR_LIT>':page,<EOL>'<STR_LIT>':<NUM_LIT:1>}<EOL>urlqs = urlencode(urlparams)<EOL>dataset_url = '<STR_LIT>' % (lcc_server, dataset_id, urlqs)<EOL>LOGINFO('<STR_LIT>' % (lcc_server,<EOL>dataset_id,<EOL>dataset_url))<EOL>try:<EOL><INDENT>have_apikey, apikey, expires = check_existing_apikey(lcc_server)<EOL>if not have_apikey:<EOL><INDENT>apikey, expires = get_new_apikey(lcc_server)<EOL><DEDENT>if apikey:<EOL><INDENT>headers = {'<STR_LIT>':'<STR_LIT>' % apikey}<EOL><DEDENT>else:<EOL><INDENT>headers = {}<EOL><DEDENT>req = Request(dataset_url, data=None, headers=headers)<EOL>resp = urlopen(req)<EOL>dataset = json.loads(resp.read())<EOL>return dataset<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>return None<EOL><DEDENT>", "docstring": "This downloads a JSON form of a dataset from the specified lcc_server.\n\n    If the dataset contains more than 1000 rows, it will be paginated, so you\n    must use the `page` kwarg to get the page you want. The dataset JSON will\n    contain the keys 'npages', 'currpage', and 'rows_per_page' to help with\n    this. The 'rows' key contains the actual data rows as a list of tuples.\n\n    The JSON contains metadata about the query that produced the dataset,\n    information about the data table's columns, and links to download the\n    dataset's products including the light curve ZIP and the dataset CSV.\n\n    Parameters\n    ----------\n\n    lcc_server : str\n        This is the base URL of the LCC-Server to talk to.\n\n    dataset_id : str\n        This is the unique setid of the dataset you want to get. 
In the results\n        from the `*_search` functions above, this is the value of the\n        `infodict['result']['setid']` key in the first item (the infodict) in\n        the returned tuple.\n\n    strformat : bool\n        This sets if you want the returned data rows to be formatted in their\n        string representations already. This can be useful if you're piping the\n        returned JSON straight into some sort of UI and you don't want to deal\n        with formatting floats, etc. To do this manually when strformat is set\n        to False, look at the `coldesc` item in the returned dict, which gives\n        the Python and Numpy string format specifiers for each column in the\n        data table.\n\n    page : int\n        This sets which page of the dataset should be retrieved.\n\n    Returns\n    -------\n\n    dict\n        This returns the dataset JSON loaded into a dict.", "id": "f14753:m9"}
{"signature": "def import_apikey(lcc_server, apikey_text_json):", "body": "USERHOME = os.path.expanduser('<STR_LIT>')<EOL>APIKEYFILE = os.path.join(USERHOME,<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' % lcc_server.replace(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>).replace(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>))<EOL>respdict = json.loads(apikey_text_json)<EOL>apikey = respdict['<STR_LIT>']<EOL>expires = respdict['<STR_LIT>']<EOL>if not os.path.exists(os.path.dirname(APIKEYFILE)):<EOL><INDENT>os.makedirs(os.path.dirname(APIKEYFILE))<EOL><DEDENT>with open(APIKEYFILE,'<STR_LIT:w>') as outfd:<EOL><INDENT>outfd.write('<STR_LIT>' % (apikey, expires))<EOL><DEDENT>os.chmod(APIKEYFILE, <NUM_LIT>)<EOL>LOGINFO('<STR_LIT>' % (lcc_server,<EOL>expires))<EOL>LOGINFO('<STR_LIT>' % APIKEYFILE)<EOL>return apikey, expires<EOL>", "docstring": "This imports an API key from text and writes it to the cache dir.\n\n    Use this with the JSON text copied from the API key text box on your\n    LCC-Server user home page. The API key will thus be tied to the privileges\n    of that user account and can then access objects, datasets, and collections\n    marked as private for the user only or shared with that user.\n\n    Parameters\n    ----------\n\n    lcc_server : str\n        The base URL of the LCC-Server to get the API key for.\n\n    apikey_text_json : str\n        The JSON string from the API key text box on the user's LCC-Server home\n        page at `lcc_server/users/home`.\n\n    Returns\n    -------\n\n    (apikey, expiry) : tuple\n        This returns a tuple with the API key and its expiry date.", "id": "f14753:m2"}
{"signature": "def object_info(lcc_server, objectid, db_collection_id):", "body": "urlparams = {<EOL>'<STR_LIT>':objectid,<EOL>'<STR_LIT>':db_collection_id<EOL>}<EOL>urlqs = urlencode(urlparams)<EOL>url = '<STR_LIT>' % (lcc_server, urlqs)<EOL>try:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>' % (<EOL>objectid,<EOL>db_collection_id,<EOL>lcc_server<EOL>)<EOL>)<EOL>have_apikey, apikey, expires = check_existing_apikey(lcc_server)<EOL>if not have_apikey:<EOL><INDENT>apikey, expires = get_new_apikey(lcc_server)<EOL><DEDENT>if apikey:<EOL><INDENT>headers = {'<STR_LIT>':'<STR_LIT>' % apikey}<EOL><DEDENT>else:<EOL><INDENT>headers = {}<EOL><DEDENT>req = Request(url, data=None, headers=headers)<EOL>resp = urlopen(req)<EOL>objectinfo = json.loads(resp.read())['<STR_LIT:result>']<EOL>return objectinfo<EOL><DEDENT>except HTTPError as e:<EOL><INDENT>if e.code == <NUM_LIT>:<EOL><INDENT>LOGERROR(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % (objectid,<EOL>db_collection_id)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(url, e.code, e.reason))<EOL><DEDENT>return None<EOL><DEDENT>", "docstring": "This gets information on a single object from the LCC-Server.\n\n    Returns a dict with all of the available information on an object, including\n    finding charts, comments, object type and variability tags, and\n    period-search results (if available).\n\n    If you have an LCC-Server API key present in `~/.astrobase/lccs/` that is\n    associated with an LCC-Server user account, objects that are visible to this\n    user will be returned, even if they are not visible to the public. 
Use this\n    to look up objects that have been marked as 'private' or 'shared'.\n\n    NOTE: you can pass the result dict returned by this function directly into\n    the `astrobase.checkplot.checkplot_pickle_to_png` function, e.g.::\n\n        astrobase.checkplot.checkplot_pickle_to_png(result_dict,\n                                                    'object-%s-info.png' %\n                                                    result_dict['objectid'])\n\n    to generate a quick PNG overview of the object information.\n\n    Parameters\n    ----------\n\n    lcc_server : str\n        This is the base URL of the LCC-Server to talk to.\n\n    objectid : str\n        This is the unique database ID of the object to retrieve info for. This\n        is always returned as the `db_oid` column in LCC-Server search results.\n\n    db_collection_id : str\n        This is the collection ID which will be searched for the object. This is\n        always returned as the `collection` column in LCC-Server search results.\n\n    Returns\n    -------\n\n    dict\n        A dict containing the object info is returned. Some important items in\n        the result dict:\n\n        - `objectinfo`: all object magnitude, color, GAIA cross-match, and\n          object type information available for this object\n\n        - `objectcomments`: comments on the object's variability if available\n\n        - `varinfo`: variability comments, variability features, type tags,\n          period and epoch information if available\n\n        - `neighbors`: information on the neighboring objects of this object in\n          its parent light curve collection\n\n        - `xmatch`: information on any cross-matches to external catalogs\n          (e.g. KIC, EPIC, TIC, APOGEE, etc.)\n\n        - `finderchart`: a base-64 encoded PNG image of the object's DSS2 RED\n          finder chart. 
To convert this to an actual PNG, try the function:\n          `astrobase.checkplot.pkl_io._b64_to_file`.\n\n        - `magseries`: a base-64 encoded PNG image of the object's light\n          curve. To convert this to an actual PNG, try the function:\n          `astrobase.checkplot.pkl_io._b64_to_file`.\n\n        - `pfmethods`: a list of period-finding methods applied to the object if\n          any. If this list is present, use the keys in it to get to the actual\n          period-finding results for each method. These will contain base-64\n          encoded PNGs of the periodogram and phased light curves using the best\n          three peaks in the periodogram, as well as period and epoch\n          information.", "id": "f14753:m10"}
{"signature": "def get_new_apikey(lcc_server):", "body": "USERHOME = os.path.expanduser('<STR_LIT>')<EOL>APIKEYFILE = os.path.join(USERHOME,<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' % lcc_server.replace(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>).replace(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>))<EOL>url = '<STR_LIT>' % lcc_server<EOL>resp = urlopen(url)<EOL>if resp.code == <NUM_LIT:200>:<EOL><INDENT>respdict = json.loads(resp.read())<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>' %<EOL>lcc_server)<EOL>LOGERROR('<STR_LIT>' % resp.status_code)<EOL>return None<EOL><DEDENT>apikey = respdict['<STR_LIT:result>']['<STR_LIT>']<EOL>expires = respdict['<STR_LIT:result>']['<STR_LIT>']<EOL>if not os.path.exists(os.path.dirname(APIKEYFILE)):<EOL><INDENT>os.makedirs(os.path.dirname(APIKEYFILE))<EOL><DEDENT>with open(APIKEYFILE,'<STR_LIT:w>') as outfd:<EOL><INDENT>outfd.write('<STR_LIT>' % (apikey, expires))<EOL><DEDENT>os.chmod(APIKEYFILE, <NUM_LIT>)<EOL>LOGINFO('<STR_LIT>' % (lcc_server,<EOL>expires))<EOL>LOGINFO('<STR_LIT>' % APIKEYFILE)<EOL>return apikey, expires<EOL>", "docstring": "This gets a new API key from the specified LCC-Server.\n\n    NOTE: this only gets an anonymous API key. To get an API key tied to a user\n    account (and associated privilege level), see the `import_apikey` function\n    below.\n\n    Parameters\n    ----------\n\n    lcc_server : str\n        The base URL of the LCC-Server from where the API key will be fetched.\n\n    Returns\n    -------\n\n    (apikey, expiry) : tuple\n        This returns a tuple with the API key and its expiry date.", "id": "f14753:m1"}
{"signature": "def list_recent_datasets(lcc_server, nrecent=<NUM_LIT>):", "body": "urlparams = {'<STR_LIT>':nrecent}<EOL>urlqs = urlencode(urlparams)<EOL>url = '<STR_LIT>' % (lcc_server, urlqs)<EOL>try:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % (<EOL>lcc_server,<EOL>)<EOL>)<EOL>have_apikey, apikey, expires = check_existing_apikey(lcc_server)<EOL>if not have_apikey:<EOL><INDENT>apikey, expires = get_new_apikey(lcc_server)<EOL><DEDENT>if apikey:<EOL><INDENT>headers = {'<STR_LIT>':'<STR_LIT>' % apikey}<EOL><DEDENT>else:<EOL><INDENT>headers = {}<EOL><DEDENT>req = Request(url, data=None, headers=headers)<EOL>resp = urlopen(req)<EOL>recent_datasets = json.loads(resp.read())['<STR_LIT:result>']<EOL>return recent_datasets<EOL><DEDENT>except HTTPError as e:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(url, e.code, e.reason))<EOL>return None<EOL><DEDENT>", "docstring": "This lists recent publicly visible datasets available on the LCC-Server.\n\n    If you have an LCC-Server API key present in `~/.astrobase/lccs/` that is\n    associated with an LCC-Server user account, datasets that belong to this\n    user will be returned as well, even if they are not visible to the public.\n\n    Parameters\n    ----------\n\n    lcc_server : str\n        This is the base URL of the LCC-Server to talk to.\n\n    nrecent : int\n        This indicates how many recent public datasets you want to list. This is\n        always capped at 1000.\n\n    Returns\n    -------\n\n    list of dicts\n        Returns a list of dicts, with each dict containing info on each dataset.", "id": "f14753:m11"}
{"signature": "def list_lc_collections(lcc_server):", "body": "url = '<STR_LIT>' % lcc_server<EOL>try:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % (<EOL>lcc_server,<EOL>)<EOL>)<EOL>have_apikey, apikey, expires = check_existing_apikey(lcc_server)<EOL>if not have_apikey:<EOL><INDENT>apikey, expires = get_new_apikey(lcc_server)<EOL><DEDENT>if apikey:<EOL><INDENT>headers = {'<STR_LIT>':'<STR_LIT>' % apikey}<EOL><DEDENT>else:<EOL><INDENT>headers = {}<EOL><DEDENT>req = Request(url, data=None, headers=headers)<EOL>resp = urlopen(req)<EOL>lcc_list = json.loads(resp.read())['<STR_LIT:result>']['<STR_LIT>']<EOL>return lcc_list<EOL><DEDENT>except HTTPError as e:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(url, e.code, e.reason))<EOL>return None<EOL><DEDENT>", "docstring": "This lists all light curve collections made available on the LCC-Server.\n\n    If you have an LCC-Server API key present in `~/.astrobase/lccs/` that is\n    associated with an LCC-Server user account, light curve collections visible\n    to this user will be returned as well, even if they are not visible to the\n    public.\n\n    Parameters\n    ----------\n\n    lcc_server : str\n        The base URL of the LCC-Server to talk to.\n\n    Returns\n    -------\n\n    dict\n        Returns a dict containing lists of info items per collection. This\n        includes collection_ids, lists of columns, lists of indexed columns,\n        lists of full-text indexed columns, detailed column descriptions, number\n        of objects in each collection, collection sky coverage, etc.", "id": "f14753:m12"}
{"signature": "def cone_search(lcc_server,<EOL>center_ra,<EOL>center_decl,<EOL>radiusarcmin=<NUM_LIT>,<EOL>result_visibility='<STR_LIT>',<EOL>email_when_done=False,<EOL>collections=None,<EOL>columns=None,<EOL>filters=None,<EOL>sortspec=None,<EOL>samplespec=None,<EOL>limitspec=None,<EOL>download_data=True,<EOL>outdir=None,<EOL>maxtimeout=<NUM_LIT>,<EOL>refresh=<NUM_LIT>):", "body": "<EOL>coords = '<STR_LIT>' % (center_ra, center_decl, radiusarcmin)<EOL>params = {<EOL>'<STR_LIT>':coords<EOL>}<EOL>if collections:<EOL><INDENT>params['<STR_LIT>'] = collections<EOL><DEDENT>if columns:<EOL><INDENT>params['<STR_LIT>'] = columns<EOL><DEDENT>if filters:<EOL><INDENT>params['<STR_LIT>'] = filters<EOL><DEDENT>if sortspec:<EOL><INDENT>params['<STR_LIT>'] = json.dumps([sortspec])<EOL><DEDENT>if samplespec:<EOL><INDENT>params['<STR_LIT>'] = int(samplespec)<EOL><DEDENT>if limitspec:<EOL><INDENT>params['<STR_LIT>'] = int(limitspec)<EOL><DEDENT>params['<STR_LIT>'] = result_visibility<EOL>params['<STR_LIT>'] = email_when_done<EOL>if email_when_done:<EOL><INDENT>download_data = False<EOL><DEDENT>have_apikey, apikey, expires = check_existing_apikey(lcc_server)<EOL>if not have_apikey:<EOL><INDENT>apikey, expires = get_new_apikey(lcc_server)<EOL><DEDENT>api_url = '<STR_LIT>' % lcc_server<EOL>searchresult = submit_post_searchquery(api_url, params, apikey)<EOL>status = searchresult[<NUM_LIT:0>]<EOL>if download_data:<EOL><INDENT>if status == '<STR_LIT>':<EOL><INDENT>LOGINFO('<STR_LIT>')<EOL>csv, lczip, pkl = retrieve_dataset_files(searchresult,<EOL>outdir=outdir,<EOL>apikey=apikey)<EOL>if pkl:<EOL><INDENT>return searchresult[<NUM_LIT:1>], csv, lczip, pkl<EOL><DEDENT>else:<EOL><INDENT>return searchresult[<NUM_LIT:1>], csv, lczip<EOL><DEDENT><DEDENT>elif status == '<STR_LIT>':<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(maxtimeout/<NUM_LIT>, refresh))<EOL>timewaited = <NUM_LIT:0.0><EOL>while timewaited < 
maxtimeout:<EOL><INDENT>try:<EOL><INDENT>time.sleep(refresh)<EOL>csv, lczip, pkl = retrieve_dataset_files(searchresult,<EOL>outdir=outdir,<EOL>apikey=apikey)<EOL>if (csv and os.path.exists(csv) and<EOL>lczip and os.path.exists(lczip)):<EOL><INDENT>LOGINFO('<STR_LIT>')<EOL>return searchresult[<NUM_LIT:1>], csv, lczip<EOL><DEDENT>timewaited = timewaited + refresh<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>LOGWARNING('<STR_LIT>')<EOL>return searchresult[<NUM_LIT:1>], None, None<EOL><DEDENT><DEDENT>LOGERROR('<STR_LIT>')<EOL>return searchresult[<NUM_LIT:1>], None, None<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return searchresult[<NUM_LIT:1>], None, None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return searchresult[<NUM_LIT:1>], None, None<EOL><DEDENT>", "docstring": "This runs a cone-search query.\n\n    Parameters\n    ----------\n\n    lcc_server : str\n        This is the base URL of the LCC-Server to talk to.  (e.g. for HAT, use:\n        https://data.hatsurveys.org)\n\n    center_ra,center_decl : float\n        These are the central coordinates of the search to conduct. These can be\n        either decimal degrees of type float, or sexagesimal coordinates of type\n        str:\n\n        - OK: 290.0, 45.0\n        - OK: 15:00:00 +45:00:00\n        - OK: 15 00 00.0 -45 00 00.0\n        - NOT OK: 290.0 +45:00:00\n        - NOT OK: 15:00:00 45.0\n\n    radiusarcmin : float\n        This is the search radius to use for the cone-search. This is in\n        arcminutes. 
The maximum radius you can use is 60 arcminutes = 1 degree.\n\n    result_visibility : {'private', 'unlisted', 'public'}\n        This sets the visibility of the dataset produced from the search\n        result::\n\n               'private' -> the dataset and its products are not visible or\n                            accessible by any user other than the one that\n                            created the dataset.\n\n               'unlisted' -> the dataset and its products are not visible in the\n                             list of public datasets, but can be accessed if the\n                             dataset URL is known\n\n               'public' -> the dataset and its products are visible in the list\n                           of public datasets and can be accessed by anyone.\n\n    email_when_done : bool\n        If True, the LCC-Server will email you when the search is complete. This\n        will also set `download_data` to False. Using this requires an\n        LCC-Server account and an API key tied to that account.\n\n    collections : list of str or None\n        This is a list of LC collections to search in. If this is None, all\n        collections will be searched.\n\n    columns : list of str or None\n        This is a list of columns to return in the results. Matching objects'\n        object IDs, RAs, DECs, and links to light curve files will always be\n        returned so there is no need to specify these columns. If None, only\n        these columns will be returned: 'objectid', 'ra', 'decl', 'lcfname'\n\n    filters : str or None\n        This is an SQL-like string to use to filter on database columns in the\n        LCC-Server's collections. To see the columns available for a search,\n        visit the Collections tab in the LCC-Server's browser UI. 
The filter\n        operators allowed are::\n\n            lt      -> less than\n            gt      -> greater than\n            ge      -> greater than or equal to\n            le      -> less than or equal to\n            eq      -> equal to\n            ne      -> not equal to\n            ct      -> contains text\n            isnull  -> column value is null\n            notnull -> column value is not null\n\n        You may use the `and` and `or` operators between filter specifications\n        to chain them together logically.\n\n        Example filter strings::\n\n            \"(propermotion gt 200.0) and (sdssr lt 11.0)\"\n            \"(dered_jmag_kmag gt 2.0) and (aep_000_stetsonj gt 10.0)\"\n            \"(gaia_status ct 'ok') and (propermotion gt 300.0)\"\n            \"(simbad_best_objtype ct 'RR') and (dered_sdssu_sdssg lt 0.5)\"\n\n    sortspec : tuple of two strs or None\n        If not None, this should be a tuple of two items::\n\n            ('column to sort by', 'asc|desc')\n\n        This sets the column to sort the results by. For cone_search, the\n        default column and sort order are 'dist_arcsec' and 'asc', meaning the\n        distance from the search center in ascending order.\n\n    samplespec : int or None\n        If this is an int, will indicate how many rows from the initial search\n        result will be uniformly random sampled and returned.\n\n    limitspec : int or None\n        If this is an int, will indicate how many rows from the initial search\n        result to return in total.\n\n        `sortspec`, `samplespec`, and `limitspec` are applied in this order:\n\n            sample -> sort -> limit\n\n    download_data : bool\n        This sets if the accompanying data from the search results will be\n        downloaded automatically. This includes the data table CSV, the dataset\n        pickle file, and a light curve ZIP file. 
Note that if the search service\n        indicates that your query is still in progress, this function will block\n        until the light curve ZIP file becomes available. The maximum wait time\n        in seconds is set by maxtimeout and the refresh interval is set by\n        refresh.\n\n        To avoid the wait block, set download_data to False and the function\n        will write a pickle file to `~/.astrobase/lccs/query-[setid].pkl`\n        containing all the information necessary to retrieve these data files\n        later when the query is done. To do so, call the\n        `retrieve_dataset_files` with the path to this pickle file (it will be\n        returned).\n\n    outdir : str or None\n        If this is provided, sets the output directory of the downloaded dataset\n        files. If None, they will be downloaded to the current directory.\n\n    maxtimeout : float\n        The maximum time in seconds to wait for the LCC-Server to respond with a\n        result before timing out. You can use the `retrieve_dataset_files`\n        function to get results later as needed.\n\n    refresh : float\n        The time to wait in seconds before pinging the LCC-Server to see if a\n        search query has completed and dataset result files can be downloaded.\n\n    Returns\n    -------\n\n    tuple\n        Returns a tuple with the following elements::\n\n            (search result status dict,\n             search result CSV file path,\n             search result LC ZIP path)", "id": "f14753:m5"}
{"signature": "def datetime_to_jd(dt):", "body": "jdutc = astime.Time(dt, format='<STR_LIT>',scale='<STR_LIT>')<EOL>return jdutc.jd<EOL>", "docstring": "This converts a Python datetime object (naive, time in UT) to JD_UTC.\n\n    Parameters\n    ----------\n\n    dt : datetime\n        A naive Python `datetime` object (e.g. with no tz attribute) measured at\n        UTC.\n\n    Returns\n    -------\n\n    jd : float\n        The Julian date corresponding to the `datetime` object.", "id": "f14754:m4"}
{"signature": "def get_epochs_given_midtimes_and_period(<EOL>t_mid,<EOL>period,<EOL>err_t_mid=None,<EOL>t0_fixed=None,<EOL>t0_percentile=None,<EOL>verbose=False<EOL>):", "body": "kwargarr = np.array([isinstance(err_t_mid,np.ndarray),<EOL>t0_fixed,<EOL>t0_percentile])<EOL>if not _single_true(kwargarr) and not np.all(~kwargarr.astype(bool)):<EOL><INDENT>raise AssertionError(<EOL>'<STR_LIT>')<EOL><DEDENT>t_mid = t_mid[np.isfinite(t_mid)]<EOL>N_midtimes = len(t_mid)<EOL>if t0_fixed:<EOL><INDENT>t0 = t0_fixed<EOL><DEDENT>elif isinstance(err_t_mid,np.ndarray):<EOL><INDENT>t0_avg = np.average(t_mid, weights=<NUM_LIT:1>/err_t_mid**<NUM_LIT:2>)<EOL>t0_options = np.arange(min(t_mid), max(t_mid)+period, period)<EOL>t0 = t0_options[np.argmin(np.abs(t0_options - t0_avg))]<EOL><DEDENT>else:<EOL><INDENT>if not t0_percentile:<EOL><INDENT>if N_midtimes % <NUM_LIT:2> == <NUM_LIT:1>:<EOL><INDENT>t0 = np.median(t_mid)<EOL><DEDENT>else:<EOL><INDENT>t0 = t_mid[int(N_midtimes/<NUM_LIT:2>)]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>t0 = np.sort(t_mid)[int(N_midtimes*t0_percentile/<NUM_LIT:100>)]<EOL><DEDENT><DEDENT>epoch = (t_mid - t0)/period<EOL>int_epoch = np.round(epoch, <NUM_LIT:0>)<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>')<EOL>LOGINFO('<STR_LIT>'.format(repr(epoch)))<EOL>LOGINFO('<STR_LIT>')<EOL>LOGINFO('<STR_LIT>'.format(repr(int_epoch)))<EOL><DEDENT>return int_epoch, t0<EOL>", "docstring": "This calculates the future epochs for a transit, given a period and a\n    starting epoch\n\n    The equation used is::\n\n        t_mid = period*epoch + t0\n\n    Default behavior if no kwargs are used is to define `t0` as the median\n    finite time of the passed `t_mid` array.\n\n    Only one of `err_t_mid` or `t0_fixed` should be passed.\n\n    Parameters\n    ----------\n\n    t_mid : np.array\n        A np.array of transit mid-time measurements\n\n    period : float\n        The period used to calculate epochs, per the equation above. 
For typical\n        use cases, a period precise to ~1e-5 days is sufficient to get correct\n        epochs.\n\n    err_t_mid : None or np.array\n        If provided, contains the errors of the transit mid-time\n        measurements. The zero-point epoch is then set equal to the average of\n        the transit times, weighted as `1/err_t_mid^2` . This minimizes the\n        covariance between the transit epoch and the period (e.g., Gibson et\n        al. 2013). For standard O-C analysis this is the best method.\n\n    t0_fixed : None or float:\n        If provided, use this t0 as the starting epoch. (Overrides all others).\n\n    t0_percentile : None or float\n        If provided, use this percentile of `t_mid` to define `t0`.\n\n    Returns\n    -------\n\n    tuple\n        This is the of the form `(integer_epoch_array, t0)`.\n        `integer_epoch_array` is an array of integer epochs (float-type),\n        of length equal to the number of *finite* mid-times passed.", "id": "f14754:m2"}
{"signature": "def jhk_to_rmag(jmag,hmag,kmag):", "body": "return convert_constants(jmag,hmag,kmag,<EOL>RJHK,<EOL>RJH, RJK, RHK,<EOL>RJ, RH, RK)<EOL>", "docstring": "Converts given J, H, Ks mags to an R magnitude value.\n\n    Parameters\n    ----------\n\n    jmag,hmag,kmag : float\n        2MASS J, H, Ks mags of the object.\n\n    Returns\n    -------\n\n    float\n        The converted R band magnitude.", "id": "f14755:m3"}
{"signature": "def jhk_to_vmag(jmag,hmag,kmag):", "body": "return convert_constants(jmag,hmag,kmag,<EOL>VJHK,<EOL>VJH, VJK, VHK,<EOL>VJ, VH, VK)<EOL>", "docstring": "Converts given J, H, Ks mags to a V magnitude value.\n\n    Parameters\n    ----------\n\n    jmag,hmag,kmag : float\n        2MASS J, H, Ks mags of the object.\n\n    Returns\n    -------\n\n    float\n        The converted V band magnitude.", "id": "f14755:m2"}
{"signature": "def jhk_to_sdssz(jmag,hmag,kmag):", "body": "return convert_constants(jmag,hmag,kmag,<EOL>SDSSZ_JHK,<EOL>SDSSZ_JH, SDSSZ_JK, SDSSZ_HK,<EOL>SDSSZ_J, SDSSZ_H, SDSSZ_K)<EOL>", "docstring": "Converts given J, H, Ks mags to an SDSS z magnitude value.\n\n    Parameters\n    ----------\n\n    jmag,hmag,kmag : float\n        2MASS J, H, Ks mags of the object.\n\n    Returns\n    -------\n\n    float\n        The converted SDSS z band magnitude.", "id": "f14755:m9"}
{"signature": "def filter_tess_lcdict(lcdict,<EOL>filterqualityflags=True,<EOL>nanfilter='<STR_LIT>',<EOL>timestoignore=None,<EOL>quiet=False):", "body": "cols = lcdict['<STR_LIT>']<EOL>if filterqualityflags:<EOL><INDENT>nbefore = lcdict['<STR_LIT:time>'].size<EOL>filterind = lcdict['<STR_LIT>'] == <NUM_LIT:0><EOL>for col in cols:<EOL><INDENT>if '<STR_LIT:.>' in col:<EOL><INDENT>key, subkey = col.split('<STR_LIT:.>')<EOL>lcdict[key][subkey] = lcdict[key][subkey][filterind]<EOL><DEDENT>else:<EOL><INDENT>lcdict[col] = lcdict[col][filterind]<EOL><DEDENT><DEDENT>nafter = lcdict['<STR_LIT:time>'].size<EOL>if not quiet:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (nbefore, nafter))<EOL><DEDENT><DEDENT>if nanfilter and nanfilter == '<STR_LIT>':<EOL><INDENT>notnanind = (<EOL>np.isfinite(lcdict['<STR_LIT>']['<STR_LIT>']) &<EOL>np.isfinite(lcdict['<STR_LIT>']['<STR_LIT>']) &<EOL>np.isfinite(lcdict['<STR_LIT>']['<STR_LIT>']) &<EOL>np.isfinite(lcdict['<STR_LIT>']['<STR_LIT>']) &<EOL>np.isfinite(lcdict['<STR_LIT:time>'])<EOL>)<EOL><DEDENT>elif nanfilter and nanfilter == '<STR_LIT>':<EOL><INDENT>notnanind = (<EOL>np.isfinite(lcdict['<STR_LIT>']['<STR_LIT>']) &<EOL>np.isfinite(lcdict['<STR_LIT>']['<STR_LIT>']) &<EOL>np.isfinite(lcdict['<STR_LIT:time>'])<EOL>)<EOL><DEDENT>elif nanfilter and nanfilter == '<STR_LIT>':<EOL><INDENT>notnanind = (<EOL>np.isfinite(lcdict['<STR_LIT>']['<STR_LIT>']) &<EOL>np.isfinite(lcdict['<STR_LIT>']['<STR_LIT>']) &<EOL>np.isfinite(lcdict['<STR_LIT:time>'])<EOL>)<EOL><DEDENT>elif nanfilter is None:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError<EOL><DEDENT>if nanfilter:<EOL><INDENT>nbefore = lcdict['<STR_LIT:time>'].size<EOL>for col in cols:<EOL><INDENT>if '<STR_LIT:.>' in col:<EOL><INDENT>key, subkey = col.split('<STR_LIT:.>')<EOL>lcdict[key][subkey] = lcdict[key][subkey][notnanind]<EOL><DEDENT>else:<EOL><INDENT>lcdict[col] = lcdict[col][notnanind]<EOL><DEDENT><DEDENT>nafter = lcdict['<STR_LIT:time>'].size<EOL>if not 
quiet:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>% (nbefore, nafter))<EOL><DEDENT><DEDENT>if (timestoignore and<EOL>isinstance(timestoignore, list) and<EOL>len(timestoignore) > <NUM_LIT:0>):<EOL><INDENT>exclind = np.full_like(lcdict['<STR_LIT:time>'],True).astype(bool)<EOL>nbefore = exclind.size<EOL>for ignoretime in timestoignore:<EOL><INDENT>time0, time1 = ignoretime[<NUM_LIT:0>], ignoretime[<NUM_LIT:1>]<EOL>thismask = ~((lcdict['<STR_LIT:time>'] >= time0) & (lcdict['<STR_LIT:time>'] <= time1))<EOL>exclind = exclind & thismask<EOL><DEDENT>for col in cols:<EOL><INDENT>if '<STR_LIT:.>' in col:<EOL><INDENT>key, subkey = col.split('<STR_LIT:.>')<EOL>lcdict[key][subkey] = lcdict[key][subkey][exclind]<EOL><DEDENT>else:<EOL><INDENT>lcdict[col] = lcdict[col][exclind]<EOL><DEDENT><DEDENT>nafter = lcdict['<STR_LIT:time>'].size<EOL>if not quiet:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>% (nbefore, nafter))<EOL><DEDENT><DEDENT>return lcdict<EOL>", "docstring": "This filters the provided TESS `lcdict`, removing nans and bad\n    observations.\n\n    By default, this function removes points in the TESS LC that have ANY\n    quality flags set.\n\n    Parameters\n    ----------\n\n    lcdict : lcdict\n        An `lcdict` produced by `consolidate_tess_fitslc` or\n        `read_tess_fitslc`.\n\n    filterflags : bool\n        If True, will remove any measurements that have non-zero quality flags\n        present. This usually indicates an issue with the instrument or\n        spacecraft.\n\n    nanfilter : {'sap','pdc','sap,pdc'}\n        Indicates the flux measurement type(s) to apply the filtering to.\n\n    timestoignore : list of tuples or None\n        This is of the form::\n\n            [(time1_start, time1_end), (time2_start, time2_end), ...]\n\n        and indicates the start and end times to mask out of the final\n        lcdict. 
Use this to remove anything that wasn't caught by the quality\n        flags.\n\n    Returns\n    -------\n\n    lcdict\n        Returns an `lcdict` (this is useable by most astrobase functions for LC\n        processing). The `lcdict` is filtered IN PLACE!", "id": "f14756:m6"}
{"signature": "def read_tess_fitslc(lcfits,<EOL>headerkeys=LCHEADERKEYS,<EOL>datakeys=LCDATAKEYS,<EOL>sapkeys=LCSAPKEYS,<EOL>pdckeys=LCPDCKEYS,<EOL>topkeys=LCTOPKEYS,<EOL>apkeys=LCAPERTUREKEYS,<EOL>normalize=False,<EOL>appendto=None,<EOL>filterqualityflags=False,<EOL>nanfilter=None,<EOL>timestoignore=None):", "body": "<EOL>hdulist = pyfits.open(lcfits)<EOL>lchdr, lcdata = hdulist[<NUM_LIT:1>].header, hdulist[<NUM_LIT:1>].data<EOL>lctophdr, lcaperturehdr, lcaperturedata = (hdulist[<NUM_LIT:0>].header,<EOL>hdulist[<NUM_LIT:2>].header,<EOL>hdulist[<NUM_LIT:2>].data)<EOL>hdulist.close()<EOL>hdrinfo = {}<EOL>for key in headerkeys:<EOL><INDENT>if key in lchdr and lchdr[key] is not None:<EOL><INDENT>hdrinfo[key.lower()] = lchdr[key]<EOL><DEDENT>else:<EOL><INDENT>hdrinfo[key.lower()] = None<EOL><DEDENT><DEDENT>ndet = lchdr['<STR_LIT>']<EOL>for key in topkeys:<EOL><INDENT>if key in lctophdr and lctophdr[key] is not None:<EOL><INDENT>hdrinfo[key.lower()] = lctophdr[key]<EOL><DEDENT>else:<EOL><INDENT>hdrinfo[key.lower()] = None<EOL><DEDENT><DEDENT>for key in lcaperturehdr:<EOL><INDENT>if key in lcaperturehdr and lcaperturehdr[key] is not None:<EOL><INDENT>hdrinfo[key.lower()] = lcaperturehdr[key]<EOL><DEDENT>else:<EOL><INDENT>hdrinfo[key.lower()] = None<EOL><DEDENT><DEDENT>if appendto and isinstance(appendto, dict):<EOL><INDENT>lcdict = appendto<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(<EOL>hdrinfo['<STR_LIT>'] + hdrinfo['<STR_LIT>']<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(lcaperturedata)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(<EOL>(np.abs(hdrinfo['<STR_LIT>']) 
+<EOL>np.abs(hdrinfo['<STR_LIT>']))*<NUM_LIT>/<NUM_LIT><EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(ndet)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(<EOL>hdrinfo['<STR_LIT>']<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(<EOL>hdrinfo['<STR_LIT>']<EOL>)<EOL>for key in datakeys:<EOL><INDENT>if key.lower() in lcdict:<EOL><INDENT>lcdict[key.lower()] = (<EOL>np.concatenate((lcdict[key.lower()], lcdata[key]))<EOL>)<EOL><DEDENT><DEDENT>for key in sapkeys:<EOL><INDENT>if key.lower() in lcdict['<STR_LIT>']:<EOL><INDENT>sapflux_median = np.nanmedian(lcdata['<STR_LIT>'])<EOL>if normalize and key == '<STR_LIT>':<EOL><INDENT>thislcdata = lcdata[key] / sapflux_median<EOL><DEDENT>elif normalize and key == 
'<STR_LIT>':<EOL><INDENT>thislcdata = lcdata[key] / sapflux_median<EOL><DEDENT>elif normalize and key == '<STR_LIT>':<EOL><INDENT>thislcdata = lcdata[key] / sapflux_median<EOL><DEDENT>elif normalize and key == '<STR_LIT>':<EOL><INDENT>thislcdata = lcdata[key] / sapflux_median<EOL><DEDENT>else:<EOL><INDENT>thislcdata = lcdata[key]<EOL><DEDENT>lcdict['<STR_LIT>'][key.lower()] = (<EOL>np.concatenate((lcdict['<STR_LIT>'][key.lower()], thislcdata))<EOL>)<EOL><DEDENT><DEDENT>for key in pdckeys:<EOL><INDENT>if key.lower() in lcdict['<STR_LIT>']:<EOL><INDENT>pdcsap_flux_median = np.nanmedian(lcdata['<STR_LIT>'])<EOL>if normalize and key == '<STR_LIT>':<EOL><INDENT>thislcdata = lcdata[key] / pdcsap_flux_median<EOL><DEDENT>elif normalize and key == '<STR_LIT>':<EOL><INDENT>thislcdata = lcdata[key] / pdcsap_flux_median<EOL><DEDENT>else:<EOL><INDENT>thislcdata = lcdata[key]<EOL><DEDENT>lcdict['<STR_LIT>'][key.lower()] = (<EOL>np.concatenate((lcdict['<STR_LIT>'][key.lower()], thislcdata))<EOL>)<EOL><DEDENT><DEDENT>lcdict['<STR_LIT>'] = np.concatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>np.full_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>'],<EOL>dtype=np.float64))<EOL>)<EOL>lcdict['<STR_LIT>'] = np.concatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>np.full_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>'],<EOL>dtype=np.int64))<EOL>)<EOL>lcdict['<STR_LIT>'] = np.concatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>np.full_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>'],<EOL>dtype=np.int64))<EOL>)<EOL>lcdict['<STR_LIT>'] = np.concatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>np.full_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>'],<EOL>dtype=np.int64))<EOL>)<EOL>lcdict['<STR_LIT>'] = np.concatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>np.full_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>'],<EOL>dtype=np.int64))<EOL>)<EOL>lcdict['<STR_LIT>'] = np.concatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>np.full_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>'],<EOL>dtype='<STR_LIT>'))<EOL>)<EOL>lcdict['<STR_LIT>'] = 
np.concatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>np.full_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>'],<EOL>dtype='<STR_LIT>'))<EOL>)<EOL>lcdict['<STR_LIT>'] = np.concatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>np.full_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>'],<EOL>dtype='<STR_LIT>'))<EOL>)<EOL>lcdict['<STR_LIT>'] = np.concatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>np.full_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>'],<EOL>dtype='<STR_LIT>'))<EOL>)<EOL>lcdict['<STR_LIT>'] = np.concatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>np.full_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>'],<EOL>dtype=np.int64))<EOL>)<EOL><DEDENT>else:<EOL><INDENT>lcdict = {<EOL>'<STR_LIT>':hdrinfo['<STR_LIT:object>'],<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>'] + hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[lcaperturedata],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[(np.abs(hdrinfo['<STR_LIT>']) 
+<EOL>np.abs(hdrinfo['<STR_LIT>']))*<NUM_LIT>/<NUM_LIT>],<EOL>'<STR_LIT>':[ndet],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>},<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':hdrinfo['<STR_LIT:object>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>},<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>},<EOL>'<STR_LIT>':{},<EOL>'<STR_LIT>':{},<EOL>}<EOL>for key in datakeys:<EOL><INDENT>lcdict[key.lower()] = lcdata[key]<EOL><DEDENT>for key in sapkeys:<EOL><INDENT>lcdict['<STR_LIT>'][key.lower()] = lcdata[key]<EOL><DEDENT>for key in pdckeys:<EOL><INDENT>lcdict['<STR_LIT>'][key.lower()] = lcdata[key]<EOL><DEDENT>lcdict['<STR_LIT>'] = np.full_like(lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>],<EOL>dtype=np.float64)<EOL>lcdict['<STR_LIT>'] = 
np.full_like(lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>],<EOL>dtype=np.int64)<EOL>lcdict['<STR_LIT>'] = np.full_like(lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>],<EOL>dtype=np.int64)<EOL>lcdict['<STR_LIT>'] = np.full_like(lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>],<EOL>dtype=np.int64)<EOL>lcdict['<STR_LIT>'] = np.full_like(<EOL>lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>],<EOL>dtype=np.int64,<EOL>)<EOL>lcdict['<STR_LIT>'] = np.full_like(<EOL>lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>],<EOL>dtype='<STR_LIT>',<EOL>)<EOL>lcdict['<STR_LIT>'] = np.full_like(<EOL>lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>],<EOL>dtype='<STR_LIT>',<EOL>)<EOL>lcdict['<STR_LIT>'] = np.full_like(<EOL>lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>],<EOL>dtype='<STR_LIT>',<EOL>)<EOL>lcdict['<STR_LIT>'] = np.full_like(<EOL>lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>],<EOL>dtype='<STR_LIT>',<EOL>)<EOL>lcdict['<STR_LIT>'] = np.full_like(<EOL>lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>],<EOL>dtype=np.int64,<EOL>)<EOL>if normalize:<EOL><INDENT>sapflux_median = np.nanmedian(lcdict['<STR_LIT>']['<STR_LIT>'])<EOL>pdcsap_flux_median = np.nanmedian(lcdict['<STR_LIT>']['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] /<EOL>sapflux_median<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] /<EOL>sapflux_median<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] /<EOL>sapflux_median<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] /<EOL>sapflux_median<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] 
/<EOL>pdcsap_flux_median<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] /<EOL>pdcsap_flux_median<EOL>)<EOL><DEDENT><DEDENT>lcdict['<STR_LIT>'] = (<EOL>[x.lower() for x in datakeys] +<EOL>['<STR_LIT>' % x.lower() for x in sapkeys] +<EOL>['<STR_LIT>' % x.lower() for x in pdckeys] +<EOL>['<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>']<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = sum(lcdict['<STR_LIT>']['<STR_LIT>'])<EOL>if (filterqualityflags is not False or<EOL>nanfilter is not None or<EOL>timestoignore is not None):<EOL><INDENT>lcdict = filter_tess_lcdict(lcdict,<EOL>filterqualityflags,<EOL>nanfilter=nanfilter,<EOL>timestoignore=timestoignore)<EOL><DEDENT>return lcdict<EOL>", "docstring": "This extracts the light curve from a single TESS .lc.fits file.\n\n    This works on the light curves available at MAST.\n\n    TODO: look at:\n\n    https://archive.stsci.edu/missions/tess/doc/EXP-TESS-ARC-ICD-TM-0014.pdf\n\n    for details on the column descriptions and to fill in any other info we\n    need.\n\n    Parameters\n    ----------\n\n    lcfits : str\n        The filename of a MAST Kepler/K2 light curve FITS file.\n\n    headerkeys : list\n        A list of FITS header keys that will be extracted from the FITS light\n        curve file. These describe the observations. The default value for this\n        is given in `LCHEADERKEYS` above.\n\n    datakeys : list\n        A list of FITS column names that correspond to the auxiliary\n        measurements in the light curve. The default is `LCDATAKEYS` above.\n\n    sapkeys : list\n        A list of FITS column names that correspond to the SAP flux\n        measurements in the light curve. The default is `LCSAPKEYS` above.\n\n    pdckeys : list\n        A list of FITS column names that correspond to the PDC flux\n        measurements in the light curve. 
The default is `LCPDCKEYS` above.\n\n    topkeys : list\n        A list of FITS header keys that describe the object in the light\n        curve. The default is `LCTOPKEYS` above.\n\n    apkeys : list\n        A list of FITS header keys that describe the flux measurement apertures\n        used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.\n\n    normalize : bool\n        If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements\n        will be normalized to 1.0 by dividing out the median flux for the\n        component light curve.\n\n    appendto : lcdict or None\n        If appendto is an `lcdict`, will append measurements of this `lcdict` to\n        that `lcdict`. This is used for consolidating light curves for the same\n        object across different files (sectors/cameras/CCDs?). The appending\n        does not care about the time order. To consolidate light curves in time\n        order, use `consolidate_tess_fitslc` below.\n\n    filterqualityflags : bool\n        If True, will remove any measurements that have non-zero quality flags\n        present. This usually indicates an issue with the instrument or\n        spacecraft.\n\n    nanfilter : {'sap','pdc','sap,pdc'} or None\n        Indicates the flux measurement type(s) to apply the filtering to.\n\n    timestoignore : list of tuples or None\n        This is of the form::\n\n            [(time1_start, time1_end), (time2_start, time2_end), ...]\n\n        and indicates the start and end times to mask out of the final\n        lcdict. Use this to remove anything that wasn't caught by the quality\n        flags.\n\n    Returns\n    -------\n\n    lcdict\n        Returns an `lcdict` (this is useable by most astrobase functions for LC\n        processing).", "id": "f14756:m2"}
{"signature": "def read_tess_pklc(picklefile):", "body": "if picklefile.endswith('<STR_LIT>'):<EOL><INDENT>infd = gzip.open(picklefile, '<STR_LIT:rb>')<EOL><DEDENT>else:<EOL><INDENT>infd = open(picklefile, '<STR_LIT:rb>')<EOL><DEDENT>try:<EOL><INDENT>with infd:<EOL><INDENT>lcdict = pickle.load(infd)<EOL><DEDENT><DEDENT>except UnicodeDecodeError:<EOL><INDENT>with open(picklefile,'<STR_LIT:rb>') as infd:<EOL><INDENT>lcdict = pickle.load(infd, encoding='<STR_LIT>')<EOL><DEDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' % picklefile)<EOL><DEDENT>return lcdict<EOL>", "docstring": "This turns the pickled lightcurve file back into an `lcdict`.\n\n    Parameters\n    ----------\n\n    picklefile : str\n        The path to a previously written Kepler LC picklefile generated by\n        `tess_lcdict_to_pkl` above.\n\n    Returns\n    -------\n\n    lcdict\n        Returns an `lcdict` (this is useable by most astrobase functions for LC\n        processing).", "id": "f14756:m5"}
{"signature": "def aov_theta(times, mags, errs, frequency,<EOL>binsize=<NUM_LIT>, minbin=<NUM_LIT:9>):", "body": "period = <NUM_LIT:1.0>/frequency<EOL>fold_time = times[<NUM_LIT:0>]<EOL>phased = phase_magseries(times,<EOL>mags,<EOL>period,<EOL>fold_time,<EOL>wrap=False,<EOL>sort=True)<EOL>phases = phased['<STR_LIT>']<EOL>pmags = phased['<STR_LIT>']<EOL>bins = nparange(<NUM_LIT:0.0>, <NUM_LIT:1.0>, binsize)<EOL>ndets = phases.size<EOL>binnedphaseinds = npdigitize(phases, bins)<EOL>bin_s1_tops = []<EOL>bin_s2_tops = []<EOL>binndets = []<EOL>goodbins = <NUM_LIT:0><EOL>all_xbar = npmedian(pmags)<EOL>for x in npunique(binnedphaseinds):<EOL><INDENT>thisbin_inds = binnedphaseinds == x<EOL>thisbin_mags = pmags[thisbin_inds]<EOL>if thisbin_mags.size > minbin:<EOL><INDENT>thisbin_ndet = thisbin_mags.size<EOL>thisbin_xbar = npmedian(thisbin_mags)<EOL>thisbin_s1_top = (<EOL>thisbin_ndet *<EOL>(thisbin_xbar - all_xbar) *<EOL>(thisbin_xbar - all_xbar)<EOL>)<EOL>thisbin_s2_top = npsum((thisbin_mags - all_xbar) *<EOL>(thisbin_mags - all_xbar))<EOL>bin_s1_tops.append(thisbin_s1_top)<EOL>bin_s2_tops.append(thisbin_s2_top)<EOL>binndets.append(thisbin_ndet)<EOL>goodbins = goodbins + <NUM_LIT:1><EOL><DEDENT><DEDENT>bin_s1_tops = nparray(bin_s1_tops)<EOL>bin_s2_tops = nparray(bin_s2_tops)<EOL>binndets = nparray(binndets)<EOL>s1 = npsum(bin_s1_tops)/(goodbins - <NUM_LIT:1.0>)<EOL>s2 = npsum(bin_s2_tops)/(ndets - goodbins)<EOL>theta_aov = s1/s2<EOL>return theta_aov<EOL>", "docstring": "Calculates the Schwarzenberg-Czerny AoV statistic at a test frequency.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The input time-series and associated errors.\n\n    frequency : float\n        The test frequency to calculate the theta statistic at.\n\n    binsize : float\n        The phase bin size to use.\n\n    minbin : int\n        The minimum number of items in a phase bin to consider in the\n        calculation of the statistic.\n\n    Returns\n    -------\n\n    
theta_aov : float\n        The value of the AoV statistic at the specified `frequency`.", "id": "f14757:m0"}
{"signature": "def aov_periodfind(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=False,<EOL>startp=None,<EOL>endp=None,<EOL>stepsize=<NUM_LIT>,<EOL>autofreq=True,<EOL>normalize=True,<EOL>phasebinsize=<NUM_LIT>,<EOL>mindetperbin=<NUM_LIT:9>,<EOL>nbestpeaks=<NUM_LIT:5>,<EOL>periodepsilon=<NUM_LIT:0.1>,<EOL>sigclip=<NUM_LIT>,<EOL>nworkers=None,<EOL>verbose=True):", "body": "<EOL>stimes, smags, serrs = sigclip_magseries(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=magsarefluxes,<EOL>sigclip=sigclip)<EOL>if len(stimes) > <NUM_LIT:9> and len(smags) > <NUM_LIT:9> and len(serrs) > <NUM_LIT:9>:<EOL><INDENT>if startp:<EOL><INDENT>endf = <NUM_LIT:1.0>/startp<EOL><DEDENT>else:<EOL><INDENT>endf = <NUM_LIT:1.0>/<NUM_LIT:0.1><EOL><DEDENT>if endp:<EOL><INDENT>startf = <NUM_LIT:1.0>/endp<EOL><DEDENT>else:<EOL><INDENT>startf = <NUM_LIT:1.0>/(stimes.max() - stimes.min())<EOL><DEDENT>if not autofreq:<EOL><INDENT>frequencies = nparange(startf, endf, stepsize)<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>' %<EOL>(frequencies.size, <NUM_LIT:1.0>/endf, <NUM_LIT:1.0>/startf)<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>frequencies = get_frequency_grid(stimes,<EOL>minfreq=startf,<EOL>maxfreq=endf)<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(frequencies.size,<EOL><NUM_LIT:1.0>/frequencies.max(),<EOL><NUM_LIT:1.0>/frequencies.min())<EOL>)<EOL><DEDENT><DEDENT>if (not nworkers) or (nworkers > NCPUS):<EOL><INDENT>nworkers = NCPUS<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % nworkers)<EOL><DEDENT><DEDENT>pool = Pool(nworkers)<EOL>if normalize:<EOL><INDENT>nmags = (smags - npmedian(smags))/npstd(smags)<EOL><DEDENT>else:<EOL><INDENT>nmags = smags<EOL><DEDENT>tasks = [(stimes, nmags, serrs, x, phasebinsize, mindetperbin)<EOL>for x in frequencies]<EOL>lsp = pool.map(_aov_worker, tasks)<EOL>pool.close()<EOL>pool.join()<EOL>del pool<EOL>lsp = nparray(lsp)<EOL>periods = <NUM_LIT:1.0>/frequencies<EOL>finitepeakind = npisfinite(lsp)<EOL>finlsp = 
lsp[finitepeakind]<EOL>finperiods = periods[finitepeakind]<EOL>try:<EOL><INDENT>bestperiodind = npargmax(finlsp)<EOL><DEDENT>except ValueError:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':normalize,<EOL>'<STR_LIT>':phasebinsize,<EOL>'<STR_LIT>':mindetperbin,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip}}<EOL><DEDENT>sortedlspind = npargsort(finlsp)[::-<NUM_LIT:1>]<EOL>sortedlspperiods = finperiods[sortedlspind]<EOL>sortedlspvals = finlsp[sortedlspind]<EOL>nbestperiods, nbestlspvals, peakcount = (<EOL>[finperiods[bestperiodind]],<EOL>[finlsp[bestperiodind]],<EOL><NUM_LIT:1><EOL>)<EOL>prevperiod = sortedlspperiods[<NUM_LIT:0>]<EOL>for period, lspval in zip(sortedlspperiods, sortedlspvals):<EOL><INDENT>if peakcount == nbestpeaks:<EOL><INDENT>break<EOL><DEDENT>perioddiff = abs(period - prevperiod)<EOL>bestperiodsdiff = [abs(period - x) for x in nbestperiods]<EOL>if (perioddiff > (periodepsilon*prevperiod) and<EOL>all(x > (periodepsilon*period) for x in bestperiodsdiff)):<EOL><INDENT>nbestperiods.append(period)<EOL>nbestlspvals.append(lspval)<EOL>peakcount = peakcount + <NUM_LIT:1><EOL><DEDENT>prevperiod = period<EOL><DEDENT>return 
{'<STR_LIT>':finperiods[bestperiodind],<EOL>'<STR_LIT>':finlsp[bestperiodind],<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':nbestlspvals,<EOL>'<STR_LIT>':nbestperiods,<EOL>'<STR_LIT>':lsp,<EOL>'<STR_LIT>':periods,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':normalize,<EOL>'<STR_LIT>':phasebinsize,<EOL>'<STR_LIT>':mindetperbin,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip}}<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':normalize,<EOL>'<STR_LIT>':phasebinsize,<EOL>'<STR_LIT>':mindetperbin,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip}}<EOL><DEDENT>", "docstring": "This runs a parallelized Analysis-of-Variance (AoV) period search.\n\n    NOTE: `normalize = True` here as recommended by Schwarzenberg-Czerny 1996,\n    i.e. 
mags will be normalized to zero and rescaled so their variance = 1.0.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The mag/flux time-series with associated measurement errors to run the\n        period-finding on.\n\n    magsarefluxes : bool\n        If the input measurement values in `mags` and `errs` are in fluxes, set\n        this to True.\n\n    startp,endp : float or None\n        The minimum and maximum periods to consider for the transit search.\n\n    stepsize : float\n        The step-size in frequency to use when constructing a frequency grid for\n        the period search.\n\n    autofreq : bool\n        If this is True, the value of `stepsize` will be ignored and the\n        :py:func:`astrobase.periodbase.get_frequency_grid` function will be used\n        to generate a frequency grid based on `startp`, and `endp`. If these are\n        None as well, `startp` will be set to 0.1 and `endp` will be set to\n        `times.max() - times.min()`.\n\n    normalize : bool\n        This sets if the input time-series is normalized to 0.0 and rescaled\n        such that its variance = 1.0. This is the recommended procedure by\n        Schwarzenberg-Czerny 1996.\n\n    phasebinsize : float\n        The bin size in phase to use when calculating the AoV theta statistic at\n        a test frequency.\n\n    mindetperbin : int\n        The minimum number of elements in a phase bin to consider it valid when\n        calculating the AoV theta statistic at a test frequency.\n\n    nbestpeaks : int\n        The number of 'best' peaks to return from the periodogram results,\n        starting from the global maximum of the periodogram peak values.\n\n    periodepsilon : float\n        The fractional difference between successive values of 'best' periods\n        when sorting by periodogram power to consider them as separate periods\n        (as opposed to part of the same periodogram peak). 
This is used to avoid\n        broad peaks in the periodogram and make sure the 'best' periods returned\n        are all actually independent.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    nworkers : int\n        The number of parallel workers to use when calculating the periodogram.\n\n    verbose : bool\n        If this is True, will indicate progress and details about the frequency\n        grid used for the period search.\n\n    Returns\n    -------\n\n    dict\n        This function returns a dict, referred to as an `lspinfo` dict in other\n        astrobase functions that operate on periodogram results. 
This is a\n        standardized format across all astrobase period-finders, and is of the\n        form below::\n\n            {'bestperiod': the best period value in the periodogram,\n             'bestlspval': the periodogram peak associated with the best period,\n             'nbestpeaks': the input value of nbestpeaks,\n             'nbestlspvals': nbestpeaks-size list of best period peak values,\n             'nbestperiods': nbestpeaks-size list of best periods,\n             'lspvals': the full array of periodogram powers,\n             'periods': the full array of periods considered,\n             'method':'aov' -> the name of the period-finder method,\n             'kwargs':{ dict of all of the input kwargs for record-keeping}}", "id": "f14757:m2"}
{"signature": "def _glsp_worker_notau(task):", "body": "try:<EOL><INDENT>return generalized_lsp_value_notau(*task)<EOL><DEDENT>except Exception as e:<EOL><INDENT>return npnan<EOL><DEDENT>", "docstring": "This is a worker to wrap the generalized Lomb-Scargle single-freq func.\n\n    This version doesn't use tau.", "id": "f14758:m7"}
{"signature": "def specwindow_lsp_value(times, mags, errs, omega):", "body": "norm_times = times - times.min()<EOL>tau = (<EOL>(<NUM_LIT:1.0>/(<NUM_LIT>*omega)) *<EOL>nparctan( npsum(npsin(<NUM_LIT>*omega*norm_times)) /<EOL>npsum(npcos(<NUM_LIT>*omega*norm_times)) )<EOL>)<EOL>lspval_top_cos = (npsum(<NUM_LIT:1.0> * npcos(omega*(norm_times-tau))) *<EOL>npsum(<NUM_LIT:1.0> * npcos(omega*(norm_times-tau))))<EOL>lspval_bot_cos = npsum( (npcos(omega*(norm_times-tau))) *<EOL>(npcos(omega*(norm_times-tau))) )<EOL>lspval_top_sin = (npsum(<NUM_LIT:1.0> * npsin(omega*(norm_times-tau))) *<EOL>npsum(<NUM_LIT:1.0> * npsin(omega*(norm_times-tau))))<EOL>lspval_bot_sin = npsum( (npsin(omega*(norm_times-tau))) *<EOL>(npsin(omega*(norm_times-tau))) )<EOL>lspval = <NUM_LIT:0.5> * ( (lspval_top_cos/lspval_bot_cos) +<EOL>(lspval_top_sin/lspval_bot_sin) )<EOL>return lspval<EOL>", "docstring": "This calculates the peak associated with the spectral window function\n    for times and at the specified omega.\n\n    NOTE: this is classical Lomb-Scargle, not the Generalized\n    Lomb-Scargle. `mags` and `errs` are silently ignored since we're calculating\n    the periodogram of the observing window function. These are kept to present\n    a consistent external API so the `pgen_lsp` function below can call this\n    transparently.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The time-series to calculate the periodogram value for.\n\n    omega : float\n        The frequency to calculate the periodogram value at.\n\n    Returns\n    -------\n\n    periodogramvalue : float\n        The normalized periodogram at the specified test frequency `omega`.", "id": "f14758:m3"}
{"signature": "def pgen_lsp(<EOL>times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=False,<EOL>startp=None,<EOL>endp=None,<EOL>stepsize=<NUM_LIT>,<EOL>autofreq=True,<EOL>nbestpeaks=<NUM_LIT:5>,<EOL>periodepsilon=<NUM_LIT:0.1>,<EOL>sigclip=<NUM_LIT>,<EOL>nworkers=None,<EOL>workchunksize=None,<EOL>glspfunc=_glsp_worker_withtau,<EOL>verbose=True<EOL>):", "body": "<EOL>stimes, smags, serrs = sigclip_magseries(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=magsarefluxes,<EOL>sigclip=sigclip)<EOL>nzind = npnonzero(serrs)<EOL>stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]<EOL>if len(stimes) > <NUM_LIT:9> and len(smags) > <NUM_LIT:9> and len(serrs) > <NUM_LIT:9>:<EOL><INDENT>if startp:<EOL><INDENT>endf = <NUM_LIT:1.0>/startp<EOL><DEDENT>else:<EOL><INDENT>endf = <NUM_LIT:1.0>/<NUM_LIT:0.1><EOL><DEDENT>if endp:<EOL><INDENT>startf = <NUM_LIT:1.0>/endp<EOL><DEDENT>else:<EOL><INDENT>startf = <NUM_LIT:1.0>/(stimes.max() - stimes.min())<EOL><DEDENT>if not autofreq:<EOL><INDENT>omegas = <NUM_LIT:2>*pi_value*nparange(startf, endf, stepsize)<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>' %<EOL>(omegas.size, <NUM_LIT:1.0>/endf, <NUM_LIT:1.0>/startf)<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>freqs = get_frequency_grid(stimes,<EOL>minfreq=startf,<EOL>maxfreq=endf)<EOL>omegas = <NUM_LIT:2>*pi_value*freqs<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(omegas.size, <NUM_LIT:1.0>/freqs.max(), <NUM_LIT:1.0>/freqs.min())<EOL>)<EOL><DEDENT><DEDENT>if (not nworkers) or (nworkers > NCPUS):<EOL><INDENT>nworkers = NCPUS<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % nworkers)<EOL><DEDENT><DEDENT>pool = Pool(nworkers)<EOL>tasks = [(stimes, smags, serrs, x) for x in omegas]<EOL>if workchunksize:<EOL><INDENT>lsp = pool.map(glspfunc, tasks, chunksize=workchunksize)<EOL><DEDENT>else:<EOL><INDENT>lsp = pool.map(glspfunc, tasks)<EOL><DEDENT>pool.close()<EOL>pool.join()<EOL>del pool<EOL>lsp = nparray(lsp)<EOL>periods = 
<NUM_LIT>*pi_value/omegas<EOL>finitepeakind = npisfinite(lsp)<EOL>finlsp = lsp[finitepeakind]<EOL>finperiods = periods[finitepeakind]<EOL>try:<EOL><INDENT>bestperiodind = npargmax(finlsp)<EOL><DEDENT>except ValueError:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':omegas,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip}}<EOL><DEDENT>sortedlspind = npargsort(finlsp)[::-<NUM_LIT:1>]<EOL>sortedlspperiods = finperiods[sortedlspind]<EOL>sortedlspvals = finlsp[sortedlspind]<EOL>nbestperiods, nbestlspvals, peakcount = (<EOL>[finperiods[bestperiodind]],<EOL>[finlsp[bestperiodind]],<EOL><NUM_LIT:1><EOL>)<EOL>prevperiod = sortedlspperiods[<NUM_LIT:0>]<EOL>for period, lspval in zip(sortedlspperiods, sortedlspvals):<EOL><INDENT>if peakcount == nbestpeaks:<EOL><INDENT>break<EOL><DEDENT>perioddiff = abs(period - prevperiod)<EOL>bestperiodsdiff = [abs(period - x) for x in nbestperiods]<EOL>if (perioddiff > (periodepsilon*prevperiod) and<EOL>all(x > (periodepsilon*period) for x in bestperiodsdiff)):<EOL><INDENT>nbestperiods.append(period)<EOL>nbestlspvals.append(lspval)<EOL>peakcount = peakcount + <NUM_LIT:1><EOL><DEDENT>prevperiod = period<EOL><DEDENT>return 
{'<STR_LIT>':finperiods[bestperiodind],<EOL>'<STR_LIT>':finlsp[bestperiodind],<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':nbestlspvals,<EOL>'<STR_LIT>':nbestperiods,<EOL>'<STR_LIT>':lsp,<EOL>'<STR_LIT>':omegas,<EOL>'<STR_LIT>':periods,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip}}<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip}}<EOL><DEDENT>", "docstring": "This calculates the generalized Lomb-Scargle periodogram.\n\n    Uses the algorithm from Zechmeister and Kurster (2009).\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The mag/flux time-series with associated measurement errors to run the\n        period-finding on.\n\n    magsarefluxes : bool\n        If the input measurement values in `mags` and `errs` are in fluxes, set\n        this to True.\n\n    startp,endp : float or None\n        The minimum and maximum periods to consider for the transit search.\n\n    stepsize : float\n        The step-size in frequency to use when constructing a frequency grid for\n        the period search.\n\n    autofreq : bool\n        If this is True, the value of `stepsize` will be ignored and the\n        :py:func:`astrobase.periodbase.get_frequency_grid` function will be used\n        to generate a frequency grid based on `startp`, and `endp`. 
If these are\n        None as well, `startp` will be set to 0.1 and `endp` will be set to\n        `times.max() - times.min()`.\n\n    nbestpeaks : int\n        The number of 'best' peaks to return from the periodogram results,\n        starting from the global maximum of the periodogram peak values.\n\n    periodepsilon : float\n        The fractional difference between successive values of 'best' periods\n        when sorting by periodogram power to consider them as separate periods\n        (as opposed to part of the same periodogram peak). This is used to avoid\n        broad peaks in the periodogram and make sure the 'best' periods returned\n        are all actually independent.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    nworkers : int\n        The number of parallel workers to use when calculating the periodogram.\n\n    workchunksize : None or int\n        If this is an int, will use chunks of the given size to break up the\n        work for the parallel workers. If None, the chunk size is set to 1.\n\n    glspfunc : Python function\n        The worker function to use to calculate the periodogram. This can be\n        used to make this function calculate the time-series sampling window\n        function instead of the time-series measurements' GLS periodogram by\n        passing in `_glsp_worker_specwindow` instead of the default\n        `_glsp_worker_withtau` function.\n\n    verbose : bool\n        If this is True, will indicate progress and details about the frequency\n        grid used for the period search.\n\n    Returns\n    -------\n\n    dict\n        This function returns a dict, referred to as an `lspinfo` dict in other\n        astrobase functions that operate on periodogram results. 
This is a\n        standardized format across all astrobase period-finders, and is of the\n        form below::\n\n            {'bestperiod': the best period value in the periodogram,\n             'bestlspval': the periodogram peak associated with the best period,\n             'nbestpeaks': the input value of nbestpeaks,\n             'nbestlspvals': nbestpeaks-size list of best period peak values,\n             'nbestperiods': nbestpeaks-size list of best periods,\n             'lspvals': the full array of periodogram powers,\n             'periods': the full array of periods considered,\n             'method':'gls' -> the name of the period-finder method,\n             'kwargs':{ dict of all of the input kwargs for record-keeping}}", "id": "f14758:m8"}
{"signature": "def probability_peak_exceeds_value(times, peakval):", "body": "return (<NUM_LIT:1.0> - peakval)**((times.size - <NUM_LIT>)/<NUM_LIT>)<EOL>", "docstring": "This calculates the probability that periodogram values exceed the given\n    peak value.\n\n    This is from page 3 of Zechmeister and Kurster (2009)::\n\n        Prob(p > p_best) = (1 \u2212 p_best)**((N\u22123)/2)\n\n    where::\n\n        p_best is the peak value in consideration\n        N is the number of times\n\n    Note that this is for the default normalization of the periodogram,\n    e.g. P_normalized = P(omega), such that P represents the sample variance\n    (see Table 1).\n\n    Parameters\n    ----------\n\n    lspvals : np.array\n        The periodogram power value array.\n\n    peakval : float\n        A single peak value to calculate the probability for.\n\n    Returns\n    -------\n\n    prob: float\n        The probability value.", "id": "f14758:m10"}
{"signature": "def _get_bls_stats(stimes,<EOL>smags,<EOL>serrs,<EOL>thistransdepth,<EOL>thistransduration,<EOL>ingressdurationfraction,<EOL>nphasebins,<EOL>thistransingressbin,<EOL>thistransegressbin,<EOL>thisbestperiod,<EOL>thisnphasebins,<EOL>magsarefluxes=False,<EOL>verbose=False):", "body": "try:<EOL><INDENT>me_epochbin = int((thistransegressbin +<EOL>thistransingressbin)/<NUM_LIT>)<EOL>me_phases = (<EOL>(stimes - stimes.min())/thisbestperiod -<EOL>npfloor((stimes - stimes.min())/thisbestperiod)<EOL>)<EOL>me_phases_sortind = npargsort(me_phases)<EOL>me_sorted_phases = me_phases[me_phases_sortind]<EOL>me_sorted_times = stimes[me_phases_sortind]<EOL>me_bins = nplinspace(<NUM_LIT:0.0>, <NUM_LIT:1.0>, thisnphasebins)<EOL>me_bininds = npdigitize(me_sorted_phases, me_bins)<EOL>me_centertransit_ind = me_bininds == me_epochbin<EOL>me_centertransit_phase = (<EOL>npmedian(me_sorted_phases[me_centertransit_ind])<EOL>)<EOL>me_centertransit_timeloc = npwhere(<EOL>npabs(me_sorted_phases - me_centertransit_phase) ==<EOL>npmin(npabs(me_sorted_phases - me_centertransit_phase))<EOL>)<EOL>me_centertransit_time = me_sorted_times[<EOL>me_centertransit_timeloc<EOL>]<EOL>if me_centertransit_time.size > <NUM_LIT:1>:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(thisbestperiod, repr(me_centertransit_time)))<EOL><DEDENT>thisminepoch = me_centertransit_time[<NUM_LIT:0>]<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL>savfit = savgol_fit_magseries(stimes, smags, serrs,<EOL>thisbestperiod,<EOL>magsarefluxes=magsarefluxes,<EOL>verbose=verbose,<EOL>sigclip=None)<EOL>thisminepoch = savfit['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>if isinstance(thisminepoch, npndarray):<EOL><INDENT>if verbose:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% repr(thisminepoch))<EOL><DEDENT>thisminepoch = thisminepoch[<NUM_LIT:0>]<EOL><DEDENT>transitparams = 
[<EOL>thisbestperiod,<EOL>thisminepoch,<EOL>thistransdepth,<EOL>thistransduration,<EOL>ingressdurationfraction*thistransduration<EOL>]<EOL>modelfit = traptransit_fit_magseries(<EOL>stimes,<EOL>smags,<EOL>serrs,<EOL>transitparams,<EOL>sigclip=None,<EOL>magsarefluxes=magsarefluxes,<EOL>verbose=verbose<EOL>)<EOL>if modelfit and modelfit['<STR_LIT>']['<STR_LIT>'] is not None:<EOL><INDENT>fitparams = modelfit['<STR_LIT>']['<STR_LIT>']<EOL>fiterrs = modelfit['<STR_LIT>']['<STR_LIT>']<EOL>modelmags, actualmags, modelphase = (<EOL>modelfit['<STR_LIT>']['<STR_LIT>'],<EOL>modelfit['<STR_LIT>']['<STR_LIT>'],<EOL>modelfit['<STR_LIT>']['<STR_LIT>']<EOL>)<EOL>subtractedmags = actualmags - modelmags<EOL>subtractedrms = npstd(subtractedmags)<EOL>fit_period, fit_epoch, fit_depth, fit_duration, fit_ingress_dur = (<EOL>fitparams<EOL>)<EOL>npts_in_transit = modelfit['<STR_LIT>']['<STR_LIT>']<EOL>transit_snr = (<EOL>npsqrt(npts_in_transit) * npabs(fit_depth/subtractedrms)<EOL>)<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(fit_period, fit_epoch))<EOL>LOGINFO('<STR_LIT>' % npts_in_transit)<EOL>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(fit_depth,<EOL>fit_duration,<EOL>transit_snr))<EOL><DEDENT>return {'<STR_LIT>':fit_period,<EOL>'<STR_LIT>':fit_epoch,<EOL>'<STR_LIT>':transit_snr,<EOL>'<STR_LIT>':fit_depth,<EOL>'<STR_LIT>':fit_duration,<EOL>'<STR_LIT>':nphasebins,<EOL>'<STR_LIT>':thistransingressbin,<EOL>'<STR_LIT>':thistransegressbin,<EOL>'<STR_LIT>':npts_in_transit,<EOL>'<STR_LIT>':modelmags,<EOL>'<STR_LIT>':subtractedmags,<EOL>'<STR_LIT>':actualmags,<EOL>'<STR_LIT>':modelphase,<EOL>'<STR_LIT>':fitparams,<EOL>'<STR_LIT>':fiterrs,<EOL>'<STR_LIT>':modelfit}<EOL><DEDENT>else:<EOL><INDENT>phased_magseries = phase_magseries_with_errs(stimes,<EOL>smags,<EOL>serrs,<EOL>thisbestperiod,<EOL>thisminepoch,<EOL>wrap=False,<EOL>sort=True)<EOL>tphase = phased_magseries['<STR_LIT>']<EOL>tmags = phased_magseries['<STR_LIT>']<EOL>transitphase = 
thistransduration/<NUM_LIT><EOL>transitindices = ((tphase < transitphase) |<EOL>(tphase > (<NUM_LIT:1.0> - transitphase)))<EOL>blsmodel = npfull_like(tmags, npmedian(tmags))<EOL>if magsarefluxes:<EOL><INDENT>blsmodel[transitindices] = (<EOL>blsmodel[transitindices] - thistransdepth<EOL>)<EOL><DEDENT>else:<EOL><INDENT>blsmodel[transitindices] = (<EOL>blsmodel[transitindices] - thistransdepth<EOL>)<EOL><DEDENT>subtractedmags = tmags - blsmodel<EOL>subtractedrms = npstd(subtractedmags)<EOL>npts_in_transit = len(tmags[transitindices])<EOL>thissnr = (<EOL>npsqrt(npts_in_transit) * npabs(thistransdepth/subtractedrms)<EOL>)<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(thisbestperiod, thisminepoch))<EOL>LOGINFO('<STR_LIT>' % (<NUM_LIT:1.0> -<EOL>transitphase,<EOL><NUM_LIT:1.0>))<EOL>LOGINFO('<STR_LIT>' % (<NUM_LIT:0.0>,<EOL>transitphase))<EOL>LOGINFO('<STR_LIT>' % tmags[transitindices].size)<EOL>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(thistransdepth,<EOL>thistransduration,<EOL>thissnr))<EOL><DEDENT>return {'<STR_LIT>':thisbestperiod,<EOL>'<STR_LIT>':thisminepoch,<EOL>'<STR_LIT>':thissnr,<EOL>'<STR_LIT>':thistransdepth,<EOL>'<STR_LIT>':thistransduration,<EOL>'<STR_LIT>':nphasebins,<EOL>'<STR_LIT>':thistransingressbin,<EOL>'<STR_LIT>':thistransegressbin,<EOL>'<STR_LIT>':blsmodel,<EOL>'<STR_LIT>':subtractedmags,<EOL>'<STR_LIT>':tmags,<EOL>'<STR_LIT>':tphase}<EOL><DEDENT>", "docstring": "Actually calculates the stats.", "id": "f14759:m4"}
{"signature": "def bls_serial_pfind(<EOL>times, mags, errs,<EOL>magsarefluxes=False,<EOL>startp=<NUM_LIT:0.1>,  <EOL>endp=<NUM_LIT>,  <EOL>stepsize=<NUM_LIT>,<EOL>mintransitduration=<NUM_LIT>,  <EOL>maxtransitduration=<NUM_LIT>,   <EOL>nphasebins=<NUM_LIT:200>,<EOL>autofreq=True,  <EOL>periodepsilon=<NUM_LIT:0.1>,<EOL>nbestpeaks=<NUM_LIT:5>,<EOL>sigclip=<NUM_LIT>,<EOL>verbose=True,<EOL>get_stats=True,<EOL>):", "body": "<EOL>stimes, smags, serrs = sigclip_magseries(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=magsarefluxes,<EOL>sigclip=sigclip)<EOL>if len(stimes) > <NUM_LIT:9> and len(smags) > <NUM_LIT:9> and len(serrs) > <NUM_LIT:9>:<EOL><INDENT>if autofreq:<EOL><INDENT>nphasebins = int(npceil(<NUM_LIT>/mintransitduration))<EOL>if nphasebins > <NUM_LIT>:<EOL><INDENT>nphasebins = <NUM_LIT><EOL><DEDENT>stepsize = <NUM_LIT>*mintransitduration/(stimes.max()-stimes.min())<EOL>minfreq = <NUM_LIT:1.0>/endp<EOL>maxfreq = <NUM_LIT:1.0>/startp<EOL>nfreq = int(npceil((maxfreq - minfreq)/stepsize))<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' % (startp, endp, nfreq,<EOL>minfreq, maxfreq))<EOL>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(stepsize, nphasebins,<EOL>mintransitduration, maxtransitduration))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>minfreq = <NUM_LIT:1.0>/endp<EOL>maxfreq = <NUM_LIT:1.0>/startp<EOL>nfreq = int(npceil((maxfreq - minfreq)/stepsize))<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' % (startp, endp, nfreq,<EOL>minfreq, maxfreq))<EOL>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(stepsize, nphasebins,<EOL>mintransitduration, maxtransitduration))<EOL><DEDENT><DEDENT>if nfreq > <NUM_LIT>:<EOL><INDENT>if verbose:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>if minfreq < (<NUM_LIT:1.0>/(stimes.max() - stimes.min())):<EOL><INDENT>if verbose:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (endp, stimes.max() - 
stimes.min()))<EOL><DEDENT>minfreq = <NUM_LIT>/(stimes.max() - stimes.min())<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' %<EOL>(minfreq, maxfreq))<EOL><DEDENT><DEDENT>try:<EOL><INDENT>blsresult = _bls_runner(stimes,<EOL>smags,<EOL>nfreq,<EOL>minfreq,<EOL>stepsize,<EOL>nphasebins,<EOL>mintransitduration,<EOL>maxtransitduration)<EOL>frequencies = minfreq + nparange(nfreq)*stepsize<EOL>periods = <NUM_LIT:1.0>/frequencies<EOL>lsp = blsresult['<STR_LIT>']<EOL>finitepeakind = npisfinite(lsp)<EOL>finlsp = lsp[finitepeakind]<EOL>finperiods = periods[finitepeakind]<EOL>try:<EOL><INDENT>bestperiodind = npargmax(finlsp)<EOL><DEDENT>except ValueError:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':nphasebins,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip,<EOL>'<STR_LIT>':magsarefluxes}}<EOL><DEDENT>sortedlspind = npargsort(finlsp)[::-<NUM_LIT:1>]<EOL>sortedlspperiods = finperiods[sortedlspind]<EOL>sortedlspvals = finlsp[sortedlspind]<EOL>nbestperiods, nbestlspvals, peakcount = (<EOL>[finperiods[bestperiodind]],<EOL>[finlsp[bestperiodind]],<EOL><NUM_LIT:1><EOL>)<EOL>prevperiod = sortedlspperiods[<NUM_LIT:0>]<EOL>for period, lspval in zip(sortedlspperiods, sortedlspvals):<EOL><INDENT>if peakcount == nbestpeaks:<EOL><INDENT>break<EOL><DEDENT>perioddiff = abs(period - prevperiod)<EOL>bestperiodsdiff = [abs(period - x) for x in nbestperiods]<EOL>if (perioddiff > (periodepsilon*prevperiod) and<EOL>all(x > (periodepsilon*period)<EOL>for x in bestperiodsdiff)):<EOL><INDENT>nbestperiods.append(period)<EOL>nbestlspvals.append(lspval)<EOL>peakcount = 
peakcount + <NUM_LIT:1><EOL><DEDENT>prevperiod = period<EOL><DEDENT>resultdict = {<EOL>'<STR_LIT>':finperiods[bestperiodind],<EOL>'<STR_LIT>':finlsp[bestperiodind],<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':nbestlspvals,<EOL>'<STR_LIT>':nbestperiods,<EOL>'<STR_LIT>':lsp,<EOL>'<STR_LIT>':frequencies,<EOL>'<STR_LIT>':periods,<EOL>'<STR_LIT>':blsresult,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':nfreq,<EOL>'<STR_LIT>':nphasebins,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':nphasebins,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip,<EOL>'<STR_LIT>':magsarefluxes}<EOL>}<EOL>if get_stats:<EOL><INDENT>resultdict['<STR_LIT>'] = []<EOL>for bp in nbestperiods:<EOL><INDENT>if verbose:<EOL><INDENT>LOGINFO(\"<STR_LIT>\" % bp)<EOL><DEDENT>this_pstats = bls_stats_singleperiod(<EOL>times, mags, errs, bp,<EOL>magsarefluxes=resultdict['<STR_LIT>']['<STR_LIT>'],<EOL>sigclip=resultdict['<STR_LIT>']['<STR_LIT>'],<EOL>nphasebins=resultdict['<STR_LIT>'],<EOL>mintransitduration=resultdict['<STR_LIT>'],<EOL>maxtransitduration=resultdict['<STR_LIT>'],<EOL>verbose=verbose,<EOL>)<EOL>resultdict['<STR_LIT>'].append(this_pstats)<EOL><DEDENT><DEDENT>return resultdict<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>return 
{'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':nfreq,<EOL>'<STR_LIT>':nphasebins,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':nphasebins,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip,<EOL>'<STR_LIT>':magsarefluxes}}<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':nphasebins,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip,<EOL>'<STR_LIT>':magsarefluxes}}<EOL><DEDENT>", "docstring": "Runs the Box Least Squares Fitting Search for transit-shaped signals.\n\n    Based on eebls.f from Kovacs et al. 2002 and python-bls from Foreman-Mackey\n    et al. 2015. This is the serial version (which is good enough in most cases\n    because BLS in Fortran is fairly fast). 
If nfreq > 5e5, this will take a\n    while.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The magnitude/flux time-series to search for transits.\n\n    magsarefluxes : bool\n        If the input measurement values in `mags` and `errs` are in fluxes, set\n        this to True.\n\n    startp,endp : float\n        The minimum and maximum periods to consider for the transit search.\n\n    stepsize : float\n        The step-size in frequency to use when constructing a frequency grid for\n        the period search.\n\n    mintransitduration,maxtransitduration : float\n        The minimum and maximum transitdurations (in units of phase) to consider\n        for the transit search.\n\n    nphasebins : int\n        The number of phase bins to use in the period search.\n\n    autofreq : bool\n        If this is True, the values of `stepsize` and `nphasebins` will be\n        ignored, and these, along with a frequency-grid, will be determined\n        based on the following relations::\n\n            nphasebins = int(ceil(2.0/mintransitduration))\n            if nphasebins > 3000:\n                nphasebins = 3000\n\n            stepsize = 0.25*mintransitduration/(times.max()-times.min())\n\n            minfreq = 1.0/endp\n            maxfreq = 1.0/startp\n            nfreq = int(ceil((maxfreq - minfreq)/stepsize))\n\n    periodepsilon : float\n        The fractional difference between successive values of 'best' periods\n        when sorting by periodogram power to consider them as separate periods\n        (as opposed to part of the same periodogram peak). 
This is used to avoid\n        broad peaks in the periodogram and make sure the 'best' periods returned\n        are all actually independent.\n\n    nbestpeaks : int\n        The number of 'best' peaks to return from the periodogram results,\n        starting from the global maximum of the periodogram peak values.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    verbose : bool\n        If this is True, will indicate progress and details about the frequency\n        grid used for the period search.\n\n    get_stats : bool\n        If True, runs :py:func:`.bls_stats_singleperiod` for each of the best\n        periods in the output and injects the output into the output dict so you\n        only have to run this function to get the periods and their stats.\n\n    Returns\n    -------\n\n    dict\n        This function returns a dict, referred to as an `lspinfo` dict in other\n        astrobase functions that operate on periodogram results. 
This is a\n        standardized format across all astrobase period-finders, and is of the\n        form below::\n\n            {'bestperiod': the best period value in the periodogram,\n             'bestlspval': the periodogram peak associated with the best period,\n             'nbestpeaks': the input value of nbestpeaks,\n             'nbestlspvals': nbestpeaks-size list of best period peak values,\n             'nbestperiods': nbestpeaks-size list of best periods,\n             'stats': BLS stats for each best period,\n             'lspvals': the full array of periodogram powers,\n             'frequencies': the full array of frequencies considered,\n             'periods': the full array of periods considered,\n             'blsresult': the result dict from the eebls.f wrapper function,\n             'stepsize': the actual stepsize used,\n             'nfreq': the actual nfreq used,\n             'nphasebins': the actual nphasebins used,\n             'mintransitduration': the input mintransitduration,\n             'maxtransitduration': the input maxtransitdurations,\n             'method':'bls' -> the name of the period-finder method,\n             'kwargs':{ dict of all of the input kwargs for record-keeping}}", "id": "f14759:m2"}
{"signature": "def bls_snr(blsdict,<EOL>times,<EOL>mags,<EOL>errs,<EOL>assumeserialbls=False,<EOL>magsarefluxes=False,<EOL>sigclip=<NUM_LIT>,<EOL>npeaks=None,<EOL>perioddeltapercent=<NUM_LIT:10>,<EOL>ingressdurationfraction=<NUM_LIT:0.1>,<EOL>verbose=True):", "body": "<EOL>if (npeaks and (<NUM_LIT:0> < npeaks < len(blsdict['<STR_LIT>']))):<EOL><INDENT>nperiods = npeaks<EOL><DEDENT>else:<EOL><INDENT>if verbose:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>len(blsdict['<STR_LIT>']))<EOL><DEDENT>nperiods = len(blsdict['<STR_LIT>'])<EOL><DEDENT>nbestperiods = blsdict['<STR_LIT>'][:nperiods]<EOL>stimes, smags, serrs = sigclip_magseries(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=magsarefluxes,<EOL>sigclip=sigclip)<EOL>if len(stimes) > <NUM_LIT:9> and len(smags) > <NUM_LIT:9> and len(serrs) > <NUM_LIT:9>:<EOL><INDENT>nbestsnrs = []<EOL>transitdepth, transitduration = [], []<EOL>nphasebins, transingressbin, transegressbin = [], [], []<EOL>allsubtractedmags = []<EOL>allphasedmags = []<EOL>allphases = []<EOL>allblsmodels = []<EOL>refitperiods = []<EOL>refitepochs = []<EOL>for period in nbestperiods:<EOL><INDENT>startp = period - perioddeltapercent*period/<NUM_LIT><EOL>if startp < <NUM_LIT:0>:<EOL><INDENT>startp = period<EOL><DEDENT>endp = period + perioddeltapercent*period/<NUM_LIT><EOL>if not assumeserialbls:<EOL><INDENT>prevkwargs = blsdict['<STR_LIT>'].copy()<EOL>prevkwargs['<STR_LIT>'] = verbose<EOL>prevkwargs['<STR_LIT>'] = startp<EOL>prevkwargs['<STR_LIT>'] = endp<EOL>prevkwargs['<STR_LIT>'] = None<EOL>blsres = bls_serial_pfind(stimes,<EOL>smags,<EOL>serrs,<EOL>**prevkwargs)<EOL><DEDENT>else:<EOL><INDENT>blsres = blsdict<EOL><DEDENT>thistransdepth = blsres['<STR_LIT>']['<STR_LIT>']<EOL>thistransduration = blsres['<STR_LIT>']['<STR_LIT>']<EOL>thisbestperiod = blsres['<STR_LIT>']<EOL>thistransingressbin = blsres['<STR_LIT>']['<STR_LIT>']<EOL>thistransegressbin = blsres['<STR_LIT>']['<STR_LIT>']<EOL>thisnphasebins = 
blsdict['<STR_LIT>']['<STR_LIT>']<EOL>stats = _get_bls_stats(stimes,<EOL>smags,<EOL>serrs,<EOL>thistransdepth,<EOL>thistransduration,<EOL>ingressdurationfraction,<EOL>nphasebins,<EOL>thistransingressbin,<EOL>thistransegressbin,<EOL>thisbestperiod,<EOL>thisnphasebins,<EOL>magsarefluxes=magsarefluxes,<EOL>verbose=verbose)<EOL>nbestsnrs.append(stats['<STR_LIT>'])<EOL>transitdepth.append(stats['<STR_LIT>'])<EOL>transitduration.append(stats['<STR_LIT>'])<EOL>transingressbin.append(stats['<STR_LIT>'])<EOL>transegressbin.append(stats['<STR_LIT>'])<EOL>nphasebins.append(stats['<STR_LIT>'])<EOL>refitperiods.append(stats['<STR_LIT>'])<EOL>refitepochs.append(stats['<STR_LIT>'])<EOL>allsubtractedmags.append(stats['<STR_LIT>'])<EOL>allphasedmags.append(stats['<STR_LIT>'])<EOL>allphases.append(stats['<STR_LIT>'])<EOL>allblsmodels.append(stats['<STR_LIT>'])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>nbestsnrs = None<EOL>transitdepth, transitduration = None, None<EOL>nphasebins, transingressbin, transegressbin = None, None, None<EOL>allsubtractedmags, allphases, allphasedmags = None, None, None<EOL><DEDENT>return {'<STR_LIT>':npeaks,<EOL>'<STR_LIT>':refitperiods,<EOL>'<STR_LIT>':refitepochs,<EOL>'<STR_LIT>':nbestsnrs,<EOL>'<STR_LIT>':transitdepth,<EOL>'<STR_LIT>':transitduration,<EOL>'<STR_LIT>':nphasebins,<EOL>'<STR_LIT>':transingressbin,<EOL>'<STR_LIT>':transegressbin,<EOL>'<STR_LIT>':allblsmodels,<EOL>'<STR_LIT>':allsubtractedmags,<EOL>'<STR_LIT>':allphasedmags,<EOL>'<STR_LIT>':allphases}<EOL>", "docstring": "Calculates the signal to noise ratio for each best peak in the BLS\n    periodogram, along with transit depth, duration, and refit period and epoch.\n\n    The following equation is used for SNR::\n\n        SNR = (transit model depth / RMS of LC with transit model subtracted)\n              * sqrt(number of points in transit)\n\n    Parameters\n    ----------\n\n    blsdict : dict\n        This is an lspinfo dict produced by either 
`bls_parallel_pfind` or\n        `bls_serial_pfind` in this module, or by your own BLS function. If you\n        provide results in a dict from an external BLS function, make sure this\n        matches the form below::\n\n            {'bestperiod': the best period value in the periodogram,\n             'bestlspval': the periodogram peak associated with the best period,\n             'nbestpeaks': the input value of nbestpeaks,\n             'nbestlspvals': nbestpeaks-size list of best period peak values,\n             'nbestperiods': nbestpeaks-size list of best periods,\n             'lspvals': the full array of periodogram powers,\n             'frequencies': the full array of frequencies considered,\n             'periods': the full array of periods considered,\n             'blsresult': list of result dicts from eebls.f wrapper functions,\n             'stepsize': the actual stepsize used,\n             'nfreq': the actual nfreq used,\n             'nphasebins': the actual nphasebins used,\n             'mintransitduration': the input mintransitduration,\n             'maxtransitduration': the input maxtransitdurations,\n             'method':'bls' -> the name of the period-finder method,\n             'kwargs':{ dict of all of the input kwargs for record-keeping}}\n\n    times,mags,errs : np.array\n        These contain the magnitude/flux time-series and any associated errors.\n\n    assumeserialbls : bool\n        If this is True, this function will not rerun BLS around each best peak\n        in the input lspinfo dict to refit the periods and epochs. This is\n        usally required for `bls_parallel_pfind` so set this to False if you use\n        results from that function. 
The parallel method breaks up the frequency\n        space into chunks for speed, and the results may not exactly match those\n        from a regular BLS run.\n\n    magsarefluxes : bool\n        Set to True if the input measurements in `mags` are actually fluxes and\n        not magnitudes.\n\n    npeaks : int or None\n        This controls how many of the periods in `blsdict['nbestperiods']` to\n        find the SNR for. If it's None, then this will calculate the SNR for all\n        of them. If it's an integer between 1 and\n        `len(blsdict['nbestperiods'])`, will calculate for only the specified\n        number of peak periods, starting from the best period.\n\n    perioddeltapercent : float\n        The fraction of the period provided to use to search around this\n        value. This is a percentage. The period range searched will then be::\n\n            [period - (perioddeltapercent/100.0)*period,\n             period + (perioddeltapercent/100.0)*period]\n\n    ingressdurationfraction : float\n        The fraction of the transit duration to use to generate an initial value\n        of the transit ingress duration for the BLS model refit. This will be\n        fit by this function.\n\n    verbose : bool\n        If True, will indicate progress and any problems encountered.\n\n    Returns\n    -------\n\n    dict\n        A dict of the following form is returned::\n\n            {'npeaks: the number of periodogram peaks requested to get SNR for,\n             'period': list of refit best periods for each requested peak,\n             'epoch': list of refit epochs (i.e. 
mid-transit times),\n             'snr':list of SNRs of the transit for each requested peak,\n             'transitdepth':list of depths of the transits,\n             'transitduration':list of durations of the transits,\n             'nphasebins':the input value of nphasebins,\n             'transingressbin':the phase bin containing transit ingress,\n             'transegressbin':the phase bin containing transit egress,\n             'allblsmodels':the full BLS models used along with its parameters,\n             'allsubtractedmags':BLS models - phased light curves,\n             'allphasedmags':the phase light curves,\n             'allphases': the phase values}", "id": "f14759:m6"}
{"signature": "def get_frequency_grid(times,<EOL>samplesperpeak=<NUM_LIT:5>,<EOL>nyquistfactor=<NUM_LIT:5>,<EOL>minfreq=None,<EOL>maxfreq=None,<EOL>returnf0dfnf=False):", "body": "baseline = times.max() - times.min()<EOL>nsamples = times.size<EOL>df = <NUM_LIT:1.> / baseline / samplesperpeak<EOL>if minfreq is not None:<EOL><INDENT>f0 = minfreq<EOL><DEDENT>else:<EOL><INDENT>f0 = <NUM_LIT:0.5> * df<EOL><DEDENT>if maxfreq is not None:<EOL><INDENT>Nf = int(np.ceil((maxfreq - f0) / df))<EOL><DEDENT>else:<EOL><INDENT>Nf = int(<NUM_LIT:0.5> * samplesperpeak * nyquistfactor * nsamples)<EOL><DEDENT>if returnf0dfnf:<EOL><INDENT>return f0, df, Nf, f0 + df * np.arange(Nf)<EOL><DEDENT>else:<EOL><INDENT>return f0 + df * np.arange(Nf)<EOL><DEDENT>", "docstring": "This calculates a frequency grid for the period finding functions in this\n    module.\n\n    Based on the autofrequency function in astropy.stats.lombscargle.\n\n    http://docs.astropy.org/en/stable/_modules/astropy/stats/lombscargle/core.html#LombScargle.autofrequency\n\n    Parameters\n    ----------\n\n    times : np.array\n        The times to use to generate the frequency grid over.\n\n    samplesperpeak : int\n        The minimum sample coverage each frequency point in the grid will get.\n\n    nyquistfactor : int\n        The multiplier over the Nyquist rate to use.\n\n    minfreq,maxfreq : float or None\n        If not None, these will be the limits of the frequency grid generated.\n\n    returnf0dfnf : bool\n        If this is True, will return the values of `f0`, `df`, and `Nf`\n        generated for this grid.\n\n    Returns\n    -------\n\n    np.array\n        A grid of frequencies.", "id": "f14760:m0"}
{"signature": "def independent_freq_count(frequencies, times, conservative=True):", "body": "M = frequencies.ptp()*times.ptp()<EOL>if conservative:<EOL><INDENT>M_eff = min([times.size, frequencies.size, M])<EOL><DEDENT>else:<EOL><INDENT>M_eff = M<EOL><DEDENT>return M_eff<EOL>", "docstring": "This estimates M: the number of independent frequencies in the periodogram.\n\n    This follows the terminology on page 3 of Zechmeister & Kurster (2009)::\n\n        M = DELTA_f / delta_f\n\n    where::\n\n        DELTA_f = freq.max() - freq.min()\n        delta_f = 1.0/(times.max() - times.min())\n\n    Parameters\n    ----------\n\n    frequencies : np.array\n        The frequencies array used for the calculation of the GLS periodogram.\n\n    times : np.array\n        The array of input times used for the calculation of the GLS\n        periodogram.\n\n    conservative : bool\n        If True, will follow the prescription given in Schwarzenberg-Czerny\n        (2003):\n\n        http://adsabs.harvard.edu/abs/2003ASPC..292..383S\n\n        and estimate the number of independent frequences as::\n\n            min(N_obs, N_freq, DELTA_f/delta_f)\n\n    Returns\n    -------\n\n    M : int\n        The number of independent frequencies.", "id": "f14760:m1"}
{"signature": "def make_combined_periodogram(pflist, outfile, addmethods=False):", "body": "import matplotlib.pyplot as plt<EOL>for pf in pflist:<EOL><INDENT>if pf['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>plt.plot(pf['<STR_LIT>'],<EOL>np.max(pf['<STR_LIT>'])/pf['<STR_LIT>'] - <NUM_LIT:1.0>,<EOL>label='<STR_LIT>' % (pf['<STR_LIT>'], pf['<STR_LIT>']),<EOL>alpha=<NUM_LIT:0.5>)<EOL><DEDENT>else:<EOL><INDENT>plt.plot(pf['<STR_LIT>'],<EOL>pf['<STR_LIT>']/np.max(pf['<STR_LIT>']),<EOL>label='<STR_LIT>' % (pf['<STR_LIT>'], pf['<STR_LIT>']),<EOL>alpha=<NUM_LIT:0.5>)<EOL><DEDENT><DEDENT>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.xscale('<STR_LIT>')<EOL>plt.legend()<EOL>plt.tight_layout()<EOL>plt.savefig(outfile)<EOL>plt.close('<STR_LIT:all>')<EOL>return outfile<EOL>", "docstring": "This just puts all of the period-finders on a single periodogram.\n\n    This will renormalize all of the periodograms so their values lie between 0\n    and 1, with values lying closer to 1 being more significant. Periodograms\n    that give the same best periods will have their peaks line up together.\n\n    Parameters\n    ----------\n\n    pflist : list of dict\n        This is a list of result dicts from any of the period-finders in\n        periodbase. To use your own period-finders' results here, make sure the\n        result dict is of the form and has at least the keys below::\n\n            {'periods': np.array of all periods searched by the period-finder,\n             'lspvals': np.array of periodogram power value for each period,\n             'bestperiod': a float value that is the period with the highest\n                           peak in the periodogram, i.e. 
the most-likely actual\n                           period,\n             'method': a three-letter code naming the period-finder used; must\n                       be one of the keys in the\n                       `astrobase.periodbase.METHODLABELS` dict,\n             'nbestperiods': a list of the periods corresponding to periodogram\n                             peaks (`nbestlspvals` below) to annotate on the\n                             periodogram plot so they can be called out\n                             visually,\n             'nbestlspvals': a list of the power values associated with\n                             periodogram peaks to annotate on the periodogram\n                             plot so they can be called out visually; should be\n                             the same length as `nbestperiods` above,\n             'kwargs': dict of kwargs passed to your own period-finder function}\n\n    outfile : str\n        This is the output file to write the output to. NOTE: EPS/PS won't work\n        because we use alpha transparency to better distinguish between the\n        various periodograms.\n\n    addmethods : bool\n        If this is True, will add all of the normalized periodograms together,\n        then renormalize them to between 0 and 1. In this way, if all of the\n        period-finders agree on something, it'll stand out easily. FIXME:\n        implement this kwarg.\n\n    Returns\n    -------\n\n    str\n        The name of the generated plot file.", "id": "f14760:m3"}
{"signature": "def analytic_false_alarm_probability(lspinfo,<EOL>times,<EOL>conservative_nfreq_eff=True,<EOL>peakvals=None,<EOL>inplace=True):", "body": "from scipy.stats import beta<EOL>frequencies = <NUM_LIT:1.0>/lspinfo['<STR_LIT>']<EOL>M = independent_freq_count(frequencies,<EOL>times,<EOL>conservative=conservative_nfreq_eff)<EOL>if peakvals is None:<EOL><INDENT>peakvals = lspinfo['<STR_LIT>']<EOL><DEDENT>nphasebins = nparange(<NUM_LIT:0.0>, <NUM_LIT:1.0>, lspinfo['<STR_LIT>']['<STR_LIT>']).size<EOL>ndet = times.size<EOL>false_alarm_probs = []<EOL>for peakval in peakvals:<EOL><INDENT>prob_xval = ((ndet-nphasebins)/(nphasebins-<NUM_LIT:1.0>))*peakval<EOL>prob_exceeds_val = beta.cdf(prob_xval,<EOL>(ndet-nphasebins)/<NUM_LIT>,<EOL>(nphasebins-<NUM_LIT:1.0>)/<NUM_LIT>)<EOL>import ipdb; ipdb.set_trace()<EOL>false_alarm_probs.append(<NUM_LIT:1.0> - (<NUM_LIT:1.0> - prob_exceeds_val)**M)<EOL><DEDENT>if inplace:<EOL><INDENT>lspinfo['<STR_LIT>'] = false_alarm_probs<EOL><DEDENT>return false_alarm_probs<EOL>", "docstring": "This returns the analytic false alarm probabilities for periodogram\n    peak values.\n\n    FIXME: this doesn't actually work. 
Fix later.\n\n    The calculation follows that on page 3 of Zechmeister & Kurster (2009)::\n\n        FAP = 1 \u2212 [1 \u2212 Prob(z > z0)]**M\n\n    where::\n\n        M is the number of independent frequencies\n        Prob(z > z0) is the probability of peak with value > z0\n        z0 is the peak value we're evaluating\n\n    For PDM, the Prob(z > z0) is described by the beta distribution, according\n    to:\n\n    - Schwarzenberg-Czerny (1997;\n      https://ui.adsabs.harvard.edu/#abs/1997ApJ...489..941S)\n\n    - Zalian, Chadid, and Stellingwerf (2013;\n      http://adsabs.harvard.edu/abs/2014MNRAS.440...68Z)\n\n    This is given by::\n\n        beta( (N-B)/2, (B-1)/2; ((N-B)/(B-1))*theta_pdm )\n\n    Where::\n\n        N = number of observations\n        B = number of phase bins\n\n    This translates to a scipy.stats call to the beta distribution CDF::\n\n        x = ((N-B)/(B-1))*theta_pdm_best\n        prob_exceeds_val = scipy.stats.beta.cdf(x, (N-B)/2.0, (B-1.0)/2.0)\n\n    Which we can then plug into the false alarm prob eqn above with the\n    calculation of M.\n\n    Parameters\n    ----------\n\n    lspinfo : dict\n        The dict returned by the\n        :py:func:`~astrobase.periodbase.spdm.stellingwerf_pdm` function.\n\n    times : np.array\n        The times for which the periodogram result in ``lspinfo`` was\n        calculated.\n\n    conservative_nfreq_eff : bool\n        If True, will follow the prescription given in Schwarzenberg-Czerny\n        (2003):\n\n        http://adsabs.harvard.edu/abs/2003ASPC..292..383S\n\n        and estimate the effective number of independent frequences M_eff as::\n\n            min(N_obs, N_freq, DELTA_f/delta_f)\n\n    peakvals : sequence or None\n        The peak values for which to evaluate the false-alarm probability. 
If\n        None, will calculate this for each of the peak values in the\n        ``nbestpeaks`` key of the ``lspinfo`` dict.\n\n    inplace : bool\n        If True, puts the results of the FAP calculation into the ``lspinfo``\n        dict as a list available as ``lspinfo['falsealarmprob']``.\n\n    Returns\n    -------\n\n    list\n        The calculated false alarm probabilities for each of the peak values in\n        ``peakvals``.", "id": "f14761:m3"}
{"signature": "def stellingwerf_pdm(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=False,<EOL>startp=None,<EOL>endp=None,<EOL>stepsize=<NUM_LIT>,<EOL>autofreq=True,<EOL>normalize=False,<EOL>phasebinsize=<NUM_LIT>,<EOL>mindetperbin=<NUM_LIT:9>,<EOL>nbestpeaks=<NUM_LIT:5>,<EOL>periodepsilon=<NUM_LIT:0.1>,<EOL>sigclip=<NUM_LIT>,<EOL>nworkers=None,<EOL>verbose=True):", "body": "<EOL>stimes, smags, serrs = sigclip_magseries(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=magsarefluxes,<EOL>sigclip=sigclip)<EOL>if len(stimes) > <NUM_LIT:9> and len(smags) > <NUM_LIT:9> and len(serrs) > <NUM_LIT:9>:<EOL><INDENT>if startp:<EOL><INDENT>endf = <NUM_LIT:1.0>/startp<EOL><DEDENT>else:<EOL><INDENT>endf = <NUM_LIT:1.0>/<NUM_LIT:0.1><EOL><DEDENT>if endp:<EOL><INDENT>startf = <NUM_LIT:1.0>/endp<EOL><DEDENT>else:<EOL><INDENT>startf = <NUM_LIT:1.0>/(stimes.max() - stimes.min())<EOL><DEDENT>if not autofreq:<EOL><INDENT>frequencies = nparange(startf, endf, stepsize)<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>' %<EOL>(frequencies.size, <NUM_LIT:1.0>/endf, <NUM_LIT:1.0>/startf)<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>frequencies = get_frequency_grid(stimes,<EOL>minfreq=startf,<EOL>maxfreq=endf)<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(frequencies.size,<EOL><NUM_LIT:1.0>/frequencies.max(),<EOL><NUM_LIT:1.0>/frequencies.min())<EOL>)<EOL><DEDENT><DEDENT>if (not nworkers) or (nworkers > NCPUS):<EOL><INDENT>nworkers = NCPUS<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % nworkers)<EOL><DEDENT><DEDENT>pool = Pool(nworkers)<EOL>if normalize:<EOL><INDENT>nmags = (smags - npmedian(smags))/npstd(smags)<EOL><DEDENT>else:<EOL><INDENT>nmags = smags<EOL><DEDENT>tasks = [(stimes, nmags, serrs, x, phasebinsize, mindetperbin)<EOL>for x in frequencies]<EOL>lsp = pool.map(_stellingwerf_pdm_worker, tasks)<EOL>pool.close()<EOL>pool.join()<EOL>del pool<EOL>lsp = nparray(lsp)<EOL>periods = <NUM_LIT:1.0>/frequencies<EOL>finitepeakind = npisfinite(lsp)<EOL>finlsp = 
lsp[finitepeakind]<EOL>finperiods = periods[finitepeakind]<EOL>try:<EOL><INDENT>bestperiodind = npargmin(finlsp)<EOL><DEDENT>except ValueError:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':normalize,<EOL>'<STR_LIT>':phasebinsize,<EOL>'<STR_LIT>':mindetperbin,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip}}<EOL><DEDENT>sortedlspind = npargsort(finlsp)<EOL>sortedlspperiods = finperiods[sortedlspind]<EOL>sortedlspvals = finlsp[sortedlspind]<EOL>nbestperiods, nbestlspvals, peakcount = (<EOL>[finperiods[bestperiodind]],<EOL>[finlsp[bestperiodind]],<EOL><NUM_LIT:1><EOL>)<EOL>prevperiod = sortedlspperiods[<NUM_LIT:0>]<EOL>for period, lspval in zip(sortedlspperiods, sortedlspvals):<EOL><INDENT>if peakcount == nbestpeaks:<EOL><INDENT>break<EOL><DEDENT>perioddiff = abs(period - prevperiod)<EOL>bestperiodsdiff = [abs(period - x) for x in nbestperiods]<EOL>if (perioddiff > (periodepsilon*prevperiod) and<EOL>all(x > (periodepsilon*period) for x in bestperiodsdiff)):<EOL><INDENT>nbestperiods.append(period)<EOL>nbestlspvals.append(lspval)<EOL>peakcount = peakcount + <NUM_LIT:1><EOL><DEDENT>prevperiod = period<EOL><DEDENT>return 
{'<STR_LIT>':finperiods[bestperiodind],<EOL>'<STR_LIT>':finlsp[bestperiodind],<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':nbestlspvals,<EOL>'<STR_LIT>':nbestperiods,<EOL>'<STR_LIT>':lsp,<EOL>'<STR_LIT>':periods,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':normalize,<EOL>'<STR_LIT>':phasebinsize,<EOL>'<STR_LIT>':mindetperbin,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip}}<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':normalize,<EOL>'<STR_LIT>':phasebinsize,<EOL>'<STR_LIT>':mindetperbin,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip}}<EOL><DEDENT>", "docstring": "This runs a parallelized Stellingwerf phase-dispersion minimization (PDM)\n    period search.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The mag/flux time-series with associated measurement errors to run the\n        period-finding on.\n\n    magsarefluxes : bool\n        If the input measurement values in `mags` and `errs` are in fluxes, set\n        this to True.\n\n    startp,endp : float or None\n        The minimum and maximum periods to consider for the transit search.\n\n    stepsize : float\n        The step-size in frequency to use when constructing a frequency grid for\n        the period search.\n\n    autofreq : bool\n        If this is True, the value of `stepsize` will be ignored and the\n        :py:func:`astrobase.periodbase.get_frequency_grid` function will be used\n        to generate a frequency grid based on `startp`, and `endp`. 
If these are\n        None as well, `startp` will be set to 0.1 and `endp` will be set to\n        `times.max() - times.min()`.\n\n    normalize : bool\n        This sets if the input time-series is normalized to 0.0 and rescaled\n        such that its variance = 1.0. This is the recommended procedure by\n        Schwarzenberg-Czerny 1996.\n\n    phasebinsize : float\n        The bin size in phase to use when calculating the PDM theta statistic at\n        a test frequency.\n\n    mindetperbin : int\n        The minimum number of elements in a phase bin to consider it valid when\n        calculating the PDM theta statistic at a test frequency.\n\n    nbestpeaks : int\n        The number of 'best' peaks to return from the periodogram results,\n        starting from the global maximum of the periodogram peak values.\n\n    periodepsilon : float\n        The fractional difference between successive values of 'best' periods\n        when sorting by periodogram power to consider them as separate periods\n        (as opposed to part of the same periodogram peak). This is used to avoid\n        broad peaks in the periodogram and make sure the 'best' periods returned\n        are all actually independent.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    nworkers : int\n        The number of parallel workers to use when calculating the periodogram.\n\n    verbose : bool\n        If this is True, will indicate progress and details about the frequency\n        grid used for the period search.\n\n    Returns\n    -------\n\n    dict\n        This function returns a dict, referred to as an `lspinfo` dict in other\n        astrobase functions that operate on periodogram results. This is a\n        standardized format across all astrobase period-finders, and is of the\n        form below::\n\n            {'bestperiod': the best period value in the periodogram,\n             'bestlspval': the periodogram peak associated with the best period,\n             'nbestpeaks': the input value of nbestpeaks,\n             'nbestlspvals': nbestpeaks-size list of best period peak values,\n             'nbestperiods': nbestpeaks-size list of best periods,\n             'lspvals': the full array of periodogram powers,\n             'periods': the full array of periods considered,\n             'method':'pdm' -> the name of the period-finder method,\n             'kwargs':{ dict of all of the input kwargs for record-keeping}}", "id": "f14761:m2"}
{"signature": "def macf_period_find(<EOL>times,<EOL>mags,<EOL>errs,<EOL>fillgaps=<NUM_LIT:0.0>,<EOL>filterwindow=<NUM_LIT:11>,<EOL>forcetimebin=None,<EOL>maxlags=None,<EOL>maxacfpeaks=<NUM_LIT:10>,<EOL>smoothacf=<NUM_LIT>,  <EOL>smoothfunc=_smooth_acf_savgol,<EOL>smoothfunckwargs=None,<EOL>magsarefluxes=False,<EOL>sigclip=<NUM_LIT>,<EOL>verbose=True,<EOL>periodepsilon=<NUM_LIT:0.1>,  <EOL>nworkers=None,      <EOL>startp=None,        <EOL>endp=None,          <EOL>autofreq=None,      <EOL>stepsize=None,      <EOL>):", "body": "<EOL>acfres = autocorr_magseries(<EOL>times,<EOL>mags,<EOL>errs,<EOL>maxlags=maxlags,<EOL>fillgaps=fillgaps,<EOL>forcetimebin=forcetimebin,<EOL>sigclip=sigclip,<EOL>magsarefluxes=magsarefluxes,<EOL>filterwindow=filterwindow,<EOL>verbose=verbose<EOL>)<EOL>xlags = acfres['<STR_LIT>']<EOL>if smoothacf and isinstance(smoothacf, int) and smoothacf > <NUM_LIT:0>:<EOL><INDENT>if smoothfunckwargs is None:<EOL><INDENT>sfkwargs = {'<STR_LIT>':smoothacf}<EOL><DEDENT>else:<EOL><INDENT>sfkwargs = smoothfunckwargs.copy()<EOL>sfkwargs.update({'<STR_LIT>':smoothacf})<EOL><DEDENT>xacf = smoothfunc(acfres['<STR_LIT>'], **sfkwargs)<EOL><DEDENT>else:<EOL><INDENT>xacf = acfres['<STR_LIT>']<EOL><DEDENT>peakres = _get_acf_peakheights(xlags, xacf, npeaks=maxacfpeaks,<EOL>searchinterval=int(smoothacf/<NUM_LIT:2>))<EOL>bestlspval = peakres['<STR_LIT>']<EOL>try:<EOL><INDENT>fity = npconcatenate((<EOL>[<NUM_LIT:0.0>, peakres['<STR_LIT>']],<EOL>peakres['<STR_LIT>'][peakres['<STR_LIT>'] > peakres['<STR_LIT>']]<EOL>))<EOL>fity = fity*acfres['<STR_LIT>']<EOL>fitx = nparange(fity.size)<EOL>fitcoeffs, fitcovar = nppolyfit(fitx, fity, <NUM_LIT:1>, cov=True)<EOL>fitbestperiod = fitcoeffs[<NUM_LIT:0>]<EOL>bestperiodrms = npsqrt(fitcovar[<NUM_LIT:0>,<NUM_LIT:0>])  <EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>fitcoeffs = nparray([npnan, npnan])<EOL>fitcovar = nparray([[npnan, npnan], [npnan, npnan]])<EOL>fitbestperiod = 
npnan<EOL>bestperiodrms = npnan<EOL>raise<EOL><DEDENT>naivebestperiod = peakres['<STR_LIT>']*acfres['<STR_LIT>']<EOL>if fitbestperiod < naivebestperiod:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(fitbestperiod, naivebestperiod))<EOL><DEDENT>if npisfinite(fitbestperiod):<EOL><INDENT>bestperiod = fitbestperiod<EOL><DEDENT>else:<EOL><INDENT>bestperiod = naivebestperiod<EOL><DEDENT>return {'<STR_LIT>':bestperiod,<EOL>'<STR_LIT>':bestlspval,<EOL>'<STR_LIT>':maxacfpeaks,<EOL>'<STR_LIT>':npconcatenate([<EOL>[fitbestperiod],<EOL>peakres['<STR_LIT>'][<NUM_LIT:1>:maxacfpeaks]*acfres['<STR_LIT>']<EOL>]),<EOL>'<STR_LIT>':peakres['<STR_LIT>'][:maxacfpeaks],<EOL>'<STR_LIT>':xacf,<EOL>'<STR_LIT>':xlags*acfres['<STR_LIT>'],<EOL>'<STR_LIT>':xacf,<EOL>'<STR_LIT>':xlags,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':naivebestperiod,<EOL>'<STR_LIT>':fitbestperiod,<EOL>'<STR_LIT>':bestperiodrms,<EOL>'<STR_LIT>':fitcoeffs,<EOL>'<STR_LIT>':fitcovar,<EOL>'<STR_LIT>':{'<STR_LIT>':maxlags,<EOL>'<STR_LIT>':maxacfpeaks,<EOL>'<STR_LIT>':fillgaps,<EOL>'<STR_LIT>':filterwindow,<EOL>'<STR_LIT>':smoothacf,<EOL>'<STR_LIT>':sfkwargs,<EOL>'<STR_LIT>':magsarefluxes,<EOL>'<STR_LIT>':sigclip},<EOL>'<STR_LIT>':acfres,<EOL>'<STR_LIT>':peakres}<EOL>", "docstring": "This finds periods using the McQuillan+ (2013a, 2014) ACF method.\n\n    The kwargs from `periodepsilon` to `stepsize` don't do anything but are used\n    to present a consistent API for all periodbase period-finders to an outside\n    driver (e.g. the one in the checkplotserver).\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The input magnitude/flux time-series to run the period-finding for.\n\n    fillgaps : 'noiselevel' or float\n        This sets what to use to fill in gaps in the time series. 
If this is\n        'noiselevel', will smooth the light curve using a point window size of\n        `filterwindow` (this should be an odd integer), subtract the smoothed LC\n        from the actual LC and estimate the RMS. This RMS will be used to fill\n        in the gaps. Other useful values here are 0.0, and npnan.\n\n    filterwindow : int\n        The light curve's smoothing filter window size to use if\n        `fillgaps='noiselevel`'.\n\n    forcetimebin : None or float\n        This is used to force a particular cadence in the light curve other than\n        the automatically determined cadence. This effectively rebins the light\n        curve to this cadence. This should be in the same time units as `times`.\n\n    maxlags : None or int\n        This is the maximum number of lags to calculate. If None, will calculate\n        all lags.\n\n    maxacfpeaks : int\n        This is the maximum number of ACF peaks to use when finding the highest\n        peak and obtaining a fit period.\n\n    smoothacf : int\n        This is the number of points to use as the window size when smoothing\n        the ACF with the `smoothfunc`. This should be an odd integer value. If\n        this is None, will not smooth the ACF, but this will probably lead to\n        finding spurious peaks in a generally noisy ACF.\n\n        For Kepler, a value between 21 and 51 seems to work fine. For ground\n        based data, much larger values may be necessary: between 1001 and 2001\n        seem to work best for the HAT surveys. This is dependent on cadence, RMS\n        of the light curve, the periods of the objects you're looking for, and\n        finally, any correlated noise in the light curve. Make a plot of the\n        smoothed/unsmoothed ACF vs. 
lag using the result dict of this function\n        and the `plot_acf_results` function above to see the identified ACF\n        peaks and what kind of smoothing might be needed.\n\n        The value of `smoothacf` will also be used to figure out the interval to\n        use when searching for local peaks in the ACF: this interval is 1/2 of\n        the `smoothacf` value.\n\n    smoothfunc : Python function\n        This is the function that will be used to smooth the ACF. This should\n        take at least one kwarg: 'windowsize'. Other kwargs can be passed in\n        using a dict provided in `smoothfunckwargs`. By default, this uses a\n        Savitsky-Golay filter, a Gaussian filter is also provided but not\n        used. Another good option would be an actual low-pass filter (generated\n        using scipy.signal?) to remove all high frequency noise from the ACF.\n\n    smoothfunckwargs : dict or None\n        The dict of optional kwargs to pass in to the `smoothfunc`.\n\n    magsarefluxes : bool\n        If your input measurements in `mags` are actually fluxes instead of\n        mags, set this is True.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. 
Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    verbose : bool\n        If True, will indicate progress and report errors.\n\n    Returns\n    -------\n\n    dict\n        Returns a dict with results. dict['bestperiod'] is the estimated best\n        period and dict['fitperiodrms'] is its estimated error. Other\n        interesting things in the output include:\n\n        - dict['acfresults']: all results from calculating the ACF. in\n          particular, the unsmoothed ACF might be of interest:\n          dict['acfresults']['acf'] and dict['acfresults']['lags'].\n\n        - dict['lags'] and dict['acf'] contain the ACF after smoothing was\n          applied.\n\n        - dict['periods'] and dict['lspvals'] can be used to construct a\n          pseudo-periodogram.\n\n        - dict['naivebestperiod'] is obtained by multiplying the lag at the\n          highest ACF peak with the cadence. This is usually close to the fit\n          period (dict['fitbestperiod']), which is calculated by doing a fit to\n          the lags vs. peak index relation as in McQuillan+ 2014.", "id": "f14762:m4"}
{"signature": "def _smooth_acf_savgol(acf, windowsize=<NUM_LIT>, polyorder=<NUM_LIT:2>):", "body": "smoothed = savgol_filter(acf, windowsize, polyorder)<EOL>return smoothed<EOL>", "docstring": "This returns a smoothed version of the ACF.\n\nThis version uses the Savitsky-Golay smoothing filter.\n\nParameters\n----------\n\nacf : np.array\n    The auto-correlation function array to smooth.\n\nwindowsize : int\n    The number of input points to apply the smoothing over.\n\npolyorder : int\n    The order of the polynomial to use in the Savitsky-Golay filter.\n\nReturns\n-------\n\nnp.array\n    Smoothed version of the input ACF array.", "id": "f14762:m1"}
{"signature": "def plot_acf_results(acfp, outfile, maxlags=<NUM_LIT>, yrange=(-<NUM_LIT>,<NUM_LIT>)):", "body": "import matplotlib.pyplot as plt<EOL>lags = acfp['<STR_LIT>']['<STR_LIT>'][:maxlags]<EOL>smoothedacf = acfp['<STR_LIT>'][:maxlags]<EOL>unsmoothedacf = acfp['<STR_LIT>']['<STR_LIT>'][:maxlags]<EOL>acfparams = acfp['<STR_LIT>']['<STR_LIT>'].copy()<EOL>acfparams.update({'<STR_LIT>': int(acfp['<STR_LIT>']['<STR_LIT>']/<NUM_LIT>)})<EOL>fig, ax1 = plt.subplots()<EOL>ax1.plot(lags, unsmoothedacf, label='<STR_LIT>',color='<STR_LIT>')<EOL>ax1.plot(lags, smoothedacf, label='<STR_LIT>', color='<STR_LIT>')<EOL>ax1.set_xlim((<NUM_LIT:0>,maxlags))<EOL>ax1.set_xlabel('<STR_LIT>')<EOL>acfmaxinds = acfp['<STR_LIT>']['<STR_LIT>']<EOL>for i, maxind in enumerate(acfmaxinds):<EOL><INDENT>if i == <NUM_LIT:0>:<EOL><INDENT>ax1.axvline(maxind,<EOL>linewidth=<NUM_LIT>,<EOL>color='<STR_LIT>',<EOL>ymin=<NUM_LIT>, ymax=<NUM_LIT>,<EOL>label='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>ax1.axvline(maxind,<EOL>linewidth=<NUM_LIT>,<EOL>color='<STR_LIT>',<EOL>ymin=<NUM_LIT>, ymax=<NUM_LIT>)<EOL><DEDENT><DEDENT>plt.ylabel('<STR_LIT>')<EOL>plt.ylim(yrange)<EOL>ax1.legend()<EOL>plt.title('<STR_LIT:%s>' % repr(acfparams))<EOL>plt.tight_layout()<EOL>plt.savefig(outfile)<EOL>plt.close('<STR_LIT:all>')<EOL>return outfile<EOL>", "docstring": "This plots the unsmoothed/smoothed ACF vs lag.\n\nParameters\n----------\n\nacfp : dict\n    This is the dict returned from `macf_period_find` below.\n\noutfile : str\n    The output file the plot will be written to.\n\nmaxlags: int\n    The maximum number of lags to include in the plot.\n\nyrange : sequence of two floats\n    The y-range of the ACF vs. lag plot to use.", "id": "f14762:m3"}
{"signature": "def _smooth_acf(acf, windowfwhm=<NUM_LIT:7>, windowsize=<NUM_LIT>):", "body": "convkernel = Gaussian1DKernel(windowfwhm, x_size=windowsize)<EOL>smoothed = convolve(acf, convkernel, boundary='<STR_LIT>')<EOL>return smoothed<EOL>", "docstring": "This returns a smoothed version of the ACF.\n\n    Convolves the ACF with a Gaussian of given `windowsize` and `windowfwhm`.\n\n    Parameters\n    ----------\n\n    acf : np.array\n        The auto-correlation function array to smooth.\n\n    windowfwhm : int\n        The smoothing window Gaussian kernel's FWHM .\n\n    windowsize : int\n        The number of input points to apply the smoothing over.\n\n    Returns\n    -------\n\n    np.array\n        Smoothed version of the input ACF array.", "id": "f14762:m0"}
{"signature": "def parallel_townsend_lsp_sharedarray(times, mags, startp, endp,<EOL>stepsize=<NUM_LIT>,<EOL>nworkers=<NUM_LIT:16>):", "body": "", "docstring": "This is a version of the above which uses shared ctypes arrays for the times\nand mags arrays so as not to copy them to each worker process.\n\nTODO: we'll need to pass a single argument to the worker so make a 2D array\nand wrap the worker function with partial?\n\nFIXME: implement this later.", "id": "f14763:m8"}
{"signature": "def townsend_lombscargle_wrapper(task):", "body": "try:<EOL><INDENT>return townsend_lombscargle_value(*task)<EOL><DEDENT>except Exception as e:<EOL><INDENT>return npnan<EOL><DEDENT>", "docstring": "This wraps the function above for use with mp.Pool.\n\ntask[0] = times\ntask[1] = mags\ntask[2] = omega", "id": "f14763:m6"}
{"signature": "def pdw_worker(task):", "body": "frequency = task[<NUM_LIT:0>]<EOL>times, modmags = task[<NUM_LIT:1>], task[<NUM_LIT:2>]<EOL>fold_time = task[<NUM_LIT:3>]<EOL>j_range = range(task[<NUM_LIT:4>])<EOL>keep_threshold_1 = task[<NUM_LIT:5>]<EOL>keep_threshold_2 = task[<NUM_LIT:6>]<EOL>phasebinsize = task[<NUM_LIT:7>]<EOL>try:<EOL><INDENT>period = <NUM_LIT:1.0>/frequency<EOL>phased = phase_magseries(times,<EOL>modmags,<EOL>period,<EOL>fold_time,<EOL>wrap=False,<EOL>sort=True)<EOL>if phasebinsize is not None and phasebinsize > <NUM_LIT:0>:<EOL><INDENT>bphased = pwd_phasebin(phased['<STR_LIT>'],<EOL>phased['<STR_LIT>'],<EOL>binsize=phasebinsize)<EOL>phase_sorted = bphased[<NUM_LIT:0>]<EOL>mod_mag_sorted = bphased[<NUM_LIT:1>]<EOL>j_range = range(len(mod_mag_sorted) - <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>phase_sorted = phased['<STR_LIT>']<EOL>mod_mag_sorted = phased['<STR_LIT>']<EOL><DEDENT>rolledmags = nproll(mod_mag_sorted,<NUM_LIT:1>)<EOL>rolledphases = nproll(phase_sorted,<NUM_LIT:1>)<EOL>strings = (<EOL>(rolledmags - mod_mag_sorted)*(rolledmags - mod_mag_sorted) +<EOL>(rolledphases - phase_sorted)*(rolledphases - phase_sorted)<EOL>)<EOL>strings[<NUM_LIT:0>] = (<EOL>((mod_mag_sorted[<NUM_LIT:0>] - mod_mag_sorted[-<NUM_LIT:1>]) *<EOL>(mod_mag_sorted[<NUM_LIT:0>] - mod_mag_sorted[-<NUM_LIT:1>])) +<EOL>((phase_sorted[<NUM_LIT:0>] - phase_sorted[-<NUM_LIT:1>] + <NUM_LIT:1>) *<EOL>(phase_sorted[<NUM_LIT:0>] - phase_sorted[-<NUM_LIT:1>] + <NUM_LIT:1>))<EOL>)<EOL>strlen = npsum(npsqrt(strings))<EOL>if (keep_threshold_1 < strlen < keep_threshold_2):<EOL><INDENT>p_goodflag  = True<EOL><DEDENT>else:<EOL><INDENT>p_goodflag = False<EOL><DEDENT>return (period, strlen, p_goodflag)<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>return(period, npnan, False)<EOL><DEDENT>", "docstring": "This is the parallel worker for the function below.\n\ntask[0] = frequency for this worker\ntask[1] = times array\ntask[2] = mags array\ntask[3] = 
fold_time\ntask[4] = j_range\ntask[5] = keep_threshold_1\ntask[6] = keep_threshold_2\ntask[7] = phasebinsize\n\nwe don't need errs for the worker.", "id": "f14763:m3"}
{"signature": "def pdw_period_find(times,<EOL>mags,<EOL>errs,<EOL>autofreq=True,<EOL>init_p=None,<EOL>end_p=None,<EOL>f_step=<NUM_LIT>,<EOL>phasebinsize=None,<EOL>sigclip=<NUM_LIT>,<EOL>nworkers=None,<EOL>verbose=False):", "body": "<EOL>find = npisfinite(times) & npisfinite(mags) & npisfinite(errs)<EOL>ftimes, fmags, ferrs = times[find], mags[find], errs[find]<EOL>mod_mags = (fmags - npmin(fmags))/(<NUM_LIT>*(npmax(fmags) - npmin(fmags))) - <NUM_LIT><EOL>if len(ftimes) > <NUM_LIT:9> and len(fmags) > <NUM_LIT:9> and len(ferrs) > <NUM_LIT:9>:<EOL><INDENT>median_mag = np.median(fmags)<EOL>stddev_mag = (np.median(np.abs(fmags - median_mag))) * <NUM_LIT><EOL>if sigclip:<EOL><INDENT>sigind = (np.abs(fmags - median_mag)) < (sigclip * stddev_mag)<EOL>stimes = ftimes[sigind]<EOL>smags = fmags[sigind]<EOL>serrs = ferrs[sigind]<EOL>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(sigclip, len(times), len(stimes)))<EOL><DEDENT>else:<EOL><INDENT>stimes = ftimes<EOL>smags = fmags<EOL>serrs = ferrs<EOL><DEDENT>if len(stimes) > <NUM_LIT:9> and len(smags) > <NUM_LIT:9> and len(serrs) > <NUM_LIT:9>:<EOL><INDENT>if init_p:<EOL><INDENT>endf = <NUM_LIT:1.0>/init_p<EOL><DEDENT>else:<EOL><INDENT>endf = <NUM_LIT:1.0>/<NUM_LIT:0.1><EOL><DEDENT>if end_p:<EOL><INDENT>startf = <NUM_LIT:1.0>/end_p<EOL><DEDENT>else:<EOL><INDENT>startf = <NUM_LIT:1.0>/(stimes.max() - stimes.min())<EOL><DEDENT>if not autofreq:<EOL><INDENT>frequencies = np.arange(startf, endf, stepsize)<EOL>LOGINFO(<EOL>'<STR_LIT>' %<EOL>(frequencies.size, <NUM_LIT:1.0>/endf, <NUM_LIT:1.0>/startf)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>frequencies = get_frequency_grid(stimes,<EOL>minfreq=startf,<EOL>maxfreq=endf)<EOL>LOGINFO(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(frequencies.size,<EOL><NUM_LIT:1.0>/frequencies.max(),<EOL><NUM_LIT:1.0>/frequencies.min())<EOL>)<EOL><DEDENT>fold_time = npmin(ftimes) <EOL>j_range = len(fmags)-<NUM_LIT:1><EOL>epsilon = <NUM_LIT> * npmean(ferrs)<EOL>delta_l = <NUM_LIT> * (epsilon - 
<NUM_LIT:0.5>*(epsilon**<NUM_LIT:2>)) * (len(ftimes) -<EOL>npsqrt(<NUM_LIT>/epsilon))<EOL>keep_threshold_1 = <NUM_LIT> + <NUM_LIT>*delta_l<EOL>l = <NUM_LIT>*len(ftimes)<EOL>sig_l = len(ftimes)/<NUM_LIT><EOL>keep_threshold_2 = l + <NUM_LIT>*sig_l<EOL>tasks = [(x,<EOL>ftimes,<EOL>mod_mags,<EOL>fold_time,<EOL>j_range,<EOL>keep_threshold_1,<EOL>keep_threshold_2,<EOL>phasebinsize) for x in frequencies]<EOL>if (not nworkers) or (nworkers > NCPUS):<EOL><INDENT>nworkers = NCPUS<EOL>LOGINFO('<STR_LIT>' % nworkers)<EOL><DEDENT>pool = Pool(nworkers)<EOL>strlen_results = pool.map(pdw_worker, tasks)<EOL>pool.close()<EOL>pool.join()<EOL>del pool<EOL>periods, strlens, goodflags = zip(*strlen_results)<EOL>periods, strlens, goodflags = (np.array(periods),<EOL>np.array(strlens),<EOL>np.array(goodflags))<EOL>strlensort = npargsort(strlens)<EOL>nbeststrlens = strlens[strlensort[:<NUM_LIT:5>]]<EOL>nbestperiods = periods[strlensort[:<NUM_LIT:5>]]<EOL>nbestflags = goodflags[strlensort[:<NUM_LIT:5>]]<EOL>bestperiod = nbestperiods[<NUM_LIT:0>]<EOL>beststrlen = nbeststrlens[<NUM_LIT:0>]<EOL>bestflag = nbestflags[<NUM_LIT:0>]<EOL>return {'<STR_LIT>':bestperiod,<EOL>'<STR_LIT>':beststrlen,<EOL>'<STR_LIT>':bestflag,<EOL>'<STR_LIT>':nbeststrlens,<EOL>'<STR_LIT>':nbestperiods,<EOL>'<STR_LIT>':nbestflags,<EOL>'<STR_LIT>':strlens,<EOL>'<STR_LIT>':periods,<EOL>'<STR_LIT>':goodflags}<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None}<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None}<EOL><DEDENT>", "docstring": "This is the parallel version of the function above.\n\n    Uses the 
string length method in Dworetsky 1983 to calculate the period of a\n    time-series of magnitude measurements and associated magnitude errors. This\n    can optionally bin in phase to try to speed up the calculation.\n\n    PARAMETERS:\n\n    time: series of times at which mags were measured (usually some form of JD)\n    mag: timeseries of magnitudes (np.array)\n    err: associated errs per magnitude measurement (np.array)\n    init_p, end_p: interval to search for periods between (both ends inclusive)\n    f_step: step in frequency [days^-1] to use\n\n    RETURNS:\n\n    tuple of the following form:\n\n    (periods (np.array),\n     string_lengths (np.array),\n     good_period_mask (boolean array))", "id": "f14763:m4"}
{"signature": "def get_frequency_grid(times,<EOL>samplesperpeak=<NUM_LIT:5>,<EOL>nyquistfactor=<NUM_LIT:5>,<EOL>minfreq=None,<EOL>maxfreq=None,<EOL>returnf0dfnf=False):", "body": "baseline = times.max() - times.min()<EOL>nsamples = times.size<EOL>df = <NUM_LIT:1.> / baseline / samplesperpeak<EOL>if minfreq is not None:<EOL><INDENT>f0 = minfreq<EOL><DEDENT>else:<EOL><INDENT>f0 = <NUM_LIT:0.5> * df<EOL><DEDENT>if maxfreq is not None:<EOL><INDENT>Nf = int(npceil((maxfreq - f0) / df))<EOL><DEDENT>else:<EOL><INDENT>Nf = int(<NUM_LIT:0.5> * samplesperpeak * nyquistfactor * nsamples)<EOL><DEDENT>if returnf0dfnf:<EOL><INDENT>return f0, df, Nf, f0 + df * nparange(Nf)<EOL><DEDENT>else:<EOL><INDENT>return f0 + df * nparange(Nf)<EOL><DEDENT>", "docstring": "This calculates a frequency grid for the period finding functions in this\n    module.\n\n    Based on the autofrequency function in astropy.stats.lombscargle.\n\n    http://docs.astropy.org/en/stable/_modules/astropy/stats/lombscargle/core.html#LombScargle.autofrequency", "id": "f14763:m0"}
{"signature": "def pwd_phasebin(phases, mags, binsize=<NUM_LIT>, minbin=<NUM_LIT:9>):", "body": "bins = np.arange(<NUM_LIT:0.0>, <NUM_LIT:1.0>, binsize)<EOL>binnedphaseinds = npdigitize(phases, bins)<EOL>binnedphases, binnedmags = [], []<EOL>for x in npunique(binnedphaseinds):<EOL><INDENT>thisbin_inds = binnedphaseinds == x<EOL>thisbin_phases = phases[thisbin_inds]<EOL>thisbin_mags = mags[thisbin_inds]<EOL>if thisbin_inds.size > minbin:<EOL><INDENT>binnedphases.append(npmedian(thisbin_phases))<EOL>binnedmags.append(npmedian(thisbin_mags))<EOL><DEDENT><DEDENT>return np.array(binnedphases), np.array(binnedmags)<EOL>", "docstring": "This bins the phased mag series using the given binsize.", "id": "f14763:m2"}
{"signature": "def townsend_lombscargle_value(times, mags, omega):", "body": "cos_omegat = npcos(omega*times)<EOL>sin_omegat = npsin(omega*times)<EOL>xc = npsum(mags*cos_omegat)<EOL>xs = npsum(mags*sin_omegat)<EOL>cc = npsum(cos_omegat*cos_omegat)<EOL>ss = npsum(sin_omegat*sin_omegat)<EOL>cs = npsum(cos_omegat*sin_omegat)<EOL>tau = nparctan(<NUM_LIT:2>*cs/(cc - ss))/(<NUM_LIT:2>*omega)<EOL>ctau = npcos(omega*tau)<EOL>stau = npsin(omega*tau)<EOL>leftsumtop = (ctau*xc + stau*xs)*(ctau*xc + stau*xs)<EOL>leftsumbot = ctau*ctau*cc + <NUM_LIT>*ctau*stau*cs + stau*stau*ss<EOL>leftsum = leftsumtop/leftsumbot<EOL>rightsumtop = (ctau*xs - stau*xc)*(ctau*xs - stau*xc)<EOL>rightsumbot = ctau*ctau*ss - <NUM_LIT>*ctau*stau*cs + stau*stau*cc<EOL>rightsum = rightsumtop/rightsumbot<EOL>pval = <NUM_LIT:0.5>*(leftsum + rightsum)<EOL>return pval<EOL>", "docstring": "This calculates the periodogram value for each omega (= 2*pi*f). Mags must\nbe normalized to zero with variance scaled to unity.", "id": "f14763:m5"}
{"signature": "def aovhm_periodfind(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=False,<EOL>startp=None,<EOL>endp=None,<EOL>stepsize=<NUM_LIT>,<EOL>autofreq=True,<EOL>normalize=True,<EOL>nharmonics=<NUM_LIT:6>,<EOL>nbestpeaks=<NUM_LIT:5>,<EOL>periodepsilon=<NUM_LIT:0.1>,<EOL>sigclip=<NUM_LIT>,<EOL>nworkers=None,<EOL>verbose=True):", "body": "<EOL>stimes, smags, serrs = sigclip_magseries(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=magsarefluxes,<EOL>sigclip=sigclip)<EOL>if len(stimes) > <NUM_LIT:9> and len(smags) > <NUM_LIT:9> and len(serrs) > <NUM_LIT:9>:<EOL><INDENT>if startp:<EOL><INDENT>endf = <NUM_LIT:1.0>/startp<EOL><DEDENT>else:<EOL><INDENT>endf = <NUM_LIT:1.0>/<NUM_LIT:0.1><EOL><DEDENT>if endp:<EOL><INDENT>startf = <NUM_LIT:1.0>/endp<EOL><DEDENT>else:<EOL><INDENT>startf = <NUM_LIT:1.0>/(stimes.max() - stimes.min())<EOL><DEDENT>if not autofreq:<EOL><INDENT>frequencies = nparange(startf, endf, stepsize)<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>' %<EOL>(frequencies.size, <NUM_LIT:1.0>/endf, <NUM_LIT:1.0>/startf)<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>frequencies = get_frequency_grid(stimes,<EOL>minfreq=startf,<EOL>maxfreq=endf)<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(frequencies.size,<EOL><NUM_LIT:1.0>/frequencies.max(),<EOL><NUM_LIT:1.0>/frequencies.min())<EOL>)<EOL><DEDENT><DEDENT>if (not nworkers) or (nworkers > NCPUS):<EOL><INDENT>nworkers = NCPUS<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' % nworkers)<EOL><DEDENT><DEDENT>pool = Pool(nworkers)<EOL>if normalize:<EOL><INDENT>nmags = (smags - npmedian(smags))/npstd(smags)<EOL><DEDENT>else:<EOL><INDENT>nmags = smags<EOL><DEDENT>magvariance_top = npsum(nmags/(serrs*serrs))<EOL>magvariance_bot = (nmags.size - <NUM_LIT:1>)*npsum(<NUM_LIT:1.0>/(serrs*serrs)) / nmags.size<EOL>magvariance = magvariance_top/magvariance_bot<EOL>tasks = [(stimes, nmags, serrs, x, nharmonics, magvariance)<EOL>for x in frequencies]<EOL>lsp = pool.map(_aovhm_theta_worker, 
tasks)<EOL>pool.close()<EOL>pool.join()<EOL>del pool<EOL>lsp = nparray(lsp)<EOL>periods = <NUM_LIT:1.0>/frequencies<EOL>finitepeakind = npisfinite(lsp)<EOL>finlsp = lsp[finitepeakind]<EOL>finperiods = periods[finitepeakind]<EOL>try:<EOL><INDENT>bestperiodind = npargmax(finlsp)<EOL><DEDENT>except ValueError:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':normalize,<EOL>'<STR_LIT>':nharmonics,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip}}<EOL><DEDENT>sortedlspind = npargsort(finlsp)[::-<NUM_LIT:1>]<EOL>sortedlspperiods = finperiods[sortedlspind]<EOL>sortedlspvals = finlsp[sortedlspind]<EOL>nbestperiods, nbestlspvals, peakcount = (<EOL>[finperiods[bestperiodind]],<EOL>[finlsp[bestperiodind]],<EOL><NUM_LIT:1><EOL>)<EOL>prevperiod = sortedlspperiods[<NUM_LIT:0>]<EOL>for period, lspval in zip(sortedlspperiods, sortedlspvals):<EOL><INDENT>if peakcount == nbestpeaks:<EOL><INDENT>break<EOL><DEDENT>perioddiff = abs(period - prevperiod)<EOL>bestperiodsdiff = [abs(period - x) for x in nbestperiods]<EOL>if (perioddiff > (periodepsilon*prevperiod) and<EOL>all(x > (periodepsilon*period) for x in bestperiodsdiff)):<EOL><INDENT>nbestperiods.append(period)<EOL>nbestlspvals.append(lspval)<EOL>peakcount = peakcount + <NUM_LIT:1><EOL><DEDENT>prevperiod = period<EOL><DEDENT>return 
{'<STR_LIT>':finperiods[bestperiodind],<EOL>'<STR_LIT>':finlsp[bestperiodind],<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':nbestlspvals,<EOL>'<STR_LIT>':nbestperiods,<EOL>'<STR_LIT>':lsp,<EOL>'<STR_LIT>':periods,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':normalize,<EOL>'<STR_LIT>':nharmonics,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip}}<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':normalize,<EOL>'<STR_LIT>':nharmonics,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip}}<EOL><DEDENT>", "docstring": "This runs a parallelized harmonic Analysis-of-Variance (AoV) period\n    search.\n\n    NOTE: normalize = True here as recommended by Schwarzenberg-Czerny 1996,\n    i.e. 
mags will be normalized to zero and rescaled so their variance = 1.0.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The mag/flux time-series with associated measurement errors to run the\n        period-finding on.\n\n    magsarefluxes : bool\n        If the input measurement values in `mags` and `errs` are in fluxes, set\n        this to True.\n\n    startp,endp : float or None\n        The minimum and maximum periods to consider for the transit search.\n\n    stepsize : float\n        The step-size in frequency to use when constructing a frequency grid for\n        the period search.\n\n    autofreq : bool\n        If this is True, the value of `stepsize` will be ignored and the\n        :py:func:`astrobase.periodbase.get_frequency_grid` function will be used\n        to generate a frequency grid based on `startp`, and `endp`. If these are\n        None as well, `startp` will be set to 0.1 and `endp` will be set to\n        `times.max() - times.min()`.\n\n    normalize : bool\n        This sets if the input time-series is normalized to 0.0 and rescaled\n        such that its variance = 1.0. This is the recommended procedure by\n        Schwarzenberg-Czerny 1996.\n\n    nharmonics : int\n        The number of harmonics to use when calculating the AoV theta value at a\n        test frequency. This should be between 4 and 8 in most cases.\n\n    nbestpeaks : int\n        The number of 'best' peaks to return from the periodogram results,\n        starting from the global maximum of the periodogram peak values.\n\n    periodepsilon : float\n        The fractional difference between successive values of 'best' periods\n        when sorting by periodogram power to consider them as separate periods\n        (as opposed to part of the same periodogram peak). 
This is used to avoid\n        broad peaks in the periodogram and make sure the 'best' periods returned\n        are all actually independent.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    nworkers : int\n        The number of parallel workers to use when calculating the periodogram.\n\n    verbose : bool\n        If this is True, will indicate progress and details about the frequency\n        grid used for the period search.\n\n    Returns\n    -------\n\n    dict\n        This function returns a dict, referred to as an `lspinfo` dict in other\n        astrobase functions that operate on periodogram results. 
This is a\n        standardized format across all astrobase period-finders, and is of the\n        form below::\n\n            {'bestperiod': the best period value in the periodogram,\n             'bestlspval': the periodogram peak associated with the best period,\n             'nbestpeaks': the input value of nbestpeaks,\n             'nbestlspvals': nbestpeaks-size list of best period peak values,\n             'nbestperiods': nbestpeaks-size list of best periods,\n             'lspvals': the full array of periodogram powers,\n             'periods': the full array of periods considered,\n             'method':'mav' -> the name of the period-finder method,\n             'kwargs':{ dict of all of the input kwargs for record-keeping}}", "id": "f14764:m2"}
{"signature": "def _parallel_bls_worker(task):", "body": "try:<EOL><INDENT>times, mags, errs = task[:<NUM_LIT:3>]<EOL>magsarefluxes = task[<NUM_LIT:3>]<EOL>minfreq, nfreq, stepsize = task[<NUM_LIT:4>:<NUM_LIT:7>]<EOL>ndurations, mintransitduration, maxtransitduration = task[<NUM_LIT:7>:<NUM_LIT:10>]<EOL>blsobjective, blsmethod, blsoversample = task[<NUM_LIT:10>:]<EOL>frequencies = minfreq + nparange(nfreq)*stepsize<EOL>periods = <NUM_LIT:1.0>/frequencies<EOL>durations = nplinspace(mintransitduration*periods.min(),<EOL>maxtransitduration*periods.min(),<EOL>ndurations)<EOL>if magsarefluxes:<EOL><INDENT>blsmodel = BoxLeastSquares(<EOL>times*u.day,<EOL>mags*u.dimensionless_unscaled,<EOL>dy=errs*u.dimensionless_unscaled<EOL>)<EOL><DEDENT>else:<EOL><INDENT>blsmodel = BoxLeastSquares(<EOL>times*u.day,<EOL>mags*u.mag,<EOL>dy=errs*u.mag<EOL>)<EOL><DEDENT>blsresult = blsmodel.power(<EOL>periods*u.day,<EOL>durations*u.day,<EOL>objective=blsobjective,<EOL>method=blsmethod,<EOL>oversample=blsoversample<EOL>)<EOL>return {<EOL>'<STR_LIT>': blsresult,<EOL>'<STR_LIT>': blsmodel,<EOL>'<STR_LIT>': durations,<EOL>'<STR_LIT>': nparray(blsresult.power)<EOL>}<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>' %<EOL>(frequencies[<NUM_LIT:0>], frequencies[-<NUM_LIT:1>]))<EOL>return {<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': None,<EOL>'<STR_LIT>': durations,<EOL>'<STR_LIT>': nparray([npnan for x in range(nfreq)]),<EOL>}<EOL><DEDENT>", "docstring": "This wraps Astropy's BoxLeastSquares for use with bls_parallel_pfind below.\n\n`task` is a tuple::\n\n    task[0] = times\n    task[1] = mags\n    task[2] = errs\n    task[3] = magsarefluxes\n\n    task[4] = minfreq\n    task[5] = nfreq\n    task[6] = stepsize\n\n    task[7] = ndurations\n    task[8] = mintransitduration\n    task[9] = maxtransitduration\n\n    task[10] = blsobjective\n    task[11] = blsmethod\n    task[12] = blsoversample", "id": "f14765:m1"}
{"signature": "def bls_serial_pfind(times, mags, errs,<EOL>magsarefluxes=False,<EOL>startp=<NUM_LIT:0.1>,  <EOL>endp=<NUM_LIT>,  <EOL>stepsize=<NUM_LIT>,<EOL>mintransitduration=<NUM_LIT>,  <EOL>maxtransitduration=<NUM_LIT>,   <EOL>ndurations=<NUM_LIT:100>,<EOL>autofreq=True,  <EOL>blsobjective='<STR_LIT>',<EOL>blsmethod='<STR_LIT>',<EOL>blsoversample=<NUM_LIT:10>,<EOL>blsmintransits=<NUM_LIT:3>,<EOL>blsfreqfactor=<NUM_LIT>,<EOL>periodepsilon=<NUM_LIT:0.1>,<EOL>nbestpeaks=<NUM_LIT:5>,<EOL>sigclip=<NUM_LIT>,<EOL>verbose=True,<EOL>raiseonfail=False):", "body": "<EOL>stimes, smags, serrs = sigclip_magseries(times,<EOL>mags,<EOL>errs,<EOL>magsarefluxes=magsarefluxes,<EOL>sigclip=sigclip)<EOL>if len(stimes) > <NUM_LIT:9> and len(smags) > <NUM_LIT:9> and len(serrs) > <NUM_LIT:9>:<EOL><INDENT>if isinstance(autofreq, bool) and autofreq:<EOL><INDENT>stepsize = <NUM_LIT>*mintransitduration/(stimes.max()-stimes.min())<EOL>minfreq = <NUM_LIT:1.0>/endp<EOL>maxfreq = <NUM_LIT:1.0>/startp<EOL>nfreq = int(npceil((maxfreq - minfreq)/stepsize))<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' % (startp, endp, nfreq,<EOL>minfreq, maxfreq))<EOL>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(stepsize, ndurations,<EOL>mintransitduration, maxtransitduration))<EOL><DEDENT>use_autoperiod = False<EOL><DEDENT>elif isinstance(autofreq, bool) and not autofreq:<EOL><INDENT>minfreq = <NUM_LIT:1.0>/endp<EOL>maxfreq = <NUM_LIT:1.0>/startp<EOL>nfreq = int(npceil((maxfreq - minfreq)/stepsize))<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' % (startp, endp, nfreq,<EOL>minfreq, maxfreq))<EOL>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(stepsize, ndurations,<EOL>mintransitduration, maxtransitduration))<EOL><DEDENT>use_autoperiod = False<EOL><DEDENT>elif isinstance(autofreq, str) and autofreq == '<STR_LIT>':<EOL><INDENT>use_autoperiod = True<EOL>minfreq = <NUM_LIT:1.0>/endp<EOL>maxfreq = 
<NUM_LIT:1.0>/startp<EOL><DEDENT>else:<EOL><INDENT>LOGERROR(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>if minfreq < (<NUM_LIT:1.0>/(stimes.max() - stimes.min())):<EOL><INDENT>if verbose:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (endp, stimes.max() - stimes.min()))<EOL><DEDENT>minfreq = <NUM_LIT>/(stimes.max() - stimes.min())<EOL>if verbose:<EOL><INDENT>LOGINFO('<STR_LIT>' %<EOL>(minfreq, maxfreq))<EOL><DEDENT><DEDENT>try:<EOL><INDENT>durations = nplinspace(mintransitduration*startp,<EOL>maxtransitduration*startp,<EOL>ndurations)<EOL>if magsarefluxes:<EOL><INDENT>blsmodel = BoxLeastSquares(<EOL>stimes*u.day,<EOL>smags*u.dimensionless_unscaled,<EOL>dy=serrs*u.dimensionless_unscaled<EOL>)<EOL><DEDENT>else:<EOL><INDENT>blsmodel = BoxLeastSquares(<EOL>stimes*u.day,<EOL>smags*u.mag,<EOL>dy=serrs*u.mag<EOL>)<EOL><DEDENT>if use_autoperiod:<EOL><INDENT>periods = nparray(<EOL>blsmodel.autoperiod(<EOL>durations,<EOL>minimum_period=startp,<EOL>maximum_period=endp,<EOL>minimum_n_transit=blsmintransits,<EOL>frequency_factor=blsfreqfactor<EOL>)<EOL>)<EOL>nfreq = periods.size<EOL>if verbose:<EOL><INDENT>LOGINFO(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" %<EOL>(blsmintransits, blsfreqfactor)<EOL>)<EOL>LOGINFO('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(abs(<NUM_LIT:1.0>/periods[<NUM_LIT:1>] - <NUM_LIT:1.0>/periods[<NUM_LIT:0>]),<EOL>nfreq,<EOL><NUM_LIT:1.0>/periods.max(),<EOL><NUM_LIT:1.0>/periods.min(),<EOL>durations.size))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>frequencies = minfreq + nparange(nfreq)*stepsize<EOL>periods = <NUM_LIT:1.0>/frequencies<EOL><DEDENT>if nfreq > <NUM_LIT>:<EOL><INDENT>if verbose:<EOL><INDENT>LOGWARNING('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>blsresult = blsmodel.power(<EOL>periods*u.day,<EOL>durations*u.day,<EOL>objective=blsobjective,<EOL>method=blsmethod,<EOL>oversample=blsoversample<EOL>)<EOL>lsp = nparray(blsresult.power)<EOL>finitepeakind = npisfinite(lsp)<EOL>finlsp = 
lsp[finitepeakind]<EOL>finperiods = periods[finitepeakind]<EOL>try:<EOL><INDENT>bestperiodind = npargmax(finlsp)<EOL><DEDENT>except ValueError:<EOL><INDENT>LOGERROR('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':ndurations,<EOL>'<STR_LIT>':blsobjective,<EOL>'<STR_LIT>':blsmethod,<EOL>'<STR_LIT>':blsoversample,<EOL>'<STR_LIT>':blsmintransits,<EOL>'<STR_LIT>':blsfreqfactor,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip,<EOL>'<STR_LIT>':magsarefluxes}}<EOL><DEDENT>sortedlspind = npargsort(finlsp)[::-<NUM_LIT:1>]<EOL>sortedlspperiods = finperiods[sortedlspind]<EOL>sortedlspvals = finlsp[sortedlspind]<EOL>nbestperiods, nbestlspvals, nbestinds, peakcount = (<EOL>[finperiods[bestperiodind]],<EOL>[finlsp[bestperiodind]],<EOL>[bestperiodind],<EOL><NUM_LIT:1><EOL>)<EOL>prevperiod = sortedlspperiods[<NUM_LIT:0>]<EOL>for period, lspval, ind in zip(sortedlspperiods,<EOL>sortedlspvals,<EOL>sortedlspind):<EOL><INDENT>if peakcount == nbestpeaks:<EOL><INDENT>break<EOL><DEDENT>perioddiff = abs(period - prevperiod)<EOL>bestperiodsdiff = [abs(period - x) for x in nbestperiods]<EOL>if (perioddiff > (periodepsilon*prevperiod) and<EOL>all(x > (periodepsilon*period)<EOL>for x in bestperiodsdiff)):<EOL><INDENT>nbestperiods.append(period)<EOL>nbestlspvals.append(lspval)<EOL>nbestinds.append(ind)<EOL>peakcount = peakcount + <NUM_LIT:1><EOL><DEDENT>prevperiod = period<EOL><DEDENT>resultdict = 
{<EOL>'<STR_LIT>':finperiods[bestperiodind],<EOL>'<STR_LIT>':finlsp[bestperiodind],<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':nbestinds,<EOL>'<STR_LIT>':nbestlspvals,<EOL>'<STR_LIT>':nbestperiods,<EOL>'<STR_LIT>':lsp,<EOL>'<STR_LIT>':frequencies,<EOL>'<STR_LIT>':periods,<EOL>'<STR_LIT>':durations,<EOL>'<STR_LIT>':blsresult,<EOL>'<STR_LIT>':blsmodel,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':nfreq,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':ndurations,<EOL>'<STR_LIT>':blsobjective,<EOL>'<STR_LIT>':blsmethod,<EOL>'<STR_LIT>':blsoversample,<EOL>'<STR_LIT>':blsmintransits,<EOL>'<STR_LIT>':blsfreqfactor,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip,<EOL>'<STR_LIT>':magsarefluxes}<EOL>}<EOL>return resultdict<EOL><DEDENT>except Exception as e:<EOL><INDENT>LOGEXCEPTION('<STR_LIT>')<EOL>if raiseonfail:<EOL><INDENT>raise<EOL><DEDENT>return 
{'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':nfreq,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':ndurations,<EOL>'<STR_LIT>':blsobjective,<EOL>'<STR_LIT>':blsmethod,<EOL>'<STR_LIT>':blsoversample,<EOL>'<STR_LIT>':blsmintransits,<EOL>'<STR_LIT>':blsfreqfactor,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip,<EOL>'<STR_LIT>':magsarefluxes}}<EOL><DEDENT><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return {'<STR_LIT>':npnan,<EOL>'<STR_LIT>':npnan,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':None,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':'<STR_LIT>',<EOL>'<STR_LIT>':{'<STR_LIT>':startp,<EOL>'<STR_LIT>':endp,<EOL>'<STR_LIT>':stepsize,<EOL>'<STR_LIT>':mintransitduration,<EOL>'<STR_LIT>':maxtransitduration,<EOL>'<STR_LIT>':ndurations,<EOL>'<STR_LIT>':blsobjective,<EOL>'<STR_LIT>':blsmethod,<EOL>'<STR_LIT>':blsoversample,<EOL>'<STR_LIT>':blsmintransits,<EOL>'<STR_LIT>':blsfreqfactor,<EOL>'<STR_LIT>':autofreq,<EOL>'<STR_LIT>':periodepsilon,<EOL>'<STR_LIT>':nbestpeaks,<EOL>'<STR_LIT>':sigclip,<EOL>'<STR_LIT>':magsarefluxes}}<EOL><DEDENT>", "docstring": "Runs the Box Least Squares Fitting Search for transit-shaped signals.\n\n    Based on the version of BLS in Astropy 3.1:\n    
`astropy.stats.BoxLeastSquares`. If you don't have Astropy 3.1, this module\n    will fail to import. Note that by default, this implementation of\n    `bls_serial_pfind` doesn't use the `.autoperiod()` function from\n    `BoxLeastSquares` but uses the same auto frequency-grid generation as the\n    functions in `periodbase.kbls`. If you want to use Astropy's implementation,\n    set the value of `autofreq` kwarg to 'astropy'.\n\n    The dict returned from this function contains a `blsmodel` key, which is the\n    generated model from Astropy's BLS. Use the `.compute_stats()` method to\n    calculate the required stats like SNR, depth, duration, etc.\n\n    Parameters\n    ----------\n\n    times,mags,errs : np.array\n        The magnitude/flux time-series to search for transits.\n\n    magsarefluxes : bool\n        If the input measurement values in `mags` and `errs` are in fluxes, set\n        this to True.\n\n    startp,endp : float\n        The minimum and maximum periods to consider for the transit search.\n\n    stepsize : float\n        The step-size in frequency to use when constructing a frequency grid for\n        the period search.\n\n    mintransitduration,maxtransitduration : float\n        The minimum and maximum transitdurations (in units of phase) to consider\n        for the transit search.\n\n    ndurations : int\n        The number of transit durations to use in the period-search.\n\n    autofreq : bool or str\n        If this is True, the values of `stepsize` and `nphasebins` will be\n        ignored, and these, along with a frequency-grid, will be determined\n        based on the following relations::\n\n            nphasebins = int(ceil(2.0/mintransitduration))\n            if nphasebins > 3000:\n                nphasebins = 3000\n\n            stepsize = 0.25*mintransitduration/(times.max()-times.min())\n\n            minfreq = 1.0/endp\n            maxfreq = 1.0/startp\n            nfreq = int(ceil((maxfreq - minfreq)/stepsize))\n\n        
If this is False, you must set `startp`, `endp`, and `stepsize` as\n        appropriate.\n\n        If this is str == 'astropy', will use the\n        `astropy.stats.BoxLeastSquares.autoperiod()` function to calculate the\n        frequency grid instead of the kbls method.\n\n    blsobjective : {'likelihood','snr'}\n        Sets the type of objective to optimize in the `BoxLeastSquares.power()`\n        function.\n\n    blsmethod : {'fast','slow'}\n        Sets the type of method to use in the `BoxLeastSquares.power()`\n        function.\n\n    blsoversample : {'likelihood','snr'}\n        Sets the `oversample` kwarg for the `BoxLeastSquares.power()` function.\n\n    blsmintransits : int\n        Sets the `min_n_transits` kwarg for the `BoxLeastSquares.autoperiod()`\n        function.\n\n    blsfreqfactor : float\n        Sets the `frequency_factor` kwarg for the `BoxLeastSquares.autperiod()`\n        function.\n\n    periodepsilon : float\n        The fractional difference between successive values of 'best' periods\n        when sorting by periodogram power to consider them as separate periods\n        (as opposed to part of the same periodogram peak). This is used to avoid\n        broad peaks in the periodogram and make sure the 'best' periods returned\n        are all actually independent.\n\n    nbestpeaks : int\n        The number of 'best' peaks to return from the periodogram results,\n        starting from the global maximum of the periodogram peak values.\n\n    sigclip : float or int or sequence of two floats/ints or None\n        If a single float or int, a symmetric sigma-clip will be performed using\n        the number provided as the sigma-multiplier to cut out from the input\n        time-series.\n\n        If a list of two ints/floats is provided, the function will perform an\n        'asymmetric' sigma-clip. 
The first element in this list is the sigma\n        value to use for fainter flux/mag values; the second element in this\n        list is the sigma value to use for brighter flux/mag values. For\n        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma\n        dimmings and greater than 3-sigma brightenings. Here the meaning of\n        \"dimming\" and \"brightening\" is set by *physics* (not the magnitude\n        system), which is why the `magsarefluxes` kwarg must be correctly set.\n\n        If `sigclip` is None, no sigma-clipping will be performed, and the\n        time-series (with non-finite elems removed) will be passed through to\n        the output.\n\n    verbose : bool\n        If this is True, will indicate progress and details about the frequency\n        grid used for the period search.\n\n    raiseonfail : bool\n        If True, raises an exception if something goes wrong. Otherwise, returns\n        None.\n\n    Returns\n    -------\n\n    dict\n        This function returns a dict, referred to as an `lspinfo` dict in other\n        astrobase functions that operate on periodogram results. 
This is a\n        standardized format across all astrobase period-finders, and is of the\n        form below::\n\n            {'bestperiod': the best period value in the periodogram,\n             'bestlspval': the periodogram peak associated with the best period,\n             'nbestpeaks': the input value of nbestpeaks,\n             'nbestlspvals': nbestpeaks-size list of best period peak values,\n             'nbestperiods': nbestpeaks-size list of best periods,\n             'lspvals': the full array of periodogram powers,\n             'frequencies': the full array of frequencies considered,\n             'periods': the full array of periods considered,\n             'durations': the array of durations used to run BLS,\n             'blsresult': Astropy BLS result object (BoxLeastSquaresResult),\n             'blsmodel': Astropy BLS BoxLeastSquares object used for work,\n             'stepsize': the actual stepsize used,\n             'nfreq': the actual nfreq used,\n             'durations': the durations array used,\n             'mintransitduration': the input mintransitduration,\n             'maxtransitduration': the input maxtransitdurations,\n             'method':'bls' -> the name of the period-finder method,\n             'kwargs':{ dict of all of the input kwargs for record-keeping}}", "id": "f14765:m0"}
{"signature": "def read_kepler_fitslc(<EOL>lcfits,<EOL>headerkeys=LCHEADERKEYS,<EOL>datakeys=LCDATAKEYS,<EOL>sapkeys=LCSAPKEYS,<EOL>pdckeys=LCPDCKEYS,<EOL>topkeys=LCTOPKEYS,<EOL>apkeys=LCAPERTUREKEYS,<EOL>appendto=None,<EOL>normalize=False,<EOL>):", "body": "<EOL>hdulist = pyfits.open(lcfits)<EOL>lchdr, lcdata = hdulist[<NUM_LIT:1>].header, hdulist[<NUM_LIT:1>].data<EOL>lctophdr, lcaperturehdr, lcaperturedata = (hdulist[<NUM_LIT:0>].header,<EOL>hdulist[<NUM_LIT:2>].header,<EOL>hdulist[<NUM_LIT:2>].data)<EOL>hdulist.close()<EOL>hdrinfo = {}<EOL>for key in headerkeys:<EOL><INDENT>if key in lchdr and lchdr[key] is not None:<EOL><INDENT>hdrinfo[key.lower()] = lchdr[key]<EOL><DEDENT>else:<EOL><INDENT>hdrinfo[key.lower()] = None<EOL><DEDENT><DEDENT>ndet = lchdr['<STR_LIT>']<EOL>for key in topkeys:<EOL><INDENT>if key in lctophdr and lctophdr[key] is not None:<EOL><INDENT>hdrinfo[key.lower()] = lctophdr[key]<EOL><DEDENT>else:<EOL><INDENT>hdrinfo[key.lower()] = None<EOL><DEDENT><DEDENT>for key in lcaperturehdr:<EOL><INDENT>if key in lcaperturehdr and lcaperturehdr[key] is not None:<EOL><INDENT>hdrinfo[key.lower()] = lcaperturehdr[key]<EOL><DEDENT>else:<EOL><INDENT>hdrinfo[key.lower()] = None<EOL><DEDENT><DEDENT>if appendto and isinstance(appendto, dict):<EOL><INDENT>lcdict = appendto<EOL>lcdict['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(<EOL>hdrinfo['<STR_LIT>'] + 
hdrinfo['<STR_LIT>']<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(lcaperturedata)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(<EOL>(npabs(hdrinfo['<STR_LIT>']) +<EOL>npabs(hdrinfo['<STR_LIT>']))*<NUM_LIT>/<NUM_LIT><EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(ndet)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'].append(hdrinfo['<STR_LIT>'])<EOL>for key in datakeys:<EOL><INDENT>if key.lower() in lcdict:<EOL><INDENT>lcdict[key.lower()] = (<EOL>npconcatenate((lcdict[key.lower()], lcdata[key]))<EOL>)<EOL><DEDENT><DEDENT>for key in sapkeys:<EOL><INDENT>if key.lower() in lcdict['<STR_LIT>']:<EOL><INDENT>sapflux_median = np.nanmedian(lcdata['<STR_LIT>'])<EOL>if normalize and key == '<STR_LIT>':<EOL><INDENT>thislcdata = lcdata[key] / sapflux_median<EOL><DEDENT>elif normalize and key == '<STR_LIT>':<EOL><INDENT>thislcdata = lcdata[key] / sapflux_median<EOL><DEDENT>elif normalize and key == '<STR_LIT>':<EOL><INDENT>thislcdata = lcdata[key] / sapflux_median<EOL><DEDENT>elif normalize and key == '<STR_LIT>':<EOL><INDENT>thislcdata = lcdata[key] / sapflux_median<EOL><DEDENT>else:<EOL><INDENT>thislcdata = 
lcdata[key]<EOL><DEDENT>lcdict['<STR_LIT>'][key.lower()] = (<EOL>np.concatenate((lcdict['<STR_LIT>'][key.lower()], thislcdata))<EOL>)<EOL><DEDENT><DEDENT>for key in pdckeys:<EOL><INDENT>if key.lower() in lcdict['<STR_LIT>']:<EOL><INDENT>pdcsap_flux_median = np.nanmedian(lcdata['<STR_LIT>'])<EOL>if normalize and key == '<STR_LIT>':<EOL><INDENT>thislcdata = lcdata[key] / pdcsap_flux_median<EOL><DEDENT>elif normalize and key == '<STR_LIT>':<EOL><INDENT>thislcdata = lcdata[key] / pdcsap_flux_median<EOL><DEDENT>else:<EOL><INDENT>thislcdata = lcdata[key]<EOL><DEDENT>lcdict['<STR_LIT>'][key.lower()] = (<EOL>np.concatenate((lcdict['<STR_LIT>'][key.lower()], thislcdata))<EOL>)<EOL><DEDENT><DEDENT>lcdict['<STR_LIT>'] = npconcatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>npfull_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>']))<EOL>)<EOL>lcdict['<STR_LIT>'] = npconcatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>npfull_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>']))<EOL>)<EOL>lcdict['<STR_LIT>'] = npconcatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>npfull_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>']))<EOL>)<EOL>lcdict['<STR_LIT>'] = npconcatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>npfull_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>']))<EOL>)<EOL>lcdict['<STR_LIT>'] = npconcatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>npfull_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>']))<EOL>)<EOL>lcdict['<STR_LIT>'] = npconcatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>npfull_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>']))<EOL>)<EOL>lcdict['<STR_LIT>'] = npconcatenate(<EOL>(lcdict['<STR_LIT>'],<EOL>npfull_like(lcdata['<STR_LIT>'],<EOL>hdrinfo['<STR_LIT>']))<EOL>)<EOL><DEDENT>else:<EOL><INDENT>lcdict = {<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],  
<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT:object>'],<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>'] + hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[lcaperturedata],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[(npabs(hdrinfo['<STR_LIT>']) +<EOL>npabs(hdrinfo['<STR_LIT>']))*<NUM_LIT>/<NUM_LIT>],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[ndet],<EOL>},<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':hdrinfo['<STR_LIT:object>'],  <EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>},<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>},<EOL>'<STR_LIT>':{},<EOL>'<STR_LIT>':{},<EOL>}<EOL>for key in datakeys:<EOL><INDENT>lcdict[key.lower()] = lcdata[key]<EOL><DEDENT>for key in sapkeys:<EOL><INDENT>lcdict['<STR_LIT>'][key.lower()] = lcdata[key]<EOL><DEDENT>for key in 
pdckeys:<EOL><INDENT>lcdict['<STR_LIT>'][key.lower()] = lcdata[key]<EOL><DEDENT>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>'][<NUM_LIT:0>])<EOL>if normalize:<EOL><INDENT>sapflux_median = np.nanmedian(lcdict['<STR_LIT>']['<STR_LIT>'])<EOL>pdcsap_flux_median = np.nanmedian(lcdict['<STR_LIT>']['<STR_LIT>'])<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] /<EOL>sapflux_median<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] /<EOL>sapflux_median<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] /<EOL>sapflux_median<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] /<EOL>sapflux_median<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] /<EOL>pdcsap_flux_median<EOL>)<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = (<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] /<EOL>pdcsap_flux_median<EOL>)<EOL><DEDENT><DEDENT>lcdict['<STR_LIT>'] = (<EOL>[x.lower() for x in datakeys] +<EOL>['<STR_LIT>' % x.lower() for x in sapkeys] +<EOL>['<STR_LIT>' % x.lower() for x in pdckeys] +<EOL>['<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>']<EOL>)<EOL>return lcdict<EOL>", "docstring": "This extracts the light curve from a single 
Kepler or K2 LC FITS file.\n\n    This works on the light curves available at MAST:\n\n    - `kplr{kepid}-{somedatething}_llc.fits` files from the Kepler mission\n    - `ktwo{epicid}-c{campaign}_llc.fits` files from the K2 mission\n\n    Parameters\n    ----------\n\n    lcfits : str\n        The filename of a MAST Kepler/K2 light curve FITS file.\n\n    headerkeys : list\n        A list of FITS header keys that will be extracted from the FITS light\n        curve file. These describe the observations. The default value for this\n        is given in `LCHEADERKEYS` above.\n\n    datakeys : list\n        A list of FITS column names that correspond to the auxiliary\n        measurements in the light curve. The default is `LCDATAKEYS` above.\n\n    sapkeys : list\n        A list of FITS column names that correspond to the SAP flux\n        measurements in the light curve. The default is `LCSAPKEYS` above.\n\n    pdckeys : list\n        A list of FITS column names that correspond to the PDC flux\n        measurements in the light curve. The default is `LCPDCKEYS` above.\n\n    topkeys : list\n        A list of FITS header keys that describe the object in the light\n        curve. The default is `LCTOPKEYS` above.\n\n    apkeys : list\n        A list of FITS header keys that describe the flux measurement apertures\n        used by the Kepler/K2 pipeline. The default is `LCAPERTUREKEYS` above.\n\n    appendto : lcdict or None\n        If appendto is an `lcdict`, will append measurements of this `lcdict` to\n        that `lcdict`. This is used for consolidating light curves for the same\n        object across different files (quarters). The appending does not care\n        about the time order. 
To consolidate light curves in time order, use\n        `consolidate_kepler_fitslc` below.\n\n    normalize : bool\n        If True, then each component light curve's SAP_FLUX and PDCSAP_FLUX\n        measurements will be normalized to 1.0 by dividing out the median flux\n        for the component light curve.\n\n    Returns\n    -------\n\n    lcdict\n        Returns an `lcdict` (this is useable by most astrobase functions for LC\n        processing).", "id": "f14766:m4"}
{"signature": "def keplerflux_to_keplermag(keplerflux, f12=<NUM_LIT>):", "body": "kepmag = <NUM_LIT> - <NUM_LIT>*nplog10(keplerflux/f12)<EOL>return kepmag<EOL>", "docstring": "This converts the Kepler flux in electrons/sec to Kepler magnitude.\n\n    The kepler mag/flux relation is::\n\n        fkep = (10.0**(-0.4*(kepmag - 12.0)))*f12\n        f12 = 1.74e5 # electrons/sec\n\n    Parameters\n    ----------\n\n    keplerflux : float or array-like\n        The flux value(s) to convert to magnitudes.\n\n    f12 : float\n        The flux value in the Kepler band corresponding to Kepler mag = 12.0.\n\n    Returns\n    -------\n\n    np.array\n        Magnitudes in the Kepler band corresponding to the input `keplerflux`\n        flux value(s).", "id": "f14766:m0"}
{"signature": "def read_k2sff_lightcurve(lcfits):", "body": "<EOL>hdulist = pyfits.open(lcfits)<EOL>lchdr, lcdata = hdulist[<NUM_LIT:1>].header, hdulist[<NUM_LIT:1>].data<EOL>lctophdr = hdulist[<NUM_LIT:0>].header<EOL>hdulist.close()<EOL>hdrinfo = {}<EOL>ndet = lchdr['<STR_LIT>']<EOL>for key in SFFTOPKEYS:<EOL><INDENT>if key in lctophdr and lctophdr[key] is not None:<EOL><INDENT>hdrinfo[key.lower()] = lctophdr[key]<EOL><DEDENT>else:<EOL><INDENT>hdrinfo[key.lower()] = None<EOL><DEDENT><DEDENT>for key in SFFHEADERKEYS:<EOL><INDENT>if key in lchdr and lchdr[key] is not None:<EOL><INDENT>hdrinfo[key.lower()] = lchdr[key]<EOL><DEDENT>else:<EOL><INDENT>hdrinfo[key.lower()] = None<EOL><DEDENT><DEDENT>lcdict = {<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT:object>'],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>'] + 
hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[None],<EOL>'<STR_LIT>':[None],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[ndet],<EOL>},<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>'<STR_LIT>':hdrinfo['<STR_LIT>'],<EOL>},<EOL>'<STR_LIT>':{<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>'<STR_LIT>':[hdrinfo['<STR_LIT>']],<EOL>},<EOL>}<EOL>for key in SFFDATAKEYS:<EOL><INDENT>lcdict[key.lower()] = lcdata[key]<EOL><DEDENT>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:t>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:t>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:t>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:t>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = 
npfull_like(lcdict['<STR_LIT:t>'],<EOL>lcdict['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:t>'],<EOL>lcdict['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = npfull_like(lcdict['<STR_LIT:t>'],<EOL>lcdict['<STR_LIT>'][<NUM_LIT:0>])<EOL>lcdict['<STR_LIT>'] = (<EOL>[x.lower() for x in SFFDATAKEYS] +<EOL>['<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>']<EOL>)<EOL>return lcdict<EOL>", "docstring": "This reads a K2 SFF (Vandenberg+ 2014) light curve into an `lcdict`.\n\n    Use this with the light curves from the K2 SFF project at MAST.\n\n    Parameters\n    ----------\n\n    lcfits : str\n        The filename of the FITS light curve file downloaded from MAST.\n\n    Returns\n    -------\n\n    lcdict\n        Returns an `lcdict` (this is useable by most astrobase functions for LC\n        processing).", "id": "f14766:m6"}
{"signature": "def epd_kepler_lightcurve(lcdict,<EOL>xccol='<STR_LIT>',<EOL>yccol='<STR_LIT>',<EOL>timestoignore=None,<EOL>filterflags=True,<EOL>writetodict=True,<EOL>epdsmooth=<NUM_LIT:5>):", "body": "times, fluxes, background, background_err = (lcdict['<STR_LIT:time>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'],<EOL>lcdict['<STR_LIT>']['<STR_LIT>'])<EOL>xcc = lcdict[xccol]<EOL>ycc = lcdict[yccol]<EOL>flags = lcdict['<STR_LIT>']<EOL>if filterflags:<EOL><INDENT>nbefore = times.size<EOL>filterind = flags == <NUM_LIT:0><EOL>times = times[filterind]<EOL>fluxes = fluxes[filterind]<EOL>background = background[filterind]<EOL>background_err = background_err[filterind]<EOL>xcc = xcc[filterind]<EOL>ycc = ycc[filterind]<EOL>flags = flags[filterind]<EOL>nafter = times.size<EOL>LOGINFO('<STR_LIT>'<EOL>% (nbefore, nafter))<EOL><DEDENT>find = (npisfinite(xcc) & npisfinite(ycc) &<EOL>npisfinite(times) & npisfinite(fluxes) &<EOL>npisfinite(background) & npisfinite(background_err))<EOL>nbefore = times.size<EOL>times = times[find]<EOL>fluxes = fluxes[find]<EOL>background = background[find]<EOL>background_err = background_err[find]<EOL>xcc = xcc[find]<EOL>ycc = ycc[find]<EOL>flags = flags[find]<EOL>nafter = times.size<EOL>LOGINFO('<STR_LIT>'<EOL>% (nbefore, nafter))<EOL>if (timestoignore and<EOL>isinstance(timestoignore, list) and<EOL>len(timestoignore) > <NUM_LIT:0>):<EOL><INDENT>exclind = npfull_like(times,True)<EOL>nbefore = times.size<EOL>for ignoretime in timestoignore:<EOL><INDENT>time0, time1 = ignoretime[<NUM_LIT:0>], ignoretime[<NUM_LIT:1>]<EOL>thismask = (times > time0) & (times < time1)<EOL>exclind = exclind & thismask<EOL><DEDENT>times = times[exclind]<EOL>fluxes = fluxes[exclind]<EOL>background = background[exclind]<EOL>background_err = background_err[exclind]<EOL>xcc = xcc[exclind]<EOL>ycc = ycc[exclind]<EOL>flags = flags[exclind]<EOL>nafter = times.size<EOL>LOGINFO('<STR_LIT>'<EOL>% (nbefore, nafter))<EOL><DEDENT>smoothedfluxes = 
median_filter(fluxes, size=epdsmooth)<EOL>initcoeffs = npones(<NUM_LIT:11>)<EOL>leastsqfit = leastsq(_epd_residual,<EOL>initcoeffs,<EOL>args=(smoothedfluxes,<EOL>xcc, ycc,<EOL>background, background_err))<EOL>if leastsqfit[-<NUM_LIT:1>] in (<NUM_LIT:1>,<NUM_LIT:2>,<NUM_LIT:3>,<NUM_LIT:4>):<EOL><INDENT>fitcoeffs = leastsqfit[<NUM_LIT:0>]<EOL>epdfit = _epd_function(fitcoeffs,<EOL>fluxes,<EOL>xcc,<EOL>ycc,<EOL>background,<EOL>background_err)<EOL>epdfluxes = npmedian(fluxes) + fluxes - epdfit<EOL>if writetodict:<EOL><INDENT>lcdict['<STR_LIT>'] = {}<EOL>lcdict['<STR_LIT>']['<STR_LIT:time>'] = times<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = fluxes<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = epdfluxes<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = epdfit<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = background<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = background_err<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = xcc<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = ycc<EOL>lcdict['<STR_LIT>']['<STR_LIT>'] = flags<EOL>for newcol in ['<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>']:<EOL><INDENT>if newcol not in lcdict['<STR_LIT>']:<EOL><INDENT>lcdict['<STR_LIT>'].append(newcol)<EOL><DEDENT><DEDENT><DEDENT>return times, epdfluxes, fitcoeffs, epdfit<EOL><DEDENT>else:<EOL><INDENT>LOGERROR('<STR_LIT>')<EOL>return None, None, None, None<EOL><DEDENT>", "docstring": "This runs EPD on the Kepler light curve.\n\n    Following Huang et al. 
2015, we fit the following EPD function to a smoothed\n    light curve, and then subtract it to obtain EPD corrected magnitudes::\n\n        f = c0 +\n            c1*sin(2*pi*x) + c2*cos(2*pi*x) + c3*sin(2*pi*y) + c4*cos(2*pi*y) +\n            c5*sin(4*pi*x) + c6*cos(4*pi*x) + c7*sin(4*pi*y) + c8*cos(4*pi*y) +\n            c9*bgv + c10*bge\n\n    By default, this function removes points in the Kepler LC that have ANY\n    quality flags set.\n\n    Parameters\n    ----------\n\n    lcdict : lcdict\n        An `lcdict` produced by `consolidate_kepler_fitslc` or\n        `read_kepler_fitslc`.\n\n    xcol,ycol : str\n        Indicates the x and y coordinate column names to use from the Kepler LC\n        in the EPD fit.\n\n    timestoignore : list of tuples\n        This is of the form::\n\n            [(time1_start, time1_end), (time2_start, time2_end), ...]\n\n        and indicates the start and end times to mask out of the final\n        lcdict. Use this to remove anything that wasn't caught by the quality\n        flags.\n\n    filterflags : bool\n        If True, will remove any measurements that have non-zero quality flags\n        present. 
This usually indicates an issue with the instrument or\n        spacecraft.\n\n    writetodict : bool\n        If writetodict is True, adds the following columns to the lcdict::\n\n            epd_time = time array\n            epd_sapflux = uncorrected flux before EPD\n            epd_epdsapflux = corrected flux after EPD\n            epd_epdsapcorr = EPD flux corrections\n            epd_bkg = background array\n            epd_bkg_err = background errors array\n            epd_xcc = xcoord array\n            epd_ycc = ycoord array\n            epd_quality = quality flag array\n\n        and updates the 'columns' list in the lcdict as well.\n\n    epdsmooth : int\n        Sets the number of light curve points to smooth over when generating the\n        EPD fit function.\n\n    Returns\n    -------\n\n    tuple\n        Returns a tuple of the form: (times, epdfluxes, fitcoeffs, epdfit)", "id": "f14766:m13"}
{"signature": "def _epd_residual(coeffs, fluxes, xcc, ycc, bgv, bge):", "body": "f = _epd_function(coeffs, fluxes, xcc, ycc, bgv, bge)<EOL>residual = fluxes - f<EOL>return residual<EOL>", "docstring": "This is the residual function to minimize using scipy.optimize.leastsq.\n\n    Parameters\n    ----------\n\n    coeffs : array-like of floats\n        Contains the EPD coefficients that will be used to generate the EPD fit\n        function.\n\n    fluxes : array-like\n        The flux measurement array being used.\n\n    xcc,ycc : array-like\n        Arrays of the x and y coordinates associated with each measurement in\n        `fluxes`.\n\n    bgv,bge : array-like\n        Arrays of the flux background value and the flux background error\n        associated with each measurement in `fluxes`.\n\n    Returns\n    -------\n\n    np.array\n        Contains the fit function residual evaluated at each flux measurement\n        value.", "id": "f14766:m12"}
{"signature": "def pre(self, command, output_dir, vars):", "body": "<EOL>vars['<STR_LIT>'] = '<STR_LIT>'<EOL>vars['<STR_LIT>'] = time.strftime('<STR_LIT>', time.localtime())<EOL>", "docstring": "Called before template is applied.", "id": "f14769:c0:m0"}
{"signature": "def prop_power(self, propulsion_eff=<NUM_LIT>, sea_margin=<NUM_LIT>):", "body": "PP = (<NUM_LIT:1> + sea_margin) * self.resistance() * self.speed/propulsion_eff<EOL>return PP<EOL>", "docstring": "Total propulsion power of the ship.\n\n:param propulsion_eff: Shaft efficiency of the ship\n:param sea_margin: Sea margin take account of interaction between ship and the sea, e.g. wave\n:return: Watts shaft propulsion power of the ship", "id": "f14773:c0:m5"}
{"signature": "def residual_resistance_coef(slenderness, prismatic_coef, froude_number):", "body": "Cr = cr(slenderness, prismatic_coef, froude_number)<EOL>if math.isnan(Cr):<EOL><INDENT>Cr = cr_nearest(slenderness, prismatic_coef, froude_number)<EOL><DEDENT>return Cr<EOL>", "docstring": "Residual resistance coefficient estimation from slenderness function, prismatic coefficient and Froude number.\n\n:param slenderness: Slenderness coefficient dimensionless :math:`L/(\u2207^{1/3})` where L is length of ship, \u2207 is displacement\n:param prismatic_coef: Prismatic coefficient dimensionless :math:`\u2207/(L\\cdot A_m)` where L is length of ship, \u2207 is displacement Am is midsection area of the ship\n:param froude_number: Froude number of the ship dimensionless \n:return: Residual resistance of the ship", "id": "f14773:m3"}
{"signature": "def get_reynold_number(self):", "body": "return reynolds_number(self.length, self.speed)<EOL>", "docstring": "Return Reynold number of the ship\n\n:return: Reynold number of the ship", "id": "f14773:c0:m4"}
{"signature": "def resistance(self):", "body": "self.total_resistance_coef = frictional_resistance_coef(self.length, self.speed) +residual_resistance_coef(self.slenderness_coefficient,<EOL>self.prismatic_coefficient,<EOL>froude_number(self.speed, self.length))<EOL>RT = <NUM_LIT:1> / <NUM_LIT:2> * self.total_resistance_coef * <NUM_LIT> * self.surface_area * self.speed ** <NUM_LIT:2><EOL>return RT<EOL>", "docstring": "Return resistance of the vehicle.\n\n:return: newton the resistance of the ship", "id": "f14773:c0:m2"}
{"signature": "def froude_number(speed, length):", "body": "g = <NUM_LIT>  <EOL>Fr = speed / np.sqrt(g * length)<EOL>return Fr<EOL>", "docstring": "Froude number utility function that return Froude number for vehicle at specific length and speed.\n\n:param speed: m/s speed of the vehicle\n:param length: metres length of the vehicle\n:return: Froude number of the vehicle (dimensionless)", "id": "f14773:m2"}
{"signature": "def string_insert(str1, str2, i):", "body": "return str1[:i] + str2 + str1[i:]<EOL>", "docstring": "Insert a string in the middle of another string\n:param str1: the original string\n:param str2: the string to be inserted\n:param i: the index of the insertion position\n:return: the resulting string", "id": "f14774:m14"}
{"signature": "def format_datetime(t: datetime = None):", "body": "return (datetime.now() if t is None else t).strftime('<STR_LIT>')<EOL>", "docstring": "Format a datetime object into yyyy-MM-dd hh:mm:ss\n:param t: datetime object, default: now\n:return: the formatted string", "id": "f14774:m15"}
{"signature": "def dict_format_type(d, source, formatter, include_list=True):", "body": "if not isinstance(d, dict):<EOL><INDENT>if isinstance(d, source):<EOL><INDENT>return formatter(d)<EOL><DEDENT>else:<EOL><INDENT>return d<EOL><DEDENT><DEDENT>else:<EOL><INDENT>dd = dict()<EOL>for key, value in d.items():<EOL><INDENT>if include_list and isinstance(value, list):<EOL><INDENT>dd[key] = [dict_format_type(i, source, formatter) for i in value]<EOL><DEDENT>elif isinstance(value, dict):<EOL><INDENT>dd[key] = dict_format_type(value, source, formatter)<EOL><DEDENT>elif isinstance(value, source):<EOL><INDENT>dd[key] = formatter(value)<EOL><DEDENT>else:<EOL><INDENT>dd[key] = value<EOL><DEDENT><DEDENT>return dd<EOL><DEDENT>", "docstring": "Replace the values of a dict with certain type to other values\n:param d: the dictionary\n:param source: the source type, e.g., int\n:param formatter: the formatter method, e.g., return the string format of an int\n:param include_list: whether list should be formatted, otherwise list will be considered as source type\n:return: formatted dictionary", "id": "f14774:m9"}
{"signature": "def random_numbers(n):", "body": "return '<STR_LIT>'.join(random.SystemRandom().choice(string.digits) for _ in range(n))<EOL>", "docstring": "Generate a random string from 0-9\n:param n: length of the string\n:return: the random string", "id": "f14774:m3"}
{"signature": "def dict_top(d, k, n, reverse=False):", "body": "h = list()<EOL>for i in range(len(d)):<EOL><INDENT>heappush(h, (-d[i][k] if reverse else d[i][k], i))<EOL><DEDENT>r = list()<EOL>while len(r) < n and len(h) > <NUM_LIT:0>:<EOL><INDENT>_, i = heappop(h)<EOL>r.append(d[i].copy())<EOL><DEDENT>return r<EOL>", "docstring": "Return top n of a dictionary list sorted by key\n:param d: dictionary list\n:param k: key\n:param n: top n\n:param reverse: whether the value should be reversed\n:return: top n of the sorted dictionary list", "id": "f14774:m7"}
{"signature": "def progress(count, total, prefix='<STR_LIT>', suffix='<STR_LIT>', length=<NUM_LIT>):", "body": "bar_len = length<EOL>filled_len = int(round(bar_len * count / float(total)))<EOL>percents = round(<NUM_LIT> * count / float(total), <NUM_LIT:1>)<EOL>bar = '<STR_LIT:=>' * filled_len + '<STR_LIT:->' * (bar_len - filled_len)<EOL>sys.stdout.write('<STR_LIT>' % (prefix, bar, percents, '<STR_LIT:%>', '<STR_LIT:U+0020>' + suffix))<EOL>sys.stdout.flush()<EOL>", "docstring": "Show a progress bar\n:param count: current progress\n:param total: total progress\n:param prefix: prefix shown before the progress bar\n:param suffix: suffix shown after the progress bar\n:param length: length of the progress bar, default: 60\n:return: none", "id": "f14774:m18"}
{"signature": "def random_letters(n):", "body": "return '<STR_LIT>'.join(random.SystemRandom().choice(string.ascii_letters) for _ in range(n))<EOL>", "docstring": "Generate a random string from a-zA-Z\n:param n: length of the string\n:return: the random string", "id": "f14774:m2"}
{"signature": "def dict_search(d, k, v):", "body": "for i in range(len(d)):<EOL><INDENT>if d[i][k] == v:<EOL><INDENT>return i<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Search dictionary list by key and value\n:param d: dictionary list\n:param k: key\n:param v: value\n:return: the index of the first dictionary in the array with the specific key / value", "id": "f14774:m4"}
{"signature": "def dict_merge(a, b, k):", "body": "c = a.copy()<EOL>for j in range(len(b)):<EOL><INDENT>flag = False<EOL>for i in range(len(c)):<EOL><INDENT>if c[i][k] == b[j][k]:<EOL><INDENT>c[i] = b[j].copy()<EOL>flag = True<EOL><DEDENT><DEDENT>if not flag:<EOL><INDENT>c.append(b[j].copy())<EOL><DEDENT><DEDENT>return c<EOL>", "docstring": "Merge two dictionary lists\n:param a: original list\n:param b: alternative list, element will replace the one in original list with same key\n:param k: key\n:return: the merged list", "id": "f14774:m5"}
{"signature": "def dict_remove_key(d, k):", "body": "dd = dict()<EOL>for key, value in d.items():<EOL><INDENT>if not key == k:<EOL><INDENT>if isinstance(value, dict):<EOL><INDENT>dd[key] = dict_remove_key(value, k)<EOL><DEDENT>elif isinstance(value, list):<EOL><INDENT>dd[key] = [dict_remove_key(i, k) for i in value]<EOL><DEDENT>else:<EOL><INDENT>dd[key] = value<EOL><DEDENT><DEDENT><DEDENT>return dd<EOL>", "docstring": "Recursively remove a key from a dict\n:param d: the dictionary\n:param k: key which should be removed\n:return: formatted dictionary", "id": "f14774:m10"}
{"signature": "def SIRode(y0, time, beta, gamma):", "body": "Xsim = rk4(SIR_D, y0, time, args=(beta,gamma,))<EOL>Xsim = Xsim.transpose()<EOL>return Xsim<EOL>", "docstring": "Integrate SIR epidemic model\n\n    Simulate a very basic deterministic SIR system.\n\n    :param 2x1 numpy array y0: initial conditions\n    :param Ntimestep length numpy array time: Vector of time points that \\\n    solution is returned at\n    :param float beta: transmission rate\n    :param float gamma: recovery rate\n\n    :returns: (2)x(Ntimestep) numpy array Xsim: first row S(t), second row I(t)", "id": "f14791:m0"}
{"signature": "def __init__(self, filename, damethod, date, ensize):", "body": "self.filename = filename<EOL>self.damethod = damethod<EOL>self.date = date<EOL>self.ensize = ensize<EOL>self.dafile = h5py.File(self.filename, \"<STR_LIT:a>\")<EOL>self.dafile.attrs['<STR_LIT>'] = self.damethod<EOL>self.dafile.attrs['<STR_LIT:date>'] = self.date<EOL>self.dafile.attrs['<STR_LIT>'] = self.ensize<EOL>self.dafile.create_group(\"<STR_LIT>\")<EOL>self.dafile.create_group(\"<STR_LIT>\")<EOL>self.dafile.create_group(\"<STR_LIT>\")<EOL>self.dafile.create_group(\"<STR_LIT>\")<EOL>self.dafile.create_group(\"<STR_LIT>\")<EOL>self.dafile.create_group(\"<STR_LIT>\")<EOL>", "docstring": "Initialize darun attributes\n\n        Args:\n            filename (str): Absolute path of file name as a string with `hdf5` extension\n            damethod (str): Name of the assimilation method used, i.e. `enkf`.\n            date (str): Date of the experiment `MM-DD-YYYY:HHHH`\n            ensize (int): ensemble size", "id": "f14798:c0:m0"}
{"signature": "def _timestamp(when):", "body": "return (time.mktime(when.timetuple()) if sys.version_info < (<NUM_LIT:3>,) else<EOL>when.timestamp())<EOL>", "docstring": "Python 2 compatibility for `datetime.timestamp()`.", "id": "f14842:m0"}
{"signature": "def _hashable_bytes(data):", "body": "if isinstance(data, bytes):<EOL><INDENT>return data<EOL><DEDENT>elif isinstance(data, str):<EOL><INDENT>return data.encode('<STR_LIT:ascii>')  <EOL><DEDENT>else:<EOL><INDENT>raise TypeError(data)<EOL><DEDENT>", "docstring": "Coerce strings to hashable bytes.", "id": "f14842:m1"}
{"signature": "@register.tag<EOL>def uservoice(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return UserVoiceNode()<EOL>", "docstring": "UserVoice tracking template tag.\n\nRenders Javascript code to track page visits.  You must supply\nyour UserVoice Widget Key in the ``USERVOICE_WIDGET_KEY``\nsetting or the ``uservoice_widget_key`` template context variable.", "id": "f14847:m0"}
{"signature": "@register.tag<EOL>def kiss_insights(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return KissInsightsNode()<EOL>", "docstring": "KISSinsights set-up template tag.\n\nRenders Javascript code to set-up surveys.  You must supply\nyour account number and site code in the\n``KISS_INSIGHTS_ACCOUNT_NUMBER`` and ``KISS_INSIGHTS_SITE_CODE``\nsettings.", "id": "f14848:m0"}
{"signature": "@register.tag<EOL>def matomo(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return MatomoNode()<EOL>", "docstring": "Matomo tracking template tag.\n\nRenders Javascript code to track page visits.  You must supply\nyour Matomo domain (plus optional URI path), and tracked site ID\nin the ``MATOMO_DOMAIN_PATH`` and the ``MATOMO_SITE_ID`` setting.\n\nCustom variables can be passed in the ``matomo_vars`` context\nvariable.  It is an iterable of custom variables as tuples like:\n``(index, name, value[, scope])`` where scope may be ``'page'``\n(default) or ``'visit'``.  Index should be an integer and the\nother parameters should be strings.", "id": "f14849:m0"}
{"signature": "@register.tag<EOL>def spring_metrics(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return SpringMetricsNode()<EOL>", "docstring": "Spring Metrics tracking template tag.\n\nRenders Javascript code to track page visits.  You must supply\nyour Spring Metrics Tracking ID in the\n``SPRING_METRICS_TRACKING_ID`` setting.", "id": "f14850:m0"}
{"signature": "@register.tag<EOL>def performable(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return PerformableNode()<EOL>", "docstring": "Performable template tag.\n\nRenders Javascript code to set-up Performable tracking.  You must\nsupply your Performable API key in the ``PERFORMABLE_API_KEY``\nsetting.", "id": "f14851:m0"}
{"signature": "@register.simple_tag<EOL>def performable_embed(hostname, page_id):", "body": "return mark_safe(EMBED_CODE % {<EOL>'<STR_LIT>': hostname,<EOL>'<STR_LIT>': page_id,<EOL>})<EOL>", "docstring": "Include a Performable landing page.", "id": "f14851:m1"}
{"signature": "@register.tag<EOL>def rating_mailru(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return RatingMailruNode()<EOL>", "docstring": "Rating@Mail.ru counter template tag.\n\nRenders Javascript code to track page visits. You must supply\nyour website counter ID (as a string) in the\n``RATING_MAILRU_COUNTER_ID`` setting.", "id": "f14852:m0"}
{"signature": "@register.tag<EOL>def hubspot(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return HubSpotNode()<EOL>", "docstring": "HubSpot tracking template tag.\n\nRenders Javascript code to track page visits.  You must supply\nyour portal ID (as a string) in the ``HUBSPOT_PORTAL_ID`` setting.", "id": "f14855:m0"}
{"signature": "@register.tag<EOL>def chartbeat_bottom(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return ChartbeatBottomNode()<EOL>", "docstring": "Bottom Chartbeat template tag.\n\nRender the bottom Javascript code for Chartbeat.  You must supply\nyour Chartbeat User ID (as a string) in the ``CHARTBEAT_USER_ID``\nsetting.", "id": "f14858:m1"}
{"signature": "@register.tag<EOL>def chartbeat_top(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return ChartbeatTopNode()<EOL>", "docstring": "Top Chartbeat template tag.\n\nRender the top Javascript code for Chartbeat.", "id": "f14858:m0"}
{"signature": "@register.tag<EOL>def facebook_pixel_head(parser, token):", "body": "_validate_no_args(token)<EOL>return FacebookPixelHeadNode()<EOL>", "docstring": "Facebook Pixel head template tag.", "id": "f14860:m1"}
{"signature": "@register.tag<EOL>def gauges(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return GaugesNode()<EOL>", "docstring": "Gaug.es template tag.\n\nRenders Javascript code to gaug.es testing.  You must supply\nyour Site ID account number in the ``GAUGES_SITE_ID``\nsetting.", "id": "f14861:m0"}
{"signature": "@register.tag<EOL>def olark(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return OlarkNode()<EOL>", "docstring": "Olark set-up template tag.\n\nRenders Javascript code to set-up Olark chat.  You must supply\nyour site ID in the ``OLARK_SITE_ID`` setting.", "id": "f14862:m0"}
{"signature": "@register.tag<EOL>def clicky(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return ClickyNode()<EOL>", "docstring": "Clicky tracking template tag.\n\nRenders Javascript code to track page visits.  You must supply\nyour Clicky Site ID (as a string) in the ``CLICKY_SITE_ID``\nsetting.", "id": "f14863:m0"}
{"signature": "@register.tag<EOL>def mixpanel(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return MixpanelNode()<EOL>", "docstring": "Mixpanel tracking template tag.\n\nRenders Javascript code to track page visits.  You must supply\nyour Mixpanel token in the ``MIXPANEL_API_TOKEN`` setting.", "id": "f14864:m0"}
{"signature": "@register.tag<EOL>def kiss_metrics(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return KissMetricsNode()<EOL>", "docstring": "KISSinsights tracking template tag.\n\nRenders Javascript code to track page visits.  You must supply\nyour KISSmetrics API key in the ``KISS_METRICS_API_KEY``\nsetting.", "id": "f14865:m0"}
{"signature": "@register.tag<EOL>def snapengage(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return SnapEngageNode()<EOL>", "docstring": "SnapEngage set-up template tag.\n\nRenders Javascript code to set-up SnapEngage chat.  You must supply\nyour widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting.", "id": "f14867:m0"}
{"signature": "@register.tag<EOL>def gosquared(parser, token):", "body": "bits = token.split_contents()<EOL>if len(bits) > <NUM_LIT:1>:<EOL><INDENT>raise TemplateSyntaxError(\"<STR_LIT>\" % bits[<NUM_LIT:0>])<EOL><DEDENT>return GoSquaredNode()<EOL>", "docstring": "GoSquared tracking template tag.\n\nRenders Javascript code to track page visits.  You must supply\nyour GoSquared site token in the ``GOSQUARED_SITE_TOKEN`` setting.", "id": "f14868:m0"}
{"signature": "def is_internal_ip(context, prefix=None):", "body": "try:<EOL><INDENT>request = context['<STR_LIT>']<EOL>remote_ip = request.META.get('<STR_LIT>', '<STR_LIT>')<EOL>if not remote_ip:<EOL><INDENT>remote_ip = request.META.get('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>if not remote_ip:<EOL><INDENT>return False<EOL><DEDENT>internal_ips = None<EOL>if prefix is not None:<EOL><INDENT>internal_ips = getattr(settings, '<STR_LIT>' % prefix, None)<EOL><DEDENT>if internal_ips is None:<EOL><INDENT>internal_ips = getattr(settings, '<STR_LIT>', None)<EOL><DEDENT>if internal_ips is None:<EOL><INDENT>internal_ips = getattr(settings, '<STR_LIT>', None)<EOL><DEDENT>return remote_ip in (internal_ips or [])<EOL><DEDENT>except (KeyError, AttributeError):<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Return whether the visitor is coming from an internal IP address,\nbased on information from the template context.\n\nThe prefix is used to allow different analytics services to have\ndifferent notions of internal addresses.", "id": "f14870:m5"}
{"signature": "def get_identity(context, prefix=None, identity_func=None, user=None):", "body": "if prefix is not None:<EOL><INDENT>try:<EOL><INDENT>return context['<STR_LIT>' % prefix]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>try:<EOL><INDENT>return context['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>if getattr(settings, '<STR_LIT>', True):<EOL><INDENT>try:<EOL><INDENT>if user is None:<EOL><INDENT>user = get_user_from_context(context)<EOL><DEDENT>if get_user_is_authenticated(user):<EOL><INDENT>if identity_func is not None:<EOL><INDENT>return identity_func(user)<EOL><DEDENT>else:<EOL><INDENT>return user.get_username()<EOL><DEDENT><DEDENT><DEDENT>except (KeyError, AttributeError):<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Get the identity of a logged in user from a template context.\n\nThe `prefix` argument is used to provide different identities to\ndifferent analytics services.  The `identity_func` argument is a\nfunction that returns the identity of the user; by default the\nidentity is the username.", "id": "f14870:m3"}
{"signature": "def get_user_is_authenticated(user):", "body": "if callable(user.is_authenticated):<EOL><INDENT>return user.is_authenticated()<EOL><DEDENT>else:<EOL><INDENT>return user.is_authenticated<EOL><DEDENT>", "docstring": "Check if the user is authenticated.\n\n    This is a compatibility function needed to support both Django 1.x and 2.x;\n    Django 2.x turns the function into a proper boolean so function calls will\n    fail.", "id": "f14870:m2"}
{"signature": "def disable_html(html, service):", "body": "return HTML_COMMENT % {'<STR_LIT:html>': html, '<STR_LIT>': service}<EOL>", "docstring": "Disable HTML code by commenting it out.\n\nThe `service` argument is used to display a friendly message.", "id": "f14870:m6"}
{"signature": "@force.setter<EOL><INDENT>def force(self, force):<DEDENT>", "body": "self.ode_body.setForce(tuple(force))<EOL>", "docstring": "Set the force acting on this body.\n\n        Parameters\n        ----------\n        force : 3-tuple of float\n            The desired force acting on this body, in world coordinates.", "id": "f14887:c1:m16"}
{"signature": "@property<EOL><INDENT>def feedback(self):<DEDENT>", "body": "return self.ode_obj.getFeedback()<EOL>", "docstring": "Feedback buffer (list of 3-tuples) for this ODE motor/joint.", "id": "f14887:c6:m0"}
{"signature": "def step(self, substeps=<NUM_LIT:2>):", "body": "self.frame_no += <NUM_LIT:1><EOL>dt = self.dt / substeps<EOL>for _ in range(substeps):<EOL><INDENT>self.ode_contactgroup.empty()<EOL>self.ode_space.collide(None, self.on_collision)<EOL>self.ode_world.step(dt)<EOL><DEDENT>", "docstring": "Step the world forward by one frame.\n\n        Parameters\n        ----------\n        substeps : int, optional\n            Split the step into this many sub-steps. This helps to prevent the\n            time delta for an update from being too large.", "id": "f14887:c17:m16"}
{"signature": "@property<EOL><INDENT>def axes(self):<DEDENT>", "body": "return [np.array(self.ode_obj.getAxis1()),<EOL>np.array(self.ode_obj.getAxis2())]<EOL>", "docstring": "A list of axes of rotation for this joint.", "id": "f14887:c15:m0"}
{"signature": "@property<EOL><INDENT>def position_rates(self):<DEDENT>", "body": "return [self.ode_obj.getPositionRate(i) for i in range(self.LDOF)]<EOL>", "docstring": "List of position rates for linear degrees of freedom.", "id": "f14887:c6:m2"}
{"signature": "def _set_params(target, param, values, dof):", "body": "if not isinstance(values, (list, tuple, np.ndarray)):<EOL><INDENT>values = [values] * dof<EOL><DEDENT>assert dof == len(values)<EOL>for s, value in zip(['<STR_LIT>', '<STR_LIT:2>', '<STR_LIT:3>'][:dof], values):<EOL><INDENT>target.setParam(getattr(ode, '<STR_LIT>'.format(param, s)), value)<EOL><DEDENT>", "docstring": "Set the given param for each of the DOFs for a joint.", "id": "f14887:m1"}
{"signature": "@property<EOL><INDENT>def axes(self):<DEDENT>", "body": "return [np.array(self.ode_obj.getAxis())]<EOL>", "docstring": "Axis of rotation for this joint.", "id": "f14887:c13:m2"}
{"signature": "def add_torques(self, torques):", "body": "self.ode_obj.addTorques(*torques)<EOL>", "docstring": "Add the given torques along this motor's axes.\n\n        Parameters\n        ----------\n        torques : sequence of float\n            A sequence of torque values to apply to this motor's axes.", "id": "f14887:c8:m4"}
{"signature": "@property<EOL><INDENT>def angle_rates(self):<DEDENT>", "body": "return [self.ode_obj.getAngle1Rate(), self.ode_obj.getAngle2Rate()]<EOL>", "docstring": "A list of two angle rates for this joint's degrees of freedom.", "id": "f14887:c15:m3"}
{"signature": "def get_joint(self, key):", "body": "return self._joints.get(key, None)<EOL>", "docstring": "Get a joint by key.\n\n        Parameters\n        ----------\n        key : str\n            The key for a joint to look up.\n\n        Returns\n        -------\n        joint : :class:`Joint`\n            The joint in the world with the given key, or None if there is no\n            such joint.", "id": "f14887:c17:m10"}
{"signature": "@property<EOL><INDENT>def torque(self):<DEDENT>", "body": "return np.array(self.ode_body.getTorque())<EOL>", "docstring": "Current net torque acting on this body (in world coordinates).", "id": "f14887:c1:m17"}
{"signature": "def relative_offset_to_world(self, offset):", "body": "return np.array(self.body_to_world(offset * self.dimensions / <NUM_LIT:2>))<EOL>", "docstring": "Convert a relative body offset to world coordinates.\n\n        Parameters\n        ----------\n        offset : 3-tuple of float\n            The offset of the desired point, given as a relative fraction of the\n            size of this body. For example, offset (0, 0, 0) is the center of\n            the body, while (0.5, -0.2, 0.1) describes a point halfway from the\n            center towards the maximum x-extent of the body, 20% of the way from\n            the center towards the minimum y-extent, and 10% of the way from the\n            center towards the maximum z-extent.\n\n        Returns\n        -------\n        position : 3-tuple of float\n            A position in world coordinates of the given body offset.", "id": "f14887:c1:m26"}
{"signature": "def get_body(self, key):", "body": "return self._bodies.get(key, key)<EOL>", "docstring": "Get a body by key.\n\n        Parameters\n        ----------\n        key : str, None, or :class:`Body`\n            The key for looking up a body. If this is None or a :class:`Body`\n            instance, the key itself will be returned.\n\n        Returns\n        -------\n        body : :class:`Body`\n            The body in the world with the given key.", "id": "f14887:c17:m9"}
{"signature": "@property<EOL><INDENT>def gravity(self):<DEDENT>", "body": "return self.ode_world.getGravity()<EOL>", "docstring": "Current gravity vector in the world.", "id": "f14887:c17:m1"}
{"signature": "def disable_feedback(self):", "body": "self.ode_obj.setFeedback(False)<EOL>", "docstring": "Disable feedback on this ODE object.", "id": "f14887:c6:m24"}
{"signature": "@lo_stops.setter<EOL><INDENT>def lo_stops(self, lo_stops):<DEDENT>", "body": "_set_params(self.ode_obj, '<STR_LIT>', lo_stops, self.ADOF + self.LDOF)<EOL>", "docstring": "Set the lo stop values for this object's degrees of freedom.\n\n        Parameters\n        ----------\n        lo_stops : float or sequence of float\n            A lo stop value to set on all degrees of freedom, or a list\n            containing one such value for each degree of freedom. For rotational\n            degrees of freedom, these values must be in radians.", "id": "f14887:c6:m8"}
{"signature": "@axes.setter<EOL><INDENT>def axes(self, axes):<DEDENT>", "body": "assert self.ADOF == len(axes) or self.LDOF == len(axes)<EOL>for i, axis in enumerate(axes):<EOL><INDENT>if axis is not None:<EOL><INDENT>self.ode_obj.setAxis(i, <NUM_LIT:0>, axis)<EOL><DEDENT><DEDENT>", "docstring": "Set the axes for this object's degrees of freedom.\n\n        Parameters\n        ----------\n        axes : list of axes specifications\n            A list of axis values to set. This list must have the same number of\n            elements as the degrees of freedom of the underlying ODE object.\n            Each element can be\n\n            (a) None, which has no effect on the corresponding axis, or\n            (b) three floats specifying the axis to set.", "id": "f14887:c6:m6"}
{"signature": "@property<EOL><INDENT>def LDOF(self):<DEDENT>", "body": "return self.ode_obj.getNumAxes()<EOL>", "docstring": "Number of linear degrees of freedom for this motor.", "id": "f14887:c9:m0"}
{"signature": "@axes.setter<EOL><INDENT>def axes(self, axes):<DEDENT>", "body": "self.amotor.axes = [axes[<NUM_LIT:0>]]<EOL>self.ode_obj.setAxis(tuple(axes[<NUM_LIT:0>]))<EOL>", "docstring": "Set the angular axis of rotation for this joint.\n\n        Parameters\n        ----------\n        axes : list containing one 3-tuple of floats\n            A list of the axes for this joint. For a hinge joint, which has one\n            degree of freedom, this must contain one 3-tuple specifying the X,\n            Y, and Z axis for the joint.", "id": "f14887:c13:m3"}
{"signature": "@cfm.setter<EOL><INDENT>def cfm(self, cfm):<DEDENT>", "body": "return self.ode_world.setCFM(cfm)<EOL>", "docstring": "Set the global CFM value.\n\n        Parameters\n        ----------\n        cfm : float\n            The desired global CFM value.", "id": "f14887:c17:m4"}
{"signature": "def reset(self):", "body": "pass<EOL>", "docstring": "Reset the state of the world.", "id": "f14887:c17:m18"}
{"signature": "@property<EOL><INDENT>def mass(self):<DEDENT>", "body": "return self.ode_body.getMass()<EOL>", "docstring": "The ODE mass object for this body.", "id": "f14887:c1:m2"}
{"signature": "@max_forces.setter<EOL><INDENT>def max_forces(self, max_forces):<DEDENT>", "body": "_set_params(self.ode_obj, '<STR_LIT>', max_forces, self.ADOF + self.LDOF)<EOL>", "docstring": "Set the maximum forces for this object's degrees of freedom.\n\n        Parameters\n        ----------\n        max_forces : float or sequence of float\n            A maximum force value to set on all degrees of freedom, or a list\n            containing one such value for each degree of freedom.", "id": "f14887:c6:m14"}
{"signature": "@property<EOL><INDENT>def force(self):<DEDENT>", "body": "return np.array(self.ode_body.getForce())<EOL>", "docstring": "Current net force acting on this body (in world coordinates).", "id": "f14887:c1:m15"}
{"signature": "def get_body_states(self):", "body": "return [b.state for b in self.bodies]<EOL>", "docstring": "Return the complete state of all bodies in the world.\n\n        Returns\n        -------\n        states : list of state information tuples\n            A list of body state information for each body in the world. See\n            :func:`Body.state`.", "id": "f14887:c17:m14"}
{"signature": "def needs_reset(self):", "body": "return False<EOL>", "docstring": "Return True iff the world needs to be reset.", "id": "f14887:c17:m17"}
{"signature": "@axes.setter<EOL><INDENT>def axes(self, axes):<DEDENT>", "body": "assert len(axes) == self.ADOF<EOL>for i, ax in enumerate(axes):<EOL><INDENT>if ax is None:<EOL><INDENT>continue<EOL><DEDENT>if not isinstance(ax, dict):<EOL><INDENT>ax = dict(axis=ax)<EOL><DEDENT>self.ode_obj.setAxis(i, ax.get('<STR_LIT>', <NUM_LIT:0>), ax['<STR_LIT>'])<EOL><DEDENT>", "docstring": "Set the axes for this object's degrees of freedom.\n\n        Parameters\n        ----------\n        axes : list of axis parameters\n            A list of axis values to set. This list must have the same number of\n            elements as the degrees of freedom of the underlying ODE object.\n            Each element can be\n\n            (a) None, which has no effect on the corresponding axis, or\n            (b) three floats specifying the axis to set, or\n            (c) a dictionary with an \"axis\" key specifying the axis to set and\n                an optional \"rel\" key (defaults to 0) specifying the relative\n                body to set the axis on.", "id": "f14887:c8:m3"}
{"signature": "@property<EOL><INDENT>def axes(self):<DEDENT>", "body": "return [np.array(self.ode_obj.getAxis())]<EOL>", "docstring": "Axis of rotation and displacement for this joint.", "id": "f14887:c14:m0"}
{"signature": "def connect_to(self, joint, other_body, offset=(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>), other_offset=(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>),<EOL>**kwargs):", "body": "anchor = self.world.move_next_to(self, other_body, offset, other_offset)<EOL>self.world.join(joint, self, other_body, anchor=anchor, **kwargs)<EOL>", "docstring": "Move another body next to this one and join them together.\n\n        This method will move the ``other_body`` so that the anchor points for\n        the joint coincide. It then creates a joint to fasten the two bodies\n        together. See :func:`World.move_next_to` and :func:`World.join`.\n\n        Parameters\n        ----------\n        joint : str\n            The type of joint to use when connecting these bodies.\n        other_body : :class:`Body` or str\n            The other body to join with this one.\n        offset : 3-tuple of float, optional\n            The body-relative offset where the anchor for the joint should be\n            placed. Defaults to (0, 0, 0). See :func:`World.move_next_to` for a\n            description of how offsets are specified.\n        other_offset : 3-tuple of float, optional\n            The offset on the second body where the joint anchor should be\n            placed. Defaults to (0, 0, 0). Like ``offset``, this is given as an\n            offset relative to the size and shape of ``other_body``.", "id": "f14887:c1:m30"}
{"signature": "@velocities.setter<EOL><INDENT>def velocities(self, velocities):<DEDENT>", "body": "_set_params(self.ode_obj, '<STR_LIT>', velocities, self.ADOF + self.LDOF)<EOL>", "docstring": "Set the target velocities for this object's degrees of freedom.\n\n        Parameters\n        ----------\n        velocities : float or sequence of float\n            A target velocity value to set on all degrees of freedom, or a list\n            containing one such value for each degree of freedom. For rotational\n            degrees of freedom, these values must be in radians / second.", "id": "f14887:c6:m12"}
{"signature": "@state.setter<EOL><INDENT>def state(self, state):<DEDENT>", "body": "assert self.name == state.name,'<STR_LIT>'.format(state.name, self.name)<EOL>self.position = state.position<EOL>self.quaternion = state.quaternion<EOL>self.linear_velocity = state.linear_velocity<EOL>self.angular_velocity = state.angular_velocity<EOL>", "docstring": "Set the state of this body.\n\n        Parameters\n        ----------\n        state : BodyState tuple\n            The desired state of the body.", "id": "f14887:c1:m4"}
{"signature": "@property<EOL><INDENT>def joints(self):<DEDENT>", "body": "for k in sorted(self._joints):<EOL><INDENT>yield self._joints[k]<EOL><DEDENT>", "docstring": "Sequence of all joints in the world, sorted by name.", "id": "f14887:c17:m8"}
{"signature": "@erp.setter<EOL><INDENT>def erp(self, erp):<DEDENT>", "body": "return self.ode_world.setERP(erp)<EOL>", "docstring": "Set the global ERP value.\n\n        Parameters\n        ----------\n        erp : float\n            The desired global ERP value.", "id": "f14887:c17:m6"}
{"signature": "@property<EOL><INDENT>def positions(self):<DEDENT>", "body": "return [self.ode_obj.getPosition()]<EOL>", "docstring": "List of positions for this joint's linear degrees of freedom.", "id": "f14887:c12:m0"}
{"signature": "@property<EOL><INDENT>def lo_stops(self):<DEDENT>", "body": "return _get_params(self.ode_obj, '<STR_LIT>', self.ADOF + self.LDOF)<EOL>", "docstring": "List of lo stop values for this object's degrees of freedom.", "id": "f14887:c6:m7"}
{"signature": "def set_body_states(self, states):", "body": "for state in states:<EOL><INDENT>self.get_body(state.name).state = state<EOL><DEDENT>", "docstring": "Set the states of some bodies in the world.\n\n        Parameters\n        ----------\n        states : sequence of states\n            A complete state tuple for one or more bodies in the world. See\n            :func:`get_body_states`.", "id": "f14887:c17:m15"}
{"signature": "@property<EOL><INDENT>def position(self):<DEDENT>", "body": "return np.array(self.ode_body.getPosition())<EOL>", "docstring": "The (x, y, z) coordinates of the center of this body.", "id": "f14887:c1:m5"}
{"signature": "def add_torques(self, *torques):", "body": "self.amotor.add_torques(*torques)<EOL>", "docstring": "Add the given torques along this joint's axes.\n\n        Parameters\n        ----------\n        torques : sequence of float\n            A sequence of torque values to apply to this motor's axes.", "id": "f14887:c10:m4"}
{"signature": "@property<EOL><INDENT>def angular_velocity(self):<DEDENT>", "body": "return np.array(self.ode_body.getAngularVel())<EOL>", "docstring": "Current angular velocity of this body (in world coordinates).", "id": "f14887:c1:m13"}
{"signature": "@hi_stops.setter<EOL><INDENT>def hi_stops(self, hi_stops):<DEDENT>", "body": "_set_params(self.ode_obj, '<STR_LIT>', hi_stops, self.ADOF + self.LDOF)<EOL>", "docstring": "Set the hi stop values for this object's degrees of freedom.\n\n        Parameters\n        ----------\n        hi_stops : float or sequence of float\n            A hi stop value to set on all degrees of freedom, or a list\n            containing one such value for each degree of freedom. For rotational\n            degrees of freedom, these values must be in radians.", "id": "f14887:c6:m10"}
{"signature": "def on_key_press(self, key, modifiers, keymap):", "body": "if key == keymap.ENTER:<EOL><INDENT>self.reset()<EOL>return True<EOL><DEDENT>", "docstring": "Handle an otherwise unhandled keypress event (from a GUI).", "id": "f14887:c17:m19"}
{"signature": "def are_connected(self, body_a, body_b):", "body": "return bool(ode.areConnected(<EOL>self.get_body(body_a).ode_body,<EOL>self.get_body(body_b).ode_body))<EOL>", "docstring": "Determine whether the given bodies are currently connected.\n\n        Parameters\n        ----------\n        body_a : str or :class:`Body`\n            One body to test for connectedness. If this is a string, it is\n            treated as the name of a body to look up.\n        body_b : str or :class:`Body`\n            One body to test for connectedness. If this is a string, it is\n            treated as the name of a body to look up.\n\n        Returns\n        -------\n        connected : bool\n            Return True iff the two bodies are connected.", "id": "f14887:c17:m20"}
{"signature": "@gravity.setter<EOL><INDENT>def gravity(self, gravity):<DEDENT>", "body": "return self.ode_world.setGravity(gravity)<EOL>", "docstring": "Set the gravity vector in the world.\n\n        Parameters\n        ----------\n        gravity : 3-tuple of float\n            The vector where gravity should point.", "id": "f14887:c17:m2"}
{"signature": "@property<EOL><INDENT>def axes(self):<DEDENT>", "body": "return [np.array(self.ode_obj.getAxis(i))<EOL>for i in range(self.ADOF or self.LDOF)]<EOL>", "docstring": "List of axes for this object's degrees of freedom.", "id": "f14887:c6:m5"}
{"signature": "@property<EOL><INDENT>def hi_stops(self):<DEDENT>", "body": "return _get_params(self.ode_obj, '<STR_LIT>', self.ADOF + self.LDOF)<EOL>", "docstring": "List of hi stop values for this object's degrees of freedom.", "id": "f14887:c6:m9"}
{"signature": "@linear_velocity.setter<EOL><INDENT>def linear_velocity(self, velocity):<DEDENT>", "body": "self.ode_body.setLinearVel(tuple(velocity))<EOL>", "docstring": "Set the linear velocity for this body.\n\n        Parameters\n        ----------\n        velocity : 3-tuple of float\n            The desired velocity for this body, in world coordinates.", "id": "f14887:c1:m12"}
{"signature": "@angular_velocity.setter<EOL><INDENT>def angular_velocity(self, velocity):<DEDENT>", "body": "self.ode_body.setAngularVel(tuple(velocity))<EOL>", "docstring": "Set the angular velocity for this body.\n\n        Parameters\n        ----------\n        velocity : 3-tuple of float\n            The desired angular velocity for this body, in world coordinates.", "id": "f14887:c1:m14"}
{"signature": "@property<EOL><INDENT>def quaternion(self):<DEDENT>", "body": "return np.array(self.ode_body.getQuaternion())<EOL>", "docstring": "The (w, x, y, z) rotation quaternion for this body.", "id": "f14887:c1:m9"}
{"signature": "@property<EOL><INDENT>def angles(self):<DEDENT>", "body": "return [self.ode_obj.getAngle(i) for i in range(self.ADOF)]<EOL>", "docstring": "List of angles for rotational degrees of freedom.", "id": "f14887:c6:m3"}
{"signature": "@property<EOL><INDENT>def angle_rates(self):<DEDENT>", "body": "return [self.ode_obj.getAngleRate()]<EOL>", "docstring": "List of angle rates for this joint's degrees of freedom.", "id": "f14887:c13:m1"}
{"signature": "def parse_amc(source):", "body": "lines = <NUM_LIT:0><EOL>frames = <NUM_LIT:1><EOL>frame = {}<EOL>degrees = False<EOL>for line in source:<EOL><INDENT>lines += <NUM_LIT:1><EOL>line = line.split('<STR_LIT:#>')[<NUM_LIT:0>].strip()<EOL>if not line:<EOL><INDENT>continue<EOL><DEDENT>if line.startswith('<STR_LIT::>'):<EOL><INDENT>if line.lower().startswith('<STR_LIT>'):<EOL><INDENT>degrees = True<EOL><DEDENT>continue<EOL><DEDENT>if line.isdigit():<EOL><INDENT>if int(line) != frames:<EOL><INDENT>raise RuntimeError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(lines, frames, line))<EOL><DEDENT>yield frame<EOL>frames += <NUM_LIT:1><EOL>frame = {}<EOL>continue<EOL><DEDENT>fields = line.split()<EOL>frame[fields[<NUM_LIT:0>]] = list(map(float, fields[<NUM_LIT:1>:]))<EOL><DEDENT>", "docstring": "Parse an AMC motion capture data file.\n\n    Parameters\n    ----------\n    source : file\n        A file-like object that contains AMC motion capture text.\n\n    Yields\n    ------\n    frame : dict\n        Yields a series of motion capture frames. Each frame is a dictionary\n        that maps a bone name to a list of the DOF configurations for that bone.", "id": "f14888:m2"}
{"signature": "def create_joints(self):", "body": "stack = ['<STR_LIT:root>']<EOL>while stack:<EOL><INDENT>parent = stack.pop()<EOL>for child in self.hierarchy.get(parent, ()):<EOL><INDENT>stack.append(child)<EOL><DEDENT>if parent not in self.bones:<EOL><INDENT>continue<EOL><DEDENT>bone = self.bones[parent]<EOL>body = [b for b in self.bodies if b.name == parent][<NUM_LIT:0>]<EOL>for child in self.hierarchy.get(parent, ()):<EOL><INDENT>child_bone = self.bones[child]<EOL>child_body = [b for b in self.bodies if b.name == child][<NUM_LIT:0>]<EOL>shape = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')[len(child_bone.dof)]<EOL>self.joints.append(self.world.join(shape, body, child_body))<EOL><DEDENT><DEDENT>", "docstring": "Traverse the bone hierarchy and create physics joints.", "id": "f14888:c3:m23"}
{"signature": "def get_body_states(self):", "body": "return [b.state for b in self.bodies]<EOL>", "docstring": "Return a list of the states of all bodies in the skeleton.", "id": "f14890:c0:m22"}
{"signature": "def pid(kp=<NUM_LIT:0.>, ki=<NUM_LIT:0.>, kd=<NUM_LIT:0.>, smooth=<NUM_LIT:0.1>):", "body": "state = dict(p=<NUM_LIT:0>, i=<NUM_LIT:0>, d=<NUM_LIT:0>)<EOL>def control(error, dt=<NUM_LIT:1>):<EOL><INDENT>state['<STR_LIT:d>'] = smooth * state['<STR_LIT:d>'] + (<NUM_LIT:1> - smooth) * (error - state['<STR_LIT:p>']) / dt<EOL>state['<STR_LIT:i>'] += error * dt<EOL>state['<STR_LIT:p>'] = error<EOL>return kp * state['<STR_LIT:p>'] + ki * state['<STR_LIT:i>'] + kd * state['<STR_LIT:d>']<EOL><DEDENT>return control<EOL>", "docstring": "r'''Create a callable that implements a PID controller.\n\n    A PID controller returns a control signal :math:`u(t)` given a history of\n    error measurements :math:`e(0) \\dots e(t)`, using proportional (P), integral\n    (I), and derivative (D) terms, according to:\n\n    .. math::\n\n       u(t) = kp * e(t) + ki * \\int_{s=0}^t e(s) ds + kd * \\frac{de(s)}{ds}(t)\n\n    The proportional term is just the current error, the integral term is the\n    sum of all error measurements, and the derivative term is the instantaneous\n    derivative of the error measurement.\n\n    Parameters\n    ----------\n    kp : float\n        The weight associated with the proportional term of the PID controller.\n    ki : float\n        The weight associated with the integral term of the PID controller.\n    kd : float\n        The weight associated with the derivative term of the PID controller.\n    smooth : float in [0, 1]\n        Derivative values will be smoothed with this exponential average. A\n        value of 1 never incorporates new derivative information, a value of 0.5\n        uses the mean of the historic and new information, and a value of 0\n        discards historic information (i.e., the derivative in this case will be\n        unsmoothed). 
The default is 0.1.\n\n    Returns\n    -------\n    controller : callable (float, float) -> float\n        Returns a function that accepts an error measurement and a delta-time\n        value since the previous measurement, and returns a control signal.", "id": "f14890:m0"}
{"signature": "def set_target_angles(self, angles):", "body": "j = <NUM_LIT:0><EOL>for joint in self.joints:<EOL><INDENT>velocities = [<EOL>ctrl(tgt - cur, self.world.dt) for cur, tgt, ctrl in<EOL>zip(joint.angles, angles[j:j+joint.ADOF], joint.controllers)]<EOL>joint.velocities = velocities<EOL>j += joint.ADOF<EOL><DEDENT>", "docstring": "Move each joint toward a target angle.\n\n        This method uses a PID controller to set a target angular velocity for\n        each degree of freedom in the skeleton, based on the difference between\n        the current and the target angle for the respective DOF.\n\n        PID parameters are by default set to achieve a tiny bit less than\n        complete convergence in one time step, using only the P term (i.e., the\n        P coefficient is set to 1 - \\delta, while I and D coefficients are set\n        to 0). PID parameters can be updated by calling the `set_pid_params`\n        method.\n\n        Parameters\n        ----------\n        angles : list of float\n            A list of the target angles for every joint in the skeleton.", "id": "f14890:c0:m27"}
{"signature": "def set_pid_params(self, *args, **kwargs):", "body": "for joint in self.joints:<EOL><INDENT>joint.target_angles = [None] * joint.ADOF<EOL>joint.controllers = [pid(*args, **kwargs) for i in range(joint.ADOF)]<EOL><DEDENT>", "docstring": "Set PID parameters for all joints in the skeleton.\n\n        Parameters for this method are passed directly to the `pid` constructor.", "id": "f14890:c0:m4"}
{"signature": "def set_body_states(self, states):", "body": "for state in states:<EOL><INDENT>self.world.get_body(state.name).state = state<EOL><DEDENT>", "docstring": "Set the states of all bodies in the skeleton.", "id": "f14890:c0:m23"}
{"signature": "def add_torques(self, torques):", "body": "j = <NUM_LIT:0><EOL>for joint in self.joints:<EOL><INDENT>joint.add_torques(<EOL>list(torques[j:j+joint.ADOF]) + [<NUM_LIT:0>] * (<NUM_LIT:3> - joint.ADOF))<EOL>j += joint.ADOF<EOL><DEDENT>", "docstring": "Add torques for each degree of freedom in the skeleton.\n\n        Parameters\n        ----------\n        torques : list of float\n            A list of the torques to add to each degree of freedom in the\n            skeleton.", "id": "f14890:c0:m28"}
{"signature": "def disable_motors(self):", "body": "self.enable_motors(<NUM_LIT:0>)<EOL>", "docstring": "Disable joint motors in this skeleton.\n\n        This method sets to 0 the maximum force that joint motors are allowed to\n        apply, in addition to disabling torque feedback.", "id": "f14890:c0:m26"}
{"signature": "def as_flat_array(iterables):", "body": "arr = []<EOL>for x in iterables:<EOL><INDENT>arr.extend(x)<EOL><DEDENT>return np.array(arr)<EOL>", "docstring": "Given a sequence of sequences, return a flat numpy array.\n\n    Parameters\n    ----------\n    iterables : sequence of sequence of number\n        A sequence of tuples or lists containing numbers. Typically these come\n        from something that represents each joint in a skeleton, like angle.\n\n    Returns\n    -------\n    ndarray :\n        An array of flattened data from each of the source iterables.", "id": "f14890:m1"}
{"signature": "@property<EOL><INDENT>def body_linear_velocities(self):<DEDENT>", "body": "return as_flat_array(b.linear_velocity for b in self.bodies)<EOL>", "docstring": "Get a list of all current body velocities in the skeleton.", "id": "f14890:c0:m13"}
{"signature": "@property<EOL><INDENT>def joint_torques(self):<DEDENT>", "body": "return as_flat_array(getattr(j, '<STR_LIT>', j).feedback[-<NUM_LIT:1>][:j.ADOF]<EOL>for j in self.joints)<EOL>", "docstring": "Get a list of all current joint torques in the skeleton.", "id": "f14890:c0:m10"}
{"signature": "def render(self, dt):", "body": "for frame in self._frozen:<EOL><INDENT>for body in frame:<EOL><INDENT>self.draw_body(body)<EOL><DEDENT><DEDENT>for body in self.world.bodies:<EOL><INDENT>self.draw_body(body)<EOL><DEDENT>if hasattr(self.world, '<STR_LIT>'):<EOL><INDENT>window.glColor4f(<NUM_LIT>, <NUM_LIT:0.1>, <NUM_LIT:0.1>, <NUM_LIT>)<EOL>window.glLineWidth(<NUM_LIT:3>)<EOL>for j in self.world.markers.joints.values():<EOL><INDENT>window.glBegin(window.GL_LINES)<EOL>window.glVertex3f(*j.getAnchor())<EOL>window.glVertex3f(*j.getAnchor2())<EOL>window.glEnd()<EOL><DEDENT><DEDENT>", "docstring": "Draw all bodies in the world.", "id": "f14891:c1:m4"}
{"signature": "def create_bodies(self):", "body": "self.bodies = {}<EOL>for label in self.channels:<EOL><INDENT>body = self.world.create_body(<EOL>'<STR_LIT>', name='<STR_LIT>'.format(label), radius=<NUM_LIT>)<EOL>body.is_kinematic = True<EOL>body.color = <NUM_LIT>, <NUM_LIT:0.1>, <NUM_LIT:0.1>, <NUM_LIT:0.5><EOL>self.bodies[label] = body<EOL><DEDENT>", "docstring": "Create physics bodies corresponding to each marker in our data.", "id": "f14892:c0:m10"}
{"signature": "def follow_markers(self, start=<NUM_LIT:0>, end=<NUM_LIT>, states=None):", "body": "if states is not None:<EOL><INDENT>self.skeleton.set_body_states(states)<EOL><DEDENT>for frame_no, frame in enumerate(self.markers):<EOL><INDENT>if frame_no < start:<EOL><INDENT>continue<EOL><DEDENT>if frame_no >= end:<EOL><INDENT>break<EOL><DEDENT>for states in self._step_to_marker_frame(frame_no):<EOL><INDENT>yield states<EOL><DEDENT><DEDENT>", "docstring": "Iterate over a set of marker data, dragging its skeleton along.\n\n        Parameters\n        ----------\n        start : int, optional\n            Start following marker data after this frame. Defaults to 0.\n        end : int, optional\n            Stop following marker data after this frame. Defaults to the end of\n            the marker data.\n        states : list of body states, optional\n            If given, set the states of the skeleton bodies to these values\n            before starting to follow the marker data.", "id": "f14892:c1:m5"}
{"signature": "def reposition(self, frame_no):", "body": "for label, j in self.channels.items():<EOL><INDENT>body = self.bodies[label]<EOL>body.position = self.positions[frame_no, j]<EOL>body.linear_velocity = self.velocities[frame_no, j]<EOL><DEDENT>", "docstring": "Reposition markers to a specific frame of data.\n\n        Parameters\n        ----------\n        frame_no : int\n            The frame of data where we should reposition marker bodies. Markers\n            will be positioned in the appropriate places in world coordinates.\n            In addition, linear velocities of the markers will be set according\n            to the data as long as there are no dropouts in neighboring frames.", "id": "f14892:c0:m14"}
{"signature": "@property<EOL><INDENT>def num_frames(self):<DEDENT>", "body": "return self.data.shape[<NUM_LIT:0>]<EOL>", "docstring": "Return the number of frames of marker data.", "id": "f14892:c0:m1"}
{"signature": "def detach(self):", "body": "self.jointgroup.empty()<EOL>self.joints = {}<EOL>", "docstring": "Detach all marker bodies from their associated skeleton bodies.", "id": "f14892:c0:m12"}
{"signature": "def inverse_dynamics(self, angles, start=<NUM_LIT:0>, end=<NUM_LIT>, states=None, max_force=<NUM_LIT:100>):", "body": "if states is not None:<EOL><INDENT>self.skeleton.set_body_states(states)<EOL><DEDENT>for frame_no, frame in enumerate(angles):<EOL><INDENT>if frame_no < start:<EOL><INDENT>continue<EOL><DEDENT>if frame_no >= end:<EOL><INDENT>break<EOL><DEDENT>self.ode_space.collide(None, self.on_collision)<EOL>states = self.skeleton.get_body_states()<EOL>self.skeleton.set_body_states(states)<EOL>self.skeleton.enable_motors(max_force)<EOL>self.skeleton.set_target_angles(angles[frame_no])<EOL>self.ode_world.step(self.dt)<EOL>torques = self.skeleton.joint_torques<EOL>self.skeleton.disable_motors()<EOL>self.skeleton.set_body_states(states)<EOL>self.skeleton.add_torques(torques)<EOL>yield torques<EOL>self.ode_world.step(self.dt)<EOL>self.ode_contactgroup.empty()<EOL><DEDENT>", "docstring": "Follow a set of angle data, yielding dynamic joint torques.\n\n        Parameters\n        ----------\n        angles : ndarray (num-frames x num-dofs)\n            Follow angle data provided by this array of angle values.\n        start : int, optional\n            Start following angle data after this frame. Defaults to the start\n            of the angle data.\n        end : int, optional\n            Stop following angle data after this frame. Defaults to the end of\n            the angle data.\n        states : list of body states, optional\n            If given, set the states of the skeleton bodies to these values\n            before starting to follow the marker data.\n        max_force : float, optional\n            Allow each degree of freedom in the skeleton to exert at most this\n            force when attempting to follow the given joint angles. Defaults to\n            100N. 
Setting this value to be large results in more accurate\n            following but can cause oscillations in the PID controllers,\n            resulting in noisy torques.\n\n        Returns\n        -------\n        torques : sequence of torque frames\n            Returns a generator of joint torque data for the skeleton. One set\n            of joint torques will be generated for each frame of angle data\n            between `start` and `end`.", "id": "f14892:c1:m8"}
{"signature": "def step(self, substeps=<NUM_LIT:2>):", "body": "<EOL>self.frame_no += <NUM_LIT:1><EOL>try:<EOL><INDENT>next(self.follower)<EOL><DEDENT>except (AttributeError, StopIteration) as err:<EOL><INDENT>self.reset()<EOL><DEDENT>", "docstring": "Advance the physics world by one step.\n\n        Typically this is called as part of a :class:`pagoda.viewer.Viewer`, but\n        it can also be called manually (or some other stepping mechanism\n        entirely can be used).", "id": "f14892:c1:m2"}
{"signature": "def load_attachments(self, source, skeleton):", "body": "self.targets = {}<EOL>self.offsets = {}<EOL>filename = source<EOL>if isinstance(source, str):<EOL><INDENT>source = open(source)<EOL><DEDENT>else:<EOL><INDENT>filename = '<STR_LIT>'.format(id(source))<EOL><DEDENT>for i, line in enumerate(source):<EOL><INDENT>tokens = line.split('<STR_LIT:#>')[<NUM_LIT:0>].strip().split()<EOL>if not tokens:<EOL><INDENT>continue<EOL><DEDENT>label = tokens.pop(<NUM_LIT:0>)<EOL>if label not in self.channels:<EOL><INDENT>logging.info('<STR_LIT>', filename, i, label)<EOL>continue<EOL><DEDENT>if not tokens:<EOL><INDENT>continue<EOL><DEDENT>name = tokens.pop(<NUM_LIT:0>)<EOL>bodies = [b for b in skeleton.bodies if b.name == name]<EOL>if len(bodies) != <NUM_LIT:1>:<EOL><INDENT>logging.info('<STR_LIT>',<EOL>filename, i, len(bodies), name)<EOL>continue<EOL><DEDENT>b = self.targets[label] = bodies[<NUM_LIT:0>]<EOL>o = self.offsets[label] =np.array(list(map(float, tokens))) * b.dimensions / <NUM_LIT:2><EOL>logging.info('<STR_LIT>', label, b.name, o)<EOL><DEDENT>", "docstring": "Load attachment configuration from the given text source.\n\n        The attachment configuration file has a simple format. After discarding\n        Unix-style comments (any part of a line that starts with the pound (#)\n        character), each line in the file is then expected to have the following\n        format::\n\n            marker-name body-name X Y Z\n\n        The marker name must correspond to an existing \"channel\" in our marker\n        data. The body name must correspond to a rigid body in the skeleton. 
The\n        X, Y, and Z coordinates specify the body-relative offsets where the\n        marker should be attached: 0 corresponds to the center of the body along\n        the given axis, while -1 and 1 correspond to the minimal (maximal,\n        respectively) extent of the body's bounding box along the corresponding\n        dimension.\n\n        Parameters\n        ----------\n        source : str or file-like\n            A filename or file-like object that we can use to obtain text\n            configuration that describes how markers are attached to skeleton\n            bodies.\n\n        skeleton : :class:`pagoda.skeleton.Skeleton`\n            The skeleton to attach our marker data to.", "id": "f14892:c0:m11"}
{"signature": "def forces(self, dx_tm1=None):", "body": "cfm = self.cfms[self._frame_no][:, None]<EOL>kp = self.erp / (cfm * self.world.dt)<EOL>kd = (<NUM_LIT:1> - self.erp) / cfm<EOL>dx = self.distances()<EOL>F = kp * dx<EOL>if dx_tm1 is not None:<EOL><INDENT>bad = np.isnan(dx) | np.isnan(dx_tm1)<EOL>F[~bad] += (kd * (dx - dx_tm1) / self.world.dt)[~bad]<EOL><DEDENT>return F<EOL>", "docstring": "Return an array of the forces exerted by marker springs.\n\n        Notes\n        -----\n\n        The forces exerted by the marker springs can be approximated by::\n\n          F = kp * dx\n\n        where ``dx`` is the current array of marker distances. An even more\n        accurate value is computed by approximating the velocity of the spring\n        displacement::\n\n          F = kp * dx + kd * (dx - dx_tm1) / dt\n\n        where ``dx_tm1`` is an array of distances from the previous time step.\n\n        Parameters\n        ----------\n        dx_tm1 : ndarray\n            An array of distances from markers to their attachment targets,\n            measured at the previous time step.\n\n        Returns\n        -------\n        F : ndarray\n            An array of forces that the markers are exerting on the skeleton.", "id": "f14892:c0:m16"}
{"signature": "def distances(self):", "body": "distances = []<EOL>for label in self.labels:<EOL><INDENT>joint = self.joints.get(label)<EOL>distances.append([np.nan, np.nan, np.nan] if joint is None else<EOL>np.array(joint.getAnchor()) - joint.getAnchor2())<EOL><DEDENT>return np.array(distances)<EOL>", "docstring": "Get a list of the distances between markers and their attachments.\n\n        Returns\n        -------\n        distances : ndarray of shape (num-markers, 3)\n            Array of distances for each marker joint in our attachment setup. If\n            a marker does not currently have an associated joint (e.g. because\n            it is not currently visible) this will contain NaN for that row.", "id": "f14892:c0:m15"}
{"signature": "def load_c3d(self, filename, start_frame=<NUM_LIT:0>, max_frames=int(<NUM_LIT>)):", "body": "import c3d<EOL>with open(filename, '<STR_LIT:rb>') as handle:<EOL><INDENT>reader = c3d.Reader(handle)<EOL>logging.info('<STR_LIT>',<EOL><NUM_LIT:1> / self.world.dt, reader.point_rate)<EOL>self.channels = self._map_labels_to_channels([<EOL>s.strip() for s in reader.point_labels])<EOL>data = []<EOL>for i, (_, frame, _) in enumerate(reader.read_frames()):<EOL><INDENT>if i >= start_frame:<EOL><INDENT>data.append(frame[:, [<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:4>]])<EOL><DEDENT>if len(data) > max_frames:<EOL><INDENT>break<EOL><DEDENT><DEDENT>self.data = np.array(data)<EOL>if reader.get('<STR_LIT>').string_value.strip().lower() == '<STR_LIT>':<EOL><INDENT>logging.info('<STR_LIT>')<EOL>self.data[:, :, :<NUM_LIT:3>] /= <NUM_LIT><EOL><DEDENT><DEDENT>logging.info('<STR_LIT>', filename, self.data.shape)<EOL>self.process_data()<EOL>self.create_bodies()<EOL>", "docstring": "Load marker data from a C3D file.\n\n        The file will be imported using the c3d module, which must be installed\n        to use this method. (``pip install c3d``)\n\n        Parameters\n        ----------\n        filename : str\n            Name of the C3D file to load.\n        start_frame : int, optional\n            Discard the first N frames. Defaults to 0.\n        max_frames : int, optional\n            Maximum number of frames to load. Defaults to loading all frames.", "id": "f14892:c0:m8"}
{"signature": "def process_data(self):", "body": "self.visibility = self.data[:, :, <NUM_LIT:3>]<EOL>self.positions = self.data[:, :, :<NUM_LIT:3>]<EOL>self.velocities = np.zeros_like(self.positions) + <NUM_LIT:1000><EOL>for frame_no in range(<NUM_LIT:1>, len(self.data) - <NUM_LIT:1>):<EOL><INDENT>prev = self.data[frame_no - <NUM_LIT:1>]<EOL>next = self.data[frame_no + <NUM_LIT:1>]<EOL>for c in range(self.num_markers):<EOL><INDENT>if -<NUM_LIT:1> < prev[c, <NUM_LIT:3>] < <NUM_LIT:100> and -<NUM_LIT:1> < next[c, <NUM_LIT:3>] < <NUM_LIT:100>:<EOL><INDENT>self.velocities[frame_no, c] = (<EOL>next[c, :<NUM_LIT:3>] - prev[c, :<NUM_LIT:3>]) / (<NUM_LIT:2> * self.world.dt)<EOL><DEDENT><DEDENT><DEDENT>self.cfms = np.zeros_like(self.visibility) + self.DEFAULT_CFM<EOL>", "docstring": "Process data to produce velocity and dropout information.", "id": "f14892:c0:m9"}
{"signature": "def _step_to_marker_frame(self, frame_no, dt=None):", "body": "<EOL>self.markers.detach()<EOL>self.markers.reposition(frame_no)<EOL>self.markers.attach(frame_no)<EOL>self.ode_space.collide(None, self.on_collision)<EOL>states = self.skeleton.get_body_states()<EOL>self.skeleton.set_body_states(states)<EOL>yield states<EOL>self.ode_world.step(dt or self.dt)<EOL>self.ode_contactgroup.empty()<EOL>", "docstring": "Update the simulator to a specific frame of marker data.\n\n        This method returns a generator of body states for the skeleton! This\n        generator must be exhausted (e.g., by consuming this call in a for loop)\n        for the simulator to work properly.\n\n        This process involves the following steps:\n\n        - Move the markers to their new location:\n          - Detach from the skeleton\n          - Update marker locations\n          - Reattach to the skeleton\n        - Detect ODE collisions\n        - Yield the states of the bodies in the skeleton\n        - Advance the ODE world one step\n\n        Parameters\n        ----------\n        frame_no : int\n            Step to this frame of marker data.\n        dt : float, optional\n            Step with this time duration. Defaults to ``self.dt``.\n\n        Returns\n        -------\n        states : sequence of state tuples\n            A generator of a sequence of one body state for the skeleton. This\n            generator must be exhausted for the simulation to work properly.", "id": "f14892:c1:m6"}
{"signature": "def inverse_kinematics(self, start=<NUM_LIT:0>, end=<NUM_LIT>, states=None, max_force=<NUM_LIT:20>):", "body": "zeros = None<EOL>if max_force > <NUM_LIT:0>:<EOL><INDENT>self.skeleton.enable_motors(max_force)<EOL>zeros = np.zeros(self.skeleton.num_dofs)<EOL><DEDENT>for _ in self.follow_markers(start, end, states):<EOL><INDENT>if zeros is not None:<EOL><INDENT>self.skeleton.set_target_angles(zeros)<EOL><DEDENT>yield self.skeleton.joint_angles<EOL><DEDENT>", "docstring": "Follow a set of marker data, yielding kinematic joint angles.\n\n        Parameters\n        ----------\n        start : int, optional\n            Start following marker data after this frame. Defaults to 0.\n        end : int, optional\n            Stop following marker data after this frame. Defaults to the end of\n            the marker data.\n        states : list of body states, optional\n            If given, set the states of the skeleton bodies to these values\n            before starting to follow the marker data.\n        max_force : float, optional\n            Allow each degree of freedom in the skeleton to exert at most this\n            force when attempting to maintain its equilibrium position. This\n            defaults to 20N. Set this value higher to simulate a stiff skeleton\n            while following marker data.\n\n        Returns\n        -------\n        angles : sequence of angle frames\n            Returns a generator of joint angle data for the skeleton. One set of\n            joint angles will be generated for each frame of marker data between\n            `start` and `end`.", "id": "f14892:c1:m7"}
{"signature": "def sccs_bit_sync(y,Ns):", "body": "<EOL>rx_symb_d = np.zeros(int(np.fix(len(y)/Ns)))<EOL>track = np.zeros(int(np.fix(len(y)/Ns)))<EOL>bit_count = -<NUM_LIT:1><EOL>y_abs = np.zeros(len(y))<EOL>clk = np.zeros(len(y))<EOL>k = Ns+<NUM_LIT:1> <EOL>for i in range(len(y)):<EOL><INDENT>if i >= Ns: <EOL><INDENT>y_abs[i] = np.abs(np.sum(y[i-Ns+<NUM_LIT:1>:i+<NUM_LIT:1>]))<EOL>if (k == <NUM_LIT:0>):<EOL><INDENT>w_hat = y_abs[i-<NUM_LIT:2>:i+<NUM_LIT:1>]<EOL>bit_count += <NUM_LIT:1><EOL>if w_hat[<NUM_LIT:1>] != <NUM_LIT:0>:<EOL><INDENT>if w_hat[<NUM_LIT:0>] < w_hat[<NUM_LIT:2>]:<EOL><INDENT>k = Ns-<NUM_LIT:1><EOL>clk[i-<NUM_LIT:2>] = <NUM_LIT:1><EOL>rx_symb_d[bit_count] = y[i-<NUM_LIT:2>-int(np.round(Ns/<NUM_LIT:2>))-<NUM_LIT:1>]<EOL><DEDENT>elif w_hat[<NUM_LIT:0>] > w_hat[<NUM_LIT:2>]:<EOL><INDENT>k = Ns+<NUM_LIT:1><EOL>clk[i] = <NUM_LIT:1><EOL>rx_symb_d[bit_count] = y[i-int(np.round(Ns/<NUM_LIT:2>))-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>k = Ns<EOL>clk[i-<NUM_LIT:1>] = <NUM_LIT:1><EOL>rx_symb_d[bit_count] = y[i-<NUM_LIT:1>-int(np.round(Ns/<NUM_LIT:2>))-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>k = Ns<EOL>clk[i-<NUM_LIT:1>] = <NUM_LIT:1><EOL>rx_symb_d[bit_count] = y[i-<NUM_LIT:1>-int(np.round(Ns/<NUM_LIT:2>))]<EOL><DEDENT>track[bit_count] = np.mod(i,Ns)<EOL><DEDENT><DEDENT>k -= <NUM_LIT:1><EOL><DEDENT>rx_symb_d = rx_symb_d[:bit_count]<EOL>return rx_symb_d, clk, track<EOL>", "docstring": "rx_symb_d,clk,track = sccs_bit_sync(y,Ns)\n\n//////////////////////////////////////////////////////\n Symbol synchronization algorithm using SCCS\n//////////////////////////////////////////////////////\n     y = baseband NRZ data waveform\n    Ns = nominal number of samples per symbol\n\nReworked from ECE 5675 Project\nTranslated from m-code version\nMark Wickert April 2014", "id": "f14895:m6"}
{"signature": "def wav2complex(filename):", "body": "fs, x_LR_cols = ss.from_wav(filename)<EOL>x = x_LR_cols[:,<NUM_LIT:0>] + <NUM_LIT>*x_LR_cols[:,<NUM_LIT:1>]<EOL>return fs,x<EOL>", "docstring": "Return a complex signal vector from a wav file that was used to store\nthe real (I) and imaginary (Q) values of a complex signal ndarray. \nThe rate is included as means of recalling the original signal sample \nrate.\n\nfs,x = wav2complex(filename)\n\nMark Wickert April 2014", "id": "f14895:m10"}
{"signature": "def complex2wav(filename,rate,x):", "body": "x_wav = np.hstack((np.array([x.real]).T,np.array([x.imag]).T))<EOL>ss.to_wav(filename, rate, x_wav)<EOL>print('<STR_LIT>')<EOL>", "docstring": "Save a complex signal vector to a wav file for compact binary\nstorage of 16-bit signal samples. The wav left and right channels\nare used to save real (I) and imaginary (Q) values. The rate is\njust a convent way of documenting the original signal sample rate.\n\ncomplex2wav(filename,rate,x)\n\nMark Wickert April 2014", "id": "f14895:m9"}
{"signature": "def fsk_BEP(rx_data,m,flip):", "body": "Nbits = len(rx_data)<EOL>c = dc.m_seq(m)<EOL>if flip == <NUM_LIT:1>:<EOL><INDENT>c.shape = (<NUM_LIT:1>,len(c))<EOL>c = np.fliplr(c).flatten()<EOL><DEDENT>L = int(np.ceil(Nbits/float(len(c))))<EOL>tx_data = np.dot(c.reshape(len(c),<NUM_LIT:1>),np.ones((<NUM_LIT:1>,L)))<EOL>tx_data = tx_data.T.reshape((<NUM_LIT:1>,len(c)*L)).flatten()<EOL>tx_data = tx_data[:Nbits]<EOL>tx_data = <NUM_LIT:2>*tx_data - <NUM_LIT:1><EOL>Bit_count,Bit_errors = dc.BPSK_BEP(rx_data,tx_data)<EOL>print('<STR_LIT>' % (len(rx_data),len(tx_data)))<EOL>Pe = Bit_errors/float(Bit_count)<EOL>print('<STR_LIT>')<EOL>print('<STR_LIT>' % Bit_errors)<EOL>print('<STR_LIT>' % Bit_count)<EOL>print('<STR_LIT>' % Pe)<EOL>print('<STR_LIT>')<EOL>", "docstring": "fsk_BEP(rx_data,m,flip)\n\nEstimate the BEP of the data bits recovered\nby the RTL-SDR Based FSK Receiver.\n\nThe reference m-sequence generated in Python\nwas found to produce sequences running in the opposite\ndirection relative to the m-sequences generated by the\nmbed. To allow error detection the reference m-sequence\nis flipped.\n\nMark Wickert April 2014", "id": "f14895:m7"}
{"signature": "def cyclic_encoder(self,x,G='<STR_LIT>'):", "body": "<EOL>if(len(x) % self.k or len(x) < self.k):<EOL><INDENT>raise ValueError('<STR_LIT>' %self.k)<EOL><DEDENT>if(np.dtype(x[<NUM_LIT:0>]) != int):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>Num_blocks = int(len(x) / self.k)<EOL>codewords = np.zeros((Num_blocks,self.n),dtype=int)<EOL>x = np.reshape(x,(Num_blocks,self.k))<EOL>for p in range(Num_blocks):<EOL><INDENT>S = np.zeros(len(self.G))<EOL>codeword = np.zeros(self.n)<EOL>current_block = x[p,:]<EOL>for i in range(<NUM_LIT:0>,self.n):<EOL><INDENT>if(i < self.k):<EOL><INDENT>S[<NUM_LIT:0>] = current_block[i]<EOL>S0temp = <NUM_LIT:0><EOL>for m in range(<NUM_LIT:0>,len(self.G)):<EOL><INDENT>if(self.G[m] == '<STR_LIT:1>'):<EOL><INDENT>S0temp = S0temp + S[m]<EOL><DEDENT><DEDENT>S0temp = S0temp % <NUM_LIT:2><EOL>S = np.roll(S,<NUM_LIT:1>)<EOL>codeword[i] = current_block[i]<EOL>S[<NUM_LIT:1>] = S0temp<EOL><DEDENT>else:<EOL><INDENT>out = <NUM_LIT:0><EOL>for m in range(<NUM_LIT:1>,len(self.G)):<EOL><INDENT>if(self.G[m] == '<STR_LIT:1>'):<EOL><INDENT>out = out + S[m]<EOL><DEDENT><DEDENT>codeword[i] = out % <NUM_LIT:2><EOL>S = np.roll(S,<NUM_LIT:1>)<EOL>S[<NUM_LIT:1>] = <NUM_LIT:0><EOL><DEDENT><DEDENT>codewords[p,:] = codeword<EOL><DEDENT>codewords = np.reshape(codewords,np.size(codewords))<EOL>return codewords.astype(int)<EOL>", "docstring": "Encodes input bit array x using cyclic block code.\n\nparameters\n----------\nx: vector of source bits to be encoded by block encoder. Numpy array\n   of integers expected.\n\nreturns\n-------\ncodewords: vector of code words generated from input vector\n\nAndrew Smit November 2018", "id": "f14896:c1:m1"}
{"signature": "def block_single_error_Pb_bound(j,SNRdB,coded=True,M=<NUM_LIT:2>):", "body": "Pb = np.zeros_like(SNRdB)<EOL>Ps = np.zeros_like(SNRdB)<EOL>SNR = <NUM_LIT>**(SNRdB/<NUM_LIT>)<EOL>n = <NUM_LIT:2>**j-<NUM_LIT:1><EOL>k = n-j<EOL>for i,SNRn in enumerate(SNR):<EOL><INDENT>if coded: <EOL><INDENT>if M == <NUM_LIT:2>:<EOL><INDENT>Ps[i] = Q_fctn(np.sqrt(k*<NUM_LIT>*SNRn/n))<EOL><DEDENT>else:<EOL><INDENT>Ps[i] = <NUM_LIT>/np.log2(M)*(<NUM_LIT:1> - <NUM_LIT:1>/np.sqrt(M))*np.gaussQ(np.sqrt(<NUM_LIT:3>*np.log2(M)/(M-<NUM_LIT:1>)*SNRn))/k<EOL><DEDENT><DEDENT>else: <EOL><INDENT>if M == <NUM_LIT:2>:<EOL><INDENT>Pb[i] = Q_fctn(np.sqrt(<NUM_LIT>*SNRn))<EOL><DEDENT>else:<EOL><INDENT>Pb[i] = <NUM_LIT>/np.log2(M)*(<NUM_LIT:1> - <NUM_LIT:1>/np.sqrt(M))*np.gaussQ(np.sqrt(<NUM_LIT:3>*np.log2(M)/(M-<NUM_LIT:1>)*SNRn))<EOL><DEDENT><DEDENT><DEDENT>if coded:<EOL><INDENT>Pb = ser2ber(M,n,<NUM_LIT:3>,<NUM_LIT:1>,Ps)<EOL><DEDENT>return Pb<EOL>", "docstring": "Finds the bit error probability bounds according to Ziemer and Tranter \npage 656.\n\nparameters:\n-----------\nj: number of parity bits used in single error correction block code\nSNRdB: Eb/N0 values in dB\ncoded: Select single error correction code (True) or uncoded (False)\nM: modulation order\n\nreturns:\n--------\nPb: bit error probability bound", "id": "f14896:m1"}
{"signature": "def hamm_gen(self,j):", "body": "if(j < <NUM_LIT:3>):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>n = <NUM_LIT:2>**j-<NUM_LIT:1><EOL>k = n-j<EOL>G = np.zeros((k,n),dtype=int)<EOL>H = np.zeros((j,n),dtype=int)<EOL>P = np.zeros((j,k),dtype=int)<EOL>R = np.zeros((k,n),dtype=int)<EOL>for i in range(<NUM_LIT:1>,n+<NUM_LIT:1>):<EOL><INDENT>b = list(binary(i,j))<EOL>for m in range(<NUM_LIT:0>,len(b)):<EOL><INDENT>b[m] = int(b[m])<EOL><DEDENT>H[:,i-<NUM_LIT:1>] = np.array(b)<EOL><DEDENT>H1 = np.zeros((<NUM_LIT:1>,j),dtype=int)<EOL>H2 = np.zeros((<NUM_LIT:1>,j),dtype=int)<EOL>for i in range(<NUM_LIT:0>,j):<EOL><INDENT>idx1 = <NUM_LIT:2>**i-<NUM_LIT:1><EOL>idx2 = n-i-<NUM_LIT:1><EOL>H1[<NUM_LIT:0>,:] = H[:,idx1]<EOL>H2[<NUM_LIT:0>,:] = H[:,idx2]<EOL>H[:,idx1] = H2<EOL>H[:,idx2] = H1<EOL><DEDENT>P = H[:,:k]<EOL>G[:,:k] = np.diag(np.ones(k))<EOL>G[:,k:] = P.T<EOL>R[:,:k] = np.diag(np.ones(k))<EOL>return G, H, R, n, k<EOL>", "docstring": "Generates parity check matrix (H) and generator\nmatrix (G). \n\nParameters\n----------\nj: Number of Hamming code parity bits with n = 2^j-1 and k = n-j\n\nreturns\n-------\nG: Systematic generator matrix with left-side identity matrix\nH: Systematic parity-check matrix with right-side identity matrix\nR: k x k identity matrix\nn: number of total bits/block\nk: number of source bits/block\n\nAndrew Smit November 2018", "id": "f14896:c0:m1"}
{"signature": "def hard_Pk(k,R,SNR,M=<NUM_LIT:2>):", "body": "k = int(k)<EOL>if M == <NUM_LIT:2>:<EOL><INDENT>p = Q_fctn(np.sqrt(<NUM_LIT>*R*SNR))<EOL><DEDENT>else:<EOL><INDENT>p = <NUM_LIT>/np.log2(M)*(<NUM_LIT:1> - <NUM_LIT:1.>/np.sqrt(M))*Q_fctn(np.sqrt(<NUM_LIT:3>*R*np.log2(M)/float(M-<NUM_LIT:1>)*SNR))<EOL><DEDENT>Pk = <NUM_LIT:0><EOL>if np.mod(k,<NUM_LIT:2>) == <NUM_LIT:0>:<EOL><INDENT>for e in range(int(k/<NUM_LIT:2>+<NUM_LIT:1>),int(k+<NUM_LIT:1>)):<EOL><INDENT>Pk += float(factorial(k))/(factorial(e)*factorial(k-e))*p**e*(<NUM_LIT:1>-p)**(k-e);<EOL><DEDENT>Pk += <NUM_LIT:1.>/<NUM_LIT:2>*float(factorial(k))/(factorial(int(k/<NUM_LIT:2>))*factorial(int(k-k/<NUM_LIT:2>)))*p**(k/<NUM_LIT:2>)*(<NUM_LIT:1>-p)**(k/<NUM_LIT:2>);<EOL><DEDENT>elif np.mod(k,<NUM_LIT:2>) == <NUM_LIT:1>:<EOL><INDENT>for e in range(int((k+<NUM_LIT:1>)//<NUM_LIT:2>),int(k+<NUM_LIT:1>)):<EOL><INDENT>Pk += factorial(k)/(factorial(e)*factorial(k-e))*p**e*(<NUM_LIT:1>-p)**(k-e);<EOL><DEDENT><DEDENT>return Pk<EOL>", "docstring": "Pk = hard_Pk(k,R,SNR)\n\nCalculates Pk as found in Ziemer & Peterson eq. 7-12, p.505\n\nMark Wickert and Andrew Smit 2018", "id": "f14897:m2"}
{"signature": "def conv_Pb_bound(R,dfree,Ck,SNRdB,hard_soft,M=<NUM_LIT:2>):", "body": "Pb = np.zeros_like(SNRdB)<EOL>SNR = <NUM_LIT>**(SNRdB/<NUM_LIT>)<EOL>for n,SNRn in enumerate(SNR):<EOL><INDENT>for k in range(dfree,len(Ck)+dfree):<EOL><INDENT>if hard_soft == <NUM_LIT:0>: <EOL><INDENT>Pb[n] += Ck[k-dfree]*hard_Pk(k,R,SNRn,M)<EOL><DEDENT>elif hard_soft == <NUM_LIT:1>: <EOL><INDENT>Pb[n] += Ck[k-dfree]*soft_Pk(k,R,SNRn,M)<EOL><DEDENT>else: <EOL><INDENT>if M == <NUM_LIT:2>:<EOL><INDENT>Pb[n] = Q_fctn(np.sqrt(<NUM_LIT>*SNRn))<EOL><DEDENT>else:<EOL><INDENT>Pb[n] = <NUM_LIT>/np.log2(M)*(<NUM_LIT:1> - <NUM_LIT:1>/np.sqrt(M))*np.gaussQ(np.sqrt(<NUM_LIT:3>*np.log2(M)/(M-<NUM_LIT:1>)*SNRn));<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return Pb<EOL>", "docstring": "Coded bit error probabilty\n\nConvolution coding bit error probability upper bound\naccording to Ziemer & Peterson 7-16, p. 507\n\nMark Wickert November 2014\n\nParameters\n----------\nR: Code rate\ndfree: Free distance of the code\nCk: Weight coefficient\nSNRdB: Signal to noise ratio in dB\nhard_soft: 0 hard, 1 soft, 2 uncoded\nM: M-ary\n\nExamples\n--------\n>>> import numpy as np\n>>> from sk_dsp_comm import fec_conv as fec\n>>> import matplotlib.pyplot as plt\n>>> SNRdB = np.arange(2,12,.1)\n>>> Pb = fec.conv_Pb_bound(1./2,10,[36, 0, 211, 0, 1404, 0, 11633],SNRdB,2)\n>>> Pb_1_2 = fec.conv_Pb_bound(1./2,10,[36, 0, 211, 0, 1404, 0, 11633],SNRdB,1)\n>>> Pb_3_4 = fec.conv_Pb_bound(3./4,4,[164, 0, 5200, 0, 151211, 0, 3988108],SNRdB,1)\n>>> plt.semilogy(SNRdB,Pb)\n>>> plt.semilogy(SNRdB,Pb_1_2)\n>>> plt.semilogy(SNRdB,Pb_3_4)\n>>> plt.axis([2,12,1e-7,1e0])\n>>> plt.xlabel(r'$E_b/N_0$ (dB)')\n>>> plt.ylabel(r'Symbol Error Probability')\n>>> plt.legend(('Uncoded BPSK','R=1/2, K=7, Soft','R=3/4 (punc), K=7, Soft'),loc='best')\n>>> plt.grid();\n>>> plt.show()\n\nNotes\n-----\nThe code rate R is given by :math:`R_{s} = \\\\frac{k}{n}`.\nMark Wickert and Andrew Smit 2018", "id": "f14897:m1"}
{"signature": "def binary(num, length=<NUM_LIT:8>):", "body": "return format(num, '<STR_LIT>'.format(length))<EOL>", "docstring": "Format an integer to binary without the leading '0b'", "id": "f14897:m0"}
{"signature": "def conv_encoder(self,input,state):", "body": "output = []<EOL>if(self.rate == Fraction(<NUM_LIT:1>,<NUM_LIT:2>)):<EOL><INDENT>for n in range(len(input)):<EOL><INDENT>u1 = int(input[n])<EOL>u2 = int(input[n])<EOL>for m in range(<NUM_LIT:1>,self.constraint_length):<EOL><INDENT>if int(self.G_polys[<NUM_LIT:0>][m]) == <NUM_LIT:1>: <EOL><INDENT>u1 = u1 ^ int(state[m-<NUM_LIT:1>])<EOL><DEDENT>if int(self.G_polys[<NUM_LIT:1>][m]) == <NUM_LIT:1>: <EOL><INDENT>u2 = u2 ^ int(state[m-<NUM_LIT:1>])<EOL><DEDENT><DEDENT>output = np.hstack((output, [u1, u2]))<EOL>state = bin(int(input[n]))[-<NUM_LIT:1>] + state[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>elif(self.rate == Fraction(<NUM_LIT:1>,<NUM_LIT:3>)):<EOL><INDENT>for n in range(len(input)):<EOL><INDENT>if(int(self.G_polys[<NUM_LIT:0>][<NUM_LIT:0>]) == <NUM_LIT:1>):<EOL><INDENT>u1 = int(input[n])<EOL><DEDENT>else:<EOL><INDENT>u1 = <NUM_LIT:0><EOL><DEDENT>if(int(self.G_polys[<NUM_LIT:1>][<NUM_LIT:0>]) == <NUM_LIT:1>):<EOL><INDENT>u2 = int(input[n])<EOL><DEDENT>else:<EOL><INDENT>u2 = <NUM_LIT:0><EOL><DEDENT>if(int(self.G_polys[<NUM_LIT:2>][<NUM_LIT:0>]) == <NUM_LIT:1>):<EOL><INDENT>u3 = int(input[n])<EOL><DEDENT>else:<EOL><INDENT>u3 = <NUM_LIT:0><EOL><DEDENT>for m in range(<NUM_LIT:1>,self.constraint_length):<EOL><INDENT>if int(self.G_polys[<NUM_LIT:0>][m]) == <NUM_LIT:1>: <EOL><INDENT>u1 = u1 ^ int(state[m-<NUM_LIT:1>])<EOL><DEDENT>if int(self.G_polys[<NUM_LIT:1>][m]) == <NUM_LIT:1>: <EOL><INDENT>u2 = u2 ^ int(state[m-<NUM_LIT:1>])<EOL><DEDENT>if int(self.G_polys[<NUM_LIT:2>][m]) == <NUM_LIT:1>: <EOL><INDENT>u3 = u3 ^ int(state[m-<NUM_LIT:1>])<EOL><DEDENT><DEDENT>output = np.hstack((output, [u1, u2, u3]))<EOL>state = bin(int(input[n]))[-<NUM_LIT:1>] + state[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>return output, state<EOL>", "docstring": "output, state = conv_encoder(input,state)\nWe get the 1/2 or 1/3 rate from self.rate\nPolys G1 and G2 are entered as binary strings, e.g,\nG1 = '111' and G2 = '101' for K = 3\nG1 = 
'1011011' and G2 = '1111001' for K = 7\nG3 is also included for rate 1/3\nInput state as a binary string of length K-1, e.g., '00' or '0000000' \ne.g., state = '00' for K = 3\ne.g., state = '000000' for K = 7\nMark Wickert and Andrew Smit 2018", "id": "f14897:c3:m3"}
{"signature": "def __init__(self,G = ('<STR_LIT>','<STR_LIT>'), Depth = <NUM_LIT:10>):", "body": "self.G_polys = G<EOL>self.constraint_length = len(self.G_polys[<NUM_LIT:0>]) <EOL>self.Nstates = <NUM_LIT:2>**(self.constraint_length-<NUM_LIT:1>) <EOL>self.decision_depth = Depth<EOL>self.input_zero = trellis_nodes(self.Nstates)<EOL>self.input_one = trellis_nodes(self.Nstates)<EOL>self.paths = trellis_paths(self.Nstates,self.decision_depth)<EOL>self.rate = Fraction(<NUM_LIT:1>,len(G))<EOL>if(len(G) == <NUM_LIT:2> or len(G) == <NUM_LIT:3>):<EOL><INDENT>print('<STR_LIT>' %(self.rate))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL>raise ValueError('<STR_LIT>')<EOL>pass<EOL><DEDENT>for m in range(self.Nstates):<EOL><INDENT>self.input_zero.fn[m] = m<EOL>self.input_one.fn[m] = m<EOL>output0,state0 = self.conv_encoder([<NUM_LIT:0>],<EOL>binary(m,self.constraint_length-<NUM_LIT:1>))<EOL>output1,state1 = self.conv_encoder([<NUM_LIT:1>],<EOL>binary(m,self.constraint_length-<NUM_LIT:1>))<EOL>self.input_zero.tn[m] = int(state0,<NUM_LIT:2>)<EOL>self.input_one.tn[m] = int(state1,<NUM_LIT:2>)<EOL>if(self.rate == Fraction(<NUM_LIT:1>,<NUM_LIT:2>)):<EOL><INDENT>self.input_zero.out_bits[m] = <NUM_LIT:2>*output0[<NUM_LIT:0>] + output0[<NUM_LIT:1>]<EOL>self.input_one.out_bits[m] = <NUM_LIT:2>*output1[<NUM_LIT:0>] + output1[<NUM_LIT:1>]<EOL><DEDENT>elif(self.rate == Fraction(<NUM_LIT:1>,<NUM_LIT:3>)):<EOL><INDENT>self.input_zero.out_bits[m] = <NUM_LIT:4>*output0[<NUM_LIT:0>] + <NUM_LIT:2>*output0[<NUM_LIT:1>] + output0[<NUM_LIT:2>]<EOL>self.input_one.out_bits[m] = <NUM_LIT:4>*output1[<NUM_LIT:0>] + <NUM_LIT:2>*output1[<NUM_LIT:1>] + output1[<NUM_LIT:2>]<EOL><DEDENT><DEDENT>self.branches = trellis_branches(self.Nstates)<EOL>for m in range(self.Nstates):<EOL><INDENT>match_zero_idx = np.where(self.input_zero.tn == m)<EOL>match_one_idx = np.where(self.input_one.tn == m)<EOL>if len(match_zero_idx[<NUM_LIT:0>]) != <NUM_LIT:0>:<EOL><INDENT>self.branches.states1[m] = 
self.input_zero.fn[match_zero_idx[<NUM_LIT:0>][<NUM_LIT:0>]]<EOL>self.branches.states2[m] = self.input_zero.fn[match_zero_idx[<NUM_LIT:0>][<NUM_LIT:1>]]<EOL>self.branches.bits1[m] = self.input_zero.out_bits[match_zero_idx[<NUM_LIT:0>][<NUM_LIT:0>]]<EOL>self.branches.bits2[m] = self.input_zero.out_bits[match_zero_idx[<NUM_LIT:0>][<NUM_LIT:1>]]<EOL>self.branches.input1[m] = <NUM_LIT:0><EOL>self.branches.input2[m] = <NUM_LIT:0><EOL><DEDENT>elif len(match_one_idx[<NUM_LIT:0>]) != <NUM_LIT:0>:<EOL><INDENT>self.branches.states1[m] = self.input_one.fn[match_one_idx[<NUM_LIT:0>][<NUM_LIT:0>]]<EOL>self.branches.states2[m] = self.input_one.fn[match_one_idx[<NUM_LIT:0>][<NUM_LIT:1>]]<EOL>self.branches.bits1[m] = self.input_one.out_bits[match_one_idx[<NUM_LIT:0>][<NUM_LIT:0>]]<EOL>self.branches.bits2[m] = self.input_one.out_bits[match_one_idx[<NUM_LIT:0>][<NUM_LIT:1>]]<EOL>self.branches.input1[m] = <NUM_LIT:1><EOL>self.branches.input2[m] = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>", "docstring": "cc1 = fec_conv(G = ('111','101'), Depth = 10)\nInstantiate a Rate 1/2 or Rate 1/3 convolutional \ncoder/decoder object. 
Polys G1 and G2 are entered \nas binary strings, e.g,\n\nRate 1/2\nG1 = '111' and G2 = '101' for K = 3 and\nG1 = '1111001' and G2 = '1011011' for K = 7.\n\nRate 1/3\nG1 = '111', G2 = '011' and G3 = '101' for K = 3 and\nG1 = '1111001', G2 = '1100101' and G3 = '1011011'\nfor K= 7\n\nThe rate will automatically be selected by the number\nof G polynomials (only rate 1/2 and 1/3 are available)\n\nViterbi decoding has a decision depth of Depth.\n\nData structures than manage the VA are created \nupon instantiation via the __init__ method.\n\nOther ideal polynomial considerations (taken from\n\"Introduction to Digital Communication\" Second Edition\nby Ziemer and Peterson:\n\nRate 1/2\nK=3 ('111','101')\nK=4 ('1111','1101')\nK=5 ('11101','10011')\nK=6 ('111101','101011')\nK=7 ('1111001','1011011')\nK=8 ('11111001','10100111')\nK=9 ('111101011','101110001')\n\nRate 1/3\nK=3 ('111','111','101')\nK=4 ('1111','1101','1011')\nK=5 ('11111','11011','10101')\nK=6 ('111101','101011','100111')\nK=7 ('1111001','1100101','1011011')\nK=8 ('11110111','11011001','10010101')\n\nMark Wickert and Andrew Smit October 2018", "id": "f14897:c3:m0"}
{"signature": "def in_out_check(self):", "body": "devices = available_devices()<EOL>if not self.in_idx in devices:<EOL><INDENT>raise OSError(\"<STR_LIT>\")<EOL><DEDENT>in_check = devices[self.in_idx]<EOL>if not self.out_idx in devices:<EOL><INDENT>raise OSError(\"<STR_LIT>\")<EOL><DEDENT>out_check = devices[self.out_idx]<EOL>if((in_check['<STR_LIT>'] == <NUM_LIT:0>) and (out_check['<STR_LIT>']==<NUM_LIT:0>)):<EOL><INDENT>raise StandardError('<STR_LIT>')<EOL><DEDENT>elif(in_check['<STR_LIT>'] == <NUM_LIT:0>):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>elif(out_check['<STR_LIT>'] == <NUM_LIT:0>):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return True<EOL>", "docstring": "Checks the input and output to see if they are valid", "id": "f14898:c0:m1"}
{"signature": "def pack_LR(self,left_out,right_out):", "body": "for i in range(<NUM_LIT:0>,self.frame_length*<NUM_LIT:2>):<EOL><INDENT>if i % <NUM_LIT:2>:<EOL><INDENT>self.out[i] = right_out[(int)(i/<NUM_LIT:2>)]<EOL><DEDENT>else:<EOL><INDENT>self.out[i] = left_out[(int)(i/<NUM_LIT:2>)]<EOL><DEDENT><DEDENT>return self.out<EOL>", "docstring": "Packs separate left and right channel data into one array to output\nand returns the output.\n\nParameters\n----------\nleft_out : left channel array of samples going to output\nright_out : right channel array of samples going to output\n\nReturns\n-------\nout : packed left and right channel array of samples", "id": "f14898:c0:m14"}
{"signature": "def thread_stream(self,Tsec = <NUM_LIT:2>,numChan = <NUM_LIT:1>):", "body": "def stream_thread(time,channel):<EOL><INDENT>self.stream(Tsec=time,numChan = channel)<EOL><DEDENT>t = Thread(target=stream_thread, args=(Tsec,numChan,))<EOL>t.start()<EOL>", "docstring": "Stream audio in a thread using callback. The stream is threaded, so widgets can be\nused simultaneously during stream.\n\nParameters\n----------\n\nTsec : stream time in seconds if Tsec > 0. If Tsec = 0, then stream goes to infinite \nmode. When in infinite mode, Tsec.stop() can be used to stop the stream.\n\nnumChan : number of channels. Use 1 for mono and 2 for stereo.", "id": "f14898:c0:m4"}
{"signature": "def available_devices():", "body": "devices = {}<EOL>pA = pyaudio.PyAudio()<EOL>device_string = str()<EOL>for k in range(pA.get_device_count()):<EOL><INDENT>dev = pA.get_device_info_by_index(k)<EOL>devices[k] = {'<STR_LIT:name>': dev['<STR_LIT:name>'], '<STR_LIT>': dev['<STR_LIT>'], '<STR_LIT>': dev['<STR_LIT>']}<EOL>device_string += '<STR_LIT>' %(k,dev['<STR_LIT:name>'],dev['<STR_LIT>'],dev['<STR_LIT>'])<EOL><DEDENT>logger.debug(device_string)<EOL>return devices<EOL>", "docstring": "Display available input and output audio devices along with their\nport indices.\n\n:return:  Dictionary whose keys are the device index, the number of inputs and outputs, and their names.\n:rtype: dict", "id": "f14898:m0"}
{"signature": "def __init__(self,x,start_offset = <NUM_LIT:0>):", "body": "self.n_chan = x.ndim<EOL>if self.n_chan == <NUM_LIT:2>:<EOL><INDENT>if x.shape[<NUM_LIT:1>] != <NUM_LIT:2>:<EOL><INDENT>x = x.T<EOL><DEDENT><DEDENT>self.x = x<EOL>self.x_len = x.shape[<NUM_LIT:0>]<EOL>self.loop_pointer = start_offset<EOL>", "docstring": "Create a 1D or 2D array for audio looping", "id": "f14898:c1:m0"}
{"signature": "def get_LR(self,in_data):", "body": "for i in range(<NUM_LIT:0>,self.frame_length*<NUM_LIT:2>):<EOL><INDENT>if i % <NUM_LIT:2>:<EOL><INDENT>self.right_in[(int)(i/<NUM_LIT:2>)] = in_data[i]<EOL><DEDENT>else:<EOL><INDENT>self.left_in[(int)(i/<NUM_LIT:2>)] = in_data[i]<EOL><DEDENT><DEDENT>return self.left_in, self.right_in<EOL>", "docstring": "Splits incoming packed stereo data into separate left and right channels\nand returns an array of left samples and an array of right samples\n\nParameters\n----------\nin_data : input data from the streaming object in the callback function. \n\nReturns\n-------\nleft_in : array of incoming left channel samples\nright_in : array of incoming right channel samples", "id": "f14898:c0:m13"}
{"signature": "def cb_active_plot(self,start_ms,stop_ms,line_color='<STR_LIT:b>'):", "body": "<EOL>k_min_idx = np.nonzero(np.ravel(np.array(self.DSP_tic)*<NUM_LIT:1000> < start_ms))[<NUM_LIT:0>]<EOL>if len(k_min_idx) < <NUM_LIT:1>:<EOL><INDENT>k_min = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>k_min = k_min_idx[-<NUM_LIT:1>]<EOL><DEDENT>k_max_idx = np.nonzero(np.ravel(np.array(self.DSP_tic)*<NUM_LIT:1000> > stop_ms))[<NUM_LIT:0>]<EOL>if len(k_min_idx) < <NUM_LIT:1>:<EOL><INDENT>k_max= len(self.DSP_tic)<EOL><DEDENT>else:<EOL><INDENT>k_max = k_max_idx[<NUM_LIT:0>]<EOL><DEDENT>for k in range(k_min,k_max):<EOL><INDENT>if k == <NUM_LIT:0>:<EOL><INDENT>plt.plot([<NUM_LIT:0>,self.DSP_tic[k]*<NUM_LIT:1000>,self.DSP_tic[k]*<NUM_LIT:1000>,<EOL>self.DSP_toc[k]*<NUM_LIT:1000>,self.DSP_toc[k]*<NUM_LIT:1000>],<EOL>[<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:1>,<NUM_LIT:0>],'<STR_LIT:b>')<EOL><DEDENT>else:<EOL><INDENT>plt.plot([self.DSP_toc[k-<NUM_LIT:1>]*<NUM_LIT:1000>,self.DSP_tic[k]*<NUM_LIT:1000>,self.DSP_tic[k]*<NUM_LIT:1000>,<EOL>self.DSP_toc[k]*<NUM_LIT:1000>,self.DSP_toc[k]*<NUM_LIT:1000>],[<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:1>,<NUM_LIT:0>],'<STR_LIT:b>')<EOL><DEDENT><DEDENT>plt.plot([self.DSP_toc[k_max-<NUM_LIT:1>]*<NUM_LIT:1000>,stop_ms],[<NUM_LIT:0>,<NUM_LIT:0>],'<STR_LIT:b>')<EOL>plt.xlim([start_ms,stop_ms])<EOL>plt.title(r'<STR_LIT>')<EOL>plt.ylabel(r'<STR_LIT>')<EOL>plt.xlabel(r'<STR_LIT>')<EOL>plt.grid()<EOL>", "docstring": "Plot timing information of time spent in the callback. This is similar\nto what a logic analyzer provides when probing an interrupt.\n\ncb_active_plot( start_ms,stop_ms,line_color='b')", "id": "f14898:c0:m12"}
{"signature": "def DSP_capture_add_samples(self,new_data):", "body": "self.capture_sample_count += len(new_data)<EOL>if self.Tcapture > <NUM_LIT:0>:<EOL><INDENT>self.data_capture = np.hstack((self.data_capture,new_data))<EOL>if (self.Tcapture > <NUM_LIT:0>) and (len(self.data_capture) > self.Ncapture):<EOL><INDENT>self.data_capture = self.data_capture[-self.Ncapture:]<EOL><DEDENT><DEDENT>", "docstring": "Append new samples to the data_capture array and increment the sample counter\nIf length reaches Tcapture, then the newest samples will be kept. If Tcapture = 0 \nthen new values are not appended to the data_capture array.", "id": "f14898:c0:m7"}
{"signature": "def interactive_stream(self,Tsec = <NUM_LIT:2>, numChan = <NUM_LIT:1>):", "body": "self.Tsec = Tsec<EOL>self.numChan = numChan<EOL>self.interactiveFG = <NUM_LIT:1><EOL>self.play = interactive(self.interaction,Stream = ToggleButtons(<EOL>options=['<STR_LIT>', '<STR_LIT>'],<EOL>description = '<STR_LIT:U+0020>',<EOL>value = '<STR_LIT>') )<EOL>display(self.play)<EOL>", "docstring": "Stream audio with start and stop radio buttons\n\nInteractive stream is designed for streaming audio through this object using\na callback function. This stream is threaded, so it can be used with ipywidgets.\nClick on the \"Start Streaming\" button to start streaming and click on \"Stop Streaming\"\nbutton to stop streaming.\n\nParameters\n----------\n\nTsec : stream time in seconds if Tsec > 0. If Tsec = 0, then stream goes to infinite \nmode. When in infinite mode, the \"Stop Streaming\" radio button or Tsec.stop() can be \nused to stop the stream.\n\nnumChan : number of channels. Use 1 for mono and 2 for stereo.", "id": "f14898:c0:m3"}
{"signature": "def stream_stats(self):", "body": "Tp = self.frame_length/float(self.fs)*<NUM_LIT:1000><EOL>print('<STR_LIT>'% (self.DSP_tic[<NUM_LIT:0>]*<NUM_LIT:1000>,))<EOL>print('<STR_LIT>' % Tp)<EOL>Tmp_mean = np.mean(np.diff(np.array(self.DSP_tic))[<NUM_LIT:1>:]*<NUM_LIT:1000>)<EOL>print('<STR_LIT>' % Tmp_mean)<EOL>Tprocess_mean = np.mean(np.array(self.DSP_toc)-np.array(self.DSP_tic))*<NUM_LIT:1000><EOL>print('<STR_LIT>' % Tprocess_mean)<EOL>", "docstring": "Display basic statistics of callback execution: ideal period \nbetween callbacks, average measured period between callbacks,\nand average time spent in the callback.", "id": "f14898:c0:m11"}
{"signature": "def DSP_capture_add_samples_stereo(self,new_data_left,new_data_right):", "body": "self.capture_sample_count = self.capture_sample_count + len(new_data_left) + len(new_data_right)<EOL>if self.Tcapture > <NUM_LIT:0>:<EOL><INDENT>self.data_capture_left = np.hstack((self.data_capture_left,new_data_left))<EOL>self.data_capture_right = np.hstack((self.data_capture_right,new_data_right))<EOL>if (len(self.data_capture_left) > self.Ncapture):<EOL><INDENT>self.data_capture_left = self.data_capture_left[-self.Ncapture:]<EOL><DEDENT>if (len(self.data_capture_right) > self.Ncapture):<EOL><INDENT>self.data_capture_right = self.data_capture_right[-self.Ncapture:]<EOL><DEDENT><DEDENT>", "docstring": "Append new samples to the data_capture_left array and the data_capture_right\narray and increment the sample counter. If length reaches Tcapture, then the \nnewest samples will be kept. If Tcapture = 0 then new values are not appended \nto the data_capture array.", "id": "f14898:c0:m8"}
{"signature": "def CA_code_header(fname_out, Nca):", "body": "dir_path = os.path.dirname(os.path.realpath(__file__))<EOL>ca = loadtxt(dir_path + '<STR_LIT>', dtype=int16, usecols=(Nca - <NUM_LIT:1>,), unpack=True)<EOL>M = <NUM_LIT>  <EOL>N = <NUM_LIT>  <EOL>Sca = '<STR_LIT>' + str(Nca)<EOL>f = open(fname_out, '<STR_LIT>')<EOL>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>' % M)<EOL>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>');<EOL>f.write('<STR_LIT>'% Nca);<EOL>f.write('<STR_LIT>' % Nca)<EOL>kk = <NUM_LIT:0>;<EOL>for k in range(M):<EOL><INDENT>if (kk < N - <NUM_LIT:1>) and (k < M - <NUM_LIT:1>):<EOL><INDENT>f.write('<STR_LIT>' % ca[k])<EOL>kk += <NUM_LIT:1><EOL><DEDENT>elif (kk == N - <NUM_LIT:1>) & (k < M - <NUM_LIT:1>):<EOL><INDENT>f.write('<STR_LIT>' % ca[k])<EOL>if k < M:<EOL><INDENT>if Nca < <NUM_LIT:10>:<EOL><INDENT>f.write('<STR_LIT:U+0020>')<EOL><DEDENT>else:<EOL><INDENT>f.write('<STR_LIT:U+0020>')<EOL><DEDENT>kk = <NUM_LIT:0><EOL><DEDENT><DEDENT>else:<EOL><INDENT>f.write('<STR_LIT>' % ca[k])<EOL><DEDENT><DEDENT>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>')<EOL>f.close()<EOL>", "docstring": "Write 1023 bit CA (Gold) Code Header Files\n\nMark Wickert February 2015", "id": "f14899:m4"}
{"signature": "def freqz_resp_list(b, a=np.array([<NUM_LIT:1>]), mode='<STR_LIT>', fs=<NUM_LIT:1.0>, Npts=<NUM_LIT>, fsize=(<NUM_LIT:6>, <NUM_LIT:4>)):", "body": "if type(b) == list:<EOL><INDENT>N_filt = len(b)<EOL><DEDENT>f = np.arange(<NUM_LIT:0>, Npts) / (<NUM_LIT> * Npts)<EOL>for n in range(N_filt):<EOL><INDENT>w, H = signal.freqz(b[n], a[n], <NUM_LIT:2> * np.pi * f)<EOL>if n == <NUM_LIT:0>:<EOL><INDENT>plt.figure(figsize=fsize)<EOL><DEDENT>if mode.lower() == '<STR_LIT>':<EOL><INDENT>plt.plot(f * fs, <NUM_LIT:20> * np.log10(np.abs(H)))<EOL>if n == N_filt - <NUM_LIT:1>:<EOL><INDENT>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT><DEDENT>elif mode.lower() == '<STR_LIT>':<EOL><INDENT>plt.plot(f * fs, np.angle(H))<EOL>if n == N_filt - <NUM_LIT:1>:<EOL><INDENT>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT><DEDENT>elif (mode.lower() == '<STR_LIT>') or (mode.lower() == '<STR_LIT>'):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>theta = np.unwrap(np.angle(H))<EOL>theta2 = np.unwrap(<NUM_LIT:2> * theta) / <NUM_LIT><EOL>theta_dif = np.diff(theta2)<EOL>f_diff = np.diff(f)<EOL>Tg = -np.diff(theta2) / np.diff(w)<EOL>idx = pylab.find(<NUM_LIT:20> * np.log10(H[:-<NUM_LIT:1>]) < -<NUM_LIT>)<EOL>Tg[idx] = np.zeros(len(idx))<EOL>max_Tg = np.max(Tg)<EOL>if mode.lower() == '<STR_LIT>':<EOL><INDENT>max_Tg /= fs<EOL>plt.plot(f[:-<NUM_LIT:1>] * fs, Tg / fs)<EOL>plt.ylim([<NUM_LIT:0>, <NUM_LIT> * max_Tg])<EOL><DEDENT>else:<EOL><INDENT>plt.plot(f[:-<NUM_LIT:1>] * fs, Tg)<EOL>plt.ylim([<NUM_LIT:0>, <NUM_LIT> * max_Tg])<EOL><DEDENT>if n == N_filt - <NUM_LIT:1>:<EOL><INDENT>plt.xlabel('<STR_LIT>')<EOL>if mode.lower() == '<STR_LIT>':<EOL><INDENT>plt.ylabel('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.ylabel('<STR_LIT>')<EOL><DEDENT>plt.title('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>s1 = '<STR_LIT>'<EOL>s2 = '<STR_LIT>'<EOL>print(s1 + s2)<EOL><DEDENT><DEDENT>", "docstring": "A method for displaying 
digital filter frequency response magnitude,\nphase, and group delay. A plot is produced using matplotlib\n\nfreq_resp(self,mode = 'dB',Npts = 1024)\n\nA method for displaying the filter frequency response magnitude,\nphase, and group delay. A plot is produced using matplotlib\n\nfreqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4))\n\nParameters\n----------\nb : ndarray of numerator coefficients\na : ndarray of denominator coefficents\nmode : display mode: 'dB' magnitude, 'phase' in radians, or\n        'groupdelay_s' in samples and 'groupdelay_t' in sec, \n        all versus frequency in Hz\nNpts : number of points to plot; default is 1024\nfsize : figure size; defult is (6,4) inches\n\nMark Wickert, January 2015", "id": "f14899:m3"}
{"signature": "def FIR_fix_header(fname_out, h):", "body": "M = len(h)<EOL>hq = int16(rint(h * <NUM_LIT:2> ** <NUM_LIT:15>))<EOL>N = <NUM_LIT:8>  <EOL>f = open(fname_out, '<STR_LIT>')<EOL>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>' % M)<EOL>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>');<EOL>f.write('<STR_LIT>');<EOL>f.write('<STR_LIT>')<EOL>kk = <NUM_LIT:0>;<EOL>for k in range(M):<EOL><INDENT>if (kk < N - <NUM_LIT:1>) and (k < M - <NUM_LIT:1>):<EOL><INDENT>f.write('<STR_LIT>' % hq[k])<EOL>kk += <NUM_LIT:1><EOL><DEDENT>elif (kk == N - <NUM_LIT:1>) & (k < M - <NUM_LIT:1>):<EOL><INDENT>f.write('<STR_LIT>' % hq[k])<EOL>if k < M:<EOL><INDENT>f.write('<STR_LIT:U+0020>')<EOL>kk = <NUM_LIT:0><EOL><DEDENT><DEDENT>else:<EOL><INDENT>f.write('<STR_LIT>' % hq[k])<EOL><DEDENT><DEDENT>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>')<EOL>f.close()<EOL>", "docstring": "Write FIR Fixed-Point Filter Header Files \n\nMark Wickert February 2015", "id": "f14899:m1"}
{"signature": "def IIR_sos_header(fname_out, SOS_mat):", "body": "Ns, Mcol = SOS_mat.shape<EOL>f = open(fname_out, '<STR_LIT>')<EOL>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>' % Ns)<EOL>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>');<EOL>f.write('<STR_LIT>');<EOL>f.write('<STR_LIT>' % (<NUM_LIT:5> * Ns))<EOL>for k in range(Ns):<EOL><INDENT>if (k < Ns - <NUM_LIT:1>):<EOL><INDENT>f.write('<STR_LIT>' %(SOS_mat[k, <NUM_LIT:0>], SOS_mat[k, <NUM_LIT:1>], SOS_mat[k, <NUM_LIT:2>]))<EOL>f.write('<STR_LIT>' %(-SOS_mat[k, <NUM_LIT:4>], -SOS_mat[k, <NUM_LIT:5>]))<EOL><DEDENT>else:<EOL><INDENT>f.write('<STR_LIT>' %(SOS_mat[k, <NUM_LIT:0>], SOS_mat[k, <NUM_LIT:1>], SOS_mat[k, <NUM_LIT:2>]))<EOL>f.write('<STR_LIT>' %(-SOS_mat[k, <NUM_LIT:4>], -SOS_mat[k, <NUM_LIT:5>]))<EOL><DEDENT><DEDENT>f.write('<STR_LIT>')<EOL>f.write('<STR_LIT>')<EOL>f.close()<EOL>", "docstring": "Write IIR SOS Header Files\nFile format is compatible with CMSIS-DSP IIR \nDirectform II Filter Functions\n\nMark Wickert March 2015-October 2016", "id": "f14899:m2"}
{"signature": "def IIR_lpf(f_pass, f_stop, Ripple_pass, Atten_stop, <EOL>fs = <NUM_LIT>, ftype = '<STR_LIT>'):", "body": "b,a = signal.iirdesign(<NUM_LIT:2>*float(f_pass)/fs, <NUM_LIT:2>*float(f_stop)/fs,<EOL>Ripple_pass, Atten_stop,<EOL>ftype = ftype, output='<STR_LIT>')<EOL>sos = signal.iirdesign(<NUM_LIT:2>*float(f_pass)/fs, <NUM_LIT:2>*float(f_stop)/fs,<EOL>Ripple_pass, Atten_stop,<EOL>ftype = ftype, output='<STR_LIT>')<EOL>tag = '<STR_LIT>' + ftype + '<STR_LIT>'<EOL>print('<STR_LIT>' % (tag,len(a)-<NUM_LIT:1>))<EOL>return b, a, sos<EOL>", "docstring": "Design an IIR lowpass filter using scipy.signal.iirdesign. \nThe filter order is determined based on \nf_pass Hz, f_stop Hz, and the desired stopband attenuation\nd_stop in dB, all relative to a sampling rate of fs Hz.\n\nParameters\n----------\nf_pass : Passband critical frequency in Hz\nf_stop : Stopband critical frequency in Hz\nRipple_pass : Filter gain in dB at f_pass\nAtten_stop : Filter attenuation in dB at f_stop\nfs : Sampling rate in Hz\nftype : Analog prototype from 'butter' 'cheby1', 'cheby2',\n        'ellip', and 'bessel'\n\nReturns\n-------\nb : ndarray of the numerator coefficients\na : ndarray of the denominator coefficients\nsos : 2D ndarray of second-order section coefficients\n\nNotes\n-----\nAdditionally a text string telling the user the filter order is\nwritten to the console, e.g., IIR cheby1 order = 8.\n\nExamples\n--------\n>>> fs = 48000\n>>> f_pass = 5000\n>>> f_stop = 8000\n>>> b_but,a_but,sos_but = IIR_lpf(f_pass,f_stop,0.5,60,fs,'butter')\n>>> b_cheb1,a_cheb1,sos_cheb1 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby1')\n>>> b_cheb2,a_cheb2,sos_cheb2 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby2')\n>>> b_elli,a_elli,sos_elli = IIR_lpf(f_pass,f_stop,0.5,60,fs,'ellip')\n\n\nMark Wickert October 2016", "id": "f14906:m0"}
{"signature": "def sos_cascade(sos1,sos2):", "body": "return np.vstack((sos1,sos2))<EOL>", "docstring": "Mark Wickert October 2016", "id": "f14906:m8"}
{"signature": "def sos_zplane(sos,auto_scale=True,size=<NUM_LIT:2>,tol = <NUM_LIT>):", "body": "Ns,Mcol = sos.shape<EOL>N_roots = []<EOL>for k in range(Ns):<EOL><INDENT>N_roots_tmp = np.roots(sos[k,:<NUM_LIT:3>])<EOL>if N_roots_tmp[<NUM_LIT:1>] == <NUM_LIT:0.>:<EOL><INDENT>N_roots = np.hstack((N_roots,N_roots_tmp[<NUM_LIT:0>]))<EOL><DEDENT>else:<EOL><INDENT>N_roots = np.hstack((N_roots,N_roots_tmp))<EOL><DEDENT><DEDENT>D_roots = []<EOL>for k in range(Ns):<EOL><INDENT>D_roots_tmp = np.roots(sos[k,<NUM_LIT:3>:])<EOL>if D_roots_tmp[<NUM_LIT:1>] == <NUM_LIT:0.>:<EOL><INDENT>D_roots = np.hstack((D_roots,D_roots_tmp[<NUM_LIT:0>]))<EOL><DEDENT>else:<EOL><INDENT>D_roots = np.hstack((D_roots,D_roots_tmp))<EOL><DEDENT><DEDENT>x_scale = <NUM_LIT>*size<EOL>y_scale = <NUM_LIT>*size   <EOL>x_off = <NUM_LIT><EOL>y_off = <NUM_LIT><EOL>M = len(N_roots)<EOL>N = len(D_roots)<EOL>if auto_scale:<EOL><INDENT>if M > <NUM_LIT:0> and N > <NUM_LIT:0>:<EOL><INDENT>size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+<NUM_LIT><EOL><DEDENT>elif M > <NUM_LIT:0>:<EOL><INDENT>size = max(np.max(np.abs(N_roots)),<NUM_LIT:1.0>)+<NUM_LIT><EOL><DEDENT>elif N > <NUM_LIT:0>:<EOL><INDENT>size = max(<NUM_LIT:1.0>,np.max(np.abs(D_roots)))+<NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>size = <NUM_LIT><EOL><DEDENT><DEDENT>plt.figure(figsize=(<NUM_LIT:5>,<NUM_LIT:5>))<EOL>plt.axis('<STR_LIT>')<EOL>r = np.linspace(<NUM_LIT:0>,<NUM_LIT:2>*np.pi,<NUM_LIT:200>)<EOL>plt.plot(np.cos(r),np.sin(r),'<STR_LIT>')<EOL>plt.plot([-size,size],[<NUM_LIT:0>,<NUM_LIT:0>],'<STR_LIT>')<EOL>plt.plot([<NUM_LIT:0>,<NUM_LIT:0>],[-size,size],'<STR_LIT>')<EOL>if M > <NUM_LIT:0>:<EOL><INDENT>N_uniq, N_mult=unique_cpx_roots(N_roots,tol=tol)<EOL>plt.plot(np.real(N_uniq),np.imag(N_uniq),'<STR_LIT>',mfc='<STR_LIT:None>',ms=<NUM_LIT:8>)<EOL>idx_N_mult = np.nonzero(np.ravel(N_mult><NUM_LIT:1>))[<NUM_LIT:0>]<EOL>for k in range(len(idx_N_mult)):<EOL><INDENT>x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale<EOL>y_loc 
=np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale<EOL>plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),<EOL>ha='<STR_LIT>',va='<STR_LIT>',fontsize=<NUM_LIT:10>)<EOL><DEDENT><DEDENT>if N > <NUM_LIT:0>:<EOL><INDENT>D_uniq, D_mult=unique_cpx_roots(D_roots,tol=tol)<EOL>plt.plot(np.real(D_uniq),np.imag(D_uniq),'<STR_LIT>',ms=<NUM_LIT:8>)<EOL>idx_D_mult = np.nonzero(np.ravel(D_mult><NUM_LIT:1>))[<NUM_LIT:0>]<EOL>for k in range(len(idx_D_mult)):<EOL><INDENT>x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale<EOL>y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale<EOL>plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),<EOL>ha='<STR_LIT>',va='<STR_LIT>',fontsize=<NUM_LIT:10>)            <EOL><DEDENT><DEDENT>if M - N < <NUM_LIT:0>:<EOL><INDENT>plt.plot(<NUM_LIT:0.0>,<NUM_LIT:0.0>,'<STR_LIT>',mfc='<STR_LIT:None>',ms=<NUM_LIT:8>)<EOL><DEDENT>elif M - N > <NUM_LIT:0>:<EOL><INDENT>plt.plot(<NUM_LIT:0.0>,<NUM_LIT:0.0>,'<STR_LIT>',ms=<NUM_LIT:8>)<EOL><DEDENT>if abs(M - N) > <NUM_LIT:1>:<EOL><INDENT>plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)),<EOL>ha='<STR_LIT>',va='<STR_LIT>',fontsize=<NUM_LIT:10>)        <EOL><DEDENT>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL>plt.axis([-size,size,-size,size])<EOL>return M,N<EOL>", "docstring": "Create an z-plane pole-zero plot.\n\nCreate an z-plane pole-zero plot using the numerator\nand denominator z-domain system function coefficient\nndarrays b and a respectively. Assume descending powers of z.\n\nParameters\n----------\nsos : ndarray of the sos coefficients\nauto_scale : bool (default True)\nsize : plot radius maximum when scale = False\n\nReturns\n-------\n(M,N) : tuple of zero and pole counts + plot window\n\nNotes\n-----\nThis function tries to identify repeated poles and zeros and will \nplace the multiplicity number above and to the right of the pole or zero.\nThe difficulty is setting the tolerance for this detection. 
Currently it\nis set at 1e-3 via the function signal.unique_roots.\n\nExamples\n--------\n>>> # Here the plot is generated using auto_scale\n>>> sos_zplane(sos)\n>>> # Here the plot is generated using manual scaling\n>>> sos_zplane(sos,False,1.5)", "id": "f14906:m9"}
{"signature": "def freqz_resp_cas_list(sos,mode = '<STR_LIT>',fs=<NUM_LIT:1.0>,Npts = <NUM_LIT>,fsize=(<NUM_LIT:6>,<NUM_LIT:4>)):", "body": "if type(sos) == list:<EOL><INDENT>N_filt = len(sos)<EOL><DEDENT>f = np.arange(<NUM_LIT:0>,Npts)/(<NUM_LIT>*Npts)<EOL>for n in range(N_filt):<EOL><INDENT>w,H = freqz_cas(sos[n],<NUM_LIT:2>*np.pi*f)<EOL>if n == <NUM_LIT:0>:<EOL><INDENT>plt.figure(figsize=fsize)<EOL><DEDENT>if mode.lower() == '<STR_LIT>':<EOL><INDENT>plt.plot(f*fs,<NUM_LIT:20>*np.log10(np.abs(H)))<EOL>if n == N_filt-<NUM_LIT:1>:<EOL><INDENT>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT><DEDENT>elif mode.lower() == '<STR_LIT>':<EOL><INDENT>plt.plot(f*fs,np.angle(H))<EOL>if n == N_filt-<NUM_LIT:1>:<EOL><INDENT>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT><DEDENT>elif (mode.lower() == '<STR_LIT>') or (mode.lower() == '<STR_LIT>'):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>theta = np.unwrap(np.angle(H))<EOL>theta2 = np.unwrap(<NUM_LIT:2>*theta)/<NUM_LIT><EOL>theta_dif = np.diff(theta2)<EOL>f_diff = np.diff(f)<EOL>Tg = -np.diff(theta2)/np.diff(w)<EOL>idx = np.nonzero(np.ravel(<NUM_LIT:20>*np.log10(H[:-<NUM_LIT:1>]) < -<NUM_LIT>))[<NUM_LIT:0>]<EOL>Tg[idx] = np.zeros(len(idx))<EOL>max_Tg = np.max(Tg)<EOL>if mode.lower() == '<STR_LIT>':<EOL><INDENT>max_Tg /= fs<EOL>plt.plot(f[:-<NUM_LIT:1>]*fs,Tg/fs)<EOL>plt.ylim([<NUM_LIT:0>,<NUM_LIT>*max_Tg])<EOL><DEDENT>else:<EOL><INDENT>plt.plot(f[:-<NUM_LIT:1>]*fs,Tg)<EOL>plt.ylim([<NUM_LIT:0>,<NUM_LIT>*max_Tg])<EOL><DEDENT>if n == N_filt-<NUM_LIT:1>:<EOL><INDENT>plt.xlabel('<STR_LIT>')<EOL>if mode.lower() == '<STR_LIT>':<EOL><INDENT>plt.ylabel('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.ylabel('<STR_LIT>')<EOL><DEDENT>plt.title('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>s1 = '<STR_LIT>'<EOL>s2 = '<STR_LIT>'<EOL>print(s1 + s2)<EOL><DEDENT><DEDENT>", "docstring": "A method for displaying cascade digital filter form frequency response 
\nmagnitude, phase, and group delay. A plot is produced using matplotlib\n\nfreq_resp(self,mode = 'dB',Npts = 1024)\n\nA method for displaying the filter frequency response magnitude,\nphase, and group delay. A plot is produced using matplotlib\n\nfreqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4))\n\n    b = ndarray of numerator coefficients\n    a = ndarray of denominator coefficents\n mode = display mode: 'dB' magnitude, 'phase' in radians, or \n        'groupdelay_s' in samples and 'groupdelay_t' in sec, \n        all versus frequency in Hz\n Npts = number of points to plot; default is 1024\nfsize = figure size; defult is (6,4) inches\n\nMark Wickert, January 2015", "id": "f14906:m6"}
{"signature": "def IIR_hpf(f_stop, f_pass, Ripple_pass, Atten_stop, <EOL>fs = <NUM_LIT>, ftype = '<STR_LIT>'):", "body": "b,a = signal.iirdesign(<NUM_LIT:2>*float(f_pass)/fs, <NUM_LIT:2>*float(f_stop)/fs,<EOL>Ripple_pass, Atten_stop,<EOL>ftype = ftype, output='<STR_LIT>')<EOL>sos = signal.iirdesign(<NUM_LIT:2>*float(f_pass)/fs, <NUM_LIT:2>*float(f_stop)/fs,<EOL>Ripple_pass, Atten_stop,<EOL>ftype =ftype, output='<STR_LIT>')<EOL>tag = '<STR_LIT>' + ftype + '<STR_LIT>'<EOL>print('<STR_LIT>' % (tag,len(a)-<NUM_LIT:1>))<EOL>return b, a, sos<EOL>", "docstring": "Design an IIR highpass filter using scipy.signal.iirdesign. \nThe filter order is determined based on \nf_pass Hz, f_stop Hz, and the desired stopband attenuation\nd_stop in dB, all relative to a sampling rate of fs Hz.\n\nParameters\n----------\nf_stop : \nf_pass : \nRipple_pass : \nAtten_stop : \nfs : sampling rate in Hz\nftype : Analog prototype from 'butter' 'cheby1', 'cheby2',\n        'ellip', and 'bessel'\n\nReturns\n-------\nb : ndarray of the numerator coefficients\na : ndarray of the denominator coefficients\nsos : 2D ndarray of second-order section coefficients\n\nExamples\n--------\n>>> fs = 48000\n>>> f_pass = 8000\n>>> f_stop = 5000\n>>> b_but,a_but,sos_but = IIR_hpf(f_stop,f_pass,0.5,60,fs,'butter')\n>>> b_cheb1,a_cheb1,sos_cheb1 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby1')\n>>> b_cheb2,a_cheb2,sos_cheb2 = IIR_hpf(f_stop,f_pass,0.5,60,fs,'cheby2')\n>>> b_elli,a_elli,sos_elli = IIR_hpf(f_stop,f_pass,0.5,60,fs,'ellip')\n\nMark Wickert October 2016", "id": "f14906:m1"}
{"signature": "def PLL_cbb(x,fs,loop_type,Kv,fn,zeta):", "body": "T = <NUM_LIT:1>/float(fs)<EOL>Kv = <NUM_LIT:2>*np.pi*Kv <EOL>if loop_type == <NUM_LIT:1>:<EOL><INDENT>K = <NUM_LIT:2>*np.pi*fn <EOL><DEDENT>elif loop_type == <NUM_LIT:2>:<EOL><INDENT>K = <NUM_LIT:4> *np.pi*zeta*fn <EOL>tau2 = zeta/(np.pi*fn)<EOL><DEDENT>elif loop_type == <NUM_LIT:3>:<EOL><INDENT>K = Kv <EOL>tau1 = K/((<NUM_LIT:2>*np.pi*fn)^<NUM_LIT:2>);<EOL>tau2 = <NUM_LIT:2>*zeta/(<NUM_LIT:2>*np.pi*fn)*(<NUM_LIT:1> - <NUM_LIT:2>*np.pi*fn/K*<NUM_LIT:1>/(<NUM_LIT:2>*zeta))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>filt_in_last = <NUM_LIT:0>; filt_out_last = <NUM_LIT:0>;<EOL>vco_in_last = <NUM_LIT:0>; vco_out = <NUM_LIT:0>; vco_out_last = <NUM_LIT:0>;<EOL>vco_out_cbb = <NUM_LIT:0><EOL>n = np.arange(len(x))<EOL>theta_hat = np.zeros(len(x))<EOL>ev = np.zeros(len(x))<EOL>phi = np.zeros(len(x))<EOL>for k in  range(len(n)):<EOL><INDENT>phi[k] = np.imag(x[k] * np.conj(vco_out_cbb))<EOL>pd_out = phi[k]<EOL>gain_out = K/Kv*pd_out <EOL>if loop_type == <NUM_LIT:2>:<EOL><INDENT>filt_in = (<NUM_LIT:1>/tau2)*gain_out<EOL>filt_out = filt_out_last + T/<NUM_LIT:2>*(filt_in + filt_in_last)<EOL>filt_in_last = filt_in<EOL>filt_out_last = filt_out<EOL>filt_out = filt_out + gain_out<EOL><DEDENT>elif loop_type == <NUM_LIT:3>:<EOL><INDENT>filt_in = (tau2/tau1)*gain_out - (<NUM_LIT:1>/tau1)*filt_out_last<EOL>u3 = filt_in + (<NUM_LIT:1>/tau2)*filt_out_last<EOL>filt_out = filt_out_last + T/<NUM_LIT:2>*(filt_in + filt_in_last)<EOL>filt_in_last = filt_in<EOL>filt_out_last = filt_out<EOL><DEDENT>else:<EOL><INDENT>filt_out = gain_out;<EOL><DEDENT>vco_in = filt_out<EOL>if loop_type == <NUM_LIT:3>:<EOL><INDENT>vco_in = u3<EOL><DEDENT>vco_out = vco_out_last + T/<NUM_LIT:2>*(vco_in + vco_in_last)<EOL>vco_in_last = vco_in<EOL>vco_out_last = vco_out<EOL>vco_out = Kv*vco_out <EOL>vco_out_cbb = np.exp(<NUM_LIT>*vco_out)<EOL>ev[k] = vco_in<EOL>theta_hat[k] = vco_out<EOL><DEDENT>return theta_hat, ev, phi<EOL>", 
"docstring": "Baseband Analog PLL Simulation Model\n\n:param x: input phase deviation in radians\n:param fs: sampling rate in sample per second or Hz\n:param loop_type: 1, first-order loop filter F(s)=K_LF; 2, integrator\n            with lead compensation F(s) = (1 + s tau2)/(s tau1),\n            i.e., a type II, or 3, lowpass with lead compensation\n            F(s) = (1 + s tau2)/(1 + s tau1)\n:param Kv: VCO gain in Hz/v; note presently assume Kp = 1v/rad\n            and K_LF = 1; the user can easily change this\n:param fn: Loop natural frequency (loops 2 & 3) or cutoff\n            frequency (loop 1)\n:param zeta: Damping factor for loops 2 & 3\n:return: theta_hat = Output phase estimate of the input theta in radians,\n         ev = VCO control voltage,\n         phi = phase error = theta - theta_hat\n\nMark Wickert, April 2007 for ECE 5625/4625\nModified February 2008 and July 2014 for ECE 5675/4675\nPython version August 2014", "id": "f14907:m5"}
{"signature": "def PLL1(theta,fs,loop_type,Kv,fn,zeta,non_lin):", "body": "T = <NUM_LIT:1>/float(fs)<EOL>Kv = <NUM_LIT:2>*np.pi*Kv <EOL>if loop_type == <NUM_LIT:1>:<EOL><INDENT>K = <NUM_LIT:2>*np.pi*fn <EOL><DEDENT>elif loop_type == <NUM_LIT:2>:<EOL><INDENT>K = <NUM_LIT:4> *np.pi*zeta*fn <EOL>tau2 = zeta/(np.pi*fn)<EOL><DEDENT>elif loop_type == <NUM_LIT:3>:<EOL><INDENT>K = Kv <EOL>tau1 = K/((<NUM_LIT:2>*np.pi*fn)**<NUM_LIT:2>)<EOL>tau2 = <NUM_LIT:2>*zeta/(<NUM_LIT:2>*np.pi*fn)*(<NUM_LIT:1> - <NUM_LIT:2>*np.pi*fn/K*<NUM_LIT:1>/(<NUM_LIT:2>*zeta))<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>filt_in_last = <NUM_LIT:0>; filt_out_last = <NUM_LIT:0>;<EOL>vco_in_last = <NUM_LIT:0>; vco_out = <NUM_LIT:0>; vco_out_last = <NUM_LIT:0>;<EOL>n = np.arange(len(theta))<EOL>theta_hat = np.zeros_like(theta)<EOL>ev = np.zeros_like(theta)<EOL>phi = np.zeros_like(theta)<EOL>for k in  range(len(n)):<EOL><INDENT>phi[k] = theta[k] - vco_out<EOL>if non_lin == <NUM_LIT:1>:<EOL><INDENT>pd_out = np.sin(phi[k])<EOL><DEDENT>else:<EOL><INDENT>pd_out = phi[k]<EOL><DEDENT>gain_out = K/Kv*pd_out <EOL>if loop_type == <NUM_LIT:2>:<EOL><INDENT>filt_in = (<NUM_LIT:1>/tau2)*gain_out<EOL>filt_out = filt_out_last + T/<NUM_LIT:2>*(filt_in + filt_in_last)<EOL>filt_in_last = filt_in<EOL>filt_out_last = filt_out<EOL>filt_out = filt_out + gain_out<EOL><DEDENT>elif loop_type == <NUM_LIT:3>:<EOL><INDENT>filt_in = (tau2/tau1)*gain_out - (<NUM_LIT:1>/tau1)*filt_out_last<EOL>u3 = filt_in + (<NUM_LIT:1>/tau2)*filt_out_last<EOL>filt_out = filt_out_last + T/<NUM_LIT:2>*(filt_in + filt_in_last)<EOL>filt_in_last = filt_in<EOL>filt_out_last = filt_out<EOL><DEDENT>else:<EOL><INDENT>filt_out = gain_out;<EOL><DEDENT>vco_in = filt_out<EOL>if loop_type == <NUM_LIT:3>:<EOL><INDENT>vco_in = u3<EOL><DEDENT>vco_out = vco_out_last + T/<NUM_LIT:2>*(vco_in + vco_in_last)<EOL>vco_in_last = vco_in<EOL>vco_out_last = vco_out<EOL>vco_out = Kv*vco_out <EOL>ev[k] = vco_in<EOL>theta_hat[k] = vco_out<EOL><DEDENT>return 
theta_hat, ev, phi<EOL>", "docstring": "Baseband Analog PLL Simulation Model\n\n:param theta: input phase deviation in radians\n:param fs: sampling rate in sample per second or Hz\n:param loop_type: 1, first-order loop filter F(s)=K_LF; 2, integrator\n            with lead compensation F(s) = (1 + s tau2)/(s tau1),\n            i.e., a type II, or 3, lowpass with lead compensation\n            F(s) = (1 + s tau2)/(1 + s tau1)\n:param Kv: VCO gain in Hz/v; note presently assume Kp = 1v/rad\n            and K_LF = 1; the user can easily change this\n:param fn: Loop natural frequency (loops 2 & 3) or cutoff\n            frquency (loop 1)\n:param zeta: Damping factor for loops 2 & 3\n:param non_lin: 0, linear phase detector; 1, sinusoidal phase detector\n:return: theta_hat = Output phase estimate of the input theta in radians,\n         ev = VCO control voltage,\n         phi = phase error = theta - theta_hat\n\nNotes\n-----\nAlternate input in place of natural frequency, fn, in Hz is\nthe noise equivalent bandwidth Bn in Hz.\n\n\nMark Wickert, April 2007 for ECE 5625/4625\nModified February 2008 and July 2014 for ECE 5675/4675\nPython version August 2014", "id": "f14907:m4"}
{"signature": "def firwin_kaiser_hpf(f_stop, f_pass, d_stop, fs = <NUM_LIT:1.0>, N_bump=<NUM_LIT:0>):", "body": "<EOL>f_pass_eq = fs/<NUM_LIT> - f_pass<EOL>f_stop_eq = fs/<NUM_LIT> - f_stop<EOL>wc = <NUM_LIT:2>*np.pi*(f_pass_eq + f_stop_eq)/<NUM_LIT:2>/fs<EOL>delta_w = <NUM_LIT:2>*np.pi*(f_stop_eq - f_pass_eq)/fs<EOL>M = np.ceil((d_stop - <NUM_LIT:8>)/(<NUM_LIT>*delta_w))<EOL>M += N_bump<EOL>N_taps = M + <NUM_LIT:1><EOL>beta = signal.kaiser_beta(d_stop)<EOL>w_k = signal.kaiser(N_taps,beta)<EOL>n = np.arange(N_taps)<EOL>b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/<NUM_LIT:2>)) * w_k<EOL>b_k /= np.sum(b_k)<EOL>n = np.arange(len(b_k))<EOL>b_k *= (-<NUM_LIT:1>)**n<EOL>print('<STR_LIT>' % N_taps)<EOL>return b_k<EOL>", "docstring": "Design an FIR highpass filter using the sinc() kernel and\na Kaiser window. The filter order is determined based on \nf_pass Hz, f_stop Hz, and the desired stopband attenuation\nd_stop in dB, all relative to a sampling rate of fs Hz.\nNote: the passband ripple cannot be set independent of the\nstopband attenuation.\n\nMark Wickert October 2016", "id": "f14908:m3"}
{"signature": "def firwin_lpf(N_taps, fc, fs = <NUM_LIT:1.0>):", "body": "return signal.firwin(N_taps,<NUM_LIT:2>*fc/fs)<EOL>", "docstring": "Design a windowed FIR lowpass filter in terms of passband\ncritical frequencies f1 < f2 in Hz relative to sampling rate\nfs in Hz. The number of taps must be provided.\n\nMark Wickert October 2016", "id": "f14908:m0"}
{"signature": "def firwin_bpf(N_taps, f1, f2, fs = <NUM_LIT:1.0>, pass_zero=False):", "body": "return signal.firwin(N_taps,<NUM_LIT:2>*(f1,f2)/fs,pass_zero=pass_zero)<EOL>", "docstring": "Design a windowed FIR bandpass filter in terms of passband\ncritical frequencies f1 < f2 in Hz relative to sampling rate\nfs in Hz. The number of taps must be provided.\n\nMark Wickert October 2016", "id": "f14908:m1"}
{"signature": "def firwin_kaiser_bsf(f_stop1, f_pass1, f_pass2, f_stop2, d_stop, <EOL>fs = <NUM_LIT:1.0>, N_bump=<NUM_LIT:0>):", "body": "<EOL>f_pass = (f_pass2 - f_pass1)/<NUM_LIT:2><EOL>f_stop = (f_stop2 - f_stop1)/<NUM_LIT:2><EOL>wc = <NUM_LIT:2>*np.pi*(f_pass + f_stop)/<NUM_LIT:2>/fs<EOL>delta_w = <NUM_LIT:2>*np.pi*(f_stop - f_pass)/fs<EOL>M = np.ceil((d_stop - <NUM_LIT:8>)/(<NUM_LIT>*delta_w))<EOL>M += N_bump<EOL>if ((M+<NUM_LIT:1>)/<NUM_LIT>-int((M+<NUM_LIT:1>)/<NUM_LIT>)) == <NUM_LIT:0>:<EOL><INDENT>M += <NUM_LIT:1><EOL><DEDENT>N_taps = M + <NUM_LIT:1><EOL>beta = signal.kaiser_beta(d_stop)<EOL>w_k = signal.kaiser(N_taps,beta)<EOL>n = np.arange(N_taps)<EOL>b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/<NUM_LIT:2>)) * w_k<EOL>b_k /= np.sum(b_k)<EOL>f0 = (f_pass2 + f_pass1)/<NUM_LIT:2><EOL>w0 = <NUM_LIT:2>*np.pi*f0/fs<EOL>n = np.arange(len(b_k))<EOL>b_k_bs = <NUM_LIT:2>*b_k*np.cos(w0*(n-M/<NUM_LIT:2>))<EOL>b_k_bs = -b_k_bs<EOL>b_k_bs[int(M/<NUM_LIT:2>)] += <NUM_LIT:1> <EOL>print('<STR_LIT>' % N_taps)<EOL>return b_k_bs<EOL>", "docstring": "Design an FIR bandstop filter using the sinc() kernel and\na Kaiser window. The filter order is determined based on \nf_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the \ndesired stopband attenuation d_stop in dB for both stopbands,\nall relative to a sampling rate of fs Hz.\nNote: The passband ripple cannot be set independent of the\nstopband attenuation.\nNote: The filter order is forced to be even (odd number of taps)\nso there is a center tap that can be used to form 1 - H_BPF.\n\nMark Wickert October 2016", "id": "f14908:m5"}
{"signature": "def bandpass_order(f_stop1, f_pass1, f_pass2, f_stop2, dpass_dB, dstop_dB, fsamp = <NUM_LIT:1>):", "body": "dpass = <NUM_LIT:1> - <NUM_LIT:10>**(-dpass_dB/<NUM_LIT:20>)<EOL>dstop = <NUM_LIT:10>**(-dstop_dB/<NUM_LIT:20>)<EOL>Df1 = (f_pass1 - f_stop1)/fsamp<EOL>Df2 = (f_stop2 - f_pass2)/fsamp<EOL>b1 = <NUM_LIT><EOL>b2 = <NUM_LIT><EOL>b3 = -<NUM_LIT><EOL>b4 = <NUM_LIT><EOL>b5 = -<NUM_LIT><EOL>b6 = -<NUM_LIT><EOL>Df = min(Df1, Df2)<EOL>Cinf = np.log10(dstop)*(b1*np.log10(dpass)**<NUM_LIT:2> + b2*np.log10(dpass) + b3)+ (b4*np.log10(dpass)**<NUM_LIT:2> + b5*np.log10(dpass) + b6)<EOL>g = -<NUM_LIT>*np.log10(dpass/dstop) - <NUM_LIT><EOL>N = Cinf/Df + g*Df + <NUM_LIT:1><EOL>ff = <NUM_LIT:2>*np.array([<NUM_LIT:0>, f_stop1, f_pass1, f_pass2, f_stop2, fsamp/<NUM_LIT:2>])/fsamp<EOL>aa = np.array([<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>])<EOL>wts = np.array([dpass/dstop, <NUM_LIT:1>, dpass/dstop])<EOL>return int(N), ff, aa, wts<EOL>", "docstring": "Optimal FIR (equal ripple) Bandpass Order Determination\n\nText reference: Ifeachor, Digital Signal Processing a Practical Approach, \nsecond edition, Prentice Hall, 2002.\nJournal paper reference: F. Mintzer & B. Liu, Practical Design Rules for Optimum\nFIR Bandpass Digital Filters, IEEE Transactions on Acoustics and Speech, pp. \n204-206, April,1979.", "id": "f14908:m7"}
{"signature": "def lowpass_order(f_pass, f_stop, dpass_dB, dstop_dB, fsamp = <NUM_LIT:1>):", "body": "dpass = <NUM_LIT:1> - <NUM_LIT:10>**(-dpass_dB/<NUM_LIT:20>)<EOL>dstop = <NUM_LIT:10>**(-dstop_dB/<NUM_LIT:20>)<EOL>Df = (f_stop - f_pass)/fsamp<EOL>a1 = <NUM_LIT><EOL>a2 = <NUM_LIT><EOL>a3 = -<NUM_LIT><EOL>a4 = -<NUM_LIT><EOL>a5 = -<NUM_LIT><EOL>a6 = -<NUM_LIT><EOL>Dinf = np.log10(dstop)*(a1*np.log10(dpass)**<NUM_LIT:2> + a2*np.log10(dpass) + a3)+ (a4*np.log10(dpass)**<NUM_LIT:2> + a5*np.log10(dpass) + a6)<EOL>f = <NUM_LIT> + <NUM_LIT>*(np.log10(dpass) - np.log10(dstop))<EOL>N = Dinf/Df - f*Df + <NUM_LIT:1><EOL>ff = <NUM_LIT:2>*np.array([<NUM_LIT:0>, f_pass, f_stop, fsamp/<NUM_LIT:2>])/fsamp<EOL>aa = np.array([<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>])<EOL>wts = np.array([<NUM_LIT:1.0>, dpass/dstop])<EOL>return int(N), ff, aa, wts<EOL>", "docstring": "Optimal FIR (equal ripple) Lowpass Order Determination\n\nText reference: Ifeachor, Digital Signal Processing a Practical Approach, \nsecond edition, Prentice Hall, 2002.\nJournal paper reference: Herrmann et al., Practical Design Rules for Optimum\nFinite Impulse Response Digital Filters, Bell Syst. Tech. J., vol 52, pp. \n769-799, July-Aug., 1973.", "id": "f14908:m6"}
{"signature": "def am_tx(m,a_mod,fc=<NUM_LIT>):", "body": "m24 = interp24(m)<EOL>t192 = np.arange(len(m24))/<NUM_LIT><EOL>m_max = np.max(np.abs(m24))<EOL>x192 = (<NUM_LIT:1> + a_mod*m24/m_max)*np.cos(<NUM_LIT:2>*np.pi*fc*t192) <EOL>return x192, t192, m24<EOL>", "docstring": "AM transmitter for Case Study of Chapter 17.\n\nAssume input is sampled at 8 Ksps and upsampling\nby 24 is performed to arrive at fs_out = 192 Ksps.\n\nParameters\n----------\nm : ndarray of the input message signal \na_mod : AM modulation index, between 0 and 1\nfc : the carrier frequency in Hz\n\nReturns\n-------\nx192 : ndarray of the upsampled by 24 and modulated carrier\nt192 : ndarray of the upsampled by 24 time axis\nm24 : ndarray of the upsampled by 24 message signal\n\nNotes\n-----\nThe sampling rate of the input signal is assumed to be 8 kHz.\n\nExamples\n--------\n>>> n = arange(0,1000)\n>>> # 1 kHz message signal\n>>> m = cos(2*pi*1000/8000.*n)\n>>> x192, t192 = am_tx(m,0.8,fc=75e3)", "id": "f14910:m45"}
{"signature": "def ex6_2(n):", "body": "x = np.zeros(len(n))<EOL>for k, nn in enumerate(n):<EOL><INDENT>if nn >= -<NUM_LIT:2> and nn <= <NUM_LIT:5>:<EOL><INDENT>x[k] = <NUM_LIT:8> - nn<EOL><DEDENT><DEDENT>return x<EOL>", "docstring": "Generate a triangle pulse as described in Example 6-2\nof Chapter 6.\n\nYou need to supply an index array n that covers at least [-2, 5]. \nThe function returns the hard-coded signal of the example.\n\nParameters\n----------\nn : time index ndarray covering at least -2 to +5.\n\nReturns\n-------\nx : ndarray of signal samples in x\n\nExamples\n--------\n>>> import numpy as np\n>>> import matplotlib.pyplot as plt\n>>> from sk_dsp_comm import sigsys as ss\n>>> n = np.arange(-5,8)\n>>> x = ss.ex6_2(n)\n>>> plt.stem(n,x) # creates a stem plot of x vs n", "id": "f14910:m4"}
{"signature": "def interp24(x):", "body": "<EOL>b2,a2 = signal.butter(<NUM_LIT:10>,<NUM_LIT:1>/<NUM_LIT>)<EOL>y1 = upsample(x,<NUM_LIT:2>)<EOL>y1 = signal.lfilter(b2,a2,<NUM_LIT:2>*y1)<EOL>b3,a3 = signal.butter(<NUM_LIT:10>,<NUM_LIT:1>/<NUM_LIT>)<EOL>y2 = upsample(y1,<NUM_LIT:3>)<EOL>y2 = signal.lfilter(b3,a3,<NUM_LIT:3>*y2)<EOL>b4,a4 = signal.butter(<NUM_LIT:10>,<NUM_LIT:1>/<NUM_LIT>)<EOL>y3 = upsample(y2,<NUM_LIT:4>)<EOL>y3 = signal.lfilter(b4,a4,<NUM_LIT:4>*y3)<EOL>return y3<EOL>", "docstring": "Interpolate by L = 24 using Butterworth filters.\n\nThe interpolation is done using three stages. Upsample by \nL = 2 and lowpass filter, upsample by 3 and lowpass filter, then\nupsample by L = 4 and lowpass filter. In all cases the lowpass\nfilter is a 10th-order Butterworth lowpass.\n\nParameters\n----------\nx : ndarray of the input signal\n\nReturns\n-------\ny : ndarray of the output signal\n\nNotes\n-----\nThe cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to \ntrack the upsampling by 2, 3, and 4 respectively.\n\nExamples\n--------\n>>> y = interp24(x)", "id": "f14910:m49"}
{"signature": "def ten_band_eq_filt(x,GdB,Q=<NUM_LIT>):", "body": "fs = <NUM_LIT> <EOL>NB = len(GdB)<EOL>if not NB == <NUM_LIT:10>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>Fc = <NUM_LIT>*<NUM_LIT:2>**np.arange(NB)<EOL>B = np.zeros((NB,<NUM_LIT:3>))<EOL>A = np.zeros((NB,<NUM_LIT:3>))<EOL>for k in range(NB):<EOL><INDENT>[b,a] = peaking(GdB[k],Fc[k],Q)<EOL>B[k,:] = b<EOL>A[k,:] = a<EOL><DEDENT>y = np.zeros(len(x))<EOL>for k in range(NB):<EOL><INDENT>if k == <NUM_LIT:0>:<EOL><INDENT>y = signal.lfilter(B[k,:],A[k,:],x)<EOL><DEDENT>else:<EOL><INDENT>y = signal.lfilter(B[k,:],A[k,:],y)<EOL><DEDENT><DEDENT>return y<EOL>", "docstring": "Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB.\n\nThe signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and\nstopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate\nis assumed to be 44.1 kHz. \n\nParameters\n----------\nx : ndarray of the input signal samples\nGdB : ndarray containing ten octave band gain values [G0dB,...,G9dB]\nQ : Quality factor vector for each of the NB peaking filters\n\nReturns\n-------\ny : ndarray of output signal samples\n\nExamples\n--------\n>>> # Test with white noise\n>>> w = randn(100000)\n>>> y = ten_band_eq_filt(x,GdB)\n>>> psd(y,2**10,44.1)", "id": "f14910:m1"}
{"signature": "def biquad2(w_num, r_num, w_den, r_den):", "body": "b = np.array([<NUM_LIT:1>, -<NUM_LIT:2>*r_num*np.cos(w_num), r_num**<NUM_LIT:2>])<EOL>a = np.array([<NUM_LIT:1>, -<NUM_LIT:2>*r_den*np.cos(w_den), r_den**<NUM_LIT:2>])<EOL>return b, a<EOL>", "docstring": "A biquadratic filter in terms of conjugate pole and zero pairs.\n\nParameters\n----------\nw_num : zero frequency (angle) in rad/sample\nr_num : conjugate zeros radius\nw_den : pole frequency (angle) in rad/sample\nr_den : conjugate poles radius; less than 1 for stability\n\nReturns\n-------\nb : ndarray of numerator coefficients\na : ndarray of denominator coefficients\n\nExamples\n--------\n>>> b,a = biquad2(pi/4., 1, pi/4., 0.95)", "id": "f14910:m56"}
{"signature": "def ft_approx(x,t,Nfft):", "body": "fs = <NUM_LIT:1>/(t[<NUM_LIT:1>] - t[<NUM_LIT:0>])<EOL>t0 = (t[-<NUM_LIT:1>]+t[<NUM_LIT:0>])/<NUM_LIT:2> <EOL>N0 = len(t)/<NUM_LIT:2> <EOL>f = np.arange(-<NUM_LIT:1.>/<NUM_LIT:2>,<NUM_LIT:1.>/<NUM_LIT:2>,<NUM_LIT:1.>/Nfft)<EOL>w, X = signal.freqz(x,<NUM_LIT:1>,<NUM_LIT:2>*np.pi*f)<EOL>X /= fs <EOL>X *= np.exp(-<NUM_LIT>*<NUM_LIT:2>*np.pi*f*fs*t0)<EOL>X *= np.exp(<NUM_LIT>*<NUM_LIT:2>*np.pi*f*N0)<EOL>F = f*fs<EOL>return F, X<EOL>", "docstring": "Approximate the Fourier transform of a finite duration signal using scipy.signal.freqz()\n\nParameters\n----------\nx : input signal array\nt : time array used to create x(t)\nNfft : the number of frequency domain points used to \n       approximate X(f) on the interval [-fs/2,fs/2], where\n       fs = 1/Dt. Dt being the time spacing in array t\n\nReturns\n-------\nf : frequency axis array in Hz\nX : the Fourier transform approximation (complex)\n\nNotes\n-----\nThe output time axis starts at the sum of the starting values in x1 and x2 \nand ends at the sum of the two ending values in x1 and x2. The default \nextents of ('f','f') are used for signals that are active (have support) \non or within n1 and n2 respectively. 
A right-sided signal such as \n:math:`a^n*u[n]` is semi-infinite, so it has extent 'r' and the\nconvolution output will be truncated to display only the valid results.\n\nExamples\n--------\n>>> import matplotlib.pyplot as plt\n>>> import numpy as np\n>>> import sk_dsp_comm.sigsys as ss\n>>> fs = 100 # sampling rate in Hz\n>>> tau = 1\n>>> t = np.arange(-5,5,1/fs)\n>>> x0 = ss.rect(t-.5,tau)\n>>> plt.figure(figsize=(6,5))\n>>> plt.plot(t,x0)\n>>> plt.grid()\n>>> plt.ylim([-0.1,1.1])\n>>> plt.xlim([-2,2])\n>>> plt.title(r'Exact Waveform')\n>>> plt.xlabel(r'Time (s)')\n>>> plt.ylabel(r'$x_0(t)$')\n>>> plt.show()\n\n>>> # FT Exact Plot\n>>> import matplotlib.pyplot as plt\n>>> import numpy as np\n>>> import sk_dsp_comm.sigsys as ss\n>>> fs = 100 # sampling rate in Hz\n>>> tau = 1\n>>> t = np.arange(-5,5,1/fs)\n>>> x0 = ss.rect(t-.5,tau)\n>>> fe = np.arange(-10,10,.01)\n>>> X0e = tau*np.sinc(fe*tau)\n>>> plt.plot(fe,abs(X0e))\n>>> #plot(f,angle(X0))\n>>> plt.grid()\n>>> plt.xlim([-10,10])\n>>> plt.title(r'Exact (Theory) Spectrum Magnitude')\n>>> plt.xlabel(r'Frequency (Hz)')\n>>> plt.ylabel(r'$|X_0e(f)|$')\n>>> plt.show()\n\n>>> # FT Approximation Plot\n>>> import matplotlib.pyplot as plt\n>>> import numpy as np\n>>> import sk_dsp_comm.sigsys as ss\n>>> fs = 100 # sampling rate in Hz\n>>> tau = 1\n>>> t = np.arange(-5,5,1/fs)\n>>> x0 = ss.rect(t-.5,tau)\n>>> f,X0 = ss.ft_approx(x0,t,4096)\n>>> plt.plot(f,abs(X0))\n>>> #plt.plot(f,angle(X0))\n>>> plt.grid()\n>>> plt.xlim([-10,10])\n>>> plt.title(r'Approximation Spectrum Magnitude')\n>>> plt.xlabel(r'Frequency (Hz)')\n>>> plt.ylabel(r'$|X_0(f)|$');\n>>> plt.tight_layout()\n>>> plt.show()", "id": "f14910:m23"}
{"signature": "def line_spectra(fk,Xk,mode,sides=<NUM_LIT:2>,linetype='<STR_LIT:b>',lwidth=<NUM_LIT:2>,floor_dB=-<NUM_LIT:100>,fsize=(<NUM_LIT:6>,<NUM_LIT:4>)):", "body": "plt.figure(figsize=fsize)<EOL>idx = np.nonzero(Xk)[<NUM_LIT:0>]<EOL>Xk = Xk[idx]<EOL>fk = fk[idx]<EOL>if mode == '<STR_LIT>':<EOL><INDENT>for k in range(len(fk)):<EOL><INDENT>if fk[k] == <NUM_LIT:0> and sides == <NUM_LIT:2>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[<NUM_LIT:0>, np.abs(Xk[k])],linetype, linewidth=lwidth)<EOL><DEDENT>elif fk[k] == <NUM_LIT:0> and sides == <NUM_LIT:1>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[<NUM_LIT:0>, np.abs(Xk[k])],linetype, linewidth=<NUM_LIT:2>*lwidth)            <EOL><DEDENT>elif fk[k] > <NUM_LIT:0> and sides == <NUM_LIT:2>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[<NUM_LIT:0>, np.abs(Xk[k])],linetype, linewidth=lwidth)<EOL>plt.plot([-fk[k], -fk[k]],[<NUM_LIT:0>, np.abs(Xk[k])],linetype, linewidth=lwidth)<EOL><DEDENT>elif fk[k] > <NUM_LIT:0> and sides == <NUM_LIT:1>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[<NUM_LIT:0>, <NUM_LIT>*np.abs(Xk[k])],linetype, linewidth=lwidth)<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>plt.grid()<EOL>if sides == <NUM_LIT:2>:<EOL><INDENT>plt.axis([-<NUM_LIT>*max(fk), <NUM_LIT>*max(fk), <NUM_LIT:0>, <NUM_LIT>*max(abs(Xk))])        <EOL><DEDENT>elif sides == <NUM_LIT:1>:<EOL><INDENT>plt.axis([<NUM_LIT:0>, <NUM_LIT>*max(fk), <NUM_LIT:0>, <NUM_LIT>*<NUM_LIT:2>*max(abs(Xk))])<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>plt.ylabel('<STR_LIT>')<EOL>plt.xlabel('<STR_LIT>')<EOL><DEDENT>elif mode == '<STR_LIT>':<EOL><INDENT>Xk_dB = <NUM_LIT:20>*np.log10(np.abs(Xk))<EOL>for k in range(len(fk)):<EOL><INDENT>if fk[k] == <NUM_LIT:0> and sides == <NUM_LIT:2>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)<EOL><DEDENT>elif fk[k] == <NUM_LIT:0> and sides == <NUM_LIT:1>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=<NUM_LIT:2>*lwidth)            
<EOL><DEDENT>elif fk[k] > <NUM_LIT:0> and sides == <NUM_LIT:2>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)<EOL>plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)<EOL><DEDENT>elif fk[k] > <NUM_LIT:0> and sides == <NUM_LIT:1>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+<NUM_LIT>],linetype, linewidth=lwidth)<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>plt.grid()<EOL>max_dB = np.ceil(max(Xk_dB/<NUM_LIT>))*<NUM_LIT:10><EOL>min_dB = max(floor_dB,np.floor(min(Xk_dB/<NUM_LIT>))*<NUM_LIT:10>)<EOL>if sides == <NUM_LIT:2>:<EOL><INDENT>plt.axis([-<NUM_LIT>*max(fk), <NUM_LIT>*max(fk), min_dB, max_dB])        <EOL><DEDENT>elif sides == <NUM_LIT:1>:<EOL><INDENT>plt.axis([<NUM_LIT:0>, <NUM_LIT>*max(fk), min_dB, max_dB])<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>plt.ylabel('<STR_LIT>')<EOL>plt.xlabel('<STR_LIT>')<EOL><DEDENT>elif mode == '<STR_LIT>':<EOL><INDENT>Xk_dB = <NUM_LIT:20>*np.log10(np.abs(Xk)/max(np.abs(Xk)))<EOL>for k in range(len(fk)):<EOL><INDENT>if fk[k] == <NUM_LIT:0> and sides == <NUM_LIT:2>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)<EOL><DEDENT>elif fk[k] == <NUM_LIT:0> and sides == <NUM_LIT:1>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=<NUM_LIT:2>*lwidth)            <EOL><DEDENT>elif fk[k] > <NUM_LIT:0> and sides == <NUM_LIT:2>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)<EOL>plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)<EOL><DEDENT>elif fk[k] > <NUM_LIT:0> and sides == <NUM_LIT:1>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+<NUM_LIT>],linetype, linewidth=lwidth)<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>plt.grid()<EOL>max_dB = np.ceil(max(Xk_dB/<NUM_LIT>))*<NUM_LIT:10><EOL>min_dB = max(floor_dB,np.floor(min(Xk_dB/<NUM_LIT>))*<NUM_LIT:10>)<EOL>if sides == 
<NUM_LIT:2>:<EOL><INDENT>plt.axis([-<NUM_LIT>*max(fk), <NUM_LIT>*max(fk), min_dB, max_dB])        <EOL><DEDENT>elif sides == <NUM_LIT:1>:<EOL><INDENT>plt.axis([<NUM_LIT:0>, <NUM_LIT>*max(fk), min_dB, max_dB])<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>plt.ylabel('<STR_LIT>')<EOL>plt.xlabel('<STR_LIT>')    <EOL><DEDENT>elif mode == '<STR_LIT>':<EOL><INDENT>for k in range(len(fk)):<EOL><INDENT>if fk[k] == <NUM_LIT:0> and sides == <NUM_LIT:2>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[<NUM_LIT:0>, np.angle(Xk[k])],linetype, linewidth=lwidth)<EOL><DEDENT>elif fk[k] == <NUM_LIT:0> and sides == <NUM_LIT:1>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[<NUM_LIT:0>, np.angle(Xk[k])],linetype, linewidth=<NUM_LIT:2>*lwidth)<EOL><DEDENT>elif fk[k] > <NUM_LIT:0> and sides == <NUM_LIT:2>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[<NUM_LIT:0>, np.angle(Xk[k])],linetype, linewidth=lwidth)<EOL>plt.plot([-fk[k], -fk[k]],[<NUM_LIT:0>, -np.angle(Xk[k])],linetype, linewidth=lwidth)<EOL><DEDENT>elif fk[k] > <NUM_LIT:0> and sides == <NUM_LIT:1>:<EOL><INDENT>plt.plot([fk[k], fk[k]],[<NUM_LIT:0>, np.angle(Xk[k])],linetype, linewidth=lwidth)<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>plt.grid()<EOL>if sides == <NUM_LIT:2>:<EOL><INDENT>plt.plot([-<NUM_LIT>*max(fk), <NUM_LIT>*max(fk)], [<NUM_LIT:0>, <NUM_LIT:0>],'<STR_LIT:k>')<EOL>plt.axis([-<NUM_LIT>*max(fk), <NUM_LIT>*max(fk), -<NUM_LIT>*max(np.abs(np.angle(Xk))), <NUM_LIT>*max(np.abs(np.angle(Xk)))])<EOL><DEDENT>elif sides == <NUM_LIT:1>:<EOL><INDENT>plt.plot([<NUM_LIT:0>, <NUM_LIT>*max(fk)], [<NUM_LIT:0>, <NUM_LIT:0>],'<STR_LIT:k>')<EOL>plt.axis([<NUM_LIT:0>, <NUM_LIT>*max(fk), -<NUM_LIT>*max(np.abs(np.angle(Xk))), <NUM_LIT>*max(np.abs(np.angle(Xk)))])<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>plt.ylabel('<STR_LIT>')<EOL>plt.xlabel('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>", "docstring": "Plot the Fouier series line spectral given the coefficients.\n\nThis 
function plots two-sided and one-sided line spectra of a periodic\nsignal given the complex exponential Fourier series coefficients and\nthe corresponding harmonic frequencies.\n\nParameters\n----------\nfk : vector of real sinusoid frequencies\nXk : magnitude and phase at each positive frequency in fk\nmode : 'mag' => magnitude plot, 'magdB' => magnitude in dB plot,\nmode cont : 'magdBn' => magnitude in dB normalized, 'phase' => a phase plot in radians \nsides : 2; 2-sided or 1-sided\nlinetype : line type per Matplotlib definitions, e.g., 'b';\nlwidth : 2; linewidth in points\nfsize : optional figure size in inches, default = (6,4) inches \n\nReturns\n-------\nNothing : A plot window opens containing the line spectrum plot\n\nNotes\n-----\nSince real signals are assumed the frequencies of fk are 0 and/or positive\nnumbers. The supplied Fourier coefficients correspond.\n\nExamples\n--------\n>>> import matplotlib.pyplot as plt\n>>> import numpy as np\n>>> from sk_dsp_comm.sigsys import line_spectra\n>>> n = np.arange(0,25)\n>>> # a pulse train with 10 Hz fundamental and 20% duty cycle\n>>> fk = n*10\n>>> Xk = np.sinc(n*10*.02)*np.exp(-1j*2*np.pi*n*10*.01) # 1j = sqrt(-1)\n\n>>> line_spectra(fk,Xk,'mag')\n>>> plt.show()\n\n>>> line_spectra(fk,Xk,'phase')", "id": "f14910:m20"}
{"signature": "def conv_sum(x1,nx1,x2,nx2,extent=('<STR_LIT:f>','<STR_LIT:f>')):", "body": "nnx1 = np.arange(<NUM_LIT:0>,len(nx1))<EOL>nnx2 = np.arange(<NUM_LIT:0>,len(nx2))<EOL>n1 = nnx1[<NUM_LIT:0>]<EOL>n2 = nnx1[-<NUM_LIT:1>]<EOL>n3 = nnx2[<NUM_LIT:0>]<EOL>n4 = nnx2[-<NUM_LIT:1>]<EOL>if extent[<NUM_LIT:0>] == '<STR_LIT:f>' and extent[<NUM_LIT:1>] == '<STR_LIT:f>':<EOL><INDENT>nny = np.arange(n1+n3,n2+<NUM_LIT:1>+n4+<NUM_LIT:1>-<NUM_LIT:1>)<EOL>ny = np.arange(<NUM_LIT:0>,len(x1)+len(x2)-<NUM_LIT:1>) + nx1[<NUM_LIT:0>]+nx2[<NUM_LIT:0>]<EOL><DEDENT>elif extent[<NUM_LIT:0>] == '<STR_LIT:f>' and extent[<NUM_LIT:1>] == '<STR_LIT:r>':<EOL><INDENT>nny = np.arange(n1+n3,n1+<NUM_LIT:1>+n4+<NUM_LIT:1>-<NUM_LIT:1>)<EOL>ny = nny + nx1[<NUM_LIT:0>]+nx2[<NUM_LIT:0>]<EOL><DEDENT>elif extent[<NUM_LIT:0>] == '<STR_LIT:r>' and extent[<NUM_LIT:1>] == '<STR_LIT:f>':<EOL><INDENT>nny = np.arange(n1+n3,n2+<NUM_LIT:1>+n3+<NUM_LIT:1>-<NUM_LIT:1>)<EOL>ny = nny + nx1[<NUM_LIT:0>]+nx2[<NUM_LIT:0>]<EOL><DEDENT>elif extent[<NUM_LIT:0>] == '<STR_LIT:f>' and extent[<NUM_LIT:1>] == '<STR_LIT:l>':<EOL><INDENT>nny = np.arange(n2+n3,n2+<NUM_LIT:1>+n4+<NUM_LIT:1>-<NUM_LIT:1>)<EOL>ny = nny + nx1[-<NUM_LIT:1>]+nx2[<NUM_LIT:0>]<EOL><DEDENT>elif extent[<NUM_LIT:0>] == '<STR_LIT:l>' and extent[<NUM_LIT:1>] == '<STR_LIT:f>':<EOL><INDENT>nny = np.arange(n1+n4,n2+<NUM_LIT:1>+n4+<NUM_LIT:1>-<NUM_LIT:1>)<EOL>ny = nny + nx1[<NUM_LIT:0>]+nx2[-<NUM_LIT:1>]<EOL><DEDENT>elif extent[<NUM_LIT:0>] == '<STR_LIT:r>' and extent[<NUM_LIT:1>] == '<STR_LIT:r>':<EOL><INDENT>nny = np.arange(n1+n3,min(n1+<NUM_LIT:1>+n4+<NUM_LIT:1>,n2+<NUM_LIT:1>+n3+<NUM_LIT:1>)-<NUM_LIT:1>)<EOL>ny = nny + nx1[<NUM_LIT:0>]+nx2[<NUM_LIT:0>]<EOL><DEDENT>elif extent[<NUM_LIT:0>] == '<STR_LIT:l>' and extent[<NUM_LIT:1>] == '<STR_LIT:l>':<EOL><INDENT>nny = np.arange(max(n1+n4,n2+n3),n2+<NUM_LIT:1>+n4+<NUM_LIT:1>-<NUM_LIT:1>)<EOL>ny = nny + max(nx1[<NUM_LIT:0>]+nx2[-<NUM_LIT:1>],nx1[-<NUM_LIT:1>]+nx2[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>raise 
ValueError('<STR_LIT>')<EOL><DEDENT>y = signal.convolve(x1, x2)<EOL>print('<STR_LIT>' % (ny[<NUM_LIT:0>],ny[-<NUM_LIT:1>]))<EOL>return y[nny], ny<EOL>", "docstring": "Discrete convolution of x1 and x2 with proper tracking of the output time axis.\n\nConvolve two discrete-time signals using the SciPy function :func:`scipy.signal.convolution`.\nThe time (sequence axis) are managed from input to output. y[n] = x1[n]*x2[n].\n\nParameters\n----------\nx1 : ndarray of signal x1 corresponding to nx1\nnx1 : ndarray time axis for x1\nx2  : ndarray of signal x2 corresponding to nx2\nnx2 : ndarray time axis for x2\nextent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided\n\nReturns\n-------\ny : ndarray of output values y\nny : ndarray of the corresponding sequence index n\n\nNotes\n-----\nThe output time axis starts at the sum of the starting values in x1 and x2 \nand ends at the sum of the two ending values in x1 and x2. The default \nextents of ('f','f') are used for signals that are active (have support) \non or within n1 and n2 respectively. A right-sided signal such as \na^n*u[n] is semi-infinite, so it has extent 'r' and the\nconvolution output will be truncated to display only the valid results.\n\nExamples\n--------\n>>> import matplotlib.pyplot as plt\n>>> import numpy as np\n>>> import sk_dsp_comm.sigsys as ss\n>>> nx = np.arange(-5,10)\n>>> x = ss.drect(nx,4)\n>>> y,ny = ss.conv_sum(x,nx,x,nx)\n>>> plt.stem(ny,y)\n>>> plt.show()\n\nConsider a pulse convolved with an exponential. ('r' type extent)\n\n>>> h = 0.5**nx*ss.dstep(nx)\n>>> y,ny = ss.conv_sum(x,nx,h,nx,('f','r')) # note extents set\n>>> plt.stem(ny,y) # expect a pulse charge and discharge sequence", "id": "f14910:m24"}
{"signature": "def downsample(x,M,p=<NUM_LIT:0>):", "body": "if not isinstance(M, int):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>x = x[<NUM_LIT:0>:int(np.floor(len(x)/M))*M]<EOL>x = x.reshape((int(np.floor(len(x)/M)),M))<EOL>y = x[:,p]<EOL>return y<EOL>", "docstring": "Downsample by factor M\n\nKeep every Mth sample of the input. The phase of the input samples\nkept can be selected.\n\nParameters\n----------\nx : ndarray of input signal values\nM : downsample factor\np : phase of decimated value, 0 (default), 1, ..., M-1\n\nReturns\n-------\ny : ndarray of the output signal values\n\nExamples\n--------\n>>> y = downsample(x,3)\n>>> y = downsample(x,3,1)", "id": "f14910:m52"}
{"signature": "def peaking(GdB, fc, Q=<NUM_LIT>, fs=<NUM_LIT>):", "body": "mu = <NUM_LIT:10>**(GdB/<NUM_LIT>)<EOL>kq = <NUM_LIT:4>/(<NUM_LIT:1> + mu)*np.tan(<NUM_LIT:2>*np.pi*fc/fs/(<NUM_LIT:2>*Q))<EOL>Cpk = (<NUM_LIT:1> + kq *mu)/(<NUM_LIT:1> + kq)<EOL>b1 = -<NUM_LIT:2>*np.cos(<NUM_LIT:2>*np.pi*fc/fs)/(<NUM_LIT:1> + kq*mu)<EOL>b2 = (<NUM_LIT:1> - kq*mu)/(<NUM_LIT:1> + kq*mu)<EOL>a1 = -<NUM_LIT:2>*np.cos(<NUM_LIT:2>*np.pi*fc/fs)/(<NUM_LIT:1> + kq)<EOL>a2 = (<NUM_LIT:1> - kq)/(<NUM_LIT:1> + kq)<EOL>b = Cpk*np.array([<NUM_LIT:1>, b1, b2])<EOL>a = np.array([<NUM_LIT:1>, a1, a2])<EOL>return b,a<EOL>", "docstring": "A second-order peaking filter having GdB gain at fc and approximately\n0 dB otherwise.\n\nThe filter coefficients returned correspond to a biquadratic system function\ncontaining five parameters.\n\nParameters\n----------\nGdB : Lowpass gain in dB\nfc : Center frequency in Hz\nQ : Filter Q which is inversely proportional to bandwidth\nfs : Sampling frequency in Hz\n\nReturns\n-------\nb : ndarray containing the numerator filter coefficients\na : ndarray containing the denominator filter coefficients\n\nExamples\n--------\n>>> import matplotlib.pyplot as plt\n>>> import numpy as np\n>>> from sk_dsp_comm.sigsys import peaking\n>>> from scipy import signal\n>>> b,a = peaking(2.0,500)\n>>> f = np.logspace(1,5,400)\n>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)\n>>> plt.semilogx(f,20*np.log10(abs(H)))\n>>> plt.ylabel(\"Power Spectral Density (dB)\")\n>>> plt.xlabel(\"Frequency (Hz)\")\n>>> plt.show()\n\n>>> b,a = peaking(-5.0,500,4)\n>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)\n>>> plt.semilogx(f,20*np.log10(abs(H)))\n>>> plt.ylabel(\"Power Spectral Density (dB)\")\n>>> plt.xlabel(\"Frequency (Hz)\")", "id": "f14910:m3"}
{"signature": "def rect(t,tau):", "body": "x = np.zeros(len(t))<EOL>for k,tk in enumerate(t):<EOL><INDENT>if np.abs(tk) > tau/<NUM_LIT>:<EOL><INDENT>x[k] = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>x[k] = <NUM_LIT:1><EOL><DEDENT><DEDENT>return x<EOL>", "docstring": "Approximation to the rectangle pulse Pi(t/tau).\n\nIn this numerical version of Pi(t/tau) the pulse is active\nover -tau/2 <= t <= tau/2.\n\nParameters\n----------\nt : ndarray of the time axis\ntau : the pulse width\n\nReturns\n-------\nx : ndarray of the signal Pi(t/tau)\n\nExamples\n--------\n>>> import matplotlib.pyplot as plt\n>>> from numpy import arange\n>>> from sk_dsp_comm.sigsys import rect\n>>> t = arange(-1,5,.01)\n>>> x = rect(t,1.0)\n>>> plt.plot(t,x)\n>>> plt.ylim([0, 1.01])\n>>> plt.show()\n\nTo turn on the pulse at t = 1 shift t.\n\n>>> x = rect(t - 1.0,1.0)\n>>> plt.plot(t,x)\n>>> plt.ylim([0, 1.01])", "id": "f14910:m28"}
{"signature": "def m_seq(m):", "body": "if m == <NUM_LIT:2>:<EOL><INDENT>taps = np.array([<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>])<EOL><DEDENT>elif m == <NUM_LIT:3>:<EOL><INDENT>taps = np.array([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>])<EOL><DEDENT>elif m == <NUM_LIT:4>:<EOL><INDENT>taps = np.array([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>])<EOL><DEDENT>elif m == <NUM_LIT:5>:<EOL><INDENT>taps = np.array([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>])<EOL><DEDENT>elif m == <NUM_LIT:6>:<EOL><INDENT>taps = np.array([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>])<EOL><DEDENT>elif m == <NUM_LIT:7>:<EOL><INDENT>taps = np.array([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>])<EOL><DEDENT>elif m == <NUM_LIT:8>:<EOL><INDENT>taps = np.array([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>])<EOL><DEDENT>elif m == <NUM_LIT:9>:<EOL><INDENT>taps = np.array([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>])<EOL><DEDENT>elif m == <NUM_LIT:10>:<EOL><INDENT>taps = np.array([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>])<EOL><DEDENT>elif m == <NUM_LIT:11>:<EOL><INDENT>taps = np.array([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>])<EOL><DEDENT>elif m == <NUM_LIT:12>:<EOL><INDENT>taps = np.array([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>])<EOL><DEDENT>elif m == <NUM_LIT:16>:<EOL><INDENT>taps = np.array([<NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:1>, 
<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>sr = np.ones(m)<EOL>Q = <NUM_LIT:2>**m - <NUM_LIT:1><EOL>c = np.zeros(Q)<EOL>for n in range(Q):<EOL><INDENT>tap_xor = <NUM_LIT:0><EOL>c[n] = sr[-<NUM_LIT:1>]<EOL>for k in range(<NUM_LIT:1>,m):<EOL><INDENT>if taps[k] == <NUM_LIT:1>:<EOL><INDENT>tap_xor = np.bitwise_xor(tap_xor,np.bitwise_xor(int(sr[-<NUM_LIT:1>]),int(sr[m-<NUM_LIT:1>-k])))<EOL><DEDENT><DEDENT>sr[<NUM_LIT:1>:] = sr[:-<NUM_LIT:1>]<EOL>sr[<NUM_LIT:0>] = tap_xor<EOL><DEDENT>return c<EOL>", "docstring": "Generate an m-sequence ndarray using an all-ones initialization.\n\nAvailable m-sequence (PN generators) include m = 2,3,...,12, & 16.\n\nParameters\n----------\nm : the number of shift registers. 2,3, .., 12, & 16\n\nReturns\n-------\nc : ndarray of one period of the m-sequence\n\nNotes\n-----\nThe sequence period is 2**m - 1 (2^m - 1).  \n\nExamples\n--------\n>>> c = m_seq(5)", "id": "f14910:m36"}
{"signature": "def lms_ic(r,M,mu,delta=<NUM_LIT:1>):", "body": "N = len(r)-<NUM_LIT:1>;<EOL>y = signal.lfilter(np.hstack((np.zeros(delta), np.array([<NUM_LIT:1>]))),<NUM_LIT:1>,r)<EOL>r_hat = np.zeros(N+<NUM_LIT:1>)<EOL>e = np.zeros(N+<NUM_LIT:1>)<EOL>ao = np.zeros(M+<NUM_LIT:1>)<EOL>z = np.zeros(M)<EOL>ym = np.zeros(M+<NUM_LIT:1>)<EOL>for k in range(N+<NUM_LIT:1>):<EOL><INDENT>r_hat[k],z = signal.lfilter(ao,np.array([<NUM_LIT:1>]),np.array([y[k]]),zi=z)<EOL>e[k] = r[k] - r_hat[k]<EOL>ao = ao + <NUM_LIT:2>*mu*e[k]*ym<EOL>ym = np.hstack((np.array([y[k]]), ym[:-<NUM_LIT:1>]))<EOL><DEDENT>F, Ao = signal.freqz(ao,<NUM_LIT:1>,<NUM_LIT>)<EOL>F/= (<NUM_LIT:2>*np.pi)<EOL>Ao = <NUM_LIT:20>*np.log10(abs(Ao))<EOL>return np.arange(<NUM_LIT:0>,N+<NUM_LIT:1>), r, r_hat, e, ao, F, Ao<EOL>", "docstring": "Least mean square (LMS) interference canceller adaptive filter.\n\nA complete LMS adaptive filter simulation function for the case of\ninterference cancellation. Used in the digital filtering case study.\n\nParameters\n----------\nM : FIR Filter length (order M-1)\ndelta : Delay used to generate the reference signal\nmu : LMS step-size\ndelta : decorrelation delay between input and FIR filter input\n\nReturns\n-------\nn : ndarray Index vector\nr : ndarray noisy (with interference) input signal\nr_hat : ndarray filtered output (NB_hat[n])\ne : ndarray error sequence (WB_hat[n])\nao : ndarray final value of weight vector\nF : ndarray frequency response axis vector\nAo : ndarray frequency response of filter\n\nExamples\n----------\n>>> # import a speech signal\n>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')\n>>> # add interference at 1kHz and 1.5 kHz and\n>>> # truncate to 5 seconds\n>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])\n>>> # simulate with a 64 tap FIR and mu = 0.005\n>>> n,r,r_hat,e,ao,F,Ao = lms_ic(r,64,0.005)", "id": "f14910:m17"}
{"signature": "def BPSK_tx(N_bits,Ns,ach_fc=<NUM_LIT>,ach_lvl_dB=-<NUM_LIT:100>,pulse='<STR_LIT>',alpha = <NUM_LIT>,M=<NUM_LIT:6>):", "body": "pulse_types = ['<STR_LIT>', '<STR_LIT:src>']<EOL>if pulse not in pulse_types:<EOL><INDENT>raise ValueError('<STR_LIT>''<STR_LIT>')<EOL><DEDENT>x0,b,data0 = NRZ_bits(N_bits,Ns,pulse,alpha,M)<EOL>x1p,b,data1p = NRZ_bits(N_bits,Ns,pulse,alpha,M)<EOL>x1m,b,data1m = NRZ_bits(N_bits,Ns,pulse,alpha,M)<EOL>n = np.arange(len(x0))<EOL>x1p = x1p*np.exp(<NUM_LIT>*<NUM_LIT:2>*np.pi*ach_fc/float(Ns)*n)<EOL>x1m = x1m*np.exp(-<NUM_LIT>*<NUM_LIT:2>*np.pi*ach_fc/float(Ns)*n)<EOL>ach_lvl = <NUM_LIT:10>**(ach_lvl_dB/<NUM_LIT>)<EOL>return x0 + ach_lvl*(x1p + x1m), b, data0<EOL>", "docstring": "Generates biphase shift keyed (BPSK) transmitter with adjacent channel interference.\n\nGenerates three BPSK signals with rectangular or square root raised cosine (SRC) \npulse shaping of duration N_bits and Ns samples per bit. The desired signal is\ncentered on f = 0, which the adjacent channel signals to the left and right\nare also generated at dB level relative to the desired signal. Used in the \ndigital communications Case Study supplement.\n\nParameters\n----------\nN_bits : the number of bits to simulate\nNs : the number of samples per bit\nach_fc : the frequency offset of the adjacent channel signals (default 2.0)\nach_lvl_dB : the level of the adjacent channel signals in dB (default -100)\npulse :the pulse shape 'rect' or 'src'\nalpha : square root raised cosine pulse shape factor (default = 0.25)\nM : square root raised cosine pulse truncation factor (default = 6)\n\nReturns\n-------\nx : ndarray of the composite signal x0 + ach_lvl*(x1p + x1m)\nb : the transmit pulse shape \ndata0 : the data bits used to form the desired signal; used for error checking\n\nNotes\n-----\n\nExamples\n--------\n>>> x,b,data0 = BPSK_tx(1000,10,pulse='src')", "id": "f14910:m37"}
{"signature": "def drect(n,N):", "body": "x = np.zeros(len(n))<EOL>for k,nn in enumerate(n):<EOL><INDENT>if nn >= <NUM_LIT:0> and nn < N:<EOL><INDENT>x[k] = <NUM_LIT:1.0><EOL><DEDENT><DEDENT>return x<EOL>", "docstring": "Discrete rectangle function of duration N samples.\n\nThe signal is active on the interval 0 <= n <= N-1. Also known\nas the rectangular window function, which is available in \nscipy.signal.\n\nParameters\n----------\nn : ndarray of the time axis\nN : the pulse duration\n\nReturns\n-------\nx : ndarray of the signal\n\nNotes\n-----\nThe discrete rectangle turns on at n = 0, off at n = N-1 and \nhas duration of exactly N samples.\n\nExamples\n--------\n>>> import matplotlib.pyplot as plt\n>>> from numpy import arange\n>>> from sk_dsp_comm.sigsys import drect\n>>> n = arange(-5,5)\n>>> x = drect(n, N=3)\n>>> plt.stem(n,x)\n>>> plt.show()\n\nShift the delta left by 2.\n\n>>> x = drect(n+2, N=3)\n>>> plt.stem(n,x)", "id": "f14910:m32"}
{"signature": "def unique_cpx_roots(rlist,tol = <NUM_LIT>):", "body": "uniq = [rlist[<NUM_LIT:0>]]<EOL>mult = [<NUM_LIT:1>]<EOL>for k in range(<NUM_LIT:1>,len(rlist)):<EOL><INDENT>N_uniq = len(uniq)<EOL>for m in range(N_uniq):<EOL><INDENT>if abs(rlist[k]-uniq[m]) <= tol:<EOL><INDENT>mult[m] += <NUM_LIT:1><EOL>uniq[m] = (uniq[m]*(mult[m]-<NUM_LIT:1>) + rlist[k])/float(mult[m])<EOL>break<EOL><DEDENT><DEDENT>uniq = np.hstack((uniq,rlist[k]))<EOL>mult = np.hstack((mult,[<NUM_LIT:1>]))<EOL><DEDENT>return np.array(uniq), np.array(mult)<EOL>", "docstring": "The average of the root values is used when multiplicity \nis greater than one.\n\nMark Wickert October 2016", "id": "f14910:m53"}
{"signature": "def am_rx(x192):", "body": "x_edet192 = env_det(x192)<EOL>m_rx8 = deci24(x_edet192)<EOL>m_rx8 -= np.mean(m_rx8)<EOL>t8 = np.arange(len(m_rx8))/<NUM_LIT><EOL>\"\"\"<STR_LIT>\"\"\"<EOL>b192,a192 = signal.butter(<NUM_LIT:5>,<NUM_LIT:2>*<NUM_LIT>/<NUM_LIT>)<EOL>m_rx192 = signal.lfilter(b192,a192,x_edet192)<EOL>m_rx192 = signal.lfilter(b192,a192,m_rx192)<EOL>m_rx192 -= np.mean(m_rx192)<EOL>return m_rx8,t8,m_rx192,x_edet192<EOL>", "docstring": "AM envelope detector receiver for the Chapter 17 Case Study\n\nThe receiver bandpass filter is not included in this function.\n\nParameters\n----------\nx192 : ndarray of the AM signal at sampling rate 192 ksps\n\nReturns\n-------\nm_rx8 : ndarray of the demodulated message at 8 ksps\nt8 : ndarray of the time axis at 8 ksps\nm_rx192 : ndarray of the demodulated output at 192 ksps\nx_edet192 : ndarray of the envelope detector output at 192 ksps\n\nNotes\n-----\nThe bandpass filter needed at the receiver front-end can be designed\nusing b_bpf,a_bpf = :func:`am_rx_BPF`.\n\nExamples\n--------\n>>> import numpy as np\n>>> n = np.arange(0,1000)\n>>> # 1 kHz message signal\n>>> m = np.cos(2*np.pi*1000/8000.*n)\n>>> m_rx8,t8,m_rx192,x_edet192 = am_rx(x192)", "id": "f14910:m46"}
{"signature": "def lp_tri(f, fb):", "body": "x = np.zeros(len(f))<EOL>for k in range(len(f)):<EOL><INDENT>if abs(f[k]) <= fb:<EOL><INDENT>x[k] = <NUM_LIT:1> - abs(f[k])/float(fb)<EOL><DEDENT><DEDENT>return x<EOL>", "docstring": "Triangle spectral shape function used by :func:`lp_samp`.\n\nParameters\n----------\nf : ndarray containing frequency samples\nfb : the bandwidth as a float constant\n\nReturns\n-------\nx : ndarray of spectrum samples for a single triangle shape\n\nNotes\n-----\nThis is a support function for the lowpass spectrum plotting function\n:func:`lp_samp`.\n\nExamples\n--------\n>>> x = lp_tri(f, fb)", "id": "f14910:m11"}
{"signature": "def simple_SA(x,NS,NFFT,fs,NAVG=<NUM_LIT:1>,window='<STR_LIT>'):", "body": "Nx = len(x)<EOL>K = int(Nx/NS)<EOL>print('<STR_LIT>', K)<EOL>if NAVG > K:<EOL><INDENT>print('<STR_LIT>')<EOL>return <NUM_LIT:0>,<NUM_LIT:0><EOL><DEDENT>if window.lower() == '<STR_LIT>' or window.lower() == '<STR_LIT>':<EOL><INDENT>w = signal.boxcar(NS)<EOL><DEDENT>elif window.lower() == '<STR_LIT>':<EOL><INDENT>w = signal.hanning(NS)<EOL><DEDENT>xsw = np.zeros((K,NS)) + <NUM_LIT>*np.zeros((K,NS))<EOL>for k in range(NAVG):<EOL><INDENT>xsw[k,] = w*x[k*NS:(k+<NUM_LIT:1>)*NS]<EOL><DEDENT>Sx = np.zeros(NFFT)<EOL>for k in range(NAVG):<EOL><INDENT>X = fft.fft(xsw[k,],NFFT)<EOL>Sx += abs(X)**<NUM_LIT:2><EOL><DEDENT>Sx /= float(NAVG)<EOL>Sx /= float(NFFT**<NUM_LIT:2>)<EOL>NFFTby2 = int(NFFT/<NUM_LIT:2>)<EOL>if x.dtype != '<STR_LIT>':<EOL><INDENT>n = np.arange(NFFTby2)<EOL>f = fs*n/float(NFFT)<EOL>Sx = Sx[<NUM_LIT:0>:NFFTby2]<EOL><DEDENT>else:<EOL><INDENT>n = np.arange(NFFTby2)<EOL>f = fs*np.hstack((np.arange(-NFFTby2,<NUM_LIT:0>),np.arange(NFFTby2)))/float(NFFT)<EOL>Sx = np.hstack((Sx[NFFTby2:],Sx[<NUM_LIT:0>:NFFTby2]))<EOL><DEDENT>return f, Sx<EOL>", "docstring": "Spectral estimation using windowing and averaging.\n\nThis function implements averaged periodogram spectral estimation\nestimation similar to the NumPy's psd() function, but more\nspecialized for the the windowing case study of Chapter 16.\n\nParameters\n----------\nx : ndarray containing the input signal\nNS : The subrecord length less zero padding, e.g. 
NS < NFFT\nNFFT : FFT length, e.g., 1024 = 2**10\nfs : sampling rate in Hz\nNAVG : the number of averages, e.g., 1 for deterministic signals\nwindow : hardcoded window 'boxcar' (default) or 'hanning'\n\nReturns\n-------\nf : ndarray frequency axis in Hz on [0, fs/2]\nSx : ndarray the power spectrum estimate\n\nNotes\n-----\nThe function also prints the maximum number of averages K possible\nfor the input data record.\n\nExamples\n--------\n>>> import matplotlib.pyplot as plt\n>>> import numpy as np\n>>> from sk_dsp_comm import sigsys as ss\n>>> n = np.arange(0,2048)\n>>> x = np.cos(2*np.pi*1000/10000*n) + 0.01*np.cos(2*np.pi*3000/10000*n)\n>>> f, Sx = ss.simple_SA(x,128,512,10000)\n>>> plt.plot(f, 10*np.log10(Sx))\n>>> plt.ylim([-80, 0])\n>>> plt.xlabel(\"Frequency (Hz)\")\n>>> plt.ylabel(\"Power Spectral Density (dB)\")\n>>> plt.show()\n\nWith a hanning window.\n\n>>> f, Sx = ss.simple_SA(x,256,1024,10000,window='hanning')\n>>> plt.plot(f, 10*np.log10(Sx))\n>>> plt.xlabel(\"Frequency (Hz)\")\n>>> plt.ylabel(\"Power Spectral Density (dB)\")\n>>> plt.ylim([-80, 0])", "id": "f14910:m19"}
{"signature": "def am_rx_BPF(N_order = <NUM_LIT:7>, ripple_dB = <NUM_LIT:1>, B = <NUM_LIT>, fs = <NUM_LIT>):", "body": "b_bpf,a_bpf = signal.cheby1(N_order,ripple_dB,<NUM_LIT:2>*np.array([<NUM_LIT>-B/<NUM_LIT>,<NUM_LIT>+B/<NUM_LIT>])/fs,'<STR_LIT>')<EOL>return b_bpf,a_bpf<EOL>", "docstring": "Bandpass filter design for the AM receiver Case Study of Chapter 17.\n\nDesign a 7th-order Chebyshev type 1 bandpass filter to remove/reduce\nadjacent channel intereference at the envelope detector input.\n\nParameters\n----------\nN_order : the filter order (default = 7)\nripple_dB : the passband ripple in dB (default = 1)\nB : the RF bandwidth (default = 10e3)\nfs : the sampling frequency \n\nReturns\n-------\nb_bpf : ndarray of the numerator filter coefficients\na_bpf : ndarray of the denominator filter coefficients\n\nExamples\n--------\n>>> from scipy import signal\n>>> import numpy as np\n>>> import matplotlib.pyplot as plt\n>>> import sk_dsp_comm.sigsys as ss\n>>> # Use the default values\n>>> b_bpf,a_bpf = ss.am_rx_BPF()\n\nPole-zero plot of the filter.\n\n>>> ss.zplane(b_bpf,a_bpf)\n>>> plt.show()\n\nPlot of the frequency response.\n\n>>> f = np.arange(0,192/2.,.1)\n>>> w, Hbpf = signal.freqz(b_bpf,a_bpf,2*np.pi*f/192)\n>>> plt.plot(f*10,20*np.log10(abs(Hbpf)))\n>>> plt.axis([0,1920/2.,-80,10])\n>>> plt.ylabel(\"Power Spectral Density (dB)\")\n>>> plt.xlabel(\"Frequency (kHz)\")\n>>> plt.show()", "id": "f14910:m47"}
{"signature": "def ten_band_eq_resp(GdB,Q=<NUM_LIT>):", "body": "fs = <NUM_LIT> <EOL>NB = len(GdB)<EOL>if not NB == <NUM_LIT:10>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>Fc = <NUM_LIT>*<NUM_LIT:2>**np.arange(NB)<EOL>B = np.zeros((NB,<NUM_LIT:3>));<EOL>A = np.zeros((NB,<NUM_LIT:3>));<EOL>for k in range(NB):<EOL><INDENT>b,a = peaking(GdB[k],Fc[k],Q,fs)<EOL>B[k,:] = b<EOL>A[k,:] = a<EOL><DEDENT>F = np.logspace(<NUM_LIT:1>,np.log10(<NUM_LIT>),<NUM_LIT:1000>)<EOL>H = np.ones(len(F))*np.complex(<NUM_LIT:1.0>,<NUM_LIT:0.0>)<EOL>for k in range(NB):<EOL><INDENT>w,Htemp = signal.freqz(B[k,:],A[k,:],<NUM_LIT:2>*np.pi*F/fs)<EOL>H *= Htemp<EOL><DEDENT>plt.figure(figsize=(<NUM_LIT:6>,<NUM_LIT:4>))<EOL>plt.subplot(<NUM_LIT>)<EOL>plt.semilogx(F,<NUM_LIT:20>*np.log10(abs(H)))<EOL>plt.axis([<NUM_LIT:10>, fs/<NUM_LIT:2>, -<NUM_LIT:12>, <NUM_LIT:12>])<EOL>plt.grid()<EOL>plt.title('<STR_LIT>')<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.subplot(<NUM_LIT>)<EOL>plt.stem(np.arange(NB),GdB,'<STR_LIT:b>','<STR_LIT>')<EOL>plt.axis([<NUM_LIT:0>, NB-<NUM_LIT:1>, -<NUM_LIT:12>, <NUM_LIT:12>])<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.grid()<EOL>", "docstring": "Create a frequency response magnitude plot in dB of a ten band equalizer\nusing a semilogplot (semilogx()) type plot\n\n\nParameters\n----------\nGdB : Gain vector for 10 peaking filters [G0,...,G9]\nQ : Quality factor for each peaking filter (default 3.5)\n\nReturns\n-------\nNothing : two plots are created\n\nExamples\n--------\n>>> import matplotlib.pyplot as plt\n>>> from sk_dsp_comm import sigsys as ss\n>>> ss.ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0])\n>>> plt.show()", "id": "f14910:m2"}
{"signature": "def deci24(x):", "body": "<EOL>b2,a2 = signal.butter(<NUM_LIT:10>,<NUM_LIT:1>/<NUM_LIT>)<EOL>y1 = signal.lfilter(b2,a2,x)<EOL>y1 = downsample(y1,<NUM_LIT:2>)<EOL>b3,a3 = signal.butter(<NUM_LIT:10>,<NUM_LIT:1>/<NUM_LIT>)<EOL>y2 = signal.lfilter(b3,a3,y1)<EOL>y2 = downsample(y2,<NUM_LIT:3>)<EOL>b4,a4 = signal.butter(<NUM_LIT:10>,<NUM_LIT:1>/<NUM_LIT>)<EOL>y3 = signal.lfilter(b4,a4,y2)<EOL>y3 = downsample(y3,<NUM_LIT:4>)<EOL>return y3<EOL>", "docstring": "Decimate by L = 24 using Butterworth filters.\n\nThe decimation is done using two three stages. Downsample sample by \nL = 2 and lowpass filter, downsample by 3 and lowpass filter, then\ndownsample by L = 4 and lowpass filter. In all cases the lowpass\nfilter is a 10th-order Butterworth lowpass.\n\nParameters\n----------\nx : ndarray of the input signal\n\nReturns\n-------\ny : ndarray of the output signal\n\nNotes\n-----\nThe cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to \ntrack the upsampling by 2, 3, and 4 respectively.\n\nExamples\n--------\n>>> y = deci24(x)", "id": "f14910:m50"}
{"signature": "def NRZ_bits2(data,Ns,pulse='<STR_LIT>',alpha = <NUM_LIT>,M=<NUM_LIT:6>):", "body": "N_bits = len(data)<EOL>n_zeros = np.zeros((N_bits,int(Ns)-<NUM_LIT:1>))<EOL>x = np.hstack((<NUM_LIT:2>*data.reshape(N_bits,<NUM_LIT:1>)-<NUM_LIT:1>,n_zeros))<EOL>x = x.flatten()<EOL>if pulse.lower() == '<STR_LIT>':<EOL><INDENT>b = np.ones(int(Ns))<EOL><DEDENT>elif pulse.lower() == '<STR_LIT>':<EOL><INDENT>b = rc_imp(Ns,alpha,M)<EOL><DEDENT>elif pulse.lower() == '<STR_LIT:src>':<EOL><INDENT>b = sqrt_rc_imp(Ns,alpha,M)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>x = signal.lfilter(b,<NUM_LIT:1>,x)<EOL>return x,b/float(Ns)<EOL>", "docstring": "Generate non-return-to-zero (NRZ) data bits with pulse shaping with user data\n\nA baseband digital data signal using +/-1 amplitude signal values\nand including pulse shaping. The data sequence is user supplied.\n\nParameters\n----------\ndata : ndarray of the data bits as 0/1 values\nNs : the number of samples per bit,\npulse_type : 'rect' , 'rc', 'src' (default 'rect')\nalpha : excess bandwidth factor(default 0.25)\nM : single sided pulse duration (default = 6) \n\nReturns\n-------\nx : ndarray of the NRZ signal values\nb : ndarray of the pulse shape\n\nNotes\n-----\nPulse shapes include 'rect' (rectangular), 'rc' (raised cosine), \n'src' (root raised cosine). The actual pulse length is 2*M+1 samples.\n\nExamples\n--------\n>>> import matplotlib.pyplot as plt\n>>> from sk_dsp_comm.sigsys import NRZ_bits2\n>>> from sk_dsp_comm.sigsys import m_seq\n>>> from numpy import arange\n>>> x,b = NRZ_bits2(m_seq(5),10)\n>>> t = arange(len(x))\n>>> plt.ylim([-1.01, 1.01])\n>>> plt.plot(t,x)", "id": "f14910:m39"}
{"signature": "def delta_eps(t,eps):", "body": "d = np.zeros(len(t))<EOL>for k,tt in enumerate(t):<EOL><INDENT>if abs(tt) <= eps/<NUM_LIT>:<EOL><INDENT>d[k] = <NUM_LIT:1>/float(eps)<EOL><DEDENT><DEDENT>return d<EOL>", "docstring": "Rectangular pulse approximation to impulse function.\n\nParameters\n----------\nt : ndarray of time axis\neps : pulse width\n\nReturns\n-------\nd : ndarray containing the impulse approximation\n\nExamples\n--------\n>>> import matplotlib.pyplot as plt\n>>> from numpy import arange\n>>> from sk_dsp_comm.sigsys import delta_eps\n>>> t = np.arange(-2,2,.001)\n>>> d = delta_eps(t,.1)\n>>> plt.plot(t,d)\n>>> plt.show()", "id": "f14910:m26"}
{"signature": "def fs_approx(Xk,fk,t):", "body": "x_approx = np.zeros(len(t))<EOL>for k,Xkk in enumerate(Xk):<EOL><INDENT>if fk[k] == <NUM_LIT:0>:<EOL><INDENT>x_approx += Xkk.real*np.ones(len(t))<EOL><DEDENT>else:<EOL><INDENT>x_approx += <NUM_LIT:2>*np.abs(Xkk)*np.cos(<NUM_LIT:2>*np.pi*fk[k]*t+np.angle(Xkk))<EOL><DEDENT><DEDENT>return x_approx<EOL>", "docstring": "Synthesize periodic signal x(t) using Fourier series coefficients at harmonic frequencies\n\nAssume the signal is real so coefficients Xk are supplied for nonnegative\nindicies. The negative index coefficients are assumed to be complex\nconjugates.\n\nParameters\n----------\nXk : ndarray of complex Fourier series coefficients\nfk : ndarray of harmonic frequencies in Hz\nt : ndarray time axis corresponding to output signal array x_approx\n\nReturns\n-------\nx_approx : ndarray of periodic waveform approximation over time span t\n\nExamples\n--------\n>>> t = arange(0,2,.002)\n>>> # a 20% duty cycle pulse train\n>>> n = arange(0,20,1) # 0 to 19th harmonic\n>>> fk = 1*n % period = 1s\n>>> t, x_approx = fs_approx(Xk,fk,t)\n>>> plot(t,x_approx)", "id": "f14910:m22"}
{"signature": "def freq_resp(self, mode= '<STR_LIT>', fs = <NUM_LIT>, ylim = [-<NUM_LIT:100>,<NUM_LIT:2>]):", "body": "iir_d.freqz_resp_cas_list([self.sos],mode,fs=fs)<EOL>pylab.grid()<EOL>pylab.ylim(ylim)<EOL>", "docstring": "Frequency response plot", "id": "f14911:c2:m4"}
{"signature": "def dn(self,x,M_change = <NUM_LIT:12>):", "body": "y = signal.lfilter(self.b,[<NUM_LIT:1>],x)<EOL>y = ssd.downsample(y,M_change)<EOL>return y<EOL>", "docstring": "Downsample and filter the signal", "id": "f14911:c1:m3"}
{"signature": "def dn(self,x):", "body": "y = signal.lfilter(self.b,self.a,x)<EOL>y = ssd.downsample(y,self.M)<EOL>return y<EOL>", "docstring": "Downsample and filter the signal", "id": "f14911:c0:m2"}
{"signature": "def __init__(self,M_change = <NUM_LIT:12>,fcutoff=<NUM_LIT>,N_filt_order=<NUM_LIT:8>,ftype='<STR_LIT>'):", "body": "self.M = M_change <EOL>self.fc = fcutoff*<NUM_LIT> <EOL>self.N_forder = N_filt_order<EOL>if ftype.lower() == '<STR_LIT>':<EOL><INDENT>self.b, self.a = signal.butter(self.N_forder,<NUM_LIT:2>/self.M*self.fc)<EOL><DEDENT>elif ftype.lower() == '<STR_LIT>':<EOL><INDENT>self.b, self.a = signal.cheby1(self.N_forder,<NUM_LIT>,<NUM_LIT:2>/self.M*self.fc)<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>", "docstring": "Object constructor method", "id": "f14911:c0:m0"}
{"signature": "def up(self,x,L_change = <NUM_LIT:12>):", "body": "y = L_change*ssd.upsample(x,L_change)<EOL>y = signal.lfilter(self.b,[<NUM_LIT:1>],y)<EOL>return y<EOL>", "docstring": "Upsample and filter the signal", "id": "f14911:c1:m2"}
{"signature": "def zplane(self,auto_scale=True,size=<NUM_LIT:2>,detect_mult=True,tol=<NUM_LIT>):", "body": "iir_d.sos_zplane(self.sos,auto_scale,size,tol)<EOL>", "docstring": "Plot the poles and zeros of the FIR filter in the z-plane", "id": "f14911:c2:m5"}
{"signature": "def freqz_resp(b,a=[<NUM_LIT:1>],mode = '<STR_LIT>',fs=<NUM_LIT:1.0>,Npts = <NUM_LIT>,fsize=(<NUM_LIT:6>,<NUM_LIT:4>)):", "body": "f = np.arange(<NUM_LIT:0>,Npts)/(<NUM_LIT>*Npts)<EOL>w,H = signal.freqz(b,a,<NUM_LIT:2>*np.pi*f)<EOL>plt.figure(figsize=fsize)<EOL>if mode.lower() == '<STR_LIT>':<EOL><INDENT>plt.plot(f*fs,<NUM_LIT:20>*np.log10(np.abs(H)))<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>elif mode.lower() == '<STR_LIT>':<EOL><INDENT>plt.plot(f*fs,np.angle(H))<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>plt.title('<STR_LIT>')<EOL><DEDENT>elif (mode.lower() == '<STR_LIT>') or (mode.lower() == '<STR_LIT>'):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>theta = np.unwrap(np.angle(H))<EOL>theta2 = np.unwrap(<NUM_LIT:2>*theta)/<NUM_LIT><EOL>theta_dif = np.diff(theta2)<EOL>f_diff = np.diff(f)<EOL>Tg = -np.diff(theta2)/np.diff(w)<EOL>idx = pylab.find(<NUM_LIT:20>*np.log10(H[:-<NUM_LIT:1>]) < -<NUM_LIT>)<EOL>Tg[idx] = np.zeros(len(idx))<EOL>max_Tg = np.max(Tg)<EOL>if mode.lower() == '<STR_LIT>':<EOL><INDENT>max_Tg /= fs<EOL>plt.plot(f[:-<NUM_LIT:1>]*fs,Tg/fs)<EOL>plt.ylim([<NUM_LIT:0>,<NUM_LIT>*max_Tg])<EOL><DEDENT>else:<EOL><INDENT>plt.plot(f[:-<NUM_LIT:1>]*fs,Tg)<EOL>plt.ylim([<NUM_LIT:0>,<NUM_LIT>*max_Tg])<EOL><DEDENT>plt.xlabel('<STR_LIT>')<EOL>if mode.lower() == '<STR_LIT>':<EOL><INDENT>plt.ylabel('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>plt.ylabel('<STR_LIT>')<EOL><DEDENT>plt.title('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>s1 = '<STR_LIT>'<EOL>s2 = '<STR_LIT>'<EOL>print(s1 + s2)<EOL><DEDENT>", "docstring": "A method for displaying digital filter frequency response magnitude,\nphase, and group delay. A plot is produced using matplotlib\n\nfreq_resp(self,mode = 'dB',Npts = 1024)\n\nA method for displaying the filter frequency response magnitude,\nphase, and group delay. 
A plot is produced using matplotlib\n\nfreqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4))\n\n    b = ndarray of numerator coefficients\n    a = ndarray of denominator coefficents\n mode = display mode: 'dB' magnitude, 'phase' in radians, or \n        'groupdelay_s' in samples and 'groupdelay_t' in sec, \n        all versus frequency in Hz\n Npts = number of points to plot; defult is 1024\nfsize = figure size; defult is (6,4) inches\n\nMark Wickert, January 2015", "id": "f14911:m0"}
{"signature": "def zplane(self,auto_scale=True,size=<NUM_LIT:2>,detect_mult=True,tol=<NUM_LIT>):", "body": "ssd.zplane(self.b,[<NUM_LIT:1>],auto_scale,size,tol)<EOL>", "docstring": "Plot the poles and zeros of the FIR filter in the z-plane", "id": "f14911:c1:m5"}
{"signature": "def strips(x,Nx,fig_size=(<NUM_LIT:6>,<NUM_LIT:4>)):", "body": "plt.figure(figsize=fig_size)<EOL>N = len(x)<EOL>Mx = int(np.ceil(N/float(Nx)))<EOL>x_max = np.max(np.abs(x))<EOL>for kk in range(Mx):<EOL><INDENT>plt.plot(np.array([<NUM_LIT:0>,Nx]),-kk*Nx*np.array([<NUM_LIT:1>,<NUM_LIT:1>]),'<STR_LIT>')<EOL>plt.plot(x[kk*Nx:(kk+<NUM_LIT:1>)*Nx]/x_max*<NUM_LIT>*Nx-kk*Nx,'<STR_LIT:b>')<EOL><DEDENT>plt.axis([<NUM_LIT:0>,Nx,-Nx*(Mx-<NUM_LIT:0.5>),Nx*<NUM_LIT:0.5>])<EOL>plt.yticks(np.arange(<NUM_LIT:0>,-Nx*Mx,-Nx),np.arange(<NUM_LIT:0>,Nx*Mx,Nx))<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.ylabel('<STR_LIT>')<EOL>return <NUM_LIT:0><EOL>", "docstring": "Plots the contents of real ndarray x as a vertical stacking of\nstrips, each of length Nx. The default figure size is (6,4) inches.\nThe yaxis tick labels are the starting index of each strip. The red\ndashed lines correspond to zero amplitude in each strip.\n\nstrips(x,Nx,my_figsize=(6,4))\n\nMark Wickert April 2014", "id": "f14912:m3"}
{"signature": "def PCM_encode(x,N_bits):", "body": "xq = np.int16(np.rint(x*<NUM_LIT:2>**(N_bits-<NUM_LIT:1>)))<EOL>x_bits = np.zeros((N_bits,len(xq)))<EOL>for k, xk in enumerate(xq):<EOL><INDENT>x_bits[:,k] = to_bin(xk,N_bits)<EOL><DEDENT>x_bits = np.reshape(x_bits,(<NUM_LIT:1>,len(x)*N_bits),'<STR_LIT:F>')<EOL>return np.int16(x_bits.flatten())<EOL>", "docstring": "Parameters\n----------\nx : signal samples to be PCM encoded\nN_bits ; bit precision of PCM samples\n\nReturns\n-------\nx_bits = encoded serial bit stream of 0/1 values. MSB first.\n\nMark Wickert, Mark 2015", "id": "f14912:m22"}
{"signature": "def QPSK_rx(fc,N_symb,Rs,EsN0=<NUM_LIT:100>,fs=<NUM_LIT>,lfsr_len=<NUM_LIT:10>,phase=<NUM_LIT:0>,pulse='<STR_LIT:src>'):", "body": "Ns = int(np.round(fs/Rs))<EOL>print('<STR_LIT>', Ns)<EOL>print('<STR_LIT>', fs/float(Ns))<EOL>print('<STR_LIT>', EsN0, '<STR_LIT>')<EOL>print('<STR_LIT>', phase, '<STR_LIT>')<EOL>print('<STR_LIT>', pulse)<EOL>x, b, data = QPSK_bb(N_symb,Ns,lfsr_len,pulse)<EOL>x = cpx_AWGN(x,EsN0,Ns)<EOL>n = np.arange(len(x))<EOL>xc = x*np.exp(<NUM_LIT>*<NUM_LIT:2>*np.pi*fc/float(fs)*n) * np.exp(<NUM_LIT>*phase)<EOL>return xc, b, data<EOL>", "docstring": "This function generates", "id": "f14912:m9"}
{"signature": "def bit_errors(tx_data,rx_data,Ncorr = <NUM_LIT>,Ntransient = <NUM_LIT:0>):", "body": "<EOL>tx_data = <NUM_LIT:2>*tx_data[Ntransient:]-<NUM_LIT:1><EOL>rx_data = <NUM_LIT:2>*rx_data[Ntransient:]-<NUM_LIT:1><EOL>R0 = np.fft.ifft(np.fft.fft(rx_data,Ncorr)*<EOL>np.conj(np.fft.fft(tx_data,Ncorr)))<EOL>R1 = np.fft.ifft(np.fft.fft(-<NUM_LIT:1>*rx_data,Ncorr)*<EOL>np.conj(np.fft.fft(tx_data,Ncorr)))<EOL>R0 = np.fft.fftshift(R0)<EOL>R1 = np.fft.fftshift(R1)<EOL>R0max = np.max(R0.real)<EOL>R1max = np.max(R1.real)<EOL>R = np.array([R0max,R1max])<EOL>Rmax = np.max(R)<EOL>kphase_max = np.where(R == Rmax)[<NUM_LIT:0>]<EOL>kmax = kphase_max[<NUM_LIT:0>]<EOL>if kmax == <NUM_LIT:0>:<EOL><INDENT>lagmax = np.where(R0.real == Rmax)[<NUM_LIT:0>] - Ncorr/<NUM_LIT:2><EOL><DEDENT>elif kmax == <NUM_LIT:1>:<EOL><INDENT>lagmax = np.where(R1.real == Rmax)[<NUM_LIT:0>] - Ncorr/<NUM_LIT:2><EOL><DEDENT>taumax = lagmax[<NUM_LIT:0>]<EOL>print('<STR_LIT>' % (kmax, taumax))<EOL>if taumax < <NUM_LIT:0>:<EOL><INDENT>tx_data = tx_data[int(-taumax):]<EOL>tx_data = tx_data[:min(len(tx_data),len(rx_data))]<EOL>rx_data = (-<NUM_LIT:1>)**kmax*rx_data[:len(tx_data)]<EOL><DEDENT>else:<EOL><INDENT>rx_data = (-<NUM_LIT:1>)**kmax * rx_data[int(taumax):]<EOL>rx_data = rx_data[:min(len(tx_data),len(rx_data))]<EOL>tx_data = tx_data[:len(rx_data)]<EOL><DEDENT>Bit_count = len(tx_data)<EOL>tx_I = np.int16((tx_data.real + <NUM_LIT:1>)/<NUM_LIT:2>)<EOL>rx_I = np.int16((rx_data.real + <NUM_LIT:1>)/<NUM_LIT:2>)<EOL>Bit_errors = tx_I ^ rx_I<EOL>return Bit_count,np.sum(Bit_errors)<EOL>", "docstring": "Count bit errors between a transmitted and received BPSK signal.\nTime delay between streams is detected as well as ambiquity resolution\ndue to carrier phase lock offsets of :math:`k*\\\\pi`, k=0,1.\nThe ndarray tx_data is Tx 0/1 bits as real numbers I.\nThe ndarray rx_data is Rx 0/1 bits as real numbers I.\nNote: Ncorr needs to be even", "id": "f14912:m4"}
{"signature": "def Q_fctn(x):", "body": "return <NUM_LIT:1.>/<NUM_LIT:2>*erfc(x/np.sqrt(<NUM_LIT>))<EOL>", "docstring": "Gaussian Q-function", "id": "f14912:m21"}
{"signature": "def rc_imp(Ns,alpha,M=<NUM_LIT:6>):", "body": "<EOL>n = np.arange(-M*Ns,M*Ns+<NUM_LIT:1>)<EOL>b = np.zeros(len(n))<EOL>a = alpha<EOL>Ns *= <NUM_LIT:1.0><EOL>for i in range(len(n)):<EOL><INDENT>if (<NUM_LIT:1> - <NUM_LIT:4>*(a*n[i]/Ns)**<NUM_LIT:2>) == <NUM_LIT:0>:<EOL><INDENT>b[i] = np.pi/<NUM_LIT:4>*np.sinc(<NUM_LIT:1>/(<NUM_LIT>*a))<EOL><DEDENT>else:<EOL><INDENT>b[i] = np.sinc(n[i]/Ns)*np.cos(np.pi*a*n[i]/Ns)/(<NUM_LIT:1> - <NUM_LIT:4>*(a*n[i]/Ns)**<NUM_LIT:2>)<EOL><DEDENT><DEDENT>return b<EOL>", "docstring": "A truncated raised cosine pulse used in digital communications.\n\nThe pulse shaping factor :math:`0 < \\\\alpha < 1` is required as well as the\ntruncation factor M which sets the pulse duration to be :math:`2*M*T_{symbol}`.\n\nParameters\n----------\nNs : number of samples per symbol\nalpha : excess bandwidth factor on (0, 1), e.g., 0.35\nM : equals RC one-sided symbol truncation factor\n\nReturns\n-------\nb : ndarray containing the pulse shape\n\nSee Also\n--------\nsqrt_rc_imp\n\nNotes\n-----\nThe pulse shape b is typically used as the FIR filter coefficients\nwhen forming a pulse shaped digital communications waveform.\n\nExamples\n--------\nTen samples per symbol and :math:`\\\\alpha = 0.35`.\n\n>>> import matplotlib.pyplot as plt\n>>> from sk_dsp_comm.digitalcom import rc_imp\n>>> from numpy import arange\n>>> b = rc_imp(10,0.35)\n>>> n = arange(-10*6,10*6+1)\n>>> plt.stem(n,b)\n>>> plt.show()", "id": "f14912:m15"}
{"signature": "def gray2bin(d_word,b_width):", "body": "bits_in = to_bin(d_word,b_width)<EOL>bits_out = np.zeros(b_width,dtype=np.int)<EOL>for k, bit_k in enumerate(bits_in):<EOL><INDENT>if k > <NUM_LIT:0>:<EOL><INDENT>bits_out[k] = bit_k^bits_out[k-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>bits_out[k] = bit_k<EOL><DEDENT><DEDENT>return from_bin(bits_out)<EOL>", "docstring": "Convert gray encoded binary words to integer bit words via\nGray decoding starting from the MSB to the LSB\n\nMark Wickert November 2018", "id": "f14912:m32"}
{"signature": "def PCM_decode(x_bits,N_bits):", "body": "N_samples = len(x_bits)//N_bits<EOL>xrs_bits = x_bits.copy()<EOL>xrs_bits = np.reshape(xrs_bits,(N_bits,N_samples),'<STR_LIT:F>')<EOL>xq = np.zeros(N_samples)<EOL>w = <NUM_LIT:2>**np.arange(N_bits-<NUM_LIT:1>,-<NUM_LIT:1>,-<NUM_LIT:1>) <EOL>for k in range(N_samples):<EOL><INDENT>xq[k] = np.dot(xrs_bits[:,k],w) - xrs_bits[<NUM_LIT:0>,k]*<NUM_LIT:2>**N_bits<EOL><DEDENT>return xq/<NUM_LIT:2>**(N_bits-<NUM_LIT:1>)<EOL>", "docstring": "Parameters\n----------\nx_bits : serial bit stream of 0/1 values. The length of\n         x_bits must be a multiple of N_bits\nN_bits : bit precision of PCM samples\n\nReturns\n-------\n  xhat : decoded PCM signal samples\n\nMark Wickert, March 2015", "id": "f14912:m25"}
{"signature": "def to_bin(data, width):", "body": "data_str = bin(data & (<NUM_LIT:2>**width-<NUM_LIT:1>))[<NUM_LIT:2>:].zfill(width)<EOL>return [int(x) for x in tuple(data_str)]<EOL>", "docstring": "Convert an unsigned integer to a numpy binary array with the first\nelement the MSB and the last element the LSB.", "id": "f14912:m23"}
{"signature": "def sqrt_rc_imp(Ns,alpha,M=<NUM_LIT:6>):", "body": "<EOL>n = np.arange(-M*Ns,M*Ns+<NUM_LIT:1>)<EOL>b = np.zeros(len(n))<EOL>Ns *= <NUM_LIT:1.0><EOL>a = alpha<EOL>for i in range(len(n)):<EOL><INDENT>if abs(<NUM_LIT:1> - <NUM_LIT:16>*a**<NUM_LIT:2>*(n[i]/Ns)**<NUM_LIT:2>) <= np.finfo(np.float).eps/<NUM_LIT:2>:<EOL><INDENT>b[i] = <NUM_LIT:1>/<NUM_LIT>*((<NUM_LIT:1>+a)*np.sin((<NUM_LIT:1>+a)*np.pi/(<NUM_LIT>*a))-(<NUM_LIT:1>-a)*np.cos((<NUM_LIT:1>-a)*np.pi/(<NUM_LIT>*a))+(<NUM_LIT:4>*a)/np.pi*np.sin((<NUM_LIT:1>-a)*np.pi/(<NUM_LIT>*a)))<EOL><DEDENT>else:<EOL><INDENT>b[i] = <NUM_LIT:4>*a/(np.pi*(<NUM_LIT:1> - <NUM_LIT:16>*a**<NUM_LIT:2>*(n[i]/Ns)**<NUM_LIT:2>))<EOL>b[i] = b[i]*(np.cos((<NUM_LIT:1>+a)*np.pi*n[i]/Ns) + np.sinc((<NUM_LIT:1>-a)*n[i]/Ns)*(<NUM_LIT:1>-a)*np.pi/(<NUM_LIT>*a))<EOL><DEDENT><DEDENT>return b<EOL>", "docstring": "A truncated square root raised cosine pulse used in digital communications.\n\nThe pulse shaping factor :math:`0 < \\\\alpha < 1` is required as well as the\ntruncation factor M which sets the pulse duration to be :math:`2*M*T_{symbol}`.\n\n\nParameters\n----------\nNs : number of samples per symbol\nalpha : excess bandwidth factor on (0, 1), e.g., 0.35\nM : equals RC one-sided symbol truncation factor\n\nReturns\n-------\nb : ndarray containing the pulse shape\n\nNotes\n-----\nThe pulse shape b is typically used as the FIR filter coefficients\nwhen forming a pulse shaped digital communications waveform. 
When \nsquare root raised cosine (SRC) pulse is used to generate Tx signals and\nat the receiver used as a matched filter (receiver FIR filter), the \nreceived signal is now raised cosine shaped, thus having zero\nintersymbol interference and the optimum removal of additive white \nnoise if present at the receiver input.\n\nExamples\n--------\nTen samples per symbol and :math:`\\\\alpha = 0.35`.\n\n>>> import matplotlib.pyplot as plt\n>>> from numpy import arange\n>>> from sk_dsp_comm.digitalcom import sqrt_rc_imp\n>>> b = sqrt_rc_imp(10,0.35)\n>>> n = arange(-10*6,10*6+1)\n>>> plt.stem(n,b)\n>>> plt.show()", "id": "f14912:m16"}
{"signature": "def bin2gray(d_word,b_width):", "body": "bits_in = to_bin(d_word,b_width)<EOL>bits_out = np.zeros(b_width,dtype=np.int)<EOL>for k, bit_k in enumerate(bits_in):<EOL><INDENT>if k > <NUM_LIT:0>:<EOL><INDENT>bits_out[k] = bit_k^bits_in[k-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>bits_out[k] = bit_k<EOL><DEDENT><DEDENT>return from_bin(bits_out)<EOL>", "docstring": "Convert integer bit words to gray encoded binary words via\nGray coding starting from the MSB to the LSB\n\nMark Wickert November 2018", "id": "f14912:m31"}
{"signature": "def MPSK_gray_encode_bb(N_symb,Ns,M=<NUM_LIT:4>,pulse='<STR_LIT>',alpha=<NUM_LIT>,ext_data=None):", "body": "<EOL>bin2gray1 = [<NUM_LIT:0>,<NUM_LIT:1>]<EOL>bin2gray2 = [<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:3>,<NUM_LIT:2>]<EOL>bin2gray3 = [<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:3>,<NUM_LIT:2>,<NUM_LIT:7>,<NUM_LIT:6>,<NUM_LIT:4>,<NUM_LIT:5>] <EOL>bin2gray4 = [<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:3>,<NUM_LIT:2>,<NUM_LIT:7>,<NUM_LIT:6>,<NUM_LIT:4>,<NUM_LIT:5>,<NUM_LIT:15>,<NUM_LIT>,<NUM_LIT:12>,<NUM_LIT>,<NUM_LIT:8>,<NUM_LIT:9>,<NUM_LIT:11>,<NUM_LIT:10>]<EOL>bin2gray5 = [<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:3>,<NUM_LIT:2>,<NUM_LIT:7>,<NUM_LIT:6>,<NUM_LIT:4>,<NUM_LIT:5>,<NUM_LIT:15>,<NUM_LIT>,<NUM_LIT:12>,<NUM_LIT>,<NUM_LIT:8>,<NUM_LIT:9>,<NUM_LIT:11>,<NUM_LIT:10>,<NUM_LIT>,<NUM_LIT:30>,<EOL><NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT:16>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT>,<NUM_LIT:20>,<NUM_LIT>]<EOL>N_word = int(np.log2(M))<EOL>if N_symb == None:<EOL><INDENT>N_symb = int(np.floor(len(ext_data)/N_word))<EOL>data = ext_data[:N_symb*N_word]<EOL><DEDENT>else:<EOL><INDENT>data = np.random.randint(<NUM_LIT:0>,<NUM_LIT:2>,size=int(np.log2(M))*N_symb)<EOL><DEDENT>x_IQ = np.zeros(N_symb,dtype=np.complex128)<EOL>bin_wgts = <NUM_LIT:2>**np.arange(N_word-<NUM_LIT:1>,-<NUM_LIT:1>,-<NUM_LIT:1>)<EOL>if M == <NUM_LIT:2>: <EOL><INDENT>x_IQ = <NUM_LIT:2>*data - <NUM_LIT:1><EOL><DEDENT>elif M == <NUM_LIT:4>: <EOL><INDENT>for k in range(N_symb):<EOL><INDENT>word_phase = data[k*N_word:(k+<NUM_LIT:1>)*N_word]<EOL>x_phase = <NUM_LIT:2>*np.pi*bin2gray2[np.dot(word_phase,bin_wgts)]/M + np.pi/M<EOL>x_IQ[k] = np.exp(<NUM_LIT>*x_phase)<EOL><DEDENT><DEDENT>elif M == <NUM_LIT:8>:<EOL><INDENT>for k in range(N_symb):<EOL><INDENT>word_phase = data[k*N_word:(k+<NUM_LIT:1>)*N_word]<EOL>x_phase = <NUM_LIT:2>*np.pi*bin2gray3[np.dot(word_phase,bin_wgts)]/M<EOL>x_IQ[k] = np.exp(<NUM_LIT>*x_phase)<EOL><DEDENT><DEDENT>elif M == <NUM_LIT:16>:<EOL><INDENT>for k in 
range(N_symb):<EOL><INDENT>word_phase = data[k*N_word:(k+<NUM_LIT:1>)*N_word]<EOL>x_phase = <NUM_LIT:2>*np.pi*bin2gray4[np.dot(word_phase,bin_wgts)]/M<EOL>x_IQ[k] = np.exp(<NUM_LIT>*x_phase)<EOL><DEDENT><DEDENT>elif M == <NUM_LIT:32>:<EOL><INDENT>for k in range(N_symb):<EOL><INDENT>word_phase = data[k*N_word:(k+<NUM_LIT:1>)*N_word]<EOL>x_phase = <NUM_LIT:2>*np.pi*bin2gray5[np.dot(word_phase,bin_wgts)]/M<EOL>x_IQ[k] = np.exp(<NUM_LIT>*x_phase)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')        <EOL><DEDENT>if Ns > <NUM_LIT:1>:<EOL><INDENT>if pulse.lower() == '<STR_LIT:src>':<EOL><INDENT>b = sqrt_rc_imp(Ns,alpha,<NUM_LIT:6>)<EOL><DEDENT>elif pulse.lower() == '<STR_LIT>':<EOL><INDENT>b = rc_imp(Ns,alpha,<NUM_LIT:6>)    <EOL><DEDENT>elif pulse.lower() == '<STR_LIT>':<EOL><INDENT>b = np.ones(int(Ns)) <EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>x = signal.lfilter(b,<NUM_LIT:1>,upsample(x_IQ,Ns))<EOL>b = b/sum(b)<EOL>return x, b, data<EOL><DEDENT>else:<EOL><INDENT>return x_IQ, <NUM_LIT:1>, data<EOL><DEDENT>", "docstring": "MPSK_gray_bb: A gray code mapped MPSK complex baseband transmitter \nx,b,tx_data = MPSK_gray_bb(K,Ns,M)\n\n//////////// Inputs //////////////////////////////////////////////////\n  N_symb = the number of symbols to process\n      Ns = number of samples per symbol\n       M = modulation order: 2, 4, 8, 16 MPSK\n   alpha = squareroot raised cosine excess bandwidth factor.\n           Can range over 0 < alpha < 1.\n   pulse = 'rect', 'src', or 'rc'\n//////////// Outputs /////////////////////////////////////////////////\n       x = complex baseband digital modulation\n       b = transmitter shaping filter, rectangle or SRC\n tx_data = xI+1j*xQ = inphase symbol sequence + \n           1j*quadrature symbol sequence\n\nMark Wickert November 2018", "id": "f14912:m35"}
{"signature": "def RZ_bits(N_bits,Ns,pulse='<STR_LIT>',alpha = <NUM_LIT>,M=<NUM_LIT:6>):", "body": "data = np.random.randint(<NUM_LIT:0>,<NUM_LIT:2>,N_bits) <EOL>x = np.hstack((data.reshape(N_bits,<NUM_LIT:1>),np.zeros((N_bits,int(Ns)-<NUM_LIT:1>))))<EOL>x =x.flatten()<EOL>if pulse.lower() == '<STR_LIT>':<EOL><INDENT>b = np.ones(int(Ns))<EOL><DEDENT>elif pulse.lower() == '<STR_LIT>':<EOL><INDENT>b = rc_imp(Ns,alpha,M)<EOL><DEDENT>elif pulse.lower() == '<STR_LIT:src>':<EOL><INDENT>b = sqrt_rc_imp(Ns,alpha,M)<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>x = signal.lfilter(b,<NUM_LIT:1>,x)<EOL>return x,b/float(Ns),data<EOL>", "docstring": "Generate return-to-zero (RZ) data bits with pulse shaping.\n\nA baseband digital data signal using +/-1 amplitude signal values\nand including pulse shaping.\n\nParameters\n----------\nN_bits : number of RZ {0,1} data bits to produce\nNs : the number of samples per bit,\npulse_type : 'rect' , 'rc', 'src' (default 'rect')\nalpha : excess bandwidth factor(default 0.25)\nM : single sided pulse duration (default = 6) \n\nReturns\n-------\nx : ndarray of the RZ signal values\nb : ndarray of the pulse shape\ndata : ndarray of the underlying data bits\n\nNotes\n-----\nPulse shapes include 'rect' (rectangular), 'rc' (raised cosine), \n'src' (root raised cosine). The actual pulse length is 2*M+1 samples.\nThis function is used by BPSK_tx in the Case Study article.\n\nExamples\n--------\n>>> import matplotlib.pyplot as plt\n>>> from numpy import arange\n>>> from sk_dsp_comm.digitalcom import RZ_bits\n>>> x,b,data = RZ_bits(100,10)\n>>> t = arange(len(x))\n>>> plt.plot(t,x)\n>>> plt.ylim([-0.01, 1.01])\n>>> plt.show()", "id": "f14912:m17"}
{"signature": "def from_bin(bin_array):", "body": "width = len(bin_array)<EOL>bin_wgts = <NUM_LIT:2>**np.arange(width-<NUM_LIT:1>,-<NUM_LIT:1>,-<NUM_LIT:1>)<EOL>return int(np.dot(bin_array,bin_wgts))<EOL>", "docstring": "Convert binary array back a nonnegative integer. The array length is \nthe bit width. The first input index holds the MSB and the last holds the LSB.", "id": "f14912:m24"}
{"signature": "def QAM_gray_decode(x_hat,M = <NUM_LIT:4>):", "body": "<EOL>gray2bin1 = [<NUM_LIT:0>,<NUM_LIT:1>]<EOL>gray2bin2 = [<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:3>,<NUM_LIT:2>]<EOL>gray2bin3 = [<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:3>,<NUM_LIT:2>,<NUM_LIT:6>,<NUM_LIT:7>,<NUM_LIT:5>,<NUM_LIT:4>] <EOL>gray2bin4 = [<NUM_LIT:0>,<NUM_LIT:1>,<NUM_LIT:3>,<NUM_LIT:2>,<NUM_LIT:6>,<NUM_LIT:7>,<NUM_LIT:5>,<NUM_LIT:4>,<NUM_LIT:12>,<NUM_LIT>,<NUM_LIT:15>,<NUM_LIT>,<NUM_LIT:10>,<NUM_LIT:11>,<NUM_LIT:9>,<NUM_LIT:8>]<EOL>x_m = np.sqrt(M)-<NUM_LIT:1><EOL>if M == <NUM_LIT:2>: x_m = <NUM_LIT:1><EOL>N_symb = len(x_hat)<EOL>N_word = int(np.log2(M)/<NUM_LIT:2>)<EOL>x_hat = x_hat/(np.std(x_hat) * np.sqrt(<NUM_LIT:3>/(<NUM_LIT:2>*(M-<NUM_LIT:1>))))<EOL>k_hat_gray = (x_hat + x_m*(<NUM_LIT:1>+<NUM_LIT>))/<NUM_LIT:2><EOL>k_hat_grayI = np.int16(np.clip(np.rint(k_hat_gray.real),<NUM_LIT:0>,x_m))<EOL>k_hat_grayQ = np.int16(np.clip(np.rint(k_hat_gray.imag),<NUM_LIT:0>,x_m))<EOL>data_hat = np.zeros(<NUM_LIT:2>*N_word*N_symb,dtype=int)<EOL>for k in range(N_symb):<EOL><INDENT>if M == <NUM_LIT:2>: <EOL><INDENT>data_hat = k_hat_grayI<EOL><DEDENT>elif M == <NUM_LIT:4>: <EOL><INDENT>data_hat[<NUM_LIT:2>*k*N_word:<NUM_LIT:2>*(k+<NUM_LIT:1>)*N_word]= np.hstack((to_bin(gray2bin1[k_hat_grayI[k]],N_word),<EOL>to_bin(gray2bin1[k_hat_grayQ[k]],N_word)))<EOL><DEDENT>elif M == <NUM_LIT:16>:<EOL><INDENT>data_hat[<NUM_LIT:2>*k*N_word:<NUM_LIT:2>*(k+<NUM_LIT:1>)*N_word]= np.hstack((to_bin(gray2bin2[k_hat_grayI[k]],N_word),<EOL>to_bin(gray2bin2[k_hat_grayQ[k]],N_word)))            <EOL><DEDENT>elif M == <NUM_LIT:64>:<EOL><INDENT>data_hat[<NUM_LIT:2>*k*N_word:<NUM_LIT:2>*(k+<NUM_LIT:1>)*N_word]= np.hstack((to_bin(gray2bin3[k_hat_grayI[k]],N_word),<EOL>to_bin(gray2bin3[k_hat_grayQ[k]],N_word)))            <EOL><DEDENT>elif M == <NUM_LIT>:<EOL><INDENT>data_hat[<NUM_LIT:2>*k*N_word:<NUM_LIT:2>*(k+<NUM_LIT:1>)*N_word]= 
np.hstack((to_bin(gray2bin4[k_hat_grayI[k]],N_word),<EOL>to_bin(gray2bin4[k_hat_grayQ[k]],N_word)))<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')  <EOL><DEDENT><DEDENT>return data_hat<EOL>", "docstring": "Decode MQAM IQ symbols to a serial bit stream using\ngray2bin decoding\n\nx_hat = symbol spaced samples of the QAM waveform taken at the maximum\n        eye opening. Normally this is following the matched filter\n\nMark Wickert April 2018", "id": "f14912:m34"}
{"signature": "def xcorr(x1,x2,Nlags):", "body": "K = <NUM_LIT:2>*(int(np.floor(len(x1)/<NUM_LIT:2>)))<EOL>X1 = fft.fft(x1[:K])<EOL>X2 = fft.fft(x2[:K])<EOL>E1 = sum(abs(x1[:K])**<NUM_LIT:2>)<EOL>E2 = sum(abs(x2[:K])**<NUM_LIT:2>)<EOL>r12 = np.fft.ifft(X1*np.conj(X2))/np.sqrt(E1*E2)<EOL>k = np.arange(K) - int(np.floor(K/<NUM_LIT:2>))<EOL>r12 = np.fft.fftshift(r12)<EOL>idx = np.nonzero(np.ravel(abs(k) <= Nlags))<EOL>return r12[idx], k[idx]<EOL>", "docstring": "r12, k = xcorr(x1,x2,Nlags), r12 and k are ndarray's\nCompute the energy normalized cross correlation between the sequences\nx1 and x2. If x1 = x2 the cross correlation is the autocorrelation.\nThe number of lags sets how many lags to return centered about zero", "id": "f14912:m20"}
{"signature": "def OFDM_rx(x, Nf, N, Np=<NUM_LIT:0>, cp=False, Ncp=<NUM_LIT:0>, alpha=<NUM_LIT>, ht=None):", "body": "N_symb = len(x) // (N + Ncp)<EOL>y_out = np.zeros(N_symb * N, dtype=np.complex128)<EOL>for k in range(N_symb):<EOL><INDENT>if cp:<EOL><INDENT>buff = x[k * (N + Ncp) + Ncp:(k + <NUM_LIT:1>) * (N + Ncp)]<EOL><DEDENT>else:<EOL><INDENT>buff = x[k * N:(k + <NUM_LIT:1>) * N]<EOL><DEDENT>y_out[k * N:(k + <NUM_LIT:1>) * N] = fft.fft(buff)<EOL><DEDENT>z_out = np.reshape(y_out, (N_symb, N))<EOL>z_out = np.hstack((z_out[:, <NUM_LIT:1>:Nf // <NUM_LIT:2> + <NUM_LIT:1>], z_out[:, N - Nf // <NUM_LIT:2>:N]))<EOL>if Np > <NUM_LIT:0>:<EOL><INDENT>if isinstance(type(None), type(ht)):<EOL><INDENT>z_out, H = chan_est_equalize(z_out, Np, alpha)<EOL><DEDENT>else:<EOL><INDENT>Ht = fft.fft(ht, N)<EOL>Hht = np.hstack((Ht[<NUM_LIT:1>:Nf // <NUM_LIT:2> + <NUM_LIT:1>], Ht[N - Nf // <NUM_LIT:2>:]))<EOL>z_out, H = chan_est_equalize(z_out, Np, alpha, Hht)<EOL><DEDENT><DEDENT>elif Np == -<NUM_LIT:1>:  <EOL><INDENT>Ht = fft.fft(ht, N)<EOL>H = np.hstack((Ht[<NUM_LIT:1>:Nf // <NUM_LIT:2> + <NUM_LIT:1>], Ht[N - Nf // <NUM_LIT:2>:]))<EOL>for k in range(N_symb):<EOL><INDENT>z_out[k, :] /= H<EOL><DEDENT><DEDENT>else:<EOL><INDENT>H = np.ones(Nf)<EOL><DEDENT>return z_out.flatten(), H<EOL>", "docstring": "Parameters\n----------\nx : Received complex baseband OFDM signal\nNf : Number of filled carriers, must be even and Nf < N\nN : Total number of carriers; generally a power 2, e.g., 64, 1024, etc\nNp : Period of pilot code blocks; 0 <=> no pilots; -1 <=> use the ht impulse response input to equalize the OFDM symbols; note equalization still requires Ncp > 0 to work on a delay spread channel.\ncp : False/True <=> if False assume no CP is present\nNcp : The length of the cyclic prefix\nalpha : The filter forgetting factor in the channel estimator. 
Typically alpha is 0.9 to 0.99.\nnt : Input the known theoretical channel impulse response\n\nReturns\n-------\nz_out : Recovered complex baseband QAM symbols as a serial stream; as appropriate channel estimation has been applied.\nH : channel estimate (in the frequency domain at each subcarrier)\n\nSee Also\n--------\nOFDM_tx\n\nExamples\n--------\n\n>>> import matplotlib.pyplot as plt\n>>> from sk_dsp_comm import digitalcom as dc\n>>> from scipy import signal\n>>> from numpy import array\n>>> hc = array([1.0, 0.1, -0.05, 0.15, 0.2, 0.05]) # impulse response spanning five symbols\n>>> # Quick example using the above channel with no cyclic prefix\n>>> x1,b1,IQ_data1 = dc.QAM_bb(50000,1,'16qam')\n>>> x_out = dc.OFDM_tx(IQ_data1,32,64,0,True,0)\n>>> c_out = signal.lfilter(hc,1,x_out) # Apply channel distortion\n>>> r_out = dc.cpx_AWGN(c_out,100,64/32) # Es/N0 = 100 dB\n>>> z_out,H = dc.OFDM_rx(r_out,32,64,-1,True,0,alpha=0.95,ht=hc)\n>>> plt.plot(z_out[200:].real,z_out[200:].imag,'.')\n>>> plt.xlabel('In-Phase')\n>>> plt.ylabel('Quadrature')\n>>> plt.axis('equal')\n>>> plt.grid()\n>>> plt.show()\n\nAnother example with noise using a 10 symbol cyclic prefix and channel estimation:\n\n>>> x_out = dc.OFDM_tx(IQ_data1,32,64,100,True,10)\n>>> c_out = signal.lfilter(hc,1,x_out) # Apply channel distortion\n>>> r_out = dc.cpx_AWGN(c_out,25,64/32) # Es/N0 = 25 dB\n>>> z_out,H = dc.OFDM_rx(r_out,32,64,100,True,10,alpha=0.95,ht=hc);\n>>> plt.figure() # if channel estimation is turned on need this\n>>> plt.plot(z_out[-2000:].real,z_out[-2000:].imag,'.') # allow settling time\n>>> plt.xlabel('In-Phase')\n>>> plt.ylabel('Quadrature')\n>>> plt.axis('equal')\n>>> plt.grid()\n>>> plt.show()", "id": "f14912:m30"}
{"signature": "def QAM_SEP(tx_data,rx_data,mod_type,Ncorr = <NUM_LIT>,Ntransient = <NUM_LIT:0>,SEP_disp=True):", "body": "<EOL>tx_data = tx_data[Ntransient:]<EOL>rx_data = rx_data[Ntransient:]<EOL>Nmin = min([len(tx_data),len(rx_data)])<EOL>tx_data = tx_data[:Nmin]<EOL>rx_data = rx_data[:Nmin]<EOL>if mod_type.lower() == '<STR_LIT>':<EOL><INDENT>M = <NUM_LIT:2> <EOL><DEDENT>elif mod_type.lower() == '<STR_LIT>':<EOL><INDENT>M = <NUM_LIT:4><EOL><DEDENT>elif mod_type.lower() == '<STR_LIT>':<EOL><INDENT>M = <NUM_LIT:8><EOL><DEDENT>elif mod_type.lower() == '<STR_LIT>':<EOL><INDENT>M = <NUM_LIT:16><EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>rx_data = np.rint((M-<NUM_LIT:1>)*(rx_data + (<NUM_LIT:1>+<NUM_LIT>))/<NUM_LIT>)<EOL>s1r = np.nonzero(np.ravel(rx_data.real > M - <NUM_LIT:1>))[<NUM_LIT:0>]<EOL>s2r = np.nonzero(np.ravel(rx_data.real < <NUM_LIT:0>))[<NUM_LIT:0>]<EOL>rx_data.real[s1r] = (M - <NUM_LIT:1>)*np.ones(len(s1r))<EOL>rx_data.real[s2r] = np.zeros(len(s2r))<EOL>s1i = np.nonzero(np.ravel(rx_data.imag > M - <NUM_LIT:1>))[<NUM_LIT:0>]<EOL>s2i = np.nonzero(np.ravel(rx_data.imag < <NUM_LIT:0>))[<NUM_LIT:0>]<EOL>rx_data.imag[s1i] = (M - <NUM_LIT:1>)*np.ones(len(s1i))<EOL>rx_data.imag[s2i] = np.zeros(len(s2i))<EOL>rx_data = <NUM_LIT:2>*rx_data - (M - <NUM_LIT:1>)*(<NUM_LIT:1> + <NUM_LIT>)<EOL>R0,lags = xcorr(rx_data,tx_data,Ncorr)<EOL>R1,lags = xcorr(rx_data*(<NUM_LIT>)**<NUM_LIT:1>,tx_data,Ncorr) <EOL>R2,lags = xcorr(rx_data*(<NUM_LIT>)**<NUM_LIT:2>,tx_data,Ncorr) <EOL>R3,lags = xcorr(rx_data*(<NUM_LIT>)**<NUM_LIT:3>,tx_data,Ncorr) <EOL>R0max = np.max(R0.real)<EOL>R1max = np.max(R1.real)<EOL>R2max = np.max(R2.real)<EOL>R3max = np.max(R3.real)<EOL>R = np.array([R0max,R1max,R2max,R3max])<EOL>Rmax = np.max(R)<EOL>kphase_max = np.where(R == Rmax)[<NUM_LIT:0>]<EOL>kmax = kphase_max[<NUM_LIT:0>]<EOL>if kmax == <NUM_LIT:0>:<EOL><INDENT>lagmax = lags[np.where(R0.real == Rmax)[<NUM_LIT:0>]]<EOL><DEDENT>elif kmax == <NUM_LIT:1>:<EOL><INDENT>lagmax = 
lags[np.where(R1.real == Rmax)[<NUM_LIT:0>]]<EOL><DEDENT>elif kmax == <NUM_LIT:2>:<EOL><INDENT>lagmax = lags[np.where(R2.real == Rmax)[<NUM_LIT:0>]]<EOL><DEDENT>elif kmax == <NUM_LIT:3>:<EOL><INDENT>lagmax = lags[np.where(R3.real == Rmax)[<NUM_LIT:0>]]<EOL><DEDENT>taumax = lagmax[<NUM_LIT:0>]<EOL>if SEP_disp:<EOL><INDENT>print('<STR_LIT>' % (kmax, taumax))<EOL><DEDENT>if taumax < <NUM_LIT:0>:<EOL><INDENT>tx_data = tx_data[-taumax:]<EOL>tx_data = tx_data[:min(len(tx_data),len(rx_data))]<EOL>rx_data = (<NUM_LIT>)**kmax*rx_data[:len(tx_data)]<EOL><DEDENT>else:<EOL><INDENT>rx_data = (<NUM_LIT>)**kmax*rx_data[taumax:]<EOL>rx_data = rx_data[:min(len(tx_data),len(rx_data))]<EOL>tx_data = tx_data[:len(rx_data)]<EOL><DEDENT>errors = np.int16(abs(rx_data-tx_data))<EOL>idx = np.nonzero(np.ravel(errors != <NUM_LIT:0>))[<NUM_LIT:0>]<EOL>if SEP_disp:<EOL><INDENT>print('<STR_LIT>'% (len(errors), len(idx), len(idx)/float(len(errors))))<EOL><DEDENT>return  len(errors), len(idx), len(idx)/float(len(errors))<EOL>", "docstring": "Nsymb, Nerr, SEP_hat =\nQAM_symb_errors(tx_data,rx_data,mod_type,Ncorr = 1024,Ntransient = 0)\n\nCount symbol errors between a transmitted and received QAM signal.\nThe received symbols are assumed to be soft values on a unit square.\nTime delay between streams is detected.\nThe ndarray tx_data is Tx complex symbols.\nThe ndarray rx_data is Rx complex symbols.\nNote: Ncorr needs to be even", "id": "f14912:m6"}
{"signature": "def QAM_bb(N_symb,Ns,mod_type='<STR_LIT>',pulse='<STR_LIT>',alpha=<NUM_LIT>):", "body": "<EOL>if pulse.lower() == '<STR_LIT:src>':<EOL><INDENT>b = sqrt_rc_imp(Ns,alpha,<NUM_LIT:6>)<EOL><DEDENT>elif pulse.lower() == '<STR_LIT>':<EOL><INDENT>b = rc_imp(Ns,alpha,<NUM_LIT:6>)    <EOL><DEDENT>elif pulse.lower() == '<STR_LIT>':<EOL><INDENT>b = np.ones(int(Ns)) <EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if mod_type.lower() == '<STR_LIT>':<EOL><INDENT>M = <NUM_LIT:2> <EOL><DEDENT>elif mod_type.lower() == '<STR_LIT>':<EOL><INDENT>M = <NUM_LIT:4><EOL><DEDENT>elif mod_type.lower() == '<STR_LIT>':<EOL><INDENT>M = <NUM_LIT:8><EOL><DEDENT>elif mod_type.lower() == '<STR_LIT>':<EOL><INDENT>M = <NUM_LIT:16><EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>xI = np.random.randint(<NUM_LIT:0>,M,N_symb)<EOL>xI = <NUM_LIT:2>*xI - (M-<NUM_LIT:1>)<EOL>xQ = np.random.randint(<NUM_LIT:0>,M,N_symb)<EOL>xQ = <NUM_LIT:2>*xQ - (M-<NUM_LIT:1>)<EOL>symbI = np.hstack((xI.reshape(N_symb,<NUM_LIT:1>),np.zeros((N_symb,int(Ns)-<NUM_LIT:1>))))<EOL>symbI = symbI.flatten()<EOL>symbQ = np.hstack((xQ.reshape(N_symb,<NUM_LIT:1>),np.zeros((N_symb,int(Ns)-<NUM_LIT:1>))))<EOL>symbQ = symbQ.flatten()<EOL>symb = symbI + <NUM_LIT>*symbQ<EOL>if M > <NUM_LIT:2>:<EOL><INDENT>symb /= (M-<NUM_LIT:1>)<EOL><DEDENT>x = signal.lfilter(b,<NUM_LIT:1>,symb)<EOL>x = x.flatten() <EOL>b = b/sum(b)<EOL>return x, b, xI+<NUM_LIT>*xQ<EOL>", "docstring": "QAM_BB_TX: A complex baseband transmitter \nx,b,tx_data = QAM_bb(K,Ns,M)\n\n//////////// Inputs //////////////////////////////////////////////////\n  N_symb = the number of symbols to process\n      Ns = number of samples per symbol\nmod_type = modulation type: qpsk, 16qam, 64qam, or 256qam\n   alpha = squareroot raised codine pulse shape bandwidth factor.\n           For DOCSIS alpha = 0.12 to 0.18. 
In general alpha can \n           range over 0 < alpha < 1.\n     SRC = pulse shape: 0-> rect, 1-> SRC\n//////////// Outputs /////////////////////////////////////////////////\n       x = complex baseband digital modulation\n       b = transmitter shaping filter, rectangle or SRC\n tx_data = xI+1j*xQ = inphase symbol sequence + \n           1j*quadrature symbol sequence\n\nMark Wickert November 2014", "id": "f14912:m5"}
{"signature": "def MPSK_bb(N_symb,Ns,M,pulse='<STR_LIT>',alpha = <NUM_LIT>,MM=<NUM_LIT:6>):", "body": "data = np.random.randint(<NUM_LIT:0>,M,N_symb) <EOL>xs = np.exp(<NUM_LIT>*<NUM_LIT:2>*np.pi/M*data)<EOL>x = np.hstack((xs.reshape(N_symb,<NUM_LIT:1>),np.zeros((N_symb,int(Ns)-<NUM_LIT:1>))))<EOL>x =x.flatten()<EOL>if pulse.lower() == '<STR_LIT>':<EOL><INDENT>b = np.ones(int(Ns))<EOL><DEDENT>elif pulse.lower() == '<STR_LIT>':<EOL><INDENT>b = rc_imp(Ns,alpha,MM)<EOL><DEDENT>elif pulse.lower() == '<STR_LIT:src>':<EOL><INDENT>b = sqrt_rc_imp(Ns,alpha,MM)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>x = signal.lfilter(b,<NUM_LIT:1>,x)<EOL>if M == <NUM_LIT:4>:<EOL><INDENT>x = x*np.exp(<NUM_LIT>*np.pi/<NUM_LIT:4>); <EOL><DEDENT>return x,b/float(Ns),data<EOL>", "docstring": "Generate a complex baseband MPSK signal with pulse shaping.\n\nParameters\n----------\nN_symb : number of MPSK symbols to produce\nNs : the number of samples per bit,\nM : MPSK modulation order, e.g., 4, 8, 16, ...\npulse_type : 'rect' , 'rc', 'src' (default 'rect')\nalpha : excess bandwidth factor(default 0.25)\nMM : single sided pulse duration (default = 6) \n\nReturns\n-------\nx : ndarray of the MPSK signal values\nb : ndarray of the pulse shape\ndata : ndarray of the underlying data bits\n\nNotes\n-----\nPulse shapes include 'rect' (rectangular), 'rc' (raised cosine), \n'src' (root raised cosine). 
The actual pulse length is 2*M+1 samples.\nThis function is used by BPSK_tx in the Case Study article.\n\nExamples\n--------\n>>> from sk_dsp_comm import digitalcom as dc\n>>> import scipy.signal as signal\n>>> import matplotlib.pyplot as plt\n>>> x,b,data = dc.MPSK_bb(500,10,8,'src',0.35)\n>>> # Matched filter received signal x\n>>> y = signal.lfilter(b,1,x)\n>>> plt.plot(y.real[12*10:],y.imag[12*10:])\n>>> plt.xlabel('In-Phase')\n>>> plt.ylabel('Quadrature')\n>>> plt.axis('equal')\n>>> # Sample once per symbol\n>>> plt.plot(y.real[12*10::10],y.imag[12*10::10],'r.')\n>>> plt.show()", "id": "f14912:m8"}
{"signature": "def grab(self, bbox=None):", "body": "w = Gdk.get_default_root_window()<EOL>if bbox is not None:<EOL><INDENT>g = [bbox[<NUM_LIT:0>], bbox[<NUM_LIT:1>], bbox[<NUM_LIT:2>] - bbox[<NUM_LIT:0>], bbox[<NUM_LIT:3>] - bbox[<NUM_LIT:1>]]<EOL><DEDENT>else:<EOL><INDENT>g = w.get_geometry()<EOL><DEDENT>pb = Gdk.pixbuf_get_from_window(w, *g)<EOL>if pb.get_bits_per_sample() != <NUM_LIT:8>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>elif pb.get_n_channels() != <NUM_LIT:3>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>pixel_bytes = pb.read_pixel_bytes().get_data()  <EOL>width, height = g[<NUM_LIT:2>], g[<NUM_LIT:3>]<EOL>return Image.frombytes(<EOL>'<STR_LIT>', (width, height), pixel_bytes, '<STR_LIT>', '<STR_LIT>', pb.get_rowstride(), <NUM_LIT:1>)<EOL>", "docstring": "Grabs an image directly to a buffer.\n\n        :param bbox: Optional tuple or list containing (x1, y1, x2, y2) coordinates\n            of sub-region to capture.\n        :return: PIL RGB image\n        :raises: ValueError, if image data does not have 3 channels (RGB), each with 8\n            bits.\n        :rtype: Image", "id": "f14935:c0:m1"}
{"signature": "def grab(bbox=None, childprocess=None, backend=None):", "body": "if childprocess is None:<EOL><INDENT>childprocess = childprocess_default_value()<EOL><DEDENT>return _grab(<EOL>to_file=False, childprocess=childprocess, backend=backend, bbox=bbox)<EOL>", "docstring": "Copy the contents of the screen to PIL image memory.\n\n    :param bbox: optional bounding box (x1,y1,x2,y2)\n    :param childprocess: pyscreenshot can cause an error,\n            if it is used on more different virtual displays\n            and back-end is not in different process.\n            Some back-ends are always different processes: scrot, imagemagick\n            The default is False if the program was started inside IDLE,\n            otherwise it is True.\n    :param backend: back-end can be forced if set (examples:scrot, wx,..),\n                    otherwise back-end is automatic", "id": "f14951:m3"}
{"signature": "def grab_to_file(filename, childprocess=None, backend=None):", "body": "if childprocess is None:<EOL><INDENT>childprocess = childprocess_default_value()<EOL><DEDENT>return _grab(to_file=True, childprocess=childprocess,<EOL>backend=backend, filename=filename)<EOL>", "docstring": "Copy the contents of the screen to a file. Internal function! Use\n    PIL.Image.save() for saving image to file.\n\n    :param filename: file for saving\n    :param childprocess: see :py:func:`grab`\n    :param backend: see :py:func:`grab`", "id": "f14951:m4"}
{"signature": "def backend_version(backend, childprocess=None):", "body": "if childprocess is None:<EOL><INDENT>childprocess = childprocess_default_value()<EOL><DEDENT>if not childprocess:<EOL><INDENT>return _backend_version(backend)<EOL><DEDENT>else:<EOL><INDENT>return run_in_childprocess(_backend_version, None, backend)<EOL><DEDENT>", "docstring": "Back-end version.\n\n    :param backend: back-end (examples:scrot, wx,..)\n    :param childprocess: see :py:func:`grab`\n    :return: version as string", "id": "f14951:m7"}
{"signature": "def backends():", "body": "return Loader().all_names<EOL>", "docstring": "Back-end names as a list.\n\n    :return: back-ends as string list", "id": "f14951:m5"}
{"signature": "def jenks_breaks(values, nb_class):", "body": "if not isinstance(values, Iterable) or isinstance(values, (str, bytes)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if isinstance(nb_class, float) and int(nb_class) == nb_class:<EOL><INDENT>nb_class = int(nb_class)<EOL><DEDENT>if not isinstance(nb_class, int):<EOL><INDENT>raise TypeError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>.format(type(nb_class)))<EOL><DEDENT>nb_values = len(values)<EOL>if np and isinstance(values, np.ndarray):<EOL><INDENT>values = values[np.argwhere(np.isfinite(values)).reshape(-<NUM_LIT:1>)]<EOL><DEDENT>else:<EOL><INDENT>values = [i for i in values if isfinite(i)]<EOL><DEDENT>if len(values) != nb_values:<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL>nb_values = len(values)<EOL><DEDENT>if nb_class >= nb_values or nb_class < <NUM_LIT:2>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>return jenks._jenks_breaks(values, nb_class)<EOL>", "docstring": "Compute jenks natural breaks on a sequence of `values`, given `nb_class`,\nthe number of desired class.\n\nParameters\n----------\nvalues : array-like\n    The Iterable sequence of numbers (integer/float) to be used.\nnb_class : int\n    The desired number of class (as some other functions requests\n    a `k` value, `nb_class` is like `k` + 1). Have to be lesser than\n    the length of `values` and greater than 2.\n\nReturns\n-------\nbreaks : tuple of floats\n    The computed break values, including minimum and maximum, in order\n    to have all the bounds for building `nb_class` class,\n    so the returned tuple has a length of `nb_class` + 1.\n\n\nExamples\n--------\nUsing nb_class = 3, expecting 4 break values , including min and max :\n\n>>> jenks_breaks(\n        [1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3],\n        nb_class = 3)  # Should output (1.2, 2.3, 5.0, 7.8)", "id": "f14960:m0"}
{"signature": "def pop_marker(self, reset):", "body": "marker = self.markers.pop()<EOL>if reset:<EOL><INDENT>marker.extend(self.look_ahead)<EOL>self.look_ahead = marker<EOL><DEDENT>elif self.markers:<EOL><INDENT>self.markers[-<NUM_LIT:1>].extend(marker)<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Pop a marker off of the marker stack. If reset is True then the\n        iterator will be returned to the state it was in before the\n        corresponding call to push_marker().", "id": "f14963:c0:m10"}
{"signature": "def push_marker(self):", "body": "self.saved_markers.append(self.marker)<EOL>", "docstring": "Push a marker on to the marker stack", "id": "f14963:c1:m9"}
{"signature": "def push_marker(self):", "body": "self.markers.append(list())<EOL>", "docstring": "Push a marker on to the marker stack", "id": "f14963:c0:m9"}
{"signature": "def look(self, i=<NUM_LIT:0>):", "body": "try:<EOL><INDENT>self.value = self.list[self.marker + i]<EOL><DEDENT>except IndexError:<EOL><INDENT>return self.default<EOL><DEDENT>return self.value<EOL>", "docstring": "Look ahead of the iterable by some number of values with advancing\n        past them.\n\n        If the requested look ahead is past the end of the iterable then None is\n        returned.", "id": "f14963:c1:m5"}
{"signature": "def pop_marker(self, reset):", "body": "saved = self.saved_markers.pop()<EOL>if reset:<EOL><INDENT>self.marker = saved<EOL><DEDENT>elif self.saved_markers:<EOL><INDENT>self.saved_markers[-<NUM_LIT:1>] = saved<EOL><DEDENT>", "docstring": "Pop a marker off of the marker stack. If reset is True then the\n        iterator will be returned to the state it was in before the\n        corresponding call to push_marker().", "id": "f14963:c1:m10"}
{"signature": "def is_annotation(self, i=<NUM_LIT:0>):", "body": "return (isinstance(self.tokens.look(i), Annotation)<EOL>and not self.tokens.look(i + <NUM_LIT:1>).value == '<STR_LIT>')<EOL>", "docstring": "Returns true if the position is the start of an annotation application\n        (as opposed to an annotation declaration)", "id": "f14964:c3:m8"}
{"signature": "def is_annotation_declaration(self, i=<NUM_LIT:0>):", "body": "return (isinstance(self.tokens.look(i), Annotation)<EOL>and self.tokens.look(i + <NUM_LIT:1>).value == '<STR_LIT>')<EOL>", "docstring": "Returns true if the position is the start of an annotation application\n        (as opposed to an annotation declaration)", "id": "f14964:c3:m9"}
{"signature": "def get_sha(a_file):", "body": "try:<EOL><INDENT>BLOCKSIZE = <NUM_LIT><EOL>hasher = hashlib.sha1()<EOL>with io.open(a_file, \"<STR_LIT:rb>\") as fh:<EOL><INDENT>buf = fh.read(BLOCKSIZE)<EOL>while len(buf) > <NUM_LIT:0>:<EOL><INDENT>hasher.update(buf)<EOL>buf = fh.read(BLOCKSIZE)<EOL><DEDENT><DEDENT>the_hash = hasher.hexdigest()<EOL><DEDENT>except IOError:<EOL><INDENT>errmes = \"<STR_LIT>\".format(a_file)<EOL>sys.stdout.write(errmes)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>except:<EOL><INDENT>errmes = \"<STR_LIT>\"<EOL>sys.stdout.write(errmes)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>return the_hash<EOL>", "docstring": "Returns sha1 hash of the file supplied as an argument", "id": "f14979:m0"}
{"signature": "def write_shas_to_shastore(sha_dict):", "body": "if sys.version_info[<NUM_LIT:0>] < <NUM_LIT:3>:<EOL><INDENT>fn_open = open<EOL><DEDENT>else:<EOL><INDENT>fn_open = io.open<EOL><DEDENT>with fn_open(\"<STR_LIT>\", \"<STR_LIT:w>\") as fh:<EOL><INDENT>fh.write(\"<STR_LIT>\")<EOL>fh.write('<STR_LIT>'.format(constants.VERSION))<EOL>if sha_dict:<EOL><INDENT>fh.write(yaml.dump(sha_dict))<EOL><DEDENT>fh.write(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Writes a sha1 dictionary stored in memory to\nthe .shastore file", "id": "f14982:m2"}
{"signature": "def parallel_run_these(G, list_of_targets, in_mem_shas, from_store,<EOL>settings, dont_update_shas_of):", "body": "verbose = settings[\"<STR_LIT>\"]<EOL>quiet = settings[\"<STR_LIT>\"]<EOL>error = settings[\"<STR_LIT:error>\"]<EOL>sprint = settings[\"<STR_LIT>\"]<EOL>if len(list_of_targets) == <NUM_LIT:1>:<EOL><INDENT>target = list_of_targets[<NUM_LIT:0>]<EOL>sprint(\"<STR_LIT>\".format(target),<EOL>level=\"<STR_LIT>\")<EOL>run_the_target(G, target, settings)<EOL>node_dict = get_the_node_dict(G, target)<EOL>if \"<STR_LIT>\" in node_dict:<EOL><INDENT>for output in acts.get_all_outputs(node_dict):<EOL><INDENT>if output not in dont_update_shas_of:<EOL><INDENT>in_mem_shas['<STR_LIT>'][output] = {\"<STR_LIT>\": get_sha(output,<EOL>settings)}<EOL>in_mem_shas[output] = get_sha(output, settings)<EOL>write_shas_to_shastore(in_mem_shas)<EOL><DEDENT><DEDENT><DEDENT>if \"<STR_LIT>\" in node_dict:<EOL><INDENT>for dep in acts.get_all_dependencies(node_dict):<EOL><INDENT>if dep not in dont_update_shas_of:<EOL><INDENT>in_mem_shas['<STR_LIT>'][dep] = {\"<STR_LIT>\": get_sha(dep, settings)}<EOL>write_shas_to_shastore(in_mem_shas)<EOL><DEDENT><DEDENT><DEDENT>return True<EOL><DEDENT>a_failure_occurred = False<EOL>out = \"<STR_LIT>\"<EOL>sprint(out.format(\"<STR_LIT:U+002CU+0020>\".join(list_of_targets)))<EOL>info = [(target, get_the_node_dict(G, target))<EOL>for target in list_of_targets]<EOL>commands = [item[<NUM_LIT:1>]['<STR_LIT>'].rstrip() for item in info]<EOL>if not quiet:<EOL><INDENT>procs = [Popen(command, shell=True) for command in commands]<EOL><DEDENT>else:<EOL><INDENT>procs = [Popen(command, shell=True, stdout=PIPE, stderr=PIPE)<EOL>for command in commands]<EOL><DEDENT>for index, process in enumerate(procs):<EOL><INDENT>if process.wait():<EOL><INDENT>error(\"<STR_LIT>\".format(info[index][<NUM_LIT:0>]))<EOL>a_failure_occurred = True<EOL><DEDENT>else:<EOL><INDENT>if \"<STR_LIT>\" in info[index][<NUM_LIT:1>]:<EOL><INDENT>for output in 
acts.get_all_outputs(info[index][<NUM_LIT:1>]):<EOL><INDENT>if output not in dont_update_shas_of:<EOL><INDENT>in_mem_shas['<STR_LIT>'][output] = {\"<STR_LIT>\": get_sha(output,<EOL>settings)}<EOL>write_shas_to_shastore(in_mem_shas)<EOL><DEDENT><DEDENT><DEDENT>if \"<STR_LIT>\" in info[index][<NUM_LIT:1>]:<EOL><INDENT>for dep in acts.get_all_dependencies(info[index][<NUM_LIT:1>]):<EOL><INDENT>if dep not in dont_update_shas_of:<EOL><INDENT>in_mem_shas['<STR_LIT>'][dep] = {\"<STR_LIT>\": get_sha(dep, settings)}<EOL>write_shas_to_shastore(in_mem_shas)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if a_failure_occurred:<EOL><INDENT>error(\"<STR_LIT>\")<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>return True<EOL>", "docstring": "The parallel equivalent of \"run_this_target()\"\nIt receives a list of targets to execute in parallel.\nUnlike \"run_this_target()\" it has to update the shas\n(in memory and in the store) within the function.\nThis is because one of the targets may fail but many can\nsucceed, and those outputs need to be updated\n\nArgs:\n    G\n    A graph\n    A list of targets that we need to build in parallel\n    The dictionary containing the in-memory sha store\n    The dictionary containing the contents of the .shastore file\n    The settings dictionary\n    A list of outputs to not update shas of", "id": "f14982:m13"}
{"signature": "def run_commands(commands, settings):", "body": "sprint = settings[\"<STR_LIT>\"]<EOL>quiet = settings[\"<STR_LIT>\"]<EOL>error = settings[\"<STR_LIT:error>\"]<EOL>enhanced_errors = True<EOL>the_shell = None<EOL>if settings[\"<STR_LIT>\"]:<EOL><INDENT>enhanced_errors = False<EOL><DEDENT>if \"<STR_LIT>\" in settings:<EOL><INDENT>the_shell = settings[\"<STR_LIT>\"]<EOL><DEDENT>windows_p = sys.platform == \"<STR_LIT:win32>\"<EOL>STDOUT = None<EOL>STDERR = None<EOL>if quiet:<EOL><INDENT>STDOUT = PIPE<EOL>STDERR = PIPE<EOL><DEDENT>commands = commands.rstrip()<EOL>sprint(\"<STR_LIT>\".format(commands), level=\"<STR_LIT>\")<EOL>if not quiet:<EOL><INDENT>sprint(commands)<EOL><DEDENT>if the_shell:<EOL><INDENT>tmp = shlex.split(the_shell)<EOL>the_shell = tmp[<NUM_LIT:0>]<EOL>tmp = tmp[<NUM_LIT:1>:]<EOL>if enhanced_errors and not windows_p:<EOL><INDENT>tmp.append(\"<STR_LIT>\")<EOL><DEDENT>tmp.append(commands)<EOL>commands = tmp<EOL><DEDENT>else:<EOL><INDENT>if enhanced_errors and not windows_p:<EOL><INDENT>commands = [\"<STR_LIT>\", commands]<EOL><DEDENT><DEDENT>p = Popen(commands, shell=True, stdout=STDOUT, stderr=STDERR,<EOL>executable=the_shell)<EOL>out, err = p.communicate()<EOL>if p.returncode:<EOL><INDENT>if quiet:<EOL><INDENT>error(err.decode(locale.getpreferredencoding()))<EOL><DEDENT>error(\"<STR_LIT>\")<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Runs the commands supplied as an argument\nIt will exit the program if the commands return a\nnon-zero code\n\nArgs:\n    the commands to run\n    The settings dictionary", "id": "f14982:m5"}
{"signature": "def get_direct_ancestors(G, list_of_nodes):", "body": "parents = []<EOL>for item in list_of_nodes:<EOL><INDENT>anc = G.predecessors(item)<EOL>for one in anc:<EOL><INDENT>parents.append(one)<EOL><DEDENT><DEDENT>return parents<EOL>", "docstring": "Returns a list of nodes that are the parents\nfrom all of the nodes given as an argument.\nThis is for use in the parallel topo sort", "id": "f14982:m8"}
{"signature": "def get_sha(a_file, settings=None):", "body": "if settings:<EOL><INDENT>error = settings[\"<STR_LIT:error>\"]<EOL><DEDENT>else:<EOL><INDENT>error = ERROR_FN<EOL><DEDENT>try:<EOL><INDENT>BLOCKSIZE = <NUM_LIT><EOL>hasher = hashlib.sha1()<EOL>with io.open(a_file, \"<STR_LIT:rb>\") as fh:<EOL><INDENT>buf = fh.read(BLOCKSIZE)<EOL>while len(buf) > <NUM_LIT:0>:<EOL><INDENT>hasher.update(buf)<EOL>buf = fh.read(BLOCKSIZE)<EOL><DEDENT><DEDENT>the_hash = hasher.hexdigest()<EOL><DEDENT>except IOError:<EOL><INDENT>errmes = \"<STR_LIT>\".format(a_file)<EOL>error(errmes)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>except:<EOL><INDENT>errmes = \"<STR_LIT>\"<EOL>error(errmes)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>return the_hash<EOL>", "docstring": "Returns sha1 hash of the file supplied as an argument", "id": "f14982:m1"}
{"signature": "def get_the_node_dict(G, name):", "body": "for node in G.nodes(data=True):<EOL><INDENT>if node[<NUM_LIT:0>] == name:<EOL><INDENT>return node[<NUM_LIT:1>]<EOL><DEDENT><DEDENT>", "docstring": "Helper function that returns the node data\nof the node with the name supplied", "id": "f14982:m7"}
{"signature": "def check_integrity(sakefile, settings):", "body": "sprint = settings[\"<STR_LIT>\"]<EOL>error = settings[\"<STR_LIT:error>\"]<EOL>sprint(\"<STR_LIT>\", level=\"<STR_LIT>\")<EOL>if not sakefile:<EOL><INDENT>error(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>if len(sakefile.keys()) != len(set(sakefile.keys())):<EOL><INDENT>error(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>for target in sakefile:<EOL><INDENT>if target == \"<STR_LIT:all>\":<EOL><INDENT>if not check_target_integrity(target, sakefile[\"<STR_LIT:all>\"], all=True):<EOL><INDENT>error(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>continue<EOL><DEDENT>if \"<STR_LIT>\" not in sakefile[target]:<EOL><INDENT>if not check_target_integrity(target, sakefile[target],<EOL>meta=True):<EOL><INDENT>errmes = \"<STR_LIT>\".format(target)<EOL>error(errmes)<EOL>return False<EOL><DEDENT>for atom_target in sakefile[target]:<EOL><INDENT>if atom_target == \"<STR_LIT>\":<EOL><INDENT>continue<EOL><DEDENT>if not check_target_integrity(atom_target,<EOL>sakefile[target][atom_target],<EOL>parent=target):<EOL><INDENT>errmes = \"<STR_LIT>\".format(<EOL>atom_target)<EOL>error(errmes)<EOL>return False<EOL><DEDENT><DEDENT>continue<EOL><DEDENT>if not check_target_integrity(target, sakefile[target]):<EOL><INDENT>errmes = \"<STR_LIT>\".format(target)<EOL>error(errmes)<EOL>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Checks the format of the sakefile dictionary\nto ensure it conforms to specification\n\nArgs:\n    A dictionary that is the parsed Sakefile (from sake.py)\n    The setting dictionary (for print functions)\nReturns:\n    True if the Sakefile is conformant\n    False if not", "id": "f14983:m0"}
{"signature": "def get_print_functions(settings):", "body": "verbose = settings[\"<STR_LIT>\"]<EOL>def sprint(message, level=None, color=False):<EOL><INDENT>if level==\"<STR_LIT>\" and not verbose:<EOL><INDENT>return<EOL><DEDENT>prepend = \"<STR_LIT>\"<EOL>postfix = \"<STR_LIT>\"<EOL>if settings[\"<STR_LIT>\"] and color:<EOL><INDENT>prepend = \"<STR_LIT>\"<EOL>postfix = \"<STR_LIT>\"<EOL><DEDENT>print(\"<STR_LIT>\".format(prepend, message, postfix))<EOL>sys.stdout.flush()<EOL><DEDENT>def warn(message, level=None, color=True):<EOL><INDENT>if level==\"<STR_LIT>\" and not verbose:<EOL><INDENT>return<EOL><DEDENT>prepend = \"<STR_LIT>\"<EOL>postfix = \"<STR_LIT>\"<EOL>if settings[\"<STR_LIT>\"] and color:<EOL><INDENT>prepend = \"<STR_LIT>\"<EOL>postfix = \"<STR_LIT>\"<EOL><DEDENT>print(\"<STR_LIT>\".format(prepend, message, postfix))<EOL>sys.stdout.flush()<EOL><DEDENT>def error(message, level=None, color=True):<EOL><INDENT>if level==\"<STR_LIT>\" and not verbose:<EOL><INDENT>return<EOL><DEDENT>prepend = \"<STR_LIT>\"<EOL>postfix = \"<STR_LIT>\"<EOL>if settings[\"<STR_LIT>\"] and color:<EOL><INDENT>prepend = \"<STR_LIT>\"<EOL>postfix = \"<STR_LIT>\"<EOL><DEDENT>print(\"<STR_LIT>\".format(prepend, message, postfix), file=sys.stderr)<EOL>sys.stderr.flush()<EOL><DEDENT>return sprint, warn, error<EOL>", "docstring": "This returns the appropriate print functions\nin a tuple\nThe print function are:\n    - sprint - for standard printing\n    - warn - for warnings\n    - error - for errors\nThis will all be the same if color is False.\n\nThe returned print functions will contain an optional parameter\nthat specifies the output level (verbose or not). If not verbose,\nthe print function will ignore the message.", "id": "f14984:m0"}
{"signature": "def get_ties(G):", "body": "<EOL>ties = []<EOL>dep_dict = {}<EOL>for node in G.nodes(data=True):<EOL><INDENT>if '<STR_LIT>' in node[<NUM_LIT:1>]:<EOL><INDENT>for item in node[<NUM_LIT:1>]['<STR_LIT>']:<EOL><INDENT>if item not in dep_dict:<EOL><INDENT>dep_dict[item] = []<EOL><DEDENT>dep_dict[item].append(node[<NUM_LIT:0>])<EOL><DEDENT><DEDENT><DEDENT>for item in dep_dict:<EOL><INDENT>if len(list(set(dep_dict[item]))) > <NUM_LIT:1>:<EOL><INDENT>ties.append(list(set(dep_dict[item])))<EOL><DEDENT><DEDENT>return ties<EOL>", "docstring": "If you specify a target that shares a dependency with another target,\nboth targets need to be updated. This is because running one will resolve\nthe sha mismatch and sake will think that the other one doesn't have to\nrun. This is called a \"tie\". This function will find such ties.", "id": "f14984:m11"}
{"signature": "def check_for_dep_in_outputs(dep, verbose, G):", "body": "if verbose:<EOL><INDENT>print(\"<STR_LIT>\".format(dep))<EOL><DEDENT>ret_list = []<EOL>for node in G.nodes(data=True):<EOL><INDENT>if \"<STR_LIT>\" not in node[<NUM_LIT:1>]:<EOL><INDENT>continue<EOL><DEDENT>for out in node[<NUM_LIT:1>]['<STR_LIT>']:<EOL><INDENT>if fnmatch.fnmatch(out, dep):<EOL><INDENT>ret_list.append(node[<NUM_LIT:0>])<EOL>break<EOL><DEDENT><DEDENT><DEDENT>return ret_list<EOL>", "docstring": "Function to help construct_graph() identify dependencies\n\nArgs:\n    A dependency\n    A flag indication verbosity\n    A (populated) NetworkX DiGraph\n\nReturns:\n    A list of targets that build given dependency", "id": "f14984:m8"}
{"signature": "def get_all_outputs(node_dict):", "body": "outlist = []<EOL>for item in node_dict['<STR_LIT>']:<EOL><INDENT>glist = glob.glob(item)<EOL>if glist:<EOL><INDENT>for oneglob in glist:<EOL><INDENT>outlist.append(oneglob)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>outlist.append(item)<EOL><DEDENT><DEDENT>return outlist<EOL>", "docstring": "This function takes a node dictionary and returns a list of\nthe node's output files. Some of the entries in the 'output'\nattribute may be globs, and without this function, sake won't\nknow how to handle that. This will unglob all globs and return\nthe true list of *all* outputs.", "id": "f14984:m14"}
{"signature": "def visualize(G, settings, filename=\"<STR_LIT>\", no_graphviz=False):", "body": "error = settings[\"<STR_LIT:error>\"]<EOL>if no_graphviz:<EOL><INDENT>write_dot_file(G, filename)<EOL>return <NUM_LIT:0><EOL><DEDENT>write_dot_file(G, \"<STR_LIT>\")<EOL>renderer = \"<STR_LIT>\"<EOL>if re.search(\"<STR_LIT>\", filename, re.IGNORECASE):<EOL><INDENT>renderer = \"<STR_LIT>\"<EOL><DEDENT>elif re.search(\"<STR_LIT>\", filename, re.IGNORECASE):<EOL><INDENT>renderer = \"<STR_LIT>\"<EOL><DEDENT>elif re.search(\"<STR_LIT>\", filename, re.IGNORECASE):<EOL><INDENT>renderer = \"<STR_LIT>\"<EOL><DEDENT>elif re.search(\"<STR_LIT>\", filename, re.IGNORECASE):<EOL><INDENT>renderer = \"<STR_LIT>\"<EOL><DEDENT>elif re.search(\"<STR_LIT>\", filename, re.IGNORECASE):<EOL><INDENT>renderer = \"<STR_LIT>\"<EOL><DEDENT>elif re.search(\"<STR_LIT>\", filename, re.IGNORECASE):<EOL><INDENT>renderer = \"<STR_LIT>\"<EOL><DEDENT>elif re.search(\"<STR_LIT>\", filename, re.IGNORECASE):<EOL><INDENT>renderer = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>renderer = \"<STR_LIT>\"<EOL>filename += \"<STR_LIT>\"<EOL><DEDENT>command = \"<STR_LIT>\".format(renderer, filename)<EOL>p = Popen(command, shell=True)<EOL>p.communicate()<EOL>if p.returncode:<EOL><INDENT>errmes = \"<STR_LIT>\"<EOL>os.remove(\"<STR_LIT>\")<EOL>error(errmes)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>os.remove(\"<STR_LIT>\")<EOL>return <NUM_LIT:0><EOL>", "docstring": "Uses networkX to draw a graphviz dot file either (a) calls the\ngraphviz command \"dot\" to turn it into a SVG and remove the\ndotfile (default), or (b) if no_graphviz is True, just output\nthe graphviz dot file\n\nArgs:\n    a NetworkX DiGraph\n    the settings dictionary\n    a filename (a default is provided\n    a flag indicating whether graphviz should *not* be called\n\nReturns:\n    0 if everything worked\n    will cause fatal error on failure", "id": "f14984:m18"}
{"signature": "def clean_all(G, settings):", "body": "quiet = settings[\"<STR_LIT>\"]<EOL>recon = settings[\"<STR_LIT>\"]<EOL>sprint = settings[\"<STR_LIT>\"]<EOL>error = settings[\"<STR_LIT:error>\"]<EOL>all_outputs = []<EOL>for node in G.nodes(data=True):<EOL><INDENT>if \"<STR_LIT>\" in node[<NUM_LIT:1>]:<EOL><INDENT>for item in get_all_outputs(node[<NUM_LIT:1>]):<EOL><INDENT>all_outputs.append(item)<EOL><DEDENT><DEDENT><DEDENT>all_outputs.append(\"<STR_LIT>\")<EOL>retcode = <NUM_LIT:0><EOL>for item in sorted(all_outputs):<EOL><INDENT>if os.path.isfile(item):<EOL><INDENT>if recon:<EOL><INDENT>sprint(\"<STR_LIT>\".format(item))<EOL>continue<EOL><DEDENT>sprint(\"<STR_LIT>\", level=\"<STR_LIT>\")<EOL>try:<EOL><INDENT>os.remove(item)<EOL>sprint(\"<STR_LIT>\", level=\"<STR_LIT>\")<EOL><DEDENT>except:<EOL><INDENT>errmes = \"<STR_LIT>\"<EOL>error(errmes.format(item))<EOL>retcode = <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>if not retcode and not recon:<EOL><INDENT>sprint(\"<STR_LIT>\", color=True)<EOL><DEDENT>return retcode<EOL>", "docstring": "Removes all the output files from all targets. Takes\nthe graph as the only argument\n\nArgs:\n    The networkx graph object\n    The settings dictionary\n\nReturns:\n    0 if successful\n    1 if removing even one file failed", "id": "f14984:m16"}
{"signature": "def parse_defines(args):", "body": "macros = {}<EOL>for arg in args:<EOL><INDENT>try:<EOL><INDENT>var, val = arg.split('<STR_LIT:=>', <NUM_LIT:1>)<EOL><DEDENT>except ValueError:<EOL><INDENT>var = arg<EOL>val = '<STR_LIT:1>'<EOL><DEDENT>macros[var] = val<EOL><DEDENT>return macros<EOL>", "docstring": "This parses a list of define argument in the form of -DNAME=VALUE or -DNAME (\nwhich is treated as -DNAME=1).", "id": "f14984:m6"}
{"signature": "def write_dot_file(G, filename):", "body": "with io.open(filename, \"<STR_LIT:w>\") as fh:<EOL><INDENT>fh.write(\"<STR_LIT>\")<EOL>edge_list = G.edges()<EOL>node_list = set(G.nodes())<EOL>if edge_list:<EOL><INDENT>for edge in sorted(edge_list):<EOL><INDENT>source, targ = edge<EOL>node_list = node_list - set(source)<EOL>node_list = node_list - set(targ)<EOL>line = '<STR_LIT>'<EOL>fh.write(line.format(source, targ))<EOL><DEDENT><DEDENT>if node_list:<EOL><INDENT>for node in sorted(node_list):<EOL><INDENT>line = '<STR_LIT>'.format(node)<EOL>fh.write(line)<EOL><DEDENT><DEDENT>fh.write(\"<STR_LIT:}>\")<EOL><DEDENT>", "docstring": "Writes the graph G in dot file format for graphviz visualization.\n\nArgs:\n    a Networkx graph\n    A filename to name the dot files", "id": "f14984:m17"}
{"signature": "def clean_path(a_path, force_os=None, force_start=None):", "body": "if not force_start:<EOL><INDENT>force_start = os.curdir<EOL><DEDENT>if force_os == \"<STR_LIT>\":<EOL><INDENT>import ntpath<EOL>return ntpath.relpath(ntpath.normpath(a_path),<EOL>start=force_start)<EOL><DEDENT>if force_os == \"<STR_LIT>\":<EOL><INDENT>import posixpath<EOL>return posixpath.relpath(posixpath.normpath(a_path),<EOL>start=force_start)<EOL><DEDENT>return os.path.relpath(os.path.normpath(a_path),<EOL>start=force_start)<EOL>", "docstring": "This function is used to normalize the path (of an output or\ndependency) and also provide the path in relative form. It is\nrelative to the current working directory", "id": "f14984:m3"}
{"signature": "def get_help(sakefile):", "body": "full_string = \"<STR_LIT>\"<EOL>errmes = \"<STR_LIT>\"<EOL>outerlines = []<EOL>for target in sakefile:<EOL><INDENT>if target == \"<STR_LIT:all>\":<EOL><INDENT>continue<EOL><DEDENT>middle_lines = []<EOL>if \"<STR_LIT>\" not in sakefile[target]:<EOL><INDENT>innerstr = \"<STR_LIT>\".format(escp(target),<EOL>sakefile[target][\"<STR_LIT>\"])<EOL>inner = []<EOL>for atom_target in sakefile[target]:<EOL><INDENT>if atom_target == \"<STR_LIT>\":<EOL><INDENT>continue<EOL><DEDENT>inner.append(\"<STR_LIT>\".format(escp(atom_target),<EOL>sakefile[target][atom_target][\"<STR_LIT>\"]))<EOL><DEDENT>if inner:<EOL><INDENT>innerstr += '<STR_LIT:\\n>'.join(sorted(inner))<EOL><DEDENT>middle_lines.append(innerstr)<EOL><DEDENT>else:<EOL><INDENT>middle_lines.append(\"<STR_LIT>\".format(escp(target),<EOL>sakefile[target][\"<STR_LIT>\"]))<EOL><DEDENT>if middle_lines:<EOL><INDENT>outerlines.append('<STR_LIT:\\n>'.join(sorted(middle_lines)))<EOL><DEDENT><DEDENT>if outerlines:<EOL><INDENT>full_string += '<STR_LIT:\\n>'.join(sorted(outerlines))<EOL><DEDENT>what_clean_does = \"<STR_LIT>\"<EOL>full_string += \"<STR_LIT>\".format(what_clean_does)<EOL>what_visual_does = \"<STR_LIT>\"<EOL>full_string += \"<STR_LIT>\".format(what_visual_does)<EOL>full_string = re.sub(\"<STR_LIT>\", \"<STR_LIT>\", full_string)<EOL>return full_string<EOL>", "docstring": "Returns the prettily formatted help strings (for printing)\n\nArgs:\n    A dictionary that is the parsed Sakefile (from sake.py)\n\nNOTE:\n    the list sorting in this function is required for this\n    function to be deterministic", "id": "f14984:m5"}
{"signature": "def expand_macros(raw_text, macros):", "body": "includes = {}<EOL>result = []<EOL>pattern = re.compile(\"<STR_LIT>\", re.UNICODE)<EOL>ipattern = re.compile(\"<STR_LIT>\", re.UNICODE)<EOL>for line in raw_text.split(\"<STR_LIT:\\n>\"):<EOL><INDENT>line = string.Template(line).safe_substitute(macros)<EOL>result.append(line)<EOL>if line.startswith(\"<STR_LIT>\"):<EOL><INDENT>match = pattern.match(line)<EOL>try:<EOL><INDENT>var, opt, val, or_ = match.group(<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>, <NUM_LIT:4>)<EOL><DEDENT>except:<EOL><INDENT>raise InvalidMacroError(\"<STR_LIT>\".format(line))<EOL><DEDENT>if or_:<EOL><INDENT>if var not in macros:<EOL><INDENT>raise InvalidMacroError(\"<STR_LIT>\".format(var, or_))<EOL><DEDENT><DEDENT>elif not (opt and var in macros):<EOL><INDENT>macros[var] = val<EOL><DEDENT><DEDENT>elif line.startswith(\"<STR_LIT>\"):<EOL><INDENT>match = ipattern.match(line)<EOL>try:<EOL><INDENT>filename = match.group(<NUM_LIT:1>)<EOL><DEDENT>except:<EOL><INDENT>error(\"<STR_LIT>\".format(line))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>try:<EOL><INDENT>with io.open(filename, '<STR_LIT:r>') as f:<EOL><INDENT>includes[filename] = expand_macros(f.read(), macros)<EOL><DEDENT><DEDENT>except IOError:<EOL><INDENT>if match.group(<NUM_LIT:2>):<EOL><INDENT>if match.group(<NUM_LIT:2>).startswith('<STR_LIT>'):<EOL><INDENT>sprint(match.group(<NUM_LIT:3>))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>error(\"<STR_LIT>\".format(filename))<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return \"<STR_LIT:\\n>\".join(result), includes<EOL>", "docstring": "this gets called before the sakefile is parsed. it looks for\nmacros defined anywhere in the sakefile (the start of the line\nis '#!') and then replaces all occurences of '$variable' with the\nvalue defined in the macro. it then returns the contents of the\nfile with the macros expanded.", "id": "f14984:m7"}
{"signature": "@property<EOL><INDENT>def mapper_data(self):<DEDENT>", "body": "return self['<STR_LIT>']<EOL>", "docstring": "Returns the name of the data associated with the mapper.", "id": "f14985:c0:m6"}
{"signature": "@property<EOL><INDENT>def supported_keys(self):<DEDENT>", "body": "return self['<STR_LIT>']<EOL>", "docstring": "Returns a list with all keys which are supported by the mapper.", "id": "f14985:c0:m4"}
{"signature": "@property<EOL><INDENT>def name(self):<DEDENT>", "body": "return self._name<EOL>", "docstring": "Returns the name of the mapper.", "id": "f14985:c1:m1"}
{"signature": "def map(self, ID_s,<EOL>FROM=None,<EOL>TO=None,<EOL>target_as_set=False,<EOL>no_match_sub=None):", "body": "def io_mode(ID_s):<EOL><INDENT>'''<STR_LIT>'''<EOL>unlist_return = False<EOL>list_of_lists = False<EOL>if isinstance(ID_s, str):<EOL><INDENT>ID_s = [ID_s]<EOL>unlist_return = True<EOL><DEDENT>elif isinstance(ID_s, list):<EOL><INDENT>if len(ID_s) > <NUM_LIT:0> and isinstance(ID_s[<NUM_LIT:0>], list):<EOL><INDENT>list_of_lists = True<EOL><DEDENT><DEDENT>return ID_s, unlist_return, list_of_lists<EOL><DEDENT>if FROM == TO:<EOL><INDENT>return ID_s<EOL><DEDENT>ID_s, unlist_return, list_of_lists = io_mode(ID_s)<EOL>if list_of_lists:<EOL><INDENT>mapped_ids = [self.map(ID, FROM, TO, target_as_set, no_match_sub) for ID in ID_s]<EOL><DEDENT>else:<EOL><INDENT>mapped_ids = self._map(ID_s, FROM, TO, target_as_set, no_match_sub)<EOL><DEDENT>if unlist_return:<EOL><INDENT>return mapped_ids[<NUM_LIT:0>]<EOL><DEDENT>return Mapping(ID_s, mapped_ids)<EOL>", "docstring": "The main method of this class and the essence of the package.\nIt allows to \"map\" stuff.\n\nArgs:\n\n    ID_s: Nested lists with strings as leafs (plain strings also possible)\n    FROM (str): Origin key for the mapping (default: main key)\n    TO (str): Destination key for the mapping (default: main key)\n    target_as_set (bool): Whether to summarize the output as a set (removes duplicates)\n    no_match_sub: Object representing the status of an ID not being able to be matched\n                  (default: None)\n\nReturns:\n\n    Mapping: a mapping object capturing the result of the mapping request", "id": "f14985:c1:m6"}
{"signature": "@property<EOL><INDENT>def list_valued_keys(self):<DEDENT>", "body": "return self.get('<STR_LIT>', [])<EOL>", "docstring": "Returns a list of supported keys whose values are\nlist of strings.", "id": "f14985:c0:m13"}
{"signature": "@property<EOL><INDENT>def disjoint(self):<DEDENT>", "body": "return self.get('<STR_LIT>', [])<EOL>", "docstring": "Returns all list-values keys for which it is assured\nthat for any two values of that key the set of the\nentries of that value are disjoint (do not have an\nelement in common).", "id": "f14985:c0:m12"}
{"signature": "@property<EOL><INDENT>def name(self):<DEDENT>", "body": "return self['<STR_LIT:name>']<EOL>", "docstring": "Returns the name of the mapper.", "id": "f14985:c0:m5"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def _get_all(self, key):<DEDENT>", "body": "pass<EOL>", "docstring": "Backend-specific implementation of Mapper.get_all.", "id": "f14985:c1:m14"}
{"signature": "@property<EOL><INDENT>def definition(self):<DEDENT>", "body": "return self._definition<EOL>", "docstring": "Returns the definition of the mapper.", "id": "f14985:c1:m2"}
{"signature": "def dt_format_and_regex(fmt, no_date=False):", "body": "if fmt is None:<EOL><INDENT>return {'<STR_LIT>': None, '<STR_LIT>': None, '<STR_LIT>': None}<EOL><DEDENT>tz_marker = None<EOL>match = re.search('<STR_LIT>', fmt)<EOL>if match:<EOL><INDENT>tz_marker = match.group('<STR_LIT>')<EOL>if len(set(tz_marker.strip())) != <NUM_LIT:1>:  <EOL><INDENT>raise ValueError(fmt)<EOL><DEDENT>fmt = fmt[:match.start()]<EOL><DEDENT>date_patterns = {<EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>\"<STR_LIT>\",  <EOL>}<EOL>time_patterns = {\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"}<EOL>translate = {<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT:M>': ('<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT:d>': ('<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>'),<EOL>'<STR_LIT>': ('<STR_LIT>', '<STR_LIT>'),<EOL>}<EOL>for dt_sep in '<STR_LIT>':  <EOL><INDENT>if dt_sep in fmt:<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>dt_sep = None<EOL><DEDENT>if dt_sep:<EOL><INDENT>dfmt, tfmt = fmt.split(dt_sep)<EOL><DEDENT>elif no_date:<EOL><INDENT>dfmt, tfmt = None, fmt<EOL><DEDENT>else:<EOL><INDENT>dfmt, tfmt = fmt, None<EOL><DEDENT>msecs = None  <EOL>if tfmt and '<STR_LIT:.>' in tfmt:  <EOL><INDENT>tfmt, msecs = tfmt.split('<STR_LIT:.>')  <EOL>if set(msecs) != {'<STR_LIT:S>'}:  <EOL><INDENT>raise ValueError(fmt)<EOL><DEDENT>msecs = len(msecs)   <EOL><DEDENT>if (dfmt and dfmt not in date_patterns) or (tfmt and tfmt not in time_patterns):<EOL><INDENT>raise ValueError(fmt)<EOL><DEDENT>regex, format = '<STR_LIT>', '<STR_LIT>'  <EOL>if dfmt:<EOL><INDENT>for d_sep in '<STR_LIT>':  
<EOL><INDENT>if d_sep in dfmt:<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')  <EOL><DEDENT>for i, part in enumerate(dfmt.split(d_sep)):<EOL><INDENT>if i > <NUM_LIT:0>:<EOL><INDENT>format += d_sep<EOL>regex += re.escape(d_sep)<EOL><DEDENT>f, r = translate[part]<EOL>format += f<EOL>regex += r<EOL><DEDENT><DEDENT>if dt_sep:<EOL><INDENT>format += dt_sep<EOL>regex += re.escape(dt_sep)<EOL><DEDENT>if tfmt:<EOL><INDENT>for i, part in enumerate(tfmt.split('<STR_LIT::>')):<EOL><INDENT>if i > <NUM_LIT:0>:<EOL><INDENT>format += '<STR_LIT::>'<EOL>regex += re.escape('<STR_LIT::>')<EOL><DEDENT>f, r = translate[part]<EOL>format += f<EOL>regex += r<EOL><DEDENT><DEDENT>if msecs:<EOL><INDENT>format += '<STR_LIT>' % msecs<EOL>regex += '<STR_LIT>' % msecs<EOL><DEDENT>return {'<STR_LIT>': re.compile(regex), '<STR_LIT>': format, '<STR_LIT>': tz_marker}<EOL>", "docstring": ".. seealso:: http://w3c.github.io/csvw/syntax/#formats-for-dates-and-times", "id": "f14999:m1"}
{"signature": "def write(self, _force=False, _exists_ok=False, **items):", "body": "if self.fname and self.fname.exists():<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>with self.connection() as db:<EOL><INDENT>for table in self.tables:<EOL><INDENT>db.execute(table.sql(translate=self.translate))<EOL><DEDENT>db.execute('<STR_LIT>')<EOL>db.commit()<EOL>refs = defaultdict(list)  <EOL>for t in self.tables:<EOL><INDENT>if t.name not in items:<EOL><INDENT>continue<EOL><DEDENT>rows, keys = [], []<EOL>cols = {c.name: c for c in t.columns}<EOL>for i, row in enumerate(items[t.name]):<EOL><INDENT>pk = row[t.primary_key[<NUM_LIT:0>]]if t.primary_key and len(t.primary_key) == <NUM_LIT:1> else None<EOL>values = []<EOL>for k, v in row.items():<EOL><INDENT>if k in t.many_to_many:<EOL><INDENT>assert pk<EOL>at = t.many_to_many[k]<EOL>atkey = tuple([at.name] + [c.name for c in at.columns])<EOL>for vv in v:<EOL><INDENT>fkey, context = self.association_table_context(t, k, vv)<EOL>refs[atkey].append((pk, fkey, context))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>col = cols[k]<EOL>if isinstance(v, list):<EOL><INDENT>v = (col.separator or '<STR_LIT:;>').join(<EOL>col.convert(vv) for vv in v)<EOL><DEDENT>else:<EOL><INDENT>v = col.convert(v) if v is not None else None<EOL><DEDENT>if i == <NUM_LIT:0>:<EOL><INDENT>keys.append(col.name)<EOL><DEDENT>values.append(v)<EOL><DEDENT><DEDENT>rows.append(tuple(values))<EOL><DEDENT>insert(db, self.translate, t.name, keys, *rows)<EOL><DEDENT>for atkey, rows in refs.items():<EOL><INDENT>insert(db, self.translate, atkey[<NUM_LIT:0>], atkey[<NUM_LIT:1>:], *rows)<EOL><DEDENT>db.commit()<EOL><DEDENT>", "docstring": "Creates a db file with the core schema.\n\n:param force: If `True` an existing db file will be overwritten.", "id": "f15001:c2:m9"}
{"signature": "def sql(self, translate):", "body": "col_translate = partial(translate, self.name)<EOL>clauses = [col.sql(col_translate) for col in self.columns]<EOL>if self.primary_key:<EOL><INDENT>clauses.append('<STR_LIT>'.format(quoted(<EOL>*[col_translate(c) for c in self.primary_key])))<EOL><DEDENT>for fk, ref, refcols in self.foreign_keys:<EOL><INDENT>clauses.append('<STR_LIT>'.format(<EOL>quoted(*[col_translate(c) for c in fk]),<EOL>quoted(translate(ref)),<EOL>quoted(*[translate(ref, c) for c in refcols])))<EOL><DEDENT>return \"<STR_LIT>\".format(<EOL>translate(self.name), '<STR_LIT>'.join(clauses))<EOL>", "docstring": ":param translate:\n:return: The SQL statement to create the table.", "id": "f15001:c1:m2"}
{"signature": "def schema(tg):", "body": "tables = {}<EOL>for tname, table in tg.tabledict.items():<EOL><INDENT>t = TableSpec.from_table_metadata(table)<EOL>tables[t.name] = t<EOL>for at in t.many_to_many.values():<EOL><INDENT>tables[at.name] = at<EOL><DEDENT><DEDENT>ordered = OrderedDict()<EOL>i = <NUM_LIT:0><EOL>while tables and i < <NUM_LIT:100>:<EOL><INDENT>i += <NUM_LIT:1><EOL>for table in list(tables.keys()):<EOL><INDENT>if all((ref[<NUM_LIT:1>] in ordered) or ref[<NUM_LIT:1>] == table for ref in tables[table].foreign_keys):<EOL><INDENT>ordered[table] = tables.pop(table)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>if tables:  <EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return list(ordered.values())<EOL>", "docstring": "Convert the table and column descriptions of a `TableGroup` into specifications for the\nDB schema.\n\n:param ds:\n:return: A pair (tables, reference_tables).", "id": "f15001:m4"}
{"signature": "def check(self, translate):", "body": "if not self.csvw:<EOL><INDENT>return<EOL><DEDENT>c, cname = self.csvw, translate(self.name)<EOL>constraints = []<EOL>if (c.minimum is not None) or (c.maximum is not None):<EOL><INDENT>func = {<EOL>'<STR_LIT:date>': '<STR_LIT:date>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>}.get(self.csvw_type)<EOL>if c.minimum is not None:<EOL><INDENT>if func:<EOL><INDENT>constraints.append(\"<STR_LIT>\".format(cname, c.minimum, func))<EOL><DEDENT>else:<EOL><INDENT>constraints.append('<STR_LIT>'.format(cname, c.minimum))<EOL><DEDENT><DEDENT>if c.maximum is not None:<EOL><INDENT>if func:<EOL><INDENT>constraints.append(\"<STR_LIT>\".format(cname, c.maximum, func))<EOL><DEDENT>else:<EOL><INDENT>constraints.append('<STR_LIT>'.format(cname, c.maximum))<EOL><DEDENT><DEDENT><DEDENT>elif any(cc is not None for cc in [c.length, c.minLength, c.maxLength]):<EOL><INDENT>if c.length:<EOL><INDENT>constraints.append('<STR_LIT>'.format(cname, c.length))<EOL><DEDENT>if c.minLength:<EOL><INDENT>constraints.append('<STR_LIT>'.format(cname, c.minLength))<EOL><DEDENT>if c.maxLength:<EOL><INDENT>constraints.append('<STR_LIT>'.format(cname, c.maxLength))<EOL><DEDENT><DEDENT>return '<STR_LIT>'.join(constraints)<EOL>", "docstring": "We try to convert as many data constraints as possible into SQLite CHECK constraints.\n\n:param translate:\n:return:", "id": "f15001:c0:m1"}
{"signature": "def filter_rows_as_dict(fname, filter_, **kw):", "body": "filter_ = DictFilter(filter_)<EOL>rewrite(fname, filter_, **kw)<EOL>return filter_.removed<EOL>", "docstring": "Rewrite a dsv file, filtering the rows.\n\n    :param fname: Path to dsv file\n    :param filter_: callable which accepts a `dict` with a row's data as single argument\\\n    returning a `Boolean` indicating whether to keep the row (`True`) or to discard it \\\n    `False`.\n    :param kw: Keyword arguments to be passed `UnicodeReader` and `UnicodeWriter`.\n    :return: The number of rows that have been removed.", "id": "f15002:m4"}
{"signature": "def __next__(self):", "body": "<EOL>row = super(UnicodeReaderWithLineNumber, self).__next__()<EOL>return self.lineno + <NUM_LIT:1>, row<EOL>", "docstring": ":return: a pair (1-based line number in the input, row)", "id": "f15002:c2:m0"}
{"signature": "@property<EOL><INDENT>def base(self):<DEDENT>", "body": "return self._fname.parent<EOL>", "docstring": "We only support data in the filesystem, thus we make sure `base` is a `pathlib.Path`.", "id": "f15003:c12:m7"}
{"signature": "def read(self):", "body": "return {tname: list(t.iterdicts()) for tname, t in self.tabledict.items()}<EOL>", "docstring": "Read all data of a TableGroup", "id": "f15003:c12:m3"}
{"signature": "@classmethod<EOL><INDENT>def fromvalue(cls, v):<DEDENT>", "body": "if isinstance(v, text_type):<EOL><INDENT>return cls(base=v)<EOL><DEDENT>if isinstance(v, dict):<EOL><INDENT>return cls(**DescriptionBase.partition_properties(v))<EOL><DEDENT>if isinstance(v, cls):<EOL><INDENT>return v<EOL><DEDENT>raise ValueError(v)<EOL>", "docstring": ":param v: Initialization data for `cls`; either a single string that is the main datatype \\\nof the values of the cell or a datatype description object, i.e. a `dict` or a `cls`\ninstance.\n:return: An instance of `cls`", "id": "f15003:c4:m0"}
{"signature": "def copy(self, dest):", "body": "dest = pathlib.Path(dest)<EOL>for table in self.tables:<EOL><INDENT>shutil.copy(str(table.url.resolve(self.base)), str(table.url.resolve(dest)))<EOL><DEDENT>self._fname = dest / self._fname.name<EOL>self.to_file(self._fname)<EOL>", "docstring": "Write a TableGroup's data and metadata to files relative to `dest`, adapting the `base`\nattribute.\n\n:param dest:\n:return:", "id": "f15003:c12:m5"}
{"signature": "def slug(s, remove_whitespace=True, lowercase=True):", "body": "res = '<STR_LIT>'.join(c for c in unicodedata.normalize('<STR_LIT>', s)<EOL>if unicodedata.category(c) != '<STR_LIT>')<EOL>if lowercase:<EOL><INDENT>res = res.lower()<EOL><DEDENT>for c in string.punctuation:<EOL><INDENT>res = res.replace(c, '<STR_LIT>')<EOL><DEDENT>res = re.sub('<STR_LIT>', '<STR_LIT>' if remove_whitespace else '<STR_LIT:U+0020>', res)<EOL>res = res.encode('<STR_LIT:ascii>', '<STR_LIT:ignore>').decode('<STR_LIT:ascii>')<EOL>assert re.match('<STR_LIT>', res)<EOL>return res<EOL>", "docstring": "Condensed version of s, containing only lowercase alphanumeric characters.\n\n    >>> str(slug('A B. \\u00e4C'))\n    'abac'", "id": "f15004:m4"}
{"signature": "def normalize_name(s):", "body": "s = s.replace('<STR_LIT:->', '<STR_LIT:_>').replace('<STR_LIT:.>', '<STR_LIT:_>').replace('<STR_LIT:U+0020>', '<STR_LIT:_>')<EOL>if s in keyword.kwlist:<EOL><INDENT>return s + '<STR_LIT:_>'<EOL><DEDENT>s = '<STR_LIT:_>'.join(slug(ss, lowercase=False) for ss in s.split('<STR_LIT:_>'))<EOL>if not s:<EOL><INDENT>s = '<STR_LIT:_>'<EOL><DEDENT>if s[<NUM_LIT:0>] not in string.ascii_letters + '<STR_LIT:_>':<EOL><INDENT>s = '<STR_LIT:_>' + s<EOL><DEDENT>return s<EOL>", "docstring": "Convert a string into a valid python attribute name.\n    This function is called to convert ASCII strings to something that can pass as\n    python attribute name, to be used with namedtuples.\n\n    >>> str(normalize_name('class'))\n    'class_'\n    >>> str(normalize_name('a-name'))\n    'a_name'\n    >>> str(normalize_name('a n\\u00e4me'))\n    'a_name'\n    >>> str(normalize_name('Name'))\n    'Name'\n    >>> str(normalize_name(''))\n    '_'\n    >>> str(normalize_name('1'))\n    '_1'", "id": "f15004:m3"}
{"signature": "def to_zebra_params(params):", "body": "def to_zebra_value(value):<EOL><INDENT>transform_funcs = {<EOL>bool: lambda v: '<STR_LIT:true>' if v else '<STR_LIT:false>',<EOL>}<EOL>return transform_funcs.get(type(value), lambda v: v)(value)<EOL><DEDENT>return {param: to_zebra_value(value) for param, value in params.items()}<EOL>", "docstring": "Transforms the given `params` dict to values that are understood by Zebra (eg. False is represented as 'false')", "id": "f15010:m2"}
{"signature": "def show_response_messages(response_json):", "body": "message_type_kwargs = {<EOL>'<STR_LIT>': {'<STR_LIT>': '<STR_LIT>'},<EOL>'<STR_LIT:error>': {'<STR_LIT>': '<STR_LIT>'},<EOL>}<EOL>for message in response_json.get('<STR_LIT>', []):<EOL><INDENT>click.secho(message['<STR_LIT:text>'], **message_type_kwargs.get(message['<STR_LIT:type>'], {}))<EOL><DEDENT>", "docstring": "Show all messages in the `messages` key of the given dict.", "id": "f15010:m3"}
{"signature": "def build_message(self, data):", "body": "if not data:<EOL><INDENT>return None<EOL><DEDENT>return Message(<EOL>id=data['<STR_LIT:message>']['<STR_LIT>'],<EOL>platform=self.platform,<EOL>text=data['<STR_LIT:message>']['<STR_LIT:text>'],<EOL>user=data['<STR_LIT>']['<STR_LIT:id>'],<EOL>timestamp=data['<STR_LIT>'],<EOL>raw=data,<EOL>chat=None,  <EOL>)<EOL>", "docstring": "Return a Message instance according to the data received from\nFacebook Messenger API.", "id": "f15022:c0:m4"}
{"signature": "async def message_handler(self, data):", "body": "message = self.build_message(data)<EOL>if not message:<EOL><INDENT>logger.error(<EOL>'<STR_LIT>',<EOL>self.engine_name,<EOL>data<EOL>)<EOL>return<EOL><DEDENT>logger.info('<STR_LIT>', self.engine_name,<EOL>message.user, message.text)<EOL>response = await self.get_response(message)<EOL>if response:<EOL><INDENT>await self.send_response(response)<EOL><DEDENT>", "docstring": "For each new message, build its platform specific message\nobject and get a response.", "id": "f15026:c0:m9"}
{"signature": "def discovery_view(self, message):", "body": "for handler in self.registered_handlers:<EOL><INDENT>if handler.check(message):<EOL><INDENT>return handler.view<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Use the new message to search for a registered view according\nto its pattern.", "id": "f15026:c0:m8"}
{"signature": "def unicode_range():", "body": "for x in range(<NUM_LIT:1>, <NUM_LIT>): yield x<EOL>", "docstring": "internal: all reasonable Unicode range", "id": "f15048:m3"}
{"signature": "def validate_pi_text(text):", "body": "if __PI_TEXT_CHECK_PATTERN.search(text):<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "validates XML Processing Instruction text", "id": "f15051:m2"}
{"signature": "def validate_comment_text(text):", "body": "if __COMMENT_TEXT_CHECK_PATTERN.search(text):<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "validates XML comment text", "id": "f15051:m3"}
{"signature": "def parse(filename):", "body": "for event, elt in et.iterparse(filename, events= ('<STR_LIT:start>', '<STR_LIT:end>', '<STR_LIT>', '<STR_LIT>'), huge_tree=True):<EOL><INDENT>if event == '<STR_LIT:start>':<EOL><INDENT>obj = _elt2obj(elt)<EOL>obj['<STR_LIT:type>'] = ENTER<EOL>yield obj<EOL>if elt.text:<EOL><INDENT>yield {'<STR_LIT:type>': TEXT, '<STR_LIT:text>': elt.text}<EOL><DEDENT><DEDENT>elif event == '<STR_LIT:end>':<EOL><INDENT>yield {'<STR_LIT:type>': EXIT}<EOL>if elt.tail:<EOL><INDENT>yield {'<STR_LIT:type>': TEXT, '<STR_LIT:text>': elt.tail}<EOL><DEDENT>elt.clear()<EOL><DEDENT>elif event == '<STR_LIT>':<EOL><INDENT>yield {'<STR_LIT:type>': COMMENT, '<STR_LIT:text>': elt.text}<EOL><DEDENT>elif event == '<STR_LIT>':<EOL><INDENT>yield {'<STR_LIT:type>': PI, '<STR_LIT:text>': elt.text}<EOL><DEDENT>else:<EOL><INDENT>assert False, (event, elt)<EOL><DEDENT><DEDENT>", "docstring": "Parses file content into events stream", "id": "f15052:m4"}
{"signature": "def scan(xml):", "body": "if xml.tag is et.Comment:<EOL><INDENT>yield {'<STR_LIT:type>': COMMENT, '<STR_LIT:text>': xml.text}<EOL>return<EOL><DEDENT>if xml.tag is et.PI:<EOL><INDENT>if xml.text:<EOL><INDENT>yield {'<STR_LIT:type>': PI, '<STR_LIT:target>': xml.target, '<STR_LIT:text>': xml.text}<EOL><DEDENT>else:<EOL><INDENT>yield {'<STR_LIT:type>': PI, '<STR_LIT:target>': xml.target}<EOL><DEDENT>return<EOL><DEDENT>obj = _elt2obj(xml)<EOL>obj['<STR_LIT:type>'] = ENTER<EOL>yield obj<EOL>assert type(xml.tag) is str, xml<EOL>if xml.text:<EOL><INDENT>yield {'<STR_LIT:type>': TEXT, '<STR_LIT:text>': xml.text}<EOL><DEDENT>for c in xml:<EOL><INDENT>for x in scan(c): yield x<EOL>if c.tail:<EOL><INDENT>yield {'<STR_LIT:type>': TEXT, '<STR_LIT:text>': c.tail}<EOL><DEDENT><DEDENT>yield {'<STR_LIT:type>': EXIT}<EOL>", "docstring": "Converts XML tree to event generator", "id": "f15052:m2"}
{"signature": "def text_of(events):", "body": "return '<STR_LIT>'.join(o['<STR_LIT:text>'] for o in events if o['<STR_LIT:type>']==TEXT)<EOL>", "docstring": "extracts text content from event stream", "id": "f15052:m8"}
{"signature": "def with_peer(events):", "body": "stack = []<EOL>for obj in events:<EOL><INDENT>if obj['<STR_LIT:type>'] == ENTER:<EOL><INDENT>stack.append(obj)<EOL>yield obj, None<EOL><DEDENT>elif obj['<STR_LIT:type>'] == EXIT:<EOL><INDENT>yield obj, stack.pop()<EOL><DEDENT>else:<EOL><INDENT>yield obj, None<EOL><DEDENT><DEDENT>", "docstring": "locates ENTER peer for each EXIT object. Convenient when selectively\n    filtering out XML markup", "id": "f15052:m7"}
{"signature": "def get_n_splits(self, X=None, y=None, groups=None):", "body": "return self.n_splits<EOL>", "docstring": "Returns the number of splitting iterations in the cross-validator\n        Parameters\n        ----------\n        X : object\n            Always ignored, exists for compatibility.\n            ``np.zeros(n_samples)`` may be used as a placeholder.\n        y : object\n            Always ignored, exists for compatibility.\n            ``np.zeros(n_samples)`` may be used as a placeholder.\n        groups : array-like, with shape (n_samples,)\n            Group labels for the samples used while splitting the dataset into\n            train/test set.\n        Returns\n        -------\n        n_splits : int\n            Returns the number of splitting iterations in the cross-validator.", "id": "f15062:c0:m1"}
{"signature": "def split(self, X, y=None, groups=None):", "body": "X, y, groups = indexable(X, y, groups)<EOL>cgrs = [~r for r in X]<EOL>condition_structure = defaultdict(set)<EOL>for structure, condition in zip(cgrs, groups):<EOL><INDENT>condition_structure[condition].add(structure)<EOL><DEDENT>train_data = defaultdict(list)<EOL>test_data = []<EOL>for n, (structure, condition) in enumerate(zip(cgrs, groups)):<EOL><INDENT>train_data[structure].append(n)<EOL>if len(condition_structure[condition]) > <NUM_LIT:1>:<EOL><INDENT>test_data.append(n)<EOL><DEDENT><DEDENT>if self.n_splits > len(train_data):<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% (self.n_splits, len(train_data)))<EOL><DEDENT>structures_weight = sorted(((x, len(y)) for x, y in train_data.items()), key=lambda x: x[<NUM_LIT:1>], reverse=True)<EOL>fold_mean_size = len(cgrs) // self.n_splits<EOL>if structures_weight[<NUM_LIT:0>][<NUM_LIT:1>] > fold_mean_size:<EOL><INDENT>warning('<STR_LIT>')<EOL><DEDENT>for idx in range(self.n_repeats):<EOL><INDENT>train_folds = [[] for _ in range(self.n_splits)]<EOL>for structure, structure_length in structures_weight:<EOL><INDENT>if self.shuffle:<EOL><INDENT>check_random_state(self.random_state).shuffle(train_folds)<EOL><DEDENT>for fold in train_folds[:-<NUM_LIT:1>]:<EOL><INDENT>if len(fold) + structure_length <= fold_mean_size:<EOL><INDENT>fold.extend(train_data[structure])<EOL>break<EOL><DEDENT>else:<EOL><INDENT>roulette_param = (structure_length - fold_mean_size + len(fold)) / structure_length<EOL>if random() > roulette_param:<EOL><INDENT>fold.extend(train_data[structure])<EOL>break<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>train_folds[-<NUM_LIT:1>].extend(train_data[structure])<EOL><DEDENT><DEDENT>test_folds = [[] for _ in range(self.n_splits)]<EOL>for test, train in zip(test_folds, train_folds):<EOL><INDENT>for index in train:<EOL><INDENT>if index in test_data:<EOL><INDENT>test.append(index)<EOL><DEDENT><DEDENT><DEDENT>for i in 
range(self.n_splits):<EOL><INDENT>train_index = []<EOL>for fold in train_folds[:i]:<EOL><INDENT>train_index.extend(fold)<EOL><DEDENT>for fold in train_folds[i+<NUM_LIT:1>:]:<EOL><INDENT>train_index.extend(fold)<EOL><DEDENT>test_index = test_folds[i]<EOL>yield array(train_index), array(test_index)<EOL><DEDENT><DEDENT>", "docstring": "Generate indices to split data into training and test set.\n        Parameters\n        ----------\n        X : array-like, of length n_samples\n            Training data, includes reaction's containers\n        y : array-like, of length n_samples\n            The target variable for supervised learning problems.\n        groups : array-like, with shape (n_samples,)\n            Group labels for the samples used while splitting the dataset into\n            train/test set.\n        Yields\n        ------\n        train : ndarray\n            The training set indices for that split.\n        test : ndarray\n            The testing set indices for that split.", "id": "f15062:c0:m2"}
{"signature": "def split(self, X, y=None, groups=None):", "body": "X, y, groups = indexable(X, y, groups)<EOL>cgrs = [~r for r in X]<EOL>structure_condition = defaultdict(set)<EOL>for structure, condition in zip(cgrs, groups):<EOL><INDENT>structure_condition[structure].add(condition)<EOL><DEDENT>train_data = defaultdict(list)<EOL>test_data = []<EOL>for n, (structure, condition) in enumerate(zip(cgrs, groups)):<EOL><INDENT>train_data[condition].append(n)<EOL>if len(structure_condition[structure]) > <NUM_LIT:1>:<EOL><INDENT>test_data.append(n)<EOL><DEDENT><DEDENT>for condition, indexes in train_data.items():<EOL><INDENT>test_index = [index for index in indexes if index in test_data]<EOL>if test_index:<EOL><INDENT>train_index = [i for cond, ind in train_data.items() if cond != condition for i in ind]<EOL>yield array(train_index), array(test_index)<EOL><DEDENT><DEDENT>", "docstring": "Generate indices to split data into training and test set.\n        Parameters\n        ----------\n        X : array-like, of length n_samples\n            Training data, includes reaction's containers\n        y : array-like, of length n_samples\n            The target variable for supervised learning problems.\n        groups : array-like, with shape (n_samples,)\n            Group labels for the samples used while splitting the dataset into\n            train/test set.\n        Yields\n        ------\n        train : ndarray\n            The training set indices for that split.\n        test : ndarray\n            The testing set indices for that split.", "id": "f15063:c0:m1"}
{"signature": "def molconvert_chemaxon(data):", "body": "if isinstance(data, Path):<EOL><INDENT>with data.open('<STR_LIT:rb>') as f:<EOL><INDENT>data = f.read()<EOL><DEDENT><DEDENT>elif isinstance(data, StringIO):<EOL><INDENT>data = data.read().encode()<EOL><DEDENT>elif isinstance(data, BytesIO):<EOL><INDENT>data = data.read()<EOL><DEDENT>elif hasattr(data, '<STR_LIT>'):  <EOL><INDENT>data = data.read()<EOL>if isinstance(data, str):<EOL><INDENT>data = data.encode()<EOL><DEDENT><DEDENT>elif isinstance(data, str):<EOL><INDENT>data = data.encode()<EOL><DEDENT>elif not isinstance(data, bytes):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>p = run(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'], input=data, stdout=PIPE)<EOL><DEDENT>except FileNotFoundError as e:<EOL><INDENT>raise ConfigurationError from e<EOL><DEDENT>if p.returncode != <NUM_LIT:0>:<EOL><INDENT>raise ConfigurationError(p.stderr.decode())<EOL><DEDENT>with BytesIO(p.stdout) as f, MRVread(f) as r:<EOL><INDENT>return iter2array(r)<EOL><DEDENT>", "docstring": "molconvert wrapper\n:param data: buffer or string or path to file\n:return: array of molecules of reactions", "id": "f15067:m0"}
{"signature": "def predict_proba(self, X):", "body": "<EOL>check_is_fitted(self, ['<STR_LIT>'])<EOL>X = check_array(X)<EOL>return self.tree.query(X)[<NUM_LIT:0>].flatten()<EOL>", "docstring": "Returns the value of the nearest neighbor from the training set.\n\n        Parameters\n        ----------\n         X : array-like or sparse matrix, shape (n_samples, n_features)\n            The input samples. Internally, it will be converted to\n            ``dtype=np.float32`` and if a sparse matrix is provided\n            to a sparse ``csr_matrix``.\n\n        Returns\n        -------\n        y : array, shape (n_samples,)", "id": "f15068:c0:m2"}
{"signature": "def predict(self, X):", "body": "<EOL>check_is_fitted(self, ['<STR_LIT>'])<EOL>X = check_array(X)<EOL>return self.tree.query(X)[<NUM_LIT:0>].flatten() <= self.threshold_value<EOL>", "docstring": "Predict if a particular sample is an outlier or not.\n\n        Parameters\n        ----------\n         X : array-like or sparse matrix, shape (n_samples, n_features)\n            The input samples. Internally, it will be converted to\n            ``dtype=np.float32`` and if a sparse matrix is provided\n            to a sparse ``csr_matrix``.\n\n        Returns\n        -------\n        y : array, shape (n_samples,)\n            For each observations, tells whether or not (True or False) it should\n            be considered as an inlier according to the fitted model.", "id": "f15068:c0:m3"}
{"signature": "def fit(self, X, y=None):", "body": "<EOL>X = check_array(X)<EOL>self.tree = BallTree(X, leaf_size=self.leaf_size, metric=self.metric)<EOL>dist_train = self.tree.query(X, k=<NUM_LIT:2>)[<NUM_LIT:0>]<EOL>if self.threshold == '<STR_LIT>':<EOL><INDENT>self.threshold_value = <NUM_LIT:0.5> * sqrt(var(dist_train[:, <NUM_LIT:1>])) + mean(dist_train[:, <NUM_LIT:1>])<EOL><DEDENT>elif self.threshold == '<STR_LIT>':<EOL><INDENT>if y is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>y = check_array(y, accept_sparse='<STR_LIT>', ensure_2d=False, dtype=None)<EOL>self.threshold_value = <NUM_LIT:0><EOL>score = <NUM_LIT:0><EOL>Y_pred, Y_true, AD = [], [], []<EOL>cv = KFold(n_splits=<NUM_LIT:5>, random_state=<NUM_LIT:1>, shuffle=True)<EOL>for train_index, test_index in cv.split(X):<EOL><INDENT>x_train = safe_indexing(X, train_index)<EOL>x_test = safe_indexing(X, test_index)<EOL>y_train = safe_indexing(y, train_index)<EOL>y_test = safe_indexing(y, test_index)<EOL>data_test = safe_indexing(dist_train[:, <NUM_LIT:1>], test_index)<EOL>if self.reg_model is None:<EOL><INDENT>reg_model = RandomForestRegressor(n_estimators=<NUM_LIT>, random_state=<NUM_LIT:1>).fit(x_train, y_train)<EOL><DEDENT>else:<EOL><INDENT>reg_model = clone(self.reg_model).fit(x_train, y_train)<EOL><DEDENT>Y_pred.append(reg_model.predict(x_test))<EOL>Y_true.append(y_test)<EOL>AD.append(data_test)<EOL><DEDENT>AD_ = unique(hstack(AD))<EOL>for z in AD_:<EOL><INDENT>AD_new = hstack(AD) <= z<EOL>if self.score == '<STR_LIT>':<EOL><INDENT>val = balanced_accuracy_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new)<EOL><DEDENT>elif self.score == '<STR_LIT>':<EOL><INDENT>val = rmse_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new)<EOL><DEDENT>if val >= score:<EOL><INDENT>score = val<EOL>self.threshold_value = z<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self.threshold_value = self.threshold<EOL><DEDENT>return self<EOL>", "docstring": "Fit distance-based 
AD.\n\n        Parameters\n        ----------\n        X : array-like or sparse matrix, shape (n_samples, n_features)\n            The input samples. Use ``dtype=np.float32`` for maximum\n            efficiency.\n\n        Returns\n        -------\n        self : object\n            Returns self.", "id": "f15068:c0:m1"}
{"signature": "def predict(self, X):", "body": "<EOL>check_is_fitted(self, ['<STR_LIT>', '<STR_LIT>'])<EOL>X = check_array(X)<EOL>return ((X - self._x_min).min(axis=<NUM_LIT:1>) >= <NUM_LIT:0>) & ((self._x_max - X).min(axis=<NUM_LIT:1>) >= <NUM_LIT:0>)<EOL>", "docstring": "Predict if a particular sample is an outlier or not.\n\n        Parameters\n        ----------\n        X : array-like or sparse matrix, shape (n_samples, n_features)\n            The input samples. Internally, it will be converted to\n            ``dtype=np.float32`` and if a sparse matrix is provided\n            to a sparse ``csr_matrix``.\n\n        Returns\n        -------\n        is_inlier : array, shape (n_samples,)\n                   For each observations, tells whether or not (True or False) it should\n                   be considered as an inlier according to the fitted model.", "id": "f15069:c0:m2"}
{"signature": "def fit(self, X, y=None):", "body": "<EOL>X = check_array(X)<EOL>self.inverse_influence_matrix = self.__make_inverse_matrix(X)<EOL>if self.threshold == '<STR_LIT>':<EOL><INDENT>self.threshold_value = <NUM_LIT:3> * (<NUM_LIT:1> + X.shape[<NUM_LIT:1>]) / X.shape[<NUM_LIT:0>]<EOL><DEDENT>elif self.threshold == '<STR_LIT>':<EOL><INDENT>if y is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>y = check_array(y, accept_sparse='<STR_LIT>', ensure_2d=False, dtype=None)<EOL>self.threshold_value = <NUM_LIT:0><EOL>score = <NUM_LIT:0><EOL>Y_pred, Y_true, AD = [], [], []<EOL>cv = KFold(n_splits=<NUM_LIT:5>, random_state=<NUM_LIT:1>, shuffle=True)<EOL>for train_index, test_index in cv.split(X):<EOL><INDENT>x_train = safe_indexing(X, train_index)<EOL>x_test = safe_indexing(X, test_index)<EOL>y_train = safe_indexing(y, train_index)<EOL>y_test = safe_indexing(y, test_index)<EOL>if self.reg_model is None:<EOL><INDENT>reg_model = RandomForestRegressor(n_estimators=<NUM_LIT>, random_state=<NUM_LIT:1>).fit(x_train, y_train)<EOL><DEDENT>else:<EOL><INDENT>reg_model = clone(self.reg_model).fit(x_train, y_train)<EOL><DEDENT>Y_pred.append(reg_model.predict(x_test))<EOL>Y_true.append(y_test)<EOL>ad_model = self.__make_inverse_matrix(x_train)<EOL>AD.append(self.__find_leverages(x_test, ad_model))<EOL><DEDENT>AD_ = unique(hstack(AD))<EOL>for z in AD_:<EOL><INDENT>AD_new = hstack(AD) <= z<EOL>if self.score == '<STR_LIT>':<EOL><INDENT>val = balanced_accuracy_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new)<EOL><DEDENT>elif self.score == '<STR_LIT>':<EOL><INDENT>val = rmse_score_with_ad(Y_true=hstack(Y_true), Y_pred=hstack(Y_pred), AD=AD_new)<EOL><DEDENT>if val >= score:<EOL><INDENT>score = val<EOL>self.threshold_value = z<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self.threshold_value = self.threshold<EOL><DEDENT>return self<EOL>", "docstring": "Learning is to find the inverse matrix for X and calculate the threshold.\n\n        Parameters\n   
     ----------\n        X : array-like or sparse matrix, shape (n_samples, n_features)\n            The input samples. Use ``dtype=np.float32`` for maximum\n            efficiency.\n        y : array-like, shape = [n_samples] or [n_samples, n_outputs]\n            The target values (real numbers in regression).\n\n        Returns\n        -------\n        self : object", "id": "f15071:c0:m3"}
{"signature": "def predict(self, X):", "body": "check_is_fitted(self, ['<STR_LIT>'])<EOL>X = iter2array(X, dtype=ReactionContainer)<EOL>return array([self.__get_signature(x) in self._train_signatures for x in X])<EOL>", "docstring": "Reaction is considered belonging to model\u2019s AD\n        if its reaction signature coincides with ones used in training set.\n\n        Parameters\n        ----------\n        X : after read rdf file\n\n        Returns\n        -------\n        self : array contains True (reaction in AD) and False (reaction residing outside AD).", "id": "f15072:c0:m3"}
{"signature": "def get_feature_names(self):", "body": "return ['<STR_LIT>', '<STR_LIT>'] + [f'<STR_LIT>' for x in range(<NUM_LIT:1>, self.max_solvents + <NUM_LIT:1>)] +[f'<STR_LIT>' for x in range(<NUM_LIT:1>, self.max_solvents + <NUM_LIT:1>)]<EOL>", "docstring": "Get feature names.\n\n        Returns\n        -------\n        feature_names : list of strings\n            Names of the features produced by transform.", "id": "f15074:c2:m1"}
{"signature": "def __init__(self, templates, balance_groups=False):", "body": "self.templates = templates<EOL>self.balance_groups = balance_groups<EOL>self.__init()<EOL>", "docstring": "CGR standardization and reaction balancing\n\n:param templates: CGRTemplates. list of rules for graph modifications.\n:param balance_groups: if True: for unbalanced reactions contains multiple attached functional groups in\n    products and one of them described in reagents - will be restored information about all equal groups.\n    for example:\n\n        R + B1-X-> B'1-R'-B'2 + X'\n\n    where B' is transformed B, R and X same.\n    we know what B'1 and B'2 is equal and B'1 is transformed B1 =>\n    this groups most likely appeared from a single reagent. we can add copy of B-X to reagents.\n    results will be:\n\n        R + B1-X1 + B2-X2 -> B'1-R'-B'2 + X'1 + X'2", "id": "f15077:c0:m0"}
{"signature": "def __init__(self, fragment_type=<NUM_LIT:3>, min_length=<NUM_LIT:2>, max_length=<NUM_LIT:10>, cgr_dynbonds=<NUM_LIT:0>, doallways=False,<EOL>useformalcharge=False, header=None, workpath='<STR_LIT:.>', version=None, verbose=False, remove_rare_ratio=<NUM_LIT:0>,<EOL>return_domain=False):", "body": "self.fragment_type = fragment_type<EOL>self.min_length = min_length<EOL>self.max_length = max_length<EOL>self.cgr_dynbonds = cgr_dynbonds<EOL>self.doallways = doallways<EOL>self.useformalcharge = useformalcharge<EOL>self.version = version<EOL>self.verbose = verbose<EOL>self.header = header<EOL>self.remove_rare_ratio = remove_rare_ratio<EOL>self.return_domain = return_domain<EOL>self.__init_header()<EOL>self.set_work_path(workpath)<EOL>", "docstring": "ISIDA Fragmentor wrapper\n\n:param workpath: path for temp files.\n:param version: fragmentor version. need for selecting Fragmentor executables named as fragmentor-{version}\n:param header: if None descriptors will be generated on train set\n               if False Fragmentor will work in headless mode. in this mod fit unusable and Fragmentor return\n                   all found descriptors\n               else path string to existing header file acceptable\n:param remove_rare_ratio: if descriptors found on train less then given ratio it will be removed from header.\n                          if partial fit used, be sure to use finalize method.\n                          unusable if headless mode set\n:param return_domain: add AD bool column. if False molecule has new features", "id": "f15080:c0:m0"}
{"signature": "def fit(self, x, y=None):", "body": "x = iter2array(x, dtype=(MoleculeContainer, CGRContainer))<EOL>if self.__head_less:<EOL><INDENT>warn(f'<STR_LIT>')<EOL>return self<EOL><DEDENT>self._reset()<EOL>self.__prepare(x)<EOL>return self<EOL>", "docstring": "Compute the header.", "id": "f15080:c0:m10"}
{"signature": "def finalize(self):", "body": "if self.__head_less:<EOL><INDENT>warn(f'<STR_LIT>')<EOL><DEDENT>elif not self.__head_generate:<EOL><INDENT>warn(f'<STR_LIT>')<EOL><DEDENT>elif not self.__head_dict:<EOL><INDENT>raise NotFittedError(f'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>if self.remove_rare_ratio:<EOL><INDENT>self.__clean_head(*self.__head_rare)<EOL>self.__prepare_header()<EOL>self.__head_rare = None<EOL><DEDENT>self.__head_generate = False<EOL><DEDENT>", "docstring": "finalize partial fitting procedure", "id": "f15080:c0:m7"}
{"signature": "def get_feature_names(self):", "body": "if self.__head_less:<EOL><INDENT>raise AttributeError(f'<STR_LIT>')<EOL><DEDENT>elif not self.__head_dict:<EOL><INDENT>raise NotFittedError(f'<STR_LIT>')<EOL><DEDENT>return list(self.__head_dict.values())<EOL>", "docstring": "Get feature names.\n\n        Returns\n        -------\n        feature_names : list of strings\n            Names of the features produced by transform.", "id": "f15080:c0:m9"}
{"signature": "def _reset(self):", "body": "if not self.__head_less:<EOL><INDENT>if not self.__head_generate:<EOL><INDENT>self.__head_generate = True<EOL><DEDENT>if self.__head_dict:<EOL><INDENT>self.__head_dump = self.__head_dict = None<EOL><DEDENT>if self.__head_rare is not None:<EOL><INDENT>self.__head_rare = None<EOL><DEDENT>self.delete_work_path()<EOL><DEDENT>", "docstring": "Reset internal data-dependent state.\n        __init__ parameters are not touched.", "id": "f15080:c0:m8"}
{"signature": "@staticmethod<EOL><INDENT>def __parser(expression):<DEDENT>", "body": "expr_stack = []<EOL>def push_first(strg, loc, toks):<EOL><INDENT>expr_stack.append(toks[<NUM_LIT:0>])<EOL><DEDENT>def push_u_minus(strg, loc, toks):<EOL><INDENT>if toks and toks[<NUM_LIT:0>] == '<STR_LIT:->':<EOL><INDENT>expr_stack.append('<STR_LIT>')<EOL><DEDENT><DEDENT>point = Literal('<STR_LIT:.>')<EOL>_e = CaselessLiteral('<STR_LIT:E>')<EOL>fnumber = Combine(Word('<STR_LIT>' + nums, nums) +<EOL>Optional(point + Optional(Word(nums))) +<EOL>Optional(_e + Word('<STR_LIT>' + nums, nums)))<EOL>ident = Word(alphas, alphas + nums + '<STR_LIT>')<EOL>plus = Literal(\"<STR_LIT:+>\")<EOL>minus = Literal(\"<STR_LIT:->\")<EOL>mult = Literal(\"<STR_LIT:*>\")<EOL>div = Literal(\"<STR_LIT:/>\")<EOL>lpar = Literal(\"<STR_LIT:(>\").suppress()<EOL>rpar = Literal(\"<STR_LIT:)>\").suppress()<EOL>addop = plus | minus<EOL>multop = mult | div<EOL>expop = Literal(\"<STR_LIT>\")<EOL>_pi = CaselessLiteral(\"<STR_LIT>\")<EOL>x = CaselessLiteral(\"<STR_LIT:X>\")<EOL>expr = Forward()<EOL>atom = (Optional(\"<STR_LIT:->\") + (x | _pi | _e | fnumber | ident + lpar + expr + rpar).setParseAction(push_first) |<EOL>(lpar + expr.suppress() + rpar)).setParseAction(push_u_minus)<EOL>factor = Forward()<EOL>factor << atom + ZeroOrMore((expop + factor).setParseAction(push_first))<EOL>term = factor + ZeroOrMore((multop + factor).setParseAction(push_first))<EOL>expr << term + ZeroOrMore((addop + term).setParseAction(push_first))<EOL>expr.parseString(expression)<EOL>return expr_stack<EOL>", "docstring": "adopted from Paul McGuire example. http://pyparsing.wikispaces.com/file/view/fourFn.py", "id": "f15081:c1:m2"}
{"signature": "def get_feature_names(self):", "body": "return [f'<STR_LIT>']<EOL>", "docstring": "Get feature names.\n\n        Returns\n        -------\n        feature_names : list of strings\n            Names of the features produced by transform.", "id": "f15081:c0:m1"}
{"signature": "def execute(helper, config, args):", "body": "pass<EOL>", "docstring": "empty command to allow help messages to work", "id": "f15086:m1"}
{"signature": "def execute(helper, config, args):", "body": "env_config = parse_env_config(config, args.environment)<EOL>helper.rebuild_environment(args.environment)<EOL>if not args.dont_wait:<EOL><INDENT>helper.wait_for_environments(args.environment, health='<STR_LIT>', status='<STR_LIT>')<EOL><DEDENT>", "docstring": "Rebuilds an environment", "id": "f15087:m1"}
{"signature": "def execute(helper, config, args):", "body": "envs = config.get('<STR_LIT>', {}).get('<STR_LIT>', [])<EOL>out(\"<STR_LIT>\")<EOL>for name, conf in list(envs.items()):<EOL><INDENT>out('<STR_LIT:\\t>'+name)<EOL><DEDENT>envs = helper.get_environments()<EOL>out(\"<STR_LIT>\")<EOL>for env in envs:<EOL><INDENT>if env['<STR_LIT>'] != '<STR_LIT>':<EOL><INDENT>out('<STR_LIT:\\t>'+str(env['<STR_LIT>'])+'<STR_LIT>'+str(env['<STR_LIT>'])+'<STR_LIT:U+002CU+0020>'+str(env['<STR_LIT>'])+'<STR_LIT:)>')<EOL><DEDENT><DEDENT>", "docstring": "Lists environments", "id": "f15088:m0"}
{"signature": "def execute(helper, config, args):", "body": "versions = helper.get_versions()<EOL>out(\"<STR_LIT>\")<EOL>for version in versions:<EOL><INDENT>out(version)<EOL><DEDENT>", "docstring": "Lists environments", "id": "f15089:m0"}
{"signature": "def execute(helper, config, args):", "body": "env = parse_env_config(config, args.environment)<EOL>option_settings = env.get('<STR_LIT>', {})<EOL>settings = parse_option_settings(option_settings)<EOL>for setting in settings:<EOL><INDENT>out(str(setting))<EOL><DEDENT>", "docstring": "dump command dumps things", "id": "f15092:m1"}
{"signature": "def add_arguments(parser):", "body": "parser.add_argument('<STR_LIT>', '<STR_LIT>', help='<STR_LIT>', required=True)<EOL>", "docstring": "adds arguments for the dump command", "id": "f15092:m0"}
{"signature": "def get_command_without_error_checking(name):", "body": "__import__('<STR_LIT>' + name + '<STR_LIT>')<EOL>return sys.modules['<STR_LIT>' + name + '<STR_LIT>']<EOL>", "docstring": "Returns a command module", "id": "f15093:m3"}
{"signature": "def get_command_names():", "body": "ret = []<EOL>for f in os.listdir(COMMAND_MODULE_PATH):<EOL><INDENT>if os.path.isfile(os.path.join(COMMAND_MODULE_PATH, f)) and f.endswith(COMMAND_MODULE_SUFFIX):<EOL><INDENT>ret.append(f[:-len(COMMAND_MODULE_SUFFIX)])<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "Returns a list of command names supported", "id": "f15093:m1"}
{"signature": "def execute(helper, config, args):", "body": "version_label = args.version_label<EOL>archive = args.archive<EOL>env_config = parse_env_config(config, args.environment)<EOL>option_settings = parse_option_settings(env_config.get('<STR_LIT>', {}))<EOL>cname_prefix = env_config.get('<STR_LIT>', None)<EOL>tier_name = env_config.get('<STR_LIT>', '<STR_LIT>')<EOL>if tier_name != '<STR_LIT>':<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (tier_name, ))<EOL><DEDENT>out(\"<STR_LIT>\")<EOL>new_env_name = None<EOL>if not helper.environment_exists(args.environment):<EOL><INDENT>new_env_name = args.environment<EOL><DEDENT>else:<EOL><INDENT>for i in range(<NUM_LIT:10>):<EOL><INDENT>temp_env_name = args.environment + '<STR_LIT:->' + str(i)<EOL>if not helper.environment_exists(temp_env_name):<EOL><INDENT>new_env_name = temp_env_name<EOL>break<EOL><DEDENT><DEDENT><DEDENT>if new_env_name is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>out(\"<STR_LIT>\" + new_env_name)<EOL>out(\"<STR_LIT>\")<EOL>new_env_cname = None<EOL>for i in range(<NUM_LIT:10>):<EOL><INDENT>temp_cname = cname_prefix + '<STR_LIT:->' + str(i)<EOL>if not helper.environment_name_for_cname(temp_cname):<EOL><INDENT>new_env_cname = temp_cname<EOL>break<EOL><DEDENT><DEDENT>if new_env_cname is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>out(\"<STR_LIT>\" + new_env_cname)<EOL>version_label = upload_application_archive(<EOL>helper, env_config, archive=args.archive, directory=args.directory, version_label=version_label)<EOL>helper.create_environment(new_env_name,<EOL>solution_stack_name=env_config.get('<STR_LIT>'),<EOL>cname_prefix=new_env_cname,<EOL>description=env_config.get('<STR_LIT:description>', None),<EOL>option_settings=option_settings,<EOL>version_label=version_label,<EOL>tier_name=tier_name,<EOL>tier_type=env_config.get('<STR_LIT>'),<EOL>tier_version=env_config.get('<STR_LIT>'))<EOL>helper.wait_for_environments(new_env_name, status='<STR_LIT>', 
health='<STR_LIT>', include_deleted=False)<EOL>old_env_name = helper.environment_name_for_cname(cname_prefix)<EOL>if old_env_name is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\" + cname_prefix)<EOL><DEDENT>out(\"<STR_LIT>\" + old_env_name)<EOL>out(\"<STR_LIT>\")<EOL>helper.swap_environment_cnames(old_env_name, new_env_name)<EOL>helper.wait_for_environments([old_env_name, new_env_name], status='<STR_LIT>', include_deleted=False)<EOL>if args.termination_delay:<EOL><INDENT>out(\"<STR_LIT>\".format(args.termination_delay))<EOL>time.sleep(args.termination_delay)<EOL><DEDENT>out(\"<STR_LIT>\".format(old_env_name))<EOL>helper.delete_environment(old_env_name)<EOL>helper.delete_unused_versions(versions_to_keep=int(get(config, '<STR_LIT>', <NUM_LIT:10>)))<EOL>", "docstring": "Deploys to an environment", "id": "f15095:m1"}
{"signature": "def execute(helper, config, args):", "body": "out(\"<STR_LIT>\")<EOL>for stack in helper.list_available_solution_stacks():<EOL><INDENT>out(\"<STR_LIT:U+0020>\"+str(stack))<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Lists solution stacks", "id": "f15096:m0"}
{"signature": "def execute(helper, config, args):", "body": "env_config = parse_env_config(config, args.environment)<EOL>environments_to_wait_for_term = []<EOL>environments = helper.get_environments()<EOL>for env in environments:<EOL><INDENT>if env['<STR_LIT>'] == args.environment:<EOL><INDENT>if env['<STR_LIT>'] != '<STR_LIT>':<EOL><INDENT>out(\"<STR_LIT>\" + env['<STR_LIT>']<EOL>+ \"<STR_LIT>\"<EOL>+ env['<STR_LIT>'] + \"<STR_LIT:)>\")<EOL><DEDENT>else:<EOL><INDENT>out(\"<STR_LIT>\"+env['<STR_LIT>'])<EOL>helper.delete_environment(env['<STR_LIT>'])<EOL>environments_to_wait_for_term.append(env['<STR_LIT>'])<EOL><DEDENT><DEDENT><DEDENT>if not args.dont_wait:<EOL><INDENT>helper.wait_for_environments(environments_to_wait_for_term,<EOL>status='<STR_LIT>',<EOL>include_deleted=True)<EOL><DEDENT>out(\"<STR_LIT>\")<EOL>return <NUM_LIT:0><EOL>", "docstring": "Deletes an environment", "id": "f15097:m1"}
{"signature": "def add_arguments(parser):", "body": "parser.add_argument('<STR_LIT>', '<STR_LIT>',  help='<STR_LIT>', required=False, nargs='<STR_LIT:+>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', help='<STR_LIT>', action='<STR_LIT:store_true>')<EOL>", "docstring": "Args for the init command", "id": "f15099:m0"}
{"signature": "def execute(helper, config, args):", "body": "environments = []<EOL>if args.environment:<EOL><INDENT>for env_name in args.environment:<EOL><INDENT>environments.append(env_name)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for env_name, env_config in list(get(config, '<STR_LIT>').items()):<EOL><INDENT>environments.append(env_name)<EOL><DEDENT><DEDENT>wait_environments = []<EOL>for env_name in environments:<EOL><INDENT>env = parse_env_config(config, env_name)<EOL>option_settings = parse_option_settings(env.get('<STR_LIT>', {}))<EOL>helper.update_environment(env_name,<EOL>description=env.get('<STR_LIT:description>', None),<EOL>option_settings=option_settings,<EOL>tier_type=env.get('<STR_LIT>'),<EOL>tier_name=env.get('<STR_LIT>'),<EOL>tier_version=env.get('<STR_LIT>'))<EOL>wait_environments.append(env_name)<EOL><DEDENT>if not args.dont_wait:<EOL><INDENT>helper.wait_for_environments(wait_environments, health='<STR_LIT>', status='<STR_LIT>')<EOL><DEDENT>", "docstring": "Updates environments", "id": "f15099:m1"}
{"signature": "def add_arguments(parser):", "body": "parser.add_argument('<STR_LIT>', '<STR_LIT>', help='<STR_LIT>', required=True)<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', help='<STR_LIT>', required=True)<EOL>", "docstring": "adds arguments for the swap urls command", "id": "f15101:m0"}
{"signature": "def create_application(self, description=None):", "body": "out(\"<STR_LIT>\" + str(self.app_name))<EOL>self.ebs.create_application(self.app_name, description=description)<EOL>", "docstring": "Creats an application and sets the helpers current\napp_name to the created application", "id": "f15103:c1:m4"}
{"signature": "def delete_unused_versions(self, versions_to_keep=<NUM_LIT:10>):", "body": "<EOL>environments = self.ebs.describe_environments(application_name=self.app_name, include_deleted=False)<EOL>environments = environments['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']<EOL>versions_in_use = []<EOL>for env in environments:<EOL><INDENT>versions_in_use.append(env['<STR_LIT>'])<EOL><DEDENT>versions = self.ebs.describe_application_versions(application_name=self.app_name)<EOL>versions = versions['<STR_LIT>']['<STR_LIT>'][<EOL>'<STR_LIT>']<EOL>versions = sorted(versions, reverse=True, key=functools.cmp_to_key(lambda x, y: (x['<STR_LIT>'] > y['<STR_LIT>']) - (x['<STR_LIT>'] < y['<STR_LIT>'])))<EOL>for version in versions[versions_to_keep:]:<EOL><INDENT>if version['<STR_LIT>'] in versions_in_use:<EOL><INDENT>out(\"<STR_LIT>\" + version[\"<STR_LIT>\"] + \"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>out(\"<STR_LIT>\" + version[\"<STR_LIT>\"])<EOL>self.ebs.delete_application_version(application_name=self.app_name,<EOL>version_label=version['<STR_LIT>'])<EOL>sleep(<NUM_LIT:2>)<EOL><DEDENT><DEDENT>", "docstring": "Deletes unused versions", "id": "f15103:c1:m17"}
{"signature": "def get(vals, key, default_val=None):", "body": "val = vals<EOL>for part in key.split('<STR_LIT:.>'):<EOL><INDENT>if isinstance(val, dict):<EOL><INDENT>val = val.get(part, None)<EOL>if val is None:<EOL><INDENT>return default_val<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return default_val<EOL><DEDENT><DEDENT>return val<EOL>", "docstring": "Returns a dictionary value", "id": "f15103:m2"}
{"signature": "def delete_application(self):", "body": "out(\"<STR_LIT>\" + str(self.app_name))<EOL>self.ebs.delete_application(self.app_name, terminate_env_by_force=True)<EOL>", "docstring": "Creats an application and sets the helpers current\napp_name to the created application", "id": "f15103:c1:m5"}
{"signature": "def application_exists(self):", "body": "response = self.ebs.describe_applications(application_names=[self.app_name])<EOL>return len(response['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']) > <NUM_LIT:0><EOL>", "docstring": "Returns whether or not the given app_name exists", "id": "f15103:c1:m6"}
{"signature": "def parse_option_settings(option_settings):", "body": "ret = []<EOL>for namespace, params in list(option_settings.items()):<EOL><INDENT>for key, value in list(params.items()):<EOL><INDENT>ret.append((namespace, key, value))<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "Parses option_settings as they are defined in the configuration file", "id": "f15103:m3"}
{"signature": "def deploy_version(self, environment_name, version_label):", "body": "out(\"<STR_LIT>\" + str(version_label) + \"<STR_LIT>\" + str(environment_name))<EOL>self.ebs.update_environment(environment_name=environment_name, version_label=version_label)<EOL>", "docstring": "Deploys a version to an environment", "id": "f15103:c1:m14"}
{"signature": "def get_environments(self):", "body": "response = self.ebs.describe_environments(application_name=self.app_name, include_deleted=False)<EOL>return response['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']<EOL>", "docstring": "Returns the environments", "id": "f15103:c1:m10"}
{"signature": "def delete_environment(self, environment_name):", "body": "self.ebs.terminate_environment(environment_name=environment_name, terminate_resources=True)<EOL>", "docstring": "Deletes an environment", "id": "f15103:c1:m11"}
{"signature": "def swap_environment_cnames(self, from_env_name, to_env_name):", "body": "self.ebs.swap_environment_cnames(source_environment_name=from_env_name,<EOL>destination_environment_name=to_env_name)<EOL>", "docstring": "Swaps cnames for an environment", "id": "f15103:c1:m1"}
{"signature": "def update_environment(self, environment_name, description=None, option_settings=[], tier_type=None, tier_name=None,<EOL>tier_version='<STR_LIT:1.0>'):", "body": "out(\"<STR_LIT>\" + str(environment_name))<EOL>messages = self.ebs.validate_configuration_settings(self.app_name, option_settings,<EOL>environment_name=environment_name)<EOL>messages = messages['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']<EOL>ok = True<EOL>for message in messages:<EOL><INDENT>if message['<STR_LIT>'] == '<STR_LIT:error>':<EOL><INDENT>ok = False<EOL><DEDENT>out(\"<STR_LIT:[>\" + message['<STR_LIT>'] + \"<STR_LIT>\" + str(environment_name) + \"<STR_LIT>\"+ message['<STR_LIT>'] + \"<STR_LIT::>\" + message['<STR_LIT>'] + \"<STR_LIT>\" + message['<STR_LIT>'])<EOL><DEDENT>self.ebs.update_environment(<EOL>environment_name=environment_name,<EOL>description=description,<EOL>option_settings=option_settings,<EOL>tier_type=tier_type,<EOL>tier_name=tier_name,<EOL>tier_version=tier_version)<EOL>", "docstring": "Updates an application version", "id": "f15103:c1:m12"}
{"signature": "def __init__(self, aws, wait_time_secs, app_name=None,):", "body": "self.aws = aws<EOL>self.ebs = connect_to_region(aws.region, aws_access_key_id=aws.access_key,<EOL>aws_secret_access_key=aws.secret_key,<EOL>security_token=aws.security_token)<EOL>self.s3 = S3Connection(<EOL>aws_access_key_id=aws.access_key, <EOL>aws_secret_access_key=aws.secret_key, <EOL>security_token=aws.security_token,<EOL>host=(lambda r: '<STR_LIT>' if r == '<STR_LIT>' else '<STR_LIT>' + r + '<STR_LIT>')(aws.region))<EOL>self.app_name = app_name<EOL>self.wait_time_secs = wait_time_secs<EOL>", "docstring": "Creates the EbsHelper", "id": "f15103:c1:m0"}
{"signature": "def rebuild_environment(self, env_name):", "body": "out(\"<STR_LIT>\" + str(env_name))<EOL>self.ebs.rebuild_environment(environment_name=env_name)<EOL>", "docstring": "Rebuilds an environment", "id": "f15103:c1:m9"}
{"signature": "def out(message):", "body": "sys.stdout.write(message + \"<STR_LIT:\\n>\")<EOL>sys.stdout.flush()<EOL>", "docstring": "print alias", "id": "f15103:m0"}
{"signature": "def get_random_str(self):", "body": "rule = string.letters + string.digits<EOL>str = random.sample(rule, <NUM_LIT:16>)<EOL>return \"<STR_LIT>\".join(str)<EOL>", "docstring": "\u968f\u673a\u751f\u621016\u4f4d\u5b57\u7b26\u4e32\n        @return: 16\u4f4d\u5b57\u7b26\u4e32", "id": "f15109:c4:m3"}
{"signature": "def throw_exception(message, exception_class=FormatException):", "body": "raise exception_class(message)<EOL>", "docstring": "my define raise exception function", "id": "f15109:m0"}
{"signature": "def getSHA1(self, token, timestamp, nonce, encrypt):", "body": "try:<EOL><INDENT>sortlist = [token, timestamp, nonce, encrypt]<EOL>sortlist.sort()<EOL>sha = hashlib.sha1()<EOL>sha.update(\"<STR_LIT>\".join(sortlist))<EOL>return WXBizMsgCrypt_OK, sha.hexdigest()<EOL><DEDENT>except Exception:<EOL><INDENT>return WXBizMsgCrypt_ComputeSignature_Error, None<EOL><DEDENT>", "docstring": "\u7528SHA1\u7b97\u6cd5\u751f\u6210\u5b89\u5168\u7b7e\u540d\n        @param token:  \u7968\u636e\n        @param timestamp: \u65f6\u95f4\u6233\n        @param encrypt: \u5bc6\u6587\n        @param nonce: \u968f\u673a\u5b57\u7b26\u4e32\n        @return: \u5b89\u5168\u7b7e\u540d", "id": "f15109:c1:m0"}
{"signature": "def generate(self, encrypt, signature, timestamp, nonce):", "body": "resp_dict = {<EOL>'<STR_LIT>': encrypt,<EOL>'<STR_LIT>': signature,<EOL>'<STR_LIT>': timestamp,<EOL>'<STR_LIT>': nonce,<EOL>}<EOL>resp_xml = self.AES_TEXT_RESPONSE_TEMPLATE % resp_dict<EOL>return resp_xml<EOL>", "docstring": "\u751f\u6210xml\u6d88\u606f\n        @param encrypt: \u52a0\u5bc6\u540e\u7684\u6d88\u606f\u5bc6\u6587\n        @param signature: \u5b89\u5168\u7b7e\u540d\n        @param timestamp: \u65f6\u95f4\u6233\n        @param nonce: \u968f\u673a\u5b57\u7b26\u4e32\n        @return: \u751f\u6210\u7684xml\u5b57\u7b26\u4e32", "id": "f15109:c2:m1"}
{"signature": "def md_table(table, *, padding=DEFAULT_PADDING, divider='<STR_LIT:|>', header_div='<STR_LIT:->'):", "body": "table = normalize_cols(table)<EOL>table = pad_cells(table)<EOL>header = table[<NUM_LIT:0>]<EOL>body = table[<NUM_LIT:1>:]<EOL>col_widths = [len(cell) for cell in header]<EOL>horiz = horiz_div(col_widths, header_div, divider, padding)<EOL>header = add_dividers(header, divider, padding)<EOL>body = [add_dividers(row, divider, padding) for row in body]<EOL>table = [header, horiz]<EOL>table.extend(body)<EOL>table = [row.rstrip() for row in table]<EOL>return '<STR_LIT:\\n>'.join(table)<EOL>", "docstring": "Convert a 2D array of items into a Markdown table.\n\npadding: the number of padding spaces on either side of each divider\ndivider: the vertical divider to place between columns\nheader_div: the horizontal divider to place between the header row and\n    body cells", "id": "f15118:m6"}
{"signature": "def runTest(self):", "body": "self.vis.delete()<EOL>v = self.vis[\"<STR_LIT>\"]<EOL>filename = os.path.join(meshcat.viewer_assets_path(),<EOL>\"<STR_LIT>\")<EOL>with open(filename, \"<STR_LIT:r>\") as f:<EOL><INDENT>fio = StringIO(f.read())<EOL>v[\"<STR_LIT>\"].set_object(g.Mesh(g.ObjMeshGeometry.from_stream(fio)))<EOL>v[\"<STR_LIT>\"].set_transform(tf.translation_matrix([<NUM_LIT:0>, <NUM_LIT:0.0>, <NUM_LIT:0>]))<EOL><DEDENT>filename = os.path.join(meshcat.viewer_assets_path(),<EOL>\"<STR_LIT>\")<EOL>with open(filename, \"<STR_LIT:r>\") as f:<EOL><INDENT>fio = StringIO(f.read())<EOL>v[\"<STR_LIT>\"].set_object(g.Mesh(g.StlMeshGeometry.from_stream(fio)))<EOL>v[\"<STR_LIT>\"].set_transform(tf.translation_matrix([<NUM_LIT:0>, -<NUM_LIT:0.5>, <NUM_LIT:0>]))<EOL><DEDENT>filename = os.path.join(meshcat.viewer_assets_path(),<EOL>\"<STR_LIT>\")<EOL>with open(filename, \"<STR_LIT:rb>\") as f:<EOL><INDENT>fio = BytesIO(f.read())<EOL>v[\"<STR_LIT>\"].set_object(g.Mesh(g.StlMeshGeometry.from_stream(fio)))<EOL>v[\"<STR_LIT>\"].set_transform(tf.translation_matrix([<NUM_LIT:0>, -<NUM_LIT:1.0>, <NUM_LIT:0>]))<EOL><DEDENT>filename = os.path.join(meshcat.viewer_assets_path(),<EOL>\"<STR_LIT>\")<EOL>with open(filename, \"<STR_LIT:r>\") as f:<EOL><INDENT>fio = StringIO(f.read())<EOL>v[\"<STR_LIT>\"].set_object(g.Mesh(g.DaeMeshGeometry.from_stream(fio)))<EOL>v[\"<STR_LIT>\"].set_transform(tf.translation_matrix([<NUM_LIT:0>, -<NUM_LIT>, <NUM_LIT:0>]))<EOL><DEDENT>", "docstring": "Applications using meshcat may already have meshes loaded in memory. It is\n        more efficient to load these meshes with streams rather than going to and then\n        from a file on disk. To test this we are importing meshes from disk and\n        converting them into streams so it kind of defeats the intended purpose! But at\n        least it tests the functionality.", "id": "f15126:c2:m0"}
{"signature": "def convert_frames_to_video(tar_file_path, output_path=\"<STR_LIT>\", framerate=<NUM_LIT>, overwrite=False):", "body": "output_path = os.path.abspath(output_path)<EOL>if os.path.isfile(output_path) and not overwrite:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(output_path))<EOL><DEDENT>with tempfile.TemporaryDirectory() as tmp_dir:<EOL><INDENT>with tarfile.open(tar_file_path) as tar:<EOL><INDENT>tar.extractall(tmp_dir)<EOL><DEDENT>args = [\"<STR_LIT>\",<EOL>\"<STR_LIT>\", str(framerate),<EOL>\"<STR_LIT>\", r\"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\"]<EOL>if overwrite:<EOL><INDENT>args.append(\"<STR_LIT>\")<EOL><DEDENT>args.append(output_path)<EOL>try:<EOL><INDENT>subprocess.check_call(args, cwd=tmp_dir)<EOL><DEDENT>except subprocess.CalledProcessError as e:<EOL><INDENT>print(\"\"\"<STR_LIT>\"\"\")<EOL>raise<EOL><DEDENT><DEDENT>print(\"<STR_LIT>\".format(output_path))<EOL>return output_path<EOL>", "docstring": "Try to convert a tar file containing a sequence of frames saved by the\nmeshcat viewer into a single video file.\n\nThis relies on having `ffmpeg` installed on your system.", "id": "f15127:m2"}
{"signature": "def translation_matrix(direction):", "body": "M = numpy.identity(<NUM_LIT:4>)<EOL>M[:<NUM_LIT:3>, <NUM_LIT:3>] = direction[:<NUM_LIT:3>]<EOL>return M<EOL>", "docstring": "Return matrix to translate by direction vector.\n\n    >>> v = numpy.random.random(3) - 0.5\n    >>> numpy.allclose(v, translation_matrix(v)[:3, 3])\n    True", "id": "f15133:m1"}
{"signature": "def matrix(self):", "body": "return quaternion_matrix(self._qnow)<EOL>", "docstring": "Return homogeneous rotation matrix.", "id": "f15133:c0:m8"}
{"signature": "@constrain.setter<EOL><INDENT>def constrain(self, value):<DEDENT>", "body": "self._constrain = bool(value)<EOL>", "docstring": "Set state of constrain to axis mode.", "id": "f15133:c0:m4"}
{"signature": "def scale_matrix(factor, origin=None, direction=None):", "body": "if direction is None:<EOL><INDENT>M = numpy.diag([factor, factor, factor, <NUM_LIT:1.0>])<EOL>if origin is not None:<EOL><INDENT>M[:<NUM_LIT:3>, <NUM_LIT:3>] = origin[:<NUM_LIT:3>]<EOL>M[:<NUM_LIT:3>, <NUM_LIT:3>] *= <NUM_LIT:1.0> - factor<EOL><DEDENT><DEDENT>else:<EOL><INDENT>direction = unit_vector(direction[:<NUM_LIT:3>])<EOL>factor = <NUM_LIT:1.0> - factor<EOL>M = numpy.identity(<NUM_LIT:4>)<EOL>M[:<NUM_LIT:3>, :<NUM_LIT:3>] -= factor * numpy.outer(direction, direction)<EOL>if origin is not None:<EOL><INDENT>M[:<NUM_LIT:3>, <NUM_LIT:3>] = (factor * numpy.dot(origin[:<NUM_LIT:3>], direction)) * direction<EOL><DEDENT><DEDENT>return M<EOL>", "docstring": "Return matrix to scale by factor around origin in direction.\n\n    Use factor -1 for point symmetry.\n\n    >>> v = (numpy.random.rand(4, 5) - 0.5) * 20\n    >>> v[3] = 1\n    >>> S = scale_matrix(-1.234)\n    >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])\n    True\n    >>> factor = random.random() * 10 - 5\n    >>> origin = numpy.random.random(3) - 0.5\n    >>> direct = numpy.random.random(3) - 0.5\n    >>> S = scale_matrix(factor, origin)\n    >>> S = scale_matrix(factor, origin, direct)", "id": "f15133:m7"}
{"signature": "@property<EOL><INDENT>def constrain(self):<DEDENT>", "body": "return self._constrain<EOL>", "docstring": "Return state of constrain to axis mode.", "id": "f15133:c0:m3"}
{"signature": "def projection_from_matrix(matrix, pseudo=False):", "body": "M = numpy.array(matrix, dtype=numpy.float64, copy=False)<EOL>M33 = M[:<NUM_LIT:3>, :<NUM_LIT:3>]<EOL>w, V = numpy.linalg.eig(M)<EOL>i = numpy.where(abs(numpy.real(w) - <NUM_LIT:1.0>) < <NUM_LIT>)[<NUM_LIT:0>]<EOL>if not pseudo and len(i):<EOL><INDENT>point = numpy.real(V[:, i[-<NUM_LIT:1>]]).squeeze()<EOL>point /= point[<NUM_LIT:3>]<EOL>w, V = numpy.linalg.eig(M33)<EOL>i = numpy.where(abs(numpy.real(w)) < <NUM_LIT>)[<NUM_LIT:0>]<EOL>if not len(i):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>direction = numpy.real(V[:, i[<NUM_LIT:0>]]).squeeze()<EOL>direction /= vector_norm(direction)<EOL>w, V = numpy.linalg.eig(M33.T)<EOL>i = numpy.where(abs(numpy.real(w)) < <NUM_LIT>)[<NUM_LIT:0>]<EOL>if len(i):<EOL><INDENT>normal = numpy.real(V[:, i[<NUM_LIT:0>]]).squeeze()<EOL>normal /= vector_norm(normal)<EOL>return point, normal, direction, None, False<EOL><DEDENT>else:<EOL><INDENT>return point, direction, None, None, False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>i = numpy.where(abs(numpy.real(w)) > <NUM_LIT>)[<NUM_LIT:0>]<EOL>if not len(i):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>point = numpy.real(V[:, i[-<NUM_LIT:1>]]).squeeze()<EOL>point /= point[<NUM_LIT:3>]<EOL>normal = - M[<NUM_LIT:3>, :<NUM_LIT:3>]<EOL>perspective = M[:<NUM_LIT:3>, <NUM_LIT:3>] / numpy.dot(point[:<NUM_LIT:3>], normal)<EOL>if pseudo:<EOL><INDENT>perspective -= normal<EOL><DEDENT>return point, normal, None, perspective, pseudo<EOL><DEDENT>", "docstring": "Return projection plane and perspective point from projection matrix.\n\n    Return values are same as arguments for projection_matrix function:\n    point, normal, direction, perspective, and pseudo.\n\n    >>> point = numpy.random.random(3) - 0.5\n    >>> normal = numpy.random.random(3) - 0.5\n    >>> direct = numpy.random.random(3) - 0.5\n    >>> persp = numpy.random.random(3) - 0.5\n    >>> P0 = projection_matrix(point, normal)\n    >>> result 
= projection_from_matrix(P0)\n    >>> P1 = projection_matrix(*result)\n    >>> is_same_transform(P0, P1)\n    True\n    >>> P0 = projection_matrix(point, normal, direct)\n    >>> result = projection_from_matrix(P0)\n    >>> P1 = projection_matrix(*result)\n    >>> is_same_transform(P0, P1)\n    True\n    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)\n    >>> result = projection_from_matrix(P0, pseudo=False)\n    >>> P1 = projection_matrix(*result)\n    >>> is_same_transform(P0, P1)\n    True\n    >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)\n    >>> result = projection_from_matrix(P0, pseudo=True)\n    >>> P1 = projection_matrix(*result)\n    >>> is_same_transform(P0, P1)\n    True", "id": "f15133:m10"}
{"signature": "def setaxes(self, *axes):", "body": "if axes is None:<EOL><INDENT>self._axes = None<EOL><DEDENT>else:<EOL><INDENT>self._axes = [unit_vector(axis) for axis in axes]<EOL><DEDENT>", "docstring": "Set axes to constrain rotations.", "id": "f15133:c0:m2"}
{"signature": "def is_same_transform(matrix0, matrix1):", "body": "matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)<EOL>matrix0 /= matrix0[<NUM_LIT:3>, <NUM_LIT:3>]<EOL>matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)<EOL>matrix1 /= matrix1[<NUM_LIT:3>, <NUM_LIT:3>]<EOL>return numpy.allclose(matrix0, matrix1)<EOL>", "docstring": "Return True if two matrices perform same transformation.\n\n    >>> is_same_transform(numpy.identity(4), numpy.identity(4))\n    True\n    >>> is_same_transform(numpy.identity(4), random_rotation_matrix())\n    False", "id": "f15133:m44"}
{"signature": "def reflection_from_matrix(matrix):", "body": "M = numpy.array(matrix, dtype=numpy.float64, copy=False)<EOL>w, V = numpy.linalg.eig(M[:<NUM_LIT:3>, :<NUM_LIT:3>])<EOL>i = numpy.where(abs(numpy.real(w) + <NUM_LIT:1.0>) < <NUM_LIT>)[<NUM_LIT:0>]<EOL>if not len(i):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>normal = numpy.real(V[:, i[<NUM_LIT:0>]]).squeeze()<EOL>w, V = numpy.linalg.eig(M)<EOL>i = numpy.where(abs(numpy.real(w) - <NUM_LIT:1.0>) < <NUM_LIT>)[<NUM_LIT:0>]<EOL>if not len(i):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>point = numpy.real(V[:, i[-<NUM_LIT:1>]]).squeeze()<EOL>point /= point[<NUM_LIT:3>]<EOL>return point, normal<EOL>", "docstring": "Return mirror plane point and normal vector from reflection matrix.\n\n    >>> v0 = numpy.random.random(3) - 0.5\n    >>> v1 = numpy.random.random(3) - 0.5\n    >>> M0 = reflection_matrix(v0, v1)\n    >>> point, normal = reflection_from_matrix(M0)\n    >>> M1 = reflection_matrix(point, normal)\n    >>> is_same_transform(M0, M1)\n    True", "id": "f15133:m4"}
{"signature": "def quaternion_from_matrix(matrix, isprecise=False):", "body": "M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:<NUM_LIT:4>, :<NUM_LIT:4>]<EOL>if isprecise:<EOL><INDENT>q = numpy.empty((<NUM_LIT:4>, ))<EOL>t = numpy.trace(M)<EOL>if t > M[<NUM_LIT:3>, <NUM_LIT:3>]:<EOL><INDENT>q[<NUM_LIT:0>] = t<EOL>q[<NUM_LIT:3>] = M[<NUM_LIT:1>, <NUM_LIT:0>] - M[<NUM_LIT:0>, <NUM_LIT:1>]<EOL>q[<NUM_LIT:2>] = M[<NUM_LIT:0>, <NUM_LIT:2>] - M[<NUM_LIT:2>, <NUM_LIT:0>]<EOL>q[<NUM_LIT:1>] = M[<NUM_LIT:2>, <NUM_LIT:1>] - M[<NUM_LIT:1>, <NUM_LIT:2>]<EOL><DEDENT>else:<EOL><INDENT>i, j, k = <NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3><EOL>if M[<NUM_LIT:1>, <NUM_LIT:1>] > M[<NUM_LIT:0>, <NUM_LIT:0>]:<EOL><INDENT>i, j, k = <NUM_LIT:2>, <NUM_LIT:3>, <NUM_LIT:1><EOL><DEDENT>if M[<NUM_LIT:2>, <NUM_LIT:2>] > M[i, i]:<EOL><INDENT>i, j, k = <NUM_LIT:3>, <NUM_LIT:1>, <NUM_LIT:2><EOL><DEDENT>t = M[i, i] - (M[j, j] + M[k, k]) + M[<NUM_LIT:3>, <NUM_LIT:3>]<EOL>q[i] = t<EOL>q[j] = M[i, j] + M[j, i]<EOL>q[k] = M[k, i] + M[i, k]<EOL>q[<NUM_LIT:3>] = M[k, j] - M[j, k]<EOL><DEDENT>q *= <NUM_LIT:0.5> / math.sqrt(t * M[<NUM_LIT:3>, <NUM_LIT:3>])<EOL><DEDENT>else:<EOL><INDENT>m00 = M[<NUM_LIT:0>, <NUM_LIT:0>]<EOL>m01 = M[<NUM_LIT:0>, <NUM_LIT:1>]<EOL>m02 = M[<NUM_LIT:0>, <NUM_LIT:2>]<EOL>m10 = M[<NUM_LIT:1>, <NUM_LIT:0>]<EOL>m11 = M[<NUM_LIT:1>, <NUM_LIT:1>]<EOL>m12 = M[<NUM_LIT:1>, <NUM_LIT:2>]<EOL>m20 = M[<NUM_LIT:2>, <NUM_LIT:0>]<EOL>m21 = M[<NUM_LIT:2>, <NUM_LIT:1>]<EOL>m22 = M[<NUM_LIT:2>, <NUM_LIT:2>]<EOL>K = numpy.array([[m00-m11-m22, <NUM_LIT:0.0>,         <NUM_LIT:0.0>,         <NUM_LIT:0.0>],<EOL>[m01+m10,     m11-m00-m22, <NUM_LIT:0.0>,         <NUM_LIT:0.0>],<EOL>[m02+m20,     m12+m21,     m22-m00-m11, <NUM_LIT:0.0>],<EOL>[m21-m12,     m02-m20,     m10-m01,     m00+m11+m22]])<EOL>K /= <NUM_LIT><EOL>w, V = numpy.linalg.eigh(K)<EOL>q = V[[<NUM_LIT:3>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>], numpy.argmax(w)]<EOL><DEDENT>if q[<NUM_LIT:0>] < 
<NUM_LIT:0.0>:<EOL><INDENT>numpy.negative(q, q)<EOL><DEDENT>return q<EOL>", "docstring": "Return quaternion from rotation matrix.\n\n    If isprecise is True, the input matrix is assumed to be a precise rotation\n    matrix and a faster algorithm is used.\n\n    >>> q = quaternion_from_matrix(numpy.identity(4), True)\n    >>> numpy.allclose(q, [1, 0, 0, 0])\n    True\n    >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))\n    >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])\n    True\n    >>> R = rotation_matrix(0.123, (1, 2, 3))\n    >>> q = quaternion_from_matrix(R, True)\n    >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])\n    True\n    >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],\n    ...      [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]\n    >>> q = quaternion_from_matrix(R)\n    >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])\n    True\n    >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],\n    ...      [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]\n    >>> q = quaternion_from_matrix(R)\n    >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])\n    True\n    >>> R = random_rotation_matrix()\n    >>> q = quaternion_from_matrix(R)\n    >>> is_same_transform(R, quaternion_matrix(q))\n    True\n    >>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)\n    >>> numpy.allclose(quaternion_from_matrix(R, isprecise=False),\n    ...                quaternion_from_matrix(R, isprecise=True))\n    True", "id": "f15133:m25"}
{"signature": "def vector_norm(data, axis=None, out=None):", "body": "data = numpy.array(data, dtype=numpy.float64, copy=True)<EOL>if out is None:<EOL><INDENT>if data.ndim == <NUM_LIT:1>:<EOL><INDENT>return math.sqrt(numpy.dot(data, data))<EOL><DEDENT>data *= data<EOL>out = numpy.atleast_1d(numpy.sum(data, axis=axis))<EOL>numpy.sqrt(out, out)<EOL>return out<EOL><DEDENT>else:<EOL><INDENT>data *= data<EOL>numpy.sum(data, axis=axis, out=out)<EOL>numpy.sqrt(out, out)<EOL><DEDENT>", "docstring": "Return length, i.e. Euclidean norm, of ndarray along axis.\n\n    >>> v = numpy.random.random(3)\n    >>> n = vector_norm(v)\n    >>> numpy.allclose(n, numpy.linalg.norm(v))\n    True\n    >>> v = numpy.random.rand(6, 5, 3)\n    >>> n = vector_norm(v, axis=-1)\n    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))\n    True\n    >>> n = vector_norm(v, axis=1)\n    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))\n    True\n    >>> v = numpy.random.rand(5, 4, 3)\n    >>> n = numpy.empty((5, 3))\n    >>> vector_norm(v, axis=1, out=n)\n    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))\n    True\n    >>> vector_norm([])\n    0.0\n    >>> vector_norm([1])\n    1.0", "id": "f15133:m37"}
{"signature": "def unit_vector(data, axis=None, out=None):", "body": "if out is None:<EOL><INDENT>data = numpy.array(data, dtype=numpy.float64, copy=True)<EOL>if data.ndim == <NUM_LIT:1>:<EOL><INDENT>data /= math.sqrt(numpy.dot(data, data))<EOL>return data<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if out is not data:<EOL><INDENT>out[:] = numpy.array(data, copy=False)<EOL><DEDENT>data = out<EOL><DEDENT>length = numpy.atleast_1d(numpy.sum(data*data, axis))<EOL>numpy.sqrt(length, length)<EOL>if axis is not None:<EOL><INDENT>length = numpy.expand_dims(length, axis)<EOL><DEDENT>data /= length<EOL>if out is None:<EOL><INDENT>return data<EOL><DEDENT>", "docstring": "Return ndarray normalized by length, i.e. Euclidean norm, along axis.\n\n    >>> v0 = numpy.random.random(3)\n    >>> v1 = unit_vector(v0)\n    >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))\n    True\n    >>> v0 = numpy.random.rand(5, 4, 3)\n    >>> v1 = unit_vector(v0, axis=-1)\n    >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)\n    >>> numpy.allclose(v1, v2)\n    True\n    >>> v1 = unit_vector(v0, axis=1)\n    >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)\n    >>> numpy.allclose(v1, v2)\n    True\n    >>> v1 = numpy.empty((5, 4, 3))\n    >>> unit_vector(v0, axis=1, out=v1)\n    >>> numpy.allclose(v1, v2)\n    True\n    >>> list(unit_vector([]))\n    []\n    >>> list(unit_vector([1]))\n    [1.0]", "id": "f15133:m38"}
{"signature": "def quaternion_multiply(quaternion1, quaternion0):", "body": "w0, x0, y0, z0 = quaternion0<EOL>w1, x1, y1, z1 = quaternion1<EOL>return numpy.array([-x1*x0 - y1*y0 - z1*z0 + w1*w0,<EOL>x1*w0 + y1*z0 - z1*y0 + w1*x0,<EOL>-x1*z0 + y1*w0 + z1*x0 + w1*y0,<EOL>x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64)<EOL>", "docstring": "Return multiplication of two quaternions.\n\n    >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])\n    >>> numpy.allclose(q, [28, -44, -14, 48])\n    True", "id": "f15133:m26"}
{"signature": "def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):", "body": "v0 = numpy.array(v0, dtype=numpy.float64, copy=True)<EOL>v1 = numpy.array(v1, dtype=numpy.float64, copy=True)<EOL>ndims = v0.shape[<NUM_LIT:0>]<EOL>if ndims < <NUM_LIT:2> or v0.shape[<NUM_LIT:1>] < ndims or v0.shape != v1.shape:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>t0 = -numpy.mean(v0, axis=<NUM_LIT:1>)<EOL>M0 = numpy.identity(ndims+<NUM_LIT:1>)<EOL>M0[:ndims, ndims] = t0<EOL>v0 += t0.reshape(ndims, <NUM_LIT:1>)<EOL>t1 = -numpy.mean(v1, axis=<NUM_LIT:1>)<EOL>M1 = numpy.identity(ndims+<NUM_LIT:1>)<EOL>M1[:ndims, ndims] = t1<EOL>v1 += t1.reshape(ndims, <NUM_LIT:1>)<EOL>if shear:<EOL><INDENT>A = numpy.concatenate((v0, v1), axis=<NUM_LIT:0>)<EOL>u, s, vh = numpy.linalg.svd(A.T)<EOL>vh = vh[:ndims].T<EOL>B = vh[:ndims]<EOL>C = vh[ndims:<NUM_LIT:2>*ndims]<EOL>t = numpy.dot(C, numpy.linalg.pinv(B))<EOL>t = numpy.concatenate((t, numpy.zeros((ndims, <NUM_LIT:1>))), axis=<NUM_LIT:1>)<EOL>M = numpy.vstack((t, ((<NUM_LIT:0.0>,)*ndims) + (<NUM_LIT:1.0>,)))<EOL><DEDENT>elif usesvd or ndims != <NUM_LIT:3>:<EOL><INDENT>u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))<EOL>R = numpy.dot(u, vh)<EOL>if numpy.linalg.det(R) < <NUM_LIT:0.0>:<EOL><INDENT>R -= numpy.outer(u[:, ndims-<NUM_LIT:1>], vh[ndims-<NUM_LIT:1>, :]*<NUM_LIT>)<EOL>s[-<NUM_LIT:1>] *= -<NUM_LIT:1.0><EOL><DEDENT>M = numpy.identity(ndims+<NUM_LIT:1>)<EOL>M[:ndims, :ndims] = R<EOL><DEDENT>else:<EOL><INDENT>xx, yy, zz = numpy.sum(v0 * v1, axis=<NUM_LIT:1>)<EOL>xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -<NUM_LIT:1>, axis=<NUM_LIT:0>), axis=<NUM_LIT:1>)<EOL>xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -<NUM_LIT:2>, axis=<NUM_LIT:0>), axis=<NUM_LIT:1>)<EOL>N = [[xx+yy+zz, <NUM_LIT:0.0>,      <NUM_LIT:0.0>,      <NUM_LIT:0.0>],<EOL>[yz-zy,    xx-yy-zz, <NUM_LIT:0.0>,      <NUM_LIT:0.0>],<EOL>[zx-xz,    xy+yx,    yy-xx-zz, <NUM_LIT:0.0>],<EOL>[xy-yx,    zx+xz,    yz+zy,    zz-xx-yy]]<EOL>w, V = 
numpy.linalg.eigh(N)<EOL>q = V[:, numpy.argmax(w)]<EOL>q /= vector_norm(q)  <EOL>M = quaternion_matrix(q)<EOL><DEDENT>if scale and not shear:<EOL><INDENT>v0 *= v0<EOL>v1 *= v1<EOL>M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))<EOL><DEDENT>M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))<EOL>M /= M[ndims, ndims]<EOL>return M<EOL>", "docstring": "Return affine transform matrix to register two point sets.\n\n    v0 and v1 are shape (ndims, *) arrays of at least ndims non-homogeneous\n    coordinates, where ndims is the dimensionality of the coordinate space.\n\n    If shear is False, a similarity transformation matrix is returned.\n    If also scale is False, a rigid/Euclidean transformation matrix\n    is returned.\n\n    By default the algorithm by Hartley and Zissermann [15] is used.\n    If usesvd is True, similarity and Euclidean transformation matrices\n    are calculated by minimizing the weighted sum of squared deviations\n    (RMSD) according to the algorithm by Kabsch [8].\n    Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]\n    is used, which is slower when using this Python implementation.\n\n    The returned matrix performs rotation, translation and uniform scaling\n    (if specified).\n\n    >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]\n    >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]\n    >>> affine_matrix_from_points(v0, v1)\n    array([[   0.14549,    0.00062,  675.50008],\n           [   0.00048,    0.14094,   53.24971],\n           [   0.     ,    0.     ,    1.     
]])\n    >>> T = translation_matrix(numpy.random.random(3)-0.5)\n    >>> R = random_rotation_matrix(numpy.random.random(3))\n    >>> S = scale_matrix(random.random())\n    >>> M = concatenate_matrices(T, R, S)\n    >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20\n    >>> v0[3] = 1\n    >>> v1 = numpy.dot(M, v0)\n    >>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)\n    >>> M = affine_matrix_from_points(v0[:3], v1[:3])\n    >>> numpy.allclose(v1, numpy.dot(M, v0))\n    True\n\n    More examples in superimposition_matrix()", "id": "f15133:m17"}
{"signature": "def place(self, center, radius):", "body": "self._radius = float(radius)<EOL>self._center[<NUM_LIT:0>] = center[<NUM_LIT:0>]<EOL>self._center[<NUM_LIT:1>] = center[<NUM_LIT:1>]<EOL>", "docstring": "Place Arcball, e.g. when window size changes.\n\n        center : sequence[2]\n            Window coordinates of trackball center.\n        radius : float\n            Radius of trackball in window coordinates.", "id": "f15133:c0:m1"}
{"signature": "def arcball_constrain_to_axis(point, axis):", "body": "v = numpy.array(point, dtype=numpy.float64, copy=True)<EOL>a = numpy.array(axis, dtype=numpy.float64, copy=True)<EOL>v -= a * numpy.dot(a, v)  <EOL>n = vector_norm(v)<EOL>if n > _EPS:<EOL><INDENT>if v[<NUM_LIT:2>] < <NUM_LIT:0.0>:<EOL><INDENT>numpy.negative(v, v)<EOL><DEDENT>v /= n<EOL>return v<EOL><DEDENT>if a[<NUM_LIT:2>] == <NUM_LIT:1.0>:<EOL><INDENT>return numpy.array([<NUM_LIT:1.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>])<EOL><DEDENT>return unit_vector([-a[<NUM_LIT:1>], a[<NUM_LIT:0>], <NUM_LIT:0.0>])<EOL>", "docstring": "Return sphere point perpendicular to axis.", "id": "f15133:m35"}
{"signature": "def arcball_map_to_sphere(point, center, radius):", "body": "v0 = (point[<NUM_LIT:0>] - center[<NUM_LIT:0>]) / radius<EOL>v1 = (center[<NUM_LIT:1>] - point[<NUM_LIT:1>]) / radius<EOL>n = v0*v0 + v1*v1<EOL>if n > <NUM_LIT:1.0>:<EOL><INDENT>n = math.sqrt(n)<EOL>return numpy.array([v0/n, v1/n, <NUM_LIT:0.0>])<EOL><DEDENT>else:<EOL><INDENT>return numpy.array([v0, v1, math.sqrt(<NUM_LIT:1.0> - n)])<EOL><DEDENT>", "docstring": "Return unit sphere coordinates from window coordinates.", "id": "f15133:m34"}
{"signature": "def inverse_matrix(matrix):", "body": "return numpy.linalg.inv(matrix)<EOL>", "docstring": "Return inverse of square transformation matrix.\n\n    >>> M0 = random_rotation_matrix()\n    >>> M1 = inverse_matrix(M0.T)\n    >>> numpy.allclose(M1, numpy.linalg.inv(M0.T))\n    True\n    >>> for size in range(1, 7):\n    ...     M0 = numpy.random.rand(size, size)\n    ...     M1 = inverse_matrix(M0)\n    ...     if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)", "id": "f15133:m42"}
{"signature": "def next(self, acceleration=<NUM_LIT:0.0>):", "body": "q = quaternion_slerp(self._qpre, self._qnow, <NUM_LIT>+acceleration, False)<EOL>self._qpre, self._qnow = self._qnow, q<EOL>", "docstring": "Continue rotation in direction of last drag.", "id": "f15133:c0:m7"}
{"signature": "def concatenate_matrices(*matrices):", "body": "M = numpy.identity(<NUM_LIT:4>)<EOL>for i in matrices:<EOL><INDENT>M = numpy.dot(M, i)<EOL><DEDENT>return M<EOL>", "docstring": "Return concatenation of series of transformation matrices.\n\n    >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5\n    >>> numpy.allclose(M, concatenate_matrices(M))\n    True\n    >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))\n    True", "id": "f15133:m43"}
{"signature": "def vector_product(v0, v1, axis=<NUM_LIT:0>):", "body": "return numpy.cross(v0, v1, axis=axis)<EOL>", "docstring": "Return vector perpendicular to vectors.\n\n    >>> v = vector_product([2, 0, 0], [0, 3, 0])\n    >>> numpy.allclose(v, [0, 0, 6])\n    True\n    >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]\n    >>> v1 = [[3], [0], [0]]\n    >>> v = vector_product(v0, v1)\n    >>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])\n    True\n    >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]\n    >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]\n    >>> v = vector_product(v0, v1, axis=1)\n    >>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])\n    True", "id": "f15133:m40"}
{"signature": "def decompose_matrix(matrix):", "body": "M = numpy.array(matrix, dtype=numpy.float64, copy=True).T<EOL>if abs(M[<NUM_LIT:3>, <NUM_LIT:3>]) < _EPS:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>M /= M[<NUM_LIT:3>, <NUM_LIT:3>]<EOL>P = M.copy()<EOL>P[:, <NUM_LIT:3>] = <NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:1.0><EOL>if not numpy.linalg.det(P):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>scale = numpy.zeros((<NUM_LIT:3>, ))<EOL>shear = [<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>]<EOL>angles = [<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>]<EOL>if any(abs(M[:<NUM_LIT:3>, <NUM_LIT:3>]) > _EPS):<EOL><INDENT>perspective = numpy.dot(M[:, <NUM_LIT:3>], numpy.linalg.inv(P.T))<EOL>M[:, <NUM_LIT:3>] = <NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:1.0><EOL><DEDENT>else:<EOL><INDENT>perspective = numpy.array([<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:1.0>])<EOL><DEDENT>translate = M[<NUM_LIT:3>, :<NUM_LIT:3>].copy()<EOL>M[<NUM_LIT:3>, :<NUM_LIT:3>] = <NUM_LIT:0.0><EOL>row = M[:<NUM_LIT:3>, :<NUM_LIT:3>].copy()<EOL>scale[<NUM_LIT:0>] = vector_norm(row[<NUM_LIT:0>])<EOL>row[<NUM_LIT:0>] /= scale[<NUM_LIT:0>]<EOL>shear[<NUM_LIT:0>] = numpy.dot(row[<NUM_LIT:0>], row[<NUM_LIT:1>])<EOL>row[<NUM_LIT:1>] -= row[<NUM_LIT:0>] * shear[<NUM_LIT:0>]<EOL>scale[<NUM_LIT:1>] = vector_norm(row[<NUM_LIT:1>])<EOL>row[<NUM_LIT:1>] /= scale[<NUM_LIT:1>]<EOL>shear[<NUM_LIT:0>] /= scale[<NUM_LIT:1>]<EOL>shear[<NUM_LIT:1>] = numpy.dot(row[<NUM_LIT:0>], row[<NUM_LIT:2>])<EOL>row[<NUM_LIT:2>] -= row[<NUM_LIT:0>] * shear[<NUM_LIT:1>]<EOL>shear[<NUM_LIT:2>] = numpy.dot(row[<NUM_LIT:1>], row[<NUM_LIT:2>])<EOL>row[<NUM_LIT:2>] -= row[<NUM_LIT:1>] * shear[<NUM_LIT:2>]<EOL>scale[<NUM_LIT:2>] = vector_norm(row[<NUM_LIT:2>])<EOL>row[<NUM_LIT:2>] /= scale[<NUM_LIT:2>]<EOL>shear[<NUM_LIT:1>:] /= scale[<NUM_LIT:2>]<EOL>if numpy.dot(row[<NUM_LIT:0>], numpy.cross(row[<NUM_LIT:1>], row[<NUM_LIT:2>])) < <NUM_LIT:0>:<EOL><INDENT>numpy.negative(scale, 
scale)<EOL>numpy.negative(row, row)<EOL><DEDENT>angles[<NUM_LIT:1>] = math.asin(-row[<NUM_LIT:0>, <NUM_LIT:2>])<EOL>if math.cos(angles[<NUM_LIT:1>]):<EOL><INDENT>angles[<NUM_LIT:0>] = math.atan2(row[<NUM_LIT:1>, <NUM_LIT:2>], row[<NUM_LIT:2>, <NUM_LIT:2>])<EOL>angles[<NUM_LIT:2>] = math.atan2(row[<NUM_LIT:0>, <NUM_LIT:1>], row[<NUM_LIT:0>, <NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>angles[<NUM_LIT:0>] = math.atan2(-row[<NUM_LIT:2>, <NUM_LIT:1>], row[<NUM_LIT:1>, <NUM_LIT:1>])<EOL>angles[<NUM_LIT:2>] = <NUM_LIT:0.0><EOL><DEDENT>return scale, shear, angles, translate, perspective<EOL>", "docstring": "Return sequence of transformations from transformation matrix.\n\n    matrix : array_like\n        Non-degenerative homogeneous transformation matrix\n\n    Return tuple of:\n        scale : vector of 3 scaling factors\n        shear : list of shear factors for x-y, x-z, y-z axes\n        angles : list of Euler angles about static x, y, z axes\n        translate : translation vector along x, y, z axes\n        perspective : perspective partition of matrix\n\n    Raise ValueError if matrix is of wrong type or degenerative.\n\n    >>> T0 = translation_matrix([1, 2, 3])\n    >>> scale, shear, angles, trans, persp = decompose_matrix(T0)\n    >>> T1 = translation_matrix(trans)\n    >>> numpy.allclose(T0, T1)\n    True\n    >>> S = scale_matrix(0.123)\n    >>> scale, shear, angles, trans, persp = decompose_matrix(S)\n    >>> scale[0]\n    0.123\n    >>> R0 = euler_matrix(1, 2, 3)\n    >>> scale, shear, angles, trans, persp = decompose_matrix(R0)\n    >>> R1 = euler_matrix(*angles)\n    >>> numpy.allclose(R0, R1)\n    True", "id": "f15133:m14"}
{"signature": "def compose_matrix(scale=None, shear=None, angles=None, translate=None,<EOL>perspective=None):", "body": "M = numpy.identity(<NUM_LIT:4>)<EOL>if perspective is not None:<EOL><INDENT>P = numpy.identity(<NUM_LIT:4>)<EOL>P[<NUM_LIT:3>, :] = perspective[:<NUM_LIT:4>]<EOL>M = numpy.dot(M, P)<EOL><DEDENT>if translate is not None:<EOL><INDENT>T = numpy.identity(<NUM_LIT:4>)<EOL>T[:<NUM_LIT:3>, <NUM_LIT:3>] = translate[:<NUM_LIT:3>]<EOL>M = numpy.dot(M, T)<EOL><DEDENT>if angles is not None:<EOL><INDENT>R = euler_matrix(angles[<NUM_LIT:0>], angles[<NUM_LIT:1>], angles[<NUM_LIT:2>], '<STR_LIT>')<EOL>M = numpy.dot(M, R)<EOL><DEDENT>if shear is not None:<EOL><INDENT>Z = numpy.identity(<NUM_LIT:4>)<EOL>Z[<NUM_LIT:1>, <NUM_LIT:2>] = shear[<NUM_LIT:2>]<EOL>Z[<NUM_LIT:0>, <NUM_LIT:2>] = shear[<NUM_LIT:1>]<EOL>Z[<NUM_LIT:0>, <NUM_LIT:1>] = shear[<NUM_LIT:0>]<EOL>M = numpy.dot(M, Z)<EOL><DEDENT>if scale is not None:<EOL><INDENT>S = numpy.identity(<NUM_LIT:4>)<EOL>S[<NUM_LIT:0>, <NUM_LIT:0>] = scale[<NUM_LIT:0>]<EOL>S[<NUM_LIT:1>, <NUM_LIT:1>] = scale[<NUM_LIT:1>]<EOL>S[<NUM_LIT:2>, <NUM_LIT:2>] = scale[<NUM_LIT:2>]<EOL>M = numpy.dot(M, S)<EOL><DEDENT>M /= M[<NUM_LIT:3>, <NUM_LIT:3>]<EOL>return M<EOL>", "docstring": "Return transformation matrix from sequence of transformations.\n\n    This is the inverse of the decompose_matrix function.\n\n    Sequence of transformations:\n        scale : vector of 3 scaling factors\n        shear : list of shear factors for x-y, x-z, y-z axes\n        angles : list of Euler angles about static x, y, z axes\n        translate : translation vector along x, y, z axes\n        perspective : perspective partition of matrix\n\n    >>> scale = numpy.random.random(3) - 0.5\n    >>> shear = numpy.random.random(3) - 0.5\n    >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)\n    >>> trans = numpy.random.random(3) - 0.5\n    >>> persp = numpy.random.random(4) - 0.5\n    >>> M0 = compose_matrix(scale, shear, angles, trans, persp)\n    
>>> result = decompose_matrix(M0)\n    >>> M1 = compose_matrix(*result)\n    >>> is_same_transform(M0, M1)\n    True", "id": "f15133:m15"}
{"signature": "def superimposition_matrix(v0, v1, scale=False, usesvd=True):", "body": "v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:<NUM_LIT:3>]<EOL>v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:<NUM_LIT:3>]<EOL>return affine_matrix_from_points(v0, v1, shear=False,<EOL>scale=scale, usesvd=usesvd)<EOL>", "docstring": "Return matrix to transform given 3D point set into second point set.\n\n    v0 and v1 are shape (3, *) or (4, *) arrays of at least 3 points.\n\n    The parameters scale and usesvd are explained in the more general\n    affine_matrix_from_points function.\n\n    The returned matrix is a similarity or Euclidean transformation matrix.\n    This function has a fast C implementation in transformations.c.\n\n    >>> v0 = numpy.random.rand(3, 10)\n    >>> M = superimposition_matrix(v0, v0)\n    >>> numpy.allclose(M, numpy.identity(4))\n    True\n    >>> R = random_rotation_matrix(numpy.random.random(3))\n    >>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]]\n    >>> v1 = numpy.dot(R, v0)\n    >>> M = superimposition_matrix(v0, v1)\n    >>> numpy.allclose(v1, numpy.dot(M, v0))\n    True\n    >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20\n    >>> v0[3] = 1\n    >>> v1 = numpy.dot(R, v0)\n    >>> M = superimposition_matrix(v0, v1)\n    >>> numpy.allclose(v1, numpy.dot(M, v0))\n    True\n    >>> S = scale_matrix(random.random())\n    >>> T = translation_matrix(numpy.random.random(3)-0.5)\n    >>> M = concatenate_matrices(T, R, S)\n    >>> v1 = numpy.dot(M, v0)\n    >>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1)\n    >>> M = superimposition_matrix(v0, v1, scale=True)\n    >>> numpy.allclose(v1, numpy.dot(M, v0))\n    True\n    >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)\n    >>> numpy.allclose(v1, numpy.dot(M, v0))\n    True\n    >>> v = numpy.empty((4, 100, 3))\n    >>> v[:, :, 0] = v0\n    >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)\n    >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 
0]))\n    True", "id": "f15133:m18"}
{"signature": "def dump_pathlib_mate_path(self, obj, class_name=pathlib_mate_path_class_name):", "body": "return {\"<STR_LIT:$>\" + class_name: str(obj)}<EOL>", "docstring": "``pathlib_mate.PathCLs`` dumper.", "id": "f15148:c4:m2"}
{"signature": "def dump_nparray(self, obj, class_name=numpy_ndarray_class_name):", "body": "return {\"<STR_LIT:$>\" + class_name: self._json_convert(obj.tolist())}<EOL>", "docstring": "``numpy.ndarray`` dumper.", "id": "f15148:c3:m0"}
{"signature": "def load_nparray(self, dct, class_name=numpy_ndarray_class_name):", "body": "return np.array(dct[\"<STR_LIT:$>\" + class_name])<EOL>", "docstring": "``numpy.ndarray`` loader.", "id": "f15148:c3:m1"}
{"signature": "def load_datetime(self, dct, class_name=\"<STR_LIT>\"):", "body": "return parse(dct[\"<STR_LIT:$>\" + class_name])<EOL>", "docstring": "``datetime.datetime`` loader.", "id": "f15148:c2:m3"}
{"signature": "def dump_OrderedDict(self, obj, class_name=\"<STR_LIT>\"):", "body": "return {<EOL>\"<STR_LIT:$>\" + class_name: [<EOL>(key, self._json_convert(value)) for key, value in iteritems(obj)<EOL>]<EOL>}<EOL>", "docstring": "``collections.OrderedDict`` dumper.", "id": "f15148:c2:m10"}
{"signature": "def load_date(self, dct, class_name=\"<STR_LIT>\"):", "body": "return datetime.strptime(dct[\"<STR_LIT:$>\" + class_name], \"<STR_LIT>\").date()<EOL>", "docstring": "``datetime.date`` loader.", "id": "f15148:c2:m5"}
{"signature": "def dump_date(self, obj, class_name=\"<STR_LIT>\"):", "body": "return {\"<STR_LIT:$>\" + class_name: str(obj)}<EOL>", "docstring": "``datetime.date`` dumper.", "id": "f15148:c2:m4"}
{"signature": "def is_loader_method(func):", "body": "if getfullargspec(func).args == [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Test if it is a loader method.", "id": "f15148:m2"}
{"signature": "def load_pathlib_path(self, dct, class_name=pathlib_path_class_name):", "body": "return Path(dct[\"<STR_LIT:$>\" + class_name])<EOL>", "docstring": "``pathlib.Path`` or ``pathlib2.Path`` loader.", "id": "f15148:c4:m1"}
{"signature": "def strip_comments(string, comment_symbols=frozenset(('<STR_LIT:#>', '<STR_LIT>'))):", "body": "lines = string.splitlines()<EOL>for k in range(len(lines)):<EOL><INDENT>for symbol in comment_symbols:<EOL><INDENT>lines[k] = strip_comment_line_with_symbol(lines[k], start=symbol)<EOL><DEDENT><DEDENT>return '<STR_LIT:\\n>'.join(lines)<EOL>", "docstring": "Strip comments from json string.\n\n    :param string: A string containing json with comments started by comment_symbols.\n    :param comment_symbols: Iterable of symbols that start a line comment (default # or //).\n    :return: The string with the comments removed.", "id": "f15150:m1"}
{"signature": "def get_code(self, fullname):", "body": "self.__get_module(fullname)  <EOL>return None<EOL>", "docstring": "Return None\n\n        Required, if is_package is implemented", "id": "f15153:c4:m7"}
{"signature": "def remove_move(name):", "body": "try:<EOL><INDENT>delattr(_MovedItems, name)<EOL><DEDENT>except AttributeError:<EOL><INDENT>try:<EOL><INDENT>del moves.__dict__[name]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise AttributeError(\"<STR_LIT>\" % (name,))<EOL><DEDENT><DEDENT>", "docstring": "Remove item from six.moves.", "id": "f15153:m3"}
{"signature": "def is_package(self, fullname):", "body": "return hasattr(self.__get_module(fullname), \"<STR_LIT>\")<EOL>", "docstring": "Return true, if the named module is a package.\n\nWe need this method to get correct spec objects with\nPython 3.4 (see PEP451)", "id": "f15153:c4:m6"}
{"signature": "def python_2_unicode_compatible(klass):", "body": "if PY2:<EOL><INDENT>if '<STR_LIT>' not in klass.__dict__:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" %<EOL>klass.__name__)<EOL><DEDENT>klass.__unicode__ = klass.__str__<EOL>klass.__str__ = lambda self: self.__unicode__().encode('<STR_LIT:utf-8>')<EOL><DEDENT>return klass<EOL>", "docstring": "A decorator that defines __unicode__ and __str__ methods under Python 2.\nUnder Python 3 it does nothing.\n\nTo support Python 2 and 3 with a single code base, define a __str__ method\nreturning text and apply this decorator to the class.", "id": "f15153:m9"}
{"signature": "def add_move(move):", "body": "setattr(_MovedItems, move.name, move)<EOL>", "docstring": "Add an item to six.moves.", "id": "f15153:m2"}
{"signature": "def add_metaclass(metaclass):", "body": "def wrapper(cls):<EOL><INDENT>orig_vars = cls.__dict__.copy()<EOL>slots = orig_vars.get('<STR_LIT>')<EOL>if slots is not None:<EOL><INDENT>if isinstance(slots, str):<EOL><INDENT>slots = [slots]<EOL><DEDENT>for slots_var in slots:<EOL><INDENT>orig_vars.pop(slots_var)<EOL><DEDENT><DEDENT>orig_vars.pop('<STR_LIT>', None)<EOL>orig_vars.pop('<STR_LIT>', None)<EOL>return metaclass(cls.__name__, cls.__bases__, orig_vars)<EOL><DEDENT>return wrapper<EOL>", "docstring": "Class decorator for creating a class with a metaclass.", "id": "f15153:m8"}
{"signature": "def with_metaclass(meta, *bases):", "body": "<EOL>class metaclass(meta):<EOL><INDENT>def __new__(cls, name, this_bases, d):<EOL><INDENT>return meta(name, bases, d)<EOL><DEDENT><DEDENT>return type.__new__(metaclass, '<STR_LIT>', (), {})<EOL>", "docstring": "Create a base class with a metaclass.", "id": "f15153:m7"}
{"signature": "def atomic_write(path, writer_cls=AtomicWriter, **cls_kwargs):", "body": "return writer_cls(path, **cls_kwargs).open()<EOL>", "docstring": "Simple atomic writes. This wraps :py:class:`AtomicWriter`::\n\n    with atomic_write(path) as f:\n        f.write(...)\n\n:param path: The target path to write to.\n:param writer_cls: The writer class to use. This parameter is useful if you\n    subclassed :py:class:`AtomicWriter` to change some behavior and want to\n    use that new subclass.\n\nAdditional keyword arguments are passed to the writer class. See\n:py:class:`AtomicWriter`.", "id": "f15154:m3"}
{"signature": "def replace_atomic(src, dst):", "body": "return _replace_atomic(src, dst)<EOL>", "docstring": "Move ``src`` to ``dst``. If ``dst`` exists, it will be silently\noverwritten.\n\nBoth paths must reside on the same filesystem for the operation to be\natomic.", "id": "f15154:m1"}
{"signature": "def rollback(self, f):", "body": "os.unlink(f.name)<EOL>", "docstring": "Clean up all temporary resources.", "id": "f15154:c0:m6"}
{"signature": "def sync(self, f):", "body": "f.flush()<EOL>_proper_fsync(f.fileno())<EOL>", "docstring": "responsible for clearing as many file caches as possible before\n        commit", "id": "f15154:c0:m4"}
{"signature": "def commit(self, f):", "body": "if self._overwrite:<EOL><INDENT>replace_atomic(f.name, self._path)<EOL><DEDENT>else:<EOL><INDENT>move_atomic(f.name, self._path)<EOL><DEDENT>", "docstring": "Move the temporary file to the target location.", "id": "f15154:c0:m5"}
{"signature": "def get_fileobject(self, dir=None, **kwargs):", "body": "if dir is None:<EOL><INDENT>dir = os.path.normpath(os.path.dirname(self._path))<EOL><DEDENT>descriptor, name = tempfile.mkstemp(dir=dir)<EOL>os.close(descriptor)<EOL>kwargs['<STR_LIT>'] = self._mode<EOL>kwargs['<STR_LIT:file>'] = name<EOL>return io.open(**kwargs)<EOL>", "docstring": "Return the temporary file to use.", "id": "f15154:c0:m3"}
{"signature": "def open(self):", "body": "return self._open(self.get_fileobject)<EOL>", "docstring": "Open the temporary file.", "id": "f15154:c0:m1"}
{"signature": "def _compress_bytes(b, level):", "body": "return zlib.compress(b, level)<EOL>", "docstring": "Compress bytes to bytes.", "id": "f15155:m2"}
{"signature": "def _compress_obj(obj, level):", "body": "return zlib.compress(pickle.dumps(obj, protocol=<NUM_LIT:2>), level)<EOL>", "docstring": "Compress object to bytes.", "id": "f15155:m0"}
{"signature": "def count(self):", "body": "if self._len is None:<EOL><INDENT>for x in self:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return self._len<EOL>", "docstring": "Returns the number of recurrences in this set. It will have go\n            trough the whole recurrence, if this hasn't been done before.", "id": "f15156:c1:m6"}
{"signature": "def between(self, after, before, inc=False, count=<NUM_LIT:1>):", "body": "if self._cache_complete:<EOL><INDENT>gen = self._cache<EOL><DEDENT>else:<EOL><INDENT>gen = self<EOL><DEDENT>started = False<EOL>l = []<EOL>if inc:<EOL><INDENT>for i in gen:<EOL><INDENT>if i > before:<EOL><INDENT>break<EOL><DEDENT>elif not started:<EOL><INDENT>if i >= after:<EOL><INDENT>started = True<EOL>l.append(i)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>l.append(i)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for i in gen:<EOL><INDENT>if i >= before:<EOL><INDENT>break<EOL><DEDENT>elif not started:<EOL><INDENT>if i > after:<EOL><INDENT>started = True<EOL>l.append(i)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>l.append(i)<EOL><DEDENT><DEDENT><DEDENT>return l<EOL>", "docstring": "Returns all the occurrences of the rrule between after and before.\n        The inc keyword defines what happens if after and/or before are\n        themselves occurrences. With inc=True, they will be included in the\n        list, if they are found in the recurrence set.", "id": "f15156:c1:m10"}
{"signature": "def after(self, dt, inc=False):", "body": "if self._cache_complete:<EOL><INDENT>gen = self._cache<EOL><DEDENT>else:<EOL><INDENT>gen = self<EOL><DEDENT>if inc:<EOL><INDENT>for i in gen:<EOL><INDENT>if i >= dt:<EOL><INDENT>return i<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for i in gen:<EOL><INDENT>if i > dt:<EOL><INDENT>return i<EOL><DEDENT><DEDENT><DEDENT>return None<EOL>", "docstring": "Returns the first recurrence after the given datetime instance. The\n            inc keyword defines what happens if dt is an occurrence. With\n            inc=True, if dt itself is an occurrence, it will be returned.", "id": "f15156:c1:m8"}
{"signature": "@_invalidates_cache<EOL><INDENT>def rrule(self, rrule):<DEDENT>", "body": "self._rrule.append(rrule)<EOL>", "docstring": "Include the given :py:class:`rrule` instance in the recurrence set\n            generation.", "id": "f15156:c4:m1"}
{"signature": "def xafter(self, dt, count=None, inc=False):", "body": "if self._cache_complete:<EOL><INDENT>gen = self._cache<EOL><DEDENT>else:<EOL><INDENT>gen = self<EOL><DEDENT>if inc:<EOL><INDENT>def comp(dc, dtc): return dc >= dtc<EOL><DEDENT>else:<EOL><INDENT>def comp(dc, dtc): return dc > dtc<EOL><DEDENT>n = <NUM_LIT:0><EOL>for d in gen:<EOL><INDENT>if comp(d, dt):<EOL><INDENT>if count is not None:<EOL><INDENT>n += <NUM_LIT:1><EOL>if n > count:<EOL><INDENT>break<EOL><DEDENT><DEDENT>yield d<EOL><DEDENT><DEDENT>", "docstring": "Generator which yields up to `count` recurrences after the given\ndatetime instance, equivalent to `after`.\n\n:param dt:\n    The datetime at which to start generating recurrences.\n\n:param count:\n    The maximum number of recurrences to generate. If `None` (default),\n    dates are generated until the recurrence rule is exhausted.\n\n:param inc:\n    If `dt` is an instance of the rule and `inc` is `True`, it is\n    included in the output.\n\n:yields: Yields a sequence of `datetime` objects.", "id": "f15156:c1:m9"}
{"signature": "def __str__(self):", "body": "output = []<EOL>h, m, s = [None] * <NUM_LIT:3><EOL>if self._dtstart:<EOL><INDENT>output.append(self._dtstart.strftime('<STR_LIT>'))<EOL>h, m, s = self._dtstart.timetuple()[<NUM_LIT:3>:<NUM_LIT:6>]<EOL><DEDENT>parts = ['<STR_LIT>' + FREQNAMES[self._freq]]<EOL>if self._interval != <NUM_LIT:1>:<EOL><INDENT>parts.append('<STR_LIT>' + str(self._interval))<EOL><DEDENT>if self._wkst:<EOL><INDENT>parts.append('<STR_LIT>' + repr(weekday(self._wkst))[<NUM_LIT:0>:<NUM_LIT:2>])<EOL><DEDENT>if self._count is not None:<EOL><INDENT>parts.append('<STR_LIT>' + str(self._count))<EOL><DEDENT>if self._until:<EOL><INDENT>parts.append(self._until.strftime('<STR_LIT>'))<EOL><DEDENT>if self._original_rule.get('<STR_LIT>') is not None:<EOL><INDENT>original_rule = dict(self._original_rule)<EOL>wday_strings = []<EOL>for wday in original_rule['<STR_LIT>']:<EOL><INDENT>if wday.n:<EOL><INDENT>wday_strings.append('<STR_LIT>'.format(<EOL>n=wday.n,<EOL>wday=repr(wday)[<NUM_LIT:0>:<NUM_LIT:2>]))<EOL><DEDENT>else:<EOL><INDENT>wday_strings.append(repr(wday))<EOL><DEDENT><DEDENT>original_rule['<STR_LIT>'] = wday_strings<EOL><DEDENT>else:<EOL><INDENT>original_rule = self._original_rule<EOL><DEDENT>partfmt = '<STR_LIT>'<EOL>for name, key in [('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>')]:<EOL><INDENT>value = original_rule.get(key)<EOL>if value:<EOL><INDENT>parts.append(partfmt.format(name=name, vals=('<STR_LIT:U+002C>'.join(str(v)<EOL>for v in value))))<EOL><DEDENT><DEDENT>output.append('<STR_LIT:;>'.join(parts))<EOL>return '<STR_LIT:\\n>'.join(output)<EOL>", "docstring": "Output a string that would generate this RRULE if passed to rrulestr.\nThis is mostly compatible with RFC2445, except for 
the\ndateutil-specific extension BYEASTER.", "id": "f15156:c2:m1"}
{"signature": "@classmethod<EOL><INDENT>def isspace(cls, nextchar):<DEDENT>", "body": "return nextchar.isspace()<EOL>", "docstring": "Whether the next character is whitespace", "id": "f15158:c0:m8"}
{"signature": "@classmethod<EOL><INDENT>def isword(cls, nextchar):<DEDENT>", "body": "return nextchar.isalpha()<EOL>", "docstring": "Whether or not the next character is part of a word", "id": "f15158:c0:m6"}
{"signature": "def is_ambiguous(self, dt):", "body": "dt = dt.replace(tzinfo=self)<EOL>wall_0 = enfold(dt, fold=<NUM_LIT:0>)<EOL>wall_1 = enfold(dt, fold=<NUM_LIT:1>)<EOL>same_offset = wall_0.utcoffset() == wall_1.utcoffset()<EOL>same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)<EOL>return same_dt and not same_offset<EOL>", "docstring": "Whether or not the \"wall time\" of a given datetime is ambiguous in this\nzone.\n\n:param dt:\n    A :py:class:`datetime.datetime`, naive or time zone aware.\n\n\n:return:\n    Returns ``True`` if ambiguous, ``False`` otherwise.\n\n.. versionadded:: 2.6.0", "id": "f15161:c0:m0"}
{"signature": "def _fold_status(self, dt_utc, dt_wall):", "body": "if self.is_ambiguous(dt_wall):<EOL><INDENT>delta_wall = dt_wall - dt_utc<EOL>_fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))<EOL><DEDENT>else:<EOL><INDENT>_fold = <NUM_LIT:0><EOL><DEDENT>return _fold<EOL>", "docstring": "Determine the fold status of a \"wall\" datetime, given a representation\nof the same datetime as a (naive) UTC datetime. This is calculated based\non the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all\ndatetimes, and that this offset is the actual number of hours separating\n``dt_utc`` and ``dt_wall``.\n\n:param dt_utc:\n    Representation of the datetime as UTC\n\n:param dt_wall:\n    Representation of the datetime as \"wall time\". This parameter must\n    either have a `fold` attribute or have a fold-naive\n    :class:`datetime.tzinfo` attached, otherwise the calculation may\n    fail.", "id": "f15161:c0:m1"}
{"signature": "def load_name(self, offset):", "body": "resource = self.p_wchar()<EOL>lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)<EOL>nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, <NUM_LIT:0>)<EOL>return resource[:nchar]<EOL>", "docstring": "Load a timezone name from a DLL offset (integer).\n\n>>> from dateutil.tzwin import tzres\n>>> tzr = tzres()\n>>> print(tzr.load_name(112))\n'Eastern Standard Time'\n\n:param offset:\n    A positive integer value referring to a string from the tzres dll.\n\n..note:\n    Offsets found in the registry are generally of the form\n    `@tzres.dll,-114`. The offset in this case if 114, not -114.", "id": "f15162:c0:m1"}
{"signature": "def valuestodict(key):", "body": "dout = {}<EOL>size = winreg.QueryInfoKey(key)[<NUM_LIT:1>]<EOL>tz_res = None<EOL>for i in range(size):<EOL><INDENT>key_name, value, dtype = winreg.EnumValue(key, i)<EOL>if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN:<EOL><INDENT>if value & (<NUM_LIT:1> << <NUM_LIT>):<EOL><INDENT>value = value - (<NUM_LIT:1> << <NUM_LIT:32>)<EOL><DEDENT><DEDENT>elif dtype == winreg.REG_SZ:<EOL><INDENT>if value.startswith('<STR_LIT>'):<EOL><INDENT>tz_res = tz_res or tzres()<EOL>value = tz_res.name_from_string(value)<EOL><DEDENT>value = value.rstrip('<STR_LIT:\\x00>')    <EOL><DEDENT>dout[key_name] = value<EOL><DEDENT>return dout<EOL>", "docstring": "Convert a registry key's values to a dictionary.", "id": "f15162:m2"}
{"signature": "@staticmethod<EOL><INDENT>def list():<DEDENT>", "body": "with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:<EOL><INDENT>with winreg.OpenKey(handle, TZKEYNAME) as tzkey:<EOL><INDENT>result = [winreg.EnumKey(tzkey, i)<EOL>for i in range(winreg.QueryInfoKey(tzkey)[<NUM_LIT:0>])]<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Return a list of all time zones known to the system.", "id": "f15162:c1:m2"}
{"signature": "def name_from_string(self, tzname_str):", "body": "if not tzname_str.startswith('<STR_LIT:@>'):<EOL><INDENT>return tzname_str<EOL><DEDENT>name_splt = tzname_str.split('<STR_LIT>')<EOL>try:<EOL><INDENT>offset = int(name_splt[<NUM_LIT:1>])<EOL><DEDENT>except:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return self.load_name(offset)<EOL>", "docstring": "Parse strings as returned from the Windows registry into the time zone\nname as defined in the registry.\n\n>>> from dateutil.tzwin import tzres\n>>> tzr = tzres()\n>>> print(tzr.name_from_string('@tzres.dll,-251'))\n'Dateline Daylight Time'\n>>> print(tzr.name_from_string('Eastern Standard Time'))\n'Eastern Standard Time'\n\n:param tzname_str:\n    A timezone name string as returned from a Windows registry key.\n\n:return:\n    Returns the localized timezone string from tzres.dll if the string\n    is of the form `@tzres.dll,-offset`, else returns the input string.", "id": "f15162:c0:m2"}
{"signature": "def is_ambiguous(self, dt):", "body": "return False<EOL>", "docstring": "Whether or not the \"wall time\" of a given datetime is ambiguous in this\nzone.\n\n:param dt:\n    A :py:class:`datetime.datetime`, naive or time zone aware.\n\n\n:return:\n    Returns ``True`` if ambiguous, ``False`` otherwise.\n\n.. versionadded:: 2.6.0", "id": "f15164:c1:m5"}
{"signature": "def transitions(self, year):", "body": "if not self.hasdst:<EOL><INDENT>return None<EOL><DEDENT>base_year = datetime.datetime(year, <NUM_LIT:1>, <NUM_LIT:1>)<EOL>start = base_year + self._start_delta<EOL>end = base_year + self._end_delta<EOL>return (start, end)<EOL>", "docstring": "For a given year, get the DST on and off transition times, expressed\nalways on the standard time side. For zones with no transitions, this\nfunction returns ``None``.\n\n:param year:\n    The year whose transitions you would like to query.\n\n:return:\n    Returns a :class:`tuple` of :class:`datetime.datetime` objects,\n    ``(dston, dstoff)`` for zones with an annual DST transition, or\n    ``None`` for fixed offset zones.", "id": "f15164:c6:m1"}
{"signature": "def is_ambiguous(self, dt):", "body": "naive_dst = self._naive_is_dst(dt)<EOL>return (not naive_dst and<EOL>(naive_dst != self._naive_is_dst(dt - self._dst_saved)))<EOL>", "docstring": "Whether or not the \"wall time\" of a given datetime is ambiguous in this\nzone.\n\n:param dt:\n    A :py:class:`datetime.datetime`, naive or time zone aware.\n\n\n:return:\n    Returns ``True`` if ambiguous, ``False`` otherwise.\n\n.. versionadded:: 2.6.0", "id": "f15164:c2:m4"}
{"signature": "def is_ambiguous(self, dt, idx=None):", "body": "if idx is None:<EOL><INDENT>idx = self._find_last_transition(dt)<EOL><DEDENT>timestamp = _datetime_to_timestamp(dt)<EOL>tti = self._get_ttinfo(idx)<EOL>if idx is None or idx <= <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>od = self._get_ttinfo(idx - <NUM_LIT:1>).offset - tti.offset<EOL>tt = self._trans_list[idx]          <EOL>return timestamp < tt + od<EOL>", "docstring": "Whether or not the \"wall time\" of a given datetime is ambiguous in this\nzone.\n\n:param dt:\n    A :py:class:`datetime.datetime`, naive or time zone aware.\n\n\n:return:\n    Returns ``True`` if ambiguous, ``False`` otherwise.\n\n.. versionadded:: 2.6.0", "id": "f15164:c5:m7"}
{"signature": "def datetime_ambiguous(dt, tz=None):", "body": "if tz is None:<EOL><INDENT>if dt.tzinfo is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>tz = dt.tzinfo<EOL><DEDENT>is_ambiguous_fn = getattr(tz, '<STR_LIT>', None)<EOL>if is_ambiguous_fn is not None:<EOL><INDENT>try:<EOL><INDENT>return tz.is_ambiguous(dt)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>dt = dt.replace(tzinfo=tz)<EOL>wall_0 = enfold(dt, fold=<NUM_LIT:0>)<EOL>wall_1 = enfold(dt, fold=<NUM_LIT:1>)<EOL>same_offset = wall_0.utcoffset() == wall_1.utcoffset()<EOL>same_dst = wall_0.dst() == wall_1.dst()<EOL>return not (same_offset and same_dst)<EOL>", "docstring": "Given a datetime and a time zone, determine whether or not a given datetime\nis ambiguous (i.e if there are two times differentiated only by their DST\nstatus).\n\n:param dt:\n    A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``\n    is provided.)\n\n:param tz:\n    A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If\n    ``None`` or not provided, the datetime's own time zone will be used.\n\n:return:\n    Returns a boolean value whether or not the \"wall time\" is ambiguous in\n    ``tz``.\n\n.. versionadded:: 2.6.0", "id": "f15164:m2"}
{"signature": "def keys(self):", "body": "return list(self._vtz.keys())<EOL>", "docstring": "Retrieves the available time zones as a list.", "id": "f15164:c10:m1"}
{"signature": "def is_ambiguous(self, dt):", "body": "return False<EOL>", "docstring": "Whether or not the \"wall time\" of a given datetime is ambiguous in this\nzone.\n\n:param dt:\n    A :py:class:`datetime.datetime`, naive or time zone aware.\n\n\n:return:\n    Returns ``True`` if ambiguous, ``False`` otherwise.\n\n.. versionadded:: 2.6.0", "id": "f15164:c0:m3"}
{"signature": "def normalized(self):", "body": "<EOL>days = int(self.days)<EOL>hours_f = round(self.hours + <NUM_LIT> * (self.days - days), <NUM_LIT:11>)<EOL>hours = int(hours_f)<EOL>minutes_f = round(self.minutes + <NUM_LIT> * (hours_f - hours), <NUM_LIT:10>)<EOL>minutes = int(minutes_f)<EOL>seconds_f = round(self.seconds + <NUM_LIT> * (minutes_f - minutes), <NUM_LIT:8>)<EOL>seconds = int(seconds_f)<EOL>microseconds = round(self.microseconds + <NUM_LIT> * (seconds_f - seconds))<EOL>return self.__class__(years=self.years, months=self.months,<EOL>days=days, hours=hours, minutes=minutes,<EOL>seconds=seconds, microseconds=microseconds,<EOL>leapdays=self.leapdays, year=self.year,<EOL>month=self.month, day=self.day,<EOL>weekday=self.weekday, hour=self.hour,<EOL>minute=self.minute, second=self.second,<EOL>microsecond=self.microsecond)<EOL>", "docstring": "Return a version of this object represented entirely using integer\nvalues for the relative attributes.\n\n>>> relativedelta(days=1.5, hours=2).normalized()\nrelativedelta(days=1, hours=14)\n\n:return:\n    Returns a :class:`dateutil.relativedelta.relativedelta` object.", "id": "f15166:c0:m5"}
{"signature": "def get(self, name, default=None):", "body": "return self.zones.get(name, default)<EOL>", "docstring": "Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method\nfor retrieving zones from the zone dictionary.\n\n:param name:\n    The name of the zone to retrieve. (Generally IANA zone names)\n\n:param default:\n    The value to return in the event of a missing key.\n\n.. versionadded:: 2.6.0", "id": "f15167:c1:m1"}
{"signature": "def get_zonefile_instance(new_instance=False):", "body": "if new_instance:<EOL><INDENT>zif = None<EOL><DEDENT>else:<EOL><INDENT>zif = getattr(get_zonefile_instance, '<STR_LIT>', None)<EOL><DEDENT>if zif is None:<EOL><INDENT>zif = ZoneInfoFile(getzoneinfofile_stream())<EOL>get_zonefile_instance._cached_instance = zif<EOL><DEDENT>return zif<EOL>", "docstring": "This is a convenience function which provides a :class:`ZoneInfoFile`\ninstance using the data provided by the ``dateutil`` package. By default, it\ncaches a single instance of the ZoneInfoFile object and returns that.\n\n:param new_instance:\n    If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and\n    used as the cached instance for the next call. Otherwise, new instances\n    are created only as necessary.\n\n:return:\n    Returns a :class:`ZoneInfoFile` object.\n\n.. versionadded:: 2.6", "id": "f15167:m1"}
{"signature": "def gettz_db_metadata():", "body": "warnings.warn(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>DeprecationWarning)<EOL>if len(_CLASS_ZONE_INSTANCE) == <NUM_LIT:0>:<EOL><INDENT>_CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))<EOL><DEDENT>return _CLASS_ZONE_INSTANCE[<NUM_LIT:0>].metadata<EOL>", "docstring": "Get the zonefile metadata\n\n    See `zonefile_metadata`_\n\n    :returns:\n        A dictionary with the database metadata\n\n    .. deprecated:: 2.6\n        See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,\n        query the attribute ``zoneinfo.ZoneInfoFile.metadata``.", "id": "f15167:m3"}
{"signature": "def gettz(name):", "body": "warnings.warn(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>DeprecationWarning)<EOL>if len(_CLASS_ZONE_INSTANCE) == <NUM_LIT:0>:<EOL><INDENT>_CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))<EOL><DEDENT>return _CLASS_ZONE_INSTANCE[<NUM_LIT:0>].zones.get(name)<EOL>", "docstring": "This retrieves a time zone from the local zoneinfo tarball that is packaged\nwith dateutil.\n\n:param name:\n    An IANA-style time zone name, as found in the zoneinfo file.\n\n:return:\n    Returns a :class:`dateutil.tz.tzfile` time zone object.\n\n.. warning::\n    It is generally inadvisable to use this function, and it is only\n    provided for API compatibility with earlier versions. This is *not*\n    equivalent to ``dateutil.tz.gettz()``, which selects an appropriate\n    time zone based on the inputs, favoring system zoneinfo. This is ONLY\n    for accessing the dateutil-specific zoneinfo (which may be out of\n    date compared to the system zoneinfo).\n\n.. deprecated:: 2.6\n    If you need to use a specific zoneinfofile over the system zoneinfo,\n    instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call\n    :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.\n\n    Use :func:`get_zonefile_instance` to retrieve an instance of the\n    dateutil-provided zoneinfo.", "id": "f15167:m2"}
{"signature": "def rebuild(filename, tag=None, format=\"<STR_LIT>\", zonegroups=[], metadata=None):", "body": "tmpdir = tempfile.mkdtemp()<EOL>zonedir = os.path.join(tmpdir, \"<STR_LIT>\")<EOL>moduledir = os.path.dirname(__file__)<EOL>try:<EOL><INDENT>with tar_open(filename) as tf:<EOL><INDENT>for name in zonegroups:<EOL><INDENT>tf.extract(name, tmpdir)<EOL><DEDENT>filepaths = [os.path.join(tmpdir, n) for n in zonegroups]<EOL>try:<EOL><INDENT>check_call([\"<STR_LIT>\", \"<STR_LIT>\", zonedir] + filepaths)<EOL><DEDENT>except OSError as e:<EOL><INDENT>_print_on_nosuchfile(e)<EOL>raise<EOL><DEDENT><DEDENT>with open(os.path.join(zonedir, METADATA_FN), '<STR_LIT:w>') as f:<EOL><INDENT>json.dump(metadata, f, indent=<NUM_LIT:4>, sort_keys=True)<EOL><DEDENT>target = os.path.join(moduledir, ZONEFILENAME)<EOL>with tar_open(target, \"<STR_LIT>\" % format) as tf:<EOL><INDENT>for entry in os.listdir(zonedir):<EOL><INDENT>entrypath = os.path.join(zonedir, entry)<EOL>tf.add(entrypath, entry)<EOL><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>shutil.rmtree(tmpdir)<EOL><DEDENT>", "docstring": "Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*\n\n    filename is the timezone tarball from ftp.iana.org/tz.", "id": "f15168:m0"}
{"signature": "def easter(year, method=EASTER_WESTERN):", "body": "if not (<NUM_LIT:1> <= method <= <NUM_LIT:3>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>y = year<EOL>g = y % <NUM_LIT><EOL>e = <NUM_LIT:0><EOL>if method < <NUM_LIT:3>:<EOL><INDENT>i = (<NUM_LIT> * g + <NUM_LIT:15>) % <NUM_LIT:30><EOL>j = (y + y // <NUM_LIT:4> + i) % <NUM_LIT:7><EOL>if method == <NUM_LIT:2>:<EOL><INDENT>e = <NUM_LIT:10><EOL>if y > <NUM_LIT>:<EOL><INDENT>e = e + y // <NUM_LIT:100> - <NUM_LIT:16> - (y // <NUM_LIT:100> - <NUM_LIT:16>) // <NUM_LIT:4><EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>c = y // <NUM_LIT:100><EOL>h = (c - c // <NUM_LIT:4> - (<NUM_LIT:8> * c + <NUM_LIT>) // <NUM_LIT> + <NUM_LIT> * g + <NUM_LIT:15>) % <NUM_LIT:30><EOL>i = h - (h // <NUM_LIT>) * (<NUM_LIT:1> - (h // <NUM_LIT>) *<EOL>(<NUM_LIT> // (h + <NUM_LIT:1>)) * ((<NUM_LIT> - g) // <NUM_LIT:11>))<EOL>j = (y + y // <NUM_LIT:4> + i + <NUM_LIT:2> - c + c // <NUM_LIT:4>) % <NUM_LIT:7><EOL><DEDENT>p = i - j + e<EOL>d = <NUM_LIT:1> + (p + <NUM_LIT> + (p + <NUM_LIT:6>) // <NUM_LIT>) % <NUM_LIT><EOL>m = <NUM_LIT:3> + (p + <NUM_LIT>) // <NUM_LIT:30><EOL>return datetime.date(int(y), int(m), int(d))<EOL>", "docstring": "This method was ported from the work done by GM Arts,\non top of the algorithm by Claus Tondering, which was\nbased in part on the algorithm of Ouding (1940), as\nquoted in \"Explanatory Supplement to the Astronomical\nAlmanac\", P.  
Kenneth Seidelmann, editor.\n\nThis algorithm implements three different easter\ncalculation methods:\n\n1 - Original calculation in Julian calendar, valid in\n    dates after 326 AD\n2 - Original method, with date converted to Gregorian\n    calendar, valid in years 1583 to 4099\n3 - Revised method, in Gregorian calendar, valid in\n    years 1583 to 4099 as well\n\nThese methods are represented by the constants:\n\n* ``EASTER_JULIAN   = 1``\n* ``EASTER_ORTHODOX = 2``\n* ``EASTER_WESTERN  = 3``\n\nThe default method is method 3.\n\nMore about the algorithm may be found at:\n\nhttp://users.chariot.net.au/~gmarts/eastalg.htm\n\nand\n\nhttp://www.tondering.dk/claus/calendar.html", "id": "f15169:m0"}
{"signature": "def prt_console(message, verbose):  ", "body": "if verbose:<EOL><INDENT>logger.info(message)<EOL><DEDENT>", "docstring": "Print message to console, if ``verbose`` is True.", "id": "f15170:m0"}
{"signature": "def initialize_sentry_integration():  ", "body": "<EOL>try:<EOL><INDENT>import sentry_sdk<EOL>from sentry_sdk.integrations.pyramid import PyramidIntegration<EOL>from sentry_sdk.integrations.celery import CeleryIntegration<EOL><DEDENT>except ImportError:<EOL><INDENT>warnings.warn(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>UserWarning,<EOL>)<EOL>return  <EOL><DEDENT>try:<EOL><INDENT>dsn = os.environ['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>warnings.warn(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>UserWarning,<EOL>)<EOL><DEDENT>else:<EOL><INDENT>sentry_sdk.init(<EOL>dsn=dsn,<EOL>integrations=[PyramidIntegration(), CeleryIntegration()],<EOL>)<EOL><DEDENT>", "docstring": "\\\n    Used to optionally initialize the Sentry service with this app.\n    See https://docs.sentry.io/platforms/python/pyramid/", "id": "f15179:m1"}
{"signature": "def expandvars_dict(settings):", "body": "return dict(<EOL>(key, os.path.expandvars(value))<EOL>for key, value in settings.iteritems()<EOL>)<EOL>", "docstring": "Expands all environment variables in a settings dictionary.", "id": "f15179:m0"}
{"signature": "def _insert_control_id(uuid_, cursor):", "body": "cursor.execute(\"\"\"<STR_LIT>\"\"\",<EOL>(uuid_,))<EOL>return cursor.fetchone()[<NUM_LIT:0>]<EOL>", "docstring": "Inserts a UUID value into the ``document_controls`` table.", "id": "f15184:m5"}
{"signature": "def _insert_user_info(model, cursor):", "body": "user_ids = set([])<EOL>for role_attr in cnxepub.ATTRIBUTED_ROLE_KEYS:<EOL><INDENT>for role in model.metadata.get(role_attr, []):<EOL><INDENT>user_ids.add(role['<STR_LIT:id>'])<EOL><DEDENT><DEDENT>cursor.execute(\"<STR_LIT>\",<EOL>(list(user_ids),))<EOL>try:<EOL><INDENT>existing_user_ids = [x[<NUM_LIT:0>] for x in cursor.fetchall()]<EOL><DEDENT>except TypeError:<EOL><INDENT>existing_user_ids = []<EOL><DEDENT>new_user_ids = [u for u in user_ids if u not in existing_user_ids]<EOL>for user_id in new_user_ids:<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\", (user_id,))<EOL><DEDENT>", "docstring": "Insert the user shadow table info.", "id": "f15184:m7"}
{"signature": "def _set_uri(model):", "body": "<EOL>model.set_uri('<STR_LIT>', model.ident_hash)<EOL>", "docstring": "Set the model's cnx-archive-uri to the model's ident_hash.", "id": "f15184:m4"}
{"signature": "def _insert_file(file_bytes, media_type, cursor):", "body": "cursor.execute('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>(Binary(file_bytes), media_type))<EOL>return cursor.fetchone()[<NUM_LIT:0>]<EOL>", "docstring": "Insert a file, with media_type, into the files table. Returns fileid", "id": "f15184:m8"}
{"signature": "def db_connect(method):", "body": "@functools.wraps(method)<EOL>def wrapped(self, *args, **kwargs):<EOL><INDENT>connect = db_connection_factory()<EOL>with connect() as db_connection:<EOL><INDENT>with db_connection.cursor() as cursor:<EOL><INDENT>return method(self, cursor, *args, **kwargs)<EOL><DEDENT><DEDENT><DEDENT>return wrapped<EOL>", "docstring": "Decorator for methods that need to use the database\n\n    Example:\n    @db_connect\n    def setUp(self, cursor):\n        cursor.execute(some_sql)\n        # some other code", "id": "f15186:m3"}
{"signature": "def config_uri():", "body": "config_uri = os.environ.get('<STR_LIT>', None)<EOL>if config_uri is None:<EOL><INDENT>config_uri = os.path.join(here, '<STR_LIT>')<EOL><DEDENT>return config_uri<EOL>", "docstring": "Return the file path of the testing config uri", "id": "f15186:m0"}
{"signature": "def app_login(self, username, password):", "body": "path = '<STR_LIT>'<EOL>data = {<EOL>'<STR_LIT:username>': username,<EOL>'<STR_LIT:password>': password,<EOL>}<EOL>resp = self.app.post(path, data)<EOL>return resp<EOL>", "docstring": "Logins in to the app using (a stub) accounts.", "id": "f15192:c1:m11"}
{"signature": "def make_one(self, binder, content):", "body": "<EOL>metadata = [x.metadata.copy()<EOL>for x in cnxepub.flatten_to_documents(binder)][<NUM_LIT:0>]<EOL>del metadata['<STR_LIT>']<EOL>del metadata['<STR_LIT:version>']<EOL>metadata['<STR_LIT:title>'] = \"<STR_LIT>\"<EOL>publisher = [p['<STR_LIT:id>'] for p in metadata['<STR_LIT>']][<NUM_LIT:0>]<EOL>message = \"<STR_LIT>\"<EOL>composite_doc = cnxepub.CompositeDocument(None, content, metadata)<EOL>return publisher, message, composite_doc<EOL>", "docstring": "Given a binder and content, make a composite document for that\n        binder. Returns publisher, message and CompositeDocument instance.", "id": "f15192:c2:m4"}
{"signature": "def app_post_json_license_acceptance(self, publication_id, uid,<EOL>data, headers=[]):", "body": "path = '<STR_LIT>'.format(publication_id, uid)<EOL>self.app.post_json(path, data, headers=headers)<EOL>", "docstring": "User at ``uid`` accepts the license for publication at\n        ``publication_id either for or against as ``accept``.\n        The ``data`` value is expected to be a python type that\n        this method will marshal to JSON.", "id": "f15192:c1:m5"}
{"signature": "def app_get_license_acceptance(self, publication_id, uid, headers=[]):", "body": "path = '<STR_LIT>'.format(publication_id, uid)<EOL>return self.app.get(path, headers=headers)<EOL>", "docstring": "User at ``uid`` lookups up the HTML page for license acceptance.", "id": "f15192:c1:m4"}
{"signature": "def _get_recipe_ids(module_ident, cursor):", "body": "cursor.execute(\"\"\"<STR_LIT>\"\"\", (module_ident,))<EOL>return cursor.fetchone()<EOL>", "docstring": "Returns a tuple of length 2 of primary and fallback recipe ids.\n\n    The primary will be based on the print_style of the book. It is the first\n    of:\n        1. default recipe currently associated with the print_style of the book\n           being baked (defined by module_ident)\n        2. A CSS file associated with this book that is named the same as the\n           print_style\n        3. A CSS file associated with this book that is named 'ruleset.css'\n\n        The fallback is the recipe used for last successful bake of this book,\n        if different than the primary. Either value or both values may be\n        None", "id": "f15197:m2"}
{"signature": "def make_wsgi_app(global_config, **settings):  ", "body": "from .config import configure<EOL>return configure(settings).make_wsgi_app()<EOL>", "docstring": "Application factory", "id": "f15200:m1"}
{"signature": "def _upsert_persons(cursor, person_ids, lookup_func):", "body": "person_ids = list(set(person_ids))  <EOL>cursor.execute(\"<STR_LIT>\",<EOL>(person_ids,))<EOL>existing_person_ids = [x[<NUM_LIT:0>] for x in cursor.fetchall()]<EOL>new_person_ids = [p for p in person_ids if p not in existing_person_ids]<EOL>for person_id in existing_person_ids:<EOL><INDENT>person_info = lookup_func(person_id)<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", person_info)<EOL><DEDENT>for person_id in new_person_ids:<EOL><INDENT>person_info = lookup_func(person_id)<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", person_info)<EOL><DEDENT>", "docstring": "Upsert's user info into the database.\n    The model contains the user info as part of the role values.", "id": "f15202:m38"}
{"signature": "def check_publication_state(publication_id):", "body": "with db_connect() as db_conn:<EOL><INDENT>with db_conn.cursor() as cursor:<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\", (publication_id,))<EOL>publication_state, publication_messages = cursor.fetchone()<EOL><DEDENT><DEDENT>return publication_state, publication_messages<EOL>", "docstring": "Check the publication's current state.", "id": "f15202:m26"}
{"signature": "def set_publication_failure(cursor, exc):", "body": "publication_id = exc.publication_id<EOL>if publication_id is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\", (publication_id,))<EOL>state_messages = cursor.fetchone()[<NUM_LIT:0>]<EOL>if state_messages is None:<EOL><INDENT>state_messages = []<EOL><DEDENT>entry = exc.__dict__<EOL>entry['<STR_LIT:message>'] = exc.message<EOL>state_messages.append(entry)<EOL>state_messages = json.dumps(state_messages)<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", ('<STR_LIT>', state_messages, publication_id,))<EOL>", "docstring": "Given a publication exception, set the publication as failed and\n    append the failure message to the publication record.", "id": "f15202:m19"}
{"signature": "def obtain_licenses():", "body": "with db_connect() as db_conn:<EOL><INDENT>with db_conn.cursor() as cursor:<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\")<EOL>licenses = {r[<NUM_LIT:0>]: r[<NUM_LIT:1>] for r in cursor.fetchall()}<EOL><DEDENT><DEDENT>return licenses<EOL>", "docstring": "Obtain the licenses in a dictionary form, keyed by url.", "id": "f15202:m9"}
{"signature": "def add_publication(cursor, epub, epub_file, is_pre_publication=False):", "body": "publisher = epub[<NUM_LIT:0>].metadata['<STR_LIT>']<EOL>publish_message = epub[<NUM_LIT:0>].metadata['<STR_LIT>']<EOL>epub_binary = psycopg2.Binary(epub_file.read())<EOL>args = (publisher, publish_message, epub_binary, is_pre_publication,)<EOL>cursor.execute(", "docstring": "Adds a publication entry and makes each item\n    a pending document.", "id": "f15202:m20"}
{"signature": "def _validate_derived_from(cursor, model):", "body": "derived_from_uri = model.metadata.get('<STR_LIT>')<EOL>if derived_from_uri is None:<EOL><INDENT>return  <EOL><DEDENT>try:<EOL><INDENT>ident_hash = parse_archive_uri(derived_from_uri)<EOL>uuid_, version = split_ident_hash(ident_hash, split_version=True)<EOL><DEDENT>except (ValueError, IdentHashSyntaxError, IdentHashShortId) as exc:<EOL><INDENT>raise exceptions.InvalidMetadata('<STR_LIT>', derived_from_uri,<EOL>original_exception=exc)<EOL><DEDENT>args = [uuid_]<EOL>table = '<STR_LIT>'<EOL>version_condition = '<STR_LIT>'<EOL>if version != (None, None,):<EOL><INDENT>args.extend(version)<EOL>table = '<STR_LIT>'<EOL>version_condition = \"<STR_LIT>\"\"<STR_LIT>\".format(version[<NUM_LIT:1>] is None and '<STR_LIT>' or '<STR_LIT:=>')<EOL><DEDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\"<EOL>.format(table, version_condition), args)<EOL>try:<EOL><INDENT>_exists = cursor.fetchone()[<NUM_LIT:0>]  <EOL><DEDENT>except TypeError:  <EOL><INDENT>raise exceptions.InvalidMetadata('<STR_LIT>', derived_from_uri)<EOL><DEDENT>model.metadata['<STR_LIT>'] = ident_hash<EOL>", "docstring": "Given a database cursor and model, check the derived-from\n    value accurately points to content in the archive.\n    The value can be nothing or must point to existing content.", "id": "f15202:m12"}
{"signature": "def _check_pending_document_license_state(cursor, document_id):", "body": "cursor.execute(\"\"\"<STR_LIT>\"\"\",<EOL>(document_id,))<EOL>try:<EOL><INDENT>is_accepted = cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>except TypeError:<EOL><INDENT>is_accepted = True<EOL><DEDENT>return is_accepted<EOL>", "docstring": "Check the aggregate state on the pending document.", "id": "f15202:m21"}
{"signature": "def _check_pending_document_role_state(cursor, document_id):", "body": "cursor.execute(\"\"\"<STR_LIT>\"\"\",<EOL>(document_id,))<EOL>try:<EOL><INDENT>is_accepted = cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>except TypeError:<EOL><INDENT>is_accepted = True<EOL><DEDENT>return is_accepted<EOL>", "docstring": "Check the aggregate state on the pending document.", "id": "f15202:m22"}
{"signature": "def notify_users(cursor, document_id):", "body": "return<EOL>registry = get_current_registry()<EOL>accounts = registry.getUtility(IOpenstaxAccounts)<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\"CT user_id, array_agg(role_type)::text[]<EOL>role_acceptances AS ra<EOL>E<EOL>.uuid = (SELECT uuid FROM pending_documents WHERE id = %s)<EOL>D ra.notified IS NULL AND (NOT ra.accepted or ra.accepted IS UNKNOWN)<EOL>P BY user_id<EOL>(document_id,))<EOL>roles = {u: r for u, r in cursor.fetchall()}<EOL>needs_notified = set(licensors + list(roles.keys()))<EOL>for user_id in needs_notified:<EOL><INDENT>data = {<EOL>'<STR_LIT>': user_id,<EOL>'<STR_LIT>': None,  <EOL>'<STR_LIT>': user_id in licensors,<EOL>'<STR_LIT>': roles.get(user_id, []),<EOL>}<EOL>message = NOTIFICATION_TEMPLATE.render(**data)<EOL>accounts.send_message(user_id, NOFIFICATION_SUBJECT, message)<EOL><DEDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\", (document_id, licensors,))<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", (document_id, list(roles.keys()),))<EOL>", "docstring": "Notify all users about their role and/or license acceptance\n    for a piece of content associated with the given ``document_id``.", "id": "f15202:m41"}
{"signature": "def remove_acl(cursor, uuid_, permissions):", "body": "if not isinstance(permissions, (list, set, tuple)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>.format(type(permissions)))<EOL><DEDENT>permissions = set(permissions)<EOL>for uid, permission in permissions:<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\",<EOL>(uuid_, uid, permission,))<EOL><DEDENT>", "docstring": "Given a ``uuid`` and a set of permissions given as a tuple\n    of ``uid`` and ``permission``, remove these entries from the database.", "id": "f15202:m37"}
{"signature": "def publish_pending(cursor, publication_id):", "body": "cursor.execute(\"\"\"<STR_LIT>\"\"\",<EOL>(publication_id, publication_id,))<EOL>publisher, message = cursor.fetchone()<EOL>cursor.connection.commit()<EOL>all_models = []<EOL>from .publish import publish_model<EOL>type_ = cnxepub.Document.__name__<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", (type_, publication_id,))<EOL>for row in cursor.fetchall():<EOL><INDENT>id, major_version, minor_version = row[<NUM_LIT:1>:<NUM_LIT:4>]<EOL>id = str(id)<EOL>version = '<STR_LIT:.>'.join([str(x)<EOL>for x in (major_version, minor_version,)<EOL>if x is not None])<EOL>metadata, content = row[-<NUM_LIT:2>:]<EOL>content = content[:]<EOL>metadata['<STR_LIT:version>'] = version<EOL>document = cnxepub.Document(id, content, metadata)<EOL>for ref in document.references:<EOL><INDENT>if ref.uri.startswith('<STR_LIT>'):<EOL><INDENT>hash = ref.uri[len('<STR_LIT>'):]<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", (hash,))<EOL>data, media_type = cursor.fetchone()<EOL>document.resources.append(cnxepub.Resource(<EOL>hash, io.BytesIO(data[:]), media_type, filename=hash))<EOL><DEDENT><DEDENT>_ident_hash = publish_model(cursor, document, publisher, message)  <EOL>all_models.append(document)<EOL><DEDENT>type_ = cnxepub.Binder.__name__<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", (type_, publication_id,))<EOL>for row in cursor.fetchall():<EOL><INDENT>id, major_version, minor_version, metadata = row[<NUM_LIT:1>:<NUM_LIT:5>]<EOL>tree = metadata['<STR_LIT>']<EOL>binder = _reassemble_binder(str(id), tree, metadata)<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\",<EOL>(binder.ident_hash,))<EOL>binder.resources = [<EOL>cnxepub.Resource(r_hash,<EOL>io.BytesIO(r_data[:]),<EOL>r_media_type,<EOL>filename=r_filename)<EOL>for (r_hash, r_data, r_media_type, r_filename)<EOL>in cursor.fetchall()]<EOL>_ident_hash = publish_model(cursor, binder, publisher, message)  <EOL>all_models.append(binder)<EOL><DEDENT>from .publish import 
republish_binders<EOL>_republished_ident_hashes = republish_binders(cursor, all_models)  <EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", (publication_id,))<EOL>state = cursor.fetchone()[<NUM_LIT:0>]<EOL>return state<EOL>", "docstring": "Given a publication id as ``publication_id``,\n    write the documents to the *Connexions Archive*.", "id": "f15202:m29"}
{"signature": "def _upsert_users(cursor, user_ids, lookup_func):", "body": "user_ids = list(set(user_ids))  <EOL>cursor.execute(\"<STR_LIT>\",<EOL>(user_ids,))<EOL>existing_user_ids = [x[<NUM_LIT:0>] for x in cursor.fetchall()]<EOL>new_user_ids = [u for u in user_ids if u not in existing_user_ids]<EOL>for user_id in existing_user_ids:<EOL><INDENT>user_info = lookup_func(user_id)<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", user_info)<EOL><DEDENT>for user_id in new_user_ids:<EOL><INDENT>user_info = lookup_func(user_id)<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", user_info)<EOL><DEDENT>", "docstring": "Upsert's user info into the database.\n    The model contains the user info as part of the role values.", "id": "f15202:m39"}
{"signature": "def _node_to_model(tree_or_item, metadata=None, parent=None,<EOL>lucent_id=cnxepub.TRANSLUCENT_BINDER_ID):", "body": "if '<STR_LIT>' in tree_or_item:<EOL><INDENT>tree = tree_or_item<EOL>binder = cnxepub.TranslucentBinder(metadata=tree)<EOL>for item in tree['<STR_LIT>']:<EOL><INDENT>node = _node_to_model(item, parent=binder,<EOL>lucent_id=lucent_id)<EOL>if node.metadata['<STR_LIT:title>'] != item['<STR_LIT:title>']:<EOL><INDENT>binder.set_title_for_node(node, item['<STR_LIT:title>'])<EOL><DEDENT><DEDENT>result = binder<EOL><DEDENT>else:<EOL><INDENT>item = tree_or_item<EOL>result = cnxepub.DocumentPointer(item['<STR_LIT:id>'], metadata=item)<EOL><DEDENT>if parent is not None:<EOL><INDENT>parent.append(result)<EOL><DEDENT>return result<EOL>", "docstring": "Given a tree, parse to a set of models", "id": "f15202:m27"}
{"signature": "def with_db_cursor(func):", "body": "@functools.wraps(func)<EOL>def wrapped(*args, **kwargs):<EOL><INDENT>if '<STR_LIT>' in kwargs or func.__code__.co_argcount == len(args):<EOL><INDENT>return func(*args, **kwargs)<EOL><DEDENT>with db_connect() as db_connection:<EOL><INDENT>with db_connection.cursor() as cursor:<EOL><INDENT>kwargs['<STR_LIT>'] = cursor<EOL>return func(*args, **kwargs)<EOL><DEDENT><DEDENT><DEDENT>return wrapped<EOL>", "docstring": "Decorator that supplies a cursor to the function.\n    This passes in a psycopg2 Cursor as the argument 'cursor'.\n    It also accepts a cursor if one is given.", "id": "f15202:m1"}
{"signature": "@contextlib.contextmanager<EOL>def db_connect(connection_string=None, **kwargs):", "body": "if connection_string is None:<EOL><INDENT>connection_string = get_current_registry().settings[CONNECTION_STRING]<EOL><DEDENT>db_conn = psycopg2.connect(connection_string, **kwargs)<EOL>try:<EOL><INDENT>with db_conn:<EOL><INDENT>yield db_conn<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>db_conn.close()<EOL><DEDENT>", "docstring": "Function to supply a database connection object.", "id": "f15202:m0"}
{"signature": "def accept_publication_license(cursor, publication_id, user_id,<EOL>document_ids, is_accepted=False):", "body": "cursor.execute(\"\"\"<STR_LIT>\"\"\",<EOL>(is_accepted, publication_id, user_id, document_ids,))<EOL>", "docstring": "Accept or deny  the document license for the publication\n    (``publication_id``) and user (at ``user_id``)\n    for the documents (listed by id as ``document_ids``).", "id": "f15202:m30"}
{"signature": "def _dissect_roles(metadata):", "body": "for role_key in cnxepub.ATTRIBUTED_ROLE_KEYS:<EOL><INDENT>for user in metadata.get(role_key, []):<EOL><INDENT>if user['<STR_LIT:type>'] != '<STR_LIT>':<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>uid = parse_user_uri(user['<STR_LIT:id>'])<EOL>yield uid, role_key<EOL><DEDENT><DEDENT>raise StopIteration()<EOL>", "docstring": "Given a model's ``metadata``, iterate over the roles.\n    Return values are the role identifier and role type as a tuple.", "id": "f15202:m4"}
{"signature": "def _validate_license(model):", "body": "license_mapping = obtain_licenses()<EOL>try:<EOL><INDENT>license_url = model.metadata['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>raise exceptions.MissingRequiredMetadata('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>license = license_mapping[license_url]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise exceptions.InvalidLicense(license_url)<EOL><DEDENT>if not license['<STR_LIT>']:<EOL><INDENT>raise exceptions.InvalidLicense(license_url)<EOL><DEDENT>", "docstring": "Given the model, check the license is one valid for publication.", "id": "f15202:m10"}
{"signature": "def _role_type_to_db_type(type_):", "body": "with db_connect() as db_conn:<EOL><INDENT>with db_conn.cursor() as cursor:<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\")<EOL>db_types = cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>return dict(list(zip(cnxepub.ATTRIBUTED_ROLE_KEYS, db_types)))[type_]<EOL>", "docstring": "Translates a role type (a value found in\n    ``cnxepub.ATTRIBUTED_ROLE_KEYS``) to a database compatible\n    value for ``role_types``.", "id": "f15202:m3"}
{"signature": "def upsert_role_requests(cursor, uuid_, roles):", "body": "if not isinstance(roles, (list, set, tuple)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>.format(type(roles)))<EOL><DEDENT>acceptors = set([(x['<STR_LIT>'], x['<STR_LIT>'],) for x in roles])<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", (uuid_,))<EOL>existing_roles = cursor.fetchall()<EOL>existing_acceptors = set([(r, t,) for r, t, _ in existing_roles])<EOL>new_acceptors = acceptors.difference(existing_acceptors)<EOL>for acceptor, type_ in new_acceptors:<EOL><INDENT>has_accepted = [x.get('<STR_LIT>', None)<EOL>for x in roles<EOL>if acceptor == x['<STR_LIT>'] and type_ == x['<STR_LIT>']][<NUM_LIT:0>]<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", (uuid_, acceptor, type_, has_accepted,))<EOL><DEDENT>acceptors = set([<EOL>(x['<STR_LIT>'], x['<STR_LIT>'], x.get('<STR_LIT>', None),)<EOL>for x in roles<EOL>if (x['<STR_LIT>'], x.get('<STR_LIT>', None),) not in new_acceptors<EOL>])<EOL>existing_acceptors = set([<EOL>x for x in existing_roles<EOL>if (x[<NUM_LIT:0>], x[<NUM_LIT:1>],) not in new_acceptors<EOL>])<EOL>tobe_updated_acceptors = acceptors.difference(existing_acceptors)<EOL>for uid, type_, has_accepted in tobe_updated_acceptors:<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\",<EOL>(has_accepted, uuid_, uid, type_,))<EOL><DEDENT>", "docstring": "Given a ``uuid`` and list of dicts containing the ``uid`` and\n    ``role`` for creating a role acceptance entry. The ``roles`` dict\n    can optionally contain a ``has_accepted`` value, which will default\n    to true.", "id": "f15202:m34"}
{"signature": "def _insert_resource_file(cursor, module_ident, resource):", "body": "with resource.open() as file:<EOL><INDENT>fileid, _ = _insert_file(cursor, file, resource.media_type)<EOL><DEDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\",<EOL>(fileid, module_ident, resource.filename,))<EOL>try:<EOL><INDENT>is_same_file = cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>except TypeError:  <EOL><INDENT>is_same_file = None<EOL><DEDENT>if is_same_file:<EOL><INDENT>return<EOL><DEDENT>elif is_same_file is not None:  <EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>args = (module_ident, fileid, resource.filename,)<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", args)<EOL>", "docstring": "Insert a resource into the modules_files table. This will\n    create a new file entry or associates an existing one.", "id": "f15204:m6"}
{"signature": "def publish_model(cursor, model, publisher, message):", "body": "publishers = publisher<EOL>if isinstance(publishers, list) and len(publishers) > <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>.format(len(publishers), publishers))<EOL><DEDENT>module_ident, ident_hash = _insert_metadata(cursor, model,<EOL>publisher, message)<EOL>for resource in getattr(model, '<STR_LIT>', []):<EOL><INDENT>_insert_resource_file(cursor, module_ident, resource)<EOL><DEDENT>if isinstance(model, Document):<EOL><INDENT>html = bytes(cnxepub.DocumentContentFormatter(model))<EOL>sha1 = hashlib.new('<STR_LIT>', html).hexdigest()<EOL>cursor.execute(\"<STR_LIT>\", (sha1,))<EOL>try:<EOL><INDENT>fileid = cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>except TypeError:<EOL><INDENT>file_args = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:data>': psycopg2.Binary(html),<EOL>}<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", file_args)<EOL>fileid = cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>args = {<EOL>'<STR_LIT>': module_ident,<EOL>'<STR_LIT:filename>': '<STR_LIT>',<EOL>'<STR_LIT>': fileid,<EOL>}<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", args)<EOL><DEDENT>elif isinstance(model, Binder):<EOL><INDENT>tree = cnxepub.model_to_tree(model)<EOL>tree = _insert_tree(cursor, tree)<EOL><DEDENT>return ident_hash<EOL>", "docstring": "Publishes the ``model`` and return its ident_hash.", "id": "f15204:m8"}
{"signature": "def _insert_tree(cursor, tree, parent_id=None, index=<NUM_LIT:0>, is_collated=False):", "body": "if isinstance(tree, dict):<EOL><INDENT>if tree['<STR_LIT:id>'] == '<STR_LIT>':<EOL><INDENT>document_id = None<EOL>title = tree['<STR_LIT:title>']<EOL><DEDENT>else:<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\", (tree['<STR_LIT:id>'],))<EOL>try:<EOL><INDENT>document_id, document_title = cursor.fetchone()<EOL><DEDENT>except TypeError:  <EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>.format(tree['<STR_LIT:id>']))<EOL><DEDENT>if tree.get('<STR_LIT:title>', None):<EOL><INDENT>title = tree['<STR_LIT:title>']<EOL><DEDENT>else:<EOL><INDENT>title = document_title<EOL><DEDENT><DEDENT>is_latest = True<EOL>cursor.execute(TREE_NODE_INSERT,<EOL>dict(document_id=document_id, parent_id=parent_id,<EOL>title=title, child_order=index,<EOL>is_latest=is_latest, is_collated=is_collated))<EOL>node_id = cursor.fetchone()[<NUM_LIT:0>]<EOL>if '<STR_LIT>' in tree:<EOL><INDENT>_insert_tree(cursor, tree['<STR_LIT>'], parent_id=node_id,<EOL>is_collated=is_collated)<EOL><DEDENT><DEDENT>elif isinstance(tree, list):<EOL><INDENT>for tree_node in tree:<EOL><INDENT>_insert_tree(cursor, tree_node, parent_id=parent_id,<EOL>index=tree.index(tree_node), is_collated=is_collated)<EOL><DEDENT><DEDENT>", "docstring": "Inserts a binder tree into the archive.", "id": "f15204:m7"}
{"signature": "def _insert_optional_roles(cursor, model, ident):", "body": "optional_roles = [<EOL>('<STR_LIT>', <NUM_LIT:4>,),<EOL>('<STR_LIT>', <NUM_LIT:5>,),<EOL>]<EOL>for attr, role_id in optional_roles:<EOL><INDENT>roles = model.metadata.get(attr)<EOL>if not roles:<EOL><INDENT>continue<EOL><DEDENT>usernames = [parse_user_uri(x['<STR_LIT:id>']) for x in roles]<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", (ident, role_id, usernames,))<EOL><DEDENT>", "docstring": "Inserts the optional roles if values for the optional roles\n    exist.", "id": "f15204:m2"}
{"signature": "def publish_collated_document(cursor, model, parent_model):", "body": "html = bytes(cnxepub.DocumentContentFormatter(model))<EOL>sha1 = hashlib.new('<STR_LIT>', html).hexdigest()<EOL>cursor.execute(\"<STR_LIT>\", (sha1,))<EOL>try:<EOL><INDENT>fileid = cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>except TypeError:<EOL><INDENT>file_args = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:data>': psycopg2.Binary(html),<EOL>}<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", file_args)<EOL>fileid = cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>args = {<EOL>'<STR_LIT>': model.ident_hash,<EOL>'<STR_LIT>': parent_model.ident_hash,<EOL>'<STR_LIT>': fileid,<EOL>}<EOL>stmt = \"\"\"<STR_LIT>\"\"\"<EOL>cursor.execute(stmt, args)<EOL>", "docstring": "Publish a given `module`'s collated content in the context of\n    the `parent_model`. Note, the model's content is expected to already\n    have the collated content. This will just persist that content to\n    the archive.", "id": "f15204:m10"}
{"signature": "def rebuild_collection_tree(cursor, ident_hash, history_map):", "body": "collection_tree_sql = \"\"\"<STR_LIT>\"\"\"<EOL>tree_insert_sql = \"\"\"<STR_LIT>\"\"\"<EOL>def get_tree():<EOL><INDENT>cursor.execute(collection_tree_sql, (ident_hash,))<EOL>for row in cursor.fetchall():<EOL><INDENT>yield row[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>def insert(fields):<EOL><INDENT>cursor.execute(tree_insert_sql, fields)<EOL>results = cursor.fetchone()[<NUM_LIT:0>]<EOL>return results<EOL><DEDENT>tree = {}  <EOL>children = {}  <EOL>for node in get_tree():<EOL><INDENT>tree[node['<STR_LIT>']] = node<EOL>children.setdefault(node['<STR_LIT>'], [])<EOL>children[node['<STR_LIT>']].append(node['<STR_LIT>'])<EOL><DEDENT>def build_tree(nodeid, parent_id):<EOL><INDENT>data = tree[nodeid]<EOL>data['<STR_LIT>'] = parent_id<EOL>if history_map.get(data['<STR_LIT>']) is not Noneand (data['<STR_LIT>'] or parent_id is None):<EOL><INDENT>data['<STR_LIT>'] = history_map[data['<STR_LIT>']]<EOL><DEDENT>new_nodeid = insert(data)<EOL>for child_nodeid in children.get(nodeid, []):<EOL><INDENT>build_tree(child_nodeid, new_nodeid)<EOL><DEDENT><DEDENT>root_node = children[None][<NUM_LIT:0>]<EOL>build_tree(root_node, None)<EOL>", "docstring": "Create a new tree for the collection based on the old tree but with\n    new document ids", "id": "f15204:m16"}
{"signature": "def republish_binders(cursor, models):", "body": "documents = set([])<EOL>binders = set([])<EOL>history_mapping = {}  <EOL>if not isinstance(models, (list, tuple, set,)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(models))<EOL><DEDENT>for model in models:<EOL><INDENT>if isinstance(model, (cnxepub.Binder,)):<EOL><INDENT>binders.add(split_ident_hash(model.ident_hash)[<NUM_LIT:0>])<EOL>for doc in cnxepub.flatten_to_documents(model):<EOL><INDENT>documents.add(split_ident_hash(doc.ident_hash))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>documents.add(split_ident_hash(model.ident_hash))<EOL><DEDENT><DEDENT>to_be_republished = []<EOL>for (uuid, version) in documents:<EOL><INDENT>ident_hash = join_ident_hash(uuid, version)<EOL>previous_ident_hash = get_previous_publication(cursor, ident_hash)<EOL>if previous_ident_hash is None:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>history_mapping[previous_ident_hash] = ident_hash<EOL><DEDENT>cursor.execute(", "docstring": "Republish the Binders that share Documents in the publication context.\n    This needs to be given all the models in the publication context.", "id": "f15204:m12"}
{"signature": "def _insert_metadata(cursor, model, publisher, message):", "body": "params = model.metadata.copy()<EOL>params['<STR_LIT>'] = publisher<EOL>params['<STR_LIT>'] = message<EOL>params['<STR_LIT>'] = _model_to_portaltype(model)<EOL>params['<STR_LIT>'] = str(cnxepub.DocumentSummaryFormatter(model))<EOL>for person_field in ATTRIBUTED_ROLE_KEYS:<EOL><INDENT>params[person_field] = [parse_user_uri(x['<STR_LIT:id>'])<EOL>for x in params.get(person_field, [])]<EOL><DEDENT>params['<STR_LIT>'] = parse_parent_ident_hash(model)<EOL>if model.ident_hash is not None:<EOL><INDENT>uuid, version = split_ident_hash(model.ident_hash,<EOL>split_version=True)<EOL>params['<STR_LIT>'] = uuid<EOL>params['<STR_LIT>'], params['<STR_LIT>'] = version<EOL>cursor.execute(\"<STR_LIT>\",<EOL>(uuid,))<EOL>try:<EOL><INDENT>moduleid = cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>except TypeError:  <EOL><INDENT>moduleid = None<EOL><DEDENT>params['<STR_LIT>'] = moduleid<EOL>cursor.execute(\"<STR_LIT>\",<EOL>(uuid,))<EOL>try:<EOL><INDENT>cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>except TypeError:  <EOL><INDENT>cursor.execute(\"<STR_LIT>\",<EOL>(uuid,))<EOL><DEDENT>created = model.metadata.get('<STR_LIT>', None)<EOL>stmt = MODULE_INSERTION_TEMPLATE.format(**{<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': moduleid is None and \"<STR_LIT>\" or \"<STR_LIT>\",<EOL>'<STR_LIT>': created is None and \"<STR_LIT>\" or \"<STR_LIT>\",<EOL>})<EOL><DEDENT>else:<EOL><INDENT>created = model.metadata.get('<STR_LIT>', None)<EOL>stmt = MODULE_INSERTION_TEMPLATE.format(**{<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': \"<STR_LIT>\",<EOL>'<STR_LIT>': created is None and \"<STR_LIT>\" or \"<STR_LIT>\",<EOL>})<EOL><DEDENT>cursor.execute(stmt, params)<EOL>module_ident, ident_hash = cursor.fetchone()<EOL>_insert_optional_roles(cursor, model, module_ident)<EOL>return module_ident, 
ident_hash<EOL>", "docstring": "Insert a module with the given ``metadata``.", "id": "f15204:m3"}
{"signature": "def __init__(self, metadata_key, value):", "body": "super(InvalidRole, self).__init__()<EOL>self._key = metadata_key<EOL>self._value = value<EOL>", "docstring": "``metadata_key`` tells which metadata role name the\n        invalid role is under. ``value`` is the invalid role.", "id": "f15206:c6:m0"}
{"signature": "def __init__(self, document_pointer, exists, is_document):", "body": "super(InvalidDocumentPointer, self).__init__()<EOL>self._document_pointer = document_pointer<EOL>self._exists = bool(exists)<EOL>self._is_document = bool(is_document)<EOL>", "docstring": "``document_pointer`` is the DocumentPointer object that contains\n        the invalid reference.", "id": "f15206:c9:m0"}
{"signature": "def __init__(self, metadata_key, value, original_exception=None):", "body": "super(InvalidMetadata, self).__init__()<EOL>self._key = metadata_key<EOL>self._value = value<EOL>self._original_exception = original_exception<EOL>", "docstring": "``metadata_key`` tells which metadata has the\n        invalid ``value``. If ``original_exception`` is supplied, it will be\n        used to supply additional information.", "id": "f15206:c7:m0"}
{"signature": "@property<EOL><INDENT>def __dict__(self):<DEDENT>", "body": "data = {<EOL>'<STR_LIT:code>': self.code,<EOL>'<STR_LIT:type>': self.__class__.__name__,<EOL>'<STR_LIT>': self.publication_id,<EOL>'<STR_LIT>': self.epub_filename,<EOL>'<STR_LIT>': self.pending_document_id,<EOL>'<STR_LIT>': self.pending_ident_hash,<EOL>}<EOL>return data<EOL>", "docstring": "Render the except to dict", "id": "f15206:c2:m5"}
{"signature": "def processor():  ", "body": "registry = get_current_registry()<EOL>settings = registry.settings<EOL>connection_string = settings[CONNECTION_STRING]<EOL>channels = _get_channels(settings)<EOL>with psycopg2.connect(connection_string) as conn:<EOL><INDENT>conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)<EOL>with conn.cursor() as cursor:<EOL><INDENT>for channel in channels:<EOL><INDENT>cursor.execute('<STR_LIT>'.format(channel))<EOL>logger.debug('<STR_LIT>'<EOL>.format(channel))<EOL><DEDENT><DEDENT>registry.notify(ChannelProcessingStartUpEvent())<EOL>rlist = [conn]  <EOL>wlist = []  <EOL>xlist = []  <EOL>timeout = <NUM_LIT:5><EOL>while True:<EOL><INDENT>if select.select(rlist, wlist, xlist, timeout) != ([], [], []):<EOL><INDENT>conn.poll()<EOL>while conn.notifies:<EOL><INDENT>notif = conn.notifies.pop(<NUM_LIT:0>)<EOL>logger.debug('<STR_LIT>'<EOL>.format(notif.pid, notif.channel,<EOL>notif.payload))<EOL>event = create_pg_notify_event(notif)<EOL>try:<EOL><INDENT>registry.notify(event)<EOL><DEDENT>except Exception:<EOL><INDENT>logger.exception('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Churns over PostgreSQL notifications on configured channels.\n    This requires the application be setup and the registry be available.\n    This function uses the database connection string and a list of\n    pre configured channels.", "id": "f15208:m2"}
{"signature": "@view_config(route_name='<STR_LIT>', request_method='<STR_LIT:GET>',<EOL>renderer='<STR_LIT>',<EOL>permission='<STR_LIT>', http_cache=<NUM_LIT:0>)<EOL>def admin_print_styles_single(request):", "body": "style = request.matchdict['<STR_LIT>']<EOL>with db_connect(cursor_factory=DictCursor) as db_conn:<EOL><INDENT>with db_conn.cursor() as cursor:<EOL><INDENT>if style != '<STR_LIT>':<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\", vars=(style,))<EOL>info = cursor.fetchall()<EOL>if len(info) < <NUM_LIT:1>:<EOL><INDENT>current_recipe = None<EOL>recipe_type = None<EOL>status = None<EOL><DEDENT>else:<EOL><INDENT>current_recipe = info[<NUM_LIT:0>]['<STR_LIT>']<EOL>recipe_type = info[<NUM_LIT:0>]['<STR_LIT>']<EOL>status = '<STR_LIT>'<EOL><DEDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\", vars=(style,))<EOL><DEDENT>else:<EOL><INDENT>current_recipe = '<STR_LIT>'<EOL>recipe_type = '<STR_LIT>'<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", vars=(style,))<EOL>status = '<STR_LIT>'<EOL><DEDENT>collections = []<EOL>for row in cursor.fetchall():<EOL><INDENT>recipe = row['<STR_LIT>']<EOL>if (status != '<STR_LIT>' and<EOL>current_recipe is not None and<EOL>recipe != current_recipe):<EOL><INDENT>status = '<STR_LIT>'<EOL><DEDENT>collections.append({<EOL>'<STR_LIT:title>': row['<STR_LIT:name>'].decode('<STR_LIT:utf-8>'),<EOL>'<STR_LIT>': row['<STR_LIT>'],<EOL>'<STR_LIT>': row['<STR_LIT>'],<EOL>'<STR_LIT>': row['<STR_LIT>'],<EOL>'<STR_LIT>': request.route_path('<STR_LIT>',<EOL>hash=row['<STR_LIT>']),<EOL>'<STR_LIT>': row['<STR_LIT>'],<EOL>'<STR_LIT>': row['<STR_LIT>'],<EOL>'<STR_LIT>': request.route_path('<STR_LIT>',<EOL>ident_hash=row['<STR_LIT>']),<EOL>'<STR_LIT:status>': status,<EOL>'<STR_LIT>': request.route_path(<EOL>'<STR_LIT>', uuid=row['<STR_LIT>']),<EOL>})<EOL><DEDENT><DEDENT><DEDENT>return {'<STR_LIT>': len(collections),<EOL>'<STR_LIT>': collections,<EOL>'<STR_LIT>': style,<EOL>'<STR_LIT>': recipe_type}<EOL>", "docstring": "Returns all books with any version of the given 
print style.\n\n    Returns the print_style, recipe type, num books using the print_style,\n    along with a dictionary of the book, author, revision date, recipe,\n    tag of the print_style, and a link to the content.", "id": "f15214:m1"}
{"signature": "@view_config(route_name='<STR_LIT>', request_method='<STR_LIT:GET>',<EOL>renderer='<STR_LIT>',<EOL>permission='<STR_LIT>', http_cache=<NUM_LIT:0>)<EOL>def admin_print_styles(request):", "body": "styles = []<EOL>with db_connect(cursor_factory=DictCursor) as db_conn:<EOL><INDENT>with db_conn.cursor() as cursor:<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\")<EOL>for row in cursor.fetchall():<EOL><INDENT>styles.append({<EOL>'<STR_LIT>': row['<STR_LIT>'],<EOL>'<STR_LIT:title>': row['<STR_LIT:title>'],<EOL>'<STR_LIT:type>': row['<STR_LIT:type>'],<EOL>'<STR_LIT>': row['<STR_LIT>'],<EOL>'<STR_LIT>': row['<STR_LIT>'],<EOL>'<STR_LIT>': row['<STR_LIT>'],<EOL>'<STR_LIT>': row['<STR_LIT:count>'],<EOL>'<STR_LIT>': row['<STR_LIT>'],<EOL>'<STR_LIT>': request.route_path('<STR_LIT>',<EOL>style=row['<STR_LIT>'])<EOL>})<EOL><DEDENT><DEDENT><DEDENT>return {'<STR_LIT>': styles}<EOL>", "docstring": "Returns a dictionary of all unique print_styles, and their latest tag,\nrevision, and recipe_type.", "id": "f15214:m0"}
{"signature": "@view_config(route_name='<STR_LIT>', request_method='<STR_LIT:POST>', renderer='<STR_LIT>',<EOL>permission='<STR_LIT>', http_cache=<NUM_LIT:0>)<EOL>def publish(request):", "body": "if '<STR_LIT>' not in request.POST:<EOL><INDENT>raise httpexceptions.HTTPBadRequest(\"<STR_LIT>\")<EOL><DEDENT>is_pre_publication = asbool(request.POST.get('<STR_LIT>'))<EOL>epub_upload = request.POST['<STR_LIT>'].file<EOL>try:<EOL><INDENT>epub = cnxepub.EPUB.from_file(epub_upload)<EOL><DEDENT>except:  <EOL><INDENT>raise httpexceptions.HTTPBadRequest('<STR_LIT>')<EOL><DEDENT>with db_connect() as db_conn:<EOL><INDENT>with db_conn.cursor() as cursor:<EOL><INDENT>epub_upload.seek(<NUM_LIT:0>)<EOL>publication_id, publications = add_publication(<EOL>cursor, epub, epub_upload, is_pre_publication)<EOL><DEDENT><DEDENT>state, messages = poke_publication_state(publication_id)<EOL>response_data = {<EOL>'<STR_LIT>': publication_id,<EOL>'<STR_LIT>': publications,<EOL>'<STR_LIT:state>': state,<EOL>'<STR_LIT>': messages,<EOL>}<EOL>return response_data<EOL>", "docstring": "Accept a publication request at form value 'epub", "id": "f15217:m0"}
{"signature": "@view_config(route_name='<STR_LIT>', request_method='<STR_LIT:GET>',<EOL>accept='<STR_LIT:application/json>', renderer='<STR_LIT>', http_cache=<NUM_LIT:0>)<EOL>def get_accept_role(request):", "body": "publication_id = request.matchdict['<STR_LIT:id>']<EOL>user_id = request.matchdict['<STR_LIT>']<EOL>with db_connect() as db_conn:<EOL><INDENT>with db_conn.cursor() as cursor:<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\",<EOL>(publication_id, user_id))<EOL>user_documents = [r[<NUM_LIT:0>] for r in cursor.fetchall()]<EOL><DEDENT><DEDENT>return {'<STR_LIT>': publication_id,<EOL>'<STR_LIT>': user_id,<EOL>'<STR_LIT>': user_documents,<EOL>}<EOL>", "docstring": "This produces JSON data for a user (at ``uid``) to view the role(s)\n    they have accepted or will need to accept for a publication (at ``id``).", "id": "f15217:m4"}
{"signature": "def declare_browsable_routes(config):", "body": "<EOL>config.add_notfound_view(default_exceptionresponse_view,<EOL>append_slash=True)<EOL>add_route = config.add_route<EOL>add_route('<STR_LIT>', '<STR_LIT>')<EOL>add_route('<STR_LIT>', '<STR_LIT>')<EOL>add_route('<STR_LIT>', '<STR_LIT>')<EOL>add_route('<STR_LIT>', '<STR_LIT>',<EOL>request_method='<STR_LIT:GET>')<EOL>add_route('<STR_LIT>', '<STR_LIT>',<EOL>request_method='<STR_LIT:POST>')<EOL>add_route('<STR_LIT>', '<STR_LIT>',<EOL>request_method='<STR_LIT>')<EOL>add_route('<STR_LIT>', '<STR_LIT>',<EOL>request_method='<STR_LIT:GET>')<EOL>add_route('<STR_LIT>', '<STR_LIT>',<EOL>request_method='<STR_LIT:POST>')<EOL>add_route('<STR_LIT>', '<STR_LIT>')<EOL>add_route('<STR_LIT>', '<STR_LIT>')<EOL>add_route('<STR_LIT>', '<STR_LIT>')<EOL>add_route('<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": "Declaration of routes that can be browsed by users.", "id": "f15219:m1"}
{"signature": "@view_config(route_name='<STR_LIT>',<EOL>request_method='<STR_LIT:GET>',<EOL>accept='<STR_LIT:application/json>', renderer='<STR_LIT>', http_cache=<NUM_LIT:0>)<EOL>def get_license_request(request):", "body": "uuid_ = request.matchdict['<STR_LIT>']<EOL>user_id = request.matchdict.get('<STR_LIT>')<EOL>args = [uuid_]<EOL>if user_id is not None:<EOL><INDENT>fmt_conditional = \"<STR_LIT>\"<EOL>args.append(user_id)<EOL><DEDENT>else:<EOL><INDENT>fmt_conditional = \"<STR_LIT>\"<EOL><DEDENT>with db_connect() as db_conn:<EOL><INDENT>with db_conn.cursor() as cursor:<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\", (uuid_,))<EOL>try:<EOL><INDENT>license_url = cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>except TypeError:  <EOL><INDENT>raise httpexceptions.HTTPNotFound()<EOL><DEDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\".format(fmt_conditional), args)<EOL>acceptances = [r[<NUM_LIT:0>] for r in cursor.fetchall()]<EOL><DEDENT><DEDENT>if user_id is not None:<EOL><INDENT>acceptances = acceptances[<NUM_LIT:0>]<EOL><DEDENT>resp_value = {<EOL>'<STR_LIT>': license_url,<EOL>'<STR_LIT>': acceptances,<EOL>}<EOL>return resp_value<EOL>", "docstring": "Returns a list of those accepting the license.", "id": "f15220:m0"}
{"signature": "@view_config(route_name='<STR_LIT>',<EOL>request_method='<STR_LIT:GET>',<EOL>accept='<STR_LIT:application/json>', renderer='<STR_LIT>', http_cache=<NUM_LIT:0>)<EOL>def get_acl(request):", "body": "uuid_ = request.matchdict['<STR_LIT>']<EOL>with db_connect() as db_conn:<EOL><INDENT>with db_conn.cursor() as cursor:<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\", (uuid_,))<EOL>try:<EOL><INDENT>cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>except TypeError:<EOL><INDENT>raise httpexceptions.HTTPNotFound()<EOL><DEDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\", (uuid_,))<EOL>acl = [r[<NUM_LIT:0>] for r in cursor.fetchall()]<EOL><DEDENT><DEDENT>return acl<EOL>", "docstring": "Returns the ACL for the given content identified by ``uuid``.", "id": "f15220:m6"}
{"signature": "@view_config(route_name='<STR_LIT>',<EOL>permission='<STR_LIT>',<EOL>request_method='<STR_LIT:POST>', accept='<STR_LIT:application/json>', http_cache=<NUM_LIT:0>)<EOL>def post_roles_request(request):", "body": "uuid_ = request.matchdict['<STR_LIT>']<EOL>posted_roles = request.json<EOL>with db_connect() as db_conn:<EOL><INDENT>with db_conn.cursor() as cursor:<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\", (uuid_,))<EOL>try:<EOL><INDENT>cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>except TypeError:<EOL><INDENT>if request.has_permission('<STR_LIT>'):<EOL><INDENT>cursor.execute(\"\"\"<STR_LIT>\"\"\", (uuid_,))<EOL><DEDENT>else:<EOL><INDENT>raise httpexceptions.HTTPNotFound()<EOL><DEDENT><DEDENT>try:<EOL><INDENT>upsert_users(cursor, [r['<STR_LIT>'] for r in posted_roles])<EOL><DEDENT>except UserFetchError as exc:<EOL><INDENT>raise httpexceptions.HTTPBadRequest(exc.message)<EOL><DEDENT>upsert_role_requests(cursor, uuid_, posted_roles)<EOL><DEDENT><DEDENT>resp = request.response<EOL>resp.status_int = <NUM_LIT><EOL>return resp<EOL>", "docstring": "Submission to create a role acceptance request.", "id": "f15220:m4"}
{"signature": "@view_config(route_name='<STR_LIT>',<EOL>permission='<STR_LIT>',<EOL>request_method='<STR_LIT>', accept='<STR_LIT:application/json>', http_cache=<NUM_LIT:0>)<EOL>def delete_license_request(request):", "body": "uuid_ = request.matchdict['<STR_LIT>']<EOL>posted_uids = [x['<STR_LIT>'] for x in request.json.get('<STR_LIT>', [])]<EOL>with db_connect() as db_conn:<EOL><INDENT>with db_conn.cursor() as cursor:<EOL><INDENT>remove_license_requests(cursor, uuid_, posted_uids)<EOL><DEDENT><DEDENT>resp = request.response<EOL>resp.status_int = <NUM_LIT:200><EOL>return resp<EOL>", "docstring": "Submission to remove a license acceptance request.", "id": "f15220:m2"}
{"signature": "@with_db_cursor<EOL>def remove_baked(binder_ident_hash, cursor):", "body": "<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", (binder_ident_hash,))<EOL>cursor.execute(\"\"\"<STR_LIT>\"\"\", (binder_ident_hash,))<EOL>", "docstring": "Given a binder's ident_hash, remove the baked results.", "id": "f15221:m3"}
{"signature": "@with_db_cursor<EOL>def bake(binder, recipe_id, publisher, message, cursor):", "body": "recipe = _get_recipe(recipe_id, cursor)<EOL>includes = _formatter_callback_factory()<EOL>binder = collate_models(binder, ruleset=recipe, includes=includes)<EOL>def flatten_filter(model):<EOL><INDENT>return (isinstance(model, cnxepub.CompositeDocument) or<EOL>(isinstance(model, cnxepub.Binder) and<EOL>model.metadata.get('<STR_LIT:type>') == '<STR_LIT>'))<EOL><DEDENT>def only_documents_filter(model):<EOL><INDENT>return isinstance(model, cnxepub.Document)and not isinstance(model, cnxepub.CompositeDocument)<EOL><DEDENT>for doc in cnxepub.flatten_to(binder, flatten_filter):<EOL><INDENT>publish_composite_model(cursor, doc, binder, publisher, message)<EOL><DEDENT>for doc in cnxepub.flatten_to(binder, only_documents_filter):<EOL><INDENT>publish_collated_document(cursor, doc, binder)<EOL><DEDENT>tree = cnxepub.model_to_tree(binder)<EOL>publish_collated_tree(cursor, tree)<EOL>return []<EOL>", "docstring": "Given a `Binder` as `binder`, bake the contents and\n    persist those changes alongside the published content.", "id": "f15221:m2"}
{"signature": "def _discover_requesting_party(self, request):", "body": "user_id = None<EOL>api_key = request.headers.get('<STR_LIT>', None)<EOL>try:<EOL><INDENT>principal_info = self.user_info_by_key[api_key]<EOL><DEDENT>except KeyError:<EOL><INDENT>principal_info = None<EOL><DEDENT>if principal_info is not None:<EOL><INDENT>user_id = principal_info['<STR_LIT>']<EOL><DEDENT>return api_key, user_id, principal_info<EOL>", "docstring": "With the request object, discover who is making the request.\n        Returns both the api-key and the principal-id", "id": "f15222:c0:m1"}
{"signature": "@cache_manager.cache(expire=<NUM_LIT> * <NUM_LIT> * <NUM_LIT>)  <EOL>def lookup_api_key_info():", "body": "info = {}<EOL>with db_connect() as conn:<EOL><INDENT>with conn.cursor() as cursor:<EOL><INDENT>cursor.execute(ALL_KEY_INFO_SQL_STMT)<EOL>for row in cursor.fetchall():<EOL><INDENT>id, key, name, groups = row<EOL>user_id = \"<STR_LIT>\".format(id)<EOL>info[key] = dict(id=id, user_id=user_id,<EOL>name=name, groups=groups)<EOL><DEDENT><DEDENT><DEDENT>return info<EOL>", "docstring": "Given a dbapi cursor, lookup all the api keys and their information.", "id": "f15222:m0"}
{"signature": "def effective_principals(self, request):", "body": "api_key, user_id, info = self._discover_requesting_party(request)<EOL>if api_key is None or user_id is None:<EOL><INDENT>return []<EOL><DEDENT>try:<EOL><INDENT>principals = list(info['<STR_LIT>'])<EOL><DEDENT>except TypeError:<EOL><INDENT>principals = []<EOL><DEDENT>principals.append(security.Everyone)<EOL>principals.append(security.Authenticated)<EOL>return principals<EOL>", "docstring": "Return a sequence representing the effective principals\n        including the userid and any groups belonged to by the current\n        user, including 'system' groups such as Everyone and\n        Authenticated.", "id": "f15222:c0:m3"}
{"signature": "def run(self):", "body": "if self.args.roster_cache and os.path.exists(self.args.roster_cache):<EOL><INDENT>logging.info(\"<STR_LIT>\"<EOL>.format(self.args.roster_cache))<EOL>try:<EOL><INDENT>self.client.roster_client.load_roster(self.args.roster_cache)<EOL><DEDENT>except (IOError, ValueError) as err:<EOL><INDENT>logging.error(\"<STR_LIT>\".format(err))<EOL><DEDENT><DEDENT>self.client.connect()<EOL>self.client.run()<EOL>", "docstring": "Request client connection and start the main loop.", "id": "f15227:c0:m1"}
{"signature": "def disconnect(self):", "body": "self.client.disconnect()<EOL>self.client.run(timeout = <NUM_LIT:2>)<EOL>", "docstring": "Request disconnection and let the main loop run for a 2 more\n        seconds for graceful disconnection.", "id": "f15227:c0:m2"}
{"signature": "def main():", "body": "parser = argparse.ArgumentParser(description = '<STR_LIT>',<EOL>parents = [XMPPSettings.get_arg_parser()])<EOL>parser.add_argument('<STR_LIT:source>', metavar = '<STR_LIT>', <EOL>help = '<STR_LIT>')<EOL>parser.add_argument('<STR_LIT:target>', metavar = '<STR_LIT>', nargs = '<STR_LIT:?>',<EOL>help = '<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>',<EOL>action = '<STR_LIT>', dest = '<STR_LIT>',<EOL>const = logging.DEBUG, default = logging.INFO,<EOL>help = '<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', const = logging.ERROR,<EOL>action = '<STR_LIT>', dest = '<STR_LIT>',<EOL>help = '<STR_LIT>')<EOL>args = parser.parse_args()<EOL>settings = XMPPSettings()<EOL>settings.load_arguments(args)<EOL>if settings.get(\"<STR_LIT:password>\") is None:<EOL><INDENT>password = getpass(\"<STR_LIT>\".format(args.source))<EOL>if sys.version_info.major < <NUM_LIT:3>:<EOL><INDENT>password = password.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>settings[\"<STR_LIT:password>\"] = password<EOL><DEDENT>if sys.version_info.major < <NUM_LIT:3>:<EOL><INDENT>args.source = args.source.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>source = JID(args.source)<EOL>if args.target:<EOL><INDENT>if sys.version_info.major < <NUM_LIT:3>:<EOL><INDENT>args.target = args.target.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>target = JID(args.target)<EOL><DEDENT>else:<EOL><INDENT>target = JID(source.domain)<EOL><DEDENT>logging.basicConfig(level = args.log_level)<EOL>checker = VersionChecker(source, target, settings)<EOL>try:<EOL><INDENT>checker.run()<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>checker.disconnect()<EOL><DEDENT>", "docstring": "Parse the command-line arguments and run the tool.", "id": "f15229:m0"}
{"signature": "@event_handler(DisconnectedEvent)<EOL><INDENT>def handle_disconnected(self, event):<DEDENT>", "body": "return QUIT<EOL>", "docstring": "Quit the main loop upon disconnection.", "id": "f15229:c0:m6"}
{"signature": "def disconnect(self):", "body": "self.client.disconnect()<EOL>self.client.run(timeout = <NUM_LIT:2>)<EOL>", "docstring": "Request disconnection and let the main loop run for a 2 more\n        seconds for graceful disconnection.", "id": "f15229:c0:m2"}
{"signature": "@event_handler(AuthorizedEvent)<EOL><INDENT>def handle_authorized(self, event):<DEDENT>", "body": "request_software_version(self.client, self.target_jid,<EOL>self.success, self.failure)<EOL>", "docstring": "Send the initial presence after log-in.", "id": "f15229:c0:m3"}
{"signature": "def disconnect(self):", "body": "self.client.disconnect()<EOL>self.client.run(timeout = <NUM_LIT:2>)<EOL>", "docstring": "Request disconnection and let the main loop run for a 2 more\n        seconds for graceful disconnection.", "id": "f15230:c0:m2"}
{"signature": "@event_handler()<EOL><INDENT>def handle_all(self, event):<DEDENT>", "body": "logging.info(\"<STR_LIT>\".format(event))<EOL>", "docstring": "Log all events.", "id": "f15230:c0:m9"}
{"signature": "def main():", "body": "parser = argparse.ArgumentParser(description = '<STR_LIT>',<EOL>parents = [XMPPSettings.get_arg_parser()])<EOL>parser.add_argument('<STR_LIT>', metavar = '<STR_LIT>', <EOL>help = '<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>',<EOL>action = '<STR_LIT>', dest = '<STR_LIT>',<EOL>const = logging.DEBUG, default = logging.INFO,<EOL>help = '<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', const = logging.ERROR,<EOL>action = '<STR_LIT>', dest = '<STR_LIT>',<EOL>help = '<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', action = '<STR_LIT:store_true>',<EOL>help = '<STR_LIT>')<EOL>args = parser.parse_args()<EOL>settings = XMPPSettings({<EOL>\"<STR_LIT>\": \"<STR_LIT>\"<EOL>})<EOL>settings.load_arguments(args)<EOL>if settings.get(\"<STR_LIT:password>\") is None:<EOL><INDENT>password = getpass(\"<STR_LIT>\".format(args.jid))<EOL>if sys.version_info.major < <NUM_LIT:3>:<EOL><INDENT>password = password.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>settings[\"<STR_LIT:password>\"] = password<EOL><DEDENT>if sys.version_info.major < <NUM_LIT:3>:<EOL><INDENT>args.jid = args.jid.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>logging.basicConfig(level = args.log_level)<EOL>if args.trace:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>handler = logging.StreamHandler()<EOL>handler.setLevel(logging.DEBUG)<EOL>for logger in (\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>logger = logging.getLogger(logger)<EOL>logger.setLevel(logging.DEBUG)<EOL>logger.addHandler(handler)<EOL>logger.propagate = False<EOL><DEDENT><DEDENT>bot = EchoBot(JID(args.jid), settings)<EOL>try:<EOL><INDENT>bot.run()<EOL><DEDENT>except KeyboardInterrupt:<EOL><INDENT>bot.disconnect()<EOL><DEDENT>", "docstring": "Parse the command-line arguments and run the bot.", "id": "f15230:m0"}
{"signature": "@event_handler(DisconnectedEvent)<EOL><INDENT>def handle_disconnected(self, event):<DEDENT>", "body": "return QUIT<EOL>", "docstring": "Quit the main loop upon disconnection.", "id": "f15230:c0:m8"}
{"signature": "def _bind_success(self, stanza):", "body": "<EOL>payload = stanza.get_payload(ResourceBindingPayload)<EOL>jid = payload.jid<EOL>if not jid:<EOL><INDENT>raise BadRequestProtocolError(u\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>self.stream.me = jid<EOL>self.stream.event(AuthorizedEvent(self.stream.me))<EOL>", "docstring": "Handle resource binding success.\n\n        [initiating entity only]\n\n        :Parameters:\n            - `stanza`: <iq type=\"result\"/> stanza received.\n\n        Set `streambase.StreamBase.me` to the full JID negotiated.", "id": "f15231:c1:m4"}
{"signature": "def handle_stream_features(self, stream, features):", "body": "logger.debug(u\"<STR_LIT>\".format(<EOL>element_to_unicode(features)))<EOL>element = features.find(FEATURE_BIND)<EOL>if element is None:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>resource = stream.settings[\"<STR_LIT>\"]<EOL>self.bind(stream, resource)<EOL>return StreamFeatureHandled(\"<STR_LIT>\", mandatory = True)<EOL>", "docstring": "Process incoming <stream:features/> element.\n\n        [initiating entity only]\n\n        The received features element is available in `features`.", "id": "f15231:c1:m2"}
{"signature": "def make_stream_features(self, stream, features):", "body": "self.stream = stream<EOL>if stream.peer_authenticated and not stream.peer.resource:<EOL><INDENT>ElementTree.SubElement(features, FEATURE_BIND)<EOL><DEDENT>", "docstring": "Add resource binding feature to the <features/> element of the\n        stream.\n\n        [receving entity only]\n\n        :returns: update <features/> element.", "id": "f15231:c1:m1"}
{"signature": "def finish(self, data):", "body": "return Success({\"<STR_LIT>\": self.authzid})<EOL>", "docstring": "Handle authentication success information from the server.\n\n        :Parameters:\n            - `data`: the optional additional data returned with the success.\n        :Types:\n            - `data`: `bytes`\n\n        :return: a success indicator.\n        :returntype: `Success`", "id": "f15232:c0:m3"}
{"signature": "def filter_mechanism_list(mechanisms, properties, allow_insecure = False,<EOL>server_side = False):", "body": "<EOL>result = []<EOL>for mechanism in mechanisms:<EOL><INDENT>try:<EOL><INDENT>if server_side:<EOL><INDENT>klass = SERVER_MECHANISMS_D[mechanism]<EOL><DEDENT>else:<EOL><INDENT>klass = CLIENT_MECHANISMS_D[mechanism]<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(mechanism))<EOL>continue<EOL><DEDENT>secure = properties.get(\"<STR_LIT>\")<EOL>if not allow_insecure and not klass._pyxmpp_sasl_secure and not secure:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(mechanism))<EOL>continue<EOL><DEDENT>if not klass.are_properties_sufficient(properties):<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(mechanism))<EOL>continue<EOL><DEDENT>result.append(mechanism)<EOL><DEDENT>return result<EOL>", "docstring": "Filter a mechanisms list only to include those mechanisms that cans\n    succeed with the provided properties and are secure enough.\n\n    :Parameters:\n        - `mechanisms`: list of the mechanisms names\n        - `properties`: available authentication properties\n        - `allow_insecure`: allow insecure mechanisms\n    :Types:\n        - `mechanisms`: sequence of `unicode`\n        - `properties`: mapping\n        - `allow_insecure`: `bool`\n\n    :returntype: `list` of `unicode`", "id": "f15234:m2"}
{"signature": "def server_authenticator_factory(mechanism, password_database):", "body": "authenticator = SERVER_MECHANISMS_D[mechanism]<EOL>return authenticator(password_database)<EOL>", "docstring": "Create a server authenticator object for given SASL mechanism and\n    password databaser.\n\n    :Parameters:\n        - `mechanism`: name of the SASL mechanism (\"PLAIN\", \"DIGEST-MD5\" or \"GSSAPI\").\n        - `password_database`: name of the password database object to be used\n          for authentication credentials verification.\n    :Types:\n        - `mechanism`: `str`\n        - `password_database`: `PasswordDatabase`\n\n    :raises `KeyError`: if no server authenticator is available for this\n              mechanism\n\n    :return: new authenticator.\n    :returntype: `sasl.core.ServerAuthenticator`", "id": "f15234:m1"}
{"signature": "def client_authenticator_factory(mechanism):", "body": "authenticator = CLIENT_MECHANISMS_D[mechanism]<EOL>return authenticator()<EOL>", "docstring": "Create a client authenticator object for given SASL mechanism.\n\n    :Parameters:\n        - `mechanism`: name of the SASL mechanism (\"PLAIN\", \"DIGEST-MD5\" or\n          \"GSSAPI\").\n    :Types:\n        - `mechanism`: `unicode`\n\n    :raises `KeyError`: if no client authenticator is available for this\n              mechanism\n\n    :return: new authenticator.\n    :returntype: `sasl.core.ClientAuthenticator`", "id": "f15234:m0"}
{"signature": "def encode(self):", "body": "if self.data is None:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>elif not self.data:<EOL><INDENT>return \"<STR_LIT:=>\"<EOL><DEDENT>else:<EOL><INDENT>ret = standard_b64encode(self.data)<EOL>return ret.decode(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Base64-encode the data contained in the reply when appropriate.\n\n        :return: encoded data.\n        :returntype: `unicode`", "id": "f15236:c1:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def start(self, properties):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Start the authentication process.\n\n        :Parameters:\n            - `properties`: the `authentication properties`_\n        :Types:\n            - `properties`: mapping\n\n        :return: the initial response to send to the server or a failuer\n            indicator.\n        :returntype: `Response` or `Failure`", "id": "f15236:c6:m2"}
{"signature": "def get_password(self, username, acceptable_formats, properties):", "body": "<EOL>return None, None<EOL>", "docstring": "Get the password for user authentication.\n\n        By default returns (None, None) providing no password. Should be\n        overridden in derived classes unless only `check_password` functionality\n        is available.\n\n        :Parameters:\n            - `username`: the username for which the password is requested.\n            - `acceptable_formats`: a sequence of acceptable formats of the\n              password data. Could be \"plain\" (plain text password),\n              \"md5:user:realm:password\" (MD5 hex digest of user:realm:password)\n              or any other mechanism-specific encoding. This allows\n              non-plain-text storage of passwords. But only \"plain\" format will\n              work with all password authentication mechanisms.\n            - `properties`: mapping with authentication properties (those\n              provided to the authenticator's ``start()`` method plus some\n              already obtained via the mechanism).\n        :Types:\n            - `username`: `unicode`\n            - `acceptable_formats`: sequence of `unicode`\n            - `properties`: mapping\n\n        :return: the password and its encoding (format).\n        :returntype: `unicode`,`unicode` tuple.", "id": "f15236:c0:m0"}
{"signature": "def __init__(self):", "body": "pass<EOL>", "docstring": "Initialize a `ClientAuthenticator` object.", "id": "f15236:c6:m0"}
{"signature": "def __init__(self, data):", "body": "Reply.__init__(self, data)<EOL>", "docstring": "Initialize the `Challenge` object.", "id": "f15236:c2:m0"}
{"signature": "def _register_server_authenticator(klass, name):", "body": "<EOL>SERVER_MECHANISMS_D[name] = klass<EOL>items = sorted(SERVER_MECHANISMS_D.items(), key = _key_func, reverse = True)<EOL>SERVER_MECHANISMS[:] = [k for (k, v) in items ]<EOL>SECURE_SERVER_MECHANISMS[:] = [k for (k, v) in items<EOL>if v._pyxmpp_sasl_secure]<EOL>", "docstring": "Add a client authenticator class to `SERVER_MECHANISMS_D`,\n    `SERVER_MECHANISMS` and, optionally, to `SECURE_SERVER_MECHANISMS`", "id": "f15236:m3"}
{"signature": "def sasl_mechanism(name, secure, preference = <NUM_LIT:50>):", "body": "<EOL>def decorator(klass):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>klass._pyxmpp_sasl_secure = secure<EOL>klass._pyxmpp_sasl_preference = preference<EOL>if issubclass(klass, ClientAuthenticator):<EOL><INDENT>_register_client_authenticator(klass, name)<EOL><DEDENT>elif issubclass(klass, ServerAuthenticator):<EOL><INDENT>_register_server_authenticator(klass, name)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>return klass<EOL><DEDENT>return decorator<EOL>", "docstring": "Class decorator generator for `ClientAuthenticator` or\n    `ServerAuthenticator` subclasses. Adds the class to the pyxmpp.sasl\n    mechanism registry.\n\n    :Parameters:\n        - `name`: SASL mechanism name\n        - `secure`: if the mechanims can be considered secure - `True`\n          if it can be used over plain-text channel\n        - `preference`: mechanism preference level (the higher the better)\n    :Types:\n        - `name`: `unicode`\n        - `secure`: `bool`\n        - `preference`: `int`", "id": "f15236:m4"}
{"signature": "def _register_client_authenticator(klass, name):", "body": "<EOL>CLIENT_MECHANISMS_D[name] = klass<EOL>items = sorted(CLIENT_MECHANISMS_D.items(), key = _key_func, reverse = True)<EOL>CLIENT_MECHANISMS[:] = [k for (k, v) in items ]<EOL>SECURE_CLIENT_MECHANISMS[:] = [k for (k, v) in items<EOL>if v._pyxmpp_sasl_secure]<EOL>", "docstring": "Add a client authenticator class to `CLIENT_MECHANISMS_D`,\n    `CLIENT_MECHANISMS` and, optionally, to `SECURE_CLIENT_MECHANISMS`", "id": "f15236:m2"}
{"signature": "@abstractmethod<EOL><INDENT>def start(self, properties, initial_response):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Start the authentication process.\n\n        :Parameters:\n            - `properties`: the `authentication properties`_\n            - `initial_response`: the initial response send by the client with\n              the authentication request.\n\n        :Types:\n            - `properties`: mapping\n            - `initial_response`: `bytes`\n\n        :return: a challenge, a success or a failure indicator.\n        :returntype: `Challenge` or `Failure` or `Success`", "id": "f15236:c7:m2"}
{"signature": "@abstractmethod<EOL><INDENT>def challenge(self, challenge):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Process the server's challenge.\n\n        :Parameters:\n            - `challenge`: the challenge.\n        :Types:\n            - `challenge`: `bytes`\n\n        :return: the response or a failure indicator.\n        :returntype: `Response` or `Failure`", "id": "f15236:c6:m3"}
{"signature": "def challenge(self, challenge):", "body": "<EOL>if not challenge:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>if self._server_first_message:<EOL><INDENT>return self._final_challenge(challenge)<EOL><DEDENT>match = SERVER_FIRST_MESSAGE_RE.match(challenge)<EOL>if not match:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(challenge))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>self._server_first_message = challenge<EOL>mext = match.group(\"<STR_LIT>\")<EOL>if mext:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(mext))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>nonce = match.group(\"<STR_LIT>\")<EOL>if not nonce.startswith(self._c_nonce):<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>salt = match.group(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>salt = a2b_base64(salt)<EOL><DEDENT>except ValueError:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(salt))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>iteration_count = match.group(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>iteration_count = int(iteration_count)<EOL><DEDENT>except ValueError:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(iteration_count))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>return self._make_response(nonce, salt, iteration_count)<EOL>", "docstring": "Process a challenge and return the response.\n\n        :Parameters:\n            - `challenge`: the challenge from server.\n        :Types:\n            - `challenge`: `bytes`\n\n        :return: the response or a failure indicator.\n        :returntype: `sasl.Response` or `sasl.Failure`", "id": "f15238:c1:m3"}
{"signature": "def H(self, str_):", "body": "<EOL>return self.hash_factory(str_).digest()<EOL>", "docstring": "The H(str) function.", "id": "f15238:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def escape(data):<DEDENT>", "body": "return data.replace(b'<STR_LIT:=>', b'<STR_LIT>').replace(b'<STR_LIT:U+002C>', b'<STR_LIT>')<EOL>", "docstring": "Escape the ',' and '=' characters for 'a=' and 'n=' attributes.\n\n        Replaces '=' with '=3D' and ',' with '=2C'.\n\n        :Parameters:\n            - `data`: string to escape\n        :Types:\n            - `data`: `bytes`", "id": "f15238:c0:m5"}
{"signature": "def __init__(self, hash_name, channel_binding, password_database):", "body": "ServerAuthenticator.__init__(self, password_database)<EOL>SCRAMOperations.__init__(self, hash_name)<EOL>self.name = \"<STR_LIT>\".format(hash_name)<EOL>if channel_binding:<EOL><INDENT>self.name += \"<STR_LIT>\"<EOL><DEDENT>self.channel_binding = channel_binding<EOL>self.properties = None<EOL>self.out_properties = None<EOL>self._client_first_message_bare = None<EOL>self._stored_key = None<EOL>self._server_key = None<EOL>", "docstring": "Initialize a `SCRAMClientAuthenticator` object.\n\n        :Parameters:\n            - `hash_function_name`: hash function name, e.g. ``\"SHA-1\"``\n            - `channel_binding`: `True` to enable channel binding\n        :Types:\n            - `hash_function_name`: `unicode`\n            - `channel_binding`: `bool`", "id": "f15238:c2:m0"}
{"signature": "@staticmethod<EOL><INDENT>def Normalize(str_):<DEDENT>", "body": "<EOL>if isinstance(str_, bytes):<EOL><INDENT>str_ = str_.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>return SASLPREP.prepare(str_).encode(\"<STR_LIT:utf-8>\")<EOL>", "docstring": "The Normalize(str) function.\n\n        This one also accepts Unicode string input (in the RFC only UTF-8\n        strings are used).", "id": "f15238:c0:m1"}
{"signature": "def _make_response(self, nonce, salt, iteration_count):", "body": "self._salted_password = self.Hi(self.Normalize(self.password), salt,<EOL>iteration_count)<EOL>self.password = None <EOL>if self.channel_binding:<EOL><INDENT>channel_binding = b\"<STR_LIT>\" + standard_b64encode(self._gs2_header +<EOL>self._cb_data)<EOL><DEDENT>else:<EOL><INDENT>channel_binding = b\"<STR_LIT>\" + standard_b64encode(self._gs2_header)<EOL><DEDENT>client_final_message_without_proof = (channel_binding + b\"<STR_LIT>\" + nonce)<EOL>client_key = self.HMAC(self._salted_password, b\"<STR_LIT>\")<EOL>stored_key = self.H(client_key)<EOL>auth_message = ( self._client_first_message_bare + b\"<STR_LIT:U+002C>\" +<EOL>self._server_first_message + b\"<STR_LIT:U+002C>\" +<EOL>client_final_message_without_proof )<EOL>self._auth_message = auth_message<EOL>client_signature = self.HMAC(stored_key, auth_message)<EOL>client_proof = self.XOR(client_key, client_signature)<EOL>proof = b\"<STR_LIT>\" + standard_b64encode(client_proof)<EOL>client_final_message = (client_final_message_without_proof + b\"<STR_LIT:U+002C>\" +<EOL>proof)<EOL>return Response(client_final_message)<EOL>", "docstring": "Make a response for the first challenge from the server.\n\n        :return: the response or a failure indicator.\n        :returntype: `sasl.Response` or `sasl.Failure`", "id": "f15238:c1:m4"}
{"signature": "def _check_params(self, username, realm, cnonce, digest_uri, response_val,<EOL>authzid, nonce_count):", "body": "<EOL>if not cnonce:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>if not response_val:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>if not username:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>if not digest_uri:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>if not nonce_count:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>return self._make_final_challenge(username, realm, cnonce, digest_uri,<EOL>response_val, authzid, nonce_count)<EOL>", "docstring": "Check parameters of a client reponse and pass them to further\n        processing.\n\n        :Parameters:\n            - `username`: user name.\n            - `realm`: realm.\n            - `cnonce`: cnonce value.\n            - `digest_uri`: digest-uri value.\n            - `response_val`: response value computed by the client.\n            - `authzid`: authorization id.\n            - `nonce_count`: nonce count value.\n        :Types:\n            - `username`: `bytes`\n            - `realm`: `bytes`\n            - `cnonce`: `bytes`\n            - `digest_uri`: `bytes`\n            - `response_val`: `bytes`\n            - `authzid`: `bytes`\n            - `nonce_count`: `bytes`\n\n        :return: a challenge, a success indicator or a failure indicator.\n        :returntype: `sasl.Challenge`, `sasl.Success` or `sasl.Failure`", "id": "f15240:c1:m4"}
{"signature": "def response(self, response):", "body": "if self.out_properties:<EOL><INDENT>return Success(self.out_properties)<EOL><DEDENT>if not response:<EOL><INDENT>return Failure(\"<STR_LIT>\")<EOL><DEDENT>return self._parse_response(response)<EOL>", "docstring": "Process a client reponse.\n\n        :Parameters:\n            - `response`: the response from the client.\n        :Types:\n            - `response`: `bytes`\n\n        :return: a challenge, a success indicator or a failure indicator.\n        :returntype: `sasl.Challenge`, `sasl.Success` or `sasl.Failure`", "id": "f15240:c1:m2"}
{"signature": "def _make_final_challenge(self, username, realm, cnonce, digest_uri,<EOL>response_val, authzid, nonce_count):", "body": "<EOL>username_uq = username.replace(b'<STR_LIT:\\\\>', b'<STR_LIT>')<EOL>if authzid:<EOL><INDENT>authzid_uq = authzid.replace(b'<STR_LIT:\\\\>', b'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>authzid_uq = None<EOL><DEDENT>if realm:<EOL><INDENT>realm_uq = realm.replace(b'<STR_LIT:\\\\>', b'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>realm_uq = None<EOL><DEDENT>digest_uri_uq = digest_uri.replace(b'<STR_LIT:\\\\>', b'<STR_LIT>')<EOL>props = dict(self.in_properties)<EOL>props[\"<STR_LIT>\"] = realm_uq.decode(\"<STR_LIT:utf-8>\")<EOL>password, pformat = self.password_database.get_password(<EOL>username_uq.decode(\"<STR_LIT:utf-8>\"),<EOL>(u\"<STR_LIT>\", u\"<STR_LIT>\"), props)<EOL>if pformat == u\"<STR_LIT>\":<EOL><INDENT>urp_hash = password.a2b_hex()<EOL><DEDENT>elif pformat == u\"<STR_LIT>\":<EOL><INDENT>urp_hash = _make_urp_hash(username, realm, password.encode(\"<STR_LIT:utf-8>\"))<EOL><DEDENT>else:<EOL><INDENT>logger.debug(u\"<STR_LIT>\")<EOL>return Failure(u\"<STR_LIT>\")<EOL><DEDENT>valid_response = _compute_response(urp_hash, self.nonce, cnonce,<EOL>nonce_count, authzid, digest_uri)<EOL>if response_val != valid_response:<EOL><INDENT>logger.debug(u\"<STR_LIT>\".format(<EOL>response_val, valid_response))<EOL>return Failure(u\"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>fields = digest_uri_uq.split(b\"<STR_LIT:/>\")<EOL>if len(fields) == <NUM_LIT:3>:<EOL><INDENT>serv_type, host, serv_name = [f.decode(\"<STR_LIT:utf-8>\") for f in fields]<EOL><DEDENT>elif len(fields) == <NUM_LIT:2>:<EOL><INDENT>serv_type, host = [f.decode(\"<STR_LIT:utf-8>\") for f in fields]<EOL>serv_name = None<EOL><DEDENT>else:<EOL><INDENT>raise ValueError<EOL><DEDENT><DEDENT>except (ValueError, UnicodeError):<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(digest_uri_uq))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>if \"<STR_LIT>\" in 
self.in_properties:<EOL><INDENT>if serv_type != self.in_properties[\"<STR_LIT>\"]:<EOL><INDENT>logger.debug(u\"<STR_LIT>\"<EOL>.format(serv_type, self.in_properties[\"<STR_LIT>\"]))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if \"<STR_LIT>\" in self.in_properties:<EOL><INDENT>if serv_name:<EOL><INDENT>if serv_name != self.in_properties[\"<STR_LIT>\"]:<EOL><INDENT>logger.debug(u\"<STR_LIT>\".format(serv_name,<EOL>self.in_properties[\"<STR_LIT>\"]))<EOL><DEDENT>return Failure(\"<STR_LIT>\")<EOL><DEDENT>elif (host != self.in_properties[\"<STR_LIT>\"]<EOL>and host != self.in_properties.get(\"<STR_LIT>\")):<EOL><INDENT>logger.debug(u\"<STR_LIT>\"<EOL>u\"<STR_LIT>\".format(host,<EOL>self.in_properties[\"<STR_LIT>\"],<EOL>self.in_properties.get(\"<STR_LIT>\")))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if \"<STR_LIT>\" in self.in_properties:<EOL><INDENT>if host != self.in_properties[\"<STR_LIT>\"]:<EOL><INDENT>logger.debug(u\"<STR_LIT>\".format(host,<EOL>self.in_properties[\"<STR_LIT>\"]))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>rspauth = _compute_response_auth(urp_hash, self.nonce, cnonce,<EOL>nonce_count, authzid, digest_uri)<EOL>if authzid_uq is not None:<EOL><INDENT>authzid_uq =  authzid_uq.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>self.out_properties = {<EOL>\"<STR_LIT:username>\": username.decode(\"<STR_LIT:utf-8>\"),<EOL>\"<STR_LIT>\": realm.decode(\"<STR_LIT:utf-8>\"),<EOL>\"<STR_LIT>\": authzid_uq,<EOL>\"<STR_LIT>\": serv_type,<EOL>\"<STR_LIT>\": serv_name if serv_name else host,<EOL>\"<STR_LIT>\": host<EOL>}<EOL>return Success(self.out_properties, b\"<STR_LIT>\" + rspauth)<EOL>", "docstring": "Send the second challenge in reply to the client response.\n\n        :Parameters:\n            - `username`: user name.\n            - `realm`: realm.\n            - `cnonce`: cnonce value.\n            - `digest_uri`: digest-uri value.\n            - `response_val`: response value computed by the client.\n            - `authzid`: 
authorization id.\n            - `nonce_count`: nonce count value.\n        :Types:\n            - `username`: `bytes`\n            - `realm`: `bytes`\n            - `cnonce`: `bytes`\n            - `digest_uri`: `bytes`\n            - `response_val`: `bytes`\n            - `authzid`: `bytes`\n            - `nonce_count`: `bytes`\n\n        :return: a challenge, a success indicator or a failure indicator.\n        :returntype: `sasl.Success` or `sasl.Failure`", "id": "f15240:c1:m5"}
{"signature": "def _quote(data):", "body": "data = data.replace(b'<STR_LIT:\\\\>', b'<STR_LIT>')<EOL>data = data.replace(b'<STR_LIT:\">', b'<STR_LIT>')<EOL>return data<EOL>", "docstring": "Prepare a string for quoting for DIGEST-MD5 challenge or response.\n\n    Don't add the quotes, only escape '\"' and \"\\\\\" with backslashes.\n\n    :Parameters:\n        - `data`: a raw string.\n    :Types:\n        - `data`: `bytes`\n\n    :return: `data` with '\"' and \"\\\\\" escaped using \"\\\\\".\n    :returntype: `bytes`", "id": "f15240:m1"}
{"signature": "def __init__(self):", "body": "ClientAuthenticator.__init__(self)<EOL>self.username = None<EOL>self.rspauth_checked = None<EOL>self.response_auth = None<EOL>self.authzid = None<EOL>self.realm = None<EOL>self.nonce_count = None<EOL>self.in_properties = None<EOL>", "docstring": "Initialize a `DigestMD5ClientAuthenticator` object.", "id": "f15240:c0:m0"}
{"signature": "def _unquote(data):", "body": "if not data.startswith(b'<STR_LIT:\">') or not data.endswith(b'<STR_LIT:\">'):<EOL><INDENT>return data<EOL><DEDENT>return QUOTE_RE.sub(b\"<STR_LIT>\", data[<NUM_LIT:1>:-<NUM_LIT:1>])<EOL>", "docstring": "Unquote quoted value from DIGEST-MD5 challenge or response.\n\n    If `data` doesn't start or doesn't end with '\"' then return it unchanged,\n    remove the quotes and escape backslashes otherwise.\n\n    :Parameters:\n        - `data`: a quoted string.\n    :Types:\n        - `data`: `bytes`\n\n    :return: the unquoted string.\n    :returntype: `bytes`", "id": "f15240:m0"}
{"signature": "def challenge(self, challenge):", "body": "<EOL>if not challenge:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>challenge = challenge.split(b'<STR_LIT:\\x00>')[<NUM_LIT:0>]<EOL>if self.response_auth:<EOL><INDENT>return self._final_challenge(challenge)<EOL><DEDENT>realms = []<EOL>nonce = None<EOL>charset = \"<STR_LIT>\"<EOL>while challenge:<EOL><INDENT>match = PARAM_RE.match(challenge)<EOL>if not match:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(challenge))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>challenge = match.group(\"<STR_LIT>\")<EOL>var = match.group(\"<STR_LIT>\")<EOL>val = match.group(\"<STR_LIT>\")<EOL>logger.debug(\"<STR_LIT>\".format(var, val))<EOL>if var == b\"<STR_LIT>\":<EOL><INDENT>realms.append(_unquote(val))<EOL><DEDENT>elif var == b\"<STR_LIT>\":<EOL><INDENT>if nonce:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>nonce = _unquote(val)<EOL><DEDENT>elif var == b\"<STR_LIT>\":<EOL><INDENT>qopl = _unquote(val).split(b\"<STR_LIT:U+002C>\")<EOL>if b\"<STR_LIT>\" not in qopl:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>elif var == b\"<STR_LIT>\":<EOL><INDENT>if val != b\"<STR_LIT:utf-8>\":<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>charset = \"<STR_LIT:utf-8>\"<EOL><DEDENT>elif var == b\"<STR_LIT>\":<EOL><INDENT>if val != b\"<STR_LIT>\":<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT>if not nonce:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>return self._make_response(charset, realms, nonce)<EOL>", "docstring": "Process a challenge and return the response.\n\n        :Parameters:\n            - `challenge`: the challenge from server.\n        :Types:\n            - `challenge`: `bytes`\n\n        :return: the response or a failure indicator.\n        :returntype: 
`sasl.Response` or `sasl.Failure`", "id": "f15240:c0:m3"}
{"signature": "def _make_response(self, charset, realms, nonce):", "body": "<EOL>params = []<EOL>realm = self._get_realm(realms, charset)<EOL>if isinstance(realm, Failure):<EOL><INDENT>return realm<EOL><DEDENT>elif realm:<EOL><INDENT>realm = _quote(realm)<EOL>params.append(b'<STR_LIT>' + realm + b'<STR_LIT:\">')<EOL><DEDENT>try:<EOL><INDENT>username = self.username.encode(charset)<EOL><DEDENT>except UnicodeError:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(charset))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>username = _quote(username)<EOL>params.append(b'<STR_LIT>' + username + b'<STR_LIT:\">')<EOL>cnonce = self.in_properties.get(<EOL>\"<STR_LIT>\", default_nonce_factory)()<EOL>cnonce = _quote(cnonce)<EOL>params.append(b'<STR_LIT>' + cnonce + b'<STR_LIT:\">')<EOL>params.append(b'<STR_LIT>' + nonce + b'<STR_LIT:\">')<EOL>self.nonce_count += <NUM_LIT:1><EOL>nonce_count = \"<STR_LIT>\".format(self.nonce_count).encode(\"<STR_LIT>\")<EOL>params.append(b'<STR_LIT>' + nonce_count)<EOL>params.append(b'<STR_LIT>')<EOL>serv_type = self.in_properties[\"<STR_LIT>\"]<EOL>serv_type = serv_type.encode(\"<STR_LIT>\")<EOL>serv_name = self.in_properties[\"<STR_LIT>\"]<EOL>host = self.in_properties.get(\"<STR_LIT>\", serv_name)<EOL>serv_name = serv_name.encode(\"<STR_LIT>\")<EOL>host = host.encode(\"<STR_LIT>\")<EOL>if serv_name and serv_name != host:<EOL><INDENT>digest_uri = b\"<STR_LIT:/>\".join((serv_type, host, serv_name))<EOL><DEDENT>else:<EOL><INDENT>digest_uri = b\"<STR_LIT:/>\".join((serv_type, host))<EOL><DEDENT>digest_uri = _quote(digest_uri)<EOL>params.append(b'<STR_LIT>' + digest_uri + b'<STR_LIT:\">')<EOL>if self.authzid:<EOL><INDENT>try:<EOL><INDENT>authzid = self.authzid.encode(charset)<EOL><DEDENT>except UnicodeError:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(charset))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>authzid = _quote(authzid)<EOL><DEDENT>else:<EOL><INDENT>authzid = b\"<STR_LIT>\"<EOL><DEDENT>try:<EOL><INDENT>epasswd = 
self.in_properties[\"<STR_LIT:password>\"].encode(charset)<EOL><DEDENT>except UnicodeError:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(charset))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>logger.debug(\"<STR_LIT>\".format(epasswd))<EOL>urp_hash = _make_urp_hash(username, realm, epasswd)<EOL>response = _compute_response(urp_hash, nonce, cnonce, nonce_count,<EOL>authzid, digest_uri)<EOL>self.response_auth = _compute_response_auth(urp_hash, nonce, cnonce,<EOL>nonce_count, authzid, digest_uri)<EOL>params.append(b'<STR_LIT>' + response)<EOL>if authzid:<EOL><INDENT>params.append(b'<STR_LIT>' + authzid + b'<STR_LIT:\">')<EOL><DEDENT>return Response(b\"<STR_LIT:U+002C>\".join(params))<EOL>", "docstring": "Make a response for the first challenge from the server.\n\n        :Parameters:\n            - `charset`: charset name from the challenge.\n            - `realms`: realms list from the challenge.\n            - `nonce`: nonce value from the challenge.\n        :Types:\n            - `charset`: `bytes`\n            - `realms`: `bytes`\n            - `nonce`: `bytes`\n\n        :return: the response or a failure indicator.\n        :returntype: `sasl.Response` or `sasl.Failure`", "id": "f15240:c0:m4"}
{"signature": "def _kd_value(k_val, s_val):", "body": "return _h_value(b\"<STR_LIT::>\".join((k_val, s_val)))<EOL>", "docstring": "KD function of the DIGEST-MD5 algorithm.\n\n    :Parameters:\n        - `k_val`: a byte string.\n        - `s_val`: a byte string.\n    :Types:\n        - `k_val`: `bytes`\n        - `s_val`: `bytes`\n\n    :return: MD5 sum of the strings joined with ':'.\n    :returntype: `bytes`", "id": "f15240:m3"}
{"signature": "def _parse_response(self, response):", "body": "<EOL>response = response.split(b'<STR_LIT:\\x00>')[<NUM_LIT:0>]<EOL>if self.realm:<EOL><INDENT>realm = self.realm.encode(\"<STR_LIT:utf-8>\")<EOL>realm = _quote(realm)<EOL><DEDENT>else:<EOL><INDENT>realm = None<EOL><DEDENT>username = None<EOL>cnonce = None<EOL>digest_uri = None<EOL>response_val = None<EOL>authzid = None<EOL>nonce_count = None<EOL>while response:<EOL><INDENT>match = PARAM_RE.match(response)<EOL>if not match:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(response))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>response = match.group(\"<STR_LIT>\")<EOL>var = match.group(\"<STR_LIT>\")<EOL>val = match.group(\"<STR_LIT>\")<EOL>logger.debug(\"<STR_LIT>\".format(var, val))<EOL>if var == b\"<STR_LIT>\":<EOL><INDENT>realm = val[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>elif var == b\"<STR_LIT>\":<EOL><INDENT>if cnonce:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>cnonce = val[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>elif var == b\"<STR_LIT>\":<EOL><INDENT>if val != b'<STR_LIT>':<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>elif var == b\"<STR_LIT>\":<EOL><INDENT>digest_uri = val[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>elif var == b\"<STR_LIT>\":<EOL><INDENT>authzid = val[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>elif var == b\"<STR_LIT:username>\":<EOL><INDENT>username = val[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>elif var == b\"<STR_LIT>\":<EOL><INDENT>response_val = val<EOL><DEDENT>elif var == b\"<STR_LIT>\":<EOL><INDENT>nonce_count = val<EOL>self.last_nonce_count += <NUM_LIT:1><EOL>if int(nonce_count) != self.last_nonce_count:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(nonce_count, self.last_nonce_count))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT>return self._check_params(username, realm, cnonce, digest_uri,<EOL>response_val, authzid, nonce_count)<EOL>", "docstring": "Parse a client reponse 
and pass to further processing.\n\n        :Parameters:\n            - `response`: the response from the client.\n        :Types:\n            - `response`: `bytes`\n\n        :return: a challenge, a success indicator or a failure indicator.\n        :returntype: `sasl.Challenge`, `sasl.Success` or `sasl.Failure`", "id": "f15240:c1:m3"}
{"signature": "def finish(self, data):", "body": "if not self.response_auth:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT>if self.rspauth_checked:<EOL><INDENT>properties = {<EOL>\"<STR_LIT:username>\": self.username,<EOL>\"<STR_LIT>\": self.realm,<EOL>\"<STR_LIT>\": self.authzid<EOL>}<EOL>return Success(properties)<EOL><DEDENT>else:<EOL><INDENT>ret = self._final_challenge(data)<EOL>if isinstance(ret, Failure):<EOL><INDENT>return ret<EOL><DEDENT>if self.rspauth_checked:<EOL><INDENT>properties = {<EOL>\"<STR_LIT:username>\": self.username,<EOL>\"<STR_LIT>\": self.realm,<EOL>\"<STR_LIT>\": self.authzid<EOL>}<EOL>return Success(properties)<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "Process success indicator from the server.\n\n        Process any addiitional data passed with the success.\n        Fail if the server was not authenticated.\n\n        :Parameters:\n            - `data`: an optional additional data with success.\n        :Types:\n            - `data`: `bytes`\n\n        :return: success or failure indicator.\n        :returntype: `sasl.Success` or `sasl.Failure`", "id": "f15240:c0:m7"}
{"signature": "def _get_realm(self, realms, charset):", "body": "if realms:<EOL><INDENT>realm = realms[<NUM_LIT:0>]<EOL>ap_realms = self.in_properties.get(\"<STR_LIT>\")<EOL>if ap_realms is not None:<EOL><INDENT>realms = (unicode(r, charset) for r in realms)<EOL>for ap_realm in ap_realms:<EOL><INDENT>if ap_realm in realms:<EOL><INDENT>realm = ap_realm<EOL>break<EOL><DEDENT><DEDENT><DEDENT>realm = realm.decode(charset)<EOL><DEDENT>else:<EOL><INDENT>realm = self.in_properties.get(\"<STR_LIT>\")<EOL><DEDENT>if realm is not None:<EOL><INDENT>self.realm = realm<EOL>try:<EOL><INDENT>realm = realm.encode(charset)<EOL><DEDENT>except UnicodeError:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(charset))<EOL>return Failure(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return realm<EOL>", "docstring": "Choose a realm from the list specified by the server.\n\n        :Parameters:\n            - `realms`: the realm list.\n            - `charset`: encoding of realms on the list.\n        :Types:\n            - `realms`: `list` of `bytes`\n            - `charset`: `bytes`\n\n        :return: the realm chosen or a failure indicator.\n        :returntype: `bytes` or `Failure`", "id": "f15240:c0:m5"}
{"signature": "def _h_value(data):", "body": "<EOL>return hashlib.md5(data).digest()<EOL>", "docstring": "H function of the DIGEST-MD5 algorithm (MD5 sum).\n\n    :Parameters:\n        - `data`: a byte string.\n    :Types:\n        - `data`: `bytes`\n\n    :return: MD5 sum of the string.\n    :returntype: `bytes`", "id": "f15240:m2"}
{"signature": "def _try_backup_item(self):", "body": "if not self._backup_state:<EOL><INDENT>return False<EOL><DEDENT>item = self.cache.get_item(self.address, self._backup_state)<EOL>if item:<EOL><INDENT>self._object_handler(item.address, item.value, item.state)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>False<EOL><DEDENT>", "docstring": "Check if a backup item is available in cache and call\n        the item handler if it is.\n\n        :return: `True` if backup item was found.\n        :returntype: `bool`", "id": "f15241:c1:m7"}
{"signature": "def unregister_fetcher(self, object_class):", "body": "self._lock.acquire()<EOL>try:<EOL><INDENT>cache = self._caches.get(object_class)<EOL>if not cache:<EOL><INDENT>return<EOL><DEDENT>cache.set_fetcher(None)<EOL><DEDENT>finally:<EOL><INDENT>self._lock.release()<EOL><DEDENT>", "docstring": "Unregister a fetcher class for an object class.\n\n        :Parameters:\n            - `object_class`: class retrieved by the fetcher.\n        :Types:\n            - `object_class`: `classobj`", "id": "f15241:c3:m4"}
{"signature": "def error(self, error_data):", "body": "if not self.active:<EOL><INDENT>return<EOL><DEDENT>if not self._try_backup_item():<EOL><INDENT>self._error_handler(self.address, error_data)<EOL><DEDENT>self.cache.invalidate_object(self.address)<EOL>self._deactivate()<EOL>", "docstring": "Handle a retrieval error and call apriopriate handler.\n\n        Should be called when retrieval fails.\n\n        Do nothing when the fetcher is not active any more (after\n        one of handlers was already called).\n\n        :Parameters:\n            - `error_data`: additional information about the error (e.g. `StanzaError` instance).\n        :Types:\n            - `error_data`: fetcher dependant", "id": "f15241:c1:m5"}
{"signature": "def __init__(self, max_items, default_freshness_period = _hour,<EOL>default_expiration_period = <NUM_LIT:12>*_hour, default_purge_period = <NUM_LIT>*_hour):", "body": "self.default_freshness_period = default_freshness_period<EOL>self.default_expiration_period = default_expiration_period<EOL>self.default_purge_period = default_purge_period<EOL>self.max_items = max_items<EOL>self._items = {}<EOL>self._items_list = []<EOL>self._fetcher = None<EOL>self._active_fetchers = []<EOL>self._purged = <NUM_LIT:0><EOL>self._lock = threading.RLock()<EOL>", "docstring": "Initialize a `Cache` object.\n\n            :Parameters:\n                - `default_freshness_period`: default freshness period (in seconds).\n                - `default_expiration_period`: default expiration period (in seconds).\n                - `default_purge_period`: default purge period (in seconds). When\n                  0 then items are never purged because of their age.\n                - `max_items`: maximum number of items to store.\n            :Types:\n                - `default_freshness_period`: number\n                - `default_expiration_period`: number\n                - `default_purge_period`: number\n                - `max_items`: number", "id": "f15241:c2:m0"}
{"signature": "def __init__(self, max_items, default_freshness_period = _hour,<EOL>default_expiration_period = <NUM_LIT:12>*_hour, default_purge_period = <NUM_LIT>*_hour):", "body": "self.default_freshness_period = default_freshness_period<EOL>self.default_expiration_period = default_expiration_period<EOL>self.default_purge_period = default_purge_period<EOL>self.max_items = max_items<EOL>self._caches = {}<EOL>self._lock = threading.RLock()<EOL>", "docstring": "Initialize a `Cache` object.\n\n            :Parameters:\n                - `default_freshness_period`: default freshness period (in seconds).\n                - `default_expiration_period`: default expiration period (in seconds).\n                - `default_purge_period`: default purge period (in seconds). When\n                  0 then items are never purged because of their age.\n                - `max_items`: maximum number of items to store.\n            :Types:\n                - `default_freshness_period`: number\n                - `default_expiration_period`: number\n                - `default_purge_period`: number\n                - `max_items`: number", "id": "f15241:c3:m0"}
{"signature": "def _deactivate(self):", "body": "self.cache.remove_fetcher(self)<EOL>if self.active:<EOL><INDENT>self._deactivated()<EOL><DEDENT>", "docstring": "Remove the fetcher from cache and mark it not active.", "id": "f15241:c1:m1"}
{"signature": "def add_item(self, item):", "body": "self._lock.acquire()<EOL>try:<EOL><INDENT>state = item.update_state()<EOL>if state != '<STR_LIT>':<EOL><INDENT>if len(self._items_list) >= self.max_items:<EOL><INDENT>self.purge_items()<EOL><DEDENT>self._items[item.address] = item<EOL>self._items_list.append(item)<EOL>self._items_list.sort()<EOL><DEDENT>return item.state<EOL><DEDENT>finally:<EOL><INDENT>self._lock.release()<EOL><DEDENT>", "docstring": "Add an item to the cache.\n\n        Item state is updated before adding it (it will not be 'new' any more).\n\n        :Parameters:\n            - `item`: the item to add.\n        :Types:\n            - `item`: `CacheItem`\n\n        :return: state of the item after addition.\n        :returntype: `str`", "id": "f15241:c2:m3"}
{"signature": "def request_object(self, object_class, address, state, object_handler,<EOL>error_handler = None, timeout_handler = None,<EOL>backup_state = None, timeout = None,<EOL>freshness_period = None, expiration_period = None, purge_period = None):", "body": "self._lock.acquire()<EOL>try:<EOL><INDENT>if object_class not in self._caches:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % (object_class,))<EOL><DEDENT>self._caches[object_class].request_object(address, state, object_handler,<EOL>error_handler, timeout_handler, backup_state, timeout,<EOL>freshness_period, expiration_period, purge_period)<EOL><DEDENT>finally:<EOL><INDENT>self._lock.release()<EOL><DEDENT>", "docstring": "Request an object of given class, with given address and state not\n        worse than `state`. The object will be taken from cache if available,\n        and created/fetched otherwise. The request is asynchronous -- this\n        metod doesn't return the object directly, but the `object_handler` is\n        called as soon as the object is available (this may be before\n        `request_object` returns and may happen in other thread). On error the\n        `error_handler` will be called, and on timeout -- the\n        `timeout_handler`.\n\n        :Parameters:\n            - `object_class`: class (type) of the object requested.\n            - `address`: address of the object requested.\n            - `state`: the worst acceptable object state. When 'new' then always\n              a new object will be created/fetched. 
'stale' will select any\n              item available in cache.\n            - `object_handler`: function to be called when object is available.\n              It will be called with the following arguments: address, object\n              and its state.\n            - `error_handler`: function to be called on object retrieval error.\n              It will be called with two arguments: requested address and\n              additional error information (fetcher-specific, may be\n              StanzaError for XMPP objects).  If not given, then the object\n              handler will be called with object set to `None` and state\n              \"error\".\n            - `timeout_handler`: function to be called on object retrieval\n              timeout.  It will be called with only one argument: the requested\n              address. If not given, then the `error_handler` will be called\n              instead, with error details set to `None`.\n            - `backup_state`: when set and object in state `state` is not\n              available in the cache and object retrieval failed then object\n              with this state will also be looked-up in the cache and provided\n              if available.\n            - `timeout`: time interval after which retrieval of the object\n              should be given up.\n            - `freshness_period`: time interval after which the item created\n              should become 'old'.\n            - `expiration_period`: time interval after which the item created\n              should become 'stale'.\n            - `purge_period`: time interval after which the item created\n              shuld be removed from the cache.\n        :Types:\n            - `object_class`: `classobj`\n            - `address`: any hashable\n            - `state`: \"new\", \"fresh\", \"old\" or \"stale\"\n            - `object_handler`: callable(address, value, state)\n            - `error_handler`: callable(address, error_data)\n            - `timeout_handler`: 
callable(address)\n            - `backup_state`: \"new\", \"fresh\", \"old\" or \"stale\"\n            - `timeout`: `timedelta`\n            - `freshness_period`: `timedelta`\n            - `expiration_period`: `timedelta`\n            - `purge_period`: `timedelta`", "id": "f15241:c3:m1"}
{"signature": "def num_items(self):", "body": "return len(self._items_list)<EOL>", "docstring": "Get the number of items in the cache.\n\n        :return: number of items.\n        :returntype: `int`", "id": "f15241:c2:m6"}
{"signature": "def _deactivated(self):", "body": "self.active = False<EOL>", "docstring": "Mark the fetcher inactive after it is removed from the cache.", "id": "f15241:c1:m2"}
{"signature": "def as_unicode(self):", "body": "result = self.domain<EOL>if self.local:<EOL><INDENT>result = self.local + '<STR_LIT:@>' + result<EOL><DEDENT>if self.resource:<EOL><INDENT>result = result + '<STR_LIT:/>' + self.resource<EOL><DEDENT>if result not in JID.cache:<EOL><INDENT>JID.cache[result] = self<EOL><DEDENT>return result<EOL>", "docstring": "Unicode string JID representation.\n\n        :return: JID as Unicode string.", "id": "f15242:c0:m11"}
{"signature": "@staticmethod<EOL><INDENT>def __prepare_local(data):<DEDENT>", "body": "if not data:<EOL><INDENT>return None<EOL><DEDENT>data = str(data)<EOL>try:<EOL><INDENT>local = NODEPREP.prepare(data)<EOL><DEDENT>except StringprepError as err:<EOL><INDENT>raise JIDError(\"<STR_LIT>\".format(err))<EOL><DEDENT>if len(local.encode(\"<STR_LIT:utf-8>\")) > <NUM_LIT>:<EOL><INDENT>raise JIDError(\"<STR_LIT>\")<EOL><DEDENT>return local<EOL>", "docstring": "Prepare localpart of the JID\n\n        :Parameters:\n            - `data`: localpart of the JID\n        :Types:\n            - `data`: `unicode`\n\n        :raise JIDError: if the local name is too long.\n        :raise pyxmpp.xmppstringprep.StringprepError: if the\n            local name fails Nodeprep preparation.", "id": "f15242:c0:m4"}
{"signature": "@staticmethod<EOL><INDENT>def __prepare_resource(data):<DEDENT>", "body": "if not data:<EOL><INDENT>return None<EOL><DEDENT>data = str(data)<EOL>try:<EOL><INDENT>resource = RESOURCEPREP.prepare(data)<EOL><DEDENT>except StringprepError as err:<EOL><INDENT>raise JIDError(\"<STR_LIT>\".format(err))<EOL><DEDENT>if len(resource.encode(\"<STR_LIT:utf-8>\")) > <NUM_LIT>:<EOL><INDENT>raise JIDError(\"<STR_LIT>\")<EOL><DEDENT>return resource<EOL>", "docstring": "Prepare the resourcepart of the JID.\n\n        :Parameters:\n            - `data`: Resourcepart of the JID\n\n        :raise JIDError: if the resource name is too long.\n        :raise pyxmpp.xmppstringprep.StringprepError: if the\n            resourcepart fails Resourceprep preparation.", "id": "f15242:c0:m6"}
{"signature": "def as_string(self):", "body": "warnings.warn(\"<STR_LIT>\"<EOL>\"<STR_LIT>\", DeprecationWarning, stacklevel=<NUM_LIT:1>)<EOL>return self.as_utf8()<EOL>", "docstring": "UTF-8 encoded JID representation.\n\n        *Deprecated* Always use Unicode objects, or `as_utf8` if you really want.\n\n        :return: UTF-8 encoded JID.", "id": "f15242:c0:m10"}
{"signature": "def are_domains_equal(domain1, domain2):", "body": "domain1 = domain1.encode(\"<STR_LIT>\")<EOL>domain2 = domain2.encode(\"<STR_LIT>\")<EOL>return domain1.lower() == domain2.lower()<EOL>", "docstring": "Compare two International Domain Names.\n\n    :Parameters:\n        - `domain1`: domain name to compare\n        - `domain2`: domain name to compare\n    :Types:\n        - `domain1`: `unicode`\n        - `domain2`: `unicode`\n\n    :return: True if `domain1` and `domain2` are equal as domain names.", "id": "f15242:m0"}
{"signature": "def _expire_item(self, key):", "body": "(timeout, callback) = self._timeouts[key]<EOL>now = time.time()<EOL>if timeout <= now:<EOL><INDENT>item = dict.pop(self, key)<EOL>del self._timeouts[key]<EOL>if callback:<EOL><INDENT>try:<EOL><INDENT>callback(key, item)<EOL><DEDENT>except TypeError:<EOL><INDENT>try:<EOL><INDENT>callback(key)<EOL><DEDENT>except TypeError:<EOL><INDENT>callback()<EOL><DEDENT><DEDENT><DEDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return timeout - now<EOL><DEDENT>", "docstring": "Do the expiration of a dictionary item.\n\n        Remove the item if it has expired by now.\n\n        :Parameters:\n            - `key`: key to the object.\n        :Types:\n            - `key`: any hashable value", "id": "f15243:c0:m8"}
{"signature": "def __init__(self, default_timeout = <NUM_LIT>):", "body": "dict.__init__(self)<EOL>self._timeouts = {}<EOL>self._default_timeout = default_timeout<EOL>self._lock = threading.RLock()<EOL>", "docstring": "Initialize an `ExpiringDictionary` object.\n\n        :Parameters:\n            - `default_timeout`: default timeout value (in seconds) for stored\n              objects.\n        :Types:\n            - `default_timeout`: `float`", "id": "f15243:c0:m0"}
{"signature": "def _run(self, thread_n):", "body": "try:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(self, thread_n))<EOL>resolver = self._make_resolver()<EOL>while True:<EOL><INDENT>request = self.queue.get()<EOL>if request is None:<EOL><INDENT>break<EOL><DEDENT>method, args = request<EOL>logger.debug(\"<STR_LIT>\"<EOL>.format(resolver, method, args))<EOL>getattr(resolver, method)(*args) <EOL>self.queue.task_done()<EOL><DEDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(self, thread_n))<EOL><DEDENT>finally:<EOL><INDENT>self.threads.remove(threading.currentThread())<EOL><DEDENT>", "docstring": "The thread function.", "id": "f15244:c0:m6"}
{"signature": "def stop(self):", "body": "with self.lock:<EOL><INDENT>for dummy in self.threads:<EOL><INDENT>self.queue.put(None)<EOL><DEDENT><DEDENT>", "docstring": "Stop the resolver threads.", "id": "f15244:c0:m2"}
{"signature": "def reorder_srv(records):", "body": "records = list(records)<EOL>records.sort()<EOL>ret = []<EOL>tmp = []<EOL>for rrecord in records:<EOL><INDENT>if not tmp or rrecord.priority == tmp[<NUM_LIT:0>].priority:<EOL><INDENT>tmp.append(rrecord)<EOL>continue<EOL><DEDENT>ret += shuffle_srv(tmp)<EOL>tmp = [rrecord]<EOL><DEDENT>if tmp:<EOL><INDENT>ret += shuffle_srv(tmp)<EOL><DEDENT>return ret<EOL>", "docstring": "Reorder SRV records using their priorities and weights.\n\n    :Parameters:\n        - `records`: SRV records to shuffle.\n    :Types:\n        - `records`: `list` of :dns:`dns.rdtypes.IN.SRV`\n\n    :return: reordered records.\n    :returntype: `list` of :dns:`dns.rdtypes.IN.SRV`", "id": "f15244:m3"}
{"signature": "def items(self):", "body": "return self._items<EOL>", "docstring": "Return the roster items.\n\n        :Returntype: iterable of `RosterType`", "id": "f15245:c4:m8"}
{"signature": "@event_handler(AuthorizedEvent)<EOL><INDENT>def handle_authorized_event(self, event):<DEDENT>", "body": "self.server = event.authorized_jid.bare()<EOL>if \"<STR_LIT>\" in self.server_features:<EOL><INDENT>if self.roster is not None and self.roster.version is not None:<EOL><INDENT>version = self.roster.version<EOL><DEDENT>else:<EOL><INDENT>version = \"<STR_LIT>\"<EOL><DEDENT><DEDENT>else:<EOL><INDENT>version = None<EOL><DEDENT>self.request_roster(version)<EOL>", "docstring": "Request roster upon login.", "id": "f15245:c6:m4"}
{"signature": "@classmethod<EOL><INDENT>def from_xml(cls, element):<DEDENT>", "body": "if element.tag != ITEM_TAG:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(element))<EOL><DEDENT>try:<EOL><INDENT>jid = JID(element.get(\"<STR_LIT>\"))<EOL><DEDENT>except ValueError:<EOL><INDENT>raise BadRequestProtocolError(\"<STR_LIT>\")<EOL><DEDENT>subscription = element.get(\"<STR_LIT>\")<EOL>ask = element.get(\"<STR_LIT>\")<EOL>name = element.get(\"<STR_LIT:name>\")<EOL>duplicate_group = False<EOL>groups = set()<EOL>for child in element:<EOL><INDENT>if child.tag != GROUP_TAG:<EOL><INDENT>continue<EOL><DEDENT>group = child.text<EOL>if group is None:<EOL><INDENT>group = \"<STR_LIT>\"<EOL><DEDENT>if group in groups:<EOL><INDENT>duplicate_group = True<EOL><DEDENT>else:<EOL><INDENT>groups.add(group)<EOL><DEDENT><DEDENT>approved = element.get(\"<STR_LIT>\")<EOL>if approved == \"<STR_LIT:true>\":<EOL><INDENT>approved = True<EOL><DEDENT>elif approved in (\"<STR_LIT:false>\", None):<EOL><INDENT>approved = False<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(approved))<EOL>approved = False<EOL><DEDENT>result = cls(jid, name, groups, subscription, ask, approved)<EOL>result._duplicate_group = duplicate_group<EOL>return result<EOL>", "docstring": "Make a RosterItem from an XML element.\n\n        :Parameters:\n            - `element`: the XML element\n        :Types:\n            - `element`: :etree:`ElementTree.Element`\n\n        :return: a freshly created roster item\n        :returntype: `cls`", "id": "f15245:c3:m1"}
{"signature": "def remove_item(self, jid, callback = None, error_callback = None):", "body": "item = self.roster[jid]<EOL>if jid not in self.roster:<EOL><INDENT>raise KeyError(jid)<EOL><DEDENT>item = RosterItem(jid, subscription = \"<STR_LIT>\")<EOL>self._roster_set(item, callback, error_callback)<EOL>", "docstring": "Remove a contact from the roster.\n\n        :Parameters:\n            - `jid`: contact's jid\n            - `callback`: function to call when the request succeeds. It should\n              accept a single argument - a `RosterItem` describing the\n              requested change\n            - `error_callback`: function to call when the request fails. It\n              should accept a single argument - an error stanza received\n              (`None` in case of timeout)\n        :Types:\n            - `jid`: `JID`", "id": "f15245:c6:m11"}
{"signature": "def __init__(self, items = None, version = None):", "body": "if items is not None:<EOL><INDENT>self._items = list(items)<EOL><DEDENT>else:<EOL><INDENT>self._items = []<EOL><DEDENT>self.version = version<EOL>", "docstring": ":Parameters:\n    - `items`: sequence of roster items\n    - `version`: optional roster version string\n:Types:\n    - `items`: iterable\n    - `version`: `unicode`", "id": "f15245:c4:m0"}
{"signature": "def load_roster(self, source):", "body": "try:<EOL><INDENT>tree = ElementTree.parse(source)<EOL><DEDENT>except ElementTree.ParseError as err:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(err))<EOL><DEDENT>roster = Roster.from_xml(tree.getroot())<EOL>for item in roster:<EOL><INDENT>item.verify_roster_result(True)<EOL><DEDENT>self.roster = roster<EOL>", "docstring": "Load roster from an XML file.\n\n        Can be used before the connection is started to load saved\n        roster copy, for efficient retrieval of versioned roster.\n\n        :Parameters:\n            - `source`: file name or a file object\n        :Types:\n            - `source`: `str` or file-like object", "id": "f15245:c6:m1"}
{"signature": "@classmethod<EOL><INDENT>def from_xml(cls, element):<DEDENT>", "body": "<EOL>items = []<EOL>jids = set()<EOL>if element.tag != QUERY_TAG:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(element))<EOL><DEDENT>version = element.get(\"<STR_LIT>\")<EOL>for child in element:<EOL><INDENT>if child.tag != ITEM_TAG:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(child))<EOL>continue<EOL><DEDENT>item = RosterItem.from_xml(child)<EOL>if item.jid in jids:<EOL><INDENT>logger.warning(\"<STR_LIT>\".format(<EOL>item.jid))<EOL>continue<EOL><DEDENT>jids.add(item.jid)<EOL>items.append(item)<EOL><DEDENT>return cls(items, version)<EOL>", "docstring": "Create a `RosterPayload` object from an XML element.\n\n:Parameters:\n    - `element`: the XML element\n:Types:\n    - `element`: :etree:`ElementTree.Element`\n\n:return: a freshly created roster payload\n:returntype: `cls`", "id": "f15245:c4:m1"}
{"signature": "def _roster_set(self, item, callback, error_callback):", "body": "stanza = Iq(to_jid = self.server, stanza_type = \"<STR_LIT>\")<EOL>payload = RosterPayload([item])<EOL>stanza.set_payload(payload)<EOL>def success_cb(result_stanza):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if callback:<EOL><INDENT>callback(item)<EOL><DEDENT><DEDENT>def error_cb(error_stanza):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if error_callback:<EOL><INDENT>error_callback(error_stanza)<EOL><DEDENT>else:<EOL><INDENT>logger.error(\"<STR_LIT>\".format(item.jid))<EOL><DEDENT><DEDENT>processor = self.stanza_processor<EOL>processor.set_response_handlers(stanza,<EOL>success_cb, error_cb)<EOL>processor.send(stanza)<EOL>", "docstring": "Send a 'roster set' to the server.\n\n        :Parameters:\n            - `item`: the requested change\n        :Types:\n            - `item`: `RosterItem`", "id": "f15245:c6:m12"}
{"signature": "def __init__(self, jid, name = None, groups = None,<EOL>subscription = None, ask = None, approved = None):", "body": "<EOL>self.jid = JID(jid)<EOL>if name is not None:<EOL><INDENT>self.name = str(name)<EOL><DEDENT>else:<EOL><INDENT>self.name = None<EOL><DEDENT>if groups is not None:<EOL><INDENT>self.groups = set(groups)<EOL><DEDENT>else:<EOL><INDENT>self.groups = set()<EOL><DEDENT>if subscription == \"<STR_LIT:none>\":<EOL><INDENT>subscription = None<EOL><DEDENT>self.subscription = subscription<EOL>if ask is not None:<EOL><INDENT>self.ask = ask<EOL><DEDENT>else:<EOL><INDENT>self.ask = None<EOL><DEDENT>self.approved = bool(approved)<EOL>self._duplicate_group = False<EOL>", "docstring": "Initialize a roster item element.\n\n:Parameters:\n    - `jid`: entry jid\n    - `name`: item visible name\n    - `groups`: iterable of groups the item is member of\n    - `subscription`: subscription type (None, \"to\", \"from\", \"both\"\n                                                            or \"remove\")\n    - `ask`: \"subscribe\" if there was unreplied subscription request\n      sent\n    - `approved`: `True` if the entry subscription is pre-approved", "id": "f15245:c3:m0"}
{"signature": "def keys(self):", "body": "return list(self._jids.keys())<EOL>", "docstring": "Return the JIDs in the roster.\n\n        :Returntype: iterable of `JID`", "id": "f15245:c5:m4"}
{"signature": "def verify_roster_result(self, fix = False):", "body": "self._verify((None, \"<STR_LIT>\", \"<STR_LIT:to>\", \"<STR_LIT>\"), fix)<EOL>", "docstring": "Check if `self` is valid roster item.\n\n        Valid item must have proper `subscription` value other than 'remove'\n        and valid value for 'ask'.\n\n        :Parameters:\n            - `fix`: if `True` than replace invalid 'subscription' and 'ask'\n              values with the defaults\n        :Types:\n            - `fix`: `bool`\n\n        :Raise: `ValueError` if the item is invalid.", "id": "f15245:c3:m4"}
{"signature": "def _get_success(self, stanza):", "body": "payload = stanza.get_payload(RosterPayload)<EOL>if payload is None:<EOL><INDENT>if \"<STR_LIT>\" in self.server_features and self.roster:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL>self._event_queue.put(RosterNotReceivedEvent(self, stanza))<EOL>return<EOL><DEDENT><DEDENT>else:<EOL><INDENT>items = list(payload)<EOL>for item in items:<EOL><INDENT>item.verify_roster_result(True)<EOL><DEDENT>self.roster = Roster(items, payload.version)<EOL><DEDENT>self._event_queue.put(RosterReceivedEvent(self, self.roster))<EOL>", "docstring": "Handle successful response to the roster request.", "id": "f15245:c6:m6"}
{"signature": "def get_items_by_name(self, name, case_sensitive = True):", "body": "if not case_sensitive and name:<EOL><INDENT>name = name.lower()<EOL><DEDENT>result = []<EOL>for item in self._items:<EOL><INDENT>if item.name == name:<EOL><INDENT>result.append(item)<EOL><DEDENT>elif item.name is None:<EOL><INDENT>continue<EOL><DEDENT>elif not case_sensitive and item.name.lower() == name:<EOL><INDENT>result.append(item)<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Return a list of items with given name.\n\n:Parameters:\n    - `name`: name to look-up\n    - `case_sensitive`: if `False` the matching will be case\n      insensitive.\n:Types:\n    - `name`: `unicode`\n    - `case_sensitive`: `bool`\n\n:Returntype: `list` of `RosterItem`", "id": "f15245:c5:m7"}
{"signature": "def as_xml(self, parent = None):", "body": "if parent is not None:<EOL><INDENT>element = ElementTree.SubElement(parent, ITEM_TAG)<EOL><DEDENT>else:<EOL><INDENT>element = ElementTree.Element(ITEM_TAG)<EOL><DEDENT>element.set(\"<STR_LIT>\", str(self.jid))<EOL>if self.name is not None:<EOL><INDENT>element.set(\"<STR_LIT:name>\", self.name)<EOL><DEDENT>if self.subscription is not None:<EOL><INDENT>element.set(\"<STR_LIT>\", self.subscription)<EOL><DEDENT>if self.ask:<EOL><INDENT>element.set(\"<STR_LIT>\", self.ask)<EOL><DEDENT>if self.approved:<EOL><INDENT>element.set(\"<STR_LIT>\", \"<STR_LIT:true>\")<EOL><DEDENT>for group in self.groups:<EOL><INDENT>ElementTree.SubElement(element, GROUP_TAG).text = group<EOL><DEDENT>return element<EOL>", "docstring": "Make an XML element from self.\n\n        :Parameters:\n            - `parent`: Parent element\n        :Types:\n            - `parent`: :etree:`ElementTree.Element`", "id": "f15245:c3:m2"}
{"signature": "def verify_roster_push(self, fix = False):", "body": "self._verify((None, \"<STR_LIT>\", \"<STR_LIT:to>\", \"<STR_LIT>\", \"<STR_LIT>\"), fix)<EOL>", "docstring": "Check if `self` is valid roster push item.\n\n        Valid item must have proper `subscription` value other and valid value\n        for 'ask'.\n\n        :Parameters:\n            - `fix`: if `True` than replace invalid 'subscription' and 'ask'\n              values with the defaults\n        :Types:\n            - `fix`: `bool`\n\n        :Raise: `ValueError` if the item is invalid.", "id": "f15245:c3:m5"}
{"signature": "def get_items_by_group(self, group, case_sensitive = True):", "body": "result = []<EOL>if not group:<EOL><INDENT>for item in self._items:<EOL><INDENT>if not item.groups:<EOL><INDENT>result.append(item)<EOL><DEDENT><DEDENT>return result<EOL><DEDENT>if not case_sensitive:<EOL><INDENT>group = group.lower()<EOL><DEDENT>for item in self._items:<EOL><INDENT>if group in item.groups:<EOL><INDENT>result.append(item)<EOL><DEDENT>elif not case_sensitive and group in [g.lower() for g<EOL>in item.groups]:<EOL><INDENT>result.append(item)<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Return a list of items within a given group.\n\n:Parameters:\n    - `group`: group name to look-up\n    - `case_sensitive`: if `False` the matching will be case\n      insensitive.\n:Types:\n    - `group`: `unicode`\n    - `case_sensitive`: `bool`\n\n:Returntype: `list` of `RosterItem`", "id": "f15245:c5:m8"}
{"signature": "def _get_error(self, stanza):", "body": "if stanza:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(<EOL>stanza.error.condition_name))<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>self._event_queue.put(RosterNotReceivedEvent(self, stanza))<EOL>", "docstring": "Handle failure of the roster request.", "id": "f15245:c6:m7"}
{"signature": "def run(self):", "body": "self.client.connect()<EOL>self.client.run()<EOL>", "docstring": "Request client connection and start the main loop.", "id": "f15246:c0:m1"}
{"signature": "def _languages_factory(settings):", "body": "return [settings[\"<STR_LIT>\"]]<EOL>", "docstring": "Make the default value of the :r:`languages setting`.", "id": "f15247:m0"}
{"signature": "def set_authenticated(self, me, restart_stream = False):", "body": "with self.lock:<EOL><INDENT>self.authenticated = True<EOL>self.me = me<EOL>if restart_stream:<EOL><INDENT>self._restart_stream()<EOL><DEDENT><DEDENT>self.event(AuthenticatedEvent(self.me))<EOL>", "docstring": "Mark stream authenticated as `me`.\n\n        :Parameters:\n            - `me`: local JID just authenticated\n            - `restart_stream`: `True` when stream should be restarted (needed\n              after SASL authentication)\n        :Types:\n            - `me`: `JID`\n            - `restart_stream`: `bool`", "id": "f15247:c0:m32"}
{"signature": "def uplink_receive(self, stanza):", "body": "with self.lock:<EOL><INDENT>if self.stanza_route:<EOL><INDENT>self.stanza_route.uplink_receive(stanza)<EOL><DEDENT>else:<EOL><INDENT>logger.debug(u\"<STR_LIT>\".format(stanza))<EOL><DEDENT><DEDENT>", "docstring": "Handle stanza received from the stream.", "id": "f15247:c0:m25"}
{"signature": "def _restart_stream(self):", "body": "self._input_state = \"<STR_LIT>\"<EOL>self._output_state = \"<STR_LIT>\"<EOL>self.features = None<EOL>self.transport.restart()<EOL>if self.initiator:<EOL><INDENT>self._send_stream_start(self.stream_id)<EOL><DEDENT>", "docstring": "Restart the stream as needed after SASL and StartTLS negotiation.", "id": "f15247:c0:m17"}
{"signature": "def is_connected(self):", "body": "return self.transport.is_connected() and self._output_state == \"<STR_LIT>\"<EOL>", "docstring": "Check if stream is connected and stanzas may be sent.\n\n        :return: True if stream connection is active.", "id": "f15247:c0:m30"}
{"signature": "@property<EOL><INDENT>def auth_properties(self):<DEDENT>", "body": "props = dict(self.settings[\"<STR_LIT>\"])<EOL>if self.transport:<EOL><INDENT>props.update(self.transport.auth_properties)<EOL><DEDENT>props[\"<STR_LIT>\"] = self.me<EOL>props[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>return props<EOL>", "docstring": "Authentication properties of the stream.\n\n        Derived from the transport with 'local-jid' and 'service-type' added.", "id": "f15247:c0:m35"}
{"signature": "def _make_stream_features(self):", "body": "features = ElementTree.Element(FEATURES_TAG)<EOL>for handler in self._stream_feature_handlers:<EOL><INDENT>handler.make_stream_features(self, features)<EOL><DEDENT>return features<EOL>", "docstring": "Create the <features/> element for the stream.\n\n        [receving entity only]\n\n        :returns: new <features/> element\n        :returntype: :etree:`ElementTree.Element`", "id": "f15247:c0:m18"}
{"signature": "def send_stream_error(self, condition):", "body": "with self.lock:<EOL><INDENT>self._send_stream_error(condition)<EOL><DEDENT>", "docstring": "Send stream error element.\n\n        :Parameters:\n            - `condition`: stream error condition name, as defined in the\n              XMPP specification.", "id": "f15247:c0:m15"}
{"signature": "def _got_features(self, features):", "body": "self.features = features<EOL>logger.debug(\"<STR_LIT>\")<EOL>handled = self.event(GotFeaturesEvent(self.features))<EOL>logger.debug(\"<STR_LIT>\".format(handled))<EOL>if not handled:<EOL><INDENT>mandatory_handled = []<EOL>mandatory_not_handled = []<EOL>logger.debug(\"<STR_LIT>\"<EOL>.format(self._stream_feature_handlers))<EOL>for handler in self._stream_feature_handlers:<EOL><INDENT>ret = handler.handle_stream_features(self, self.features)<EOL>if ret is None:<EOL><INDENT>continue<EOL><DEDENT>elif isinstance(ret, StreamFeatureHandled):<EOL><INDENT>if ret.mandatory:<EOL><INDENT>mandatory_handled.append(unicode(ret))<EOL>break<EOL><DEDENT>break<EOL><DEDENT>elif isinstance(ret, StreamFeatureNotHandled):<EOL><INDENT>if ret.mandatory:<EOL><INDENT>mandatory_not_handled.append(unicode(ret))<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(ret))<EOL><DEDENT><DEDENT>if mandatory_not_handled and not mandatory_handled:<EOL><INDENT>self.send_stream_error(\"<STR_LIT>\")<EOL>raise FatalStreamError(<EOL>u\"<STR_LIT>\"<EOL>+ u\"<STR_LIT:U+0020>\".join(mandatory_not_handled))<EOL><DEDENT><DEDENT>", "docstring": "Process incoming <stream:features/> element.\n\n        [initiating entity only]\n\n        The received features node is available in `features`.", "id": "f15247:c0:m29"}
{"signature": "def fix_in_stanza(self, stanza):", "body": "<EOL>return stanza<EOL>", "docstring": "Fix incoming stanza, setting the implicit fields.\n\n        Used for for servers side of client stream to set proper stanza from.", "id": "f15247:c0:m33"}
{"signature": "def close(self):", "body": "self.transport.close()<EOL>", "docstring": "Forcibly close the connection and clear the stream state.", "id": "f15247:c0:m8"}
{"signature": "def transport_connected(self):", "body": "with self.lock:<EOL><INDENT>if self.initiator:<EOL><INDENT>if self._output_state is None:<EOL><INDENT>self._initiate()<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Called when transport has been connected.\n\n        Send the stream head if initiator.", "id": "f15247:c0:m7"}
{"signature": "def event(self, event): ", "body": "event.stream = self<EOL>logger.debug(u\"<STR_LIT>\".format(event))<EOL>self.settings[\"<STR_LIT>\"].put(event)<EOL>return False<EOL>", "docstring": "Handle a stream event.\n\n        Called when connection state is changed.\n\n        Should not be called with self.lock acquired!", "id": "f15247:c0:m6"}
{"signature": "def generate_id(self):", "body": "<EOL>return unicode(uuid.uuid4())<EOL>", "docstring": "Generate a random and unique stream ID.\n\n        :return: the id string generated.", "id": "f15247:c0:m28"}
{"signature": "def _write_element(self, element):", "body": "self.transport.send_element(element)<EOL>", "docstring": "Same as `write_element` but with `lock` already acquired.", "id": "f15247:c0:m21"}
{"signature": "def __init__(self, stanza_namespace, stanza_route, handlers,<EOL>settings = None):", "body": "XMLStreamHandler.__init__(self)<EOL>self.lock = threading.RLock()<EOL>if settings is None:<EOL><INDENT>settings = XMPPSettings()<EOL><DEDENT>self.settings = settings<EOL>self.stanza_namespace = stanza_namespace<EOL>self._stanza_namespace_p = \"<STR_LIT>\".format(stanza_namespace)<EOL>self.stanza_route = stanza_route<EOL>self.handlers = handlers<EOL>self._stream_feature_handlers = []<EOL>for handler in handlers:<EOL><INDENT>if isinstance(handler, StreamFeatureHandler):<EOL><INDENT>self._stream_feature_handlers.append(handler)<EOL><DEDENT><DEDENT>self.me = None<EOL>self.peer = None<EOL>self.stream_id = None<EOL>self.initiator = None<EOL>self.features = None<EOL>self.authenticated = False<EOL>self.peer_authenticated = False<EOL>self.tls_established = False<EOL>self.auth_method_used = None<EOL>self.version = None<EOL>self.language = None<EOL>self.peer_language = None<EOL>self.transport = None<EOL>self._input_state = None<EOL>self._output_state = None<EOL>self._element_handlers = {}<EOL>", "docstring": "Initialize StreamBase object\n\n        :Parameters:\n          - `stanza_namespace`: stream's default namespace URI (\"jabber:client\"\n            for client, \"jabber:server\" for server, etc.)\n          - `stanza_route`: object to handle received stanzas\n          - `handlers`: objects to handle the stream events and elements\n          - `settings`: extra settings\n        :Types:\n          - `stanza_namespace`: `unicode`\n          - `stanza_route`: `StanzaRoute`\n          - `settings`: XMPPSettings\n          - `handlers`: `list` of objects", "id": "f15247:c0:m0"}
{"signature": "def write_element(self, element):", "body": "with self.lock:<EOL><INDENT>self._write_element(element)<EOL><DEDENT>", "docstring": "Write XML `element` to the stream.\n\n        :Parameters:\n            - `element`: Element node to send.\n        :Types:\n            - `element`: :etree:`ElementTree.Element`", "id": "f15247:c0:m20"}
{"signature": "def receive(self, transport, myname):", "body": "with self.lock:<EOL><INDENT>self.transport = transport<EOL>transport.set_target(self)<EOL>self.me = JID(myname)<EOL>self.initiator = False<EOL>self._setup_stream_element_handlers()<EOL><DEDENT>", "docstring": "Receive an XMPP connection over the `transport`.\n\n        :Parameters:\n            - `transport`: an XMPP transport instance\n            - `myname`: local stream endpoint name.", "id": "f15247:c0:m3"}
{"signature": "def stream_parse_error(self, descr):", "body": "self.send_stream_error(\"<STR_LIT>\")<EOL>raise StreamParseError(descr)<EOL>", "docstring": "Called when an error is encountered in the stream.\n\n        :Parameters:\n            - `descr`: description of the error\n        :Types:\n            - `descr`: `unicode`", "id": "f15247:c0:m13"}
{"signature": "def _process_element(self, element):", "body": "tag = element.tag<EOL>if tag in self._element_handlers:<EOL><INDENT>handler = self._element_handlers[tag]<EOL>logger.debug(\"<STR_LIT>\"<EOL>.format(element, handler))<EOL>handled = handler(self, element)<EOL>if handled:<EOL><INDENT>return<EOL><DEDENT><DEDENT>if tag.startswith(self._stanza_namespace_p):<EOL><INDENT>stanza = stanza_factory(element, self, self.language)<EOL>self.uplink_receive(stanza)<EOL><DEDENT>elif tag == ERROR_TAG:<EOL><INDENT>error = StreamErrorElement(element)<EOL>self.process_stream_error(error)<EOL><DEDENT>elif tag == FEATURES_TAG:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(serialize(element)))<EOL>self._got_features(element)<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(serialize(element)))<EOL>logger.debug(\"<STR_LIT>\".format(<EOL>self._element_handlers))<EOL><DEDENT>", "docstring": "Process first level element of the stream.\n\n        The element may be stream error or features, StartTLS\n        request/response, SASL request/response or a stanza.\n\n        :Parameters:\n            - `element`: XML element\n        :Types:\n            - `element`: :etree:`ElementTree.Element`", "id": "f15247:c0:m24"}
{"signature": "def _send_stream_features(self):", "body": "self.features = self._make_stream_features()<EOL>self._write_element(self.features)<EOL>", "docstring": "Send stream <features/>.\n\n        [receiving entity only]", "id": "f15247:c0:m19"}
{"signature": "def set_peer_authenticated(self, peer, restart_stream = False):", "body": "with self.lock:<EOL><INDENT>self.peer_authenticated = True<EOL>self.peer = peer<EOL>if restart_stream:<EOL><INDENT>self._restart_stream()<EOL><DEDENT><DEDENT>self.event(AuthenticatedEvent(self.peer))<EOL>", "docstring": "Mark the other side of the stream authenticated as `peer`\n\n        :Parameters:\n            - `peer`: local JID just authenticated\n            - `restart_stream`: `True` when stream should be restarted (needed\n              after SASL authentication)\n        :Types:\n            - `peer`: `JID`\n            - `restart_stream`: `bool`", "id": "f15247:c0:m31"}
{"signature": "def _initiate(self):", "body": "self._setup_stream_element_handlers()<EOL>self._send_stream_start()<EOL>", "docstring": "Initiate an XMPP connection over a connected `transport`.\n\n        [ called with `lock` acquired ]", "id": "f15247:c0:m2"}
{"signature": "def request_software_version(stanza_processor, target_jid, callback,<EOL>error_callback = None):", "body": "stanza = Iq(to_jid = target_jid, stanza_type = \"<STR_LIT>\")<EOL>payload = VersionPayload()<EOL>stanza.set_payload(payload)<EOL>def wrapper(stanza):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>payload = stanza.get_payload(VersionPayload)<EOL>if payload is None:<EOL><INDENT>if error_callback:<EOL><INDENT>error_callback(stanza)<EOL><DEDENT>else:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>callback(payload)<EOL><DEDENT><DEDENT>stanza_processor.set_response_handlers(stanza, wrapper, error_callback)<EOL>stanza_processor.send(stanza)<EOL>", "docstring": "Request software version information from a remote entity.\n\n    When a valid response is received the `callback` will be called\n    with a `VersionPayload` instance as its only argument. The object will\n    provide the requested information.\n\n    In case of error stanza received or invalid response the `error_callback`\n    (if provided) will be called with the offending stanza (which can\n    be ``<iq type='error'/>`` or ``<iq type='result'>``) as its argument.\n\n    The same function will be called on timeout, with the argument set to\n    `None`.\n\n    :Parameters:\n        - `stanza_processor`: an object used to send the query and handle\n          response. E.g. a `pyxmpp2.client.Client` instance\n        - `target_jid`: the JID of the entity to query\n        - `callback`: function to be called with a valid response\n        - `error_callback`: function to be called on error\n    :Types:\n        - `stanza_processor`: `StanzaProcessor`\n        - `target_jid`: `JID`", "id": "f15248:m0"}
{"signature": "def _version_factory(settings):", "body": "<EOL>if \"<STR_LIT>\" not in settings:<EOL><INDENT>return unicode(pyxmpp2_version)<EOL><DEDENT>else:<EOL><INDENT>return u\"<STR_LIT>\".format(pyxmpp2_version)<EOL><DEDENT>", "docstring": "Factory for the :r:`software_version setting` default.", "id": "f15248:m2"}
{"signature": "def __init__(self,node_or_datetime,delay_from=None,reason=None,utc=True):", "body": "if isinstance(node_or_datetime,libxml2.xmlNode):<EOL><INDENT>self.from_xml(node_or_datetime)<EOL><DEDENT>else:<EOL><INDENT>if utc:<EOL><INDENT>self.timestamp=node_or_datetime<EOL><DEDENT>else:<EOL><INDENT>self.timestamp=datetime_local_to_utc(node_or_datetime)<EOL><DEDENT>self.delay_from=JID(delay_from)<EOL>self.reason=unicode(reason)<EOL><DEDENT>", "docstring": "Initialize the Delay object.\n\n:Parameters:\n    - `node_or_datetime`: an XML node to parse or the timestamp.\n    - `delay_from`: JID of the entity which adds the delay mark\n      (when `node_or_datetime` is a timestamp).\n    - `reason`: reason of the delay (when `node_or_datetime` is a\n      timestamp).\n    - `utc`: if `True` then the timestamp is assumed to be UTC,\n      otherwise it is assumed to be local time.\n:Types:\n    - `node_or_datetime`: `libxml2.xmlNode` or `datetime.datetime`\n    - `delay_from`: `pyxmpp.JID`\n    - `reason`: `unicode`\n    - `utc`: `bool`", "id": "f15249:c0:m0"}
{"signature": "def invalidate_features(self):", "body": "warnings.warn(\"<STR_LIT>\", DeprecationWarning, stacklevel=<NUM_LIT:1>)<EOL>", "docstring": "Clear cached feature list.", "id": "f15250:c3:m7"}
{"signature": "def set_category(self, category):", "body": "if not category:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>category = str(category)<EOL>self.xmlnode.setProp(\"<STR_LIT>\", category.encode(\"<STR_LIT:utf-8>\"))<EOL>", "docstring": "Set the category of the item.\n\n        :Parameters:\n            - `category`: the new category.\n        :Types:\n            - `category`: `unicode`", "id": "f15250:c1:m7"}
{"signature": "def __timeout(self,stanza):", "body": "pass<EOL>", "docstring": "Handle disco timeout.", "id": "f15250:c4:m3"}
{"signature": "def get_name(self):", "body": "var = self.xmlnode.prop(\"<STR_LIT:name>\")<EOL>if not var:<EOL><INDENT>var = \"<STR_LIT>\"<EOL><DEDENT>return var.decode(\"<STR_LIT:utf-8>\")<EOL>", "docstring": "Get the name of the item.\n\n        :return: the name of the item or `None`.\n        :returntype: `unicode`", "id": "f15250:c1:m4"}
{"signature": "def __init__(self,xmlnode_or_node=None, parent=None, doc=None):", "body": "self.xmlnode=None<EOL>self.xpath_ctxt=None<EOL>if not doc:<EOL><INDENT>doc=common_doc<EOL><DEDENT>if not parent:<EOL><INDENT>parent=common_root<EOL><DEDENT>if isinstance(xmlnode_or_node,libxml2.xmlNode):<EOL><INDENT>ns=xmlnode_or_node.ns()<EOL>if ns.getContent() != DISCO_INFO_NS:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.xmlnode=xmlnode_or_node.docCopyNode(doc,<NUM_LIT:1>)<EOL>parent.addChild(self.xmlnode)<EOL><DEDENT>else:<EOL><INDENT>self.xmlnode=parent.newChild(None,\"<STR_LIT>\",None)<EOL>self.ns=self.xmlnode.newNs(DISCO_INFO_NS,None)<EOL>self.xmlnode.setNs(self.ns)<EOL>self.set_node(xmlnode_or_node)<EOL><DEDENT>self.xpath_ctxt=doc.xpathNewContext()<EOL>self.xpath_ctxt.setContextNode(self.xmlnode)<EOL>self.xpath_ctxt.xpathRegisterNs(\"<STR_LIT:d>\",DISCO_INFO_NS)<EOL>", "docstring": "Initialize an `DiscoInfo` object.\n\n        Wrap an existing disco#info XML element or create a new one.\n\n        :Parameters:\n            - `xmlnode_or_node`: XML node to be wrapped into `self` or an item\n              node name.\n            - `parent`: parent node for the `DiscoInfo` element.\n            - `doc`: document for the `DiscoInfo` element.\n        :Types:\n            - `xmlnode_or_node`: `libxml2.xmlNode` or `unicode`\n            - `parent`: `libxml2.xmlNode`\n            - `doc`: `libxml2.xmlDoc`", "id": "f15250:c3:m0"}
{"signature": "def remove(self):", "body": "if self.disco is None:<EOL><INDENT>return<EOL><DEDENT>self.xmlnode.unlinkNode()<EOL>oldns=self.xmlnode.ns()<EOL>ns=self.xmlnode.newNs(oldns.getContent(),None)<EOL>self.xmlnode.replaceNs(oldns,ns)<EOL>common_root.addChild(self.xmlnode())<EOL>self.disco=None<EOL>", "docstring": "Remove `self` from the containing `DiscoItems` object.", "id": "f15250:c0:m3"}
{"signature": "def has_feature(self,var):", "body": "if not var:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if '<STR_LIT:\">' not in var:<EOL><INDENT>expr='<STR_LIT>' % (var,)<EOL><DEDENT>elif \"<STR_LIT:'>\" not in var:<EOL><INDENT>expr=\"<STR_LIT>\" % (var,)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>l=self.xpath_ctxt.xpathEval(to_utf8(expr))<EOL>if l:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Check if `self` contains the named feature.\n\n        :Parameters:\n            - `var`: the feature name.\n        :Types:\n            - `var`: `unicode`\n\n        :return: `True` if the feature is found in `self`.\n        :returntype: `bool`", "id": "f15250:c3:m6"}
{"signature": "def add_item(self,jid,node=None,name=None,action=None):", "body": "return DiscoItem(self,jid,node,name,action)<EOL>", "docstring": "Add a new item to the `DiscoItems` object.\n\n        :Parameters:\n            - `jid`: item JID.\n            - `node`: item node name.\n            - `name`: item name.\n            - `action`: action for a \"disco push\".\n        :Types:\n            - `jid`: `pyxmpp.JID`\n            - `node`: `unicode`\n            - `name`: `unicode`\n            - `action`: `unicode`\n\n        :returns: the item created.\n        :returntype: `DiscoItem`.", "id": "f15250:c2:m7"}
{"signature": "def add_identity(self,item_name,item_category=None,item_type=None):", "body": "return DiscoIdentity(self,item_name,item_category,item_type)<EOL>", "docstring": "Add an identity to the `DiscoInfo` object.\n\n        :Parameters:\n            - `item_name`: name of the item.\n            - `item_category`: category of the item.\n            - `item_type`: type of the item.\n        :Types:\n            - `item_name`: `unicode`\n            - `item_category`: `unicode`\n            - `item_type`: `unicode`\n\n        :returns: the identity created.\n        :returntype: `DiscoIdentity`", "id": "f15250:c3:m14"}
{"signature": "def get_node(self):", "body": "node = self.xmlnode.prop(\"<STR_LIT>\")<EOL>if node is None:<EOL><INDENT>return None<EOL><DEDENT>return node.decode(\"<STR_LIT:utf-8>\")<EOL>", "docstring": "Get the node of the item.\n\n        :return: the node of the item or `None`.\n        :returntype: `unicode`", "id": "f15250:c0:m6"}
{"signature": "def remove(self):", "body": "if self.disco is None:<EOL><INDENT>return<EOL><DEDENT>self.xmlnode.unlinkNode()<EOL>oldns=self.xmlnode.ns()<EOL>ns=self.xmlnode.newNs(oldns.getContent(),None)<EOL>self.xmlnode.replaceNs(oldns,ns)<EOL>common_root.addChild(self.xmlnode())<EOL>self.disco=None<EOL>", "docstring": "Remove `self` from the containing `DiscoInfo` object.", "id": "f15250:c1:m3"}
{"signature": "def set_identities(self,identities):", "body": "for identity in self.identities:<EOL><INDENT>identity.remove()<EOL><DEDENT>for identity in identities:<EOL><INDENT>try:<EOL><INDENT>self.add_identity(identity.name,identity.category,identity.type)<EOL><DEDENT>except AttributeError:<EOL><INDENT>self.add_identity(*identity)<EOL><DEDENT><DEDENT>", "docstring": "Set identities in the disco#info object.\n\n        Remove all existing identities from `self`.\n\n        :Parameters:\n            - `identities`: list of identities or identity properties\n              (jid,node,category,type,name).\n        :Types:\n            - `identities`: sequence of `DiscoIdentity` or sequence of sequences", "id": "f15250:c3:m11"}
{"signature": "def get_action(self):", "body": "action=self.xmlnode.prop(\"<STR_LIT:action>\")<EOL>if action is None:<EOL><INDENT>return None<EOL><DEDENT>return action.decode(\"<STR_LIT:utf-8>\")<EOL>", "docstring": "Get the action attribute of the item.\n\n        :return: the action of the item or `None`.\n        :returntype: `unicode`", "id": "f15250:c0:m8"}
{"signature": "def __init__(self,xmlnode_or_node=None):", "body": "self.xmlnode=None<EOL>self.xpath_ctxt=None<EOL>if isinstance(xmlnode_or_node,libxml2.xmlNode):<EOL><INDENT>ns=xmlnode_or_node.ns()<EOL>if ns.getContent() != DISCO_ITEMS_NS:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.xmlnode=xmlnode_or_node.docCopyNode(common_doc,<NUM_LIT:1>)<EOL>common_root.addChild(self.xmlnode)<EOL>self.ns=self.xmlnode.ns()<EOL><DEDENT>else:<EOL><INDENT>self.xmlnode=common_root.newChild(None,\"<STR_LIT>\",None)<EOL>self.ns=self.xmlnode.newNs(DISCO_ITEMS_NS,None)<EOL>self.xmlnode.setNs(self.ns)<EOL>self.set_node(xmlnode_or_node)<EOL><DEDENT>self.xpath_ctxt=common_doc.xpathNewContext()<EOL>self.xpath_ctxt.setContextNode(self.xmlnode)<EOL>self.xpath_ctxt.xpathRegisterNs(\"<STR_LIT:d>\",DISCO_ITEMS_NS)<EOL>", "docstring": "Initialize an `DiscoItems` object.\n\n        Wrap an existing disco#items XML element or create a new one.\n\n        :Parameters:\n            - `xmlnode_or_node`: XML node to be wrapped into `self` or an item\n              node name.\n        :Types:\n            - `xmlnode_or_node`: `libxml2.xmlNode` or `unicode`", "id": "f15250:c2:m0"}
{"signature": "def get_node(self):", "body": "node = self.xmlnode.prop(\"<STR_LIT>\")<EOL>if not node:<EOL><INDENT>return None<EOL><DEDENT>return node.decode(\"<STR_LIT:utf-8>\")<EOL>", "docstring": "Get the node address of the `DiscoItems` object.\n\n        :return: the node name.\n        :returntype: `unicode`", "id": "f15250:c2:m2"}
{"signature": "def get_jid(self):", "body": "jid = self.xmlnode.prop(\"<STR_LIT>\")<EOL>return JID( jid.decode(\"<STR_LIT:utf-8>\") )<EOL>", "docstring": "Get the JID of the item.\n\n        :return: the JID of the item.\n        :returntype: `JID`", "id": "f15250:c0:m10"}
{"signature": "def remove_feature(self,var):", "body": "if not var:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if '<STR_LIT:\">' not in var:<EOL><INDENT>expr='<STR_LIT>' % (var,)<EOL><DEDENT>elif \"<STR_LIT:'>\" not in var:<EOL><INDENT>expr=\"<STR_LIT>\" % (var,)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>l=self.xpath_ctxt.xpathEval(expr)<EOL>if not l:<EOL><INDENT>return<EOL><DEDENT>for f in l:<EOL><INDENT>f.unlinkNode()<EOL>f.freeNode()<EOL><DEDENT>", "docstring": "Remove a feature from `self`.\n\n        :Parameters:\n            - `var`: the feature name.\n        :Types:\n            - `var`: `unicode`", "id": "f15250:c3:m9"}
{"signature": "def has_item(self,jid,node=None):", "body": "l=self.xpath_ctxt.xpathEval(\"<STR_LIT>\")<EOL>if l is None:<EOL><INDENT>return False<EOL><DEDENT>for it in l:<EOL><INDENT>di=DiscoItem(self,it)<EOL>if di.jid==jid and di.node==node:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Check if `self` contains an item.\n\n        :Parameters:\n            - `jid`: JID of the item.\n            - `node`: node name of the item.\n        :Types:\n            - `jid`: `JID`\n            - `node`: `libxml2.xmlNode`\n\n        :return: `True` if the item is found in `self`.\n        :returntype: `bool`", "id": "f15250:c2:m8"}
{"signature": "def __error(self,stanza):", "body": "try:<EOL><INDENT>self.error(stanza.get_error())<EOL><DEDENT>except ProtocolError:<EOL><INDENT>from ..error import StanzaErrorNode<EOL>self.error(StanzaErrorNode(\"<STR_LIT>\"))<EOL><DEDENT>", "docstring": "Handle disco error response.\n\n        :Parameters:\n            - `stanza`: the stanza received.\n        :Types:\n            - `stanza`: `pyxmpp.stanza.Stanza`", "id": "f15250:c4:m2"}
{"signature": "def set_action(self,action):", "body": "if action is None:<EOL><INDENT>if self.xmlnode.hasProp(\"<STR_LIT:action>\"):<EOL><INDENT>self.xmlnode.unsetProp(\"<STR_LIT:action>\")<EOL><DEDENT>return<EOL><DEDENT>if action not in (\"<STR_LIT>\",\"<STR_LIT>\"):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>action = str(action)<EOL>self.xmlnode.setProp(\"<STR_LIT:action>\", action.encode(\"<STR_LIT:utf-8>\"))<EOL>", "docstring": "Set the action of the item.\n\n        :Parameters:\n            - `action`: the new action or `None`.\n        :Types:\n            - `action`: `unicode`", "id": "f15250:c0:m9"}
{"signature": "def _digest_auth_in_stage2(self, username, _unused, stanza):", "body": "digest=stanza.xpath_eval(\"<STR_LIT>\",{\"<STR_LIT:a>\":\"<STR_LIT>\"})<EOL>if digest:<EOL><INDENT>digest=digest[<NUM_LIT:0>].getContent()<EOL><DEDENT>if not digest:<EOL><INDENT>self.__logger.debug(\"<STR_LIT>\")<EOL>iq=stanza.make_error_response(\"<STR_LIT>\")<EOL>self.send(iq)<EOL>return<EOL><DEDENT>password,pwformat=self.get_password(username)<EOL>if not password or pwformat!=\"<STR_LIT>\":<EOL><INDENT>iq=stanza.make_error_response(\"<STR_LIT>\")<EOL>e=iq.get_error()<EOL>e.add_custom_condition('<STR_LIT>',\"<STR_LIT>\")<EOL>self.send(iq)<EOL>return<EOL><DEDENT>mydigest = hashlib.sha1(to_utf8(self.stream_id)+to_utf8(password)).hexdigest()<EOL>if mydigest==digest:<EOL><INDENT>iq=stanza.make_result_response()<EOL>self.send(iq)<EOL>self.peer_authenticated=True<EOL>self.auth_method_used=\"<STR_LIT>\"<EOL>self.state_change(\"<STR_LIT>\",self.peer)<EOL>self._post_auth()<EOL><DEDENT>else:<EOL><INDENT>self.__logger.debug(\"<STR_LIT>\" % (digest,mydigest))<EOL>iq=stanza.make_error_response(\"<STR_LIT>\")<EOL>e=iq.get_error()<EOL>e.add_custom_condition('<STR_LIT>',\"<STR_LIT>\")<EOL>self.send(iq)<EOL><DEDENT>", "docstring": "Handle the second stage (<iq type='set'/>) of legacy \"digest\"\n        authentication.\n\n        [server only]", "id": "f15251:c0:m14"}
{"signature": "def _reset(self):", "body": "ClientStream._reset(self)<EOL>self.available_auth_methods = None<EOL>self.auth_stanza = None<EOL>self.registration_callback = None<EOL>", "docstring": "Reset the `LegacyClientStream` object state, making the object ready\n        to handle new connections.", "id": "f15251:c0:m1"}
{"signature": "def _plain_auth_in_stage2(self, username, _unused, stanza):", "body": "password=stanza.xpath_eval(\"<STR_LIT>\",{\"<STR_LIT:a>\":\"<STR_LIT>\"})<EOL>if password:<EOL><INDENT>password=from_utf8(password[<NUM_LIT:0>].getContent())<EOL><DEDENT>if not password:<EOL><INDENT>self.__logger.debug(\"<STR_LIT>\")<EOL>iq=stanza.make_error_response(\"<STR_LIT>\")<EOL>self.send(iq)<EOL>return<EOL><DEDENT>if self.check_password(username,password):<EOL><INDENT>iq=stanza.make_result_response()<EOL>self.send(iq)<EOL>self.peer_authenticated=True<EOL>self.auth_method_used=\"<STR_LIT>\"<EOL>self.state_change(\"<STR_LIT>\",self.peer)<EOL>self._post_auth()<EOL><DEDENT>else:<EOL><INDENT>self.__logger.debug(\"<STR_LIT>\")<EOL>iq=stanza.make_error_response(\"<STR_LIT>\")<EOL>e=iq.get_error()<EOL>e.add_custom_condition('<STR_LIT>',\"<STR_LIT>\")<EOL>self.send(iq)<EOL><DEDENT>", "docstring": "Handle the second stage (<iq type='set'/>) of legacy \"plain\"\n        authentication.\n\n        [server only]", "id": "f15251:c0:m12"}
{"signature": "def _try_auth(self):", "body": "if self.authenticated:<EOL><INDENT>self.__logger.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>self.__logger.debug(\"<STR_LIT>\" % (self._auth_methods_left,))<EOL>if not self._auth_methods_left:<EOL><INDENT>raise LegacyAuthenticationError(\"<STR_LIT>\")<EOL><DEDENT>method=self._auth_methods_left[<NUM_LIT:0>]<EOL>if method.startswith(\"<STR_LIT>\"):<EOL><INDENT>return ClientStream._try_auth(self)<EOL><DEDENT>elif method not in (\"<STR_LIT>\",\"<STR_LIT>\"):<EOL><INDENT>self._auth_methods_left.pop(<NUM_LIT:0>)<EOL>self.__logger.debug(\"<STR_LIT>\" % method)<EOL>return self._try_auth()<EOL><DEDENT>elif self.available_auth_methods is not None:<EOL><INDENT>if method in self.available_auth_methods:<EOL><INDENT>self._auth_methods_left.pop(<NUM_LIT:0>)<EOL>self.auth_method_used=method<EOL>if method==\"<STR_LIT>\":<EOL><INDENT>self._digest_auth_stage2(self.auth_stanza)<EOL><DEDENT>else:<EOL><INDENT>self._plain_auth_stage2(self.auth_stanza)<EOL><DEDENT>self.auth_stanza=None<EOL>return<EOL><DEDENT>else:<EOL><INDENT>self.__logger.debug(\"<STR_LIT>\" % method)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._auth_stage1()<EOL><DEDENT>", "docstring": "Try to authenticate using the first one of allowed authentication\n        methods left.\n\n        [client only]", "id": "f15251:c0:m4"}
{"signature": "def _digest_auth_stage2(self, _unused):", "body": "iq=Iq(stanza_type=\"<STR_LIT>\")<EOL>q=iq.new_query(\"<STR_LIT>\")<EOL>q.newTextChild(None,\"<STR_LIT:username>\",to_utf8(self.my_jid.node))<EOL>q.newTextChild(None,\"<STR_LIT>\",to_utf8(self.my_jid.resource))<EOL>digest = hashlib.sha1(to_utf8(self.stream_id)+to_utf8(self.password)).hexdigest()<EOL>q.newTextChild(None,\"<STR_LIT>\",digest)<EOL>self.send(iq)<EOL>self.set_response_handlers(iq,self.auth_finish,self.auth_error)<EOL>iq.free()<EOL>", "docstring": "Do the second stage (<iq type='set'/>) of legacy \"digest\"\n        authentication.\n\n        [client only]", "id": "f15251:c0:m13"}
{"signature": "def auth_in_stage2(self,stanza):", "body": "self.lock.acquire()<EOL>try:<EOL><INDENT>if \"<STR_LIT>\" not in self.auth_methods and \"<STR_LIT>\" not in self.auth_methods:<EOL><INDENT>iq=stanza.make_error_response(\"<STR_LIT>\")<EOL>self.send(iq)<EOL>return<EOL><DEDENT>username=stanza.xpath_eval(\"<STR_LIT>\",{\"<STR_LIT:a>\":\"<STR_LIT>\"})<EOL>if username:<EOL><INDENT>username=from_utf8(username[<NUM_LIT:0>].getContent())<EOL><DEDENT>resource=stanza.xpath_eval(\"<STR_LIT>\",{\"<STR_LIT:a>\":\"<STR_LIT>\"})<EOL>if resource:<EOL><INDENT>resource=from_utf8(resource[<NUM_LIT:0>].getContent())<EOL><DEDENT>if not username or not resource:<EOL><INDENT>self.__logger.debug(\"<STR_LIT>\")<EOL>iq=stanza.make_error_response(\"<STR_LIT>\")<EOL>self.send(iq)<EOL>return<EOL><DEDENT>if stanza.xpath_eval(\"<STR_LIT>\",{\"<STR_LIT:a>\":\"<STR_LIT>\"}):<EOL><INDENT>if \"<STR_LIT>\" not in self.auth_methods:<EOL><INDENT>iq=stanza.make_error_response(\"<STR_LIT>\")<EOL>self.send(iq)<EOL>return<EOL><DEDENT>else:<EOL><INDENT>return self._plain_auth_in_stage2(username,resource,stanza)<EOL><DEDENT><DEDENT>if stanza.xpath_eval(\"<STR_LIT>\",{\"<STR_LIT:a>\":\"<STR_LIT>\"}):<EOL><INDENT>if \"<STR_LIT>\" not in self.auth_methods:<EOL><INDENT>iq=stanza.make_error_response(\"<STR_LIT>\")<EOL>self.send(iq)<EOL>return<EOL><DEDENT>else:<EOL><INDENT>return self._digest_auth_in_stage2(username,resource,stanza)<EOL><DEDENT><DEDENT><DEDENT>finally:<EOL><INDENT>self.lock.release()<EOL><DEDENT>", "docstring": "Handle the second stage (<iq type='set'/>) of legacy (\"plain\" or\n        \"digest\") authentication.\n\n        [server only]", "id": "f15251:c0:m6"}
{"signature": "def auth_error(self,stanza):", "body": "self.lock.acquire()<EOL>try:<EOL><INDENT>err=stanza.get_error()<EOL>ae=err.xpath_eval(\"<STR_LIT>\",{\"<STR_LIT:e>\":\"<STR_LIT>\"})<EOL>if ae:<EOL><INDENT>ae=ae[<NUM_LIT:0>].name<EOL><DEDENT>else:<EOL><INDENT>ae=err.get_condition().name<EOL><DEDENT>raise LegacyAuthenticationError(\"<STR_LIT>\"<EOL>% (ae,))<EOL><DEDENT>finally:<EOL><INDENT>self.lock.release()<EOL><DEDENT>", "docstring": "Handle legacy authentication error.\n\n        [client only]", "id": "f15251:c0:m9"}
{"signature": "def auth_timeout(self):", "body": "self.lock.acquire()<EOL>try:<EOL><INDENT>self.__logger.debug(\"<STR_LIT>\")<EOL>if self._auth_methods_left:<EOL><INDENT>self._auth_methods_left.pop(<NUM_LIT:0>)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>self.lock.release()<EOL><DEDENT>", "docstring": "Handle legacy authentication timeout.\n\n        [client only]", "id": "f15251:c0:m8"}
{"signature": "def _auth(self):", "body": "if self.authenticated:<EOL><INDENT>self.__logger.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>self.__logger.debug(\"<STR_LIT>\")<EOL>hash_value=self._compute_handshake()<EOL>n=common_root.newTextChild(None,\"<STR_LIT>\",hash_value)<EOL>self._write_node(n)<EOL>n.unlinkNode()<EOL>n.freeNode()<EOL>self.__logger.debug(\"<STR_LIT>\")<EOL>", "docstring": "Authenticate on the server.\n\n        [component only]", "id": "f15253:c0:m8"}
{"signature": "def accept(self,sock):", "body": "Stream.accept(self,sock,None)<EOL>", "docstring": "Accept an incoming component connection.\n\n        [server only]\n\n        :Parameters:\n            - `sock`: a listening socket.", "id": "f15253:c0:m4"}
{"signature": "def __init__(self, jid, secret, server, port, keepalive = <NUM_LIT:0>, owner = None):", "body": "Stream.__init__(self, \"<STR_LIT>\",<EOL>sasl_mechanisms = [],<EOL>tls_settings = None,<EOL>keepalive = keepalive,<EOL>owner = owner)<EOL>self.server=server<EOL>self.port=port<EOL>self.me=jid<EOL>self.secret=secret<EOL>self.process_all_stanzas=<NUM_LIT:1><EOL>self.__logger=logging.getLogger(\"<STR_LIT>\")<EOL>", "docstring": "Initialize a `ComponentStream` object.\n\n        :Parameters:\n            - `jid`: JID of the component.\n            - `secret`: authentication secret.\n            - `server`: server address.\n            - `port`: TCP port number on the server.\n            - `keepalive`: keepalive interval. 0 to disable.\n            - `owner`: `Client`, `Component` or similar object \"owning\" this stream.", "id": "f15253:c0:m0"}
{"signature": "def _post_connect(self):", "body": "if self.initiator:<EOL><INDENT>self._auth()<EOL><DEDENT>", "docstring": "Initialize authentication when the connection is established\n        and we are the initiator.", "id": "f15253:c0:m6"}
{"signature": "def _connect(self,server=None,port=None):", "body": "if self.me.node or self.me.resource:<EOL><INDENT>raise Value(\"<STR_LIT>\")<EOL><DEDENT>if not server:<EOL><INDENT>server=self.server<EOL><DEDENT>if not port:<EOL><INDENT>port=self.port<EOL><DEDENT>if not server or not port:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>Stream._connect(self,server,port,None,self.me)<EOL>", "docstring": "Same as `ComponentStream.connect` but assume `self.lock` is acquired.", "id": "f15253:c0:m3"}
{"signature": "def complete_xml_element(self, xmlnode, doc):", "body": "ns = xmlnode.ns()<EOL>if self.instructions is not None:<EOL><INDENT>xmlnode.newTextChild(ns, \"<STR_LIT>\", to_utf8(self.instructions))<EOL><DEDENT>if self.form:<EOL><INDENT>self.form.as_xml(xmlnode, doc)<EOL><DEDENT>if self.remove:<EOL><INDENT>xmlnode.newChild(ns, \"<STR_LIT>\", None)<EOL><DEDENT>else:<EOL><INDENT>if self.registered:<EOL><INDENT>xmlnode.newChild(ns, \"<STR_LIT>\", None)<EOL><DEDENT>for field in legacy_fields:<EOL><INDENT>value = getattr(self, field)<EOL>if value is not None:<EOL><INDENT>xmlnode.newTextChild(ns, field, to_utf8(value))<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Complete the XML node with `self` content.\n\n        :Parameters:\n            - `xmlnode`: XML node with the element being built. It has already\n              right name and namespace, but no attributes or content.\n            - `doc`: document to which the element belongs.\n        :Types:\n            - `xmlnode`: `libxml2.xmlNode`\n            - `doc`: `libxml2.xmlDoc`", "id": "f15254:c0:m2"}
{"signature": "def get_form(self, form_type = \"<STR_LIT>\"):", "body": "if self.form:<EOL><INDENT>if self.form.type != form_type:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return self.form<EOL><DEDENT>form = Form(form_type, instructions = self.instructions)<EOL>form.add_field(\"<STR_LIT>\", [u\"<STR_LIT>\"], \"<STR_LIT>\")<EOL>for field in legacy_fields:<EOL><INDENT>field_type, field_label = legacy_fields[field]<EOL>value = getattr(self, field)<EOL>if value is None:<EOL><INDENT>continue<EOL><DEDENT>if form_type == \"<STR_LIT>\":<EOL><INDENT>if not value:<EOL><INDENT>value = None<EOL><DEDENT>form.add_field(name = field, field_type = field_type, label = field_label,<EOL>value = value, required = True)<EOL><DEDENT>else:<EOL><INDENT>form.add_field(name = field, value = value)<EOL><DEDENT><DEDENT>return form<EOL>", "docstring": "Return Data Form for the `Register` object.\n\n        Convert legacy fields to a data form if `self.form` is `None`, return `self.form` otherwise.\n\n        :Parameters:\n            - `form_type`: If \"form\", then a form to fill-in should be\n              returned. If \"sumbit\", then a form with submitted data.\n        :Types:\n            - `form_type`: `unicode`\n\n        :return: `self.form` or a form created from the legacy fields\n        :returntype: `pyxmpp.jabber.dataforms.Form`", "id": "f15254:c0:m3"}
{"signature": "def clear_muc_child(self):", "body": "if self.muc_child:<EOL><INDENT>self.muc_child.free_borrowed()<EOL>self.muc_child=None<EOL><DEDENT>if not self.xmlnode.children:<EOL><INDENT>return<EOL><DEDENT>n=self.xmlnode.children<EOL>while n:<EOL><INDENT>if n.name not in (\"<STR_LIT:x>\",\"<STR_LIT>\"):<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>ns=n.ns()<EOL>if not ns:<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>ns_uri=ns.getContent()<EOL>if ns_uri in (MUC_NS,MUC_USER_NS,MUC_ADMIN_NS,MUC_OWNER_NS):<EOL><INDENT>n.unlinkNode()<EOL>n.freeNode()<EOL><DEDENT>n=n.next<EOL><DEDENT>", "docstring": "Remove the MUC specific stanza payload element.", "id": "f15255:c9:m2"}
{"signature": "def xpath_eval(self,expr):", "body": "ctxt = common_doc.xpathNewContext()<EOL>ctxt.setContextNode(self.xmlnode)<EOL>ctxt.xpathRegisterNs(\"<STR_LIT>\",self.ns.getContent())<EOL>ret=ctxt.xpathEval(to_utf8(expr))<EOL>ctxt.xpathFreeContext()<EOL>return ret<EOL>", "docstring": "Evaluate XPath expression in context of `self.xmlnode`.\n\n:Parameters:\n    - `expr`: the XPath expression\n:Types:\n    - `expr`: `unicode`\n\n:return: the result of the expression evaluation.\n:returntype: list of `libxml2.xmlNode`", "id": "f15255:c0:m4"}
{"signature": "def __init__(self):", "body": "if self.__class__ is MucStanzaExt:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>self.xmlnode=None<EOL>self.muc_child=None<EOL>", "docstring": "Initialize a `MucStanzaExt` derived object.", "id": "f15255:c9:m0"}
{"signature": "def set_history(self, parameters):", "body": "for child in xml_element_iter(self.xmlnode.children):<EOL><INDENT>if get_node_ns_uri(child) == MUC_NS and child.name == \"<STR_LIT>\":<EOL><INDENT>child.unlinkNode()<EOL>child.freeNode()<EOL>break<EOL><DEDENT><DEDENT>if parameters.maxchars and parameters.maxchars < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if parameters.maxstanzas and parameters.maxstanzas < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if parameters.maxseconds and parameters.maxseconds < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>hnode=self.xmlnode.newChild(self.xmlnode.ns(), \"<STR_LIT>\", None)<EOL>if parameters.maxchars is not None:<EOL><INDENT>hnode.setProp(\"<STR_LIT>\", str(parameters.maxchars))<EOL><DEDENT>if parameters.maxstanzas is not None:<EOL><INDENT>hnode.setProp(\"<STR_LIT>\", str(parameters.maxstanzas))<EOL><DEDENT>if parameters.maxseconds is not None:<EOL><INDENT>hnode.setProp(\"<STR_LIT>\", str(parameters.maxseconds))<EOL><DEDENT>if parameters.since is not None:<EOL><INDENT>hnode.setProp(\"<STR_LIT>\", parameters.since.strftime(\"<STR_LIT>\"))<EOL><DEDENT>", "docstring": "Set history parameters.\n\nTypes:\n    - `parameters`: `HistoryParameters`", "id": "f15255:c1:m1"}
{"signature": "def set_password(self, password):", "body": "for child in xml_element_iter(self.xmlnode.children):<EOL><INDENT>if get_node_ns_uri(child) == MUC_NS and child.name == \"<STR_LIT:password>\":<EOL><INDENT>child.unlinkNode()<EOL>child.freeNode()<EOL>break<EOL><DEDENT><DEDENT>if password is not None:<EOL><INDENT>self.xmlnode.newTextChild(self.xmlnode.ns(), \"<STR_LIT:password>\", to_utf8(password))<EOL><DEDENT>", "docstring": "Set password for the MUC request.\n\n        :Parameters:\n            - `password`: password\n        :Types:\n            - `password`: `unicode`", "id": "f15255:c1:m3"}
{"signature": "def __init__(self,xmlnode_or_affiliation,role=None,jid=None,nick=None,actor=None,reason=None):", "body": "self.jid,self.nick,self.actor,self.affiliation,self.reason,self.role=(None,)*<NUM_LIT:6><EOL>MucItemBase.__init__(self)<EOL>if isinstance(xmlnode_or_affiliation,libxml2.xmlNode):<EOL><INDENT>self.__from_xmlnode(xmlnode_or_affiliation)<EOL><DEDENT>else:<EOL><INDENT>self.__init(xmlnode_or_affiliation,role,jid,nick,actor,reason)<EOL><DEDENT>", "docstring": "Initialize a `MucItem` object.\n\n:Parameters:\n    - `xmlnode_or_affiliation`: XML node to be pased or the affiliation of\n      the user being described.\n    - `role`: role of the user.\n    - `jid`: JID of the user.\n    - `nick`: nickname of the user.\n    - `actor`: actor modyfying the user data.\n    - `reason`: reason of change of the user data.\n:Types:\n    - `xmlnode_or_affiliation`: `libxml2.xmlNode` or `str`\n    - `role`: `str`\n    - `jid`: `JID`\n    - `nick`: `unicode`\n    - `actor`: `JID`\n    - `reason`: `unicode`", "id": "f15255:c4:m0"}
{"signature": "def free_borrowed(self):", "body": "self.xmlnode=None<EOL>", "docstring": "Detach the XML node borrowed by `self`.", "id": "f15255:c0:m3"}
{"signature": "def free(self):", "body": "self.muc_free()<EOL>Iq.free(self)<EOL>", "docstring": "Free the data associated with this `MucIq` object.", "id": "f15255:c11:m3"}
{"signature": "def make_muc_userinfo(self):", "body": "self.clear_muc_child()<EOL>self.muc_child=MucUserX(parent=self.xmlnode)<EOL>return self.muc_child<EOL>", "docstring": "Create <x xmlns=\"...muc#user\"/> element in the stanza.\n\n:return: the element created.\n:returntype: `MucUserX`", "id": "f15255:c9:m3"}
{"signature": "def serialize(self):", "body": "return self.xmlnode.serialize()<EOL>", "docstring": "Serialize `self` as XML.\n\n:return: serialized `self.xmlnode`.\n:returntype: `str`", "id": "f15255:c0:m5"}
{"signature": "def make_kick_request(self,nick,reason):", "body": "self.clear_muc_child()<EOL>self.muc_child=MucAdminQuery(parent=self.xmlnode)<EOL>item=MucItem(\"<STR_LIT:none>\",\"<STR_LIT:none>\",nick=nick,reason=reason)<EOL>self.muc_child.add_item(item)<EOL>return self.muc_child<EOL>", "docstring": "Make the iq stanza a MUC room participant kick request.\n\n:Parameters:\n    - `nick`: nickname of user to kick.\n    - `reason`: reason of the kick.\n:Types:\n    - `nick`: `unicode`\n    - `reason`: `unicode`\n\n:return: object describing the kick request details.\n:returntype: `MucItem`", "id": "f15255:c11:m2"}
{"signature": "def __init(self,affiliation,role,jid=None,nick=None,actor=None,reason=None):", "body": "if not affiliation:<EOL><INDENT>affiliation=None<EOL><DEDENT>elif affiliation not in affiliations:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.affiliation=affiliation<EOL>if not role:<EOL><INDENT>role=None<EOL><DEDENT>elif role not in roles:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.role=role<EOL>if jid:<EOL><INDENT>self.jid=JID(jid)<EOL><DEDENT>else:<EOL><INDENT>self.jid=None<EOL><DEDENT>if actor:<EOL><INDENT>self.actor=JID(actor)<EOL><DEDENT>else:<EOL><INDENT>self.actor=None<EOL><DEDENT>self.nick=nick<EOL>self.reason=reason<EOL>", "docstring": "Initialize a `MucItem` object from a set of attributes.\n\n        :Parameters:\n            - `affiliation`: affiliation of the user.\n            - `role`: role of the user.\n            - `jid`: JID of the user.\n            - `nick`: nickname of the user.\n            - `actor`: actor modyfying the user data.\n            - `reason`: reason of change of the user data.\n        :Types:\n            - `affiliation`: `str`\n            - `role`: `str`\n            - `jid`: `JID`\n            - `nick`: `unicode`\n            - `actor`: `JID`\n            - `reason`: `unicode`", "id": "f15255:c4:m1"}
{"signature": "def copy(self):", "body": "return MucIq(self)<EOL>", "docstring": "Return a copy of `self`.", "id": "f15255:c11:m1"}
{"signature": "def __from_xmlnode(self, xmlnode):", "body": "actor=None<EOL>reason=None<EOL>n=xmlnode.children<EOL>while n:<EOL><INDENT>ns=n.ns()<EOL>if ns and ns.getContent()!=MUC_USER_NS:<EOL><INDENT>continue<EOL><DEDENT>if n.name==\"<STR_LIT>\":<EOL><INDENT>actor=n.getContent()<EOL><DEDENT>if n.name==\"<STR_LIT>\":<EOL><INDENT>reason=n.getContent()<EOL><DEDENT>n=n.next<EOL><DEDENT>self.__init(<EOL>from_utf8(xmlnode.prop(\"<STR_LIT>\")),<EOL>from_utf8(xmlnode.prop(\"<STR_LIT>\")),<EOL>from_utf8(xmlnode.prop(\"<STR_LIT>\")),<EOL>from_utf8(xmlnode.prop(\"<STR_LIT>\")),<EOL>from_utf8(actor),<EOL>from_utf8(reason),<EOL>)<EOL>", "docstring": "Initialize a `MucItem` object from an XML node.\n\n        :Parameters:\n            - `xmlnode`: the XML node.\n        :Types:\n            - `xmlnode`: `libxml2.xmlNode`", "id": "f15255:c4:m2"}
{"signature": "def clear(self):", "body": "if not self.xmlnode.children:<EOL><INDENT>return<EOL><DEDENT>n=self.xmlnode.children<EOL>while n:<EOL><INDENT>ns=n.ns()<EOL>if ns and ns.getContent()!=MUC_USER_NS:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>n.unlinkNode()<EOL>n.freeNode()<EOL><DEDENT>n=n.next<EOL><DEDENT>", "docstring": "Clear the content of `self.xmlnode` removing all <item/>, <status/>, etc.", "id": "f15255:c6:m1"}
{"signature": "def add_item(self,item):", "body": "if not isinstance(item,MucItemBase):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>item.as_xml(self.xmlnode)<EOL>", "docstring": "Add an item to `self`.\n\n        :Parameters:\n            - `item`: the item to add.\n        :Types:\n            - `item`: `MucItemBase`", "id": "f15255:c6:m2"}
{"signature": "def copy(self):", "body": "return MucPresence(self)<EOL>", "docstring": "Return a copy of `self`.", "id": "f15255:c10:m1"}
{"signature": "def process_configuration_error(self, stanza):", "body": "self.handler.room_configuration_error(stanza)<EOL>", "docstring": "Process error response for a room configuration request.\n\n:Parameters:\n    - `stanza`: the stanza received.\n:Types:\n    - `stanza`: `Presence`", "id": "f15256:c2:m19"}
{"signature": "def presence_changed(self,user,stanza):", "body": "pass<EOL>", "docstring": "Called whenever user's presence changes (includes nick, role or\naffiliation changes).\n\n:Parameters:\n    - `user`: MucRoomUser object describing the user.\n    - `stanza`: the stanza received.\n\n:Types:\n    - `user`: `MucRoomUser`\n    - `stanza`: `pyxmpp.stanza.Stanza`", "id": "f15256:c0:m11"}
{"signature": "def process_unavailable_presence(self,stanza):", "body": "fr=stanza.get_from()<EOL>if not fr.resource:<EOL><INDENT>return<EOL><DEDENT>nick=fr.resource<EOL>user=self.users.get(nick)<EOL>if user:<EOL><INDENT>old_user=MucRoomUser(user)<EOL>user.update_presence(stanza)<EOL>self.handler.presence_changed(user,stanza)<EOL>if user.new_nick:<EOL><INDENT>mc=stanza.get_muc_child()<EOL>if isinstance(mc,MucUserX):<EOL><INDENT>renames=[i for i in mc.get_items() if isinstance(i,MucStatus) and i.code==<NUM_LIT>]<EOL>if renames:<EOL><INDENT>self.users[user.new_nick]=user<EOL>del self.users[nick]<EOL>return<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>old_user=None<EOL>user=MucRoomUser(stanza)<EOL>self.users[user.nick]=user<EOL>self.handler.presence_changed(user,stanza)<EOL><DEDENT>if fr==self.room_jid and self.joined:<EOL><INDENT>self.joined=False<EOL>self.handler.user_left(user,stanza)<EOL>self.manager.forget(self)<EOL>self.me=user<EOL><DEDENT>elif old_user:<EOL><INDENT>self.handler.user_left(user,stanza)<EOL><DEDENT>", "docstring": "Process <presence type=\"unavailable\"/> received from the room.\n\n:Parameters:\n    - `stanza`: the stanza received.\n:Types:\n    - `stanza`: `MucPresence`", "id": "f15256:c2:m11"}
{"signature": "def user_joined(self,user,stanza):", "body": "pass<EOL>", "docstring": "Called when a new participant joins the room.\n\n:Parameters:\n    - `user`: the user joining.\n    - `stanza`: the stanza received.\n\n:Types:\n    - `user`: `MucRoomUser`\n    - `stanza`: `pyxmpp.stanza.Stanza`", "id": "f15256:c0:m5"}
{"signature": "def set_stream(self,stream):", "body": "_unused = stream<EOL>if self.joined and self.handler:<EOL><INDENT>self.handler.user_left(self.me,None)<EOL><DEDENT>self.joined=False<EOL>", "docstring": "Called when current stream changes.\n\nMark the room not joined and inform `self.handler` that it was left.\n\n:Parameters:\n    - `stream`: the new stream.\n:Types:\n    - `stream`: `pyxmpp.stream.Stream`", "id": "f15256:c2:m2"}
{"signature": "def set_subject(self,subject):", "body": "m=Message(to_jid=self.room_jid.bare(),stanza_type=\"<STR_LIT>\",subject=subject)<EOL>self.manager.stream.send(m)<EOL>", "docstring": "Send a subject change request to the room.\n\n:Parameters:\n    - `subject`: the new subject.\n:Types:\n    - `subject`: `unicode`", "id": "f15256:c2:m6"}
{"signature": "def set_stream(self,stream):", "body": "self.jid=stream.me<EOL>self.stream=stream<EOL>for r in self.rooms.values():<EOL><INDENT>r.set_stream(stream)<EOL><DEDENT>", "docstring": "Change the stream assigned to `self`.\n\n:Parameters:\n    - `stream`: the new stream to be assigned to `self`.\n:Types:\n    - `stream`: `pyxmpp.stream.Stream`", "id": "f15256:c3:m1"}
{"signature": "def __error_message(self,stanza):", "body": "fr=stanza.get_from()<EOL>key=fr.bare().as_unicode()<EOL>rs=self.rooms.get(key)<EOL>if not rs:<EOL><INDENT>return False<EOL><DEDENT>rs.process_error_message(stanza)<EOL>return True<EOL>", "docstring": "Process an error message from a MUC room.\n\n        :Parameters:\n            - `stanza`: the stanza received.\n        :Types:\n            - `stanza`: `Message`\n\n        :return: `True` if the message was properly recognized as directed to\n            one of the managed rooms, `False` otherwise.\n        :returntype: `bool`", "id": "f15256:c3:m7"}
{"signature": "def nick_changed(self,user,old_nick,stanza):", "body": "pass<EOL>", "docstring": "Called after a user nick has been changed.\n\n:Parameters:\n    - `user`: the user (after update).\n    - `old_nick`: the old nick.\n    - `stanza`: the stanza received.\n\n:Types:\n    - `user`: `MucRoomUser`\n    - `old_nick`: `unicode`\n    - `stanza`: `pyxmpp.stanza.Stanza`", "id": "f15256:c0:m10"}
{"signature": "def get_nick(self):", "body": "return self.room_jid.resource<EOL>", "docstring": "Get own nick.\n\n:return: own nick.\n:returntype: `unicode`", "id": "f15256:c2:m9"}
{"signature": "def same_as(self,other):", "body": "return self.room_jid==other.room_jid<EOL>", "docstring": "Check if two `MucRoomUser` objects describe the same user in the\n        same room.\n\n        :Parameters:\n            - `other`: the user object to compare `self` with.\n        :Types:\n            - `other`: `MucRoomUser`\n\n        :return: `True` if the two object describe the same user.\n        :returntype: `bool`", "id": "f15256:c1:m2"}
{"signature": "def send_message(self,body):", "body": "m=Message(to_jid=self.room_jid.bare(),stanza_type=\"<STR_LIT>\",body=body)<EOL>self.manager.stream.send(m)<EOL>", "docstring": "Send a message to the room.\n\n:Parameters:\n    - `body`: the message body.\n:Types:\n    - `body`: `unicode`", "id": "f15256:c2:m5"}
{"signature": "def set_handlers(self,priority=<NUM_LIT:10>):", "body": "self.stream.set_message_handler(\"<STR_LIT>\",self.__groupchat_message,None,priority)<EOL>self.stream.set_message_handler(\"<STR_LIT:error>\",self.__error_message,None,priority)<EOL>self.stream.set_presence_handler(\"<STR_LIT>\",self.__presence_available,None,priority)<EOL>self.stream.set_presence_handler(\"<STR_LIT>\",self.__presence_unavailable,None,priority)<EOL>self.stream.set_presence_handler(\"<STR_LIT:error>\",self.__presence_error,None,priority)<EOL>", "docstring": "Assign MUC stanza handlers to the `self.stream`.\n\n:Parameters:\n    - `priority`: priority for the handlers.\n:Types:\n    - `priority`: `int`", "id": "f15256:c3:m2"}
{"signature": "def configuration_form_received(self,form):", "body": "pass<EOL>", "docstring": "Called when a requested configuration form is received.\n\nThe form, after filling-in should be passed to `self.room_state.configure_room`.\n\n:Parameters:\n    - `form`: the configuration form.\n\n:Types:\n    - `form`: `pyxmpp.jabber.dataforms.Form`", "id": "f15256:c0:m3"}
{"signature": "def process_configuration_form_error(self, stanza):", "body": "self.handler.error(stanza)<EOL>", "docstring": "Process error response for a room configuration form request.\n\n:Parameters:\n    - `stanza`: the stanza received.\n:Types:\n    - `stanza`: `Presence`", "id": "f15256:c2:m16"}
{"signature": "def role_changed(self,user,old_role,new_role,stanza):", "body": "pass<EOL>", "docstring": "Called when a role of an user has been changed.\n\n:Parameters:\n    - `user`: the user (after update).\n    - `old_role`: user's role before update.\n    - `new_role`: user's role after update.\n    - `stanza`: the stanza received.\n\n:Types:\n    - `user`: `MucRoomUser`\n    - `old_role`: `unicode`\n    - `new_role`: `unicode`\n    - `stanza`: `pyxmpp.stanza.Stanza`", "id": "f15256:c0:m7"}
{"signature": "def change_nick(self,new_nick):", "body": "new_room_jid=JID(self.room_jid.node,self.room_jid.domain,new_nick)<EOL>p=Presence(to_jid=new_room_jid)<EOL>self.manager.stream.send(p)<EOL>", "docstring": "Send a nick change request to the room.\n\n:Parameters:\n    - `new_nick`: the new nickname requested.\n:Types:\n    - `new_nick`: `unicode`", "id": "f15256:c2:m7"}
{"signature": "def process_groupchat_message(self,stanza):", "body": "fr=stanza.get_from()<EOL>user=self.get_user(fr,True)<EOL>s=stanza.get_subject()<EOL>if s:<EOL><INDENT>self.subject=s<EOL>self.handler.subject_changed(user,stanza)<EOL><DEDENT>else:<EOL><INDENT>self.handler.message_received(user,stanza)<EOL><DEDENT>", "docstring": "Process <message type=\"groupchat\"/> received from the room.\n\n:Parameters:\n    - `stanza`: the stanza received.\n:Types:\n    - `stanza`: `Message`", "id": "f15256:c2:m12"}
{"signature": "def get_room_jid(self,nick=None):", "body": "if nick is None:<EOL><INDENT>return self.room_jid<EOL><DEDENT>return JID(self.room_jid.node,self.room_jid.domain,nick)<EOL>", "docstring": "Get own room JID or a room JID for given `nick`.\n\n:Parameters:\n    - `nick`: a nick for which the room JID is requested.\n:Types:\n    - `nick`: `unicode`\n\n:return: the room JID.\n:returntype: `JID`", "id": "f15256:c2:m8"}
{"signature": "def leave(self):", "body": "if self.joined:<EOL><INDENT>p=MucPresence(to_jid=self.room_jid,stanza_type=\"<STR_LIT>\")<EOL>self.manager.stream.send(p)<EOL><DEDENT>", "docstring": "Send a leave request for the room.", "id": "f15256:c2:m4"}
{"signature": "def __presence_available(self,stanza):", "body": "fr=stanza.get_from()<EOL>key=fr.bare().as_unicode()<EOL>rs=self.rooms.get(key)<EOL>if not rs:<EOL><INDENT>return False<EOL><DEDENT>rs.process_available_presence(MucPresence(stanza))<EOL>return True<EOL>", "docstring": "Process an available presence from a MUC room.\n\n        :Parameters:\n            - `stanza`: the stanza received.\n        :Types:\n            - `stanza`: `Presence`\n\n        :return: `True` if the stanza was properly recognized as generated by\n            one of the managed rooms, `False` otherwise.\n        :returntype: `bool`", "id": "f15256:c3:m9"}
{"signature": "def __init__(self,presence_or_user_or_jid):", "body": "if isinstance(presence_or_user_or_jid,MucRoomUser):<EOL><INDENT>self.presence=presence_or_user_or_jid.presence<EOL>self.role=presence_or_user_or_jid.role<EOL>self.affiliation=presence_or_user_or_jid.affiliation<EOL>self.room_jid=presence_or_user_or_jid.room_jid<EOL>self.real_jid=presence_or_user_or_jid.real_jid<EOL>self.nick=presence_or_user_or_jid.nick<EOL>self.new_nick=None<EOL><DEDENT>else:<EOL><INDENT>self.affiliation=\"<STR_LIT:none>\"<EOL>self.presence=None<EOL>self.real_jid=None<EOL>self.new_nick=None<EOL>if isinstance(presence_or_user_or_jid,JID):<EOL><INDENT>self.nick=presence_or_user_or_jid.resource<EOL>self.room_jid=presence_or_user_or_jid<EOL>self.role=\"<STR_LIT:none>\"<EOL><DEDENT>elif isinstance(presence_or_user_or_jid,Presence):<EOL><INDENT>self.nick=None<EOL>self.room_jid=None<EOL>self.role=\"<STR_LIT>\"<EOL>self.update_presence(presence_or_user_or_jid)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>", "docstring": "Initialize a `MucRoomUser` object.\n\n:Parameters:\n    - `presence_or_user_or_jid`: a MUC presence stanza with user\n      information, a user object to copy or a room JID of a user.\n:Types:\n    - `presence_or_user_or_jid`: `MucPresence` or `MucRoomUser` or\n      `JID`\n\nWhen `presence_or_user_or_jid` is a JID user's\nrole and affiliation are set to \"none\".", "id": "f15256:c1:m0"}
{"signature": "def __init__(self,manager,own_jid,room_jid,handler):", "body": "self.own_jid=own_jid<EOL>self.room_jid=room_jid<EOL>self.handler=handler<EOL>self.manager=weakref.proxy(manager)<EOL>self.joined=False<EOL>self.subject=None<EOL>self.users={}<EOL>self.me=MucRoomUser(room_jid)<EOL>self.configured = None<EOL>self.configuration_form = None<EOL>handler.assign_state(self)<EOL>self.__logger=logging.getLogger(\"<STR_LIT>\")<EOL>", "docstring": "Initialize a `MucRoomState` object.\n\n:Parameters:\n    - `manager`: an object to manage this room.\n    - `own_jid`: real JID of the owner (client using this class).\n    - `room_jid`: room JID of the owner (provides the room name and\n      the nickname).\n    - `handler`: an object to handle room events.\n:Types:\n    - `manager`: `MucRoomManager`\n    - `own_jid`: JID\n    - `room_jid`: JID\n    - `handler`: `MucRoomHandler`", "id": "f15256:c2:m0"}
{"signature": "def room_configured(self):", "body": "pass<EOL>", "docstring": "Called after a successful room configuration.", "id": "f15256:c0:m4"}
{"signature": "def rfc2426(self):", "body": "if self.uri:<EOL><INDENT>return rfc2425encode(self.name,self.uri,{\"<STR_LIT:value>\":\"<STR_LIT>\"})<EOL><DEDENT>elif self.sound:<EOL><INDENT>return rfc2425encode(self.name,self.sound)<EOL><DEDENT>", "docstring": "RFC2426-encode the field content.\n\n        :return: the field in the RFC 2426 format.\n        :returntype: `str`", "id": "f15257:c14:m1"}
{"signature": "def __init__(self,name, value, rfc2425parameters = None, empty_ok = False):", "body": "_unused = rfc2425parameters<EOL>VCardField.__init__(self,name)<EOL>if isinstance(value,libxml2.xmlNode):<EOL><INDENT>value=value.getContent()<EOL>if value:<EOL><INDENT>self.value=unicode(value,\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\").strip()<EOL><DEDENT>else:<EOL><INDENT>self.value=u\"<STR_LIT>\"<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.value=value<EOL><DEDENT>if not self.value and not empty_ok:<EOL><INDENT>raise Empty(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Initialize a `VCardString` object.\n\n        :Parameters:\n            - `name`: field name\n            - `value`: field value as string or an XML node\n            - `rfc2425parameters`: optional RFC 2425 parameters\n        :Types:\n            - `name`: `str`\n            - `value`: `str` or `libxml2.xmlNode`\n            - `rfc2425parameters`: `dict`", "id": "f15257:c2:m0"}
{"signature": "def complete_xml_element(self, xmlnode, _unused):", "body": "for _unused1, value in self.content.items():<EOL><INDENT>if value is None:<EOL><INDENT>continue<EOL><DEDENT>if type(value) is list:<EOL><INDENT>for v in value:<EOL><INDENT>v.as_xml(xmlnode)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>value.as_xml(xmlnode)<EOL><DEDENT><DEDENT>", "docstring": "Complete the XML node with `self` content.\n\n        Should be overriden in classes derived from `StanzaPayloadObject`.\n\n        :Parameters:\n            - `xmlnode`: XML node with the element being built. It has already\n              right name and namespace, but no attributes or content.\n            - `_unused`: document to which the element belongs.\n        :Types:\n            - `xmlnode`: `libxml2.xmlNode`\n            - `_unused`: `libxml2.xmlDoc`", "id": "f15257:c17:m7"}
{"signature": "def __from_xml(self,data):", "body": "ns=get_node_ns(data)<EOL>if ns and ns.getContent()!=VCARD_NS:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (VCARD_NS,))<EOL><DEDENT>if data.name!=\"<STR_LIT>\":<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (data.name,))<EOL><DEDENT>n=data.children<EOL>dns=get_node_ns(data)<EOL>while n:<EOL><INDENT>if n.type!='<STR_LIT>':<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>ns=get_node_ns(n)<EOL>if (ns and dns and ns.getContent()!=dns.getContent()):<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>if not self.components.has_key(n.name):<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>cl,tp=self.components[n.name]<EOL>if tp in (\"<STR_LIT>\",\"<STR_LIT>\"):<EOL><INDENT>if self.content.has_key(n.name):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (n.name,))<EOL><DEDENT>try:<EOL><INDENT>self.content[n.name]=cl(n.name,n)<EOL><DEDENT>except Empty:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>elif tp==\"<STR_LIT>\":<EOL><INDENT>if not self.content.has_key(n.name):<EOL><INDENT>self.content[n.name]=[]<EOL><DEDENT>try:<EOL><INDENT>self.content[n.name].append(cl(n.name,n))<EOL><DEDENT>except Empty:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>n=n.next<EOL><DEDENT>", "docstring": "Initialize a VCard object from XML node.\n\n        :Parameters:\n            - `data`: vcard to parse.\n        :Types:\n            - `data`: `libxml2.xmlNode`", "id": "f15257:c17:m2"}
{"signature": "def __init__(self,name,value,rfc2425parameters=None):", "body": "_unused = rfc2425parameters<EOL>VCardField.__init__(self,name)<EOL>if isinstance(value,libxml2.xmlNode):<EOL><INDENT>try:<EOL><INDENT>self.value=pyxmpp.jid.JID(value.getContent())<EOL><DEDENT>except JIDError:<EOL><INDENT>raise JIDMalformedProtocolError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.value=pyxmpp.jid.JID(value)<EOL><DEDENT>if not self.value:<EOL><INDENT>raise Empty(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Initialize a `VCardJID` object.\n\n        :Parameters:\n            - `name`: field name\n            - `value`: field value as string or an XML node\n            - `rfc2425parameters`: optional RFC 2425 parameters\n        :Types:\n            - `name`: `str`\n            - `value`: `str` or `libxml2.xmlNode`\n            - `rfc2425parameters`: `dict`", "id": "f15257:c4:m0"}
{"signature": "def __init__(self,name,value,rfc2425parameters=None):", "body": "VCardField.__init__(self,name)<EOL>if not rfc2425parameters:<EOL><INDENT>rfc2425parameters={}<EOL><DEDENT>self.uri,self.sound,self.phonetic=[None]*<NUM_LIT:3><EOL>if isinstance(value,libxml2.xmlNode):<EOL><INDENT>n=value.children<EOL>vns=get_node_ns(value)<EOL>while n:<EOL><INDENT>if n.type!='<STR_LIT>':<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>ns=get_node_ns(n)<EOL>if (ns and vns and ns.getContent()!=vns.getContent()):<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>if n.name=='<STR_LIT>':<EOL><INDENT>if (self.phonetic or self.uri):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.sound=base64.decodestring(n.getContent())<EOL><DEDENT>if n.name=='<STR_LIT>':<EOL><INDENT>if (self.sound or self.uri):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.phonetic=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\")<EOL><DEDENT>if n.name=='<STR_LIT>':<EOL><INDENT>if (self.phonetic or self.sound):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.uri=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\")<EOL><DEDENT>n=n.next<EOL><DEDENT>if (not self.phonetic and not self.uri and not self.sound):<EOL><INDENT>raise Empty(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if rfc2425parameters.get(\"<STR_LIT:value>\", \"<STR_LIT>\").lower()==\"<STR_LIT>\":<EOL><INDENT>self.uri=value<EOL>self.sound=None<EOL>self.phonetic=None<EOL><DEDENT>else:<EOL><INDENT>self.sound=value<EOL>self.uri=None<EOL>self.phonetic=None<EOL><DEDENT><DEDENT>", "docstring": "Initialize a `VCardSound` object.\n\n        :Parameters:\n            - `name`: field name\n            - `value`: field value as string or an XML node\n            - `rfc2425parameters`: optional RFC 2425 parameters\n        :Types:\n            - `name`: `str`\n            - `value`: `str` or `libxml2.xmlNode`\n            - `rfc2425parameters`: `dict`", "id": "f15257:c14:m0"}
{"signature": "def as_xml(self,parent):", "body": "if self.value in (\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\"):<EOL><INDENT>n=parent.newChild(None,self.name.upper(),None)<EOL>n.newChild(None,self.value.upper(),None)<EOL>return n<EOL><DEDENT>return None<EOL>", "docstring": "Create vcard-tmp XML representation of the field.\n\n        :Parameters:\n            - `parent`: parent node for the element\n        :Types:\n            - `parent`: `libxml2.xmlNode`\n\n        :return: xml node with the field data.\n        :returntype: `libxml2.xmlNode`", "id": "f15257:c15:m2"}
{"signature": "def as_xml(self,parent):", "body": "n=parent.newChild(None,\"<STR_LIT>\",None)<EOL>for t in (\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\"):<EOL><INDENT>if t in self.type:<EOL><INDENT>n.newChild(None,t.upper(),None)<EOL><DEDENT><DEDENT>for l in self.lines:<EOL><INDENT>n.newTextChild(None,\"<STR_LIT>\",l)<EOL><DEDENT>return n<EOL>", "docstring": "Create vcard-tmp XML representation of the field.\n\n        :Parameters:\n            - `parent`: parent node for the element\n        :Types:\n            - `parent`: `libxml2.xmlNode`\n\n        :return: xml node with the field data.\n        :returntype: `libxml2.xmlNode`", "id": "f15257:c8:m2"}
{"signature": "def as_xml(self,parent):", "body": "n=parent.newChild(None,\"<STR_LIT:N>\",None)<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.family))<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.given))<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.middle))<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.prefix))<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.suffix))<EOL>return n<EOL>", "docstring": "Create vcard-tmp XML representation of the field.\n\n        :Parameters:\n            - `parent`: parent node for the element\n        :Types:\n            - `parent`: `libxml2.xmlNode`\n\n        :return: xml node with the field data.\n        :returntype: `libxml2.xmlNode`", "id": "f15257:c5:m2"}
{"signature": "def as_xml(self,parent):", "body": "n=parent.newChild(None,self.name.upper(),None)<EOL>if self.type:<EOL><INDENT>n.newTextChild(None,\"<STR_LIT>\",self.type)<EOL><DEDENT>n.newTextChild(None,\"<STR_LIT>\",binascii.b2a_base64(self.cred))<EOL>return n<EOL>", "docstring": "Create vcard-tmp XML representation of the field.\n\n        :Parameters:\n            - `parent`: parent node for the element\n        :Types:\n            - `parent`: `libxml2.xmlNode`\n\n        :return: xml node with the field data.\n        :returntype: `libxml2.xmlNode`", "id": "f15257:c16:m2"}
{"signature": "def __init__(self,name,value,rfc2425parameters=None):", "body": "VCardField.__init__(self,name)<EOL>if not rfc2425parameters:<EOL><INDENT>rfc2425parameters={}<EOL><DEDENT>if self.name.upper()!=\"<STR_LIT>\":<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>if isinstance(value,libxml2.xmlNode):<EOL><INDENT>self.number=None<EOL>self.type=[]<EOL>n=value.children<EOL>vns=get_node_ns(value)<EOL>while n:<EOL><INDENT>if n.type!='<STR_LIT>':<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>ns=get_node_ns(n)<EOL>if (ns and vns and ns.getContent()!=vns.getContent()):<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>if n.name=='<STR_LIT>':<EOL><INDENT>self.number=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\")<EOL><DEDENT>elif n.name in (\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",<EOL>\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",<EOL>\"<STR_LIT>\"):<EOL><INDENT>self.type.append(n.name.lower())<EOL><DEDENT>n=n.next<EOL><DEDENT>if self.type==[]:<EOL><INDENT>self.type=[\"<STR_LIT>\"]<EOL><DEDENT>if not self.number:<EOL><INDENT>raise Empty(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>t=rfc2425parameters.get(\"<STR_LIT:type>\")<EOL>if t:<EOL><INDENT>self.type=t.split(\"<STR_LIT:U+002C>\")<EOL><DEDENT>else:<EOL><INDENT>self.type=[\"<STR_LIT>\"]<EOL><DEDENT>self.number=value<EOL><DEDENT>", "docstring": "Initialize a `VCardTel` object.\n\n        :Parameters:\n            - `name`: field name\n            - `value`: field value as string or an XML node\n            - `rfc2425parameters`: optional RFC 2425 parameters\n        :Types:\n            - `name`: `str`\n            - `value`: `str` or `libxml2.xmlNode`\n            - `rfc2425parameters`: `dict`", "id": "f15257:c9:m0"}
{"signature": "def as_xml(self,parent):", "body": "n=parent.newChild(None,\"<STR_LIT>\",None)<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.lat))<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.lon))<EOL>return n<EOL>", "docstring": "Create vcard-tmp XML representation of the field.\n\n        :Parameters:\n            - `parent`: parent node for the element\n        :Types:\n            - `parent`: `libxml2.xmlNode`\n\n        :return: xml node with the field data.\n        :returntype: `libxml2.xmlNode`", "id": "f15257:c11:m2"}
{"signature": "def rfc2426(self):", "body": "return rfc2425encode(self.name,self.value)<EOL>", "docstring": "RFC2426-encode the field content.\n\n        :return: the field in the RFC 2426 format.\n        :returntype: `str`", "id": "f15257:c2:m1"}
{"signature": "def as_xml(self,parent):", "body": "n=parent.newChild(None,\"<STR_LIT>\",None)<EOL>for t in (\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\"):<EOL><INDENT>if t in self.type:<EOL><INDENT>n.newChild(None,t.upper(),None)<EOL><DEDENT><DEDENT>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.pobox))<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.extadr))<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.street))<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.locality))<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.region))<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.pcode))<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.ctry))<EOL>return n<EOL>", "docstring": "Create vcard-tmp XML representation of the field.\n\n        :Parameters:\n            - `parent`: parent node for the element\n        :Types:\n            - `parent`: `libxml2.xmlNode`\n\n        :return: xml node with the field data.\n        :returntype: `libxml2.xmlNode`", "id": "f15257:c7:m3"}
{"signature": "def as_xml(self,parent):", "body": "n=parent.newChild(None,self.name.upper(),None)<EOL>if self.uri:<EOL><INDENT>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.uri))<EOL><DEDENT>elif self.phonetic:<EOL><INDENT>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.phonetic))<EOL><DEDENT>else:<EOL><INDENT>n.newTextChild(None,\"<STR_LIT>\",binascii.b2a_base64(self.sound))<EOL><DEDENT>return n<EOL>", "docstring": "Create vcard-tmp XML representation of the field.\n\n        :Parameters:\n            - `parent`: parent node for the element\n        :Types:\n            - `parent`: `libxml2.xmlNode`\n\n        :return: xml node with the field data.\n        :returntype: `libxml2.xmlNode`", "id": "f15257:c14:m2"}
{"signature": "def __init__(self,name,value,rfc2425parameters=None):", "body": "VCardField.__init__(self,name)<EOL>if not rfc2425parameters:<EOL><INDENT>rfc2425parameters={}<EOL><DEDENT>self.uri,self.type,self.image=[None]*<NUM_LIT:3><EOL>if isinstance(value,libxml2.xmlNode):<EOL><INDENT>n=value.children<EOL>vns=get_node_ns(value)<EOL>while n:<EOL><INDENT>if n.type!='<STR_LIT>':<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>ns=get_node_ns(n)<EOL>if (ns and vns and ns.getContent()!=vns.getContent()):<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>if n.name=='<STR_LIT>':<EOL><INDENT>self.type=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\")<EOL><DEDENT>if n.name=='<STR_LIT>':<EOL><INDENT>self.image=base64.decodestring(n.getContent())<EOL><DEDENT>if n.name=='<STR_LIT>':<EOL><INDENT>self.uri=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\")<EOL><DEDENT>n=n.next<EOL><DEDENT>if (self.uri and self.image) or (not self.uri and not self.image):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (name,))<EOL><DEDENT>if (not self.uri and not self.image):<EOL><INDENT>raise Empty(\"<STR_LIT>\" % (name,))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if rfc2425parameters.get(\"<STR_LIT:value>\", \"<STR_LIT>\").lower()==\"<STR_LIT>\":<EOL><INDENT>self.uri=value<EOL>self.type=None<EOL><DEDENT>else:<EOL><INDENT>self.type=rfc2425parameters.get(\"<STR_LIT:type>\")<EOL>self.image=value<EOL><DEDENT><DEDENT>", "docstring": "Initialize a `VCardImage` object.\n\n        :Parameters:\n            - `name`: field name\n            - `value`: field value as string or an XML node\n            - `rfc2425parameters`: optional RFC 2425 parameters\n        :Types:\n            - `name`: `str`\n            - `value`: `str` or `libxml2.xmlNode`\n            - `rfc2425parameters`: `dict`", "id": "f15257:c6:m0"}
{"signature": "def __from_xml(self,value):", "body": "n=value.children<EOL>vns=get_node_ns(value)<EOL>while n:<EOL><INDENT>if n.type!='<STR_LIT>':<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>ns=get_node_ns(n)<EOL>if (ns and vns and ns.getContent()!=vns.getContent()):<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>if n.name=='<STR_LIT>':<EOL><INDENT>self.pobox=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\")<EOL><DEDENT>elif n.name in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self.extadr=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\")<EOL><DEDENT>elif n.name=='<STR_LIT>':<EOL><INDENT>self.street=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\")<EOL><DEDENT>elif n.name=='<STR_LIT>':<EOL><INDENT>self.locality=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\")<EOL><DEDENT>elif n.name=='<STR_LIT>':<EOL><INDENT>self.region=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\")<EOL><DEDENT>elif n.name=='<STR_LIT>':<EOL><INDENT>self.pcode=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\")<EOL><DEDENT>elif n.name=='<STR_LIT>':<EOL><INDENT>self.ctry=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\")<EOL><DEDENT>elif n.name in (\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",<EOL>\"<STR_LIT>\"):<EOL><INDENT>self.type.append(n.name.lower())<EOL><DEDENT>n=n.next<EOL><DEDENT>if self.type==[]:<EOL><INDENT>self.type=[\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\"]<EOL><DEDENT>elif \"<STR_LIT>\" in self.type and \"<STR_LIT>\" in self.type:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Initialize a `VCardAdr` object from and XML element.\n\n        :Parameters:\n            - `value`: field value as an XML node\n        :Types:\n            - `value`: `libxml2.xmlNode`", "id": "f15257:c7:m1"}
{"signature": "def rfc2426(self):", "body": "return rfc2425encode(\"<STR_LIT>\",u'<STR_LIT:;>'.join(quote_semicolon(val) for val in<EOL>(self.lat,self.lon)))<EOL>", "docstring": "RFC2426-encode the field content.\n\n        :return: the field in the RFC 2426 format.\n        :returntype: `str`", "id": "f15257:c11:m1"}
{"signature": "def __init__(self,name,value,rfc2425parameters=None):", "body": "_unused = rfc2425parameters<EOL>VCardField.__init__(self,name)<EOL>if isinstance(value,libxml2.xmlNode):<EOL><INDENT>self.value=None<EOL>n=value.children<EOL>vns=get_node_ns(value)<EOL>while n:<EOL><INDENT>if n.type!='<STR_LIT>':<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>ns=get_node_ns(n)<EOL>if (ns and vns and ns.getContent()!=vns.getContent()):<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>if n.name=='<STR_LIT>':<EOL><INDENT>self.value=\"<STR_LIT>\"<EOL><DEDENT>elif n.name=='<STR_LIT>':<EOL><INDENT>self.value=\"<STR_LIT>\"<EOL><DEDENT>elif n.name=='<STR_LIT>':<EOL><INDENT>self.value=\"<STR_LIT>\"<EOL><DEDENT>n=n.next<EOL><DEDENT>if not self.value:<EOL><INDENT>raise Empty<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.value=value<EOL><DEDENT>", "docstring": "Initialize a `VCardPrivacy` object.\n\n        :Parameters:\n            - `name`: field name\n            - `value`: field value as string or an XML node\n            - `rfc2425parameters`: optional RFC 2425 parameters\n        :Types:\n            - `name`: `str`\n            - `value`: `str` or `libxml2.xmlNode`\n            - `rfc2425parameters`: `dict`", "id": "f15257:c15:m0"}
{"signature": "def as_xml(self,parent):", "body": "return parent.newTextChild(None, to_utf8(self.name.upper()), to_utf8(self.value))<EOL>", "docstring": "Create vcard-tmp XML representation of the field.\n\n        :Parameters:\n            - `parent`: parent node for the element\n        :Types:\n            - `parent`: `libxml2.xmlNode`\n\n        :return: xml node with the field data.\n        :returntype: `libxml2.xmlNode`", "id": "f15257:c2:m2"}
{"signature": "def rfc2426(self):", "body": "return rfc2425encode(\"<STR_LIT:email>\",self.address,{\"<STR_LIT:type>\":\"<STR_LIT:U+002C>\".join(self.type)})<EOL>", "docstring": "RFC2426-encode the field content.\n\n        :return: the field in the RFC 2426 format.\n        :returntype: `str`", "id": "f15257:c10:m1"}
{"signature": "def as_xml(self,parent):", "body": "name=to_utf8(self.name.upper())<EOL>content=self.value.as_utf8()<EOL>return parent.newTextChild(None, name, content)<EOL>", "docstring": "Create vcard-tmp XML representation of the field.\n\n        :Parameters:\n            - `parent`: parent node for the element\n        :Types:\n            - `parent`: `libxml2.xmlNode`\n\n        :return: xml node with the field data.\n        :returntype: `libxml2.xmlNode`", "id": "f15257:c4:m2"}
{"signature": "def __init__(self,data):", "body": "<EOL>self.n = None<EOL>del self.n<EOL>self.content={}<EOL>if isinstance(data,libxml2.xmlNode):<EOL><INDENT>self.__from_xml(data)<EOL><DEDENT>else:<EOL><INDENT>self.__from_rfc2426(data)<EOL><DEDENT>if not self.content.get(\"<STR_LIT:N>\") and self.content.get(\"<STR_LIT>\"):<EOL><INDENT>s=self.content['<STR_LIT>'].value.replace(\"<STR_LIT:;>\",\"<STR_LIT:U+002C>\")<EOL>s=s.split(None,<NUM_LIT:2>)<EOL>if len(s)==<NUM_LIT:2>:<EOL><INDENT>s=u\"<STR_LIT>\" % (s[<NUM_LIT:1>],s[<NUM_LIT:0>])<EOL><DEDENT>elif len(s)==<NUM_LIT:3>:<EOL><INDENT>s=u\"<STR_LIT>\" % (s[<NUM_LIT:2>],s[<NUM_LIT:0>],s[<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>s=u\"<STR_LIT>\" % (s[<NUM_LIT:0>],)<EOL><DEDENT>self.content[\"<STR_LIT:N>\"]=VCardName(\"<STR_LIT:N>\",s)<EOL><DEDENT>elif not self.content.get(\"<STR_LIT>\") and self.content.get(\"<STR_LIT:N>\"):<EOL><INDENT>self.__make_fn()<EOL><DEDENT>for c, (_unused, tp) in self.components.items():<EOL><INDENT>if self.content.has_key(c):<EOL><INDENT>continue<EOL><DEDENT>if tp==\"<STR_LIT>\":<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (c,))<EOL><DEDENT>elif tp==\"<STR_LIT>\":<EOL><INDENT>self.content[c]=[]<EOL><DEDENT>elif tp==\"<STR_LIT>\":<EOL><INDENT>self.content[c]=None<EOL><DEDENT>else:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>", "docstring": "Initialize a VCard object from data which may be XML node\n        or an RFC2426 string.\n\n        :Parameters:\n            - `data`: vcard to parse.\n        :Types:\n            - `data`: `libxml2.xmlNode`, `unicode` or `str`", "id": "f15257:c17:m0"}
{"signature": "def as_xml(self,parent):", "body": "n=parent.newChild(None,\"<STR_LIT>\",None)<EOL>for t in (\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\"):<EOL><INDENT>if t in self.type:<EOL><INDENT>n.newChild(None,t.upper(),None)<EOL><DEDENT><DEDENT>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.address))<EOL>return n<EOL>", "docstring": "Create vcard-tmp XML representation of the field.\n\n        :Parameters:\n            - `parent`: parent node for the element\n        :Types:\n            - `parent`: `libxml2.xmlNode`\n\n        :return: xml node with the field data.\n        :returntype: `libxml2.xmlNode`", "id": "f15257:c10:m2"}
{"signature": "def __init__(self,name):", "body": "self.name=name<EOL>", "docstring": "Initialize a `VCardField` object.\n\n        Set its name.\n\n        :Parameters:\n            - `name`: field name\n        :Types:\n            - `name`: `str`", "id": "f15257:c1:m0"}
{"signature": "def __make_fn(self):", "body": "s=[]<EOL>if self.n.prefix:<EOL><INDENT>s.append(self.n.prefix)<EOL><DEDENT>if self.n.given:<EOL><INDENT>s.append(self.n.given)<EOL><DEDENT>if self.n.middle:<EOL><INDENT>s.append(self.n.middle)<EOL><DEDENT>if self.n.family:<EOL><INDENT>s.append(self.n.family)<EOL><DEDENT>if self.n.suffix:<EOL><INDENT>s.append(self.n.suffix)<EOL><DEDENT>s=u\"<STR_LIT:U+0020>\".join(s)<EOL>self.content[\"<STR_LIT>\"]=VCardString(\"<STR_LIT>\", s, empty_ok = True)<EOL>", "docstring": "Initialize the mandatory `self.fn` from `self.n`.\n\n        This is a workaround for buggy clients which set only one of them.", "id": "f15257:c17:m1"}
{"signature": "def __init__(self,name,value,rfc2425parameters=None):", "body": "VCardField.__init__(self,name)<EOL>if not rfc2425parameters:<EOL><INDENT>rfc2425parameters={}<EOL><DEDENT>if self.name.upper()!=\"<STR_LIT>\":<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>if isinstance(value,libxml2.xmlNode):<EOL><INDENT>self.lines=[]<EOL>self.type=[]<EOL>n=value.children<EOL>vns=get_node_ns(value)<EOL>while n:<EOL><INDENT>if n.type!='<STR_LIT>':<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>ns=get_node_ns(n)<EOL>if (ns and vns and ns.getContent()!=vns.getContent()):<EOL><INDENT>n=n.next<EOL>continue<EOL><DEDENT>if n.name=='<STR_LIT>':<EOL><INDENT>l=unicode(n.getContent(),\"<STR_LIT:utf-8>\",\"<STR_LIT:replace>\").strip()<EOL>l=l.replace(\"<STR_LIT:\\n>\",\"<STR_LIT:U+0020>\").replace(\"<STR_LIT:\\r>\",\"<STR_LIT:U+0020>\")<EOL>self.lines.append(l)<EOL><DEDENT>elif n.name in (\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",<EOL>\"<STR_LIT>\"):<EOL><INDENT>self.type.append(n.name.lower())<EOL><DEDENT>n=n.next<EOL><DEDENT>if self.type==[]:<EOL><INDENT>self.type=[\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\"]<EOL><DEDENT>elif \"<STR_LIT>\" in self.type and \"<STR_LIT>\" in self.type:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if not self.lines:<EOL><INDENT>self.lines=[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>t=rfc2425parameters.get(\"<STR_LIT:type>\")<EOL>if t:<EOL><INDENT>self.type=t.split(\"<STR_LIT:U+002C>\")<EOL><DEDENT>else:<EOL><INDENT>self.type=[\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\",\"<STR_LIT>\"]<EOL><DEDENT>self.lines=value.split(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Initialize a `VCardLabel` object.\n\n        :Parameters:\n            - `name`: field name\n            - `value`: field value as string or an XML node\n            - `rfc2425parameters`: optional RFC 2425 parameters\n        :Types:\n            - `name`: `str`\n            - `value`: `str` or 
`libxml2.xmlNode`\n            - `rfc2425parameters`: `dict`", "id": "f15257:c8:m0"}
{"signature": "def as_xml(self,parent):", "body": "n=parent.newChild(None,\"<STR_LIT>\",None)<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.name))<EOL>n.newTextChild(None,\"<STR_LIT>\",to_utf8(self.unit))<EOL>return n<EOL>", "docstring": "Create vcard-tmp XML representation of the field.\n\n        :Parameters:\n            - `parent`: parent node for the element\n        :Types:\n            - `parent`: `libxml2.xmlNode`\n\n        :return: xml node with the field data.\n        :returntype: `libxml2.xmlNode`", "id": "f15257:c12:m2"}
{"signature": "def rfc2426(self):", "body": "if self.unit:<EOL><INDENT>return rfc2425encode(\"<STR_LIT>\",u'<STR_LIT:;>'.join(quote_semicolon(val) for val in<EOL>(self.name,self.unit)))<EOL><DEDENT>else:<EOL><INDENT>return rfc2425encode(\"<STR_LIT>\",unicode(quote_semicolon(self.name)))<EOL><DEDENT>", "docstring": "RFC2426-encode the field content.\n\n        :return: the field in the RFC 2426 format.\n        :returntype: `str`", "id": "f15257:c12:m1"}
{"signature": "def complete_xml_element(self, xmlnode, doc):", "body": "if self.type is not None and self.type not in self.allowed_types:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (self.type,))<EOL><DEDENT>if self.type is not None:<EOL><INDENT>xmlnode.setProp(\"<STR_LIT:type>\", self.type)<EOL><DEDENT>if not self.label is None:<EOL><INDENT>xmlnode.setProp(\"<STR_LIT:label>\", to_utf8(self.label))<EOL><DEDENT>if not self.name is None:<EOL><INDENT>xmlnode.setProp(\"<STR_LIT>\", to_utf8(self.name))<EOL><DEDENT>if self.values:<EOL><INDENT>if self.type and len(self.values) > <NUM_LIT:1> and not self.type.endswith(u\"<STR_LIT>\"):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (self.type,))<EOL><DEDENT>for value in self.values:<EOL><INDENT>xmlnode.newTextChild(xmlnode.ns(), \"<STR_LIT:value>\", to_utf8(value))<EOL><DEDENT><DEDENT>for option in self.options:<EOL><INDENT>option.as_xml(xmlnode, doc)<EOL><DEDENT>if self.required:<EOL><INDENT>xmlnode.newChild(xmlnode.ns(), \"<STR_LIT>\", None)<EOL><DEDENT>if self.desc:<EOL><INDENT>xmlnode.newTextChild(xmlnode.ns(), \"<STR_LIT>\", to_utf8(self.desc))<EOL><DEDENT>return xmlnode<EOL>", "docstring": "Complete the XML node with `self` content.\n\n        :Parameters:\n            - `xmlnode`: XML node with the element being built. It has already\n              right name and namespace, but no attributes or content.\n            - `doc`: document to which the element belongs.\n        :Types:\n            - `xmlnode`: `libxml2.xmlNode`\n            - `doc`: `libxml2.xmlDoc`", "id": "f15258:c1:m4"}
{"signature": "@classmethod<EOL><INDENT>def _new_from_xml(cls, xmlnode):<DEDENT>", "body": "label = from_utf8(xmlnode.prop(\"<STR_LIT:label>\"))<EOL>child = xmlnode.children<EOL>value = None<EOL>for child in xml_element_ns_iter(xmlnode.children, DATAFORM_NS):<EOL><INDENT>if child.name == \"<STR_LIT:value>\":<EOL><INDENT>value = from_utf8(child.getContent())<EOL>break<EOL><DEDENT><DEDENT>if value is None:<EOL><INDENT>raise BadRequestProtocolError(\"<STR_LIT>\")<EOL><DEDENT>return cls(value, label)<EOL>", "docstring": "Create a new `Option` object from an XML element.\n\n        :Parameters:\n            - `xmlnode`: the XML element.\n        :Types:\n            - `xmlnode`: `libxml2.xmlNode`\n\n        :return: the object created.\n        :returntype: `Option`", "id": "f15258:c0:m3"}
{"signature": "def __init__(self, value = None, label = None, values = None):", "body": "self.label = label<EOL>if value:<EOL><INDENT>self.value = value<EOL><DEDENT>elif values:<EOL><INDENT>warnings.warn(\"<STR_LIT>\", DeprecationWarning, stacklevel=<NUM_LIT:1>)<EOL>self.value = values[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Initialize an `Option` object.\n\n        :Parameters:\n            - `value`: option value (mandatory).\n            - `label`: option label (human-readable description).\n            - `values`: for backward compatibility only.\n        :Types:\n            - `label`: `unicode`\n            - `value`: `unicode`", "id": "f15258:c0:m0"}
{"signature": "def __from_xml(self, xmlnode):", "body": "self.fields = []<EOL>self.reported_fields = []<EOL>self.items = []<EOL>self.title = None<EOL>self.instructions = None<EOL>if (xmlnode.type != \"<STR_LIT>\" or xmlnode.name != \"<STR_LIT:x>\"<EOL>or xmlnode.ns().content != DATAFORM_NS):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + xmlnode.serialize())<EOL><DEDENT>self.type = xmlnode.prop(\"<STR_LIT:type>\")<EOL>if not self.type in self.allowed_types:<EOL><INDENT>raise BadRequestProtocolError(\"<STR_LIT>\" % (self.type,))<EOL><DEDENT>child = xmlnode.children<EOL>while child:<EOL><INDENT>if child.type != \"<STR_LIT>\" or child.ns().content != DATAFORM_NS:<EOL><INDENT>pass<EOL><DEDENT>elif child.name == \"<STR_LIT:title>\":<EOL><INDENT>self.title = from_utf8(child.getContent())<EOL><DEDENT>elif child.name == \"<STR_LIT>\":<EOL><INDENT>self.instructions = from_utf8(child.getContent())<EOL><DEDENT>elif child.name == \"<STR_LIT>\":<EOL><INDENT>self.fields.append(Field._new_from_xml(child))<EOL><DEDENT>elif child.name == \"<STR_LIT>\":<EOL><INDENT>self.items.append(Item._new_from_xml(child))<EOL><DEDENT>elif child.name == \"<STR_LIT>\":<EOL><INDENT>self.__get_reported(child)<EOL><DEDENT>child = child.next<EOL><DEDENT>", "docstring": "Initialize a `Form` object from an XML element.\n\n        :Parameters:\n            - `xmlnode`: the XML element.\n        :Types:\n            - `xmlnode`: `libxml2.xmlNode`", "id": "f15258:c3:m9"}
{"signature": "def make_submit(self, keep_types = False):", "body": "result = Form(\"<STR_LIT>\")<EOL>for field in self.fields:<EOL><INDENT>if field.type == \"<STR_LIT>\":<EOL><INDENT>continue<EOL><DEDENT>if not field.values:<EOL><INDENT>if field.required:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>continue<EOL><DEDENT>if keep_types:<EOL><INDENT>result.add_field(field.name, field.values, field.type)<EOL><DEDENT>else:<EOL><INDENT>result.add_field(field.name, field.values)<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Make a \"submit\" form using data in `self`.\n\n        Remove uneeded information from the form. The information removed\n        includes: title, instructions, field labels, fixed fields etc.\n\n        :raise ValueError: when any required field has no value.\n\n        :Parameters:\n            - `keep_types`: when `True` field type information will be included\n              in the result form. That is usually not needed.\n        :Types:\n            - `keep_types`: `bool`\n\n        :return: the form created.\n        :returntype: `Form`", "id": "f15258:c3:m6"}
{"signature": "def add_field(self, name = None, values = None, field_type = None,<EOL>label = None, options = None, required = False, desc = None, value = None):", "body": "field = Field(name, values, field_type, label, options, required, desc, value)<EOL>self.fields.append(field)<EOL>return field<EOL>", "docstring": "Add a field to the item.\n\n        :Parameters:\n            - `name`: field name.\n            - `values`: raw field values. Not to be used together with `value`.\n            - `field_type`: field type.\n            - `label`: field label.\n            - `options`: optional values for the field.\n            - `required`: `True` if the field is required.\n            - `desc`: natural-language description of the field.\n            - `value`: field value or values in a field_type-specific type. May be used only\n              if `values` parameter is not provided.\n        :Types:\n            - `name`: `unicode`\n            - `values`: `list` of `unicode`\n            - `field_type`: `str`\n            - `label`: `unicode`\n            - `options`: `list` of `Option`\n            - `required`: `bool`\n            - `desc`: `unicode`\n            - `value`: `bool` for \"boolean\" field, `JID` for \"jid-single\", `list` of `JID`\n              for \"jid-multi\", `list` of `unicode` for \"list-multi\" and \"text-multi\"\n              and `unicode` for other field types.\n\n        :return: the field added.\n        :returntype: `Field`", "id": "f15258:c2:m4"}
{"signature": "@classmethod<EOL><INDENT>def _new_from_xml(cls, xmlnode):<DEDENT>", "body": "child = xmlnode.children<EOL>fields = []<EOL>while child:<EOL><INDENT>if child.type != \"<STR_LIT>\" or child.ns().content != DATAFORM_NS:<EOL><INDENT>pass<EOL><DEDENT>elif child.name == \"<STR_LIT>\":<EOL><INDENT>fields.append(Field._new_from_xml(child))<EOL><DEDENT>child = child.next<EOL><DEDENT>return cls(fields)<EOL>", "docstring": "Create a new `Item` object from an XML element.\n\n        :Parameters:\n            - `xmlnode`: the XML element.\n        :Types:\n            - `xmlnode`: `libxml2.xmlNode`\n\n        :return: the object created.\n        :returntype: `Item`", "id": "f15258:c2:m6"}
{"signature": "def get_xml(self):", "body": "if not self._dirty:<EOL><INDENT>return self._element<EOL><DEDENT>element = self.as_xml()<EOL>self._element = element<EOL>self._dirty = False<EOL>return element<EOL>", "docstring": "Return the XML stanza representation.\n\n        This returns the original or cached XML representation, which\n        may be much more efficient than `as_xml`.\n\n        Result of this function should never be modified.\n\n        :returntype: :etree:`ElementTree.Element`", "id": "f15259:c0:m6"}
{"signature": "def __init__(self, element, from_jid = None, to_jid = None,<EOL>stanza_type = None, stanza_id = None,<EOL>error = None, error_cond = None,<EOL>return_path = None, language = None):", "body": "<EOL>self._error = None<EOL>self._from_jid = None<EOL>self._to_jid = None<EOL>self._stanza_type = None<EOL>self._stanza_id = None<EOL>self._language = language<EOL>if isinstance(element, ElementClass):<EOL><INDENT>self._element = element<EOL>self._dirty = False<EOL>self._decode_attributes()<EOL>if not element.tag.startswith(\"<STR_LIT:{>\"):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>self._namespace, self.element_name = element.tag[<NUM_LIT:1>:].split(\"<STR_LIT:}>\")<EOL>if self._namespace not in STANZA_NAMESPACES:<EOL><INDENT>raise BadRequestProtocolError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>self._payload = None<EOL><DEDENT>else:<EOL><INDENT>self._element = None<EOL>self._dirty = True<EOL>self.element_name = unicode(element)<EOL>self._namespace = STANZA_CLIENT_NS<EOL>self._payload = []<EOL><DEDENT>self._ns_prefix = \"<STR_LIT>\".format(self._namespace)<EOL>self._element_qname = self._ns_prefix + self.element_name<EOL>if from_jid is not None:<EOL><INDENT>self.from_jid = from_jid<EOL><DEDENT>if to_jid is not None:<EOL><INDENT>self.to_jid = to_jid<EOL><DEDENT>if stanza_type:<EOL><INDENT>self.stanza_type = stanza_type<EOL><DEDENT>if stanza_id:<EOL><INDENT>self.stanza_id = stanza_id<EOL><DEDENT>if self.stanza_type == \"<STR_LIT:error>\":<EOL><INDENT>if error:<EOL><INDENT>self._error = StanzaErrorElement(error)<EOL><DEDENT>elif error_cond:<EOL><INDENT>self._error = StanzaErrorElement(error_cond)<EOL><DEDENT>else:<EOL><INDENT>self._decode_error()<EOL><DEDENT><DEDENT>if return_path is not None:<EOL><INDENT>self._return_path = weakref.ref(return_path)<EOL><DEDENT>", "docstring": "Initialize a Stanza object.\n\n        :Parameters:\n            - `element`: XML element of this stanza, or element name for a new\n              stanza. 
If element is given it must not be modified later,\n              unless `decode_payload()` and `mark_dirty()` methods are called\n              first (the element changes won't affec the stanza then).\n            - `from_jid`: sender JID.\n            - `to_jid`: recipient JID.\n            - `stanza_type`: staza type: one of: \"get\", \"set\", \"result\" or\n              \"error\".\n            - `stanza_id`: stanza id -- value of stanza's \"id\" attribute. If\n              not given, then unique for the session value is generated.\n            - `error`: error object. Ignored if `stanza_type` is not \"error\".\n            - `error_cond`: error condition name. Ignored if `stanza_type` is\n              not \"error\" or `error` is not None.\n            - `return_path`: route for sending responses to this stanza. Will\n              be weakly referenced.\n            - `language`: default language for the stanza content\n        :Types:\n            - `element`: `unicode` or :etree:`ElementTree.Element`\n            - `from_jid`: `JID`\n            - `to_jid`: `JID`\n            - `stanza_type`: `unicode`\n            - `stanza_id`: `unicode`\n            - `error`: `pyxmpp.error.StanzaErrorElement`\n            - `error_cond`: `unicode`\n            - `return_path`: `StanzaRoute`\n            - `language`: `unicode`", "id": "f15259:c0:m0"}
{"signature": "def get_payload(self, payload_class, payload_key = None,<EOL>specialize = False):", "body": "if self._payload is None:<EOL><INDENT>self.decode_payload()<EOL><DEDENT>if payload_class is None:<EOL><INDENT>if self._payload:<EOL><INDENT>payload = self._payload[<NUM_LIT:0>]<EOL>if specialize and isinstance(payload, XMLPayload):<EOL><INDENT>klass = payload_class_for_element_name(<EOL>payload.element.tag)<EOL>if klass is not XMLPayload:<EOL><INDENT>payload = klass.from_xml(payload.element)<EOL>self._payload[<NUM_LIT:0>] = payload<EOL><DEDENT><DEDENT>return payload<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>elements = payload_class._pyxmpp_payload_element_name<EOL>for i, payload in enumerate(self._payload):<EOL><INDENT>if isinstance(payload, XMLPayload):<EOL><INDENT>if payload_class is not XMLPayload:<EOL><INDENT>if payload.xml_element_name not in elements:<EOL><INDENT>continue<EOL><DEDENT>payload = payload_class.from_xml(payload.element)<EOL><DEDENT><DEDENT>elif not isinstance(payload, payload_class):<EOL><INDENT>continue<EOL><DEDENT>if payload_key is not None and payload_key != payload.handler_key():<EOL><INDENT>continue<EOL><DEDENT>self._payload[i] = payload<EOL>return payload<EOL><DEDENT>return None<EOL>", "docstring": "Get the first payload item matching the given class\n        and optional key.\n\n        Payloads may be addressed using a specific payload class or\n        via the generic `XMLPayload` element, though the `XMLPayload`\n        representation is available only as long as the element is not\n        requested by a more specific type.\n\n        :Parameters:\n            - `payload_class`: requested payload class, a subclass of\n              `StanzaPayload`. If `None` get the first payload in whatever\n              class is available.\n            - `payload_key`: optional key for additional match. 
When used\n              with `payload_class` = `XMLPayload` this selects the element to\n              match\n            - `specialize`: If `True`, and `payload_class` is `None` then\n              return object of a specialized `StanzaPayload` subclass whenever\n              possible\n        :Types:\n            - `payload_class`: `StanzaPayload`\n            - `specialize`: `bool`\n\n        :Return: payload element found or `None`\n        :Returntype: `StanzaPayload`", "id": "f15259:c0:m23"}
{"signature": "@property<EOL><INDENT>def return_path(self): <DEDENT>", "body": "return self._return_path()<EOL>", "docstring": "Stream the stanza was received from.\n\n        :returntype: `StanzaRoute`", "id": "f15259:c0:m18"}
{"signature": "def set_payload(self, payload):", "body": "if isinstance(payload, ElementClass):<EOL><INDENT>self._payload = [ XMLPayload(payload) ]<EOL><DEDENT>elif isinstance(payload, StanzaPayload):<EOL><INDENT>self._payload = [ payload ]<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>self._dirty = True<EOL>", "docstring": "Set stanza payload to a single item.\n\n        All current stanza content of will be dropped.\n        Marks the stanza dirty.\n\n        :Parameters:\n            - `payload`: XML element or stanza payload object to use\n        :Types:\n            - `payload`: :etree:`ElementTree.Element` or `StanzaPayload`", "id": "f15259:c0:m20"}
{"signature": "def mark_dirty(self):", "body": "self._dirty = True<EOL>", "docstring": "Mark the stanza 'dirty' so the XML representation will be\n        re-built the next time it is requested.\n\n        This should be called each time the payload attached to the stanza is\n        modifed.", "id": "f15259:c0:m19"}
{"signature": "@property<EOL><INDENT>def to_jid(self): <DEDENT>", "body": "return self._to_jid<EOL>", "docstring": "Destination JID of the stanza.\n\n        :returntype: `JID`", "id": "f15259:c0:m10"}
{"signature": "@property<EOL><INDENT>def from_jid(self): <DEDENT>", "body": "return self._from_jid<EOL>", "docstring": "Source JID of the stanza.\n\n        :returntype: `JID`", "id": "f15259:c0:m8"}
{"signature": "def add_payload(self, payload):", "body": "if self._payload is None:<EOL><INDENT>self.decode_payload()<EOL><DEDENT>if isinstance(payload, ElementClass):<EOL><INDENT>self._payload.append(XMLPayload(payload))<EOL><DEDENT>elif isinstance(payload, StanzaPayload):<EOL><INDENT>self._payload.append(payload)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>self._dirty = True<EOL>", "docstring": "Add new the stanza payload.\n\n        Marks the stanza dirty.\n\n        :Parameters:\n            - `payload`: XML element or stanza payload object to add\n        :Types:\n            - `payload`: :etree:`ElementTree.Element` or `StanzaPayload`", "id": "f15259:c0:m21"}
{"signature": "def serialize(self):", "body": "return serialize(self.get_xml())<EOL>", "docstring": "Serialize the stanza into a Unicode XML string.\n\n        :return: serialized stanza.\n        :returntype: `unicode`", "id": "f15259:c0:m4"}
{"signature": "def copy(self):", "body": "result = Stanza(self.element_name, self.from_jid, self.to_jid,<EOL>self.stanza_type, self.stanza_id, self.error,<EOL>self._return_path())<EOL>if self._payload is None:<EOL><INDENT>self.decode_payload()<EOL><DEDENT>for payload in self._payload:<EOL><INDENT>result.add_payload(payload.copy())<EOL><DEDENT>return result<EOL>", "docstring": "Create a deep copy of the stanza.\n\n        :returntype: `Stanza`", "id": "f15259:c0:m3"}
{"signature": "def _decode_subject(self, subject):", "body": "self.common_names = []<EOL>subject_name = []<EOL>for rdnss in subject:<EOL><INDENT>for rdns in rdnss:<EOL><INDENT>rdnss_list = []<EOL>for nameval in rdns:<EOL><INDENT>val_type = nameval.getComponentByName('<STR_LIT:type>')<EOL>value = nameval.getComponentByName('<STR_LIT:value>')<EOL>if val_type not in DN_OIDS:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(val_type))<EOL>continue<EOL><DEDENT>val_type = DN_OIDS[val_type]<EOL>value = der_decoder.decode(value,<EOL>asn1Spec = DirectoryString())[<NUM_LIT:0>]<EOL>value = value.getComponent()<EOL>try:<EOL><INDENT>value = _decode_asn1_string(value)<EOL><DEDENT>except UnicodeError:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(value))<EOL>continue<EOL><DEDENT>if val_type == \"<STR_LIT>\":<EOL><INDENT>self.common_names.append(value)<EOL><DEDENT>rdnss_list.append((val_type, value))<EOL><DEDENT>subject_name.append(tuple(rdnss_list))<EOL><DEDENT><DEDENT>self.subject_name = tuple(subject_name)<EOL>", "docstring": "Load data from a ASN.1 subject.", "id": "f15260:c2:m2"}
{"signature": "@classmethod<EOL><INDENT>def from_der_data(cls, data):<DEDENT>", "body": "<EOL>logger.debug(\"<STR_LIT>\".format(data))<EOL>if cls._cert_asn1_type is None:<EOL><INDENT>cls._cert_asn1_type = Certificate()<EOL><DEDENT>cert = der_decoder.decode(data, asn1Spec = cls._cert_asn1_type)[<NUM_LIT:0>]<EOL>result = cls()<EOL>tbs_cert = cert.getComponentByName('<STR_LIT>')<EOL>subject = tbs_cert.getComponentByName('<STR_LIT>')<EOL>logger.debug(\"<STR_LIT>\".format(subject))<EOL>result._decode_subject(subject)<EOL>validity = tbs_cert.getComponentByName('<STR_LIT>')<EOL>result._decode_validity(validity)<EOL>extensions = tbs_cert.getComponentByName('<STR_LIT>')<EOL>if extensions:<EOL><INDENT>for extension in extensions:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(extension))<EOL>oid = extension.getComponentByName('<STR_LIT>')<EOL>logger.debug(\"<STR_LIT>\".format(oid))<EOL>if oid != SUBJECT_ALT_NAME_OID:<EOL><INDENT>continue<EOL><DEDENT>value = extension.getComponentByName('<STR_LIT>')<EOL>logger.debug(\"<STR_LIT>\".format(value))<EOL>if isinstance(value, Any):<EOL><INDENT>value = der_decoder.decode(value,<EOL>asn1Spec = OctetString())[<NUM_LIT:0>]<EOL><DEDENT>alt_names = der_decoder.decode(value,<EOL>asn1Spec = GeneralNames())[<NUM_LIT:0>]<EOL>logger.debug(\"<STR_LIT>\".format(alt_names))<EOL>result._decode_alt_names(alt_names)<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Decode DER-encoded certificate.\n\n        :Parameters:\n            - `data`: the encoded certificate\n        :Types:\n            - `data`: `bytes`\n\n        :Return: decoded certificate data\n        :Returntype: ASN1CertificateData", "id": "f15260:c2:m1"}
{"signature": "def _decode_names(self):", "body": "if self.subject_name is not None:<EOL><INDENT>subject_name = []<EOL>for part in self.subject_name:<EOL><INDENT>new_part = []<EOL>for name, value in part:<EOL><INDENT>try:<EOL><INDENT>name = name.decode(\"<STR_LIT:utf-8>\")<EOL>value = value.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>except UnicodeError:<EOL><INDENT>continue<EOL><DEDENT>new_part.append((name, value))<EOL><DEDENT>subject_name.append(tuple(new_part))<EOL><DEDENT>self.subject_name = tuple(subject_name)<EOL><DEDENT>for key, old in list(self.alt_names.items()):<EOL><INDENT>new = []<EOL>for name in old:<EOL><INDENT>try:<EOL><INDENT>name = name.decode(\"<STR_LIT:utf-8>\")<EOL><DEDENT>except UnicodeError:<EOL><INDENT>continue<EOL><DEDENT>new.append(name)<EOL><DEDENT>self.alt_names[key] = new<EOL><DEDENT>", "docstring": "Decode names (hopefully ASCII or UTF-8) into Unicode.", "id": "f15260:c1:m1"}
{"signature": "@property<EOL><INDENT>def display_name(self):<DEDENT>", "body": "if self.subject_name:<EOL><INDENT>return \"<STR_LIT:U+002CU+0020>\".join( [ \"<STR_LIT:U+002CU+0020>\".join(<EOL>[ \"<STR_LIT>\".format(k,v) for k, v in dn_tuple ] )<EOL>for dn_tuple in self.subject_name ])<EOL><DEDENT>for name_type in (\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>names = self.alt_names.get(name_type)<EOL>if names:<EOL><INDENT>return names[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>return \"<STR_LIT>\"<EOL>", "docstring": "Get human-readable subject name derived from the SubjectName\n        or SubjectAltName field.", "id": "f15260:c0:m1"}
{"signature": "def get_jids(self):", "body": "result = []<EOL>if (\"<STR_LIT>\" in self.alt_names or \"<STR_LIT>\" in self.alt_names<EOL>or \"<STR_LIT>\" in self.alt_names):<EOL><INDENT>addrs =  self.alt_names.get(\"<STR_LIT>\", [])<EOL>addrs += [ addr for addr in self.alt_names.get(\"<STR_LIT>\", [])<EOL>if not addr.startswith(\"<STR_LIT>\") ]<EOL>addrs += [ addr.split(\"<STR_LIT:.>\", <NUM_LIT:1>)[<NUM_LIT:1>] for addr<EOL>in self.alt_names.get(\"<STR_LIT>\", [])<EOL>if (addr.startswith(\"<STR_LIT>\")<EOL>or addr.startswith(\"<STR_LIT>\"))]<EOL>warn_bad = True<EOL><DEDENT>elif self.common_names:<EOL><INDENT>addrs = [addr for addr in self.common_names<EOL>if \"<STR_LIT:@>\" not in addr and \"<STR_LIT:/>\" not in addr]<EOL>warn_bad = False<EOL><DEDENT>else:<EOL><INDENT>return []<EOL><DEDENT>for addr in addrs:<EOL><INDENT>try:<EOL><INDENT>jid = JID(addr)<EOL>if jid not in result:<EOL><INDENT>result.append(jid)<EOL><DEDENT><DEDENT>except JIDError as err:<EOL><INDENT>if warn_bad:<EOL><INDENT>logger.warning(\"<STR_LIT>\"<EOL>.format(addr, err))<EOL><DEDENT><DEDENT><DEDENT>return result<EOL>", "docstring": "Return JIDs for which this certificate is valid (except the domain\n        wildcards).\n\n        :Returtype: `list` of `JID`", "id": "f15260:c0:m2"}
{"signature": "def verify_jid_against_srv_name(self, jid, srv_type):", "body": "srv_prefix = \"<STR_LIT:_>\" + srv_type + \"<STR_LIT:.>\"<EOL>srv_prefix_l = len(srv_prefix)<EOL>for srv in self.alt_names.get(\"<STR_LIT>\", []):<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(jid,<EOL>srv))<EOL>if not srv.startswith(srv_prefix):<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(srv, srv_prefix))<EOL>continue<EOL><DEDENT>try:<EOL><INDENT>srv_jid = JID(srv[srv_prefix_l:])<EOL><DEDENT>except ValueError:<EOL><INDENT>continue<EOL><DEDENT>if srv_jid == jid:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Check if the cerificate is valid for given domain-only JID\n        and a service type.\n\n        :Parameters:\n            - `jid`: JID requested (domain part only)\n            - `srv_type`: service type, e.g. 'xmpp-client'\n        :Types:\n            - `jid`: `JID`\n            - `srv_type`: `unicode`\n        :Returntype: `bool`", "id": "f15260:c0:m5"}
{"signature": "def verify_server(self, server_name, srv_type = '<STR_LIT>'):", "body": "server_jid = JID(server_name)<EOL>if \"<STR_LIT>\" not in self.alt_names and \"<STR_LIT>\" not in self.alt_namesand \"<STR_LIT>\" not in self.alt_names:<EOL><INDENT>return self.verify_jid_against_common_name(server_jid)<EOL><DEDENT>names = [name for name in self.alt_names.get(\"<STR_LIT>\", [])<EOL>if not name.startswith(\"<STR_LIT>\")]<EOL>names += self.alt_names.get(\"<STR_LIT>\", [])<EOL>for name in names:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(server_jid,<EOL>name))<EOL>try:<EOL><INDENT>jid = JID(name)<EOL><DEDENT>except ValueError:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(name))<EOL>continue<EOL><DEDENT>if jid == server_jid:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return True<EOL><DEDENT><DEDENT>if srv_type and self.verify_jid_against_srv_name(server_jid, srv_type):<EOL><INDENT>return True<EOL><DEDENT>wildcards = [name[<NUM_LIT:2>:] for name in self.alt_names.get(\"<STR_LIT>\", [])<EOL>if name.startswith(\"<STR_LIT>\")]<EOL>if not wildcards or not \"<STR_LIT:.>\" in server_jid.domain:<EOL><INDENT>return False<EOL><DEDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(server_jid, wildcards))<EOL>server_domain = JID(domain = server_jid.domain.split(\"<STR_LIT:.>\", <NUM_LIT:1>)[<NUM_LIT:1>])<EOL>for domain in wildcards:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(server_domain,<EOL>domain))<EOL>try:<EOL><INDENT>jid = JID(domain)<EOL><DEDENT>except ValueError:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(name))<EOL>continue<EOL><DEDENT>if jid == server_domain:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Verify certificate for a server.\n\n        :Parameters:\n            - `server_name`: name of the server presenting the cerificate\n            - `srv_type`: service type requested, as used in the SRV record\n        :Types:\n            - `server_name`: `unicode` or `JID`\n            - `srv_type`: 
`unicode`\n\n        :Return: `True` if the certificate is valid for given name, `False`\n        otherwise.", "id": "f15260:c0:m3"}
{"signature": "def _decode_validity(self, validity):", "body": "not_after = validity.getComponentByName('<STR_LIT>')<EOL>not_after = str(not_after.getComponent())<EOL>if isinstance(not_after, GeneralizedTime):<EOL><INDENT>self.not_after = datetime.strptime(not_after, \"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>self.not_after = datetime.strptime(not_after, \"<STR_LIT>\")<EOL><DEDENT>self.alt_names = defaultdict(list)<EOL>", "docstring": "Load data from a ASN.1 validity value.", "id": "f15260:c2:m3"}
{"signature": "def make_deny_response(self):", "body": "if self.stanza_type not in (\"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>stanza = Presence(stanza_type = DENY_RESPONSES[self.stanza_type],<EOL>from_jid = self.to_jid, to_jid = self.from_jid,<EOL>stanza_id = self.stanza_id)<EOL>return stanza<EOL>", "docstring": "Create \"deny\" response for the \"subscribe\" / \"subscribed\" /\n        \"unsubscribe\" / \"unsubscribed\" presence stanza.\n\n        :return: new presence stanza.\n        :returntype: `Presence`", "id": "f15261:c0:m11"}
{"signature": "@property<EOL><INDENT>def show(self): <DEDENT>", "body": "return self._show<EOL>", "docstring": "Presence status type.\n\n        :returntype: `unicode`", "id": "f15261:c0:m4"}
{"signature": "def _make_prefix(self, declared_prefixes):", "body": "used_prefixes = set(self._prefixes.values())<EOL>used_prefixes |= set(declared_prefixes.values())<EOL>while True:<EOL><INDENT>prefix = \"<STR_LIT>\".format(self._next_id)<EOL>self._next_id += <NUM_LIT:1><EOL>if prefix not in used_prefixes:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return prefix<EOL>", "docstring": "Make up a new namespace prefix, which won't conflict\n        with `_prefixes` and prefixes declared in the current scope.\n\n        :Parameters:\n            - `declared_prefixes`: namespace to prefix mapping for the current\n              scope\n        :Types:\n            - `declared_prefixes`: `unicode` to `unicode` dictionary\n\n        :Returns: a new prefix\n        :Returntype: `unicode`", "id": "f15262:c0:m5"}
{"signature": "def _make_prefixed(self, name, is_element, declared_prefixes, declarations):", "body": "namespace, name = self._split_qname(name, is_element)<EOL>if namespace is None:<EOL><INDENT>prefix = None<EOL><DEDENT>elif namespace in declared_prefixes:<EOL><INDENT>prefix = declared_prefixes[namespace]<EOL><DEDENT>elif namespace in self._prefixes:<EOL><INDENT>prefix = self._prefixes[namespace]<EOL>declarations[namespace] = prefix<EOL>declared_prefixes[namespace] = prefix<EOL><DEDENT>else:<EOL><INDENT>if is_element:<EOL><INDENT>prefix = None<EOL><DEDENT>else:<EOL><INDENT>prefix = self._make_prefix(declared_prefixes)<EOL><DEDENT>declarations[namespace] = prefix<EOL>declared_prefixes[namespace] = prefix<EOL><DEDENT>if prefix:<EOL><INDENT>return prefix + \"<STR_LIT::>\" + name<EOL><DEDENT>else:<EOL><INDENT>return name<EOL><DEDENT>", "docstring": "Return namespace-prefixed tag or attribute name.\n\n        Add appropriate declaration to `declarations` when neccessary.\n\n        If no prefix for an element namespace is defined, make the elements\n        namespace default (no prefix). For attributes, make up a prefix in such\n        case.\n\n        :Parameters:\n            - `name`: QName ('{namespace-uri}local-name')\n              to convert\n            - `is_element`: `True` for element, `False` for an attribute\n            - `declared_prefixes`: mapping of prefixes already declared\n              at this scope\n            - `declarations`: XMLNS declarations on the current element.\n        :Types:\n            - `name`: `unicode`\n            - `is_element`: `bool`\n            - `declared_prefixes`: `unicode` to `unicode` dictionary\n            - `declarations`: `unicode` to `unicode` dictionary\n\n        :Returntype: `unicode`", "id": "f15262:c0:m6"}
{"signature": "def emit_stanza(self, element):", "body": "if not self._head_emitted:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>string = self._emit_element(element, level = <NUM_LIT:1>,<EOL>declared_prefixes = self._root_prefixes)<EOL>return remove_evil_characters(string)<EOL>", "docstring": "Serialize a stanza.\n\n        Must be called after `emit_head`.\n\n        :Parameters:\n            - `element`: the element to serialize\n        :Types:\n            - `element`: :etree:`ElementTree.Element`\n\n        :Return: serialized element\n        :Returntype: `unicode`", "id": "f15262:c0:m9"}
{"signature": "def _split_qname(self, name, is_element):", "body": "if name.startswith(\"<STR_LIT:{>\"):<EOL><INDENT>namespace, name = name[<NUM_LIT:1>:].split(\"<STR_LIT:}>\", <NUM_LIT:1>)<EOL>if namespace in STANZA_NAMESPACES:<EOL><INDENT>namespace = self.stanza_namespace<EOL><DEDENT><DEDENT>elif is_element:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(name))<EOL><DEDENT>else:<EOL><INDENT>namespace = None<EOL><DEDENT>return namespace, name<EOL>", "docstring": "Split an element of attribute qname into namespace and local\n        name.\n\n        :Parameters:\n            - `name`: element or attribute QName\n            - `is_element`: `True` for an element, `False` for an attribute\n        :Types:\n            - `name`: `unicode`\n            - `is_element`: `bool`\n\n        :Return: namespace URI, local name\n        :returntype: `unicode`, `unicode`", "id": "f15262:c0:m4"}
{"signature": "def serialize(element):", "body": "if getattr(_THREAD, \"<STR_LIT>\", None) is None:<EOL><INDENT>_THREAD.serializer = XMPPSerializer(\"<STR_LIT>\")<EOL>_THREAD.serializer.emit_head(None, None)<EOL><DEDENT>return _THREAD.serializer.emit_stanza(element)<EOL>", "docstring": "Serialize an XMPP element.\n\n    Utility function for debugging or logging.\n\n        :Parameters:\n            - `element`: the element to serialize\n        :Types:\n            - `element`: :etree:`ElementTree.Element`\n\n        :Return: serialized element\n        :Returntype: `unicode`", "id": "f15262:m1"}
{"signature": "def __init__(self, stanza_namespace, extra_prefixes = None):", "body": "self.stanza_namespace = stanza_namespace<EOL>self._prefixes = {}<EOL>if extra_prefixes:<EOL><INDENT>self._prefixes.update(extra_prefixes)<EOL><DEDENT>self._root_prefixes = None<EOL>self._head_emitted = False<EOL>self._next_id = <NUM_LIT:1><EOL>", "docstring": ":Parameters:\n    - `stanza_namespace`: the default namespace used for XMPP stanzas.\n      E.g. 'jabber:client' for c2s connections.\n    - `extra_prefixes`: mapping of namespaces to prefixes (not the\n      other way) to be used on the stream. These prefixes will be\n      declared on the root element and used in all descendants. That\n      may be used to optimize the stream for size.\n:Types:\n    - `stanza_namespace`: `unicode`\n    - `extra_prefixes`: `unicode` to `unicode` mapping.", "id": "f15262:c0:m0"}
{"signature": "def remove_evil_characters(data):", "body": "return EVIL_CHARACTERS_RE.sub(\"<STR_LIT>\", data)<EOL>", "docstring": "Remove control characters (not allowed in XML) from a string.", "id": "f15262:m0"}
{"signature": "def stop(self):", "body": "self._quit = True<EOL>", "docstring": "Request the thread to stop.", "id": "f15264:c4:m3"}
{"signature": "def stop(self, join = False, timeout = None):", "body": "logger.debug(\"<STR_LIT>\")<EOL>for handler in self.io_handlers:<EOL><INDENT>handler.close()<EOL><DEDENT>if self.event_thread and self.event_thread.is_alive():<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self.event_queue.put(QUIT)<EOL><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>threads = self.io_threads + self.timeout_threads<EOL>for thread in threads:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(thread))<EOL>thread.stop()<EOL><DEDENT>if not join:<EOL><INDENT>return<EOL><DEDENT>if self.event_thread:<EOL><INDENT>threads.append(self.event_thread)<EOL><DEDENT>if timeout is None:<EOL><INDENT>for thread in threads:<EOL><INDENT>thread.join()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>timeout1 = (timeout * <NUM_LIT>) / len(threads)<EOL>threads_left = []<EOL>for thread in threads:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(thread))<EOL>thread.join(timeout1)<EOL>if thread.is_alive():<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(thread))<EOL>threads_left.append(thread)<EOL><DEDENT><DEDENT>if threads_left:<EOL><INDENT>timeout2 = (timeout * <NUM_LIT>) / len(threads_left)<EOL>for thread in threads_left:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(thread))<EOL>thread.join(timeout2)<EOL><DEDENT><DEDENT><DEDENT>self.io_threads = []<EOL>self.event_thread = None<EOL>", "docstring": "Stop the threads.\n\n        :Parameters:\n            - `join`: join the threads (wait until they exit)\n            - `timeout`: maximum time (in seconds) to wait when `join` is\n              `True`).  No limit when `timeout` is `None`.", "id": "f15264:c5:m10"}
{"signature": "def run(self):", "body": "raise NotImplementedError<EOL>", "docstring": "The thread function.", "id": "f15264:c0:m6"}
{"signature": "def _run(self):", "body": "logger.debug(\"<STR_LIT>\".format(self.name))<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>self.run()<EOL><DEDENT>except Exception: <EOL><INDENT>self.exc_info = sys.exc_info()<EOL>logger.debug(\"<STR_LIT>\"<EOL>.format(self.name), exc_info = self.exc_info)<EOL>if self.exc_queue:<EOL><INDENT>self.exc_queue.put( (self, self.exc_info) )<EOL>continue<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(self.name))<EOL>return<EOL><DEDENT><DEDENT>except:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(self.name))<EOL>return<EOL><DEDENT>break<EOL><DEDENT>logger.debug(\"<STR_LIT>\".format(self.name))<EOL>", "docstring": "The thread function. Calls `self.run()` and if it raises\n        an exception, stores it in self.exc_info", "id": "f15264:c4:m5"}
{"signature": "def join(self, timeout=None):", "body": "return self.thread.join(timeout)<EOL>", "docstring": "Join the thread (wait until it stops).", "id": "f15264:c4:m4"}
{"signature": "def _remove_timeout_handler(self, handler):", "body": "if handler not in self.timeout_handlers:<EOL><INDENT>return<EOL><DEDENT>self.timeout_handlers.remove(handler)<EOL>for thread in self.timeout_threads:<EOL><INDENT>if thread.method.__self__ is handler:<EOL><INDENT>thread.stop()<EOL><DEDENT><DEDENT>", "docstring": "Remove a TimeoutHandler from the pool.", "id": "f15264:c5:m8"}
{"signature": "def is_alive(self):", "body": "return self.thread.is_alive()<EOL>", "docstring": "Check if the thread is alive.", "id": "f15264:c0:m2"}
{"signature": "def start(self, daemon = False):", "body": "self.daemon = daemon<EOL>self.io_threads = []<EOL>self.event_thread = EventDispatcherThread(self.event_dispatcher,<EOL>daemon = daemon, exc_queue = self.exc_queue)<EOL>self.event_thread.start()<EOL>for handler in self.io_handlers:<EOL><INDENT>self._run_io_threads(handler)<EOL><DEDENT>for handler in self.timeout_handlers:<EOL><INDENT>self._run_timeout_threads(handler)<EOL><DEDENT>", "docstring": "Start the threads.", "id": "f15264:c5:m9"}
{"signature": "def _run_io_threads(self, handler):", "body": "reader = ReadingThread(self.settings, handler, daemon = self.daemon,<EOL>exc_queue = self.exc_queue)<EOL>writter = WrittingThread(self.settings, handler, daemon = self.daemon,<EOL>exc_queue = self.exc_queue)<EOL>self.io_threads += [reader, writter]<EOL>reader.start()<EOL>writter.start()<EOL>", "docstring": "Start threads for an IOHandler.", "id": "f15264:c5:m4"}
{"signature": "def run(self):", "body": "<EOL>interval = self.settings[\"<STR_LIT>\"]<EOL>prepared = False<EOL>timeout = <NUM_LIT:0.1><EOL>while not self._quit:<EOL><INDENT>if not prepared:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(<EOL>self.name, self.io_handler))<EOL>ret = self.io_handler.prepare()<EOL>logger.debug(\"<STR_LIT>\".format(self.name,<EOL>ret))<EOL>if isinstance(ret, HandlerReady):<EOL><INDENT>prepared = True<EOL><DEDENT>elif isinstance(ret, PrepareAgain):<EOL><INDENT>if ret.timeout is not None:<EOL><INDENT>timeout = ret.timeout<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if self.io_handler.is_readable():<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(self.name))<EOL>fileno = self.io_handler.fileno()<EOL>if fileno is not None:<EOL><INDENT>readable = wait_for_read(fileno, interval)<EOL>if readable:<EOL><INDENT>self.io_handler.handle_read()<EOL><DEDENT><DEDENT><DEDENT>elif not prepared:<EOL><INDENT>if timeout:<EOL><INDENT>time.sleep(timeout)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(self.name))<EOL>if not self.io_handler.wait_for_readability():<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "The thread function.\n\n        First, call the handler's 'prepare' method until it returns\n        `HandlerReady` then loop waiting for the socket input and calling\n        'handle_read' on the handler.", "id": "f15264:c1:m1"}
{"signature": "def _remove_io_handler(self, handler):", "body": "if handler not in self.io_handlers:<EOL><INDENT>return<EOL><DEDENT>self.io_handlers.remove(handler)<EOL>for thread in self.io_threads:<EOL><INDENT>if thread.io_handler is handler:<EOL><INDENT>thread.stop()<EOL><DEDENT><DEDENT>", "docstring": "Remove an IOHandler from the pool.", "id": "f15264:c5:m5"}
{"signature": "def _add_io_handler(self, handler):", "body": "self.io_handlers.append(handler)<EOL>if self.event_thread is None:<EOL><INDENT>return<EOL><DEDENT>", "docstring": "Add an IOHandler to the pool.", "id": "f15264:c5:m3"}
{"signature": "def _run_timeout_threads(self, handler):", "body": "<EOL>for dummy, method in inspect.getmembers(handler, callable):<EOL><INDENT>if not hasattr(method, \"<STR_LIT>\"):<EOL><INDENT>continue<EOL><DEDENT>thread = TimeoutThread(method, daemon = self.daemon,<EOL>exc_queue = self.exc_queue)<EOL>self.timeout_threads.append(thread)<EOL>thread.start()<EOL><DEDENT>", "docstring": "Start threads for a TimeoutHandler.", "id": "f15264:c5:m7"}
{"signature": "def loop_iteration(self, timeout = <NUM_LIT:0.1>):", "body": "try:<EOL><INDENT>exc_info = self.exc_queue.get(True, timeout)[<NUM_LIT:1>]<EOL><DEDENT>except queue.Empty:<EOL><INDENT>return<EOL><DEDENT>exc_type, exc_value, ext_stack = exc_info<EOL>raise exc_type(exc_value).with_traceback(ext_stack)<EOL>", "docstring": "Wait up to `timeout` seconds, raise any exception from the\n        threads.", "id": "f15264:c5:m15"}
{"signature": "def _configure_io_handler(self, handler):", "body": "if self.check_events():<EOL><INDENT>return<EOL><DEDENT>if handler in self._unprepared_handlers:<EOL><INDENT>old_fileno = self._unprepared_handlers[handler]<EOL>prepared = self._prepare_io_handler(handler)<EOL><DEDENT>else:<EOL><INDENT>old_fileno = None<EOL>prepared = True<EOL><DEDENT>fileno = handler.fileno()<EOL>if old_fileno is not None and fileno != old_fileno:<EOL><INDENT>del self._handlers[old_fileno]<EOL>try:<EOL><INDENT>self.poll.unregister(old_fileno)<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if not prepared:<EOL><INDENT>self._unprepared_handlers[handler] = fileno<EOL><DEDENT>if not fileno:<EOL><INDENT>return<EOL><DEDENT>self._handlers[fileno] = handler<EOL>events = <NUM_LIT:0><EOL>if handler.is_readable():<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(handler))<EOL>events |= select.POLLIN<EOL><DEDENT>if handler.is_writable():<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(handler))<EOL>events |= select.POLLOUT<EOL><DEDENT>if events:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(handler, fileno, events))<EOL>self.poll.register(fileno, events)<EOL><DEDENT>", "docstring": "Register an io-handler at the polling object.", "id": "f15265:c0:m2"}
{"signature": "def _add_io_handler(self, handler):", "body": "self._unprepared_handlers[handler] = None<EOL>self._configure_io_handler(handler)<EOL>", "docstring": "Add an I/O handler to the loop.", "id": "f15265:c0:m1"}
{"signature": "def _prepare_io_handler(self, handler):", "body": "logger.debug(\"<STR_LIT>\".format(handler))<EOL>ret = handler.prepare()<EOL>logger.debug(\"<STR_LIT>\".format(ret))<EOL>if isinstance(ret, HandlerReady):<EOL><INDENT>del self._unprepared_handlers[handler]<EOL>prepared = True<EOL><DEDENT>elif isinstance(ret, PrepareAgain):<EOL><INDENT>if ret.timeout is not None:<EOL><INDENT>if self._timeout is not None:<EOL><INDENT>self._timeout = min(self._timeout, ret.timeout)<EOL><DEDENT>else:<EOL><INDENT>self._timeout = ret.timeout<EOL><DEDENT><DEDENT>prepared = False<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>return prepared<EOL>", "docstring": "Call the `interfaces.IOHandler.prepare` method and\n        remove the handler from unprepared handler list when done.", "id": "f15265:c0:m3"}
{"signature": "def timeout_handler(interval, recurring = None):", "body": "def decorator(func):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>func._pyxmpp_timeout = interval<EOL>func._pyxmpp_recurring = recurring<EOL>return func<EOL><DEDENT>return decorator<EOL>", "docstring": "Method decorator generator for decorating event handlers.\n\n    To be used on `TimeoutHandler` subclass methods only.\n\n    :Parameters:\n        - `interval`: interval (in seconds) before the method will be called.\n        - `recurring`: When `True`, the handler will be called each `interval`\n          seconds, when `False` it will be called only once. If `True`,\n          then the handler should return the next interval or `None` if it\n          should not be called again.\n    :Types:\n        - `interval`: `float`\n        - `recurring`: `bool`", "id": "f15266:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def remove_handler(self, handler):<DEDENT>", "body": "pass<EOL>", "docstring": "Add a new handler to the main loop.\n\n        Do nothing if the handler is not registered at the main loop.\n\n        :Parameters:\n            - `handler`: the handler object to add\n        :Types:\n            - `handler`: `IOHandler` or `EventHandler` or `TimeoutHandler`", "id": "f15266:c8:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def loop_iteration(self, timeout = <NUM_LIT:1>):<DEDENT>", "body": "pass<EOL>", "docstring": "Single loop iteration.\n\n        :Parameters:\n            - `timeout`: maximum time (in seconds) to block for\n        :Types:\n            - `timeout`: `float`", "id": "f15266:c8:m7"}
{"signature": "@abstractmethod<EOL><INDENT>def is_writable(self):<DEDENT>", "body": "return False<EOL>", "docstring": ":Return: `True` when the I/O channel can be written to", "id": "f15266:c3:m3"}
{"signature": "@abstractmethod<EOL><INDENT>def fileno(self):<DEDENT>", "body": "return None<EOL>", "docstring": "Return file descriptor to poll or select.", "id": "f15266:c3:m0"}
{"signature": "@abstractmethod<EOL><INDENT>def handle_write(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Handle the 'channel writable' state. E.g. send buffered data via a\nsocket.", "id": "f15266:c3:m6"}
{"signature": "@abstractproperty<EOL><INDENT>def started(self):<DEDENT>", "body": "return False<EOL>", "docstring": "`True` then the loop has been started.", "id": "f15266:c8:m4"}
{"signature": "@abstractproperty<EOL><INDENT>def finished(self):<DEDENT>", "body": "return False<EOL>", "docstring": "`True` then the loop has been finished or is about to finish (the\n        final iteration in progress).", "id": "f15266:c8:m5"}
{"signature": "@abstractmethod<EOL><INDENT>def close(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Close the channell immediately, so it won't expect more events.", "id": "f15266:c3:m11"}
{"signature": "@abstractmethod<EOL><INDENT>def is_readable(self):<DEDENT>", "body": "return False<EOL>", "docstring": ":Return: `True` when the I/O channel can be read", "id": "f15266:c3:m1"}
{"signature": "def _remove_timeout_handler(self, handler):", "body": "self._timeout_handlers = [(t, h) for (t, h)<EOL>in self._timeout_handlers<EOL>if h.im_self != handler]<EOL>", "docstring": "Remove `TimeoutHandler` from the main loop.", "id": "f15267:c0:m12"}
{"signature": "def _call_timeout_handlers(self):", "body": "sources_handled = <NUM_LIT:0><EOL>now = time.time()<EOL>schedule = None<EOL>while self._timeout_handlers:<EOL><INDENT>schedule, handler = self._timeout_handlers[<NUM_LIT:0>]<EOL>if schedule <= now:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(handler))<EOL>self._timeout_handlers = self._timeout_handlers[<NUM_LIT:1>:]<EOL>result = handler()<EOL>logger.debug(\"<STR_LIT>\".format(result))<EOL>rec = handler._pyxmpp_recurring<EOL>if rec:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(handler._pyxmpp_timeout))<EOL>self._timeout_handlers.append(<EOL>(now + handler._pyxmpp_timeout, handler))<EOL>self._timeout_handlers.sort(key = lambda x: x[<NUM_LIT:0>])<EOL><DEDENT>elif rec is None and result is not None:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(result))<EOL>self._timeout_handlers.append((now + result, handler))<EOL>self._timeout_handlers.sort(key = lambda x: x[<NUM_LIT:0>])<EOL><DEDENT>sources_handled += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT>if self.check_events():<EOL><INDENT>return <NUM_LIT:0>, sources_handled<EOL><DEDENT><DEDENT>if self._timeout_handlers and schedule:<EOL><INDENT>timeout = schedule - now<EOL><DEDENT>else:<EOL><INDENT>timeout = None<EOL><DEDENT>return timeout, sources_handled<EOL>", "docstring": "Call the timeout handlers due.\n\n        :Return: (next_event_timeout, sources_handled) tuple.\n            next_event_timeout is number of seconds until the next timeout\n            event, sources_handled is number of handlers called.", "id": "f15267:c0:m13"}
{"signature": "def _prepare_handlers(self):", "body": "timeout = None<EOL>readable = []<EOL>writable = []<EOL>for handler in self._handlers:<EOL><INDENT>if handler not in self._prepared:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(handler))<EOL>ret = handler.prepare()<EOL>logger.debug(\"<STR_LIT>\".format(ret))<EOL>if isinstance(ret, HandlerReady):<EOL><INDENT>self._prepared.add(handler)<EOL><DEDENT>elif isinstance(ret, PrepareAgain):<EOL><INDENT>if ret.timeout is not None:<EOL><INDENT>if timeout is None:<EOL><INDENT>timeout = ret.timeout<EOL><DEDENT>else:<EOL><INDENT>timeout = min(timeout, ret.timeout)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if not handler.fileno():<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(handler))<EOL>continue<EOL><DEDENT>if handler.is_readable():<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(handler))<EOL>readable.append(handler)<EOL><DEDENT>if handler.is_writable():<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(handler))<EOL>writable.append(handler)<EOL><DEDENT><DEDENT>return readable, writable, timeout<EOL>", "docstring": "Prepare the I/O handlers.\n\n        :Return: (readable, writable, timeout) tuple. 'readable' is the list\n            of readable handlers, 'writable' - the list of writable handlers,\n            'timeout' the suggested maximum timeout for this loop iteration or\n            `None`", "id": "f15268:c0:m4"}
{"signature": "def _configure_io_handler(self, handler):", "body": "if self.check_events():<EOL><INDENT>return<EOL><DEDENT>if handler in self._unprepared_handlers:<EOL><INDENT>old_fileno = self._unprepared_handlers[handler]<EOL>prepared = self._prepare_io_handler(handler)<EOL><DEDENT>else:<EOL><INDENT>old_fileno = None<EOL>prepared = True<EOL><DEDENT>fileno = handler.fileno()<EOL>if old_fileno is not None and fileno != old_fileno:<EOL><INDENT>tag = self._io_sources.pop(handler, None)<EOL>if tag is not None:<EOL><INDENT>glib.source_remove(tag)<EOL><DEDENT><DEDENT>if not prepared:<EOL><INDENT>self._unprepared_handlers[handler] = fileno<EOL><DEDENT>if fileno is None:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(handler))<EOL>return<EOL><DEDENT>events = <NUM_LIT:0><EOL>if handler.is_readable():<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(handler))<EOL>events |= glib.IO_IN | glib.IO_ERR<EOL><DEDENT>if handler.is_writable():<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(handler))<EOL>events |= glib.IO_OUT | glib.IO_HUP | glib.IO_ERR<EOL><DEDENT>if events:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(handler, fileno, events))<EOL>glib.io_add_watch(fileno, events, self._io_callback, handler)<EOL><DEDENT>", "docstring": "Register an io-handler with the glib main loop.", "id": "f15270:c0:m3"}
{"signature": "def _prepare_pending(self):", "body": "if not self._unprepared_pending:<EOL><INDENT>return<EOL><DEDENT>for handler in list(self._unprepared_pending):<EOL><INDENT>self._configure_io_handler(handler)<EOL><DEDENT>self.check_events()<EOL>", "docstring": "Prepare pending handlers.", "id": "f15270:c0:m6"}
{"signature": "def _loop_timeout_cb(self, main_loop):", "body": "self._anything_done = True<EOL>logger.debug(\"<STR_LIT>\")<EOL>main_loop.quit()<EOL>", "docstring": "Stops the loop after the time specified in the `loop` call.", "id": "f15270:c0:m14"}
{"signature": "def __init__(self, settings = None, handlers = None):", "body": "if settings is None:<EOL><INDENT>settings = XMPPSettings()<EOL><DEDENT>self.queue = settings[\"<STR_LIT>\"]<EOL>self._handler_map = defaultdict(list)<EOL>if handlers:<EOL><INDENT>self.handlers = list(handlers)<EOL><DEDENT>else:<EOL><INDENT>self.handlers = []<EOL><DEDENT>self._update_handlers()<EOL>self.lock = threading.RLock()<EOL>", "docstring": "Initialize the event dispatcher.\n\n        :Parameters:\n            - `settings`: the settings. \"event_queue\" settings provides the\n              event queue object.\n            - `handlers`: the initial list of event handler objects.\n        :Types:\n            - `settings`: `XMPPSettings`\n            - `handlers`: iterable of objects", "id": "f15271:c0:m0"}
{"signature": "def add_handler(self, handler):", "body": "if not isinstance(handler, EventHandler):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>with self.lock:<EOL><INDENT>if handler in self.handlers:<EOL><INDENT>return<EOL><DEDENT>self.handlers.append(handler)<EOL>self._update_handlers()<EOL><DEDENT>", "docstring": "Add a handler object.\n\n        :Parameters:\n            - `handler`: the object providing event handler methods\n        :Types:\n            - `handler`: `EventHandler`", "id": "f15271:c0:m1"}
{"signature": "def loop(self):", "body": "while self.dispatch(True) is not QUIT:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Wait for and dispatch events until `QUIT` is reached.", "id": "f15271:c0:m6"}
{"signature": "def _remove_io_handler(self, handler):", "body": "if handler in self._unprepared_handlers:<EOL><INDENT>old_fileno = self._unprepared_handlers[handler]<EOL>del self._unprepared_handlers[handler]<EOL><DEDENT>else:<EOL><INDENT>old_fileno = handler.fileno()<EOL><DEDENT>if old_fileno is not None:<EOL><INDENT>del self._handlers[old_fileno]<EOL>self.io_loop.remove_handler(handler.fileno())<EOL><DEDENT>", "docstring": "Remove an i/o-handler.", "id": "f15273:c0:m4"}
{"signature": "def _configure_io_handler(self, handler):", "body": "if self.check_events():<EOL><INDENT>return<EOL><DEDENT>if handler in self._unprepared_handlers:<EOL><INDENT>old_fileno = self._unprepared_handlers[handler]<EOL>prepared = self._prepare_io_handler(handler)<EOL><DEDENT>else:<EOL><INDENT>old_fileno = None<EOL>prepared = True<EOL><DEDENT>fileno = handler.fileno()<EOL>if old_fileno is not None and fileno != old_fileno:<EOL><INDENT>del self._handlers[old_fileno]<EOL>self.io_loop.remove_handler(old_fileno)<EOL><DEDENT>if not prepared:<EOL><INDENT>self._unprepared_handlers[handler] = fileno<EOL><DEDENT>if not fileno:<EOL><INDENT>return<EOL><DEDENT>update = fileno in self._handlers<EOL>events = ioloop.IOLoop.NONE<EOL>if handler.is_readable():<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(handler))<EOL>events |= ioloop.IOLoop.READ<EOL><DEDENT>if handler.is_writable():<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(handler))<EOL>events |= ioloop.IOLoop.WRITE<EOL><DEDENT>if self._handlers.get(fileno, None) == events:<EOL><INDENT>return<EOL><DEDENT>self._handlers[fileno] = events<EOL>if events:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(handler, fileno, events))<EOL>if update:<EOL><INDENT>self.io_loop.update_handler(fileno, events)<EOL><DEDENT>else:<EOL><INDENT>self.io_loop.add_handler(<EOL>fileno, partial(self._handle_event, handler), events<EOL>)<EOL><DEDENT><DEDENT>", "docstring": "Register an io-handler at the polling object.", "id": "f15273:c0:m2"}
{"signature": "def stream_element_handler(element_name, usage_restriction = None):", "body": "def decorator(func):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>func._pyxmpp_stream_element_handled = element_name<EOL>func._pyxmpp_usage_restriction = usage_restriction<EOL>return func<EOL><DEDENT>return decorator<EOL>", "docstring": "Method decorator generator for decorating stream element\n    handler methods in `StreamFeatureHandler` subclasses.\n\n    :Parameters:\n        - `element_name`: stream element QName\n        - `usage_restriction`: optional usage restriction: \"initiator\" or\n          \"receiver\"\n    :Types:\n        - `element_name`: `unicode`\n        - `usage_restriction`: `unicode`", "id": "f15275:m8"}
{"signature": "def presence_stanza_handler(stanza_type = None, payload_class = None,<EOL>payload_key = None, usage_restriction = \"<STR_LIT>\"):", "body": "return _stanza_handler(\"<STR_LIT>\", stanza_type, payload_class, payload_key,<EOL>usage_restriction)<EOL>", "docstring": "Method decorator generator for decorating <presence/>\n    stanza handler methods in `XMPPFeatureHandler` subclasses.\n\n    :Parameters:\n        - `payload_class`: payload class expected\n        - `stanza_type`: expected value of the 'type' attribute of the stanza.\n        - `payload_key`: payload class specific filtering key\n        - `usage_restriction`: optional usage restriction: \"pre-auth\" or\n          \"post-auth\"\n    :Types:\n        - `payload_class`: subclass of `StanzaPayload`\n        - `stanza_type`: `unicode`\n        - `usage_restriction`: `unicode`", "id": "f15275:m6"}
{"signature": "def payload_element_name(element_name):", "body": "def decorator(klass):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>from .stanzapayload import STANZA_PAYLOAD_CLASSES<EOL>from .stanzapayload import STANZA_PAYLOAD_ELEMENTS<EOL>if hasattr(klass, \"<STR_LIT>\"):<EOL><INDENT>klass._pyxmpp_payload_element_name.append(element_name)<EOL><DEDENT>else:<EOL><INDENT>klass._pyxmpp_payload_element_name = [element_name]<EOL><DEDENT>if element_name in STANZA_PAYLOAD_CLASSES:<EOL><INDENT>logger = logging.getLogger('<STR_LIT>')<EOL>logger.warning(\"<STR_LIT>\".format(<EOL>element_name))<EOL><DEDENT>STANZA_PAYLOAD_CLASSES[element_name] = klass<EOL>STANZA_PAYLOAD_ELEMENTS[klass].append(element_name)<EOL>return klass<EOL><DEDENT>return decorator<EOL>", "docstring": "Class decorator generator for decorationg\n    `StanzaPayload` subclasses.\n\n    :Parameters:\n        - `element_name`: XML element qname handled by the class\n    :Types:\n        - `element_name`: `unicode`", "id": "f15275:m7"}
{"signature": "@abstractmethod<EOL><INDENT>def send_element(self, element):<DEDENT>", "body": "pass<EOL>", "docstring": "Send an element via the transport.", "id": "f15275:c1:m4"}
{"signature": "def make_stream_features(self, stream, features):", "body": "<EOL>return False<EOL>", "docstring": "Update the features element announced by the stream.\n\n        [receiver only]\n\n        :Parameters:\n            - `stream`: the stream\n            - `features`: the features element about to be sent\n        :Types:\n            - `stream`: `StreamBase`\n            - `features`: :etree:`ElementTree.Element`", "id": "f15275:c7:m1"}
{"signature": "def handle_stream_features(self, stream, features):", "body": "<EOL>return False<EOL>", "docstring": "Handle features announced by the stream peer.\n\n        [initiator only]\n\n        :Parameters:\n            - `stream`: the stream\n            - `features`: the features element just received\n        :Types:\n            - `stream`: `StreamBase`\n            - `features`: :etree:`ElementTree.Element`\n\n        :Return:\n            - `StreamFeatureHandled` instance if a feature was recognized and\n              handled\n            - `StreamFeatureNotHandled` instance if a feature was recognized\n              but not handled\n            - `None` if no feature was recognized", "id": "f15275:c7:m0"}
{"signature": "def _stanza_handler(element_name, stanza_type, payload_class, payload_key,<EOL>usage_restriction):", "body": "def decorator(func):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>func._pyxmpp_stanza_handled = (element_name, stanza_type)<EOL>func._pyxmpp_payload_class_handled = payload_class<EOL>func._pyxmpp_payload_key = payload_key<EOL>func._pyxmpp_usage_restriction = usage_restriction<EOL>return func<EOL><DEDENT>return decorator<EOL>", "docstring": "Method decorator generator for decorating <message/> or <presence/>\n    stanza handler methods in `XMPPFeatureHandler` subclasses.\n\n    :Parameters:\n        - `element_name`: \"message\" or \"presence\"\n        - `stanza_type`: expected value of the 'type' attribute of the stanza\n        - `payload_class`: payload class expected\n        - `payload_key`: payload class specific filtering key\n        - `usage_restriction`: optional usage restriction: \"pre-auth\" or\n          \"post-auth\"\n    :Types:\n        - `element_name`: `unicode`\n        - `stanza_type`: `unicode`\n        - `payload_class`: subclass of `StanzaPayload`\n        - `usage_restriction`: `unicode`", "id": "f15275:m4"}
{"signature": "@abstractmethod<EOL><INDENT>def send_stream_tail(self):<DEDENT>", "body": "pass<EOL>", "docstring": "Send stream tail via the transport.", "id": "f15275:c1:m3"}
{"signature": "@abstractmethod<EOL><INDENT>def uplink_receive(self, stanza):<DEDENT>", "body": "pass<EOL>", "docstring": "Handle stanza received from 'uplink'.", "id": "f15275:c2:m1"}
{"signature": "@property<EOL><INDENT>def auth_properties(self):<DEDENT>", "body": "return {}<EOL>", "docstring": "Channel properties for authentication and authorization.", "id": "f15275:c1:m7"}
{"signature": "@abstractclassmethod<EOL><INDENT>def from_xml(cls, element):<DEDENT>", "body": "<EOL>raise NotImplementedError<EOL>", "docstring": "Create a `cls` instance from an XML element.\n\n        :Parameters:\n            - `element`: the XML element\n        :Types:\n            - `element`: :etree:`ElementTree.Element`", "id": "f15275:c4:m0"}
{"signature": "def _iq_handler(iq_type, payload_class, payload_key, usage_restriction):", "body": "def decorator(func):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>func._pyxmpp_stanza_handled = (\"<STR_LIT>\", iq_type)<EOL>func._pyxmpp_payload_class_handled = payload_class<EOL>func._pyxmpp_payload_key = payload_key<EOL>func._pyxmpp_usage_restriction = usage_restriction<EOL>return func<EOL><DEDENT>return decorator<EOL>", "docstring": "Method decorator generator for decorating <iq type='get'/> stanza\n    handler methods in `XMPPFeatureHandler` subclasses.\n\n    :Parameters:\n        - `payload_class`: payload class expected\n        - `payload_key`: payload class specific filtering key\n        - `usage_restriction`: optional usage restriction: \"pre-auth\" or\n          \"post-auth\"\n    :Types:\n        - `payload_class`: subclass of `StanzaPayload`\n        - `usage_restriction`: `unicode`", "id": "f15275:m1"}
{"signature": "def iq_set_stanza_handler(payload_class, payload_key = None,<EOL>usage_restriction = \"<STR_LIT>\"):", "body": "return _iq_handler(\"<STR_LIT>\", payload_class, payload_key, usage_restriction)<EOL>", "docstring": "Method decorator generator for decorating <iq type='set'/> stanza\n    handler methods in `XMPPFeatureHandler` subclasses.\n\n    :Parameters:\n        - `payload_class`: payload class expected\n        - `payload_key`: payload class specific filtering key\n        - `usage_restriction`: optional usage restriction: \"pre-auth\" or\n          \"post-auth\"\n    :Types:\n        - `payload_class`: subclass of `StanzaPayload`\n        - `usage_restriction`: `unicode`", "id": "f15275:m3"}
{"signature": "@abstractmethod<EOL><INDENT>def send(self, stanza):<DEDENT>", "body": "pass<EOL>", "docstring": "Send stanza through this route.", "id": "f15275:c2:m0"}
{"signature": "def copy(self):", "body": "return deepcopy(self)<EOL>", "docstring": "Return a deep copy of self.", "id": "f15275:c4:m2"}
{"signature": "def roster_client_factory(self):", "body": "return RosterClient(self.settings)<EOL>", "docstring": "Creates the `RosterClient` instance for the `roster_client`\n        attribute.\n\n        Subclasses can provide different behaviour by overriding this. The\n        overriding method can return `None` if no roster client is needed.\n\n        :Return: `RosterClient`", "id": "f15276:c0:m13"}
{"signature": "def disconnect(self):", "body": "with self.lock:<EOL><INDENT>if self.stream:<EOL><INDENT>if self.settings[u\"<STR_LIT>\"]:<EOL><INDENT>self.send(Presence(stanza_type = \"<STR_LIT>\"))<EOL><DEDENT>self.stream.disconnect()<EOL><DEDENT><DEDENT>", "docstring": "Gracefully disconnect from the server.", "id": "f15276:c0:m4"}
{"signature": "def base_handlers_factory(self):", "body": "tls_handler = StreamTLSHandler(self.settings)<EOL>sasl_handler = StreamSASLHandler(self.settings)<EOL>session_handler = SessionHandler()<EOL>binding_handler = ResourceBindingHandler(self.settings)<EOL>return [tls_handler, sasl_handler, binding_handler, session_handler]<EOL>", "docstring": "Default base client handlers factory.\n\n        Subclasses can provide different behaviour by overriding this.\n\n        :Return: list of handlers", "id": "f15276:c0:m12"}
{"signature": "def send(self, stanza):", "body": "if self.uplink:<EOL><INDENT>self.uplink.send(stanza)<EOL><DEDENT>else:<EOL><INDENT>raise NoRouteError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Send a stanza somwhere.\n\n        The default implementation sends it via the `uplink` if it is defined\n        or raises the `NoRouteError`.\n\n        :Parameters:\n            - `stanza`: the stanza to send.\n        :Types:\n            - `stanza`: `pyxmpp.stanza.Stanza`", "id": "f15277:c0:m18"}
{"signature": "def process_presence(self, stanza):", "body": "stanza_type = stanza.stanza_type<EOL>return self.__try_handlers(self._presence_handlers, stanza, stanza_type)<EOL>", "docstring": "Process presence stanza.\n\n        Pass it to a handler of the stanza's type and payload namespace.\n\n        :Parameters:\n            - `stanza`: presence stanza to be handled", "id": "f15277:c0:m7"}
{"signature": "def __try_handlers(self, handler_list, stanza, stanza_type = None):", "body": "<EOL>if stanza_type is None:<EOL><INDENT>stanza_type = stanza.stanza_type<EOL><DEDENT>payload = stanza.get_all_payload()<EOL>classes = [p.__class__ for p in payload]<EOL>keys = [(p.__class__, p.handler_key) for p in payload]<EOL>for handler in handler_list:<EOL><INDENT>type_filter = handler._pyxmpp_stanza_handled[<NUM_LIT:1>]<EOL>class_filter = handler._pyxmpp_payload_class_handled<EOL>extra_filter = handler._pyxmpp_payload_key<EOL>if type_filter != stanza_type:<EOL><INDENT>continue<EOL><DEDENT>if class_filter:<EOL><INDENT>if extra_filter is None and class_filter not in classes:<EOL><INDENT>continue<EOL><DEDENT>if extra_filter and (class_filter, extra_filter) not in keys:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>response = handler(stanza)<EOL>if self._process_handler_result(response):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Search the handler list for handlers matching\n        given stanza type and payload namespace. Run the\n        handlers found ordering them by priority until\n        the first one which returns `True`.\n\n        :Parameters:\n            - `handler_list`: list of available handlers\n            - `stanza`: the stanza to handle\n            - `stanza_type`: stanza type override (value of its \"type\"\n              attribute)\n\n        :return: result of the last handler or `False` if no\n            handler was found.", "id": "f15277:c0:m5"}
{"signature": "def setup_stanza_handlers(self, handler_objects, usage_restriction):", "body": "<EOL>iq_handlers = {\"<STR_LIT>\": {}, \"<STR_LIT>\": {}}<EOL>message_handlers = []<EOL>presence_handlers = []<EOL>for obj in handler_objects:<EOL><INDENT>if not isinstance(obj, XMPPFeatureHandler):<EOL><INDENT>continue<EOL><DEDENT>obj.stanza_processor = self<EOL>for dummy, handler in inspect.getmembers(obj, callable):<EOL><INDENT>if not hasattr(handler, \"<STR_LIT>\"):<EOL><INDENT>continue<EOL><DEDENT>element_name, stanza_type = handler._pyxmpp_stanza_handled<EOL>restr = handler._pyxmpp_usage_restriction<EOL>if restr and restr != usage_restriction:<EOL><INDENT>continue<EOL><DEDENT>if element_name == \"<STR_LIT>\":<EOL><INDENT>payload_class = handler._pyxmpp_payload_class_handled<EOL>payload_key = handler._pyxmpp_payload_key<EOL>if (payload_class, payload_key) in iq_handlers[stanza_type]:<EOL><INDENT>continue<EOL><DEDENT>iq_handlers[stanza_type][(payload_class, payload_key)] =handler<EOL>continue<EOL><DEDENT>elif element_name == \"<STR_LIT:message>\":<EOL><INDENT>handler_list = message_handlers<EOL><DEDENT>elif element_name == \"<STR_LIT>\":<EOL><INDENT>handler_list = presence_handlers<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>handler_list.append(handler)<EOL><DEDENT><DEDENT>with self.lock:<EOL><INDENT>self._iq_handlers = iq_handlers<EOL>self._presence_handlers = presence_handlers<EOL>self._message_handlers = message_handlers<EOL><DEDENT>", "docstring": "Install stanza handlers provided by `handler_objects`", "id": "f15277:c0:m14"}
{"signature": "def fix_out_stanza(self, stanza):", "body": "pass<EOL>", "docstring": "Modify outgoing stanza before sending into the stream.\n\n        This implementation does nothig. It should be overriden in derived\n        classes if needed.", "id": "f15277:c0:m16"}
{"signature": "def process_stanza(self, stanza):", "body": "self.fix_in_stanza(stanza)<EOL>to_jid = stanza.to_jid<EOL>if not self.process_all_stanzas and to_jid and (<EOL>to_jid != self.me and to_jid.bare() != self.me.bare()):<EOL><INDENT>return self.route_stanza(stanza)<EOL><DEDENT>try:<EOL><INDENT>if isinstance(stanza, Iq):<EOL><INDENT>if self.process_iq(stanza):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>elif isinstance(stanza, Message):<EOL><INDENT>if self.process_message(stanza):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>elif isinstance(stanza, Presence):<EOL><INDENT>if self.process_presence(stanza):<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT>except ProtocolError as err:<EOL><INDENT>typ = stanza.stanza_type<EOL>if typ != '<STR_LIT:error>' and (typ != '<STR_LIT:result>'<EOL>or stanza.stanza_type != '<STR_LIT>'):<EOL><INDENT>response = stanza.make_error_response(err.xmpp_name)<EOL>self.send(response)<EOL>err.log_reported()<EOL><DEDENT>else:<EOL><INDENT>err.log_ignored()<EOL><DEDENT>return<EOL><DEDENT>logger.debug(\"<STR_LIT>\" % (stanza.stanza_type,<EOL>stanza.serialize()))<EOL>return False<EOL>", "docstring": "Process stanza received from the stream.\n\n        First \"fix\" the stanza with `self.fix_in_stanza()`,\n        then pass it to `self.route_stanza()` if it is not directed\n        to `self.me` and `self.process_all_stanzas` is not True. Otherwise\n        stanza is passwd to `self.process_iq()`, `self.process_message()`\n        or `self.process_presence()` appropriately.\n\n        :Parameters:\n            - `stanza`: the stanza received.\n\n        :returns: `True` when stanza was handled", "id": "f15277:c0:m9"}
{"signature": "def stanza_factory(element, return_path = None, language = None):", "body": "tag = element.tag<EOL>if tag.endswith(\"<STR_LIT>\") or tag == \"<STR_LIT>\":<EOL><INDENT>return Iq(element, return_path = return_path, language = language)<EOL><DEDENT>if tag.endswith(\"<STR_LIT>\") or tag == \"<STR_LIT:message>\":<EOL><INDENT>return Message(element, return_path = return_path, language = language)<EOL><DEDENT>if tag.endswith(\"<STR_LIT>\") or tag == \"<STR_LIT>\":<EOL><INDENT>return Presence(element, return_path = return_path, language = language)<EOL><DEDENT>else:<EOL><INDENT>return Stanza(element, return_path = return_path, language = language)<EOL><DEDENT>", "docstring": "Creates Iq, Message or Presence object for XML stanza `element`\n\n    :Parameters:\n        - `element`: the stanza XML element\n        - `return_path`: object through which responses to this stanza should\n          be sent (will be weakly referenced by the stanza object).\n        - `language`: default language for the stanza\n    :Types:\n        - `element`: :etree:`ElementTree.Element`\n        - `return_path`: `StanzaRoute`\n        - `language`: `unicode`", "id": "f15277:m0"}
{"signature": "def process_message(self, stanza):", "body": "stanza_type = stanza.stanza_type<EOL>if stanza_type is None:<EOL><INDENT>stanza_type = \"<STR_LIT>\"<EOL><DEDENT>if self.__try_handlers(self._message_handlers, stanza,<EOL>stanza_type = stanza_type):<EOL><INDENT>return True<EOL><DEDENT>if stanza_type not in (\"<STR_LIT:error>\", \"<STR_LIT>\"):<EOL><INDENT>return self.__try_handlers(self._message_handlers, stanza,<EOL>stanza_type = \"<STR_LIT>\")<EOL><DEDENT>return False<EOL>", "docstring": "Process message stanza.\n\n        Pass it to a handler of the stanza's type and payload namespace.\n        If no handler for the actual stanza type succeeds then hadlers\n        for type \"normal\" are used.\n\n        :Parameters:\n            - `stanza`: message stanza to be handled", "id": "f15277:c0:m6"}
{"signature": "def __init__(self, default_timeout = <NUM_LIT>):", "body": "self.me = None<EOL>self.peer = None<EOL>self.uplink = None<EOL>self.process_all_stanzas = True<EOL>self._iq_response_handlers = ExpiringDictionary(default_timeout)<EOL>self._iq_handlers = defaultdict(dict)<EOL>self._message_handlers = []<EOL>self._presence_handlers = []<EOL>self.lock = threading.RLock()<EOL>", "docstring": "Initialize a `StanzaProcessor` object.\n\n        :Parameters:\n            - `default_timeout`: default timeout for IQ response handlers", "id": "f15277:c0:m0"}
{"signature": "def process_iq(self, stanza):", "body": "typ = stanza.stanza_type<EOL>if typ in (\"<STR_LIT:result>\", \"<STR_LIT:error>\"):<EOL><INDENT>return self._process_iq_response(stanza)<EOL><DEDENT>if typ not in (\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>raise BadRequestProtocolError(\"<STR_LIT>\")<EOL><DEDENT>logger.debug(\"<STR_LIT>\".format(<EOL>stanza, typ))<EOL>payload = stanza.get_payload(None)<EOL>logger.debug(\"<STR_LIT>\".format(payload))<EOL>if not payload:<EOL><INDENT>raise BadRequestProtocolError(\"<STR_LIT>\")<EOL><DEDENT>handler = self._get_iq_handler(typ, payload)<EOL>if not handler:<EOL><INDENT>payload = stanza.get_payload(None, specialize = True)<EOL>logger.debug(\"<STR_LIT>\".format(payload))<EOL>if not isinstance(payload, XMLPayload):<EOL><INDENT>handler = self._get_iq_handler(typ, payload)<EOL><DEDENT><DEDENT>if handler:<EOL><INDENT>response = handler(stanza)<EOL>self._process_handler_result(response)<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>raise ServiceUnavailableProtocolError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Process IQ stanza received.\n\n        :Parameters:\n            - `stanza`: the stanza received\n        :Types:\n            - `stanza`: `Iq`\n\n        If a matching handler is available pass the stanza to it.  Otherwise\n        ignore it if it is \"error\" or \"result\" stanza or return\n        \"feature-not-implemented\" error if it is \"get\" or \"set\".", "id": "f15277:c0:m3"}
{"signature": "def copy(self):", "body": "result = Iq(None, self.from_jid, self.to_jid,<EOL>self.stanza_type, self.stanza_id, self.error,<EOL>self._return_path())<EOL>if self._payload is None:<EOL><INDENT>self.decode_payload()<EOL><DEDENT>for payload in self._payload:<EOL><INDENT>Stanza.add_payload(result, payload)<EOL><DEDENT>return result<EOL>", "docstring": "Create a deep copy of the stanza.\n\n        :returntype: `Iq`", "id": "f15278:c0:m1"}
{"signature": "def make_loop(self, handlers):", "body": "<EOL>from pyxmpp2.mainloop.glib import GLibMainLoop<EOL>self.loop = GLibMainLoop(None, handlers)<EOL>", "docstring": "Return a main loop object for use with this test suite.", "id": "f15283:c8:m0"}
{"signature": "def make_loop(self, handlers):", "body": "<EOL>from pyxmpp2.mainloop.glib import GLibMainLoop<EOL>self.loop = GLibMainLoop(None, handlers)<EOL>", "docstring": "Return a main loop object for use with this test suite.", "id": "f15283:c4:m0"}
{"signature": "def _do_tls_handshake(self):", "body": "logger.debug(\"<STR_LIT>\")<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>self.sock.do_handshake()<EOL>break<EOL><DEDENT>except ssl.SSLError as err:<EOL><INDENT>if err.args[<NUM_LIT:0>] == ssl.SSL_ERROR_WANT_READ:<EOL><INDENT>select.select([self.sock], [], [])<EOL><DEDENT>elif err.args[<NUM_LIT:0>] == ssl.SSL_ERROR_WANT_WRITE:<EOL><INDENT>select.select([], [self.sock], [])<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>self.extra_on_read = None<EOL>self.write_enabled = True<EOL>self.write_cond.notify()<EOL>", "docstring": "Do the TLS handshake. Called from the reader thread\n        after `starttls` is called.", "id": "f15283:c0:m2"}
{"signature": "def tearDown(self):", "body": "if self.server:<EOL><INDENT>self.server.close()<EOL><DEDENT>if self.client:<EOL><INDENT>self.client.close()<EOL><DEDENT>XMPPSettings._defs['<STR_LIT>'].default = None<EOL>", "docstring": "Stop the server and client connections started.", "id": "f15283:c1:m2"}
{"signature": "def read(self):", "body": "with self.lock:<EOL><INDENT>data, self.rdata = self.rdata, \"<STR_LIT>\"<EOL><DEDENT>return data<EOL>", "docstring": "Read data from input buffer (received by the reader thread).", "id": "f15283:c0:m8"}
{"signature": "def make_loop(self, handlers):", "body": "<EOL>self.loop = PollMainLoop(None, handlers)<EOL>", "docstring": "Return a main loop object for use with this test suite.", "id": "f15283:c7:m0"}
{"signature": "def wait_short(self, timeout = <NUM_LIT:0.1>):", "body": "self.loop.loop_iteration(timeout)<EOL>", "docstring": "Run a single main loop iteration.", "id": "f15283:c2:m6"}
{"signature": "def start(self):", "body": "reader_thread = threading.Thread(target = self.reader_run,<EOL>name = \"<STR_LIT>\")<EOL>reader_thread.daemon = True<EOL>writter_thread = threading.Thread(target = self.writter_run,<EOL>name = \"<STR_LIT>\")<EOL>writter_thread.daemon = True<EOL>reader_thread.start()<EOL>writter_thread.start()<EOL>", "docstring": "Start the reader and writter threads.", "id": "f15283:c0:m1"}
{"signature": "def tearDown(self):", "body": "if self.loop:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>self.loop.stop(True, <NUM_LIT:2>)<EOL><DEDENT>except Exception: <EOL><INDENT>logger.exception(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>self.loop.event_dispatcher.flush(False)<EOL><DEDENT>super(ReceiverThreadedTestMixIn, self).tearDown()<EOL>", "docstring": "Tear down the test case object.", "id": "f15283:c9:m2"}
{"signature": "def start_transport(self, handlers):", "body": "self.transport = TCPTransport()<EOL>self.make_loop(handlers + [self.transport])<EOL>", "docstring": "Initialize a transport and a main loop with the provided handlers", "id": "f15283:c2:m1"}
{"signature": "def connect_transport(self):", "body": "addr, port = self.start_server()<EOL>self.transport.connect(addr, port)<EOL>", "docstring": "Start a test server and connect the transport to it.", "id": "f15283:c2:m2"}
{"signature": "def writter_run(self):", "body": "with self.write_cond:<EOL><INDENT>while self.sock is not None:<EOL><INDENT>while self.ready and self.wdata and self.write_enabled:<EOL><INDENT>sent = self.sock.send(self.wdata)<EOL>logger.debug(\"<STR_LIT>\" + repr(self.wdata[:sent]))<EOL>self.wdata = self.wdata[sent:]<EOL>self.write_cond.notify()<EOL><DEDENT>if self._disconnect and not self.wdata:<EOL><INDENT>self.sock.shutdown(socket.SHUT_WR)<EOL>logger.debug(\"<STR_LIT>\")<EOL>break<EOL><DEDENT>self.write_cond.wait()<EOL><DEDENT><DEDENT>", "docstring": "The writter thread function.", "id": "f15283:c0:m4"}
{"signature": "def make_loop(self, handlers):", "body": "self.loop = SelectMainLoop(None, handlers)<EOL>", "docstring": "Create the main loop object.", "id": "f15283:c6:m2"}
{"signature": "@classmethod<EOL><INDENT>def setUpClass(cls):<DEDENT>", "body": "if \"<STR_LIT>\" not in _support.RESOURCES:<EOL><INDENT>raise unittest.SkipTest(\"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)<EOL>sock.close()<EOL>cls.can_do_ipv4 = True<EOL><DEDENT>except socket.error as err:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(err))<EOL>pass<EOL><DEDENT>if socket.has_ipv6:<EOL><INDENT>try:<EOL><INDENT>sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)<EOL>sock.close()<EOL>cls.can_do_ipv6 = True<EOL><DEDENT>except socket.error as err:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(err))<EOL>pass<EOL><DEDENT><DEDENT>", "docstring": "Check if loopback networking is enabled and IPv4 and IPv6 sockets\n        available.", "id": "f15283:c1:m0"}
{"signature": "def start_transport(self, handlers):", "body": "super(ReceiverThreadedTestMixIn, self).start_transport(handlers)<EOL>self.loop.start()<EOL>", "docstring": "Create a listening socket for the tested stream,\n        a transport a main loop and create a client connectiong to the\n        socket.", "id": "f15283:c9:m1"}
{"signature": "def make_loop(self, handlers):", "body": "<EOL>self.loop = PollMainLoop(None, handlers)<EOL>", "docstring": "Return a main loop object for use with this test suite.", "id": "f15283:c3:m0"}
{"signature": "def make_loop(self, handlers):", "body": "<EOL>self.loop = ThreadPool(XMPPSettings({\"<STR_LIT>\": <NUM_LIT:0.1>}), handlers)<EOL>", "docstring": "Return a main loop object for use with this test suite.", "id": "f15283:c9:m0"}
{"signature": "def starttls(self, *args, **kwargs):", "body": "kwargs['<STR_LIT>'] = False<EOL>with self.lock:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>while self.wdata:<EOL><INDENT>self.write_cond.wait()<EOL><DEDENT>self.write_enabled = False<EOL>self.write_cond.notify()<EOL>logger.debug(\"<STR_LIT>\")<EOL>self.sock = ssl.wrap_socket(*args, **kwargs)<EOL>self.extra_on_read = self._do_tls_handshake<EOL>self.rdata = b\"<STR_LIT>\"<EOL><DEDENT>", "docstring": "Request switching to TLS.\n\n        First waits untill all currently buffered data is sent.\n\n        :Parameters:\n            - `args`: positional arguments to :std:`ssl.wrap_socket`\n            - `kwargs`: keyword arguments to :std:`ssl.wrap_socket`", "id": "f15283:c0:m3"}
{"signature": "@event_handler()<EOL><INDENT>def handle_event(self, event):<DEDENT>", "body": "self.events_received.append(event)<EOL>return False<EOL>", "docstring": "Handle any event: store it in `events_received`.", "id": "f15283:c10:m1"}
{"signature": "def discover():", "body": "suite = unittest.TestSuite()<EOL>for mod in unittest.defaultTestLoader.discover(\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>suite.addTest(mod)<EOL><DEDENT>return suite<EOL>", "docstring": "Discover all the test suites in pyxmpp2.test.", "id": "f15296:m0"}
{"signature": "@staticmethod<EOL><INDENT>def make_loop(handlers):<DEDENT>", "body": "return SelectMainLoop(None, handlers)<EOL>", "docstring": "Return a main loop object for use with this test suite.", "id": "f15298:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def make_loop(handlers):<DEDENT>", "body": "from pyxmpp2.mainloop.glib import GLibMainLoop<EOL>return GLibMainLoop(None, handlers)<EOL>", "docstring": "Return a main loop object for use with this test suite.", "id": "f15298:c2:m0"}
{"signature": "@staticmethod<EOL><INDENT>def make_loop(handlers):<DEDENT>", "body": "return PollMainLoop(None, handlers)<EOL>", "docstring": "Return a main loop object for use with this test suite.", "id": "f15298:c1:m0"}
{"signature": "def __init__(self, family, address, target):", "body": "self._socket = None<EOL>self._lock = threading.RLock()<EOL>self._target = target<EOL>sock = socket.socket(family, socket.SOCK_STREAM)<EOL>try:<EOL><INDENT>sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, <NUM_LIT:1>)<EOL>sock.bind(address)<EOL><DEDENT>except:<EOL><INDENT>sock.close()<EOL>raise<EOL><DEDENT>self._socket = sock<EOL>", "docstring": "Initialize the `TCPListener` object and create the socket.\n\n        :Parameters:\n            - `family`: address family (:std:`socket.AF_INET` or\n              :std:`socket.AF_INET6`)\n            - `address`: address to listen on (address, port)\n            - `target`: function to call on an accepted connection", "id": "f15303:c0:m0"}
{"signature": "def fileno(self):", "body": "with self._lock:<EOL><INDENT>if self._socket:<EOL><INDENT>return self._socket.fileno()<EOL><DEDENT><DEDENT>", "docstring": "Return file descriptor to poll or select.", "id": "f15303:c0:m4"}
{"signature": "def handle_read(self):", "body": "with self._lock:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>if self._socket is None:<EOL><INDENT>return<EOL><DEDENT>while True:<EOL><INDENT>try:<EOL><INDENT>sock, address = self._socket.accept()<EOL><DEDENT>except socket.error as err:<EOL><INDENT>if err.args[<NUM_LIT:0>] in BLOCKING_ERRORS:<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>logger.debug(\"<STR_LIT>\".format(address))<EOL>self._target(sock, address)<EOL><DEDENT><DEDENT>", "docstring": "Accept any incoming connections.", "id": "f15303:c0:m10"}
{"signature": "def payload_factory(element):", "body": "return payload_class_for_element_name(element.tag).from_xml(element)<EOL>", "docstring": "Return a specialized `StanzaPayload` object for given element.", "id": "f15304:m2"}
{"signature": "def payload_class_for_element_name(element_name):", "body": "logger.debug(\"<STR_LIT>\".format(<EOL>element_name))<EOL>logger.debug(\"<STR_LIT>\".format(STANZA_PAYLOAD_CLASSES))<EOL>if element_name in STANZA_PAYLOAD_CLASSES:<EOL><INDENT>return STANZA_PAYLOAD_CLASSES[element_name]<EOL><DEDENT>else:<EOL><INDENT>return XMLPayload<EOL><DEDENT>", "docstring": "Return a payload class for given element name.", "id": "f15304:m0"}
{"signature": "def stream_start(self, element):", "body": "logger.error(\"<STR_LIT>\".format(element))<EOL>", "docstring": "Called when the start tag of root element is encountered\n        in the stream.\n\n        :Parameters:\n            - `element`: the root element\n        :Types:\n            - `element`: :etree:`ElementTree.Element`", "id": "f15305:c0:m0"}
{"signature": "def __init__(self, handler):", "body": "self._handler = handler<EOL>self._head = \"<STR_LIT>\"<EOL>self._tail = \"<STR_LIT>\"<EOL>self._builder = None<EOL>self._level = <NUM_LIT:0><EOL>self._root = None<EOL>", "docstring": "Initialize the SAX handler.\n\n        :Parameters:\n            - `handler`: Object to handle stream start, end and stanzas.\n        :Types:\n            - `handler`: `XMLStreamHandler`", "id": "f15305:c1:m0"}
{"signature": "def __init__(self, handler):", "body": "self.handler = handler<EOL>self.parser = ElementTree.XMLParser(target = ParserTarget(handler))<EOL>self.lock = threading.RLock()<EOL>self.in_use = False<EOL>self._started = False<EOL>", "docstring": "Initialize the reader.\n\n        :Parameters:\n            - `handler`: Object to handle stream start, end and stanzas.\n        :Types:\n            - `handler`: `XMLStreamHandler`", "id": "f15305:c2:m0"}
{"signature": "def prohibit(self, data):", "body": "for char in data:<EOL><INDENT>for lookup in self.prohibited:<EOL><INDENT>if lookup(char):<EOL><INDENT>raise StringprepError(\"<STR_LIT>\"<EOL>.format(char))<EOL><DEDENT><DEDENT><DEDENT>return data<EOL>", "docstring": "Checks for prohibited characters.", "id": "f15306:c0:m4"}
{"signature": "def map(self, data):", "body": "result = []<EOL>for char in data:<EOL><INDENT>ret = None<EOL>for lookup in self.mapping:<EOL><INDENT>ret = lookup(char)<EOL>if ret is not None:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if ret is not None:<EOL><INDENT>result.append(ret)<EOL><DEDENT>else:<EOL><INDENT>result.append(char)<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Mapping part of string preparation.", "id": "f15306:c0:m3"}
{"signature": "def set_stringprep_cache_size(size):", "body": "<EOL>global _stringprep_cache_size<EOL>_stringprep_cache_size = size<EOL>if len(Profile.cache_items) > size:<EOL><INDENT>remove = Profile.cache_items[:-size]<EOL>for profile, key in remove:<EOL><INDENT>try:<EOL><INDENT>del profile.cache[key]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>Profile.cache_items = Profile.cache_items[-size:]<EOL><DEDENT>", "docstring": "Modify stringprep cache size.\n\n    :Parameters:\n        - `size`: new cache size", "id": "f15306:m3"}
{"signature": "def event(self, event):", "body": "logger.debug(\"<STR_LIT>\".format(event))<EOL>if self._stream:<EOL><INDENT>event.stream = self._stream<EOL><DEDENT>self._event_queue.put(event)<EOL>", "docstring": "Pass an event to the target stream or just log it.", "id": "f15308:c5:m37"}
{"signature": "def _set_state(self, state):", "body": "logger.debug(\"<STR_LIT>\".format(state))<EOL>self._state = state<EOL>self._state_cond.notify()<EOL>", "docstring": "Set `_state` and notify any threads waiting for the change.", "id": "f15308:c5:m1"}
{"signature": "def send_element(self, element):", "body": "with self.lock:<EOL><INDENT>if self._eof or self._socket is None or not self._serializer:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(<EOL>element_to_unicode(element)))<EOL>return<EOL><DEDENT>data = self._serializer.emit_stanza(element)<EOL>self._write(data.encode(\"<STR_LIT:utf-8>\"))<EOL><DEDENT>", "docstring": "Send an element via the transport.", "id": "f15308:c5:m16"}
{"signature": "def send_stream_head(self, stanza_namespace, stream_from, stream_to,<EOL>stream_id = None, version = '<STR_LIT:1.0>', language = None):", "body": "<EOL>with self.lock:<EOL><INDENT>self._serializer = XMPPSerializer(stanza_namespace,<EOL>self.settings[\"<STR_LIT>\"])<EOL>head = self._serializer.emit_head(stream_from, stream_to,<EOL>stream_id, version, language)<EOL>self._write(head.encode(\"<STR_LIT:utf-8>\"))<EOL><DEDENT>", "docstring": "Send stream head via the transport.\n\n:Parameters:\n    - `stanza_namespace`: namespace of stream stanzas (e.g.\n      'jabber:client')\n    - `stream_from`: the 'from' attribute of the stream. May be `None`.\n    - `stream_to`: the 'to' attribute of the stream. May be `None`.\n    - `version`: the 'version' of the stream.\n    - `language`: the 'xml:lang' of the stream\n:Types:\n    - `stanza_namespace`: `unicode`\n    - `stream_from`: `unicode`\n    - `stream_to`: `unicode`\n    - `version`: `unicode`\n    - `language`: `unicode`", "id": "f15308:c5:m13"}
{"signature": "def _resolve_hostname(self):", "body": "self._set_state(\"<STR_LIT>\")<EOL>resolver = self.settings[\"<STR_LIT>\"] <EOL>logger.debug(\"<STR_LIT>\".format(self._dst_nameports))<EOL>name, port = self._dst_nameports.pop(<NUM_LIT:0>)<EOL>self._dst_hostname = name<EOL>resolver.resolve_address(name, callback = partial(<EOL>self._got_addresses, name, port),<EOL>allow_cname = self._dst_service is None)<EOL>self.event(ResolvingAddressEvent(name))<EOL>", "docstring": "Start hostname resolution for the next name to try.\n\n        [called with `lock` acquired]", "id": "f15308:c5:m6"}
{"signature": "def handle_read(self):", "body": "with self.lock:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>if self._eof or self._socket is None:<EOL><INDENT>return<EOL><DEDENT>if self._state == \"<STR_LIT>\":<EOL><INDENT>while True:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self._continue_tls_handshake()<EOL>logger.debug(\"<STR_LIT>\".format(self._tls_state))<EOL>if self._tls_state != \"<STR_LIT>\":<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>elif self._tls_state == \"<STR_LIT>\":<EOL><INDENT>while self._socket and not self._eof:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>data = self._socket.read(<NUM_LIT>)<EOL><DEDENT>except ssl.SSLError as err:<EOL><INDENT>if err.args[<NUM_LIT:0>] == ssl.SSL_ERROR_WANT_READ:<EOL><INDENT>break<EOL><DEDENT>elif err.args[<NUM_LIT:0>] == ssl.SSL_ERROR_WANT_WRITE:<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>except socket.error as err:<EOL><INDENT>if err.args[<NUM_LIT:0>] == errno.EINTR:<EOL><INDENT>continue<EOL><DEDENT>elif err.args[<NUM_LIT:0>] in BLOCKING_ERRORS:<EOL><INDENT>break<EOL><DEDENT>elif err.args[<NUM_LIT:0>] == errno.ECONNRESET:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL>data = None<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>self._feed_reader(data)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>while self._socket and not self._eof:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>try:<EOL><INDENT>data = self._socket.recv(<NUM_LIT>)<EOL><DEDENT>except socket.error as err:<EOL><INDENT>if err.args[<NUM_LIT:0>] == errno.EINTR:<EOL><INDENT>continue<EOL><DEDENT>elif err.args[<NUM_LIT:0>] in BLOCKING_ERRORS:<EOL><INDENT>break<EOL><DEDENT>elif err.args[<NUM_LIT:0>] == errno.ECONNRESET:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL>data = None<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>self._feed_reader(data)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Handle the 'channel readable' state. E.g. read from a socket.", "id": "f15308:c5:m28"}
{"signature": "def _connected(self):", "body": "self._auth_properties['<STR_LIT>'] = self._dst_addr[<NUM_LIT:0>]<EOL>if self._dst_service:<EOL><INDENT>self._auth_properties['<STR_LIT>'] = self._dst_name<EOL><DEDENT>if self._dst_hostname is not None:<EOL><INDENT>self._auth_properties['<STR_LIT>'] = self._dst_hostname<EOL><DEDENT>else:<EOL><INDENT>self._auth_properties['<STR_LIT>'] = self._dst_addr[<NUM_LIT:0>]<EOL><DEDENT>self._auth_properties['<STR_LIT>'] = None<EOL>self.event(ConnectedEvent(self._dst_addr))<EOL>self._set_state(\"<STR_LIT>\")<EOL>self._stream.transport_connected()<EOL>", "docstring": "Handle connection success.", "id": "f15308:c5:m9"}
{"signature": "def _write(self, data):", "body": "OUT_LOGGER.debug(\"<STR_LIT>\", data)<EOL>if self._hup or not self._socket:<EOL><INDENT>raise PyXMPPIOError(\"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>while data:<EOL><INDENT>try:<EOL><INDENT>sent = self._socket.send(data)<EOL><DEDENT>except ssl.SSLError as err:<EOL><INDENT>if err.args[<NUM_LIT:0>] == ssl.SSL_ERROR_WANT_WRITE:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>except socket.error as err:<EOL><INDENT>if err.args[<NUM_LIT:0>] == errno.EINTR:<EOL><INDENT>continue<EOL><DEDENT>if err.args[<NUM_LIT:0>] in BLOCKING_ERRORS:<EOL><INDENT>wait_for_write(self._socket)<EOL>continue<EOL><DEDENT>raise<EOL><DEDENT>data = data[sent:]<EOL><DEDENT><DEDENT>except (IOError, OSError, socket.error) as err:<EOL><INDENT>raise PyXMPPIOError(\"<STR_LIT>\".format(err))<EOL><DEDENT>", "docstring": "Write raw data to the socket.\n\n        :Parameters:\n            - `data`: data to send\n        :Types:\n            - `data`: `bytes`", "id": "f15308:c5:m11"}
{"signature": "def set_target(self, stream):", "body": "with self.lock:<EOL><INDENT>if self._stream:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self._stream = stream<EOL>self._reader = StreamReader(stream)<EOL><DEDENT>", "docstring": "Make the `stream` the target for this transport instance.\n\n        The 'stream_start', 'stream_end' and 'stream_element' methods\n        of the target will be called when appropriate content is received.\n\n        :Parameters:\n            - `stream`: the stream handler to receive stream content\n              from the transport\n        :Types:\n            - `stream`: `StreamBase`", "id": "f15308:c5:m12"}
{"signature": "def disconnect(self):", "body": "logger.debug(\"<STR_LIT>\")<EOL>with self.lock:<EOL><INDENT>if self._socket is None:<EOL><INDENT>if self._state != \"<STR_LIT>\":<EOL><INDENT>self.event(DisconnectedEvent(self._dst_addr))<EOL>self._set_state(\"<STR_LIT>\")<EOL><DEDENT>return<EOL><DEDENT>if self._hup or not self._serializer:<EOL><INDENT>self._close()<EOL><DEDENT>else:<EOL><INDENT>self.send_stream_tail()<EOL><DEDENT><DEDENT>", "docstring": "Disconnect the stream gracefully.", "id": "f15308:c5:m33"}
{"signature": "def is_writable(self):", "body": "with self.lock:<EOL><INDENT>return self._socket and bool(self._write_queue)<EOL><DEDENT>", "docstring": ":Return: `False` as currently the data is always written synchronously", "id": "f15308:c5:m21"}
{"signature": "def _continue_tls_handshake(self):", "body": "try:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self._socket.do_handshake()<EOL><DEDENT>except ssl.SSLError as err:<EOL><INDENT>if err.args[<NUM_LIT:0>] == ssl.SSL_ERROR_WANT_READ:<EOL><INDENT>self._tls_state = \"<STR_LIT>\"<EOL>logger.debug(\"<STR_LIT>\")<EOL>self._state_cond.notify()<EOL>return<EOL><DEDENT>elif err.args[<NUM_LIT:0>] == ssl.SSL_ERROR_WANT_WRITE:<EOL><INDENT>self._tls_state = \"<STR_LIT>\"<EOL>logger.debug(\"<STR_LIT>\")<EOL>self._write_queue.appendleft(TLSHandshake)<EOL>return<EOL><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>self._tls_state = \"<STR_LIT>\"<EOL>self._set_state(\"<STR_LIT>\")<EOL>self._auth_properties['<STR_LIT>'] = \"<STR_LIT>\"<EOL>if \"<STR_LIT>\" in CHANNEL_BINDING_TYPES:<EOL><INDENT>try:<EOL><INDENT>tls_unique = self._socket.get_channel_binding(\"<STR_LIT>\")<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>self._auth_properties['<STR_LIT>'] = {<EOL>\"<STR_LIT>\": tls_unique}<EOL><DEDENT><DEDENT>try:<EOL><INDENT>cipher = self._socket.cipher()<EOL><DEDENT>except AttributeError:<EOL><INDENT>cipher = \"<STR_LIT>\"<EOL><DEDENT>cert = get_certificate_from_ssl_socket(self._socket)<EOL>self.event(TLSConnectedEvent(cipher, cert))<EOL>", "docstring": "Continue a TLS handshake.", "id": "f15308:c5:m27"}
{"signature": "def handle_hup(self):", "body": "with self.lock:<EOL><INDENT>if self._state == '<STR_LIT>' and self._dst_addrs:<EOL><INDENT>self._hup = False<EOL>self._set_state(\"<STR_LIT>\")<EOL>return<EOL><DEDENT><DEDENT>self._hup = True<EOL>", "docstring": "Handle the 'channel hungup' state. The handler should not be writable\nafter this.", "id": "f15308:c5:m29"}
{"signature": "def handle_nval(self):", "body": "if self._socket is None:<EOL><INDENT>return<EOL><DEDENT>self._set_state(\"<STR_LIT>\")<EOL>raise PyXMPPIOError(\"<STR_LIT>\")<EOL>", "docstring": "Handle an error reported.", "id": "f15308:c5:m31"}
{"signature": "def handle_err(self):", "body": "with self.lock:<EOL><INDENT>if self._state == '<STR_LIT>' and self._dst_addrs:<EOL><INDENT>self._hup = False<EOL>self._set_state(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>self._socket.close()<EOL>self._socket = None<EOL>self._set_state(\"<STR_LIT>\")<EOL>self._write_queue.clear()<EOL>self._write_queue_cond.notify()<EOL><DEDENT>raise PyXMPPIOError(\"<STR_LIT>\")<EOL>", "docstring": "Handle an error reported.", "id": "f15308:c5:m30"}
{"signature": "def connect(self, addr, port = None, service = None):", "body": "with self.lock:<EOL><INDENT>self._connect(addr, port, service)<EOL><DEDENT>", "docstring": "Start establishing TCP connection with given address.\n\n        One of: `port` or `service` must be provided and `addr` must be\n        a domain name and not an IP address if `port` is not given.\n\n        When `service` is given try an SRV lookup for that service\n        at domain `addr`. If `service` is not given or `addr` is an IP address,\n        or the SRV lookup fails, connect to `port` at host `addr` directly.\n\n        [initiating entity only]\n\n        :Parameters:\n            - `addr`: peer name or IP address\n            - `port`: port number to connect to\n            - `service`: service name (to be resolved using SRV DNS records)", "id": "f15308:c5:m2"}
{"signature": "def send_stream_tail(self):", "body": "with self.lock:<EOL><INDENT>if not self._socket or self._hup:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>data = self._serializer.emit_tail()<EOL>try:<EOL><INDENT>self._write(data.encode(\"<STR_LIT:utf-8>\"))<EOL><DEDENT>except (IOError, SystemError, socket.error) as err:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(err))<EOL><DEDENT>self._serializer = None<EOL>self._hup = True<EOL>if self._tls_state is None:<EOL><INDENT>try:<EOL><INDENT>self._socket.shutdown(socket.SHUT_WR)<EOL><DEDENT>except socket.error:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>self._set_state(\"<STR_LIT>\")<EOL>self._write_queue.clear()<EOL>self._write_queue_cond.notify()<EOL><DEDENT>", "docstring": "Send stream tail via the transport.", "id": "f15308:c5:m15"}
{"signature": "def starttls(self, **kwargs):", "body": "with self.lock:<EOL><INDENT>self.event(TLSConnectingEvent())<EOL>self._write_queue.append(StartTLS(**kwargs))<EOL>self._write_queue_cond.notify()<EOL><DEDENT>", "docstring": "Request a TLS handshake on the socket ans switch\n        to encrypted output.\n        The handshake will start after any currently buffered data is sent.\n\n        :Parameters:\n            - `kwargs`: arguments for :std:`ssl.wrap_socket`", "id": "f15308:c5:m24"}
{"signature": "def __init__(self, settings = None, sock = None):", "body": "if settings:<EOL><INDENT>self.settings = settings<EOL><DEDENT>else:<EOL><INDENT>self.settings = XMPPSettings()<EOL><DEDENT>self.lock = threading.RLock()<EOL>self._write_queue = deque()<EOL>self._write_queue_cond = threading.Condition(self.lock)<EOL>self._eof = False<EOL>self._hup = False<EOL>self._stream = None<EOL>self._serializer = None<EOL>self._reader = None<EOL>self._dst_name = None<EOL>self._dst_port = None<EOL>self._dst_service = None<EOL>self._dst_nameports = None<EOL>self._dst_hostname = None<EOL>self._dst_addrs = None<EOL>self._tls_state = None<EOL>self._state_cond = threading.Condition(self.lock)<EOL>if sock is None:<EOL><INDENT>self._socket = None<EOL>self._dst_addr = None<EOL>self._family = None<EOL>self._state = None<EOL><DEDENT>else:<EOL><INDENT>self._socket = sock<EOL>self._family = sock.family<EOL>self._dst_addr = sock.getpeername()<EOL>self._state = \"<STR_LIT>\"<EOL>self._socket.setblocking(False)<EOL><DEDENT>self._event_queue = self.settings[\"<STR_LIT>\"]<EOL>self._auth_properties = {}<EOL>", "docstring": "Initialize the `TCPTransport` object.\n\n        :Parameters:\n            - `settings`: XMPP settings to use\n            - `sock`: existing socket, e.g. for accepted incoming connection.", "id": "f15308:c5:m0"}
{"signature": "def __init__(self, jid, stanza_route, handlers, settings = None):", "body": "if handlers is None:<EOL><INDENT>handlers = []<EOL><DEDENT>if settings is None:<EOL><INDENT>settings = XMPPSettings()<EOL><DEDENT>if \"<STR_LIT>\" not in settings:<EOL><INDENT>settings[\"<STR_LIT>\"] = jid.resource<EOL><DEDENT>StreamBase.__init__(self, STANZA_CLIENT_NS, stanza_route,<EOL>handlers, settings)<EOL>self.me = JID(jid.local, jid.domain)<EOL>", "docstring": "Initialize the ClientStream object.\n\n        :Parameters:\n            - `jid`: local JID.\n            - `handlers`: XMPP feature and event handlers\n            - `settings`: PyXMPP settings for the stream\n        :Types:\n            - `jid`: `JID`\n            - `settings`: `XMPPSettings`", "id": "f15309:c0:m0"}
{"signature": "def receive(self, transport, myname = None):", "body": "if myname is None:<EOL><INDENT>myname = JID(self.me.domain)<EOL><DEDENT>return StreamBase.receive(self, transport, myname)<EOL>", "docstring": "Receive an XMPP connection over the `transport`.\n\n        :Parameters:\n            - `transport`: an XMPP transport instance\n            - `myname`: local stream endpoint name (defaults to own jid domain\n              part).", "id": "f15309:c0:m2"}
{"signature": "def initiate(self, transport, to = None):", "body": "if to is None:<EOL><INDENT>to = JID(self.me.domain)<EOL><DEDENT>return StreamBase.initiate(self, transport, to)<EOL>", "docstring": "Initiate an XMPP connection over the `transport`.\n\n        :Parameters:\n            - `transport`: an XMPP transport instance\n            - `to`: peer name (defaults to own jid domain part)", "id": "f15309:c0:m1"}
{"signature": "def __init__(self, element = None, from_jid = None, to_jid = None,<EOL>stanza_type = None, stanza_id = None,<EOL>error = None, error_cond = None, return_path = None,<EOL>language = None,<EOL>subject = None, body = None, thread = None):", "body": "<EOL>self._subject = None<EOL>self._body = None<EOL>self._thread = None<EOL>if element is None:<EOL><INDENT>element = \"<STR_LIT:message>\"<EOL><DEDENT>elif not isinstance(element, ElementClass):<EOL><INDENT>raise TypeError(\"<STR_LIT>\" + repr(element))<EOL><DEDENT>Stanza.__init__(self, element, from_jid = from_jid, to_jid = to_jid,<EOL>stanza_type = stanza_type, stanza_id = stanza_id,<EOL>error = error, error_cond = error_cond,<EOL>return_path = return_path, language = language)<EOL>if self.element_name != \"<STR_LIT:message>\":<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self._subject_tag = self._ns_prefix + \"<STR_LIT>\"<EOL>self._body_tag = self._ns_prefix + \"<STR_LIT:body>\"<EOL>self._thread_tag = self._ns_prefix + \"<STR_LIT>\"<EOL>if self._element is not None:<EOL><INDENT>self._decode_subelements()<EOL><DEDENT>if subject is not None:<EOL><INDENT>self.subject = subject<EOL><DEDENT>if body is not None:<EOL><INDENT>self.body = body<EOL><DEDENT>if thread is not None:<EOL><INDENT>self.thread = thread<EOL><DEDENT>", "docstring": "Initialize a `Message` object.\n\n        :Parameters:\n            - `element`: XML element of this stanza.\n            - `from_jid`: sender JID.\n            - `to_jid`: recipient JID.\n            - `stanza_type`: staza type: one of: \"normal\", \"chat\", \"headline\",\n              \"error\", \"groupchat\"\n            - `stanza_id`: stanza id -- value of stanza's \"id\" attribute. If\n              not given, then unique for the session value is generated.\n            - `error_cond`: error condition name. 
Ignored if `stanza_type`\n              is not \"error\".\n            - `language`: default language for the stanza content\n            - `subject`: message subject,\n            - `body`: message body.\n            - `thread`: message thread id.\n        :Types:\n            - `element`: :etree:`ElementTree.Element`\n            - `from_jid`: `JID`\n            - `to_jid`: `JID`\n            - `stanza_type`: `unicode`\n            - `stanza_id`: `unicode`\n            - `error`: `pyxmpp.error.StanzaErrorElement`\n            - `error_cond`: `unicode`\n            - `subject`: `unicode`\n            - `body`: `unicode`\n            - `thread`: `unicode`\n            - `language`: `unicode`", "id": "f15310:c0:m0"}
{"signature": "@property<EOL><INDENT>def thread(self): <DEDENT>", "body": "return self._thread<EOL>", "docstring": "Thread id.\n\n        :Returntype: `unicode`", "id": "f15310:c0:m8"}
{"signature": "def make_error_response(self, cond):", "body": "if self.stanza_type == \"<STR_LIT:error>\":<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>msg = Message(stanza_type = \"<STR_LIT:error>\", from_jid = self.to_jid,<EOL>to_jid = self.from_jid, stanza_id = self.stanza_id,<EOL>error_cond = cond,<EOL>subject = self._subject, body = self._body,<EOL>thread = self._thread)<EOL>if self._payload is None:<EOL><INDENT>self.decode_payload()<EOL><DEDENT>for payload in self._payload:<EOL><INDENT>msg.add_payload(payload.copy())<EOL><DEDENT>return msg<EOL>", "docstring": "Create error response for any non-error message stanza.\n\n        :Parameters:\n            - `cond`: error condition name, as defined in XMPP specification.\n\n        :return: new message stanza with the same \"id\" as self, \"from\" and\n            \"to\" attributes swapped, type=\"error\" and containing <error />\n            element plus payload of `self`.\n        :returntype: `Message`", "id": "f15310:c0:m10"}
{"signature": "def as_xml(self):", "body": "result = Stanza.as_xml(self)<EOL>if self._subject:<EOL><INDENT>child = ElementTree.SubElement(result, self._subject_tag)<EOL>child.text = self._subject<EOL><DEDENT>if self._body:<EOL><INDENT>child = ElementTree.SubElement(result, self._body_tag)<EOL>child.text = self._body<EOL><DEDENT>if self._thread:<EOL><INDENT>child = ElementTree.SubElement(result, self._thread_tag)<EOL>child.text = self._thread<EOL><DEDENT>return result<EOL>", "docstring": "Return the XML stanza representation.\n\n        Always return an independent copy of the stanza XML representation,\n        which can be freely modified without affecting the stanza.\n\n        :returntype: :etree:`ElementTree.Element`", "id": "f15310:c0:m2"}
{"signature": "def _decode_subelements(self):", "body": "for child in self._element:<EOL><INDENT>if child.tag == self._subject_tag:<EOL><INDENT>self._subject = child.text<EOL><DEDENT>elif child.tag == self._body_tag:<EOL><INDENT>self._body = child.text<EOL><DEDENT>elif child.tag == self._thread_tag:<EOL><INDENT>self._thread = child.text<EOL><DEDENT><DEDENT>", "docstring": "Decode the stanza subelements.", "id": "f15310:c0:m1"}
{"signature": "def __setitem__(self, key, value):", "body": "self._settings[unicode(key)] = value<EOL>", "docstring": "Set a parameter value.\n\n        :Parameters:\n            - `key`: the parameter name\n            - `value`: the new value\n        :Types:\n            - `key`: `unicode`", "id": "f15311:c1:m5"}
{"signature": "def __getitem__(self, key):", "body": "return self.get(key, required = True)<EOL>", "docstring": "Get a parameter value. Return the default if no value is set\n        and the default is provided by PyXMPP.\n\n        :Parameters:\n            - `key`: the parameter name\n        :Types:\n            - `key`: `unicode`", "id": "f15311:c1:m4"}
{"signature": "def __len__(self):", "body": "return len(self._settings)<EOL>", "docstring": "Number of parameters set.", "id": "f15311:c1:m1"}
{"signature": "def __init__(self, data = None):", "body": "if data is None:<EOL><INDENT>self._settings = {}<EOL><DEDENT>else:<EOL><INDENT>self._settings = dict(data)<EOL><DEDENT>", "docstring": "Create settings, optionally initialized with `data`.\n\n        :Parameters:\n            - `data`: initial data\n        :Types:\n            - `data`: any mapping, including `XMPPSettings`", "id": "f15311:c1:m0"}
{"signature": "@staticmethod<EOL><INDENT>def get_int_range_validator(start, stop):<DEDENT>", "body": "def validate_int_range(value):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>value = int(value)<EOL>if value >= start and value < stop:<EOL><INDENT>return value<EOL><DEDENT>raise ValueError(\"<STR_LIT>\".format(start, stop))<EOL><DEDENT>return validate_int_range<EOL>", "docstring": "Return an integer range validator to be used with `add_setting`.\n\n        :Parameters:\n            - `start`: minimum value for the integer\n            - `stop`: the upper bound (maximum value + 1)\n        :Types:\n            - `start`: `int`\n            - `stop`: `int`\n\n        :return: a validator function", "id": "f15311:c1:m15"}
{"signature": "@classmethod<EOL><INDENT>def add_setting(cls, name, type = unicode, default = None, factory = None,<EOL>cache = False, default_d = None, doc = None,<EOL>cmdline_help = None, validator = None, basic = False):<DEDENT>", "body": "<EOL>setting_def = _SettingDefinition(name, type, default, factory,<EOL>cache, default_d, doc,<EOL>cmdline_help, validator, basic)<EOL>if name not in cls._defs:<EOL><INDENT>cls._defs[name] = setting_def<EOL>return<EOL><DEDENT>duplicate = cls._defs[name]<EOL>if duplicate.type != setting_def.type:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if duplicate.default != setting_def.default:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if duplicate.factory != setting_def.factory:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Add a new setting definition.\n\n        :Parameters:\n            - `name`: setting name\n            - `type`: setting type object or type description\n            - `default`: default value\n            - `factory`: default value factory\n            - `cache`: if `True` the `factory` will be called only once\n              and its value stored as a constant default.\n            - `default_d`: description of the default value\n            - `doc`: setting documentation\n            - `cmdline_help`: command line argument description. When not\n              provided then the setting won't be available as a command-line\n              option\n            - `basic`: when `True` the option is considered a basic option -\n              one of those which should usually stay configurable in\n              an application.\n            - `validator`: function validating command-line option value string\n              and returning proper value for the settings objects. 
Defaults\n              to `type`.\n        :Types:\n            - `name`: `unicode`\n            - `type`: type or `unicode`\n            - `factory`: a callable\n            - `cache`: `bool`\n            - `default_d`: `unicode`\n            - `doc`: `unicode`\n            - `cmdline_help`: `unicode`\n            - `basic`: `bool`\n            - `validator`: a callable", "id": "f15311:c1:m11"}
{"signature": "def __iter__(self):", "body": "for key in self._settings.iterkeys():<EOL><INDENT>return self[key]<EOL><DEDENT>", "docstring": "Iterate over the parameter names.", "id": "f15311:c1:m2"}
{"signature": "@staticmethod<EOL><INDENT>def validate_string_list(value):<DEDENT>", "body": "try:<EOL><INDENT>if sys.version_info.major < <NUM_LIT:3>:<EOL><INDENT>from locale import getpreferredencoding<EOL>encoding = getpreferredencoding()<EOL>value = value.decode(encoding)<EOL><DEDENT>return [x.strip() for x in value.split(u\"<STR_LIT:U+002C>\")]<EOL><DEDENT>except (AttributeError, TypeError, UnicodeError):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Validator for string lists to be used with `add_setting`.", "id": "f15311:c1:m12"}
{"signature": "def element_to_unicode(element):", "body": "if hasattr(ElementTree, '<STR_LIT>'):<EOL><INDENT>return ElementTree.tounicode(\"<STR_LIT>\")<EOL><DEDENT>elif sys.version_info.major < <NUM_LIT:3>:<EOL><INDENT>return unicode(ElementTree.tostring(element))<EOL><DEDENT>else:<EOL><INDENT>return ElementTree.tostring(element, encoding = \"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Serialize an XML element into a unicode string.\n\n    This should work the same on Python2 and Python3 and with all\n    :etree:`ElementTree` implementations.\n\n    :Parameters:\n        - `element`: the XML element to serialize\n    :Types:\n        - `element`: :etree:`ElementTree.Element`", "id": "f15312:m0"}
{"signature": "def datetime_utc_to_local(utc):", "body": "<EOL>ts = time.time()<EOL>cur = datetime.datetime.fromtimestamp(ts)<EOL>cur_utc = datetime.datetime.utcfromtimestamp(ts)<EOL>offset = cur - cur_utc<EOL>t = utc<EOL>d = datetime.timedelta(hours = <NUM_LIT:2>)<EOL>while d > _MINUTE:<EOL><INDENT>local = t + offset<EOL>tm = local.timetuple()<EOL>tm = tm[<NUM_LIT:0>:<NUM_LIT:8>] + (<NUM_LIT:0>, )<EOL>ts = time.mktime(tm)<EOL>u = datetime.datetime.utcfromtimestamp(ts)<EOL>diff = u - utc<EOL>if diff < _MINUTE and diff > -_MINUTE:<EOL><INDENT>break<EOL><DEDENT>if diff > _NULLDELTA:<EOL><INDENT>offset -= d<EOL><DEDENT>else:<EOL><INDENT>offset += d<EOL><DEDENT>d //= <NUM_LIT:2><EOL><DEDENT>return local<EOL>", "docstring": "An ugly hack to convert naive :std:`datetime.datetime` object containing\nUTC time to a naive :std:`datetime.datetime` object with local time.\nIt seems standard Python 2.3 library doesn't provide any better way to\ndo that.", "id": "f15313:m1"}
{"signature": "@stream_element_handler(FAILURE_TAG, \"<STR_LIT>\")<EOL><INDENT>def _process_sasl_failure(self, stream, element):<DEDENT>", "body": "_unused = stream<EOL>if not self.authenticator:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>logger.debug(\"<STR_LIT>\".format(<EOL>element_to_unicode(element)))<EOL>raise SASLAuthenticationFailed(\"<STR_LIT>\")<EOL>", "docstring": "Process incoming <sasl:failure/> element.\n\n        [initiating entity only]", "id": "f15314:c1:m9"}
{"signature": "def _handle_auth_success(self, stream, success):", "body": "if not self._check_authorization(success.properties, stream):<EOL><INDENT>element = ElementTree.Element(FAILURE_TAG)<EOL>ElementTree.SubElement(element, SASL_QNP + \"<STR_LIT>\")<EOL>return True<EOL><DEDENT>authzid = success.properties.get(\"<STR_LIT>\")<EOL>if authzid:<EOL><INDENT>peer = JID(success.authzid)<EOL><DEDENT>elif \"<STR_LIT:username>\" in success.properties:<EOL><INDENT>peer = JID(success.properties[\"<STR_LIT:username>\"], stream.me.domain)<EOL><DEDENT>else:<EOL><INDENT>peer = None<EOL><DEDENT>stream.set_peer_authenticated(peer, True)<EOL>", "docstring": "Handle successful authentication.\n\n        Send <success/> and mark the stream peer authenticated.\n\n        [receiver only]", "id": "f15314:c1:m4"}
{"signature": "@stream_element_handler(CHALLENGE_TAG, \"<STR_LIT>\")<EOL><INDENT>def _process_sasl_challenge(self, stream, element):<DEDENT>", "body": "if not self.authenticator:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>content = element.text.encode(\"<STR_LIT>\")<EOL>ret = self.authenticator.challenge(a2b_base64(content))<EOL>if isinstance(ret, sasl.Response):<EOL><INDENT>element = ElementTree.Element(RESPONSE_TAG)<EOL>element.text = ret.encode()<EOL><DEDENT>else:<EOL><INDENT>element = ElementTree.Element(ABORT_TAG)<EOL><DEDENT>stream.write_element(element)<EOL>if isinstance(ret, sasl.Failure):<EOL><INDENT>stream.disconnect()<EOL>raise SASLAuthenticationFailed(\"<STR_LIT>\")<EOL><DEDENT>return True<EOL>", "docstring": "Process incoming <sasl:challenge/> element.\n\n        [initiating entity only]", "id": "f15314:c1:m5"}
{"signature": "def _check_authorization(self, properties, stream):", "body": "authzid = properties.get(\"<STR_LIT>\")<EOL>if not authzid:<EOL><INDENT>return True<EOL><DEDENT>try:<EOL><INDENT>jid = JID(authzid)<EOL><DEDENT>except ValueError:<EOL><INDENT>return False<EOL><DEDENT>if \"<STR_LIT:username>\" not in properties:<EOL><INDENT>result = False<EOL><DEDENT>elif jid.local != properties[\"<STR_LIT:username>\"]:<EOL><INDENT>result = False<EOL><DEDENT>elif jid.domain != stream.me.domain:<EOL><INDENT>result = False<EOL><DEDENT>elif jid.resource:<EOL><INDENT>result = False<EOL><DEDENT>else:<EOL><INDENT>result = True<EOL><DEDENT>return result<EOL>", "docstring": "Check authorization id and other properties returned by the\n        authentication mechanism.\n\n        [receiving entity only]\n\n        Allow only no authzid or authzid equal to current username@domain\n\n        FIXME: other rules in s2s\n\n        :Parameters:\n            - `properties`: data obtained during authentication\n        :Types:\n            - `properties`: mapping\n\n        :return: `True` if user is authorized to use a provided authzid\n        :returntype: `bool`", "id": "f15314:c1:m7"}
{"signature": "@stream_element_handler(AUTH_TAG, \"<STR_LIT>\")<EOL><INDENT>def process_sasl_auth(self, stream, element):<DEDENT>", "body": "if self.authenticator:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>password_db = self.settings[\"<STR_LIT>\"]<EOL>mechanism = element.get(\"<STR_LIT>\")<EOL>if not mechanism:<EOL><INDENT>stream.send_stream_error(\"<STR_LIT>\")<EOL>raise FatalStreamError(\"<STR_LIT>\")<EOL><DEDENT>stream.auth_method_used = mechanism<EOL>self.authenticator = sasl.server_authenticator_factory(mechanism,<EOL>password_db)<EOL>content = element.text.encode(\"<STR_LIT>\")<EOL>ret = self.authenticator.start(stream.auth_properties,<EOL>a2b_base64(content))<EOL>if isinstance(ret, sasl.Success):<EOL><INDENT>element = ElementTree.Element(SUCCESS_TAG)<EOL>element.text = ret.encode()<EOL><DEDENT>elif isinstance(ret, sasl.Challenge):<EOL><INDENT>element = ElementTree.Element(CHALLENGE_TAG)<EOL>element.text = ret.encode()<EOL><DEDENT>else:<EOL><INDENT>element = ElementTree.Element(FAILURE_TAG)<EOL>ElementTree.SubElement(element, SASL_QNP + ret.reason)<EOL><DEDENT>stream.write_element(element)<EOL>if isinstance(ret, sasl.Success):<EOL><INDENT>self._handle_auth_success(stream, ret)<EOL><DEDENT>elif isinstance(ret, sasl.Failure):<EOL><INDENT>raise SASLAuthenticationFailed(\"<STR_LIT>\"<EOL>.format(ret.reason))<EOL><DEDENT>return True<EOL>", "docstring": "Process incoming <sasl:auth/> element.\n\n        [receiving entity only]", "id": "f15314:c1:m3"}
{"signature": "def serialize(self):", "body": "return serialize(self.as_xml())<EOL>", "docstring": "Serialize the stanza into a Unicode XML string.\n\n        :return: serialized element.\n        :returntype: `unicode`", "id": "f15315:c0:m4"}
{"signature": "def _from_xml(self, element):", "body": "<EOL>if element.tag != self.error_qname:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(<EOL>element, self.error_qname))<EOL><DEDENT>lang = element.get(XML_LANG_QNAME, None)<EOL>if lang:<EOL><INDENT>self.language = lang<EOL><DEDENT>self.condition = None<EOL>for child in element:<EOL><INDENT>if child.tag.startswith(self.cond_qname_prefix):<EOL><INDENT>if self.condition is not None:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL>continue<EOL><DEDENT>self.condition = deepcopy(child)<EOL><DEDENT>elif child.tag == self.text_qname:<EOL><INDENT>lang = child.get(XML_LANG_QNAME, None)<EOL>if lang:<EOL><INDENT>self.language = lang<EOL><DEDENT>self.text = child.text.strip()<EOL><DEDENT>else:<EOL><INDENT>bad = False<EOL>for prefix in (STREAM_QNP, STANZA_CLIENT_QNP, STANZA_SERVER_QNP,<EOL>STANZA_ERROR_QNP, STREAM_ERROR_QNP):<EOL><INDENT>if child.tag.startswith(prefix):<EOL><INDENT>logger.warning(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>bad = True<EOL>break<EOL><DEDENT><DEDENT>if not bad:<EOL><INDENT>self.custom_condition.append( deepcopy(child) )<EOL><DEDENT><DEDENT><DEDENT>if self.condition is None:<EOL><INDENT>self.condition = ElementTree.Element(self.cond_qname_prefix<EOL>+ \"<STR_LIT>\")<EOL><DEDENT>if self.condition.tag in OBSOLETE_CONDITIONS:<EOL><INDENT>new_cond_name = OBSOLETE_CONDITIONS[self.condition.tag]<EOL>self.condition = ElementTree.Element(new_cond_name)<EOL><DEDENT>", "docstring": "Initialize an ErrorElement object from an XML element.\n\n        :Parameters:\n            - `element`: XML element to be decoded.\n        :Types:\n            - `element`: :etree:`ElementTree.Element`", "id": "f15315:c0:m1"}
{"signature": "def _from_xml(self, element):", "body": "ErrorElement._from_xml(self, element)<EOL>error_type = element.get(\"<STR_LIT:type>\")<EOL>if error_type:<EOL><INDENT>self.error_type = error_type<EOL><DEDENT>", "docstring": "Initialize an ErrorElement object from an XML element.\n\n        :Parameters:\n            - `element`: XML element to be decoded.\n        :Types:\n            - `element`: :etree:`ElementTree.Element`", "id": "f15315:c2:m1"}
{"signature": "def as_xml(self):", "body": "result = ElementTree.Element(self.error_qname)<EOL>result.append(deepcopy(self.condition))<EOL>if self.text:<EOL><INDENT>text = ElementTree.SubElement(result, self.text_qname)<EOL>if self.language:<EOL><INDENT>text.set(XML_LANG_QNAME, self.language)<EOL><DEDENT>text.text = self.text<EOL><DEDENT>return result<EOL>", "docstring": "Return the XML error representation.\n\n        :returntype: :etree:`ElementTree.Element`", "id": "f15315:c0:m5"}
{"signature": "@property<EOL><INDENT>def condition_name(self):<DEDENT>", "body": "return self.condition.tag.split(\"<STR_LIT:}>\", <NUM_LIT:1>)[<NUM_LIT:1>]<EOL>", "docstring": "Return the condition name (condition element name without the\n        namespace).", "id": "f15315:c0:m2"}
{"signature": "def as_xml(self, stanza_namespace = None): ", "body": "if stanza_namespace:<EOL><INDENT>self.error_qname = \"<STR_LIT>\".format(stanza_namespace)<EOL>self.text_qname = \"<STR_LIT>\".format(stanza_namespace)<EOL><DEDENT>result = ErrorElement.as_xml(self)<EOL>result.set(\"<STR_LIT:type>\", self.error_type)<EOL>return result<EOL>", "docstring": "Return the XML error representation.\n\n        :Parameters:\n            - `stanza_namespace`: namespace URI of the containing stanza\n        :Types:\n            - `stanza_namespace`: `unicode`\n\n        :returntype: :etree:`ElementTree.Element`", "id": "f15315:c2:m3"}
{"signature": "def get_message(self):", "body": "cond = self.condition_name<EOL>if cond in STREAM_ERRORS:<EOL><INDENT>return STREAM_ERRORS[cond][<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Get the standard English message for the error.\n\n        :return: the error message.\n        :returntype: `unicode`", "id": "f15315:c1:m1"}
{"signature": "def __init__(self, settings = None):", "body": "if settings is None:<EOL><INDENT>self.settings = XMPPSettings()<EOL><DEDENT>else:<EOL><INDENT>self.settings = settings<EOL><DEDENT>self.stream = None<EOL>self.requested = False<EOL>self.tls_socket = None<EOL>", "docstring": "Initialize the TLS handler.\n\n        :Parameters:\n          - `settings`: settings for StartTLS.\n        :Types:\n          - `settings`: `XMPPSettings`", "id": "f15316:c0:m0"}
{"signature": "def _request_tls(self):", "body": "self.requested = True<EOL>element = ElementTree.Element(STARTTLS_TAG)<EOL>self.stream.write_element(element)<EOL>", "docstring": "Request a TLS-encrypted connection.\n\n        [initiating entity only]", "id": "f15316:c0:m3"}
{"signature": "@stream_element_handler(PROCEED_TAG, \"<STR_LIT>\")<EOL><INDENT>def _process_tls_proceed(self, stream, element):<DEDENT>", "body": "<EOL>if not self.requested:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(element))<EOL>return False<EOL><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>self.requested = False<EOL>self._make_tls_connection()<EOL>return True<EOL>", "docstring": "Handle the <proceed /> element.", "id": "f15316:c0:m5"}
{"signature": "@staticmethod<EOL><INDENT>def is_certificate_valid(stream, cert):<DEDENT>", "body": "try:<EOL><INDENT>logger.debug(\"<STR_LIT>\".format(cert))<EOL>if not cert:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>if not cert.validated:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>srv_type = stream.transport._dst_service <EOL>if cert.verify_server(stream.peer, srv_type):<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(stream.peer))<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>logger.debug(\"<STR_LIT>\"<EOL>.format(stream.peer))<EOL>return False<EOL><DEDENT><DEDENT>except:<EOL><INDENT>logger.exception(\"<STR_LIT>\")<EOL>raise<EOL><DEDENT>", "docstring": "Default certificate verification callback for TLS connections.\n\n        :Parameters:\n            - `cert`: certificate information\n        :Types:\n            - `cert`: `CertificateData`\n\n        :return: computed verification result.", "id": "f15316:c0:m9"}
{"signature": "@stream_element_handler(STARTTLS_TAG, \"<STR_LIT>\")<EOL><INDENT>def _process_tls_starttls(self, stream, element):<DEDENT>", "body": "<EOL>_unused, _unused = stream, element<EOL>raise FatalStreamError(\"<STR_LIT>\")<EOL>", "docstring": "Handle <starttls/> element.", "id": "f15316:c0:m6"}
{"signature": "@stream_element_handler(FAILURE_TAG, \"<STR_LIT>\")<EOL><INDENT>def _process_tls_failure(self, stream, element):<DEDENT>", "body": "<EOL>_unused, _unused = stream, element<EOL>raise TLSNegotiationFailed(\"<STR_LIT>\")<EOL>", "docstring": "Handle the <failure /> element.", "id": "f15316:c0:m4"}
{"signature": "def log_ignored(self):", "body": "self.logger_ignored.debug(u\"<STR_LIT>\"<EOL>.format(self.message))<EOL>", "docstring": "Log message via the \"pyxmpp.ProtocolError.ignored\" logger.", "id": "f15317:c26:m4"}
{"signature": "def log_reported(self):", "body": "self.logger_reported.debug(u\"<STR_LIT>\"<EOL>.format(self.message))<EOL>", "docstring": "Log message via the \"pyxmpp.ProtocolError.reported\" logger.", "id": "f15317:c26:m3"}
{"signature": "@property<EOL><INDENT>def xmpp_name(self):<DEDENT>", "body": "return self.args[<NUM_LIT:0>]<EOL>", "docstring": "XMPP error name which should be reported", "id": "f15317:c26:m1"}
{"signature": "@iq_set_stanza_handler(XMLPayload, SESSION_TAG)<EOL><INDENT>def handle_bind_iq_set(self, stanza):<DEDENT>", "body": "<EOL>logger.debug(\"<STR_LIT>\")<EOL>return stanza.make_result_response()<EOL>", "docstring": "Handler <iq type=\"set\"/> for session establishment.", "id": "f15318:c0:m6"}
{"signature": "@property<EOL><INDENT>def colnames(self):<DEDENT>", "body": "return self.__data_columns.colnames<EOL>", "docstring": "Getter for the columns names of the DataFrame.\n\n:return: returns a list of column names\n:rtype: list(str)", "id": "f15329:c0:m14"}
{"signature": "def cbind(self, **kwargs):", "body": "self.__data_columns.cbind(**kwargs)<EOL>return self<EOL>", "docstring": "Bind a column to the DataFrame.\n\n:param kwargs: named list of elements you want to add\n:type kwargs: keyword tuple\n:return: self\n:rtype: DataFrame", "id": "f15329:c0:m16"}
{"signature": "def subset(self, *args):", "body": "cols = {}<EOL>for k in self.colnames:<EOL><INDENT>if k in args:<EOL><INDENT>cols[str(k)] =self.__data_columns[self.colnames.index(k)].values<EOL><DEDENT><DEDENT>return DataFrame(**cols)<EOL>", "docstring": "Subset only some of the columns of the DataFrame.\n\n:param args: list of column names of the object that should be subsetted\n:type args: tuple\n:return: returns dataframe with only the columns you selected\n:rtype: DataFrame", "id": "f15329:c0:m8"}
{"signature": "def which_colnames(self, *args):", "body": "return self.__data_columns.which_colnames(*args)<EOL>", "docstring": "Computes the indexes of the columns in the DataFrame.\n\n:param args: list of column names\n:type args: tuple\n:return: returns a list of indexes\n:rtype: list(int)", "id": "f15329:c0:m15"}
{"signature": "def modify(self, clazz, new_col, *args):", "body": "if is_callable(clazz) and not is_none(new_col) and has_elements(*args):<EOL><INDENT>return self.__do_modify(clazz, new_col, *args)<EOL><DEDENT>", "docstring": "Modify some columns (i.e. apply a function) and add the\nresult to the table.\n\n:param clazz: name of a class that extends class Callable\n:type clazz: class\n:param new_col: name of the new column\n:type new_col: str\n:param args: list of column names of the object that\nfunction should be applied to\n:type args: tuple\n:return: returns a new dataframe object with the modiefied values,\n i.e. the new column\n:rtype: DataFrame", "id": "f15329:c0:m10"}
{"signature": "def __init__(self, piping_method, *args):", "body": "self.__piping_method = piping_method<EOL>if args and isinstance(args[<NUM_LIT:0>], dataframe.DataFrame):<EOL><INDENT>raise PipingException(\"<STR_LIT>\")<EOL><DEDENT>elif not args:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>self.__args = args<EOL><DEDENT>", "docstring": "Constructor for chainable. Takes a tuple which is either a dataframe\nand column names to group by or only the column names\n\n:param args: tuple of params", "id": "f15331:c1:m0"}
{"signature": "@property<EOL><INDENT>def colname(self):<DEDENT>", "body": "return self.__colname<EOL>", "docstring": "Getter for the column name.\n\n:return: returns the column name", "id": "f15333:c0:m3"}
{"signature": "def subset(*args):", "body": "if args and isinstance(args[<NUM_LIT:0>], dataframe.DataFrame):<EOL><INDENT>return args[<NUM_LIT:0>].subset(*args[<NUM_LIT:1>:])<EOL><DEDENT>elif not args:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>return pipeable.Pipeable(pipeable.PipingMethod.SUBSET, *args)<EOL><DEDENT>", "docstring": "Pipeable subsetting method.\n\nTakes either\n - a dataframe and a tuple of arguments required for subsetting,\n - a tuple of arguments if a dataframe has already been piped into.\n\n:Example:\n\nsubset(dataframe, \"column\")\n\n:Example:\n\ndataframe >> subset(\"column\")\n\n:param args: tuple of arguments\n:type args: tuple\n:return: returns a dataframe object\n:rtype: DataFrame", "id": "f15334:m2"}
{"signature": "def aggregate(*args):", "body": "if args and isinstance(args[<NUM_LIT:0>], dataframe.DataFrame):<EOL><INDENT>return args[<NUM_LIT:0>].aggregate(args[<NUM_LIT:1>], args[<NUM_LIT:2>], *args[<NUM_LIT:3>:])<EOL><DEDENT>elif not args:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>return pipeable.Pipeable(pipeable.PipingMethod.AGGREGATE, *args)<EOL><DEDENT>", "docstring": "Pipeable aggregation method.\n\nTakes either \n - a dataframe and a tuple of arguments required for aggregation,\n - a tuple of arguments if a dataframe has already been piped into.\nIn any case one argument has to be a class that extends callable.\n\n:Example:\n\naggregate(dataframe, Function, \"new_col_name\", \"old_col_name\")\n\n:Example:\n\ndataframe >> aggregate(Function, \"new_col_name\", \"old_col_name\")\n\n:param args: tuple of arguments\n:type args: tuple\n:return: returns a dataframe object\n:rtype: DataFrame", "id": "f15334:m1"}
{"signature": "def find(self, *args):", "body": "curr_node = self.__root<EOL>return self.__traverse(curr_node, <NUM_LIT:0>,  *args)<EOL>", "docstring": "Find a node in the tree. If the node is not found it is added first and then returned.\n\n:param args: a tuple\n:return: returns the node", "id": "f15337:c0:m1"}
{"signature": "def __getitem__(self, item):", "body": "if isinstance(item, str) and item in self.__data_columns.colnames:<EOL><INDENT>return self.__data_columns[self.__data_columns.colnames.index(item)]<EOL><DEDENT>raise TypeError(\"<STR_LIT>\" +<EOL>\"<STR_LIT>\")<EOL>", "docstring": "Getter method for DataFrame. Returns the column with name item.\n\n:param item: the name of a column\n:type item: str\n:return: returns a column from the DataFrame\n:rtype: DataFrameColumn", "id": "f15338:c0:m1"}
{"signature": "@property<EOL><INDENT>def grouping_colnames(self):<DEDENT>", "body": "return self.__grouping_colnames<EOL>", "docstring": "Getter for the grouping column names.\n\n:return: returns the grouping column names", "id": "f15338:c0:m6"}
{"signature": "def __call__(self, *args):", "body": "pass<EOL>", "docstring": "Call method. Is used when object is called like this: object()\n\n:param args: tuple of columns\n:type args: tuple\n:return: returns a list/scalar\n:rtype: list(any)/scalar", "id": "f15339:c0:m0"}
{"signature": "@property<EOL><INDENT>def groups(self):<DEDENT>", "body": "return self.__grouping.groups<EOL>", "docstring": "Getter for all groups.\n\n:return: returns the groups\n:rtype: list(DataFrameGroup)", "id": "f15342:c0:m6"}
{"signature": "@property<EOL><INDENT>def colnames(self):<DEDENT>", "body": "return self.__grouping.ungroup().colnames<EOL>", "docstring": "Getter for the column names of the DataFrame.\n\n:return: returns column names\n:rtype: list(str)", "id": "f15342:c0:m5"}
{"signature": "def group(self, *args):", "body": "args = list(args)<EOL>args.extend([x for x in<EOL>self.__grouping.grouping_colnames if x not in args])<EOL>return GroupedDataFrame(self.__grouping.ungroup(), *args)<EOL>", "docstring": "Group the DataFrame into row-subsets.\n\n:param args: list of column names taht should be used for grouping\n:type args: tuple\n:return: returns a dataframe that has grouping information\n:rtype: GroupedDataFrame", "id": "f15342:c0:m9"}
{"signature": "def ungroup(self):", "body": "return self.__grouping.ungroup()<EOL>", "docstring": "Undo the grouping and return the DataFrame.\n\n:return: returns the original DataFrame\n:rtype: DataFrame", "id": "f15342:c0:m7"}
{"signature": "def modify(self, clazz, new_col, *args):", "body": "if is_callable(clazz)and not is_none(new_col)and has_elements(*args)and is_disjoint(self.__grouping.grouping_colnames,<EOL>args,<EOL>__DISJOINT_SETS_ERROR__):<EOL><INDENT>return self.__do_modify(clazz, new_col, *args)<EOL><DEDENT>", "docstring": "Modify some columns (i.e. apply a function)\n and add the result to the table.\n\n:param clazz: name of a class that extends class Callable\n:type clazz: class\n:param new_col: name of the new column\n:type new_col: str\n:param args: list of column names of the object that\n function should be applied to\n:type args: tuple\n:return: returns a new GroupedDataFrame object with the modified\n  values, i.e. the new column of values\n:rtype: GroupedDataFrame", "id": "f15342:c0:m10"}
{"signature": "def aggregate(self, clazz, new_col, *args):", "body": "if is_callable(clazz)and not is_none(new_col)and has_elements(*args)and is_disjoint(self.__grouping.grouping_colnames,<EOL>args,<EOL>__DISJOINT_SETS_ERROR__):<EOL><INDENT>return self.__do_aggregate(clazz, new_col, *args)<EOL><DEDENT>", "docstring": "Aggregate the rows of each group into a single value.\n\n:param clazz: name of a class that extends class Callable\n:type clazz: class\n:param new_col: name of the new column\n:type new_col: str\n:param args: list of column names of the object that\n function should be applied to\n:type args: varargs\n:return: returns a new dataframe object with the aggregated value\n:rtype: DataFrame", "id": "f15342:c0:m12"}
{"signature": "def __str__(self):", "body": "return self.__grouping.__str__()<EOL>", "docstring": "ToString method for GroupedDataFrame.\n\n:return: returns the string representation\n:rtype: str", "id": "f15342:c0:m2"}
{"signature": "def has_elements(*args):", "body": "if not args:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return True<EOL>", "docstring": "Check if args has elements.\n\n:param args: a tuple\n:return: returns true if args has elements", "id": "f15343:m2"}
{"signature": "def is_disjoint(set1, set2, warn):", "body": "for elem in set2:<EOL><INDENT>if elem in set1:<EOL><INDENT>raise ValueError(warn)<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Checks if elements of set2 are in set1.\n\n:param set1: a set of values\n:param set2: a set of values\n:param warn: the error message that should be thrown\n when the sets are NOT disjoint\n:return: returns true no elements of set2 are in set1", "id": "f15343:m3"}
{"signature": "def contains_all(set1, set2, warn):", "body": "for elem in set2:<EOL><INDENT>if elem not in set1:<EOL><INDENT>raise ValueError(warn)<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Checks if all elements from set2 are in set1.\n\n:param set1:  a set of values\n:param set2:  a set of values\n:param warn: the error message that should be thrown \n when the sets are not containd\n:return: returns true if all values of set2 are in set1", "id": "f15343:m4"}
{"signature": "def is_callable(func):", "body": "if not issubclass(func, dataframe.Callable):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>return True<EOL>", "docstring": "Check if a function extends callable.\n\n:param func: the function to be checked\n:return: returns true if the function is callable", "id": "f15343:m0"}
{"signature": "def ungroup(self):", "body": "return self.__dataframe<EOL>", "docstring": "Getter for the normal DataFrame object without grouping information.\n\n:return: returns the ungrouped DataFrame\n:rtype: DataFrame", "id": "f15344:c0:m6"}
{"signature": "@property<EOL><INDENT>def groups(self):<DEDENT>", "body": "return self.__groups.values()<EOL>", "docstring": "Getter the values of the group dictionary, i.e. the Group objects\n\n:return: returns the groupings of the rows\n:rtype: list(DataFrameGroup)", "id": "f15344:c0:m5"}
{"signature": "def unpickle(filepath):", "body": "arr = []<EOL>with open(filepath, '<STR_LIT:rb>') as f:<EOL><INDENT>carr = f.read(blosc.MAX_BUFFERSIZE)<EOL>while len(carr) > <NUM_LIT:0>:<EOL><INDENT>arr.append(blosc.decompress(carr))<EOL>carr = f.read(blosc.MAX_BUFFERSIZE)<EOL><DEDENT><DEDENT>return pkl.loads(b\"<STR_LIT>\".join(arr))<EOL>", "docstring": "Decompress and unpickle.", "id": "f15348:m1"}
{"signature": "def task_download_bib():", "body": "return {<EOL>'<STR_LIT>': ['<STR_LIT:U+0020>'.join([<EOL>'<STR_LIT>', '<STR_LIT>', BIBFILE,<EOL>'<STR_LIT>'.format(CITEULIKE_GROUP),<EOL>])],<EOL>'<STR_LIT>': [BIBFILE],<EOL>}<EOL>", "docstring": "Download bibliography from CiteULike group", "id": "f15351:m0"}
{"signature": "def sample_states(<EOL>graph, spanning_cluster=True, model='<STR_LIT>', copy_result=True<EOL>):", "body": "if model != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if spanning_cluster:<EOL><INDENT>auxiliary_node_attributes = nx.get_node_attributes(graph, '<STR_LIT>')<EOL>auxiliary_nodes = auxiliary_node_attributes.keys()<EOL>if not list(auxiliary_nodes):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>spanning_sides = list(set(auxiliary_node_attributes.values()))<EOL>if len(spanning_sides) != <NUM_LIT:2>:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>auxiliary_edge_attributes = nx.get_edge_attributes(graph, '<STR_LIT>')<EOL><DEDENT>if spanning_cluster:<EOL><INDENT>perc_graph = graph.subgraph(<EOL>[<EOL>node for node in graph.nodes_iter()<EOL>if '<STR_LIT>' not in graph.node[node]<EOL>]<EOL>)<EOL><DEDENT>else:<EOL><INDENT>perc_graph = graph<EOL><DEDENT>perc_edges = perc_graph.edges()<EOL>num_nodes = nx.number_of_nodes(perc_graph)<EOL>num_edges = nx.number_of_edges(perc_graph)<EOL>ret = dict()<EOL>ret['<STR_LIT:n>'] = <NUM_LIT:0><EOL>ret['<STR_LIT:N>'] = num_nodes<EOL>ret['<STR_LIT:M>'] = num_edges<EOL>ret['<STR_LIT>'] = <NUM_LIT:1><EOL>ret['<STR_LIT>'] = np.ones(<NUM_LIT:5>) * (num_nodes - <NUM_LIT:1>)<EOL>if spanning_cluster:<EOL><INDENT>ret['<STR_LIT>'] = False<EOL><DEDENT>if copy_result:<EOL><INDENT>yield copy.deepcopy(ret)<EOL><DEDENT>else:<EOL><INDENT>yield ret<EOL><DEDENT>perm_edges = np.random.permutation(num_edges)<EOL>ds = nx.utils.union_find.UnionFind()<EOL>if spanning_cluster:<EOL><INDENT>ds_spanning = nx.utils.union_find.UnionFind()<EOL>side_roots = dict()<EOL>for side in spanning_sides:<EOL><INDENT>nodes = [<EOL>node for (node, node_side) in auxiliary_node_attributes.items()<EOL>if node_side is side<EOL>]<EOL>ds_spanning.union(*nodes)<EOL>side_roots[side] = ds_spanning[nodes[<NUM_LIT:0>]]<EOL><DEDENT>for (edge, edge_side) in 
auxiliary_edge_attributes.items():<EOL><INDENT>ds_spanning.union(side_roots[edge_side], *edge)<EOL><DEDENT>side_roots = [<EOL>ds_spanning[side_root] for side_root in side_roots.values()<EOL>]<EOL><DEDENT>max_cluster_root = next(perc_graph.nodes_iter())<EOL>for n in range(num_edges):<EOL><INDENT>ret['<STR_LIT:n>'] = n + <NUM_LIT:1><EOL>edge_index = perm_edges[n]<EOL>edge = perc_edges[edge_index]<EOL>ret['<STR_LIT>'] = edge<EOL>roots = [<EOL>ds[node] for node in edge<EOL>]<EOL>weights = [<EOL>ds.weights[root] for root in roots<EOL>]<EOL>if roots[<NUM_LIT:0>] is not roots[<NUM_LIT:1>]:<EOL><INDENT>ds.union(*roots)<EOL>if spanning_cluster:<EOL><INDENT>ds_spanning.union(*roots)<EOL>ret['<STR_LIT>'] = (<EOL>ds_spanning[side_roots[<NUM_LIT:0>]] == ds_spanning[side_roots[<NUM_LIT:1>]]<EOL>)<EOL><DEDENT>root = ds[edge[<NUM_LIT:0>]]<EOL>weight = ds.weights[root]<EOL>for i in [<NUM_LIT:0>, <NUM_LIT:1>]:<EOL><INDENT>if roots[i] is max_cluster_root:<EOL><INDENT>continue<EOL><DEDENT>ret['<STR_LIT>'] -= weights[i] ** np.arange(<NUM_LIT:5>)<EOL><DEDENT>if max_cluster_root in roots:<EOL><INDENT>max_cluster_root = root<EOL>ret['<STR_LIT>'] = weight<EOL><DEDENT>else:<EOL><INDENT>if ret['<STR_LIT>'] >= weight:<EOL><INDENT>ret['<STR_LIT>'] += weight ** np.arange(<NUM_LIT:5>)<EOL><DEDENT>else:<EOL><INDENT>max_cluster_root = root<EOL>ret['<STR_LIT>'] += ret['<STR_LIT>'] ** np.arange(<NUM_LIT:5>)<EOL>ret['<STR_LIT>'] = weight<EOL><DEDENT><DEDENT><DEDENT>if copy_result:<EOL><INDENT>yield copy.deepcopy(ret)<EOL><DEDENT>else:<EOL><INDENT>yield ret<EOL><DEDENT><DEDENT>", "docstring": "Generate successive sample states of the percolation model\n\nThis is a :ref:`generator function <python:tut-generators>` to successively\nadd one edge at a time from the graph to the percolation model.\nAt each iteration, it calculates and returns the cluster statistics.\n\nParameters\n----------\ngraph : networkx.Graph\n    The substrate graph on which percolation is to take place\n\nspanning_cluster : bool, 
optional\n    Whether to detect a spanning cluster or not.\n    Defaults to ``True``.\n\nmodel : str, optional\n    The percolation model (either ``'bond'`` or ``'site'``).\n    Defaults to ``'bond'``.\n\n    .. note:: Other models than ``'bond'`` are not supported yet.\n\ncopy_result : bool, optional\n    Whether to return a copy or a reference to the result dictionary.\n    Defaults to ``True``.\n\nYields\n------\nret : dict\n    Cluster statistics\n\nret['n'] : int\n    Number of occupied bonds\n\nret['N'] : int\n    Total number of sites\n\nret['M'] : int\n    Total number of bonds\n\nret['has_spanning_cluster'] : bool\n    ``True`` if there is a spanning cluster, ``False`` otherwise.\n    Only exists if `spanning_cluster` argument is set to ``True``.\n\nret['max_cluster_size'] : int\n    Size of the largest cluster (absolute number of sites)\n\nret['moments'] : 1-D :py:class:`numpy.ndarray` of int\n    Array of size ``5``.\n    The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster\n    size distribution, with ``k`` ranging from ``0`` to ``4``.\n\nRaises\n------\nValueError\n    If `model` does not equal ``'bond'``.\n\nValueError\n    If `spanning_cluster` is ``True``, but `graph` does not contain any\n    auxiliary nodes to detect spanning clusters.\n\nSee also\n--------\n\nmicrocanonical_averages : Evolves multiple sample states in parallel\n\nNotes\n-----\nIterating through this generator is a single run of the Newman-Ziff\nalgorithm. [2]_\nThe first iteration yields the trivial state with :math:`n = 0` occupied\nbonds.\n\nSpanning cluster\n\n    In order to detect a spanning cluster, `graph` needs to contain\n    auxiliary nodes and edges, cf. 
Reference [2]_, Figure 6.\n    The auxiliary nodes and edges have the ``'span'`` `attribute\n    <http://networkx.github.io/documentation/latest/tutorial/tutorial.html#node-attributes>`_.\n    The value is either ``0`` or ``1``, distinguishing the two sides of the\n    graph to span.\n\nRaw moments of the cluster size distribution\n\n    The :math:`k`-th raw moment of the (absolute) cluster size distribution\n    is :math:`\\sum_s' s^k N_s`, where :math:`s` is the cluster size and\n    :math:`N_s` is the number of clusters of size :math:`s`. [3]_\n    The primed sum :math:`\\sum'` signifies that the largest cluster is\n    excluded from the sum. [4]_\n\nReferences\n----------\n.. [2] Newman, M. E. J. & Ziff, R. M. Fast monte carlo algorithm for site\n    or bond percolation. Physical Review E 64, 016706+ (2001),\n    `doi:10.1103/physreve.64.016706 <http://dx.doi.org/10.1103/physreve.64.016706>`_.\n\n.. [3] Stauffer, D. & Aharony, A. Introduction to Percolation Theory (Taylor &\n   Francis, London, 1994), second edn.\n\n.. [4] Binder, K. & Heermann, D. W. Monte Carlo Simulation in Statistical\n   Physics (Springer, Berlin, Heidelberg, 2010),\n   `doi:10.1007/978-3-642-03163-2 <http://dx.doi.org/10.1007/978-3-642-03163-2>`_.", "id": "f15353:m1"}
{"signature": "def percolation_graph(graph, spanning_cluster=True):", "body": "ret = dict()<EOL>ret['<STR_LIT>'] = graph<EOL>ret['<STR_LIT>'] = bool(spanning_cluster)<EOL>if spanning_cluster:<EOL><INDENT>spanning_auxiliary_node_attributes = nx.get_node_attributes(<EOL>graph, '<STR_LIT>'<EOL>)<EOL>ret['<STR_LIT>'] = spanning_auxiliary_node_attributes<EOL>auxiliary_nodes = spanning_auxiliary_node_attributes.keys()<EOL>if not list(auxiliary_nodes):<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>spanning_sides = list(set(spanning_auxiliary_node_attributes.values()))<EOL>if len(spanning_sides) != <NUM_LIT:2>:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>ret['<STR_LIT>'] = spanning_sides<EOL>ret['<STR_LIT>'] = nx.get_edge_attributes(<EOL>graph, '<STR_LIT>'<EOL>)<EOL><DEDENT>if spanning_cluster:<EOL><INDENT>perc_graph = graph.subgraph(<EOL>[<EOL>node for node in graph.nodes_iter()<EOL>if '<STR_LIT>' not in graph.node[node]<EOL>]<EOL>)<EOL><DEDENT>else:<EOL><INDENT>perc_graph = graph<EOL><DEDENT>ret['<STR_LIT>'] = perc_graph<EOL>ret['<STR_LIT>'] = nx.number_of_nodes(perc_graph)<EOL>ret['<STR_LIT>'] = nx.number_of_edges(perc_graph)<EOL>return ret<EOL>", "docstring": "Prepare the (internal) percolation graph from a given graph\n\nHelper function to prepare the given graph for spanning cluster detection\n(if required).\nBasically it strips off the auxiliary nodes and edges again.\nIt also returns fundamental graph quantitities (number of nodes and edges).\n\nParameters\n----------\ngraph\nspanning_cluster\n\nReturns\n-------\nret : tuple", "id": "f15353:m0"}
{"signature": "def microcanonical_averages_arrays(microcanonical_averages):", "body": "ret = dict()<EOL>for n, microcanonical_average in enumerate(microcanonical_averages):<EOL><INDENT>assert n == microcanonical_average['<STR_LIT:n>']<EOL>if n == <NUM_LIT:0>:<EOL><INDENT>num_edges = microcanonical_average['<STR_LIT:M>']<EOL>num_sites = microcanonical_average['<STR_LIT:N>']<EOL>spanning_cluster = ('<STR_LIT>' in microcanonical_average)<EOL>ret['<STR_LIT>'] = np.empty(num_edges + <NUM_LIT:1>)<EOL>ret['<STR_LIT>'] = np.empty((num_edges + <NUM_LIT:1>, <NUM_LIT:2>))<EOL>if spanning_cluster:<EOL><INDENT>ret['<STR_LIT>'] = np.empty(num_edges + <NUM_LIT:1>)<EOL>ret['<STR_LIT>'] = np.empty((num_edges + <NUM_LIT:1>, <NUM_LIT:2>))<EOL><DEDENT>ret['<STR_LIT>'] = np.empty((<NUM_LIT:5>, num_edges + <NUM_LIT:1>))<EOL>ret['<STR_LIT>'] = np.empty((<NUM_LIT:5>, num_edges + <NUM_LIT:1>, <NUM_LIT:2>))<EOL><DEDENT>ret['<STR_LIT>'][n] = microcanonical_average['<STR_LIT>']<EOL>ret['<STR_LIT>'][n] = (<EOL>microcanonical_average['<STR_LIT>']<EOL>)<EOL>if spanning_cluster:<EOL><INDENT>ret['<STR_LIT>'][n] = (<EOL>microcanonical_average['<STR_LIT>']<EOL>)<EOL>ret['<STR_LIT>'][n] = (<EOL>microcanonical_average['<STR_LIT>']<EOL>)<EOL><DEDENT>ret['<STR_LIT>'][:, n] = microcanonical_average['<STR_LIT>']<EOL>ret['<STR_LIT>'][:, n] = microcanonical_average['<STR_LIT>']<EOL><DEDENT>for key in ret:<EOL><INDENT>if '<STR_LIT>' in key:<EOL><INDENT>continue<EOL><DEDENT>ret[key] /= num_sites<EOL><DEDENT>ret['<STR_LIT:M>'] = num_edges<EOL>ret['<STR_LIT:N>'] = num_sites<EOL>return ret<EOL>", "docstring": "Compile microcanonical averages over all iteration steps into single arrays\n\nHelper function to aggregate the microcanonical averages over all iteration\nsteps into single arrays for further processing\n\nParameters\n----------\n\nmicrocanonical_averages : iterable\n   Typically, this is the :func:`microcanonical_averages` generator\n\nReturns\n-------\n\nret : dict\n   Aggregated cluster 
statistics\n\nret['N'] : int\n    Total number of sites\n\nret['M'] : int\n    Total number of bonds\n\nret['spanning_cluster'] : 1-D :py:class:`numpy.ndarray` of float\n    The percolation probability:\n    The normalized average number of runs that have a spanning cluster.\n\nret['spanning_cluster_ci'] : 2-D :py:class:`numpy.ndarray` of float, size 2\n    The lower and upper bounds of the percolation probability.\n\nret['max_cluster_size'] : 1-D :py:class:`numpy.ndarray` of float\n    The percolation strength:\n    Average relative size of the largest cluster\n\nret['max_cluster_size_ci'] : 2-D :py:class:`numpy.ndarray` of float\n    Lower and upper bounds of the normal confidence interval of the\n    percolation strength.\n\nret['moments'] : 2-D :py:class:`numpy.ndarray` of float, shape (5, M + 1)\n    Average raw moments of the (relative) cluster size distribution.\n\nret['moments_ci'] : 3-D :py:class:`numpy.ndarray` of float, shape (5, M + 1, 2)\n    Lower and upper bounds of the normal confidence interval of the raw\n    moments of the (relative) cluster size distribution.\n\nSee Also\n--------\n\nmicrocanonical_averages", "id": "f15353:m9"}
{"signature": "def _microcanonical_average_spanning_cluster(has_spanning_cluster, alpha):", "body": "ret = dict()<EOL>runs = has_spanning_cluster.size<EOL>k = has_spanning_cluster.sum(dtype=np.float)<EOL>ret['<STR_LIT>'] = (<EOL>(k + <NUM_LIT:1>) / (runs + <NUM_LIT:2>)<EOL>)<EOL>ret['<STR_LIT>'] = scipy.stats.beta.ppf(<EOL>[alpha / <NUM_LIT:2>, <NUM_LIT:1> - alpha / <NUM_LIT:2>], k + <NUM_LIT:1>, runs - k + <NUM_LIT:1><EOL>)<EOL>return ret<EOL>", "docstring": "r'''\n    Compute the average number of runs that have a spanning cluster\n\n    Helper function for :func:`microcanonical_averages`\n\n    Parameters\n    ----------\n\n    has_spanning_cluster : 1-D :py:class:`numpy.ndarray` of bool\n        Each entry is the ``has_spanning_cluster`` field of the output of\n        :func:`sample_states`:\n        An entry is ``True`` if there is a spanning cluster in that respective\n        run, and ``False`` otherwise.\n\n    alpha : float\n        Significance level.\n\n    Returns\n    -------\n\n    ret : dict\n        Spanning cluster statistics\n\n    ret['spanning_cluster'] : float\n        The average relative number (Binomial proportion) of runs that have a\n        spanning cluster.\n        This is the Bayesian point estimate of the posterior mean, with a\n        uniform prior.\n\n    ret['spanning_cluster_ci'] : 1-D :py:class:`numpy.ndarray` of float, size 2\n        The lower and upper bounds of the Binomial proportion confidence\n        interval with uniform prior.\n\n    See Also\n    --------\n\n    sample_states : spanning cluster detection\n\n    microcanonical_averages : spanning cluster statistics\n\n    Notes\n    -----\n\n    Averages and confidence intervals for Binomial proportions\n\n    As Cameron [8]_ puts it, the normal approximation to the confidence\n    interval for a Binomial proportion :math:`p` \"suffers a *systematic*\n    decline in performance (...) 
towards extreme values of :math:`p` near\n    :math:`0` and :math:`1`, generating binomial [confidence intervals]\n    with effective coverage far below the desired level.\" (see also\n    References [6]_ and [7]_).\n\n    A different approach to quantifying uncertainty is Bayesian inference.\n    [5]_\n    For :math:`n` independent Bernoulli trails with common success\n    probability :math:`p`, the *likelihood* to have :math:`k` successes\n    given :math:`p` is the binomial distribution\n\n    .. math::\n\n        P(k|p) = \\binom{n}{k} p^k (1-p)^{n-k} \\equiv B(a,b),\n\n    where :math:`B(a, b)` is the *Beta distribution* with parameters\n    :math:`a = k + 1` and :math:`b = n - k + 1`.\n    Assuming a uniform prior :math:`P(p) = 1`, the *posterior* is [5]_\n\n    .. math::\n\n        P(p|k) = P(k|p)=B(a,b).\n\n    A point estimate is the posterior mean\n\n    .. math::\n\n        \\bar{p} = \\frac{k+1}{n+2}\n\n    with the :math:`1 - \\alpha` credible interval :math:`(p_l, p_u)` given\n    by\n\n    .. math::\n\n        \\int_0^{p_l} dp B(a,b) = \\int_{p_u}^1 dp B(a,b) = \\frac{\\alpha}{2}.\n\n    References\n    ----------\n\n    .. [5] Wasserman, L. All of Statistics (Springer New York, 2004),\n       `doi:10.1007/978-0-387-21736-9 <http://dx.doi.org/10.1007/978-0-387-21736-9>`_.\n\n    .. [6] DasGupta, A., Cai, T. T. & Brown, L. D. Interval Estimation for a\n       Binomial Proportion. Statistical Science 16, 101-133 (2001).\n       `doi:10.1214/ss/1009213286 <http://dx.doi.org/10.1214/ss/1009213286>`_.\n\n    .. [7] Agresti, A. & Coull, B. A. Approximate is Better than \"Exact\" for\n       Interval Estimation of Binomial Proportions. The American Statistician\n       52, 119-126 (1998),\n       `doi:10.2307/2685469 <http://dx.doi.org/10.2307/2685469>`_.\n\n    .. [8] Cameron, E. On the Estimation of Confidence Intervals for Binomial\n       Population Proportions in Astronomy: The Simplicity and Superiority of\n       the Bayesian Approach. 
Publications of the Astronomical Society of\n       Australia 28, 128-139 (2011),\n       `doi:10.1071/as10046 <http://dx.doi.org/10.1071/as10046>`_.", "id": "f15353:m3"}
{"signature": "def canonical_averages(ps, microcanonical_averages_arrays):", "body": "num_sites = microcanonical_averages_arrays['<STR_LIT:N>']<EOL>num_edges = microcanonical_averages_arrays['<STR_LIT:M>']<EOL>spanning_cluster = ('<STR_LIT>' in microcanonical_averages_arrays)<EOL>ret = dict()<EOL>ret['<STR_LIT>'] = ps<EOL>ret['<STR_LIT:N>'] = num_sites<EOL>ret['<STR_LIT:M>'] = num_edges<EOL>ret['<STR_LIT>'] = np.empty(ps.size)<EOL>ret['<STR_LIT>'] = np.empty((ps.size, <NUM_LIT:2>))<EOL>if spanning_cluster:<EOL><INDENT>ret['<STR_LIT>'] = np.empty(ps.size)<EOL>ret['<STR_LIT>'] = np.empty((ps.size, <NUM_LIT:2>))<EOL><DEDENT>ret['<STR_LIT>'] = np.empty((<NUM_LIT:5>, ps.size))<EOL>ret['<STR_LIT>'] = np.empty((<NUM_LIT:5>, ps.size, <NUM_LIT:2>))<EOL>for p_index, p in enumerate(ps):<EOL><INDENT>binomials = _binomial_pmf(n=num_edges, p=p)<EOL>for key, value in microcanonical_averages_arrays.items():<EOL><INDENT>if len(key) <= <NUM_LIT:1>:<EOL><INDENT>continue<EOL><DEDENT>if key in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>ret[key][p_index] = np.sum(binomials * value)<EOL><DEDENT>elif key in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>ret[key][p_index] = np.sum(<EOL>np.tile(binomials, (<NUM_LIT:2>, <NUM_LIT:1>)).T * value, axis=<NUM_LIT:0><EOL>)<EOL><DEDENT>elif key == '<STR_LIT>':<EOL><INDENT>ret[key][:, p_index] = np.sum(<EOL>np.tile(binomials, (<NUM_LIT:5>, <NUM_LIT:1>)) * value, axis=<NUM_LIT:1><EOL>)<EOL><DEDENT>elif key == '<STR_LIT>':<EOL><INDENT>ret[key][:, p_index] = np.sum(<EOL>np.rollaxis(np.tile(binomials, (<NUM_LIT:5>, <NUM_LIT:2>, <NUM_LIT:1>)), <NUM_LIT:2>, <NUM_LIT:1>) * value,<EOL>axis=<NUM_LIT:1><EOL>)<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError(<EOL>'<STR_LIT>'.format(value.ndim)<EOL>)<EOL><DEDENT><DEDENT><DEDENT>return ret<EOL>", "docstring": "Compute the canonical cluster statistics from microcanonical statistics\n\nThis is according to Newman and Ziff, Equation (2).\nNote that we also simply average the bounds of the confidence 
intervals\naccording to this formula.\n\nParameters\n----------\n\nps : iterable of float\n   Each entry is a probability for which to form the canonical ensemble\n   and compute the weighted statistics from the microcanonical statistics\n\nmicrocanonical_averages_arrays\n   Typically the output of :func:`microcanonical_averages_arrays`\n\nReturns\n-------\n\nret : dict\n   Canonical ensemble cluster statistics\n\nret['ps'] : iterable of float\n    The parameter `ps`\n\nret['N'] : int\n    Total number of sites\n\nret['M'] : int\n    Total number of bonds\n\nret['spanning_cluster'] : 1-D :py:class:`numpy.ndarray` of float\n    The percolation probability:\n    The normalized average number of runs that have a spanning cluster.\n\nret['spanning_cluster_ci'] : 2-D :py:class:`numpy.ndarray` of float, size 2\n    The lower and upper bounds of the percolation probability.\n\nret['max_cluster_size'] : 1-D :py:class:`numpy.ndarray` of float\n    The percolation strength:\n    Average relative size of the largest cluster\n\nret['max_cluster_size_ci'] : 2-D :py:class:`numpy.ndarray` of float\n    Lower and upper bounds of the normal confidence interval of the\n    percolation strength.\n\nret['moments'] : 2-D :py:class:`numpy.ndarray` of float, shape (5, M + 1)\n    Average raw moments of the (relative) cluster size distribution.\n\nret['moments_ci'] : 3-D :py:class:`numpy.ndarray` of float, shape (5, M + 1, 2)\n    Lower and upper bounds of the normal confidence interval of the raw\n    moments of the (relative) cluster size distribution.\n\nSee Also\n--------\n\nmicrocanonical_averages\n\nmicrocanonical_averages_arrays", "id": "f15353:m11"}
{"signature": "def spanning_2d_grid(length):", "body": "ret = nx.grid_2d_graph(length + <NUM_LIT:2>, length)<EOL>for i in range(length):<EOL><INDENT>ret.node[(<NUM_LIT:0>, i)]['<STR_LIT>'] = <NUM_LIT:0><EOL>ret[(<NUM_LIT:0>, i)][(<NUM_LIT:1>, i)]['<STR_LIT>'] = <NUM_LIT:0><EOL>ret.node[(length + <NUM_LIT:1>, i)]['<STR_LIT>'] = <NUM_LIT:1><EOL>ret[(length + <NUM_LIT:1>, i)][(length, i)]['<STR_LIT>'] = <NUM_LIT:1><EOL><DEDENT>return ret<EOL>", "docstring": "Generate a square lattice with auxiliary nodes for spanning detection\n\nParameters\n----------\n\nlength : int\n   Number of nodes in one dimension, excluding the auxiliary nodes.\n\nReturns\n-------\n\nnetworkx.Graph\n   A square lattice graph with auxiliary nodes for spanning cluster\n   detection\n\nSee Also\n--------\n\nsample_states : spanning cluster detection", "id": "f15353:m8"}
{"signature": "def statistics(<EOL>graph, ps, spanning_cluster=True, model='<STR_LIT>', alpha=alpha_1sigma, runs=<NUM_LIT><EOL>):", "body": "my_microcanonical_averages = microcanonical_averages(<EOL>graph=graph, runs=runs, spanning_cluster=spanning_cluster, model=model,<EOL>alpha=alpha<EOL>)<EOL>my_microcanonical_averages_arrays = microcanonical_averages_arrays(<EOL>my_microcanonical_averages<EOL>)<EOL>return canonical_averages(ps, my_microcanonical_averages_arrays)<EOL>", "docstring": "Helper function to compute percolation statistics\n\nSee Also\n--------\n\ncanonical_averages\n\nmicrocanonical_averages\n\nsample_states", "id": "f15353:m12"}
{"signature": "def _microcanonical_average_moments(moments, alpha):", "body": "ret = dict()<EOL>runs = moments.shape[<NUM_LIT:0>]<EOL>sqrt_n = np.sqrt(runs)<EOL>moments_sample_mean = moments.mean(axis=<NUM_LIT:0>)<EOL>ret['<STR_LIT>'] = moments_sample_mean<EOL>moments_sample_std = moments.std(axis=<NUM_LIT:0>, ddof=<NUM_LIT:1>)<EOL>ret['<STR_LIT>'] = np.empty((<NUM_LIT:5>, <NUM_LIT:2>))<EOL>for k in range(<NUM_LIT:5>):<EOL><INDENT>if moments_sample_std[k]:<EOL><INDENT>old_settings = np.seterr(all='<STR_LIT>')<EOL>ret['<STR_LIT>'][k] = scipy.stats.t.interval(<EOL><NUM_LIT:1> - alpha,<EOL>df=runs - <NUM_LIT:1>,<EOL>loc=moments_sample_mean[k],<EOL>scale=moments_sample_std[k] / sqrt_n<EOL>)<EOL>np.seterr(**old_settings)<EOL><DEDENT>else:<EOL><INDENT>ret['<STR_LIT>'][k] = (<EOL>moments_sample_mean[k] * np.ones(<NUM_LIT:2>)<EOL>)<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "Compute the average moments of the cluster size distributions\n\nHelper function for :func:`microcanonical_averages`\n\nParameters\n----------\n\nmoments : 2-D :py:class:`numpy.ndarray` of int\n    ``moments.shape[1] == 5`.\n    Each array ``moments[r, :]`` is the ``moments`` field of the output of\n    :func:`sample_states`:\n    The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster\n    size distribution.\n\nalpha: float\n    Significance level.\n\nReturns\n-------\n\nret : dict\n    Moment statistics\n\nret['moments'] : 1-D :py:class:`numpy.ndarray` of float, size 5\n    The ``k``-th entry is the average ``k``-th raw moment of the (absolute)\n    cluster size distribution, with ``k`` ranging from ``0`` to ``4``.\n\nret['moments_ci'] : 2-D :py:class:`numpy.ndarray` of float, shape (5,2)\n    ``ret['moments_ci'][k]`` are the lower and upper bounds of the normal\n    confidence interval of the average ``k``-th raw moment of the\n    (absolute) cluster size distribution, with ``k`` ranging from ``0`` to\n    ``4``.\n\nSee Also\n--------\n\nsample_states : computation of 
moments\n\nmicrocanonical_averages : moment statistics", "id": "f15353:m5"}
{"signature": "def spanning_1d_chain(length):", "body": "ret = nx.grid_graph(dim=[int(length + <NUM_LIT:2>)])<EOL>ret.node[<NUM_LIT:0>]['<STR_LIT>'] = <NUM_LIT:0><EOL>ret[<NUM_LIT:0>][<NUM_LIT:1>]['<STR_LIT>'] = <NUM_LIT:0><EOL>ret.node[length + <NUM_LIT:1>]['<STR_LIT>'] = <NUM_LIT:1><EOL>ret[length][length + <NUM_LIT:1>]['<STR_LIT>'] = <NUM_LIT:1><EOL>return ret<EOL>", "docstring": "Generate a linear chain with auxiliary nodes for spanning cluster detection\n\nParameters\n----------\n\nlength : int\n   Number of nodes in the chain, excluding the auxiliary nodes.\n\nReturns\n-------\n\nnetworkx.Graph\n   A linear chain graph with auxiliary nodes for spanning cluster detection\n\nSee Also\n--------\n\nsample_states : spanning cluster detection", "id": "f15353:m7"}
{"signature": "def dummy_hash(x):", "body": "return '<STR_LIT>'.encode('<STR_LIT:utf-8>')<EOL>", "docstring": "Supplies a constant dummy hash", "id": "f15354:m5"}
{"signature": "@TaskGenerator<EOL>def bond_task(<EOL>perc_graph_result, seeds, ps, convolution_factors_tasks_iterator<EOL>):", "body": "<EOL>convolution_factors_tasks = list(convolution_factors_tasks_iterator)<EOL>return reduce(<EOL>percolate.hpc.bond_reduce,<EOL>map(<EOL>bond_run,<EOL>itertools.repeat(perc_graph_result),<EOL>seeds,<EOL>itertools.repeat(ps),<EOL>itertools.repeat(convolution_factors_tasks),<EOL>)<EOL>)<EOL>", "docstring": "Perform a number of runs\n\nThe number of runs is the number of seeds\n\nconvolution_factors_tasks_iterator needs to be an iterator\n\nWe shield the convolution factors tasks from jug value/result mechanism\nby supplying an iterator to the list of tasks for lazy evaluation\nhttp://github.com/luispedro/jug/blob/43f0d80a78f418fd3aa2b8705eaf7c4a5175fff7/jug/task.py#L100\nhttp://github.com/luispedro/jug/blob/43f0d80a78f418fd3aa2b8705eaf7c4a5175fff7/jug/task.py#L455", "id": "f15354:m3"}
{"signature": "def bond_run(perc_graph_result, seed, ps, convolution_factors_tasks):", "body": "microcanonical_statistics = percolate.hpc.bond_microcanonical_statistics(<EOL>seed=seed, **perc_graph_result<EOL>)<EOL>canonical_statistics = np.empty(<EOL>ps.size,<EOL>dtype=percolate.hpc.canonical_statistics_dtype(<EOL>spanning_cluster=SPANNING_CLUSTER,<EOL>)<EOL>)<EOL>for row, convolution_factors_task in zip(<EOL>np.nditer(canonical_statistics, op_flags=['<STR_LIT>']),<EOL>convolution_factors_tasks,<EOL>):<EOL><INDENT>assert not convolution_factors_task.is_loaded()<EOL>convolution_factors_task.load()<EOL>my_convolution_factors = convolution_factors_task.result<EOL>row[...] = percolate.hpc.bond_canonical_statistics(<EOL>microcanonical_statistics=microcanonical_statistics,<EOL>convolution_factors=my_convolution_factors,<EOL>spanning_cluster=SPANNING_CLUSTER,<EOL>)<EOL>convolution_factors_task.unload()<EOL><DEDENT>ret = percolate.hpc.bond_initialize_canonical_averages(<EOL>canonical_statistics=canonical_statistics,<EOL>spanning_cluster=SPANNING_CLUSTER,<EOL>)<EOL>return ret<EOL>", "docstring": "Perform a single run (realization) over all microstates and return the\ncanonical cluster statistics", "id": "f15354:m2"}
{"signature": "def microcanonical_statistics_dtype(spanning_cluster=True):", "body": "fields = list()<EOL>fields.extend([<EOL>('<STR_LIT:n>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>])<EOL>if spanning_cluster:<EOL><INDENT>fields.extend([<EOL>('<STR_LIT>', '<STR_LIT:bool>'),<EOL>])<EOL><DEDENT>fields.extend([<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>])<EOL>return _ndarray_dtype(fields)<EOL>", "docstring": "Return the numpy structured array data type for sample states\n\nHelper function\n\nParameters\n----------\nspanning_cluster : bool, optional\n    Whether to detect a spanning cluster or not.\n    Defaults to ``True``.\n\nReturns\n-------\nret : list of pairs of str\n    A list of tuples of field names and data types to be used as ``dtype``\n    argument in numpy ndarray constructors\n\nSee Also\n--------\nhttp://docs.scipy.org/doc/numpy/user/basics.rec.html\ncanonical_statistics_dtype", "id": "f15358:m1"}
{"signature": "def bond_initialize_canonical_averages(<EOL>canonical_statistics, **kwargs<EOL>):", "body": "<EOL>spanning_cluster = (<EOL>'<STR_LIT>' in canonical_statistics.dtype.names<EOL>)<EOL>ret = np.empty_like(<EOL>canonical_statistics,<EOL>dtype=canonical_averages_dtype(spanning_cluster=spanning_cluster),<EOL>)<EOL>ret['<STR_LIT>'] = <NUM_LIT:1><EOL>if spanning_cluster:<EOL><INDENT>ret['<STR_LIT>'] = (<EOL>canonical_statistics['<STR_LIT>']<EOL>)<EOL>ret['<STR_LIT>'] = <NUM_LIT:0.0><EOL><DEDENT>ret['<STR_LIT>'] = (<EOL>canonical_statistics['<STR_LIT>']<EOL>)<EOL>ret['<STR_LIT>'] = <NUM_LIT:0.0><EOL>ret['<STR_LIT>'] = canonical_statistics['<STR_LIT>']<EOL>ret['<STR_LIT>'] = <NUM_LIT:0.0><EOL>return ret<EOL>", "docstring": "Initialize the canonical averages from a single-run cluster statistics\n\nParameters\n----------\ncanonical_statistics : 1-D structured ndarray\n    Typically contains the canonical statistics for a range of values\n    of the occupation probability ``p``.\n    The dtype is the result of `canonical_statistics_dtype`.\n\nReturns\n-------\nret : structured ndarray\n    The dype is the result of `canonical_averages_dtype`.\n\nret['number_of_runs'] : 1-D ndarray of int\n    Equals ``1`` (initial run).\n\nret['percolation_probability_mean'] : 1-D array of float\n    Equals ``canonical_statistics['percolation_probability']``\n    (if ``percolation_probability`` is present)\n\nret['percolation_probability_m2'] : 1-D array of float\n    Each entry is ``0.0``\n\nret['max_cluster_size_mean'] : 1-D array of float\n    Equals ``canonical_statistics['max_cluster_size']``\n\nret['max_cluster_size_m2'] : 1-D array of float\n    Each entry is ``0.0``\n\nret['moments_mean'] : 2-D array of float\n    Equals ``canonical_statistics['moments']``\n\nret['moments_m2'] : 2-D array of float\n    Each entry is ``0.0``\n\nSee Also\n--------\ncanonical_averages_dtype\nbond_canonical_statistics", "id": "f15358:m7"}
{"signature": "def finalize_canonical_averages(<EOL>number_of_nodes, ps, canonical_averages, alpha,<EOL>):", "body": "spanning_cluster = (<EOL>(<EOL>'<STR_LIT>' in<EOL>canonical_averages.dtype.names<EOL>) and<EOL>'<STR_LIT>' in canonical_averages.dtype.names<EOL>)<EOL>ret = np.empty_like(<EOL>canonical_averages,<EOL>dtype=finalized_canonical_averages_dtype(<EOL>spanning_cluster=spanning_cluster<EOL>),<EOL>)<EOL>n = canonical_averages['<STR_LIT>']<EOL>sqrt_n = np.sqrt(canonical_averages['<STR_LIT>'])<EOL>ret['<STR_LIT>'] = n<EOL>ret['<STR_LIT:p>'] = ps<EOL>ret['<STR_LIT>'] = alpha<EOL>def _transform(<EOL>original_key, final_key=None, normalize=False, transpose=False,<EOL>):<EOL><INDENT>if final_key is None:<EOL><INDENT>final_key = original_key<EOL><DEDENT>keys_mean = [<EOL>'<STR_LIT>'.format(key)<EOL>for key in [original_key, final_key]<EOL>]<EOL>keys_std = [<EOL>'<STR_LIT>'.format(original_key),<EOL>'<STR_LIT>'.format(final_key),<EOL>]<EOL>key_ci = '<STR_LIT>'.format(final_key)<EOL>ret[keys_mean[<NUM_LIT:1>]] = canonical_averages[keys_mean[<NUM_LIT:0>]]<EOL>if normalize:<EOL><INDENT>ret[keys_mean[<NUM_LIT:1>]] /= number_of_nodes<EOL><DEDENT>array = canonical_averages[keys_std[<NUM_LIT:0>]]<EOL>result = np.sqrt(<EOL>(array.T if transpose else array) / (n - <NUM_LIT:1>)<EOL>)<EOL>ret[keys_std[<NUM_LIT:1>]] = (<EOL>result.T if transpose else result<EOL>)<EOL>if normalize:<EOL><INDENT>ret[keys_std[<NUM_LIT:1>]] /= number_of_nodes<EOL><DEDENT>array = ret[keys_std[<NUM_LIT:1>]]<EOL>scale = (array.T if transpose else array) / sqrt_n<EOL>array = ret[keys_mean[<NUM_LIT:1>]]<EOL>mean = (array.T if transpose else array)<EOL>result = scipy.stats.t.interval(<EOL><NUM_LIT:1> - alpha,<EOL>df=n - <NUM_LIT:1>,<EOL>loc=mean,<EOL>scale=scale,<EOL>)<EOL>(<EOL>ret[key_ci][..., <NUM_LIT:0>], ret[key_ci][..., <NUM_LIT:1>]<EOL>) = ([my_array.T for my_array in result] if transpose else result)<EOL><DEDENT>if 
spanning_cluster:<EOL><INDENT>_transform('<STR_LIT>')<EOL><DEDENT>_transform('<STR_LIT>', '<STR_LIT>', normalize=True)<EOL>_transform('<STR_LIT>', normalize=True, transpose=True)<EOL>return ret<EOL>", "docstring": "Finalize canonical averages", "id": "f15358:m10"}
{"signature": "def bond_microcanonical_statistics(<EOL>perc_graph, num_nodes, num_edges, seed,<EOL>spanning_cluster=True,<EOL>auxiliary_node_attributes=None, auxiliary_edge_attributes=None,<EOL>spanning_sides=None,<EOL>**kwargs<EOL>):", "body": "<EOL>sample_states = bond_sample_states(<EOL>perc_graph=perc_graph,<EOL>num_nodes=num_nodes,<EOL>num_edges=num_edges,<EOL>seed=seed,<EOL>spanning_cluster=spanning_cluster,<EOL>auxiliary_node_attributes=auxiliary_node_attributes,<EOL>auxiliary_edge_attributes=auxiliary_edge_attributes,<EOL>spanning_sides=spanning_sides,<EOL>)<EOL>return np.fromiter(<EOL>sample_states,<EOL>dtype=microcanonical_statistics_dtype(spanning_cluster),<EOL>count=num_edges + <NUM_LIT:1><EOL>)<EOL>", "docstring": "Evolve a single run over all microstates (bond occupation numbers)\n\nReturn the cluster statistics for each microstate\n\nParameters\n----------\nperc_graph : networkx.Graph\n    The substrate graph on which percolation is to take place\n\nnum_nodes : int\n    Number ``N`` of sites in the graph\n\nnum_edges : int\n    Number ``M`` of bonds in the graph\n\nseed : {None, int, array_like}\n    Random seed initializing the pseudo-random number generator.\n    Piped through to `numpy.random.RandomState` constructor.\n\nspanning_cluster : bool, optional\n    Whether to detect a spanning cluster or not.\n    Defaults to ``True``.\n\nauxiliary_node_attributes : optional\n    Value of ``networkx.get_node_attributes(graph, 'span')``\n\nauxiliary_edge_attributes : optional\n    Value of ``networkx.get_edge_attributes(graph, 'span')``\n\nspanning_sides : list, optional\n    List of keys (attribute values) of the two sides of the auxiliary\n    nodes.\n    Return value of ``list(set(auxiliary_node_attributes.values()))``\n\nReturns\n-------\nret : ndarray of size ``num_edges + 1``\n    Structured array with dtype ``dtype=[('has_spanning_cluster', 'bool'),\n    ('max_cluster_size', 'uint32'), ('moments', 'uint64', 5)]``\n\nret['n'] : ndarray of int\n    
The number of bonds added at the particular iteration\n\nret['edge'] : ndarray of int\n    The index of the edge added at the particular iteration.\n    Note that ``ret['edge'][0]`` is undefined!\n\nret['has_spanning_cluster'] : ndarray of bool\n    ``True`` if there is a spanning cluster, ``False`` otherwise.\n    Only exists if `spanning_cluster` argument is set to ``True``.\n\nret['max_cluster_size'] : int\n    Size of the largest cluster (absolute number of sites)\n\nret['moments'] : 2-D :py:class:`numpy.ndarray` of int\n    Array of shape ``(num_edges + 1, 5)``.\n    The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster\n    size distribution, with ``k`` ranging from ``0`` to ``4``.\n\nSee also\n--------\n\nbond_sample_states\nmicrocanonical_statistics_dtype\n\nnumpy.random.RandomState", "id": "f15358:m3"}
{"signature": "def bond_sample_states(<EOL>perc_graph, num_nodes, num_edges, seed, spanning_cluster=True,<EOL>auxiliary_node_attributes=None, auxiliary_edge_attributes=None,<EOL>spanning_sides=None,<EOL>**kwargs<EOL>):", "body": "<EOL>rng = np.random.RandomState(seed=seed)<EOL>if spanning_cluster:<EOL><INDENT>if len(spanning_sides) != <NUM_LIT:2>:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT><DEDENT>perc_edges = perc_graph.edges()<EOL>perm_edges = rng.permutation(num_edges)<EOL>ret = np.empty(<EOL><NUM_LIT:1>, dtype=microcanonical_statistics_dtype(spanning_cluster)<EOL>)<EOL>ret['<STR_LIT:n>'] = <NUM_LIT:0><EOL>ret['<STR_LIT>'] = <NUM_LIT:1><EOL>ret['<STR_LIT>'] = np.ones(<NUM_LIT:5>, dtype='<STR_LIT>') * (num_nodes - <NUM_LIT:1>)<EOL>if spanning_cluster:<EOL><INDENT>ret['<STR_LIT>'] = False<EOL><DEDENT>yield ret<EOL>ds = nx.utils.union_find.UnionFind()<EOL>if spanning_cluster:<EOL><INDENT>ds_spanning = nx.utils.union_find.UnionFind()<EOL>side_roots = dict()<EOL>for side in spanning_sides:<EOL><INDENT>nodes = [<EOL>node for (node, node_side) in auxiliary_node_attributes.items()<EOL>if node_side is side<EOL>]<EOL>ds_spanning.union(*nodes)<EOL>side_roots[side] = ds_spanning[nodes[<NUM_LIT:0>]]<EOL><DEDENT>for (edge, edge_side) in auxiliary_edge_attributes.items():<EOL><INDENT>ds_spanning.union(side_roots[edge_side], *edge)<EOL><DEDENT>side_roots = [<EOL>ds_spanning[side_root] for side_root in side_roots.values()<EOL>]<EOL><DEDENT>max_cluster_root = next(perc_graph.nodes_iter())<EOL>for n in range(num_edges):<EOL><INDENT>ret['<STR_LIT:n>'] += <NUM_LIT:1><EOL>edge_index = perm_edges[n]<EOL>edge = perc_edges[edge_index]<EOL>ret['<STR_LIT>'] = edge_index<EOL>roots = [<EOL>ds[node] for node in edge<EOL>]<EOL>weights = [<EOL>ds.weights[root] for root in roots<EOL>]<EOL>if roots[<NUM_LIT:0>] is not roots[<NUM_LIT:1>]:<EOL><INDENT>ds.union(*roots)<EOL>if spanning_cluster:<EOL><INDENT>ds_spanning.union(*roots)<EOL>ret['<STR_LIT>'] = 
(<EOL>ds_spanning[side_roots[<NUM_LIT:0>]] == ds_spanning[side_roots[<NUM_LIT:1>]]<EOL>)<EOL><DEDENT>root = ds[edge[<NUM_LIT:0>]]<EOL>weight = ds.weights[root]<EOL>for i in [<NUM_LIT:0>, <NUM_LIT:1>]:<EOL><INDENT>if roots[i] is max_cluster_root:<EOL><INDENT>continue<EOL><DEDENT>ret['<STR_LIT>'] -= weights[i] ** np.arange(<NUM_LIT:5>, dtype='<STR_LIT>')<EOL><DEDENT>if max_cluster_root in roots:<EOL><INDENT>max_cluster_root = root<EOL>ret['<STR_LIT>'] = weight<EOL><DEDENT>else:<EOL><INDENT>if ret['<STR_LIT>'] >= weight:<EOL><INDENT>ret['<STR_LIT>'] += weight ** np.arange(<NUM_LIT:5>, dtype='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>max_cluster_root = root<EOL>ret['<STR_LIT>'] += ret['<STR_LIT>'] ** np.arange(<EOL><NUM_LIT:5>, dtype='<STR_LIT>'<EOL>)<EOL>ret['<STR_LIT>'] = weight<EOL><DEDENT><DEDENT><DEDENT>yield ret<EOL><DEDENT>", "docstring": "Generate successive sample states of the bond percolation model\n\nThis is a :ref:`generator function <python:tut-generators>` to successively\nadd one edge at a time from the graph to the percolation model.\nAt each iteration, it calculates and returns the cluster statistics.\nCAUTION: it returns a reference to the internal array, not a copy.\n\nParameters\n----------\nperc_graph : networkx.Graph\n    The substrate graph on which percolation is to take place\n\nnum_nodes : int\n    Number ``N`` of sites in the graph\n\nnum_edges : int\n    Number ``M`` of bonds in the graph\n\nseed : {None, int, array_like}\n    Random seed initializing the pseudo-random number generator.\n    Piped through to `numpy.random.RandomState` constructor.\n\nspanning_cluster : bool, optional\n    Whether to detect a spanning cluster or not.\n    Defaults to ``True``.\n\nauxiliary_node_attributes : optional\n    Return value of ``networkx.get_node_attributes(graph, 'span')``\n\nauxiliary_edge_attributes : optional\n    Return value of ``networkx.get_edge_attributes(graph, 'span')``\n\nspanning_sides : list, optional\n    List of keys (attribute 
values) of the two sides of the auxiliary\n    nodes.\n    Return value of ``list(set(auxiliary_node_attributes.values()))``\n\nYields\n------\nret : ndarray\n    Structured array with dtype ``dtype=[('has_spanning_cluster', 'bool'),\n    ('max_cluster_size', 'uint32'), ('moments', 'int64', 5)]``\n\nret['n'] : ndarray of int\n    The number of bonds added at the particular iteration\n\nret['edge'] : ndarray of int\n    The index of the edge added at the particular iteration\n    Note that in the first step, when ``ret['n'] == 0``, this value is\n    undefined!\n\nret['has_spanning_cluster'] : ndarray of bool\n    ``True`` if there is a spanning cluster, ``False`` otherwise.\n    Only exists if `spanning_cluster` argument is set to ``True``.\n\nret['max_cluster_size'] : int\n    Size of the largest cluster (absolute number of sites)\n\nret['moments'] : 1-D :py:class:`numpy.ndarray` of int\n    Array of size ``5``.\n    The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster\n    size distribution, with ``k`` ranging from ``0`` to ``4``.\n\nRaises\n------\nValueError\n    If `spanning_cluster` is ``True``, but `graph` does not contain any\n    auxiliary nodes to detect spanning clusters.\n\nSee also\n--------\n\nnumpy.random.RandomState\n\nmicrocanonical_statistics_dtype\n\nNotes\n-----\nIterating through this generator is a single run of the Newman-Ziff\nalgorithm. [12]_\nThe first iteration yields the trivial state with :math:`n = 0` occupied\nbonds.\n\nSpanning cluster\n\n    In order to detect a spanning cluster, `graph` needs to contain\n    auxiliary nodes and edges, cf. 
Reference [12]_, Figure 6.\n    The auxiliary nodes and edges have the ``'span'`` `attribute\n    <http://networkx.github.io/documentation/latest/tutorial/tutorial.html#node-attributes>`_.\n    The value is either ``0`` or ``1``, distinguishing the two sides of the\n    graph to span.\n\nRaw moments of the cluster size distribution\n\n    The :math:`k`-th raw moment of the (absolute) cluster size distribution\n    is :math:`\\sum_s' s^k N_s`, where :math:`s` is the cluster size and\n    :math:`N_s` is the number of clusters of size :math:`s`. [13]_\n    The primed sum :math:`\\sum'` signifies that the largest cluster is\n    excluded from the sum. [14]_\n\nReferences\n----------\n.. [12] Newman, M. E. J. & Ziff, R. M. Fast monte carlo algorithm for site\n    or bond percolation. Physical Review E 64, 016706+ (2001),\n    `doi:10.1103/physreve.64.016706 <http://dx.doi.org/10.1103/physreve.64.016706>`_.\n\n.. [13] Stauffer, D. & Aharony, A. Introduction to Percolation Theory (Taylor &\n   Francis, London, 1994), second edn.\n\n.. [14] Binder, K. & Heermann, D. W. Monte Carlo Simulation in Statistical\n   Physics (Springer, Berlin, Heidelberg, 2010),\n   `doi:10.1007/978-3-642-03163-2 <http://dx.doi.org/10.1007/978-3-642-03163-2>`_.", "id": "f15358:m2"}
{"signature": "def finalized_canonical_averages_dtype(spanning_cluster=True):", "body": "fields = list()<EOL>fields.extend([<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT:p>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>])<EOL>if spanning_cluster:<EOL><INDENT>fields.extend([<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>])<EOL><DEDENT>fields.extend([<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>])<EOL>return _ndarray_dtype(fields)<EOL>", "docstring": "The NumPy Structured Array type for finalized canonical averages over\nseveral runs\n\nHelper function\n\nParameters\n----------\nspanning_cluster : bool, optional\n    Whether to detect a spanning cluster or not.\n    Defaults to ``True``.\n\nReturns\n-------\nret : list of pairs of str\n    A list of tuples of field names and data types to be used as ``dtype``\n    argument in numpy ndarray constructors\n\nSee Also\n--------\nhttp://docs.scipy.org/doc/numpy/user/basics.rec.html\ncanonical_averages_dtype", "id": "f15358:m9"}
{"signature": "def bond_canonical_statistics(<EOL>microcanonical_statistics,<EOL>convolution_factors,<EOL>**kwargs<EOL>):", "body": "<EOL>spanning_cluster = (<EOL>'<STR_LIT>' in microcanonical_statistics.dtype.names<EOL>)<EOL>ret = np.empty(<NUM_LIT:1>, dtype=canonical_statistics_dtype(spanning_cluster))<EOL>if spanning_cluster:<EOL><INDENT>ret['<STR_LIT>'] = np.sum(<EOL>convolution_factors *<EOL>microcanonical_statistics['<STR_LIT>']<EOL>)<EOL><DEDENT>ret['<STR_LIT>'] = np.sum(<EOL>convolution_factors *<EOL>microcanonical_statistics['<STR_LIT>']<EOL>)<EOL>ret['<STR_LIT>'] = np.sum(<EOL>convolution_factors[:, np.newaxis] *<EOL>microcanonical_statistics['<STR_LIT>'],<EOL>axis=<NUM_LIT:0>,<EOL>)<EOL>return ret<EOL>", "docstring": "canonical cluster statistics for a single run and a single probability\n\nParameters\n----------\n\nmicrocanonical_statistics : ndarray\n    Return value of `bond_microcanonical_statistics`\n\nconvolution_factors : 1-D array_like\n    The coefficients of the convolution for the given probabilty ``p``\n    and for each occupation number ``n``.\n\nReturns\n-------\nret : ndarray of size ``1``\n    Structured array with dtype as returned by\n    `canonical_statistics_dtype`\n\nret['percolation_probability'] : ndarray of float\n    The \"percolation probability\" of this run at the value of ``p``.\n    Only exists if `microcanonical_statistics` argument has the\n    ``has_spanning_cluster`` field.\n\nret['max_cluster_size'] : ndarray of int\n    Weighted size of the largest cluster (absolute number of sites)\n\nret['moments'] : 1-D :py:class:`numpy.ndarray` of float\n    Array of size ``5``.\n    The ``k``-th entry is the weighted ``k``-th raw moment of the\n    (absolute) cluster size distribution, with ``k`` ranging from ``0`` to\n    ``4``.\n\nSee Also\n--------\n\nbond_microcanonical_statistics\ncanonical_statistics_dtype", "id": "f15358:m5"}
{"signature": "def canonical_averages_dtype(spanning_cluster=True):", "body": "fields = list()<EOL>fields.extend([<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>])<EOL>if spanning_cluster:<EOL><INDENT>fields.extend([<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>])<EOL><DEDENT>fields.extend([<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>])<EOL>return _ndarray_dtype(fields)<EOL>", "docstring": "The NumPy Structured Array type for canonical averages over several\nruns\n\nHelper function\n\nParameters\n----------\nspanning_cluster : bool, optional\n    Whether to detect a spanning cluster or not.\n    Defaults to ``True``.\n\nReturns\n-------\nret : list of pairs of str\n    A list of tuples of field names and data types to be used as ``dtype``\n    argument in numpy ndarray constructors\n\nSee Also\n--------\nhttp://docs.scipy.org/doc/numpy/user/basics.rec.html\ncanonical_statistics_dtype\nfinalized_canonical_averages_dtype", "id": "f15358:m6"}
{"signature": "def bond_reduce(row_a, row_b):", "body": "spanning_cluster = (<EOL>'<STR_LIT>' in row_a.dtype.names and<EOL>'<STR_LIT>' in row_b.dtype.names and<EOL>'<STR_LIT>' in row_a.dtype.names and<EOL>'<STR_LIT>' in row_b.dtype.names<EOL>)<EOL>ret = np.empty_like(row_a)<EOL>def _reducer(key, transpose=False):<EOL><INDENT>mean_key = '<STR_LIT>'.format(key)<EOL>m2_key = '<STR_LIT>'.format(key)<EOL>res = simoa.stats.online_variance(*[<EOL>(<EOL>row['<STR_LIT>'],<EOL>row[mean_key].T if transpose else row[mean_key],<EOL>row[m2_key].T if transpose else row[m2_key],<EOL>)<EOL>for row in [row_a, row_b]<EOL>])<EOL>(<EOL>ret[mean_key],<EOL>ret[m2_key],<EOL>) = (<EOL>res[<NUM_LIT:1>].T,<EOL>res[<NUM_LIT:2>].T,<EOL>) if transpose else res[<NUM_LIT:1>:]<EOL><DEDENT>if spanning_cluster:<EOL><INDENT>_reducer('<STR_LIT>')<EOL><DEDENT>_reducer('<STR_LIT>')<EOL>_reducer('<STR_LIT>', transpose=True)<EOL>ret['<STR_LIT>'] = row_a['<STR_LIT>'] + row_b['<STR_LIT>']<EOL>return ret<EOL>", "docstring": "Reduce the canonical averages over several runs\n\nThis is a \"true\" reducer.\nIt is associative and commutative.\n\nThis is a wrapper around `simoa.stats.online_variance`.\n\nParameters\n----------\nrow_a, row_b : structured ndarrays\n    Output of this function, or initial input from\n    `bond_initialize_canonical_averages`\n\nReturns\n-------\nret : structured ndarray\n    Array is of dtype as returned by `canonical_averages_dtype`\n\nSee Also\n--------\nbond_initialize_canonical_averages\ncanonical_averages_dtype\nsimoa.stats.online_variance", "id": "f15358:m8"}
{"signature": "def full_suite():", "body": "logging.basicConfig(filename='<STR_LIT>', level=logging.INFO,<EOL>format='<STR_LIT>')<EOL>from .misc import MiscTestCase<EOL>from . import uploader<EOL>miscsuite = unittest.TestLoader().loadTestsFromTestCase(MiscTestCase)<EOL>uploadersuite = unittest.TestLoader().loadTestsFromModule(uploader)<EOL>return unittest.TestSuite([miscsuite, uploadersuite])<EOL>", "docstring": "creates a full suite of tests", "id": "f15362:m1"}
{"signature": "def terminal(port=default_port(), baud='<STR_LIT>'):", "body": "testargs = ['<STR_LIT>', port, baud]<EOL>sys.argv = testargs<EOL>miniterm.main()<EOL>", "docstring": "Launch minterm from pyserial", "id": "f15364:m0"}
{"signature": "def write_file(self, path, destination='<STR_LIT>', verify='<STR_LIT:none>'):", "body": "filename = os.path.basename(path)<EOL>if not destination:<EOL><INDENT>destination = filename<EOL><DEDENT>log.info('<STR_LIT>', path, destination)<EOL>self.__writeln(\"<STR_LIT>\")<EOL>res = self.__expect('<STR_LIT>')<EOL>if not res.endswith('<STR_LIT>'):<EOL><INDENT>log.error('<STR_LIT>', res)<EOL>raise CommunicationTimeout('<STR_LIT>', res)<EOL><DEDENT>log.debug('<STR_LIT>', destination)<EOL>self.__write(destination + '<STR_LIT:\\x00>', True)<EOL>if not self.__got_ack():<EOL><INDENT>log.error('<STR_LIT>')<EOL>raise NoAckException('<STR_LIT>')<EOL><DEDENT>content = from_file(path)<EOL>log.debug('<STR_LIT>', len(content), filename)<EOL>pos = <NUM_LIT:0><EOL>chunk_size = <NUM_LIT><EOL>while pos < len(content):<EOL><INDENT>rest = len(content) - pos<EOL>if rest > chunk_size:<EOL><INDENT>rest = chunk_size<EOL><DEDENT>data = content[pos:pos+rest]<EOL>if not self.__write_chunk(data):<EOL><INDENT>resp = self.__expect()<EOL>log.error('<STR_LIT>', resp, hexify(resp))<EOL>raise BadResponseException('<STR_LIT>', ACK, resp)<EOL><DEDENT>pos += chunk_size<EOL><DEDENT>log.debug('<STR_LIT>')<EOL>self.__write_chunk('<STR_LIT>')<EOL>if verify != '<STR_LIT:none>':<EOL><INDENT>self.verify_file(path, destination, verify)<EOL><DEDENT>", "docstring": "sends a file to the device using the transfer protocol", "id": "f15366:c0:m12"}
{"signature": "def read_file(self, filename, destination='<STR_LIT>'):", "body": "if not destination:<EOL><INDENT>destination = filename<EOL><DEDENT>log.info('<STR_LIT>', filename, destination)<EOL>data = self.download_file(filename)<EOL>log.info(destination)<EOL>if not os.path.exists(os.path.dirname(destination)):<EOL><INDENT>try:<EOL><INDENT>os.makedirs(os.path.dirname(destination))<EOL><DEDENT>except OSError as e:  <EOL><INDENT>if e.errno != errno.EEXIST:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT>with open(destination, '<STR_LIT:w>') as fil:<EOL><INDENT>fil.write(data)<EOL><DEDENT>", "docstring": "reading data from device into local file", "id": "f15366:c0:m11"}
{"signature": "def file_list(self):", "body": "log.info('<STR_LIT>')<EOL>res = self.__exchange(LIST_FILES)<EOL>res = res.split('<STR_LIT:\\r\\n>')<EOL>res = res[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL>files = []<EOL>for line in res:<EOL><INDENT>files.append(line.split('<STR_LIT:\\t>'))<EOL><DEDENT>return files<EOL>", "docstring": "list files on the device", "id": "f15366:c0:m19"}
{"signature": "def __write(self, output, binary=False):", "body": "if not binary:<EOL><INDENT>log.debug('<STR_LIT>', output)<EOL><DEDENT>else:<EOL><INDENT>log.debug('<STR_LIT>', hexify(output))<EOL><DEDENT>self._port.write(output)<EOL>self._port.flush()<EOL>", "docstring": "write data on the nodemcu port. If 'binary' is True the debug log\n        will show the intended output as hex, otherwise as string", "id": "f15366:c0:m5"}
{"signature": "def file_print(self, filename):", "body": "log.info('<STR_LIT>' + filename)<EOL>res = self.__exchange(PRINT_FILE.format(filename=filename))<EOL>log.info(res)<EOL>return res<EOL>", "docstring": "Prints a file on the device to console", "id": "f15366:c0:m22"}
{"signature": "def __got_ack(self):", "body": "log.debug('<STR_LIT>')<EOL>res = self._port.read(<NUM_LIT:1>)<EOL>log.debug('<STR_LIT>', hexify(res))<EOL>return res == ACK<EOL>", "docstring": "Returns true if ACK is received", "id": "f15366:c0:m15"}
{"signature": "def __set_baudrate(self, baud):", "body": "log.info('<STR_LIT>', baud)<EOL>self.__writeln(UART_SETUP.format(baud=baud))<EOL>time.sleep(<NUM_LIT:0.1>)<EOL>try:<EOL><INDENT>self._port.setBaudrate(baud)<EOL><DEDENT>except AttributeError:<EOL><INDENT>self._port.baudrate = baud<EOL><DEDENT>", "docstring": "setting baudrate if supported", "id": "f15366:c0:m1"}
{"signature": "def exec_file(self, path):", "body": "filename = os.path.basename(path)<EOL>log.info('<STR_LIT>', filename)<EOL>content = from_file(path).replace('<STR_LIT:\\r>', '<STR_LIT>').split('<STR_LIT:\\n>')<EOL>res = '<STR_LIT>'<EOL>for line in content:<EOL><INDENT>line = line.rstrip('<STR_LIT:\\n>')<EOL>retlines = (res + self.__exchange(line)).splitlines()<EOL>res = retlines.pop()<EOL>for lin in retlines:<EOL><INDENT>log.info(lin)<EOL><DEDENT><DEDENT>log.info(res)<EOL>", "docstring": "execute the lines in the local file 'path", "id": "f15366:c0:m14"}
{"signature": "def verify_file(self, path, destination, verify='<STR_LIT:none>'):", "body": "content = from_file(path)<EOL>log.info('<STR_LIT>' % verify)<EOL>if verify == '<STR_LIT>':<EOL><INDENT>data = self.download_file(destination)<EOL>if content != data:<EOL><INDENT>log.error('<STR_LIT>')<EOL>raise VerificationError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>log.info('<STR_LIT>')<EOL><DEDENT><DEDENT>elif verify == '<STR_LIT>':<EOL><INDENT>data = self.__exchange('<STR_LIT>'+destination+'<STR_LIT>').splitlines()[<NUM_LIT:1>]<EOL>log.info('<STR_LIT>', data)<EOL>filehashhex = hashlib.sha1(content.encode(ENCODING)).hexdigest()<EOL>log.info('<STR_LIT>', filehashhex)<EOL>if data != filehashhex:<EOL><INDENT>log.error('<STR_LIT>')<EOL>raise VerificationError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>log.info('<STR_LIT>')<EOL><DEDENT><DEDENT>elif verify != '<STR_LIT:none>':<EOL><INDENT>raise Exception(verify + '<STR_LIT>')<EOL><DEDENT>", "docstring": "Tries to verify if path has same checksum as destination.\n            Valid options for verify is 'raw', 'sha1' or 'none'", "id": "f15366:c0:m13"}
{"signature": "def file_compile(self, path):", "body": "log.info('<STR_LIT>'+path)<EOL>cmd = '<STR_LIT>' % path<EOL>res = self.__exchange(cmd)<EOL>log.info(res)<EOL>return res<EOL>", "docstring": "Compiles a file specified by path on the device", "id": "f15366:c0:m25"}
{"signature": "def operation_download(uploader, sources):", "body": "sources, destinations = destination_from_source(sources, False)<EOL>print('<STR_LIT>', sources)<EOL>print('<STR_LIT>', destinations)<EOL>if len(destinations) == len(sources):<EOL><INDENT>if uploader.prepare():<EOL><INDENT>for filename, dst in zip(sources, destinations):<EOL><INDENT>uploader.read_file(filename, dst)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>log.info('<STR_LIT>')<EOL>", "docstring": "The download operation", "id": "f15367:m2"}
{"signature": "def arg_auto_int(value):", "body": "return int(value, <NUM_LIT:0>)<EOL>", "docstring": "parsing function for integer arguments", "id": "f15367:m5"}
{"signature": "def operation_file(uploader, cmd, filename='<STR_LIT>'):", "body": "if cmd == '<STR_LIT:list>':<EOL><INDENT>operation_list(uploader)<EOL><DEDENT>if cmd == '<STR_LIT>':<EOL><INDENT>for path in filename:<EOL><INDENT>uploader.file_do(path)<EOL><DEDENT><DEDENT>elif cmd == '<STR_LIT>':<EOL><INDENT>uploader.file_format()<EOL><DEDENT>elif cmd == '<STR_LIT>':<EOL><INDENT>for path in filename:<EOL><INDENT>uploader.file_remove(path)<EOL><DEDENT><DEDENT>elif cmd == '<STR_LIT>':<EOL><INDENT>for path in filename:<EOL><INDENT>uploader.file_print(path)<EOL><DEDENT><DEDENT>", "docstring": "File operations", "id": "f15367:m4"}
{"signature": "def operation_upload(uploader, sources, verify, do_compile, do_file, do_restart):", "body": "sources, destinations = destination_from_source(sources)<EOL>if len(destinations) == len(sources):<EOL><INDENT>if uploader.prepare():<EOL><INDENT>for filename, dst in zip(sources, destinations):<EOL><INDENT>if do_compile:<EOL><INDENT>uploader.file_remove(os.path.splitext(dst)[<NUM_LIT:0>]+'<STR_LIT>')<EOL><DEDENT>uploader.write_file(filename, dst, verify)<EOL>if do_compile and dst != '<STR_LIT>':<EOL><INDENT>uploader.file_compile(dst)<EOL>uploader.file_remove(dst)<EOL>if do_file:<EOL><INDENT>uploader.file_do(os.path.splitext(dst)[<NUM_LIT:0>]+'<STR_LIT>')<EOL><DEDENT><DEDENT>elif do_file:<EOL><INDENT>uploader.file_do(dst)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if do_restart:<EOL><INDENT>uploader.node_restart()<EOL><DEDENT>log.info('<STR_LIT>')<EOL>", "docstring": "The upload operation", "id": "f15367:m1"}
{"signature": "def can_remove(self):", "body": "if self.children.count() == <NUM_LIT:0>:<EOL><INDENT>return True<EOL><DEDENT>ancestors = set(self.ancestors_root())<EOL>children = set(self.children.all())<EOL>return children.issubset(ancestors)<EOL>", "docstring": "Returns True if it is legal to remove this node and still leave the\n        graph as a single connected entity, not splitting it into a forest.\n        Only nodes with no children or those who cause a cycle can be deleted.", "id": "f15383:c2:m11"}
{"signature": "def next_state(self, rule=None):", "body": "num_kids = self.current_node.children.count()<EOL>next_node = None<EOL>if num_kids == <NUM_LIT:0>:<EOL><INDENT>raise AttributeError('<STR_LIT>' % (<EOL>self.flow.id))<EOL><DEDENT>elif num_kids == <NUM_LIT:1>:<EOL><INDENT>next_node = self.current_node.children.first()<EOL><DEDENT>else:<EOL><INDENT>if not rule:<EOL><INDENT>raise AttributeError(('<STR_LIT>'<EOL>'<STR_LIT>') % self.current_node.data.rule_name)<EOL><DEDENT>for node in self.current_node.children.all():<EOL><INDENT>if node.data.rule_label == rule.class_label:<EOL><INDENT>next_node = node<EOL>break<EOL><DEDENT><DEDENT>if not next_node:<EOL><INDENT>raise AttributeError(('<STR_LIT>'<EOL>'<STR_LIT>') % (<EOL>self.current_node.data.rule_name))<EOL><DEDENT><DEDENT>self.current_node.data.rule.on_leave(self)<EOL>next_node.data.rule.on_enter(self)<EOL>self.current_node = next_node<EOL>self.save()<EOL>", "docstring": "Proceeds to the next step in the flow.  Calls the associated\n        :func:`Rule.on_leave` method for the for the current rule and the\n        :func:`Rule.on_enter` for the rule being entered.  If the current step\n        in the flow is multipath then a valid :class:`Rule` subclass must be\n        passed into this call.  \n\n        If there is only one possible path in the flow and a :class:`Rule` is\n        given it will be ignored.\n\n        :param rule: \n            if the current :class:`Rule` in the :class:`Flow` is multipath\n            then the next :class:`Rule` in the flow must be provided.", "id": "f15383:c9:m2"}
{"signature": "@classmethod<EOL><INDENT>@transaction.atomic<EOL>def factory_from_graph(cls, data_class, root_args, children):<DEDENT>", "body": "graph = cls.factory(data_class, **root_args)<EOL>for child in children:<EOL><INDENT>cls._depth_create(graph.root, child[<NUM_LIT:0>], child[<NUM_LIT:1>])<EOL><DEDENT>return graph<EOL>", "docstring": "Creates a ``DCCGraph`` and corresponding nodes.  The root_args parm\n        is a dictionary specifying the parameters for creating a :class:`Node`\n        and its corresponding :class:`BaseNodeData` subclass from the\n        data_class specified.  The children parm is an iterable containing\n        pairs of dictionaries and iterables, where the dictionaries specify\n        the parameters for a :class:`BaseNodeData` subclass and the iterable\n        the list of children.\n\n        Example::\n\n            DCCGraph.factory_from_graph(Label, \n                {'name':'A'}, [\n                    ({'name':'B', []),\n                    ({'name':'C', [])\n                ])\n\n        creates the graph::\n\n                 A\n                / \\\n               B   C\n\n        :param data_class: \n            django model class that extends :class:`BaseNodeData` and is used\n            to associate information with the Nodes in this graph\n        :param root_args: \n            dictionary of arguments to pass to constructor of the data class\n            instance that is to be created for the root node\n        :param children: \n            iterable with a list of dictionary and iterable pairs\n        :returns: \n            instance of the newly created ``DCCGraph``", "id": "f15383:c0:m3"}
{"signature": "@property<EOL><INDENT>def root_data(self):<DEDENT>", "body": "return self.state_graph.root.data<EOL>", "docstring": "Returns the :class:`FlowDataNode` object for the root node in the\n        graph.", "id": "f15383:c8:m3"}
{"signature": "def add_child_rule(self, child_rule):", "body": "self._child_allowed(child_rule)<EOL>child_node = self.node.add_child(rule_label=child_rule.class_label)<EOL>return child_node.data<EOL>", "docstring": "Add a child path in the :class:`Flow` graph using the given \n        :class:`Rule` subclass.  This will create a new child :class:`Node` in\n        the associated :class:`Flow` object's state graph with a new\n        :class:`FlowNodeData` instance attached.\n\n        The :class:`Rule` must be allowed at this stage of the flow according\n        to the hierarchy of rules.\n\n        :param child_rule: \n            :class:`Rule` class to add to the flow as a child of :class:`Node`\n            that this object owns\n        :returns: \n            ``FlowNodeData`` that was added", "id": "f15383:c7:m3"}
{"signature": "def prune_list(self):", "body": "targets = self.descendents_root()<EOL>try:<EOL><INDENT>targets.remove(self.graph.root)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT>targets.append(self)<EOL>return targets<EOL>", "docstring": "Returns a list of nodes that would be removed if prune were called\n        on this element.", "id": "f15383:c2:m14"}
{"signature": "@classmethod<EOL><INDENT>def on_leave(cls, state):<DEDENT>", "body": "pass<EOL>", "docstring": "Called when a state corresponding to this ``Rule`` is left.", "id": "f15383:c5:m1"}
{"signature": "@classmethod<EOL><INDENT>def on_enter(cls, state):<DEDENT>", "body": "pass<EOL>", "docstring": "Called when a state corresponding to this ``Rule`` is entered.", "id": "f15383:c5:m0"}
{"signature": "def _child_allowed(self, child_rule):", "body": "num_kids = self.node.children.count()<EOL>num_kids_allowed = len(self.rule.children)<EOL>if not self.rule.multiple_paths:<EOL><INDENT>num_kids_allowed = <NUM_LIT:1><EOL><DEDENT>if num_kids >= num_kids_allowed:<EOL><INDENT>raise AttributeError('<STR_LIT>' % (<EOL>self.rule_name, self.num_kids_allowed))<EOL><DEDENT>for node in self.node.children.all():<EOL><INDENT>if node.data.rule_label == child_rule.class_label:<EOL><INDENT>raise AttributeError('<STR_LIT>')<EOL><DEDENT><DEDENT>if child_rule not in self.rule.children:<EOL><INDENT>raise AttributeError('<STR_LIT>' % (<EOL>child_rule.__name__, self.rule_name))<EOL><DEDENT>", "docstring": "Called to verify that the given rule can become a child of the\n        current node.  \n\n        :raises AttributeError: \n            if the child is not allowed", "id": "f15383:c7:m2"}
{"signature": "def ancestors_root(self):", "body": "if self.is_root():<EOL><INDENT>return []<EOL><DEDENT>ancestors = set([])<EOL>self._depth_ascend(self, ancestors, True)<EOL>try:<EOL><INDENT>ancestors.remove(self)<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>return list(ancestors)<EOL>", "docstring": "Returns a list of the ancestors of this node but does not pass the\n        root node, even if the root has parents due to cycles.", "id": "f15383:c2:m7"}
{"signature": "def connect_child(self, child_node):", "body": "self._child_allowed(child_node.rule)<EOL>self.node.connect_child(child_node.node)<EOL>", "docstring": "Adds a connection to an existing rule in the :class`Flow` graph.\n        The given :class`Rule` subclass must be allowed to be connected at\n        this stage of the flow according to the hierarchy of rules.\n\n        :param child_node: \n            ``FlowNodeData`` to attach as a child", "id": "f15383:c7:m4"}
{"signature": "def remove(self):", "body": "if not self.can_remove():<EOL><INDENT>raise AttributeError('<STR_LIT>')<EOL><DEDENT>data = self.data<EOL>self.parents.remove(self)<EOL>self.delete()<EOL>return data<EOL>", "docstring": "Removes the node from the graph.  Note this does not remove the\n        associated data object.  See :func:`Node.can_remove` for limitations\n        on what can be deleted.\n\n        :returns: \n            :class:`BaseNodeData` subclass associated with the deleted Node\n\n        :raises AttributeError:\n           if called on a ``Node`` that cannot be deleted", "id": "f15383:c2:m12"}
{"signature": "def prune(self):", "body": "targets = self.descendents_root()<EOL>try:<EOL><INDENT>targets.remove(self.graph.root)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT>results = [n.data for n in targets]<EOL>results.append(self.data)<EOL>for node in targets:<EOL><INDENT>node.delete()<EOL><DEDENT>for parent in self.parents.all():<EOL><INDENT>parent.children.remove(self)<EOL><DEDENT>self.delete()<EOL>return results<EOL>", "docstring": "Removes the node and all descendents without looping back past the\n        root.  Note this does not remove the associated data objects.\n\n        :returns:\n            list of :class:`BaseDataNode` subclassers associated with the\n            removed ``Node`` objects.", "id": "f15383:c2:m13"}
{"signature": "@classmethod<EOL><INDENT>@transaction.atomic<EOL>def factory(cls, data_class, **kwargs):<DEDENT>", "body": "if not issubclass(data_class, BaseNodeData):<EOL><INDENT>raise AttributeError('<STR_LIT>')<EOL><DEDENT>content_type = ContentType.objects.get_for_model(data_class)<EOL>graph = DCCGraph.objects.create(data_content_type=content_type)<EOL>node = Node.objects.create(graph=graph)<EOL>data_class.objects.create(node=node, **kwargs)<EOL>graph.root = node<EOL>graph.save()<EOL>return graph<EOL>", "docstring": "Creates a ``DCCGraph``, a root :class:`Node`: and the node's\n        associated data class instance.  This factory is used to get around\n        the chicken-and-egg problem of the ``DCCGraph`` and ``Nodes`` within\n        it having pointers to each other.\n\n        :param data_class: \n            django model class that extends :class:`BaseNodeData` and is used\n            to associate information with the nodes in this graph\n        :param kwargs: \n            arguments to pass to constructor of the data class instance that\n            is to be created for the root node\n        :returns: \n            instance of the newly created ``DCCGraph``", "id": "f15383:c0:m1"}
{"signature": "async def async_close(self):", "body": "if self._async_lock is None:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>async with self._async_lock:<EOL><INDENT>self.close()<EOL><DEDENT>", "docstring": "Close the current connection asyncrounosly.", "id": "f15392:c1:m2"}
{"signature": "def init_async(self, loop=None):", "body": "self._loop = loop or asyncio.get_event_loop()<EOL>self._async_lock = asyncio.Lock(loop=loop)<EOL>if not self.database == '<STR_LIT>':<EOL><INDENT>self._state = ConnectionLocal()<EOL><DEDENT>", "docstring": "Use when application is starting.", "id": "f15392:c1:m0"}
{"signature": "@property<EOL><INDENT>def __current__(self):<DEDENT>", "body": "loop = asyncio.get_event_loop()<EOL>if not loop or not loop.is_running():<EOL><INDENT>return THREADING_LOCAL<EOL><DEDENT>task = asyncio.Task.current_task(loop=loop)<EOL>if not task:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>if not hasattr(task, '<STR_LIT>'):<EOL><INDENT>task._locals = lambda: None<EOL><DEDENT>return task._locals<EOL>", "docstring": "Create namespace inside running task.", "id": "f15392:c0:m3"}
{"signature": "async def async_connect(self):", "body": "if self._waiters is None:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if self._waiters or self.max_connections and (len(self._in_use) >= self.max_connections):<EOL><INDENT>waiter = asyncio.Future(loop=self._loop)<EOL>self._waiters.append(waiter)<EOL>try:<EOL><INDENT>logger.debug('<STR_LIT>')<EOL>await waiter<EOL><DEDENT>finally:<EOL><INDENT>self._waiters.remove(waiter)<EOL><DEDENT><DEDENT>self.connect()<EOL>return self._state.conn<EOL>", "docstring": "Asyncronously wait for a connection from the pool.", "id": "f15392:c2:m1"}
{"signature": "def render_vars(self):", "body": "return {<EOL>'<STR_LIT>': [<EOL>{<EOL>'<STR_LIT:message>': record.getMessage(),<EOL>'<STR_LIT:time>': dt.datetime.fromtimestamp(record.created).strftime('<STR_LIT>'),<EOL>} for record in self.handler.records<EOL>]<EOL>}<EOL>", "docstring": "Template variables.", "id": "f15393:c0:m4"}
{"signature": "@property<EOL><INDENT>def nav_title(self):<DEDENT>", "body": "return \"<STR_LIT>\" % (self.title, len(self.handler.records))<EOL>", "docstring": "Get a navigation title.", "id": "f15393:c0:m2"}
{"signature": "def __init__(self, app, request=None):", "body": "super(DebugPanel, self).__init__(app, request)<EOL>LOGGER.setLevel(logging.DEBUG)<EOL>self.handler = LoggingTrackingHandler()<EOL>", "docstring": "Initialize the panel.", "id": "f15393:c0:m0"}
{"signature": "def __init__(self, dumps=None, loads=None, *args, **kwargs):", "body": "self.dumps = dumps or ujson.dumps<EOL>self.loads = loads or ujson.loads<EOL>super(JSONField, self).__init__(*args, **kwargs)<EOL>", "docstring": "Initialize the serializer.", "id": "f15394:c0:m0"}
{"signature": "@cached_property<EOL><INDENT>def field_type(self):<DEDENT>", "body": "if not self.model:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>database = self.model._meta.database<EOL>if isinstance(database, Proxy):<EOL><INDENT>database = database.obj<EOL><DEDENT>if Json and isinstance(database, PostgresqlDatabase):<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Return database field type.", "id": "f15394:c0:m1"}
{"signature": "def python_value(self, value):", "body": "if self.field_type == '<STR_LIT>' and isinstance(value, str):<EOL><INDENT>return self.loads(value)<EOL><DEDENT>return value<EOL>", "docstring": "Parse value from database.", "id": "f15394:c0:m3"}
{"signature": "def __iter__(self):", "body": "return iter(self._choices)<EOL>", "docstring": "Iterate self.", "id": "f15396:c0:m2"}
{"signature": "async def manage(self):", "body": "cm = _ContextManager(self.database)<EOL>if isinstance(self.database.obj, AIODatabase):<EOL><INDENT>cm.connection = await self.database.async_connect()<EOL><DEDENT>else:<EOL><INDENT>cm.connection = self.database.connect()<EOL><DEDENT>return cm<EOL>", "docstring": "Manage a database connection.", "id": "f15397:c0:m6"}
{"signature": "def cleanup(self, app):", "body": "if hasattr(self.database.obj, '<STR_LIT>'):<EOL><INDENT>self.database.close_all()<EOL><DEDENT>", "docstring": "Close all connections.", "id": "f15397:c0:m3"}
{"signature": "def register(self, model):", "body": "self.models[model._meta.table_name] = model<EOL>model._meta.database = self.database<EOL>return model<EOL>", "docstring": "Register a model in self.", "id": "f15397:c0:m5"}
{"signature": "@app.register('<STR_LIT>')<EOL>def clean(request):", "body": "models.DataItem.delete().execute()<EOL>return muffin.HTTPFound('<STR_LIT:/>')<EOL>", "docstring": "Create a new DataItem.", "id": "f15400:m2"}
{"signature": "@app.register('<STR_LIT>')<EOL>def generate(request):", "body": "models.DataItem.create(<EOL>content='<STR_LIT>'.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(<NUM_LIT:20>))<EOL>)<EOL>return muffin.HTTPFound('<STR_LIT:/>')<EOL>", "docstring": "Create a new DataItem.", "id": "f15400:m1"}
{"signature": "def add_prefix(multicodec, bytes_):", "body": "prefix = get_prefix(multicodec)<EOL>return b'<STR_LIT>'.join([prefix, bytes_])<EOL>", "docstring": "Adds multicodec prefix to the given bytes input\n\n:param str multicodec: multicodec to use for prefixing\n:param bytes bytes_: data to prefix\n:return: prefixed byte data\n:rtype: bytes", "id": "f15407:m2"}
{"signature": "def extract_prefix(bytes_):", "body": "try:<EOL><INDENT>return varint.decode_bytes(bytes_)<EOL><DEDENT>except TypeError:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Extracts the prefix from multicodec prefixed data\n\n:param bytes bytes_: multicodec prefixed data\n:return: prefix for the prefixed data\n:rtype: bytes\n:raises ValueError: when incorrect varint is provided", "id": "f15407:m0"}
{"signature": "def remove_prefix(bytes_):", "body": "prefix_int = extract_prefix(bytes_)<EOL>prefix = varint.encode(prefix_int)<EOL>return bytes_[len(prefix):]<EOL>", "docstring": "Removes prefix from a prefixed data\n\n:param bytes bytes_: multicodec prefixed data bytes\n:return: prefix removed data bytes\n:rtype: bytes", "id": "f15407:m3"}
{"signature": "def load_key(pubkey):", "body": "try:<EOL><INDENT>return load_pem_public_key(pubkey.encode(), default_backend())<EOL><DEDENT>except ValueError:<EOL><INDENT>pubkey = pubkey.replace('<STR_LIT>', '<STR_LIT>').replace('<STR_LIT>', '<STR_LIT>')<EOL>return load_pem_public_key(pubkey.encode(), default_backend())<EOL><DEDENT>", "docstring": "Load public RSA key.\n\n    Work around keys with incorrect header/footer format.\n\n    Read more about RSA encryption with cryptography:\n    https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/", "id": "f15410:m0"}
{"signature": "def encrypt(pubkey, password):", "body": "key = load_key(pubkey)<EOL>encrypted_password = key.encrypt(password, PKCS1v15())<EOL>return base64.b64encode(encrypted_password)<EOL>", "docstring": "Encrypt password using given RSA public key and encode it with base64.\n\n    The encrypted password can only be decrypted by someone with the\n    private key (in this case, only Travis).", "id": "f15410:m1"}
{"signature": "def update_travis_deploy_password(encrypted_password):", "body": "config = load_yaml_config(TRAVIS_CONFIG_FILE)<EOL>config['<STR_LIT>']['<STR_LIT:password>'] = dict(secure=encrypted_password)<EOL>save_yaml_config(TRAVIS_CONFIG_FILE, config)<EOL>line = ('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>prepend_line(TRAVIS_CONFIG_FILE, line)<EOL>", "docstring": "Put `encrypted_password` into the deploy section of .travis.yml.", "id": "f15410:m6"}
{"signature": "def __init__(self, secret_key, region, service, date=None,<EOL>store_secret_key=True):", "body": "self.region = region<EOL>self.service = service<EOL>self.date = date or datetime.utcnow().strftime('<STR_LIT>')<EOL>self.scope = '<STR_LIT>'.format(<EOL>self.date,<EOL>self.region,<EOL>self.service)<EOL>self.store_secret_key = store_secret_key<EOL>self.secret_key = secret_key if self.store_secret_key else None<EOL>self.key = self.generate_key(secret_key, self.region,<EOL>self.service, self.date)<EOL>", "docstring": ">>> AWS4SigningKey(secret_key, region, service[, date]\n...                [, store_secret_key])\n\nsecret_key -- This is your AWS secret access key\nregion     -- The region you're connecting to, as per list at\n              http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region\n              e.g. us-east-1. For services which don't require a\n              region (e.g. IAM), use us-east-1.\nservice    -- The name of the service you're connecting to, as per\n              endpoints at:\n              http://docs.aws.amazon.com/general/latest/gr/rande.html\n              e.g. elasticbeanstalk\ndate       -- 8-digit date of the form YYYYMMDD. Key is only valid for\n              requests with a Date or X-Amz-Date header matching this\n              date. If date is not supplied the current date is\n              used.\nstore_secret_key\n           -- Whether the secret key is stored in the instance. By\n              default this is True, meaning the key is stored in\n              the secret_key property and is available to any\n              code the instance is passed to. 
Having the secret\n              key retained makes it easier to regenerate the key\n              if a scope parameter changes (usually the date).\n              This is used by the AWS4Auth class to perform its\n              automatic key updates when a request date/scope date\n              mismatch is encountered.\n\n              If you are passing instances to untrusted code you can\n              set this to False. This will cause the secret key to be\n              discarded as soon as the signing key has been generated.\n              Note though that you will need to manually regenerate\n              keys when needed (or if you use the regenerate_key()\n              method on an AWS4Auth instance you will need to pass it\n              the secret key).\n\nAll arguments should be supplied as strings.", "id": "f15413:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def get_canonical_headers(cls, req, include=None):<DEDENT>", "body": "if include is None:<EOL><INDENT>include = cls.default_include_headers<EOL><DEDENT>include = [x.lower() for x in include]<EOL>headers = req.headers.copy()<EOL>if '<STR_LIT:host>' not in headers:<EOL><INDENT>headers['<STR_LIT:host>'] = urlparse(req.url).netloc.split('<STR_LIT::>')[<NUM_LIT:0>]<EOL><DEDENT>cano_headers_dict = {}<EOL>for hdr, val in headers.items():<EOL><INDENT>hdr = hdr.strip().lower()<EOL>val = cls.amz_norm_whitespace(val).strip()<EOL>if (hdr in include or '<STR_LIT:*>' in include or<EOL>('<STR_LIT>' in include and hdr.startswith('<STR_LIT>') and not<EOL>hdr == '<STR_LIT>')):<EOL><INDENT>vals = cano_headers_dict.setdefault(hdr, [])<EOL>vals.append(val)<EOL><DEDENT><DEDENT>cano_headers = '<STR_LIT>'<EOL>signed_headers_list = []<EOL>for hdr in sorted(cano_headers_dict):<EOL><INDENT>vals = cano_headers_dict[hdr]<EOL>val = '<STR_LIT:U+002C>'.join(sorted(vals))<EOL>cano_headers += '<STR_LIT>'.format(hdr, val)<EOL>signed_headers_list.append(hdr)<EOL><DEDENT>signed_headers = '<STR_LIT:;>'.join(signed_headers_list)<EOL>return (cano_headers, signed_headers)<EOL>", "docstring": "Generate the Canonical Headers section of the Canonical Request.\n\nReturn the Canonical Headers and the Signed Headers strs as a tuple\n(canonical_headers, signed_headers).\n\nreq     -- Requests PreparedRequest object\ninclude -- List of headers to include in the canonical and signed\n           headers. It's primarily included to allow testing against\n           specific examples from Amazon. If omitted or None it\n           includes host, content-type and any header starting 'x-amz-'\n           except for x-amz-client context, which appears to break\n           mobile analytics auth if included. Except for the\n           x-amz-client-context exclusion these defaults are per the\n           AWS documentation.", "id": "f15416:c0:m8"}
{"signature": "def handle_date_mismatch(self, req):", "body": "raise DateMismatchError<EOL>", "docstring": "Handle a request whose date doesn't match the signing key process, by\nraising a DateMismatchError.\n\nOverrides the default behaviour of AWS4Auth where the signing key\nis automatically regenerated to match the request date\n\nTo update the signing key if this is hit, call\nStrictAWS4Auth.regenerate_signing_key().", "id": "f15416:c1:m0"}
{"signature": "def get_canonical_request(self, req, cano_headers, signed_headers):", "body": "url = urlparse(req.url)<EOL>path = self.amz_cano_path(url.path)<EOL>split = req.url.split('<STR_LIT:?>', <NUM_LIT:1>)<EOL>qs = split[<NUM_LIT:1>] if len(split) == <NUM_LIT:2> else '<STR_LIT>'<EOL>qs = self.amz_cano_querystring(qs)<EOL>payload_hash = req.headers['<STR_LIT>']<EOL>req_parts = [req.method.upper(), path, qs, cano_headers,<EOL>signed_headers, payload_hash]<EOL>cano_req = '<STR_LIT:\\n>'.join(req_parts)<EOL>return cano_req<EOL>", "docstring": "Create the AWS authentication Canonical Request string.\n\nreq            -- Requests PreparedRequest object. Should already\n                  include an x-amz-content-sha256 header\ncano_headers   -- Canonical Headers section of Canonical Request, as\n                  returned by get_canonical_headers()\nsigned_headers -- Signed Headers, as returned by\n                  get_canonical_headers()", "id": "f15416:c0:m7"}
{"signature": "@staticmethod<EOL><INDENT>def amz_norm_whitespace(text):<DEDENT>", "body": "return '<STR_LIT:U+0020>'.join(shlex.split(text, posix=False))<EOL>", "docstring": "Replace runs of whitespace with a single space.\n\nIgnore text enclosed in quotes.", "id": "f15416:c0:m12"}
{"signature": "@staticmethod<EOL><INDENT>def parse_date(date_str):<DEDENT>", "body": "months = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>formats = {<EOL>r'<STR_LIT>':<EOL>lambda m: '<STR_LIT>'.format(<EOL>m.group(<NUM_LIT:3>),<EOL>months.index(m.group(<NUM_LIT:2>).lower())+<NUM_LIT:1>,<EOL>m.group(<NUM_LIT:1>)),<EOL>r'<STR_LIT>':<EOL>lambda m: '<STR_LIT>'.format(<EOL>str(datetime.date.today().year)[:<NUM_LIT:2>],<EOL>m.group(<NUM_LIT:3>),<EOL>months.index(m.group(<NUM_LIT:2>).lower())+<NUM_LIT:1>,<EOL>m.group(<NUM_LIT:1>)),<EOL>r'<STR_LIT>':<EOL>lambda m: '<STR_LIT>'.format(<EOL>m.group(<NUM_LIT:3>),<EOL>months.index(m.group(<NUM_LIT:1>).lower())+<NUM_LIT:1>,<EOL>int(m.group(<NUM_LIT:2>))),<EOL>r'<STR_LIT>':<EOL>lambda m: '<STR_LIT>'.format(*m.groups()),<EOL>r'<STR_LIT>':<EOL>lambda m: m.group(<NUM_LIT:1>),<EOL>}<EOL>out_date = None<EOL>for regex, xform in formats.items():<EOL><INDENT>m = re.search(regex, date_str)<EOL>if m:<EOL><INDENT>out_date = xform(m)<EOL>break<EOL><DEDENT><DEDENT>if out_date is None:<EOL><INDENT>raise DateFormatError<EOL><DEDENT>else:<EOL><INDENT>return out_date<EOL><DEDENT>", "docstring": "Check if date_str is in a recognised format and return an ISO\nyyyy-mm-dd format version if so. Raise DateFormatError if not.\n\nRecognised formats are:\n* RFC 7231 (e.g. Mon, 09 Sep 2011 23:36:00 GMT)\n* RFC 850 (e.g. Sunday, 06-Nov-94 08:49:37 GMT)\n* C time (e.g. Wed Dec 4 00:00:00 2002)\n* Amz-Date format (e.g. 20090325T010101Z)\n* ISO 8601 / RFC 3339 (e.g. 2009-03-25T10:11:12.13-01:00)\n\ndate_str -- Str containing a date and optional time", "id": "f15416:c0:m4"}
{"signature": "@staticmethod<EOL><INDENT>def amz_cano_querystring(qs):<DEDENT>", "body": "safe_qs_amz_chars = '<STR_LIT>'<EOL>safe_qs_unresvd = '<STR_LIT>'<EOL>if PY2:<EOL><INDENT>qs = qs.encode('<STR_LIT:utf-8>')<EOL>safe_qs_amz_chars = safe_qs_amz_chars.encode()<EOL>safe_qs_unresvd = safe_qs_unresvd.encode()<EOL><DEDENT>qs = unquote(qs)<EOL>space = b'<STR_LIT:U+0020>' if PY2 else '<STR_LIT:U+0020>'<EOL>qs = qs.split(space)[<NUM_LIT:0>]<EOL>qs = quote(qs, safe=safe_qs_amz_chars)<EOL>qs_items = {}<EOL>for name, vals in parse_qs(qs, keep_blank_values=True).items():<EOL><INDENT>name = quote(name, safe=safe_qs_unresvd)<EOL>vals = [quote(val, safe=safe_qs_unresvd) for val in vals]<EOL>qs_items[name] = vals<EOL><DEDENT>qs_strings = []<EOL>for name, vals in qs_items.items():<EOL><INDENT>for val in vals:<EOL><INDENT>qs_strings.append('<STR_LIT:=>'.join([name, val]))<EOL><DEDENT><DEDENT>qs = '<STR_LIT:&>'.join(sorted(qs_strings))<EOL>if PY2:<EOL><INDENT>qs = unicode(qs)<EOL><DEDENT>return qs<EOL>", "docstring": "Parse and format querystring as per AWS4 auth requirements.\n\nPerform percent quoting as needed.\n\nqs -- querystring", "id": "f15416:c0:m11"}
{"signature": "@staticmethod<EOL><INDENT>def encode_body(req):<DEDENT>", "body": "if isinstance(req.body, text_type):<EOL><INDENT>split = req.headers.get('<STR_LIT>', '<STR_LIT>').split('<STR_LIT:;>')<EOL>if len(split) == <NUM_LIT:2>:<EOL><INDENT>ct, cs = split<EOL>cs = cs.split('<STR_LIT:=>')[<NUM_LIT:1>]<EOL>req.body = req.body.encode(cs)<EOL><DEDENT>else:<EOL><INDENT>ct = split[<NUM_LIT:0>]<EOL>if (ct == '<STR_LIT>' or<EOL>'<STR_LIT>' in ct):<EOL><INDENT>req.body = req.body.encode()<EOL><DEDENT>else:<EOL><INDENT>req.body = req.body.encode('<STR_LIT:utf-8>')<EOL>req.headers['<STR_LIT>'] = ct + '<STR_LIT>'<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Encode body of request to bytes and update content-type if required.\n\nIf the body of req is Unicode then encode to the charset found in\ncontent-type header if present, otherwise UTF-8, or ASCII if\ncontent-type is application/x-www-form-urlencoded. If encoding to UTF-8\nthen add charset to content-type. Modifies req directly, does not\nreturn a modified copy.\n\nreq -- Requests PreparedRequest object", "id": "f15416:c0:m6"}
{"signature": "def compact_name(self, hashsize=<NUM_LIT:6>):", "body": "<EOL>s = self.compact_name_core(hashsize, t_max=True)<EOL>s += \"<STR_LIT>\" % (self.ID, self.EID)<EOL>return s<EOL>", "docstring": "Compact representation of all simulation parameters", "id": "f15419:c5:m10"}
{"signature": "def get_seed(seed, ID=<NUM_LIT:0>, EID=<NUM_LIT:0>):", "body": "return seed + EID + <NUM_LIT:100> * ID<EOL>", "docstring": "Get a random seed that is a combination of `seed`, `ID` and `EID`.\n    Provides different, but deterministic, seeds in parallel computations", "id": "f15419:m0"}
{"signature": "def simulate_timestamps_mix_da_online(self, max_rates_d, max_rates_a,<EOL>populations, bg_rate_d, bg_rate_a,<EOL>rs=None, seed=<NUM_LIT:1>, chunksize=<NUM_LIT:2>**<NUM_LIT:16>,<EOL>comp_filter=None, overwrite=False,<EOL>skip_existing=False, scale=<NUM_LIT:10>,<EOL>path=None, t_chunksize=<NUM_LIT:2>**<NUM_LIT>,<EOL>timeslice=None):", "body": "self.open_store_timestamp(chunksize=chunksize, path=path)<EOL>rs = self._get_group_randomstate(rs, seed, self.ts_group)<EOL>if t_chunksize is None:<EOL><INDENT>t_chunksize = <NUM_LIT:2>**<NUM_LIT><EOL><DEDENT>timeslice_size = self.n_samples<EOL>if timeslice is not None:<EOL><INDENT>timeslice_size = timeslice // self.t_step<EOL><DEDENT>name_d = self._get_ts_name_mix(max_rates_d, populations, bg_rate_d, rs)<EOL>name_a = self._get_ts_name_mix(max_rates_a, populations, bg_rate_a, rs)<EOL>kw = dict(clk_p=self.t_step / scale,<EOL>populations=populations,<EOL>num_particles=self.num_particles,<EOL>bg_particle=self.num_particles,<EOL>overwrite=overwrite, chunksize=chunksize)<EOL>if comp_filter is not None:<EOL><INDENT>kw.update(comp_filter=comp_filter)<EOL><DEDENT>kw.update(name=name_d, max_rates=max_rates_d, bg_rate=bg_rate_d)<EOL>try:<EOL><INDENT>self._timestamps_d, self._tparticles_d = (self.ts_store<EOL>.add_timestamps(**kw))<EOL><DEDENT>except ExistingArrayError as e:<EOL><INDENT>if skip_existing:<EOL><INDENT>print('<STR_LIT>')<EOL>return<EOL><DEDENT>else:<EOL><INDENT>raise e<EOL><DEDENT><DEDENT>kw.update(name=name_a, max_rates=max_rates_a, bg_rate=bg_rate_a)<EOL>try:<EOL><INDENT>self._timestamps_a, self._tparticles_a = (self.ts_store<EOL>.add_timestamps(**kw))<EOL><DEDENT>except ExistingArrayError as e:<EOL><INDENT>if skip_existing:<EOL><INDENT>print('<STR_LIT>')<EOL>return<EOL><DEDENT>else:<EOL><INDENT>raise e<EOL><DEDENT><DEDENT>self.ts_group._v_attrs['<STR_LIT>'] = rs.get_state()<EOL>self.ts_group.attrs['<STR_LIT>'] = <NUM_LIT:1><EOL>self._timestamps_d.attrs['<STR_LIT>'] = 
rs.get_state()<EOL>self._timestamps_d.attrs['<STR_LIT>'] = __version__<EOL>self._timestamps_a.attrs['<STR_LIT>'] = __version__<EOL>print('<STR_LIT>' % ctime(), flush=True)<EOL>par_start_pos = self.particles.positions<EOL>bg_rates_d = [None] * (len(max_rates_d) - <NUM_LIT:1>) + [bg_rate_d]<EOL>bg_rates_a = [None] * (len(max_rates_a) - <NUM_LIT:1>) + [bg_rate_a]<EOL>prev_time = <NUM_LIT:0><EOL>for i_start, i_end in iter_chunk_index(timeslice_size, t_chunksize):<EOL><INDENT>curr_time = np.around(i_start * self.t_step, decimals=<NUM_LIT:1>)<EOL>if curr_time > prev_time:<EOL><INDENT>print('<STR_LIT>' % curr_time, end='<STR_LIT>', flush=True)<EOL>prev_time = curr_time<EOL><DEDENT>_, em_chunk = self._sim_trajectories(t_chunksize, par_start_pos,<EOL>rs,<EOL>total_emission=False,<EOL>save_pos=False, radial=False,<EOL>wrap_func=wrap_periodic)<EOL>times_chunk_s_d, par_index_chunk_s_d =self._sim_timestamps_populations(<EOL>em_chunk, max_rates_d, populations, bg_rates_d, i_start,<EOL>rs, scale)<EOL>times_chunk_s_a, par_index_chunk_s_a =self._sim_timestamps_populations(<EOL>em_chunk, max_rates_a, populations, bg_rates_a, i_start,<EOL>rs, scale)<EOL>self._timestamps_d.append(times_chunk_s_d)<EOL>self._tparticles_d.append(par_index_chunk_s_d)<EOL>self._timestamps_a.append(times_chunk_s_a)<EOL>self._tparticles_a.append(par_index_chunk_s_a)<EOL><DEDENT>self.ts_group._v_attrs['<STR_LIT>'] = rs.get_state()<EOL>self._timestamps_d._v_attrs['<STR_LIT>'] = rs.get_state()<EOL>self.ts_store.h5file.flush()<EOL>print('<STR_LIT>' % ctime(), flush=True)<EOL>", "docstring": "Compute D and A timestamps arrays for a mixture of N populations.\n\n        This method simulates the diffusion, emission and generates a pair\n        of timestamps arrays (e.g. 
donor and acceptor) all at the same time.\n        This method avoids saving the trajectories to disk.\n\n        Timestamp data are saved to disk and accessible as pytables arrays in\n        `._timestamps_d/a` and `._tparticles_d/a`.\n        The background generated timestamps are assigned a\n        conventional particle number (last particle index + 1).\n\n        Arguments:\n            max_rates (list): list of the peak max emission rate for each\n                population.\n            populations (list of slices): slices to `self.particles`\n                defining each population.\n            bg_rate (float, cps): rate for a Poisson background process\n            rs (RandomState object): random state object used as random number\n                generator. If None, use a random state initialized from seed.\n            seed (uint): when `rs` is None, `seed` is used to initialize the\n                random state, otherwise is ignored.\n            chunksize (int): chunk size used for the on-disk timestamp array\n            comp_filter (tables.Filter or None): compression filter to use\n                for the on-disk `timestamps` and `tparticles` arrays.\n                If None use default compression.\n            overwrite (bool): if True, overwrite any pre-existing timestamps\n                array. If False, never overwrite. The outcome of simulating an\n                existing array is controlled by `skip_existing` flag.\n            skip_existing (bool): if True, skip simulation if the same\n                timestamps array is already present.\n            scale (int): `self.t_step` is multiplied by `scale` to obtain the\n                timestamps units in seconds.\n            path (string): folder where to save the data.\n            timeslice (float or None): timestamps are simulated until\n                `timeslice` seconds. If None, simulate until `self.t_max`.", "id": "f15419:c5:m29"}
{"signature": "def concentration(self, pM=False):", "body": "concentr = (self.num_particles / NA) / self.box.volume_L<EOL>if pM:<EOL><INDENT>concentr *= <NUM_LIT><EOL><DEDENT>return concentr<EOL>", "docstring": "Return the concentration (in Moles) of the particles in the box.", "id": "f15419:c5:m13"}
{"signature": "@staticmethod<EOL><INDENT>def _generate(num_particles, D, box, rs):<DEDENT>", "body": "X0 = rs.rand(num_particles) * (box.x2 - box.x1) + box.x1<EOL>Y0 = rs.rand(num_particles) * (box.y2 - box.y1) + box.y1<EOL>Z0 = rs.rand(num_particles) * (box.z2 - box.z1) + box.z1<EOL>return [Particle(D=D, x0=x0, y0=y0, z0=z0)<EOL>for x0, y0, z0 in zip(X0, Y0, Z0)]<EOL>", "docstring": "Generate a list of `Particle` objects.", "id": "f15419:c2:m0"}
{"signature": "def _sim_timestamps(self, max_rate, bg_rate, emission, i_start, rs,<EOL>ip_start=<NUM_LIT:0>, scale=<NUM_LIT:10>, sort=True):", "body": "counts_chunk = sim_timetrace_bg(emission, max_rate, bg_rate,<EOL>self.t_step, rs=rs)<EOL>nrows = emission.shape[<NUM_LIT:0>]<EOL>if bg_rate is not None:<EOL><INDENT>nrows += <NUM_LIT:1><EOL><DEDENT>assert counts_chunk.shape == (nrows, emission.shape[<NUM_LIT:1>])<EOL>max_counts = counts_chunk.max()<EOL>if max_counts == <NUM_LIT:0>:<EOL><INDENT>return np.array([], dtype=np.int64), np.array([], dtype=np.int64)<EOL><DEDENT>time_start = i_start * scale<EOL>time_stop = time_start + counts_chunk.shape[<NUM_LIT:1>] * scale<EOL>ts_range = np.arange(time_start, time_stop, scale, dtype='<STR_LIT>')<EOL>times_chunk_p = []<EOL>par_index_chunk_p = []<EOL>for ip, counts_chunk_ip in enumerate(counts_chunk):<EOL><INDENT>times_c_ip = []<EOL>for v in range(<NUM_LIT:1>, max_counts + <NUM_LIT:1>):<EOL><INDENT>times_c_ip.append(ts_range[counts_chunk_ip >= v])<EOL><DEDENT>t = np.hstack(times_c_ip)<EOL>times_chunk_p.append(t)<EOL>par_index_chunk_p.append(np.full(t.size, ip + ip_start, dtype='<STR_LIT>'))<EOL><DEDENT>times_chunk = np.hstack(times_chunk_p)<EOL>par_index_chunk = np.hstack(par_index_chunk_p)<EOL>if sort:<EOL><INDENT>index_sort = times_chunk.argsort(kind='<STR_LIT>')<EOL>times_chunk = times_chunk[index_sort]<EOL>par_index_chunk = par_index_chunk[index_sort]<EOL><DEDENT>return times_chunk, par_index_chunk<EOL>", "docstring": "Simulate timestamps from emission trajectories.\n\n        Uses attributes: `.t_step`.\n\n        Returns:\n            A tuple of two arrays: timestamps and particles.", "id": "f15419:c5:m25"}
{"signature": "def __init__(self, num_particles, D, box, rs=None, seed=<NUM_LIT:1>, particles=None):", "body": "if rs is None:<EOL><INDENT>rs = np.random.RandomState(seed=seed)<EOL><DEDENT>self.rs = rs<EOL>self.init_random_state = rs.get_state()<EOL>self.box = box<EOL>if particles is None:<EOL><INDENT>self._plist = self._generate(num_particles, D, box, rs)<EOL><DEDENT>else:<EOL><INDENT>self._plist = list(particles)<EOL><DEDENT>self.rs_hash = hash_(self.init_random_state)[:<NUM_LIT:3>]<EOL>", "docstring": "A set of `N` Particle() objects with random position in `box`.\n\n        Arguments:\n            num_particles (int): number of particles to be generated\n            D (float): diffusion coefficient in S.I. units (m^2/s)\n            box (Box object): the simulation box\n            rs (RandomState object): random state object used as random number\n                generator. If None, use a random state initialized from seed.\n            seed (uint): when `rs` is None, `seed` is used to initialize the\n                random state. `seed` is ignored when `rs` is not None.\n            particles (list or None): when not None, initialize the object from\n                this list that must containing only `Particle` objects.", "id": "f15419:c2:m1"}
{"signature": "@property<EOL><INDENT>def positions(self):<DEDENT>", "body": "return np.vstack([p.r0 for p in self]).reshape(len(self), <NUM_LIT:3>, <NUM_LIT:1>)<EOL>", "docstring": "Initial position for each particle. Shape (N, 3, 1).", "id": "f15419:c2:m10"}
{"signature": "def get_timestamps_part(self, name):", "body": "par_name = name + '<STR_LIT>'<EOL>timestamps = self.ts_store.h5file.get_node('<STR_LIT>', name)<EOL>particles = self.ts_store.h5file.get_node('<STR_LIT>', par_name)<EOL>return timestamps, particles<EOL>", "docstring": "Return matching (timestamps, particles) pytables arrays.", "id": "f15419:c5:m23"}
{"signature": "def _sim_trajectories(self, time_size, start_pos, rs,<EOL>total_emission=False, save_pos=False, radial=False,<EOL>wrap_func=wrap_periodic):", "body": "time_size = int(time_size)<EOL>num_particles = self.num_particles<EOL>if total_emission:<EOL><INDENT>em = np.zeros(time_size, dtype=np.float32)<EOL><DEDENT>else:<EOL><INDENT>em = np.zeros((num_particles, time_size), dtype=np.float32)<EOL><DEDENT>POS = []<EOL>for i, sigma_1d in enumerate(self.sigma_1d):<EOL><INDENT>delta_pos = rs.normal(loc=<NUM_LIT:0>, scale=sigma_1d,<EOL>size=<NUM_LIT:3> * time_size)<EOL>delta_pos = delta_pos.reshape(<NUM_LIT:3>, time_size)<EOL>pos = np.cumsum(delta_pos, axis=-<NUM_LIT:1>, out=delta_pos)<EOL>pos += start_pos[i]<EOL>for coord in (<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>):<EOL><INDENT>pos[coord] = wrap_func(pos[coord], *self.box.b[coord])<EOL><DEDENT>Ro = sqrt(pos[<NUM_LIT:0>]**<NUM_LIT:2> + pos[<NUM_LIT:1>]**<NUM_LIT:2>)  <EOL>Z = pos[<NUM_LIT:2>]<EOL>current_em = self.psf.eval_xz(Ro, Z)**<NUM_LIT:2><EOL>if total_emission:<EOL><INDENT>em += current_em.astype(np.float32)<EOL><DEDENT>else:<EOL><INDENT>em[i] = current_em.astype(np.float32)<EOL><DEDENT>if save_pos:<EOL><INDENT>pos_save = np.vstack((Ro, Z)) if radial else pos<EOL>POS.append(pos_save[np.newaxis, :, :])<EOL><DEDENT>start_pos[i] = pos[:, -<NUM_LIT:1>:]<EOL><DEDENT>return POS, em<EOL>", "docstring": "Simulate (in-memory) `time_size` steps of trajectories.\n\n        Simulate Brownian motion diffusion and emission of all the particles.\n        Uses the attributes: num_particles, sigma_1d, box, psf.\n\n        Arguments:\n            time_size (int): number of time steps to be simulated.\n            start_pos (array): shape (num_particles, 3), particles start\n                positions. 
This array is modified to store the end position\n                after this method is called.\n            rs (RandomState): a `numpy.random.RandomState` object used\n                to generate the random numbers.\n            total_emission (bool): if True, store only the total emission array\n                containing the sum of emission of all the particles.\n            save_pos (bool): if True, save the particles 3D trajectories\n            wrap_func (function): the function used to apply the boundary\n                condition (use :func:`wrap_periodic` or :func:`wrap_mirror`).\n\n        Returns:\n            POS (list): list of 3D trajectories arrays (3 x time_size)\n            em (array): array of emission (total or per-particle)", "id": "f15419:c5:m17"}
{"signature": "def simulate_timestamps_mix(self, max_rates, populations, bg_rate,<EOL>rs=None, seed=<NUM_LIT:1>, chunksize=<NUM_LIT:2>**<NUM_LIT:16>,<EOL>comp_filter=None, overwrite=False,<EOL>skip_existing=False, scale=<NUM_LIT:10>,<EOL>path=None, t_chunksize=None, timeslice=None):", "body": "self.open_store_timestamp(chunksize=chunksize, path=path)<EOL>rs = self._get_group_randomstate(rs, seed, self.ts_group)<EOL>if t_chunksize is None:<EOL><INDENT>t_chunksize = self.emission.chunkshape[<NUM_LIT:1>]<EOL><DEDENT>timeslice_size = self.n_samples<EOL>if timeslice is not None:<EOL><INDENT>timeslice_size = timeslice // self.t_step<EOL><DEDENT>name = self._get_ts_name_mix(max_rates, populations, bg_rate, rs=rs)<EOL>kw = dict(name=name, clk_p=self.t_step / scale,<EOL>max_rates=max_rates, bg_rate=bg_rate, populations=populations,<EOL>num_particles=self.num_particles,<EOL>bg_particle=self.num_particles,<EOL>overwrite=overwrite, chunksize=chunksize)<EOL>if comp_filter is not None:<EOL><INDENT>kw.update(comp_filter=comp_filter)<EOL><DEDENT>try:<EOL><INDENT>self._timestamps, self._tparticles = (self.ts_store<EOL>.add_timestamps(**kw))<EOL><DEDENT>except ExistingArrayError as e:<EOL><INDENT>if skip_existing:<EOL><INDENT>print('<STR_LIT>')<EOL>return<EOL><DEDENT>else:<EOL><INDENT>raise e<EOL><DEDENT><DEDENT>self.ts_group._v_attrs['<STR_LIT>'] = rs.get_state()<EOL>self._timestamps.attrs['<STR_LIT>'] = rs.get_state()<EOL>self._timestamps.attrs['<STR_LIT>'] = __version__<EOL>ts_list, part_list = [], []<EOL>bg_rates = [None] * (len(max_rates) - <NUM_LIT:1>) + [bg_rate]<EOL>prev_time = <NUM_LIT:0><EOL>for i_start, i_end in iter_chunk_index(timeslice_size, t_chunksize):<EOL><INDENT>curr_time = np.around(i_start * self.t_step, decimals=<NUM_LIT:0>)<EOL>if curr_time > prev_time:<EOL><INDENT>print('<STR_LIT>' % curr_time, end='<STR_LIT>', flush=True)<EOL>prev_time = curr_time<EOL><DEDENT>em_chunk = self.emission[:, i_start:i_end]<EOL>times_chunk_s, par_index_chunk_s 
=self._sim_timestamps_populations(<EOL>em_chunk, max_rates, populations, bg_rates, i_start,<EOL>rs, scale)<EOL>ts_list.append(times_chunk_s)<EOL>part_list.append(par_index_chunk_s)<EOL><DEDENT>for ts, part in zip(ts_list, part_list):<EOL><INDENT>self._timestamps.append(ts)<EOL>self._tparticles.append(part)<EOL><DEDENT>self.ts_group._v_attrs['<STR_LIT>'] = rs.get_state()<EOL>self._timestamps.attrs['<STR_LIT>'] = rs.get_state()<EOL>self.ts_store.h5file.flush()<EOL>", "docstring": "Compute one timestamps array for a mixture of N populations.\n\n        Timestamp data are saved to disk and accessible as pytables arrays in\n        `._timestamps` and `._tparticles`.\n        The background generated timestamps are assigned a\n        conventional particle number (last particle index + 1).\n\n        Arguments:\n            max_rates (list): list of the peak max emission rate for each\n                population.\n            populations (list of slices): slices to `self.particles`\n                defining each population.\n            bg_rate (float, cps): rate for a Poisson background process\n            rs (RandomState object): random state object used as random number\n                generator. If None, use a random state initialized from seed.\n            seed (uint): when `rs` is None, `seed` is used to initialize the\n                random state, otherwise is ignored.\n            chunksize (int): chunk size used for the on-disk timestamp array\n            comp_filter (tables.Filter or None): compression filter to use\n                for the on-disk `timestamps` and `tparticles` arrays.\n                If None use default compression.\n            overwrite (bool): if True, overwrite any pre-existing timestamps\n                array. If False, never overwrite. 
The outcome of simulating an\n                existing array is controlled by `skip_existing` flag.\n            skip_existing (bool): if True, skip simulation if the same\n                timestamps array is already present.\n            scale (int): `self.t_step` is multiplied by `scale` to obtain the\n                timestamps units in seconds.\n            path (string): folder where to save the data.\n            timeslice (float or None): timestamps are simulated until\n                `timeslice` seconds. If None, simulate until `self.t_max`.", "id": "f15419:c5:m27"}
{"signature": "def hash(self):", "body": "hash_numeric = '<STR_LIT>' %(self.t_step, self.t_max, self.num_particles, self.concentration())<EOL>hash_list = [hash_numeric, self.particles.short_repr(), repr(self.box),<EOL>self.psf.hash()]<EOL>return hashlib.md5(repr(hash_list).encode()).hexdigest()<EOL>", "docstring": "Return an hash for the simulation parameters (excluding ID and EID)\n        This can be used to generate unique file names for simulations\n        that have the same parameters and just different ID or EID.", "id": "f15419:c5:m8"}
{"signature": "def sim_timetrace_bg2(emission, max_rate, bg_rate, t_step, rs=None):", "body": "if rs is None:<EOL><INDENT>rs = np.random.RandomState()<EOL><DEDENT>emiss_bin_rate = np.zeros((emission.shape[<NUM_LIT:0>] + <NUM_LIT:1>, emission.shape[<NUM_LIT:1>]),<EOL>dtype='<STR_LIT>')<EOL>emiss_bin_rate[:-<NUM_LIT:1>] = emission * max_rate * t_step<EOL>if bg_rate is not None:<EOL><INDENT>emiss_bin_rate[-<NUM_LIT:1>] = bg_rate * t_step<EOL>counts = rs.poisson(lam=emiss_bin_rate).astype('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>counts = rs.poisson(lam=emiss_bin_rate[:-<NUM_LIT:1>]).astype('<STR_LIT>')<EOL><DEDENT>return counts<EOL>", "docstring": "Draw random emitted photons from r.v. ~ Poisson(emission_rates).\n\n    This is an alternative implementation of :func:`sim_timetrace_bg`.", "id": "f15419:m6"}
{"signature": "def compact_name_core(self, hashsize=<NUM_LIT:6>, t_max=False):", "body": "Moles = self.concentration()<EOL>name = \"<STR_LIT>\" % (<EOL>self.particles.short_repr(), Moles * <NUM_LIT>, self.t_step * <NUM_LIT>)<EOL>if hashsize > <NUM_LIT:0>:<EOL><INDENT>name = self.hash()[:hashsize] + '<STR_LIT:_>' + name<EOL><DEDENT>if t_max:<EOL><INDENT>name += \"<STR_LIT>\" % self.t_max<EOL><DEDENT>return name<EOL>", "docstring": "Compact representation of simulation params (no ID, EID and t_max)", "id": "f15419:c5:m9"}
{"signature": "def sim_timestamps_em_list2(S, max_rate=<NUM_LIT:1>, bg_rate=<NUM_LIT:0>, rs=None, seed=None):", "body": "if rs is None:<EOL><INDENT>rs = np.random.RandomState(seed=seed)<EOL><DEDENT>fractions = [<NUM_LIT:5>, <NUM_LIT:2>, <NUM_LIT:8>, <NUM_LIT:4>, <NUM_LIT:9>, <NUM_LIT:1>, <NUM_LIT:7>, <NUM_LIT:3>, <NUM_LIT:6>, <NUM_LIT:9>, <NUM_LIT:0>, <NUM_LIT:5>, <NUM_LIT:2>, <NUM_LIT:8>, <NUM_LIT:4>, <NUM_LIT:9>]<EOL>scale = <NUM_LIT:10><EOL>max_counts = <NUM_LIT:4><EOL>S.all_times_chunks_list = []<EOL>S.all_par_chunks_list = []<EOL>for i_start, i_end in iter_chunk_index(S.n_samples,<EOL>S.emission.chunkshape[<NUM_LIT:1>]):<EOL><INDENT>counts_chunk = sim_timetrace_bg2(S.emission[:, i_start:i_end],<EOL>max_rate, bg_rate, S.t_step, rs=rs)<EOL>index = np.arange(<NUM_LIT:0>, counts_chunk.shape[<NUM_LIT:1>])<EOL>times_chunk_p = []      <EOL>par_index_chunk_p = []  <EOL>for p_i, counts_chunk_p_i in enumerate(counts_chunk.copy()):<EOL><INDENT>times_c_i = [(index[counts_chunk_p_i >= <NUM_LIT:1>] + i_start)*scale]<EOL>for frac, v in zip(fractions, range(<NUM_LIT:2>, max_counts + <NUM_LIT:1>)):<EOL><INDENT>times_c_i.append(<EOL>(index[counts_chunk_p_i >= v] + i_start)*scale + frac<EOL>)<EOL><DEDENT>t = np.hstack(times_c_i)<EOL>times_chunk_p.append(t)<EOL>par_index_chunk_p.append(np.full(t.size, p_i, dtype='<STR_LIT>'))<EOL><DEDENT>times_chunk_s = np.hstack(times_chunk_p)  <EOL>par_index_chunk_s = np.hstack(par_index_chunk_p)  <EOL>index_sort = times_chunk_s.argsort(kind='<STR_LIT>')<EOL>times_chunk_s = times_chunk_s[index_sort]<EOL>par_index_chunk_s = par_index_chunk_s[index_sort]<EOL>S.all_times_chunks_list.append(times_chunk_s)<EOL>S.all_par_chunks_list.append(par_index_chunk_s)<EOL><DEDENT>", "docstring": "Compute timestamps and particles and store results in a list.\n    Each element contains timestamps from one chunk of emission.\n    Background computed in sim_timetrace_bg2() as last fake particle.", "id": "f15421:m2"}
{"signature": "def em_rates_from_E_unique(em_rate_tot, E_values):", "body": "em_rates_d, em_rates_a = em_rates_from_E_DA(em_rate_tot, E_values)<EOL>return np.unique(np.hstack([em_rates_d, em_rates_a]))<EOL>", "docstring": "Array of unique emission rates for given total emission and E (FRET).", "id": "f15424:m2"}
{"signature": "def em_rates_from_E_DA_mix(em_rates_tot, E_values):", "body": "em_rates_d, em_rates_a = [], []<EOL>for em_rate_tot, E_value in zip(em_rates_tot, E_values):<EOL><INDENT>em_rate_di, em_rate_ai = em_rates_from_E_DA(em_rate_tot, E_value)<EOL>em_rates_d.append(em_rate_di)<EOL>em_rates_a.append(em_rate_ai)<EOL><DEDENT>return em_rates_d, em_rates_a<EOL>", "docstring": "D and A emission rates for two populations.", "id": "f15424:m3"}
{"signature": "def save_photon_hdf5(self, identity=None, overwrite=True, path=None):", "body": "filepath = self.filepath<EOL>if path is not None:<EOL><INDENT>filepath = Path(path, filepath.name)<EOL><DEDENT>self.merge_da()<EOL>data = self._make_photon_hdf5(identity=identity)<EOL>phc.hdf5.save_photon_hdf5(data, h5_fname=str(filepath),<EOL>overwrite=overwrite)<EOL>", "docstring": "Create a smFRET Photon-HDF5 file with current timestamps.", "id": "f15424:c0:m13"}
{"signature": "def merge_da(self):", "body": "print('<STR_LIT>', flush=True)<EOL>ts_d, ts_par_d = self.S.get_timestamps_part(self.name_timestamps_d)<EOL>ts_a, ts_par_a = self.S.get_timestamps_part(self.name_timestamps_a)<EOL>ts, a_ch, part = merge_da(ts_d, ts_par_d, ts_a, ts_par_a)<EOL>assert a_ch.sum() == ts_a.shape[<NUM_LIT:0>]<EOL>assert (~a_ch).sum() == ts_d.shape[<NUM_LIT:0>]<EOL>assert a_ch.size == ts_a.shape[<NUM_LIT:0>] + ts_d.shape[<NUM_LIT:0>]<EOL>self.ts, self.a_ch, self.part = ts, a_ch, part<EOL>self.clk_p = ts_d.attrs['<STR_LIT>']<EOL>", "docstring": "Merge donor and acceptor timestamps, computes `ts`, `a_ch`, `part`.", "id": "f15424:c0:m11"}
{"signature": "def __init__(self, datafile, path='<STR_LIT>', nparams=dict(), attr_params=dict(),<EOL>mode='<STR_LIT:r>'):", "body": "super().__init__(datafile, path=path, nparams=nparams,<EOL>attr_params=attr_params, mode=mode)<EOL>if mode != '<STR_LIT:r>':<EOL><INDENT>self.h5file.create_group('<STR_LIT:/>', '<STR_LIT>',<EOL>'<STR_LIT>')<EOL>self.h5file.create_group('<STR_LIT:/>', '<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>", "docstring": "Return a new HDF5 file to store simulation results.\n\n        The HDF5 file has two groups:\n        '/parameters'\n            containing all the simulation numeric-parameters\n\n        '/trajectories'\n            containing simulation trajectories (positions, emission traces)\n\n        If `mode='w'`, `datafile` will be overwritten (if exists).", "id": "f15425:c2:m0"}
{"signature": "def __init__(self, datafile, path='<STR_LIT>', nparams=dict(), attr_params=dict(),<EOL>mode='<STR_LIT:r>'):", "body": "if isinstance(datafile, Path):<EOL><INDENT>self.filepath = datafile<EOL><DEDENT>else:<EOL><INDENT>if not Path(path).exists():<EOL><INDENT>raise ValueError('<STR_LIT>' % path)<EOL><DEDENT>self.filepath = Path(path, datafile)<EOL><DEDENT>self.h5file = tables.open_file(str(self.filepath), mode=mode)<EOL>self.filename = str(self.filepath)<EOL>if mode == '<STR_LIT:w>':<EOL><INDENT>self.h5file.title = \"<STR_LIT>\"<EOL>self.h5file.create_group('<STR_LIT:/>', '<STR_LIT>', '<STR_LIT>')<EOL>self.set_sim_params(nparams, attr_params)<EOL><DEDENT>", "docstring": "Return a new HDF5 file to store simulation results.\n\n        The HDF5 file has two groups:\n        '/parameters'\n            containing all the simulation numeric-parameters\n\n        If `mode='w'`, `datafile` will be overwritten (if exists).", "id": "f15425:c1:m1"}
{"signature": "def add_trajectory(self, name, overwrite=False, shape=(<NUM_LIT:0>,), title='<STR_LIT>',<EOL>chunksize=<NUM_LIT:2>**<NUM_LIT>, comp_filter=default_compression,<EOL>atom=tables.Float64Atom(), params=dict(),<EOL>chunkslice='<STR_LIT>'):", "body": "group = self.h5file.root.trajectories<EOL>if name in group:<EOL><INDENT>print(\"<STR_LIT>\" % name, end='<STR_LIT>')<EOL>if overwrite:<EOL><INDENT>self.h5file.remove_node(group, name)<EOL>print(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return group.get_node(name)<EOL><DEDENT><DEDENT>nparams = self.numeric_params<EOL>num_t_steps = nparams['<STR_LIT>'] / nparams['<STR_LIT>']<EOL>chunkshape = self.calc_chunkshape(chunksize, shape, kind=chunkslice)<EOL>store_array = self.h5file.create_earray(<EOL>group, name, atom=atom,<EOL>shape = shape,<EOL>chunkshape = chunkshape,<EOL>expectedrows = num_t_steps,<EOL>filters = comp_filter,<EOL>title = title)<EOL>for key, value in params.items():<EOL><INDENT>store_array.set_attr(key, value)<EOL><DEDENT>store_array.set_attr('<STR_LIT>', __version__)<EOL>store_array.set_attr('<STR_LIT>', current_time())<EOL>return store_array<EOL>", "docstring": "Add an trajectory array in '/trajectories'.", "id": "f15425:c2:m1"}
{"signature": "def __init__(self, datafile, path='<STR_LIT>', nparams=dict(), attr_params=dict(),<EOL>mode='<STR_LIT:r>'):", "body": "super().__init__(datafile, path=path, nparams=nparams,<EOL>attr_params=attr_params, mode=mode)<EOL>if mode != '<STR_LIT:r>':<EOL><INDENT>if '<STR_LIT>' not in self.h5file.root:<EOL><INDENT>self.h5file.create_group('<STR_LIT:/>', '<STR_LIT>',<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>", "docstring": "Return a new HDF5 file to store simulation results.\n\n        The HDF5 file has two groups:\n        '/parameters'\n            containing all the simulation numeric-parameters\n\n        '/timestamps'\n            containing simulated timestamps\n\n        If `overwrite=True` (default) `datafile` is overwritten (if exists).", "id": "f15425:c3:m0"}
{"signature": "def add_position(self, radial=False, chunksize=<NUM_LIT:2>**<NUM_LIT>, chunkslice='<STR_LIT>',<EOL>comp_filter=default_compression, overwrite=False,<EOL>params=dict()):", "body": "nparams = self.numeric_params<EOL>num_particles = nparams['<STR_LIT>']<EOL>name, ncoords, prefix = '<STR_LIT>', <NUM_LIT:3>, '<STR_LIT>'<EOL>if radial:<EOL><INDENT>name, ncoords, prefix = '<STR_LIT>', <NUM_LIT:2>, '<STR_LIT>'<EOL><DEDENT>title = '<STR_LIT>' % prefix<EOL>return self.add_trajectory(name, shape=(num_particles, ncoords, <NUM_LIT:0>),<EOL>overwrite=overwrite, chunksize=chunksize,<EOL>comp_filter=comp_filter,<EOL>atom=tables.Float32Atom(),<EOL>title=title,<EOL>params=params)<EOL>", "docstring": "Add the `position` array in '/trajectories'.", "id": "f15425:c2:m4"}
{"signature": "def open(self):", "body": "self.__init__(self.h5file.filename, mode='<STR_LIT:r>')<EOL>", "docstring": "Reopen a file after has been closed (uses the store filename).", "id": "f15425:c1:m3"}
{"signature": "@property<EOL><INDENT>def numeric_params_meta(self):<DEDENT>", "body": "nparams = dict()<EOL>for p in self.h5file.root.parameters:<EOL><INDENT>nparams[p.name] = (p.read(), p.title)<EOL><DEDENT>return nparams<EOL>", "docstring": "Return a dict with all parameters and metadata in '/parameters'.\n\n        This returns the same dict format as returned by get_params() method\n        in ParticlesSimulation().", "id": "f15425:c1:m6"}
{"signature": "def add_emission_tot(self, chunksize=<NUM_LIT:2>**<NUM_LIT>, comp_filter=default_compression,<EOL>overwrite=False, params=dict(),<EOL>chunkslice='<STR_LIT>'):", "body": "kwargs = dict(overwrite=overwrite, chunksize=chunksize, params=params,<EOL>comp_filter=comp_filter, atom=tables.Float32Atom(),<EOL>title='<STR_LIT>')<EOL>return self.add_trajectory('<STR_LIT>', **kwargs)<EOL>", "docstring": "Add the `emission_tot` array in '/trajectories'.", "id": "f15425:c2:m2"}
{"signature": "def set_sim_params(self, nparams, attr_params):", "body": "for name, value in nparams.items():<EOL><INDENT>val = value[<NUM_LIT:0>] if value[<NUM_LIT:0>] is not None else '<STR_LIT:none>'<EOL>self.h5file.create_array('<STR_LIT>', name, obj=val,<EOL>title=value[<NUM_LIT:1>])<EOL><DEDENT>for name, value in attr_params.items():<EOL><INDENT>self.h5file.set_node_attr('<STR_LIT>', name, value)<EOL><DEDENT>", "docstring": "Store parameters in `params` in `h5file.root.parameters`.\n\n        `nparams` (dict)\n            A dict as returned by `get_params()` in `ParticlesSimulation()`\n            The format is:\n            keys:\n                used as parameter name\n            values: (2-elements tuple)\n                first element is the parameter value\n                second element is a string used as \"title\" (description)\n        `attr_params` (dict)\n            A dict whole items are stored as attributes in '/parameters'", "id": "f15425:c1:m4"}
{"signature": "def check_clean_status(git_path=None):", "body": "output = get_status(git_path)<EOL>is_unmodified = (len(output.strip()) == <NUM_LIT:0>)<EOL>return is_unmodified<EOL>", "docstring": "Returns whether there are uncommitted changes in the working dir.", "id": "f15427:m4"}
{"signature": "def git_path_valid(git_path=None):", "body": "if git_path is None and GIT_PATH is None:<EOL><INDENT>return False<EOL><DEDENT>if git_path is None: git_path = GIT_PATH<EOL>try:<EOL><INDENT>call([git_path, '<STR_LIT>'])<EOL>return True<EOL><DEDENT>except OSError:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Check whether the git executable is found.", "id": "f15427:m1"}
{"signature": "def print_summary(string='<STR_LIT>', git_path=None):", "body": "if git_path is None: git_path = GIT_PATH<EOL>if not git_path_valid():<EOL><INDENT>print('<STR_LIT>' % string)<EOL><DEDENT>else:<EOL><INDENT>last_commit = get_last_commit_line()<EOL>print('<STR_LIT>'.format(string, last_commit))<EOL>if not check_clean_status():<EOL><INDENT>print('<STR_LIT>')<EOL>print(get_status())<EOL><DEDENT><DEDENT>", "docstring": "Print the last commit line and eventual uncommitted changes.", "id": "f15427:m7"}
{"signature": "def get_last_commit_line(git_path=None):", "body": "if git_path is None: git_path = GIT_PATH<EOL>output = check_output([git_path, \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\"])<EOL>return output.strip()[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL>", "docstring": "Get one-line description of HEAD commit for repository in current dir.", "id": "f15427:m5"}
{"signature": "def print_children(data_file, group='<STR_LIT:/>'):", "body": "base = data_file.get_node(group)<EOL>print ('<STR_LIT>' % base)<EOL>for node in base._f_walk_groups():<EOL><INDENT>if node is not base:<EOL><INDENT>print ('<STR_LIT>' % node)<EOL><DEDENT><DEDENT>print ('<STR_LIT>' % group)<EOL>for node in base._v_leaves.itervalues():<EOL><INDENT>info = node.shape<EOL>if len(info) == <NUM_LIT:0>:<EOL><INDENT>info = node.read()<EOL><DEDENT>print ('<STR_LIT>' % (node.name, info))<EOL>if len(node.title) > <NUM_LIT:0>:<EOL><INDENT>print ('<STR_LIT>' % node.title)<EOL><DEDENT><DEDENT>", "docstring": "Print all the sub-groups in `group` and leaf-nodes children of `group`.\n\n    Parameters:\n        data_file (pytables HDF5 file object): the data file to print\n        group (string): path name of the group to be printed.\n            Default: '/', the root node.", "id": "f15428:m1"}
{"signature": "def iter_chunk_index(num_samples, chunksize):", "body": "i = <NUM_LIT:0><EOL>for c_size in iter_chunksize(num_samples, chunksize):<EOL><INDENT>yield i, i + c_size<EOL>i += c_size<EOL><DEDENT>", "docstring": "Iterator used to iterate in chunks over an array of size `num_samples`.\n\n    At each iteration returns a start and stop index for a slice of size\n    `chunksize`. In the last iteration the slice may be smaller.", "id": "f15429:m2"}
{"signature": "def iter_chunksize(num_samples, chunksize):", "body": "last_chunksize = int(np.mod(num_samples, chunksize))<EOL>chunksize = int(chunksize)<EOL>for _ in range(int(num_samples) // chunksize):<EOL><INDENT>yield chunksize<EOL><DEDENT>if last_chunksize > <NUM_LIT:0>:<EOL><INDENT>yield last_chunksize<EOL><DEDENT>", "docstring": "Iterator used to iterate in chunks over an array of size `num_samples`.\n    At each iteration returns `chunksize` except for the last iteration.", "id": "f15429:m0"}
{"signature": "def __init__(self, fname='<STR_LIT>',<EOL>dir_=None, x_step=<NUM_LIT:0.5> / <NUM_LIT:8>, z_step=<NUM_LIT:0.5> / <NUM_LIT:8>,<EOL>psf_pytables=None):", "body": "if psf_pytables is not None:<EOL><INDENT>self.psflab_psf_raw = psf_pytables[:]<EOL>for name in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>setattr(self, name, psf_pytables.get_attr(name))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.fname = fname<EOL>if dir_ is None:<EOL><INDENT>dir_ = pkg_resources.resource_filename('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>self.dir_ = dir_<EOL>self.x_step, self.z_step = x_step, z_step<EOL>self.psflab_psf_raw = load_PSFLab_file('<STR_LIT:/>'.join([dir_, fname]))<EOL><DEDENT>xi, zi, hdata, zm = convert_PSFLab_xz(self.psflab_psf_raw,<EOL>x_step=x_step, z_step=z_step,<EOL>normalize=True)<EOL>self._fun_um = SI.RectBivariateSpline(xi, zi, hdata.T, kx=<NUM_LIT:1>, ky=<NUM_LIT:1>)<EOL>self.xi, self.zi, self.hdata, self.zm = xi, zi, hdata, zm<EOL>self.x_step, self.z_step = xi[<NUM_LIT:1>] - xi[<NUM_LIT:0>], zi[<NUM_LIT:1>] - zi[<NUM_LIT:0>]<EOL>self.kind = '<STR_LIT>'<EOL>", "docstring": "Create a PSF object for interpolation from numeric data.\n\n        `dir_+fname`: should be a valid path\n\n        If `dir_` is None use the \"system\" folder where the PSF shipped with\n        pybromo are placed.", "id": "f15431:c1:m0"}
{"signature": "def get_bromo_fnames_da(d_em_kHz, d_bg_kHz, a_em_kHz, a_bg_kHz,<EOL>ID='<STR_LIT>', t_tot='<STR_LIT>', num_p='<STR_LIT>', pM='<STR_LIT>',<EOL>t_step=<NUM_LIT>, D=<NUM_LIT>, dir_='<STR_LIT>'):", "body": "clk_p = t_step/<NUM_LIT> <EOL>E_sim = <NUM_LIT:1.>*a_em_kHz/(a_em_kHz + d_em_kHz)<EOL>FRET_val = <NUM_LIT>*E_sim<EOL>print(\"<STR_LIT>\" % FRET_val)<EOL>d_em_kHz_str = \"<STR_LIT>\" % d_em_kHz<EOL>a_em_kHz_str = \"<STR_LIT>\" % a_em_kHz<EOL>d_bg_kHz_str = \"<STR_LIT>\" % d_bg_kHz<EOL>a_bg_kHz_str = \"<STR_LIT>\" % a_bg_kHz<EOL>print(\"<STR_LIT>\" % (d_em_kHz_str, d_bg_kHz_str))<EOL>print(\"<STR_LIT>\" % (a_em_kHz_str, a_bg_kHz_str))<EOL>fname_d = ('<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>em=d_em_kHz_str, bg=d_bg_kHz_str, t_tot=t_tot, pM=pM,<EOL>np=num_p, ID=ID, ts_us=t_step*<NUM_LIT>, D=D)<EOL>fname_a = ('<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>em=a_em_kHz_str, bg=a_bg_kHz_str, t_tot=t_tot, pM=pM,<EOL>np=num_p, ID=ID, ts_us=t_step*<NUM_LIT>, D=D)<EOL>print(fname_d)<EOL>print(fname_a)<EOL>name = ('<STR_LIT>'<EOL>'<STR_LIT>').format(FRET_val, d_bg_kHz, a_bg_kHz, d_em_kHz)<EOL>return dir_+fname_d, dir_+fname_a, name, clk_p, E_sim<EOL>", "docstring": "Get filenames for donor and acceptor timestamps for the given parameters", "id": "f15432:m0"}
{"signature": "def __init__(self, fig, page_step=<NUM_LIT:1>):", "body": "<EOL>self.fig = fig<EOL>self.page_step = page_step <EOL>self.xmin, self.xmax = fig.axes[<NUM_LIT:0>].get_xlim()<EOL>self.width = min(<NUM_LIT:1>, self.xmax-self.xmin) <EOL>self.pos = <NUM_LIT:0>   <EOL>self.scale = <NUM_LIT> <EOL>self.ax = self.fig.axes[<NUM_LIT:0>]<EOL>self.draw = self.fig.canvas.draw<EOL>QMainWin = fig.canvas.parent()<EOL>toolbar = QtGui.QToolBar(QMainWin)<EOL>QMainWin.addToolBar(QtCore.Qt.BottomToolBarArea, toolbar)<EOL>self.set_slider(toolbar)<EOL>self.set_spinbox(toolbar)<EOL>self.ax.set_xlim(self.pos,self.pos+self.width)<EOL>self.draw()<EOL>", "docstring": "Make a scrolling x axis on figure `fig`.\n        `page_step` is the multiplier for page-step scrolling.", "id": "f15435:c2:m0"}
{"signature": "@staticmethod<EOL><INDENT>def from_ymd(year, month, day):<DEDENT>", "body": "return BaseDateDatetimeDate(year, month, day)<EOL>", "docstring": "converts date as `(year, month, day)` tuple into Microsoft Excel representation style\n\n:param tuple(int, int, int): int tuple `year, month, day`\n:return BaseDatetimeDate:", "id": "f15440:c1:m1"}
{"signature": "@staticmethod<EOL><INDENT>def diff_in_years(start, end):<DEDENT>", "body": "return BaseDateDatetimeDate.diff_in_days(start, end) / DAYS_IN_YEAR<EOL>", "docstring": "calculate difference between given dates in years. The difference corresponds to Act/365.25 year fraction\n\n:param BaseDateDatetimeDate start: state date\n:param BaseDateDatetimeDate end: end date\n:return float: difference between end date and start date in years", "id": "f15440:c1:m6"}
{"signature": "def from_ymd_to_excel(year, month, day):", "body": "if not is_valid_ymd(year, month, day):<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(year, month, day))<EOL><DEDENT>days = _cum_month_days[month - <NUM_LIT:1>] + day<EOL>days += <NUM_LIT:1> if (is_leap_year(year) and month > <NUM_LIT:2>) else <NUM_LIT:0><EOL>years_distance = year - <NUM_LIT><EOL>days += years_distance * <NUM_LIT> +(years_distance + <NUM_LIT:3>) // <NUM_LIT:4> - (years_distance + <NUM_LIT>) // <NUM_LIT:100> + (years_distance + <NUM_LIT>) // <NUM_LIT><EOL>days += <NUM_LIT:1> if (year, month, day) > (<NUM_LIT>, <NUM_LIT:2>, <NUM_LIT>) else <NUM_LIT:0><EOL>return days<EOL>", "docstring": "converts date as `(year, month, day)` tuple into Microsoft Excel representation style\n\n:param tuple(int, int, int): int tuple `year, month, day`\n:return int:", "id": "f15440:m5"}
{"signature": "@staticmethod<EOL><INDENT>def add_years(d, years_int):<DEDENT>", "body": "y, m, d = BaseDateDatetimeDate.to_ymd(d)<EOL>y += years_int<EOL>if not is_leap_year(y) and m == <NUM_LIT:2>:<EOL><INDENT>d = min(<NUM_LIT>, d)<EOL><DEDENT>return BaseDateDatetimeDate.from_ymd(y, m, d)<EOL>", "docstring": "addition of a number of years\n\n:param BaseDateDatetimeDate d:\n:param int years_int:\n:return BaseDatetimeDate:", "id": "f15440:c1:m4"}
{"signature": "@staticmethod<EOL><INDENT>def add_years(d, years_int):<DEDENT>", "body": "y, m, d = BaseDate.to_ymd(d)<EOL>if not is_leap_year(years_int) and m == <NUM_LIT:2>:<EOL><INDENT>d = min(<NUM_LIT>, d)<EOL><DEDENT>return BaseDateFloat.from_ymd(y + years_int, m, d)<EOL>", "docstring": "adds number of years to a date\n:param BaseDateFloat d: date to add years to\n:param int years_int: number of years to add\n:return BaseDate: resulting date", "id": "f15440:c0:m6"}
{"signature": "def __new__(cls, *args):", "body": "new = super(BaseDateTuple, cls).__new__(cls)<EOL>if BaseDateTuple._is_valid_args(args):<EOL><INDENT>if isinstance(args[<NUM_LIT:0>], BaseDateTuple):<EOL><INDENT>new.date = args[<NUM_LIT:0>].date<EOL><DEDENT>else:<EOL><INDENT>new.date = (args[<NUM_LIT:0>], args[<NUM_LIT:1>], args[<NUM_LIT:2>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>msg = str(args) + \"<STR_LIT>\"<EOL>raise ValueError(msg)<EOL><DEDENT>return new<EOL>", "docstring": ":param args: should be three ints (year, month, day)", "id": "f15440:c2:m1"}
{"signature": "@staticmethod<EOL><INDENT>def from_ymd(year, month, day):<DEDENT>", "body": "return BaseDate(from_ymd_to_excel(year, month, day))<EOL>", "docstring": "creates date for year, month and day\n:param int year:\n:param int month:\n:param int day:\n:return BaseDate:", "id": "f15440:c0:m3"}
{"signature": "@property<EOL><INDENT>def year(self):<DEDENT>", "body": "return BaseDateFloat.to_ymd(self)[<NUM_LIT:0>]<EOL>", "docstring": "year of date\n:return int:", "id": "f15440:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def diff_in_days(start, end):<DEDENT>", "body": "diff = from_ymd_to_excel(*end.date)-from_ymd_to_excel(*start.date)<EOL>return float(diff)<EOL>", "docstring": "calculate difference between given dates in days\n\n:param BaseDateTuple start: state date\n:param BaseDateTuple end: end date\n:return float: difference between end date and start date in days", "id": "f15440:c2:m9"}
{"signature": "@staticmethod<EOL><INDENT>def diff_in_days(start, end):<DEDENT>", "body": "return super(BaseDateFloat, end).__sub__(start)<EOL>", "docstring": "returns distance of two dates as number of days\n:param BaseDateFloat start: start date\n:param BaseDateFloat end: end date\n:return float: difference between end date and start date in days", "id": "f15440:c0:m7"}
{"signature": "def is_leap_year(year):", "body": "return (year % <NUM_LIT:4> == <NUM_LIT:0> and year % <NUM_LIT:100> != <NUM_LIT:0>) or (year % <NUM_LIT> == <NUM_LIT:0>)<EOL>", "docstring": "returns True for leap year and False otherwise\n\n:param int year: calendar year\n:return bool:", "id": "f15440:m0"}
{"signature": "def is_valid_ymd(year, month, day):", "body": "return <NUM_LIT:1> <= month <= <NUM_LIT:12> and <NUM_LIT:1> <= day <= days_in_month(year, month) and year >= <NUM_LIT><EOL>", "docstring": "return True if (year,month, day) can be represented in Excel-notation\n(number of days since 30.12.1899) for calendar days, otherwise False\n\n:param int year: calendar year\n:param int month: calendar month\n:param int day: calendar day\n:return bool:", "id": "f15440:m3"}
{"signature": "@property<EOL><INDENT>def day(self):<DEDENT>", "body": "return BaseDateFloat.to_ymd(self)[<NUM_LIT:2>]<EOL>", "docstring": "day of date\n:return int:", "id": "f15440:c0:m0"}
{"signature": "def days_in_year(year):", "body": "return <NUM_LIT> if is_leap_year(year) else <NUM_LIT><EOL>", "docstring": "returns number of days in the given calendar year\n\n:param int year: calendar year\n:return int:", "id": "f15440:m1"}
{"signature": "@staticmethod<EOL><INDENT>def diff_in_days(start, end):<DEDENT>", "body": "diff = date(end.year, end.month, end.day) - date(start.year, start.month, start.day)<EOL>return float(diff.days)<EOL>", "docstring": "calculate difference between given dates in days\n\n:param BaseDateDatetimeDate start: state date\n:param BaseDateDatetimeDate end: end date\n:return float: difference between end date and start date in days", "id": "f15440:c1:m5"}
{"signature": "@staticmethod<EOL><INDENT>def is_businessdate(in_date):<DEDENT>", "body": "<EOL>if not isinstance(in_date, BaseDate):<EOL><INDENT>try:  <EOL><INDENT>in_date = BusinessDate(in_date)<EOL><DEDENT>except:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>y, m, d, = in_date.to_ymd()<EOL>return is_valid_ymd(y, m, d)<EOL>", "docstring": "checks whether the provided date is a date\n:param BusinessDate, int or float in_date:\n:return bool:", "id": "f15441:c2:m25"}
{"signature": "def add_business_days(self, days_int, holiday_obj=None):", "body": "res = self<EOL>if days_int >= <NUM_LIT:0>:<EOL><INDENT>count = <NUM_LIT:0><EOL>while count < days_int:<EOL><INDENT>res = BusinessDate.add_days(res, <NUM_LIT:1>)<EOL>if BusinessDate.is_business_day(res, holiday_obj):<EOL><INDENT>count += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>count = <NUM_LIT:0><EOL>while count > days_int:<EOL><INDENT>res = BusinessDate.add_days(res, -<NUM_LIT:1>)<EOL>if BusinessDate.is_business_day(res, holiday_obj):<EOL><INDENT>count -= <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return res<EOL>", "docstring": "private method for the addition of business days, used in the addition of a BusinessPeriod only\n\n:param BusinessDate d:\n:param int days_int:\n:param list holiday_obj:\n:return: BusinessDate", "id": "f15441:c2:m35"}
{"signature": "def diff(self, end_date):", "body": "if end_date < self:<EOL><INDENT>y, m, d = BusinessDate.diff(end_date, self)<EOL>return -y, -m, -d<EOL><DEDENT>y = end_date.year - self.year<EOL>m = end_date.month - self.month<EOL>while m < <NUM_LIT:0>:<EOL><INDENT>y -= <NUM_LIT:1><EOL>m += <NUM_LIT:12><EOL><DEDENT>while m > <NUM_LIT:12>:<EOL><INDENT>y += <NUM_LIT:1><EOL>m -= <NUM_LIT:12><EOL><DEDENT>s = BusinessDate.add_years(BusinessDate.add_months(self, m), y)<EOL>d = BusinessDate.diff_in_days(s, end_date)<EOL>if d < <NUM_LIT:0>:<EOL><INDENT>m -= <NUM_LIT:1><EOL>if m < <NUM_LIT:0>:<EOL><INDENT>y -= <NUM_LIT:1><EOL>m += <NUM_LIT:12><EOL><DEDENT>s = BusinessDate.add_years(BusinessDate.add_months(self, m), y)<EOL><DEDENT>d = BusinessDate.diff_in_days(s, end_date)<EOL>return -int(y), -int(m), -int(d)<EOL>", "docstring": "difference expressed as a tuple of years, months, days\n(see also the python lib dateutils.relativedelta)\n\n:param BusinessDate start_date:\n:param BusinessDate end_date:\n:return (int, int, int):", "id": "f15441:c2:m36"}
{"signature": "def get_act_act(self, end):", "body": "<EOL>if end.year - self.year == <NUM_LIT:0>:<EOL><INDENT>if BusinessDate.is_leap_year(self.year):<EOL><INDENT>return BusinessDate.diff_in_days(self, end) / <NUM_LIT>  <EOL><DEDENT>else:<EOL><INDENT>return BusinessDate.diff_in_days(self, end) / <NUM_LIT>  <EOL><DEDENT><DEDENT>else:<EOL><INDENT>rest_year1 = BusinessDate.diff_in_days(self, BusinessDate(<EOL>date(self.year, <NUM_LIT:12>, <NUM_LIT>))) + <NUM_LIT:1>  <EOL>rest_year2 = abs(BusinessDate.diff_in_days(end, BusinessDate(<EOL>date(end.year, <NUM_LIT:1>, <NUM_LIT:1>))))  <EOL>years_in_between = end.year - self.year - <NUM_LIT:1><EOL>return years_in_between + rest_year1 / (<NUM_LIT> if is_leap_year(self.year) else <NUM_LIT>) + rest_year2 / (<EOL><NUM_LIT> if is_leap_year(end.year) else <NUM_LIT>)<EOL><DEDENT>", "docstring": "implements Act/Act day count convention (4.16(b) 2006 ISDA Definitions)", "id": "f15441:c2:m41"}
{"signature": "def to_string(self, date_format=DATE_FORMAT):", "body": "return self.to_date().strftime(date_format)<EOL>", "docstring": "return BusinessDate as 'date.strftime(DATE_FORMAT)'\n\n:return string:", "id": "f15441:c2:m21"}
{"signature": "def adjust_previous(self, holidays_obj=None):", "body": "while not BusinessDate.is_business_day(self, holidays_obj):<EOL><INDENT>self = BusinessDate.add_days(self, -<NUM_LIT:1>)<EOL><DEDENT>return self<EOL>", "docstring": "adjusts to Business Day Convention \"Preceding\" (4.12(a) (iii) 2006 ISDA Definitions).", "id": "f15441:c2:m44"}
{"signature": "def get_30_360(self, end):", "body": "start_day = min(self.day, <NUM_LIT:30>)<EOL>end_day = <NUM_LIT:30> if (start_day == <NUM_LIT:30> and end.day == <NUM_LIT>) else end.day<EOL>return (<NUM_LIT> * (end.year - self.year) + <NUM_LIT:30> * (end.month - self.month) + (end_day - start_day)) / <NUM_LIT><EOL>", "docstring": "implements 30/360 Day Count Convention (4.16(f) 2006 ISDA Definitions)", "id": "f15441:c2:m37"}
{"signature": "def __init__(self, start, end, step, roll=None):", "body": "if not roll:<EOL><INDENT>roll = end<EOL><DEDENT>if not isinstance(start, BusinessDate):<EOL><INDENT>if isinstance(start, BusinessPeriod):<EOL><INDENT>start = start.to_businessdate()<EOL><DEDENT>else:<EOL><INDENT>start = BusinessDate(start)<EOL><DEDENT><DEDENT>if not isinstance(end, BusinessDate):<EOL><INDENT>if isinstance(end, BusinessPeriod):<EOL><INDENT>end = end.to_businessdate()<EOL><DEDENT>else:<EOL><INDENT>end = BusinessDate(end)<EOL><DEDENT><DEDENT>super(BusinessSchedule, self).__init__(start, end, step, roll)<EOL>if start not in self:<EOL><INDENT>self.insert(<NUM_LIT:0>, start)<EOL><DEDENT>if end not in self:<EOL><INDENT>self.append(end)<EOL><DEDENT>", "docstring": "class to build date schedules incl. start and end date\n\n:param BusinessDate start: start date of schedule\n:param BusinessDate end: end date of schedule\n:param BusinessPeriod step: period distance of two dates\n:param BusinessDate roll: origin of schedule\n\nconvenient class to build date schedules\na schedule includes always start and end date\nand rolls on roll, i.e. builds a sequence by\nadding and/or substracting step to/from roll.\nstart and end slice the relevant dates.", "id": "f15441:c5:m0"}
{"signature": "def __cmp__(self, other):", "body": "assert type(self) == type(other), \"<STR_LIT>\" % str((type(self), type(other)))<EOL>s = (self.years * <NUM_LIT:12> + self.months) * <NUM_LIT> + self.days<EOL>o = (other.years * <NUM_LIT:12> + other.months) * <NUM_LIT> + other.days<EOL>return s - o<EOL>", "docstring": "compare BusinessPeriods, comparison by (years*12+months)*31+days\n\n        :param BusinessPeriod other:\n        :return: int", "id": "f15441:c3:m8"}
{"signature": "@staticmethod<EOL><INDENT>def from_string(date_str):<DEDENT>", "body": "if date_str.count('<STR_LIT:->'):<EOL><INDENT>str_format = '<STR_LIT>'<EOL><DEDENT>elif date_str.count('<STR_LIT:.>'):<EOL><INDENT>str_format = '<STR_LIT>'<EOL><DEDENT>elif date_str.count('<STR_LIT:/>'):<EOL><INDENT>str_format = '<STR_LIT>'<EOL><DEDENT>elif len(date_str) == <NUM_LIT:8>:<EOL><INDENT>str_format = '<STR_LIT>'<EOL><DEDENT>elif len(date_str) == <NUM_LIT:4>:<EOL><INDENT>year = ord(date_str[<NUM_LIT:0>]) * <NUM_LIT> + ord(date_str[<NUM_LIT:1>])<EOL>month = ord(date_str[<NUM_LIT:2>])<EOL>day = ord(date_str[<NUM_LIT:3>])<EOL>return BusinessDate.from_ymd(year, month, day)<EOL><DEDENT>else:<EOL><INDENT>msg = \"<STR_LIT>\" + date_str + \"<STR_LIT>\"<EOL>raise ValueError(msg)<EOL><DEDENT>d = datetime.strptime(date_str, str_format)<EOL>return BusinessDate.from_ymd(d.year, d.month, d.day)<EOL>", "docstring": "construction from the following string patterns\n'%Y-%m-%d'\n'%d.%m.%Y'\n'%m/%d/%Y'\n'%Y%m%d'\n\n:param str date_str:\n:return BusinessDate:", "id": "f15441:c2:m12"}
{"signature": "@staticmethod<EOL><INDENT>def is_leap_year(year):<DEDENT>", "body": "return is_leap_year(year)<EOL>", "docstring": "returns True for leap year and False otherwise\n\n:param int year: calendar year\n:return bool:", "id": "f15441:c2:m22"}
{"signature": "def adjust_follow(self, holidays_obj=None):", "body": "while not BusinessDate.is_business_day(self, holidays_obj):<EOL><INDENT>self = BusinessDate.add_days(self, <NUM_LIT:1>)<EOL><DEDENT>return self<EOL>", "docstring": "adjusts to Business Day Convention \"Following\" (4.12(a) (i) 2006 ISDA Definitions).", "id": "f15441:c2:m45"}
{"signature": "def __sub__(self, other):", "body": "if isinstance(other, BusinessDate):<EOL><INDENT>y, m, d = self.diff(other)<EOL>return BusinessPeriod(years=y, months=m, days=d)<EOL><DEDENT>elif isinstance(other, BusinessPeriod):<EOL><INDENT>return self.add_period(-<NUM_LIT:1> * other)<EOL><DEDENT>elif BusinessDate.is_businessdate(other):<EOL><INDENT>return self - BusinessDate(other)<EOL><DEDENT>elif BusinessPeriod.is_businessperiod(other):<EOL><INDENT>return self - BusinessPeriod(other)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>' % type(other))<EOL><DEDENT>", "docstring": "subtraction of BusinessDate.\n\n:param object other: can be other BusinessDate, BusinessPeriod or any thing that might be casted to those.", "id": "f15441:c2:m4"}
{"signature": "def get_act_365(self, end):", "body": "return BusinessDate.diff_in_days(self, end) / <NUM_LIT><EOL>", "docstring": "implements Act/365 day count convention (4.16(d) 2006 ISDA Definitions)", "id": "f15441:c2:m39"}
{"signature": "def __init__(self, iterable=None):", "body": "if iterable:<EOL><INDENT>super(BusinessHolidays, self).__init__(BusinessDate(iterable))<EOL><DEDENT>else:<EOL><INDENT>super(BusinessHolidays, self).__init__()<EOL><DEDENT>", "docstring": ":param iterable iterable: sequence of holiday dates", "id": "f15441:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def days_in_month(year, month):<DEDENT>", "body": "return days_in_month(year, month)<EOL>", "docstring": "returns number of days for the given year and month\n\n:param int year: calendar year\n:param int month: calendar month\n:return int:", "id": "f15441:c2:m24"}
{"signature": "@staticmethod<EOL><INDENT>def days_in_year(year):<DEDENT>", "body": "return days_in_year(year)<EOL>", "docstring": "returns number of days in the given calendar year\n\n:param int year: calendar year\n:return int:", "id": "f15441:c2:m23"}
{"signature": "def get_30E_360(self, end):", "body": "y1, m1, d1 = self.to_ymd()<EOL>y2, m2, d2 = end.add_days(<NUM_LIT:0>).to_ymd()<EOL>d1 = min(d1, <NUM_LIT:30>)<EOL>d2 = min(d2, <NUM_LIT:30>)<EOL>return (<NUM_LIT> * (y2 - y1) + <NUM_LIT:30> * (m2 - m1) + (d2 - d1)) / <NUM_LIT><EOL>", "docstring": "implements the 30E/360 Day Count Convention (4.16(g) 2006 ISDA Definitons)", "id": "f15441:c2:m42"}
{"signature": "def get_act_360(self, end):", "body": "return BusinessDate.diff_in_days(self, end) / <NUM_LIT><EOL>", "docstring": "implements Act/360 day count convention (4.16(e) 2006 ISDA Definitions)", "id": "f15441:c2:m40"}
{"signature": "def adjust_mod_previous(self, holidays_obj=None):", "body": "month = self.month<EOL>new = BusinessDate.adjust_previous(self, holidays_obj)<EOL>if month != new.month:<EOL><INDENT>new = BusinessDate.adjust_follow(self, holidays_obj)<EOL><DEDENT>self = new<EOL>return self<EOL>", "docstring": "ajusts to Business Day Convention \"Modified Preceding\" (not in 2006 ISDA Definitons).", "id": "f15441:c2:m47"}
{"signature": "def __new__(cls, date_value=None):", "body": "if date_value is None:<EOL><INDENT>new_date = BusinessDate(BASE_DATE)<EOL><DEDENT>elif isinstance(date_value, BaseDateFloat):<EOL><INDENT>return super(BusinessDate, cls).__new__(cls, float(date_value))<EOL><DEDENT>elif isinstance(date_value, (BaseDateDatetimeDate, BaseDateTuple)):<EOL><INDENT>return super(BusinessDate, cls).__new__(cls, date_value.year, date_value.month, date_value.day)<EOL><DEDENT>elif isinstance(date_value, (int, float)):<EOL><INDENT>if date_value >= <NUM_LIT>:<EOL><INDENT>new_date = BusinessDate.from_string(str(date_value))<EOL><DEDENT>elif <NUM_LIT:1> < date_value < <NUM_LIT:200> * <NUM_LIT>:<EOL><INDENT>new_date = BusinessDate.from_excel(date_value)<EOL><DEDENT>else:<EOL><INDENT>new_date = BusinessDate.from_ordinal(date_value)<EOL><DEDENT><DEDENT>elif isinstance(date_value, (str, unicode)):<EOL><INDENT>new_date = BusinessDate.from_string(str(date_value))<EOL><DEDENT>elif isinstance(date_value, (date, datetime)):<EOL><INDENT>new_date = BusinessDate.from_date(date_value)<EOL><DEDENT>elif isinstance(date_value, list):<EOL><INDENT>new_date = [BusinessDate(d) for d in date_value]<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % str(type(date_value)))<EOL><DEDENT>return new_date<EOL>", "docstring": "r\"\"\"\n        fundamental date class\n\n        :param date_value: input value to create BusinessDate instance\n        :type date_value: int, float, string or datetime.date\n        :return: BusinessDate\n\n        creates new BusinessDate either from `int`, `float`, `string`, `datetime.date`\n        therefore the following will create the same\n\n        .. 
code-block:: python\n\n            BusinessDate(datetime.date(2015, 12, 31))\n            BusinessDate(20151231)\n            BusinessDate(2015-12-31)\n            BusinessDate(31.12.2015)\n            BusinessDate(12/31/2015)\n            BusinessDate(42369)\n            BusinessDate(42369.0)\n            BusinessDate(735963)\n            BusinessDate(735963.0)\n            BusinessDate()\n\n        **caution:** recommended is the use of class methods BusinessDate.from_string, from_date etc.", "id": "f15441:c2:m0"}
{"signature": "def __add__(self, other):", "body": "if isinstance(other, BusinessPeriod):<EOL><INDENT>return self.add_period(other)<EOL><DEDENT>elif BusinessPeriod.is_businessperiod(other):<EOL><INDENT>return self + BusinessPeriod(other)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>' % type(other))<EOL><DEDENT>", "docstring": "addition of BusinessDate.\n\n:param BusinessDate or BusinessPeriod or str other: can be BusinessPeriod or\nany thing that might be casted to it.", "id": "f15441:c2:m3"}
{"signature": "def __field(self):", "body": "try:<EOL><INDENT>tok = self.__consume()<EOL><DEDENT>except DXParserNoTokens:<EOL><INDENT>return<EOL><DEDENT>if tok.equals('<STR_LIT>'):<EOL><INDENT>component = self.__consume().value()<EOL>if not self.__consume().equals('<STR_LIT:value>'):<EOL><INDENT>raise DXParseError('<STR_LIT>')<EOL><DEDENT>classid = self.__consume().value()<EOL>try:<EOL><INDENT>self.currentobject['<STR_LIT>'][component] = classid<EOL><DEDENT>except KeyError:<EOL><INDENT>self.currentobject['<STR_LIT>'] = {component:classid}<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise DXParseError('<STR_LIT>'+str(tok)+'<STR_LIT>')<EOL><DEDENT>", "docstring": "Level-2 parser for a DX field object.\n\n        pattern:\n        object \"site map 1\" class field\n        component \"positions\" value 1\n        component \"connections\" value 2\n        component \"data\" value 3", "id": "f15445:c9:m8"}
{"signature": "def edges(self):", "body": "return [self.delta[d,d] * numpy.arange(self.shape[d]+<NUM_LIT:1>) + self.origin[d]- <NUM_LIT:0.5>*self.delta[d,d]     for d in range(self.rank)]<EOL>", "docstring": "Edges of the grid cells, origin at centre of 0,0,..,0 grid cell.\n\n        Only works for regular, orthonormal grids.", "id": "f15445:c1:m2"}
{"signature": "def use_parser(self,parsername):", "body": "self.__parser = self.parsers[parsername]<EOL>self.__parser()<EOL>", "docstring": "Set parsername as the current parser and apply it.", "id": "f15445:c9:m9"}
{"signature": "def histogramdd(self):", "body": "shape = self.components['<STR_LIT>'].shape<EOL>edges = self.components['<STR_LIT>'].edges()<EOL>hist = self.components['<STR_LIT:data>'].array.reshape(shape)<EOL>return (hist,edges)<EOL>", "docstring": "Return array data as (edges,grid), i.e. a numpy nD histogram.", "id": "f15445:c4:m6"}
{"signature": "def ndformat(self,s):", "body": "return s * len(self.shape)<EOL>", "docstring": "Returns a string with as many repetitions of s as self\n        has dimensions (derived from shape)", "id": "f15445:c0:m3"}
{"signature": "def __object(self):", "body": "self.__consume()                    <EOL>classid = self.__consume().text<EOL>word = self.__consume().text<EOL>if word != \"<STR_LIT:class>\":<EOL><INDENT>raise DXParseError(\"<STR_LIT>\" % word)<EOL><DEDENT>if self.currentobject:<EOL><INDENT>self.objects.append(self.currentobject)<EOL><DEDENT>classtype = self.__consume().text<EOL>self.currentobject = DXInitObject(classtype=classtype,classid=classid)<EOL>self.use_parser(classtype)<EOL>", "docstring": "Level-1 parser for objects.\n\n        pattern: 'object' id 'class' type ...\n\n        id ::=   integer|string|'\"'white space string'\"'\n        type ::= string", "id": "f15445:c9:m4"}
{"signature": "def sorted_components(self):", "body": "for component, object insorted(self.components.items(),<EOL>key=lambda comp_obj: comp_obj[<NUM_LIT:1>].id):<EOL><INDENT>yield component, object<EOL><DEDENT>", "docstring": "iterator that returns (component,object) in id order", "id": "f15445:c4:m5"}
{"signature": "def initialize(self):", "body": "return self.DXclasses[self.type](self.id,**self.args)<EOL>", "docstring": "Initialize the corresponding DXclass from the data.\n\n        class = DXInitObject.initialize()", "id": "f15445:c8:m1"}
{"signature": "def __general(self):", "body": "while <NUM_LIT:1>:                            <EOL><INDENT>try:<EOL><INDENT>tok = self.__peek()         <EOL><DEDENT>except DXParserNoTokens:<EOL><INDENT>if self.currentobject and self.currentobject not in self.objects:<EOL><INDENT>self.objects.append(self.currentobject)<EOL><DEDENT>return                      <EOL><DEDENT>if tok.iscode('<STR_LIT>'):<EOL><INDENT>self.set_parser('<STR_LIT>')  <EOL><DEDENT>elif tok.iscode('<STR_LIT>') and tok.equals('<STR_LIT:object>'):<EOL><INDENT>self.set_parser('<STR_LIT:object>')   <EOL><DEDENT>elif self.__parser is self.__general:<EOL><INDENT>raise DXParseError('<STR_LIT>'+str(tok))<EOL><DEDENT>self.apply_parser()<EOL><DEDENT>", "docstring": "Level-0 parser and main loop.\n\n        Look for a token that matches a level-1 parser and hand over control.", "id": "f15445:c9:m2"}
{"signature": "def write(self, file):", "body": "if self.type not in self.dx_types:<EOL><INDENT>raise ValueError((\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\").format(<EOL>self.type, list(self.dx_types.keys())))<EOL><DEDENT>typelabel = (self.typequote+self.type+self.typequote)<EOL>DXclass.write(self,file,<EOL>'<STR_LIT>'.format(<EOL>typelabel, self.array.size))<EOL>fmt_string = \"<STR_LIT>\"<EOL>if (self.array.dtype.kind == '<STR_LIT:f>' or self.array.dtype.kind == '<STR_LIT:c>'):<EOL><INDENT>precision = numpy.finfo(self.array.dtype).precision<EOL>fmt_string = \"<STR_LIT>\"+\"<STR_LIT>\".format(precision)+\"<STR_LIT>\"<EOL><DEDENT>values_per_line = <NUM_LIT:3><EOL>values = self.array.flat<EOL>while <NUM_LIT:1>:<EOL><INDENT>try:<EOL><INDENT>for i in range(values_per_line):<EOL><INDENT>file.write(fmt_string.format(next(values)) + \"<STR_LIT:\\t>\")<EOL><DEDENT>file.write('<STR_LIT:\\n>')<EOL><DEDENT>except StopIteration:<EOL><INDENT>file.write('<STR_LIT:\\n>')<EOL>break<EOL><DEDENT><DEDENT>file.write('<STR_LIT>')<EOL>", "docstring": "Write the *class array* section.\n\n        Parameters\n        ----------\n        file : file\n\n        Raises\n        ------\n        ValueError\n             If the `dxtype` is not a valid type, :exc:`ValueError` is\n             raised.", "id": "f15445:c3:m1"}
{"signature": "def __consume(self,):", "body": "self.__refill_tokenbuffer()<EOL>try:<EOL><INDENT>return self.tokens.pop(<NUM_LIT:0>)  <EOL><DEDENT>except IndexError:<EOL><INDENT>raise DXParserNoTokens<EOL><DEDENT>", "docstring": "Get the next token from the buffer and remove it/them.\n\n        try:\n          while 1:\n             token = __consume()\n        except DXParserNoTokens:\n          pass", "id": "f15445:c9:m15"}
{"signature": "def __init__(self, classid, array=None, type=None, typequote='<STR_LIT:\">',<EOL>**kwargs):", "body": "if array is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.id = classid<EOL>self.name = '<STR_LIT>'<EOL>self.component = '<STR_LIT:data>'<EOL>if type is None:<EOL><INDENT>self.array = numpy.asarray(array)<EOL>try:<EOL><INDENT>self.type = self.np_types[self.array.dtype.name]<EOL><DEDENT>except KeyError:<EOL><INDENT>warnings.warn((\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\").format(<EOL>self.array.dtype.name))<EOL>self.type = self.array.dtype.name  <EOL><DEDENT><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>self.array = numpy.asarray(array, dtype=self.dx_types[type])<EOL><DEDENT>except KeyError:<EOL><INDENT>raise ValueError((\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(type,<EOL>list(self.dx_types.values()))))<EOL><DEDENT>self.type = type<EOL><DEDENT>self.typequote = typequote<EOL>", "docstring": "Parameters\n----------\nclassid : int\narray : array_like\ntype : str (optional)\n     Set the DX type in the output file and cast `array` to\n     the closest numpy dtype.  `type` must be one of the\n     allowed types in DX files as defined under `Array\n     Objects`_.  The default ``None`` tries to set the type\n     from the :class:`numpy.dtype` of `array`.\n\n     .. versionadded:: 0.4.0\n\nRaises\n------\nValueError\n     if `array` is not provided; or if `type` is not of the correct\n     DX type", "id": "f15445:c3:m0"}
{"signature": "def set_parser(self,parsername):", "body": "self.__parser = self.parsers[parsername]<EOL>", "docstring": "Set parsername as the current parser.", "id": "f15445:c9:m10"}
{"signature": "def __comment(self):", "body": "tok = self.__consume()<EOL>self.DXfield.add_comment(tok.value())<EOL>self.set_parser('<STR_LIT>')<EOL>", "docstring": "Level-1 parser for comments.\n\n        pattern: #.*\n        Append comment (with initial '# ' stripped) to all comments.", "id": "f15445:c9:m3"}
{"signature": "def write(self, filename):", "body": "<EOL>maxcol = <NUM_LIT><EOL>with open(filename,'<STR_LIT:w>') as outfile:<EOL><INDENT>for line in self.comments:<EOL><INDENT>comment = '<STR_LIT>'+str(line)<EOL>outfile.write(comment[:maxcol]+'<STR_LIT:\\n>')<EOL><DEDENT>for component,object in self.sorted_components():<EOL><INDENT>object.write(outfile)<EOL><DEDENT>DXclass.write(self,outfile,quote=True)<EOL>for component,object in self.sorted_components():<EOL><INDENT>outfile.write('<STR_LIT>' % (component,str(object.id)))<EOL><DEDENT><DEDENT>", "docstring": "Write the complete dx object to the file.\n\n        This is the simple OpenDX format which includes the data into\n        the header via the 'object array ... data follows' statement.\n\n        Only simple regular arrays are supported.\n\n        The format should be compatible with VMD's dx reader plugin.", "id": "f15445:c4:m1"}
{"signature": "def add_comment(self,comment):", "body": "self.comments.append(comment)<EOL>", "docstring": "add comments", "id": "f15445:c4:m4"}
{"signature": "def __init__(self,classid='<STR_LIT:0>',components=None,comments=None):", "body": "if components is None:<EOL><INDENT>components = dict(positions=None,connections=None,data=None)<EOL><DEDENT>if comments is None:<EOL><INDENT>comments = ['<STR_LIT>',<EOL>'<STR_LIT>']<EOL><DEDENT>elif type(comments) is not list:<EOL><INDENT>comments = [str(comments)]<EOL><DEDENT>self.id = classid       <EOL>self.name = '<STR_LIT>'<EOL>self.component = None   <EOL>self.components = components<EOL>self.comments= comments<EOL>", "docstring": "OpenDX object, which is build from a list of components.\n\n        Parameters\n        ----------\n\n        id : str\n               arbitrary string\n        components : dict\n               dictionary of DXclass instances (no sanity check on the\n               individual ids!) which correspond to\n\n               * positions\n               * connections\n               * data\n\n        comments : list\n               list of strings; each string becomes a comment line\n               prefixed with '#'. Avoid newlines.\n\n\n        A field must have at least the components 'positions',\n        'connections', and 'data'. Those components are associated\n        with objects belonging to the field. When writing a dx file\n        from the field, only the required objects are dumped to the file.\n\n        (For a more general class that can use field:\n        Because there could be more objects than components, we keep a\n        separate object list. When dumping the dx file, first all\n        objects are written and then the field object describes its\n        components. Objects are referenced by their unique id.)\n\n        .. Note:: uniqueness of the *id* is not checked.\n\n\n        Example\n        -------\n        Create a new dx object::\n\n           dx = OpenDX.field('density',[gridpoints,gridconnections,array])", "id": "f15445:c4:m0"}
{"signature": "def render_pep440_pre(pieces):", "body": "if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered = pieces[\"<STR_LIT>\"]<EOL>if pieces[\"<STR_LIT>\"]:<EOL><INDENT>rendered += \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rendered = \"<STR_LIT>\" % pieces[\"<STR_LIT>\"]<EOL><DEDENT>return rendered<EOL>", "docstring": "TAG[.post.devDISTANCE] -- No -dirty.\n\n    Exceptions:\n    1: no tags. 0.post.devDISTANCE", "id": "f15451:m10"}
{"signature": "def render(pieces, style):", "body": "if pieces[\"<STR_LIT:error>\"]:<EOL><INDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": pieces.get(\"<STR_LIT>\"),<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT:error>\": pieces[\"<STR_LIT:error>\"],<EOL>\"<STR_LIT:date>\": None}<EOL><DEDENT>if not style or style == \"<STR_LIT:default>\":<EOL><INDENT>style = \"<STR_LIT>\"  <EOL><DEDENT>if style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_pre(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_post(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_pep440_old(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_git_describe(pieces)<EOL><DEDENT>elif style == \"<STR_LIT>\":<EOL><INDENT>rendered = render_git_describe_long(pieces)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % style)<EOL><DEDENT>return {\"<STR_LIT:version>\": rendered, \"<STR_LIT>\": pieces[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\": pieces[\"<STR_LIT>\"], \"<STR_LIT:error>\": None,<EOL>\"<STR_LIT:date>\": pieces.get(\"<STR_LIT:date>\")}<EOL>", "docstring": "Render the given version pieces into the requested style.", "id": "f15451:m15"}
{"signature": "def plus_or_dot(pieces):", "body": "if \"<STR_LIT:+>\" in pieces.get(\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>return \"<STR_LIT:.>\"<EOL><DEDENT>return \"<STR_LIT:+>\"<EOL>", "docstring": "Return a + if we don't already have one, else return a .", "id": "f15451:m8"}
{"signature": "def get_keywords():", "body": "<EOL>git_refnames = \"<STR_LIT>\"<EOL>git_full = \"<STR_LIT>\"<EOL>git_date = \"<STR_LIT>\"<EOL>keywords = {\"<STR_LIT>\": git_refnames, \"<STR_LIT>\": git_full, \"<STR_LIT:date>\": git_date}<EOL>return keywords<EOL>", "docstring": "Get the keywords needed to look up the version information.", "id": "f15451:m0"}
{"signature": "def get_config():", "body": "<EOL>cfg = VersioneerConfig()<EOL>cfg.VCS = \"<STR_LIT>\"<EOL>cfg.style = \"<STR_LIT>\"<EOL>cfg.tag_prefix = \"<STR_LIT>\"<EOL>cfg.parentdir_prefix = \"<STR_LIT>\"<EOL>cfg.versionfile_source = \"<STR_LIT>\"<EOL>cfg.verbose = False<EOL>return cfg<EOL>", "docstring": "Create, populate and return the VersioneerConfig() object.", "id": "f15451:m1"}
{"signature": "def get_versions():", "body": "<EOL>cfg = get_config()<EOL>verbose = cfg.verbose<EOL>try:<EOL><INDENT>return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,<EOL>verbose)<EOL><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>root = os.path.realpath(__file__)<EOL>for i in cfg.versionfile_source.split('<STR_LIT:/>'):<EOL><INDENT>root = os.path.dirname(root)<EOL><DEDENT><DEDENT>except NameError:<EOL><INDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\", \"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT:error>\": \"<STR_LIT>\",<EOL>\"<STR_LIT:date>\": None}<EOL><DEDENT>try:<EOL><INDENT>pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)<EOL>return render(pieces, cfg.style)<EOL><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>if cfg.parentdir_prefix:<EOL><INDENT>return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)<EOL><DEDENT><DEDENT>except NotThisMethod:<EOL><INDENT>pass<EOL><DEDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\", \"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT:error>\": \"<STR_LIT>\", \"<STR_LIT:date>\": None}<EOL>", "docstring": "Get version information or return default if unable to do so.", "id": "f15451:m16"}
{"signature": "@register_vcs_handler(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>def git_versions_from_keywords(keywords, tag_prefix, verbose):", "body": "if not keywords:<EOL><INDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>date = keywords.get(\"<STR_LIT:date>\")<EOL>if date is not None:<EOL><INDENT>date = date.strip().replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:T>\", <NUM_LIT:1>).replace(\"<STR_LIT:U+0020>\", \"<STR_LIT>\", <NUM_LIT:1>)<EOL><DEDENT>refnames = keywords[\"<STR_LIT>\"].strip()<EOL>if refnames.startswith(\"<STR_LIT>\"):<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>raise NotThisMethod(\"<STR_LIT>\")<EOL><DEDENT>refs = set([r.strip() for r in refnames.strip(\"<STR_LIT>\").split(\"<STR_LIT:U+002C>\")])<EOL>TAG = \"<STR_LIT>\"<EOL>tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])<EOL>if not tags:<EOL><INDENT>tags = set([r for r in refs if re.search(r'<STR_LIT>', r)])<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % \"<STR_LIT:U+002C>\".join(refs - tags))<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % \"<STR_LIT:U+002C>\".join(sorted(tags)))<EOL><DEDENT>for ref in sorted(tags):<EOL><INDENT>if ref.startswith(tag_prefix):<EOL><INDENT>r = ref[len(tag_prefix):]<EOL>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % r)<EOL><DEDENT>return {\"<STR_LIT:version>\": r,<EOL>\"<STR_LIT>\": keywords[\"<STR_LIT>\"].strip(),<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": None,<EOL>\"<STR_LIT:date>\": date}<EOL><DEDENT><DEDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return {\"<STR_LIT:version>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": keywords[\"<STR_LIT>\"].strip(),<EOL>\"<STR_LIT>\": False, \"<STR_LIT:error>\": \"<STR_LIT>\", \"<STR_LIT:date>\": None}<EOL>", "docstring": "Get version information from git keywords.", "id": "f15451:m6"}
{"signature": "@property<EOL><INDENT>def edges(self):<DEDENT>", "body": "<EOL>return [self.delta[d, d] * np.arange(self.shape[d] + <NUM_LIT:1>) +<EOL>self.origin[d] - <NUM_LIT:0.5> * self.delta[d, d]<EOL>for d in range(self.rank)]<EOL>", "docstring": "Edges of the grid cells, origin at centre of 0,0,..,0 grid cell.\n\n        Only works for regular, orthonormal grids.", "id": "f15452:c0:m3"}
{"signature": "def read(self, filename):", "body": "if filename is not None:<EOL><INDENT>self.filename = filename<EOL><DEDENT>with open(self.filename, '<STR_LIT:rb>') as ccp4:<EOL><INDENT>h = self.header = self._read_header(ccp4)<EOL>nentries = h['<STR_LIT>'] * h['<STR_LIT>'] * h['<STR_LIT>']<EOL>datafmt = h['<STR_LIT>'] + str(nentries) + self._data_bintype<EOL>a = np.array(struct.unpack(datafmt, ccp4.read(struct.calcsize(datafmt))))<EOL><DEDENT>self.header['<STR_LIT:filename>'] = self.filename<EOL>order = '<STR_LIT:C>' if h['<STR_LIT>'] == '<STR_LIT:z>' else '<STR_LIT:F>'<EOL>self.array = a.reshape(h['<STR_LIT>'], h['<STR_LIT>'], h['<STR_LIT>'], order=order)<EOL>self.delta = self._delta()<EOL>self.origin = np.zeros(<NUM_LIT:3>)<EOL>self.rank = <NUM_LIT:3><EOL>", "docstring": "Populate the instance from the ccp4 file *filename*.", "id": "f15452:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def _detect_byteorder(ccp4file):<DEDENT>", "body": "bsaflag = None<EOL>ccp4file.seek(<NUM_LIT> * <NUM_LIT:4>)<EOL>mapbin = ccp4file.read(<NUM_LIT:4>)<EOL>for flag in '<STR_LIT>':<EOL><INDENT>mapstr = struct.unpack(flag + '<STR_LIT>', mapbin)[<NUM_LIT:0>].decode('<STR_LIT:utf-8>')<EOL>if mapstr.upper() == '<STR_LIT>':<EOL><INDENT>bsaflag = flag<EOL>break  <EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>ccp4file.seek(<NUM_LIT:0>)<EOL>return bsaflag<EOL>", "docstring": "Detect the byteorder of stream `ccp4file` and return format character.\n\n        Try all endinaness and alignment options until we find\n        something that looks sensible (\"MAPS \" in the first 4 bytes).\n\n        (The ``machst`` field could be used to obtain endianness, but\n        it does not specify alignment.)\n\n        .. SeeAlso::\n\n          :mod:`struct`", "id": "f15452:c0:m5"}
{"signature": "def read(self, filename):", "body": "from struct import calcsize, unpack<EOL>if not filename is None:<EOL><INDENT>self.filename = filename<EOL><DEDENT>with open(self.filename, '<STR_LIT:rb>') as plt:<EOL><INDENT>h = self.header = self._read_header(plt)<EOL>nentries = h['<STR_LIT>'] * h['<STR_LIT>'] * h['<STR_LIT>']<EOL>datafmt = h['<STR_LIT>']+str(nentries)+self._data_bintype<EOL>a = numpy.array(unpack(datafmt, plt.read(calcsize(datafmt))))<EOL><DEDENT>self.header['<STR_LIT:filename>'] = self.filename<EOL>self.array = a.reshape(h['<STR_LIT>'], h['<STR_LIT>'], h['<STR_LIT>']).transpose()  <EOL>self.delta = self._delta()<EOL>self.origin = numpy.array([h['<STR_LIT>'], h['<STR_LIT>'], h['<STR_LIT>']]) + <NUM_LIT:0.5>*numpy.diagonal(self.delta)<EOL>self.rank = h['<STR_LIT>']<EOL>", "docstring": "Populate the instance from the plt file *filename*.", "id": "f15453:c1:m1"}
{"signature": "def histogramdd(self):", "body": "return (self.array, self.edges)<EOL>", "docstring": "Return array data as (edges,grid), i.e. a numpy nD histogram.", "id": "f15453:c1:m6"}
{"signature": "def _read_header(self, pltfile):", "body": "nheader = struct.calcsize(self._headerfmt)<EOL>names = [r.key for r in self._header_struct]<EOL>binheader = pltfile.read(nheader)<EOL>def decode_header(bsaflag='<STR_LIT:@>'):<EOL><INDENT>h = dict(zip(names, struct.unpack(bsaflag+self._headerfmt, binheader)))<EOL>h['<STR_LIT>'] = bsaflag<EOL>return h<EOL><DEDENT>for flag in '<STR_LIT>':<EOL><INDENT>header = decode_header(flag)<EOL>if header['<STR_LIT>'] == <NUM_LIT:3>:<EOL><INDENT>break   <EOL><DEDENT>header = None<EOL><DEDENT>if header is None:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>for rec in self._header_struct:<EOL><INDENT>if not rec.is_legal_dict(header):<EOL><INDENT>warnings.warn(\"<STR_LIT>\" % (rec.key, header[rec.key]))<EOL><DEDENT><DEDENT>return header<EOL>", "docstring": "Read header bytes, try all possibilities for byte order/size/alignment.", "id": "f15453:c1:m5"}
{"signature": "def ndmeshgrid(*arrs):", "body": "<EOL>arrs = tuple(arrs)<EOL>lens = list(map(len, arrs))<EOL>dim = len(arrs)<EOL>sz = <NUM_LIT:1><EOL>for s in lens:<EOL><INDENT>sz *= s<EOL><DEDENT>ans = []<EOL>for i, arr in enumerate(arrs):<EOL><INDENT>slc = [<NUM_LIT:1>] * dim<EOL>slc[i] = lens[i]<EOL>arr2 = numpy.asanyarray(arr).reshape(slc)<EOL>for j, sz in enumerate(lens):<EOL><INDENT>if j != i:<EOL><INDENT>arr2 = arr2.repeat(sz, axis=j)<EOL><DEDENT><DEDENT>ans.append(arr2)<EOL><DEDENT>return tuple(ans)<EOL>", "docstring": "Return a mesh grid for N dimensions.\n\n    The input are N arrays, each of which contains the values along one axis of\n    the coordinate system. The arrays do not have to have the same number of\n    entries. The function returns arrays that can be fed into numpy functions\n    so that they produce values for *all* points spanned by the axes *arrs*.\n\n    Original from\n    http://stackoverflow.com/questions/1827489/numpy-meshgrid-in-3d and fixed.\n\n    .. SeeAlso: :func:`numpy.meshgrid` for the 2D case.", "id": "f15455:m1"}
{"signature": "def _load_dx(self, filename):", "body": "dx = OpenDX.field(<NUM_LIT:0>)<EOL>dx.read(filename)<EOL>grid, edges = dx.histogramdd()<EOL>self.__init__(grid=grid, edges=edges, metadata=self.metadata)<EOL>", "docstring": "Initializes Grid from a OpenDX file.", "id": "f15455:c0:m18"}
{"signature": "def save(self, filename):", "body": "self.export(filename, file_format=\"<STR_LIT>\")<EOL>", "docstring": "Save a grid object to <filename>.pickle\n\n        Internally, this calls\n        ``Grid.export(filename, format=\"python\")``. A grid can be\n        regenerated from the saved data with ::\n\n           g = Grid(filename=\"grid.pickle\")\n\n        .. note::\n           The pickle format depends on the Python version and\n           therefore it is not guaranteed that a grid saved with, say,\n           Python 2.7 can also be read with Python 3.5. The OpenDX format\n           is a better alternative for portability.", "id": "f15455:c0:m23"}
{"signature": "def resample(self, edges):", "body": "try:<EOL><INDENT>edges = edges.edges  <EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>midpoints = self._midpoints(edges)<EOL>coordinates = ndmeshgrid(*midpoints)<EOL>newgrid = self.interpolated(*coordinates)<EOL>return self.__class__(newgrid, edges)<EOL>", "docstring": "Resample data to a new grid with edges *edges*.\n\n        This method creates a new grid with the data from the current\n        grid resampled to a regular grid specified by *edges*.  The\n        order of the interpolation is set by\n        :attr:`Grid.interpolation_spline_order`: change the value\n        *before* calling :meth:`resample`.\n\n        Parameters\n        ----------\n        edges : tuple of arrays or Grid\n             edges of the new grid or a :class:`Grid` instance that\n             provides :attr:`Grid.edges`\n\n        Returns\n        -------\n        Grid\n             a new :class:`Grid` with the data interpolated over the\n             new grid cells\n\n\n        Examples\n        --------\n\n        Providing *edges* (a tuple of three arrays, indicating the\n        boundaries of each grid cell)::\n\n          g = grid.resample(edges)\n\n        As a convenience, one can also supply another :class:`Grid` as\n        the argument for this method ::\n\n          g = grid.resample(othergrid)\n\n        and the edges are taken from :attr:`Grid.edges`.", "id": "f15455:c0:m3"}
{"signature": "def _export_python(self, filename, **kwargs):", "body": "data = dict(grid=self.grid, edges=self.edges, metadata=self.metadata)<EOL>with open(filename, '<STR_LIT:wb>') as f:<EOL><INDENT>cPickle.dump(data, f, cPickle.HIGHEST_PROTOCOL)<EOL><DEDENT>", "docstring": "Pickle the Grid object\n\n        The object is dumped as a dictionary with grid and edges: This\n        is sufficient to recreate the grid object with __init__().", "id": "f15455:c0:m21"}
{"signature": "def _load_plt(self, filename):", "body": "g = gOpenMol.Plt()<EOL>g.read(filename)<EOL>grid, edges = g.histogramdd()<EOL>self.__init__(grid=grid, edges=edges, metadata=self.metadata)<EOL>", "docstring": "Initialize Grid from gOpenMol plt file.", "id": "f15455:c0:m19"}
{"signature": "@property<EOL><INDENT>def interpolated(self):<DEDENT>", "body": "if self.__interpolated is None:<EOL><INDENT>self.__interpolated = self._interpolationFunctionFactory()<EOL><DEDENT>return self.__interpolated<EOL>", "docstring": "B-spline function over the data grid(x,y,z).\n\n        The :func:`interpolated` function allows one to obtain data\n        values for any values of the coordinates::\n\n           interpolated([x1,x2,...],[y1,y2,...],[z1,z2,...]) -> F[x1,y1,z1],F[x2,y2,z2],...\n\n        The interpolation order is set in\n        :attr:`Grid.interpolation_spline_order`.\n\n        The interpolated function is computed once and is cached for better\n        performance. Whenever :attr:`~Grid.interpolation_spline_order` is\n        modified, :meth:`Grid.interpolated` is recomputed.\n\n        The value for unknown data is set in :attr:`Grid.interpolation_cval`\n        (TODO: also recompute when ``interpolation_cval`` value is changed.)\n\n        Example\n        -------\n        Example usage for resampling::\n\n           XX, YY, ZZ = numpy.mgrid[40:75:0.5, 96:150:0.5, 20:50:0.5]\n           FF = interpolated(XX, YY, ZZ)\n\n        Note\n        ----\n        Values are interpolated with a spline function. It is possible\n        that the spline will generate values that would not normally\n        appear in the data. For example, a density is non-negative but\n        a cubic spline interpolation can generate negative values,\n        especially at the boundary between 0 and high values.", "id": "f15455:c0:m6"}
{"signature": "def get_clusters(self):", "body": "return self._clusters<EOL>", "docstring": "!\n        @brief Returns list of allocated clusters, each cluster contains indexes of objects in list of data.\n\n        @see process()\n        @see get_representatives()", "id": "f15464:c1:m4"}
{"signature": "def process(self):", "body": "if self._ccore is True:<EOL><INDENT>self.__process_by_ccore();<EOL><DEDENT>else:<EOL><INDENT>self.__prcess_by_python();<EOL><DEDENT>", "docstring": "!\n        @brief Performs cluster analysis in line with rules of BSAS algorithm.\n\n        @remark Results of clustering can be obtained using corresponding get methods.\n\n        @see get_clusters()\n        @see get_representatives()", "id": "f15464:c1:m1"}
{"signature": "def _find_nearest_cluster(self, point):", "body": "index_cluster = -<NUM_LIT:1>;<EOL>nearest_distance = float('<STR_LIT>');<EOL>for index in range(len(self._representatives)):<EOL><INDENT>distance = self._metric(point, self._representatives[index]);<EOL>if distance < nearest_distance:<EOL><INDENT>index_cluster = index;<EOL>nearest_distance = distance;<EOL><DEDENT><DEDENT>return index_cluster, nearest_distance<EOL>", "docstring": "!\n        @brief Find nearest cluster to the specified point.\n\n        @param[in] point (list): Point from dataset.\n\n        @return (uint, double) Index of nearest cluster and distance to it.", "id": "f15464:c1:m7"}
{"signature": "def __init__(self, data, amount_centers):", "body": "self.__data = data<EOL>self.__amount = amount_centers<EOL>self.__available_indexes = set(list(range(len(self.__data))))<EOL>if self.__amount <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if self.__amount > len(self.__data):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % self.__amount)<EOL><DEDENT>", "docstring": "!\n        @brief Creates instance of random center initializer.\n\n        @param[in] data (list): List of points where each point is represented by list of coordinates.\n        @param[in] amount_centers (unit): Amount of centers that should be initialized.", "id": "f15465:c0:m0"}
{"signature": "def __get_initial_center(self, return_index):", "body": "index_center = random.randint(<NUM_LIT:0>, len(self.__data) - <NUM_LIT:1>)<EOL>if return_index:<EOL><INDENT>return index_center<EOL><DEDENT>return self.__data[index_center]<EOL>", "docstring": "!\n        @brief Choose randomly first center.\n\n        @param[in] return_index (bool): If True then return center's index instead of point.\n\n        @return (array_like) First center.<br>\n                (uint) Index of first center.", "id": "f15465:c1:m4"}
{"signature": "def __check_parameters(self):", "body": "if (self.__amount <= <NUM_LIT:0>) or (self.__amount > len(self.__data)):<EOL><INDENT>raise AttributeError(\"<STR_LIT>\" + str(self.__amount) + \"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>if self.__candidates != kmeans_plusplus_initializer.FARTHEST_CENTER_CANDIDATE:<EOL><INDENT>if (self.__candidates <= <NUM_LIT:0>) or (self.__candidates > len(self.__data)):<EOL><INDENT>raise AttributeError(\"<STR_LIT>\" + str(self.__candidates) + \"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if len(self.__data) == <NUM_LIT:0>:<EOL><INDENT>raise AttributeError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "!\n        @brief Checks input parameters of the algorithm and if something wrong then corresponding exception is thrown.", "id": "f15465:c1:m1"}
{"signature": "def __create_center(self, return_index):", "body": "random_index_point = random.randint(<NUM_LIT:0>, len(self.__data[<NUM_LIT:0>]))<EOL>if random_index_point not in self.__available_indexes:<EOL><INDENT>random_index_point = self.__available_indexes.pop()<EOL><DEDENT>else:<EOL><INDENT>self.__available_indexes.remove(random_index_point)<EOL><DEDENT>if return_index:<EOL><INDENT>return random_index_point<EOL><DEDENT>return self.__data[random_index_point]<EOL>", "docstring": "!\n        @brief Generates and returns random center.\n\n        @param[in] return_index (bool): If True then returns index of point from input data instead of point itself.", "id": "f15465:c0:m2"}
{"signature": "def initialize(self, **kwargs):", "body": "return_index = kwargs.get('<STR_LIT>', False)<EOL>index_point = self.__get_initial_center(True)<EOL>centers = [index_point]<EOL>self.__free_indexes.remove(index_point)<EOL>for _ in range(<NUM_LIT:1>, self.__amount):<EOL><INDENT>index_point = self.__get_next_center(centers, True)<EOL>centers.append(index_point)<EOL>self.__free_indexes.remove(index_point)<EOL><DEDENT>if not return_index:<EOL><INDENT>centers = [self.__data[index] for index in centers]<EOL><DEDENT>return centers<EOL>", "docstring": "!\n        @brief Calculates initial centers using K-Means++ method.\n\n        @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'return_index').\n\n        <b>Keyword Args:</b><br>\n            - return_index (bool): If True then returns indexes of points from input data instead of points itself.\n\n        @return (list) List of initialized initial centers.\n                  If argument 'return_index' is False then returns list of points.\n                  If argument 'return_index' is True then returns list of indexes.", "id": "f15465:c1:m7"}
{"signature": "def get_cells(self):", "body": "return self.__cells<EOL>", "docstring": "!\n        @brief Returns CLIQUE blocks that are formed during clustering process.\n        @details CLIQUE blocks can be used for visualization purposes. Each CLIQUE block contain its logical location\n                  in grid, spatial location in data space and points that belong to block.\n\n        @return (list) List of CLIQUE blocks.", "id": "f15466:c4:m4"}
{"signature": "def get_noise(self):", "body": "return self.__noise<EOL>", "docstring": "!\n        @brief Returns allocated noise.\n\n        @remark Allocated noise is returned only after data processing (method process()). Otherwise empty list is returned.\n\n        @return (list) List of indexes that are marked as a noise.\n\n        @see process()\n        @see get_clusters()", "id": "f15466:c4:m3"}
{"signature": "@property<EOL><INDENT>def visited(self):<DEDENT>", "body": "return self.__visited<EOL>", "docstring": "!\n        @brief Defines whether block is visited during cluster analysis.\n        @details If cluster analysis has not been performed then value will False.\n\n        @return (bool) True if block has been visited during processing, False otherwise.", "id": "f15466:c2:m9"}
{"signature": "def increment(self):", "body": "for index_dimension in range(self.__dimension):<EOL><INDENT>if self.__coordiate[index_dimension] + <NUM_LIT:1> < self.__intervals:<EOL><INDENT>self.__coordiate[index_dimension] += <NUM_LIT:1><EOL>return<EOL><DEDENT>else:<EOL><INDENT>self.__coordiate[index_dimension] = <NUM_LIT:0><EOL><DEDENT><DEDENT>self.__coordiate = None<EOL>", "docstring": "!\n        @brief Forms logical location for next block.", "id": "f15466:c3:m2"}
{"signature": "def __init__(self, logical_location=None, spatial_location=None, points=None, visited=False):", "body": "self.__logical_location = logical_location or []<EOL>self.__spatial_location = spatial_location<EOL>self.__points = points or []<EOL>self.__visited = visited<EOL>", "docstring": "!\n        @brief Initializes CLIQUE block.\n\n        @param[in] logical_location (list): Logical location of the block in CLIQUE grid.\n        @param[in] spatial_location (spatial_block): Spatial location in data space.\n        @param[in] points (array_like): Points that belong to this block (can be obtained by method 'capture_points',\n                    this parameter is used by CLIQUE in case of processing by C++ implementation when clustering\n                    result are passed back to Python code.\n        @param[in] visited (bool): Marks if block is visited during clustering process.", "id": "f15466:c2:m0"}
{"signature": "@property<EOL><INDENT>def points(self):<DEDENT>", "body": "return self.__points<EOL>", "docstring": "!\n        @brief Points that belong to the CLIQUE block.\n        @details Points are represented by indexes that correspond to points in input data space.\n\n        @return (array_like) Points that belong to the CLIQUE block.\n\n        @see capture_points", "id": "f15466:c2:m8"}
{"signature": "@logical_location.setter<EOL><INDENT>def logical_location(self, location):<DEDENT>", "body": "self.__logical_location = location<EOL>", "docstring": "!\n        @brief Assign logical location to CLIQUE block.\n\n        @param[in] location (list): New logical location of the block in CLIQUE grid.", "id": "f15466:c2:m4"}
{"signature": "def process(self):", "body": "if self.__ccore:<EOL><INDENT>self.__process_by_ccore()<EOL><DEDENT>else:<EOL><INDENT>self.__process_by_python()<EOL><DEDENT>return self<EOL>", "docstring": "!\n        @brief Performs clustering process in line with rules of CLIQUE clustering algorithm.\n\n        @return (clique) Returns itself (CLIQUE instance).\n\n        @see get_clusters()\n        @see get_noise()\n        @see get_cells()", "id": "f15466:c4:m1"}
{"signature": "def get_coordinate(self):", "body": "return self.__coordiate<EOL>", "docstring": "!\n        @brief Returns current block coordinate.", "id": "f15466:c3:m1"}
{"signature": "def __str__(self):", "body": "return str(self.__logical_location)<EOL>", "docstring": "!\n        @brief Returns string representation of the block using its logical location in CLIQUE grid.", "id": "f15466:c2:m1"}
{"signature": "def __expand_cluster(self, cell):", "body": "cell.visited = True<EOL>if len(cell.points) <= self.__density_threshold:<EOL><INDENT>if len(cell.points) > <NUM_LIT:0>:<EOL><INDENT>self.__noise.extend(cell.points)<EOL><DEDENT>return<EOL><DEDENT>cluster = cell.points[:]<EOL>neighbors = self.__get_neighbors(cell)<EOL>for neighbor in neighbors:<EOL><INDENT>if len(neighbor.points) > self.__density_threshold:<EOL><INDENT>cluster.extend(neighbor.points)<EOL>neighbors += self.__get_neighbors(neighbor)<EOL><DEDENT>elif len(neighbor.points) > <NUM_LIT:0>:<EOL><INDENT>self.__noise.extend(neighbor.points)<EOL><DEDENT><DEDENT>self.__clusters.append(cluster)<EOL>", "docstring": "!\n        @brief Tries to expand cluster from specified cell.\n        @details During expanding points are marked as noise or append to new cluster.\n\n        @param[in] cell (clique_block): CLIQUE block from that cluster should be expanded.", "id": "f15466:c4:m10"}
{"signature": "def __repr__(self):", "body": "return str(self.__logical_location)<EOL>", "docstring": "!\n        @brief Returns string representation of the block using its logical location in CLIQUE grid.", "id": "f15466:c2:m2"}
{"signature": "def get_location_neighbors(self, edge):", "body": "neighbors = []<EOL>for index_dimension in range(len(self.__logical_location)):<EOL><INDENT>if self.__logical_location[index_dimension] + <NUM_LIT:1> < edge:<EOL><INDENT>position = self.__logical_location[:]<EOL>position[index_dimension] += <NUM_LIT:1><EOL>neighbors.append(position)<EOL><DEDENT>if self.__logical_location[index_dimension] - <NUM_LIT:1> >= <NUM_LIT:0>:<EOL><INDENT>position = self.__logical_location[:]<EOL>position[index_dimension] -= <NUM_LIT:1><EOL>neighbors.append(position)<EOL><DEDENT><DEDENT>return neighbors<EOL>", "docstring": "!\n        @brief Forms list of logical location of each neighbor for this particular CLIQUE block.\n\n        @param[in] edge (uint): Amount of intervals in each dimension that is used for clustering process.\n\n        @return (list) Logical location of each neighbor for this particular CLIQUE block.", "id": "f15466:c2:m12"}
{"signature": "def __validate_arguments(self):", "body": "if len(self.__data) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if self.__amount_intervals <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % self.__amount_intervals)<EOL><DEDENT>if self.__density_threshold < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % self.__density_threshold)<EOL><DEDENT>", "docstring": "!\n        @brief Check input arguments of CLIQUE algorithm and if one of them is not correct then appropriate exception\n                is thrown.", "id": "f15466:c4:m8"}
{"signature": "def __create_grid(self):", "body": "data_sizes, min_corner, max_corner = self.__get_data_size_derscription()<EOL>dimension = len(self.__data[<NUM_LIT:0>])<EOL>cell_sizes = [dimension_length / self.__amount_intervals for dimension_length in data_sizes]<EOL>self.__cells = [clique_block() for _ in range(pow(self.__amount_intervals, dimension))]<EOL>iterator = coordinate_iterator(dimension, self.__amount_intervals)<EOL>point_availability = [True] * len(self.__data)<EOL>self.__cell_map = {}<EOL>for index_cell in range(len(self.__cells)):<EOL><INDENT>logical_location = iterator.get_coordinate()<EOL>iterator.increment()<EOL>self.__cells[index_cell].logical_location = logical_location[:]<EOL>cur_max_corner, cur_min_corner = self.__get_spatial_location(logical_location, min_corner, max_corner, cell_sizes)<EOL>self.__cells[index_cell].spatial_location = spatial_block(cur_max_corner, cur_min_corner)<EOL>self.__cells[index_cell].capture_points(self.__data, point_availability)<EOL>self.__cell_map[self.__location_to_key(logical_location)] = self.__cells[index_cell]<EOL><DEDENT>", "docstring": "!\n        @brief Creates CLIQUE grid that consists of CLIQUE blocks for clustering process.", "id": "f15466:c4:m12"}
{"signature": "def __str__(self):", "body": "return \"<STR_LIT>\" % (self.__max_corner, self.__min_corner)<EOL>", "docstring": "!\n        @brief Returns string block description.\n\n        @return String representation of the block.", "id": "f15466:c1:m1"}
{"signature": "def __process_by_ccore(self):", "body": "(self.__clusters, self.__noise, block_logical_locations, block_max_corners, block_min_corners, block_points) =wrapper.clique(self.__data, self.__amount_intervals, self.__density_threshold)<EOL>amount_cells = len(block_logical_locations)<EOL>for i in range(amount_cells):<EOL><INDENT>self.__cells.append(clique_block(block_logical_locations[i],<EOL>spatial_block(block_max_corners[i], block_min_corners[i]),<EOL>block_points[i],<EOL>True))<EOL><DEDENT>", "docstring": "!\n        @brief Performs cluster analysis using C++ implementation of CLIQUE algorithm that is used by default if\n                user's target platform is supported.", "id": "f15466:c4:m6"}
{"signature": "def __get_neighbors(self, cell):", "body": "neighbors = []<EOL>location_neighbors = cell.get_location_neighbors(self.__amount_intervals)<EOL>for i in range(len(location_neighbors)):<EOL><INDENT>key = self.__location_to_key(location_neighbors[i])<EOL>candidate_neighbor = self.__cell_map[key]<EOL>if not candidate_neighbor.visited:<EOL><INDENT>candidate_neighbor.visited = True<EOL>neighbors.append(candidate_neighbor)<EOL><DEDENT><DEDENT>return neighbors<EOL>", "docstring": "!\n        @brief Returns neighbors for specified CLIQUE block as clique_block objects.\n\n        @return (list) Neighbors as clique_block objects.", "id": "f15466:c4:m11"}
{"signature": "@property<EOL><INDENT>def spatial_location(self):<DEDENT>", "body": "return self.__spatial_location<EOL>", "docstring": "!\n        @brief Spatial location is represented by real data space coordinates.\n        @return (spatial_block) Spatial block that describes location in data space.", "id": "f15466:c2:m5"}
{"signature": "def __allocate_clusters(self):", "body": "for cell in self.__cells:<EOL><INDENT>if cell.visited is False:<EOL><INDENT>self.__expand_cluster(cell)<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Performs cluster analysis using formed CLIQUE blocks.", "id": "f15466:c4:m9"}
{"signature": "def __process_by_python(self):", "body": "self.__create_grid()<EOL>self.__allocate_clusters()<EOL>self.__cells_map.clear()<EOL>", "docstring": "!\n        @brief Performs cluster analysis using Python implementation of CLIQUE algorithm.", "id": "f15466:c4:m7"}
{"signature": "def __init__(self, max_corner, min_corner):", "body": "self.__max_corner = max_corner<EOL>self.__min_corner = min_corner<EOL>", "docstring": "!\n        @brief Creates spatial block in data space.\n\n        @param[in] max_corner (array_like): Maximum corner coordinates of the block.\n        @param[in] min_corner (array_like): Minimal corner coordinates of the block.", "id": "f15466:c1:m0"}
{"signature": "def process(self):", "body": "random.seed()<EOL>for _ in range(<NUM_LIT:0>, self.__numlocal):<EOL><INDENT>self.__current = random.sample(range(<NUM_LIT:0>, len(self.__pointer_data)), self.__number_clusters)<EOL>self.__update_clusters(self.__current)<EOL>self.__optimize_configuration()<EOL>estimation = self.__calculate_estimation()<EOL>if estimation < self.__optimal_estimation:<EOL><INDENT>self.__optimal_medoids = self.__current[:]<EOL>self.__optimal_estimation = estimation<EOL><DEDENT><DEDENT>self.__update_clusters(self.__optimal_medoids)<EOL>", "docstring": "!\n        @brief Performs cluster analysis in line with rules of CLARANS algorithm.\n\n        @see get_clusters()\n        @see get_medoids()", "id": "f15467:c0:m1"}
{"signature": "def get_clusters(self):", "body": "return self.__clusters<EOL>", "docstring": "!\n        @brief Returns allocated clusters by the algorithm.\n\n        @remark Allocated clusters can be returned only after data processing (use method process()), otherwise empty list is returned.\n\n        @return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.\n\n        @see process()\n        @see get_medoids()", "id": "f15467:c0:m2"}
{"signature": "def __update_clusters(self, medoids):", "body": "self.__belong = [<NUM_LIT:0>] * len(self.__pointer_data)<EOL>self.__clusters = [[] for i in range(len(medoids))]<EOL>for index_point in range(len(self.__pointer_data)):<EOL><INDENT>index_optim = -<NUM_LIT:1><EOL>dist_optim = <NUM_LIT:0.0><EOL>for index in range(len(medoids)):<EOL><INDENT>dist = euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[medoids[index]])<EOL>if (dist < dist_optim) or (index is <NUM_LIT:0>):<EOL><INDENT>index_optim = index<EOL>dist_optim = dist<EOL><DEDENT><DEDENT>self.__clusters[index_optim].append(index_point)<EOL>self.__belong[index_point] = index_optim<EOL><DEDENT>self.__clusters = [cluster for cluster in self.__clusters if len(cluster) > <NUM_LIT:0>]<EOL>", "docstring": "!\n        @brief Forms cluster in line with specified medoids by calculation distance from each point to medoids.", "id": "f15467:c0:m5"}
{"signature": "def __find_another_nearest_medoid(self, point_index, current_medoid_index):", "body": "other_medoid_index = -<NUM_LIT:1><EOL>other_distance_nearest = float('<STR_LIT>')<EOL>for index_medoid in self.__current:<EOL><INDENT>if (index_medoid != current_medoid_index):<EOL><INDENT>other_distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[current_medoid_index])<EOL>if other_distance_candidate < other_distance_nearest:<EOL><INDENT>other_distance_nearest = other_distance_candidate<EOL>other_medoid_index = index_medoid<EOL><DEDENT><DEDENT><DEDENT>return other_medoid_index<EOL>", "docstring": "!\n        @brief Finds the another nearest medoid for the specified point that is differ from the specified medoid. \n\n        @param[in] point_index: index of point in dataspace for that searching of medoid in current list of medoids is perfomed.\n        @param[in] current_medoid_index: index of medoid that shouldn't be considered as a nearest.\n\n        @return (uint) index of the another nearest medoid for the point.", "id": "f15467:c0:m7"}
{"signature": "def __get_nearest_feature(self, point, feature_collection):", "body": "minimum_distance = float(\"<STR_LIT>\");<EOL>index_nearest_feature = -<NUM_LIT:1>;<EOL>for index_entry in range(<NUM_LIT:0>, len(feature_collection)):<EOL><INDENT>point_entry = cfentry(<NUM_LIT:1>, linear_sum([ point ]), square_sum([ point ]));<EOL>distance = feature_collection[index_entry].get_distance(point_entry, self.__measurement_type);<EOL>if (distance < minimum_distance):<EOL><INDENT>minimum_distance = distance;<EOL>index_nearest_feature = index_entry;<EOL><DEDENT><DEDENT>return (minimum_distance, index_nearest_feature)<EOL>", "docstring": "!\n        @brief Find nearest entry for specified point.\n\n        @param[in] point (list): Pointer to point from input dataset.\n        @param[in] feature_collection (list): Feature collection that is used for obtaining nearest feature for the specified point.\n\n        @return (double, uint) Tuple of distance to nearest entry to the specified point and index of that entry.", "id": "f15468:c0:m9"}
{"signature": "def __rebuild_tree(self, index_point):", "body": "rebuild_result = False;<EOL>increased_diameter = self.__tree.threshold * self.__diameter_multiplier;<EOL>tree = None;<EOL>while(rebuild_result is False):<EOL><INDENT>if (increased_diameter == <NUM_LIT:0.0>):<EOL><INDENT>increased_diameter = <NUM_LIT:1.0>;<EOL><DEDENT>tree = cftree(self.__tree.branch_factor, self.__tree.max_entries, increased_diameter, self.__tree.type_measurement);<EOL>for index_point in range(<NUM_LIT:0>, index_point + <NUM_LIT:1>):<EOL><INDENT>point = self.__pointer_data[index_point];<EOL>tree.insert_cluster([point]);<EOL>if (tree.amount_entries > self.__entry_size_limit):<EOL><INDENT>increased_diameter *= self.__diameter_multiplier;<EOL>continue;<EOL><DEDENT><DEDENT>rebuild_result = True;<EOL><DEDENT>return tree<EOL>", "docstring": "!\n        @brief Rebuilt tree in case of maxumum number of entries is exceeded.\n\n        @param[in] index_point (uint): Index of point that is used as end point of re-building.\n\n        @return (cftree) Rebuilt tree with encoded points till specified point from input data space.", "id": "f15468:c0:m7"}
{"signature": "def get_cluster_encoding(self):", "body": "return type_encoding.CLUSTER_INDEX_LIST_SEPARATION<EOL>", "docstring": "!\n        @brief Returns clustering result representation type that indicate how clusters are encoded.\n\n        @return (type_encoding) Clustering result representation.\n\n        @see get_clusters()", "id": "f15468:c0:m3"}
{"signature": "def __extract_features(self):", "body": "self.__features = [];<EOL>if (len(self.__tree.leafes) == <NUM_LIT:1>):<EOL><INDENT>for entry in self.__tree.leafes[<NUM_LIT:0>].entries:<EOL><INDENT>self.__features.append(entry);<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for node in self.__tree.leafes:<EOL><INDENT>self.__features.append(node.feature);<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Extracts features from CF-tree cluster.", "id": "f15468:c0:m4"}
{"signature": "def __find_nearest_cluster_features(self):", "body": "minimum_distance = float(\"<STR_LIT>\");<EOL>index1 = <NUM_LIT:0>;<EOL>index2 = <NUM_LIT:0>;<EOL>for index_candidate1 in range(<NUM_LIT:0>, len(self.__features)):<EOL><INDENT>feature1 = self.__features[index_candidate1];<EOL>for index_candidate2 in range(index_candidate1 + <NUM_LIT:1>, len(self.__features)):<EOL><INDENT>feature2 = self.__features[index_candidate2];<EOL>distance = feature1.get_distance(feature2, self.__measurement_type);<EOL>if (distance < minimum_distance):<EOL><INDENT>minimum_distance = distance;<EOL>index1 = index_candidate1;<EOL>index2 = index_candidate2;<EOL><DEDENT><DEDENT><DEDENT>return [index1, index2]<EOL>", "docstring": "!\n        @brief Find pair of nearest CF entries.\n\n        @return (list) List of two nearest enties that are represented by list [index_point1, index_point2].", "id": "f15468:c0:m8"}
{"signature": "def process(self):", "body": "if self.__ccore is True:<EOL><INDENT>self.__process_by_ccore()<EOL><DEDENT>else:<EOL><INDENT>self.__process_by_python()<EOL><DEDENT>", "docstring": "!\n        @brief Performs cluster analysis in line with rules of CURE algorithm.\n\n        @remark Results of clustering can be obtained using corresponding get methods.\n\n        @see get_clusters()", "id": "f15541:c1:m1"}
{"signature": "def __insert_cluster(self, cluster):", "body": "for index in range(len(self.__queue)):<EOL><INDENT>if cluster.distance < self.__queue[index].distance:<EOL><INDENT>self.__queue.insert(index, cluster)<EOL>return<EOL><DEDENT><DEDENT>self.__queue.append(cluster)<EOL>", "docstring": "!\n        @brief Insert cluster to the list (sorted queue) in line with sequence order (distance).\n\n        @param[in] cluster (cure_cluster): Cluster that should be inserted.", "id": "f15541:c1:m10"}
{"signature": "def get_cluster_encoding(self):", "body": "return type_encoding.CLUSTER_INDEX_LIST_SEPARATION<EOL>", "docstring": "!\n        @brief Returns clustering result representation type that indicate how clusters are encoded.\n\n        @return (type_encoding) Clustering result representation.\n\n        @see get_clusters()", "id": "f15541:c1:m7"}
{"signature": "def __delete_represented_points(self, cluster):", "body": "for point in cluster.rep:<EOL><INDENT>self.__tree.remove(point, payload=cluster)<EOL><DEDENT>", "docstring": "!\n        @brief Remove representation points of clusters from the k-d tree\n\n        @param[in] cluster (cure_cluster): Cluster whose representation points should be removed.", "id": "f15541:c1:m14"}
{"signature": "def __merge_clusters(self, cluster1, cluster2):", "body": "merged_cluster = cure_cluster(None, None)<EOL>merged_cluster.points = cluster1.points + cluster2.points<EOL>merged_cluster.indexes = cluster1.indexes + cluster2.indexes<EOL>dimension = len(cluster1.mean)<EOL>merged_cluster.mean = [<NUM_LIT:0>] * dimension<EOL>if merged_cluster.points[<NUM_LIT:1>:] == merged_cluster.points[:-<NUM_LIT:1>]:<EOL><INDENT>merged_cluster.mean = merged_cluster.points[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>for index in range(dimension):<EOL><INDENT>merged_cluster.mean[index] = ( len(cluster1.points) * cluster1.mean[index] + len(cluster2.points) * cluster2.mean[index] ) / ( len(cluster1.points) + len(cluster2.points) );<EOL><DEDENT><DEDENT>temporary = list()<EOL>for index in range(self.__number_represent_points):<EOL><INDENT>maximal_distance = <NUM_LIT:0><EOL>maximal_point = None<EOL>for point in merged_cluster.points:<EOL><INDENT>minimal_distance = <NUM_LIT:0><EOL>if index == <NUM_LIT:0>:<EOL><INDENT>minimal_distance = euclidean_distance_square(point, merged_cluster.mean)<EOL><DEDENT>else:<EOL><INDENT>minimal_distance = min([euclidean_distance_square(point, p) for p in temporary])<EOL><DEDENT>if minimal_distance >= maximal_distance:<EOL><INDENT>maximal_distance = minimal_distance<EOL>maximal_point = point<EOL><DEDENT><DEDENT>if maximal_point not in temporary:<EOL><INDENT>temporary.append(maximal_point)<EOL><DEDENT><DEDENT>for point in temporary:<EOL><INDENT>representative_point = [<NUM_LIT:0>] * dimension<EOL>for index in range(dimension):<EOL><INDENT>representative_point[index] = point[index] + self.__compression * (merged_cluster.mean[index] - point[index])<EOL><DEDENT>merged_cluster.rep.append(representative_point)<EOL><DEDENT>return merged_cluster<EOL>", "docstring": "!\n        @brief Merges two clusters and returns new merged cluster. 
Representation points and mean points are calculated for the new cluster.\n\n        @param[in] cluster1 (cure_cluster): Cluster that should be merged.\n        @param[in] cluster2 (cure_cluster): Cluster that should be merged.\n\n        @return (cure_cluster) New merged CURE cluster.", "id": "f15541:c1:m15"}
{"signature": "def __validate_arguments(self):", "body": "if len(self.__pointer_data) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if self.__number_cluster <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % self.__number_cluster)<EOL><DEDENT>if self.__compression < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % self.__compression)<EOL><DEDENT>if self.__number_represent_points <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % self.__number_cluster)<EOL><DEDENT>", "docstring": "!\n        @brief Check input arguments of BANG algorithm and if one of them is not correct then appropriate exception\n                is thrown.", "id": "f15541:c1:m9"}
{"signature": "def __create_queue(self):", "body": "self.__queue = [cure_cluster(self.__pointer_data[index_point], index_point) for index_point in range(len(self.__pointer_data))]<EOL>for i in range(<NUM_LIT:0>, len(self.__queue)):<EOL><INDENT>minimal_distance = float('<STR_LIT>')<EOL>closest_index_cluster = -<NUM_LIT:1><EOL>for k in range(<NUM_LIT:0>, len(self.__queue)):<EOL><INDENT>if i != k:<EOL><INDENT>dist = self.__cluster_distance(self.__queue[i], self.__queue[k])<EOL>if dist < minimal_distance:<EOL><INDENT>minimal_distance = dist<EOL>closest_index_cluster = k<EOL><DEDENT><DEDENT><DEDENT>self.__queue[i].closest = self.__queue[closest_index_cluster]<EOL>self.__queue[i].distance = minimal_distance<EOL><DEDENT>self.__queue.sort(key = lambda x: x.distance, reverse = False)<EOL>", "docstring": "!\n        @brief Create queue of sorted clusters by distance between them, where first cluster has the nearest neighbor. At the first iteration each cluster contains only one point.\n\n        @param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple.\n\n        @return (list) Create queue of sorted clusters by distance between them.", "id": "f15541:c1:m16"}
{"signature": "def __create_kdtree(self):", "body": "self.__tree = kdtree()<EOL>for current_cluster in self.__queue:<EOL><INDENT>for representative_point in current_cluster.rep:<EOL><INDENT>self.__tree.insert(representative_point, current_cluster)<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Create k-d tree in line with created clusters. At the first iteration contains all points from the input data set.\n\n        @return (kdtree) k-d tree that consist of representative points of CURE clusters.", "id": "f15541:c1:m17"}
{"signature": "def __init__(self, data, number_cluster, number_represent_points = <NUM_LIT:5>, compression = <NUM_LIT:0.5>, ccore = True):", "body": "self.__pointer_data = self.__prepare_data_points(data)<EOL>self.__clusters = None<EOL>self.__representors = None<EOL>self.__means = None<EOL>self.__number_cluster = number_cluster<EOL>self.__number_represent_points = number_represent_points<EOL>self.__compression = compression<EOL>self.__ccore = ccore<EOL>if self.__ccore:<EOL><INDENT>self.__ccore = ccore_library.workable()<EOL><DEDENT>self.__validate_arguments()<EOL>", "docstring": "!\n        @brief Constructor of clustering algorithm CURE.\n\n        @param[in] data (array_like): Input data that should be processed.\n        @param[in] number_cluster (uint): Number of clusters that should be allocated.\n        @param[in] number_represent_points (uint): Number of representative points for each cluster.\n        @param[in] compression (double): Coefficient defines level of shrinking of representation points toward the mean of the new created cluster after merging on each step. Usually it destributed from 0 to 1.\n        @param[in] ccore (bool): If True then CCORE (C++ solution) will be used for solving.", "id": "f15541:c1:m0"}
{"signature": "def __init__(self, point, index):", "body": "<EOL>self.points = [ ]<EOL>self.indexes = -<NUM_LIT:1><EOL>self.mean = None<EOL>self.rep = [ ]<EOL>if point is not None:<EOL><INDENT>self.points = [ point ]<EOL>self.indexes = [ index ]<EOL>self.mean = point<EOL>self.rep = [ point ]<EOL><DEDENT>self.closest = None<EOL>self.distance = float('<STR_LIT>')<EOL>", "docstring": "!\n        @brief Constructor of CURE cluster.\n\n        @param[in] point (list): Point represented by list of coordinates.\n        @param[in] index (uint): Index point in dataset.", "id": "f15541:c0:m0"}
{"signature": "def __relocate_cluster(self, cluster):", "body": "self.__queue.remove(cluster)<EOL>self.__insert_cluster(cluster)<EOL>", "docstring": "!\n        @brief Relocate cluster in list in line with distance order.\n\n        @param[in] cluster (cure_cluster): Cluster that should be relocated in line with order.", "id": "f15541:c1:m11"}
{"signature": "def get_clusters(self):", "body": "return self.__clusters<EOL>", "docstring": "!\n        @brief Returns list of allocated clusters, each cluster contains indexes of objects in list of data.\n\n        @return (list) List of allocated clusters.\n\n        @see process()\n        @see get_representors()\n        @see get_means()", "id": "f15541:c1:m4"}
{"signature": "def get_means(self):", "body": "return self.__means<EOL>", "docstring": "!\n        @brief Returns list of mean values of each cluster.\n        @details Cluster index should be used for navigation between mean values.\n\n        @return (list) List of mean values of each cluster.\n\n        @see get_clusters()\n        @see get_representors()", "id": "f15541:c1:m6"}
{"signature": "def __prepare_data_points(self, sample):", "body": "if isinstance(sample, numpy.ndarray):<EOL><INDENT>return sample.tolist()<EOL><DEDENT>return sample<EOL>", "docstring": "!\n        @brief Prepare data points for clustering.\n        @details In case of numpy.array there are a lot of overloaded basic operators, such as __contains__, __eq__.\n\n        @return (list) Returns sample in list format.", "id": "f15541:c1:m8"}
{"signature": "def get_representors(self):", "body": "return self.__representors<EOL>", "docstring": "!\n        @brief Returns list of point-representors of each cluster.\n        @details Cluster index should be used for navigation between lists of point-representors.\n\n        @return (list) List of point-representors of each cluster.\n\n        @see get_clusters()\n        @see get_means()", "id": "f15541:c1:m5"}
{"signature": "def __neighbor_indexes_points(self, optic_object):", "body": "kdnodes = self.__kdtree.find_nearest_dist_nodes(self.__sample_pointer[optic_object.index_object], self.__eps)<EOL>return [[node_tuple[<NUM_LIT:1>].payload, math.sqrt(node_tuple[<NUM_LIT:0>])] for node_tuple in kdnodes if<EOL>node_tuple[<NUM_LIT:1>].payload != optic_object.index_object]<EOL>", "docstring": "!\n        @brief Return neighbors of the specified object in case of sequence of points.\n\n        @param[in] optic_object (optics_descriptor): Object for which neighbors should be returned in line with connectivity radius.\n\n        @return (list) List of indexes of neighbors in line the connectivity radius.", "id": "f15542:c3:m16"}
{"signature": "def __process_by_ccore(self):", "body": "(self.__clusters, self.__noise, self.__ordering, self.__eps,<EOL>objects_indexes, objects_core_distances, objects_reachability_distances) =wrapper.optics(self.__sample_pointer, self.__eps, self.__minpts, self.__amount_clusters, self.__data_type)<EOL>self.__optics_objects = []<EOL>for i in range(len(objects_indexes)):<EOL><INDENT>if objects_core_distances[i] < <NUM_LIT:0.0>:<EOL><INDENT>objects_core_distances[i] = None<EOL><DEDENT>if objects_reachability_distances[i] < <NUM_LIT:0.0>:<EOL><INDENT>objects_reachability_distances[i] = None<EOL><DEDENT>optics_object = optics_descriptor(objects_indexes[i], objects_core_distances[i], objects_reachability_distances[i])<EOL>optics_object.processed = True<EOL>self.__optics_objects.append(optics_object)<EOL><DEDENT>", "docstring": "!\n        @brief Performs cluster analysis using CCORE (C/C++ part of pyclustering library).", "id": "f15542:c3:m2"}
{"signature": "def __len__(self):", "body": "return len(self.__ordering)<EOL>", "docstring": "!\n        @brief Returns length of clustering-ordering diagram.", "id": "f15542:c1:m2"}
{"signature": "def __init__(self, sample, eps, minpts, amount_clusters = None, ccore = True, **kwargs):", "body": "self.__sample_pointer = sample      <EOL>self.__eps = eps                    <EOL>self.__minpts = minpts              <EOL>self.__amount_clusters = amount_clusters<EOL>self.__ordering = None<EOL>self.__clusters = None<EOL>self.__noise = None<EOL>self.__optics_objects = None<EOL>self.__data_type = kwargs.get('<STR_LIT>', '<STR_LIT>')<EOL>self.__kdtree = None<EOL>self.__ccore = ccore<EOL>self.__neighbor_searcher = self.__create_neighbor_searcher(self.__data_type)<EOL>if self.__ccore:<EOL><INDENT>self.__ccore = ccore_library.workable()<EOL><DEDENT>", "docstring": "!\n        @brief Constructor of clustering algorithm OPTICS.\n\n        @param[in] sample (list): Input data that is presented as a list of points (objects), where each point is represented by list or tuple.\n        @param[in] eps (double): Connectivity radius between points, points may be connected if distance between them less than the radius.\n        @param[in] minpts (uint): Minimum number of shared neighbors that is required for establishing links between points.\n        @param[in] amount_clusters (uint): Optional parameter where amount of clusters that should be allocated is specified.\n                    In case of usage 'amount_clusters' connectivity radius can be greater than real, in other words, there is place for mistake\n                    in connectivity radius usage.\n        @param[in] ccore (bool): if True than DLL CCORE (C++ solution) will be used for solving the problem.\n        @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'data_type').\n\n        <b>Keyword Args:</b><br>\n            - data_type (string): Data type of input sample 'data' that is processed by the algorithm ('points', 'distance_matrix').", "id": "f15542:c3:m0"}
{"signature": "def __init__(self, index, core_distance = None, reachability_distance = None):", "body": "<EOL>self.index_object = index<EOL>self.core_distance = core_distance<EOL>self.reachability_distance = reachability_distance<EOL>self.processed = False<EOL>", "docstring": "!\n        @brief Constructor of object description in optics terms.\n\n        @param[in] index (uint): Index of the object in the data set.\n        @param[in] core_distance (double): Core distance that is minimum distance to specified number of neighbors.\n        @param[in] reachability_distance (double): Reachability distance to this object.", "id": "f15542:c2:m0"}
{"signature": "def __initialize(self, sample):", "body": "self.__processed = [False] * len(sample)<EOL>self.__optics_objects = [optics_descriptor(i) for i in range(len(sample))]      <EOL>self.__ordered_database = []        <EOL>self.__clusters = None      <EOL>self.__noise = None         <EOL>", "docstring": "!\n        @brief Initializes internal states and resets clustering results in line with input sample.", "id": "f15542:c3:m4"}
{"signature": "def get_clusters(self):", "body": "return self.__clusters<EOL>", "docstring": "!\n        @brief Returns list of allocated clusters, where each cluster contains indexes of objects and each cluster is represented by list.\n\n        @return (list) List of allocated clusters.\n\n        @see process()\n        @see get_noise()\n        @see get_ordering()\n        @see get_radius()", "id": "f15542:c3:m6"}
{"signature": "def get_ordering(self):", "body": "if self.__ordering is None:<EOL><INDENT>self.__ordering = []<EOL>for cluster in self.__clusters:<EOL><INDENT>for index_object in cluster:<EOL><INDENT>optics_object = self.__optics_objects[index_object]<EOL>if optics_object.reachability_distance is not None:<EOL><INDENT>self.__ordering.append(optics_object.reachability_distance)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return self.__ordering<EOL>", "docstring": "!\n        @brief Returns clustering ordering information about the input data set.\n        @details Clustering ordering of data-set contains the information about the internal clustering structure in line with connectivity radius.\n\n        @return (ordering_analyser) Analyser of clustering ordering.\n\n        @see process()\n        @see get_clusters()\n        @see get_noise()\n        @see get_radius()\n        @see get_optics_objects()", "id": "f15542:c3:m8"}
{"signature": "def __extract_clusters(self):", "body": "self.__clusters = []<EOL>self.__noise = []<EOL>current_cluster = self.__noise<EOL>for optics_object in self.__ordered_database:<EOL><INDENT>if (optics_object.reachability_distance is None) or (optics_object.reachability_distance > self.__eps):<EOL><INDENT>if (optics_object.core_distance is not None) and (optics_object.core_distance <= self.__eps):<EOL><INDENT>self.__clusters.append([ optics_object.index_object ])<EOL>current_cluster = self.__clusters[-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>self.__noise.append(optics_object.index_object)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>current_cluster.append(optics_object.index_object)<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Extract clusters and noise from order database.", "id": "f15542:c3:m14"}
{"signature": "def extract_cluster_amount(self, radius):", "body": "amount_clusters = <NUM_LIT:1><EOL>cluster_start = False<EOL>cluster_pick = False<EOL>total_similarity = True<EOL>previous_cluster_distance = None<EOL>previous_distance = None<EOL>cluster_borders = []<EOL>for index_ordering in range(len(self.__ordering)):<EOL><INDENT>distance = self.__ordering[index_ordering]<EOL>if distance >= radius:<EOL><INDENT>if cluster_start is False:<EOL><INDENT>cluster_start = True<EOL>amount_clusters += <NUM_LIT:1><EOL>if index_ordering != <NUM_LIT:0>:<EOL><INDENT>cluster_borders.append(index_ordering)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if (distance < previous_cluster_distance) and (cluster_pick is False):<EOL><INDENT>cluster_pick = True<EOL><DEDENT>elif (distance > previous_cluster_distance) and (cluster_pick is True):<EOL><INDENT>cluster_pick = False<EOL>amount_clusters += <NUM_LIT:1><EOL>if index_ordering != <NUM_LIT:0>:<EOL><INDENT>cluster_borders.append(index_ordering)<EOL><DEDENT><DEDENT><DEDENT>previous_cluster_distance = distance<EOL><DEDENT>else:<EOL><INDENT>cluster_start = False<EOL>cluster_pick = False<EOL><DEDENT>if (previous_distance is not None) and (distance != previous_distance):<EOL><INDENT>total_similarity = False<EOL><DEDENT>previous_distance = distance<EOL><DEDENT>if (total_similarity is True) and (previous_distance > radius):<EOL><INDENT>amount_clusters = <NUM_LIT:0><EOL><DEDENT>return amount_clusters, cluster_borders<EOL>", "docstring": "!\n        @brief Obtains amount of clustering that can be allocated by using specified radius for ordering diagram and borders between them.\n        @details When growth of reachability-distances is detected than it is considered as a start point of cluster, \n                 than pick is detected and after that recession is observed until new growth (that means end of the\n                 current cluster and start of a new one) or end of diagram.\n\n        @param[in] radius (double): connectivity radius that 
is used for cluster allocation.\n\n        @return (unit, list) Amount of clusters that can be allocated by the connectivity radius on ordering diagram and borders between them using indexes\n                 from ordering diagram (amount_clusters, border_clusters).", "id": "f15542:c1:m4"}
{"signature": "def get_noise(self):", "body": "return self.__noise<EOL>", "docstring": "!\n        @brief Returns list of noise that contains indexes of objects that corresponds to input data.\n\n        @return (list) List of allocated noise objects.\n\n        @see process()\n        @see get_clusters()\n        @see get_ordering()\n        @see get_radius()", "id": "f15542:c3:m7"}
{"signature": "def __init__(self, ordering_diagram):", "body": "self.__ordering = ordering_diagram<EOL>", "docstring": "!\n        @brief Analyser of ordering diagram that is based on reachability-distances.\n\n        @see calculate_connvectivity_radius", "id": "f15542:c1:m1"}
{"signature": "def process(self):", "body": "if self.__ccore is True:<EOL><INDENT>self.__process_by_ccore()<EOL><DEDENT>else:<EOL><INDENT>self.__process_by_python()<EOL><DEDENT>", "docstring": "!\n        @brief Performs cluster analysis in line with rules of OPTICS algorithm.\n\n        @remark Results of clustering can be obtained using corresponding gets methods.\n\n        @see get_clusters()\n        @see get_noise()\n        @see get_ordering()", "id": "f15542:c3:m1"}
{"signature": "@property<EOL><INDENT>def cluster_ordering(self):<DEDENT>", "body": "return self.__ordering<EOL>", "docstring": "!\n        @brief (list) Returns values of dataset cluster ordering.", "id": "f15542:c1:m0"}
{"signature": "def calculate_connvectivity_radius(self, amount_clusters, maximum_iterations = <NUM_LIT:100>):", "body": "maximum_distance = max(self.__ordering)<EOL>upper_distance = maximum_distance<EOL>lower_distance = <NUM_LIT:0.0><EOL>result = None<EOL>amount, borders = self.extract_cluster_amount(maximum_distance)<EOL>if amount <= amount_clusters:<EOL><INDENT>for _ in range(maximum_iterations):<EOL><INDENT>radius = (lower_distance + upper_distance) / <NUM_LIT><EOL>amount, borders = self.extract_cluster_amount(radius)<EOL>if amount == amount_clusters:<EOL><INDENT>result = radius<EOL>break<EOL><DEDENT>elif amount == <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>elif amount > amount_clusters:<EOL><INDENT>lower_distance = radius<EOL><DEDENT>elif amount < amount_clusters:<EOL><INDENT>upper_distance = radius<EOL><DEDENT><DEDENT><DEDENT>return result, borders<EOL>", "docstring": "!\n        @brief Calculates connectivity radius of allocation specified amount of clusters using ordering diagram and marks borders of clusters using indexes of values of ordering diagram.\n        @details Parameter 'maximum_iterations' is used to protect from hanging when it is impossible to allocate specified number of clusters.\n\n        @param[in] amount_clusters (uint): amount of clusters that should be allocated by calculated connectivity radius.\n        @param[in] maximum_iterations (uint): maximum number of iteration for searching connectivity radius to allocated specified amount of clusters (by default it is restricted by 100 iterations).\n\n        @return (double, list) Value of connectivity radius and borders of clusters like (radius, borders), radius may be 'None' as well as borders may be '[]'\n                                if connectivity radius hasn't been found for the specified amount of iterations.", "id": "f15542:c1:m3"}
{"signature": "@property<EOL><INDENT>def get_encoding(self):<DEDENT>", "body": "return self.__type_representation<EOL>", "docstring": "!\n        @brief Returns current cluster representation.", "id": "f15543:c1:m1"}
{"signature": "def __init__(self, encoding, clusters, data):", "body": "self.__type_representation = encoding;<EOL>self.__clusters = clusters;<EOL>self.__data = data<EOL>", "docstring": "!\n        @brief Constructor of clustering result representor.\n\n        @param[in] encoding (type_encoding): Type of clusters representation (index list, object list or labels).\n        @param[in] clusters (list): Current clusters representation.\n        @param[in] data (list): Data that corresponds to clusters.", "id": "f15543:c1:m0"}
{"signature": "@staticmethod<EOL><INDENT>def get_uniform(probabilities):<DEDENT>", "body": "<EOL>res_idx = None<EOL>random_num = np.random.rand()<EOL>for _idx in range(len(probabilities)):<EOL><INDENT>if random_num < probabilities[_idx]:<EOL><INDENT>res_idx = _idx<EOL>break<EOL><DEDENT><DEDENT>if res_idx is None:<EOL><INDENT>print('<STR_LIT>', probabilities)<EOL>raise AttributeError(\"<STR_LIT>\")<EOL><DEDENT>return res_idx<EOL>", "docstring": "!\n        @brief Returns index in probabilities.\n\n        @param[in] probabilities (list): List with segments in increasing sequence with val in [0, 1],\n                   for example, [0 0.1 0.2 0.3 1.0].", "id": "f15544:c0:m6"}
{"signature": "@staticmethod<EOL><INDENT>def get_centres(chromosomes, data, count_clusters):<DEDENT>", "body": "centres = ga_math.calc_centers(chromosomes, data, count_clusters)<EOL>return centres<EOL>", "docstring": "!", "id": "f15544:c0:m2"}
{"signature": "def process(self, order = <NUM_LIT>, solution = solve_type.FAST, collect_dynamic = True):", "body": "if (self._ccore_network_pointer is not None):<EOL><INDENT>pointer_output_dynamic = syncnet_process(self._ccore_network_pointer, order, solution, collect_dynamic);<EOL>return syncnet_analyser(None, None, pointer_output_dynamic);<EOL><DEDENT>else:<EOL><INDENT>output_sync_dynamic = self.simulate_dynamic(order, solution, collect_dynamic);<EOL>return syncnet_analyser(output_sync_dynamic.output, output_sync_dynamic.time, None);<EOL><DEDENT>", "docstring": "!\n        @brief Peforms cluster analysis using simulation of the oscillatory network.\n\n        @param[in] order (double): Order of synchronization that is used as indication for stopping processing.\n        @param[in] solution (solve_type): Specified type of solving diff. equation.\n        @param[in] collect_dynamic (bool): Specified requirement to collect whole dynamic of the network.\n\n        @return (syncnet_analyser) Returns analyser of results of clustering.", "id": "f15545:c2:m3"}
{"signature": "def __init__(self, phase, time, pointer_sync_analyser):", "body": "super().__init__(phase, time, pointer_sync_analyser)<EOL>", "docstring": "!\n        @brief Constructor of the analyser.\n\n        @param[in] phase (list): Output dynamic of the oscillatory network, where one iteration consists of all phases of oscillators.\n        @param[in] time (list): Simulation time.\n        @param[in] pointer_sync_analyser (POINTER): Pointer to CCORE analyser, if specified then other arguments can be omitted.", "id": "f15545:c0:m0"}
{"signature": "def allocate_clusters(self, eps = <NUM_LIT>, indexes = None, iteration = None):", "body": "return self.allocate_sync_ensembles(eps, indexes, iteration)<EOL>", "docstring": "!\n        @brief Returns list of clusters in line with state of ocillators (phases).\n\n        @param[in] eps (double): Tolerance level that define maximal difference between phases of oscillators in one cluster.\n        @param[in] indexes (list): List of real object indexes and it should be equal to amount of oscillators (in case of 'None' - indexes are in range [0; amount_oscillators]).\n        @param[in] iteration (uint): Iteration of simulation that should be used for allocation.\n\n        @return (list) List of clusters, for example [ [cluster1], [cluster2], ... ].)", "id": "f15545:c0:m2"}
{"signature": "def get_cluster_encoding(self):", "body": "return type_encoding.CLUSTER_INDEX_LIST_SEPARATION<EOL>", "docstring": "!\n        @brief Returns clustering result representation type that indicate how clusters are encoded.\n\n        @return (type_encoding) Clustering result representation.\n\n        @see get_clusters()", "id": "f15545:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def animate_cluster_allocation(dataset, analyser, animation_velocity = <NUM_LIT>, tolerance = <NUM_LIT:0.1>, save_movie = None, title = None):<DEDENT>", "body": "figure = plt.figure();<EOL>def init_frame():<EOL><INDENT>return frame_generation(<NUM_LIT:0>);<EOL><DEDENT>def frame_generation(index_dynamic):<EOL><INDENT>figure.clf();<EOL>if (title is not None):<EOL><INDENT>figure.suptitle(title, fontsize = <NUM_LIT>, fontweight = '<STR_LIT>');<EOL><DEDENT>ax1 = figure.add_subplot(<NUM_LIT>, projection='<STR_LIT>');<EOL>clusters = analyser.allocate_clusters(eps = tolerance, iteration = index_dynamic);<EOL>dynamic = analyser.output[index_dynamic];<EOL>visualizer = cluster_visualizer(size_row = <NUM_LIT:2>);<EOL>visualizer.append_clusters(clusters, dataset);<EOL>artist1, = ax1.plot(dynamic, [<NUM_LIT:1.0>] * len(dynamic), marker = '<STR_LIT:o>', color = '<STR_LIT>', ls = '<STR_LIT>');<EOL>visualizer.show(figure, display = False);<EOL>artist2 = figure.gca();<EOL>return [ artist1, artist2 ];<EOL><DEDENT>cluster_animation = animation.FuncAnimation(figure, frame_generation, len(analyser), interval = animation_velocity, init_func = init_frame, repeat_delay = <NUM_LIT>);<EOL>if (save_movie is not None):<EOL><INDENT>plt.rcParams['<STR_LIT>'] = '<STR_LIT>';<EOL>ffmpeg_writer = animation.FFMpegWriter(fps = <NUM_LIT:15>);<EOL>cluster_animation.save(save_movie, writer = ffmpeg_writer);<EOL>", "docstring": "!\n        @brief Shows animation of output dynamic (output of each oscillator) during simulation on a circle from [0; 2pi].\n\n        @param[in] dataset (list): Input data that was used for processing by the network.\n        @param[in] analyser (syncnet_analyser): Output dynamic analyser of the Sync network.\n        @param[in] animation_velocity (uint): Interval between frames in milliseconds.\n        @param[in] tolerance (double): Tolerance level that define maximal difference between phases of oscillators in one cluster.\n        
@param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.\n        @param[in] title (string): If it is specified then title will be displayed on the animation plot.", "id": "f15545:c1:m0"}
{"signature": "def __init__(self, sample, radius, conn_repr = conn_represent.MATRIX, initial_phases = initial_type.RANDOM_GAUSSIAN, enable_conn_weight = False, ccore = True):", "body": "self._ccore_network_pointer = None;<EOL>self._osc_loc = sample;<EOL>self._num_osc = len(sample);<EOL>if ( (ccore is True) and ccore_library.workable() ):<EOL><INDENT>self._ccore_network_pointer = syncnet_create_network(sample, radius, initial_phases, enable_conn_weight);<EOL>self._conn_represent = conn_represent.MATRIX;<EOL><DEDENT>else:<EOL><INDENT>super().__init__(len(sample), <NUM_LIT:1>, <NUM_LIT:0>, conn_type.DYNAMIC, conn_repr, initial_phases, False);<EOL>self._conn_weight = None;<EOL>self._ena_conn_weight = enable_conn_weight;<EOL>if (radius is not None):<EOL><INDENT>self._create_connections(radius);<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Contructor of the oscillatory network SYNC for cluster analysis.\n\n        @param[in] sample (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple.\n        @param[in] radius (double): Connectivity radius between points, points should be connected if distance between them less then the radius.\n        @param[in] conn_repr (conn_represent): Internal representation of connection in the network: matrix or list. Ignored in case of usage of CCORE library.\n        @param[in] initial_phases (initial_type): Type of initialization of initial phases of oscillators (random, uniformly distributed, etc.).\n        @param[in] enable_conn_weight (bool): If True - enable mode when strength between oscillators depends on distance between two oscillators.\n              If False - all connection between oscillators have the same strength that equals to 1 (True).\n        @param[in] ccore (bool): Defines should be CCORE C++ library used instead of Python code or not.", "id": "f15545:c2:m0"}
{"signature": "def __init__(self, data, eps, number_clusters, threshold = <NUM_LIT:0.5>, ccore = True):", "body": "self.__pointer_data = data;<EOL>self.__eps = eps;<EOL>self.__number_clusters = number_clusters;<EOL>self.__threshold = threshold;<EOL>self.__clusters = None;<EOL>self.__ccore = ccore;<EOL>if (self.__ccore):<EOL><INDENT>self.__ccore = ccore_library.workable();<EOL><DEDENT>self.__degree_normalization = <NUM_LIT:1.0> + <NUM_LIT> * ( (<NUM_LIT:1.0> - threshold) / (<NUM_LIT:1.0> + threshold) );<EOL>self.__adjacency_matrix = None;<EOL>self.__create_adjacency_matrix()<EOL>", "docstring": "!\n        @brief Constructor of clustering algorithm ROCK.\n\n        @param[in] data (list): Input data - list of points where each point is represented by list of coordinates.\n        @param[in] eps (double): Connectivity radius (similarity threshold), points are neighbors if distance between them is less than connectivity radius.\n        @param[in] number_clusters (uint): Defines number of clusters that should be allocated from the input data set.\n        @param[in] threshold (double): Value that defines degree of normalization that influences on choice of clusters for merging during processing.\n        @param[in] ccore (bool): Defines should be CCORE (C++ pyclustering library) used instead of Python code or not.", "id": "f15546:c0:m0"}
{"signature": "def __create_adjacency_matrix(self):", "body": "size_data = len(self.__pointer_data);<EOL>self.__adjacency_matrix = [ [ <NUM_LIT:0> for i in range(size_data) ] for j in range(size_data) ];<EOL>for i in range(<NUM_LIT:0>, size_data):<EOL><INDENT>for j in range(i + <NUM_LIT:1>, size_data):<EOL><INDENT>distance = euclidean_distance(self.__pointer_data[i], self.__pointer_data[j]);<EOL>if (distance <= self.__eps):<EOL><INDENT>self.__adjacency_matrix[i][j] = <NUM_LIT:1>;<EOL>self.__adjacency_matrix[j][i] = <NUM_LIT:1>;<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "!\n        @brief Creates 2D adjacency matrix (list of lists) where each element described existence of link between points (means that points are neighbors).", "id": "f15546:c0:m6"}
{"signature": "def process(self):", "body": "<EOL>if (self.__ccore is True):<EOL><INDENT>self.__clusters = wrapper.rock(self.__pointer_data, self.__eps, self.__number_clusters, self.__threshold);<EOL><DEDENT>else:  <EOL><INDENT>self.__clusters = [[index] for index in range(len(self.__pointer_data))];<EOL>while (len(self.__clusters) > self.__number_clusters):<EOL><INDENT>indexes = self.__find_pair_clusters(self.__clusters);<EOL>if (indexes != [-<NUM_LIT:1>, -<NUM_LIT:1>]):<EOL><INDENT>self.__clusters[indexes[<NUM_LIT:0>]] += self.__clusters[indexes[<NUM_LIT:1>]];<EOL>self.__clusters.pop(indexes[<NUM_LIT:1>]);   <EOL><DEDENT>else:<EOL><INDENT>break;<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "!\n        @brief Performs cluster analysis in line with rules of ROCK algorithm.\n\n        @remark Results of clustering can be obtained using corresponding get methods.\n\n        @see get_clusters()", "id": "f15546:c0:m1"}
{"signature": "def __find_pair_clusters(self, clusters):", "body": "maximum_goodness = <NUM_LIT:0.0>;<EOL>cluster_indexes = [-<NUM_LIT:1>, -<NUM_LIT:1>];<EOL>for i in range(<NUM_LIT:0>, len(clusters)):<EOL><INDENT>for j in range(i + <NUM_LIT:1>, len(clusters)):<EOL><INDENT>goodness = self.__calculate_goodness(clusters[i], clusters[j]);<EOL>if (goodness > maximum_goodness):<EOL><INDENT>maximum_goodness = goodness;<EOL>cluster_indexes = [i, j];<EOL><DEDENT><DEDENT><DEDENT>return cluster_indexes<EOL>", "docstring": "!\n        @brief Returns pair of clusters that are best candidates for merging in line with goodness measure.\n               The pair of clusters for which the above goodness measure is maximum is the best pair of clusters to be merged.\n\n        @param[in] clusters (list): List of clusters that have been allocated during processing, each cluster is represented by list of indexes of points from the input data set.\n\n        @return (list) List that contains two indexes of clusters (from list 'clusters') that should be merged on this step.\n                It can be equals to [-1, -1] when no links between clusters.", "id": "f15546:c0:m4"}
{"signature": "def get_cluster_encoding(self):", "body": "return type_encoding.CLUSTER_INDEX_LIST_SEPARATION<EOL>", "docstring": "!\n        @brief Returns clustering result representation type that indicate how clusters are encoded.\n\n        @return (type_encoding) Clustering result representation.\n\n        @see get_clusters()", "id": "f15546:c0:m3"}
{"signature": "def __find_optimal_kvalue(self):", "body": "optimal_elbow_value = max(self.__elbows)<EOL>self.__kvalue = self.__elbows.index(optimal_elbow_value) + <NUM_LIT:1> + self.__kmin<EOL>", "docstring": "!\n        @brief Finds elbow and returns corresponding K-value.", "id": "f15547:c0:m7"}
{"signature": "def __process_by_ccore(self):", "body": "if isinstance(self.__initializer, kmeans_plusplus_initializer):<EOL><INDENT>initializer = wrapper.elbow_center_initializer.KMEANS_PLUS_PLUS<EOL><DEDENT>else:<EOL><INDENT>initializer = wrapper.elbow_center_initializer.RANDOM<EOL><DEDENT>result = wrapper.elbow(self.__data, self.__kmin, self.__kmax, initializer)<EOL>self.__kvalue = result[<NUM_LIT:0>]<EOL>self.__wce = result[<NUM_LIT:1>]<EOL>", "docstring": "!\n        @brief Performs processing using C++ implementation.", "id": "f15547:c0:m2"}
{"signature": "def process(self):", "body": "if self.__ccore:<EOL><INDENT>self.__process_by_ccore()<EOL><DEDENT>else:<EOL><INDENT>self.__process_by_python()<EOL><DEDENT>return self<EOL>", "docstring": "!\n        @brief Performs analysis to find out appropriate amount of clusters.\n\n        @return", "id": "f15547:c0:m1"}
{"signature": "def get_wce(self):", "body": "return self.__wce<EOL>", "docstring": "!\n        @brief Returns list of total within cluster errors for each K-value (kmin, kmin + 1, ..., kmax - 1).", "id": "f15547:c0:m5"}
{"signature": "def get_clusters(self):", "body": "return self.__network.capture_objects<EOL>", "docstring": "!\n        @brief Returns list of allocated clusters, each cluster contains indexes of objects in list of data.\n\n        @see process()", "id": "f15548:c0:m2"}
{"signature": "def __init__(self, data, amount_clusters, epouch = <NUM_LIT:100>, ccore = True):", "body": "self.__data_pointer = data;<EOL>self.__amount_clusters = amount_clusters;<EOL>self.__epouch = epouch;<EOL>self.__ccore = ccore;<EOL>self.__network = None<EOL>", "docstring": "!\n        @brief Creates SOM-SC (Self Organized Map for Simple Clustering) algorithm for clustering analysis.\n\n        @param[in] data (list): List of points that are used for processing.\n        @param[in] amount_clusters (uint): Amount of clusters that should be allocated.\n        @param[in] epouch (uint): Number of epochs for training of SOM.\n        @param[in] ccore (bool): If it is True then CCORE implementation will be used for clustering analysis.", "id": "f15548:c0:m0"}
{"signature": "def __expand_cluster(self, index_point):", "body": "cluster = None<EOL>self.__visited[index_point] = True<EOL>neighbors = self.__neighbor_searcher(index_point)<EOL>if len(neighbors) >= self.__neighbors:<EOL><INDENT>cluster = [index_point]<EOL>self.__belong[index_point] = True<EOL>for i in neighbors:<EOL><INDENT>if self.__visited[i] is False:<EOL><INDENT>self.__visited[i] = True<EOL>next_neighbors = self.__neighbor_searcher(i)<EOL>if len(next_neighbors) >= self.__neighbors:<EOL><INDENT>neighbors += [k for k in next_neighbors if ( (k in neighbors) == False) and k != index_point]<EOL><DEDENT><DEDENT>if self.__belong[i] is False:<EOL><INDENT>cluster.append(i)<EOL>self.__belong[i] = True<EOL><DEDENT><DEDENT><DEDENT>return cluster<EOL>", "docstring": "!\n        @brief Expands cluster from specified point in the input data space.\n\n        @param[in] index_point (list): Index of a point from the data.\n\n        @return (list) Return tuple of list of indexes that belong to the same cluster and list of points that are marked as noise: (cluster, noise), or None if nothing has been expanded.", "id": "f15549:c0:m6"}
{"signature": "def __calculate_radius(self, number_neighbors, radius):", "body": "if (number_neighbors >= len(self._osc_loc)):<EOL><INDENT>return radius * self.__increase_persent + radius;<EOL><DEDENT>return average_neighbor_distance(self._osc_loc, number_neighbors)<EOL>", "docstring": "!\n        @brief Calculate new connectivity radius.\n\n        @param[in] number_neighbors (uint): Average amount of neighbors that should be connected by new radius.\n        @param[in] radius (double): Current connectivity radius.\n\n        @return New connectivity radius.", "id": "f15550:c0:m3"}
{"signature": "def process(self, order = <NUM_LIT>, solution = solve_type.FAST, collect_dynamic = False):", "body": "if (self.__ccore_network_pointer is not None):<EOL><INDENT>analyser = wrapper.hsyncnet_process(self.__ccore_network_pointer, order, solution, collect_dynamic);<EOL>return syncnet_analyser(None, None, analyser);<EOL><DEDENT>number_neighbors = self.__initial_neighbors;<EOL>current_number_clusters = float('<STR_LIT>');<EOL>dyn_phase = [];<EOL>dyn_time = [];<EOL>radius = average_neighbor_distance(self._osc_loc, number_neighbors);<EOL>increase_step = int(len(self._osc_loc) * self.__increase_persent);<EOL>if (increase_step < <NUM_LIT:1>):<EOL><INDENT>increase_step = <NUM_LIT:1>;<EOL><DEDENT>analyser = None;<EOL>while(current_number_clusters > self._number_clusters):<EOL><INDENT>self._create_connections(radius);<EOL>analyser = self.simulate_dynamic(order, solution, collect_dynamic);<EOL>if (collect_dynamic == True):<EOL><INDENT>if (len(dyn_phase) == <NUM_LIT:0>):<EOL><INDENT>self.__store_dynamic(dyn_phase, dyn_time, analyser, True);<EOL><DEDENT>self.__store_dynamic(dyn_phase, dyn_time, analyser, False);<EOL><DEDENT>clusters = analyser.allocate_sync_ensembles(<NUM_LIT>);<EOL>current_number_clusters = len(clusters);<EOL>number_neighbors += increase_step;<EOL>radius = self.__calculate_radius(number_neighbors, radius);<EOL><DEDENT>if (collect_dynamic != True):<EOL><INDENT>self.__store_dynamic(dyn_phase, dyn_time, analyser, False);<EOL><DEDENT>return syncnet_analyser(dyn_phase, dyn_time, None)<EOL>", "docstring": "!\n        @brief Performs clustering of input data set in line with input parameters.\n\n        @param[in] order (double): Level of local synchronization between oscillator that defines end of synchronization process, range [0..1].\n        @param[in] solution (solve_type) Type of solving differential equation.\n        @param[in] collect_dynamic (bool): If True - returns whole history of process synchronization otherwise - only final state (when 
process of clustering is over).\n\n        @return (tuple) Returns dynamic of the network as tuple of lists on each iteration (time, oscillator_phases) that depends on collect_dynamic parameter. \n\n        @see get_clusters()", "id": "f15550:c0:m2"}
{"signature": "def __calculate_centers(self):", "body": "dimension = self.__data.shape[<NUM_LIT:1>]<EOL>centers = numpy.zeros((len(self.__centers), dimension))<EOL>for i in range(len(self.__centers)):<EOL><INDENT>centers[i] = numpy.divide(self.__membership[:, i] @ self.__data, numpy.sum(self.__membership[:, i]))<EOL><DEDENT>return centers<EOL>", "docstring": "!\n        @brief Calculate center using membership of each cluster.\n\n        @return (list) Updated clusters as list of clusters. Each cluster contains indexes of objects from data.\n\n        @return (numpy.array) Updated centers.", "id": "f15551:c0:m7"}
{"signature": "def __process_by_ccore(self):", "body": "result = wrapper.fcm_algorithm(self.__data, self.__centers, self.__m, self.__tolerance, self.__itermax)<EOL>self.__clusters = result[wrapper.fcm_package_indexer.INDEX_CLUSTERS]<EOL>self.__centers = result[wrapper.fcm_package_indexer.INDEX_CENTERS]<EOL>self.__membership = result[wrapper.fcm_package_indexer.INDEX_MEMBERSHIP]<EOL>", "docstring": "!\n        @brief Performs cluster analysis using C/C++ implementation.", "id": "f15551:c0:m5"}
{"signature": "def get_membership(self):", "body": "return self.__membership<EOL>", "docstring": "!\n        @brief Returns cluster membership (probability) for each point in data.\n\n        @return (array_like) Membership for each point in format [[Px1(c1), Px1(c2), ...], [Px2(c1), Px2(c2), ...], ...],\n                 where [Px1(c1), Px1(c2), ...] membership for point x1.\n\n        @see process()\n        @see get_clusters()\n        @see get_centers()", "id": "f15551:c0:m4"}
{"signature": "def __calculate_changes(self, updated_centers):", "body": "changes = numpy.sum(numpy.square(self.__centers - updated_centers), axis=<NUM_LIT:1>).T<EOL>return numpy.max(changes)<EOL>", "docstring": "!\n        @brief Calculate changes between centers.\n\n        @return (float) Maximum change between centers.", "id": "f15551:c0:m9"}
{"signature": "def get_clusters(self):", "body": "return self.__clusters<EOL>", "docstring": "!\n        @brief Returns allocated clusters that consists of points that most likely (in line with membership) belong to\n                these clusters.\n\n        @remark Allocated clusters can be returned only after data processing (use method process()). Otherwise empty list is returned.\n\n        @return (list) List of allocated clusters, each cluster contains indexes from input data.\n\n        @see process()\n        @see get_centers()\n        @see get_membership()", "id": "f15551:c0:m2"}
{"signature": "def get_centers(self):", "body": "return self.__centers<EOL>", "docstring": "!\n        @brief Returns list of centers of allocated clusters.\n\n        @return (array_like) Cluster centers.\n\n        @see process()\n        @see get_clusters()\n        @see get_membership()", "id": "f15551:c0:m3"}
{"signature": "def __init__(self, data, initial_centers, **kwargs):", "body": "self.__data = data<EOL>self.__clusters = []<EOL>self.__centers = initial_centers<EOL>self.__membership = []<EOL>self.__tolerance = kwargs.get('<STR_LIT>', <NUM_LIT>)<EOL>self.__itermax = kwargs.get('<STR_LIT>', <NUM_LIT:200>)<EOL>self.__m = kwargs.get('<STR_LIT:m>', <NUM_LIT:2>)<EOL>self.__degree = <NUM_LIT> / (self.__m - <NUM_LIT:1>)<EOL>self.__ccore = kwargs.get('<STR_LIT>', True)<EOL>if self.__ccore is True:<EOL><INDENT>self.__ccore = ccore_library.workable()<EOL><DEDENT>", "docstring": "!\n        @brief Initialize Fuzzy C-Means algorithm.\n\n        @param[in] data (array_like): Input data that is presented as array of points (objects), each point should be represented by array_like data structure.\n        @param[in] initial_centers (array_like): Initial coordinates of centers of clusters that are represented by array_like data structure: [center1, center2, ...].\n        @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'tolerance', 'itermax', 'm').\n\n        <b>Keyword Args:</b><br>\n            - ccore (bool): Defines should be CCORE library (C++ pyclustering library) used instead of Python code or not.\n            - tolerance (float): Stop condition: if maximum value of change of centers of clusters is less than tolerance then algorithm stops processing.\n            - itermax (uint): Maximum number of iterations that is used for clustering process (by default: 200).\n            - m (float): Hyper-parameter that controls how fuzzy the cluster will be. The higher it is, the fuzzier the cluster will be in the end.\n               This parameter should be greater than 1 (by default: 2).", "id": "f15551:c0:m0"}
{"signature": "def process(self, collect_dynamic = False, order = <NUM_LIT>):", "body": "<EOL>self._som.train(self._data, <NUM_LIT:100>);<EOL>weights = list();<EOL>self._som_osc_table.clear();        <EOL>for i in range(self._som.size):<EOL><INDENT>if (self._som.awards[i] > <NUM_LIT:0>):<EOL><INDENT>weights.append(self._som.weights[i]);<EOL>self._som_osc_table.append(i);<EOL><DEDENT><DEDENT>self._sync = self.__create_sync_layer(weights);<EOL>self._analyser = self._sync.process(order, collect_dynamic = collect_dynamic);<EOL>return (self._analyser.time, self._analyser.output)<EOL>", "docstring": "!\n        @brief Performs simulation of the oscillatory network.\n\n        @param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.\n        @param[in] order (double): Order of process synchronization that should be considered as end of clustering, destributed 0..1.\n\n        @return (tuple) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,\n                otherwise returns only last values (last step of simulation) of dynamic.\n\n        @see get_som_clusters()\n        @see get_clusters()", "id": "f15552:c0:m3"}
{"signature": "@property<EOL><INDENT>def sync_layer(self):<DEDENT>", "body": "return self._sync<EOL>", "docstring": "!\n        @brief The second layer of the oscillatory network based on Kuramoto model.", "id": "f15552:c0:m1"}
{"signature": "@property<EOL><INDENT>def som_layer(self):<DEDENT>", "body": "return self._som<EOL>", "docstring": "!\n        @brief The first layer of the oscillatory network - self-organized feature map.", "id": "f15552:c0:m0"}
{"signature": "def __has_object_connection(self, oscillator_index1, oscillator_index2):", "body": "som_neuron_index1 = self._som_osc_table[oscillator_index1];<EOL>som_neuron_index2 = self._som_osc_table[oscillator_index2];<EOL>for index_object1 in self._som.capture_objects[som_neuron_index1]:<EOL><INDENT>for index_object2 in self._som.capture_objects[som_neuron_index2]:<EOL><INDENT>distance = euclidean_distance_square(self._data[index_object1], self._data[index_object2]);<EOL>if (distance <= self._radius):<EOL><INDENT>return True;<EOL><DEDENT><DEDENT><DEDENT>return False<EOL>", "docstring": "!\n        @brief Searches for pair of objects that are encoded by specified neurons and that are connected in line with connectivity radius.\n\n        @param[in] oscillator_index1 (uint): Index of the first oscillator in the second layer.\n        @param[in] oscillator_index2 (uint): Index of the second oscillator in the second layer.\n\n        @return (bool) True - if there is pair of connected objects encoded by specified oscillators.", "id": "f15552:c0:m5"}
{"signature": "def cluster_wing_nut():", "body": "start_centers = [[-<NUM_LIT>, <NUM_LIT>], [<NUM_LIT>, <NUM_LIT>]]<EOL>template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_WING_NUT)<EOL>", "docstring": "Almost good!", "id": "f15558:m12"}
{"signature": "def cluster_lsun():", "body": "start_centers = [[<NUM_LIT:1.0>, <NUM_LIT>], [<NUM_LIT>, <NUM_LIT:0.5>], [<NUM_LIT>, <NUM_LIT>]]<EOL>template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_LSUN)<EOL>", "docstring": "Not so applicable for this sample", "id": "f15558:m9"}
{"signature": "def cluster_lsun():", "body": "start_centers = [[<NUM_LIT:1.0>, <NUM_LIT>], [<NUM_LIT>, <NUM_LIT:0.5>], [<NUM_LIT>, <NUM_LIT>]]<EOL>template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_LSUN, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)<EOL>template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_LSUN, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)<EOL>", "docstring": "Not so applicable for this sample", "id": "f15574:m12"}
{"signature": "def cluster_sample2():", "body": "start_centers = [[<NUM_LIT>, <NUM_LIT>], [<NUM_LIT>, <NUM_LIT>]]<EOL>template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE2, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)<EOL>template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_SIMPLE2, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)<EOL>", "docstring": "Start with wrong number of clusters.", "id": "f15574:m3"}
{"signature": "def cluster_two_diamonds():", "body": "start_centers = [[<NUM_LIT>, <NUM_LIT>]]<EOL>template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)<EOL>template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)<EOL>", "docstring": "Start with wrong number of clusters.", "id": "f15574:m14"}
{"signature": "def cluster_target():", "body": "start_centers = [[<NUM_LIT>, <NUM_LIT>], [<NUM_LIT:0.0>, -<NUM_LIT>], [<NUM_LIT>, -<NUM_LIT>], [<NUM_LIT>, <NUM_LIT>], [-<NUM_LIT>, <NUM_LIT>], [-<NUM_LIT>, -<NUM_LIT>]]<EOL>template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_TARGET, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)<EOL>template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_TARGET, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)<EOL>", "docstring": "Not so applicable for this sample", "id": "f15574:m13"}
{"signature": "def cluster_hepta():", "body": "start_centers = [[<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>], [<NUM_LIT>, <NUM_LIT:0.0>, <NUM_LIT:0.0>], [-<NUM_LIT>, <NUM_LIT:0.0>, <NUM_LIT:0.0>], [<NUM_LIT:0.0>, <NUM_LIT>, <NUM_LIT:0.0>], [<NUM_LIT:0.0>, -<NUM_LIT>, <NUM_LIT:0.0>], [<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT>]]<EOL>template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_HEPTA, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION)<EOL>template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_HEPTA, criterion = splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH)<EOL>", "docstring": "Start with wrong number of clusters.", "id": "f15574:m18"}
{"signature": "def cluster_golf_ball():", "body": "template_clustering(<NUM_LIT:0.5>, <NUM_LIT:3>, FCPS_SAMPLES.SAMPLE_GOLF_BALL)<EOL>", "docstring": "Toooooooooooo looooong", "id": "f15578:m15"}
{"signature": "def cluster_wing_nut():", "body": "template_clustering(<NUM_LIT>, <NUM_LIT:2>, FCPS_SAMPLES.SAMPLE_WING_NUT)<EOL>", "docstring": "It's hard to choose properly parameters, but it's OK", "id": "f15578:m12"}
{"signature": "def cluster_elongate():", "body": "start_centers = [[<NUM_LIT:1.0>, <NUM_LIT>], [<NUM_LIT>, <NUM_LIT>]]<EOL>template_clustering(start_centers, SIMPLE_SAMPLES.SAMPLE_ELONGATE)<EOL>", "docstring": "Not so applicable for this sample", "id": "f15581:m9"}
{"signature": "def cluster_wing_nut():", "body": "start_centers = [[-<NUM_LIT>, <NUM_LIT>], [<NUM_LIT>, <NUM_LIT>]]<EOL>template_clustering(start_centers, FCPS_SAMPLES.SAMPLE_WING_NUT)<EOL>", "docstring": "Almost good!", "id": "f15581:m13"}
{"signature": "def __create_canvas(self, dimension, pairs, position, **kwargs):", "body": "visible_grid = kwargs.get('<STR_LIT>', True)<EOL>visible_labels = kwargs.get('<STR_LIT>', True)<EOL>visible_axis = kwargs.get('<STR_LIT>', False)<EOL>ax = self.__figure.add_subplot(self.__grid_spec[position])<EOL>if dimension > <NUM_LIT:1>:<EOL><INDENT>if visible_labels:<EOL><INDENT>ax.set_xlabel(\"<STR_LIT>\" % pairs[position][<NUM_LIT:0>])<EOL>ax.set_ylabel(\"<STR_LIT>\" % pairs[position][<NUM_LIT:1>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>ax.set_ylim(-<NUM_LIT:0.5>, <NUM_LIT:0.5>)<EOL>ax.set_yticklabels([])<EOL><DEDENT>if visible_grid:<EOL><INDENT>ax.grid(True)<EOL><DEDENT>if not visible_axis:<EOL><INDENT>ax.set_yticklabels([])<EOL>ax.set_xticklabels([])<EOL><DEDENT>return ax<EOL>", "docstring": "!\n        @brief Create new canvas with user defined parameters to display cluster or chunk of cluster on it.\n\n        @param[in] dimension (uint): Data-space dimension.\n        @param[in] pairs (list): Pair of coordinates that will be displayed on the canvas. If empty than label will not\n                    be displayed on the canvas.\n        @param[in] position (uint): Index position of canvas on a grid.\n        @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'visible_axis' 'visible_labels', 'visible_grid').\n\n        <b>Keyword Args:</b><br>\n            - visible_axis (bool): Defines visibility of axes on each canvas, if True - axes are visible.\n               By default axis are not displayed.\n            - visible_labels (bool): Defines visibility of labels on each canvas, if True - labels is displayed.\n               By default labels are displayed.\n            - visible_grid (bool): Defines visibility of grid on each canvas, if True - grid is displayed.\n               By default grid is displayed.\n\n        @return (matplotlib.Axis) Canvas to display cluster of chuck of cluster.", "id": "f15584:c1:m6"}
{"signature": "def append_cluster_attribute(self, index_canvas, index_cluster, data, marker = None, markersize = None):", "body": "cluster_descr = self.__canvas_clusters[index_canvas][index_cluster]<EOL>attribute_marker = marker<EOL>if attribute_marker is None:<EOL><INDENT>attribute_marker = cluster_descr.marker<EOL><DEDENT>attribure_markersize = markersize<EOL>if attribure_markersize is None:<EOL><INDENT>attribure_markersize = cluster_descr.markersize<EOL><DEDENT>attribute_color = cluster_descr.color<EOL>added_attribute_cluster_descriptor = canvas_cluster_descr(data, None, attribute_marker, attribure_markersize, attribute_color)<EOL>self.__canvas_clusters[index_canvas][index_cluster].attributes.append(added_attribute_cluster_descriptor)<EOL>", "docstring": "!\n        @brief Append cluster attribure for cluster on specific canvas.\n        @details Attribute it is data that is visualized for specific cluster using its color, marker and markersize if last two is not specified.\n\n        @param[in] index_canvas (uint): Index canvas where cluster is located.\n        @param[in] index_cluster (uint): Index cluster whose attribute should be added.\n        @param[in] data (list): List of points (data) that represents attribute.\n        @param[in] marker (string): Marker that is used for displaying objects from cluster on the canvas.\n        @param[in] markersize (uint): Size of marker.", "id": "f15584:c2:m2"}
{"signature": "def show(self, figure=None, invisible_axis=True, visible_grid=True, display=True, shift=None):", "body": "canvas_shift = shift<EOL>if canvas_shift is None:<EOL><INDENT>if figure is not None:<EOL><INDENT>canvas_shift = len(figure.get_axes())<EOL><DEDENT>else:<EOL><INDENT>canvas_shift = <NUM_LIT:0><EOL><DEDENT><DEDENT>if figure is not None:<EOL><INDENT>cluster_figure = figure<EOL><DEDENT>else:<EOL><INDENT>cluster_figure = plt.figure()<EOL><DEDENT>maximum_cols = self.__size_row<EOL>maximum_rows = math.ceil( (self.__number_canvases + canvas_shift) / maximum_cols)<EOL>grid_spec = gridspec.GridSpec(maximum_rows, maximum_cols)<EOL>for index_canvas in range(len(self.__canvas_clusters)):<EOL><INDENT>canvas_data = self.__canvas_clusters[index_canvas]<EOL>if len(canvas_data) == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>dimension = self.__canvas_dimensions[index_canvas]<EOL>if (dimension == <NUM_LIT:1>) or (dimension == <NUM_LIT:2>):<EOL><INDENT>ax = cluster_figure.add_subplot(grid_spec[index_canvas + canvas_shift])<EOL><DEDENT>else:<EOL><INDENT>ax = cluster_figure.add_subplot(grid_spec[index_canvas + canvas_shift], projection='<STR_LIT>')<EOL><DEDENT>if len(canvas_data) == <NUM_LIT:0>:<EOL><INDENT>plt.setp(ax, visible=False)<EOL><DEDENT>for cluster_descr in canvas_data:<EOL><INDENT>self.__draw_canvas_cluster(ax, dimension, cluster_descr)<EOL>for attribute_descr in cluster_descr.attributes:<EOL><INDENT>self.__draw_canvas_cluster(ax, dimension, attribute_descr)<EOL><DEDENT><DEDENT>if invisible_axis is True:<EOL><INDENT>ax.xaxis.set_ticklabels([])<EOL>ax.yaxis.set_ticklabels([])<EOL>if (dimension == <NUM_LIT:3>):<EOL><INDENT>ax.zaxis.set_ticklabels([])<EOL><DEDENT><DEDENT>if self.__canvas_titles[index_canvas] is not None:<EOL><INDENT>ax.set_title(self.__canvas_titles[index_canvas])<EOL><DEDENT>ax.grid(visible_grid)<EOL><DEDENT>if display is True:<EOL><INDENT>plt.show()<EOL><DEDENT>return cluster_figure<EOL>", "docstring": "!\n        @brief Shows clusters 
(visualize).\n\n        @param[in] figure (fig): Defines requirement to use specified figure, if None - new figure is created for drawing clusters.\n        @param[in] invisible_axis (bool): Defines visibility of axes on each canvas, if True - axes are invisible.\n        @param[in] visible_grid (bool): Defines visibility of grid on each canvas, if True - grid is displayed.\n        @param[in] display (bool): Defines requirement to display clusters on a stage, if True - clusters are displayed,\n                    if False - plt.show() should be called by user.\"\n        @param[in] shift (uint): Force canvas shift value - defines canvas index from which custers should be visualized.\n\n        @return (fig) Figure where clusters are shown.", "id": "f15584:c2:m6"}
{"signature": "def __draw_cluster_item_multi_dimension(self, ax, pair, item, cluster_descr):", "body": "index_dimension1 = pair[<NUM_LIT:0>]<EOL>index_dimension2 = pair[<NUM_LIT:1>]<EOL>if cluster_descr.data is None:<EOL><INDENT>ax.plot(item[index_dimension1], item[index_dimension2],<EOL>color=cluster_descr.color, marker=cluster_descr.marker, markersize=cluster_descr.markersize)<EOL><DEDENT>else:<EOL><INDENT>ax.plot(cluster_descr.data[item][index_dimension1], cluster_descr.data[item][index_dimension2],<EOL>color=cluster_descr.color, marker=cluster_descr.marker, markersize=cluster_descr.markersize)<EOL><DEDENT>", "docstring": "!\n        @brief Draw cluster chunk defined by pair coordinates in data space with dimension greater than 1.\n\n        @param[in] ax (axis): Matplotlib axis that is used to display chunk of cluster point.\n        @param[in] pair (list): Coordinate of the point that should be displayed.\n        @param[in] item (list): Data point or index of data point.\n        @param[in] cluster_descr (canvas_cluster_descr): Cluster description whose point is visualized.", "id": "f15584:c1:m8"}
{"signature": "def set_canvas_title(self, text, canvas = <NUM_LIT:0>):", "body": "if canvas > self.__number_canvases:<EOL><INDENT>raise NameError('<STR_LIT>' + canvas + '<STR_LIT>')<EOL><DEDENT>self.__canvas_titles[canvas] = text<EOL>", "docstring": "!\n        @brief Set title for specified canvas.\n\n        @param[in] text (string): Title for canvas.\n        @param[in] canvas (uint): Index of canvas where title should be displayed.", "id": "f15584:c2:m4"}
{"signature": "def __draw_cluster_item_one_dimension(self, ax, item, cluster_descr):", "body": "if cluster_descr.data is None:<EOL><INDENT>ax.plot(item[<NUM_LIT:0>], <NUM_LIT:0.0>,<EOL>color=cluster_descr.color, marker=cluster_descr.marker, markersize=cluster_descr.markersize)<EOL><DEDENT>else:<EOL><INDENT>ax.plot(cluster_descr.data[item][<NUM_LIT:0>], <NUM_LIT:0.0>,<EOL>color=cluster_descr.color, marker=cluster_descr.marker, markersize=cluster_descr.markersize)<EOL><DEDENT>", "docstring": "!\n        @brief Draw cluster point in one dimensional data space..\n\n        @param[in] ax (axis): Matplotlib axis that is used to display chunk of cluster point.\n        @param[in] item (list): Data point or index of data point.\n        @param[in] cluster_descr (canvas_cluster_descr): Cluster description whose point is visualized.", "id": "f15584:c1:m9"}
{"signature": "def append_cluster(self, cluster, data=None, canvas=<NUM_LIT:0>, marker='<STR_LIT:.>', markersize=None, color=None):", "body": "if len(cluster) == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>if canvas > self.__number_canvases or canvas < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % self.__number_canvases or canvas)<EOL><DEDENT>if color is None:<EOL><INDENT>index_color = len(self.__canvas_clusters[canvas]) % len(color_list.TITLES)<EOL>color = color_list.TITLES[index_color]<EOL><DEDENT>added_canvas_descriptor = canvas_cluster_descr(cluster, data, marker, markersize, color)<EOL>self.__canvas_clusters[canvas].append( added_canvas_descriptor )<EOL>if data is None:<EOL><INDENT>dimension = len(cluster[<NUM_LIT:0>])<EOL>if self.__canvas_dimensions[canvas] is None:<EOL><INDENT>self.__canvas_dimensions[canvas] = dimension<EOL><DEDENT>elif self.__canvas_dimensions[canvas] != dimension:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>dimension = len(data[<NUM_LIT:0>])<EOL>if self.__canvas_dimensions[canvas] is None:<EOL><INDENT>self.__canvas_dimensions[canvas] = dimension<EOL><DEDENT>elif self.__canvas_dimensions[canvas] != dimension:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if (dimension < <NUM_LIT:1>) or (dimension > <NUM_LIT:3>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>if markersize is None:<EOL><INDENT>if (dimension == <NUM_LIT:1>) or (dimension == <NUM_LIT:2>):<EOL><INDENT>added_canvas_descriptor.markersize = self.__default_2d_marker_size<EOL><DEDENT>elif dimension == <NUM_LIT:3>:<EOL><INDENT>added_canvas_descriptor.markersize = self.__default_3d_marker_size<EOL><DEDENT><DEDENT>return len(self.__canvas_clusters[canvas]) - <NUM_LIT:1><EOL>", "docstring": "!\n        @brief Appends cluster to canvas for drawing.\n\n        @param[in] cluster (list): cluster that may consist of indexes of objects from the data or object itself.\n        @param[in] data 
(list): If defines that each element of cluster is considered as a index of object from the data.\n        @param[in] canvas (uint): Number of canvas that should be used for displaying cluster.\n        @param[in] marker (string): Marker that is used for displaying objects from cluster on the canvas.\n        @param[in] markersize (uint): Size of marker.\n        @param[in] color (string): Color of marker.\n\n        @return Returns index of cluster descriptor on the canvas.", "id": "f15584:c2:m1"}
{"signature": "def show(self, pair_filter=None, **kwargs):", "body": "if not len(self.__clusters) > <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>cluster_data = self.__clusters[<NUM_LIT:0>].data or self.__clusters[<NUM_LIT:0>].cluster<EOL>dimension = len(cluster_data[<NUM_LIT:0>])<EOL>acceptable_pairs = pair_filter or []<EOL>pairs = []<EOL>amount_axis = <NUM_LIT:1><EOL>axis_storage = []<EOL>if dimension > <NUM_LIT:1>:<EOL><INDENT>pairs = self.__create_pairs(dimension, acceptable_pairs)<EOL>amount_axis = len(pairs)<EOL><DEDENT>self.__figure = plt.figure()<EOL>self.__grid_spec = self.__create_grid_spec(amount_axis, kwargs.get('<STR_LIT>', <NUM_LIT:4>))<EOL>for index in range(amount_axis):<EOL><INDENT>ax = self.__create_canvas(dimension, pairs, index, **kwargs)<EOL>axis_storage.append(ax)<EOL><DEDENT>for cluster_descr in self.__clusters:<EOL><INDENT>self.__draw_canvas_cluster(axis_storage, cluster_descr, pairs)<EOL><DEDENT>plt.show()<EOL>", "docstring": "!\n        @brief Shows clusters (visualize) in multi-dimensional space.\n\n        @param[in] pair_filter (list): List of coordinate pairs that should be displayed. This argument is used as a filter.\n        @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'visible_axis' 'visible_labels', 'visible_grid', 'row_size').\n\n        <b>Keyword Args:</b><br>\n            - visible_axis (bool): Defines visibility of axes on each canvas, if True - axes are visible.\n               By default axis of each canvas are not displayed.\n            - visible_labels (bool): Defines visibility of labels on each canvas, if True - labels is displayed.\n               By default labels of each canvas are displayed.\n            - visible_grid (bool): Defines visibility of grid on each canvas, if True - grid is displayed.\n               By default grid of each canvas is displayed.\n            - max_row_size (uint): Maximum number of canvases on one row.", "id": "f15584:c1:m3"}
{"signature": "def get_clusters(self):", "body": "return self.__clusters<EOL>", "docstring": "!\n        @brief Returns list of allocated clusters, each cluster contains indexes of objects in list of data.\n\n        @remark Results of clustering can be obtained using corresponding gets methods.\n\n        @return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.\n\n        @see process()", "id": "f15585:c1:m2"}
{"signature": "def __init__(self, data, number_clusters, link = None, ccore = True):", "body": "self.__pointer_data = data<EOL>self.__number_clusters = number_clusters<EOL>self.__similarity = link<EOL>if self.__similarity is None:<EOL><INDENT>self.__similarity = type_link.CENTROID_LINK<EOL><DEDENT>self.__clusters = []<EOL>self.__ccore = ccore<EOL>if (self.__ccore):<EOL><INDENT>self.__ccore = ccore_library.workable()<EOL><DEDENT>if (self.__similarity == type_link.CENTROID_LINK):<EOL><INDENT>self.__centers = self.__pointer_data.copy()<EOL><DEDENT>", "docstring": "!\n        @brief Constructor of agglomerative hierarchical algorithm.\n\n        @param[in] data (list): Input data that is presented as a list of points (objects), each point should be represented by list, for example\n                    [[0.1, 0.2], [0.4, 0.5], [1.3, 0.9]].\n        @param[in] number_clusters (uint): Number of clusters that should be allocated.\n        @param[in] link (type_link): Link type that is used for calculation similarity between objects and clusters, if it is not specified centroid link will be used by default.\n        @param[in] ccore (bool): Defines should be CCORE (C++ pyclustering library) used instead of Python code or not (by default it is 'False').", "id": "f15585:c1:m0"}
{"signature": "def __calculate_nearest_distance(self, index_cluster1, index_cluster2):", "body": "candidate_minimum_distance = float('<STR_LIT>');<EOL>for index_object1 in self.__clusters[index_cluster1]:<EOL><INDENT>for index_object2 in self.__clusters[index_cluster2]:<EOL><INDENT>distance = euclidean_distance_square(self.__pointer_data[index_object1], self.__pointer_data[index_object2]);<EOL>if (distance < candidate_minimum_distance):<EOL><INDENT>candidate_minimum_distance = distance;<EOL><DEDENT><DEDENT><DEDENT>return candidate_minimum_distance<EOL>", "docstring": "!\n        @brief Finds two nearest objects in two specified clusters and returns distance between them.\n\n        @param[in] (uint) Index of the first cluster.\n        @param[in] (uint) Index of the second cluster.\n\n        @return The nearest euclidean distance between two clusters.", "id": "f15585:c1:m10"}
{"signature": "def __merge_by_average_link(self):", "body": "minimum_average_distance = float('<STR_LIT>');<EOL>for index_cluster1 in range(<NUM_LIT:0>, len(self.__clusters)):<EOL><INDENT>for index_cluster2 in range(index_cluster1 + <NUM_LIT:1>, len(self.__clusters)):<EOL><INDENT>candidate_average_distance = <NUM_LIT:0.0>;<EOL>for index_object1 in self.__clusters[index_cluster1]:<EOL><INDENT>for index_object2 in self.__clusters[index_cluster2]:<EOL><INDENT>candidate_average_distance += euclidean_distance_square(self.__pointer_data[index_object1], self.__pointer_data[index_object2]);<EOL><DEDENT><DEDENT>candidate_average_distance /= (len(self.__clusters[index_cluster1]) + len(self.__clusters[index_cluster2]));<EOL>if (candidate_average_distance < minimum_average_distance):<EOL><INDENT>minimum_average_distance = candidate_average_distance;<EOL>indexes = [index_cluster1, index_cluster2];<EOL><DEDENT><DEDENT><DEDENT>self.__clusters[indexes[<NUM_LIT:0>]] += self.__clusters[indexes[<NUM_LIT:1>]];  <EOL>self.__clusters.pop(indexes[<NUM_LIT:1>])<EOL>", "docstring": "!\n        @brief Merges the most similar clusters in line with average link type.", "id": "f15585:c1:m5"}
{"signature": "def __merge_by_signle_link(self):", "body": "minimum_single_distance = float('<STR_LIT>');<EOL>indexes = None;<EOL>for index_cluster1 in range(<NUM_LIT:0>, len(self.__clusters)):<EOL><INDENT>for index_cluster2 in range(index_cluster1 + <NUM_LIT:1>, len(self.__clusters)):<EOL><INDENT>candidate_minimum_distance = self.__calculate_nearest_distance(index_cluster1, index_cluster2);<EOL>if (candidate_minimum_distance < minimum_single_distance):<EOL><INDENT>minimum_single_distance = candidate_minimum_distance;<EOL>indexes = [index_cluster1, index_cluster2];<EOL><DEDENT><DEDENT><DEDENT>self.__clusters[indexes[<NUM_LIT:0>]] += self.__clusters[indexes[<NUM_LIT:1>]];  <EOL>self.__clusters.pop(indexes[<NUM_LIT:1>])<EOL>", "docstring": "!\n        @brief Merges the most similar clusters in line with single link type.", "id": "f15585:c1:m9"}
{"signature": "def process(self):", "body": "if self.__ccore is True:<EOL><INDENT>ccore_metric = metric_wrapper.create_instance(self.__metric)<EOL>self.__clusters, self.__medians = wrapper.kmedians(self.__pointer_data, self.__medians, self.__tolerance, self.__itermax, ccore_metric.get_pointer())<EOL><DEDENT>else:<EOL><INDENT>changes = float('<STR_LIT>')<EOL>if len(self.__pointer_data[<NUM_LIT:0>]) != len(self.__medians[<NUM_LIT:0>]):<EOL><INDENT>raise NameError('<STR_LIT>')<EOL><DEDENT>iterations = <NUM_LIT:0><EOL>while changes > self.__tolerance and iterations < self.__itermax:<EOL><INDENT>self.__clusters = self.__update_clusters()<EOL>updated_centers = self.__update_medians()<EOL>changes = max([self.__metric(self.__medians[index], updated_centers[index]) for index in range(len(updated_centers))])<EOL>self.__medians = updated_centers<EOL>iterations += <NUM_LIT:1><EOL><DEDENT><DEDENT>return self<EOL>", "docstring": "!\n        @brief Performs cluster analysis in line with rules of K-Medians algorithm.\n\n        @return (kmedians) Returns itself (K-Medians instance).\n\n        @remark Results of clustering can be obtained using corresponding get methods.\n\n        @see get_clusters()\n        @see get_medians()", "id": "f15586:c0:m1"}
{"signature": "def get_medians(self):", "body": "return self.__medians<EOL>", "docstring": "!\n        @brief Returns list of centers of allocated clusters.\n\n        @see process()\n        @see get_clusters()", "id": "f15586:c0:m3"}
{"signature": "def __update_medians(self):", "body": "medians = [[] for i in range(len(self.__clusters))]<EOL>for index in range(len(self.__clusters)):<EOL><INDENT>medians[index] = [<NUM_LIT:0.0> for i in range(len(self.__pointer_data[<NUM_LIT:0>]))]<EOL>length_cluster = len(self.__clusters[index])<EOL>for index_dimension in range(len(self.__pointer_data[<NUM_LIT:0>])):<EOL><INDENT>sorted_cluster = sorted(self.__clusters[index], key=lambda x: self.__pointer_data[x][index_dimension])<EOL>relative_index_median = int(math.floor((length_cluster - <NUM_LIT:1>) / <NUM_LIT:2>))<EOL>index_median = sorted_cluster[relative_index_median]<EOL>if (length_cluster % <NUM_LIT:2>) == <NUM_LIT:0>:<EOL><INDENT>index_median_second = sorted_cluster[relative_index_median + <NUM_LIT:1>]<EOL>medians[index][index_dimension] = (self.__pointer_data[index_median][index_dimension] + self.__pointer_data[index_median_second][index_dimension]) / <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>medians[index][index_dimension] = self.__pointer_data[index_median][index_dimension]<EOL><DEDENT><DEDENT><DEDENT>return medians<EOL>", "docstring": "!\n        @brief Calculate medians of clusters in line with contained objects.\n\n        @return (list) list of medians for current number of clusters.", "id": "f15586:c0:m6"}
{"signature": "def __init__(self, data, initial_centers, tolerance=<NUM_LIT>, ccore=True, **kwargs):", "body": "self.__pointer_data = data<EOL>self.__clusters = []<EOL>self.__medians = initial_centers[:]<EOL>self.__tolerance = tolerance<EOL>self.__itermax = kwargs.get('<STR_LIT>', <NUM_LIT:100>)<EOL>self.__metric = kwargs.get('<STR_LIT>', distance_metric(type_metric.EUCLIDEAN_SQUARE))<EOL>if self.__metric is None:<EOL><INDENT>self.__metric = distance_metric(type_metric.EUCLIDEAN_SQUARE)<EOL><DEDENT>self.__ccore = ccore and self.__metric.get_type() != type_metric.USER_DEFINED<EOL>if self.__ccore:<EOL><INDENT>self.__ccore = ccore_library.workable()<EOL><DEDENT>", "docstring": "!\n        @brief Constructor of clustering algorithm K-Medians.\n\n        @param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple.\n        @param[in] initial_centers (list): Initial coordinates of medians of clusters that are represented by list: [center1, center2, ...].\n        @param[in] tolerance (double): Stop condition: if maximum value of change of centers of clusters is less than tolerance than algorithm will stop processing\n        @param[in] ccore (bool): Defines should be CCORE library (C++ pyclustering library) used instead of Python code or not.\n        @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'metric', 'itermax').\n\n        <b>Keyword Args:</b><br>\n            - metric (distance_metric): Metric that is used for distance calculation between two points.\n            - itermax (uint): Maximum number of iterations for cluster analysis.", "id": "f15586:c0:m0"}
{"signature": "def __update_clusters(self):", "body": "clusters = [[] for i in range(len(self.__medians))]<EOL>for index_point in range(len(self.__pointer_data)):<EOL><INDENT>index_optim = -<NUM_LIT:1><EOL>dist_optim = <NUM_LIT:0.0><EOL>for index in range(len(self.__medians)):<EOL><INDENT>dist = self.__metric(self.__pointer_data[index_point], self.__medians[index])<EOL>if (dist < dist_optim) or (index == <NUM_LIT:0>):<EOL><INDENT>index_optim = index<EOL>dist_optim = dist<EOL><DEDENT><DEDENT>clusters[index_optim].append(index_point)<EOL><DEDENT>clusters = [cluster for cluster in clusters if len(cluster) > <NUM_LIT:0>]<EOL>return clusters<EOL>", "docstring": "!\n        @brief Calculate Manhattan distance to each point from the each cluster. \n        @details Nearest points are captured by according clusters and as a result clusters are updated.\n\n        @return (list) updated clusters as list of clusters where each cluster contains indexes of objects from data.", "id": "f15586:c0:m5"}
{"signature": "def __calculate_volume(self):", "body": "volume = <NUM_LIT:0.0><EOL>for i in range(<NUM_LIT:0>, len(self.__max_corner)):<EOL><INDENT>side_length = self.__max_corner[i] - self.__min_corner[i]<EOL>if side_length != <NUM_LIT:0.0>:<EOL><INDENT>if volume == <NUM_LIT:0.0>: volume = side_length<EOL>else: volume *= side_length<EOL><DEDENT><DEDENT>return volume<EOL>", "docstring": "!\n        @brief Calculates volume of current spatial block.\n        @details If empty dimension is detected (where all points has the same value) then such dimension is ignored\n                  during calculation of volume.\n\n        @return (double) Volume of current spatial block.", "id": "f15588:c3:m8"}
{"signature": "def __validate_arguments(self):", "body": "if len(self.__directory.get_data()[<NUM_LIT:0>]) != <NUM_LIT:2>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "!\n        @brief Check correctness of input arguments and throw exception if incorrect is found.", "id": "f15588:c1:m1"}
{"signature": "def split(self, split_dimension, cache_points):", "body": "left_region_number = self.__region_number<EOL>right_region_number = self.__region_number + <NUM_LIT:2> ** self.__level<EOL>first_spatial_block, second_spatial_block = self.__spatial_block.split(split_dimension)<EOL>left = bang_block(self.__data, left_region_number, self.__level + <NUM_LIT:1>, first_spatial_block, cache_points)<EOL>right = bang_block(self.__data, right_region_number, self.__level + <NUM_LIT:1>, second_spatial_block, cache_points)<EOL>return left, right<EOL>", "docstring": "!\n        @brief Split BANG-block into two new blocks in specified dimension.\n\n        @param[in] split_dimension (uint): Dimension where block should be split.\n        @param[in] cache_points (bool): If True then covered points are cached. Used for leaf blocks.\n\n        @return (tuple) Pair of BANG-block that were formed from the current.", "id": "f15588:c4:m10"}
{"signature": "def get_directory(self):", "body": "return self.__directory<EOL>", "docstring": "!\n        @brief Returns grid directory that describes grid of the processed data.\n\n        @remark Grid directory is returned only after data processing (method process()). Otherwise None value is returned.\n\n        @return (bang_directory) BANG directory that describes grid of process data.\n\n        @see process()", "id": "f15588:c5:m4"}
{"signature": "def get_dendrogram(self):", "body": "return self.__dendrogram<EOL>", "docstring": "!\n        @brief Returns dendrogram of clusters.\n        @details Dendrogram is created in following way: the density indices of all regions are calculated and sorted\n                  in decreasing order for each cluster during clustering process.\n\n        @remark Dendrogram is returned only after data processing (method process()). Otherwise empty list is returned.", "id": "f15588:c5:m5"}
{"signature": "def __init__(self, max_corner, min_corner):", "body": "self.__max_corner = max_corner<EOL>self.__min_corner = min_corner<EOL>self.__volume = self.__calculate_volume()<EOL>", "docstring": "!\n        @brief Creates spatial block in data space.\n\n        @param[in] max_corner (array_like): Maximum corner coordinates of the block.\n        @param[in] min_corner (array_like): Minimal corner coordinates of the block.", "id": "f15588:c3:m0"}
{"signature": "def __expand_cluster_block(self, block, cluster_index, leaf_blocks, unhandled_block_indexes):", "body": "block.set_cluster(cluster_index)<EOL>self.__update_cluster_dendrogram(cluster_index, [block])<EOL>neighbors = self.__find_block_neighbors(block, leaf_blocks, unhandled_block_indexes)<EOL>self.__update_cluster_dendrogram(cluster_index, neighbors)<EOL>for neighbor in neighbors:<EOL><INDENT>neighbor.set_cluster(cluster_index)<EOL>neighbor_neighbors = self.__find_block_neighbors(neighbor, leaf_blocks, unhandled_block_indexes)<EOL>self.__update_cluster_dendrogram(cluster_index, neighbor_neighbors)<EOL>neighbors += neighbor_neighbors<EOL><DEDENT>", "docstring": "!\n        @brief Expand cluster from specific block that is considered as a central block.\n\n        @param[in] block (bang_block): Block that is considered as a central block for cluster.\n        @param[in] cluster_index (uint): Index of cluster that is assigned to blocks that forms new cluster.\n        @param[in] leaf_blocks (list): Leaf BANG-blocks that are considered during cluster formation.\n        @param[in] unhandled_block_indexes (set): Set of candidates (BANG block indexes) to become a cluster member. The\n                    parameter helps to reduce traversing among BANG-block providing only restricted set of block that\n                    should be considered.", "id": "f15588:c5:m9"}
{"signature": "def __cache_covered_data(self):", "body": "self.__cache_points = True<EOL>self.__points = []<EOL>for index_point in range(len(self.__data)):<EOL><INDENT>if self.__data[index_point] in self.__spatial_block:<EOL><INDENT>self.__cache_point(index_point)<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Cache covered data.", "id": "f15588:c4:m13"}
{"signature": "@staticmethod<EOL><INDENT>def __draw_blocks(ax, blocks, pair):<DEDENT>", "body": "ax.grid(False)<EOL>density_scale = blocks[-<NUM_LIT:1>].get_density()<EOL>for block in blocks:<EOL><INDENT>bang_visualizer.__draw_block(ax, pair, block, density_scale)<EOL><DEDENT>", "docstring": "!\n        @brief Display BANG-blocks on specified figure.\n\n        @param[in] ax (Axis): Axis where bang-blocks should be displayed.\n        @param[in] blocks (list): List of blocks that should be displyed.\n        @param[in] pair (tuple): Pair of coordinate index that should be displayed.", "id": "f15588:c0:m4"}
{"signature": "def __contains__(self, point):", "body": "for i in range(len(point)):<EOL><INDENT>if point[i] < self.__min_corner[i] or point[i] > self.__max_corner[i]:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "!\n        @brief Point is considered as contained if it lies in block (belong to it).\n\n        @return (bool) True if point is in block, otherwise False.", "id": "f15588:c3:m2"}
{"signature": "@staticmethod<EOL><INDENT>def __draw_block(ax, pair, block, density_scale):<DEDENT>", "body": "max_corner, min_corner = bang_visualizer.__get_rectangle_description(block, pair)<EOL>belong_cluster = block.get_cluster() is not None<EOL>if density_scale != <NUM_LIT:0.0>:<EOL><INDENT>density_scale = bang_visualizer.__maximum_density_alpha * block.get_density() / density_scale<EOL><DEDENT>face_color = matplotlib.colors.to_rgba('<STR_LIT>', alpha=density_scale)<EOL>edge_color = matplotlib.colors.to_rgba('<STR_LIT>', alpha=<NUM_LIT:1.0>)<EOL>rect = patches.Rectangle(min_corner, max_corner[<NUM_LIT:0>] - min_corner[<NUM_LIT:0>], max_corner[<NUM_LIT:1>] - min_corner[<NUM_LIT:1>],<EOL>fill=belong_cluster,<EOL>facecolor=face_color,<EOL>edgecolor=edge_color,<EOL>linewidth=<NUM_LIT:0.5>)<EOL>ax.add_patch(rect)<EOL>", "docstring": "!\n        @brief Display BANG-block on the specified ax.\n\n        @param[in] ax (Axis): Axis where block should be displayed.\n        @param[in] pair (tuple): Pair of coordinate index that should be displayed.\n        @param[in] block (bang_block): BANG-block that should be displayed.\n        @param[in] density_scale (double): Max density to display density of the block by appropriate tone.", "id": "f15588:c0:m5"}
{"signature": "@staticmethod<EOL><INDENT>def __draw_two_dimension_data(ax, data, pair):<DEDENT>", "body": "ax.set_xlabel(\"<STR_LIT>\" % pair[<NUM_LIT:0>])<EOL>ax.set_ylabel(\"<STR_LIT>\" % pair[<NUM_LIT:1>])<EOL>for point in data:<EOL><INDENT>if len(data[<NUM_LIT:0>]) > <NUM_LIT:1>:<EOL><INDENT>ax.plot(point[pair[<NUM_LIT:0>]], point[pair[<NUM_LIT:1>]], color='<STR_LIT>', marker='<STR_LIT:.>')<EOL><DEDENT>else:<EOL><INDENT>ax.plot(point[pair[<NUM_LIT:0>]], <NUM_LIT:0>, color='<STR_LIT>', marker='<STR_LIT:.>')<EOL>ax.yaxis.set_ticklabels([])<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Display data in two-dimensional canvas.\n\n        @param[in] ax (Axis): Canvas where data should be displayed.\n        @param[in] data (list): Data points that should be displayed.\n        @param[in] pair (tuple): Pair of dimension indexes.", "id": "f15588:c0:m3"}
{"signature": "def get_points(self):", "body": "if self.__points is None:<EOL><INDENT>self.__cache_covered_data()<EOL><DEDENT>return self.__points<EOL>", "docstring": "!\n        @brief Return points that covers by the BANG-block.\n\n        @return (list) List of point indexes that are covered by the block.", "id": "f15588:c4:m7"}
{"signature": "def __build_directory_levels(self):", "body": "previous_level_blocks = [ self.__root ]<EOL>for level in range(<NUM_LIT:1>, self.__levels):<EOL><INDENT>previous_level_blocks = self.__build_level(previous_level_blocks, level)<EOL>self.__store_level_blocks(previous_level_blocks)<EOL><DEDENT>self.__leafs = sorted(self.__leafs, key=lambda block: block.get_density())<EOL>", "docstring": "!\n        @brief Build levels of direction if amount of level is greater than one.", "id": "f15588:c2:m8"}
{"signature": "def get_cluster_encoding(self):", "body": "return type_encoding.CLUSTER_INDEX_LIST_SEPARATION<EOL>", "docstring": "!\n        @brief Returns clustering result representation type that indicate how clusters are encoded.\n\n        @return (type_encoding) Clustering result representation.\n\n        @see get_clusters()", "id": "f15588:c5:m6"}
{"signature": "def __find_block_neighbors(self, block, level_blocks, unhandled_block_indexes):", "body": "neighbors = []<EOL>handled_block_indexes = []<EOL>for unhandled_index in unhandled_block_indexes:<EOL><INDENT>if block.is_neighbor(level_blocks[unhandled_index]):<EOL><INDENT>handled_block_indexes.append(unhandled_index)<EOL>neighbors.append(level_blocks[unhandled_index])<EOL>if len(neighbors) == <NUM_LIT:8>:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>for handled_index in handled_block_indexes:<EOL><INDENT>unhandled_block_indexes.remove(handled_index)<EOL><DEDENT>return neighbors<EOL>", "docstring": "!\n        @brief Search block neighbors that are parts of new clusters (density is greater than threshold and that are\n                not cluster members yet), other neighbors are ignored.\n\n        @param[in] block (bang_block): BANG-block for which neighbors should be found (which can be part of cluster).\n        @param[in] level_blocks (list): BANG-blocks on specific level.\n        @param[in] unhandled_block_indexes (set): Blocks that have not been processed yet.\n\n        @return (list) Block neighbors that can become part of cluster.", "id": "f15588:c5:m12"}
{"signature": "def __validate_arguments(self):", "body": "if self.__levels <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % self.__levels)<EOL><DEDENT>if len(self.__data) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if self.__density_threshold < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % self.__density_threshold)<EOL><DEDENT>", "docstring": "!\n        @brief Check input arguments of BANG algorithm and if one of them is not correct then appropriate exception\n                is thrown.", "id": "f15588:c5:m7"}
{"signature": "def is_neighbor(self, block):", "body": "if block is not self:<EOL><INDENT>block_max_corner, _ = block.get_corners()<EOL>dimension = len(block_max_corner)<EOL>neighborhood_score = self.__calculate_neighborhood(block_max_corner)<EOL>if neighborhood_score == dimension:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "!\n        @brief Performs calculation to identify whether specified block is neighbor of current block.\n        @details It also considers diagonal blocks as neighbors.\n\n        @param[in] block (spatial_block): Another block that is check whether it is neighbor.\n\n        @return (bool) True is blocks are neighbors, False otherwise.", "id": "f15588:c3:m6"}
{"signature": "@staticmethod<EOL><INDENT>def show_blocks(directory):<DEDENT>", "body": "dimension = len(directory.get_data()[<NUM_LIT:0>])<EOL>amount_canvases = <NUM_LIT:1><EOL>if dimension > <NUM_LIT:1>:<EOL><INDENT>amount_canvases = int(dimension * (dimension - <NUM_LIT:1>) / <NUM_LIT:2>)<EOL><DEDENT>figure = plt.figure()<EOL>grid_spec = gridspec.GridSpec(<NUM_LIT:1>, amount_canvases)<EOL>pairs = list(itertools.combinations(range(dimension), <NUM_LIT:2>))<EOL>if len(pairs) == <NUM_LIT:0>: pairs = [(<NUM_LIT:0>, <NUM_LIT:0>)]<EOL>for index in range(amount_canvases):<EOL><INDENT>ax = figure.add_subplot(grid_spec[index])<EOL>bang_visualizer.__draw_blocks(ax, directory.get_leafs(), pairs[index])<EOL>bang_visualizer.__draw_two_dimension_data(ax, directory.get_data(), pairs[index])<EOL><DEDENT>plt.show()<EOL>", "docstring": "!\n        @brief Show BANG-blocks (leafs only) in data space.\n        @details BANG-blocks represents grid that was used for clustering process.\n\n        @param[in] directory (bang_directory): Directory that was created by BANG algorithm during clustering process.", "id": "f15588:c0:m0"}
{"signature": "def __allocate_clusters(self):", "body": "leaf_blocks = self.__directory.get_leafs()<EOL>unhandled_block_indexes = set([i for i in range(len(leaf_blocks)) if leaf_blocks[i].get_density() > self.__density_threshold])<EOL>current_block = self.__find_block_center(leaf_blocks, unhandled_block_indexes)<EOL>cluster_index = <NUM_LIT:0><EOL>while current_block is not None:<EOL><INDENT>if current_block.get_density() <= self.__density_threshold or len(current_block) <= self.__amount_threshold:<EOL><INDENT>break<EOL><DEDENT>self.__expand_cluster_block(current_block, cluster_index, leaf_blocks, unhandled_block_indexes)<EOL>current_block = self.__find_block_center(leaf_blocks, unhandled_block_indexes)<EOL>cluster_index += <NUM_LIT:1><EOL><DEDENT>self.__store_clustering_results(cluster_index, leaf_blocks)<EOL>", "docstring": "!\n        @brief Performs cluster allocation using leafs of tree in BANG directory (the smallest cells).", "id": "f15588:c5:m8"}
{"signature": "def __init__(self, directory, clusters):", "body": "self.__directory = directory<EOL>self.__clusters = clusters<EOL>self.__noise = []<EOL>self.__current_block = <NUM_LIT:0><EOL>self.__current_level = <NUM_LIT:0><EOL>self.__level_blocks = directory.get_level(<NUM_LIT:0>)<EOL>self.__figure = plt.figure()<EOL>self.__ax = self.__figure.add_subplot(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>)<EOL>self.__special_frame = <NUM_LIT:0><EOL>self.__validate_arguments()<EOL>", "docstring": "!\n        @brief Creates BANG animator instance.\n\n        @param[in] directory (bang_directory): BANG directory that was formed during BANG clustering process.\n        @param[in] clusters (list): Allocated clusters during BANG clustering process.", "id": "f15588:c1:m0"}
{"signature": "@staticmethod<EOL><INDENT>def show_clusters(data, clusters, noise=None):<DEDENT>", "body": "visualizer = cluster_visualizer()<EOL>visualizer.append_clusters(clusters, data)<EOL>visualizer.append_cluster(noise or [], data, marker='<STR_LIT:x>')<EOL>visualizer.show()<EOL>", "docstring": "!\n        @brief Display BANG clustering results.\n\n        @param[in] data (list): Dataset that was used for clustering.\n        @param[in] clusters (array_like): Clusters that were allocated by the algorithm.\n        @param[in] noise (array_like): Noise that were allocated by the algorithm.", "id": "f15588:c0:m2"}
{"signature": "def get_height(self):", "body": "return len(self.__level_blocks)<EOL>", "docstring": "!\n        @brief Returns height of BANG tree where blocks are stored.\n\n        @return (uint) Height of BANG tree.", "id": "f15588:c2:m5"}
{"signature": "def __increment_block(self):", "body": "self.__current_block += <NUM_LIT:1><EOL>if self.__current_block >= len(self.__level_blocks):<EOL><INDENT>self.__current_block = <NUM_LIT:0><EOL>self.__current_level += <NUM_LIT:1><EOL>if self.__current_level < self.__directory.get_height():<EOL><INDENT>self.__level_blocks = self.__directory.get_level(self.__current_level)<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Increment BANG block safely by updating block index, level and level block.", "id": "f15588:c1:m2"}
{"signature": "def get_region(self):", "body": "return self.__region_number<EOL>", "docstring": "!\n        @brief Returns region number of BANG-block.\n        @details Region number is unique on among region numbers on a directory level. Pair of region number and level\n                  is unique for all directory.\n\n        @return (uint) Region number.", "id": "f15588:c4:m3"}
{"signature": "def __calculate_neighborhood(self, block_max_corner):", "body": "dimension = len(block_max_corner)<EOL>length_edges = [self.__max_corner[i] - self.__min_corner[i] for i in range(dimension)]<EOL>neighborhood_score = <NUM_LIT:0><EOL>for i in range(dimension):<EOL><INDENT>diff = abs(block_max_corner[i] - self.__max_corner[i])<EOL>if diff <= length_edges[i] + length_edges[i] * <NUM_LIT>:<EOL><INDENT>neighborhood_score += <NUM_LIT:1><EOL><DEDENT><DEDENT>return neighborhood_score<EOL>", "docstring": "!\n        @brief Calculates neighborhood score that defined whether blocks are neighbors.\n\n        @param[in] block_max_corner (list): Maximum coordinates of other block.\n\n        @return (uint) Neighborhood score.", "id": "f15588:c3:m7"}
{"signature": "@staticmethod<EOL><INDENT>def show_dendrogram(dendrogram):<DEDENT>", "body": "plt.figure()<EOL>axis = plt.subplot(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>)<EOL>current_position = <NUM_LIT:0><EOL>for index_cluster in range(len(dendrogram)):<EOL><INDENT>densities = [ block.get_density() for block in dendrogram[index_cluster] ]<EOL>xrange = range(current_position, current_position + len(densities))<EOL>axis.bar(xrange, densities, <NUM_LIT:1.0>, linewidth=<NUM_LIT:0.0>, color=color_list.get_color(index_cluster))<EOL>current_position += len(densities)<EOL><DEDENT>axis.set_ylabel(\"<STR_LIT>\")<EOL>axis.set_xlabel(\"<STR_LIT>\")<EOL>axis.xaxis.set_ticklabels([])<EOL>plt.xlim([-<NUM_LIT:0.5>, current_position - <NUM_LIT:0.5>])<EOL>plt.show()<EOL>", "docstring": "!\n        @brief Display dendrogram of BANG-blocks.\n\n        @param[in] dendrogram (list): List representation of dendrogram of BANG-blocks.\n\n        @see bang.get_dendrogram()", "id": "f15588:c0:m1"}
{"signature": "def __update_cluster_dendrogram(self, index_cluster, blocks):", "body": "if len(self.__dendrogram) <= index_cluster:<EOL><INDENT>self.__dendrogram.append([])<EOL><DEDENT>blocks = sorted(blocks, key=lambda block: block.get_density(), reverse=True)<EOL>self.__dendrogram[index_cluster] += blocks<EOL>", "docstring": "!\n        @brief Append clustered blocks to dendrogram.\n\n        @param[in] index_cluster (uint): Cluster index that was assigned to blocks.\n        @param[in] blocks (list): Blocks that were clustered.", "id": "f15588:c5:m13"}
{"signature": "def is_neighbor(self, block):", "body": "return self.get_spatial_block().is_neighbor(block.get_spatial_block())<EOL>", "docstring": "!\n        @brief Performs calculation to check whether specified block is neighbor to the current.\n\n        @param[in] block (bang_block): Other BANG-block that should be checked for neighborhood.\n\n        @return (bool) True if blocks are neighbors, False if blocks are not neighbors.", "id": "f15588:c4:m9"}
{"signature": "def __draw_block(self, block, block_alpha=<NUM_LIT:0.0>):", "body": "max_corner, min_corner = block.get_spatial_block().get_corners()<EOL>face_color = matplotlib.colors.to_rgba('<STR_LIT>', alpha=block_alpha)<EOL>edge_color = matplotlib.colors.to_rgba('<STR_LIT>', alpha=<NUM_LIT:1.0>)<EOL>rect = patches.Rectangle(min_corner, max_corner[<NUM_LIT:0>] - min_corner[<NUM_LIT:0>], max_corner[<NUM_LIT:1>] - min_corner[<NUM_LIT:1>],<EOL>fill=True,<EOL>facecolor=face_color,<EOL>edgecolor=edge_color,<EOL>linewidth=<NUM_LIT:0.5>)<EOL>self.__ax.add_patch(rect)<EOL>", "docstring": "!\n        @brief Display single BANG block on axis.\n\n        @param[in] block (bang_block): BANG block that should be displayed.\n        @param[in] block_alpha (double): Transparency level - value of alpha.", "id": "f15588:c1:m3"}
{"signature": "def __store_clustering_results(self, amount_clusters, leaf_blocks):", "body": "self.__clusters = [[] for _ in range(amount_clusters)]<EOL>for block in leaf_blocks:<EOL><INDENT>index = block.get_cluster()<EOL>if index is not None:<EOL><INDENT>self.__clusters[index] += block.get_points()<EOL><DEDENT>else:<EOL><INDENT>self.__noise += block.get_points()<EOL><DEDENT><DEDENT>self.__clusters = [ list(set(cluster)) for cluster in self.__clusters ]<EOL>self.__noise = list(set(self.__noise))<EOL>", "docstring": "!\n        @brief Stores clustering results in a convenient way.\n\n        @param[in] amount_clusters (uint): Amount of cluster that was allocated during processing.\n        @param[in] leaf_blocks (list): Leaf BANG-blocks (the smallest cells).", "id": "f15588:c5:m10"}
{"signature": "def __draw_cluster(self, data, cluster, color, marker):", "body": "for item in cluster:<EOL><INDENT>self.__ax.plot(data[item][<NUM_LIT:0>], data[item][<NUM_LIT:1>], color=color, marker=marker)<EOL><DEDENT>", "docstring": "!\n        @brief Draw 2-D single cluster on axis using specified color and marker.", "id": "f15588:c1:m6"}
{"signature": "def get_data(self):", "body": "return self.__data<EOL>", "docstring": "!\n        @brief Return data that is stored in the directory.\n\n        @return (list) List of points that represents stored data.", "id": "f15588:c2:m2"}
{"signature": "def get_corners(self):", "body": "return self.__max_corner, self.__min_corner<EOL>", "docstring": "!\n        @brief Return spatial description of current block.\n\n        @return (tuple) Pair of maximum and minimum corners (max_corner, min_corner).", "id": "f15588:c3:m3"}
{"signature": "def get_density(self):", "body": "return self.__density<EOL>", "docstring": "!\n        @brief Returns density of the BANG-block.\n\n        @return (double) BANG-block density.", "id": "f15588:c4:m4"}
{"signature": "def __init__(self, data, levels, ccore=False, **kwargs):", "body": "self.__data = data<EOL>self.__levels = levels<EOL>self.__directory = None<EOL>self.__clusters = []<EOL>self.__noise = []<EOL>self.__cluster_blocks = []<EOL>self.__dendrogram = []<EOL>self.__density_threshold = kwargs.get('<STR_LIT>', <NUM_LIT:0.0>)<EOL>self.__amount_threshold = kwargs.get('<STR_LIT>', <NUM_LIT:0>)<EOL>self.__ccore = ccore<EOL>self.__validate_arguments()<EOL>", "docstring": "!\n        @brief Create BANG clustering algorithm.\n\n        @param[in] data (list): Input data (list of points) that should be clustered.\n        @param[in] levels (uint): Amount of levels in tree that is used for splitting (how many times block should be\n                    split). For example, if amount of levels is two then surface will be divided into two blocks and\n                    each obtained block will be divided into blocks also.\n        @param[in] ccore (bool): Reserved positional argument - not used yet.\n        @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'observe').\n\n        <b>Keyword Args:</b><br>\n            - density_threshold (double): If block density is smaller than this value then contained data by this\n               block is considered as a noise and its points as outliers. Block density is defined by amount of\n               points in block divided by block volume: <i>amount_block_points</i>/<i>block_volume</i>. By default\n               it is 0.0 - means than only empty blocks are considered as noise. Be aware that this parameter is used\n               with parameter 'amount_threshold' - the maximum threshold is considered during processing.\n            - amount_threshold (uint): Amount of points in the block when it contained data in bang-block is\n               considered as a noise and there is no need to split it till the last level. 
Be aware that this parameter\n               is used with parameter 'density_threshold' - the maximum threshold is considered during processing.", "id": "f15588:c5:m0"}
{"signature": "def __verify_arguments(self):", "body": "if self.__kmax > len(self.__data):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + str(self.__kmax) + \"<STR_LIT>\" +<EOL>str(len(self.__data)) + \"<STR_LIT>\")<EOL><DEDENT>if self.__kmin <= <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" + str(self.__kmin) + \"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "!\n        @brief Checks algorithm's arguments and if some of them is incorrect then exception is thrown.", "id": "f15589:c2:m8"}
{"signature": "def __calculate_clusters(self, k):", "body": "initial_values = kmeans_plusplus_initializer(self.__data, k).initialize(return_index=self.__return_index)<EOL>algorithm_type = self.__algorithm.get_type()<EOL>return algorithm_type(self.__data, initial_values).process().get_clusters()<EOL>", "docstring": "!\n        @brief Performs cluster analysis using specified K value.\n\n        @param[in] k (uint): Amount of clusters that should be allocated.\n\n        @return (array_like) Allocated clusters.", "id": "f15589:c2:m7"}
{"signature": "def get_type(self):", "body": "if self == silhouette_ksearch_type.KMEANS:<EOL><INDENT>return kmeans<EOL><DEDENT>elif self == silhouette_ksearch_type.KMEDIANS:<EOL><INDENT>return kmedians<EOL><DEDENT>elif self == silhouette_ksearch_type.KMEDOIDS:<EOL><INDENT>return kmedoids<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "!\n        @brief Returns algorithm type that corresponds to specified enumeration value.\n\n        @return (type) Algorithm type for cluster analysis.", "id": "f15589:c1:m0"}
{"signature": "def get_scores(self):", "body": "return self.__scores<EOL>", "docstring": "!\n        @brief Returns silhouette score for each K value (amount of clusters).\n\n        @return (dict) Silhouette score for each K value, where key is a K value and value is a silhouette score.\n\n        @see process, get_score", "id": "f15589:c2:m6"}
{"signature": "def process(self):", "body": "if self.__ccore is True:<EOL><INDENT>self.__process_by_ccore()<EOL><DEDENT>else:<EOL><INDENT>self.__process_by_python()<EOL><DEDENT>return self<EOL>", "docstring": "!\n        @brief Performs analysis to find optimal amount of clusters.\n\n        @see get_amount, get_score, get_scores\n\n        @return (silhouette_search) Itself instance (silhouette_search)", "id": "f15589:c2:m1"}
{"signature": "def __process_by_ccore(self):", "body": "results = wrapper.silhoeutte_ksearch(self.__data, self.__kmin, self.__kmax, self.__algorithm)<EOL>self.__amount = results[<NUM_LIT:0>]<EOL>self.__score = results[<NUM_LIT:1>]<EOL>self.__scores = results[<NUM_LIT:2>]<EOL>", "docstring": "!\n        @brief Performs processing using CCORE (C/C++ part of pyclustering library).", "id": "f15589:c2:m2"}
{"signature": "def __caclulate_optimal_neighbor_cluster_score(self, index_cluster, difference):", "body": "optimal_score = float('<STR_LIT>')<EOL>for index_neighbor_cluster in range(len(self.__clusters)):<EOL><INDENT>if index_cluster != index_neighbor_cluster:<EOL><INDENT>candidate_score = self.__calculate_cluster_score(index_neighbor_cluster, difference)<EOL>if candidate_score < optimal_score:<EOL><INDENT>optimal_score = candidate_score<EOL><DEDENT><DEDENT><DEDENT>if optimal_score == float('<STR_LIT>'):<EOL><INDENT>optimal_score = -<NUM_LIT:1.0><EOL><DEDENT>return optimal_score<EOL>", "docstring": "!\n        @brief Calculates 'B' score for the specific object for the nearest cluster.\n\n        @param[in] index_point (uint): Index point from input data for which 'B' score should be calculated.\n        @param[in] index_cluster (uint): Index cluster to which the point is belong to.\n\n        @return (float) 'B' score for the object.", "id": "f15589:c0:m8"}
{"signature": "def __calculate_cluster_difference(self, index_cluster, difference):", "body": "cluster_difference = <NUM_LIT:0.0><EOL>for index_point in self.__clusters[index_cluster]:<EOL><INDENT>cluster_difference += difference[index_point]<EOL><DEDENT>return cluster_difference<EOL>", "docstring": "!\n        @brief Calculates distance from each object in specified cluster to specified object.\n\n        @param[in] index_point (uint): Index point for which difference is calculated.\n\n        @return (list) Distance from specified object to each object from input data in specified cluster.", "id": "f15589:c0:m9"}
{"signature": "def process(self):", "body": "if self.__ccore is True:<EOL><INDENT>self.__process_by_ccore()<EOL><DEDENT>else:<EOL><INDENT>self.__process_by_python()<EOL><DEDENT>return self<EOL>", "docstring": "!\n        @brief Calculates Silhouette score for each object from input data.\n\n        @return (silhouette) Instance of the method (self).", "id": "f15589:c0:m1"}
{"signature": "def __generate_point(self, index_cluster):", "body": "return [ random.gauss(self.__cluster_centers[index_cluster][index_dimension],<EOL>self.__cluster_width[index_cluster] / <NUM_LIT>)<EOL>for index_dimension in range(self.__dimension) ]<EOL>", "docstring": "!\n        @brief Generates point in line with parameters of specified cluster.\n\n        @param[in] index_cluster (uint): Index of cluster whose parameters are used for point generation.\n\n        @return (list) New generated point in line with normal distribution and cluster parameters.", "id": "f15590:c0:m2"}
{"signature": "def __init__(self, amount_clusters, dimension, cluster_sizes, cluster_centers=None, cluster_width=<NUM_LIT:1.0>):", "body": "self.__amount_clusters = amount_clusters<EOL>self.__dimension = dimension<EOL>self.__cluster_sizes = cluster_sizes<EOL>if not isinstance(self.__cluster_sizes, collections.Iterable):<EOL><INDENT>self.__cluster_sizes = [self.__cluster_sizes] * amount_clusters<EOL><DEDENT>self.__cluster_width = cluster_width<EOL>if not isinstance(self.__cluster_width, collections.Iterable):<EOL><INDENT>self.__cluster_width = [self.__cluster_width] * amount_clusters<EOL><DEDENT>self.__cluster_centers = cluster_centers<EOL>if self.__cluster_centers is None:<EOL><INDENT>self.__cluster_centers = self.__generate_cluster_centers(self.__cluster_width)<EOL><DEDENT>", "docstring": "!\n        @brief Constructs data generator for generating data-sets.\n\n        @param[in] amount_clusters (uint): Amount of clusters that should be generated.\n        @param[in] dimension (uint): Dimension of each generated point.\n        @param[in] cluster_sizes (uint|array_like): Size of each cluster. In case of 'array_like' input clusters with\n                    corresponding sizes are generated.\n        @param[in] cluster_centers (array_like): Optional parameter that defines cluster centers (means).\n        @param[in] cluster_width (uint|array_like): Optional parameter that defines cluster width (standard deviation).\n                    In case of 'array_like' input each cluster has own standard deviation.", "id": "f15590:c0:m0"}
{"signature": "def __update_medoids(self):", "body": "medoid_indexes = [-<NUM_LIT:1>] * len(self.__clusters)<EOL>for index in range(len(self.__clusters)):<EOL><INDENT>medoid_index = medoid(self.__pointer_data, self.__clusters[index], metric=self.__metric, data_type=self.__data_type)<EOL>medoid_indexes[index] = medoid_index<EOL><DEDENT>return medoid_indexes<EOL>", "docstring": "!\n        @brief Find medoids of clusters in line with contained objects.\n\n        @return (list) list of medoids for current number of clusters.", "id": "f15591:c0:m7"}
{"signature": "def get_medoids(self):", "body": "return self.__medoid_indexes<EOL>", "docstring": "!\n        @brief Returns list of medoids of allocated clusters represented by indexes from the input data.\n\n        @see process()\n        @see get_clusters()", "id": "f15591:c0:m3"}
{"signature": "def __init__(self, data, maximum_clusters, threshold, ccore=True, **kwargs):", "body": "super().__init__(data, maximum_clusters, threshold, ccore, **kwargs)<EOL>", "docstring": "!\n        @brief Creates MBSAS algorithm.\n\n        @param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple.\n        @param[in] maximum_clusters: Maximum allowable number of clusters that can be allocated during processing.\n        @param[in] threshold: Threshold of dissimilarity (maximum distance) between points.\n        @param[in] ccore (bool): If True than DLL CCORE (C++ solution) will be used for solving.\n        @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'metric').\n\n        <b>Keyword Args:</b><br>\n            - metric (distance_metric): Metric that is used for distance calculation between two points.", "id": "f15592:c0:m0"}
{"signature": "def __splitting_criterion(self, clusters, centers):", "body": "if self.__criterion == splitting_type.BAYESIAN_INFORMATION_CRITERION:<EOL><INDENT>return self.__bayesian_information_criterion(clusters, centers)<EOL><DEDENT>elif self.__criterion == splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH:<EOL><INDENT>return self.__minimum_noiseless_description_length(clusters, centers)<EOL><DEDENT>else:<EOL><INDENT>assert <NUM_LIT:0>;<EOL><DEDENT>", "docstring": "!\n        @brief Calculates splitting criterion for input clusters.\n\n        @param[in] clusters (list): Clusters for which splitting criterion should be calculated.\n        @param[in] centers (list): Centers of the clusters.\n\n        @return (double) Returns splitting criterion. High value of splitting cretion means that current structure is much better.\n\n        @see __bayesian_information_criterion(clusters, centers)\n        @see __minimum_noiseless_description_length(clusters, centers)", "id": "f15593:c1:m8"}
{"signature": "def get_cluster_encoding(self):", "body": "return type_encoding.CLUSTER_INDEX_LIST_SEPARATION<EOL>", "docstring": "!\n        @brief Returns clustering result representation type that indicate how clusters are encoded.\n\n        @return (type_encoding) Clustering result representation.\n\n        @see get_clusters()", "id": "f15593:c1:m4"}
{"signature": "def __improve_parameters(self, centers, available_indexes = None):", "body": "if available_indexes and len(available_indexes) == <NUM_LIT:1>:<EOL><INDENT>index_center = available_indexes[<NUM_LIT:0>]<EOL>return [ available_indexes ], self.__pointer_data[index_center]<EOL><DEDENT>local_data = self.__pointer_data<EOL>if available_indexes:<EOL><INDENT>local_data = [ self.__pointer_data[i] for i in available_indexes ]<EOL><DEDENT>local_centers = centers<EOL>if centers is None:<EOL><INDENT>local_centers = kmeans_plusplus_initializer(local_data, <NUM_LIT:2>, kmeans_plusplus_initializer.FARTHEST_CENTER_CANDIDATE).initialize()<EOL><DEDENT>kmeans_instance = kmeans(local_data, local_centers, tolerance=self.__tolerance, ccore=False)<EOL>kmeans_instance.process()<EOL>local_centers = kmeans_instance.get_centers()<EOL>clusters = kmeans_instance.get_clusters()<EOL>if available_indexes:<EOL><INDENT>clusters = self.__local_to_global_clusters(clusters, available_indexes)<EOL><DEDENT>return clusters, local_centers<EOL>", "docstring": "!\n        @brief Performs k-means clustering in the specified region.\n\n        @param[in] centers (list): Centers of clusters.\n        @param[in] available_indexes (list): Indexes that defines which points can be used for k-means clustering, if None - then all points are used.\n\n        @return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.", "id": "f15593:c1:m5"}
{"signature": "def __improve_structure(self, clusters, centers):", "body": "allocated_centers = []<EOL>amount_free_centers = self.__kmax - len(centers)<EOL>for index_cluster in range(len(clusters)):<EOL><INDENT>(parent_child_clusters, parent_child_centers) = self.__improve_parameters(None, clusters[index_cluster])<EOL>if len(parent_child_clusters) > <NUM_LIT:1>:<EOL><INDENT>parent_scores = self.__splitting_criterion([ clusters[index_cluster] ], [ centers[index_cluster] ])<EOL>child_scores = self.__splitting_criterion([ parent_child_clusters[<NUM_LIT:0>], parent_child_clusters[<NUM_LIT:1>] ], parent_child_centers)<EOL>split_require = False<EOL>if self.__criterion == splitting_type.BAYESIAN_INFORMATION_CRITERION:<EOL><INDENT>if parent_scores < child_scores: split_require = True<EOL><DEDENT>elif self.__criterion == splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH:<EOL><INDENT>if parent_scores > child_scores: split_require = True;<EOL><DEDENT>if (split_require is True) and (amount_free_centers > <NUM_LIT:0>):<EOL><INDENT>allocated_centers.append(parent_child_centers[<NUM_LIT:0>])<EOL>allocated_centers.append(parent_child_centers[<NUM_LIT:1>])<EOL>amount_free_centers -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>allocated_centers.append(centers[index_cluster])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>allocated_centers.append(centers[index_cluster])<EOL><DEDENT><DEDENT>return allocated_centers<EOL>", "docstring": "!\n        @brief Check for best structure: divides each cluster into two and checks for best results using splitting criterion.\n\n        @param[in] clusters (list): Clusters that have been allocated (each cluster contains indexes of points from data).\n        @param[in] centers (list): Centers of clusters.\n\n        @return (list) Allocated centers for clustering.", "id": "f15593:c1:m7"}
{"signature": "def get_clusters(self):", "body": "return self.__clusters<EOL>", "docstring": "!\n        @brief Returns list of allocated clusters, each cluster contains indexes of objects in list of data.\n\n        @return (list) List of allocated clusters.\n\n        @see process()\n        @see get_centers()", "id": "f15593:c1:m2"}
{"signature": "def __local_to_global_clusters(self, local_clusters, available_indexes):", "body": "clusters = []<EOL>for local_cluster in local_clusters:<EOL><INDENT>current_cluster = []<EOL>for index_point in local_cluster:<EOL><INDENT>current_cluster.append(available_indexes[index_point])<EOL><DEDENT>clusters.append(current_cluster)<EOL><DEDENT>return clusters<EOL>", "docstring": "!\n        @brief Converts clusters in local region define by 'available_indexes' to global clusters.\n\n        @param[in] local_clusters (list): Local clusters in specific region.\n        @param[in] available_indexes (list): Map between local and global point's indexes.\n\n        @return Global clusters.", "id": "f15593:c1:m6"}
{"signature": "def __calculate_dataset_difference(self, amount_clusters):", "body": "dataset_differences = numpy.zeros((amount_clusters, len(self.__pointer_data)))<EOL>for index_center in range(amount_clusters):<EOL><INDENT>if self.__metric.get_type() != type_metric.USER_DEFINED:<EOL><INDENT>dataset_differences[index_center] = self.__metric(self.__pointer_data, self.__centers[index_center])<EOL><DEDENT>else:<EOL><INDENT>dataset_differences[index_center] = [ self.__metric(point, self.__centers[index_center])<EOL>for point in self.__pointer_data ]<EOL><DEDENT><DEDENT>return dataset_differences<EOL>", "docstring": "!\n        @brief Calculate distance from each point to each cluster center.", "id": "f15594:c2:m11"}
{"signature": "def get_total_wce(self):", "body": "return self.__total_wce<EOL>", "docstring": "!\n        @brief Returns sum of metric errors that depends on metric that was used for clustering (by default SSE - Sum of Squared Errors).\n        @details Sum of metric errors is calculated using distance between point and its center:\n                 \\f[error=\\sum_{i=0}^{N}distance(x_{i}-center(x_{i}))\\f]\n\n        @see process()\n        @see get_clusters()", "id": "f15594:c2:m6"}
{"signature": "def __init__(self, data, initial_centers, tolerance=<NUM_LIT>, ccore=True, **kwargs):", "body": "self.__pointer_data = numpy.array(data)<EOL>self.__clusters = []<EOL>self.__centers = numpy.array(initial_centers)<EOL>self.__tolerance = tolerance<EOL>self.__total_wce = <NUM_LIT:0><EOL>self.__observer = kwargs.get('<STR_LIT>', None)<EOL>self.__metric = kwargs.get('<STR_LIT>', distance_metric(type_metric.EUCLIDEAN_SQUARE))<EOL>self.__itermax = kwargs.get('<STR_LIT>', <NUM_LIT:100>)<EOL>if self.__metric.get_type() != type_metric.USER_DEFINED:<EOL><INDENT>self.__metric.enable_numpy_usage()<EOL><DEDENT>else:<EOL><INDENT>self.__metric.disable_numpy_usage()<EOL><DEDENT>self.__ccore = ccore and self.__metric.get_type() != type_metric.USER_DEFINED<EOL>if self.__ccore is True:<EOL><INDENT>self.__ccore = ccore_library.workable()<EOL><DEDENT>", "docstring": "!\n        @brief Constructor of clustering algorithm K-Means.\n        @details Center initializer can be used for creating initial centers, for example, K-Means++ method.\n\n        @param[in] data (array_like): Input data that is presented as array of points (objects), each point should be represented by array_like data structure.\n        @param[in] initial_centers (array_like): Initial coordinates of centers of clusters that are represented by array_like data structure: [center1, center2, ...].\n        @param[in] tolerance (double): Stop condition: if maximum value of change of centers of clusters is less than tolerance then algorithm stops processing.\n        @param[in] ccore (bool): Defines should be CCORE library (C++ pyclustering library) used instead of Python code or not.\n        @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'observer', 'metric', 'itermax').\n\n        <b>Keyword Args:</b><br>\n            - observer (kmeans_observer): Observer of the algorithm to collect information about clustering process on each iteration.\n            - metric (distance_metric): 
Metric that is used for distance calculation between two points (by default euclidean square distance).\n            - itermax (uint): Maximum number of iterations that is used for clustering process (by default: 200).\n\n        @see center_initializer", "id": "f15594:c2:m0"}
{"signature": "def __process_by_python(self):", "body": "maximum_change = float('<STR_LIT>')<EOL>iteration = <NUM_LIT:0><EOL>if self.__observer is not None:<EOL><INDENT>initial_clusters = self.__update_clusters()<EOL>self.__observer.notify(initial_clusters, self.__centers.tolist())<EOL><DEDENT>while maximum_change > self.__tolerance and iteration < self.__itermax:<EOL><INDENT>self.__clusters = self.__update_clusters()<EOL>updated_centers = self.__update_centers()  <EOL>if self.__observer is not None:<EOL><INDENT>self.__observer.notify(self.__clusters, updated_centers.tolist())<EOL><DEDENT>maximum_change = self.__calculate_changes(updated_centers)<EOL>self.__centers = updated_centers    <EOL>iteration += <NUM_LIT:1><EOL><DEDENT>self.__calculate_total_wce()<EOL>", "docstring": "!\n        @brief Performs cluster analysis using python code.", "id": "f15594:c2:m3"}
{"signature": "def __process_by_ccore(self):", "body": "ccore_metric = metric_wrapper.create_instance(self.__metric)<EOL>results = wrapper.kmeans(self.__pointer_data, self.__centers, self.__tolerance, self.__itermax, (self.__observer is not None), ccore_metric.get_pointer())<EOL>self.__clusters = results[<NUM_LIT:0>]<EOL>self.__centers = results[<NUM_LIT:1>]<EOL>if self.__observer is not None:<EOL><INDENT>self.__observer.set_evolution_clusters(results[<NUM_LIT:2>])<EOL>self.__observer.set_evolution_centers(results[<NUM_LIT:3>])<EOL><DEDENT>self.__total_wce = results[<NUM_LIT:4>][<NUM_LIT:0>]<EOL>", "docstring": "!\n        @brief Performs cluster analysis using CCORE (C/C++ part of pyclustering library).", "id": "f15594:c2:m2"}
{"signature": "def set_evolution_clusters(self, evolution_clusters):", "body": "self.__evolution_clusters = evolution_clusters<EOL>", "docstring": "!\n        @brief Set evolution of changes of centers during clustering process.\n\n        @param[in] evolution_clusters (array_like): Evolution of changes of clusters during clustering process.", "id": "f15594:c0:m5"}
{"signature": "def get_centers(self):", "body": "if isinstance(self.__centers, list):<EOL><INDENT>return self.__centers<EOL><DEDENT>return self.__centers.tolist()<EOL>", "docstring": "!\n        @brief Returns list of centers of allocated clusters.\n\n        @see process()\n        @see get_clusters()", "id": "f15594:c2:m5"}
{"signature": "def notify(self, clusters, centers):", "body": "self.__evolution_clusters.append(clusters)<EOL>self.__evolution_centers.append(centers)<EOL>", "docstring": "!\n        @brief This method is called by K-Means algorithm to notify about changes.\n\n        @param[in] clusters (array_like): Allocated clusters by K-Means algorithm.\n        @param[in] centers (array_like): Allocated centers by K-Means algorithm.", "id": "f15594:c0:m2"}
{"signature": "def get_clusters(self, iteration):", "body": "return self.__evolution_clusters[iteration]<EOL>", "docstring": "!\n        @brief Get method to return allocated clusters at specific iteration of clustering process.\n\n        @param[in] iteration (uint): Clustering process iteration at which clusters are required.\n\n        @return (array_like) Clusters at specific iteration.", "id": "f15594:c0:m6"}
{"signature": "def process(self):", "body": "if len(self.__pointer_data[<NUM_LIT:0>]) != len(self.__centers[<NUM_LIT:0>]):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if self.__ccore is True:<EOL><INDENT>self.__process_by_ccore()<EOL><DEDENT>else:<EOL><INDENT>self.__process_by_python()<EOL><DEDENT>return self<EOL>", "docstring": "!\n        @brief Performs cluster analysis in line with rules of K-Means algorithm.\n\n        @return (kmeans) Returns itself (K-Means instance).\n\n        @remark Results of clustering can be obtained using corresponding get methods.\n\n        @see get_clusters()\n        @see get_centers()", "id": "f15594:c2:m1"}
{"signature": "def get_iterations(self):", "body": "return len(self.__means_evolution)<EOL>", "docstring": "!\n        @return (uint) Amount of iterations that were done by the EM algorithm.", "id": "f15595:c2:m2"}
{"signature": "def initialize(self, init_type = ema_init_type.KMEANS_INITIALIZATION):", "body": "if init_type == ema_init_type.KMEANS_INITIALIZATION:<EOL><INDENT>return self.__initialize_kmeans()<EOL><DEDENT>elif init_type == ema_init_type.RANDOM_INITIALIZATION:<EOL><INDENT>return self.__initialize_random()<EOL><DEDENT>raise NameError(\"<STR_LIT>\")<EOL>", "docstring": "!\n        @brief Calculates initial parameters for EM algorithm: means and covariances using\n                specified strategy.\n\n        @param[in] init_type (ema_init_type): Strategy for initialization.\n\n        @return (float|list, float|numpy.array) Initial means and variance (covariance matrix in case multi-dimensional data).", "id": "f15595:c1:m1"}
{"signature": "def get_clusters(self):", "body": "return self.__clusters<EOL>", "docstring": "!\n        @return (list) Allocated clusters where each cluster is represented by list of indexes of points from dataset,\n                        for example, two cluster may have following representation [[0, 1, 4], [2, 3, 5, 6]].", "id": "f15595:c4:m2"}
{"signature": "def get_evolution_covariances(self):", "body": "return self.__covariances_evolution<EOL>", "docstring": "!\n        @return (list) Covariance matrix (or variance in case of one-dimensional data) of each cluster on each step of clustering.", "id": "f15595:c2:m4"}
{"signature": "def __len__(self):", "body": "return len(self.__means_evolution)<EOL>", "docstring": "!\n        @return (uint) Amount of iterations that were done by the EM algorithm.", "id": "f15595:c2:m1"}
{"signature": "def process(self):", "body": "previous_likelihood = -<NUM_LIT><EOL>current_likelihood = -<NUM_LIT><EOL>current_iteration = <NUM_LIT:0><EOL>while(self.__stop is False) and (abs(previous_likelihood - current_likelihood) > self.__tolerance) and (current_iteration < self.__iterations):<EOL><INDENT>self.__expectation_step()<EOL>self.__maximization_step()<EOL>current_iteration += <NUM_LIT:1><EOL>self.__extract_clusters()<EOL>self.__notify()<EOL>previous_likelihood = current_likelihood<EOL>current_likelihood = self.__log_likelihood()<EOL>self.__stop = self.__get_stop_condition()<EOL><DEDENT>self.__normalize_probabilities()<EOL>", "docstring": "!\n        @brief Run clustering process of the algorithm.\n        @details This method should be called before call 'get_clusters()'.", "id": "f15595:c4:m1"}
{"signature": "def get_covariances(self):", "body": "return self.__variances<EOL>", "docstring": "!\n        @return (list) Corresponding variances (or covariances in case of multi-dimensional data) of clusters.", "id": "f15595:c4:m4"}
{"signature": "def get_evolution_clusters(self):", "body": "return self.__clusters_evolution<EOL>", "docstring": "!\n        @return (list) Allocated clusters on each step of clustering.", "id": "f15595:c2:m5"}
{"signature": "@staticmethod<EOL><INDENT>def _select(chromosomes, data, count_clusters, select_coeff):<DEDENT>", "body": "<EOL>centres = ga_math.get_centres(chromosomes, data, count_clusters)<EOL>fitness = genetic_algorithm._calc_fitness_function(centres, data, chromosomes)<EOL>for _idx in range(len(fitness)):<EOL><INDENT>fitness[_idx] = math.exp(<NUM_LIT:1> + fitness[_idx] * select_coeff)<EOL><DEDENT>probabilities = ga_math.calc_probability_vector(fitness)<EOL>new_chromosomes = np.zeros(chromosomes.shape, dtype=np.int)<EOL>for _idx in range(len(chromosomes)):<EOL><INDENT>new_chromosomes[_idx] = chromosomes[ga_math.get_uniform(probabilities)]<EOL><DEDENT>return new_chromosomes<EOL>", "docstring": "!\n        @brief Performs selection procedure where new chromosomes are calculated.\n\n        @param[in] chromosomes (numpy.array): Chromosomes", "id": "f15596:c2:m4"}
{"signature": "@staticmethod<EOL><INDENT>def _init_population(count_clusters, count_data, chromosome_count):<DEDENT>", "body": "population = np.random.randint(count_clusters, size=(chromosome_count, count_data))<EOL>return population<EOL>", "docstring": "!\n        @brief Returns first population as a uniform random choice.\n\n        @param[in] count_clusters (uint): Amount of clusters that should be allocated.\n        @param[in] count_data (uint): Data size that is used for clustering process.\n        @param[in] chromosome_count (uint):Amount of chromosome that is used for clustering.", "id": "f15596:c2:m9"}
{"signature": "@staticmethod<EOL><INDENT>def _calc_fitness_function(centres, data, chromosomes):<DEDENT>", "body": "<EOL>count_chromosome = len(chromosomes)<EOL>fitness_function = np.zeros(count_chromosome)<EOL>for _idx_chromosome in range(count_chromosome):<EOL><INDENT>centres_data = np.zeros(data.shape)<EOL>for _idx in range(len(data)):<EOL><INDENT>centres_data[_idx] = centres[_idx_chromosome][chromosomes[_idx_chromosome][_idx]]<EOL><DEDENT>fitness_function[_idx_chromosome] += np.sum(abs(data - centres_data))<EOL><DEDENT>return fitness_function<EOL>", "docstring": "!\n        @brief Calculate fitness function values for chromosomes.\n\n        @param[in] centres (list): Cluster centers.\n        @param[in] data (list): Input data that is used for clustering process.\n        @param[in] chromosomes (list): Chromosomes whose fitness function's values are calculated.\n\n        @return (list) Fitness function value for each chromosome correspondingly.", "id": "f15596:c2:m11"}
{"signature": "def get_observer(self):", "body": "return self._observer<EOL>", "docstring": "!\n        @brief Returns genetic algorithm observer.", "id": "f15596:c2:m2"}
{"signature": "@staticmethod<EOL><INDENT>def _crossover_a_pair(chromosome_1, chromosome_2, mask):<DEDENT>", "body": "for _idx in range(len(chromosome_1)):<EOL><INDENT>if mask[_idx] == <NUM_LIT:1>:<EOL><INDENT>chromosome_1[_idx], chromosome_2[_idx] = chromosome_2[_idx], chromosome_1[_idx]<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Crossovers a pair of chromosomes.\n\n        @param[in] chromosome_1 (numpy.array): The first chromosome for crossover.\n        @param[in] chromosome_2 (numpy.array): The second chromosome for crossover.\n        @param[in] mask (numpy.array): Crossover mask that defines which genes should be swapped.", "id": "f15596:c2:m7"}
{"signature": "def get_mean_fitness_function(self):", "body": "return self._mean_ff_result<EOL>", "docstring": "!\n        @brief (list) Returns fitness function's values on each iteration.", "id": "f15596:c0:m7"}
{"signature": "def __init__(self, need_global_best=False, need_population_best=False, need_mean_ff=False):", "body": "<EOL>self._global_best_result = {'<STR_LIT>': [], '<STR_LIT>': []}<EOL>self._best_population_result = {'<STR_LIT>': [], '<STR_LIT>': []}<EOL>self._mean_ff_result = []<EOL>self._need_global_best = need_global_best<EOL>self._need_population_best = need_population_best<EOL>self._need_mean_ff = need_mean_ff<EOL>", "docstring": "!\n        @brief Constructs genetic algorithm observer to collect specific information.\n\n        @param[in] need_global_best (bool): If 'True' then the best chromosomes and its fitness function value (global optimum) for each iteration are stored.\n        @param[in] need_population_best (bool): If 'True' then current (on each iteration) best chromosomes and its fitness function value (local optimum) are stored.\n        @param[in] need_mean_ff (bool): If 'True' then average value of fitness function on each iteration is stored.", "id": "f15596:c0:m0"}
{"signature": "def som_get_winner_number(som_pointer):", "body": "ccore = ccore_library.get()<EOL>ccore.som_get_winner_number.restype = c_size_t<EOL>return ccore.som_get_winner_number(som_pointer)<EOL>", "docstring": "!\n    @brief Returns of number of winner at the last step of learning process.\n\n    @param[in] som_pointer (c_pointer): pointer to object of self-organized map.", "id": "f15597:m5"}
{"signature": "def som_load(som_pointer, weights, award, capture_objects):", "body": "if len(weights) == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>ccore = ccore_library.get()<EOL>package_weights = package_builder(weights, c_double).create()<EOL>package_award = package_builder(award, c_size_t).create()<EOL>package_capture_objects = package_builder(capture_objects, c_size_t).create()<EOL>ccore.som_load(som_pointer, package_weights, package_award, package_capture_objects)<EOL>", "docstring": "!\n    @brief Load dump of the network to SOM.\n    @details Initialize SOM using existed weights, amount of captured objects by each neuron, captured\n              objects by each neuron. Initialization is not performed if weights are empty.\n\n    @param[in] som_pointer (POINTER): pointer to object of self-organized map.\n    @param[in] weights (list): weights that should assigned to neurons.\n    @param[in] awards (list): amount of captured objects by each neuron.\n    @param[in] capture_objects (list): captured objects by each neuron.", "id": "f15597:m1"}
{"signature": "def som_train(som_pointer, data, epochs, autostop):", "body": "pointer_data = package_builder(data, c_double).create()<EOL>ccore = ccore_library.get()<EOL>ccore.som_train.restype = c_size_t<EOL>return ccore.som_train(som_pointer, pointer_data, c_uint(epochs), autostop)<EOL>", "docstring": "!\n    @brief Trains self-organized feature map (SOM) using CCORE pyclustering library.\n\n    @param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates.\n    @param[in] epochs (uint): Number of epochs for training.        \n    @param[in] autostop (bool): Automatic termination of learining process when adaptation is not occurred.\n\n    @return (uint) Number of learining iterations.", "id": "f15597:m3"}
{"signature": "def extract(self):", "body": "return self.__extract_data(self.__package_pointer)<EOL>", "docstring": "!\n        @brief Performs unpacking procedure of the pyclustering package to the data.\n\n        @return (list) Extracted data from the pyclustering package.", "id": "f15609:c3:m1"}
{"signature": "@staticmethod<EOL><INDENT>def get_ctype(pyclustering_package_type):<DEDENT>", "body": "return pyclustering_type_data.__PYCLUSTERING_CTYPE_MAP[pyclustering_package_type]<EOL>", "docstring": "!\n        @return (ctype) Return ctype that corresponds to pyclustering type data.", "id": "f15609:c1:m0"}
{"signature": "def create(self):", "body": "return self.__create_package(self.__dataset)<EOL>", "docstring": "!\n        @brief Performs packing procedure of the data to the package.\n\n        @return (pointer) ctype-pointer to pyclustering package.", "id": "f15609:c2:m1"}
{"signature": "def rock(sample, eps, number_clusters, threshold):", "body": "pointer_data = package_builder(sample, c_double).create();<EOL>ccore = ccore_library.get();<EOL>ccore.rock_algorithm.restype = POINTER(pyclustering_package);<EOL>package = ccore.rock_algorithm(pointer_data, c_double(eps), c_size_t(number_clusters), c_double(threshold));<EOL>list_of_clusters = package_extractor(package).extract();<EOL>ccore.free_pyclustering_package(package);<EOL>return list_of_clusters<EOL>", "docstring": "@brief Clustering algorithm ROCK returns allocated clusters and noise that are consisted from input data. \n@details Calculation is performed via CCORE (C/C++ part of the pyclustering).\"\n\n@param[in] sample: input data - list of points where each point is represented by list of coordinates.\n@param[in] eps: connectivity radius (similarity threshold), points are neighbors if distance between them is less than connectivity radius.\n@param[in] number_clusters: defines number of clusters that should be allocated from the input data set.\n@param[in] threshold: value that defines degree of normalization that influences on choice of clusters for merging during processing.\n\n@return List of allocated clusters, each cluster contains indexes of objects in list of data.", "id": "f15628:m0"}
{"signature": "def __init__(self, canvas, x_title=None, y_title=None, x_lim=None, y_lim=None, x_labels=True, y_labels=True):", "body": "self.__size = canvas;<EOL>self.__canvases = [ canvas_descr(x_title, y_title, x_lim, y_lim, x_labels, y_labels) for _ in range(canvas) ];<EOL>self.__dynamic_storage = []<EOL>", "docstring": "!\n        @brief Construct dynamic visualizer.\n        @details Default properties that are generalized in the constructor, for example, X axis title, can be\n                  changed by corresponding method: 'set_canvas_properties'.\n\n        @param[in] canvas (uint): Amount of canvases that is used for visualization.\n        @param[in] x_title (string): Title for X axis of canvases, if 'None', then nothing is displayed.\n        @param[in] y_title (string): Title for Y axis of canvases, if 'None', then nothing is displayed.\n        @param[in] x_lim (list): Defines borders of X axis like [from, to], for example [0, 3.14], if 'None' then\n                    borders are calculated automatically.\n        @param[in] y_lim (list): Defines borders of Y axis like [from, to], if 'None' then borders are calculated\n                    automatically.\n        @param[in] x_labels (bool): If True then labels of X axis are displayed.\n        @param[in] y_labels (bool): If True then labels of Y axis are displayed.", "id": "f15629:c2:m0"}
{"signature": "def __init__(self, x_title=None, y_title=None, x_lim=None, y_lim=None, x_labels=True, y_labels=True):", "body": "<EOL>self.x_title  = x_title;<EOL>self.y_title  = y_title;<EOL>self.x_lim    = x_lim;<EOL>self.y_lim    = y_lim;<EOL>self.x_labels = x_labels;<EOL>self.y_labels = y_labels<EOL>", "docstring": "!\n        @brief Constructor of canvas.\n\n        @param[in] x_title (string): Title for X axis, if 'None', then nothing is displayed.\n        @param[in] y_title (string): Title for Y axis, if 'None', then nothing is displayed.\n        @param[in] x_lim (list): Defines borders of X axis like [from, to], for example [0, 3.14], if 'None' then\n                    borders are calculated automatically.\n        @param[in] y_lim (list): Defines borders of Y axis like [from, to], if 'None' then borders are calculated\n                    automatically.\n        @param[in] x_labels (bool): If True then labels of X axis are displayed.\n        @param[in] y_labels (bool): If True then labels of Y axis are displayed.", "id": "f15629:c0:m0"}
{"signature": "def show(self, axis=None, display=True):", "body": "if (not axis):<EOL><INDENT>(_, axis) = plt.subplots(self.__size, <NUM_LIT:1>);<EOL><DEDENT>self.__format_canvases(axis);<EOL>for dynamic in self.__dynamic_storage:<EOL><INDENT>self.__display_dynamic(axis, dynamic);<EOL><DEDENT>if (display):<EOL><INDENT>plt.show();<EOL><DEDENT>", "docstring": "!\n        @brief Draw and show output dynamics.\n\n        @param[in] axis (axis): If is not 'None' then user specified axis is used to display output dynamic.\n        @param[in] display (bool): Whether output dynamic should be displayed or not, if not, then user\n                    should call 'plt.show()' by himself.", "id": "f15629:c2:m4"}
{"signature": "def append_dynamics(self, t, dynamics, canvas=<NUM_LIT:0>, separate=False, color='<STR_LIT>'):", "body": "description = dynamic_descr(canvas, t, dynamics, separate, color);<EOL>self.__dynamic_storage.append(description);<EOL>self.__update_canvas_xlim(description.time, description.separate)<EOL>", "docstring": "!\n        @brief Append several dynamics to canvas or canvases (defined by 'canvas' and 'separate' arguments).\n\n        @param[in] t (list): Time points that corresponds to dynamic values and considered on a X axis.\n        @param[in] dynamics (list): Dynamics where each of them is considered on Y axis.\n        @param[in] canvas (uint): Index of canvas where dynamic should be displayed, in case of 'separate'\n                    representation this argument is considered as a first canvas from that displaying should be done.\n        @param[in] separate (bool|list): If 'True' then each dynamic is displayed on separate canvas, if it is defined\n                    by list, for example, [ [1, 2], [3, 4] ], then the first and the second dynamics are displayed on\n                    the canvas with index 'canvas' and the third and forth are displayed on the next 'canvas + 1'\n                    canvas.\n        @param[in] color (string): Color that is used to display output dynamic(s).", "id": "f15629:c2:m3"}
{"signature": "def append_dynamic(self, t, dynamic, canvas=<NUM_LIT:0>, color='<STR_LIT>'):", "body": "description = dynamic_descr(canvas, t, dynamic, False, color);<EOL>self.__dynamic_storage.append(description);<EOL>self.__update_canvas_xlim(description.time, description.separate)<EOL>", "docstring": "!\n        @brief Append single dynamic to specified canvas (by default to the first with index '0').\n\n        @param[in] t (list): Time points that corresponds to dynamic values and considered on a X axis.\n        @param[in] dynamic (list): Value points of dynamic that are considered on an Y axis.\n        @param[in] canvas (uint): Canvas where dynamic should be displayed.\n        @param[in] color (string): Color that is used for drawing dynamic on the canvas.", "id": "f15629:c2:m2"}
{"signature": "@staticmethod<EOL><INDENT>def show_phase_matrix(sync_output_dynamic, grid_width = None, grid_height = None, iteration = None):<DEDENT>", "body": "_ = plt.figure();<EOL>phase_matrix = sync_output_dynamic.allocate_phase_matrix(grid_width, grid_height, iteration);<EOL>plt.imshow(phase_matrix, cmap = plt.get_cmap('<STR_LIT>'), interpolation='<STR_LIT>', vmin = <NUM_LIT:0.0>, vmax = <NUM_LIT> * math.pi); <EOL>plt.show()<EOL>", "docstring": "!\n        @brief Shows 2D matrix of phase values of oscillators at the specified iteration.\n        @details User should ensure correct matrix sizes in line with following expression grid_width x grid_height that should be equal to \n                  amount of oscillators otherwise exception is thrown. If grid_width or grid_height are not specified than phase matrix size \n                  will by calculated automatically by square root.\n\n        @param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network whose phase matrix should be shown.\n        @param[in] grid_width (uint): Width of the phase matrix.\n        @param[in] grid_height (uint): Height of the phase matrix.\n        @param[in] iteration (uint): Number of iteration of simulation for which correlation matrix should be allocated.\n                    If iternation number is not specified, the last step of simulation is used for the matrix allocation.", "id": "f15630:c2:m3"}
{"signature": "@staticmethod<EOL><INDENT>def animate_correlation_matrix(sync_output_dynamic, animation_velocity = <NUM_LIT>, colormap = '<STR_LIT>', save_movie = None):<DEDENT>", "body": "figure = plt.figure()<EOL>correlation_matrix = sync_output_dynamic.allocate_correlation_matrix(<NUM_LIT:0>)<EOL>artist = plt.imshow(correlation_matrix, cmap = plt.get_cmap(colormap), interpolation='<STR_LIT>', vmin = <NUM_LIT:0.0>, vmax = <NUM_LIT:1.0>)<EOL>def init_frame(): <EOL><INDENT>return [ artist ]<EOL><DEDENT>def frame_generation(index_dynamic):<EOL><INDENT>correlation_matrix = sync_output_dynamic.allocate_correlation_matrix(index_dynamic)<EOL>artist.set_data(correlation_matrix)<EOL>return [ artist ]<EOL><DEDENT>correlation_animation = animation.FuncAnimation(figure, frame_generation, len(sync_output_dynamic), init_func = init_frame, interval = animation_velocity , repeat_delay = <NUM_LIT:1000>, blit = True)<EOL>if (save_movie is not None):<EOL><INDENT>correlation_animation.save(save_movie, writer = '<STR_LIT>', fps = <NUM_LIT:15>, bitrate = <NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>plt.show()<EOL><DEDENT>", "docstring": "!\n        @brief Shows animation of correlation matrix between oscillators during simulation.\n\n        @param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network.\n        @param[in] animation_velocity (uint): Interval between frames in milliseconds.\n        @param[in] colormap (string): Name of colormap that is used by matplotlib ('gray', 'pink', 'cool', spring', etc.).\n        @param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.", "id": "f15630:c2:m7"}
{"signature": "def _phase_kuramoto(self, teta, t, argv):", "body": "index = argv;<EOL>phase = <NUM_LIT:0>;<EOL>for k in range(<NUM_LIT:0>, self._num_osc):<EOL><INDENT>if (self.has_connection(index, k) == True):<EOL><INDENT>phase += math.sin(self._phases[k] - teta);<EOL><DEDENT><DEDENT>return ( self._freq[index] + (phase * self._weight / self._num_osc) )<EOL>", "docstring": "!\n        @brief Returns result of phase calculation for specified oscillator in the network.\n\n        @param[in] teta (double): Phase of the oscillator that is differentiated.\n        @param[in] t (double): Current time of simulation.\n        @param[in] argv (tuple): Index of the oscillator in the list.\n\n        @return (double) New phase for specified oscillator (don't assign here).", "id": "f15630:c3:m4"}
{"signature": "@staticmethod<EOL><INDENT>def calculate_sync_order(oscillator_phases):<DEDENT>", "body": "exp_amount = <NUM_LIT:0.0>;<EOL>average_phase = <NUM_LIT:0.0>;<EOL>for phase in oscillator_phases:<EOL><INDENT>exp_amount += math.expm1( abs(<NUM_LIT> * phase) );<EOL>average_phase += phase;<EOL><DEDENT>exp_amount /= len(oscillator_phases);<EOL>average_phase = math.expm1( abs(<NUM_LIT> * (average_phase / len(oscillator_phases))) );<EOL>return abs(average_phase) / abs(exp_amount)<EOL>", "docstring": "!\n        @brief Calculates level of global synchronization (order parameter) for input phases.\n        @details This parameter is tend 1.0 when the oscillatory network close to global synchronization and it tend to 0.0 when \n                  desynchronization is observed in the network.\n\n        @param[in] oscillator_phases (list): List of oscillator phases that are used for level of global synchronization.\n\n        @return (double) Level of global synchronization (order parameter).\n\n        @see calculate_order_parameter()", "id": "f15630:c0:m0"}
{"signature": "def simulate(self, steps, time, solution = solve_type.FAST, collect_dynamic = True):", "body": "return self.simulate_static(steps, time, solution, collect_dynamic)<EOL>", "docstring": "!\n        @brief Performs static simulation of Sync oscillatory network.\n\n        @param[in] steps (uint): Number steps of simulations during simulation.\n        @param[in] time (double): Time of simulation.\n        @param[in] solution (solve_type): Type of solution (solving).\n        @param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.\n\n        @return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,\n                otherwise returns only last values (last step of simulation) of dynamic.\n\n        @see simulate_dynamic()\n        @see simulate_static()", "id": "f15630:c3:m5"}
{"signature": "def __init__(self, phase, time, ccore = None):", "body": "self._dynamic = phase;<EOL>self._time = time;<EOL>self._ccore_sync_dynamic_pointer = ccore<EOL>", "docstring": "!\n        @brief Constructor of Sync dynamic.\n\n        @param[in] phase (list): Dynamic of oscillators on each step of simulation. If ccore pointer is specified than it can be ignored.\n        @param[in] time (list): Simulation time.\n        @param[in] ccore (ctypes.pointer): Pointer to CCORE sync_dynamic instance in memory.", "id": "f15630:c1:m2"}
{"signature": "def simulate_static(self, steps, time, solution = solve_type.FAST, collect_dynamic = False):", "body": "if (self._ccore_network_pointer is not None):<EOL><INDENT>ccore_instance_dynamic = wrapper.sync_simulate_static(self._ccore_network_pointer, steps, time, solution, collect_dynamic);<EOL>return sync_dynamic(None, None, ccore_instance_dynamic);<EOL><DEDENT>dyn_phase = [];<EOL>dyn_time = [];<EOL>if (collect_dynamic == True):<EOL><INDENT>dyn_phase.append(self._phases);<EOL>dyn_time.append(<NUM_LIT:0>);<EOL><DEDENT>step = time / steps;<EOL>int_step = step / <NUM_LIT>;<EOL>for t in numpy.arange(step, time + step, step):<EOL><INDENT>self._phases = self._calculate_phases(solution, t, step, int_step);<EOL>if (collect_dynamic == True):<EOL><INDENT>dyn_phase.append(self._phases);<EOL>dyn_time.append(t);<EOL><DEDENT><DEDENT>if (collect_dynamic != True):<EOL><INDENT>dyn_phase.append(self._phases);<EOL>dyn_time.append(time);<EOL><DEDENT>output_sync_dynamic = sync_dynamic(dyn_phase, dyn_time);<EOL>return output_sync_dynamic<EOL>", "docstring": "!\n        @brief Performs static simulation of oscillatory network.\n\n        @param[in] steps (uint): Number steps of simulations during simulation.\n        @param[in] time (double): Time of simulation.\n        @param[in] solution (solve_type): Type of solution.\n        @param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.\n\n        @return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,\n                otherwise returns only last values (last step of simulation) of dynamic.\n\n        @see simulate()\n        @see simulate_dynamic()", "id": "f15630:c3:m7"}
{"signature": "@staticmethod<EOL><INDENT>def show_correlation_matrix(sync_output_dynamic, iteration = None):<DEDENT>", "body": "_ = plt.figure();<EOL>correlation_matrix = sync_output_dynamic.allocate_correlation_matrix(iteration);<EOL>plt.imshow(correlation_matrix, cmap = plt.get_cmap('<STR_LIT>'), interpolation='<STR_LIT>', vmin = <NUM_LIT:0.0>, vmax = <NUM_LIT:1.0>); <EOL>plt.show()<EOL>", "docstring": "!\n        @brief Shows correlation matrix between oscillators at the specified iteration.\n\n        @param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network.\n        @param[in] iteration (uint): Number of interation of simulation for which correlation matrix should be allocated.\n                                      If iternation number is not specified, the last step of simulation is used for the matrix allocation.", "id": "f15630:c2:m2"}
{"signature": "def simulate_dynamic(self, order = <NUM_LIT>, solution = solve_type.FAST, collect_dynamic = False, step = <NUM_LIT:0.1>, int_step = <NUM_LIT>, threshold_changes = <NUM_LIT>):", "body": "if (self._ccore_network_pointer is not None):<EOL><INDENT>ccore_instance_dynamic = wrapper.sync_simulate_dynamic(self._ccore_network_pointer, order, solution, collect_dynamic, step, int_step, threshold_changes);<EOL>return sync_dynamic(None, None, ccore_instance_dynamic);<EOL><DEDENT>time_counter = <NUM_LIT:0>;<EOL>previous_order = <NUM_LIT:0>;<EOL>current_order = self.sync_local_order();<EOL>dyn_phase = [];<EOL>dyn_time = [];<EOL>if (collect_dynamic == True):<EOL><INDENT>dyn_phase.append(self._phases);<EOL>dyn_time.append(<NUM_LIT:0>);<EOL><DEDENT>while (current_order < order):<EOL><INDENT>self._phases = self._calculate_phases(solution, time_counter, step, int_step);<EOL>time_counter += step;<EOL>if (collect_dynamic == True):<EOL><INDENT>dyn_phase.append(self._phases);<EOL>dyn_time.append(time_counter);<EOL><DEDENT>previous_order = current_order;<EOL>current_order = self.sync_local_order();<EOL>if (abs(current_order - previous_order) < threshold_changes):<EOL><INDENT>break;<EOL><DEDENT><DEDENT>if (collect_dynamic != True):<EOL><INDENT>dyn_phase.append(self._phases);<EOL>dyn_time.append(time_counter);<EOL><DEDENT>output_sync_dynamic = sync_dynamic(dyn_phase, dyn_time, None);<EOL>return output_sync_dynamic<EOL>", "docstring": "!\n        @brief Performs dynamic simulation of the network until stop condition is not reached. 
Stop condition is defined by input argument 'order'.\n\n        @param[in] order (double): Order of process synchronization, distributed 0..1.\n        @param[in] solution (solve_type): Type of solution.\n        @param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.\n        @param[in] step (double): Time step of one iteration of simulation.\n        @param[in] int_step (double): Integration step, should be less than step.\n        @param[in] threshold_changes (double): Additional stop condition that helps prevent infinite simulation, defines limit of changes of oscillators between current and previous steps.\n\n        @return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,\n                otherwise returns only last values (last step of simulation) of dynamic.\n\n        @see simulate()\n        @see simulate_static()", "id": "f15630:c3:m6"}
{"signature": "@property<EOL><INDENT>def output(self):<DEDENT>", "body": "if ( (self._ccore_sync_dynamic_pointer is not None) and ( (self._dynamic is None) or (len(self._dynamic) == <NUM_LIT:0>) ) ):<EOL><INDENT>self._dynamic = wrapper.sync_dynamic_get_output(self._ccore_sync_dynamic_pointer);<EOL><DEDENT>return self._dynamic<EOL>", "docstring": "!\n        @brief (list) Returns output dynamic of the Sync network (phase coordinates of each oscillator in the network) during simulation.", "id": "f15630:c1:m0"}
{"signature": "@staticmethod<EOL><INDENT>def show_local_order_parameter(sync_output_dynamic, oscillatory_network, start_iteration = None, stop_iteration = None):<DEDENT>", "body": "(start_iteration, stop_iteration) = sync_visualizer.__get_start_stop_iterations(sync_output_dynamic, start_iteration, stop_iteration);<EOL>order_parameter = sync_output_dynamic.calculate_local_order_parameter(oscillatory_network, start_iteration, stop_iteration);<EOL>axis = plt.subplot(<NUM_LIT>);<EOL>plt.plot(sync_output_dynamic.time[start_iteration:stop_iteration], order_parameter, '<STR_LIT>', linewidth = <NUM_LIT>);<EOL>set_ax_param(axis, \"<STR_LIT:t>\", \"<STR_LIT>\", None, [<NUM_LIT:0.0>, <NUM_LIT>]);<EOL>plt.show()<EOL>", "docstring": "!\n        @brief Shows evolution of local order parameter (level of local synchronization in the network).\n\n        @param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network whose evolution of global synchronization should be visualized.\n        @param[in] oscillatory_network (sync): Sync oscillatory network whose structure of connections is required for calculation.\n        @param[in] start_iteration (uint): The first iteration that is used for calculation, if 'None' then the first is used\n        @param[in] stop_iteration (uint): The last iteration that is used for calculation, if 'None' then the last is used.", "id": "f15630:c2:m5"}
{"signature": "@staticmethod<EOL><INDENT>def show_output_dynamics(sync_output_dynamics):<DEDENT>", "body": "draw_dynamics_set(sync_output_dynamics, \"<STR_LIT:t>\", \"<STR_LIT>\", None, [<NUM_LIT:0>, <NUM_LIT:2> * <NUM_LIT>], False, False)<EOL>", "docstring": "!\n        @brief Shows several output dynamics (output of each oscillator) during simulation.\n        @details Each dynamic is presented on separate plot.\n\n        @param[in] sync_output_dynamics (list): list of output dynamics 'sync_dynamic' of the Sync network.\n\n        @see show_output_dynamic", "id": "f15630:c2:m1"}
{"signature": "def sync_order(self):", "body": "if (self._ccore_network_pointer is not None):<EOL><INDENT>return wrapper.sync_order(self._ccore_network_pointer);<EOL><DEDENT>return order_estimator.calculate_sync_order(self._phases)<EOL>", "docstring": "!\n        @brief Calculates current level of global synchorization (order parameter) in the network.\n        @details This parameter is tend 1.0 when the oscillatory network close to global synchronization and it tend to 0.0 when \n                  desynchronization is observed in the network. Order parameter is calculated using following equation:\n\n                  \\f[\n                  r_{c}=\\frac{1}{Ne^{i\\varphi }}\\sum_{j=0}^{N}e^{i\\theta_{j}};\n                  \\f]\n\n                  where \\f$\\varphi\\f$ is a average phase coordinate in the network, \\f$N\\f$ is an amount of oscillators in the network.\n\n        Example:\n        @code\n            oscillatory_network = sync(16, type_conn = conn_type.ALL_TO_ALL);\n            output_dynamic = oscillatory_network.simulate_static(100, 10);\n\n            if (oscillatory_network.sync_order() < 0.9): print(\"Global synchronization is not reached yet.\");\n            else: print(\"Global synchronization is reached.\");\n        @endcode\n\n        @return (double) Level of global synchronization (order parameter).\n\n        @see sync_local_order()", "id": "f15630:c3:m2"}
{"signature": "@property<EOL><INDENT>def time(self):<DEDENT>", "body": "if ( (self._ccore_sync_dynamic_pointer is not None) and ( (self._time is None) or (len(self._time) == <NUM_LIT:0>) ) ):<EOL><INDENT>self._time = wrapper.sync_dynamic_get_time(self._ccore_sync_dynamic_pointer);<EOL><DEDENT>return self._time<EOL>", "docstring": "!\n        @brief (list) Returns sampling times when dynamic is measured during simulation.", "id": "f15630:c1:m1"}
{"signature": "@staticmethod<EOL><INDENT>def animate_output_dynamic(sync_output_dynamic, animation_velocity = <NUM_LIT>, save_movie = None):<DEDENT>", "body": "figure = plt.figure();<EOL>dynamic = sync_output_dynamic.output[<NUM_LIT:0>];<EOL>artist, = plt.polar(dynamic, [<NUM_LIT:1.0>] * len(dynamic), '<STR_LIT:o>', color = '<STR_LIT>');<EOL>def init_frame():<EOL><INDENT>return [ artist ];<EOL><DEDENT>def frame_generation(index_dynamic):<EOL><INDENT>dynamic = sync_output_dynamic.output[index_dynamic];<EOL>artist.set_data(dynamic, [<NUM_LIT:1.0>] * len(dynamic));<EOL>return [ artist ];<EOL><DEDENT>phase_animation = animation.FuncAnimation(figure, frame_generation, len(sync_output_dynamic), interval = animation_velocity, init_func = init_frame, repeat_delay = <NUM_LIT>);<EOL>if (save_movie is not None):<EOL><INDENT>phase_animation.save(save_movie, writer = '<STR_LIT>', fps = <NUM_LIT:15>, bitrate = <NUM_LIT>);<EOL><DEDENT>else:<EOL><INDENT>plt.show();<EOL><DEDENT>", "docstring": "!\n        @brief Shows animation of output dynamic (output of each oscillator) during simulation on a circle from [0; 2pi].\n\n        @param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network.\n        @param[in] animation_velocity (uint): Interval between frames in milliseconds.\n        @param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.", "id": "f15630:c2:m6"}
{"signature": "def _calculate_phases(self, solution, t, step, int_step):", "body": "next_phases = [<NUM_LIT:0.0>] * self._num_osc;    <EOL>for index in range (<NUM_LIT:0>, self._num_osc, <NUM_LIT:1>):<EOL><INDENT>if (solution == solve_type.FAST):<EOL><INDENT>result = self._phases[index] + self._phase_kuramoto(self._phases[index], <NUM_LIT:0>, index);<EOL>next_phases[index] = self._phase_normalization(result);<EOL><DEDENT>elif ( (solution == solve_type.RK4) or (solution == solve_type.RKF45) ):<EOL><INDENT>result = odeint(self._phase_kuramoto, self._phases[index], numpy.arange(t - step, t, int_step), (index , ));<EOL>next_phases[index] = self._phase_normalization(result[len(result) - <NUM_LIT:1>][<NUM_LIT:0>]);<EOL><DEDENT>else:<EOL><INDENT>raise NameError(\"<STR_LIT>\" + str(solution) + \"<STR_LIT>\");<EOL><DEDENT><DEDENT>return next_phases<EOL>", "docstring": "!\n        @brief Calculates new phases for oscillators in the network in line with current step.\n\n        @param[in] solution (solve_type): Type solver of the differential equation.\n        @param[in] t (double): Time of simulation.\n        @param[in] step (double): Step of solution at the end of which states of oscillators should be calculated.\n        @param[in] int_step (double): Step differentiation that is used for solving differential equation.\n\n        @return (list) New states (phases) for oscillators.", "id": "f15630:c3:m8"}
{"signature": "def __init__(self, num_osc, weight = <NUM_LIT:1>, frequency = <NUM_LIT:0>, type_conn = conn_type.ALL_TO_ALL, representation = conn_represent.MATRIX, initial_phases = initial_type.RANDOM_GAUSSIAN, ccore = True):", "body": "self._ccore_network_pointer = None;      <EOL>if ( (ccore is True) and ccore_library.workable() ):<EOL><INDENT>self._ccore_network_pointer = wrapper.sync_create_network(num_osc, weight, frequency, type_conn, initial_phases);<EOL>self._num_osc = num_osc;<EOL>self._conn_represent = conn_represent.MATRIX;<EOL><DEDENT>else:   <EOL><INDENT>super().__init__(num_osc, type_conn, representation);<EOL>self._weight = weight;<EOL>self._phases = list();<EOL>self._freq = list();<EOL>random.seed();<EOL>for index in range(<NUM_LIT:0>, num_osc, <NUM_LIT:1>):<EOL><INDENT>if (initial_phases == initial_type.RANDOM_GAUSSIAN):<EOL><INDENT>self._phases.append(random.random() * <NUM_LIT:2> * pi);<EOL><DEDENT>elif (initial_phases == initial_type.EQUIPARTITION):<EOL><INDENT>self._phases.append( pi / num_osc * index);<EOL><DEDENT>self._freq.append(random.random() * frequency);<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Constructor of oscillatory network is based on Kuramoto model.\n\n        @param[in] num_osc (uint): Number of oscillators in the network.\n        @param[in] weight (double): Coupling strength of the links between oscillators.\n        @param[in] frequency (double): Multiplier of internal frequency of the oscillators.\n        @param[in] type_conn (conn_type): Type of connection between oscillators in the network (all-to-all, grid, bidirectional list, etc.).\n        @param[in] representation (conn_represent): Internal representation of connection in the network: matrix or list.\n        @param[in] initial_phases (initial_type): Type of initialization of initial phases of oscillators (random, uniformly distributed, etc.).\n        @param[in] ccore (bool): If True simulation is performed by CCORE library (C++ implementation of 
pyclustering).", "id": "f15630:c3:m0"}
{"signature": "@staticmethod<EOL><INDENT>def animate_phase_matrix(sync_output_dynamic, grid_width = None, grid_height = None, animation_velocity = <NUM_LIT>, colormap = '<STR_LIT>', save_movie = None):<DEDENT>", "body": "figure = plt.figure();<EOL>def init_frame(): <EOL><INDENT>return frame_generation(<NUM_LIT:0>);<EOL><DEDENT>def frame_generation(index_dynamic):<EOL><INDENT>figure.clf();<EOL>axis = figure.add_subplot(<NUM_LIT>);<EOL>phase_matrix = sync_output_dynamic.allocate_phase_matrix(grid_width, grid_height, index_dynamic);<EOL>axis.imshow(phase_matrix, cmap = plt.get_cmap(colormap), interpolation='<STR_LIT>', vmin = <NUM_LIT:0.0>, vmax = <NUM_LIT> * math.pi);<EOL>artist = figure.gca();<EOL>return [ artist ];<EOL><DEDENT>phase_animation = animation.FuncAnimation(figure, frame_generation, len(sync_output_dynamic), init_func = init_frame, interval = animation_velocity , repeat_delay = <NUM_LIT:1000>);<EOL>if (save_movie is not None):<EOL><INDENT>phase_animation.save(save_movie, writer = '<STR_LIT>', fps = <NUM_LIT:15>, bitrate = <NUM_LIT>);<EOL><DEDENT>else:<EOL><INDENT>plt.show();<EOL><DEDENT>", "docstring": "!\n        @brief Shows animation of phase matrix between oscillators during simulation on 2D stage.\n        @details If grid_width or grid_height are not specified than phase matrix size will by calculated automatically by square root.\n\n        @param[in] sync_output_dynamic (sync_dynamic): Output dynamic of the Sync network.\n        @param[in] grid_width (uint): Width of the phase matrix.\n        @param[in] grid_height (uint): Height of the phase matrix.\n        @param[in] animation_velocity (uint): Interval between frames in milliseconds.\n        @param[in] colormap (string): Name of colormap that is used by matplotlib ('gray', 'pink', 'cool', spring', etc.).\n        @param[in] save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.", "id": "f15630:c2:m8"}
{"signature": "def allocate_sync_ensembles(self, tolerance = <NUM_LIT>, indexes = None, iteration = None):", "body": "if (self._ccore_sync_dynamic_pointer is not None):<EOL><INDENT>ensembles = wrapper.sync_dynamic_allocate_sync_ensembles(self._ccore_sync_dynamic_pointer, tolerance, iteration);<EOL>if (indexes is not None):<EOL><INDENT>for ensemble in ensembles:<EOL><INDENT>for index in range(len(ensemble)):<EOL><INDENT>ensemble[index] = indexes[ ensemble[index] ];<EOL><DEDENT><DEDENT><DEDENT>return ensembles;<EOL><DEDENT>if ( (self._dynamic is None) or (len(self._dynamic) == <NUM_LIT:0>) ):<EOL><INDENT>return [];<EOL><DEDENT>number_oscillators = len(self._dynamic[<NUM_LIT:0>]);<EOL>last_state = None;<EOL>if (iteration is None):<EOL><INDENT>last_state = self._dynamic[len(self._dynamic) - <NUM_LIT:1>];<EOL><DEDENT>else:<EOL><INDENT>last_state = self._dynamic[iteration];<EOL><DEDENT>clusters = [];<EOL>if (number_oscillators > <NUM_LIT:0>):<EOL><INDENT>clusters.append([<NUM_LIT:0>]);<EOL><DEDENT>for i in range(<NUM_LIT:1>, number_oscillators, <NUM_LIT:1>):<EOL><INDENT>cluster_allocated = False;<EOL>for cluster in clusters:<EOL><INDENT>for neuron_index in cluster:<EOL><INDENT>last_state_shifted = abs(last_state[i] - <NUM_LIT:2> * pi);<EOL>if ( ( (last_state[i] < (last_state[neuron_index] + tolerance)) and (last_state[i] > (last_state[neuron_index] - tolerance)) ) or<EOL>( (last_state_shifted < (last_state[neuron_index] + tolerance)) and (last_state_shifted > (last_state[neuron_index] - tolerance)) ) ):<EOL><INDENT>cluster_allocated = True;<EOL>real_index = i;<EOL>if (indexes is not None):<EOL><INDENT>real_index = indexes[i];<EOL><DEDENT>cluster.append(real_index);<EOL>break;<EOL><DEDENT><DEDENT>if (cluster_allocated == True):<EOL><INDENT>break;<EOL><DEDENT><DEDENT>if (cluster_allocated == False):<EOL><INDENT>clusters.append([i]);<EOL><DEDENT><DEDENT>return clusters<EOL>", "docstring": "!\n        @brief Allocate clusters in line with ensembles of synchronous oscillators 
where each synchronous ensemble corresponds to only one cluster.\n\n        @param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators.\n        @param[in] indexes (list): List of real object indexes and it should be equal to amount of oscillators (in case of 'None' - indexes are in range [0; amount_oscillators]).\n        @param[in] iteration (uint): Iteration of simulation that should be used for allocation.\n\n        @return (list) Grours (lists) of indexes of synchronous oscillators.\n                For example [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].", "id": "f15630:c1:m6"}
{"signature": "def calculate_order_parameter(self, start_iteration = None, stop_iteration = None):", "body": "(start_iteration, stop_iteration) = self.__get_start_stop_iterations(start_iteration, stop_iteration);<EOL>if (self._ccore_sync_dynamic_pointer is not None):<EOL><INDENT>return wrapper.sync_dynamic_calculate_order(self._ccore_sync_dynamic_pointer, start_iteration, stop_iteration);<EOL><DEDENT>sequence_order = [];<EOL>for index in range(start_iteration, stop_iteration):<EOL><INDENT>sequence_order.append(order_estimator.calculate_sync_order(self.output[index]));<EOL><DEDENT>return sequence_order<EOL>", "docstring": "!\n        @brief Calculates level of global synchorization (order parameter).\n        @details This parameter is tend 1.0 when the oscillatory network close to global synchronization and it tend to 0.0 when \n                  desynchronization is observed in the network. Order parameter is calculated using following equation:\n\n                  \\f[\n                  r_{c}=\\frac{1}{Ne^{i\\varphi }}\\sum_{j=0}^{N}e^{i\\theta_{j}};\n                  \\f]\n\n                  where \\f$\\varphi\\f$ is a average phase coordinate in the network, \\f$N\\f$ is an amount of oscillators in the network.\n\n        @param[in] start_iteration (uint): The first iteration that is used for calculation, if 'None' then the last iteration is used.\n        @param[in] stop_iteration (uint): The last iteration that is used for calculation, if 'None' then 'start_iteration' + 1 is used.\n\n        Example:\n        @code\n            oscillatory_network = sync(16, type_conn = conn_type.ALL_TO_ALL);\n            output_dynamic = oscillatory_network.simulate_static(100, 10);\n\n            print(\"Order parameter at the last step: \", output_dynamic.calculate_order_parameter());\n            print(\"Order parameter at the first step:\", output_dynamic.calculate_order_parameter(0));\n            print(\"Order parameter evolution between 40 and 50 steps:\", 
output_dynamic.calculate_order_parameter(40, 50));\n        @endcode\n\n        @return (list) List of levels of global synchronization (order parameter evolution).\n\n        @see order_estimator", "id": "f15630:c1:m9"}
{"signature": "def calculate_local_order_parameter(self, oscillatory_network, start_iteration = None, stop_iteration = None):", "body": "(start_iteration, stop_iteration) = self.__get_start_stop_iterations(start_iteration, stop_iteration);<EOL>if (self._ccore_sync_dynamic_pointer is not None):<EOL><INDENT>network_pointer = oscillatory_network._ccore_network_pointer;<EOL>return wrapper.sync_dynamic_calculate_local_order(self._ccore_sync_dynamic_pointer, network_pointer, start_iteration, stop_iteration);<EOL><DEDENT>sequence_local_order = [];<EOL>for index in range(start_iteration, stop_iteration):<EOL><INDENT>sequence_local_order.append(order_estimator.calculate_local_sync_order(self.output[index], oscillatory_network));<EOL><DEDENT>return sequence_local_order<EOL>", "docstring": "!\n        @brief Calculates local order parameter.\n        @details Local order parameter or so-called level of local or partial synchronization is calculated by following expression:\n\n        \\f[\n        r_{c}=\\left | \\sum_{i=0}^{N} \\frac{1}{N_{i}} \\sum_{j=0}e^{ \\theta_{j} - \\theta_{i} } \\right |;\n        \\f]\n\n        where N - total amount of oscillators in the network and \\f$N_{i}\\f$ - amount of neighbors of oscillator with index \\f$i\\f$.\n\n        @param[in] oscillatory_network (sync): Sync oscillatory network whose structure of connections is required for calculation.\n        @param[in] start_iteration (uint): The first iteration that is used for calculation, if 'None' then the last iteration is used.\n        @param[in] stop_iteration (uint): The last iteration that is used for calculation, if 'None' then 'start_iteration' + 1 is used.\n\n        @return (list) List of levels of local (partial) synchronization (local order parameter evolution).", "id": "f15630:c1:m10"}
{"signature": "def _phase_normalization(self, teta):", "body": "norm_teta = teta;<EOL>while (norm_teta > (<NUM_LIT> * pi)) or (norm_teta < <NUM_LIT:0>):<EOL><INDENT>if (norm_teta > (<NUM_LIT> * pi)):<EOL><INDENT>norm_teta -= <NUM_LIT> * pi;<EOL><DEDENT>else:<EOL><INDENT>norm_teta += <NUM_LIT> * pi;<EOL><DEDENT><DEDENT>return norm_teta<EOL>", "docstring": "!\n        @brief Normalization of phase of oscillator that should be placed between [0; 2 * pi].\n\n        @param[in] teta (double): phase of oscillator.\n\n        @return (double) Normalized phase.", "id": "f15630:c3:m9"}
{"signature": "def __init__(self):", "body": "<EOL>self.membrane_potential      = <NUM_LIT:0.0>;<EOL>self.active_cond_sodium      = <NUM_LIT:0.0>;<EOL>self.inactive_cond_sodium    = <NUM_LIT:0.0>;<EOL>self.active_cond_potassium   = <NUM_LIT:0.0>;<EOL>self.pulse_generation = False;<EOL>self.pulse_generation_time = []<EOL>", "docstring": "!\n        @brief Constructor of central element.", "id": "f15660:c1:m0"}
{"signature": "def __update_peripheral_neurons(self, t, step, next_membrane, next_active_sodium, next_inactive_sodium, next_active_potassium):", "body": "self._membrane_potential = next_membrane[:];<EOL>self._active_cond_sodium = next_active_sodium[:];<EOL>self._inactive_cond_sodium = next_inactive_sodium[:];<EOL>self._active_cond_potassium = next_active_potassium[:];<EOL>for index in range(<NUM_LIT:0>, self._num_osc):<EOL><INDENT>if (self._pulse_generation[index] is False):<EOL><INDENT>if (self._membrane_potential[index] >= <NUM_LIT:0.0>):<EOL><INDENT>self._pulse_generation[index] = True;<EOL>self._pulse_generation_time[index].append(t);<EOL><DEDENT><DEDENT>elif (self._membrane_potential[index] < <NUM_LIT:0.0>):<EOL><INDENT>self._pulse_generation[index] = False;<EOL><DEDENT>if (self._link_weight3[index] == <NUM_LIT:0.0>):<EOL><INDENT>if (self._membrane_potential[index] > self._params.threshold):<EOL><INDENT>self._link_pulse_counter[index] += step;<EOL>if (self._link_pulse_counter[index] >= <NUM_LIT:1> / self._params.eps):<EOL><INDENT>self._link_weight3[index] = self._params.w3;<EOL>self._link_activation_time[index] = t;<EOL><DEDENT><DEDENT><DEDENT>elif ( not ((self._link_activation_time[index] < t) and (t < self._link_activation_time[index] + self._params.deltah)) ):<EOL><INDENT>self._link_weight3[index] = <NUM_LIT:0.0>;<EOL>self._link_pulse_counter[index] = <NUM_LIT:0.0>;<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Update peripheral neurons in line with new values of current in channels.\n\n        @param[in] t (doubles): Current time of simulation.\n        @param[in] step (uint): Step (time duration) during simulation when states of oscillators should be calculated.\n        @param[in] next_membrane (list): New values of membrane potentials for peripheral neurons.\n        @Param[in] next_active_sodium (list): New values of activation conductances of the sodium channels for peripheral neurons.\n        @param[in] next_inactive_sodium (list): New 
values of inactivaton conductances of the sodium channels for peripheral neurons.\n        @param[in] next_active_potassium (list): New values of activation conductances of the potassium channel for peripheral neurons.", "id": "f15660:c2:m5"}
{"signature": "def __repr__(self):", "body": "return \"<STR_LIT>\" % (self.membrane_potential, self.pulse_generation_time)<EOL>", "docstring": "!\n        @brief Returns string that represents central element.", "id": "f15660:c1:m1"}
{"signature": "def hnn_state(self, inputs, t, argv):", "body": "index = argv;<EOL>v = inputs[<NUM_LIT:0>]; <EOL>m = inputs[<NUM_LIT:1>]; <EOL>h = inputs[<NUM_LIT:2>]; <EOL>n = inputs[<NUM_LIT:3>]; <EOL>active_sodium_part = self._params.gNa * (m ** <NUM_LIT:3>) * h * (v - self._params.vNa);<EOL>inactive_sodium_part = self._params.gK * (n ** <NUM_LIT:4>) * (v - self._params.vK);<EOL>active_potassium_part = self._params.gL * (v - self._params.vL);<EOL>Iion = active_sodium_part + inactive_sodium_part + active_potassium_part;<EOL>Iext = <NUM_LIT:0.0>;<EOL>Isyn = <NUM_LIT:0.0>;<EOL>if (index < self._num_osc): <EOL><INDENT>Iext = self._stimulus[index] * self._noise[index];    <EOL>memory_impact1 = <NUM_LIT:0.0>;<EOL>for i in range(<NUM_LIT:0>, len(self._central_element[<NUM_LIT:0>].pulse_generation_time)):<EOL><INDENT>memory_impact1 += self.__alfa_function(t - self._central_element[<NUM_LIT:0>].pulse_generation_time[i], self._params.alfa_inhibitory, self._params.betta_inhibitory);<EOL><DEDENT>memory_impact2 = <NUM_LIT:0.0>;<EOL>for i in range(<NUM_LIT:0>, len(self._central_element[<NUM_LIT:1>].pulse_generation_time)):<EOL><INDENT>memory_impact2 += self.__alfa_function(t - self._central_element[<NUM_LIT:1>].pulse_generation_time[i], self._params.alfa_inhibitory, self._params.betta_inhibitory);<EOL><DEDENT>Isyn = self._params.w2 * (v - self._params.Vsyninh) * memory_impact1 + self._link_weight3[index] * (v - self._params.Vsyninh) * memory_impact2;<EOL><DEDENT>else:<EOL><INDENT>central_index = index - self._num_osc;<EOL>if (central_index == <NUM_LIT:0>):<EOL><INDENT>Iext = self._params.Icn1;   <EOL>memory_impact = <NUM_LIT:0.0>;<EOL>for index_oscillator in range(<NUM_LIT:0>, self._num_osc):<EOL><INDENT>for index_generation in range(<NUM_LIT:0>, len(self._pulse_generation_time[index_oscillator])):<EOL><INDENT>memory_impact += self.__alfa_function(t - self._pulse_generation_time[index_oscillator][index_generation], self._params.alfa_excitatory, 
self._params.betta_excitatory);<EOL><DEDENT><DEDENT>Isyn = self._params.w1 * (v - self._params.Vsynexc) * memory_impact;<EOL><DEDENT>elif (central_index == <NUM_LIT:1>):<EOL><INDENT>Iext = self._params.Icn2;   <EOL>Isyn = <NUM_LIT:0.0>;<EOL><DEDENT>else:<EOL><INDENT>assert <NUM_LIT:0>;<EOL><DEDENT><DEDENT>dv = -Iion + Iext - Isyn;<EOL>potential = v - self._params.vRest;<EOL>am = (<NUM_LIT> - <NUM_LIT:0.1> * potential) / (math.exp(<NUM_LIT> - <NUM_LIT:0.1> * potential) - <NUM_LIT:1.0>);<EOL>ah = <NUM_LIT> * math.exp(-potential / <NUM_LIT>);<EOL>an = (<NUM_LIT:0.1> - <NUM_LIT> * potential) / (math.exp(<NUM_LIT:1.0> - <NUM_LIT:0.1> * potential) - <NUM_LIT:1.0>);<EOL>bm = <NUM_LIT> * math.exp(-potential / <NUM_LIT>);<EOL>bh = <NUM_LIT:1.0> / (math.exp(<NUM_LIT> - <NUM_LIT:0.1> * potential) + <NUM_LIT:1.0>);<EOL>bn = <NUM_LIT> * math.exp(-potential / <NUM_LIT>);<EOL>dm = am * (<NUM_LIT:1.0> - m) - bm * m;<EOL>dh = ah * (<NUM_LIT:1.0> - h) - bh * h;<EOL>dn = an * (<NUM_LIT:1.0> - n) - bn * n;<EOL>return [dv, dm, dh, dn]<EOL>", "docstring": "!\n        @brief Returns new values of excitatory and inhibitory parts of oscillator and potential of oscillator.\n\n        @param[in] inputs (list): States of oscillator for integration [v, m, h, n] (see description below).\n        @param[in] t (double): Current time of simulation.\n        @param[in] argv (tuple): Extra arguments that are not used for integration - index of oscillator.\n\n        @return (list) new values of oscillator [v, m, h, n], where:\n                v - membrane potantial of oscillator,\n                m - activation conductance of the sodium channel,\n                h - inactication conductance of the sodium channel,\n                n - activation conductance of the potassium channel.", "id": "f15660:c2:m7"}
{"signature": "def __init__(self, num_osc, stimulus = None, parameters = None, type_conn = None, type_conn_represent = conn_represent.MATRIX, ccore = True):", "body": "super().__init__(num_osc, conn_type.NONE, type_conn_represent);<EOL>if (stimulus is None):<EOL><INDENT>self._stimulus = [<NUM_LIT:0.0>] * num_osc;<EOL><DEDENT>else:<EOL><INDENT>self._stimulus = stimulus;<EOL><DEDENT>if (parameters is not None):<EOL><INDENT>self._params = parameters;<EOL><DEDENT>else:<EOL><INDENT>self._params = hhn_parameters();<EOL><DEDENT>self.__ccore_hhn_pointer = None;<EOL>self.__ccore_hhn_dynamic_pointer = None;<EOL>if ( (ccore is True) and ccore_library.workable() ):<EOL><INDENT>self.__ccore_hhn_pointer = wrapper.hhn_create(num_osc, self._params);<EOL><DEDENT>else:<EOL><INDENT>self._membrane_dynamic_pointer = None;        <EOL>self._membrane_potential        = [<NUM_LIT:0.0>] * self._num_osc;<EOL>self._active_cond_sodium        = [<NUM_LIT:0.0>] * self._num_osc;<EOL>self._inactive_cond_sodium      = [<NUM_LIT:0.0>] * self._num_osc;<EOL>self._active_cond_potassium     = [<NUM_LIT:0.0>] * self._num_osc;<EOL>self._link_activation_time      = [<NUM_LIT:0.0>] * self._num_osc;<EOL>self._link_pulse_counter        = [<NUM_LIT:0.0>] * self._num_osc;<EOL>self._link_deactivation_time    = [<NUM_LIT:0.0>] * self._num_osc;<EOL>self._link_weight3              = [<NUM_LIT:0.0>] * self._num_osc;<EOL>self._pulse_generation_time     = [ [] for i in range(self._num_osc) ];<EOL>self._pulse_generation          = [False] * self._num_osc;<EOL>self._noise = [random.random() * <NUM_LIT> - <NUM_LIT:1.0> for i in range(self._num_osc)];<EOL>self._central_element = [central_element(), central_element()];<EOL><DEDENT>", "docstring": "!\n        @brief Constructor of oscillatory network based on Hodgkin-Huxley neuron model.\n\n        @param[in] num_osc (uint): Number of peripheral oscillators in the network.\n        @param[in] stimulus (list): List of stimulus for oscillators, number of stimulus should be 
equal to number of peripheral oscillators.\n        @param[in] parameters (hhn_parameters): Parameters of the network.\n        @param[in] type_conn (conn_type): Type of connections between oscillators in the network (ignored for this type of network).\n        @param[in] type_conn_represent (conn_represent): Internal representation of connection in the network: matrix or list.\n        @param[in] ccore (bool): If 'True' then CCORE is used (C/C++ implementation of the model).", "id": "f15660:c2:m0"}
{"signature": "def allocate_spike_ensembles(self):", "body": "if self.__ccore_pcnn_dynamic_pointer is not None:<EOL><INDENT>return wrapper.pcnn_dynamic_allocate_spike_ensembles(self.__ccore_pcnn_dynamic_pointer)<EOL><DEDENT>spike_ensembles = []<EOL>number_oscillators = len(self.__dynamic[<NUM_LIT:0>])<EOL>for t in range(len(self.__dynamic)):<EOL><INDENT>spike_ensemble = []<EOL>for index in range(number_oscillators):<EOL><INDENT>if self.__dynamic[t][index] == self.__OUTPUT_TRUE:<EOL><INDENT>spike_ensemble.append(index)<EOL><DEDENT><DEDENT>if len(spike_ensemble) > <NUM_LIT:0>:<EOL><INDENT>spike_ensembles.append(spike_ensemble)<EOL><DEDENT><DEDENT>return spike_ensembles<EOL>", "docstring": "!\n        @brief Analyses output dynamic of network and allocates spikes on each iteration as a list of indexes of oscillators.\n        @details Each allocated spike ensemble represents list of indexes of oscillators whose output is active.\n\n        @return (list) Spike ensembles of oscillators.", "id": "f15661:c1:m6"}
{"signature": "def allocate_time_signal(self):", "body": "if self.__ccore_pcnn_dynamic_pointer is not None:<EOL><INDENT>return wrapper.pcnn_dynamic_allocate_time_signal(self.__ccore_pcnn_dynamic_pointer)<EOL><DEDENT>signal_vector_information = []<EOL>for t in range(<NUM_LIT:0>, len(self.__dynamic)):<EOL><INDENT>signal_vector_information.append(sum(self.__dynamic[t]))<EOL><DEDENT>return signal_vector_information<EOL>", "docstring": "!\n        @brief Analyses output dynamic and calculates time signal (signal vector information) of network output.\n\n        @return (list) Time signal of network output.", "id": "f15661:c1:m7"}
{"signature": "def __del__(self):", "body": "if self.__ccore_pcnn_dynamic_pointer is not None:<EOL><INDENT>wrapper.pcnn_dynamic_destroy(self.__ccore_pcnn_dynamic_pointer)<EOL><DEDENT>", "docstring": "!\n        @brief Default destructor of PCNN dynamic.", "id": "f15661:c1:m3"}
{"signature": "@property<EOL><INDENT>def time(self):<DEDENT>", "body": "if self.__ccore_pcnn_dynamic_pointer is not None:<EOL><INDENT>return wrapper.pcnn_dynamic_get_time(self.__ccore_pcnn_dynamic_pointer)<EOL><DEDENT>return list(range(len(self)))<EOL>", "docstring": "!\n        @brief (list) Returns sampling times when dynamic is measured during simulation.", "id": "f15661:c1:m1"}
{"signature": "@staticmethod<EOL><INDENT>def show_output_dynamic(pcnn_output_dynamic, separate_representation = False):<DEDENT>", "body": "draw_dynamics(pcnn_output_dynamic.time, pcnn_output_dynamic.output, x_title = \"<STR_LIT:t>\", y_title = \"<STR_LIT>\", separate = separate_representation)<EOL>", "docstring": "!\n        @brief Shows output dynamic (output of each oscillator) during simulation.\n\n        @param[in] pcnn_output_dynamic (pcnn_dynamic): Output dynamic of the pulse-coupled neural network.\n        @param[in] separate_representation (list): Consists of lists of oscillators where each such list consists of oscillator indexes that will be shown on separated stage.", "id": "f15661:c2:m1"}
{"signature": "def allocate_sync_ensembles(self, tolerance = <NUM_LIT:0.1>):", "body": "return pyclustering.utils.allocate_sync_ensembles(self.__amplitude, tolerance, <NUM_LIT:0.0>)<EOL>", "docstring": "!\n        @brief Allocate clusters in line with ensembles of synchronous oscillators where each synchronous ensemble corresponds to only one cluster.\n\n        @param[in] tolerance (double): Maximum error for allocation of synchronous ensemble oscillators.\n\n        @return (list) Grours of indexes of synchronous oscillators, for example, [ [index_osc1, index_osc3], [index_osc2], [index_osc4, index_osc5] ].", "id": "f15662:c0:m5"}
{"signature": "def __init__(self, num_osc, factor_frequency = <NUM_LIT:1.0>, factor_radius = <NUM_LIT:1.0>, factor_coupling = <NUM_LIT:1.0>, type_conn = conn_type.ALL_TO_ALL, representation = conn_represent.MATRIX):", "body": "super().__init__(num_osc, type_conn, representation);<EOL>self.__frequency = factor_frequency if isinstance(factor_frequency, list) else [ fsync_network.__DEFAULT_FREQUENCY_VALUE * factor_frequency for _ in range(num_osc) ];<EOL>self.__radius = factor_radius if isinstance(factor_radius, list) else [ fsync_network.__DEFAULT_RADIUS_VALUE * factor_radius for _ in range(num_osc) ];<EOL>self.__coupling_strength = fsync_network.__DEFAULT_COUPLING_STRENGTH * factor_coupling;<EOL>self.__properties = [ self.__oscillator_property(index) for index in range(self._num_osc) ];<EOL>random.seed();<EOL>self.__amplitude = [ random.random() for _ in range(num_osc) ]<EOL>", "docstring": "!\n        @brief Constructor of oscillatory network based on synchronization Kuramoto model and Landau-Stuart oscillator.\n\n        @param[in] num_osc (uint): Amount oscillators in the network.\n        @param[in] factor_frequency (double|list): Frequency of oscillators, it can be specified as common value for all oscillators by\n                    single double value and for each separately by list.\n        @param[in] factor_radius (double|list): Radius of oscillators that affects amplitude, it can be specified as common value for all oscillators by\n                    single double value and for each separately by list.\n        @param[in] factor_coupling (double): Coupling strength between oscillators.\n        @param[in] type_conn (conn_type): Type of connection between oscillators in the network (all-to-all, grid, bidirectional list, etc.).\n        @param[in] representation (conn_represent): Internal representation of connection in the network: matrix or list.", "id": "f15662:c2:m0"}
{"signature": "def __landau_stuart(self, amplitude, index):", "body": "return (self.__properties[index] - numpy.absolute(amplitude) ** <NUM_LIT:2>) * amplitude<EOL>", "docstring": "!\n        @brief Calculate Landau-Stuart state.\n\n        @param[in] amplitude (double): Current amplitude of oscillator.\n        @param[in] index (uint): Oscillator index whose state is calculated. \n\n        @return (double) Landau-Stuart state.", "id": "f15662:c2:m4"}
{"signature": "@property<EOL><INDENT>def time(self):<DEDENT>", "body": "return self.__time<EOL>", "docstring": "!\n        @brief (list) Returns time-points corresponds to dynamic-points points.", "id": "f15662:c0:m2"}
{"signature": "def simulate(self, steps, time, collect_dynamic = False):", "body": "dynamic_amplitude, dynamic_time = ([], []) if collect_dynamic is False else ([self.__amplitude], [<NUM_LIT:0>]);<EOL>step = time / steps;<EOL>int_step = step / <NUM_LIT>;<EOL>for t in numpy.arange(step, time + step, step):<EOL><INDENT>self.__amplitude = self.__calculate(t, step, int_step);<EOL>if collect_dynamic is True:<EOL><INDENT>dynamic_amplitude.append([ numpy.real(amplitude)[<NUM_LIT:0>] for amplitude in self.__amplitude ]);<EOL>dynamic_time.append(t);<EOL><DEDENT><DEDENT>if collect_dynamic is False:<EOL><INDENT>dynamic_amplitude.append([ numpy.real(amplitude)[<NUM_LIT:0>] for amplitude in self.__amplitude ]);<EOL>dynamic_time.append(time);<EOL><DEDENT>output_sync_dynamic = fsync_dynamic(dynamic_amplitude, dynamic_time);<EOL>return output_sync_dynamic<EOL>", "docstring": "!\n        @brief Performs static simulation of oscillatory network.\n\n        @param[in] steps (uint): Number simulation steps.\n        @param[in] time (double): Time of simulation.\n        @param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.\n\n        @return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' is True, than return dynamic for the whole simulation time,\n                 otherwise returns only last values (last step of simulation) of output dynamic.\n\n        @see simulate()\n        @see simulate_dynamic()", "id": "f15662:c2:m1"}
{"signature": "def __synchronization_mechanism(self, amplitude, index):", "body": "sync_influence = <NUM_LIT:0.0>;<EOL>for k in range(self._num_osc):<EOL><INDENT>if self.has_connection(index, k) is True:<EOL><INDENT>amplitude_neighbor = numpy.array(self.__amplitude[k], dtype = numpy.complex128, ndmin = <NUM_LIT:1>);<EOL>sync_influence += amplitude_neighbor - amplitude;<EOL><DEDENT><DEDENT>return sync_influence * self.__coupling_strength / self._num_osc<EOL>", "docstring": "!\n        @brief Calculate synchronization part using Kuramoto synchronization mechanism.\n\n        @param[in] amplitude (double): Current amplitude of oscillator.\n        @param[in] index (uint): Oscillator index whose synchronization influence is calculated.\n\n        @return (double) Synchronization influence for the specified oscillator.", "id": "f15662:c2:m5"}
{"signature": "def __init__(self, amplitude, time):", "body": "self.__amplitude = amplitude;<EOL>self.__time = time<EOL>", "docstring": "!\n        @brief Constructor of Sync dynamic in frequency domain.\n\n        @param[in] amplitude (list): Dynamic of oscillators on each step of simulation.\n        @param[in] time (list): Simulation time where each time-point corresponds to amplitude-point.", "id": "f15662:c0:m0"}
{"signature": "@property<EOL><INDENT>def output(self):<DEDENT>", "body": "return self.__amplitude<EOL>", "docstring": "!\n        @brief (list) Returns output dynamic of the Sync network (amplitudes of each oscillator in the network) during simulation.", "id": "f15662:c0:m1"}
{"signature": "def extract_number_oscillations(self, index, amplitude_threshold):", "body": "return pyclustering.utils.extract_number_oscillations(self.__amplitude, index, amplitude_threshold)<EOL>", "docstring": "!\n        @brief Extracts number of oscillations of specified oscillator.\n\n        @param[in] index (uint): Index of oscillator whose dynamic is considered.\n        @param[in] amplitude_threshold (double): Amplitude threshold when oscillation is taken into account, for example,\n                    when oscillator amplitude is greater than threshold then oscillation is incremented.\n\n        @return (uint) Number of oscillations of specified oscillator.", "id": "f15662:c0:m6"}
{"signature": "def _neuron_states(self, inputs, t, argv):", "body": "xi = inputs[<NUM_LIT:0>];<EOL>index = argv;<EOL>impact = self._weight[index][index] * self._outputs[index];<EOL>for i in range(<NUM_LIT:0>, self._num_osc, <NUM_LIT:1>):<EOL><INDENT>if (self.has_connection(i, index)):<EOL><INDENT>impact += self._weight[index][i] * self._outputs[i];<EOL><DEDENT><DEDENT>x = -xi + impact;<EOL>if (xi > <NUM_LIT:1>): self._outputs_buffer[index] = <NUM_LIT:1>; <EOL>if (xi < -<NUM_LIT:1>): self._outputs_buffer[index] = -<NUM_LIT:1>;<EOL>return x<EOL>", "docstring": "!\n        @brief Returns new value of the neuron (oscillator).\n\n        @param[in] inputs (list): Initial values (current) of the neuron - excitatory.\n        @param[in] t (double): Current time of simulation.\n        @param[in] argv (tuple): Extra arguments that are not used for integration - index of the neuron.\n\n        @return (double) New value of the neuron.", "id": "f15663:c2:m5"}
{"signature": "@property<EOL><INDENT>def states(self):<DEDENT>", "body": "return self._states<EOL>", "docstring": "!\n        @brief Return current states of neurons.\n\n        @return (list) States of neurons.", "id": "f15663:c2:m2"}
{"signature": "@outputs.setter<EOL><INDENT>def outputs(self, values):<DEDENT>", "body": "self._outputs = [val for val in values];<EOL>self._outputs_buffer = [val for val in values]<EOL>", "docstring": "!\n        @brief Sets outputs of neurons.", "id": "f15663:c2:m1"}
{"signature": "def __len__(self):", "body": "return len(self._dynamic)<EOL>", "docstring": "!\n        @brief (uint) Returns number of simulation steps that are stored in dynamic.", "id": "f15663:c0:m3"}
{"signature": "@states.setter<EOL><INDENT>def states(self, values):<DEDENT>", "body": "self._states = [val for val in values]<EOL>", "docstring": "!\n        @brief Set current states of neurons.", "id": "f15663:c2:m3"}
{"signature": "def _calculate_states(self, solution, t, step, int_step):", "body": "next_states = [<NUM_LIT:0>] * self._num_osc;<EOL>for index in range (<NUM_LIT:0>, self._num_osc, <NUM_LIT:1>):            <EOL><INDENT>result = odeint(self._neuron_states, self._states[index], numpy.arange(t - step, t, int_step), (index , ));<EOL>next_states[index] = result[len(result) - <NUM_LIT:1>][<NUM_LIT:0>];<EOL><DEDENT>self._outputs = [val for val in self._outputs_buffer];<EOL>return next_states<EOL>", "docstring": "!\n        @brief Calculates new states for neurons using differential calculus. Returns new states for neurons.\n\n        @param[in] solution (solve_type): Type solver of the differential equation.\n        @param[in] t (double): Current time of simulation.\n        @param[in] step (double): Step of solution at the end of which states of oscillators should be calculated.\n        @param[in] int_step (double): Step differentiation that is used for solving differential equation.\n\n        @return (list) New states for neurons.", "id": "f15663:c2:m8"}
{"signature": "@staticmethod<EOL><INDENT>def show_pattern(syncpr_output_dynamic, image_height, image_width):<DEDENT>", "body": "number_pictures = len(syncpr_output_dynamic);<EOL>iteration_math_step = <NUM_LIT:1.0>;<EOL>if (number_pictures > <NUM_LIT:50>):<EOL><INDENT>iteration_math_step = number_pictures / <NUM_LIT>;<EOL>number_pictures = <NUM_LIT:50>;<EOL><DEDENT>number_cols = int(numpy.ceil(number_pictures ** <NUM_LIT:0.5>));<EOL>number_rows = int(numpy.ceil(number_pictures / number_cols));<EOL>real_index = <NUM_LIT:0>, <NUM_LIT:0>;<EOL>double_indexer = True;<EOL>if ( (number_cols == <NUM_LIT:1>) or (number_rows == <NUM_LIT:1>) ):<EOL><INDENT>real_index = <NUM_LIT:0>;<EOL>double_indexer = False;<EOL><DEDENT>(_, axarr) = plt.subplots(number_rows, number_cols);<EOL>if (number_pictures > <NUM_LIT:1>):<EOL><INDENT>plt.setp([ax for ax in axarr], visible = False);<EOL><DEDENT>iteration_display = <NUM_LIT:0.0>;<EOL>for iteration in range(len(syncpr_output_dynamic)):<EOL><INDENT>if (iteration >= iteration_display):<EOL><INDENT>iteration_display += iteration_math_step;<EOL>ax_handle = axarr;<EOL>if (number_pictures > <NUM_LIT:1>):<EOL><INDENT>ax_handle = axarr[real_index];<EOL><DEDENT>syncpr_visualizer.__show_pattern(ax_handle, syncpr_output_dynamic, image_height, image_width, iteration);<EOL>if (double_indexer is True):<EOL><INDENT>real_index = real_index[<NUM_LIT:0>], real_index[<NUM_LIT:1>] + <NUM_LIT:1>;<EOL>if (real_index[<NUM_LIT:1>] >= number_cols):<EOL><INDENT>real_index = real_index[<NUM_LIT:0>] + <NUM_LIT:1>, <NUM_LIT:0>; <EOL><DEDENT><DEDENT>else:<EOL><INDENT>real_index += <NUM_LIT:1>;<EOL><DEDENT><DEDENT><DEDENT>plt.show()<EOL>", "docstring": "!\n        @brief Displays evolution of phase oscillators as set of patterns where the last one means final result of recognition.\n\n        @param[in] syncpr_output_dynamic (syncpr_dynamic): Output dynamic of a syncpr network.\n        @param[in] image_height (uint): Height of the pattern (image_height * image_width 
should be equal to number of oscillators).\n        @param[in] image_width (uint): Width of the pattern.", "id": "f15664:c1:m0"}
{"signature": "def train(self, samples):", "body": "<EOL>for pattern in samples:<EOL><INDENT>self.__validate_pattern(pattern);<EOL><DEDENT>if (self._ccore_network_pointer is not None):<EOL><INDENT>return wrapper.syncpr_train(self._ccore_network_pointer, samples);<EOL><DEDENT>length = len(self);<EOL>number_samples = len(samples);<EOL>for i in range(length):<EOL><INDENT>for j in range(i + <NUM_LIT:1>, len(self), <NUM_LIT:1>):<EOL><INDENT>for p in range(number_samples):<EOL><INDENT>value1 = samples[p][i];<EOL>value2 = samples[p][j];<EOL>self._coupling[i][j] += value1 * value2;<EOL><DEDENT>self._coupling[i][j] /= length;<EOL>self._coupling[j][i] = self._coupling[i][j];<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Trains syncpr network using Hebbian rule for adjusting strength of connections between oscillators during training.\n\n        @param[in] samples (list): list of patterns where each pattern is represented by list of features that are equal to [-1; 1].", "id": "f15664:c2:m3"}
{"signature": "@staticmethod<EOL><INDENT>def animate_pattern_recognition(syncpr_output_dynamic, image_height, image_width, animation_velocity = <NUM_LIT>, title = None, save_movie = None):<DEDENT>", "body": "figure = plt.figure();<EOL>def init_frame():<EOL><INDENT>return frame_generation(<NUM_LIT:0>);<EOL><DEDENT>def frame_generation(index_dynamic):<EOL><INDENT>figure.clf();<EOL>if (title is not None):<EOL><INDENT>figure.suptitle(title, fontsize = <NUM_LIT>, fontweight = '<STR_LIT>')<EOL><DEDENT>ax1 = figure.add_subplot(<NUM_LIT>, projection='<STR_LIT>');<EOL>ax2 = figure.add_subplot(<NUM_LIT>);<EOL>dynamic = syncpr_output_dynamic.output[index_dynamic];<EOL>artist1, = ax1.plot(dynamic, [<NUM_LIT:1.0>] * len(dynamic), marker = '<STR_LIT:o>', color = '<STR_LIT>', ls = '<STR_LIT>');<EOL>artist2 = syncpr_visualizer.__show_pattern(ax2, syncpr_output_dynamic, image_height, image_width, index_dynamic);<EOL>return [ artist1, artist2 ];<EOL><DEDENT>cluster_animation = animation.FuncAnimation(figure, frame_generation, len(syncpr_output_dynamic), interval = animation_velocity, init_func = init_frame, repeat_delay = <NUM_LIT>);<EOL>if (save_movie is not None):<EOL><INDENT>plt.rcParams['<STR_LIT>'] = '<STR_LIT>';<EOL>ffmpeg_writer = animation.FFMpegWriter();<EOL>cluster_animation.save(save_movie, writer = ffmpeg_writer, fps = <NUM_LIT:15>);<EOL>", "docstring": "!\n        @brief Shows animation of pattern recognition process that has been preformed by the oscillatory network.\n\n        @param[in] syncpr_output_dynamic (syncpr_dynamic): Output dynamic of a syncpr network.\n        @param[in] image_height (uint): Height of the pattern (image_height * image_width should be equal to number of oscillators).\n        @param[in] image_width (uint): Width of the pattern.\n        @param[in] animation_velocity (uint): Interval between frames in milliseconds.\n        @param[in] title (string): Title of the animation that is displayed on a figure if it is specified.\n        @param[in] 
save_movie (string): If it is specified then animation will be stored to file that is specified in this parameter.", "id": "f15664:c1:m1"}
{"signature": "def simulate_dynamic(self, pattern, order = <NUM_LIT>, solution = solve_type.RK4, collect_dynamic = False, step = <NUM_LIT:0.1>, int_step = <NUM_LIT>, threshold_changes = <NUM_LIT>):", "body": "self.__validate_pattern(pattern);<EOL>if (self._ccore_network_pointer is not None):<EOL><INDENT>ccore_instance_dynamic = wrapper.syncpr_simulate_dynamic(self._ccore_network_pointer, pattern, order, solution, collect_dynamic, step);<EOL>return syncpr_dynamic(None, None, ccore_instance_dynamic);<EOL><DEDENT>for i in range(<NUM_LIT:0>, len(pattern), <NUM_LIT:1>):<EOL><INDENT>if (pattern[i] > <NUM_LIT:0.0>):<EOL><INDENT>self._phases[i] = <NUM_LIT:0.0>;<EOL><DEDENT>else:<EOL><INDENT>self._phases[i] = math.pi / <NUM_LIT>;<EOL><DEDENT><DEDENT>time_counter = <NUM_LIT:0>;<EOL>previous_order = <NUM_LIT:0>;<EOL>current_order = self.__calculate_memory_order(pattern);<EOL>dyn_phase = [];<EOL>dyn_time = [];<EOL>if (collect_dynamic == True):<EOL><INDENT>dyn_phase.append(self._phases);<EOL>dyn_time.append(<NUM_LIT:0>);<EOL><DEDENT>while (current_order < order):<EOL><INDENT>self._phases = self._calculate_phases(solution, time_counter, step, int_step);<EOL>time_counter += step;<EOL>if (collect_dynamic == True):<EOL><INDENT>dyn_phase.append(self._phases);<EOL>dyn_time.append(time_counter);<EOL><DEDENT>previous_order = current_order;<EOL>current_order = self.__calculate_memory_order(pattern);<EOL>if (abs(current_order - previous_order) < threshold_changes):<EOL><INDENT>break;<EOL><DEDENT><DEDENT>if (collect_dynamic != True):<EOL><INDENT>dyn_phase.append(self._phases);<EOL>dyn_time.append(time_counter);<EOL><DEDENT>output_sync_dynamic = syncpr_dynamic(dyn_phase, dyn_time, None);<EOL>return output_sync_dynamic<EOL>", "docstring": "!\n        @brief Performs dynamic simulation of the network until stop condition is not reached.\n        @details In other words network performs pattern recognition during simulation. 
\n                 Stop condition is defined by input argument 'order' that represents memory order, but\n                 process of simulation can be stopped if convergance rate is low whose threshold is defined\n                 by the argument 'threshold_changes'.\n\n        @param[in] pattern (list): Pattern for recognition represented by list of features that are equal to [-1; 1].\n        @param[in] order (double): Order of process synchronization, distributed 0..1.\n        @param[in] solution (solve_type): Type of solution.\n        @param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.\n        @param[in] step (double): Time step of one iteration of simulation.\n        @param[in] int_step (double): Integration step, should be less than step.\n        @param[in] threshold_changes (double): Additional stop condition that helps prevent infinite simulation, defines limit of changes of oscillators between current and previous steps.\n\n        @return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,\n                otherwise returns only last values (last step of simulation) of dynamic.\n\n        @see simulate()\n        @see simulate_static()", "id": "f15664:c2:m5"}
{"signature": "def __validate_pattern(self, pattern):", "body": "if (len(pattern) != len(self)):<EOL><INDENT>raise NameError('<STR_LIT>' + len(pattern) + '<STR_LIT>');<EOL><DEDENT>for feature in pattern:<EOL><INDENT>if ( (feature != -<NUM_LIT:1.0>) and (feature != <NUM_LIT:1.0>) ):<EOL><INDENT>raise NameError('<STR_LIT>' + feature + '<STR_LIT>');<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Validates pattern.\n        @details Throws exception if length of pattern is not equal to size of the network or if it consists feature with value that are not equal to [-1; 1].\n\n        @param[in] pattern (list): Pattern for recognition represented by list of features that are equal to [-1; 1].", "id": "f15664:c2:m10"}
{"signature": "def simulate(self, steps, time, pattern, solution = solve_type.RK4, collect_dynamic = True):", "body": "return self.simulate_static(steps, time, pattern, solution, collect_dynamic)<EOL>", "docstring": "!\n        @brief Performs static simulation of syncpr oscillatory network.\n        @details In other words network performs pattern recognition during simulation.\n\n        @param[in] steps (uint): Number steps of simulations during simulation.\n        @param[in] time (double): Time of simulation.\n        @param[in] pattern (list): Pattern for recognition represented by list of features that are equal to [-1; 1].\n        @param[in] solution (solve_type): Type of solver that should be used for simulation.\n        @param[in] collect_dynamic (bool): If True - returns whole dynamic of oscillatory network, otherwise returns only last values of dynamics.\n\n        @return (list) Dynamic of oscillatory network. If argument 'collect_dynamic' = True, than return dynamic for the whole simulation time,\n                otherwise returns only last values (last step of simulation) of dynamic.\n\n        @see simulate_dynamic()\n        @see simulate_static()", "id": "f15664:c2:m4"}
{"signature": "def __len__(self):", "body": "if (self._ccore_network_pointer is not None):<EOL><INDENT>return wrapper.syncpr_get_size(self._ccore_network_pointer);<EOL><DEDENT>else:<EOL><INDENT>return self._num_osc;<EOL><DEDENT>", "docstring": "!\n        @brief Returns size of the network.", "id": "f15664:c2:m2"}
{"signature": "def __create_weights_all_to_all(self, stimulus):", "body": "for i in range(len(stimulus)):<EOL><INDENT>for j in range(i + <NUM_LIT:1>, len(stimulus)):<EOL><INDENT>weight = self.__calculate_weight(stimulus[i], stimulus[j])<EOL>self.__weights[i][j] = weight<EOL>self.__weights[j][i] = weight<EOL>self.__weights_summary[i] += weight<EOL>self.__weights_summary[j] += weight<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Create weight all-to-all structure between neurons in line with stimulus.\n\n        @param[in] stimulus (list): External stimulus for the chaotic neural network.", "id": "f15665:c3:m6"}
{"signature": "def __create_surface(self, dimension):", "body": "rcParams['<STR_LIT>'] = ['<STR_LIT>']<EOL>rcParams['<STR_LIT>'] = <NUM_LIT:12><EOL>fig = plt.figure()<EOL>axes = None<EOL>if dimension == <NUM_LIT:2>:<EOL><INDENT>axes = fig.add_subplot(<NUM_LIT>)<EOL><DEDENT>elif dimension == <NUM_LIT:3>:<EOL><INDENT>axes = fig.gca(projection='<STR_LIT>')<EOL><DEDENT>surface_font = FontProperties()<EOL>surface_font.set_name('<STR_LIT>')<EOL>surface_font.set_size('<STR_LIT>')<EOL>return (fig, axes)<EOL>", "docstring": "!\n        @brief Prepares surface for showing network structure in line with specified dimension.\n\n        @param[in] dimension (uint): Dimension of processed data (external stimulus).\n\n        @return (tuple) Description of surface for drawing network structure.", "id": "f15665:c3:m10"}
{"signature": "@staticmethod<EOL><INDENT>def show_observation_matrix(cnn_output_dynamic):<DEDENT>", "body": "observation_matrix = numpy.array(cnn_output_dynamic.allocate_observation_matrix())<EOL>plt.imshow(observation_matrix.T, cmap = plt.get_cmap('<STR_LIT>'), interpolation='<STR_LIT:None>', vmin = <NUM_LIT:0.0>, vmax = <NUM_LIT:1.0>)<EOL>plt.show()<EOL>", "docstring": "!\n        @brief Shows observation matrix as black/white blocks.\n        @details This type of visualization is convenient for observing allocated clusters.\n\n        @param[in] cnn_output_dynamic (cnn_dynamic): Output dynamic of the chaotic neural network.\n\n        @see show_output_dynamic\n        @see show_dynamic_matrix", "id": "f15665:c2:m2"}
{"signature": "def __create_weights(self, stimulus):", "body": "self.__average_distance = average_neighbor_distance(stimulus, self.__amount_neighbors)<EOL>self.__weights = [ [ <NUM_LIT:0.0> for _ in range(len(stimulus)) ] for _ in range(len(stimulus)) ]<EOL>self.__weights_summary = [ <NUM_LIT:0.0> for _ in range(self.__num_osc) ]<EOL>if self.__conn_type == type_conn.ALL_TO_ALL:<EOL><INDENT>self.__create_weights_all_to_all(stimulus)<EOL><DEDENT>elif self.__conn_type == type_conn.TRIANGULATION_DELAUNAY:<EOL><INDENT>self.__create_weights_delaunay_triangulation(stimulus)<EOL><DEDENT>", "docstring": "!\n        @brief Create weights between neurons in line with stimulus.\n\n        @param[in] stimulus (list): External stimulus for the chaotic neural network.", "id": "f15665:c3:m5"}
{"signature": "def simulate(self, steps, stimulus):", "body": "self.__create_weights(stimulus)<EOL>self.__location = stimulus<EOL>dynamic = cnn_dynamic([], [])<EOL>dynamic.output.append(self.__output)<EOL>dynamic.time.append(<NUM_LIT:0>)<EOL>for step in range(<NUM_LIT:1>, steps, <NUM_LIT:1>):<EOL><INDENT>self.__output = self.__calculate_states()<EOL>dynamic.output.append(self.__output)<EOL>dynamic.time.append(step)<EOL><DEDENT>return dynamic<EOL>", "docstring": "!\n        @brief Simulates chaotic neural network with extrnal stimulus during specified steps.\n        @details Stimulus are considered as a coordinates of neurons and in line with that weights\n                 are initialized.\n\n        @param[in] steps (uint): Amount of steps for simulation.\n        @param[in] stimulus (list): Stimulus that are used for simulation.\n\n        @return (cnn_dynamic) Output dynamic of the chaotic neural network.", "id": "f15665:c3:m2"}
{"signature": "def __calculate_states(self):", "body": "output = [ <NUM_LIT:0.0> for _ in range(self.__num_osc) ]<EOL>for i in range(self.__num_osc):<EOL><INDENT>output[i] = self.__neuron_evolution(i)<EOL><DEDENT>return output<EOL>", "docstring": "!\n        @brief Calculates new state of each neuron.\n        @detail There is no any assignment.\n\n        @return (list) Returns new states (output).", "id": "f15665:c3:m3"}
{"signature": "def __len__(self):", "body": "return self.__num_osc<EOL>", "docstring": "!\n        @brief Returns size of the chaotic neural network that is defined by amount of neurons.", "id": "f15665:c3:m1"}
{"signature": "def _competition(self, x):", "body": "index = <NUM_LIT:0><EOL>minimum = euclidean_distance_square(self._weights[<NUM_LIT:0>], x)<EOL>for i in range(<NUM_LIT:1>, self._size, <NUM_LIT:1>):<EOL><INDENT>candidate = euclidean_distance_square(self._weights[i], x)<EOL>if candidate < minimum:<EOL><INDENT>index = i<EOL>minimum = candidate<EOL><DEDENT><DEDENT>return index<EOL>", "docstring": "!\n        @brief Calculates neuron winner (distance, neuron index).\n\n        @param[in] x (list): Input pattern from the input data set, for example it can be coordinates of point.\n\n        @return (uint) Returns index of neuron that is winner.", "id": "f15666:c3:m14"}
{"signature": "def __len__(self):", "body": "return self._size<EOL>", "docstring": "!\n        @brief Returns size of the network that defines by amount of neuron in it.\n\n        @return (uint) Size of self-organized map (amount of neurons).", "id": "f15666:c3:m6"}
{"signature": "def __init__(self):", "body": "<EOL>self.init_type = type_init.uniform_grid<EOL>self.init_radius = None<EOL>self.init_learn_rate = <NUM_LIT:0.1><EOL>self.adaptation_threshold = <NUM_LIT><EOL>", "docstring": "!\n        @brief Constructor container of SOM parameters.", "id": "f15666:c2:m0"}
{"signature": "def __initialize_distances(self, size, location):", "body": "sqrt_distances = [ [ [] for i in range(size) ] for j in range(size) ]<EOL>for i in range(size):<EOL><INDENT>for j in range(i, size, <NUM_LIT:1>):<EOL><INDENT>dist = euclidean_distance_square(location[i], location[j])<EOL>sqrt_distances[i][j] = dist<EOL>sqrt_distances[j][i] = dist<EOL><DEDENT><DEDENT>return sqrt_distances<EOL>", "docstring": "!\n        @brief Initialize distance matrix in SOM grid.\n\n        @param[in] size (uint): Amount of neurons in the network.\n        @param[in] location (list): List of coordinates of each neuron in the network.\n\n        @return (list) Distance matrix between neurons in the network.", "id": "f15666:c3:m11"}
{"signature": "def __setstate__(self, som_state):", "body": "if som_state['<STR_LIT>'] is True and ccore_library.workable():<EOL><INDENT>self.__upload_dump_to_ccore(som_state['<STR_LIT:state>'])<EOL><DEDENT>else:<EOL><INDENT>self.__upload_dump_to_python(som_state['<STR_LIT:state>'])<EOL><DEDENT>", "docstring": "@brief Set state of SOM network that can be used to load network.", "id": "f15666:c3:m8"}
{"signature": "def get_density_matrix(self, surface_divider = <NUM_LIT>):", "body": "if self.__ccore_som_pointer is not None:<EOL><INDENT>self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)<EOL><DEDENT>density_matrix = [[<NUM_LIT:0>] * self._cols for i in range(self._rows)]<EOL>dimension = len(self._weights[<NUM_LIT:0>])<EOL>dim_max = [ float('<STR_LIT>') ] * dimension<EOL>dim_min = [ float('<STR_LIT>') ] * dimension<EOL>for weight in self._weights:<EOL><INDENT>for index_dim in range(dimension):<EOL><INDENT>if weight[index_dim] > dim_max[index_dim]:<EOL><INDENT>dim_max[index_dim] = weight[index_dim]<EOL><DEDENT>if weight[index_dim] < dim_min[index_dim]:<EOL><INDENT>dim_min[index_dim] = weight[index_dim]<EOL><DEDENT><DEDENT><DEDENT>radius = [<NUM_LIT:0.0>] * len(self._weights[<NUM_LIT:0>])<EOL>for index_dim in range(dimension):<EOL><INDENT>radius[index_dim] = ( dim_max[index_dim] - dim_min[index_dim] ) / surface_divider<EOL><DEDENT>for point in self._data:<EOL><INDENT>for index_neuron in range(len(self)):<EOL><INDENT>point_covered = True<EOL>for index_dim in range(dimension):<EOL><INDENT>if abs(point[index_dim] - self._weights[index_neuron][index_dim]) > radius[index_dim]:<EOL><INDENT>point_covered = False<EOL>break<EOL><DEDENT><DEDENT>row = int(math.floor(index_neuron / self._cols))<EOL>col = index_neuron - row * self._cols<EOL>if point_covered is True:<EOL><INDENT>density_matrix[row][col] += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return density_matrix<EOL>", "docstring": "!\n        @brief Calculates density matrix (P-Matrix).\n\n        @param[in] surface_divider (double): Divider in each dimension that affect radius for density measurement.\n\n        @return (list) Density matrix (P-Matrix).\n\n        @see get_distance_matrix()", "id": "f15666:c3:m23"}
{"signature": "def show_winner_matrix(self):", "body": "if self.__ccore_som_pointer is not None:<EOL><INDENT>self._award = wrapper.som_get_awards(self.__ccore_som_pointer)<EOL><DEDENT>(fig, ax) = plt.subplots()<EOL>winner_matrix = [[<NUM_LIT:0>] * self._cols for i in range(self._rows)]<EOL>for i in range(self._rows):<EOL><INDENT>for j in range(self._cols):<EOL><INDENT>neuron_index = i * self._cols + j<EOL>winner_matrix[i][j] = self._award[neuron_index]<EOL>ax.text(i, j, str(winner_matrix[i][j]), va='<STR_LIT>', ha='<STR_LIT>')<EOL><DEDENT><DEDENT>ax.imshow(winner_matrix, cmap = plt.get_cmap('<STR_LIT>'), interpolation='<STR_LIT:none>')<EOL>ax.grid(True)<EOL>plt.title(\"<STR_LIT>\")<EOL>plt.show()<EOL>", "docstring": "!\n        @brief Show winner matrix where each element corresponds to neuron and value represents\n               amount of won objects from input dataspace at the last training iteration.\n\n        @see show_distance_matrix()", "id": "f15666:c3:m24"}
{"signature": "def show_network(self, awards = False, belongs = False, coupling = True, dataset = True, marker_type = '<STR_LIT:o>'):", "body": "if self.__ccore_som_pointer is not None:<EOL><INDENT>self._size = wrapper.som_get_size(self.__ccore_som_pointer)<EOL>self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)<EOL>self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer)<EOL>self._award = wrapper.som_get_awards(self.__ccore_som_pointer)<EOL><DEDENT>dimension = len(self._weights[<NUM_LIT:0>])<EOL>fig = plt.figure()<EOL>if (dimension == <NUM_LIT:1>) or (dimension == <NUM_LIT:2>):<EOL><INDENT>axes = fig.add_subplot(<NUM_LIT>)<EOL><DEDENT>elif dimension == <NUM_LIT:3>:<EOL><INDENT>axes = fig.gca(projection='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError('<STR_LIT>')<EOL><DEDENT>if (self._data is not None) and (dataset is True):<EOL><INDENT>for x in self._data:<EOL><INDENT>if dimension == <NUM_LIT:1>:<EOL><INDENT>axes.plot(x[<NUM_LIT:0>], <NUM_LIT:0.0>, '<STR_LIT>', ms = <NUM_LIT:30>)<EOL><DEDENT>elif dimension == <NUM_LIT:2>:<EOL><INDENT>axes.plot(x[<NUM_LIT:0>], x[<NUM_LIT:1>], '<STR_LIT>')<EOL><DEDENT>elif dimension == <NUM_LIT:3>:<EOL><INDENT>axes.scatter(x[<NUM_LIT:0>], x[<NUM_LIT:1>], x[<NUM_LIT:2>], c = '<STR_LIT:b>', marker = '<STR_LIT:.>')<EOL><DEDENT><DEDENT><DEDENT>for index in range(self._size):<EOL><INDENT>color = '<STR_LIT:g>'<EOL>if self._award[index] == <NUM_LIT:0>:<EOL><INDENT>color = '<STR_LIT:y>'<EOL><DEDENT>if dimension == <NUM_LIT:1>:<EOL><INDENT>axes.plot(self._weights[index][<NUM_LIT:0>], <NUM_LIT:0.0>, color + marker_type)<EOL>if awards:<EOL><INDENT>location = '<STR_LIT>'.format(self._award[index])<EOL>axes.text(self._weights[index][<NUM_LIT:0>], <NUM_LIT:0.0>, location, color='<STR_LIT>', fontsize = <NUM_LIT:10>)<EOL><DEDENT>if belongs and self._data is not None:<EOL><INDENT>location = '<STR_LIT>'.format(index)<EOL>axes.text(self._weights[index][<NUM_LIT:0>], <NUM_LIT:0.0>, location, 
color='<STR_LIT>', fontsize = <NUM_LIT:12>)<EOL>for k in range(len(self._capture_objects[index])):<EOL><INDENT>point = self._data[self._capture_objects[index][k]]<EOL>axes.text(point[<NUM_LIT:0>], <NUM_LIT:0.0>, location, color='<STR_LIT>', fontsize = <NUM_LIT:10>)<EOL><DEDENT><DEDENT><DEDENT>if dimension == <NUM_LIT:2>:<EOL><INDENT>axes.plot(self._weights[index][<NUM_LIT:0>], self._weights[index][<NUM_LIT:1>], color + marker_type)<EOL>if awards:<EOL><INDENT>location = '<STR_LIT>'.format(self._award[index])<EOL>axes.text(self._weights[index][<NUM_LIT:0>], self._weights[index][<NUM_LIT:1>], location, color='<STR_LIT>', fontsize=<NUM_LIT:10>)<EOL><DEDENT>if belongs and self._data is not None:<EOL><INDENT>location = '<STR_LIT>'.format(index)<EOL>axes.text(self._weights[index][<NUM_LIT:0>], self._weights[index][<NUM_LIT:1>], location, color='<STR_LIT>', fontsize=<NUM_LIT:12>)<EOL>for k in range(len(self._capture_objects[index])):<EOL><INDENT>point = self._data[self._capture_objects[index][k]]<EOL>axes.text(point[<NUM_LIT:0>], point[<NUM_LIT:1>], location, color='<STR_LIT>', fontsize=<NUM_LIT:10>)<EOL><DEDENT><DEDENT>if (self._conn_type != type_conn.func_neighbor) and (coupling != False):<EOL><INDENT>for neighbor in self._neighbors[index]:<EOL><INDENT>if neighbor > index:<EOL><INDENT>axes.plot([self._weights[index][<NUM_LIT:0>], self._weights[neighbor][<NUM_LIT:0>]],<EOL>[self._weights[index][<NUM_LIT:1>], self._weights[neighbor][<NUM_LIT:1>]],<EOL>'<STR_LIT:g>', linewidth=<NUM_LIT:0.5>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif dimension == <NUM_LIT:3>:<EOL><INDENT>axes.scatter(self._weights[index][<NUM_LIT:0>], self._weights[index][<NUM_LIT:1>], self._weights[index][<NUM_LIT:2>], c=color, marker=marker_type)<EOL>if (self._conn_type != type_conn.func_neighbor) and (coupling != False):<EOL><INDENT>for neighbor in self._neighbors[index]:<EOL><INDENT>if neighbor > index:<EOL><INDENT>axes.plot([self._weights[index][<NUM_LIT:0>], 
self._weights[neighbor][<NUM_LIT:0>]],<EOL>[self._weights[index][<NUM_LIT:1>], self._weights[neighbor][<NUM_LIT:1>]],<EOL>[self._weights[index][<NUM_LIT:2>], self._weights[neighbor][<NUM_LIT:2>]],<EOL>'<STR_LIT>', linewidth=<NUM_LIT:0.5>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>plt.title(\"<STR_LIT>\")<EOL>plt.grid()<EOL>plt.show()<EOL>", "docstring": "!\n        @brief Shows neurons in the dimension of data.\n\n        @param[in] awards (bool): If True - displays how many objects won each neuron.\n        @param[in] belongs (bool): If True - marks each won object by according index of neuron-winner (only when dataset is displayed too).\n        @param[in] coupling (bool): If True - displays connections between neurons (except case when function neighbor is used).\n        @param[in] dataset (bool): If True - displays inputs data set.\n        @param[in] marker_type (string): Defines marker that is used for dispaying neurons in the network.", "id": "f15666:c3:m25"}
{"signature": "def _create_initial_weights(self, init_type):", "body": "dim_info = dimension_info(self._data)<EOL>step_x = dim_info.get_center()[<NUM_LIT:0>]<EOL>if self._rows > <NUM_LIT:1>: step_x = dim_info.get_width()[<NUM_LIT:0>] / (self._rows - <NUM_LIT:1>);<EOL>step_y = <NUM_LIT:0.0><EOL>if dim_info.get_dimensions() > <NUM_LIT:1>:<EOL><INDENT>step_y = dim_info.get_center()[<NUM_LIT:1>]<EOL>if self._cols > <NUM_LIT:1>: step_y = dim_info.get_width()[<NUM_LIT:1>] / (self._cols - <NUM_LIT:1>);<EOL><DEDENT>random.seed()<EOL>if init_type == type_init.uniform_grid:<EOL><INDENT>self._weights = [ [ [] for i in range(dim_info.get_dimensions()) ] for j in range(self._size)]<EOL>for i in range(self._size):<EOL><INDENT>location = self._location[i]<EOL>for dim in range(dim_info.get_dimensions()):<EOL><INDENT>if dim == <NUM_LIT:0>:<EOL><INDENT>if self._rows > <NUM_LIT:1>:<EOL><INDENT>self._weights[i][dim] = dim_info.get_minimum_coordinate()[dim] + step_x * location[dim]<EOL><DEDENT>else:<EOL><INDENT>self._weights[i][dim] = dim_info.get_center()[dim]<EOL><DEDENT><DEDENT>elif dim == <NUM_LIT:1>:<EOL><INDENT>if self._cols > <NUM_LIT:1>:<EOL><INDENT>self._weights[i][dim] = dim_info.get_minimum_coordinate()[dim] + step_y * location[dim]<EOL><DEDENT>else:<EOL><INDENT>self._weights[i][dim] = dim_info.get_center()[dim]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._weights[i][dim] = dim_info.get_center()[dim]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif init_type == type_init.random_surface:<EOL><INDENT>self._weights = [[random.uniform(dim_info.get_minimum_coordinate()[i], dim_info.get_maximum_coordinate()[i]) for i in range(dim_info.get_dimensions())] for _ in range(self._size)]<EOL><DEDENT>elif init_type == type_init.random_centroid:<EOL><INDENT>self._weights = [[(random.random() + dim_info.get_center()[i])  for i in range(dim_info.get_dimensions())] for _ in range(self._size)]<EOL><DEDENT>else:<EOL><INDENT>self._weights = [[random.random() for i in range(dim_info.get_dimensions())] for 
_ in range(self._size)]<EOL><DEDENT>", "docstring": "!\n        @brief Creates initial weights for neurons in line with the specified initialization.\n\n        @param[in] init_type (type_init): Type of initialization of initial neuron weights (random, random in center of the input data, random distributed in data, ditributed in line with uniform grid).", "id": "f15666:c3:m12"}
{"signature": "def simulate(self, input_pattern):", "body": "if self.__ccore_som_pointer is not None:<EOL><INDENT>return wrapper.som_simulate(self.__ccore_som_pointer, input_pattern)<EOL><DEDENT>return self._competition(input_pattern)<EOL>", "docstring": "!\n        @brief Processes input pattern (no learining) and returns index of neuron-winner.\n               Using index of neuron winner catched object can be obtained using property capture_objects.\n\n        @param[in] input_pattern (list): Input pattern.\n\n        @return (uint) Returns index of neuron-winner.\n\n        @see capture_objects", "id": "f15666:c3:m17"}
{"signature": "def __getstate__(self):", "body": "if self.__ccore_som_pointer is not None:<EOL><INDENT>self.__download_dump_from_ccore()<EOL>return self.__get_dump_from_python(True)<EOL><DEDENT>return self.__get_dump_from_python(False)<EOL>", "docstring": "@brief Returns state of SOM network that can be used to store network.", "id": "f15666:c3:m7"}
{"signature": "def __initialize_locations(self, rows, cols):", "body": "location = list()<EOL>for i in range(rows):<EOL><INDENT>for j in range(cols):<EOL><INDENT>location.append([float(i), float(j)])<EOL><DEDENT><DEDENT>return location<EOL>", "docstring": "!\n        @brief Initialize locations (coordinates in SOM grid) of each neurons in the map.\n\n        @param[in] rows (uint): Number of neurons in the column (number of rows).\n        @param[in] cols (uint): Number of neurons in the row (number of columns).\n\n        @return (list) List of coordinates of each neuron in map.", "id": "f15666:c3:m10"}
{"signature": "def small_abc_image_recognition():", "body": "images = [];<EOL>images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_A;<EOL>images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_B;<EOL>images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_C;<EOL>template_recognition_image(images, <NUM_LIT>, <NUM_LIT>)<EOL>", "docstring": "!\n    @brief Trains network using letters 'A', 'B', 'C', and recognize each of them with and without noise.", "id": "f15668:m2"}
{"signature": "def two_oscillators_sync():", "body": "template_dynamic(<NUM_LIT:2>, -<NUM_LIT:4>, <NUM_LIT:1>, [<NUM_LIT:1>, <NUM_LIT:0>], [<NUM_LIT:1>, <NUM_LIT:1>])<EOL>", "docstring": "Comment: Different initial state - state of sync. will be reached.", "id": "f15669:m3"}
{"signature": "def five_oscillators_positive_conn():", "body": "template_dynamic(<NUM_LIT:5>, -<NUM_LIT:4>, <NUM_LIT:1>, [<NUM_LIT:1>, <NUM_LIT:0.5>, <NUM_LIT:0>, -<NUM_LIT:0.5>, -<NUM_LIT:1>], [<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>]);<EOL>template_dynamic(<NUM_LIT:5>, -<NUM_LIT:4>, <NUM_LIT:1>, [<NUM_LIT:1>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>], [-<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>])<EOL>", "docstring": "Note: Oscillations are dead in this case (sync. should be in ideal case)", "id": "f15669:m5"}
{"signature": "def thirteen_simplify_oscillator_three_stimulated_ensembles_list():", "body": "\"<STR_LIT>\"<EOL>parameters = legion_parameters();<EOL>parameters.Wt = <NUM_LIT>;<EOL>parameters.fi = <NUM_LIT>;<EOL>parameters.ENABLE_POTENTIONAL = False;<EOL>template_dynamic_legion(<NUM_LIT:15>, <NUM_LIT:1000>, <NUM_LIT:1000>, conn_type = conn_type.LIST_BIDIR, <EOL>stimulus = [<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>], <EOL>params = parameters, <EOL>separate_repr = [ [<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>], [<NUM_LIT:3>, <NUM_LIT:4>, <NUM_LIT:5>, <NUM_LIT:9>, <NUM_LIT:10>], [<NUM_LIT:6>, <NUM_LIT:7>, <NUM_LIT:8>], [<NUM_LIT:11>, <NUM_LIT:12>, <NUM_LIT>, <NUM_LIT>] ])<EOL>", "docstring": "Good example of three synchronous ensembels", "id": "f15675:m9"}
{"signature": "def sixteen_oscillator_two_stimulated_ensembles_grid():", "body": "parameters = legion_parameters();<EOL>parameters.teta_x = -<NUM_LIT>;<EOL>template_dynamic_legion(<NUM_LIT:16>, <NUM_LIT>, <NUM_LIT>, conn_type = conn_type.GRID_FOUR, params = parameters, stimulus = [<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <EOL><NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <EOL><NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <EOL><NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>])<EOL>", "docstring": "Not accurate false due to spikes are observed", "id": "f15675:m10"}
{"signature": "def thirteen_oscillator_three_stimulated_ensembles_list():", "body": "\"<STR_LIT>\"<EOL>parameters = legion_parameters();<EOL>parameters.Wt = <NUM_LIT>;<EOL>parameters.fi = <NUM_LIT>;<EOL>template_dynamic_legion(<NUM_LIT:15>, <NUM_LIT:1000>, <NUM_LIT:1000>, conn_type = conn_type.LIST_BIDIR, stimulus = [<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>], params = parameters, separate_repr = [ [<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>], [<NUM_LIT:3>, <NUM_LIT:4>, <NUM_LIT:5>, <NUM_LIT:9>, <NUM_LIT:10>], [<NUM_LIT:6>, <NUM_LIT:7>, <NUM_LIT:8>], [<NUM_LIT:11>, <NUM_LIT:12>, <NUM_LIT>, <NUM_LIT>] ])<EOL>", "docstring": "Good example of three synchronous ensembels", "id": "f15675:m8"}
{"signature": "def nine_neurons_stimulated_one_sync():", "body": "params = pcnn_parameters();<EOL>template_dynamic_pcnn(<NUM_LIT:9>, <NUM_LIT:100>, [<NUM_LIT:1.0>] * <NUM_LIT:9>, params, conn_type.GRID_FOUR)<EOL>", "docstring": "Just dynamic demonstration", "id": "f15677:m3"}
{"signature": "def twenty_five_neurons_mix_stimulated():", "body": "\"<STR_LIT>\"<EOL>params = pcnn_parameters();<EOL>params.AF = <NUM_LIT:0.1>;<EOL>params.AL = <NUM_LIT:0.0>;<EOL>params.AT = <NUM_LIT>;<EOL>params.VF = <NUM_LIT:1.0>;<EOL>params.VL = <NUM_LIT:1.0>;<EOL>params.VT = <NUM_LIT>;<EOL>params.M = <NUM_LIT:0.0>;<EOL>template_dynamic_pcnn(<NUM_LIT>, <NUM_LIT:100>, [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <EOL><NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>,<EOL><NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>,<EOL><NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>,<EOL><NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>], params, conn_type.GRID_FOUR, False)<EOL>", "docstring": "Object allocation", "id": "f15677:m5"}
{"signature": "def nine_neurons_mix_stimulated():", "body": "template_dynamic_pcnn(<NUM_LIT:9>, <NUM_LIT:100>, [<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <EOL><NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <EOL><NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>], None, conn_type.GRID_FOUR)<EOL>", "docstring": "Just dynamic demonstration", "id": "f15677:m4"}
{"signature": "def hundred_neurons_mix_stimulated():", "body": "params = pcnn_parameters();<EOL>params.AF = <NUM_LIT:0.1>;<EOL>params.AL = <NUM_LIT:0.1>;<EOL>params.AT = <NUM_LIT>;<EOL>params.VF = <NUM_LIT:1.0>;<EOL>params.VL = <NUM_LIT:1.0>;<EOL>params.VT = <NUM_LIT>;<EOL>params.W = <NUM_LIT:1.0>;<EOL>params.M = <NUM_LIT:1.0>;<EOL>template_dynamic_pcnn(<NUM_LIT:100>, <NUM_LIT:50>,  [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>,<EOL><NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>,<EOL><NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>,<EOL><NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>,<EOL><NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>,<EOL><NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>,<EOL><NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>,<EOL><NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>,<EOL><NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>,<EOL><NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>], params, conn_type.GRID_EIGHT, False)<EOL>", "docstring": "Allocate several clusters: the first contains borders (indexes of oscillators) and the second objects (indexes of oscillators)", "id": "f15677:m6"}
{"signature": "def segmentation_gray_image_building():", "body": "template_segmentation_image(IMAGE_SIMPLE_SAMPLES.IMAGE_SIMPLE_BUILDING, None, <NUM_LIT>, None, True, False, True)<EOL>", "docstring": "Long processing", "id": "f15678:m10"}
{"signature": "@property<EOL><INDENT>def width(self):<DEDENT>", "body": "return self.__width<EOL>", "docstring": "!\n        @brief Width of the network grid, this value is zero in case of non-grid structure.\n\n        @note This property returns valid value only for network with grid structure.", "id": "f15681:c4:m1"}
{"signature": "def set_connection(self, i, j):", "body": "if (self.structure != conn_type.DYNAMIC):<EOL><INDENT>raise NameError(\"<STR_LIT>\");<EOL><DEDENT>if (self._conn_represent == conn_represent.MATRIX):<EOL><INDENT>self._osc_conn[i][j] = True;<EOL>self._osc_conn[j][i] = True;<EOL><DEDENT>else:<EOL><INDENT>self._osc_conn[i].append(j);<EOL>self._osc_conn[j].append(i);<EOL><DEDENT>", "docstring": "!\n        @brief Couples two specified oscillators in the network with dynamic connections.\n\n        @param[in] i (uint): index of an oscillator that should be coupled with oscillator 'j' in the network.\n        @param[in] j (uint): index of an oscillator that should be coupled with oscillator 'i' in the network.\n\n        @note This method can be used only in case of DYNAMIC connections, otherwise it throws expection.", "id": "f15681:c4:m14"}
{"signature": "def __create_all_to_all_connections(self):", "body": "if (self._conn_represent == conn_represent.MATRIX):<EOL><INDENT>for index in range(<NUM_LIT:0>, self._num_osc, <NUM_LIT:1>):<EOL><INDENT>self._osc_conn.append([True] * self._num_osc);<EOL>self._osc_conn[index][index] = False;<EOL><DEDENT><DEDENT>elif (self._conn_represent == conn_represent.LIST):<EOL><INDENT>for index in range(<NUM_LIT:0>, self._num_osc, <NUM_LIT:1>):<EOL><INDENT>self._osc_conn.append([neigh for neigh in range(<NUM_LIT:0>, self._num_osc, <NUM_LIT:1>) if index != neigh]);<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Creates connections between all oscillators.", "id": "f15681:c4:m6"}
{"signature": "def __create_grid_four_connections(self):", "body": "side_size = self.__width;<EOL>if (self._conn_represent == conn_represent.MATRIX):<EOL><INDENT>self._osc_conn = [[<NUM_LIT:0>] * self._num_osc for index in range(<NUM_LIT:0>, self._num_osc, <NUM_LIT:1>)];<EOL><DEDENT>elif (self._conn_represent == conn_represent.LIST):<EOL><INDENT>self._osc_conn = [[] for index in range(<NUM_LIT:0>, self._num_osc, <NUM_LIT:1>)];<EOL><DEDENT>else:<EOL><INDENT>raise NameError(\"<STR_LIT>\");<EOL><DEDENT>for index in range(<NUM_LIT:0>, self._num_osc, <NUM_LIT:1>):<EOL><INDENT>upper_index = index - side_size;<EOL>lower_index = index + side_size;<EOL>left_index = index - <NUM_LIT:1>;<EOL>right_index = index + <NUM_LIT:1>;<EOL>node_row_index = math.ceil(index / side_size);<EOL>if (upper_index >= <NUM_LIT:0>):<EOL><INDENT>self.__create_connection(index, upper_index);<EOL><DEDENT>if (lower_index < self._num_osc):<EOL><INDENT>self.__create_connection(index, lower_index);<EOL><DEDENT>if ( (left_index >= <NUM_LIT:0>) and (math.ceil(left_index / side_size) == node_row_index) ):<EOL><INDENT>self.__create_connection(index, left_index);<EOL><DEDENT>if ( (right_index < self._num_osc) and (math.ceil(right_index / side_size) == node_row_index) ):<EOL><INDENT>self.__create_connection(index, right_index);<EOL><DEDENT><DEDENT>", "docstring": "!\n        @brief Creates network with connections that make up four grid structure.\n        @details Each oscillator may be connected with four neighbors in line with 'grid' structure: right, upper, left, lower.", "id": "f15681:c4:m7"}
{"signature": "def __init__(self, output, inhibitor, time, ccore = None):", "body": "self.__output = output;<EOL>self.__inhibitor = inhibitor;<EOL>self._time = time;<EOL>self.__ccore_legion_dynamic_pointer = ccore<EOL>", "docstring": "!\n        @brief Constructor of legion dynamic.\n\n        @param[in] output (list): Output dynamic of the network represented by excitatory values of oscillators.\n        @param[in] inhibitor (list): Output dynamic of the global inhibitor of the network.\n        @param[in] time (list): Simulation time.\n        @param[in] ccore (POINTER): Pointer to CCORE legion_dynamic. If it is specified then others arguments can be omitted.", "id": "f15682:c1:m3"}
{"signature": "def __del__(self):", "body": "if (self.__ccore_legion_pointer is not None):<EOL><INDENT>wrapper.legion_destroy(self.__ccore_legion_pointer);<EOL>self.__ccore_legion_pointer = None;<EOL><DEDENT>", "docstring": "!\n        @brief Default destructor of LEGION.", "id": "f15682:c2:m1"}
{"signature": "@property<EOL><INDENT>def output(self):<DEDENT>", "body": "if (self.__ccore_legion_dynamic_pointer is not None):<EOL><INDENT>return wrapper.legion_dynamic_get_output(self.__ccore_legion_dynamic_pointer);<EOL><DEDENT>return self.__output<EOL>", "docstring": "!\n        @brief Returns output dynamic of the network.", "id": "f15682:c1:m0"}
{"signature": "@property<EOL><INDENT>def time(self):<DEDENT>", "body": "if (self.__ccore_legion_dynamic_pointer is not None):<EOL><INDENT>return wrapper.legion_dynamic_get_time(self.__ccore_legion_dynamic_pointer);<EOL><DEDENT>return list(range(len(self)))<EOL>", "docstring": "!\n        @brief Returns simulation time.", "id": "f15682:c1:m2"}
{"signature": "def _global_inhibitor_state(self, z, t, argv):", "body": "sigma = <NUM_LIT:0.0>;<EOL>for x in self._excitatory:<EOL><INDENT>if (x > self._params.teta_zx):<EOL><INDENT>sigma = <NUM_LIT:1.0>;<EOL>break;<EOL><DEDENT><DEDENT>return self._params.fi * (sigma - z)<EOL>", "docstring": "!\n        @brief Returns new value of global inhibitory\n\n        @param[in] z (dobule): Current value of inhibitory.\n        @param[in] t (double): Current time of simulation.\n        @param[in] argv (tuple): It's not used, can be ignored.\n\n        @return (double) New value if global inhibitory (not assign).", "id": "f15682:c2:m7"}
{"signature": "def __create_dynamic_connections(self):", "body": "if (self._stimulus is None):<EOL><INDENT>raise NameError(\"<STR_LIT>\");<EOL><DEDENT>self._dynamic_coupling = [ [<NUM_LIT:0>] * self._num_osc for i in range(self._num_osc)];<EOL>for i in range(self._num_osc):<EOL><INDENT>neighbors = self.get_neighbors(i);<EOL>if ( (len(neighbors) > <NUM_LIT:0>) and (self._stimulus[i] > <NUM_LIT:0>) ):<EOL><INDENT>number_stimulated_neighbors = <NUM_LIT:0.0>;<EOL>for j in neighbors:<EOL><INDENT>if (self._stimulus[j] > <NUM_LIT:0>):<EOL><INDENT>number_stimulated_neighbors += <NUM_LIT:1.0>;<EOL><DEDENT><DEDENT>if (number_stimulated_neighbors > <NUM_LIT:0>):<EOL><INDENT>dynamic_weight = self._params.Wt / number_stimulated_neighbors;<EOL>for j in neighbors:<EOL><INDENT>self._dynamic_coupling[i][j] = dynamic_weight;<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "!\n        @brief Create dynamic connection in line with input stimulus.", "id": "f15682:c2:m4"}
{"signature": "def __init__(self):", "body": "<EOL>self.eps         = <NUM_LIT>;<EOL>self.alpha       = <NUM_LIT>;<EOL>self.gamma       = <NUM_LIT>;<EOL>self.betta       = <NUM_LIT:0.1>;<EOL>self.lamda       = <NUM_LIT:0.1>;<EOL>self.teta        = <NUM_LIT>;<EOL>self.teta_x      = -<NUM_LIT>;<EOL>self.teta_p      = <NUM_LIT>;<EOL>self.teta_xz     = <NUM_LIT:0.1>;<EOL>self.teta_zx     = <NUM_LIT:0.1>;<EOL>self.T           = <NUM_LIT>;<EOL>self.mu          = <NUM_LIT>;<EOL>self.Wz          = <NUM_LIT>;<EOL>self.Wt          = <NUM_LIT>;<EOL>self.fi          = <NUM_LIT>;<EOL>self.ro          = <NUM_LIT>;<EOL>self.I           = <NUM_LIT>;<EOL>self.ENABLE_POTENTIONAL = True<EOL>", "docstring": "!\n        @brief    Default constructor of parameters for LEGION (local excitatory global inhibitory oscillatory network).\n        @details  Constructor initializes parameters by default non-zero values that can be\n                  used for simple simulation.", "id": "f15682:c0:m0"}
{"signature": "def __len__(self):", "body": "if (self.__ccore_legion_pointer is not None):<EOL><INDENT>return wrapper.legion_get_size(self.__ccore_legion_pointer);<EOL><DEDENT>return self._num_osc<EOL>", "docstring": "!\n        @brief (uint) Returns size of LEGION.", "id": "f15682:c2:m2"}
{"signature": "@property<EOL><INDENT>def inhibitor(self):<DEDENT>", "body": "if (self.__ccore_legion_dynamic_pointer is not None):<EOL><INDENT>return wrapper.legion_dynamic_get_inhibitory_output(self.__ccore_legion_dynamic_pointer);<EOL><DEDENT>return self.__inhibitor<EOL>", "docstring": "!\n        @brief Returns output dynamic of the global inhibitor of the network.", "id": "f15682:c1:m1"}
{"signature": "def _legion_state(self, inputs, t, argv):", "body": "index = argv;<EOL>x = inputs[<NUM_LIT:0>];  <EOL>y = inputs[<NUM_LIT:1>];  <EOL>p = inputs[<NUM_LIT:2>];  <EOL>potential_influence = heaviside(p + math.exp(-self._params.alpha * t) - self._params.teta);<EOL>dx = <NUM_LIT> * x - x ** <NUM_LIT> + <NUM_LIT> - y + self._stimulus[index] * potential_influence + self._coupling_term[index] + self._noise[index];<EOL>dy = self._params.eps * (self._params.gamma * (<NUM_LIT:1.0> + math.tanh(x / self._params.betta)) - y);<EOL>neighbors = self.get_neighbors(index);<EOL>potential = <NUM_LIT:0.0>;<EOL>for index_neighbor in neighbors:<EOL><INDENT>potential += self._params.T * heaviside(self._excitatory[index_neighbor] - self._params.teta_x);<EOL><DEDENT>dp = self._params.lamda * (<NUM_LIT:1.0> - p) * heaviside(potential - self._params.teta_p) - self._params.mu * p;<EOL>return [dx, dy, dp]<EOL>", "docstring": "!\n        @brief Returns new values of excitatory and inhibitory parts of oscillator and potential of oscillator.\n\n        @param[in] inputs (list): Initial values (current) of oscillator [excitatory, inhibitory, potential].\n        @param[in] t (double): Current time of simulation.\n        @param[in] argv (uint): Extra arguments that are not used for integration - index of oscillator.\n\n        @return (list) New values of excitatoty and inhibitory part of oscillator and new value of potential (not assign).", "id": "f15682:c2:m9"}
{"signature": "def _calculate_states(self, solution, t, step, int_step):", "body": "next_excitatory = [<NUM_LIT:0.0>] * self._num_osc;<EOL>next_inhibitory = [<NUM_LIT:0.0>] * self._num_osc;<EOL>next_potential = [];<EOL>if (self._params.ENABLE_POTENTIONAL is True):<EOL><INDENT>next_potential = [<NUM_LIT:0.0>] * self._num_osc;<EOL><DEDENT>for index in range (<NUM_LIT:0>, self._num_osc, <NUM_LIT:1>):<EOL><INDENT>if (self._params.ENABLE_POTENTIONAL is True):<EOL><INDENT>result = odeint(self._legion_state, [self._excitatory[index], self._inhibitory[index], self._potential[index]], numpy.arange(t - step, t, int_step), (index , ));<EOL>[ next_excitatory[index], next_inhibitory[index], next_potential[index] ] = result[len(result) - <NUM_LIT:1>][<NUM_LIT:0>:<NUM_LIT:3>];<EOL><DEDENT>else:<EOL><INDENT>result = odeint(self._legion_state_simplify, [self._excitatory[index], self._inhibitory[index] ], numpy.arange(t - step, t, int_step), (index , ));<EOL>[ next_excitatory[index], next_inhibitory[index] ] = result[len(result) - <NUM_LIT:1>][<NUM_LIT:0>:<NUM_LIT:2>];<EOL><DEDENT>neighbors = self.get_neighbors(index);<EOL>coupling = <NUM_LIT:0><EOL>for index_neighbor in neighbors:<EOL><INDENT>coupling += self._dynamic_coupling[index][index_neighbor] * heaviside(self._excitatory[index_neighbor] - self._params.teta_x);<EOL><DEDENT>self._buffer_coupling_term[index] = coupling - self._params.Wz * heaviside(self._global_inhibitor - self._params.teta_xz);<EOL><DEDENT>result = odeint(self._global_inhibitor_state, self._global_inhibitor, numpy.arange(t - step, t, int_step), (None, ));<EOL>self._global_inhibitor = result[len(result) - <NUM_LIT:1>][<NUM_LIT:0>];<EOL>self._noise = [random.random() * self._params.ro for i in range(self._num_osc)];<EOL>self._coupling_term = self._buffer_coupling_term[:];<EOL>self._inhibitory = next_inhibitory[:];<EOL>self._excitatory = next_excitatory[:];<EOL>if (self._params.ENABLE_POTENTIONAL is True):<EOL><INDENT>self._potential = 
next_potential[:];<EOL><DEDENT>", "docstring": "!\n        @brief Calculates new state of each oscillator in the network.\n\n        @param[in] solution (solve_type): Type solver of the differential equation.\n        @param[in] t (double): Current time of simulation.\n        @param[in] step (double): Step of solution at the end of which states of oscillators should be calculated.\n        @param[in] int_step (double): Step differentiation that is used for solving differential equation.", "id": "f15682:c2:m6"}
{"signature": "def __analyse_colors(self, image_data, collect_dynamic):", "body": "network = syncnet(image_data, self.__color_radius, initial_phases = initial_type.RANDOM_GAUSSIAN, ccore = self.__ccore);<EOL>analyser = network.process(self.__order_color, solve_type.FAST, collect_dynamic);<EOL>return analyser<EOL>", "docstring": "!\n        @brief Performs color segmentation by the first layer.\n\n        @param[in] image_data (array_like): Image sample as a array-like structure.\n        @param[in] collect_dynamic (bool): If 'True' then whole dynamic of the first layer of the network is collected.\n\n        @return (syncnet_analyser) Analyser of color segmentation results of the first layer.", "id": "f15683:c2:m2"}
{"signature": "@staticmethod<EOL><INDENT>def show_first_layer_dynamic(analyser):<DEDENT>", "body": "sync_visualizer.show_output_dynamic(analyser.get_first_layer_analyser())<EOL>", "docstring": "!\n        @brief Shows output dynamic of the first layer.\n\n        @param[in] analyser (syncsegm_analyser): Analyser of output dynamic of the 'syncsegm' oscillatory network.", "id": "f15683:c0:m0"}
{"signature": "def __init__(self, color_analyser, object_segment_analysers = None):", "body": "self.__color_analyser = color_analyser;<EOL>self.__object_segment_analysers = object_segment_analysers<EOL>", "docstring": "!\n        @brief Constructor of the analyser.\n\n        @param[in] color_analyser (list): Analyser of coloring segmentation results of the first layer.\n        @param[in] object_segment_analysers (list): Analysers of objects on image segments - results of the second layer.", "id": "f15683:c1:m0"}
{"signature": "def get_first_layer_analyser(self):", "body": "return self.__color_analyser<EOL>", "docstring": "!\n        @brief Returns analyser of coloring segmentation of the first layer.", "id": "f15683:c1:m1"}
{"signature": "def extract_number_oscillations(osc_dyn, index = <NUM_LIT:0>, amplitude_threshold = <NUM_LIT:1.0>):", "body": "number_oscillations = <NUM_LIT:0>;<EOL>waiting_differential = False;<EOL>threshold_passed = False;<EOL>high_level_trigger = True if (osc_dyn[<NUM_LIT:0>][index] > amplitude_threshold) else False;<EOL>for values in osc_dyn:<EOL><INDENT>if ( (values[index] >= amplitude_threshold) and (high_level_trigger is False) ):<EOL><INDENT>high_level_trigger = True;<EOL>threshold_passed = True;<EOL><DEDENT>elif ( (values[index] < amplitude_threshold) and (high_level_trigger is True) ):<EOL><INDENT>high_level_trigger = False;<EOL>threshold_passed = True;<EOL><DEDENT>if (threshold_passed is True):<EOL><INDENT>threshold_passed = False;<EOL>if (waiting_differential is True and high_level_trigger is False):<EOL><INDENT>number_oscillations += <NUM_LIT:1>;<EOL>waiting_differential = False;<EOL><DEDENT>else:<EOL><INDENT>waiting_differential = True;<EOL><DEDENT><DEDENT><DEDENT>return number_oscillations<EOL>", "docstring": "!\n    @brief Extracts number of oscillations of specified oscillator.\n\n    @param[in] osc_dyn (list): Dynamic of oscillators.\n    @param[in] index (uint): Index of oscillator in dynamic.\n    @param[in] amplitude_threshold (double): Amplitude threshold when oscillation is taken into account, for example,\n                when oscillator amplitude is greater than threshold then oscillation is incremented.\n\n    @return (uint) Number of oscillations of specified oscillator.", "id": "f15694:m19"}
{"signature": "def set_ax_param(ax, x_title = None, y_title = None, x_lim = None, y_lim = None, x_labels = True, y_labels = True, grid = True):", "body": "from matplotlib.font_manager import FontProperties;<EOL>from matplotlib import rcParams;<EOL>if (_platform == \"<STR_LIT>\") or (_platform == \"<STR_LIT>\"):<EOL><INDENT>rcParams['<STR_LIT>'] = ['<STR_LIT>'];<EOL><DEDENT>else:<EOL><INDENT>rcParams['<STR_LIT>'] = ['<STR_LIT>'];<EOL><DEDENT>rcParams['<STR_LIT>'] = <NUM_LIT:12>;<EOL>surface_font = FontProperties();<EOL>if (_platform == \"<STR_LIT>\") or (_platform == \"<STR_LIT>\"):<EOL><INDENT>surface_font.set_name('<STR_LIT>');<EOL><DEDENT>else:<EOL><INDENT>surface_font.set_name('<STR_LIT>');<EOL><DEDENT>surface_font.set_size('<STR_LIT>');<EOL>if (y_title is not None): ax.set_ylabel(y_title, fontproperties = surface_font);<EOL>if (x_title is not None): ax.set_xlabel(x_title, fontproperties = surface_font);<EOL>if (x_lim is not None): ax.set_xlim(x_lim[<NUM_LIT:0>], x_lim[<NUM_LIT:1>]);<EOL>if (y_lim is not None): ax.set_ylim(y_lim[<NUM_LIT:0>], y_lim[<NUM_LIT:1>]);<EOL>if (x_labels is False): ax.xaxis.set_ticklabels([]);<EOL>if (y_labels is False): ax.yaxis.set_ticklabels([]);<EOL>ax.grid(grid)<EOL>", "docstring": "!\n    @brief Sets parameters for matplotlib ax.\n\n    @param[in] ax (Axes): Axes for which parameters should applied.\n    @param[in] x_title (string): Title for Y.\n    @param[in] y_title (string): Title for X.\n    @param[in] x_lim (double): X limit.\n    @param[in] y_lim (double): Y limit.\n    @param[in] x_labels (bool): If True - shows X labels.\n    @param[in] y_labels (bool): If True - shows Y labels.\n    @param[in] grid (bool): If True - shows grid.", "id": "f15694:m23"}
{"signature": "def draw_dynamics_set(dynamics, xtitle = None, ytitle = None, xlim = None, ylim = None, xlabels = False, ylabels = False):", "body": "<EOL>number_dynamics = len(dynamics);<EOL>if (number_dynamics == <NUM_LIT:1>):<EOL><INDENT>draw_dynamics(dynamics[<NUM_LIT:0>][<NUM_LIT:0>], dynamics[<NUM_LIT:0>][<NUM_LIT:1>], xtitle, ytitle, xlim, ylim, xlabels, ylabels);<EOL>return;<EOL><DEDENT>number_cols = int(numpy.ceil(number_dynamics ** <NUM_LIT:0.5>));<EOL>number_rows = int(numpy.ceil(number_dynamics / number_cols));<EOL>real_index = <NUM_LIT:0>, <NUM_LIT:0>;<EOL>double_indexer = True;<EOL>if ( (number_cols == <NUM_LIT:1>) or (number_rows == <NUM_LIT:1>) ):<EOL><INDENT>real_index = <NUM_LIT:0>;<EOL>double_indexer = False;<EOL><DEDENT>(_, axarr) = plt.subplots(number_rows, number_cols);<EOL>for dynamic in dynamics:<EOL><INDENT>axarr[real_index] = draw_dynamics(dynamic[<NUM_LIT:0>], dynamic[<NUM_LIT:1>], xtitle, ytitle, xlim, ylim, xlabels, ylabels, axes = axarr[real_index]);<EOL>if (double_indexer is True):<EOL><INDENT>real_index = real_index[<NUM_LIT:0>], real_index[<NUM_LIT:1>] + <NUM_LIT:1>;<EOL>if (real_index[<NUM_LIT:1>] >= number_cols):<EOL><INDENT>real_index = real_index[<NUM_LIT:0>] + <NUM_LIT:1>, <NUM_LIT:0>; <EOL><DEDENT><DEDENT>else:<EOL><INDENT>real_index += <NUM_LIT:1>;<EOL><DEDENT><DEDENT>plt.show()<EOL>", "docstring": "!\n    @brief Draw lists of dynamics of neurons (oscillators) in the network.\n\n    @param[in] dynamics (list): List of network outputs that are represented by values of output of oscillators (used by y axis).\n    @param[in] xtitle (string): Title for Y.\n    @param[in] ytitle (string): Title for X.\n    @param[in] xlim (double): X limit.\n    @param[in] ylim (double): Y limit.\n    @param[in] xlabels (bool): If True - shows X labels.\n    @param[in] ylabels (bool): If True - shows Y labels.", "id": "f15694:m24"}
{"signature": "def rgb2gray(image_rgb_array):", "body": "image_gray_array = [<NUM_LIT:0.0>] * len(image_rgb_array);<EOL>for index in range(<NUM_LIT:0>, len(image_rgb_array), <NUM_LIT:1>):<EOL><INDENT>image_gray_array[index] = float(image_rgb_array[index][<NUM_LIT:0>]) * <NUM_LIT> + float(image_rgb_array[index][<NUM_LIT:1>]) * <NUM_LIT> + float(image_rgb_array[index][<NUM_LIT:2>]) * <NUM_LIT>;<EOL><DEDENT>return image_gray_array<EOL>", "docstring": "!\n    @brief Returns image as 1-dimension (gray colored) matrix, where one element of list describes pixel.\n    @details Luma coding is used for transformation and that is calculated directly from gamma-compressed primary intensities as a weighted sum:\n\n    \\f[Y = 0.2989R + 0.587G + 0.114B\\f]\n\n    @param[in] image_rgb_array (list): Image represented by RGB list.\n\n    @return (list) Image as gray colored matrix, where one element of list describes pixel.\n\n    @code\n        colored_image = read_image(file_name);\n        gray_image = rgb2gray(colored_image);\n    @endcode\n\n    @see read_image()", "id": "f15694:m3"}
{"signature": "def heaviside(value):", "body": "if (value > <NUM_LIT:0.0>): <EOL><INDENT>return <NUM_LIT:1.0>;<EOL><DEDENT>return <NUM_LIT:0.0><EOL>", "docstring": "!\n    @brief Calculates Heaviside function that represents step function.\n    @details If input value is greater than 0 then returns 1, otherwise returns 0.\n\n    @param[in] value (double): Argument of Heaviside function.\n\n    @return (double) Value of Heaviside function.", "id": "f15694:m17"}
{"signature": "def list_math_addition(a, b):", "body": "return [a[i] + b[i] for i in range(len(a))]<EOL>", "docstring": "!\n    @brief Addition of two lists.\n    @details Each element from list 'a' is added to element from list 'b' accordingly.\n\n    @param[in] a (list): List of elements that supports mathematic addition..\n    @param[in] b (list): List of elements that supports mathematic addition..\n\n    @return (list) Results of addtion of two lists.", "id": "f15694:m31"}
{"signature": "def average_intra_cluster_distance(cluster1, cluster2, data=None):", "body": "distance = <NUM_LIT:0.0><EOL>for i in range(len(cluster1) + len(cluster2)):<EOL><INDENT>for j in range(len(cluster1) + len(cluster2)):<EOL><INDENT>if data is None:<EOL><INDENT>if i < len(cluster1):<EOL><INDENT>first_point = cluster1[i]<EOL><DEDENT>else:<EOL><INDENT>first_point = cluster2[i - len(cluster1)]<EOL><DEDENT>if j < len(cluster1):<EOL><INDENT>second_point = cluster1[j]<EOL><DEDENT>else:<EOL><INDENT>second_point = cluster2[j - len(cluster1)]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if i < len(cluster1):<EOL><INDENT>first_point = data[cluster1[i]]<EOL><DEDENT>else:<EOL><INDENT>first_point = data[cluster2[i - len(cluster1)]]<EOL><DEDENT>if j < len(cluster1):<EOL><INDENT>second_point = data[cluster1[j]]<EOL><DEDENT>else:<EOL><INDENT>second_point = data[cluster2[j - len(cluster1)]]<EOL><DEDENT><DEDENT>distance += euclidean_distance_square(first_point, second_point)<EOL><DEDENT><DEDENT>distance /= float((len(cluster1) + len(cluster2)) * (len(cluster1) + len(cluster2) - <NUM_LIT:1.0>))<EOL>return distance ** <NUM_LIT:0.5><EOL>", "docstring": "!\n    @brief Calculates average intra-cluster distance between two clusters.\n    @details Clusters can be represented by list of coordinates (in this case data shouldn't be specified),\n             or by list of indexes of points from the data (represented by list of points), in this case \n             data should be specified.\n\n    @param[in] cluster1 (list): The first cluster.\n    @param[in] cluster2 (list): The second cluster.\n    @param[in] data (list): If specified than elements of clusters will be used as indexes,\n               otherwise elements of cluster will be considered as points.\n\n    @return (double) Average intra-cluster distance between two clusters.", "id": "f15694:m12"}
{"signature": "def gray_pattern_borders(image):", "body": "width, height = image.size;<EOL>width_start = width;<EOL>width_end = <NUM_LIT:0>;<EOL>height_start = height;<EOL>height_end = <NUM_LIT:0>;<EOL>row, col = <NUM_LIT:0>, <NUM_LIT:0>;<EOL>for pixel in image.getdata():<EOL><INDENT>value = float(pixel[<NUM_LIT:0>]) * <NUM_LIT> + float(pixel[<NUM_LIT:1>]) * <NUM_LIT> + float(pixel[<NUM_LIT:2>]) * <NUM_LIT>;<EOL>if (value < <NUM_LIT>):<EOL><INDENT>if (width_end < col): <EOL><INDENT>width_end = col;<EOL><DEDENT>if (height_end < row):<EOL><INDENT>height_end = row;<EOL><DEDENT>if (width_start > col):<EOL><INDENT>width_start = col;<EOL><DEDENT>if (height_start > row):<EOL><INDENT>height_start = row;<EOL><DEDENT><DEDENT>col += <NUM_LIT:1>;<EOL>if (col >= width):<EOL><INDENT>col = <NUM_LIT:0>;<EOL>row += <NUM_LIT:1>;<EOL><DEDENT><DEDENT>return (width_start, height_start, width_end + <NUM_LIT:1>, height_end + <NUM_LIT:1>)<EOL>", "docstring": "!\n    @brief Returns coordinates of gray image content on the input image.\n\n    @param[in] image (Image): PIL Image instance that is processed.\n\n    @return (tuple) Returns coordinates of gray image content as (width_start, height_start, width_end, height_end).", "id": "f15694:m5"}
{"signature": "def read_image(filename):", "body": "with Image.open(filename) as image_source:<EOL><INDENT>data = [list(pixel) for pixel in image_source.getdata()]<EOL>return data<EOL><DEDENT>", "docstring": "!\n    @brief Returns image as N-dimension (depends on the input image) matrix, where one element of list describes pixel.\n\n    @param[in] filename (string): Path to image.\n\n    @return (list) Pixels where each pixel described by list of RGB-values.", "id": "f15694:m2"}
{"signature": "def draw_image_mask_segments(source, clusters, hide_axes = True):", "body": "if (len(clusters) == <NUM_LIT:0>):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return;<EOL><DEDENT>image_source = Image.open(source);<EOL>image_size = image_source.size;<EOL>number_clusters = len(clusters) + <NUM_LIT:1>; <EOL>number_cols = int(numpy.ceil(number_clusters ** <NUM_LIT:0.5>));<EOL>number_rows = int(numpy.ceil(number_clusters / number_cols));<EOL>real_index = <NUM_LIT:0>, <NUM_LIT:0>;<EOL>double_indexer = True;<EOL>if ( (number_cols == <NUM_LIT:1>) or (number_rows == <NUM_LIT:1>) ):<EOL><INDENT>real_index = <NUM_LIT:0>;<EOL>double_indexer = False;<EOL><DEDENT>(fig, axarr) = plt.subplots(number_rows, number_cols);<EOL>plt.setp([ax for ax in axarr], visible = False);<EOL>axarr[real_index].imshow(image_source, interpolation = '<STR_LIT:none>');<EOL>plt.setp(axarr[real_index], visible = True);<EOL>if (hide_axes is True):<EOL><INDENT>axarr[real_index].xaxis.set_ticklabels([]);<EOL>axarr[real_index].yaxis.set_ticklabels([]);<EOL>axarr[real_index].xaxis.set_ticks_position('<STR_LIT:none>');<EOL>axarr[real_index].yaxis.set_ticks_position('<STR_LIT:none>');<EOL><DEDENT>if (double_indexer is True):<EOL><INDENT>real_index = <NUM_LIT:0>, <NUM_LIT:1>;<EOL><DEDENT>else:<EOL><INDENT>real_index += <NUM_LIT:1>;<EOL><DEDENT>for cluster in clusters:<EOL><INDENT>stage_cluster = [(<NUM_LIT:255>, <NUM_LIT:255>, <NUM_LIT:255>)] * (image_size[<NUM_LIT:0>] * image_size[<NUM_LIT:1>]);<EOL>for index in cluster:<EOL><INDENT>stage_cluster[index] = (<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>);<EOL><DEDENT>stage = array(stage_cluster, numpy.uint8);<EOL>stage = numpy.reshape(stage, (image_size[<NUM_LIT:1>], image_size[<NUM_LIT:0>]) + ((<NUM_LIT:3>),)); <EOL>image_cluster = Image.fromarray(stage, '<STR_LIT>');<EOL>axarr[real_index].imshow(image_cluster, interpolation = '<STR_LIT:none>');<EOL>plt.setp(axarr[real_index], visible = True);<EOL>if (hide_axes is 
True):<EOL><INDENT>axarr[real_index].xaxis.set_ticklabels([]);<EOL>axarr[real_index].yaxis.set_ticklabels([]);<EOL>axarr[real_index].xaxis.set_ticks_position('<STR_LIT:none>');<EOL>axarr[real_index].yaxis.set_ticks_position('<STR_LIT:none>');<EOL><DEDENT>if (double_indexer is True):<EOL><INDENT>real_index = real_index[<NUM_LIT:0>], real_index[<NUM_LIT:1>] + <NUM_LIT:1>;<EOL>if (real_index[<NUM_LIT:1>] >= number_cols):<EOL><INDENT>real_index = real_index[<NUM_LIT:0>] + <NUM_LIT:1>, <NUM_LIT:0>; <EOL><DEDENT><DEDENT>else:<EOL><INDENT>real_index += <NUM_LIT:1>;<EOL><DEDENT><DEDENT>plt.show()<EOL>", "docstring": "!\n    @brief Shows image segments using black masks.\n    @details Each black mask of allocated segment is presented on separate plot.\n             The first image is initial and others are black masks of segments.\n\n    @param[in] source (string): Path to image.\n    @param[in] clusters (list): List of clusters (allocated segments of image) where each cluster\n                                consists of indexes of pixel from source image.\n    @param[in] hide_axes (bool): If True then axes will not be displayed.", "id": "f15694:m26"}
{"signature": "def read_sample(filename):", "body": "file = open(filename, '<STR_LIT:r>')<EOL>sample = [[float(val) for val in line.split()] for line in file if len(line.strip()) > <NUM_LIT:0>]<EOL>file.close()<EOL>return sample<EOL>", "docstring": "!\n    @brief Returns data sample from simple text file.\n    @details This function should be used for text file with following format:\n    @code\n    point_1_coord_1 point_1_coord_2 ... point_1_coord_n\n    point_2_coord_1 point_2_coord_2 ... point_2_coord_n\n    ... ...\n    @endcode\n\n    @param[in] filename (string): Path to file with data.\n\n    @return (list) Points where each point represented by list of coordinates.", "id": "f15694:m0"}
{"signature": "def manhattan_distance(a, b):", "body": "if ( ((type(a) == float) and (type(b) == float)) or ((type(a) == int) and (type(b) == int)) ):<EOL><INDENT>return abs(a - b);<EOL><DEDENT>distance = <NUM_LIT:0.0>;<EOL>dimension = len(a);<EOL>for i in range(<NUM_LIT:0>, dimension):<EOL><INDENT>distance += abs(a[i] - b[i]);<EOL><DEDENT>return distance<EOL>", "docstring": "!\n    @brief Calculate Manhattan distance between vector a and b.\n\n    @param[in] a (list): The first cluster.\n    @param[in] b (list): The second cluster.\n\n    @return (double) Manhattan distance between two vectors.", "id": "f15694:m10"}
{"signature": "def average_inter_cluster_distance(cluster1, cluster2, data = None):", "body": "distance = <NUM_LIT:0.0>;<EOL>if (data is None):<EOL><INDENT>for i in range(len(cluster1)):<EOL><INDENT>for j in range(len(cluster2)):<EOL><INDENT>distance += euclidean_distance_square(cluster1[i], cluster2[j]);<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for i in range(len(cluster1)):<EOL><INDENT>for j in range(len(cluster2)):<EOL><INDENT>distance += euclidean_distance_square(data[ cluster1[i]], data[ cluster2[j]]);<EOL><DEDENT><DEDENT><DEDENT>distance /= float(len(cluster1) * len(cluster2));<EOL>return distance ** <NUM_LIT:0.5><EOL>", "docstring": "!\n    @brief Calculates average inter-cluster distance between two clusters.\n    @details Clusters can be represented by list of coordinates (in this case data shouldn't be specified),\n             or by list of indexes of points from the data (represented by list of points), in this case \n             data should be specified.\n\n    @param[in] cluster1 (list): The first cluster where each element can represent index from the data or object itself.\n    @param[in] cluster2 (list): The second cluster where each element can represent index from the data or object itself.\n    @param[in] data (list): If specified than elements of clusters will be used as indexes,\n               otherwise elements of cluster will be considered as points.\n\n    @return (double) Average inter-cluster distance between two clusters.", "id": "f15694:m11"}
{"signature": "def stretch_pattern(image_source):", "body": "wsize, hsize = image_source.size;<EOL>(ws, hs, we, he) = gray_pattern_borders(image_source);<EOL>image_source = image_source.crop((ws, hs, we, he));<EOL>image_source = image_source.resize((wsize, hsize), Image.ANTIALIAS);<EOL>data = [pixel for pixel in image_source.getdata()];<EOL>image_pattern = rgb2gray(data);<EOL>return (image_pattern, image_source)<EOL>", "docstring": "!\n    @brief Returns stretched content as 1-dimension (gray colored) matrix with size of input image.\n\n    @param[in] image_source (Image): PIL Image instance.\n\n    @return (list, Image) Stretched image as gray colored matrix and source image.", "id": "f15694:m4"}
{"signature": "def list_math_multiplication_number(a, b):", "body": "return [a[i] * b for i in range(len(a))]<EOL>", "docstring": "!\n    @brief Multiplication between list and number.\n    @details Each element from list 'a' is multiplied by number 'b'.\n\n    @param[in] a (list): List of elements that supports mathematic division.\n    @param[in] b (double): Number that supports mathematic division.\n\n    @return (list) Result of division between list and number.", "id": "f15694:m35"}
{"signature": "def medoid(data, indexes=None, **kwargs):", "body": "index_median = None<EOL>distance = float('<STR_LIT>')<EOL>metric = kwargs.get('<STR_LIT>', type_metric.EUCLIDEAN_SQUARE)<EOL>data_type = kwargs.get('<STR_LIT>', '<STR_LIT>')<EOL>if data_type == '<STR_LIT>':<EOL><INDENT>calculator = lambda index1, index2: metric(data[index1], data[index2])<EOL><DEDENT>elif data_type == '<STR_LIT>':<EOL><INDENT>if isinstance(data, numpy.matrix):<EOL><INDENT>calculator = lambda index1, index2: data.item(index1, index2)<EOL><DEDENT>else:<EOL><INDENT>calculator = lambda index1, index2: data[index1][index2]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % data_type)<EOL><DEDENT>if indexes is None:<EOL><INDENT>range_points = range(len(data))<EOL><DEDENT>else:<EOL><INDENT>range_points = indexes<EOL><DEDENT>for index_candidate in range_points:<EOL><INDENT>distance_candidate = <NUM_LIT:0.0><EOL>for index in range_points:<EOL><INDENT>distance_candidate += calculator(index_candidate, index)<EOL><DEDENT>if distance_candidate < distance:<EOL><INDENT>distance = distance_candidate<EOL>index_median = index_candidate<EOL><DEDENT><DEDENT>return index_median<EOL>", "docstring": "!\n    @brief Calculate medoid for input points using Euclidean distance.\n\n    @param[in] data (list): Set of points for that median should be calculated.\n    @param[in] indexes (list): Indexes of input set of points that will be taken into account during median calculation.\n    @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'metric', 'data_type').\n\n    <b>Keyword Args:</b><br>\n        - metric (distance_metric): Metric that is used for distance calculation between two points.\n        - data_type (string): Data type of input sample 'data' (available values: 'points', 'distance_matrix').\n\n    @return (uint) index of point in input set that corresponds to median.", "id": "f15694:m7"}
{"signature": "def average_neighbor_distance(points, num_neigh):", "body": "if num_neigh > len(points) - <NUM_LIT:1>:<EOL><INDENT>raise NameError('<STR_LIT>');<EOL><DEDENT>dist_matrix = [ [ <NUM_LIT:0.0> for i in range(len(points)) ] for j in range(len(points)) ];<EOL>for i in range(<NUM_LIT:0>, len(points), <NUM_LIT:1>):<EOL><INDENT>for j in range(i + <NUM_LIT:1>, len(points), <NUM_LIT:1>):<EOL><INDENT>distance = euclidean_distance(points[i], points[j]);<EOL>dist_matrix[i][j] = distance;<EOL>dist_matrix[j][i] = distance;<EOL><DEDENT>dist_matrix[i] = sorted(dist_matrix[i]);<EOL><DEDENT>total_distance = <NUM_LIT:0>;<EOL>for i in range(<NUM_LIT:0>, len(points), <NUM_LIT:1>):<EOL><INDENT>for j in range(<NUM_LIT:0>, num_neigh, <NUM_LIT:1>):<EOL><INDENT>total_distance += dist_matrix[i][j + <NUM_LIT:1>];<EOL><DEDENT><DEDENT>return ( total_distance / (num_neigh * len(points)) )<EOL>", "docstring": "!\n    @brief Returns average distance for establish links between specified number of nearest neighbors.\n\n    @param[in] points (list): Input data, list of points where each point represented by list.\n    @param[in] num_neigh (uint): Number of neighbors that should be used for distance calculation.\n\n    @return (double) Average distance for establish links between 'num_neigh' in data set 'points'.", "id": "f15694:m6"}
{"signature": "def calculate_ellipse_description(covariance, scale = <NUM_LIT>):", "body": "eigh_values, eigh_vectors = numpy.linalg.eigh(covariance)<EOL>order = eigh_values.argsort()[::-<NUM_LIT:1>]<EOL>values, vectors = eigh_values[order], eigh_vectors[order]<EOL>angle = numpy.degrees(numpy.arctan2(*vectors[:,<NUM_LIT:0>][::-<NUM_LIT:1>]))<EOL>if <NUM_LIT:0.0> in values:<EOL><INDENT>return <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0><EOL><DEDENT>width, height = <NUM_LIT> * scale * numpy.sqrt(values)<EOL>return angle, width, height<EOL>", "docstring": "!\n    @brief Calculates description of ellipse using covariance matrix.\n\n    @param[in] covariance (numpy.array): Covariance matrix for which ellipse area should be calculated.\n    @param[in] scale (float): Scale of the ellipse.\n\n    @return (float, float, float) Return ellipse description: angle, width, height.", "id": "f15694:m14"}
{"signature": "def list_math_addition_number(a, b):", "body": "return [a[i] + b for i in range(len(a))]<EOL>", "docstring": "!\n    @brief Addition between list and number.\n    @details Each element from list 'a' is added to number 'b'.\n\n    @param[in] a (list): List of elements that supports mathematic addition.\n    @param[in] b (double): Value that supports mathematic addition.\n\n    @return (list) Result of addtion of two lists.", "id": "f15694:m32"}
{"signature": "def manhattan_distance_numpy(object1, object2):", "body": "return numpy.sum(numpy.absolute(object1 - object2), axis=<NUM_LIT:1>).T<EOL>", "docstring": "!\n    @brief Calculate Manhattan distance between two objects using numpy.\n\n    @param[in] object1 (array_like): The first array_like object.\n    @param[in] object2 (array_like): The second array_like object.\n\n    @return (double) Manhattan distance between two objects.", "id": "f15695:m5"}
{"signature": "def minkowski_distance_numpy(object1, object2, degree=<NUM_LIT:2>):", "body": "return numpy.sum(numpy.power(numpy.power(object1 - object2, degree), <NUM_LIT:1>/degree), axis=<NUM_LIT:1>).T<EOL>", "docstring": "!\n    @brief Calculate Minkowski distance between objects using numpy.\n\n    @param[in] object1 (array_like): The first array_like object.\n    @param[in] object2 (array_like): The second array_like object.\n    @param[in] degree (numeric): Degree of that is used for Minkowski distance.\n\n    @return (double) Minkowski distance between two object.", "id": "f15695:m9"}
{"signature": "def __create_distance_calculator_numpy(self):", "body": "if self.__type == type_metric.EUCLIDEAN:<EOL><INDENT>return euclidean_distance_numpy<EOL><DEDENT>elif self.__type == type_metric.EUCLIDEAN_SQUARE:<EOL><INDENT>return euclidean_distance_square_numpy<EOL><DEDENT>elif self.__type == type_metric.MANHATTAN:<EOL><INDENT>return manhattan_distance_numpy<EOL><DEDENT>elif self.__type == type_metric.CHEBYSHEV:<EOL><INDENT>return chebyshev_distance_numpy<EOL><DEDENT>elif self.__type == type_metric.MINKOWSKI:<EOL><INDENT>return lambda object1, object2: minkowski_distance_numpy(object1, object2, self.__args.get('<STR_LIT>', <NUM_LIT:2>))<EOL><DEDENT>elif self.__type == type_metric.CANBERRA:<EOL><INDENT>return canberra_distance_numpy<EOL><DEDENT>elif self.__type == type_metric.CHI_SQUARE:<EOL><INDENT>return chi_square_distance_numpy<EOL><DEDENT>elif self.__type == type_metric.USER_DEFINED:<EOL><INDENT>return self.__func<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\", self.__type)<EOL><DEDENT>", "docstring": "!\n        @brief Creates distance metric calculator that uses numpy.\n\n        @return (callable) Callable object of distance metric calculator.", "id": "f15695:c1:m9"}
{"signature": "def manhattan_distance(point1, point2):", "body": "distance = <NUM_LIT:0.0><EOL>dimension = len(point1)<EOL>for i in range(dimension):<EOL><INDENT>distance += abs(point1[i] - point2[i])<EOL><DEDENT>return distance<EOL>", "docstring": "!\n    @brief Calculate Manhattan distance between between two vectors.\n\n    \\f[\n    dist(a, b) = \\sum_{i=0}^{N}\\left | a_{i} - b_{i} \\right |;\n    \\f]\n\n    @param[in] point1 (array_like): The first vector.\n    @param[in] point2 (array_like): The second vector.\n\n    @return (double) Manhattan distance between two vectors.\n\n    @see euclidean_distance_square, euclidean_distance, chebyshev_distance", "id": "f15695:m4"}
{"signature": "def get_function(self):", "body": "return self.__func<EOL>", "docstring": "!\n        @brief Return user-defined function for calculation distance metric.\n\n        @return (callable): User-defined distance metric function.", "id": "f15695:c1:m4"}
{"signature": "def canberra_distance_numpy(object1, object2):", "body": "with numpy.errstate(divide='<STR_LIT:ignore>', invalid='<STR_LIT:ignore>'):<EOL><INDENT>result = numpy.divide(numpy.abs(object1 - object2), numpy.abs(object1) + numpy.abs(object2))<EOL><DEDENT>if len(result.shape) > <NUM_LIT:1>:<EOL><INDENT>return numpy.sum(numpy.nan_to_num(result), axis=<NUM_LIT:1>).T<EOL><DEDENT>else:<EOL><INDENT>return numpy.sum(numpy.nan_to_num(result))<EOL><DEDENT>", "docstring": "!\n    @brief Calculate Canberra distance between two objects using numpy.\n\n    @param[in] object1 (array_like): The first vector.\n    @param[in] object2 (array_like): The second vector.\n\n    @return (float) Canberra distance between two objects.", "id": "f15695:m11"}
{"signature": "def chi_square_distance_numpy(object1, object2):", "body": "with numpy.errstate(divide='<STR_LIT:ignore>', invalid='<STR_LIT:ignore>'):<EOL><INDENT>result = numpy.divide(numpy.power(object1 - object2, <NUM_LIT:2>), numpy.abs(object1) + numpy.abs(object2))<EOL><DEDENT>if len(result.shape) > <NUM_LIT:1>:<EOL><INDENT>return numpy.sum(numpy.nan_to_num(result), axis=<NUM_LIT:1>).T<EOL><DEDENT>else:<EOL><INDENT>return numpy.sum(numpy.nan_to_num(result))<EOL><DEDENT>", "docstring": "!\n    @brief Calculate Chi square distance between two vectors using numpy.\n\n    @param[in] object1 (array_like): The first vector.\n    @param[in] object2 (array_like): The second vector.\n\n    @return (float) Chi square distance between two objects.", "id": "f15695:m13"}
{"signature": "def euclidean_distance_square(point1, point2):", "body": "distance = <NUM_LIT:0.0><EOL>for i in range(len(point1)):<EOL><INDENT>distance += (point1[i] - point2[i]) ** <NUM_LIT><EOL><DEDENT>return distance<EOL>", "docstring": "!\n    @brief Calculate square Euclidean distance between two vectors.\n\n    \\f[\n    dist(a, b) = \\sum_{i=0}^{N}(a_{i} - b_{i})^{2};\n    \\f]\n\n    @param[in] point1 (array_like): The first vector.\n    @param[in] point2 (array_like): The second vector.\n\n    @return (double) Square Euclidean distance between two vectors.\n\n    @see euclidean_distance, manhattan_distance, chebyshev_distance", "id": "f15695:m2"}
{"signature": "def chebyshev_distance_numpy(object1, object2):", "body": "return numpy.max(numpy.absolute(object1 - object2), axis=<NUM_LIT:1>).T<EOL>", "docstring": "!\n    @brief Calculate Chebyshev distance between two objects using numpy.\n\n    @param[in] object1 (array_like): The first array_like object.\n    @param[in] object2 (array_like): The second array_like object.\n\n    @return (double) Chebyshev distance between two objects.", "id": "f15695:m7"}
{"signature": "def __create_distance_calculator(self):", "body": "if self.__numpy is True:<EOL><INDENT>return self.__create_distance_calculator_numpy()<EOL><DEDENT>return self.__create_distance_calculator_basic()<EOL>", "docstring": "!\n        @brief Creates distance metric calculator.\n\n        @return (callable) Callable object of distance metric calculator.", "id": "f15695:c1:m7"}
{"signature": "def __init__(self, metric_type, **kwargs):", "body": "self.__type = metric_type<EOL>self.__args = kwargs<EOL>self.__func = self.__args.get('<STR_LIT>', None)<EOL>self.__numpy = self.__args.get('<STR_LIT>', False)<EOL>self.__calculator = self.__create_distance_calculator()<EOL>", "docstring": "!\n        @brief Creates distance metric instance for calculation distance between two points.\n\n        @param[in] metric_type (type_metric):\n        @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'numpy_usage' 'func' and corresponding additional argument for\n                    for specific metric types).\n\n        <b>Keyword Args:</b><br>\n            - func (callable): Callable object with two arguments (point #1 and point #2) or (object #1 and object #2) in case of numpy usage.\n                                This argument is used only if metric is 'type_metric.USER_DEFINED'.\n            - degree (numeric): Only for 'type_metric.MINKOWSKI' - degree of Minkowski equation.\n            - numpy_usage (bool): If True then numpy is used for calculation (by default is False).", "id": "f15695:c1:m0"}
{"signature": "def euclidean_distance(point1, point2):", "body": "distance = euclidean_distance_square(point1, point2)<EOL>return distance ** <NUM_LIT:0.5><EOL>", "docstring": "!\n    @brief Calculate Euclidean distance between two vectors.\n    @details The Euclidean between vectors (points) a and b is calculated by following formula:\n\n    \\f[\n    dist(a, b) = \\sqrt{ \\sum_{i=0}^{N}(a_{i} - b_{i})^{2} };\n    \\f]\n\n    Where N is a length of each vector.\n\n    @param[in] point1 (array_like): The first vector.\n    @param[in] point2 (array_like): The second vector.\n\n    @return (double) Euclidean distance between two vectors.\n\n    @see euclidean_distance_square, manhattan_distance, chebyshev_distance", "id": "f15695:m0"}
{"signature": "def enable_numpy_usage(self):", "body": "self.__numpy = True<EOL>if self.__type != type_metric.USER_DEFINED:<EOL><INDENT>self.__calculator = self.__create_distance_calculator()<EOL><DEDENT>", "docstring": "!\n        @brief Start numpy for distance calculation.\n        @details Useful in case matrices to increase performance. No effect in case of type_metric.USER_DEFINED type.", "id": "f15695:c1:m5"}
{"signature": "def minkowski_distance(point1, point2, degree=<NUM_LIT:2>):", "body": "distance = <NUM_LIT:0.0><EOL>for i in range(len(point1)):<EOL><INDENT>distance += (point1[i] - point2[i]) ** degree<EOL><DEDENT>return distance ** (<NUM_LIT:1.0> / degree)<EOL>", "docstring": "!\n    @brief Calculate Minkowski distance between two vectors.\n\n    \\f[\n    dist(a, b) = \\sqrt[p]{ \\sum_{i=0}^{N}\\left(a_{i} - b_{i}\\right)^{p} };\n    \\f]\n\n    @param[in] point1 (array_like): The first vector.\n    @param[in] point2 (array_like): The second vector.\n    @param[in] degree (numeric): Degree of that is used for Minkowski distance.\n\n    @return (double) Minkowski distance between two vectors.\n\n    @see euclidean_distance", "id": "f15695:m8"}
{"signature": "def chi_square_distance(point1, point2):", "body": "distance = <NUM_LIT:0.0><EOL>for i in range(len(point1)):<EOL><INDENT>divider = abs(point1[i]) + abs(point2[i])<EOL>if divider == <NUM_LIT:0.0>:<EOL><INDENT>continue<EOL><DEDENT>distance += ((point1[i] - point2[i]) ** <NUM_LIT>) / divider<EOL><DEDENT>return distance<EOL>", "docstring": "!\n    @brief Calculate Chi square distance between two vectors.\n\n    \\f[\n    dist(a, b) = \\sum_{i=0}^{N}\\frac{\\left ( a_{i} - b_{i} \\right )^{2}}{\\left | a_{i} \\right | + \\left | b_{i} \\right |};\n    \\f]\n\n    @param[in] point1 (array_like): The first vector.\n    @param[in] point2 (array_like): The second vector.\n\n    @return (float) Chi square distance between two objects.", "id": "f15695:m12"}
{"signature": "def draw_graph(graph_instance, map_coloring = None):", "body": "if (graph_instance.space_description is None):<EOL><INDENT>raise NameError(\"<STR_LIT>\");<EOL><DEDENT>if (map_coloring is not None):<EOL><INDENT>if (len(graph_instance) != len(map_coloring)):<EOL><INDENT>raise NameError(\"<STR_LIT>\");<EOL><DEDENT><DEDENT>fig = plt.figure();<EOL>axes = fig.add_subplot(<NUM_LIT>);<EOL>available_colors = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'];<EOL>if (map_coloring is not None):<EOL><INDENT>if (len(map_coloring) > len(available_colors)):<EOL><INDENT>raise NameError('<STR_LIT>');<EOL><DEDENT><DEDENT>x_maximum = -float('<STR_LIT>');<EOL>x_minimum = float('<STR_LIT>');<EOL>y_maximum = -float('<STR_LIT>');<EOL>y_minimum = float('<STR_LIT>');<EOL>for i in range(<NUM_LIT:0>, len(graph_instance.space_description), <NUM_LIT:1>):<EOL><INDENT>if (graph_instance.type_graph_descr == type_graph_descr.GRAPH_MATRIX_DESCR):<EOL><INDENT>for j in range(i, len(graph_instance.space_description), <NUM_LIT:1>):    <EOL><INDENT>if (graph_instance.data[i][j] == <NUM_LIT:1>):<EOL><INDENT>axes.plot([graph_instance.space_description[i][<NUM_LIT:0>], graph_instance.space_description[j][<NUM_LIT:0>]], [graph_instance.space_description[i][<NUM_LIT:1>], graph_instance.space_description[j][<NUM_LIT:1>]], '<STR_LIT>', linewidth = <NUM_LIT>);<EOL><DEDENT><DEDENT><DEDENT>elif (graph_instance.type_graph_descr == type_graph_descr.GRAPH_VECTOR_DESCR):<EOL><INDENT>for j in graph_instance.data[i]:<EOL><INDENT>if (i > j):     <EOL><INDENT>axes.plot([graph_instance.space_description[i][<NUM_LIT:0>], graph_instance.space_description[j][<NUM_LIT:0>]], [graph_instance.space_description[i][<NUM_LIT:1>], graph_instance.space_description[j][<NUM_LIT:1>]], 
'<STR_LIT>', linewidth = <NUM_LIT>);   <EOL><DEDENT><DEDENT><DEDENT>color_node = '<STR_LIT:b>';<EOL>if (map_coloring is not None):<EOL><INDENT>color_node = colors.hex2color(available_colors[map_coloring[i]]);<EOL><DEDENT>axes.plot(graph_instance.space_description[i][<NUM_LIT:0>], graph_instance.space_description[i][<NUM_LIT:1>], color = color_node, marker = '<STR_LIT:o>', markersize = <NUM_LIT:20>);<EOL>if (x_maximum < graph_instance.space_description[i][<NUM_LIT:0>]): x_maximum = graph_instance.space_description[i][<NUM_LIT:0>];<EOL>if (x_minimum > graph_instance.space_description[i][<NUM_LIT:0>]): x_minimum = graph_instance.space_description[i][<NUM_LIT:0>];  <EOL>if (y_maximum < graph_instance.space_description[i][<NUM_LIT:1>]): y_maximum = graph_instance.space_description[i][<NUM_LIT:1>]; <EOL>if (y_minimum > graph_instance.space_description[i][<NUM_LIT:1>]): y_minimum = graph_instance.space_description[i][<NUM_LIT:1>];<EOL><DEDENT>plt.xlim(x_minimum - <NUM_LIT:0.5>, x_maximum + <NUM_LIT:0.5>);<EOL>plt.ylim(y_minimum - <NUM_LIT:0.5>, y_maximum + <NUM_LIT:0.5>);<EOL>plt.show()<EOL>", "docstring": "!\n    @brief Draw graph.\n\n    @param[in] graph_instance (graph): Graph that should be drawn.\n    @param[in] map_coloring (list): List of color indexes for each vertex. Size of this list should be equal to size of graph (number of vertices).\n                                    If it's not specified (None) than graph without coloring will be dwarn.\n\n    @warning Graph can be represented if there is space representation for it.", "id": "f15696:m1"}
{"signature": "def read_graph(filename):", "body": "file = open(filename, '<STR_LIT:r>');<EOL>comments = \"<STR_LIT>\";<EOL>space_descr = [];<EOL>data = [];<EOL>data_type = None;<EOL>map_data_repr = dict();   <EOL>for line in file:<EOL><INDENT>if (line[<NUM_LIT:0>] == '<STR_LIT:c>' or line[<NUM_LIT:0>] == '<STR_LIT:p>'): <EOL><INDENT>comments += line[<NUM_LIT:1>:]; <EOL><DEDENT>elif (line[<NUM_LIT:0>] == '<STR_LIT:r>'): <EOL><INDENT>node_coordinates = [float(val) for val in line[<NUM_LIT:1>:].split()];<EOL>if (len(node_coordinates) != <NUM_LIT:2>):<EOL><INDENT>raise NameError('<STR_LIT>');<EOL><DEDENT>space_descr.append( [float(val) for val in line[<NUM_LIT:1>:].split()] );<EOL><DEDENT>elif (line[<NUM_LIT:0>] == '<STR_LIT:m>'):<EOL><INDENT>if ( (data_type is not None) and (data_type != '<STR_LIT:m>') ):<EOL><INDENT>raise NameError('<STR_LIT>');<EOL><DEDENT>data_type = '<STR_LIT:m>';<EOL>data.append( [float(val) for val in line[<NUM_LIT:1>:].split()] );<EOL><DEDENT>elif (line[<NUM_LIT:0>] == '<STR_LIT:v>'):<EOL><INDENT>if ( (data_type is not None) and (data_type != '<STR_LIT:v>') ):<EOL><INDENT>raise NameError('<STR_LIT>');<EOL><DEDENT>data_type = '<STR_LIT:v>';<EOL>data.append( [float(val) for val in line[<NUM_LIT:1>:].split()] );<EOL><DEDENT>elif (line[<NUM_LIT:0>] == '<STR_LIT:e>'):<EOL><INDENT>if ( (data_type is not None) and (data_type != '<STR_LIT:e>') ):<EOL><INDENT>raise NameError('<STR_LIT>');<EOL><DEDENT>data_type = '<STR_LIT:e>';<EOL>vertices = [int(val) for val in line[<NUM_LIT:1>:].split()];<EOL>if (vertices[<NUM_LIT:0>] not in map_data_repr):<EOL><INDENT>map_data_repr[ vertices[<NUM_LIT:0>] ] = [ vertices[<NUM_LIT:1>] ];<EOL><DEDENT>else:<EOL><INDENT>map_data_repr[ vertices[<NUM_LIT:0>] ].append(vertices[<NUM_LIT:1>])<EOL><DEDENT>if (vertices[<NUM_LIT:1>] not in map_data_repr):<EOL><INDENT>map_data_repr[ vertices[<NUM_LIT:1>] ] = [ vertices[<NUM_LIT:0>] ];<EOL><DEDENT>else:<EOL><INDENT>map_data_repr[ vertices[<NUM_LIT:1>] 
].append(vertices[<NUM_LIT:0>]);<EOL><DEDENT><DEDENT>elif (len(line.strip()) == <NUM_LIT:0>): continue;<EOL>else: <EOL><INDENT>print(line);<EOL>raise NameError('<STR_LIT>');<EOL><DEDENT><DEDENT>if (data_type == '<STR_LIT:e>'):<EOL><INDENT>for index in range(len(map_data_repr)):<EOL><INDENT>data.append([<NUM_LIT:0>] * len(map_data_repr));<EOL>for index_neighbour in map_data_repr[index + <NUM_LIT:1>]:<EOL><INDENT>data[index][index_neighbour - <NUM_LIT:1>] = <NUM_LIT:1>;<EOL><DEDENT><DEDENT><DEDENT>file.close();<EOL>graph_descr = None;<EOL>if (data_type == '<STR_LIT:m>'): graph_descr = type_graph_descr.GRAPH_MATRIX_DESCR;<EOL>elif (data_type == '<STR_LIT:v>'): graph_descr = type_graph_descr.GRAPH_VECTOR_DESCR;<EOL>elif (data_type == '<STR_LIT:e>'): graph_descr = type_graph_descr.GRAPH_MATRIX_DESCR;<EOL>else:<EOL><INDENT>raise NameError('<STR_LIT>');<EOL><DEDENT>if (space_descr != []):<EOL><INDENT>if (len(data) != len(space_descr)):<EOL><INDENT>raise NameError(\"<STR_LIT>\");<EOL><DEDENT><DEDENT>return graph(data, graph_descr, space_descr, comments)<EOL>", "docstring": "!\n    @brief Read graph from file in GRPR format.\n\n    @param[in] filename (string): Path to file with graph in GRPR format.\n\n    @return (graph) Graph that is read from file.", "id": "f15696:m0"}
{"signature": "def __len__(self):", "body": "return len(self.__data)<EOL>", "docstring": "!\n        @return (uint) Size of graph defined by number of vertices.", "id": "f15696:c1:m1"}
{"signature": "@property<EOL><INDENT>def space_description(self):<DEDENT>", "body": "if (self.__space_descr == [] or self.__space_descr is None):<EOL><INDENT>return None;<EOL><DEDENT>return self.__space_descr<EOL>", "docstring": "!\n        @return (list) Space description.", "id": "f15696:c1:m3"}
{"signature": "@staticmethod<EOL><INDENT>def get_color(sequential_index):<DEDENT>", "body": "return color.TITLES[sequential_index % len(color.TITLES)]<EOL>", "docstring": "!\n        @brief Returns color using round robin to avoid out of range exception.\n\n        @param[in] sequential_index (uint): Index that should be converted to valid color index.\n\n        @return (uint) Color from list color.TITLES.", "id": "f15697:c0:m0"}
{"signature": "def _create_connections(self, graph_matrix):", "body": "for row in range(<NUM_LIT:0>, len(graph_matrix)):<EOL><INDENT>for column in range (<NUM_LIT:0>, len(graph_matrix[row])):<EOL><INDENT>if (graph_matrix[row][column] > <NUM_LIT:0>):<EOL><INDENT>self.set_connection(row, column);<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "!\n        @brief Creates connection in the network in line with graph.\n\n        @param[in] graph_matrix (list): Matrix representation of the graph.", "id": "f15698:c1:m1"}
{"signature": "def allocate_color_clusters(self, tolerance = <NUM_LIT:0.1>):", "body": "return self.allocate_sync_ensembles(tolerance)<EOL>", "docstring": "!\n        @brief Allocates clusters, when one cluster defines only one color.\n\n        @param[in] tolerance (double): Defines maximum deviation between phases.\n\n        @return (list) Clusters [vertices with color 1], [vertices with color 2], ..., [vertices with color n].", "id": "f15698:c0:m1"}
{"signature": "def allocate_map_coloring(self, tolerance = <NUM_LIT:0.1>):", "body": "clusters = self.allocate_color_clusters(tolerance);<EOL>number_oscillators = len(self._dynamic[<NUM_LIT:0>]);<EOL>coloring_map = [<NUM_LIT:0>] * number_oscillators;<EOL>for color_index in range(len(clusters)):<EOL><INDENT>for node_index in clusters[color_index]:<EOL><INDENT>coloring_map[node_index] = color_index;<EOL><DEDENT><DEDENT>return coloring_map<EOL>", "docstring": "!\n        @brief Allocates coloring map for graph that has been processed.\n\n        @param[in] tolerance (double): Defines maximum deviation between phases.\n\n        @return (list) Colors for each node (index of node in graph), for example [color1, color2, color2, ...].", "id": "f15698:c0:m2"}
{"signature": "def _phase_kuramoto(self, teta, t, argv):", "body": "index = argv;<EOL>phase = <NUM_LIT:0>;<EOL>for k in range(<NUM_LIT:0>, self._num_osc):<EOL><INDENT>if (self.has_connection(index, k) == True):<EOL><INDENT>phase += self._negative_weight * math.sin(self._phases[k] - teta);<EOL><DEDENT>else:<EOL><INDENT>phase += self._positive_weight * math.sin(self._phases[k] - teta);<EOL><DEDENT><DEDENT>return ( phase / self._reduction )<EOL>", "docstring": "!\n        @brief Returns result of phase calculation for oscillator in the network.\n\n        @param[in] teta (double): Value of phase of the oscillator with index argv in the network.\n        @param[in] t (double): Unused, can be ignored.\n        @param[in] argv (uint): Index of the oscillator in the network.\n\n        @return (double) New value of phase for oscillator with index argv.", "id": "f15698:c1:m2"}
{"signature": "def __init__(self, phase, time, pointer_sync_analyser):", "body": "super().__init__(phase, time, pointer_sync_analyser)<EOL>", "docstring": "!\n        @brief Constructor of the analyser.\n\n        @param[in] phase (list): Output dynamic of the oscillatory network, where one iteration consists of all phases of oscillators.\n        @param[in] time (list): Simulation time.\n        @param[in] pointer_sync_analyser (POINTER): Pointer to CCORE analyser, if specified then other arguments can be omitted.", "id": "f15698:c0:m0"}
{"signature": "def process(self, steps, time, collect_dynamic=True):", "body": "output_dynamic = super().simulate(steps, time, collect_dynamic=collect_dynamic)<EOL>return hysteresis_analyser(output_dynamic.output, output_dynamic.time)<EOL>", "docstring": "!\n        @brief Performs graph coloring analysis using simulation of the oscillatory network.\n\n        @param[in] steps (uint): Number of steps of the simulation.\n        @param[in] time (double): Time of simulation.\n        @param[in] collect_dynamic (bool): Specified requirement to collect whole dynamic of the network.\n\n        @return (hysteresis_analyser) Returns analyser of results of clustering.", "id": "f15703:c1:m1"}
{"signature": "def __init__(self, graph_matrix, alpha, eps):", "body": "number_oscillators = len(graph_matrix)<EOL>super().__init__(number_oscillators)<EOL>self._states = [<NUM_LIT:0>] * self._num_osc<EOL>for i in range(<NUM_LIT:0>, self._num_osc):<EOL><INDENT>self._states[i] = <NUM_LIT:1> - (<NUM_LIT:2> / self._num_osc) * i<EOL><DEDENT>self._outputs = [-<NUM_LIT:1>] * self._num_osc<EOL>self._outputs_buffer = [-<NUM_LIT:1>] * self._num_osc<EOL>self._time_contant = <NUM_LIT:1><EOL>self._weight = []<EOL>for row in range(<NUM_LIT:0>, self._num_osc):<EOL><INDENT>self._weight.append([<NUM_LIT:0>] * self._num_osc)<EOL>for col in range(<NUM_LIT:0>, self._num_osc):<EOL><INDENT>if (row != col):<EOL><INDENT>self._weight[row][col] = -alpha * (graph_matrix[row][col]) / sum(graph_matrix[row])<EOL><DEDENT>else:<EOL><INDENT>self._weight[row][col] = -alpha - eps<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "!\n        @brief Constructor of hysteresis oscillatory network for graph coloring.\n\n        @param[in] graph_matrix (list): Matrix representation of a graph.\n        @param[in] alpha (double): Positive constant (affect weight between two oscillators w[i][j]).\n        @param[in] eps (double): Positive constant (affect feedback to itself (i = j) of each oscillator w[i][j] = -alpha - eps).", "id": "f15703:c1:m0"}
{"signature": "def __init__(self, amplitudes, time):", "body": "super().__init__(amplitudes, time)<EOL>", "docstring": "!\n        @brief Constructor of the analyser.\n\n        @param[in] amplitudes (list): Output dynamic of the hysteresis oscillatory network, where one iteration consists of all amplitudes of oscillators.\n        @param[in] time (list): Simulation time (timestamps of simulation steps) when amplitudes are stored.", "id": "f15703:c0:m0"}
{"signature": "def allocate_map_coloring(self, tolerance, threshold_steps = <NUM_LIT:10>):", "body": "clusters = self.allocate_clusters(tolerance, threshold_steps)<EOL>coloring_map = [<NUM_LIT:0>] * len(self._dynamic[<NUM_LIT:0>])<EOL>for color_index in range(len(clusters)):<EOL><INDENT>for node_index in clusters[color_index]:<EOL><INDENT>coloring_map[node_index] = color_index<EOL><DEDENT><DEDENT>return coloring_map<EOL>", "docstring": "!\n        @brief Returns list of color indexes that are assigned to each object from input data space accordingly.\n\n        @param[in] tolerance (double): Tolerance level that define maximal difference between outputs of oscillators in one synchronous ensemble.\n        @param[in] threshold_steps (uint): Number of steps from the end of simulation that should be analysed for ensemble allocation.\n                    If amount of simulation steps has been less than threshold steps then amount of steps will be reduced to amount\n                    of simulation steps.\n\n        @remark Results can be obtained only after network simulation (graph processing by the network).\n\n        @return (list) Color indexes that are assigned to each object from input data space accordingly.\n\n        @see allocate_clusters()", "id": "f15703:c0:m2"}
{"signature": "def graph_five_pointed_frame_star():", "body": "template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_FIVE_POINTED_FRAME_STAR, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT:30>)<EOL>", "docstring": "Good result - not optimal", "id": "f15705:m10"}
{"signature": "def graph_full_interconnected1():", "body": "template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_FULL1, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT:20>, tolerance = <NUM_LIT>)<EOL>", "docstring": "Bad result - two vertices colored by the same color", "id": "f15705:m5"}
{"signature": "def graph_one_line():", "body": "template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_ONE_LINE, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT:20>)<EOL>", "docstring": "Good result - optimal", "id": "f15705:m2"}
{"signature": "def graph_one_crossroad():", "body": "template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_ONE_CROSSROAD, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT:20>)<EOL>", "docstring": "Good result - optimal", "id": "f15705:m3"}
{"signature": "def graph_two_crossroads():", "body": "template_graph_coloring(GRAPH_SIMPLE_SAMPLES.GRAPH_TWO_CROSSROADS, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT:20>)<EOL>", "docstring": "Good result - optimal", "id": "f15705:m4"}
{"signature": "def __get_amount_color(self, node_indexes, color_number):", "body": "color_counter = <NUM_LIT:0>;  <EOL>for index in node_indexes:<EOL><INDENT>if (self.__coloring[index] == color_number):<EOL><INDENT>color_counter += <NUM_LIT:1>;<EOL><DEDENT><DEDENT>return color_counter<EOL>", "docstring": "!\n        @brief Counts how many nodes have color 'color_number'.\n\n        @param[in] node_indexes (list): Indexes of graph nodes for checking.\n        @param[in] color_number (uint): Number of color that is searched in nodes.\n\n        @return (uint) Number of found nodes with the specified color 'color_number'.", "id": "f15708:c0:m3"}
{"signature": "def process(self):", "body": "color_counter = <NUM_LIT:1>;<EOL>degrees = list();<EOL>saturation_degrees = [<NUM_LIT:0>] * len(self.__data_pointer);<EOL>self.__coloring = [<NUM_LIT:0>] * len(self.__data_pointer);<EOL>uncolored_vertices = set(range(len(self.__data_pointer)));<EOL>index_maximum_degree = <NUM_LIT:0>;<EOL>maximum_degree = <NUM_LIT:0>;<EOL>for index_node in range(len(self.__data_pointer)):<EOL><INDENT>degrees.append( ( sum(self.__data_pointer[index_node]), index_node ) );<EOL>if (degrees[index_node][<NUM_LIT:0>] > maximum_degree):<EOL><INDENT>(maximum_degree, node_index) = degrees[index_node];<EOL>index_maximum_degree = index_node;<EOL><DEDENT><DEDENT>neighbors = self.__get_neighbors(index_maximum_degree);<EOL>for index_neighbor in neighbors:<EOL><INDENT>saturation_degrees[index_neighbor] += <NUM_LIT:1>;<EOL><DEDENT>self.__coloring[index_maximum_degree] = color_counter;<EOL>uncolored_vertices.remove(index_maximum_degree);<EOL>while(len(uncolored_vertices) > <NUM_LIT:0>):<EOL><INDENT>maximum_satur_degree = -<NUM_LIT:1>;<EOL>for index in uncolored_vertices:<EOL><INDENT>if (saturation_degrees[index] > maximum_satur_degree):<EOL><INDENT>maximum_satur_degree = saturation_degrees[index];<EOL><DEDENT><DEDENT>indexes_maximum_satur_degree = [index for index in uncolored_vertices if saturation_degrees[index] == maximum_satur_degree];           <EOL>coloring_index = indexes_maximum_satur_degree[<NUM_LIT:0>];<EOL>if (len(indexes_maximum_satur_degree) > <NUM_LIT:1>): <EOL><INDENT>maximum_degree = -<NUM_LIT:1>;<EOL>for index in indexes_maximum_satur_degree:<EOL><INDENT>(degree, node_index) = degrees[index];<EOL>if (degree > maximum_degree):<EOL><INDENT>coloring_index = node_index;<EOL>maximum_degree = degree;<EOL><DEDENT><DEDENT><DEDENT>node_index_neighbors = self.__get_neighbors(coloring_index);<EOL>for number_color in range(<NUM_LIT:1>, color_counter + <NUM_LIT:1>, <NUM_LIT:1>):<EOL><INDENT>if (self.__get_amount_color(node_index_neighbors, 
number_color) == <NUM_LIT:0>):<EOL><INDENT>self.__coloring[coloring_index] = number_color;<EOL>break;<EOL><DEDENT><DEDENT>if (self.__coloring[coloring_index] == <NUM_LIT:0>):<EOL><INDENT>color_counter += <NUM_LIT:1>;     <EOL>self.__coloring[coloring_index] = color_counter;<EOL><DEDENT>uncolored_vertices.remove(coloring_index);<EOL>for index_neighbor in node_index_neighbors:<EOL><INDENT>subneighbors = self.__get_neighbors(index_neighbor);<EOL>if (self.__get_amount_color(subneighbors, self.__coloring[coloring_index]) == <NUM_LIT:1>):<EOL><INDENT>saturation_degrees[index_neighbor] += <NUM_LIT:1>;<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "!\n        @brief Perform graph coloring using DSATUR algorithm.\n\n        @see get_colors()", "id": "f15708:c0:m1"}
{"signature": "def get_radius(self):", "body": "if (self.__radius is not None):<EOL><INDENT>return self.__radius;<EOL><DEDENT>centroid = self.get_centroid();<EOL>radius_part_1 = self.square_sum;<EOL>radius_part_2 = <NUM_LIT:0.0>;<EOL>radius_part_3 = <NUM_LIT:0.0>;<EOL>if (type(centroid) == list):<EOL><INDENT>radius_part_2 = <NUM_LIT> * sum(list_math_multiplication(self.linear_sum, centroid));<EOL>radius_part_3 = self.number_points * sum(list_math_multiplication(centroid, centroid));<EOL><DEDENT>else:<EOL><INDENT>radius_part_2 = <NUM_LIT> * self.linear_sum * centroid;<EOL>radius_part_3 = self.number_points * centroid * centroid;<EOL><DEDENT>self.__radius = ( (<NUM_LIT:1.0> / self.number_points) * (radius_part_1 - radius_part_2 + radius_part_3) ) ** <NUM_LIT:0.5>;<EOL>return self.__radius<EOL>", "docstring": "!\n        @brief Calculates radius of cluster that is represented by the entry.\n        @details It's calculated once when it's requested after the last changes.\n\n        @return (double) Radius of cluster that is represented by the entry.", "id": "f15714:c2:m12"}
{"signature": "def __init__(self, feature, parent, entries, payload):", "body": "super().__init__(feature, parent, payload);<EOL>self.type = cfnode_type.CFNODE_LEAF;<EOL>self.__entries = entries<EOL>", "docstring": "!\n        @brief Create CF Leaf node.\n\n        @param[in] feature (cfentry): Clustering feature of the created node.\n        @param[in] parent (non_leaf_node): Parent of the created node.\n        @param[in] entries (list): List of entries of the node.\n        @param[in] payload (*): Data that is stored by the node.", "id": "f15714:c5:m1"}
{"signature": "def get_nearest_successors(self, type_measurement):", "body": "nearest_node1 = None;<EOL>nearest_node2 = None;<EOL>nearest_distance = float(\"<STR_LIT>\");<EOL>for i in range(<NUM_LIT:0>, len(self.successors)):<EOL><INDENT>candidate1 = self.successors[i];<EOL>for j in range(i + <NUM_LIT:1>, len(self.successors)):<EOL><INDENT>candidate2 = self.successors[j];<EOL>candidate_distance = candidate1.get_distance(candidate2, type_measurement);<EOL>if (candidate_distance < nearest_distance):<EOL><INDENT>nearest_distance = candidate_distance;<EOL>nearest_node1 = candidate1;<EOL>nearest_node2 = candidate2;<EOL><DEDENT><DEDENT><DEDENT>return [nearest_node1, nearest_node2]<EOL>", "docstring": "!\n        @brief Find pair of nearest successors of the node in line with measurement type.\n\n        @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining nearest successors.\n\n        @return (list) Pair of nearest successors represented by list.", "id": "f15714:c4:m8"}
{"signature": "def merge(self, node):", "body": "self.feature += node.feature;<EOL>for child in node.successors:<EOL><INDENT>child.parent = self;<EOL>self.successors.append(child);<EOL><DEDENT>", "docstring": "!\n        @brief Merge non-leaf node to the current.\n\n        @param[in] node (non_leaf_node): Non-leaf node that should be merged with current.", "id": "f15714:c4:m6"}
{"signature": "def __get_average_intra_cluster_distance(self, entry):", "body": "linear_part_first = list_math_addition(self.linear_sum, entry.linear_sum);<EOL>linear_part_second = linear_part_first;<EOL>linear_part_distance = sum(list_math_multiplication(linear_part_first, linear_part_second));<EOL>general_part_distance = <NUM_LIT> * (self.number_points + entry.number_points) * (self.square_sum + entry.square_sum) - <NUM_LIT> * linear_part_distance;<EOL>return (general_part_distance / ( (self.number_points + entry.number_points) * (self.number_points + entry.number_points - <NUM_LIT:1.0>) )) ** <NUM_LIT:0.5><EOL>", "docstring": "!\n        @brief Calculates average intra cluster distance between current and specified clusters.\n\n        @param[in] entry (cfentry): Clustering feature to which distance should be obtained.\n\n        @return (double) Average intra cluster distance.", "id": "f15714:c2:m15"}
{"signature": "def __eq__(self, entry):", "body": "tolerance = <NUM_LIT>;<EOL>result = (self.__number_points == entry.number_points);<EOL>result &= ( (self.square_sum + tolerance > entry.square_sum) and (self.square_sum - tolerance < entry.square_sum) );<EOL>for index_dimension in range(<NUM_LIT:0>, len(self.linear_sum)):<EOL><INDENT>result &= ( (self.linear_sum[index_dimension] + tolerance > entry.linear_sum[index_dimension]) and (self.linear_sum[index_dimension] - tolerance < entry.linear_sum[index_dimension]) );<EOL><DEDENT>return result<EOL>", "docstring": "!\n        @brief Overloaded operator eq. \n        @details Performs comparison of two clustering features.\n\n        @param[in] entry (cfentry): Entry that is used for comparison with current.\n\n        @return (bool) True is both clustering features are equals in line with tolerance, otherwise False.", "id": "f15714:c2:m9"}
{"signature": "def show_feature_destibution(self, data = None):", "body": "visualizer = cluster_visualizer();<EOL>print(\"<STR_LIT>\", self.__amount_nodes);<EOL>if (data is not None):<EOL><INDENT>visualizer.append_cluster(data, marker = '<STR_LIT:x>');<EOL><DEDENT>for level in range(<NUM_LIT:0>, self.height):<EOL><INDENT>level_nodes = self.get_level_nodes(level);<EOL>centers = [ node.feature.get_centroid() for node in level_nodes ];<EOL>visualizer.append_cluster(centers, None, markersize = (self.height - level + <NUM_LIT:1>) * <NUM_LIT:5>);<EOL><DEDENT>visualizer.show()<EOL>", "docstring": "!\n        @brief Shows feature distribution.\n        @details Only features in 1D, 2D, 3D space can be visualized.\n\n        @param[in] data (list): List of points that will be used for visualization, if it not specified than feature will be displayed only.", "id": "f15714:c6:m22"}
{"signature": "def __insert_for_noneleaf_node(self, entry, search_node):", "body": "node_amount_updation = False;<EOL>min_key = lambda child_node: child_node.get_distance(search_node, self.__type_measurement);<EOL>nearest_child_node = min(search_node.successors, key = min_key);<EOL>child_node_updation = self.__recursive_insert(entry, nearest_child_node);<EOL>search_node.feature += entry;<EOL>if (len(search_node.successors) > self.__branch_factor):<EOL><INDENT>if (search_node is self.__root):<EOL><INDENT>self.__root = non_leaf_node(search_node.feature, None, [ search_node ], None);<EOL>search_node.parent = self.__root;<EOL>self.__amount_nodes += <NUM_LIT:1>;<EOL>self.__height += <NUM_LIT:1>;<EOL><DEDENT>[new_node1, new_node2] = self.__split_nonleaf_node(search_node);<EOL>parent = search_node.parent;<EOL>parent.successors.remove(search_node);<EOL>parent.successors.append(new_node1);<EOL>parent.successors.append(new_node2);<EOL>self.__amount_nodes += <NUM_LIT:1>;<EOL>node_amount_updation = True;<EOL><DEDENT>elif (child_node_updation is True):<EOL><INDENT>if (self.__merge_nearest_successors(search_node) is True):<EOL><INDENT>self.__amount_nodes -= <NUM_LIT:1>;<EOL><DEDENT><DEDENT>return node_amount_updation<EOL>", "docstring": "!\n        @brief Recursive insert entry from none-leaf node to the tree.\n\n        @param[in] entry (cfentry): Clustering feature.\n        @param[in] search_node (cfnode): None-leaf node from that insertion should be started.\n\n        @return (bool) True if number of nodes at the below level is changed, otherwise False.", "id": "f15714:c6:m17"}
{"signature": "def __add__(self, entry):", "body": "number_points = self.number_points + entry.number_points;<EOL>result_linear_sum = list_math_addition(self.linear_sum, entry.linear_sum);<EOL>result_square_sum = self.square_sum + entry.square_sum;  <EOL>return cfentry(number_points, result_linear_sum, result_square_sum)<EOL>", "docstring": "!\n        @brief Overloaded operator add. Performs addition of two clustering features.\n\n        @param[in] entry (cfentry): Entry that is added to the current.\n\n        @return (cfentry) Result of addition of two clustering features.", "id": "f15714:c2:m7"}
{"signature": "def __get_variance_increase_distance(self, entry):", "body": "linear_part_12 = list_math_addition(self.linear_sum, entry.linear_sum);<EOL>variance_part_first = (self.square_sum + entry.square_sum) -<NUM_LIT> * sum(list_math_multiplication(linear_part_12, linear_part_12)) / (self.number_points + entry.number_points) +(self.number_points + entry.number_points) * sum(list_math_multiplication(linear_part_12, linear_part_12)) / (self.number_points + entry.number_points)**<NUM_LIT>;<EOL>linear_part_11 = sum(list_math_multiplication(self.linear_sum, self.linear_sum));<EOL>variance_part_second = -( self.square_sum - (<NUM_LIT> * linear_part_11 / self.number_points) + (linear_part_11 / self.number_points) );<EOL>linear_part_22 = sum(list_math_multiplication(entry.linear_sum, entry.linear_sum));<EOL>variance_part_third = -( entry.square_sum - (<NUM_LIT> / entry.number_points) * linear_part_22 + entry.number_points * (<NUM_LIT:1.0> / entry.number_points ** <NUM_LIT>) * linear_part_22 );<EOL>return (variance_part_first + variance_part_second + variance_part_third)<EOL>", "docstring": "!\n        @brief Calculates variance increase distance between current and specified clusters.\n\n        @param[in] entry (cfentry): Clustering feature to which distance should be obtained.\n\n        @return (double) Variance increase distance.", "id": "f15714:c2:m16"}
{"signature": "@property<EOL><INDENT>def branch_factor(self):<DEDENT>", "body": "return self.__branch_factor<EOL>", "docstring": "!\n        @return (uint) Branching factor of the tree.\n        @details Branching factor defines maximum number of successors in each non-leaf node.", "id": "f15714:c6:m5"}
{"signature": "def __str__(self):", "body": "return self.__repr__()<EOL>", "docstring": "!\n        @return (string) String representation of CF node.", "id": "f15714:c3:m2"}
{"signature": "def get_centroid(self):", "body": "if (self.__centroid is not None):<EOL><INDENT>return self.__centroid;<EOL><DEDENT>self.__centroid = [<NUM_LIT:0>] * len(self.linear_sum);<EOL>for index_dimension in range(<NUM_LIT:0>, len(self.linear_sum)):<EOL><INDENT>self.__centroid[index_dimension] = self.linear_sum[index_dimension] / self.number_points;<EOL><DEDENT>return self.__centroid<EOL>", "docstring": "!\n        @brief Calculates centroid of cluster that is represented by the entry. \n        @details It's calculated once when it's requested after the last changes.\n\n        @return (list) Centroid of cluster that is represented by the entry.", "id": "f15714:c2:m11"}
{"signature": "def __sub__(self, entry):", "body": "number_points = self.number_points - entry.number_points;<EOL>result_linear_sum = list_math_subtraction(self.linear_sum, entry.linear_sum);<EOL>result_square_sum = self.square_sum - entry.square_sum;<EOL>if ( (number_points < <NUM_LIT:0>) or (result_square_sum < <NUM_LIT:0>) ):<EOL><INDENT>raise NameError(\"<STR_LIT>\");<EOL><DEDENT>return cfentry(number_points, result_linear_sum, result_square_sum)<EOL>", "docstring": "!\n        @brief Overloaded operator sub. Performs subtraction of two clustering features.\n        @details Subtraction can't be performed with clustering feature whose description is less than the subtractor.\n\n        @param[in] entry (cfentry): Entry that is subtracted from the current.\n\n        @return (cfentry) Result of subtraction of two clustering features.", "id": "f15714:c2:m8"}
{"signature": "@property<EOL><INDENT>def successors(self):<DEDENT>", "body": "return self.__successors<EOL>", "docstring": "!\n        @return (list) List of successors of the node.", "id": "f15714:c4:m0"}
{"signature": "def __init__(self, number_points, linear_sum, square_sum):", "body": "self.__number_points = number_points;<EOL>self.__linear_sum = linear_sum;<EOL>self.__square_sum = square_sum;<EOL>self.__centroid = None;<EOL>self.__radius = None;<EOL>self.__diameter = None<EOL>", "docstring": "!\n        @brief CF-entry constructor.\n\n        @param[in] number_points (uint): Number of objects that is represented by the entry.\n        @param[in] linear_sum (list): Linear sum of values that represent objects in each dimension.\n        @param[in] square_sum (double): Square sum of values that represent objects.", "id": "f15714:c2:m3"}
{"signature": "@property<EOL><INDENT>def linear_sum(self):<DEDENT>", "body": "return self.__linear_sum<EOL>", "docstring": "!\n        @brief Returns linear sum.\n\n        @return (list) Linear sum.", "id": "f15714:c2:m1"}
{"signature": "def __init__(self, feature, parent, payload):", "body": "<EOL>self.feature = copy(feature);<EOL>self.parent = parent;<EOL>self.type = cfnode_type.CFNODE_DUMMY;<EOL>self.payload = payload<EOL>", "docstring": "!\n        @brief Constructor of abstract CF node.\n\n        @param[in] feature (cfentry): Clustering feature of the created node.\n        @param[in] parent (cfnode): Parent of the created node.\n        @param[in] payload (*): Data that is stored by the node.", "id": "f15714:c3:m0"}
{"signature": "@property<EOL><INDENT>def type_measurement(self):<DEDENT>", "body": "return self.__type_measurement<EOL>", "docstring": "!\n        @return (measurement_type) Type that is used for measuring.", "id": "f15714:c6:m8"}
{"signature": "def __repr__(self):", "body": "text_entries = \"<STR_LIT:\\n>\";<EOL>for entry in self.entries:<EOL><INDENT>text_entries += \"<STR_LIT:\\t>\" + str(entry) + \"<STR_LIT:\\n>\";<EOL><DEDENT>return '<STR_LIT>' % ( hex(id(self)), self.parent, self.feature, len(self.entries), text_entries )<EOL>", "docstring": "!\n        @return (string) Default leaf node representation.", "id": "f15714:c5:m2"}
{"signature": "def insert_cluster(self, cluster):", "body": "entry = cfentry(len(cluster), linear_sum(cluster), square_sum(cluster));<EOL>self.insert(entry)<EOL>", "docstring": "!\n        @brief Insert cluster that is represented as list of points where each point is represented by list of coordinates.\n        @details Clustering feature is created for that cluster and inserted to the tree.\n\n        @param[in] cluster (list): Cluster that is represented by list of points that should be inserted to the tree.", "id": "f15714:c6:m12"}
{"signature": "def get_nearest_index_entry(self, entry, type_measurement):", "body": "minimum_distance = float('<STR_LIT>');<EOL>nearest_index = <NUM_LIT:0>;<EOL>for candidate_index in range(<NUM_LIT:0>, len(self.entries)):<EOL><INDENT>candidate_distance = self.entries[candidate_index].get_distance(entry, type_measurement);<EOL>if (candidate_distance < minimum_distance):<EOL><INDENT>nearest_index = candidate_index;<EOL><DEDENT><DEDENT>return nearest_index<EOL>", "docstring": "!\n        @brief Find nearest index of nearest entry of node for the specified entry.\n\n        @param[in] entry (cfentry): Entry that is used for calculation distance.\n        @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining nearest entry to the specified.\n\n        @return (uint) Index of nearest entry of node for the specified entry.", "id": "f15714:c5:m8"}
{"signature": "def __split_nonleaf_node(self, node):", "body": "[farthest_node1, farthest_node2] = node.get_farthest_successors(self.__type_measurement);<EOL>new_node1 = non_leaf_node(farthest_node1.feature, node.parent, [ farthest_node1 ], None);<EOL>new_node2 = non_leaf_node(farthest_node2.feature, node.parent, [ farthest_node2 ], None);<EOL>farthest_node1.parent = new_node1;<EOL>farthest_node2.parent = new_node2;<EOL>for successor in node.successors:<EOL><INDENT>if ( (successor is not farthest_node1) and (successor is not farthest_node2) ):<EOL><INDENT>distance1 = new_node1.get_distance(successor, self.__type_measurement);<EOL>distance2 = new_node2.get_distance(successor, self.__type_measurement);<EOL>if (distance1 < distance2):<EOL><INDENT>new_node1.insert_successor(successor);<EOL><DEDENT>else:<EOL><INDENT>new_node2.insert_successor(successor);<EOL><DEDENT><DEDENT><DEDENT>return [new_node1, new_node2]<EOL>", "docstring": "!\n        @brief Performs splitting of the specified non-leaf node.\n\n        @param[in] node (non_leaf_node): Non-leaf node that should be splitted.\n\n        @return (list) New pair of non-leaf nodes [non_leaf_node1, non_leaf_node2].", "id": "f15714:c6:m20"}
{"signature": "def __recursive_insert(self, entry, search_node):", "body": "<EOL>if (search_node.type == cfnode_type.CFNODE_NONLEAF):<EOL><INDENT>return self.__insert_for_noneleaf_node(entry, search_node);<EOL><DEDENT>else:<EOL><INDENT>return self.__insert_for_leaf_node(entry, search_node);<EOL><DEDENT>", "docstring": "!\n        @brief Recursive insert of the entry to the tree.\n        @details It performs all required procedures during insertion such as splitting, merging.\n\n        @param[in] entry (cfentry): Clustering feature.\n        @param[in] search_node (cfnode): Node from that insertion should be started.\n\n        @return (bool) True if number of nodes at the below level is changed, otherwise False.", "id": "f15714:c6:m15"}
{"signature": "def __get_average_inter_cluster_distance(self, entry):", "body": "linear_part_distance = sum(list_math_multiplication(self.linear_sum, entry.linear_sum));<EOL>return ( (entry.number_points * self.square_sum - <NUM_LIT> * linear_part_distance + self.number_points * entry.square_sum) / (self.number_points * entry.number_points) ) ** <NUM_LIT:0.5><EOL>", "docstring": "!\n        @brief Calculates average inter cluster distance between current and specified clusters.\n\n        @param[in] entry (cfentry): Clustering feature to which distance should be obtained.\n\n        @return (double) Average inter cluster distance.", "id": "f15714:c2:m14"}
{"signature": "@property<EOL><INDENT>def leafes(self):<DEDENT>", "body": "return self.__leafes<EOL>", "docstring": "!\n        @return (list) List of all leaf nodes in the tree.", "id": "f15714:c6:m1"}
{"signature": "def get_farthest_successors(self, type_measurement):", "body": "farthest_node1 = None;<EOL>farthest_node2 = None;<EOL>farthest_distance = <NUM_LIT:0>;<EOL>for i in range(<NUM_LIT:0>, len(self.successors)):<EOL><INDENT>candidate1 = self.successors[i];<EOL>for j in range(i + <NUM_LIT:1>, len(self.successors)):<EOL><INDENT>candidate2 = self.successors[j];<EOL>candidate_distance = candidate1.get_distance(candidate2, type_measurement);<EOL>if (candidate_distance > farthest_distance):<EOL><INDENT>farthest_distance = candidate_distance;<EOL>farthest_node1 = candidate1;<EOL>farthest_node2 = candidate2;        <EOL>return [farthest_node1, farthest_node2];<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "!\n        @brief Find pair of farthest successors of the node in line with measurement type.\n\n        @param[in] type_measurement (measurement_type): Measurement type that is used for obtaining farthest successors.\n\n        @return (list) Pair of farthest successors represented by list [cfnode1, cfnode2].", "id": "f15714:c4:m7"}
{"signature": "def get_diameter(self):", "body": "if (self.__diameter is not None):<EOL><INDENT>return self.__diameter;<EOL><DEDENT>diameter_part = <NUM_LIT:0.0>;<EOL>if (type(self.linear_sum) == list):<EOL><INDENT>diameter_part = self.square_sum * self.number_points - <NUM_LIT> * sum(list_math_multiplication(self.linear_sum, self.linear_sum)) + self.square_sum * self.number_points;<EOL><DEDENT>else:<EOL><INDENT>diameter_part = self.square_sum * self.number_points - <NUM_LIT> * self.linear_sum * self.linear_sum + self.square_sum * self.number_points;<EOL><DEDENT>self.__diameter = ( diameter_part / (self.number_points * (self.number_points - <NUM_LIT:1>)) ) ** <NUM_LIT:0.5>;<EOL>return self.__diameter<EOL>", "docstring": "!\n        @brief Calculates diameter of cluster that is represented by the entry.\n        @details It's calculated once when it's requested after the last changes.\n\n        @return (double) Diameter of cluster that is represented by the entry.", "id": "f15714:c2:m13"}
{"signature": "@property<EOL><INDENT>def amount_nodes(self):<DEDENT>", "body": "return self.__amount_nodes<EOL>", "docstring": "!\n        @return (unit) Number of nodes (leaf and non-leaf) in the tree.", "id": "f15714:c6:m2"}
{"signature": "def get_distance(self, entry, type_measurement):", "body": "if (type_measurement is measurement_type.CENTROID_EUCLIDEAN_DISTANCE):<EOL><INDENT>return euclidean_distance_square(entry.get_centroid(), self.get_centroid());<EOL><DEDENT>elif (type_measurement is measurement_type.CENTROID_MANHATTAN_DISTANCE):<EOL><INDENT>return manhattan_distance(entry.get_centroid(), self.get_centroid());<EOL><DEDENT>elif (type_measurement is measurement_type.AVERAGE_INTER_CLUSTER_DISTANCE):<EOL><INDENT>return self.__get_average_inter_cluster_distance(entry);<EOL><DEDENT>elif (type_measurement is measurement_type.AVERAGE_INTRA_CLUSTER_DISTANCE):<EOL><INDENT>return self.__get_average_intra_cluster_distance(entry);<EOL><DEDENT>elif (type_measurement is measurement_type.VARIANCE_INCREASE_DISTANCE):<EOL><INDENT>return self.__get_variance_increase_distance(entry);<EOL><DEDENT>else:<EOL><INDENT>assert <NUM_LIT:0>;<EOL><DEDENT>", "docstring": "!\n        @brief Calculates distance between two clusters in line with measurement type.\n\n        @details In case of usage CENTROID_EUCLIDIAN_DISTANCE square euclidian distance will be returned.\n                 Square root should be taken from the result for obtaining real euclidian distance between\n                 entries. \n\n        @param[in] entry (cfentry): Clustering feature to which distance should be obtained.\n        @param[in] type_measurement (measurement_type): Distance measurement algorithm between two clusters.\n\n        @return (double) Distance between two clusters.", "id": "f15714:c2:m10"}
{"signature": "def children(self, node_parent):", "body": "if node_parent.left is not None:<EOL><INDENT>yield node_parent.left<EOL><DEDENT>if node_parent.right is not None:<EOL><INDENT>yield node_parent.right<EOL><DEDENT>", "docstring": "!\n        @brief Returns list of children of node.\n\n        @param[in] node_parent (node): Node whose children are required. \n\n        @return (list) Children of node. If node haven't got any child then None is returned.", "id": "f15715:c2:m13"}
{"signature": "def traverse(self, start_node = None, level = None):", "body": "if start_node is None:<EOL><INDENT>start_node  = self.__root<EOL>level = <NUM_LIT:0><EOL><DEDENT>if start_node is None:<EOL><INDENT>return []<EOL><DEDENT>items = [ (level, start_node) ]<EOL>for child in self.children(start_node):<EOL><INDENT>if child is not None:<EOL><INDENT>items += self.traverse(child, level + <NUM_LIT:1>)<EOL><DEDENT><DEDENT>return items<EOL>", "docstring": "!\n        @brief Traverses all nodes of subtree that is defined by node specified in input parameter.\n\n        @param[in] start_node (node): Node from that travering of subtree is performed.\n        @param[in, out] level (uint): Should be ignored by application.\n\n        @return (list) All nodes of the subtree.", "id": "f15715:c2:m14"}
{"signature": "def remove(self, point, **kwargs):", "body": "<EOL>node_for_remove = None<EOL>if '<STR_LIT>' in kwargs:<EOL><INDENT>node_for_remove = self.find_node_with_payload(point, kwargs['<STR_LIT>'], None)<EOL><DEDENT>else:<EOL><INDENT>node_for_remove = self.find_node(point, None)<EOL><DEDENT>if node_for_remove is None:<EOL><INDENT>return None<EOL><DEDENT>parent = node_for_remove.parent<EOL>minimal_node = self.__recursive_remove(node_for_remove)<EOL>if parent is None:<EOL><INDENT>self.__root = minimal_node<EOL>if minimal_node is not None:<EOL><INDENT>minimal_node.parent = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if parent.left is node_for_remove:<EOL><INDENT>parent.left = minimal_node<EOL><DEDENT>elif parent.right is node_for_remove:<EOL><INDENT>parent.right = minimal_node<EOL><DEDENT><DEDENT>return self.__root<EOL>", "docstring": "!\n        @brief Remove specified point from kd-tree.\n        @details It removes the first found node that satisfy to the input parameters. Make sure that\n                  pair (point, payload) is unique for each node, othewise the first found is removed.\n\n        @param[in] point (list): Coordinates of the point of removed node.\n        @param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'payload').\n\n        <b>Keyword Args:</b><br>\n            - payload (any): Payload of the node that should be removed.\n\n        @return (node) Root if node has been successfully removed, otherwise None.", "id": "f15715:c2:m2"}
{"signature": "def insert(self, point, payload):", "body": "if self.__root is None:<EOL><INDENT>self.__dimension = len(point)<EOL>self.__root = node(point, payload, None, None, <NUM_LIT:0>)<EOL>self.__point_comparator = self.__create_point_comparator(type(point))<EOL>return self.__root<EOL><DEDENT>cur_node = self.__root<EOL>while True:<EOL><INDENT>if cur_node.data[cur_node.disc] <= point[cur_node.disc]:<EOL><INDENT>if cur_node.right is None:<EOL><INDENT>discriminator = cur_node.disc + <NUM_LIT:1><EOL>if discriminator >= self.__dimension:<EOL><INDENT>discriminator = <NUM_LIT:0><EOL><DEDENT>cur_node.right = node(point, payload, None, None, discriminator, cur_node)<EOL>return cur_node.right<EOL><DEDENT>else: <EOL><INDENT>cur_node = cur_node.right<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if cur_node.left is None:<EOL><INDENT>discriminator = cur_node.disc + <NUM_LIT:1><EOL>if discriminator >= self.__dimension:<EOL><INDENT>discriminator = <NUM_LIT:0><EOL><DEDENT>cur_node.left = node(point, payload, None, None, discriminator, cur_node)<EOL>return cur_node.left<EOL><DEDENT>else:<EOL><INDENT>cur_node = cur_node.left<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "!\n        @brief Insert new point with payload to kd-tree.\n\n        @param[in] point (list): Coordinates of the point of inserted node.\n        @param[in] payload (any-type): Payload of inserted node. It can be identificator of the node or\n                    some useful payload that belongs to the point.\n\n        @return (node) Inserted node to the kd-tree.", "id": "f15715:c2:m1"}
{"signature": "def find_nearest_dist_node(self, point, distance, retdistance = False):", "body": "best_nodes = self.find_nearest_dist_nodes(point, distance)<EOL>if best_nodes == []:<EOL><INDENT>return None<EOL><DEDENT>nearest = min(best_nodes, key = lambda item: item[<NUM_LIT:0>])<EOL>if retdistance is True:<EOL><INDENT>return nearest<EOL><DEDENT>else:<EOL><INDENT>return nearest[<NUM_LIT:1>]<EOL><DEDENT>", "docstring": "!\n        @brief Find nearest neighbor in area with radius = distance.\n\n        @param[in] point (list): Maximum distance where neighbors are searched.\n        @param[in] distance (double): Maximum distance where neighbors are searched.\n        @param[in] retdistance (bool): If True - returns neighbors with distances to them, otherwise only neighbors is returned.\n\n        @return (node|list) Nearest neighbor if 'retdistance' is False and list with two elements [node, distance] if 'retdistance' is True,\n                 where the first element is pointer to node and the second element is distance to it.", "id": "f15715:c2:m10"}
{"signature": "def __create_point_comparator(self, type_point):", "body": "if type_point == numpy.ndarray:<EOL><INDENT>return lambda obj1, obj2: numpy.array_equal(obj1, obj2)<EOL><DEDENT>return lambda obj1, obj2: obj1 == obj2<EOL>", "docstring": "!\n        @brief Create point comparator.\n        @details In case of numpy.array specific comparator is required.\n\n        @param[in] type_point (data_type): Type of point that is stored in KD-node.\n\n        @return (callable) Callable point comparator to compare to points.", "id": "f15715:c2:m6"}
{"signature": "def visualize(self, display=True):", "body": "kdnodes = self.__get_nodes()<EOL>level = kdnodes[<NUM_LIT:0>]<EOL>for kdnode in kdnodes:<EOL><INDENT>self.__print_node(level, kdnode)<EOL><DEDENT>self.__tree_text += self.__tree_level_text<EOL>if display is True:<EOL><INDENT>print(self.__tree_text)<EOL><DEDENT>return self.__tree_text<EOL>", "docstring": "!\n        @brief Display KD-tree to console.\n\n        @param[in] display (bool): If 'True' then tree will be shown in console.\n\n        @return (string) Text representation of the KD-tree.", "id": "f15715:c0:m1"}
{"signature": "def __repr__(self):", "body": "left = None<EOL>right = None<EOL>if self.left is not None:<EOL><INDENT>left = self.left.data<EOL><DEDENT>if self.right is not None:<EOL><INDENT>right = self.right.data<EOL><DEDENT>return \"<STR_LIT>\" % (self.data, left, right)<EOL>", "docstring": "!\n        @return (string) Default representation of the node.", "id": "f15715:c1:m1"}
{"signature": "def get_cluster_lengths(self):", "body": "clusters = self.get_clusters()<EOL>return [len(cluster) for cluster in clusters]<EOL>", "docstring": "!\n        @brief Read proper cluster lengths.\n        @details Cluster length means amount of point in a cluster.\n\n        @return (list) Cluster lengths where each length means amount of points in a cluster.", "id": "f15719:c0:m3"}
{"signature": "def __init__(self, answer_path):", "body": "self.__answer_path = answer_path<EOL>self.__clusters = None<EOL>self.__noise = None<EOL>", "docstring": "!\n        @brief Creates instance of answer reader to read proper clustering results of samples.\n\n        @param[in] answer_path (string): Path to clustering results (answers).", "id": "f15719:c0:m0"}
{"signature": "def Hf(CASRN, AvailableMethods=False, Method=None):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in API_TDB_data.index:<EOL><INDENT>methods.append(API_TDB)<EOL><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == API_TDB:<EOL><INDENT>_Hf = float(API_TDB_data.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == NONE:<EOL><INDENT>_Hf = None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return _Hf<EOL>", "docstring": "r'''This function handles the retrieval of a chemical's standard-phase\n    heat of formation. The lookup is based on CASRNs. Selects the only\n    data source available ('API TDB') if the chemical is in it.\n    Returns None if the data is not available.\n\n    Function has data for 571 chemicals.\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    Hf : float\n        Standard-state heat of formation, [J/mol]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain Hf with the given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        A string for the method name to use, as defined by constants in\n        Hf_methods\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        Hf for the desired chemical, and will return methods instead of Hf\n\n    Notes\n    -----\n    Only one source of information is available to this function. it is:\n\n        * 'API_TDB', a compilation of heats of formation of unspecified phase.\n          Not the original data, but as reproduced in [1]_. 
Some chemicals with\n          duplicated CAS numbers were removed.\n\n    Examples\n    --------\n    >>> Hf(CASRN='7732-18-5')\n    -241820.0\n\n    References\n    ----------\n    .. [1] Albahri, Tareq A., and Abdulla F. Aljasmi. \"SGC Method for\n       Predicting the Standard Enthalpy of Formation of Pure Compounds from\n       Their Molecular Structures.\" Thermochimica Acta 568\n       (September 20, 2013): 46-60. doi:10.1016/j.tca.2013.06.020.", "id": "f15773:m0"}
{"signature": "def Hf_g(CASRN, AvailableMethods=False, Method=None):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in ATcT_g.index:<EOL><INDENT>methods.append(ATCT_G)<EOL><DEDENT>if CASRN in TRC_gas_data.index and not np.isnan(TRC_gas_data.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(TRC)<EOL><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == ATCT_G:<EOL><INDENT>_Hfg = float(ATcT_g.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == TRC:<EOL><INDENT>_Hfg = float(TRC_gas_data.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == NONE:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return _Hfg<EOL>", "docstring": "r'''This function handles the retrieval of a chemical's gas heat of\n    formation. Lookup is based on CASRNs. Will automatically select a data\n    source to use if no Method is provided; returns None if the data is not\n    available.\n\n    Prefered sources are 'Active Thermochemical Tables (g)' for high accuracy,\n    and 'TRC' for less accuracy but more chemicals.\n    Function has data for approximately 2000 chemicals.\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    _Hfg : float\n        Gas phase heat of formation, [J/mol]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain Hf(g) with the given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        A string for the method name to use, as defined by constants in\n        Hf_g_methods\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        Hf(g) for the desired chemical, and will return methods instead of Hf(g)\n\n    Notes\n    -----\n    
Sources are:\n\n        * 'ATCT_G', the Active Thermochemical Tables version 1.112.\n        * 'TRC', from a 1994 compilation.\n\n    Examples\n    --------\n    >>> Hf_g('67-56-1')\n    -200700.0\n\n    References\n    ----------\n    .. [1] Ruscic, Branko, Reinhardt E. Pinzon, Gregor von Laszewski, Deepti\n       Kodeboyina, Alexander Burcat, David Leahy, David Montoy, and Albert F.\n       Wagner. \"Active Thermochemical Tables: Thermochemistry for the 21st\n       Century.\" Journal of Physics: Conference Series 16, no. 1\n       (January 1, 2005): 561. doi:10.1088/1742-6596/16/1/078.\n    .. [2] Frenkel\u02b9, M. L, Texas Engineering Experiment Station, and\n       Thermodynamics Research Center. Thermodynamics of Organic Compounds in\n       the Gas State. College Station, Tex.: Thermodynamics Research Center,\n       1994.", "id": "f15773:m2"}
{"signature": "def checkCAS(CASRN):", "body": "try:<EOL><INDENT>check = CASRN[-<NUM_LIT:1>]<EOL>CASRN = CASRN[::-<NUM_LIT:1>][<NUM_LIT:1>:]<EOL>productsum = <NUM_LIT:0><EOL>i = <NUM_LIT:1><EOL>for num in CASRN:<EOL><INDENT>if num == '<STR_LIT:->':<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>productsum += i*int(num)<EOL>i += <NUM_LIT:1><EOL><DEDENT><DEDENT>return (productsum % <NUM_LIT:10> == int(check))<EOL><DEDENT>except:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Checks if a CAS number is valid. Returns False if the parser cannot \n    parse the given string..\n\n    Parameters\n    ----------\n    CASRN : string\n        A three-piece, dash-separated set of numbers\n\n    Returns\n    -------\n    result : bool\n        Boolean value if CASRN was valid. If parsing fails, return False also.\n\n    Notes\n    -----\n    Check method is according to Chemical Abstract Society. However, no lookup\n    to their service is performed; therefore, this function cannot detect\n    false positives.\n\n    Function also does not support additional separators, apart from '-'.\n\n    CAS numbers up to the series 1 XXX XXX-XX-X are now being issued.\n\n    A long can hold CAS numbers up to 2 147 483-64-7\n\n    Examples\n    --------\n    >>> checkCAS('7732-18-5')\n    True\n    >>> checkCAS('77332-18-5')\n    False", "id": "f15774:m0"}
{"signature": "def InChI(CASRN):", "body": "return pubchem_db.search_CAS(CASRN).InChI<EOL>", "docstring": ">>> InChI('7732-18-5')\n'H2O/h1H2'", "id": "f15774:m6"}
{"signature": "def InChI_Key(CASRN):", "body": "return pubchem_db.search_CAS(CASRN).InChI_key<EOL>", "docstring": ">>> InChI_Key('7732-18-5')\n'XLYOFNOQVPJJNP-UHFFFAOYSA-N'", "id": "f15774:m7"}
{"signature": "def MW(CASRN):", "body": "return pubchem_db.search_CAS(CASRN).MW<EOL>", "docstring": "Given a CASRN in the database, obtain the Molecular weight of the\n    compound, if it is in the database.\n\n    Parameters\n    ----------\n    CASRN : string\n        Valid CAS number in PubChem database\n\n    Returns\n    -------\n    MolecularWeight : float\n\n    Notes\n    -----\n    CASRN must be an indexing key in the pubchem database. No MW Calculation is\n    performed; nor are any historical isotopic corrections applied.\n\n    Examples\n    --------\n    >>> MW('7732-18-5')\n    18.01528\n\n    References\n    ----------\n    .. [1] Pubchem.", "id": "f15774:m3"}
{"signature": "def Tm_depression_eutectic(Tm, Hm, x=None, M=None, MW=None):", "body": "if x:<EOL><INDENT>dTm = R*Tm**<NUM_LIT:2>*x/Hm<EOL><DEDENT>elif M and MW:<EOL><INDENT>MW = MW/<NUM_LIT> <EOL>dTm = R*Tm**<NUM_LIT:2>*MW*M/Hm<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return dTm<EOL>", "docstring": "r'''Returns the freezing point depression caused by a solute in a solvent.\n    Can use either the mole fraction of the solute or its molality and the\n    molecular weight of the solvent. Assumes ideal system behavior.\n\n    .. math::\n        \\Delta T_m = \\frac{R T_m^2 x}{\\Delta H_m}\n\n        \\Delta T_m = \\frac{R T_m^2 (MW) M}{1000 \\Delta H_m}\n\n    Parameters\n    ----------\n    Tm : float\n        Melting temperature of the solute [K]\n    Hm : float\n        Heat of melting at the melting temperature of the solute [J/mol]\n    x : float, optional\n        Mole fraction of the solute [-]\n    M : float, optional\n        Molality [mol/kg]\n    MW: float, optional\n        Molecular weight of the solvent [g/mol]\n\n    Returns\n    -------\n    dTm : float\n        Freezing point depression [K]\n\n    Notes\n    -----\n    MW is the molecular weight of the solvent. M is the molality of the solute.\n\n    Examples\n    --------\n    From [1]_, matching example.\n\n    >>> Tm_depression_eutectic(353.35, 19110, .02)\n    1.0864594900639515\n\n    References\n    ----------\n    .. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.\n       Weinheim, Germany: Wiley-VCH, 2012.", "id": "f15775:m2"}
{"signature": "def solubility_eutectic(T, Tm, Hm, Cpl=<NUM_LIT:0>, Cps=<NUM_LIT:0>, gamma=<NUM_LIT:1>):", "body": "dCp = Cpl-Cps<EOL>x = exp(- Hm/R/T*(<NUM_LIT:1>-T/Tm) + dCp*(Tm-T)/R/T - dCp/R*log(Tm/T))/gamma<EOL>return x<EOL>", "docstring": "r'''Returns the maximum solubility of a solute in a solvent.\n\n    .. math::\n        \\ln x_i^L \\gamma_i^L = \\frac{\\Delta H_{m,i}}{RT}\\left(\n        1 - \\frac{T}{T_{m,i}}\\right) - \\frac{\\Delta C_{p,i}(T_{m,i}-T)}{RT}\n        + \\frac{\\Delta C_{p,i}}{R}\\ln\\frac{T_m}{T}\n\n        \\Delta C_{p,i} = C_{p,i}^L - C_{p,i}^S\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the system [K]\n    Tm : float\n        Melting temperature of the solute [K]\n    Hm : float\n        Heat of melting at the melting temperature of the solute [J/mol]\n    Cpl : float, optional\n        Molar heat capacity of the solute as a liquid [J/mol/K]\n    Cpls: float, optional\n        Molar heat capacity of the solute as a solid [J/mol/K]\n    gamma : float, optional\n        Activity coefficient of the solute as a liquid [-]\n\n    Returns\n    -------\n    x : float\n        Mole fraction of solute at maximum solubility [-]\n\n    Notes\n    -----\n    gamma is of the solute in liquid phase\n\n    Examples\n    --------\n    From [1]_, matching example\n\n    >>> solubility_eutectic(T=260., Tm=278.68, Hm=9952., Cpl=0, Cps=0, gamma=3.0176)\n    0.24340068761677464\n\n    References\n    ----------\n    .. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.\n       Weinheim, Germany: Wiley-VCH, 2012.", "id": "f15775:m1"}
{"signature": "def solubility_parameter(T=<NUM_LIT>, Hvapm=None, Vml=None,<EOL>CASRN='<STR_LIT>', AvailableMethods=False, Method=None):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if T and Hvapm and Vml:<EOL><INDENT>methods.append(DEFINITION)<EOL><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == DEFINITION:<EOL><INDENT>if (not Hvapm) or (not T) or (not Vml):<EOL><INDENT>delta = None<EOL><DEDENT>else:<EOL><INDENT>if Hvapm < R*T or Vml < <NUM_LIT:0>:  <EOL><INDENT>delta = None<EOL><DEDENT>else:<EOL><INDENT>delta = ((Hvapm - R*T)/Vml)**<NUM_LIT:0.5><EOL><DEDENT><DEDENT><DEDENT>elif Method == NONE:<EOL><INDENT>delta = None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return delta<EOL>", "docstring": "r'''This function handles the calculation of a chemical's solubility\n    parameter. Calculation is a function of temperature, but is not always\n    presented as such. No lookup values are available; either `Hvapm`, `Vml`,\n    and `T` are provided or the calculation cannot be performed.\n\n    .. 
math::\n        \\delta = \\sqrt{\\frac{\\Delta H_{vap} - RT}{V_m}}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the fluid [k]\n    Hvapm : float\n        Heat of vaporization [J/mol/K]\n    Vml : float\n        Specific volume of the liquid [m^3/mol]\n    CASRN : str, optional\n        CASRN of the fluid, not currently used [-]\n\n    Returns\n    -------\n    delta : float\n        Solubility parameter, [Pa^0.5]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain the solubility parameter\n        with the given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        A string for the method name to use, as defined by constants in\n        solubility_parameter_methods\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        the solubility parameter for the desired chemical, and will return\n        methods instead of the solubility parameter\n\n    Notes\n    -----\n    Undefined past the critical point. For convenience, if Hvap is not defined,\n    an error is not raised; None is returned instead. Also for convenience,\n    if Hvapm is less than RT, None is returned to avoid taking the root of a\n    negative number.\n\n    This parameter is often given in units of cal/ml, which is 2045.48 times\n    smaller than the value returned here.\n\n    Examples\n    --------\n    Pentane at STP\n\n    >>> solubility_parameter(T=298.2, Hvapm=26403.3, Vml=0.000116055)\n    14357.681538173534\n\n    References\n    ----------\n    .. [1] Barton, Allan F. M. CRC Handbook of Solubility Parameters and Other\n       Cohesion Parameters, Second Edition. CRC Press, 1991.", "id": "f15775:m0"}
{"signature": "def load_all_methods(self):", "body": "methods = []<EOL>Tmins, Tmaxs = [], []<EOL>if self.CASRN in CRC_Permittivity_data.index:<EOL><INDENT>methods.append(CRC_CONSTANT)<EOL>_, self.CRC_CONSTANT_T, self.CRC_permittivity, A, B, C, D, Tmin, Tmax = _CRC_Permittivity_data_values[CRC_Permittivity_data.index.get_loc(self.CASRN)].tolist()<EOL>self.CRC_Tmin = Tmin<EOL>self.CRC_Tmax = Tmax<EOL>self.CRC_coeffs = [<NUM_LIT:0> if np.isnan(x) else x for x in [A, B, C, D] ]<EOL>if not np.isnan(Tmin):<EOL><INDENT>Tmins.append(Tmin); Tmaxs.append(Tmax)<EOL><DEDENT>if self.CRC_coeffs[<NUM_LIT:0>]:<EOL><INDENT>methods.append(CRC)<EOL><DEDENT><DEDENT>self.all_methods = set(methods)<EOL>if Tmins and Tmaxs:<EOL><INDENT>self.Tmin = min(Tmins)<EOL>self.Tmax = max(Tmaxs)<EOL><DEDENT>", "docstring": "r'''Method which picks out coefficients for the specified chemical\n        from the various dictionaries and DataFrames storing it. All data is\n        stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,\n        and :obj:`all_methods` as a set of methods for which the data exists for.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15777:c0:m1"}
{"signature": "def VDI_tabular_data(CASRN, prop):", "body": "try:<EOL><INDENT>d = _VDISaturationDict[CASRN]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>props, Ts = d[prop], d['<STR_LIT:T>']<EOL><DEDENT>except:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>Ts = [T for p, T in zip(props, Ts) if p]<EOL>props = [p for p in props if p]<EOL>if prop == '<STR_LIT>':<EOL><INDENT>Ts.append(d['<STR_LIT>'])<EOL>props.append(<NUM_LIT:0>)<EOL><DEDENT>return Ts, props<EOL>", "docstring": "r'''This function retrieves the tabular data available for a given chemical\n    and a given property. Lookup is based on CASRNs. Length of data returned\n    varies between chemicals. All data is at saturation condition from [1]_.\n\n    Function has data for 58 chemicals.\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n    prop : string\n        Property [-]\n\n    Returns\n    -------\n    Ts : list\n        Temperatures where property data is available, [K]\n    props : list\n        Properties at each temperature, [various]\n\n    Notes\n    -----\n    The available properties are 'P', 'Density (l)', 'Density (g)', 'Hvap',\n    'Cp (l)', 'Cp (g)', 'Mu (l)', 'Mu (g)', 'K (l)', 'K (g)', 'Pr (l)',\n    'Pr (g)', 'sigma', 'Beta', 'Volume (l)', and 'Volume (g)'.\n\n    Data is available for all properties and all chemicals; surface tension\n    data was missing for mercury, but added as estimated from the a/b\n    coefficients listed in Jasper (1972) to simplify the function.\n\n    Examples\n    --------\n    >>> VDI_tabular_data('67-56-1', 'Mu (g)')\n    ([337.63, 360.0, 385.0, 410.0, 435.0, 460.0, 500.0], [1.11e-05, 1.18e-05, 1.27e-05, 1.36e-05, 1.46e-05, 1.59e-05, 2.04e-05])\n\n    References\n    ----------\n    .. [1] Gesellschaft, VDI, ed. VDI Heat Atlas. 2E. Berlin\u202f: Springer, 2010.", "id": "f15778:m0"}
{"signature": "def Rackett(T, Tc, Pc, Zc):", "body": "return R*Tc/Pc*Zc**(<NUM_LIT:1> + (<NUM_LIT:1> - T/Tc)**(<NUM_LIT:2>/<NUM_LIT>))<EOL>", "docstring": "r'''Calculates saturation liquid volume, using Rackett CSP method and\n    critical properties.\n\n    The molar volume of a liquid is given by:\n\n    .. math::\n        V_s = \\frac{RT_c}{P_c}{Z_c}^{[1+(1-{T/T_c})^{2/7} ]}\n\n    Units are all currently in m^3/mol - this can be changed to kg/m^3\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n    Zc : float\n        Critical compressibility of fluid, [-]\n\n    Returns\n    -------\n    Vs : float\n        Saturation liquid volume, [m^3/mol]\n\n    Notes\n    -----\n    Units are dependent on gas constant R, imported from scipy\n    According to Reid et. al, underpredicts volume for compounds with Zc < 0.22\n\n    Examples\n    --------\n    Propane, example from the API Handbook\n\n    >>> Vm_to_rho(Rackett(272.03889, 369.83, 4248000.0, 0.2763), 44.09562)\n    531.3223212651092\n\n    References\n    ----------\n    .. [1] Rackett, Harold G. \"Equation of State for Saturated Liquids.\"\n       Journal of Chemical & Engineering Data 15, no. 4 (1970): 514-517.\n       doi:10.1021/je60047a012", "id": "f15779:m1"}
{"signature": "def calculate_P(self, T, P, method):", "body": "if method == EOS:<EOL><INDENT>self.eos[<NUM_LIT:0>] = self.eos[<NUM_LIT:0>].to_TP(T=T, P=P)<EOL>Vm = self.eos[<NUM_LIT:0>].V_g<EOL><DEDENT>elif method == TSONOPOULOS_EXTENDED:<EOL><INDENT>B = BVirial_Tsonopoulos_extended(T, self.Tc, self.Pc, self.omega, dipole=self.dipole)<EOL>Vm = ideal_gas(T, P) + B<EOL><DEDENT>elif method == TSONOPOULOS:<EOL><INDENT>B = BVirial_Tsonopoulos(T, self.Tc, self.Pc, self.omega)<EOL>Vm = ideal_gas(T, P) + B<EOL><DEDENT>elif method == ABBOTT:<EOL><INDENT>B = BVirial_Abbott(T, self.Tc, self.Pc, self.omega)<EOL>Vm = ideal_gas(T, P) + B<EOL><DEDENT>elif method == PITZER_CURL:<EOL><INDENT>B = BVirial_Pitzer_Curl(T, self.Tc, self.Pc, self.omega)<EOL>Vm = ideal_gas(T, P) + B<EOL><DEDENT>elif method == CRC_VIRIAL:<EOL><INDENT>a1, a2, a3, a4, a5 = self.CRC_VIRIAL_coeffs<EOL>t = <NUM_LIT>/T - <NUM_LIT:1.><EOL>B = (a1 + a2*t + a3*t**<NUM_LIT:2> + a4*t**<NUM_LIT:3> + a5*t**<NUM_LIT:4>)/<NUM_LIT><EOL>Vm = ideal_gas(T, P) + B<EOL><DEDENT>elif method == IDEAL:<EOL><INDENT>Vm = ideal_gas(T, P)<EOL><DEDENT>elif method == COOLPROP:<EOL><INDENT>Vm = <NUM_LIT:1.>/PropsSI('<STR_LIT>', '<STR_LIT:T>', T, '<STR_LIT:P>', P, self.CASRN)<EOL><DEDENT>elif method in self.tabular_data:<EOL><INDENT>Vm = self.interpolate_P(T, P, method)<EOL><DEDENT>return Vm<EOL>", "docstring": "r'''Method to calculate pressure-dependent gas molar volume at\n        temperature `T` and pressure `P` with a given method.\n\n        This method has no exception handling; see `TP_dependent_property`\n        for that.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate molar volume, [K]\n        P : float\n            Pressure at which to calculate molar volume, [K]\n        method : str\n            Name of the method to use\n\n        Returns\n        -------\n        Vm : float\n            Molar volume of the gas at T and P, [m^3/mol]", "id": "f15779:c2:m2"}
{"signature": "def Campbell_Thodos(T, Tb, Tc, Pc, M, dipole=None, hydroxyl=False):", "body": "Tr = T/Tc<EOL>Tbr = Tb/Tc<EOL>Pc = Pc/<NUM_LIT><EOL>s = Tbr * log(Pc)/(<NUM_LIT:1>-Tbr)<EOL>Lambda = Pc**(<NUM_LIT:1>/<NUM_LIT>)/(M**<NUM_LIT:0.5>*Tc**(<NUM_LIT:5>/<NUM_LIT>))<EOL>alpha = <NUM_LIT> - <NUM_LIT>*s<EOL>beta = <NUM_LIT>*s - <NUM_LIT> + <NUM_LIT>*Lambda**(<NUM_LIT>)<EOL>if dipole:<EOL><INDENT>theta = Pc*dipole**<NUM_LIT:2>/Tc**<NUM_LIT:2><EOL>alpha -= <NUM_LIT> * theta**<NUM_LIT><EOL>beta += <NUM_LIT> * theta**<NUM_LIT><EOL><DEDENT>if hydroxyl:<EOL><INDENT>beta = <NUM_LIT>*s - <NUM_LIT> + <NUM_LIT>*Lambda**(<NUM_LIT>) + <NUM_LIT>*theta**<NUM_LIT><EOL>alpha = (<NUM_LIT>*Tbr - <NUM_LIT> + <NUM_LIT>/Tbr**<NUM_LIT>)*Pc**<NUM_LIT><EOL><DEDENT>Zra = alpha + beta*(<NUM_LIT:1>-Tr)<EOL>Vs = R*Tc/(Pc*<NUM_LIT>)*Zra**(<NUM_LIT:1>+(<NUM_LIT:1>-Tr)**(<NUM_LIT:2>/<NUM_LIT>))<EOL>return Vs<EOL>", "docstring": "r'''Calculate saturation liquid density using the Campbell-Thodos [1]_\n    CSP method.\n\n    An old and uncommon estimation method.\n\n    .. math::\n        V_s = \\frac{RT_c}{P_c}{Z_{RA}}^{[1+(1-T_r)^{2/7}]}\n\n        Z_{RA} = \\alpha + \\beta(1-T_r)\n\n        \\alpha = 0.3883-0.0179s\n\n        s = T_{br} \\frac{\\ln P_c}{(1-T_{br})}\n\n        \\beta = 0.00318s-0.0211+0.625\\Lambda^{1.35}\n\n        \\Lambda = \\frac{P_c^{1/3}} { M^{1/2} T_c^{5/6}}\n\n    For polar compounds:\n\n    .. math::\n        \\theta = P_c \\mu^2/T_c^2\n\n        \\alpha = 0.3883 - 0.0179s - 130540\\theta^{2.41}\n\n        \\beta = 0.00318s - 0.0211 + 0.625\\Lambda^{1.35} + 9.74\\times\n        10^6 \\theta^{3.38}\n\n    Polar Combounds with hydroxyl groups (water, alcohols)\n\n    .. 
math::\n        \\alpha = \\left[0.690T_{br} -0.3342 + \\frac{5.79\\times 10^{-10}}\n        {T_{br}^{32.75}}\\right] P_c^{0.145}\n\n        \\beta = 0.00318s - 0.0211 + 0.625 \\Lambda^{1.35} + 5.90\\Theta^{0.835}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tb : float\n        Boiling temperature of the fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n    M : float\n        Molecular weight of the fluid [g/mol]\n    dipole : float, optional\n        Dipole moment of the fluid [debye]\n    hydroxyl : bool, optional\n        Swith to use the hydroxyl variant for polar fluids\n\n    Returns\n    -------\n    Vs : float\n        Saturation liquid volume\n\n    Notes\n    -----\n    If a dipole is provided, the polar chemical method is used.\n    The paper is an excellent read.\n    Pc is internally converted to atm.\n\n    Examples\n    --------\n    Ammonia, from [1]_.\n\n    >>> Campbell_Thodos(T=405.45, Tb=239.82, Tc=405.45, Pc=111.7*101325, M=17.03, dipole=1.47)\n    7.347363635885525e-05\n\n    References\n    ----------\n    .. [1] Campbell, Scott W., and George Thodos. \"Prediction of Saturated\n       Liquid Densities and Critical Volumes for Polar and Nonpolar\n       Substances.\" Journal of Chemical & Engineering Data 30, no. 1\n       (January 1, 1985): 102-11. doi:10.1021/je00039a032.", "id": "f15779:m6"}
{"signature": "def calculate(self, T, method):", "body": "if method == CRC_INORG_S:<EOL><INDENT>Vms = self.CRC_INORG_S_Vm<EOL>", "docstring": "r'''Method to calculate the molar volume of a solid at temperature `T`\n        with a given method.\n\n        This method has no exception handling; see `T_dependent_property`\n        for that.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate molar volume, [K]\n        method : str\n            Name of the method to use\n\n        Returns\n        -------\n        Vms : float\n            Molar volume of the solid at T, [m^3/mol]", "id": "f15779:c4:m2"}
{"signature": "def load_all_methods(self):", "body": "methods = [SIMPLE]     <EOL>self.all_methods = set(methods)<EOL>", "docstring": "r'''Method to initialize the object by precomputing any values which\n        may be used repeatedly and by retrieving mixture-specific variables.\n        All data are stored as attributes. This method also sets :obj:`Tmin`, \n        :obj:`Tmax`, and :obj:`all_methods` as a set of methods which should \n        work to calculate the property.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15779:c5:m1"}
{"signature": "def Yamada_Gunn(T, Tc, Pc, omega):", "body": "return R*Tc/Pc*(<NUM_LIT> - <NUM_LIT>*omega)**(<NUM_LIT:1> + (<NUM_LIT:1> - T/Tc)**(<NUM_LIT:2>/<NUM_LIT>))<EOL>", "docstring": "r'''Calculates saturation liquid volume, using Yamada and Gunn CSP method\n    and a chemical's critical properties and acentric factor.\n\n    The molar volume of a liquid is given by:\n\n    .. math::\n        V_s = \\frac{RT_c}{P_c}{(0.29056-0.08775\\omega)}^{[1+(1-{T/T_c})^{2/7}]}\n\n    Units are in m^3/mol.\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n    omega : float\n        Acentric factor for fluid, [-]\n\n    Returns\n    -------\n    Vs : float\n        saturation liquid volume, [m^3/mol]\n\n    Notes\n    -----\n    This equation is an improvement on the Rackett equation.\n    This is often presented as the Rackett equation.\n    The acentric factor is used here, instead of the critical compressibility\n    A variant using a reference fluid also exists\n\n    Examples\n    --------\n    >>> Yamada_Gunn(300, 647.14, 22048320.0, 0.245)\n    2.1882836429895796e-05\n\n    References\n    ----------\n    .. [1] Gunn, R. D., and Tomoyoshi Yamada. \"A Corresponding States\n        Correlation of Saturated Liquid Volumes.\" AIChE Journal 17, no. 6\n        (1971): 1341-45. doi:10.1002/aic.690170613\n    .. [2] Yamada, Tomoyoshi, and Robert D. Gunn. \"Saturated Liquid Molar\n        Volumes. Rackett Equation.\" Journal of Chemical & Engineering Data 18,\n        no. 2 (1973): 234-36. doi:10.1021/je60057a006", "id": "f15779:m2"}
{"signature": "def ideal_gas(T, P):", "body": "return R*T/P<EOL>", "docstring": "r'''Calculates ideal gas molar volume.\n    The molar volume of an ideal gas is given by:\n\n    .. math::\n        V = \\frac{RT}{P}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    P : float\n        Pressure of fluid [Pa]\n\n    Returns\n    -------\n    V : float\n        Gas volume, [m^3/mol]\n\n    Examples\n    --------\n    >>> ideal_gas(298.15, 101325.)\n    0.02446539540458919", "id": "f15779:m13"}
{"signature": "def COSTALD(T, Tc, Vc, omega):", "body": "Tr = T/Tc<EOL>V_delta = (-<NUM_LIT> + <NUM_LIT>*Tr - <NUM_LIT>*Tr**<NUM_LIT:2><EOL>- <NUM_LIT>*Tr**<NUM_LIT:3>)/(Tr - <NUM_LIT>)<EOL>V_0 = <NUM_LIT:1> - <NUM_LIT>*(<NUM_LIT:1>-Tr)**(<NUM_LIT:1>/<NUM_LIT>) + <NUM_LIT>*(<NUM_LIT:1>-Tr)**(<NUM_LIT:2>/<NUM_LIT>)- <NUM_LIT>*(<NUM_LIT:1>-Tr) + <NUM_LIT>*(<NUM_LIT:1>-Tr)**(<NUM_LIT:4>/<NUM_LIT>)<EOL>return Vc*V_0*(<NUM_LIT:1>-omega*V_delta)<EOL>", "docstring": "r'''Calculate saturation liquid density using the COSTALD CSP method.\n\n    A popular and accurate estimation method. If possible, fit parameters are\n    used; alternatively critical properties work well.\n\n    The density of a liquid is given by:\n\n    .. math::\n        V_s=V^*V^{(0)}[1-\\omega_{SRK}V^{(\\delta)}]\n\n        V^{(0)}=1-1.52816(1-T_r)^{1/3}+1.43907(1-T_r)^{2/3}\n        - 0.81446(1-T_r)+0.190454(1-T_r)^{4/3}\n\n        V^{(\\delta)}=\\frac{-0.296123+0.386914T_r-0.0427258T_r^2-0.0480645T_r^3}\n        {T_r-1.00001}\n\n    Units are that of critical or fit constant volume.\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Vc : float\n        Critical volume of fluid [m^3/mol].\n        This parameter is alternatively a fit parameter\n    omega : float\n        (ideally SRK) Acentric factor for fluid, [-]\n        This parameter is alternatively a fit parameter.\n\n    Returns\n    -------\n    Vs : float\n        Saturation liquid volume\n\n    Notes\n    -----\n    196 constants are fit to this function in [1]_.\n    Range: 0.25 < Tr < 0.95, often said to be to 1.0\n\n    This function has been checked with the API handbook example problem.\n\n    Examples\n    --------\n    Propane, from an example in the API Handbook\n\n    >>> Vm_to_rho(COSTALD(272.03889, 369.83333, 0.20008161E-3, 0.1532), 44.097)\n    530.3009967969841\n\n\n    References\n    ----------\n    .. 
[1] Hankinson, Risdon W., and George H. Thomson. \"A New Correlation for\n       Saturated Densities of Liquids and Their Mixtures.\" AIChE Journal\n       25, no. 4 (1979): 653-663. doi:10.1002/aic.690250412", "id": "f15779:m5"}
{"signature": "def load_all_methods(self):", "body": "methods = []<EOL>methods_P = []<EOL>Tmins, Tmaxs = [], []<EOL>if has_CoolProp and self.CASRN in coolprop_dict:<EOL><INDENT>methods.append(COOLPROP); methods_P.append(COOLPROP)<EOL>self.CP_f = coolprop_fluids[self.CASRN]<EOL>Tmins.append(self.CP_f.Tt); Tmaxs.append(self.CP_f.Tc)<EOL><DEDENT>if self.CASRN in CRC_inorg_l_data.index:<EOL><INDENT>methods.append(CRC_INORG_L)<EOL>_, self.CRC_INORG_L_MW, self.CRC_INORG_L_rho, self.CRC_INORG_L_k, self.CRC_INORG_L_Tm, self.CRC_INORG_L_Tmax = _CRC_inorg_l_data_values[CRC_inorg_l_data.index.get_loc(self.CASRN)].tolist()<EOL>Tmins.append(self.CRC_INORG_L_Tm); Tmaxs.append(self.CRC_INORG_L_Tmax)<EOL><DEDENT>if self.CASRN in Perry_l_data.index:<EOL><INDENT>methods.append(PERRYDIPPR)<EOL>_, C1, C2, C3, C4, self.DIPPR_Tmin, self.DIPPR_Tmax = _Perry_l_data_values[Perry_l_data.index.get_loc(self.CASRN)].tolist()<EOL>self.DIPPR_coeffs = [C1, C2, C3, C4]<EOL>Tmins.append(self.DIPPR_Tmin); Tmaxs.append(self.DIPPR_Tmax)<EOL><DEDENT>if self.CASRN in VDI_PPDS_2.index:<EOL><INDENT>methods.append(VDI_PPDS)<EOL>_, MW, Tc, rhoc, A, B, C, D = _VDI_PPDS_2_values[VDI_PPDS_2.index.get_loc(self.CASRN)].tolist()<EOL>self.VDI_PPDS_coeffs = [A, B, C, D]<EOL>self.VDI_PPDS_MW = MW<EOL>self.VDI_PPDS_Tc = Tc<EOL>self.VDI_PPDS_rhoc = rhoc<EOL>Tmaxs.append(self.VDI_PPDS_Tc)<EOL><DEDENT>if self.CASRN in _VDISaturationDict:<EOL><INDENT>methods.append(VDI_TABULAR)<EOL>Ts, props = VDI_tabular_data(self.CASRN, '<STR_LIT>')<EOL>self.VDI_Tmin = Ts[<NUM_LIT:0>]<EOL>self.VDI_Tmax = Ts[-<NUM_LIT:1>]<EOL>self.tabular_data[VDI_TABULAR] = (Ts, props)<EOL>Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)<EOL><DEDENT>if self.Tc and self.CASRN in COSTALD_data.index:<EOL><INDENT>methods.append(HTCOSTALDFIT)<EOL>self.COSTALD_Vchar = float(COSTALD_data.at[self.CASRN, '<STR_LIT>'])<EOL>self.COSTALD_omega_SRK = float(COSTALD_data.at[self.CASRN, '<STR_LIT>'])<EOL>Tmins.append(<NUM_LIT:0>); 
Tmaxs.append(self.Tc)<EOL><DEDENT>if self.Tc and self.Pc and self.CASRN in COSTALD_data.index and not np.isnan(COSTALD_data.at[self.CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(RACKETTFIT)<EOL>self.RACKETT_Z_RA = float(COSTALD_data.at[self.CASRN, '<STR_LIT>'])<EOL>Tmins.append(<NUM_LIT:0>); Tmaxs.append(self.Tc)<EOL><DEDENT>if self.CASRN in CRC_inorg_l_const_data.index:<EOL><INDENT>methods.append(CRC_INORG_L_CONST)<EOL>self.CRC_INORG_L_CONST_Vm = float(CRC_inorg_l_const_data.at[self.CASRN, '<STR_LIT>'])<EOL><DEDENT>if all((self.Tc, self.Vc, self.Zc)):<EOL><INDENT>methods.append(YEN_WOODS_SAT)<EOL>Tmins.append(<NUM_LIT:0>); Tmaxs.append(self.Tc)<EOL><DEDENT>if all((self.Tc, self.Pc, self.Zc)):<EOL><INDENT>methods.append(RACKETT)<EOL>Tmins.append(<NUM_LIT:0>); Tmaxs.append(self.Tc)<EOL><DEDENT>if all((self.Tc, self.Pc, self.omega)):<EOL><INDENT>methods.append(YAMADA_GUNN)<EOL>methods.append(BHIRUD_NORMAL)<EOL>Tmins.append(<NUM_LIT:0>); Tmaxs.append(self.Tc)<EOL><DEDENT>if all((self.Tc, self.Vc, self.omega)):<EOL><INDENT>methods.append(TOWNSEND_HALES)<EOL>methods.append(HTCOSTALD)<EOL>methods.append(MMSNM0)<EOL>if self.CASRN in SNM0_data.index:<EOL><INDENT>methods.append(MMSNM0FIT)<EOL>self.SNM0_delta_SRK = float(SNM0_data.at[self.CASRN, '<STR_LIT>'])<EOL><DEDENT>Tmins.append(<NUM_LIT:0>); Tmaxs.append(self.Tc)<EOL><DEDENT>if all((self.Tc, self.Vc, self.omega, self.Tb, self.MW)):<EOL><INDENT>methods.append(CAMPBELL_THODOS)<EOL>Tmins.append(<NUM_LIT:0>); Tmaxs.append(self.Tc)<EOL><DEDENT>if all((self.Tc, self.Pc, self.omega)):<EOL><INDENT>methods_P.append(COSTALD_COMPRESSED)<EOL>if self.eos:<EOL><INDENT>methods_P.append(EOS)<EOL><DEDENT><DEDENT>if Tmins and Tmaxs:<EOL><INDENT>self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)<EOL><DEDENT>self.all_methods = set(methods)<EOL>self.all_methods_P = set(methods_P)<EOL>", "docstring": "r'''Method which picks out coefficients for the specified chemical\n        from the various dictionaries and DataFrames storing it. 
All data is\n        stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,\n        :obj:`all_methods` and obj:`all_methods_P` as a set of methods for\n        which the data exists for.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15779:c0:m1"}
{"signature": "def solve_T(self, P, V, quick=True):", "body": "<EOL><INDENT>return super(PR, self).solve_T(P, V, quick=quick) <EOL><DEDENT>Tc, a, b, kappa0, kappa1, kappa2, kappa3 = self.Tc, self.a, self.b, self.kappa0, self.kappa1, self.kappa2, self.kappa3<EOL>if quick:<EOL><INDENT>x0 = V - b<EOL>R_x0 = R/x0<EOL>x5 = (<NUM_LIT>*(V*(V + b) + b*x0))<EOL>x4 = <NUM_LIT>*kappa0<EOL>def to_solve(T):<EOL><INDENT>x1 = T/Tc<EOL>x2 = x1**<NUM_LIT:0.5><EOL>x3 = x2 - <NUM_LIT:1.><EOL>return (R_x0*T - a*(x3*(x4 - (kappa1 + kappa2*x3*(-kappa3 + x1))*(<NUM_LIT>*x1 - <NUM_LIT>)*(x2 + <NUM_LIT:1.>)) - <NUM_LIT>)**<NUM_LIT:2>/x5) - P<EOL><DEDENT><DEDENT>else:<EOL><INDENT>def to_solve(T):<EOL><INDENT>P_calc = R*T/(V - b) - a*((kappa0 + (kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>))*(-sqrt(T/Tc) + <NUM_LIT:1>) + <NUM_LIT:1>)**<NUM_LIT:2>/(V*(V + b) + b*(V - b))<EOL>return P_calc - P<EOL><DEDENT><DEDENT>return newton(to_solve, Tc*<NUM_LIT:0.5>)<EOL>", "docstring": "r'''Method to calculate `T` from a specified `P` and `V` for the PRSV2\n        EOS. Uses `Tc`, `a`, `b`, `kappa0`, `kappa1`, `kappa2`, and `kappa3`\n        as well, obtained from the class's namespace.\n\n        Parameters\n        ----------\n        P : float\n            Pressure, [Pa]\n        V : float\n            Molar volume, [m^3/mol]\n        quick : bool, optional\n            Whether to use a SymPy cse-derived expression (somewhat faster) or \n            individual formulas.\n\n        Returns\n        -------\n        T : float\n            Temperature, [K]\n\n        Notes\n        -----\n        Not guaranteed to produce a solution. There are actually 8 solutions,\n        six with an imaginary component at a tested point. The two temperature\n        solutions are quite far apart, with one much higher than the other;\n        it is possible the solver could converge on the higher solution, so use\n        `T` inputs with care. 
This extra solution is a perfectly valid one\n        however.", "id": "f15780:c6:m1"}
{"signature": "def solve_T(self, P, V, quick=True):", "body": "a, b, Tc, m = self.a, self.b, self.Tc, self.m<EOL>if quick:<EOL><INDENT>x0 = R*Tc<EOL>x1 = V*b<EOL>x2 = x0*x1<EOL>x3 = V*V<EOL>x4 = x0*x3<EOL>x5 = m*m<EOL>x6 = a*x5<EOL>x7 = b*x6<EOL>x8 = V*x6<EOL>x9 = (x2 + x4 + x7 - x8)**<NUM_LIT:2><EOL>x10 = x3*x3<EOL>x11 = R*R*Tc*Tc<EOL>x12 = a*a<EOL>x13 = x5*x5<EOL>x14 = x12*x13<EOL>x15 = b*b<EOL>x16 = x3*V<EOL>x17 = a*x0<EOL>x18 = x17*x5<EOL>x19 = <NUM_LIT>*b*x16<EOL>x20 = -<NUM_LIT>*V*b*x14 + <NUM_LIT>*V*x15*x18 + x10*x11 + x11*x15*x3 + x11*x19 + x14*x15 + x14*x3 - <NUM_LIT:2>*x16*x18<EOL>x21 = V - b<EOL>x22 = <NUM_LIT:2>*m*x17<EOL>x23 = P*x4<EOL>x24 = P*x8<EOL>x25 = x1*x17<EOL>x26 = P*R*Tc<EOL>x27 = x17*x3<EOL>x28 = V*x12<EOL>x29 = <NUM_LIT>*m*m*m<EOL>x30 = b*x12<EOL>return -Tc*(<NUM_LIT>*a*m*x9*(V*x21*x21*x21*(V + b)*(P*x2 + P*x7 + x17 + x18 + x22 + x23 - x24))**<NUM_LIT:0.5>*(m + <NUM_LIT:1.>) - x20*x21*(-P*x16*x6 + x1*x22 + x10*x26 + x13*x28 - x13*x30 + x15*x23 + x15*x24 + x19*x26 + x22*x3 + x25*x5 + x25 + x27*x5 + x27 + x28*x29 + x28*x5 - x29*x30 - x30*x5))/(x20*x9)<EOL><DEDENT>else:<EOL><INDENT>return Tc*(-<NUM_LIT:2>*a*m*sqrt(V*(V - b)**<NUM_LIT:3>*(V + b)*(P*R*Tc*V**<NUM_LIT:2> + P*R*Tc*V*b - P*V*a*m**<NUM_LIT:2> + P*a*b*m**<NUM_LIT:2> + R*Tc*a*m**<NUM_LIT:2> + <NUM_LIT:2>*R*Tc*a*m + R*Tc*a))*(m + <NUM_LIT:1>)*(R*Tc*V**<NUM_LIT:2> + R*Tc*V*b - V*a*m**<NUM_LIT:2> + a*b*m**<NUM_LIT:2>)**<NUM_LIT:2> + (V - b)*(R**<NUM_LIT:2>*Tc**<NUM_LIT:2>*V**<NUM_LIT:4> + <NUM_LIT:2>*R**<NUM_LIT:2>*Tc**<NUM_LIT:2>*V**<NUM_LIT:3>*b + R**<NUM_LIT:2>*Tc**<NUM_LIT:2>*V**<NUM_LIT:2>*b**<NUM_LIT:2> - <NUM_LIT:2>*R*Tc*V**<NUM_LIT:3>*a*m**<NUM_LIT:2> + <NUM_LIT:2>*R*Tc*V*a*b**<NUM_LIT:2>*m**<NUM_LIT:2> + V**<NUM_LIT:2>*a**<NUM_LIT:2>*m**<NUM_LIT:4> - <NUM_LIT:2>*V*a**<NUM_LIT:2>*b*m**<NUM_LIT:4> + a**<NUM_LIT:2>*b**<NUM_LIT:2>*m**<NUM_LIT:4>)*(P*R*Tc*V**<NUM_LIT:4> + <NUM_LIT:2>*P*R*Tc*V**<NUM_LIT:3>*b + P*R*Tc*V**<NUM_LIT:2>*b**<NUM_LIT:2> - P*V**<NUM_LIT:3>*a*m**<NUM_LIT:2> + 
P*V*a*b**<NUM_LIT:2>*m**<NUM_LIT:2> + R*Tc*V**<NUM_LIT:2>*a*m**<NUM_LIT:2> + <NUM_LIT:2>*R*Tc*V**<NUM_LIT:2>*a*m + R*Tc*V**<NUM_LIT:2>*a + R*Tc*V*a*b*m**<NUM_LIT:2> + <NUM_LIT:2>*R*Tc*V*a*b*m + R*Tc*V*a*b + V*a**<NUM_LIT:2>*m**<NUM_LIT:4> + <NUM_LIT:2>*V*a**<NUM_LIT:2>*m**<NUM_LIT:3> + V*a**<NUM_LIT:2>*m**<NUM_LIT:2> - a**<NUM_LIT:2>*b*m**<NUM_LIT:4> - <NUM_LIT:2>*a**<NUM_LIT:2>*b*m**<NUM_LIT:3> - a**<NUM_LIT:2>*b*m**<NUM_LIT:2>))/((R*Tc*V**<NUM_LIT:2> + R*Tc*V*b - V*a*m**<NUM_LIT:2> + a*b*m**<NUM_LIT:2>)**<NUM_LIT:2>*(R**<NUM_LIT:2>*Tc**<NUM_LIT:2>*V**<NUM_LIT:4> + <NUM_LIT:2>*R**<NUM_LIT:2>*Tc**<NUM_LIT:2>*V**<NUM_LIT:3>*b + R**<NUM_LIT:2>*Tc**<NUM_LIT:2>*V**<NUM_LIT:2>*b**<NUM_LIT:2> - <NUM_LIT:2>*R*Tc*V**<NUM_LIT:3>*a*m**<NUM_LIT:2> + <NUM_LIT:2>*R*Tc*V*a*b**<NUM_LIT:2>*m**<NUM_LIT:2> + V**<NUM_LIT:2>*a**<NUM_LIT:2>*m**<NUM_LIT:4> - <NUM_LIT:2>*V*a**<NUM_LIT:2>*b*m**<NUM_LIT:4> + a**<NUM_LIT:2>*b**<NUM_LIT:2>*m**<NUM_LIT:4>))<EOL><DEDENT>", "docstring": "r'''Method to calculate `T` from a specified `P` and `V` for the SRK\n        EOS. Uses `a`, `b`, and `Tc` obtained from the class's namespace.\n\n        Parameters\n        ----------\n        P : float\n            Pressure, [Pa]\n        V : float\n            Molar volume, [m^3/mol]\n        quick : bool, optional\n            Whether to use a SymPy cse-derived expression (3x faster) or \n            individual formulas\n\n        Returns\n        -------\n        T : float\n            Temperature, [K]\n\n        Notes\n        -----\n        The exact solution can be derived as follows; it is excluded for \n        breviety.\n\n        >>> from sympy import *\n        >>> P, T, V, R, a, b, m = symbols('P, T, V, R, a, b, m')\n        >>> Tc, Pc, omega = symbols('Tc, Pc, omega')\n        >>> a_alpha = a*(1 + m*(1-sqrt(T/Tc)))**2\n        >>> SRK = R*T/(V-b) - a_alpha/(V*(V+b)) - P\n        >>> # solve(SRK, T)", "id": "f15780:c9:m2"}
{"signature": "def Psat(self, T, polish=False):", "body": "alpha = self.a_alpha_and_derivatives(T, full=False)/self.a<EOL>Tr = T/self.Tc<EOL>x = alpha/Tr - <NUM_LIT:1.><EOL>c = self.Psat_coeffs_limiting if Tr < <NUM_LIT> else self.Psat_coeffs<EOL>y = horner(c, x)<EOL>try:<EOL><INDENT>Psat = exp(y)*Tr*self.Pc<EOL><DEDENT>except OverflowError:<EOL><INDENT>polish = False<EOL>Psat = <NUM_LIT:0><EOL><DEDENT>if polish:<EOL><INDENT>def to_solve(P):<EOL><INDENT>e = self.__class__(Tc=self.Tc, Pc=self.Pc, omega=self.omega, T=T, P=P)<EOL>err = e.fugacity_l - e.fugacity_g<EOL>return err<EOL><DEDENT>Psat = newton(to_solve, Psat)<EOL><DEDENT>return Psat<EOL>", "docstring": "r'''Generic method to calculate vapor pressure for a specified `T`.\n\n        From Tc to 0.32Tc, uses a 10th order polynomial of the following form:\n\n        .. math::\n            \\ln\\frac{P_r}{T_r} = \\sum_{k=0}^{10} C_k\\left(\\frac{\\alpha}{T_r}\n            -1\\right)^{k}\n\n        If `polish` is True, SciPy's `newton` solver is launched with the \n        calculated vapor pressure as an initial guess in an attempt to get more\n        accuracy. This may not converge however.\n\n        Results above the critical temperature are meaningless. A first-order \n        polynomial is used to extrapolate under 0.32 Tc; however, there is \n        normally not a volume solution to the EOS which can produce that\n        low of a pressure.\n\n        Parameters\n        ----------\n        T : float\n            Temperature, [K]\n        polish : bool, optional\n            Whether to attempt to use a numerical solver to make the solution\n            more precise or not\n\n        Returns\n        -------\n        Psat : float\n            Vapor pressure, [Pa]\n\n        Notes\n        -----\n        EOSs sharing the same `b`, `delta`, and `epsilon` have the same\n        coefficient sets.\n\n        All coefficients were derived with numpy's polyfit. 
The intersection\n        between the polynomials is continuous, but there is a step change\n        in its derivative.\n\n        Form for the regression is inspired from [1]_.\n\n        References\n        ----------\n        .. [1] Soave, G. \"Direct Calculation of Pure-Compound Vapour Pressures \n           through Cubic Equations of State.\" Fluid Phase Equilibria 31, no. 2 \n           (January 1, 1986): 203-7. doi:10.1016/0378-3812(86)90013-0.", "id": "f15780:c0:m9"}
{"signature": "@staticmethod<EOL><INDENT>def Soave_1984(self, T, full=True, quick=True):<DEDENT>", "body": "c1, c2 = self.alpha_function_coeffs<EOL>T, Tc, a = self.T, self.Tc, self.a<EOL>a_alpha = a*(c1*(-T/Tc + <NUM_LIT:1>) + c2*(-<NUM_LIT:1> + Tc/T) + <NUM_LIT:1>)<EOL>if not full:<EOL><INDENT>return a_alpha<EOL><DEDENT>else:<EOL><INDENT>da_alpha_dT = a*(-c1/Tc - Tc*c2/T**<NUM_LIT:2>)<EOL>d2a_alpha_dT2 = a*(<NUM_LIT:2>*Tc*c2/T**<NUM_LIT:3>)<EOL>return a_alpha, da_alpha_dT, d2a_alpha_dT2<EOL><DEDENT>", "docstring": "r'''Method to calculate `a_alpha` and its first and second\n        derivatives according to Soave (1984) [1]_. Returns `a_alpha`, `da_alpha_dT`, and \n        `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more \n        documentation. Two coefficients needed.\n\n        .. math::\n            \\alpha = c_{1} \\left(- \\frac{T}{Tc} + 1\\right) + c_{2} \\left(-1\n            + \\frac{Tc}{T}\\right) + 1\n\n        References\n        ----------\n        .. [1] Soave, G. \"Improvement of the Van Der Waals Equation of State.\" \n           Chemical Engineering Science 39, no. 2 (January 1, 1984): 357-69. \n           doi:10.1016/0009-2509(84)80034-2.", "id": "f15780:c2:m6"}
{"signature": "def TWU_a_alpha_common(T, Tc, omega, a, full=True, quick=True, method='<STR_LIT>'):", "body": "Tr = T/Tc<EOL>if method == '<STR_LIT>':<EOL><INDENT>if Tr < <NUM_LIT:1>:<EOL><INDENT>L0, M0, N0 = <NUM_LIT>, <NUM_LIT>, <NUM_LIT><EOL>L1, M1, N1 = <NUM_LIT>, <NUM_LIT>, <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>L0, M0, N0 = <NUM_LIT>, <NUM_LIT>, -<NUM_LIT><EOL>L1, M1, N1 = <NUM_LIT>, <NUM_LIT>, -<NUM_LIT>  <EOL><DEDENT><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>if Tr < <NUM_LIT:1>:<EOL><INDENT>L0, M0, N0 = <NUM_LIT>, <NUM_LIT>, <NUM_LIT><EOL>L1, M1, N1 = <NUM_LIT>, <NUM_LIT>, <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>L0, M0, N0 = <NUM_LIT>, <NUM_LIT>, -<NUM_LIT><EOL>L1, M1, N1 = <NUM_LIT>,  <NUM_LIT>, -<NUM_LIT><EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if not full:<EOL><INDENT>alpha0 = Tr**(N0*(M0-<NUM_LIT:1.>))*exp(L0*(<NUM_LIT:1.>-Tr**(N0*M0)))<EOL>alpha1 = Tr**(N1*(M1-<NUM_LIT:1.>))*exp(L1*(<NUM_LIT:1.>-Tr**(N1*M1)))<EOL>alpha = alpha0 + omega*(alpha1 - alpha0)<EOL>return a*alpha<EOL><DEDENT>else:<EOL><INDENT>if quick:<EOL><INDENT>x0 = T/Tc<EOL>x1 = M0 - <NUM_LIT:1><EOL>x2 = N0*x1<EOL>x3 = x0**x2<EOL>x4 = M0*N0<EOL>x5 = x0**x4<EOL>x6 = exp(-L0*(x5 - <NUM_LIT:1.>))<EOL>x7 = x3*x6<EOL>x8 = M1 - <NUM_LIT:1.><EOL>x9 = N1*x8<EOL>x10 = x0**x9<EOL>x11 = M1*N1<EOL>x12 = x0**x11<EOL>x13 = x2*x7<EOL>x14 = L0*M0*N0*x3*x5*x6<EOL>x15 = x13 - x14<EOL>x16 = exp(-L1*(x12 - <NUM_LIT:1>))<EOL>x17 = -L1*M1*N1*x10*x12*x16 + x10*x16*x9 - x13 + x14<EOL>x18 = N0*N0<EOL>x19 = x18*x3*x6<EOL>x20 = x1**<NUM_LIT:2>*x19<EOL>x21 = M0**<NUM_LIT:2><EOL>x22 = L0*x18*x3*x5*x6<EOL>x23 = x21*x22<EOL>x24 = <NUM_LIT:2>*M0*x1*x22<EOL>x25 = L0**<NUM_LIT:2>*x0**(<NUM_LIT:2>*x4)*x19*x21<EOL>x26 = N1**<NUM_LIT:2><EOL>x27 = x10*x16*x26<EOL>x28 = M1**<NUM_LIT:2><EOL>x29 = L1*x10*x12*x16*x26<EOL>a_alpha = a*(-omega*(-x10*exp(L1*(-x12 + <NUM_LIT:1>)) + x3*exp(L0*(-x5 + <NUM_LIT:1>))) + x7)<EOL>da_alpha_dT = a*(omega*x17 + x15)/T<EOL>d2a_alpha_dT2 = 
a*(-(omega*(-L1**<NUM_LIT:2>*x0**(<NUM_LIT>*x11)*x27*x28 + <NUM_LIT>*M1*x29*x8 + x17 + x20 - x23 - x24 + x25 - x27*x8**<NUM_LIT:2> + x28*x29) + x15 - x20 + x23 + x24 - x25)/T**<NUM_LIT:2>)<EOL><DEDENT>else:<EOL><INDENT>a_alpha = TWU_a_alpha_common(T=T, Tc=Tc, omega=omega, a=a, full=False, quick=quick, method=method)<EOL>da_alpha_dT = a*(-L0*M0*N0*(T/Tc)**(M0*N0)*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*exp(L0*(-(T/Tc)**(M0*N0) + <NUM_LIT:1>))/T + N0*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*(M0 - <NUM_LIT:1>)*exp(L0*(-(T/Tc)**(M0*N0) + <NUM_LIT:1>))/T + omega*(L0*M0*N0*(T/Tc)**(M0*N0)*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*exp(L0*(-(T/Tc)**(M0*N0) + <NUM_LIT:1>))/T - L1*M1*N1*(T/Tc)**(M1*N1)*(T/Tc)**(N1*(M1 - <NUM_LIT:1>))*exp(L1*(-(T/Tc)**(M1*N1) + <NUM_LIT:1>))/T - N0*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*(M0 - <NUM_LIT:1>)*exp(L0*(-(T/Tc)**(M0*N0) + <NUM_LIT:1>))/T + N1*(T/Tc)**(N1*(M1 - <NUM_LIT:1>))*(M1 - <NUM_LIT:1>)*exp(L1*(-(T/Tc)**(M1*N1) + <NUM_LIT:1>))/T))<EOL>d2a_alpha_dT2 = a*((L0**<NUM_LIT:2>*M0**<NUM_LIT:2>*N0**<NUM_LIT:2>*(T/Tc)**(<NUM_LIT:2>*M0*N0)*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*exp(-L0*((T/Tc)**(M0*N0) - <NUM_LIT:1>)) - L0*M0**<NUM_LIT:2>*N0**<NUM_LIT:2>*(T/Tc)**(M0*N0)*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*exp(-L0*((T/Tc)**(M0*N0) - <NUM_LIT:1>)) - <NUM_LIT:2>*L0*M0*N0**<NUM_LIT:2>*(T/Tc)**(M0*N0)*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*(M0 - <NUM_LIT:1>)*exp(-L0*((T/Tc)**(M0*N0) - <NUM_LIT:1>)) + L0*M0*N0*(T/Tc)**(M0*N0)*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*exp(-L0*((T/Tc)**(M0*N0) - <NUM_LIT:1>)) + N0**<NUM_LIT:2>*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*(M0 - <NUM_LIT:1>)**<NUM_LIT:2>*exp(-L0*((T/Tc)**(M0*N0) - <NUM_LIT:1>)) - N0*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*(M0 - <NUM_LIT:1>)*exp(-L0*((T/Tc)**(M0*N0) - <NUM_LIT:1>)) - omega*(L0**<NUM_LIT:2>*M0**<NUM_LIT:2>*N0**<NUM_LIT:2>*(T/Tc)**(<NUM_LIT:2>*M0*N0)*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*exp(-L0*((T/Tc)**(M0*N0) - <NUM_LIT:1>)) - L0*M0**<NUM_LIT:2>*N0**<NUM_LIT:2>*(T/Tc)**(M0*N0)*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*exp(-L0*((T/Tc)**(M0*N0) - <NUM_LIT:1>)) - 
<NUM_LIT:2>*L0*M0*N0**<NUM_LIT:2>*(T/Tc)**(M0*N0)*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*(M0 - <NUM_LIT:1>)*exp(-L0*((T/Tc)**(M0*N0) - <NUM_LIT:1>)) + L0*M0*N0*(T/Tc)**(M0*N0)*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*exp(-L0*((T/Tc)**(M0*N0) - <NUM_LIT:1>)) - L1**<NUM_LIT:2>*M1**<NUM_LIT:2>*N1**<NUM_LIT:2>*(T/Tc)**(<NUM_LIT:2>*M1*N1)*(T/Tc)**(N1*(M1 - <NUM_LIT:1>))*exp(-L1*((T/Tc)**(M1*N1) - <NUM_LIT:1>)) + L1*M1**<NUM_LIT:2>*N1**<NUM_LIT:2>*(T/Tc)**(M1*N1)*(T/Tc)**(N1*(M1 - <NUM_LIT:1>))*exp(-L1*((T/Tc)**(M1*N1) - <NUM_LIT:1>)) + <NUM_LIT:2>*L1*M1*N1**<NUM_LIT:2>*(T/Tc)**(M1*N1)*(T/Tc)**(N1*(M1 - <NUM_LIT:1>))*(M1 - <NUM_LIT:1>)*exp(-L1*((T/Tc)**(M1*N1) - <NUM_LIT:1>)) - L1*M1*N1*(T/Tc)**(M1*N1)*(T/Tc)**(N1*(M1 - <NUM_LIT:1>))*exp(-L1*((T/Tc)**(M1*N1) - <NUM_LIT:1>)) + N0**<NUM_LIT:2>*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*(M0 - <NUM_LIT:1>)**<NUM_LIT:2>*exp(-L0*((T/Tc)**(M0*N0) - <NUM_LIT:1>)) - N0*(T/Tc)**(N0*(M0 - <NUM_LIT:1>))*(M0 - <NUM_LIT:1>)*exp(-L0*((T/Tc)**(M0*N0) - <NUM_LIT:1>)) - N1**<NUM_LIT:2>*(T/Tc)**(N1*(M1 - <NUM_LIT:1>))*(M1 - <NUM_LIT:1>)**<NUM_LIT:2>*exp(-L1*((T/Tc)**(M1*N1) - <NUM_LIT:1>)) + N1*(T/Tc)**(N1*(M1 - <NUM_LIT:1>))*(M1 - <NUM_LIT:1>)*exp(-L1*((T/Tc)**(M1*N1) - <NUM_LIT:1>))))/T**<NUM_LIT:2>)<EOL><DEDENT>return a_alpha, da_alpha_dT, d2a_alpha_dT2<EOL><DEDENT>", "docstring": "r'''Function to calculate `a_alpha` and optionally its first and second\n    derivatives for the TWUPR or TWUSRK EOS. 
Returns 'a_alpha', and \n    optionally 'da_alpha_dT' and 'd2a_alpha_dT2'.\n    Used by `TWUPR` and `TWUSRK`; has little purpose on its own.\n    See either class for the correct reference, and examples of using the EOS.\n\n    Parameters\n    ----------\n    T : float\n        Temperature, [K]\n    Tc : float\n        Critical temperature, [K]\n    omega : float\n        Acentric factor, [-]\n    a : float\n        Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]\n    full : float\n        Whether or not to return its first and second derivatives\n    quick : bool, optional\n        Whether to use a SymPy cse-derived expression (3x faster) or \n        individual formulas\n    method : str\n        Either 'PR' or 'SRK'\n\n    Notes\n    -----\n    The derivatives are somewhat long and are not described here for \n    brevity; they are obtainable from the following SymPy expression.\n\n    >>> from sympy import *\n    >>> T, Tc, omega, N1, N0, M1, M0, L1, L0 = symbols('T, Tc, omega, N1, N0, M1, M0, L1, L0')\n    >>> Tr = T/Tc\n    >>> alpha0 = Tr**(N0*(M0-1))*exp(L0*(1-Tr**(N0*M0)))\n    >>> alpha1 = Tr**(N1*(M1-1))*exp(L1*(1-Tr**(N1*M1)))\n    >>> alpha = alpha0 + omega*(alpha1-alpha0)\n    >>> # diff(alpha, T)\n    >>> # diff(alpha, T, T)", "id": "f15780:m0"}
{"signature": "@staticmethod<EOL><INDENT>def Androulakis(self, T, full=True, quick=True):<DEDENT>", "body": "c1, c2, c3 = self.alpha_function_coeffs<EOL>T, Tc, a = self.T, self.Tc, self.a<EOL>a_alpha = a*(c1*(-(T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>) + <NUM_LIT:1>) + c2*(-(T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>) + <NUM_LIT:1>)**<NUM_LIT:2> + c3*(-(T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>) + <NUM_LIT:1>)**<NUM_LIT:3> + <NUM_LIT:1>)<EOL>if not full:<EOL><INDENT>return a_alpha<EOL><DEDENT>else:<EOL><INDENT>da_alpha_dT = a*(-<NUM_LIT:2>*c1*(T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>)/(<NUM_LIT:3>*T) - <NUM_LIT:4>*c2*(T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>)*(-(T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>) + <NUM_LIT:1>)/(<NUM_LIT:3>*T) - <NUM_LIT:2>*c3*(T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>)*(-(T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>) + <NUM_LIT:1>)**<NUM_LIT:2>/T)<EOL>d2a_alpha_dT2 = a*(<NUM_LIT:2>*(T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>)*(c1 + <NUM_LIT:4>*c2*(T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>) - <NUM_LIT:2>*c2*((T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>) - <NUM_LIT:1>) - <NUM_LIT:12>*c3*(T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>)*((T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>) - <NUM_LIT:1>) + <NUM_LIT:3>*c3*((T/Tc)**(<NUM_LIT:2>/<NUM_LIT:3>) - <NUM_LIT:1>)**<NUM_LIT:2>)/(<NUM_LIT:9>*T**<NUM_LIT:2>))<EOL>return a_alpha, da_alpha_dT, d2a_alpha_dT2<EOL><DEDENT>", "docstring": "r'''Method to calculate `a_alpha` and its first and second\n        derivatives according to Androulakis et al. (1989) [1]_. Returns `a_alpha`, \n        `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`  \n        for more documentation. Three coefficients needed.\n\n        .. math::\n            \\alpha = c_{1} \\left(- \\left(\\frac{T}{Tc}\\right)^{\\frac{2}{3}}\n            + 1\\right) + c_{2} \\left(- \\left(\\frac{T}{Tc}\\right)^{\\frac{2}{3}} \n            + 1\\right)^{2} + c_{3} \\left(- \\left(\\frac{T}{Tc}\\right)^{\n            \\frac{2}{3}} + 1\\right)^{3} + 1\n\n        References\n        ----------\n        .. [1] Androulakis, I. P., N. S. Kalospiros, and D. P. 
Tassios. \n           \"Thermophysical Properties of Pure Polar and Nonpolar Compounds with\n           a Modified VdW-711 Equation of State.\" Fluid Phase Equilibria 45, \n           no. 2 (April 1, 1989): 135-63. doi:10.1016/0378-3812(89)80254-7.", "id": "f15780:c2:m10"}
{"signature": "@staticmethod<EOL><INDENT>def Melhem(self, T, full=True, quick=True):<DEDENT>", "body": "c1, c2 = self.alpha_function_coeffs<EOL>T, Tc, a = self.T, self.Tc, self.a<EOL>a_alpha = a*exp(c1*(-T/Tc + <NUM_LIT:1>) + c2*(-sqrt(T/Tc) + <NUM_LIT:1>)**<NUM_LIT:2>)<EOL>if not full:<EOL><INDENT>return a_alpha<EOL><DEDENT>else:<EOL><INDENT>da_alpha_dT = a*((-c1/Tc - c2*sqrt(T/Tc)*(-sqrt(T/Tc) + <NUM_LIT:1>)/T)*exp(c1*(-T/Tc + <NUM_LIT:1>) + c2*(-sqrt(T/Tc) + <NUM_LIT:1>)**<NUM_LIT:2>))<EOL>d2a_alpha_dT2 = a*(((c1/Tc - c2*sqrt(T/Tc)*(sqrt(T/Tc) - <NUM_LIT:1>)/T)**<NUM_LIT:2> + c2*(<NUM_LIT:1>/Tc - sqrt(T/Tc)*(sqrt(T/Tc) - <NUM_LIT:1>)/T)/(<NUM_LIT:2>*T))*exp(-c1*(T/Tc - <NUM_LIT:1>) + c2*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:2>))<EOL>return a_alpha, da_alpha_dT, d2a_alpha_dT2<EOL><DEDENT>", "docstring": "r'''Method to calculate `a_alpha` and its first and second\n        derivatives according to Melhem et al. (1989) [1]_. Returns `a_alpha`, \n        `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`  \n        for more documentation. Two coefficients needed.\n\n        .. math::\n            \\alpha = e^{c_{1} \\left(- \\frac{T}{Tc} + 1\\right) + c_{2} \n            \\left(- \\sqrt{\\frac{T}{Tc}} + 1\\right)^{2}}\n\n        References\n        ----------\n        .. [1] Melhem, Georges A., Riju Saini, and Bernard M. Goodwin. \"A \n           Modified Peng-Robinson Equation of State.\" Fluid Phase Equilibria \n           47, no. 2 (August 1, 1989): 189-237. \n           doi:10.1016/0378-3812(89)80176-1.", "id": "f15780:c2:m9"}
{"signature": "def a_alpha_and_derivatives(self, T, full=True, quick=True):", "body": "Tc, a, kappa0, kappa1, kappa2, kappa3 = self.Tc, self.a, self.kappa0, self.kappa1, self.kappa2, self.kappa3<EOL>if not full:<EOL><INDENT>Tr = T/Tc<EOL>kappa = kappa0 + ((kappa1 + kappa2*(kappa3 - Tr)*(<NUM_LIT:1> - Tr**<NUM_LIT:0.5>))*(<NUM_LIT:1> + Tr**<NUM_LIT:0.5>)*(<NUM_LIT> - Tr))<EOL>return a*(<NUM_LIT:1> + kappa*(<NUM_LIT:1>-sqrt(T/Tc)))**<NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>if quick:<EOL><INDENT>x1 = T/Tc<EOL>x2 = sqrt(x1)<EOL>x3 = x2 - <NUM_LIT:1.><EOL>x4 = x2 + <NUM_LIT:1.><EOL>x5 = <NUM_LIT>*x1 - <NUM_LIT><EOL>x6 = -kappa3 + x1<EOL>x7 = kappa1 + kappa2*x3*x6<EOL>x8 = x5*x7<EOL>x9 = <NUM_LIT>*kappa0 - x4*x8<EOL>x10 = x3*x9<EOL>x11 = x10*<NUM_LIT:0.1> - <NUM_LIT:1.><EOL>x13 = x2/T<EOL>x14 = x7/Tc<EOL>x15 = kappa2*x4*x5<EOL>x16 = <NUM_LIT>*(-x2 + <NUM_LIT:1.>)/Tc + x13*(kappa3 - x1)<EOL>x17 = -x13*x8 - x14*(<NUM_LIT>*x2 + <NUM_LIT>) + x15*x16<EOL>x18 = x13*x9 + x17*x3<EOL>x19 = x2/(T*T)<EOL>x20 = <NUM_LIT>*x2/T<EOL>a_alpha = a*x11*x11<EOL>da_alpha_dT = a*x11*x18*<NUM_LIT:0.1><EOL>d2a_alpha_dT2 = a*(x18*x18 + (x10 - <NUM_LIT>)*(x17*x20 - x19*x9 + x3*(<NUM_LIT>*kappa2/Tc*x16*x4 + kappa2*x16*x20*x5 - <NUM_LIT>/T*x14*x2 - x15/T*x2*(<NUM_LIT>/Tc - x6/T) + x19*x8)))/<NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>a_alpha = a*(<NUM_LIT:1> + self.kappa*(<NUM_LIT:1>-sqrt(T/Tc)))**<NUM_LIT:2><EOL>da_alpha_dT = a*((kappa0 + (kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>))*(-sqrt(T/Tc) + <NUM_LIT:1>) + <NUM_LIT:1>)*(<NUM_LIT:2>*(-sqrt(T/Tc) + <NUM_LIT:1>)*((sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>)*(-kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)/Tc - kappa2*sqrt(T/Tc)*(-T/Tc + kappa3)/(<NUM_LIT:2>*T)) - (kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(sqrt(T/Tc) + <NUM_LIT:1>)/Tc + sqrt(T/Tc)*(kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(-T/Tc + 
<NUM_LIT:7>/<NUM_LIT:10>)/(<NUM_LIT:2>*T)) - sqrt(T/Tc)*(kappa0 + (kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>))/T)<EOL>d2a_alpha_dT2 = a*((kappa0 + (kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>))*(-sqrt(T/Tc) + <NUM_LIT:1>) + <NUM_LIT:1>)*(<NUM_LIT:2>*(-sqrt(T/Tc) + <NUM_LIT:1>)*((sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>)*(kappa2*sqrt(T/Tc)/(T*Tc) + kappa2*sqrt(T/Tc)*(-T/Tc + kappa3)/(<NUM_LIT:4>*T**<NUM_LIT:2>)) - <NUM_LIT:2>*(sqrt(T/Tc) + <NUM_LIT:1>)*(-kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)/Tc - kappa2*sqrt(T/Tc)*(-T/Tc + kappa3)/(<NUM_LIT:2>*T))/Tc + sqrt(T/Tc)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>)*(-kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)/Tc - kappa2*sqrt(T/Tc)*(-T/Tc + kappa3)/(<NUM_LIT:2>*T))/T - sqrt(T/Tc)*(kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))/(T*Tc) - sqrt(T/Tc)*(kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>)/(<NUM_LIT:4>*T**<NUM_LIT:2>)) - <NUM_LIT:2>*sqrt(T/Tc)*((sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>)*(-kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)/Tc - kappa2*sqrt(T/Tc)*(-T/Tc + kappa3)/(<NUM_LIT:2>*T)) - (kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(sqrt(T/Tc) + <NUM_LIT:1>)/Tc + sqrt(T/Tc)*(kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>)/(<NUM_LIT:2>*T))/T + sqrt(T/Tc)*(kappa0 + (kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>))/(<NUM_LIT:2>*T**<NUM_LIT:2>)) + a*((-sqrt(T/Tc) + <NUM_LIT:1>)*((sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>)*(-kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)/Tc - kappa2*sqrt(T/Tc)*(-T/Tc + kappa3)/(<NUM_LIT:2>*T)) - (kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(sqrt(T/Tc) + <NUM_LIT:1>)/Tc + sqrt(T/Tc)*(kappa1 + 
kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>)/(<NUM_LIT:2>*T)) - sqrt(T/Tc)*(kappa0 + (kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>))/(<NUM_LIT:2>*T))*(<NUM_LIT:2>*(-sqrt(T/Tc) + <NUM_LIT:1>)*((sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>)*(-kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)/Tc - kappa2*sqrt(T/Tc)*(-T/Tc + kappa3)/(<NUM_LIT:2>*T)) - (kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(sqrt(T/Tc) + <NUM_LIT:1>)/Tc + sqrt(T/Tc)*(kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>)/(<NUM_LIT:2>*T)) - sqrt(T/Tc)*(kappa0 + (kappa1 + kappa2*(-sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + kappa3))*(sqrt(T/Tc) + <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:7>/<NUM_LIT:10>))/T)<EOL><DEDENT>return a_alpha, da_alpha_dT, d2a_alpha_dT2<EOL><DEDENT>", "docstring": "r'''Method to calculate `a_alpha` and its first and second\n        derivatives for this EOS. Returns `a_alpha`, `da_alpha_dT`, and \n        `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more \n        documentation. Uses the set values of `Tc`, `kappa0`, `kappa1`,\n        `kappa2`, `kappa3`, and `a`. \n\n        For use in `solve_T`, returns only `a_alpha` if full is False.\n\n        The first and second derivatives of `a_alpha` are available through the\n        following SymPy expression.\n\n        >>> from sympy import *\n        >>> P, T, V = symbols('P, T, V')\n        >>> Tc, Pc, omega = symbols('Tc, Pc, omega')\n        >>> R, a, b, kappa0, kappa1, kappa2, kappa3 = symbols('R, a, b, kappa0, kappa1, kappa2, kappa3')\n        >>> Tr = T/Tc\n        >>> kappa = kappa0 + (kappa1 + kappa2*(kappa3-Tr)*(1-sqrt(Tr)))*(1+sqrt(Tr))*(Rational('0.7')-Tr)\n        >>> a_alpha = a*(1 + kappa*(1-sqrt(T/Tc)))**2\n        >>> # diff(a_alpha, T)\n        >>> # diff(a_alpha, T, 2)", "id": "f15780:c6:m2"}
{"signature": "def check_sufficient_inputs(self):", "body": "if not ((self.T and self.P) or (self.T and self.V) or (self.P and self.V)):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "Method to an exception if none of the pairs (T, P), (T, V), or \n        (P, V) are given.", "id": "f15780:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def Coquelet(self, T, full=True, quick=True):<DEDENT>", "body": "c1, c2, c3 = self.alpha_function_coeffs<EOL>T, Tc, a = self.T, self.Tc, self.a<EOL>a_alpha = a*(exp(c1*(-T/Tc + <NUM_LIT:1>)*(c2*(-sqrt(T/Tc) + <NUM_LIT:1>)**<NUM_LIT:2> + c3*(-sqrt(T/Tc) + <NUM_LIT:1>)**<NUM_LIT:3> + <NUM_LIT:1>)**<NUM_LIT:2>))<EOL>if not full:<EOL><INDENT>return a_alpha<EOL><DEDENT>else:<EOL><INDENT>da_alpha_dT = a*((c1*(-T/Tc + <NUM_LIT:1>)*(-<NUM_LIT:2>*c2*sqrt(T/Tc)*(-sqrt(T/Tc) + <NUM_LIT:1>)/T - <NUM_LIT:3>*c3*sqrt(T/Tc)*(-sqrt(T/Tc) + <NUM_LIT:1>)**<NUM_LIT:2>/T)*(c2*(-sqrt(T/Tc) + <NUM_LIT:1>)**<NUM_LIT:2> + c3*(-sqrt(T/Tc) + <NUM_LIT:1>)**<NUM_LIT:3> + <NUM_LIT:1>) - c1*(c2*(-sqrt(T/Tc) + <NUM_LIT:1>)**<NUM_LIT:2> + c3*(-sqrt(T/Tc) + <NUM_LIT:1>)**<NUM_LIT:3> + <NUM_LIT:1>)**<NUM_LIT:2>/Tc)*exp(c1*(-T/Tc + <NUM_LIT:1>)*(c2*(-sqrt(T/Tc) + <NUM_LIT:1>)**<NUM_LIT:2> + c3*(-sqrt(T/Tc) + <NUM_LIT:1>)**<NUM_LIT:3> + <NUM_LIT:1>)**<NUM_LIT:2>))<EOL>d2a_alpha_dT2 = a*(c1*(c1*(-(c2*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:2> - c3*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:3> + <NUM_LIT:1>)/Tc + sqrt(T/Tc)*(-<NUM_LIT:2>*c2 + <NUM_LIT:3>*c3*(sqrt(T/Tc) - <NUM_LIT:1>))*(sqrt(T/Tc) - <NUM_LIT:1>)*(T/Tc - <NUM_LIT:1>)/T)**<NUM_LIT:2>*(c2*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:2> - c3*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:3> + <NUM_LIT:1>)**<NUM_LIT:2> - ((T/Tc - <NUM_LIT:1>)*(c2*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:2> - c3*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:3> + <NUM_LIT:1>)*(<NUM_LIT:2>*c2/Tc - <NUM_LIT:6>*c3*(sqrt(T/Tc) - <NUM_LIT:1>)/Tc - <NUM_LIT:2>*c2*sqrt(T/Tc)*(sqrt(T/Tc) - <NUM_LIT:1>)/T + <NUM_LIT:3>*c3*sqrt(T/Tc)*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:2>/T) + <NUM_LIT:4>*sqrt(T/Tc)*(<NUM_LIT:2>*c2 - <NUM_LIT:3>*c3*(sqrt(T/Tc) - <NUM_LIT:1>))*(sqrt(T/Tc) - <NUM_LIT:1>)*(c2*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:2> - c3*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:3> + <NUM_LIT:1>)/Tc + (<NUM_LIT:2>*c2 - <NUM_LIT:3>*c3*(sqrt(T/Tc) - 
<NUM_LIT:1>))**<NUM_LIT:2>*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:2>*(T/Tc - <NUM_LIT:1>)/Tc)/(<NUM_LIT:2>*T))*exp(-c1*(T/Tc - <NUM_LIT:1>)*(c2*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:2> - c3*(sqrt(T/Tc) - <NUM_LIT:1>)**<NUM_LIT:3> + <NUM_LIT:1>)**<NUM_LIT:2>))<EOL>return a_alpha, da_alpha_dT, d2a_alpha_dT2<EOL><DEDENT>", "docstring": "r'''Method to calculate `a_alpha` and its first and second\n        derivatives according to Coquelet et al. (2004) [1]_. Returns `a_alpha`, `da_alpha_dT`, and \n        `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more \n        documentation. Three coefficients needed.\n\n        .. math::\n            \\alpha = e^{c_{1} \\left(- \\frac{T}{Tc} + 1\\right) \\left(c_{2} \n            \\left(- \\sqrt{\\frac{T}{Tc}} + 1\\right)^{2} + c_{3} \n            \\left(- \\sqrt{\\frac{T}{Tc}} + 1\\right)^{3} + 1\\right)^{2}}\n\n        References\n        ----------\n        .. [1] Coquelet, C., A. Chapoy, and D. Richon. \"Development of a New \n           Alpha Function for the Peng\u2013Robinson Equation of State: Comparative \n           Study of Alpha Function Models for Pure Gases (Natural Gas \n           Components) and Water-Gas Systems.\" International Journal of \n           Thermophysics 25, no. 1 (January 1, 2004): 133-58. \n           doi:10.1023/B:IJOT.0000022331.46865.2f.", "id": "f15780:c2:m16"}
{"signature": "@staticmethod<EOL><INDENT>def Twu(self, T, full=True, quick=True):<DEDENT>", "body": "c1, c2, c3 = self.alpha_function_coeffs<EOL>T, Tc, a = self.T, self.Tc, self.a<EOL>a_alpha = a*((T/Tc)**(c3*(c2 - <NUM_LIT:1>))*exp(c1*(-(T/Tc)**(c2*c3) + <NUM_LIT:1>)))<EOL>if not full:<EOL><INDENT>return a_alpha<EOL><DEDENT>else:<EOL><INDENT>da_alpha_dT = a*(-c1*c2*c3*(T/Tc)**(c2*c3)*(T/Tc)**(c3*(c2 - <NUM_LIT:1>))*exp(c1*(-(T/Tc)**(c2*c3) + <NUM_LIT:1>))/T + c3*(T/Tc)**(c3*(c2 - <NUM_LIT:1>))*(c2 - <NUM_LIT:1>)*exp(c1*(-(T/Tc)**(c2*c3) + <NUM_LIT:1>))/T)<EOL>d2a_alpha_dT2 = a*(c3*(T/Tc)**(c3*(c2 - <NUM_LIT:1>))*(c1**<NUM_LIT:2>*c2**<NUM_LIT:2>*c3*(T/Tc)**(<NUM_LIT:2>*c2*c3) - c1*c2**<NUM_LIT:2>*c3*(T/Tc)**(c2*c3) - <NUM_LIT:2>*c1*c2*c3*(T/Tc)**(c2*c3)*(c2 - <NUM_LIT:1>) + c1*c2*(T/Tc)**(c2*c3) - c2 + c3*(c2 - <NUM_LIT:1>)**<NUM_LIT:2> + <NUM_LIT:1>)*exp(-c1*((T/Tc)**(c2*c3) - <NUM_LIT:1>))/T**<NUM_LIT:2>)<EOL>return a_alpha, da_alpha_dT, d2a_alpha_dT2<EOL><DEDENT>", "docstring": "r'''Method to calculate `a_alpha` and its first and second\n        derivatives according to Twu et al. (1991) [1]_. Returns `a_alpha`, \n        `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`  \n        for more documentation. Three coefficients needed.\n\n        .. math::\n            \\alpha = \\left(\\frac{T}{Tc}\\right)^{c_{3} \\left(c_{2} \n            - 1\\right)} e^{c_{1} \\left(- \\left(\\frac{T}{Tc}\n            \\right)^{c_{2} c_{3}} + 1\\right)}\n\n        References\n        ----------\n        .. [1] Twu, Chorng H., David Bluck, John R. Cunningham, and John E. \n           Coon. \"A Cubic Equation of State with a New Alpha Function and a \n           New Mixing Rule.\" Fluid Phase Equilibria 69 (December 10, 1991): \n           33-50. doi:10.1016/0378-3812(91)90024-2.", "id": "f15780:c2:m13"}
{"signature": "def solve_T(self, P, V, quick=True):", "body": "if self.S2 == <NUM_LIT:0>:<EOL><INDENT>self.m = self.S1<EOL>return SRK.solve_T(self, P, V, quick=quick)<EOL><DEDENT>else:<EOL><INDENT>return super(SRK, self).solve_T(P, V, quick=quick) <EOL>", "docstring": "r'''Method to calculate `T` from a specified `P` and `V` for the API \n        SRK EOS. Uses `a`, `b`, and `Tc` obtained from the class's namespace.\n\n        Parameters\n        ----------\n        P : float\n            Pressure, [Pa]\n        V : float\n            Molar volume, [m^3/mol]\n        quick : bool, optional\n            Whether to use a SymPy cse-derived expression (3x faster) or \n            individual formulas\n\n        Returns\n        -------\n        T : float\n            Temperature, [K]\n\n        Notes\n        -----\n        If S2 is set to 0, the solution is the same as in the SRK EOS, and that\n        is used. Otherwise, newton's method must be used to solve for `T`. \n        There are 8 roots of T in that case, six of them real. No guarantee can\n        be made regarding which root will be obtained.", "id": "f15780:c10:m2"}
{"signature": "@staticmethod<EOL><INDENT>def main_derivatives_and_departures(T, P, V, b, delta, epsilon, a_alpha,<EOL>da_alpha_dT, d2a_alpha_dT2, quick=True):<DEDENT>", "body": "dP_dT = R/(V - b)<EOL>dP_dV = -R*T/(V - b)**<NUM_LIT:2> + <NUM_LIT:2>*a_alpha/V**<NUM_LIT:3><EOL>d2P_dT2 = <NUM_LIT:0><EOL>d2P_dV2 = <NUM_LIT:2>*(R*T/(V - b)**<NUM_LIT:3> - <NUM_LIT:3>*a_alpha/V**<NUM_LIT:4>)<EOL>d2P_dTdV = -R/(V - b)**<NUM_LIT:2><EOL>H_dep = P*V - R*T - a_alpha/V<EOL>S_dep = R*(-log(V) + log(V - b)) + R*log(P*V/(R*T))<EOL>Cv_dep = <NUM_LIT:0><EOL>return [dP_dT, dP_dV, d2P_dT2, d2P_dV2, d2P_dTdV, H_dep, S_dep, Cv_dep]<EOL>", "docstring": "Re-implementation of derivatives and excess property calculations, \n        as ZeroDivisionError errors occur with the general solution. The \n        following derivation is the source of these formulas.\n\n        >>> from sympy import *\n        >>> P, T, V, R, b, a = symbols('P, T, V, R, b, a')\n        >>> P_vdw = R*T/(V-b) - a/(V*V)\n        >>> vdw = P_vdw - P\n        >>> \n        >>> dP_dT = diff(vdw, T)\n        >>> dP_dV = diff(vdw, V)\n        >>> d2P_dT2 = diff(vdw, T, 2)\n        >>> d2P_dV2 = diff(vdw, V, 2)\n        >>> d2P_dTdV = diff(vdw, T, V)\n        >>> H_dep = integrate(T*dP_dT - P_vdw, (V, oo, V))\n        >>> H_dep += P*V - R*T\n        >>> S_dep = integrate(dP_dT - R/V, (V,oo,V))\n        >>> S_dep += R*log(P*V/(R*T))\n        >>> Cv_dep = T*integrate(d2P_dT2, (V,oo,V))\n        >>> \n        >>> dP_dT, dP_dV, d2P_dT2, d2P_dV2, d2P_dTdV, H_dep, S_dep, Cv_dep\n        (R/(V - b), -R*T/(V - b)**2 + 2*a/V**3, 0, 2*(R*T/(V - b)**3 - 3*a/V**4), -R/(V - b)**2, P*V - R*T - a/V, R*(-log(V) + log(V - b)) + R*log(P*V/(R*T)), 0)", "id": "f15780:c7:m3"}
{"signature": "@staticmethod<EOL><INDENT>def Mathias(self, T, full=True, quick=True):<DEDENT>", "body": "c1, c2 = self.alpha_function_coeffs<EOL>T, Tc, a = self.T, self.Tc, self.a<EOL>a_alpha = a*(<NUM_LIT:1> + c1*(<NUM_LIT:1>-sqrt(Tr)) -c2*(<NUM_LIT:1>-Tr)*(<NUM_LIT>-Tr))**<NUM_LIT:2><EOL>if not full:<EOL><INDENT>return a_alpha<EOL><DEDENT>else:<EOL><INDENT>da_alpha_dT = a*(c1*(-sqrt(T/Tc) + <NUM_LIT:1>) - c2*(-T/Tc + <NUM_LIT>)*(-T/Tc + <NUM_LIT:1>) + <NUM_LIT:1>)*(<NUM_LIT:2>*c2*(-T/Tc + <NUM_LIT>)/Tc + <NUM_LIT:2>*c2*(-T/Tc + <NUM_LIT:1>)/Tc - c1*sqrt(T/Tc)/T)<EOL>d2a_alpha_dT2 = a*((<NUM_LIT:8>*c2/Tc**<NUM_LIT:2> - c1*sqrt(T/Tc)/T**<NUM_LIT:2>)*(c1*(sqrt(T/Tc) - <NUM_LIT:1>) + c2*(T/Tc - <NUM_LIT:1>)*(T/Tc - <NUM_LIT>) - <NUM_LIT:1>) + (<NUM_LIT:2>*c2*(T/Tc - <NUM_LIT:1>)/Tc + <NUM_LIT:2>*c2*(T/Tc - <NUM_LIT>)/Tc + c1*sqrt(T/Tc)/T)**<NUM_LIT:2>)/<NUM_LIT:2><EOL>return a_alpha, da_alpha_dT, d2a_alpha_dT2<EOL><DEDENT>", "docstring": "r'''Method to calculate `a_alpha` and its first and second\n        derivatives according to Mathias (1983) [1]_. Returns `a_alpha`, \n        `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`  \n        for more documentation. Two coefficients needed.\n\n        .. math::\n            \\alpha = \\left(c_{1} \\left(- \\sqrt{\\frac{T}{Tc}} + 1\\right)\n            - c_{2} \\left(- \\frac{T}{Tc} + 0.7\\right) \\left(- \\frac{T}{Tc} \n            + 1\\right) + 1\\right)^{2}\n\n        References\n        ----------\n        .. [1] Mathias, Paul M. \"A Versatile Phase Equilibrium Equation of \n           State.\" Industrial & Engineering Chemistry Process Design and \n           Development 22, no. 3 (July 1, 1983): 385-91. \n           doi:10.1021/i200022a008.", "id": "f15780:c2:m3"}
{"signature": "@staticmethod<EOL><INDENT>def Almeida(self, T, full=True, quick=True):<DEDENT>", "body": "<EOL>c1, c2, c3 = self.alpha_function_coeffs<EOL>T, Tc, a = self.T, self.Tc, self.a<EOL>a_alpha = a*exp(c1*(-T/Tc + <NUM_LIT:1>)*abs(T/Tc - <NUM_LIT:1>)**(c2 - <NUM_LIT:1>) + c3*(-<NUM_LIT:1> + Tc/T))<EOL>if not full:<EOL><INDENT>return a_alpha<EOL><DEDENT>else:<EOL><INDENT>da_alpha_dT = a*((c1*(c2 - <NUM_LIT:1>)*(-T/Tc + <NUM_LIT:1>)*abs(T/Tc - <NUM_LIT:1>)**(c2 - <NUM_LIT:1>)*copysign(<NUM_LIT:1>, T/Tc - <NUM_LIT:1>)/(Tc*Abs(T/Tc - <NUM_LIT:1>)) - c1*abs(T/Tc - <NUM_LIT:1>)**(c2 - <NUM_LIT:1>)/Tc - Tc*c3/T**<NUM_LIT:2>)*exp(c1*(-T/Tc + <NUM_LIT:1>)*abs(T/Tc - <NUM_LIT:1>)**(c2 - <NUM_LIT:1>) + c3*(-<NUM_LIT:1> + Tc/T)))<EOL>d2a_alpha_dT2 = a*exp(c3*(Tc/T - <NUM_LIT:1>) - c1*abs(T/Tc - <NUM_LIT:1>)**(c2 - <NUM_LIT:1>)*(T/Tc - <NUM_LIT:1>))*((c1*abs(T/Tc - <NUM_LIT:1>)**(c2 - <NUM_LIT:1>))/Tc + (Tc*c3)/T**<NUM_LIT:2> + (c1*abs(T/Tc - <NUM_LIT:1>)**(c2 - <NUM_LIT:2>)*copysign(<NUM_LIT:1>, T/Tc - <NUM_LIT:1>)*(c2 - <NUM_LIT:1>)*(T/Tc - <NUM_LIT:1>))/Tc)**<NUM_LIT:2> - exp(c3*(Tc/T - <NUM_LIT:1>) - c1*abs(T/Tc - <NUM_LIT:1>)**(c2 - <NUM_LIT:1>)*(T/Tc - <NUM_LIT:1>))*((<NUM_LIT:2>*c1*abs(T/Tc - <NUM_LIT:1>)**(c2 - <NUM_LIT:2>)*copysign(<NUM_LIT:1>, T/Tc - <NUM_LIT:1>)*(c2 - <NUM_LIT:1>))/Tc**<NUM_LIT:2> - (<NUM_LIT:2>*Tc*c3)/T**<NUM_LIT:3> + (c1*abs(T/Tc - <NUM_LIT:1>)**(c2 - <NUM_LIT:3>)*copysign(<NUM_LIT:1>, T/Tc - <NUM_LIT:1>)**<NUM_LIT:2>*(c2 - <NUM_LIT:1>)*(c2 - <NUM_LIT:2>)*(T/Tc - <NUM_LIT:1>))/Tc**<NUM_LIT:2>)<EOL>return a_alpha, da_alpha_dT, d2a_alpha_dT2<EOL><DEDENT>", "docstring": "r'''Method to calculate `a_alpha` and its first and second\n        derivatives according to Almeida et al. (1991) [1]_. Returns `a_alpha`, \n        `da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`  \n        for more documentation. Three coefficients needed.\n\n        .. 
math::\n            \\alpha = e^{c_{1} \\left(- \\frac{T}{Tc} + 1\\right) \\left|{\n            \\frac{T}{Tc} - 1}\\right|^{c_{2} - 1} + c_{3} \\left(-1 \n            + \\frac{Tc}{T}\\right)}\n\n        References\n        ----------\n        .. [1] Almeida, G. S., M. Aznar, and A. S. Telles. \"Uma Nova Forma de \n           Depend\u00eancia Com a Temperatura Do Termo Atrativo de Equa\u00e7\u00f5es de \n           Estado C\u00fabicas.\" RBE, Rev. Bras. Eng., Cad. Eng. Quim 8 (1991): 95.", "id": "f15780:c2:m12"}
{"signature": "def Clapeyron(T, Tc, Pc, dZ=<NUM_LIT:1>, Psat=<NUM_LIT>):", "body": "Tr = T/Tc<EOL>return R*T*dZ*log(Pc/Psat)/(<NUM_LIT:1.> - Tr)<EOL>", "docstring": "r'''Calculates enthalpy of vaporization at arbitrary temperatures using the\n    Clapeyron equation.\n\n    The enthalpy of vaporization is given by:\n\n    .. math::\n        \\Delta H_{vap} = RT \\Delta Z \\frac{\\ln (P_c/Psat)}{(1-T_{r})}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n    dZ : float\n        Change in compressibility factor between liquid and gas, []\n    Psat : float\n        Saturation pressure of fluid [Pa], optional\n\n    Returns\n    -------\n    Hvap : float\n        Enthalpy of vaporization, [J/mol]\n\n    Notes\n    -----\n    No original source is available for this equation.\n    [1]_ claims this equation overpredicts enthalpy by several percent.\n    Under Tr = 0.8, dZ = 1 is a reasonable assumption.\n    This equation is most accurate at the normal boiling point.\n\n    Internal units are bar.\n\n    WARNING: I believe it possible that the adjustment for pressure may be incorrect\n\n    Examples\n    --------\n    Problem from Perry's examples.\n\n    >>> Clapeyron(T=294.0, Tc=466.0, Pc=5.55E6)\n    26512.354585061985\n\n    References\n    ----------\n    .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n       New York: McGraw-Hill Professional, 2000.", "id": "f15781:m2"}
{"signature": "def Tliquidus(Tms=None, ws=None, xs=None, CASRNs=None, AvailableMethods=False,<EOL>Method=None):  ", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if none_and_length_check([Tms]):<EOL><INDENT>methods.append('<STR_LIT>')<EOL>methods.append('<STR_LIT>')<EOL><DEDENT>methods.append('<STR_LIT:None>')<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == '<STR_LIT>':<EOL><INDENT>_Tliq = max(Tms)<EOL><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>_Tliq = mixing_simple(xs, Tms)<EOL><DEDENT>elif Method == '<STR_LIT:None>':<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return _Tliq<EOL>", "docstring": "This function handles the retrival of a mixtures's liquidus point.\n\n    This API is considered experimental, and is expected to be removed in a\n    future release in favor of a more complete object-oriented interface.\n\n    >>> Tliquidus(Tms=[250.0, 350.0], xs=[0.5, 0.5])\n    350.0\n    >>> Tliquidus(Tms=[250, 350], xs=[0.5, 0.5], Method='Simple')\n    300.0\n    >>> Tliquidus(Tms=[250, 350], xs=[0.5, 0.5], AvailableMethods=True)\n    ['Maximum', 'Simple', 'None']", "id": "f15781:m14"}
{"signature": "def Riedel(Tb, Tc, Pc):", "body": "Pc = Pc/<NUM_LIT>  <EOL>Tbr = Tb/Tc<EOL>return <NUM_LIT>*Tb*R*(log(Pc) - <NUM_LIT>)/(<NUM_LIT> - Tbr)<EOL>", "docstring": "r'''Calculates enthalpy of vaporization at the boiling point, using the\n    Ridel [1]_ CSP method. Required information are critical temperature\n    and pressure, and boiling point. Equation taken from [2]_ and [3]_.\n\n    The enthalpy of vaporization is given by:\n\n    .. math::\n        \\Delta_{vap} H=1.093 T_b R\\frac{\\ln P_c-1.013}{0.930-T_{br}}\n\n    Parameters\n    ----------\n    Tb : float\n        Boiling temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n\n    Returns\n    -------\n    Hvap : float\n        Enthalpy of vaporization at the normal boiling point, [J/mol]\n\n    Notes\n    -----\n    This equation has no example calculation in any source. The source has not\n    been verified. It is equation 4-144 in Perry's. Perry's also claims that\n    errors seldom surpass 5%.\n\n    [2]_ is the source of example work here, showing a calculation at 0.0%\n    error.\n\n    Internal units of pressure are bar.\n\n    Examples\n    --------\n    Pyridine, 0.0% err vs. exp: 35090 J/mol; from Poling [2]_.\n\n    >>> Riedel(388.4, 620.0, 56.3E5)\n    35089.78989646058\n\n    References\n    ----------\n    .. [1] Riedel, L. \"Eine Neue Universelle Dampfdruckformel Untersuchungen\n       Uber Eine Erweiterung Des Theorems Der Ubereinstimmenden Zustande. Teil\n       I.\" Chemie Ingenieur Technik 26, no. 2 (February 1, 1954): 83-89.\n       doi:10.1002/cite.330260206.\n    .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n       New York: McGraw-Hill Professional, 2000.\n    .. [3] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,\n       Eighth Edition. McGraw-Hill Professional, 2007.", "id": "f15781:m7"}
{"signature": "def Hfus(T=<NUM_LIT>, P=<NUM_LIT>, MW=None, AvailableMethods=False, Method=None, CASRN='<STR_LIT>'):  ", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in CRCHfus_data.index:<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>methods.append('<STR_LIT:None>')<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == '<STR_LIT>':<EOL><INDENT>_Hfus = CRCHfus_data.at[CASRN, '<STR_LIT>']<EOL><DEDENT>elif Method == '<STR_LIT:None>' or not MW:<EOL><INDENT>_Hfus = None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>_Hfus = property_molar_to_mass(_Hfus, MW)<EOL>return _Hfus<EOL>", "docstring": "This function handles the calculation of a chemical's enthalpy of fusion.\n    Generally this, is used by the chemical class, as all parameters are passed.\n    Calling the function directly works okay.\n\n    Enthalpy of fusion is a weak function of pressure, and its effects are\n    neglected.\n\n    This API is considered experimental, and is expected to be removed in a\n    future release in favor of a more complete object-oriented interface.", "id": "f15781:m12"}
{"signature": "def Velasco(T, Tc, omega):", "body": "return (<NUM_LIT> + <NUM_LIT>*omega + <NUM_LIT>*omega**<NUM_LIT:2>)*(<NUM_LIT:1>-T/Tc)**<NUM_LIT>*R*Tc<EOL>", "docstring": "r'''Calculates enthalpy of vaporization at arbitrary temperatures using a\n    the work of [1]_; requires a chemical's critical temperature and\n    acentric factor.\n\n    The enthalpy of vaporization is given by:\n\n    .. math::\n        \\Delta_{vap} H = RT_c(7.2729 + 10.4962\\omega + 0.6061\\omega^2)(1-T_r)^{0.38}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    omega : float\n        Acentric factor [-]\n\n    Returns\n    -------\n    Hvap : float\n        Enthalpy of vaporization, [J/mol]\n\n    Notes\n    -----\n    The original article has been reviewed. It is regressed from enthalpy of\n    vaporization values at 0.7Tr, from 121 fluids in REFPROP 9.1.\n    A value in the article was read to be similar, but slightly too low from\n    that calculated here.\n\n    Examples\n    --------\n    From graph, in [1]_ for perfluoro-n-heptane.\n\n    >>> Velasco(333.2, 476.0, 0.5559)\n    33299.41734936356\n\n    References\n    ----------\n    .. [1] Velasco, S., M. J. Santos, and J. A. White. \"Extended Corresponding\n       States Expressions for the Changes in Enthalpy, Compressibility Factor\n       and Constant-Volume Heat Capacity at Vaporization.\" The Journal of\n       Chemical Thermodynamics 85 (June 2015): 68-76.\n       doi:10.1016/j.jct.2015.01.011.", "id": "f15781:m6"}
{"signature": "def load_all_methods(self):", "body": "methods = []<EOL>Tmins, Tmaxs = [], []<EOL>if has_CoolProp and self.CASRN in coolprop_dict:<EOL><INDENT>methods.append(COOLPROP)<EOL>self.CP_f = coolprop_fluids[self.CASRN]<EOL>Tmins.append(self.CP_f.Tt); Tmaxs.append(self.CP_f.Tc)<EOL><DEDENT>if self.CASRN in _VDISaturationDict:<EOL><INDENT>methods.append(VDI_TABULAR)<EOL>Ts, props = VDI_tabular_data(self.CASRN, '<STR_LIT>')<EOL>self.VDI_Tmin = Ts[<NUM_LIT:0>]<EOL>self.VDI_Tmax = Ts[-<NUM_LIT:1>]<EOL>self.tabular_data[VDI_TABULAR] = (Ts, props)<EOL>Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)<EOL><DEDENT>if self.CASRN in Alibakhshi_Cs.index and self.Tc:<EOL><INDENT>methods.append(ALIBAKHSHI)<EOL>self.Alibakhshi_C = float(Alibakhshi_Cs.at[self.CASRN, '<STR_LIT:C>'])<EOL>Tmaxs.append( max(self.Tc-<NUM_LIT>, <NUM_LIT:0>) )<EOL><DEDENT>if self.CASRN in CRCHvap_data.index and not np.isnan(CRCHvap_data.at[self.CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(CRC_HVAP_TB)<EOL>self.CRC_HVAP_TB_Tb = float(CRCHvap_data.at[self.CASRN, '<STR_LIT>'])<EOL>self.CRC_HVAP_TB_Hvap = float(CRCHvap_data.at[self.CASRN, '<STR_LIT>'])<EOL><DEDENT>if self.CASRN in CRCHvap_data.index and not np.isnan(CRCHvap_data.at[self.CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(CRC_HVAP_298)<EOL>self.CRC_HVAP_298 = float(CRCHvap_data.at[self.CASRN, '<STR_LIT>'])<EOL><DEDENT>if self.CASRN in GharagheiziHvap_data.index:<EOL><INDENT>methods.append(GHARAGHEIZI_HVAP_298)<EOL>self.GHARAGHEIZI_HVAP_298_Hvap = float(GharagheiziHvap_data.at[self.CASRN, '<STR_LIT>'])<EOL><DEDENT>if all((self.Tc, self.omega)):<EOL><INDENT>methods.extend(self.CSP_methods)<EOL>Tmaxs.append(self.Tc); Tmins.append(<NUM_LIT:0>)<EOL><DEDENT>if all((self.Tc, self.Pc)):<EOL><INDENT>methods.append(CLAPEYRON)<EOL>Tmaxs.append(self.Tc); Tmins.append(<NUM_LIT:0>)<EOL><DEDENT>if all((self.Tb, self.Tc, self.Pc)):<EOL><INDENT>methods.extend(self.boiling_methods)<EOL>Tmaxs.append(self.Tc); 
Tmins.append(<NUM_LIT:0>)<EOL><DEDENT>if self.CASRN in Perrys2_150.index:<EOL><INDENT>methods.append(DIPPR_PERRY_8E)<EOL>_, Tc, C1, C2, C3, C4, self.Perrys2_150_Tmin, self.Perrys2_150_Tmax = _Perrys2_150_values[Perrys2_150.index.get_loc(self.CASRN)].tolist()<EOL>self.Perrys2_150_coeffs = [Tc, C1, C2, C3, C4]<EOL>Tmins.append(self.Perrys2_150_Tmin); Tmaxs.append(self.Perrys2_150_Tmax)<EOL><DEDENT>if self.CASRN in VDI_PPDS_4.index:<EOL><INDENT>_,  MW, Tc, A, B, C, D, E = _VDI_PPDS_4_values[VDI_PPDS_4.index.get_loc(self.CASRN)].tolist()<EOL>self.VDI_PPDS_coeffs = [A, B, C, D, E]<EOL>self.VDI_PPDS_Tc = Tc<EOL>self.VDI_PPDS_MW = MW<EOL>methods.append(VDI_PPDS)<EOL>Tmaxs.append(self.VDI_PPDS_Tc); <EOL><DEDENT>self.all_methods = set(methods)<EOL>if Tmins and Tmaxs:<EOL><INDENT>self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)<EOL><DEDENT>", "docstring": "r'''Method which picks out coefficients for the specified chemical\n        from the various dictionaries and DataFrames storing it. All data is\n        stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,\n        and :obj:`all_methods` as a set of methods for which the data exists for.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15781:c0:m1"}
{"signature": "def Liu(Tb, Tc, Pc):", "body": "Tbr = Tb/Tc<EOL>return R*Tb*(Tb/<NUM_LIT>)**<NUM_LIT>*(<NUM_LIT:1.> - Tbr)**<NUM_LIT>*log(Pc/<NUM_LIT>)/ (<NUM_LIT:1> - Tbr + <NUM_LIT>*Tbr*log(Tbr))<EOL>", "docstring": "r'''Calculates enthalpy of vaporization at the normal boiling point using\n    the Liu [1]_ correlation, and a chemical's critical temperature, pressure\n    and boiling point.\n\n    The enthalpy of vaporization is given by:\n\n    .. math::\n        \\Delta H_{vap} = RT_b \\left[ \\frac{T_b}{220}\\right]^{0.0627} \\frac{\n        (1-T_{br})^{0.38} \\ln(P_c/P_A)}{1-T_{br} + 0.38 T_{br} \\ln T_{br}}\n\n    Parameters\n    ----------\n    Tb : float\n        Boiling temperature of the fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n\n    Returns\n    -------\n    Hvap : float\n        Enthalpy of vaporization, [J/mol]\n\n    Notes\n    -----\n    This formulation can be adjusted for lower boiling points, due to the use\n    of a rationalized pressure relationship. The formulation is taken from\n    the original article.\n\n    A correction for alcohols and organic acids based on carbon number,\n    which only modifies the boiling point, is available but not implemented.\n\n    No sample calculations are available in the article.\n\n    Internal units: Pa and K\n\n    Examples\n    --------\n    Same problem as in Perry's examples\n\n    >>> Liu(294.0, 466.0, 5.55E6)\n    26378.566319606754\n\n    References\n    ----------\n    .. [1] LIU, ZHI-YONG. \"Estimation of Heat of Vaporization of Pure Liquid at\n       Its Normal Boiling Temperature.\" Chemical Engineering Communications\n       184, no. 1 (February 1, 2001): 221-28. doi:10.1080/00986440108912849.", "id": "f15781:m9"}
{"signature": "def SMK(T, Tc, omega):", "body": "omegaR1, omegaR2 = <NUM_LIT>, <NUM_LIT><EOL>A10 = <NUM_LIT><EOL>A20 = -<NUM_LIT><EOL>A30 = -<NUM_LIT><EOL>B10 = <NUM_LIT><EOL>B20 = <NUM_LIT><EOL>B30 = -<NUM_LIT><EOL>A11 = -<NUM_LIT><EOL>A21 = -<NUM_LIT><EOL>A31 = -<NUM_LIT><EOL>B11 = <NUM_LIT><EOL>B21 = <NUM_LIT><EOL>B31 = -<NUM_LIT><EOL>tau = <NUM_LIT:1.> - T/Tc<EOL>L0 = A10*tau**(<NUM_LIT:1>/<NUM_LIT>) + A20*tau**(<NUM_LIT:5>/<NUM_LIT>) + A30*tau**(<NUM_LIT:1>-<NUM_LIT:1>/<NUM_LIT> + <NUM_LIT:1>/<NUM_LIT>) +B10*tau + B20*tau**<NUM_LIT:2> + B30*tau**<NUM_LIT:3><EOL>L1 = A11*tau**(<NUM_LIT:1>/<NUM_LIT>) + A21*tau**(<NUM_LIT:5>/<NUM_LIT>) + A31*tau**(<NUM_LIT:1>-<NUM_LIT:1>/<NUM_LIT> + <NUM_LIT:1>/<NUM_LIT>) +B11*tau + B21*tau**<NUM_LIT:2> + B31*tau**<NUM_LIT:3><EOL>domega = (omega - omegaR1)/(omegaR2 - omegaR1)<EOL>return R*Tc*(L0 + domega*L1)<EOL>", "docstring": "r'''Calculates enthalpy of vaporization at arbitrary temperatures using a\n    the work of [1]_; requires a chemical's critical temperature and\n    acentric factor.\n\n    The enthalpy of vaporization is given by:\n\n    .. 
math::\n         \\frac{\\Delta H_{vap}} {RT_c} =\n         \\left( \\frac{\\Delta H_{vap}} {RT_c} \\right)^{(R1)} + \\left(\n         \\frac{\\omega - \\omega^{(R1)}} {\\omega^{(R2)} - \\omega^{(R1)}} \\right)\n         \\left[\\left( \\frac{\\Delta H_{vap}} {RT_c} \\right)^{(R2)} - \\left(\n         \\frac{\\Delta H_{vap}} {RT_c} \\right)^{(R1)} \\right]\n\n        \\left( \\frac{\\Delta H_{vap}} {RT_c} \\right)^{(R1)}\n        = 6.537 \\tau^{1/3} - 2.467 \\tau^{5/6} - 77.251 \\tau^{1.208} +\n        59.634 \\tau + 36.009 \\tau^2 - 14.606 \\tau^3\n\n        \\left( \\frac{\\Delta H_{vap}} {RT_c} \\right)^{(R2)} - \\left(\n        \\frac{\\Delta H_{vap}} {RT_c} \\right)^{(R1)}=-0.133 \\tau^{1/3} - 28.215\n        \\tau^{5/6} - 82.958 \\tau^{1.208} + 99.00 \\tau  + 19.105 \\tau^2 -2.796 \\tau^3\n\n        \\tau = 1-T/T_c\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    omega : float\n        Acentric factor [-]\n\n    Returns\n    -------\n    Hvap : float\n        Enthalpy of vaporization, [J/mol]\n\n    Notes\n    -----\n    The original article has been reviewed and found to have coefficients with\n    slightly more precision. Additionally, the form of the equation is slightly\n    different, but numerically equivalent.\n\n    The refence fluids are:\n\n    :math:`\\omega_0` = benzene = 0.212\n\n    :math:`\\omega_1` = carbazole = 0.461\n\n    A sample problem in the article has been verified. The numerical result\n    presented by the author requires high numerical accuracy to obtain.\n\n    Examples\n    --------\n    Problem in [1]_:\n\n    >>> SMK(553.15, 751.35, 0.302)\n    39866.17647797959\n\n    References\n    ----------\n    .. [1] Sivaraman, Alwarappa, Joe W. Magee, and Riki Kobayashi. 
\"Generalized\n       Correlation of Latent Heats of Vaporization of Coal-Liquid Model Compounds\n       between Their Freezing Points and Critical Points.\" Industrial &\n       Engineering Chemistry Fundamentals 23, no. 1 (February 1, 1984): 97-100.\n       doi:10.1021/i100013a017.", "id": "f15781:m4"}
{"signature": "def conductivity_McCleskey(T, M, lambda_coeffs, A_coeffs, B, multiplier, rho=<NUM_LIT>):", "body": "t = T - <NUM_LIT><EOL>lambda_coeff = horner(lambda_coeffs, t)<EOL>A = horner(A_coeffs, t)<EOL>M_root = M**<NUM_LIT:0.5><EOL>param = lambda_coeff - A*M_root/(<NUM_LIT:1.> + B*M_root)<EOL>C = M*rho/<NUM_LIT> <EOL>return param*C*multiplier*<NUM_LIT:0.1><EOL>", "docstring": "r'''This function handles the calculation of the electrical conductivity of \n    an electrolytic aqueous solution with one electrolyte in solution. It\n    handles temperature dependency and concentrated solutions. Requires the \n    temperature of the solution; its molality, and four sets of coefficients\n    `lambda_coeffs`, `A_coeffs`, `B`, and `multiplier`.\n\n    .. math::\n        \\Lambda = \\frac{\\kappa}{C}\n\n        \\Lambda = \\Lambda^0(t) - A(t) \\frac{m^{1/2}}{1+Bm^{1/2}}\n\n        \\Lambda^\\circ(t) = c_1 t^2 + c_2 t + c_3\n\n        A(t) = d_1 t^2 + d_2 t + d_3\n\n    In the above equations, `t` is temperature in degrees Celcius;\n    `m` is molality in mol/kg, and C is the concentration of the elctrolytes\n    in mol/m^3, calculated as the product of density and molality.\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the solution, [K]\n    M : float\n        Molality of the solution with respect to one electrolyte\n        (mol solute / kg solvent), [mol/kg]\n    lambda_coeffs : list[float]\n        List of coefficients for the polynomial used to calculate `lambda`;\n        length-3 coefficients provided in [1]_,  [-]\n    A_coeffs : list[float]\n        List of coefficients for the polynomial used to calculate `A`;\n        length-3 coefficients provided in [1]_, [-]\n    B : float\n        Empirical constant for an electrolyte, [-]\n    multiplier : float\n        The multiplier to obtain the absolute conductivity from the equivalent\n        conductivity; ex 2 for CaCl2, [-]\n    rho : float, optional\n        The mass density of the 
aqueous mixture, [kg/m^3]\n\n    Returns\n    -------\n    kappa : float\n        Electrical conductivity of the solution at the specified molality and \n        temperature [S/m]\n\n    Notes\n    -----\n    Coefficients provided in [1]_ result in conductivity being calculated in\n    units of mS/cm; they are converted to S/m before returned.\n\n    Examples\n    --------\n    A 0.5 wt% solution of CaCl2, conductivity calculated in mS/cm\n\n    >>> conductivity_McCleskey(T=293.15, M=0.045053, A_coeffs=[.03918, 3.905, \n    ... 137.7], lambda_coeffs=[0.01124, 2.224, 72.36], B=3.8, multiplier=2)\n    0.8482584585108555\n\n    References\n    ----------\n    .. [1] McCleskey, R. Blaine. \"Electrical Conductivity of Electrolytes Found\n       In Natural Waters from (5 to 90) \u00b0C.\" Journal of Chemical & Engineering \n       Data 56, no. 2 (February 10, 2011): 317-27. doi:10.1021/je101012n.", "id": "f15783:m10"}
{"signature": "def Laliberte_density(T, ws, CASRNs):", "body": "rho_w = Laliberte_density_w(T)<EOL>w_w = <NUM_LIT:1> - sum(ws)<EOL>rho = w_w/rho_w<EOL>for i in range(len(CASRNs)):<EOL><INDENT>d = _Laliberte_Density_ParametersDict[CASRNs[i]]<EOL>rho_i = Laliberte_density_i(T, w_w, d[\"<STR_LIT>\"], d[\"<STR_LIT>\"], d[\"<STR_LIT>\"], d[\"<STR_LIT>\"], d[\"<STR_LIT>\"])<EOL>rho = rho + ws[i]/rho_i<EOL><DEDENT>return <NUM_LIT:1.>/rho<EOL>", "docstring": "r'''Calculate the density of an aqueous electrolyte mixture using the form proposed by [1]_.\n    Parameters are loaded by the function as needed. Units are Kelvin and Pa*s.\n\n    .. math::\n        \\rho_m = \\left(\\frac{w_w}{\\rho_w} + \\sum_i \\frac{w_i}{\\rho_{app_i}}\\right)^{-1}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    ws : array\n        Weight fractions of fluid components other than water\n    CASRNs : array\n        CAS numbers of the fluid components other than water\n\n    Returns\n    -------\n    rho_i : float\n        Solution density, [kg/m^3]\n\n    Notes\n    -----\n    Temperature range check is not used here.\n\n    Examples\n    --------\n    >>> Laliberte_density(273.15, [0.0037838838], ['7647-14-5'])\n    1002.6250120185854\n\n    References\n    ----------\n    .. [1] Laliberte, Marc. \"A Model for Calculating the Heat Capacity of\n       Aqueous Solutions, with Updated Density and Viscosity Data.\" Journal of\n       Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.\n       doi:10.1021/je8008123", "id": "f15783:m5"}
{"signature": "def Kweq_IAPWS_gas(T):", "body": "gamma0 = <NUM_LIT><EOL>gamma1 = <NUM_LIT><EOL>gamma2 = -<NUM_LIT><EOL>gamma3 = <NUM_LIT><EOL>K_w_G = <NUM_LIT:10>**(-<NUM_LIT:1>*(gamma0 + gamma1/T + gamma2/T**<NUM_LIT:2> + gamma3/T**<NUM_LIT:3>))<EOL>return K_w_G<EOL>", "docstring": "r'''Calculates equilibrium constant for OH- and H+ in water vapor,\n    according to [1]_.\n    This is the most recent formulation available.\n\n    .. math::\n        -log_{10}  K_w^G = \\gamma_0 + \\gamma_1 T^{-1} + \\gamma_2 T^{-2} + \\gamma_3 T^{-3}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of H2O [K]\n\n    Returns\n    -------\n    K_w_G : float\n\n    Notes\n    -----\n    gamma0 = 6.141500E-1; \n    gamma1 = 4.825133E4; \n    gamma2 = -6.770793E4; \n    gamma3 = 1.010210E7\n\n    Examples\n    --------\n    >>> Kweq_IAPWS_gas(800)\n    1.4379721554798815e-61\n\n    References\n    ----------\n    .. [1] Bandura, Andrei V., and Serguei N. Lvov. \"The Ionization Constant\n       of Water over Wide Ranges of Temperature and Density.\" Journal of Physical\n       and Chemical Reference Data 35, no. 1 (March 1, 2006): 15-30.\n       doi:10.1063/1.1928231", "id": "f15783:m15"}
{"signature": "def ion_balance_proportional(anion_charges, cation_charges, zs, n_anions, <EOL>n_cations, balance_error, method):", "body": "anion_zs = zs[<NUM_LIT:0>:n_anions]<EOL>cation_zs = zs[n_anions:n_cations+n_anions]<EOL>anion_balance_error = sum([zi*ci for zi, ci in zip(anion_zs, anion_charges)])<EOL>cation_balance_error = sum([zi*ci for zi, ci in zip(cation_zs, cation_charges)])<EOL>if method == '<STR_LIT>':<EOL><INDENT>if balance_error < <NUM_LIT:0>:<EOL><INDENT>multiplier = -anion_balance_error/cation_balance_error<EOL>cation_zs = [i*multiplier for i in cation_zs]<EOL><DEDENT>else:<EOL><INDENT>multiplier = -cation_balance_error/anion_balance_error<EOL>anion_zs = [i*multiplier for i in anion_zs]<EOL><DEDENT><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>if balance_error < <NUM_LIT:0>:<EOL><INDENT>multiplier = -cation_balance_error/anion_balance_error<EOL>anion_zs = [i*multiplier for i in anion_zs]<EOL><DEDENT>else:<EOL><INDENT>multiplier = -anion_balance_error/cation_balance_error<EOL>cation_zs = [i*multiplier for i in cation_zs]<EOL><DEDENT><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>multiplier = -anion_balance_error/cation_balance_error<EOL>cation_zs = [i*multiplier for i in cation_zs]<EOL><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>multiplier = -cation_balance_error/anion_balance_error<EOL>anion_zs = [i*multiplier for i in anion_zs]<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>' %charge_balance_methods)<EOL><DEDENT>z_water = <NUM_LIT:1.> - sum(anion_zs) - sum(cation_zs)<EOL>return anion_zs, cation_zs, z_water<EOL>", "docstring": "Helper method for balance_ions for the proportional family of methods. \n    See balance_ions for a description of the methods; parameters are fairly\n    obvious.", "id": "f15783:m20"}
{"signature": "def thermal_conductivity_Magomedov(T, P, ws, CASRNs, k_w=None):", "body": "P = P/<NUM_LIT><EOL>ws = [i*<NUM_LIT:100> for i in ws]<EOL>if not k_w:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>sum1 = <NUM_LIT:0><EOL>for i, CASRN in enumerate(CASRNs):<EOL><INDENT>Ai = float(Magomedovk_thermal_cond.at[CASRN, '<STR_LIT>'])<EOL>sum1 += Ai*(ws[i] + <NUM_LIT>*ws[i]**<NUM_LIT:3>)<EOL><DEDENT>return k_w*(<NUM_LIT:1> - sum1) - <NUM_LIT>*P*T*sum(ws)<EOL>", "docstring": "r'''Calculate the thermal conductivity of an aqueous mixture of\n    electrolytes using the form proposed by Magomedov [1]_.\n    Parameters are loaded by the function as needed. Function will fail if an\n    electrolyte is not in the database.\n\n    .. math::\n        \\lambda = \\lambda_w\\left[ 1 - \\sum_{i=1}^n A_i (w_i + 2\\times10^{-4}\n        w_i^3)\\right] - 2\\times10^{-8} PT\\sum_{i=1}^n w_i\n\n    Parameters\n    ----------\n    T : float\n        Temperature of liquid [K]\n    P : float\n        Pressure of the liquid [Pa]\n    ws : array\n        Weight fractions of liquid components other than water\n    CASRNs : array\n        CAS numbers of the liquid components other than water\n    k_w : float\n        Liquid thermal condiuctivity or pure water at T and P, [W/m/K]\n\n    Returns\n    -------\n    kl : float\n        Liquid thermal condiuctivity, [W/m/K]\n\n    Notes\n    -----\n    Range from 273 K to 473 K, P from 0.1 MPa to 100 MPa. C from 0 to 25 mass%.\n    Internal untis are MPa for pressure and weight percent.\n\n    An example is sought for this function. It is not possible to reproduce\n    the author's values consistently.\n\n    Examples\n    --------\n    >>> thermal_conductivity_Magomedov(293., 1E6, [.25], ['7758-94-3'], k_w=0.59827)\n    0.548654049375\n\n    References\n    ----------\n    .. [1] Magomedov, U. B. 
\"The Thermal Conductivity of Binary and\n       Multicomponent Aqueous Solutions of Inorganic Substances at High\n       Parameters of State.\" High Temperature 39, no. 2 (March 1, 2001):\n       221-26. doi:10.1023/A:1017518731726.", "id": "f15783:m12"}
{"signature": "def conductivity(CASRN=None, AvailableMethods=False, Method=None, full_info=True):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in Lange_cond_pure.index:<EOL><INDENT>methods.append(LANGE_COND)<EOL><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == LANGE_COND:<EOL><INDENT>kappa = float(Lange_cond_pure.at[CASRN, '<STR_LIT>'])<EOL>if full_info:<EOL><INDENT>T = float(Lange_cond_pure.at[CASRN, '<STR_LIT:T>'])<EOL><DEDENT><DEDENT>elif Method == NONE:<EOL><INDENT>kappa, T = None, None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if full_info:<EOL><INDENT>return kappa, T<EOL><DEDENT>else:<EOL><INDENT>return kappa<EOL><DEDENT>", "docstring": "r'''This function handles the retrieval of a chemical's conductivity.\n    Lookup is based on CASRNs. Will automatically select a data source to use\n    if no Method is provided; returns None if the data is not available.\n\n    Function has data for approximately 100 chemicals.\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    kappa : float\n        Electrical conductivity of the fluid, [S/m]\n    T : float, only returned if full_info == True\n        Temperature at which conductivity measurement was made\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain RI with the given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        A string for the method name to use, as defined by constants in\n        conductivity_methods\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        conductivity for the desired chemical, and will return methods instead\n        of conductivity\n    full_info 
: bool, optional\n        If True, function will return the temperature at which the conductivity\n        reading was made\n\n    Notes\n    -----\n    Only one source is available in this function. It is:\n\n        * 'LANGE_COND' which is from Lange's Handbook, Table 8.34 Electrical \n        Conductivity of Various Pure Liquids', a compillation of data in [1]_.\n\n    Examples\n    --------\n    >>> conductivity('7732-18-5')\n    (4e-06, 291.15)\n\n    References\n    ----------\n    .. [1] Speight, James. Lange's Handbook of Chemistry. 16 edition.\n       McGraw-Hill Professional, 2005.", "id": "f15783:m11"}
{"signature": "def Kweq_1981(T, rho_w):", "body": "rho_w = rho_w/<NUM_LIT><EOL>A = -<NUM_LIT><EOL>B = -<NUM_LIT><EOL>C = <NUM_LIT><EOL>D = -<NUM_LIT><EOL>E = <NUM_LIT><EOL>F = -<NUM_LIT><EOL>G = <NUM_LIT><EOL>return <NUM_LIT:10>**(A + B/T + C/T**<NUM_LIT:2> + D/T**<NUM_LIT:3> + (E + F/T + G/T**<NUM_LIT:2>)*log10(rho_w))<EOL>", "docstring": "r'''Calculates equilibrium constant for OH- and H+ in water, according to\n    [1]_. Second most recent formulation.\n\n    .. math::\n        \\log_{10} K_w= A + B/T + C/T^2 + D/T^3 + (E+F/T+G/T^2)\\log_{10} \\rho_w\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    rho_w : float\n        Density of water, [kg/m^3]\n\n    Returns\n    -------\n    Kweq : float\n        Ionization constant of water, [-]\n\n    Notes\n    -----\n    Density is internally converted to units of g/cm^3.\n\n    A = -4.098;\n    B = -3245.2;\n    C = 2.2362E5;\n    D = -3.984E7;\n    E = 13.957;\n    F = -1262.3;\n    G = 8.5641E5\n\n    Examples\n    --------\n    >>> -1*log10(Kweq_1981(600, 700))\n    11.274522047458206\n\n    References\n    ----------\n    .. [1] Marshall, William L., and E. U. Franck. \"Ion Product of Water\n       Substance, 0-1000  degree C, 1010,000 Bars New International Formulation\n       and Its Background.\" Journal of Physical and Chemical Reference Data 10,\n       no. 2 (April 1, 1981): 295-304. doi:10.1063/1.555643.", "id": "f15783:m14"}
{"signature": "def ionic_strength(mis, zis):", "body": "return <NUM_LIT:0.5>*sum([mi*zi*zi for mi, zi in zip(mis, zis)])<EOL>", "docstring": "r'''Calculate the ionic strength of a solution in one of two ways,\n    depending on the inputs only. For Pitzer and Bromley models,\n    `mis` should be molalities of each component. For eNRTL models,\n    `mis` should be mole fractions of each electrolyte in the solution.\n    This will sum to be much less than 1.\n\n    .. math::\n        I = \\frac{1}{2} \\sum M_i z_i^2\n\n        I = \\frac{1}{2} \\sum x_i z_i^2\n\n    Parameters\n    ----------\n    mis : list\n        Molalities of each ion, or mole fractions of each ion [mol/kg or -]\n    zis : list\n        Charges of each ion [-]\n\n    Returns\n    -------\n    I : float\n        ionic strength, [?]\n\n    Examples\n    --------\n    >>> ionic_strength([0.1393, 0.1393], [1, -1])\n    0.1393\n\n    References\n    ----------\n    .. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans. \"Local\n       Composition Model for Excess Gibbs Energy of Electrolyte Systems.\n       Part I: Single Solvent, Single Completely Dissociated Electrolyte\n       Systems.\" AIChE Journal 28, no. 4 (July 1, 1982): 588-96.\n       doi:10.1002/aic.690280410\n    .. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.\n       Weinheim, Germany: Wiley-VCH, 2012.", "id": "f15783:m13"}
{"signature": "def balance_ions(anions, cations, anion_zs=None, cation_zs=None, <EOL>anion_concs=None, cation_concs=None, rho_w=<NUM_LIT>, <EOL>method='<STR_LIT>', selected_ion=None):", "body": "anions = list(anions)<EOL>cations = list(cations)<EOL>n_anions = len(anions)<EOL>n_cations = len(cations)<EOL>ions = anions + cations<EOL>anion_charges = [i.charge for i in anions]<EOL>cation_charges = [i.charge for i in cations]<EOL>charges = anion_charges + cation_charges + [<NUM_LIT:0>]<EOL>MW_water = [<NUM_LIT>]<EOL>rho_w = rho_w/<NUM_LIT:1000> <EOL>if anion_concs is not None and cation_concs is not None:<EOL><INDENT>anion_ws = [i*<NUM_LIT>/rho_w for i in anion_concs]<EOL>cation_ws = [i*<NUM_LIT>/rho_w for i in cation_concs]<EOL>w_water = <NUM_LIT:1> - sum(anion_ws) - sum(cation_ws)<EOL>anion_MWs = [i.MW for i in anions]<EOL>cation_MWs = [i.MW for i in cations]<EOL>MWs = anion_MWs + cation_MWs + MW_water<EOL>zs = ws_to_zs(anion_ws + cation_ws + [w_water], MWs)<EOL><DEDENT>else:<EOL><INDENT>if anion_zs is None or cation_zs is None:<EOL><INDENT>raise Exception('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>zs = anion_zs + cation_zs<EOL>zs = zs + [<NUM_LIT:1> - sum(zs)]<EOL><DEDENT><DEDENT>impacts = [zi*ci for zi, ci in zip(zs, charges)]<EOL>balance_error = sum(impacts)<EOL>if abs(balance_error) < <NUM_LIT>:<EOL><INDENT>anion_zs = zs[<NUM_LIT:0>:n_anions]<EOL>cation_zs = zs[n_anions:n_cations+n_anions]<EOL>z_water = zs[-<NUM_LIT:1>]<EOL>return anions, cations, anion_zs, cation_zs, z_water<EOL><DEDENT>if '<STR_LIT>' in method:<EOL><INDENT>anion_zs, cation_zs, z_water = ion_balance_dominant(impacts,<EOL>balance_error, charges, zs, n_anions, n_cations, method)<EOL>return anions, cations, anion_zs, cation_zs, z_water<EOL><DEDENT>elif '<STR_LIT>' in method:<EOL><INDENT>anion_zs, cation_zs, z_water = ion_balance_proportional(<EOL>anion_charges, cation_charges, zs, n_anions, n_cations,<EOL>balance_error, method)<EOL>return anions, cations, anion_zs, cation_zs, 
z_water<EOL><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>increase = True<EOL>if balance_error < <NUM_LIT:0>:<EOL><INDENT>selected_ion = pubchem_db.search_name('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>selected_ion = pubchem_db.search_name('<STR_LIT>')<EOL><DEDENT><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>increase = False<EOL>if balance_error > <NUM_LIT:0>:<EOL><INDENT>selected_ion = pubchem_db.search_name('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>selected_ion = pubchem_db.search_name('<STR_LIT>')<EOL><DEDENT><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>increase = None<EOL><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>increase = True<EOL><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>increase = False<EOL><DEDENT>elif method == '<STR_LIT>':<EOL><INDENT>increase = True<EOL>if balance_error < <NUM_LIT:0>:<EOL><INDENT>selected_ion = selected_ion[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>selected_ion = selected_ion[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if selected_ion is None:<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>anion_zs, cation_zs, z_water = ion_balance_adjust_wrapper(charges, zs, n_anions, n_cations,<EOL>anions, cations, selected_ion, increase=increase)<EOL>return anions, cations, anion_zs, cation_zs, z_water<EOL>", "docstring": "r'''Performs an ion balance to adjust measured experimental ion \n    compositions to electroneutrality. 
Can accept either the actual mole \n    fractions of the ions, or their concentrations in units of [mg/L] as well\n    for convinience.\n\n    The default method will locate the most prevalent ion in the type of \n    ion not in excess - and increase it until the two ion types balance.\n\n    Parameters\n    ----------\n    anions : list(ChemicalMetadata)\n        List of all negatively charged ions measured as being in the solution;\n        ChemicalMetadata instances or simply objects with the attributes `MW` \n        and `charge`, [-]\n    cations : list(ChemicalMetadata)\n        List of all positively charged ions measured as being in the solution;\n        ChemicalMetadata instances or simply objects with the attributes `MW` \n        and `charge`, [-]\n    anion_zs : list, optional\n        Mole fractions of each anion as measured in the aqueous solution, [-]\n    cation_zs : list, optional\n        Mole fractions of each cation as measured in the aqueous solution, [-]\n    anion_concs : list, optional\n        Concentrations of each anion in the aqueous solution in the units often\n        reported (for convinience only) [mg/L]\n    cation_concs : list, optional\n        Concentrations of each cation in the aqueous solution in the units \n        often reported (for convinience only) [mg/L]\n    rho_w : float, optional\n        Density of the aqueous solutionr at the temperature and pressure the\n        anion and cation concentrations were measured (if specified), [kg/m^3]\n    method : str, optional\n        The method to use to balance the ionimbalance; one of 'dominant', \n        'decrease dominant', 'increase dominant',\n        'proportional insufficient ions increase', \n        'proportional excess ions decrease', \n        'proportional cation adjustment', 'proportional anion adjustment', \n        'Na or Cl increase', 'Na or Cl decrease', 'adjust', 'increase', \n        'decrease', 'makeup'].\n    selected_ion : ChemicalMetadata, optional\n      
  Some methods adjust only one user-specified ion; this is that input.\n        For the case of the 'makeup' method, this is a tuple of (anion, cation)\n        ChemicalMetadata instances and only the ion type not in excess will be\n        used.\n\n    Returns\n    -------\n    anions : list(ChemicalMetadata)\n        List of all negatively charged ions measured as being in the solution;\n        ChemicalMetadata instances after potentially adding in an ion which\n        was not present but specified by the user, [-]\n    cations : list(ChemicalMetadata)\n        List of all positively charged ions measured as being in the solution;\n        ChemicalMetadata instances after potentially adding in an ion which\n        was not present but specified by the user, [-]\n    anion_zs : list,\n        Mole fractions of each anion in the aqueous solution after the charge\n        balance, [-]\n    cation_zs : list\n        Mole fractions of each cation in the aqueous solution after the charge\n        balance, [-]\n    z_water : float\n        Mole fraction of the water in the solution, [-]\n\n    Notes\n    -----\n    The methods perform the charge balance as follows:\n\n    * 'dominant' : The ion with the largest mole fraction in solution has its\n      concentration adjusted up or down as necessary to balance the solution.\n    * 'decrease dominant' : The ion with the largest mole fraction in the type\n      of ion with *excess* charge has its own mole fraction decreased to balance\n      the solution.\n    * 'increase dominant' : The ion with the largest mole fraction in the type\n      of ion with *insufficient* charge has its own mole fraction decreased to \n      balance the solution.\n    * 'proportional insufficient ions increase' : The ion charge type which is\n      present insufficiently has each of the ions mole fractions *increased*\n      proportionally until the solution is balanced.\n    * 'proportional excess ions decrease' :  The ion charge type which 
is\n      present in excess has each of the ions mole fractions *decreased*\n      proportionally until the solution is balanced.\n    * 'proportional cation adjustment' : All *cations* have their mole fractions\n      increased or decreased proportionally as necessary to balance the \n      solution.\n    * 'proportional anion adjustment' : All *anions* have their mole fractions\n      increased or decreased proportionally as necessary to balance the \n      solution.\n    * 'Na or Cl increase' : Either Na+ or Cl- is *added* to the solution until\n      the solution is balanced; the species will be added if they were not\n      present initially as well.\n    * 'Na or Cl decrease' : Either Na+ or Cl- is *removed* from the solution \n      until the solution is balanced; the species will be added if they were \n      not present initially as well.\n    * 'adjust' : An ion specified with the parameter `selected_ion` has its\n      mole fraction *increased or decreased* as necessary to balance the \n      solution. An exception is raised if the specified ion alone cannot \n      balance the solution.\n    * 'increase' : An ion specified with the parameter `selected_ion` has its\n      mole fraction *increased* as necessary to balance the \n      solution. An exception is raised if the specified ion alone cannot \n      balance the solution.\n    * 'decrease' : An ion specified with the parameter `selected_ion` has its\n      mole fraction *decreased* as necessary to balance the \n      solution. An exception is raised if the specified ion alone cannot \n      balance the solution.\n    * 'makeup' : Two ions ase specified as a tuple with the parameter \n      `selected_ion`. Whichever ion type is present in the solution \n      insufficiently is added; i.e. 
if the ions were Mg+2 and Cl-, and there\n      was too much negative charge in the solution, Mg+2 would be added until\n      the solution was balanced.\n\n    Examples\n    --------\n    >>> anions_n = ['Cl-', 'HCO3-', 'SO4-2']\n    >>> cations_n = ['Na+', 'K+', 'Ca+2', 'Mg+2']\n    >>> cations = [pubchem_db.search_name(i) for i in cations_n]\n    >>> anions = [pubchem_db.search_name(i) for i in anions_n]\n    >>> an_res, cat_res, an_zs, cat_zs, z_water = balance_ions(anions, cations,\n    ... anion_zs=[0.02557, 0.00039, 0.00026], cation_zs=[0.0233, 0.00075,\n    ... 0.00262, 0.00119], method='proportional excess ions decrease')\n    >>> an_zs\n    [0.02557, 0.00039, 0.00026]\n    >>> cat_zs\n    [0.01948165456267761, 0.0006270918850647299, 0.0021906409851594564, 0.0009949857909693717]\n    >>> z_water\n    0.9504856267761288\n\n    References\n    ----------", "id": "f15783:m21"}
{"signature": "def Laliberte_heat_capacity(T, ws, CASRNs):", "body": "Cp_w = Laliberte_heat_capacity_w(T)<EOL>w_w = <NUM_LIT:1> - sum(ws)<EOL>Cp = w_w*Cp_w<EOL>for i in range(len(CASRNs)):<EOL><INDENT>d = _Laliberte_Heat_Capacity_ParametersDict[CASRNs[i]]<EOL>Cp_i = Laliberte_heat_capacity_i(T, w_w, d[\"<STR_LIT>\"], d[\"<STR_LIT>\"], d[\"<STR_LIT>\"], d[\"<STR_LIT>\"], d[\"<STR_LIT>\"], d[\"<STR_LIT>\"])<EOL>Cp = Cp + ws[i]*Cp_i<EOL><DEDENT>return Cp<EOL>", "docstring": "r'''Calculate the heat capacity of an aqueous electrolyte mixture using the\n    form proposed by [1]_.\n    Parameters are loaded by the function as needed.\n\n    .. math::\n        TODO\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    ws : array\n        Weight fractions of fluid components other than water\n    CASRNs : array\n        CAS numbers of the fluid components other than water\n\n    Returns\n    -------\n    Cp : float\n        Solution heat capacity, [J/kg/K]\n\n    Notes\n    -----\n    Temperature range check is not implemented.\n    Units are Kelvin and J/kg/K.\n\n    Examples\n    --------\n    >>> Laliberte_heat_capacity(273.15+1.5, [0.00398447], ['7647-14-5']) \n    4186.569908672113\n\n    References\n    ----------\n    .. [1] Laliberte, Marc. \"A Model for Calculating the Heat Capacity of\n       Aqueous Solutions, with Updated Density and Viscosity Data.\" Journal of\n       Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.\n       doi:10.1021/je8008123", "id": "f15783:m8"}
{"signature": "def Laliberte_viscosity_i(T, w_w, v1, v2, v3, v4, v5, v6):", "body": "t = T-<NUM_LIT><EOL>mu_i = exp((v1*(<NUM_LIT:1>-w_w)**v2 + v3)/(v4*t+<NUM_LIT:1>))/(v5*(<NUM_LIT:1>-w_w)**v6 + <NUM_LIT:1>)<EOL>return mu_i/<NUM_LIT><EOL>", "docstring": "r'''Calculate the viscosity of a solute using the form proposed by [1]_\n    Parameters are needed, and a temperature. Units are Kelvin and Pa*s.\n\n    .. math::\n        \\mu_i = \\frac{\\exp\\left( \\frac{v_1(1-w_w)^{v_2}+v_3}{v_4 t +1}\\right)}\n            {v_5(1-w_w)^{v_6}+1}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    w_w : float\n        Weight fraction of water in the solution\n    v1-v6 : floats\n        Function fit parameters\n\n    Returns\n    -------\n    mu_i : float\n        Solute partial viscosity, Pa*s\n\n    Notes\n    -----\n    Temperature range check is outside of this function.\n    Check is performed using NaCl at 5 degC from the first value in [1]_'s spreadsheet.\n\n    Examples\n    --------\n    >>> d =  _Laliberte_Viscosity_ParametersDict['7647-14-5']\n    >>> Laliberte_viscosity_i(273.15+5, 1-0.005810, d[\"V1\"], d[\"V2\"], d[\"V3\"], d[\"V4\"], d[\"V5\"], d[\"V6\"] )\n    0.004254025533308794\n\n    References\n    ----------\n    .. [1] Laliberte, Marc. \"A Model for Calculating the Heat Capacity of\n       Aqueous Solutions, with Updated Density and Viscosity Data.\" Journal of\n       Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.\n       doi:10.1021/je8008123", "id": "f15783:m1"}
{"signature": "def identify_phase(T, P, Tm=None, Tb=None, Tc=None, Psat=None):", "body": "if Tm and T <= Tm:<EOL><INDENT>return '<STR_LIT:s>'<EOL><DEDENT>elif Tc and T >= Tc:<EOL><INDENT>return '<STR_LIT:g>'<EOL><DEDENT>elif Psat:<EOL><INDENT>if P <= Psat:<EOL><INDENT>return '<STR_LIT:g>'<EOL><DEDENT>elif P > Psat:<EOL><INDENT>return '<STR_LIT:l>'<EOL><DEDENT><DEDENT>elif Tb:<EOL><INDENT>if <NUM_LIT> < P < <NUM_LIT>:<EOL><INDENT>if T < Tb:<EOL><INDENT>return  '<STR_LIT:l>'<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT:g>'<EOL><DEDENT><DEDENT>elif P > <NUM_LIT> and T <= Tb:<EOL><INDENT>return '<STR_LIT:l>'<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "r'''Determines the phase of a one-species chemical system according to\n    basic rules, using whatever information is available. Considers only the\n    phases liquid, solid, and gas; does not consider two-phase\n    scenarios, as should occurs between phase boundaries.\n\n    * If the melting temperature is known and the temperature is under or equal\n      to it, consider it a solid.\n    * If the critical temperature is known and the temperature is greater or\n      equal to it, consider it a gas.\n    * If the vapor pressure at `T` is known and the pressure is under or equal\n      to it, consider it a gas. 
If the pressure is greater than the vapor\n      pressure, consider it a liquid.\n    * If the melting temperature, critical temperature, and vapor pressure are\n      not known, attempt to use the boiling point to provide phase information.\n      If the pressure is between 90 kPa and 110 kPa (approximately normal),\n      consider it a liquid if it is under the boiling temperature and a gas if\n      above the boiling temperature.\n    * If the pressure is above 110 kPa and the boiling temperature is known,\n      consider it a liquid if the temperature is under the boiling temperature.\n    * Return None otherwise.\n\n    Parameters\n    ----------\n    T : float\n        Temperature, [K]\n    P : float\n        Pressure, [Pa]\n    Tm : float, optional\n        Normal melting temperature, [K]\n    Tb : float, optional\n        Normal boiling point, [K]\n    Tc : float, optional\n        Critical temperature, [K]\n    Psat : float, optional\n        Vapor pressure of the fluid at `T`, [Pa]\n\n    Returns\n    -------\n    phase : str\n        Either 's', 'l', 'g', or None if the phase cannot be determined\n\n    Notes\n    -----\n    No special attential is paid to any phase transition. For the case where\n    the melting point is not provided, the possibility of the fluid being solid\n    is simply ignored.\n\n    Examples\n    --------\n    >>> identify_phase(T=280, P=101325, Tm=273.15, Psat=991)\n    'l'", "id": "f15784:m11"}
{"signature": "def Li_Johns_Ahmadi_solution(zs, Ks):", "body": "<EOL>p = sorted(zip(Ks,zs), reverse=True)<EOL>Ks_sorted, zs_sorted = [K for (K,z) in p], [z for (K,z) in p]<EOL>k1 = Ks_sorted[<NUM_LIT:0>]<EOL>z1 = zs_sorted[<NUM_LIT:0>]<EOL>kn = Ks_sorted[-<NUM_LIT:1>]<EOL>x_min = (<NUM_LIT:1.> - kn)/(k1 - kn)*z1<EOL>x_max = (<NUM_LIT:1.> - kn)/(k1 - kn)<EOL>x_min2 = max(<NUM_LIT:0.>, x_min)<EOL>x_max2 = min(<NUM_LIT:1.>, x_max)<EOL>x_guess = (x_min2 + x_max2)*<NUM_LIT:0.5><EOL>length = len(zs)-<NUM_LIT:1><EOL>kn_m_1 = kn-<NUM_LIT:1.><EOL>k1_m_1 = (k1-<NUM_LIT:1.>)<EOL>t1 = (k1-kn)/(kn-<NUM_LIT:1.>)<EOL>objective = lambda x1: <NUM_LIT:1.> + t1*x1 + sum([(ki-kn)/(kn_m_1) * zi*k1_m_1*x1 /( (ki-<NUM_LIT:1.>)*z1 + (k1-ki)*x1) for ki, zi in zip(Ks_sorted[<NUM_LIT:1>:length], zs_sorted[<NUM_LIT:1>:length])])<EOL>try:<EOL><INDENT>x1 = newton(objective, x_guess)<EOL>assert x1 >= x_min2<EOL>assert x1 <= x_max2<EOL>V_over_F = (-x1 + z1)/(x1*(k1 - <NUM_LIT:1.>))<EOL>assert <NUM_LIT:0> <= V_over_F <= <NUM_LIT:1><EOL><DEDENT>except:<EOL><INDENT>x1 = brenth(objective, x_min, x_max)<EOL>V_over_F = (-x1 + z1)/(x1*(k1 - <NUM_LIT:1.>))<EOL><DEDENT>xs = [zi/(<NUM_LIT:1.>+V_over_F*(Ki-<NUM_LIT:1.>)) for zi, Ki in zip(zs, Ks)]<EOL>ys = [Ki*xi for xi, Ki in zip(xs, Ks)]<EOL>return V_over_F, xs, ys<EOL>", "docstring": "r'''Solves the objective function of the Li-Johns-Ahmadi flash equation.\n    Uses the method proposed in [1]_ to obtain an initial guess.\n\n    .. 
math::\n        0 = 1 + \\left(\\frac{K_{max}-K_{min}}{K_{min}-1}\\right)x_1\n        + \\sum_{i=2}^{n-1}\\frac{K_i-K_{min}}{K_{min}-1}\\left[\\frac{z_i(K_{max}\n        -1)x_{max}}{(K_i-1)z_{max} + (K_{max}-K_i)x_{max}}\\right]\n\n    Parameters\n    ----------\n    zs : list[float]\n        Overall mole fractions of all species, [-]\n    Ks : list[float]\n        Equilibrium K-values, [-]\n\n    Returns\n    -------\n    V_over_F : float\n        Vapor fraction solution [-]\n    xs : list[float]\n        Mole fractions of each species in the liquid phase, [-]\n    ys : list[float]\n        Mole fractions of each species in the vapor phase, [-]\n\n    Notes\n    -----\n    The initial guess is the average of the following, as described in [1]_.\n    Each guess should be limited to be between 0 and 1 as they are often\n    negative or larger than 1. `max` refers to the corresponding mole fractions\n    for the species with the largest K value.\n\n    .. math::\n        \\left(\\frac{1-K_{min}}{K_{max}-K_{min}}\\right)z_{max}\\le x_{max} \\le\n        \\left(\\frac{1-K_{min}}{K_{max}-K_{min}}\\right)\n\n    If the `newton` method does not converge, a bisection method (brenth) is\n    used instead. However, it is somewhat slower, especially as newton will\n    attempt 50 iterations before giving up.\n\n    This method does not work for problems of only two components.\n    K values are sorted internally. Has not been found to be quicker than the\n    Rachford-Rice equation.\n\n    Examples\n    --------\n    >>> Li_Johns_Ahmadi_solution(zs=[0.5, 0.3, 0.2], Ks=[1.685, 0.742, 0.532])\n    (0.6907302627738544, [0.33940869696634357, 0.3650560590371706, 0.2955352439964858], [0.5719036543882889, 0.27087159580558057, 0.15722474980613044])\n\n    References\n    ----------\n    .. [1] Li, Yinghui, Russell T. Johns, and Kaveh Ahmadi. 
\"A Rapid and Robust\n       Alternative to Rachford-Rice in Flash Calculations.\" Fluid Phase\n       Equilibria 316 (February 25, 2012): 85-97.\n       doi:10.1016/j.fluid.2011.12.005.", "id": "f15784:m3"}
{"signature": "def Rachford_Rice_flash_error(V_over_F, zs, Ks):", "body": "return sum([zi*(Ki-<NUM_LIT:1.>)/(<NUM_LIT:1.>+V_over_F*(Ki-<NUM_LIT:1.>)) for Ki, zi in zip(Ks, zs)])<EOL>", "docstring": "r'''Calculates the objective function of the Rachford-Rice flash equation.\n    This function should be called by a solver seeking a solution to a flash\n    calculation. The unknown variable is `V_over_F`, for which a solution\n    must be between 0 and 1.\n\n    .. math::\n        \\sum_i \\frac{z_i(K_i-1)}{1 + \\frac{V}{F}(K_i-1)} = 0\n\n    Parameters\n    ----------\n    V_over_F : float\n        Vapor fraction guess [-]\n    zs : list[float]\n        Overall mole fractions of all species, [-]\n    Ks : list[float]\n        Equilibrium K-values, [-]\n\n    Returns\n    -------\n    error : float\n        Deviation between the objective function at the correct V_over_F\n        and the attempted V_over_F, [-]\n\n    Notes\n    -----\n    The derivation is as follows:\n\n    .. math::\n        F z_i = L x_i + V y_i\n\n        x_i = \\frac{z_i}{1 + \\frac{V}{F}(K_i-1)}\n\n        \\sum_i y_i = \\sum_i K_i x_i = 1\n\n        \\sum_i(y_i - x_i)=0\n\n        \\sum_i \\frac{z_i(K_i-1)}{1 + \\frac{V}{F}(K_i-1)} = 0\n\n    Examples\n    --------\n    >>> Rachford_Rice_flash_error(0.5, zs=[0.5, 0.3, 0.2],\n    ... Ks=[1.685, 0.742, 0.532])\n    0.04406445591174976\n\n    References\n    ----------\n    .. [1] Rachford, H. H. Jr, and J. D. Rice. \"Procedure for Use of Electronic\n       Digital Computers in Calculating Flash Vaporization Hydrocarbon\n       Equilibrium.\" Journal of Petroleum Technology 4, no. 10 (October 1,\n       1952): 19-3. doi:10.2118/952327-G.", "id": "f15784:m1"}
{"signature": "def UNIQUAC(xs, rs, qs, taus):", "body": "cmps = range(len(xs))<EOL>rsxs = sum([rs[i]*xs[i] for i in cmps])<EOL>phis = [rs[i]*xs[i]/rsxs for i in cmps]<EOL>qsxs = sum([qs[i]*xs[i] for i in cmps])<EOL>vs = [qs[i]*xs[i]/qsxs for i in cmps]<EOL>Ss = [sum([vs[j]*taus[j][i] for j in cmps]) for i in cmps]<EOL>loggammacs = [log(phis[i]/xs[i]) + <NUM_LIT:1> - phis[i]/xs[i]<EOL>- <NUM_LIT:5>*qs[i]*(log(phis[i]/vs[i]) + <NUM_LIT:1> - phis[i]/vs[i]) for i in cmps]<EOL>loggammars = [qs[i]*(<NUM_LIT:1> - log(Ss[i]) - sum([taus[i][j]*vs[j]/Ss[j]<EOL>for j in cmps])) for i in cmps]<EOL>return [exp(loggammacs[i] + loggammars[i]) for i in cmps]<EOL>", "docstring": "r'''Calculates the activity coefficients of each species in a mixture\n    using the Universal quasi-chemical (UNIQUAC) equation, given their mole\n    fractions, `rs`, `qs`, and dimensionless interaction parameters. The\n    interaction parameters are normally correlated with temperature, and need\n    to be calculated separately.\n\n    .. 
math::\n        \\ln \\gamma_i = \\ln \\frac{\\Phi_i}{x_i} + \\frac{z}{2} q_i \\ln\n        \\frac{\\theta_i}{\\Phi_i}+ l_i - \\frac{\\Phi_i}{x_i}\\sum_j^N x_j l_j\n        - q_i \\ln\\left( \\sum_j^N \\theta_j \\tau_{ji}\\right)+ q_i - q_i\\sum_j^N\n        \\frac{\\theta_j \\tau_{ij}}{\\sum_k^N \\theta_k \\tau_{kj}}\n\n        \\theta_i = \\frac{x_i q_i}{\\displaystyle\\sum_{j=1}^{n} x_j q_j}\n\n         \\Phi_i = \\frac{x_i r_i}{\\displaystyle\\sum_{j=1}^{n} x_j r_j}\n\n         l_i = \\frac{z}{2}(r_i - q_i) - (r_i - 1)\n\n    Parameters\n    ----------\n    xs : list[float]\n        Liquid mole fractions of each species, [-]\n    rs : list[float]\n        Van der Waals volume parameters for each species, [-]\n    qs : list[float]\n        Surface area parameters for each species, [-]\n    taus : list[list[float]]\n        Dimensionless interaction parameters of each compound with each other,\n        [-]\n\n    Returns\n    -------\n    gammas : list[float]\n        Activity coefficient for each species in the liquid mixture, [-]\n\n    Notes\n    -----\n    This model needs N^2 parameters.\n\n    The original expression for the interaction parameters is as follows:\n\n    .. math::\n        \\tau_{ji} = \\exp\\left(\\frac{-\\Delta u_{ij}}{RT}\\right)\n\n    However, it is seldom used. Most correlations for the interaction\n    parameters include some of the terms shown in the following form:\n\n    .. math::\n        \\ln \\tau{ij} =a_{ij}+\\frac{b_{ij}}{T}+c_{ij}\\ln T + d_{ij}T\n        + \\frac{e_{ij}}{T^2}\n\n    This model is recast in a slightly more computationally efficient way in\n    [2]_, as shown below:\n\n    .. 
math::\n        \\ln \\gamma_i = \\ln \\gamma_i^{res} + \\ln \\gamma_i^{comb}\n\n        \\ln \\gamma_i^{res} = q_i \\left(1 - \\ln\\frac{\\sum_j^N q_j x_j \\tau_{ji}}\n        {\\sum_j^N q_j x_j}- \\sum_j \\frac{q_k x_j \\tau_{ij}}{\\sum_k q_k x_k\n        \\tau_{kj}}\\right)\n\n        \\ln \\gamma_i^{comb} = (1 - V_i + \\ln V_i) - \\frac{z}{2}q_i\\left(1 -\n        \\frac{V_i}{F_i} + \\ln \\frac{V_i}{F_i}\\right)\n\n        V_i = \\frac{r_i}{\\sum_j^N r_j x_j}\n\n        F_i = \\frac{q_i}{\\sum_j q_j x_j}\n\n    Examples\n    --------\n    Ethanol-water example, at 343.15 K and 1 MPa:\n\n    >>> UNIQUAC(xs=[0.252, 0.748], rs=[2.1055, 0.9200], qs=[1.972, 1.400],\n    ... taus=[[1.0, 1.0919744384510301], [0.37452902779205477, 1.0]])\n    [2.35875137797083, 1.2442093415968987]\n\n    References\n    ----------\n    .. [1] Abrams, Denis S., and John M. Prausnitz. \"Statistical Thermodynamics\n       of Liquid Mixtures: A New Expression for the Excess Gibbs Energy of\n       Partly or Completely Miscible Systems.\" AIChE Journal 21, no. 1 (January\n       1, 1975): 116-28. doi:10.1002/aic.690210115.\n    .. [2] Gmehling, Jurgen, Barbel Kolbe, Michael Kleiber, and Jurgen Rarey.\n       Chemical Thermodynamics for Process Simulation. 1st edition. Weinheim:\n       Wiley-VCH, 2012.\n    .. [3] Maurer, G., and J. M. Prausnitz. \"On the Derivation and Extension of\n       the Uniquac Equation.\" Fluid Phase Equilibria 2, no. 2 (January 1,\n       1978): 91-99. doi:10.1016/0378-3812(78)85002-X.", "id": "f15784:m7"}
{"signature": "def NRTL(xs, taus, alphas):", "body": "gammas = []<EOL>cmps = range(len(xs))<EOL>Gs = [[exp(-alphas[i][j]*taus[i][j]) for j in cmps] for i in cmps]<EOL>for i in cmps:<EOL><INDENT>tn1, td1, total2 = <NUM_LIT:0.>, <NUM_LIT:0.>, <NUM_LIT:0.><EOL>for j in cmps:<EOL><INDENT>tn1 += xs[j]*taus[j][i]*Gs[j][i]<EOL>td1 +=  xs[j]*Gs[j][i]<EOL>tn2 = xs[j]*Gs[i][j]<EOL>td2 = td3 = sum([xs[k]*Gs[k][j] for k in cmps])<EOL>tn3 = sum([xs[m]*taus[m][j]*Gs[m][j] for m in cmps])<EOL>total2 += tn2/td2*(taus[i][j] - tn3/td3)<EOL><DEDENT>gamma = exp(tn1/td1 + total2)<EOL>gammas.append(gamma)<EOL><DEDENT>return gammas<EOL>", "docstring": "r'''Calculates the activity coefficients of each species in a mixture\n    using the Non-Random Two-Liquid (NRTL) method, given their mole fractions,\n    dimensionless interaction parameters, and nonrandomness constants. Those\n    are normally correlated with temperature in some form, and need to be\n    calculated separately.\n\n    .. math::\n        \\ln(\\gamma_i)=\\frac{\\displaystyle\\sum_{j=1}^{n}{x_{j}\\tau_{ji}G_{ji}}}\n        {\\displaystyle\\sum_{k=1}^{n}{x_{k}G_{ki}}}+\\sum_{j=1}^{n}\n        {\\frac{x_{j}G_{ij}}{\\displaystyle\\sum_{k=1}^{n}{x_{k}G_{kj}}}}\n        {\\left ({\\tau_{ij}-\\frac{\\displaystyle\\sum_{m=1}^{n}{x_{m}\\tau_{mj}\n        G_{mj}}}{\\displaystyle\\sum_{k=1}^{n}{x_{k}G_{kj}}}}\\right )}\n\n        G_{ij}=\\text{exp}\\left ({-\\alpha_{ij}\\tau_{ij}}\\right )\n\n    Parameters\n    ----------\n    xs : list[float]\n        Liquid mole fractions of each species, [-]\n    taus : list[list[float]]\n        Dimensionless interaction parameters of each compound with each other,\n        [-]\n    alphas : list[list[float]]\n        Nonrandomness constants of each compound interacting with each other, [-]\n\n    Returns\n    -------\n    gammas : list[float]\n        Activity coefficient for each species in the liquid mixture, [-]\n\n    Notes\n    -----\n    This model needs N^2 parameters.\n\n    One common 
temperature dependence of the nonrandomness constants is:\n\n    .. math::\n        \\alpha_{ij}=c_{ij}+d_{ij}T\n\n    Most correlations for the interaction parameters include some of the terms\n    shown in the following form:\n\n    .. math::\n        \\tau_{ij}=A_{ij}+\\frac{B_{ij}}{T}+\\frac{C_{ij}}{T^{2}}+D_{ij}\n        \\ln{\\left ({T}\\right )}+E_{ij}T^{F_{ij}}\n\n    Examples\n    --------\n    Ethanol-water example, at 343.15 K and 1 MPa:\n\n    >>> NRTL(xs=[0.252, 0.748], taus=[[0, -0.178], [1.963, 0]],\n    ... alphas=[[0, 0.2974],[.2974, 0]])\n    [1.9363183763514304, 1.1537609663170014]\n\n    References\n    ----------\n    .. [1] Renon, Henri, and J. M. Prausnitz. \"Local Compositions in\n       Thermodynamic Excess Functions for Liquid Mixtures.\" AIChE Journal 14,\n       no. 1 (1968): 135-144. doi:10.1002/aic.690140124.\n    .. [2] Gmehling, Jurgen, Barbel Kolbe, Michael Kleiber, and Jurgen Rarey.\n       Chemical Thermodynamics for Process Simulation. 1st edition. Weinheim:\n       Wiley-VCH, 2012.", "id": "f15784:m5"}
{"signature": "def identify_phase_mixture(T=None, P=None, zs=None, Tcs=None, Pcs=None,<EOL>Psats=None, CASRNs=None,<EOL>AvailableMethods=False, Method=None):  ", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if Psats and none_and_length_check((Psats, zs)):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>if Tcs and none_and_length_check([Tcs]) and all([T >= i for i in Tcs]):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>if Pcs and none_and_length_check([Pcs]) and all([P >= i for i in Pcs]):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>if Tcs and none_and_length_check([zs, Tcs]) and any([T > Tc for Tc in Tcs]):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>methods.append('<STR_LIT>')<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>xs, ys, phase, V_over_F = None, None, None, None<EOL>if Method == '<STR_LIT>':<EOL><INDENT>Pdew = dew_at_T(zs, Psats)<EOL>Pbubble = bubble_at_T(zs, Psats)<EOL>if P >= Pbubble:<EOL><INDENT>phase = '<STR_LIT:l>'<EOL>ys = None<EOL>xs = zs<EOL>V_over_F = <NUM_LIT:0><EOL><DEDENT>elif P <= Pdew:<EOL><INDENT>phase = '<STR_LIT:g>'<EOL>ys = zs<EOL>xs = None<EOL>V_over_F = <NUM_LIT:1><EOL><DEDENT>elif Pdew < P < Pbubble:<EOL><INDENT>xs, ys, V_over_F = flash(P, zs, Psats)<EOL>phase = '<STR_LIT>'<EOL><DEDENT><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>if all([T >= i for i in Tcs]):<EOL><INDENT>phase = '<STR_LIT:g>'<EOL><DEDENT>else: <EOL><INDENT>phase = '<STR_LIT>'<EOL><DEDENT><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>if all([P >= i for i in Pcs]):<EOL><INDENT>phase = '<STR_LIT:g>'<EOL><DEDENT>else: <EOL><INDENT>phase = '<STR_LIT>'<EOL><DEDENT><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>Psats = list(Psats)<EOL>for i in range(len(Psats)):<EOL><INDENT>if not Psats[i] and Tcs[i] and Tcs[i] <= T:<EOL><INDENT>Psats[i] = <NUM_LIT><EOL><DEDENT><DEDENT>Pdew = dew_at_T(zs, 
Psats)<EOL>Pbubble = <NUM_LIT><EOL>if P >= Pbubble:<EOL><INDENT>phase = '<STR_LIT:l>'<EOL>ys = None<EOL>xs = zs<EOL>V_over_F = <NUM_LIT:0><EOL><DEDENT>elif P <= Pdew:<EOL><INDENT>phase = '<STR_LIT:g>'<EOL>ys = zs<EOL>xs = None<EOL>V_over_F = <NUM_LIT:1><EOL><DEDENT>elif Pdew < P < Pbubble:<EOL><INDENT>xs, ys, V_over_F = flash(P, zs, Psats)<EOL>phase = '<STR_LIT>'<EOL><DEDENT><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return phase, xs, ys, V_over_F<EOL>", "docstring": ">>> identify_phase_mixture(T=280, P=5000., zs=[0.5, 0.5], Psats=[1400, 7000])\n('l', [0.5, 0.5], None, 0)\n>>> identify_phase_mixture(T=280, P=3000., zs=[0.5, 0.5], Psats=[1400, 7000])\n('two-phase', [0.7142857142857143, 0.2857142857142857], [0.33333333333333337, 0.6666666666666666], 0.5625000000000001)\n>>> identify_phase_mixture(T=280, P=800., zs=[0.5, 0.5], Psats=[1400, 7000])\n('g', None, [0.5, 0.5], 1)\n>>> identify_phase_mixture(T=280, P=800., zs=[0.5, 0.5])\n(None, None, None, None)", "id": "f15784:m12"}
{"signature": "def bubble_at_P(P, zs, vapor_pressure_eqns, fugacities=None, gammas=None):", "body": "def bubble_P_error(T):<EOL><INDENT>Psats = [VP(T) for VP in vapor_pressure_eqns]<EOL>Pcalc = bubble_at_T(zs, Psats, fugacities, gammas)<EOL>return P - Pcalc<EOL><DEDENT>T_bubble = newton(bubble_P_error, <NUM_LIT>)<EOL>return T_bubble<EOL>", "docstring": "Calculates bubble point for a given pressure\n\n    Parameters\n    ----------\n    P : float\n        Pressure, [Pa]\n    zs : list[float]\n        Overall mole fractions of all species, [-]\n    vapor_pressure_eqns : list[functions]\n        Temperature dependent function for each specie, Returns Psat, [Pa]\n    fugacities : list[float], optional\n        fugacities of each species, defaults to list of ones, [-]\n    gammas : list[float], optional\n        gammas of each species, defaults to list of ones, [-]\n\n    Returns\n    -------\n    Tbubble : float, optional\n        Temperature of bubble point at pressure `P`, [K]", "id": "f15784:m14"}
{"signature": "def bubble_at_T(zs, Psats, fugacities=None, gammas=None):", "body": "if not fugacities:<EOL><INDENT>fugacities = [<NUM_LIT:1> for i in range(len(Psats))]<EOL><DEDENT>if not gammas:<EOL><INDENT>gammas = [<NUM_LIT:1> for i in range(len(Psats))]<EOL><DEDENT>if not none_and_length_check((zs, Psats, fugacities, gammas)):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>P = sum(zs[i]*Psats[i]*gammas[i]/fugacities[i] for i in range(len(zs)))<EOL>return P<EOL>", "docstring": ">>> bubble_at_T([0.5, 0.5], [1400, 7000])\n4200.0\n>>> bubble_at_T([0.5, 0.5], [1400, 7000], gammas=[1.1, .75])\n3395.0\n>>> bubble_at_T([0.5, 0.5], [1400, 7000], gammas=[1.1, .75], fugacities=[.995, 0.98])\n3452.440775305097", "id": "f15784:m10"}
{"signature": "def omega_mixture(omegas, zs, CASRNs=None, Method=None,<EOL>AvailableMethods=False):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if none_and_length_check([zs, omegas]):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>methods.append('<STR_LIT>')<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == '<STR_LIT>':<EOL><INDENT>_omega = mixing_simple(zs, omegas)<EOL><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>_omega = None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return _omega<EOL>", "docstring": "r'''This function handles the calculation of a mixture's acentric factor.\n    Calculation is based on the omegas provided for each pure component. Will\n    automatically select a method to use if no Method is provided;\n    returns None if insufficient data is available.\n\n    Examples\n    --------\n    >>> omega_mixture([0.025, 0.12], [0.3, 0.7])\n    0.0915\n\n    Parameters\n    ----------\n    omegas : array-like\n        acentric factors of each component, [-]\n    zs : array-like\n        mole fractions of each component, [-]\n    CASRNs: list of strings\n        CASRNs, not currently used [-]\n\n    Returns\n    -------\n    omega : float\n        acentric factor of the mixture, [-]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain omega with the given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        The method name to use. 
Only 'SIMPLE' is accepted so far.\n        All valid values are also held in the list omega_mixture_methods.\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        omega for the desired chemical, and will return methods instead of\n        omega\n\n    Notes\n    -----\n    The only data used in the methods implemented to date are mole fractions\n    and pure-component omegas. An alternate definition could be based on\n    the dew point or bubble point of a multicomponent mixture, but this has\n    not been done to date.\n\n    References\n    ----------\n    .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n       New York: McGraw-Hill Professional, 2000.", "id": "f15785:m2"}
{"signature": "def LK_omega(Tb, Tc, Pc):", "body": "T_br = Tb/Tc<EOL>omega = (log(<NUM_LIT>/Pc) - <NUM_LIT> + <NUM_LIT>/T_br + <NUM_LIT>*log(T_br) -<EOL><NUM_LIT>*T_br**<NUM_LIT:6>)/(<NUM_LIT> - <NUM_LIT>/T_br - <NUM_LIT>*log(T_br) +<EOL><NUM_LIT>*T_br**<NUM_LIT:6>)<EOL>return omega<EOL>", "docstring": "r'''Estimates the acentric factor of a fluid using a correlation in [1]_.\n\n    .. math::\n        \\omega = \\frac{\\ln P_{br}^{sat} - 5.92714 + 6.09648/T_{br} + 1.28862\n        \\ln T_{br} -0.169347T_{br}^6}\n        {15.2518 - 15.6875/T_{br} - 13.4721 \\ln T_{br} + 0.43577 T_{br}^6}\n\n    Parameters\n    ----------\n    Tb : float\n        Boiling temperature of the fluid [K]\n    Tc : float\n        Critical temperature of the fluid [K]\n    Pc : float\n        Critical pressure of the fluid [Pa]\n\n    Returns\n    -------\n    omega : float\n        Acentric factor of the fluid [-]\n\n    Notes\n    -----\n    Internal units are atmosphere and Kelvin.\n    Example value from Reid (1987). Using ASPEN V8.4, LK method gives 0.325595.\n\n    Examples\n    --------\n    Isopropylbenzene\n\n    >>> LK_omega(425.6, 631.1, 32.1E5)\n    0.32544249926397856\n\n    References\n    ----------\n    .. [1] Lee, Byung Ik, and Michael G. Kesler. \"A Generalized Thermodynamic\n       Correlation Based on Three-Parameter Corresponding States.\" AIChE Journal\n       21, no. 3 (1975): 510-527. doi:10.1002/aic.690210313.", "id": "f15785:m1"}
{"signature": "def StielPolar(Tc=None, Pc=None, omega=None, CASRN='<STR_LIT>', Method=None,<EOL>AvailableMethods=False):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if Tc and Pc and omega:<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>methods.append('<STR_LIT>')<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == '<STR_LIT>':<EOL><INDENT>P = VaporPressure(CASRN=CASRN).T_dependent_property(Tc*<NUM_LIT>)<EOL>if not P:<EOL><INDENT>factor = None<EOL><DEDENT>else:<EOL><INDENT>Pr = P/Pc<EOL>factor = log10(Pr) + <NUM_LIT>*omega + <NUM_LIT><EOL><DEDENT><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>factor = None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return factor<EOL>", "docstring": "r'''This function handles the calculation of a chemical's Stiel Polar\n    factor, directly through the definition of Stiel-polar factor if possible.\n    Requires Tc, Pc, acentric factor, and a vapor pressure datum at Tr=0.6.\n\n    Will automatically select a method to use if no Method is provided;\n    returns None if the data is not available and cannot be calculated.\n\n    .. math::\n        x = \\log P_r|_{T_r=0.6} + 1.70 \\omega + 1.552\n\n    Parameters\n    ----------\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n    omega : float\n        Acentric factor of the fluid [-]\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    factor : float\n        Stiel polar factor of compound\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain Stiel polar factor with the\n        given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        The method name to use. 
Only 'DEFINITION' is accepted so far.\n        All valid values are also held in the list Stiel_polar_methods.\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        Stiel-polar factor for the desired chemical, and will return methods\n        instead of stiel-polar factor\n\n    Notes\n    -----\n    Only one source is available for this function. It is:\n\n        * 'DEFINITION', based on the definition of\n          Stiel Polar Factor presented in [1]_, using vapor pressure data.\n\n    A few points have also been published in [2]_, which may be used for\n    comparison. Currently this is only used for a surface tension correlation.\n\n    Examples\n    --------\n    >>> StielPolar(647.3, 22048321.0, 0.344, CASRN='7732-18-5')\n    0.024581140348734376\n\n    References\n    ----------\n    .. [1] Halm, Roland L., and Leonard I. Stiel. \"A Fourth Parameter for the\n       Vapor Pressure and Entropy of Vaporization of Polar Fluids.\" AIChE\n       Journal 13, no. 2 (1967): 351-355. doi:10.1002/aic.690130228.\n    .. [2] D, Kukoljac Milo\u0161, and Grozdani\u0107 Du\u0161an K. \"New Values of the\n       Polarity Factor.\" Journal of the Serbian Chemical Society 65, no. 12\n       (January 1, 2000). http://www.shd.org.rs/JSCS/Vol65/No12-Pdf/JSCS12-07.pdf", "id": "f15785:m3"}
{"signature": "def omega(CASRN, AvailableMethods=False, Method=None, IgnoreMethods=['<STR_LIT>', '<STR_LIT>']):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>if CASRN in _crit_PassutDanner.index and not np.isnan(_crit_PassutDanner.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>if CASRN in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>Tcrit, Pcrit = Tc(CASRN), Pc(CASRN)<EOL>if Tcrit and Pcrit:<EOL><INDENT>if Tb(CASRN):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>if VaporPressure(CASRN=CASRN).T_dependent_property(Tcrit*<NUM_LIT>):<EOL><INDENT>methods.append('<STR_LIT>')  <EOL><DEDENT><DEDENT>if IgnoreMethods:<EOL><INDENT>for Method in IgnoreMethods:<EOL><INDENT>if Method in methods:<EOL><INDENT>methods.remove(Method)<EOL><DEDENT><DEDENT><DEDENT>methods.append('<STR_LIT>')<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == '<STR_LIT>':<EOL><INDENT>_omega = float(_crit_PSRKR4.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>_omega = float(_crit_PassutDanner.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>_omega = float(_crit_Yaws.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>_omega = LK_omega(Tb(CASRN), Tc(CASRN), Pc(CASRN))<EOL><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>P = VaporPressure(CASRN=CASRN).T_dependent_property(Tc(CASRN)*<NUM_LIT>)<EOL>_omega = -log10(P/Pc(CASRN)) - <NUM_LIT:1.0><EOL><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>_omega = None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return _omega<EOL>", "docstring": "r'''This function handles 
the retrieval of a chemical's acentric factor,\n    `omega`, or its calculation from correlations or directly through the\n    definition of acentric factor if possible. Requires a known boiling point,\n    critical temperature and pressure for use of the correlations. Requires\n    accurate vapor pressure data for direct calculation.\n\n    Will automatically select a method to use if no Method is provided;\n    returns None if the data is not available and cannot be calculated.\n\n    .. math::\n        \\omega \\equiv -\\log_{10}\\left[\\lim_{T/T_c=0.7}(P^{sat}/P_c)\\right]-1.0\n\n    Examples\n    --------\n    >>> omega(CASRN='64-17-5')\n    0.635\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    omega : float\n        Acentric factor of compound\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain omega with the given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        The method name to use. Accepted methods are 'PSRK', 'PD', 'YAWS', \n        'LK', and 'DEFINITION'. All valid values are also held in the list\n        omega_methods.\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        omega for the desired chemical, and will return methods instead of\n        omega\n    IgnoreMethods : list, optional\n        A list of methods to ignore in obtaining the full list of methods,\n        useful for for performance reasons and ignoring inaccurate methods\n\n    Notes\n    -----\n    A total of five sources are available for this function. 
They are:\n\n        * 'PSRK', a compillation of experimental and estimated data published \n          in the Appendix of [15]_, the fourth revision of the PSRK model.\n        * 'PD', an older compillation of\n          data published in (Passut & Danner, 1973) [16]_.\n        * 'YAWS', a large compillation of data from a\n          variety of sources; no data points are sourced in the work of [17]_.\n        * 'LK', a estimation method for hydrocarbons.\n        * 'DEFINITION', based on the definition of omega as\n          presented in [1]_, using vapor pressure data.\n\n    References\n    ----------\n    .. [1] Pitzer, K. S., D. Z. Lippmann, R. F. Curl, C. M. Huggins, and\n       D. E. Petersen: The Volumetric and Thermodynamic Properties of Fluids.\n       II. Compressibility Factor, Vapor Pressure and Entropy of Vaporization.\n       J. Am. Chem. Soc., 77: 3433 (1955).\n    .. [2] Horstmann, Sven, Anna Jab\u0142oniec, J\u00f6rg Krafczyk, Kai Fischer, and\n       J\u00fcrgen Gmehling. \"PSRK Group Contribution Equation of State:\n       Comprehensive Revision and Extension IV, Including Critical Constants\n       and \u0391-Function Parameters for 1000 Components.\" Fluid Phase Equilibria\n       227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.\n    .. [3] Passut, Charles A., and Ronald P. Danner. \"Acentric Factor. A\n       Valuable Correlating Parameter for the Properties of Hydrocarbons.\"\n       Industrial & Engineering Chemistry Process Design and Development 12,\n       no. 3 (July 1, 1973): 365-68. doi:10.1021/i260047a026.\n    .. [4] Yaws, Carl L. Thermophysical Properties of Chemicals and\n       Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional\n       Publishing, 2014.", "id": "f15785:m0"}
{"signature": "def BVirial_Pitzer_Curl(T, Tc, Pc, omega, order=<NUM_LIT:0>):", "body": "Tr = T/Tc<EOL>if order == <NUM_LIT:0>:<EOL><INDENT>B0 = <NUM_LIT> - <NUM_LIT>/Tr - <NUM_LIT>/Tr**<NUM_LIT:2> - <NUM_LIT>/Tr**<NUM_LIT:3><EOL>B1 = <NUM_LIT> + <NUM_LIT>/Tr - <NUM_LIT:0.5>/Tr**<NUM_LIT:2> - <NUM_LIT>/Tr**<NUM_LIT:3> - <NUM_LIT>/Tr**<NUM_LIT:8><EOL><DEDENT>elif order == <NUM_LIT:1>:<EOL><INDENT>B0 = Tc*(<NUM_LIT>*T**<NUM_LIT:2> + <NUM_LIT>*T*Tc + <NUM_LIT>*Tc**<NUM_LIT:2>)/(<NUM_LIT>*T**<NUM_LIT:4>)<EOL>B1 = Tc*(-<NUM_LIT>*T**<NUM_LIT:7> + <NUM_LIT>*T**<NUM_LIT:6>*Tc + <NUM_LIT>*T**<NUM_LIT:5>*Tc**<NUM_LIT:2> + <NUM_LIT>*Tc**<NUM_LIT:7>)/(<NUM_LIT>*T**<NUM_LIT:9>)<EOL><DEDENT>elif order == <NUM_LIT:2>:<EOL><INDENT>B0 = -<NUM_LIT:3>*Tc*(<NUM_LIT>*T**<NUM_LIT:2> + <NUM_LIT>*T*Tc + <NUM_LIT>*Tc**<NUM_LIT:2>)/(<NUM_LIT>*T**<NUM_LIT:5>)<EOL>B1 = Tc*(<NUM_LIT>*T**<NUM_LIT:7> - <NUM_LIT>*T**<NUM_LIT:6>*Tc - <NUM_LIT>*T**<NUM_LIT:5>*Tc**<NUM_LIT:2> - <NUM_LIT>*Tc**<NUM_LIT:7>)/(<NUM_LIT>*T**<NUM_LIT:10>)<EOL><DEDENT>elif order == <NUM_LIT:3>:<EOL><INDENT>B0 = <NUM_LIT:3>*Tc*(<NUM_LIT>*T**<NUM_LIT:2> + <NUM_LIT>*T*Tc + <NUM_LIT>*Tc**<NUM_LIT:2>)/(<NUM_LIT>*T**<NUM_LIT:6>)<EOL>B1 = <NUM_LIT:3>*Tc*(-<NUM_LIT>*T**<NUM_LIT:7> + <NUM_LIT:1000>*T**<NUM_LIT:6>*Tc + <NUM_LIT>*T**<NUM_LIT:5>*Tc**<NUM_LIT:2> + <NUM_LIT>*Tc**<NUM_LIT:7>)/(<NUM_LIT>*T**<NUM_LIT:11>)<EOL><DEDENT>elif order == -<NUM_LIT:1>:<EOL><INDENT>B0 = <NUM_LIT>*T/<NUM_LIT> - <NUM_LIT>*Tc*log(T)/<NUM_LIT:100> + (<NUM_LIT>*T*Tc**<NUM_LIT:2> + <NUM_LIT>*Tc**<NUM_LIT:3>)/(<NUM_LIT>*T**<NUM_LIT:2>)<EOL>B1 = <NUM_LIT>*T/<NUM_LIT:1000> + <NUM_LIT>*Tc*log(T)/<NUM_LIT:50> + (<NUM_LIT>*T**<NUM_LIT:6>*Tc**<NUM_LIT:2> + <NUM_LIT>*T**<NUM_LIT:5>*Tc**<NUM_LIT:3> + <NUM_LIT>*Tc**<NUM_LIT:8>)/(<NUM_LIT>*T**<NUM_LIT:7>)<EOL><DEDENT>elif order == -<NUM_LIT:2>:<EOL><INDENT>B0 = <NUM_LIT>*T**<NUM_LIT:2>/<NUM_LIT> - <NUM_LIT>*T*Tc*log(T)/<NUM_LIT:100> + <NUM_LIT>*T*Tc/<NUM_LIT:100> + <NUM_LIT>*Tc**<NUM_LIT:2>*log(T)/<NUM_LIT> - 
<NUM_LIT>*Tc**<NUM_LIT:3>/(<NUM_LIT>*T)<EOL>B1 = <NUM_LIT>*T**<NUM_LIT:2>/<NUM_LIT> + <NUM_LIT>*T*Tc*log(T)/<NUM_LIT:50> - <NUM_LIT>*T*Tc/<NUM_LIT:50> + Tc**<NUM_LIT:2>*log(T)/<NUM_LIT:2> - (<NUM_LIT>*T**<NUM_LIT:5>*Tc**<NUM_LIT:3> + <NUM_LIT>*Tc**<NUM_LIT:8>)/(<NUM_LIT>*T**<NUM_LIT:6>)<EOL><DEDENT>else: <EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>Br = B0 + omega*B1<EOL>return Br*R*Tc/Pc<EOL>", "docstring": "r'''Calculates the second virial coefficient using the model in [1]_.\n    Designed for simple calculations.\n\n    .. math::\n        B_r=B^{(0)}+\\omega B^{(1)}\n\n        B^{(0)}=0.1445-0.33/T_r-0.1385/T_r^2-0.0121/T_r^3\n\n        B^{(1)} = 0.073+0.46/T_r-0.5/T_r^2 -0.097/T_r^3 - 0.0073/T_r^8\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of the fluid [Pa]\n    omega : float\n        Acentric factor for fluid, [-]\n    order : int, optional\n        Order of the calculation. 0 for the calculation of B itself; for 1/2/3, \n        the first/second/third derivative of B with respect to temperature; and  \n        for -1/-2, the first/second indefinite integral of B with respect to \n        temperature. No other integrals or derivatives are implemented, and an \n        exception will be raised if any other order is given.\n\n    Returns\n    -------\n    B : float\n        Second virial coefficient in density form or its integral/derivative if\n        specified, [m^3/mol or m^3/mol/K^order]\n\n    Notes\n    -----\n    Analytical models for derivatives and integrals are available for orders\n    -2, -1, 1, 2, and 3, all obtained with SymPy.\n\n    For first temperature derivative of B:\n\n    .. 
math::\n        \\frac{d B^{(0)}}{dT} = \\frac{33 Tc}{100 T^{2}} + \\frac{277 Tc^{2}}{1000 T^{3}} + \\frac{363 Tc^{3}}{10000 T^{4}}\n\n        \\frac{d B^{(1)}}{dT} = - \\frac{23 Tc}{50 T^{2}} + \\frac{Tc^{2}}{T^{3}} + \\frac{291 Tc^{3}}{1000 T^{4}} + \\frac{73 Tc^{8}}{1250 T^{9}}\n\n    For the second temperature derivative of B:\n\n    .. math::\n        \\frac{d^2 B^{(0)}}{dT^2} = - \\frac{3 Tc}{5000 T^{3}} \\left(1100 + \\frac{1385 Tc}{T} + \\frac{242 Tc^{2}}{T^{2}}\\right)\n\n        \\frac{d^2 B^{(1)}}{dT^2} = \\frac{Tc}{T^{3}} \\left(\\frac{23}{25} - \\frac{3 Tc}{T} - \\frac{291 Tc^{2}}{250 T^{2}} - \\frac{657 Tc^{7}}{1250 T^{7}}\\right)\n\n    For the third temperature derivative of B:\n\n    .. math::\n        \\frac{d^3 B^{(0)}}{dT^3} = \\frac{3 Tc}{500 T^{4}} \\left(330 + \\frac{554 Tc}{T} + \\frac{121 Tc^{2}}{T^{2}}\\right)\n\n        \\frac{d^3 B^{(1)}}{dT^3} = \\frac{3 Tc}{T^{4}} \\left(- \\frac{23}{25} + \\frac{4 Tc}{T} + \\frac{97 Tc^{2}}{50 T^{2}} + \\frac{219 Tc^{7}}{125 T^{7}}\\right)\n\n    For the first indefinite integral of B:\n\n    .. math::\n        \\int{B^{(0)}} dT = \\frac{289 T}{2000} - \\frac{33 Tc}{100} \\log{\\left (T \\right )} + \\frac{1}{20000 T^{2}} \\left(2770 T Tc^{2} + 121 Tc^{3}\\right)\n\n        \\int{B^{(1)}} dT = \\frac{73 T}{1000} + \\frac{23 Tc}{50} \\log{\\left (T \\right )} + \\frac{1}{70000 T^{7}} \\left(35000 T^{6} Tc^{2} + 3395 T^{5} Tc^{3} + 73 Tc^{8}\\right)\n\n    For the second indefinite integral of B:\n\n    .. 
math::\n        \\int\\int B^{(0)} dT dT = \\frac{289 T^{2}}{4000} - \\frac{33 T}{100} Tc \\log{\\left (T \\right )} + \\frac{33 T}{100} Tc + \\frac{277 Tc^{2}}{2000} \\log{\\left (T \\right )} - \\frac{121 Tc^{3}}{20000 T}\n\n        \\int\\int B^{(1)} dT dT = \\frac{73 T^{2}}{2000} + \\frac{23 T}{50} Tc \\log{\\left (T \\right )} - \\frac{23 T}{50} Tc + \\frac{Tc^{2}}{2} \\log{\\left (T \\right )} - \\frac{1}{420000 T^{6}} \\left(20370 T^{5} Tc^{3} + 73 Tc^{8}\\right)\n\n    Examples\n    --------\n    Example matching that in BVirial_Abbott, for isobutane.\n\n    >>> BVirial_Pitzer_Curl(510., 425.2, 38E5, 0.193)\n    -0.0002084535541385102\n\n    References\n    ----------\n    .. [1] Pitzer, Kenneth S., and R. F. Curl. \"The Volumetric and\n       Thermodynamic Properties of Fluids. III. Empirical Equation for the\n       Second Virial Coefficient1.\" Journal of the American Chemical Society\n       79, no. 10 (May 1, 1957): 2369-70. doi:10.1021/ja01567a007.", "id": "f15786:m0"}
{"signature": "def BVirial_Tsonopoulos_extended(T, Tc, Pc, omega, a=<NUM_LIT:0>, b=<NUM_LIT:0>, species_type='<STR_LIT>', <EOL>dipole=<NUM_LIT:0>, order=<NUM_LIT:0>):", "body": "Tr = T/Tc<EOL>if order == <NUM_LIT:0>:<EOL><INDENT>B0 = <NUM_LIT> - <NUM_LIT>/Tr - <NUM_LIT>/Tr**<NUM_LIT:2> - <NUM_LIT>/Tr**<NUM_LIT:3> - <NUM_LIT>/Tr**<NUM_LIT:8><EOL>B1 = <NUM_LIT> + <NUM_LIT>/Tr**<NUM_LIT:2> - <NUM_LIT>/Tr**<NUM_LIT:3> - <NUM_LIT>/Tr**<NUM_LIT:8><EOL>B2 = <NUM_LIT:1.>/Tr**<NUM_LIT:6><EOL>B3 = -<NUM_LIT:1.>/Tr**<NUM_LIT:8><EOL><DEDENT>elif order == <NUM_LIT:1>:<EOL><INDENT>B0 = <NUM_LIT>*Tc/(<NUM_LIT:100>*T**<NUM_LIT:2>) + <NUM_LIT>*Tc**<NUM_LIT:2>/(<NUM_LIT:1000>*T**<NUM_LIT:3>) + <NUM_LIT>*Tc**<NUM_LIT:3>/(<NUM_LIT>*T**<NUM_LIT:4>) + <NUM_LIT>*Tc**<NUM_LIT:8>/(<NUM_LIT>*T**<NUM_LIT:9>)<EOL>B1 = -<NUM_LIT>*Tc**<NUM_LIT:2>/(<NUM_LIT>*T**<NUM_LIT:3>) + <NUM_LIT>*Tc**<NUM_LIT:3>/(<NUM_LIT:1000>*T**<NUM_LIT:4>) + <NUM_LIT:8>*Tc**<NUM_LIT:8>/(<NUM_LIT>*T**<NUM_LIT:9>)<EOL>B2 = -<NUM_LIT>*Tc**<NUM_LIT:6>/T**<NUM_LIT:7><EOL>B3 = <NUM_LIT>*Tc**<NUM_LIT:8>/T**<NUM_LIT:9><EOL><DEDENT>elif order == <NUM_LIT:2>:<EOL><INDENT>B0 = -<NUM_LIT:3>*Tc*(<NUM_LIT> + <NUM_LIT>*Tc/T + <NUM_LIT>*Tc**<NUM_LIT:2>/T**<NUM_LIT:2> + <NUM_LIT>*Tc**<NUM_LIT:7>/T**<NUM_LIT:7>)/(<NUM_LIT>*T**<NUM_LIT:3>)<EOL>B1 = <NUM_LIT:3>*Tc**<NUM_LIT:2>*(<NUM_LIT> - <NUM_LIT>*Tc/T - <NUM_LIT>*Tc**<NUM_LIT:6>/T**<NUM_LIT:6>)/(<NUM_LIT>*T**<NUM_LIT:4>)<EOL>B2 = <NUM_LIT>*Tc**<NUM_LIT:6>/T**<NUM_LIT:8><EOL>B3 = -<NUM_LIT>*Tc**<NUM_LIT:8>/T**<NUM_LIT:10><EOL><DEDENT>elif order == <NUM_LIT:3>:<EOL><INDENT>B0 = <NUM_LIT:3>*Tc*(<NUM_LIT> + <NUM_LIT>*Tc/T + <NUM_LIT>*Tc**<NUM_LIT:2>/T**<NUM_LIT:2> + <NUM_LIT>*Tc**<NUM_LIT:7>/T**<NUM_LIT:7>)/(<NUM_LIT>*T**<NUM_LIT:4>)<EOL>B1 = <NUM_LIT:3>*Tc**<NUM_LIT:2>*(-<NUM_LIT> + <NUM_LIT>*Tc/T + <NUM_LIT>*Tc**<NUM_LIT:6>/T**<NUM_LIT:6>)/(<NUM_LIT>*T**<NUM_LIT:5>)<EOL>B2 = -<NUM_LIT>*Tc**<NUM_LIT:6>/T**<NUM_LIT:9><EOL>B3 = <NUM_LIT>*Tc**<NUM_LIT:8>/T**<NUM_LIT:11><EOL><DEDENT>elif 
order == -<NUM_LIT:1>:<EOL><INDENT>B0 = <NUM_LIT>*T/<NUM_LIT> - <NUM_LIT>*Tc*log(T)/<NUM_LIT> + (<NUM_LIT>*T**<NUM_LIT:6>*Tc**<NUM_LIT:2> + <NUM_LIT>*T**<NUM_LIT:5>*Tc**<NUM_LIT:3> + <NUM_LIT>*Tc**<NUM_LIT:8>)/(<NUM_LIT>*T**<NUM_LIT:7>)<EOL>B1 = <NUM_LIT>*T/<NUM_LIT> - (<NUM_LIT>*T**<NUM_LIT:6>*Tc**<NUM_LIT:2> - <NUM_LIT>*T**<NUM_LIT:5>*Tc**<NUM_LIT:3> - <NUM_LIT>*Tc**<NUM_LIT:8>)/(<NUM_LIT>*T**<NUM_LIT:7>)<EOL>B2 = -Tc**<NUM_LIT:6>/(<NUM_LIT:5>*T**<NUM_LIT:5>)<EOL>B3 = Tc**<NUM_LIT:8>/(<NUM_LIT:7>*T**<NUM_LIT:7>)<EOL><DEDENT>elif order == -<NUM_LIT:2>:<EOL><INDENT>B0 = <NUM_LIT>*T**<NUM_LIT:2>/<NUM_LIT> - <NUM_LIT>*T*Tc*log(T)/<NUM_LIT> + <NUM_LIT>*T*Tc/<NUM_LIT> + <NUM_LIT>*Tc**<NUM_LIT:2>*log(T)/<NUM_LIT> - (<NUM_LIT>*T**<NUM_LIT:5>*Tc**<NUM_LIT:3> + <NUM_LIT>*Tc**<NUM_LIT:8>)/(<NUM_LIT>*T**<NUM_LIT:6>)<EOL>B1 = <NUM_LIT>*T**<NUM_LIT:2>/<NUM_LIT> - <NUM_LIT>*Tc**<NUM_LIT:2>*log(T)/<NUM_LIT> - (<NUM_LIT>*T**<NUM_LIT:5>*Tc**<NUM_LIT:3> + <NUM_LIT>*Tc**<NUM_LIT:8>)/(<NUM_LIT>*T**<NUM_LIT:6>)<EOL>B2 = Tc**<NUM_LIT:6>/(<NUM_LIT:20>*T**<NUM_LIT:4>)<EOL>B3 = -Tc**<NUM_LIT:8>/(<NUM_LIT>*T**<NUM_LIT:6>)<EOL><DEDENT>else: <EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if a == <NUM_LIT:0> and b == <NUM_LIT:0> and species_type != '<STR_LIT>':<EOL><INDENT>if species_type == '<STR_LIT>' or species_type == '<STR_LIT>':<EOL><INDENT>a, b = <NUM_LIT:0>, <NUM_LIT:0><EOL><DEDENT>elif species_type == '<STR_LIT>':<EOL><INDENT>a, b = <NUM_LIT>, <NUM_LIT><EOL><DEDENT>elif species_type == '<STR_LIT>':<EOL><INDENT>a, b = -<NUM_LIT>, <NUM_LIT:0><EOL><DEDENT>elif dipole != <NUM_LIT:0> and Tc != <NUM_LIT:0> and Pc != <NUM_LIT:0>:<EOL><INDENT>dipole_r = <NUM_LIT>*dipole**<NUM_LIT:2>*(Pc/<NUM_LIT>)/Tc**<NUM_LIT:2><EOL>if (species_type == '<STR_LIT>' or species_type == '<STR_LIT>'<EOL>or species_type == '<STR_LIT>' or species_type == '<STR_LIT>'<EOL>or species_type == '<STR_LIT>' or species_type == '<STR_LIT>'):<EOL><INDENT>a, b = -<NUM_LIT>*dipole_r-<NUM_LIT>*dipole_r**<NUM_LIT:8>, 
<NUM_LIT:0><EOL><DEDENT>elif (species_type == '<STR_LIT>' or species_type == '<STR_LIT>'<EOL>or species_type == '<STR_LIT>' or species_type == '<STR_LIT>'):<EOL><INDENT>a, b = -<NUM_LIT>*dipole_r**<NUM_LIT:4>-<NUM_LIT>*dipole_r**<NUM_LIT:8>, <NUM_LIT:0><EOL><DEDENT>elif species_type == '<STR_LIT>':<EOL><INDENT>a, b = <NUM_LIT>, <NUM_LIT>+<NUM_LIT>*dipole_r<EOL><DEDENT><DEDENT><DEDENT>Br = B0 + omega*B1 + a*B2 + b*B3<EOL>return Br*R*Tc/Pc<EOL>", "docstring": "r'''Calculates the second virial coefficient using the\n    comprehensive model in [1]_. See the notes for the calculation of `a` and\n    `b`.\n\n    .. math::\n        \\frac{BP_c}{RT_c} = B^{(0)} + \\omega B^{(1)} + a B^{(2)} + b B^{(3)}\n\n        B^{(0)}=0.1445-0.33/T_r-0.1385/T_r^2-0.0121/T_r^3\n\n        B^{(1)} = 0.0637+0.331/T_r^2-0.423/T_r^3 -0.423/T_r^3 - 0.008/T_r^8\n\n        B^{(2)} = 1/T_r^6\n\n        B^{(3)} = -1/T_r^8\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of the fluid [Pa]\n    omega : float\n        Acentric factor for fluid, [-]\n    a : float, optional\n        Fit parameter, calculated based on species_type if a is not given and\n        species_type matches on of the supported chemical classes.\n    b : float, optional\n        Fit parameter, calculated based on species_type if a is not given and\n        species_type matches on of the supported chemical classes.\n    species_type : str, optional\n        One of .\n    dipole : float\n        dipole moment, optional, [Debye]\n    order : int, optional\n        Order of the calculation. 0 for the calculation of B itself; for 1/2/3, \n        the first/second/third derivative of B with respect to temperature; and  \n        for -1/-2, the first/second indefinite integral of B with respect to \n        temperature. 
No other integrals or derivatives are implemented, and an \n        exception will be raised if any other order is given.\n\n    Returns\n    -------\n    B : float\n        Second virial coefficient in density form or its integral/derivative if\n        specified, [m^3/mol or m^3/mol/K^order]\n\n    Notes\n    -----\n    Analytical models for derivatives and integrals are available for orders\n    -2, -1, 1, 2, and 3, all obtained with SymPy.\n\n\n    To calculate `a` or `b`, the following rules are used:\n\n    For 'simple' or 'normal' fluids:\n\n    .. math::\n        a = 0\n\n        b = 0\n\n    For 'ketone', 'aldehyde', 'alkyl nitrile', 'ether', 'carboxylic acid',\n    or 'ester' types of chemicals:\n\n    .. math::\n        a = -2.14\\times 10^{-4} \\mu_r - 4.308 \\times 10^{-21} (\\mu_r)^8\n\n        b = 0\n\n    For 'alkyl halide', 'mercaptan', 'sulfide', or 'disulfide' types of \n    chemicals:\n\n    .. math::\n        a = -2.188\\times 10^{-4} (\\mu_r)^4 - 7.831 \\times 10^{-21} (\\mu_r)^8\n\n        b = 0\n\n    For 'alkanol' types of chemicals (except methanol):\n\n    .. math::\n        a = 0.0878\n\n        b = 0.00908 + 0.0006957 \\mu_r\n\n    For methanol:\n\n    .. math::\n        a = 0.0878\n\n        b = 0.0525\n\n    For water:\n\n    .. math::\n        a = -0.0109\n\n        b = 0\n\n    If required, the form of dipole moment used in the calculation of some\n    types of `a` and `b` values is as follows:\n\n    .. math::\n        \\mu_r = 100000\\frac{\\mu^2(Pc/101325.0)}{Tc^2}\n\n\n    For first temperature derivative of B:\n\n    .. 
math::\n        \\frac{d B^{(0)}}{dT} = \\frac{33 Tc}{100 T^{2}} + \\frac{277 Tc^{2}}{1000 T^{3}} + \\frac{363 Tc^{3}}{10000 T^{4}} + \\frac{607 Tc^{8}}{125000 T^{9}}\n\n        \\frac{d B^{(1)}}{dT} = - \\frac{331 Tc^{2}}{500 T^{3}} + \\frac{1269 Tc^{3}}{1000 T^{4}} + \\frac{8 Tc^{8}}{125 T^{9}}\n\n        \\frac{d B^{(2)}}{dT} = - \\frac{6 Tc^{6}}{T^{7}}\n\n        \\frac{d B^{(3)}}{dT} = \\frac{8 Tc^{8}}{T^{9}}\n\n    For the second temperature derivative of B:\n\n    .. math::\n        \\frac{d^2 B^{(0)}}{dT^2} = - \\frac{3 Tc}{125000 T^{3}} \\left(27500 + \\frac{34625 Tc}{T} + \\frac{6050 Tc^{2}}{T^{2}} + \\frac{1821 Tc^{7}}{T^{7}}\\right)\n\n        \\frac{d^2 B^{(1)}}{dT^2} = \\frac{3 Tc^{2}}{500 T^{4}} \\left(331 - \\frac{846 Tc}{T} - \\frac{96 Tc^{6}}{T^{6}}\\right)\n\n        \\frac{d^2 B^{(2)}}{dT^2} = \\frac{42 Tc^{6}}{T^{8}}\n\n        \\frac{d^2 B^{(3)}}{dT^2} = - \\frac{72 Tc^{8}}{T^{10}}\n\n    For the third temperature derivative of B:\n\n    .. math::\n        \\frac{d^3 B^{(0)}}{dT^3} = \\frac{3 Tc}{12500 T^{4}} \\left(8250 + \\frac{13850 Tc}{T} + \\frac{3025 Tc^{2}}{T^{2}} + \\frac{1821 Tc^{7}}{T^{7}}\\right)\n\n        \\frac{d^3 B^{(1)}}{dT^3} = \\frac{3 Tc^{2}}{250 T^{5}} \\left(-662 + \\frac{2115 Tc}{T} + \\frac{480 Tc^{6}}{T^{6}}\\right)\n\n        \\frac{d^3 B^{(2)}}{dT^3} = - \\frac{336 Tc^{6}}{T^{9}}\n\n        \\frac{d^3 B^{(3)}}{dT^3} = \\frac{720 Tc^{8}}{T^{11}}\n\n    For the first indefinite integral of B:\n\n    .. math::\n        \\int{B^{(0)}} dT = \\frac{289 T}{2000} - \\frac{33 Tc}{100} \\log{\\left (T \\right )} + \\frac{1}{7000000 T^{7}} \\left(969500 T^{6} Tc^{2} + 42350 T^{5} Tc^{3} + 607 Tc^{8}\\right)\n\n        \\int{B^{(1)}} dT = \\frac{637 T}{10000} - \\frac{1}{70000 T^{7}} \\left(23170 T^{6} Tc^{2} - 14805 T^{5} Tc^{3} - 80 Tc^{8}\\right)\n\n        \\int{B^{(2)}} dT = - \\frac{Tc^{6}}{5 T^{5}}\n\n        \\int{B^{(3)}} dT = \\frac{Tc^{8}}{7 T^{7}}\n\n    For the second indefinite integral of B:\n\n    .. 
math::\n        \\int\\int B^{(0)} dT dT = \\frac{289 T^{2}}{4000} - \\frac{33 T}{100} Tc \\log{\\left (T \\right )} + \\frac{33 T}{100} Tc + \\frac{277 Tc^{2}}{2000} \\log{\\left (T \\right )} - \\frac{1}{42000000 T^{6}} \\left(254100 T^{5} Tc^{3} + 607 Tc^{8}\\right)\n\n        \\int\\int B^{(1)} dT dT = \\frac{637 T^{2}}{20000} - \\frac{331 Tc^{2}}{1000} \\log{\\left (T \\right )} - \\frac{1}{210000 T^{6}} \\left(44415 T^{5} Tc^{3} + 40 Tc^{8}\\right)\n\n        \\int\\int B^{(2)} dT dT = \\frac{Tc^{6}}{20 T^{4}}\n\n        \\int\\int B^{(3)} dT dT = - \\frac{Tc^{8}}{42 T^{6}}\n\n    Examples\n    --------\n    Example from Perry's Handbook, 8E, p2-499. Matches to a decimal place.\n\n    >>> BVirial_Tsonopoulos_extended(430., 405.65, 11.28E6, 0.252608, a=0, b=0, species_type='ketone', dipole=1.469)\n    -9.679715056695323e-05\n\n    References\n    ----------\n    .. [1] Tsonopoulos, C., and J. L. Heidman. \"From the Virial to the Cubic\n       Equation of State.\" Fluid Phase Equilibria 57, no. 3 (1990): 261-76.\n       doi:10.1016/0378-3812(90)85126-U\n    .. [2] Tsonopoulos, Constantine, and John H. Dymond. \"Second Virial\n       Coefficients of Normal Alkanes, Linear 1-Alkanols (and Water), Alkyl\n       Ethers, and Their Mixtures.\" Fluid Phase Equilibria, International\n       Workshop on Vapour-Liquid Equilibria and Related Properties in Binary\n       and Ternary Mixtures of Ethers, Alkanes and Alkanols, 133, no. 1-2\n       (June 1997): 11-34. doi:10.1016/S0378-3812(97)00058-7.", "id": "f15786:m3"}
{"signature": "def BVirial_Abbott(T, Tc, Pc, omega, order=<NUM_LIT:0>):", "body": "Tr = T/Tc<EOL>if order == <NUM_LIT:0>:<EOL><INDENT>B0 = <NUM_LIT> - <NUM_LIT>/Tr**<NUM_LIT><EOL>B1 = <NUM_LIT> - <NUM_LIT>/Tr**<NUM_LIT><EOL><DEDENT>elif order == <NUM_LIT:1>:<EOL><INDENT>B0 = <NUM_LIT>*Tr**(-<NUM_LIT>)/T<EOL>B1 = <NUM_LIT>*Tr**(-<NUM_LIT>)/T<EOL><DEDENT>elif order == <NUM_LIT:2>:<EOL><INDENT>B0 = -<NUM_LIT>*Tr**(-<NUM_LIT>)/T**<NUM_LIT:2><EOL>B1 = -<NUM_LIT>*Tr**(-<NUM_LIT>)/T**<NUM_LIT:2><EOL><DEDENT>elif order == <NUM_LIT:3>:<EOL><INDENT>B0 = <NUM_LIT>*Tr**(-<NUM_LIT>)/T**<NUM_LIT:3><EOL>B1 = <NUM_LIT>*Tr**(-<NUM_LIT>)/T**<NUM_LIT:3><EOL><DEDENT>elif order == -<NUM_LIT:1>:<EOL><INDENT>B0 = <NUM_LIT>*T + <NUM_LIT>/<NUM_LIT>*Tc*(Tr)**(-<NUM_LIT>)<EOL>B1 = <NUM_LIT>*T + <NUM_LIT>*Tc*Tr**(-<NUM_LIT>)<EOL><DEDENT>elif order == -<NUM_LIT:2>:<EOL><INDENT>B0 = <NUM_LIT>*T**<NUM_LIT:2> + <NUM_LIT>/<NUM_LIT>*Tc**<NUM_LIT:2>*Tr**<NUM_LIT><EOL>B1 = <NUM_LIT>*T**<NUM_LIT:2> - <NUM_LIT>/<NUM_LIT>*Tc**<NUM_LIT:2>*Tr**(-<NUM_LIT>)<EOL><DEDENT>else: <EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>Br = B0 + omega*B1<EOL>return Br*R*Tc/Pc<EOL>", "docstring": "r'''Calculates the second virial coefficient using the model in [1]_.\n    Simple fit to the Lee-Kesler equation.\n\n    .. math::\n        B_r=B^{(0)}+\\omega B^{(1)}\n\n        B^{(0)}=0.083+\\frac{0.422}{T_r^{1.6}}\n\n        B^{(1)}=0.139-\\frac{0.172}{T_r^{4.2}}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of the fluid [Pa]\n    omega : float\n        Acentric factor for fluid, [-]\n    order : int, optional\n        Order of the calculation. 0 for the calculation of B itself; for 1/2/3, \n        the first/second/third derivative of B with respect to temperature; and  \n        for -1/-2, the first/second indefinite integral of B with respect to \n        temperature. 
No other integrals or derivatives are implemented, and an \n        exception will be raised if any other order is given.\n\n    Returns\n    -------\n    B : float\n        Second virial coefficient in density form or its integral/derivative if\n        specified, [m^3/mol or m^3/mol/K^order]\n\n    Notes\n    -----\n    Analytical models for derivatives and integrals are available for orders\n    -2, -1, 1, 2, and 3, all obtained with SymPy.\n\n    For first temperature derivative of B:\n\n    .. math::\n        \\frac{d B^{(0)}}{dT} = \\frac{0.6752}{T \\left(\\frac{T}{Tc}\\right)^{1.6}}\n\n        \\frac{d B^{(1)}}{dT} = \\frac{0.7224}{T \\left(\\frac{T}{Tc}\\right)^{4.2}}\n\n    For the second temperature derivative of B:\n\n    .. math::\n        \\frac{d^2 B^{(0)}}{dT^2} = - \\frac{1.75552}{T^{2} \\left(\\frac{T}{Tc}\\right)^{1.6}}\n\n        \\frac{d^2 B^{(1)}}{dT^2} = - \\frac{3.75648}{T^{2} \\left(\\frac{T}{Tc}\\right)^{4.2}}\n\n    For the third temperature derivative of B:\n\n    .. math::\n        \\frac{d^3 B^{(0)}}{dT^3} = \\frac{6.319872}{T^{3} \\left(\\frac{T}{Tc}\\right)^{1.6}}\n\n        \\frac{d^3 B^{(1)}}{dT^3} = \\frac{23.290176}{T^{3} \\left(\\frac{T}{Tc}\\right)^{4.2}}\n\n    For the first indefinite integral of B:\n\n    .. math::\n        \\int{B^{(0)}} dT = 0.083 T + \\frac{\\frac{211}{300} Tc}{\\left(\\frac{T}{Tc}\\right)^{0.6}}\n\n        \\int{B^{(1)}} dT = 0.139 T + \\frac{0.05375 Tc}{\\left(\\frac{T}{Tc}\\right)^{3.2}}\n\n    For the second indefinite integral of B:\n\n    .. math::\n        \\int\\int B^{(0)} dT dT = 0.0415 T^{2} + \\frac{211}{120} Tc^{2} \\left(\\frac{T}{Tc}\\right)^{0.4}\n\n        \\int\\int B^{(1)} dT dT = 0.0695 T^{2} - \\frac{\\frac{43}{1760} Tc^{2}}{\\left(\\frac{T}{Tc}\\right)^{2.2}}\n\n    Examples\n    --------\n    Example is from [1]_, p. 
93, and matches the result exactly, for isobutane.\n\n    >>> BVirial_Abbott(510., 425.2, 38E5, 0.193)\n    -0.00020570178037383633\n\n    References\n    ----------\n    .. [1] Smith, H. C. Van Ness Joseph M. Introduction to Chemical Engineering\n       Thermodynamics 4E 1987.", "id": "f15786:m1"}
{"signature": "def Hcombustion(atoms, Hf=None, HfH2O=-<NUM_LIT>, HfCO2=-<NUM_LIT>,<EOL>HfSO2=-<NUM_LIT>, HfBr2=<NUM_LIT>, HfI2=<NUM_LIT>, HfHCl=-<NUM_LIT>,<EOL>HfHF=-<NUM_LIT>, HfP4O10=-<NUM_LIT>, HfO2=<NUM_LIT:0>, HfN2=<NUM_LIT:0>):", "body": "if not Hf or not atoms:<EOL><INDENT>return None<EOL><DEDENT>nC, nH, nN, nO, nS, nBr, nI, nCl, nF, nP = <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0><EOL>if '<STR_LIT:C>' in atoms and atoms['<STR_LIT:C>'] != <NUM_LIT:0>:<EOL><INDENT>nC = atoms['<STR_LIT:C>']<EOL><DEDENT>else:<EOL><INDENT>return None  <EOL><DEDENT>if '<STR_LIT:H>' in atoms:<EOL><INDENT>nH = atoms['<STR_LIT:H>']<EOL><DEDENT>if '<STR_LIT:N>' in atoms:<EOL><INDENT>nN = atoms['<STR_LIT:N>']<EOL><DEDENT>if '<STR_LIT:O>' in atoms:<EOL><INDENT>nO = atoms['<STR_LIT:O>']<EOL><DEDENT>if '<STR_LIT:S>' in atoms:<EOL><INDENT>nS = atoms['<STR_LIT:S>']<EOL><DEDENT>if '<STR_LIT>' in atoms:<EOL><INDENT>nBr = atoms['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT:I>' in atoms:<EOL><INDENT>nI = atoms['<STR_LIT:I>']<EOL><DEDENT>if '<STR_LIT>' in atoms:<EOL><INDENT>nCl = atoms['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT:F>' in atoms:<EOL><INDENT>nF = atoms['<STR_LIT:F>']<EOL><DEDENT>if '<STR_LIT:P>' in atoms:<EOL><INDENT>nP = atoms['<STR_LIT:P>']<EOL><DEDENT>nO2_req = nC + nS + nH/<NUM_LIT> + <NUM_LIT:5>*nP/<NUM_LIT> - (nCl + nF)/<NUM_LIT> - nO/<NUM_LIT><EOL>nCO2 = nC<EOL>nBr2 = nBr/<NUM_LIT><EOL>nI2 = nI/<NUM_LIT><EOL>nHCl = nCl<EOL>nHF = nF<EOL>nSO2 = nS<EOL>nN2 = nN/<NUM_LIT><EOL>nP4O10 = nP/<NUM_LIT><EOL>nH2O = (nH - nCl - nF)/<NUM_LIT><EOL>Hc = (nBr2*HfBr2 + nI2*HfI2) + (nHCl*HfHCl + nHF*HfHF) + nSO2*HfSO2 +nN2*HfN2 + nP4O10*HfP4O10 + nH2O*HfH2O - nO2_req*HfO2 + nCO2*HfCO2 - Hf<EOL>return Hc<EOL>", "docstring": "Calculates the heat of combustion, in J/mol.\n    Value non-hydrocarbons is not correct, but still calculable.\n\n    Parameters\n    ----------\n    atoms : dict\n        Dictionary of atoms and their 
counts, []\n    Hf : float\n        Heat of formation of given chemical, [J/mol]\n    HfH2O : float, optional\n        Heat of formation of water, [J/mol]\n    HfCO2 : float, optional\n        Heat of formation of carbon dioxide, [J/mol]\n    HfSO2 : float, optional\n        Heat of formation of sulfur dioxide, [J/mol]\n    HfBr2 : float, optional\n        Heat of formation of bromine, [J/mol]\n    HfI2 : float, optional\n        Heat of formation of iodine, [J/mol]\n    HfHCl : float, optional\n        Heat of formation of chlorine, [J/mol]\n    HfHF : float, optional\n        Heat of formation of hydrogen fluoride, [J/mol]\n    HfP4O10 : float, optional\n        Heat of formation of phosphorus pentoxide, [J/mol]\n    HfO2 : float, optional\n        Heat of formation of oxygen, [J/mol]\n    HfN2 : float, optional\n        Heat of formation of nitrogen, [J/mol]\n\n    Returns\n    -------\n    Hc : float\n        Heat of combustion of chemical, [J/mol]\n\n    Notes\n    -----\n    Default heats of formation for chemicals are at 298 K, 1 atm.\n\n    Examples\n    --------\n    Liquid methanol burning\n\n    >>> Hcombustion({'H': 4, 'C': 1, 'O': 1}, Hf=-239100)\n    -726024.0", "id": "f15787:m0"}
{"signature": "def Wagner_original(T, Tc, Pc, a, b, c, d):", "body": "Tr = T/Tc<EOL>tau = <NUM_LIT:1.0> - Tr<EOL>return Pc*exp((a*tau + b*tau**<NUM_LIT> + c*tau**<NUM_LIT:3> + d*tau**<NUM_LIT:6>)/Tr)<EOL>", "docstring": "r'''Calculates vapor pressure using the Wagner equation (3, 6 form).\n\n    Requires critical temperature and pressure as well as four coefficients\n    specific to each chemical.\n\n    .. math::\n        \\ln P^{sat}= \\ln P_c + \\frac{a\\tau + b \\tau^{1.5} + c\\tau^3 + d\\tau^6}\n        {T_r}\n\n        \\tau = 1 - \\frac{T}{T_c}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid, [K]\n    Tc : float\n        Critical temperature, [K]\n    Pc : float\n        Critical pressure, [Pa]\n    a, b, c, d : floats\n        Parameters for wagner equation. Specific to each chemical. [-]\n\n    Returns\n    -------\n    Psat : float\n        Vapor pressure at T [Pa]\n\n    Notes\n    -----\n    Warning: Pc is often treated as adjustable constant.\n\n    Examples\n    --------\n    Methane, coefficients from [2]_, at 100 K.\n\n    >>> Wagner_original(100.0, 190.53, 4596420., a=-6.00435, b=1.1885, \n    ... c=-0.834082, d=-1.22833)\n    34520.44601450496\n\n    References\n    ----------\n    .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n       New York: McGraw-Hill Professional, 2000.\n    .. [2] McGarry, Jack. \"Correlation and Prediction of the Vapor Pressures of\n       Pure Liquids over Large Pressure Ranges.\" Industrial & Engineering\n       Chemistry Process Design and Development 22, no. 2 (April 1, 1983):\n       313-22. doi:10.1021/i200021a023.", "id": "f15789:m2"}
{"signature": "def Edalat(T, Tc, Pc, omega):", "body": "tau = <NUM_LIT:1.> - T/Tc<EOL>a = -<NUM_LIT> - <NUM_LIT>*omega<EOL>c = -<NUM_LIT> - <NUM_LIT>*omega<EOL>d = <NUM_LIT:1.>/(-<NUM_LIT> - <NUM_LIT>*omega + <NUM_LIT>*omega**<NUM_LIT:2>)<EOL>b = <NUM_LIT> - <NUM_LIT>*omega - <NUM_LIT>*d<EOL>lnPr = (a*tau + b*tau**<NUM_LIT> + c*tau**<NUM_LIT:3> + d*tau**<NUM_LIT:6>)/(<NUM_LIT:1.>-tau)<EOL>return exp(lnPr)*Pc<EOL>", "docstring": "r'''Calculates vapor pressure of a fluid at arbitrary temperatures using a\n    CSP relationship by [1]_. Requires a chemical's critical temperature,\n    pressure, and acentric factor. Claimed to have a higher accuracy than the\n    Lee-Kesler CSP relationship.\n\n    The vapor pressure of a chemical at `T` is given by:\n\n    .. math::\n        \\ln(P^{sat}/P_c) = \\frac{a\\tau + b\\tau^{1.5} + c\\tau^3 + d\\tau^6}\n        {1-\\tau}\n\n        a = -6.1559 - 4.0855\\omega\n\n        b = 1.5737 - 1.0540\\omega - 4.4365\\times 10^{-3} d\n\n        c = -0.8747 - 7.8874\\omega\n\n        d = \\frac{1}{-0.4893 - 0.9912\\omega + 3.1551\\omega^2}\n\n        \\tau = 1 - \\frac{T}{T_c}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n    omega : float\n        Acentric factor [-]\n\n    Returns\n    -------\n    Psat : float\n        Vapor pressure, [Pa]\n\n    Notes\n    -----\n    [1]_ found an average error of 6.06% on 94 compounds and 1106 data points.\n\n    Examples\n    --------\n    >>> Edalat(347.2, 617.1, 36E5, 0.299)\n    13461.273080743307\n\n    References\n    ----------\n    .. [1] Edalat, M., R. B. Bozar-Jomehri, and G. A. Mansoori. \"Generalized \n       Equation Predicts Vapor Pressure of Hydrocarbons.\" Oil and Gas Journal; \n       91:5 (February 1, 1993).", "id": "f15789:m8"}
{"signature": "def Lee_Kesler(T, Tc, Pc, omega):", "body": "Tr = T/Tc<EOL>f0 = <NUM_LIT> - <NUM_LIT>/Tr - <NUM_LIT>*log(Tr) + <NUM_LIT>*Tr**<NUM_LIT:6><EOL>f1 = <NUM_LIT> - <NUM_LIT>/Tr - <NUM_LIT>*log(Tr) + <NUM_LIT>*Tr**<NUM_LIT:6><EOL>return exp(f0 + omega*f1)*Pc<EOL>", "docstring": "r'''Calculates vapor pressure of a fluid at arbitrary temperatures using a\n    CSP relationship by [1]_; requires a chemical's critical temperature and\n    acentric factor.\n\n    The vapor pressure is given by:\n\n    .. math::\n        \\ln P^{sat}_r = f^{(0)} + \\omega f^{(1)}\n\n        f^{(0)} = 5.92714-\\frac{6.09648}{T_r}-1.28862\\ln T_r + 0.169347T_r^6\n\n        f^{(1)} = 15.2518-\\frac{15.6875}{T_r} - 13.4721 \\ln T_r + 0.43577T_r^6\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n    omega : float\n        Acentric factor [-]\n\n    Returns\n    -------\n    Psat : float\n        Vapor pressure at T [Pa]\n\n    Notes\n    -----\n    This equation appears in [1]_ in expanded form.\n    The reduced pressure form of the equation ensures predicted vapor pressure \n    cannot surpass the critical pressure.\n\n    Examples\n    --------\n    Example from [2]_; ethylbenzene at 347.2 K.\n\n    >>> Lee_Kesler(347.2, 617.1, 36E5, 0.299)\n    13078.694162949312\n\n    References\n    ----------\n    .. [1] Lee, Byung Ik, and Michael G. Kesler. \"A Generalized Thermodynamic\n       Correlation Based on Three-Parameter Corresponding States.\" AIChE Journal\n       21, no. 3 (1975): 510-527. doi:10.1002/aic.690210313.\n    .. [2] Reid, Robert C..; Prausnitz, John M.;; Poling, Bruce E.\n       The Properties of Gases and Liquids. McGraw-Hill Companies, 1987.", "id": "f15789:m5"}
{"signature": "def Wagner(T, Tc, Pc, a, b, c, d):", "body": "Tr = T/Tc<EOL>tau = <NUM_LIT:1.0> - T/Tc<EOL>return Pc*exp((a*tau + b*tau**<NUM_LIT> + c*tau**<NUM_LIT> + d*tau**<NUM_LIT:5>)/Tr)<EOL>", "docstring": "r'''Calculates vapor pressure using the Wagner equation (2.5, 5 form).\n\n    Requires critical temperature and pressure as well as four coefficients\n    specific to each chemical.\n\n    .. math::\n        \\ln P^{sat}= \\ln P_c + \\frac{a\\tau + b \\tau^{1.5} + c\\tau^{2.5}\n        + d\\tau^5} {T_r}\n\n        \\tau = 1 - \\frac{T}{T_c}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid, [K]\n    Tc : float\n        Critical temperature, [K]\n    Pc : float\n        Critical pressure, [Pa]\n    a, b, c, d : floats\n        Parameters for wagner equation. Specific to each chemical. [-]\n\n    Returns\n    -------\n    Psat : float\n        Vapor pressure at T [Pa]\n\n    Notes\n    -----\n    Warning: Pc is often treated as adjustable constant.\n\n    Examples\n    --------\n    Methane, coefficients from [2]_, at 100 K.\n\n    >>> Wagner(100., 190.551, 4599200, -6.02242, 1.26652, -0.5707, -1.366)\n    34415.00476263708\n\n    References\n    ----------\n    .. [1] Wagner, W. \"New Vapour Pressure Measurements for Argon and Nitrogen and\n       a New Method for Establishing Rational Vapour Pressure Equations.\"\n       Cryogenics 13, no. 8 (August 1973): 470-82. doi:10.1016/0011-2275(73)90003-9\n    .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n       New York: McGraw-Hill Professional, 2000.", "id": "f15789:m3"}
{"signature": "def TRC_Antoine_extended(T, Tc, to, A, B, C, n, E, F):", "body": "x = max((T - to - <NUM_LIT>)/Tc, <NUM_LIT:0.>)<EOL>return <NUM_LIT>**(A - B/(T+C) + <NUM_LIT>*x**n + E*x**<NUM_LIT:8> + F*x**<NUM_LIT:12>)<EOL>", "docstring": "r'''Calculates vapor pressure of a chemical using the TRC Extended Antoine\n    equation. Parameters are chemical dependent, and said to be from the \n    Thermodynamics Research Center (TRC) at Texas A&M. Coefficients for various\n    chemicals can be found in [1]_.\n\n    .. math::\n        \\log_{10} P^{sat} = A - \\frac{B}{T + C} + 0.43429x^n + Ex^8 + Fx^{12}\n\n        x = \\max \\left(\\frac{T-t_o-273.15}{T_c}, 0 \\right)\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid, [K]\n    A, B, C, n, E, F : floats\n        Regressed coefficients for the Antoine Extended (TRC) equation,\n        specific for each chemical, [-]\n\n    Returns\n    -------\n    Psat : float\n        Vapor pressure calculated with coefficients [Pa]\n\n    Notes\n    -----\n    Assumes coefficients are for calculating vapor pressure in Pascal. \n    Coefficients should be consistent with input temperatures in Kelvin;\n\n    Examples\n    --------\n    Tetrafluoromethane, coefficients from [1]_, at 180 K:\n\n    >>> TRC_Antoine_extended(180.0, 227.51, -120., 8.95894, 510.595, -15.95, \n    ... 2.41377, -93.74, 7425.9) \n    706317.0898414153\n\n    References\n    ----------\n    .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n       New York: McGraw-Hill Professional, 2000.", "id": "f15789:m1"}
{"signature": "def load_all_methods(self):", "body": "methods = []<EOL>Tmins, Tmaxs = [], []<EOL>if self.CASRN in WagnerMcGarry.index:<EOL><INDENT>methods.append(WAGNER_MCGARRY)<EOL>_, A, B, C, D, self.WAGNER_MCGARRY_Pc, self.WAGNER_MCGARRY_Tc, self.WAGNER_MCGARRY_Tmin = _WagnerMcGarry_values[WagnerMcGarry.index.get_loc(self.CASRN)].tolist()<EOL>self.WAGNER_MCGARRY_coefs = [A, B, C, D]<EOL>Tmins.append(self.WAGNER_MCGARRY_Tmin); Tmaxs.append(self.WAGNER_MCGARRY_Tc)<EOL><DEDENT>if self.CASRN in WagnerPoling.index:<EOL><INDENT>methods.append(WAGNER_POLING)<EOL>_, A, B, C, D, self.WAGNER_POLING_Tc, self.WAGNER_POLING_Pc, Tmin, self.WAGNER_POLING_Tmax = _WagnerPoling_values[WagnerPoling.index.get_loc(self.CASRN)].tolist()<EOL>self.WAGNER_POLING_Tmin = Tmin if not np.isnan(Tmin) else self.WAGNER_POLING_Tmax*<NUM_LIT:0.1><EOL>self.WAGNER_POLING_coefs = [A, B, C, D]<EOL>Tmins.append(Tmin); Tmaxs.append(self.WAGNER_POLING_Tmax)<EOL><DEDENT>if self.CASRN in AntoineExtended.index:<EOL><INDENT>methods.append(ANTOINE_EXTENDED_POLING)<EOL>_, A, B, C, Tc, to, n, E, F, self.ANTOINE_EXTENDED_POLING_Tmin, self.ANTOINE_EXTENDED_POLING_Tmax = _AntoineExtended_values[AntoineExtended.index.get_loc(self.CASRN)].tolist()<EOL>self.ANTOINE_EXTENDED_POLING_coefs = [Tc, to, A, B, C, n, E, F]<EOL>Tmins.append(self.ANTOINE_EXTENDED_POLING_Tmin); Tmaxs.append(self.ANTOINE_EXTENDED_POLING_Tmax)<EOL><DEDENT>if self.CASRN in AntoinePoling.index:<EOL><INDENT>methods.append(ANTOINE_POLING)<EOL>_, A, B, C, self.ANTOINE_POLING_Tmin, self.ANTOINE_POLING_Tmax = _AntoinePoling_values[AntoinePoling.index.get_loc(self.CASRN)].tolist()<EOL>self.ANTOINE_POLING_coefs = [A, B, C]<EOL>Tmins.append(self.ANTOINE_POLING_Tmin); Tmaxs.append(self.ANTOINE_POLING_Tmax)<EOL><DEDENT>if self.CASRN in Perrys2_8.index:<EOL><INDENT>methods.append(DIPPR_PERRY_8E)<EOL>_, C1, C2, C3, C4, C5, self.Perrys2_8_Tmin, self.Perrys2_8_Tmax = _Perrys2_8_values[Perrys2_8.index.get_loc(self.CASRN)].tolist()<EOL>self.Perrys2_8_coeffs = 
[C1, C2, C3, C4, C5]<EOL>Tmins.append(self.Perrys2_8_Tmin); Tmaxs.append(self.Perrys2_8_Tmax)<EOL><DEDENT>if has_CoolProp and self.CASRN in coolprop_dict:<EOL><INDENT>methods.append(COOLPROP)<EOL>self.CP_f = coolprop_fluids[self.CASRN]<EOL>Tmins.append(self.CP_f.Tmin); Tmaxs.append(self.CP_f.Tc)<EOL><DEDENT>if self.CASRN in _VDISaturationDict:<EOL><INDENT>methods.append(VDI_TABULAR)<EOL>Ts, props = VDI_tabular_data(self.CASRN, '<STR_LIT:P>')<EOL>self.VDI_Tmin = Ts[<NUM_LIT:0>]<EOL>self.VDI_Tmax = Ts[-<NUM_LIT:1>]<EOL>self.tabular_data[VDI_TABULAR] = (Ts, props)<EOL>Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)<EOL><DEDENT>if self.CASRN in VDI_PPDS_3.index:<EOL><INDENT>_,  Tm, Tc, Pc, A, B, C, D = _VDI_PPDS_3_values[VDI_PPDS_3.index.get_loc(self.CASRN)].tolist()<EOL>self.VDI_PPDS_coeffs = [A, B, C, D]<EOL>self.VDI_PPDS_Tc = Tc<EOL>self.VDI_PPDS_Tm = Tm<EOL>self.VDI_PPDS_Pc = Pc<EOL>methods.append(VDI_PPDS)<EOL>Tmins.append(self.VDI_PPDS_Tm); Tmaxs.append(self.VDI_PPDS_Tc)<EOL><DEDENT>if all((self.Tb, self.Tc, self.Pc)):<EOL><INDENT>methods.append(BOILING_CRITICAL)<EOL>Tmins.append(<NUM_LIT>); Tmaxs.append(self.Tc)<EOL><DEDENT>if all((self.Tc, self.Pc, self.omega)):<EOL><INDENT>methods.append(LEE_KESLER_PSAT)<EOL>methods.append(AMBROSE_WALTON)<EOL>methods.append(SANJARI)<EOL>methods.append(EDALAT)<EOL>if self.eos:<EOL><INDENT>methods.append(EOS)<EOL><DEDENT>Tmins.append(<NUM_LIT>); Tmaxs.append(self.Tc)<EOL><DEDENT>self.all_methods = set(methods)<EOL>if Tmins and Tmaxs:<EOL><INDENT>self.Tmin = min(Tmins)<EOL>self.Tmax = max(Tmaxs)<EOL><DEDENT>", "docstring": "r'''Method which picks out coefficients for the specified chemical\n        from the various dictionaries and DataFrames storing it. All data is\n        stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,\n        and :obj:`all_methods` as a set of methods for which the data exists for.\n\n        Called on initialization only. 
See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15789:c0:m1"}
{"signature": "def Ambrose_Walton(T, Tc, Pc, omega):", "body": "Tr = T/Tc<EOL>tau = <NUM_LIT:1> - T/Tc<EOL>f0 = (-<NUM_LIT>*tau + <NUM_LIT>*tau**<NUM_LIT> - <NUM_LIT>*tau**<NUM_LIT> - <NUM_LIT>*tau**<NUM_LIT:5>)/Tr<EOL>f1 = (-<NUM_LIT>*tau + <NUM_LIT>*tau**<NUM_LIT> - <NUM_LIT>*tau**<NUM_LIT> - <NUM_LIT>*tau**<NUM_LIT:5>)/Tr<EOL>f2 = (-<NUM_LIT>*tau + <NUM_LIT>*tau**<NUM_LIT> - <NUM_LIT>*tau**<NUM_LIT> + <NUM_LIT>*tau**<NUM_LIT:5>)/Tr<EOL>return Pc*exp(f0 + omega*f1 + omega**<NUM_LIT:2>*f2)<EOL>", "docstring": "r'''Calculates vapor pressure of a fluid at arbitrary temperatures using a\n    CSP relationship by [1]_; requires a chemical's critical temperature and\n    acentric factor.\n\n    The vapor pressure is given by:\n\n    .. math::\n        \\ln P_r=f^{(0)}+\\omega f^{(1)}+\\omega^2f^{(2)}\n\n        f^{(0)}=\\frac{-5.97616\\tau + 1.29874\\tau^{1.5}- 0.60394\\tau^{2.5}\n        -1.06841\\tau^5}{T_r}\n\n        f^{(1)}=\\frac{-5.03365\\tau + 1.11505\\tau^{1.5}- 5.41217\\tau^{2.5}\n        -7.46628\\tau^5}{T_r}\n\n        f^{(2)}=\\frac{-0.64771\\tau + 2.41539\\tau^{1.5}- 4.26979\\tau^{2.5}\n        +3.25259\\tau^5}{T_r}\n\n        \\tau = 1-T_{r}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n    omega : float\n        Acentric factor [-]\n\n    Returns\n    -------\n    Psat : float\n        Vapor pressure at T [Pa]\n\n    Notes\n    -----\n    Somewhat more accurate than the :obj:`Lee_Kesler` formulation.\n\n    Examples\n    --------\n    Example from [2]_; ethylbenzene at 347.25 K.\n\n    >>> Ambrose_Walton(347.25, 617.15, 36.09E5, 0.304)\n    13278.878504306222\n\n    References\n    ----------\n    .. [1] Ambrose, D., and J. Walton. \"Vapour Pressures up to Their Critical\n       Temperatures of Normal Alkanes and 1-Alkanols.\" Pure and Applied\n       Chemistry 61, no. 8 (1989): 1395-1403. 
doi:10.1351/pac198961081395.\n    .. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n       New York: McGraw-Hill Professional, 2000.", "id": "f15789:m6"}
{"signature": "def Lakshmi_Prasad(T, M):", "body": "return <NUM_LIT> - <NUM_LIT>*T + (<NUM_LIT> - <NUM_LIT>*T)*M**-<NUM_LIT:0.5><EOL>", "docstring": "r'''Estimates thermal conductivity of pure liquids as a function of\n    temperature using a reference fluid approach. Low accuracy but quick.\n    Developed using several organic fluids.\n\n    .. math::\n        \\lambda = 0.0655-0.0005T + \\frac{1.3855-0.00197T}{M^{0.5}}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the fluid [K]\n    M : float\n        Molecular weight of the fluid [g/mol]\n\n    Returns\n    -------\n    kl : float\n        Estimated liquid thermal conductivity [W/m/k]\n\n    Notes\n    -----\n    This equation returns negative numbers at high T sometimes.\n    This equation is one of those implemented by DDBST.\n    If this results in a negative thermal conductivity, no value is returned.\n\n    Examples\n    --------\n    >>> Lakshmi_Prasad(273.15, 100)\n    0.013664450000000009\n\n    References\n    ----------\n    .. [1] Lakshmi, D. S., and D. H. L. Prasad. \"A Rapid Estimation Method for\n       Thermal Conductivity of Pure Liquids.\" The Chemical Engineering Journal\n       48, no. 3 (April 1992): 211-14. doi:10.1016/0300-9467(92)80037-B", "id": "f15790:m2"}
{"signature": "def DIPPR9B(T, MW, Cvm, mu, Tc=None, chemtype=None):", "body": "Cvm = Cvm*<NUM_LIT>  <EOL>if not chemtype:<EOL><INDENT>chemtype = '<STR_LIT>'<EOL><DEDENT>if chemtype == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT>*mu*Cvm/MW<EOL><DEDENT>elif chemtype == '<STR_LIT>':<EOL><INDENT>Tr = T/Tc<EOL>return mu/MW*(<NUM_LIT>*Cvm + <NUM_LIT> - <NUM_LIT>/Tr)<EOL><DEDENT>elif chemtype == '<STR_LIT>':<EOL><INDENT>return mu/MW*(<NUM_LIT>*Cvm + <NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Calculates the thermal conductivity of a gas using one of several\n    emperical equations developed in [1]_, [2]_, and presented in [3]_.\n\n    For monoatomic gases:\n\n    .. math::\n        k = 2.5 \\frac{\\eta C_v}{MW}\n\n    For linear molecules:\n\n    .. math::\n        k = \\frac{\\eta}{MW} \\left( 1.30 C_v + 14644.00 - \\frac{2928.80}{T_r}\\right)\n\n    For nonlinear molecules:\n\n    .. math::\n        k = \\frac{\\eta}{MW}(1.15C_v + 16903.36)\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the fluid [K]\n    Tc : float\n        Critical temperature of the fluid [K]\n    MW : float\n        Molwcular weight of fluid [g/mol]\n    Cvm : float\n        Molar heat capacity at constant volume of fluid, [J/mol/K]\n    mu : float\n        Viscosity of gas, [Pa*S]\n\n    Returns\n    -------\n    k_g : float\n        Thermal conductivity of gas, [W/m/k]\n\n    Notes\n    -----\n    Tested with DIPPR values.\n    Cvm is internally converted to J/kmol/K.\n\n    Examples\n    --------\n    CO:\n\n    >>> DIPPR9B(200., 28.01, 20.826, 1.277E-5, 132.92, chemtype='linear')\n    0.01813208676438415\n\n    References\n    ----------\n    .. [1] Bromley, LeRoy A., Berkeley. University of California, and U.S.\n       Atomic Energy Commission. Thermal Conductivity of Gases at Moderate\n       Pressures. UCRL;1852. Berkeley, CA: University of California Radiation\n       Laboratory, 1952.\n    .. 
[2] Stiel, Leonard I., and George Thodos. \"The Thermal Conductivity of\n       Nonpolar Substances in the Dense Gaseous and Liquid Regions.\" AIChE\n       Journal 10, no. 1 (January 1, 1964): 26-30. doi:10.1002/aic.690100114\n    .. [3] Danner, Ronald P, and Design Institute for Physical Property Data.\n       Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.", "id": "f15790:m14"}
{"signature": "def Eucken(MW, Cvm, mu):", "body": "MW = MW/<NUM_LIT><EOL>return (<NUM_LIT:1.> + <NUM_LIT:9>/<NUM_LIT>/(Cvm/R))*mu*Cvm/MW<EOL>", "docstring": "r'''Estimates the thermal conductivity of a gas as a function of\n    temperature using the CSP method of Eucken [1]_.\n\n    .. math::\n        \\frac{\\lambda M}{\\eta C_v} = 1 + \\frac{9/4}{C_v/R}\n\n    Parameters\n    ----------\n    MW : float\n        Molecular weight of the gas [g/mol]\n    Cvm : float\n        Molar contant volume heat capacity of the gas [J/mol/K]\n    mu : float\n        Gas viscosity [Pa*S]\n\n    Returns\n    -------\n    kg : float\n        Estimated gas thermal conductivity [W/m/k]\n\n    Notes\n    -----\n    Temperature dependence is introduced via heat capacity and viscosity.\n    A theoretical equation. No original author located.\n    MW internally converted to kg/g-mol.\n\n    Examples\n    --------\n    2-methylbutane at low pressure, 373.15 K. Mathes calculation in [1]_.\n\n    >>> Eucken(MW=72.151, Cvm=135.9, mu=8.77E-6)\n    0.018792644287722975\n\n    References\n    ----------\n    .. [1] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E.\n       Properties of Gases and Liquids. McGraw-Hill Companies, 1987.", "id": "f15790:m12"}
{"signature": "def load_all_methods(self):", "body": "methods = []        <EOL>methods.append(SIMPLE)<EOL>if none_and_length_check((self.Tbs, self.MWs)):<EOL><INDENT>methods.append(LINDSAY_BROMLEY)<EOL><DEDENT>self.all_methods = set(methods)<EOL>Tmins = [i.Tmin for i in self.ThermalConductivityGases if i.Tmin]<EOL>Tmaxs = [i.Tmax for i in self.ThermalConductivityGases if i.Tmax]<EOL>if Tmins:<EOL><INDENT>self.Tmin = max(Tmins)<EOL><DEDENT>if Tmaxs:<EOL><INDENT>self.Tmax = max(Tmaxs)<EOL><DEDENT>", "docstring": "r'''Method to initialize the object by precomputing any values which\n        may be used repeatedly and by retrieving mixture-specific variables.\n        All data are stored as attributes. This method also sets :obj:`Tmin`, \n        :obj:`Tmax`, and :obj:`all_methods` as a set of methods which should \n        work to calculate the property.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15790:c3:m1"}
{"signature": "def eli_hanley(T, MW, Tc, Vc, Zc, omega, Cvm):", "body": "Cs = [<NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <EOL><NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>]<EOL>Tr = T/Tc<EOL>if Tr > <NUM_LIT:2>: Tr = <NUM_LIT:2><EOL>theta = <NUM_LIT:1> + (omega - <NUM_LIT>)*(<NUM_LIT> - <NUM_LIT>*log(Tr) - <NUM_LIT>/Tr)<EOL>psi = (<NUM_LIT:1> + (omega-<NUM_LIT>)*(<NUM_LIT> - <NUM_LIT>*log(Tr)))*<NUM_LIT>/Zc<EOL>f = Tc/<NUM_LIT>*theta<EOL>h = Vc/<NUM_LIT>*psi<EOL>T0 = T/f<EOL>eta0 = <NUM_LIT>*sum([Ci*T0**((i+<NUM_LIT:1.> - <NUM_LIT>)/<NUM_LIT>) for i, Ci in enumerate(Cs)])<EOL>k0 = <NUM_LIT>*eta0<EOL>H = (<NUM_LIT>/MW)**<NUM_LIT:0.5>*f**<NUM_LIT:0.5>*h**(-<NUM_LIT:2>/<NUM_LIT>)<EOL>etas = eta0*H*MW/<NUM_LIT><EOL>ks = k0*H<EOL>return ks + etas/(MW/<NUM_LIT>)*<NUM_LIT>*(Cvm - <NUM_LIT>*R)<EOL>", "docstring": "r'''Estimates the thermal conductivity of a gas as a function of\n    temperature using the reference fluid method of Eli and Hanley [1]_ as\n    shown in [2]_.\n\n    .. 
math::\n        \\lambda = \\lambda^* + \\frac{\\eta^*}{MW}(1.32)\\left(C_v - \\frac{3R}{2}\\right)\n\n        Tr = \\text{min}(Tr, 2)\n\n        \\theta = 1 + (\\omega-0.011)\\left(0.56553 - 0.86276\\ln Tr - \\frac{0.69852}{Tr}\\right)\n\n        \\psi = [1 + (\\omega - 0.011)(0.38560 - 1.1617\\ln Tr)]\\frac{0.288}{Z_c}\n\n        f = \\frac{T_c}{190.4}\\theta\n\n        h = \\frac{V_c}{9.92E-5}\\psi\n\n        T_0 = T/f\n\n        \\eta_0^*(T_0)= \\sum_{n=1}^9 C_n T_0^{(n-4)/3}\n\n        \\theta_0 = 1944 \\eta_0\n\n        \\lambda^* = \\lambda_0 H\n\n        \\eta^* = \\eta^*_0 H \\frac{MW}{16.04}\n\n        H = \\left(\\frac{16.04}{MW}\\right)^{0.5}f^{0.5}/h^{2/3}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the gas [K]\n    MW : float\n        Molecular weight of the gas [g/mol]\n    Tc : float\n        Critical temperature of the gas [K]\n    Vc : float\n        Critical volume of the gas [m^3/mol]\n    Zc : float\n        Critical compressibility of the gas []\n    omega : float\n        Acentric factor of the gas [-]\n    Cvm : float\n        Molar contant volume heat capacity of the gas [J/mol/K]\n\n    Returns\n    -------\n    kg : float\n        Estimated gas thermal conductivity [W/m/k]\n\n    Notes\n    -----\n    Reference fluid is Methane.\n    MW internally converted to kg/g-mol.\n\n    Examples\n    --------\n    2-methylbutane at low pressure, 373.15 K. Mathes calculation in [2]_.\n\n    >>> eli_hanley(T=373.15, MW=72.151, Tc=460.4, Vc=3.06E-4, Zc=0.267,\n    ... omega=0.227, Cvm=135.9)\n    0.02247951789135337\n\n    References\n    ----------\n    .. [1] Ely, James F., and H. J. M. Hanley. \"Prediction of Transport\n       Properties. 2. Thermal Conductivity of Pure Fluids and Mixtures.\"\n       Industrial & Engineering Chemistry Fundamentals 22, no. 1 (February 1,\n       1983): 90-97. doi:10.1021/i100009a016.\n    .. 
[2] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E.\n       Properties of Gases and Liquids. McGraw-Hill Companies, 1987.", "id": "f15790:m16"}
{"signature": "def Mersmann_Kind_thermal_conductivity_liquid(T, MW, Tc, Vc, atoms):", "body": "na = sum(atoms.values())<EOL>lambda_star = <NUM_LIT:2>/<NUM_LIT>*(na + <NUM_LIT>*(<NUM_LIT:1.> - T/Tc)**<NUM_LIT:0.5>)<EOL>Vc = Vc*<NUM_LIT:1000> <EOL>N_A2 = N_A*<NUM_LIT:1000> <EOL>kl = lambda_star*(k*Tc)**<NUM_LIT>*N_A2**(<NUM_LIT:7>/<NUM_LIT>)*Vc**(-<NUM_LIT:2>/<NUM_LIT>)/Tc*MW**-<NUM_LIT:0.5><EOL>return kl<EOL>", "docstring": "r'''Estimates the thermal conductivity of organic liquid substances\n    according to the method of [1]_.\n\n    .. math::\n        \\lambda^* = \\frac{\\lambda\\cdot V_c^{2/3}\\cdot T_c\\cdot \\text{MW}^{0.5}}\n        {(k\\cdot T_c)^{1.5}\\cdot N_A^{7/6}}\n\n        \\lambda^* = \\frac{2}{3}\\left(n_a + 40\\sqrt{1-T_r}\\right)\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the fluid [K]\n    M : float\n        Molecular weight of the fluid [g/mol]\n    Tc : float\n        Critical temperature of the fluid [K]\n    Vc : float\n        Critical volume of the fluid [m^3/mol]\n    atoms : dict\n        Dictionary of atoms and their counts, [-]\n\n    Returns\n    -------\n    kl : float\n        Estimated liquid thermal conductivity [W/m/k]\n\n    Notes\n    -----\n    In the equation, all quantities must be in SI units but N_A is in a kmol\n    basis and Vc is in units of (m^3/kmol); this is converted internally.\n\n    Examples\n    --------\n    Dodecane at 400 K:\n\n    >>> Mersmann_Kind_thermal_conductivity_liquid(400, 170.33484, 658.0, \n    ... 0.000754, {'C': 12, 'H': 26})\n    0.08952713798442789\n\n    References\n    ----------\n    .. [1] Mersmann, Alfons, and Matthias Kind. \"Prediction of Mechanical and \n       Thermal Properties of Pure Liquids, of Critical Data, and of Vapor \n       Pressure.\" Industrial & Engineering Chemistry Research, January 31, \n       2017. https://doi.org/10.1021/acs.iecr.6b04323.", "id": "f15790:m7"}
{"signature": "def eli_hanley_dense(T, MW, Tc, Vc, Zc, omega, Cvm, Vm):", "body": "Cs = [<NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>,<EOL><NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>,<EOL><NUM_LIT>]<EOL>Tr = T/Tc<EOL>if Tr > <NUM_LIT:2>:<EOL><INDENT>Tr = <NUM_LIT:2><EOL><DEDENT>Vr = Vm/Vc<EOL>if Vr > <NUM_LIT:2>:<EOL><INDENT>Vr = <NUM_LIT:2><EOL><DEDENT>theta = <NUM_LIT:1> + (omega - <NUM_LIT>)*(<NUM_LIT> - <NUM_LIT>*log(Tr) + (<NUM_LIT> - <NUM_LIT>/Tr)*(Vr-<NUM_LIT:0.5>))<EOL>psi = (<NUM_LIT:1> + (omega-<NUM_LIT>)*(<NUM_LIT>*(Vr-<NUM_LIT>) - <NUM_LIT>*(Vr-<NUM_LIT>)*log(Tr)))*<NUM_LIT>/Zc<EOL>f = Tc/<NUM_LIT>*theta<EOL>h = Vc/<NUM_LIT>*psi<EOL>T0 = T/f<EOL>rho0 = <NUM_LIT>/(Vm*<NUM_LIT>)*h  <EOL>eta0 = <NUM_LIT>*sum([Cs[i]*T0**((i+<NUM_LIT:1>-<NUM_LIT:4>)/<NUM_LIT>) for i in range(len(Cs))])<EOL>k1 = <NUM_LIT>*eta0<EOL>b1 = -<NUM_LIT><EOL>b2 = <NUM_LIT><EOL>b3 = <NUM_LIT><EOL>b4 = <NUM_LIT><EOL>k2 = (b1 + b2*(b3 - log(T0/b4))**<NUM_LIT:2>)/<NUM_LIT>*rho0<EOL>a1 = -<NUM_LIT><EOL>a2 = <NUM_LIT><EOL>a3 = <NUM_LIT><EOL>a4 = -<NUM_LIT><EOL>a5 = <NUM_LIT><EOL>a6 = <NUM_LIT><EOL>a7 = -<NUM_LIT><EOL>k3 = exp(a1 + a2/T0)*(exp((a3 + a4/T0**<NUM_LIT>)*rho0**<NUM_LIT:0.1> + (rho0/<NUM_LIT> - <NUM_LIT:1>)*rho0**<NUM_LIT:0.5>*(a5 + a6/T0 + a7/T0**<NUM_LIT:2>)) - <NUM_LIT:1>)/<NUM_LIT><EOL>if T/Tc > <NUM_LIT:2>:<EOL><INDENT>dtheta = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>dtheta = (omega - <NUM_LIT>)*(-<NUM_LIT>/T + (Vr-<NUM_LIT:0.5>)*<NUM_LIT>*Tc/T**<NUM_LIT:2>)<EOL><DEDENT>dfdT = Tc/<NUM_LIT>*dtheta<EOL>X = ((<NUM_LIT:1> - T/f*dfdT)*<NUM_LIT>/Zc)**<NUM_LIT><EOL>H = (<NUM_LIT>/MW)**<NUM_LIT:0.5>*f**<NUM_LIT:0.5>/h**(<NUM_LIT:2>/<NUM_LIT>)<EOL>ks = (k1*X + k2 + k3)*H<EOL>theta = <NUM_LIT:1> + (omega - <NUM_LIT>)*(<NUM_LIT> - <NUM_LIT>*log(Tr) - <NUM_LIT>/Tr)<EOL>psi = (<NUM_LIT:1> + (omega-<NUM_LIT>)*(<NUM_LIT> - <NUM_LIT>*log(Tr)))*<NUM_LIT>/Zc<EOL>f = Tc/<NUM_LIT>*theta<EOL>h = Vc/<NUM_LIT>*psi<EOL>T0 = T/f<EOL>eta0 = 
<NUM_LIT>*sum([Cs[i]*T0**((i+<NUM_LIT:1>-<NUM_LIT:4>)/<NUM_LIT>) for i in range(len(Cs))])<EOL>H = (<NUM_LIT>/MW)**<NUM_LIT:0.5>*f**<NUM_LIT:0.5>/h**(<NUM_LIT:2>/<NUM_LIT>)<EOL>etas = eta0*H*MW/<NUM_LIT><EOL>k = ks + etas/(MW/<NUM_LIT>)*<NUM_LIT>*(Cvm-<NUM_LIT:3>*R/<NUM_LIT>)<EOL>return k<EOL>", "docstring": "r'''Estimates the thermal conductivity of a gas at high pressure as a\n    function of temperature using the reference fluid method of Eli and\n    Hanley [1]_ as shown in [2]_.\n\n    .. math::\n        Tr = min(Tr, 2)\n\n        Vr = min(Vr, 2)\n\n        f = \\frac{T_c}{190.4}\\theta\n\n        h = \\frac{V_c}{9.92E-5}\\psi\n\n        T_0 = T/f\n\n        \\rho_0 = \\frac{16.04}{V}h\n\n        \\theta = 1 + (\\omega-0.011)\\left(0.09057 - 0.86276\\ln Tr + \\left(\n        0.31664 - \\frac{0.46568}{Tr}\\right) (V_r - 0.5)\\right)\n\n        \\psi = [1 + (\\omega - 0.011)(0.39490(V_r - 1.02355) - 0.93281(V_r -\n        0.75464)\\ln T_r]\\frac{0.288}{Z_c}\n\n        \\lambda_1 = 1944 \\eta_0\n\n        \\lambda_2 = \\left\\{b_1 + b_2\\left[b_3 - \\ln \\left(\\frac{T_0}{b_4}\n        \\right)\\right]^2\\right\\}\\rho_0\n\n        \\lambda_3 = \\exp\\left(a_1 + \\frac{a_2}{T_0}\\right)\\left\\{\\exp[(a_3 +\n        \\frac{a_4}{T_0^{1.5}})\\rho_0^{0.1} + (\\frac{\\rho_0}{0.1617} - 1)\n        \\rho_0^{0.5}(a_5 + \\frac{a_6}{T_0} + \\frac{a_7}{T_0^2})] - 1\\right\\}\n\n        \\lambda^{**} = [\\lambda_1 + \\lambda_2 + \\lambda_3]H\n\n        H = \\left(\\frac{16.04}{MW}\\right)^{0.5}f^{0.5}/h^{2/3}\n\n        X = \\left\\{\\left[1 - \\frac{T}{f}\\left(\\frac{df}{dT}\\right)_v \\right]\n        \\frac{0.288}{Z_c}\\right\\}^{1.5}\n\n        \\left(\\frac{df}{dT}\\right)_v = \\frac{T_c}{190.4}\\left(\\frac{d\\theta}\n        {d T}\\right)_v\n\n        \\left(\\frac{d\\theta}{d T}\\right)_v = (\\omega-0.011)\\left[\n        \\frac{-0.86276}{T} + (V_r-0.5)\\frac{0.46568T_c}{T^2}\\right]\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the gas 
[K]\n    MW : float\n        Molecular weight of the gas [g/mol]\n    Tc : float\n        Critical temperature of the gas [K]\n    Vc : float\n        Critical volume of the gas [m^3/mol]\n    Zc : float\n        Critical compressibility of the gas []\n    omega : float\n        Acentric factor of the gas [-]\n    Cvm : float\n        Molar contant volume heat capacity of the gas [J/mol/K]\n    Vm : float\n        Volume of the gas at T and P [m^3/mol]\n\n    Returns\n    -------\n    kg : float\n        Estimated dense gas thermal conductivity [W/m/k]\n\n    Notes\n    -----\n    Reference fluid is Methane.\n    MW internally converted to kg/g-mol.\n\n    Examples\n    --------\n    >>> eli_hanley_dense(T=473., MW=42.081, Tc=364.9, Vc=1.81E-4, Zc=0.274,\n    ... omega=0.144, Cvm=82.70, Vm=1.721E-4)\n    0.06038475936515042\n\n    References\n    ----------\n    .. [1] Ely, James F., and H. J. M. Hanley. \"Prediction of Transport\n       Properties. 2. Thermal Conductivity of Pure Fluids and Mixtures.\"\n       Industrial & Engineering Chemistry Fundamentals 22, no. 1 (February 1,\n       1983): 90-97. doi:10.1021/i100009a016.\n    .. [2] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E.\n       Properties of Gases and Liquids. McGraw-Hill Companies, 1987.", "id": "f15790:m20"}
{"signature": "def calculate(self, T, P, zs, ws, method):", "body": "if method == SIMPLE:<EOL><INDENT>ks = [i(T, P) for i in self.ThermalConductivityLiquids]<EOL>return mixing_simple(zs, ks)<EOL><DEDENT>elif method == DIPPR_9H:<EOL><INDENT>ks = [i(T, P) for i in self.ThermalConductivityLiquids]<EOL>return DIPPR9H(ws, ks)<EOL><DEDENT>elif method == FILIPPOV:<EOL><INDENT>ks = [i(T, P) for i in self.ThermalConductivityLiquids]<EOL>return Filippov(ws, ks)<EOL><DEDENT>elif method == MAGOMEDOV:<EOL><INDENT>k_w = self.ThermalConductivityLiquids[self.index_w](T, P)<EOL>ws = list(ws) ; ws.pop(self.index_w)<EOL>return thermal_conductivity_Magomedov(T, P, ws, self.wCASs, k_w)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Method to calculate thermal conductivity of a liquid mixture at \n        temperature `T`, pressure `P`, mole fractions `zs` and weight fractions\n        `ws` with a given method.\n\n        This method has no exception handling; see `mixture_property`\n        for that.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the property, [K]\n        P : float\n            Pressure at which to calculate the property, [Pa]\n        zs : list[float]\n            Mole fractions of all species in the mixture, [-]\n        ws : list[float]\n            Weight fractions of all species in the mixture, [-]\n        method : str\n            Name of the method to use\n\n        Returns\n        -------\n        k : float\n            Thermal conductivity of the liquid mixture, [W/m/K]", "id": "f15790:c1:m2"}
{"signature": "def Sheffy_Johnson(T, M, Tm):", "body": "return <NUM_LIT>*(<NUM_LIT:1> - <NUM_LIT>*(T - Tm))/(Tm**<NUM_LIT>*M**<NUM_LIT>)<EOL>", "docstring": "r'''Calculate the thermal conductivity of a liquid as a function of\n    temperature using the Sheffy-Johnson (1961) method. Requires\n    Temperature, molecular weight, and melting point.\n\n    .. math::\n        k = 1.951 \\frac{1-0.00126(T-T_m)}{T_m^{0.216}MW^{0.3}}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the fluid [K]\n    M : float\n        Molecular weight of the fluid [g/mol]\n    Tm : float\n        Melting point of the fluid [K]\n\n    Returns\n    -------\n    kl : float\n        Thermal conductivity of the fluid, W/m/k\n\n    Notes\n    -----\n    The origin of this equation has been challenging to trace. It is\n    presently unknown, and untested.\n\n    Examples\n    --------\n    >>> Sheffy_Johnson(300, 47, 280)\n    0.17740150413112196\n\n    References\n    ----------\n    .. [1] Scheffy, W. J., and E. F. Johnson. \"Thermal Conductivities of\n       Liquids at High Temperatures.\" Journal of Chemical & Engineering Data\n       6, no. 2 (April 1, 1961): 245-49. doi:10.1021/je60010a019", "id": "f15790:m0"}
{"signature": "def Nicola_original(T, M, Tc, omega, Hfus):", "body": "Tr = T/Tc<EOL>Hfus = Hfus*<NUM_LIT:1000><EOL>return -<NUM_LIT> - <NUM_LIT>*Tr + <NUM_LIT>*Hfus + <NUM_LIT>*omega + (<NUM_LIT:1.>/M)**<NUM_LIT><EOL>", "docstring": "r'''Estimates the thermal conductivity of a liquid as a function of\n    temperature using the CSP method of Nicola [1]_. A  simpler but long\n    method claiming high-accuracy and using only statistically significant\n    variable following analalysis.\n\n    Requires temperature, molecular weight, critical temperature, acentric\n    factor and the heat of vaporization.\n\n    .. math::\n        \\frac{\\lambda}{1 \\text{Wm/K}}=-0.5694-0.1436T_r+5.4893\\times10^{-10}\n        \\frac{\\Delta_{fus}H}{\\text{kmol/J}}+0.0508\\omega +\n        \\left(\\frac{1 \\text{kg/kmol}}{MW}\\right)^{0.0622}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the fluid [K]\n    M : float\n        Molecular weight of the fluid [g/mol]\n    Tc : float\n        Critical temperature of the fluid [K]\n    omega : float\n        Acentric factor of the fluid [-]\n    Hfus : float\n        Heat of fusion of the fluid [J/mol]\n\n    Returns\n    -------\n    kl : float\n        Estimated liquid thermal conductivity [W/m/k]\n\n    Notes\n    -----\n    A weird statistical correlation. Recent and yet to be reviewed.\n    This correlation has been superceded by the author's later work.\n    Hfus is internally converted to be in J/kmol.\n\n    Examples\n    --------\n    >>> Nicola_original(300, 142.3, 611.7, 0.49, 201853)\n    0.2305018632230984\n\n    References\n    ----------\n    .. [1] Nicola, Giovanni Di, Eleonora Ciarrocchi, Mariano Pierantozzi, and\n        Roman Stryjek. \"A New Equation for the Thermal Conductivity of Organic\n        Compounds.\" Journal of Thermal Analysis and Calorimetry 116, no. 1\n        (April 1, 2014): 135-40. doi:10.1007/s10973-013-3422-7", "id": "f15790:m4"}
{"signature": "def DIPPR9G(T, P, Tc, Pc, kl):", "body": "Tr = T/Tc<EOL>Pr = P/Pc<EOL>return kl*(<NUM_LIT> + <NUM_LIT>*Pr*Tr**<NUM_LIT> + <NUM_LIT>*Tr**<NUM_LIT>*(Pr/(<NUM_LIT> + Pr)))<EOL>", "docstring": "r'''Adjustes for pressure the thermal conductivity of a liquid using an\n    emperical formula based on [1]_, but as given in [2]_.\n\n    .. math::\n        k = k^* \\left[ 0.98 + 0.0079 P_r T_r^{1.4} + 0.63 T_r^{1.2}\n        \\left( \\frac{P_r}{30 + P_r}\\right)\\right]\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    P : float\n        Pressure of fluid [Pa]\n    Tc: float\n        Critical point of fluid [K]\n    Pc : float\n        Critical pressure of the fluid [Pa]\n    kl : float\n        Thermal conductivity of liquid at 1 atm or saturation, [W/m/K]\n\n    Returns\n    -------\n    kl_dense : float\n        Thermal conductivity of liquid at P, [W/m/K]\n\n    Notes\n    -----\n    This equation is entrely dimensionless; all dimensions cancel.\n    The original source has not been reviewed.\n\n    This is DIPPR Procedure 9G: Method for the Thermal Conductivity of Pure\n    Nonhydrocarbon Liquids at High Pressures\n\n    Examples\n    --------\n    From [2]_, for butyl acetate.\n\n    >>> DIPPR9G(515.05, 3.92E7, 579.15, 3.212E6, 7.085E-2)\n    0.0864419738671184\n\n    References\n    ----------\n    .. [1] Missenard, F. A., Thermal Conductivity of Organic Liquids of a\n       Series or a Group of Liquids , Rev. Gen.Thermodyn., 101 649 (1970).\n    .. [2] Danner, Ronald P, and Design Institute for Physical Property Data.\n       Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.", "id": "f15790:m8"}
{"signature": "def calculate(self, T, P, zs, ws, method):", "body": "if method == SIMPLE:<EOL><INDENT>ks = [i(T, P) for i in self.ThermalConductivityGases]<EOL>return mixing_simple(zs, ks)<EOL><DEDENT>elif method == LINDSAY_BROMLEY:<EOL><INDENT>ks = [i(T, P) for i in self.ThermalConductivityGases]<EOL>mus = [i(T, P) for i in self.ViscosityGases]<EOL>return Lindsay_Bromley(T=T, ys=zs, ks=ks, mus=mus, Tbs=self.Tbs, MWs=self.MWs)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Method to calculate thermal conductivity of a gas mixture at \n        temperature `T`, pressure `P`, mole fractions `zs` and weight fractions\n        `ws` with a given method.\n\n        This method has no exception handling; see `mixture_property`\n        for that.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the property, [K]\n        P : float\n            Pressure at which to calculate the property, [Pa]\n        zs : list[float]\n            Mole fractions of all species in the mixture, [-]\n        ws : list[float]\n            Weight fractions of all species in the mixture, [-]\n        method : str\n            Name of the method to use\n\n        Returns\n        -------\n        kg : float\n            Thermal conductivity of gas mixture, [W/m/K]", "id": "f15790:c3:m2"}
{"signature": "def load_all_methods(self):", "body": "methods, methods_P = [], []<EOL>Tmins, Tmaxs = [], []<EOL>if self.CASRN in _VDISaturationDict:<EOL><INDENT>methods.append(VDI_TABULAR)<EOL>Ts, props = VDI_tabular_data(self.CASRN, '<STR_LIT>')<EOL>self.VDI_Tmin = Ts[<NUM_LIT:0>]<EOL>self.VDI_Tmax = Ts[-<NUM_LIT:1>]<EOL>self.tabular_data[VDI_TABULAR] = (Ts, props)<EOL>Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)<EOL><DEDENT>if has_CoolProp and self.CASRN in coolprop_dict:<EOL><INDENT>methods.append(COOLPROP); methods_P.append(COOLPROP)<EOL>self.CP_f = coolprop_fluids[self.CASRN]<EOL>Tmins.append(self.CP_f.Tmin); Tmaxs.append(self.CP_f.Tc)<EOL><DEDENT>if self.CASRN in Perrys2_314.index:<EOL><INDENT>methods.append(DIPPR_PERRY_8E)<EOL>_, C1, C2, C3, C4, self.Perrys2_314_Tmin, self.Perrys2_314_Tmax = _Perrys2_314_values[Perrys2_314.index.get_loc(self.CASRN)].tolist()<EOL>self.Perrys2_314_coeffs = [C1, C2, C3, C4]<EOL>Tmins.append(self.Perrys2_314_Tmin); Tmaxs.append(self.Perrys2_314_Tmax)<EOL><DEDENT>if self.CASRN in VDI_PPDS_10.index:<EOL><INDENT>_,  A, B, C, D, E = _VDI_PPDS_10_values[VDI_PPDS_10.index.get_loc(self.CASRN)].tolist()<EOL>self.VDI_PPDS_coeffs = [A, B, C, D, E]<EOL>self.VDI_PPDS_coeffs.reverse()<EOL>methods.append(VDI_PPDS)<EOL><DEDENT>if all((self.MW, self.Tb, self.Pc, self.omega)):<EOL><INDENT>methods.append(GHARAGHEIZI_G)<EOL>Tmaxs.append(<NUM_LIT>)<EOL><DEDENT>if all((self.Cvgm, self.mug, self.MW, self.Tc)):<EOL><INDENT>methods.append(DIPPR_9B)<EOL>Tmins.append(<NUM_LIT>); Tmaxs.append(<NUM_LIT>)  <EOL><DEDENT>if all((self.Cvgm, self.mug, self.MW, self.Tc, self.omega)):<EOL><INDENT>methods.append(CHUNG)<EOL>Tmins.append(<NUM_LIT>); Tmaxs.append(<NUM_LIT>)  <EOL><DEDENT>if all((self.Cvgm, self.MW, self.Tc, self.Vc, self.Zc, self.omega)):<EOL><INDENT>methods.append(ELI_HANLEY)<EOL>Tmaxs.append(<NUM_LIT>)  <EOL><DEDENT>if all((self.Cvgm, self.mug, 
self.MW)):<EOL><INDENT>methods.append(EUCKEN_MOD)<EOL>methods.append(EUCKEN)<EOL>Tmins.append(<NUM_LIT>); Tmaxs.append(<NUM_LIT>)  <EOL><DEDENT>if self.MW:<EOL><INDENT>methods.append(BAHADORI_G)<EOL><DEDENT>if all([self.MW, self.Tc, self.Vc, self.Zc, self.omega]):<EOL><INDENT>methods_P.append(ELI_HANLEY_DENSE)<EOL><DEDENT>if all([self.MW, self.Tc, self.Vc, self.omega, self.dipole]):<EOL><INDENT>methods_P.append(CHUNG_DENSE)<EOL><DEDENT>if all([self.MW, self.Tc, self.Pc, self.Vc, self.Zc]):<EOL><INDENT>methods_P.append(STIEL_THODOS_DENSE)<EOL><DEDENT>self.all_methods = set(methods)<EOL>self.all_methods_P = set(methods_P)<EOL>if Tmins and Tmaxs:<EOL><INDENT>self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)<EOL><DEDENT>", "docstring": "r'''Method which picks out coefficients for the specified chemical\n        from the various dictionaries and DataFrames storing it. All data is\n        stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,\n        :obj:`all_methods` and obj:`all_methods_P` as a set of methods for\n        which the data exists for.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15790:c2:m1"}
{"signature": "def DIPPR9H(ws, ks):", "body": "if not none_and_length_check([ks, ws]):  <EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return sum(ws[i]/ks[i]**<NUM_LIT:2> for i in range(len(ws)))**(-<NUM_LIT:0.5>)<EOL>", "docstring": "r'''Calculates thermal conductivity of a liquid mixture according to\n    mixing rules in [1]_ and also in [2]_.\n\n    .. math::\n        \\lambda_m = \\left( \\sum_i w_i \\lambda_i^{-2}\\right)^{-1/2}\n\n    Parameters\n    ----------\n    ws : float\n        Mass fractions of components\n    ks : float\n        Liquid thermal conductivites of all components, [W/m/K]\n\n    Returns\n    -------\n    kl : float\n        Thermal conductivity of liquid mixture, [W/m/K]\n\n    Notes\n    -----\n    This equation is entirely dimensionless; all dimensions cancel.\n    The example is from [2]_; all results agree.\n    The original source has not been reviewed.\n\n    DIPPR Procedure 9H: Method for the Thermal Conductivity of Nonaqueous Liquid Mixtures\n\n    Average deviations of 3%. for 118 nonaqueous systems with 817 data points.\n    Max deviation 20%. According to DIPPR.\n\n    Examples\n    --------\n    >>> DIPPR9H([0.258, 0.742], [0.1692, 0.1528])\n    0.15657104706719646\n\n    References\n    ----------\n    .. [1] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E. The\n       Properties of Gases and Liquids. McGraw-Hill Companies, 1987.\n    .. [2] Danner, Ronald P, and Design Institute for Physical Property Data.\n       Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.", "id": "f15790:m10"}
{"signature": "def Suzuki_LFL(Hc=None):", "body": "Hc = Hc/<NUM_LIT><EOL>LFL = -<NUM_LIT>/Hc + <NUM_LIT>*Hc + <NUM_LIT>*Hc*Hc + <NUM_LIT><EOL>return LFL/<NUM_LIT><EOL>", "docstring": "r'''Calculates lower flammability limit, using the Suzuki [1]_ correlation.\n    Uses heat of combustion only.\n\n    The lower flammability limit of a gas is air is:\n\n    .. math::\n        \\text{LFL} = \\frac{-3.42}{\\Delta H_c^{\\circ}} + 0.569\n        \\Delta H_c^{\\circ} + 0.0538\\Delta H_c^{\\circ 2} + 1.80\n\n    Parameters\n    ----------\n    Hc : float\n        Heat of combustion of gas [J/mol]\n\n    Returns\n    -------\n    LFL : float\n        Lower flammability limit, mole fraction [-]\n\n    Notes\n    -----\n    Fit performed with 112 compounds, r^2 was 0.977.\n    LFL in percent volume in air. Hc is at standard conditions, in MJ/mol.\n    11 compounds left out as they were outliers.\n    Equation does not apply for molecules with halogen atoms, only hydrocarbons\n    with oxygen or nitrogen or sulfur.\n    No sample calculation provided with the article. However, the equation is\n    straightforward.\n\n    Limits of equations's validity are -6135596 J where it predicts a\n    LFL of 0, and -48322129 J where it predicts a LFL of 1.\n\n    Examples\n    --------\n    Pentane, 1.5 % LFL in literature\n\n    >>> Suzuki_LFL(-3536600)\n    0.014276107095811815\n\n    References\n    ----------\n    .. [1] Suzuki, Takahiro. \"Note: Empirical Relationship between Lower\n       Flammability Limits and Standard Enthalpies of Combustion of Organic\n       Compounds.\" Fire and Materials 18, no. 5 (September 1, 1994): 333-36.\n       doi:10.1002/fam.810180509.", "id": "f15791:m15"}
{"signature": "def Carcinogen(CASRN, AvailableMethods=False, Method=None):", "body": "methods = [COMBINED, IARC, NTP]<EOL>if AvailableMethods:<EOL><INDENT>return methods<EOL><DEDENT>if not Method:<EOL><INDENT>Method = methods[<NUM_LIT:0>]<EOL><DEDENT>if Method == IARC:<EOL><INDENT>if CASRN in IARC_data.index:<EOL><INDENT>status = IARC_codes[IARC_data.at[CASRN, '<STR_LIT>']]<EOL><DEDENT>else:<EOL><INDENT>status = UNLISTED<EOL><DEDENT><DEDENT>elif Method == NTP:<EOL><INDENT>if CASRN in NTP_data.index:<EOL><INDENT>status = NTP_codes[NTP_data.at[CASRN, '<STR_LIT>']]<EOL><DEDENT>else:<EOL><INDENT>status = UNLISTED<EOL><DEDENT><DEDENT>elif Method == COMBINED:<EOL><INDENT>status = {}<EOL>for method in methods[<NUM_LIT:1>:]:<EOL><INDENT>status[method] = Carcinogen(CASRN, Method=method)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return status<EOL>", "docstring": "r'''Looks up if a chemical is listed as a carcinogen or not according to\n    either a specifc method or with all methods.\n\n    Returns either the status as a string for a specified method, or the\n    status of the chemical in all available data sources, in the format\n    {source: status}.\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    status : str or dict\n        Carcinogen status information [-]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain carcinogen status with the\n        given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        A string for the method name to use, as defined by constants in\n        Carcinogen_methods\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        if a chemical is listed as carcinogenic, and will return methods\n        instead of the status\n\n    Notes\n    -----\n    Supported methods are:\n\n        * 
**IARC**: International Agency for Research on Cancer, [1]_. As\n          extracted with a last update of  February 22, 2016. Has listing\n          information of 843 chemicals with CAS numbers. Chemicals without\n          CAS numbers not included here. If two listings for the same CAS\n          were available, that closest to the CAS number was used. If two\n          listings were available published at different times, the latest\n          value was used. All else equal, the most pessimistic value was used.\n        * **NTP**: National Toxicology Program, [2]_. Has data on 226\n          chemicals.\n\n    Examples\n    --------\n    >>> Carcinogen('61-82-5')\n    {'National Toxicology Program 13th Report on Carcinogens': 'Reasonably Anticipated', 'International Agency for Research on Cancer': 'Not classifiable as to its carcinogenicity to humans (3)'}\n\n    References\n    ----------\n    .. [1] International Agency for Research on Cancer. Agents Classified by\n       the IARC Monographs, Volumes 1-115. Lyon, France: IARC; 2016 Available\n       from: http://monographs.iarc.fr/ENG/Classification/\n    .. [2] NTP (National Toxicology Program). 2014. Report on Carcinogens,\n       Thirteenth Edition. Research Triangle Park, NC: U.S. Department of\n       Health and Human Services, Public Health Service.\n       http://ntp.niehs.nih.gov/pubhealth/roc/roc13/", "id": "f15791:m7"}
{"signature": "def Ceiling(CASRN, AvailableMethods=False, Method=None):  ", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in _OntarioExposureLimits and (_OntarioExposureLimits[CASRN][\"<STR_LIT>\"] or _OntarioExposureLimits[CASRN][\"<STR_LIT>\"]):<EOL><INDENT>methods.append(ONTARIO)<EOL><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == ONTARIO:<EOL><INDENT>if _OntarioExposureLimits[CASRN][\"<STR_LIT>\"]:<EOL><INDENT>_Ceiling = (_OntarioExposureLimits[CASRN][\"<STR_LIT>\"], '<STR_LIT>')<EOL><DEDENT>elif _OntarioExposureLimits[CASRN][\"<STR_LIT>\"]:<EOL><INDENT>_Ceiling = (_OntarioExposureLimits[CASRN][\"<STR_LIT>\"], '<STR_LIT>')<EOL><DEDENT><DEDENT>elif Method == NONE:<EOL><INDENT>_Ceiling = None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return _Ceiling<EOL>", "docstring": "This function handles the retrieval of Ceiling limits on worker\n    exposure to dangerous chemicals.\n\n    This API is considered experimental, and is expected to be removed in a\n    future release in favor of a more complete object-oriented interface.\n\n    >>> Ceiling('75-07-0')\n    (25.0, 'ppm')\n    >>> Ceiling('1395-21-7')\n    (6e-05, 'mg/m^3')\n    >>> Ceiling('7572-29-4', AvailableMethods=True)\n    ['Ontario Limits', 'None']", "id": "f15791:m5"}
{"signature": "def Tautoignition(CASRN, AvailableMethods=False, Method=None):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in IEC_2010.index and not np.isnan(IEC_2010.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(IEC)<EOL><DEDENT>if CASRN in NFPA_2008.index and not np.isnan(NFPA_2008.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(NFPA)<EOL><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == IEC:<EOL><INDENT>return float(IEC_2010.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == NFPA:<EOL><INDENT>return float(NFPA_2008.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == NONE:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''This function handles the retrieval or calculation of a chemical's\n    autoifnition temperature. Lookup is based on CASRNs. No predictive methods\n    are currently implemented. 
Will automatically select a data source to use\n    if no Method is provided; returns None if the data is not available.\n\n    Prefered source is 'IEC 60079-20-1 (2010)' [1]_, with the secondary source\n    'NFPA 497 (2008)' [2]_ having very similar data.\n\n    Examples\n    --------\n    >>> Tautoignition(CASRN='71-43-2')\n    771.15\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    Tautoignition : float\n        Autoignition point of the chemical, [K]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain Tautoignition with the\n        given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        A string for the method name to use, as defined by constants in\n        Tautoignition_methods\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        Tautoignition for the desired chemical, and will return methods\n        instead of Tautoignition\n\n    Notes\n    -----\n\n    References\n    ----------\n    .. [1] IEC. \u201cIEC 60079-20-1:2010 Explosive atmospheres - Part 20-1:\n       Material characteristics for gas and vapour classification - Test\n       methods and data.\u201d https://webstore.iec.ch/publication/635. See also\n       https://law.resource.org/pub/in/bis/S05/is.iec.60079.20.1.2010.pdf\n    .. [2] National Fire Protection Association. NFPA 497: Recommended\n       Practice for the Classification of Flammable Liquids, Gases, or Vapors\n       and of Hazardous. NFPA, 2008.", "id": "f15791:m9"}
{"signature": "def Crowl_Louvar_UFL(atoms):", "body": "nC, nH, nO = <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0><EOL>if '<STR_LIT:C>' in atoms and atoms['<STR_LIT:C>']:<EOL><INDENT>nC = atoms['<STR_LIT:C>']<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>if '<STR_LIT:H>' in atoms:<EOL><INDENT>nH = atoms['<STR_LIT:H>']<EOL><DEDENT>if '<STR_LIT:O>' in atoms:<EOL><INDENT>nO = atoms['<STR_LIT:O>']<EOL><DEDENT>return <NUM_LIT>/(<NUM_LIT>*nC + <NUM_LIT>*nH - <NUM_LIT>*nO + <NUM_LIT:1.>)<EOL>", "docstring": "r'''Calculates upper flammability limit, using the Crowl-Louvar [1]_\n    correlation. Uses molecular formula only.\n\n    The upper flammability limit of a gas is air is:\n\n    .. math::\n        C_mH_xO_y + zO_2 \\to mCO_2 + \\frac{x}{2}H_2O\n\n        \\text{UFL} = \\frac{3.5}{4.76m + 1.19x - 2.38y + 1}\n\n    Parameters\n    ----------\n    atoms : dict\n        Dictionary of atoms and atom counts\n\n    Returns\n    -------\n    UFL : float\n        Upper flammability limit, mole fraction\n\n    Notes\n    -----\n    Coefficient of 3.5 taken from [2]_\n\n    Examples\n    --------\n    Hexane, example from [1]_, lit. 7.5 %\n\n    >>> Crowl_Louvar_UFL({'H': 14, 'C': 6})\n    0.07572479446127219\n\n    References\n    ----------\n    .. [1] Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety:\n       Fundamentals with Applications. 2E. Upper Saddle River, N.J:\n       Prentice Hall, 2001.\n    .. [2] Jones, G. W. \"Inflammation Limits and Their Practical Application\n       in Hazardous Industrial Operations.\" Chemical Reviews 22, no. 1\n       (February 1, 1938): 1-26. doi:10.1021/cr60071a001", "id": "f15791:m18"}
{"signature": "def mgm3_to_ppmv(mgm3, MW, T=<NUM_LIT>, P=<NUM_LIT>):", "body": "n = mgm3/MW/<NUM_LIT><EOL>parts = n*R*T/P<EOL>ppm = parts/<NUM_LIT><EOL>return ppm<EOL>", "docstring": "r'''Converts a concentration in  mg/m^3 to units of ppmv. Used in\n    industrial toxicology.\n\n    .. math::\n        ppmv = \\frac{1000RT}{MW\\cdot P} \\cdot \\frac{mg}{m^3}\n\n    Parameters\n    ----------\n    mgm3 : float\n        Concentration of a substance in an ideal gas mixture [mg/m^3]\n    MW : float\n        Molecular weight of the trace gas [g/mol]\n    T : float, optional\n        Temperature of the gas at which the ppmv is reported\n    P : float, optional\n        Pressure of the gas at which the ppmv is reported\n\n    Returns\n    -------\n    ppmv : float\n        Concentration of a component in a gas mixure [parts per million,\n        volumetric]\n\n    Notes\n    -----\n    The term P/(RT)/1000 converts to 0.040874 at STP. Its inverse is reported\n    as 24.45 in [1]_.\n\n    Examples\n    --------\n    >>> mgm3_to_ppmv(1.635, 40)\n    1.0000230371625833\n\n    References\n    ----------\n    .. [1] ACGIH. Industrial Ventilation: A Manual of Recommended Practice,\n       23rd Edition. American Conference of Governmental and Industrial\n       Hygenists, 2004.", "id": "f15791:m1"}
{"signature": "def TWA(CASRN, AvailableMethods=False, Method=None):  ", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in _OntarioExposureLimits and (_OntarioExposureLimits[CASRN][\"<STR_LIT>\"] or _OntarioExposureLimits[CASRN][\"<STR_LIT>\"]):<EOL><INDENT>methods.append(ONTARIO)<EOL><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == ONTARIO:<EOL><INDENT>if _OntarioExposureLimits[CASRN][\"<STR_LIT>\"]:<EOL><INDENT>_TWA = (_OntarioExposureLimits[CASRN][\"<STR_LIT>\"], '<STR_LIT>')<EOL><DEDENT>elif _OntarioExposureLimits[CASRN][\"<STR_LIT>\"]:<EOL><INDENT>_TWA = (_OntarioExposureLimits[CASRN][\"<STR_LIT>\"], '<STR_LIT>')<EOL><DEDENT><DEDENT>elif Method == NONE:<EOL><INDENT>_TWA = None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return _TWA<EOL>", "docstring": "This function handles the retrieval of Time-Weighted Average limits on worker\n    exposure to dangerous chemicals.\n\n    This API is considered experimental, and is expected to be removed in a\n    future release in favor of a more complete object-oriented interface.\n\n    >>> TWA('98-00-0')\n    (10.0, 'ppm')\n    >>> TWA('1303-00-0')\n    (5.0742430905659505e-05, 'ppm')\n    >>> TWA('7782-42-5', AvailableMethods=True)\n    ['Ontario Limits', 'None']", "id": "f15791:m3"}
{"signature": "def LFL_mixture(ys=None, LFLs=None, CASRNs=None, AvailableMethods=False,<EOL>Method=None):  ", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRNs:<EOL><INDENT>CASRNs2 = list(CASRNs)<EOL>LFLs2 = list(LFLs)<EOL>for i in inerts:<EOL><INDENT>if i in CASRNs2:<EOL><INDENT>ind = CASRNs.index(i)<EOL>CASRNs2.remove(i)<EOL>LFLs2.remove(LFLs[ind])<EOL><DEDENT><DEDENT>if none_and_length_check([LFLs2]):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if none_and_length_check([LFLs]):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT><DEDENT>methods.append('<STR_LIT:None>')<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL>", "docstring": "Inert gases are ignored.\n\n    This API is considered experimental, and is expected to be removed in a\n    future release in favor of a more complete object-oriented interface.\n\n    >>> LFL_mixture(ys=normalize([0.0024, 0.0061, 0.0015]), LFLs=[.012, .053, .031])\n    0.02751172136637643\n    >>> LFL_mixture(LFLs=[None, None, None, None, None, None, None, None, None, None, None, None, None, None, 0.025, 0.06, 0.073, 0.020039, 0.011316], ys=[0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.10, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05], CASRNs=['7440-37-1', '124-38-9', '7440-59-7', '7440-01-9', '7727-37-9', '7440-63-3', '10102-43-9', '7782-44-7', '132259-10-0', '7439-90-9', '10043-92-2', '7732-18-5', '7782-50-5', '7782-41-4', '67-64-1', '67-56-1', '75-52-5', '590-19-2', '277-10-1'])\n    0.023964903630937385", "id": "f15791:m13"}
{"signature": "def ppmv_to_mgm3(ppmv, MW, T=<NUM_LIT>, P=<NUM_LIT>):", "body": "parts = ppmv*<NUM_LIT><EOL>n = parts*P/(R*T)<EOL>mgm3 = MW*n*<NUM_LIT:1000>  <EOL>return mgm3<EOL>", "docstring": "r'''Converts a concentration in ppmv to units of mg/m^3. Used in\n    industrial toxicology.\n\n    .. math::\n        \\frac{mg}{m^3} = \\frac{ppmv\\cdot P}{RT}\\cdot \\frac{MW}{1000}\n\n    Parameters\n    ----------\n    ppmv : float\n        Concentratoin of a component in a gas mixure [parts per million,\n        volumetric]\n    MW : float\n        Molecular weight of the trace gas [g/mol]\n    T : float, optional\n        Temperature of the gas at which the ppmv is reported\n    P : float, optional\n        Pressure of the gas at which the ppmv is reported\n\n    Returns\n    -------\n    mgm3 : float\n        Concentration of a substance in an ideal gas mixture [mg/m^3]\n\n    Notes\n    -----\n    The term P/(RT)/1000 converts to 0.040874 at STP. Its inverse is reported\n    as 24.45 in [1]_.\n\n    Examples\n    --------\n    >>> ppmv_to_mgm3(1, 40)\n    1.6349623351068687\n\n    References\n    ----------\n    .. [1] ACGIH. Industrial Ventilation: A Manual of Recommended Practice,\n       23rd Edition. American Conference of Governmental and Industrial\n       Hygenists, 2004.", "id": "f15791:m0"}
{"signature": "def Suzuki_UFL(Hc=None):", "body": "Hc = Hc/<NUM_LIT><EOL>UFL = <NUM_LIT>*Hc + <NUM_LIT>*Hc*Hc + <NUM_LIT><EOL>return UFL/<NUM_LIT><EOL>", "docstring": "r'''Calculates upper flammability limit, using the Suzuki [1]_ correlation.\n    Uses heat of combustion only.\n\n    The upper flammability limit of a gas is air is:\n\n    .. math::\n        \\text{UFL} = 6.3\\Delta H_c^\\circ + 0.567\\Delta H_c^{\\circ 2} + 23.5\n\n    Parameters\n    ----------\n    Hc : float\n        Heat of combustion of gas [J/mol]\n\n    Returns\n    -------\n    UFL : float\n        Upper flammability limit, mole fraction\n\n    Notes\n    -----\n    UFL in percent volume in air according to original equation.\n    Hc is at standard conditions in the equation, in units of MJ/mol.\n    AAPD = 1.2% for 95 compounds used in fit.\n    Somewhat better results than the High and Danner method.\n    4.9% < UFL < 23.0%\n    -890.3 kJ/mol < dHc < -6380 kJ/mol\n    r^2 = 0.989\n    Sample calculations provided for all chemicals, both this method and\n    High and Danner. Examples are from the article.\n\n    Predicts a UFL of 1 at 7320190 J and a UFL of 0 at -5554160 J.\n\n    Examples\n    --------\n    Pentane, literature 7.8% UFL\n\n    >>> Suzuki_UFL(-3536600)\n    0.0831119493052\n\n    References\n    ----------\n    .. [1] Suzuki, Takahiro, and Kozo Koide. \"Short Communication: Correlation\n       between Upper Flammability Limits and Thermochemical Properties of\n       Organic Compounds.\" Fire and Materials 18, no. 6 (November 1, 1994):\n       393-97. doi:10.1002/fam.810180608.", "id": "f15791:m16"}
{"signature": "def Tflash(CASRN, AvailableMethods=False, Method=None):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in IEC_2010.index and not np.isnan(IEC_2010.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(IEC)<EOL><DEDENT>if CASRN in NFPA_2008.index and not np.isnan(NFPA_2008.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(NFPA)<EOL><DEDENT>if CASRN in DIPPR_SERAT.index:<EOL><INDENT>methods.append(SERAT)<EOL><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == IEC:<EOL><INDENT>return float(IEC_2010.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == NFPA:<EOL><INDENT>return float(NFPA_2008.at[CASRN, \"<STR_LIT>\"])<EOL><DEDENT>elif Method == SERAT:<EOL><INDENT>return float(DIPPR_SERAT.at[CASRN, \"<STR_LIT>\"])<EOL><DEDENT>elif Method == NONE:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''This function handles the retrieval or calculation of a chemical's\n    flash point. Lookup is based on CASRNs. No predictive methods are currently\n    implemented. Will automatically select a data source to use if no Method\n    is provided; returns None if the data is not available.\n\n    Prefered source is 'IEC 60079-20-1 (2010)' [1]_, with the secondary source\n    'NFPA 497 (2008)' [2]_ having very similar data. 
A third source \n    'Serat DIPPR (2017)' [3]_ provides third hand experimental but evaluated \n    data from the DIPPR database, version unspecified, for 870 compounds.\n\n    Examples\n    --------\n    >>> Tflash(CASRN='64-17-5')\n    285.15\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    Tflash : float\n        Flash point of the chemical, [K]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain Tflash with the given\n        inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        A string for the method name to use, as defined by constants in\n        Tflash_methods\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        Tflash for the desired chemical, and will return methods instead of\n        Tflash\n\n    Notes\n    -----\n    The predicted values from the DIPPR databank are also available in the\n    supporting material in [3]_, but are not included.\n\n    References\n    ----------\n    .. [1] IEC. \"IEC 60079-20-1:2010 Explosive atmospheres - Part 20-1:\n       Material characteristics for gas and vapour classification - Test\n       methods and data.\" https://webstore.iec.ch/publication/635. See also\n       https://law.resource.org/pub/in/bis/S05/is.iec.60079.20.1.2010.pdf\n    .. [2] National Fire Protection Association. NFPA 497: Recommended\n       Practice for the Classification of Flammable Liquids, Gases, or Vapors\n       and of Hazardous. NFPA, 2008.\n    .. [3] Serat, Fatima Zohra, Ali Mustapha Benkouider, Ahmed Yahiaoui, and \n       Farid Bagui. \"Nonlinear Group Contribution Model for the Prediction of \n       Flash Points Using Normal Boiling Points.\" Fluid Phase Equilibria 449 \n       (October 15, 2017): 52-59. doi:10.1016/j.fluid.2017.06.008.", "id": "f15791:m8"}
{"signature": "def Crowl_Louvar_LFL(atoms):", "body": "nC, nH, nO = <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0><EOL>if '<STR_LIT:C>' in atoms and atoms['<STR_LIT:C>']:<EOL><INDENT>nC = atoms['<STR_LIT:C>']<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>if '<STR_LIT:H>' in atoms:<EOL><INDENT>nH = atoms['<STR_LIT:H>']<EOL><DEDENT>if '<STR_LIT:O>' in atoms:<EOL><INDENT>nO = atoms['<STR_LIT:O>']<EOL><DEDENT>return <NUM_LIT>/(<NUM_LIT>*nC + <NUM_LIT>*nH - <NUM_LIT>*nO + <NUM_LIT:1.>)<EOL>", "docstring": "r'''Calculates lower flammability limit, using the Crowl-Louvar [1]_\n    correlation. Uses molecular formula only.\n\n    The lower flammability limit of a gas is air is:\n\n    .. math::\n        C_mH_xO_y + zO_2 \\to mCO_2 + \\frac{x}{2}H_2O\n\n        \\text{LFL} = \\frac{0.55}{4.76m + 1.19x - 2.38y + 1}\n\n    Parameters\n    ----------\n    atoms : dict\n        Dictionary of atoms and atom counts\n\n    Returns\n    -------\n    LFL : float\n        Lower flammability limit, mole fraction\n\n    Notes\n    -----\n    Coefficient of 0.55 taken from [2]_\n\n    Examples\n    --------\n    Hexane, example from [1]_, lit. 1.2 %\n\n    >>> Crowl_Louvar_LFL({'H': 14, 'C': 6})\n    0.011899610558199915\n\n    References\n    ----------\n    .. [1] Crowl, Daniel A., and Joseph F. Louvar. Chemical Process Safety:\n       Fundamentals with Applications. 2E. Upper Saddle River, N.J:\n       Prentice Hall, 2001.\n    .. [2] Jones, G. W. \"Inflammation Limits and Their Practical Application\n       in Hazardous Industrial Operations.\" Chemical Reviews 22, no. 1\n       (February 1, 1938): 1-26. doi:10.1021/cr60071a001", "id": "f15791:m17"}
{"signature": "def Skin(CASRN, AvailableMethods=False, Method=None):  ", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in _OntarioExposureLimits:<EOL><INDENT>methods.append(ONTARIO)<EOL><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == ONTARIO:<EOL><INDENT>_Skin = (_OntarioExposureLimits[CASRN][\"<STR_LIT>\"])<EOL><DEDENT>elif Method == NONE:<EOL><INDENT>_Skin = None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return _Skin<EOL>", "docstring": "This function handles the retrieval of whether or not a chemical can\n    be absorbed through the skin, relevant to chemical safety calculations.\n\n    This API is considered experimental, and is expected to be removed in a\n    future release in favor of a more complete object-oriented interface.\n\n    >>> Skin('108-94-1')\n    True\n    >>> Skin('1395-21-7')\n    False\n    >>> Skin('7572-29-4', AvailableMethods=True)\n    ['Ontario Limits', 'None']", "id": "f15791:m6"}
{"signature": "def calculate(self, T, P, zs, ws, method):", "body": "if method == SIMPLE:<EOL><INDENT>Cpgms = [i(T) for i in self.HeatCapacityGases]<EOL>return mixing_simple(zs, Cpgms)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Method to calculate heat capacity of a gas mixture at \n        temperature `T`, pressure `P`, mole fractions `zs` and weight fractions\n        `ws` with a given method.\n\n        This method has no exception handling; see `mixture_property`\n        for that.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the property, [K]\n        P : float\n            Pressure at which to calculate the property, [Pa]\n        zs : list[float]\n            Mole fractions of all species in the mixture, [-]\n        ws : list[float]\n            Weight fractions of all species in the mixture, [-]\n        method : str\n            Name of the method to use\n\n        Returns\n        -------\n        Cpgm : float\n            Molar heat capacity of the gas mixture at the given conditions,\n            [J/mol]", "id": "f15792:c7:m2"}
{"signature": "def calculate_integral_over_T(self, T1, T2):", "body": "return (Zabransky_quasi_polynomial_integral_over_T(T2, self.Tc, *self.coeffs)<EOL>- Zabransky_quasi_polynomial_integral_over_T(T1, self.Tc, *self.coeffs))<EOL>", "docstring": "r'''Method to compute the entropy integral of heat capacity from \n         `T1` to `T2`.\n\n        Parameters\n        ----------\n        T1 : float\n            Initial temperature, [K]\n        T2 : float\n            Final temperature, [K]\n\n        Returns\n        -------\n        dS : float\n            Entropy difference between `T1` and `T2`, [J/mol/K]", "id": "f15792:c1:m3"}
{"signature": "def TRCCp(T, a0, a1, a2, a3, a4, a5, a6, a7):", "body": "if T <= a7:<EOL><INDENT>y = <NUM_LIT:0.><EOL><DEDENT>else:<EOL><INDENT>y = (T - a7)/(T + a6)<EOL><DEDENT>Cp = R*(a0 + (a1/T**<NUM_LIT:2>)*exp(-a2/T) + a3*y**<NUM_LIT:2> + (a4 - a5/(T-a7)**<NUM_LIT:2> )*y**<NUM_LIT>)<EOL>return Cp<EOL>", "docstring": "r'''Calculates ideal gas heat capacity using the model developed in [1]_.\n\n    The ideal gas heat capacity is given by:\n\n    .. math::\n        C_p = R\\left(a_0 + (a_1/T^2) \\exp(-a_2/T) + a_3 y^2\n        + (a_4 - a_5/(T-a_7)^2 )y^j \\right)\n\n        y = \\frac{T-a_7}{T+a_6} \\text{ for } T > a_7 \\text{ otherwise } 0\n\n    Parameters\n    ----------\n    T : float\n        Temperature [K]\n    a1-a7 : float\n        Coefficients\n\n    Returns\n    -------\n    Cp : float\n        Ideal gas heat capacity , [J/mol/K]\n\n    Notes\n    -----\n    j is set to 8. Analytical integrals are available for this expression.\n\n    Examples\n    --------\n    >>> TRCCp(300, 4.0, 7.65E5, 720., 3.565, -0.052, -1.55E6, 52., 201.)\n    42.06525682312236\n\n    References\n    ----------\n    .. [1] Kabo, G. J., and G. N. Roganov. Thermodynamics of Organic Compounds\n       in the Gas State, Volume II: V. 2. College Station, Tex: CRC Press, 1994.", "id": "f15792:m3"}
{"signature": "def load_all_methods(self):", "body": "methods = []<EOL>Tmins, Tmaxs = [], []<EOL>if self.CASRN in zabransky_dict_const_s:<EOL><INDENT>methods.append(ZABRANSKY_SPLINE)<EOL>self.Zabransky_spline = zabransky_dict_const_s[self.CASRN]<EOL><DEDENT>if self.CASRN in zabransky_dict_const_p:<EOL><INDENT>methods.append(ZABRANSKY_QUASIPOLYNOMIAL)<EOL>self.Zabransky_quasipolynomial = zabransky_dict_const_p[self.CASRN]<EOL><DEDENT>if self.CASRN in zabransky_dict_iso_s:<EOL><INDENT>methods.append(ZABRANSKY_SPLINE_C)<EOL>self.Zabransky_spline_iso = zabransky_dict_iso_s[self.CASRN]<EOL><DEDENT>if self.CASRN in zabransky_dict_iso_p:<EOL><INDENT>methods.append(ZABRANSKY_QUASIPOLYNOMIAL_C)<EOL>self.Zabransky_quasipolynomial_iso = zabransky_dict_iso_p[self.CASRN]<EOL><DEDENT>if self.CASRN in Poling_data.index and not np.isnan(Poling_data.at[self.CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(POLING_CONST)<EOL>self.POLING_T = <NUM_LIT><EOL>self.POLING_constant = float(Poling_data.at[self.CASRN, '<STR_LIT>'])<EOL><DEDENT>if self.CASRN in CRC_standard_data.index and not np.isnan(CRC_standard_data.at[self.CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(CRCSTD)<EOL>self.CRCSTD_T = <NUM_LIT><EOL>self.CRCSTD_constant = float(CRC_standard_data.at[self.CASRN, '<STR_LIT>'])<EOL><DEDENT>if self.CASRN in zabransky_dict_sat_s:<EOL><INDENT>methods.append(ZABRANSKY_SPLINE_SAT)<EOL>self.Zabransky_spline_sat = zabransky_dict_sat_s[self.CASRN]<EOL><DEDENT>if self.CASRN in zabransky_dict_sat_p:<EOL><INDENT>methods.append(ZABRANSKY_QUASIPOLYNOMIAL_SAT)<EOL>self.Zabransky_quasipolynomial_sat = zabransky_dict_sat_p[self.CASRN]<EOL><DEDENT>if self.CASRN in _VDISaturationDict:<EOL><INDENT>methods.append(VDI_TABULAR)<EOL>Ts, props = VDI_tabular_data(self.CASRN, '<STR_LIT>')<EOL>self.VDI_Tmin = Ts[<NUM_LIT:0>]<EOL>self.VDI_Tmax = Ts[-<NUM_LIT:1>]<EOL>self.tabular_data[VDI_TABULAR] = (Ts, props)<EOL>Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)<EOL><DEDENT>if self.Tc and 
self.omega:<EOL><INDENT>methods.extend([ROWLINSON_POLING, ROWLINSON_BONDI])<EOL><DEDENT>if has_CoolProp and self.CASRN in coolprop_dict:<EOL><INDENT>methods.append(COOLPROP)<EOL>self.CP_f = coolprop_fluids[self.CASRN]<EOL>Tmins.append(self.CP_f.Tt); Tmaxs.append(self.CP_f.Tc)<EOL><DEDENT>if self.MW and self.similarity_variable:<EOL><INDENT>methods.append(DADGOSTAR_SHAW)<EOL><DEDENT>self.all_methods = set(methods)<EOL>if Tmins and Tmaxs:<EOL><INDENT>self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)<EOL><DEDENT>", "docstring": "r'''Method which picks out coefficients for the specified chemical\n        from the various dictionaries and DataFrames storing it. All data is\n        stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,\n        and :obj:`all_methods` as a set of methods for which the data exists for.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15792:c3:m1"}
{"signature": "def calculate(self, T, method):", "body": "if method == ZABRANSKY_SPLINE:<EOL><INDENT>return self.Zabransky_spline.calculate(T)<EOL><DEDENT>elif method == ZABRANSKY_QUASIPOLYNOMIAL:<EOL><INDENT>return self.Zabransky_quasipolynomial.calculate(T)<EOL><DEDENT>elif method == ZABRANSKY_SPLINE_C:<EOL><INDENT>return self.Zabransky_spline_iso.calculate(T)<EOL><DEDENT>elif method == ZABRANSKY_QUASIPOLYNOMIAL_C:<EOL><INDENT>return self.Zabransky_quasipolynomial_iso.calculate(T)<EOL><DEDENT>elif method == ZABRANSKY_SPLINE_SAT:<EOL><INDENT>return self.Zabransky_spline_sat.calculate(T)<EOL><DEDENT>elif method == ZABRANSKY_QUASIPOLYNOMIAL_SAT:<EOL><INDENT>return self.Zabransky_quasipolynomial_sat.calculate(T)<EOL><DEDENT>elif method == COOLPROP:<EOL><INDENT>return CoolProp_T_dependent_property(T, self.CASRN , '<STR_LIT>', '<STR_LIT:l>')<EOL><DEDENT>elif method == POLING_CONST:<EOL><INDENT>return self.POLING_constant<EOL><DEDENT>elif method == CRCSTD:<EOL><INDENT>return self.CRCSTD_constant<EOL><DEDENT>elif method == ROWLINSON_POLING:<EOL><INDENT>Cpgm = self.Cpgm(T) if hasattr(self.Cpgm, '<STR_LIT>') else self.Cpgm<EOL>return Rowlinson_Poling(T, self.Tc, self.omega, Cpgm)<EOL><DEDENT>elif method == ROWLINSON_BONDI:<EOL><INDENT>Cpgm = self.Cpgm(T) if hasattr(self.Cpgm, '<STR_LIT>') else self.Cpgm<EOL>return Rowlinson_Bondi(T, self.Tc, self.omega, Cpgm)<EOL><DEDENT>elif method == DADGOSTAR_SHAW:<EOL><INDENT>Cp = Dadgostar_Shaw(T, self.similarity_variable)<EOL>return property_mass_to_molar(Cp, self.MW)<EOL><DEDENT>elif method in self.tabular_data:<EOL><INDENT>return self.interpolate(T, method)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Method to calculate heat capacity of a liquid at temperature `T`\n        with a given method.\n\n        This method has no exception handling; see `T_dependent_property`\n        for that.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to 
calculate heat capacity, [K]\n        method : str\n            Name of the method to use\n\n        Returns\n        -------\n        Cp : float\n            Heat capacity of the liquid at T, [J/mol/K]", "id": "f15792:c3:m2"}
{"signature": "def Zabransky_cubic_integral_over_T(T, a1, a2, a3, a4):", "body": "T = T/<NUM_LIT><EOL>return R*(T*(T*(T*a4/<NUM_LIT:3> + a3/<NUM_LIT:2>) + a2) + a1*log(T))<EOL>", "docstring": "r'''Calculates the integral of liquid heat capacity over T using the model \n    developed in [1]_.\n\n    Parameters\n    ----------\n    T : float\n        Temperature [K]\n    a1-a4 : float\n        Coefficients\n\n    Returns\n    -------\n    S : float\n        Difference in entropy from 0 K, [J/mol/K]\n\n    Notes\n    -----\n    The analytical integral was derived with Sympy; it is a simple polynomial,\n    plus a logarithm\n\n    Examples\n    --------\n    >>> Zabransky_cubic_integral_over_T(298.15, 20.9634, -10.1344, 2.8253, \n    ... -0.256738)\n    24.73245695987246\n\n    References\n    ----------\n    .. [1] Zabransky, M., V. Ruzicka Jr, V. Majer, and Eugene S. Domalski.\n       Heat Capacity of Liquids: Critical Review and Recommended Values.\n       2 Volume Set. Washington, D.C.: Amer Inst of Physics, 1996.", "id": "f15792:m16"}
{"signature": "def calculate_integral(self, T1, T2):", "body": "<EOL>if T2 < T1:<EOL><INDENT>flipped = True<EOL>T1, T2 = T2, T1<EOL><DEDENT>else:<EOL><INDENT>flipped = False<EOL><DEDENT>if self.n == <NUM_LIT:1>:<EOL><INDENT>dH = (Zabransky_cubic_integral(T2, *self.coeff_sets[<NUM_LIT:0>])<EOL>- Zabransky_cubic_integral(T1, *self.coeff_sets[<NUM_LIT:0>]))<EOL><DEDENT>else:<EOL><INDENT>ind_T1, ind_T2 = self._coeff_ind_from_T(T1), self._coeff_ind_from_T(T2)<EOL>if ind_T1 == ind_T2:<EOL><INDENT>dH = (Zabransky_cubic_integral(T2, *self.coeff_sets[ind_T2])<EOL>- Zabransky_cubic_integral(T1, *self.coeff_sets[ind_T1]))<EOL><DEDENT>else:<EOL><INDENT>dH = (Zabransky_cubic_integral(self.Ts[ind_T1], *self.coeff_sets[ind_T1])<EOL>- Zabransky_cubic_integral(T1, *self.coeff_sets[ind_T1]))<EOL>for i in range(ind_T1, ind_T2):<EOL><INDENT>diff =(Zabransky_cubic_integral(self.Ts[i+<NUM_LIT:1>], *self.coeff_sets[i])<EOL>- Zabransky_cubic_integral(self.Ts[i], *self.coeff_sets[i]))<EOL>dH += diff<EOL><DEDENT>end = (Zabransky_cubic_integral(T2, *self.coeff_sets[ind_T2])<EOL>- Zabransky_cubic_integral(self.Ts[ind_T2], *self.coeff_sets[ind_T2]))<EOL>dH += end<EOL><DEDENT><DEDENT>return -dH if flipped else dH<EOL>", "docstring": "r'''Method to compute the enthalpy integral of heat capacity from \n        `T1` to `T2`. Analytically integrates across the piecewise spline\n        as necessary.\n\n        Parameters\n        ----------\n        T1 : float\n            Initial temperature, [K]\n        T2 : float\n            Final temperature, [K]\n\n        Returns\n        -------\n        dS : float\n            Enthalpy difference between `T1` and `T2`, [J/mol/K]", "id": "f15792:c2:m4"}
{"signature": "def load_all_methods(self):", "body": "methods = []<EOL>Tmins, Tmaxs = [], []<EOL>if self.CASRN in TRC_gas_data.index:<EOL><INDENT>methods.append(TRCIG)<EOL>_, self.TRCIG_Tmin, self.TRCIG_Tmax, a0, a1, a2, a3, a4, a5, a6, a7, _, _, _ = _TRC_gas_data_values[TRC_gas_data.index.get_loc(self.CASRN)].tolist()<EOL>self.TRCIG_coefs = [a0, a1, a2, a3, a4, a5, a6, a7]<EOL>Tmins.append(self.TRCIG_Tmin); Tmaxs.append(self.TRCIG_Tmax)<EOL><DEDENT>if self.CASRN in Poling_data.index and not np.isnan(Poling_data.at[self.CASRN, '<STR_LIT>']):<EOL><INDENT>_, self.POLING_Tmin, self.POLING_Tmax, a0, a1, a2, a3, a4, Cpg, Cpl = _Poling_data_values[Poling_data.index.get_loc(self.CASRN)].tolist()<EOL>methods.append(POLING)<EOL>self.POLING_coefs = [a0, a1, a2, a3, a4]<EOL>Tmins.append(self.POLING_Tmin); Tmaxs.append(self.POLING_Tmax)<EOL><DEDENT>if self.CASRN in Poling_data.index and not np.isnan(Poling_data.at[self.CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(POLING_CONST)<EOL>self.POLING_T = <NUM_LIT><EOL>self.POLING_constant = float(Poling_data.at[self.CASRN, '<STR_LIT>'])<EOL><DEDENT>if self.CASRN in CRC_standard_data.index and not np.isnan(CRC_standard_data.at[self.CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(CRCSTD)<EOL>self.CRCSTD_T = <NUM_LIT><EOL>self.CRCSTD_constant = float(CRC_standard_data.at[self.CASRN, '<STR_LIT>'])<EOL><DEDENT>if self.CASRN in _VDISaturationDict:<EOL><INDENT>methods.append(VDI_TABULAR)<EOL>Ts, props = VDI_tabular_data(self.CASRN, '<STR_LIT>')<EOL>self.VDI_Tmin = Ts[<NUM_LIT:0>]<EOL>self.VDI_Tmax = Ts[-<NUM_LIT:1>]<EOL>self.tabular_data[VDI_TABULAR] = (Ts, props)<EOL>Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)<EOL><DEDENT>if has_CoolProp and self.CASRN in coolprop_dict:<EOL><INDENT>methods.append(COOLPROP)<EOL>self.CP_f = coolprop_fluids[self.CASRN]<EOL>Tmins.append(self.CP_f.Tt); Tmaxs.append(self.CP_f.Tc)<EOL><DEDENT>if self.MW and 
self.similarity_variable:<EOL><INDENT>methods.append(LASTOVKA_SHAW)<EOL><DEDENT>self.all_methods = set(methods)<EOL>if Tmins and Tmaxs:<EOL><INDENT>self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)<EOL><DEDENT>", "docstring": "r'''Method which picks out coefficients for the specified chemical\n        from the various dictionaries and DataFrames storing it. All data is\n        stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,\n        and :obj:`all_methods` as a set of methods for which the data exists for.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15792:c0:m1"}
{"signature": "def load_all_methods(self):", "body": "methods = [SIMPLE]        <EOL>self.all_methods = set(methods)<EOL>", "docstring": "r'''Method to initialize the object by precomputing any values which\n        may be used repeatedly and by retrieving mixture-specific variables.\n        All data are stored as attributes. This method also sets :obj:`Tmin`, \n        :obj:`Tmax`, and :obj:`all_methods` as a set of methods which should \n        work to calculate the property.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15792:c7:m1"}
{"signature": "def TRCCp_integral_over_T(T, a0, a1, a2, a3, a4, a5, a6, a7, J=<NUM_LIT:0>):", "body": "<EOL>if T <= a7:<EOL><INDENT>y = <NUM_LIT:0.><EOL><DEDENT>else:<EOL><INDENT>y = (T - a7)/(T + a6)<EOL><DEDENT>z = T/(T + a6)*(a7 + a6)/a7<EOL>if T <= a7:<EOL><INDENT>s = <NUM_LIT:0.><EOL><DEDENT>else:<EOL><INDENT>a72 = a7*a7<EOL>a62 = a6*a6<EOL>a7_a6 = a7/a6 <EOL>a7_a6_2 = a7_a6*a7_a6<EOL>a7_a6_4 = a7_a6_2*a7_a6_2<EOL>x1 = (a4*a72 - a5)/a62 <EOL>first = (a3 + ((a4*a72 - a5)/a62)*a7_a6_4)*a7_a6_2*log(z)<EOL>second = (a3 + a4)*log((T + a6)/(a6 + a7))<EOL>fourth = -(a3/a6*(a6 + a7) + a5*y**<NUM_LIT:6>/(<NUM_LIT>*a7*(a6 + a7)))*y<EOL>third = sum([(x1*(-a7_a6)**(<NUM_LIT:6>-i) - a4)*y**i/i for i in range(<NUM_LIT:1>, <NUM_LIT:8>)])<EOL>s = first + second + third + fourth<EOL><DEDENT>return R*(J + a0*log(T) + a1/(a2*a2)*(<NUM_LIT:1.> + a2/T)*exp(-a2/T) + s)<EOL>", "docstring": "r'''Integrates ideal gas heat capacity over T using the model developed in \n    [1]_. Best used as a delta only.\n\n    The difference in ideal-gas entropy with respect to 0 K is given by:\n\n    .. 
math::\n        \\frac{S^\\circ}{R} = J + a_0\\ln T + \\frac{a_1}{a_2^2}\\left(1\n        + \\frac{a_2}{T}\\right)x(a_2) + s(T)\n\n        s(T) = \\left[\\left\\{a_3 + \\left(\\frac{a_4 a_7^2 - a_5}{a_6^2}\\right)\n        \\left(\\frac{a_7}{a_6}\\right)^4\\right\\}\\left(\\frac{a_7}{a_6}\\right)^2\n        \\ln z + (a_3 + a_4)\\ln\\left(\\frac{T+a_6}{a_6+a_7}\\right)\n        +\\sum_{i=1}^7 \\left\\{\\left(\\frac{a_4 a_7^2 - a_5}{a_6^2}\\right)\\left(\n        \\frac{-a_7}{a_6}\\right)^{6-i} - a_4\\right\\}\\frac{y^i}{i}\n        - \\left\\{\\frac{a_3}{a_6}(a_6 + a_7) + \\frac{a_5 y^6}{7a_7(a_6+a_7)}\n        \\right\\}y\\right]\n\n        s(T) = 0 \\text{ for } T \\le a_7\n\n        z = \\frac{T}{T+a_6} \\cdot \\frac{a_7 + a_6}{a_7}\n\n        y = \\frac{T-a_7}{T+a_6} \\text{ for } T > a_7 \\text{ otherwise } 0\n\n    Parameters\n    ----------\n    T : float\n        Temperature [K]\n    a1-a7 : float\n        Coefficients\n    J : float, optional\n        Integral offset\n\n    Returns\n    -------\n    S-S(0) : float\n        Difference in entropy from 0 K , [J/mol/K]\n\n    Notes\n    -----\n    Analytical integral as provided in [1]_ and verified with numerical\n    integration. \n\n    Examples\n    --------\n    >>> TRCCp_integral_over_T(300, 4.0, 124000, 245, 50.539, -49.469, \n    ... 220440000, 560, 78)\n    213.80148972435018\n\n    References\n    ----------\n    .. [1] Kabo, G. J., and G. N. Roganov. Thermodynamics of Organic Compounds\n       in the Gas State, Volume II: V. 2. College Station, Tex: CRC Press, 1994.", "id": "f15792:m5"}
{"signature": "def add_coeffs(self, Tmin, Tmax, coeffs):", "body": "self.n += <NUM_LIT:1><EOL>if not self.Ts:<EOL><INDENT>self.Ts = [Tmin, Tmax]<EOL>self.coeff_sets = [coeffs]<EOL><DEDENT>else:<EOL><INDENT>for ind, T in enumerate(self.Ts):<EOL><INDENT>if Tmin < T:<EOL><INDENT>self.Ts.insert(ind, Tmin) <EOL>self.coeff_sets.insert(ind, coeffs)<EOL>return<EOL><DEDENT><DEDENT>self.Ts.append(Tmax)<EOL>self.coeff_sets.append(coeffs)<EOL><DEDENT>", "docstring": "Called internally during the parsing of the Zabransky database, to\n        add coefficients as they are read one per line", "id": "f15792:c2:m1"}
{"signature": "def Lastovka_Shaw_integral(T, similarity_variable, cyclic_aliphatic=False):", "body": "a = similarity_variable<EOL>if cyclic_aliphatic:<EOL><INDENT>A1 = -<NUM_LIT><EOL>A2 = <NUM_LIT><EOL>first = A1 + A2*a<EOL><DEDENT>else:<EOL><INDENT>A1 = <NUM_LIT><EOL>A2 = <NUM_LIT><EOL>A3 = <NUM_LIT> <EOL>A4 = <NUM_LIT><EOL>first = A2 + (A1-A2)/(<NUM_LIT:1.>+exp((a-A3)/A4)) <EOL><DEDENT>B11 = <NUM_LIT><EOL>B12 = <NUM_LIT><EOL>C11 = <NUM_LIT><EOL>C12 = <NUM_LIT><EOL>B21 = <NUM_LIT><EOL>B22 = <NUM_LIT><EOL>C21 = <NUM_LIT><EOL>C22 = <NUM_LIT><EOL>return <NUM_LIT>*(T*first - (B11 + B12*a)*(-C11 - C12*a)**<NUM_LIT:2>/(-C11 - C12*a + (C11 <EOL>+ C12*a)*exp((-C11 - C12*a)/T)) - (B21 + B22*a)*(-C21 - C22*a)**<NUM_LIT:2>/(-C21 <EOL>- C22*a + (C21 + C22*a)*exp((-C21 - C22*a)/T)))<EOL>", "docstring": "r'''Calculate the integral of ideal-gas constant-pressure heat capacitiy \n    with the similarity variable concept and method as shown in [1]_.\n\n    Parameters\n    ----------\n    T : float\n        Temperature of gas [K]\n    similarity_variable : float\n        similarity variable as defined in [1]_, [mol/g]\n\n    Returns\n    -------\n    H : float\n        Difference in enthalpy from 0 K, [J/kg]\n\n    Notes\n    -----\n    Original model is in terms of J/g/K. Note that the model is for predicting\n    mass heat capacity, not molar heat capacity like most other methods!\n    Integral was computed with SymPy.\n\n    See Also\n    --------\n    Lastovka_Shaw\n    Lastovka_Shaw_integral_over_T\n\n    Examples\n    --------\n    >>> Lastovka_Shaw_integral(300.0, 0.1333)\n    5283095.816018478\n\n    References\n    ----------\n    .. [1] Lastovka, Vaclav, and John M. Shaw. \"Predictive Correlations for\n       Ideal Gas Heat Capacities of Pure Hydrocarbons and Petroleum Fractions.\"\n       Fluid Phase Equilibria 356 (October 25, 2013): 338-370.\n       doi:10.1016/j.fluid.2013.07.023.", "id": "f15792:m1"}
{"signature": "def Dadgostar_Shaw_integral(T, similarity_variable):", "body": "a = similarity_variable<EOL>a2 = a*a<EOL>T2 = T*T<EOL>a11 = -<NUM_LIT><EOL>a12 = <NUM_LIT><EOL>a21 = <NUM_LIT><EOL>a22 = -<NUM_LIT><EOL>a31 = -<NUM_LIT><EOL>a32 = <NUM_LIT><EOL>constant = <NUM_LIT><EOL>H = T2*T/<NUM_LIT>*(a2*a32 + a*a31) + T2*<NUM_LIT:0.5>*(a2*a22 + a*a21) + T*constant*(a2*a12 + a*a11)<EOL>return H*<NUM_LIT><EOL>", "docstring": "r'''Calculate the integral of liquid constant-pressure heat capacitiy \n    with the similarity variable concept and method as shown in [1]_.\n\n    Parameters\n    ----------\n    T : float\n        Temperature of gas [K]\n    similarity_variable : float\n        similarity variable as defined in [1]_, [mol/g]\n\n    Returns\n    -------\n    H : float\n        Difference in enthalpy from 0 K, [J/kg]\n\n    Notes\n    -----\n    Original model is in terms of J/g/K. Note that the model is for predicting\n    mass heat capacity, not molar heat capacity like most other methods!\n    Integral was computed with SymPy.\n\n    See Also\n    --------\n    Dadgostar_Shaw\n    Dadgostar_Shaw_integral_over_T\n\n    Examples\n    --------\n    >>> Dadgostar_Shaw_integral(300.0, 0.1333)\n    238908.15142664989\n\n    References\n    ----------\n    .. [1] Dadgostar, Nafiseh, and John M. Shaw. \"A Predictive Correlation for\n       the Constant-Pressure Specific Heat Capacity of Pure and Ill-Defined\n       Liquid Hydrocarbons.\" Fluid Phase Equilibria 313 (January 15, 2012):\n       211-226. doi:10.1016/j.fluid.2011.09.015.", "id": "f15792:m9"}
{"signature": "def calculate_integral_over_T(self, T1, T2):", "body": "<EOL>if T2 < T1:<EOL><INDENT>flipped = True<EOL>T1, T2 = T2, T1<EOL><DEDENT>else:<EOL><INDENT>flipped = False<EOL><DEDENT>if self.n == <NUM_LIT:1>:<EOL><INDENT>dS = (Zabransky_cubic_integral_over_T(T2, *self.coeff_sets[<NUM_LIT:0>])<EOL>- Zabransky_cubic_integral_over_T(T1, *self.coeff_sets[<NUM_LIT:0>]))<EOL><DEDENT>else:<EOL><INDENT>ind_T1, ind_T2 = self._coeff_ind_from_T(T1), self._coeff_ind_from_T(T2)<EOL>if ind_T1 == ind_T2:<EOL><INDENT>dS = (Zabransky_cubic_integral_over_T(T2, *self.coeff_sets[ind_T2])<EOL>- Zabransky_cubic_integral_over_T(T1, *self.coeff_sets[ind_T1]))<EOL><DEDENT>else:<EOL><INDENT>dS = (Zabransky_cubic_integral_over_T(self.Ts[ind_T1], *self.coeff_sets[ind_T1])<EOL>- Zabransky_cubic_integral_over_T(T1, *self.coeff_sets[ind_T1]))<EOL>for i in range(ind_T1, ind_T2):<EOL><INDENT>diff =(Zabransky_cubic_integral_over_T(self.Ts[i+<NUM_LIT:1>], *self.coeff_sets[i])<EOL>- Zabransky_cubic_integral_over_T(self.Ts[i], *self.coeff_sets[i]))<EOL>dS += diff<EOL><DEDENT>end = (Zabransky_cubic_integral_over_T(T2, *self.coeff_sets[ind_T2])<EOL>- Zabransky_cubic_integral_over_T(self.Ts[ind_T2], *self.coeff_sets[ind_T2]))<EOL>dS += end<EOL><DEDENT><DEDENT>return -dS if flipped else dS<EOL>", "docstring": "r'''Method to compute the entropy integral of heat capacity from \n        `T1` to `T2`. Analytically integrates across the piecewise spline\n        as necessary.\n\n        Parameters\n        ----------\n        T1 : float\n            Initial temperature, [K]\n        T2 : float\n            Final temperature, [K]\n\n        Returns\n        -------\n        dS : float\n            Entropy difference between `T1` and `T2`, [J/mol/K]", "id": "f15792:c2:m5"}
{"signature": "def Lastovka_solid_integral(T, similarity_variable):", "body": "A1 = <NUM_LIT><EOL>A2 = <NUM_LIT><EOL>theta = <NUM_LIT><EOL>C1 = <NUM_LIT><EOL>C2 = -<NUM_LIT><EOL>D1 = <NUM_LIT><EOL>D2 = -<NUM_LIT><EOL>similarity_variable2 = similarity_variable*similarity_variable<EOL>return (T*T*T*(<NUM_LIT>*D1*similarity_variable/<NUM_LIT> <EOL>+ <NUM_LIT>*D2*similarity_variable2/<NUM_LIT>) + T*T*(<NUM_LIT>*C1*similarity_variable <EOL>+ <NUM_LIT>*C2*similarity_variable2)<EOL>+ (<NUM_LIT>*A1*R*similarity_variable*theta<EOL>+ <NUM_LIT>*A2*R*similarity_variable2*theta)/(exp(theta/T) - <NUM_LIT:1.>))<EOL>", "docstring": "r'''Integrates solid constant-pressure heat capacitiy with the similarity\n    variable concept and method as shown in [1]_.\n\n    Uses a explicit form as derived with Sympy.\n\n    Parameters\n    ----------\n    T : float\n        Temperature of solid [K]\n    similarity_variable : float\n        similarity variable as defined in [1]_, [mol/g]\n\n    Returns\n    -------\n    H : float\n        Difference in enthalpy from 0 K, [J/kg]\n\n    Notes\n    -----\n    Original model is in terms of J/g/K. Note that the model is for predicting\n    mass heat capacity, not molar heat capacity like most other methods!\n\n    See Also\n    --------\n    Lastovka_solid\n\n    Examples\n    --------\n    >>> Lastovka_solid_integral(300, 0.2139)\n    283246.1242170376\n\n    References\n    ----------\n    .. [1] La\u0161tovka, V\u00e1clav, Michal Fulem, Mildred Becerra, and John M. Shaw.\n       \"A Similarity Variable for Estimating the Heat Capacity of Solid Organic\n       Compounds: Part II. Application: Heat Capacity Calculation for\n       Ill-Defined Organic Solids.\" Fluid Phase Equilibria 268, no. 1-2\n       (June 25, 2008): 134-41. doi:10.1016/j.fluid.2008.03.018.", "id": "f15792:m18"}
{"signature": "def calculate_integral(self, T1, T2):", "body": "return (Zabransky_quasi_polynomial_integral(T2, self.Tc, *self.coeffs)<EOL>- Zabransky_quasi_polynomial_integral(T1, self.Tc, *self.coeffs))<EOL>", "docstring": "r'''Method to compute the enthalpy integral of heat capacity from \n         `T1` to `T2`.\n\n        Parameters\n        ----------\n        T1 : float\n            Initial temperature, [K]\n        T2 : float\n            Final temperature, [K]\n\n        Returns\n        -------\n        dH : float\n            Enthalpy difference between `T1` and `T2`, [J/mol]", "id": "f15792:c1:m2"}
{"signature": "def calculate_integral_over_T(self, T1, T2, method):", "body": "if method == PERRY151:<EOL><INDENT>S2 = (self.PERRY151_const*log(T2) + self.PERRY151_lin*T2 <EOL>- self.PERRY151_quadinv/(<NUM_LIT>*T2**<NUM_LIT:2>) + <NUM_LIT:0.5>*self.PERRY151_quad*T2**<NUM_LIT:2>)<EOL>S1 = (self.PERRY151_const*log(T1) + self.PERRY151_lin*T1<EOL>- self.PERRY151_quadinv/(<NUM_LIT>*T1**<NUM_LIT:2>) + <NUM_LIT:0.5>*self.PERRY151_quad*T1**<NUM_LIT:2>)<EOL>return (S2 - S1)*calorie<EOL><DEDENT>elif method == CRCSTD:<EOL><INDENT>S2 = self.CRCSTD_Cp*log(T2)<EOL>S1 = self.CRCSTD_Cp*log(T1)<EOL>return (S2 - S1)<EOL><DEDENT>elif method == LASTOVKA_S:<EOL><INDENT>dS = (Lastovka_solid_integral_over_T(T2, self.similarity_variable)<EOL>- Lastovka_solid_integral_over_T(T1, self.similarity_variable))<EOL>return property_mass_to_molar(dS, self.MW)<EOL><DEDENT>elif method in self.tabular_data:<EOL><INDENT>return float(quad(lambda T: self.calculate(T, method)/T, T1, T2)[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Method to calculate the integral of a property over temperature\n        with respect to temperature, using a specified method. Implements the \n        analytical integrals of all available methods except for tabular data.\n\n        Parameters\n        ----------\n        T1 : float\n            Lower limit of integration, [K]\n        T2 : float\n            Upper limit of integration, [K]\n        method : str\n            Method for which to find the integral\n\n        Returns\n        -------\n        integral : float\n            Calculated integral of the property over the given range, \n            [`units`]", "id": "f15792:c4:m5"}
{"signature": "def calculate_integral(self, T1, T2, method):", "body": "if method == ZABRANSKY_SPLINE:<EOL><INDENT>return self.Zabransky_spline.calculate_integral(T1, T2)<EOL><DEDENT>elif method == ZABRANSKY_SPLINE_C:<EOL><INDENT>return self.Zabransky_spline_iso.calculate_integral(T1, T2)<EOL><DEDENT>elif method == ZABRANSKY_SPLINE_SAT:<EOL><INDENT>return self.Zabransky_spline_sat.calculate_integral(T1, T2)<EOL><DEDENT>elif method == ZABRANSKY_QUASIPOLYNOMIAL:<EOL><INDENT>return self.Zabransky_quasipolynomial.calculate_integral(T1, T2)<EOL><DEDENT>elif method == ZABRANSKY_QUASIPOLYNOMIAL_C:<EOL><INDENT>return self.Zabransky_quasipolynomial_iso.calculate_integral(T1, T2)<EOL><DEDENT>elif method == ZABRANSKY_QUASIPOLYNOMIAL_SAT:<EOL><INDENT>return self.Zabransky_quasipolynomial_sat.calculate_integral(T1, T2)<EOL><DEDENT>elif method == POLING_CONST:<EOL><INDENT>return (T2 - T1)*self.POLING_constant<EOL><DEDENT>elif method == CRCSTD:<EOL><INDENT>return (T2 - T1)*self.CRCSTD_constant<EOL><DEDENT>elif method == DADGOSTAR_SHAW:<EOL><INDENT>dH = (Dadgostar_Shaw_integral(T2, self.similarity_variable)<EOL>- Dadgostar_Shaw_integral(T1, self.similarity_variable))<EOL>return property_mass_to_molar(dH, self.MW)<EOL><DEDENT>elif method in self.tabular_data or method == COOLPROP or method in [ROWLINSON_POLING, ROWLINSON_BONDI]:<EOL><INDENT>return float(quad(self.calculate, T1, T2, args=(method))[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Method to calculate the integral of a property with respect to\n        temperature, using a specified method.  
Implements the \n        analytical integrals of all available methods except for tabular data,\n        the case of multiple coefficient sets needed to encompass the temperature\n        range of any of the ZABRANSKY methods, and the CSP methods using the\n        vapor phase properties.\n\n        Parameters\n        ----------\n        T1 : float\n            Lower limit of integration, [K]\n        T2 : float\n            Upper limit of integration, [K]\n        method : str\n            Method for which to find the integral\n\n        Returns\n        -------\n        integral : float\n            Calculated integral of the property over the given range, \n            [`units*K`]", "id": "f15792:c3:m4"}
{"signature": "def calculate(self, T, P, zs, ws, method):", "body": "if method == SIMPLE:<EOL><INDENT>Cplms = [i(T) for i in self.HeatCapacityLiquids]<EOL>return mixing_simple(zs, Cplms)<EOL><DEDENT>elif method == LALIBERTE:<EOL><INDENT>ws = list(ws) ; ws.pop(self.index_w)<EOL>Cpl = Laliberte_heat_capacity(T, ws, self.wCASs)<EOL>MW = mixing_simple(zs, self.MWs)<EOL>return property_mass_to_molar(Cpl, MW)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Method to calculate heat capacity of a liquid mixture at \n        temperature `T`, pressure `P`, mole fractions `zs` and weight fractions\n        `ws` with a given method.\n\n        This method has no exception handling; see `mixture_property`\n        for that.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the property, [K]\n        P : float\n            Pressure at which to calculate the property, [Pa]\n        zs : list[float]\n            Mole fractions of all species in the mixture, [-]\n        ws : list[float]\n            Weight fractions of all species in the mixture, [-]\n        method : str\n            Name of the method to use\n\n        Returns\n        -------\n        Cplm : float\n            Molar heat capacity of the liquid mixture at the given conditions,\n            [J/mol]", "id": "f15792:c5:m2"}
{"signature": "def Rowlinson_Bondi(T, Tc, omega, Cpgm):", "body": "Tr = T/Tc<EOL>Cplm = Cpgm + R*(<NUM_LIT> + <NUM_LIT>/(<NUM_LIT:1.>-Tr) + <NUM_LIT>*omega*(<NUM_LIT><EOL>+ <NUM_LIT>*(<NUM_LIT:1>-Tr)**(<NUM_LIT:1>/<NUM_LIT>)/Tr + <NUM_LIT>/(<NUM_LIT:1.>-Tr)))<EOL>return Cplm<EOL>", "docstring": "r'''Calculate liquid constant-pressure heat capacitiy with the CSP method\n    shown in [1]_.\n\n    The heat capacity of a liquid is given by:\n\n    .. math::\n        \\frac{Cp^L - Cp^{ig}}{R} = 1.45 + 0.45(1-T_r)^{-1} + 0.25\\omega\n        [17.11 + 25.2(1-T_r)^{1/3}T_r^{-1} + 1.742(1-T_r)^{-1}]\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    omega : float\n        Acentric factor for fluid, [-]\n    Cpgm : float\n        Constant-pressure gas heat capacity, [J/mol/K]\n\n    Returns\n    -------\n    Cplm : float\n        Liquid constant-pressure heat capacitiy, [J/mol/K]\n\n    Notes\n    -----\n    Less accurate than `Rowlinson_Poling`.\n\n    Examples\n    --------\n    >>> Rowlinson_Bondi(T=373.28, Tc=535.55, omega=0.323, Cpgm=119.342)\n    175.39760730048116\n\n    References\n    ----------\n    .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n       New York: McGraw-Hill Professional, 2000.\n    .. [2] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.\n       Berlin; New York:: Springer, 2010.\n    .. [3] J.S. Rowlinson, Liquids and Liquid Mixtures, 2nd Ed.,\n       Butterworth, London (1969).", "id": "f15792:m7"}
{"signature": "def load_all_methods(self):", "body": "methods = [SIMPLE]        <EOL>if len(self.CASs) > <NUM_LIT:1> and '<STR_LIT>' in self.CASs:<EOL><INDENT>wCASs = [i for i in self.CASs if i != '<STR_LIT>'] <EOL>if all([i in _Laliberte_Heat_Capacity_ParametersDict for i in wCASs]):<EOL><INDENT>methods.append(LALIBERTE)<EOL>self.wCASs = wCASs<EOL>self.index_w = self.CASs.index('<STR_LIT>')<EOL><DEDENT><DEDENT>self.all_methods = set(methods)<EOL>", "docstring": "r'''Method to initialize the object by precomputing any values which\n        may be used repeatedly and by retrieving mixture-specific variables.\n        All data are stored as attributes. This method also sets :obj:`Tmin`, \n        :obj:`Tmax`, and :obj:`all_methods` as a set of methods which should \n        work to calculate the property.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15792:c5:m1"}
{"signature": "def Zabransky_cubic(T, a1, a2, a3, a4):", "body": "T = T/<NUM_LIT><EOL>return R*(((a4*T + a3)*T + a2)*T + a1)<EOL>", "docstring": "r'''Calculates liquid heat capacity using the model developed in [1]_.\n\n    .. math::\n        \\frac{C}{R}=\\sum_{j=0}^3 A_{j+1} \\left(\\frac{T}{100}\\right)^j\n\n    Parameters\n    ----------\n    T : float\n        Temperature [K]\n    a1-a4 : float\n        Coefficients\n\n    Returns\n    -------\n    Cp : float\n        Liquid heat capacity, [J/mol/K]\n\n    Notes\n    -----\n    Most often form used in [1]_.\n    Analytical integrals are available for this expression.\n\n    Examples\n    --------\n    >>> Zabransky_cubic(298.15, 20.9634, -10.1344, 2.8253, -0.256738)\n    75.31462591538556\n\n    References\n    ----------\n    .. [1] Zabransky, M., V. Ruzicka Jr, V. Majer, and Eugene S. Domalski.\n       Heat Capacity of Liquids: Critical Review and Recommended Values.\n       2 Volume Set. Washington, D.C.: Amer Inst of Physics, 1996.", "id": "f15792:m14"}
{"signature": "def calculate_integral(self, T1, T2, method):", "body": "if method == PERRY151:<EOL><INDENT>H2 = (self.PERRY151_const*T2 + <NUM_LIT:0.5>*self.PERRY151_lin*T2**<NUM_LIT:2> <EOL>- self.PERRY151_quadinv/T2 + self.PERRY151_quad*T2**<NUM_LIT:3>/<NUM_LIT>)<EOL>H1 = (self.PERRY151_const*T1 + <NUM_LIT:0.5>*self.PERRY151_lin*T1**<NUM_LIT:2> <EOL>- self.PERRY151_quadinv/T1 + self.PERRY151_quad*T1**<NUM_LIT:3>/<NUM_LIT>)<EOL>return (H2-H1)*calorie<EOL><DEDENT>elif method == CRCSTD:<EOL><INDENT>return (T2-T1)*self.CRCSTD_Cp<EOL><DEDENT>elif method == LASTOVKA_S:<EOL><INDENT>dH = (Lastovka_solid_integral(T2, self.similarity_variable)<EOL>- Lastovka_solid_integral(T1, self.similarity_variable))<EOL>return property_mass_to_molar(dH, self.MW)<EOL><DEDENT>elif method in self.tabular_data:<EOL><INDENT>return float(quad(self.calculate, T1, T2, args=(method))[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Method to calculate the integral of a property with respect to\n        temperature, using a specified method. Implements the analytical\n        integrals of all available methods except for tabular data.\n\n        Parameters\n        ----------\n        T1 : float\n            Lower limit of integration, [K]\n        T2 : float\n            Upper limit of integration, [K]\n        method : str\n            Method for which to find the integral\n\n        Returns\n        -------\n        integral : float\n            Calculated integral of the property over the given range, \n            [`units*K`]", "id": "f15792:c4:m4"}
{"signature": "def load_all_methods(self):", "body": "methods = []<EOL>Tmins, Tmaxs = [], []<EOL>if self.CASRN and self.CASRN in _PerryI and '<STR_LIT:c>' in _PerryI[self.CASRN]:<EOL><INDENT>self.PERRY151_Tmin = _PerryI[self.CASRN]['<STR_LIT:c>']['<STR_LIT>'] if _PerryI[self.CASRN]['<STR_LIT:c>']['<STR_LIT>'] else <NUM_LIT:0><EOL>self.PERRY151_Tmax = _PerryI[self.CASRN]['<STR_LIT:c>']['<STR_LIT>'] if _PerryI[self.CASRN]['<STR_LIT:c>']['<STR_LIT>'] else <NUM_LIT><EOL>self.PERRY151_const = _PerryI[self.CASRN]['<STR_LIT:c>']['<STR_LIT>']<EOL>self.PERRY151_lin = _PerryI[self.CASRN]['<STR_LIT:c>']['<STR_LIT>']<EOL>self.PERRY151_quad = _PerryI[self.CASRN]['<STR_LIT:c>']['<STR_LIT>']<EOL>self.PERRY151_quadinv = _PerryI[self.CASRN]['<STR_LIT:c>']['<STR_LIT>']<EOL>methods.append(PERRY151)<EOL>Tmins.append(self.PERRY151_Tmin); Tmaxs.append(self.PERRY151_Tmax)<EOL><DEDENT>if self.CASRN in CRC_standard_data.index and not np.isnan(CRC_standard_data.at[self.CASRN, '<STR_LIT>']):<EOL><INDENT>self.CRCSTD_Cp = float(CRC_standard_data.at[self.CASRN, '<STR_LIT>'])<EOL>methods.append(CRCSTD)<EOL><DEDENT>if self.MW and self.similarity_variable:<EOL><INDENT>methods.append(LASTOVKA_S)<EOL>Tmins.append(<NUM_LIT:1.0>); Tmaxs.append(<NUM_LIT>)<EOL><DEDENT>self.all_methods = set(methods)<EOL>if Tmins and Tmaxs:<EOL><INDENT>self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)<EOL><DEDENT>", "docstring": "r'''Method which picks out coefficients for the specified chemical\n        from the various dictionaries and DataFrames storing it. All data is\n        stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,\n        and :obj:`all_methods` as a set of methods for which the data exists for.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15792:c4:m1"}
{"signature": "def calculate(self, T):", "body": "return Zabransky_quasi_polynomial(T, self.Tc, *self.coeffs)<EOL>", "docstring": "r'''Method to actually calculate heat capacity as a function of \n        temperature.\n\n        Parameters\n        ----------\n        T : float\n            Temperature, [K]\n\n        Returns\n        -------\n        Cp : float\n            Liquid heat capacity as T, [J/mol/K]", "id": "f15792:c1:m1"}
{"signature": "def sigma_Stiel_Thodos(Vc, Zc):", "body": "Vc = Vc*<NUM_LIT><EOL>sigma = <NUM_LIT>*Vc**(<NUM_LIT:1>/<NUM_LIT>)*Zc**(-<NUM_LIT>)<EOL>return sigma<EOL>", "docstring": "r'''Calculates Lennard-Jones molecular diameter.\n    Uses critical volume and compressibility. CSP method by [1]_.\n\n    .. math::\n        \\sigma = 0.1866 V_c^{1/3} Z_c^{-6/5}\n\n    Parameters\n    ----------\n    Vc : float\n        Critical volume of fluid [m^3/mol]\n    Zc : float\n        Critical compressibility of fluid, [-]\n\n    Returns\n    -------\n    sigma : float\n        Lennard-Jones molecular diameter, [Angstrom]\n\n    Notes\n    -----\n    Vc is originally in units of mL/mol.\n\n    Examples\n    --------\n    Monofluorobenzene\n\n    >>> sigma_Stiel_Thodos(0.000271, 0.265)\n    5.94300853971033\n\n    References\n    ----------\n    .. [1] Stiel, L. I., and George Thodos. \"Lennard-Jones Force Constants\n       Predicted from Critical Properties.\" Journal of Chemical & Engineering\n       Data 7, no. 2 (April 1, 1962): 234-36. doi:10.1021/je60013a023", "id": "f15793:m7"}
{"signature": "def epsilon_Bird_Stewart_Lightfoot_boiling(Tb):", "body": "epsilon_k = <NUM_LIT>*Tb<EOL>return epsilon_k<EOL>", "docstring": "r'''Calculates Lennard-Jones depth of potential-energy minimum.\n    Uses boiling temperature. CSP method by [1]_.\n\n    .. math::\n        \\epsilon/k = 1.15 T_b\n\n    Parameters\n    ----------\n    Tb : float\n        Boiling temperature [K]\n\n    Returns\n    -------\n    epsilon_k : float\n        Lennard-Jones depth of potential-energy minimum over k, [K]\n\n    Notes\n    -----\n\n    Examples\n    --------\n    >>> epsilon_Bird_Stewart_Lightfoot_boiling(357.85)\n    411.5275\n\n    References\n    ----------\n    .. [1] Bird, R. Byron, Warren E. Stewart, and Edwin N. Lightfoot.\n       Transport Phenomena, Revised 2nd Edition. New York:\n       John Wiley & Sons, Inc., 2006", "id": "f15793:m13"}
{"signature": "def collision_integral_Neufeld_Janzen_Aziz(Tstar, l=<NUM_LIT:1>, s=<NUM_LIT:1>):", "body": "if (l, s) not in Neufeld_collision:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>A, B, C, D, E, F, G, H, R, S, W, P = Neufeld_collision[(l, s)]<EOL>omega = A/Tstar**B + C/exp(D*Tstar) + E/exp(F*Tstar)<EOL>if (l, s) in [(<NUM_LIT:1>, <NUM_LIT:1>), (<NUM_LIT:1>, <NUM_LIT:2>), (<NUM_LIT:3>, <NUM_LIT:3>)]:<EOL><INDENT>omega += G/exp(H*Tstar)<EOL><DEDENT>if (l, s) not in [(<NUM_LIT:1>, <NUM_LIT:1>), (<NUM_LIT:1>, <NUM_LIT:2>)]:<EOL><INDENT>omega += R*Tstar**B*sin(S*Tstar**W-P)<EOL><DEDENT>return omega<EOL>", "docstring": "r'''Calculates Lennard-Jones collision integral for any of 16 values of\n    (l,j) for the wide range of 0.3 < Tstar < 100. Values are accurate to\n    0.1 % of actual values, but the calculation of actual values is\n    computationally intensive and so these simplifications are used, developed\n    in [1]_.\n\n    .. math::\n        \\Omega_D = \\frac{A}{T^{*B}} + \\frac{C}{\\exp(DT^*)} +\n        \\frac{E}{\\exp(FT^{*})} + \\frac{G}{\\exp(HT^*)} + RT^{*B}\\sin(ST^{*W}-P)\n\n    Parameters\n    ----------\n    Tstar : float\n        Reduced temperature of the fluid [-]\n    l : int\n        term\n    s : int\n        term\n\n    Returns\n    -------\n    Omega : float\n        Collision integral of A and B\n\n    Notes\n    -----\n    Acceptable pairs of (l,s) are (1, 1), (1, 2), (1, 3), (1, 4), (1, 5),\n    (1, 6), (1, 7), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (3, 3), (3, 4),\n    (3, 5), and (4, 4).\n\n    .. math::\n        T^* = \\frac{k_b T}{\\epsilon}\n\n    Results are very similar to those of the more modern formulation,\n    `collision_integral_Kim_Monroe`.\n\n    Calculations begin to yield overflow errors in some values of (l, 2) after\n    Tstar = 75, beginning with (1, 7). 
Also susceptible are (1, 5) and (1, 6).\n\n    Examples\n    --------\n    >>> collision_integral_Neufeld_Janzen_Aziz(100, 1, 1)\n    0.516717697672334\n\n    References\n    ----------\n    .. [1] Neufeld, Philip D., A. R. Janzen, and R. A. Aziz. \"Empirical\n       Equations to Calculate 16 of the Transport Collision Integrals\n       Omega(l, S)* for the Lennard-Jones (12-6) Potential.\" The Journal of\n       Chemical Physics 57, no. 3 (August 1, 1972): 1100-1102.\n       doi:10.1063/1.1678363", "id": "f15793:m18"}
{"signature": "def epsilon_Stiel_Thodos(Tc, Zc):", "body": "epsilon_k = <NUM_LIT>*Tc*Zc**<NUM_LIT><EOL>return epsilon_k<EOL>", "docstring": "r'''Calculates Lennard-Jones depth of potential-energy minimum.\n    Uses Critical temperature and critical compressibility. CSP method by [1]_.\n\n    .. math::\n        \\epsilon/k = 65.3 T_c Z_c^{3.6}\n\n    Parameters\n    ----------\n    Tc : float\n        Critical temperature of fluid [K]\n    Zc : float\n        Critical compressibility of fluid, [-]\n\n    Returns\n    -------\n    epsilon_k : float\n        Lennard-Jones depth of potential-energy minimum over k, [K]\n\n    Notes\n    -----\n\n    Examples\n    --------\n    Fluorobenzene\n\n    >>> epsilon_Stiel_Thodos(358.5, 0.265)\n    196.3755830305783\n\n    References\n    ----------\n    .. [1] Stiel, L. I., and George Thodos. \"Lennard-Jones Force Constants\n       Predicted from Critical Properties.\" Journal of Chemical & Engineering\n       Data 7, no. 2 (April 1, 1962): 234-36. doi:10.1021/je60013a023", "id": "f15793:m15"}
{"signature": "def sigma_Bird_Stewart_Lightfoot_boiling(Vb):", "body": "Vb = Vb*<NUM_LIT><EOL>sigma = <NUM_LIT>*Vb**(<NUM_LIT:1>/<NUM_LIT>)<EOL>return sigma<EOL>", "docstring": "r'''Calculates Lennard-Jones molecular diameter.\n    Uses molar volume of liquid at boiling. CSP method by [1]_.\n\n    .. math::\n        \\sigma = 1.166V_{b,liq}^{1/3}\n\n    Parameters\n    ----------\n    Vb : float\n        Boiling molar volume of liquid [m^3/mol]\n\n    Returns\n    -------\n    sigma : float\n        Lennard-Jones collision integral, [Angstrom]\n\n    Notes\n    -----\n    Original units of Vb are mL/mol.\n\n    Examples\n    --------\n    >>> sigma_Bird_Stewart_Lightfoot_boiling(0.0001015)\n    5.439018856944655\n\n    References\n    ----------\n    .. [1] Bird, R. Byron, Warren E. Stewart, and Edwin N. Lightfoot.\n       Transport Phenomena, Revised 2nd Edition. New York:\n       John Wiley & Sons, Inc., 2006", "id": "f15793:m5"}
{"signature": "def epsilon_Bird_Stewart_Lightfoot_melting(Tm):", "body": "epsilon_k = <NUM_LIT>*Tm<EOL>return epsilon_k<EOL>", "docstring": "r'''Calculates Lennard-Jones depth of potential-energy minimum.\n    Uses melting temperature. CSP method by [1]_.\n\n    .. math::\n        \\epsilon/k = 1.92T_m\n\n    Parameters\n    ----------\n    Tm : float\n        Melting temperature [K]\n\n    Returns\n    -------\n    epsilon_k : float\n        Lennard-Jones depth of potential-energy minimum over k, [K]\n\n    Notes\n    -----\n\n    Examples\n    --------\n    >>> epsilon_Bird_Stewart_Lightfoot_melting(231.15)\n    443.808\n\n    References\n    ----------\n    .. [1] Bird, R. Byron, Warren E. Stewart, and Edwin N. Lightfoot.\n       Transport Phenomena, Revised 2nd Edition. New York:\n       John Wiley & Sons, Inc., 2006", "id": "f15793:m14"}
{"signature": "def sigma_Bird_Stewart_Lightfoot_critical_1(Vc):", "body": "Vc = Vc*<NUM_LIT>  <EOL>sigma = <NUM_LIT>*Vc**(<NUM_LIT:1>/<NUM_LIT>)<EOL>return sigma<EOL>", "docstring": "r'''Calculates Lennard-Jones molecular diameter.\n    Uses critical volume. CSP method by [1]_.\n\n    .. math::\n        \\sigma = 0.841 V_c^{1/3}\n\n    Parameters\n    ----------\n    Vc : float\n        Critical volume of fluid [m^3/mol]\n\n    Returns\n    -------\n    sigma : float\n        Lennard-Jones molecular diameter, [Angstrom]\n\n    Notes\n    -----\n    Original units of Vc are mL/mol.\n\n    Examples\n    --------\n    >>> sigma_Bird_Stewart_Lightfoot_critical_1(0.000268)\n    5.422184116631474\n\n    References\n    ----------\n    .. [1] Bird, R. Byron, Warren E. Stewart, and Edwin N. Lightfoot.\n       Transport Phenomena, Revised 2nd Edition. New York:\n       John Wiley & Sons, Inc., 2006", "id": "f15793:m4"}
{"signature": "def sigma_Tee_Gotoh_Steward_1(Tc, Pc):", "body": "Pc = Pc/<NUM_LIT><EOL>sigma = <NUM_LIT>*(Tc/Pc)**(<NUM_LIT:1>/<NUM_LIT>)<EOL>return sigma<EOL>", "docstring": "r'''Calculates Lennard-Jones molecular diameter.\n    Uses critical temperature and pressure. CSP method by [1]_.\n\n    .. math::\n        \\sigma = 2.3647 \\left(\\frac{T_c}{P_c}\\right)^{1/3}\n\n    Parameters\n    ----------\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n\n    Returns\n    -------\n    sigma : float\n        Lennard-Jones molecular diameter, [Angstrom]\n\n    Notes\n    -----\n    Original units of Pc are atm. Further regressions with other parameters\n    were performed in [1]_ but are not included here, except for\n    `sigma_Tee_Gotoh_Steward_2`.\n\n    Examples\n    --------\n    >>> sigma_Tee_Gotoh_Steward_1(560.1, 4550000)\n    5.48402779790962\n\n    References\n    ----------\n    .. [1] Tee, L. S., Sukehiro Gotoh, and W. E. Stewart. \"Molecular\n       Parameters for Normal Fluids. Lennard-Jones 12-6 Potential.\" Industrial\n       & Engineering Chemistry Fundamentals 5, no. 3 (August 1, 1966): 356-63.\n       doi:10.1021/i160019a011", "id": "f15793:m8"}
{"signature": "def logP(CASRN, AvailableMethods=False, Method=None):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in CRClogPDict.index:<EOL><INDENT>methods.append(CRC)<EOL><DEDENT>if CASRN in SyrresDict2.index:<EOL><INDENT>methods.append(SYRRES)<EOL><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == CRC:<EOL><INDENT>return float(CRClogPDict.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == SYRRES:<EOL><INDENT>return float(SyrresDict2.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == NONE:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''This function handles the retrieval of a chemical's octanol-water\n    partition coefficient. Lookup is based on CASRNs. Will automatically\n    select a data source to use if no Method is provided; returns None if the\n    data is not available.\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    logP : float\n        Octanol-water partition coefficient, [-]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain logP with the\n        given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        The method name to use. Accepted methods are 'SYRRES', or 'CRC', \n        All valid values are also held in the list logP_methods.\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        the logP for the desired chemical, and will return methods\n        instead of the logP\n\n    Notes\n    -----\n    .. 
math::\n        \\log P_{ oct/wat} = \\log\\left(\\frac{\\left[{solute}\n        \\right]_{ octanol}^{un-ionized}}{\\left[{solute}\n        \\right]_{ water}^{ un-ionized}}\\right)\n\n    Examples\n    --------\n    >>> logP('67-56-1')\n    -0.74\n\n    References\n    ----------\n    .. [1] Syrres. 2006. KOWWIN Data, SrcKowData2.zip.\n       http://esc.syrres.com/interkow/Download/SrcKowData2.zip\n    .. [2] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of\n       Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.", "id": "f15794:m2"}
{"signature": "def GWP(CASRN, AvailableMethods=False, Method=None):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in GWP_data.index:<EOL><INDENT>methods.append(IPCC100)<EOL>if not pd.isnull(GWP_data.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(IPCC100SAR)<EOL><DEDENT>methods.append(IPCC20)<EOL>methods.append(IPCC500)<EOL><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == IPCC100:<EOL><INDENT>return float(GWP_data.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == IPCC100SAR:<EOL><INDENT>return float(GWP_data.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == IPCC20:<EOL><INDENT>return float(GWP_data.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == IPCC500:<EOL><INDENT>return float(GWP_data.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == NONE:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''This function handles the retrieval of a chemical's Global Warming\n    Potential, relative to CO2. Lookup is based on CASRNs. Will automatically\n    select a data source to use if no Method is provided; returns None if the\n    data is not available.\n\n    Returns the GWP for the 100yr outlook by default.\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    GWP : float\n        Global warming potential, [(impact/mass chemical)/(impact/mass CO2)]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain GWP with the\n        given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        The method name to use. Accepted methods are IPCC (2007) 100yr',\n        'IPCC (2007) 100yr-SAR', 'IPCC (2007) 20yr', and 'IPCC (2007) 500yr'. 
\n        All valid values are also held in the list GWP_methods.\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        the GWP for the desired chemical, and will return methods\n        instead of the GWP\n\n    Notes\n    -----\n    All data is from [1]_, the official source. Several chemicals are available\n    in [1]_ are not included here as they do not have a CAS.\n    Methods are 'IPCC (2007) 100yr', 'IPCC (2007) 100yr-SAR',\n    'IPCC (2007) 20yr', and 'IPCC (2007) 500yr'.\n\n    Examples\n    --------\n    Methane, 100-yr outlook\n\n    >>> GWP(CASRN='74-82-8')\n    25.0\n\n    References\n    ----------\n    .. [1] IPCC. \"2.10.2 Direct Global Warming Potentials - AR4 WGI Chapter 2:\n       Changes in Atmospheric Constituents and in Radiative Forcing.\" 2007.\n       https://www.ipcc.ch/publications_and_data/ar4/wg1/en/ch2s2-10-2.html.", "id": "f15794:m0"}
{"signature": "def economic_status(CASRN, Method=None, AvailableMethods=False):  ", "body": "load_economic_data()<EOL>CASi = CAS2int(CASRN)<EOL>def list_methods():<EOL><INDENT>methods = []<EOL>methods.append('<STR_LIT>')<EOL>if CASRN in _EPACDRDict:<EOL><INDENT>methods.append(EPACDR)<EOL><DEDENT>if CASRN in _ECHATonnageDict:<EOL><INDENT>methods.append(ECHA)<EOL><DEDENT>if CASi in HPV_data.index:<EOL><INDENT>methods.append(OECD)<EOL><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == EPACDR:<EOL><INDENT>status = '<STR_LIT>' + str(_EPACDRDict[CASRN])<EOL><DEDENT>elif Method == ECHA:<EOL><INDENT>status = _ECHATonnageDict[CASRN]<EOL><DEDENT>elif Method == OECD:<EOL><INDENT>status = '<STR_LIT>'<EOL><DEDENT>elif Method == '<STR_LIT>':<EOL><INDENT>status = []<EOL>if CASRN in _EPACDRDict:<EOL><INDENT>status += ['<STR_LIT>' + str(_EPACDRDict[CASRN])]<EOL><DEDENT>if CASRN in _ECHATonnageDict:<EOL><INDENT>status += _ECHATonnageDict[CASRN]<EOL><DEDENT>if CASi in HPV_data.index:<EOL><INDENT>status += ['<STR_LIT>']<EOL><DEDENT><DEDENT>elif Method == NONE:<EOL><INDENT>status = None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return status<EOL>", "docstring": "Look up the economic status of a chemical.\n\n    This API is considered experimental, and is expected to be removed in a\n    future release in favor of a more complete object-oriented interface.\n\n    >>> pprint(economic_status(CASRN='98-00-0'))\n    [\"US public: {'Manufactured': 0.0, 'Imported': 10272.711, 'Exported': 184.127}\",\n     u'10,000 - 100,000 tonnes per annum',\n     'OECD HPV Chemicals']\n\n    >>> economic_status(CASRN='13775-50-3')  # SODIUM SESQUISULPHATE\n    []\n    >>> economic_status(CASRN='98-00-0', Method='OECD high production volume chemicals')\n    'OECD HPV Chemicals'\n    >>> 
economic_status(CASRN='98-01-1', Method='European Chemicals Agency Total Tonnage Bands')\n    [u'10,000 - 100,000 tonnes per annum']", "id": "f15795:m3"}
{"signature": "def legal_status(CASRN, Method=None, AvailableMethods=False, CASi=None):", "body": "load_law_data()<EOL>if not CASi:<EOL><INDENT>CASi = CAS2int(CASRN)<EOL><DEDENT>methods = [COMBINED, DSL, TSCA, EINECS, NLP, SPIN]<EOL>if AvailableMethods:<EOL><INDENT>return methods<EOL><DEDENT>if not Method:<EOL><INDENT>Method = methods[<NUM_LIT:0>]<EOL><DEDENT>if Method == DSL:<EOL><INDENT>if CASi in DSL_data.index:<EOL><INDENT>status = CAN_DSL_flags[DSL_data.at[CASi, '<STR_LIT>']]<EOL><DEDENT>else:<EOL><INDENT>status = UNLISTED<EOL><DEDENT><DEDENT>elif Method == TSCA:<EOL><INDENT>if CASi in TSCA_data.index:<EOL><INDENT>data = TSCA_data.loc[CASi].to_dict()<EOL>if any(data.values()):<EOL><INDENT>status = sorted([TSCA_flags[i] for i in data.keys() if data[i]])<EOL><DEDENT>else:<EOL><INDENT>status = LISTED<EOL><DEDENT><DEDENT>else:<EOL><INDENT>status = UNLISTED<EOL><DEDENT><DEDENT>elif Method == EINECS:<EOL><INDENT>if CASi in EINECS_data.index:<EOL><INDENT>status = LISTED<EOL><DEDENT>else:<EOL><INDENT>status = UNLISTED<EOL><DEDENT><DEDENT>elif Method == NLP:<EOL><INDENT>if CASi in NLP_data.index:<EOL><INDENT>status = LISTED<EOL><DEDENT>else:<EOL><INDENT>status = UNLISTED<EOL><DEDENT><DEDENT>elif Method == SPIN:<EOL><INDENT>if CASi in SPIN_data.index:<EOL><INDENT>status = LISTED<EOL><DEDENT>else:<EOL><INDENT>status = UNLISTED<EOL><DEDENT><DEDENT>elif Method == COMBINED:<EOL><INDENT>status = {}<EOL>for method in methods[<NUM_LIT:1>:]:<EOL><INDENT>status[method] = legal_status(CASRN, Method=method, CASi=CASi)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return status<EOL>", "docstring": "r'''Looks up the legal status of a chemical according to either a specifc\n    method or with all methods.\n\n    Returns either the status as a string for a specified method, or the\n    status of the chemical in all available data sources, in the format\n    {source: status}.\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n   
 Returns\n    -------\n    status : str or dict\n        Legal status information [-]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain legal status with the\n        given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        A string for the method name to use, as defined by constants in\n        legal_status_methods\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        the legal status for the desired chemical, and will return methods\n        instead of the status\n    CASi : int, optional\n        CASRN as an integer, used internally [-]\n\n    Notes\n    -----\n\n    Supported methods are:\n\n        * **DSL**: Canada Domestic Substance List, [1]_. As extracted on Feb 11, 2015\n          from a html list. This list is updated continuously, so this version\n          will always be somewhat old. Strictly speaking, there are multiple\n          lists but they are all bundled together here. A chemical may be\n          'Listed', or be on the 'Non-Domestic Substances List (NDSL)',\n          or be on the list of substances with 'Significant New Activity (SNAc)',\n          or be on the DSL but with a 'Ministerial Condition pertaining to this\n          substance', or have been removed from the DSL, or have had a\n          Ministerial prohibition for the substance.\n        * **TSCA**: USA EPA Toxic Substances Control Act Chemical Inventory, [2]_.\n          This list is as extracted on 2016-01. It is believed this list is\n          updated on a periodic basis (> 6 month). A chemical may simply be\n          'Listed', or may have certain flags attached to it. All these flags\n          are described in the dict TSCA_flags.\n        * **EINECS**: European INventory of Existing Commercial chemical\n          Substances, [3]_. 
As extracted from a spreadsheet dynamically\n          generated at [1]_. This list was obtained March 2015; a more recent\n          revision already exists.\n        * **NLP**: No Longer Polymers, a list of chemicals with special\n          regulatory exemptions in EINECS. Also described at [3]_.\n        * **SPIN**: Substances Prepared in Nordic Countries. Also a boolean\n          data type. Retrieved 2015-03 from [4]_.\n\n    Other methods which could be added are:\n\n        * Australia: AICS Australian Inventory of Chemical Substances\n        * China: Inventory of Existing Chemical Substances Produced or Imported\n          in China (IECSC)\n        * Europe: REACH List of Registered Substances\n        * India: List of Hazardous Chemicals\n        * Japan: ENCS: Inventory of existing and new chemical substances\n        * Korea: Existing Chemicals Inventory (KECI)\n        * Mexico: INSQ National Inventory of Chemical Substances in Mexico\n        * New Zealand:  Inventory of Chemicals (NZIoC)\n        * Philippines: PICCS Philippines Inventory of Chemicals and Chemical\n          Substances\n\n    Examples\n    --------\n    >>> pprint(legal_status('64-17-5'))\n    {'DSL': 'LISTED',\n     'EINECS': 'LISTED',\n     'NLP': 'UNLISTED',\n     'SPIN': 'LISTED',\n     'TSCA': 'LISTED'}\n\n    References\n    ----------\n    .. [1] Government of Canada.. \"Substances Lists\" Feb 11, 2015.\n       https://www.ec.gc.ca/subsnouvelles-newsubs/default.asp?n=47F768FE-1.\n    .. [2] US EPA. \"TSCA Chemical Substance Inventory.\" Accessed April 2016.\n       https://www.epa.gov/tsca-inventory.\n    .. [3] ECHA. \"EC Inventory\". Accessed March 2015.\n       http://echa.europa.eu/information-on-chemicals/ec-inventory.\n    .. [4] SPIN. \"SPIN Substances in Products In Nordic Countries.\" Accessed\n       March 2015. http://195.215.202.233/DotNetNuke/default.aspx.", "id": "f15795:m1"}
{"signature": "def Zc(CASRN, AvailableMethods=False, Method=None, IgnoreMethods=[COMBINED]):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in _crit_IUPAC.index and not np.isnan(_crit_IUPAC.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(IUPAC)<EOL><DEDENT>if CASRN in _crit_Matthews.index and not np.isnan(_crit_Matthews.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(MATTHEWS)<EOL><DEDENT>if CASRN in _crit_CRC.index and not np.isnan(_crit_CRC.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(CRC)<EOL><DEDENT>if CASRN in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(PSRK)<EOL><DEDENT>if CASRN in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(YAWS)<EOL><DEDENT>if Tc(CASRN) and Vc(CASRN) and Pc(CASRN):<EOL><INDENT>methods.append(COMBINED)<EOL><DEDENT>if IgnoreMethods:<EOL><INDENT>for Method in IgnoreMethods:<EOL><INDENT>if Method in methods:<EOL><INDENT>methods.remove(Method)<EOL><DEDENT><DEDENT><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == IUPAC:<EOL><INDENT>_Zc = float(_crit_IUPAC.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == PSRK:<EOL><INDENT>_Zc = float(_crit_PSRKR4.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == MATTHEWS:<EOL><INDENT>_Zc = float(_crit_Matthews.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == CRC:<EOL><INDENT>_Zc = float(_crit_CRC.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == YAWS:<EOL><INDENT>_Zc = float(_crit_Yaws.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == COMBINED:<EOL><INDENT>_Zc = Vc(CASRN)*Pc(CASRN)/Tc(CASRN)/R<EOL><DEDENT>elif Method == NONE:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return _Zc<EOL>", "docstring": "r'''This function handles the retrieval of a 
chemical's critical\n    compressibility. Lookup is based on CASRNs. Will automatically select a\n    data source to use if no Method is provided; returns None if the data is\n    not available.\n\n    Prefered sources are 'IUPAC' for organic chemicals, and 'MATTHEWS' for \n    inorganic chemicals. Function has data for approximately 1000 chemicals.\n\n    Examples\n    --------\n    >>> Zc(CASRN='64-17-5')\n    0.24100000000000002\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    Zc : float\n        Critical compressibility, [-]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain Vc with the given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        The method name to use. Accepted methods are 'IUPAC', 'MATTHEWS', \n        'CRC', 'PSRK', 'YAWS', and 'COMBINED'. All valid values are also held  \n        in `Zc_methods`.\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        Zc for the desired chemical, and will return methods instead of Zc\n    IgnoreMethods : list, optional\n        A list of methods to ignore in obtaining the full list of methods,\n        useful for for performance reasons and ignoring inaccurate methods\n\n    Notes\n    -----\n    A total of five sources are available for this function. 
They are:\n\n        * 'IUPAC', a series of critically evaluated\n          experimental datum for organic compounds in [1]_, [2]_, [3]_, [4]_,\n          [5]_, [6]_, [7]_, [8]_, [9]_, [10]_, [11]_, and [12]_.\n        * 'MATTHEWS', a series of critically\n          evaluated data for inorganic compounds in [13]_.\n        * 'CRC', a compillation of critically\n          evaluated data by the TRC as published in [14]_.\n        * 'PSRK', a compillation of experimental and\n          estimated data published in [15]_.\n        * 'YAWS', a large compillation of data from a\n          variety of sources; no data points are sourced in the work of [16]_.\n\n    References\n    ----------\n    .. [1] Ambrose, Douglas, and Colin L. Young. \"Vapor-Liquid Critical\n       Properties of Elements and Compounds. 1. An Introductory Survey.\"\n       Journal of Chemical & Engineering Data 41, no. 1 (January 1, 1996):\n       154-154. doi:10.1021/je950378q.\n    .. [2] Ambrose, Douglas, and Constantine Tsonopoulos. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 2. Normal Alkanes.\"\n       Journal of Chemical & Engineering Data 40, no. 3 (May 1, 1995): 531-46.\n       doi:10.1021/je00019a001.\n    .. [3] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 3. Aromatic\n       Hydrocarbons.\" Journal of Chemical & Engineering Data 40, no. 3\n       (May 1, 1995): 547-58. doi:10.1021/je00019a002.\n    .. [4] Gude, Michael, and Amyn S. Teja. \"Vapor-Liquid Critical Properties\n       of Elements and Compounds. 4. Aliphatic Alkanols.\" Journal of Chemical\n       & Engineering Data 40, no. 5 (September 1, 1995): 1025-36.\n       doi:10.1021/je00021a001.\n    .. [5] Daubert, Thomas E. \"Vapor-Liquid Critical Properties of Elements\n       and Compounds. 5. Branched Alkanes and Cycloalkanes.\" Journal of\n       Chemical & Engineering Data 41, no. 
3 (January 1, 1996): 365-72.\n       doi:10.1021/je9501548.\n    .. [6] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 6. Unsaturated Aliphatic\n       Hydrocarbons.\" Journal of Chemical & Engineering Data 41, no. 4\n       (January 1, 1996): 645-56. doi:10.1021/je9501999.\n    .. [7] Kudchadker, Arvind P., Douglas Ambrose, and Constantine Tsonopoulos.\n       \"Vapor-Liquid Critical Properties of Elements and Compounds. 7. Oxygen\n       Compounds Other Than Alkanols and Cycloalkanols.\" Journal of Chemical &\n       Engineering Data 46, no. 3 (May 1, 2001): 457-79. doi:10.1021/je0001680.\n    .. [8] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 8. Organic Sulfur,\n       Silicon, and Tin Compounds (C + H + S, Si, and Sn).\" Journal of Chemical\n       & Engineering Data 46, no. 3 (May 1, 2001): 480-85.\n       doi:10.1021/je000210r.\n    .. [9] Marsh, Kenneth N., Colin L. Young, David W. Morton, Douglas Ambrose,\n       and Constantine Tsonopoulos. \"Vapor-Liquid Critical Properties of\n       Elements and Compounds. 9. Organic Compounds Containing Nitrogen.\"\n       Journal of Chemical & Engineering Data 51, no. 2 (March 1, 2006):\n       305-14. doi:10.1021/je050221q.\n    .. [10] Marsh, Kenneth N., Alan Abramson, Douglas Ambrose, David W. Morton,\n       Eugene Nikitin, Constantine Tsonopoulos, and Colin L. Young.\n       \"Vapor-Liquid Critical Properties of Elements and Compounds. 10. Organic\n       Compounds Containing Halogens.\" Journal of Chemical & Engineering Data\n       52, no. 5 (September 1, 2007): 1509-38. doi:10.1021/je700336g.\n    .. [11] Ambrose, Douglas, Constantine Tsonopoulos, and Eugene D. Nikitin.\n       \"Vapor-Liquid Critical Properties of Elements and Compounds. 11. 
Organic\n       Compounds Containing B + O; Halogens + N, + O, + O + S, + S, + Si;\n       N + O; and O + S, + Si.\" Journal of Chemical & Engineering Data 54,\n       no. 3 (March 12, 2009): 669-89. doi:10.1021/je800580z.\n    .. [12] Ambrose, Douglas, Constantine Tsonopoulos, Eugene D. Nikitin, David\n       W. Morton, and Kenneth N. Marsh. \"Vapor-Liquid Critical Properties of\n       Elements and Compounds. 12. Review of Recent Data for Hydrocarbons and\n       Non-Hydrocarbons.\" Journal of Chemical & Engineering Data, October 5,\n       2015, 151005081500002. doi:10.1021/acs.jced.5b00571.\n    .. [13] Mathews, Joseph F. \"Critical Constants of Inorganic Substances.\"\n       Chemical Reviews 72, no. 1 (February 1, 1972): 71-100.\n       doi:10.1021/cr60275a004.\n    .. [14] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of\n       Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.\n    .. [15] Horstmann, Sven, Anna Jab\u0142oniec, J\u00f6rg Krafczyk, Kai Fischer, and\n       J\u00fcrgen Gmehling. \"PSRK Group Contribution Equation of State:\n       Comprehensive Revision and Extension IV, Including Critical Constants\n       and \u0391-Function Parameters for 1000 Components.\" Fluid Phase Equilibria\n       227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.\n    .. [16] Yaws, Carl L. Thermophysical Properties of Chemicals and\n       Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional\n       Publishing, 2014.", "id": "f15798:m3"}
{"signature": "def Pc_mixture(Pcs=None, zs=None, CASRNs=None, AvailableMethods=False, Method=None):  ", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if none_and_length_check([Pcs]):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>methods.append('<STR_LIT:None>')<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == '<STR_LIT>':<EOL><INDENT>return mixing_simple(zs, Pcs)<EOL><DEDENT>elif Method == '<STR_LIT:None>':<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "This function handles the retrival of a mixture's critical temperature.\n\n    This API is considered experimental, and is expected to be removed in a\n    future release in favor of a more complete object-oriented interface.\n\n    >>> Pc_mixture([2.2E7, 1.1E7], [0.3, 0.7])\n    14300000.0", "id": "f15798:m15"}
{"signature": "def Mersmann_Kind_predictor(atoms, coeff=<NUM_LIT>, power=<NUM_LIT:0.5>, <EOL>covalent_radii=rcovs_Mersmann_Kind):", "body": "H_RADIUS_COV = covalent_radii['<STR_LIT:H>']<EOL>tot = <NUM_LIT:0><EOL>atom_count = <NUM_LIT:0><EOL>for atom, count in atoms.items():<EOL><INDENT>if atom not in covalent_radii:<EOL><INDENT>raise Exception('<STR_LIT>' %atom)<EOL><DEDENT>tot += count*covalent_radii[atom]<EOL>atom_count += count<EOL><DEDENT>da = <NUM_LIT>*tot/atom_count<EOL>ra = da/<NUM_LIT><EOL>da_SI = da*<NUM_LIT> <EOL>return ((coeff/(ra/H_RADIUS_COV)**power)*da_SI)**<NUM_LIT:3>*N_A*atom_count<EOL>", "docstring": "r'''Predicts the critical molar volume of a chemical based only on its\n    atomic composition according to [1]_ and [2]_. This is a crude approach,\n    but provides very reasonable\n    estimates in practice. Optionally, the `coeff` used and the `power` in the\n    fraction as well as the atomic contributions can be adjusted; this method\n    is general and atomic contributions can be regressed to predict other\n    properties with this routine.\n\n    .. 
math::\n        \\frac{\\left(\\frac{V_c}{n_a N_A}\\right)^{1/3}}{d_a}\n        = \\frac{3.645}{\\left(\\frac{r_a}{r_H}\\right)^{1/2}}\n\n        r_a = d_a/2\n\n        d_a = 2 \\frac{\\sum_i (n_i r_i)}{n_a}\n\n    In the above equations, :math:`n_i` is the number of atoms of species i in\n    the molecule, :math:`r_i` is the covalent atomic radius of the atom, and \n    :math:`n_a` is the total number of atoms in the molecule.\n\n    Parameters\n    ----------\n    atoms : dict\n        Dictionary of atoms and their counts, [-]\n    coeff : float, optional\n        Coefficient used in the relationship, [m^2]\n    power : float, optional\n        Power applied to the relative atomic radius, [-]\n    covalent_radii : dict or indexable, optional\n        Object which can be indexed to atomic contrinbutions (by symbol), [-]\n\n    Returns\n    -------\n    Vc : float\n        Predicted critical volume of the chemical, [m^3/mol]\n\n    Notes\n    -----    \n    Using the :obj:`thermo.elements.periodic_table` covalent radii (from RDKit), \n    the coefficient and power should be 4.261206523632586 and 0.5597281770786228\n    respectively for best results.\n\n    Examples\n    --------\n    Prediction of critical volume of decane:\n\n    >>> Mersmann_Kind_predictor({'C': 10, 'H': 22})\n    0.0005851859052024729\n\n    This is compared against the experimental value, 0.000624 (a 6.2% relative\n    error)\n\n    Using custom fitted coefficients we can do a bit better:\n\n    >>> from thermo.critical import rcovs_regressed\n    >>> Mersmann_Kind_predictor({'C': 10, 'H': 22}, coeff=4.261206523632586, \n    ... power=0.5597281770786228, covalent_radii=rcovs_regressed)\n    0.0005956871011923075\n\n    The relative error is only 4.5% now. 
This is compared to an experimental \n    uncertainty of 5.6%.\n\n    Evaluating 1321 critical volumes in the database, the average relative\n    error is 5.0%; standard deviation 6.8%; and worst value of 79% relative\n    error for phosphorus.\n\n    References\n    ----------\n    .. [1] Mersmann, Alfons, and Matthias Kind. \"Correlation for the Prediction\n       of Critical Molar Volume.\" Industrial & Engineering Chemistry Research,\n       October 16, 2017. https://doi.org/10.1021/acs.iecr.7b03171.\n    .. [2] Mersmann, Alfons, and Matthias Kind. \"Prediction of Mechanical and \n       Thermal Properties of Pure Liquids, of Critical Data, and of Vapor \n       Pressure.\" Industrial & Engineering Chemistry Research, January 31, \n       2017. https://doi.org/10.1021/acs.iecr.6b04323.", "id": "f15798:m4"}
{"signature": "def Tc(CASRN, AvailableMethods=False, Method=None, IgnoreMethods=[SURF]):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in _crit_IUPAC.index and not np.isnan(_crit_IUPAC.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(IUPAC)<EOL><DEDENT>if CASRN in _crit_Matthews.index and not np.isnan(_crit_Matthews.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(MATTHEWS)<EOL><DEDENT>if CASRN in _crit_CRC.index and not np.isnan(_crit_CRC.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(CRC)<EOL><DEDENT>if CASRN in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(PSRK)<EOL><DEDENT>if CASRN in _crit_PassutDanner.index and not np.isnan(_crit_PassutDanner.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(PD)<EOL><DEDENT>if CASRN in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(YAWS)<EOL><DEDENT>if CASRN:<EOL><INDENT>methods.append(SURF)<EOL><DEDENT>if IgnoreMethods:<EOL><INDENT>for Method in IgnoreMethods:<EOL><INDENT>if Method in methods:<EOL><INDENT>methods.remove(Method)<EOL><DEDENT><DEDENT><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == IUPAC:<EOL><INDENT>_Tc = float(_crit_IUPAC.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == MATTHEWS:<EOL><INDENT>_Tc = float(_crit_Matthews.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == PSRK:<EOL><INDENT>_Tc = float(_crit_PSRKR4.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == PD:<EOL><INDENT>_Tc = float(_crit_PassutDanner.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == CRC:<EOL><INDENT>_Tc = float(_crit_CRC.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == YAWS:<EOL><INDENT>_Tc = float(_crit_Yaws.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == SURF:<EOL><INDENT>_Tc = third_property(CASRN=CASRN, 
T=True)<EOL><DEDENT>elif Method == NONE:<EOL><INDENT>_Tc = None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return _Tc<EOL>", "docstring": "r'''This function handles the retrieval of a chemical's critical\n    temperature. Lookup is based on CASRNs. Will automatically select a data\n    source to use if no Method is provided; returns None if the data is not\n    available.\n\n    Prefered sources are 'IUPAC' for organic chemicals, and 'MATTHEWS' for \n    inorganic chemicals. Function has data for approximately 1000 chemicals.\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    Tc : float\n        Critical temperature, [K]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain Tc with the given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        The method name to use. Accepted methods are 'IUPAC', 'MATTHEWS', \n        'CRC', 'PSRK', 'PD', 'YAWS', and 'SURF'. All valid values are also held  \n        in the list `Tc_methods`.\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        Tc for the desired chemical, and will return methods instead of Tc\n    IgnoreMethods : list, optional\n        A list of methods to ignore in obtaining the full list of methods,\n        useful for for performance reasons and ignoring inaccurate methods\n\n    Notes\n    -----\n    A total of seven sources are available for this function. 
They are:\n\n        * 'IUPAC Organic Critical Properties', a series of critically evaluated\n          experimental datum for organic compounds in [1]_, [2]_, [3]_, [4]_,\n          [5]_, [6]_, [7]_, [8]_, [9]_, [10]_, [11]_, and [12]_.\n        * 'Matthews Inorganic Critical Properties', a series of critically\n          evaluated data for inorganic compounds in [13]_.\n        * 'CRC Organic Critical Properties', a compillation of critically\n          evaluated data by the TRC as published in [14]_.\n        * 'PSRK Revision 4 Appendix', a compillation of experimental and\n          estimated data published in [15]_.\n        * 'Passut Danner 1973 Critical Properties', an older compillation of\n          data published in [16]_\n        * 'Yaws Critical Properties', a large compillation of data from a\n          variety of sources; no data points are sourced in the work of [17]_.\n        * Critical Surface', an estimation method using a\n          simple quadratic method for estimating Tc from Pc and Vc. This is\n          ignored and not returned as a method by default, as no compounds\n          have values of Pc and Vc but not Tc currently.\n\n    Examples\n    --------\n    >>> Tc(CASRN='64-17-5')\n    514.0\n\n    References\n    ----------\n    .. [1] Ambrose, Douglas, and Colin L. Young. \"Vapor-Liquid Critical\n       Properties of Elements and Compounds. 1. An Introductory Survey.\"\n       Journal of Chemical & Engineering Data 41, no. 1 (January 1, 1996):\n       154-154. doi:10.1021/je950378q.\n    .. [2] Ambrose, Douglas, and Constantine Tsonopoulos. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 2. Normal Alkanes.\"\n       Journal of Chemical & Engineering Data 40, no. 3 (May 1, 1995): 531-46.\n       doi:10.1021/je00019a001.\n    .. [3] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 3. 
Aromatic\n       Hydrocarbons.\" Journal of Chemical & Engineering Data 40, no. 3\n       (May 1, 1995): 547-58. doi:10.1021/je00019a002.\n    .. [4] Gude, Michael, and Amyn S. Teja. \"Vapor-Liquid Critical Properties\n       of Elements and Compounds. 4. Aliphatic Alkanols.\" Journal of Chemical\n       & Engineering Data 40, no. 5 (September 1, 1995): 1025-36.\n       doi:10.1021/je00021a001.\n    .. [5] Daubert, Thomas E. \"Vapor-Liquid Critical Properties of Elements\n       and Compounds. 5. Branched Alkanes and Cycloalkanes.\" Journal of\n       Chemical & Engineering Data 41, no. 3 (January 1, 1996): 365-72.\n       doi:10.1021/je9501548.\n    .. [6] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 6. Unsaturated Aliphatic\n       Hydrocarbons.\" Journal of Chemical & Engineering Data 41, no. 4\n       (January 1, 1996): 645-56. doi:10.1021/je9501999.\n    .. [7] Kudchadker, Arvind P., Douglas Ambrose, and Constantine Tsonopoulos.\n       \"Vapor-Liquid Critical Properties of Elements and Compounds. 7. Oxygen\n       Compounds Other Than Alkanols and Cycloalkanols.\" Journal of Chemical &\n       Engineering Data 46, no. 3 (May 1, 2001): 457-79. doi:10.1021/je0001680.\n    .. [8] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 8. Organic Sulfur,\n       Silicon, and Tin Compounds (C + H + S, Si, and Sn).\" Journal of Chemical\n       & Engineering Data 46, no. 3 (May 1, 2001): 480-85.\n       doi:10.1021/je000210r.\n    .. [9] Marsh, Kenneth N., Colin L. Young, David W. Morton, Douglas Ambrose,\n       and Constantine Tsonopoulos. \"Vapor-Liquid Critical Properties of\n       Elements and Compounds. 9. Organic Compounds Containing Nitrogen.\"\n       Journal of Chemical & Engineering Data 51, no. 2 (March 1, 2006):\n       305-14. doi:10.1021/je050221q.\n    .. 
[10] Marsh, Kenneth N., Alan Abramson, Douglas Ambrose, David W. Morton,\n       Eugene Nikitin, Constantine Tsonopoulos, and Colin L. Young.\n       \"Vapor-Liquid Critical Properties of Elements and Compounds. 10. Organic\n       Compounds Containing Halogens.\" Journal of Chemical & Engineering Data\n       52, no. 5 (September 1, 2007): 1509-38. doi:10.1021/je700336g.\n    .. [11] Ambrose, Douglas, Constantine Tsonopoulos, and Eugene D. Nikitin.\n       \"Vapor-Liquid Critical Properties of Elements and Compounds. 11. Organic\n       Compounds Containing B + O; Halogens + N, + O, + O + S, + S, + Si;\n       N + O; and O + S, + Si.\" Journal of Chemical & Engineering Data 54,\n       no. 3 (March 12, 2009): 669-89. doi:10.1021/je800580z.\n    .. [12] Ambrose, Douglas, Constantine Tsonopoulos, Eugene D. Nikitin, David\n       W. Morton, and Kenneth N. Marsh. \"Vapor-Liquid Critical Properties of\n       Elements and Compounds. 12. Review of Recent Data for Hydrocarbons and\n       Non-Hydrocarbons.\" Journal of Chemical & Engineering Data, October 5,\n       2015, 151005081500002. doi:10.1021/acs.jced.5b00571.\n    .. [13] Mathews, Joseph F. \"Critical Constants of Inorganic Substances.\"\n       Chemical Reviews 72, no. 1 (February 1, 1972): 71-100.\n       doi:10.1021/cr60275a004.\n    .. [14] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of\n       Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.\n    .. [15] Horstmann, Sven, Anna Jab\u0142oniec, J\u00f6rg Krafczyk, Kai Fischer, and\n       J\u00fcrgen Gmehling. \"PSRK Group Contribution Equation of State:\n       Comprehensive Revision and Extension IV, Including Critical Constants\n       and \u0391-Function Parameters for 1000 Components.\" Fluid Phase Equilibria\n       227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.\n    .. [16] Passut, Charles A., and Ronald P. Danner. \"Acentric Factor. 
A\n       Valuable Correlating Parameter for the Properties of Hydrocarbons.\"\n       Industrial & Engineering Chemistry Process Design and Development 12,\n       no. 3 (July 1, 1973): 365\u201368. doi:10.1021/i260047a026.\n    .. [17] Yaws, Carl L. Thermophysical Properties of Chemicals and\n       Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional\n       Publishing, 2014.", "id": "f15798:m0"}
{"signature": "def Meissner(Tc=None, Pc=None, Vc=None):", "body": "if Tc and Vc:<EOL><INDENT>Vc = Vc*<NUM_LIT><EOL>Pc = <NUM_LIT>*Tc/(Vc-<NUM_LIT:8>)<EOL>Pc = <NUM_LIT>*Pc  <EOL>return Pc<EOL><DEDENT>elif Tc and Pc:<EOL><INDENT>Pc = Pc/<NUM_LIT>  <EOL>Vc = <NUM_LIT>/<NUM_LIT>*Tc/Pc+<NUM_LIT:8><EOL>Vc = Vc/<NUM_LIT>  <EOL>return Vc<EOL><DEDENT>elif Pc and Vc:<EOL><INDENT>Pc = Pc/<NUM_LIT>  <EOL>Vc = Vc*<NUM_LIT>  <EOL>Tc = <NUM_LIT>/<NUM_LIT>*Pc*(Vc-<NUM_LIT:8>)<EOL>return Tc<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Old (1942) relationship for estimating critical\n    properties from each other. Two of the three properties are required.\n    This model uses the \"critical surface\", a general plot of Tc vs Pc vs Vc.\n    The model used 42 organic and inorganic compounds to derive the equation.\n    The general equation is in [1]_:\n\n    .. math::\n        P_c = \\frac{2.08 T_c}{V_c-8}\n\n    Parameters\n    ----------\n    Tc : float, optional\n        Critical temperature of fluid [K]\n    Pc : float, optional\n        Critical pressure of fluid [Pa]\n    Vc : float, optional\n        Critical volume of fluid [m^3/mol]\n\n    Returns\n    -------\n    Tc, Pc or Vc : float\n        Critical property of fluid [K], [Pa], or [m^3/mol]\n\n    Notes\n    -----\n    The prediction of Tc from Pc and Vc is not tested, as this is not necessary\n    anywhere, but it is implemented.\n    Internal units are atm, cm^3/mol, and K. A slight error occurs when\n    Pa, cm^3/mol and K are used instead, on the order of <0.2%.\n    This equation is less accurate than that of Ihmels, but surprisingly close.\n    The author also proposed means of estimated properties independently.\n\n    Examples\n    --------\n    Succinic acid [110-15-6]\n\n    >>> Meissner(Tc=851.0, Vc=0.000308)\n    5978445.199999999\n\n    References\n    ----------\n    .. [1] Meissner, H. P., and E. M. Redding. 
\"Prediction of Critical\n           Constants.\" Industrial & Engineering Chemistry 34, no. 5\n           (May 1, 1942): 521-26. doi:10.1021/ie50389a003.", "id": "f15798:m6"}
{"signature": "def Ihmels(Tc=None, Pc=None, Vc=None):", "body": "if Tc and Vc:<EOL><INDENT>Vc = Vc*<NUM_LIT>  <EOL>Pc = -<NUM_LIT>+<NUM_LIT>*Tc/Vc<EOL>Pc = Pc*<NUM_LIT>  <EOL>return Pc<EOL><DEDENT>elif Tc and Pc:<EOL><INDENT>Pc = Pc/<NUM_LIT>  <EOL>Vc = <NUM_LIT>*Tc/(<NUM_LIT:200>*Pc+<NUM_LIT:5>)<EOL>Vc = Vc/<NUM_LIT>  <EOL>return Vc<EOL><DEDENT>elif Pc and Vc:<EOL><INDENT>Pc = Pc/<NUM_LIT>  <EOL>Vc = Vc*<NUM_LIT>  <EOL>Tc = <NUM_LIT>/<NUM_LIT>*(<NUM_LIT>*Pc*Vc + Vc)<EOL>return Tc<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Most recent, and most recommended method of estimating critical\n    properties from each other. Two of the three properties are required.\n    This model uses the \"critical surface\", a general plot of Tc vs Pc vs Vc.\n    The model used 421 organic compounds to derive equation.\n    The general equation is in [1]_:\n\n    .. math::\n        P_c = -0.025 + 2.215 \\frac{T_c}{V_c}\n\n    Parameters\n    ----------\n    Tc : float\n        Critical temperature of fluid (optional) [K]\n    Pc : float\n        Critical pressure of fluid (optional) [Pa]\n    Vc : float\n        Critical volume of fluid (optional) [m^3/mol]\n\n    Returns\n    -------\n    Tc, Pc or Vc : float\n        Critical property of fluid [K], [Pa], or [m^3/mol]\n\n    Notes\n    -----\n    The prediction of Tc from Pc and Vc is not tested, as this is not necessary\n    anywhere, but it is implemented.\n    Internal units are MPa, cm^3/mol, and K. A slight error occurs when\n    Pa, cm^3/mol and K are used instead, on the order of <0.2%.\n    Their equation was also compared with 56 inorganic and elements.\n    Devations of 20% for <200K or >1000K points.\n\n    Examples\n    --------a\n    Succinic acid [110-15-6]\n\n    >>> Ihmels(Tc=851.0, Vc=0.000308)\n    6095016.233766234\n\n    References\n    ----------\n    .. [1] Ihmels, E. Christian. \"The Critical Surface.\" Journal of Chemical\n           & Engineering Data 55, no. 
9 (September 9, 2010): 3474-80.\n           doi:10.1021/je100167w.", "id": "f15798:m5"}
{"signature": "def Vc(CASRN, AvailableMethods=False, Method=None, IgnoreMethods=[SURF]):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in _crit_IUPAC.index and not np.isnan(_crit_IUPAC.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(IUPAC)<EOL><DEDENT>if CASRN in _crit_Matthews.index and not np.isnan(_crit_Matthews.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(MATTHEWS)<EOL><DEDENT>if CASRN in _crit_CRC.index and not np.isnan(_crit_CRC.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(CRC)<EOL><DEDENT>if CASRN in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(PSRK)<EOL><DEDENT>if CASRN in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(YAWS)<EOL><DEDENT>if CASRN:<EOL><INDENT>methods.append(SURF)<EOL><DEDENT>if IgnoreMethods:<EOL><INDENT>for Method in IgnoreMethods:<EOL><INDENT>if Method in methods:<EOL><INDENT>methods.remove(Method)<EOL><DEDENT><DEDENT><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == IUPAC:<EOL><INDENT>_Vc = float(_crit_IUPAC.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == PSRK:<EOL><INDENT>_Vc = float(_crit_PSRKR4.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == MATTHEWS:<EOL><INDENT>_Vc = float(_crit_Matthews.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == CRC:<EOL><INDENT>_Vc = float(_crit_CRC.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == YAWS:<EOL><INDENT>_Vc = float(_crit_Yaws.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == SURF:<EOL><INDENT>_Vc = third_property(CASRN=CASRN, V=True)<EOL><DEDENT>elif Method == NONE:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return _Vc<EOL>", "docstring": "r'''This function handles the retrieval of a chemical's critical\n    volume. 
Lookup is based on CASRNs. Will automatically select a data\n    source to use if no Method is provided; returns None if the data is not\n    available.\n\n    Prefered sources are 'IUPAC' for organic chemicals, and 'MATTHEWS' for \n    inorganic chemicals. Function has data for approximately 1000 chemicals.\n\n    Examples\n    --------\n    >>> Vc(CASRN='64-17-5')\n    0.000168\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    Vc : float\n        Critical volume, [m^3/mol]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain Vc with the given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        The method name to use. Accepted methods are 'IUPAC', 'MATTHEWS', \n        'CRC', 'PSRK', 'YAWS', and 'SURF'. All valid values are also held  \n        in the list `Vc_methods`.\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        Vc for the desired chemical, and will return methods instead of Vc\n    IgnoreMethods : list, optional\n        A list of methods to ignore in obtaining the full list of methods,\n        useful for for performance reasons and ignoring inaccurate methods\n\n    Notes\n    -----\n    A total of six sources are available for this function. 
They are:\n\n        * 'IUPAC', a series of critically evaluated\n          experimental datum for organic compounds in [1]_, [2]_, [3]_, [4]_,\n          [5]_, [6]_, [7]_, [8]_, [9]_, [10]_, [11]_, and [12]_.\n        * 'MATTHEWS', a series of critically\n          evaluated data for inorganic compounds in [13]_.\n        * 'CRC', a compillation of critically\n          evaluated data by the TRC as published in [14]_.\n        * 'PSRK', a compillation of experimental and\n          estimated data published in [15]_.\n        * 'YAWS', a large compillation of data from a\n          variety of sources; no data points are sourced in the work of [16]_.\n        * 'SURF', an estimation method using a\n          simple quadratic method for estimating Pc from Tc and Vc. This is\n          ignored and not returned as a method by default\n\n    References\n    ----------\n    .. [1] Ambrose, Douglas, and Colin L. Young. \"Vapor-Liquid Critical\n       Properties of Elements and Compounds. 1. An Introductory Survey.\"\n       Journal of Chemical & Engineering Data 41, no. 1 (January 1, 1996):\n       154-154. doi:10.1021/je950378q.\n    .. [2] Ambrose, Douglas, and Constantine Tsonopoulos. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 2. Normal Alkanes.\"\n       Journal of Chemical & Engineering Data 40, no. 3 (May 1, 1995): 531-46.\n       doi:10.1021/je00019a001.\n    .. [3] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 3. Aromatic\n       Hydrocarbons.\" Journal of Chemical & Engineering Data 40, no. 3\n       (May 1, 1995): 547-58. doi:10.1021/je00019a002.\n    .. [4] Gude, Michael, and Amyn S. Teja. \"Vapor-Liquid Critical Properties\n       of Elements and Compounds. 4. Aliphatic Alkanols.\" Journal of Chemical\n       & Engineering Data 40, no. 5 (September 1, 1995): 1025-36.\n       doi:10.1021/je00021a001.\n    .. [5] Daubert, Thomas E. 
\"Vapor-Liquid Critical Properties of Elements\n       and Compounds. 5. Branched Alkanes and Cycloalkanes.\" Journal of\n       Chemical & Engineering Data 41, no. 3 (January 1, 1996): 365-72.\n       doi:10.1021/je9501548.\n    .. [6] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 6. Unsaturated Aliphatic\n       Hydrocarbons.\" Journal of Chemical & Engineering Data 41, no. 4\n       (January 1, 1996): 645-56. doi:10.1021/je9501999.\n    .. [7] Kudchadker, Arvind P., Douglas Ambrose, and Constantine Tsonopoulos.\n       \"Vapor-Liquid Critical Properties of Elements and Compounds. 7. Oxygen\n       Compounds Other Than Alkanols and Cycloalkanols.\" Journal of Chemical &\n       Engineering Data 46, no. 3 (May 1, 2001): 457-79. doi:10.1021/je0001680.\n    .. [8] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 8. Organic Sulfur,\n       Silicon, and Tin Compounds (C + H + S, Si, and Sn).\" Journal of Chemical\n       & Engineering Data 46, no. 3 (May 1, 2001): 480-85.\n       doi:10.1021/je000210r.\n    .. [9] Marsh, Kenneth N., Colin L. Young, David W. Morton, Douglas Ambrose,\n       and Constantine Tsonopoulos. \"Vapor-Liquid Critical Properties of\n       Elements and Compounds. 9. Organic Compounds Containing Nitrogen.\"\n       Journal of Chemical & Engineering Data 51, no. 2 (March 1, 2006):\n       305-14. doi:10.1021/je050221q.\n    .. [10] Marsh, Kenneth N., Alan Abramson, Douglas Ambrose, David W. Morton,\n       Eugene Nikitin, Constantine Tsonopoulos, and Colin L. Young.\n       \"Vapor-Liquid Critical Properties of Elements and Compounds. 10. Organic\n       Compounds Containing Halogens.\" Journal of Chemical & Engineering Data\n       52, no. 5 (September 1, 2007): 1509-38. doi:10.1021/je700336g.\n    .. [11] Ambrose, Douglas, Constantine Tsonopoulos, and Eugene D. 
Nikitin.\n       \"Vapor-Liquid Critical Properties of Elements and Compounds. 11. Organic\n       Compounds Containing B + O; Halogens + N, + O, + O + S, + S, + Si;\n       N + O; and O + S, + Si.\" Journal of Chemical & Engineering Data 54,\n       no. 3 (March 12, 2009): 669-89. doi:10.1021/je800580z.\n    .. [12] Ambrose, Douglas, Constantine Tsonopoulos, Eugene D. Nikitin, David\n       W. Morton, and Kenneth N. Marsh. \"Vapor-Liquid Critical Properties of\n       Elements and Compounds. 12. Review of Recent Data for Hydrocarbons and\n       Non-Hydrocarbons.\" Journal of Chemical & Engineering Data, October 5,\n       2015, 151005081500002. doi:10.1021/acs.jced.5b00571.\n    .. [13] Mathews, Joseph F. \"Critical Constants of Inorganic Substances.\"\n       Chemical Reviews 72, no. 1 (February 1, 1972): 71-100.\n       doi:10.1021/cr60275a004.\n    .. [14] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of\n       Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.\n    .. [15] Horstmann, Sven, Anna Jab\u0142oniec, J\u00f6rg Krafczyk, Kai Fischer, and\n       J\u00fcrgen Gmehling. \"PSRK Group Contribution Equation of State:\n       Comprehensive Revision and Extension IV, Including Critical Constants\n       and \u0391-Function Parameters for 1000 Components.\" Fluid Phase Equilibria\n       227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.\n    .. [16] Yaws, Carl L. Thermophysical Properties of Chemicals and\n       Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional\n       Publishing, 2014.", "id": "f15798:m2"}
{"signature": "def Grigoras(Tc=None, Pc=None, Vc=None):", "body": "if Tc and Vc:<EOL><INDENT>Vc = Vc*<NUM_LIT>  <EOL>Pc = <NUM_LIT> + <NUM_LIT>*Tc/Vc<EOL>Pc = Pc*<NUM_LIT>  <EOL>return Pc<EOL><DEDENT>elif Tc and Pc:<EOL><INDENT>Pc = Pc/<NUM_LIT>  <EOL>Vc = <NUM_LIT>*Tc/(<NUM_LIT:10>*Pc-<NUM_LIT>)<EOL>Vc = Vc/<NUM_LIT>  <EOL>return Vc<EOL><DEDENT>elif Pc and Vc:<EOL><INDENT>Pc = Pc/<NUM_LIT>  <EOL>Vc = Vc*<NUM_LIT>  <EOL>Tc = <NUM_LIT:1.0>/<NUM_LIT>*(<NUM_LIT:10>*Pc-<NUM_LIT>)*Vc<EOL>return Tc<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Relatively recent (1990) relationship for estimating critical\n    properties from each other. Two of the three properties are required.\n    This model uses the \"critical surface\", a general plot of Tc vs Pc vs Vc.\n    The model used 137 organic and inorganic compounds to derive the equation.\n    The general equation is in [1]_:\n\n    .. math::\n        P_c = 2.9 + 20.2 \\frac{T_c}{V_c}\n\n    Parameters\n    ----------\n    Tc : float\n        Critical temperature of fluid (optional) [K]\n    Pc : float\n        Critical pressure of fluid (optional) [Pa]\n    Vc : float\n        Critical volume of fluid (optional) [m^3/mol]\n\n    Returns\n    -------\n    Tc, Pc or Vc : float\n        Critical property of fluid [K], [Pa], or [m^3/mol]\n\n    Notes\n    -----\n    The prediction of Tc from Pc and Vc is not tested, as this is not necessary\n    anywhere, but it is implemented.\n    Internal units are bar, cm^3/mol, and K. A slight error occurs when\n    Pa, cm^3/mol and K are used instead, on the order of <0.2%.\n    This equation is less accurate than that of Ihmels, but surprisingly close.\n    The author also investigated an early QSPR model.\n\n    Examples\n    --------\n    Succinic acid [110-15-6]\n\n    >>> Grigoras(Tc=851.0, Vc=0.000308)\n    5871233.766233766\n\n    References\n    ----------\n    .. [1] Grigoras, Stelian. 
\"A Structural Approach to Calculate Physical\n           Properties of Pure Organic Substances: The Critical Temperature,\n           Critical Volume and Related Properties.\" Journal of Computational\n           Chemistry 11, no. 4 (May 1, 1990): 493-510.\n           doi:10.1002/jcc.540110408", "id": "f15798:m7"}
{"signature": "def Pc(CASRN, AvailableMethods=False, Method=None, IgnoreMethods=[SURF]):", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if CASRN in _crit_IUPAC.index and not np.isnan(_crit_IUPAC.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(IUPAC)<EOL><DEDENT>if CASRN in _crit_Matthews.index and not np.isnan(_crit_Matthews.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(MATTHEWS)<EOL><DEDENT>if CASRN in _crit_CRC.index and not np.isnan(_crit_CRC.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(CRC)<EOL><DEDENT>if CASRN in _crit_PSRKR4.index and not np.isnan(_crit_PSRKR4.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(PSRK)<EOL><DEDENT>if CASRN in _crit_PassutDanner.index and not np.isnan(_crit_PassutDanner.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(PD)<EOL><DEDENT>if CASRN in _crit_Yaws.index and not np.isnan(_crit_Yaws.at[CASRN, '<STR_LIT>']):<EOL><INDENT>methods.append(YAWS)<EOL><DEDENT>if CASRN:<EOL><INDENT>methods.append(SURF)<EOL><DEDENT>if IgnoreMethods:<EOL><INDENT>for Method in IgnoreMethods:<EOL><INDENT>if Method in methods:<EOL><INDENT>methods.remove(Method)<EOL><DEDENT><DEDENT><DEDENT>methods.append(NONE)<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == IUPAC:<EOL><INDENT>_Pc = float(_crit_IUPAC.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == MATTHEWS:<EOL><INDENT>_Pc = float(_crit_Matthews.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == CRC:<EOL><INDENT>_Pc = float(_crit_CRC.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == PSRK:<EOL><INDENT>_Pc = float(_crit_PSRKR4.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == PD:<EOL><INDENT>_Pc = float(_crit_PassutDanner.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == YAWS:<EOL><INDENT>_Pc = float(_crit_Yaws.at[CASRN, '<STR_LIT>'])<EOL><DEDENT>elif Method == SURF:<EOL><INDENT>_Pc = third_property(CASRN=CASRN, 
P=True)<EOL><DEDENT>elif Method == NONE:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return _Pc<EOL>", "docstring": "r'''This function handles the retrieval of a chemical's critical\n    pressure. Lookup is based on CASRNs. Will automatically select a data\n    source to use if no Method is provided; returns None if the data is not\n    available.\n\n    Prefered sources are 'IUPAC' for organic chemicals, and 'MATTHEWS' for \n    inorganic chemicals. Function has data for approximately 1000 chemicals.\n\n    Examples\n    --------\n    >>> Pc(CASRN='64-17-5')\n    6137000.0\n\n    Parameters\n    ----------\n    CASRN : string\n        CASRN [-]\n\n    Returns\n    -------\n    Pc : float\n        Critical pressure, [Pa]\n    methods : list, only returned if AvailableMethods == True\n        List of methods which can be used to obtain Pc with the given inputs\n\n    Other Parameters\n    ----------------\n    Method : string, optional\n        The method name to use. Accepted methods are 'IUPAC', 'MATTHEWS', \n        'CRC', 'PSRK', 'PD', 'YAWS', and 'SURF'. All valid values are also held  \n        in the list `Pc_methods`.\n    AvailableMethods : bool, optional\n        If True, function will determine which methods can be used to obtain\n        Pc for the desired chemical, and will return methods instead of Pc\n    IgnoreMethods : list, optional\n        A list of methods to ignore in obtaining the full list of methods,\n        useful for for performance reasons and ignoring inaccurate methods\n\n    Notes\n    -----\n    A total of seven sources are available for this function. 
They are:\n\n        * 'IUPAC', a series of critically evaluated\n          experimental datum for organic compounds in [1]_, [2]_, [3]_, [4]_,\n          [5]_, [6]_, [7]_, [8]_, [9]_, [10]_, [11]_, and [12]_.\n        * 'MATTHEWS', a series of critically\n          evaluated data for inorganic compounds in [13]_.\n        * 'CRC', a compillation of critically\n          evaluated data by the TRC as published in [14]_.\n        * 'PSRK', a compillation of experimental and\n          estimated data published in [15]_.\n        * 'PD', an older compillation of\n          data published in [16]_\n        * 'YAWS', a large compillation of data from a\n          variety of sources; no data points are sourced in the work of [17]_.\n        * SURF', an estimation method using a\n          simple quadratic method for estimating Pc from Tc and Vc. This is\n          ignored and not returned as a method by default.\n\n    References\n    ----------\n    .. [1] Ambrose, Douglas, and Colin L. Young. \"Vapor-Liquid Critical\n       Properties of Elements and Compounds. 1. An Introductory Survey.\"\n       Journal of Chemical & Engineering Data 41, no. 1 (January 1, 1996):\n       154-154. doi:10.1021/je950378q.\n    .. [2] Ambrose, Douglas, and Constantine Tsonopoulos. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 2. Normal Alkanes.\"\n       Journal of Chemical & Engineering Data 40, no. 3 (May 1, 1995): 531-46.\n       doi:10.1021/je00019a001.\n    .. [3] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 3. Aromatic\n       Hydrocarbons.\" Journal of Chemical & Engineering Data 40, no. 3\n       (May 1, 1995): 547-58. doi:10.1021/je00019a002.\n    .. [4] Gude, Michael, and Amyn S. Teja. \"Vapor-Liquid Critical Properties\n       of Elements and Compounds. 4. Aliphatic Alkanols.\" Journal of Chemical\n       & Engineering Data 40, no. 
5 (September 1, 1995): 1025-36.\n       doi:10.1021/je00021a001.\n    .. [5] Daubert, Thomas E. \"Vapor-Liquid Critical Properties of Elements\n       and Compounds. 5. Branched Alkanes and Cycloalkanes.\" Journal of\n       Chemical & Engineering Data 41, no. 3 (January 1, 1996): 365-72.\n       doi:10.1021/je9501548.\n    .. [6] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 6. Unsaturated Aliphatic\n       Hydrocarbons.\" Journal of Chemical & Engineering Data 41, no. 4\n       (January 1, 1996): 645-56. doi:10.1021/je9501999.\n    .. [7] Kudchadker, Arvind P., Douglas Ambrose, and Constantine Tsonopoulos.\n       \"Vapor-Liquid Critical Properties of Elements and Compounds. 7. Oxygen\n       Compounds Other Than Alkanols and Cycloalkanols.\" Journal of Chemical &\n       Engineering Data 46, no. 3 (May 1, 2001): 457-79. doi:10.1021/je0001680.\n    .. [8] Tsonopoulos, Constantine, and Douglas Ambrose. \"Vapor-Liquid\n       Critical Properties of Elements and Compounds. 8. Organic Sulfur,\n       Silicon, and Tin Compounds (C + H + S, Si, and Sn).\" Journal of Chemical\n       & Engineering Data 46, no. 3 (May 1, 2001): 480-85.\n       doi:10.1021/je000210r.\n    .. [9] Marsh, Kenneth N., Colin L. Young, David W. Morton, Douglas Ambrose,\n       and Constantine Tsonopoulos. \"Vapor-Liquid Critical Properties of\n       Elements and Compounds. 9. Organic Compounds Containing Nitrogen.\"\n       Journal of Chemical & Engineering Data 51, no. 2 (March 1, 2006):\n       305-14. doi:10.1021/je050221q.\n    .. [10] Marsh, Kenneth N., Alan Abramson, Douglas Ambrose, David W. Morton,\n       Eugene Nikitin, Constantine Tsonopoulos, and Colin L. Young.\n       \"Vapor-Liquid Critical Properties of Elements and Compounds. 10. Organic\n       Compounds Containing Halogens.\" Journal of Chemical & Engineering Data\n       52, no. 5 (September 1, 2007): 1509-38. doi:10.1021/je700336g.\n    .. 
[11] Ambrose, Douglas, Constantine Tsonopoulos, and Eugene D. Nikitin.\n       \"Vapor-Liquid Critical Properties of Elements and Compounds. 11. Organic\n       Compounds Containing B + O; Halogens + N, + O, + O + S, + S, + Si;\n       N + O; and O + S, + Si.\" Journal of Chemical & Engineering Data 54,\n       no. 3 (March 12, 2009): 669-89. doi:10.1021/je800580z.\n    .. [12] Ambrose, Douglas, Constantine Tsonopoulos, Eugene D. Nikitin, David\n       W. Morton, and Kenneth N. Marsh. \"Vapor-Liquid Critical Properties of\n       Elements and Compounds. 12. Review of Recent Data for Hydrocarbons and\n       Non-Hydrocarbons.\" Journal of Chemical & Engineering Data, October 5,\n       2015, 151005081500002. doi:10.1021/acs.jced.5b00571.\n    .. [13] Mathews, Joseph F. \"Critical Constants of Inorganic Substances.\"\n       Chemical Reviews 72, no. 1 (February 1, 1972): 71-100.\n       doi:10.1021/cr60275a004.\n    .. [14] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of\n       Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.\n    .. [15] Horstmann, Sven, Anna Jab\u0142oniec, J\u00f6rg Krafczyk, Kai Fischer, and\n       J\u00fcrgen Gmehling. \"PSRK Group Contribution Equation of State:\n       Comprehensive Revision and Extension IV, Including Critical Constants\n       and \u0391-Function Parameters for 1000 Components.\" Fluid Phase Equilibria\n       227, no. 2 (January 25, 2005): 157-64. doi:10.1016/j.fluid.2004.11.002.\n    .. [16] Passut, Charles A., and Ronald P. Danner. \"Acentric Factor. A\n       Valuable Correlating Parameter for the Properties of Hydrocarbons.\"\n       Industrial & Engineering Chemistry Process Design and Development 12,\n       no. 3 (July 1, 1973): 365\u201368. doi:10.1021/i260047a026.\n    .. [17] Yaws, Carl L. Thermophysical Properties of Chemicals and\n       Hydrocarbons, Second Edition. Amsterdam Boston: Gulf Professional\n       Publishing, 2014.", "id": "f15798:m1"}
{"signature": "def Chueh_Prausnitz_Vc(zs, Vcs, nus):", "body": "if not none_and_length_check([zs, Vcs]): <EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>denominator = sum(zs[i]*Vcs[i]**(<NUM_LIT:2>/<NUM_LIT>) for i in range(len(zs)))<EOL>Vcm = <NUM_LIT:0><EOL>for i in range(len(zs)):<EOL><INDENT>Vcm += zs[i]*Vcs[i]**(<NUM_LIT:2>/<NUM_LIT>)*Vcs[i]/denominator<EOL>for j in range(len(zs)):<EOL><INDENT>Vcm += (zs[i]*Vcs[i]**(<NUM_LIT:2>/<NUM_LIT>)/denominator)*(zs[j]*Vcs[j]**(<NUM_LIT:2>/<NUM_LIT>)/denominator)*nus[i][j]/<NUM_LIT><EOL><DEDENT><DEDENT>return Vcm<EOL>", "docstring": "r'''Calculates critical volume of a mixture according to\n    mixing rules in [1]_ with an interaction parameter.\n\n    .. math::\n        V_{cm} = \\sum_i^n \\theta_i V_{ci} + \\sum_i^n\\sum_j^n(\\theta_i \\theta_j \\nu_{ij})V_{ref}\n        \\theta = \\frac{x_i V_{ci}^{2/3}}{\\sum_{j=1}^n x_j V_{cj}^{2/3}}\n\n    Parameters\n    ----------\n    zs : float\n        Mole fractions of all components\n    Vcs : float\n        Critical volumes of all components, [m^3/mol]\n    nus : matrix\n        Interaction parameters, [cm^3/mol]\n\n    Returns\n    -------\n    Vcm : float\n        Critical volume of the mixture, [m^3/mol]\n\n    Notes\n    -----\n    All parameters, even if zero, must be given to this function.\n    nu parameters are in cm^3/mol, but are converted to m^3/mol inside the function\n\n\n    Examples\n    --------\n    1-butanol/benzene 0.4271/0.5729 mixture, Vcm = 268.096 mL/mol.\n\n    >>> Chueh_Prausnitz_Vc([0.4271, 0.5729], [0.000273, 0.000256], [[0, 5.61847], [5.61847, 0]])\n    0.00026620503424517445\n\n    References\n    ----------\n    .. [1] Chueh, P. L., and J. M. Prausnitz. \"Vapor-Liquid Equilibria at High\n       Pressures: Calculation of Critical Temperatures, Volumes, and Pressures\n       of Nonpolar Mixtures.\" AIChE Journal 13, no. 6 (November 1, 1967):\n       1107-13. doi:10.1002/aic.690130613.\n    .. 
[2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati.\n       \"Prediction of True Critical Volume of Multi-Component Mixtures:\n       Extending Fast Estimation Methods.\" Fluid Phase Equilibria 386\n       (January 25, 2015): 13-29. doi:10.1016/j.fluid.2014.11.008.", "id": "f15798:m16"}
{"signature": "def Chueh_Prausnitz_Tc(zs, Tcs, Vcs, taus):", "body": "if not none_and_length_check([zs, Tcs, Vcs]):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>denominator = sum(zs[i]*Vcs[i]**(<NUM_LIT:2>/<NUM_LIT>) for i in range(len(zs)))<EOL>Tcm = <NUM_LIT:0><EOL>for i in range(len(zs)):<EOL><INDENT>Tcm += zs[i]*Vcs[i]**(<NUM_LIT:2>/<NUM_LIT>)*Tcs[i]/denominator<EOL>for j in range(len(zs)):<EOL><INDENT>Tcm += (zs[i]*Vcs[i]**(<NUM_LIT:2>/<NUM_LIT>)/denominator)*(zs[j]*Vcs[j]**(<NUM_LIT:2>/<NUM_LIT>)/denominator)*taus[i][j]<EOL><DEDENT><DEDENT>return Tcm<EOL>", "docstring": "r'''Calculates critical temperature of a mixture according to\n    mixing rules in [1]_.\n\n    .. math::\n        T_{cm} = \\sum_i^n \\theta_i Tc_i + \\sum_i^n\\sum_j^n(\\theta_i \\theta_j\n        \\tau_{ij})T_{ref}\n\n        \\theta = \\frac{x_i V_{ci}^{2/3}}{\\sum_{j=1}^n x_j V_{cj}^{2/3}}\n\n    For a binary mxiture, this simplifies to:\n\n    .. math::\n        T_{cm} = \\theta_1T_{c1} + \\theta_2T_{c2}  + 2\\theta_1\\theta_2\\tau_{12}\n\n    Parameters\n    ----------\n    zs : array-like\n        Mole fractions of all components\n    Tcs : array-like\n        Critical temperatures of all components, [K]\n    Vcs : array-like\n        Critical volumes of all components, [m^3/mol]\n    taus : array-like of shape `zs` by `zs`\n        Interaction parameters\n\n    Returns\n    -------\n    Tcm : float\n        Critical temperatures of the mixture, [K]\n\n    Notes\n    -----\n    All parameters, even if zero, must be given to this function.\n\n    Examples\n    --------\n    butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.\n\n    >>> Chueh_Prausnitz_Tc([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6],\n    ... [0.000255, 0.000313, 0.000371], [[0, 1.92681, 6.80358],\n    ... [1.92681, 0, 1.89312], [ 6.80358, 1.89312, 0]])\n    450.1225764723492\n\n    References\n    ----------\n    .. [1] Chueh, P. L., and J. M. Prausnitz. 
\"Vapor-Liquid Equilibria at High\n       Pressures: Calculation of Critical Temperatures, Volumes, and Pressures\n       of Nonpolar Mixtures.\" AIChE Journal 13, no. 6 (November 1, 1967):\n       1107-13. doi:10.1002/aic.690130613.\n    .. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati.\n       \"Prediction of True Critical Temperature of Multi-Component Mixtures:\n       Extending Fast Estimation Methods.\" Fluid Phase Equilibria 392\n       (April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001.", "id": "f15798:m11"}
{"signature": "def Vc_mixture(Vcs=None, zs=None, CASRNs=None, AvailableMethods=False, Method=None):  ", "body": "def list_methods():<EOL><INDENT>methods = []<EOL>if none_and_length_check([Vcs]):<EOL><INDENT>methods.append('<STR_LIT>')<EOL><DEDENT>methods.append('<STR_LIT:None>')<EOL>return methods<EOL><DEDENT>if AvailableMethods:<EOL><INDENT>return list_methods()<EOL><DEDENT>if not Method:<EOL><INDENT>Method = list_methods()[<NUM_LIT:0>]<EOL><DEDENT>if Method == '<STR_LIT>':<EOL><INDENT>return mixing_simple(zs, Vcs)<EOL><DEDENT>elif Method == '<STR_LIT:None>':<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "This function handles the retrival of a mixture's critical temperature.\n\n    This API is considered experimental, and is expected to be removed in a\n    future release in favor of a more complete object-oriented interface.\n\n    >>> Vc_mixture([5.6E-5, 2E-4], [0.3, 0.7])\n    0.0001568", "id": "f15798:m18"}
{"signature": "def T_converter(T, current, desired):", "body": "def range_check(T, Tmin, Tmax):<EOL><INDENT>if T < Tmin or T > Tmax:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>try:<EOL><INDENT>if current == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>elif current == '<STR_LIT>':<EOL><INDENT>range_check(T, <NUM_LIT>, <NUM_LIT>)<EOL>T = T68_to_T90(T)<EOL><DEDENT>elif current == '<STR_LIT>':<EOL><INDENT>range_check(T, <NUM_LIT>, <NUM_LIT>)<EOL>T = T76_to_T90(T)<EOL><DEDENT>elif current == '<STR_LIT>':<EOL><INDENT>range_check(T, <NUM_LIT>, <NUM_LIT>)<EOL>T = T48_to_T90(T)<EOL><DEDENT>elif current == '<STR_LIT>':<EOL><INDENT>range_check(T, <NUM_LIT>, <NUM_LIT>)<EOL>T = T27_to_T90(T)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if desired == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>elif desired == '<STR_LIT>':<EOL><INDENT>range_check(T, <NUM_LIT>, <NUM_LIT>)<EOL>T = T90_to_T68(T)<EOL><DEDENT>elif desired == '<STR_LIT>':<EOL><INDENT>range_check(T, <NUM_LIT>, <NUM_LIT>)<EOL>T = T90_to_T76(T)<EOL><DEDENT>elif desired == '<STR_LIT>':<EOL><INDENT>range_check(T, <NUM_LIT>, <NUM_LIT>)<EOL>T = T90_to_T48(T)<EOL><DEDENT>elif desired == '<STR_LIT>':<EOL><INDENT>range_check(T, <NUM_LIT>, <NUM_LIT>)<EOL>T = T90_to_T27(T)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>return float(T)<EOL>", "docstring": "r'''Converts the a temperature reading made in any of the scales\n    'ITS-90', 'ITS-68','ITS-48', 'ITS-76', or 'ITS-27' to any of the other\n    scales. Not all temperature ranges can be converted to other ranges; for\n    instance, 'ITS-76' is purely for low temperatures, and 5 K on it has no\n    conversion to 'ITS-90' or any other scale. 
Both a conversion to ITS-90 and\n    to the desired scale must be possible for the conversion to occur.\n    The conversion uses cubic spline interpolation.\n\n    ITS-68 conversion is valid from 14 K to 4300 K.\n    ITS-48 conversion is valid from 93.15 K to 4273.15 K\n    ITS-76 conversion is valid from 5 K to 27 K.\n    ITS-27 is valid from 903.15 K to 4273.15 k.\n\n    Parameters\n    ----------\n    T : float\n        Temperature, on `current` scale [K]\n    current : str\n        String representing the scale T is in, 'ITS-90', 'ITS-68',\n        'ITS-48', 'ITS-76', or 'ITS-27'.\n    desired : str\n        String representing the scale T will be returned in, 'ITS-90',\n        'ITS-68', 'ITS-48', 'ITS-76', or 'ITS-27'.\n\n    Returns\n    -------\n    T : float\n        Temperature, on scale `desired` [K]\n\n    Notes\n    -----\n    Because the conversion is performed by spline functions, a re-conversion\n    of a value will not yield exactly the original value. However, it is quite\n    close.\n\n    The use of splines is quite quick (20 micro seconds/calculation). While\n    just a spline for one-way conversion could be used, a numerical solver\n    would have to be used to obtain an exact result for the reverse conversion.\n    This was found to take approximately 1 ms/calculation, depending on the\n    region.\n\n    Examples\n    --------\n    >>> T_converter(500, 'ITS-68', 'ITS-48')\n    499.9470092992346\n\n    References\n    ----------\n    .. [1] Wier, Ron D., and Robert N. Goldberg. \"On the Conversion of\n       Thermodynamic Properties to the Basis of the International Temperature\n       Scale of 1990.\" The Journal of Chemical Thermodynamics 28, no. 3\n       (March 1996): 261-76. doi:10.1006/jcht.1996.0026.\n    .. [2] Goldberg, Robert N., and R. D. Weir. 
\"Conversion of Temperatures\n       and Thermodynamic Properties to the Basis of the International\n       Temperature Scale of 1990 (Technical Report).\" Pure and Applied\n       Chemistry 64, no. 10 (1992): 1545-1562. doi:10.1351/pac199264101545.", "id": "f15799:m1"}
{"signature": "def ITS90_68_difference(T):", "body": "ais = [-<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, <NUM_LIT>,<EOL>-<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, -<NUM_LIT>, <NUM_LIT>,<EOL>-<NUM_LIT>]<EOL>bis = [<NUM_LIT:0>, -<NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, -<NUM_LIT>,<EOL><NUM_LIT>, -<NUM_LIT>]<EOL><INDENT>cis = [-<NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, -<NUM_LIT>, <NUM_LIT>,<EOL><NUM_LIT>]<EOL><DEDENT>new_cs = [<NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>,<EOL><NUM_LIT>, -<NUM_LIT>]<EOL>dT = <NUM_LIT:0><EOL>if T < <NUM_LIT>:<EOL><INDENT>dT = <NUM_LIT:0><EOL><DEDENT>elif T >= <NUM_LIT> and T <= <NUM_LIT>:<EOL><INDENT>for i in range(<NUM_LIT>):<EOL><INDENT>dT += ais[i]*((T - <NUM_LIT>)/<NUM_LIT>)**i<EOL><DEDENT><DEDENT>elif T > <NUM_LIT> and T < <NUM_LIT>:<EOL><INDENT>dT = <NUM_LIT:0><EOL><DEDENT>elif T >= <NUM_LIT> and T <= <NUM_LIT>:<EOL><INDENT>for i in range(<NUM_LIT:9>):<EOL><INDENT>dT += bis[i]*((T - <NUM_LIT>)/<NUM_LIT>)**i<EOL><DEDENT><DEDENT>elif T > <NUM_LIT> and T <= <NUM_LIT>:<EOL><INDENT>for i in range(<NUM_LIT:6>):<EOL><INDENT>dT += new_cs[i]*(T-<NUM_LIT>)**i<EOL><DEDENT><DEDENT>elif T > <NUM_LIT>:<EOL><INDENT>dT = -<NUM_LIT>*T**<NUM_LIT:2><EOL><DEDENT>return dT<EOL>", "docstring": "r'''Calculates the difference between ITS-90 and ITS-68 scales using a\n    series of models listed in [1]_, [2]_, and [3]_.\n\n    The temperature difference is given by the following equations:\n\n    From 13.8 K to 73.15 K:\n\n    .. math::\n        T_{90} - T_{68} = a_0 + \\sum_{i=1}^{12} a_i[(T_{90}/K-40)/40]^i\n\n    From 83.8 K to 903.75 K:\n\n    .. math::\n        T_{90} - T_{68} = \\sum_{i=1}^8 b_i[(T_{90}/K - 273.15)/630]^i\n\n    From 903.75 K to 1337.33 K:\n\n    .. math::\n        T_{90} - T_{68} = \\sum_{i=0}^5 c_i[T_{90}/^\\circ C]^i\n\n    Above 1337.33 K:\n\n    .. 
math::\n        T_{90} - T_{68} = -1.398\\cdot 10^{-7}\\left(\\frac{T_{90}}{K}\\right)^2\n\n\n    Parameters\n    ----------\n    T : float\n        Temperature, ITS-90, or approximately ITS-68 [K]\n\n    Returns\n    -------\n    dT : float\n        Temperature, difference between ITS-90 and ITS-68 at T [K]\n\n    Notes\n    -----\n    The conversion is straightforward when T90 is known. Theoretically, the\n    model should be solved numerically to convert the reverse way. However,\n    according to [4]_, the difference is under 0.05 mK from 73.15 K to\n    903.15 K, and under 0.26 mK up to 1337.33 K.\n\n    For temperatures under 13.8 K, no conversion is performed.\n\n    The first set of coefficients are:\n    -0.005903, 0.008174, -0.061924, -0.193388, 1.490793, 1.252347, -9.835868,\n    1.411912, 25.277595, -19.183815, -18.437089, 27.000895, -8.716324.\n\n    The second set of coefficients are:\n    0, -0.148759, -0.267408, 1.08076, 1.269056, -4.089591, -1.871251,\n    7.438081, -3.536296.\n\n    The third set of coefficients are:\n    7.8687209E1, -4.7135991E-1, 1.0954715E-3, -1.2357884E-6, 6.7736583E-10,\n    -1.4458081E-13.\n    These last coefficients use the temperature in degrees Celcius. A slightly\n    older model used the following coefficients but a different equation over\n    the same range:\n    -0.00317, -0.97737, 1.2559, 2.03295, -5.91887, -3.23561, 7.23364,\n    5.04151. The model for these coefficients was:\n\n    .. math::\n        T_{90} - T_{68} = c_0 + \\sum_{i=1}^7 c_i[(T_{90}/K - 1173.15)/300]^i\n\n    For temperatures larger than several thousand K, the differences have no\n    meaning and grows quadratically.\n\n    Examples\n    --------\n    >>> ITS90_68_difference(1000.)\n    0.01231818956580355\n\n    References\n    ----------\n    .. [1] Bedford, R. E., G. Bonnier, H. Maas, and F. Pavese. 
\"Techniques for\n       Approximating the International Temperature Scale of 1990.\" Bureau\n       International Des Poids et Mesures, Sfievres, 1990.\n    .. [2] Wier, Ron D., and Robert N. Goldberg. \"On the Conversion of\n       Thermodynamic Properties to the Basis of the International Temperature\n       Scale of 1990.\" The Journal of Chemical Thermodynamics 28, no. 3\n       (March 1996): 261-76. doi:10.1006/jcht.1996.0026.\n    .. [3] Goldberg, Robert N., and R. D. Weir. \"Conversion of Temperatures\n       and Thermodynamic Properties to the Basis of the International\n       Temperature Scale of 1990 (Technical Report).\" Pure and Applied\n       Chemistry 64, no. 10 (1992): 1545-1562. doi:10.1351/pac199264101545.\n    .. [4] Code10.info. \"Conversions among International Temperature Scales.\"\n       Accessed May 22, 2016. http://www.code10.info/index.php%3Foption%3Dcom_content%26view%3Darticle%26id%3D83:conversions-among-international-temperature-scales%26catid%3D60:temperature%26Itemid%3D83.", "id": "f15799:m0"}
{"signature": "def CoolProp_T_dependent_property(T, CASRN, prop, phase):", "body": "if not has_CoolProp:  <EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if CASRN not in coolprop_dict:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>Tc = coolprop_fluids[CASRN].Tc<EOL>T = float(T) <EOL>if phase == '<STR_LIT:l>':<EOL><INDENT>if T > Tc:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if PhaseSI('<STR_LIT:T>', T, '<STR_LIT:P>', <NUM_LIT>, CASRN) in [u'<STR_LIT>', u'<STR_LIT>']:<EOL><INDENT>return PropsSI(prop, '<STR_LIT:T>', T, '<STR_LIT:P>', <NUM_LIT>, CASRN)<EOL><DEDENT>else:<EOL><INDENT>return PropsSI(prop, '<STR_LIT:T>', T, '<STR_LIT>', <NUM_LIT:0>, CASRN)<EOL><DEDENT><DEDENT>elif phase == '<STR_LIT:g>':<EOL><INDENT>if PhaseSI('<STR_LIT:T>', T, '<STR_LIT:P>', <NUM_LIT>, CASRN) == '<STR_LIT>':<EOL><INDENT>return PropsSI(prop, '<STR_LIT:T>', T, '<STR_LIT:P>', <NUM_LIT>, CASRN)<EOL><DEDENT>else:<EOL><INDENT>if T < Tc:<EOL><INDENT>return PropsSI(prop, '<STR_LIT:T>', T, '<STR_LIT>', <NUM_LIT:1>, CASRN)<EOL><DEDENT>else:<EOL><INDENT>return PropsSI(prop, '<STR_LIT:T>', T, '<STR_LIT:P>', <NUM_LIT>, CASRN)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Calculates a property of a chemical in either the liquid or gas phase\n    as a function of temperature only. This means that the property is\n    either at 1 atm or along the saturation curve.\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the fluid [K]\n    CASRN : str\n        CAS number of the fluid\n    prop : str\n        CoolProp string shortcut for desired property\n    phase : str\n        Either 'l' or 'g' for liquid or gas properties respectively\n\n    Returns\n    -------\n    prop : float\n        Desired chemical property, [units]\n\n    Notes\n    -----\n    For liquids above their boiling point, the liquid property is found on the\n    saturation line (at higher pressures). 
Under their boiling point, the\n    property is calculated at 1 atm.\n\n    No liquid calculations are permitted above the critical temperature.\n\n    For gases under the chemical's boiling point, the gas property is found\n    on the saturation line (at sub-atmospheric pressures). Above the boiling\n    point, the property is calculated at 1 atm.\n\n    An exception is raised if the desired CAS is not supported, or if CoolProp\n    is not available.\n\n    The list of strings acceptable as an input for property types is:\n    http://www.coolprop.org/coolprop/HighLevelAPI.html#table-of-string-inputs-to-propssi-function\n\n    Examples\n    --------\n    Water at STP according to IAPWS-95\n\n    >>> CoolProp_T_dependent_property(298.15, '7732-18-5', 'D', 'l')\n    997.047636760347\n\n    References\n    ----------\n    .. [1] Bell, Ian H., Jorrit Wronski, Sylvain Quoilin, and Vincent Lemort.\n       \"Pure and Pseudo-Pure Fluid Thermophysical Property Evaluation and the\n       Open-Source Thermophysical Property Library CoolProp.\" Industrial &\n       Engineering Chemistry Research 53, no. 6 (February 12, 2014):\n       2498-2508. doi:10.1021/ie4033999. http://www.coolprop.org/", "id": "f15801:m0"}
{"signature": "def mass_fractions(atoms, MW=None):", "body": "if not MW:<EOL><INDENT>MW = molecular_weight(atoms)<EOL><DEDENT>mfracs = {}<EOL>for i in atoms:<EOL><INDENT>if i in periodic_table:<EOL><INDENT>mfracs[i] = periodic_table[i].MW*atoms[i]/MW<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>return mfracs<EOL>", "docstring": "r'''Calculates the mass fractions of each element in a compound,\n    given a dictionary of its atoms and their counts, in the format\n    {symbol: count}.\n\n    .. math::\n        w_i =  \\frac{n_i MW_i}{\\sum_i n_i MW_i}\n\n    Parameters\n    ----------\n    atoms : dict\n        dictionary of counts of individual atoms, indexed by symbol with\n        proper capitalization, [-]\n    MW : float, optional\n        Molecular weight, [g/mol]\n\n    Returns\n    -------\n    mfracs : dict\n        dictionary of mass fractions of individual atoms, indexed by symbol\n        with proper capitalization, [-]\n\n    Notes\n    -----\n    Molecular weight is optional, but speeds up the calculation slightly. It\n    is calculated using the function `molecular_weight` if not specified.\n\n    Elemental data is from rdkit, with CAS numbers added. An exception is\n    raised if an incorrect element symbol is given. Elements up to 118 are\n    supported.\n\n    Examples\n    --------\n    >>> mass_fractions({'H': 12, 'C': 20, 'O': 5})\n    {'H': 0.03639798802478244, 'C': 0.7228692758981262, 'O': 0.24073273607709128}\n\n    References\n    ----------\n    .. [1] RDKit: Open-source cheminformatics; http://www.rdkit.org", "id": "f15802:m1"}
{"signature": "def charge_from_formula(formula):", "body": "negative = '<STR_LIT:->' in formula<EOL>positive = '<STR_LIT:+>' in formula<EOL>if positive and negative:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>elif not (positive or negative):<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>multiplier, sign = (-<NUM_LIT:1>, '<STR_LIT:->') if negative else (<NUM_LIT:1>, '<STR_LIT:+>')<EOL>hit = False<EOL>if '<STR_LIT:(>' in formula:<EOL><INDENT>hit = bracketed_charge_re.findall(formula)<EOL>if hit:<EOL><INDENT>formula = hit[-<NUM_LIT:1>].replace('<STR_LIT:(>', '<STR_LIT>').replace('<STR_LIT:)>', '<STR_LIT>')<EOL><DEDENT><DEDENT>count = formula.count(sign)<EOL>if count == <NUM_LIT:1>:<EOL><INDENT>splits = formula.split(sign)<EOL>if splits[<NUM_LIT:1>] == '<STR_LIT>' or splits[<NUM_LIT:1>] == '<STR_LIT:)>':<EOL><INDENT>return multiplier<EOL><DEDENT>else:<EOL><INDENT>return multiplier*int(splits[<NUM_LIT:1>])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return multiplier*count<EOL><DEDENT>", "docstring": "r'''Basic formula parser to determine the charge from a formula - given\n    that the charge is already specified as one element of the formula.\n\n    Performs no sanity checking that elements are actually elements.\n\n    Parameters\n    ----------\n    formula : str\n        Formula string, very simply formats only, ending in one of '+x',\n        '-x', n*'+', or n*'-' or any of them surrounded by brackets but always\n        at the end of a formula.\n\n    Returns\n    -------\n    charge : int\n        Charge of the molecule, [faraday]\n\n    Notes\n    -----\n\n    Examples\n    --------\n    >>> charge_from_formula('Br3-')\n    -1\n    >>> charge_from_formula('Br3(-)')\n    -1", "id": "f15802:m7"}
{"signature": "def molecular_weight(atoms):", "body": "MW = <NUM_LIT:0><EOL>for i in atoms:<EOL><INDENT>if i in periodic_table:<EOL><INDENT>MW += periodic_table[i].MW*atoms[i]<EOL><DEDENT>elif i == '<STR_LIT:D>':<EOL><INDENT>MW += <NUM_LIT>*atoms[i]<EOL><DEDENT>elif i == '<STR_LIT:T>':<EOL><INDENT>MW += <NUM_LIT>*atoms[i]<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>return MW<EOL>", "docstring": "r'''Calculates molecular weight of a molecule given a dictionary of its\n    atoms and their counts, in the format {symbol: count}.\n\n    .. math::\n        MW = \\sum_i n_i MW_i\n\n    Parameters\n    ----------\n    atoms : dict\n        dictionary of counts of individual atoms, indexed by symbol with\n        proper capitalization, [-]\n\n    Returns\n    -------\n    MW : float\n        Calculated molecular weight [g/mol]\n\n    Notes\n    -----\n    Elemental data is from rdkit, with CAS numbers added. An exception is\n    raised if an incorrect element symbol is given. Elements up to 118 are\n    supported, as are deutreium and tritium.\n\n    Examples\n    --------\n    >>> molecular_weight({'H': 12, 'C': 20, 'O': 5}) # DNA\n    332.30628\n\n    References\n    ----------\n    .. [1] RDKit: Open-source cheminformatics; http://www.rdkit.org", "id": "f15802:m0"}
{"signature": "def nested_formula_parser(formula, check=True):", "body": "formula = formula.replace('<STR_LIT:[>', '<STR_LIT>').replace('<STR_LIT:]>', '<STR_LIT>')<EOL>charge_splits = bracketed_charge_re.split(formula)<EOL>if len(charge_splits) > <NUM_LIT:1>:<EOL><INDENT>formula = charge_splits[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>formula = formula.split('<STR_LIT:+>')[<NUM_LIT:0>].split('<STR_LIT:->')[<NUM_LIT:0>]<EOL><DEDENT>stack = [[]]<EOL>last = stack[<NUM_LIT:0>]<EOL>tokens = formula_token_matcher_rational.findall(formula)<EOL>if check:<EOL><INDENT>token_letters = set([j for i in tokens for j in i if j in letter_set])<EOL>formula_letters = set(i for i in formula if i in letter_set)<EOL>if formula_letters != token_letters:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>for token in tokens:<EOL><INDENT>if token == \"<STR_LIT:(>\":<EOL><INDENT>stack.append([])<EOL>last = stack[-<NUM_LIT:1>]<EOL><DEDENT>elif token == \"<STR_LIT:)>\":<EOL><INDENT>temp_dict = {}<EOL>for d in last:<EOL><INDENT>for ele, count in d.items():<EOL><INDENT>if ele in temp_dict:<EOL><INDENT>temp_dict[ele] = temp_dict[ele] + count<EOL><DEDENT>else:<EOL><INDENT>temp_dict[ele] = count<EOL><DEDENT><DEDENT><DEDENT>stack.pop()<EOL>last = stack[-<NUM_LIT:1>]<EOL>last.append(temp_dict)<EOL><DEDENT>elif token.isalpha():<EOL><INDENT>last.append({token: <NUM_LIT:1>})<EOL><DEDENT>else:<EOL><INDENT>v = float(token)<EOL>v_int = int(v)<EOL>if v_int == v:<EOL><INDENT>v = v_int<EOL><DEDENT>last[-<NUM_LIT:1>] = {ele: count*v for ele, count in last[-<NUM_LIT:1>].items()}<EOL><DEDENT><DEDENT>ans = {}<EOL>for d in last:<EOL><INDENT>for ele, count in d.items():<EOL><INDENT>if ele in ans:<EOL><INDENT>ans[ele] = ans[ele] + count<EOL><DEDENT>else:<EOL><INDENT>ans[ele] = count<EOL><DEDENT><DEDENT><DEDENT>return ans<EOL>", "docstring": "r'''Improved formula parser which handles braces and their multipliers, \n    as well as rational element counts.\n\n    Strips charges from the end of a formula 
first. Accepts repeated chemical\n    units. Performs no sanity checking that elements are actually elements.\n    As it uses regular expressions for matching, errors are mostly just ignored.\n\n    Parameters\n    ----------\n    formula : str\n        Formula string, very simply formats only.\n    check : bool\n        If `check` is True, a simple check will be performed to determine if\n        a formula is not a formula and an exception will be raised if it is\n        not, [-]\n\n    Returns\n    -------\n    atoms : dict\n        dictionary of counts of individual atoms, indexed by symbol with\n        proper capitalization, [-]\n\n    Notes\n    -----\n    Inspired by the approach taken by CrazyMerlyn on a reddit DailyProgrammer\n    challenge, at https://www.reddit.com/r/dailyprogrammer/comments/6eerfk/20170531_challenge_317_intermediate_counting/\n\n    Examples\n    --------\n    >>> pprint(nested_formula_parser('Pd(NH3)4.0001+2'))\n    {'H': 12.0003, 'N': 4.0001, 'Pd': 1}", "id": "f15802:m6"}
{"signature": "def serialize_formula(formula):", "body": "charge = charge_from_formula(formula)<EOL>element_dict = nested_formula_parser(formula)<EOL>base = atoms_to_Hill(element_dict)<EOL>if charge  == <NUM_LIT:0>:<EOL><INDENT>pass<EOL><DEDENT>elif charge > <NUM_LIT:0>:<EOL><INDENT>if charge == <NUM_LIT:1>:<EOL><INDENT>base += '<STR_LIT:+>'<EOL><DEDENT>else:<EOL><INDENT>base += '<STR_LIT:+>' + str(charge)<EOL><DEDENT><DEDENT>elif charge < <NUM_LIT:0>:<EOL><INDENT>if charge == -<NUM_LIT:1>:<EOL><INDENT>base += '<STR_LIT:->'<EOL><DEDENT>else:<EOL><INDENT>base +=  str(charge)<EOL><DEDENT><DEDENT>return base<EOL>", "docstring": "r'''Basic formula serializer to construct a consistently-formatted formula.\n    This is necessary for handling user-supplied formulas, which are not always\n    well formatted.\n\n    Performs no sanity checking that elements are actually elements.\n\n    Parameters\n    ----------\n    formula : str\n        Formula string as parseable by the method nested_formula_parser, [-]\n\n    Returns\n    -------\n    formula : str\n        A consistently formatted formula to describe a molecular formula, [-]\n\n    Notes\n    -----\n\n    Examples\n    --------\n    >>> serialize_formula('Pd(NH3)4+3')\n    'H12N4Pd+3'", "id": "f15802:m8"}
{"signature": "def setup_a_alpha_and_derivatives(self, i, T=None):", "body": "self.a, self.m, self.Tc = self.ais[i], self.ms[i], self.Tcs[i]<EOL>", "docstring": "r'''Sets `a`, `m`, and `Tc` for a specific component before the \n        pure-species EOS's `a_alpha_and_derivatives` method is called. Both are \n        called by `GCEOSMIX.a_alpha_and_derivatives` for every component.", "id": "f15803:c2:m1"}
{"signature": "def fugacity_coefficients(self, Z, zs):", "body": "phis = []<EOL>V = Z*R*self.T/self.P<EOL>for i in self.cmps:<EOL><INDENT>phi = self.bs[i]/(V-self.b) - log(Z*(<NUM_LIT:1.> - self.b/V)) - <NUM_LIT>*(self.a_alpha*self.ais[i])**<NUM_LIT:0.5>/(R*self.T*V)<EOL>phis.append(exp(phi))<EOL><DEDENT>return phis<EOL>", "docstring": "r'''Literature formula for calculating fugacity coefficients for each\n        species in a mixture. Verified numerically.\n        Called by `fugacities` on initialization, or by a solver routine \n        which is performing a flash calculation.\n\n        .. math::\n            \\ln \\hat \\phi_i = \\frac{b_i}{V-b} - \\ln\\left[Z\\left(1\n            - \\frac{b}{V}\\right)\\right] - \\frac{2\\sqrt{aa_i}}{RTV}\n\n        Parameters\n        ----------\n        Z : float\n            Compressibility of the mixture for a desired phase, [-]\n        zs : list[float], optional\n            List of mole factions, either overall or in a specific phase, [-]\n\n        Returns\n        -------\n        phis : float\n            Fugacity coefficient for each species, [-]\n\n        References\n        ----------\n        .. [1] Walas, Stanley M. Phase Equilibria in Chemical Engineering. \n           Butterworth-Heinemann, 1985.", "id": "f15803:c4:m3"}
{"signature": "def cleanup_a_alpha_and_derivatives(self):", "body": "del(self.a, self.kappa, self.Tc)<EOL>", "docstring": "r'''Removes properties set by `setup_a_alpha_and_derivatives`; run by\n        `GCEOSMIX.a_alpha_and_derivatives` after `a_alpha` is calculated for \n        every component", "id": "f15803:c1:m2"}
{"signature": "def setup_a_alpha_and_derivatives(self, i, T=None):", "body": "self.a = self.ais[i]<EOL>", "docstring": "r'''Sets `a` for a specific component before the \n        pure-species EOS's `a_alpha_and_derivatives` method is called. Both are \n        called by `GCEOSMIX.a_alpha_and_derivatives` for every component.", "id": "f15803:c4:m1"}
{"signature": "def cleanup_a_alpha_and_derivatives(self):", "body": "del(self.a, self.Tc, self.S1, self.S2)<EOL>", "docstring": "r'''Removes properties set by `setup_a_alpha_and_derivatives`; run by\n        `GCEOSMIX.a_alpha_and_derivatives` after `a_alpha` is calculated for \n        every component", "id": "f15803:c9:m2"}
{"signature": "def fugacity_coefficients(self, Z, zs):", "body": "from cmath import log<EOL>A = self.a_alpha*self.P/(R2*self.T*self.T)<EOL>B = self.b*self.P/(R*self.T)<EOL>phis = []<EOL>for i in self.cmps:<EOL><INDENT>t1 = self.bs[i]/self.b*(Z - <NUM_LIT:1.>) - log(Z - B).real<EOL>t2 = <NUM_LIT>/self.a_alpha*sum([zs[j]*self.a_alpha_ijs[i][j] for j in self.cmps])<EOL>t3 = t1 - A/(two_root_two*B)*(t2 - self.bs[i]/self.b)*log((Z + (root_two + <NUM_LIT:1.>)*B)/(Z - (root_two - <NUM_LIT:1.>)*B)).real<EOL>phis.append(exp(t3))<EOL><DEDENT>return phis<EOL>", "docstring": "r'''Literature formula for calculating fugacity coefficients for each\n        species in a mixture. Verified numerically. Applicable to most \n        derivatives of the Peng-Robinson equation of state as well.\n        Called by `fugacities` on initialization, or by a solver routine \n        which is performing a flash calculation.\n\n        .. math::\n            \\ln \\hat \\phi_i = \\frac{B_i}{B}(Z-1)-\\ln(Z-B) + \\frac{A}{2\\sqrt{2}B}\n            \\left[\\frac{B_i}{B} - \\frac{2}{a\\alpha}\\sum_i y_i(a\\alpha)_{ij}\\right]\n            \\log\\left[\\frac{Z + (1+\\sqrt{2})B}{Z-(\\sqrt{2}-1)B}\\right]\n\n            A = \\frac{(a\\alpha)P}{R^2 T^2}\n\n            B = \\frac{b P}{RT}\n\n        Parameters\n        ----------\n        Z : float\n            Compressibility of the mixture for a desired phase, [-]\n        zs : list[float], optional\n            List of mole factions, either overall or in a specific phase, [-]\n\n        Returns\n        -------\n        phis : float\n            Fugacity coefficient for each species, [-]\n\n        References\n        ----------\n        .. [1] Peng, Ding-Yu, and Donald B. Robinson. \"A New Two-Constant  \n           Equation of State.\" Industrial & Engineering Chemistry Fundamentals \n           15, no. 1 (February 1, 1976): 59-64. doi:10.1021/i160057a011.\n        .. [2] Walas, Stanley M. Phase Equilibria in Chemical Engineering. 
\n           Butterworth-Heinemann, 1985.", "id": "f15803:c1:m3"}
{"signature": "def fugacities(self, xs=None, ys=None):", "body": "if self.phase in ['<STR_LIT:l>', '<STR_LIT>']:<EOL><INDENT>if xs is None:<EOL><INDENT>xs = self.zs<EOL><DEDENT>self.phis_l = self.fugacity_coefficients(self.Z_l, zs=xs)<EOL>self.fugacities_l = [phi*x*self.P for phi, x in zip(self.phis_l, xs)]<EOL>self.lnphis_l = [log(i) for i in self.phis_l]<EOL><DEDENT>if self.phase in ['<STR_LIT:g>', '<STR_LIT>']:<EOL><INDENT>if ys is None:<EOL><INDENT>ys = self.zs<EOL><DEDENT>self.phis_g = self.fugacity_coefficients(self.Z_g, zs=ys)<EOL>self.fugacities_g = [phi*y*self.P for phi, y in zip(self.phis_g, ys)]<EOL>self.lnphis_g = [log(i) for i in self.phis_g]<EOL><DEDENT>", "docstring": "r'''Helper method for calculating fugacity coefficients for any \n        phases present, using either the overall mole fractions for both phases\n        or using specified mole fractions for each phase.\n\n        Requires `fugacity_coefficients` to be implemented by each subclassing\n        EOS.\n\n        In addition to setting `fugacities_l` and/or `fugacities_g`, this also\n        sets the fugacity coefficients `phis_l` and/or `phis_g`.\n\n        .. math::\n            \\hat \\phi_i^g = \\frac{\\hat f_i^g}{x_i P}\n\n            \\hat \\phi_i^l = \\frac{\\hat f_i^l}{x_i P}\n\n        Parameters\n        ----------\n        xs : list[float], optional\n            Liquid-phase mole fractions of each species, [-]\n        ys : list[float], optional\n            Vapor-phase mole fractions of each species, [-]\n\n        Notes\n        -----\n        It is helpful to check that `fugacity_coefficients` has been\n        implemented correctly using the following expression, from [1]_.\n\n        .. math::\n            \\ln \\hat \\phi_i = \\left[\\frac{\\partial (n\\log \\phi)}{\\partial \n            n_i}\\right]_{T,P,n_j,V_t}\n\n        For reference, several expressions for fugacity of a component are as\n        follows, shown in [1]_ and [2]_.\n\n        .. 
math::\n             \\ln \\hat \\phi_i = \\int_{0}^P\\left(\\frac{\\hat V_i}\n             {RT} - \\frac{1}{P}\\right)dP\n\n             \\ln \\hat \\phi_i = \\int_V^\\infty \\left[\n             \\frac{1}{RT}\\frac{\\partial P}{ \\partial n_i}\n             - \\frac{1}{V}\\right] d V - \\ln Z\n\n        References\n        ----------\n        .. [1] Hu, Jiawen, Rong Wang, and Shide Mao. \"Some Useful Expressions \n           for Deriving Component Fugacity Coefficients from Mixture Fugacity \n           Coefficient.\" Fluid Phase Equilibria 268, no. 1-2 (June 25, 2008): \n           7-13. doi:10.1016/j.fluid.2008.03.007.\n        .. [2] Walas, Stanley M. Phase Equilibria in Chemical Engineering. \n           Butterworth-Heinemann, 1985.", "id": "f15803:c0:m1"}
{"signature": "def cleanup_a_alpha_and_derivatives(self):", "body": "del(self.a, self.kappa, self.kappa0, self.kappa1, self.kappa2, self.kappa3, self.Tc)<EOL>", "docstring": "r'''Removes properties set by `setup_a_alpha_and_derivatives`; run by\n        `GCEOSMIX.a_alpha_and_derivatives` after `a_alpha` is calculated for \n        every component", "id": "f15803:c6:m2"}
{"signature": "def cleanup_a_alpha_and_derivatives(self):", "body": "del(self.a, self.kappa, self.kappa0, self.kappa1, self.Tc)<EOL>", "docstring": "r'''Removes properties set by `setup_a_alpha_and_derivatives`; run by\n        `GCEOSMIX.a_alpha_and_derivatives` after `a_alpha` is calculated for \n        every component", "id": "f15803:c5:m2"}
{"signature": "def load_all_methods(self):", "body": "methods = [SIMPLE]        <EOL>if none_and_length_check((self.MWs, self.molecular_diameters, self.Stockmayers)):<EOL><INDENT>methods.append(BROKAW)<EOL><DEDENT>if none_and_length_check([self.MWs]):<EOL><INDENT>methods.extend([WILKE, HERNING_ZIPPERER])<EOL><DEDENT>self.all_methods = set(methods)<EOL>Tmins = [i.Tmin for i in self.ViscosityGases if i.Tmin]<EOL>Tmaxs = [i.Tmax for i in self.ViscosityGases if i.Tmax]<EOL>if Tmins:<EOL><INDENT>self.Tmin = max(Tmins)<EOL><DEDENT>if Tmaxs:<EOL><INDENT>self.Tmax = max(Tmaxs)<EOL><DEDENT>", "docstring": "r'''Method to initialize the object by precomputing any values which\n        may be used repeatedly and by retrieving mixture-specific variables.\n        All data are stored as attributes. This method also sets :obj:`Tmin`, \n        :obj:`Tmax`, and :obj:`all_methods` as a set of methods which should \n        work to calculate the property.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15804:c3:m1"}
{"signature": "def calculate(self, T, method):", "body": "if method == DUTT_PRASAD:<EOL><INDENT>A, B, C = self.DUTT_PRASAD_coeffs<EOL>mu = ViswanathNatarajan3(T, A, B, C, )<EOL><DEDENT>elif method == VISWANATH_NATARAJAN_3:<EOL><INDENT>A, B, C = self.VISWANATH_NATARAJAN_3_coeffs<EOL>mu = ViswanathNatarajan3(T, A, B, C)<EOL><DEDENT>elif method == VISWANATH_NATARAJAN_2:<EOL><INDENT>A, B = self.VISWANATH_NATARAJAN_2_coeffs<EOL>mu = ViswanathNatarajan2(T, self.VISWANATH_NATARAJAN_2_coeffs[<NUM_LIT:0>], self.VISWANATH_NATARAJAN_2_coeffs[<NUM_LIT:1>])<EOL><DEDENT>elif method == VISWANATH_NATARAJAN_2E:<EOL><INDENT>C, D = self.VISWANATH_NATARAJAN_2E_coeffs<EOL>mu = ViswanathNatarajan2Exponential(T, C, D)<EOL><DEDENT>elif method == DIPPR_PERRY_8E:<EOL><INDENT>mu = EQ101(T, *self.Perrys2_313_coeffs)<EOL><DEDENT>elif method == COOLPROP:<EOL><INDENT>mu = CoolProp_T_dependent_property(T, self.CASRN, '<STR_LIT>', '<STR_LIT:l>')<EOL><DEDENT>elif method == LETSOU_STIEL:<EOL><INDENT>mu = Letsou_Stiel(T, self.MW, self.Tc, self.Pc, self.omega)<EOL><DEDENT>elif method == PRZEDZIECKI_SRIDHAR:<EOL><INDENT>Vml = self.Vml(T) if hasattr(self.Vml, '<STR_LIT>') else self.Vml<EOL>mu = Przedziecki_Sridhar(T, self.Tm, self.Tc, self.Pc, self.Vc, Vml, self.omega, self.MW)<EOL><DEDENT>elif method == VDI_PPDS:<EOL><INDENT>A, B, C, D, E = self.VDI_PPDS_coeffs<EOL>term = (C - T)/(T-D)<EOL>if term < <NUM_LIT:0>:<EOL><INDENT>term1 = -((T - C)/(T-D))**(<NUM_LIT:1>/<NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>term1 = term**(<NUM_LIT:1>/<NUM_LIT>)<EOL><DEDENT>term2 = term*term1<EOL>mu = E*exp(A*term1 + B*term2)<EOL><DEDENT>elif method in self.tabular_data:<EOL><INDENT>mu = self.interpolate(T, method)<EOL><DEDENT>return mu<EOL>", "docstring": "r'''Method to calculate low-pressure liquid viscosity at tempearture\n        `T` with a given method.\n\n        This method has no exception handling; see `T_dependent_property`\n        for that.\n\n        Parameters\n        ----------\n        T : float\n            
Temperature at which to calculate viscosity, [K]\n        method : str\n            Name of the method to use\n\n        Returns\n        -------\n        mu : float\n            Viscosity of the liquid at T and a low pressure, [Pa*S]", "id": "f15804:c0:m2"}
{"signature": "def lucas_gas(T, Tc, Pc, Zc, MW, dipole=<NUM_LIT:0>, CASRN=None):", "body": "Tr = T/Tc<EOL>xi = <NUM_LIT>*(Tc/MW**<NUM_LIT:3>/(Pc/<NUM_LIT>)**<NUM_LIT:4>)**(<NUM_LIT:1>/<NUM_LIT>)  <EOL>if dipole is None:<EOL><INDENT>dipole = <NUM_LIT:0><EOL><DEDENT>dipoler = <NUM_LIT>*dipole**<NUM_LIT:2>*(Pc/<NUM_LIT>)/Tc**<NUM_LIT:2>  <EOL>if dipoler < <NUM_LIT>:<EOL><INDENT>Fp = <NUM_LIT:1><EOL><DEDENT>elif <NUM_LIT> <= dipoler < <NUM_LIT>:<EOL><INDENT>Fp = <NUM_LIT:1> + <NUM_LIT>*(<NUM_LIT> - Zc)**<NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>Fp = <NUM_LIT:1> + <NUM_LIT>*(<NUM_LIT> - Zc)**<NUM_LIT>*abs(<NUM_LIT> + <NUM_LIT:0.1>*(Tr-<NUM_LIT>))<EOL><DEDENT>if CASRN and CASRN in _lucas_Q_dict:<EOL><INDENT>Q = _lucas_Q_dict[CASRN]<EOL>if Tr - <NUM_LIT:12> > <NUM_LIT:0>:<EOL><INDENT>value = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>value = -<NUM_LIT:1><EOL><DEDENT>FQ = <NUM_LIT>*Q**<NUM_LIT>*(<NUM_LIT:1> + <NUM_LIT>*((Tr-<NUM_LIT:12>)**<NUM_LIT:2>)**(<NUM_LIT:1.>/MW)*value)<EOL><DEDENT>else:<EOL><INDENT>FQ = <NUM_LIT:1><EOL><DEDENT>eta = (<NUM_LIT>*Tr**<NUM_LIT> - <NUM_LIT>*exp(-<NUM_LIT>*Tr) + <NUM_LIT>*exp(-<NUM_LIT>*Tr) + <NUM_LIT>)*Fp*FQ/xi<EOL>return eta/<NUM_LIT><EOL>", "docstring": "r'''Estimate the viscosity of a gas using an emperical\n    formula developed in several sources, but as discussed in [1]_ as the\n    original sources are in German or merely personal communications with the\n    authors of [1]_.\n\n    .. 
math::\n        \\eta  = \\left[0.807T_r^{0.618}-0.357\\exp(-0.449T_r) + 0.340\\exp(-4.058\n        T_r) + 0.018\\right]F_p^\\circ F_Q^\\circ /\\xi\n\n        F_p^\\circ=1, 0 \\le \\mu_{r} < 0.022\n\n        F_p^\\circ = 1+30.55(0.292-Z_c)^{1.72}, 0.022 \\le \\mu_{r} < 0.075\n\n        F_p^\\circ = 1+30.55(0.292-Z_c)^{1.72}|0.96+0.1(T_r-0.7)| 0.075 < \\mu_{r}\n\n        F_Q^\\circ = 1.22Q^{0.15}\\left\\{ 1+0.00385[(T_r-12)^2]^{1/M}\\text{sign}\n        (T_r-12)\\right\\}\n\n        \\mu_r = 52.46 \\frac{\\mu^2 P_c}{T_c^2}\n\n        \\xi=0.176\\left(\\frac{T_c}{MW^3 P_c^4}\\right)^{1/6}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc: float\n        Critical point of fluid [K]\n    Pc : float\n        Critical pressure of the fluid [Pa]\n    Zc : float\n        Critical compressibility of the fluid [Pa]\n    dipole : float\n        Dipole moment of fluid [debye]\n    CASRN : str, optional\n        CAS of the fluid\n\n    Returns\n    -------\n    mu_g : float\n        Viscosity of gas, [Pa*s]\n\n    Notes\n    -----\n    The example is from [1]_; all results agree.\n    Viscosity is calculated in micropoise, and converted to SI internally (1E-7).\n    Q for He = 1.38; Q for H2 = 0.76; Q for D2 = 0.52.\n\n    Examples\n    --------\n    >>> lucas_gas(T=550., Tc=512.6, Pc=80.9E5, Zc=0.224, MW=32.042, dipole=1.7)\n    1.7822676912698928e-05\n\n    References\n    ----------\n    .. [1] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E.\n       Properties of Gases and Liquids. McGraw-Hill Companies, 1987.", "id": "f15804:m8"}
{"signature": "def load_all_methods(self):", "body": "methods = [MIXING_LOG_MOLAR, MIXING_LOG_MASS]<EOL>if len(self.CASs) > <NUM_LIT:1> and '<STR_LIT>' in self.CASs:<EOL><INDENT>wCASs = [i for i in self.CASs if i != '<STR_LIT>'] <EOL>if all([i in _Laliberte_Viscosity_ParametersDict for i in wCASs]):<EOL><INDENT>methods.append(LALIBERTE_MU)<EOL>self.wCASs = wCASs<EOL>self.index_w = self.CASs.index('<STR_LIT>')<EOL><DEDENT><DEDENT>self.all_methods = set(methods)<EOL>Tmins = [i.Tmin for i in self.ViscosityLiquids if i.Tmin]<EOL>Tmaxs = [i.Tmax for i in self.ViscosityLiquids if i.Tmax]<EOL>if Tmins:<EOL><INDENT>self.Tmin = max(Tmins)<EOL><DEDENT>if Tmaxs:<EOL><INDENT>self.Tmax = max(Tmaxs)<EOL><DEDENT>", "docstring": "r'''Method to initialize the object by precomputing any values which\n        may be used repeatedly and by retrieving mixture-specific variables.\n        All data are stored as attributes. This method also sets :obj:`Tmin`, \n        :obj:`Tmax`, and :obj:`all_methods` as a set of methods which should \n        work to calculate the property.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. This method can be called again\n        to reset the parameters.", "id": "f15804:c1:m1"}
{"signature": "def viscosity_index(nu_40, nu_100, rounding=False):", "body": "nu_40, nu_100 = nu_40*<NUM_LIT>, nu_100*<NUM_LIT>  <EOL>if nu_100 < <NUM_LIT:2>:<EOL><INDENT>return None  <EOL><DEDENT>elif nu_100 < <NUM_LIT>:<EOL><INDENT>L = np.interp(nu_100, VI_nus, VI_Ls)<EOL>H = np.interp(nu_100, VI_nus, VI_Hs)<EOL><DEDENT>else:<EOL><INDENT>L = <NUM_LIT>*nu_100**<NUM_LIT:2> + <NUM_LIT>*nu_100 - <NUM_LIT><EOL>H = <NUM_LIT>*nu_100**<NUM_LIT:2> + <NUM_LIT>*nu_100 - <NUM_LIT><EOL><DEDENT>if nu_40 > H:<EOL><INDENT>VI = (L-nu_40)/(L-H)*<NUM_LIT:100><EOL><DEDENT>else:<EOL><INDENT>N = (log(H) - log(nu_40))/log(nu_100)<EOL>VI = (<NUM_LIT:10>**N-<NUM_LIT:1>)/<NUM_LIT> + <NUM_LIT:100><EOL><DEDENT>if rounding:<EOL><INDENT>VI = _round_whole_even(VI)<EOL><DEDENT>return VI<EOL>", "docstring": "r'''Calculates the viscosity index of a liquid. Requires dynamic viscosity\n    of a liquid at 40\u00b0C and 100\u00b0C. Value may either be returned with or\n    without rounding. Rounding is performed per the standard.\n\n    if nu_100 < 70:\n\n    .. math::\n        L, H = interp(nu_100)\n\n    else:\n\n    .. math::\n        L = 0.8353\\nu_{100}^2 + 14.67\\nu_{100} - 216\n\n        H = 0.1684\\nu_{100}^2 + 11.85\\nu_{100} - 97\n\n    if nu_40 > H:\n\n    .. math::\n        VI = \\frac{L-nu_{40}}{L-H}\\cdot 100\n\n    else:\n\n    .. math::\n        N = \\frac{\\log(H) - \\log(\\nu_{40})}{\\log (\\nu_{100})}\n\n         VI = \\frac{10^N-1}{0.00715} + 100\n\n    Parameters\n    ----------\n    nu_40 : float\n        Dynamic viscosity of fluid at 40\u00b0C, [m^2/s]\n    nu_100 : float\n        Dynamic viscosity of fluid at 100\u00b0C, [m^2/s]\n    rounding : bool, optional\n        Whether to round the value or not.\n\n    Returns\n    -------\n    VI: float\n        Viscosity index [-]\n\n    Notes\n    -----\n    VI is undefined for nu_100 under 2 mm^2/s. None is returned if this is the\n    case. Internal units are mm^2/s. 
Higher values of viscosity index suggest\n    a lesser decrease in kinematic viscosity as temperature increases.\n\n    Note that viscosity is a pressure-dependent property, and that the \n    viscosity index is defined for a fluid at whatever pressure it is at.\n    The viscosity index is thus also a function of pressure.\n\n    Examples\n    --------\n    >>> viscosity_index(73.3E-6, 8.86E-6, rounding=True)\n    92\n\n    References\n    ----------\n    .. [1] ASTM D2270-10(2016) Standard Practice for Calculating Viscosity\n       Index from Kinematic Viscosity at 40\u2009\u00b0C and 100\u2009\u00b0C, ASTM International,\n       West Conshohocken, PA, 2016, http://dx.doi.org/10.1520/D2270-10R16", "id": "f15804:m14"}
{"signature": "def Przedziecki_Sridhar(T, Tm, Tc, Pc, Vc, Vm, omega, MW):", "body": "Pc = Pc/<NUM_LIT>  <EOL>Vm, Vc = Vm*<NUM_LIT>, Vc*<NUM_LIT>  <EOL>Tr = T/Tc<EOL>Gamma = <NUM_LIT> - <NUM_LIT>*Tr - <NUM_LIT>*Tr**<NUM_LIT:2><EOL>VrT = <NUM_LIT>-<NUM_LIT>*Tr + <NUM_LIT>*Tr**<NUM_LIT:2> - <NUM_LIT>*Tr**<NUM_LIT:3> + <NUM_LIT>*Tr**<NUM_LIT:4><EOL>V = VrT*(<NUM_LIT:1>-omega*Gamma)*Vc<EOL>Vo = <NUM_LIT>*omega*Tc - <NUM_LIT> + Vm/(<NUM_LIT>*(Tm/Tc) + <NUM_LIT>)  <EOL>E = -<NUM_LIT> + Vc/(<NUM_LIT> + <NUM_LIT:0.1>*MW - <NUM_LIT>*Pc + <NUM_LIT>*Tm - <NUM_LIT>*(Tm/Tc))<EOL>return Vo/(E*(V-Vo))/<NUM_LIT><EOL>", "docstring": "r'''Calculates the viscosity of a liquid using an emperical formula\n    developed in [1]_.\n\n    .. math::\n        \\mu=\\frac{V_o}{E(V-V_o)}\n\n        E=-1.12+\\frac{V_c}{12.94+0.10MW-0.23P_c+0.0424T_{m}-11.58(T_{m}/T_c)}\n\n        V_o = 0.0085\\omega T_c-2.02+\\frac{V_{m}}{0.342(T_m/T_c)+0.894}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the fluid [K]\n    Tm : float\n        Melting point of fluid [K]\n    Tc : float\n        Critical temperature of the fluid [K]\n    Pc : float\n        Critical pressure of the fluid [Pa]\n    Vc : float\n        Critical volume of the fluid [m^3/mol]\n    Vm : float\n        Molar volume of the fluid at temperature [K]\n    omega : float\n        Acentric factor of compound\n    MW : float\n        Molwcular weight of fluid [g/mol]\n\n    Returns\n    -------\n    mu_l : float\n        Viscosity of liquid, [Pa*S]\n\n    Notes\n    -----\n    A test by Reid (1983) is used, but only mostly correct.\n    This function is not recommended. Its use has been removed from the Liquid Viscosity function.\n    Internal units are bar and mL/mol.\n    TODO: Test again with data from 5th ed table.\n\n    Examples\n    --------\n    >>> Przedziecki_Sridhar(383., 178., 591.8, 41E5, 316E-6, 95E-6, .263, 92.14)\n    0.0002198147995603383\n\n    References\n    ----------\n    .. [1] Przedziecki, J. 
W., and T. Sridhar. \"Prediction of Liquid\n       Viscosities.\" AIChE Journal 31, no. 2 (February 1, 1985): 333-35.\n       doi:10.1002/aic.690310225.", "id": "f15804:m4"}
{"signature": "def calculate(self, T, P, zs, ws, method):", "body": "if method == SIMPLE:<EOL><INDENT>mus = [i(T, P) for i in self.ViscosityGases]<EOL>return mixing_simple(zs, mus)<EOL><DEDENT>elif method == HERNING_ZIPPERER:<EOL><INDENT>mus = [i(T, P) for i in self.ViscosityGases]<EOL>return Herning_Zipperer(zs, mus, self.MWs)<EOL><DEDENT>elif method == WILKE:<EOL><INDENT>mus = [i(T, P) for i in self.ViscosityGases]<EOL>return Wilke(zs, mus, self.MWs)<EOL><DEDENT>elif method == BROKAW:<EOL><INDENT>mus = [i(T, P) for i in self.ViscosityGases]<EOL>return Brokaw(T, zs, mus, self.MWs, self.molecular_diameters, self.Stockmayers)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Method to calculate viscosity of a gas mixture at \n        temperature `T`, pressure `P`, mole fractions `zs` and weight fractions\n        `ws` with a given method.\n\n        This method has no exception handling; see `mixture_property`\n        for that.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the property, [K]\n        P : float\n            Pressure at which to calculate the property, [Pa]\n        zs : list[float]\n            Mole fractions of all species in the mixture, [-]\n        ws : list[float]\n            Weight fractions of all species in the mixture, [-]\n        method : str\n            Name of the method to use\n\n        Returns\n        -------\n        mu : float\n            Viscosity of gas mixture, [Pa*s]", "id": "f15804:c3:m2"}
{"signature": "def Yoon_Thodos(T, Tc, Pc, MW):", "body": "Tr = T/Tc<EOL>xi = <NUM_LIT>*Tc**(<NUM_LIT:1>/<NUM_LIT>)/(MW**<NUM_LIT:0.5>*Pc**(<NUM_LIT:2>/<NUM_LIT>))<EOL>a = <NUM_LIT><EOL>b = <NUM_LIT><EOL>c = <NUM_LIT><EOL>d = -<NUM_LIT><EOL>e = <NUM_LIT><EOL>f = -<NUM_LIT><EOL>return (<NUM_LIT:1.> + a*Tr**b - c * exp(d*Tr) + e*exp(f*Tr))/(<NUM_LIT>*xi)<EOL>", "docstring": "r'''Calculates the viscosity of a gas using an emperical formula\n    developed in [1]_.\n\n    .. math::\n        \\eta \\xi \\times 10^8 = 46.10 T_r^{0.618} - 20.40 \\exp(-0.449T_r) + 1\n        9.40\\exp(-4.058T_r)+1\n\n        \\xi = 2173.424 T_c^{1/6} MW^{-1/2} P_c^{-2/3}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the fluid [K]\n    Tc : float\n        Critical temperature of the fluid [K]\n    Pc : float\n        Critical pressure of the fluid [Pa]\n    MW : float\n        Molwcular weight of fluid [g/mol]\n\n    Returns\n    -------\n    mu_g : float\n        Viscosity of gas, [Pa*S]\n\n    Notes\n    -----\n    This equation has been tested. The equation uses SI units only internally.\n    The constant 2173.424 is an adjustment factor for units.\n    Average deviation within 3% for most compounds.\n    Greatest accuracy with dipole moments close to 0.\n    Hydrogen and helium have different coefficients, not implemented.\n    This is DIPPR Procedure 8B: Method for the Viscosity of Pure,\n    non hydrocarbon, nonpolar gases at low pressures\n\n    Examples\n    --------\n    >>> Yoon_Thodos(300., 556.35, 4.5596E6, 153.8)\n    1.0194885727776819e-05\n\n    References\n    ----------\n    .. [1]  Yoon, Poong, and George Thodos. \"Viscosity of Nonpolar Gaseous\n       Mixtures at Normal Pressures.\" AIChE Journal 16, no. 2 (1970): 300-304.\n       doi:10.1002/aic.690160225.", "id": "f15804:m6"}
{"signature": "def load_all_methods(self):", "body": "methods, methods_P = [], []<EOL>Tmins, Tmaxs = [], []<EOL>if has_CoolProp and self.CASRN in coolprop_dict:<EOL><INDENT>methods.append(COOLPROP); methods_P.append(COOLPROP)<EOL>self.CP_f = coolprop_fluids[self.CASRN]<EOL>Tmins.append(self.CP_f.Tmin); Tmaxs.append(self.CP_f.Tc)<EOL><DEDENT>if self.CASRN in _VDISaturationDict:<EOL><INDENT>methods.append(VDI_TABULAR)<EOL>Ts, props = VDI_tabular_data(self.CASRN, '<STR_LIT>')<EOL>self.VDI_Tmin = Ts[<NUM_LIT:0>]<EOL>self.VDI_Tmax = Ts[-<NUM_LIT:1>]<EOL>self.tabular_data[VDI_TABULAR] = (Ts, props)<EOL>Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax)<EOL><DEDENT>if self.CASRN in Dutt_Prasad.index:<EOL><INDENT>methods.append(DUTT_PRASAD)<EOL>_, A, B, C, self.DUTT_PRASAD_Tmin, self.DUTT_PRASAD_Tmax = _Dutt_Prasad_values[Dutt_Prasad.index.get_loc(self.CASRN)].tolist()<EOL>self.DUTT_PRASAD_coeffs = [A, B, C]<EOL>Tmins.append(self.DUTT_PRASAD_Tmin); Tmaxs.append(self.DUTT_PRASAD_Tmax)<EOL><DEDENT>if self.CASRN in VN3_data.index:<EOL><INDENT>methods.append(VISWANATH_NATARAJAN_3)<EOL>_, _, A, B, C, self.VISWANATH_NATARAJAN_3_Tmin, self.VISWANATH_NATARAJAN_3_Tmax = _VN3_data_values[VN3_data.index.get_loc(self.CASRN)].tolist()<EOL>self.VISWANATH_NATARAJAN_3_coeffs = [A, B, C]<EOL>Tmins.append(self.VISWANATH_NATARAJAN_3_Tmin); Tmaxs.append(self.VISWANATH_NATARAJAN_3_Tmax)<EOL><DEDENT>if self.CASRN in VN2_data.index:<EOL><INDENT>methods.append(VISWANATH_NATARAJAN_2)<EOL>_, _, A, B, self.VISWANATH_NATARAJAN_2_Tmin, self.VISWANATH_NATARAJAN_2_Tmax = _VN2_data_values[VN2_data.index.get_loc(self.CASRN)].tolist()<EOL>self.VISWANATH_NATARAJAN_2_coeffs = [A, B]<EOL>Tmins.append(self.VISWANATH_NATARAJAN_2_Tmin); Tmaxs.append(self.VISWANATH_NATARAJAN_2_Tmax)<EOL><DEDENT>if self.CASRN in VN2E_data.index:<EOL><INDENT>methods.append(VISWANATH_NATARAJAN_2E)<EOL>_, _, C, D, self.VISWANATH_NATARAJAN_2E_Tmin, self.VISWANATH_NATARAJAN_2E_Tmax = 
_VN2E_data_values[VN2E_data.index.get_loc(self.CASRN)].tolist()<EOL>self.VISWANATH_NATARAJAN_2E_coeffs = [C, D]<EOL>Tmins.append(self.VISWANATH_NATARAJAN_2E_Tmin); Tmaxs.append(self.VISWANATH_NATARAJAN_2E_Tmax)<EOL><DEDENT>if self.CASRN in Perrys2_313.index:<EOL><INDENT>methods.append(DIPPR_PERRY_8E)<EOL>_, C1, C2, C3, C4, C5, self.Perrys2_313_Tmin, self.Perrys2_313_Tmax = _Perrys2_313_values[Perrys2_313.index.get_loc(self.CASRN)].tolist()<EOL>self.Perrys2_313_coeffs = [C1, C2, C3, C4, C5]<EOL>Tmins.append(self.Perrys2_313_Tmin); Tmaxs.append(self.Perrys2_313_Tmax)<EOL><DEDENT>if self.CASRN in VDI_PPDS_7.index:<EOL><INDENT>methods.append(VDI_PPDS)<EOL>self.VDI_PPDS_coeffs = _VDI_PPDS_7_values[VDI_PPDS_7.index.get_loc(self.CASRN)].tolist()[<NUM_LIT:2>:]<EOL><DEDENT>if all((self.MW, self.Tc, self.Pc, self.omega)):<EOL><INDENT>methods.append(LETSOU_STIEL)<EOL>Tmins.append(self.Tc/<NUM_LIT:4>); Tmaxs.append(self.Tc) <EOL><DEDENT>if all((self.MW, self.Tm, self.Tc, self.Pc, self.Vc, self.omega, self.Vml)):<EOL><INDENT>methods.append(PRZEDZIECKI_SRIDHAR)<EOL>Tmins.append(self.Tm); Tmaxs.append(self.Tc) <EOL><DEDENT>if all([self.Tc, self.Pc, self.omega]):<EOL><INDENT>methods_P.append(LUCAS)<EOL><DEDENT>self.all_methods = set(methods)<EOL>self.all_methods_P = set(methods_P)<EOL>if Tmins and Tmaxs:<EOL><INDENT>self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)<EOL><DEDENT>", "docstring": "r'''Method which picks out coefficients for the specified chemical\n        from the various dictionaries and DataFrames storing it. All data is\n        stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`,\n        :obj:`all_methods` and obj:`all_methods_P` as a set of methods for\n        which the data exists for.\n\n        Called on initialization only. See the source code for the variables at\n        which the coefficients are stored. The coefficients can safely be\n        altered once the class is initialized. 
This method can be called again\n        to reset the parameters.", "id": "f15804:c0:m1"}
{"signature": "def calculate_P(self, T, P, method):", "body": "if method == COOLPROP:<EOL><INDENT>mu = PropsSI('<STR_LIT>', '<STR_LIT:T>', T, '<STR_LIT:P>', P, self.CASRN)<EOL><DEDENT>elif method in self.tabular_data:<EOL><INDENT>mu = self.interpolate_P(T, P, method)<EOL><DEDENT>return mu<EOL>", "docstring": "r'''Method to calculate pressure-dependent gas viscosity\n        at temperature `T` and pressure `P` with a given method.\n\n        This method has no exception handling; see `TP_dependent_property`\n        for that.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate gas viscosity, [K]\n        P : float\n            Pressure at which to calculate gas viscosity, [K]\n        method : str\n            Name of the method to use\n\n        Returns\n        -------\n        mu : float\n            Viscosity of the gas at T and P, [Pa*]", "id": "f15804:c2:m4"}
{"signature": "def ViswanathNatarajan3(T, A, B, C):", "body": "mu = <NUM_LIT:10>**(A + B/(C - T))<EOL>return mu/<NUM_LIT><EOL>", "docstring": "r'''Calculate the viscosity of a liquid using the 3-term Antoine form\n    representation developed in [1]_. Requires input coefficients. The `A`\n    coefficient is assumed to yield coefficients in centipoise, as all \n    coefficients found so far have been.\n\n    .. math::\n        \\log_{10} \\mu = A + B/(T + C)\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n\n    Returns\n    -------\n    mu : float\n        Liquid viscosity, [Pa*s]\n\n    Notes\n    -----\n    No other source for these coefficients has been found.\n\n    Examples\n    --------\n    >>> ViswanathNatarajan3(298.15, -2.7173, -1071.18, -129.51)\n    0.0006129806445142112\n\n    References\n    ----------\n    .. [1] Viswanath, Dabir S., and G. Natarajan. Databook On The Viscosity Of\n       Liquids. New York: Taylor & Francis, 1989", "id": "f15804:m2"}
{"signature": "def Brokaw(T, ys, mus, MWs, molecular_diameters, Stockmayers):", "body": "cmps = range(len(ys))<EOL>MDs = molecular_diameters<EOL>if not none_and_length_check([ys, mus, MWs, molecular_diameters, Stockmayers]): <EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>Tsts = [T/Stockmayer_i for Stockmayer_i in Stockmayers]<EOL>Sij = [[<NUM_LIT:0> for i in cmps] for j in cmps]<EOL>Mij = [[<NUM_LIT:0> for i in cmps] for j in cmps]<EOL>mij = [[<NUM_LIT:0> for i in cmps] for j in cmps]<EOL>Aij = [[<NUM_LIT:0> for i in cmps] for j in cmps]<EOL>phiij =[[<NUM_LIT:0> for i in cmps] for j in cmps]<EOL>for i in cmps:<EOL><INDENT>for j in cmps:<EOL><INDENT>Sij[i][j] = (<NUM_LIT:1>+(Tsts[i]*Tsts[j])**<NUM_LIT:0.5> + (MDs[i]*MDs[j])/<NUM_LIT>)/(<NUM_LIT:1> + Tsts[i] + (MDs[i]**<NUM_LIT:2>/<NUM_LIT>))**<NUM_LIT:0.5>/(<NUM_LIT:1> + Tsts[j] + (MDs[j]**<NUM_LIT:2>/<NUM_LIT>))**<NUM_LIT:0.5><EOL>if MDs[i] <= <NUM_LIT:0.1> and MDs[j] <= <NUM_LIT:0.1>:<EOL><INDENT>Sij[i][j] = <NUM_LIT:1><EOL><DEDENT>Mij[i][j] = MWs[i]/MWs[j]<EOL>mij[i][j] = (<NUM_LIT>/(<NUM_LIT:1>+Mij[i][j]**-<NUM_LIT:1>)/(<NUM_LIT:1>+Mij[i][j]))**<NUM_LIT><EOL>Aij[i][j] = mij[i][j]*Mij[i][j]**-<NUM_LIT:0.5>*(<NUM_LIT:1> + (Mij[i][j]-Mij[i][j]**<NUM_LIT>)/(<NUM_LIT:2>*(<NUM_LIT:1>+Mij[i][j]) + (<NUM_LIT:1>+Mij[i][j]**<NUM_LIT>)*mij[i][j]**-<NUM_LIT:0.5>/(<NUM_LIT:1>+mij[i][j])))<EOL>phiij[i][j] = (mus[i]/mus[j])**<NUM_LIT:0.5>*Sij[i][j]*Aij[i][j]<EOL><DEDENT><DEDENT>return sum([ys[i]*mus[i]/sum([ys[j]*phiij[i][j] for j in cmps]) for i in cmps])<EOL>", "docstring": "r'''Calculates viscosity of a gas mixture according to\n    mixing rules in [1]_.\n\n    .. 
math::\n        \\eta_{mix} = \\sum_{i=1}^n \\frac{y_i \\eta_i}{\\sum_{j=1}^n y_j \\phi_{ij}}\n\n        \\phi_{ij} = \\left( \\frac{\\eta_i}{\\eta_j} \\right)^{0.5} S_{ij} A_{ij}\n\n        A_{ij} = m_{ij} M_{ij}^{-0.5} \\left[1 +\n        \\frac{M_{ij} - M_{ij}^{0.45}}\n        {2(1+M_{ij}) + \\frac{(1 + M_{ij}^{0.45}) m_{ij}^{-0.5}}{1 + m_{ij}}} \\right]\n\n        m_{ij} = \\left[ \\frac{4}{(1+M_{ij}^{-1})(1+M_{ij})}\\right]^{0.25}\n\n        M_{ij} = \\frac{M_i}{M_j}\n\n        S_{ij} = \\frac{1 + (T_i^* T_j^*)^{0.5} + (\\delta_i \\delta_j/4)}\n        {[1+T_i^* + (\\delta_i^2/4)]^{0.5}[1+T_j^*+(\\delta_j^2/4)]^{0.5}}\n\n        T^* = kT/\\epsilon\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid, [K]\n    ys : float\n        Mole fractions of gas components\n    mus : float\n        Gas viscosities of all components, [Pa*S]\n    MWs : float\n        Molecular weights of all components, [g/mol]\n    molecular_diameters : float\n        L-J molecular diameter  of all components, [angstroms]\n    Stockmayers : float\n        L-J Stockmayer energy parameters of all components, []\n\n    Returns\n    -------\n    mug : float\n        Viscosity of gas mixture, [Pa*S]\n\n    Notes\n    -----\n    This equation is entirely dimensionless; all dimensions cancel.\n    The original source has not been reviewed.\n\n    This is DIPPR Procedure 8D: Method for the Viscosity of Nonhydrocarbon\n    Vapor Mixtures at Low Pressure (Polar and Nonpolar)\n\n    Examples\n    --------\n    >>> Brokaw(308.2, [0.05, 0.95], [1.34E-5, 9.5029E-6], [64.06, 46.07], [0.42, 0.19], [347, 432])\n    9.699085099801568e-06\n\n    References\n    ----------\n    .. [1] Brokaw, R. S. \"Predicting Transport Properties of Dilute Gases.\"\n       Industrial & Engineering Chemistry Process Design and Development\n       8, no. 2 (April 1, 1969): 240-53. doi:10.1021/i260030a015.\n    .. [2] Brokaw, R. S. Viscosity of Gas Mixtures, NASA-TN-D-4496, 1968.\n    .. 
[3] Danner, Ronald P, and Design Institute for Physical Property Data.\n       Manual for Predicting Chemical Process Design Data. New York, N.Y, 1982.", "id": "f15804:m12"}
{"signature": "def __call__(self, T):", "body": "if T == self.T_cached:<EOL><INDENT>return self.prop_cached<EOL><DEDENT>else:<EOL><INDENT>self.prop_cached = self.T_dependent_property(T)<EOL>self.T_cached = T<EOL>return self.prop_cached<EOL><DEDENT>", "docstring": "r'''Convenience method to calculate the property; calls \n        :obj:`T_dependent_property`. Caches previously calculated value,\n        which is an overhead when calculating many different values of\n        a property. See :obj:`T_dependent_property` for more details as to the\n        calculation procedure.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the property, [K]\n\n        Returns\n        -------\n        prop : float\n            Calculated property, [`units`]", "id": "f15806:c0:m2"}
{"signature": "def __call__(self, T, P):", "body": "if (T, P) == self.TP_cached:<EOL><INDENT>return self.prop_cached<EOL><DEDENT>else:<EOL><INDENT>self.prop_cached = self.TP_or_T_dependent_property(T, P)<EOL>self.TP_cached = (T, P)<EOL>return self.prop_cached<EOL><DEDENT>", "docstring": "r'''Convenience method to calculate the property; calls \n        :obj:`TP_dependent_property`. Caches previously calculated value,\n        which is an overhead when calculating many different values of\n        a property. See :obj:`TP_dependent_property` for more details as to the\n        calculation procedure.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the property, [K]\n        P : float\n            Pressure at which to calculate the property, [Pa]\n\n        Returns\n        -------\n        prop : float\n            Calculated property, [`units`]", "id": "f15806:c1:m0"}
{"signature": "def T_dependent_property_integral_over_T(self, T1, T2):", "body": "Tavg = <NUM_LIT:0.5>*(T1+T2)<EOL>if self.method:<EOL><INDENT>if self.test_method_validity(Tavg, self.method):<EOL><INDENT>try:<EOL><INDENT>return self.calculate_integral_over_T(T1, T2, self.method)<EOL><DEDENT>except:  <EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>sorted_valid_methods = self.select_valid_methods(Tavg)<EOL>for method in sorted_valid_methods:<EOL><INDENT>try:<EOL><INDENT>return self.calculate_integral_over_T(T1, T2, method)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "r'''Method to calculate the integral of a property over temperature \n        with respect to temperature, using a specified method. Methods found\n        valid by `select_valid_methods` are attempted until a method succeeds. \n        If no methods are valid and succeed, None is returned.\n\n        Calls `calculate_integral_over_T` internally to perform the actual\n        calculation.\n\n        .. math::\n            \\text{integral} = \\int_{T_1}^{T_2} \\frac{\\text{property}}{T} \\; dT\n\n        Parameters\n        ----------\n        T1 : float\n            Lower limit of integration, [K]\n        T2 : float\n            Upper limit of integration, [K]\n        method : str\n            Method for which to find the integral\n\n        Returns\n        -------\n        integral : float\n            Calculated integral of the property over the given range, \n            [`units`]", "id": "f15806:c0:m17"}
{"signature": "def mixing_simple(fracs, props):", "body": "if not none_and_length_check([fracs, props]):<EOL><INDENT>return None<EOL><DEDENT>result = sum(frac*prop for frac, prop in zip(fracs, props))<EOL>return result<EOL>", "docstring": "r'''Simple function calculates a property based on weighted averages of\n    properties. Weights could be mole fractions, volume fractions, mass\n    fractions, or anything else.\n\n    .. math::\n        y = \\sum_i \\text{frac}_i \\cdot \\text{prop}_i\n\n    Parameters\n    ----------\n    fracs : array-like\n        Fractions of a mixture\n    props: array-like\n        Properties\n\n    Returns\n    -------\n    prop : value\n        Calculated property\n\n    Notes\n    -----\n    Returns None if any fractions or properties are missing or are not of the\n    same length.\n\n    Examples\n    --------\n    >>> mixing_simple([0.1, 0.9], [0.01, 0.02])\n    0.019000000000000003", "id": "f15806:m33"}
{"signature": "def calculate_derivative_P(self, P, T, zs, ws, method, order=<NUM_LIT:1>):", "body": "f = lambda P: self.calculate(T, P, zs, ws, method)<EOL>return derivative(f, P, dx=<NUM_LIT>, n=order, order=<NUM_LIT:1>+order*<NUM_LIT:2>)<EOL>", "docstring": "r'''Method to calculate a derivative of a mixture property with respect \n        to pressure at constant temperature and composition\n        of a given order using a specified method. Uses SciPy's derivative \n        function, with a delta of 0.01 Pa and a number of points equal to \n        2*order + 1.\n\n        This method can be overwritten by subclasses who may perfer to add\n        analytical methods for some or all methods as this is much faster.\n\n        If the calculation does not succeed, returns the actual error\n        encountered.\n\n        Parameters\n        ----------\n        P : float\n            Pressure at which to calculate the derivative, [Pa]\n        T : float\n            Temperature at which to calculate the derivative, [K]\n        zs : list[float]\n            Mole fractions of all species in the mixture, [-]\n        ws : list[float]\n            Weight fractions of all species in the mixture, [-]\n        method : str\n            Method for which to find the derivative\n        order : int\n            Order of the derivative, >= 1\n\n        Returns\n        -------\n        d_prop_d_P_at_T : float\n            Calculated derivative property at constant temperature, \n            [`units/Pa^order`]", "id": "f15806:c2:m6"}
{"signature": "def TP_dependent_property_derivative_P(self, T, P, order=<NUM_LIT:1>):", "body": "sorted_valid_methods_P = self.select_valid_methods_P(T, P)<EOL>for method in sorted_valid_methods_P:<EOL><INDENT>try:<EOL><INDENT>return self.calculate_derivative_P(P, T, method, order)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "r'''Method to calculate a derivative of a temperature and pressure\n        dependent property with respect to pressure at constant temperature,\n        of a given order. Methods found valid by `select_valid_methods_P` are \n        attempted until a method succeeds. If no methods are valid and succeed,\n        None is returned.\n\n        Calls `calculate_derivative_P` internally to perform the actual\n        calculation.\n\n        .. math::\n            \\text{derivative} = \\frac{d (\\text{property})}{d P}|_{T}\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the derivative, [K]\n        P : float\n            Pressure at which to calculate the derivative, [Pa]\n        order : int\n            Order of the derivative, >= 1\n\n        Returns\n        -------\n        d_prop_d_P_at_T : float\n            Calculated derivative property, [`units/Pa^order`]", "id": "f15806:c1:m13"}
{"signature": "def normalize(values):", "body": "tot = sum(values)<EOL>return [i/tot for i in values]<EOL>", "docstring": "r'''Simple function which normalizes a series of values to be from 0 to 1,\n    and for their sum to add to 1.\n\n    .. math::\n        x = \\frac{x}{sum_i x_i}\n\n    Parameters\n    ----------\n    values : array-like\n        array of values\n\n    Returns\n    -------\n    fractions : array-like\n        Array of values from 0 to 1\n\n    Notes\n    -----\n    Does not work on negative values.\n\n    Examples\n    --------\n    >>> normalize([3, 2, 1])\n    [0.5, 0.3333333333333333, 0.16666666666666666]", "id": "f15806:m32"}
{"signature": "def Joule_Thomson(T, V, Cp, dV_dT=None, beta=None):", "body": "if dV_dT:<EOL><INDENT>return (T*dV_dT - V)/Cp<EOL><DEDENT>elif beta:<EOL><INDENT>return V/Cp*(beta*T - <NUM_LIT:1.>)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>", "docstring": "r'''Calculate a real fluid's Joule Thomson coefficient. The required \n    derivative should be calculated with an equation of state, and `Cp` is the\n    real fluid versions. This can either be calculated with `dV_dT` directly, \n    or with `beta` if it is already known.\n\n    .. math::\n        \\mu_{JT} = \\left(\\frac{\\partial T}{\\partial P}\\right)_H = \\frac{1}{C_p}\n        \\left[T \\left(\\frac{\\partial V}{\\partial T}\\right)_P - V\\right]\n        = \\frac{V}{C_p}\\left(\\beta T-1\\right)\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid, [K]\n    V : float\n        Molar volume of fluid, [m^3/mol]\n    Cp : float\n        Real fluid heat capacity at constant pressure, [J/mol/K]\n    dV_dT : float, optional\n        Derivative of `V` with respect to `T`, [m^3/mol/K]\n    beta : float, optional\n        Isobaric coefficient of a thermal expansion, [1/K]\n\n    Returns\n    -------\n    mu_JT : float\n        Joule-Thomson coefficient [K/Pa]\n\n    Examples\n    --------\n    Example from [2]_:\n\n    >>> Joule_Thomson(T=390, V=0.00229754, Cp=153.235, dV_dT=1.226396e-05)\n    1.621956080529905e-05\n\n    References\n    ----------\n    .. [1] Walas, Stanley M. Phase Equilibria in Chemical Engineering. \n       Butterworth-Heinemann, 1985.\n    .. [2] Pratt, R. M. \"Thermodynamic Properties Involving Derivatives: Using \n       the Peng-Robinson Equation of State.\" Chemical Engineering Education 35,\n       no. 2 (March 1, 2001): 112-115.", "id": "f15806:m15"}
{"signature": "def T_dependent_property_derivative(self, T, order=<NUM_LIT:1>):", "body": "if self.method:<EOL><INDENT>if self.test_method_validity(T, self.method):<EOL><INDENT>try:<EOL><INDENT>return self.calculate_derivative(T, self.method, order)<EOL><DEDENT>except:  <EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>sorted_valid_methods = self.select_valid_methods(T)<EOL>for method in sorted_valid_methods:<EOL><INDENT>try:<EOL><INDENT>return self.calculate_derivative(T, method, order)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "r'''Method to obtain a derivative of a property with respect to \n        temperature, of a given order. Methods found valid by \n        `select_valid_methods` are attempted until a method succeeds. If no \n        methods are valid and succeed, None is returned.\n\n        Calls `calculate_derivative` internally to perform the actual\n        calculation.\n\n        .. math::\n            \\text{derivative} = \\frac{d (\\text{property})}{d T}\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the derivative, [K]\n        order : int\n            Order of the derivative, >= 1\n\n        Returns\n        -------\n        derivative : float\n            Calculated derivative property, [`units/K^order`]", "id": "f15806:c0:m13"}
{"signature": "def plot_TP_dependent_property(self, Tmin=None, Tmax=None, Pmin=None,<EOL>Pmax=None,  methods_P=[], pts=<NUM_LIT:15>, <EOL>only_valid=True):  ", "body": "if not has_matplotlib:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>from mpl_toolkits.mplot3d import axes3d<EOL>from matplotlib.ticker import FormatStrFormatter<EOL>import numpy.ma as ma<EOL>if Pmin is None:<EOL><INDENT>if self.Pmin is not None:<EOL><INDENT>Pmin = self.Pmin<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>if Pmax is None:<EOL><INDENT>if self.Pmax is not None:<EOL><INDENT>Pmax = self.Pmax<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>if Tmin is None:<EOL><INDENT>if self.Tmin is not None:<EOL><INDENT>Tmin = self.Tmin<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>if Tmax is None:<EOL><INDENT>if self.Tmax is not None:<EOL><INDENT>Tmax = self.Tmax<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>if not methods_P:<EOL><INDENT>methods_P = self.user_methods_P if self.user_methods_P else self.all_methods_P<EOL><DEDENT>Ps = np.linspace(Pmin, Pmax, pts)<EOL>Ts = np.linspace(Tmin, Tmax, pts)<EOL>Ts_mesh, Ps_mesh = np.meshgrid(Ts, Ps)<EOL>fig = plt.figure()<EOL>ax = fig.gca(projection='<STR_LIT>')<EOL>handles = []<EOL>for method_P in methods_P:<EOL><INDENT>if only_valid:<EOL><INDENT>properties = []<EOL>for T in Ts:<EOL><INDENT>T_props = []<EOL>for P in Ps:<EOL><INDENT>if self.test_method_validity_P(T, P, method_P):<EOL><INDENT>try:<EOL><INDENT>p = self.calculate_P(T, P, method_P)<EOL>if self.test_property_validity(p):<EOL><INDENT>T_props.append(p)<EOL><DEDENT>else:<EOL><INDENT>T_props.append(None)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>T_props.append(None)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>T_props.append(None)<EOL><DEDENT><DEDENT>properties.append(T_props)<EOL><DEDENT>properties = ma.masked_invalid(np.array(properties, 
dtype=np.float).T)<EOL>handles.append(ax.plot_surface(Ts_mesh, Ps_mesh, properties, cstride=<NUM_LIT:1>, rstride=<NUM_LIT:1>, alpha=<NUM_LIT:0.5>))<EOL><DEDENT>else:<EOL><INDENT>properties = [[self.calculate_P(T, P, method_P) for P in Ps] for T in Ts]<EOL>handles.append(ax.plot_surface(Ts_mesh, Ps_mesh, properties, cstride=<NUM_LIT:1>, rstride=<NUM_LIT:1>, alpha=<NUM_LIT:0.5>))<EOL><DEDENT><DEDENT>ax.yaxis.set_major_formatter(FormatStrFormatter('<STR_LIT>'))<EOL>ax.zaxis.set_major_formatter(FormatStrFormatter('<STR_LIT>'))<EOL>ax.xaxis.set_major_formatter(FormatStrFormatter('<STR_LIT>'))<EOL>ax.set_xlabel('<STR_LIT>')<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_zlabel(self.name + '<STR_LIT:U+002CU+0020>' + self.units)<EOL>plt.title(self.name + '<STR_LIT>' + self.CASRN)<EOL>plt.show(block=False)<EOL>ax.legend(handles, methods_P)<EOL>plt.show(block=False)<EOL>", "docstring": "r'''Method to create a plot of the property vs temperature and pressure \n        according to either a specified list of methods, or user methods (if \n        set), or all methods. User-selectable number of points for each \n        variable. If only_valid is set,`test_method_validity_P` will be used to\n        check if each condition in the specified range is valid, and\n        `test_property_validity` will be used to test the answer, and the\n        method is allowed to fail; only the valid points will be plotted.\n        Otherwise, the result will be calculated and displayed as-is. 
This will\n        not suceed if the any method fails for any point.\n\n        Parameters\n        ----------\n        Tmin : float\n            Minimum temperature, to begin calculating the property, [K]\n        Tmax : float\n            Maximum temperature, to stop calculating the property, [K]\n        Pmin : float\n            Minimum pressure, to begin calculating the property, [Pa]\n        Pmax : float\n            Maximum pressure, to stop calculating the property, [Pa]\n        methods_P : list, optional\n            List of methods to consider\n        pts : int, optional\n            A list of points to calculate the property at for both temperature \n            and pressure; pts^2 points will be calculated.\n        only_valid : bool\n            If True, only plot successful methods and calculated properties,\n            and handle errors; if False, attempt calculation without any\n            checking and use methods outside their bounds", "id": "f15806:c1:m9"}
{"signature": "def plot_property(self, zs, ws, Tmin=None, Tmax=None, Pmin=<NUM_LIT>, Pmax=<NUM_LIT>, <EOL>methods=[], pts=<NUM_LIT:15>, only_valid=True):  ", "body": "if not has_matplotlib:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>from mpl_toolkits.mplot3d import axes3d<EOL>from matplotlib.ticker import FormatStrFormatter<EOL>import numpy.ma as ma<EOL>if Pmin is None:<EOL><INDENT>if self.Pmin is not None:<EOL><INDENT>Pmin = self.Pmin<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>if Pmax is None:<EOL><INDENT>if self.Pmax is not None:<EOL><INDENT>Pmax = self.Pmax<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>if Tmin is None:<EOL><INDENT>if self.Tmin is not None:<EOL><INDENT>Tmin = self.Tmin<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>if Tmax is None:<EOL><INDENT>if self.Tmax is not None:<EOL><INDENT>Tmax = self.Tmax<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>if not methods:<EOL><INDENT>methods = self.user_methods if self.user_methods else self.all_methods<EOL><DEDENT>Ps = np.linspace(Pmin, Pmax, pts)<EOL>Ts = np.linspace(Tmin, Tmax, pts)<EOL>Ts_mesh, Ps_mesh = np.meshgrid(Ts, Ps)<EOL>fig = plt.figure()<EOL>ax = fig.gca(projection='<STR_LIT>')<EOL>handles = []<EOL>for method in methods:<EOL><INDENT>if only_valid:<EOL><INDENT>properties = []<EOL>for T in Ts:<EOL><INDENT>T_props = []<EOL>for P in Ps:<EOL><INDENT>if self.test_method_validity(T, P, zs, ws, method):<EOL><INDENT>try:<EOL><INDENT>p = self.calculate(T, P, zs, ws, method)<EOL>if self.test_property_validity(p):<EOL><INDENT>T_props.append(p)<EOL><DEDENT>else:<EOL><INDENT>T_props.append(None)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>T_props.append(None)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>T_props.append(None)<EOL><DEDENT><DEDENT>properties.append(T_props)<EOL><DEDENT>properties = ma.masked_invalid(np.array(properties, 
dtype=np.float).T)<EOL>handles.append(ax.plot_surface(Ts_mesh, Ps_mesh, properties, cstride=<NUM_LIT:1>, rstride=<NUM_LIT:1>, alpha=<NUM_LIT:0.5>))<EOL><DEDENT>else:<EOL><INDENT>properties = [[self.calculate(T, P, zs, ws, method) for P in Ps] for T in Ts]<EOL>handles.append(ax.plot_surface(Ts_mesh, Ps_mesh, properties, cstride=<NUM_LIT:1>, rstride=<NUM_LIT:1>, alpha=<NUM_LIT:0.5>))<EOL><DEDENT><DEDENT>ax.yaxis.set_major_formatter(FormatStrFormatter('<STR_LIT>'))<EOL>ax.zaxis.set_major_formatter(FormatStrFormatter('<STR_LIT>'))<EOL>ax.xaxis.set_major_formatter(FormatStrFormatter('<STR_LIT>'))<EOL>ax.set_xlabel('<STR_LIT>')<EOL>ax.set_ylabel('<STR_LIT>')<EOL>ax.set_zlabel(self.name + '<STR_LIT:U+002CU+0020>' + self.units)<EOL>plt.title(self.name + '<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(self.CASs) <EOL>+ '<STR_LIT>' + '<STR_LIT:U+002CU+0020>'.join(str(round(i, <NUM_LIT:4>)) for i in zs) + '<STR_LIT:.>')<EOL>plt.show(block=False)<EOL>ax.legend(handles, methods)<EOL>plt.show(block=False)<EOL>", "docstring": "r'''Method to create a plot of the property vs temperature and pressure \n        according to either a specified list of methods, or user methods (if \n        set), or all methods. User-selectable number of points for each \n        variable. If only_valid is set,`test_method_validity` will be used to\n        check if each condition in the specified range is valid, and\n        `test_property_validity` will be used to test the answer, and the\n        method is allowed to fail; only the valid points will be plotted.\n        Otherwise, the result will be calculated and displayed as-is. 
This will\n        not suceed if the any method fails for any point.\n\n        Parameters\n        ----------\n        Tmin : float\n            Minimum temperature, to begin calculating the property, [K]\n        Tmax : float\n            Maximum temperature, to stop calculating the property, [K]\n        Pmin : float\n            Minimum pressure, to begin calculating the property, [Pa]\n        Pmax : float\n            Maximum pressure, to stop calculating the property, [Pa]\n        methods : list, optional\n            List of methods to consider\n        pts : int, optional\n            A list of points to calculate the property at for both temperature \n            and pressure; pts^2 points will be calculated.\n        only_valid : bool\n            If True, only plot successful methods and calculated properties,\n            and handle errors; if False, attempt calculation without any\n            checking and use methods outside their bounds", "id": "f15806:c2:m11"}
{"signature": "def isothermal_compressibility(V, dV_dP):", "body": "return -dV_dP/V<EOL>", "docstring": "r'''Calculate the isothermal coefficient of a compressibility, given its \n    molar volume at a certain `T` and `P`, and its derivative of molar volume\n    with respect to `P`.\n\n    .. math::\n        \\kappa = -\\frac{1}{V}\\left(\\frac{\\partial V}{\\partial P} \\right)_T\n\n    Parameters\n    ----------\n    V : float\n        Molar volume at `T` and `P`, [m^3/mol]\n    dV_dP : float\n        Derivative of molar volume with respect to `P`, [m^3/mol/Pa]\n\n    Returns\n    -------\n    kappa : float\n        Isothermal coefficient of a compressibility, [1/Pa]\n\n    Notes\n    -----\n    For an ideal gas, this expression simplified to:\n\n    .. math::\n        \\kappa = \\frac{1}{P}\n\n    Examples\n    --------\n    Calculated for hexane from the PR EOS at 299 K and 1 MPa (liquid):\n\n    >>> isothermal_compressibility(0.000130229900873546, -2.72902118209903e-13)\n    2.095541165119158e-09\n\n    References\n    ----------\n    .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n       New York: McGraw-Hill Professional, 2000.", "id": "f15806:m10"}
{"signature": "def horner(coeffs, x):", "body": "tot = <NUM_LIT:0><EOL>for c in coeffs:<EOL><INDENT>tot = tot * x + c<EOL><DEDENT>return tot<EOL>", "docstring": "r'''Simple function to calculate the value of a polynomial at a specific\n    value of `x`, using the Horner evaluation scheme\n\n    Parameters\n    ----------\n    coeffs : array-like\n        Coefficients, where coeffs[0] is multiplied by the largest power of x,\n        and coeffs[-1] is added to the sum with no multiplication.\n    x : float\n        Value to evaluate the polynomial at\n\n    Returns\n    -------\n    y : float\n        Evaluated result\n\n    Notes\n    -----\n    Efficient. Faster than numpy.polyval.\n\n    Examples\n    --------\n    >>> horner([1,2,3], 3)\n    18", "id": "f15806:m30"}
{"signature": "def mixture_property(self, T, P, zs, ws):", "body": "<EOL>if self.method:<EOL><INDENT>if self.test_method_validity(T, P, zs, ws, self.method):<EOL><INDENT>try:<EOL><INDENT>prop = self.calculate(T, P, zs, ws, self.method)<EOL>if self.test_property_validity(prop):<EOL><INDENT>return prop<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>self.sorted_valid_methods = self.select_valid_methods(T, P, zs, ws)<EOL>for method in self.sorted_valid_methods:<EOL><INDENT>try:<EOL><INDENT>prop = self.calculate(T, P, zs, ws, method)<EOL>if self.test_property_validity(prop):<EOL><INDENT>self.method = method<EOL>return prop<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>pass<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "r'''Method to calculate the property with sanity checking and without\n        specifying a specific method. `select_valid_methods` is used to obtain\n        a sorted list of methods to try. Methods are then tried in order until\n        one succeeds. The methods are allowed to fail, and their results are\n        checked with `test_property_validity`. On success, the used method\n        is stored in the variable `method`.\n\n        If `method` is set, this method is first checked for validity with\n        `test_method_validity` for the specified temperature, and if it is\n        valid, it is then used to calculate the property. The result is checked\n        for validity, and returned if it is valid. 
If either of the checks fail,\n        the function retrieves a full list of valid methods with\n        `select_valid_methods` and attempts them as described above.\n\n        If no methods are found which succeed, returns None.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the property, [K]\n        P : float\n            Pressure at which to calculate the property, [Pa]\n        zs : list[float]\n            Mole fractions of all species in the mixture, [-]\n        ws : list[float]\n            Weight fractions of all species in the mixture, [-]\n\n        Returns\n        -------\n        prop : float\n            Calculated property, [`units`]", "id": "f15806:c2:m4"}
{"signature": "def set_tabular_data(self, Ts, properties, name=None, check_properties=True):", "body": "<EOL>if check_properties:<EOL><INDENT>for p in properties:<EOL><INDENT>if not self.test_property_validity(p):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>if not all(b > a for a, b in zip(Ts, Ts[<NUM_LIT:1>:])):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if name is None:<EOL><INDENT>name = '<STR_LIT>' + str(len(self.tabular_data))  <EOL><DEDENT>self.tabular_data[name] = (Ts, properties)<EOL>self.method = None<EOL>self.user_methods.insert(<NUM_LIT:0>, name)<EOL>self.all_methods.add(name)<EOL>self.set_user_methods(user_methods=self.user_methods, forced=self.forced)<EOL>", "docstring": "r'''Method to set tabular data to be used for interpolation.\n        Ts must be in increasing order. If no name is given, data will be\n        assigned the name 'Tabular data series #x', where x is the number of\n        previously added tabular data series. The name is added to all\n        methods and iserted at the start of user methods,\n\n        Parameters\n        ----------\n        Ts : array-like\n            Increasing array of temperatures at which properties are specified, [K]\n        properties : array-like\n            List of properties at Ts, [`units`]\n        name : str, optional\n            Name assigned to the data\n        check_properties : bool\n            If True, the properties will be checked for validity with\n            `test_property_validity` and raise an exception if any are not\n            valid", "id": "f15806:c0:m10"}
{"signature": "def phase_identification_parameter(V, dP_dT, dP_dV, d2P_dV2, d2P_dVdT):", "body": "return V*(d2P_dVdT/dP_dT - d2P_dV2/dP_dV)<EOL>", "docstring": "r'''Calculate the Phase Identification Parameter developed in [1]_ for\n    the accurate and efficient determination of whether a fluid is a liquid or\n    a gas based on the results of an equation of state. For supercritical \n    conditions, this provides a good method for choosing which property \n    correlations to use.\n\n    .. math::\n        \\Pi = V \\left[\\frac{\\frac{\\partial^2 P}{\\partial V \\partial T}}\n        {\\frac{\\partial P }{\\partial T}}- \\frac{\\frac{\\partial^2 P}{\\partial \n        V^2}}{\\frac{\\partial P}{\\partial V}} \\right]\n\n    Parameters\n    ----------\n    V : float\n        Molar volume at `T` and `P`, [m^3/mol]\n    dP_dT : float\n        Derivative of `P` with respect to `T`, [Pa/K]\n    dP_dV : float\n        Derivative of `P` with respect to `V`, [Pa*mol/m^3]\n    d2P_dV2 : float\n        Second derivative of `P` with respect to `V`, [Pa*mol^2/m^6]\n    d2P_dVdT : float\n        Second derivative of `P` with respect to both `V` and `T`, [Pa*mol/m^3/K]\n\n    Returns\n    -------\n    PIP : float\n        Phase Identification Parameter, [-]\n\n    Notes\n    -----\n    Heuristics were used by process simulators before the invent of this \n    parameter. \n\n    The criteria for liquid is Pi > 1; for vapor, Pi <= 1.\n\n    There is also a solid phase mechanism available. For solids, the Solid  \n    Phase Identification Parameter is greater than 1, like liquids; however,  \n    unlike liquids, d2P_dVdT is always >0; it is < 0 for liquids and gases.\n\n    Examples\n    --------\n    Calculated for hexane from the PR EOS at 299 K and 1 MPa (liquid):\n\n    >>> phase_identification_parameter(0.000130229900874, 582169.397484, \n    ... -3.66431747236e+12, 4.48067893805e+17, -20518995218.2)\n    11.33428990564796\n\n    References\n    ----------\n    .. 
[1] Venkatarathnam, G., and L. R. Oellrich. \"Identification of the Phase\n       of a Fluid Using Partial Derivatives of Pressure, Volume, and \n       Temperature without Reference to Saturation Properties: Applications in \n       Phase Equilibria Calculations.\" Fluid Phase Equilibria 301, no. 2 \n       (February 25, 2011): 225-33. doi:10.1016/j.fluid.2010.12.001.\n    .. [2] Jayanti, Pranava Chaitanya, and G. Venkatarathnam. \"Identification\n       of the Phase of a Substance from the Derivatives of Pressure, Volume and\n       Temperature, without Prior Knowledge of Saturation Properties: Extension\n       to Solid Phase.\" Fluid Phase Equilibria 425 (October 15, 2016): 269-277.\n       doi:10.1016/j.fluid.2016.06.001.", "id": "f15806:m11"}
{"signature": "def set_user_methods(self, user_methods, forced=False):", "body": "<EOL>if isinstance(user_methods, str):<EOL><INDENT>user_methods = [user_methods]<EOL><DEDENT>self.user_methods = user_methods<EOL>self.forced = forced<EOL>if set(self.user_methods).difference(self.all_methods):<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>if not self.user_methods and self.forced:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>self.method = None<EOL>self.sorted_valid_methods = []<EOL>self.T_cached = None<EOL>", "docstring": "r'''Method used to select certain property methods as having a higher\n        priority than were set by default. If `forced` is true, then methods\n        which were not specified are excluded from consideration.\n\n        As a side effect, `method` is removed to ensure than the new methods\n        will be used in calculations afterwards.\n\n        An exception is raised if any of the methods specified aren't available\n        for the chemical. An exception is raised if no methods are provided.\n\n        Parameters\n        ----------\n        user_methods : str or list\n            Methods by name to be considered or prefered\n        forced : bool, optional\n            If True, only the user specified methods will ever be considered;\n            if False other methods will be considered if no user methods\n            suceed", "id": "f15806:c0:m3"}
{"signature": "def SG(rho, rho_ref=<NUM_LIT>):", "body": "return rho/rho_ref<EOL>", "docstring": "r'''Calculates the specific gravity of a substance with respect to another\n    substance; by default, this is water at 15.6 \u00b0C (60 \u00b0F). For gases, \n    normally the reference density is 1.2 kg/m^3, that of dry air. However, in \n    general specific gravity should always be specified with respect to the\n    temperature and pressure of its reference fluid. This can vary widely.\n\n    .. math::\n        SG = \\frac{\\rho}{\\rho_{ref}}\n\n    Parameters\n    ----------\n    rho : float\n        Density of the substance, [kg/m^3]\n    rho_ref : float, optional\n        Density of the reference substance, [kg/m^3]\n\n    Returns\n    -------\n    SG : float\n        Specific gravity of the substance with respect to the reference \n        density, [-]\n\n    Notes\n    -----\n    Another common reference point is water at 4\u00b0C (rho_ref=999.9748691393087).\n    Specific gravity is often used by consumers instead of density.\n    The reference for solids is normally the same as for liquids - water.\n\n    Examples\n    --------\n    >>> SG(860)\n    0.8608461408159591", "id": "f15806:m8"}
{"signature": "def interpolate(self, T, name):", "body": "key = (name, self.interpolation_T, self.interpolation_property, self.interpolation_property_inv)<EOL><INDENT>if isinstance(self.tabular_data_interpolators, dict) and key in self.tabular_data_interpolators:<EOL><INDENT>extrapolator, spline = self.tabular_data_interpolators[key]<EOL><DEDENT><DEDENT>if key in self.tabular_data_interpolators:<EOL><INDENT>extrapolator, spline = self.tabular_data_interpolators[key]<EOL><DEDENT>else:<EOL><INDENT>Ts, properties = self.tabular_data[name]<EOL>if self.interpolation_T:  <EOL><INDENT>Ts2 = [self.interpolation_T(T2) for T2 in Ts]<EOL><DEDENT>else:<EOL><INDENT>Ts2 = Ts<EOL><DEDENT>if self.interpolation_property:  <EOL><INDENT>properties2 = [self.interpolation_property(p) for p in properties]<EOL><DEDENT>else:<EOL><INDENT>properties2 = properties<EOL><DEDENT>extrapolator = interp1d(Ts2, properties2, fill_value='<STR_LIT>')<EOL>if len(properties) >= <NUM_LIT:5>:<EOL><INDENT>spline = interp1d(Ts2, properties2, kind='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>spline = None<EOL>", "docstring": "r'''Method to perform interpolation on a given tabular data set\n        previously added via :obj:`set_tabular_data`. This method will create the\n        interpolators the first time it is used on a property set, and store\n        them for quick future use.\n\n        Interpolation is cubic-spline based if 5 or more points are available,\n        and linearly interpolated if not. Extrapolation is always performed\n        linearly. This function uses the transforms `interpolation_T`,\n        `interpolation_property`, and `interpolation_property_inv` if set. 
If\n        any of these are changed after the interpolators were first created,\n        new interpolators are created with the new transforms.\n        All interpolation is performed via the `interp1d` function.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to interpolate the property, [K]\n        name : str\n            The name assigned to the tabular data set\n\n        Returns\n        -------\n        prop : float\n            Calculated property, [`units`]", "id": "f15806:c0:m9"}
{"signature": "def to_num(values):", "body": "for i in range(len(values)):<EOL><INDENT>try:<EOL><INDENT>values[i] = float(values[i])<EOL><DEDENT>except:<EOL><INDENT>if values[i] == '<STR_LIT>':<EOL><INDENT>values[i] = None<EOL><DEDENT>else:<EOL><INDENT>values[i] = values[i].strip()<EOL>pass<EOL><DEDENT><DEDENT><DEDENT>return values<EOL>", "docstring": "r'''Legacy function to turn a list of strings into either floats\n    (if numeric), stripped strings (if not) or None if the string is empty.\n    Accepts any numeric formatting the float function does.\n\n    Parameters\n    ----------\n    values : list\n        list of strings\n\n    Returns\n    -------\n    values : list\n        list of floats, strings, and None values [-]\n\n    Examples\n    --------\n    >>> to_num(['1', '1.1', '1E5', '0xB4', ''])\n    [1.0, 1.1, 100000.0, '0xB4', None]", "id": "f15806:m0"}
{"signature": "def T_dependent_property(self, T):", "body": "<EOL>if self.method:<EOL><INDENT>if self.test_method_validity(T, self.method):<EOL><INDENT>try:<EOL><INDENT>prop = self.calculate(T, self.method)<EOL>if self.test_property_validity(prop):<EOL><INDENT>return prop<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>self.sorted_valid_methods = self.select_valid_methods(T)<EOL>for method in self.sorted_valid_methods:<EOL><INDENT>try:<EOL><INDENT>prop = self.calculate(T, method)<EOL>if self.test_property_validity(prop):<EOL><INDENT>self.method = method<EOL>return prop<EOL><DEDENT><DEDENT>except:  <EOL><INDENT>pass<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "r'''Method to calculate the property with sanity checking and without\n        specifying a specific method. `select_valid_methods` is used to obtain\n        a sorted list of methods to try. Methods are then tried in order until\n        one succeeds. The methods are allowed to fail, and their results are\n        checked with `test_property_validity`. On success, the used method\n        is stored in the variable `method`.\n\n        If `method` is set, this method is first checked for validity with\n        `test_method_validity` for the specified temperature, and if it is\n        valid, it is then used to calculate the property. The result is checked\n        for validity, and returned if it is valid. If either of the checks fail,\n        the function retrieves a full list of valid methods with\n        `select_valid_methods` and attempts them as described above.\n\n        If no methods are found which succeed, returns None.\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the property, [K]\n\n        Returns\n        -------\n        prop : float\n            Calculated property, [`units`]", "id": "f15806:c0:m6"}
{"signature": "def property_derivative_P(self, T, P, zs, ws, order=<NUM_LIT:1>):", "body": "sorted_valid_methods = self.select_valid_methods(T, P, zs, ws)<EOL>for method in sorted_valid_methods:<EOL><INDENT>try:<EOL><INDENT>return self.calculate_derivative_P(P, T, zs, ws, method, order)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "r'''Method to calculate a derivative of a mixture property with respect\n        to pressure at constant temperature and composition,\n        of a given order. Methods found valid by `select_valid_methods` are \n        attempted until a method succeeds. If no methods are valid and succeed,\n        None is returned.\n\n        Calls `calculate_derivative_P` internally to perform the actual\n        calculation.\n\n        .. math::\n            \\text{derivative} = \\frac{d (\\text{property})}{d P}|_{T, z}\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the derivative, [K]\n        P : float\n            Pressure at which to calculate the derivative, [Pa]\n        zs : list[float]\n            Mole fractions of all species in the mixture, [-]\n        ws : list[float]\n            Weight fractions of all species in the mixture, [-]\n        order : int\n            Order of the derivative, >= 1\n\n        Returns\n        -------\n        d_prop_d_P_at_T : float\n            Calculated derivative property, [`units/Pa^order`]", "id": "f15806:c2:m8"}
{"signature": "def property_derivative_T(self, T, P, zs, ws, order=<NUM_LIT:1>):", "body": "sorted_valid_methods = self.select_valid_methods(T, P, zs, ws)<EOL>for method in sorted_valid_methods:<EOL><INDENT>try:<EOL><INDENT>return self.calculate_derivative_T(T, P, zs, ws, method, order)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "r'''Method to calculate a derivative of a mixture property with respect\n        to temperature at constant pressure and composition,\n        of a given order. Methods found valid by `select_valid_methods` are \n        attempted until a method succeeds. If no methods are valid and succeed,\n        None is returned.\n\n        Calls `calculate_derivative_T` internally to perform the actual\n        calculation.\n\n        .. math::\n            \\text{derivative} = \\frac{d (\\text{property})}{d T}|_{P, z}\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the derivative, [K]\n        P : float\n            Pressure at which to calculate the derivative, [Pa]\n        zs : list[float]\n            Mole fractions of all species in the mixture, [-]\n        ws : list[float]\n            Weight fractions of all species in the mixture, [-]\n        order : int\n            Order of the derivative, >= 1\n\n        Returns\n        -------\n        d_prop_d_T_at_P : float\n            Calculated derivative property, [`units/K^order`]", "id": "f15806:c2:m7"}
{"signature": "def Vfs_to_zs(Vfs, Vms):", "body": "mols_i = [Vfi/Vmi for Vfi, Vmi in zip(Vfs, Vms)]<EOL>mols = sum(mols_i)<EOL>return [mol_i/mols for mol_i in mols_i]<EOL>", "docstring": "r'''Converts a list of molar volume fractions to mole fractions. Requires\n    molar volumes for all species.\n\n    .. math::\n        z_i = \\frac{\\frac{\\text{Vf}_i}{V_{m,i}}}{\\sum_i\n        \\frac{\\text{Vf}_i}{V_{m,i}}}\n\n    Parameters\n    ----------\n    Vfs : iterable\n        Molar volume fractions [-]\n    Vms : iterable\n        Molar volumes of species [m^3/mol]\n\n    Returns\n    -------\n    zs : list\n        Mole fractions [-]\n\n    Notes\n    -----\n    Does not check that the sums add to one. Does not check that inputs are of\n    the same length.\n\n    Molar volumes are specified in terms of pure components only. Function\n    works with any phase.\n\n    Examples\n    --------\n    Acetone and benzene example\n\n    >>> Vfs_to_zs([0.596, 0.404], [8.0234e-05, 9.543e-05])\n    [0.6369779395901142, 0.3630220604098858]", "id": "f15806:m27"}
{"signature": "def polylog2(x):", "body": "if <NUM_LIT:0> <= x <= <NUM_LIT>:<EOL><INDENT>p = [<NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, <NUM_LIT>]<EOL>q = [-<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT:1.0>]<EOL>offset = <NUM_LIT><EOL><DEDENT>elif <NUM_LIT> < x <= <NUM_LIT>:<EOL><INDENT>p = [<NUM_LIT>, -<NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>]<EOL>q = [-<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT:1.0>]<EOL>offset = <NUM_LIT><EOL><DEDENT>elif <NUM_LIT> < x <= <NUM_LIT:1>:<EOL><INDENT>p = [<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>]<EOL>q = [-<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT>, -<NUM_LIT>, <NUM_LIT:1.0>]<EOL>offset = <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>x = x - offset<EOL>return horner(p, x)/horner(q, x)<EOL>", "docstring": "r'''Simple function to calculate PolyLog(2, x) from ranges 0 <= x <= 1,\n    with relative error guaranteed to be < 1E-7 from 0 to 0.99999. This\n    is a Pade approximation, with three coefficient sets with splits at 0.7 \n    and 0.99. An exception is raised if x is under 0 or above 1. \n\n\n    Parameters\n    ----------\n    x : float\n        Value to evaluate PolyLog(2, x) T\n\n    Returns\n    -------\n    y : float\n        Evaluated result\n\n    Notes\n    -----\n    Efficient (2-4 microseconds). No implementation of this function exists in \n    SciPy. 
Derived with mpmath's pade approximation.\n    Required for the entropy integral of \n    :obj:`thermo.heat_capacity.Zabransky_quasi_polynomial`.\n\n    Examples\n    --------\n    >>> polylog2(0.5)\n    0.5822405264516294", "id": "f15806:m31"}
{"signature": "def Vm_to_rho(Vm, MW):", "body": "return (Vm)**-<NUM_LIT:1>*MW/<NUM_LIT><EOL>", "docstring": "r'''Calculate the density of a chemical, given its molar volume and\n    molecular weight.\n\n    .. math::\n        \\rho = \\frac{MW}{1000\\cdot VM}\n\n    Parameters\n    ----------\n    Vm : float\n        Molar volume, [m^3/mol]\n    MW : float\n        Molecular weight, [g/mol]\n\n    Returns\n    -------\n    rho : float\n        Density, [kg/m^3]\n\n    Examples\n    --------\n    >>> Vm_to_rho(0.000132, 86.18)\n    652.8787878787879\n\n    References\n    ----------\n    .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n       New York: McGraw-Hill Professional, 2000.", "id": "f15806:m17"}
{"signature": "def ws_to_zs(ws, MWs):", "body": "tot = sum(w/MW for w, MW in zip(ws, MWs))<EOL>zs = [w/MW/tot for w, MW in zip(ws, MWs)]<EOL>return zs<EOL>", "docstring": "r'''Converts a list of mass fractions to mole fractions. Requires molecular\n    weights for all species.\n\n    .. math::\n        z_i = \\frac{\\frac{w_i}{MW_i}}{\\sum_i \\frac{w_i}{MW_i}}\n\n    Parameters\n    ----------\n    ws : iterable\n        Mass fractions [-]\n    MWs : iterable\n        Molecular weights [g/mol]\n\n    Returns\n    -------\n    zs : iterable\n        Mole fractions [-]\n\n    Notes\n    -----\n    Does not check that the sums add to one. Does not check that inputs are of\n    the same length.\n\n    Examples\n    --------\n    >>> ws_to_zs([0.3333333333333333, 0.6666666666666666], [10, 20])\n    [0.5, 0.5]", "id": "f15806:m25"}
{"signature": "def TP_dependent_property_derivative_T(self, T, P, order=<NUM_LIT:1>):", "body": "sorted_valid_methods_P = self.select_valid_methods_P(T, P)<EOL>for method in sorted_valid_methods_P:<EOL><INDENT>try:<EOL><INDENT>return self.calculate_derivative_T(T, P, method, order)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "r'''Method to calculate a derivative of a temperature and pressure\n        dependent property with respect to temperature at constant pressure,\n        of a given order. Methods found valid by `select_valid_methods_P` are \n        attempted until a method succeeds. If no methods are valid and succeed,\n        None is returned.\n\n        Calls `calculate_derivative_T` internally to perform the actual\n        calculation.\n\n        .. math::\n            \\text{derivative} = \\frac{d (\\text{property})}{d T}|_{P}\n\n        Parameters\n        ----------\n        T : float\n            Temperature at which to calculate the derivative, [K]\n        P : float\n            Pressure at which to calculate the derivative, [Pa]\n        order : int\n            Order of the derivative, >= 1\n\n        Returns\n        -------\n        d_prop_d_T_at_P : float\n            Calculated derivative property, [`units/K^order`]", "id": "f15806:c1:m12"}
{"signature": "def Z(T, P, V):", "body": "return V*P/T/R<EOL>", "docstring": "r'''Calculates the compressibility factor of a gas, given its\n    temperature, pressure, and molar volume.\n\n    .. math::\n        Z = \\frac{PV}{RT}\n\n    Parameters\n    ----------\n    T : float\n        Temperature, [K]\n    P : float\n        Pressure [Pa]\n    V : float\n        Molar volume, [m^3/mol]\n\n    Returns\n    -------\n    Z : float\n        Compressibility factor, [-]\n\n    Examples\n    --------\n    >>> Z(600, P=1E6, V=0.00463)\n    0.9281019876560912\n\n    References\n    ----------\n    .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n       New York: McGraw-Hill Professional, 2000.", "id": "f15806:m19"}
{"signature": "def plot_isobar(self, P, Tmin=None, Tmax=None, methods_P=[], pts=<NUM_LIT:50>,<EOL>only_valid=True):  ", "body": "if not has_matplotlib:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if Tmin is None:<EOL><INDENT>if self.Tmin is not None:<EOL><INDENT>Tmin = self.Tmin<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>if Tmax is None:<EOL><INDENT>if self.Tmax is not None:<EOL><INDENT>Tmax = self.Tmax<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>if not methods_P:<EOL><INDENT>if self.user_methods_P:<EOL><INDENT>methods_P = self.user_methods_P<EOL><DEDENT>else:<EOL><INDENT>methods_P = self.all_methods_P<EOL><DEDENT><DEDENT>Ts = np.linspace(Tmin, Tmax, pts)<EOL>for method_P in methods_P:<EOL><INDENT>if only_valid:<EOL><INDENT>properties, Ts2 = [], []<EOL>for T in Ts:<EOL><INDENT>if self.test_method_validity_P(T, P, method_P):<EOL><INDENT>try:<EOL><INDENT>p = self.calculate_P(T, P, method_P)<EOL>if self.test_property_validity(p):<EOL><INDENT>properties.append(p)<EOL>Ts2.append(T)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>plt.plot(Ts2, properties, label=method_P)<EOL><DEDENT>else:<EOL><INDENT>properties = [self.calculate_P(T, P, method_P) for T in Ts]<EOL>plt.plot(Ts, properties, label=method_P)<EOL><DEDENT><DEDENT>plt.legend(loc='<STR_LIT>')<EOL>plt.ylabel(self.name + '<STR_LIT:U+002CU+0020>' + self.units)<EOL>plt.xlabel('<STR_LIT>')<EOL>plt.title(self.name + '<STR_LIT>' + self.CASRN)<EOL>plt.show()<EOL>", "docstring": "r'''Method to create a plot of the property vs temperature at a \n        specific pressure according to\n        either a specified list of methods, or user methods (if set), or all\n        methods. User-selectable number of points, and temperature range. 
If\n        only_valid is set, `test_method_validity_P` will be used to check if \n        each condition in the specified range is valid, and\n        `test_property_validity` will be used to test the answer, and the\n        method is allowed to fail; only the valid points will be plotted.\n        Otherwise, the result will be calculated and displayed as-is. This will\n        not succeed if the method fails.\n\n        Parameters\n        ----------\n        P : float\n            Pressure for the isobar, [Pa]\n        Tmin : float\n            Minimum temperature, to begin calculating the property, [K]\n        Tmax : float\n            Maximum temperature, to stop calculating the property, [K]\n        methods_P : list, optional\n            List of methods to consider\n        pts : int, optional\n            The number of points to calculate the property at; if Tmin to Tmax\n            covers a wide range of method validities, only a few points may end\n            up calculated for a given method so this may need to be large\n        only_valid : bool\n            If True, only plot successful methods and calculated properties,\n            and handle errors; if False, attempt calculation without any\n            checking and use methods outside their bounds", "id": "f15806:c1:m8"}
{"signature": "def Brock_Bird(T, Tb, Tc, Pc):", "body": "Tbr = Tb/Tc<EOL>Tr = T/Tc<EOL>Pc = Pc/<NUM_LIT>  <EOL>Q = <NUM_LIT>*(<NUM_LIT:1> + Tbr*log(Pc/<NUM_LIT>)/(<NUM_LIT:1>-Tbr))-<NUM_LIT><EOL>sigma = (Pc)**(<NUM_LIT:2>/<NUM_LIT>)*Tc**(<NUM_LIT:1>/<NUM_LIT>)*Q*(<NUM_LIT:1>-Tr)**(<NUM_LIT:11>/<NUM_LIT>)<EOL>sigma = sigma/<NUM_LIT:1000>  <EOL>return sigma<EOL>", "docstring": "r'''Calculates air-water surface tension  using the [1]_\n    emperical method. Old and tested.\n\n    .. math::\n        \\sigma = P_c^{2/3}T_c^{1/3}Q(1-T_r)^{11/9}\n\n        Q = 0.1196 \\left[ 1 + \\frac{T_{br}\\ln (P_c/1.01325)}{1-T_{br}}\\right]-0.279\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tb : float\n        Boiling temperature of the fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n\n    Returns\n    -------\n    sigma : float\n        Liquid surface tension, N/m\n\n    Notes\n    -----\n    Numerous arrangements of this equation are available.\n    This is DIPPR Procedure 7A: Method for the Surface Tension of Pure,\n    Nonpolar, Nonhydrocarbon Liquids\n    The exact equation is not in the original paper.\n    If the equation yields a negative result, return None.\n\n    Examples\n    --------\n    p-dichloribenzene at 412.15 K, from DIPPR; value differs due to a slight\n    difference in method.\n\n    >>> Brock_Bird(412.15, 447.3, 685, 3.952E6)\n    0.02208448325192495\n\n    Chlorobenzene from Poling, as compared with a % error value at 293 K.\n\n    >>> Brock_Bird(293.15, 404.75, 633.0, 4530000.0)\n    0.032985686413713036\n\n    References\n    ----------\n    .. [1] Brock, James R., and R. Byron Bird. \"Surface Tension and the\n       Principle of Corresponding States.\" AIChE Journal 1, no. 2\n       (June 1, 1955): 174-77. doi:10.1002/aic.690010208", "id": "f15807:m3"}
{"signature": "def Hakim_Steinberg_Stiel(T, Tc, Pc, omega, StielPolar=<NUM_LIT:0>):", "body": "Q = (<NUM_LIT> + <NUM_LIT>*omega - <NUM_LIT>*StielPolar - <NUM_LIT>*StielPolar**<NUM_LIT:2><EOL>- <NUM_LIT>*omega**<NUM_LIT:2> + <NUM_LIT>*StielPolar*omega)<EOL>m = (<NUM_LIT> + <NUM_LIT>*omega - <NUM_LIT>*StielPolar - <NUM_LIT>*StielPolar**<NUM_LIT:2><EOL>- <NUM_LIT>*omega**<NUM_LIT:2> + <NUM_LIT>*StielPolar*omega)<EOL>Tr = T/Tc<EOL>Pc = Pc/<NUM_LIT><EOL>sigma = Pc**(<NUM_LIT:2>/<NUM_LIT>)*Tc**(<NUM_LIT:1>/<NUM_LIT>)*Q*((<NUM_LIT:1> - Tr)/<NUM_LIT>)**m<EOL>sigma = sigma/<NUM_LIT>  <EOL>return sigma<EOL>", "docstring": "r'''Calculates air-water surface tension using the reference fluids methods\n    of [1]_.\n\n    .. math::\n        \\sigma = 4.60104\\times 10^{-7} P_c^{2/3}T_c^{1/3}Q_p \\left(\\frac{1-T_r}{0.4}\\right)^m\n\n        Q_p = 0.1574+0.359\\omega-1.769\\chi-13.69\\chi^2-0.51\\omega^2+1.298\\omega\\chi\n\n        m = 1.21+0.5385\\omega-14.61\\chi-32.07\\chi^2-1.65\\omega^2+22.03\\omega\\chi\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n    omega : float\n        Acentric factor for fluid, [-]\n    StielPolar : float, optional\n        Stiel Polar Factor, [-]\n\n    Returns\n    -------\n    sigma : float\n        Liquid surface tension, N/m\n\n    Notes\n    -----\n    Original equation for m and Q are used. Internal units are atm and mN/m.\n\n    Examples\n    --------\n    1-butanol, as compared to value in CRC Handbook of 0.02493.\n\n    >>> Hakim_Steinberg_Stiel(298.15, 563.0, 4414000.0, 0.59, StielPolar=-0.07872)\n    0.021907902575190447\n\n    References\n    ----------\n    .. [1] Hakim, D. I., David Steinberg, and L. I. Stiel. \"Generalized\n       Relationship for the Surface Tension of Polar Fluids.\" Industrial &\n       Engineering Chemistry Fundamentals 10, no. 
1 (February 1, 1971): 174-75.\n       doi:10.1021/i160037a032.", "id": "f15807:m7"}
{"signature": "def Jasper(T, a, b):", "body": "sigma = (a - b*(T-<NUM_LIT>))/<NUM_LIT:1000><EOL>return sigma<EOL>", "docstring": "r'''Calculates surface tension of a fluid given two parameters, a linear\n    fit in Celsius from [1]_ with data reprinted in [2]_.\n\n    .. math::\n        \\sigma = a - bT\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid, [K]\n    a : float\n        Parameter for equation. Chemical specific.\n    b : float\n        Parameter for equation. Chemical specific.\n\n    Returns\n    -------\n    sigma: float\n        Surface tension [N/m]\n\n    Notes\n    -----\n    Internal units are mN/m, and degrees Celsius.\n    This function has been checked against several references.\n\n    Examples\n    --------\n    >>> Jasper(298.15, 24, 0.0773)\n    0.0220675\n\n    References\n    ----------\n    .. [1] Jasper, Joseph J. \"The Surface Tension of Pure Liquid Compounds.\"\n       Journal of Physical and Chemical Reference Data 1, no. 4\n       (October 1, 1972): 841-1010. doi:10.1063/1.3253106.\n    .. [2] Speight, James. Lange's Handbook of Chemistry. 16 edition.\n       McGraw-Hill Professional, 2005.", "id": "f15807:m2"}
{"signature": "def Aleem(T, MW, Tb, rhol, Hvap_Tb, Cpl):", "body": "MW = MW/<NUM_LIT> <EOL>sphericity = <NUM_LIT:1.> - <NUM_LIT>*MW + <NUM_LIT>*MW*MW<EOL>return sphericity*MW**(<NUM_LIT:1>/<NUM_LIT>)/(<NUM_LIT>*N_A**(<NUM_LIT:1>/<NUM_LIT>))*rhol**(<NUM_LIT:2>/<NUM_LIT>)*(Hvap_Tb + Cpl*(Tb-T))<EOL>", "docstring": "r'''Calculates vapor-liquid surface tension using the correlation derived by\n    [1]_ based on critical property CSP methods.\n\n    .. math::\n        \\sigma = \\phi \\frac{MW^{1/3}} {6N_A^{1/3}}\\rho_l^{2/3}\\left[H_{vap}\n        + C_{p,l}(T_b-T)\\right]\n\n        \\phi = 1 - 0.0047MW + 6.8\\times 10^{-6} MW^2\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    MW : float\n        Molecular weight [g/mol]\n    Tb : float\n        Boiling temperature of the fluid [K]\n    rhol : float\n        Liquid density at T and P [kg/m^3]\n    Hvap_Tb : float\n        Mass enthalpy of vaporization at the normal boiling point [kg/m^3]\n    Cpl : float\n        Liquid heat capacity of the chemical at T [J/kg/K]\n\n    Returns\n    -------\n    sigma : float\n        Liquid-vapor surface tension [N/m]\n\n    Notes\n    -----\n    Internal units of molecuar weight are kg/mol. This model is dimensionally\n    consistent.\n\n    This model does not use the critical temperature. After it predicts a \n    surface tension of 0 at a sufficiently high temperature, it returns \n    negative results. The temperature at which this occurs (the \"predicted\"\n    critical temperature) can be calculated as follows:\n\n    .. math::\n        \\sigma = 0 \\to T_{c,predicted} \\text{ at } T_b + \\frac{H_{vap}}{Cp_l}\n\n    Because of its dependence on density, it has the potential to model the \n    effect of pressure on surface tension.\n\n    Claims AAD of 4.3%. Developed for normal alkanes. Total of 472 data points. \n    Behaves worse for higher alkanes. 
Behaves very poorly overall.\n\n    Examples\n    --------\n    Methane at 90 K\n\n    >>> Aleem(T=90, MW=16.04246, Tb=111.6, rhol=458.7, Hvap_Tb=510870.,\n    ... Cpl=2465.)\n    0.01669970221165325\n\n    References\n    ----------\n    .. [1] Aleem, W., N. Mellon, S. Sufian, M. I. A. Mutalib, and D. Subbarao.\n       \"A Model for the Estimation of Surface Tension of Pure Hydrocarbon \n       Liquids.\" Petroleum Science and Technology 33, no. 23-24 (December 17, \n       2015): 1908-15. doi:10.1080/10916466.2015.1110593.", "id": "f15807:m9"}
{"signature": "def Miqueu(T, Tc, Vc, omega):", "body": "Vc = Vc*<NUM_LIT><EOL>t = <NUM_LIT:1.>-T/Tc<EOL>sigma = k*Tc*(N_A/Vc)**(<NUM_LIT:2>/<NUM_LIT>)*(<NUM_LIT> + <NUM_LIT>*omega)*t**<NUM_LIT>*(<NUM_LIT:1>+<NUM_LIT>*t**<NUM_LIT:0.5> - <NUM_LIT>*t)*<NUM_LIT><EOL>return sigma<EOL>", "docstring": "r'''Calculates air-water surface tension using the methods of [1]_.\n\n    .. math::\n        \\sigma = k T_c \\left( \\frac{N_a}{V_c}\\right)^{2/3}\n        (4.35 + 4.14 \\omega)t^{1.26}(1+0.19t^{0.5} - 0.487t)\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Vc : float\n        Critical volume of fluid [m^3/mol]\n    omega : float\n        Acentric factor for fluid, [-]\n\n    Returns\n    -------\n    sigma : float\n        Liquid surface tension, N/m\n\n    Notes\n    -----\n    Uses Avogadro's constant and the Boltsman constant.\n    Internal units of volume are mL/mol and mN/m. However, either a typo\n    is in the article or author's work, or my value of k is off by 10; this is\n    corrected nonetheless.\n    Created with 31 normal fluids, none polar or hydrogen bonded. Has an\n    AARD of 3.5%.\n\n    Examples\n    --------\n    Bromotrifluoromethane, 2.45 mN/m\n\n    >>> Miqueu(300., 340.1, 0.000199, 0.1687)\n    0.003474099603581931\n\n    References\n    ----------\n    .. [1] Miqueu, C, D Broseta, J Satherley, B Mendiboure, J Lachaise, and\n       A Graciaa. \"An Extended Scaled Equation for the Temperature Dependence\n       of the Surface Tension of Pure Compounds Inferred from an Analysis of\n       Experimental Data.\" Fluid Phase Equilibria 172, no. 2 (July 5, 2000):\n       169-82. doi:10.1016/S0378-3812(00)00384-8.", "id": "f15807:m8"}
{"signature": "def Pitzer(T, Tc, Pc, omega):", "body": "Tr = T/Tc<EOL>Pc = Pc/<NUM_LIT>  <EOL>sigma = Pc**(<NUM_LIT:2>/<NUM_LIT>)*Tc**(<NUM_LIT:1>/<NUM_LIT>)*(<NUM_LIT>+<NUM_LIT>*omega)/<NUM_LIT> * (<EOL>(<NUM_LIT>+<NUM_LIT>*omega)/(<NUM_LIT>-<NUM_LIT>*omega))**(<NUM_LIT:2>/<NUM_LIT>)*(<NUM_LIT:1>-Tr)**(<NUM_LIT:11>/<NUM_LIT>)<EOL>sigma = sigma/<NUM_LIT:1000>  <EOL>return sigma<EOL>", "docstring": "r'''Calculates air-water surface tension using the correlation derived\n    by [1]_ from the works of [2]_ and [3]_. Based on critical property CSP\n    methods.\n\n    .. math::\n        \\sigma = P_c^{2/3}T_c^{1/3}\\frac{1.86 + 1.18\\omega}{19.05}\n        \\left[ \\frac{3.75 + 0.91 \\omega}{0.291 - 0.08 \\omega}\\right]^{2/3} (1-T_r)^{11/9}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n    omega : float\n        Acentric factor for fluid, [-]\n\n    Returns\n    -------\n    sigma : float\n        Liquid surface tension, N/m\n\n    Notes\n    -----\n    The source of this equation has not been reviewed.\n    Internal units of presure are bar, surface tension of mN/m.\n\n    Examples\n    --------\n    Chlorobenzene from Poling, as compared with a % error value at 293 K.\n\n    >>> Pitzer(293., 633.0, 4530000.0, 0.249)\n    0.03458453513446387\n\n    References\n    ----------\n    .. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th edition.\n       New York: McGraw-Hill Professional, 2000.\n    .. [2] Curl, R. F., and Kenneth Pitzer. \"Volumetric and Thermodynamic\n       Properties of Fluids-Enthalpy, Free Energy, and Entropy.\" Industrial &\n       Engineering Chemistry 50, no. 2 (February 1, 1958): 265-74.\n       doi:10.1021/ie50578a047\n    .. [3] Pitzer, K. S.: Thermodynamics, 3d ed., New York, McGraw-Hill,\n       1995, p. 521.", "id": "f15807:m4"}
{"signature": "def Diguilio_Teja(T, xs, sigmas_Tb, Tbs, Tcs):", "body": "if not none_and_length_check([xs, sigmas_Tb, Tbs, Tcs]):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>Tc = mixing_simple(xs, Tcs)<EOL>if T > Tc:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>Tb = mixing_simple(xs, Tbs)<EOL>sigmar = mixing_simple(xs, sigmas_Tb)<EOL>Tst = (Tc/T - <NUM_LIT:1.>)/(Tc/Tb - <NUM_LIT:1>)<EOL>return <NUM_LIT>*Tst**<NUM_LIT>*(T/Tb)*sigmar<EOL>", "docstring": "r'''Calculates surface tension of a liquid mixture according to\n    mixing rules in [1]_.\n\n    .. math::\n        \\sigma = 1.002855(T^*)^{1.118091} \\frac{T}{T_b} \\sigma_r\n\n        T^*  = \\frac{(T_c/T)-1}{(T_c/T_b)-1}\n\n        \\sigma_r = \\sum x_i \\sigma_i\n\n        T_b = \\sum x_i T_{b,i}\n\n        T_c = \\sum x_i T_{c,i}\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    xs : array-like\n        Mole fractions of all components\n    sigmas_Tb : array-like\n        Surface tensions of all components at the boiling point, [N/m]\n    Tbs : array-like\n        Boiling temperatures of all components, [K]\n    Tcs : array-like\n        Critical temperatures of all components, [K]\n\n    Returns\n    -------\n    sigma : float\n        Air-liquid surface tension of mixture, [N/m]\n\n    Notes\n    -----\n    Simple model, however it has 0 citations. Gives similar results to the\n    `Winterfeld_Scriven_Davis` model.\n\n    Raises a ValueError if temperature is greater than the mixture's critical\n    temperature or if the given temperature is negative, or if the mixture's\n    boiling temperature is higher than its critical temperature.\n\n    [1]_ claims a 4.63 percent average absolute error on 21 binary and 4 \n    ternary non-aqueous systems. [1]_ also considered Van der Waals mixing \n    rules for `Tc`, but found it provided a higher error of 5.58%\n\n    Examples\n    --------\n    >>> Diguilio_Teja(T=298.15, xs=[0.1606, 0.8394],\n    ... 
sigmas_Tb=[0.01424, 0.02530], Tbs=[309.21, 312.95], Tcs=[469.7, 508.0])\n    0.025716823875045505\n\n    References\n    ----------\n    .. [1] Diguilio, Ralph, and Amyn S. Teja. \"Correlation and Prediction of\n       the Surface Tensions of Mixtures.\" The Chemical Engineering Journal 38,\n       no. 3 (July 1988): 205-8. doi:10.1016/0300-9467(88)80079-0.", "id": "f15807:m12"}
{"signature": "def Mersmann_Kind_surface_tension(T, Tm, Tb, Tc, Pc, n_associated=<NUM_LIT:1>):", "body": "Tr = T/Tc<EOL>sigma_star = ((Tb - Tm)/Tm)**(<NUM_LIT:1>/<NUM_LIT>)*(<NUM_LIT>*(<NUM_LIT:1.> - Tr) + <NUM_LIT>*(<NUM_LIT:1.> - Tr)**(<NUM_LIT:4>/<NUM_LIT>))<EOL>sigma = sigma_star*(k*Tc)**(<NUM_LIT:1>/<NUM_LIT>)*(Tm/Tc)*Pc**(<NUM_LIT:2>/<NUM_LIT>)*n_associated**(-<NUM_LIT:1>/<NUM_LIT>)<EOL>return sigma<EOL>", "docstring": "r'''Estimates the surface tension of organic liquid substances\n    according to the method of [1]_.\n\n    .. math::\n        \\sigma^* = \\frac{\\sigma n_{ass}^{1/3}} {(kT_c)^{1/3} T_{rm}P_c^{2/3}}\n\n        \\sigma^* = \\left(\\frac{T_b - T_m}{T_m}\\right)^{1/3}\n        \\left[6.25(1-T_r) + 31.3(1-T_r)^{4/3}\\right]\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the fluid [K]\n    Tm : float\n        Melting temperature [K]\n    Tb : float\n        Boiling temperature of the fluid [K]\n    Tc : float\n        Critical temperature of the fluid [K]\n    Pc : float\n        Critical pressure of the fluid [Pa]\n    n_associated : float\n        Number of associated molecules in a cluster (2 for alcohols, 1\n        otherwise), [-]\n\n    Returns\n    -------\n    sigma : float\n        Liquid-vapor surface tension [N/m]\n\n    Notes\n    -----\n    In the equation, all quantities must be in SI units. `k` is the boltzman\n    constant.\n\n    Examples\n    --------\n    MTBE at STP (the actual value is 0.0181):\n\n    >>> Mersmann_Kind_surface_tension(298.15, 164.15, 328.25, 497.1, 3430000.0)\n    0.016744309508833335\n\n    References\n    ----------\n    .. [1] Mersmann, Alfons, and Matthias Kind. \"Prediction of Mechanical and \n       Thermal Properties of Pure Liquids, of Critical Data, and of Vapor \n       Pressure.\" Industrial & Engineering Chemistry Research, January 31, \n       2017. https://doi.org/10.1021/acs.iecr.6b04323.", "id": "f15807:m10"}
{"signature": "def Zuo_Stenby(T, Tc, Pc, omega):", "body": "Tc_1, Pc_1, omega_1 = <NUM_LIT>, <NUM_LIT>/<NUM_LIT>, <NUM_LIT><EOL>Tc_2, Pc_2, omega_2 = <NUM_LIT>, <NUM_LIT>/<NUM_LIT>, <NUM_LIT><EOL>Pc = Pc/<NUM_LIT><EOL>def ST_r(ST, Tc, Pc):<EOL><INDENT>return log(<NUM_LIT:1> + ST/(Tc**(<NUM_LIT:1>/<NUM_LIT>)*Pc**(<NUM_LIT:2>/<NUM_LIT>)))<EOL><DEDENT>ST_1 = <NUM_LIT>*(<NUM_LIT:1> - T/Tc)**<NUM_LIT>  <EOL>ST_2 = <NUM_LIT>*(<NUM_LIT:1> - T/Tc)**<NUM_LIT>  <EOL>ST_r_1, ST_r_2 = ST_r(ST_1, Tc_1, Pc_1), ST_r(ST_2, Tc_2, Pc_2)<EOL>sigma_r = ST_r_1 + (omega-omega_1)/(omega_2 - omega_1)*(ST_r_2-ST_r_1)<EOL>sigma = Tc**(<NUM_LIT:1>/<NUM_LIT>)*Pc**(<NUM_LIT:2>/<NUM_LIT>)*(exp(sigma_r)-<NUM_LIT:1>)<EOL>sigma = sigma/<NUM_LIT:1000>  <EOL>return sigma<EOL>", "docstring": "r'''Calculates air-water surface tension using the reference fluids\n    methods of [1]_.\n\n    .. math::\n        \\sigma^{(1)} = 40.520(1-T_r)^{1.287}\n        \\sigma^{(2)} = 52.095(1-T_r)^{1.21548}\n        \\sigma_r = \\sigma_r^{(1)}+ \\frac{\\omega - \\omega^{(1)}}\n        {\\omega^{(2)}-\\omega^{(1)}} (\\sigma_r^{(2)}-\\sigma_r^{(1)})\n        \\sigma = T_c^{1/3}P_c^{2/3}[\\exp{(\\sigma_r)} -1]\n\n    Parameters\n    ----------\n    T : float\n        Temperature of fluid [K]\n    Tc : float\n        Critical temperature of fluid [K]\n    Pc : float\n        Critical pressure of fluid [Pa]\n    omega : float\n        Acentric factor for fluid, [-]\n\n    Returns\n    -------\n    sigma : float\n        Liquid surface tension, N/m\n\n    Notes\n    -----\n    Presently untested. Have not personally checked the sources.\n    I strongly believe it is broken.\n    The reference values for methane and n-octane are from the DIPPR database.\n\n    Examples\n    --------\n    Chlorobenzene\n\n    >>> Zuo_Stenby(293., 633.0, 4530000.0, 0.249)\n    0.03345569011871088\n\n    References\n    ----------\n    .. [1] Zuo, You-Xiang, and Erling H. Stenby. 
\"Corresponding-States and\n       Parachor Models for the Calculation of Interfacial Tensions.\" The\n       Canadian Journal of Chemical Engineering 75, no. 6 (December 1, 1997):\n       1130-37. doi:10.1002/cjce.5450750617", "id": "f15807:m6"}
{"signature": "def EQ115(T, A, B, C=<NUM_LIT:0>, D=<NUM_LIT:0>, E=<NUM_LIT:0>):", "body": "return exp(A+B/T+C*log(T)+D*T**<NUM_LIT:2> + E/T**<NUM_LIT:2>)<EOL>", "docstring": "r'''DIPPR Equation #115. No major uses; has been used as an alternate\n    liquid viscosity expression, and as a model for vapor pressure.\n    Only parameters A and B are required.\n\n    .. math::\n        Y = \\exp\\left(A + \\frac{B}{T} + C\\log T + D T^2 + \\frac{E}{T^2}\\right)\n\n    Parameters\n    ----------\n    T : float\n        Temperature, [K]\n    A-E : float\n        Parameter for the equation; chemical and property specific [-]\n\n    Returns\n    -------\n    Y : float\n        Property [constant-specific]\n\n    Notes\n    -----\n    No coefficients found for this expression.\n    This function is not integrable for either dT or Y/T dT.\n\n    References\n    ----------\n    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801\n       DIPPR/AIChE", "id": "f15808:m8"}
{"signature": "def EQ107(T, A=<NUM_LIT:0>, B=<NUM_LIT:0>, C=<NUM_LIT:0>, D=<NUM_LIT:0>, E=<NUM_LIT:0>, order=<NUM_LIT:0>):", "body": "if order == <NUM_LIT:0>:<EOL><INDENT>return A + B*((C/T)/sinh(C/T))**<NUM_LIT:2> + D*((E/T)/cosh(E/T))**<NUM_LIT:2><EOL><DEDENT>elif order == <NUM_LIT:1>:<EOL><INDENT>return (<NUM_LIT:2>*B*C**<NUM_LIT:3>*cosh(C/T)/(T**<NUM_LIT:4>*sinh(C/T)**<NUM_LIT:3>) <EOL>- <NUM_LIT:2>*B*C**<NUM_LIT:2>/(T**<NUM_LIT:3>*sinh(C/T)**<NUM_LIT:2>) <EOL>+ <NUM_LIT:2>*D*E**<NUM_LIT:3>*sinh(E/T)/(T**<NUM_LIT:4>*cosh(E/T)**<NUM_LIT:3>)<EOL>- <NUM_LIT:2>*D*E**<NUM_LIT:2>/(T**<NUM_LIT:3>*cosh(E/T)**<NUM_LIT:2>))<EOL><DEDENT>elif order == -<NUM_LIT:1>:<EOL><INDENT>return A*T + B*C/tanh(C/T) - D*E*tanh(E/T)<EOL><DEDENT>elif order == -<NUM_LIT>:<EOL><INDENT>return (A*log(T) + B*C/tanh(C/T)/T - B*log(sinh(C/T)) <EOL>- D*E*tanh(E/T)/T + D*log(cosh(E/T)))<EOL><DEDENT>else:<EOL><INDENT>raise Exception(order_not_found_msg)<EOL><DEDENT>", "docstring": "r'''DIPPR Equation #107. Often used in calculating ideal-gas heat capacity.\n    All 5 parameters are required.\n    Also called the Aly-Lee equation.\n\n    .. math::\n        Y = A + B\\left[\\frac{C/T}{\\sinh(C/T)}\\right]^2 + D\\left[\\frac{E/T}{\n        \\cosh(E/T)}\\right]^2\n\n    Parameters\n    ----------\n    T : float\n        Temperature, [K]\n    A-E : float\n        Parameter for the equation; chemical and property specific [-]\n    order : int, optional\n        Order of the calculation. 0 for the calculation of the result itself;\n        for 1, the first derivative of the property is returned, for\n        -1, the indefinite integral of the property with respect to temperature\n        is returned; and for -1j, the indefinite integral of the property\n        divided by temperature with respect to temperature is returned. 
No \n        other integrals or derivatives are implemented, and an exception will \n        be raised if any other order is given.\n\n    Returns\n    -------\n    Y : float\n        Property [constant-specific; if order == 1, property/K; if order == -1,\n                  property*K; if order == -1j, unchanged from default]\n\n    Notes\n    -----\n    The derivative with respect to T, integral with respect to T, and integral\n    over T with respect to T are computed as follows. The derivative is \n    obtained via SymPy; the integrals from Wolfram Alpha.\n\n    .. math::\n        \\frac{d Y}{dT} = \\frac{2 B C^{3} \\cosh{\\left (\\frac{C}{T} \\right )}}\n        {T^{4} \\sinh^{3}{\\left (\\frac{C}{T} \\right )}} - \\frac{2 B C^{2}}{T^{3}\n        \\sinh^{2}{\\left (\\frac{C}{T} \\right )}} + \\frac{2 D E^{3} \\sinh{\\left\n        (\\frac{E}{T} \\right )}}{T^{4} \\cosh^{3}{\\left (\\frac{E}{T} \\right )}} \n        - \\frac{2 D E^{2}}{T^{3} \\cosh^{2}{\\left (\\frac{E}{T} \\right )}}\n\n    .. math::\n        \\int Y dT = A T + \\frac{B C}{\\tanh{\\left (\\frac{C}{T} \\right )}}\n        - D E \\tanh{\\left (\\frac{E}{T} \\right )}\n\n    .. math::\n        \\int \\frac{Y}{T} dT = A \\log{\\left (T \\right )} + \\frac{B C}{T \\tanh{\n        \\left (\\frac{C}{T} \\right )}} - B \\log{\\left (\\sinh{\\left (\\frac{C}{T}\n        \\right )} \\right )} - \\frac{D E}{T} \\tanh{\\left (\\frac{E}{T} \\right )}\n        + D \\log{\\left (\\cosh{\\left (\\frac{E}{T} \\right )} \\right )}\n\n    Examples\n    --------\n    Water ideal gas molar heat capacity; DIPPR coefficients normally in\n    J/kmol/K\n\n    >>> EQ107(300., 33363., 26790., 2610.5, 8896., 1169.)\n    33585.90452768923\n\n    References\n    ----------\n    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801\n       DIPPR/AIChE\n    .. [2] Aly, Fouad A., and Lloyd L. Lee. 
\"Self-Consistent Equations for\n       Calculating the Ideal Gas Heat Capacity, Enthalpy, and Entropy.\" Fluid\n       Phase Equilibria 6, no. 3 (January 1, 1981): 169-79.\n       doi:10.1016/0378-3812(81)85002-9.", "id": "f15808:m6"}
{"signature": "def EQ104(T, A, B, C, D, E, order=<NUM_LIT:0>):", "body": "if order == <NUM_LIT:0>:<EOL><INDENT>T2 = T*T<EOL>return A + (B + (C + (D + E/T)/(T2*T2*T))/T2)/T<EOL><DEDENT>elif order == <NUM_LIT:1>:<EOL><INDENT>T2 = T*T<EOL>T4 = T2*T2<EOL>return (-B + (-<NUM_LIT:3>*C + (-<NUM_LIT:8>*D - <NUM_LIT:9>*E/T)/(T4*T))/T2)/T2<EOL><DEDENT>elif order == -<NUM_LIT:1>:<EOL><INDENT>return A*T + B*log(T) - (<NUM_LIT>*C*T**<NUM_LIT:6> + <NUM_LIT:8>*D*T + <NUM_LIT:7>*E)/(<NUM_LIT>*T**<NUM_LIT:8>)<EOL><DEDENT>elif order == -<NUM_LIT>:<EOL><INDENT>return A*log(T) - (<NUM_LIT>*B*T**<NUM_LIT:8> + <NUM_LIT>*C*T**<NUM_LIT:6> + <NUM_LIT:9>*D*T + <NUM_LIT:8>*E)/(<NUM_LIT>*T**<NUM_LIT:9>)<EOL><DEDENT>else:<EOL><INDENT>raise Exception(order_not_found_msg)<EOL><DEDENT>", "docstring": "r'''DIPPR Equation #104. Often used in calculating second virial\n    coefficients of gases. All 5 parameters are required.\n    C, D, and E are normally large values.\n\n    .. math::\n        Y = A + \\frac{B}{T} + \\frac{C}{T^3} + \\frac{D}{T^8} + \\frac{E}{T^9}\n\n    Parameters\n    ----------\n    T : float\n        Temperature, [K]\n    A-E : float\n        Parameter for the equation; chemical and property specific [-]\n    order : int, optional\n        Order of the calculation. 0 for the calculation of the result itself;\n        for 1, the first derivative of the property is returned, for\n        -1, the indefinite integral of the property with respect to temperature\n        is returned; and for -1j, the indefinite integral of the property\n        divided by temperature with respect to temperature is returned. 
No \n        other integrals or derivatives are implemented, and an exception will \n        be raised if any other order is given.\n\n    Returns\n    -------\n    Y : float\n        Property [constant-specific; if order == 1, property/K; if order == -1,\n                  property*K; if order == -1j, unchanged from default]\n\n    Notes\n    -----\n    The derivative with respect to T, integral with respect to T, and integral\n    over T with respect to T are computed as follows. All expressions can be\n    obtained with SymPy readily.\n\n    .. math::\n        \\frac{d Y}{dT} = - \\frac{B}{T^{2}} - \\frac{3 C}{T^{4}} \n        - \\frac{8 D}{T^{9}} - \\frac{9 E}{T^{10}}\n\n    .. math::\n        \\int Y dT = A T + B \\log{\\left (T \\right )} - \\frac{1}{56 T^{8}} \n        \\left(28 C T^{6} + 8 D T + 7 E\\right)\n\n    .. math::\n        \\int \\frac{Y}{T} dT = A \\log{\\left (T \\right )} - \\frac{1}{72 T^{9}} \n        \\left(72 B T^{8} + 24 C T^{6} + 9 D T + 8 E\\right)\n\n    Examples\n    --------\n    Water second virial coefficient; DIPPR coefficients normally dimensionless.\n\n    >>> EQ104(300, 0.02222, -26.38, -16750000, -3.894E19, 3.133E21)\n    -1.1204179007265156\n\n    References\n    ----------\n    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801\n       DIPPR/AIChE", "id": "f15808:m3"}
{"signature": "def EQ100(T, A=<NUM_LIT:0>, B=<NUM_LIT:0>, C=<NUM_LIT:0>, D=<NUM_LIT:0>, E=<NUM_LIT:0>, F=<NUM_LIT:0>, G=<NUM_LIT:0>, order=<NUM_LIT:0>):", "body": "if order == <NUM_LIT:0>:<EOL><INDENT>return A + T*(B + T*(C + T*(D + T*(E + T*(F + G*T)))))<EOL><DEDENT>elif order == <NUM_LIT:1>:<EOL><INDENT>return B + T*(<NUM_LIT:2>*C + T*(<NUM_LIT:3>*D + T*(<NUM_LIT:4>*E + T*(<NUM_LIT:5>*F + <NUM_LIT:6>*G*T))))<EOL><DEDENT>elif order == -<NUM_LIT:1>:<EOL><INDENT>return T*(A + T*(B/<NUM_LIT:2> + T*(C/<NUM_LIT:3> + T*(D/<NUM_LIT:4> + T*(E/<NUM_LIT:5> + T*(F/<NUM_LIT:6> + G*T/<NUM_LIT:7>))))))<EOL><DEDENT>elif order == -<NUM_LIT>:<EOL><INDENT>return A*log(T) + T*(B + T*(C/<NUM_LIT:2> + T*(D/<NUM_LIT:3> + T*(E/<NUM_LIT:4> + T*(F/<NUM_LIT:5> + G*T/<NUM_LIT:6>)))))<EOL><DEDENT>else:<EOL><INDENT>raise Exception(order_not_found_msg)<EOL><DEDENT>", "docstring": "r'''DIPPR Equation # 100. Used in calculating the molar heat capacities\n    of liquids and solids, liquid thermal conductivity, and solid density.\n    All parameters default to zero. As this is a straightforward polynomial,\n    no restrictions on parameters apply. Note that high-order polynomials like\n    this may need large numbers of decimal places to avoid unnecessary error.\n\n    .. math::\n        Y = A + BT + CT^2 + DT^3 + ET^4 + FT^5 + GT^6\n\n    Parameters\n    ----------\n    T : float\n        Temperature, [K]\n    A-G : float\n        Parameter for the equation; chemical and property specific [-]\n    order : int, optional\n        Order of the calculation. 0 for the calculation of the result itself;\n        for 1, the first derivative of the property is returned, for\n        -1, the indefinite integral of the property with respect to temperature\n        is returned; and for -1j, the indefinite integral of the property\n        divided by temperature with respect to temperature is returned. 
No \n        other integrals or derivatives are implemented, and an exception will \n        be raised if any other order is given.\n\n    Returns\n    -------\n    Y : float\n        Property [constant-specific; if order == 1, property/K; if order == -1,\n                  property*K; if order == -1j, unchanged from default]\n\n    Notes\n    -----\n    The derivative with respect to T, integral with respect to T, and integral\n    over T with respect to T are computed as follows. All derivatives and \n    integrals are easily computed with SymPy.\n\n    .. math::\n        \\frac{d Y}{dT} = B + 2 C T + 3 D T^{2} + 4 E T^{3} + 5 F T^{4} \n        + 6 G T^{5}\n\n    .. math::\n        \\int Y dT = A T + \\frac{B T^{2}}{2} + \\frac{C T^{3}}{3} + \\frac{D \n        T^{4}}{4} + \\frac{E T^{5}}{5} + \\frac{F T^{6}}{6} + \\frac{G T^{7}}{7}\n\n    .. math::\n        \\int \\frac{Y}{T} dT = A \\log{\\left (T \\right )} + B T + \\frac{C T^{2}}\n        {2} + \\frac{D T^{3}}{3} + \\frac{E T^{4}}{4} + \\frac{F T^{5}}{5} \n        + \\frac{G T^{6}}{6}\n\n    Examples\n    --------\n    Water liquid heat capacity; DIPPR coefficients normally listed in J/kmol/K.\n\n    >>> EQ100(300, 276370., -2090.1, 8.125, -0.014116, 0.0000093701)\n    75355.81000000003\n\n    References\n    ----------\n    .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801\n       DIPPR/AIChE", "id": "f15808:m0"}
{"signature": "@staticmethod<EOL><INDENT>def Hfus(counts):<DEDENT>", "body": "tot = <NUM_LIT:0.0><EOL>for group, count in counts.items():<EOL><INDENT>tot += joback_groups_id_dict[group].Hfus*count<EOL><DEDENT>Hfus = -<NUM_LIT> + tot<EOL>return Hfus*<NUM_LIT:1000><EOL>", "docstring": "r'''Estimates the enthalpy of fusion of an organic compound at its \n        melting point using the Joback method as a function of chemical \n        structure only. \n\n        .. math::\n            \\Delta H_{fus} = -0.88 + \\sum_i H_{fus,i}\n\n        In the above equation, enthalpy of fusion is calculated in \n        kJ/mol; it is converted to J/mol here.\n\n        For 155 compounds tested by Joback, the absolute average error was\n        485.2 cal/mol  and standard deviation was 661.4 cal/mol; the average \n        relative error was 38.7%. \n\n        Parameters\n        ----------\n        counts : dict\n            Dictionary of Joback groups present (numerically indexed) and their\n            counts, [-]\n\n        Returns\n        -------\n        Hfus : float\n            Estimated enthalpy of fusion of the compound at its melting point,\n            [J/mol]\n\n        Examples\n        --------\n        >>> Joback.Hfus({1: 2, 24: 1})\n        5125.0", "id": "f15809:c0:m9"}
{"signature": "@staticmethod<EOL><INDENT>def Hvap(counts):<DEDENT>", "body": "tot = <NUM_LIT:0.0><EOL>for group, count in counts.items():<EOL><INDENT>tot += joback_groups_id_dict[group].Hvap*count<EOL><DEDENT>Hvap = <NUM_LIT> + tot<EOL>return Hvap*<NUM_LIT:1000><EOL>", "docstring": "r'''Estimates the enthalpy of vaporization of an organic compound at  \n        its normal boiling point using the Joback method as a function of  \n        chemical structure only. \n\n        .. math::\n            \\Delta H_{vap} = 15.30 + \\sum_i H_{vap,i}\n\n        In the above equation, enthalpy of fusion is calculated in \n        kJ/mol; it is converted to J/mol here.\n\n        For 368 compounds tested by Joback, the absolute average error was\n        303.5 cal/mol  and standard deviation was 429 cal/mol; the average \n        relative error was 3.88%. \n\n        Parameters\n        ----------\n        counts : dict\n            Dictionary of Joback groups present (numerically indexed) and their\n            counts, [-]\n\n        Returns\n        -------\n        Hvap : float\n            Estimated enthalpy of vaporization of the compound at its normal\n            boiling point, [J/mol]\n\n        Examples\n        --------\n        >>> Joback.Hvap({1: 2, 24: 1})\n        29018.0", "id": "f15809:c0:m10"}
{"signature": "def UNIFAC_psi(T, subgroup1, subgroup2, subgroup_data, interaction_data, <EOL>modified=False):", "body": "main1 = subgroup_data[subgroup1].main_group_id<EOL>main2 = subgroup_data[subgroup2].main_group_id<EOL>if modified:<EOL><INDENT>try:<EOL><INDENT>a, b, c = interaction_data[main1][main2]<EOL><DEDENT>except:<EOL><INDENT>return <NUM_LIT:1.><EOL><DEDENT>return exp((-a/T -b - c*T))<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>return exp(-interaction_data[main1][main2]/T)<EOL><DEDENT>except:<EOL><INDENT>return <NUM_LIT:1.><EOL><DEDENT><DEDENT>", "docstring": "r'''Calculates the interaction parameter psi(m, n) for two UNIFAC \n    subgroups, given the system temperature, the UNIFAC subgroups considered \n    for the variant of UNIFAC used, the interaction parameters for the \n    variant of UNIFAC used, and whether or not the temperature dependence is \n    modified from the original form, as shown below.\n\n    Original temperature dependence:\n\n    .. math::\n        \\Psi_{mn} = \\exp\\left(\\frac{-a_{mn}}{T}\\right)\n\n    Modified temperature dependence:\n\n    .. math::\n        \\Psi_{mn} = \\exp\\left(\\frac{-a_{mn} - b_{mn}T - c_{mn}T^2}{T}\\right)\n\n    Parameters\n    ----------\n    T : float\n        Temperature of the system, [K]\n    subgroup1 : int\n        First UNIFAC subgroup for identifier, [-]\n    subgroup2 : int\n        Second UNIFAC subgroup for identifier, [-]\n    subgroup_data : dict[UNIFAC_subgroup]\n        Normally provided as inputs to `UNIFAC`.\n    interaction_data : dict[dict[tuple(a_mn, b_mn, c_mn)]]\n        Normally provided as inputs to `UNIFAC`.\n    modified : bool\n        True if the modified temperature dependence is used by the interaction\n        parameters, otherwise False\n\n    Returns\n    -------\n    psi : float\n        UNIFAC interaction parameter term, [-]\n\n    Notes\n    -----\n    UNIFAC interaction parameters are asymmetric. 
No warning is raised if an\n    interaction parameter is missing.\n\n    Examples\n    --------\n    >>> from thermo.unifac import UFSG, UFIP, DOUFSG, DOUFIP2006\n\n    >>> UNIFAC_psi(307, 18, 1, UFSG, UFIP)\n    0.9165248264184787\n\n    >>> UNIFAC_psi(373.15, 9, 78, DOUFSG, DOUFIP2006, modified=True)\n    1.3703140538273264\n\n    References\n    ----------\n    .. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.\n       Weinheim, Germany: Wiley-VCH, 2012.\n    .. [2] Fredenslund, Aage, Russell L. Jones, and John M. Prausnitz. \"Group\n       Contribution Estimation of Activity Coefficients in Nonideal Liquid \n       Mixtures.\" AIChE Journal 21, no. 6 (November 1, 1975): 1086-99. \n       doi:10.1002/aic.690210607.", "id": "f15810:m4"}
{"signature": "def Van_der_Waals_volume(R):", "body": "return R*<NUM_LIT><EOL>", "docstring": "r'''Calculates a species Van der Waals molar volume with the UNIFAC method,\n    given a species's R parameter.\n\n    .. math::\n        V_{wk} = 15.17R_k\n\n    Parameters\n    ----------\n    R : float\n        R UNIFAC parameter (normalized Van der Waals Volume)  [-]\n\n    Returns\n    -------\n    V_vdw : float\n        Unnormalized Van der Waals volume, [m^3/mol]\n\n    Notes\n    -----\n    The volume was originally given in cm^3/mol, but is converted to SI here.\n\n    Examples\n    --------    \n    >>> Van_der_Waals_volume(4.4998)\n    6.826196599999999e-05\n\n    References\n    ----------\n    .. [1] Wei, James, Morton M. Denn, John H. Seinfeld, Arup Chakraborty, \n       Jackie Ying, Nicholas Peppas, and George Stephanopoulos. Molecular \n       Modeling and Theory in Chemical Engineering. Academic Press, 2001.", "id": "f15810:m2"}
{"signature": "@property<EOL><INDENT>def charge_balance(self):<DEDENT>", "body": "return sum([zi*ci for zi, ci in zip(self.zs, self.charges)])<EOL>", "docstring": "r'''Charge imbalance of the mixture, in units of [faraday].\n        Mixtures meeting the electroneutrality condition will have an imbalance\n        of 0.\n\n        Examples\n        --------\n        >>> Mixture(['Na+', 'Cl-', 'water'], zs=[.01, .01, .98]).charge_balance\n        0.0", "id": "f15811:c0:m41"}
{"signature": "@property<EOL><INDENT>def Vmss(self):<DEDENT>", "body": "return [i.Vms for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component solid-phase molar volumes of the chemicals in the\n        mixture at its current temperature, in units of [m^3/mol].\n\n        Examples\n        --------\n        >>> Mixture(['iron'], ws=[1], T=320).Vmss\n        [7.09593392630242e-06]", "id": "f15811:c0:m54"}
{"signature": "@property<EOL><INDENT>def Cpl(self):<DEDENT>", "body": "Cplm = self.HeatCapacityLiquidMixture(self.T, self.P, self.zs, self.ws)<EOL>if Cplm:<EOL><INDENT>return property_molar_to_mass(Cplm, self.MW)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Liquid-phase heat capacity of the mixture at its current\n        temperature and composition, in units of [J/kg/K]. For calculation of\n        this property at other temperatures or compositions, or specifying\n        manually the method used to calculate it, and more - see the object\n        oriented interface :obj:`thermo.heat_capacity.HeatCapacityLiquidMixture`;\n        each Mixture instance creates one to actually perform the calculations.\n        Note that that interface provides output in molar units.\n\n        Examples\n        --------\n        >>> Mixture(['water', 'sodium chloride'], ws=[.9, .1], T=301.5).Cpl\n        3735.4604049449786", "id": "f15811:c0:m95"}
{"signature": "@property<EOL><INDENT>def rhogm(self):<DEDENT>", "body": "Vmg = self.Vmg<EOL>if Vmg:<EOL><INDENT>return <NUM_LIT:1.>/Vmg<EOL><DEDENT>return None<EOL>", "docstring": "r'''Molar density of the mixture in the gas phase at the\n        current temperature, pressure, and composition in units of [mol/m^3].\n\n        Utilizes the object oriented interface and\n        :obj:`thermo.volume.VolumeGasMixture` to perform the actual\n        calculation of molar volume.\n\n        Examples\n        --------\n        >>> Mixture(['water'], ws=[1], T=500).rhogm\n        24.467426039789093", "id": "f15811:c0:m88"}
{"signature": "@property<EOL><INDENT>def Vmg(self):<DEDENT>", "body": "return self.VolumeGasMixture(T=self.T, P=self.P, zs=self.zs, ws=self.ws)<EOL>", "docstring": "r'''Gas-phase molar volume of the mixture at its current\n        temperature, pressure, and composition in units of [m^3/mol]. For\n        calculation of this property at other temperatures or pressures or\n        compositions, or specifying manually the method used to calculate it,\n        and more - see the object oriented interface\n        :obj:`thermo.volume.VolumeGasMixture`; each Mixture instance\n        creates one to actually perform the calculations.\n\n        Examples\n        --------\n        >>> Mixture(['hexane'], ws=[1], T=300, P=2E5).Vmg\n        0.010888694235142216", "id": "f15811:c0:m110"}
{"signature": "@property<EOL><INDENT>def Cpg(self):<DEDENT>", "body": "Cpgm = self.HeatCapacityGasMixture(self.T, self.P, self.zs, self.ws)<EOL>if Cpgm:<EOL><INDENT>return property_molar_to_mass(Cpgm, self.MW)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Gas-phase heat capacity of the mixture at its current temperature ,\n        and composition in units of [J/kg/K]. For calculation of this property at\n        other temperatures or compositions, or specifying manually the method\n        used to calculate it, and more - see the object oriented interface\n        :obj:`thermo.heat_capacity.HeatCapacityGasMixture`; each Mixture\n        instance creates one to actually perform the calculations. Note that\n        that interface provides output in molar units.\n\n        Examples\n        --------\n        >>> Mixture(['oxygen', 'nitrogen'], ws=[.4, .6], T=350, P=1E6).Cpg\n        995.8911053614883", "id": "f15811:c0:m96"}
{"signature": "@property<EOL><INDENT>def Hvapms(self):<DEDENT>", "body": "return [i.Hvapm for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component enthalpies of vaporization of the chemicals in the\n        mixture at its current temperature, in units of [J/mol].\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Hvapms\n        [32639.806783391632, 36851.7902195611]", "id": "f15811:c0:m43"}
{"signature": "@property<EOL><INDENT>def atom_fractions(self):<DEDENT>", "body": "things = dict()<EOL>for zi, atoms in zip(self.zs, self.atomss):<EOL><INDENT>for atom, count in atoms.iteritems():<EOL><INDENT>if atom in things:<EOL><INDENT>things[atom] += zi*count<EOL><DEDENT>else:<EOL><INDENT>things[atom] = zi*count<EOL><DEDENT><DEDENT><DEDENT>tot = sum(things.values())<EOL>return {atom : value/tot for atom, value in things.iteritems()}<EOL>", "docstring": "r'''Dictionary of atomic fractions for each atom in the mixture.\n\n        Examples\n        --------\n        >>> Mixture(['CO2', 'O2'], zs=[0.5, 0.5]).atom_fractions\n        {'C': 0.2, 'O': 0.8}", "id": "f15811:c0:m32"}
{"signature": "@property<EOL><INDENT>def alphag(self):<DEDENT>", "body": "kg, rhog, Cpg = self.kg, self.rhog, self.Cpg<EOL>if all([kg, rhog, Cpg]):<EOL><INDENT>return thermal_diffusivity(k=kg, rho=rhog, Cp=Cpg)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Thermal diffusivity of the gas phase of the mixture if one exists\n        at its current temperature and pressure, in units of [m^2/s].\n\n        .. math::\n            \\alpha = \\frac{k}{\\rho Cp}\n\n        Examples\n        --------\n        >>> Mixture(['ammonia'], ws=[1]).alphag\n        1.6968517002221566e-05", "id": "f15811:c0:m106"}
{"signature": "@property<EOL><INDENT>def rholms(self):<DEDENT>", "body": "return [i.rholm for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component molar densities of the chemicals in the mixture in\n        the liquid phase at the current temperature and pressure, in units of\n        [mol/m^3].\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).rholms\n        [10882.699301520635, 9135.590853014008]", "id": "f15811:c0:m61"}
{"signature": "@property<EOL><INDENT>def similarity_variables(self):<DEDENT>", "body": "return [i.similarity_variable for i in self.Chemicals]<EOL>", "docstring": "r'''Similarity variables for all chemicals in the mixture, see \n        :obj:`thermo.elements.similarity_variable` for the definition, [mol/g]\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5]).similarity_variables\n        [0.15362587797189262, 0.16279853724428964]", "id": "f15811:c0:m28"}
{"signature": "@property<EOL><INDENT>def nuls(self):<DEDENT>", "body": "return [i.nul for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component kinematic viscosities of the liquid phase of the\n        chemicals in the mixture at its current temperature and pressure, in\n        units of [m^2/s].\n\n        .. math::\n            \\nu = \\frac{\\mu}{\\rho}\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).nuls\n        [5.357870271650772e-07, 3.8127962283230277e-07]", "id": "f15811:c0:m78"}
{"signature": "@property<EOL><INDENT>def ringss(self):<DEDENT>", "body": "return [i.rings for i in self.Chemicals]<EOL>", "docstring": "r'''List of ring counts for all chemicals in the mixture.\n\n        Examples\n        --------\n        >>> Mixture(['Docetaxel', 'Paclitaxel'], zs=[.5, .5]).ringss\n        [6, 7]", "id": "f15811:c0:m30"}
{"signature": "@property<EOL><INDENT>def InChI_Keys(self):<DEDENT>", "body": "return [i.InChI_Key for i in self.Chemicals]<EOL>", "docstring": "r'''InChI keys for all chemicals in the mixture.\n\n        Examples\n        --------\n        >>> Mixture(['1-nonene'], zs=[1]).InChI_Keys\n        ['JRZJOMJEPLMPRA-UHFFFAOYSA-N']", "id": "f15811:c0:m24"}
{"signature": "@property<EOL><INDENT>def rhogs(self):<DEDENT>", "body": "return [i.rhog for i in self.Chemicals]<EOL>", "docstring": "r'''Pure-component gas-phase mass densities of the chemicals in the\n        mixture at its current temperature and pressure, in units of [kg/m^3].\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).rhogs\n        [3.1333721283939258, 3.8152260283954584]", "id": "f15811:c0:m59"}
{"signature": "@property<EOL><INDENT>def rhom(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, s=None, l=self.rholm, g=self.rhogm)<EOL>", "docstring": "r'''Molar density of the mixture at its current phase and\n        temperature and pressure, in units of [mol/m^3].\n        Available only if single phase.\n\n        Examples\n        --------\n        >>> Mixture(['1-hexanol'], ws=[1]).rhom\n        7983.414573003429", "id": "f15811:c0:m121"}
{"signature": "@property<EOL><INDENT>def alphags(self):<DEDENT>", "body": "return [i.alphag for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component thermal diffusivities of the chemicals in the\n        mixture in the gas phase at the current temperature and pressure, in\n        units of [m^2/s].\n\n        .. math::\n            \\alpha = \\frac{k}{\\rho Cp}\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).alphags\n        [3.3028044028118324e-06, 2.4412958544059014e-06]", "id": "f15811:c0:m81"}
{"signature": "@property<EOL><INDENT>def SGg(self):<DEDENT>", "body": "Vmg = self.VolumeGasMixture(T=<NUM_LIT>, P=<NUM_LIT>, zs=self.zs, ws=self.ws)<EOL>if Vmg:<EOL><INDENT>rho = Vm_to_rho(Vmg, self.MW)<EOL>return SG(rho, rho_ref=<NUM_LIT>) <EOL><DEDENT>return None<EOL>", "docstring": "r'''Specific gravity of a hypothetical gas phase of the mixture, .\n        [dimensionless]. The reference condition is air at 15.6 \u00b0C (60 \u00b0F) and \n        1 atm (rho=1.223 kg/m^3). The definition for gases uses the \n        compressibility factor of the reference gas and the mixture both at the \n        reference conditions, not the conditions of the mixture.\n\n        Examples\n        --------\n        >>> Mixture('argon').SGg\n        1.3800407778218216", "id": "f15811:c0:m111"}
{"signature": "@property<EOL><INDENT>def alphals(self):<DEDENT>", "body": "return [i.alphal for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component thermal diffusivities of the chemicals in the\n        mixture in the liquid phase at the current temperature and pressure, in\n        units of [m^2/s].\n\n        .. math::\n            \\alpha = \\frac{k}{\\rho Cp}\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).alphals\n        [8.732683564481583e-08, 7.57355434073289e-08]", "id": "f15811:c0:m80"}
{"signature": "@property<EOL><INDENT>def rholm_STP(self):<DEDENT>", "body": "Vml = self.Vml_STP<EOL>if Vml:<EOL><INDENT>return <NUM_LIT:1.>/Vml<EOL><DEDENT>return None<EOL>", "docstring": "r'''Molar density of the mixture in the liquid phase at 298.15 K and 101.325 kPa,\n        and the current composition, in units of [mol/m^3].\n\n        Examples\n        --------\n        >>> Mixture(['water'], ws=[1]).rholm_STP\n        55344.59086372442", "id": "f15811:c0:m137"}
{"signature": "@property<EOL><INDENT>def k(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, s=None, l=self.kl, g=self.kg)<EOL>", "docstring": "r'''Thermal conductivity of the mixture at its current phase,\n        temperature, and pressure in units of [W/m/K].\n        Available only if single phase.\n\n        Examples\n        --------\n        >>> Mixture(['ethanol'], ws=[1], T=300).kl\n        0.16313594741877802", "id": "f15811:c0:m127"}
{"signature": "@property<EOL><INDENT>def Cp(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, s=self.Cps, l=self.Cpl, g=self.Cpg)<EOL>", "docstring": "r'''Mass heat capacity of the mixture at its current phase and\n        temperature, in units of [J/kg/K].\n\n        Examples\n        --------\n        >>> w = Mixture(['water'], ws=[1])\n        >>> w.Cp, w.phase\n        (4180.597021827336, 'l')\n        >>> Pd = Mixture(['palladium'], ws=[1])\n        >>> Pd.Cp, Pd.phase\n        (234.26767209171211, 's')", "id": "f15811:c0:m117"}
{"signature": "@property<EOL><INDENT>def rhog_STP(self):<DEDENT>", "body": "Vmg = self.Vmg_STP<EOL>if Vmg:<EOL><INDENT>return Vm_to_rho(Vmg, self.MW)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Gas-phase mass density of the mixture at 298.15 K and 101.325 kPa,\n        and the current composition in units of [kg/m^3].\n\n        Examples\n        --------\n        >>> Mixture(['nitrogen'], ws=[1]).rhog_STP\n        1.145534453639403", "id": "f15811:c0:m134"}
{"signature": "@property<EOL><INDENT>def nug(self):<DEDENT>", "body": "mug, rhog = self.mug, self.rhog<EOL>if all([mug, rhog]):<EOL><INDENT>return nu_mu_converter(mu=mug, rho=rhog)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Kinematic viscosity of the gas phase of the mixture if one exists\n        at its current temperature and pressure, in units of [m^2/s].\n\n        .. math::\n            \\nu = \\frac{\\mu}{\\rho}\n\n        Examples\n        --------\n        >>> Mixture(['methane'], ws=[1], T=115).nug\n        2.5118460023343146e-06", "id": "f15811:c0:m104"}
{"signature": "@property<EOL><INDENT>def kg(self):<DEDENT>", "body": "return self.ThermalConductivityGasMixture(self.T, self.P, self.zs, self.ws)<EOL>", "docstring": "r'''Thermal conductivity of the mixture in the gas phase at its current\n        temperature, pressure, and composition in units of [Pa*s].\n\n        For calculation of this property at other temperatures and pressures,\n        or specifying manually the method used to calculate it, and more - see\n        the object oriented interface\n        :obj:`thermo.thermal_conductivity.ThermalConductivityGasMixture`;\n        each Mixture instance creates one to actually perform the calculations.\n\n        Examples\n        --------\n        >>> Mixture(['water'], ws=[1], T=500).kg\n        0.036035173297862676", "id": "f15811:c0:m116"}
{"signature": "@property<EOL><INDENT>def sigmas(self):<DEDENT>", "body": "return [i.sigma for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component surface tensions of the chemicals in the mixture at\n        its current temperature, in units of [N/m].\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).sigmas\n        [0.02533469712937521, 0.025254723406585546]", "id": "f15811:c0:m74"}
{"signature": "@property<EOL><INDENT>def legal_statuses(self):<DEDENT>", "body": "return [i.legal_status for i in self.Chemicals]<EOL>", "docstring": "r'''List of dictionaries of the legal status for all chemicals in the\n        mixture.\n\n        Examples\n        --------\n        >>> pprint(Mixture(['oxygen', 'nitrogen'], zs=[.5, .5]).legal_statuses)\n        [{'DSL': 'LISTED',\n          'EINECS': 'LISTED',\n          'NLP': 'UNLISTED',\n          'SPIN': 'LISTED',\n          'TSCA': 'LISTED'},\n         {'DSL': 'LISTED',\n          'EINECS': 'LISTED',\n          'NLP': 'UNLISTED',\n          'SPIN': 'LISTED',\n          'TSCA': 'LISTED'}]", "id": "f15811:c0:m35"}
{"signature": "@property<EOL><INDENT>def nul(self):<DEDENT>", "body": "mul, rhol = self.mul, self.rhol<EOL>if all([mul, rhol]):<EOL><INDENT>return nu_mu_converter(mu=mul, rho=rhol)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Kinematic viscosity of the liquid phase of the mixture if one\n        exists at its current temperature and pressure, in units of [m^2/s].\n\n        .. math::\n            \\nu = \\frac{\\mu}{\\rho}\n\n        Examples\n        --------\n        >>> Mixture(['methane'], ws=[1], T=110).nul\n        2.858088468937333e-07", "id": "f15811:c0:m103"}
{"signature": "@property<EOL><INDENT>def Z(self):<DEDENT>", "body": "Vm = self.Vm<EOL>if Vm:<EOL><INDENT>return Z(self.T, self.P, Vm)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Compressibility factor of the mixture at its current phase and\n        temperature and pressure, [dimensionless].\n        Available only if single phase.\n\n        Examples\n        --------\n        >>> Mixture(['MTBE'], ws=[1], T=900, P=1E-2).Z\n        0.9999999999056374", "id": "f15811:c0:m122"}
{"signature": "@property<EOL><INDENT>def Prl(self):<DEDENT>", "body": "Cpl, mul, kl = self.Cpl, self.mul, self.kl<EOL>if all([Cpl, mul, kl]):<EOL><INDENT>return Prandtl(Cp=Cpl, mu=mul, k=kl)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Prandtl number of the liquid phase of the mixture if one exists at\n        its current temperature and pressure, [dimensionless].\n\n        .. math::\n            Pr = \\frac{C_p \\mu}{k}\n\n        Examples\n        --------\n        >>> Mixture(['nitrogen'], ws=[1], T=70).Prl\n        2.782821450148889", "id": "f15811:c0:m107"}
{"signature": "@property<EOL><INDENT>def Zg(self):<DEDENT>", "body": "Vmg = self.Vmg<EOL>if Vmg:<EOL><INDENT>return Z(self.T, self.P, Vmg)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Compressibility factor of the mixture in the gas phase at the\n        current temperature, pressure, and composition, [dimensionless].\n\n        Utilizes the object oriented interface and\n        :obj:`thermo.volume.VolumeGasMixture` to perform the actual calculation\n        of molar volume.\n\n        Examples\n        --------\n        >>> Mixture(['hexane'], ws=[1], T=300, P=1E5).Zg\n        0.9403859376888885", "id": "f15811:c0:m90"}
{"signature": "@property<EOL><INDENT>def SGl(self):<DEDENT>", "body": "rhol = self.rhol<EOL>if rhol is not None:<EOL><INDENT>return SG(rhol)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Specific gravity of a hypothetical liquid phase of the mixture at  \n        the specified temperature and pressure, [dimensionless].\n        The reference condition is water at 4 \u00b0C and 1 atm \n        (rho=999.017 kg/m^3). For liquids, SG is defined that the reference\n        chemical's T and P are fixed, but the chemical itself varies with\n        the specified T and P.\n\n        Examples\n        --------\n        >>> Mixture('water', ws=[1], T=365).SGl\n        0.9650065522428539", "id": "f15811:c0:m67"}
{"signature": "@property<EOL><INDENT>def muls(self):<DEDENT>", "body": "return [i.mul for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component viscosities of the chemicals in the mixture in the\n        liquid phase at its current temperature and pressure, in units of \n        [Pa*s].\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).muls\n        [0.00045545522798131764, 0.00043274394349114754]", "id": "f15811:c0:m70"}
{"signature": "@property<EOL><INDENT>def Psats(self):<DEDENT>", "body": "return [i.Psat for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component vapor pressures of the chemicals in the mixture at\n        its current temperature, in units of [Pa].\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Psats\n        [32029.25774454549, 10724.419010511821]", "id": "f15811:c0:m42"}
{"signature": "@property<EOL><INDENT>def Vmg_STP(self):<DEDENT>", "body": "return self.VolumeGasMixture(T=<NUM_LIT>, P=<NUM_LIT>, zs=self.zs, ws=self.ws)<EOL>", "docstring": "r'''Gas-phase molar volume of the mixture at 298.15 K and 101.325 kPa,\n        and the current composition in units of [m^3/mol].\n\n        Examples\n        --------\n        >>> Mixture(['nitrogen'], ws=[1]).Vmg_STP\n        0.02445443688838904", "id": "f15811:c0:m132"}
{"signature": "@property<EOL><INDENT>def formulas(self):<DEDENT>", "body": "return [i.formula for i in self.Chemicals]<EOL>", "docstring": "r'''Chemical formulas for all chemicals in the mixture.\n\n        Examples\n        --------\n        >>> Mixture(['ethanol', 'trichloroethylene', 'furfuryl alcohol'],\n        ... ws=[0.5, 0.2, 0.3]).formulas\n        ['C2H6O', 'C2HCl3', 'C5H6O2']", "id": "f15811:c0:m21"}
{"signature": "@property<EOL><INDENT>def Cpss(self):<DEDENT>", "body": "return [i.Cps for i in self.Chemicals]<EOL>", "docstring": "r'''Solid-phase pure component heat capacity of the chemicals in the\n        mixture at its current temperature, in units of [J/kg/K].\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Cpss\n        [1405.341925822248, 1467.6412627521154]", "id": "f15811:c0:m48"}
{"signature": "@property<EOL><INDENT>def isentropic_exponents(self):<DEDENT>", "body": "return [i.isentropic_exponent for i in self.Chemicals]<EOL>", "docstring": "r'''Gas-phase pure component ideal-gas isentropic exponent of the\n        chemicals in the  mixture at its current temperature, [dimensionless].\n         Does not include pressure-compensation from an equation of state.\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).isentropic_exponents\n        [1.1023398979313739, 1.080418846592871]", "id": "f15811:c0:m53"}
{"signature": "@property<EOL><INDENT>def atomss(self):<DEDENT>", "body": "return [i.atoms for i in self.Chemicals]<EOL>", "docstring": "r'''List of dictionaries of atom counts for all chemicals in the mixture.\n\n        Examples\n        --------\n        >>> Mixture(['nitrogen', 'oxygen'], zs=[.01, .99]).atomss\n        [{'N': 2}, {'O': 2}]", "id": "f15811:c0:m29"}
{"signature": "@property<EOL><INDENT>def Cpgs(self):<DEDENT>", "body": "return [i.Cpg for i in self.Chemicals]<EOL>", "docstring": "r'''Gas-phase pure component heat capacity of the chemicals in the\n        mixture at its current temperature, in units of [J/kg/K].\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Cpgs\n        [1146.5360555565146, 1212.3488046342566]", "id": "f15811:c0:m50"}
{"signature": "@property<EOL><INDENT>def Hvaps(self):<DEDENT>", "body": "return [i.Hvap for i in self.Chemicals]<EOL>", "docstring": "r'''Enthalpy of vaporization of the chemicals in the mixture at its\n        current temperature, in units of [J/kg].\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Hvaps\n        [417859.9144942896, 399961.16950519773]", "id": "f15811:c0:m44"}
{"signature": "@property<EOL><INDENT>def JTg(self):<DEDENT>", "body": "Vmg, Cpgm, isobaric_expansion_g = self.Vmg, self.Cpgm, self.isobaric_expansion_g<EOL>if all((Vmg, Cpgm, isobaric_expansion_g)):<EOL><INDENT>return Joule_Thomson(T=self.T, V=Vmg, Cp=Cpgm, beta=isobaric_expansion_g)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Joule Thomson coefficient of the gas phase of the mixture if one\n        exists at its current temperature and pressure, in units of [K/Pa].\n\n        .. math::\n            \\mu_{JT} = \\left(\\frac{\\partial T}{\\partial P}\\right)_H = \\frac{1}{C_p}\n            \\left[T \\left(\\frac{\\partial V}{\\partial T}\\right)_P - V\\right]\n            = \\frac{V}{C_p}\\left(\\beta T-1\\right)\n\n        Examples\n        --------\n        >>> Mixture(['dodecane'], ws=[1], T=400, P=1000).JTg\n        5.4089897835384913e-05", "id": "f15811:c0:m102"}
{"signature": "@property<EOL><INDENT>def JTl(self):<DEDENT>", "body": "Vml, Cplm, isobaric_expansion_l = self.Vml, self.Cplm, self.isobaric_expansion_l<EOL>if all((Vml, Cplm, isobaric_expansion_l)):<EOL><INDENT>return Joule_Thomson(T=self.T, V=Vml, Cp=Cplm, beta=isobaric_expansion_l)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Joule Thomson coefficient of the liquid phase of the mixture if one\n        exists at its current temperature and pressure, in units of [K/Pa].\n\n        .. math::\n            \\mu_{JT} = \\left(\\frac{\\partial T}{\\partial P}\\right)_H = \\frac{1}{C_p}\n            \\left[T \\left(\\frac{\\partial V}{\\partial T}\\right)_P - V\\right]\n            = \\frac{V}{C_p}\\left(\\beta T-1\\right)\n\n        Examples\n        --------\n        >>> Mixture(['dodecane'], ws=[1], T=400).JTl\n        -3.193910574559279e-07", "id": "f15811:c0:m101"}
{"signature": "@property<EOL><INDENT>def rho(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, s=self.rhos, l=self.rhol, g=self.rhog)<EOL>", "docstring": "r'''Mass density of the mixture at its current phase and\n        temperature and pressure, in units of [kg/m^3].\n        Available only if single phase.\n\n        Examples\n        --------\n        >>> Mixture(['decane'], ws=[1], T=550, P=2E6).rho\n        498.67008448640604", "id": "f15811:c0:m120"}
{"signature": "@property<EOL><INDENT>def mugs(self):<DEDENT>", "body": "return [i.mug for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component viscosities of the chemicals in the mixture in the\n        gas phase at its current temperature and pressure, in units of [Pa*s].\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).mugs\n        [8.082880451060605e-06, 7.442602145854158e-06]", "id": "f15811:c0:m71"}
{"signature": "@property<EOL><INDENT>def JT(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, l=self.JTl, g=self.JTg)<EOL>", "docstring": "r'''Joule Thomson coefficient of the mixture at its\n        current phase, temperature, and pressure in units of [K/Pa].\n        Available only if single phase.\n\n        .. math::\n            \\mu_{JT} = \\left(\\frac{\\partial T}{\\partial P}\\right)_H = \\frac{1}{C_p}\n            \\left[T \\left(\\frac{\\partial V}{\\partial T}\\right)_P - V\\right]\n            = \\frac{V}{C_p}\\left(\\beta T-1\\right)\n\n        Examples\n        --------\n        >>> Mixture(['water'], ws=[1]).JT\n        -2.2150394958666412e-07", "id": "f15811:c0:m125"}
{"signature": "@property<EOL><INDENT>def JTgs(self):<DEDENT>", "body": "return [i.JTg for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component Joule Thomson coefficients of the chemicals in the\n        mixture in the gas phase at its current temperature and pressure, in\n        units of [K/Pa].\n\n        .. math::\n            \\mu_{JT} = \\left(\\frac{\\partial T}{\\partial P}\\right)_H = \\frac{1}{C_p}\n            \\left[T \\left(\\frac{\\partial V}{\\partial T}\\right)_P - V\\right]\n            = \\frac{V}{C_p}\\left(\\beta T-1\\right)\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).JTgs\n        [6.0940046688790938e-05, 4.1290005523287549e-05]", "id": "f15811:c0:m77"}
{"signature": "@property<EOL><INDENT>def solubility_parameters(self):<DEDENT>", "body": "return [i.solubility_parameter for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component solubility parameters of the chemicals in the\n        mixture at its current temperature and pressure, in units of [Pa^0.5].\n\n        .. math::\n            \\delta = \\sqrt{\\frac{\\Delta H_{vap} - RT}{V_m}}\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).solubility_parameters\n        [18062.51359608708, 14244.12852702228]", "id": "f15811:c0:m84"}
{"signature": "@property<EOL><INDENT>def Cpgms(self):<DEDENT>", "body": "return [i.Cpgm for i in self.Chemicals]<EOL>", "docstring": "r'''Gas-phase ideal gas heat capacity of the chemicals at its current\n        temperature, in units of [J/mol/K].\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Cpgms\n        [89.55804092586159, 111.70390334788907]", "id": "f15811:c0:m47"}
{"signature": "@property<EOL><INDENT>def PubChems(self):<DEDENT>", "body": "return [i.PubChem for i in self.Chemicals]<EOL>", "docstring": "r'''PubChem Component ID numbers for all chemicals in the mixture.\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5]).PubChems\n        [241, 1140]", "id": "f15811:c0:m20"}
{"signature": "@property<EOL><INDENT>def Vml_STP(self):<DEDENT>", "body": "return self.VolumeLiquidMixture(T=<NUM_LIT>, P=<NUM_LIT>, zs=self.zs, ws=self.ws)<EOL>", "docstring": "r'''Liquid-phase molar volume of the mixture at 298.15 K and 101.325 kPa,\n        and the current composition in units of [m^3/mol].\n\n        Examples\n        --------\n        >>> Mixture(['cyclobutane'], ws=[1]).Vml_STP\n        8.143327329133706e-05", "id": "f15811:c0:m131"}
{"signature": "@property<EOL><INDENT>def rhosms(self):<DEDENT>", "body": "return [i.rhosm for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component molar densities of the chemicals in the solid phase\n        at the current temperature and pressure, in units of [mol/m^3].\n\n        Examples\n        --------\n        >>> Mixture(['iron'], ws=[1], T=320).rhosms\n        [140925.7767033753]", "id": "f15811:c0:m60"}
{"signature": "@property<EOL><INDENT>def Prls(self):<DEDENT>", "body": "return [i.Prl for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component Prandtl numbers of the liquid phase of the chemicals\n        in the mixture at its current temperature and pressure, [dimensionless].\n\n        .. math::\n            Pr = \\frac{C_p \\mu}{k}\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).Prls\n        [6.13542244155373, 5.034355147908088]", "id": "f15811:c0:m82"}
{"signature": "@property<EOL><INDENT>def mass_fractions(self):<DEDENT>", "body": "things = dict()<EOL>for zi, atoms in zip(self.zs, self.atomss):<EOL><INDENT>for atom, count in atoms.iteritems():<EOL><INDENT>if atom in things:<EOL><INDENT>things[atom] += zi*count<EOL><DEDENT>else:<EOL><INDENT>things[atom] = zi*count<EOL><DEDENT><DEDENT><DEDENT>return mass_fractions(things)<EOL>", "docstring": "r'''Dictionary of mass fractions for each atom in the mixture.\n\n        Examples\n        --------\n        >>> Mixture(['CO2', 'O2'], zs=[0.5, 0.5]).mass_fractions\n        {'C': 0.15801826905745822, 'O': 0.8419817309425419}", "id": "f15811:c0:m34"}
{"signature": "@property<EOL><INDENT>def Cvgms(self):<DEDENT>", "body": "return [i.Cvgm for i in self.Chemicals]<EOL>", "docstring": "r'''Gas-phase pure component ideal-gas contant-volume heat capacities\n        of the chemicals in the mixture at its current temperature, in units\n        of [J/mol/K].  Subtracts R from the ideal-gas heat capacities; does not\n        include pressure-compensation from an equation of state.\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).Cvgms\n        [81.2435811258616, 103.38944354788907]", "id": "f15811:c0:m51"}
{"signature": "@property<EOL><INDENT>def PSRK_groups(self):<DEDENT>", "body": "return [i.PSRK_groups for i in self.Chemicals]<EOL>", "docstring": "r'''List of dictionaries of PSRK subgroup: count groups for each chemical in the mixture. Uses the PSRK subgroups,\n        as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_.\n\n        Examples\n        --------\n        >>> pprint(Mixture(['1-pentanol', 'decane'], ws=[0.5, 0.5]).PSRK_groups)\n        [{1: 1, 2: 4, 14: 1}, {1: 2, 2: 8}]", "id": "f15811:c0:m39"}
{"signature": "@property<EOL><INDENT>def alpha(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, l=self.alphal, g=self.alphag)<EOL>", "docstring": "r'''Thermal diffusivity of the mixture at its current temperature,\n        pressure, and phase in units of [m^2/s].\n        Available only if single phase.\n\n        .. math::\n            \\alpha = \\frac{k}{\\rho Cp}\n\n        Examples\n        --------\n        >>> Mixture(['furfural'], ws=[1]).alpha\n        8.696537158635412e-08", "id": "f15811:c0:m129"}
{"signature": "@property<EOL><INDENT>def Zl(self):<DEDENT>", "body": "Vml = self.Vml<EOL>if Vml:<EOL><INDENT>return Z(self.T, self.P, Vml)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Compressibility factor of the mixture in the liquid phase at the\n        current temperature, pressure, and composition, [dimensionless].\n\n        Utilizes the object oriented interface and\n        :obj:`thermo.volume.VolumeLiquidMixture` to perform the actual\n        calculation of molar volume.\n\n        Examples\n        --------\n        >>> Mixture(['water'], ws=[1]).Zl\n        0.0007385375470263454", "id": "f15811:c0:m89"}
{"signature": "@property<EOL><INDENT>def R_specific(self):<DEDENT>", "body": "return property_molar_to_mass(R, self.MW)<EOL>", "docstring": "r'''Specific gas constant of the mixture, in units of [J/kg/K].\n\n        Examples\n        --------\n        >>> Mixture(['N2', 'O2'], zs=[0.79, .21]).R_specific\n        288.1928437986195", "id": "f15811:c0:m40"}
{"signature": "@property<EOL><INDENT>def Vml(self):<DEDENT>", "body": "return self.VolumeLiquidMixture(T=self.T, P=self.P, zs=self.zs, ws=self.ws)<EOL>", "docstring": "r'''Liquid-phase molar volume of the mixture at its current\n        temperature, pressure, and composition in units of [m^3/mol]. For\n        calculation of this property at other temperatures or pressures or\n        compositions, or specifying manually the method used to calculate it,\n        and more - see the object oriented interface\n        :obj:`thermo.volume.VolumeLiquidMixture`; each Mixture instance\n        creates one to actually perform the calculations.\n\n        Examples\n        --------\n        >>> Mixture(['cyclobutane'], ws=[1], T=225).Vml\n        7.42395423425395e-05", "id": "f15811:c0:m109"}
{"signature": "@property<EOL><INDENT>def UNIFAC_groups(self):<DEDENT>", "body": "return [i.UNIFAC_groups for i in self.Chemicals]<EOL>", "docstring": "r'''List of dictionaries of UNIFAC subgroup: count groups for each chemical in the mixture. Uses the original\n        UNIFAC subgroups, as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_.\n\n        Examples\n        --------\n        >>> pprint(Mixture(['1-pentanol', 'decane'], ws=[0.5, 0.5]).UNIFAC_groups)\n        [{1: 1, 2: 4, 14: 1}, {1: 2, 2: 8}]", "id": "f15811:c0:m37"}
{"signature": "@property<EOL><INDENT>def nu(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, l=self.nul, g=self.nug)<EOL>", "docstring": "r'''Kinematic viscosity of the the mixture at its current temperature,\n        pressure, and phase in units of [m^2/s].\n        Available only if single phase.\n\n        .. math::\n            \\nu = \\frac{\\mu}{\\rho}\n\n        Examples\n        --------\n        >>> Mixture(['argon'], ws=[1]).nu\n        1.3842643382482236e-05", "id": "f15811:c0:m128"}
{"signature": "@property<EOL><INDENT>def Cps(self):<DEDENT>", "body": "Cpsm = self.HeatCapacitySolidMixture(self.T, self.P, self.zs, self.ws)<EOL>if Cpsm:<EOL><INDENT>return property_molar_to_mass(Cpsm, self.MW)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Solid-phase heat capacity of the mixture at its current temperature\n        and composition, in units of [J/kg/K]. For calculation of this property\n        at other temperatures or compositions, or specifying manually the\n        method used to calculate it,  and more - see the object oriented\n        interface :obj:`thermo.heat_capacity.HeatCapacitySolidMixture`; each\n        Mixture instance creates one to actually perform the calculations. Note\n        that that interface provides output in molar units.\n\n        Examples\n        --------\n        >>> Mixture(['silver', 'platinum'], ws=[0.95, 0.05]).Cps\n        229.55166388430328", "id": "f15811:c0:m94"}
{"signature": "@property<EOL><INDENT>def kgs(self):<DEDENT>", "body": "return [i.kg for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component thermal conductivies of the chemicals in the mixture\n        in the gas phase at its current temperature and pressure, in units of\n        [W/m/K].\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'toluene'], ws=[0.5, 0.5], T=320).kgs\n        [0.011865404482987936, 0.010981336502491088]", "id": "f15811:c0:m73"}
{"signature": "@property<EOL><INDENT>def JTls(self):<DEDENT>", "body": "return [i.JTl for i in self.Chemicals]<EOL>", "docstring": "r'''Pure component Joule Thomson coefficients of the chemicals in the\n        mixture in the liquid phase at its current temperature and pressure, in\n        units of [K/Pa].\n\n        .. math::\n            \\mu_{JT} = \\left(\\frac{\\partial T}{\\partial P}\\right)_H = \\frac{1}{C_p}\n            \\left[T \\left(\\frac{\\partial V}{\\partial T}\\right)_P - V\\right]\n            = \\frac{V}{C_p}\\left(\\beta T-1\\right)\n\n        Examples\n        --------\n        >>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).JTls\n        [-3.8633730709853161e-07, -3.464395792560331e-07]", "id": "f15811:c0:m76"}
{"signature": "@property<EOL><INDENT>def Pr(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, l=self.Prl, g=self.Prg)<EOL>", "docstring": "r'''Prandtl number of the mixture at its current temperature,\n        pressure, and phase; [dimensionless].\n        Available only if single phase.\n\n        .. math::\n            Pr = \\frac{C_p \\mu}{k}\n\n        Examples\n        --------\n        >>> Mixture(['acetone'], ws=[1]).Pr\n        4.183039103542711", "id": "f15811:c0:m130"}
{"signature": "@property<EOL><INDENT>def rhogm(self):<DEDENT>", "body": "Vmg = self.Vmg<EOL>if Vmg:<EOL><INDENT>return <NUM_LIT:1.>/Vmg<EOL><DEDENT>return None<EOL>", "docstring": "r'''Molar density of the chemical in the gas phase at the\n        current temperature and pressure, in units of [mol/m^3].\n\n        Utilizes the object oriented interface and\n        :obj:`thermo.volume.VolumeGas` to perform the actual calculation of\n        molar volume.\n\n        Examples\n        --------\n        >>> Chemical('tungsten hexafluoride').rhogm\n        42.01349946063116", "id": "f15812:c0:m63"}
{"signature": "@property<EOL><INDENT>def Hill(self):<DEDENT>", "body": "if self.__Hill:<EOL><INDENT>return self.__Hill<EOL><DEDENT>else:<EOL><INDENT>self.__Hill = atoms_to_Hill(self.atoms)<EOL>return self.__Hill<EOL><DEDENT>", "docstring": "r'''Hill formula of a compound. For a description of the Hill system,\n        see :obj:`thermo.elements.atoms_to_Hill`.\n\n        Examples\n        --------\n        >>> Chemical('furfuryl alcohol').Hill\n        'C5H6O2'", "id": "f15812:c0:m30"}
{"signature": "@property<EOL><INDENT>def Psat(self):<DEDENT>", "body": "return self.VaporPressure(self.T)<EOL>", "docstring": "r'''Vapor pressure of the chemical at its current temperature, in units\n        of [Pa]. For calculation of this property at other temperatures,\n        or specifying manually the method used to calculate it, and more - see\n        the object oriented interface :obj:`thermo.vapor_pressure.VaporPressure`;\n        each Chemical instance creates one to actually perform the calculations.\n\n        Examples\n        --------\n        >>> Chemical('water', T=320).Psat\n        10533.614271198725\n        >>> Chemical('water').VaporPressure.T_dependent_property(320)\n        10533.614271198725\n        >>> Chemical('water').VaporPressure.all_methods\n        set(['VDI_PPDS', 'BOILING_CRITICAL', 'WAGNER_MCGARRY', 'AMBROSE_WALTON', 'COOLPROP', 'LEE_KESLER_PSAT', 'EOS', 'ANTOINE_POLING', 'SANJARI', 'DIPPR_PERRY_8E', 'Edalat'])", "id": "f15812:c0:m43"}
{"signature": "@property<EOL><INDENT>def JTl(self):<DEDENT>", "body": "Vml, Cplm, isobaric_expansion_l = self.Vml, self.Cplm, self.isobaric_expansion_l<EOL>if all((Vml, Cplm, isobaric_expansion_l)):<EOL><INDENT>return Joule_Thomson(T=self.T, V=Vml, Cp=Cplm, beta=isobaric_expansion_l)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Joule Thomson coefficient of the chemical in the liquid phase at\n        its current temperature and pressure, in units of [K/Pa].\n\n        .. math::\n            \\mu_{JT} = \\left(\\frac{\\partial T}{\\partial P}\\right)_H = \\frac{1}{C_p}\n            \\left[T \\left(\\frac{\\partial V}{\\partial T}\\right)_P - V\\right]\n            = \\frac{V}{C_p}\\left(\\beta T-1\\right)\n\n        Utilizes the temperature-derivative method of\n        :obj:`thermo.volume.VolumeLiquid` and the temperature-dependent heat\n        capacity method :obj:`thermo.heat_capacity.HeatCapacityLiquid` to\n        obtain the properties required for the actual calculation.\n\n        Examples\n        --------\n        >>> Chemical('dodecane', T=400).JTl\n        -3.0827160465192742e-07", "id": "f15812:c0:m81"}
{"signature": "def draw_3d(self, width=<NUM_LIT>, height=<NUM_LIT>, style='<STR_LIT>', Hs=True): ", "body": "try:<EOL><INDENT>import py3Dmol<EOL>from IPython.display import display<EOL>if Hs:<EOL><INDENT>mol = self.rdkitmol_Hs<EOL><DEDENT>else:<EOL><INDENT>mol = self.rdkitmol<EOL><DEDENT>AllChem.EmbedMultipleConfs(mol)<EOL>mb = Chem.MolToMolBlock(mol)<EOL>p = py3Dmol.view(width=width,height=height)<EOL>p.addModel(mb,'<STR_LIT>')<EOL>p.setStyle({style:{}})<EOL>p.zoomTo()<EOL>display(p.show())<EOL><DEDENT>except:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>", "docstring": "r'''Interface for drawing an interactive 3D view of the molecule.\n        Requires an HTML5 browser, and the libraries RDKit, pymol3D, and\n        IPython. An exception is raised if all three of these libraries are\n        not installed.\n\n        Parameters\n        ----------\n        width : int\n            Number of pixels wide for the view\n        height : int\n            Number of pixels tall for the view\n        style : str\n            One of 'stick', 'line', 'cross', or 'sphere'\n        Hs : bool\n            Whether or not to show hydrogen\n\n        Examples\n        --------\n        >>> Chemical('cubane').draw_3d()\n        <IPython.core.display.HTML object>", "id": "f15812:c0:m4"}
{"signature": "@property<EOL><INDENT>def Prg(self):<DEDENT>", "body": "Cpg, mug, kg = self.Cpg, self.mug, self.kg<EOL>if all([Cpg, mug, kg]):<EOL><INDENT>return Prandtl(Cp=Cpg, mu=mug, k=kg)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Prandtl number of the gas phase of the chemical at its\n        current temperature and pressure, [dimensionless].\n\n        .. math::\n            Pr = \\frac{C_p \\mu}{k}\n\n        Utilizes the temperature and pressure dependent object oriented\n        interfaces :obj:`thermo.viscosity.ViscosityGas`,\n        :obj:`thermo.thermal_conductivity.ThermalConductivityGas`,\n        and :obj:`thermo.heat_capacity.HeatCapacityGas` to calculate the\n        actual properties.\n\n        Examples\n        --------\n        >>> Chemical('NH3').Prg\n        0.847263731933008", "id": "f15812:c0:m88"}
{"signature": "@property<EOL><INDENT>def rholm(self):<DEDENT>", "body": "Vml = self.Vml<EOL>if Vml:<EOL><INDENT>return <NUM_LIT:1.>/Vml<EOL><DEDENT>return None<EOL>", "docstring": "r'''Molar density of the chemical in the liquid phase at the\n        current temperature and pressure, in units of [mol/m^3].\n\n        Utilizes the object oriented interface and\n        :obj:`thermo.volume.VolumeLiquid` to perform the actual calculation of\n        molar volume.\n\n        Examples\n        --------\n        >>> Chemical('nitrogen', T=70).rholm\n        29937.20179186975", "id": "f15812:c0:m62"}
{"signature": "@property<EOL><INDENT>def eos(self):<DEDENT>", "body": "return self.eos_in_a_box[<NUM_LIT:0>]<EOL>", "docstring": "r'''Equation of state object held by the chemical; used to calculate\n        excess thermodynamic quantities, and also provides a vapor pressure\n        curve, enthalpy of vaporization curve, fugacity, thermodynamic partial\n        derivatives, and more; see :obj:`thermo.eos` for a full listing.\n\n        Examples\n        --------\n        >>> Chemical('methane').eos.V_g\n        0.02441019502181826", "id": "f15812:c0:m8"}
{"signature": "@property<EOL><INDENT>def Cpg(self):<DEDENT>", "body": "Cpgm = self.HeatCapacityGas(self.T)<EOL>if Cpgm:<EOL><INDENT>return property_molar_to_mass(Cpgm, self.MW)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Gas-phase heat capacity of the chemical at its current temperature,\n        in units of [J/kg/K]. For calculation of this property at other\n        temperatures, or specifying manually the method used to calculate it,\n        and more - see the object oriented interface\n        :obj:`thermo.heat_capacity.HeatCapacityGas`; each Chemical instance\n        creates one to actually perform the calculations. Note that that\n        interface provides output in molar units.\n\n        Examples\n        --------\n        >>> w = Chemical('water', T=520)\n        >>> w.Cpg\n        1967.6698314620658", "id": "f15812:c0:m51"}
{"signature": "@property<EOL><INDENT>def charge(self):<DEDENT>", "body": "try:<EOL><INDENT>if not self.rdkitmol:<EOL><INDENT>return charge_from_formula(self.formula)<EOL><DEDENT>else:<EOL><INDENT>return Chem.GetFormalCharge(self.rdkitmol)<EOL><DEDENT><DEDENT>except:<EOL><INDENT>return charge_from_formula(self.formula)<EOL><DEDENT>", "docstring": "r'''Charge of a chemical, computed with RDKit from a chemical's SMILES.\n        If RDKit is not available, holds None.\n\n        Examples\n        --------\n        >>> Chemical('sodium ion').charge\n        1", "id": "f15812:c0:m25"}
{"signature": "@property<EOL><INDENT>def mug(self):<DEDENT>", "body": "return self.ViscosityGas(self.T, self.P)<EOL>", "docstring": "r'''Viscosity of the chemical in the gas phase at its current\n        temperature and pressure, in units of [Pa*s].\n\n        For calculation of this property at other temperatures and pressures,\n        or specifying manually the method used to calculate it, and more - see\n        the object oriented interface\n        :obj:`thermo.viscosity.ViscosityGas`; each Chemical instance\n        creates one to actually perform the calculations.\n\n        Examples\n        --------\n        >>> Chemical('water', T=320, P=100).mug\n        1.0431450856297212e-05", "id": "f15812:c0:m75"}
{"signature": "@property<EOL><INDENT>def U(self):<DEDENT>", "body": "return property_molar_to_mass(self.Um, self.MW) if (self.Um is not None) else None<EOL>", "docstring": "r'''Internal energy of the chemical at its current temperature and\n        pressure, in units of [J/kg].\n\n        This property requires that :obj:`thermo.chemical.set_thermo` ran\n        successfully to be accurate.\n        It also depends on the molar volume of the chemical at its current\n        conditions.", "id": "f15812:c0:m22"}
{"signature": "@property<EOL><INDENT>def Cp(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, s=self.Cps, l=self.Cpl, g=self.Cpg)<EOL>", "docstring": "r'''Mass heat capacity of the chemical at its current phase and\n        temperature, in units of [J/kg/K].\n\n        Utilizes the object oriented interfaces\n        :obj:`thermo.heat_capacity.HeatCapacitySolid`,\n        :obj:`thermo.heat_capacity.HeatCapacityLiquid`,\n        and :obj:`thermo.heat_capacity.HeatCapacityGas` to perform the\n        actual calculation of each property. Note that those interfaces provide\n        output in molar units (J/mol/K).\n\n        Examples\n        --------\n        >>> w = Chemical('water')\n        >>> w.Cp, w.phase\n        (4180.597021827336, 'l')\n        >>> Chemical('palladium').Cp\n        234.26767209171211", "id": "f15812:c0:m91"}
{"signature": "@property<EOL><INDENT>def Um(self):<DEDENT>", "body": "return self.Hm - self.P*self.Vm if (self.Vm and self.Hm is not None) else None<EOL>", "docstring": "r'''Internal energy of the chemical at its current temperature and\n        pressure, in units of [J/mol].\n\n        This property requires that :obj:`thermo.chemical.set_thermo` ran\n        successfully to be accurate.\n        It also depends on the molar volume of the chemical at its current\n        conditions.", "id": "f15812:c0:m21"}
{"signature": "@property<EOL><INDENT>def Cpgm(self):<DEDENT>", "body": "return self.HeatCapacityGas(self.T)<EOL>", "docstring": "r'''Gas-phase ideal gas heat capacity of the chemical at its current\n        temperature, in units of [J/mol/K]. For calculation of this property at\n        other temperatures, or specifying manually the method used to calculate\n        it, and more - see the object oriented interface\n        :obj:`thermo.heat_capacity.HeatCapacityGas`; each Chemical instance\n        creates one to actually perform the calculations.\n\n        Examples\n        --------\n        >>> Chemical('water').Cpgm\n        33.583577868850675\n        >>> Chemical('water').HeatCapacityGas.T_dependent_property(320)\n        33.67865044005934\n        >>> Chemical('water').HeatCapacityGas.T_dependent_property_integral(300, 320)\n        672.6480417835064", "id": "f15812:c0:m48"}
{"signature": "@property<EOL><INDENT>def kg(self):<DEDENT>", "body": "return self.ThermalConductivityGas(self.T, self.P)<EOL>", "docstring": "r'''Thermal conductivity of the chemical in the gas phase at its\n        current temperature and pressure, in units of [W/m/K].\n\n        For calculation of this property at other temperatures and pressures,\n        or specifying manually the method used to calculate it, and more - see\n        the object oriented interface\n        :obj:`thermo.thermal_conductivity.ThermalConductivityGas`; each\n        Chemical instance creates one to actually perform the calculations.\n\n        Examples\n        --------\n        >>> Chemical('water', T=320).kg\n        0.021273128263091207", "id": "f15812:c0:m77"}
{"signature": "@property<EOL><INDENT>def economic_status(self):<DEDENT>", "body": "if self.__economic_status:<EOL><INDENT>return self.__economic_status<EOL><DEDENT>else:<EOL><INDENT>self.__economic_status = economic_status(self.CAS, Method='<STR_LIT>')<EOL>return self.__economic_status<EOL><DEDENT>", "docstring": "r'''Dictionary of economic status indicators for the chemical.\n\n        Examples\n        --------\n        >>> pprint(Chemical('benzene').economic_status)\n        [\"US public: {'Manufactured': 6165232.1, 'Imported': 463146.474, 'Exported': 271908.252}\",\n         u'1,000,000 - 10,000,000 tonnes per annum',\n         u'Intermediate Use Only',\n         'OECD HPV Chemicals']", "id": "f15812:c0:m34"}
{"signature": "@property<EOL><INDENT>def Cvgm(self):<DEDENT>", "body": "Cpgm = self.HeatCapacityGas(self.T)<EOL>if Cpgm:<EOL><INDENT>return Cpgm - R<EOL><DEDENT>return None<EOL>", "docstring": "r'''Gas-phase ideal-gas contant-volume heat capacity of the chemical at\n        its current temperature, in units of [J/mol/K]. Subtracts R from\n        the ideal-gas heat capacity; does not include pressure-compensation\n        from an equation of state.\n\n        Examples\n        --------\n        >>> w = Chemical('water', T=520)\n        >>> w.Cvgm\n        27.13366316134193", "id": "f15812:c0:m52"}
{"signature": "@property<EOL><INDENT>def Poynting(self):<DEDENT>", "body": "Vml, Psat = self.Vml, self.Psat<EOL>if Vml and Psat:<EOL><INDENT>return exp(Vml*(self.P-Psat)/R/self.T)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Poynting correction factor [dimensionless] for use in phase \n        equilibria methods based on activity coefficients or other reference \n        states. Performs the shortcut calculation assuming molar volume is \n        independent of pressure.\n\n        .. math::\n            \\text{Poy} =  \\exp\\left[\\frac{V_l (P-P^{sat})}{RT}\\right]\n\n        The full calculation normally returns values very close to the\n        approximate ones. This property is defined in terms of\n        pure components only.\n\n        Examples\n        --------\n        >>> Chemical('pentane', T=300, P=1E7).Poynting\n        1.5743051250679803\n\n        Notes\n        -----\n        The full equation shown below can be used as follows:\n\n        .. math::\n            \\text{Poy} = \\exp\\left[\\frac{\\int_{P_i^{sat}}^P V_i^l dP}{RT}\\right]\n\n        >>> from scipy.integrate import quad\n        >>> c = Chemical('pentane', T=300, P=1E7)\n        >>> exp(quad(lambda P : c.VolumeLiquid(c.T, P), c.Psat, c.P)[0]/R/c.T)\n        1.5821826990975127", "id": "f15812:c0:m105"}
{"signature": "@property<EOL><INDENT>def Zl(self):<DEDENT>", "body": "Vml = self.Vml<EOL>if Vml:<EOL><INDENT>return Z(self.T, self.P, Vml)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Compressibility factor of the chemical in the liquid phase at the\n        current temperature and pressure, [dimensionless].\n\n        Utilizes the object oriented interface and\n        :obj:`thermo.volume.VolumeLiquid` to perform the actual calculation of\n        molar volume.\n\n        Examples\n        --------\n        >>> Chemical('water').Zl\n        0.0007385375470263454", "id": "f15812:c0:m65"}
{"signature": "@property<EOL><INDENT>def legal_status(self):<DEDENT>", "body": "if self.__legal_status:<EOL><INDENT>return self.__legal_status<EOL><DEDENT>else:<EOL><INDENT>self.__legal_status = legal_status(self.CAS, Method='<STR_LIT>')<EOL>return self.__legal_status<EOL><DEDENT>", "docstring": "r'''Dictionary of legal status indicators for the chemical.\n\n        Examples\n        --------\n        >>> pprint(Chemical('benzene').legal_status)\n        {'DSL': 'LISTED',\n         'EINECS': 'LISTED',\n         'NLP': 'UNLISTED',\n         'SPIN': 'LISTED',\n         'TSCA': 'LISTED'}", "id": "f15812:c0:m33"}
{"signature": "@property<EOL><INDENT>def mass_fractions(self):<DEDENT>", "body": "if self.__mass_fractions:<EOL><INDENT>return self.__mass_fractions<EOL><DEDENT>else:<EOL><INDENT>self.__mass_fractions =  mass_fractions(self.atoms, self.MW)<EOL>return self.__mass_fractions<EOL><DEDENT>", "docstring": "r'''Dictionary of atom:mass-weighted fractional occurrence of elements.\n        Useful when performing mass balances. For atom-fraction occurrences, see\n        :obj:`atom_fractions`.\n\n        Examples\n        --------\n        >>> Chemical('water').mass_fractions\n        {'H': 0.11189834407236524, 'O': 0.8881016559276347}", "id": "f15812:c0:m32"}
{"signature": "@property<EOL><INDENT>def Cplm(self):<DEDENT>", "body": "return self.HeatCapacityLiquid(self.T)<EOL>", "docstring": "r'''Liquid-phase heat capacity of the chemical at its current temperature,\n        in units of [J/mol/K]. For calculation of this property at other\n        temperatures, or specifying manually the method used to calculate it,\n        and more - see the object oriented interface\n        :obj:`thermo.heat_capacity.HeatCapacityLiquid`; each Chemical instance\n        creates one to actually perform the calculations.\n\n        Notes\n        -----\n        Some methods give heat capacity along the saturation line, some at\n        1 atm but only up to the normal boiling point, and some give heat\n        capacity at 1 atm up to the normal boiling point and then along the\n        saturation line. Real-liquid heat capacity is pressure dependent, but\n        this interface is not.\n\n        Examples\n        --------\n        >>> Chemical('water').Cplm\n        75.31462591538556\n        >>> Chemical('water').HeatCapacityLiquid.T_dependent_property(320)\n        75.2591744360631\n        >>> Chemical('water').HeatCapacityLiquid.T_dependent_property_integral(300, 320)\n        1505.0619005000553", "id": "f15812:c0:m47"}
{"signature": "@property<EOL><INDENT>def Zs(self):<DEDENT>", "body": "Vms = self.Vms<EOL>if Vms:<EOL><INDENT>return Z(self.T, self.P, Vms)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Compressibility factor of the chemical in the solid phase at the\n        current temperature and pressure, [dimensionless].\n\n        Utilizes the object oriented interface and\n        :obj:`thermo.volume.VolumeSolid` to perform the actual calculation of\n        molar volume.\n\n        Examples\n        --------\n        >>> Chemical('palladium').Zs\n        0.00036248477437931853", "id": "f15812:c0:m64"}
{"signature": "@property<EOL><INDENT>def alpha(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, l=self.alphal, g=self.alphag)<EOL>", "docstring": "r'''Thermal diffusivity of the chemical at its current temperature,\n        pressure, and phase in units of [m^2/s].\n\n        .. math::\n            \\alpha = \\frac{k}{\\rho Cp}\n\n        Examples\n        --------\n        >>> Chemical('furfural').alpha\n        8.696537158635412e-08", "id": "f15812:c0:m103"}
{"signature": "@property<EOL><INDENT>def kl(self):<DEDENT>", "body": "return self.ThermalConductivityLiquid(self.T, self.P)<EOL>", "docstring": "r'''Thermal conductivity of the chemical in the liquid phase at its\n        current temperature and pressure, in units of [W/m/K].\n\n        For calculation of this property at other temperatures and pressures,\n        or specifying manually the method used to calculate it, and more - see\n        the object oriented interface\n        :obj:`thermo.thermal_conductivity.ThermalConductivityLiquid`; each\n        Chemical instance creates one to actually perform the calculations.\n\n        Examples\n        --------\n        >>> Chemical('water', T=320).kl\n        0.6369957248212118", "id": "f15812:c0:m76"}
{"signature": "@property<EOL><INDENT>def Zg(self):<DEDENT>", "body": "Vmg = self.Vmg<EOL>if Vmg:<EOL><INDENT>return Z(self.T, self.P, Vmg)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Compressibility factor of the chemical in the gas phase at the\n        current temperature and pressure, [dimensionless].\n\n        Utilizes the object oriented interface and\n        :obj:`thermo.volume.VolumeGas` to perform the actual calculation of\n        molar volume.\n\n        Examples\n        --------\n        >>> Chemical('sulfur hexafluoride', T=700, P=1E9).Zg\n        11.140084184207813", "id": "f15812:c0:m66"}
{"signature": "@property<EOL><INDENT>def rdkitmol_Hs(self):<DEDENT>", "body": "if self.__rdkitmol_Hs:<EOL><INDENT>return self.__rdkitmol_Hs<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>self.__rdkitmol_Hs = Chem.AddHs(self.rdkitmol)<EOL>return self.__rdkitmol_Hs<EOL><DEDENT>except:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>", "docstring": "r'''RDKit object of the chemical, with hydrogen. If RDKit is not\n        available, holds None.\n\n        For examples of what can be done with RDKit, see\n        `their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.", "id": "f15812:c0:m29"}
{"signature": "@property<EOL><INDENT>def rhos(self):<DEDENT>", "body": "Vms = self.Vms<EOL>if Vms:<EOL><INDENT>return Vm_to_rho(Vms, self.MW)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Solid-phase mass density of the chemical at its current temperature,\n        in units of [kg/m^3]. For calculation of this property at\n        other temperatures, or specifying manually the method used\n        to calculate it, and more - see the object oriented interface\n        :obj:`thermo.volume.VolumeSolid`; each Chemical instance\n        creates one to actually perform the calculations. Note that that\n        interface provides output in molar units.\n\n        Examples\n        --------\n        >>> Chemical('iron').rhos\n        7869.999999999994", "id": "f15812:c0:m58"}
{"signature": "@property<EOL><INDENT>def atom_fractions(self):<DEDENT>", "body": "if self.__atom_fractions:<EOL><INDENT>return self.__atom_fractions<EOL><DEDENT>else:<EOL><INDENT>self.__atom_fractions = atom_fractions(self.atoms)<EOL>return self.__atom_fractions<EOL><DEDENT>", "docstring": "r'''Dictionary of atom:fractional occurrence of the elements in a\n        chemical. Useful when performing element balances. For mass-fraction\n        occurrences, see :obj:`mass_fractions`.\n\n        Examples\n        --------\n        >>> Chemical('Ammonium aluminium sulfate').atom_fractions\n        {'H': 0.25, 'S': 0.125, 'Al': 0.0625, 'O': 0.5, 'N': 0.0625}", "id": "f15812:c0:m31"}
{"signature": "@property<EOL><INDENT>def permittivity(self):<DEDENT>", "body": "return self.Permittivity(self.T)<EOL>", "docstring": "r'''Relative permittivity (dielectric constant) of the chemical at its \n        current temperature, [dimensionless].\n\n        For calculation of this property at other temperatures,\n        or specifying manually the method used to calculate it, and more - see\n        the object oriented interface :obj:`thermo.permittivity.Permittivity`;\n        each Chemical instance creates one to actually perform the calculations.\n\n        Examples\n        --------\n        >>> Chemical('toluene', T=250).permittivity\n        2.49775625", "id": "f15812:c0:m79"}
{"signature": "@property<EOL><INDENT>def nug(self):<DEDENT>", "body": "mug, rhog = self.mug, self.rhog<EOL>if all([mug, rhog]):<EOL><INDENT>return nu_mu_converter(mu=mug, rho=rhog)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Kinematic viscosity of the gas phase of the chemical at its\n        current temperature and pressure, in units of [m^2/s].\n\n        .. math::\n            \\nu = \\frac{\\mu}{\\rho}\n\n        Utilizes the temperature and pressure dependent object oriented\n        interfaces :obj:`thermo.volume.VolumeGas`,\n        :obj:`thermo.viscosity.ViscosityGas`  to calculate the\n        actual properties.\n\n        Examples\n        --------\n        >>> Chemical('methane', T=115).nug\n        2.5056924327995865e-06", "id": "f15812:c0:m84"}
{"signature": "@property<EOL><INDENT>def SGg(self):<DEDENT>", "body": "Vmg = self.VolumeGas(T=<NUM_LIT>, P=<NUM_LIT>)<EOL>if Vmg:<EOL><INDENT>rho = Vm_to_rho(Vmg, self.MW)<EOL>return SG(rho, rho_ref=<NUM_LIT>) <EOL><DEDENT>return None<EOL>", "docstring": "r'''Specific gravity of the gas phase of the chemical, [dimensionless].\n        The reference condition is air at 15.6 \u00b0C (60 \u00b0F) and 1 atm \n        (rho=1.223 kg/m^3). The definition for gases uses the compressibility\n        factor of the reference gas and the chemical both at the reference\n        conditions, not the conditions of the chemical.\n\n        Examples\n        --------\n        >>> Chemical('argon').SGg\n        1.3795835970877504", "id": "f15812:c0:m69"}
{"signature": "@property<EOL><INDENT>def Van_der_Waals_area(self):<DEDENT>", "body": "if self.UNIFAC_Q:<EOL><INDENT>return Van_der_Waals_area(self.UNIFAC_Q)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Unnormalized Van der Waals area, in units of [m^2/mol].\n\n        Examples\n        --------\n        >>> Chemical('hexane').Van_der_Waals_area\n        964000.0", "id": "f15812:c0:m41"}
{"signature": "@property<EOL><INDENT>def SGs(self):<DEDENT>", "body": "rhos = self.rhos<EOL>if rhos is not None:<EOL><INDENT>return SG(rhos)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Specific gravity of the solid phase of the chemical at the \n        specified temperature and pressure, [dimensionless].\n        The reference condition is water at 4 \u00b0C and 1 atm \n        (rho=999.017 kg/m^3). The SG varies with temperature and pressure\n        but only very slightly.\n\n        Examples\n        --------\n        >>> Chemical('iron').SGs\n        7.87774317235069", "id": "f15812:c0:m67"}
{"signature": "@property<EOL><INDENT>def rhog(self):<DEDENT>", "body": "Vmg = self.Vmg<EOL>if Vmg:<EOL><INDENT>return Vm_to_rho(Vmg, self.MW)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Gas-phase mass density of the chemical at its current temperature\n        and pressure, in units of [kg/m^3]. For calculation of this property at\n        other temperatures or pressures, or specifying manually the method used\n        to calculate it, and more - see the object oriented interface\n        :obj:`thermo.volume.VolumeGas`; each Chemical instance\n        creates one to actually perform the calculations. Note that that\n        interface provides output in molar units.\n\n        Examples\n        --------\n        Estimate the density of the core of the sun, at 15 million K and\n        26.5 PetaPascals, assuming pure helium (actually 68% helium):\n\n        >>> Chemical('helium', T=15E6, P=26.5E15).rhog\n        8329.27226509739\n\n        Compared to a result on\n        `Wikipedia <https://en.wikipedia.org/wiki/Solar_core>`_ of 150000\n        kg/m^3, the fundamental equation of state performs poorly.\n\n        >>> He = Chemical('helium', T=15E6, P=26.5E15)\n        >>> He.VolumeGas.set_user_methods_P(['IDEAL']); He.rhog\n        850477.8065477367\n\n        The ideal-gas law performs somewhat better, but vastly overshoots\n        the density prediction.", "id": "f15812:c0:m60"}
{"signature": "@property<EOL><INDENT>def rho(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, s=self.rhos, l=self.rhol, g=self.rhog)<EOL>", "docstring": "r'''Mass density of the chemical at its current phase and\n        temperature and pressure, in units of [kg/m^3].\n\n        Utilizes the object oriented interfaces\n        :obj:`thermo.volume.VolumeSolid`,\n        :obj:`thermo.volume.VolumeLiquid`,\n        and :obj:`thermo.volume.VolumeGas` to perform the\n        actual calculation of each property. Note that those interfaces provide\n        output in units of m^3/mol.\n\n        Examples\n        --------\n        >>> Chemical('decane', T=550, P=2E6).rho\n        498.67008448640604", "id": "f15812:c0:m94"}
{"signature": "@property<EOL><INDENT>def Vms(self):<DEDENT>", "body": "return self.VolumeSolid(self.T)<EOL>", "docstring": "r'''Solid-phase molar volume of the chemical at its current\n        temperature, in units of [m^3/mol]. For calculation of this property at\n        other temperatures, or specifying manually the method used to calculate\n        it, and more - see the object oriented interface\n        :obj:`thermo.volume.VolumeSolid`; each Chemical instance\n        creates one to actually perform the calculations.\n\n        Examples\n        --------\n        >>> Chemical('iron').Vms\n        7.09593392630242e-06", "id": "f15812:c0:m55"}
{"signature": "@property<EOL><INDENT>def UNIFAC_Q(self):<DEDENT>", "body": "if self.UNIFAC_groups:<EOL><INDENT>return UNIFAC_RQ(self.UNIFAC_groups)[<NUM_LIT:1>]<EOL><DEDENT>return None<EOL>", "docstring": "r'''UNIFAC `Q` (normalized Van der Waals area), dimensionless.\n        Used in the UNIFAC model.\n\n        Examples\n        --------\n        >>> Chemical('decane').UNIFAC_Q\n        6.016", "id": "f15812:c0:m39"}
{"signature": "@property<EOL><INDENT>def rdkitmol(self):<DEDENT>", "body": "if self.__rdkitmol:<EOL><INDENT>return self.__rdkitmol<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>self.__rdkitmol = Chem.MolFromSmiles(self.smiles)<EOL>return self.__rdkitmol<EOL><DEDENT>except:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>", "docstring": "r'''RDKit object of the chemical, without hydrogen. If RDKit is not\n        available, holds None.\n\n        For examples of what can be done with RDKit, see\n        `their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.", "id": "f15812:c0:m28"}
{"signature": "@property<EOL><INDENT>def Am(self):<DEDENT>", "body": "return self.Um - self.T*self.Sm if (self.Um is not None and self.Sm is not None) else None<EOL>", "docstring": "r'''Helmholtz energy of the chemical at its current temperature and\n        pressure, in units of [J/mol].\n\n        This property requires that :obj:`thermo.chemical.set_thermo` ran\n        successfully to be accurate.\n        It also depends on the molar volume of the chemical at its current\n        conditions.", "id": "f15812:c0:m23"}
{"signature": "@property<EOL><INDENT>def mu(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, l=self.mul, g=self.mug)<EOL>", "docstring": "r'''Viscosity of the chemical at its current phase, temperature, and\n        pressure in units of [Pa*s].\n\n        Utilizes the object oriented interfaces\n        :obj:`thermo.viscosity.ViscosityLiquid` and\n        :obj:`thermo.viscosity.ViscosityGas` to perform the\n        actual calculation of each property.\n\n        Examples\n        --------\n        >>> Chemical('ethanol', T=300).mu\n        0.001044526538460911\n        >>> Chemical('ethanol', T=400).mu\n        1.1853097849748217e-05", "id": "f15812:c0:m100"}
{"signature": "@property<EOL><INDENT>def Parachor(self):<DEDENT>", "body": "sigma, rhol, rhog = self.sigma, self.rhol, self.rhog<EOL>if all((sigma, rhol, rhog, self.MW)):<EOL><INDENT>return Parachor(sigma=sigma, MW=self.MW, rhol=rhol, rhog=rhog)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Parachor of the chemical at its\n        current temperature and pressure, in units of [N^0.25*m^2.75/mol].\n\n        .. math::\n            P = \\frac{\\sigma^{0.25} MW}{\\rho_L - \\rho_V}\n\n        Calculated based on surface tension, density of the liquid and gas\n        phase, and molecular weight. For uses of this property, see\n        :obj:`thermo.utils.Parachor`.\n\n        Examples\n        --------\n        >>> Chemical('octane').Parachor\n        6.291693072841486e-05", "id": "f15812:c0:m90"}
{"signature": "@property<EOL><INDENT>def JT(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, l=self.JTl, g=self.JTg)<EOL>", "docstring": "r'''Joule Thomson coefficient of the chemical at its\n        current phase and temperature, in units of [K/Pa].\n\n        .. math::\n            \\mu_{JT} = \\left(\\frac{\\partial T}{\\partial P}\\right)_H = \\frac{1}{C_p}\n            \\left[T \\left(\\frac{\\partial V}{\\partial T}\\right)_P - V\\right]\n            = \\frac{V}{C_p}\\left(\\beta T-1\\right)\n\n        Examples\n        --------\n        >>> Chemical('water').JT\n        -2.2150394958666407e-07", "id": "f15812:c0:m99"}
{"signature": "@property<EOL><INDENT>def rhol(self):<DEDENT>", "body": "Vml = self.Vml<EOL>if Vml:<EOL><INDENT>return Vm_to_rho(Vml, self.MW)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Liquid-phase mass density of the chemical at its current\n        temperature and pressure, in units of [kg/m^3]. For calculation of this\n        property at other temperatures and pressures, or specifying manually\n        the method used to calculate it, and more - see the object oriented\n        interface :obj:`thermo.volume.VolumeLiquid`; each Chemical instance\n        creates one to actually perform the calculations. Note that that\n        interface provides output in molar units.\n\n        Examples\n        --------\n        >>> Chemical('o-xylene', T=297).rhol\n        876.9946785618097", "id": "f15812:c0:m59"}
{"signature": "@property<EOL><INDENT>def alphag(self):<DEDENT>", "body": "kg, rhog, Cpg = self.kg, self.rhog, self.Cpg<EOL>if all([kg, rhog, Cpg]):<EOL><INDENT>return thermal_diffusivity(k=kg, rho=rhog, Cp=Cpg)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Thermal diffusivity of the gas phase of the chemical at its\n        current temperature and pressure, in units of [m^2/s].\n\n        .. math::\n            \\alpha = \\frac{k}{\\rho Cp}\n\n        Utilizes the temperature and pressure dependent object oriented\n        interfaces :obj:`thermo.volume.VolumeGas`,\n        :obj:`thermo.thermal_conductivity.ThermalConductivityGas`,\n        and :obj:`thermo.heat_capacity.HeatCapacityGas` to calculate the\n        actual properties.\n\n        Examples\n        --------\n        >>> Chemical('ammonia').alphag\n        1.6931865425158556e-05", "id": "f15812:c0:m86"}
{"signature": "@property<EOL><INDENT>def Vmg(self):<DEDENT>", "body": "return self.VolumeGas(self.T, self.P)<EOL>", "docstring": "r'''Gas-phase molar volume of the chemical at its current\n        temperature and pressure, in units of [m^3/mol]. For calculation of this\n        property at other temperatures or pressures, or specifying manually the\n        method used to calculate it, and more - see the object oriented interface\n        :obj:`thermo.volume.VolumeGas`; each Chemical instance\n        creates one to actually perform the calculations.\n\n        Examples\n        --------\n        Estimate the molar volume of the core of the sun, at 15 million K and\n        26.5 PetaPascals, assuming pure helium (actually 68% helium):\n\n        >>> Chemical('helium', T=15E6, P=26.5E15).Vmg\n        4.805464238181197e-07", "id": "f15812:c0:m57"}
{"signature": "@property<EOL><INDENT>def isentropic_exponent(self):<DEDENT>", "body": "Cp, Cv = self.Cpg, self.Cvg<EOL>if all((Cp, Cv)):<EOL><INDENT>return isentropic_exponent(Cp, Cv)<EOL><DEDENT>return None<EOL>", "docstring": "r'''Gas-phase ideal-gas isentropic exponent of the chemical at its\n        current temperature, [dimensionless]. Does not include\n        pressure-compensation from an equation of state.\n\n        Examples\n        --------\n        >>> Chemical('hydrogen').isentropic_exponent\n        1.405237786321222", "id": "f15812:c0:m54"}
{"signature": "@property<EOL><INDENT>def nu(self):<DEDENT>", "body": "return phase_select_property(phase=self.phase, l=self.nul, g=self.nug)<EOL>", "docstring": "r'''Kinematic viscosity of the chemical at its current temperature,\n        pressure, and phase in units of [m^2/s].\n\n        .. math::\n            \\nu = \\frac{\\mu}{\\rho}\n\n        Examples\n        --------\n        >>> Chemical('argon').nu\n        1.3846930410865003e-05", "id": "f15812:c0:m102"}
{"signature": "@property<EOL><INDENT>def Cpsm(self):<DEDENT>", "body": "return self.HeatCapacitySolid(self.T)<EOL>", "docstring": "r'''Solid-phase heat capacity of the chemical at its current temperature,\n        in units of [J/mol/K]. For calculation of this property at other\n        temperatures, or specifying manually the method used to calculate it,\n        and more - see the object oriented interface\n        :obj:`thermo.heat_capacity.HeatCapacitySolid`; each Chemical instance\n        creates one to actually perform the calculations.\n\n        Examples\n        --------\n        >>> Chemical('palladium').Cpsm\n        24.930765664000003\n        >>> Chemical('palladium').HeatCapacitySolid.T_dependent_property(320)\n        25.098979200000002\n        >>> Chemical('palladium').HeatCapacitySolid.all_methods\n        set([\"Perry's Table 2-151\", 'CRC Standard Thermodynamic Properties of Chemical Substances', 'Lastovka, Fulem, Becerra and Shaw (2008)'])", "id": "f15812:c0:m46"}
{"signature": "def get_account_descendants(self, account):", "body": "result = []<EOL>for child in account.accounts:<EOL><INDENT>self._get_account_and_descendants_(child, result)<EOL><DEDENT>return result<EOL>", "docstring": "Retrieves an account's descendants from the general ledger structure\ngiven the account name.\n\n:param account_name: The account name.\n\n:returns: The decendants of the account.", "id": "f15818:c4:m5"}
{"signature": "def _get_account_and_descendants_(self, account, result):", "body": "result.append(account)<EOL>for child in account.accounts:<EOL><INDENT>self._get_account_and_descendants_(child, result)<EOL><DEDENT>", "docstring": "Returns the account and all of it's sub accounts.\n\n:param account: The account.\n:param result: The list to add all the accounts to.", "id": "f15818:c4:m6"}
{"signature": "def set_parent_path(self, value):", "body": "self._parent_path = value<EOL>self.path = value + r'<STR_LIT:/>' + self.name<EOL>self._update_childrens_parent_path()<EOL>", "docstring": "Set the parent path and the path from the new parent path.\n\n:param value: The path to the object's parent", "id": "f15818:c1:m3"}
{"signature": "def report(self, format=ReportFormat.printout, output_path=None):", "body": "rpt = GlsRpt(self, output_path)<EOL>return rpt.render(format)<EOL>", "docstring": "Returns a report of this class.\n\n:param format: The format of the report.\n:param output_path: The path to the file the report is written to.\n  If None, then the report is not written to a file.\n\n:returns: The descendants of the account.", "id": "f15818:c4:m8"}
{"signature": "def create_transaction(self, name, description=None,<EOL>tx_date=datetime.min.date(),<EOL>dt_account=None, cr_account=None,<EOL>source=None, amount=<NUM_LIT>):", "body": "new_tx = Transaction(name, description, tx_date,<EOL>dt_account, cr_account, source, amount)<EOL>self.transactions.append(new_tx)<EOL>return new_tx<EOL>", "docstring": "Create a transaction in the general ledger.\n\n:param name: The transaction's name.\n:param description: The transaction's description.\n:param tx_date: The date of the transaction.\n:param cr_account: The transaction's credit account's name.\n:param dt_account: The transaction's debit account's name.\n:param source: The name of source the transaction originated from.\n:param amount: The transaction amount.\n\n:returns: The created transaction.", "id": "f15818:c5:m1"}
{"signature": "def _create_account_(self, name, number, account_type):", "body": "new_acc = GeneralLedgerAccount(name, None, number, account_type)<EOL>self.accounts.append(new_acc)<EOL>return new_acc<EOL>", "docstring": "Create an account in the general ledger structure.\n\n:param name: The account name.\n:param number: The account number.\n:param account_type: The account type.\n\n:returns: The created account.", "id": "f15818:c4:m3"}
{"signature": "def validate_account_names(self, names):", "body": "for name in names:<EOL><INDENT>if self.get_account(name) is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(name))<EOL><DEDENT><DEDENT>", "docstring": "Validates whether the accounts in a list of account names exists.\n\n:param names: The names of the accounts.\n\n:returns: The descendants of the account.", "id": "f15818:c4:m7"}
{"signature": "def get_child_account(self, account_name):", "body": "if r'<STR_LIT:/>' in account_name:<EOL><INDENT>accs_in_path = account_name.split(r'<STR_LIT:/>', <NUM_LIT:1>)<EOL>curr_acc = self[accs_in_path[<NUM_LIT:0>]]<EOL>if curr_acc is None:<EOL><INDENT>return None<EOL><DEDENT>return curr_acc.get_child_account(accs_in_path[<NUM_LIT:1>])<EOL>pass<EOL><DEDENT>else:<EOL><INDENT>return self[account_name]<EOL><DEDENT>", "docstring": "Retrieves a child account.\nThis could be a descendant nested at any level.\n\n:param account_name: The name of the account to retrieve.\n\n:returns: The child account, if found, else None.", "id": "f15818:c1:m9"}
{"signature": "def income_statement(self, start=datetime.min,<EOL>end=datetime.max,<EOL>format=ReportFormat.printout,<EOL>component_path=\"<STR_LIT>\",<EOL>output_path=None):", "body": "rpt = IncomeStatement(self, start, end, component_path, output_path)<EOL>return rpt.render(format)<EOL>", "docstring": "Generate a transaction list report.\n\n:param start: The start date to generate the report for.\n:param end: The end date to generate the report for.\n:param format: The format of the report.\n:param component_path: The path of the component to filter the report's\n  transactions by.\n:param output_path: The path to the file the report is written to.\n  If None, then the report is not written to a file.\n\n:returns: The generated report.", "id": "f15818:c5:m4"}
{"signature": "@property<EOL><INDENT>def HHV(self):<DEDENT>", "body": "return self._HHV<EOL>", "docstring": "Get the higher heating value of the stream.\n\n:returns: MJ/kg coal, higher heating value", "id": "f15823:c2:m14"}
{"signature": "def __add__(self, other):", "body": "<EOL>if type(other) is MaterialPackage:<EOL><INDENT>if self.material == other.material:  <EOL><INDENT>result = MaterialPackage(self.material,<EOL>self._compound_masses +<EOL>other._compound_masses)<EOL>result.H = self._H + other._H<EOL>result.P = self.P<EOL>return result<EOL><DEDENT>else:  <EOL><INDENT>H = self.H + other.H<EOL>result = self.clone()<EOL>for compound in other.material.compounds:<EOL><INDENT>if compound not in self.material.compounds:<EOL><INDENT>raise Exception(\"<STR_LIT>\" + other.material.name +<EOL>\"<STR_LIT>\" +<EOL>self.material.name +<EOL>\"<STR_LIT>\" + compound +<EOL>\"<STR_LIT>\" +<EOL>self.material.name + \"<STR_LIT>\")<EOL><DEDENT>result = result + (compound,<EOL>other.get_compound_mass(compound))<EOL><DEDENT>result.H = H<EOL>return result<EOL><DEDENT><DEDENT>elif self._is_compound_mass_tuple(other):<EOL><INDENT>compound = other[<NUM_LIT:0>]<EOL>index = self.material.get_compound_index(compound)<EOL>mass = other[<NUM_LIT:1>]<EOL>enthalpy = thermo.H(compound, self._T, mass)<EOL>result = self.clone()<EOL>result._compound_masses[index] = result._compound_masses[index] +mass<EOL>result._H += enthalpy<EOL>result._P = self._P<EOL>return result<EOL><DEDENT>elif self._is_compound_mass_temperature_tuple(other):<EOL><INDENT>compound = other[<NUM_LIT:0>]<EOL>index = self.material.get_compound_index(compound)<EOL>mass = other[<NUM_LIT:1>]<EOL>temperature = other[<NUM_LIT:2>]<EOL>enthalpy = thermo.H(compound, temperature, mass)<EOL>result = self * <NUM_LIT:1.0><EOL>result._compound_masses[index] = result._compound_masses[index] +mass<EOL>result.H = self._H + enthalpy<EOL>result._P = self._P<EOL>return result<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Addition operator (+).\n\nAdd this package (self) and 'other' together, return the result as a\nnew package, and leave self unchanged.\n\n:param other: Can can be one of the following:\n         1. 
MaterialPackage\n            'other' is added to self to create a new package.\n         2. tuple: (compound, mass)\n            The specified mass of the specified compound is added to \\\n            self, assuming the added material has the same \\\n            temperature as self.\n         3. tuple: (compound, mass, temperature)\n            The specified mass of the specified compound at the \\\n            specified temperature is added to self.\n\n:returns: A new Material package that is the sum of self and 'other'.", "id": "f15823:c1:m2"}
{"signature": "def get_compound_mfr(self, compound):", "body": "if compound in self.material.compounds:<EOL><INDENT>return self._compound_mfrs[<EOL>self.material.get_compound_index(compound)]<EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>", "docstring": "Determine the mass flow rate of the specified compound in the stream.\n\n:param compound: Formula and phase of a compound, e.g. \"Fe2O3[S1]\".\n\n:returns: Mass flow rate. [kg/h]", "id": "f15823:c2:m22"}
{"signature": "def get_compound_afr(self, compound):", "body": "index = self.material.get_compound_index(compound)<EOL>return stoich.amount(compound, self._compound_mfrs[index])<EOL>", "docstring": "Determine the amount flow rate of the specified compound.\n\n:returns: Amount flow rate. [kmol/h]", "id": "f15823:c2:m24"}
{"signature": "@property<EOL><INDENT>def P(self):<DEDENT>", "body": "return self._P<EOL>", "docstring": "Determine the pressure of the package.\n\n        :returns: Pressure. [atm]", "id": "f15823:c1:m14"}
{"signature": "@Hfr.setter<EOL><INDENT>def Hfr(self, Hfr):<DEDENT>", "body": "self._Hfr = Hfr<EOL>self._T = self._calculate_T(Hfr)<EOL>", "docstring": "Set the enthalpy flow rate of the stream to the specified value, and\nrecalculate it's temperature.\n\n:param H: The new enthalpy flow rate value. [kWh/h]", "id": "f15823:c2:m11"}
{"signature": "def _is_compound_mfr_temperature_tuple(self, value):", "body": "if not type(value) is tuple:<EOL><INDENT>return False<EOL><DEDENT>elif not len(value) == <NUM_LIT:3>:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:0>]) is str:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:1>]) is float andnot type(value[<NUM_LIT:1>]) is numpy.float64 andnot type(value[<NUM_LIT:1>]) is numpy.float32:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:1>]) is float andnot type(value[<NUM_LIT:1>]) is numpy.float64 andnot type(value[<NUM_LIT:1>]) is numpy.float32:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Determines whether value is a tuple of the format\n        (compound(str), mfr(float), temperature(float)).\n\n        :param value: The value to be tested.\n\n        :returns: True or False", "id": "f15823:c2:m9"}
{"signature": "@property<EOL><INDENT>def mfr(self):<DEDENT>", "body": "return self._compound_mfrs.sum()<EOL>", "docstring": "Get the mass flow rate of the stream.\n\n:returns: Mass flow rate. [kg/h]", "id": "f15823:c2:m21"}
{"signature": "def extract(self, other):", "body": "<EOL>if type(other) is float ortype(other) is numpy.float64 ortype(other) is numpy.float32:<EOL><INDENT>return self._extract_mfr(other)<EOL><DEDENT>elif self._is_compound_mfr_tuple(other):<EOL><INDENT>return self._extract_compound_mfr(other[<NUM_LIT:0>], other[<NUM_LIT:1>])<EOL><DEDENT>elif type(other) is str:<EOL><INDENT>return self._extract_compound(other)<EOL><DEDENT>elif type(other) is Material:<EOL><INDENT>return self._extract_material(other)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Extract 'other' from this stream, modifying this stream and returning\nthe extracted material as a new stream.\n\n:param other: Can be one of the following:\n\n  * float: A mass flow rate equal to other is extracted from self. Self\n    is reduced by other and the extracted stream is returned as\n    a new stream.\n  * tuple (compound, mass): The other tuple specifies the mass flow\n    rate of a compound to be extracted. It is extracted from self and\n    the extracted mass flow rate is returned as a new stream.\n  * string: The 'other' string specifies the compound to be\n    extracted. All of the mass flow rate of that compound will be\n    removed from self and a new stream created with it.\n  * Material: The 'other' material specifies the list of\n    compounds to extract.\n\n\n:returns: New MaterialStream object.", "id": "f15823:c2:m29"}
{"signature": "def extract(self, other):", "body": "<EOL>if type(other) is float ortype(other) is numpy.float64 ortype(other) is numpy.float32:<EOL><INDENT>return self._extract_mass(other)<EOL><DEDENT>elif self._is_compound_mass_tuple(other):<EOL><INDENT>return self._extract_compound_mass(other[<NUM_LIT:0>], other[<NUM_LIT:1>])<EOL><DEDENT>elif type(other) is str:<EOL><INDENT>return self._extract_compound(other)<EOL><DEDENT>elif type(other) is Material:<EOL><INDENT>return self._extract_material(other)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Extract 'other' from this package, modifying this package and\nreturning the extracted material as a new package.\n\n:param other: Can be one of the following:\n\n  * float: A mass equal to other is extracted from self. Self is\n    reduced by other and the extracted package is returned as\n    a new package.\n  * tuple (compound, mass): The other tuple specifies the mass\n    of a compound to be extracted. It is extracted from self and\n    the extracted mass is returned as a new package.\n  * string: The 'other' string specifies the compound to be\n    extracted. All of the mass of that compound will be removed\n    from self and a new package created with it.\n  * Material: The 'other' material specifies the list of\n    compounds to extract.\n\n\n:returns: New MaterialPackage object.", "id": "f15823:c1:m27"}
{"signature": "def _calculate_DH298_coal(self):", "body": "m_C = <NUM_LIT:0>  <EOL>m_H = <NUM_LIT:0>  <EOL>m_O = <NUM_LIT:0>  <EOL>m_N = <NUM_LIT:0>  <EOL>m_S = <NUM_LIT:0>  <EOL>T = <NUM_LIT>  <EOL>Hin = <NUM_LIT:0.0>  <EOL>for compound in self.material.compounds:<EOL><INDENT>index = self.material.get_compound_index(compound)<EOL>if stoich.element_mass_fraction(compound, '<STR_LIT:C>') == <NUM_LIT:1.0>:<EOL><INDENT>m_C += self._compound_masses[index]<EOL>Hin = thermo.H(compound, T, self._compound_masses[index])<EOL><DEDENT>elif stoich.element_mass_fraction(compound, '<STR_LIT:H>') == <NUM_LIT:1.0>:<EOL><INDENT>m_H += self._compound_masses[index]<EOL>Hin = thermo.H(compound, T, self._compound_masses[index])<EOL><DEDENT>elif stoich.element_mass_fraction(compound, '<STR_LIT:O>') == <NUM_LIT:1.0>:<EOL><INDENT>m_O += self._compound_masses[index]<EOL>Hin = thermo.H(compound, T, self._compound_masses[index])<EOL><DEDENT>elif stoich.element_mass_fraction(compound, '<STR_LIT:N>') == <NUM_LIT:1.0>:<EOL><INDENT>m_N += self._compound_masses[index]<EOL>Hin = thermo.H(compound, T, self._compound_masses[index])<EOL><DEDENT>elif stoich.element_mass_fraction(compound, '<STR_LIT:S>') == <NUM_LIT:1.0>:<EOL><INDENT>m_S += self._compound_masses[index]<EOL>Hin = thermo.H(compound, T, self._compound_masses[index])<EOL><DEDENT><DEDENT>m_total = m_C + m_H + m_O + m_N + m_S  <EOL>Hout = <NUM_LIT:0.0>  <EOL>Hout += thermo.H('<STR_LIT>', T, cc(m_C, '<STR_LIT:C>', '<STR_LIT>', '<STR_LIT:C>'))<EOL>Hout += thermo.H('<STR_LIT>', T, cc(m_H, '<STR_LIT:H>', '<STR_LIT>', '<STR_LIT:H>'))<EOL>Hout += thermo.H('<STR_LIT>', T, m_O)<EOL>Hout += thermo.H('<STR_LIT>', T, m_N)<EOL>Hout += thermo.H('<STR_LIT>', T, cc(m_S, '<STR_LIT:S>', '<STR_LIT>', '<STR_LIT:S>'))<EOL>if self.HHV is None:<EOL><INDENT>HHV = Hout - Hin / m_total  <EOL><DEDENT>else:<EOL><INDENT>HHV = self.HHV / <NUM_LIT>  <EOL><DEDENT>return HHV + Hout / m_total<EOL>", "docstring": "Calculate the enthalpy of formation of the package at the 
specified temperature, in\ncase the material is coal.\n\n:returns: [kWh/kg] enthalpy of formation of daf coal", "id": "f15823:c1:m5"}
{"signature": "def get_element_mfrs(self, elements=None):", "body": "if elements is None:<EOL><INDENT>elements = self.material.elements<EOL><DEDENT>result = numpy.zeros(len(elements))<EOL>for compound in self.material.compounds:<EOL><INDENT>result += self.get_compound_mfr(compound) *stoich.element_mass_fractions(compound, elements)<EOL><DEDENT>return result<EOL>", "docstring": "Determine the mass flow rates of elements in the stream.\n\n:returns: Array of element mass flow rates. [kg/h]", "id": "f15823:c2:m26"}
{"signature": "@property<EOL><INDENT>def T(self):<DEDENT>", "body": "return self._T<EOL>", "docstring": "Get the temperature of the stream.\n\n:returns: Temperature. [\u00b0C]", "id": "f15823:c2:m12"}
{"signature": "def get_element_masses(self, elements=None):", "body": "if elements is None:<EOL><INDENT>elements = self.material.elements<EOL><DEDENT>result = numpy.zeros(len(elements))<EOL>for compound in self.material.compounds:<EOL><INDENT>result += self.get_compound_mass(compound) *numpy.array(stoich.element_mass_fractions(compound, elements))<EOL><DEDENT>return result<EOL>", "docstring": "Determine the masses of elements in the package.\n\n:returns: Array of element masses. [kg]", "id": "f15823:c1:m24"}
{"signature": "def get_compound_afrs(self):", "body": "result = self._compound_mfrs * <NUM_LIT:1.0><EOL>for compound in self.material.compounds:<EOL><INDENT>index = self.material.get_compound_index(compound)<EOL>result[index] = stoich.amount(compound, result[index])<EOL><DEDENT>return result<EOL>", "docstring": "Determine the amount flow rates of all the compounds.\n\n:returns: List of amount flow rates. [kmol/h]", "id": "f15823:c2:m23"}
{"signature": "def __mul__(self, scalar):", "body": "<EOL>if type(scalar) is float or type(scalar) is numpy.float64 ortype(scalar) is numpy.float32:<EOL><INDENT>if scalar < <NUM_LIT:0.0>:<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>result = MaterialPackage(self.material, self._compound_masses *<EOL>scalar, self._P, self._T)<EOL>return result<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "The multiplication operator (*).\n\nCreate a new package by multiplying self with scalar.\n\n:param scalar: The result is a new package with its content equal to\n  self multiplied by a scalar, leaving self unchanged.\n\n:returns: New MaterialPackage object.", "id": "f15823:c1:m3"}
{"signature": "def get_element_mass(self, element):", "body": "result = numpy.zeros(<NUM_LIT:1>)<EOL>for compound in self.material.compounds:<EOL><INDENT>result += self.get_compound_mass(compound) *numpy.array(stoich.element_mass_fractions(compound, [element]))<EOL><DEDENT>return result[<NUM_LIT:0>]<EOL>", "docstring": "Determine the mass of the specified elements in the package.\n\n:returns: Masses. [kg]", "id": "f15823:c1:m26"}
{"signature": "@HHV.setter<EOL><INDENT>def HHV(self, HHV):<DEDENT>", "body": "self._HHV = HHV  <EOL>if self.isCoal:<EOL><INDENT>self._DH298 = self._calculate_DH298_coal()<EOL><DEDENT>", "docstring": "Set the higher heating value of the stream to the specified value, and\nrecalculate the formation enthalpy of the daf coal.\n\n:param HHV: MJ/kg coal, higher heating value", "id": "f15823:c2:m15"}
{"signature": "def _calculate_H_coal(self, T):", "body": "m_C = <NUM_LIT:0>  <EOL>m_H = <NUM_LIT:0>  <EOL>m_O = <NUM_LIT:0>  <EOL>m_N = <NUM_LIT:0>  <EOL>m_S = <NUM_LIT:0>  <EOL>H = <NUM_LIT:0.0>  <EOL>for compound in self.material.compounds:<EOL><INDENT>index = self.material.get_compound_index(compound)<EOL>if stoich.element_mass_fraction(compound, '<STR_LIT:C>') == <NUM_LIT:1.0>:<EOL><INDENT>m_C += self._compound_masses[index]<EOL><DEDENT>elif stoich.element_mass_fraction(compound, '<STR_LIT:H>') == <NUM_LIT:1.0>:<EOL><INDENT>m_H += self._compound_masses[index]<EOL><DEDENT>elif stoich.element_mass_fraction(compound, '<STR_LIT:O>') == <NUM_LIT:1.0>:<EOL><INDENT>m_O += self._compound_masses[index]<EOL><DEDENT>elif stoich.element_mass_fraction(compound, '<STR_LIT:N>') == <NUM_LIT:1.0>:<EOL><INDENT>m_N += self._compound_masses[index]<EOL><DEDENT>elif stoich.element_mass_fraction(compound, '<STR_LIT:S>') == <NUM_LIT:1.0>:<EOL><INDENT>m_S += self._compound_masses[index]<EOL><DEDENT>else:<EOL><INDENT>dH = thermo.H(compound, T, self._compound_masses[index])<EOL>H += dH<EOL><DEDENT><DEDENT>m_total = y_C + y_H + y_O + y_N + y_S  <EOL>y_C = m_C / m_total<EOL>y_H = m_H / m_total<EOL>y_O = m_O / m_total<EOL>y_N = m_N / m_total<EOL>y_S = m_S / m_total<EOL>hmodel = coals.DafHTy()<EOL>H = hmodel.calculate(T=T+<NUM_LIT>, y_C=y_C, y_H=y_H, y_O=y_O, y_N=y_N,<EOL>y_S=y_S) / <NUM_LIT>  <EOL>H298 = hmodel.calculate(T=<NUM_LIT>, y_C=y_C, y_H=y_H, y_O=y_O, y_N=y_N,<EOL>y_S=y_S) / <NUM_LIT>  <EOL>Hdaf = H - H298 + self._DH298  <EOL>Hdaf *= m_total  <EOL>H += Hdaf<EOL>return H<EOL>", "docstring": "Calculate the enthalpy of the package at the specified temperature, in\ncase the material is coal.\n\n:param T: [\u00b0C] temperature\n\n:returns: [kWh] enthalpy", "id": "f15823:c1:m6"}
{"signature": "def _prepare_lines(self, lines):", "body": "result = list()<EOL>for line in lines:<EOL><INDENT>line = line.strip()<EOL>line = line.replace(\"<STR_LIT:\\t>\", \"<STR_LIT:U+0020>\")<EOL>while line.find(\"<STR_LIT:U+0020>\") > -<NUM_LIT:1>:<EOL><INDENT>line = line.replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:U+0020>\")<EOL><DEDENT>result.append(line)<EOL><DEDENT>return result<EOL>", "docstring": "Prepare the lines read from the text file before starting to\nprocess it.\n\n:param lines: The lines to prepare.", "id": "f15823:c0:m2"}
{"signature": "def _calculate_T(self, H):", "body": "<EOL>x = list()<EOL>x.append(self._T)<EOL>x.append(self._T + <NUM_LIT>)<EOL>y = list()<EOL>y.append(self._calculate_H(x[<NUM_LIT:0>]) - H)<EOL>y.append(self._calculate_H(x[<NUM_LIT:1>]) - H)<EOL>for i in range(<NUM_LIT:2>, <NUM_LIT:50>):<EOL><INDENT>x.append(x[i-<NUM_LIT:1>] - y[i-<NUM_LIT:1>]*((x[i-<NUM_LIT:1>] - x[i-<NUM_LIT:2>])/(y[i-<NUM_LIT:1>] - y[i-<NUM_LIT:2>])))<EOL>y.append(self._calculate_H(x[i]) - H)<EOL>if abs(y[i-<NUM_LIT:1>]) < <NUM_LIT>:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return x[len(x) - <NUM_LIT:1>]<EOL>", "docstring": "Calculate the temperature of the package given the specified\nenthalpy using a secant algorithm.\n\n:param H: Enthalpy. [kWh]\n\n:returns: Temperature. [\u00b0C]", "id": "f15823:c1:m7"}
{"signature": "def add_assay(self, name, assay):", "body": "if not type(assay) is numpy.ndarray:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>elif not assay.shape == (self.compound_count,):<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>elif name in self.raw_assays.keys():<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>self.raw_assays[name] = assay<EOL>self.converted_assays[name] = assay<EOL>", "docstring": "Add an assay to the material.\n\n:param name:  Assay name.\n:param assay: Numpy array containing the compound mass fractions for\n  the assay. The sequence of the assay's elements must correspond to\n  the sequence of the material's compounds.", "id": "f15823:c0:m8"}
{"signature": "def _calculate_H(self, T):", "body": "if self.isCoal:<EOL><INDENT>return self._calculate_Hfr_coal(T)<EOL><DEDENT>H = <NUM_LIT:0.0><EOL>for compound in self.material.compounds:<EOL><INDENT>index = self.material.get_compound_index(compound)<EOL>dH = thermo.H(compound, T, self._compound_masses[index])<EOL>H = H + dH<EOL><DEDENT>return H<EOL>", "docstring": "Calculate the enthalpy of the package at the specified temperature.\n\n:param T: Temperature. [\u00b0C]\n\n:returns: Enthalpy. [kWh]", "id": "f15823:c1:m4"}
{"signature": "@property<EOL><INDENT>def H(self):<DEDENT>", "body": "return self._H<EOL>", "docstring": "Get the enthalpy of the package.\n\n:returns: Enthalpy. [kWh]", "id": "f15823:c1:m10"}
{"signature": "@P.setter<EOL><INDENT>def P(self, P):<DEDENT>", "body": "self._P = P<EOL>", "docstring": "Set the pressure of the stream to the specified value.\n\n        :param P: Pressure. [atm]", "id": "f15823:c2:m17"}
{"signature": "@property<EOL><INDENT>def mass(self):<DEDENT>", "body": "return self._compound_masses.sum()<EOL>", "docstring": "Get the mass of the package.\n\n:returns: [kg]", "id": "f15823:c1:m19"}
{"signature": "@property<EOL><INDENT>def Hfr(self):<DEDENT>", "body": "return self._Hfr<EOL>", "docstring": "Get the enthalpy flow rate of the stream.\n\n:returns: Enthalpy flow rate. [kWh/h]", "id": "f15823:c2:m10"}
{"signature": "def get_assay(self):", "body": "return self._compound_masses / self._compound_masses.sum()<EOL>", "docstring": "Determine the assay of the package.\n\n:returns: Array of mass fractions.", "id": "f15823:c1:m18"}
{"signature": "def _is_compound_mass_temperature_tuple(self, value):", "body": "if not type(value) is tuple:<EOL><INDENT>return False<EOL><DEDENT>elif not len(value) == <NUM_LIT:3>:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:0>]) is str:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:1>]) is float andnot type(value[<NUM_LIT:1>]) is numpy.float64 andnot type(value[<NUM_LIT:1>]) is numpy.float32:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:1>]) is float andnot type(value[<NUM_LIT:1>]) is numpy.float64 andnot type(value[<NUM_LIT:1>]) is numpy.float32:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Determines whether value is a tuple of the format\n        (compound(str), mass(float), temperature(float)).\n\n        :param value: The value to be tested.\n\n        :returns: True or False", "id": "f15823:c1:m9"}
{"signature": "def clear(self):", "body": "self._compound_mfrs = self._compound_mfrs * <NUM_LIT:0.0><EOL>self._P = <NUM_LIT:1.0><EOL>self._T = <NUM_LIT><EOL>self._H = <NUM_LIT:0.0><EOL>", "docstring": "Set all the compound mass flow rates in the stream to zero.\nSet the pressure to 1, the temperature to 25 and the enthalpy to zero.", "id": "f15823:c2:m19"}
{"signature": "def clone(self):", "body": "result = copy.copy(self)<EOL>result._compound_mfrs = copy.deepcopy(self._compound_mfrs)<EOL>return result<EOL>", "docstring": "Create a complete copy of the stream.\n\n        :returns: A new MaterialStream object.", "id": "f15823:c2:m18"}
{"signature": "@property<EOL><INDENT>def P(self):<DEDENT>", "body": "return self._P<EOL>", "docstring": "Determine the pressure of the stream.\n\n        :returns: Pressure. [atm]", "id": "f15823:c2:m16"}
{"signature": "def _is_compound_mfr_tuple(self, value):", "body": "if not type(value) is tuple:<EOL><INDENT>return False<EOL><DEDENT>elif not len(value) == <NUM_LIT:2>:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:0>]) is str:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:1>]) is float andnot type(value[<NUM_LIT:1>]) is numpy.float64 andnot type(value[<NUM_LIT:1>]) is numpy.float32:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Determines whether value is a tuple of the format\n(compound(str), mfr(float)).\n\n:param value: The value to be tested.\n\n:returns: True or False", "id": "f15823:c2:m8"}
{"signature": "def create_package(self, assay=None, mass=<NUM_LIT:0.0>, P=<NUM_LIT:1.0>, T=<NUM_LIT>,<EOL>normalise=True):", "body": "if assay is None:<EOL><INDENT>return MaterialPackage(self, self.create_empty_assay(), P, T)<EOL><DEDENT>if normalise:<EOL><INDENT>assay_total = self.get_assay_total(assay)<EOL><DEDENT>else:<EOL><INDENT>assay_total = <NUM_LIT:1.0><EOL><DEDENT>return MaterialPackage(self, mass * self.converted_assays[assay] /<EOL>assay_total, P, T, self._isCoal(assay),<EOL>self._get_HHV(assay))<EOL>", "docstring": "Create a MaterialPackage based on the specified parameters.\n\n:param assay:     Name of the assay to be used to create the package.\n:param mass:      Package mass. [kg]\n:param P:         Package pressure. [atm]\n:param T:         Package temperature. [\u00b0C]\n:param normalise: Indicates whether the assay must be normalised\n  before creating the package.\n\n:returns: MaterialPackage object.", "id": "f15823:c0:m10"}
{"signature": "def get_size_class_index(self, size_class):", "body": "return self.size_classes.index(size_class)<EOL>", "docstring": "Determine the index of the specified size class.\n\n:param size_class: The formula and phase of the specified size class,\n  e.g. 'Fe2O3[S1]'.\n\n:returns: The index of the specified size class.", "id": "f15826:c0:m3"}
{"signature": "def __str__(self):", "body": "result = \"<STR_LIT>\" + self.name + \"<STR_LIT>\"<EOL>result = result + \"<STR_LIT>\".ljust(<NUM_LIT:20>)<EOL>assay_names = sorted(self.assays.keys())<EOL>for assay_name in assay_names:<EOL><INDENT>result = result + assay_name.ljust(<NUM_LIT:20>)<EOL><DEDENT>result = result + \"<STR_LIT:\\n>\"<EOL>result = result + \"<STR_LIT>\".ljust(<NUM_LIT:20>)<EOL>assay_names = sorted(self.assays.keys())<EOL>for assay_name in assay_names:<EOL><INDENT>result = result + str(self.solid_densities[assay_name]).ljust(<NUM_LIT:20>)<EOL><DEDENT>result = result + \"<STR_LIT:\\n>\"<EOL>result = result + \"<STR_LIT>\".ljust(<NUM_LIT:20>)<EOL>assay_names = sorted(self.assays.keys())<EOL>for assay_name in assay_names:<EOL><INDENT>result = result + str(self.H2O_fractions[assay_name]).ljust(<NUM_LIT:20>)<EOL><DEDENT>result = result + \"<STR_LIT:\\n>\"<EOL>for size_class in self.size_classes:<EOL><INDENT>result = result + str(size_class).ljust(<NUM_LIT:20>)<EOL>compound_index = self.get_size_class_index(size_class)<EOL>for assay_name in assay_names:<EOL><INDENT>result = result + str(<EOL>self.assays[assay_name][compound_index]).ljust(<NUM_LIT:20>)<EOL><DEDENT>result = result + \"<STR_LIT:\\n>\"<EOL><DEDENT>return result<EOL>", "docstring": "Create a string representation of self.", "id": "f15826:c0:m1"}
{"signature": "def __add__(self, other):", "body": "<EOL>if type(other) is MaterialPackage:<EOL><INDENT>solid_mass = self.get_solid_mass()<EOL>other_solid_mass = other.get_solid_mass()<EOL>solid_density = (solid_mass + other_solid_mass) /(solid_mass / self.solid_density +<EOL>other_solid_mass / other.solid_density)<EOL>H2O_mass = self.H2O_mass + other.H2O_mass<EOL>if self.material == other.material:<EOL><INDENT>result = MaterialPackage(<EOL>self.material,<EOL>solid_density,<EOL>H2O_mass,<EOL>self.size_class_masses + other.size_class_masses)<EOL>return result<EOL><DEDENT>else:  <EOL><INDENT>result = self.clone()<EOL>result.solid_density = solid_density<EOL>result.H2O_mass = H2O_mass<EOL>for size_class in other.material.size_classes:<EOL><INDENT>if size_class not in self.material.size_classes:<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\" + other.material.name +<EOL>\"<STR_LIT>\" +<EOL>self.material.name + \"<STR_LIT>\" +<EOL>size_class + \"<STR_LIT>\" +<EOL>self.material.name + \"<STR_LIT>\")<EOL><DEDENT>result = result + (<EOL>size_class, other.get_size_class_mass(size_class))<EOL><DEDENT>return result<EOL><DEDENT><DEDENT>elif self._is_H2O_mass_tuple(other):<EOL><INDENT>mass = other[<NUM_LIT:1>]<EOL>result = self.clone()<EOL>result.H2O_mass += mass<EOL>return result<EOL><DEDENT>elif self._is_size_class_mass_tuple(other):<EOL><INDENT>size_class = other[<NUM_LIT:0>]<EOL>compound_index = self.material.get_size_class_index(size_class)<EOL>mass = other[<NUM_LIT:1>]<EOL>result = self.clone()<EOL>result.size_class_masses[compound_index] =result.size_class_masses[compound_index] + mass<EOL>return result<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Addition operator (+).\nAdd self and 'other' together, return the result as a new package,\nand leave self unchanged.\n\n:param other: Can can be one of the following:\n  1. MaterialPackage: 'other' is added to self to create a new package.\n  2. 
tuple: (size class, mass) The specified mass of the specified size\n  class is added to self.\n\n:returns: A new Material package that is the sum of self and 'other'.", "id": "f15826:c1:m2"}
{"signature": "def get_volume_fraction_solids(self):", "body": "return <NUM_LIT:1.0> - (self.H2O_mass / <NUM_LIT:1.0>) / self.get_volume()<EOL>", "docstring": "Determine the volume fraction of the solids of self.", "id": "f15826:c1:m16"}
{"signature": "def clear(self):", "body": "self.solid_density = <NUM_LIT:1.0><EOL>self.H2O_mass = <NUM_LIT:0.0><EOL>self.size_class_masses = self.size_class_masses * <NUM_LIT:0.0><EOL>", "docstring": "Set all the size class masses and H20_mass in the package to zero\nand the solid_density to 1.0", "id": "f15826:c1:m7"}
{"signature": "def __str__(self):", "body": "result = \"<STR_LIT>\"<EOL>result = result + \"<STR_LIT>\".ljust(<NUM_LIT>) + self.material.name + \"<STR_LIT:\\n>\"<EOL>result = result + \"<STR_LIT>\".ljust(<NUM_LIT>) +str(self.get_mass_fraction_solids()) + \"<STR_LIT:\\n>\"<EOL>result = result + \"<STR_LIT>\".ljust(<NUM_LIT>) +str(self.get_volume_fraction_solids()) + \"<STR_LIT:\\n>\"<EOL>result = result + \"<STR_LIT>\".ljust(<NUM_LIT>) +str(self.solid_density) + \"<STR_LIT:\\n>\"<EOL>result = result + \"<STR_LIT>\".ljust(<NUM_LIT>) +str(self.get_density()) + \"<STR_LIT:\\n>\"<EOL>result = result + \"<STR_LIT>\".ljust(<NUM_LIT>) + str(self.get_mass()) + \"<STR_LIT:\\n>\"<EOL>result = result + \"<STR_LIT>\".ljust(<NUM_LIT>) + str(self.H2O_mass) + \"<STR_LIT:\\n>\"<EOL>result = result + \"<STR_LIT>\".ljust(<NUM_LIT>) + str(self.get_volume()) + \"<STR_LIT:\\n>\"<EOL>result = result + \"<STR_LIT>\"<EOL>for size_class in self.material.size_classes:<EOL><INDENT>index = self.material.get_size_class_index(size_class)<EOL>result = result + str(size_class).ljust(<NUM_LIT>) + str(<EOL>self.size_class_masses[index]) + \"<STR_LIT:\\n>\"<EOL><DEDENT>return result<EOL>", "docstring": "Create a string representation of the object.", "id": "f15826:c1:m1"}
{"signature": "def _prepare_lines(self, lines):", "body": "result = list()<EOL>for line in lines:<EOL><INDENT>line = line.strip()<EOL>line = line.replace(\"<STR_LIT:\\t>\", \"<STR_LIT:U+0020>\")<EOL>while line.find(\"<STR_LIT:U+0020>\") > -<NUM_LIT:1>:<EOL><INDENT>line = line.replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:U+0020>\")<EOL><DEDENT>result.append(line)<EOL><DEDENT>return result<EOL>", "docstring": "Prepare the lines read from the text file before starting to process\nit.", "id": "f15826:c0:m2"}
{"signature": "def get_mass(self):", "body": "return self.size_class_masses.sum() + self.H2O_mass<EOL>", "docstring": "Determine the mass of self.\n\n:returns: [kg] The mass of self.", "id": "f15826:c1:m9"}
{"signature": "def get_volume(self):", "body": "return self.H2O_mass / <NUM_LIT:1.0> + self.get_solid_mass() / self.solid_density<EOL>", "docstring": "Determine the volume of self.", "id": "f15826:c1:m15"}
{"signature": "def _is_size_class_mass_tuple(self, value):", "body": "if not type(value) is tuple:<EOL><INDENT>return False<EOL><DEDENT>elif not len(value) == <NUM_LIT:2>:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:0>]) is float:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:1>]) is float andnot type(value[<NUM_LIT:1>]) is numpy.float64 andnot type(value[<NUM_LIT:1>]) is numpy.float32:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Determines whether value is a tuple of the format\n(size class(float), mass(float)).\n\n:param value: The value to check.\n\n:returns: Whether the value is a tuple in the required format.", "id": "f15826:c1:m4"}
{"signature": "def _is_H2O_mass_tuple(self, value):", "body": "if not type(value) is tuple:<EOL><INDENT>return False<EOL><DEDENT>elif not len(value) == <NUM_LIT:2>:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:0>]) is str and not value[<NUM_LIT:0>] == \"<STR_LIT>\":<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:1>]) is float andnot type(value[<NUM_LIT:1>]) is numpy.float64 andnot type(value[<NUM_LIT:1>]) is numpy.float32:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Determines whether value is a tuple of the format\n(size class(float), mass(float)).\n\n:param value: The value to check.\n\n:returns: Whether the value is a tuple in the required format.", "id": "f15826:c1:m5"}
{"signature": "def get_density(self):", "body": "return self.get_mass() / self.get_volume()<EOL>", "docstring": "Determine the density of self.", "id": "f15826:c1:m13"}
{"signature": "def add_assay(self, name, solid_density, H2O_fraction, assay):", "body": "if not type(solid_density) is float:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>self.solid_densities[name] = solid_density<EOL>if not type(H2O_fraction) is float:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>self.H2O_fractions[name] = H2O_fraction<EOL>if not type(assay) is numpy.ndarray:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>elif not assay.shape == (self.size_class_count,):<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>elif name in self.assays.keys():<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\")<EOL><DEDENT>self.assays[name] = assay<EOL>", "docstring": "Add an assay to the material.\n\n        :param name: The name of the new assay.\n        :param assay: A numpy array containing the size class mass fractions\n          for the assay. The sequence of the assay's elements must correspond\n          to the sequence of the material's size classes.", "id": "f15826:c0:m5"}
{"signature": "def get_size_class_mass(self, size_class):", "body": "return self.size_class_masses[self.material.get_size_class_index(<EOL>size_class)]<EOL>", "docstring": "Determine the mass of the specified size class in self.\n\n:param size_class: The formula and phase of the size class,\n  e.g. 'Fe2O3[S1]'\n\n:returns: [kg] The mass of the size class in self.", "id": "f15827:c1:m9"}
{"signature": "def get_size_class_index(self, size_class):", "body": "return self.size_classes.index(size_class)<EOL>", "docstring": "Determine the index of the specified size class.\n\n:param size_class: The formula and phase of the specified size class,\n  e.g. 'Fe2O3[S1]'.\n\n:returns: The index of the specified size class.", "id": "f15827:c0:m3"}
{"signature": "def add_to(self, other):", "body": "<EOL>if type(other) is MaterialPackage:<EOL><INDENT>if self.material == other.material:<EOL><INDENT>self.size_class_masses =self.size_class_masses + other.size_class_masses<EOL><DEDENT>else:  <EOL><INDENT>for size_class in other.material.size_classes:<EOL><INDENT>if size_class not in self.material.size_classes:<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\" + other.material.name +<EOL>\"<STR_LIT>\" +<EOL>self.material.name +<EOL>\"<STR_LIT>\" + size_class +<EOL>\"<STR_LIT>\" + self.material.name + \"<STR_LIT>\")<EOL><DEDENT>self.add_to(<EOL>(size_class, other.get_size_class_mass(size_class)))<EOL><DEDENT><DEDENT><DEDENT>elif self._is_size_class_mass_tuple(other):<EOL><INDENT>size_class = other[<NUM_LIT:0>]<EOL>compound_index = self.material.get_size_class_index(size_class)<EOL>mass = other[<NUM_LIT:1>]<EOL>self.size_class_masses[compound_index] =self.size_class_masses[compound_index] + mass<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Add another psd material package to this material package.\n\n:param other: The other material package.", "id": "f15827:c1:m12"}
{"signature": "def get_assay(self):", "body": "return self.size_class_masses / self.size_class_masses.sum()<EOL>", "docstring": "Determine the assay of self.\n\n:returns: [mass fractions] An array containing the assay of self.", "id": "f15827:c1:m7"}
{"signature": "def clear(self):", "body": "self.size_class_masses = self.size_class_masses * <NUM_LIT:0.0><EOL>", "docstring": "Set all the size class masses in the package to zero.", "id": "f15827:c1:m6"}
{"signature": "def create_package(self, assay=None, mass=<NUM_LIT:0.0>, normalise=True):", "body": "if assay is None:<EOL><INDENT>return MaterialPackage(self, self.create_empty_assay())<EOL><DEDENT>if normalise:<EOL><INDENT>assay_total = self.get_assay_total(assay)<EOL><DEDENT>else:<EOL><INDENT>assay_total = <NUM_LIT:1.0><EOL><DEDENT>return MaterialPackage(self, mass * self.assays[assay] / assay_total)<EOL>", "docstring": "Create a MaterialPackage based on the specified parameters.\n\n:param assay: The name of the assay based on which the package must be\n  created.\n:param mass: [kg] The mass of the package.\n:param normalise: Indicates whether the assay must be normalised before\n  creating the package.\n\n:returns: The created MaterialPackage.", "id": "f15827:c0:m7"}
{"signature": "def _prepare_lines(self, lines):", "body": "result = list()<EOL>for line in lines:<EOL><INDENT>line = line.strip()<EOL>line = line.replace(\"<STR_LIT:\\t>\", \"<STR_LIT:U+0020>\")<EOL>while line.find(\"<STR_LIT:U+0020>\") > -<NUM_LIT:1>:<EOL><INDENT>line = line.replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:U+0020>\")<EOL><DEDENT>result.append(line)<EOL><DEDENT>return result<EOL>", "docstring": "Prepare the lines read from the text file before starting to process\nit.", "id": "f15827:c0:m2"}
{"signature": "def _is_size_class_mass_tuple(self, value):", "body": "if not type(value) is tuple:<EOL><INDENT>return False<EOL><DEDENT>elif not len(value) == <NUM_LIT:2>:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:0>]) is float:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:1>]) is float andnot type(value[<NUM_LIT:1>]) is numpy.float64 andnot type(value[<NUM_LIT:1>]) is numpy.float32:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Determines whether value is a tuple of the format\n(size class(float), mass(float)).\n\n:param value: The value to check.\n\n:returns: Whether the value is a tuple in the required format.", "id": "f15827:c1:m4"}
{"signature": "def get_mass(self):", "body": "return self.size_class_masses.sum()<EOL>", "docstring": "Determine the mass of self.\n\nreturns: [kg] The mass of self.", "id": "f15827:c1:m8"}
{"signature": "def mu(self, **state):", "body": "raise NotImplementedError()<EOL>", "docstring": "Calculate the mu value given the material state.\n\n:param **state: material state\n\n:returns: float", "id": "f15830:c0:m5"}
{"signature": "def beta(self, **state):", "body": "raise NotImplementedError()<EOL>", "docstring": "Calculate the beta value given the material state.\n\n:param **state: material state\n\n:returns: float", "id": "f15830:c0:m2"}
{"signature": "def nu(self, **state):", "body": "return self.mu(**state) / self.rho(**state)<EOL>", "docstring": "Calculate the nu value given the material state.\n\n:param **state: material state\n\n:returns: float", "id": "f15830:c0:m6"}
{"signature": "def add_to(self, other):", "body": "<EOL>if type(other) is MaterialPackage:<EOL><INDENT>if self.material == other.material:<EOL><INDENT>self.compound_masses += other.compound_masses<EOL><DEDENT>else:<EOL><INDENT>for compound in other.material.compounds:<EOL><INDENT>if compound not in self.material.compounds:<EOL><INDENT>raise Exception(\"<STR_LIT>\" + other.material.name +<EOL>\"<STR_LIT>\" +<EOL>self.material.name +<EOL>\"<STR_LIT>\" + compound +<EOL>\"<STR_LIT>\" +<EOL>self.material.name + \"<STR_LIT>\")<EOL><DEDENT>self.add_to((compound, other.get_compound_mass(compound)))<EOL><DEDENT><DEDENT><DEDENT>elif self._is_compound_mass_tuple(other):<EOL><INDENT>compound = other[<NUM_LIT:0>]<EOL>compound_index = self.material.get_compound_index(compound)<EOL>mass = other[<NUM_LIT:1>]<EOL>self.compound_masses[compound_index] += mass<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Add another chem material package to this material package.\n\n:param other: The other material package.", "id": "f15833:c1:m16"}
{"signature": "def clone(self):", "body": "result = copy.copy(self)<EOL>result.compound_masses = copy.deepcopy(self.compound_masses)<EOL>return result<EOL>", "docstring": "Create a complete copy of self.\n\n:returns: A MaterialPackage that is identical to self.", "id": "f15833:c1:m6"}
{"signature": "def clear(self):", "body": "self.compound_masses *= <NUM_LIT:0.0><EOL>", "docstring": "Set all the compound masses in the package to zero.", "id": "f15833:c1:m7"}
{"signature": "def _is_compound_mass_tuple(self, value):", "body": "if not type(value) is tuple:<EOL><INDENT>return False<EOL><DEDENT>elif not len(value) == <NUM_LIT:2>:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:0>]) is str:<EOL><INDENT>return False<EOL><DEDENT>elif not type(value[<NUM_LIT:1>]) is float:<EOL><INDENT>return False<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>", "docstring": "Determines whether value is a tuple of the format (compound(str),\nmass(float)).", "id": "f15833:c1:m5"}
{"signature": "def get_assay_total(self, name):", "body": "return sum(self.assays[name])<EOL>", "docstring": "Calculate the total of the specified assay.\n\n:param name: The name of the assay.\n\n:returns: The total mass fraction of the specified assay.", "id": "f15833:c0:m9"}
{"signature": "def __add__(self, other):", "body": "<EOL>if type(other) is MaterialPackage:<EOL><INDENT>if self.material == other.material:<EOL><INDENT>result = MaterialPackage(<EOL>self.material,<EOL>self.compound_masses + other.compound_masses)<EOL>return result<EOL><DEDENT>else:  <EOL><INDENT>result = self.clone()<EOL>for compound in other.material.compounds:<EOL><INDENT>if compound not in self.material.compounds:<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\" + other.material.name +<EOL>\"<STR_LIT>\" +<EOL>self.material.name +<EOL>\"<STR_LIT>\" + compound +<EOL>\"<STR_LIT>\" + self.material.name + \"<STR_LIT>\")<EOL><DEDENT>result += (compound, other.get_compound_mass(compound))<EOL><DEDENT>return result<EOL><DEDENT><DEDENT>elif self._is_compound_mass_tuple(other):<EOL><INDENT>compound = other[<NUM_LIT:0>]<EOL>compound_index = self.material.get_compound_index(compound)<EOL>mass = other[<NUM_LIT:1>]<EOL>result = self.clone()<EOL>result.compound_masses[compound_index] += mass<EOL>return result<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Add self and 'other' together, return the result as a new package, and\nleave self unchanged.\n\n:param other: Can can be one of the following:\n  1. MaterialPackage: 'other' is added to self to create a new package.\n  2. tuple: (compound, mass): The specified mass of the specified\n  compound is added to self.\n\n:returns: A new Material package that is the sum of self and 'other'.", "id": "f15833:c1:m2"}
{"signature": "def get_element_mass_dictionary(self):", "body": "element_symbols = self.material.elements<EOL>element_masses = self.get_element_masses()<EOL>result = {}<EOL>for s, m in zip(element_symbols, element_masses):<EOL><INDENT>result[s] = m<EOL><DEDENT>return result<EOL>", "docstring": "Determine the masses of elements in the package and return as a\ndictionary.\n\n:returns: [kg] A dictionary of element symbols and masses.", "id": "f15833:c1:m13"}
{"signature": "def __mul__(self, scalar):", "body": "<EOL>if type(scalar) is float:<EOL><INDENT>if scalar < <NUM_LIT:0.0>:<EOL><INDENT>raise Exception(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>result = MaterialPackage(<EOL>self.material, [c * scalar for c in self.compound_masses])<EOL>return result<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "The multiplication operator (*).\n\nCreate a new package by multiplying self with scalar.\n\n:param scalar: The result is a new package with its content equal to\n  self multiplied by a scalar, leaving self unchanged.\n\n:returns: A new MaterialPackage equal to self package multiplied by\n  other.", "id": "f15833:c1:m3"}
{"signature": "def get_element_mass(self, element):", "body": "result = [<NUM_LIT:0>]<EOL>for compound in self.material.compounds:<EOL><INDENT>c = self.get_compound_mass(compound)<EOL>f = [c * x for x in emf(compound, [element])]<EOL>result = [v+f[ix] for ix, v in enumerate(result)]<EOL><DEDENT>return result[<NUM_LIT:0>]<EOL>", "docstring": "Determine the mass of the specified element in the package.\n\n:param element: The element whose mass must be determined.\n\n:returns: [kg] The mass of the specified element in the package.", "id": "f15833:c1:m14"}
{"signature": "def get_mass(self):", "body": "return sum(self.compound_masses)<EOL>", "docstring": "Get the mass of the package.\n\n:returns: [kg]", "id": "f15833:c1:m9"}
{"signature": "def create_package(self, assay=None, mass=<NUM_LIT:0.0>, normalise=True):", "body": "if assay is None:<EOL><INDENT>return MaterialPackage(self, self.create_empty_assay())<EOL><DEDENT>if normalise:<EOL><INDENT>assay_total = self.get_assay_total(assay)<EOL><DEDENT>else:<EOL><INDENT>assay_total = <NUM_LIT:1.0><EOL><DEDENT>masses = [(mass * m) / assay_total for m in self.assays[assay]]<EOL>return MaterialPackage(self, masses)<EOL>", "docstring": "Create a MaterialPackage based on the specified parameters.\n\n:param assay: The name of the assay based on which the package must be\n  created.\n:param mass: [kg] The mass of the package.\n:param normalise: Indicates whether the assay must be normalised before\n  creating the package.\n\n:returns: The created MaterialPackage.", "id": "f15833:c0:m10"}
{"signature": "def run(self, clock, generalLedger):", "body": "if not self._meet_execution_criteria(clock.timestep_ix):<EOL><INDENT>return<EOL><DEDENT>if self.description is None:<EOL><INDENT>tx_name = self.name<EOL><DEDENT>else:<EOL><INDENT>tx_name = self.description<EOL><DEDENT>if self._months_executed == <NUM_LIT:0>:<EOL><INDENT>generalLedger.create_transaction(<EOL>tx_name,<EOL>description='<STR_LIT>',<EOL>tx_date=clock.get_datetime(),<EOL>dt_account=self.bank_account,<EOL>cr_account=self.loan_account,<EOL>source=self.path,<EOL>amount=self.amount)<EOL><DEDENT>else:<EOL><INDENT>curr_interest_amount = (self._amount_left *<EOL>self.interest_rate) / <NUM_LIT><EOL>generalLedger.create_transaction(<EOL>tx_name,<EOL>description='<STR_LIT>',<EOL>tx_date=clock.get_datetime(),<EOL>dt_account=self.interest_account,<EOL>cr_account=self.loan_account,<EOL>source=self.path,<EOL>amount=curr_interest_amount)<EOL>generalLedger.create_transaction(<EOL>tx_name,<EOL>description='<STR_LIT>',<EOL>tx_date=clock.get_datetime(),<EOL>dt_account=self.loan_account,<EOL>cr_account=self.bank_account,<EOL>source=self.path,<EOL>amount=self._monthly_payment)<EOL>self._amount_left += curr_interest_amount - self._monthly_payment<EOL><DEDENT>self._months_executed += self.interval<EOL>", "docstring": "Execute the activity at the current clock cycle.\n\n:param clock: The clock containing the current execution time and\n  period information.\n:param generalLedger: The general ledger into which to create the\n  transactions.", "id": "f15837:c1:m8"}
{"signature": "def run(self, clock, generalLedger):", "body": "if not self._meet_execution_criteria(clock.timestep_ix):<EOL><INDENT>return<EOL><DEDENT>generalLedger.create_transaction(<EOL>self.description if self.description is not None else self.name,<EOL>description='<STR_LIT>',<EOL>tx_date=clock.get_datetime(),<EOL>dt_account=self.dt_account,<EOL>cr_account=self.cr_account,<EOL>source=self.path,<EOL>amount=self.amount)<EOL>", "docstring": "Execute the activity at the current clock cycle.\n\n:param clock: The clock containing the current execution time and\n  period information.\n:param generalLedger: The general ledger into which to create the\n  transactions.", "id": "f15837:c0:m2"}
{"signature": "def create_component(self, name, description=None):", "body": "new_comp = Component(name, self.gl, description=description)<EOL>new_comp.set_parent_path(self.path)<EOL>self.components.append(new_comp)<EOL>return new_comp<EOL>", "docstring": "Create a sub component in the business component.\n\n:param name: The new component's name.\n:param description: The new component's description.\n\n:returns: The created component.", "id": "f15838:c1:m7"}
{"signature": "def set_parent_path(self, value):", "body": "self._parent_path = value<EOL>self.path = value + r'<STR_LIT:/>' + self.name<EOL>self._update_childrens_parent_path()<EOL>", "docstring": "Set the parent path and the path from the new parent path.\n\n:param value: The path to the object's parent", "id": "f15838:c2:m2"}
{"signature": "def remove_component(self, name):", "body": "component_to_remove = None<EOL>for c in self.components:<EOL><INDENT>if c.name == name:<EOL><INDENT>component_to_remove = c<EOL><DEDENT><DEDENT>if component_to_remove is not None:<EOL><INDENT>self.components.remove(component_to_remove)<EOL><DEDENT>", "docstring": "Remove a sub component from the component.\n\n:param name: The name of the component to remove.", "id": "f15838:c1:m8"}
{"signature": "def get_referenced_accounts(self):", "body": "return []<EOL>", "docstring": "Retrieve the general ledger accounts referenced in this instance.\n\n:returns: The referenced accounts.", "id": "f15838:c0:m7"}
{"signature": "def set_parent_path(self, value):", "body": "self._parent_path = value<EOL>self.path = value + r'<STR_LIT:/>' + self.name<EOL>self._update_childrens_parent_path()<EOL>", "docstring": "Set the parent path and the path from the new parent path.\n\n:param value: The path to the object's parent.", "id": "f15838:c1:m3"}
{"signature": "def remove_entity(self, name):", "body": "entity_to_remove = None<EOL>for e in self.entities:<EOL><INDENT>if e.name == name:<EOL><INDENT>entity_to_remove = e<EOL><DEDENT><DEDENT>if entity_to_remove is not None:<EOL><INDENT>self.entities.remove(entity_to_remove)<EOL><DEDENT>", "docstring": "Remove an entity from the model.\n\n:param name: The name of the entity to remove.", "id": "f15840:c0:m5"}
{"signature": "def run(self):", "body": "self.prepare_to_run()<EOL>for i in range(<NUM_LIT:0>, self.period_count):<EOL><INDENT>for e in self.entities:<EOL><INDENT>e.run(self.clock)<EOL><DEDENT>self.clock.tick()<EOL><DEDENT>", "docstring": "Execute the model.", "id": "f15840:c0:m7"}
{"signature": "def create_entity(self, name, gl_structure, description=None):", "body": "new_entity = Entity(name, gl_structure, description=description)<EOL>self.entities.append(new_entity)<EOL>return new_entity<EOL>", "docstring": "Create an entity and add it to the model.\n\n:param name: The entity name.\n:param gl_structure: The entity's general ledger structure.\n:param description: The entity description.\n\n:returns: The created entity.", "id": "f15840:c0:m4"}
{"signature": "def calculate(self, **state):", "body": "T = state['<STR_LIT:T>']<EOL>y = state['<STR_LIT:y>']<EOL>x = amount_fractions(y)<EOL>return super().calculate(T=T, x=x)<EOL>", "docstring": "Calculate dynamic viscosity at the specified temperature and\ncomposition:\n\n:param T: [K] temperature\n:param y: [mass fraction] composition dictionary , e.g. \\\n{'SiO2': 0.25, 'CaO': 0.25, 'MgO': 0.25, 'FeO': 0.25}\n\n:returns: [Pa.s] dynamic viscosity\n\nThe **state parameter contains the keyword argument(s) specified above\\\nthat are used to describe the state of the material.", "id": "f15847:c1:m1"}
{"signature": "def calculate(self, **state):", "body": "T = state['<STR_LIT:T>']<EOL>x = state['<STR_LIT:x>']<EOL>compounds_sio2 = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>compounds_cao = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>compounds_al2o3 = ['<STR_LIT>']<EOL>compounds_caf2 = ['<STR_LIT>']<EOL>compounds_na2o = ['<STR_LIT>', '<STR_LIT>']<EOL>compounds_all = (compounds_sio2 + compounds_cao + compounds_al2o3 +<EOL>compounds_caf2 + compounds_na2o)<EOL>if '<STR_LIT>' in x:<EOL><INDENT>x['<STR_LIT>'] = <NUM_LIT> * x['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in x:<EOL><INDENT>x['<STR_LIT>'] = <NUM_LIT> * x['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in x:<EOL><INDENT>x['<STR_LIT>'] = <NUM_LIT> * x['<STR_LIT>']<EOL><DEDENT>x_total = sum([x.get(c, <NUM_LIT:0.0>) for c in compounds_all])<EOL>x = {c: x.get(c, <NUM_LIT:0.0>)/x_total for c in compounds_all}<EOL>x1 = sum([x.get(c, <NUM_LIT:0.0>) for c in compounds_sio2])<EOL>x2 = sum([x.get(c, <NUM_LIT:0.0>) for c in compounds_cao])<EOL>x3 = sum([x.get(c, <NUM_LIT:0.0>) for c in compounds_al2o3])<EOL>x4 = sum([x.get(c, <NUM_LIT:0.0>) for c in compounds_caf2])<EOL>x5 = sum([x.get(c, <NUM_LIT:0.0>) for c in compounds_na2o])<EOL>A = exp(-<NUM_LIT> + <NUM_LIT>*x2 + <NUM_LIT>*x4 + <NUM_LIT>*x5 - <NUM_LIT>*x3)<EOL>B = <NUM_LIT> - <NUM_LIT>*x2 - <NUM_LIT>*x4 - <NUM_LIT>*x5 + <NUM_LIT>*x3<EOL>result = A*T*exp(B/T)  <EOL>return result / <NUM_LIT><EOL>", "docstring": "Calculate dynamic viscosity at the specified temperature and\ncomposition:\n\n:param T: [K] temperature\n:param x: [mole fraction] composition dictionary , e.g. \\\n{'SiO2': 0.25, 'CaO': 0.25, 'MgO': 0.25, 'FeO': 0.25}\n\n:returns: [Pa.s] dynamic viscosity\n\nThe **state parameter contains the keyword argument(s) specified above\\\nthat are used to describe the state of the material.", "id": "f15847:c2:m1"}
{"signature": "def calculate(self, **state):", "body": "T = state['<STR_LIT:T>']<EOL>y = state['<STR_LIT:y>']<EOL>x = amount_fractions(y)<EOL>return super().calculate(T=T, x=x)<EOL>", "docstring": "Calculate dynamic viscosity at the specified temperature and\ncomposition:\n\n:param T: [K] temperature\n:param y: [mass fraction] composition dictionary , e.g. \\\n{'SiO2': 0.25, 'CaO': 0.25, 'MgO': 0.25, 'FeO': 0.25}\n\n:returns: [Pa.s] dynamic viscosity\n\nThe **state parameter contains the keyword argument(s) specified above\\\nthat are used to describe the state of the material.", "id": "f15847:c3:m1"}
{"signature": "def _calc_g0(self, z):", "body": "return <NUM_LIT:1> / (exp(z) - <NUM_LIT:1>)<EOL>", "docstring": "Calculate the g0 parameter.\n\n:param z: dimensionless temperature", "id": "f15849:c2:m1"}
{"signature": "def _calc_a(self, y_C, y_H, y_O, y_N, y_S):", "body": "return <NUM_LIT:1> / (y_C/mm(\"<STR_LIT:C>\") + y_H/mm(\"<STR_LIT:H>\") + y_O/mm(\"<STR_LIT:O>\") + y_N/mm(\"<STR_LIT:N>\") +<EOL>y_S/mm(\"<STR_LIT:S>\"))<EOL>", "docstring": "Calculate the mean atomic weight for the specified element mass\nfractions.\n\n:param y_C: Carbon mass fraction\n:param y_H: Hydrogen mass fraction\n:param y_O: Oxygen mass fraction\n:param y_N: Nitrogen mass fraction\n:param y_S: Sulphur mass fraction\n\n:returns: [kg/kmol] mean atomic weight\n\nSee equation at bottom of page 538 of Merrick1983a.", "id": "f15849:c0:m1"}
{"signature": "def calculate(self, **state):", "body": "super().calculate(**state)<EOL>return np.polyval(self._coeffs, state['<STR_LIT:T>'])<EOL>", "docstring": "Calculate the material physical property at the specified temperature\nin the units specified by the object's 'property_units' property.\n\n:param T: [K] temperature\n\n:returns: physical property value", "id": "f15850:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def create(dataset, symbol, degree):<DEDENT>", "body": "x_vals = dataset.data['<STR_LIT:T>'].tolist()<EOL>y_vals = dataset.data[symbol].tolist()<EOL>coeffs = np.polyfit(x_vals, y_vals, degree)<EOL>result = PolynomialModelT(dataset.material,<EOL>dataset.names_dict[symbol],<EOL>symbol, dataset.display_symbols_dict[symbol],<EOL>dataset.units_dict[symbol],<EOL>None, [dataset.name], coeffs)<EOL>result.state_schema['<STR_LIT:T>']['<STR_LIT>'] = float(min(x_vals))<EOL>result.state_schema['<STR_LIT:T>']['<STR_LIT>'] = float(max(x_vals))<EOL>return result<EOL>", "docstring": "Create a model object from the data set for the property specified by\nthe supplied symbol, using the specified polynomial degree.\n\n:param dataset: a DataSet object\n:param symbol: the symbol of the property to be described, e.g. 'rho'\n:param degree: the polynomial degree to use\n\n:returns: a new PolynomialModelT object", "id": "f15850:c0:m0"}
{"signature": "def calculate(self, **state):", "body": "if not self.state_validator.validate(state):<EOL><INDENT>msg = f\"<STR_LIT>\"<EOL>msg += f\"<STR_LIT>\"<EOL>for key, value in self.state_validator.errors.items():<EOL><INDENT>msg += '<STR_LIT>' % (key, value)<EOL><DEDENT>msg = msg[<NUM_LIT:0>:-<NUM_LIT:1>]+'<STR_LIT:.>'<EOL>raise ValidationError(msg)<EOL><DEDENT>", "docstring": "Base calculate method for models.\nValidates the material state parameter(s).\n\n:param **state: The material state", "id": "f15852:c2:m3"}
{"signature": "@staticmethod<EOL><INDENT>def create_template(material, path, show=False):<DEDENT>", "body": "file_name = '<STR_LIT>' % material.lower()<EOL>file_path = os.path.join(path, file_name)<EOL>with open(file_path, '<STR_LIT:w>', newline='<STR_LIT>') as csvfile:<EOL><INDENT>writer = csv.writer(csvfile, delimiter='<STR_LIT:U+002C>',<EOL>quotechar='<STR_LIT:\">', quoting=csv.QUOTE_MINIMAL)<EOL>writer.writerow(['<STR_LIT:Name>', material])<EOL>writer.writerow(['<STR_LIT>', '<STR_LIT>'<EOL>'<STR_LIT>'])<EOL>writer.writerow(['<STR_LIT>', '<STR_LIT>'<EOL>'<STR_LIT>'])<EOL>writer.writerow(['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'])<EOL>writer.writerow(['<STR_LIT:T>', '<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'])<EOL>writer.writerow(['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'])<EOL>writer.writerow(['<STR_LIT:T>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'])<EOL>for i in range(<NUM_LIT:10>):<EOL><INDENT>writer.writerow([<NUM_LIT> + i*<NUM_LIT:50>, float(i), <NUM_LIT> + i, <NUM_LIT> + i])<EOL><DEDENT><DEDENT>if show is True:<EOL><INDENT>webbrowser.open_new(file_path)<EOL><DEDENT>", "docstring": "Create a template csv file for a data set.\n\n:param material: the name of the material\n:param path: the path of the directory where the file must be written\n:param show: a boolean indicating whether the created file should be \\\ndisplayed after creation", "id": "f15852:c1:m0"}
{"signature": "def calculate(self, **state):", "body": "super().calculate(**state)<EOL>return self.mm * self.P / R / state[\"<STR_LIT:T>\"]<EOL>", "docstring": "Calculate the density at the specified temperature.\n\n:param T: [K] temperature\n\n:returns: [kg/m3] density\n\nThe **state parameter contains the keyword argument(s) specified above\\\nthat are used to describe the state of the material.", "id": "f15853:c1:m1"}
{"signature": "def _create_polynomial_model(<EOL>name: str,<EOL>symbol: str,<EOL>degree: int,<EOL>ds: DataSet,<EOL>dss: dict):", "body": "ds_name = ds.name.split(\"<STR_LIT:.>\")[<NUM_LIT:0>].lower()<EOL>file_name = f\"<STR_LIT>\"<EOL>newmod = PolynomialModelT.create(ds, symbol, degree)<EOL>newmod.plot(dss, _path(f\"<STR_LIT>\"), False)<EOL>newmod.write(_path(f\"<STR_LIT>\"))<EOL>", "docstring": "Create a polynomial model to describe the specified property based on the\nspecified data set, and save it to a .json file.\n\n:param name: material name.\n:param symbol: property symbol.\n:param degree: polynomial degree.\n:param ds: the source data set.\n:param dss: dictionary of all datasets.", "id": "f15854:m2"}
{"signature": "def _create_ds_dict(namelist):", "body": "return {n: DataSet(_path(f\"<STR_LIT>\")) for n in namelist}<EOL>", "docstring": "Create a data set dictionary from the provided list of data set names.\n\n:param namelist: list of data set names (str).\n:return: {str: DataSet}", "id": "f15854:m1"}
{"signature": "def _create_water_vapour():", "body": "name = \"<STR_LIT>\"<EOL>namel = name.lower().replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:->\")<EOL>mm = M(\"<STR_LIT>\")  <EOL>ds_dict = _create_ds_dict([<EOL>f\"<STR_LIT>\"])<EOL>active_ds = f\"<STR_LIT>\"<EOL>model_dict = {<EOL>\"<STR_LIT>\": IgRhoT(mm, <NUM_LIT>),<EOL>\"<STR_LIT>\": IgBetaT()}<EOL>model_type = \"<STR_LIT>\"<EOL>for property in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:k>\"]:<EOL><INDENT>name = f\"<STR_LIT>\"<EOL>model_dict[property] = PolynomialModelT.read(_path(name))<EOL><DEDENT>material = Material(name, StateOfMatter.gas, model_dict)<EOL>return material, ds_dict<EOL>", "docstring": "Create a dictionary of datasets and a material object for water vapour.\n\n:return: (Material, {str, DataSet})", "id": "f15854:m10"}
{"signature": "def calculate(self, **state):", "body": "T = state['<STR_LIT:T>']<EOL>x = state['<STR_LIT:x>']<EOL>x_total = sum([<EOL>x for compound, x in x.items()<EOL>if compound in materials])<EOL>x = {<EOL>compound: x[compound]/x_total<EOL>for compound in x.keys()<EOL>if compound in materials}<EOL>mu = {i: materials[i].mu(T=T) for i in x.keys()}<EOL>result = sum([mu[i] * x[i] * sqrt(M(i)) for i in x.keys()])<EOL>result /= sum([x[i] * sqrt(M(i)) for i in x.keys()])<EOL>return result<EOL>", "docstring": "Calculate dynamic viscosity at the specified temperature and\ncomposition:\n\n:param T: [K] temperature\n:param x: [mole fraction] composition dictionary , e.g.\n  {'CO': 0.25, 'CO2': 0.25, 'N2': 0.25, 'O2': 0.25}\n\n:returns: [Pa.s] dynamic viscosity\n\nThe **state parameter contains the keyword argument(s) specified above\nthat are used to describe the state of the material.", "id": "f15854:c1:m1"}
{"signature": "def _path(relative_path):", "body": "path = modules[__name__].__file__<EOL>path = realpath(path)<EOL>path = dirname(path)<EOL>return join(path, relative_path)<EOL>", "docstring": "Calculate the full path of the provided relative path.\n\n:param relative_path: relative path (str).\n:return: str", "id": "f15854:m0"}
{"signature": "def _create_carbon_dioxide():", "body": "name = \"<STR_LIT>\"<EOL>namel = name.lower().replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:->\")<EOL>mm = M(\"<STR_LIT>\")  <EOL>ds_dict = _create_ds_dict([<EOL>f\"<STR_LIT>\"])<EOL>active_ds = f\"<STR_LIT>\"<EOL>model_dict = {<EOL>\"<STR_LIT>\": IgBetaT()}<EOL>model_type = \"<STR_LIT>\"<EOL>for property in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:k>\"]:<EOL><INDENT>name = f\"<STR_LIT>\"<EOL>model_dict[property] = PolynomialModelT.read(_path(name))<EOL><DEDENT>material = Material(name, StateOfMatter.gas, model_dict)<EOL>return material, ds_dict<EOL>", "docstring": "Create a dictionary of datasets and a material object for carbon dioxide.\n\n:return: (Material, {str, DataSet})", "id": "f15854:m6"}
{"signature": "def _create_ammonia():", "body": "name = \"<STR_LIT>\"<EOL>namel = name.lower()<EOL>mm = M(\"<STR_LIT>\")  <EOL>ds_dict = _create_ds_dict([<EOL>f\"<STR_LIT>\"])<EOL>active_ds = f\"<STR_LIT>\"<EOL>model_dict = {<EOL>\"<STR_LIT>\": IgBetaT()}<EOL>model_type = \"<STR_LIT>\"<EOL>for property in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:k>\"]:<EOL><INDENT>name = f\"<STR_LIT>\"<EOL>model_dict[property] = PolynomialModelT.read(_path(name))<EOL><DEDENT>material = Material(name, StateOfMatter.gas, model_dict)<EOL>return material, ds_dict<EOL>", "docstring": "Create a dictionary of datasets and a material object for ammonia.\n\n:return: (Material, {str, DataSet})", "id": "f15854:m5"}
{"signature": "def f_tr_Haaland(Re_D, \u025b, D, warn=True):", "body": "if warn:<EOL><INDENT>try:<EOL><INDENT>if (\u025b / D) < <NUM_LIT:0.0> or (\u025b / D) > <NUM_LIT>:<EOL><INDENT>raise Warning(<EOL>f\"<STR_LIT>\")<EOL><DEDENT><DEDENT>except Warning as w:<EOL><INDENT>ex_type, ex_value, ex_traceback = sys.exc_info()<EOL>print(color_warn(\"<STR_LIT>\"), ex_value)<EOL><DEDENT>try:<EOL><INDENT>if Re_D < <NUM_LIT> or Re_D > <NUM_LIT>:<EOL><INDENT>raise Warning(<EOL>f\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT><DEDENT>except Warning as w:<EOL><INDENT>ex_type, ex_value, ex_traceback = sys.exc_info()<EOL>print(color_warn(\"<STR_LIT>\"), ex_value)<EOL><DEDENT><DEDENT>return <NUM_LIT:1> / (<NUM_LIT> * log10((<NUM_LIT> / Re_D) + (\u025b / D / <NUM_LIT>)**<NUM_LIT>))**<NUM_LIT:2><EOL>", "docstring": "Calculate the friction factor of turbulent flow (t) in a rough duct (r) for\nthe provided conditions with Haaland's equation.\n\n:param Re_D: Reynolds number for the specified hydraulic diameter.\n:param \u025b: [m] Surface roughness.\n:param D: [m] Duct hydraulic diameter.\n:return: Friction factor.\n\nSource: lienhard2018, Eq. 7.50.", "id": "f15857:m3"}
{"signature": "def \u0394p(f, L, D, \u03c1, v):", "body": "return f * (L / D) * (\u03c1 * v**<NUM_LIT:2> / <NUM_LIT:2>)<EOL>", "docstring": "Calculate the pressure drop for turbulent flow in a duct.\n\n:param f: Duct Darcy friction factor.\n:param L: [m] Duct length.\n:param D: [m] Duct hydraulic diameter.\n:param \u03c1: [kg/m3] Fluid density.\n:param v: [m/s] Average fluid velocity.\n:return: [Pa] Pressure drop.\n\nSource: lienhard2018, Eq. 3.25.", "id": "f15857:m6"}
{"signature": "def f_tr_Colebrook(Re_D, \u025b, D):", "body": "def cb(f, Re_D, \u025b, D):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>term1 = \u025b / D / <NUM_LIT><EOL>term2 = <NUM_LIT> / Re_D / sqrt(f)<EOL>return -<NUM_LIT:2> * log10(term1 + term2) - <NUM_LIT:1> / sqrt(f)<EOL><DEDENT>return root(cb, <NUM_LIT>, args=(Re_D, \u025b, D)).x[<NUM_LIT:0>]<EOL>", "docstring": "Calculate the friction factor of turbulent flow (t) a rough duct (r) for\nthe provided conditions with Colebrook's equation.\n\n:param Re_D: Reynolds number for the specified hydraulic diameter.\n:param \u025b: [m] Surface roughness.\n:param D: [m] Duct hydraulic diameter.\n:return: Friction factor.", "id": "f15857:m4"}
{"signature": "def Nu_L(self, L, theta, Ts, **statef):", "body": "return self.Nu_x(L, theta, Ts, **statef) / <NUM_LIT><EOL>", "docstring": "Calculate the average Nusselt number.\n\n:param L: [m] characteristic length of the heat transfer surface\n:param theta: [\u00b0] angle of the surface with the vertical\n:param Ts: [K] heat transfer surface temperature\n:param **statef: [K] bulk fluid temperature\n\n:returns: float", "id": "f15858:c1:m8"}
{"signature": "def h_x(self, L, theta, Ts, **statef):", "body": "Nu_x = self.Nu_x(L, theta, Ts, **statef)<EOL>k = self._fluid.k(T=self.Tr)<EOL>return Nu_x * k / L<EOL>", "docstring": "Calculate the local heat transfer coefficient.\n\n:param L: [m] characteristic length of the heat transfer surface\n:param theta: [\u00b0] angle of the surface with the vertical\n:param Ts: [K] heat transfer surface temperature\n:param Tf: [K] bulk fluid temperature\n\n:returns: [W/m2/K] float", "id": "f15858:c1:m9"}
{"signature": "def h_L(self, L, theta, Ts, **statef):", "body": "Nu_L = self.Nu_L(L, theta, Ts, **statef)<EOL>k = self._fluid.k(T=self.Tr)<EOL>return Nu_L * k / L<EOL>", "docstring": "Calculate the average heat transfer coefficient.\n\n:param L: [m] characteristic length of the heat transfer surface\n:param theta: [\u00b0] angle of the surface with the vertical\n:param Ts: [K] heat transfer surface temperature\n:param Tf: [K] bulk fluid temperature\n\n:returns: [W/m2/K] float", "id": "f15858:c1:m10"}
{"signature": "def Re(L: float, v: float, nu: float) -> float:", "body": "return v * L / nu<EOL>", "docstring": "Calculate the Reynolds number.\n\n:param L: [m] surface characteristic length.\n:param v: [m/s] fluid velocity relative to the object.\n:param nu: [m2/s] fluid kinematic viscosity.\n\n:returns: float", "id": "f15861:m2"}
{"signature": "def Pr(nu: float, alpha: float) -> float:", "body": "return nu / alpha<EOL>", "docstring": "Calculate the Prandtl number.\n\n:param nu: [m2/s] fluid kinematic viscosity / momentum diffusivity.\n:param alpha: [m2/s] fluid thermal diffusivity.\n\n:returns: float", "id": "f15861:m1"}
{"signature": "def Gr(L: float, Ts: float, Tf: float, beta: float, nu: float, g: float):", "body": "return g * beta * (Ts - Tf) * L**<NUM_LIT> / nu**<NUM_LIT><EOL>", "docstring": "Calculate the Grashof number.\n\n:param L: [m] heat transfer surface characteristic length.\n:param Ts: [K] heat transfer surface temperature.\n:param Tf: [K] bulk fluid temperature.\n:param beta: [1/K] fluid coefficient of thermal expansion.\n:param nu: [m2/s] fluid kinematic viscosity.\n\n:returns: float\n\n.. math::\n    \\\\mathrm{Gr} = \\\\frac{g \\\\beta (Ts - Tinf ) L^3}{\\\\nu ^2}\n\nCharacteristic dimensions:\n    * vertical plate: vertical length\n    * pipe: diameter\n    * bluff body: diameter", "id": "f15861:m0"}
{"signature": "def Nu(L: float, h: float, k: float) -> float:", "body": "return h * L / k<EOL>", "docstring": "Calculate the Nusselt number.\n\n:param L: [m] heat transfer surface characteristic length.\n:param h: [W/K/m2] convective heat transfer coefficient.\n:param k: [W/K/m] fluid thermal conductivity.\n\n:returns: float", "id": "f15861:m4"}
{"signature": "def stoichiometry_coefficient(compound, element):", "body": "stoichiometry = parse_compound(compound.strip()).count()<EOL>return stoichiometry[element]<EOL>", "docstring": "Determine the stoichiometry coefficient of an element in a chemical\ncompound.\n\n:param compound: Formula of a chemical compound, e.g. 'SiO2'.\n:param element:  Element, e.g. 'Si'.\n\n:returns: Stoichiometry coefficient.", "id": "f15862:m13"}
{"signature": "def element_mass_fractions(compound, elements):", "body": "return [element_mass_fraction(compound, element)<EOL>for element in elements]<EOL>", "docstring": "Determine the mass fractions of a list of elements in a chemical compound.\n\n:param compound: Formula and phase of a chemical compound, e.g.\n  'Fe2O3[S1]'.\n:param elements: List of elements, ['Si', 'O', 'Fe'].\n\n:returns: Mass fractions.", "id": "f15862:m10"}
{"signature": "def mass(compound, amount):", "body": "return amount * molar_mass(compound)<EOL>", "docstring": "Calculate the mass of the specified amount of a chemical compound.\n\n:param compound: Formula and phase of a compound, e.g. 'Fe2O3[S1]'. The\n  phase may be omitted.\n:param amount: [kmol]\n\n:returns: Mass. [kg]", "id": "f15862:m5"}
{"signature": "def amounts(masses):", "body": "return {compound: amount(compound, masses[compound])<EOL>for compound in masses.keys()}<EOL>", "docstring": "Calculate the amounts from the specified compound masses.\n\n:param masses: [kg] dictionary, e.g. {'SiO2': 3.0, 'FeO': 1.5}\n\n:returns: [kmol] dictionary", "id": "f15862:m3"}
{"signature": "def H(compound_string, T, mass=<NUM_LIT:1.0>):", "body": "formula, phase = _split_compound_string_(compound_string)<EOL>TK = T + <NUM_LIT><EOL>compound = compounds[formula]<EOL>result = compound.H(phase, TK)<EOL>return _finalise_result_(compound, result, mass)<EOL>", "docstring": "Calculate the enthalpy of the compound for the specified temperature and\nmass.\n\n:param compound_string: Formula and phase of chemical compound, e.g.\n  'Fe2O3[S1]'.\n:param T: [\u00b0C] temperature\n:param mass: [kg]\n\n:returns: [kWh] Enthalpy.", "id": "f15866:m12"}
{"signature": "def Zero_mag(self, T):", "body": "return <NUM_LIT:0.0><EOL>", "docstring": "Return a zero value for a phase with no magnetic property data.\n\n:param T: [K] temperature\n\n:returns: Zero.", "id": "f15866:c1:m3"}
{"signature": "def _get_default_data_path_():", "body": "module_path = os.path.dirname(sys.modules[__name__].__file__)<EOL>data_path = os.path.join(module_path, r'<STR_LIT>')<EOL>data_path = os.path.abspath(data_path)<EOL>return data_path<EOL>", "docstring": "Calculate the default path in which thermochemical data is stored.\n\n:returns: Default path.", "id": "f15866:m0"}
{"signature": "def Cp(self, T):", "body": "result = <NUM_LIT:0.0><EOL>for c, e in zip(self._coefficients, self._exponents):<EOL><INDENT>result += c*T**e<EOL><DEDENT>return result<EOL>", "docstring": "Calculate the heat capacity of the compound phase.\n\n:param T: [K] temperature\n\n:returns: [J/mol/K] Heat capacity.", "id": "f15866:c0:m2"}
{"signature": "def Cp(self, phase, T):", "body": "if phase not in self._phases:<EOL><INDENT>raise Exception(\"<STR_LIT>\" %<EOL>(phase, self.formula))<EOL><DEDENT>return self._phases[phase].Cp(T)<EOL>", "docstring": "Calculate the heat capacity of a phase of the compound at a specified\ntemperature.\n\n:param phase: A phase of the compound, e.g. 'S', 'L', 'G'.\n:param T: [K] temperature\n\n:returns: [J/mol/K] Heat capacity.", "id": "f15866:c2:m4"}
{"signature": "def load_data_factsage(path='<STR_LIT>'):", "body": "compounds.clear()<EOL>if path == '<STR_LIT>':<EOL><INDENT>path = default_data_path<EOL><DEDENT>if not os.path.exists(path):<EOL><INDENT>warnings.warn('<STR_LIT>' % path)<EOL>return<EOL><DEDENT>files = glob.glob(os.path.join(path, '<STR_LIT>'))<EOL>for file in files:<EOL><INDENT>compound = Compound(_read_compound_from_factsage_file_(file))<EOL>compounds[compound.formula] = compound<EOL><DEDENT>", "docstring": "Load all the thermochemical data factsage files located at a path.\n\n:param path: Path at which the data files are located.", "id": "f15866:m6"}
{"signature": "def load_data_auxi(path='<STR_LIT>'):", "body": "compounds.clear()<EOL>if path == '<STR_LIT>':<EOL><INDENT>path = default_data_path<EOL><DEDENT>if not os.path.exists(path):<EOL><INDENT>warnings.warn('<STR_LIT>' % path)<EOL>return<EOL><DEDENT>files = glob.glob(os.path.join(path, '<STR_LIT>'))<EOL>for file in files:<EOL><INDENT>compound = Compound.read(file)<EOL>compounds[compound.formula] = compound<EOL><DEDENT>", "docstring": "Load all the thermochemical data auxi files located at a path.\n\n:param path: Path at which the data files are located.", "id": "f15866:m7"}
{"signature": "def G_mag(self, T):", "body": "tau = T / self.Tc_mag<EOL>if tau <= <NUM_LIT:1.0>:<EOL><INDENT>g = <NUM_LIT:1> - (self._A_mag/tau +<EOL>self._B_mag*(tau**<NUM_LIT:3>/<NUM_LIT:6> + tau**<NUM_LIT:9>/<NUM_LIT> + tau**<NUM_LIT:15>/<NUM_LIT>)) /self._D_mag<EOL><DEDENT>else:<EOL><INDENT>g = -(tau**-<NUM_LIT:5>/<NUM_LIT:10> + tau**-<NUM_LIT:15>/<NUM_LIT> + tau**-<NUM_LIT>/<NUM_LIT>)/self._D_mag<EOL><DEDENT>return R*T*math.log(self.beta0_mag + <NUM_LIT:1>)*g<EOL>", "docstring": "Calculate the phase's magnetic contribution to Gibbs energy at the\nspecified temperature.\n\n:param T: [K] temperature\n\n:returns: [J/mol] The magnetic Gibbs energy of the compound phase.\n\nDinsdale, A. T. (1991). SGTE data for pure elements. Calphad, 15(4),\n317\u2013425. http://doi.org/10.1016/0364-5916(91)90030-N", "id": "f15866:c1:m11"}
{"signature": "def _read_compound_from_auxi_file_(file_name):", "body": "with open(file_name) as f:<EOL><INDENT>return json.load(f)<EOL><DEDENT>", "docstring": "Build a dictionary containing the auxi thermochemical data of a compound by\nreading the data from a file.\n\n:param file_name: Name of file to read the data from.\n\n:returns: Dictionary containing compound data.", "id": "f15866:m4"}
{"signature": "def S(self, T):", "body": "result = self.Sref<EOL>for Tmax in sorted([float(TT) for TT in self._Cp_records.keys()]):<EOL><INDENT>result += self._Cp_records[str(Tmax)].S(T)<EOL>if T <= Tmax:<EOL><INDENT>return result + self.S_mag(T)<EOL><DEDENT><DEDENT>Tmax = max([float(TT) for TT in self._Cp_records.keys()])<EOL>result += self.Cp(Tmax)*math.log(T / Tmax)<EOL>return result + self.S_mag(T)<EOL>", "docstring": "Calculate the entropy of the compound phase at the specified\ntemperature.\n\n:param T: [K] temperature\n\n:returns: [J/mol/K] The entropy of the compound phase.", "id": "f15866:c1:m8"}
{"signature": "def Cp_mag(self, T):", "body": "tau = T / self.Tc_mag<EOL>if tau <= <NUM_LIT:1.0>:<EOL><INDENT>c = (self._B_mag*(<NUM_LIT:2>*tau**<NUM_LIT:3> + <NUM_LIT:2>*tau**<NUM_LIT:9>/<NUM_LIT:3> + <NUM_LIT:2>*tau**<NUM_LIT:15>/<NUM_LIT:5>))/self._D_mag<EOL><DEDENT>else:<EOL><INDENT>c = (<NUM_LIT:2>*tau**-<NUM_LIT:5> + <NUM_LIT:2>*tau**-<NUM_LIT:15>/<NUM_LIT:3> + <NUM_LIT:2>*tau**-<NUM_LIT>/<NUM_LIT:5>)/self._D_mag<EOL><DEDENT>result = R*math.log(self.beta0_mag + <NUM_LIT:1>)*c<EOL>return result<EOL>", "docstring": "Calculate the phase's magnetic contribution to heat capacity at the\nspecified temperature.\n\n:param T: [K] temperature\n\n:returns: [J/mol/K] The magnetic heat capacity of the compound phase.\n\nDinsdale, A. T. (1991). SGTE data for pure elements. Calphad, 15(4),\n317\u2013425. http://doi.org/10.1016/0364-5916(91)90030-N", "id": "f15866:c1:m5"}
{"signature": "def H_mag(self, T):", "body": "tau = T / self.Tc_mag<EOL>if tau <= <NUM_LIT:1.0>:<EOL><INDENT>h = (-self._A_mag/tau +<EOL>self._B_mag*(tau**<NUM_LIT:3>/<NUM_LIT:2> + tau**<NUM_LIT:9>/<NUM_LIT:15> + tau**<NUM_LIT:15>/<NUM_LIT>))/self._D_mag<EOL><DEDENT>else:<EOL><INDENT>h = -(tau**-<NUM_LIT:5>/<NUM_LIT:2> + tau**-<NUM_LIT:15>/<NUM_LIT> + tau**-<NUM_LIT>/<NUM_LIT>)/self._D_mag<EOL><DEDENT>return R*T*math.log(self.beta0_mag + <NUM_LIT:1>)*h<EOL>", "docstring": "Calculate the phase's magnetic contribution to enthalpy at the\nspecified temperature.\n\n:param T: [K] temperature\n\n:returns: [J/mol] The magnetic enthalpy of the compound phase.\n\nDinsdale, A. T. (1991). SGTE data for pure elements. Calphad, 15(4),\n317\u2013425. http://doi.org/10.1016/0364-5916(91)90030-N", "id": "f15866:c1:m7"}
{"signature": "def list_compounds():", "body": "print('<STR_LIT>')<EOL>for compound in sorted(compounds.keys()):<EOL><INDENT>phases = compounds[compound].get_phase_list()<EOL>print('<STR_LIT>' % (compound, '<STR_LIT:U+002CU+0020>'.join(phases)))<EOL><DEDENT>", "docstring": "List all compounds that are currently loaded in the thermo module, and\ntheir phases.", "id": "f15866:m8"}
{"signature": "def G(compound_string, T, mass=<NUM_LIT:1.0>):", "body": "formula, phase = _split_compound_string_(compound_string)<EOL>TK = T + <NUM_LIT><EOL>compound = compounds[formula]<EOL>result = compound.G(phase, TK)<EOL>return _finalise_result_(compound, result, mass)<EOL>", "docstring": "Calculate the Gibbs free energy of the compound for the specified\ntemperature and mass.\n\n:param compound_string: Formula and phase of chemical compound, e.g.\n  'Fe2O3[S1]'.\n:param T: [\u00b0C] temperature\n:param mass: [kg]\n\n\n:returns: [kWh] Gibbs free energy.", "id": "f15866:m14"}
{"signature": "def Cp(compound_string, T, mass=<NUM_LIT:1.0>):", "body": "formula, phase = _split_compound_string_(compound_string)<EOL>TK = T + <NUM_LIT><EOL>compound = compounds[formula]<EOL>result = compound.Cp(phase, TK)<EOL>return _finalise_result_(compound, result, mass)<EOL>", "docstring": "Calculate the heat capacity of the compound for the specified temperature\nand mass.\n\n:param compound_string: Formula and phase of chemical compound, e.g.\n  'Fe2O3[S1]'.\n:param T: [\u00b0C] temperature\n:param mass: [kg]\n\n:returns: [kWh/K] Heat capacity.", "id": "f15866:m11"}
{"signature": "def S(self, phase, T):", "body": "try:<EOL><INDENT>return self._phases[phase].S(T)<EOL><DEDENT>except KeyError:<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>.format(phase, self.formula))<EOL><DEDENT>", "docstring": "Calculate the entropy of a phase of the compound at a specified\ntemperature.\n\n:param phase: A phase of the compound, e.g. 'S', 'L', 'G'.\n:param T: [K] temperature\n\n:returns: [J/mol/K] Entropy.", "id": "f15866:c2:m6"}
{"signature": "def get_phase_list(self):", "body": "return sorted(self._phases.keys())<EOL>", "docstring": "Get a list of the compound's phases.\n\n:returns: List of phases.", "id": "f15866:c2:m2"}
{"signature": "def render(self, format=ReportFormat.printout):", "body": "table = self._generate_table_()<EOL>if format == ReportFormat.printout:<EOL><INDENT>print(tabulate(table, headers=\"<STR_LIT>\", tablefmt=\"<STR_LIT>\"))<EOL><DEDENT>elif format == ReportFormat.latex:<EOL><INDENT>self._render_latex_(table)<EOL><DEDENT>elif format == ReportFormat.txt:<EOL><INDENT>self._render_txt_(table)<EOL><DEDENT>elif format == ReportFormat.csv:<EOL><INDENT>self._render_csv_(table)<EOL><DEDENT>elif format == ReportFormat.string:<EOL><INDENT>return str(tabulate(table, headers=\"<STR_LIT>\", tablefmt=\"<STR_LIT>\"))<EOL><DEDENT>elif format == ReportFormat.matplotlib:<EOL><INDENT>self._render_matplotlib_()<EOL><DEDENT>elif format == ReportFormat.png:<EOL><INDENT>if self.output_path is None:<EOL><INDENT>self._render_matplotlib_()<EOL><DEDENT>else:<EOL><INDENT>self._render_matplotlib_(True)<EOL><DEDENT><DEDENT>", "docstring": "Render the report in the specified format\n\n:param format: The format. The default format is to print\n  the report to the console.\n\n:returns: If the format was set to 'string' then a string\n  representation of the report is returned.", "id": "f15870:c1:m3"}
{"signature": "def __init__(self, name, description=None,<EOL>start_datetime=datetime.min,<EOL>timestep_period_duration=TimePeriod.month,<EOL>timestep_period_count=<NUM_LIT:1>):", "body": "super(Clock, self).__init__(name, description)<EOL>self.start_datetime = start_datetime<EOL>self.timestep_period_duration = timestep_period_duration<EOL>self.timestep_period_count = timestep_period_count<EOL>self.timestep_ix = <NUM_LIT:0><EOL>", "docstring": "Initialise the object.\n\n        :param name: The name.\n        :param description: The description.\n        :param start_datetime: The start datetime.\n        :param timestep_period_duration: The duration of each time period.\n        :param timestep_period_count: The number of periods that makes up a\n          timestep.", "id": "f15874:c1:m0"}
{"signature": "def reset(self):", "body": "self.timestep_ix = <NUM_LIT:0><EOL>", "docstring": "Resets the clock's timestep index to '0'.", "id": "f15874:c1:m2"}
{"signature": "def get_datetime_at_period_ix(self, ix):", "body": "if self.timestep_period_duration == TimePeriod.millisecond:<EOL><INDENT>return self.start_datetime + timedelta(milliseconds=ix)<EOL><DEDENT>elif self.timestep_period_duration == TimePeriod.second:<EOL><INDENT>return self.start_datetime + timedelta(seconds=ix)<EOL><DEDENT>elif self.timestep_period_duration == TimePeriod.minute:<EOL><INDENT>return self.start_datetime + timedelta(minutes=ix)<EOL><DEDENT>elif self.timestep_period_duration == TimePeriod.hour:<EOL><INDENT>return self.start_datetime + timedelta(hours=ix)<EOL><DEDENT>elif self.timestep_period_duration == TimePeriod.day:<EOL><INDENT>return self.start_datetime + relativedelta(days=ix)<EOL><DEDENT>elif self.timestep_period_duration == TimePeriod.week:<EOL><INDENT>return self.start_datetime + relativedelta(days=ix*<NUM_LIT:7>)<EOL><DEDENT>elif self.timestep_period_duration == TimePeriod.month:<EOL><INDENT>return self.start_datetime + relativedelta(months=ix)<EOL><DEDENT>elif self.timestep_period_duration == TimePeriod.year:<EOL><INDENT>return self.start_datetime + relativedelta(years=ix)<EOL><DEDENT>", "docstring": "Get the datetime at a given period.\n\n:param ix: The index of the period.\n\n:returns: The datetime.", "id": "f15874:c1:m3"}
{"signature": "def get_path_relative_to_module(module_file_path, relative_target_path):", "body": "module_path = os.path.dirname(module_file_path)<EOL>path = os.path.join(module_path, relative_target_path)<EOL>path = os.path.abspath(path)<EOL>return path<EOL>", "docstring": "Calculate a path relative to the specified module file.\n\n:param module_file_path: The file path to the module.\n:param relative_target_path: The target path relative to the module's directory.\n\n:returns: The absolute path to the target.", "id": "f15876:m0"}
{"signature": "def model_to_dict(model, sort=False):", "body": "obj = OrderedDict()<EOL>obj[\"<STR_LIT>\"] = list(map(metabolite_to_dict, model.metabolites))<EOL>obj[\"<STR_LIT>\"] = list(map(reaction_to_dict, model.reactions))<EOL>obj[\"<STR_LIT>\"] = list(map(gene_to_dict, model.genes))<EOL>obj[\"<STR_LIT:id>\"] = model.id<EOL>_update_optional(model, obj, _OPTIONAL_MODEL_ATTRIBUTES,<EOL>_ORDERED_OPTIONAL_MODEL_KEYS)<EOL>if sort:<EOL><INDENT>get_id = itemgetter(\"<STR_LIT:id>\")<EOL>obj[\"<STR_LIT>\"].sort(key=get_id)<EOL>obj[\"<STR_LIT>\"].sort(key=get_id)<EOL>obj[\"<STR_LIT>\"].sort(key=get_id)<EOL><DEDENT>return obj<EOL>", "docstring": "Convert model to a dict.\n\n    Parameters\n    ----------\n    model : cobra.Model\n        The model to reformulate as a dict.\n    sort : bool, optional\n        Whether to sort the metabolites, reactions, and genes or maintain the\n        order defined in the model.\n\n    Returns\n    -------\n    OrderedDict\n        A dictionary with elements, 'genes', 'compartments', 'id',\n        'metabolites', 'notes' and 'reactions'; where 'metabolites', 'genes'\n        and 'metabolites' are in turn lists with dictionaries holding all\n        attributes to form the corresponding object.\n\n    See Also\n    --------\n    cobra.io.model_from_dict", "id": "f15887:m8"}
{"signature": "def _create_parameter(model, pid, value, sbo=None, constant=True, units=None,<EOL>flux_udef=None):", "body": "parameter = model.createParameter()  <EOL>parameter.setId(pid)<EOL>parameter.setValue(value)<EOL>parameter.setConstant(constant)<EOL>if sbo:<EOL><INDENT>parameter.setSBOTerm(sbo)<EOL><DEDENT>if units:<EOL><INDENT>parameter.setUnits(flux_udef.getId())<EOL><DEDENT>", "docstring": "Create parameter in SBML model.", "id": "f15889:m13"}
{"signature": "def _parse_annotations(sbase):", "body": "annotation = {}<EOL>if sbase.isSetSBOTerm():<EOL><INDENT>annotation[\"<STR_LIT>\"] = sbase.getSBOTermID()<EOL><DEDENT>cvterms = sbase.getCVTerms()<EOL>if cvterms is None:<EOL><INDENT>return annotation<EOL><DEDENT>for cvterm in cvterms:  <EOL><INDENT>for k in range(cvterm.getNumResources()):<EOL><INDENT>uri = cvterm.getResourceURI(k)<EOL>match = URL_IDENTIFIERS_PATTERN.match(uri)<EOL>if not match:<EOL><INDENT>LOGGER.warning(\"<STR_LIT>\"<EOL>\"<STR_LIT>\", uri)<EOL>continue<EOL><DEDENT>provider, identifier = match.group(<NUM_LIT:1>), match.group(<NUM_LIT:2>)<EOL>if provider in annotation:<EOL><INDENT>if isinstance(annotation[provider], string_types):<EOL><INDENT>annotation[provider] = [annotation[provider]]<EOL><DEDENT>if identifier not in annotation[provider]:<EOL><INDENT>annotation[provider].append(identifier)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>annotation[provider] = identifier<EOL><DEDENT><DEDENT><DEDENT>return annotation<EOL>", "docstring": "Parses cobra annotations from a given SBase object.\n\n    Annotations are dictionaries with the providers as keys.\n\n    Parameters\n    ----------\n    sbase : libsbml.SBase\n        SBase from which the SBML annotations are read\n\n    Returns\n    -------\n    dict (annotation dictionary)\n\n    FIXME: annotation format must be updated (this is a big collection of\n          fixes) - see: https://github.com/opencobra/cobrapy/issues/684)", "id": "f15889:m18"}
{"signature": "def _error_string(error, k=None):", "body": "package = error.getPackage()<EOL>if package == '<STR_LIT>':<EOL><INDENT>package = '<STR_LIT>'<EOL><DEDENT>template = '<STR_LIT>'<EOL>error_str = template.format(k, error.getSeverityAsString(),<EOL>error.getCategoryAsString(), package,<EOL>error.getLine(), error.getShortMessage(),<EOL>error.getMessage())<EOL>return error_str<EOL>", "docstring": "String representation of SBMLError.\n\n    Parameters\n    ----------\n    error : libsbml.SBMLError\n    k : index of error\n\n    Returns\n    -------\n    string representation of error", "id": "f15889:m21"}
{"signature": "def _check_required(sbase, value, attribute):", "body": "if (value is None) or (value == \"<STR_LIT>\"):<EOL><INDENT>msg = \"<STR_LIT>\" %(attribute, sbase)<EOL>if hasattr(sbase, \"<STR_LIT>\") and sbase.getId():<EOL><INDENT>msg += \"<STR_LIT>\" % sbase.getId()<EOL><DEDENT>elif hasattr(sbase, \"<STR_LIT>\") and sbase.getName():<EOL><INDENT>msg += \"<STR_LIT>\" % sbase.getName()<EOL><DEDENT>elif hasattr(sbase, \"<STR_LIT>\") and sbase.getMetaId():<EOL><INDENT>msg += \"<STR_LIT>\" % sbase.getName()<EOL><DEDENT>raise CobraSBMLError(msg)<EOL><DEDENT>return value<EOL>", "docstring": "Get required attribute from SBase.\n\n    Parameters\n    ----------\n    sbase : libsbml.SBase\n    value : existing value\n    attribute: name of attribute\n\n    Returns\n    -------\n    attribute value (or value if already set)", "id": "f15889:m14"}
{"signature": "def _sbase_notes_dict(sbase, notes):", "body": "if notes and len(notes) > <NUM_LIT:0>:<EOL><INDENT>tokens = ['<STR_LIT>'] +[\"<STR_LIT>\".format(k, v) for (k, v) in notes.items()] +[\"<STR_LIT>\"]<EOL>_check(<EOL>sbase.setNotes(\"<STR_LIT:\\n>\".join(tokens)),<EOL>\"<STR_LIT>\".format(sbase)<EOL>)<EOL><DEDENT>", "docstring": "Set SBase notes based on dictionary.\n\n    Parameters\n    ----------\n    sbase : libsbml.SBase\n        SBML object to set notes on\n    notes : notes object\n        notes information from cobra object", "id": "f15889:m17"}
{"signature": "def write_sbml_model(cobra_model, filename, f_replace=F_REPLACE, **kwargs):", "body": "doc = _model_to_sbml(cobra_model, f_replace=f_replace, **kwargs)<EOL>if isinstance(filename, string_types):<EOL><INDENT>libsbml.writeSBMLToFile(doc, filename)<EOL><DEDENT>elif hasattr(filename, \"<STR_LIT>\"):<EOL><INDENT>sbml_str = libsbml.writeSBMLToString(doc)<EOL>filename.write(sbml_str)<EOL><DEDENT>", "docstring": "Writes cobra model to filename.\n\n    The created model is SBML level 3 version 1 (L1V3) with\n    fbc package v2 (fbc-v2).\n\n    If the given filename ends with the suffix \".gz\" (for example,\n    \"myfile.xml.gz\"), libSBML assumes the caller wants the file to be\n    written compressed in gzip format. Similarly, if the given filename\n    ends with \".zip\" or \".bz2\", libSBML assumes the caller wants the\n    file to be compressed in zip or bzip2 format (respectively). Files\n    whose names lack these suffixes will be written uncompressed. Special\n    considerations for the zip format: If the given filename ends with\n    \".zip\", the file placed in the zip archive will have the suffix\n    \".xml\" or \".sbml\".  For example, the file in the zip archive will\n    be named \"test.xml\" if the given filename is \"test.xml.zip\" or\n    \"test.zip\". Similarly, the filename in the archive will be\n    \"test.sbml\" if the given filename is \"test.sbml.zip\".\n\n    Parameters\n    ----------\n    cobra_model : cobra.core.Model\n        Model instance which is written to SBML\n    filename : string\n        path to which the model is written\n    use_fbc_package : boolean {True, False}\n        should the fbc package be used\n    f_replace: dict of replacement functions for id replacement", "id": "f15889:m10"}
{"signature": "def _f_specie(sid, prefix=\"<STR_LIT>\"):", "body": "return _clip(sid, prefix)<EOL>", "docstring": "Clips specie/metabolite prefix from id.", "id": "f15889:m3"}
{"signature": "def _f_specie_rev(sid, prefix=\"<STR_LIT>\"):", "body": "return prefix + sid<EOL>", "docstring": "Adds specie/metabolite prefix to id.", "id": "f15889:m4"}
{"signature": "def _create_bound(model, reaction, bound_type, f_replace, units=None,<EOL>flux_udef=None):", "body": "value = getattr(reaction, bound_type)<EOL>if value == config.lower_bound:<EOL><INDENT>return LOWER_BOUND_ID<EOL><DEDENT>elif value == <NUM_LIT:0>:<EOL><INDENT>return ZERO_BOUND_ID<EOL><DEDENT>elif value == config.upper_bound:<EOL><INDENT>return UPPER_BOUND_ID<EOL><DEDENT>elif value == -float(\"<STR_LIT>\"):<EOL><INDENT>return BOUND_MINUS_INF<EOL><DEDENT>elif value == float(\"<STR_LIT>\"):<EOL><INDENT>return BOUND_PLUS_INF<EOL><DEDENT>else:<EOL><INDENT>rid = reaction.id<EOL>if f_replace and F_REACTION_REV in f_replace:<EOL><INDENT>rid = f_replace[F_REACTION_REV](rid)<EOL><DEDENT>pid = rid + \"<STR_LIT:_>\" + bound_type<EOL>_create_parameter(model, pid=pid, value=value, sbo=SBO_FLUX_BOUND,<EOL>units=units, flux_udef=flux_udef)<EOL>return pid<EOL><DEDENT>", "docstring": "Creates bound in model for given reaction.\n\n    Adds the parameters for the bounds to the SBML model.\n\n    Parameters\n    ----------\n    model : libsbml.Model\n        SBML model instance\n    reaction : cobra.core.Reaction\n        Cobra reaction instance from which the bounds are read.\n    bound_type : {LOWER_BOUND, UPPER_BOUND}\n        Type of bound\n    f_replace : dict of id replacement functions\n    units : flux units\n\n    Returns\n    -------\n    Id of bound parameter.", "id": "f15889:m12"}
{"signature": "def _model_to_sbml(cobra_model, f_replace=None, units=True):", "body": "if f_replace is None:<EOL><INDENT>f_replace = {}<EOL><DEDENT>sbml_ns = libsbml.SBMLNamespaces(<NUM_LIT:3>, <NUM_LIT:1>)  <EOL>sbml_ns.addPackageNamespace(\"<STR_LIT>\", <NUM_LIT:2>)  <EOL>doc = libsbml.SBMLDocument(sbml_ns)  <EOL>doc.setPackageRequired(\"<STR_LIT>\", False)<EOL>doc.setSBOTerm(SBO_FBA_FRAMEWORK)<EOL>model = doc.createModel()  <EOL>model_fbc = model.getPlugin(\"<STR_LIT>\")  <EOL>model_fbc.setStrict(True)<EOL>if cobra_model.id is not None:<EOL><INDENT>model.setId(cobra_model.id)<EOL>model.setMetaId(\"<STR_LIT>\" + cobra_model.id)<EOL><DEDENT>else:<EOL><INDENT>model.setMetaId(\"<STR_LIT>\")<EOL><DEDENT>if cobra_model.name is not None:<EOL><INDENT>model.setName(cobra_model.name)<EOL><DEDENT>_sbase_annotations(model, cobra_model.annotation)<EOL>if hasattr(cobra_model, \"<STR_LIT>\"):<EOL><INDENT>meta = cobra_model._sbml<EOL>if \"<STR_LIT>\" in meta:<EOL><INDENT>_sbase_annotations(doc, meta[\"<STR_LIT>\"])<EOL><DEDENT>if \"<STR_LIT>\" in meta:<EOL><INDENT>_sbase_notes_dict(doc, meta[\"<STR_LIT>\"])<EOL><DEDENT>history = libsbml.ModelHistory()  <EOL>if \"<STR_LIT>\" in meta and meta[\"<STR_LIT>\"]:<EOL><INDENT>history.setCreatedDate(meta[\"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>time = datetime.datetime.now()<EOL>timestr = time.strftime('<STR_LIT>')<EOL>date = libsbml.Date(timestr)<EOL>_check(history.setCreatedDate(date), '<STR_LIT>')<EOL>_check(history.setModifiedDate(date), '<STR_LIT>')<EOL><DEDENT>if \"<STR_LIT>\" in meta:<EOL><INDENT>for cobra_creator in meta[\"<STR_LIT>\"]:<EOL><INDENT>creator = libsbml.ModelCreator()  <EOL>if cobra_creator.get(\"<STR_LIT>\", None):<EOL><INDENT>creator.setFamilyName(cobra_creator[\"<STR_LIT>\"])<EOL><DEDENT>if cobra_creator.get(\"<STR_LIT>\", None):<EOL><INDENT>creator.setGivenName(cobra_creator[\"<STR_LIT>\"])<EOL><DEDENT>if cobra_creator.get(\"<STR_LIT>\", 
None):<EOL><INDENT>creator.setOrganisation(cobra_creator[\"<STR_LIT>\"])<EOL><DEDENT>if cobra_creator.get(\"<STR_LIT:email>\", None):<EOL><INDENT>creator.setEmail(cobra_creator[\"<STR_LIT:email>\"])<EOL><DEDENT>_check(history.addCreator(creator),<EOL>\"<STR_LIT>\")<EOL><DEDENT><DEDENT>_check(model.setModelHistory(history), '<STR_LIT>')<EOL><DEDENT>if units:<EOL><INDENT>flux_udef = model.createUnitDefinition()  <EOL>flux_udef.setId(UNITS_FLUX[<NUM_LIT:0>])<EOL>for u in UNITS_FLUX[<NUM_LIT:1>]:<EOL><INDENT>unit = flux_udef.createUnit()  <EOL>unit.setKind(u.kind)<EOL>unit.setExponent(u.exponent)<EOL>unit.setScale(u.scale)<EOL>unit.setMultiplier(u.multiplier)<EOL><DEDENT><DEDENT>if len(cobra_model.reactions) > <NUM_LIT:0>:<EOL><INDENT>min_value = min(cobra_model.reactions.list_attr(\"<STR_LIT>\"))<EOL>max_value = max(cobra_model.reactions.list_attr(\"<STR_LIT>\"))<EOL><DEDENT>else:<EOL><INDENT>min_value = config.lower_bound<EOL>max_value = config.upper_bound<EOL><DEDENT>_create_parameter(model, pid=LOWER_BOUND_ID,<EOL>value=min_value, sbo=SBO_DEFAULT_FLUX_BOUND)<EOL>_create_parameter(model, pid=UPPER_BOUND_ID,<EOL>value=max_value, sbo=SBO_DEFAULT_FLUX_BOUND)<EOL>_create_parameter(model, pid=ZERO_BOUND_ID,<EOL>value=<NUM_LIT:0>, sbo=SBO_DEFAULT_FLUX_BOUND)<EOL>_create_parameter(model, pid=BOUND_MINUS_INF,<EOL>value=-float(\"<STR_LIT>\"), sbo=SBO_FLUX_BOUND)<EOL>_create_parameter(model, pid=BOUND_PLUS_INF,<EOL>value=float(\"<STR_LIT>\"), sbo=SBO_FLUX_BOUND)<EOL>for cid, name in iteritems(cobra_model.compartments):<EOL><INDENT>compartment = model.createCompartment()  <EOL>compartment.setId(cid)<EOL>compartment.setName(name)<EOL>compartment.setConstant(True)<EOL><DEDENT>for metabolite in cobra_model.metabolites:<EOL><INDENT>specie = model.createSpecies()  <EOL>mid = metabolite.id<EOL>if f_replace and F_SPECIE_REV in f_replace:<EOL><INDENT>mid = 
f_replace[F_SPECIE_REV](mid)<EOL><DEDENT>specie.setId(mid)<EOL>specie.setConstant(False)<EOL>specie.setBoundaryCondition(False)<EOL>specie.setHasOnlySubstanceUnits(False)<EOL>specie.setName(metabolite.name)<EOL>specie.setCompartment(metabolite.compartment)<EOL>s_fbc = specie.getPlugin(\"<STR_LIT>\")  <EOL>if metabolite.charge is not None:<EOL><INDENT>s_fbc.setCharge(metabolite.charge)<EOL><DEDENT>if metabolite.formula is not None:<EOL><INDENT>s_fbc.setChemicalFormula(metabolite.formula)<EOL><DEDENT>_sbase_annotations(specie, metabolite.annotation)<EOL>_sbase_notes_dict(specie, metabolite.notes)<EOL><DEDENT>for cobra_gene in cobra_model.genes:<EOL><INDENT>gp = model_fbc.createGeneProduct()  <EOL>gid = cobra_gene.id<EOL>if f_replace and F_GENE_REV in f_replace:<EOL><INDENT>gid = f_replace[F_GENE_REV](gid)<EOL><DEDENT>gp.setId(gid)<EOL>gname = cobra_gene.name<EOL>if gname is None or len(gname) == <NUM_LIT:0>:<EOL><INDENT>gname = gid<EOL><DEDENT>gp.setName(gname)<EOL>gp.setLabel(gid)<EOL>_sbase_annotations(gp, cobra_gene.annotation)<EOL>_sbase_notes_dict(gp, cobra_gene.notes)<EOL><DEDENT>objective = model_fbc.createObjective()  <EOL>objective.setId(\"<STR_LIT>\")<EOL>objective.setType(SHORT_LONG_DIRECTION[cobra_model.objective.direction])<EOL>model_fbc.setActiveObjectiveId(\"<STR_LIT>\")<EOL>reaction_coefficients = linear_reaction_coefficients(cobra_model)<EOL>for cobra_reaction in cobra_model.reactions:<EOL><INDENT>rid = cobra_reaction.id<EOL>if f_replace and F_REACTION_REV in f_replace:<EOL><INDENT>rid = f_replace[F_REACTION_REV](rid)<EOL><DEDENT>reaction = model.createReaction()  <EOL>reaction.setId(rid)<EOL>reaction.setName(cobra_reaction.name)<EOL>reaction.setFast(False)<EOL>reaction.setReversible((cobra_reaction.lower_bound < <NUM_LIT:0>))<EOL>_sbase_annotations(reaction, cobra_reaction.annotation)<EOL>_sbase_notes_dict(reaction, cobra_reaction.notes)<EOL>for metabolite, stoichiometry in iteritems(cobra_reaction._metabolites):  <EOL><INDENT>sid = 
metabolite.id<EOL>if f_replace and F_SPECIE_REV in f_replace:<EOL><INDENT>sid = f_replace[F_SPECIE_REV](sid)<EOL><DEDENT>if stoichiometry < <NUM_LIT:0>:<EOL><INDENT>sref = reaction.createReactant()  <EOL>sref.setSpecies(sid)<EOL>sref.setStoichiometry(-stoichiometry)<EOL>sref.setConstant(True)<EOL><DEDENT>else:<EOL><INDENT>sref = reaction.createProduct()  <EOL>sref.setSpecies(sid)<EOL>sref.setStoichiometry(stoichiometry)<EOL>sref.setConstant(True)<EOL><DEDENT><DEDENT>r_fbc = reaction.getPlugin(\"<STR_LIT>\")  <EOL>r_fbc.setLowerFluxBound(_create_bound(model, cobra_reaction,<EOL>\"<STR_LIT>\",<EOL>f_replace=f_replace, units=units,<EOL>flux_udef=flux_udef))<EOL>r_fbc.setUpperFluxBound(_create_bound(model, cobra_reaction,<EOL>\"<STR_LIT>\",<EOL>f_replace=f_replace, units=units,<EOL>flux_udef=flux_udef))<EOL>gpr = cobra_reaction.gene_reaction_rule<EOL>if gpr is not None and len(gpr) > <NUM_LIT:0>:<EOL><INDENT>if f_replace and F_GENE_REV in f_replace:<EOL><INDENT>gpr = gpr.replace('<STR_LIT:(>', '<STR_LIT>')<EOL>gpr = gpr.replace('<STR_LIT:)>', '<STR_LIT>')<EOL>tokens = gpr.split('<STR_LIT:U+0020>')<EOL>for k in range(len(tokens)):<EOL><INDENT>if tokens[k] not in ['<STR_LIT:U+0020>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:(>', '<STR_LIT:)>']:<EOL><INDENT>tokens[k] = f_replace[F_GENE_REV](tokens[k])<EOL><DEDENT><DEDENT>gpr_new = \"<STR_LIT:U+0020>\".join(tokens)<EOL><DEDENT>gpa = r_fbc.createGeneProductAssociation()  <EOL>gpa.setAssociation(gpr_new)<EOL><DEDENT>if reaction_coefficients.get(cobra_reaction, <NUM_LIT:0>) != <NUM_LIT:0>:<EOL><INDENT>flux_obj = objective.createFluxObjective()  <EOL>flux_obj.setReaction(rid)<EOL>flux_obj.setCoefficient(cobra_reaction.objective_coefficient)<EOL><DEDENT><DEDENT>if len(cobra_model.groups) > <NUM_LIT:0>:<EOL><INDENT>doc.enablePackage(<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\", True)<EOL>doc.setPackageRequired(\"<STR_LIT>\", False)<EOL>model_group = model.getPlugin(\"<STR_LIT>\")  <EOL>for cobra_group in 
cobra_model.groups:<EOL><INDENT>group = model_group.createGroup()  <EOL>group.setId(cobra_group.id)<EOL>group.setName(cobra_group.name)<EOL>group.setKind(cobra_group.kind)<EOL>_sbase_notes_dict(group, cobra_group.notes)<EOL>_sbase_annotations(group, cobra_group.annotation)<EOL>for cobra_member in cobra_group.members:<EOL><INDENT>member = group.createMember()  <EOL>mid = cobra_member.id<EOL>m_type = str(type(cobra_member))<EOL>if \"<STR_LIT>\" in m_type:<EOL><INDENT>if f_replace and F_REACTION_REV in f_replace:<EOL><INDENT>mid = f_replace[F_REACTION_REV](mid)<EOL><DEDENT><DEDENT>if \"<STR_LIT>\" in m_type:<EOL><INDENT>if f_replace and F_SPECIE_REV in f_replace:<EOL><INDENT>mid = f_replace[F_SPECIE_REV](mid)<EOL><DEDENT><DEDENT>if \"<STR_LIT>\" in m_type:<EOL><INDENT>if f_replace and F_GENE_REV in f_replace:<EOL><INDENT>mid = f_replace[F_GENE_REV](mid)<EOL><DEDENT><DEDENT>member.setIdRef(mid)<EOL>if cobra_member.name and len(cobra_member.name) > <NUM_LIT:0>:<EOL><INDENT>member.setName(cobra_member.name)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return doc<EOL>", "docstring": "Convert Cobra model to SBMLDocument.\n\n    Parameters\n    ----------\n    cobra_model : cobra.core.Model\n        Cobra model instance\n    f_replace : dict of replacement functions\n        Replacement to apply on identifiers.\n    units : boolean\n        Should the FLUX_UNITS be written in the SBMLDocument.\n\n    Returns\n    -------\n    libsbml.SBMLDocument", "id": "f15889:m11"}
{"signature": "def _check(result):", "body": "if result[\"<STR_LIT:success>\"] is not True:<EOL><INDENT>raise RuntimeError(result[\"<STR_LIT:content>\"][\"<STR_LIT>\"])<EOL><DEDENT>", "docstring": "ensure success of a pymatbridge operation", "id": "f15890:m7"}
{"signature": "def load_matlab_model(infile_path, variable_name=None, inf=inf):", "body": "if not scipy_io:<EOL><INDENT>raise ImportError('<STR_LIT>')<EOL><DEDENT>data = scipy_io.loadmat(infile_path)<EOL>possible_names = []<EOL>if variable_name is None:<EOL><INDENT>meta_vars = {\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"}<EOL>possible_names = sorted(i for i in data if i not in meta_vars)<EOL>if len(possible_names) == <NUM_LIT:1>:<EOL><INDENT>variable_name = possible_names[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>if variable_name is not None:<EOL><INDENT>return from_mat_struct(data[variable_name], model_id=variable_name,<EOL>inf=inf)<EOL><DEDENT>for possible_name in possible_names:<EOL><INDENT>try:<EOL><INDENT>return from_mat_struct(data[possible_name], model_id=possible_name,<EOL>inf=inf)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>raise IOError(\"<STR_LIT>\")<EOL>", "docstring": "Load a cobra model stored as a .mat file\n\n    Parameters\n    ----------\n    infile_path: str\n        path to the file to to read\n    variable_name: str, optional\n        The variable name of the model in the .mat file. If this is not\n        specified, then the first MATLAB variable which looks like a COBRA\n        model will be used\n    inf: value\n        The value to use for infinite bounds. Some solvers do not handle\n        infinite values so for using those, set this to a high numeric value.\n\n    Returns\n    -------\n    cobra.core.Model.Model:\n        The resulting cobra model", "id": "f15890:m2"}
{"signature": "def create_mat_dict(model):", "body": "rxns = model.reactions<EOL>mets = model.metabolites<EOL>mat = OrderedDict()<EOL>mat[\"<STR_LIT>\"] = _cell([met_id for met_id in create_mat_metabolite_id(model)])<EOL>mat[\"<STR_LIT>\"] = _cell(mets.list_attr(\"<STR_LIT:name>\"))<EOL>mat[\"<STR_LIT>\"] = _cell([str(m.formula) for m in mets])<EOL>try:<EOL><INDENT>mat[\"<STR_LIT>\"] = array(mets.list_attr(\"<STR_LIT>\")) * <NUM_LIT:1.><EOL><DEDENT>except TypeError:<EOL><INDENT>pass<EOL><DEDENT>mat[\"<STR_LIT>\"] = _cell(model.genes.list_attr(\"<STR_LIT:id>\"))<EOL>rxn_gene = scipy_sparse.dok_matrix((len(model.reactions),<EOL>len(model.genes)))<EOL>if min(rxn_gene.shape) > <NUM_LIT:0>:<EOL><INDENT>for i, reaction in enumerate(model.reactions):<EOL><INDENT>for gene in reaction.genes:<EOL><INDENT>rxn_gene[i, model.genes.index(gene)] = <NUM_LIT:1><EOL><DEDENT><DEDENT>mat[\"<STR_LIT>\"] = rxn_gene<EOL><DEDENT>mat[\"<STR_LIT>\"] = _cell(rxns.list_attr(\"<STR_LIT>\"))<EOL>mat[\"<STR_LIT>\"] = _cell(rxns.list_attr(\"<STR_LIT:id>\"))<EOL>mat[\"<STR_LIT>\"] = _cell(rxns.list_attr(\"<STR_LIT:name>\"))<EOL>mat[\"<STR_LIT>\"] = _cell(rxns.list_attr(\"<STR_LIT>\"))<EOL>stoich_mat = create_stoichiometric_matrix(model)<EOL>mat[\"<STR_LIT:S>\"] = stoich_mat if stoich_mat is not None else [[]]<EOL>mat[\"<STR_LIT>\"] = array(rxns.list_attr(\"<STR_LIT>\")) * <NUM_LIT:1.><EOL>mat[\"<STR_LIT>\"] = array(rxns.list_attr(\"<STR_LIT>\")) * <NUM_LIT:1.><EOL>mat[\"<STR_LIT:b>\"] = array(mets.list_attr(\"<STR_LIT>\")) * <NUM_LIT:1.><EOL>mat[\"<STR_LIT:c>\"] = array(rxns.list_attr(\"<STR_LIT>\")) * <NUM_LIT:1.><EOL>mat[\"<STR_LIT>\"] = array(rxns.list_attr(\"<STR_LIT>\")) * <NUM_LIT:1><EOL>mat[\"<STR_LIT:description>\"] = str(model.id)<EOL>return mat<EOL>", "docstring": "create a dict mapping model attributes to arrays", "id": "f15890:m5"}
{"signature": "def from_yaml(document):", "body": "content = StringIO(document)<EOL>return model_from_dict(yaml.load(content))<EOL>", "docstring": "Load a cobra model from a YAML document.\n\nParameters\n----------\ndocument : str\n    The YAML document representation of a cobra model.\n\nReturns\n-------\ncobra.Model\n    The cobra model as represented in the YAML document.\n\nSee Also\n--------\nload_yaml_model : Load directly from a file.", "id": "f15891:m1"}
{"signature": "def to_yaml(model, sort=False, **kwargs):", "body": "obj = model_to_dict(model, sort=sort)<EOL>obj[\"<STR_LIT:version>\"] = YAML_SPEC<EOL>return yaml.dump(obj, **kwargs)<EOL>", "docstring": "Return the model as a YAML document.\n\n``kwargs`` are passed on to ``yaml.dump``.\n\nParameters\n----------\nmodel : cobra.Model\n    The cobra model to represent.\nsort : bool, optional\n    Whether to sort the metabolites, reactions, and genes or maintain the\n    order defined in the model.\n\nReturns\n-------\nstr\n    String representation of the cobra model as a YAML document.\n\nSee Also\n--------\nsave_yaml_model : Write directly to a file.\nruamel.yaml.dump : Base function.", "id": "f15891:m0"}
{"signature": "def save_json_model(model, filename, sort=False, pretty=False, **kwargs):", "body": "obj = model_to_dict(model, sort=sort)<EOL>obj[u\"<STR_LIT:version>\"] = JSON_SPEC<EOL>if pretty:<EOL><INDENT>dump_opts = {<EOL>\"<STR_LIT>\": <NUM_LIT:4>, \"<STR_LIT>\": (\"<STR_LIT:U+002C>\", \"<STR_LIT>\"), \"<STR_LIT>\": True,<EOL>\"<STR_LIT>\": False}<EOL><DEDENT>else:<EOL><INDENT>dump_opts = {<EOL>\"<STR_LIT>\": <NUM_LIT:0>, \"<STR_LIT>\": (\"<STR_LIT:U+002C>\", \"<STR_LIT::>\"), \"<STR_LIT>\": False,<EOL>\"<STR_LIT>\": False}<EOL><DEDENT>dump_opts.update(**kwargs)<EOL>if isinstance(filename, string_types):<EOL><INDENT>with open(filename, \"<STR_LIT:w>\") as file_handle:<EOL><INDENT>json.dump(obj, file_handle, **dump_opts)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>json.dump(obj, filename, **dump_opts)<EOL><DEDENT>", "docstring": "Write the cobra model to a file in JSON format.\n\n``kwargs`` are passed on to ``json.dump``.\n\nParameters\n----------\nmodel : cobra.Model\n    The cobra model to represent.\nfilename : str or file-like\n    File path or descriptor that the JSON representation should be\n    written to.\nsort : bool, optional\n    Whether to sort the metabolites, reactions, and genes or maintain the\n    order defined in the model.\npretty : bool, optional\n    Whether to format the JSON more compactly (default) or in a more\n    verbose but easier to read fashion. Can be partially overwritten by the\n    ``kwargs``.\n\nSee Also\n--------\nto_json : Return a string representation.\njson.dump : Base function.", "id": "f15892:m2"}
{"signature": "def from_json(document):", "body": "return model_from_dict(json.loads(document))<EOL>", "docstring": "Load a cobra model from a JSON document.\n\nParameters\n----------\ndocument : str\n    The JSON document representation of a cobra model.\n\nReturns\n-------\ncobra.Model\n    The cobra model as represented in the JSON document.\n\nSee Also\n--------\nload_json_model : Load directly from a file.", "id": "f15892:m1"}
{"signature": "def add_members(self, new_members):", "body": "if isinstance(new_members, string_types) orhasattr(new_members, \"<STR_LIT:id>\"):<EOL><INDENT>warn(\"<STR_LIT>\")<EOL>new_members = [new_members]<EOL><DEDENT>self._members.update(new_members)<EOL>", "docstring": "Add objects to the group.\n\nParameters\n----------\nnew_members : list\n    A list of cobrapy objects to add to the group.", "id": "f15893:c0:m5"}
{"signature": "def remove_members(self, to_remove):", "body": "if isinstance(to_remove, string_types) orhasattr(to_remove, \"<STR_LIT:id>\"):<EOL><INDENT>warn(\"<STR_LIT>\")<EOL>to_remove = [to_remove]<EOL><DEDENT>self._members.difference_update(to_remove)<EOL>", "docstring": "Remove objects from the group.\n\nParameters\n----------\nto_remove : list\n    A list of cobra objects to remove from the group", "id": "f15893:c0:m6"}
{"signature": "def __add__(self, other):", "body": "total = DictList()<EOL>total.extend(self)<EOL>total.extend(other)<EOL>return total<EOL>", "docstring": "x.__add__(y) <==> x + y\n\n        Parameters\n        ----------\n        other : iterable\n            other must contain only unique id's which do not intersect\n            with self", "id": "f15894:c0:m15"}
{"signature": "def __init__(self, *args):", "body": "if len(args) > <NUM_LIT:2>:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % len(args))<EOL><DEDENT>super(DictList, self).__init__(self)<EOL>self._dict = {}<EOL>if len(args) == <NUM_LIT:1>:<EOL><INDENT>other = args[<NUM_LIT:0>]<EOL>if isinstance(other, DictList):<EOL><INDENT>list.extend(self, other)<EOL>self._dict = other._dict.copy()<EOL><DEDENT>else:<EOL><INDENT>self.extend(other)<EOL><DEDENT><DEDENT>", "docstring": "Instantiate a combined dict and list.\n\n        Parameters\n        ----------\n        args : iterable\n            iterable as single argument to create new DictList from", "id": "f15894:c0:m0"}
{"signature": "def _check(self, id):", "body": "if id in self._dict:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % str(id))<EOL><DEDENT>", "docstring": "make sure duplicate id's are not added.\n        This function is called before adding in elements.", "id": "f15894:c0:m2"}
{"signature": "def extend(self, iterable):", "body": "<EOL>if not hasattr(self, \"<STR_LIT>\") or self._dict is None:<EOL><INDENT>self._dict = {}<EOL><DEDENT>_dict = self._dict<EOL>current_length = len(self)<EOL>list.extend(self, iterable)<EOL>for i, obj in enumerate(islice(self, current_length, None),<EOL>current_length):<EOL><INDENT>the_id = obj.id<EOL>if the_id not in _dict:<EOL><INDENT>_dict[the_id] = i<EOL><DEDENT>else:<EOL><INDENT>self = self[:current_length]<EOL>self._check(the_id)<EOL>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (str(the_id), i))<EOL><DEDENT><DEDENT>", "docstring": "extend list by appending elements from the iterable", "id": "f15894:c0:m11"}
{"signature": "def __sub__(self, other):", "body": "total = DictList()<EOL>total.extend(self)<EOL>for item in other:<EOL><INDENT>total.remove(item)<EOL><DEDENT>return total<EOL>", "docstring": "x.__sub__(y) <==> x - y\n\n        Parameters\n        ----------\n        other : iterable\n            other must contain only unique id's present in the list", "id": "f15894:c0:m13"}
{"signature": "def append(self, object):", "body": "the_id = object.id<EOL>self._check(the_id)<EOL>self._dict[the_id] = len(self)<EOL>list.append(self, object)<EOL>", "docstring": "append object to end", "id": "f15894:c0:m9"}
{"signature": "def __setstate__(self, state):", "body": "self._generate_index()<EOL>", "docstring": "sets internal state\n\n        Ignore the passed in state and recalculate it. This is only for\n        compatibility with older pickles which did not correctly specify\n        the initialization class", "id": "f15894:c0:m19"}
{"signature": "def __isub__(self, other):", "body": "for item in other:<EOL><INDENT>self.remove(item)<EOL><DEDENT>return self<EOL>", "docstring": "x.__sub__(y) <==> x -= y\n\n        Parameters\n        ----------\n        other : iterable\n            other must contain only unique id's present in the list", "id": "f15894:c0:m14"}
{"signature": "def __getstate__(self):", "body": "return {\"<STR_LIT>\": self._dict}<EOL>", "docstring": "gets internal state\n\n        This is only provided for backwards compatibility so older\n        versions of cobrapy can load pickles generated with cobrapy. In\n        reality, the \"_dict\" state is ignored when loading a pickle", "id": "f15894:c0:m18"}
{"signature": "def insert(self, index, object):", "body": "self._check(object.id)<EOL>list.insert(self, index, object)<EOL>_dict = self._dict<EOL>for i, j in iteritems(_dict):<EOL><INDENT>if j >= index:<EOL><INDENT>_dict[i] = j + <NUM_LIT:1><EOL><DEDENT><DEDENT>_dict[object.id] = index<EOL>", "docstring": "insert object before index", "id": "f15894:c0:m23"}
{"signature": "def __iadd__(self, other):", "body": "self.extend(other)<EOL>return self<EOL>", "docstring": "x.__iadd__(y) <==> x += y\n\n        Parameters\n        ----------\n        other : iterable\n            other must contain only unique id's whcih do not intersect\n            with self", "id": "f15894:c0:m16"}
{"signature": "def get_by_id(self, id):", "body": "return list.__getitem__(self, self._dict[id])<EOL>", "docstring": "return the element with a matching id", "id": "f15894:c0:m4"}
{"signature": "@property<EOL><INDENT>def forward_variable(self):<DEDENT>", "body": "if self.model is not None:<EOL><INDENT>return self.model.variables[self.id]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "An optlang variable representing the forward flux\n\n        Returns\n        -------\n        optlang.interface.Variable\n            An optlang variable for the forward flux or None if reaction is\n            not associated with a model.", "id": "f15895:c0:m4"}
{"signature": "def _dissociate_gene(self, cobra_gene):", "body": "self._genes.discard(cobra_gene)<EOL>cobra_gene._reaction.discard(self)<EOL>", "docstring": "Dissociates a cobra.Gene object with a cobra.Reaction.\n\n        Parameters\n        ----------\n        cobra_gene : cobra.core.Gene.Gene", "id": "f15895:c0:m56"}
{"signature": "@property<EOL><INDENT>def flux(self):<DEDENT>", "body": "try:<EOL><INDENT>check_solver_status(self._model.solver.status)<EOL>return self.forward_variable.primal - self.reverse_variable.primal<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\".format(self.id))<EOL><DEDENT>except (RuntimeError, OptimizationError) as err:<EOL><INDENT>raise_with_traceback(err)<EOL><DEDENT>except Exception as err:<EOL><INDENT>raise_from(OptimizationError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(str(err))), err)<EOL><DEDENT>", "docstring": "The flux value in the most recent solution.\n\nFlux is the primal value of the corresponding variable in the model.\n\nWarnings\n--------\n* Accessing reaction fluxes through a `Solution` object is the safer,\n  preferred, and only guaranteed to be correct way. You can see how to\n  do so easily in the examples.\n* Reaction flux is retrieved from the currently defined\n  `self._model.solver`. The solver status is checked but there are no\n  guarantees that the current solver state is the one you are looking\n  for.\n* If you modify the underlying model after an optimization, you will\n  retrieve the old optimization values.\n\nRaises\n------\nRuntimeError\n    If the underlying model was never optimized beforehand or the\n    reaction is not part of a model.\nOptimizationError\n    If the solver status is anything other than 'optimal'.\nAssertionError\n    If the flux value is not within the bounds.\n\nExamples\n--------\n>>> import cobra.test\n>>> model = cobra.test.create_test_model(\"textbook\")\n>>> solution = model.optimize()\n>>> model.reactions.PFK.flux\n7.477381962160283\n>>> solution.fluxes.PFK\n7.4773819621602833", "id": "f15895:c0:m18"}
{"signature": "def get_coefficient(self, metabolite_id):", "body": "if isinstance(metabolite_id, Metabolite):<EOL><INDENT>return self._metabolites[metabolite_id]<EOL><DEDENT>_id_to_metabolites = {m.id: m for m in self._metabolites}<EOL>return self._metabolites[_id_to_metabolites[metabolite_id]]<EOL>", "docstring": "Return the stoichiometric coefficient of a metabolite.\n\nParameters\n----------\nmetabolite_id : str or cobra.Metabolite", "id": "f15895:c0:m45"}
{"signature": "@property<EOL><INDENT>def flux_expression(self):<DEDENT>", "body": "if self.model is not None:<EOL><INDENT>return <NUM_LIT:1.> * self.forward_variable - <NUM_LIT:1.> * self.reverse_variable<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Forward flux expression\n\n        Returns\n        -------\n        sympy expression\n            The expression representing the the forward flux (if associated\n            with model), otherwise None. Representing the net flux if\n            model.reversible_encoding == 'unsplit' or None if reaction is\n            not associated with a model", "id": "f15895:c0:m3"}
{"signature": "def build_reaction_from_string(self, reaction_str, verbose=True,<EOL>fwd_arrow=None, rev_arrow=None,<EOL>reversible_arrow=None, term_split=\"<STR_LIT:+>\"):", "body": "<EOL>forward_arrow_finder = _forward_arrow_finder if fwd_arrow is Noneelse re.compile(re.escape(fwd_arrow))<EOL>reverse_arrow_finder = _reverse_arrow_finder if rev_arrow is Noneelse re.compile(re.escape(rev_arrow))<EOL>reversible_arrow_finder = _reversible_arrow_finderif reversible_arrow is Noneelse re.compile(re.escape(reversible_arrow))<EOL>if self._model is None:<EOL><INDENT>warn(\"<STR_LIT>\")<EOL>model = None<EOL><DEDENT>else:<EOL><INDENT>model = self._model<EOL><DEDENT>found_compartments = compartment_finder.findall(reaction_str)<EOL>if len(found_compartments) == <NUM_LIT:1>:<EOL><INDENT>compartment = found_compartments[<NUM_LIT:0>]<EOL>reaction_str = compartment_finder.sub(\"<STR_LIT>\", reaction_str)<EOL><DEDENT>else:<EOL><INDENT>compartment = \"<STR_LIT>\"<EOL><DEDENT>arrow_match = reversible_arrow_finder.search(reaction_str)<EOL>if arrow_match is not None:<EOL><INDENT>self.lower_bound = -<NUM_LIT:1000><EOL>self.upper_bound = <NUM_LIT:1000><EOL><DEDENT>else:  <EOL><INDENT>arrow_match = forward_arrow_finder.search(reaction_str)<EOL>if arrow_match is not None:<EOL><INDENT>self.upper_bound = <NUM_LIT:1000><EOL>self.lower_bound = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>arrow_match = reverse_arrow_finder.search(reaction_str)<EOL>if arrow_match is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" %<EOL>reaction_str)<EOL><DEDENT>else:<EOL><INDENT>self.upper_bound = <NUM_LIT:0><EOL>self.lower_bound = -<NUM_LIT:1000><EOL><DEDENT><DEDENT><DEDENT>reactant_str = reaction_str[:arrow_match.start()].strip()<EOL>product_str = reaction_str[arrow_match.end():].strip()<EOL>self.subtract_metabolites(self.metabolites, combine=True)<EOL>for substr, factor in ((reactant_str, -<NUM_LIT:1>), (product_str, <NUM_LIT:1>)):<EOL><INDENT>if len(substr) == <NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>for 
term in substr.split(term_split):<EOL><INDENT>term = term.strip()<EOL>if term.lower() == \"<STR_LIT>\":<EOL><INDENT>continue<EOL><DEDENT>if \"<STR_LIT:U+0020>\" in term:<EOL><INDENT>num_str, met_id = term.split()<EOL>num = float(num_str.lstrip(\"<STR_LIT:(>\").rstrip(\"<STR_LIT:)>\")) * factor<EOL><DEDENT>else:<EOL><INDENT>met_id = term<EOL>num = factor<EOL><DEDENT>met_id += compartment<EOL>try:<EOL><INDENT>met = model.metabolites.get_by_id(met_id)<EOL><DEDENT>except KeyError:<EOL><INDENT>if verbose:<EOL><INDENT>print(\"<STR_LIT>\" % met_id)<EOL><DEDENT>met = Metabolite(met_id)<EOL><DEDENT>self.add_metabolites({met: num})<EOL><DEDENT><DEDENT>", "docstring": "Builds reaction from reaction equation reaction_str using parser\n\n        Takes a string and using the specifications supplied in the optional\n        arguments infers a set of metabolites, metabolite compartments and\n        stoichiometries for the reaction.  It also infers the reversibility\n        of the reaction from the reaction arrow.\n\n        Changes to the associated model are reverted upon exit when using\n        the model as a context.\n\n        Parameters\n        ----------\n        reaction_str : string\n            a string containing a reaction formula (equation)\n        verbose: bool\n            setting verbosity of function\n        fwd_arrow : re.compile\n            for forward irreversible reaction arrows\n        rev_arrow : re.compile\n            for backward irreversible reaction arrows\n        reversible_arrow : re.compile\n            for reversible reaction arrows\n        term_split : string\n            dividing individual metabolite entries", "id": "f15895:c0:m58"}
{"signature": "def __setstate__(self, state):", "body": "<EOL>if \"<STR_LIT>\" in state:<EOL><INDENT>state.pop(\"<STR_LIT>\")<EOL><DEDENT>if \"<STR_LIT>\" in state:<EOL><INDENT>state[\"<STR_LIT>\"] = state.pop(\"<STR_LIT>\")<EOL><DEDENT>if \"<STR_LIT>\" in state:<EOL><INDENT>state['<STR_LIT>'] = state.pop('<STR_LIT>')<EOL><DEDENT>if \"<STR_LIT>\" in state:<EOL><INDENT>state['<STR_LIT>'] = state.pop('<STR_LIT>')<EOL><DEDENT>self.__dict__.update(state)<EOL>for x in state['<STR_LIT>']:<EOL><INDENT>setattr(x, '<STR_LIT>', self._model)<EOL>x._reaction.add(self)<EOL><DEDENT>for x in state['<STR_LIT>']:<EOL><INDENT>setattr(x, '<STR_LIT>', self._model)<EOL>x._reaction.add(self)<EOL><DEDENT>", "docstring": "Probably not necessary to set _model as the cobra.Model that\n        contains self sets the _model attribute for all metabolites and genes\n        in the reaction.\n\n        However, to increase performance speed we do want to let the metabolite\n        and gene know that they are employed in this reaction", "id": "f15895:c0:m35"}
{"signature": "def add_metabolites(self, metabolites_to_add, combine=True,<EOL>reversibly=True):", "body": "old_coefficients = self.metabolites<EOL>new_metabolites = []<EOL>_id_to_metabolites = dict([(x.id, x) for x in self._metabolites])<EOL>for metabolite, coefficient in iteritems(metabolites_to_add):<EOL><INDENT>if isinstance(metabolite, Metabolite):<EOL><INDENT>if ((metabolite.model is not None) and<EOL>(metabolite.model is not self._model)):<EOL><INDENT>metabolite = metabolite.copy()<EOL><DEDENT><DEDENT>met_id = str(metabolite)<EOL>if met_id in _id_to_metabolites:<EOL><INDENT>reaction_metabolite = _id_to_metabolites[met_id]<EOL>if combine:<EOL><INDENT>self._metabolites[reaction_metabolite] += coefficient<EOL><DEDENT>else:<EOL><INDENT>self._metabolites[reaction_metabolite] = coefficient<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if self._model:<EOL><INDENT>try:<EOL><INDENT>metabolite =self._model.metabolites.get_by_id(met_id)<EOL><DEDENT>except KeyError as e:<EOL><INDENT>if isinstance(metabolite, Metabolite):<EOL><INDENT>new_metabolites.append(metabolite)<EOL><DEDENT>else:<EOL><INDENT>raise e<EOL><DEDENT><DEDENT><DEDENT>elif isinstance(metabolite, string_types):<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% self.id)<EOL><DEDENT>self._metabolites[metabolite] = coefficient<EOL>metabolite._reaction.add(self)<EOL><DEDENT><DEDENT>model = self.model<EOL>if model is not None:<EOL><INDENT>model.add_metabolites(new_metabolites)<EOL>for metabolite, coefficient in self._metabolites.items():<EOL><INDENT>model.constraints[<EOL>metabolite.id].set_linear_coefficients(<EOL>{self.forward_variable: coefficient,<EOL>self.reverse_variable: -coefficient<EOL>})<EOL><DEDENT><DEDENT>for metabolite, the_coefficient in list(self._metabolites.items()):<EOL><INDENT>if the_coefficient == <NUM_LIT:0>:<EOL><INDENT>metabolite._reaction.remove(self)<EOL>self._metabolites.pop(metabolite)<EOL><DEDENT><DEDENT>context = get_context(self)<EOL>if 
context and reversibly:<EOL><INDENT>if combine:<EOL><INDENT>context(partial(<EOL>self.subtract_metabolites, metabolites_to_add,<EOL>combine=True, reversibly=False))<EOL><DEDENT>else:<EOL><INDENT>mets_to_reset = {<EOL>key: old_coefficients[model.metabolites.get_by_any(key)[<NUM_LIT:0>]]<EOL>for key in iterkeys(metabolites_to_add)}<EOL>context(partial(<EOL>self.add_metabolites, mets_to_reset,<EOL>combine=False, reversibly=False))<EOL><DEDENT><DEDENT>", "docstring": "Add metabolites and stoichiometric coefficients to the reaction.\n        If the final coefficient for a metabolite is 0 then it is removed\n        from the reaction.\n\n        The change is reverted upon exit when using the model as a context.\n\n        Parameters\n        ----------\n        metabolites_to_add : dict\n            Dictionary with metabolite objects or metabolite identifiers as\n            keys and coefficients as values. If keys are strings (name of a\n            metabolite) the reaction must already be part of a model and a\n            metabolite with the given name must exist in the model.\n\n        combine : bool\n            Describes behavior a metabolite already exists in the reaction.\n            True causes the coefficients to be added.\n            False causes the coefficient to be replaced.\n\n        reversibly : bool\n            Whether to add the change to the context to make the change\n            reversibly or not (primarily intended for internal use).", "id": "f15895:c0:m47"}
{"signature": "@property<EOL><INDENT>def objective_coefficient(self):<DEDENT>", "body": "return linear_reaction_coefficients(self.model, [self]).get(self, <NUM_LIT:0>)<EOL>", "docstring": "Get the coefficient for this reaction in a linear\n        objective (float)\n\n        Assuming that the objective of the associated model is summation of\n        fluxes from a set of reactions, the coefficient for each reaction\n        can be obtained individually using this property. A more general way\n        is to use the `model.objective` property directly.", "id": "f15895:c0:m6"}
{"signature": "@property<EOL><INDENT>def upper_bound(self):<DEDENT>", "body": "return self._upper_bound<EOL>", "docstring": "Get or set the upper bound\n\n        Setting the upper bound (float) will also adjust the associated optlang\n        variables associated with the reaction. Infeasible combinations,\n        such as a upper bound lower than the current lower bound will\n        update the other bound.\n\n        When using a `HistoryManager` context, this attribute can be set\n        temporarily, reversed when the exiting the context.", "id": "f15895:c0:m14"}
{"signature": "def delete(self, remove_orphans=False):", "body": "warn(\"<STR_LIT>\",<EOL>DeprecationWarning)<EOL>self.remove_from_model(remove_orphans=remove_orphans)<EOL>", "docstring": "Removes the reaction from a model.\n\n        This removes all associations between a reaction the associated\n        model, metabolites and genes.\n\n        The change is reverted upon exit when using the model as a context.\n\n        Deprecated, use `reaction.remove_from_model` instead.\n\n        Parameters\n        ----------\n        remove_orphans : bool\n            Remove orphaned genes and metabolites from the model as well", "id": "f15895:c0:m34"}
{"signature": "@property<EOL><INDENT>def reduced_cost(self):<DEDENT>", "body": "try:<EOL><INDENT>check_solver_status(self._model.solver.status)<EOL>return self.forward_variable.dual - self.reverse_variable.dual<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\".format(self.id))<EOL><DEDENT>except (RuntimeError, OptimizationError) as err:<EOL><INDENT>raise_with_traceback(err)<EOL><DEDENT>except Exception as err:<EOL><INDENT>raise_from(OptimizationError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(str(err))), err)<EOL><DEDENT>", "docstring": "The reduced cost in the most recent solution.\n\nReduced cost is the dual value of the corresponding variable in the\nmodel.\n\nWarnings\n--------\n* Accessing reduced costs through a `Solution` object is the safer,\n  preferred, and only guaranteed to be correct way. You can see how to\n  do so easily in the examples.\n* Reduced cost is retrieved from the currently defined\n  `self._model.solver`. The solver status is checked but there are no\n  guarantees that the current solver state is the one you are looking\n  for.\n* If you modify the underlying model after an optimization, you will\n  retrieve the old optimization values.\n\nRaises\n------\nRuntimeError\n    If the underlying model was never optimized beforehand or the\n    reaction is not part of a model.\nOptimizationError\n    If the solver status is anything other than 'optimal'.\n\nExamples\n--------\n>>> import cobra.test\n>>> model = cobra.test.create_test_model(\"textbook\")\n>>> solution = model.optimize()\n>>> model.reactions.PFK.reduced_cost\n-8.673617379884035e-18\n>>> solution.reduced_costs.PFK\n-8.6736173798840355e-18", "id": "f15895:c0:m19"}
{"signature": "def __imul__(self, coefficient):", "body": "self._metabolites = {<EOL>met: value * coefficient<EOL>for met, value in iteritems(self._metabolites)}<EOL>if coefficient < <NUM_LIT:0>:<EOL><INDENT>self.bounds = (-self.upper_bound, -self.lower_bound)<EOL><DEDENT>if self._model:<EOL><INDENT>self._model._populate_solver([self])<EOL><DEDENT>context = get_context(self)<EOL>if context:<EOL><INDENT>context(partial(self._model._populate_solver, [self]))<EOL>context(partial(self.__imul__, <NUM_LIT:1.>/coefficient))<EOL><DEDENT>return self<EOL>", "docstring": "Scale coefficients in a reaction by a given value\n\n        E.g. A -> B becomes 2A -> 2B.\n\n        If coefficient is less than zero, the reaction is reversed and the\n        bounds are swapped.", "id": "f15895:c0:m41"}
{"signature": "@property<EOL><INDENT>def reverse_variable(self):<DEDENT>", "body": "if self.model is not None:<EOL><INDENT>return self.model.variables[self.reverse_id]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "An optlang variable representing the reverse flux\n\n        Returns\n        -------\n        optlang.interface.Variable\n            An optlang variable for the reverse flux or None if reaction is\n            not associated with a model.", "id": "f15895:c0:m5"}
{"signature": "def subtract_metabolites(self, metabolites, combine=True, reversibly=True):", "body": "self.add_metabolites({<EOL>k: -v for k, v in iteritems(metabolites)},<EOL>combine=combine, reversibly=reversibly)<EOL>", "docstring": "Subtract metabolites from a reaction.\n\n        That means add the metabolites with -1*coefficient. If the final\n        coefficient for a metabolite is 0 then the metabolite is removed from\n        the reaction.\n\n        Notes\n        -----\n        * A final coefficient < 0 implies a reactant.\n        * The change is reverted upon exit when using the model as a context.\n\n        Parameters\n        ----------\n        metabolites : dict\n            Dictionary where the keys are of class Metabolite and the values\n            are the coefficients. These metabolites will be added to the\n            reaction.\n\n        combine : bool\n            Describes behavior a metabolite already exists in the reaction.\n            True causes the coefficients to be added.\n            False causes the coefficient to be replaced.\n\n        reversibly : bool\n            Whether to add the change to the context to make the change\n            reversibly or not (primarily intended for internal use).", "id": "f15895:c0:m48"}
{"signature": "@property<EOL><INDENT>def gene_name_reaction_rule(self):<DEDENT>", "body": "names = {i.id: i.name for i in self._genes}<EOL>ast = parse_gpr(self._gene_reaction_rule)[<NUM_LIT:0>]<EOL>return ast2str(ast, names=names)<EOL>", "docstring": "Display gene_reaction_rule with names intead.\n\n        Do NOT use this string for computation. It is intended to give a\n        representation of the rule using more familiar gene names instead of\n        the often cryptic ids.", "id": "f15895:c0:m24"}
{"signature": "@property<EOL><INDENT>def bounds(self):<DEDENT>", "body": "return self.lower_bound, self.upper_bound<EOL>", "docstring": "Get or set the bounds directly from a tuple\n\n        Convenience method for setting upper and lower bounds in one line\n        using a tuple of lower and upper bound. Invalid bounds will raise an\n        AssertionError.\n\n        When using a `HistoryManager` context, this attribute can be set\n        temporarily, reversed when the exiting the context.", "id": "f15895:c0:m16"}
{"signature": "def copy(self):", "body": "<EOL>model = self._model<EOL>self._model = None<EOL>for i in self._metabolites:<EOL><INDENT>i._model = None<EOL><DEDENT>for i in self._genes:<EOL><INDENT>i._model = None<EOL><DEDENT>new_reaction = deepcopy(self)<EOL>self._model = model<EOL>for i in self._metabolites:<EOL><INDENT>i._model = model<EOL><DEDENT>for i in self._genes:<EOL><INDENT>i._model = model<EOL><DEDENT>return new_reaction<EOL>", "docstring": "Copy a reaction\n\n        The referenced metabolites and genes are also copied.", "id": "f15895:c0:m36"}
{"signature": "@property<EOL><INDENT>def reversibility(self):<DEDENT>", "body": "return self._lower_bound < <NUM_LIT:0> < self._upper_bound<EOL>", "docstring": "Whether the reaction can proceed in both directions (reversible)\n\n        This is computed from the current upper and lower bounds.", "id": "f15895:c0:m28"}
{"signature": "@property<EOL><INDENT>def reverse_id(self):<DEDENT>", "body": "return '<STR_LIT:_>'.join((self.id, '<STR_LIT>',<EOL>hashlib.md5(<EOL>self.id.encode('<STR_LIT:utf-8>')).hexdigest()[<NUM_LIT:0>:<NUM_LIT:5>]))<EOL>", "docstring": "Generate the id of reverse_variable from the reaction's id.", "id": "f15895:c0:m2"}
{"signature": "def __add__(self, other):", "body": "new_reaction = self.copy()<EOL>if other == <NUM_LIT:0>:<EOL><INDENT>return new_reaction<EOL><DEDENT>else:<EOL><INDENT>new_reaction += other<EOL><DEDENT>return new_reaction<EOL>", "docstring": "Add two reactions\n\n        The stoichiometry will be the combined stoichiometry of the two\n        reactions, and the gene reaction rule will be both rules combined by an\n        and. All other attributes (i.e. reaction bounds) will match those of\n        the first reaction", "id": "f15895:c0:m37"}
{"signature": "@property<EOL><INDENT>def products(self):<DEDENT>", "body": "return [k for k, v in iteritems(self._metabolites) if v >= <NUM_LIT:0>]<EOL>", "docstring": "Return a list of products for the reaction", "id": "f15895:c0:m44"}
{"signature": "def parse_composition(self):", "body": "tmp_formula = self.formula<EOL>if \"<STR_LIT:*>\" in tmp_formula:<EOL><INDENT>warn(\"<STR_LIT>\" % self.formula)<EOL>tmp_formula = self.formula.replace(\"<STR_LIT:*>\", \"<STR_LIT>\")<EOL><DEDENT>if \"<STR_LIT:(>\" in tmp_formula or \"<STR_LIT:)>\" in tmp_formula:<EOL><INDENT>warn(\"<STR_LIT>\" % self.formula)<EOL>return<EOL><DEDENT>composition = {}<EOL>parsed = element_re.findall(tmp_formula)<EOL>for (element, count) in parsed:<EOL><INDENT>if count == '<STR_LIT>':<EOL><INDENT>count = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>count = float(count)<EOL>int_count = int(count)<EOL>if count == int_count:<EOL><INDENT>count = int_count<EOL><DEDENT>else:<EOL><INDENT>warn(\"<STR_LIT>\" %<EOL>(count, self.formula))<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>warn(\"<STR_LIT>\" %<EOL>(count, self.formula))<EOL>self.elements = {}<EOL>return<EOL><DEDENT><DEDENT>if element in composition:<EOL><INDENT>composition[element] += count<EOL><DEDENT>else:<EOL><INDENT>composition[element] = count<EOL><DEDENT><DEDENT>self.elements = composition<EOL>", "docstring": "Breaks the chemical formula down by element.", "id": "f15896:c0:m2"}
{"signature": "def add_metabolites(self, metabolite_list):", "body": "if not hasattr(metabolite_list, '<STR_LIT>'):<EOL><INDENT>metabolite_list = [metabolite_list]<EOL><DEDENT>if len(metabolite_list) == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>metabolite_list = [x for x in metabolite_list<EOL>if x.id not in self.metabolites]<EOL>bad_ids = [m for m in metabolite_list<EOL>if not isinstance(m.id, string_types) or len(m.id) < <NUM_LIT:1>]<EOL>if len(bad_ids) != <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(repr(bad_ids)))<EOL><DEDENT>for x in metabolite_list:<EOL><INDENT>x._model = self<EOL><DEDENT>self.metabolites += metabolite_list<EOL>to_add = []<EOL>for met in metabolite_list:<EOL><INDENT>if met.id not in self.constraints:<EOL><INDENT>constraint = self.problem.Constraint(<EOL>Zero, name=met.id, lb=<NUM_LIT:0>, ub=<NUM_LIT:0>)<EOL>to_add += [constraint]<EOL><DEDENT><DEDENT>self.add_cons_vars(to_add)<EOL>context = get_context(self)<EOL>if context:<EOL><INDENT>context(partial(self.metabolites.__isub__, metabolite_list))<EOL>for x in metabolite_list:<EOL><INDENT>context(partial(setattr, x, '<STR_LIT>', None))<EOL><DEDENT><DEDENT>", "docstring": "Will add a list of metabolites to the model object and add new\n        constraints accordingly.\n\n        The change is reverted upon exit when using the model as a context.\n\n        Parameters\n        ----------\n        metabolite_list : A list of `cobra.core.Metabolite` objects", "id": "f15897:c0:m17"}
{"signature": "def remove_reactions(self, reactions, remove_orphans=False):", "body": "if isinstance(reactions, string_types) or hasattr(reactions, \"<STR_LIT:id>\"):<EOL><INDENT>warn(\"<STR_LIT>\")<EOL>reactions = [reactions]<EOL><DEDENT>context = get_context(self)<EOL>for reaction in reactions:<EOL><INDENT>try:<EOL><INDENT>reaction = self.reactions[self.reactions.index(reaction)]<EOL><DEDENT>except ValueError:<EOL><INDENT>warn('<STR_LIT>' % (reaction, self))<EOL><DEDENT>else:<EOL><INDENT>forward = reaction.forward_variable<EOL>reverse = reaction.reverse_variable<EOL>if context:<EOL><INDENT>obj_coef = reaction.objective_coefficient<EOL>if obj_coef != <NUM_LIT:0>:<EOL><INDENT>context(partial(<EOL>self.solver.objective.set_linear_coefficients,<EOL>{forward: obj_coef, reverse: -obj_coef}))<EOL><DEDENT>context(partial(self._populate_solver, [reaction]))<EOL>context(partial(setattr, reaction, '<STR_LIT>', self))<EOL>context(partial(self.reactions.add, reaction))<EOL><DEDENT>self.remove_cons_vars([forward, reverse])<EOL>self.reactions.remove(reaction)<EOL>reaction._model = None<EOL>for met in reaction._metabolites:<EOL><INDENT>if reaction in met._reaction:<EOL><INDENT>met._reaction.remove(reaction)<EOL>if context:<EOL><INDENT>context(partial(met._reaction.add, reaction))<EOL><DEDENT>if remove_orphans and len(met._reaction) == <NUM_LIT:0>:<EOL><INDENT>self.remove_metabolites(met)<EOL><DEDENT><DEDENT><DEDENT>for gene in reaction._genes:<EOL><INDENT>if reaction in gene._reaction:<EOL><INDENT>gene._reaction.remove(reaction)<EOL>if context:<EOL><INDENT>context(partial(gene._reaction.add, reaction))<EOL><DEDENT>if remove_orphans and len(gene._reaction) == <NUM_LIT:0>:<EOL><INDENT>self.genes.remove(gene)<EOL>if context:<EOL><INDENT>context(partial(self.genes.add, gene))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>associated_groups = self.get_associated_groups(reaction)<EOL>for group in associated_groups:<EOL><INDENT>group.remove_members(reaction)<EOL><DEDENT><DEDENT><DEDENT>", 
"docstring": "Remove reactions from the model.\n\n        The change is reverted upon exit when using the model as a context.\n\n        Parameters\n        ----------\n        reactions : list\n            A list with reactions (`cobra.Reaction`), or their id's, to remove\n\n        remove_orphans : bool\n            Remove orphaned genes and metabolites from the model as well", "id": "f15897:c0:m22"}
{"signature": "def __add__(self, other_model):", "body": "warn('<STR_LIT>', DeprecationWarning)<EOL>return self.merge(other_model, objective='<STR_LIT>', inplace=False)<EOL>", "docstring": "Add the content of another model to this model (+).\n\n        The model is copied as a new object, with a new model identifier,\n        and copies of all the reactions in the other model are added to this\n        model. The objective is the sum of the objective expressions for the\n        two models.", "id": "f15897:c0:m14"}
{"signature": "def _populate_solver(self, reaction_list, metabolite_list=None):", "body": "constraint_terms = AutoVivification()<EOL>to_add = []<EOL>if metabolite_list is not None:<EOL><INDENT>for met in metabolite_list:<EOL><INDENT>to_add += [self.problem.Constraint(<EOL>Zero, name=met.id, lb=<NUM_LIT:0>, ub=<NUM_LIT:0>)]<EOL><DEDENT><DEDENT>self.add_cons_vars(to_add)<EOL>for reaction in reaction_list:<EOL><INDENT>if reaction.id not in self.variables:<EOL><INDENT>forward_variable = self.problem.Variable(reaction.id)<EOL>reverse_variable = self.problem.Variable(reaction.reverse_id)<EOL>self.add_cons_vars([forward_variable, reverse_variable])<EOL><DEDENT>else:<EOL><INDENT>reaction = self.reactions.get_by_id(reaction.id)<EOL>forward_variable = reaction.forward_variable<EOL>reverse_variable = reaction.reverse_variable<EOL><DEDENT>for metabolite, coeff in six.iteritems(reaction.metabolites):<EOL><INDENT>if metabolite.id in self.constraints:<EOL><INDENT>constraint = self.constraints[metabolite.id]<EOL><DEDENT>else:<EOL><INDENT>constraint = self.problem.Constraint(<EOL>Zero,<EOL>name=metabolite.id,<EOL>lb=<NUM_LIT:0>, ub=<NUM_LIT:0>)<EOL>self.add_cons_vars(constraint, sloppy=True)<EOL><DEDENT>constraint_terms[constraint][forward_variable] = coeff<EOL>constraint_terms[constraint][reverse_variable] = -coeff<EOL><DEDENT><DEDENT>self.solver.update()<EOL>for reaction in reaction_list:<EOL><INDENT>reaction = self.reactions.get_by_id(reaction.id)<EOL>reaction.update_variable_bounds()<EOL><DEDENT>for constraint, terms in six.iteritems(constraint_terms):<EOL><INDENT>constraint.set_linear_coefficients(terms)<EOL><DEDENT>", "docstring": "Populate attached solver with constraints and variables that\n        model the provided reactions.", "id": "f15897:c0:m35"}
{"signature": "def __iadd__(self, other_model):", "body": "warn('<STR_LIT>', DeprecationWarning)<EOL>return self.merge(other_model, objective='<STR_LIT>', inplace=True)<EOL>", "docstring": "Incrementally add the content of another model to this model (+=).\n\n        Copies of all the reactions in the other model are added to this\n        model. The objective is the sum of the objective expressions for the\n        two models.", "id": "f15897:c0:m15"}
{"signature": "def __enter__(self):", "body": "<EOL>try:<EOL><INDENT>self._contexts.append(HistoryManager())<EOL><DEDENT>except AttributeError:<EOL><INDENT>self._contexts = [HistoryManager()]<EOL><DEDENT>return self<EOL>", "docstring": "Record all future changes to the model, undoing them when a call to\n        __exit__ is received", "id": "f15897:c0:m44"}
{"signature": "def __getstate__(self):", "body": "odict = self.__dict__.copy()<EOL>odict['<STR_LIT>'] = []<EOL>return odict<EOL>", "docstring": "Get state for serialization.\n\n        Ensures that the context stack is cleared prior to serialization,\n        since partial functions cannot be pickled reliably.", "id": "f15897:c0:m1"}
{"signature": "def get_associated_groups(self, element):", "body": "<EOL>return [g for g in self.groups if element in g.members]<EOL>", "docstring": "Returns a list of groups that an element (reaction, metabolite, gene)\n        is associated with.\n\n        Parameters\n        ----------\n        element: `cobra.Reaction`, `cobra.Metabolite`, or `cobra.Gene`\n\n        Returns\n        -------\n        list of `cobra.Group`\n            All groups that the provided object is a member of", "id": "f15897:c0:m25"}
{"signature": "def add_groups(self, group_list):", "body": "def existing_filter(group):<EOL><INDENT>if group.id in self.groups:<EOL><INDENT>LOGGER.warning(<EOL>\"<STR_LIT>\", group.id)<EOL>return False<EOL><DEDENT>return True<EOL><DEDENT>if isinstance(group_list, string_types) or hasattr(group_list, \"<STR_LIT:id>\"):<EOL><INDENT>warn(\"<STR_LIT>\")<EOL>group_list = [group_list]<EOL><DEDENT>pruned = DictList(filter(existing_filter, group_list))<EOL>for group in pruned:<EOL><INDENT>group._model = self<EOL>for member in group.members:<EOL><INDENT>if isinstance(member, Metabolite):<EOL><INDENT>if member not in self.metabolites:<EOL><INDENT>self.add_metabolites([member])<EOL><DEDENT><DEDENT>if isinstance(member, Reaction):<EOL><INDENT>if member not in self.reactions:<EOL><INDENT>self.add_reactions([member])<EOL><DEDENT><DEDENT><DEDENT>self.groups += [group]<EOL><DEDENT>", "docstring": "Add groups to the model.\n\n        Groups with identifiers identical to a group already in the model are\n        ignored.\n\n        If any group contains members that are not in the model, these members\n        are added to the model as well. Only metabolites, reactions, and genes\n        can have groups.\n\n        Parameters\n        ----------\n        group_list : list\n            A list of `cobra.Group` objects to add to the model.", "id": "f15897:c0:m23"}
{"signature": "@property<EOL><INDENT>def boundary(self):<DEDENT>", "body": "return [rxn for rxn in self.reactions if rxn.boundary]<EOL>", "docstring": "Boundary reactions in the model.\n        Reactions that either have no substrate or product.", "id": "f15897:c0:m31"}
{"signature": "@compartments.setter<EOL><INDENT>def compartments(self, value):<DEDENT>", "body": "self._compartments.update(value)<EOL>", "docstring": "Get or set the dictionary of current compartment descriptions.\n\n        Assigning a dictionary to this property updates the model's\n        dictionary of compartment descriptions with the new values.\n\n        Parameters\n        ----------\n        value : dict\n            Dictionary mapping compartments abbreviations to full names.\n\n        Examples\n        --------\n        >>> import cobra.test\n        >>> model = cobra.test.create_test_model(\"textbook\")\n        >>> model.compartments = {'c': 'the cytosol'}\n        {'c': 'the cytosol', 'e': 'extracellular'}", "id": "f15897:c0:m11"}
{"signature": "@property<EOL><INDENT>def solver(self):<DEDENT>", "body": "return self._solver<EOL>", "docstring": "Get or set the attached solver instance.\n\n        The associated the solver object, which manages the interaction with\n        the associated solver, e.g. glpk.\n\n        This property is useful for accessing the optimization problem\n        directly and to define additional non-metabolic constraints.\n\n        Examples\n        --------\n        >>> import cobra.test\n        >>> model = cobra.test.create_test_model(\"textbook\")\n        >>> new = model.problem.Constraint(model.objective.expression,\n        >>> lb=0.99)\n        >>> model.solver.add(new)", "id": "f15897:c0:m3"}
{"signature": "def remove_groups(self, group_list):", "body": "if isinstance(group_list, string_types) or hasattr(group_list, \"<STR_LIT:id>\"):<EOL><INDENT>warn(\"<STR_LIT>\")<EOL>group_list = [group_list]<EOL><DEDENT>for group in group_list:<EOL><INDENT>if group.id not in self.groups:<EOL><INDENT>LOGGER.warning(\"<STR_LIT>\", group, self)<EOL><DEDENT>else:<EOL><INDENT>self.groups.remove(group)<EOL>group._model = None<EOL><DEDENT><DEDENT>", "docstring": "Remove groups from the model.\n\n        Members of each group are not removed\n        from the model (i.e. metabolites, reactions, and genes in the group\n        stay in the model after any groups containing them are removed).\n\n        Parameters\n        ----------\n        group_list : list\n            A list of `cobra.Group` objects to remove from the model.", "id": "f15897:c0:m24"}
{"signature": "@medium.setter<EOL><INDENT>def medium(self, medium):<DEDENT>", "body": "def set_active_bound(reaction, bound):<EOL><INDENT>if reaction.reactants:<EOL><INDENT>reaction.lower_bound = -bound<EOL><DEDENT>elif reaction.products:<EOL><INDENT>reaction.upper_bound = bound<EOL><DEDENT><DEDENT>media_rxns = list()<EOL>exchange_rxns = frozenset(self.exchanges)<EOL>for rxn_id, bound in iteritems(medium):<EOL><INDENT>rxn = self.reactions.get_by_id(rxn_id)<EOL>if rxn not in exchange_rxns:<EOL><INDENT>LOGGER.warn(\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>rxn.id)<EOL><DEDENT>media_rxns.append(rxn)<EOL>set_active_bound(rxn, bound)<EOL><DEDENT>media_rxns = frozenset(media_rxns)<EOL>for rxn in (exchange_rxns - media_rxns):<EOL><INDENT>set_active_bound(rxn, <NUM_LIT:0>)<EOL><DEDENT>", "docstring": "Get or set the constraints on the model exchanges.\n\n        `model.medium` returns a dictionary of the bounds for each of the\n        boundary reactions, in the form of `{rxn_id: bound}`, where `bound`\n        specifies the absolute value of the bound in direction of metabolite\n        creation (i.e., lower_bound for `met <--`, upper_bound for `met -->`)\n\n        Parameters\n        ----------\n        medium: dictionary-like\n            The medium to initialize. medium should be a dictionary defining\n            `{rxn_id: bound}` pairs.", "id": "f15897:c0:m13"}
{"signature": "def repair(self, rebuild_index=True, rebuild_relationships=True):", "body": "if rebuild_index:  <EOL><INDENT>self.reactions._generate_index()<EOL>self.metabolites._generate_index()<EOL>self.genes._generate_index()<EOL>self.groups._generate_index()<EOL><DEDENT>if rebuild_relationships:<EOL><INDENT>for met in self.metabolites:<EOL><INDENT>met._reaction.clear()<EOL><DEDENT>for gene in self.genes:<EOL><INDENT>gene._reaction.clear()<EOL><DEDENT>for rxn in self.reactions:<EOL><INDENT>for met in rxn._metabolites:<EOL><INDENT>met._reaction.add(rxn)<EOL><DEDENT>for gene in rxn._genes:<EOL><INDENT>gene._reaction.add(rxn)<EOL><DEDENT><DEDENT><DEDENT>for l in (self.reactions, self.genes, self.metabolites, self.groups):<EOL><INDENT>for e in l:<EOL><INDENT>e._model = self<EOL><DEDENT><DEDENT>", "docstring": "Update all indexes and pointers in a model\n\n        Parameters\n        ----------\n        rebuild_index : bool\n            rebuild the indices kept in reactions, metabolites and genes\n        rebuild_relationships : bool\n             reset all associations between genes, metabolites, model and\n             then re-add them.", "id": "f15897:c0:m38"}
{"signature": "def summary(self, solution=None, threshold=<NUM_LIT>, fva=None, names=False,<EOL>floatfmt='<STR_LIT>'):", "body": "from cobra.flux_analysis.summary import model_summary<EOL>return model_summary(self, solution=solution, threshold=threshold,<EOL>fva=fva, names=names, floatfmt=floatfmt)<EOL>", "docstring": "Print a summary of the input and output fluxes of the model.\n\nParameters\n----------\nsolution: cobra.Solution, optional\n    A previously solved model solution to use for generating the\n    summary. If none provided (default), the summary method will\n    resolve the model. Note that the solution object must match the\n    model, i.e., changes to the model such as changed bounds,\n    added or removed reactions are not taken into account by this\n    method.\nthreshold : float, optional\n    Threshold below which fluxes are not reported.\nfva : pandas.DataFrame, float or None, optional\n    Whether or not to include flux variability analysis in the output.\n    If given, fva should either be a previous FVA solution matching\n    the model or a float between 0 and 1 representing the\n    fraction of the optimum objective to be searched.\nnames : bool, optional\n    Emit reaction and metabolite names rather than identifiers (default\n    False).\nfloatfmt : string, optional\n    Format string for floats (default '.3g').", "id": "f15897:c0:m43"}
{"signature": "def add_cons_vars(self, what, **kwargs):", "body": "add_cons_vars_to_problem(self, what, **kwargs)<EOL>", "docstring": "Add constraints and variables to the model's mathematical problem.\n\n        Useful for variables and constraints that can not be expressed with\n        reactions and simple lower and upper bounds.\n\n        Additions are reversed upon exit if the model itself is used as\n        context.\n\n        Parameters\n        ----------\n        what : list or tuple of optlang variables or constraints.\n           The variables or constraints to add to the model. Must be of\n           class `optlang.interface.Variable` or\n           `optlang.interface.Constraint`.\n        **kwargs : keyword arguments\n           Passed to solver.add()", "id": "f15897:c0:m26"}
{"signature": "def __init__(self, id=None, name=\"<STR_LIT>\"):", "body": "self._id = id<EOL>self.name = name<EOL>self.notes = {}<EOL>self.annotation = {}<EOL>", "docstring": "A simple object with an identifier\n\n        Parameters\n        ----------\n        id: None or a string\n            the identifier to associate with the object", "id": "f15898:c0:m0"}
{"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()<EOL>if '<STR_LIT>' in state:<EOL><INDENT>state['<STR_LIT>'] = None<EOL><DEDENT>return state<EOL>", "docstring": "To prevent excessive replication during deepcopy.", "id": "f15898:c0:m4"}
{"signature": "@property<EOL><INDENT>def shadow_price(self):<DEDENT>", "body": "try:<EOL><INDENT>check_solver_status(self._model.solver.status)<EOL>return self._model.constraints[self.id].dual<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\".format(self.id))<EOL><DEDENT>except (RuntimeError, OptimizationError) as err:<EOL><INDENT>raise_with_traceback(err)<EOL><DEDENT>except Exception as err:<EOL><INDENT>raise_from(OptimizationError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(str(err))), err)<EOL><DEDENT>", "docstring": "The shadow price in the most recent solution.\n\nShadow price is the dual value of the corresponding constraint in the\nmodel.\n\nWarnings\n--------\n* Accessing shadow prices through a `Solution` object is the safer,\n  preferred, and only guaranteed to be correct way. You can see how to\n  do so easily in the examples.\n* Shadow price is retrieved from the currently defined\n  `self._model.solver`. The solver status is checked but there are no\n  guarantees that the current solver state is the one you are looking\n  for.\n* If you modify the underlying model after an optimization, you will\n  retrieve the old optimization values.\n\nRaises\n------\nRuntimeError\n    If the underlying model was never optimized beforehand or the\n    metabolite is not part of a model.\nOptimizationError\n    If the solver status is anything other than 'optimal'.\n\nExamples\n--------\n>>> import cobra\n>>> import cobra.test\n>>> model = cobra.test.create_test_model(\"textbook\")\n>>> solution = model.optimize()\n>>> model.metabolites.glc__D_e.shadow_price\n-0.09166474637510488\n>>> solution.shadow_prices.glc__D_e\n-0.091664746375104883", "id": "f15899:c0:m7"}
{"signature": "@property<EOL><INDENT>def y(self):<DEDENT>", "body": "warn(\"<STR_LIT>\", DeprecationWarning)<EOL>return self.shadow_price<EOL>", "docstring": "The shadow price for the metabolite in the most recent solution\n\n        Shadow prices are computed from the dual values of the bounds in\n        the solution.", "id": "f15899:c0:m6"}
{"signature": "@property<EOL><INDENT>def constraint(self):<DEDENT>", "body": "if self.model is not None:<EOL><INDENT>return self.model.constraints[self.id]<EOL><DEDENT>", "docstring": "Get the constraints associated with this metabolite from the solve\n\n        Returns\n        -------\n        optlang.<interface>.Constraint\n            the optlang constraint for this metabolite", "id": "f15899:c0:m2"}
{"signature": "def summary(self, solution=None, threshold=<NUM_LIT>, fva=None, names=False,<EOL>floatfmt='<STR_LIT>'):", "body": "from cobra.flux_analysis.summary import metabolite_summary<EOL>return metabolite_summary(self, solution=solution, threshold=threshold,<EOL>fva=fva, names=names, floatfmt=floatfmt)<EOL>", "docstring": "Print a summary of the production and consumption fluxes.\n\nThis method requires the model for which this metabolite is a part\nto be solved.\n\nParameters\n----------\nsolution : cobra.Solution, optional\n    A previously solved model solution to use for generating the\n    summary. If none provided (default), the summary method will\n    resolve the model. Note that the solution object must match the\n    model, i.e., changes to the model such as changed bounds,\n    added or removed reactions are not taken into account by this\n    method.\nthreshold : float, optional\n    Threshold below which fluxes are not reported.\nfva : pandas.DataFrame, float or None, optional\n    Whether or not to include flux variability analysis in the output.\n    If given, fva should either be a previous FVA solution matching\n    the model or a float between 0 and 1 representing the\n    fraction of the optimum objective to be searched.\nnames : bool, optional\n    Emit reaction and metabolite names rather than identifiers (default\n    False).\nfloatfmt : string, optional\n    Format string for floats (default '.3g').", "id": "f15899:c0:m9"}
{"signature": "def copy(self):", "body": "return deepcopy(self)<EOL>", "docstring": "When copying a reaction, it is necessary to deepcopy the\n        components so the list references aren't carried over.\n\n        Additionally, a copy of a reaction is no longer in a cobra.Model.\n\n        This should be fixed with self.__deepcopy__ if possible", "id": "f15903:c0:m3"}
{"signature": "def __getstate__(self):", "body": "state = Object.__getstate__(self)<EOL>state['<STR_LIT>'] = set()<EOL>return state<EOL>", "docstring": "Remove the references to container reactions when serializing to\n        avoid problems associated with recursion.", "id": "f15903:c0:m2"}
{"signature": "def eval_gpr(expr, knockouts):", "body": "if isinstance(expr, Expression):<EOL><INDENT>return eval_gpr(expr.body, knockouts)<EOL><DEDENT>elif isinstance(expr, Name):<EOL><INDENT>return expr.id not in knockouts<EOL><DEDENT>elif isinstance(expr, BoolOp):<EOL><INDENT>op = expr.op<EOL>if isinstance(op, Or):<EOL><INDENT>return any(eval_gpr(i, knockouts) for i in expr.values)<EOL><DEDENT>elif isinstance(op, And):<EOL><INDENT>return all(eval_gpr(i, knockouts) for i in expr.values)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" + op.__class__.__name__)<EOL><DEDENT><DEDENT>elif expr is None:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" + repr(expr))<EOL><DEDENT>", "docstring": "evaluate compiled ast of gene_reaction_rule with knockouts\n\n    Parameters\n    ----------\n    expr : Expression\n        The ast of the gene reaction rule\n    knockouts : DictList, set\n        Set of genes that are knocked out\n\n    Returns\n    -------\n    bool\n        True if the gene reaction rule is true with the given knockouts\n        otherwise false", "id": "f15904:m1"}
{"signature": "def ast2str(expr, level=<NUM_LIT:0>, names=None):", "body": "if isinstance(expr, Expression):<EOL><INDENT>return ast2str(expr.body, <NUM_LIT:0>, names)if hasattr(expr, \"<STR_LIT:body>\") else \"<STR_LIT>\"<EOL><DEDENT>elif isinstance(expr, Name):<EOL><INDENT>return names.get(expr.id, expr.id) if names else expr.id<EOL><DEDENT>elif isinstance(expr, BoolOp):<EOL><INDENT>op = expr.op<EOL>if isinstance(op, Or):<EOL><INDENT>str_exp = \"<STR_LIT>\".join(ast2str(i, level + <NUM_LIT:1>, names)<EOL>for i in expr.values)<EOL><DEDENT>elif isinstance(op, And):<EOL><INDENT>str_exp = \"<STR_LIT>\".join(ast2str(i, level + <NUM_LIT:1>, names)<EOL>for i in expr.values)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" + op.__class__.__name)<EOL><DEDENT>return \"<STR_LIT:(>\" + str_exp + \"<STR_LIT:)>\" if level else str_exp<EOL><DEDENT>elif expr is None:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" + repr(expr))<EOL><DEDENT>", "docstring": "convert compiled ast to gene_reaction_rule str\n\n    Parameters\n    ----------\n    expr : str\n        string for a gene reaction rule, e.g \"a and b\"\n    level : int\n        internal use only\n    names : dict\n        Dict where each element id a gene identifier and the value is the\n        gene name. Use this to get a rule str which uses names instead. This\n        should be done for display purposes only. All gene_reaction_rule\n        strings which are computed with should use the id.\n\n    Returns\n    ------\n    string\n        The gene reaction rule", "id": "f15904:m0"}
{"signature": "def knock_out(self):", "body": "self.functional = False<EOL>for reaction in self.reactions:<EOL><INDENT>if not reaction.functional:<EOL><INDENT>reaction.bounds = (<NUM_LIT:0>, <NUM_LIT:0>)<EOL><DEDENT><DEDENT>", "docstring": "Knockout gene by marking it as non-functional and setting all\n        associated reactions bounds to zero.\n\n        The change is reverted upon exit if executed within the model as\n        context.", "id": "f15904:c1:m3"}
{"signature": "def __repr__(self):", "body": "if self.status != \"<STR_LIT>\":<EOL><INDENT>return \"<STR_LIT>\".format(<EOL>self.status, id(self))<EOL><DEDENT>return \"<STR_LIT>\".format(<EOL>self.f, id(self))<EOL>", "docstring": "String representation of the solution instance.", "id": "f15905:c1:m1"}
{"signature": "def __init__(self, f, x=None, x_dict=None, y=None, y_dict=None,<EOL>solver=None, the_time=<NUM_LIT:0>, status='<STR_LIT>', **kwargs):", "body": "super(LegacySolution, self).__init__(**kwargs)<EOL>self.solver = solver<EOL>self.f = f<EOL>self.x = x<EOL>self.x_dict = x_dict<EOL>self.status = status<EOL>self.y = y<EOL>self.y_dict = y_dict<EOL>", "docstring": "Initialize a `LegacySolution` from an objective value.\n\nParameters\n----------\nf : float\n    Objective value.\nsolver : str, optional\n    A string indicating which solver package was used.\nx : iterable, optional\n    List or Array of the fluxes (primal values).\nx_dict : dict, optional\n    A dictionary of reaction IDs that maps to the respective primal\n    values.\ny : iterable, optional\n    List or Array of the dual values.\ny_dict : dict, optional\n    A dictionary of reaction IDs that maps to the respective dual\n    values.\nthe_time : int, optional\nstatus : str, optional\n\n.. warning :: deprecated", "id": "f15905:c1:m0"}
{"signature": "def compare_models(model_1, model_2):", "body": "assert len(model_1.reactions) == len(model_2.reactions)<EOL>assert len(model_1.metabolites) == len(model_2.metabolites)<EOL>assert len(model_1.genes) == len(model_2.genes)<EOL>assert model_1.objective.direction == model_2.objective.direction<EOL>for attr in (\"<STR_LIT:id>\", \"<STR_LIT:name>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>assert getattr(model_1.reactions[<NUM_LIT:0>], attr) == getattr(<EOL>model_2.reactions[<NUM_LIT:0>], attr)<EOL>assert getattr(model_1.reactions[<NUM_LIT:5>], attr) == getattr(<EOL>model_2.reactions[<NUM_LIT:5>], attr)<EOL>assert getattr(model_1.reactions[-<NUM_LIT:1>], attr) == getattr(<EOL>model_2.reactions[-<NUM_LIT:1>], attr)<EOL><DEDENT>for attr in (\"<STR_LIT:id>\", \"<STR_LIT:name>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>assert getattr(model_1.metabolites[<NUM_LIT:0>], attr) == getattr(<EOL>model_2.metabolites[<NUM_LIT:0>], attr)<EOL>assert getattr(model_1.metabolites[<NUM_LIT:5>], attr) == getattr(<EOL>model_2.metabolites[<NUM_LIT:5>], attr)<EOL>assert getattr(model_1.metabolites[-<NUM_LIT:1>], attr) == getattr(<EOL>model_2.metabolites[-<NUM_LIT:1>], attr)<EOL>assert len(model_1.reactions[<NUM_LIT:0>].metabolites) == len(<EOL>model_2.reactions[<NUM_LIT:0>].metabolites)<EOL><DEDENT>assert len(model_1.reactions[<NUM_LIT:8>].metabolites) == len(<EOL>model_2.reactions[<NUM_LIT:8>].metabolites)<EOL>assert len(model_1.reactions[-<NUM_LIT:1>].metabolites) == len(<EOL>model_2.reactions[-<NUM_LIT:1>].metabolites)<EOL>assert len(model_1.genes) == len(model_2.genes)<EOL>solution_1 = model_1.optimize()<EOL>solution_2 = model_2.optimize()<EOL>assert abs(solution_1.objective_value -<EOL>solution_2.objective_value) == pytest.approx(<NUM_LIT:0.0>)<EOL>assert model_1.metabolites[<NUM_LIT:0>]._model is model_1<EOL>assert model_2.metabolites[<NUM_LIT:0>]._model is model_2<EOL>assert model_1.reactions[<NUM_LIT:0>]._model is 
model_1<EOL>assert model_2.reactions[<NUM_LIT:0>]._model is model_2<EOL>assert model_1.genes[<NUM_LIT:0>]._model is model_1<EOL>assert model_2.genes[<NUM_LIT:0>]._model is model_2<EOL>", "docstring": "Compare two models (only for testing purposes).", "id": "f15942:m1"}
{"signature": "def _check_sbml_annotations(model):", "body": "assert model is not None<EOL>annotation = model.annotation<EOL>assert annotation is not None<EOL>assert len(annotation) == <NUM_LIT:3><EOL>for key in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>assert key in annotation<EOL><DEDENT>assert annotation[\"<STR_LIT>\"] == \"<STR_LIT>\"<EOL>assert annotation[\"<STR_LIT>\"] == \"<STR_LIT>\"<EOL>assert annotation[\"<STR_LIT>\"] == \"<STR_LIT>\"<EOL>annotation = model.genes.G1.annotation<EOL>assert len(annotation) == <NUM_LIT:5><EOL>for key in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>assert key in annotation<EOL><DEDENT>assert annotation[\"<STR_LIT>\"] == \"<STR_LIT>\"<EOL>assert annotation[\"<STR_LIT>\"] == \"<STR_LIT>\"<EOL>assert annotation[\"<STR_LIT>\"] == \"<STR_LIT>\"<EOL>assert annotation[\"<STR_LIT>\"] == \"<STR_LIT>\"<EOL>assert annotation[\"<STR_LIT>\"] == \"<STR_LIT>\"<EOL>annotation = model.metabolites.A.annotation<EOL>for key in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>assert key in annotation<EOL><DEDENT>assert annotation[<EOL>\"<STR_LIT>\"] == \"<STR_LIT>\"  <EOL>annotation = model.reactions.R1.annotation<EOL>for key in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>assert key in annotation<EOL><DEDENT>assert annotation[\"<STR_LIT>\"] == '<STR_LIT>'<EOL>", "docstring": "Checks the annotations from the annotation.xml.", "id": "f15946:m0"}
{"signature": "def reaction_elements(reaction):", "body": "c_elements = [coeff * met.elements.get('<STR_LIT:C>', <NUM_LIT:0>)<EOL>for met, coeff in iteritems(reaction.metabolites)]<EOL>return [elem for elem in c_elements if elem != <NUM_LIT:0>]<EOL>", "docstring": "Split metabolites into the atoms times their stoichiometric coefficients.\n\nParameters\n----------\nreaction : Reaction\n    The metabolic reaction whose components are desired.\n\nReturns\n-------\nlist\n    Each of the reaction's metabolites' desired carbon elements (if any)\n    times that metabolite's stoichiometric coefficient.", "id": "f15948:m3"}
{"signature": "def total_components_flux(flux, components, consumption=True):", "body": "direction = <NUM_LIT:1> if consumption else -<NUM_LIT:1><EOL>c_flux = [elem * flux * direction for elem in components]<EOL>return sum([flux for flux in c_flux if flux > <NUM_LIT:0>])<EOL>", "docstring": "Compute the total components consumption or production flux.\n\nParameters\n----------\nflux : float\n    The reaction flux for the components.\ncomponents : list\n    List of stoichiometrically weighted components.\nconsumption : bool, optional\n    Whether to sum up consumption or production fluxes.", "id": "f15948:m5"}
{"signature": "def production_envelope(model, reactions, objective=None, carbon_sources=None,<EOL>points=<NUM_LIT:20>, threshold=None):", "body": "reactions = model.reactions.get_by_any(reactions)<EOL>objective = model.solver.objective if objective is None else objective<EOL>data = dict()<EOL>if carbon_sources is None:<EOL><INDENT>c_input = find_carbon_sources(model)<EOL><DEDENT>else:<EOL><INDENT>c_input = model.reactions.get_by_any(carbon_sources)<EOL><DEDENT>if c_input is None:<EOL><INDENT>data['<STR_LIT>'] = None<EOL><DEDENT>elif hasattr(c_input, '<STR_LIT:id>'):<EOL><INDENT>data['<STR_LIT>'] = c_input.id<EOL><DEDENT>else:<EOL><INDENT>data['<STR_LIT>'] = '<STR_LIT:U+002CU+0020>'.join(rxn.id for rxn in c_input)<EOL><DEDENT>threshold = normalize_cutoff(model, threshold)<EOL>size = points ** len(reactions)<EOL>for direction in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>data['<STR_LIT>'.format(direction)] = full(size, nan, dtype=float)<EOL>data['<STR_LIT>'.format(direction)] = full(<EOL>size, nan, dtype=float)<EOL>data['<STR_LIT>'.format(direction)] = full(<EOL>size, nan, dtype=float)<EOL><DEDENT>grid = pd.DataFrame(data)<EOL>with model:<EOL><INDENT>model.objective = objective<EOL>objective_reactions = list(sutil.linear_reaction_coefficients(model))<EOL>if len(objective_reactions) != <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>c_output = objective_reactions[<NUM_LIT:0>]<EOL>min_max = fva(model, reactions, fraction_of_optimum=<NUM_LIT:0>)<EOL>min_max[min_max.abs() < threshold] = <NUM_LIT:0.0><EOL>points = list(product(*[<EOL>linspace(min_max.at[rxn.id, \"<STR_LIT>\"],<EOL>min_max.at[rxn.id, \"<STR_LIT>\"],<EOL>points, endpoint=True) for rxn in reactions]))<EOL>tmp = pd.DataFrame(points, columns=[rxn.id for rxn in reactions])<EOL>grid = pd.concat([grid, tmp], axis=<NUM_LIT:1>, copy=False)<EOL>add_envelope(model, reactions, grid, c_input, c_output, threshold)<EOL><DEDENT>return grid<EOL>", "docstring": "Calculate the objective value 
conditioned on all combinations of\n    fluxes for a set of chosen reactions\n\n    The production envelope can be used to analyze a model's ability to\n    produce a given compound conditional on the fluxes for another set of\n    reactions, such as the uptake rates. The model is alternately optimized\n    with respect to minimizing and maximizing the objective and the\n    obtained fluxes are recorded. Ranges to compute production are set to the\n    effective\n    bounds, i.e., the minimum / maximum fluxes that can be obtained given\n    current reaction bounds.\n\n    Parameters\n    ----------\n    model : cobra.Model\n        The model to compute the production envelope for.\n    reactions : list or string\n        A list of reactions, reaction identifiers or a single reaction.\n    objective : string, dict, model.solver.interface.Objective, optional\n        The objective (reaction) to use for the production envelope. Use the\n        model's current objective if left missing.\n    carbon_sources : list or string, optional\n       One or more reactions or reaction identifiers that are the source of\n       carbon for computing carbon (mol carbon in output over mol carbon in\n       input) and mass yield (gram product over gram output). 
Only objectives\n       with a carbon containing input and output metabolite is supported.\n       Will identify active carbon sources in the medium if none are specified.\n    points : int, optional\n       The number of points to calculate production for.\n    threshold : float, optional\n        A cut-off under which flux values will be considered to be zero\n        (default model.tolerance).\n\n    Returns\n    -------\n    pandas.DataFrame\n        A data frame with one row per evaluated point and\n\n        - reaction id : one column per input reaction indicating the flux at\n          each given point,\n        - carbon_source: identifiers of carbon exchange reactions\n\n        A column for the maximum and minimum each for the following types:\n\n        - flux: the objective flux\n        - carbon_yield: if carbon source is defined and the product is a\n          single metabolite (mol carbon product per mol carbon feeding source)\n        - mass_yield: if carbon source is defined and the product is a\n          single metabolite (gram product per 1 g of feeding source)\n\n    Examples\n    --------\n    >>> import cobra.test\n    >>> from cobra.flux_analysis import production_envelope\n    >>> model = cobra.test.create_test_model(\"textbook\")\n    >>> production_envelope(model, [\"EX_glc__D_e\", \"EX_o2_e\"])", "id": "f15948:m0"}
{"signature": "def reaction_weight(reaction):", "body": "if len(reaction.metabolites) != <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>met, coeff = next(iteritems(reaction.metabolites))<EOL>return [coeff * met.formula_weight]<EOL>", "docstring": "Return the metabolite weight times its stoichiometric coefficient.", "id": "f15948:m4"}
{"signature": "def assess(model, reaction, flux_coefficient_cutoff=<NUM_LIT>, solver=None):", "body": "reaction = model.reactions.get_by_any(reaction)[<NUM_LIT:0>]<EOL>with model as m:<EOL><INDENT>m.objective = reaction<EOL>if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>results = dict()<EOL>results['<STR_LIT>'] = assess_component(<EOL>model, reaction, '<STR_LIT>', flux_coefficient_cutoff)<EOL>results['<STR_LIT>'] = assess_component(<EOL>model, reaction, '<STR_LIT>', flux_coefficient_cutoff)<EOL>return results<EOL><DEDENT><DEDENT>", "docstring": "Assesses production capacity.\n\n    Assesses the capacity of the model to produce the precursors for the\n    reaction and absorb the production of the reaction while the reaction is\n    operating at, or above, the specified cutoff.\n\n    Parameters\n    ----------\n    model : cobra.Model\n        The cobra model to assess production capacity for\n\n    reaction : reaction identifier or cobra.Reaction\n        The reaction to assess\n\n    flux_coefficient_cutoff :  float\n        The minimum flux that reaction must carry to be considered active.\n\n    solver : basestring\n        Solver name. If None, the default solver will be used.\n\n    Returns\n    -------\n    bool or dict\n        True if the model can produce the precursors and absorb the products\n        for the reaction operating at, or above, flux_coefficient_cutoff.\n        Otherwise, a dictionary of {'precursor': Status, 'product': Status}.\n        Where Status is the results from assess_precursors and\n        assess_products, respectively.", "id": "f15949:m0"}
{"signature": "def geometric_fba(model, epsilon=<NUM_LIT>, max_tries=<NUM_LIT:200>, processes=None):", "body": "with model:<EOL><INDENT>consts = []<EOL>obj_vars = []<EOL>updating_vars_cons = []<EOL>prob = model.problem<EOL>add_pfba(model)  <EOL>model.optimize()<EOL>fva_sol = flux_variability_analysis(model, processes=processes)<EOL>mean_flux = (fva_sol[\"<STR_LIT>\"] + fva_sol[\"<STR_LIT>\"]).abs() / <NUM_LIT:2><EOL>for rxn in model.reactions:<EOL><INDENT>var = prob.Variable(\"<STR_LIT>\" + rxn.id,<EOL>lb=<NUM_LIT:0>,<EOL>ub=mean_flux[rxn.id])<EOL>upper_const = prob.Constraint(rxn.flux_expression - var,<EOL>ub=mean_flux[rxn.id],<EOL>name=\"<STR_LIT>\" +<EOL>rxn.id)<EOL>lower_const = prob.Constraint(rxn.flux_expression + var,<EOL>lb=fva_sol.at[rxn.id, \"<STR_LIT>\"],<EOL>name=\"<STR_LIT>\" +<EOL>rxn.id)<EOL>updating_vars_cons.append((rxn.id, var, upper_const, lower_const))<EOL>consts.extend([var, upper_const, lower_const])<EOL>obj_vars.append(var)<EOL><DEDENT>model.add_cons_vars(consts)<EOL>model.objective = prob.Objective(Zero, sloppy=True, direction=\"<STR_LIT>\")<EOL>model.objective.set_linear_coefficients({v: <NUM_LIT:1.0> for v in obj_vars})<EOL>sol = model.optimize()<EOL>fva_sol = flux_variability_analysis(model, processes=processes)<EOL>mean_flux = (fva_sol[\"<STR_LIT>\"] + fva_sol[\"<STR_LIT>\"]).abs() / <NUM_LIT:2><EOL>delta = (fva_sol[\"<STR_LIT>\"] - fva_sol[\"<STR_LIT>\"]).max()<EOL>count = <NUM_LIT:1><EOL>LOGGER.debug(\"<STR_LIT>\",<EOL>count, delta, sol.status)<EOL>while delta > epsilon and count < max_tries:<EOL><INDENT>for rxn_id, var, u_c, l_c in updating_vars_cons:<EOL><INDENT>var.ub = mean_flux[rxn_id]<EOL>u_c.ub = mean_flux[rxn_id]<EOL>l_c.lb = fva_sol.at[rxn_id, \"<STR_LIT>\"]<EOL><DEDENT>sol = model.optimize()<EOL>fva_sol = flux_variability_analysis(model, processes=processes)<EOL>mean_flux = (fva_sol[\"<STR_LIT>\"] + fva_sol[\"<STR_LIT>\"]).abs() / <NUM_LIT:2><EOL>delta = (fva_sol[\"<STR_LIT>\"] - fva_sol[\"<STR_LIT>\"]).max()<EOL>count += 
<NUM_LIT:1><EOL>LOGGER.debug(\"<STR_LIT>\",<EOL>count, delta, sol.status)<EOL><DEDENT>if count == max_tries:<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(max_tries)<EOL>)<EOL><DEDENT><DEDENT>return sol<EOL>", "docstring": "Perform geometric FBA to obtain a unique, centered flux distribution.\n\nGeometric FBA [1]_ formulates the problem as a polyhedron and\nthen solves it by bounding the convex hull of the polyhedron.\nThe bounding forms a box around the convex hull which reduces\nwith every iteration and extracts a unique solution in this way.\n\nParameters\n----------\nmodel: cobra.Model\n    The model to perform geometric FBA on.\nepsilon: float, optional\n    The convergence tolerance of the model (default 1E-06).\nmax_tries: int, optional\n    Maximum number of iterations (default 200).\nprocesses : int, optional\n    The number of parallel processes to run. If not explicitly passed,\n    will be set from the global configuration singleton.\n\nReturns\n-------\ncobra.Solution\n    The solution object containing all the constraints required\n    for geometric FBA.\n\nReferences\n----------\n.. [1] Smallbone, Kieran & Simeonidis, Vangelis. (2009).\n       Flux balance analysis: A geometric perspective.\n       Journal of theoretical biology.258. 311-5.\n       10.1016/j.jtbi.2009.01.027.", "id": "f15950:m0"}
{"signature": "def _init_worker(model, loopless, sense):", "body": "global _model<EOL>global _loopless<EOL>_model = model<EOL>_model.solver.objective.direction = sense<EOL>_loopless = loopless<EOL>", "docstring": "Initialize a global model object for multiprocessing.", "id": "f15951:m0"}
{"signature": "def find_blocked_reactions(model,<EOL>reaction_list=None,<EOL>zero_cutoff=None,<EOL>open_exchanges=False,<EOL>processes=None):", "body": "zero_cutoff = normalize_cutoff(model, zero_cutoff)<EOL>with model:<EOL><INDENT>if open_exchanges:<EOL><INDENT>for reaction in model.exchanges:<EOL><INDENT>reaction.bounds = (min(reaction.lower_bound, -<NUM_LIT:1000>),<EOL>max(reaction.upper_bound, <NUM_LIT:1000>))<EOL><DEDENT><DEDENT>if reaction_list is None:<EOL><INDENT>reaction_list = model.reactions<EOL><DEDENT>model.slim_optimize()<EOL>solution = get_solution(model, reactions=reaction_list)<EOL>reaction_list = solution.fluxes[<EOL>solution.fluxes.abs() < zero_cutoff].index.tolist()<EOL>flux_span = flux_variability_analysis(<EOL>model, fraction_of_optimum=<NUM_LIT:0.>, reaction_list=reaction_list,<EOL>processes=processes<EOL>)<EOL>return flux_span[<EOL>flux_span.abs().max(axis=<NUM_LIT:1>) < zero_cutoff].index.tolist()<EOL><DEDENT>", "docstring": "Find reactions that cannot carry any flux.\n\nThe question whether or not a reaction is blocked is highly dependent\non the current exchange reaction settings for a COBRA model. Hence an\nargument is provided to open all exchange reactions.\n\nNotes\n-----\nSink and demand reactions are left untouched. Please modify them manually.\n\nParameters\n----------\nmodel : cobra.Model\n    The model to analyze.\nreaction_list : list, optional\n    List of reactions to consider, the default includes all model\n    reactions.\nzero_cutoff : float, optional\n    Flux value which is considered to effectively be zero\n    (default model.tolerance).\nopen_exchanges : bool, optional\n    Whether or not to open all exchange reactions to very high flux ranges.\nprocesses : int, optional\n    The number of parallel processes to run. Can speed up the computations\n    if the number of reactions is large. 
If not explicitly\n    passed, it will be set from the global configuration singleton.\n\nReturns\n-------\nlist\n    List with the identifiers of blocked reactions.", "id": "f15951:m3"}
{"signature": "def find_essential_genes(model, threshold=None, processes=None):", "body": "if threshold is None:<EOL><INDENT>threshold = model.slim_optimize(error_value=None) * <NUM_LIT><EOL><DEDENT>deletions = single_gene_deletion(model, method='<STR_LIT>', processes=processes)<EOL>essential = deletions.loc[deletions['<STR_LIT>'].isna() |<EOL>(deletions['<STR_LIT>'] < threshold), :].index<EOL>return {model.genes.get_by_id(g) for ids in essential for g in ids}<EOL>", "docstring": "Return a set of essential genes.\n\nA gene is considered essential if restricting the flux of all reactions\nthat depend on it to zero causes the objective, e.g., the growth rate,\nto also be zero, below the threshold, or infeasible.\n\nParameters\n----------\nmodel : cobra.Model\n    The model to find the essential genes for.\nthreshold : float, optional\n    Minimal objective flux to be considered viable. By default this is\n    1% of the maximal objective.\nprocesses : int, optional\n    The number of parallel processes to run. Can speed up the computations\n    if the number of knockouts to perform is large. If not explicitly\n    passed, it will be set from the global configuration singleton.\n\nReturns\n-------\nset\n    Set of essential genes", "id": "f15951:m4"}
{"signature": "def model_summary(model, solution=None, threshold=<NUM_LIT>, fva=None, names=False,<EOL>floatfmt='<STR_LIT>'):", "body": "if names:<EOL><INDENT>emit = attrgetter('<STR_LIT:name>')<EOL><DEDENT>else:<EOL><INDENT>emit = attrgetter('<STR_LIT:id>')<EOL><DEDENT>objective_reactions = linear_reaction_coefficients(model)<EOL>boundary_reactions = model.exchanges<EOL>summary_rxns = set(objective_reactions.keys()).union(boundary_reactions)<EOL>if solution is None:<EOL><INDENT>model.slim_optimize(error_value=None)<EOL>solution = get_solution(model, reactions=summary_rxns)<EOL><DEDENT>obj_fluxes = pd.DataFrame({key: solution[key.id] * value for key,<EOL>value in iteritems(objective_reactions)},<EOL>index=['<STR_LIT>']).T<EOL>obj_fluxes['<STR_LIT:id>'] = obj_fluxes.apply(<EOL>lambda x: format_long_string(x.name.id, <NUM_LIT:15>), <NUM_LIT:1>)<EOL>metabolites = {m for r in boundary_reactions for m in r.metabolites}<EOL>index = sorted(metabolites, key=attrgetter('<STR_LIT:id>'))<EOL>metabolite_fluxes = pd.DataFrame({<EOL>'<STR_LIT:id>': [format_long_string(emit(m), <NUM_LIT:15>) for m in index],<EOL>'<STR_LIT>': zeros(len(index), dtype=float)<EOL>}, index=[m.id for m in index])<EOL>for rxn in boundary_reactions:<EOL><INDENT>for met, stoich in iteritems(rxn.metabolites):<EOL><INDENT>metabolite_fluxes.at[met.id, '<STR_LIT>'] += stoich * solution[rxn.id]<EOL><DEDENT><DEDENT>if fva is not None:<EOL><INDENT>if len(index) != len(boundary_reactions):<EOL><INDENT>LOGGER.warning(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>metabolite_fluxes['<STR_LIT>'] = zeros(len(index), dtype=float)<EOL>metabolite_fluxes['<STR_LIT>'] = zeros(len(index), dtype=float)<EOL>if hasattr(fva, '<STR_LIT>'):<EOL><INDENT>fva_results = fva<EOL><DEDENT>else:<EOL><INDENT>fva_results = flux_variability_analysis(<EOL>model, reaction_list=boundary_reactions,<EOL>fraction_of_optimum=fva)<EOL><DEDENT>for rxn in boundary_reactions:<EOL><INDENT>for met, stoich in 
iteritems(rxn.metabolites):<EOL><INDENT>fmin = stoich * fva_results.at[rxn.id, '<STR_LIT>']<EOL>fmax = stoich * fva_results.at[rxn.id, '<STR_LIT>']<EOL>if abs(fmin) <= abs(fmax):<EOL><INDENT>metabolite_fluxes.at[met.id, '<STR_LIT>'] += fmin<EOL>metabolite_fluxes.at[met.id, '<STR_LIT>'] += fmax<EOL><DEDENT>else:<EOL><INDENT>metabolite_fluxes.at[met.id, '<STR_LIT>'] += fmax<EOL>metabolite_fluxes.at[met.id, '<STR_LIT>'] += fmin<EOL><DEDENT><DEDENT><DEDENT><DEDENT>metabolite_fluxes = _process_flux_dataframe(<EOL>metabolite_fluxes, fva, threshold, floatfmt)<EOL>def get_str_table(species_df, fva=False):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if fva:<EOL><INDENT>return tabulate(<EOL>species_df.loc[:, ['<STR_LIT:id>', '<STR_LIT>', '<STR_LIT>']].values,<EOL>floatfmt=floatfmt, tablefmt='<STR_LIT>',<EOL>headers=['<STR_LIT:id>', '<STR_LIT>', '<STR_LIT>']).split('<STR_LIT:\\n>')<EOL><DEDENT>else:<EOL><INDENT>return tabulate(species_df.loc[:, ['<STR_LIT:id>', '<STR_LIT>']].values,<EOL>floatfmt=floatfmt, tablefmt='<STR_LIT>').split('<STR_LIT:\\n>')<EOL><DEDENT><DEDENT>in_table = get_str_table(<EOL>metabolite_fluxes[metabolite_fluxes['<STR_LIT>']], fva=fva is not None)<EOL>out_table = get_str_table(<EOL>metabolite_fluxes[~metabolite_fluxes['<STR_LIT>']], fva=fva is not None)<EOL>obj_table = get_str_table(obj_fluxes, fva=False)<EOL>print_(tabulate(<EOL>[entries for entries in zip_longest(in_table, out_table, obj_table)],<EOL>headers=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'], tablefmt='<STR_LIT>'))<EOL>", "docstring": "Print a summary of the input and output fluxes of the model.\n\nParameters\n----------\nsolution: cobra.Solution, optional\n    A previously solved model solution to use for generating the\n    summary. If none provided (default), the summary method will\n    resolve the model. 
Note that the solution object must match the\n    model, i.e., changes to the model such as changed bounds,\n    added or removed reactions are not taken into account by this\n    method.\nthreshold : float, optional\n    Threshold below which fluxes are not reported.\nfva : pandas.DataFrame, float or None, optional\n    Whether or not to include flux variability analysis in the output.\n    If given, fva should either be a previous FVA solution matching\n    the model or a float between 0 and 1 representing the\n    fraction of the optimum objective to be searched.\nnames : bool, optional\n    Emit reaction and metabolite names rather than identifiers (default\n    False).\nfloatfmt : string, optional\n    Format string for floats (default '.3g').", "id": "f15953:m1"}
{"signature": "def fastcc(model, flux_threshold=<NUM_LIT:1.0>, zero_cutoff=None):", "body": "zero_cutoff = normalize_cutoff(model, zero_cutoff)<EOL>with model:<EOL><INDENT>obj_vars = []<EOL>vars_and_cons = []<EOL>prob = model.problem<EOL>for rxn in model.reactions:<EOL><INDENT>var = prob.Variable(\"<STR_LIT>\".format(rxn.id),<EOL>lb=<NUM_LIT:0.0>, ub=flux_threshold)<EOL>const = prob.Constraint(rxn.forward_variable +<EOL>rxn.reverse_variable -<EOL>var, name=\"<STR_LIT>\".format(rxn.id),<EOL>lb=<NUM_LIT:0.0>)<EOL>vars_and_cons.extend([var, const])<EOL>obj_vars.append(var)<EOL><DEDENT>model.add_cons_vars(vars_and_cons)<EOL>model.objective = prob.Objective(Zero, sloppy=True, direction=\"<STR_LIT>\")<EOL>model.objective.set_linear_coefficients({v: <NUM_LIT:1.0> for v in obj_vars})<EOL>sol = model.optimize()<EOL><DEDENT>rxns_to_remove = sol.fluxes[sol.fluxes.abs() < zero_cutoff].index<EOL>consistent_model = model.copy()<EOL>consistent_model.remove_reactions(rxns_to_remove, remove_orphans=True)<EOL>return consistent_model<EOL>", "docstring": "r\"\"\"\n    Check consistency of a metabolic network using FASTCC [1]_.\n\n    FASTCC (Fast Consistency Check) is an algorithm for rapid and\n    efficient consistency check in metabolic networks. FASTCC is\n    a pure LP implementation and is low on computation resource\n    demand. FASTCC also circumvents the problem associated with\n    reversible reactions for the purpose. Given a global model,\n    it will generate a consistent global model i.e., remove\n    blocked reactions. 
For more details on FASTCC, please\n    check [1]_.\n\n    Parameters\n    ----------\n    model: cobra.Model\n        The constraint-based model to operate on.\n    flux_threshold: float, optional (default 1.0)\n        The flux threshold to consider.\n    zero_cutoff: float, optional\n        The cutoff to consider for zero flux (default model.tolerance).\n\n    Returns\n    -------\n    cobra.Model\n        The consistent constraint-based model.\n\n    Notes\n    -----\n    The LP used for FASTCC is like so:\n    maximize: \\sum_{i \\in J} z_i\n    s.t.    : z_i \\in [0, \\varepsilon] \\forall i \\in J, z_i \\in \\mathbb{R}_+\n              v_i \\ge z_i \\forall i \\in J\n              Sv = 0 v \\in B\n\n    References\n    ----------\n    .. [1] Vlassis N, Pacheco MP, Sauter T (2014)\n           Fast Reconstruction of Compact Context-Specific Metabolic Network\n           Models.\n           PLoS Comput Biol 10(1): e1003424. doi:10.1371/journal.pcbi.1003424", "id": "f15954:m0"}
{"signature": "def add_room(model, solution=None, linear=False, delta=<NUM_LIT>, epsilon=<NUM_LIT>):", "body": "if '<STR_LIT>' in model.solver.variables:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if solution is None:<EOL><INDENT>solution = pfba(model)<EOL><DEDENT>prob = model.problem<EOL>variable = prob.Variable(\"<STR_LIT>\", ub=solution.objective_value)<EOL>constraint = prob.Constraint(<EOL>model.solver.objective.expression - variable,<EOL>ub=<NUM_LIT:0.0>,<EOL>lb=<NUM_LIT:0.0>,<EOL>name=\"<STR_LIT>\"<EOL>)<EOL>model.objective = prob.Objective(Zero, direction=\"<STR_LIT>\", sloppy=True)<EOL>vars_and_cons = [variable, constraint]<EOL>obj_vars = []<EOL>for rxn in model.reactions:<EOL><INDENT>flux = solution.fluxes[rxn.id]<EOL>if linear:<EOL><INDENT>y = prob.Variable(\"<STR_LIT>\" + rxn.id, lb=<NUM_LIT:0>, ub=<NUM_LIT:1>)<EOL>delta = epsilon = <NUM_LIT:0.0><EOL><DEDENT>else:<EOL><INDENT>y = prob.Variable(\"<STR_LIT>\" + rxn.id, type=\"<STR_LIT>\")<EOL><DEDENT>w_u = flux + (delta * abs(flux)) + epsilon<EOL>upper_const = prob.Constraint(<EOL>rxn.flux_expression - y * (rxn.upper_bound - w_u),<EOL>ub=w_u, name=\"<STR_LIT>\" + rxn.id)<EOL>w_l = flux - (delta * abs(flux)) - epsilon<EOL>lower_const = prob.Constraint(<EOL>rxn.flux_expression - y * (rxn.lower_bound - w_l),<EOL>lb=w_l, name=\"<STR_LIT>\" + rxn.id)<EOL>vars_and_cons.extend([y, upper_const, lower_const])<EOL>obj_vars.append(y)<EOL><DEDENT>model.add_cons_vars(vars_and_cons)<EOL>model.objective.set_linear_coefficients({v: <NUM_LIT:1.0> for v in obj_vars})<EOL>", "docstring": "r\"\"\"\n    Add constraints and objective for ROOM.\n\n    This function adds variables and constraints for applying regulatory\n    on/off minimization (ROOM) to the model.\n\n    Parameters\n    ----------\n    model : cobra.Model\n        The model to add ROOM constraints and objective to.\n    solution : cobra.Solution, optional\n        A previous solution to use as a reference. 
If no solution is given,\n        one will be computed using pFBA.\n    linear : bool, optional\n        Whether to use the linear ROOM formulation or not (default False).\n    delta: float, optional\n        The relative tolerance range which is multiplicative in nature\n        (default 0.03).\n    epsilon: float, optional\n        The absolute range of tolerance which is additive\n        (default 0.001).\n\n    Notes\n    -----\n    The formulation used here is the same as stated in the original paper [1]_.\n    The mathematical expression is given below:\n\n    minimize \\sum_{i=1}^m y^i\n    s.t. Sv = 0\n         v_min <= v <= v_max\n         v_j = 0\n         j ∈ A\n         for 1 <= i <= m\n         v_i - y_i(v_{max,i} - w_i^u) <= w_i^u      (1)\n         v_i - y_i(v_{min,i} - w_i^l) <= w_i^l      (2)\n         y_i ∈ {0,1}                                (3)\n         w_i^u = w_i + \\delta|w_i| + \\epsilon\n         w_i^l = w_i - \\delta|w_i| - \\epsilon\n\n    So, for the linear version of the ROOM , constraint (3) is relaxed to\n    0 <= y_i <= 1.\n\n    See Also\n    --------\n    pfba : parsimonious FBA\n\n    References\n    ----------\n    .. [1] Tomer Shlomi, Omer Berkman and Eytan Ruppin, \"Regulatory on/off\n     minimization of metabolic flux changes after genetic perturbations\",\n     PNAS 2005 102 (21) 7695-7700; doi:10.1073/pnas.0406346102", "id": "f15955:m1"}
{"signature": "def _multi_deletion(model, entity, element_lists, method=\"<STR_LIT>\",<EOL>solution=None, processes=None, **kwargs):", "body": "solver = sutil.interface_to_str(model.problem.__name__)<EOL>if method == \"<STR_LIT>\" and solver not in sutil.qp_solvers:<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(solver))<EOL><DEDENT>if processes is None:<EOL><INDENT>processes = CONFIGURATION.processes<EOL><DEDENT>with model:<EOL><INDENT>if \"<STR_LIT>\" in method:<EOL><INDENT>add_moma(model, solution=solution, linear=\"<STR_LIT>\" in method)<EOL><DEDENT>elif \"<STR_LIT>\" in method:<EOL><INDENT>add_room(model, solution=solution, linear=\"<STR_LIT>\" in method,<EOL>**kwargs)<EOL><DEDENT>args = set([frozenset(comb) for comb in product(*element_lists)])<EOL>processes = min(processes, len(args))<EOL>def extract_knockout_results(result_iter):<EOL><INDENT>result = pd.DataFrame([<EOL>(frozenset(ids), growth, status)<EOL>for (ids, growth, status) in result_iter<EOL>], columns=['<STR_LIT>', '<STR_LIT>', '<STR_LIT:status>'])<EOL>result.set_index('<STR_LIT>', inplace=True)<EOL>return result<EOL><DEDENT>if processes > <NUM_LIT:1>:<EOL><INDENT>worker = dict(gene=_gene_deletion_worker,<EOL>reaction=_reaction_deletion_worker)[entity]<EOL>chunk_size = len(args) // processes<EOL>pool = multiprocessing.Pool(<EOL>processes, initializer=_init_worker, initargs=(model,)<EOL>)<EOL>results = extract_knockout_results(pool.imap_unordered(<EOL>worker,<EOL>args,<EOL>chunksize=chunk_size<EOL>))<EOL>pool.close()<EOL>pool.join()<EOL><DEDENT>else:<EOL><INDENT>worker = dict(gene=_gene_deletion,<EOL>reaction=_reaction_deletion)[entity]<EOL>results = extract_knockout_results(map(<EOL>partial(worker, model), args))<EOL><DEDENT>return results<EOL><DEDENT>", "docstring": "Provide a common interface for single or multiple knockouts.\n\nParameters\n----------\nmodel : cobra.Model\n    The metabolic model to perform deletions in.\nentity : 'gene' or 'reaction'\n    The entity 
to knockout (``cobra.Gene`` or ``cobra.Reaction``).\nelement_lists : list\n    List of iterables ``cobra.Reaction``s or ``cobra.Gene``s (or their IDs)\n    to be deleted.\nmethod: {\"fba\", \"moma\", \"linear moma\", \"room\", \"linear room\"}, optional\n    Method used to predict the growth rate.\nsolution : cobra.Solution, optional\n    A previous solution to use as a reference for (linear) MOMA or ROOM.\nprocesses : int, optional\n    The number of parallel processes to run. Can speed up the computations\n    if the number of knockouts to perform is large. If not passed,\n    will be set to the number of CPUs found.\nkwargs :\n    Passed on to underlying simulation functions.\n\nReturns\n-------\npandas.DataFrame\n    A representation of all combinations of entity deletions. The\n    columns are 'growth' and 'status', where\n\n    index : frozenset([str])\n        The gene or reaction identifiers that were knocked out.\n    growth : float\n        The growth rate of the adjusted model.\n    status : str\n        The solution's status.", "id": "f15957:m7"}
{"signature": "def normalize_cutoff(model, zero_cutoff=None):", "body": "if zero_cutoff is None:<EOL><INDENT>return model.tolerance<EOL><DEDENT>else:<EOL><INDENT>if zero_cutoff < model.tolerance:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>else:<EOL><INDENT>return zero_cutoff<EOL><DEDENT><DEDENT>", "docstring": "Return a valid zero cutoff value.", "id": "f15958:m0"}
{"signature": "def add_loopless(model, zero_cutoff=None):", "body": "zero_cutoff = normalize_cutoff(model, zero_cutoff)<EOL>internal = [i for i, r in enumerate(model.reactions) if not r.boundary]<EOL>s_int = create_stoichiometric_matrix(model)[:, numpy.array(internal)]<EOL>n_int = nullspace(s_int).T<EOL>max_bound = max(max(abs(b) for b in r.bounds) for r in model.reactions)<EOL>prob = model.problem<EOL>to_add = []<EOL>for i in internal:<EOL><INDENT>rxn = model.reactions[i]<EOL>indicator = prob.Variable(\"<STR_LIT>\" + rxn.id, type=\"<STR_LIT>\")<EOL>on_off_constraint = prob.Constraint(<EOL>rxn.flux_expression - max_bound * indicator,<EOL>lb=-max_bound, ub=<NUM_LIT:0>, name=\"<STR_LIT>\" + rxn.id)<EOL>delta_g = prob.Variable(\"<STR_LIT>\" + rxn.id)<EOL>delta_g_range = prob.Constraint(<EOL>delta_g + (max_bound + <NUM_LIT:1>) * indicator,<EOL>lb=<NUM_LIT:1>, ub=max_bound, name=\"<STR_LIT>\" + rxn.id)<EOL>to_add.extend([indicator, on_off_constraint, delta_g, delta_g_range])<EOL><DEDENT>model.add_cons_vars(to_add)<EOL>for i, row in enumerate(n_int):<EOL><INDENT>name = \"<STR_LIT>\" + str(i)<EOL>nullspace_constraint = prob.Constraint(Zero, lb=<NUM_LIT:0>, ub=<NUM_LIT:0>, name=name)<EOL>model.add_cons_vars([nullspace_constraint])<EOL>coefs = {model.variables[<EOL>\"<STR_LIT>\" + model.reactions[ridx].id]: row[i]<EOL>for i, ridx in enumerate(internal) if<EOL>abs(row[i]) > zero_cutoff}<EOL>model.constraints[name].set_linear_coefficients(coefs)<EOL><DEDENT>", "docstring": "Modify a model so all feasible flux distributions are loopless.\n\n    In most cases you probably want to use the much faster `loopless_solution`.\n    May be used in cases where you want to add complex constraints and\n    objecives (for instance quadratic objectives) to the model afterwards\n    or use an approximation of Gibbs free energy directions in you model.\n    Adds variables and constraints to a model which will disallow flux\n    distributions with loops. 
The used formulation is described in [1]_.\n    This function *will* modify your model.\n\n    Parameters\n    ----------\n    model : cobra.Model\n        The model to which to add the constraints.\n    zero_cutoff : positive float, optional\n        Cutoff used for null space. Coefficients with an absolute value smaller\n        than `zero_cutoff` are considered to be zero (default model.tolerance).\n\n    Returns\n    -------\n    Nothing\n\n    References\n    ----------\n    .. [1] Elimination of thermodynamically infeasible loops in steady-state\n       metabolic models. Schellenberger J, Lewis NE, Palsson BO. Biophys J.\n       2011 Feb 2;100(3):544-53. doi: 10.1016/j.bpj.2010.12.3707. Erratum\n       in: Biophys J. 2011 Mar 2;100(5):1381.", "id": "f15959:m0"}
{"signature": "def add_cons_vars_to_problem(model, what, **kwargs):", "body": "context = get_context(model)<EOL>model.solver.add(what, **kwargs)<EOL>if context:<EOL><INDENT>context(partial(model.solver.remove, what))<EOL><DEDENT>", "docstring": "Add variables and constraints to a Model's solver object.\n\n    Useful for variables and constraints that can not be expressed with\n    reactions and lower/upper bounds. Will integrate with the Model's context\n    manager in order to revert changes upon leaving the context.\n\n    Parameters\n    ----------\n    model : a cobra model\n       The model to which to add the variables and constraints.\n    what : list or tuple of optlang variables or constraints.\n       The variables or constraints to add to the model. Must be of class\n       `model.problem.Variable` or\n       `model.problem.Constraint`.\n    **kwargs : keyword arguments\n        passed to solver.add()", "id": "f15963:m6"}
{"signature": "def interface_to_str(interface):", "body": "if isinstance(interface, ModuleType):<EOL><INDENT>interface = interface.__name__<EOL><DEDENT>return re.sub(r\"<STR_LIT>\", \"<STR_LIT>\", interface)<EOL>", "docstring": "Give a string representation for an optlang interface.\n\n    Parameters\n    ----------\n    interface : string, ModuleType\n        Full name of the interface in optlang or cobra representation.\n        For instance 'optlang.glpk_interface' or 'optlang-glpk'.\n\n    Returns\n    -------\n    string\n       The name of the interface as a string", "id": "f15963:m3"}
{"signature": "def check_solver_status(status, raise_error=False):", "body": "if status == OPTIMAL:<EOL><INDENT>return<EOL><DEDENT>elif (status in has_primals) and not raise_error:<EOL><INDENT>warn(\"<STR_LIT>\".format(status), UserWarning)<EOL><DEDENT>elif status is None:<EOL><INDENT>raise OptimizationError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>raise OptimizationError(\"<STR_LIT>\".format(status))<EOL><DEDENT>", "docstring": "Perform standard checks on a solver's status.", "id": "f15963:m10"}
{"signature": "def get_solver_name(mip=False, qp=False):", "body": "if len(solvers) == <NUM_LIT:0>:<EOL><INDENT>raise SolverNotFound(\"<STR_LIT>\")<EOL><DEDENT>mip_order = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]<EOL>lp_order = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]<EOL>qp_order = [\"<STR_LIT>\", \"<STR_LIT>\"]<EOL>if mip is False and qp is False:<EOL><INDENT>for solver_name in lp_order:<EOL><INDENT>if solver_name in solvers:<EOL><INDENT>return solver_name<EOL><DEDENT><DEDENT>return list(solvers)[<NUM_LIT:0>]<EOL><DEDENT>elif qp:  <EOL><INDENT>for solver_name in qp_order:<EOL><INDENT>if solver_name in solvers:<EOL><INDENT>return solver_name<EOL><DEDENT><DEDENT>raise SolverNotFound(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>for solver_name in mip_order:<EOL><INDENT>if solver_name in solvers:<EOL><INDENT>return solver_name<EOL><DEDENT><DEDENT><DEDENT>raise SolverNotFound(\"<STR_LIT>\")<EOL>", "docstring": "Select a solver for a given optimization problem.\n\n    Parameters\n    ----------\n    mip : bool\n        Does the solver require mixed integer linear programming capabilities?\n    qp : bool\n        Does the solver require quadratic programming capabilities?\n\n    Returns\n    -------\n    string\n        The name of feasible solver.\n\n    Raises\n    ------\n    SolverNotFound\n        If no suitable solver could be found.", "id": "f15963:m4"}
{"signature": "def add_absolute_expression(model, expression, name=\"<STR_LIT>\", ub=None,<EOL>difference=<NUM_LIT:0>, add=True):", "body": "Components = namedtuple('<STR_LIT>', ['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>'])<EOL>variable = model.problem.Variable(name, lb=<NUM_LIT:0>, ub=ub)<EOL>upper_constraint = model.problem.Constraint(expression - variable,<EOL>ub=difference,<EOL>name=\"<STR_LIT>\" + name),<EOL>lower_constraint = model.problem.Constraint(expression + variable,<EOL>lb=difference,<EOL>name=\"<STR_LIT>\" + name)<EOL>to_add = Components(variable, upper_constraint, lower_constraint)<EOL>if add:<EOL><INDENT>add_cons_vars_to_problem(model, to_add)<EOL><DEDENT>return to_add<EOL>", "docstring": "Add the absolute value of an expression to the model.\n\n    Also defines a variable for the absolute value that can be used in other\n    objectives or constraints.\n\n    Parameters\n    ----------\n    model : a cobra model\n       The model to which to add the absolute expression.\n    expression : A sympy expression\n       Must be a valid expression within the Model's solver object. The\n       absolute value is applied automatically on the expression.\n    name : string\n       The name of the newly created variable.\n    ub : positive float\n       The upper bound for the variable.\n    difference : positive float\n        The difference between the expression and the variable.\n    add : bool\n        Whether to add the variable to the model at once.\n\n    Returns\n    -------\n    namedtuple\n        A named tuple with variable and two constraints (upper_constraint,\n        lower_constraint) describing the new variable and the constraints\n        that assign the absolute value of the expression to it.", "id": "f15963:m8"}
{"signature": "def choose_solver(model, solver=None, qp=False):", "body": "if solver is None:<EOL><INDENT>solver = model.problem<EOL><DEDENT>else:<EOL><INDENT>model.solver = solver<EOL><DEDENT>if qp and interface_to_str(solver) not in qp_solvers:<EOL><INDENT>solver = solvers[get_solver_name(qp=True)]<EOL><DEDENT>return solver<EOL>", "docstring": "Choose a solver given a solver name and model.\n\n    This will choose a solver compatible with the model and required\n    capabilities. Also respects model.solver where it can.\n\n    Parameters\n    ----------\n    model : a cobra model\n        The model for which to choose the solver.\n    solver : str, optional\n        The name of the solver to be used.\n    qp : boolean, optional\n        Whether the solver needs Quadratic Programming capabilities.\n\n    Returns\n    -------\n    solver : an optlang solver interface\n        Returns a valid solver for the problem.\n\n    Raises\n    ------\n    SolverNotFound\n        If no suitable solver could be found.", "id": "f15963:m5"}
{"signature": "def resettable(f):", "body": "def wrapper(self, new_value):<EOL><INDENT>context = get_context(self)<EOL>if context:<EOL><INDENT>old_value = getattr(self, f.__name__)<EOL>if old_value == new_value:<EOL><INDENT>return<EOL><DEDENT>context(partial(f, self, old_value))<EOL><DEDENT>f(self, new_value)<EOL><DEDENT>return wrapper<EOL>", "docstring": "A decorator to simplify the context management of simple object\n    attributes. Gets the value of the attribute prior to setting it, and stores\n    a function to set the value to the old value in the HistoryManager.", "id": "f15964:m1"}
{"signature": "def __call__(self, operation):", "body": "self._history.append(operation)<EOL>", "docstring": "Add the corresponding method to the history stack.\n\n        Parameters\n        ----------\n        operation : `function`\n            A function to be called at a later time", "id": "f15964:c0:m1"}
{"signature": "def get_context(obj):", "body": "try:<EOL><INDENT>return obj._contexts[-<NUM_LIT:1>]<EOL><DEDENT>except (AttributeError, IndexError):<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>return obj._model._contexts[-<NUM_LIT:1>]<EOL><DEDENT>except (AttributeError, IndexError):<EOL><INDENT>pass<EOL><DEDENT>return None<EOL>", "docstring": "Search for a context manager", "id": "f15964:m0"}
{"signature": "def constraint_matrices(model, array_type='<STR_LIT>', include_vars=False,<EOL>zero_tol=<NUM_LIT>):", "body": "if array_type not in ('<STR_LIT>', '<STR_LIT>') and not dok_matrix:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>array_builder = {<EOL>'<STR_LIT>': np.array, '<STR_LIT>': dok_matrix, '<STR_LIT>': lil_matrix,<EOL>'<STR_LIT>': pd.DataFrame,<EOL>}[array_type]<EOL>Problem = namedtuple(\"<STR_LIT>\",<EOL>[\"<STR_LIT>\", \"<STR_LIT:b>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\"])<EOL>equality_rows = []<EOL>inequality_rows = []<EOL>inequality_bounds = []<EOL>b = []<EOL>for const in model.constraints:<EOL><INDENT>lb = -np.inf if const.lb is None else const.lb<EOL>ub = np.inf if const.ub is None else const.ub<EOL>equality = (ub - lb) < zero_tol<EOL>coefs = const.get_linear_coefficients(model.variables)<EOL>coefs = [coefs[v] for v in model.variables]<EOL>if equality:<EOL><INDENT>b.append(lb if abs(lb) > zero_tol else <NUM_LIT:0.0>)<EOL>equality_rows.append(coefs)<EOL><DEDENT>else:<EOL><INDENT>inequality_rows.append(coefs)<EOL>inequality_bounds.append([lb, ub])<EOL><DEDENT><DEDENT>var_bounds = np.array([[v.lb, v.ub] for v in model.variables])<EOL>fixed = var_bounds[:, <NUM_LIT:1>] - var_bounds[:, <NUM_LIT:0>] < zero_tol<EOL>results = Problem(<EOL>equalities=array_builder(equality_rows),<EOL>b=np.array(b),<EOL>inequalities=array_builder(inequality_rows),<EOL>bounds=array_builder(inequality_bounds),<EOL>variable_fixed=np.array(fixed),<EOL>variable_bounds=array_builder(var_bounds))<EOL>return results<EOL>", "docstring": "Create a matrix representation of the problem.\n\n    This is used for alternative solution approaches that do not use optlang.\n    The function will construct the equality matrix, inequality matrix and\n    bounds for the complete problem.\n\n    Notes\n    -----\n    To accomodate non-zero equalities the problem will add the variable\n    \"const_one\" which is a variable that equals one.\n\n    
Arguments\n    ---------\n    model : cobra.Model\n        The model from which to obtain the LP problem.\n    array_type : string\n        The type of array to construct. if 'dense', return a standard\n        numpy.array, 'dok', or 'lil' will construct a sparse array using\n        scipy of the corresponding type and 'DataFrame' will give a\n        pandas `DataFrame` with metabolite indices and reaction columns.\n    zero_tol : float\n        The zero tolerance used to judge whether two bounds are the same.\n\n    Returns\n    -------\n    collections.namedtuple\n        A named tuple consisting of 6 matrices and 2 vectors:\n        - \"equalities\" is a matrix S such that S*vars = b. It includes a row\n          for each constraint and one column for each variable.\n        - \"b\" the right side of the equality equation such that S*vars = b.\n        - \"inequalities\" is a matrix M such that lb <= M*vars <= ub.\n          It contains a row for each inequality and as many columns as\n          variables.\n        - \"bounds\" is a compound matrix [lb ub] containing the lower and\n          upper bounds for the inequality constraints in M.\n        - \"variable_fixed\" is a boolean vector indicating whether the variable\n          at that index is fixed (lower bound == upper_bound) and\n          is thus bounded by an equality constraint.\n        - \"variable_bounds\" is a compound matrix [lb ub] containing the\n          lower and upper bounds for all variables.", "id": "f15966:m2"}
{"signature": "def create_stoichiometric_matrix(model, array_type='<STR_LIT>', dtype=None):", "body": "if array_type not in ('<STR_LIT>', '<STR_LIT>') and not dok_matrix:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if dtype is None:<EOL><INDENT>dtype = np.float64<EOL><DEDENT>array_constructor = {<EOL>'<STR_LIT>': np.zeros, '<STR_LIT>': dok_matrix, '<STR_LIT>': lil_matrix,<EOL>'<STR_LIT>': np.zeros,<EOL>}<EOL>n_metabolites = len(model.metabolites)<EOL>n_reactions = len(model.reactions)<EOL>array = array_constructor[array_type]((n_metabolites, n_reactions),<EOL>dtype=dtype)<EOL>m_ind = model.metabolites.index<EOL>r_ind = model.reactions.index<EOL>for reaction in model.reactions:<EOL><INDENT>for metabolite, stoich in iteritems(reaction.metabolites):<EOL><INDENT>array[m_ind(metabolite), r_ind(reaction)] = stoich<EOL><DEDENT><DEDENT>if array_type == '<STR_LIT>':<EOL><INDENT>metabolite_ids = [met.id for met in model.metabolites]<EOL>reaction_ids = [rxn.id for rxn in model.reactions]<EOL>return pd.DataFrame(array, index=metabolite_ids, columns=reaction_ids)<EOL><DEDENT>else:<EOL><INDENT>return array<EOL><DEDENT>", "docstring": "Return a stoichiometric array representation of the given model.\n\n    The the columns represent the reactions and rows represent\n    metabolites. S[i,j] therefore contains the quantity of metabolite `i`\n    produced (negative for consumed) by reaction `j`.\n\n    Parameters\n    ----------\n    model : cobra.Model\n        The cobra model to construct the matrix for.\n    array_type : string\n        The type of array to construct. if 'dense', return a standard\n        numpy.array, 'dok', or 'lil' will construct a sparse array using\n        scipy of the corresponding type and 'DataFrame' will give a\n        pandas `DataFrame` with metabolite indices and reaction columns\n    dtype : data-type\n        The desired data-type for the array. 
If not given, defaults to float.\n\n    Returns\n    -------\n    matrix of class `dtype`\n        The stoichiometric matrix for the given model.", "id": "f15966:m0"}
{"signature": "def minimal_medium(model, min_objective_value=<NUM_LIT:0.1>, exports=False,<EOL>minimize_components=False, open_exchanges=False):", "body": "exchange_rxns = find_boundary_types(model, \"<STR_LIT>\")<EOL>if isinstance(open_exchanges, bool):<EOL><INDENT>open_bound = <NUM_LIT:1000><EOL><DEDENT>else:<EOL><INDENT>open_bound = open_exchanges<EOL><DEDENT>with model as mod:<EOL><INDENT>if open_exchanges:<EOL><INDENT>LOGGER.debug(\"<STR_LIT>\",<EOL>len(exchange_rxns))<EOL>for rxn in exchange_rxns:<EOL><INDENT>rxn.bounds = (-open_bound, open_bound)<EOL><DEDENT><DEDENT>LOGGER.debug(\"<STR_LIT>\")<EOL>obj_const = mod.problem.Constraint(<EOL>mod.objective.expression, lb=min_objective_value,<EOL>name=\"<STR_LIT>\")<EOL>mod.add_cons_vars([obj_const])<EOL>mod.solver.update()<EOL>mod.objective = Zero<EOL>LOGGER.debug(\"<STR_LIT>\")<EOL>tol = mod.solver.configuration.tolerances.feasibility<EOL>if minimize_components:<EOL><INDENT>add_mip_obj(mod)<EOL>if isinstance(minimize_components, bool):<EOL><INDENT>minimize_components = <NUM_LIT:1><EOL><DEDENT>seen = set()<EOL>best = num_components = mod.slim_optimize()<EOL>if mod.solver.status != OPTIMAL:<EOL><INDENT>LOGGER.warning(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>exclusion = mod.problem.Constraint(Zero, ub=<NUM_LIT:0>)<EOL>mod.add_cons_vars([exclusion])<EOL>mod.solver.update()<EOL>media = []<EOL>for i in range(minimize_components):<EOL><INDENT>LOGGER.info(\"<STR_LIT>\", (i + <NUM_LIT:1>))<EOL>vars = [mod.variables[\"<STR_LIT>\" + s] for s in seen]<EOL>if len(seen) > <NUM_LIT:0>:<EOL><INDENT>exclusion.set_linear_coefficients(<EOL>dict.fromkeys(vars, <NUM_LIT:1>))<EOL>exclusion.ub = best - <NUM_LIT:1><EOL><DEDENT>num_components = mod.slim_optimize()<EOL>if mod.solver.status != OPTIMAL or num_components > best:<EOL><INDENT>break<EOL><DEDENT>medium = _as_medium(exchange_rxns, tol, exports=exports)<EOL>media.append(medium)<EOL>seen.update(medium[medium > <NUM_LIT:0>].index)<EOL><DEDENT>if len(media) > 
<NUM_LIT:1>:<EOL><INDENT>medium = pd.concat(media, axis=<NUM_LIT:1>, sort=True).fillna(<NUM_LIT:0.0>)<EOL>medium.sort_index(axis=<NUM_LIT:1>, inplace=True)<EOL><DEDENT>else:<EOL><INDENT>medium = media[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>add_linear_obj(mod)<EOL>mod.slim_optimize()<EOL>if mod.solver.status != OPTIMAL:<EOL><INDENT>LOGGER.warning(\"<STR_LIT>\")<EOL>return None<EOL><DEDENT>medium = _as_medium(exchange_rxns, tol, exports=exports)<EOL><DEDENT><DEDENT>return medium<EOL>", "docstring": "Find the minimal growth medium for the model.\n\nFinds the minimal growth medium for the model which allows for\nmodel as well as individual growth. Here, a minimal medium can either\nbe the medium requiring the smallest total import flux or the medium\nrequiring the least components (ergo ingredients), which will be much\nslower due to being a mixed integer problem (MIP).\n\nArguments\n---------\nmodel : cobra.model\n    The model to modify.\nmin_objective_value : positive float or array-like object\n    The minimum growth rate (objective) that has to be achieved.\nexports : boolean\n    Whether to include export fluxes in the returned medium. Defaults to\n    False which will only return import fluxes.\nminimize_components : boolean or positive int\n    Whether to minimize the number of components instead of the total\n    import flux. Might be more intuitive if set to True but may also be\n    slow to calculate for large communities. If set to a number `n` will\n    return up to `n` alternative solutions all with the same number of\n    components.\nopen_exchanges : boolean or number\n    Whether to ignore currently set bounds and make all exchange reactions\n    in the model possible. 
If set to a number all exchange reactions will\n    be opened with (-number, number) as bounds.\n\nReturns\n-------\npandas.Series, pandas.DataFrame or None\n    A series giving the import flux for each required import\n    reaction and (optionally) the associated export fluxes. All exchange\n    fluxes are oriented into the import reaction e.g. positive fluxes\n    denote imports and negative fluxes exports. If `minimize_components`\n    is a number larger 1 may return a DataFrame where each column is a\n    minimal medium. Returns None if the minimization is infeasible\n    (for instance if min_growth > maximum growth rate).\n\nNotes\n-----\nDue to numerical issues the `minimize_components` option will usually only\nminimize the number of \"large\" import fluxes. Specifically, the detection\nlimit is given by ``integrality_tolerance * max_bound`` where ``max_bound``\nis the largest bound on an import reaction. Thus, if you are interested\nin small import fluxes as well you may have to adjust the integrality\ntolerance at first with\n`model.solver.configuration.tolerances.integrality = 1e-7` for instance.\nHowever, this will be *very* slow for large models especially with GLPK.", "id": "f15967:m3"}
{"signature": "def is_boundary_type(reaction, boundary_type, external_compartment):", "body": "<EOL>sbo_term = reaction.annotation.get(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>if isinstance(sbo_term, list):<EOL><INDENT>sbo_term = sbo_term[<NUM_LIT:0>]<EOL><DEDENT>sbo_term = sbo_term.upper()<EOL>if sbo_term == sbo_terms[boundary_type]:<EOL><INDENT>return True<EOL><DEDENT>if sbo_term in [sbo_terms[k] for k in sbo_terms if k != boundary_type]:<EOL><INDENT>return False<EOL><DEDENT>correct_compartment = external_compartment in reaction.compartments<EOL>if boundary_type != \"<STR_LIT>\":<EOL><INDENT>correct_compartment = not correct_compartment<EOL><DEDENT>rev_type = True<EOL>if boundary_type == \"<STR_LIT>\":<EOL><INDENT>rev_type = not reaction.reversibility<EOL><DEDENT>elif boundary_type == \"<STR_LIT>\":<EOL><INDENT>rev_type = reaction.reversibility<EOL><DEDENT>return (reaction.boundary and not<EOL>any(ex in reaction.id for ex in excludes[boundary_type]) and<EOL>correct_compartment and rev_type)<EOL>", "docstring": "Check whether a reaction is an exchange reaction.\n\n    Arguments\n    ---------\n    reaction : cobra.Reaction\n        The reaction to check.\n    boundary_type : str\n        What boundary type to check for. Must be one of\n        \"exchange\", \"demand\", or \"sink\".\n    external_compartment : str\n        The id for the external compartment.\n\n    Returns\n    -------\n    boolean\n        Whether the reaction looks like the requested type. Might be based\n        on a heuristic.", "id": "f15968:m1"}
{"signature": "def rename_genes(cobra_model, rename_dict):", "body": "recompute_reactions = set()  <EOL>remove_genes = []<EOL>for old_name, new_name in iteritems(rename_dict):<EOL><INDENT>try:<EOL><INDENT>gene_index = cobra_model.genes.index(old_name)<EOL><DEDENT>except ValueError:<EOL><INDENT>gene_index = None<EOL><DEDENT>old_gene_present = gene_index is not None<EOL>new_gene_present = new_name in cobra_model.genes<EOL>if old_gene_present and new_gene_present:<EOL><INDENT>old_gene = cobra_model.genes.get_by_id(old_name)<EOL>if old_gene is not cobra_model.genes.get_by_id(new_name):<EOL><INDENT>remove_genes.append(old_gene)<EOL>recompute_reactions.update(old_gene._reaction)<EOL><DEDENT><DEDENT>elif old_gene_present and not new_gene_present:<EOL><INDENT>gene = cobra_model.genes[gene_index]<EOL>cobra_model.genes._dict.pop(gene.id)  <EOL>gene.id = new_name<EOL>cobra_model.genes[gene_index] = gene<EOL><DEDENT>elif not old_gene_present and new_gene_present:<EOL><INDENT>pass<EOL><DEDENT>else:  <EOL><INDENT>pass<EOL><DEDENT><DEDENT>cobra_model.repair()<EOL>class Renamer(NodeTransformer):<EOL><INDENT>def visit_Name(self, node):<EOL><INDENT>node.id = rename_dict.get(node.id, node.id)<EOL>return node<EOL><DEDENT><DEDENT>gene_renamer = Renamer()<EOL>for rxn, rule in iteritems(get_compiled_gene_reaction_rules(cobra_model)):<EOL><INDENT>if rule is not None:<EOL><INDENT>rxn._gene_reaction_rule = ast2str(gene_renamer.visit(rule))<EOL><DEDENT><DEDENT>for rxn in recompute_reactions:<EOL><INDENT>rxn.gene_reaction_rule = rxn._gene_reaction_rule<EOL><DEDENT>for i in remove_genes:<EOL><INDENT>cobra_model.genes.remove(i)<EOL><DEDENT>", "docstring": "renames genes in a model from the rename_dict", "id": "f15972:m2"}
{"signature": "def _escape_str_id(id_str):", "body": "for c in (\"<STR_LIT:'>\", '<STR_LIT:\">'):<EOL><INDENT>if id_str.startswith(c) and id_str.endswith(c)and id_str.count(c) == <NUM_LIT:2>:<EOL><INDENT>id_str = id_str.strip(c)<EOL><DEDENT><DEDENT>for char, escaped_char in _renames:<EOL><INDENT>id_str = id_str.replace(char, escaped_char)<EOL><DEDENT>return id_str<EOL>", "docstring": "make a single string id SBML compliant", "id": "f15972:m0"}
{"signature": "def add_SBO(model):", "body": "for r in model.reactions:<EOL><INDENT>if r.annotation.get(\"<STR_LIT>\"):<EOL><INDENT>continue<EOL><DEDENT>if len(r.metabolites) != <NUM_LIT:1>:<EOL><INDENT>continue<EOL><DEDENT>met_id = list(r._metabolites)[<NUM_LIT:0>].id<EOL>if r.id.startswith(\"<STR_LIT>\") and r.id == \"<STR_LIT>\" + met_id:<EOL><INDENT>r.annotation[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL><DEDENT>elif r.id.startswith(\"<STR_LIT>\") and r.id == \"<STR_LIT>\" + met_id:<EOL><INDENT>r.annotation[\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL><DEDENT><DEDENT>", "docstring": "adds SBO terms for demands and exchanges\n\n    This works for models which follow the standard convention for\n    constructing and naming these reactions.\n\n    The reaction should only contain the single metabolite being exchanged,\n    and the id should be EX_metid or DM_metid", "id": "f15973:m0"}
{"signature": "def undelete_model_genes(cobra_model):", "body": "if cobra_model._trimmed_genes is not None:<EOL><INDENT>for x in cobra_model._trimmed_genes:<EOL><INDENT>x.functional = True<EOL><DEDENT><DEDENT>if cobra_model._trimmed_reactions is not None:<EOL><INDENT>for the_reaction, (lower_bound, upper_bound) incobra_model._trimmed_reactions.items():<EOL><INDENT>the_reaction.lower_bound = lower_bound<EOL>the_reaction.upper_bound = upper_bound<EOL><DEDENT><DEDENT>cobra_model._trimmed_genes = []<EOL>cobra_model._trimmed_reactions = {}<EOL>cobra_model._trimmed = False<EOL>", "docstring": "Undoes the effects of a call to delete_model_genes in place.\n\n    cobra_model:  A cobra.Model which will be modified in place", "id": "f15976:m2"}
{"signature": "def prune_unused_metabolites(cobra_model):", "body": "output_model = cobra_model.copy()<EOL>inactive_metabolites = [m for m in output_model.metabolites<EOL>if len(m.reactions) == <NUM_LIT:0>]<EOL>output_model.remove_metabolites(inactive_metabolites)<EOL>return output_model, inactive_metabolites<EOL>", "docstring": "Remove metabolites that are not involved in any reactions and\n    returns pruned model\n\n    Parameters\n    ----------\n    cobra_model: class:`~cobra.core.Model.Model` object\n        the model to remove unused metabolites from\n\n    Returns\n    -------\n    output_model: class:`~cobra.core.Model.Model` object\n        input model with unused metabolites removed\n    inactive_metabolites: list of class:`~cobra.core.reaction.Reaction`\n        list of metabolites that were removed", "id": "f15976:m0"}
{"signature": "def find_gene_knockout_reactions(cobra_model, gene_list,<EOL>compiled_gene_reaction_rules=None):", "body": "potential_reactions = set()<EOL>for gene in gene_list:<EOL><INDENT>if isinstance(gene, string_types):<EOL><INDENT>gene = cobra_model.genes.get_by_id(gene)<EOL><DEDENT>potential_reactions.update(gene._reaction)<EOL><DEDENT>gene_set = {str(i) for i in gene_list}<EOL>if compiled_gene_reaction_rules is None:<EOL><INDENT>compiled_gene_reaction_rules = {r: parse_gpr(r.gene_reaction_rule)[<NUM_LIT:0>]<EOL>for r in potential_reactions}<EOL><DEDENT>return [r for r in potential_reactions<EOL>if not eval_gpr(compiled_gene_reaction_rules[r], gene_set)]<EOL>", "docstring": "identify reactions which will be disabled when the genes are knocked out\n\n    cobra_model: :class:`~cobra.core.Model.Model`\n\n    gene_list: iterable of :class:`~cobra.core.Gene.Gene`\n\n    compiled_gene_reaction_rules: dict of {reaction_id: compiled_string}\n        If provided, this gives pre-compiled gene_reaction_rule strings.\n        The compiled rule strings can be evaluated much faster. If a rule\n        is not provided, the regular expression evaluation will be used.\n        Because not all gene_reaction_rule strings can be evaluated, this\n        dict must exclude any rules which can not be used with eval.", "id": "f15976:m4"}
{"signature": "def prune_unused_reactions(cobra_model):", "body": "output_model = cobra_model.copy()<EOL>reactions_to_prune = [r for r in output_model.reactions<EOL>if len(r.metabolites) == <NUM_LIT:0>]<EOL>output_model.remove_reactions(reactions_to_prune)<EOL>return output_model, reactions_to_prune<EOL>", "docstring": "Remove reactions with no assigned metabolites, returns pruned model\n\n    Parameters\n    ----------\n    cobra_model: class:`~cobra.core.Model.Model` object\n        the model to remove unused reactions from\n\n    Returns\n    -------\n    output_model: class:`~cobra.core.Model.Model` object\n        input model with unused reactions removed\n    reactions_to_prune: list of class:`~cobra.core.reaction.Reaction`\n        list of reactions that were removed", "id": "f15976:m1"}
{"signature": "def step(sampler, x, delta, fraction=None, tries=<NUM_LIT:0>):", "body": "prob = sampler.problem<EOL>valid = ((np.abs(delta) > sampler.feasibility_tol) &<EOL>np.logical_not(prob.variable_fixed))<EOL>valphas = ((<NUM_LIT:1.0> - sampler.bounds_tol) * prob.variable_bounds -<EOL>x)[:, valid]<EOL>valphas = (valphas / delta[valid]).flatten()<EOL>if prob.bounds.shape[<NUM_LIT:0>] > <NUM_LIT:0>:<EOL><INDENT>ineqs = prob.inequalities.dot(delta)<EOL>valid = np.abs(ineqs) > sampler.feasibility_tol<EOL>balphas = ((<NUM_LIT:1.0> - sampler.bounds_tol) * prob.bounds -<EOL>prob.inequalities.dot(x))[:, valid]<EOL>balphas = (balphas / ineqs[valid]).flatten()<EOL>alphas = np.hstack([valphas, balphas])<EOL><DEDENT>else:<EOL><INDENT>alphas = valphas<EOL><DEDENT>pos_alphas = alphas[alphas > <NUM_LIT:0.0>]<EOL>neg_alphas = alphas[alphas <= <NUM_LIT:0.0>]<EOL>alpha_range = np.array([neg_alphas.max() if len(neg_alphas) > <NUM_LIT:0> else <NUM_LIT:0>,<EOL>pos_alphas.min() if len(pos_alphas) > <NUM_LIT:0> else <NUM_LIT:0>])<EOL>if fraction:<EOL><INDENT>alpha = alpha_range[<NUM_LIT:0>] + fraction * (alpha_range[<NUM_LIT:1>] - alpha_range[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>alpha = np.random.uniform(alpha_range[<NUM_LIT:0>], alpha_range[<NUM_LIT:1>])<EOL><DEDENT>p = x + alpha * delta<EOL>if (np.any(sampler._bounds_dist(p) < -sampler.bounds_tol) or<EOL>np.abs(np.abs(alpha_range).max() * delta).max() <<EOL>sampler.bounds_tol):<EOL><INDENT>if tries > MAX_TRIES:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>LOGGER.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>newdir = sampler.warmup[np.random.randint(sampler.n_warmup)]<EOL>sampler.retries += <NUM_LIT:1><EOL>return step(sampler, sampler.center, newdir - sampler.center, None,<EOL>tries + <NUM_LIT:1>)<EOL><DEDENT>return p<EOL>", "docstring": "Sample a new feasible point from the point `x` in direction `delta`.", "id": "f15977:m1"}
{"signature": "def validate(self, samples):", "body": "samples = np.atleast_2d(samples)<EOL>prob = self.problem<EOL>if samples.shape[<NUM_LIT:1>] == len(self.model.reactions):<EOL><INDENT>S = create_stoichiometric_matrix(self.model)<EOL>b = np.array([self.model.constraints[m.id].lb for m in<EOL>self.model.metabolites])<EOL>bounds = np.array([r.bounds for r in self.model.reactions]).T<EOL><DEDENT>elif samples.shape[<NUM_LIT:1>] == len(self.model.variables):<EOL><INDENT>S = prob.equalities<EOL>b = prob.b<EOL>bounds = prob.variable_bounds<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>feasibility = np.abs(S.dot(samples.T).T - b).max(axis=<NUM_LIT:1>)<EOL>lb_error = (samples - bounds[<NUM_LIT:0>, ]).min(axis=<NUM_LIT:1>)<EOL>ub_error = (bounds[<NUM_LIT:1>, ] - samples).min(axis=<NUM_LIT:1>)<EOL>if (samples.shape[<NUM_LIT:1>] == len(self.model.variables) and<EOL>prob.inequalities.shape[<NUM_LIT:0>]):<EOL><INDENT>consts = prob.inequalities.dot(samples.T)<EOL>lb_error = np.minimum(<EOL>lb_error,<EOL>(consts - prob.bounds[<NUM_LIT:0>, ]).min(axis=<NUM_LIT:1>))<EOL>ub_error = np.minimum(<EOL>ub_error,<EOL>(prob.bounds[<NUM_LIT:1>, ] - consts).min(axis=<NUM_LIT:1>)<EOL>)<EOL><DEDENT>valid = (<EOL>(feasibility < self.feasibility_tol) &<EOL>(lb_error > -self.bounds_tol) &<EOL>(ub_error > -self.bounds_tol))<EOL>codes = np.repeat(\"<STR_LIT>\", valid.shape[<NUM_LIT:0>]).astype(np.dtype((str, <NUM_LIT:3>)))<EOL>codes[valid] = \"<STR_LIT:v>\"<EOL>codes[lb_error <= -self.bounds_tol] = np.char.add(<EOL>codes[lb_error <= -self.bounds_tol], \"<STR_LIT:l>\")<EOL>codes[ub_error <= -self.bounds_tol] = np.char.add(<EOL>codes[ub_error <= -self.bounds_tol], \"<STR_LIT:u>\")<EOL>codes[feasibility > self.feasibility_tol] = np.char.add(<EOL>codes[feasibility > self.feasibility_tol], \"<STR_LIT:e>\")<EOL>return codes<EOL>", "docstring": "Validate a set of samples for equality and inequality feasibility.\n\n        Can be used to 
check whether the generated samples and warmup points\n        are feasible.\n\n        Parameters\n        ----------\n        samples : numpy.matrix\n            Must be of dimension (n_samples x n_reactions). Contains the\n            samples to be validated. Samples must be from fluxes.\n\n        Returns\n        -------\n        numpy.array\n            A one-dimensional numpy array of length containing\n            a code of 1 to 3 letters denoting the validation result:\n\n            - 'v' means feasible in bounds and equality constraints\n            - 'l' means a lower bound violation\n            - 'u' means a lower bound validation\n            - 'e' means and equality constraint violation", "id": "f15977:c0:m9"}
{"signature": "def batch(self, batch_size, batch_num, fluxes=True):", "body": "for i in range(batch_num):<EOL><INDENT>yield self.sample(batch_size, fluxes=fluxes)<EOL><DEDENT>", "docstring": "Create a batch generator.\n\n        This is useful to generate n batches of m samples each.\n\n        Parameters\n        ----------\n        batch_size : int\n            The number of samples contained in each batch (m).\n        batch_num : int\n            The number of batches in the generator (n).\n        fluxes : boolean\n            Whether to return fluxes or the internal solver variables. If set\n            to False will return a variable for each forward and backward flux\n            as well as all additional variables you might have defined in the\n            model.\n\n        Yields\n        ------\n        pandas.DataFrame\n            A DataFrame with dimensions (batch_size x n_r) containing\n            a valid flux sample for a total of n_r reactions (or variables if\n            fluxes=False) in each row.", "id": "f15977:c0:m8"}
{"signature": "def _bounds_dist(self, p):", "body": "prob = self.problem<EOL>lb_dist = (p - prob.variable_bounds[<NUM_LIT:0>, ]).min()<EOL>ub_dist = (prob.variable_bounds[<NUM_LIT:1>, ] - p).min()<EOL>if prob.bounds.shape[<NUM_LIT:0>] > <NUM_LIT:0>:<EOL><INDENT>const = prob.inequalities.dot(p)<EOL>const_lb_dist = (const - prob.bounds[<NUM_LIT:0>, ]).min()<EOL>const_ub_dist = (prob.bounds[<NUM_LIT:1>, ] - const).min()<EOL>lb_dist = min(lb_dist, const_lb_dist)<EOL>ub_dist = min(ub_dist, const_ub_dist)<EOL><DEDENT>return np.array([lb_dist, ub_dist])<EOL>", "docstring": "Get the lower and upper bound distances. Negative is bad.", "id": "f15977:c0:m6"}
{"signature": "def sample(self, n, fluxes=True):", "body": "samples = np.zeros((n, self.warmup.shape[<NUM_LIT:1>]))<EOL>for i in range(<NUM_LIT:1>, self.thinning * n + <NUM_LIT:1>):<EOL><INDENT>self.__single_iteration()<EOL>if i % self.thinning == <NUM_LIT:0>:<EOL><INDENT>samples[i//self.thinning - <NUM_LIT:1>, ] = self.prev<EOL><DEDENT><DEDENT>if fluxes:<EOL><INDENT>names = [r.id for r in self.model.reactions]<EOL>return pandas.DataFrame(<EOL>samples[:, self.fwd_idx] - samples[:, self.rev_idx],<EOL>columns=names)<EOL><DEDENT>else:<EOL><INDENT>names = [v.name for v in self.model.variables]<EOL>return pandas.DataFrame(samples, columns=names)<EOL><DEDENT>", "docstring": "Generate a set of samples.\n\n        This is the basic sampling function for all hit-and-run samplers.\n\n        Parameters\n        ----------\n        n : int\n            The number of samples that are generated at once.\n        fluxes : boolean\n            Whether to return fluxes or the internal solver variables. If set\n            to False will return a variable for each forward and backward flux\n            as well as all additional variables you might have defined in the\n            model.\n\n        Returns\n        -------\n        numpy.matrix\n            Returns a matrix with `n` rows, each containing a flux sample.\n\n        Notes\n        -----\n        Performance of this function linearly depends on the number\n        of reactions in your model and the thinning factor.", "id": "f15978:c0:m2"}
{"signature": "def __init__(self, model, thinning=<NUM_LIT:100>, nproj=None, seed=None):", "body": "super(ACHRSampler, self).__init__(model, thinning, nproj=nproj,<EOL>seed=seed)<EOL>self.generate_fva_warmup()<EOL>self.prev = self.center = self.warmup.mean(axis=<NUM_LIT:0>)<EOL>np.random.seed(self._seed)<EOL>", "docstring": "Initialize a new ACHRSampler.", "id": "f15978:c0:m0"}
{"signature": "def sample(self, n, fluxes=True):", "body": "if self.processes > <NUM_LIT:1>:<EOL><INDENT>n_process = np.ceil(n / self.processes).astype(int)<EOL>n = n_process * self.processes<EOL>args = list(zip(<EOL>[n_process] * self.processes, range(self.processes)))<EOL>mp = Pool(self.processes, initializer=mp_init, initargs=(self,))<EOL>results = mp.map(_sample_chain, args, chunksize=<NUM_LIT:1>)<EOL>mp.close()<EOL>mp.join()<EOL>chains = np.vstack([r[<NUM_LIT:1>] for r in results])<EOL>self.retries += sum(r[<NUM_LIT:0>] for r in results)<EOL><DEDENT>else:<EOL><INDENT>mp_init(self)<EOL>results = _sample_chain((n, <NUM_LIT:0>))<EOL>chains = results[<NUM_LIT:1>]<EOL><DEDENT>self.center = (self.n_samples * self.center +<EOL>np.atleast_2d(chains).sum(<NUM_LIT:0>)) / (self.n_samples + n)<EOL>self.n_samples += n<EOL>if fluxes:<EOL><INDENT>names = [r.id for r in self.model.reactions]<EOL>return pandas.DataFrame(<EOL>chains[:, self.fwd_idx] - chains[:, self.rev_idx],<EOL>columns=names)<EOL><DEDENT>else:<EOL><INDENT>names = [v.name for v in self.model.variables]<EOL>return pandas.DataFrame(chains, columns=names)<EOL><DEDENT>", "docstring": "Generate a set of samples.\n\n        This is the basic sampling function for all hit-and-run samplers.\n\n        Paramters\n        ---------\n        n : int\n            The minimum number of samples that are generated at once\n            (see Notes).\n        fluxes : boolean\n            Whether to return fluxes or the internal solver variables. 
If set\n            to False will return a variable for each forward and backward flux\n            as well as all additional variables you might have defined in the\n            model.\n\n        Returns\n        -------\n        numpy.matrix\n            Returns a matrix with `n` rows, each containing a flux sample.\n\n        Notes\n        -----\n        Performance of this function linearly depends on the number\n        of reactions in your model and the thinning factor.\n\n        If the number of processes is larger than one, computation is split\n        across as the CPUs of your machine. This may shorten computation time.\n        However, there is also overhead in setting up parallel computation so\n        we recommend to calculate large numbers of samples at once\n        (`n` > 1000).", "id": "f15979:c0:m1"}
{"signature": "def __getstate__(self):", "body": "d = dict(self.__dict__)<EOL>del d['<STR_LIT>']<EOL>return d<EOL>", "docstring": "Return the object for serialization.", "id": "f15979:c0:m2"}
{"signature": "def __init__(self, model, processes=None, thinning=<NUM_LIT:100>, nproj=None,<EOL>seed=None):", "body": "super(OptGPSampler, self).__init__(model, thinning, seed=seed)<EOL>self.generate_fva_warmup()<EOL>if processes is None:<EOL><INDENT>self.processes = CONFIGURATION.processes<EOL><DEDENT>else:<EOL><INDENT>self.processes = processes<EOL><DEDENT>self.center = shared_np_array((len(model.variables), ),<EOL>self.warmup.mean(axis=<NUM_LIT:0>))<EOL>", "docstring": "Initialize a new OptGPSampler.", "id": "f15979:c0:m0"}
{"signature": "def _sample_chain(args):", "body": "n, idx = args       <EOL>center = sampler.center<EOL>np.random.seed((sampler._seed + idx) % np.iinfo(np.int32).max)<EOL>pi = np.random.randint(sampler.n_warmup)<EOL>prev = sampler.warmup[pi, ]<EOL>prev = step(sampler, center, prev - center, <NUM_LIT>)<EOL>n_samples = max(sampler.n_samples, <NUM_LIT:1>)<EOL>samples = np.zeros((n, center.shape[<NUM_LIT:0>]))<EOL>for i in range(<NUM_LIT:1>, sampler.thinning * n + <NUM_LIT:1>):<EOL><INDENT>pi = np.random.randint(sampler.n_warmup)<EOL>delta = sampler.warmup[pi, ] - center<EOL>prev = step(sampler, prev, delta)<EOL>if sampler.problem.homogeneous and (<EOL>n_samples * sampler.thinning % sampler.nproj == <NUM_LIT:0>):<EOL><INDENT>prev = sampler._reproject(prev)<EOL>center = sampler._reproject(center)<EOL><DEDENT>if i % sampler.thinning == <NUM_LIT:0>:<EOL><INDENT>samples[i//sampler.thinning - <NUM_LIT:1>, ] = prev<EOL><DEDENT>center = ((n_samples * center) / (n_samples + <NUM_LIT:1>) +<EOL>prev / (n_samples + <NUM_LIT:1>))<EOL>n_samples += <NUM_LIT:1><EOL><DEDENT>return (sampler.retries, samples)<EOL>", "docstring": "Sample a single chain for OptGPSampler.\n\n    center and n_samples are updated locally and forgotten afterwards.", "id": "f15979:m1"}
{"signature": "def find_bump(target, tag):", "body": "tmp = tag.split(\"<STR_LIT:.>\")<EOL>existing = [intify(basename(f)) for f in glob(join(target, \"<STR_LIT>\"))]<EOL>latest = max(existing)<EOL>if int(tmp[<NUM_LIT:0>]) > latest[<NUM_LIT:0>]:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>elif int(tmp[<NUM_LIT:1>]) > latest[<NUM_LIT:1>]:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>", "docstring": "Identify the kind of release by comparing to existing ones.", "id": "f15985:m3"}
{"signature": "def build_hugo_md(filename, tag, bump):", "body": "header = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'.format(date.today().isoformat()),<EOL>'<STR_LIT>'.format(tag),<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'.format(bump),<EOL>'<STR_LIT>',<EOL>'<STR_LIT:\\n>'<EOL>]<EOL>with open(filename, \"<STR_LIT:r>\") as file_h:<EOL><INDENT>content = insert_break(file_h.readlines())<EOL><DEDENT>header.extend(content)<EOL>with open(filename, \"<STR_LIT:w>\") as file_h:<EOL><INDENT>file_h.writelines(header)<EOL><DEDENT>", "docstring": "Build the markdown release notes for Hugo.\n\nInserts the required TOML header with specific values and adds a break\nfor long release notes.\n\nParameters\n----------\nfilename : str, path\n    The release notes file.\ntag : str\n    The tag, following semantic versioning, of the current release.\nbump : {\"major\", \"minor\", \"patch\", \"alpha\", \"beta\"}\n    The type of release.", "id": "f15985:m1"}
{"signature": "@disable_for_loaddata<EOL>def handle_user_post_save(sender, **kwargs):  ", "body": "created = kwargs.get(\"<STR_LIT>\", False)<EOL>user_instance = kwargs.get(\"<STR_LIT>\", None)<EOL>if user_instance is None:<EOL><INDENT>return  <EOL><DEDENT>try:<EOL><INDENT>pending_ecu = PendingEnterpriseCustomerUser.objects.get(user_email=user_instance.email)<EOL><DEDENT>except PendingEnterpriseCustomerUser.DoesNotExist:<EOL><INDENT>return  <EOL><DEDENT>if not created:<EOL><INDENT>try:<EOL><INDENT>existing_record = EnterpriseCustomerUser.objects.get(user_id=user_instance.id)<EOL>message_template = \"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"<EOL>logger.info(message_template.format(<EOL>user=user_instance, enterprise_customer=existing_record.enterprise_customer<EOL>))<EOL>pending_ecu.delete()<EOL>return<EOL><DEDENT>except EnterpriseCustomerUser.DoesNotExist:<EOL><INDENT>pass  <EOL><DEDENT><DEDENT>enterprise_customer_user = EnterpriseCustomerUser.objects.create(<EOL>enterprise_customer=pending_ecu.enterprise_customer,<EOL>user_id=user_instance.id<EOL>)<EOL>pending_enrollments = list(pending_ecu.pendingenrollment_set.all())<EOL>if pending_enrollments:<EOL><INDENT>def _complete_user_enrollment():  <EOL><INDENT>for enrollment in pending_enrollments:<EOL><INDENT>enterprise_customer_user.enroll(<EOL>enrollment.course_id, enrollment.course_mode, cohort=enrollment.cohort_name)<EOL>track_enrollment('<STR_LIT>', user_instance.id, enrollment.course_id)<EOL><DEDENT>pending_ecu.delete()<EOL><DEDENT>transaction.on_commit(_complete_user_enrollment)<EOL><DEDENT>else:<EOL><INDENT>pending_ecu.delete()<EOL><DEDENT>", "docstring": "Handle User model changes - checks if pending enterprise customer user record exists and upgrades it to actual link.\n\nIf there are pending enrollments attached to the PendingEnterpriseCustomerUser, then this signal also takes the\nnewly-created users and enrolls them in the relevant courses.", "id": "f15988:m0"}
{"signature": "@receiver(post_save, sender=EnterpriseCustomerUser)<EOL>def assign_enterprise_learner_role(sender, instance, **kwargs):     ", "body": "if kwargs['<STR_LIT>'] and instance.user:<EOL><INDENT>enterprise_learner_role, __ = SystemWideEnterpriseRole.objects.get_or_create(name=ENTERPRISE_LEARNER_ROLE)<EOL>SystemWideEnterpriseUserRoleAssignment.objects.get_or_create(<EOL>user=instance.user,<EOL>role=enterprise_learner_role<EOL>)<EOL><DEDENT>", "docstring": "Assign an enterprise learner role to EnterpriseCustomerUser whenever a new record is created.", "id": "f15988:m2"}
{"signature": "@receiver(post_delete, sender=EnterpriseCustomerUser)<EOL>def delete_enterprise_learner_role_assignment(sender, instance, **kwargs):     ", "body": "if instance.user:<EOL><INDENT>enterprise_learner_role, __ = SystemWideEnterpriseRole.objects.get_or_create(name=ENTERPRISE_LEARNER_ROLE)<EOL>try:<EOL><INDENT>SystemWideEnterpriseUserRoleAssignment.objects.get(<EOL>user=instance.user,<EOL>role=enterprise_learner_role<EOL>).delete()<EOL><DEDENT>except SystemWideEnterpriseUserRoleAssignment.DoesNotExist:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>", "docstring": "Delete the associated enterprise learner role assignment record when deleting an EnterpriseCustomerUser record.", "id": "f15988:m3"}
{"signature": "@register.filter()<EOL>def only_safe_html(html_text):", "body": "return mark_safe(strip_html_tags(html_text))<EOL>", "docstring": "Django template filter that strips all HTML tags excepts those degined in ALLOWED_TAGS.\n\nGeneral Usage:\n    {{ html_text|only_safe_html }}", "id": "f15989:m5"}
{"signature": "@register.filter(needs_autoescape=True)<EOL>def link_to_modal(link_text, index, autoescape=True):  ", "body": "link = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>index=index,<EOL>link_text=link_text,<EOL>)<EOL>return mark_safe(link)<EOL>", "docstring": "Django template filter that returns an anchor with attributes useful for course modal selection.\n\nGeneral Usage:\n    {{ link_text|link_to_modal:index }}\n\nExamples:\n    {{ course_title|link_to_modal:forloop.counter0 }}\n    {{ course_title|link_to_modal:3 }}\n    {{ view_details_text|link_to_modal:0 }}", "id": "f15989:m4"}
{"signature": "@register.inclusion_tag('<STR_LIT>')<EOL>def fa_icon(message_type):", "body": "return {<EOL>'<STR_LIT>': MESSAGE_ICONS.get(message_type, '<STR_LIT>'.format(message_type))<EOL>}<EOL>", "docstring": "Django template tag that returns font awesome icon depending upon message type.\n\nUsage:\n    {% fa_icon \"success\" %}", "id": "f15989:m0"}
{"signature": "def create_switch(apps, schema_editor):", "body": "Switch = apps.get_model('<STR_LIT>', '<STR_LIT>')<EOL>Switch.objects.update_or_create(name=ENTERPRISE_ROLE_BASED_ACCESS_CONTROL_SWITCH, defaults={'<STR_LIT>': False})<EOL>", "docstring": "Create the `role_based_access_control` switch if it does not already exist.", "id": "f16001:m0"}
{"signature": "def delete_switch(apps, schema_editor):", "body": "Switch = apps.get_model('<STR_LIT>', '<STR_LIT>')<EOL>Switch.objects.filter(name=ENTERPRISE_ROLE_BASED_ACCESS_CONTROL_SWITCH).delete()<EOL>", "docstring": "Delete the `role_based_access_control` switch.", "id": "f16001:m1"}
{"signature": "def delete_roles(apps, schema_editor):", "body": "EnterpriseFeatureRole = apps.get_model('<STR_LIT>', '<STR_LIT>')<EOL>EnterpriseFeatureRole.objects.filter(<EOL>name__in=[ENTERPRISE_CATALOG_ADMIN_ROLE, ENTERPRISE_DASHBOARD_ADMIN_ROLE, ENTERPRISE_ENROLLMENT_API_ADMIN_ROLE]<EOL>).delete()<EOL>", "docstring": "Delete the enterprise roles.", "id": "f16002:m1"}
{"signature": "def create_roles(apps, schema_editor):", "body": "SystemWideEnterpriseRole = apps.get_model('<STR_LIT>', '<STR_LIT>')<EOL>SystemWideEnterpriseRole.objects.update_or_create(name=ENTERPRISE_OPERATOR_ROLE)<EOL>", "docstring": "Create the enterprise roles if they do not already exist.", "id": "f16050:m0"}
{"signature": "def validate_image_extension(value):", "body": "config = get_app_config()<EOL>ext = os.path.splitext(value.name)[<NUM_LIT:1>]<EOL>if config and not ext.lower() in config.valid_image_extensions:<EOL><INDENT>raise ValidationError(_(\"<STR_LIT>\"))<EOL><DEDENT>", "docstring": "Validate that a particular image extension.", "id": "f16058:m1"}
{"signature": "def get_price_text(price, request):", "body": "if waffle.switch_is_active('<STR_LIT>') and get_localized_price_text:<EOL><INDENT>return get_localized_price_text(price, request)<EOL><DEDENT>return format_price(price)<EOL>", "docstring": "Return the localized converted price as string (ex. '$150 USD').\n\nIf the local_currency switch is enabled and the users location has been determined this will convert the\ngiven price based on conversion rate from the Catalog service and return a localized string", "id": "f16059:m2"}
{"signature": "def set_final_prices(self, modes, request):", "body": "result = []<EOL>for mode in modes:<EOL><INDENT>if mode['<STR_LIT>']:<EOL><INDENT>mode['<STR_LIT>'] = EcommerceApiClient(request.user).get_course_final_price(<EOL>mode=mode,<EOL>enterprise_catalog_uuid=request.GET.get(<EOL>'<STR_LIT>'<EOL>) if request.method == '<STR_LIT:GET>' else None,<EOL>)<EOL><DEDENT>result.append(mode)<EOL><DEDENT>return result<EOL>", "docstring": "Set the final discounted price on each premium mode.", "id": "f16059:c3:m0"}
{"signature": "@method_decorator(enterprise_login_required)<EOL><INDENT>def post(self, request, enterprise_uuid, course_id):<DEDENT>", "body": "enterprise_customer, course, course_run, course_modes = self.get_base_details(<EOL>request, enterprise_uuid, course_id<EOL>)<EOL>enterprise_customer_user, __ = EnterpriseCustomerUser.objects.get_or_create(<EOL>enterprise_customer=enterprise_customer,<EOL>user_id=request.user.id<EOL>)<EOL>enterprise_customer_user.update_session(request)<EOL>data_sharing_consent = DataSharingConsent.objects.proxied_get(<EOL>username=enterprise_customer_user.username,<EOL>course_id=course_id,<EOL>enterprise_customer=enterprise_customer<EOL>)<EOL>try:<EOL><INDENT>enterprise_course_enrollment = EnterpriseCourseEnrollment.objects.get(<EOL>enterprise_customer_user__enterprise_customer=enterprise_customer,<EOL>enterprise_customer_user__user_id=request.user.id,<EOL>course_id=course_id<EOL>)<EOL><DEDENT>except EnterpriseCourseEnrollment.DoesNotExist:<EOL><INDENT>enterprise_course_enrollment = None<EOL><DEDENT>enterprise_catalog_uuid = request.POST.get('<STR_LIT>')<EOL>selected_course_mode_name = request.POST.get('<STR_LIT>')<EOL>cohort_name = request.POST.get('<STR_LIT>')<EOL>selected_course_mode = None<EOL>for course_mode in course_modes:<EOL><INDENT>if course_mode['<STR_LIT>'] == selected_course_mode_name:<EOL><INDENT>selected_course_mode = course_mode<EOL>break<EOL><DEDENT><DEDENT>if not selected_course_mode:<EOL><INDENT>return self.get_enterprise_course_enrollment_page(<EOL>request,<EOL>enterprise_customer,<EOL>course,<EOL>course_run,<EOL>course_modes,<EOL>enterprise_course_enrollment,<EOL>data_sharing_consent<EOL>)<EOL><DEDENT>user_consent_needed = get_data_sharing_consent(<EOL>enterprise_customer_user.username,<EOL>enterprise_customer.uuid,<EOL>course_id=course_id<EOL>).consent_required()<EOL>if not selected_course_mode.get('<STR_LIT>') and not user_consent_needed:<EOL><INDENT>if not 
enterprise_course_enrollment:<EOL><INDENT>enterprise_course_enrollment = EnterpriseCourseEnrollment.objects.create(<EOL>enterprise_customer_user=enterprise_customer_user,<EOL>course_id=course_id,<EOL>)<EOL>track_enrollment('<STR_LIT>', request.user.id, course_id, request.get_full_path())<EOL><DEDENT>client = EnrollmentApiClient()<EOL>client.enroll_user_in_course(<EOL>request.user.username,<EOL>course_id,<EOL>selected_course_mode_name,<EOL>cohort=cohort_name<EOL>)<EOL>return redirect(LMS_COURSEWARE_URL.format(course_id=course_id))<EOL><DEDENT>if user_consent_needed:<EOL><INDENT>query_string_params = {<EOL>'<STR_LIT>': selected_course_mode_name,<EOL>}<EOL>if enterprise_catalog_uuid:<EOL><INDENT>query_string_params.update({'<STR_LIT>': enterprise_catalog_uuid})<EOL><DEDENT>next_url = '<STR_LIT>'.format(<EOL>handle_consent_enrollment_url=reverse(<EOL>'<STR_LIT>', args=[enterprise_customer.uuid, course_id]<EOL>),<EOL>query_string=urlencode(query_string_params)<EOL>)<EOL>failure_url = reverse('<STR_LIT>', args=[enterprise_customer.uuid, course_id])<EOL>if request.META['<STR_LIT>']:<EOL><INDENT>failure_url = '<STR_LIT>'.format(<EOL>course_enrollment_url=reverse(<EOL>'<STR_LIT>', args=[enterprise_customer.uuid, course_id]<EOL>),<EOL>query_string=request.META['<STR_LIT>']<EOL>)<EOL><DEDENT>return redirect(<EOL>'<STR_LIT>'.format(<EOL>grant_data_sharing_url=reverse('<STR_LIT>'),<EOL>params=urlencode(<EOL>{<EOL>'<STR_LIT>': next_url,<EOL>'<STR_LIT>': failure_url,<EOL>'<STR_LIT>': enterprise_customer.uuid,<EOL>'<STR_LIT>': course_id,<EOL>}<EOL>)<EOL>)<EOL>)<EOL><DEDENT>premium_flow = LMS_START_PREMIUM_COURSE_FLOW_URL.format(course_id=course_id)<EOL>if enterprise_catalog_uuid:<EOL><INDENT>premium_flow += '<STR_LIT>'.format(<EOL>catalog_uuid=enterprise_catalog_uuid<EOL>)<EOL><DEDENT>return redirect(premium_flow)<EOL>", "docstring": "Process a submitted track selection form for the enterprise.", "id": "f16059:c3:m4"}
{"signature": "def get_context_from_db(self, consent_page, platform_name, item, context):", "body": "enterprise_customer = consent_page.enterprise_customer<EOL>course_title = context.get('<STR_LIT>', None)<EOL>course_start_date = context.get('<STR_LIT>', None)<EOL>context_data = {<EOL>'<STR_LIT>': True,<EOL>'<STR_LIT>': consent_page.page_title,<EOL>'<STR_LIT>': consent_page.left_sidebar_text.format(<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>platform_name=platform_name,<EOL>item=item,<EOL>course_title=course_title,<EOL>course_start_date=course_start_date,<EOL>),<EOL>'<STR_LIT>': consent_page.top_paragraph.format(<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>platform_name=platform_name,<EOL>item=item,<EOL>course_title=course_title,<EOL>course_start_date=course_start_date,<EOL>),<EOL>'<STR_LIT>': consent_page.agreement_text.format(<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>platform_name=platform_name,<EOL>item=item,<EOL>course_title=course_title,<EOL>course_start_date=course_start_date,<EOL>),<EOL>'<STR_LIT>': consent_page.continue_text,<EOL>'<STR_LIT>': consent_page.abort_text,<EOL>'<STR_LIT>': consent_page.policy_dropdown_header,<EOL>'<STR_LIT>': consent_page.policy_paragraph.format(<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>platform_name=platform_name,<EOL>item=item,<EOL>course_title=course_title,<EOL>course_start_date=course_start_date,<EOL>),<EOL>'<STR_LIT>': consent_page.confirmation_modal_header.format(<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>platform_name=platform_name,<EOL>item=item,<EOL>course_title=course_title,<EOL>course_start_date=course_start_date,<EOL>),<EOL>'<STR_LIT>': consent_page.confirmation_modal_text.format(<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>platform_name=platform_name,<EOL>item=item,<EOL>course_title=course_title,<EOL>course_start_date=course_start_date,<EOL>),<EOL>'<STR_LIT>': consent_page.modal_affirm_decline_text,<EOL>'<STR_LIT>': 
consent_page.modal_abort_decline_text,<EOL>}<EOL>return context_data<EOL>", "docstring": "Make set of variables(populated from db) that will be used in data sharing consent page.", "id": "f16059:c1:m2"}
{"signature": "def get_enterprise_program_enrollment_page(self, request, enterprise_customer, program_details):", "body": "<EOL>organizations = program_details['<STR_LIT>']<EOL>organization = organizations[<NUM_LIT:0>] if organizations else {}<EOL>platform_name = get_configuration_value('<STR_LIT>', settings.PLATFORM_NAME)<EOL>program_title = program_details['<STR_LIT:title>']<EOL>program_type_details = program_details['<STR_LIT>']<EOL>program_type = program_type_details['<STR_LIT:name>']<EOL>program_courses = program_details['<STR_LIT>']<EOL>course_count = len(program_courses)<EOL>course_count_text = ungettext(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>course_count,<EOL>).format(count=course_count)<EOL>effort_info_text = ungettext_min_max(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>_('<STR_LIT>'),<EOL>program_details.get('<STR_LIT>'),<EOL>program_details.get('<STR_LIT>'),<EOL>)<EOL>length_info_text = ungettext_min_max(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>_('<STR_LIT>'),<EOL>program_details.get('<STR_LIT>'),<EOL>program_details.get('<STR_LIT>'),<EOL>)<EOL>if program_details['<STR_LIT>']:<EOL><INDENT>purchase_action = _('<STR_LIT>')<EOL>item = _('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>purchase_action = _('<STR_LIT>')<EOL>item = _('<STR_LIT>')<EOL><DEDENT>program_data_sharing_consent = get_data_sharing_consent(<EOL>request.user.username,<EOL>enterprise_customer.uuid,<EOL>program_uuid=program_details['<STR_LIT>'],<EOL>)<EOL>if program_data_sharing_consent.exists and not program_data_sharing_consent.granted:<EOL><INDENT>messages.add_consent_declined_message(request, enterprise_customer, program_title)<EOL><DEDENT>discount_data = program_details.get('<STR_LIT>', {})<EOL>one_click_purchase_eligibility = program_details.get('<STR_LIT>', False)<EOL>if not one_click_purchase_eligibility:<EOL><INDENT>messages.add_unenrollable_item_message(request, '<STR_LIT>')<EOL><DEDENT>elif discount_data.get('<STR_LIT>') is 
None:<EOL><INDENT>messages.add_missing_price_information_message(request, program_title)<EOL><DEDENT>context_data = get_global_context(request, enterprise_customer)<EOL>context_data.update({<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _(\"<STR_LIT>\"),<EOL>'<STR_LIT>': <NUM_LIT:2>,<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': <NUM_LIT:1>,<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>').format(<EOL>platform_name=platform_name,<EOL>program_type=program_type,<EOL>),<EOL>'<STR_LIT>': _('<STR_LIT>').format(<EOL>platform_name=platform_name<EOL>),<EOL>'<STR_LIT>': organization.get('<STR_LIT:name>'),<EOL>'<STR_LIT>': organization.get('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>').format(organization=organization.get('<STR_LIT:name>')),<EOL>'<STR_LIT>': _('<STR_LIT>').format(item=item),<EOL>'<STR_LIT>': program_type_details['<STR_LIT>'].get('<STR_LIT>', {}).get('<STR_LIT:url>', '<STR_LIT>'),<EOL>'<STR_LIT>': program_type,<EOL>'<STR_LIT>': get_program_type_description(program_type),<EOL>'<STR_LIT>': program_title,<EOL>'<STR_LIT>': program_details['<STR_LIT>'],<EOL>'<STR_LIT>': program_details['<STR_LIT>'],<EOL>'<STR_LIT>': get_price_text(discount_data.get('<STR_LIT>', <NUM_LIT:0>), request),<EOL>'<STR_LIT>': get_price_text(discount_data.get('<STR_LIT>', <NUM_LIT:0>), request),<EOL>'<STR_LIT>': discount_data.get('<STR_LIT>', False),<EOL>'<STR_LIT>': program_courses,<EOL>'<STR_LIT>': [<EOL>_('<STR_LIT>'),<EOL>_('<STR_LIT>'),<EOL>],<EOL>'<STR_LIT>': _('<STR_LIT>').format(purchase_action=purchase_action),<EOL>'<STR_LIT>': 
program_details['<STR_LIT>'],<EOL>'<STR_LIT>': program_details['<STR_LIT>'],<EOL>'<STR_LIT>': course_count_text,<EOL>'<STR_LIT>': length_info_text,<EOL>'<STR_LIT>': effort_info_text,<EOL>'<STR_LIT>': one_click_purchase_eligibility,<EOL>})<EOL>return render(request, '<STR_LIT>', context=context_data)<EOL>", "docstring": "Render Enterprise-specific program enrollment page.", "id": "f16059:c4:m2"}
{"signature": "@method_decorator(enterprise_login_required)<EOL><INDENT>@method_decorator(force_fresh_session)<EOL>def get(self, request, *args, **kwargs):<DEDENT>", "body": "enterprise_customer_uuid, course_run_id, course_key, program_uuid = RouterView.get_path_variables(**kwargs)<EOL>enterprise_customer = get_enterprise_customer_or_404(enterprise_customer_uuid)<EOL>if course_key:<EOL><INDENT>try:<EOL><INDENT>course_run_id = RouterView.get_course_run_id(request.user, enterprise_customer, course_key)<EOL><DEDENT>except Http404:<EOL><INDENT>context_data = get_global_context(request, enterprise_customer)<EOL>error_code = '<STR_LIT>'<EOL>log_message = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>course_key=course_key,<EOL>course_run_id=course_run_id,<EOL>enterprise_customer_uuid=enterprise_customer_uuid,<EOL>error_code=error_code,<EOL>userid=request.user.id,<EOL>program_uuid=program_uuid,<EOL>)<EOL>)<EOL>return render_page_with_error_code_message(request, context_data, error_code, log_message)<EOL><DEDENT>kwargs['<STR_LIT>'] = course_run_id<EOL><DEDENT>with transaction.atomic():<EOL><INDENT>enterprise_customer_user, __ = EnterpriseCustomerUser.objects.get_or_create(<EOL>enterprise_customer=enterprise_customer,<EOL>user_id=request.user.id<EOL>)<EOL>enterprise_customer_user.update_session(request)<EOL><DEDENT>resource_id = course_run_id or program_uuid<EOL>if self.eligible_for_direct_audit_enrollment(request, enterprise_customer, resource_id, course_key):<EOL><INDENT>try:<EOL><INDENT>enterprise_customer_user.enroll(resource_id, '<STR_LIT>', cohort=request.GET.get('<STR_LIT>', None))<EOL>track_enrollment('<STR_LIT>', request.user.id, resource_id, request.get_full_path())<EOL><DEDENT>except (CourseEnrollmentDowngradeError, CourseEnrollmentPermissionError):<EOL><INDENT>pass<EOL><DEDENT>return redirect(LMS_COURSEWARE_URL.format(course_id=resource_id))<EOL><DEDENT>return self.redirect(request, *args, **kwargs)<EOL>", "docstring": "Run some 
custom GET logic for Enterprise workflows before routing the user through existing views.\n\nIn particular, before routing to existing views:\n- If the requested resource is a course, find the current course run for that course,\n  and make that course run the requested resource instead.\n- Look to see whether a request is eligible for direct audit enrollment, and if so, directly enroll the user.", "id": "f16059:c5:m4"}
{"signature": "def get_base_details(self, request, enterprise_uuid, course_run_id):", "body": "enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid)<EOL>enterprise_catalog_uuid = request.GET.get('<STR_LIT>')<EOL>enterprise_catalog = None<EOL>if enterprise_catalog_uuid:<EOL><INDENT>try:<EOL><INDENT>enterprise_catalog_uuid = UUID(enterprise_catalog_uuid)<EOL>enterprise_catalog = enterprise_customer.enterprise_customer_catalogs.get(<EOL>uuid=enterprise_catalog_uuid<EOL>)<EOL><DEDENT>except (ValueError, EnterpriseCustomerCatalog.DoesNotExist):<EOL><INDENT>LOGGER.warning(<EOL>'<STR_LIT>'.format(<EOL>enterprise_catalog_uuid=enterprise_catalog_uuid,<EOL>)<EOL>)<EOL>messages.add_generic_info_message_for_error(request)<EOL><DEDENT><DEDENT>course = None<EOL>course_run = None<EOL>course_modes = []<EOL>if enterprise_catalog:<EOL><INDENT>course, course_run = enterprise_catalog.get_course_and_course_run(course_run_id)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>course, course_run = CourseCatalogApiServiceClient(<EOL>enterprise_customer.site<EOL>).get_course_and_course_run(course_run_id)<EOL><DEDENT>except ImproperlyConfigured:<EOL><INDENT>LOGGER.warning('<STR_LIT>')<EOL>messages.add_generic_info_message_for_error(request)<EOL>return enterprise_customer, course, course_run, course_modes<EOL><DEDENT><DEDENT>if not course or not course_run:<EOL><INDENT>course_id = course['<STR_LIT:key>'] if course else \"<STR_LIT>\"<EOL>course_title = course['<STR_LIT:title>'] if course else \"<STR_LIT>\"<EOL>course_run_title = course_run['<STR_LIT:title>'] if course_run else \"<STR_LIT>\"<EOL>enterprise_catalog_title = enterprise_catalog.title if enterprise_catalog else 
\"<STR_LIT>\"<EOL>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>course_title=course_title,<EOL>course_id=course_id,<EOL>course_run_title=course_run_title,<EOL>course_run_id=course_run_id,<EOL>enterprise_name=enterprise_customer.name,<EOL>enterprise_uuid=enterprise_customer.uuid,<EOL>enterprise_catalog_title=enterprise_catalog_title,<EOL>enterprise_catalog_uuid=enterprise_catalog_uuid,<EOL>)<EOL>)<EOL>messages.add_generic_info_message_for_error(request)<EOL>return enterprise_customer, course, course_run, course_modes<EOL><DEDENT>if enterprise_catalog_uuid and not enterprise_catalog:<EOL><INDENT>return enterprise_customer, course, course_run, course_modes<EOL><DEDENT>modes = self.get_available_course_modes(request, course_run_id, enterprise_catalog)<EOL>audit_modes = getattr(<EOL>settings,<EOL>'<STR_LIT>',<EOL>['<STR_LIT>', '<STR_LIT>']<EOL>)<EOL>for mode in modes:<EOL><INDENT>if mode['<STR_LIT>']:<EOL><INDENT>price_text = get_price_text(mode['<STR_LIT>'], request)<EOL><DEDENT>else:<EOL><INDENT>price_text = _('<STR_LIT>')<EOL><DEDENT>if mode['<STR_LIT>'] in audit_modes:<EOL><INDENT>description = _('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>description = _('<STR_LIT>')<EOL><DEDENT>course_modes.append({<EOL>'<STR_LIT>': mode['<STR_LIT>'],<EOL>'<STR_LIT>': mode['<STR_LIT>'],<EOL>'<STR_LIT>': mode['<STR_LIT>'],<EOL>'<STR_LIT:title>': mode['<STR_LIT:name>'],<EOL>'<STR_LIT>': price_text,<EOL>'<STR_LIT>': price_text,<EOL>'<STR_LIT:description>': description,<EOL>'<STR_LIT>': mode['<STR_LIT>'] not in audit_modes<EOL>})<EOL><DEDENT>return enterprise_customer, course, course_run, course_modes<EOL>", "docstring": "Retrieve fundamental details used by both POST and GET versions of this view.\n\nSpecifically, take an EnterpriseCustomer UUID and a course run ID, and transform those\ninto an actual EnterpriseCustomer, a set of details about the course, and a list\nof the available course modes for that course run.", "id": "f16059:c3:m2"}
{"signature": "@method_decorator(login_required)<EOL><INDENT>def get(self, request):<DEDENT>", "body": "enterprise_customer_uuid = request.GET.get('<STR_LIT>')<EOL>success_url = request.GET.get('<STR_LIT>')<EOL>failure_url = request.GET.get('<STR_LIT>')<EOL>course_id = request.GET.get('<STR_LIT>', '<STR_LIT>')<EOL>program_uuid = request.GET.get('<STR_LIT>', '<STR_LIT>')<EOL>self.preview_mode = bool(request.GET.get('<STR_LIT>', False))<EOL>enterprise_customer = get_enterprise_customer_or_404(enterprise_customer_uuid)<EOL>context_data = get_global_context(request, enterprise_customer)<EOL>if not self.preview_mode:<EOL><INDENT>if not self.course_or_program_exist(course_id, program_uuid):<EOL><INDENT>error_code = '<STR_LIT>'<EOL>log_message = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>course_id=course_id,<EOL>program_uuid=program_uuid,<EOL>error_code=error_code,<EOL>userid=request.user.id,<EOL>enterprise_customer_uuid=enterprise_customer_uuid,<EOL>)<EOL>)<EOL>return render_page_with_error_code_message(request, context_data, error_code, log_message)<EOL><DEDENT>try:<EOL><INDENT>consent_record = get_data_sharing_consent(<EOL>request.user.username,<EOL>enterprise_customer_uuid,<EOL>program_uuid=program_uuid,<EOL>course_id=course_id<EOL>)<EOL><DEDENT>except NotConnectedToOpenEdX as error:<EOL><INDENT>error_code = '<STR_LIT>'<EOL>log_message = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>userid=request.user.id,<EOL>enterprise_customer_uuid=enterprise_customer_uuid,<EOL>error=error,<EOL>error_code=error_code,<EOL>course_id=course_id,<EOL>)<EOL>)<EOL>return render_page_with_error_code_message(request, context_data, error_code, log_message)<EOL><DEDENT>try:<EOL><INDENT>consent_required = consent_record.consent_required()<EOL><DEDENT>except AttributeError:<EOL><INDENT>consent_required = None<EOL><DEDENT>if consent_record is None or not consent_required:<EOL><INDENT>error_code = 
'<STR_LIT>'<EOL>log_message = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>userid=request.user.id,<EOL>enterprise_customer_uuid=enterprise_customer_uuid,<EOL>consent_record=consent_record,<EOL>consent_required=consent_required,<EOL>error_code=error_code,<EOL>course_id=course_id,<EOL>)<EOL>)<EOL>return render_page_with_error_code_message(request, context_data, error_code, log_message)<EOL><DEDENT>else:<EOL><INDENT>enterprise_customer = consent_record.enterprise_customer<EOL><DEDENT><DEDENT>elif not request.user.is_staff:<EOL><INDENT>raise PermissionDenied()<EOL><DEDENT>context_data = get_global_context(request, enterprise_customer)<EOL>if not (enterprise_customer_uuid and success_url and failure_url):<EOL><INDENT>error_code = '<STR_LIT>'<EOL>log_message = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>userid=request.user.id,<EOL>enterprise_customer_uuid=enterprise_customer_uuid,<EOL>success_url=success_url,<EOL>failure_url=failure_url,<EOL>error_code=error_code,<EOL>course_id=course_id,<EOL>)<EOL>)<EOL>return render_page_with_error_code_message(request, context_data, error_code, log_message)<EOL><DEDENT>try:<EOL><INDENT>updated_context_dict = self.get_course_or_program_context(<EOL>enterprise_customer,<EOL>course_id=course_id,<EOL>program_uuid=program_uuid<EOL>)<EOL>context_data.update(updated_context_dict)<EOL><DEDENT>except Http404:<EOL><INDENT>error_code = '<STR_LIT>'<EOL>log_message = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>error_code=error_code,<EOL>userid=request.user.id,<EOL>enterprise_customer=enterprise_customer.uuid,<EOL>course_id=course_id,<EOL>)<EOL>)<EOL>return render_page_with_error_code_message(request, context_data, error_code, log_message)<EOL><DEDENT>item = '<STR_LIT>' if course_id else '<STR_LIT>'<EOL>context_data.update({<EOL>'<STR_LIT>': 
_(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>bold_start='<STR_LIT>',<EOL>bold_end='<STR_LIT>',<EOL>item=item,<EOL>),<EOL>'<STR_LIT>': _(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>bold_start='<STR_LIT>',<EOL>bold_end='<STR_LIT>',<EOL>item=item,<EOL>),<EOL>'<STR_LIT>': success_url,<EOL>'<STR_LIT>': failure_url,<EOL>'<STR_LIT>': request.GET.get('<STR_LIT>') is not None,<EOL>'<STR_LIT>': [<EOL>_('<STR_LIT>').format(item=item),<EOL>_('<STR_LIT>'),<EOL>_('<STR_LIT>'),<EOL>],<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>})<EOL>platform_name = context_data['<STR_LIT>']<EOL>published_only = False if self.preview_mode else True<EOL>enterprise_consent_page = enterprise_customer.get_data_sharing_consent_text_overrides(<EOL>published_only=published_only<EOL>)<EOL>if enterprise_consent_page:<EOL><INDENT>context_data.update(self.get_context_from_db(enterprise_consent_page, platform_name, item, context_data))<EOL><DEDENT>else:<EOL><INDENT>context_data.update(self.get_default_context(enterprise_customer, platform_name))<EOL><DEDENT>return render(request, '<STR_LIT>', context=context_data)<EOL>", "docstring": "Render a form to collect user input about data sharing consent.", "id": "f16059:c1:m4"}
{"signature": "@staticmethod<EOL><INDENT>def get_course_run_id(user, enterprise_customer, course_key):<DEDENT>", "body": "try:<EOL><INDENT>course = CourseCatalogApiServiceClient(enterprise_customer.site).get_course_details(course_key)<EOL><DEDENT>except ImproperlyConfigured:<EOL><INDENT>raise Http404<EOL><DEDENT>users_all_enrolled_courses = EnrollmentApiClient().get_enrolled_courses(user.username)<EOL>users_active_course_runs = get_active_course_runs(<EOL>course,<EOL>users_all_enrolled_courses<EOL>) if users_all_enrolled_courses else []<EOL>course_run = get_current_course_run(course, users_active_course_runs)<EOL>if course_run:<EOL><INDENT>course_run_id = course_run['<STR_LIT:key>']<EOL>return course_run_id<EOL><DEDENT>else:<EOL><INDENT>raise Http404<EOL><DEDENT>", "docstring": "User is requesting a course, we need to translate that into the current course run.\n\n:param user:\n:param enterprise_customer:\n:param course_key:\n:return: course_run_id", "id": "f16059:c5:m1"}
{"signature": "def get_course_or_program_context(self, enterprise_customer, course_id=None, program_uuid=None):", "body": "context_data = {}<EOL>if course_id:<EOL><INDENT>context_data.update({'<STR_LIT>': course_id, '<STR_LIT>': True})<EOL>if not self.preview_mode:<EOL><INDENT>try:<EOL><INDENT>catalog_api_client = CourseCatalogApiServiceClient(enterprise_customer.site)<EOL><DEDENT>except ImproperlyConfigured:<EOL><INDENT>raise Http404<EOL><DEDENT>course_run_details = catalog_api_client.get_course_run(course_id)<EOL>course_start_date = '<STR_LIT>'<EOL>if course_run_details['<STR_LIT:start>']:<EOL><INDENT>course_start_date = parse(course_run_details['<STR_LIT:start>']).strftime('<STR_LIT>')<EOL><DEDENT>context_data.update({<EOL>'<STR_LIT>': course_run_details['<STR_LIT:title>'],<EOL>'<STR_LIT>': course_start_date,<EOL>})<EOL><DEDENT>else:<EOL><INDENT>context_data.update({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': datetime.datetime.now().strftime('<STR_LIT>'),<EOL>})<EOL><DEDENT><DEDENT>else:<EOL><INDENT>context_data.update({<EOL>'<STR_LIT>': program_uuid,<EOL>'<STR_LIT>': True,<EOL>})<EOL><DEDENT>return context_data<EOL>", "docstring": "Return a dict having course or program specific keys for data sharing consent page.", "id": "f16059:c1:m3"}
{"signature": "@method_decorator(enterprise_login_required)<EOL><INDENT>def post(self, request, enterprise_uuid, program_uuid):<DEDENT>", "body": "verify_edx_resources()<EOL>enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid)<EOL>with transaction.atomic():<EOL><INDENT>enterprise_customer_user, __ = EnterpriseCustomerUser.objects.get_or_create(<EOL>enterprise_customer=enterprise_customer,<EOL>user_id=request.user.id<EOL>)<EOL>enterprise_customer_user.update_session(request)<EOL><DEDENT>context_data = get_global_context(request, enterprise_customer)<EOL>program_details, error_code = self.get_program_details(request, program_uuid, enterprise_customer)<EOL>if error_code:<EOL><INDENT>return render(<EOL>request,<EOL>ENTERPRISE_GENERAL_ERROR_PAGE,<EOL>context=context_data,<EOL>status=<NUM_LIT>,<EOL>)<EOL><DEDENT>if program_details['<STR_LIT>']:<EOL><INDENT>return redirect(LMS_PROGRAMS_DASHBOARD_URL.format(uuid=program_uuid))<EOL><DEDENT>basket_page = '<STR_LIT>'.format(<EOL>basket_url=BASKET_URL,<EOL>params=urlencode(<EOL>[tuple(['<STR_LIT>', sku]) for sku in program_details['<STR_LIT>']] +<EOL>[tuple(['<STR_LIT>', program_uuid])]<EOL>)<EOL>)<EOL>if get_data_sharing_consent(<EOL>enterprise_customer_user.username,<EOL>enterprise_customer.uuid,<EOL>program_uuid=program_uuid,<EOL>).consent_required():<EOL><INDENT>return redirect(<EOL>'<STR_LIT>'.format(<EOL>grant_data_sharing_url=reverse('<STR_LIT>'),<EOL>params=urlencode(<EOL>{<EOL>'<STR_LIT>': basket_page,<EOL>'<STR_LIT>': reverse(<EOL>'<STR_LIT>',<EOL>args=[enterprise_customer.uuid, program_uuid]<EOL>),<EOL>'<STR_LIT>': enterprise_customer.uuid,<EOL>'<STR_LIT>': program_uuid,<EOL>}<EOL>)<EOL>)<EOL>)<EOL><DEDENT>return redirect(basket_page)<EOL>", "docstring": "Process a submitted track selection form for the enterprise.", "id": "f16059:c4:m4"}
{"signature": "def course_or_program_exist(self, course_id, program_uuid):", "body": "course_exists = course_id and CourseApiClient().get_course_details(course_id)<EOL>program_exists = program_uuid and CourseCatalogApiServiceClient().program_exists(program_uuid)<EOL>return course_exists or program_exists<EOL>", "docstring": "Return whether the input course or program exist.", "id": "f16059:c1:m0"}
{"signature": "def get_default_context(self, enterprise_customer, platform_name):", "body": "context_data = {<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>bold_start='<STR_LIT>',<EOL>bold_end='<STR_LIT>',<EOL>start_link='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>end_link='<STR_LIT>',<EOL>),<EOL>'<STR_LIT>': _(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>platform_name=platform_name,<EOL>),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>enterprise_customer_name=enterprise_customer.name<EOL>),<EOL>'<STR_LIT>': [<EOL>_(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>platform_name=platform_name<EOL>),<EOL>_(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>platform_name=platform_name,<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>),<EOL>_('<STR_LIT>').format(platform_name=platform_name),<EOL>_('<STR_LIT>'),<EOL>_(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>_(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>_(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>_(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>_('<STR_LIT>'),<EOL>_('<STR_LIT>'),<EOL>_('<STR_LIT>'),<EOL>],<EOL>'<STR_LIT>': _(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>platform_name=platform_name,<EOL>),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': [<EOL>_('<STR_LIT>').format(<EOL>enterprise_customer_name=enterprise_customer.name<EOL>),<EOL>],<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>'),<EOL>'<STR_LIT>': _('<STR_LIT>').format(<EOL>start_link='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>end_link='<STR_LIT>',<EOL>),<EOL>'<STR_LIT>': 
_('<STR_LIT>'),<EOL>}<EOL>return context_data<EOL>", "docstring": "Get the set of variables that will populate the template by default.", "id": "f16059:c1:m1"}
{"signature": "def json_serialized_course_modes():", "body": "return json.dumps(COURSE_MODE_SORT_ORDER)<EOL>", "docstring": ":return: serialized course modes.", "id": "f16060:m0"}
{"signature": "def add_consent_declined_message(request, enterprise_customer, item):", "body": "messages.warning(<EOL>request,<EOL>_(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>item=item,<EOL>em_start='<STR_LIT>',<EOL>em_end='<STR_LIT>',<EOL>enterprise_customer_name=enterprise_customer.name,<EOL>link_start='<STR_LIT>'.format(<EOL>support_link=get_configuration_value('<STR_LIT>', settings.ENTERPRISE_SUPPORT_URL),<EOL>),<EOL>platform_name=get_configuration_value('<STR_LIT>', settings.PLATFORM_NAME),<EOL>link_end='<STR_LIT>',<EOL>span_start='<STR_LIT>',<EOL>span_end='<STR_LIT>',<EOL>strong_start='<STR_LIT>',<EOL>strong_end='<STR_LIT>',<EOL>)<EOL>)<EOL>", "docstring": "Add a message to the Django messages store indicating that the user has declined data sharing consent.\n\nArguments:\n    request (HttpRequest): The current request.\n    enterprise_customer (EnterpriseCustomer): The EnterpriseCustomer associated with this request.\n    item (str): A string containing information about the item for which consent was declined.", "id": "f16061:m0"}
{"signature": "def add_unenrollable_item_message(request, item):", "body": "messages.info(<EOL>request,<EOL>_(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>item=item,<EOL>strong_start='<STR_LIT>',<EOL>strong_end='<STR_LIT>',<EOL>span_start='<STR_LIT>',<EOL>span_end='<STR_LIT>',<EOL>)<EOL>)<EOL>", "docstring": "Add a message to the Django message store indicating that the item (i.e. course run, program) is unenrollable.\n\n:param request: The current request.\n:param item: The item that is unenrollable (i.e. a course run).", "id": "f16061:m2"}
{"signature": "def post(self, request, enterprise_customer_uuid):", "body": "transmit_courses_metadata_form = TransmitEnterpriseCoursesForm(request.POST)<EOL>if transmit_courses_metadata_form.is_valid():<EOL><INDENT>channel_worker_username = transmit_courses_metadata_form.cleaned_data['<STR_LIT>']<EOL>call_command(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>', channel_worker_username,<EOL>enterprise_customer=enterprise_customer_uuid<EOL>)<EOL>return HttpResponseRedirect('<STR_LIT>')<EOL><DEDENT>context = self._build_context(request, enterprise_customer_uuid)<EOL>context.update({self.ContextParameters.TRANSMIT_COURSES_METADATA_FORM: transmit_courses_metadata_form})<EOL>return render(request, self.template, context)<EOL>", "docstring": "Handle POST request - handle form submissions.\n\nArguments:\n    request (django.http.request.HttpRequest): Request instance\n    enterprise_customer_uuid (str): Enterprise Customer UUID", "id": "f16062:c1:m3"}
{"signature": "@staticmethod<EOL><INDENT>def get_user_name(request):<DEDENT>", "body": "return request.user.first_name or request.user.username<EOL>", "docstring": "Get a human-readable name for the user.", "id": "f16062:c0:m1"}
{"signature": "def get_enterprise_customer_user_queryset(self, request, search_keyword, customer_uuid, page_size=PAGE_SIZE):", "body": "page = request.GET.get('<STR_LIT>', <NUM_LIT:1>)<EOL>learners = EnterpriseCustomerUser.objects.filter(enterprise_customer__uuid=customer_uuid)<EOL>user_ids = learners.values_list('<STR_LIT>', flat=True)<EOL>matching_users = User.objects.filter(pk__in=user_ids)<EOL>if search_keyword is not None:<EOL><INDENT>matching_users = matching_users.filter(<EOL>Q(email__icontains=search_keyword) | Q(username__icontains=search_keyword)<EOL>)<EOL><DEDENT>matching_user_ids = matching_users.values_list('<STR_LIT>', flat=True)<EOL>learners = learners.filter(user_id__in=matching_user_ids)<EOL>return paginated_list(learners, page, page_size)<EOL>", "docstring": "Get the list of EnterpriseCustomerUsers we want to render.\n\nArguments:\n    request (HttpRequest): HTTP Request instance.\n    search_keyword (str): The keyword to search for in users' email addresses and usernames.\n    customer_uuid (str): A unique identifier to filter down to only users linked to a\n    particular EnterpriseCustomer.\n    page_size (int): Number of learners displayed in each paginated set.", "id": "f16062:c2:m3"}
{"signature": "@classmethod<EOL><INDENT>def is_user_enrolled(cls, user, course_id, course_mode):<DEDENT>", "body": "enrollment_client = EnrollmentApiClient()<EOL>try:<EOL><INDENT>enrollments = enrollment_client.get_course_enrollment(user.username, course_id)<EOL>if enrollments and course_mode == enrollments.get('<STR_LIT>'):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>except HttpClientError as exc:<EOL><INDENT>logging.error(<EOL>'<STR_LIT>',<EOL>dict(user=user.username, message=str(exc))<EOL>)<EOL><DEDENT>except KeyError as exc:<EOL><INDENT>logging.warning(<EOL>'<STR_LIT>',<EOL>dict(user=user.username, message=str(exc))<EOL>)<EOL><DEDENT>return False<EOL>", "docstring": "Query the enrollment API and determine if a learner is enrolled in a given course run track.\n\nArgs:\n    user: The user whose enrollment needs to be checked\n    course_mode: The mode with which the enrollment should be checked\n    course_id: course id of the course where enrollment should be checked.\n\nReturns:\n    Boolean: Whether or not enrollment exists", "id": "f16062:c2:m8"}
{"signature": "@classmethod<EOL><INDENT>def _enroll_users(<EOL>cls,<EOL>request,<EOL>enterprise_customer,<EOL>emails,<EOL>mode,<EOL>course_id=None,<EOL>program_details=None,<EOL>notify=True<EOL>):<DEDENT>", "body": "pending_messages = []<EOL>if course_id:<EOL><INDENT>succeeded, pending, failed = cls.enroll_users_in_course(<EOL>enterprise_customer=enterprise_customer,<EOL>course_id=course_id,<EOL>course_mode=mode,<EOL>emails=emails,<EOL>)<EOL>all_successes = succeeded + pending<EOL>if notify:<EOL><INDENT>enterprise_customer.notify_enrolled_learners(<EOL>catalog_api_user=request.user,<EOL>course_id=course_id,<EOL>users=all_successes,<EOL>)<EOL><DEDENT>if succeeded:<EOL><INDENT>pending_messages.append(cls.get_success_enrollment_message(succeeded, course_id))<EOL><DEDENT>if failed:<EOL><INDENT>pending_messages.append(cls.get_failed_enrollment_message(failed, course_id))<EOL><DEDENT>if pending:<EOL><INDENT>pending_messages.append(cls.get_pending_enrollment_message(pending, course_id))<EOL><DEDENT><DEDENT>if program_details:<EOL><INDENT>succeeded, pending, failed = cls.enroll_users_in_program(<EOL>enterprise_customer=enterprise_customer,<EOL>program_details=program_details,<EOL>course_mode=mode,<EOL>emails=emails,<EOL>)<EOL>all_successes = succeeded + pending<EOL>if notify:<EOL><INDENT>cls.notify_program_learners(<EOL>enterprise_customer=enterprise_customer,<EOL>program_details=program_details,<EOL>users=all_successes<EOL>)<EOL><DEDENT>program_identifier = program_details.get('<STR_LIT:title>', program_details.get('<STR_LIT>', _('<STR_LIT>')))<EOL>if succeeded:<EOL><INDENT>pending_messages.append(cls.get_success_enrollment_message(succeeded, program_identifier))<EOL><DEDENT>if failed:<EOL><INDENT>pending_messages.append(cls.get_failed_enrollment_message(failed, program_identifier))<EOL><DEDENT>if pending:<EOL><INDENT>pending_messages.append(cls.get_pending_enrollment_message(pending, program_identifier))<EOL><DEDENT><DEDENT>cls.send_messages(request, 
pending_messages)<EOL>", "docstring": "Enroll the users with the given email addresses to the courses specified, either specifically or by program.\n\nArgs:\n    cls (type): The EnterpriseCustomerManageLearnersView class itself\n    request: The HTTP request the enrollment is being created by\n    enterprise_customer: The instance of EnterpriseCustomer whose attached users we're enrolling\n    emails: An iterable of strings containing email addresses to enroll in a course\n    mode: The enrollment mode the users will be enrolled in the course with\n    course_id: The ID of the course in which we want to enroll\n    program_details: Details about a program in which we want to enroll\n    notify: Whether to notify (by email) the users that have been enrolled", "id": "f16062:c2:m17"}
{"signature": "def get(self, request, enterprise_customer_uuid):", "body": "context = self._build_context(request, enterprise_customer_uuid)<EOL>transmit_courses_metadata_form = TransmitEnterpriseCoursesForm()<EOL>context.update({self.ContextParameters.TRANSMIT_COURSES_METADATA_FORM: transmit_courses_metadata_form})<EOL>return render(request, self.template, context)<EOL>", "docstring": "Handle GET request - render \"Transmit courses metadata\" form.\n\nArguments:\n    request (django.http.request.HttpRequest): Request instance\n    enterprise_customer_uuid (str): Enterprise Customer UUID\n\nReturns:\n    django.http.response.HttpResponse: HttpResponse", "id": "f16062:c1:m2"}
{"signature": "@classmethod<EOL><INDENT>def enroll_users_in_program(cls, enterprise_customer, program_details, course_mode, emails, cohort=None):<DEDENT>", "body": "existing_users, unregistered_emails = cls.get_users_by_email(emails)<EOL>course_ids = get_course_runs_from_program(program_details)<EOL>successes = []<EOL>pending = []<EOL>failures = []<EOL>for user in existing_users:<EOL><INDENT>succeeded = cls.enroll_user(enterprise_customer, user, course_mode, *course_ids)<EOL>if succeeded:<EOL><INDENT>successes.append(user)<EOL><DEDENT>else:<EOL><INDENT>failures.append(user)<EOL><DEDENT><DEDENT>for email in unregistered_emails:<EOL><INDENT>pending_user = enterprise_customer.enroll_user_pending_registration(<EOL>email,<EOL>course_mode,<EOL>*course_ids,<EOL>cohort=cohort<EOL>)<EOL>pending.append(pending_user)<EOL><DEDENT>return successes, pending, failures<EOL>", "docstring": "Enroll existing users in all courses in a program, and create pending enrollments for nonexisting users.\n\nArgs:\n    enterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment\n    program_details: The details of the program in which we're enrolling\n    course_mode (str): The mode with which we're enrolling in the program\n    emails: An iterable of email addresses which need to be enrolled\n\nReturns:\n    successes: A list of users who were successfully enrolled in all courses of the program\n    pending: A list of PendingEnterpriseCustomerUsers who were successfully linked and had\n        pending enrollments created for them in the database\n    failures: A list of users who could not be enrolled in the program", "id": "f16062:c2:m10"}
{"signature": "@classmethod<EOL><INDENT>def get_failed_enrollment_message(cls, users, enrolled_in):<DEDENT>", "body": "failed_emails = [user.email for user in users]<EOL>return (<EOL>'<STR_LIT:error>',<EOL>_(<EOL>'<STR_LIT>'<EOL>).format(<EOL>enrolled_in=enrolled_in,<EOL>user_list='<STR_LIT:U+002CU+0020>'.join(failed_emails),<EOL>)<EOL>)<EOL>", "docstring": "Create message for the users who were not able to be enrolled in a course or program.\n\nArgs:\n    users: An iterable of users who were not successfully enrolled\n    enrolled_in (str): A string identifier for the course or program with which enrollment was attempted\n\nReturns:\ntuple: A 2-tuple containing a message type and message text", "id": "f16062:c2:m15"}
{"signature": "@classmethod<EOL><INDENT>def enroll_user(cls, enterprise_customer, user, course_mode, *course_ids):<DEDENT>", "body": "enterprise_customer_user, __ = EnterpriseCustomerUser.objects.get_or_create(<EOL>enterprise_customer=enterprise_customer,<EOL>user_id=user.id<EOL>)<EOL>enrollment_client = EnrollmentApiClient()<EOL>succeeded = True<EOL>for course_id in course_ids:<EOL><INDENT>try:<EOL><INDENT>enrollment_client.enroll_user_in_course(user.username, course_id, course_mode)<EOL><DEDENT>except HttpClientError as exc:<EOL><INDENT>if cls.is_user_enrolled(user, course_id, course_mode):<EOL><INDENT>succeeded = True<EOL><DEDENT>else:<EOL><INDENT>succeeded = False<EOL>default_message = '<STR_LIT>'<EOL>try:<EOL><INDENT>error_message = json.loads(exc.content.decode()).get('<STR_LIT:message>', default_message)<EOL><DEDENT>except ValueError:<EOL><INDENT>error_message = default_message<EOL><DEDENT>logging.error(<EOL>'<STR_LIT>',<EOL>dict(user=user.username, message=error_message)<EOL>)<EOL><DEDENT><DEDENT>if succeeded:<EOL><INDENT>__, created = EnterpriseCourseEnrollment.objects.get_or_create(<EOL>enterprise_customer_user=enterprise_customer_user,<EOL>course_id=course_id<EOL>)<EOL>if created:<EOL><INDENT>track_enrollment('<STR_LIT>', user.id, course_id)<EOL><DEDENT><DEDENT><DEDENT>return succeeded<EOL>", "docstring": "Enroll a single user in any number of courses using a particular course mode.\n\nArgs:\n    enterprise_customer: The EnterpriseCustomer which is sponsoring the enrollment\n    user: The user who needs to be enrolled in the course\n    course_mode: The mode with which the enrollment should be created\n    *course_ids: An iterable containing any number of course IDs to eventually enroll the user in.\n\nReturns:\n    Boolean: Whether or not enrollment succeeded for all courses specified", "id": "f16062:c2:m7"}
{"signature": "def _build_context(self, request, customer_uuid):", "body": "<EOL>enterprise_customer = EnterpriseCustomer.objects.get(uuid=customer_uuid)  <EOL>search_keyword = self.get_search_keyword(request)<EOL>linked_learners = self.get_enterprise_customer_user_queryset(request, search_keyword, customer_uuid)<EOL>pending_linked_learners = self.get_pending_users_queryset(search_keyword, customer_uuid)<EOL>context = {<EOL>self.ContextParameters.ENTERPRISE_CUSTOMER: enterprise_customer,<EOL>self.ContextParameters.PENDING_LEARNERS: pending_linked_learners,<EOL>self.ContextParameters.LEARNERS: linked_learners,<EOL>self.ContextParameters.SEARCH_KEYWORD: search_keyword or '<STR_LIT>',<EOL>self.ContextParameters.ENROLLMENT_URL: settings.LMS_ENROLLMENT_API_PATH,<EOL>}<EOL>context.update(admin.site.each_context(request))<EOL>context.update(self._build_admin_context(request, enterprise_customer))<EOL>return context<EOL>", "docstring": "Build common context parts used by different handlers in this view.", "id": "f16062:c2:m1"}
{"signature": "@classmethod<EOL><INDENT>def get_success_enrollment_message(cls, users, enrolled_in):<DEDENT>", "body": "enrolled_count = len(users)<EOL>return (<EOL>'<STR_LIT:success>',<EOL>ungettext(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>enrolled_count,<EOL>).format(<EOL>enrolled_count=enrolled_count,<EOL>enrolled_in=enrolled_in,<EOL>)<EOL>)<EOL>", "docstring": "Create message for the users who were enrolled in a course or program.\n\nArgs:\n    users: An iterable of users who were successfully enrolled\n    enrolled_in (str): A string identifier for the course or program the users were enrolled in\n\nReturns:\n    tuple: A 2-tuple containing a message type and message text", "id": "f16062:c2:m14"}
{"signature": "def get(self, request, template_id, view_type):", "body": "template = get_object_or_404(EnrollmentNotificationEmailTemplate, pk=template_id)<EOL>if view_type not in self.view_type_contexts:<EOL><INDENT>return HttpResponse(status=<NUM_LIT>)<EOL><DEDENT>base_context = self.view_type_contexts[view_type].copy()<EOL>base_context.update({'<STR_LIT>': self.get_user_name(request)})<EOL>return HttpResponse(template.render_html_template(base_context), content_type='<STR_LIT>')<EOL>", "docstring": "Render the given template with the stock data.", "id": "f16062:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def _build_admin_context(request, customer):<DEDENT>", "body": "opts = customer._meta<EOL>codename = get_permission_codename('<STR_LIT>', opts)<EOL>has_change_permission = request.user.has_perm('<STR_LIT>' % (opts.app_label, codename))<EOL>return {<EOL>'<STR_LIT>': has_change_permission,<EOL>'<STR_LIT>': opts<EOL>}<EOL>", "docstring": "Build common admin context.", "id": "f16062:c1:m0"}
{"signature": "@classmethod<EOL><INDENT>def get_users_by_email(cls, emails):<DEDENT>", "body": "users = User.objects.filter(email__in=emails)<EOL>present_emails = users.values_list('<STR_LIT:email>', flat=True)<EOL>missing_emails = list(set(emails) - set(present_emails))<EOL>return users, missing_emails<EOL>", "docstring": "Accept a list of emails, and separate them into users that exist on OpenEdX and users who don't.\n\nArgs:\n    emails: An iterable of email addresses to split between existing and nonexisting\n\nReturns:\n    users: Queryset of users who exist in the OpenEdX platform and who were in the list of email addresses\n    missing_emails: List of unique emails which were in the original list, but do not yet exist as users", "id": "f16062:c2:m9"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(EnterpriseCustomerAdminForm, self).__init__(*args, **kwargs)<EOL>self.fields['<STR_LIT>'] = forms.ChoiceField(<EOL>choices=self.get_catalog_options(),<EOL>required=False,<EOL>help_text='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>catalog_admin_change_url=utils.get_catalog_admin_url_template(mode='<STR_LIT>'),<EOL>catalog_admin_add_url=utils.get_catalog_admin_url_template(mode='<STR_LIT>'))<EOL>)<EOL>", "docstring": "Initialize the form.\n\nSubstitute a ChoiceField in for the catalog field that would\nnormally be set up as a plain number entry field.", "id": "f16063:c1:m0"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "super(EnterpriseCustomerIdentityProviderAdminForm, self).__init__(*args, **kwargs)<EOL>idp_choices = utils.get_idp_choices()<EOL>help_text = '<STR_LIT>'<EOL>if saml_provider_configuration:<EOL><INDENT>provider_id = self.instance.provider_id<EOL>url = reverse('<STR_LIT>'.format(<EOL>saml_provider_configuration._meta.app_label,<EOL>saml_provider_configuration._meta.model_name))<EOL>if provider_id:<EOL><INDENT>identity_provider = utils.get_identity_provider(provider_id)<EOL>if identity_provider:<EOL><INDENT>update_url = url + '<STR_LIT>'.format(identity_provider.pk)<EOL>help_text = '<STR_LIT>'.format(update_url=update_url, identity_provider=identity_provider.name)<EOL><DEDENT>else:<EOL><INDENT>help_text += '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>help_text += '<STR_LIT>''<STR_LIT>'.format(add_url=url)<EOL><DEDENT><DEDENT>if idp_choices is not None:<EOL><INDENT>self.fields['<STR_LIT>'] = forms.TypedChoiceField(<EOL>choices=idp_choices,<EOL>label=_('<STR_LIT>'),<EOL>help_text=mark_safe(help_text),<EOL>)<EOL><DEDENT>", "docstring": "Initialize the form.\n\nSubstitutes CharField with TypedChoiceField for the provider_id field.", "id": "f16063:c2:m0"}
{"signature": "def _validate_course(self):", "body": "<EOL>course_details = self.cleaned_data.get(self.Fields.COURSE)<EOL>if course_details:<EOL><INDENT>course_mode = self.cleaned_data.get(self.Fields.COURSE_MODE)<EOL>if not course_mode:<EOL><INDENT>raise ValidationError(ValidationMessages.COURSE_WITHOUT_COURSE_MODE)<EOL><DEDENT>valid_course_modes = course_details[\"<STR_LIT>\"]<EOL>if all(course_mode != mode[\"<STR_LIT>\"] for mode in valid_course_modes):<EOL><INDENT>error = ValidationError(ValidationMessages.COURSE_MODE_INVALID_FOR_COURSE.format(<EOL>course_mode=course_mode,<EOL>course_id=course_details[\"<STR_LIT>\"],<EOL>))<EOL>raise ValidationError({self.Fields.COURSE_MODE: error})<EOL><DEDENT><DEDENT>", "docstring": "Verify that the selected mode is valid for the given course .", "id": "f16063:c0:m6"}
{"signature": "def clean_program(self):", "body": "program_id = self.cleaned_data[self.Fields.PROGRAM].strip()<EOL>if not program_id:<EOL><INDENT>return None<EOL><DEDENT>try:<EOL><INDENT>client = CourseCatalogApiClient(self._user, self._enterprise_customer.site)<EOL>program = client.get_program_by_uuid(program_id) or client.get_program_by_title(program_id)<EOL><DEDENT>except MultipleProgramMatchError as exc:<EOL><INDENT>raise ValidationError(ValidationMessages.MULTIPLE_PROGRAM_MATCH.format(program_count=exc.programs_matched))<EOL><DEDENT>except (HttpClientError, HttpServerError):<EOL><INDENT>raise ValidationError(ValidationMessages.INVALID_PROGRAM_ID.format(program_id=program_id))<EOL><DEDENT>if not program:<EOL><INDENT>raise ValidationError(ValidationMessages.INVALID_PROGRAM_ID.format(program_id=program_id))<EOL><DEDENT>if program['<STR_LIT:status>'] != ProgramStatuses.ACTIVE:<EOL><INDENT>raise ValidationError(<EOL>ValidationMessages.PROGRAM_IS_INACTIVE.format(program_id=program_id, status=program['<STR_LIT:status>'])<EOL>)<EOL><DEDENT>return program<EOL>", "docstring": "Clean program.\n\nTry obtaining program treating form value as program UUID or title.\n\nReturns:\n    dict: Program information if program found", "id": "f16063:c0:m3"}
{"signature": "def clean_email_or_username(self):", "body": "email_or_username = self.cleaned_data[self.Fields.EMAIL_OR_USERNAME].strip()<EOL>if not email_or_username:<EOL><INDENT>return email_or_username<EOL><DEDENT>email = email_or_username__to__email(email_or_username)<EOL>bulk_entry = len(split_usernames_and_emails(email)) > <NUM_LIT:1><EOL>if bulk_entry:<EOL><INDENT>for email in split_usernames_and_emails(email):<EOL><INDENT>validate_email_to_link(<EOL>email,<EOL>None,<EOL>ValidationMessages.INVALID_EMAIL_OR_USERNAME,<EOL>ignore_existing=True<EOL>)<EOL><DEDENT>email = email_or_username<EOL><DEDENT>else:<EOL><INDENT>validate_email_to_link(<EOL>email,<EOL>email_or_username,<EOL>ValidationMessages.INVALID_EMAIL_OR_USERNAME,<EOL>ignore_existing=True<EOL>)<EOL><DEDENT>return email<EOL>", "docstring": "Clean email form field\n\nReturns:\n    str: the cleaned value, converted to an email address (or an empty string)", "id": "f16063:c0:m1"}
{"signature": "def clean_channel_worker_username(self):", "body": "channel_worker_username = self.cleaned_data['<STR_LIT>'].strip()<EOL>try:<EOL><INDENT>User.objects.get(username=channel_worker_username)<EOL><DEDENT>except User.DoesNotExist:<EOL><INDENT>raise ValidationError(<EOL>ValidationMessages.INVALID_CHANNEL_WORKER.format(<EOL>channel_worker_username=channel_worker_username<EOL>)<EOL>)<EOL><DEDENT>return channel_worker_username<EOL>", "docstring": "Clean enterprise channel worker user form field\n\nReturns:\n    str: the cleaned value of channel user username for transmitting courses metadata.", "id": "f16063:c4:m0"}
{"signature": "def get_catalog_options(self):", "body": "<EOL>if hasattr(self.instance, '<STR_LIT>'):<EOL><INDENT>catalog_api = CourseCatalogApiClient(self.user, self.instance.site)<EOL><DEDENT>else:<EOL><INDENT>catalog_api = CourseCatalogApiClient(self.user)<EOL><DEDENT>catalogs = catalog_api.get_all_catalogs()<EOL>catalogs = sorted(catalogs, key=lambda catalog: catalog.get('<STR_LIT:name>', '<STR_LIT>').lower())<EOL>return BLANK_CHOICE_DASH + [<EOL>(catalog['<STR_LIT:id>'], catalog['<STR_LIT:name>'],)<EOL>for catalog in catalogs<EOL>]<EOL>", "docstring": "Retrieve a list of catalog ID and name pairs.\n\nOnce retrieved, these name pairs can be used directly as a value\nfor the `choices` argument to a ChoiceField.", "id": "f16063:c1:m1"}
{"signature": "def has_enterprise_catalog(self, instance):", "body": "return instance.catalog is not None<EOL>", "docstring": "Return True if EnterpriseCustomer has catalog id with a link to catalog details page.\n\nArguments:\n    instance (enterprise.models.EnterpriseCustomer): `EnterpriseCustomer` model instance", "id": "f16064:c5:m5"}
{"signature": "def has_delete_permission(self, request, obj=None):", "body": "return False<EOL>", "docstring": "Disable deletion for PendingEnrollment.", "id": "f16064:c10:m1"}
{"signature": "def get_enrolled_course_string(self, enterprise_customer_user):", "body": "enrollment_client = EnrollmentApiClient()<EOL>enrolled_courses = enrollment_client.get_enrolled_courses(enterprise_customer_user.username)<EOL>course_details = []<EOL>courses_client = CourseApiClient()<EOL>for course in enrolled_courses:<EOL><INDENT>course_id = course['<STR_LIT>']['<STR_LIT>']<EOL>name = courses_client.get_course_details(course_id)['<STR_LIT:name>']<EOL>course_details.append({'<STR_LIT>': course_id, '<STR_LIT>': name})<EOL><DEDENT>template = '<STR_LIT>'<EOL>joiner = '<STR_LIT>'<EOL>return joiner.join(<EOL>template.format(<EOL>url=reverse('<STR_LIT>', args=[course['<STR_LIT>']]),<EOL>course_name=course['<STR_LIT>'],<EOL>)<EOL>for course in course_details<EOL>)<EOL>", "docstring": "Get an HTML string representing the courses the user is enrolled in.", "id": "f16064:c6:m3"}
{"signature": "def uuid_nowrap(self, obj):", "body": "return format_html('<STR_LIT>'.format(uuid=obj.uuid))<EOL>", "docstring": "Inject html for disabling wrap for uuid", "id": "f16064:c11:m1"}
{"signature": "def ecommerce_coupon_url(self, instance):", "body": "if not instance.entitlement_id:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>return format_html(<EOL>'<STR_LIT>',<EOL>base_url=settings.ECOMMERCE_PUBLIC_URL_ROOT, id=instance.entitlement_id<EOL>)<EOL>", "docstring": "Instance is EnterpriseCustomer. Return e-commerce coupon urls.", "id": "f16064:c2:m0"}
{"signature": "def get_form(self, request, obj=None, **kwargs):", "body": "form = super(EnterpriseCustomerAdmin, self).get_form(request, obj, **kwargs)<EOL>form.user = request.user<EOL>return form<EOL>", "docstring": "Retrieve the appropriate form to use, saving the request user\ninto the form for use in loading catalog details", "id": "f16064:c5:m1"}
{"signature": "def enrolled_courses(self, enterprise_customer_user):", "body": "courses_string = mark_safe(self.get_enrolled_course_string(enterprise_customer_user))<EOL>return courses_string or '<STR_LIT:None>'<EOL>", "docstring": "Return a string representing the courses a given EnterpriseCustomerUser is enrolled in\n\nArgs:\n    enterprise_customer_user: The instance of EnterpriseCustomerUser\n        being rendered with this admin form.", "id": "f16064:c6:m1"}
{"signature": "def has_add_permission(self, request):", "body": "return False<EOL>", "docstring": "Disable add permission for PendingEnrollment.", "id": "f16064:c10:m0"}
{"signature": "def get_readonly_fields(self, request, obj=None):", "body": "readonly_fields = super(EnterpriseCustomerUserAdmin, self).get_readonly_fields(request, obj=obj)<EOL>if obj:  <EOL><INDENT>return readonly_fields + tuple(get_all_field_names(self.model))<EOL><DEDENT>return readonly_fields<EOL>", "docstring": "Make all fields readonly when editing existing model.", "id": "f16064:c6:m2"}
{"signature": "def manage_learners(self, request, obj):  ", "body": "<EOL>manage_learners_url = reverse(\"<STR_LIT>\" + UrlNames.MANAGE_LEARNERS, args=(obj.uuid,))<EOL>return HttpResponseRedirect(manage_learners_url)<EOL>", "docstring": "Object tool handler method - redirects to \"Manage Learners\" view", "id": "f16064:c5:m6"}
{"signature": "def paginated_list(object_list, page, page_size=<NUM_LIT>):", "body": "paginator = CustomPaginator(object_list, page_size)<EOL>try:<EOL><INDENT>object_list = paginator.page(page)<EOL><DEDENT>except PageNotAnInteger:<EOL><INDENT>object_list = paginator.page(<NUM_LIT:1>)<EOL><DEDENT>except EmptyPage:<EOL><INDENT>object_list = paginator.page(paginator.num_pages)<EOL><DEDENT>page_range = []<EOL>page_num = object_list.number<EOL>if paginator.num_pages <= <NUM_LIT:10>:<EOL><INDENT>page_range = range(paginator.num_pages)<EOL><DEDENT>else:<EOL><INDENT>if page_num > (PAGES_ON_EACH_SIDE + PAGES_ON_ENDS + <NUM_LIT:1>):<EOL><INDENT>page_range.extend(range(<NUM_LIT:1>, PAGES_ON_ENDS + <NUM_LIT:1>))<EOL>page_range.append(DOT)<EOL>page_range.extend(range(page_num - PAGES_ON_EACH_SIDE, page_num + <NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>page_range.extend(range(<NUM_LIT:1>, page_num + <NUM_LIT:1>))<EOL><DEDENT>if page_num < (paginator.num_pages - PAGES_ON_EACH_SIDE - PAGES_ON_ENDS):<EOL><INDENT>page_range.extend(range(page_num + <NUM_LIT:1>, page_num + PAGES_ON_EACH_SIDE + <NUM_LIT:1>))<EOL>page_range.append(DOT)<EOL>page_range.extend(range(paginator.num_pages + <NUM_LIT:1> - PAGES_ON_ENDS, paginator.num_pages + <NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>page_range.extend(range(page_num + <NUM_LIT:1>, paginator.num_pages + <NUM_LIT:1>))<EOL><DEDENT>object_list.paginator.page_range = page_range<EOL><DEDENT>return object_list<EOL>", "docstring": "Returns paginated list.\n\nArguments:\n    object_list (QuerySet): A list of records to be paginated.\n    page (int): Current page number.\n    page_size (int): Number of records displayed in each paginated set.\n    show_all (bool): Whether to show all records.\n\nAdopted from django/contrib/admin/templatetags/admin_list.py\nhttps://github.com/django/django/blob/1.11.1/django/contrib/admin/templatetags/admin_list.py#L50", "id": "f16065:m6"}
{"signature": "def validate_email_to_link(email, raw_email=None, message_template=None, ignore_existing=False):", "body": "raw_email = raw_email if raw_email is not None else email<EOL>message_template = message_template if message_template is not None else ValidationMessages.INVALID_EMAIL<EOL>try:<EOL><INDENT>validate_email(email)<EOL><DEDENT>except ValidationError:<EOL><INDENT>raise ValidationError(message_template.format(argument=raw_email))<EOL><DEDENT>existing_record = EnterpriseCustomerUser.objects.get_link_by_email(email)<EOL>if existing_record and not ignore_existing:<EOL><INDENT>raise ValidationError(ValidationMessages.USER_ALREADY_REGISTERED.format(<EOL>email=email, ec_name=existing_record.enterprise_customer.name<EOL>))<EOL><DEDENT>return existing_record or False<EOL>", "docstring": "Validate email to be linked to Enterprise Customer.\n\nPerforms two checks:\n    * Checks that email is valid\n    * Checks that it is not already linked to any Enterprise Customer\n\nArguments:\n    email (str): user email to link\n    raw_email (str): raw value as it was passed by user - used in error message.\n    message_template (str): Validation error template string.\n    ignore_existing (bool): If True to skip the check for an existing Enterprise Customer\n\nRaises:\n    ValidationError: if email is invalid or already linked to Enterprise Customer.\n\nReturns:\n    bool: Whether or not there is an existing record with the same email address.", "id": "f16065:m2"}
{"signature": "def email_or_username__to__email(email_or_username):", "body": "try:<EOL><INDENT>user = User.objects.get(username=email_or_username)<EOL>return user.email<EOL><DEDENT>except User.DoesNotExist:<EOL><INDENT>return email_or_username<EOL><DEDENT>", "docstring": "Convert email_or_username to email.\n\nReturns:\n    str: If `email_or_username` was a username returns user's email, otherwise assumes it was an email and returns\n         as is.", "id": "f16065:m1"}
{"signature": "def export_as_csv_action(description=\"<STR_LIT>\", fields=None, header=True):", "body": "<EOL>def export_as_csv(modeladmin, request, queryset):  <EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>opts = modeladmin.model._meta<EOL>if not fields:<EOL><INDENT>field_names = [field.name for field in opts.fields]<EOL><DEDENT>else:<EOL><INDENT>field_names = fields<EOL><DEDENT>response = HttpResponse(content_type=\"<STR_LIT>\")<EOL>response[\"<STR_LIT>\"] = \"<STR_LIT>\".format(<EOL>filename=str(opts).replace(\"<STR_LIT:.>\", \"<STR_LIT:_>\")<EOL>)<EOL>writer = unicodecsv.writer(response, encoding=\"<STR_LIT:utf-8>\")<EOL>if header:<EOL><INDENT>writer.writerow(field_names)<EOL><DEDENT>for obj in queryset:<EOL><INDENT>row = []<EOL>for field_name in field_names:<EOL><INDENT>field = getattr(obj, field_name)<EOL>if callable(field):<EOL><INDENT>value = field()<EOL><DEDENT>else:<EOL><INDENT>value = field<EOL><DEDENT>if value is None:<EOL><INDENT>row.append(\"<STR_LIT>\")<EOL><DEDENT>elif not value and isinstance(value, string_types):<EOL><INDENT>row.append(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>row.append(value)<EOL><DEDENT><DEDENT>writer.writerow(row)<EOL><DEDENT>return response<EOL><DEDENT>export_as_csv.short_description = description<EOL>return export_as_csv<EOL>", "docstring": "Return an export csv action.\n\nArguments:\n    description (string): action description\n    fields ([string]): list of model fields to include\n    header (bool): whether or not to output the column names as the first row", "id": "f16066:m0"}
{"signature": "@page_range.setter<EOL><INDENT>def page_range(self, value):<DEDENT>", "body": "self._page_range = value<EOL>", "docstring": "We have introduced a setter method here, so as to set value for page_range property.\nThis was not present in Paginator class.", "id": "f16067:c0:m1"}
{"signature": "@property<EOL><INDENT>def auth_user_model(self):<DEDENT>", "body": "return apps.get_app_config(\"<STR_LIT>\").get_model(\"<STR_LIT>\")<EOL>", "docstring": "Return User model for django.contrib.auth.", "id": "f16068:c0:m0"}
{"signature": "def ready(self):", "body": "from enterprise.signals import handle_user_post_save<EOL>from django.db.models.signals import pre_migrate, post_save<EOL>post_save.connect(handle_user_post_save, sender=self.auth_user_model, dispatch_uid=USER_POST_SAVE_DISPATCH_UID)<EOL>pre_migrate.connect(self._disconnect_user_post_save_for_migrations)<EOL>", "docstring": "Perform other one-time initialization steps.", "id": "f16068:c0:m1"}
{"signature": "@rules.predicate<EOL>def rbac_permissions_disabled(user, obj):  ", "body": "return not waffle.switch_is_active(ENTERPRISE_ROLE_BASED_ACCESS_CONTROL_SWITCH)<EOL>", "docstring": "Temporary check for rbac based permissions being enabled.", "id": "f16069:m6"}
{"signature": "@rules.predicate<EOL>def has_implicit_access_to_dashboard(user, obj):  ", "body": "request = get_request_or_stub()<EOL>decoded_jwt = get_decoded_jwt_from_request(request)<EOL>return request_user_has_implicit_access_via_jwt(decoded_jwt, ENTERPRISE_DASHBOARD_ADMIN_ROLE)<EOL>", "docstring": "Check that if request user has implicit access to `ENTERPRISE_DASHBOARD_ADMIN_ROLE` feature role.\n\nReturns:\n    boolean: whether the request user has access or not", "id": "f16069:m0"}
{"signature": "@rules.predicate<EOL>def has_explicit_access_to_dashboard(user, obj):  ", "body": "return user_has_access_via_database(<EOL>user,<EOL>ENTERPRISE_DASHBOARD_ADMIN_ROLE,<EOL>EnterpriseFeatureUserRoleAssignment<EOL>)<EOL>", "docstring": "Check that if request user has explicit access to `ENTERPRISE_DASHBOARD_ADMIN_ROLE` feature role.\n\nReturns:\n    boolean: whether the request user has access or not", "id": "f16069:m1"}
{"signature": "@rules.predicate<EOL>def has_explicit_access_to_catalog(user, obj):", "body": "return user_has_access_via_database(<EOL>user,<EOL>ENTERPRISE_CATALOG_ADMIN_ROLE,<EOL>EnterpriseFeatureUserRoleAssignment,<EOL>obj<EOL>)<EOL>", "docstring": "Check that if request user has explicit access to `ENTERPRISE_CATALOG_ADMIN_ROLE` feature role.\n\nReturns:\n    boolean: whether the request user has access or not", "id": "f16069:m3"}
{"signature": "@rules.predicate<EOL>def has_explicit_access_to_enrollment_api(user, obj):", "body": "return user_has_access_via_database(<EOL>user,<EOL>ENTERPRISE_ENROLLMENT_API_ADMIN_ROLE,<EOL>EnterpriseFeatureUserRoleAssignment,<EOL>obj<EOL>)<EOL>", "docstring": "Check that if request user has explicit access to `ENTERPRISE_ENROLLMENT_API_ADMIN_ROLE` feature role.\n\nReturns:\n    boolean: whether the request user has access or not", "id": "f16069:m5"}
{"signature": "def update_enterprise_courses(self, enterprise_customer, course_container_key='<STR_LIT>', **kwargs):", "body": "enterprise_context = {<EOL>'<STR_LIT>': enterprise_customer and enterprise_customer.identity_provider,<EOL>'<STR_LIT>': enterprise_customer and str(enterprise_customer.uuid),<EOL>}<EOL>enterprise_context.update(**kwargs)<EOL>courses = []<EOL>for course in self.data[course_container_key]:<EOL><INDENT>courses.append(<EOL>self.update_course(course, enterprise_customer, enterprise_context)<EOL>)<EOL><DEDENT>self.data[course_container_key] = courses<EOL>", "docstring": "This method adds enterprise-specific metadata for each course.\n\nWe are adding following field in all the courses.\n    tpa_hint: a string for identifying Identity Provider.\n    enterprise_id: the UUID of the enterprise\n    **kwargs: any additional data one would like to add on a per-use basis.\n\nArguments:\n    enterprise_customer: The customer whose data will be used to fill the enterprise context.\n    course_container_key: The key used to find the container for courses in the serializer's data dictionary.", "id": "f16071:c0:m0"}
{"signature": "def update(self, instance, validated_data):", "body": "pass<EOL>", "docstring": "Do not perform any operations for state changing requests.", "id": "f16072:c0:m1"}
{"signature": "def validate_username(self, value):", "body": "try:<EOL><INDENT>self.user = User.objects.get(username=value)<EOL><DEDENT>except User.DoesNotExist:<EOL><INDENT>raise serializers.ValidationError(\"<STR_LIT>\")<EOL><DEDENT>return value<EOL>", "docstring": "Verify that the username has a matching user.", "id": "f16072:c12:m0"}
{"signature": "def to_representation(self, data):", "body": "return [<EOL>self.child.to_representation(item) if '<STR_LIT>' in item else item for item in data<EOL>]<EOL>", "docstring": "This selectively calls to_representation on each result that was processed by create.", "id": "f16072:c20:m2"}
{"signature": "def save(self):  ", "body": "enterprise_customer = self.validated_data['<STR_LIT>']<EOL>ecu = models.EnterpriseCustomerUser(<EOL>user_id=self.user.pk,<EOL>enterprise_customer=enterprise_customer,<EOL>)<EOL>ecu.save()<EOL>", "docstring": "Save the EnterpriseCustomerUser.", "id": "f16072:c12:m1"}
{"signature": "def validate_course_run_id(self, value):", "body": "enterprise_customer = self.context.get('<STR_LIT>')<EOL>if not enterprise_customer.catalog_contains_course(value):<EOL><INDENT>raise serializers.ValidationError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>course_run_id=value,<EOL>enterprise_customer=enterprise_customer.name,<EOL>)<EOL>)<EOL><DEDENT>return value<EOL>", "docstring": "Validates that the course run id is part of the Enterprise Customer's catalog.", "id": "f16072:c21:m4"}
{"signature": "def to_representation(self, instance):", "body": "updated_course = copy.deepcopy(instance)<EOL>enterprise_customer_catalog = self.context['<STR_LIT>']<EOL>updated_course['<STR_LIT>'] = enterprise_customer_catalog.get_course_enrollment_url(<EOL>updated_course['<STR_LIT:key>']<EOL>)<EOL>for course_run in updated_course['<STR_LIT>']:<EOL><INDENT>course_run['<STR_LIT>'] = enterprise_customer_catalog.get_course_run_enrollment_url(<EOL>course_run['<STR_LIT:key>']<EOL>)<EOL><DEDENT>return updated_course<EOL>", "docstring": "Return the updated course data dictionary.\n\nArguments:\n    instance (dict): The course data.\n\nReturns:\n    dict: The updated course data.", "id": "f16072:c16:m0"}
{"signature": "def create(self, validated_data):", "body": "pass<EOL>", "docstring": "Do not perform any operations for state changing requests.", "id": "f16072:c0:m0"}
{"signature": "def validate_user_email(self, value):", "body": "enterprise_customer = self.context.get('<STR_LIT>')<EOL>try:<EOL><INDENT>user = User.objects.get(email=value)<EOL>return models.EnterpriseCustomerUser.objects.get(<EOL>user_id=user.id,<EOL>enterprise_customer=enterprise_customer<EOL>)<EOL><DEDENT>except (models.EnterpriseCustomerUser.DoesNotExist, User.DoesNotExist):<EOL><INDENT>pass<EOL><DEDENT>return value<EOL>", "docstring": "Validates the user_email, if given, to see if an existing EnterpriseCustomerUser exists for it.\n\nIf it does not, it does not fail validation, unlike for the other field validation methods above.", "id": "f16072:c21:m3"}
{"signature": "def validate(self, data):  ", "body": "lms_user_id = data.get('<STR_LIT>')<EOL>tpa_user_id = data.get('<STR_LIT>')<EOL>user_email = data.get('<STR_LIT>')<EOL>if not lms_user_id and not tpa_user_id and not user_email:<EOL><INDENT>raise serializers.ValidationError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>return data<EOL>", "docstring": "Validate that at least one of the user identifier fields has been passed in.", "id": "f16072:c21:m5"}
{"signature": "def get_data_sharing_consent_records(self, obj):", "body": "return [record.serialize() for record in obj.data_sharing_consent_records]<EOL>", "docstring": "Return serialization of EnterpriseCustomerUser.data_sharing_consent_records property.\n\nArguments:\n    EnterpriseCustomerUser: The EnterpriseCustomerUser.\n\nReturns:\n    list of dict: The serialized DataSharingConsent records associated with the EnterpriseCustomerUser.", "id": "f16072:c11:m0"}
{"signature": "def validate_username(self, value):", "body": "try:<EOL><INDENT>user = User.objects.get(username=value)<EOL><DEDENT>except User.DoesNotExist:<EOL><INDENT>raise serializers.ValidationError(\"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>enterprise_customer_user = models.EnterpriseCustomerUser.objects.get(user_id=user.pk)<EOL><DEDENT>except models.EnterpriseCustomerUser.DoesNotExist:<EOL><INDENT>raise serializers.ValidationError(\"<STR_LIT>\")<EOL><DEDENT>self.enterprise_customer_user = enterprise_customer_user<EOL>return value<EOL>", "docstring": "Verify that the username has a matching user, and that the user has an associated EnterpriseCustomerUser.", "id": "f16072:c8:m0"}
{"signature": "def to_representation(self, instance):", "body": "request = self.context['<STR_LIT>']<EOL>enterprise_customer = instance.enterprise_customer<EOL>representation = super(EnterpriseCustomerCatalogDetailSerializer, self).to_representation(instance)<EOL>paginated_content = instance.get_paginated_content(request.GET)<EOL>count = paginated_content['<STR_LIT:count>']<EOL>search_results = paginated_content['<STR_LIT>']<EOL>for item in search_results:<EOL><INDENT>content_type = item['<STR_LIT>']<EOL>marketing_url = item.get('<STR_LIT>')<EOL>if marketing_url:<EOL><INDENT>item['<STR_LIT>'] = utils.update_query_parameters(<EOL>marketing_url, utils.get_enterprise_utm_context(enterprise_customer)<EOL>)<EOL><DEDENT>if content_type == '<STR_LIT>':<EOL><INDENT>item['<STR_LIT>'] = instance.get_course_enrollment_url(item['<STR_LIT:key>'])<EOL><DEDENT>if content_type == '<STR_LIT>':<EOL><INDENT>item['<STR_LIT>'] = instance.get_course_run_enrollment_url(item['<STR_LIT:key>'])<EOL><DEDENT>if content_type == '<STR_LIT>':<EOL><INDENT>item['<STR_LIT>'] = instance.get_program_enrollment_url(item['<STR_LIT>'])<EOL><DEDENT><DEDENT>previous_url = None<EOL>next_url = None<EOL>page = int(request.GET.get('<STR_LIT>', '<STR_LIT:1>'))<EOL>request_uri = request.build_absolute_uri()<EOL>if paginated_content['<STR_LIT>']:<EOL><INDENT>previous_url = utils.update_query_parameters(request_uri, {'<STR_LIT>': page - <NUM_LIT:1>})<EOL><DEDENT>if paginated_content['<STR_LIT>']:<EOL><INDENT>next_url = utils.update_query_parameters(request_uri, {'<STR_LIT>': page + <NUM_LIT:1>})<EOL><DEDENT>representation['<STR_LIT:count>'] = count<EOL>representation['<STR_LIT>'] = previous_url<EOL>representation['<STR_LIT>'] = next_url<EOL>representation['<STR_LIT>'] = search_results<EOL>return representation<EOL>", "docstring": "Serialize the EnterpriseCustomerCatalog object.\n\nArguments:\n    instance (EnterpriseCustomerCatalog): The EnterpriseCustomerCatalog to serialize.\n\nReturns:\n    dict: The 
EnterpriseCustomerCatalog converted to a dict.", "id": "f16072:c10:m0"}
{"signature": "@method_decorator(require_at_least_one_query_parameter('<STR_LIT>'))<EOL><INDENT>@list_route(permission_classes=[<EOL>permissions.IsAuthenticated,<EOL>IsInEnterpriseGroup,<EOL>])<EOL>def with_access_to(self, request, *args, **kwargs):  <DEDENT>", "body": "self.queryset = self.queryset.order_by('<STR_LIT:name>')<EOL>enterprise_id = self.request.query_params.get('<STR_LIT>', None)<EOL>enterprise_slug = self.request.query_params.get('<STR_LIT>', None)<EOL>enterprise_name = self.request.query_params.get('<STR_LIT>', None)<EOL>if enterprise_id is not None:<EOL><INDENT>self.queryset = self.queryset.filter(uuid=enterprise_id)<EOL><DEDENT>elif enterprise_slug is not None:<EOL><INDENT>self.queryset = self.queryset.filter(slug=enterprise_slug)<EOL><DEDENT>elif enterprise_name is not None:<EOL><INDENT>self.queryset = self.queryset.filter(name__icontains=enterprise_name)<EOL><DEDENT>return self.list(request, *args, **kwargs)<EOL>", "docstring": "Returns the list of enterprise customers the user has a specified group permission access to.", "id": "f16073:c5:m3"}
{"signature": "@method_decorator(enterprise_customer_required)<EOL><INDENT>@detail_route()<EOL>def courses(self, request, enterprise_customer, pk=None):  <DEDENT>", "body": "catalog_api = CourseCatalogApiClient(request.user, enterprise_customer.site)<EOL>courses = catalog_api.get_paginated_catalog_courses(pk, request.GET)<EOL>self.ensure_data_exists(<EOL>request,<EOL>courses,<EOL>error_message=(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(endpoint=request.get_full_path())<EOL>)<EOL>)<EOL>serializer = serializers.EnterpriseCatalogCoursesReadOnlySerializer(courses)<EOL>serializer.update_enterprise_courses(enterprise_customer, catalog_id=pk)<EOL>return get_paginated_response(serializer.data, request)<EOL>", "docstring": "Retrieve the list of courses contained within this catalog.\n\nOnly courses with active course runs are returned. A course run is considered active if it is currently\nopen for enrollment, or will open in the future.", "id": "f16073:c11:m2"}
{"signature": "@permission_required('<STR_LIT>')<EOL><INDENT>def post(self, request):<DEDENT>", "body": "try:<EOL><INDENT>email, enterprise_name, number_of_codes = self.get_required_query_params(request)<EOL><DEDENT>except CodesAPIRequestError as invalid_request:<EOL><INDENT>return Response({'<STR_LIT:error>': str(invalid_request)}, status=HTTP_400_BAD_REQUEST)<EOL><DEDENT>subject_line = _('<STR_LIT>').format(<EOL>token_enterprise_name=enterprise_name<EOL>)<EOL>msg_with_codes = _('<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>token_email=email,<EOL>token_enterprise_name=enterprise_name,<EOL>token_number_codes=number_of_codes)<EOL>msg_without_codes = _('<STR_LIT>'<EOL>'<STR_LIT>').format(<EOL>token_email=email,<EOL>token_enterprise_name=enterprise_name)<EOL>app_config = apps.get_app_config(\"<STR_LIT>\")<EOL>from_email_address = app_config.customer_success_email<EOL>cs_email = app_config.customer_success_email<EOL>data = {<EOL>self.REQUIRED_PARAM_EMAIL: email,<EOL>self.REQUIRED_PARAM_ENTERPRISE_NAME: enterprise_name,<EOL>self.OPTIONAL_PARAM_NUMBER_OF_CODES: number_of_codes,<EOL>}<EOL>try:<EOL><INDENT>mail.send_mail(<EOL>subject_line,<EOL>msg_with_codes if number_of_codes else msg_without_codes,<EOL>from_email_address,<EOL>[cs_email],<EOL>fail_silently=False<EOL>)<EOL>return Response(data, status=HTTP_200_OK)<EOL><DEDENT>except SMTPException:<EOL><INDENT>error_message = _(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>token_cs_email=cs_email,<EOL>token_email=email,<EOL>token_enterprise_name=enterprise_name<EOL>)<EOL>LOGGER.error(error_message)<EOL>return Response(<EOL>{'<STR_LIT:error>': str('<STR_LIT>')},<EOL>status=HTTP_500_INTERNAL_SERVER_ERROR<EOL>)<EOL><DEDENT>", "docstring": "POST /enterprise/api/v1/request_codes\n\nRequires a JSON object of the following format:\n>>> {\n>>>     \"email\": \"bob@alice.com\",\n>>>     \"enterprise_name\": \"IBM\",\n>>>     \"number_of_codes\": \"50\"\n>>> }\n\nKeys:\n*email*\n    Email of the customer who has requested more 
codes.\n*enterprise_name*\n    The name of the enterprise requesting more codes.\n*number_of_codes*\n    The number of codes requested.", "id": "f16073:c13:m2"}
{"signature": "def get_serializer_class(self):", "body": "if self.request.method in ('<STR_LIT:GET>', ):<EOL><INDENT>return serializers.EnterpriseCourseEnrollmentReadOnlySerializer<EOL><DEDENT>return serializers.EnterpriseCourseEnrollmentWriteSerializer<EOL>", "docstring": "Use a special serializer for any requests that aren't read-only.", "id": "f16073:c6:m0"}
{"signature": "def ensure_data_exists(self, request, data, error_message=None):", "body": "if not data:<EOL><INDENT>error_message = (<EOL>error_message or \"<STR_LIT>\".format(request.get_full_path())<EOL>)<EOL>LOGGER.error(error_message)<EOL>raise NotFound(error_message)<EOL><DEDENT>", "docstring": "Ensure that the wrapped API client's response brings us valid data. If not, raise an error and log it.", "id": "f16073:c0:m0"}
{"signature": "@detail_route(url_path='<STR_LIT>'.format(COURSE_KEY_URL_PATTERN))<EOL><INDENT>@permission_required(<EOL>'<STR_LIT>',<EOL>fn=lambda request, pk, course_key: get_enterprise_customer_from_catalog_id(pk))<EOL>def course_detail(self, request, pk, course_key):  <DEDENT>", "body": "enterprise_customer_catalog = self.get_object()<EOL>course = enterprise_customer_catalog.get_course(course_key)<EOL>if not course:<EOL><INDENT>raise Http404<EOL><DEDENT>context = self.get_serializer_context()<EOL>context['<STR_LIT>'] = enterprise_customer_catalog<EOL>serializer = serializers.CourseDetailSerializer(course, context=context)<EOL>return Response(serializer.data)<EOL>", "docstring": "Return the metadata for the specified course.\n\nThe course needs to be included in the specified EnterpriseCustomerCatalog\nin order for metadata to be returned from this endpoint.", "id": "f16073:c10:m4"}
{"signature": "@list_route()<EOL><INDENT>@permission_required('<STR_LIT>')<EOL>def dashboard_list(self, request, *args, **kwargs):  <DEDENT>", "body": "self.queryset = self.queryset.order_by('<STR_LIT:name>')<EOL>enterprise_id = self.request.query_params.get('<STR_LIT>', None)<EOL>enterprise_slug = self.request.query_params.get('<STR_LIT>', None)<EOL>enterprise_name = self.request.query_params.get('<STR_LIT>', None)<EOL>if enterprise_id is not None:<EOL><INDENT>self.queryset = self.queryset.filter(uuid=enterprise_id)<EOL><DEDENT>elif enterprise_slug is not None:<EOL><INDENT>self.queryset = self.queryset.filter(slug=enterprise_slug)<EOL><DEDENT>elif enterprise_name is not None:<EOL><INDENT>self.queryset = self.queryset.filter(name__icontains=enterprise_name)<EOL><DEDENT>return self.list(request, *args, **kwargs)<EOL>", "docstring": "Supports listing dashboard enterprises for edx-portal frontend.", "id": "f16073:c5:m4"}
{"signature": "@detail_route()<EOL><INDENT>def entitlements(self, request, pk=None):  <DEDENT>", "body": "enterprise_customer_user = self.get_object()<EOL>instance = {\"<STR_LIT>\": enterprise_customer_user.entitlements}<EOL>serializer = serializers.EnterpriseCustomerUserEntitlementSerializer(instance, context={'<STR_LIT>': request})<EOL>return Response(serializer.data)<EOL>", "docstring": "Retrieve the list of entitlements available to this learner.\n\nOnly those entitlements are returned that satisfy enterprise customer's data sharing setting.\n\nArguments:\n    request (HttpRequest): Reference to in-progress request instance.\n    pk (Int): Primary key value of the selected enterprise learner.\n\nReturns:\n    (HttpResponse): Response object containing a list of learner's entitlements.", "id": "f16073:c7:m1"}
{"signature": "def list(self, request):", "body": "catalog_api = CourseCatalogApiClient(request.user)<EOL>catalogs = catalog_api.get_paginated_catalogs(request.GET)<EOL>self.ensure_data_exists(request, catalogs)<EOL>serializer = serializers.ResponsePaginationSerializer(catalogs)<EOL>return get_paginated_response(serializer.data, request)<EOL>", "docstring": "DRF view to list all catalogs.\n\nArguments:\n    request (HttpRequest): Current request\n\nReturns:\n    (Response): DRF response object containing course catalogs.", "id": "f16073:c11:m0"}
{"signature": "def get_required_query_params(self, request):", "body": "email = get_request_value(request, self.REQUIRED_PARAM_EMAIL, '<STR_LIT>')<EOL>enterprise_name = get_request_value(request, self.REQUIRED_PARAM_ENTERPRISE_NAME, '<STR_LIT>')<EOL>number_of_codes = get_request_value(request, self.OPTIONAL_PARAM_NUMBER_OF_CODES, '<STR_LIT>')<EOL>if not (email and enterprise_name):<EOL><INDENT>raise CodesAPIRequestError(<EOL>self.get_missing_params_message([<EOL>(self.REQUIRED_PARAM_EMAIL, bool(email)),<EOL>(self.REQUIRED_PARAM_ENTERPRISE_NAME, bool(enterprise_name)),<EOL>])<EOL>)<EOL><DEDENT>return email, enterprise_name, number_of_codes<EOL>", "docstring": "Gets ``email``, ``enterprise_name``, and ``number_of_codes``,\nwhich are the relevant parameters for this API endpoint.\n\n:param request: The request to this endpoint.\n:return: The ``email``, ``enterprise_name``, and ``number_of_codes`` from the request.", "id": "f16073:c13:m0"}
{"signature": "@detail_route()<EOL><INDENT>@permission_required('<STR_LIT>', fn=lambda request, pk: pk)<EOL>def courses(self, request, pk=None):  <DEDENT>", "body": "enterprise_customer = self.get_object()<EOL>self.check_object_permissions(request, enterprise_customer)<EOL>self.ensure_data_exists(<EOL>request,<EOL>enterprise_customer.catalog,<EOL>error_message=\"<STR_LIT>\".format(<EOL>enterprise_name=enterprise_customer.name,<EOL>path=request.get_full_path()<EOL>)<EOL>)<EOL>catalog_api = CourseCatalogApiClient(request.user, enterprise_customer.site)<EOL>courses = catalog_api.get_paginated_catalog_courses(enterprise_customer.catalog, request.GET)<EOL>self.ensure_data_exists(<EOL>request,<EOL>courses,<EOL>error_message=(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(<EOL>enterprise_name=enterprise_customer.name,<EOL>path=request.get_full_path()<EOL>)<EOL>)<EOL>)<EOL>serializer = serializers.EnterpriseCatalogCoursesReadOnlySerializer(courses)<EOL>serializer.update_enterprise_courses(enterprise_customer, catalog_id=enterprise_customer.catalog)<EOL>return get_paginated_response(serializer.data, request)<EOL>", "docstring": "Retrieve the list of courses contained within the catalog linked to this enterprise.\n\nOnly courses with active course runs are returned. A course run is considered active if it is currently\nopen for enrollment, or will open in the future.", "id": "f16073:c5:m1"}
{"signature": "@method_decorator(require_at_least_one_query_parameter('<STR_LIT>', '<STR_LIT>'))<EOL><INDENT>@detail_route()<EOL>def contains_content_items(self, request, pk, course_run_ids, program_uuids):<DEDENT>", "body": "enterprise_customer_catalog = self.get_object()<EOL>course_run_ids = [unquote(quote_plus(course_run_id)) for course_run_id in course_run_ids]<EOL>contains_content_items = True<EOL>if course_run_ids:<EOL><INDENT>contains_content_items = enterprise_customer_catalog.contains_courses(course_run_ids)<EOL><DEDENT>if program_uuids:<EOL><INDENT>contains_content_items = (<EOL>contains_content_items and<EOL>enterprise_customer_catalog.contains_programs(program_uuids)<EOL>)<EOL><DEDENT>return Response({'<STR_LIT>': contains_content_items})<EOL>", "docstring": "Return whether or not the EnterpriseCustomerCatalog contains the specified content.\n\nMultiple course_run_ids and/or program_uuids query parameters can be sent to this view to check\nfor their existence in the EnterpriseCustomerCatalog. At least one course run key\nor program UUID value must be included in the request.", "id": "f16073:c10:m3"}
{"signature": "@detail_route(methods=['<STR_LIT>'], permission_classes=[<EOL>permissions.IsAuthenticated,<EOL>HasEnterpriseEnrollmentAPIAccess,<EOL>])<EOL><INDENT>@permission_required('<STR_LIT>', fn=lambda request, pk: pk)<EOL>def course_enrollments(self, request, pk):<DEDENT>", "body": "enterprise_customer = self.get_object()<EOL>serializer = serializers.EnterpriseCustomerCourseEnrollmentsSerializer(<EOL>data=request.data,<EOL>many=True,<EOL>context={<EOL>'<STR_LIT>': enterprise_customer,<EOL>'<STR_LIT>': request.user,<EOL>}<EOL>)<EOL>if serializer.is_valid():<EOL><INDENT>serializer.save()<EOL>return Response(serializer.data, status=HTTP_200_OK)<EOL><DEDENT>return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)<EOL>", "docstring": "Creates a course enrollment for an EnterpriseCustomerUser.", "id": "f16073:c5:m2"}
{"signature": "def enterprise_customer_required(view):", "body": "@wraps(view)<EOL>def wrapper(request, *args, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>user = request.user<EOL>enterprise_customer = get_enterprise_customer_for_user(user)<EOL>if enterprise_customer:<EOL><INDENT>args = args + (enterprise_customer,)<EOL>return view(request, *args, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>raise PermissionDenied(<EOL>'<STR_LIT>'.format(<EOL>username=user.username<EOL>)<EOL>)<EOL><DEDENT><DEDENT>return wrapper<EOL>", "docstring": "Ensure the user making the API request is associated with an EnterpriseCustomer.\n\nThis decorator attempts to find an EnterpriseCustomer associated with the requesting\nuser and passes that EnterpriseCustomer to the view as a parameter. It will return a\nPermissionDenied error if an EnterpriseCustomer cannot be found.\n\nUsage::\n    @enterprise_customer_required()\n    def my_view(request, enterprise_customer):\n        # Some functionality ...\n\n    OR\n\n    class MyView(View):\n        ...\n        @method_decorator(enterprise_customer_required)\n        def get(self, request, enterprise_customer):\n            # Some functionality ...", "id": "f16076:m0"}
{"signature": "def require_at_least_one_query_parameter(*query_parameter_names):", "body": "def outer_wrapper(view):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>@wraps(view)<EOL>def wrapper(request, *args, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>requirement_satisfied = False<EOL>for query_parameter_name in query_parameter_names:<EOL><INDENT>query_parameter_values = request.query_params.getlist(query_parameter_name)<EOL>kwargs[query_parameter_name] = query_parameter_values<EOL>if query_parameter_values:<EOL><INDENT>requirement_satisfied = True<EOL><DEDENT><DEDENT>if not requirement_satisfied:<EOL><INDENT>raise ValidationError(<EOL>detail='<STR_LIT>'.format(<EOL>params='<STR_LIT:U+002CU+0020>'.join(query_parameter_names)<EOL>)<EOL>)<EOL><DEDENT>return view(request, *args, **kwargs)<EOL><DEDENT>return wrapper<EOL><DEDENT>return outer_wrapper<EOL>", "docstring": "Ensure at least one of the specified query parameters are included in the request.\n\nThis decorator checks for the existence of at least one of the specified query\nparameters and passes the values as function parameters to the decorated view.\nIf none of the specified query parameters are included in the request, a\nValidationError is raised.\n\nUsage::\n    @require_at_least_one_query_parameter('program_uuids', 'course_run_ids')\n    def my_view(request, program_uuids, course_run_ids):\n        # Some functionality ...", "id": "f16076:m1"}
{"signature": "def __init__(self):", "body": "self.ALLOWED_API_GROUPS = [u'<STR_LIT>', ]<EOL>", "docstring": "Initialize the class with a API_ALLOWED_GROUPS", "id": "f16077:c3:m0"}
{"signature": "def get_paginated_response(data, request):", "body": "url = urlparse(request.build_absolute_uri())._replace(query=None).geturl()<EOL>next_page = None<EOL>previous_page = None<EOL>if data['<STR_LIT>']:<EOL><INDENT>next_page = \"<STR_LIT>\".format(<EOL>base_url=url,<EOL>query_parameters=urlparse(data['<STR_LIT>']).query,<EOL>)<EOL>next_page = next_page.rstrip('<STR_LIT:?>')<EOL><DEDENT>if data['<STR_LIT>']:<EOL><INDENT>previous_page = \"<STR_LIT>\".format(<EOL>base_url=url,<EOL>query_parameters=urlparse(data['<STR_LIT>'] or \"<STR_LIT>\").query,<EOL>)<EOL>previous_page = previous_page.rstrip('<STR_LIT:?>')<EOL><DEDENT>return Response(OrderedDict([<EOL>('<STR_LIT:count>', data['<STR_LIT:count>']),<EOL>('<STR_LIT>', next_page),<EOL>('<STR_LIT>', previous_page),<EOL>('<STR_LIT>', data['<STR_LIT>'])<EOL>]))<EOL>", "docstring": "Update pagination links in course catalog data and return DRF Response.\n\nArguments:\n    data (dict): Dictionary containing catalog courses.\n    request (HttpRequest): Current request object.\n\nReturns:\n    (Response): DRF response object containing pagination links.", "id": "f16078:m0"}
{"signature": "def get_enterprise_customer_from_catalog_id(catalog_id):", "body": "try:<EOL><INDENT>return str(EnterpriseCustomerCatalog.objects.get(pk=catalog_id).enterprise_customer.uuid)<EOL><DEDENT>except EnterpriseCustomerCatalog.DoesNotExist:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Get the enterprise customer id given an enterprise customer catalog id.", "id": "f16081:m1"}
{"signature": "def _get_enterprise_catalog_admin_users_batch(self, start, end):", "body": "Application = apps.get_model(OAUTH2_PROVIDER_APPLICATION_MODEL)     <EOL>LOGGER.info('<STR_LIT>', start, end)<EOL>catalog_admin_user_ids = Application.objects.filter(<EOL>user_id__in=self._get_enterprise_customer_user_ids()<EOL>).exclude(name=EDX_ORG_NAME).values('<STR_LIT>')<EOL>return User.objects.filter(pk__in=catalog_admin_user_ids)[start:end]<EOL>", "docstring": "Returns a batched queryset of User objects.", "id": "f16084:c0:m6"}
{"signature": "def _get_enterprise_admin_users_batch(self, start, end):", "body": "LOGGER.info('<STR_LIT>', start, end)<EOL>return User.objects.filter(groups__name=ENTERPRISE_DATA_API_ACCESS_GROUP, is_staff=False)[start:end]<EOL>", "docstring": "Returns a batched queryset of User objects.", "id": "f16084:c0:m2"}
{"signature": "def _assign_enterprise_role_to_users(self, _get_batch_method, options, is_feature_role=False):", "body": "role_name = options['<STR_LIT>']<EOL>batch_limit = options['<STR_LIT>']<EOL>batch_sleep = options['<STR_LIT>']<EOL>batch_offset = options['<STR_LIT>']<EOL>current_batch_index = batch_offset<EOL>users_batch = _get_batch_method(<EOL>batch_offset,<EOL>batch_offset + batch_limit<EOL>)<EOL>role_class = SystemWideEnterpriseRole<EOL>role_assignment_class = SystemWideEnterpriseUserRoleAssignment<EOL>if is_feature_role:<EOL><INDENT>role_class = EnterpriseFeatureRole<EOL>role_assignment_class = EnterpriseFeatureUserRoleAssignment<EOL><DEDENT>enterprise_role = role_class.objects.get(name=role_name)<EOL>while users_batch.count() > <NUM_LIT:0>:<EOL><INDENT>for index, user in enumerate(users_batch):<EOL><INDENT>LOGGER.info(<EOL>'<STR_LIT>',<EOL>current_batch_index + index, user.id<EOL>)<EOL>role_assignment_class.objects.get_or_create(<EOL>user=user,<EOL>role=enterprise_role<EOL>)<EOL><DEDENT>sleep(batch_sleep)<EOL>current_batch_index += len(users_batch)<EOL>users_batch = _get_batch_method(<EOL>current_batch_index,<EOL>current_batch_index + batch_limit<EOL>)<EOL><DEDENT>", "docstring": "Assigns enterprise role to users.", "id": "f16084:c0:m7"}
{"signature": "def _get_enterprise_customer_users_batch(self, start, end):", "body": "LOGGER.info('<STR_LIT>', start, end)<EOL>return User.objects.filter(pk__in=self._get_enterprise_customer_user_ids())[start:end]<EOL>", "docstring": "Returns a batched queryset of EnterpriseCustomerUser objects.", "id": "f16084:c0:m4"}
{"signature": "def _get_enterprise_customer_user_ids(self):", "body": "return EnterpriseCustomerUser.objects.values('<STR_LIT>')<EOL>", "docstring": "Returns a queryset containing user ids.", "id": "f16084:c0:m1"}
{"signature": "def _get_enterprise_enrollment_api_admin_users_batch(self, start, end):     ", "body": "LOGGER.info('<STR_LIT>', start, end)<EOL>return User.objects.filter(groups__name=ENTERPRISE_ENROLLMENT_API_ACCESS_GROUP, is_staff=False)[start:end]<EOL>", "docstring": "Returns a batched queryset of User objects.", "id": "f16084:c0:m5"}
{"signature": "def _dictfetchall(self, cursor):", "body": "columns = [col[<NUM_LIT:0>] for col in cursor.description]<EOL>return [<EOL>dict(zip(columns, row))<EOL>for row in cursor.fetchall()<EOL>]<EOL>", "docstring": "Return all rows from a cursor as a dict.", "id": "f16086:c0:m3"}
{"signature": "def _fetch_course_enrollment_data(self, enterprise_customer_uuid):", "body": "query = '''<STR_LIT>'''<EOL>with connection.cursor() as cursor:<EOL><INDENT>if enterprise_customer_uuid:<EOL><INDENT>cursor.execute(<EOL>query.format(enterprise_customer_filter='<STR_LIT>'),<EOL>[enterprise_customer_uuid]<EOL>)<EOL><DEDENT>else:<EOL><INDENT>cursor.execute(<EOL>query.format(enterprise_customer_filter='<STR_LIT>')<EOL>)<EOL><DEDENT>return self._dictfetchall(cursor)<EOL><DEDENT>", "docstring": "Return enterprise customer UUID/user_id/course_run_id triples which represent CourseEnrollment records\nwhich do not have a matching EnterpriseCourseEnrollment record.\n\nThe query used below looks for CourseEnrollment records that are associated with enterprise\nlearners where the enrollment data is after the creation of the link between the learner\nand the enterprise. It also excludes learners with edx.org email addresses in order to\nfilter out test users.", "id": "f16086:c0:m2"}
{"signature": "def get_enterprise_customer_for_sso(sso_provider_id):", "body": "try:<EOL><INDENT>return EnterpriseCustomer.objects.get(  <EOL>enterprise_customer_identity_provider__provider_id=sso_provider_id<EOL>)<EOL><DEDENT>except EnterpriseCustomer.DoesNotExist:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Get the EnterpriseCustomer object tied to an identity provider.", "id": "f16087:m1"}
{"signature": "def get_user_from_social_auth(tpa_provider, tpa_username):", "body": "user_social_auth = UserSocialAuth.objects.select_related('<STR_LIT:user>').filter(<EOL>user__username=tpa_username, provider=tpa_provider.backend_name<EOL>).first()<EOL>return user_social_auth.user if user_social_auth else None<EOL>", "docstring": "Find the LMS user from the LMS model `UserSocialAuth`.\n\nArguments:\n    tpa_provider (third_party_auth.provider): third party auth provider object\n    tpa_username (str): Username returned by the third party auth", "id": "f16087:m3"}
{"signature": "def get_idp_choices():", "body": "try:<EOL><INDENT>from third_party_auth.provider import Registry   <EOL><DEDENT>except ImportError as exception:<EOL><INDENT>LOGGER.warning(\"<STR_LIT>\")<EOL>LOGGER.warning(exception)<EOL>Registry = None  <EOL><DEDENT>first = [(\"<STR_LIT>\", \"<STR_LIT:->\" * <NUM_LIT:7>)]<EOL>if Registry:<EOL><INDENT>return first + [(idp.provider_id, idp.name) for idp in Registry.enabled()]<EOL><DEDENT>return None<EOL>", "docstring": "Get a list of identity providers choices for enterprise customer.\n\nReturn:\n    A list of choices of all identity providers, None if it can not get any available identity provider.", "id": "f16088:m1"}
{"signature": "def update_query_parameters(url, query_parameters):", "body": "scheme, netloc, path, query_string, fragment = urlsplit(url)<EOL>url_params = parse_qs(query_string)<EOL>url_params.update(query_parameters)<EOL>return urlunsplit(<EOL>(scheme, netloc, path, urlencode(sorted(url_params.items()), doseq=True), fragment),<EOL>)<EOL>", "docstring": "Return url with updated query parameters.\n\nArguments:\n    url (str): Original url whose query parameters need to be updated.\n    query_parameters (dict): A dictionary containing query parameters to be added to course selection url.\n\nReturns:\n    (slug): slug identifier for the identity provider that can be used for identity verification of\n        users associated the enterprise customer of the given user.", "id": "f16088:m12"}
{"signature": "def get_current_course_run(course, users_active_course_runs):", "body": "current_course_run = None<EOL>filtered_course_runs = []<EOL>all_course_runs = course['<STR_LIT>']<EOL>if users_active_course_runs:<EOL><INDENT>current_course_run = get_closest_course_run(users_active_course_runs)<EOL><DEDENT>else:<EOL><INDENT>for course_run in all_course_runs:<EOL><INDENT>if is_course_run_enrollable(course_run) and is_course_run_upgradeable(course_run):<EOL><INDENT>filtered_course_runs.append(course_run)<EOL><DEDENT><DEDENT>if not filtered_course_runs:<EOL><INDENT>filtered_course_runs = all_course_runs<EOL><DEDENT>if filtered_course_runs:<EOL><INDENT>current_course_run = get_closest_course_run(filtered_course_runs)<EOL><DEDENT><DEDENT>return current_course_run<EOL>", "docstring": "Return the current course run on the following conditions.\n\n- If user has active course runs (already enrolled) then return course run with closest start date\nOtherwise it will check the following logic:\n- Course run is enrollable (see is_course_run_enrollable)\n- Course run has a verified seat and the upgrade deadline has not expired.\n- Course run start date is closer to now than any other enrollable/upgradeable course runs.\n- If no enrollable/upgradeable course runs, return course run with most recent start date.", "id": "f16088:m33"}
{"signature": "def get_catalog_admin_url_template(mode='<STR_LIT>'):", "body": "api_base_url = getattr(settings, \"<STR_LIT>\", \"<STR_LIT>\")<EOL>match = re.match(r\"<STR_LIT>\", api_base_url)<EOL>if not match:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>if mode == '<STR_LIT>':<EOL><INDENT>return match.group(\"<STR_LIT>\").rstrip(\"<STR_LIT:/>\") + \"<STR_LIT>\"<EOL><DEDENT>elif mode == '<STR_LIT>':<EOL><INDENT>return match.group(\"<STR_LIT>\").rstrip(\"<STR_LIT:/>\") + \"<STR_LIT>\"<EOL><DEDENT>", "docstring": "Get template of catalog admin url.\n\nURL template will contain a placeholder '{catalog_id}' for catalog id.\nArguments:\n    mode e.g. change/add.\n\nReturns:\n    A string containing template for catalog url.\n\nExample:\n    >>> get_catalog_admin_url_template('change')\n    \"http://localhost:18381/admin/catalogs/catalog/{catalog_id}/change/\"", "id": "f16088:m4"}
{"signature": "def is_course_run_enrollable(course_run):", "body": "now = datetime.datetime.now(pytz.UTC)<EOL>end = parse_datetime_handle_invalid(course_run.get('<STR_LIT:end>'))<EOL>enrollment_start = parse_datetime_handle_invalid(course_run.get('<STR_LIT>'))<EOL>enrollment_end = parse_datetime_handle_invalid(course_run.get('<STR_LIT>'))<EOL>return (not end or end > now) and(not enrollment_start or enrollment_start < now) and(not enrollment_end or enrollment_end > now)<EOL>", "docstring": "Return true if the course run is enrollable, false otherwise.\n\nWe look for the following criteria:\n- end is greater than now OR null\n- enrollment_start is less than now OR null\n- enrollment_end is greater than now OR null", "id": "f16088:m28"}
{"signature": "def get_cache_key(**kwargs):", "body": "key = '<STR_LIT>'.join(['<STR_LIT>'.format(item, value) for item, value in iteritems(kwargs)])<EOL>return hashlib.md5(key.encode('<STR_LIT:utf-8>')).hexdigest()<EOL>", "docstring": "Get MD5 encoded cache key for given arguments.\n\nHere is the format of key before MD5 encryption.\n    key1:value1__key2:value2 ...\n\nExample:\n    >>> get_cache_key(site_domain=\"example.com\", resource=\"enterprise\")\n    # Here is key format for above call\n    # \"site_domain:example.com__resource:enterprise\"\n    a54349175618ff1659dee0978e3149ca\n\nArguments:\n    **kwargs: Key word arguments that need to be present in cache key.\n\nReturns:\n     An MD5 encoded key uniquely identified by the key word arguments.", "id": "f16088:m16"}
{"signature": "def get_enterprise_customer_user(user_id, enterprise_uuid):", "body": "EnterpriseCustomerUser = apps.get_model('<STR_LIT>', '<STR_LIT>')  <EOL>try:<EOL><INDENT>return EnterpriseCustomerUser.objects.get(  <EOL>enterprise_customer__uuid=enterprise_uuid,<EOL>user_id=user_id<EOL>)<EOL><DEDENT>except EnterpriseCustomerUser.DoesNotExist:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Return the object for EnterpriseCustomerUser.\n\nArguments:\n    user_id (str): user identifier\n    enterprise_uuid (UUID): Universally unique identifier for the enterprise customer.\n\nReturns:\n    (EnterpriseCustomerUser): enterprise customer user record", "id": "f16088:m10"}
{"signature": "def get_closest_course_run(course_runs):", "body": "if len(course_runs) == <NUM_LIT:1>:<EOL><INDENT>return course_runs[<NUM_LIT:0>]<EOL><DEDENT>now = datetime.datetime.now(pytz.UTC)<EOL>never = now - datetime.timedelta(days=<NUM_LIT>)<EOL>return min(course_runs, key=lambda x: abs(get_course_run_start(x, never) - now))<EOL>", "docstring": "Return course run with start date closest to now.", "id": "f16088:m31"}
{"signature": "def get_all_field_names(model):", "body": "return [f.name for f in model._meta.get_fields()]<EOL>", "docstring": "Return all fields' names from a model.\n\nAccording to `Django documentation`_, ``get_all_field_names`` should become some monstrosity with chained\niterable ternary nested in a list comprehension. For now, a simpler version of iterating over fields and\ngetting their names work, but we might have to switch to full version in future.\n\n.. _Django documentation: https://docs.djangoproject.com/en/1.8/ref/models/meta/", "id": "f16088:m2"}
{"signature": "def parse_course_key(course_identifier):", "body": "try:<EOL><INDENT>course_run_key = CourseKey.from_string(course_identifier)<EOL><DEDENT>except InvalidKeyError:<EOL><INDENT>return course_identifier<EOL><DEDENT>return quote_plus('<STR_LIT:U+0020>'.join([course_run_key.org, course_run_key.course]))<EOL>", "docstring": "Return the serialized course key given either a course run ID or course key.", "id": "f16088:m35"}
{"signature": "def clean_html_for_template_rendering(text):", "body": "<EOL>return text.replace('<STR_LIT>', '<STR_LIT>')<EOL>", "docstring": "Given html text that will be rendered as a variable in a template, strip out characters that impact rendering.\n\nArguments:\n    text (str): The text to clean.\n\nReturns:\n    (str): The cleaned text.", "id": "f16088:m15"}
{"signature": "def get_enterprise_customer_or_404(enterprise_uuid):", "body": "EnterpriseCustomer = apps.get_model('<STR_LIT>', '<STR_LIT>')  <EOL>try:<EOL><INDENT>enterprise_uuid = UUID(enterprise_uuid)<EOL>return EnterpriseCustomer.objects.get(uuid=enterprise_uuid)  <EOL><DEDENT>except (TypeError, ValueError, EnterpriseCustomer.DoesNotExist):<EOL><INDENT>LOGGER.error('<STR_LIT>', enterprise_uuid)<EOL>raise Http404<EOL><DEDENT>", "docstring": "Given an EnterpriseCustomer UUID, return the corresponding EnterpriseCustomer or raise a 404.\n\nArguments:\n    enterprise_uuid (str): The UUID (in string form) of the EnterpriseCustomer to fetch.\n\nReturns:\n    (EnterpriseCustomer): The EnterpriseCustomer given the UUID.", "id": "f16088:m14"}
{"signature": "def __init__(self, programs_matched, *args, **kwargs):", "body": "super(MultipleProgramMatchError, self).__init__(*args, **kwargs)<EOL>self.programs_matched = programs_matched<EOL>", "docstring": "Initialize :class:`MultipleProgramMatchError`.\n\nArguments:\n    programs_matched (int): number of programs matched where one  proram was expected.\n    args (iterable): variable arguments\n    kwargs (dict): keyword arguments", "id": "f16088:c2:m0"}
{"signature": "def is_course_run_upgradeable(course_run):", "body": "now = datetime.datetime.now(pytz.UTC)<EOL>for seat in course_run.get('<STR_LIT>', []):<EOL><INDENT>if seat.get('<STR_LIT:type>') == '<STR_LIT>':<EOL><INDENT>upgrade_deadline = parse_datetime_handle_invalid(seat.get('<STR_LIT>'))<EOL>return not upgrade_deadline or upgrade_deadline > now<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Return true if the course run has a verified seat with an unexpired upgrade deadline, false otherwise.", "id": "f16088:m29"}
{"signature": "def get_request_value(request, key, default=None):", "body": "if request.method in ['<STR_LIT:GET>', '<STR_LIT>']:<EOL><INDENT>return request.query_params.get(key, request.data.get(key, default))<EOL><DEDENT>return request.data.get(key, request.query_params.get(key, default))<EOL>", "docstring": "Get the value in the request, either through query parameters or posted data, from a key.\n\n:param request: The request from which the value should be gotten.\n:param key: The key to use to get the desired value.\n:param default: The backup value to use in case the input key cannot help us get the value.\n:return: The value we're looking for.", "id": "f16088:m22"}
{"signature": "def get_content_metadata_item_id(content_metadata_item):", "body": "if content_metadata_item['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>return content_metadata_item['<STR_LIT>']<EOL><DEDENT>return content_metadata_item['<STR_LIT:key>']<EOL>", "docstring": "Return the unique identifier given a content metadata item dictionary.", "id": "f16088:m36"}
{"signature": "def strip_html_tags(text, allowed_tags=None):", "body": "if text is None:<EOL><INDENT>return<EOL><DEDENT>if allowed_tags is None:<EOL><INDENT>allowed_tags = ALLOWED_TAGS<EOL><DEDENT>return bleach.clean(text, tags=allowed_tags, attributes=['<STR_LIT:id>', '<STR_LIT:class>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:title>'], strip=True)<EOL>", "docstring": "Strip all tags from a string except those tags provided in `allowed_tags` parameter.\n\nArgs:\n    text (str): string to strip html tags from\n    allowed_tags (list): allowed list of html tags\n\nReturns: a string without html tags", "id": "f16088:m34"}
{"signature": "def get_catalog_admin_url(catalog_id):", "body": "return get_catalog_admin_url_template().format(catalog_id=catalog_id)<EOL>", "docstring": "Get url to catalog details admin page.\n\nArguments:\n    catalog_id (int): Catalog id for which to return catalog details url.\n\nReturns:\n     URL pointing to catalog details admin page for the give catalog id.\n\nExample:\n    >>> get_catalog_admin_url_template(2)\n    \"http://localhost:18381/admin/catalogs/catalog/2/change/\"", "id": "f16088:m3"}
{"signature": "def format_price(price, currency='<STR_LIT:$>'):", "body": "if int(price) == price:<EOL><INDENT>return '<STR_LIT>'.format(currency, int(price))<EOL><DEDENT>return '<STR_LIT>'.format(currency, price)<EOL>", "docstring": "Format the price to have the appropriate currency and digits..\n\n:param price: The price amount.\n:param currency: The currency for the price.\n:return: A formatted price string, i.e. '$10', '$10.52'.", "id": "f16088:m19"}
{"signature": "def track_enrollment(pathway, user_id, course_run_id, url_path=None):", "body": "track_event(user_id, '<STR_LIT>', {<EOL>'<STR_LIT>': pathway,<EOL>'<STR_LIT>': url_path,<EOL>'<STR_LIT>': course_run_id,<EOL>})<EOL>", "docstring": "Emit a track event for enterprise course enrollment.", "id": "f16088:m26"}
{"signature": "def get_course_track_selection_url(course_run, query_parameters):", "body": "try:<EOL><INDENT>course_root = reverse('<STR_LIT>', kwargs={'<STR_LIT>': course_run['<STR_LIT:key>']})<EOL><DEDENT>except KeyError:<EOL><INDENT>LOGGER.exception(<EOL>\"<STR_LIT>\", course_run,<EOL>)<EOL>raise<EOL><DEDENT>url = '<STR_LIT>'.format(<EOL>settings.LMS_ROOT_URL,<EOL>course_root<EOL>)<EOL>course_run_url = update_query_parameters(url, query_parameters)<EOL>return course_run_url<EOL>", "docstring": "Return track selection url for the given course.\n\nArguments:\n    course_run (dict): A dictionary containing course run metadata.\n    query_parameters (dict): A dictionary containing query parameters to be added to course selection url.\n\nRaises:\n    (KeyError): Raised when course run dict does not have 'key' key.\n\nReturns:\n    (str): Course track selection url.", "id": "f16088:m11"}
{"signature": "def get_identity_provider(provider_id):", "body": "try:<EOL><INDENT>from third_party_auth.provider import Registry   <EOL><DEDENT>except ImportError as exception:<EOL><INDENT>LOGGER.warning(\"<STR_LIT>\")<EOL>LOGGER.warning(exception)<EOL>Registry = None  <EOL><DEDENT>try:<EOL><INDENT>return Registry and Registry.get(provider_id)<EOL><DEDENT>except ValueError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Get Identity Provider with given id.\n\nReturn:\n    Instance of ProviderConfig or None.", "id": "f16088:m0"}
{"signature": "def get_enterprise_utm_context(enterprise_customer):", "body": "return {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': slugify(enterprise_customer.name)<EOL>}<EOL>", "docstring": "Get the UTM context for the enterprise.", "id": "f16088:m24"}
{"signature": "def here(*args):", "body": "return join(abspath(dirname(__file__)), *args)<EOL>", "docstring": "Return the absolute path to a directory from this file.", "id": "f16089:m0"}
{"signature": "def root(*args):", "body": "return abspath(join(abspath(here('<STR_LIT>')), *args))<EOL>", "docstring": "Return the absolute path to some file from the project's root.", "id": "f16089:m1"}
{"signature": "def link_user(self, enterprise_customer, user_email):", "body": "try:<EOL><INDENT>existing_user = User.objects.get(email=user_email)<EOL>self.get_or_create(enterprise_customer=enterprise_customer, user_id=existing_user.id)<EOL><DEDENT>except User.DoesNotExist:<EOL><INDENT>PendingEnterpriseCustomerUser.objects.get_or_create(enterprise_customer=enterprise_customer,<EOL>user_email=user_email)<EOL><DEDENT>", "docstring": "Link user email to Enterprise Customer.\n\nIf :class:`django.contrib.auth.models.User` instance with specified email does not exist,\n:class:`.PendingEnterpriseCustomerUser` instance is created instead.", "id": "f16090:c3:m1"}
{"signature": "def contains_courses(self, content_ids):", "body": "<EOL>course_keys = {parse_course_key(k) for k in content_ids}<EOL>content_ids_in_catalog = self.content_filter_ids<EOL>if not content_ids_in_catalog:<EOL><INDENT>content_ids_in_catalog = self._filter_members('<STR_LIT:key>', list(course_keys))<EOL><DEDENT>return set(content_ids).issubset(content_ids_in_catalog) or course_keys.issubset(content_ids_in_catalog)<EOL>", "docstring": "Return true if this catalog contains the given courses.\n\nThe content_ids parameter should be a list containing course keys\nand/or course run ids.", "id": "f16090:c11:m5"}
{"signature": "def get_course_enrollment_url(self, course_key):", "body": "url = urljoin(<EOL>get_configuration_value('<STR_LIT>', settings.LMS_ROOT_URL),<EOL>reverse(<EOL>'<STR_LIT>',<EOL>kwargs={'<STR_LIT>': self.uuid, '<STR_LIT>': course_key}<EOL>)<EOL>)<EOL>return utils.update_query_parameters(url, utils.get_enterprise_utm_context(self))<EOL>", "docstring": "Return enterprise landing page url for the given course.\n\nArguments:\n    course_key (str): The course key for the course to be displayed.\nReturns:\n    (str): Enterprise landing page url.", "id": "f16090:c2:m8"}
{"signature": "def clean(self):", "body": "validation_errors = {}<EOL>if self.frequency == self.FREQUENCY_TYPE_DAILY:<EOL><INDENT>self.day_of_month = None<EOL>self.day_of_week = None<EOL><DEDENT>elif self.frequency == self.FREQUENCY_TYPE_WEEKLY:<EOL><INDENT>if self.day_of_week is None or self.day_of_week == '<STR_LIT>':<EOL><INDENT>validation_errors['<STR_LIT>'] = _('<STR_LIT>')<EOL><DEDENT>self.day_of_month = None<EOL><DEDENT>elif self.frequency == self.FREQUENCY_TYPE_MONTHLY:<EOL><INDENT>if not self.day_of_month:<EOL><INDENT>validation_errors['<STR_LIT>'] = _('<STR_LIT>')<EOL><DEDENT>self.day_of_week = None<EOL><DEDENT>else:<EOL><INDENT>validation_errors[NON_FIELD_ERRORS] = _('<STR_LIT>')<EOL><DEDENT>if self.delivery_method == self.DELIVERY_METHOD_EMAIL:<EOL><INDENT>if not self.email:<EOL><INDENT>validation_errors['<STR_LIT:email>'] = _(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>if not self.decrypted_password:<EOL><INDENT>validation_errors['<STR_LIT>'] = _(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT><DEDENT>elif self.delivery_method == self.DELIVERY_METHOD_SFTP:<EOL><INDENT>if not self.sftp_hostname:<EOL><INDENT>validation_errors['<STR_LIT>'] = _('<STR_LIT>')<EOL><DEDENT>if not self.sftp_username:<EOL><INDENT>validation_errors['<STR_LIT>'] = _('<STR_LIT>')<EOL><DEDENT>if not self.sftp_file_path:<EOL><INDENT>validation_errors['<STR_LIT>'] = _('<STR_LIT>')<EOL><DEDENT>if not self.decrypted_sftp_password:<EOL><INDENT>validation_errors['<STR_LIT>'] = _(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT><DEDENT>if validation_errors:<EOL><INDENT>raise ValidationError(validation_errors)<EOL><DEDENT>", "docstring": "Override of clean method to perform additional validation on frequency and day_of_month/day_of week.", "id": "f16090:c13:m4"}
{"signature": "@property<EOL><INDENT>def data_sharing_consent_records(self):<DEDENT>", "body": "DataSharingConsent = apps.get_model('<STR_LIT>', '<STR_LIT>')  <EOL>return DataSharingConsent.objects.filter(<EOL>enterprise_customer=self.enterprise_customer,<EOL>username=self.username<EOL>)<EOL>", "docstring": "Return the DataSharingConsent records associated with this EnterpriseCustomerUser.\n\nReturns:\n    QuerySet (DataSharingConsent): The filtered DataSharingConsent QuerySet.", "id": "f16090:c4:m4"}
{"signature": "def unenroll(self, course_run_id):", "body": "enrollment_api_client = EnrollmentApiClient()<EOL>if enrollment_api_client.unenroll_user_from_course(self.username, course_run_id):<EOL><INDENT>EnterpriseCourseEnrollment.objects.filter(enterprise_customer_user=self, course_id=course_run_id).delete()<EOL>return True<EOL><DEDENT>return False<EOL>", "docstring": "Unenroll a user from a course track.", "id": "f16090:c4:m9"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return uniquely identifying string representation.", "id": "f16090:c8:m1"}
{"signature": "@property<EOL><INDENT>def provider_name(self):<DEDENT>", "body": "identity_provider = utils.get_identity_provider(self.provider_id)<EOL>return identity_provider and identity_provider.name<EOL>", "docstring": "Readable name for the identity provider.", "id": "f16090:c8:m2"}
{"signature": "def __str__(self):", "body": "return '<STR_LIT>'.format(<EOL>self.enterprise_customer_user.user.username,<EOL>self.course_id<EOL>)<EOL>", "docstring": "Create string representation of the enrollment.", "id": "f16090:c10:m2"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return uniquely identifying string representation.", "id": "f16090:c5:m1"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return string representation of the enrollment.", "id": "f16090:c10:m3"}
{"signature": "def __str__(self):", "body": "return self.name<EOL>", "docstring": "Return human-readable string representation.", "id": "f16090:c1:m0"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return uniquely identifying string representation.", "id": "f16090:c17:m1"}
{"signature": "def get_default_customer_type():", "body": "enterprise_customer_type, __ = EnterpriseCustomerType.objects.get_or_create(<EOL>name='<STR_LIT>'<EOL>)<EOL>return enterprise_customer_type.id<EOL>", "docstring": "Get default enterprise customer type id to use when creating a new EnterpriseCustomer model.", "id": "f16090:m0"}
{"signature": "def get_course_enrollment_url(self, course_key):", "body": "url = self.enterprise_customer.get_course_enrollment_url(course_key)<EOL>if self.publish_audit_enrollment_urls:<EOL><INDENT>url = utils.update_query_parameters(url, {'<STR_LIT>': '<STR_LIT:true>'})<EOL><DEDENT>return utils.update_query_parameters(url, {'<STR_LIT>': self.uuid})<EOL>", "docstring": "Return enterprise course enrollment page url with the catalog information for the given course.\n\nArguments:\n    course_key (str): The course key for the course to be displayed.\n\nReturns:\n    (str): Enterprise landing page url.", "id": "f16090:c11:m11"}
{"signature": "def get_course_run_enrollment_url(self, course_run_key):", "body": "url = self.enterprise_customer.get_course_run_enrollment_url(course_run_key)<EOL>if self.publish_audit_enrollment_urls:<EOL><INDENT>url = utils.update_query_parameters(url, {'<STR_LIT>': '<STR_LIT:true>'})<EOL><DEDENT>return utils.update_query_parameters(url, {'<STR_LIT>': self.uuid})<EOL>", "docstring": "Return enterprise course enrollment page url with the catalog information for the given course.\n\nArguments:\n    course_run_key (str): The course run id for the course to be displayed.\n\nReturns:\n    (str): Enterprise landing page url.", "id": "f16090:c11:m12"}
{"signature": "def render_plaintext_template(self, kwargs):", "body": "return self.render_template(self.plaintext_template, kwargs)<EOL>", "docstring": "Render just the plaintext template and return it as a string.", "id": "f16090:c12:m1"}
{"signature": "def __str__(self):", "body": "return (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\".format(<EOL>title=self.title,<EOL>uuid=self.uuid,<EOL>enterprise_customer_name=self.enterprise_customer.name<EOL>)<EOL>)<EOL>", "docstring": "Return human-readable string representation.", "id": "f16090:c11:m0"}
{"signature": "def get_program_enrollment_url(self, program_uuid):", "body": "url = self.enterprise_customer.get_program_enrollment_url(program_uuid)<EOL>if self.publish_audit_enrollment_urls:<EOL><INDENT>url = utils.update_query_parameters(url, {'<STR_LIT>': '<STR_LIT:true>'})<EOL><DEDENT>return utils.update_query_parameters(url, {'<STR_LIT>': self.uuid})<EOL>", "docstring": "Return enterprise program enrollment page url with the catalog information for the given program.\n\nArguments:\n    program_uuid (str): The program UUID.\n\nReturns:\n    (str): Enterprise program landing page url.", "id": "f16090:c11:m13"}
{"signature": "def __str__(self):", "body": "return '<STR_LIT>'.format(<EOL>self.enterprise_customer.uuid<EOL>)<EOL>", "docstring": "Return human-readable string representation.", "id": "f16090:c12:m4"}
{"signature": "def get_data_sharing_consent_text_overrides(self, published_only=True):", "body": "<EOL>DataSharingConsentTextOverrides = apps.get_model('<STR_LIT>', '<STR_LIT>')<EOL>queryset = DataSharingConsentTextOverrides.objects.filter(enterprise_customer=self)<EOL>if published_only:<EOL><INDENT>queryset = queryset.filter(published=True)<EOL><DEDENT>return queryset.first()<EOL>", "docstring": "Return DataSharingConsentTextOverrides associated with this instance.", "id": "f16090:c2:m7"}
{"signature": "def catalog_contains_course(self, course_run_id):", "body": "if self.catalog:<EOL><INDENT>client = CourseCatalogApiServiceClient(self.site)<EOL>if client.is_course_in_catalog(self.catalog, course_run_id):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>for catalog in self.enterprise_customer_catalogs.all():<EOL><INDENT>if catalog.contains_courses([course_run_id]):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL>", "docstring": "Determine if the specified course run is contained in at least one of the enterprise's catalogs.\n\nArguments:\n    course_run_id (str): The string ID of the course or course run in question\n\nReturns:\n    bool: Whether the enterprise catalog includes the given course run.", "id": "f16090:c2:m11"}
{"signature": "def get_context(self):", "body": "<EOL>if self.role.name == ENTERPRISE_OPERATOR_ROLE:<EOL><INDENT>return ALL_ACCESS_CONTEXT<EOL><DEDENT>return super(SystemWideEnterpriseUserRoleAssignment, self).get_context()<EOL>", "docstring": "Return the context for this role assignment class.", "id": "f16090:c16:m0"}
{"signature": "def __str__(self):", "body": "return \"<STR_LIT>\".format(<EOL>customer=self.enterprise_customer,<EOL>id=self.entitlement_id<EOL>)<EOL>", "docstring": "Return human-readable string representation.", "id": "f16090:c9:m0"}
{"signature": "@property<EOL><INDENT>def enables_audit_data_reporting(self):<DEDENT>", "body": "return self.enable_audit_enrollment and self.enable_audit_data_reporting<EOL>", "docstring": "Determine whether the enterprise customer has enabled the ability to report/pass-back audit track data.", "id": "f16090:c2:m5"}
{"signature": "def contains_programs(self, program_uuids):", "body": "content_ids_in_catalog = self.content_filter_ids<EOL>if not content_ids_in_catalog:<EOL><INDENT>content_ids_in_catalog = self._filter_members('<STR_LIT>', program_uuids)<EOL><DEDENT>return set(program_uuids).issubset(content_ids_in_catalog)<EOL>", "docstring": "Return true if this catalog contains the given programs.", "id": "f16090:c11:m6"}
{"signature": "@property<EOL><INDENT>def identity_provider(self):<DEDENT>", "body": "try:<EOL><INDENT>return self.enterprise_customer_identity_provider and self.enterprise_customer_identity_provider.provider_id<EOL><DEDENT>except ObjectDoesNotExist:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Return the unique slug for the identity provider associated with this enterprise customer.\n\nReturns `None` if enterprise customer does not have any identity provider.", "id": "f16090:c2:m0"}
{"signature": "def render_html_template(self, kwargs):", "body": "return self.render_template(mark_safe(self.html_template), kwargs)<EOL>", "docstring": "Render just the HTML template and return it as a string.", "id": "f16090:c12:m0"}
{"signature": "def enroll(self, course_run_id, mode, cohort=None):", "body": "enrollment_api_client = EnrollmentApiClient()<EOL>course_enrollment = enrollment_api_client.get_course_enrollment(self.username, course_run_id) or {}<EOL>enrolled_in_course = course_enrollment and course_enrollment.get('<STR_LIT>', False)<EOL>audit_modes = getattr(settings, '<STR_LIT>', ['<STR_LIT>', '<STR_LIT>'])<EOL>paid_modes = ['<STR_LIT>', '<STR_LIT>']<EOL>is_upgrading = mode in paid_modes and course_enrollment.get('<STR_LIT>') in audit_modes<EOL>if not enrolled_in_course or is_upgrading:<EOL><INDENT>if cohort and not self.enterprise_customer.enable_autocohorting:<EOL><INDENT>raise CourseEnrollmentPermissionError(\"<STR_LIT>\")<EOL><DEDENT>enrollment_api_client.enroll_user_in_course(self.username, course_run_id, mode, cohort=cohort)<EOL>utils.track_event(self.user_id, '<STR_LIT>', {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:label>': course_run_id,<EOL>'<STR_LIT>': str(self.enterprise_customer.uuid),<EOL>'<STR_LIT>': self.enterprise_customer.name,<EOL>'<STR_LIT>': mode,<EOL>'<STR_LIT>': cohort,<EOL>'<STR_LIT>': is_upgrading,<EOL>})<EOL>EnterpriseCourseEnrollment.objects.get_or_create(<EOL>enterprise_customer_user=self,<EOL>course_id=course_run_id<EOL>)<EOL><DEDENT>elif enrolled_in_course and course_enrollment.get('<STR_LIT>') in paid_modes and mode in audit_modes:<EOL><INDENT>raise CourseEnrollmentDowngradeError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(<EOL>course_run_id=course_run_id,<EOL>current_mode=course_enrollment.get('<STR_LIT>'),<EOL>given_mode=mode,<EOL>)<EOL>)<EOL><DEDENT>", "docstring": "Enroll a user into a course track, and register an enterprise course enrollment.", "id": "f16090:c4:m8"}
{"signature": "def render_all_templates(self, kwargs):", "body": "return self.render_plaintext_template(kwargs), self.render_html_template(kwargs)<EOL>", "docstring": "Render both templates and return both.", "id": "f16090:c12:m2"}
{"signature": "def __str__(self):", "body": "return '<STR_LIT>'.format(self.user.user_email, self.course_id)<EOL>", "docstring": "Create string representation of the enrollment.", "id": "f16090:c6:m0"}
{"signature": "@property<EOL><INDENT>def requests_data_sharing_consent(self):<DEDENT>", "body": "return self.enable_data_sharing_consent and self.enforce_data_sharing_consent != self.EXTERNALLY_MANAGED<EOL>", "docstring": "Determine whether the enterprise customer has enabled the data sharing consent request.", "id": "f16090:c2:m4"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return string representation of the enrollment.", "id": "f16090:c6:m1"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return uniquely identifying string representation.", "id": "f16090:c11:m1"}
{"signature": "def get_course(self, course_key):", "body": "if not self.contains_courses([course_key]):<EOL><INDENT>return None<EOL><DEDENT>return CourseCatalogApiServiceClient(self.enterprise_customer.site).get_course_details(course_key)<EOL>", "docstring": "Get all of the metadata for the given course.\n\nArguments:\n    course_key (str): The course key which identifies the course.\n\nReturn:\n    dict: The course metadata.", "id": "f16090:c11:m7"}
{"signature": "@property<EOL><INDENT>def is_audit_enrollment(self):<DEDENT>", "body": "course_enrollment_api = EnrollmentApiClient()<EOL>course_enrollment = course_enrollment_api.get_course_enrollment(<EOL>self.enterprise_customer_user.username,<EOL>self.course_id<EOL>)<EOL>audit_modes = getattr(settings, '<STR_LIT>', ['<STR_LIT>', '<STR_LIT>'])<EOL>return course_enrollment and course_enrollment.get('<STR_LIT>') in audit_modes<EOL>", "docstring": "Specify whether the course enrollment associated with this ``EnterpriseCourseEnrollment`` is in audit mode.\n\n:return: Whether the course enrollment mode is of an audit type.", "id": "f16090:c10:m1"}
{"signature": "def get_context(self):", "body": "return self.enterprise_customer_uuid<EOL>", "docstring": "Return the context for this role assignment class.", "id": "f16090:c14:m1"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return uniquely identifying string representation.", "id": "f16090:c13:m3"}
{"signature": "def get_all_catalogs(self):", "body": "return self._load_data(<EOL>self.CATALOGS_ENDPOINT,<EOL>default=[]<EOL>)<EOL>", "docstring": "Return a list of all course catalogs, including name and ID.\n\nReturns:\n    list: List of catalogs available for the user.", "id": "f16091:c0:m3"}
{"signature": "def _load_data(self, resource, default=DEFAULT_VALUE_SAFEGUARD, **kwargs):", "body": "default_val = default if default != self.DEFAULT_VALUE_SAFEGUARD else {}<EOL>try:<EOL><INDENT>return get_edx_api_data(<EOL>api_config=CatalogIntegration.current(),<EOL>resource=resource,<EOL>api=self.client,<EOL>**kwargs<EOL>) or default_val<EOL><DEDENT>except (SlumberBaseException, ConnectionError, Timeout) as exc:<EOL><INDENT>LOGGER.exception(<EOL>'<STR_LIT>',<EOL>resource, kwargs, str(exc)<EOL>)<EOL>return default_val<EOL><DEDENT>", "docstring": "Load data from API client.\n\nArguments:\n    resource(string): type of resource to load\n    default(any): value to return if API query returned empty result. Sensible values: [], {}, None etc.\n\nReturns:\n    dict: Deserialized response from Course Catalog API", "id": "f16091:c0:m17"}
{"signature": "def course_discovery_api_client(user, catalog_url):", "body": "if JwtBuilder is None:<EOL><INDENT>raise NotConnectedToOpenEdX(<EOL>_(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>)<EOL><DEDENT>jwt = JwtBuilder.create_jwt_for_user(user)<EOL>return EdxRestApiClient(catalog_url, jwt=jwt)<EOL>", "docstring": "Return a Course Discovery API client setup with authentication for the specified user.", "id": "f16091:m0"}
{"signature": "def get_program_by_title(self, program_title):", "body": "all_programs = self._load_data(self.PROGRAMS_ENDPOINT, default=[])<EOL>matching_programs = [program for program in all_programs if program.get('<STR_LIT:title>') == program_title]<EOL>if len(matching_programs) > <NUM_LIT:1>:<EOL><INDENT>raise MultipleProgramMatchError(len(matching_programs))<EOL><DEDENT>elif len(matching_programs) == <NUM_LIT:1>:<EOL><INDENT>return matching_programs[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Return single program by name, or None if not found.\n\nArguments:\n    program_title(string): Program title as seen by students and in Course Catalog Admin\n\nReturns:\n    dict: Program data provided by Course Catalog API", "id": "f16091:c0:m11"}
{"signature": "def get_course_final_price(self, mode, currency='<STR_LIT:$>', enterprise_catalog_uuid=None):", "body": "try:<EOL><INDENT>price_details = self.client.baskets.calculate.get(<EOL>sku=[mode['<STR_LIT>']],<EOL>username=self.user.username,<EOL>catalog=enterprise_catalog_uuid,<EOL>)<EOL><DEDENT>except (SlumberBaseException, ConnectionError, Timeout) as exc:<EOL><INDENT>LOGGER.exception('<STR_LIT>', mode['<STR_LIT>'], str(exc))<EOL>price_details = {}<EOL><DEDENT>price = price_details.get('<STR_LIT>', mode['<STR_LIT>'])<EOL>if price != mode['<STR_LIT>']:<EOL><INDENT>return format_price(price, currency)<EOL><DEDENT>return mode['<STR_LIT>']<EOL>", "docstring": "Get course mode's SKU discounted price after applying any entitlement available for this user.\n\nReturns:\n    str: Discounted price of the course mode.", "id": "f16092:c0:m1"}
{"signature": "def get_content_metadata(self, enterprise_customer):", "body": "content_metadata = OrderedDict()<EOL>if enterprise_customer.catalog:<EOL><INDENT>response = self._load_data(<EOL>self.ENTERPRISE_CUSTOMER_ENDPOINT,<EOL>detail_resource='<STR_LIT>',<EOL>resource_id=str(enterprise_customer.uuid),<EOL>traverse_pagination=True,<EOL>)<EOL>for course in response['<STR_LIT>']:<EOL><INDENT>for course_run in course['<STR_LIT>']:<EOL><INDENT>course_run['<STR_LIT>'] = '<STR_LIT>'  <EOL>content_metadata[course_run['<STR_LIT:key>']] = course_run<EOL><DEDENT><DEDENT><DEDENT>for enterprise_customer_catalog in enterprise_customer.enterprise_customer_catalogs.all():<EOL><INDENT>response = self._load_data(<EOL>self.ENTERPRISE_CUSTOMER_CATALOGS_ENDPOINT,<EOL>resource_id=str(enterprise_customer_catalog.uuid),<EOL>traverse_pagination=True,<EOL>querystring={'<STR_LIT>': <NUM_LIT:1000>},<EOL>)<EOL>for item in response['<STR_LIT>']:<EOL><INDENT>content_id = utils.get_content_metadata_item_id(item)<EOL>content_metadata[content_id] = item<EOL><DEDENT><DEDENT>return content_metadata.values()<EOL>", "docstring": "Return all content metadata contained in the catalogs associated with the EnterpriseCustomer.\n\nArguments:\n    enterprise_customer (EnterpriseCustomer): The EnterpriseCustomer to return content metadata for.\n\nReturns:\n    list: List of dicts containing content metadata.", "id": "f16093:c0:m0"}
{"signature": "def get_course_details(self, course_id):", "body": "try:<EOL><INDENT>return self.client.course(course_id).get()<EOL><DEDENT>except (SlumberBaseException, ConnectionError, Timeout) as exc:<EOL><INDENT>LOGGER.exception(<EOL>'<STR_LIT>',<EOL>course_id, str(exc)<EOL>)<EOL>return {}<EOL><DEDENT>", "docstring": "Query the Enrollment API for the course details of the given course_id.\n\nArgs:\n    course_id (str): The string value of the course's unique identifier\n\nReturns:\n    dict: A dictionary containing details about the course, in an enrollment context (allowed modes, etc.)", "id": "f16094:c3:m0"}
{"signature": "def get_course_modes(self, course_id):", "body": "details = self.get_course_details(course_id)<EOL>modes = details.get('<STR_LIT>', [])<EOL>return self._sort_course_modes([mode for mode in modes if mode['<STR_LIT>'] not in EXCLUDED_COURSE_MODES])<EOL>", "docstring": "Query the Enrollment API for the specific course modes that are available for the given course_id.\n\nArguments:\n    course_id (str): The string value of the course's unique identifier\n\nReturns:\n    list: A list of course mode dictionaries.", "id": "f16094:c3:m2"}
{"signature": "def unenroll_user_from_course(self, username, course_id):", "body": "enrollment = self.get_course_enrollment(username, course_id)<EOL>if enrollment and enrollment['<STR_LIT>']:<EOL><INDENT>response = self.client.enrollment.post({<EOL>'<STR_LIT:user>': username,<EOL>'<STR_LIT>': {'<STR_LIT>': course_id},<EOL>'<STR_LIT>': False,<EOL>'<STR_LIT>': enrollment['<STR_LIT>']<EOL>})<EOL>return not response['<STR_LIT>']<EOL><DEDENT>return False<EOL>", "docstring": "Call the enrollment API to unenroll the user in the course specified by course_id.\nArgs:\n    username (str): The username by which the user goes on the OpenEdx platform\n    course_id (str): The string value of the course's unique identifier\nReturns:\n    bool: Whether the unenrollment succeeded", "id": "f16094:c3:m5"}
{"signature": "def token_expired(self):", "body": "return int(time()) > self.expires_at<EOL>", "docstring": "Return True if the JWT token has expired, False if not.", "id": "f16094:c1:m2"}
{"signature": "@JwtLmsApiClient.refresh_token<EOL><INDENT>def get_course_certificate(self, course_id, username):<DEDENT>", "body": "return self.client.certificates(username).courses(course_id).get()<EOL>", "docstring": "Retrieve the certificate for the given username for the given course_id.\n\nArgs:\n* ``course_id`` (str): The string value of the course's unique identifier\n* ``username`` (str): The username ID identifying the user for which to retrieve the certificate\n\nRaises:\n\nHttpNotFoundError if no certificate found for the given user+course.\n\nReturns:\n\na dict containing:\n\n* ``username``: A string representation of an user's username passed in the request.\n* ``course_id``: A string representation of a Course ID.\n* ``certificate_type``: A string representation of the certificate type.\n* ``created_date`: Datetime the certificate was created (tz-aware).\n* ``status``: A string representation of the certificate status.\n* ``is_passing``: True if the certificate has a passing status, False if not.\n* ``download_url``: A string representation of the certificate url.\n* ``grade``: A string representation of a float for the user's course grade.", "id": "f16094:c7:m0"}
{"signature": "def __init__(self):", "body": "session = Session()<EOL>session.headers = {\"<STR_LIT>\": settings.EDX_API_KEY}<EOL>self.client = EdxRestApiClient(<EOL>self.API_BASE_URL, append_slash=self.APPEND_SLASH, session=session<EOL>)<EOL>", "docstring": "Create an LMS API client, authenticated with the API token from Django settings.", "id": "f16094:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def redirect_if_blocked(course_run_ids, user=None, ip_address=None, url=None):<DEDENT>", "body": "for course_run_id in course_run_ids:<EOL><INDENT>redirect_url = embargo_api.redirect_if_blocked(<EOL>CourseKey.from_string(course_run_id),<EOL>user=user,<EOL>ip_address=ip_address,<EOL>url=url<EOL>)<EOL>if redirect_url:<EOL><INDENT>return redirect_url<EOL><DEDENT><DEDENT>", "docstring": "Return redirect to embargo error page if the given user is blocked.", "id": "f16094:c2:m0"}
{"signature": "def __init__(self, user, expires_in=settings.OAUTH_ID_TOKEN_EXPIRATION):", "body": "self.user = user<EOL>self.expires_in = expires_in<EOL>self.expires_at = <NUM_LIT:0><EOL>self.client = None<EOL>", "docstring": "Connect to the REST API.", "id": "f16094:c1:m0"}
{"signature": "@JwtLmsApiClient.refresh_token<EOL><INDENT>def get_course_grade(self, course_id, username):<DEDENT>", "body": "results = self.client.courses(course_id).get(username=username)<EOL>for row in results:<EOL><INDENT>if row.get('<STR_LIT:username>') == username:<EOL><INDENT>return row<EOL><DEDENT><DEDENT>raise HttpNotFoundError('<STR_LIT>'.format(course_id, username))<EOL>", "docstring": "Retrieve the grade for the given username for the given course_id.\n\nArgs:\n* ``course_id`` (str): The string value of the course's unique identifier\n* ``username`` (str): The username ID identifying the user for which to retrieve the grade.\n\nRaises:\n\nHttpNotFoundError if no grade found for the given user+course.\n\nReturns:\n\na dict containing:\n\n* ``username``: A string representation of a user's username passed in the request.\n* ``course_key``: A string representation of a Course ID.\n* ``passed``: Boolean representing whether the course has been passed according the course's grading policy.\n* ``percent``: A float representing the overall grade for the course\n* ``letter_grade``: A letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None", "id": "f16094:c6:m0"}
{"signature": "def get_remote_id(self, identity_provider, username):", "body": "return self._get_results(identity_provider, '<STR_LIT:username>', username, '<STR_LIT>')<EOL>", "docstring": "Retrieve the remote identifier for the given username.\n\nArgs:\n* ``identity_provider`` (str): identifier slug for the third-party authentication service used during SSO.\n* ``username`` (str): The username ID identifying the user for which to retrieve the remote name.\n\nReturns:\n    string or None: the remote name of the given user.  None if not found.", "id": "f16094:c5:m0"}
{"signature": "def enroll_user_in_course(self, username, course_id, mode, cohort=None):", "body": "return self.client.enrollment.post(<EOL>{<EOL>'<STR_LIT:user>': username,<EOL>'<STR_LIT>': {'<STR_LIT>': course_id},<EOL>'<STR_LIT>': mode,<EOL>'<STR_LIT>': cohort,<EOL>}<EOL>)<EOL>", "docstring": "Call the enrollment API to enroll the user in the course specified by course_id.\n\nArgs:\n    username (str): The username by which the user goes on the OpenEdX platform\n    course_id (str): The string value of the course's unique identifier\n    mode (str): The enrollment mode which should be used for the enrollment\n    cohort (str): Add the user to this named cohort\n\nReturns:\n    dict: A dictionary containing details of the enrollment, including course details, mode, username, etc.", "id": "f16094:c3:m4"}
{"signature": "def get_course_enrollment(self, username, course_id):", "body": "endpoint = getattr(<EOL>self.client.enrollment,<EOL>'<STR_LIT>'.format(username=username, course_id=course_id)<EOL>)<EOL>try:<EOL><INDENT>result = endpoint.get()<EOL><DEDENT>except HttpNotFoundError:<EOL><INDENT>LOGGER.error(<EOL>'<STR_LIT>',<EOL>username,<EOL>course_id<EOL>)<EOL>return None<EOL><DEDENT>if not result:<EOL><INDENT>LOGGER.info('<STR_LIT>', username, course_id)<EOL>return None<EOL><DEDENT>return result<EOL>", "docstring": "Query the enrollment API to get information about a single course enrollment.\n\nArgs:\n    username (str): The username by which the user goes on the OpenEdX platform\n    course_id (str): The string value of the course's unique identifier\n\nReturns:\n    dict: A dictionary containing details of the enrollment, including course details, mode, username, etc.", "id": "f16094:c3:m6"}
{"signature": "def ignore_warning(warning):", "body": "def decorator(func):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>warnings.simplefilter('<STR_LIT:ignore>', warning)<EOL>return func(*args, **kwargs)<EOL><DEDENT>return wrapper<EOL><DEDENT>return decorator<EOL>", "docstring": "Ignore any emitted warnings from a function.\n\n:param warning: The category of warning to ignore.", "id": "f16095:m1"}
{"signature": "def deprecated(extra):", "body": "def decorator(func):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>@wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>message = '<STR_LIT>'.format(<EOL>function=func.__name__,<EOL>extra=extra<EOL>)<EOL>frame = inspect.currentframe().f_back<EOL>warnings.warn_explicit(<EOL>message,<EOL>category=DeprecationWarning,<EOL>filename=inspect.getfile(frame.f_code),<EOL>lineno=frame.f_lineno<EOL>)<EOL>return func(*args, **kwargs)<EOL><DEDENT>return wrapper<EOL><DEDENT>return decorator<EOL>", "docstring": "Flag a method as deprecated.\n\n:param extra: Extra text you'd like to display after the default text.", "id": "f16095:m0"}
{"signature": "def null_decorator(func):", "body": "return func<EOL>", "docstring": "Use this decorator to stub out decorators for testing.\n\nIf we're unable to import social_core.pipeline.partial, which is the case in our CI platform,\nwe need to be able to wrap the function with something.", "id": "f16095:m5"}
{"signature": "def force_fresh_session(view):", "body": "@wraps(view)<EOL>def wrapper(request, *args, **kwargs):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>if not request.GET.get(FRESH_LOGIN_PARAMETER):<EOL><INDENT>enterprise_customer = get_enterprise_customer_or_404(kwargs.get('<STR_LIT>'))<EOL>provider_id = enterprise_customer.identity_provider or '<STR_LIT>'<EOL>sso_provider = get_identity_provider(provider_id)<EOL>if sso_provider:<EOL><INDENT>scheme, netloc, path, params, query, fragment = urlparse(request.get_full_path())<EOL>redirect_url = urlunparse((scheme, netloc, quote(path), params, query, fragment))<EOL>return redirect(<EOL>'<STR_LIT>'.format(<EOL>logout_url='<STR_LIT>',<EOL>params=urlencode(<EOL>{'<STR_LIT>': redirect_url}<EOL>)<EOL>)<EOL>)<EOL><DEDENT><DEDENT>return view(request, *args, **kwargs)<EOL><DEDENT>return wrapper<EOL>", "docstring": "View decorator which terminates stale TPA sessions.\n\nThis decorator forces the user to obtain a new session\nthe first time they access the decorated view. This prevents\nTPA-authenticated users from hijacking the session of another\nuser who may have been previously logged in using the same\nbrowser window.\n\nThis decorator should be used in conjunction with the\nenterprise_login_required decorator.\n\nUsage::\n    @enterprise_login_required\n    @force_fresh_session()\n    def my_view(request, enterprise_uuid):\n        # Some functionality ...\n\n    OR\n\n    class MyView(View):\n        ...\n        @method_decorator(enterprise_login_required)\n        @method_decorator(force_fresh_session)\n        def get(self, request, enterprise_uuid):\n            # Some functionality ...", "id": "f16095:m4"}
{"signature": "@property<EOL><INDENT>def exists(self):<DEDENT>", "body": "return self._exists or self.enterprise_enrollment_exists<EOL>", "docstring": "Determine whether a record related to the consent scenario exists.\n\nFirst, check the instance's own `_exists` attribute; this is set to True\non database-backed instances that have a primary key, and may be manually\nset to true on ProxyDataSharingConsent objects that have database-backed\nchildren. If unsuccessful, check to see if an EnterpriseCourseEnrollment\nrelated to this consent record exists; we treat that as though this record\nexists for the purposes of serializable API responses.\n\nWe want to check for EnterpriseCourseEnrollment records because there are\ncases where one will be created, but not the other. In particular, proxy\nenrollments create an ECE but not any consent record. The LMS uses the\nAPI's 'exists' key to determine if consent action should be taken for course\nenrollments that have prior existence but for which consent has not been\ngranted. Thus, 'exists' is used as a proxy for the question \"has any workflow\nbeen entered which may involve a necessity for the learner to grant consent?\"", "id": "f16096:c0:m4"}
{"signature": "def __str__(self):", "body": "return \"<STR_LIT>\".format(<EOL>class_name=self.__class__.__name__,<EOL>username=self.username,<EOL>enterprise_name=self.enterprise_customer.name,<EOL>)<EOL>", "docstring": "Return a human-readable string representation.", "id": "f16096:c0:m0"}
{"signature": "@property<EOL><INDENT>def enterprise_enrollment_exists(self):<DEDENT>", "body": "if self.course_id:<EOL><INDENT>try:<EOL><INDENT>user_id = User.objects.get(username=self.username).pk<EOL><DEDENT>except User.DoesNotExist:<EOL><INDENT>return False<EOL><DEDENT>return EnterpriseCourseEnrollment.objects.filter(<EOL>course_id=self.course_id,<EOL>enterprise_customer_user__user_id=user_id,<EOL>enterprise_customer_user__enterprise_customer=self.enterprise_customer,<EOL>).exists()<EOL><DEDENT>return False<EOL>", "docstring": "Determine whether there exists an EnterpriseCourseEnrollment related to this consent record.", "id": "f16096:c0:m3"}
{"signature": "def populate_data_sharing_consent(apps, schema_editor):", "body": "DataSharingConsent = apps.get_model('<STR_LIT>', '<STR_LIT>')<EOL>EnterpriseCourseEnrollment = apps.get_model('<STR_LIT>', '<STR_LIT>')<EOL>User = apps.get_model('<STR_LIT>', '<STR_LIT>')<EOL>for enrollment in EnterpriseCourseEnrollment.objects.all():<EOL><INDENT>user = User.objects.get(pk=enrollment.enterprise_customer_user.user_id)<EOL>data_sharing_consent, __ = DataSharingConsent.objects.get_or_create(<EOL>username=user.username,<EOL>enterprise_customer=enrollment.enterprise_customer_user.enterprise_customer,<EOL>course_id=enrollment.course_id,<EOL>)<EOL>if enrollment.consent_granted is not None:<EOL><INDENT>data_sharing_consent.granted = enrollment.consent_granted<EOL><DEDENT>else:<EOL><INDENT>consent_state = enrollment.enterprise_customer_user.data_sharing_consent.first()<EOL>if consent_state is not None:<EOL><INDENT>data_sharing_consent.granted = consent_state.state in ['<STR_LIT>', '<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>data_sharing_consent.granted = False<EOL><DEDENT><DEDENT>data_sharing_consent.save()<EOL><DEDENT>", "docstring": "Populates the ``DataSharingConsent`` model with the ``enterprise`` application's consent data.\n\nConsent data from the ``enterprise`` application come from the ``EnterpriseCourseEnrollment`` model.", "id": "f16099:m0"}
{"signature": "def preview_as_program(self, request, consent_page):  ", "body": "return self.preview(consent_page, program_uuid='<STR_LIT>')<EOL>", "docstring": "Renders data sharing consent page in program context", "id": "f16102:c1:m2"}
{"signature": "def get_missing_params_message(self, parameter_state):", "body": "params = '<STR_LIT:U+002CU+0020>'.join(name for name, present in parameter_state if not present)<EOL>return self.MISSING_REQUIRED_PARAMS_MSG.format(params)<EOL>", "docstring": "Get a user-friendly message indicating a missing parameter for the API endpoint.", "id": "f16104:c0:m2"}
{"signature": "def get_required_query_params(self, request):", "body": "username = get_request_value(request, self.REQUIRED_PARAM_USERNAME, '<STR_LIT>')<EOL>course_id = get_request_value(request, self.REQUIRED_PARAM_COURSE_ID, '<STR_LIT>')<EOL>program_uuid = get_request_value(request, self.REQUIRED_PARAM_PROGRAM_UUID, '<STR_LIT>')<EOL>enterprise_customer_uuid = get_request_value(request, self.REQUIRED_PARAM_ENTERPRISE_CUSTOMER)<EOL>if not (username and (course_id or program_uuid) and enterprise_customer_uuid):<EOL><INDENT>raise ConsentAPIRequestError(<EOL>self.get_missing_params_message([<EOL>(\"<STR_LIT>\", bool(username)),<EOL>(\"<STR_LIT>\", bool(enterprise_customer_uuid)),<EOL>(\"<STR_LIT>\", bool(course_id or program_uuid)),<EOL>])<EOL>)<EOL><DEDENT>return username, course_id, program_uuid, enterprise_customer_uuid<EOL>", "docstring": "Gets ``username``, ``course_id``, and ``enterprise_customer_uuid``,\nwhich are the relevant query parameters for this API endpoint.\n\n:param request: The request to this endpoint.\n:return: The ``username``, ``course_id``, and ``enterprise_customer_uuid`` from the request.", "id": "f16104:c0:m1"}
{"signature": "def get_consent_record(self, request):", "body": "username, course_id, program_uuid, enterprise_customer_uuid = self.get_required_query_params(request)<EOL>return get_data_sharing_consent(<EOL>username,<EOL>enterprise_customer_uuid,<EOL>course_id=course_id,<EOL>program_uuid=program_uuid<EOL>)<EOL>", "docstring": "Get the consent record relevant to the request at hand.", "id": "f16104:c0:m0"}
{"signature": "def get_program_data_sharing_consent(username, program_uuid, enterprise_customer_uuid):", "body": "enterprise_customer = get_enterprise_customer(enterprise_customer_uuid)<EOL>discovery_client = CourseCatalogApiServiceClient(enterprise_customer.site)<EOL>course_ids = discovery_client.get_program_course_keys(program_uuid)<EOL>child_consents = (<EOL>get_data_sharing_consent(username, enterprise_customer_uuid, course_id=individual_course_id)<EOL>for individual_course_id in course_ids<EOL>)<EOL>return ProxyDataSharingConsent.from_children(program_uuid, *child_consents)<EOL>", "docstring": "Get the data sharing consent object associated with a certain user of a customer for a program.\n\n:param username: The user that grants consent.\n:param program_uuid: The program for which consent is granted.\n:param enterprise_customer_uuid: The consent requester.\n:return: The data sharing consent object", "id": "f16109:m2"}
{"signature": "def get_data_sharing_consent(username, enterprise_customer_uuid, course_id=None, program_uuid=None):", "body": "EnterpriseCustomer = apps.get_model('<STR_LIT>', '<STR_LIT>')  <EOL>try:<EOL><INDENT>if course_id:<EOL><INDENT>return get_course_data_sharing_consent(username, course_id, enterprise_customer_uuid)<EOL><DEDENT>return get_program_data_sharing_consent(username, program_uuid, enterprise_customer_uuid)<EOL><DEDENT>except EnterpriseCustomer.DoesNotExist:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Get the data sharing consent object associated with a certain user, enterprise customer, and other scope.\n\n:param username: The user that grants consent\n:param enterprise_customer_uuid: The consent requester\n:param course_id (optional): A course ID to which consent may be related\n:param program_uuid (optional): A program to which consent may be related\n:return: The data sharing consent object, or None if the enterprise customer for the given UUID does not exist.", "id": "f16109:m0"}
{"signature": "@property<EOL><INDENT>def _exists(self):<DEDENT>", "body": "return bool(self.pk)<EOL>", "docstring": "Return whether the instance exists or not.", "id": "f16111:c3:m0"}
{"signature": "def save(self, *args, **kwargs):  ", "body": "return self.commit()<EOL>", "docstring": "Synonym function for ``commit``.", "id": "f16111:c2:m3"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return uniquely identifying string representation.", "id": "f16111:c5:m1"}
{"signature": "def commit(self):", "body": "if self._child_consents:<EOL><INDENT>consents = []<EOL>for consent in self._child_consents:<EOL><INDENT>consent.granted = self.granted<EOL>consents.append(consent.save() or consent)<EOL><DEDENT>return ProxyDataSharingConsent.from_children(self.program_uuid, *consents)<EOL><DEDENT>consent, _ = DataSharingConsent.objects.update_or_create(<EOL>enterprise_customer=self.enterprise_customer,<EOL>username=self.username,<EOL>course_id=self.course_id,<EOL>defaults={<EOL>'<STR_LIT>': self.granted<EOL>}<EOL>)<EOL>self._exists = consent.exists<EOL>return consent<EOL>", "docstring": "Commit a real ``DataSharingConsent`` object to the database, mirroring current field settings.\n\n:return: A ``DataSharingConsent`` object if validation is successful, otherwise ``None``.", "id": "f16111:c2:m2"}
{"signature": "def __init__(<EOL>self,<EOL>enterprise_customer=None,<EOL>username='<STR_LIT>',<EOL>course_id='<STR_LIT>',<EOL>program_uuid='<STR_LIT>',<EOL>granted=False,<EOL>exists=False,<EOL>child_consents=None,<EOL>**kwargs<EOL>):", "body": "ec_keys = {}<EOL>for key in kwargs:<EOL><INDENT>if str(key).startswith('<STR_LIT>'):<EOL><INDENT>enterprise_customer_detail = key[len('<STR_LIT>'):]<EOL>ec_keys[enterprise_customer_detail] = kwargs[key]<EOL><DEDENT><DEDENT>if ec_keys:<EOL><INDENT>enterprise_customer = EnterpriseCustomer.objects.get(**ec_keys)  <EOL><DEDENT>self.enterprise_customer = enterprise_customer<EOL>self.username = username<EOL>self.course_id = course_id<EOL>self.program_uuid = program_uuid<EOL>self.granted = granted<EOL>self._exists = exists<EOL>self._child_consents = child_consents or []<EOL>", "docstring": "Initialize a proxy version of ``DataSharingConsent`` which behaves similarly but does not exist in the DB.", "id": "f16111:c2:m0"}
{"signature": "def setUp(self):", "body": "super(TestSAPSuccessFactorsConfig, self).setUp()<EOL>self.app_config = integrated_channels.sap_success_factors.apps.SAPSuccessFactorsConfig(<EOL>'<STR_LIT>', integrated_channels.sap_success_factors<EOL>)<EOL>", "docstring": "Set up test environment", "id": "f16112:c2:m0"}
{"signature": "def setUp(self):", "body": "super(TestXAPIConfig, self).setUp()<EOL>self.app_config = integrated_channels.xapi.apps.XAPIConfig(<EOL>'<STR_LIT>', integrated_channels.xapi<EOL>)<EOL>", "docstring": "Set up test environment", "id": "f16112:c4:m0"}
{"signature": "def setUp(self):", "body": "super(TestEnterpriseConfig, self).setUp()<EOL>self.post_save_mock = mock.Mock()<EOL>patcher = mock.patch('<STR_LIT>', self.post_save_mock)<EOL>patcher.start()<EOL>self.app_config = enterprise.apps.EnterpriseConfig('<STR_LIT>', enterprise)<EOL>self.addCleanup(patcher.stop)<EOL>", "docstring": "Set up test environment.", "id": "f16112:c0:m0"}
{"signature": "def setUp(self):", "body": "super(TestEnterpriseDecorators, self).setUp()<EOL>faker = FakerFactory.create()<EOL>self.provider_id = faker.slug()  <EOL>self.uuid = faker.uuid4()  <EOL>self.customer = EnterpriseCustomerFactory(uuid=self.uuid)<EOL>EnterpriseCustomerIdentityProviderFactory(provider_id=self.provider_id, enterprise_customer=self.customer)<EOL>self.session_engine = import_module(settings.SESSION_ENGINE)<EOL>", "docstring": "Set up test environment.", "id": "f16113:c0:m0"}
{"signature": "def setUp(self):", "body": "self.faker = FakerFactory.create()<EOL>self.catalog_uuid = self.faker.uuid4()  <EOL>self.enterprise_uuid = self.faker.uuid4()  <EOL>self.enterprise_name = '<STR_LIT>'<EOL>super(TestEnterpriseCustomerCatalog, self).setUp()<EOL>", "docstring": "Setup tests", "id": "f16115:c10:m0"}
{"signature": "def mock_get_available_idps(idps):", "body": "def _():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>idp_list = []<EOL>for idp in idps:<EOL><INDENT>mock_idp = mock.Mock()<EOL>mock_idp.configure_mock(provider_id=idp, name=idp)<EOL>idp_list.append(mock_idp)<EOL><DEDENT>return idp_list<EOL><DEDENT>return _<EOL>", "docstring": "Mock method for get_available_idps.", "id": "f16116:m0"}
{"signature": "def setUp(self):", "body": "super(TestEnterpriseUtils, self).setUp()<EOL>faker = FakerFactory.create()<EOL>self.provider_id = faker.slug()  <EOL>self.uuid = faker.uuid4()  <EOL>self.customer = EnterpriseCustomerFactory(uuid=self.uuid)<EOL>EnterpriseCustomerIdentityProviderFactory(provider_id=self.provider_id, enterprise_customer=self.customer)<EOL>", "docstring": "Set up test environment.", "id": "f16116:c0:m0"}
{"signature": "def migrate_to_origin(self):", "body": "self.executor.loader.build_graph()<EOL>self.executor.migrate(self.migrate_origin)<EOL>", "docstring": "Performs the migration to the designated origin.\n\nThis only really does anything if you have migrated forward\nin some way or no migrations were performed at all.", "id": "f16117:c0:m1"}
{"signature": "def migrate_to_dest(self):", "body": "self.executor.loader.build_graph()<EOL>self.executor.migrate(self.migrate_dest)<EOL>", "docstring": "Performs the migration to the designated destination.", "id": "f16117:c0:m2"}
{"signature": "def setUp(self):", "body": "super(TestConsentAPIPermissions, self).setUp()<EOL>discovery_client_class = mock.patch('<STR_LIT>')<EOL>self.discovery_client = discovery_client_class.start().return_value<EOL>self.discovery_client.is_course_in_catalog.return_value = True<EOL>self.addCleanup(discovery_client_class.stop)<EOL>factories.DataSharingConsentFactory.create(<EOL>course_id=TEST_COURSE,<EOL>username=TEST_USERNAME,<EOL>enterprise_customer__uuid=TEST_UUID<EOL>)<EOL>", "docstring": "Perform operations common to all tests.", "id": "f16118:c0:m0"}
{"signature": "def setUp(self):", "body": "self.enterprise_learner_role, __ = SystemWideEnterpriseRole.objects.get_or_create(name=ENTERPRISE_LEARNER_ROLE)<EOL>self.learner_user = UserFactory(id=<NUM_LIT:2>, email='<STR_LIT>')<EOL>self.enterprise_customer = EnterpriseCustomerFactory(<EOL>catalog=<NUM_LIT:1>,<EOL>name='<STR_LIT>',<EOL>)<EOL>super(TestEnterpriseLearnerRoleSignals, self).setUp()<EOL>", "docstring": "Setup for `TestEnterpriseLearnerRoleSignals` test.", "id": "f16121:c2:m0"}
{"signature": "def setUp(self):", "body": "super(TestIsAdminUserOrInGroupPermissions, self).setUp()<EOL>self.user = UserFactory(email='<STR_LIT>', password='<STR_LIT:test>', is_staff=True)<EOL>", "docstring": "Setup the test cases.", "id": "f16124:c1:m0"}
{"signature": "def create_course_enrollments_context(<EOL>self,<EOL>user_exists,<EOL>lms_user_id,<EOL>tpa_user_id,<EOL>user_email,<EOL>mock_tpa_client,<EOL>mock_enrollment_client,<EOL>course_enrollment,<EOL>mock_catalog_contains_course,<EOL>course_in_catalog,<EOL>enable_autocohorting=False<EOL>):", "body": "enterprise_customer = factories.EnterpriseCustomerFactory(<EOL>uuid=FAKE_UUIDS[<NUM_LIT:0>],<EOL>name=\"<STR_LIT>\",<EOL>enable_autocohorting=enable_autocohorting<EOL>)<EOL>permission = Permission.objects.get(name='<STR_LIT>')<EOL>self.user.user_permissions.add(permission)<EOL>user = None<EOL>if user_exists:<EOL><INDENT>if lms_user_id:<EOL><INDENT>user = factories.UserFactory(id=lms_user_id)<EOL><DEDENT>elif tpa_user_id:<EOL><INDENT>user = factories.UserFactory(username=tpa_user_id)<EOL><DEDENT>elif user_email:<EOL><INDENT>user = factories.UserFactory(email=user_email)<EOL><DEDENT>factories.EnterpriseCustomerUserFactory(<EOL>user_id=user.id,<EOL>enterprise_customer=enterprise_customer,<EOL>)<EOL><DEDENT>if tpa_user_id:<EOL><INDENT>mock_tpa_client.return_value = mock.Mock()<EOL>mock_tpa_client.return_value.get_username_from_remote_id = mock.Mock()<EOL>mock_tpa_client.return_value.get_username_from_remote_id.return_value = tpa_user_id<EOL><DEDENT>mock_enrollment_client.return_value = mock.Mock(<EOL>get_course_enrollment=mock.Mock(return_value=course_enrollment),<EOL>enroll_user_in_course=mock.Mock()<EOL>)<EOL>mock_catalog_contains_course.return_value = course_in_catalog<EOL>return enterprise_customer, user<EOL>", "docstring": "Set up for tests that call the enterprise customer course enrollments detail route.", "id": "f16126:c0:m32"}
{"signature": "def create_items(self, factory, items):", "body": "for item in items:<EOL><INDENT>factory.create(**item)<EOL><DEDENT>", "docstring": "Create model instances using given factory", "id": "f16126:c0:m1"}
{"signature": "def _prepare_request(self, url, user):", "body": "request = RequestFactory().get(url)<EOL>request.user = user<EOL>session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)<EOL>request.session = self.session_engine.SessionStore(session_key)<EOL>return request<EOL>", "docstring": "Prepare request for test.", "id": "f16127:c0:m1"}
{"signature": "def setUp(self):", "body": "super(TestEnterpriseAPIDecorators, self).setUp()<EOL>faker = FakerFactory.create()<EOL>self.provider_id = faker.slug()  <EOL>self.uuid = faker.uuid4()  <EOL>self.customer = EnterpriseCustomerFactory(uuid=self.uuid)<EOL>self.user = UserFactory()<EOL>self.session_engine = import_module(settings.SESSION_ENGINE)<EOL>", "docstring": "Set up test environment.", "id": "f16127:c0:m0"}
{"signature": "def setUp(self):", "body": "super(TestCourseCatalogApiService, self).setUp()<EOL>self.user_mock = mock.Mock(spec=User)<EOL>self.get_data_mock = self._make_patch(self._make_catalog_api_location(\"<STR_LIT>\"))<EOL>self.jwt_builder_mock = self._make_patch(self._make_catalog_api_location(\"<STR_LIT>\"))<EOL>self.integration_config_mock = mock.Mock(enabled=True)<EOL>self.integration_config_mock.get_service_user.return_value = self.user_mock<EOL>self.integration_mock = self._make_patch(self._make_catalog_api_location(\"<STR_LIT>\"))<EOL>self.integration_mock.current.return_value = self.integration_config_mock<EOL>self.api = CourseCatalogApiServiceClient()<EOL>", "docstring": "Set up mocks for the test suite.", "id": "f16133:c3:m0"}
{"signature": "def _assert_num_requests(self, expected_count):", "body": "assert len(responses.calls) == expected_count<EOL>", "docstring": "DRY helper for verifying request counts.", "id": "f16134:c0:m3"}
{"signature": "def setUp(self):", "body": "self.enterprise_customer = EnterpriseCustomerFactory(<EOL>catalog=<NUM_LIT:1>,<EOL>name='<STR_LIT>',<EOL>)<EOL>super(TestEnterpriseApiClient, self).setUp()<EOL>self.catalog_api_config_mock = self._make_patch(self._make_catalog_api_location(\"<STR_LIT>\"))<EOL>self.user = UserFactory(is_staff=True)<EOL>", "docstring": "DRY method for TestEnterpriseApiClient.", "id": "f16134:c0:m0"}
{"signature": "def _setup_ecommerce_client(self, client_mock, total=<NUM_LIT:50>):", "body": "dummy_price_details_mock = mock.MagicMock()<EOL>dummy_price_details_mock.return_value = {<EOL>'<STR_LIT>': total,<EOL>}<EOL>price_details_mock = mock.MagicMock()<EOL>method_name = '<STR_LIT>'<EOL>attrs = {method_name: dummy_price_details_mock}<EOL>price_details_mock.configure_mock(**attrs)<EOL>client_mock.return_value = price_details_mock<EOL>", "docstring": "Sets up the Ecommerce API client", "id": "f16136:c0:m3"}
{"signature": "def _login(self):", "body": "assert self.client.login(username=self.user.username, password=\"<STR_LIT>\")<EOL>", "docstring": "Log user in.", "id": "f16136:c0:m1"}
{"signature": "def _setup_enrollment_client(self, client_mock):", "body": "client = client_mock.return_value<EOL>client.get_course_modes.return_value = self.dummy_demo_course_modes<EOL>client.get_course_enrollment.return_value = None<EOL>", "docstring": "Sets up the Enrollment API client", "id": "f16136:c0:m2"}
{"signature": "def _setup_get_data_sharing_consent(self, client_mock, required):", "body": "client_mock.return_value.consent_required.return_value = required<EOL>", "docstring": "Sets up the ``get_data_sharing_consent`` function mock.", "id": "f16137:c0:m6"}
{"signature": "def setUp(self):", "body": "self.user = UserFactory.create(is_staff=True, is_active=True)<EOL>self.user.set_password(\"<STR_LIT>\")<EOL>self.user.save()<EOL>self.client = Client()<EOL>self.demo_course_1 = FAKE_PROGRAM_RESPONSE3['<STR_LIT>'][<NUM_LIT:0>]<EOL>self.demo_course_2 = FAKE_PROGRAM_RESPONSE3['<STR_LIT>'][<NUM_LIT:1>]<EOL>self.demo_course_id1 = FAKE_PROGRAM_RESPONSE3['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT:key>']<EOL>self.demo_course_id2 = FAKE_PROGRAM_RESPONSE3['<STR_LIT>'][<NUM_LIT:1>]['<STR_LIT:key>']<EOL>self.demo_course_ids = [self.demo_course_id1, self.demo_course_id2]<EOL>self.dummy_program_uuid = FAKE_PROGRAM_RESPONSE3['<STR_LIT>']<EOL>self.dummy_program = FAKE_PROGRAM_RESPONSE3<EOL>super(TestProgramEnrollmentView, self).setUp()<EOL>", "docstring": "Set up reusable fake data.", "id": "f16137:c0:m0"}
{"signature": "def _setup_registry_mock(self, registry_mock, provider_id):", "body": "registry_mock.get.return_value.configure_mock(provider_id=provider_id)<EOL>", "docstring": "Sets up the SSO Registry object.", "id": "f16137:c0:m5"}
{"signature": "def _setup_program_data_extender(self, extender_mock, course_overrides=None):", "body": "<EOL>dummy_program_extended = copy.deepcopy(self.dummy_program)<EOL>dummy_course_extended_1 = copy.deepcopy(self.demo_course_1)<EOL>dummy_course_extended_2 = copy.deepcopy(self.demo_course_2)<EOL>if course_overrides:<EOL><INDENT>dummy_course_extended_1.update(course_overrides)<EOL>dummy_course_extended_2.update(course_overrides)<EOL><DEDENT>dummy_course_extended_1['<STR_LIT>'][<NUM_LIT:0>].update({\"<STR_LIT>\": False, \"<STR_LIT>\": None})<EOL>dummy_course_extended_2['<STR_LIT>'][<NUM_LIT:0>].update({\"<STR_LIT>\": False, \"<STR_LIT>\": None})<EOL>dummy_program_extended.update({<EOL>\"<STR_LIT>\": [<EOL>dummy_course_extended_1,<EOL>dummy_course_extended_2,<EOL>],<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": <NUM_LIT:50>,<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT>\": <NUM_LIT>,<EOL>\"<STR_LIT>\": <NUM_LIT>,<EOL>},<EOL>\"<STR_LIT>\": <NUM_LIT>,<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT>\": [<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>],<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>})<EOL>extender_mock.return_value.extend.return_value = dummy_program_extended<EOL>return extender_mock<EOL>", "docstring": "Sets up the `ProgramDataExtender` mock, a utility from the edx-platform.", "id": "f16137:c0:m4"}
{"signature": "def _make_paragraphs(self, item):", "body": "left_sidebar_text = (<EOL>self.left_sidebar_text<EOL>).format(<EOL>enterprise_customer_name=self.enterprise_customer.name,<EOL>platform_name=self.platform_name,<EOL>)<EOL>top_paragraph = (<EOL>self.top_paragraph<EOL>).format(<EOL>enterprise_customer_name=self.enterprise_customer.name,<EOL>item=item,<EOL>)<EOL>agreement_text = (<EOL>self.agreement_text<EOL>).format(<EOL>enterprise_customer_name=self.enterprise_customer.name,<EOL>platform_name=self.platform_name,<EOL>)<EOL>confirmation_modal_text = (<EOL>self.confirmation_modal_text<EOL>).format(<EOL>enterprise_customer_name=self.enterprise_customer.name,<EOL>item=item,<EOL>)<EOL>return left_sidebar_text, top_paragraph, agreement_text, confirmation_modal_text<EOL>", "docstring": "Returns text to be used paragraphs of data sharing consent page", "id": "f16140:c2:m2"}
{"signature": "def _assert_get_returns_404_with_mock(self, url, get_params):", "body": "with mock.patch('<STR_LIT>') as mock_render:<EOL><INDENT>mock_render.return_value = HttpResponse()<EOL>self.client.get(url, get_params)<EOL>assert mock_render.call_args_list[<NUM_LIT:0>][<NUM_LIT:1>]['<STR_LIT:status>'] == <NUM_LIT><EOL><DEDENT>", "docstring": "Mock the render method, run a GET, and assert it returns 404.", "id": "f16140:c1:m1"}
{"signature": "def _login(self):", "body": "assert self.client.login(username=self.user.username, password=\"<STR_LIT>\")<EOL>", "docstring": "Log user in.", "id": "f16140:c1:m3"}
{"signature": "def get_expected_output(**expected_completion):", "body": "action = '<STR_LIT>'<EOL>action2 = '<STR_LIT>'<EOL>if expected_completion['<STR_LIT>'] == NOW_TIMESTAMP:<EOL><INDENT>degreed_timestamp = '<STR_LIT>'.format(NOW_TIMESTAMP_FORMATTED)<EOL><DEDENT>elif expected_completion['<STR_LIT>'] == PAST_TIMESTAMP:<EOL><INDENT>degreed_timestamp = '<STR_LIT>'.format(PAST_TIMESTAMP_FORMATTED)<EOL><DEDENT>else:<EOL><INDENT>degreed_timestamp = '<STR_LIT:null>'<EOL>action = '<STR_LIT>'<EOL>action2 = action<EOL><DEDENT>degreed_output_template = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL>sapsf_output_template = (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL>expected_output = [<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\" + sapsf_output_template.format(<EOL>user_id='<STR_LIT>',<EOL>course_id=COURSE_KEY,<EOL>provider_id=\"<STR_LIT>\",<EOL>**expected_completion<EOL>),<EOL>\"<STR_LIT>\".format(action),<EOL>\"<STR_LIT>\" + sapsf_output_template.format(<EOL>user_id='<STR_LIT>',<EOL>course_id=COURSE_ID,<EOL>provider_id=\"<STR_LIT>\",<EOL>**expected_completion<EOL>),<EOL>\"<STR_LIT>\".format(action2),<EOL>\"<STR_LIT>\" + sapsf_output_template.format(<EOL>user_id='<STR_LIT>',<EOL>course_id=COURSE_KEY,<EOL>provider_id=\"<STR_LIT>\",<EOL>**expected_completion<EOL>),<EOL>\"<STR_LIT>\".format(action),<EOL>\"<STR_LIT>\" + sapsf_output_template.format(<EOL>user_id='<STR_LIT>',<EOL>course_id=COURSE_ID,<EOL>provider_id=\"<STR_LIT>\",<EOL>**expected_completion<EOL>),<EOL>\"<STR_LIT>\".format(action2),<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\" + degreed_output_template.format(<EOL>user_email='<STR_LIT>',<EOL>course_id=COURSE_KEY,<EOL>timestamp=degreed_timestamp<EOL>),<EOL>\"<STR_LIT>\".format(action),<EOL>\"<STR_LIT>\" + 
degreed_output_template.format(<EOL>user_email='<STR_LIT>',<EOL>course_id=COURSE_ID,<EOL>timestamp=degreed_timestamp<EOL>),<EOL>\"<STR_LIT>\".format(action2),<EOL>\"<STR_LIT>\" + degreed_output_template.format(<EOL>user_email='<STR_LIT>',<EOL>course_id=COURSE_KEY,<EOL>timestamp=degreed_timestamp<EOL>),<EOL>\"<STR_LIT>\".format(action),<EOL>\"<STR_LIT>\" + degreed_output_template.format(<EOL>user_email='<STR_LIT>',<EOL>course_id=COURSE_ID,<EOL>timestamp=degreed_timestamp<EOL>),<EOL>\"<STR_LIT>\".format(action2),<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>]<EOL>return expected_output<EOL>", "docstring": "Returns the expected JSON record logged by the ``transmit_learner_data`` command.", "id": "f16141:m2"}
{"signature": "def assert_info_logs_sap_learners_unlink(self, expected_messages):", "body": "with LogCapture(level=logging.INFO) as log_capture:<EOL><INDENT>call_command('<STR_LIT>')<EOL>for index, message in enumerate(expected_messages):<EOL><INDENT>assert message in log_capture.records[index].getMessage()<EOL><DEDENT><DEDENT>", "docstring": "DRY method to verify log messages for management command \"unlink_inactive_sap_learners\".", "id": "f16141:c4:m2"}
{"signature": "def _verify_pagination(<EOL>self,<EOL>page_object,<EOL>total_result,<EOL>page_number=<NUM_LIT:1>,<EOL>page_start=<NUM_LIT:0>,<EOL>page_end=PAGE_SIZE,<EOL>page_size=PAGE_SIZE<EOL>):", "body": "<EOL>assert page_object.number == page_number<EOL>assert list(page_object.object_list) == total_result[page_start:page_end]<EOL>assert page_object.paginator.count == len(total_result)<EOL>assert page_object.paginator.per_page == page_size<EOL>result_pages = int(ceil(len(total_result) / float(page_size))) if total_result else <NUM_LIT:1><EOL>assert page_object.paginator.num_pages == result_pages<EOL>assert list(page_object.paginator.object_list) == total_result<EOL>", "docstring": "Verifies pagination.", "id": "f16142:c2:m0"}
{"signature": "def _enroll_user_request(self, user, mode, course_id=\"<STR_LIT>\", program_id=\"<STR_LIT>\", notify=True):", "body": "notify = (<EOL>ManageLearnersForm.NotificationTypes.BY_EMAIL if notify<EOL>else ManageLearnersForm.NotificationTypes.NO_NOTIFICATION<EOL>)<EOL>self._login()<EOL>if isinstance(user, six.string_types):<EOL><INDENT>email_or_username = user<EOL><DEDENT>else:<EOL><INDENT>email_or_username = getattr(user, '<STR_LIT:username>', getattr(user, '<STR_LIT>', None))<EOL><DEDENT>response = self.client.post(self.view_url, data={<EOL>ManageLearnersForm.Fields.EMAIL_OR_USERNAME: email_or_username,<EOL>ManageLearnersForm.Fields.COURSE_MODE: mode,<EOL>ManageLearnersForm.Fields.COURSE: course_id,<EOL>ManageLearnersForm.Fields.PROGRAM: program_id,<EOL>ManageLearnersForm.Fields.NOTIFY: notify<EOL>})<EOL>return response<EOL>", "docstring": "Perform post request to log in and submit the form to enroll a user.", "id": "f16142:c3:m10"}
{"signature": "def setUp(self):", "body": "super(BaseTestEnterpriseCustomerTransmitCoursesView, self).setUp()<EOL>self.user = UserFactory.create(is_staff=True, is_active=True, id=<NUM_LIT:1>)<EOL>self.user.set_password('<STR_LIT>')<EOL>self.user.save()<EOL>self.enterprise_channel_worker = UserFactory.create(is_staff=True, is_active=True)<EOL>self.enterprise_customer = EnterpriseCustomerFactory()<EOL>self.default_context = {<EOL>'<STR_LIT>': True,<EOL>'<STR_LIT>': self.enterprise_customer._meta,<EOL>'<STR_LIT:user>': self.user<EOL>}<EOL>self.transmit_courses_metadata_form = TransmitEnterpriseCoursesForm()<EOL>self.view_url = reverse(<EOL>'<STR_LIT>' + enterprise_admin.utils.UrlNames.TRANSMIT_COURSES_METADATA,<EOL>args=(self.enterprise_customer.uuid,)<EOL>)<EOL>self.client = Client()<EOL>self.context_parameters = EnterpriseCustomerTransmitCoursesView.ContextParameters<EOL>", "docstring": "Test set up", "id": "f16142:c6:m0"}
{"signature": "@staticmethod<EOL><INDENT>def _assert_no_record(email):<DEDENT>", "body": "assert PendingEnterpriseCustomerUser.objects.filter(user_email=email).count() == <NUM_LIT:0><EOL>try:<EOL><INDENT>user = User.objects.get(email=email)<EOL>assert EnterpriseCustomerUser.objects.filter(user_id=user.id).count() == <NUM_LIT:0><EOL><DEDENT>except User.DoesNotExist:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Assert that linked user record with specified email does not exist.", "id": "f16142:c1:m2"}
{"signature": "def setUp(self):", "body": "super(BaseTestEnterpriseCustomerManageLearnersView, self).setUp()<EOL>self.user = UserFactory.create(is_staff=True, is_active=True, id=<NUM_LIT:1>)<EOL>self.user.set_password(\"<STR_LIT>\")<EOL>self.user.save()<EOL>self.enterprise_customer = EnterpriseCustomerFactory()<EOL>self.default_context = {<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT>\": self.enterprise_customer._meta,<EOL>\"<STR_LIT:user>\": self.user<EOL>}<EOL>self.view_url = reverse(<EOL>\"<STR_LIT>\" + enterprise_admin.utils.UrlNames.MANAGE_LEARNERS,<EOL>args=(self.enterprise_customer.uuid,)<EOL>)<EOL>self.client = Client()<EOL>self.context_parameters = EnterpriseCustomerManageLearnersView.ContextParameters<EOL>", "docstring": "Test set up - installs common dependencies.", "id": "f16142:c1:m0"}
{"signature": "def _login(self):", "body": "assert self.client.login(username=self.user.username, password=\"<STR_LIT>\")<EOL>", "docstring": "Log user in.", "id": "f16142:c1:m3"}
{"signature": "@staticmethod<EOL><INDENT>def _make_bound_form(identity_provider):<DEDENT>", "body": "form_data = {<EOL>\"<STR_LIT:name>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": identity_provider,<EOL>\"<STR_LIT>\": <NUM_LIT:1>,<EOL>\"<STR_LIT>\": <NUM_LIT:1>,<EOL>\"<STR_LIT>\": \"<STR_LIT>\",<EOL>}<EOL>return EnterpriseCustomerAdminForm(form_data)<EOL>", "docstring": "Builds bound EnterpriseCustomerAdminForm.", "id": "f16143:c2:m1"}
{"signature": "def setUp(self):", "body": "super(TestEnterpriseCustomerReportingConfigAdminForm, self).setUp()<EOL>self.ent_customer1 = EnterpriseCustomerFactory()<EOL>self.ent_customer2 = EnterpriseCustomerFactory()<EOL>self.ent_catalogs1 = [<EOL>EnterpriseCustomerCatalogFactory(enterprise_customer=self.ent_customer1)<EOL>for _ in range(<NUM_LIT:3>)<EOL>]<EOL>self.ent_catalogs2 = [<EOL>EnterpriseCustomerCatalogFactory(enterprise_customer=self.ent_customer2)<EOL>for _ in range(<NUM_LIT:2>)<EOL>]<EOL>self.form_data = {<EOL>'<STR_LIT>': self.ent_customer1.uuid,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:email>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': <NUM_LIT:1>,<EOL>'<STR_LIT:email>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:password>',<EOL>}<EOL>", "docstring": "Test set up.", "id": "f16143:c4:m0"}
{"signature": "def setUp(self):", "body": "super(TestCSVExportAction, self).setUp()<EOL>self.output_stream = BytesIO()<EOL>response_instance_mock = mock.MagicMock(wraps=self.output_stream)<EOL>self.response_mock = self._make_patch(<EOL>\"<STR_LIT>\",<EOL>return_value=response_instance_mock<EOL>)<EOL>self.model_admin_mock = mock.Mock()<EOL>self.model_admin_mock.model._meta.fields = [<EOL>self._make_field(\"<STR_LIT:code>\"), self._make_field(\"<STR_LIT:name>\"), self._make_field(\"<STR_LIT:description>\"),<EOL>]<EOL>", "docstring": "Test suite set up method.", "id": "f16146:c3:m3"}
{"signature": "def _make_patch(self, target, **kwargs):", "body": "target_mock = mock.Mock(**kwargs)<EOL>patch = mock.patch(target, new=target_mock)<EOL>patch.start()<EOL>self.addCleanup(patch.stop)<EOL>return target_mock<EOL>", "docstring": "Patch `target` with mock for the duration of test run.", "id": "f16146:c3:m0"}
{"signature": "def setUp(self):", "body": "self.action = get_clear_catalog_id_action()<EOL>self.model_admin_mock = mock.Mock()<EOL>self.model_admin_mock.model._meta.fields = [<EOL>self._make_field(\"<STR_LIT:code>\"), self._make_field(\"<STR_LIT:name>\"), self._make_field(\"<STR_LIT:description>\"),<EOL>]<EOL>super(TestClearCatalogAction, self).setUp()<EOL>", "docstring": "Provide the necessary moving parts for the tests.", "id": "f16146:c2:m0"}
{"signature": "def _make_field(self, name):", "body": "field = mock.Mock()<EOL>field.name = name<EOL>return field<EOL>", "docstring": "Mock django field.", "id": "f16146:c3:m1"}
{"signature": "def create_switch(apps, schema_editor):", "body": "Switch = apps.get_model('<STR_LIT>', '<STR_LIT>')<EOL>Switch.objects.get_or_create(name='<STR_LIT>', defaults={'<STR_LIT>': False})<EOL>", "docstring": "Create and activate the SAP_USE_ENTERPRISE_ENROLLMENT_PAGE switch if it does not already exist.", "id": "f16171:m0"}
{"signature": "def create_switch(apps, schema_editor):", "body": "Switch = apps.get_model('<STR_LIT>', '<STR_LIT>')<EOL>Switch.objects.get_or_create(name='<STR_LIT>', defaults={'<STR_LIT>': False})<EOL>", "docstring": "Create and activate the SAP_USE_ENTERPRISE_ENROLLMENT_PAGE switch if it does not already exist.", "id": "f16173:m0"}
{"signature": "def dropHistoricalTable(apps, schema_editor):", "body": "table_name = '<STR_LIT>'<EOL>if table_name in connection.introspection.table_names():<EOL><INDENT>migrations.DeleteModel(<EOL>name=table_name,<EOL>)<EOL><DEDENT>", "docstring": "Drops the historical sap_success_factors table named herein.", "id": "f16178:m0"}
{"signature": "def __init__(self, enterprise_configuration):", "body": "super(SAPSuccessFactorsAPIClient, self).__init__(enterprise_configuration)<EOL>self.global_sap_config = apps.get_model('<STR_LIT>', '<STR_LIT>').current()<EOL>self._create_session()<EOL>", "docstring": "Instantiate a new client.\n\nArgs:\n    enterprise_configuration (SAPSuccessFactorsEnterpriseCustomerConfiguration): An enterprise customers's\n    configuration model for connecting with SAP SuccessFactors", "id": "f16189:c0:m1"}
{"signature": "def delete_content_metadata(self, serialized_data):", "body": "self._sync_content_metadata(serialized_data)<EOL>", "docstring": "Delete content metadata records using the SuccessFactors OCN Course Import API endpoint.\n\nArguments:\n    serialized_data: Serialized JSON string representing a list of content metadata items.\n\nRaises:\n    ClientError: If SuccessFactors API call fails.", "id": "f16189:c0:m6"}
{"signature": "def create_content_metadata(self, serialized_data):", "body": "self._sync_content_metadata(serialized_data)<EOL>", "docstring": "Create content metadata records using the SuccessFactors OCN Course Import API endpoint.\n\nArguments:\n    serialized_data: Serialized JSON string representing a list of content metadata items.\n\nRaises:\n    ClientError: If SuccessFactors API call fails.", "id": "f16189:c0:m4"}
{"signature": "def create_course_completion(self, user_id, payload):", "body": "url = self.enterprise_configuration.sapsf_base_url + self.global_sap_config.completion_status_api_path<EOL>return self._call_post_with_user_override(user_id, url, payload)<EOL>", "docstring": "Send a completion status payload to the SuccessFactors OCN Completion Status endpoint\n\nArgs:\n    user_id (str): The sap user id that the completion status is being sent for.\n    payload (str): JSON encoded object (serialized from SapSuccessFactorsLearnerDataTransmissionAudit)\n        containing completion status fields per SuccessFactors documentation.\n\nReturns:\n    The body of the response from SAP SuccessFactors, if successful\nRaises:\n    HTTPError: if we received a failure response code from SAP SuccessFactors", "id": "f16189:c0:m3"}
{"signature": "def enterprise_customer_name(self, obj):", "body": "return obj.enterprise_customer.name<EOL>", "docstring": "Returns: the name for the attached EnterpriseCustomer.\n\nArgs:\n    obj: The instance of SAPSuccessFactorsEnterpriseCustomerConfiguration\n        being rendered with this admin form.", "id": "f16190:c1:m0"}
{"signature": "def __init__(self, enterprise_configuration, client=SAPSuccessFactorsAPIClient):", "body": "super(SapSuccessFactorsContentMetadataTransmitter, self).__init__(<EOL>enterprise_configuration=enterprise_configuration,<EOL>client=client<EOL>)<EOL>", "docstring": "Use the ``SAPSuccessFactorsAPIClient`` for content metadata transmission to SAPSF.", "id": "f16192:c0:m0"}
{"signature": "def transmit(self, payload, **kwargs):", "body": "items_to_create, items_to_update, items_to_delete, transmission_map = self._partition_items(payload)<EOL>self._prepare_items_for_delete(items_to_delete)<EOL>prepared_items = {}<EOL>prepared_items.update(items_to_create)<EOL>prepared_items.update(items_to_update)<EOL>prepared_items.update(items_to_delete)<EOL>skip_metadata_transmission = False<EOL>for chunk in chunks(prepared_items, self.enterprise_configuration.transmission_chunk_size):<EOL><INDENT>chunked_items = list(chunk.values())<EOL>if skip_metadata_transmission:<EOL><INDENT>self._remove_failed_items(chunked_items, items_to_create, items_to_update, items_to_delete)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>self.client.update_content_metadata(self._serialize_items(chunked_items))<EOL><DEDENT>except ClientError as exc:<EOL><INDENT>LOGGER.error(<EOL>'<STR_LIT>',<EOL>len(chunked_items),<EOL>self.enterprise_configuration.enterprise_customer.name,<EOL>self.enterprise_configuration.channel_code,<EOL>)<EOL>LOGGER.error(exc)<EOL>self._remove_failed_items(chunked_items, items_to_create, items_to_update, items_to_delete)<EOL>skip_metadata_transmission = True<EOL><DEDENT><DEDENT><DEDENT>self._create_transmissions(items_to_create)<EOL>self._update_transmissions(items_to_update, transmission_map)<EOL>self._delete_transmissions(items_to_delete.keys())<EOL>", "docstring": "Transmit content metadata items to the integrated channel.", "id": "f16192:c0:m1"}
{"signature": "def _remove_failed_items(self, failed_items, items_to_create, items_to_update, items_to_delete):", "body": "for item in failed_items:<EOL><INDENT>content_metadata_id = item['<STR_LIT>']<EOL>items_to_create.pop(content_metadata_id, None)<EOL>items_to_update.pop(content_metadata_id, None)<EOL>items_to_delete.pop(content_metadata_id, None)<EOL><DEDENT>", "docstring": "Remove content metadata items from the `items_to_create`, `items_to_update`, `items_to_delete` dicts.\n\nArguments:\n    failed_items (list): Failed Items to be removed.\n    items_to_create (dict): dict containing the items created successfully.\n    items_to_update (dict): dict containing the items updated successfully.\n    items_to_delete (dict): dict containing the items deleted successfully.", "id": "f16192:c0:m2"}
{"signature": "def handle_transmission_error(self, learner_data, request_exception):", "body": "try:<EOL><INDENT>sys_msg = request_exception.response.content<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if '<STR_LIT>' in sys_msg:<EOL><INDENT>ecu = EnterpriseCustomerUser.objects.get(<EOL>enterprise_enrollments__id=learner_data.enterprise_course_enrollment_id)<EOL>ecu.active = False<EOL>ecu.save()<EOL>LOGGER.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>ecu.username, ecu.user_id, ecu.user_email, ecu.enterprise_customer<EOL>)<EOL>return<EOL><DEDENT><DEDENT>super(SapSuccessFactorsLearnerTransmitter, self).handle_transmission_error(learner_data, request_exception)<EOL>", "docstring": "Handle the case where the employee on SAPSF's side is marked as inactive.", "id": "f16193:c0:m2"}
{"signature": "def transform_courserun_schedule(self, content_metadata_item):", "body": "start = content_metadata_item.get('<STR_LIT:start>') or UNIX_MIN_DATE_STRING<EOL>end = content_metadata_item.get('<STR_LIT:end>') or UNIX_MAX_DATE_STRING<EOL>return [{<EOL>'<STR_LIT>': parse_datetime_to_epoch_millis(start),<EOL>'<STR_LIT>': parse_datetime_to_epoch_millis(end),<EOL>'<STR_LIT>': current_time_is_in_interval(start, end)<EOL>}]<EOL>", "docstring": "Return the schedule of the courseun content item.", "id": "f16195:c0:m10"}
{"signature": "def transform_description(self, content_metadata_item):", "body": "description_with_locales = []<EOL>for locale in self.enterprise_configuration.get_locales():<EOL><INDENT>description_with_locales.append({<EOL>'<STR_LIT>': locale,<EOL>'<STR_LIT:value>': (<EOL>content_metadata_item.get('<STR_LIT>') or<EOL>content_metadata_item.get('<STR_LIT>') or<EOL>content_metadata_item.get('<STR_LIT:title>', '<STR_LIT>')<EOL>)<EOL>})<EOL><DEDENT>return description_with_locales<EOL>", "docstring": "Return the description of the content item.", "id": "f16195:c0:m3"}
{"signature": "def transform_provider_id(self, content_metadata_item):  ", "body": "return self.enterprise_configuration.provider_id<EOL>", "docstring": "Return the provider ID from the integrated channel configuration.", "id": "f16195:c0:m0"}
{"signature": "def transform_courserun_description(self, content_metadata_item):", "body": "description_with_locales = []<EOL>content_metadata_language_code = transform_language_code(content_metadata_item.get('<STR_LIT>', '<STR_LIT>'))<EOL>for locale in self.enterprise_configuration.get_locales(default_locale=content_metadata_language_code):<EOL><INDENT>description_with_locales.append({<EOL>'<STR_LIT>': locale,<EOL>'<STR_LIT:value>': (<EOL>content_metadata_item['<STR_LIT>'] or<EOL>content_metadata_item['<STR_LIT>'] or<EOL>content_metadata_item['<STR_LIT:title>'] or<EOL>'<STR_LIT>'<EOL>)<EOL>})<EOL><DEDENT>return description_with_locales<EOL>", "docstring": "Return the description of the courserun content item.", "id": "f16195:c0:m9"}
{"signature": "def transform_launch_points(self, content_metadata_item):", "body": "return [{<EOL>'<STR_LIT>': self.enterprise_configuration.provider_id,<EOL>'<STR_LIT>': content_metadata_item['<STR_LIT>'],<EOL>'<STR_LIT>': content_metadata_item['<STR_LIT:title>'],<EOL>'<STR_LIT>': self.get_content_id(content_metadata_item),<EOL>'<STR_LIT>': <NUM_LIT:3>,  <EOL>'<STR_LIT>': True,  <EOL>'<STR_LIT>': content_metadata_item['<STR_LIT>'],<EOL>}]<EOL>", "docstring": "Return the content metadata item launch points.\n\nSAPSF allows you to transmit an array of content launch points which\nare meant to represent sections of a content item which a learner can\nlaunch into from SAPSF. Currently, we only provide a single launch\npoint for a content item.", "id": "f16195:c0:m5"}
{"signature": "def transform_courserun_title(self, content_metadata_item):", "body": "title = content_metadata_item.get('<STR_LIT:title>') or '<STR_LIT>'<EOL>course_run_start = content_metadata_item.get('<STR_LIT:start>')<EOL>if course_run_start:<EOL><INDENT>if course_available_for_enrollment(content_metadata_item):<EOL><INDENT>title += '<STR_LIT>'.format(<EOL>parse_lms_api_datetime(course_run_start),<EOL>starts=_('<STR_LIT>')<EOL>)<EOL><DEDENT>else:<EOL><INDENT>title += '<STR_LIT>'.format(<EOL>parse_lms_api_datetime(course_run_start),<EOL>enrollment_closed=_('<STR_LIT>')<EOL>)<EOL><DEDENT><DEDENT>title_with_locales = []<EOL>content_metadata_language_code = transform_language_code(content_metadata_item.get('<STR_LIT>', '<STR_LIT>'))<EOL>for locale in self.enterprise_configuration.get_locales(default_locale=content_metadata_language_code):<EOL><INDENT>title_with_locales.append({<EOL>'<STR_LIT>': locale,<EOL>'<STR_LIT:value>': title<EOL>})<EOL><DEDENT>return title_with_locales<EOL>", "docstring": "Return the title of the courserun content item.", "id": "f16195:c0:m8"}
{"signature": "def transform_language_code(code):", "body": "if code is None:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>components = code.split('<STR_LIT:->', <NUM_LIT:2>)<EOL>language_code = components[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>country_code = components[<NUM_LIT:1>]<EOL><DEDENT>except IndexError:<EOL><INDENT>country_code = '<STR_LIT:_>'<EOL><DEDENT>language_family = SUCCESSFACTORS_OCN_LANGUAGE_CODES.get(language_code)<EOL>if not language_family:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>return language_family.get(country_code, language_family['<STR_LIT:_>'])<EOL>", "docstring": "Transform ISO language code (e.g. en-us) to the language name expected by SAPSF.", "id": "f16196:m1"}
{"signature": "def serialize(self, *args, **kwargs):  ", "body": "return json.dumps(self._payload_data(), sort_keys=True)<EOL>", "docstring": "Return a JSON-serialized representation.\n\nSort the keys so the result is consistent and testable.\n\n# TODO: When we refactor to use a serialization flow consistent with how course metadata\n# is serialized, remove the serialization here and make the learner data exporter handle the work.", "id": "f16198:c2:m3"}
{"signature": "def get_content_metadata_exporter(self, user):", "body": "return SapSuccessFactorsContentMetadataExporter(user, self)<EOL>", "docstring": "Return a ``SapSuccessFactorsContentMetadataExporter`` instance.", "id": "f16198:c1:m8"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return uniquely identifying string representation.", "id": "f16198:c1:m2"}
{"signature": "def get_content_metadata_transmitter(self):", "body": "return SapSuccessFactorsContentMetadataTransmitter(self)<EOL>", "docstring": "Return a ``SapSuccessFactorsContentMetadataTransmitter`` instance.", "id": "f16198:c1:m7"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return uniquely identifying string representation.", "id": "f16198:c0:m1"}
{"signature": "def get_learner_manger(self):", "body": "return SapSuccessFactorsLearnerManger(self)<EOL>", "docstring": "Return a ``SapSuccessFactorsLearnerManger`` instance.", "id": "f16198:c1:m9"}
{"signature": "def get_learner_data_exporter(self, user):", "body": "return SapSuccessFactorsLearnerExporter(user, self)<EOL>", "docstring": "Return a ``SapSuccessFactorsLearnerDataExporter`` instance.", "id": "f16198:c1:m6"}
{"signature": "def _payload_data(self):", "body": "return dict(<EOL>userID=self.sapsf_user_id,<EOL>courseID=self.course_id,<EOL>providerID=self.provider_id,<EOL>courseCompleted=\"<STR_LIT:true>\" if self.course_completed else \"<STR_LIT:false>\",<EOL>completedTimestamp=self.completed_timestamp,<EOL>grade=self.grade,<EOL>)<EOL>", "docstring": "Convert the audit record's fields into SAP SuccessFactors key/value pairs.", "id": "f16198:c2:m4"}
{"signature": "@property<EOL><INDENT>def provider_id(self):<DEDENT>", "body": "return SAPSuccessFactorsGlobalConfiguration.current().provider_id<EOL>", "docstring": "Fetch ``provider_id`` from global configuration settings", "id": "f16198:c2:m2"}
{"signature": "def unlink_inactive_learners(self):", "body": "sap_learner_manager = self.get_learner_manger()<EOL>sap_learner_manager.unlink_learners()<EOL>", "docstring": "Unlink inactive SAP learners from their related enterprises", "id": "f16198:c1:m10"}
{"signature": "def get_course_duration(self, obj):", "body": "duration = obj.end - obj.start if obj.start and obj.end else None<EOL>if duration:<EOL><INDENT>return strfdelta(duration, '<STR_LIT>')<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Get course's duration as a timedelta.\n\nArguments:\n    obj (CourseOverview): CourseOverview object\n\nReturns:\n    (timedelta): Duration of a course.", "id": "f16199:c1:m0"}
{"signature": "def get_enterprise_user_id(self, obj):", "body": "<EOL>enterprise_learner = EnterpriseCustomerUser.objects.filter(user_id=obj.id).first()<EOL>return enterprise_learner and enterprise_learner.id<EOL>", "docstring": "Get enterprise user id from user object.\n\nArguments:\n    obj (User): Django User object\n\nReturns:\n    (int): Primary Key identifier for enterprise user object.", "id": "f16199:c0:m0"}
{"signature": "def save_statement(self, statement):", "body": "response = self.lrs.save_statement(statement)<EOL>if not response:<EOL><INDENT>raise ClientError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Save xAPI statement.\n\nArguments:\n    statement (EnterpriseStatement): xAPI Statement to send to the LRS.\n\nRaises:\n    ClientError: If xAPI statement fails to save.", "id": "f16203:c0:m2"}
{"signature": "def handle(self, *args, **options):", "body": "if not CourseEnrollment:<EOL><INDENT>raise NotConnectedToOpenEdX(\"<STR_LIT>\")<EOL><DEDENT>days, enterprise_customer = self.parse_arguments(*args, **options)<EOL>if enterprise_customer:<EOL><INDENT>try:<EOL><INDENT>lrs_configuration = XAPILRSConfiguration.objects.get(<EOL>active=True,<EOL>enterprise_customer=enterprise_customer<EOL>)<EOL><DEDENT>except XAPILRSConfiguration.DoesNotExist:<EOL><INDENT>raise CommandError('<STR_LIT>'.format(<EOL>enterprise_customer=enterprise_customer.name<EOL>))<EOL><DEDENT>self.send_xapi_statements(lrs_configuration, days)<EOL><DEDENT>else:<EOL><INDENT>for lrs_configuration in XAPILRSConfiguration.objects.filter(active=True):<EOL><INDENT>self.send_xapi_statements(lrs_configuration, days)<EOL><DEDENT><DEDENT>", "docstring": "Send xAPI statements.", "id": "f16207:c0:m2"}
{"signature": "def get_course_enrollments(self, enterprise_customer, days):", "body": "return CourseEnrollment.objects.filter(<EOL>created__gt=datetime.datetime.now() - datetime.timedelta(days=days)<EOL>).filter(<EOL>user_id__in=enterprise_customer.enterprise_customer_users.values_list('<STR_LIT>', flat=True)<EOL>)<EOL>", "docstring": "Get course enrollments for all the learners of given enterprise customer.\n\nArguments:\n    enterprise_customer (EnterpriseCustomer): Include Course enrollments for learners\n        of this enterprise customer.\n    days (int): Include course enrollment of this number of days.\n\nReturns:\n    (list): A list of CourseEnrollment objects.", "id": "f16207:c0:m4"}
{"signature": "@staticmethod<EOL><INDENT>def parse_arguments(*args, **options):  <DEDENT>", "body": "days = options.get('<STR_LIT>', <NUM_LIT:1>)<EOL>enterprise_customer_uuid = options.get('<STR_LIT>')<EOL>enterprise_customer = None<EOL>if enterprise_customer_uuid:<EOL><INDENT>try:<EOL><INDENT>enterprise_customer = EnterpriseCustomer.objects.get(uuid=enterprise_customer_uuid)<EOL><DEDENT>except EnterpriseCustomer.DoesNotExist:<EOL><INDENT>raise CommandError('<STR_LIT>'.format(<EOL>enterprise_customer_uuid=enterprise_customer_uuid<EOL>))<EOL><DEDENT><DEDENT>return days, enterprise_customer<EOL>", "docstring": "Parse and validate arguments for send_course_enrollments command.\n\nArguments:\n    *args: Positional arguments passed to the command\n    **options: optional arguments passed to the command\n\nReturns:\n    A tuple containing parsed values for\n    1. days (int): Integer showing number of days to lookup enterprise enrollments,\n        course completion etc and send to xAPI LRS\n    2. enterprise_customer_uuid (EnterpriseCustomer): Enterprise Customer if present then\n        send xAPI statements just for this enterprise.", "id": "f16207:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def prefetch_courses(persistent_course_grades):<DEDENT>", "body": "return CourseOverview.get_from_ids_if_exists(<EOL>[grade.course_id for grade in persistent_course_grades]<EOL>)<EOL>", "docstring": "Prefetch courses from the list of course_ids present in the persistent_course_grades.\n\nArguments:\n    persistent_course_grades (list): A list of PersistentCourseGrade.\n\nReturns:\n    (dict): A dictionary containing course_id to course_overview mapping.", "id": "f16208:c0:m6"}
{"signature": "@staticmethod<EOL><INDENT>def parse_arguments(*args, **options):  <DEDENT>", "body": "days = options.get('<STR_LIT>', <NUM_LIT:1>)<EOL>enterprise_customer_uuid = options.get('<STR_LIT>')<EOL>enterprise_customer = None<EOL>if enterprise_customer_uuid:<EOL><INDENT>try:<EOL><INDENT>enterprise_customer = EnterpriseCustomer.objects.get(uuid=enterprise_customer_uuid)<EOL><DEDENT>except EnterpriseCustomer.DoesNotExist:<EOL><INDENT>raise CommandError('<STR_LIT>'.format(<EOL>enterprise_customer_uuid=enterprise_customer_uuid<EOL>))<EOL><DEDENT><DEDENT>return days, enterprise_customer<EOL>", "docstring": "Parse and validate arguments for the command.\n\nArguments:\n    *args: Positional arguments passed to the command\n    **options: Optional arguments passed to the command\n\nReturns:\n    A tuple containing parsed values for\n    1. days (int): Integer showing number of days to lookup enterprise enrollments,\n        course completion etc and send to xAPI LRS\n    2. enterprise_customer_uuid (EnterpriseCustomer): Enterprise Customer if present then\n        send xAPI statements just for this enterprise.", "id": "f16208:c0:m1"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return uniquely identifying string representation.", "id": "f16211:c0:m1"}
{"signature": "@property<EOL><INDENT>def authorization_header(self):<DEDENT>", "body": "return '<STR_LIT>'.format(<EOL>base64.b64encode('<STR_LIT>'.format(key=self.key, secret=self.secret).encode()).decode()<EOL>)<EOL>", "docstring": "Authorization header for authenticating requests to LRS.", "id": "f16211:c0:m2"}
{"signature": "def __str__(self):", "body": "return '<STR_LIT>'.format(<EOL>enterprise_name=self.enterprise_customer.name<EOL>)<EOL>", "docstring": "Return human-readable string representation.", "id": "f16211:c0:m0"}
{"signature": "def get_result(self, course_grade):", "body": "return Result(<EOL>score=Score(<EOL>scaled=course_grade.percent,<EOL>raw=course_grade.percent * <NUM_LIT:100>,<EOL>min=MIN_SCORE,<EOL>max=MAX_SCORE,<EOL>),<EOL>success=course_grade.passed,<EOL>completion=course_grade.passed<EOL>)<EOL>", "docstring": "Get result for the statement.\n\nArguments:\n    course_grade (CourseGrade): Course grade.", "id": "f16212:c0:m2"}
{"signature": "def get_actor(self, username, email):", "body": "return Agent(<EOL>name=username,<EOL>mbox='<STR_LIT>'.format(email=email),<EOL>)<EOL>", "docstring": "Get actor for the statement.", "id": "f16213:c0:m0"}
{"signature": "def get_context(self, user_details, course_details):", "body": "return Context(<EOL>extensions=Extensions(<EOL>{<EOL>'<STR_LIT>': user_details,<EOL>'<STR_LIT>': course_details,<EOL>},<EOL>)<EOL>)<EOL>", "docstring": "Get Context for the statement.", "id": "f16213:c0:m1"}
{"signature": "def get_object(self, name, description):", "body": "return Activity(<EOL>id=X_API_ACTIVITY_COURSE,<EOL>definition=ActivityDefinition(<EOL>name=LanguageMap({'<STR_LIT>': (name or '<STR_LIT>').encode(\"<STR_LIT:ascii>\", \"<STR_LIT:ignore>\").decode('<STR_LIT:ascii>')}),<EOL>description=LanguageMap({'<STR_LIT>': (description or '<STR_LIT>').encode(\"<STR_LIT:ascii>\", \"<STR_LIT:ignore>\").decode('<STR_LIT:ascii>')}),<EOL>),<EOL>)<EOL>", "docstring": "Get object for the statement.", "id": "f16213:c0:m2"}
{"signature": "def get_verb(self):", "body": "return Verb(<EOL>id=X_API_VERB_REGISTERED,<EOL>display=LanguageMap({'<STR_LIT>': '<STR_LIT>'}),<EOL>)<EOL>", "docstring": "Get verb for course enrollment statement.", "id": "f16214:c0:m1"}
{"signature": "def _create_session(self, scope):", "body": "now = datetime.datetime.utcnow()<EOL>if self.session is None or self.expires_at is None or now >= self.expires_at:<EOL><INDENT>if self.session:<EOL><INDENT>self.session.close()<EOL><DEDENT>oauth_access_token, expires_at = self._get_oauth_access_token(<EOL>self.enterprise_configuration.key,<EOL>self.enterprise_configuration.secret,<EOL>self.enterprise_configuration.degreed_user_id,<EOL>self.enterprise_configuration.degreed_user_password,<EOL>scope<EOL>)<EOL>session = requests.Session()<EOL>session.timeout = self.SESSION_TIMEOUT<EOL>session.headers['<STR_LIT>'] = '<STR_LIT>'.format(oauth_access_token)<EOL>session.headers['<STR_LIT>'] = '<STR_LIT:application/json>'<EOL>self.session = session<EOL>self.expires_at = expires_at<EOL><DEDENT>", "docstring": "Instantiate a new session object for use in connecting with Degreed", "id": "f16222:c0:m9"}
{"signature": "def _delete(self, url, data, scope):", "body": "self._create_session(scope)<EOL>response = self.session.delete(url, data=data)<EOL>return response.status_code, response.text<EOL>", "docstring": "Make a DELETE request using the session object to a Degreed endpoint.\n\nArgs:\n    url (str): The url to send a DELETE request to.\n    data (str): The json encoded payload to DELETE.\n    scope (str): Must be one of the scopes Degreed expects:\n                - `CONTENT_PROVIDER_SCOPE`\n                - `COMPLETION_PROVIDER_SCOPE`", "id": "f16222:c0:m8"}
{"signature": "def delete_content_metadata(self, serialized_data):", "body": "self._sync_content_metadata(serialized_data, '<STR_LIT>')<EOL>", "docstring": "Delete content metadata using the Degreed course content API.\n\nArgs:\n    serialized_data: JSON-encoded object containing content metadata.\n\nRaises:\n    ClientError: If Degreed API request fails.", "id": "f16222:c0:m5"}
{"signature": "def _get_oauth_access_token(self, client_id, client_secret, user_id, user_password, scope):", "body": "response = requests.post(<EOL>urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.oauth_api_path),<EOL>data={<EOL>'<STR_LIT>': '<STR_LIT:password>',<EOL>'<STR_LIT:username>': user_id,<EOL>'<STR_LIT:password>': user_password,<EOL>'<STR_LIT>': scope,<EOL>},<EOL>auth=(client_id, client_secret),<EOL>headers={'<STR_LIT:Content-Type>': '<STR_LIT>'}<EOL>)<EOL>response.raise_for_status()<EOL>data = response.json()<EOL>try:<EOL><INDENT>expires_at = data['<STR_LIT>'] + int(time.time())<EOL>return data['<STR_LIT>'], datetime.datetime.utcfromtimestamp(expires_at)<EOL><DEDENT>except KeyError:<EOL><INDENT>raise requests.RequestException(response=response)<EOL><DEDENT>", "docstring": "Retrieves OAuth 2.0 access token using the client credentials grant.\n\n        Args:\n            client_id (str): API client ID\n            client_secret (str): API client secret\n            user_id (str): Degreed company ID\n            user_password (str): Degreed user password\n            scope (str): Must be one of the scopes Degreed expects:\n                        - `CONTENT_PROVIDER_SCOPE`\n                        - `COMPLETION_PROVIDER_SCOPE`\n\n        Returns:\n            tuple: Tuple containing access token string and expiration datetime.\n        Raises:\n            HTTPError: If we received a failure response code from Degreed.\n            RequestException: If an unexpected response format was received that we could not parse.", "id": "f16222:c0:m10"}
{"signature": "def __init__(self, enterprise_configuration):", "body": "super(DegreedAPIClient, self).__init__(enterprise_configuration)<EOL>self.global_degreed_config = apps.get_model('<STR_LIT>', '<STR_LIT>').current()<EOL>self.session = None<EOL>self.expires_at = None<EOL>", "docstring": "Instantiate a new client.\n\nArgs:\n    enterprise_configuration (DegreedEnterpriseCustomerConfiguration): An enterprise customers's\n    configuration model for connecting with Degreed", "id": "f16222:c0:m0"}
{"signature": "def update_content_metadata(self, serialized_data):", "body": "self._sync_content_metadata(serialized_data, '<STR_LIT>')<EOL>", "docstring": "Update content metadata using the Degreed course content API.\n\nArgs:\n    serialized_data: JSON-encoded object containing content metadata.\n\nRaises:\n    ClientError: If Degreed API request fails.", "id": "f16222:c0:m4"}
{"signature": "def enterprise_customer_name(self, obj):", "body": "return obj.enterprise_customer.name<EOL>", "docstring": "Returns: the name for the attached EnterpriseCustomer.\n\nArgs:\n    obj: The instance of DegreedEnterpriseCustomerConfiguration\n        being rendered with this admin form.", "id": "f16223:c1:m0"}
{"signature": "def transform_content_language(self, content_metadata_item):  ", "body": "<EOL>return '<STR_LIT>'<EOL>", "docstring": "Return the ISO 639-1 language code that Degreed expects.\n\nExample:\n    en-us -> en\n    None -> en", "id": "f16228:c0:m2"}
{"signature": "def transform_description(self, content_metadata_item):", "body": "full_description = content_metadata_item.get('<STR_LIT>') or '<STR_LIT>'<EOL>if <NUM_LIT:0> < len(full_description) <= self.LONG_STRING_LIMIT:  <EOL><INDENT>return full_description<EOL><DEDENT>return content_metadata_item.get('<STR_LIT>') or content_metadata_item.get('<STR_LIT:title>') or '<STR_LIT>'<EOL>", "docstring": "Return the transformed version of the course description.\n\nWe choose one value out of the course's full description, short description, and title\ndepending on availability and length limits.", "id": "f16228:c0:m0"}
{"signature": "def transform_image(self, content_metadata_item):", "body": "image_url = '<STR_LIT>'<EOL>if content_metadata_item['<STR_LIT>'] in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>image_url = content_metadata_item.get('<STR_LIT>')<EOL><DEDENT>elif content_metadata_item['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>image_url = content_metadata_item.get('<STR_LIT>')<EOL><DEDENT>return image_url<EOL>", "docstring": "Return the image URI of the content item.", "id": "f16228:c0:m3"}
{"signature": "def get_learner_data_exporter(self, user):", "body": "return DegreedLearnerExporter(user, self)<EOL>", "docstring": "Return a ``DegreedLearnerDataExporter`` instance.", "id": "f16230:c1:m4"}
{"signature": "def __str__(self):", "body": "return \"<STR_LIT>\".format(<EOL>enterprise_name=self.enterprise_customer.name<EOL>)<EOL>", "docstring": "Return human-readable string representation.", "id": "f16230:c1:m0"}
{"signature": "def __str__(self):", "body": "return \"<STR_LIT>\".format(id=self.id)<EOL>", "docstring": "Return a human-readable string representation of the object.", "id": "f16230:c0:m0"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return uniquely identifying string representation.", "id": "f16230:c0:m1"}
{"signature": "def get_content_metadata_exporter(self, user):", "body": "return DegreedContentMetadataExporter(user, self)<EOL>", "docstring": "Return a ``DegreedContentMetadataExporter`` instance.", "id": "f16230:c1:m6"}
{"signature": "def __repr__(self):", "body": "return self.__str__()<EOL>", "docstring": "Return uniquely identifying string representation.", "id": "f16230:c1:m1"}
{"signature": "def strfdelta(tdelta, fmt='<STR_LIT>', input_type='<STR_LIT>'):", "body": "<EOL>if input_type == '<STR_LIT>':<EOL><INDENT>remainder = int(tdelta.total_seconds())<EOL><DEDENT>elif input_type in ['<STR_LIT:s>', '<STR_LIT>']:<EOL><INDENT>remainder = int(tdelta)<EOL><DEDENT>elif input_type in ['<STR_LIT:m>', '<STR_LIT>']:<EOL><INDENT>remainder = int(tdelta) * <NUM_LIT><EOL><DEDENT>elif input_type in ['<STR_LIT:h>', '<STR_LIT>']:<EOL><INDENT>remainder = int(tdelta) * <NUM_LIT><EOL><DEDENT>elif input_type in ['<STR_LIT:d>', '<STR_LIT>']:<EOL><INDENT>remainder = int(tdelta) * <NUM_LIT><EOL><DEDENT>elif input_type in ['<STR_LIT:w>', '<STR_LIT>']:<EOL><INDENT>remainder = int(tdelta) * <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>f = Formatter()<EOL>desired_fields = [field_tuple[<NUM_LIT:1>] for field_tuple in f.parse(fmt)]<EOL>possible_fields = ('<STR_LIT>', '<STR_LIT:D>', '<STR_LIT:H>', '<STR_LIT:M>', '<STR_LIT:S>')<EOL>constants = {'<STR_LIT>': <NUM_LIT>, '<STR_LIT:D>': <NUM_LIT>, '<STR_LIT:H>': <NUM_LIT>, '<STR_LIT:M>': <NUM_LIT>, '<STR_LIT:S>': <NUM_LIT:1>}<EOL>values = {}<EOL>for field in possible_fields:<EOL><INDENT>if field in desired_fields and field in constants:<EOL><INDENT>values[field], remainder = divmod(remainder, constants[field])<EOL><DEDENT><DEDENT>return f.format(fmt, **values)<EOL>", "docstring": "Convert a datetime.timedelta object or a regular number to a custom-formatted string.\n\nThis function works like the strftime() method works for datetime.datetime\nobjects.\n\nThe fmt argument allows custom formatting to be specified.  Fields can\ninclude seconds, minutes, hours, days, and weeks.  Each field is optional.\n\nArguments:\n    tdelta (datetime.timedelta, int): time delta object containing the duration or an integer\n        to go with the input_type.\n    fmt (str): Expected format of the time delta. place holders can only be one of the following.\n        1. 
D to extract days from time delta\n        2. H to extract hours from time delta\n        3. M to extract minutes from time delta\n        4. S to extract seconds from timedelta\n    input_type (str):  The input_type argument allows tdelta to be a regular number instead of the\n        default, which is a datetime.timedelta object.\n        Valid input_type strings:\n            1. 's', 'seconds',\n            2. 'm', 'minutes',\n            3. 'h', 'hours',\n            4. 'd', 'days',\n            5. 'w', 'weeks'\nReturns:\n    (str): timedelta object interpolated into a string following the given format.\n\nExamples:\n    '{D:02}d {H:02}h {M:02}m {S:02}s' --> '05d 08h 04m 02s' (default)\n    '{W}w {D}d {H}:{M:02}:{S:02}'     --> '4w 5d 8:04:02'\n    '{D:2}d {H:2}:{M:02}:{S:02}'      --> ' 5d  8:04:02'\n    '{H}h {S}s'                       --> '72h 800s'", "id": "f16231:m4"}
{"signature": "def chunks(dictionary, chunk_size):", "body": "iterable = iter(dictionary)<EOL>for __ in range(<NUM_LIT:0>, len(dictionary), chunk_size):<EOL><INDENT>yield {key: dictionary[key] for key in islice(iterable, chunk_size)}<EOL><DEDENT>", "docstring": "Yield successive n-sized chunks from dictionary.", "id": "f16231:m3"}
{"signature": "def delete_content_metadata(self, serialized_data):", "body": "raise NotImplementedError()<EOL>", "docstring": "Delete content metadata using the integrated channel's API.", "id": "f16238:c0:m5"}
{"signature": "def create_content_metadata(self, serialized_data):", "body": "raise NotImplementedError()<EOL>", "docstring": "Create content metadata using the integrated channel's API.", "id": "f16238:c0:m3"}
{"signature": "def update_content_metadata(self, serialized_data):", "body": "raise NotImplementedError()<EOL>", "docstring": "Update content metadata using the integrated channel's API.", "id": "f16238:c0:m4"}
{"signature": "def delete_course_completion(self, user_id, payload):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Make a DELETE request to the integrated channel's completion API to update completion status for a user.\n\n:param user_id: The ID of the user for whom completion status must be updated.\n:param payload: The JSON encoded payload containing the completion data.", "id": "f16238:c0:m2"}
{"signature": "def create_course_completion(self, user_id, payload):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Make a POST request to the integrated channel's completion API to update completion status for a user.\n\n:param user_id: The ID of the user for whom completion status must be updated.\n:param payload: The JSON encoded payload containing the completion data.", "id": "f16238:c0:m1"}
{"signature": "def transmit(self, payload, **kwargs):", "body": "items_to_create, items_to_update, items_to_delete, transmission_map = self._partition_items(payload)<EOL>self._transmit_delete(items_to_delete)<EOL>self._transmit_create(items_to_create)<EOL>self._transmit_update(items_to_update, transmission_map)<EOL>", "docstring": "Transmit content metadata items to the integrated channel.", "id": "f16240:c0:m1"}
{"signature": "def __init__(self, enterprise_configuration, client=IntegratedChannelApiClient):", "body": "super(ContentMetadataTransmitter, self).__init__(<EOL>enterprise_configuration=enterprise_configuration,<EOL>client=client<EOL>)<EOL>", "docstring": "By default, use the abstract integrated channel API client which raises an error when used if not subclassed.", "id": "f16240:c0:m0"}
{"signature": "def _get_transmissions(self):", "body": "<EOL>ContentMetadataItemTransmission = apps.get_model(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>)<EOL>return ContentMetadataItemTransmission.objects.filter(<EOL>enterprise_customer=self.enterprise_configuration.enterprise_customer,<EOL>integrated_channel_code=self.enterprise_configuration.channel_code()<EOL>)<EOL>", "docstring": "Return the ContentMetadataItemTransmision models for previously\ntransmitted content metadata items.", "id": "f16240:c0:m8"}
{"signature": "def _create_transmissions(self, content_metadata_item_map):", "body": "<EOL>ContentMetadataItemTransmission = apps.get_model(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>)<EOL>transmissions = []<EOL>for content_id, channel_metadata in content_metadata_item_map.items():<EOL><INDENT>transmissions.append(<EOL>ContentMetadataItemTransmission(<EOL>enterprise_customer=self.enterprise_configuration.enterprise_customer,<EOL>integrated_channel_code=self.enterprise_configuration.channel_code(),<EOL>content_id=content_id,<EOL>channel_metadata=channel_metadata<EOL>)<EOL>)<EOL><DEDENT>ContentMetadataItemTransmission.objects.bulk_create(transmissions)<EOL>", "docstring": "Create ContentMetadataItemTransmision models for the given content metadata items.", "id": "f16240:c0:m9"}
{"signature": "def _transmit_create(self, channel_metadata_item_map):", "body": "for chunk in chunks(channel_metadata_item_map, self.enterprise_configuration.transmission_chunk_size):<EOL><INDENT>serialized_chunk = self._serialize_items(list(chunk.values()))<EOL>try:<EOL><INDENT>self.client.create_content_metadata(serialized_chunk)<EOL><DEDENT>except ClientError as exc:<EOL><INDENT>LOGGER.error(<EOL>'<STR_LIT>',<EOL>len(chunk),<EOL>self.enterprise_configuration.enterprise_customer.name,<EOL>self.enterprise_configuration.channel_code,<EOL>)<EOL>LOGGER.error(exc)<EOL><DEDENT>else:<EOL><INDENT>self._create_transmissions(chunk)<EOL><DEDENT><DEDENT>", "docstring": "Transmit content metadata creation to integrated channel.", "id": "f16240:c0:m5"}
{"signature": "def transmit(self, payload, **kwargs):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "The abstract interface method for sending exported data to an integrated channel through its API client.", "id": "f16241:c0:m1"}
{"signature": "def __init__(self, enterprise_configuration, client=None):", "body": "self.enterprise_configuration = enterprise_configuration<EOL>self.client = client(enterprise_configuration) if client else None<EOL>", "docstring": "Prepares a configuration and a client to be used to transmit data to an integrated channel.\n\nArguments:\n    * enterprise_configuration - The configuration connecting an enterprise to an integrated channel.\n    * client - The REST API client that'll transmit serialized data.", "id": "f16241:c0:m0"}
{"signature": "def handle_transmission_error(self, learner_data, request_exception):", "body": "try:<EOL><INDENT>sys_msg = request_exception.response.content<EOL><DEDENT>except AttributeError:<EOL><INDENT>sys_msg = '<STR_LIT>'<EOL><DEDENT>LOGGER.error(<EOL>(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>),<EOL>learner_data.enterprise_course_enrollment_id,<EOL>learner_data,<EOL>str(request_exception),<EOL>sys_msg<EOL>)<EOL>", "docstring": "Handle the case where the transmission fails.", "id": "f16242:c0:m2"}
{"signature": "def export(self):", "body": "content_metadata_export = {}<EOL>content_metadata_items = self.enterprise_api.get_content_metadata(self.enterprise_customer)<EOL>LOGGER.info('<STR_LIT>', self.enterprise_customer.name)<EOL>for item in content_metadata_items:<EOL><INDENT>transformed = self._transform_item(item)<EOL>LOGGER.info(<EOL>'<STR_LIT>',<EOL>self.enterprise_configuration,<EOL>json.dumps(transformed, indent=<NUM_LIT:4>),<EOL>)<EOL>content_metadata_item_export = ContentMetadataItemExport(item, transformed)<EOL>content_metadata_export[content_metadata_item_export.content_id] = content_metadata_item_export<EOL><DEDENT>return OrderedDict(sorted(content_metadata_export.items()))<EOL>", "docstring": "Return the exported and transformed content metadata as a dictionary.", "id": "f16244:c0:m1"}
{"signature": "def export(self):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Export (read: serialize) data to be used by a transmitter to transmit to an integrated channel.", "id": "f16245:c0:m1"}
{"signature": "def get_learner_data_records(self, enterprise_enrollment, completed_date=None, grade=None, is_passing=False):", "body": "<EOL>LearnerDataTransmissionAudit = apps.get_model('<STR_LIT>', '<STR_LIT>')<EOL>completed_timestamp = None<EOL>course_completed = False<EOL>if completed_date is not None:<EOL><INDENT>completed_timestamp = parse_datetime_to_epoch_millis(completed_date)<EOL>course_completed = is_passing<EOL><DEDENT>return [<EOL>LearnerDataTransmissionAudit(<EOL>enterprise_course_enrollment_id=enterprise_enrollment.id,<EOL>course_id=enterprise_enrollment.course_id,<EOL>course_completed=course_completed,<EOL>completed_timestamp=completed_timestamp,<EOL>grade=grade,<EOL>)<EOL>]<EOL>", "docstring": "Generate a learner data transmission audit with fields properly filled in.", "id": "f16246:c0:m5"}
{"signature": "def _collect_certificate_data(self, enterprise_enrollment):", "body": "if self.certificates_api is None:<EOL><INDENT>self.certificates_api = CertificatesApiClient(self.user)<EOL><DEDENT>course_id = enterprise_enrollment.course_id<EOL>username = enterprise_enrollment.enterprise_customer_user.user.username<EOL>try:<EOL><INDENT>certificate = self.certificates_api.get_course_certificate(course_id, username)<EOL>completed_date = certificate.get('<STR_LIT>')<EOL>if completed_date:<EOL><INDENT>completed_date = parse_datetime(completed_date)<EOL><DEDENT>else:<EOL><INDENT>completed_date = timezone.now()<EOL><DEDENT>is_passing = certificate.get('<STR_LIT>')<EOL>grade = self.grade_passing if is_passing else self.grade_failing<EOL><DEDENT>except HttpNotFoundError:<EOL><INDENT>completed_date = None<EOL>grade = self.grade_incomplete<EOL>is_passing = False<EOL><DEDENT>return completed_date, grade, is_passing<EOL>", "docstring": "Collect the learner completion data from the course certificate.\n\nUsed for Instructor-paced courses.\n\nIf no certificate is found, then returns the completed_date = None, grade = In Progress, on the idea that a\ncertificate will eventually be generated.\n\nArgs:\n    enterprise_enrollment (EnterpriseCourseEnrollment): the enterprise enrollment record for which we need to\n    collect completion/grade data\n\nReturns:\n    completed_date: Date the course was completed, this is None if course has not been completed.\n    grade: Current grade in the course.\n    is_passing: Boolean indicating if the grade is a passing grade or not.", "id": "f16246:c0:m6"}
{"signature": "@property<EOL><INDENT>def grade_incomplete(self):<DEDENT>", "body": "return self.GRADE_INCOMPLETE<EOL>", "docstring": "Returns the string used for an incomplete course grade.", "id": "f16246:c0:m3"}
{"signature": "def export(self):", "body": "<EOL>enrollment_queryset = EnterpriseCourseEnrollment.objects.select_related(<EOL>'<STR_LIT>'<EOL>).filter(<EOL>enterprise_customer_user__enterprise_customer=self.enterprise_customer,<EOL>enterprise_customer_user__active=True,<EOL>).order_by('<STR_LIT>')<EOL>course_details = None<EOL>for enterprise_enrollment in enrollment_queryset:<EOL><INDENT>course_id = enterprise_enrollment.course_id<EOL>if course_details is None or course_details['<STR_LIT>'] != course_id:<EOL><INDENT>if self.course_api is None:<EOL><INDENT>self.course_api = CourseApiClient()<EOL><DEDENT>course_details = self.course_api.get_course_details(course_id)<EOL><DEDENT>if course_details is None:<EOL><INDENT>LOGGER.error(\"<STR_LIT>\",<EOL>enterprise_enrollment.pk, course_id)<EOL>continue<EOL><DEDENT>consent = DataSharingConsent.objects.proxied_get(<EOL>username=enterprise_enrollment.enterprise_customer_user.username,<EOL>course_id=enterprise_enrollment.course_id,<EOL>enterprise_customer=enterprise_enrollment.enterprise_customer_user.enterprise_customer<EOL>)<EOL>if not consent.granted or enterprise_enrollment.audit_reporting_disabled:<EOL><INDENT>continue<EOL><DEDENT>if course_details.get('<STR_LIT>') == '<STR_LIT>':<EOL><INDENT>completed_date, grade, is_passing = self._collect_certificate_data(enterprise_enrollment)<EOL><DEDENT>else:<EOL><INDENT>completed_date, grade, is_passing = self._collect_grades_data(enterprise_enrollment, course_details)<EOL><DEDENT>records = self.get_learner_data_records(<EOL>enterprise_enrollment=enterprise_enrollment,<EOL>completed_date=completed_date,<EOL>grade=grade,<EOL>is_passing=is_passing,<EOL>)<EOL>if records:<EOL><INDENT>for record in records:<EOL><INDENT>yield record<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Collect learner data for the ``EnterpriseCustomer`` where data sharing consent is granted.\n\nYields a learner data object for each enrollment, containing:\n\n* ``enterprise_enrollment``: 
``EnterpriseCourseEnrollment`` object.\n* ``completed_date``: datetime instance containing the course/enrollment completion date; None if not complete.\n  \"Course completion\" occurs for instructor-paced courses when course certificates are issued, and\n  for self-paced courses, when the course end date is passed, or when the learner achieves a passing grade.\n* ``grade``: string grade recorded for the learner in the course.", "id": "f16246:c0:m4"}
{"signature": "@shared_task<EOL>def transmit_content_metadata(username, channel_code, channel_pk):", "body": "start = time.time()<EOL>api_user = User.objects.get(username=username)<EOL>integrated_channel = INTEGRATED_CHANNEL_CHOICES[channel_code].objects.get(pk=channel_pk)<EOL>LOGGER.info('<STR_LIT>', integrated_channel)<EOL>try:<EOL><INDENT>integrated_channel.transmit_content_metadata(api_user)<EOL><DEDENT>except Exception:  <EOL><INDENT>LOGGER.exception(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>', username, channel_code, channel_pk<EOL>)<EOL><DEDENT>duration = time.time() - start<EOL>LOGGER.info(<EOL>'<STR_LIT>',<EOL>integrated_channel,<EOL>duration<EOL>)<EOL>", "docstring": "Task to send content metadata to each linked integrated channel.\n\nArguments:\n    username (str): The username of the User to be used for making API requests to retrieve content metadata.\n    channel_code (str): Capitalized identifier for the integrated channel.\n    channel_pk (str): Primary key for identifying integrated channel.", "id": "f16247:m0"}
{"signature": "def add_arguments(self, parser):", "body": "parser.add_argument(<EOL>'<STR_LIT>',<EOL>dest='<STR_LIT>',<EOL>required=True,<EOL>metavar='<STR_LIT>',<EOL>help=_('<STR_LIT>'),<EOL>)<EOL>super(Command, self).add_arguments(parser)<EOL>", "docstring": "Add required --api_user argument to the parser.", "id": "f16248:c0:m0"}
{"signature": "def handle(self, *args, **options):", "body": "username = options['<STR_LIT>']<EOL>try:<EOL><INDENT>User.objects.get(username=username)<EOL><DEDENT>except User.DoesNotExist:<EOL><INDENT>raise CommandError('<STR_LIT>'.format(username))<EOL><DEDENT>channels = self.get_integrated_channels(options)<EOL>for channel in channels:<EOL><INDENT>channel_code = channel.channel_code()<EOL>channel_pk = channel.pk<EOL>transmit_content_metadata.delay(username, channel_code, channel_pk)<EOL><DEDENT>", "docstring": "Transmit the courseware data for the EnterpriseCustomer(s) to the active integration channels.", "id": "f16249:c0:m1"}
{"signature": "def add_arguments(self, parser):", "body": "parser.add_argument(<EOL>'<STR_LIT>',<EOL>dest='<STR_LIT>',<EOL>default=None,<EOL>metavar='<STR_LIT>',<EOL>help=_('<STR_LIT>'<EOL>'<STR_LIT>'),<EOL>)<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>dest='<STR_LIT>',<EOL>default='<STR_LIT>',<EOL>metavar='<STR_LIT>',<EOL>help=_('<STR_LIT>'<EOL>'<STR_LIT>'),<EOL>choices=INTEGRATED_CHANNEL_CHOICES.keys(),<EOL>)<EOL>", "docstring": "Adds the optional arguments: ``--enterprise_customer``, ``--channel``", "id": "f16250:c0:m0"}
{"signature": "def handle(self, *args, **options):", "body": "channels = self.get_integrated_channels(options)<EOL>for channel in channels:<EOL><INDENT>channel_code = channel.channel_code()<EOL>channel_pk = channel.pk<EOL>if channel_code == '<STR_LIT>':<EOL><INDENT>unlink_inactive_learners.delay(channel_code, channel_pk)<EOL><DEDENT><DEDENT>", "docstring": "Unlink inactive EnterpriseCustomer(s) SAP learners.", "id": "f16251:c0:m0"}
{"signature": "def transmit_learner_data(self, user):", "body": "exporter = self.get_learner_data_exporter(user)<EOL>transmitter = self.get_learner_data_transmitter()<EOL>transmitter.transmit(exporter)<EOL>", "docstring": "Iterate over each learner data record and transmit it to the integrated channel.", "id": "f16253:c0:m5"}
{"signature": "def get_learner_data_transmitter(self):", "body": "return LearnerTransmitter(self)<EOL>", "docstring": "Returns the class that can transmit the learner course completion data to the integrated channel.", "id": "f16253:c0:m2"}
{"signature": "def get_learner_data_exporter(self, user):", "body": "return LearnerExporter(user, self)<EOL>", "docstring": "Returns the class that can serialize the learner course completion data to the integrated channel.", "id": "f16253:c0:m1"}
{"signature": "def _payload_data(self):", "body": "return dict(<EOL>courseID=self.course_id,<EOL>courseCompleted=\"<STR_LIT:true>\" if self.course_completed else \"<STR_LIT:false>\",<EOL>completedTimestamp=self.completed_timestamp,<EOL>grade=self.grade,<EOL>)<EOL>", "docstring": "Convert the audit record's fields into SAP SuccessFactors key/value pairs.", "id": "f16253:c1:m4"}
{"signature": "def serialize(self, *args, **kwargs):  ", "body": "return json.dumps(self._payload_data(), sort_keys=True)<EOL>", "docstring": "Return a JSON-serialized representation.\n\nSort the keys so the result is consistent and testable.\n\n# TODO: When we refactor to use a serialization flow consistent with how course metadata\n# is serialized, remove the serialization here and make the learner data exporter handle the work.", "id": "f16253:c1:m3"}
{"signature": "def transmit_content_metadata(self, user):", "body": "exporter = self.get_content_metadata_exporter(user)<EOL>transmitter = self.get_content_metadata_transmitter()<EOL>transmitter.transmit(exporter.export())<EOL>", "docstring": "Transmit content metadata to integrated channel.", "id": "f16253:c0:m6"}
{"signature": "def _get_messages_from_response_cookies(self, response):", "body": "<EOL>try:<EOL><INDENT>return messages.storage.cookie.CookieStorage(response)._decode(response.cookies['<STR_LIT>'].value)<EOL><DEDENT>except KeyError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Get django messages set in response cookies.", "id": "f16255:c0:m0"}
{"signature": "def _setup_embargo_api(self, api_mock, redirect_url=None):", "body": "api_mock.redirect_if_blocked.return_value = redirect_url<EOL>", "docstring": "Set up the embargo API module mock.", "id": "f16255:c2:m0"}
{"signature": "def _assert_request_message(self, request_message, expected_message_tags, expected_message_text):", "body": "self.assertEqual(request_message.tags, expected_message_tags)<EOL>self.assertEqual(request_message.message, expected_message_text)<EOL>", "docstring": "Verify the request message tags and text.", "id": "f16255:c0:m1"}
{"signature": "def _append_fresh_login_param(self, url):", "body": "fresh_login_param = urlencode({FRESH_LOGIN_PARAMETER: '<STR_LIT:yes>'})<EOL>if '<STR_LIT:?>' in url:<EOL><INDENT>return url + '<STR_LIT:&>' + fresh_login_param<EOL><DEDENT>return url + '<STR_LIT:?>' + fresh_login_param<EOL>", "docstring": "Append the FRESH_LOGIN_PARAMETER query parameter to the URL.", "id": "f16255:c3:m0"}
{"signature": "def _assert_consent_not_provided(self, response):", "body": "with self.assertRaises(Exception):<EOL><INDENT>self._assert_consent_provided(response)<EOL><DEDENT>", "docstring": "Assert that consent is not provided.", "id": "f16255:c1:m1"}
{"signature": "def _make_patch(self, patch_location, new=None):", "body": "patch_mock = new if new is not None else mock.Mock()<EOL>patcher = mock.patch(patch_location, patch_mock)<EOL>patcher.start()<EOL>self.addCleanup(patcher.stop)<EOL>return patch_mock<EOL>", "docstring": "Patch `patch_location`, register the patch to stop at test cleanup and return mock object.", "id": "f16256:c0:m1"}
{"signature": "def _make_catalog_api_location(self, catalog_api_member):", "body": "return \"<STR_LIT>\".format(self.CATALOG_API_PATCH_PREFIX, catalog_api_member)<EOL>", "docstring": "Return path for `catalog_api_member` to mock.", "id": "f16256:c0:m0"}
{"signature": "def get_course_details(course_key):", "body": "return FAKE_CATALOG_COURSE_DETAILS_RESPONSES.get(course_key, {}).copy()<EOL>", "docstring": "Fake implementation returning course details by key.\n\nArguments:\n    course_key (str): The course key of the course; not the unique-per-run key.\n\nReturns:\n    dict: Details of the course.", "id": "f16256:m1"}
{"signature": "def get_course_details(course_id):", "body": "if not re.match(COURSE_ID_REGEX, course_id):<EOL><INDENT>raise HttpServerError<EOL><DEDENT>try:<EOL><INDENT>return COURSE_DETAILS[course_id]<EOL><DEDENT>except KeyError:<EOL><INDENT>_raise_client_error(<EOL>\"<STR_LIT>\".format(course_id), \"<STR_LIT>\".format(course_id)<EOL>)<EOL><DEDENT>", "docstring": "Fake implementation returning data from the COURSE_DETAILS dictionary.", "id": "f16257:m1"}
{"signature": "def get_course_enrollment(username, course_id):", "body": "try:<EOL><INDENT>course_details = COURSE_DETAILS[course_id]<EOL><DEDENT>except KeyError:<EOL><INDENT>_raise_client_error(<EOL>\"<STR_LIT>\", \"<STR_LIT>\".format(course_id)<EOL>)<EOL><DEDENT>return {<EOL>\"<STR_LIT:user>\": username,<EOL>\"<STR_LIT>\": course_details,<EOL>\"<STR_LIT>\": True,<EOL>\"<STR_LIT>\": '<STR_LIT>',<EOL>\"<STR_LIT>\": datetime.datetime.now().strftime(\"<STR_LIT>\")<EOL>}<EOL>", "docstring": "Fake implementation.", "id": "f16257:m3"}
{"signature": "def _raise_client_error(url, message, **kwargs):", "body": "content = dict(message=message)<EOL>content.update(kwargs)<EOL>raise HttpClientError(<EOL>\"<STR_LIT>\".format(settings.ENTERPRISE_ENROLLMENT_API_URL, url),<EOL>content=json.dumps(content).encode(),<EOL>)<EOL>", "docstring": "Emulate a client error raised by edx_rest_api_client.", "id": "f16257:m0"}
{"signature": "def __init__(self, header, contents):", "body": "self._header = header<EOL>self._contents = contents<EOL>self._csv_stream = None<EOL>", "docstring": "Initialize context manager.\n\nArguments:\n    header (Iterable): Column headers.\n    contents (Iterable): CSV contents - each item represents a line.", "id": "f16259:c0:m0"}
{"signature": "def __enter__(self):", "body": "self._csv_stream = six.BytesIO()<EOL>writer = unicodecsv.writer(self._csv_stream)<EOL>writer.writerow(self._header)<EOL>writer.writerows(self._contents)<EOL>self._csv_stream.seek(<NUM_LIT:0>)<EOL>return self._csv_stream<EOL>", "docstring": "Enter context setting up context variables.", "id": "f16259:c0:m1"}
{"signature": "def build_fake_enterprise_catalog_detail(enterprise_catalog_uuid=FAKE_UUIDS[<NUM_LIT:1>], title=u'<STR_LIT>',<EOL>enterprise_customer_uuid=FAKE_UUIDS[<NUM_LIT:0>], previous_url=None, next_url=None,<EOL>paginated_content=fake_catalog_api.FAKE_SEARCH_ALL_RESULTS,<EOL>include_enterprise_context=False, add_utm_info=True):", "body": "if include_enterprise_context:<EOL><INDENT>paginated_content = update_search_with_enterprise_context(paginated_content, add_utm_info)<EOL><DEDENT>return {<EOL>'<STR_LIT:count>': paginated_content['<STR_LIT:count>'],<EOL>'<STR_LIT>': previous_url,<EOL>'<STR_LIT>': next_url,<EOL>'<STR_LIT>': enterprise_catalog_uuid,<EOL>'<STR_LIT:title>': title,<EOL>'<STR_LIT>': enterprise_customer_uuid,<EOL>'<STR_LIT>': paginated_content['<STR_LIT>'],<EOL>}<EOL>", "docstring": "Return fake EnterpriseCustomerCatalog detail API result.", "id": "f16260:m0"}
{"signature": "def build_enterprise_api_url(self, resource, *args, **kwargs):", "body": "return '<STR_LIT>'.format(<EOL>lms_root_url=settings.LMS_INTERNAL_ROOT_URL,<EOL>enterprise_api_uri=reverse(resource, args=args),<EOL>params=('<STR_LIT:?>' + urlencode(kwargs)) if kwargs else '<STR_LIT>',<EOL>)<EOL>", "docstring": "DRY method to make Enterprise API URLs.\n\nExample URL: 'enterprise/api/v1/enterprise-customer/{enterprise_uuid}/courses'", "id": "f16260:c0:m1"}
{"signature": "def mock_enterprise_customer_catalogs(self, enterprise_catalog_uuid):", "body": "responses.add(<EOL>responses.GET,<EOL>url=self.build_enterprise_api_url('<STR_LIT>', enterprise_catalog_uuid),<EOL>json=build_fake_enterprise_catalog_detail(<EOL>enterprise_catalog_uuid=enterprise_catalog_uuid,<EOL>include_enterprise_context=True,<EOL>),<EOL>status=<NUM_LIT:200>,<EOL>content_type='<STR_LIT:application/json>',<EOL>)<EOL>", "docstring": "DRY function to register enterprise customer catalog API.", "id": "f16260:c0:m5"}
{"signature": "def mock_ent_courses_api_with_error(self, enterprise_uuid):", "body": "responses.add(<EOL>responses.GET,<EOL>url=self.build_enterprise_api_url('<STR_LIT>', enterprise_uuid),<EOL>json={},<EOL>status=<NUM_LIT>,<EOL>content_type='<STR_LIT:application/json>',<EOL>)<EOL>", "docstring": "DRY function to register enterprise courses API to return error response.", "id": "f16260:c0:m6"}
{"signature": "def mock_empty_response(self, resource, *args, **kwargs):", "body": "responses.add(<EOL>responses.GET,<EOL>url=self.build_enterprise_api_url(resource, *args, **kwargs),<EOL>json={},<EOL>status=<NUM_LIT:200>,<EOL>content_type='<STR_LIT:application/json>',<EOL>)<EOL>", "docstring": "DRY function to register an empty response from some Enterprise API endpoint.", "id": "f16260:c0:m7"}
{"signature": "def update_course_with_enterprise_context(course, add_utm_info=True, enterprise_catalog_uuid=None):", "body": "url = urljoin(<EOL>settings.LMS_ROOT_URL,<EOL>reverse(<EOL>'<STR_LIT>',<EOL>kwargs={'<STR_LIT>': FAKE_UUIDS[<NUM_LIT:0>], '<STR_LIT>': course['<STR_LIT:key>']}<EOL>)<EOL>)<EOL>course['<STR_LIT>'] = update_url_with_enterprise_context(<EOL>url,<EOL>add_utm_info=add_utm_info,<EOL>enterprise_catalog_uuid=enterprise_catalog_uuid<EOL>)<EOL>course_runs = course.get('<STR_LIT>', [])<EOL>for course_run in course_runs:<EOL><INDENT>update_course_run_with_enterprise_context(<EOL>course_run,<EOL>add_utm_info=add_utm_info,<EOL>enterprise_catalog_uuid=enterprise_catalog_uuid<EOL>)<EOL><DEDENT>return course<EOL>", "docstring": "Populate a fake course response with any necessary Enterprise context for testing purposes.", "id": "f16261:m5"}
{"signature": "def create_items(factory, items):", "body": "for item in items:<EOL><INDENT>factory.create(**item)<EOL><DEDENT>", "docstring": "Create model instances using given factory.", "id": "f16261:m2"}
{"signature": "def update_course_run_with_enterprise_context(course_run, add_utm_info=True, enterprise_catalog_uuid=None):", "body": "url = urljoin(<EOL>settings.LMS_ROOT_URL,<EOL>reverse(<EOL>'<STR_LIT>',<EOL>kwargs={'<STR_LIT>': FAKE_UUIDS[<NUM_LIT:0>], '<STR_LIT>': course_run['<STR_LIT:key>']}<EOL>)<EOL>)<EOL>course_run['<STR_LIT>'] = update_url_with_enterprise_context(<EOL>url,<EOL>add_utm_info=add_utm_info,<EOL>enterprise_catalog_uuid=enterprise_catalog_uuid<EOL>)<EOL>return course_run<EOL>", "docstring": "Populate a fake course run response with any necessary Enterprise context for testing purposes.", "id": "f16261:m4"}
{"signature": "def __init__(self, *args, **kwargs):", "body": "self.reset()<EOL>logging.Handler.__init__(self, *args, **kwargs)<EOL>", "docstring": "Reset messages with each initialization.", "id": "f16261:c1:m0"}
{"signature": "def update_url_with_enterprise_context(url, add_utm_info=True, enterprise_catalog_uuid=None):", "body": "query_params = {}<EOL>if enterprise_catalog_uuid:<EOL><INDENT>query_params['<STR_LIT>'] = enterprise_catalog_uuid<EOL><DEDENT>if add_utm_info:<EOL><INDENT>query_params['<STR_LIT>'] = '<STR_LIT>'<EOL>query_params['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>url = utils.update_query_parameters(url, query_params)<EOL>return url<EOL>", "docstring": "Append enterprise-related query parameters to the given URL.", "id": "f16261:m3"}
{"signature": "def setUp(self):", "body": "super(APITest, self).setUp()<EOL>self.create_user(username=TEST_USERNAME, email=TEST_EMAIL, password=TEST_PASSWORD)<EOL>self.client = APIClient()<EOL>self.client.login(username=TEST_USERNAME, password=TEST_PASSWORD)<EOL>", "docstring": "Perform operations common to all tests.", "id": "f16261:c0:m0"}
{"signature": "def assert_url(first, second):", "body": "<EOL>scheme, netloc, path, query_string, fragment = urlsplit(first)<EOL>first = (scheme, netloc, path, parse_qs(query_string), fragment)<EOL>scheme, netloc, path, query_string, fragment = urlsplit(second)<EOL>second = (scheme, netloc, path, parse_qs(query_string), fragment)<EOL>assert first == second<EOL>", "docstring": "Compare first and second url.\n\nArguments:\n    first (str) : first url.\n    second (str) : second url.\n\nRaises:\n    Assertion error if both urls do not match.", "id": "f16261:m9"}
{"signature": "def load_json(self, content):", "body": "if isinstance(content, bytes):<EOL><INDENT>content = content.decode('<STR_LIT:utf-8>')<EOL><DEDENT>return json.loads(content)<EOL>", "docstring": "Parse content from django Response object.\n\nArguments:\n    content (bytes | str) : content type id bytes for PY3 and is string for PY2\n\nReturns:\n    dict object containing parsed json from response.content", "id": "f16261:c0:m3"}
{"signature": "def get_magic_name(value):", "body": "return str(value) if six.PY2 else value<EOL>", "docstring": "Return value suitable for __name__ attribute.\n\nFor python2, __name__ must be str, while for python3 it must be unicode (as there are no str at all).\n\nArguments:\n    value basestring: string to \"convert\"\n\nReturns:\n    str or unicode", "id": "f16261:m0"}
{"signature": "def emit(self, record):", "body": "self.messages[record.levelname.lower()].append(record.getMessage())<EOL>", "docstring": "Override to catch messages and store them messages in our internal dicts.", "id": "f16261:c1:m1"}
{"signature": "def get_requirements(requirements_file):", "body": "lines = open(requirements_file).readlines()<EOL>dependencies = []<EOL>dependency_links = []<EOL>for line in lines:<EOL><INDENT>package = line.strip()<EOL>if package.startswith('<STR_LIT:#>'):<EOL><INDENT>continue<EOL><DEDENT>if any(package.startswith(prefix) for prefix in VCS_PREFIXES):<EOL><INDENT>package_link, __, package = package.rpartition('<STR_LIT:#>')<EOL>package_link = re.sub(r'<STR_LIT>', r'<STR_LIT>', package_link)<EOL>package = re.sub(r'<STR_LIT>', r'<STR_LIT>', package)<EOL>package_version = re.sub(r'<STR_LIT>', '<STR_LIT>', line.strip())<EOL>if package:<EOL><INDENT>dependency_links.append(<EOL>'<STR_LIT>'.format(<EOL>package_link=package_link,<EOL>package=package,<EOL>package_version=package_version,<EOL>)<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>package, __, __ = package.partition('<STR_LIT:#>')<EOL>package = package.strip()<EOL><DEDENT>if package:<EOL><INDENT>dependencies.append(package)<EOL><DEDENT><DEDENT>return dependencies, dependency_links<EOL>", "docstring": "Get the contents of a file listing the requirements", "id": "f16263:m1"}
{"signature": "def setup(app):", "body": "event = '<STR_LIT>' if six.PY3 else b'<STR_LIT>'<EOL>app.connect(event, on_init)<EOL>", "docstring": "Sphinx extension: run sphinx-apidoc.", "id": "f16265:m1"}
{"signature": "def on_init(app):  ", "body": "docs_path = os.path.abspath(os.path.dirname(__file__))<EOL>root_path = os.path.abspath(os.path.join(docs_path, '<STR_LIT:..>'))<EOL>apidoc_path = '<STR_LIT>'<EOL>if hasattr(sys, '<STR_LIT>'):  <EOL><INDENT>bin_path = os.path.abspath(os.path.join(sys.prefix, '<STR_LIT>'))<EOL>apidoc_path = os.path.join(bin_path, apidoc_path)<EOL><DEDENT>check_call([apidoc_path, '<STR_LIT>', docs_path, os.path.join(root_path, '<STR_LIT>'),<EOL>os.path.join(root_path, '<STR_LIT>')])<EOL>", "docstring": "Run sphinx-apidoc after Sphinx initialization.\n\nRead the Docs won't run tox or custom shell commands, so we need this to\navoid checking in the generated reStructuredText files.", "id": "f16265:m0"}
{"signature": "def succ_item(self, key, default=_sentinel):", "body": "<EOL>node = self._root<EOL>succ_node = None<EOL>while node is not None:<EOL><INDENT>cmp = self._cmp(self._cmp_data, key, node.key)<EOL>if cmp == <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>elif cmp < <NUM_LIT:0>:<EOL><INDENT>if (succ_node is None) or self._cmp(self._cmp_data, node.key, succ_node.key) < <NUM_LIT:0>:<EOL><INDENT>succ_node = node<EOL><DEDENT>node = node.left<EOL><DEDENT>else:<EOL><INDENT>node = node.right<EOL><DEDENT><DEDENT>if node is None:  <EOL><INDENT>if default is _sentinel:<EOL><INDENT>raise KeyError(str(key))<EOL><DEDENT>return default<EOL><DEDENT>if node.right is not None:<EOL><INDENT>node = node.right<EOL>while node.left is not None:<EOL><INDENT>node = node.left<EOL><DEDENT>if succ_node is None:<EOL><INDENT>succ_node = node<EOL><DEDENT>elif self._cmp(self._cmp_data, node.key, succ_node.key) < <NUM_LIT:0>:<EOL><INDENT>succ_node = node<EOL><DEDENT><DEDENT>elif succ_node is None:  <EOL><INDENT>if default is _sentinel:<EOL><INDENT>raise KeyError(str(key))<EOL><DEDENT>return default<EOL><DEDENT>return succ_node.key, succ_node.value<EOL>", "docstring": "Get successor (k,v) pair of key, raises KeyError if key is max key\n        or key does not exist. optimized for pypy.", "id": "f16266:c3:m7"}
{"signature": "def prev_key(self, key, default=_sentinel):", "body": "item = self.prev_item(key, default)<EOL>return default if item is default else item[<NUM_LIT:0>]<EOL>", "docstring": "Get predecessor to key, raises KeyError if key is min key\n        or key does not exist.", "id": "f16266:c3:m16"}
{"signature": "def __setitem__(self, key, value):", "body": "if key == <NUM_LIT:0>:<EOL><INDENT>self.left = value<EOL><DEDENT>else:<EOL><INDENT>self.right = value<EOL><DEDENT>", "docstring": "N.__setitem__(key, value) <==> x[key]=value, where key is 0 (left) or 1 (right).", "id": "f16266:c4:m3"}
{"signature": "def is_empty(self):", "body": "return self.count == <NUM_LIT:0><EOL>", "docstring": "T.is_empty() -> False if T contains any items else True", "id": "f16266:c3:m12"}
{"signature": "def __getitem__(self, key):", "body": "return self.left if key == <NUM_LIT:0> else self.right<EOL>", "docstring": "N.__getitem__(key) <==> x[key], where key is 0 (left) or 1 (right).", "id": "f16266:c4:m2"}
{"signature": "def clear(self):", "body": "def _clear(node):<EOL><INDENT>if node is not None:<EOL><INDENT>_clear(node.left)<EOL>_clear(node.right)<EOL>node.free()<EOL><DEDENT><DEDENT>_clear(self._root)<EOL>self._count = <NUM_LIT:0><EOL>self._root = None<EOL>", "docstring": "T.clear() -> None.  Remove all items from T.", "id": "f16266:c3:m1"}
{"signature": "def key_slice(self, start_key, end_key, reverse=False):", "body": "return (k for k, v in self.iter_items(start_key, end_key, reverse=reverse))<EOL>", "docstring": "T.key_slice(start_key, end_key) -> key iterator:\n        start_key <= key < end_key.\n\n        Yields keys in ascending order if reverse is False else in descending order.", "id": "f16266:c3:m22"}
{"signature": "def __contains__(self, key):", "body": "try:<EOL><INDENT>self.get_value(key)<EOL>return True<EOL><DEDENT>except KeyError:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "k in T -> True if T has a key k, else False", "id": "f16266:c3:m10"}
{"signature": "def set_default(self, key, default=None):", "body": "try:<EOL><INDENT>return self.get_value(key)<EOL><DEDENT>except KeyError:<EOL><INDENT>self.insert(key, default)<EOL>return default<EOL><DEDENT>", "docstring": "T.set_default(k[,d]) -> T.get(k,d), also set T[k]=d if k not in T", "id": "f16266:c3:m13"}
{"signature": "def pop_max(self):", "body": "item = self.max_item()<EOL>self.remove(item[<NUM_LIT:0>])<EOL>return item<EOL>", "docstring": "T.pop_max() -> (k, v), remove item with maximum key, raise ValueError\n        if T is empty.", "id": "f16266:c3:m19"}
{"signature": "def pop(self, key, *args):", "body": "if len(args) > <NUM_LIT:1>:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % (<NUM_LIT:1> + len(args)))<EOL><DEDENT>try:<EOL><INDENT>value = self.get_value(key)<EOL>self.remove(key)<EOL>return value<EOL><DEDENT>except KeyError:<EOL><INDENT>if len(args) == <NUM_LIT:0>:<EOL><INDENT>raise<EOL><DEDENT>else:<EOL><INDENT>return args[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>", "docstring": "T.pop(k[,d]) -> v, remove specified key and return the corresponding value.\n        If key is not found, d is returned if given, otherwise KeyError is raised", "id": "f16266:c3:m15"}
{"signature": "@property<EOL><INDENT>def count(self):<DEDENT>", "body": "return self._count<EOL>", "docstring": "Get items count.", "id": "f16266:c3:m2"}
{"signature": "def _new_node(self, key, value):", "body": "self._count += <NUM_LIT:1><EOL>return Node(key, value)<EOL>", "docstring": "Create a new tree node.", "id": "f16266:c5:m3"}
{"signature": "def min_item(self):", "body": "if self.is_empty():<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>node = self._root<EOL>while node.left is not None:<EOL><INDENT>node = node.left<EOL><DEDENT>return node.key, node.value<EOL>", "docstring": "Get item with min key of tree, raises ValueError if tree is empty.", "id": "f16266:c3:m5"}
{"signature": "def iter_items(self,  start_key=None, end_key=None, reverse=False):", "body": "<EOL>if self.is_empty():<EOL><INDENT>return []<EOL><DEDENT>if reverse:<EOL><INDENT>return self._iter_items_backward(start_key, end_key)<EOL><DEDENT>else:<EOL><INDENT>return self._iter_items_forward(start_key, end_key)<EOL><DEDENT>", "docstring": "Iterates over the (key, value) items of the associated tree,\n        in ascending order if reverse is True, iterate in descending order,\n        reverse defaults to False", "id": "f16266:c3:m23"}
{"signature": "def isect_polygon__naive(points):", "body": "isect = []<EOL>n = len(points)<EOL>if Real is float:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>points = [(Real(p[<NUM_LIT:0>]), Real(p[<NUM_LIT:1>])) for p in points]<EOL><DEDENT>for i in range(n):<EOL><INDENT>a0, a1 = points[i], points[(i + <NUM_LIT:1>) % n]<EOL>for j in range(i + <NUM_LIT:1>, n):<EOL><INDENT>b0, b1 = points[j], points[(j + <NUM_LIT:1>) % n]<EOL>if a0 not in (b0, b1) and a1 not in (b0, b1):<EOL><INDENT>ix = isect_seg_seg_v2_point(a0, a1, b0, b1)<EOL>if ix is not None:<EOL><INDENT>if USE_IGNORE_SEGMENT_ENDINGS:<EOL><INDENT>if ((len_squared_v2v2(ix, a0) < NUM_EPS_SQ or<EOL>len_squared_v2v2(ix, a1) < NUM_EPS_SQ) and<EOL>(len_squared_v2v2(ix, b0) < NUM_EPS_SQ or<EOL>len_squared_v2v2(ix, b1) < NUM_EPS_SQ)):<EOL><INDENT>continue<EOL><DEDENT><DEDENT>isect.append(ix)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return isect<EOL>", "docstring": "Brute force O(n2) version of ``isect_polygon`` for test validation.", "id": "f16266:m13"}
{"signature": "def __len__(self):", "body": "return self.count<EOL>", "docstring": "T.__len__() <==> len(x)", "id": "f16266:c3:m11"}
{"signature": "def prev_item(self, key, default=_sentinel):", "body": "<EOL>node = self._root<EOL>prev_node = None<EOL>while node is not None:<EOL><INDENT>cmp = self._cmp(self._cmp_data, key, node.key)<EOL>if cmp == <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>elif cmp < <NUM_LIT:0>:<EOL><INDENT>node = node.left<EOL><DEDENT>else:<EOL><INDENT>if (prev_node is None) or self._cmp(self._cmp_data, prev_node.key, node.key) < <NUM_LIT:0>:<EOL><INDENT>prev_node = node<EOL><DEDENT>node = node.right<EOL><DEDENT><DEDENT>if node is None:  <EOL><INDENT>if default is _sentinel:<EOL><INDENT>raise KeyError(str(key))<EOL><DEDENT>return default<EOL><DEDENT>if node.left is not None:<EOL><INDENT>node = node.left<EOL>while node.right is not None:<EOL><INDENT>node = node.right<EOL><DEDENT>if prev_node is None:<EOL><INDENT>prev_node = node<EOL><DEDENT>elif self._cmp(self._cmp_data, prev_node.key, node.key) < <NUM_LIT:0>:<EOL><INDENT>prev_node = node<EOL><DEDENT><DEDENT>elif prev_node is None:  <EOL><INDENT>if default is _sentinel:<EOL><INDENT>raise KeyError(str(key))<EOL><DEDENT>return default<EOL><DEDENT>return prev_node.key, prev_node.value<EOL>", "docstring": "Get predecessor (k,v) pair of key, raises KeyError if key is min key\n        or key does not exist. optimized for pypy.", "id": "f16266:c3:m8"}
{"signature": "def remove(self, key):", "body": "if self._root is None:<EOL><INDENT>raise KeyError(str(key))<EOL><DEDENT>head = Node()  <EOL>node = head<EOL>node.right = self._root<EOL>parent = None<EOL>grand_parent = None<EOL>found = None  <EOL>direction = <NUM_LIT:1><EOL>while node[direction] is not None:<EOL><INDENT>last = direction<EOL>grand_parent = parent<EOL>parent = node<EOL>node = node[direction]<EOL>direction = <NUM_LIT:1> if (self._cmp(self._cmp_data, node.key, key) < <NUM_LIT:0>) else <NUM_LIT:0><EOL>if self._cmp(self._cmp_data, key, node.key) == <NUM_LIT:0>:<EOL><INDENT>found = node<EOL><DEDENT>if not RBTree.is_red(node) and not RBTree.is_red(node[direction]):<EOL><INDENT>if RBTree.is_red(node[<NUM_LIT:1> - direction]):<EOL><INDENT>parent[last] = RBTree.jsw_single(node, direction)<EOL>parent = parent[last]<EOL><DEDENT>elif not RBTree.is_red(node[<NUM_LIT:1> - direction]):<EOL><INDENT>sibling = parent[<NUM_LIT:1> - last]<EOL>if sibling is not None:<EOL><INDENT>if (not RBTree.is_red(sibling[<NUM_LIT:1> - last])) and (not RBTree.is_red(sibling[last])):<EOL><INDENT>parent.red = False<EOL>sibling.red = True<EOL>node.red = True<EOL><DEDENT>else:<EOL><INDENT>direction2 = <NUM_LIT:1> if grand_parent.right is parent else <NUM_LIT:0><EOL>if RBTree.is_red(sibling[last]):<EOL><INDENT>grand_parent[direction2] = RBTree.jsw_double(parent, last)<EOL><DEDENT>elif RBTree.is_red(sibling[<NUM_LIT:1>-last]):<EOL><INDENT>grand_parent[direction2] = RBTree.jsw_single(parent, last)<EOL><DEDENT>grand_parent[direction2].red = True<EOL>node.red = True<EOL>grand_parent[direction2].left.red = False<EOL>grand_parent[direction2].right.red = False<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if found is not None:<EOL><INDENT>found.key = node.key<EOL>found.value = node.value<EOL>parent[int(parent.right is node)] = node[int(node.left is None)]<EOL>node.free()<EOL>self._count -= <NUM_LIT:1><EOL><DEDENT>self._root = head.right<EOL>if self._root is not None:<EOL><INDENT>self._root.red = 
False<EOL><DEDENT>if not found:<EOL><INDENT>raise KeyError(str(key))<EOL><DEDENT>", "docstring": "T.remove(key) <==> del T[key], remove item <key> from tree.", "id": "f16266:c5:m5"}
{"signature": "def insert(self, key, value):", "body": "if self._root is None:  <EOL><INDENT>self._root = self._new_node(key, value)<EOL>self._root.red = False  <EOL>return<EOL><DEDENT>head = Node()  <EOL>grand_parent = None<EOL>grand_grand_parent = head<EOL>parent = None  <EOL>direction = <NUM_LIT:0><EOL>last = <NUM_LIT:0><EOL>grand_grand_parent.right = self._root<EOL>node = grand_grand_parent.right<EOL>while True:<EOL><INDENT>if node is None:  <EOL><INDENT>node = self._new_node(key, value)<EOL>parent[direction] = node<EOL><DEDENT>elif RBTree.is_red(node.left) and RBTree.is_red(node.right):  <EOL><INDENT>node.red = True<EOL>node.left.red = False<EOL>node.right.red = False<EOL><DEDENT>if RBTree.is_red(node) and RBTree.is_red(parent):<EOL><INDENT>direction2 = <NUM_LIT:1> if grand_grand_parent.right is grand_parent else <NUM_LIT:0><EOL>if node is parent[last]:<EOL><INDENT>grand_grand_parent[direction2] = RBTree.jsw_single(grand_parent, <NUM_LIT:1> - last)<EOL><DEDENT>else:<EOL><INDENT>grand_grand_parent[direction2] = RBTree.jsw_double(grand_parent, <NUM_LIT:1> - last)<EOL><DEDENT><DEDENT>if self._cmp(self._cmp_data, key, node.key) == <NUM_LIT:0>:<EOL><INDENT>node.value = value  <EOL>break<EOL><DEDENT>last = direction<EOL>direction = <NUM_LIT:0> if (self._cmp(self._cmp_data, key, node.key) < <NUM_LIT:0>) else <NUM_LIT:1><EOL>if grand_parent is not None:<EOL><INDENT>grand_grand_parent = grand_parent<EOL><DEDENT>grand_parent = parent<EOL>parent = node<EOL>node = node[direction]<EOL><DEDENT>self._root = head.right  <EOL>self._root.red = False<EOL>", "docstring": "T.insert(key, value) <==> T[key] = value, insert key, value into tree.", "id": "f16266:c5:m4"}
{"signature": "def isect_segments__naive(segments):", "body": "isect = []<EOL>if Real is float:<EOL><INDENT>segments = [<EOL>(s[<NUM_LIT:0>], s[<NUM_LIT:1>]) if s[<NUM_LIT:0>][X] <= s[<NUM_LIT:1>][X] else<EOL>(s[<NUM_LIT:1>], s[<NUM_LIT:0>])<EOL>for s in segments]<EOL><DEDENT>else:<EOL><INDENT>segments = [<EOL>(<EOL>(Real(s[<NUM_LIT:0>][<NUM_LIT:0>]), Real(s[<NUM_LIT:0>][<NUM_LIT:1>])),<EOL>(Real(s[<NUM_LIT:1>][<NUM_LIT:0>]), Real(s[<NUM_LIT:1>][<NUM_LIT:1>])),<EOL>) if (s[<NUM_LIT:0>] <= s[<NUM_LIT:1>]) else<EOL>(<EOL>(Real(s[<NUM_LIT:1>][<NUM_LIT:0>]), Real(s[<NUM_LIT:1>][<NUM_LIT:1>])),<EOL>(Real(s[<NUM_LIT:0>][<NUM_LIT:0>]), Real(s[<NUM_LIT:0>][<NUM_LIT:1>])),<EOL>)<EOL>for s in segments]<EOL><DEDENT>n = len(segments)<EOL>for i in range(n):<EOL><INDENT>a0, a1 = segments[i]<EOL>for j in range(i + <NUM_LIT:1>, n):<EOL><INDENT>b0, b1 = segments[j]<EOL>if a0 not in (b0, b1) and a1 not in (b0, b1):<EOL><INDENT>ix = isect_seg_seg_v2_point(a0, a1, b0, b1)<EOL>if ix is not None:<EOL><INDENT>isect.append(ix)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return isect<EOL>", "docstring": "Brute force O(n2) version of ``isect_segments`` for test validation.", "id": "f16266:m12"}
{"signature": "def noise3d(self, x, y, z):", "body": "<EOL>stretch_offset = (x + y + z) * STRETCH_CONSTANT_3D<EOL>xs = x + stretch_offset<EOL>ys = y + stretch_offset<EOL>zs = z + stretch_offset<EOL>xsb = floor(xs)<EOL>ysb = floor(ys)<EOL>zsb = floor(zs)<EOL>squish_offset = (xsb + ysb + zsb) * SQUISH_CONSTANT_3D<EOL>xb = xsb + squish_offset<EOL>yb = ysb + squish_offset<EOL>zb = zsb + squish_offset<EOL>xins = xs - xsb<EOL>yins = ys - ysb<EOL>zins = zs - zsb<EOL>in_sum = xins + yins + zins<EOL>dx0 = x - xb<EOL>dy0 = y - yb<EOL>dz0 = z - zb<EOL>value = <NUM_LIT:0><EOL>extrapolate = self._extrapolate3d<EOL>if in_sum <= <NUM_LIT:1>: <EOL><INDENT>a_point = <NUM_LIT><EOL>a_score = xins<EOL>b_point = <NUM_LIT><EOL>b_score = yins<EOL>if a_score >= b_score and zins > b_score:<EOL><INDENT>b_score = zins<EOL>b_point = <NUM_LIT><EOL><DEDENT>elif a_score < b_score and zins > a_score:<EOL><INDENT>a_score = zins<EOL>a_point = <NUM_LIT><EOL><DEDENT>wins = <NUM_LIT:1> - in_sum<EOL>if wins > a_score or wins > b_score: <EOL><INDENT>c = b_point if (b_score > a_score) else a_point <EOL>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsb - <NUM_LIT:1><EOL>xsv_ext1 = xsb<EOL>dx_ext0 = dx0 + <NUM_LIT:1><EOL>dx_ext1 = dx0<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsb + <NUM_LIT:1><EOL>dx_ext0 = dx_ext1 = dx0 - <NUM_LIT:1><EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb<EOL>dy_ext0 = dy_ext1 = dy0<EOL>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>ysv_ext1 -= <NUM_LIT:1><EOL>dy_ext1 += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>ysv_ext0 -= <NUM_LIT:1><EOL>dy_ext0 += <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb + <NUM_LIT:1><EOL>dy_ext0 = dy_ext1 = dy0 - <NUM_LIT:1><EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = zsb<EOL>zsv_ext1 = zsb - <NUM_LIT:1><EOL>dz_ext0 = dz0<EOL>dz_ext1 = dz0 + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb + <NUM_LIT:1><EOL>dz_ext0 = 
dz_ext1 = dz0 - <NUM_LIT:1><EOL><DEDENT><DEDENT>else: <EOL><INDENT>c = (a_point | b_point) <EOL>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsb<EOL>xsv_ext1 = xsb - <NUM_LIT:1><EOL>dx_ext0 = dx0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dx_ext1 = dx0 + <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsb + <NUM_LIT:1><EOL>dx_ext0 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dx_ext1 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysb<EOL>ysv_ext1 = ysb - <NUM_LIT:1><EOL>dy_ext0 = dy0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dy_ext1 = dy0 + <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL><DEDENT>else:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb + <NUM_LIT:1><EOL>dy_ext0 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dy_ext1 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = zsb<EOL>zsv_ext1 = zsb - <NUM_LIT:1><EOL>dz_ext0 = dz0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dz_ext1 = dz0 + <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb + <NUM_LIT:1><EOL>dz_ext0 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dz_ext1 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL><DEDENT><DEDENT>attn0 = <NUM_LIT:2> - dx0 * dx0 - dy0 * dy0 - dz0 * dz0<EOL>if attn0 > <NUM_LIT:0>:<EOL><INDENT>attn0 *= attn0<EOL>value += attn0 * attn0 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:0>, dx0, dy0, dz0)<EOL><DEDENT>dx1 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dy1 = dy0 - <NUM_LIT:0> - SQUISH_CONSTANT_3D<EOL>dz1 = dz0 - <NUM_LIT:0> - SQUISH_CONSTANT_3D<EOL>attn1 = <NUM_LIT:2> - dx1 * dx1 - dy1 * dy1 - dz1 * dz1<EOL>if attn1 > <NUM_LIT:0>:<EOL><INDENT>attn1 *= attn1<EOL>value += attn1 * attn1 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:0>, dx1, dy1, dz1)<EOL><DEDENT>dx2 = dx0 - <NUM_LIT:0> - SQUISH_CONSTANT_3D<EOL>dy2 = dy0 - 
<NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dz2 = dz1<EOL>attn2 = <NUM_LIT:2> - dx2 * dx2 - dy2 * dy2 - dz2 * dz2<EOL>if attn2 > <NUM_LIT:0>:<EOL><INDENT>attn2 *= attn2<EOL>value += attn2 * attn2 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:0>, dx2, dy2, dz2)<EOL><DEDENT>dx3 = dx2<EOL>dy3 = dy1<EOL>dz3 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>attn3 = <NUM_LIT:2> - dx3 * dx3 - dy3 * dy3 - dz3 * dz3<EOL>if attn3 > <NUM_LIT:0>:<EOL><INDENT>attn3 *= attn3<EOL>value += attn3 * attn3 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:1>, dx3, dy3, dz3)<EOL><DEDENT><DEDENT>elif in_sum >= <NUM_LIT:2>: <EOL><INDENT>a_point = <NUM_LIT><EOL>a_score = xins<EOL>b_point = <NUM_LIT><EOL>b_score = yins<EOL>if a_score <= b_score and zins < b_score:<EOL><INDENT>b_score = zins<EOL>b_point = <NUM_LIT><EOL><DEDENT>elif a_score > b_score and zins < a_score:<EOL><INDENT>a_score = zins<EOL>a_point = <NUM_LIT><EOL><DEDENT>wins = <NUM_LIT:3> - in_sum<EOL>if wins < a_score or wins < b_score: <EOL><INDENT>c = b_point if (b_score < a_score) else a_point <EOL>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsb + <NUM_LIT:2><EOL>xsv_ext1 = xsb + <NUM_LIT:1><EOL>dx_ext0 = dx0 - <NUM_LIT:2> - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL>dx_ext1 = dx0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsb<EOL>dx_ext0 = dx_ext1 = dx0 - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb + <NUM_LIT:1><EOL>dy_ext0 = dy_ext1 = dy0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>ysv_ext1 += <NUM_LIT:1><EOL>dy_ext1 -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>ysv_ext0 += <NUM_LIT:1><EOL>dy_ext0 -= <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb<EOL>dy_ext0 = dy_ext1 = dy0 - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = 
zsb + <NUM_LIT:1><EOL>zsv_ext1 = zsb + <NUM_LIT:2><EOL>dz_ext0 = dz0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL>dz_ext1 = dz0 - <NUM_LIT:2> - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb<EOL>dz_ext0 = dz_ext1 = dz0 - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL><DEDENT><DEDENT>else: <EOL><INDENT>c = (a_point & b_point) <EOL>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsb + <NUM_LIT:1><EOL>xsv_ext1 = xsb + <NUM_LIT:2><EOL>dx_ext0 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dx_ext1 = dx0 - <NUM_LIT:2> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsb<EOL>dx_ext0 = dx0 - SQUISH_CONSTANT_3D<EOL>dx_ext1 = dx0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysb + <NUM_LIT:1><EOL>ysv_ext1 = ysb + <NUM_LIT:2><EOL>dy_ext0 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dy_ext1 = dy0 - <NUM_LIT:2> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL><DEDENT>else:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb<EOL>dy_ext0 = dy0 - SQUISH_CONSTANT_3D<EOL>dy_ext1 = dy0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = zsb + <NUM_LIT:1><EOL>zsv_ext1 = zsb + <NUM_LIT:2><EOL>dz_ext0 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dz_ext1 = dz0 - <NUM_LIT:2> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb<EOL>dz_ext0 = dz0 - SQUISH_CONSTANT_3D<EOL>dz_ext1 = dz0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL><DEDENT><DEDENT>dx3 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dy3 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dz3 = dz0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>attn3 = <NUM_LIT:2> - dx3 * dx3 - dy3 * dy3 - dz3 * dz3<EOL>if attn3 > <NUM_LIT:0>:<EOL><INDENT>attn3 *= attn3<EOL>value += attn3 * attn3 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:0>, dx3, dy3, dz3)<EOL><DEDENT>dx2 = dx3<EOL>dy2 = dy0 - 
<NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dz2 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>attn2 = <NUM_LIT:2> - dx2 * dx2 - dy2 * dy2 - dz2 * dz2<EOL>if attn2 > <NUM_LIT:0>:<EOL><INDENT>attn2 *= attn2<EOL>value += attn2 * attn2 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:1>, dx2, dy2, dz2)<EOL><DEDENT>dx1 = dx0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dy1 = dy3<EOL>dz1 = dz2<EOL>attn1 = <NUM_LIT:2> - dx1 * dx1 - dy1 * dy1 - dz1 * dz1<EOL>if attn1 > <NUM_LIT:0>:<EOL><INDENT>attn1 *= attn1<EOL>value += attn1 * attn1 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:1>, dx1, dy1, dz1)<EOL><DEDENT>dx0 = dx0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL>dy0 = dy0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL>dz0 = dz0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL>attn0 = <NUM_LIT:2> - dx0 * dx0 - dy0 * dy0 - dz0 * dz0<EOL>if attn0 > <NUM_LIT:0>:<EOL><INDENT>attn0 *= attn0<EOL>value += attn0 * attn0 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:1>, dx0, dy0, dz0)<EOL><DEDENT><DEDENT>else: <EOL><INDENT>p1 = xins + yins<EOL>if p1 > <NUM_LIT:1>:<EOL><INDENT>a_score = p1 - <NUM_LIT:1><EOL>a_point = <NUM_LIT><EOL>a_is_further_side = True<EOL><DEDENT>else:<EOL><INDENT>a_score = <NUM_LIT:1> - p1<EOL>a_point = <NUM_LIT><EOL>a_is_further_side = False<EOL><DEDENT>p2 = xins + zins<EOL>if p2 > <NUM_LIT:1>:<EOL><INDENT>b_score = p2 - <NUM_LIT:1><EOL>b_point = <NUM_LIT><EOL>b_is_further_side = True<EOL><DEDENT>else:<EOL><INDENT>b_score = <NUM_LIT:1> - p2<EOL>b_point = <NUM_LIT><EOL>b_is_further_side = False<EOL><DEDENT>p3 = yins + zins<EOL>if p3 > <NUM_LIT:1>:<EOL><INDENT>score = p3 - <NUM_LIT:1><EOL>if a_score <= b_score and a_score < score:<EOL><INDENT>a_point = <NUM_LIT><EOL>a_is_further_side = True<EOL><DEDENT>elif a_score > b_score and b_score < score:<EOL><INDENT>b_point = <NUM_LIT><EOL>b_is_further_side = True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>score = 
<NUM_LIT:1> - p3<EOL>if a_score <= b_score and a_score < score:<EOL><INDENT>a_point = <NUM_LIT><EOL>a_is_further_side = False<EOL><DEDENT>elif a_score > b_score and b_score < score:<EOL><INDENT>b_point = <NUM_LIT><EOL>b_is_further_side = False<EOL><DEDENT><DEDENT>if a_is_further_side == b_is_further_side:<EOL><INDENT>if a_is_further_side: <EOL><INDENT>dx_ext0 = dx0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL>dy_ext0 = dy0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL>dz_ext0 = dz0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_3D<EOL>xsv_ext0 = xsb + <NUM_LIT:1><EOL>ysv_ext0 = ysb + <NUM_LIT:1><EOL>zsv_ext0 = zsb + <NUM_LIT:1><EOL>c = (a_point & b_point)<EOL>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>dx_ext1 = dx0 - <NUM_LIT:2> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dy_ext1 = dy0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dz_ext1 = dz0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>xsv_ext1 = xsb + <NUM_LIT:2><EOL>ysv_ext1 = ysb<EOL>zsv_ext1 = zsb<EOL><DEDENT>elif (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>dx_ext1 = dx0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dy_ext1 = dy0 - <NUM_LIT:2> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dz_ext1 = dz0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>xsv_ext1 = xsb<EOL>ysv_ext1 = ysb + <NUM_LIT:2><EOL>zsv_ext1 = zsb<EOL><DEDENT>else:<EOL><INDENT>dx_ext1 = dx0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dy_ext1 = dy0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dz_ext1 = dz0 - <NUM_LIT:2> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>xsv_ext1 = xsb<EOL>ysv_ext1 = ysb<EOL>zsv_ext1 = zsb + <NUM_LIT:2><EOL><DEDENT><DEDENT>else:<EOL><INDENT>dx_ext0 = dx0<EOL>dy_ext0 = dy0<EOL>dz_ext0 = dz0<EOL>xsv_ext0 = xsb<EOL>ysv_ext0 = ysb<EOL>zsv_ext0 = zsb<EOL>c = (a_point | b_point)<EOL>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>dx_ext1 = dx0 + <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dy_ext1 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dz_ext1 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>xsv_ext1 = xsb - <NUM_LIT:1><EOL>ysv_ext1 = ysb + <NUM_LIT:1><EOL>zsv_ext1 = 
zsb + <NUM_LIT:1><EOL><DEDENT>elif (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>dx_ext1 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dy_ext1 = dy0 + <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dz_ext1 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>xsv_ext1 = xsb + <NUM_LIT:1><EOL>ysv_ext1 = ysb - <NUM_LIT:1><EOL>zsv_ext1 = zsb + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>dx_ext1 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dy_ext1 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dz_ext1 = dz0 + <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>xsv_ext1 = xsb + <NUM_LIT:1><EOL>ysv_ext1 = ysb + <NUM_LIT:1><EOL>zsv_ext1 = zsb - <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>else: <EOL><INDENT>if a_is_further_side:<EOL><INDENT>c1 = a_point<EOL>c2 = b_point<EOL><DEDENT>else:<EOL><INDENT>c1 = b_point<EOL>c2 = a_point<EOL><DEDENT>if (c1 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>dx_ext0 = dx0 + <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dy_ext0 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dz_ext0 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>xsv_ext0 = xsb - <NUM_LIT:1><EOL>ysv_ext0 = ysb + <NUM_LIT:1><EOL>zsv_ext0 = zsb + <NUM_LIT:1><EOL><DEDENT>elif (c1 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>dx_ext0 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dy_ext0 = dy0 + <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dz_ext0 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>xsv_ext0 = xsb + <NUM_LIT:1><EOL>ysv_ext0 = ysb - <NUM_LIT:1><EOL>zsv_ext0 = zsb + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>dx_ext0 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dy_ext0 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dz_ext0 = dz0 + <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>xsv_ext0 = xsb + <NUM_LIT:1><EOL>ysv_ext0 = ysb + <NUM_LIT:1><EOL>zsv_ext0 = zsb - <NUM_LIT:1><EOL><DEDENT>dx_ext1 = dx0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dy_ext1 = dy0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dz_ext1 = dz0 - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>xsv_ext1 = xsb<EOL>ysv_ext1 = ysb<EOL>zsv_ext1 = zsb<EOL>if (c2 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>dx_ext1 -= 
<NUM_LIT:2><EOL>xsv_ext1 += <NUM_LIT:2><EOL><DEDENT>elif (c2 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>dy_ext1 -= <NUM_LIT:2><EOL>ysv_ext1 += <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>dz_ext1 -= <NUM_LIT:2><EOL>zsv_ext1 += <NUM_LIT:2><EOL><DEDENT><DEDENT>dx1 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dy1 = dy0 - <NUM_LIT:0> - SQUISH_CONSTANT_3D<EOL>dz1 = dz0 - <NUM_LIT:0> - SQUISH_CONSTANT_3D<EOL>attn1 = <NUM_LIT:2> - dx1 * dx1 - dy1 * dy1 - dz1 * dz1<EOL>if attn1 > <NUM_LIT:0>:<EOL><INDENT>attn1 *= attn1<EOL>value += attn1 * attn1 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:0>, dx1, dy1, dz1)<EOL><DEDENT>dx2 = dx0 - <NUM_LIT:0> - SQUISH_CONSTANT_3D<EOL>dy2 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>dz2 = dz1<EOL>attn2 = <NUM_LIT:2> - dx2 * dx2 - dy2 * dy2 - dz2 * dz2<EOL>if attn2 > <NUM_LIT:0>:<EOL><INDENT>attn2 *= attn2<EOL>value += attn2 * attn2 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:0>, dx2, dy2, dz2)<EOL><DEDENT>dx3 = dx2<EOL>dy3 = dy1<EOL>dz3 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_3D<EOL>attn3 = <NUM_LIT:2> - dx3 * dx3 - dy3 * dy3 - dz3 * dz3<EOL>if attn3 > <NUM_LIT:0>:<EOL><INDENT>attn3 *= attn3<EOL>value += attn3 * attn3 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:1>, dx3, dy3, dz3)<EOL><DEDENT>dx4 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dy4 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dz4 = dz0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>attn4 = <NUM_LIT:2> - dx4 * dx4 - dy4 * dy4 - dz4 * dz4<EOL>if attn4 > <NUM_LIT:0>:<EOL><INDENT>attn4 *= attn4<EOL>value += attn4 * attn4 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:0>, dx4, dy4, dz4)<EOL><DEDENT>dx5 = dx4<EOL>dy5 = dy0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dz5 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>attn5 = <NUM_LIT:2> - dx5 * dx5 - dy5 * dy5 - dz5 * dz5<EOL>if attn5 > <NUM_LIT:0>:<EOL><INDENT>attn5 *= attn5<EOL>value += attn5 * 
attn5 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:1>, dx5, dy5, dz5)<EOL><DEDENT>dx6 = dx0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_3D<EOL>dy6 = dy4<EOL>dz6 = dz5<EOL>attn6 = <NUM_LIT:2> - dx6 * dx6 - dy6 * dy6 - dz6 * dz6<EOL>if attn6 > <NUM_LIT:0>:<EOL><INDENT>attn6 *= attn6<EOL>value += attn6 * attn6 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:1>, dx6, dy6, dz6)<EOL><DEDENT><DEDENT>attn_ext0 = <NUM_LIT:2> - dx_ext0 * dx_ext0 - dy_ext0 * dy_ext0 - dz_ext0 * dz_ext0<EOL>if attn_ext0 > <NUM_LIT:0>:<EOL><INDENT>attn_ext0 *= attn_ext0<EOL>value += attn_ext0 * attn_ext0 * extrapolate(xsv_ext0, ysv_ext0, zsv_ext0, dx_ext0, dy_ext0, dz_ext0)<EOL><DEDENT>attn_ext1 = <NUM_LIT:2> - dx_ext1 * dx_ext1 - dy_ext1 * dy_ext1 - dz_ext1 * dz_ext1<EOL>if attn_ext1 > <NUM_LIT:0>:<EOL><INDENT>attn_ext1 *= attn_ext1<EOL>value += attn_ext1 * attn_ext1 * extrapolate(xsv_ext1, ysv_ext1, zsv_ext1, dx_ext1, dy_ext1, dz_ext1)<EOL><DEDENT>return value / NORM_CONSTANT_3D<EOL>", "docstring": "Generate 3D OpenSimplex noise from X,Y,Z coordinates.", "id": "f16267:c0:m5"}
{"signature": "def noise2d(self, x, y):", "body": "<EOL>stretch_offset = (x + y) * STRETCH_CONSTANT_2D<EOL>xs = x + stretch_offset<EOL>ys = y + stretch_offset<EOL>xsb = floor(xs)<EOL>ysb = floor(ys)<EOL>squish_offset = (xsb + ysb) * SQUISH_CONSTANT_2D<EOL>xb = xsb + squish_offset<EOL>yb = ysb + squish_offset<EOL>xins = xs - xsb<EOL>yins = ys - ysb<EOL>in_sum = xins + yins<EOL>dx0 = x - xb<EOL>dy0 = y - yb<EOL>value = <NUM_LIT:0><EOL>dx1 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_2D<EOL>dy1 = dy0 - <NUM_LIT:0> - SQUISH_CONSTANT_2D<EOL>attn1 = <NUM_LIT:2> - dx1 * dx1 - dy1 * dy1<EOL>extrapolate = self._extrapolate2d<EOL>if attn1 > <NUM_LIT:0>:<EOL><INDENT>attn1 *= attn1<EOL>value += attn1 * attn1 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, dx1, dy1)<EOL><DEDENT>dx2 = dx0 - <NUM_LIT:0> - SQUISH_CONSTANT_2D<EOL>dy2 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_2D<EOL>attn2 = <NUM_LIT:2> - dx2 * dx2 - dy2 * dy2<EOL>if attn2 > <NUM_LIT:0>:<EOL><INDENT>attn2 *= attn2<EOL>value += attn2 * attn2 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, dx2, dy2)<EOL><DEDENT>if in_sum <= <NUM_LIT:1>: <EOL><INDENT>zins = <NUM_LIT:1> - in_sum<EOL>if zins > xins or zins > yins: <EOL><INDENT>if xins > yins:<EOL><INDENT>xsv_ext = xsb + <NUM_LIT:1><EOL>ysv_ext = ysb - <NUM_LIT:1><EOL>dx_ext = dx0 - <NUM_LIT:1><EOL>dy_ext = dy0 + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>xsv_ext = xsb - <NUM_LIT:1><EOL>ysv_ext = ysb + <NUM_LIT:1><EOL>dx_ext = dx0 + <NUM_LIT:1><EOL>dy_ext = dy0 - <NUM_LIT:1><EOL><DEDENT><DEDENT>else: <EOL><INDENT>xsv_ext = xsb + <NUM_LIT:1><EOL>ysv_ext = ysb + <NUM_LIT:1><EOL>dx_ext = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_2D<EOL>dy_ext = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_2D<EOL><DEDENT><DEDENT>else: <EOL><INDENT>zins = <NUM_LIT:2> - in_sum<EOL>if zins < xins or zins < yins: <EOL><INDENT>if xins > yins:<EOL><INDENT>xsv_ext = xsb + <NUM_LIT:2><EOL>ysv_ext = ysb + <NUM_LIT:0><EOL>dx_ext = dx0 - <NUM_LIT:2> - <NUM_LIT:2> * SQUISH_CONSTANT_2D<EOL>dy_ext 
= dy0 + <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_2D<EOL><DEDENT>else:<EOL><INDENT>xsv_ext = xsb + <NUM_LIT:0><EOL>ysv_ext = ysb + <NUM_LIT:2><EOL>dx_ext = dx0 + <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_2D<EOL>dy_ext = dy0 - <NUM_LIT:2> - <NUM_LIT:2> * SQUISH_CONSTANT_2D<EOL><DEDENT><DEDENT>else: <EOL><INDENT>dx_ext = dx0<EOL>dy_ext = dy0<EOL>xsv_ext = xsb<EOL>ysv_ext = ysb<EOL><DEDENT>xsb += <NUM_LIT:1><EOL>ysb += <NUM_LIT:1><EOL>dx0 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_2D<EOL>dy0 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_2D<EOL><DEDENT>attn0 = <NUM_LIT:2> - dx0 * dx0 - dy0 * dy0<EOL>if attn0 > <NUM_LIT:0>:<EOL><INDENT>attn0 *= attn0<EOL>value += attn0 * attn0 * extrapolate(xsb, ysb, dx0, dy0)<EOL><DEDENT>attn_ext = <NUM_LIT:2> - dx_ext * dx_ext - dy_ext * dy_ext<EOL>if attn_ext > <NUM_LIT:0>:<EOL><INDENT>attn_ext *= attn_ext<EOL>value += attn_ext * attn_ext * extrapolate(xsv_ext, ysv_ext, dx_ext, dy_ext)<EOL><DEDENT>return value / NORM_CONSTANT_2D<EOL>", "docstring": "Generate 2D OpenSimplex noise from X,Y coordinates.", "id": "f16267:c0:m4"}
{"signature": "def __init__(self, seed=DEFAULT_SEED):", "body": "<EOL>perm = self._perm = [<NUM_LIT:0>] * <NUM_LIT> <EOL>perm_grad_index_3D = self._perm_grad_index_3D = [<NUM_LIT:0>] * <NUM_LIT><EOL>source = [i for i in range(<NUM_LIT:0>, <NUM_LIT>)]<EOL>seed = overflow(seed * <NUM_LIT> + <NUM_LIT>)<EOL>seed = overflow(seed * <NUM_LIT> + <NUM_LIT>)<EOL>seed = overflow(seed * <NUM_LIT> + <NUM_LIT>)<EOL>for i in range(<NUM_LIT:255>, -<NUM_LIT:1>, -<NUM_LIT:1>):<EOL><INDENT>seed = overflow(seed * <NUM_LIT> + <NUM_LIT>)<EOL>r = int((seed + <NUM_LIT>) % (i + <NUM_LIT:1>))<EOL>if r < <NUM_LIT:0>:<EOL><INDENT>r += i + <NUM_LIT:1><EOL><DEDENT>perm[i] = source[r]<EOL>perm_grad_index_3D[i] = int((perm[i] % (len(GRADIENTS_3D) / <NUM_LIT:3>)) * <NUM_LIT:3>)<EOL>source[r] = source[i]<EOL><DEDENT>", "docstring": "Initiate the class and generate permutation arrays from a seed number.", "id": "f16267:c0:m0"}
{"signature": "def noise4d(self, x, y, z, w):", "body": "<EOL>stretch_offset = (x + y + z + w) * STRETCH_CONSTANT_4D<EOL>xs = x + stretch_offset<EOL>ys = y + stretch_offset<EOL>zs = z + stretch_offset<EOL>ws = w + stretch_offset<EOL>xsb = floor(xs)<EOL>ysb = floor(ys)<EOL>zsb = floor(zs)<EOL>wsb = floor(ws)<EOL>squish_offset = (xsb + ysb + zsb + wsb) * SQUISH_CONSTANT_4D<EOL>xb = xsb + squish_offset<EOL>yb = ysb + squish_offset<EOL>zb = zsb + squish_offset<EOL>wb = wsb + squish_offset<EOL>xins = xs - xsb<EOL>yins = ys - ysb<EOL>zins = zs - zsb<EOL>wins = ws - wsb<EOL>in_sum = xins + yins + zins + wins<EOL>dx0 = x - xb<EOL>dy0 = y - yb<EOL>dz0 = z - zb<EOL>dw0 = w - wb<EOL>value = <NUM_LIT:0><EOL>extrapolate = self._extrapolate4d<EOL>if in_sum <= <NUM_LIT:1>: <EOL><INDENT>a_po = <NUM_LIT><EOL>a_score = xins<EOL>b_po = <NUM_LIT><EOL>b_score = yins<EOL>if a_score >= b_score and zins > b_score:<EOL><INDENT>b_score = zins<EOL>b_po = <NUM_LIT><EOL><DEDENT>elif a_score < b_score and zins > a_score:<EOL><INDENT>a_score = zins<EOL>a_po = <NUM_LIT><EOL><DEDENT>if a_score >= b_score and wins > b_score:<EOL><INDENT>b_score = wins<EOL>b_po = <NUM_LIT><EOL><DEDENT>elif a_score < b_score and wins > a_score:<EOL><INDENT>a_score = wins<EOL>a_po = <NUM_LIT><EOL><DEDENT>uins = <NUM_LIT:1> - in_sum<EOL>if uins > a_score or uins > b_score: <EOL><INDENT>c = b_po if (b_score > a_score) else a_po <EOL>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsb - <NUM_LIT:1><EOL>xsv_ext1 = xsv_ext2 = xsb<EOL>dx_ext0 = dx0 + <NUM_LIT:1><EOL>dx_ext1 = dx_ext2 = dx0<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsv_ext2 = xsb + <NUM_LIT:1><EOL>dx_ext0 = dx_ext1 = dx_ext2 = dx0 - <NUM_LIT:1><EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb<EOL>dy_ext0 = dy_ext1 = dy_ext2 = dy0<EOL>if (c & <NUM_LIT>) == <NUM_LIT>:<EOL><INDENT>ysv_ext0 -= <NUM_LIT:1><EOL>dy_ext0 += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>ysv_ext1 -= <NUM_LIT:1><EOL>dy_ext1 
+= <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb + <NUM_LIT:1><EOL>dy_ext0 = dy_ext1 = dy_ext2 = dy0 - <NUM_LIT:1><EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb<EOL>dz_ext0 = dz_ext1 = dz_ext2 = dz0<EOL>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>if (c & <NUM_LIT>) == <NUM_LIT>:<EOL><INDENT>zsv_ext0 -= <NUM_LIT:1><EOL>dz_ext0 += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>zsv_ext1 -= <NUM_LIT:1><EOL>dz_ext1 += <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>zsv_ext2 -= <NUM_LIT:1><EOL>dz_ext2 += <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb + <NUM_LIT:1><EOL>dz_ext0 = dz_ext1 = dz_ext2 = dz0 - <NUM_LIT:1><EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsb<EOL>wsv_ext2 = wsb - <NUM_LIT:1><EOL>dw_ext0 = dw_ext1 = dw0<EOL>dw_ext2 = dw0 + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsv_ext2 = wsb + <NUM_LIT:1><EOL>dw_ext0 = dw_ext1 = dw_ext2 = dw0 - <NUM_LIT:1><EOL><DEDENT><DEDENT>else: <EOL><INDENT>c = (a_po | b_po) <EOL>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsv_ext2 = xsb<EOL>xsv_ext1 = xsb - <NUM_LIT:1><EOL>dx_ext0 = dx0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dx_ext1 = dx0 + <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL>dx_ext2 = dx0 - SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsv_ext2 = xsb + <NUM_LIT:1><EOL>dx_ext0 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dx_ext1 = dx_ext2 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb<EOL>dy_ext0 = dy0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy_ext1 = dy_ext2 = dy0 - SQUISH_CONSTANT_4D<EOL>if (c & <NUM_LIT>) == <NUM_LIT>:<EOL><INDENT>ysv_ext1 -= <NUM_LIT:1><EOL>dy_ext1 += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>ysv_ext2 -= <NUM_LIT:1><EOL>dy_ext2 += 
<NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb + <NUM_LIT:1><EOL>dy_ext0 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy_ext1 = dy_ext2 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb<EOL>dz_ext0 = dz0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz_ext1 = dz_ext2 = dz0 - SQUISH_CONSTANT_4D<EOL>if (c & <NUM_LIT>) == <NUM_LIT>:<EOL><INDENT>zsv_ext1 -= <NUM_LIT:1><EOL>dz_ext1 += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>zsv_ext2 -= <NUM_LIT:1><EOL>dz_ext2 += <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb + <NUM_LIT:1><EOL>dz_ext0 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz_ext1 = dz_ext2 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsb<EOL>wsv_ext2 = wsb - <NUM_LIT:1><EOL>dw_ext0 = dw0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw_ext1 = dw0 - SQUISH_CONSTANT_4D<EOL>dw_ext2 = dw0 + <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsv_ext2 = wsb + <NUM_LIT:1><EOL>dw_ext0 = dw0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw_ext1 = dw_ext2 = dw0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT><DEDENT>attn0 = <NUM_LIT:2> - dx0 * dx0 - dy0 * dy0 - dz0 * dz0 - dw0 * dw0<EOL>if attn0 > <NUM_LIT:0>:<EOL><INDENT>attn0 *= attn0<EOL>value += attn0 * attn0 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:0>, dx0, dy0, dz0, dw0)<EOL><DEDENT>dx1 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL>dy1 = dy0 - <NUM_LIT:0> - SQUISH_CONSTANT_4D<EOL>dz1 = dz0 - <NUM_LIT:0> - SQUISH_CONSTANT_4D<EOL>dw1 = dw0 - <NUM_LIT:0> - SQUISH_CONSTANT_4D<EOL>attn1 = <NUM_LIT:2> - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 - dw1 * dw1<EOL>if attn1 > <NUM_LIT:0>:<EOL><INDENT>attn1 *= attn1<EOL>value += attn1 * attn1 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, zsb + 
<NUM_LIT:0>, wsb + <NUM_LIT:0>, dx1, dy1, dz1, dw1)<EOL><DEDENT>dx2 = dx0 - <NUM_LIT:0> - SQUISH_CONSTANT_4D<EOL>dy2 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL>dz2 = dz1<EOL>dw2 = dw1<EOL>attn2 = <NUM_LIT:2> - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 - dw2 * dw2<EOL>if attn2 > <NUM_LIT:0>:<EOL><INDENT>attn2 *= attn2<EOL>value += attn2 * attn2 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:0>, dx2, dy2, dz2, dw2)<EOL><DEDENT>dx3 = dx2<EOL>dy3 = dy1<EOL>dz3 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL>dw3 = dw1<EOL>attn3 = <NUM_LIT:2> - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 - dw3 * dw3<EOL>if attn3 > <NUM_LIT:0>:<EOL><INDENT>attn3 *= attn3<EOL>value += attn3 * attn3 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:0>, dx3, dy3, dz3, dw3)<EOL><DEDENT>dx4 = dx2<EOL>dy4 = dy1<EOL>dz4 = dz1<EOL>dw4 = dw0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL>attn4 = <NUM_LIT:2> - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 - dw4 * dw4<EOL>if attn4 > <NUM_LIT:0>:<EOL><INDENT>attn4 *= attn4<EOL>value += attn4 * attn4 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:1>, dx4, dy4, dz4, dw4)<EOL><DEDENT><DEDENT>elif in_sum >= <NUM_LIT:3>: <EOL><INDENT>a_po = <NUM_LIT><EOL>a_score = xins<EOL>b_po = <NUM_LIT><EOL>b_score = yins<EOL>if a_score <= b_score and zins < b_score:<EOL><INDENT>b_score = zins<EOL>b_po = <NUM_LIT><EOL><DEDENT>elif a_score > b_score and zins < a_score:<EOL><INDENT>a_score = zins<EOL>a_po = <NUM_LIT><EOL><DEDENT>if a_score <= b_score and wins < b_score:<EOL><INDENT>b_score = wins<EOL>b_po = <NUM_LIT><EOL><DEDENT>elif a_score > b_score and wins < a_score:<EOL><INDENT>a_score = wins<EOL>a_po = <NUM_LIT><EOL><DEDENT>uins = <NUM_LIT:4> - in_sum<EOL>if uins < a_score or uins < b_score: <EOL><INDENT>c = b_po if (b_score < a_score) else a_po <EOL>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsb + <NUM_LIT:2><EOL>xsv_ext1 = xsv_ext2 = xsb + <NUM_LIT:1><EOL>dx_ext0 = dx0 - 
<NUM_LIT:2> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL>dx_ext1 = dx_ext2 = dx0 - <NUM_LIT:1> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsv_ext2 = xsb<EOL>dx_ext0 = dx_ext1 = dx_ext2 = dx0 - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb + <NUM_LIT:1><EOL>dy_ext0 = dy_ext1 = dy_ext2 = dy0 - <NUM_LIT:1> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>ysv_ext1 += <NUM_LIT:1><EOL>dy_ext1 -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>ysv_ext0 += <NUM_LIT:1><EOL>dy_ext0 -= <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb<EOL>dy_ext0 = dy_ext1 = dy_ext2 = dy0 - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb + <NUM_LIT:1><EOL>dz_ext0 = dz_ext1 = dz_ext2 = dz0 - <NUM_LIT:1> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL>if (c & <NUM_LIT>) != <NUM_LIT>:<EOL><INDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 += <NUM_LIT:1><EOL>dz_ext0 -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>zsv_ext1 += <NUM_LIT:1><EOL>dz_ext1 -= <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>zsv_ext2 += <NUM_LIT:1><EOL>dz_ext2 -= <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb<EOL>dz_ext0 = dz_ext1 = dz_ext2 = dz0 - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsb + <NUM_LIT:1><EOL>wsv_ext2 = wsb + <NUM_LIT:2><EOL>dw_ext0 = dw_ext1 = dw0 - <NUM_LIT:1> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL>dw_ext2 = dw0 - <NUM_LIT:2> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsv_ext2 = wsb<EOL>dw_ext0 = dw_ext1 = dw_ext2 = dw0 - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL><DEDENT><DEDENT>else: <EOL><INDENT>c = (a_po & b_po) <EOL>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsv_ext2 
= xsb + <NUM_LIT:1><EOL>xsv_ext1 = xsb + <NUM_LIT:2><EOL>dx_ext0 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dx_ext1 = dx0 - <NUM_LIT:2> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dx_ext2 = dx0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsv_ext2 = xsb<EOL>dx_ext0 = dx0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dx_ext1 = dx_ext2 = dx0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb + <NUM_LIT:1><EOL>dy_ext0 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy_ext1 = dy_ext2 = dy0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>ysv_ext2 += <NUM_LIT:1><EOL>dy_ext2 -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>ysv_ext1 += <NUM_LIT:1><EOL>dy_ext1 -= <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb<EOL>dy_ext0 = dy0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy_ext1 = dy_ext2 = dy0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb + <NUM_LIT:1><EOL>dz_ext0 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz_ext1 = dz_ext2 = dz0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>zsv_ext2 += <NUM_LIT:1><EOL>dz_ext2 -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>zsv_ext1 += <NUM_LIT:1><EOL>dz_ext1 -= <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb<EOL>dz_ext0 = dz0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz_ext1 = dz_ext2 = dz0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsb + <NUM_LIT:1><EOL>wsv_ext2 = wsb + <NUM_LIT:2><EOL>dw_ext0 = dw0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw_ext1 = dw0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dw_ext2 = dw0 - <NUM_LIT:2> - 
<NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsv_ext2 = wsb<EOL>dw_ext0 = dw0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw_ext1 = dw_ext2 = dw0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT><DEDENT>dx4 = dx0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dy4 = dy0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dz4 = dz0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dw4 = dw0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>attn4 = <NUM_LIT:2> - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 - dw4 * dw4<EOL>if attn4 > <NUM_LIT:0>:<EOL><INDENT>attn4 *= attn4<EOL>value += attn4 * attn4 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:0>, dx4, dy4, dz4, dw4)<EOL><DEDENT>dx3 = dx4<EOL>dy3 = dy4<EOL>dz3 = dz0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dw3 = dw0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>attn3 = <NUM_LIT:2> - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 - dw3 * dw3<EOL>if attn3 > <NUM_LIT:0>:<EOL><INDENT>attn3 *= attn3<EOL>value += attn3 * attn3 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:1>, dx3, dy3, dz3, dw3)<EOL><DEDENT>dx2 = dx4<EOL>dy2 = dy0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dz2 = dz4<EOL>dw2 = dw3<EOL>attn2 = <NUM_LIT:2> - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 - dw2 * dw2<EOL>if attn2 > <NUM_LIT:0>:<EOL><INDENT>attn2 *= attn2<EOL>value += attn2 * attn2 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:1>, dx2, dy2, dz2, dw2)<EOL><DEDENT>dx1 = dx0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dz1 = dz4<EOL>dy1 = dy4<EOL>dw1 = dw3<EOL>attn1 = <NUM_LIT:2> - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 - dw1 * dw1<EOL>if attn1 > <NUM_LIT:0>:<EOL><INDENT>attn1 *= attn1<EOL>value += attn1 * attn1 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:1>, dx1, dy1, dz1, dw1)<EOL><DEDENT>dx0 = dx0 - <NUM_LIT:1> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL>dy0 = dy0 - <NUM_LIT:1> - <NUM_LIT:4> * 
SQUISH_CONSTANT_4D<EOL>dz0 = dz0 - <NUM_LIT:1> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL>dw0 = dw0 - <NUM_LIT:1> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL>attn0 = <NUM_LIT:2> - dx0 * dx0 - dy0 * dy0 - dz0 * dz0 - dw0 * dw0<EOL>if attn0 > <NUM_LIT:0>:<EOL><INDENT>attn0 *= attn0<EOL>value += attn0 * attn0 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:1>, dx0, dy0, dz0, dw0)<EOL><DEDENT><DEDENT>elif in_sum <= <NUM_LIT:2>: <EOL><INDENT>a_is_bigger_side = True<EOL>b_is_bigger_side = True<EOL>if xins + yins > zins + wins:<EOL><INDENT>a_score = xins + yins<EOL>a_po = <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>a_score = zins + wins<EOL>a_po = <NUM_LIT><EOL><DEDENT>if xins + zins > yins + wins:<EOL><INDENT>b_score = xins + zins<EOL>b_po = <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>b_score = yins + wins<EOL>b_po = <NUM_LIT><EOL><DEDENT>if xins + wins > yins + zins:<EOL><INDENT>score = xins + wins<EOL>if a_score >= b_score and score > b_score:<EOL><INDENT>b_score = score<EOL>b_po = <NUM_LIT><EOL><DEDENT>elif a_score < b_score and score > a_score:<EOL><INDENT>a_score = score<EOL>a_po = <NUM_LIT><EOL><DEDENT><DEDENT>else:<EOL><INDENT>score = yins + zins<EOL>if a_score >= b_score and score > b_score:<EOL><INDENT>b_score = score<EOL>b_po = <NUM_LIT><EOL><DEDENT>elif a_score < b_score and score > a_score:<EOL><INDENT>a_score = score<EOL>a_po = <NUM_LIT><EOL><DEDENT><DEDENT>p1 = <NUM_LIT:2> - in_sum + xins<EOL>if a_score >= b_score and p1 > b_score:<EOL><INDENT>b_score = p1<EOL>b_po = <NUM_LIT><EOL>b_is_bigger_side = False<EOL><DEDENT>elif a_score < b_score and p1 > a_score:<EOL><INDENT>a_score = p1<EOL>a_po = <NUM_LIT><EOL>a_is_bigger_side = False<EOL><DEDENT>p2 = <NUM_LIT:2> - in_sum + yins<EOL>if a_score >= b_score and p2 > b_score:<EOL><INDENT>b_score = p2<EOL>b_po = <NUM_LIT><EOL>b_is_bigger_side = False<EOL><DEDENT>elif a_score < b_score and p2 > a_score:<EOL><INDENT>a_score = p2<EOL>a_po = <NUM_LIT><EOL>a_is_bigger_side = False<EOL><DEDENT>p3 = 
<NUM_LIT:2> - in_sum + zins<EOL>if a_score >= b_score and p3 > b_score:<EOL><INDENT>b_score = p3<EOL>b_po = <NUM_LIT><EOL>b_is_bigger_side = False<EOL><DEDENT>elif a_score < b_score and p3 > a_score:<EOL><INDENT>a_score = p3<EOL>a_po = <NUM_LIT><EOL>a_is_bigger_side = False<EOL><DEDENT>p4 = <NUM_LIT:2> - in_sum + wins<EOL>if a_score >= b_score and p4 > b_score:<EOL><INDENT>b_po = <NUM_LIT><EOL>b_is_bigger_side = False<EOL><DEDENT>elif a_score < b_score and p4 > a_score:<EOL><INDENT>a_po = <NUM_LIT><EOL>a_is_bigger_side = False<EOL><DEDENT>if a_is_bigger_side == b_is_bigger_side:<EOL><INDENT>if a_is_bigger_side: <EOL><INDENT>c1 = (a_po | b_po)<EOL>c2 = (a_po & b_po)<EOL>if (c1 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsb<EOL>xsv_ext1 = xsb - <NUM_LIT:1><EOL>dx_ext0 = dx0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dx_ext1 = dx0 + <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsb + <NUM_LIT:1><EOL>dx_ext0 = dx0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dx_ext1 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c1 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysb<EOL>ysv_ext1 = ysb - <NUM_LIT:1><EOL>dy_ext0 = dy0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dy_ext1 = dy0 + <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb + <NUM_LIT:1><EOL>dy_ext0 = dy0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dy_ext1 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c1 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = zsb<EOL>zsv_ext1 = zsb - <NUM_LIT:1><EOL>dz_ext0 = dz0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dz_ext1 = dz0 + <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb + <NUM_LIT:1><EOL>dz_ext0 = dz0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dz_ext1 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c1 & <NUM_LIT>) == 
<NUM_LIT:0>:<EOL><INDENT>wsv_ext0 = wsb<EOL>wsv_ext1 = wsb - <NUM_LIT:1><EOL>dw_ext0 = dw0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dw_ext1 = dw0 + <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsb + <NUM_LIT:1><EOL>dw_ext0 = dw0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dw_ext1 = dw0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL><DEDENT>xsv_ext2 = xsb<EOL>ysv_ext2 = ysb<EOL>zsv_ext2 = zsb<EOL>wsv_ext2 = wsb<EOL>dx_ext2 = dx0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy_ext2 = dy0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz_ext2 = dz0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw_ext2 = dw0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>if (c2 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>xsv_ext2 += <NUM_LIT:2><EOL>dx_ext2 -= <NUM_LIT:2><EOL><DEDENT>elif (c2 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>ysv_ext2 += <NUM_LIT:2><EOL>dy_ext2 -= <NUM_LIT:2><EOL><DEDENT>elif (c2 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>zsv_ext2 += <NUM_LIT:2><EOL>dz_ext2 -= <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>wsv_ext2 += <NUM_LIT:2><EOL>dw_ext2 -= <NUM_LIT:2><EOL><DEDENT><DEDENT>else: <EOL><INDENT>xsv_ext2 = xsb<EOL>ysv_ext2 = ysb<EOL>zsv_ext2 = zsb<EOL>wsv_ext2 = wsb<EOL>dx_ext2 = dx0<EOL>dy_ext2 = dy0<EOL>dz_ext2 = dz0<EOL>dw_ext2 = dw0<EOL>c = (a_po | b_po)<EOL>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsb - <NUM_LIT:1><EOL>xsv_ext1 = xsb<EOL>dx_ext0 = dx0 + <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL>dx_ext1 = dx0 - SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsb + <NUM_LIT:1><EOL>dx_ext0 = dx_ext1 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb<EOL>dy_ext0 = dy_ext1 = dy0 - SQUISH_CONSTANT_4D<EOL>if (c & <NUM_LIT>) == <NUM_LIT>:<EOL><INDENT>ysv_ext0 -= <NUM_LIT:1><EOL>dy_ext0 += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>ysv_ext1 -= <NUM_LIT:1><EOL>dy_ext1 += <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>ysv_ext0 = 
ysv_ext1 = ysb + <NUM_LIT:1><EOL>dy_ext0 = dy_ext1 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb<EOL>dz_ext0 = dz_ext1 = dz0 - SQUISH_CONSTANT_4D<EOL>if (c & <NUM_LIT>) == <NUM_LIT>:<EOL><INDENT>zsv_ext0 -= <NUM_LIT:1><EOL>dz_ext0 += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>zsv_ext1 -= <NUM_LIT:1><EOL>dz_ext1 += <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb + <NUM_LIT:1><EOL>dz_ext0 = dz_ext1 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>wsv_ext0 = wsb<EOL>wsv_ext1 = wsb - <NUM_LIT:1><EOL>dw_ext0 = dw0 - SQUISH_CONSTANT_4D<EOL>dw_ext1 = dw0 + <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsb + <NUM_LIT:1><EOL>dw_ext0 = dw_ext1 = dw0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT><DEDENT><DEDENT>else: <EOL><INDENT>if a_is_bigger_side:<EOL><INDENT>c1 = a_po<EOL>c2 = b_po<EOL><DEDENT>else:<EOL><INDENT>c1 = b_po<EOL>c2 = a_po<EOL><DEDENT>if (c1 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsb - <NUM_LIT:1><EOL>xsv_ext1 = xsb<EOL>dx_ext0 = dx0 + <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL>dx_ext1 = dx0 - SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsb + <NUM_LIT:1><EOL>dx_ext0 = dx_ext1 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>if (c1 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb<EOL>dy_ext0 = dy_ext1 = dy0 - SQUISH_CONSTANT_4D<EOL>if (c1 & <NUM_LIT>) == <NUM_LIT>:<EOL><INDENT>ysv_ext0 -= <NUM_LIT:1><EOL>dy_ext0 += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>ysv_ext1 -= <NUM_LIT:1><EOL>dy_ext1 += <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb + <NUM_LIT:1><EOL>dy_ext0 = dy_ext1 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>if (c1 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb<EOL>dz_ext0 = dz_ext1 = dz0 - SQUISH_CONSTANT_4D<EOL>if (c1 & <NUM_LIT>) == 
<NUM_LIT>:<EOL><INDENT>zsv_ext0 -= <NUM_LIT:1><EOL>dz_ext0 += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>zsv_ext1 -= <NUM_LIT:1><EOL>dz_ext1 += <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb + <NUM_LIT:1><EOL>dz_ext0 = dz_ext1 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>if (c1 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>wsv_ext0 = wsb<EOL>wsv_ext1 = wsb - <NUM_LIT:1><EOL>dw_ext0 = dw0 - SQUISH_CONSTANT_4D<EOL>dw_ext1 = dw0 + <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsb + <NUM_LIT:1><EOL>dw_ext0 = dw_ext1 = dw0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL><DEDENT>xsv_ext2 = xsb<EOL>ysv_ext2 = ysb<EOL>zsv_ext2 = zsb<EOL>wsv_ext2 = wsb<EOL>dx_ext2 = dx0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy_ext2 = dy0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz_ext2 = dz0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw_ext2 = dw0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>if (c2 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>xsv_ext2 += <NUM_LIT:2><EOL>dx_ext2 -= <NUM_LIT:2><EOL><DEDENT>elif (c2 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>ysv_ext2 += <NUM_LIT:2><EOL>dy_ext2 -= <NUM_LIT:2><EOL><DEDENT>elif (c2 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>zsv_ext2 += <NUM_LIT:2><EOL>dz_ext2 -= <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>wsv_ext2 += <NUM_LIT:2><EOL>dw_ext2 -= <NUM_LIT:2><EOL><DEDENT><DEDENT>dx1 = dx0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL>dy1 = dy0 - <NUM_LIT:0> - SQUISH_CONSTANT_4D<EOL>dz1 = dz0 - <NUM_LIT:0> - SQUISH_CONSTANT_4D<EOL>dw1 = dw0 - <NUM_LIT:0> - SQUISH_CONSTANT_4D<EOL>attn1 = <NUM_LIT:2> - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 - dw1 * dw1<EOL>if attn1 > <NUM_LIT:0>:<EOL><INDENT>attn1 *= attn1<EOL>value += attn1 * attn1 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:0>, dx1, dy1, dz1, dw1)<EOL><DEDENT>dx2 = dx0 - <NUM_LIT:0> - SQUISH_CONSTANT_4D<EOL>dy2 = dy0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL>dz2 = dz1<EOL>dw2 = dw1<EOL>attn2 = <NUM_LIT:2> - dx2 * dx2 - dy2 * dy2 - dz2 
* dz2 - dw2 * dw2<EOL>if attn2 > <NUM_LIT:0>:<EOL><INDENT>attn2 *= attn2<EOL>value += attn2 * attn2 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:0>, dx2, dy2, dz2, dw2)<EOL><DEDENT>dx3 = dx2<EOL>dy3 = dy1<EOL>dz3 = dz0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL>dw3 = dw1<EOL>attn3 = <NUM_LIT:2> - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 - dw3 * dw3<EOL>if attn3 > <NUM_LIT:0>:<EOL><INDENT>attn3 *= attn3<EOL>value += attn3 * attn3 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:0>, dx3, dy3, dz3, dw3)<EOL><DEDENT>dx4 = dx2<EOL>dy4 = dy1<EOL>dz4 = dz1<EOL>dw4 = dw0 - <NUM_LIT:1> - SQUISH_CONSTANT_4D<EOL>attn4 = <NUM_LIT:2> - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 - dw4 * dw4<EOL>if attn4 > <NUM_LIT:0>:<EOL><INDENT>attn4 *= attn4<EOL>value += attn4 * attn4 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:1>, dx4, dy4, dz4, dw4)<EOL><DEDENT>dx5 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy5 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz5 = dz0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw5 = dw0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>attn5 = <NUM_LIT:2> - dx5 * dx5 - dy5 * dy5 - dz5 * dz5 - dw5 * dw5<EOL>if attn5 > <NUM_LIT:0>:<EOL><INDENT>attn5 *= attn5<EOL>value += attn5 * attn5 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:0>, dx5, dy5, dz5, dw5)<EOL><DEDENT>dx6 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy6 = dy0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz6 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw6 = dw0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>attn6 = <NUM_LIT:2> - dx6 * dx6 - dy6 * dy6 - dz6 * dz6 - dw6 * dw6<EOL>if attn6 > <NUM_LIT:0>:<EOL><INDENT>attn6 *= attn6<EOL>value += attn6 * attn6 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:0>, dx6, dy6, dz6, dw6)<EOL><DEDENT>dx7 
= dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy7 = dy0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz7 = dz0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw7 = dw0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>attn7 = <NUM_LIT:2> - dx7 * dx7 - dy7 * dy7 - dz7 * dz7 - dw7 * dw7<EOL>if attn7 > <NUM_LIT:0>:<EOL><INDENT>attn7 *= attn7<EOL>value += attn7 * attn7 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:1>, dx7, dy7, dz7, dw7)<EOL><DEDENT>dx8 = dx0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy8 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz8 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw8 = dw0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>attn8 = <NUM_LIT:2> - dx8 * dx8 - dy8 * dy8 - dz8 * dz8 - dw8 * dw8<EOL>if attn8 > <NUM_LIT:0>:<EOL><INDENT>attn8 *= attn8<EOL>value += attn8 * attn8 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:0>, dx8, dy8, dz8, dw8)<EOL><DEDENT>dx9 = dx0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy9 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz9 = dz0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw9 = dw0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>attn9 = <NUM_LIT:2> - dx9 * dx9 - dy9 * dy9 - dz9 * dz9 - dw9 * dw9<EOL>if attn9 > <NUM_LIT:0>:<EOL><INDENT>attn9 *= attn9<EOL>value += attn9 * attn9 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:1>, dx9, dy9, dz9, dw9)<EOL><DEDENT>dx10 = dx0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy10 = dy0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz10 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw10 = dw0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>attn10 = <NUM_LIT:2> - dx10 * dx10 - dy10 * dy10 - dz10 * dz10 - dw10 * dw10<EOL>if attn10 > <NUM_LIT:0>:<EOL><INDENT>attn10 *= attn10<EOL>value += attn10 * attn10 * 
extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:1>, dx10, dy10, dz10, dw10)<EOL><DEDENT><DEDENT>else: <EOL><INDENT>a_is_bigger_side = True<EOL>b_is_bigger_side = True<EOL>if xins + yins < zins + wins:<EOL><INDENT>a_score = xins + yins<EOL>a_po = <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>a_score = zins + wins<EOL>a_po = <NUM_LIT><EOL><DEDENT>if xins + zins < yins + wins:<EOL><INDENT>b_score = xins + zins<EOL>b_po = <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>b_score = yins + wins<EOL>b_po = <NUM_LIT><EOL><DEDENT>if xins + wins < yins + zins:<EOL><INDENT>score = xins + wins<EOL>if a_score <= b_score and score < b_score:<EOL><INDENT>b_score = score<EOL>b_po = <NUM_LIT><EOL><DEDENT>elif a_score > b_score and score < a_score:<EOL><INDENT>a_score = score<EOL>a_po = <NUM_LIT><EOL><DEDENT><DEDENT>else:<EOL><INDENT>score = yins + zins<EOL>if a_score <= b_score and score < b_score:<EOL><INDENT>b_score = score<EOL>b_po = <NUM_LIT><EOL><DEDENT>elif a_score > b_score and score < a_score:<EOL><INDENT>a_score = score<EOL>a_po = <NUM_LIT><EOL><DEDENT><DEDENT>p1 = <NUM_LIT:3> - in_sum + xins<EOL>if a_score <= b_score and p1 < b_score:<EOL><INDENT>b_score = p1<EOL>b_po = <NUM_LIT><EOL>b_is_bigger_side = False<EOL><DEDENT>elif a_score > b_score and p1 < a_score:<EOL><INDENT>a_score = p1<EOL>a_po = <NUM_LIT><EOL>a_is_bigger_side = False<EOL><DEDENT>p2 = <NUM_LIT:3> - in_sum + yins<EOL>if a_score <= b_score and p2 < b_score:<EOL><INDENT>b_score = p2<EOL>b_po = <NUM_LIT><EOL>b_is_bigger_side = False<EOL><DEDENT>elif a_score > b_score and p2 < a_score:<EOL><INDENT>a_score = p2<EOL>a_po = <NUM_LIT><EOL>a_is_bigger_side = False<EOL><DEDENT>p3 = <NUM_LIT:3> - in_sum + zins<EOL>if a_score <= b_score and p3 < b_score:<EOL><INDENT>b_score = p3<EOL>b_po = <NUM_LIT><EOL>b_is_bigger_side = False<EOL><DEDENT>elif a_score > b_score and p3 < a_score:<EOL><INDENT>a_score = p3<EOL>a_po = <NUM_LIT><EOL>a_is_bigger_side = False<EOL><DEDENT>p4 = <NUM_LIT:3> - in_sum + 
wins<EOL>if a_score <= b_score and p4 < b_score:<EOL><INDENT>b_po = <NUM_LIT><EOL>b_is_bigger_side = False<EOL><DEDENT>elif a_score > b_score and p4 < a_score:<EOL><INDENT>a_po = <NUM_LIT><EOL>a_is_bigger_side = False<EOL><DEDENT>if a_is_bigger_side == b_is_bigger_side:<EOL><INDENT>if a_is_bigger_side: <EOL><INDENT>c1 = (a_po & b_po)<EOL>c2 = (a_po | b_po)<EOL>xsv_ext0 = xsv_ext1 = xsb<EOL>ysv_ext0 = ysv_ext1 = ysb<EOL>zsv_ext0 = zsv_ext1 = zsb<EOL>wsv_ext0 = wsv_ext1 = wsb<EOL>dx_ext0 = dx0 - SQUISH_CONSTANT_4D<EOL>dy_ext0 = dy0 - SQUISH_CONSTANT_4D<EOL>dz_ext0 = dz0 - SQUISH_CONSTANT_4D<EOL>dw_ext0 = dw0 - SQUISH_CONSTANT_4D<EOL>dx_ext1 = dx0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy_ext1 = dy0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz_ext1 = dz0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw_ext1 = dw0 - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>if (c1 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 += <NUM_LIT:1><EOL>dx_ext0 -= <NUM_LIT:1><EOL>xsv_ext1 += <NUM_LIT:2><EOL>dx_ext1 -= <NUM_LIT:2><EOL><DEDENT>elif (c1 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 += <NUM_LIT:1><EOL>dy_ext0 -= <NUM_LIT:1><EOL>ysv_ext1 += <NUM_LIT:2><EOL>dy_ext1 -= <NUM_LIT:2><EOL><DEDENT>elif (c1 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 += <NUM_LIT:1><EOL>dz_ext0 -= <NUM_LIT:1><EOL>zsv_ext1 += <NUM_LIT:2><EOL>dz_ext1 -= <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>wsv_ext0 += <NUM_LIT:1><EOL>dw_ext0 -= <NUM_LIT:1><EOL>wsv_ext1 += <NUM_LIT:2><EOL>dw_ext1 -= <NUM_LIT:2><EOL><DEDENT>xsv_ext2 = xsb + <NUM_LIT:1><EOL>ysv_ext2 = ysb + <NUM_LIT:1><EOL>zsv_ext2 = zsb + <NUM_LIT:1><EOL>wsv_ext2 = wsb + <NUM_LIT:1><EOL>dx_ext2 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy_ext2 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz_ext2 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw_ext2 = dw0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>if (c2 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>xsv_ext2 -= <NUM_LIT:2><EOL>dx_ext2 += 
<NUM_LIT:2><EOL><DEDENT>elif (c2 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>ysv_ext2 -= <NUM_LIT:2><EOL>dy_ext2 += <NUM_LIT:2><EOL><DEDENT>elif (c2 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>zsv_ext2 -= <NUM_LIT:2><EOL>dz_ext2 += <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>wsv_ext2 -= <NUM_LIT:2><EOL>dw_ext2 += <NUM_LIT:2><EOL><DEDENT><DEDENT>else: <EOL><INDENT>xsv_ext2 = xsb + <NUM_LIT:1><EOL>ysv_ext2 = ysb + <NUM_LIT:1><EOL>zsv_ext2 = zsb + <NUM_LIT:1><EOL>wsv_ext2 = wsb + <NUM_LIT:1><EOL>dx_ext2 = dx0 - <NUM_LIT:1> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL>dy_ext2 = dy0 - <NUM_LIT:1> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL>dz_ext2 = dz0 - <NUM_LIT:1> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL>dw_ext2 = dw0 - <NUM_LIT:1> - <NUM_LIT:4> * SQUISH_CONSTANT_4D<EOL>c = (a_po & b_po)<EOL>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsb + <NUM_LIT:2><EOL>xsv_ext1 = xsb + <NUM_LIT:1><EOL>dx_ext0 = dx0 - <NUM_LIT:2> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dx_ext1 = dx0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsb<EOL>dx_ext0 = dx_ext1 = dx0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb + <NUM_LIT:1><EOL>dy_ext0 = dy_ext1 = dy0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 += <NUM_LIT:1><EOL>dy_ext0 -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>ysv_ext1 += <NUM_LIT:1><EOL>dy_ext1 -= <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb<EOL>dy_ext0 = dy_ext1 = dy0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb + <NUM_LIT:1><EOL>dz_ext0 = dz_ext1 = dz0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>if (c & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 += <NUM_LIT:1><EOL>dz_ext0 -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>zsv_ext1 += <NUM_LIT:1><EOL>dz_ext1 -= 
<NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb<EOL>dz_ext0 = dz_ext1 = dz0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>wsv_ext0 = wsb + <NUM_LIT:1><EOL>wsv_ext1 = wsb + <NUM_LIT:2><EOL>dw_ext0 = dw0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dw_ext1 = dw0 - <NUM_LIT:2> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsb<EOL>dw_ext0 = dw_ext1 = dw0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT><DEDENT><DEDENT>else: <EOL><INDENT>if a_is_bigger_side:<EOL><INDENT>c1 = a_po<EOL>c2 = b_po<EOL><DEDENT>else:<EOL><INDENT>c1 = b_po<EOL>c2 = a_po<EOL><DEDENT>if (c1 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>xsv_ext0 = xsb + <NUM_LIT:2><EOL>xsv_ext1 = xsb + <NUM_LIT:1><EOL>dx_ext0 = dx0 - <NUM_LIT:2> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dx_ext1 = dx0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>xsv_ext0 = xsv_ext1 = xsb<EOL>dx_ext0 = dx_ext1 = dx0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c1 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb + <NUM_LIT:1><EOL>dy_ext0 = dy_ext1 = dy0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>if (c1 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>ysv_ext0 += <NUM_LIT:1><EOL>dy_ext0 -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>ysv_ext1 += <NUM_LIT:1><EOL>dy_ext1 -= <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>ysv_ext0 = ysv_ext1 = ysb<EOL>dy_ext0 = dy_ext1 = dy0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c1 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb + <NUM_LIT:1><EOL>dz_ext0 = dz_ext1 = dz0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>if (c1 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>zsv_ext0 += <NUM_LIT:1><EOL>dz_ext0 -= <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>zsv_ext1 += <NUM_LIT:1><EOL>dz_ext1 -= <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>zsv_ext0 = zsv_ext1 = zsb<EOL>dz_ext0 = dz_ext1 = dz0 - 
<NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>if (c1 & <NUM_LIT>) != <NUM_LIT:0>:<EOL><INDENT>wsv_ext0 = wsb + <NUM_LIT:1><EOL>wsv_ext1 = wsb + <NUM_LIT:2><EOL>dw_ext0 = dw0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dw_ext1 = dw0 - <NUM_LIT:2> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>else:<EOL><INDENT>wsv_ext0 = wsv_ext1 = wsb<EOL>dw_ext0 = dw_ext1 = dw0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL><DEDENT>xsv_ext2 = xsb + <NUM_LIT:1><EOL>ysv_ext2 = ysb + <NUM_LIT:1><EOL>zsv_ext2 = zsb + <NUM_LIT:1><EOL>wsv_ext2 = wsb + <NUM_LIT:1><EOL>dx_ext2 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy_ext2 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz_ext2 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw_ext2 = dw0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>if (c2 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>xsv_ext2 -= <NUM_LIT:2><EOL>dx_ext2 += <NUM_LIT:2><EOL><DEDENT>elif (c2 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>ysv_ext2 -= <NUM_LIT:2><EOL>dy_ext2 += <NUM_LIT:2><EOL><DEDENT>elif (c2 & <NUM_LIT>) == <NUM_LIT:0>:<EOL><INDENT>zsv_ext2 -= <NUM_LIT:2><EOL>dz_ext2 += <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>wsv_ext2 -= <NUM_LIT:2><EOL>dw_ext2 += <NUM_LIT:2><EOL><DEDENT><DEDENT>dx4 = dx0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dy4 = dy0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dz4 = dz0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dw4 = dw0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>attn4 = <NUM_LIT:2> - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 - dw4 * dw4<EOL>if attn4 > <NUM_LIT:0>:<EOL><INDENT>attn4 *= attn4<EOL>value += attn4 * attn4 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:0>, dx4, dy4, dz4, dw4)<EOL><DEDENT>dx3 = dx4<EOL>dy3 = dy4<EOL>dz3 = dz0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dw3 = dw0 - <NUM_LIT:1> - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>attn3 = <NUM_LIT:2> - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 - dw3 * dw3<EOL>if attn3 > 
<NUM_LIT:0>:<EOL><INDENT>attn3 *= attn3<EOL>value += attn3 * attn3 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:1>, dx3, dy3, dz3, dw3)<EOL><DEDENT>dx2 = dx4<EOL>dy2 = dy0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dz2 = dz4<EOL>dw2 = dw3<EOL>attn2 = <NUM_LIT:2> - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 - dw2 * dw2<EOL>if attn2 > <NUM_LIT:0>:<EOL><INDENT>attn2 *= attn2<EOL>value += attn2 * attn2 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:1>, dx2, dy2, dz2, dw2)<EOL><DEDENT>dx1 = dx0 - <NUM_LIT:3> * SQUISH_CONSTANT_4D<EOL>dz1 = dz4<EOL>dy1 = dy4<EOL>dw1 = dw3<EOL>attn1 = <NUM_LIT:2> - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 - dw1 * dw1<EOL>if attn1 > <NUM_LIT:0>:<EOL><INDENT>attn1 *= attn1<EOL>value += attn1 * attn1 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:1>, dx1, dy1, dz1, dw1)<EOL><DEDENT>dx5 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy5 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz5 = dz0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw5 = dw0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>attn5 = <NUM_LIT:2> - dx5 * dx5 - dy5 * dy5 - dz5 * dz5 - dw5 * dw5<EOL>if attn5 > <NUM_LIT:0>:<EOL><INDENT>attn5 *= attn5<EOL>value += attn5 * attn5 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:0>, dx5, dy5, dz5, dw5)<EOL><DEDENT>dx6 = dx0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy6 = dy0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz6 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw6 = dw0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>attn6 = <NUM_LIT:2> - dx6 * dx6 - dy6 * dy6 - dz6 * dz6 - dw6 * dw6<EOL>if attn6 > <NUM_LIT:0>:<EOL><INDENT>attn6 *= attn6<EOL>value += attn6 * attn6 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:0>, dx6, dy6, dz6, dw6)<EOL><DEDENT>dx7 = dx0 - <NUM_LIT:1> - 
<NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy7 = dy0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz7 = dz0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw7 = dw0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>attn7 = <NUM_LIT:2> - dx7 * dx7 - dy7 * dy7 - dz7 * dz7 - dw7 * dw7<EOL>if attn7 > <NUM_LIT:0>:<EOL><INDENT>attn7 *= attn7<EOL>value += attn7 * attn7 * extrapolate(xsb + <NUM_LIT:1>, ysb + <NUM_LIT:0>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:1>, dx7, dy7, dz7, dw7)<EOL><DEDENT>dx8 = dx0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy8 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz8 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw8 = dw0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>attn8 = <NUM_LIT:2> - dx8 * dx8 - dy8 * dy8 - dz8 * dz8 - dw8 * dw8<EOL>if attn8 > <NUM_LIT:0>:<EOL><INDENT>attn8 *= attn8<EOL>value += attn8 * attn8 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:0>, dx8, dy8, dz8, dw8)<EOL><DEDENT>dx9 = dx0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy9 = dy0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz9 = dz0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw9 = dw0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>attn9 = <NUM_LIT:2> - dx9 * dx9 - dy9 * dy9 - dz9 * dz9 - dw9 * dw9<EOL>if attn9 > <NUM_LIT:0>:<EOL><INDENT>attn9 *= attn9<EOL>value += attn9 * attn9 * extrapolate(xsb + <NUM_LIT:0>, ysb + <NUM_LIT:1>, zsb + <NUM_LIT:0>, wsb + <NUM_LIT:1>, dx9, dy9, dz9, dw9)<EOL><DEDENT>dx10 = dx0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dy10 = dy0 - <NUM_LIT:0> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dz10 = dz0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>dw10 = dw0 - <NUM_LIT:1> - <NUM_LIT:2> * SQUISH_CONSTANT_4D<EOL>attn10 = <NUM_LIT:2> - dx10 * dx10 - dy10 * dy10 - dz10 * dz10 - dw10 * dw10<EOL>if attn10 > <NUM_LIT:0>:<EOL><INDENT>attn10 *= attn10<EOL>value += attn10 * attn10 * extrapolate(xsb + <NUM_LIT:0>, 
ysb + <NUM_LIT:0>, zsb + <NUM_LIT:1>, wsb + <NUM_LIT:1>, dx10, dy10, dz10, dw10)<EOL><DEDENT><DEDENT>attn_ext0 = <NUM_LIT:2> - dx_ext0 * dx_ext0 - dy_ext0 * dy_ext0 - dz_ext0 * dz_ext0 - dw_ext0 * dw_ext0<EOL>if attn_ext0 > <NUM_LIT:0>:<EOL><INDENT>attn_ext0 *= attn_ext0<EOL>value += attn_ext0 * attn_ext0 * extrapolate(xsv_ext0, ysv_ext0, zsv_ext0, wsv_ext0, dx_ext0, dy_ext0, dz_ext0, dw_ext0)<EOL><DEDENT>attn_ext1 = <NUM_LIT:2> - dx_ext1 * dx_ext1 - dy_ext1 * dy_ext1 - dz_ext1 * dz_ext1 - dw_ext1 * dw_ext1<EOL>if attn_ext1 > <NUM_LIT:0>:<EOL><INDENT>attn_ext1 *= attn_ext1<EOL>value += attn_ext1 * attn_ext1 * extrapolate(xsv_ext1, ysv_ext1, zsv_ext1, wsv_ext1, dx_ext1, dy_ext1, dz_ext1, dw_ext1)<EOL><DEDENT>attn_ext2 = <NUM_LIT:2> - dx_ext2 * dx_ext2 - dy_ext2 * dy_ext2 - dz_ext2 * dz_ext2 - dw_ext2 * dw_ext2<EOL>if attn_ext2 > <NUM_LIT:0>:<EOL><INDENT>attn_ext2 *= attn_ext2<EOL>value += attn_ext2 * attn_ext2 * extrapolate(xsv_ext2, ysv_ext2, zsv_ext2, wsv_ext2, dx_ext2, dy_ext2, dz_ext2, dw_ext2)<EOL><DEDENT>return value / NORM_CONSTANT_4D<EOL>", "docstring": "Generate 4D OpenSimplex noise from X,Y,Z,W coordinates.", "id": "f16267:c0:m6"}
{"signature": "def iter_items(self,  start_key=None, end_key=None, reverse=False):", "body": "<EOL>if self.is_empty():<EOL><INDENT>return []<EOL><DEDENT>if reverse:<EOL><INDENT>return self._iter_items_backward(start_key, end_key)<EOL><DEDENT>else:<EOL><INDENT>return self._iter_items_forward(start_key, end_key)<EOL><DEDENT>", "docstring": "Iterates over the (key, value) items of the associated tree,\n        in ascending order if reverse is True, iterate in descending order,\n        reverse defaults to False", "id": "f16268:c3:m23"}
{"signature": "def __contains__(self, key):", "body": "try:<EOL><INDENT>self.get_value(key)<EOL>return True<EOL><DEDENT>except KeyError:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "k in T -> True if T has a key k, else False", "id": "f16268:c3:m10"}
{"signature": "def pop_max(self):", "body": "item = self.max_item()<EOL>self.remove(item[<NUM_LIT:0>])<EOL>return item<EOL>", "docstring": "T.pop_max() -> (k, v), remove item with maximum key, raise ValueError\n        if T is empty.", "id": "f16268:c3:m19"}
{"signature": "def key_slice(self, start_key, end_key, reverse=False):", "body": "return (k for k, v in self.iter_items(start_key, end_key, reverse=reverse))<EOL>", "docstring": "T.key_slice(start_key, end_key) -> key iterator:\n        start_key <= key < end_key.\n\n        Yields keys in ascending order if reverse is False else in descending order.", "id": "f16268:c3:m22"}
{"signature": "def succ_item(self, key, default=_sentinel):", "body": "<EOL>node = self._root<EOL>succ_node = None<EOL>while node is not None:<EOL><INDENT>cmp = self._cmp(self._cmp_data, key, node.key)<EOL>if cmp == <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>elif cmp < <NUM_LIT:0>:<EOL><INDENT>if (succ_node is None) or self._cmp(self._cmp_data, node.key, succ_node.key) < <NUM_LIT:0>:<EOL><INDENT>succ_node = node<EOL><DEDENT>node = node.left<EOL><DEDENT>else:<EOL><INDENT>node = node.right<EOL><DEDENT><DEDENT>if node is None:  <EOL><INDENT>if default is _sentinel:<EOL><INDENT>raise KeyError(str(key))<EOL><DEDENT>return default<EOL><DEDENT>if node.right is not None:<EOL><INDENT>node = node.right<EOL>while node.left is not None:<EOL><INDENT>node = node.left<EOL><DEDENT>if succ_node is None:<EOL><INDENT>succ_node = node<EOL><DEDENT>elif self._cmp(self._cmp_data, node.key, succ_node.key) < <NUM_LIT:0>:<EOL><INDENT>succ_node = node<EOL><DEDENT><DEDENT>elif succ_node is None:  <EOL><INDENT>if default is _sentinel:<EOL><INDENT>raise KeyError(str(key))<EOL><DEDENT>return default<EOL><DEDENT>return succ_node.key, succ_node.value<EOL>", "docstring": "Get successor (k,v) pair of key, raises KeyError if key is max key\n        or key does not exist. optimized for pypy.", "id": "f16268:c3:m7"}
{"signature": "def remove(self, key):", "body": "if self._root is None:<EOL><INDENT>raise KeyError(str(key))<EOL><DEDENT>head = Node()  <EOL>node = head<EOL>node.right = self._root<EOL>parent = None<EOL>grand_parent = None<EOL>found = None  <EOL>direction = <NUM_LIT:1><EOL>while node[direction] is not None:<EOL><INDENT>last = direction<EOL>grand_parent = parent<EOL>parent = node<EOL>node = node[direction]<EOL>direction = <NUM_LIT:1> if (self._cmp(self._cmp_data, node.key, key) < <NUM_LIT:0>) else <NUM_LIT:0><EOL>if self._cmp(self._cmp_data, key, node.key) == <NUM_LIT:0>:<EOL><INDENT>found = node<EOL><DEDENT>if not RBTree.is_red(node) and not RBTree.is_red(node[direction]):<EOL><INDENT>if RBTree.is_red(node[<NUM_LIT:1> - direction]):<EOL><INDENT>parent[last] = RBTree.jsw_single(node, direction)<EOL>parent = parent[last]<EOL><DEDENT>elif not RBTree.is_red(node[<NUM_LIT:1> - direction]):<EOL><INDENT>sibling = parent[<NUM_LIT:1> - last]<EOL>if sibling is not None:<EOL><INDENT>if (not RBTree.is_red(sibling[<NUM_LIT:1> - last])) and (not RBTree.is_red(sibling[last])):<EOL><INDENT>parent.red = False<EOL>sibling.red = True<EOL>node.red = True<EOL><DEDENT>else:<EOL><INDENT>direction2 = <NUM_LIT:1> if grand_parent.right is parent else <NUM_LIT:0><EOL>if RBTree.is_red(sibling[last]):<EOL><INDENT>grand_parent[direction2] = RBTree.jsw_double(parent, last)<EOL><DEDENT>elif RBTree.is_red(sibling[<NUM_LIT:1>-last]):<EOL><INDENT>grand_parent[direction2] = RBTree.jsw_single(parent, last)<EOL><DEDENT>grand_parent[direction2].red = True<EOL>node.red = True<EOL>grand_parent[direction2].left.red = False<EOL>grand_parent[direction2].right.red = False<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if found is not None:<EOL><INDENT>found.key = node.key<EOL>found.value = node.value<EOL>parent[int(parent.right is node)] = node[int(node.left is None)]<EOL>node.free()<EOL>self._count -= <NUM_LIT:1><EOL><DEDENT>self._root = head.right<EOL>if self._root is not None:<EOL><INDENT>self._root.red = 
False<EOL><DEDENT>if not found:<EOL><INDENT>raise KeyError(str(key))<EOL><DEDENT>", "docstring": "T.remove(key) <==> del T[key], remove item <key> from tree.", "id": "f16268:c5:m5"}
{"signature": "def clear(self):", "body": "def _clear(node):<EOL><INDENT>if node is not None:<EOL><INDENT>_clear(node.left)<EOL>_clear(node.right)<EOL>node.free()<EOL><DEDENT><DEDENT>_clear(self._root)<EOL>self._count = <NUM_LIT:0><EOL>self._root = None<EOL>", "docstring": "T.clear() -> None.  Remove all items from T.", "id": "f16268:c3:m1"}
{"signature": "def poll(self):", "body": "assert(len(self.events_scan) != <NUM_LIT:0>)<EOL>p, events_current = self.events_scan.pop_min()<EOL>return p, events_current<EOL>", "docstring": "Get, and remove, the first (lowest) item from this queue.\n\n:return: the first (lowest) item from this queue.\n:rtype: Point, Event pair.", "id": "f16268:c2:m2"}
{"signature": "def prev_item(self, key, default=_sentinel):", "body": "<EOL>node = self._root<EOL>prev_node = None<EOL>while node is not None:<EOL><INDENT>cmp = self._cmp(self._cmp_data, key, node.key)<EOL>if cmp == <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>elif cmp < <NUM_LIT:0>:<EOL><INDENT>node = node.left<EOL><DEDENT>else:<EOL><INDENT>if (prev_node is None) or self._cmp(self._cmp_data, prev_node.key, node.key) < <NUM_LIT:0>:<EOL><INDENT>prev_node = node<EOL><DEDENT>node = node.right<EOL><DEDENT><DEDENT>if node is None:  <EOL><INDENT>if default is _sentinel:<EOL><INDENT>raise KeyError(str(key))<EOL><DEDENT>return default<EOL><DEDENT>if node.left is not None:<EOL><INDENT>node = node.left<EOL>while node.right is not None:<EOL><INDENT>node = node.right<EOL><DEDENT>if prev_node is None:<EOL><INDENT>prev_node = node<EOL><DEDENT>elif self._cmp(self._cmp_data, prev_node.key, node.key) < <NUM_LIT:0>:<EOL><INDENT>prev_node = node<EOL><DEDENT><DEDENT>elif prev_node is None:  <EOL><INDENT>if default is _sentinel:<EOL><INDENT>raise KeyError(str(key))<EOL><DEDENT>return default<EOL><DEDENT>return prev_node.key, prev_node.value<EOL>", "docstring": "Get predecessor (k,v) pair of key, raises KeyError if key is min key\n        or key does not exist. optimized for pypy.", "id": "f16268:c3:m8"}
{"signature": "def max_item(self):", "body": "if self.is_empty():<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>node = self._root<EOL>while node.right is not None:<EOL><INDENT>node = node.right<EOL><DEDENT>return node.key, node.value<EOL>", "docstring": "Get item with max key of tree, raises ValueError if tree is empty.", "id": "f16268:c3:m6"}
{"signature": "def __init__(self, items=None, cmp=None, cmp_data=None):", "body": "self._root = None<EOL>self._count = <NUM_LIT:0><EOL>if cmp is None:<EOL><INDENT>def cmp(cmp_data, a, b):<EOL><INDENT>if a < b:<EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT>elif a > b:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT><DEDENT><DEDENT>self._cmp = cmp<EOL>self._cmp_data = cmp_data<EOL>if items is not None:<EOL><INDENT>self.update(items)<EOL><DEDENT>", "docstring": "T.__init__(...) initializes T; see T.__class__.__doc__ for signature", "id": "f16268:c3:m0"}
{"signature": "def is_empty(self):", "body": "return self.count == <NUM_LIT:0><EOL>", "docstring": "T.is_empty() -> False if T contains any items else True", "id": "f16268:c3:m12"}
{"signature": "def __len__(self):", "body": "return self.count<EOL>", "docstring": "T.__len__() <==> len(x)", "id": "f16268:c3:m11"}
{"signature": "def get_intersections_with_segments(self):", "body": "if Real is float:<EOL><INDENT>return [<EOL>(p, [event.segment for event in event_set])<EOL>for p, event_set in self.intersections.items()<EOL>]<EOL><DEDENT>else:<EOL><INDENT>return [<EOL>(<EOL>(float(p[<NUM_LIT:0>]), float(p[<NUM_LIT:1>])),<EOL>[((float(event.segment[<NUM_LIT:0>][<NUM_LIT:0>]), float(event.segment[<NUM_LIT:0>][<NUM_LIT:1>])),<EOL>(float(event.segment[<NUM_LIT:1>][<NUM_LIT:0>]), float(event.segment[<NUM_LIT:1>][<NUM_LIT:1>])))<EOL>for event in event_set],<EOL>)<EOL>for p, event_set in self.intersections.items()<EOL>]<EOL><DEDENT>", "docstring": "Return a list of unordered intersection '(point, segment)' pairs,\nwhere segments may contain 2 or more values.", "id": "f16268:c1:m2"}
{"signature": "def set_default(self, key, default=None):", "body": "try:<EOL><INDENT>return self.get_value(key)<EOL><DEDENT>except KeyError:<EOL><INDENT>self.insert(key, default)<EOL>return default<EOL><DEDENT>", "docstring": "T.set_default(k[,d]) -> T.get(k,d), also set T[k]=d if k not in T", "id": "f16268:c3:m13"}
{"signature": "@property<EOL><INDENT>def count(self):<DEDENT>", "body": "return self._count<EOL>", "docstring": "Get items count.", "id": "f16268:c3:m2"}
{"signature": "def isect_polygon__naive(points) -> list:", "body": "isect = []<EOL>n = len(points)<EOL>if Real is float:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>points = [(Real(p[<NUM_LIT:0>]), Real(p[<NUM_LIT:1>])) for p in points]<EOL><DEDENT>for i in range(n):<EOL><INDENT>a0, a1 = points[i], points[(i + <NUM_LIT:1>) % n]<EOL>for j in range(i + <NUM_LIT:1>, n):<EOL><INDENT>b0, b1 = points[j], points[(j + <NUM_LIT:1>) % n]<EOL>if a0 not in (b0, b1) and a1 not in (b0, b1):<EOL><INDENT>ix = isect_seg_seg_v2_point(a0, a1, b0, b1)<EOL>if ix is not None:<EOL><INDENT>if USE_IGNORE_SEGMENT_ENDINGS:<EOL><INDENT>if ((len_squared_v2v2(ix, a0) < NUM_EPS_SQ or<EOL>len_squared_v2v2(ix, a1) < NUM_EPS_SQ) and<EOL>(len_squared_v2v2(ix, b0) < NUM_EPS_SQ or<EOL>len_squared_v2v2(ix, b1) < NUM_EPS_SQ)):<EOL><INDENT>continue<EOL><DEDENT><DEDENT>isect.append(ix)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return isect<EOL>", "docstring": "Brute force O(n2) version of ``isect_polygon`` for test validation.", "id": "f16268:m13"}
{"signature": "def compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):", "body": "def _make_line(p1, p2):<EOL><INDENT>A = (p1[<NUM_LIT:1>] - p2[<NUM_LIT:1>])<EOL>B = (p2[<NUM_LIT:0>] - p1[<NUM_LIT:0>])<EOL>C = (p1[<NUM_LIT:0>]*p2[<NUM_LIT:1>] - p2[<NUM_LIT:0>]*p1[<NUM_LIT:1>])<EOL>return A, B, -C<EOL><DEDENT>L1 = _make_line((x1, y1), (x2, y2))<EOL>L2 = _make_line((x3, y3), (x4, y4))<EOL>D = L1[<NUM_LIT:0>] * L2[<NUM_LIT:1>] - L1[<NUM_LIT:1>] * L2[<NUM_LIT:0>]<EOL>Dx = L1[<NUM_LIT:2>] * L2[<NUM_LIT:1>] - L1[<NUM_LIT:1>] * L2[<NUM_LIT:2>]<EOL>Dy = L1[<NUM_LIT:0>] * L2[<NUM_LIT:2>] - L1[<NUM_LIT:2>] * L2[<NUM_LIT:0>]<EOL>if D != <NUM_LIT:0>:<EOL><INDENT>x = Dx / D<EOL>y = Dy / D<EOL>return x, y<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Compute the intersection point of two lines.\n\nTaken from https://stackoverflow.com/a/20679579 .\n\nParameters\n----------\nx1 : number\n    x coordinate of the first point on line 1. (The lines extends beyond this point.)\n\ny1 : number\n    y coordinate of the first point on line 1. (The lines extends beyond this point.)\n\nx2 : number\n    x coordinate of the second point on line 1. (The lines extends beyond this point.)\n\ny2 : number\n    y coordinate of the second point on line 1. (The lines extends beyond this point.)\n\nx3 : number\n    x coordinate of the first point on line 2. (The lines extends beyond this point.)\n\ny3 : number\n    y coordinate of the first point on line 2. (The lines extends beyond this point.)\n\nx4 : number\n    x coordinate of the second point on line 2. (The lines extends beyond this point.)\n\ny4 : number\n    y coordinate of the second point on line 2. (The lines extends beyond this point.)\n\nReturns\n-------\ntuple of number or bool\n    The coordinate of the intersection point as a tuple ``(x, y)``.\n    If the lines are parallel (no intersection point or an infinite number of them), the result is False.", "id": "f16269:m31"}
{"signature": "def _quokka_normalize_extract(extract):", "body": "<EOL>from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage<EOL>if extract == \"<STR_LIT>\":<EOL><INDENT>bb = BoundingBox(x1=<NUM_LIT:0>, y1=<NUM_LIT:0>, x2=<NUM_LIT>, y2=<NUM_LIT>)<EOL><DEDENT>elif isinstance(extract, tuple) and len(extract) == <NUM_LIT:4>:<EOL><INDENT>bb = BoundingBox(x1=extract[<NUM_LIT:0>], y1=extract[<NUM_LIT:1>], x2=extract[<NUM_LIT:2>], y2=extract[<NUM_LIT:3>])<EOL><DEDENT>elif isinstance(extract, BoundingBox):<EOL><INDENT>bb = extract<EOL><DEDENT>elif isinstance(extract, BoundingBoxesOnImage):<EOL><INDENT>do_assert(len(extract.bounding_boxes) == <NUM_LIT:1>)<EOL>do_assert(extract.shape[<NUM_LIT:0>:<NUM_LIT:2>] == (<NUM_LIT>, <NUM_LIT>))<EOL>bb = extract.bounding_boxes[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\" % (type(extract),)<EOL>)<EOL><DEDENT>return bb<EOL>", "docstring": "Generate a normalized rectangle to be extract from the standard quokka image.\n\nParameters\n----------\nextract : 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage\n    Unnormalized representation of the image subarea to be extracted.\n\n        * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)``\n          will be extracted from the image.\n        * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``\n          and ``y2``.\n        * If a BoundingBox, then that bounding box's area will be extracted from the image.\n        * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box\n          and a shape matching the full image dimensions (i.e. (643, 960, *)). Then the\n          one bounding box will be used similar to BoundingBox.\n\nReturns\n-------\nbb : imgaug.BoundingBox\n    Normalized representation of the area to extract from the standard quokka image.", "id": "f16269:m21"}
{"signature": "def derive_random_state(random_state):", "body": "return derive_random_states(random_state, n=<NUM_LIT:1>)[<NUM_LIT:0>]<EOL>", "docstring": "Create a new random states based on an existing random state or seed.\n\nParameters\n----------\nrandom_state : numpy.random.RandomState\n    Random state or seed from which to derive the new random state.\n\nReturns\n-------\nnumpy.random.RandomState\n    Derived random state.", "id": "f16269:m18"}
{"signature": "def is_generator(val):", "body": "return isinstance(val, types.GeneratorType)<EOL>", "docstring": "Checks whether a variable is a generator.\n\nParameters\n----------\nval\n    The variable to check.\n\nReturns\n-------\nbool\n    True is the variable is a generator. Otherwise False.", "id": "f16269:m10"}
{"signature": "def is_iterable(val):", "body": "return isinstance(val, collections.Iterable)<EOL>", "docstring": "Checks whether a variable is iterable.\n\nParameters\n----------\nval\n    The variable to check.\n\nReturns\n-------\nbool\n    True if the variable is an iterable. Otherwise False.", "id": "f16269:m4"}
{"signature": "def imresize_many_images(images, sizes=None, interpolation=None):", "body": "<EOL>if len(images) == <NUM_LIT:0>:<EOL><INDENT>return images<EOL><DEDENT>do_assert(<EOL>all([image.shape[<NUM_LIT:0>] > <NUM_LIT:0> and image.shape[<NUM_LIT:1>] > <NUM_LIT:0> for image in images]),<EOL>(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (str([image.shape for image in images]),)<EOL>)<EOL>if is_single_number(sizes) and sizes <= <NUM_LIT:0>:<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\" % (sizes,))<EOL><DEDENT>elif isinstance(sizes, tuple) and (sizes[<NUM_LIT:0>] <= <NUM_LIT:0> or sizes[<NUM_LIT:1>] <= <NUM_LIT:0>):<EOL><INDENT>sizes_str = [<EOL>\"<STR_LIT>\" % (sizes[<NUM_LIT:0>],) if is_single_integer(sizes[<NUM_LIT:0>]) else \"<STR_LIT>\" % (sizes[<NUM_LIT:0>],),<EOL>\"<STR_LIT>\" % (sizes[<NUM_LIT:1>],) if is_single_integer(sizes[<NUM_LIT:1>]) else \"<STR_LIT>\" % (sizes[<NUM_LIT:1>],),<EOL>]<EOL>sizes_str = \"<STR_LIT>\" % (sizes_str[<NUM_LIT:0>], sizes_str[<NUM_LIT:1>])<EOL>raise Exception(<EOL>\"<STR_LIT>\" % (sizes_str,))<EOL><DEDENT>if is_single_number(sizes):<EOL><INDENT>sizes = (sizes, sizes)<EOL><DEDENT>else:<EOL><INDENT>do_assert(len(sizes) == <NUM_LIT:2>, \"<STR_LIT>\" % (len(sizes),))<EOL>do_assert(all([is_single_number(val) for val in sizes]),<EOL>\"<STR_LIT>\" % (str([type(val) for val in sizes]),))<EOL><DEDENT>if isinstance(images, list):<EOL><INDENT>nb_shapes = len(set([image.shape for image in images]))<EOL>if nb_shapes == <NUM_LIT:1>:<EOL><INDENT>return list(imresize_many_images(np.array(images), sizes=sizes, interpolation=interpolation))<EOL><DEDENT>else:<EOL><INDENT>return [imresize_many_images(image[np.newaxis, ...], sizes=sizes, interpolation=interpolation)[<NUM_LIT:0>, ...]<EOL>for image in images]<EOL><DEDENT><DEDENT>shape = images.shape<EOL>do_assert(images.ndim in [<NUM_LIT:3>, <NUM_LIT:4>], \"<STR_LIT>\" % (str(shape),))<EOL>nb_images = shape[<NUM_LIT:0>]<EOL>im_height, im_width = shape[<NUM_LIT:1>], shape[<NUM_LIT:2>]<EOL>nb_channels = 
shape[<NUM_LIT:3>] if images.ndim > <NUM_LIT:3> else None<EOL>height, width = sizes[<NUM_LIT:0>], sizes[<NUM_LIT:1>]<EOL>height = int(np.round(im_height * height)) if is_single_float(height) else height<EOL>width = int(np.round(im_width * width)) if is_single_float(width) else width<EOL>if height == im_height and width == im_width:<EOL><INDENT>return np.copy(images)<EOL><DEDENT>ip = interpolation<EOL>do_assert(ip is None or ip in IMRESIZE_VALID_INTERPOLATIONS)<EOL>if ip is None:<EOL><INDENT>if height > im_height or width > im_width:<EOL><INDENT>ip = cv2.INTER_AREA<EOL><DEDENT>else:<EOL><INDENT>ip = cv2.INTER_LINEAR<EOL><DEDENT><DEDENT>elif ip in [\"<STR_LIT>\", cv2.INTER_NEAREST]:<EOL><INDENT>ip = cv2.INTER_NEAREST<EOL><DEDENT>elif ip in [\"<STR_LIT>\", cv2.INTER_LINEAR]:<EOL><INDENT>ip = cv2.INTER_LINEAR<EOL><DEDENT>elif ip in [\"<STR_LIT>\", cv2.INTER_AREA]:<EOL><INDENT>ip = cv2.INTER_AREA<EOL><DEDENT>else:  <EOL><INDENT>ip = cv2.INTER_CUBIC<EOL><DEDENT>from . import dtypes as iadt<EOL>if ip == cv2.INTER_NEAREST:<EOL><INDENT>iadt.gate_dtypes(images,<EOL>allowed=[\"<STR_LIT:bool>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>disallowed=[\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>augmenter=None)<EOL><DEDENT>else:<EOL><INDENT>iadt.gate_dtypes(images,<EOL>allowed=[\"<STR_LIT:bool>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>disallowed=[\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>augmenter=None)<EOL><DEDENT>result_shape = (nb_images, height, width)<EOL>if nb_channels is not None:<EOL><INDENT>result_shape = result_shape + (nb_channels,)<EOL><DEDENT>result = np.zeros(result_shape, 
dtype=images.dtype)<EOL>for i, image in enumerate(images):<EOL><INDENT>input_dtype = image.dtype<EOL>if image.dtype.type == np.bool_:<EOL><INDENT>image = image.astype(np.uint8) * <NUM_LIT:255><EOL><DEDENT>elif image.dtype.type == np.int8 and ip != cv2.INTER_NEAREST:<EOL><INDENT>image = image.astype(np.int16)<EOL><DEDENT>elif image.dtype.type == np.float16:<EOL><INDENT>image = image.astype(np.float32)<EOL><DEDENT>result_img = cv2.resize(image, (width, height), interpolation=ip)<EOL>assert result_img.dtype == image.dtype<EOL>if len(result_img.shape) == <NUM_LIT:2> and nb_channels is not None and nb_channels == <NUM_LIT:1>:<EOL><INDENT>result_img = result_img[:, :, np.newaxis]<EOL><DEDENT>if input_dtype.type == np.bool_:<EOL><INDENT>result_img = result_img > <NUM_LIT><EOL><DEDENT>elif input_dtype.type == np.int8 and ip != cv2.INTER_NEAREST:<EOL><INDENT>from . import dtypes as iadt<EOL>result_img = iadt.restore_dtypes_(result_img, np.int8)<EOL><DEDENT>elif input_dtype.type == np.float16:<EOL><INDENT>from . 
import dtypes as iadt<EOL>result_img = iadt.restore_dtypes_(result_img, np.float16)<EOL><DEDENT>result[i] = result_img<EOL><DEDENT>return result<EOL>", "docstring": "Resize many images to a specified size.\n\ndtype support::\n\n    * ``uint8``: yes; fully tested\n    * ``uint16``: yes; tested\n    * ``uint32``: no (1)\n    * ``uint64``: no (2)\n    * ``int8``: yes; tested (3)\n    * ``int16``: yes; tested\n    * ``int32``: limited; tested (4)\n    * ``int64``: no (2)\n    * ``float16``: yes; tested (5)\n    * ``float32``: yes; tested\n    * ``float64``: yes; tested\n    * ``float128``: no (1)\n    * ``bool``: yes; tested (6)\n\n    - (1) rejected by ``cv2.imresize``\n    - (2) results too inaccurate\n    - (3) mapped internally to ``int16`` when interpolation!=\"nearest\"\n    - (4) only supported for interpolation=\"nearest\", other interpolations lead to cv2 error\n    - (5) mapped internally to ``float32``\n    - (6) mapped internally to ``uint8``\n\nParameters\n----------\nimages : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray\n    Array of the images to resize.\n    Usually recommended to be of dtype uint8.\n\nsizes : float or iterable of int or iterable of float\n    The new size of the images, given either as a fraction (a single float) or as\n    a ``(height, width)`` tuple of two integers or as a ``(height fraction, width fraction)``\n    tuple of two floats.\n\ninterpolation : None or str or int, optional\n    The interpolation to use during resize.\n    If int, then expected to be one of:\n\n        * ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)\n        * ``cv2.INTER_LINEAR`` (linear interpolation)\n        * ``cv2.INTER_AREA`` (area interpolation)\n        * ``cv2.INTER_CUBIC`` (cubic interpolation)\n\n    If string, then expected to be one of:\n\n        * ``nearest`` (identical to ``cv2.INTER_NEAREST``)\n        * ``linear`` (identical to ``cv2.INTER_LINEAR``)\n        * ``area`` (identical to ``cv2.INTER_AREA``)\n        * ``cubic`` 
(identical to ``cv2.INTER_CUBIC``)\n\n    If None, the interpolation will be chosen automatically. For size\n    increases, area interpolation will be picked and for size decreases,\n    linear interpolation will be picked.\n\nReturns\n-------\nresult : (N,H',W',[C]) ndarray\n    Array of the resized images.\n\nExamples\n--------\n>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), 2.0)\n\nConverts 2 RGB images of height and width 16 to images of height and width 16*2 = 32.\n\n>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (16, 32))\n\nConverts 2 RGB images of height and width 16 to images of height 16 and width 32.\n\n>>> imresize_many_images(np.zeros((2, 16, 16, 3), dtype=np.uint8), (2.0, 4.0))\n\nConverts 2 RGB images of height and width 16 to images of height 32 and width 64.", "id": "f16269:m33"}
{"signature": "def quokka_bounding_boxes(size=None, extract=None):", "body": "<EOL>from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage<EOL>left, top = <NUM_LIT:0>, <NUM_LIT:0><EOL>if extract is not None:<EOL><INDENT>bb_extract = _quokka_normalize_extract(extract)<EOL>left = bb_extract.x1<EOL>top = bb_extract.y1<EOL><DEDENT>with open(QUOKKA_ANNOTATIONS_FP, \"<STR_LIT:r>\") as f:<EOL><INDENT>json_dict = json.load(f)<EOL><DEDENT>bbs = []<EOL>for bb_dict in json_dict[\"<STR_LIT>\"]:<EOL><INDENT>bbs.append(<EOL>BoundingBox(<EOL>x1=bb_dict[\"<STR_LIT>\"] - left,<EOL>y1=bb_dict[\"<STR_LIT>\"] - top,<EOL>x2=bb_dict[\"<STR_LIT>\"] - left,<EOL>y2=bb_dict[\"<STR_LIT>\"] - top<EOL>)<EOL>)<EOL><DEDENT>if extract is not None:<EOL><INDENT>shape = (bb_extract.height, bb_extract.width, <NUM_LIT:3>)<EOL><DEDENT>else:<EOL><INDENT>shape = (<NUM_LIT>, <NUM_LIT>, <NUM_LIT:3>)<EOL><DEDENT>bbsoi = BoundingBoxesOnImage(bbs, shape=shape)<EOL>if size is not None:<EOL><INDENT>shape_resized = _compute_resized_shape(shape, size)<EOL>bbsoi = bbsoi.on(shape_resized)<EOL><DEDENT>return bbsoi<EOL>", "docstring": "Returns example bounding boxes on the standard example quokke image.\n\nCurrently only a single bounding box is returned that covers the quokka.\n\nParameters\n----------\nsize : None or float or tuple of int or tuple of float, optional\n    Size of the output image on which the BBs are placed. If None, then the BBs\n    are not projected to any new size (positions on the original image are used).\n    Floats lead to relative size changes, ints to absolute sizes in pixels.\n\nextract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage\n    Subarea to extract from the image. See :func:`imgaug.quokka`.\n\nReturns\n-------\nbbsoi : imgaug.BoundingBoxesOnImage\n    Example BBs on the quokka image.", "id": "f16269:m28"}
{"signature": "def derive_random_states(random_state, n=<NUM_LIT:1>):", "body": "seed_ = random_state.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>return [new_random_state(seed_+i) for i in sm.xrange(n)]<EOL>", "docstring": "Create N new random states based on an existing random state or seed.\n\nParameters\n----------\nrandom_state : numpy.random.RandomState\n    Random state or seed from which to derive new random states.\n\nn : int, optional\n    Number of random states to derive.\n\nReturns\n-------\nlist of numpy.random.RandomState\n    Derived random states.", "id": "f16269:m19"}
{"signature": "def show_grid(images, rows=None, cols=None):", "body": "grid = draw_grid(images, rows=rows, cols=cols)<EOL>imshow(grid)<EOL>", "docstring": "Converts the input images to a grid image and shows it in a new window.\n\ndtype support::\n\n    minimum of (\n        :func:`imgaug.imgaug.draw_grid`,\n        :func:`imgaug.imgaug.imshow`\n    )\n\nParameters\n----------\nimages : (N,H,W,3) ndarray or iterable of (H,W,3) array\n    See :func:`imgaug.draw_grid`.\n\nrows : None or int, optional\n    See :func:`imgaug.draw_grid`.\n\ncols : None or int, optional\n    See :func:`imgaug.draw_grid`.", "id": "f16269:m42"}
{"signature": "def is_single_number(val):", "body": "return is_single_integer(val) or is_single_float(val)<EOL>", "docstring": "Checks whether a variable is a number, i.e. an integer or float.\n\nParameters\n----------\nval\n    The variable to check.\n\nReturns\n-------\nbool\n    True if the variable is a number. Otherwise False.", "id": "f16269:m3"}
{"signature": "def seed(seedval):", "body": "CURRENT_RANDOM_STATE.seed(seedval)<EOL>", "docstring": "Set the seed used by the global random state and thereby all randomness\nin the library.\n\nThis random state is by default by all augmenters. Under special\ncircumstances (e.g. when an augmenter is switched to deterministic mode),\nthe global random state is replaced by another -- local -- one.\nThe replacement is dependent on the global random state.\n\nParameters\n----------\nseedval : int\n    The seed to use.", "id": "f16269:m13"}
{"signature": "def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):", "body": "do_assert(backend in [\"<STR_LIT>\", \"<STR_LIT>\"], \"<STR_LIT>\" % (backend,))<EOL>if backend == \"<STR_LIT>\":<EOL><INDENT>image_bgr = image<EOL>if image.ndim == <NUM_LIT:3> and image.shape[<NUM_LIT:2>] in [<NUM_LIT:3>, <NUM_LIT:4>]:<EOL><INDENT>image_bgr = image[..., <NUM_LIT:0>:<NUM_LIT:3>][..., ::-<NUM_LIT:1>]<EOL><DEDENT>win_name = \"<STR_LIT>\"<EOL>cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)<EOL>cv2.imshow(win_name, image_bgr)<EOL>cv2.waitKey(<NUM_LIT:0>)<EOL>cv2.destroyWindow(win_name)<EOL><DEDENT>else:<EOL><INDENT>import matplotlib.pyplot as plt<EOL>dpi = <NUM_LIT><EOL>h, w = image.shape[<NUM_LIT:0>] / dpi, image.shape[<NUM_LIT:1>] / dpi<EOL>w = max(w, <NUM_LIT:6>)  <EOL>fig, ax = plt.subplots(figsize=(w, h), dpi=dpi)<EOL>fig.canvas.set_window_title(\"<STR_LIT>\" % (image.shape,))<EOL>ax.imshow(image, cmap=\"<STR_LIT>\")  <EOL>plt.show()<EOL><DEDENT>", "docstring": "Shows an image in a window.\n\ndtype support::\n\n    * ``uint8``: yes; not tested\n    * ``uint16``: ?\n    * ``uint32``: ?\n    * ``uint64``: ?\n    * ``int8``: ?\n    * ``int16``: ?\n    * ``int32``: ?\n    * ``int64``: ?\n    * ``float16``: ?\n    * ``float32``: ?\n    * ``float64``: ?\n    * ``float128``: ?\n    * ``bool``: ?\n\nParameters\n----------\nimage : (H,W,3) ndarray\n    Image to show.\n\nbackend : {'matplotlib', 'cv2'}, optional\n    Library to use to show the image. May be either matplotlib or OpenCV ('cv2').\n    OpenCV tends to be faster, but apparently causes more technical issues.", "id": "f16269:m43"}
{"signature": "def _compute_resized_shape(from_shape, to_shape):", "body": "if is_np_array(from_shape):<EOL><INDENT>from_shape = from_shape.shape<EOL><DEDENT>if is_np_array(to_shape):<EOL><INDENT>to_shape = to_shape.shape<EOL><DEDENT>to_shape_computed = list(from_shape)<EOL>if to_shape is None:<EOL><INDENT>pass<EOL><DEDENT>elif isinstance(to_shape, tuple):<EOL><INDENT>do_assert(len(from_shape) in [<NUM_LIT:2>, <NUM_LIT:3>])<EOL>do_assert(len(to_shape) in [<NUM_LIT:2>, <NUM_LIT:3>])<EOL>if len(from_shape) == <NUM_LIT:3> and len(to_shape) == <NUM_LIT:3>:<EOL><INDENT>do_assert(from_shape[<NUM_LIT:2>] == to_shape[<NUM_LIT:2>])<EOL><DEDENT>elif len(to_shape) == <NUM_LIT:3>:<EOL><INDENT>to_shape_computed.append(to_shape[<NUM_LIT:2>])<EOL><DEDENT>do_assert(all([v is None or is_single_number(v) for v in to_shape[<NUM_LIT:0>:<NUM_LIT:2>]]),<EOL>\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\" % (str([type(v) for v in to_shape[<NUM_LIT:0>:<NUM_LIT:2>]]),))<EOL>for i, from_shape_i in enumerate(from_shape[<NUM_LIT:0>:<NUM_LIT:2>]):<EOL><INDENT>if to_shape[i] is None:<EOL><INDENT>to_shape_computed[i] = from_shape_i<EOL><DEDENT>elif is_single_integer(to_shape[i]):<EOL><INDENT>to_shape_computed[i] = to_shape[i]<EOL><DEDENT>else:  <EOL><INDENT>to_shape_computed[i] = int(np.round(from_shape_i * to_shape[i]))<EOL><DEDENT><DEDENT><DEDENT>elif is_single_integer(to_shape) or is_single_float(to_shape):<EOL><INDENT>to_shape_computed = _compute_resized_shape(from_shape, (to_shape, to_shape))<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\" % (type(to_shape),))<EOL><DEDENT>return tuple(to_shape_computed)<EOL>", "docstring": "Computes the intended new shape of an image-like array after resizing.\n\nParameters\n----------\nfrom_shape : tuple or ndarray\n    Old shape of the array. 
Usually expected to be a tuple of form ``(H, W)`` or ``(H, W, C)`` or\n    alternatively an array with two or three dimensions.\n\nto_shape : None or tuple of ints or tuple of floats or int or float or ndarray\n    New shape of the array.\n\n        * If None, then `from_shape` will be used as the new shape.\n        * If an int ``V``, then the new shape will be ``(V, V, [C])``, where ``C`` will be added if it\n          is part of `from_shape`.\n        * If a float ``V``, then the new shape will be ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old\n          height/width.\n        * If a tuple ``(H', W', [C'])`` of ints, then ``H'`` and ``W'`` will be used as the new height\n          and width.\n        * If a tuple ``(H', W', [C'])`` of floats (except ``C``), then ``H'`` and ``W'`` will\n          be used as the new height and width.\n        * If a numpy array, then the array's shape will be used.\n\nReturns\n-------\nto_shape_computed : tuple of int\n    New shape.", "id": "f16269:m22"}
{"signature": "def quokka(size=None, extract=None):", "body": "img = imageio.imread(QUOKKA_FP, pilmode=\"<STR_LIT>\")<EOL>if extract is not None:<EOL><INDENT>bb = _quokka_normalize_extract(extract)<EOL>img = bb.extract_from_image(img)<EOL><DEDENT>if size is not None:<EOL><INDENT>shape_resized = _compute_resized_shape(img.shape, size)<EOL>img = imresize_single_image(img, shape_resized[<NUM_LIT:0>:<NUM_LIT:2>])<EOL><DEDENT>return img<EOL>", "docstring": "Returns an image of a quokka as a numpy array.\n\nParameters\n----------\nsize : None or float or tuple of int, optional\n    Size of the output image. Input into :func:`imgaug.imgaug.imresize_single_image`.\n    Usually expected to be a tuple ``(H, W)``, where ``H`` is the desired height\n    and ``W`` is the width. If None, then the image will not be resized.\n\nextract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage\n    Subarea of the quokka image to extract:\n\n        * If None, then the whole image will be used.\n        * If string ``square``, then a squared area ``(x: 0 to max 643, y: 0 to max 643)`` will\n          be extracted from the image.\n        * If a tuple, then expected to contain four numbers denoting ``x1``, ``y1``, ``x2``\n          and ``y2``.\n        * If a BoundingBox, then that bounding box's area will be extracted from the image.\n        * If a BoundingBoxesOnImage, then expected to contain exactly one bounding box\n          and a shape matching the full image dimensions (i.e. ``(643, 960, *)``). Then the\n          one bounding box will be used similar to BoundingBox.\n\nReturns\n-------\nimg : (H,W,3) ndarray\n    The image array of dtype uint8.", "id": "f16269:m23"}
{"signature": "def is_activated(self, images, augmenter, parents, default):", "body": "if self.activator is None:<EOL><INDENT>return default<EOL><DEDENT>else:<EOL><INDENT>return self.activator(images, augmenter, parents, default)<EOL><DEDENT>", "docstring": "Returns whether an augmenter may be executed.\n\nReturns\n-------\nbool\n    If True, the augmenter may be executed. If False, it may not be executed.", "id": "f16269:c0:m1"}
{"signature": "def new_random_state(seed=None, fully_random=False):", "body": "if seed is None:<EOL><INDENT>if not fully_random:<EOL><INDENT>seed = CURRENT_RANDOM_STATE.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>return np.random.RandomState(seed)<EOL>", "docstring": "Returns a new random state.\n\nParameters\n----------\nseed : None or int, optional\n    Optional seed value to use.\n    The same datatypes are allowed as for ``numpy.random.RandomState(seed)``.\n\nfully_random : bool, optional\n    Whether to use numpy's random initialization for the\n    RandomState (used if set to True). If False, a seed is sampled from\n    the global random state, which is a bit faster and hence the default.\n\nReturns\n-------\nnumpy.random.RandomState\n    The new random state.", "id": "f16269:m15"}
{"signature": "def is_integer_array(val):", "body": "return is_np_array(val) and issubclass(val.dtype.type, np.integer)<EOL>", "docstring": "Checks whether a variable is a numpy integer array.\n\nParameters\n----------\nval\n    The variable to check.\n\nReturns\n-------\nbool\n    True if the variable is a numpy integer array. Otherwise False.", "id": "f16269:m7"}
{"signature": "def caller_name():", "body": "return sys._getframe(<NUM_LIT:1>).f_code.co_name<EOL>", "docstring": "Returns the name of the caller, e.g. a function.\n\nReturns\n-------\nstr\n    The name of the caller as a string", "id": "f16269:m12"}
{"signature": "def pad(arr, top=<NUM_LIT:0>, right=<NUM_LIT:0>, bottom=<NUM_LIT:0>, left=<NUM_LIT:0>, mode=\"<STR_LIT>\", cval=<NUM_LIT:0>):", "body": "do_assert(arr.ndim in [<NUM_LIT:2>, <NUM_LIT:3>])<EOL>do_assert(top >= <NUM_LIT:0>)<EOL>do_assert(right >= <NUM_LIT:0>)<EOL>do_assert(bottom >= <NUM_LIT:0>)<EOL>do_assert(left >= <NUM_LIT:0>)<EOL>if top > <NUM_LIT:0> or right > <NUM_LIT:0> or bottom > <NUM_LIT:0> or left > <NUM_LIT:0>:<EOL><INDENT>mapping_mode_np_to_cv2 = {<EOL>\"<STR_LIT>\": cv2.BORDER_CONSTANT,<EOL>\"<STR_LIT>\": cv2.BORDER_REPLICATE,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": None,<EOL>\"<STR_LIT>\": cv2.BORDER_REFLECT_101,<EOL>\"<STR_LIT>\": cv2.BORDER_REFLECT,<EOL>\"<STR_LIT>\": None,<EOL>cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT,<EOL>cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE,<EOL>cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101,<EOL>cv2.BORDER_REFLECT: cv2.BORDER_REFLECT<EOL>}<EOL>bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None<EOL>bad_datatype_cv2 = arr.dtype.name in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:bool>\"]<EOL>if not bad_datatype_cv2 and not bad_mode_cv2:<EOL><INDENT>cval = float(cval) if arr.dtype.kind == \"<STR_LIT:f>\" else int(cval)  <EOL>if arr.ndim == <NUM_LIT:2> or arr.shape[<NUM_LIT:2>] <= <NUM_LIT:4>:<EOL><INDENT>if arr.ndim == <NUM_LIT:3>:<EOL><INDENT>cval = tuple([cval] * arr.shape[<NUM_LIT:2>])<EOL><DEDENT>arr_pad = cv2.copyMakeBorder(arr, top=top, bottom=bottom, left=left, right=right,<EOL>borderType=mapping_mode_np_to_cv2[mode], value=cval)<EOL>if arr.ndim == <NUM_LIT:3> and arr_pad.ndim == <NUM_LIT:2>:<EOL><INDENT>arr_pad = arr_pad[..., np.newaxis]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>result = []<EOL>channel_start_idx = <NUM_LIT:0><EOL>while channel_start_idx < arr.shape[<NUM_LIT:2>]:<EOL><INDENT>arr_c = arr[..., channel_start_idx:channel_start_idx+<NUM_LIT:4>]<EOL>cval_c = 
tuple([cval] * arr_c.shape[<NUM_LIT:2>])<EOL>arr_pad_c = cv2.copyMakeBorder(arr_c, top=top, bottom=bottom, left=left, right=right,<EOL>borderType=mapping_mode_np_to_cv2[mode], value=cval_c)<EOL>arr_pad_c = np.atleast_3d(arr_pad_c)<EOL>result.append(arr_pad_c)<EOL>channel_start_idx += <NUM_LIT:4><EOL><DEDENT>arr_pad = np.concatenate(result, axis=<NUM_LIT:2>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>paddings_np = [(top, bottom), (left, right)]  <EOL>if arr.ndim == <NUM_LIT:3>:<EOL><INDENT>paddings_np.append((<NUM_LIT:0>, <NUM_LIT:0>))  <EOL><DEDENT>if mode == \"<STR_LIT>\":<EOL><INDENT>arr_pad = np.pad(arr, paddings_np, mode=mode, constant_values=cval)<EOL><DEDENT>elif mode == \"<STR_LIT>\":<EOL><INDENT>arr_pad = np.pad(arr, paddings_np, mode=mode, end_values=cval)<EOL><DEDENT>else:<EOL><INDENT>arr_pad = np.pad(arr, paddings_np, mode=mode)<EOL><DEDENT><DEDENT>return arr_pad<EOL><DEDENT>return np.copy(arr)<EOL>", "docstring": "Pad an image-like array on its top/right/bottom/left side.\n\nThis function is a wrapper around :func:`numpy.pad`.\n\ndtype support::\n\n    * ``uint8``: yes; fully tested (1)\n    * ``uint16``: yes; fully tested (1)\n    * ``uint32``: yes; fully tested (2) (3)\n    * ``uint64``: yes; fully tested (2) (3)\n    * ``int8``: yes; fully tested (1)\n    * ``int16``: yes; fully tested (1)\n    * ``int32``: yes; fully tested (1)\n    * ``int64``: yes; fully tested (2) (3)\n    * ``float16``: yes; fully tested (2) (3)\n    * ``float32``: yes; fully tested (1)\n    * ``float64``: yes; fully tested (1)\n    * ``float128``: yes; fully tested (2) (3)\n    * ``bool``: yes; tested (2) (3)\n\n    - (1) Uses ``cv2`` if `mode` is one of: ``\"constant\"``, ``\"edge\"``, ``\"reflect\"``, ``\"symmetric\"``.\n          Otherwise uses ``numpy``.\n    - (2) Uses ``numpy``.\n    - (3) Rejected by ``cv2``.\n\nParameters\n----------\narr : (H,W) ndarray or (H,W,C) ndarray\n    Image-like array to pad.\n\ntop : int, optional\n    Amount of pixels to add at the top side of 
the image. Must be 0 or greater.\n\nright : int, optional\n    Amount of pixels to add at the right side of the image. Must be 0 or greater.\n\nbottom : int, optional\n    Amount of pixels to add at the bottom side of the image. Must be 0 or greater.\n\nleft : int, optional\n    Amount of pixels to add at the left side of the image. Must be 0 or greater.\n\nmode : str, optional\n    Padding mode to use. See :func:`numpy.pad` for details.\n    In case of mode ``constant``, the parameter `cval` will be used as the ``constant_values``\n    parameter to :func:`numpy.pad`.\n    In case of mode ``linear_ramp``, the parameter `cval` will be used as the ``end_values``\n    parameter to :func:`numpy.pad`.\n\ncval : number, optional\n    Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.\n    The cval is expected to match the input array's dtype and value range.\n\nReturns\n-------\narr_pad : (H',W') ndarray or (H',W',C) ndarray\n    Padded array with height ``H'=H+top+bottom`` and width ``W'=W+left+right``.", "id": "f16269:m35"}
{"signature": "def draw_grid(images, rows=None, cols=None):", "body": "nb_images = len(images)<EOL>do_assert(nb_images > <NUM_LIT:0>)<EOL>if is_np_array(images):<EOL><INDENT>do_assert(images.ndim == <NUM_LIT:4>)<EOL><DEDENT>else:<EOL><INDENT>do_assert(is_iterable(images) and is_np_array(images[<NUM_LIT:0>]) and images[<NUM_LIT:0>].ndim == <NUM_LIT:3>)<EOL>dts = [image.dtype.name for image in images]<EOL>nb_dtypes = len(set(dts))<EOL>do_assert(nb_dtypes == <NUM_LIT:1>, (\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (nb_dtypes, \"<STR_LIT:U+002CU+0020>\".join(dts)))<EOL><DEDENT>cell_height = max([image.shape[<NUM_LIT:0>] for image in images])<EOL>cell_width = max([image.shape[<NUM_LIT:1>] for image in images])<EOL>channels = set([image.shape[<NUM_LIT:2>] for image in images])<EOL>do_assert(<EOL>len(channels) == <NUM_LIT:1>,<EOL>\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\" % (str(channels), len(channels))<EOL>)<EOL>nb_channels = list(channels)[<NUM_LIT:0>]<EOL>if rows is None and cols is None:<EOL><INDENT>rows = cols = int(math.ceil(math.sqrt(nb_images)))<EOL><DEDENT>elif rows is not None:<EOL><INDENT>cols = int(math.ceil(nb_images / rows))<EOL><DEDENT>elif cols is not None:<EOL><INDENT>rows = int(math.ceil(nb_images / cols))<EOL><DEDENT>do_assert(rows * cols >= nb_images)<EOL>width = cell_width * cols<EOL>height = cell_height * rows<EOL>dt = images.dtype if is_np_array(images) else images[<NUM_LIT:0>].dtype<EOL>grid = np.zeros((height, width, nb_channels), dtype=dt)<EOL>cell_idx = <NUM_LIT:0><EOL>for row_idx in sm.xrange(rows):<EOL><INDENT>for col_idx in sm.xrange(cols):<EOL><INDENT>if cell_idx < nb_images:<EOL><INDENT>image = images[cell_idx]<EOL>cell_y1 = cell_height * row_idx<EOL>cell_y2 = cell_y1 + image.shape[<NUM_LIT:0>]<EOL>cell_x1 = cell_width * col_idx<EOL>cell_x2 = cell_x1 + image.shape[<NUM_LIT:1>]<EOL>grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image<EOL><DEDENT>cell_idx += <NUM_LIT:1><EOL><DEDENT><DEDENT>return grid<EOL>", "docstring": "Converts multiple input images into a 
single image showing them in a grid.\n\ndtype support::\n\n    * ``uint8``: yes; fully tested\n    * ``uint16``: yes; fully tested\n    * ``uint32``: yes; fully tested\n    * ``uint64``: yes; fully tested\n    * ``int8``: yes; fully tested\n    * ``int16``: yes; fully tested\n    * ``int32``: yes; fully tested\n    * ``int64``: yes; fully tested\n    * ``float16``: yes; fully tested\n    * ``float32``: yes; fully tested\n    * ``float64``: yes; fully tested\n    * ``float128``: yes; fully tested\n    * ``bool``: yes; fully tested\n\nParameters\n----------\nimages : (N,H,W,3) ndarray or iterable of (H,W,3) array\n    The input images to convert to a grid.\n\nrows : None or int, optional\n    The number of rows to show in the grid.\n    If None, it will be automatically derived.\n\ncols : None or int, optional\n    The number of cols to show in the grid.\n    If None, it will be automatically derived.\n\nReturns\n-------\ngrid : (H',W',3) ndarray\n    Image of the generated grid.", "id": "f16269:m41"}
{"signature": "def is_single_bool(val):", "body": "return type(val) == type(True)<EOL>", "docstring": "Checks whether a variable is a boolean.\n\nParameters\n----------\nval\n    The variable to check.\n\nReturns\n-------\nbool\n    True if the variable is a boolean. Otherwise False.", "id": "f16269:m6"}
{"signature": "def pad_to_aspect_ratio(arr, aspect_ratio, mode=\"<STR_LIT>\", cval=<NUM_LIT:0>, return_pad_amounts=False):", "body": "pad_top, pad_right, pad_bottom, pad_left = compute_paddings_for_aspect_ratio(arr, aspect_ratio)<EOL>arr_padded = pad(<EOL>arr,<EOL>top=pad_top,<EOL>right=pad_right,<EOL>bottom=pad_bottom,<EOL>left=pad_left,<EOL>mode=mode,<EOL>cval=cval<EOL>)<EOL>if return_pad_amounts:<EOL><INDENT>return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)<EOL><DEDENT>else:<EOL><INDENT>return arr_padded<EOL><DEDENT>", "docstring": "Pad an image-like array on its sides so that it matches a target aspect ratio.\n\nDepending on which dimension is smaller (height or width), only the corresponding\nsides (left/right or top/bottom) will be padded. In each case, both of the sides will\nbe padded equally.\n\ndtype support::\n\n    See :func:`imgaug.imgaug.pad`.\n\nParameters\n----------\narr : (H,W) ndarray or (H,W,C) ndarray\n    Image-like array to pad.\n\naspect_ratio : float\n    Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice\n    as much width as height.\n\nmode : str, optional\n    Padding mode to use. See :func:`numpy.pad` for details.\n\ncval : number, optional\n    Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.\n\nreturn_pad_amounts : bool, optional\n    If False, then only the padded image will be returned. If True, a tuple with two\n    entries will be returned, where the first entry is the padded image and the second\n    entry are the amounts by which each image side was padded. 
These amounts are again a\n    tuple of the form (top, right, bottom, left), with each value being an integer.\n\nReturns\n-------\narr_padded : (H',W') ndarray or (H',W',C) ndarray\n    Padded image as (H',W') or (H',W',C) ndarray, fulfulling the given aspect_ratio.\n\ntuple of int\n    Amounts by which the image was padded on each side, given as a tuple ``(top, right, bottom, left)``.\n    This tuple is only returned if `return_pad_amounts` was set to True.\n    Otherwise only ``arr_padded`` is returned.", "id": "f16269:m37"}
{"signature": "def _augment_images_worker(self, augseq, queue_source, queue_result, seedval):", "body": "np.random.seed(seedval)<EOL>random.seed(seedval)<EOL>augseq.reseed(seedval)<EOL>ia.seed(seedval)<EOL>loader_finished = False<EOL>while not loader_finished:<EOL><INDENT>try:<EOL><INDENT>batch_str = queue_source.get(timeout=<NUM_LIT:0.1>)<EOL>batch = pickle.loads(batch_str)<EOL>if batch is None:<EOL><INDENT>loader_finished = True<EOL>queue_source.put(pickle.dumps(None, protocol=-<NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>batch_aug = augseq.augment_batch(batch)<EOL>batch_str = pickle.dumps(batch_aug, protocol=-<NUM_LIT:1>)<EOL>queue_result.put(batch_str)<EOL><DEDENT><DEDENT>except QueueEmpty:<EOL><INDENT>time.sleep(<NUM_LIT>)<EOL><DEDENT><DEDENT>queue_result.put(pickle.dumps(None, protocol=-<NUM_LIT:1>))<EOL>time.sleep(<NUM_LIT>)<EOL>", "docstring": "Augment endlessly images in the source queue.\n\nThis is a worker function for that endlessly queries the source queue (input batches),\naugments batches in it and sends the result to the output queue.", "id": "f16272:c2:m3"}
{"signature": "def imap_batches_unordered(self, batches, chunksize=<NUM_LIT:1>):", "body": "assert ia.is_generator(batches), (\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (type(batches),)<EOL>gen = self.pool.imap_unordered(_Pool_starworker, self._handle_batch_ids_gen(batches), chunksize=chunksize)<EOL>for batch in gen:<EOL><INDENT>yield batch<EOL><DEDENT>", "docstring": "Augment batches from a generator in a way that does not guarantee to preserve order.\n\nParameters\n----------\nbatches : generator of imgaug.augmentables.batches.Batch\n    The batches to augment, provided as a generator. Each call to the generator should yield exactly one\n    batch.\n\nchunksize : None or int, optional\n    Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve\n    performance.\n\nYields\n------\nimgaug.augmentables.batches.Batch\n    Augmented batch.", "id": "f16272:c0:m5"}
{"signature": "def map_batches_async(self, batches, chunksize=None, callback=None, error_callback=None):", "body": "assert isinstance(batches, list), (\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (type(batches),)<EOL>return self.pool.map_async(_Pool_starworker, self._handle_batch_ids(batches),<EOL>chunksize=chunksize, callback=callback, error_callback=error_callback)<EOL>", "docstring": "Augment batches asynchonously.\n\nParameters\n----------\nbatches : list of imgaug.augmentables.batches.Batch\n    The batches to augment.\n\nchunksize : None or int, optional\n    Rough indicator of how many tasks should be sent to each worker. Increasing this number can improve\n    performance.\n\ncallback : None or callable, optional\n    Function to call upon finish. See `multiprocessing.Pool`.\n\nerror_callback : None or callable, optional\n    Function to call upon errors. See `multiprocessing.Pool`.\n\nReturns\n-------\nmultiprocessing.MapResult\n    Asynchonous result. See `multiprocessing.Pool`.", "id": "f16272:c0:m3"}
{"signature": "def join(self):", "body": "if self._pool is not None:<EOL><INDENT>self._pool.join()<EOL><DEDENT>", "docstring": "Wait for the workers to exit.\n\nThis may only be called after calling :func:`imgaug.multicore.Pool.join` or\n:func:`imgaug.multicore.Pool.terminate`.", "id": "f16272:c0:m10"}
{"signature": "def terminate(self):", "body": "if not self.join_signal.is_set():<EOL><INDENT>self.join_signal.set()<EOL><DEDENT>time.sleep(<NUM_LIT>)<EOL>if self.main_worker_thread.is_alive():<EOL><INDENT>self.main_worker_thread.join()<EOL><DEDENT>if self.threaded:<EOL><INDENT>for worker in self.workers:<EOL><INDENT>if worker.is_alive():<EOL><INDENT>worker.join()<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for worker in self.workers:<EOL><INDENT>if worker.is_alive():<EOL><INDENT>worker.terminate()<EOL>worker.join()<EOL><DEDENT><DEDENT>while not self.all_finished():<EOL><INDENT>time.sleep(<NUM_LIT>)<EOL><DEDENT><DEDENT>if self.queue.full():<EOL><INDENT>self.queue.get()<EOL><DEDENT>self.queue.put(pickle.dumps(None, protocol=-<NUM_LIT:1>))<EOL>time.sleep(<NUM_LIT>)<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>self._queue_internal.get(timeout=<NUM_LIT>)<EOL><DEDENT>except QueueEmpty:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if not self._queue_internal._closed:<EOL><INDENT>self._queue_internal.close()<EOL><DEDENT>if not self.queue._closed:<EOL><INDENT>self.queue.close()<EOL><DEDENT>self._queue_internal.join_thread()<EOL>self.queue.join_thread()<EOL>time.sleep(<NUM_LIT>)<EOL>", "docstring": "Stop all workers.", "id": "f16272:c1:m5"}
{"signature": "@property<EOL><INDENT>def pool(self):<DEDENT>", "body": "if self._pool is None:<EOL><INDENT>processes = self.processes<EOL>if processes is not None and processes < <NUM_LIT:0>:<EOL><INDENT>try:<EOL><INDENT>processes = multiprocessing.cpu_count() - abs(processes)<EOL>processes = max(processes, <NUM_LIT:1>)<EOL><DEDENT>except (ImportError, NotImplementedError):<EOL><INDENT>processes = None<EOL><DEDENT><DEDENT>self._pool = multiprocessing.Pool(processes,<EOL>initializer=_Pool_initialize_worker,<EOL>initargs=(self.augseq, self.seed),<EOL>maxtasksperchild=self.maxtasksperchild)<EOL><DEDENT>return self._pool<EOL>", "docstring": "Return the multiprocessing.Pool instance or create it if not done yet.\n\n        Returns\n        -------\n        multiprocessing.Pool\n            The multiprocessing.Pool used internally by this imgaug.multicore.Pool.", "id": "f16272:c0:m1"}
{"signature": "def generate_similar_points_manhattan(self, nb_steps, step_size, return_array=False):", "body": "<EOL>points = np.zeros((nb_steps + <NUM_LIT:1> + nb_steps + <NUM_LIT:2>*(nb_steps**<NUM_LIT:2>), <NUM_LIT:2>), dtype=np.float32)<EOL>yy = np.linspace(self.y - nb_steps * step_size, self.y + nb_steps * step_size, nb_steps + <NUM_LIT:1> + nb_steps)<EOL>width = <NUM_LIT:1><EOL>nth_point = <NUM_LIT:0><EOL>for i_y, y in enumerate(yy):<EOL><INDENT>if width == <NUM_LIT:1>:<EOL><INDENT>xx = [self.x]<EOL><DEDENT>else:<EOL><INDENT>xx = np.linspace(self.x - (width-<NUM_LIT:1>)//<NUM_LIT:2> * step_size, self.x + (width-<NUM_LIT:1>)//<NUM_LIT:2> * step_size, width)<EOL><DEDENT>for x in xx:<EOL><INDENT>points[nth_point] = [x, y]<EOL>nth_point += <NUM_LIT:1><EOL><DEDENT>if i_y < nb_steps:<EOL><INDENT>width += <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>width -= <NUM_LIT:2><EOL><DEDENT><DEDENT>if return_array:<EOL><INDENT>return points<EOL><DEDENT>return [self.deepcopy(x=points[i, <NUM_LIT:0>], y=points[i, <NUM_LIT:1>]) for i in sm.xrange(points.shape[<NUM_LIT:0>])]<EOL>", "docstring": "Generate nearby points to this keypoint based on manhattan distance.\n\nTo generate the first neighbouring points, a distance of S (step size) is moved from the\ncenter point (this keypoint) to the top, right, bottom and left, resulting in four new\npoints. From these new points, the pattern is repeated. Overlapping points are ignored.\n\nThe resulting points have a shape similar to a square rotated by 45 degrees.\n\nParameters\n----------\nnb_steps : int\n    The number of steps to move from the center point. 
nb_steps=1 results in a total of\n    5 output points (1 center point + 4 neighbours).\n\nstep_size : number\n    The step size to move from every point to its neighbours.\n\nreturn_array : bool, optional\n    Whether to return the generated points as a list of keypoints or an array\n    of shape ``(N,2)``, where ``N`` is the number of generated points and the second axis contains\n    the x- (first value) and y- (second value) coordinates.\n\nReturns\n-------\npoints : list of imgaug.Keypoint or (N,2) ndarray\n    If return_array was False, then a list of Keypoint.\n    Otherwise a numpy array of shape ``(N,2)``, where ``N`` is the number of generated points and\n    the second axis contains the x- (first value) and y- (second value) coordinates.\n    The center keypoint (the one on which this function was called) is always included.", "id": "f16275:c0:m6"}
{"signature": "@property<EOL><INDENT>def x_int(self):<DEDENT>", "body": "return int(np.round(self.x))<EOL>", "docstring": "Return the keypoint's x-coordinate, rounded to the closest integer.\n\nReturns\n-------\nresult : int\n    Keypoint's x-coordinate, rounded to the closest integer.", "id": "f16275:c0:m1"}
{"signature": "def shift(self, x=<NUM_LIT:0>, y=<NUM_LIT:0>):", "body": "keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]<EOL>return self.deepcopy(keypoints)<EOL>", "docstring": "Move the keypoints around on an image.\n\nParameters\n----------\nx : number, optional\n    Move each keypoint by this value on the x axis.\n\ny : number, optional\n    Move each keypoint by this value on the y axis.\n\nReturns\n-------\nout : KeypointsOnImage\n    Keypoints after moving them.", "id": "f16275:c1:m6"}
{"signature": "@classmethod<EOL><INDENT>def from_xy_array(cls, xy, shape):<DEDENT>", "body": "keypoints = [Keypoint(x=coord[<NUM_LIT:0>], y=coord[<NUM_LIT:1>]) for coord in xy]<EOL>return KeypointsOnImage(keypoints, shape)<EOL>", "docstring": "Convert an array (N,2) with a given image shape to a KeypointsOnImage object.\n\nParameters\n----------\nxy : (N, 2) ndarray\n    Coordinates of ``N`` keypoints on the original image, given\n    as ``(N,2)`` array of xy-coordinates.\n\nshape : tuple of int or ndarray\n    Shape tuple of the image on which the keypoints are placed.\n\nReturns\n-------\nKeypointsOnImage\n    KeypointsOnImage object that contains all keypoints from the array.", "id": "f16275:c1:m10"}
{"signature": "def project(self, from_shape, to_shape):", "body": "xy_proj = project_coords([(self.x, self.y)], from_shape, to_shape)<EOL>return self.deepcopy(x=xy_proj[<NUM_LIT:0>][<NUM_LIT:0>], y=xy_proj[<NUM_LIT:0>][<NUM_LIT:1>])<EOL>", "docstring": "Project the keypoint onto a new position on a new image.\n\nE.g. if the keypoint is on its original image at x=(10 of 100 pixels)\nand y=(20 of 100 pixels) and is projected onto a new image with\nsize (width=200, height=200), its new position will be (20, 40).\n\nThis is intended for cases where the original image is resized.\nIt cannot be used for more complex changes (e.g. padding, cropping).\n\nParameters\n----------\nfrom_shape : tuple of int\n    Shape of the original image. (Before resize.)\n\nto_shape : tuple of int\n    Shape of the new image. (After resize.)\n\nReturns\n-------\nimgaug.Keypoint\n    Keypoint object with new coordinates.", "id": "f16275:c0:m3"}
{"signature": "@property<EOL><INDENT>def empty(self):<DEDENT>", "body": "return len(self.keypoints) == <NUM_LIT:0><EOL>", "docstring": "Returns whether this object contains zero keypoints.\n\nReturns\n-------\nresult : bool\n    True if this object contains zero keypoints.", "id": "f16275:c1:m3"}
{"signature": "def copy(self, keypoints=None, shape=None):", "body": "result = copy.copy(self)<EOL>if keypoints is not None:<EOL><INDENT>result.keypoints = keypoints<EOL><DEDENT>if shape is not None:<EOL><INDENT>result.shape = shape<EOL><DEDENT>return result<EOL>", "docstring": "Create a shallow copy of the KeypointsOnImage object.\n\nParameters\n----------\nkeypoints : None or list of imgaug.Keypoint, optional\n    List of keypoints on the image. If ``None``, the instance's\n    keypoints will be copied.\n\nshape : tuple of int, optional\n    The shape of the image on which the keypoints are placed.\n    If ``None``, the instance's shape will be copied.\n\nReturns\n-------\nimgaug.KeypointsOnImage\n    Shallow copy.", "id": "f16275:c1:m15"}
{"signature": "def draw_on_image(self, image, color=(<NUM_LIT:0>, <NUM_LIT:255>, <NUM_LIT:0>), alpha=<NUM_LIT:1.0>, size=<NUM_LIT:3>,<EOL>copy=True, raise_if_out_of_image=False):", "body": "if copy:<EOL><INDENT>image = np.copy(image)<EOL><DEDENT>if image.ndim == <NUM_LIT:2>:<EOL><INDENT>assert ia.is_single_number(color), (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (str(color),))<EOL><DEDENT>elif image.ndim == <NUM_LIT:3> and ia.is_single_number(color):<EOL><INDENT>color = [color] * image.shape[-<NUM_LIT:1>]<EOL><DEDENT>input_dtype = image.dtype<EOL>alpha_color = color<EOL>if alpha < <NUM_LIT>:<EOL><INDENT>return image<EOL><DEDENT>elif alpha > <NUM_LIT>:<EOL><INDENT>alpha = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>image = image.astype(np.float32, copy=False)<EOL>alpha_color = alpha * np.array(color)<EOL><DEDENT>height, width = image.shape[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>y, x = self.y_int, self.x_int<EOL>x1 = max(x - size//<NUM_LIT:2>, <NUM_LIT:0>)<EOL>x2 = min(x + <NUM_LIT:1> + size//<NUM_LIT:2>, width)<EOL>y1 = max(y - size//<NUM_LIT:2>, <NUM_LIT:0>)<EOL>y2 = min(y + <NUM_LIT:1> + size//<NUM_LIT:2>, height)<EOL>x1_clipped, x2_clipped = np.clip([x1, x2], <NUM_LIT:0>, width)<EOL>y1_clipped, y2_clipped = np.clip([y1, y2], <NUM_LIT:0>, height)<EOL>x1_clipped_ooi = (x1_clipped < <NUM_LIT:0> or x1_clipped >= width)<EOL>x2_clipped_ooi = (x2_clipped < <NUM_LIT:0> or x2_clipped >= width+<NUM_LIT:1>)<EOL>y1_clipped_ooi = (y1_clipped < <NUM_LIT:0> or y1_clipped >= height)<EOL>y2_clipped_ooi = (y2_clipped < <NUM_LIT:0> or y2_clipped >= height+<NUM_LIT:1>)<EOL>x_ooi = (x1_clipped_ooi and x2_clipped_ooi)<EOL>y_ooi = (y1_clipped_ooi and y2_clipped_ooi)<EOL>x_zero_size = (x2_clipped - x1_clipped) < <NUM_LIT:1>  <EOL>y_zero_size = (y2_clipped - y1_clipped) < <NUM_LIT:1><EOL>if not x_ooi and not y_ooi and not x_zero_size and not y_zero_size:<EOL><INDENT>if alpha == <NUM_LIT:1>:<EOL><INDENT>image[y1_clipped:y2_clipped, x1_clipped:x2_clipped] = 
color<EOL><DEDENT>else:<EOL><INDENT>image[y1_clipped:y2_clipped, x1_clipped:x2_clipped] = (<EOL>(<NUM_LIT:1> - alpha)<EOL>* image[y1_clipped:y2_clipped, x1_clipped:x2_clipped]<EOL>+ alpha_color<EOL>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if raise_if_out_of_image:<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (y, x, image.shape))<EOL><DEDENT><DEDENT>if image.dtype.name != input_dtype.name:<EOL><INDENT>if input_dtype.name == \"<STR_LIT>\":<EOL><INDENT>image = np.clip(image, <NUM_LIT:0>, <NUM_LIT:255>, out=image)<EOL><DEDENT>image = image.astype(input_dtype, copy=False)<EOL><DEDENT>return image<EOL>", "docstring": "Draw the keypoint onto a given image.\n\nThe keypoint is drawn as a square.\n\nParameters\n----------\nimage : (H,W,3) ndarray\n    The image onto which to draw the keypoint.\n\ncolor : int or list of int or tuple of int or (3,) ndarray, optional\n    The RGB color of the keypoint. If a single int ``C``, then that is\n    equivalent to ``(C,C,C)``.\n\nalpha : float, optional\n    The opacity of the drawn keypoint, where ``1.0`` denotes a fully\n    visible keypoint and ``0.0`` an invisible one.\n\nsize : int, optional\n    The size of the keypoint. If set to ``S``, each square will have\n    size ``S x S``.\n\ncopy : bool, optional\n    Whether to copy the image before drawing the keypoint.\n\nraise_if_out_of_image : bool, optional\n    Whether to raise an exception if the keypoint is outside of the\n    image.\n\nReturns\n-------\nimage : (H,W,3) ndarray\n    Image with drawn keypoint.", "id": "f16275:c0:m5"}
{"signature": "def deepcopy(self, x=None, y=None):", "body": "x = self.x if x is None else x<EOL>y = self.y if y is None else y<EOL>return Keypoint(x=x, y=y)<EOL>", "docstring": "Create a deep copy of the Keypoint object.\n\nParameters\n----------\nx : None or number, optional\n    Coordinate of the keypoint on the x axis.\n    If ``None``, the instance's value will be copied.\n\ny : None or number, optional\n    Coordinate of the keypoint on the y axis.\n    If ``None``, the instance's value will be copied.\n\nReturns\n-------\nimgaug.Keypoint\n    Deep copy.", "id": "f16275:c0:m8"}
{"signature": "def to_xy_array(self):", "body": "result = np.zeros((len(self.keypoints), <NUM_LIT:2>), dtype=np.float32)<EOL>for i, keypoint in enumerate(self.keypoints):<EOL><INDENT>result[i, <NUM_LIT:0>] = keypoint.x<EOL>result[i, <NUM_LIT:1>] = keypoint.y<EOL><DEDENT>return result<EOL>", "docstring": "Convert keypoint coordinates to ``(N,2)`` array.\n\nReturns\n-------\n(N, 2) ndarray\n    Array containing the coordinates of all keypoints.\n    Shape is ``(N,2)`` with coordinates in xy-form.", "id": "f16275:c1:m8"}
{"signature": "def copy(self, x=None, y=None):", "body": "return self.deepcopy(x=x, y=y)<EOL>", "docstring": "Create a shallow copy of the Keypoint object.\n\nParameters\n----------\nx : None or number, optional\n    Coordinate of the keypoint on the x axis.\n    If ``None``, the instance's value will be copied.\n\ny : None or number, optional\n    Coordinate of the keypoint on the y axis.\n    If ``None``, the instance's value will be copied.\n\nReturns\n-------\nimgaug.Keypoint\n    Shallow copy.", "id": "f16275:c0:m7"}
{"signature": "def shift(self, x=<NUM_LIT:0>, y=<NUM_LIT:0>):", "body": "return self.deepcopy(self.x + x, self.y + y)<EOL>", "docstring": "Move the keypoint around on an image.\n\nParameters\n----------\nx : number, optional\n    Move by this value on the x axis.\n\ny : number, optional\n    Move by this value on the y axis.\n\nReturns\n-------\nimgaug.Keypoint\n    Keypoint object with new coordinates.", "id": "f16275:c0:m4"}
{"signature": "@staticmethod<EOL><INDENT>def from_distance_maps(distance_maps, inverted=False, if_not_found_coords={\"<STR_LIT:x>\": -<NUM_LIT:1>, \"<STR_LIT:y>\": -<NUM_LIT:1>}, threshold=None, <EOL>nb_channels=None):<DEDENT>", "body": "ia.do_assert(len(distance_maps.shape) == <NUM_LIT:3>)<EOL>height, width, nb_keypoints = distance_maps.shape<EOL>drop_if_not_found = False<EOL>if if_not_found_coords is None:<EOL><INDENT>drop_if_not_found = True<EOL>if_not_found_x = -<NUM_LIT:1><EOL>if_not_found_y = -<NUM_LIT:1><EOL><DEDENT>elif isinstance(if_not_found_coords, (tuple, list)):<EOL><INDENT>ia.do_assert(len(if_not_found_coords) == <NUM_LIT:2>)<EOL>if_not_found_x = if_not_found_coords[<NUM_LIT:0>]<EOL>if_not_found_y = if_not_found_coords[<NUM_LIT:1>]<EOL><DEDENT>elif isinstance(if_not_found_coords, dict):<EOL><INDENT>if_not_found_x = if_not_found_coords[\"<STR_LIT:x>\"]<EOL>if_not_found_y = if_not_found_coords[\"<STR_LIT:y>\"]<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\" % (<EOL>type(if_not_found_coords),))<EOL><DEDENT>keypoints = []<EOL>for i in sm.xrange(nb_keypoints):<EOL><INDENT>if inverted:<EOL><INDENT>hitidx_flat = np.argmax(distance_maps[..., i])<EOL><DEDENT>else:<EOL><INDENT>hitidx_flat = np.argmin(distance_maps[..., i])<EOL><DEDENT>hitidx_ndim = np.unravel_index(hitidx_flat, (height, width))<EOL>if not inverted and threshold is not None:<EOL><INDENT>found = (distance_maps[hitidx_ndim[<NUM_LIT:0>], hitidx_ndim[<NUM_LIT:1>], i] < threshold)<EOL><DEDENT>elif inverted and threshold is not None:<EOL><INDENT>found = (distance_maps[hitidx_ndim[<NUM_LIT:0>], hitidx_ndim[<NUM_LIT:1>], i] >= threshold)<EOL><DEDENT>else:<EOL><INDENT>found = True<EOL><DEDENT>if found:<EOL><INDENT>keypoints.append(Keypoint(x=hitidx_ndim[<NUM_LIT:1>], y=hitidx_ndim[<NUM_LIT:0>]))<EOL><DEDENT>else:<EOL><INDENT>if drop_if_not_found:<EOL><INDENT>pass  <EOL><DEDENT>else:<EOL><INDENT>keypoints.append(Keypoint(x=if_not_found_x, 
y=if_not_found_y))<EOL><DEDENT><DEDENT><DEDENT>out_shape = (height, width)<EOL>if nb_channels is not None:<EOL><INDENT>out_shape += (nb_channels,)<EOL><DEDENT>return KeypointsOnImage(keypoints, shape=out_shape)<EOL>", "docstring": "Converts maps generated by ``to_distance_maps()`` back to a KeypointsOnImage object.\n\nParameters\n----------\ndistance_maps : (H,W,N) ndarray\n    The distance maps. N is the number of keypoints.\n\ninverted : bool, optional\n    Whether the given distance maps were generated in inverted or normal mode.\n\nif_not_found_coords : tuple or list or dict or None, optional\n    Coordinates to use for keypoints that cannot be found in ``distance_maps``.\n    If this is a list/tuple, it must have two integer values.\n    If it is a dictionary, it must have the keys ``x`` and ``y``, with each\n    containing one integer value.\n    If this is None, then the keypoint will not be added to the final\n    KeypointsOnImage object.\n\nthreshold : float, optional\n    The search for keypoints works by searching for the argmin (non-inverted) or\n    argmax (inverted) in each channel. This parameters contains the maximum (non-inverted)\n    or minimum (inverted) value to accept in order to view a hit as a keypoint.\n    Use None to use no min/max.\n\nnb_channels : None or int, optional\n    Number of channels of the image on which the keypoints are placed.\n    Some keypoint augmenters require that information.\n    If set to None, the keypoint's shape will be set\n    to ``(height, width)``, otherwise ``(height, width, nb_channels)``.\n\nReturns\n-------\nimgaug.KeypointsOnImage\n    The extracted keypoints.", "id": "f16275:c1:m14"}
{"signature": "def to_distance_maps(self, inverted=False):", "body": "ia.do_assert(len(self.keypoints) > <NUM_LIT:0>)<EOL>height, width = self.shape[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>distance_maps = np.zeros((height, width, len(self.keypoints)), dtype=np.float32)<EOL>yy = np.arange(<NUM_LIT:0>, height)<EOL>xx = np.arange(<NUM_LIT:0>, width)<EOL>grid_xx, grid_yy = np.meshgrid(xx, yy)<EOL>for i, keypoint in enumerate(self.keypoints):<EOL><INDENT>y, x = keypoint.y, keypoint.x<EOL>distance_maps[:, :, i] = (grid_xx - x) ** <NUM_LIT:2> + (grid_yy - y) ** <NUM_LIT:2><EOL><DEDENT>distance_maps = np.sqrt(distance_maps)<EOL>if inverted:<EOL><INDENT>return <NUM_LIT:1>/(distance_maps+<NUM_LIT:1>)<EOL><DEDENT>return distance_maps<EOL>", "docstring": "Generates a ``(H,W,K)`` output containing ``K`` distance maps for ``K`` keypoints.\n\nThe k-th distance map contains at every location ``(y, x)`` the euclidean distance to the k-th keypoint.\n\nThis function can be used as a helper when augmenting keypoints with a method that only supports\nthe augmentation of images.\n\nParameters\n-------\ninverted : bool, optional\n    If True, inverted distance maps are returned where each distance value d is replaced\n    by ``d/(d+1)``, i.e. the distance maps have values in the range ``(0.0, 1.0]`` with 1.0\n    denoting exactly the position of the respective keypoint.\n\nReturns\n-------\ndistance_maps : (H,W,K) ndarray\n    A ``float32`` array containing ``K`` distance maps for ``K`` keypoints. Each location\n    ``(y, x, k)`` in the array denotes the euclidean distance at ``(y, x)`` to the ``k``-th keypoint.\n    In inverted mode the distance ``d`` is replaced by ``d/(d+1)``. The height and width\n    of the array match the height and width in ``KeypointsOnImage.shape``.", "id": "f16275:c1:m13"}
{"signature": "def draw_lines_on_image(self, image, color=(<NUM_LIT:0>, <NUM_LIT:255>, <NUM_LIT:0>),<EOL>alpha=<NUM_LIT:1.0>, size=<NUM_LIT:3>,<EOL>antialiased=True,<EOL>raise_if_out_of_image=False):", "body": "from .. import dtypes as iadt<EOL>from ..augmenters import blend as blendlib<EOL>image_was_empty = False<EOL>if isinstance(image, tuple):<EOL><INDENT>image_was_empty = True<EOL>image = np.zeros(image, dtype=np.uint8)<EOL><DEDENT>assert image.ndim in [<NUM_LIT:2>, <NUM_LIT:3>], (<EOL>(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (image.shape,))<EOL>if len(self.coords) <= <NUM_LIT:1> or alpha < <NUM_LIT:0> + <NUM_LIT> or size < <NUM_LIT:1>:<EOL><INDENT>return np.copy(image)<EOL><DEDENT>if raise_if_out_of_imageand self.is_out_of_image(image, partly=False, fully=True):<EOL><INDENT>raise Exception(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (<EOL>self.__str__(), image.shape))<EOL><DEDENT>if image.ndim == <NUM_LIT:2>:<EOL><INDENT>assert ia.is_single_number(color), (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (str(color),))<EOL>color = [color]<EOL><DEDENT>elif image.ndim == <NUM_LIT:3> and ia.is_single_number(color):<EOL><INDENT>color = [color] * image.shape[-<NUM_LIT:1>]<EOL><DEDENT>image = image.astype(np.float32)<EOL>height, width = image.shape[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>lines = []<EOL>for line_start, line_end in zip(self.coords[:-<NUM_LIT:1>], self.coords[<NUM_LIT:1>:]):<EOL><INDENT>lines.append((line_start[<NUM_LIT:1>], line_start[<NUM_LIT:0>],<EOL>line_end[<NUM_LIT:1>], line_end[<NUM_LIT:0>]))<EOL><DEDENT>lines = np.round(np.float32(lines)).astype(np.int32)<EOL>color = np.float32(color)<EOL>heatmap = np.zeros(image.shape[<NUM_LIT:0>:<NUM_LIT:2>], dtype=np.float32)<EOL>for line in lines:<EOL><INDENT>if antialiased:<EOL><INDENT>rr, cc, val = skimage.draw.line_aa(*line)<EOL><DEDENT>else:<EOL><INDENT>rr, cc = skimage.draw.line(*line)<EOL>val = <NUM_LIT:1.0><EOL><DEDENT>rr_mask = np.logical_and(<NUM_LIT:0> <= rr, rr < height)<EOL>cc_mask = np.logical_and(<NUM_LIT:0> <= cc, cc < 
width)<EOL>mask = np.logical_and(rr_mask, cc_mask)<EOL>if np.any(mask):<EOL><INDENT>rr = rr[mask]<EOL>cc = cc[mask]<EOL>val = val[mask] if not ia.is_single_number(val) else val<EOL>heatmap[rr, cc] = val * alpha<EOL><DEDENT><DEDENT>if size > <NUM_LIT:1>:<EOL><INDENT>kernel = np.ones((size, size), dtype=np.uint8)<EOL>heatmap = cv2.dilate(heatmap, kernel)<EOL><DEDENT>if image_was_empty:<EOL><INDENT>image_blend = image + heatmap * color<EOL><DEDENT>else:<EOL><INDENT>image_color_shape = image.shape[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>if image.ndim == <NUM_LIT:3>:<EOL><INDENT>image_color_shape = image_color_shape + (<NUM_LIT:1>,)<EOL><DEDENT>image_color = np.tile(color, image_color_shape)<EOL>image_blend = blendlib.blend_alpha(image_color, image, heatmap)<EOL><DEDENT>image_blend = iadt.restore_dtypes_(image_blend, np.uint8)<EOL>return image_blend<EOL>", "docstring": "Draw the line segments of the line string on a given image.\n\nParameters\n----------\nimage : ndarray or tuple of int\n    The image onto which to draw.\n    Expected to be ``uint8`` and of shape ``(H, W, C)`` with ``C``\n    usually being ``3`` (other values are not tested).\n    If a tuple, expected to be ``(H, W, C)`` and will lead to a new\n    ``uint8`` array of zeros being created.\n\ncolor : int or iterable of int\n    Color to use as RGB, i.e. three values.\n\nalpha : float, optional\n    Opacity of the line string. Higher values denote a more visible\n    line string.\n\nsize : int, optional\n    Thickness of the line segments.\n\nantialiased : bool, optional\n    Whether to draw the line with anti-aliasing activated.\n\nraise_if_out_of_image : bool, optional\n    Whether to raise an error if the line string is fully\n    outside of the image. If set to False, no error will be raised and\n    only the parts inside the image will be drawn.\n\nReturns\n-------\nndarray\n    `image` with line drawn on it.", "id": "f16276:c0:m24"}
{"signature": "@property<EOL><INDENT>def yy_int(self):<DEDENT>", "body": "return np.round(self.yy).astype(np.int32)<EOL>", "docstring": "Get an array of discrete y-coordinates of all points.", "id": "f16276:c0:m5"}
{"signature": "def coords_almost_equals(self, other, max_distance=<NUM_LIT>, points_per_edge=<NUM_LIT:8>):", "body": "if isinstance(other, LineString):<EOL><INDENT>pass<EOL><DEDENT>elif isinstance(other, tuple):<EOL><INDENT>other = LineString([other])<EOL><DEDENT>else:<EOL><INDENT>other = LineString(other)<EOL><DEDENT>if len(self.coords) == <NUM_LIT:0> and len(other.coords) == <NUM_LIT:0>:<EOL><INDENT>return True<EOL><DEDENT>elif <NUM_LIT:0> in [len(self.coords), len(other.coords)]:<EOL><INDENT>return False<EOL><DEDENT>self_subd = self.subdivide(points_per_edge)<EOL>other_subd = other.subdivide(points_per_edge)<EOL>dist_self2other = self_subd.compute_pointwise_distances(other_subd)<EOL>dist_other2self = other_subd.compute_pointwise_distances(self_subd)<EOL>dist = max(np.max(dist_self2other), np.max(dist_other2self))<EOL>return  dist < max_distance<EOL>", "docstring": "Compare this and another LineString's coordinates.\n\nThis is an approximate method based on pointwise distances and can\nin rare corner cases produce wrong outputs.\n\nParameters\n----------\nother : imgaug.augmentables.lines.LineString \\\n        or tuple of number \\\n        or ndarray \\\n        or list of ndarray \\\n        or list of tuple of number\n    The other line string or its coordinates.\n\nmax_distance : float\n    Max distance of any point from the other line string before\n    the two line strings are evaluated to be unequal.\n\npoints_per_edge : int, optional\n    How many points to interpolate on each edge.\n\nReturns\n-------\nbool\n    Whether the two LineString's coordinates are almost identical,\n    i.e. the max distance is below the threshold.\n    If both have no coordinates, ``True`` is returned.\n    If only one has no coordinates, ``False`` is returned.\n    Beyond that, the number of points is not evaluated.", "id": "f16276:c0:m35"}
{"signature": "def almost_equals(self, other, max_distance=<NUM_LIT>, points_per_edge=<NUM_LIT:8>):", "body": "if self.label != other.label:<EOL><INDENT>return False<EOL><DEDENT>return self.coords_almost_equals(<EOL>other, max_distance=max_distance, points_per_edge=points_per_edge)<EOL>", "docstring": "Compare this and another LineString.\n\nParameters\n----------\nother: imgaug.augmentables.lines.LineString\n    The other line string. Must be a LineString instance, not just\n    its coordinates.\n\nmax_distance : float, optional\n    See :func:`imgaug.augmentables.lines.LineString.coords_almost_equals`.\n\npoints_per_edge : int, optional\n    See :func:`imgaug.augmentables.lines.LineString.coords_almost_equals`.\n\nReturns\n-------\nbool\n    ``True`` if the coordinates are almost equal according to\n    :func:`imgaug.augmentables.lines.LineString.coords_almost_equals`\n    and additionally the labels are identical. Otherwise ``False``.", "id": "f16276:c0:m36"}
{"signature": "def clip_out_of_image(self):", "body": "lss_cut = [ls_clipped<EOL>for ls in self.line_strings<EOL>for ls_clipped in ls.clip_out_of_image(self.shape)]<EOL>return LineStringsOnImage(lss_cut, shape=self.shape)<EOL>", "docstring": "Clip off all parts of the line strings that are outside of the image.\n\nReturns\n-------\nimgaug.augmentables.lines.LineStringsOnImage\n    Line strings, clipped to fall within the image dimensions.", "id": "f16276:c1:m7"}
{"signature": "def contains(self, other, max_distance=<NUM_LIT>):", "body": "return self.compute_distance(other, default=np.inf) < max_distance<EOL>", "docstring": "Estimate whether the bounding box contains a point.\n\nParameters\n----------\nother : tuple of number or imgaug.augmentables.kps.Keypoint\n    Point to check for.\n\nmax_distance : float\n    Maximum allowed euclidean distance between the point and the\n    closest point on the line. If the threshold is exceeded, the point\n    is not considered to be contained in the line.\n\nReturns\n-------\nbool\n    True if the point is contained in the line string, False otherwise.\n    It is contained if its distance to the line or any of its points\n    is below a threshold.", "id": "f16276:c0:m12"}
{"signature": "def is_fully_within_image(self, image, default=False):", "body": "if len(self.coords) == <NUM_LIT:0>:<EOL><INDENT>return default<EOL><DEDENT>return np.all(self.get_pointwise_inside_image_mask(image))<EOL>", "docstring": "Estimate whether the line string is fully inside the image area.\n\nParameters\n----------\nimage : ndarray or tuple of int\n    Either an image with shape ``(H,W,[C])`` or a tuple denoting\n    such an image shape.\n\ndefault\n    Default value to return if the line string contains no points.\n\nReturns\n-------\nbool\n    True if the line string is fully inside the image area.\n    False otherwise.", "id": "f16276:c0:m14"}
{"signature": "def deepcopy(self, coords=None, label=None):", "body": "return LineString(<EOL>coords=np.copy(self.coords) if coords is None else coords,<EOL>label=copylib.deepcopy(self.label) if label is None else label)<EOL>", "docstring": "Create a deep copy of the BoundingBox object.\n\nParameters\n----------\ncoords : None or iterable of tuple of number or ndarray\n    If not ``None``, then the coords of the copied object will be set\n    to this value.\n\nlabel : None or str\n    If not ``None``, then the label of the copied object will be set to\n    this value.\n\nReturns\n-------\nimgaug.augmentables.lines.LineString\n    Deep copy.", "id": "f16276:c0:m38"}
{"signature": "def compute_pointwise_distances(self, other, default=None):", "body": "import shapely.geometry<EOL>from .kps import Keypoint<EOL>if isinstance(other, Keypoint):<EOL><INDENT>other = shapely.geometry.Point((other.x, other.y))<EOL><DEDENT>elif isinstance(other, LineString):<EOL><INDENT>if len(other.coords) == <NUM_LIT:0>:<EOL><INDENT>return default<EOL><DEDENT>elif len(other.coords) == <NUM_LIT:1>:<EOL><INDENT>other = shapely.geometry.Point(other.coords[<NUM_LIT:0>, :])<EOL><DEDENT>else:<EOL><INDENT>other = shapely.geometry.LineString(other.coords)<EOL><DEDENT><DEDENT>elif isinstance(other, tuple):<EOL><INDENT>assert len(other) == <NUM_LIT:2><EOL>other = shapely.geometry.Point(other)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (type(other),))<EOL><DEDENT>return [shapely.geometry.Point(point).distance(other)<EOL>for point in self.coords]<EOL>", "docstring": "Compute the minimal distance between each point on self and other.\n\nParameters\n----------\nother : tuple of number \\\n        or imgaug.augmentables.kps.Keypoint \\\n        or imgaug.augmentables.LineString\n    Other object to which to compute the distances.\n\ndefault\n    Value to return if `other` contains no points.\n\nReturns\n-------\nlist of float\n    Distances to `other` or `default` if not distance could be computed.", "id": "f16276:c0:m10"}
{"signature": "def find_intersections_with(self, other):", "body": "import shapely.geometry<EOL>geom = _convert_var_to_shapely_geometry(other)<EOL>result = []<EOL>for p_start, p_end in zip(self.coords[:-<NUM_LIT:1>], self.coords[<NUM_LIT:1>:]):<EOL><INDENT>ls = shapely.geometry.LineString([p_start, p_end])<EOL>intersections = ls.intersection(geom)<EOL>intersections = list(_flatten_shapely_collection(intersections))<EOL>intersections_points = []<EOL>for inter in intersections:<EOL><INDENT>if isinstance(inter, shapely.geometry.linestring.LineString):<EOL><INDENT>inter_start = (inter.coords[<NUM_LIT:0>][<NUM_LIT:0>], inter.coords[<NUM_LIT:0>][<NUM_LIT:1>])<EOL>inter_end = (inter.coords[-<NUM_LIT:1>][<NUM_LIT:0>], inter.coords[-<NUM_LIT:1>][<NUM_LIT:1>])<EOL>intersections_points.extend([inter_start, inter_end])<EOL><DEDENT>else:<EOL><INDENT>assert isinstance(inter, shapely.geometry.point.Point), (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (type(inter),))<EOL>intersections_points.append((inter.x, inter.y))<EOL><DEDENT><DEDENT>inter_sorted = sorted(<EOL>intersections_points,<EOL>key=lambda p: np.linalg.norm(np.float32(p) - p_start)<EOL>)<EOL>result.append(inter_sorted)<EOL><DEDENT>return result<EOL>", "docstring": "Find all intersection points between the line string and `other`.\n\nParameters\n----------\nother : tuple of number or list of tuple of number or \\\n        list of LineString or LineString\n    The other geometry to use during intersection tests.\n\nReturns\n-------\nlist of list of tuple of number\n    All intersection points. One list per pair of consecutive start\n    and end point, i.e. `N-1` lists of `N` points. Each list may\n    be empty or may contain multiple points.", "id": "f16276:c0:m18"}
{"signature": "def to_keypoints(self):", "body": "<EOL>from imgaug.augmentables.kps import Keypoint<EOL>return [Keypoint(x=x, y=y) for (x, y) in self.coords]<EOL>", "docstring": "Convert the line string points to keypoints.\n\nReturns\n-------\nlist of imgaug.augmentables.kps.Keypoint\n    Points of the line string as keypoints.", "id": "f16276:c0:m30"}
{"signature": "def compute_distance(self, other, default=None):", "body": "<EOL>distances = self.compute_pointwise_distances(other, default=[])<EOL>if len(distances) == <NUM_LIT:0>:<EOL><INDENT>return default<EOL><DEDENT>return min(distances)<EOL>", "docstring": "Compute the minimal distance between the line string and `other`.\n\nParameters\n----------\nother : tuple of number \\\n        or imgaug.augmentables.kps.Keypoint \\\n        or imgaug.augmentables.LineString\n    Other object to which to compute the distance.\n\ndefault\n    Value to return if this line string or `other` contain no points.\n\nReturns\n-------\nfloat\n    Distance to `other` or `default` if not distance could be computed.", "id": "f16276:c0:m11"}
{"signature": "def remove_out_of_image(self, fully=True, partly=False):", "body": "lss_clean = [ls for ls in self.line_strings<EOL>if not ls.is_out_of_image(<EOL>self.shape, fully=fully, partly=partly)]<EOL>return LineStringsOnImage(lss_clean, shape=self.shape)<EOL>", "docstring": "Remove all line strings that are fully/partially outside of the image.\n\nParameters\n----------\nfully : bool, optional\n    Whether to remove line strings that are fully outside of the image.\n\npartly : bool, optional\n    Whether to remove line strings that are partially outside of the\n    image.\n\nReturns\n-------\nimgaug.augmentables.lines.LineStringsOnImage\n    Reduced set of line strings, with those that were fully/partially\n    outside of the image removed.", "id": "f16276:c1:m6"}
{"signature": "def draw_points_on_image(self, image, color=(<NUM_LIT:0>, <NUM_LIT>, <NUM_LIT:0>),<EOL>alpha=<NUM_LIT:1.0>, size=<NUM_LIT:3>,<EOL>copy=True, raise_if_out_of_image=False):", "body": "from .kps import KeypointsOnImage<EOL>kpsoi = KeypointsOnImage.from_xy_array(self.coords, shape=image.shape)<EOL>image = kpsoi.draw_on_image(<EOL>image, color=color, alpha=alpha,<EOL>size=size, copy=copy,<EOL>raise_if_out_of_image=raise_if_out_of_image)<EOL>return image<EOL>", "docstring": "Draw the points of the line string on a given image.\n\nParameters\n----------\nimage : ndarray or tuple of int\n    The image onto which to draw.\n    Expected to be ``uint8`` and of shape ``(H, W, C)`` with ``C``\n    usually being ``3`` (other values are not tested).\n    If a tuple, expected to be ``(H, W, C)`` and will lead to a new\n    ``uint8`` array of zeros being created.\n\ncolor : iterable of int\n    Color to use as RGB, i.e. three values.\n\nalpha : float, optional\n    Opacity of the line string points. Higher values denote a more\n    visible points.\n\nsize : int, optional\n    Size of the points in pixels.\n\ncopy : bool, optional\n    Whether it is allowed to draw directly in the input\n    array (``False``) or it has to be copied (``True``).\n    The routine may still have to copy, even if ``copy=False`` was\n    used. Always use the return value.\n\nraise_if_out_of_image : bool, optional\n    Whether to raise an error if the line string is fully\n    outside of the image. If set to False, no error will be raised and\n    only the parts inside the image will be drawn.\n\nReturns\n-------\nndarray\n    Float array of shape `image_shape` (no channel axis) with drawn\n    line string points. All values are in the interval ``[0.0, 1.0]``.", "id": "f16276:c0:m25"}
{"signature": "def get_pointwise_inside_image_mask(self, image):", "body": "if len(self.coords) == <NUM_LIT:0>:<EOL><INDENT>return np.zeros((<NUM_LIT:0>,), dtype=bool)<EOL><DEDENT>shape = normalize_shape(image)<EOL>height, width = shape[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>x_within = np.logical_and(<NUM_LIT:0> <= self.xx, self.xx < width)<EOL>y_within = np.logical_and(<NUM_LIT:0> <= self.yy, self.yy < height)<EOL>return np.logical_and(x_within, y_within)<EOL>", "docstring": "Get for each point whether it is inside of the given image plane.\n\nParameters\n----------\nimage : ndarray or tuple of int\n    Either an image with shape ``(H,W,[C])`` or a tuple denoting\n    such an image shape.\n\nReturns\n-------\nndarray\n    Boolean array with one value per point indicating whether it is\n    inside of the provided image plane (``True``) or not (``False``).", "id": "f16276:c0:m8"}
{"signature": "def deepcopy(self, line_strings=None, shape=None):", "body": "lss = self.line_strings if line_strings is None else line_strings<EOL>shape = self.shape if shape is None else shape<EOL>return LineStringsOnImage(<EOL>line_strings=[ls.deepcopy() for ls in lss],<EOL>shape=tuple(shape))<EOL>", "docstring": "Create a deep copy of the LineStringsOnImage object.\n\nParameters\n----------\nline_strings : None \\\n               or list of imgaug.augmentables.lines.LineString, optional\n    List of line strings on the image.\n    If not ``None``, then the ``line_strings`` attribute of the copied\n    object will be set to this value.\n\nshape : None or tuple of int or ndarray, optional\n    The shape of the image on which the objects are placed.\n    Either an image with shape ``(H,W,[C])`` or a tuple denoting\n    such an image shape.\n    If not ``None``, then the ``shape`` attribute of the copied object\n    will be set to this value.\n\nReturns\n-------\nimgaug.augmentables.lines.LineStringsOnImage\n    Deep copy.", "id": "f16276:c1:m10"}
{"signature": "def subdivide(self, points_per_edge):", "body": "if len(self.coords) <= <NUM_LIT:1> or points_per_edge < <NUM_LIT:1>:<EOL><INDENT>return self.deepcopy()<EOL><DEDENT>coords = interpolate_points(self.coords, nb_steps=points_per_edge,<EOL>closed=False)<EOL>return self.deepcopy(coords=coords)<EOL>", "docstring": "Adds ``N`` interpolated points with uniform spacing to each edge.\n\nFor each edge between points ``A`` and ``B`` this adds points\nat ``A + (i/(1+N)) * (B - A)``, where ``i`` is the index of the added\npoint and ``N`` is the number of points to add per edge.\n\nCalling this method two times will split each edge at its center\nand then again split each newly created edge at their center.\nIt is equivalent to calling `subdivide(3)`.\n\nParameters\n----------\npoints_per_edge : int\n    Number of points to interpolate on each edge.\n\nReturns\n-------\nLineString\n    Line string with subdivided edges.", "id": "f16276:c0:m29"}
{"signature": "def on(self, image):", "body": "shape = normalize_shape(image)<EOL>if shape[<NUM_LIT:0>:<NUM_LIT:2>] == self.shape[<NUM_LIT:0>:<NUM_LIT:2>]:<EOL><INDENT>return self.deepcopy()<EOL><DEDENT>line_strings = [ls.project(self.shape, shape)<EOL>for ls in self.line_strings]<EOL>return self.deepcopy(line_strings=line_strings, shape=shape)<EOL>", "docstring": "Project bounding boxes from one image to a new one.\n\nParameters\n----------\nimage : ndarray or tuple of int\n    The new image onto which to project.\n    Either an image with shape ``(H,W,[C])`` or a tuple denoting\n    such an image shape.\n\nReturns\n-------\nline_strings : imgaug.augmentables.lines.LineStrings\n    Object containing all projected line strings.", "id": "f16276:c1:m2"}
{"signature": "def is_out_of_image(self, image, fully=True, partly=False, default=True):", "body": "if len(self.coords) == <NUM_LIT:0>:<EOL><INDENT>return default<EOL><DEDENT>if self.is_fully_within_image(image):<EOL><INDENT>return False<EOL><DEDENT>elif self.is_partly_within_image(image):<EOL><INDENT>return partly<EOL><DEDENT>else:<EOL><INDENT>return fully<EOL><DEDENT>", "docstring": "Estimate whether the line is partially/fully outside of the image area.\n\nParameters\n----------\nimage : ndarray or tuple of int\n    Either an image with shape ``(H,W,[C])`` or a tuple denoting\n    such an image shape.\n\nfully : bool, optional\n    Whether to return True if the bounding box is fully outside fo the\n    image area.\n\npartly : bool, optional\n    Whether to return True if the bounding box is at least partially\n    outside fo the image area.\n\ndefault\n    Default value to return if the line string contains no points.\n\nReturns\n-------\nbool\n    `default` if the line string has no points.\n    True if the line string is partially/fully outside of the image\n    area, depending on defined parameters.\n    False otherwise.", "id": "f16276:c0:m16"}
{"signature": "def copy(self, coords=None, label=None):", "body": "return LineString(coords=self.coords if coords is None else coords,<EOL>label=self.label if label is None else label)<EOL>", "docstring": "Create a shallow copy of the LineString object.\n\nParameters\n----------\ncoords : None or iterable of tuple of number or ndarray\n    If not ``None``, then the coords of the copied object will be set\n    to this value.\n\nlabel : None or str\n    If not ``None``, then the label of the copied object will be set to\n    this value.\n\nReturns\n-------\nimgaug.augmentables.lines.LineString\n    Shallow copy.", "id": "f16276:c0:m37"}
{"signature": "def shift(self, top=None, right=None, bottom=None, left=None):", "body": "lss_new = [ls.shift(top=top, right=right, bottom=bottom, left=left)<EOL>for ls in self.line_strings]<EOL>return LineStringsOnImage(lss_new, shape=self.shape)<EOL>", "docstring": "Shift/move the line strings from one or more image sides.\n\nParameters\n----------\ntop : None or int, optional\n    Amount of pixels by which to shift all bounding boxes from the\n    top.\n\nright : None or int, optional\n    Amount of pixels by which to shift all bounding boxes from the\n    right.\n\nbottom : None or int, optional\n    Amount of pixels by which to shift all bounding boxes from the\n    bottom.\n\nleft : None or int, optional\n    Amount of pixels by which to shift all bounding boxes from the\n    left.\n\nReturns\n-------\nimgaug.augmentables.lines.LineStringsOnImage\n    Shifted line strings.", "id": "f16276:c1:m8"}
{"signature": "def compute_neighbour_distances(self):", "body": "if len(self.coords) <= <NUM_LIT:1>:<EOL><INDENT>return np.zeros((<NUM_LIT:0>,), dtype=np.float32)<EOL><DEDENT>return np.sqrt(<EOL>np.sum(<EOL>(self.coords[:-<NUM_LIT:1>, :] - self.coords[<NUM_LIT:1>:, :]) ** <NUM_LIT:2>,<EOL>axis=<NUM_LIT:1><EOL>)<EOL>)<EOL>", "docstring": "Get the euclidean distance between each two consecutive points.\n\nReturns\n-------\nndarray\n    Euclidean distances between point pairs.\n    Same order as in `coords`. For ``N`` points, ``N-1`` distances\n    are returned.", "id": "f16276:c0:m9"}
{"signature": "@property<EOL><INDENT>def xx(self):<DEDENT>", "body": "return self.coords[:, <NUM_LIT:0>]<EOL>", "docstring": "Get an array of x-coordinates of all points of the line string.", "id": "f16276:c0:m2"}
{"signature": "def concatenate(self, other):", "body": "if not isinstance(other, LineString):<EOL><INDENT>other = LineString(other)<EOL><DEDENT>return self.deepcopy(<EOL>coords=np.concatenate([self.coords, other.coords], axis=<NUM_LIT:0>))<EOL>", "docstring": "Concatenate this line string with another one.\n\nThis will add a line segment between the end point of this line string\nand the start point of `other`.\n\nParameters\n----------\nother : imgaug.augmentables.lines.LineString or ndarray \\\n        or iterable of tuple of number\n    The points to add to this line string.\n\nReturns\n-------\nimgaug.augmentables.lines.LineString\n    New line string with concatenated points.\n    The `label` of this line string will be kept.", "id": "f16276:c0:m28"}
{"signature": "def shift(self, top=None, right=None, bottom=None, left=None):", "body": "top = top if top is not None else <NUM_LIT:0><EOL>right = right if right is not None else <NUM_LIT:0><EOL>bottom = bottom if bottom is not None else <NUM_LIT:0><EOL>left = left if left is not None else <NUM_LIT:0><EOL>coords = np.copy(self.coords)<EOL>coords[:, <NUM_LIT:0>] += left - right<EOL>coords[:, <NUM_LIT:1>] += top - bottom<EOL>return self.copy(coords=coords)<EOL>", "docstring": "Shift/move the line string from one or more image sides.\n\nParameters\n----------\ntop : None or int, optional\n    Amount of pixels by which to shift the bounding box from the\n    top.\n\nright : None or int, optional\n    Amount of pixels by which to shift the bounding box from the\n    right.\n\nbottom : None or int, optional\n    Amount of pixels by which to shift the bounding box from the\n    bottom.\n\nleft : None or int, optional\n    Amount of pixels by which to shift the bounding box from the\n    left.\n\nReturns\n-------\nresult : imgaug.augmentables.lines.LineString\n    Shifted line string.", "id": "f16276:c0:m19"}
{"signature": "def draw_on_image(self, image, color=(<NUM_LIT:0>, <NUM_LIT:255>, <NUM_LIT:0>), alpha=<NUM_LIT:1.0>, size=<NUM_LIT:1>,<EOL>copy=True, raise_if_out_of_image=False, thickness=None):", "body": "image = np.copy(image) if copy else image<EOL>for bb in self.bounding_boxes:<EOL><INDENT>image = bb.draw_on_image(<EOL>image,<EOL>color=color,<EOL>alpha=alpha,<EOL>size=size,<EOL>copy=False,<EOL>raise_if_out_of_image=raise_if_out_of_image,<EOL>thickness=thickness<EOL>)<EOL><DEDENT>return image<EOL>", "docstring": "Draw all bounding boxes onto a given image.\n\nParameters\n----------\nimage : (H,W,3) ndarray\n    The image onto which to draw the bounding boxes.\n    This image should usually have the same shape as\n    set in BoundingBoxesOnImage.shape.\n\ncolor : int or list of int or tuple of int or (3,) ndarray, optional\n    The RGB color of all bounding boxes. If a single int ``C``, then\n    that is equivalent to ``(C,C,C)``.\n\nalpha : float, optional\n    Alpha/transparency of the bounding box.\n\nsize : int, optional\n    Thickness in pixels.\n\ncopy : bool, optional\n    Whether to copy the image before drawing the bounding boxes.\n\nraise_if_out_of_image : bool, optional\n    Whether to raise an exception if any bounding box is outside of the\n    image.\n\nthickness : None or int, optional\n    Deprecated.\n\nReturns\n-------\nimage : (H,W,3) ndarray\n    Image with drawn bounding boxes.", "id": "f16277:c1:m7"}
{"signature": "def iou(self, other):", "body": "inters = self.intersection(other)<EOL>if inters is None:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>else:<EOL><INDENT>area_union = self.area + other.area - inters.area<EOL>return inters.area / area_union if area_union > <NUM_LIT:0> else <NUM_LIT:0.0><EOL><DEDENT>", "docstring": "Compute the IoU of this bounding box with another one.\n\nIoU is the intersection over union, defined as::\n\n    ``area(intersection(A, B)) / area(union(A, B))``\n    ``= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))``\n\nParameters\n----------\nother : imgaug.BoundingBox\n    Other bounding box with which to compare.\n\nReturns\n-------\nfloat\n    IoU between the two bounding boxes.", "id": "f16277:c0:m15"}
{"signature": "def contains(self, other):", "body": "if isinstance(other, tuple):<EOL><INDENT>x, y = other<EOL><DEDENT>else:<EOL><INDENT>x, y = other.x, other.y<EOL><DEDENT>return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2<EOL>", "docstring": "Estimate whether the bounding box contains a point.\n\nParameters\n----------\nother : tuple of number or imgaug.Keypoint\n    Point to check for.\n\nReturns\n-------\nbool\n    True if the point is contained in the bounding box, False otherwise.", "id": "f16277:c0:m10"}
{"signature": "@property<EOL><INDENT>def x2_int(self):<DEDENT>", "body": "return int(np.round(self.x2))<EOL>", "docstring": "Return the x-coordinate of the bottom left corner as an integer.\n\nReturns\n-------\nint\n    X-coordinate of the bottom left corner, rounded to the closest integer.", "id": "f16277:c0:m3"}
{"signature": "@property<EOL><INDENT>def center_y(self):<DEDENT>", "body": "return self.y1 + self.height/<NUM_LIT:2><EOL>", "docstring": "Estimate the y-coordinate of the center point of the bounding box.\n\nReturns\n-------\nnumber\n    Y-coordinate of the center point of the bounding box.", "id": "f16277:c0:m8"}
{"signature": "def is_partly_within_image(self, image):", "body": "shape = normalize_shape(image)<EOL>height, width = shape[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>eps = np.finfo(np.float32).eps<EOL>img_bb = BoundingBox(x1=<NUM_LIT:0>, x2=width-eps, y1=<NUM_LIT:0>, y2=height-eps)<EOL>return self.intersection(img_bb) is not None<EOL>", "docstring": "Estimate whether the bounding box is at least partially inside the image area.\n\nParameters\n----------\nimage : (H,W,...) ndarray or tuple of int\n    Image dimensions to use.\n    If an ndarray, its shape will be used.\n    If a tuple, it is assumed to represent the image shape\n    and must contain at least two integers.\n\nReturns\n-------\nbool\n    True if the bounding box is at least partially inside the image area. False otherwise.", "id": "f16277:c0:m17"}
{"signature": "@property<EOL><INDENT>def height(self):<DEDENT>", "body": "return self.shape[<NUM_LIT:0>]<EOL>", "docstring": "Get the height of the image on which the bounding boxes fall.\n\nReturns\n-------\nint\n    Image height.", "id": "f16277:c1:m1"}
{"signature": "@property<EOL><INDENT>def height(self):<DEDENT>", "body": "return self.y2 - self.y1<EOL>", "docstring": "Estimate the height of the bounding box.\n\nReturns\n-------\nnumber\n    Height of the bounding box.", "id": "f16277:c0:m5"}
{"signature": "@property<EOL><INDENT>def width(self):<DEDENT>", "body": "return self.shape[<NUM_LIT:1>]<EOL>", "docstring": "Get the width of the image on which the bounding boxes fall.\n\nReturns\n-------\nint\n    Image width.", "id": "f16277:c1:m2"}
{"signature": "def remove_out_of_image(self, fully=True, partly=False):", "body": "bbs_clean = [bb for bb in self.bounding_boxes<EOL>if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]<EOL>return BoundingBoxesOnImage(bbs_clean, shape=self.shape)<EOL>", "docstring": "Remove all bounding boxes that are fully or partially outside of the image.\n\nParameters\n----------\nfully : bool, optional\n    Whether to remove bounding boxes that are fully outside of the image.\n\npartly : bool, optional\n    Whether to remove bounding boxes that are partially outside of the image.\n\nReturns\n-------\nimgaug.BoundingBoxesOnImage\n    Reduced set of bounding boxes, with those that were fully/partially outside of\n    the image removed.", "id": "f16277:c1:m8"}
{"signature": "def deepcopy(self):", "body": "<EOL>bbs = [bb.deepcopy() for bb in self.bounding_boxes]<EOL>return BoundingBoxesOnImage(bbs, tuple(self.shape))<EOL>", "docstring": "Create a deep copy of the BoundingBoxesOnImage object.\n\nReturns\n-------\nimgaug.BoundingBoxesOnImage\n    Deep copy.", "id": "f16277:c1:m13"}
{"signature": "def intersection(self, other, default=None):", "body": "x1_i = max(self.x1, other.x1)<EOL>y1_i = max(self.y1, other.y1)<EOL>x2_i = min(self.x2, other.x2)<EOL>y2_i = min(self.y2, other.y2)<EOL>if x1_i > x2_i or y1_i > y2_i:<EOL><INDENT>return default<EOL><DEDENT>else:<EOL><INDENT>return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)<EOL><DEDENT>", "docstring": "Compute the intersection bounding box of this bounding box and another one.\n\nNote that in extreme cases, the intersection can be a single point, meaning that the intersection bounding box\nwill exist, but then also has a height and width of zero.\n\nParameters\n----------\nother : imgaug.BoundingBox\n    Other bounding box with which to generate the intersection.\n\ndefault : any, optional\n    Default value to return if there is no intersection.\n\nReturns\n-------\nimgaug.BoundingBox or any\n    Intersection bounding box of the two bounding boxes if there is an intersection.\n    If there is no intersection, the default value will be returned, which can by anything.", "id": "f16277:c0:m13"}
{"signature": "def to_keypoints(self):", "body": "<EOL>from imgaug.augmentables.kps import Keypoint<EOL>return [<EOL>Keypoint(x=self.x1, y=self.y1),<EOL>Keypoint(x=self.x2, y=self.y1),<EOL>Keypoint(x=self.x2, y=self.y2),<EOL>Keypoint(x=self.x1, y=self.y2)<EOL>]<EOL>", "docstring": "Convert the corners of the bounding box to keypoints (clockwise, starting at top left).\n\nReturns\n-------\nlist of imgaug.Keypoint\n    Corners of the bounding box as keypoints.", "id": "f16277:c0:m24"}
{"signature": "def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):", "body": "return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)<EOL>", "docstring": "Create a deep copy of the BoundingBox object.\n\nParameters\n----------\nx1 : None or number\n    If not None, then the x1 coordinate of the copied object will be set to this value.\n\ny1 : None or number\n    If not None, then the y1 coordinate of the copied object will be set to this value.\n\nx2 : None or number\n    If not None, then the x2 coordinate of the copied object will be set to this value.\n\ny2 : None or number\n    If not None, then the y2 coordinate of the copied object will be set to this value.\n\nlabel : None or string\n    If not None, then the label of the copied object will be set to this value.\n\nReturns\n-------\nimgaug.BoundingBox\n    Deep copy.", "id": "f16277:c0:m26"}
{"signature": "def copy(self):", "body": "return copy.copy(self)<EOL>", "docstring": "Create a shallow copy of the BoundingBoxesOnImage object.\n\nReturns\n-------\nimgaug.BoundingBoxesOnImage\n    Shallow copy.", "id": "f16277:c1:m12"}
{"signature": "def project(self, from_shape, to_shape):", "body": "coords_proj = project_coords([(self.x1, self.y1), (self.x2, self.y2)],<EOL>from_shape, to_shape)<EOL>return self.copy(<EOL>x1=coords_proj[<NUM_LIT:0>][<NUM_LIT:0>],<EOL>y1=coords_proj[<NUM_LIT:0>][<NUM_LIT:1>],<EOL>x2=coords_proj[<NUM_LIT:1>][<NUM_LIT:0>],<EOL>y2=coords_proj[<NUM_LIT:1>][<NUM_LIT:1>],<EOL>label=self.label)<EOL>", "docstring": "Project the bounding box onto a differently shaped image.\n\nE.g. if the bounding box is on its original image at\nx1=(10 of 100 pixels) and y1=(20 of 100 pixels) and is projected onto\na new image with size (width=200, height=200), its new position will\nbe (x1=20, y1=40). (Analogous for x2/y2.)\n\nThis is intended for cases where the original image is resized.\nIt cannot be used for more complex changes (e.g. padding, cropping).\n\nParameters\n----------\nfrom_shape : tuple of int or ndarray\n    Shape of the original image. (Before resize.)\n\nto_shape : tuple of int or ndarray\n    Shape of the new image. (After resize.)\n\nReturns\n-------\nout : imgaug.BoundingBox\n    BoundingBox object with new coordinates.", "id": "f16277:c0:m11"}
{"signature": "def on(self, image):", "body": "shape = normalize_shape(image)<EOL>if shape[<NUM_LIT:0>:<NUM_LIT:2>] == self.shape[<NUM_LIT:0>:<NUM_LIT:2>]:<EOL><INDENT>return self.deepcopy()<EOL><DEDENT>bounding_boxes = [bb.project(self.shape, shape)<EOL>for bb in self.bounding_boxes]<EOL>return BoundingBoxesOnImage(bounding_boxes, shape)<EOL>", "docstring": "Project bounding boxes from one image to a new one.\n\nParameters\n----------\nimage : ndarray or tuple of int\n    New image onto which the bounding boxes are to be projected.\n    May also simply be that new image's shape tuple.\n\nReturns\n-------\nbounding_boxes : imgaug.BoundingBoxesOnImage\n    Object containing all projected bounding boxes.", "id": "f16277:c1:m4"}
{"signature": "def to_normalized_batch(self):", "body": "assert all([<EOL>attr is None for attr_name, attr in self.__dict__.items()<EOL>if attr_name.endswith(\"<STR_LIT>\")]),\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"<EOL>images_unaug = nlib.normalize_images(self.images_unaug)<EOL>shapes = None<EOL>if images_unaug is not None:<EOL><INDENT>shapes = [image.shape for image in images_unaug]<EOL><DEDENT>return Batch(<EOL>images=images_unaug,<EOL>heatmaps=nlib.normalize_heatmaps(<EOL>self.heatmaps_unaug, shapes),<EOL>segmentation_maps=nlib.normalize_segmentation_maps(<EOL>self.segmentation_maps_unaug, shapes),<EOL>keypoints=nlib.normalize_keypoints(<EOL>self.keypoints_unaug, shapes),<EOL>bounding_boxes=nlib.normalize_bounding_boxes(<EOL>self.bounding_boxes_unaug, shapes),<EOL>polygons=nlib.normalize_polygons(<EOL>self.polygons_unaug, shapes),<EOL>line_strings=nlib.normalize_line_strings(<EOL>self.line_strings_unaug, shapes),<EOL>data=self.data<EOL>)<EOL>", "docstring": "Convert this unnormalized batch to an instance of Batch.\n\n        As this method is intended to be called before augmentation, it\n        assumes that none of the ``*_aug`` attributes is yet set.\n        It will produce an AssertionError otherwise.\n\n        The newly created Batch's ``*_unaug`` attributes will match the ones\n        in this batch, just in normalized form.\n\n        Returns\n        -------\n        imgaug.augmentables.batches.Batch\n            The batch, with ``*_unaug`` attributes being normalized.", "id": "f16278:c0:m1"}
{"signature": "def fill_from_augmented_normalized_batch(self, batch_aug_norm):", "body": "<EOL>batch = UnnormalizedBatch(<EOL>images=self.images_unaug,<EOL>heatmaps=self.heatmaps_unaug,<EOL>segmentation_maps=self.segmentation_maps_unaug,<EOL>keypoints=self.keypoints_unaug,<EOL>bounding_boxes=self.bounding_boxes_unaug,<EOL>polygons=self.polygons_unaug,<EOL>line_strings=self.line_strings_unaug,<EOL>data=batch_aug_norm.data<EOL>)<EOL>batch.images_aug = nlib.invert_normalize_images(<EOL>batch_aug_norm.images_aug, self.images_unaug)<EOL>batch.heatmaps_aug = nlib.invert_normalize_heatmaps(<EOL>batch_aug_norm.heatmaps_aug, self.heatmaps_unaug)<EOL>batch.segmentation_maps_aug = nlib.invert_normalize_segmentation_maps(<EOL>batch_aug_norm.segmentation_maps_aug, self.segmentation_maps_unaug)<EOL>batch.keypoints_aug = nlib.invert_normalize_keypoints(<EOL>batch_aug_norm.keypoints_aug, self.keypoints_unaug)<EOL>batch.bounding_boxes_aug = nlib.invert_normalize_bounding_boxes(<EOL>batch_aug_norm.bounding_boxes_aug, self.bounding_boxes_unaug)<EOL>batch.polygons_aug = nlib.invert_normalize_polygons(<EOL>batch_aug_norm.polygons_aug, self.polygons_unaug)<EOL>batch.line_strings_aug = nlib.invert_normalize_line_strings(<EOL>batch_aug_norm.line_strings_aug, self.line_strings_unaug)<EOL>return batch<EOL>", "docstring": "Fill this batch with (normalized) augmentation results.\n\nThis method receives a (normalized) Batch instance, takes all\n``*_aug`` attributes out if it and assigns them to this\nbatch *in unnormalized form*. Hence, the datatypes of all ``*_aug``\nattributes will match the datatypes of the ``*_unaug`` attributes.\n\nParameters\n----------\nbatch_aug_norm: imgaug.augmentables.batches.Batch\n    Batch after normalization and augmentation.\n\nReturns\n-------\nimgaug.augmentables.batches.UnnormalizedBatch\n    New UnnormalizedBatch instance. 
All ``*_unaug`` attributes are\n    taken from the old UnnormalizedBatch (without deepcopying them)\n    and all ``*_aug`` attributes are taken from `batch_normalized`\n    converted to unnormalized form.", "id": "f16278:c0:m2"}
{"signature": "def deepcopy(self):", "body": "return HeatmapsOnImage(self.get_arr(), shape=self.shape, min_value=self.min_value, max_value=self.max_value)<EOL>", "docstring": "Create a deep copy of the Heatmaps object.\n\nReturns\n-------\nimgaug.HeatmapsOnImage\n    Deep copy.", "id": "f16280:c0:m16"}
{"signature": "@classmethod<EOL><INDENT>def change_normalization(cls, arr, source, target):<DEDENT>", "body": "ia.do_assert(ia.is_np_array(arr))<EOL>if isinstance(source, HeatmapsOnImage):<EOL><INDENT>source = (source.min_value, source.max_value)<EOL><DEDENT>else:<EOL><INDENT>ia.do_assert(isinstance(source, tuple))<EOL>ia.do_assert(len(source) == <NUM_LIT:2>)<EOL>ia.do_assert(source[<NUM_LIT:0>] < source[<NUM_LIT:1>])<EOL><DEDENT>if isinstance(target, HeatmapsOnImage):<EOL><INDENT>target = (target.min_value, target.max_value)<EOL><DEDENT>else:<EOL><INDENT>ia.do_assert(isinstance(target, tuple))<EOL>ia.do_assert(len(target) == <NUM_LIT:2>)<EOL>ia.do_assert(target[<NUM_LIT:0>] < target[<NUM_LIT:1>])<EOL><DEDENT>eps = np.finfo(arr.dtype).eps<EOL>mins_same = source[<NUM_LIT:0>] - <NUM_LIT:10>*eps < target[<NUM_LIT:0>] < source[<NUM_LIT:0>] + <NUM_LIT:10>*eps<EOL>maxs_same = source[<NUM_LIT:1>] - <NUM_LIT:10>*eps < target[<NUM_LIT:1>] < source[<NUM_LIT:1>] + <NUM_LIT:10>*eps<EOL>if mins_same and maxs_same:<EOL><INDENT>return np.copy(arr)<EOL><DEDENT>min_source, max_source = source<EOL>min_target, max_target = target<EOL>diff_source = max_source - min_source<EOL>diff_target = max_target - min_target<EOL>arr_0to1 = (arr - min_source) / diff_source<EOL>arr_target = min_target + arr_0to1 * diff_target<EOL>return arr_target<EOL>", "docstring": "Change the value range of a heatmap from one min-max to another min-max.\n\nE.g. the value range may be changed from min=0.0, max=1.0 to min=-1.0, max=1.0.\n\nParameters\n----------\narr : ndarray\n    Heatmap array to modify.\n\nsource : tuple of float\n    Current value range of the input array, given as (min, max), where both are float values.\n\ntarget : tuple of float\n    Desired output value range of the array, given as (min, max), where both are float values.\n\nReturns\n-------\narr_target : ndarray\n    Input array, with value range projected to the desired target value range.", "id": "f16280:c0:m14"}
{"signature": "def resize(self, sizes, interpolation=\"<STR_LIT>\"):", "body": "arr_0to1_resized = ia.imresize_single_image(self.arr_0to1, sizes, interpolation=interpolation)<EOL>arr_0to1_resized = np.clip(arr_0to1_resized, <NUM_LIT:0.0>, <NUM_LIT:1.0>)<EOL>return HeatmapsOnImage.from_0to1(arr_0to1_resized, shape=self.shape, min_value=self.min_value,<EOL>max_value=self.max_value)<EOL>", "docstring": "Resize the heatmap(s) array to the provided size given the provided interpolation.\n\nParameters\n----------\nsizes : float or iterable of int or iterable of float\n    New size of the array in ``(height, width)``.\n    See :func:`imgaug.imgaug.imresize_single_image` for details.\n\ninterpolation : None or str or int, optional\n    The interpolation to use during resize.\n    See :func:`imgaug.imgaug.imresize_single_image` for details.\n\nReturns\n-------\nimgaug.HeatmapsOnImage\n    Resized heatmaps object.", "id": "f16280:c0:m10"}
{"signature": "def pad_to_aspect_ratio(self, aspect_ratio, mode=\"<STR_LIT>\", cval=<NUM_LIT:0.0>, return_pad_amounts=False):", "body": "arr_0to1_padded, pad_amounts = ia.pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode,<EOL>cval=cval, return_pad_amounts=True)<EOL>heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,<EOL>max_value=self.max_value)<EOL>if return_pad_amounts:<EOL><INDENT>return heatmaps, pad_amounts<EOL><DEDENT>else:<EOL><INDENT>return heatmaps<EOL><DEDENT>", "docstring": "Pad the heatmaps on their sides so that they match a target aspect ratio.\n\nDepending on which dimension is smaller (height or width), only the corresponding\nsides (left/right or top/bottom) will be padded. In each case, both of the sides will\nbe padded equally.\n\nParameters\n----------\naspect_ratio : float\n    Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice\n    as much width as height.\n\nmode : str, optional\n    Padding mode to use. See :func:`numpy.pad` for details.\n\ncval : number, optional\n    Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.\n\nreturn_pad_amounts : bool, optional\n    If False, then only the padded image will be returned. If True, a tuple with two\n    entries will be returned, where the first entry is the padded image and the second\n    entry are the amounts by which each image side was padded. These amounts are again a\n    tuple of the form (top, right, bottom, left), with each value being an integer.\n\nReturns\n-------\nheatmaps : imgaug.HeatmapsOnImage\n    Padded heatmaps as HeatmapsOnImage object.\n\npad_amounts : tuple of int\n    Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.\n    This tuple is only returned if `return_pad_amounts` was set to True.", "id": "f16280:c0:m6"}
{"signature": "def avg_pool(self, block_size):", "body": "arr_0to1_reduced = ia.avg_pool(self.arr_0to1, block_size, cval=<NUM_LIT:0.0>)<EOL>return HeatmapsOnImage.from_0to1(arr_0to1_reduced, shape=self.shape, min_value=self.min_value,<EOL>max_value=self.max_value)<EOL>", "docstring": "Resize the heatmap(s) array using average pooling of a given block/kernel size.\n\nParameters\n----------\nblock_size : int or tuple of int\n    Size of each block of values to pool, aka kernel size. See :func:`imgaug.pool` for details.\n\nReturns\n-------\nimgaug.HeatmapsOnImage\n    Heatmaps after average pooling.", "id": "f16280:c0:m7"}
{"signature": "def is_partly_within_image(self, image):", "body": "return not self.is_out_of_image(image, fully=True, partly=False)<EOL>", "docstring": "Estimate whether the polygon is at least partially inside the image area.\n\nParameters\n----------\nimage : (H,W,...) ndarray or tuple of int\n    Image dimensions to use.\n    If an ndarray, its shape will be used.\n    If a tuple, it is assumed to represent the image shape and must contain at least two integers.\n\nReturns\n-------\nbool\n    True if the polygon is at least partially inside the image area.\n    False otherwise.", "id": "f16281:c0:m12"}
{"signature": "@property<EOL><INDENT>def is_valid(self):<DEDENT>", "body": "if len(self.exterior) < <NUM_LIT:3>:<EOL><INDENT>return False<EOL><DEDENT>return self.to_shapely_polygon().is_valid<EOL>", "docstring": "Estimate whether the polygon has a valid shape.\n\nTo to be considered valid, the polygons must be made up of at least 3 points and have concave shape.\nMultiple consecutive points are allowed to have the same coordinates.\n\nReturns\n-------\nbool\n    True if polygon has at least 3 points and is concave, otherwise False.", "id": "f16281:c0:m5"}
{"signature": "@property<EOL><INDENT>def area(self):<DEDENT>", "body": "if len(self.exterior) < <NUM_LIT:3>:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>poly = self.to_shapely_polygon()<EOL>return poly.area<EOL>", "docstring": "Estimate the area of the polygon.\n\nReturns\n-------\nnumber\n    Area of the polygon.", "id": "f16281:c0:m6"}
{"signature": "def __init__(self, geoms):", "body": "ia.do_assert(len(geoms) == <NUM_LIT:0> or all([isinstance(el, Polygon) for el in geoms]))<EOL>self.geoms = geoms<EOL>", "docstring": "Create a new MultiPolygon instance.", "id": "f16281:c3:m0"}
{"signature": "def to_shapely_polygon(self):", "body": "<EOL>import shapely.geometry<EOL>return shapely.geometry.Polygon([(point[<NUM_LIT:0>], point[<NUM_LIT:1>]) for point in self.exterior])<EOL>", "docstring": "Convert this polygon to a Shapely polygon.\n\nReturns\n-------\nshapely.geometry.Polygon\n    The Shapely polygon matching this polygon's exterior.", "id": "f16281:c0:m21"}
{"signature": "@property<EOL><INDENT>def width(self):<DEDENT>", "body": "xx = self.xx<EOL>return max(xx) - min(xx)<EOL>", "docstring": "Estimate the width of the polygon.\n\nReturns\n-------\nnumber\n    Width of the polygon.", "id": "f16281:c0:m8"}
{"signature": "def on(self, image):", "body": "shape = normalize_shape(image)<EOL>if shape[<NUM_LIT:0>:<NUM_LIT:2>] == self.shape[<NUM_LIT:0>:<NUM_LIT:2>]:<EOL><INDENT>return self.deepcopy()<EOL><DEDENT>polygons = [poly.project(self.shape, shape) for poly in self.polygons]<EOL>return PolygonsOnImage(polygons, shape)<EOL>", "docstring": "Project polygons from one image to a new one.\n\nParameters\n----------\nimage : ndarray or tuple of int\n    New image onto which the polygons are to be projected.\n    May also simply be that new image's shape tuple.\n\nReturns\n-------\nimgaug.PolygonsOnImage\n    Object containing all projected polygons.", "id": "f16281:c1:m2"}
{"signature": "def change_first_point_by_index(self, point_idx):", "body": "ia.do_assert(<NUM_LIT:0> <= point_idx < len(self.exterior))<EOL>if point_idx == <NUM_LIT:0>:<EOL><INDENT>return self.deepcopy()<EOL><DEDENT>exterior = np.concatenate(<EOL>(self.exterior[point_idx:, :], self.exterior[:point_idx, :]),<EOL>axis=<NUM_LIT:0><EOL>)<EOL>return self.deepcopy(exterior=exterior)<EOL>", "docstring": "Set the first point of the exterior to the given point based on its index.\n\nNote: This method does *not* work in-place.\n\nParameters\n----------\npoint_idx : int\n    Index of the desired starting point.\n\nReturns\n-------\nimgaug.Polygon\n    Copy of this polygon with the new point order.", "id": "f16281:c0:m20"}
{"signature": "@property<EOL><INDENT>def xx(self):<DEDENT>", "body": "return self.exterior[:, <NUM_LIT:0>]<EOL>", "docstring": "Return the x-coordinates of all points in the exterior.\n\nReturns\n-------\n(N,2) ndarray\n    X-coordinates of all points in the exterior as a float32 ndarray.", "id": "f16281:c0:m1"}
{"signature": "def draw_on_image(self,<EOL>image,<EOL>color=(<NUM_LIT:0>, <NUM_LIT:255>, <NUM_LIT:0>), color_face=None,<EOL>color_lines=None, color_points=None,<EOL>alpha=<NUM_LIT:1.0>, alpha_face=None,<EOL>alpha_lines=None, alpha_points=None,<EOL>size=<NUM_LIT:1>, size_lines=None, size_points=None,<EOL>raise_if_out_of_image=False):", "body": "for poly in self.polygons:<EOL><INDENT>image = poly.draw_on_image(<EOL>image,<EOL>color=color,<EOL>color_face=color_face,<EOL>color_lines=color_lines,<EOL>color_points=color_points,<EOL>alpha=alpha,<EOL>alpha_face=alpha_face,<EOL>alpha_lines=alpha_lines,<EOL>alpha_points=alpha_points,<EOL>size=size,<EOL>size_lines=size_lines,<EOL>size_points=size_points,<EOL>raise_if_out_of_image=raise_if_out_of_image<EOL>)<EOL><DEDENT>return image<EOL>", "docstring": "Draw all polygons onto a given image.\n\nParameters\n----------\nimage : (H,W,C) ndarray\n    The image onto which to draw the bounding boxes.\n    This image should usually have the same shape as set in\n    ``PolygonsOnImage.shape``.\n\ncolor : iterable of int, optional\n    The color to use for the whole polygons.\n    Must correspond to the channel layout of the image. Usually RGB.\n    The values for `color_face`, `color_lines` and `color_points`\n    will be derived from this color if they are set to ``None``.\n    This argument has no effect if `color_face`, `color_lines`\n    and `color_points` are all set anything other than ``None``.\n\ncolor_face : None or iterable of int, optional\n    The color to use for the inner polygon areas (excluding perimeters).\n    Must correspond to the channel layout of the image. Usually RGB.\n    If this is ``None``, it will be derived from ``color * 1.0``.\n\ncolor_lines : None or iterable of int, optional\n    The color to use for the lines (aka perimeters/borders) of the\n    polygons. Must correspond to the channel layout of the image.\n    Usually RGB. 
If this is ``None``, it will be derived\n    from ``color * 0.5``.\n\ncolor_points : None or iterable of int, optional\n    The color to use for the corner points of the polygons.\n    Must correspond to the channel layout of the image. Usually RGB.\n    If this is ``None``, it will be derived from ``color * 0.5``.\n\nalpha : float, optional\n    The opacity of the whole polygons, where ``1.0`` denotes\n    completely visible polygons and ``0.0`` invisible ones.\n    The values for `alpha_face`, `alpha_lines` and `alpha_points`\n    will be derived from this alpha value if they are set to ``None``.\n    This argument has no effect if `alpha_face`, `alpha_lines`\n    and `alpha_points` are all set anything other than ``None``.\n\nalpha_face : None or number, optional\n    The opacity of the polygon's inner areas (excluding the perimeters),\n    where ``1.0`` denotes completely visible inner areas and ``0.0``\n    invisible ones.\n    If this is ``None``, it will be derived from ``alpha * 0.5``.\n\nalpha_lines : None or number, optional\n    The opacity of the polygon's lines (aka perimeters/borders),\n    where ``1.0`` denotes completely visible perimeters and ``0.0``\n    invisible ones.\n    If this is ``None``, it will be derived from ``alpha * 1.0``.\n\nalpha_points : None or number, optional\n    The opacity of the polygon's corner points, where ``1.0`` denotes\n    completely visible corners and ``0.0`` invisible ones.\n    Currently this is an on/off choice, i.e. only ``0.0`` or ``1.0``\n    are allowed.\n    If this is ``None``, it will be derived from ``alpha * 1.0``.\n\nsize : int, optional\n    Size of the polygons.\n    The sizes of the line and points are derived from this value,\n    unless they are set.\n\nsize_lines : None or int, optional\n    Thickness of the polygon lines (aka perimeter/border).\n    If ``None``, this value is derived from `size`.\n\nsize_points : int, optional\n    The size of all corner points. 
If set to ``C``, each corner point\n    will be drawn as a square of size ``C x C``.\n\nraise_if_out_of_image : bool, optional\n    Whether to raise an error if any polygon is fully\n    outside of the image. If set to False, no error will be raised and\n    only the parts inside the image will be drawn.\n\nReturns\n-------\nimage : (H,W,C) ndarray\n    Image with drawn polygons.", "id": "f16281:c1:m3"}
{"signature": "def remove_out_of_image(self, fully=True, partly=False):", "body": "polys_clean = [<EOL>poly for poly in self.polygons<EOL>if not poly.is_out_of_image(self.shape, fully=fully, partly=partly)<EOL>]<EOL>return PolygonsOnImage(polys_clean, shape=self.shape)<EOL>", "docstring": "Remove all polygons that are fully or partially outside of the image.\n\nParameters\n----------\nfully : bool, optional\n    Whether to remove polygons that are fully outside of the image.\n\npartly : bool, optional\n    Whether to remove polygons that are partially outside of the image.\n\nReturns\n-------\nimgaug.PolygonsOnImage\n    Reduced set of polygons, with those that were fully/partially\n    outside of the image removed.", "id": "f16281:c1:m4"}
{"signature": "def copy(self, exterior=None, label=None):", "body": "return self.deepcopy(exterior=exterior, label=label)<EOL>", "docstring": "Create a shallow copy of the Polygon object.\n\nParameters\n----------\nexterior : list of imgaug.Keypoint or list of tuple or (N,2) ndarray, optional\n    List of points defining the polygon. See :func:`imgaug.Polygon.__init__` for details.\n\nlabel : None or str, optional\n    If not None, then the label of the copied object will be set to this value.\n\nReturns\n-------\nimgaug.Polygon\n    Shallow copy.", "id": "f16281:c0:m29"}
{"signature": "def almost_equals(self, other, max_distance=<NUM_LIT>, points_per_edge=<NUM_LIT:8>):", "body": "if not isinstance(other, Polygon):<EOL><INDENT>return False<EOL><DEDENT>if self.label is not None or other.label is not None:<EOL><INDENT>if self.label is None:<EOL><INDENT>return False<EOL><DEDENT>if other.label is None:<EOL><INDENT>return False<EOL><DEDENT>if self.label != other.label:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return self.exterior_almost_equals(<EOL>other, max_distance=max_distance, points_per_edge=points_per_edge)<EOL>", "docstring": "Estimate if this polygon's and another's geometry/labels are similar.\n\nThis is the same as :func:`imgaug.Polygon.exterior_almost_equals` but\nadditionally compares the labels.\n\nParameters\n----------\nother\n    The object to compare against. If not a Polygon, then False will\n    be returned.\n\nmax_distance : float, optional\n    See :func:`imgaug.augmentables.polys.Polygon.exterior_almost_equals`.\n\npoints_per_edge : int, optional\n    See :func:`imgaug.augmentables.polys.Polygon.exterior_almost_equals`.\n\nReturns\n-------\nbool\n    Whether the two polygons can be viewed as equal. In the case of\n    the exteriors this is an approximate test.", "id": "f16281:c0:m28"}
{"signature": "@staticmethod<EOL><INDENT>def from_shapely(polygon_shapely, label=None):<DEDENT>", "body": "<EOL>import shapely.geometry<EOL>ia.do_assert(isinstance(polygon_shapely, shapely.geometry.Polygon))<EOL>if polygon_shapely.exterior is None or len(polygon_shapely.exterior.coords) == <NUM_LIT:0>:<EOL><INDENT>return Polygon([], label=label)<EOL><DEDENT>exterior = np.float32([[x, y] for (x, y) in polygon_shapely.exterior.coords])<EOL>return Polygon(exterior, label=label)<EOL>", "docstring": "Create a polygon from a Shapely polygon.\n\nNote: This will remove any holes in the Shapely polygon.\n\nParameters\n----------\npolygon_shapely : shapely.geometry.Polygon\n     The shapely polygon.\n\nlabel : None or str, optional\n    The label of the new polygon.\n\nReturns\n-------\nimgaug.Polygon\n    A polygon with the same exterior as the Shapely polygon.", "id": "f16281:c0:m26"}
{"signature": "def to_line_string(self, closed=True):", "body": "from imgaug.augmentables.lines import LineString<EOL>if not closed or len(self.exterior) <= <NUM_LIT:1>:<EOL><INDENT>return LineString(self.exterior, label=self.label)<EOL><DEDENT>return LineString(<EOL>np.concatenate([self.exterior, self.exterior[<NUM_LIT:0>:<NUM_LIT:1>, :]], axis=<NUM_LIT:0>),<EOL>label=self.label)<EOL>", "docstring": "Convert this polygon's `exterior` to a ``LineString`` instance.\n\nParameters\n----------\nclosed : bool, optional\n    Whether to close the line string, i.e. to add the first point of\n    the `exterior` also as the last point at the end of the line string.\n    This has no effect if the polygon has a single point or zero\n    points.\n\nReturns\n-------\nimgaug.augmentables.lines.LineString\n    Exterior of the polygon as a line string.", "id": "f16281:c0:m25"}
{"signature": "def deepcopy(self, exterior=None, label=None):", "body": "return Polygon(<EOL>exterior=np.copy(self.exterior) if exterior is None else exterior,<EOL>label=self.label if label is None else label<EOL>)<EOL>", "docstring": "Create a deep copy of the Polygon object.\n\nParameters\n----------\nexterior : list of Keypoint or list of tuple or (N,2) ndarray, optional\n    List of points defining the polygon. See `imgaug.Polygon.__init__` for details.\n\nlabel : None or str\n    If not None, then the label of the copied object will be set to this value.\n\nReturns\n-------\nimgaug.Polygon\n    Deep copy.", "id": "f16281:c0:m30"}
{"signature": "@property<EOL><INDENT>def yy(self):<DEDENT>", "body": "return self.exterior[:, <NUM_LIT:1>]<EOL>", "docstring": "Return the y-coordinates of all points in the exterior.\n\nReturns\n-------\n(N,2) ndarray\n    Y-coordinates of all points in the exterior as a float32 ndarray.", "id": "f16281:c0:m2"}
{"signature": "def deepcopy(self):", "body": "<EOL>polys = [poly.deepcopy() for poly in self.polygons]<EOL>return PolygonsOnImage(polys, tuple(self.shape))<EOL>", "docstring": "Create a deep copy of the PolygonsOnImage object.\n\nReturns\n-------\nimgaug.PolygonsOnImage\n    Deep copy.", "id": "f16281:c1:m8"}
{"signature": "def clip_out_of_image(self, image):", "body": "<EOL>import shapely.geometry<EOL>if self.is_out_of_image(image, fully=True, partly=False):<EOL><INDENT>return []<EOL><DEDENT>h, w = image.shape[<NUM_LIT:0>:<NUM_LIT:2>] if ia.is_np_array(image) else image[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>poly_shapely = self.to_shapely_polygon()<EOL>poly_image = shapely.geometry.Polygon([(<NUM_LIT:0>, <NUM_LIT:0>), (w, <NUM_LIT:0>), (w, h), (<NUM_LIT:0>, h)])<EOL>multipoly_inter_shapely = poly_shapely.intersection(poly_image)<EOL>if not isinstance(multipoly_inter_shapely, shapely.geometry.MultiPolygon):<EOL><INDENT>ia.do_assert(isinstance(multipoly_inter_shapely, shapely.geometry.Polygon))<EOL>multipoly_inter_shapely = shapely.geometry.MultiPolygon([multipoly_inter_shapely])<EOL><DEDENT>polygons = []<EOL>for poly_inter_shapely in multipoly_inter_shapely.geoms:<EOL><INDENT>polygons.append(Polygon.from_shapely(poly_inter_shapely, label=self.label))<EOL><DEDENT>polygons_reordered = []<EOL>for polygon in polygons:<EOL><INDENT>found = False<EOL>for x, y in self.exterior:<EOL><INDENT>closest_idx, dist = polygon.find_closest_point_index(x=x, y=y, return_distance=True)<EOL>if dist < <NUM_LIT>:<EOL><INDENT>polygon_reordered = polygon.change_first_point_by_index(closest_idx)<EOL>polygons_reordered.append(polygon_reordered)<EOL>found = True<EOL>break<EOL><DEDENT><DEDENT>ia.do_assert(found)  <EOL><DEDENT>return polygons_reordered<EOL>", "docstring": "Cut off all parts of the polygon that are outside of the image.\n\nThis operation may lead to new points being created.\nAs a single polygon may be split into multiple new polygons, the result\nis always a list, which may contain more than one output polygon.\n\nThis operation will return an empty list if the polygon is completely\noutside of the image plane.\n\nParameters\n----------\nimage : (H,W,...) 
ndarray or tuple of int\n    Image dimensions to use for the clipping of the polygon.\n    If an ndarray, its shape will be used.\n    If a tuple, it is assumed to represent the image shape and must\n    contain at least two integers.\n\nReturns\n-------\nlist of imgaug.Polygon\n    Polygon, clipped to fall within the image dimensions.\n    Returned as a list, because the clipping can split the polygon into\n    multiple parts. The list may also be empty, if the polygon was\n    fully outside of the image plane.", "id": "f16281:c0:m15"}
{"signature": "@property<EOL><INDENT>def yy_int(self):<DEDENT>", "body": "return np.int32(np.round(self.yy))<EOL>", "docstring": "Return the y-coordinates of all points in the exterior, rounded to the closest integer value.\n\nReturns\n-------\n(N,2) ndarray\n    Y-coordinates of all points in the exterior, rounded to the closest integer value.\n    Result dtype is int32.", "id": "f16281:c0:m4"}
{"signature": "def to_keypoints(self):", "body": "<EOL>from imgaug.augmentables.kps import Keypoint<EOL>return [Keypoint(x=point[<NUM_LIT:0>], y=point[<NUM_LIT:1>]) for point in self.exterior]<EOL>", "docstring": "Convert this polygon's `exterior` to ``Keypoint`` instances.\n\nReturns\n-------\nlist of imgaug.Keypoint\n    Exterior vertices as ``Keypoint`` instances.", "id": "f16281:c0:m24"}
{"signature": "def is_fully_within_image(self, image):", "body": "return not self.is_out_of_image(image, fully=True, partly=True)<EOL>", "docstring": "Estimate whether the polygon is fully inside the image area.\n\nParameters\n----------\nimage : (H,W,...) ndarray or tuple of int\n    Image dimensions to use.\n    If an ndarray, its shape will be used.\n    If a tuple, it is assumed to represent the image shape and must contain at least two integers.\n\nReturns\n-------\nbool\n    True if the polygon is fully inside the image area.\n    False otherwise.", "id": "f16281:c0:m11"}
{"signature": "def __init__(self, exterior, label=None):", "body": "<EOL>from imgaug.augmentables.kps import Keypoint<EOL>if isinstance(exterior, list):<EOL><INDENT>if not exterior:<EOL><INDENT>self.exterior = np.zeros((<NUM_LIT:0>, <NUM_LIT:2>), dtype=np.float32)<EOL><DEDENT>elif isinstance(exterior[<NUM_LIT:0>], Keypoint):<EOL><INDENT>self.exterior = np.float32([[point.x, point.y] for point in exterior])<EOL><DEDENT>else:<EOL><INDENT>self.exterior = np.float32([[point[<NUM_LIT:0>], point[<NUM_LIT:1>]] for point in exterior])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>ia.do_assert(ia.is_np_array(exterior),<EOL>(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (exterior,))<EOL>ia.do_assert(exterior.ndim == <NUM_LIT:2> and exterior.shape[<NUM_LIT:1>] == <NUM_LIT:2>,<EOL>(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (<EOL>exterior.shape,))<EOL>self.exterior = np.float32(exterior)<EOL><DEDENT>if len(self.exterior) >= <NUM_LIT:2> and np.allclose(self.exterior[<NUM_LIT:0>, :], self.exterior[-<NUM_LIT:1>, :]):<EOL><INDENT>self.exterior = self.exterior[:-<NUM_LIT:1>]<EOL><DEDENT>self.label = label<EOL>", "docstring": "Create a new Polygon instance.", "id": "f16281:c0:m0"}
{"signature": "def resize(self, sizes, interpolation=\"<STR_LIT>\"):", "body": "arr_resized = ia.imresize_single_image(self.arr, sizes, interpolation=interpolation)<EOL>arr_resized = np.clip(arr_resized, <NUM_LIT:0.0>, <NUM_LIT:1.0>)<EOL>segmap = SegmentationMapOnImage(arr_resized, shape=self.shape)<EOL>segmap.input_was = self.input_was<EOL>return segmap<EOL>", "docstring": "Resize the segmentation map array to the provided size given the provided interpolation.\n\nParameters\n----------\nsizes : float or iterable of int or iterable of float\n    New size of the array in ``(height, width)``.\n    See :func:`imgaug.imgaug.imresize_single_image` for details.\n\ninterpolation : None or str or int, optional\n    The interpolation to use during resize.\n    See :func:`imgaug.imgaug.imresize_single_image` for details.\n    Note: The segmentation map is internally stored as multiple float-based heatmaps,\n    making smooth interpolations potentially more reasonable than nearest neighbour\n    interpolation.\n\nReturns\n-------\nsegmap : imgaug.SegmentationMapOnImage\n    Resized segmentation map object.", "id": "f16283:c0:m7"}
{"signature": "def get_arr_int(self, background_threshold=<NUM_LIT>, background_class_id=None):", "body": "if self.input_was[<NUM_LIT:0>] in [\"<STR_LIT:bool>\", \"<STR_LIT:float>\"]:<EOL><INDENT>ia.do_assert(background_class_id is None,<EOL>\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\")<EOL><DEDENT>if background_class_id is None:<EOL><INDENT>background_class_id = <NUM_LIT:0><EOL><DEDENT>channelwise_max_idx = np.argmax(self.arr, axis=<NUM_LIT:2>)<EOL>if self.input_was[<NUM_LIT:0>] in [\"<STR_LIT:bool>\", \"<STR_LIT:float>\"]:<EOL><INDENT>result = <NUM_LIT:1> + channelwise_max_idx<EOL><DEDENT>else:  <EOL><INDENT>result = channelwise_max_idx<EOL><DEDENT>if background_threshold is not None and background_threshold > <NUM_LIT:0>:<EOL><INDENT>probs = np.amax(self.arr, axis=<NUM_LIT:2>)<EOL>result[probs < background_threshold] = background_class_id<EOL><DEDENT>return result.astype(np.int32)<EOL>", "docstring": "Get the segmentation map array as an integer array of shape (H, W).\n\nEach pixel in that array contains an integer value representing the pixel's class.\nIf multiple classes overlap, the one with the highest local float value is picked.\nIf that highest local value is below `background_threshold`, the method instead uses\nthe background class id as the pixel's class value.\nBy default, class id 0 is the background class. This may only be changed if the original\ninput to the segmentation map object was an integer map.\n\nParameters\n----------\nbackground_threshold : float, optional\n    At each pixel, each class-heatmap has a value between 0.0 and 1.0. If none of the\n    class-heatmaps has a value above this threshold, the method uses the background class\n    id instead.\n\nbackground_class_id : None or int, optional\n    Class id to fall back to if no class-heatmap passes the threshold at a spatial\n    location. May only be provided if the original input was an integer mask and in these\n    cases defaults to 0. 
If the input were float or boolean masks, the background class id\n    may not be set as it is assumed that the background is implicitly defined\n    as 'any spatial location that has zero-like values in all masks'.\n\nReturns\n-------\nresult : (H,W) ndarray\n    Segmentation map array (int32).\n    If the original input consisted of boolean or float masks, then the highest possible\n    class id is ``1+C``, where ``C`` is the number of provided float/boolean masks. The value\n    ``0`` in the integer mask then denotes the background class.", "id": "f16283:c0:m1"}
{"signature": "def pad(self, top=<NUM_LIT:0>, right=<NUM_LIT:0>, bottom=<NUM_LIT:0>, left=<NUM_LIT:0>, mode=\"<STR_LIT>\", cval=<NUM_LIT:0.0>):", "body": "arr_padded = ia.pad(self.arr, top=top, right=right, bottom=bottom, left=left, mode=mode, cval=cval)<EOL>segmap = SegmentationMapOnImage(arr_padded, shape=self.shape)<EOL>segmap.input_was = self.input_was<EOL>return segmap<EOL>", "docstring": "Pad the segmentation map on its top/right/bottom/left side.\n\nParameters\n----------\ntop : int, optional\n    Amount of pixels to add at the top side of the segmentation map. Must be 0 or greater.\n\nright : int, optional\n    Amount of pixels to add at the right side of the segmentation map. Must be 0 or greater.\n\nbottom : int, optional\n    Amount of pixels to add at the bottom side of the segmentation map. Must be 0 or greater.\n\nleft : int, optional\n    Amount of pixels to add at the left side of the segmentation map. Must be 0 or greater.\n\nmode : str, optional\n    Padding mode to use. See :func:`numpy.pad` for details.\n\ncval : number, optional\n    Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.\n\nReturns\n-------\nsegmap : imgaug.SegmentationMapOnImage\n    Padded segmentation map of height ``H'=H+top+bottom`` and width ``W'=W+left+right``.", "id": "f16283:c0:m4"}
{"signature": "def deepcopy(self):", "body": "segmap = SegmentationMapOnImage(self.arr, shape=self.shape, nb_classes=self.nb_classes)<EOL>segmap.input_was = self.input_was<EOL>return segmap<EOL>", "docstring": "Create a deep copy of the segmentation map object.\n\nReturns\n-------\nimgaug.SegmentationMapOnImage\n    Deep copy.", "id": "f16283:c0:m11"}
{"signature": "def to_heatmaps(self, only_nonempty=False, not_none_if_no_nonempty=False):", "body": "<EOL>from imgaug.augmentables.heatmaps import HeatmapsOnImage<EOL>if not only_nonempty:<EOL><INDENT>return HeatmapsOnImage.from_0to1(self.arr, self.shape, min_value=<NUM_LIT:0.0>, max_value=<NUM_LIT:1.0>)<EOL><DEDENT>else:<EOL><INDENT>nonempty_mask = np.sum(self.arr, axis=(<NUM_LIT:0>, <NUM_LIT:1>)) > <NUM_LIT:0> + <NUM_LIT><EOL>if np.sum(nonempty_mask) == <NUM_LIT:0>:<EOL><INDENT>if not_none_if_no_nonempty:<EOL><INDENT>nonempty_mask[<NUM_LIT:0>] = True<EOL><DEDENT>else:<EOL><INDENT>return None, []<EOL><DEDENT><DEDENT>class_indices = np.arange(self.arr.shape[<NUM_LIT:2>])[nonempty_mask]<EOL>channels = self.arr[..., class_indices]<EOL>return HeatmapsOnImage(channels, self.shape, min_value=<NUM_LIT:0.0>, max_value=<NUM_LIT:1.0>), class_indices<EOL><DEDENT>", "docstring": "Convert segmentation map to heatmaps object.\n\nEach segmentation map class will be represented as a single heatmap channel.\n\nParameters\n----------\nonly_nonempty : bool, optional\n    If True, then only heatmaps for classes that appear in the segmentation map will be\n    generated. Additionally, a list of these class ids will be returned.\n\nnot_none_if_no_nonempty : bool, optional\n    If `only_nonempty` is True and for a segmentation map no channel was non-empty,\n    this function usually returns None as the heatmaps object. If however this parameter\n    is set to True, a heatmaps object with one channel (representing class 0)\n    will be returned as a fallback in these cases.\n\nReturns\n-------\nimgaug.HeatmapsOnImage or None\n    Segmentation map as a heatmaps object.\n    If `only_nonempty` was set to True and no class appeared in the segmentation map,\n    then this is None.\n\nclass_indices : list of int\n    Class ids (0 to C-1) of the classes that were actually added to the heatmaps.\n    Only returned if `only_nonempty` was set to True.", "id": "f16283:c0:m8"}
{"signature": "def Clouds(name=None, deterministic=False, random_state=None):", "body": "if name is None:<EOL><INDENT>name = \"<STR_LIT>\" % (ia.caller_name(),)<EOL><DEDENT>return meta.SomeOf((<NUM_LIT:1>, <NUM_LIT:2>), children=[<EOL>CloudLayer(<EOL>intensity_mean=(<NUM_LIT>, <NUM_LIT:255>), intensity_freq_exponent=(-<NUM_LIT>, -<NUM_LIT>), intensity_coarse_scale=<NUM_LIT:10>,<EOL>alpha_min=<NUM_LIT:0>, alpha_multiplier=(<NUM_LIT>, <NUM_LIT>), alpha_size_px_max=(<NUM_LIT:2>, <NUM_LIT:8>), alpha_freq_exponent=(-<NUM_LIT>, -<NUM_LIT>),<EOL>sparsity=(<NUM_LIT>, <NUM_LIT:1.0>), density_multiplier=(<NUM_LIT:0.5>, <NUM_LIT:1.0>)<EOL>),<EOL>CloudLayer(<EOL>intensity_mean=(<NUM_LIT>, <NUM_LIT:255>), intensity_freq_exponent=(-<NUM_LIT>, -<NUM_LIT:1.0>), intensity_coarse_scale=<NUM_LIT:10>,<EOL>alpha_min=<NUM_LIT:0>, alpha_multiplier=(<NUM_LIT:0.5>, <NUM_LIT:1.0>), alpha_size_px_max=(<NUM_LIT:64>, <NUM_LIT>), alpha_freq_exponent=(-<NUM_LIT>, -<NUM_LIT:1.0>),<EOL>sparsity=(<NUM_LIT:1.0>, <NUM_LIT>), density_multiplier=(<NUM_LIT>, <NUM_LIT>)<EOL>)<EOL>], random_order=False, name=name, deterministic=deterministic, random_state=random_state)<EOL>", "docstring": "Augmenter to draw clouds in images.\n\nThis is a wrapper around ``CloudLayer``. It executes 1 to 2 layers per image, leading to varying densities\nand frequency patterns of clouds.\n\nThis augmenter seems to be fairly robust w.r.t. the image size. 
Tested with ``96x128``, ``192x256``\nand ``960x1280``.\n\ndtype support::\n\n    * ``uint8``: yes; tested\n    * ``uint16``: no (1)\n    * ``uint32``: no (1)\n    * ``uint64``: no (1)\n    * ``int8``: no (1)\n    * ``int16``: no (1)\n    * ``int32``: no (1)\n    * ``int64``: no (1)\n    * ``float16``: no (1)\n    * ``float32``: no (1)\n    * ``float64``: no (1)\n    * ``float128``: no (1)\n    * ``bool``: no (1)\n\n    - (1) Parameters of this augmenter are optimized for the value range of uint8.\n          While other dtypes may be accepted, they will lead to images augmented in\n          ways inappropriate for the respective dtype.\n\nParameters\n----------\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nExamples\n--------\n>>> aug = iaa.Clouds()\n\nCreates an augmenter that adds clouds to images.", "id": "f16284:m0"}
{"signature": "@classmethod<EOL><INDENT>def map_coordinates(cls, image, dx, dy, order=<NUM_LIT:1>, cval=<NUM_LIT:0>, mode=\"<STR_LIT>\"):<DEDENT>", "body": "if order == <NUM_LIT:0> and image.dtype.name in [\"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>raise Exception((\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (order, image.dtype.name))<EOL><DEDENT>input_dtype = image.dtype<EOL>if image.dtype.name == \"<STR_LIT:bool>\":<EOL><INDENT>image = image.astype(np.float32)<EOL><DEDENT>elif order == <NUM_LIT:1> and image.dtype.name in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>image = image.astype(np.float64)<EOL><DEDENT>elif order >= <NUM_LIT:2> and image.dtype.name == \"<STR_LIT>\":<EOL><INDENT>image = image.astype(np.int16)<EOL><DEDENT>elif order >= <NUM_LIT:2> and image.dtype.name == \"<STR_LIT>\":<EOL><INDENT>image = image.astype(np.float64)<EOL><DEDENT>shrt_max = <NUM_LIT><EOL>backend = \"<STR_LIT>\"<EOL>if order == <NUM_LIT:0>:<EOL><INDENT>bad_dtype_cv2 = (image.dtype.name in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:bool>\"])<EOL><DEDENT>elif order == <NUM_LIT:1>:<EOL><INDENT>bad_dtype_cv2 = (image.dtype.name in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT:bool>\"])<EOL><DEDENT>else:<EOL><INDENT>bad_dtype_cv2 = (image.dtype.name in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:bool>\"])<EOL><DEDENT>bad_dx_shape_cv2 = (dx.shape[<NUM_LIT:0>] >= shrt_max or dx.shape[<NUM_LIT:1>] >= shrt_max)<EOL>bad_dy_shape_cv2 = (dy.shape[<NUM_LIT:0>] >= shrt_max or dy.shape[<NUM_LIT:1>] >= shrt_max)<EOL>if bad_dtype_cv2 or bad_dx_shape_cv2 or bad_dy_shape_cv2:<EOL><INDENT>backend = \"<STR_LIT>\"<EOL><DEDENT>ia.do_assert(image.ndim == <NUM_LIT:3>)<EOL>result = np.copy(image)<EOL>height, width = image.shape[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>if backend == \"<STR_LIT>\":<EOL><INDENT>h, w = 
image.shape[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>y, x = np.meshgrid(np.arange(h).astype(np.float32), np.arange(w).astype(np.float32), indexing='<STR_LIT>')<EOL>x_shifted = x + (-<NUM_LIT:1>) * dx<EOL>y_shifted = y + (-<NUM_LIT:1>) * dy<EOL>for c in sm.xrange(image.shape[<NUM_LIT:2>]):<EOL><INDENT>remapped_flat = ndimage.interpolation.map_coordinates(<EOL>image[..., c],<EOL>(y_shifted.flatten(), x_shifted.flatten()),<EOL>order=order,<EOL>cval=cval,<EOL>mode=mode<EOL>)<EOL>remapped = remapped_flat.reshape((height, width))<EOL>result[..., c] = remapped<EOL><DEDENT><DEDENT>else:<EOL><INDENT>h, w, nb_channels = image.shape<EOL>y, x = np.meshgrid(np.arange(h).astype(np.float32), np.arange(w).astype(np.float32), indexing='<STR_LIT>')<EOL>x_shifted = x + (-<NUM_LIT:1>) * dx<EOL>y_shifted = y + (-<NUM_LIT:1>) * dy<EOL>if image.dtype.kind == \"<STR_LIT:f>\":<EOL><INDENT>cval = float(cval)<EOL><DEDENT>else:<EOL><INDENT>cval = int(cval)<EOL><DEDENT>border_mode = cls._MAPPING_MODE_SCIPY_CV2[mode]<EOL>interpolation = cls._MAPPING_ORDER_SCIPY_CV2[order]<EOL>is_nearest_neighbour = (interpolation == cv2.INTER_NEAREST)<EOL>map1, map2 = cv2.convertMaps(x_shifted, y_shifted, cv2.CV_16SC2, nninterpolation=is_nearest_neighbour)<EOL>if nb_channels <= <NUM_LIT:4>:<EOL><INDENT>result = cv2.remap(image, map1, map2, interpolation=interpolation, borderMode=border_mode, borderValue=cval)<EOL>if image.ndim == <NUM_LIT:3> and result.ndim == <NUM_LIT:2>:<EOL><INDENT>result = result[..., np.newaxis]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>current_chan_idx = <NUM_LIT:0><EOL>result = []<EOL>while current_chan_idx < nb_channels:<EOL><INDENT>channels = image[..., current_chan_idx:current_chan_idx+<NUM_LIT:4>]<EOL>result_c =  cv2.remap(channels, map1, map2, interpolation=interpolation, borderMode=border_mode,<EOL>borderValue=cval)<EOL>if result_c.ndim == <NUM_LIT:2>:<EOL><INDENT>result_c = result_c[..., np.newaxis]<EOL><DEDENT>result.append(result_c)<EOL>current_chan_idx += <NUM_LIT:4><EOL><DEDENT>result = 
np.concatenate(result, axis=<NUM_LIT:2>)<EOL><DEDENT><DEDENT>if result.dtype.name != input_dtype.name:<EOL><INDENT>result = iadt.restore_dtypes_(result, input_dtype)<EOL><DEDENT>return result<EOL>", "docstring": "backend=\"scipy\"\n\n    order=0\n        * uint8: yes\n        * uint16: yes\n        * uint32: yes\n        * uint64: no (produces array filled with only 0)\n        * int8: yes\n        * int16: yes\n        * int32: yes\n        * int64: no (produces array filled with <min_value> when testing with <max_value>)\n        * float16: yes\n        * float32: yes\n        * float64: yes\n        * float128: no (causes: 'data type no supported')\n        * bool: yes\n\n    order=1 to 5\n        * uint*, int*: yes (rather loose test, to avoid having to re-compute the interpolation)\n        * float16 - float64: yes (rather loose test, to avoid having to re-compute the interpolation)\n        * float128: no (causes: 'data type no supported')\n        * bool: yes\n\nbackend=\"cv2\"\n\n    order=0\n        * uint8: yes\n        * uint16: yes\n        * uint32: no (causes: src data type = 6 is not supported)\n        * uint64: no (silently converts to int32)\n        * int8: yes\n        * int16: yes\n        * int32: yes\n        * int64: no (silently converts to int32)\n        * float16: yes\n        * float32: yes\n        * float64: yes\n        * float128: no (causes: src data type = 13 is not supported)\n        * bool: no (causes: src data type = 0 is not supported)\n\n    order=1\n        * uint8: yes\n        * uint16: yes\n        * uint32: no (causes: src data type = 6 is not supported)\n        * uint64: no (causes: OpenCV(3.4.5) (...)/imgwarp.cpp:1805: error: (-215:Assertion failed) ifunc != 0\n          in function 'remap')\n        * int8: no (causes: OpenCV(3.4.5) (...)/imgwarp.cpp:1805: error: (-215:Assertion failed) ifunc != 0\n          in function 'remap')\n        * int16: no (causes: OpenCV(3.4.5) (...)/imgwarp.cpp:1805: error: 
(-215:Assertion failed) ifunc != 0\n          in function 'remap')\n        * int32: no (causes: OpenCV(3.4.5) (...)/imgwarp.cpp:1805: error: (-215:Assertion failed) ifunc != 0\n          in function 'remap')\n        * int64: no (causes: OpenCV(3.4.5) (...)/imgwarp.cpp:1805: error: (-215:Assertion failed) ifunc != 0\n          in function 'remap')\n        * float16: yes\n        * float32: yes\n        * float64: yes\n        * float128: no (causes: src data type = 13 is not supported)\n        * bool: no (causes: src data type = 0 is not supported)\n\n    order=2 to 5:\n        as order=1, but int16 supported", "id": "f16285:c4:m8"}
{"signature": "def localize_random_state_(self, recursive=True):", "body": "if self.random_state == ia.current_random_state():<EOL><INDENT>self.random_state = ia.new_random_state()<EOL><DEDENT>if recursive:<EOL><INDENT>for lst in self.get_children_lists():<EOL><INDENT>for child in lst:<EOL><INDENT>child.localize_random_state_(recursive=recursive)<EOL><DEDENT><DEDENT><DEDENT>return self<EOL>", "docstring": "Converts global random states to local ones.\n\nA global random state exists exactly once. Many augmenters can point\nto it (and thereby use it to sample random numbers).\nLocal random states usually exist for exactly one augmenter and are\nsaved within that augmenter.\n\nUsually there is no need to change global into local random states.\nThe only noteworthy exceptions are\n\n    * whenever you want to use determinism (so that the global random\n      state is not accidentally reverted)\n    * whenever you want to copy random states from one augmenter to\n      another. (Copying the global random state doesn't help very\n      much. If you copy the state from A to B, then execute A and then\n      B, B's (global) random state has already changed because of A's\n      sampling.)\n\nThe case of determinism is handled automatically by\n:func:`imgaug.augmenters.meta.Augmenter.to_deterministic`.\nOnly when you copy random states (via :func:`imgaug.augmenters.meta.Augmenter.copy_random_state`),\nyou need to call this function first.\n\nParameters\n----------\nrecursive : bool, optional\n    Whether to localize the random states of children too.\n\nReturns\n-------\nself : imgaug.augmenters.meta.Augmenter\n    Returns itself (with localized random states).", "id": "f16288:c0:m28"}
{"signature": "def remove_augmenters_inplace(self, func, parents=None):", "body": "parents = [] if parents is None else parents<EOL>subparents = parents + [self]<EOL>for lst in self.get_children_lists():<EOL><INDENT>to_remove = []<EOL>for i, aug in enumerate(lst):<EOL><INDENT>if func(aug, subparents):<EOL><INDENT>to_remove.append((i, aug))<EOL><DEDENT><DEDENT>for count_removed, (i, aug) in enumerate(to_remove):<EOL><INDENT>del lst[i - count_removed]<EOL><DEDENT>for aug in lst:<EOL><INDENT>aug.remove_augmenters_inplace(func, subparents)<EOL><DEDENT><DEDENT>", "docstring": "Remove in-place children of this augmenter that match a condition.\n\nThis is functionally identical to ``remove_augmenters()`` with\n``copy=False``, except that it does not affect the topmost augmenter\n(the one on which this function is initially called on).\n\nParameters\n----------\nfunc : callable\n    See :func:`imgaug.augmenters.meta.Augmenter.remove_augmenters`.\n\nparents : None or list of imgaug.augmenters.meta.Augmenter, optional\n    List of parent Augmenter instances that lead to this\n    Augmenter. If None, an empty list will be used.\n    This parameter can usually be left empty and will be set\n    automatically for children.\n\nExamples\n--------\n>>> seq = iaa.Sequential([\n>>>     iaa.Fliplr(0.5, name=\"fliplr\"),\n>>>    iaa.Flipud(0.5, name=\"flipud\"),\n>>> ])\n>>> seq.remove_augmenters_inplace(lambda a, parents: a.name == \"fliplr\")\n\nThis removes the augmenter Fliplr from the Sequential object's children.", "id": "f16288:c0:m38"}
{"signature": "def augment_batches(self, batches, hooks=None, background=False):", "body": "if isinstance(batches, (Batch, UnnormalizedBatch)):<EOL><INDENT>batches = [batches]<EOL><DEDENT>ia.do_assert(<EOL>(ia.is_iterable(batches)<EOL>and not ia.is_np_array(batches)<EOL>and not ia.is_string(batches))<EOL>or ia.is_generator(batches),<EOL>(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (type(batches),))<EOL>if background:<EOL><INDENT>ia.do_assert(<EOL>hooks is None,<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>def _normalize_batch(idx, batch):<EOL><INDENT>if isinstance(batch, Batch):<EOL><INDENT>batch_copy = batch.deepcopy()<EOL>batch_copy.data = (idx, batch_copy.data)<EOL>batch_normalized = batch_copy<EOL>batch_orig_dt = \"<STR_LIT>\"<EOL><DEDENT>elif isinstance(batch, UnnormalizedBatch):<EOL><INDENT>batch_copy = batch.to_normalized_batch()<EOL>batch_copy.data = (idx, batch_copy.data)<EOL>batch_normalized = batch_copy<EOL>batch_orig_dt = \"<STR_LIT>\"<EOL><DEDENT>elif ia.is_np_array(batch):<EOL><INDENT>ia.do_assert(<EOL>batch.ndim in (<NUM_LIT:3>, <NUM_LIT:4>),<EOL>(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (batch.shape,))<EOL>batch_normalized = Batch(images=batch, data=(idx,))<EOL>batch_orig_dt = \"<STR_LIT>\"<EOL><DEDENT>elif isinstance(batch, list):<EOL><INDENT>if len(batch) == <NUM_LIT:0>:<EOL><INDENT>batch_normalized = Batch(data=(idx,))<EOL>batch_orig_dt = \"<STR_LIT>\"<EOL><DEDENT>elif ia.is_np_array(batch[<NUM_LIT:0>]):<EOL><INDENT>batch_normalized = Batch(images=batch, data=(idx,))<EOL>batch_orig_dt = \"<STR_LIT>\"<EOL><DEDENT>elif isinstance(batch[<NUM_LIT:0>], ia.HeatmapsOnImage):<EOL><INDENT>batch_normalized = Batch(heatmaps=batch, data=(idx,))<EOL>batch_orig_dt = \"<STR_LIT>\"<EOL><DEDENT>elif isinstance(batch[<NUM_LIT:0>], ia.SegmentationMapOnImage):<EOL><INDENT>batch_normalized = Batch(segmentation_maps=batch,<EOL>data=(idx,))<EOL>batch_orig_dt = \"<STR_LIT>\"<EOL><DEDENT>elif isinstance(batch[<NUM_LIT:0>], ia.KeypointsOnImage):<EOL><INDENT>batch_normalized = 
Batch(keypoints=batch, data=(idx,))<EOL>batch_orig_dt = \"<STR_LIT>\"<EOL><DEDENT>elif isinstance(batch[<NUM_LIT:0>], ia.BoundingBoxesOnImage):<EOL><INDENT>batch_normalized = Batch(bounding_boxes=batch, data=(idx,))<EOL>batch_orig_dt = \"<STR_LIT>\"<EOL><DEDENT>elif isinstance(batch[<NUM_LIT:0>], ia.PolygonsOnImage):<EOL><INDENT>batch_normalized = Batch(polygons=batch, data=(idx,))<EOL>batch_orig_dt = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>raise Exception(<EOL>(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (type(batch[<NUM_LIT:0>]),))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise Exception(<EOL>(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (type(batch),))<EOL><DEDENT>if batch_orig_dt not in [\"<STR_LIT>\",<EOL>\"<STR_LIT>\"]:<EOL><INDENT>ia.warn_deprecated(<EOL>(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (<EOL>batch_orig_dt,))<EOL><DEDENT>return batch_normalized, batch_orig_dt<EOL><DEDENT>def _unnormalize_batch(batch_aug, batch_orig, batch_orig_dt):<EOL><INDENT>if batch_orig_dt == \"<STR_LIT>\":<EOL><INDENT>batch_unnormalized = batch_aug<EOL>batch_unnormalized.data = batch_unnormalized.data[<NUM_LIT:1>]<EOL><DEDENT>elif batch_orig_dt == \"<STR_LIT>\":<EOL><INDENT>batch_aug.data = batch_aug.data[<NUM_LIT:1>]<EOL>batch_unnormalized =batch_orig.fill_from_augmented_normalized_batch(batch_aug)<EOL><DEDENT>elif batch_orig_dt == \"<STR_LIT>\":<EOL><INDENT>batch_unnormalized = batch_aug.images_aug<EOL><DEDENT>elif batch_orig_dt == \"<STR_LIT>\":<EOL><INDENT>batch_unnormalized = []<EOL><DEDENT>elif batch_orig_dt == \"<STR_LIT>\":<EOL><INDENT>batch_unnormalized = batch_aug.images_aug<EOL><DEDENT>elif batch_orig_dt == \"<STR_LIT>\":<EOL><INDENT>batch_unnormalized = batch_aug.heatmaps_aug<EOL><DEDENT>elif batch_orig_dt == 
\"<STR_LIT>\":<EOL><INDENT>batch_unnormalized = batch_aug.segmentation_maps_aug<EOL><DEDENT>elif batch_orig_dt == \"<STR_LIT>\":<EOL><INDENT>batch_unnormalized = batch_aug.keypoints_aug<EOL><DEDENT>elif batch_orig_dt == \"<STR_LIT>\":<EOL><INDENT>batch_unnormalized = batch_aug.bounding_boxes_aug<EOL><DEDENT>else:  <EOL><INDENT>ia.do_assert(batch_orig_dt == \"<STR_LIT>\")<EOL>batch_unnormalized = batch_aug.polygons_aug<EOL><DEDENT>return batch_unnormalized<EOL><DEDENT>if not background:<EOL><INDENT>for idx, batch in enumerate(batches):<EOL><INDENT>batch_normalized, batch_orig_dt = _normalize_batch(idx, batch)<EOL>batch_normalized = self.augment_batch(<EOL>batch_normalized, hooks=hooks)<EOL>batch_unnormalized = _unnormalize_batch(<EOL>batch_normalized, batch, batch_orig_dt)<EOL>yield batch_unnormalized<EOL><DEDENT><DEDENT>else:<EOL><INDENT>import imgaug.multicore as multicore<EOL>id_to_batch_orig = dict()<EOL>def load_batches():<EOL><INDENT>for idx, batch in enumerate(batches):<EOL><INDENT>batch_normalized, batch_orig_dt = _normalize_batch(<EOL>idx, batch)<EOL>id_to_batch_orig[idx] = (batch, batch_orig_dt)<EOL>yield batch_normalized<EOL><DEDENT><DEDENT>with multicore.Pool(self) as pool:<EOL><INDENT>for batch_aug in pool.imap_batches(load_batches()):<EOL><INDENT>idx = batch_aug.data[<NUM_LIT:0>]<EOL>assert idx in id_to_batch_orig<EOL>batch_orig, batch_orig_dt = id_to_batch_orig[idx]<EOL>batch_unnormalized = _unnormalize_batch(<EOL>batch_aug, batch_orig, batch_orig_dt)<EOL>del id_to_batch_orig[idx]<EOL>yield batch_unnormalized<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Augment multiple batches.\n\nIn contrast to other augment functions, this function _yields_ batches\ninstead of just returning a full list. This is more suited for most\ntraining loops. 
It also supports augmentation on multiple cpu cores,\nactivated via the `background` flag.\n\nParameters\n----------\nbatches : imgaug.augmentables.batches.Batch \\\n          or imgaug.augmentables.batches.UnnormalizedBatch \\\n          or iterable of imgaug.augmentables.batches.Batch \\\n          or iterable of imgaug.augmentables.batches.UnnormalizedBatch\n    A single batch or a list of batches to augment.\n\nhooks : None or imgaug.HooksImages, optional\n    HooksImages object to dynamically interfere with the augmentation\n    process.\n\nbackground : bool, optional\n    Whether to augment the batches in background processes.\n    If true, hooks can currently not be used as that would require\n    pickling functions.\n    Note that multicore augmentation distributes the batches onto\n    different CPU cores. It does not split the data within batches.\n    It is therefore not sensible to use ``background=True`` for a\n    single batch.\n    Note also that multicore augmentation needs some time to start. It\n    is therefore not recommended to use it for very few batches.\n\nYields\n-------\nimgaug.augmentables.batches.Batch \\\n          or imgaug.augmentables.batches.UnnormalizedBatch \\\n          or iterable of imgaug.augmentables.batches.Batch \\\n          or iterable of imgaug.augmentables.batches.UnnormalizedBatch\n    Augmented batches.", "id": "f16288:c0:m1"}
{"signature": "def find_augmenters_by_names(self, names, regex=False, flat=True):", "body": "if regex:<EOL><INDENT>def comparer(aug, parents):<EOL><INDENT>for pattern in names:<EOL><INDENT>if re.match(pattern, aug.name):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL><DEDENT>return self.find_augmenters(comparer, flat=flat)<EOL><DEDENT>else:<EOL><INDENT>return self.find_augmenters(lambda aug, parents: aug.name in names, flat=flat)<EOL><DEDENT>", "docstring": "Find augmenter(s) by names.\n\nParameters\n----------\nnames : list of str\n    Names of the augmenter(s) to search for.\n\nregex : bool, optional\n    Whether `names` is a list of regular expressions.\n\nflat : boolean, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.find_augmenters`.\n\nReturns\n-------\naugmenters : list of imgaug.augmenters.meta.Augmenter\n    Nested list if flat was set to False.\n    Flat list if flat was set to True.", "id": "f16288:c0:m36"}
{"signature": "def augment_segmentation_maps(self, segmaps, parents=None, hooks=None):", "body": "input_was_single_instance = False<EOL>if isinstance(segmaps, ia.SegmentationMapOnImage):<EOL><INDENT>input_was_single_instance = True<EOL>segmaps = [segmaps]<EOL><DEDENT>heatmaps_with_nonempty = [segmap.to_heatmaps(only_nonempty=True, not_none_if_no_nonempty=True)<EOL>for segmap in segmaps]<EOL>heatmaps = [heatmaps_i for heatmaps_i, nonempty_class_indices_i in heatmaps_with_nonempty]<EOL>nonempty_class_indices = [nonempty_class_indices_i<EOL>for heatmaps_i, nonempty_class_indices_i in heatmaps_with_nonempty]<EOL>heatmaps_aug = self.augment_heatmaps(heatmaps, parents=parents, hooks=hooks)<EOL>segmaps_aug = []<EOL>for segmap, heatmaps_aug_i, nonempty_class_indices_i in zip(segmaps, heatmaps_aug, nonempty_class_indices):<EOL><INDENT>segmap_aug = ia.SegmentationMapOnImage.from_heatmaps(heatmaps_aug_i,<EOL>class_indices=nonempty_class_indices_i,<EOL>nb_classes=segmap.nb_classes)<EOL>segmap_aug.input_was = segmap.input_was<EOL>segmaps_aug.append(segmap_aug)<EOL><DEDENT>if input_was_single_instance:<EOL><INDENT>return segmaps_aug[<NUM_LIT:0>]<EOL><DEDENT>return segmaps_aug<EOL>", "docstring": "Augment segmentation maps.\n\nParameters\n----------\nsegmaps : imgaug.SegmentationMapOnImage or \\\n          list of imgaug.SegmentationMapOnImage\n    Segmentation map(s) to augment. Either a single heatmap or a list of\n    segmentation maps.\n\nparents : None or list of imgaug.augmenters.meta.Augmenter, optional\n    Parent augmenters that have previously been called before the\n    call to this function. 
Usually you can leave this parameter as None.\n    It is set automatically for child augmenters.\n\nhooks : None or imgaug.HooksHeatmaps, optional\n    HooksHeatmaps object to dynamically interfere with the augmentation process.\n\nReturns\n-------\nsegmaps_aug : imgaug.SegmentationMapOnImage or \\\n              list of imgaug.SegmentationMapOnImage\n    Corresponding augmented segmentation map(s).", "id": "f16288:c0:m9"}
{"signature": "def _augment_polygons_as_keypoints(self, polygons_on_images, random_state,<EOL>parents, hooks, recoverer=None):", "body": "kps_ois = []<EOL>kp_counts = []<EOL>for polys_oi in polygons_on_images:<EOL><INDENT>kps = []<EOL>kp_counts_image = []<EOL>for poly in polys_oi.polygons:<EOL><INDENT>poly_kps = poly.to_keypoints()<EOL>kps.extend(poly_kps)<EOL>kp_counts_image.append(len(poly_kps))<EOL><DEDENT>kps_ois.append(ia.KeypointsOnImage(kps, shape=polys_oi.shape))<EOL>kp_counts.append(kp_counts_image)<EOL><DEDENT>kps_ois_aug = self._augment_keypoints(kps_ois, random_state, parents, hooks)<EOL>result = []<EOL>gen = enumerate(zip(kps_ois_aug, kp_counts))<EOL>for img_idx, (kps_oi_aug, kp_counts_image) in gen:<EOL><INDENT>polys_aug = []<EOL>counter = <NUM_LIT:0><EOL>for i, count in enumerate(kp_counts_image):<EOL><INDENT>poly_kps_aug = kps_oi_aug.keypoints[counter:counter+count]<EOL>poly_old = polygons_on_images[img_idx].polygons[i]<EOL>if recoverer is not None:<EOL><INDENT>poly_aug = recoverer.recover_from(<EOL>[(kp.x, kp.y) for kp in poly_kps_aug],<EOL>poly_old,<EOL>random_state=random_state)<EOL><DEDENT>else:<EOL><INDENT>poly_aug = poly_old.deepcopy(exterior=poly_kps_aug)<EOL><DEDENT>polys_aug.append(poly_aug)<EOL>counter += count<EOL><DEDENT>result.append(ia.PolygonsOnImage(polys_aug, shape=kps_oi_aug.shape))<EOL><DEDENT>return result<EOL>", "docstring": "Augment polygons by applying keypoint augmentation to their vertices.\n\nParameters\n----------\npolygons_on_images : list of imgaug.PolygonsOnImage\n    Polygons to augment. 
They may be changed in-place.\n\nrandom_state : numpy.random.RandomState\n    The random state to use for all sampling tasks during the\n    augmentation.\n\nparents : list of imgaug.augmenters.meta.Augmenter\n    See :func:`imgaug.augmenters.meta.Augmenter.augment_polygons`.\n\nhooks : imgaug.HooksKeypoints or None\n    See :func:`imgaug.augmenters.meta.Augmenter.augment_polygons`.\n\nrecoverer : None or imgaug._ConcavePolygonRecoverer\n    An instance used to repair invalid polygons after augmentation.\n    Must offer the method\n    ``recover_from(new_exterior, old_polygon, random_state=0)``.\n    If ``None`` then invalid polygons are not repaired.\n\nReturns\n----------\nlist of imgaug.PolygonsOnImage\n    The augmented polygons.", "id": "f16288:c0:m17"}
{"signature": "@abstractmethod<EOL><INDENT>def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Augment keypoints on multiple images.\n\nThis is the internal variation of ``augment_keypoints()``.\nIt is called from ``augment_keypoints()`` and should usually not be\ncalled directly. It has to be implemented by every augmenter.\nThis method may transform the keypoints in-place.\nThis method does not have to care about determinism or the\nAugmenter instance's ``random_state`` variable. The parameter\n``random_state`` takes care of both of these.\n\nParameters\n----------\nkeypoints_on_images : list of imgaug.KeypointsOnImage\n    Keypoints to augment. They may be changed in-place.\n\nrandom_state : numpy.random.RandomState\n    The random state to use for all sampling tasks during the augmentation.\n\nparents : list of imgaug.augmenters.meta.Augmenter\n    See :func:`imgaug.augmenters.meta.Augmenter.augment_keypoints`.\n\nhooks : imgaug.HooksKeypoints or None\n    See :func:`imgaug.augmenters.meta.Augmenter.augment_keypoints`.\n\nReturns\n----------\nlist of imgaug.KeypointsOnImage\n    The augmented keypoints.", "id": "f16288:c0:m11"}
{"signature": "def copy(self):", "body": "return copy_module.copy(self)<EOL>", "docstring": "Create a shallow copy of this Augmenter instance.\n\nReturns\n-------\nimgaug.augmenters.meta.Augmenter\n    Shallow copy of this Augmenter instance.", "id": "f16288:c0:m39"}
{"signature": "def augment(self, return_batch=False, hooks=None, **kwargs):", "body": "assert ia.is_single_bool(return_batch), (<EOL>(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (str(type(return_batch)),)<EOL>)<EOL>expected_keys = [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\"]<EOL>expected_keys_call = [\"<STR_LIT:image>\"] + expected_keys<EOL>assert any([key in kwargs for key in expected_keys_call]), (<EOL>\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\" % (<EOL>\"<STR_LIT:U+002CU+0020>\".join(expected_keys_call),))<EOL>unknown_args = [key for key in kwargs.keys()<EOL>if key not in expected_keys_call]<EOL>assert len(unknown_args) == <NUM_LIT:0>, (<EOL>\"<STR_LIT>\" % (<EOL>\"<STR_LIT:U+002CU+0020>\".join(unknown_args)<EOL>))<EOL>if \"<STR_LIT:image>\" in kwargs:<EOL><INDENT>assert \"<STR_LIT>\" not in kwargs, (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>images = [kwargs[\"<STR_LIT:image>\"]]<EOL><DEDENT>else:<EOL><INDENT>images = kwargs.get(\"<STR_LIT>\", None)<EOL><DEDENT>order = \"<STR_LIT>\"<EOL>nb_keys = len(list(kwargs.keys()))<EOL>vinfo = sys.version_info<EOL>is_py36_or_newer = vinfo[<NUM_LIT:0>] > <NUM_LIT:3> or (vinfo[<NUM_LIT:0>] == <NUM_LIT:3> and vinfo[<NUM_LIT:1>] >= <NUM_LIT:6>)<EOL>if is_py36_or_newer:<EOL><INDENT>order = \"<STR_LIT>\"<EOL><DEDENT>elif not return_batch and nb_keys > <NUM_LIT:2>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>elif not return_batch and nb_keys == <NUM_LIT:2> and images is None:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>batch = UnnormalizedBatch(<EOL>images=images,<EOL>heatmaps=kwargs.get(\"<STR_LIT>\", None),<EOL>segmentation_maps=kwargs.get(\"<STR_LIT>\", 
None),<EOL>keypoints=kwargs.get(\"<STR_LIT>\", None),<EOL>bounding_boxes=kwargs.get(\"<STR_LIT>\", None),<EOL>polygons=kwargs.get(\"<STR_LIT>\", None),<EOL>line_strings=kwargs.get(\"<STR_LIT>\", None)<EOL>)<EOL>batch_aug = self.augment_batch(batch, hooks=hooks)<EOL>if return_batch:<EOL><INDENT>return batch_aug<EOL><DEDENT>result = []<EOL>if order == \"<STR_LIT>\":<EOL><INDENT>for key in kwargs:<EOL><INDENT>if key == \"<STR_LIT:image>\":<EOL><INDENT>attr = getattr(batch_aug, \"<STR_LIT>\")<EOL>result.append(attr[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>result.append(getattr(batch_aug, \"<STR_LIT>\" % (key,)))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for key in expected_keys:<EOL><INDENT>if key == \"<STR_LIT>\" and \"<STR_LIT:image>\" in kwargs:<EOL><INDENT>attr = getattr(batch_aug, \"<STR_LIT>\")<EOL>result.append(attr[<NUM_LIT:0>])<EOL><DEDENT>elif key in kwargs:<EOL><INDENT>result.append(getattr(batch_aug, \"<STR_LIT>\" % (key,)))<EOL><DEDENT><DEDENT><DEDENT>if len(result) == <NUM_LIT:1>:<EOL><INDENT>return result[<NUM_LIT:0>]<EOL><DEDENT>return tuple(result)<EOL>", "docstring": "Augment data.\n\nThis method is a wrapper around\n``imgaug.augmentables.batches.UnnormalizedBatch`` and\n``augment_batch()``. Hence, it supports the same datatypes as\n``UnnormalizedBatch``.\n\nIf `return_batch` was not set to ``True``, the method will return\na tuple of augmentables. It will return the same types of augmentables\n(only augmented) as input into the method. 
This behaviour\nis partly specific to the python version:\n  * In _python 3.6+_ (if ``return_batch=False``):\n    * Three or more augmentables may be used as input.\n    * The return order matches the order of the named arguments, e.g.\n      ``B, D, C = augment(B=x, D=y, C=z)``.\n    * None of the provided named arguments has to be `image` or `images`.\n  * In _python <3.6_  (if ``return_batch=False``):\n    * One or two augmentables may be used as input, not more than that.\n    * At least one the augmentables has to be `image` or `images`.\n    * The augmented images are always returned first.\n\nIf `return_batch` was not set to ``False``, an instance of\n``UnnormalizedBatch`` will be returned. The output is the same for\nall python version and any number or combination of augmentables may\nbe provided.\n\nAll augmentables must be provided as named arguments.\nE.g. ``augment(<array>)`` will crash, but ``augment(images=<array>)``\nwill work.\n\nParameters\n----------\nimage : None\n    or (H,W,C) ndarray \\\n    or (H,W) ndarray, \\\n    optional\n    The image to augment. Only this or `images` can be set, not both.\n    If `return_batch` is ``False`` and the python version is below 3.6,\n    either this or `images` _must_ be provided.\n\nimages : None \\\n    or (N,H,W,C) ndarray \\\n    or (N,H,W) ndarray \\\n    or iterable of (H,W,C) ndarray \\\n    or iterable of (H,W) ndarray, \\\n    optional\n    The images to augment. 
Only this or `image` can be set, not both.\n    If `return_batch` is ``False`` and the python version is below 3.6,\n    either this or `image` _must_ be provided.\n\nheatmaps : None \\\n    or (N,H,W,C) ndarray \\\n    or imgaug.augmentables.heatmaps.HeatmapsOnImage \\\n    or iterable of (H,W,C) ndarray \\\n    or iterable of imgaug.augmentables.heatmaps.HeatmapsOnImage, \\\n    optional\n    The heatmaps to augment.\n    If anything else than ``HeatmapsOnImage``, then the number of\n    heatmaps must match the number of images provided via parameter\n    `images`. The number is contained either in ``N`` or the first\n    iterable's size.\n\nsegmentation_maps : None \\\n    or (N,H,W) ndarray \\\n    or imgaug.augmentables.segmaps.SegmentationMapOnImage \\\n    or iterable of (H,W) ndarray \\\n    or iterable of imgaug.augmentables.segmaps.SegmentationMapOnImage, \\\n    optional\n    The segmentation maps to augment.\n    If anything else than ``SegmentationMapOnImage``, then the number\n    of segmaps must match the number of images provided via parameter\n    `images`. The number is contained either in ``N`` or the first\n    iterable's size.\n\nkeypoints : None \\\n    or list of (N,K,2) ndarray \\\n    or tuple of number \\\n    or imgaug.augmentables.kps.Keypoint \\\n    or iterable of (K,2) ndarray \\\n    or iterable of tuple of number \\\n    or iterable of imgaug.augmentables.kps.Keypoint \\\n    or iterable of imgaug.augmentables.kps.KeypointOnImage \\\n    or iterable of iterable of tuple of number \\\n    or iterable of iterable of imgaug.augmentables.kps.Keypoint, \\\n    optional\n    The keypoints to augment.\n    If a tuple (or iterable(s) of tuple), then iterpreted as (x,y)\n    coordinates and must hence contain two numbers.\n    A single tuple represents a single coordinate on one image, an\n    iterable of tuples the coordinates on one image and an iterable of\n    iterable of tuples the coordinates on several images. 
Analogous if\n    ``Keypoint`` objects are used instead of tuples.\n    If an ndarray, then ``N`` denotes the number of images and ``K``\n    the number of keypoints on each image.\n    If anything else than ``KeypointsOnImage`` is provided, then the\n    number of keypoint groups must match the number of images provided\n    via parameter `images`. The number is contained e.g. in ``N`` or\n    in case of \"iterable of iterable of tuples\" in the first iterable's\n    size.\n\nbounding_boxes : None \\\n    or (N,B,4) ndarray \\\n    or tuple of number \\\n    or imgaug.augmentables.bbs.BoundingBox \\\n    or imgaug.augmentables.bbs.BoundingBoxesOnImage \\\n    or iterable of (B,4) ndarray \\\n    or iterable of tuple of number \\\n    or iterable of imgaug.augmentables.bbs.BoundingBox \\\n    or iterable of imgaug.augmentables.bbs.BoundingBoxesOnImage \\\n    or iterable of iterable of tuple of number \\\n    or iterable of iterable imgaug.augmentables.bbs.BoundingBox, \\\n    optional\n    The bounding boxes to augment.\n    This is analogous to the `keypoints` parameter. 
However, each\n    tuple -- and also the last index in case of arrays -- has size 4,\n    denoting the bounding box coordinates ``x1``, ``y1``, ``x2`` and\n    ``y2``.\n\npolygons : None  \\\n    or (N,#polys,#points,2) ndarray \\\n    or imgaug.augmentables.polys.Polygon \\\n    or imgaug.augmentables.polys.PolygonsOnImage \\\n    or iterable of (#polys,#points,2) ndarray \\\n    or iterable of tuple of number \\\n    or iterable of imgaug.augmentables.kps.Keypoint \\\n    or iterable of imgaug.augmentables.polys.Polygon \\\n    or iterable of imgaug.augmentables.polys.PolygonsOnImage \\\n    or iterable of iterable of (#points,2) ndarray \\\n    or iterable of iterable of tuple of number \\\n    or iterable of iterable of imgaug.augmentables.kps.Keypoint \\\n    or iterable of iterable of imgaug.augmentables.polys.Polygon \\\n    or iterable of iterable of iterable of tuple of number \\\n    or iterable of iterable of iterable of tuple of \\\n    imgaug.augmentables.kps.Keypoint, \\\n    optional\n    The polygons to augment.\n    This is similar to the `keypoints` parameter. 
However, each polygon\n    may be made up of several (x,y) coordinates (three or more are\n    required for valid polygons).\n    The following datatypes will be interpreted as a single polygon on\n    a single image:\n      * ``imgaug.augmentables.polys.Polygon``\n      * ``iterable of tuple of number``\n      * ``iterable of imgaug.augmentables.kps.Keypoint``\n\n    The following datatypes will be interpreted as multiple polygons\n    on a single image:\n      * ``imgaug.augmentables.polys.PolygonsOnImage``\n      * ``iterable of imgaug.augmentables.polys.Polygon``\n      * ``iterable of iterable of tuple of number``\n      * ``iterable of iterable of imgaug.augmentables.kps.Keypoint``\n      * ``iterable of iterable of imgaug.augmentables.polys.Polygon``\n\n    The following datatypes will be interpreted as multiple polygons on\n    multiple images:\n      * ``(N,#polys,#points,2) ndarray``\n      * ``iterable of (#polys,#points,2) ndarray``\n      * ``iterable of iterable of (#points,2) ndarray``\n      * ``iterable of iterable of iterable of tuple of number``\n      * ``iterable of iterable of iterable of tuple of imgaug.augmentables.kps.Keypoint``\n\nline_strings : None \\\n    or (N,#lines,#points,2) ndarray \\\n    or imgaug.augmentables.lines.LineString \\\n    or imgaug.augmentables.lines.LineStringOnImage \\\n    or iterable of (#polys,#points,2) ndarray \\\n    or iterable of tuple of number \\\n    or iterable of imgaug.augmentables.kps.Keypoint \\\n    or iterable of imgaug.augmentables.lines.LineString \\\n    or iterable of imgaug.augmentables.lines.LineStringOnImage \\\n    or iterable of iterable of (#points,2) ndarray \\\n    or iterable of iterable of tuple of number \\\n    or iterable of iterable of imgaug.augmentables.kps.Keypoint \\\n    or iterable of iterable of imgaug.augmentables.lines.LineString \\\n    or iterable of iterable of iterable of tuple of number \\\n    or iterable of iterable of iterable of tuple of \\\n    
imgaug.augmentables.kps.Keypoint, \\\n    optional\n    The line strings to augment.\n    See `polygons`, which behaves similarly.\n\nreturn_batch : bool, optional\n    Whether to return an instance of\n    `imgaug.augmentables.batches.UnnormalizedBatch`. If the python\n    version is below 3.6 and more than two augmentables were provided\n    (e.g. images, keypoints and polygons), then this must be set to\n    ``True``. Otherwise an error will be raised.\n\nhooks : None or imgaug.imgaug.HooksImages, optional\n    Hooks object to dynamically interfere with the augmentation process.\n\nReturns\n-------\ntuple or imgaug.augmentables.batches.UnnormalizedBatch\n    If `return_batch` was set to ``True``, a instance of\n    ``UnnormalizedBatch`` will be returned.\n    If `return_batch` was set to ``False``, a tuple of augmentables\n    will be returned, e.g. ``(augmented images, augmented keypoints)``.\n    The datatypes match the input datatypes of the corresponding named\n    arguments. In python <3.6, augmented images are always the first\n    entry in the returned tuple. In python 3.6+ the order matches the\n    order of the named arguments.\n\nExamples\n--------\n>>> import numpy as np\n>>> import imgaug as ia\n>>> import imgaug.augmenters as iaa\n>>> aug = iaa.Affine(rotate=(-25, 25))\n>>> image = np.zeros((64, 64, 3), dtype=np.uint8)\n>>> keypoints = [(10, 20), (30, 32)]  # (x,y) coordinates\n>>> images_aug, keypoints_aug = aug.augment(\n>>>     image=image, keypoints=keypoints)\n\nThis creates a single image and a set of two keypoints on it, then\naugments both by applying a random rotation between -25deg and +25deg.\nThe sampled rotation value is automatically aligned between image and\nkeypoints. 
Note that in python <3.6, augmented images will always be\nreturned first, independent of the order of the named input arguments.\nSo ``keypoints_aug, images_aug = aug.augment(keypoints=keypoints,\nimage=image)`` would _not_ work (except in python 3.6+).\n\n>>> import numpy as np\n>>> import imgaug as ia\n>>> import imgaug.augmenters as iaa\n>>> aug = iaa.Affine(rotate=(-25, 25))\n>>> images = [np.zeros((64, 64, 3), dtype=np.uint8),\n>>>           np.zeros((32, 32, 3), dtype=np.uint8)]\n>>> keypoints = [[(10, 20), (30, 32)],  # KPs on first image\n>>>              [(22, 10), (12, 14)]]  # KPs on second image\n>>> bbs = [\n>>>           [ia.BoundingBox(x1=5, y1=5, x2=50, y2=45)],\n>>>           [ia.BoundingBox(x1=4, y1=6, x2=10, y2=15),\n>>>            ia.BoundingBox(x1=8, y1=9, x2=16, y2=30)]\n>>>       ]  # one BB on first image, two BBs on second image\n>>> batch_aug = aug.augment(\n>>>     images=images, keypoints=keypoints, bounding_boxes=bbs,\n>>>     return_batch=True)\n\nThis creates two images of size 64x64 and 32x32, two sets of keypoints\n(each containing two keypoints) and two sets of bounding boxes (the\nfirst containing one bounding box, the second two bounding boxes).\nThese augmentables are then augmented by applying random rotations\nbetween -25deg and +25deg to them. The rotation values are sampled\nby image and aligned between all augmentables on the same image.\nThe method finally returns an instance of ``UnnormalizedBatch`` from\nwhich the augmented data can be retrieved via ``batch_aug.images_aug``,\n``batch_aug.keypoints_aug``, and ``batch_aug.bounding_boxes_aug``.\nIn python 3.6+, `return_batch` can be kept at ``False`` and the\naugmented data can be retrieved as\n``images_aug, keypoints_aug, bbs_aug = augment(...)``.", "id": "f16288:c0:m19"}
{"signature": "def _augment_coord_augables(self, cls_expected, subaugment_func,<EOL>augables_ois, parents=None,<EOL>hooks=None):", "body": "if self.deterministic:<EOL><INDENT>state_orig = self.random_state.get_state()<EOL><DEDENT>if parents is None:<EOL><INDENT>parents = []<EOL><DEDENT>input_was_single_instance = False<EOL>if isinstance(augables_ois, cls_expected):<EOL><INDENT>input_was_single_instance = True<EOL>augables_ois = [augables_ois]<EOL><DEDENT>ia.do_assert(ia.is_iterable(augables_ois))<EOL>ia.do_assert(all([isinstance(augable_oi, cls_expected)<EOL>for augable_oi in augables_ois]))<EOL>augables_ois_copy = augables_ois<EOL>if len(parents) == <NUM_LIT:0> or hooks is not None:<EOL><INDENT>augables_ois_copy = [augable_oi.deepcopy()<EOL>for augable_oi<EOL>in augables_ois]<EOL><DEDENT>if hooks is not None:<EOL><INDENT>augables_ois_copy = hooks.preprocess(<EOL>augables_ois_copy, augmenter=self, parents=parents)<EOL><DEDENT>augables_ois_result = augables_ois_copy<EOL>is_activated = (hooks is None and self.activated)<EOL>is_activated_hooks = (is_activated is False) and (<EOL>hooks is not None<EOL>and hooks.is_activated(augables_ois_copy,<EOL>augmenter=self, parents=parents,<EOL>default=self.activated)<EOL>)<EOL>if is_activated or is_activated_hooks:<EOL><INDENT>if len(augables_ois) > <NUM_LIT:0>:<EOL><INDENT>augables_ois_result = subaugment_func(<EOL>augables_ois_copy,<EOL>ia.copy_random_state(self.random_state),<EOL>parents,<EOL>hooks<EOL>)<EOL>ia.forward_random_state(self.random_state)<EOL><DEDENT><DEDENT>if hooks is not None:<EOL><INDENT>augables_ois_result = hooks.postprocess(<EOL>augables_ois_result, augmenter=self, parents=parents)<EOL><DEDENT>if self.deterministic:<EOL><INDENT>self.random_state.set_state(state_orig)<EOL><DEDENT>if input_was_single_instance:<EOL><INDENT>return augables_ois_result[<NUM_LIT:0>]<EOL><DEDENT>return augables_ois_result<EOL>", "docstring": "Augment coordinate-based augmentables.\n\nThis is an abstract function called by keypoints, 
bounding boxes,\npolygons and line strings.\nTODO keypoints, bounding boxes currently missing -- add them\n\nParameters\n----------\ncls_expected : class\n    Class type that is expected. `augmentables_ois` will be\n    verified to use that class.\n\nsubaugment_func : callable\n    Function that will be called to actually augment the data.\n\naugables_ois : imgaug.augmentables.polys.PolygonsOnImage \\\n               or imgaug.augmentables.lines.LineStringsOnImage \\\n               or list of imgaug.augmentables.lines.LineStringsOnImage \\\n               or list of imgaug.augmentables.polys.PolygonsOnImage\n    The augmentables to augment. `augables_ois` is the abbreviation for\n    \"augmentables_on_images\". Expected are the augmentables on a\n    single image (single instance) or 1+ images (list of instances).\n\nparents : None or list of imgaug.augmenters.meta.Augmenter, optional\n    Parent augmenters that have previously been called before the\n    call to this function. Usually you can leave this parameter as None.\n    It is set automatically for child augmenters.\n\nhooks : None or imgaug.HooksKeypoints, optional\n    HooksKeypoints object to dynamically interfere with the\n    augmentation process.\n\nReturns\n-------\nimgaug.augmentables.polys.PolygonsOnImage \\\nor imgaug.augmentables.lines.LineStringsOnImage \\\nor list of imgaug.augmentables.polys.PolygonsOnImage \\\nor list of imgaug.augmentables.lines.LineStringsOnImage\n    Augmented augmentables.", "id": "f16288:c0:m15"}
{"signature": "def shuffle_channels(image, random_state, channels=None):", "body": "if image.ndim < <NUM_LIT:3> or image.shape[<NUM_LIT:2>] == <NUM_LIT:1>:<EOL><INDENT>return image<EOL><DEDENT>nb_channels = image.shape[<NUM_LIT:2>]<EOL>all_channels = np.arange(nb_channels)<EOL>is_all_channels = (<EOL>channels is None<EOL>or channels == ia.ALL<EOL>or len(set(all_channels).difference(set(channels))) == <NUM_LIT:0><EOL>)<EOL>if is_all_channels:<EOL><INDENT>channels_perm = random_state.permutation(all_channels)<EOL>return image[..., channels_perm]<EOL><DEDENT>else:<EOL><INDENT>channels_perm = random_state.permutation(channels)<EOL>channels_perm_full = all_channels<EOL>for channel_source, channel_target in zip(channels, channels_perm):<EOL><INDENT>channels_perm_full[channel_source] = channel_target<EOL><DEDENT>return image[..., channels_perm_full]<EOL><DEDENT>", "docstring": "Randomize the order of (color) channels in an image.\n\ndtype support::\n\n    * ``uint8``: yes; fully tested\n    * ``uint16``: yes; indirectly tested (1)\n    * ``uint32``: yes; indirectly tested (1)\n    * ``uint64``: yes; indirectly tested (1)\n    * ``int8``: yes; indirectly tested (1)\n    * ``int16``: yes; indirectly tested (1)\n    * ``int32``: yes; indirectly tested (1)\n    * ``int64``: yes; indirectly tested (1)\n    * ``float16``: yes; indirectly tested (1)\n    * ``float32``: yes; indirectly tested (1)\n    * ``float64``: yes; indirectly tested (1)\n    * ``float128``: yes; indirectly tested (1)\n    * ``bool``: yes; indirectly tested (1)\n\n    - (1) Indirectly tested via ``ChannelShuffle``.\n\nParameters\n----------\nimage : (H,W,[C]) ndarray\n    Image of any dtype for which to shuffle the channels.\n\nrandom_state : numpy.random.RandomState\n    The random state to use for this shuffling operation.\n\nchannels : None or imgaug.ALL or list of int, optional\n    Which channels are allowed to be shuffled with each other.\n    If this is ``None`` or ``imgaug.ALL``, then all channels 
may be shuffled. If it is a list of integers,\n    then only the channels with indices in that list may be shuffled. (Values start at 0. All channel indices in\n    the list must exist in each image.)\n\nReturns\n-------\nndarray\n    The input image with shuffled channels.", "id": "f16288:m12"}
{"signature": "def localize_random_state(self, recursive=True):", "body": "aug = self.deepcopy()<EOL>aug.localize_random_state_(<EOL>recursive=recursive<EOL>)<EOL>return aug<EOL>", "docstring": "Converts global random states to local ones.\nSee :func:`Augmenter.localize_random_state_` for more details.\n\nParameters\n----------\nrecursive : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.localize_random_state_`.\n\nReturns\n-------\naug : imgaug.augmenters.meta.Augmenter\n    Returns copy of augmenter and children, with localized random states.", "id": "f16288:c0:m27"}
{"signature": "def get_children_lists(self):", "body": "return []<EOL>", "docstring": "Get a list of lists of children of this augmenter.\n\nFor most augmenters, the result will be a single empty list.\nFor augmenters with children it will often be a list with one sublist containing all\nchildren. In some cases the augmenter will contain multiple distinct lists of children,\ne.g. an if-list and an else-list. This will lead to a result consisting of a single\nlist with multiple sublists, each representing the respective sublist of children.\n\nE.g. for an if/else-augmenter that executes the children ``A1``, ``A2`` if a condition is met\nand otherwise executes the children ``B1``, ``B2``, ``B3`` the result will\nbe ``[[A1, A2], [B1, B2, B3]]``.\n\nIMPORTANT: While the topmost list may be newly created, each of the sublist must be\neditable inplace resulting in a changed children list of the augmenter. E.g. if\nan Augmenter ``IfElse(condition, [A1, A2], [B1, B2, B3])`` returns ``[[A1, A2], [B1, B2, B3]]``\nfor a call to :func:`imgaug.augmenters.meta.Augmenter.get_children_lists` and\n``A2`` is removed inplace from ``[A1, A2]``, then the\nchildren lists of ``IfElse(...)`` must also change to ``[A1], [B1, B2, B3]``. This is used\nin :func:`imgaug.augmeneters.meta.Augmenter.remove_augmenters_inplace`.\n\nReturns\n-------\nchildren : list of list of imgaug.augmenters.meta.Augmenter\n    One or more lists of child augmenter.\n    Can also be a single empty list.", "id": "f16288:c0:m32"}
{"signature": "def augment_heatmaps(self, heatmaps, parents=None, hooks=None):", "body": "if self.deterministic:<EOL><INDENT>state_orig = self.random_state.get_state()<EOL><DEDENT>if parents is None:<EOL><INDENT>parents = []<EOL><DEDENT>input_was_single_instance = False<EOL>if isinstance(heatmaps, ia.HeatmapsOnImage):<EOL><INDENT>input_was_single_instance = True<EOL>heatmaps = [heatmaps]<EOL><DEDENT>ia.do_assert(ia.is_iterable(heatmaps),<EOL>\"<STR_LIT>\" % (type(heatmaps),))<EOL>ia.do_assert(all([isinstance(heatmaps_i, ia.HeatmapsOnImage) for heatmaps_i in heatmaps]),<EOL>\"<STR_LIT>\" % (<EOL>[type(el) for el in heatmaps],))<EOL>if len(parents) == <NUM_LIT:0> or hooks is not None:<EOL><INDENT>heatmaps_copy = [heatmaps_i.deepcopy() for heatmaps_i in heatmaps]<EOL><DEDENT>else:<EOL><INDENT>heatmaps_copy = heatmaps<EOL><DEDENT>if hooks is not None:<EOL><INDENT>heatmaps_copy = hooks.preprocess(heatmaps_copy, augmenter=self, parents=parents)<EOL><DEDENT>if (hooks is None and self.activated)or (hooks is not None<EOL>and hooks.is_activated(heatmaps_copy, augmenter=self, parents=parents, default=self.activated)):<EOL><INDENT>if len(heatmaps_copy) > <NUM_LIT:0>:<EOL><INDENT>heatmaps_result = self._augment_heatmaps(<EOL>heatmaps_copy,<EOL>random_state=ia.copy_random_state(self.random_state),<EOL>parents=parents,<EOL>hooks=hooks<EOL>)<EOL>ia.forward_random_state(self.random_state)<EOL><DEDENT>else:<EOL><INDENT>heatmaps_result = heatmaps_copy<EOL><DEDENT><DEDENT>else:<EOL><INDENT>heatmaps_result = heatmaps_copy<EOL><DEDENT>if hooks is not None:<EOL><INDENT>heatmaps_result = hooks.postprocess(heatmaps_result, augmenter=self, parents=parents)<EOL><DEDENT>if self.deterministic:<EOL><INDENT>self.random_state.set_state(state_orig)<EOL><DEDENT>if input_was_single_instance:<EOL><INDENT>return heatmaps_result[<NUM_LIT:0>]<EOL><DEDENT>return heatmaps_result<EOL>", "docstring": "Augment a heatmap.\n\nParameters\n----------\nheatmaps : imgaug.HeatmapsOnImage or list of 
imgaug.HeatmapsOnImage\n    Heatmap(s) to augment. Either a single heatmap or a list of\n    heatmaps.\n\nparents : None or list of imgaug.augmenters.meta.Augmenter, optional\n    Parent augmenters that have previously been called before the\n    call to this function. Usually you can leave this parameter as None.\n    It is set automatically for child augmenters.\n\nhooks : None or imaug.HooksHeatmaps, optional\n    HooksHeatmaps object to dynamically interfere with the augmentation process.\n\nReturns\n-------\nheatmap_result : imgaug.HeatmapsOnImage or list of imgaug.HeatmapsOnImage\n    Corresponding augmented heatmap(s).", "id": "f16288:c0:m6"}
{"signature": "def get_all_children(self, flat=False):", "body": "result = []<EOL>for lst in self.get_children_lists():<EOL><INDENT>for aug in lst:<EOL><INDENT>result.append(aug)<EOL>children = aug.get_all_children(flat=flat)<EOL>if len(children) > <NUM_LIT:0>:<EOL><INDENT>if flat:<EOL><INDENT>result.extend(children)<EOL><DEDENT>else:<EOL><INDENT>result.append(children)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return result<EOL>", "docstring": "Returns all children of this augmenter as a list.\n\nIf the augmenter has no children, the returned list is empty.\n\nParameters\n----------\nflat : bool\n    If set to True, the returned list will be flat.\n\nReturns\n-------\nresult : list of imgaug.augmenters.meta.Augmenter\n    The children as a nested or flat list.", "id": "f16288:c0:m33"}
{"signature": "def augment_polygons(self, polygons_on_images, parents=None, hooks=None):", "body": "from imgaug.augmentables.polys import PolygonsOnImage<EOL>def _subaugment(polygons_on_images_, random_state_, parents_, hooks_):<EOL><INDENT>return self._augment_polygons(<EOL>polygons_on_images_,<EOL>random_state=random_state_,<EOL>parents=parents_,<EOL>hooks=hooks_<EOL>)<EOL><DEDENT>return self._augment_coord_augables(<EOL>cls_expected=PolygonsOnImage,<EOL>subaugment_func=_subaugment,<EOL>augables_ois=polygons_on_images,<EOL>parents=parents,<EOL>hooks=hooks<EOL>)<EOL>", "docstring": "Augment polygons.\n\nThis is the corresponding function to ``augment_keypoints()``, just for\npolygons.\nUsually you will want to call ``augment_images()`` with a list of\nimages, e.g. ``augment_images([A, B, C])`` and then\n``augment_polygons()`` with the corresponding list of polygons on these\nimages, e.g. ``augment_polygons([A_poly, B_poly, C_poly])``, where\n``A_poly`` are the polygons on image ``A``.\n\nMake sure to first convert the augmenter(s) to deterministic states\nbefore augmenting images and their corresponding polygons,\ne.g. by\n\n>>> import imgaug as ia\n>>> import imgaug.augmenters as iaa\n>>> A = B = C = np.ones((10, 10), dtype=np.uint8)\n>>> Apoly = Bpoly = Cpoly = ia.PolygonsOnImage(\n>>>     [ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])],\n>>>     shape=(10, 10))\n>>> seq = iaa.Fliplr(0.5)\n>>> seq_det = seq.to_deterministic()\n>>> imgs_aug = seq_det.augment_images([A, B, C])\n>>> polys_aug = seq_det.augment_polygons([Apoly, Bpoly, Cpoly])\n\nOtherwise, different random values will be sampled for the image\nand polygon augmentations, resulting in different augmentations\n(e.g. images might be rotated by ``30deg`` and polygons by\n``-10deg``). 
Also make sure to call ``to_deterministic()`` again for\neach new batch, otherwise you would augment all batches in the same\nway.\n\nParameters\n----------\npolygons_on_images : imgaug.PolygonsOnImage or \\\n                     list of imgaug.PolygonsOnImage\n    The polygons to augment.\n    Expected is an instance of imgaug.PolygonsOnImage or a list of\n    imgaug.PolygonsOnImage objects, with each such object\n    containing the polygons of a single image.\n\nparents : None or list of imgaug.augmenters.meta.Augmenter, optional\n    Parent augmenters that have previously been called before the\n    call to this function. Usually you can leave this parameter as None.\n    It is set automatically for child augmenters.\n\nhooks : None or imgaug.HooksKeypoints, optional\n    HooksKeypoints object to dynamically interfere with the\n    augmentation process.\n\nReturns\n-------\nresult : imgaug.PolygonsOnImage or list of imgaug.PolygonsOnImage\n    Augmented polygons.", "id": "f16288:c0:m13"}
{"signature": "def copy_random_state(self, source, recursive=True, matching=\"<STR_LIT>\", matching_tolerant=True,<EOL>copy_determinism=False):", "body": "aug = self.deepcopy()<EOL>aug.copy_random_state_(<EOL>source,<EOL>recursive=recursive,<EOL>matching=matching,<EOL>matching_tolerant=matching_tolerant,<EOL>copy_determinism=copy_determinism<EOL>)<EOL>return aug<EOL>", "docstring": "Copy the random states from a source augmenter sequence.\n\nParameters\n----------\nsource : imgaug.augmenters.meta.Augmenter\n    See :func:`imgaug.augmenters.meta.Augmenter.copy_random_state_`.\n\nrecursive : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.copy_random_state_`.\n\nmatching : {'position', 'name'}, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.copy_random_state_`.\n\nmatching_tolerant : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.copy_random_state_`.\n\ncopy_determinism : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.copy_random_state_`.\n\nReturns\n-------\naug : imgaug.augmenters.meta.Augmenter\n    Copy of the augmenter(s) with the same random state(s) as in the\n    source augmenter(s).", "id": "f16288:c0:m29"}
{"signature": "def draw_grid(self, images, rows, cols):", "body": "if ia.is_np_array(images):<EOL><INDENT>if len(images.shape) == <NUM_LIT:4>:<EOL><INDENT>images = [images[i] for i in range(images.shape[<NUM_LIT:0>])]<EOL><DEDENT>elif len(images.shape) == <NUM_LIT:3>:<EOL><INDENT>images = [images]<EOL><DEDENT>elif len(images.shape) == <NUM_LIT:2>:<EOL><INDENT>images = [images[:, :, np.newaxis]]<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\" % (images.shape,))<EOL><DEDENT><DEDENT>elif isinstance(images, list):<EOL><INDENT>for i, image in enumerate(images):<EOL><INDENT>if len(image.shape) == <NUM_LIT:3>:<EOL><INDENT>continue<EOL><DEDENT>elif len(image.shape) == <NUM_LIT:2>:<EOL><INDENT>images[i] = image[:, :, np.newaxis]<EOL><DEDENT>else:<EOL><INDENT>raise Exception((\"<STR_LIT>\"<EOL>+ \"<STR_LIT>\") % (i, image.shape,))<EOL><DEDENT><DEDENT><DEDENT>ia.do_assert(isinstance(images, list))<EOL>det = self if self.deterministic else self.to_deterministic()<EOL>augs = []<EOL>for image in images:<EOL><INDENT>augs.append(det.augment_images([image] * (rows * cols)))<EOL><DEDENT>augs_flat = list(itertools.chain(*augs))<EOL>cell_height = max([image.shape[<NUM_LIT:0>] for image in augs_flat])<EOL>cell_width = max([image.shape[<NUM_LIT:1>] for image in augs_flat])<EOL>width = cell_width * cols<EOL>height = cell_height * (rows * len(images))<EOL>grid = np.zeros((height, width, <NUM_LIT:3>), dtype=augs[<NUM_LIT:0>][<NUM_LIT:0>].dtype)<EOL>for row_idx in range(rows):<EOL><INDENT>for img_idx, image in enumerate(images):<EOL><INDENT>for col_idx in range(cols):<EOL><INDENT>image_aug = augs[img_idx][(row_idx * cols) + col_idx]<EOL>cell_y1 = cell_height * (row_idx * len(images) + img_idx)<EOL>cell_y2 = cell_y1 + image_aug.shape[<NUM_LIT:0>]<EOL>cell_x1 = cell_width * col_idx<EOL>cell_x2 = cell_x1 + image_aug.shape[<NUM_LIT:1>]<EOL>grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image_aug<EOL><DEDENT><DEDENT><DEDENT>return grid<EOL>", "docstring": "Apply this 
augmenter to the given images and return a grid image of the results.\nEach cell in the grid contains a single augmented variation of an input image.\n\nIf multiple images are provided, the row count is multiplied by\nthe number of images and each image gets its own row.\nE.g. for ``images = [A, B]``, ``rows=2``, ``cols=3``::\n\n    A A A\n    B B B\n    A A A\n    B B B\n\nfor ``images = [A]``, ``rows=2``,\n``cols=3``::\n\n    A A A\n    A A A\n\nParameters\n-------\nimages : (N,H,W,3) ndarray or (H,W,3) ndarray or (H,W) ndarray or list of (H,W,3) ndarray\\\n         or list of (H,W) ndarray\n    List of images of which to show the augmented versions.\n    If a list, then each element is expected to have shape ``(H, W)`` or\n    ``(H, W, 3)``. If a single array, then it is expected to have\n    shape ``(N, H, W, 3)`` or ``(H, W, 3)`` or ``(H, W)``.\n\nrows : int\n    Number of rows in the grid.\n    If ``N`` input images are given, this value will automatically be\n    multiplied by ``N`` to create rows for each image.\n\ncols : int\n    Number of columns in the grid.\n\nReturns\n-------\ngrid : (Hg, Wg, 3) ndarray\n    The generated grid image with augmented versions of the input\n    images. Here, ``Hg`` and ``Wg`` reference the output size of the grid,\n    and *not* the sizes of the input images.", "id": "f16288:c0:m22"}
{"signature": "def add(self, augmenter):", "body": "self.append(augmenter)<EOL>", "docstring": "Add an augmenter to the list of child augmenters.\n\n        Parameters\n        ----------\n        augmenter : imgaug.augmenters.meta.Augmenter\n            The augmenter to add.", "id": "f16288:c1:m7"}
{"signature": "def deepcopy(self):", "body": "return copy_module.deepcopy(self)<EOL>", "docstring": "Create a deep copy of this Augmenter instance.\n\nReturns\n-------\nimgaug.augmenters.meta.Augmenter\n    Deep copy of this Augmenter instance.", "id": "f16288:c0:m40"}
{"signature": "@abstractmethod<EOL><INDENT>def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):<DEDENT>", "body": "raise NotImplementedError()<EOL>", "docstring": "Augment heatmaps on multiple images.\n\nThis is the internal version of ``augment_heatmaps()``.\nIt is called from ``augment_heatmaps()`` and should usually not be called\ndirectly.\nThis method may heatmaps in-place.\nThis method does not have to care about determinism or the\nAugmenter instance's ``random_state`` variable. The parameter\n``random_state`` takes care of both of these.\n\nParameters\n----------\nheatmaps : list of imgaug.HeatmapsOnImage\n    Heatmaps to augment. They may be changed in-place.\n\nparents : list of imgaug.augmenters.meta.Augmenter\n    See :func:`imgaug.augmenters.meta.Augmenter.augment_heatmaps`.\n\nhooks : imgaug.HooksHeatmaps or None\n    See :func:`imgaug.augmenters.meta.Augmenter.augment_heatmaps`.\n\nReturns\n----------\nimages : list of imgaug.HeatmapsOnImage\n    The augmented heatmaps.", "id": "f16288:c0:m7"}
{"signature": "def augment_batch(self, batch, hooks=None):", "body": "batch_orig = batch<EOL>if isinstance(batch, UnnormalizedBatch):<EOL><INDENT>batch = batch.to_normalized_batch()<EOL><DEDENT>augmentables = [(attr_name[:-len(\"<STR_LIT>\")], attr)<EOL>for attr_name, attr<EOL>in batch.__dict__.items()<EOL>if attr_name.endswith(\"<STR_LIT>\") and attr is not None]<EOL>augseq = self<EOL>if len(augmentables) > <NUM_LIT:1> and not self.deterministic:<EOL><INDENT>augseq = self.to_deterministic()<EOL><DEDENT>for attr_name, attr in augmentables:<EOL><INDENT>aug = getattr(augseq, \"<STR_LIT>\" % (attr_name,))(<EOL>attr, hooks=hooks)<EOL>setattr(batch, \"<STR_LIT>\" % (attr_name,), aug)<EOL><DEDENT>if isinstance(batch_orig, UnnormalizedBatch):<EOL><INDENT>batch = batch_orig.fill_from_augmented_normalized_batch(batch)<EOL><DEDENT>return batch<EOL>", "docstring": "Augment a single batch.\n\nParameters\n----------\nbatch : imgaug.augmentables.batches.Batch \\\n        or imgaug.augmentables.batches.UnnormalizedBatch\n    A single batch to augment.\n\nhooks : None or imgaug.HooksImages, optional\n    HooksImages object to dynamically interfere with the augmentation\n    process.\n\nReturns\n-------\nimgaug.augmentables.batches.Batch \\\n        or imgaug.augmentables.batches.UnnormalizedBatch\n    Augmented batch.", "id": "f16288:c0:m2"}
{"signature": "def GammaContrast(gamma=<NUM_LIT:1>, per_channel=False, name=None, deterministic=False, random_state=None):", "body": "params1d = [iap.handle_continuous_param(gamma, \"<STR_LIT>\", value_range=None, tuple_to_uniform=True,<EOL>list_to_choice=True)]<EOL>func = adjust_contrast_gamma<EOL>return _ContrastFuncWrapper(<EOL>func, params1d, per_channel,<EOL>dtypes_allowed=[\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>dtypes_disallowed=[\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:bool>\"],<EOL>name=name if name is not None else ia.caller_name(),<EOL>deterministic=deterministic,<EOL>random_state=random_state<EOL>)<EOL>", "docstring": "Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``.\n\nValues in the range ``gamma=(0.5, 2.0)`` seem to be sensible.\n\ndtype support::\n\n    See :func:`imgaug.augmenters.contrast.adjust_contrast_gamma`.\n\nParameters\n----------\ngamma : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n    Exponent for the contrast adjustment. Higher values darken the image.\n\n        * If a number, then that value will be used for all images.\n        * If a tuple ``(a, b)``, then a value from the range ``[a, b]`` will be used per image.\n        * If a list, then a random value will be sampled from that list per image.\n        * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\nper_channel :  bool or float, optional\n    Whether to use the same value for all channels (False) or to sample a new value for each\n    channel (True). 
If this value is a float ``p``, then for ``p`` percent of all images `per_channel`\n    will be treated as True, otherwise as False.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nReturns\n-------\n_ContrastFuncWrapper\n    Augmenter to perform gamma contrast adjustment.", "id": "f16289:m4"}
{"signature": "def LogContrast(gain=<NUM_LIT:1>, per_channel=False, name=None, deterministic=False, random_state=None):", "body": "<EOL>params1d = [iap.handle_continuous_param(gain, \"<STR_LIT>\", value_range=(<NUM_LIT:0>, None), tuple_to_uniform=True,<EOL>list_to_choice=True)]<EOL>func = adjust_contrast_log<EOL>return _ContrastFuncWrapper(<EOL>func, params1d, per_channel,<EOL>dtypes_allowed=[\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>dtypes_disallowed=[\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT:bool>\"],<EOL>name=name if name is not None else ia.caller_name(),<EOL>deterministic=deterministic,<EOL>random_state=random_state<EOL>)<EOL>", "docstring": "Adjust contrast by scaling each pixel value to ``255 * gain * log_2(1 + I_ij/255)``.\n\ndtype support::\n\n    See :func:`imgaug.augmenters.contrast.adjust_contrast_log`.\n\nParameters\n----------\ngain : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n    Multiplier for the logarithm result. Values around 1.0 lead to a contrast-adjusted\n    images. Values above 1.0 quickly lead to partially broken images due to exceeding the\n    datatype's value range.\n\n        * If a number, then that value will be used for all images.\n        * If a tuple ``(a, b)``, then a value from the range ``[a, b]`` will be used per image.\n        * If a list, then a random value will be sampled from that list per image.\n        * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\nper_channel :  bool or float, optional\n    Whether to use the same value for all channels (False) or to sample a new value for each\n    channel (True). 
If this value is a float ``p``, then for ``p`` percent of all images `per_channel`\n    will be treated as True, otherwise as False.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nReturns\n-------\n_ContrastFuncWrapper\n    Augmenter to perform logarithmic contrast adjustment.", "id": "f16289:m6"}
{"signature": "def adjust_contrast_sigmoid(arr, gain, cutoff):", "body": "<EOL>if arr.dtype.name == \"<STR_LIT>\":<EOL><INDENT>min_value, _center_value, max_value = iadt.get_value_range_of_dtype(arr.dtype)<EOL>dynamic_range = max_value - min_value<EOL>value_range = np.linspace(<NUM_LIT:0>, <NUM_LIT:1.0>, num=dynamic_range+<NUM_LIT:1>, dtype=np.float32)<EOL>gain = np.float32(gain)<EOL>cutoff = np.float32(cutoff)<EOL>table = min_value + dynamic_range * <NUM_LIT:1>/(<NUM_LIT:1> + np.exp(gain * (cutoff - value_range)))<EOL>arr_aug = cv2.LUT(arr, np.clip(table, min_value, max_value).astype(arr.dtype))<EOL>if arr.ndim == <NUM_LIT:3> and arr_aug.ndim == <NUM_LIT:2>:<EOL><INDENT>return arr_aug[..., np.newaxis]<EOL><DEDENT>return arr_aug<EOL><DEDENT>else:<EOL><INDENT>return ski_exposure.adjust_sigmoid(arr, cutoff=cutoff, gain=gain)<EOL><DEDENT>", "docstring": "Adjust contrast by scaling each pixel value to ``255 * 1/(1 + exp(gain*(cutoff - I_ij/255)))``.\n\ndtype support::\n\n    * ``uint8``: yes; fully tested (1) (2) (3)\n    * ``uint16``: yes; tested (2) (3)\n    * ``uint32``: yes; tested (2) (3)\n    * ``uint64``: yes; tested (2) (3) (4)\n    * ``int8``: limited; tested (2) (3) (5)\n    * ``int16``: limited; tested (2) (3) (5)\n    * ``int32``: limited; tested (2) (3) (5)\n    * ``int64``: limited; tested (2) (3) (4) (5)\n    * ``float16``: limited; tested (5)\n    * ``float32``: limited; tested (5)\n    * ``float64``: limited; tested (5)\n    * ``float128``: no (6)\n    * ``bool``: no (7)\n\n    - (1) Handled by ``cv2``. Other dtypes are handled by ``skimage``.\n    - (2) Normalization is done as ``I_ij/max``, where ``max`` is the maximum value of the\n          dtype, e.g. 255 for ``uint8``. The normalization is reversed afterwards,\n          e.g. ``result*255`` for ``uint8``.\n    - (3) Integer-like values are not rounded after applying the contrast adjustment equation\n          (before inverting the normalization to 0.0-1.0 space), i.e. 
projection from continuous\n          space to discrete happens according to floor function.\n    - (4) Note that scikit-image doc says that integers are converted to ``float64`` values before\n          applying the contrast normalization method. This might lead to inaccuracies for large\n          64bit integer values. Tests showed no indication of that happening though.\n    - (5) Must not contain negative values. Values >=0 are fully supported.\n    - (6) Leads to error in scikit-image.\n    - (7) Does not make sense for contrast adjustments.\n\nParameters\n----------\narr : numpy.ndarray\n    Array for which to adjust the contrast. Dtype ``uint8`` is fastest.\n\ngain : number\n    Multiplier for the sigmoid function's output.\n    Higher values lead to quicker changes from dark to light pixels.\n\ncutoff : number\n    Cutoff that shifts the sigmoid function in horizontal direction.\n    Higher values mean that the switch from dark to light pixels happens later, i.e.\n    the pixels will remain darker.\n\nReturns\n-------\nnumpy.ndarray\n    Array with adjusted contrast.", "id": "f16289:m1"}
{"signature": "def adjust_contrast_linear(arr, alpha):", "body": "<EOL>if arr.dtype.name == \"<STR_LIT>\":<EOL><INDENT>min_value, center_value, max_value = iadt.get_value_range_of_dtype(arr.dtype)<EOL>value_range = np.arange(<NUM_LIT:0>, <NUM_LIT>, dtype=np.float32)<EOL>alpha = np.float32(alpha)<EOL>table = center_value + alpha * (value_range - center_value)<EOL>arr_aug = cv2.LUT(arr, np.clip(table, min_value, max_value).astype(arr.dtype))<EOL>if arr.ndim == <NUM_LIT:3> and arr_aug.ndim == <NUM_LIT:2>:<EOL><INDENT>return arr_aug[..., np.newaxis]<EOL><DEDENT>return arr_aug<EOL><DEDENT>else:<EOL><INDENT>input_dtype = arr.dtype<EOL>_min_value, center_value, _max_value = iadt.get_value_range_of_dtype(input_dtype)<EOL>if input_dtype.kind in [\"<STR_LIT:u>\", \"<STR_LIT:i>\"]:<EOL><INDENT>center_value = int(center_value)<EOL><DEDENT>image_aug = center_value + alpha * (arr.astype(np.float64)-center_value)<EOL>image_aug = iadt.restore_dtypes_(image_aug, input_dtype)<EOL>return image_aug<EOL><DEDENT>", "docstring": "Adjust contrast by scaling each pixel value to ``127 + alpha*(I_ij-127)``.\n\n    dtype support::\n\n        * ``uint8``: yes; fully tested (1) (2)\n        * ``uint16``: yes; tested (2)\n        * ``uint32``: yes; tested (2)\n        * ``uint64``: no (3)\n        * ``int8``: yes; tested (2)\n        * ``int16``: yes; tested (2)\n        * ``int32``: yes; tested (2)\n        * ``int64``: no (2)\n        * ``float16``: yes; tested (2)\n        * ``float32``: yes; tested (2)\n        * ``float64``: yes; tested (2)\n        * ``float128``: no (2)\n        * ``bool``: no (4)\n\n        - (1) Handled by ``cv2``. 
Other dtypes are handled by raw ``numpy``.\n        - (2) Only tested for reasonable alphas with up to a value of around 100.\n        - (3) Conversion to ``float64`` is done during augmentation, hence ``uint64``, ``int64``,\n              and ``float128`` support cannot be guaranteed.\n        - (4) Does not make sense for contrast adjustments.\n\n    Parameters\n    ----------\n    arr : numpy.ndarray\n        Array for which to adjust the contrast. Dtype ``uint8`` is fastest.\n\n    alpha : number\n        Multiplier to linearly pronounce (>1.0), dampen (0.0 to 1.0) or invert (<0.0) the\n        difference between each pixel value and the center value, e.g. ``127`` for ``uint8``.\n\n    Returns\n    -------\n    numpy.ndarray\n        Array with adjusted contrast.", "id": "f16289:m3"}
{"signature": "def EdgeDetect(alpha=<NUM_LIT:0>, name=None, deterministic=False, random_state=None):", "body": "alpha_param = iap.handle_continuous_param(alpha, \"<STR_LIT>\", value_range=(<NUM_LIT:0>, <NUM_LIT:1.0>), tuple_to_uniform=True,<EOL>list_to_choice=True)<EOL>def create_matrices(_image, nb_channels, random_state_func):<EOL><INDENT>alpha_sample = alpha_param.draw_sample(random_state=random_state_func)<EOL>ia.do_assert(<NUM_LIT:0> <= alpha_sample <= <NUM_LIT:1.0>)<EOL>matrix_nochange = np.array([<EOL>[<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>],<EOL>[<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>],<EOL>[<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>]<EOL>], dtype=np.float32)<EOL>matrix_effect = np.array([<EOL>[<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>],<EOL>[<NUM_LIT:1>, -<NUM_LIT:4>, <NUM_LIT:1>],<EOL>[<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>]<EOL>], dtype=np.float32)<EOL>matrix = (<NUM_LIT:1>-alpha_sample) * matrix_nochange + alpha_sample * matrix_effect<EOL>return [matrix] * nb_channels<EOL><DEDENT>if name is None:<EOL><INDENT>name = \"<STR_LIT>\" % (ia.caller_name(),)<EOL><DEDENT>return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state)<EOL>", "docstring": "Augmenter that detects all edges in images, marks them in\na black and white image and then overlays the result with the original\nimage.\n\ndtype support::\n\n    See ``imgaug.augmenters.convolutional.Convolve``.\n\nParameters\n----------\nalpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n    Visibility of the sharpened image. 
At 0, only the original image is\n    visible, at 1.0 only its sharpened version is visible.\n\n        * If an int or float, exactly that value will be used.\n        * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n          be sampled per image.\n        * If a list, then a random value will be sampled from that list\n          per image.\n        * If a StochasticParameter, a value will be sampled from the\n          parameter per image.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nExamples\n--------\n>>> aug = EdgeDetect(alpha=(0.0, 1.0))\n\ndetects edges in an image  and overlays the result with a variable alpha\nin the range ``0.0 <= a <= 1.0`` over the old image.", "id": "f16290:m2"}
{"signature": "def Sharpen(alpha=<NUM_LIT:0>, lightness=<NUM_LIT:1>, name=None, deterministic=False, random_state=None):", "body": "alpha_param = iap.handle_continuous_param(alpha, \"<STR_LIT>\", value_range=(<NUM_LIT:0>, <NUM_LIT:1.0>), tuple_to_uniform=True,<EOL>list_to_choice=True)<EOL>lightness_param = iap.handle_continuous_param(lightness, \"<STR_LIT>\", value_range=(<NUM_LIT:0>, None), tuple_to_uniform=True,<EOL>list_to_choice=True)<EOL>def create_matrices(image, nb_channels, random_state_func):<EOL><INDENT>alpha_sample = alpha_param.draw_sample(random_state=random_state_func)<EOL>ia.do_assert(<NUM_LIT:0> <= alpha_sample <= <NUM_LIT:1.0>)<EOL>lightness_sample = lightness_param.draw_sample(random_state=random_state_func)<EOL>matrix_nochange = np.array([<EOL>[<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>],<EOL>[<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>],<EOL>[<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>]<EOL>], dtype=np.float32)<EOL>matrix_effect = np.array([<EOL>[-<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>],<EOL>[-<NUM_LIT:1>, <NUM_LIT:8>+lightness_sample, -<NUM_LIT:1>],<EOL>[-<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>]<EOL>], dtype=np.float32)<EOL>matrix = (<NUM_LIT:1>-alpha_sample) * matrix_nochange + alpha_sample * matrix_effect<EOL>return [matrix] * nb_channels<EOL><DEDENT>if name is None:<EOL><INDENT>name = \"<STR_LIT>\" % (ia.caller_name(),)<EOL><DEDENT>return Convolve(create_matrices, name=name, deterministic=deterministic, random_state=random_state)<EOL>", "docstring": "Augmenter that sharpens images and overlays the result with the original image.\n\ndtype support::\n\n    See ``imgaug.augmenters.convolutional.Convolve``.\n\nParameters\n----------\nalpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n    Visibility of the sharpened image. 
At 0, only the original image is\n    visible, at 1.0 only its sharpened version is visible.\n\n        * If an int or float, exactly that value will be used.\n        * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n          be sampled per image.\n        * If a list, then a random value will be sampled from that list\n          per image.\n        * If a StochasticParameter, a value will be sampled from the\n          parameter per image.\n\nlightness : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n    Parameter that controls the lightness/brightness of the sharped image.\n    Sane values are somewhere in the range ``(0.5, 2)``.\n    The value 0 results in an edge map. Values higher than 1 create bright\n    images. Default value is 1.\n\n        * If an int or float, exactly that value will be used.\n        * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n          be sampled per image.\n        * If a list, then a random value will be sampled from that list\n          per image.\n        * If a StochasticParameter, a value will be sampled from the\n          parameter per image.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nExamples\n--------\n>>> aug = Sharpen(alpha=(0.0, 1.0))\n\nsharpens input images and overlays the sharpened image by a variable\namount over the old image.\n\n>>> aug = Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0))\n\nsharpens input images with a variable lightness in the range\n``0.75 <= x <= 2.0`` and with a variable alpha.", "id": "f16290:m0"}
{"signature": "def VerticalFlip(*args, **kwargs):", "body": "return Flipud(*args, **kwargs)<EOL>", "docstring": "Alias for Flipud.", "id": "f16294:m1"}
{"signature": "def Pepper(p=<NUM_LIT:0>, per_channel=False, name=None, deterministic=False, random_state=None):", "body": "replacement01 = iap.ForceSign(<EOL>iap.Beta(<NUM_LIT:0.5>, <NUM_LIT:0.5>) - <NUM_LIT:0.5>,<EOL>positive=False,<EOL>mode=\"<STR_LIT>\"<EOL>) + <NUM_LIT:0.5><EOL>replacement = replacement01 * <NUM_LIT:255><EOL>if name is None:<EOL><INDENT>name = \"<STR_LIT>\" % (ia.caller_name(),)<EOL><DEDENT>return ReplaceElementwise(<EOL>mask=p,<EOL>replacement=replacement,<EOL>per_channel=per_channel,<EOL>name=name,<EOL>deterministic=deterministic,<EOL>random_state=random_state<EOL>)<EOL>", "docstring": "Adds pepper noise to an image, i.e. black-ish pixels.\n\nThis is similar to dropout, but slower and the black pixels are not uniformly black.\n\ndtype support::\n\n    See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.\n\nParameters\n----------\np : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional\n    Probability of changing a pixel to pepper noise.\n\n        * If a float, then that value will be used for all images as the\n          probability.\n        * If a tuple ``(a, b)``, then a probability will be sampled per image\n          from the range ``a <= x <= b``.\n        * If a list, then a random value will be sampled from that list\n          per image.\n        * If a StochasticParameter, then this parameter will be used as\n          the *mask*, i.e. 
it is expected to contain values between\n          0.0 and 1.0, where 1.0 means that pepper is to be added\n          at that location.\n\nper_channel : bool or float, optional\n    Whether to use the same value for all channels (False)\n    or to sample a new value for each channel (True).\n    If this value is a float ``p``, then for ``p`` percent of all images\n    `per_channel` will be treated as True, otherwise as False.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nExamples\n--------\n>>> aug = iaa.Pepper(0.05)\n\nReplaces 5 percent of all pixels with pepper.", "id": "f16295:m10"}
{"signature": "def ImpulseNoise(p=<NUM_LIT:0>, name=None, deterministic=False, random_state=None):", "body": "return SaltAndPepper(p=p, per_channel=True, name=name, deterministic=deterministic, random_state=random_state)<EOL>", "docstring": "Creates an augmenter to apply impulse noise to an image.\n\nThis is identical to ``SaltAndPepper``, except that per_channel is always set to True.\n\ndtype support::\n\n    See ``imgaug.augmenters.arithmetic.SaltAndPepper``.", "id": "f16295:m5"}
{"signature": "def Salt(p=<NUM_LIT:0>, per_channel=False, name=None, deterministic=False, random_state=None):", "body": "replacement01 = iap.ForceSign(<EOL>iap.Beta(<NUM_LIT:0.5>, <NUM_LIT:0.5>) - <NUM_LIT:0.5>,<EOL>positive=True,<EOL>mode=\"<STR_LIT>\"<EOL>) + <NUM_LIT:0.5><EOL>replacement = replacement01 * <NUM_LIT:255>  <EOL>if name is None:<EOL><INDENT>name = \"<STR_LIT>\" % (ia.caller_name(),)<EOL><DEDENT>return ReplaceElementwise(mask=p, replacement=replacement, per_channel=per_channel, name=name,<EOL>deterministic=deterministic, random_state=random_state)<EOL>", "docstring": "Adds salt noise to an image, i.e. white-ish pixels.\n\ndtype support::\n\n    See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.\n\nParameters\n----------\np : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional\n    Probability of changing a pixel to salt noise.\n\n        * If a float, then that value will be used for all images as the\n          probability.\n        * If a tuple ``(a, b)``, then a probability will be sampled per image\n          from the range ``a <= x <= b``.\n        * If a list, then a random value will be sampled from that list\n          per image.\n        * If a StochasticParameter, then this parameter will be used as\n          the *mask*, i.e. 
it is expected to contain values between\n          0.0 and 1.0, where 1.0 means that salt is to be added\n          at that location.\n\nper_channel : bool or float, optional\n    Whether to use the same value for all channels (False)\n    or to sample a new value for each channel (True).\n    If this value is a float ``p``, then for ``p`` percent of all images\n    `per_channel` will be treated as True, otherwise as False.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nExamples\n--------\n>>> aug = iaa.Salt(0.05)\n\nReplaces 5 percent of all pixels with salt.", "id": "f16295:m8"}
{"signature": "def CoarsePepper(p=<NUM_LIT:0>, size_px=None, size_percent=None, per_channel=False, min_size=<NUM_LIT:4>, name=None, deterministic=False,<EOL>random_state=None):", "body": "mask = iap.handle_probability_param(p, \"<STR_LIT:p>\", tuple_to_uniform=True, list_to_choice=True)<EOL>if size_px is not None:<EOL><INDENT>mask_low = iap.FromLowerResolution(other_param=mask, size_px=size_px, min_size=min_size)<EOL><DEDENT>elif size_percent is not None:<EOL><INDENT>mask_low = iap.FromLowerResolution(other_param=mask, size_percent=size_percent, min_size=min_size)<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>replacement01 = iap.ForceSign(<EOL>iap.Beta(<NUM_LIT:0.5>, <NUM_LIT:0.5>) - <NUM_LIT:0.5>,<EOL>positive=False,<EOL>mode=\"<STR_LIT>\"<EOL>) + <NUM_LIT:0.5><EOL>replacement = replacement01 * <NUM_LIT:255><EOL>if name is None:<EOL><INDENT>name = \"<STR_LIT>\" % (ia.caller_name(),)<EOL><DEDENT>return ReplaceElementwise(<EOL>mask=mask_low,<EOL>replacement=replacement,<EOL>per_channel=per_channel,<EOL>name=name,<EOL>deterministic=deterministic,<EOL>random_state=random_state<EOL>)<EOL>", "docstring": "Adds coarse pepper noise to an image, i.e. rectangles that contain noisy black-ish pixels.\n\ndtype support::\n\n    See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.\n\nParameters\n----------\np : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional\n    Probability of changing a pixel to pepper noise.\n\n        * If a float, then that value will be used for all images as the\n          probability.\n        * If a tuple ``(a, b)``, then a probability will be sampled per image\n          from the range ``a <= x <= b.``\n        * If a list, then a random value will be sampled from that list\n          per image.\n        * If a StochasticParameter, then this parameter will be used as\n          the *mask*, i.e. 
it is expected to contain values between\n          0.0 and 1.0, where 1.0 means that pepper is to be added\n          at that location.\n\nsize_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional\n    The size of the lower resolution image from which to sample the noise\n    mask in absolute pixel dimensions.\n\n        * If an integer, then that size will be used for both height and\n          width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then\n          upsampled to ``HxW``, where ``H`` is the image size and W the image width.\n        * If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the\n          range ``[a..b]`` and the mask will be generated at size ``MxN``, then\n          upsampled to ``HxW``.\n        * If a StochasticParameter, then this parameter will be used to\n          determine the sizes. It is expected to be discrete.\n\nsize_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional\n    The size of the lower resolution image from which to sample the noise\n    mask *in percent* of the input image.\n\n        * If a float, then that value will be used as the percentage of the\n          height and width (relative to the original size). E.g. for value\n          p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled\n          to ``HxW``.\n        * If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the\n          interval ``(a, b)`` and used as the percentages, i.e the mask size\n          will be ``(m*H)x(n*W)``.\n        * If a StochasticParameter, then this parameter will be used to\n          sample the percentage values. 
It is expected to be continuous.\n\nper_channel : bool or float, optional\n    Whether to use the same value (is dropped / is not dropped)\n    for all channels of a pixel (False) or to sample a new value for each\n    channel (True).\n    If this value is a float ``p``, then for ``p`` percent of all images\n    `per_channel` will be treated as True, otherwise as False.\n\nmin_size : int, optional\n    Minimum size of the low resolution mask, both width and height. If\n    `size_percent` or `size_px` leads to a lower value than this, `min_size`\n    will be used instead. This should never have a value of less than 2,\n    otherwise one may end up with a 1x1 low resolution mask, leading easily\n    to the whole image being replaced.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nExamples\n--------\n>>> aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))\n\nReplaces 5 percent of all pixels with pepper in an image that has\n1 to 10 percent of the input image size, then upscales the results\nto the input image size, leading to large rectangular areas being replaced.", "id": "f16295:m11"}
{"signature": "def CoarseSaltAndPepper(p=<NUM_LIT:0>, size_px=None, size_percent=None, per_channel=False, min_size=<NUM_LIT:4>, name=None,<EOL>deterministic=False, random_state=None):", "body": "mask = iap.handle_probability_param(p, \"<STR_LIT:p>\", tuple_to_uniform=True, list_to_choice=True)<EOL>if size_px is not None:<EOL><INDENT>mask_low = iap.FromLowerResolution(other_param=mask, size_px=size_px, min_size=min_size)<EOL><DEDENT>elif size_percent is not None:<EOL><INDENT>mask_low = iap.FromLowerResolution(other_param=mask, size_percent=size_percent, min_size=min_size)<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>replacement = iap.Beta(<NUM_LIT:0.5>, <NUM_LIT:0.5>) * <NUM_LIT:255><EOL>if name is None:<EOL><INDENT>name = \"<STR_LIT>\" % (ia.caller_name(),)<EOL><DEDENT>return ReplaceElementwise(<EOL>mask=mask_low,<EOL>replacement=replacement,<EOL>per_channel=per_channel,<EOL>name=name,<EOL>deterministic=deterministic,<EOL>random_state=random_state<EOL>)<EOL>", "docstring": "Adds coarse salt and pepper noise to an image, i.e. rectangles that contain noisy white-ish and black-ish pixels.\n\nTODO replace dtype support with uint8 only, because replacement is geared towards that value range\n\ndtype support::\n\n    See ``imgaug.augmenters.arithmetic.ReplaceElementwise``.\n\nParameters\n----------\np : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional\n    Probability of changing a pixel to salt/pepper noise.\n\n        * If a float, then that value will be used for all images as the\n          probability.\n        * If a tuple ``(a, b)``, then a probability will be sampled per image\n          from the range ``a <= x <= b``.\n        * If a list, then a random value will be sampled from that list\n          per image.\n        * If a StochasticParameter, then this parameter will be used as\n          the *mask*, i.e. 
it is expected to contain values between\n          0.0 and 1.0, where 1.0 means that salt/pepper is to be added\n          at that location.\n\nsize_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional\n    The size of the lower resolution image from which to sample the noise\n    mask in absolute pixel dimensions.\n\n        * If an integer, then that size will be used for both height and\n          width. E.g. a value of 3 would lead to a ``3x3`` mask, which is then\n          upsampled to ``HxW``, where ``H`` is the image size and ``W`` the image width.\n        * If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the\n          range ``[a..b]`` and the mask will be generated at size ``MxN``, then\n          upsampled to ``HxW``.\n        * If a StochasticParameter, then this parameter will be used to\n          determine the sizes. It is expected to be discrete.\n\nsize_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional\n    The size of the lower resolution image from which to sample the noise\n    mask *in percent* of the input image.\n\n        * If a float, then that value will be used as the percentage of the\n          height and width (relative to the original size). E.g. for value\n          p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled\n          to ``HxW.``\n        * If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the\n          interval ``(a, b)`` and used as the percentages, i.e the mask size\n          will be ``(m*H)x(n*W)``.\n        * If a StochasticParameter, then this parameter will be used to\n          sample the percentage values. 
It is expected to be continuous.\n\nper_channel : bool or float, optional\n    Whether to use the same value (is dropped / is not dropped)\n    for all channels of a pixel (False) or to sample a new value for each\n    channel (True).\n    If this value is a float ``p``, then for ``p`` percent of all images\n    `per_channel` will be treated as True, otherwise as False.\n\nmin_size : int, optional\n    Minimum size of the low resolution mask, both width and height. If\n    `size_percent` or `size_px` leads to a lower value than this, `min_size`\n    will be used instead. This should never have a value of less than 2,\n    otherwise one may end up with a 1x1 low resolution mask, leading easily\n    to the whole image being replaced.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nExamples\n--------\n>>> aug = iaa.CoarseSaltAndPepper(0.05, size_percent=(0.01, 0.1))\n\nReplaces 5 percent of all pixels with salt/pepper in an image that has\n1 to 10 percent of the input image size, then upscales the results\nto the input image size, leading to large rectangular areas being replaced.", "id": "f16295:m7"}
{"signature": "def AdditiveLaplaceNoise(loc=<NUM_LIT:0>, scale=<NUM_LIT:0>, per_channel=False, name=None, deterministic=False, random_state=None):", "body": "loc2 = iap.handle_continuous_param(loc, \"<STR_LIT>\", value_range=None, tuple_to_uniform=True, list_to_choice=True)<EOL>scale2 = iap.handle_continuous_param(scale, \"<STR_LIT>\", value_range=(<NUM_LIT:0>, None), tuple_to_uniform=True,<EOL>list_to_choice=True)<EOL>if name is None:<EOL><INDENT>name = \"<STR_LIT>\" % (ia.caller_name(),)<EOL><DEDENT>return AddElementwise(iap.Laplace(loc=loc2, scale=scale2), per_channel=per_channel, name=name,<EOL>deterministic=deterministic, random_state=random_state)<EOL>", "docstring": "Add laplace noise to images.\n\nThe laplace distribution is similar to the gaussian distribution, but has puts weight on the long tail.\nHence, this noise will add more outliers (very high/low values). It is somewhere between gaussian noise and\nsalt and pepper noise.\n\nValues of around ``255 * 0.05`` for `scale` lead to visible noise (for uint8).\nValues of around ``255 * 0.10`` for `scale` lead to very visible noise (for uint8).\nIt is recommended to usually set `per_channel` to True.\n\ndtype support::\n\n    See ``imgaug.augmenters.arithmetic.AddElementwise``.\n\nParameters\n----------\nloc : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n    Mean of the laplace distribution that generates the noise.\n\n        * If a number, exactly that value will be used.\n        * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n          be sampled per image.\n        * If a list, then a random value will be sampled from that list per\n          image.\n        * If a StochasticParameter, a value will be sampled from the\n          parameter per image.\n\nscale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n    Standard deviation of the laplace distribution that generates the 
noise.\n    Must be ``>= 0``. If 0 then only `loc` will be used.\n    Recommended to be around ``255 * 0.05``.\n\n        * If an int or float, exactly that value will be used.\n        * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n          be sampled per image.\n        * If a list, then a random value will be sampled from that list per\n          image.\n        * If a StochasticParameter, a value will be sampled from the\n          parameter per image.\n\nper_channel : bool or float, optional\n    Whether to use the same noise value per pixel for all channels (False)\n    or to sample a new value for each channel (True).\n    If this value is a float ``p``, then for ``p`` percent of all images\n    `per_channel` will be treated as True, otherwise as False.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nExamples\n--------\n>>> aug = iaa.AdditiveLaplaceNoise(scale=0.1*255)\n\nAdds laplace noise from the distribution ``Laplace(0, 0.1*255)`` to images.\n\n>>> aug = iaa.AdditiveLaplaceNoise(scale=(0, 0.1*255))\n\nAdds laplace noise from the distribution ``Laplace(0, s)`` to images,\nwhere s is sampled per image from the range ``0 <= s <= 0.1*255``.\n\n>>> aug = iaa.AdditiveLaplaceNoise(scale=0.1*255, per_channel=True)\n\nAdds laplace noise from the distribution ``Laplace(0, 0.1*255)`` to images,\nwhere the noise value is different per pixel *and* channel (e.g. 
a\ndifferent one for red, green and blue channels for the same pixel).\n\n>>> aug = iaa.AdditiveLaplaceNoise(scale=0.1*255, per_channel=0.5)\n\nAdds laplace noise from the distribution ``Laplace(0, 0.1*255)`` to images,\nwhere the noise value is sometimes (50 percent of all cases) the same\nper pixel for all channels and sometimes different (other 50 percent).", "id": "f16295:m1"}
{"signature": "def CoarseDropout(p=<NUM_LIT:0>, size_px=None, size_percent=None, per_channel=False, min_size=<NUM_LIT:4>, name=None, deterministic=False,<EOL>random_state=None):", "body": "if ia.is_single_number(p):<EOL><INDENT>p2 = iap.Binomial(<NUM_LIT:1> - p)<EOL><DEDENT>elif ia.is_iterable(p):<EOL><INDENT>ia.do_assert(len(p) == <NUM_LIT:2>)<EOL>ia.do_assert(p[<NUM_LIT:0>] < p[<NUM_LIT:1>])<EOL>ia.do_assert(<NUM_LIT:0> <= p[<NUM_LIT:0>] <= <NUM_LIT:1.0>)<EOL>ia.do_assert(<NUM_LIT:0> <= p[<NUM_LIT:1>] <= <NUM_LIT:1.0>)<EOL>p2 = iap.Binomial(iap.Uniform(<NUM_LIT:1> - p[<NUM_LIT:1>], <NUM_LIT:1> - p[<NUM_LIT:0>]))<EOL><DEDENT>elif isinstance(p, iap.StochasticParameter):<EOL><INDENT>p2 = p<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\" % (type(p),))<EOL><DEDENT>if size_px is not None:<EOL><INDENT>p3 = iap.FromLowerResolution(other_param=p2, size_px=size_px, min_size=min_size)<EOL><DEDENT>elif size_percent is not None:<EOL><INDENT>p3 = iap.FromLowerResolution(other_param=p2, size_percent=size_percent, min_size=min_size)<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>if name is None:<EOL><INDENT>name = \"<STR_LIT>\" % (ia.caller_name(),)<EOL><DEDENT>return MultiplyElementwise(p3, per_channel=per_channel, name=name, deterministic=deterministic,<EOL>random_state=random_state)<EOL>", "docstring": "Augmenter that sets rectangular areas within images to zero.\n\nIn contrast to Dropout, these areas can have larger sizes.\n(E.g. 
you might end up with three large black rectangles in an image.)\nNote that the current implementation leads to correlated sizes,\nso when there is one large area that is dropped, there is a high likelihood\nthat all other dropped areas are also large.\n\nThis method is implemented by generating the dropout mask at a\nlower resolution (than the image has) and then upsampling the mask\nbefore dropping the pixels.\n\ndtype support::\n\n    See ``imgaug.augmenters.arithmetic.MultiplyElementwise``.\n\nParameters\n----------\np : float or tuple of float or imgaug.parameters.StochasticParameter, optional\n    The probability of any pixel being dropped (i.e. set to zero).\n\n        * If a float, then that value will be used for all pixels. A value\n          of 1.0 would mean, that all pixels will be dropped. A value of\n          0.0 would lead to no pixels being dropped.\n        * If a tuple ``(a, b)``, then a value p will be sampled from the\n          range ``a <= p <= b`` per image and be used as the pixel's dropout\n          probability.\n        * If a StochasticParameter, then this parameter will be used to\n          determine per pixel whether it should be dropped (sampled value\n          of 0) or shouldn't (sampled value of 1).\n\nsize_px : int or tuple of int or imgaug.parameters.StochasticParameter, optional\n    The size of the lower resolution image from which to sample the dropout\n    mask in absolute pixel dimensions.\n\n        * If an integer, then that size will be used for both height and\n          width. E.g. 
a value of 3 would lead to a ``3x3`` mask, which is then\n          upsampled to ``HxW``, where ``H`` is the image size and W the image width.\n        * If a tuple ``(a, b)``, then two values ``M``, ``N`` will be sampled from the\n          range ``[a..b]`` and the mask will be generated at size ``MxN``, then\n          upsampled to ``HxW``.\n        * If a StochasticParameter, then this parameter will be used to\n          determine the sizes. It is expected to be discrete.\n\nsize_percent : float or tuple of float or imgaug.parameters.StochasticParameter, optional\n    The size of the lower resolution image from which to sample the dropout\n    mask *in percent* of the input image.\n\n        * If a float, then that value will be used as the percentage of the\n          height and width (relative to the original size). E.g. for value\n          p, the mask will be sampled from ``(p*H)x(p*W)`` and later upsampled\n          to ``HxW``.\n        * If a tuple ``(a, b)``, then two values ``m``, ``n`` will be sampled from the\n          interval ``(a, b)`` and used as the percentages, i.e the mask size\n          will be ``(m*H)x(n*W)``.\n        * If a StochasticParameter, then this parameter will be used to\n          sample the percentage values. It is expected to be continuous.\n\nper_channel : bool or float, optional\n    Whether to use the same value (is dropped / is not dropped)\n    for all channels of a pixel (False) or to sample a new value for each\n    channel (True).\n    If this value is a float ``p``, then for ``p`` percent of all images\n    `per_channel` will be treated as True, otherwise as False.\n\nmin_size : int, optional\n    Minimum size of the low resolution mask, both width and height. If\n    `size_percent` or `size_px` leads to a lower value than this, `min_size`\n    will be used instead. 
This should never have a value of less than 2,\n    otherwise one may end up with a ``1x1`` low resolution mask, leading easily\n    to the whole image being dropped.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nExamples\n--------\n>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5)\n\ndrops 2 percent of all pixels on an lower-resolution image that has\n50 percent of the original image's size, leading to dropped areas that\nhave roughly 2x2 pixels size.\n\n\n>>> aug = iaa.CoarseDropout((0.0, 0.05), size_percent=(0.05, 0.5))\n\ngenerates a dropout mask at 5 to 50 percent of image's size. In that mask,\n0 to 5 percent of all pixels are dropped (random per image).\n\n>>> aug = iaa.CoarseDropout((0.0, 0.05), size_px=(2, 16))\n\nsame as previous example, but the lower resolution image has 2 to 16 pixels\nsize.\n\n>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=True)\n\ndrops 2 percent of all pixels at 50 percent resolution (2x2 sizes)\nin a channel-wise fashion, i.e. it is unlikely\nfor any pixel to have all channels set to zero (black pixels).\n\n>>> aug = iaa.CoarseDropout(0.02, size_percent=0.5, per_channel=0.5)\n\nsame as previous example, but the `per_channel` feature is only active\nfor 50 percent of all images.", "id": "f16295:m4"}
{"signature": "def blend_alpha(image_fg, image_bg, alpha, eps=<NUM_LIT>):", "body": "assert image_fg.shape == image_bg.shape<EOL>assert image_fg.dtype.kind == image_bg.dtype.kind<EOL>assert image_fg.dtype.name not in [\"<STR_LIT>\"]<EOL>assert image_bg.dtype.name not in [\"<STR_LIT>\"]<EOL>input_was_2d = (len(image_fg.shape) == <NUM_LIT:2>)<EOL>if input_was_2d:<EOL><INDENT>image_fg = np.atleast_3d(image_fg)<EOL>image_bg = np.atleast_3d(image_bg)<EOL><DEDENT>input_was_bool = False<EOL>if image_fg.dtype.kind == \"<STR_LIT:b>\":<EOL><INDENT>input_was_bool = True<EOL>image_fg = image_fg.astype(np.float32)<EOL>image_bg = image_bg.astype(np.float32)<EOL><DEDENT>alpha = np.array(alpha, dtype=np.float64)<EOL>if alpha.size == <NUM_LIT:1>:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if alpha.ndim == <NUM_LIT:2>:<EOL><INDENT>assert alpha.shape == image_fg.shape[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>alpha = alpha.reshape((alpha.shape[<NUM_LIT:0>], alpha.shape[<NUM_LIT:1>], <NUM_LIT:1>))<EOL><DEDENT>elif alpha.ndim == <NUM_LIT:3>:<EOL><INDENT>assert alpha.shape == image_fg.shape or alpha.shape == image_fg.shape[<NUM_LIT:0>:<NUM_LIT:2>] + (<NUM_LIT:1>,)<EOL><DEDENT>else:<EOL><INDENT>alpha = alpha.reshape((<NUM_LIT:1>, <NUM_LIT:1>, -<NUM_LIT:1>))<EOL><DEDENT>if alpha.shape[<NUM_LIT:2>] != image_fg.shape[<NUM_LIT:2>]:<EOL><INDENT>alpha = np.tile(alpha, (<NUM_LIT:1>, <NUM_LIT:1>, image_fg.shape[<NUM_LIT:2>]))<EOL><DEDENT><DEDENT>if not input_was_bool:<EOL><INDENT>if np.all(alpha >= <NUM_LIT:1.0> - eps):<EOL><INDENT>return np.copy(image_fg)<EOL><DEDENT>elif np.all(alpha <= eps):<EOL><INDENT>return np.copy(image_bg)<EOL><DEDENT><DEDENT>assert <NUM_LIT:0> <= alpha.item(<NUM_LIT:0>) <= <NUM_LIT:1.0><EOL>dt_images = iadt.get_minimal_dtype([image_fg, image_bg])<EOL>isize = dt_images.itemsize * <NUM_LIT:2><EOL>isize = max(isize, <NUM_LIT:4>)  <EOL>dt_blend = np.dtype(\"<STR_LIT>\" % (isize,))<EOL>if alpha.dtype != dt_blend:<EOL><INDENT>alpha = alpha.astype(dt_blend)<EOL><DEDENT>if 
image_fg.dtype != dt_blend:<EOL><INDENT>image_fg = image_fg.astype(dt_blend)<EOL><DEDENT>if image_bg.dtype != dt_blend:<EOL><INDENT>image_bg = image_bg.astype(dt_blend)<EOL><DEDENT>image_blend = image_bg + alpha * (image_fg - image_bg)<EOL>if input_was_bool:<EOL><INDENT>image_blend = image_blend > <NUM_LIT:0.5><EOL><DEDENT>else:<EOL><INDENT>image_blend = iadt.restore_dtypes_(image_blend, dt_images, clip=False, round=True)<EOL><DEDENT>if input_was_2d:<EOL><INDENT>return image_blend[:, :, <NUM_LIT:0>]<EOL><DEDENT>return image_blend<EOL>", "docstring": "Blend two images using an alpha blending.\n\nIn an alpha blending, the two images are naively mixed. Let ``A`` be the foreground image\nand ``B`` the background image and ``a`` is the alpha value. Each pixel intensity is then\ncomputed as ``a * A_ij + (1-a) * B_ij``.\n\ndtype support::\n\n    * ``uint8``: yes; fully tested\n    * ``uint16``: yes; fully tested\n    * ``uint32``: yes; fully tested\n    * ``uint64``: yes; fully tested (1)\n    * ``int8``: yes; fully tested\n    * ``int16``: yes; fully tested\n    * ``int32``: yes; fully tested\n    * ``int64``: yes; fully tested (1)\n    * ``float16``: yes; fully tested\n    * ``float32``: yes; fully tested\n    * ``float64``: yes; fully tested (1)\n    * ``float128``: no (2)\n    * ``bool``: yes; fully tested (2)\n\n    - (1) Tests show that these dtypes work, but a conversion to float128 happens, which only\n          has 96 bits of size instead of true 128 bits and hence not twice as much resolution.\n          It is possible that these dtypes result in inaccuracies, though the tests did not\n          indicate that.\n    - (2) Not available due to the input dtype having to be increased to an equivalent float\n          dtype with two times the input resolution.\n    - (3) Mapped internally to ``float16``.\n\nParameters\n----------\nimage_fg : (H,W,[C]) ndarray\n    Foreground image. 
Shape and dtype kind must match the one of the\n    background image.\n\nimage_bg : (H,W,[C]) ndarray\n    Background image. Shape and dtype kind must match the one of the\n    foreground image.\n\nalpha : number or iterable of number or ndarray\n    The blending factor, between 0.0 and 1.0. Can be interpreted as the opacity of the\n    foreground image. Values around 1.0 result in only the foreground image being visible.\n    Values around 0.0 result in only the background image being visible.\n    Multiple alphas may be provided. In these cases, there must be exactly one alpha per\n    channel in the foreground/background image. Alternatively, for ``(H,W,C)`` images,\n    either one ``(H,W)`` array or an ``(H,W,C)`` array of alphas may be provided,\n    denoting the elementwise alpha value.\n\neps : number, optional\n    Controls when an alpha is to be interpreted as exactly 1.0 or exactly 0.0, resulting\n    in only the foreground/background being visible and skipping the actual computation.\n\nReturns\n-------\nimage_blend : (H,W,C) ndarray\n    Blend of foreground and background image.", "id": "f16296:m0"}
{"signature": "def FrequencyNoiseAlpha(exponent=(-<NUM_LIT:4>, <NUM_LIT:4>), first=None, second=None, per_channel=False,<EOL>size_px_max=(<NUM_LIT:4>, <NUM_LIT:16>), upscale_method=None,<EOL>iterations=(<NUM_LIT:1>, <NUM_LIT:3>), aggregation_method=[\"<STR_LIT>\", \"<STR_LIT>\"],<EOL>sigmoid=<NUM_LIT:0.5>, sigmoid_thresh=None,<EOL>name=None, deterministic=False, random_state=None):", "body": "<EOL>upscale_method_default = iap.Choice([\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"], p=[<NUM_LIT>, <NUM_LIT>, <NUM_LIT>])<EOL>sigmoid_thresh_default = iap.Normal(<NUM_LIT:0.0>, <NUM_LIT>)<EOL>noise = iap.FrequencyNoise(<EOL>exponent=exponent,<EOL>size_px_max=size_px_max,<EOL>upscale_method=upscale_method if upscale_method is not None else upscale_method_default<EOL>)<EOL>if iterations != <NUM_LIT:1>:<EOL><INDENT>noise = iap.IterativeNoiseAggregator(<EOL>noise,<EOL>iterations=iterations,<EOL>aggregation_method=aggregation_method<EOL>)<EOL><DEDENT>if sigmoid is False or (ia.is_single_number(sigmoid) and sigmoid <= <NUM_LIT>):<EOL><INDENT>noise = iap.Sigmoid.create_for_noise(<EOL>noise,<EOL>threshold=sigmoid_thresh if sigmoid_thresh is not None else sigmoid_thresh_default,<EOL>activated=sigmoid<EOL>)<EOL><DEDENT>if name is None:<EOL><INDENT>name = \"<STR_LIT>\" % (ia.caller_name(),)<EOL><DEDENT>return AlphaElementwise(<EOL>factor=noise, first=first, second=second, per_channel=per_channel,<EOL>name=name, deterministic=deterministic, random_state=random_state<EOL>)<EOL>", "docstring": "Augmenter to alpha-blend two image sources using frequency noise masks.\n\nThe alpha masks are sampled using frequency noise of varying scales,\nwhich can sometimes create large connected blobs of 1s surrounded by 0s\nand other times results in smaller patterns. 
If nearest neighbour\nupsampling is used, these blobs can be rectangular with sharp edges.\n\ndtype support::\n\n    See ``imgaug.augmenters.blend.AlphaElementwise``.\n\nParameters\n----------\nexponent : number or tuple of number of list of number or imgaug.parameters.StochasticParameter, optional\n    Exponent to use when scaling in the frequency domain.\n    Sane values are in the range -4 (large blobs) to 4 (small patterns).\n    To generate cloud-like structures, use roughly -2.\n\n        * If number, then that number will be used as the exponent for all\n          iterations.\n        * If tuple of two numbers ``(a, b)``, then a value will be sampled\n          per iteration from the range ``[a, b]``.\n        * If a list of numbers, then a value will be picked per iteration\n          at random from that list.\n        * If a StochasticParameter, then a value will be sampled from\n          that parameter per iteration.\n\nfirst : None or imgaug.augmenters.meta.Augmenter or iterable of imgaug.augmenters.meta.Augmenter, optional\n    Augmenter(s) that make up the first of the two branches.\n\n        * If None, then the input images will be reused as the output\n          of the first branch.\n        * If Augmenter, then that augmenter will be used as the branch.\n        * If iterable of Augmenter, then that iterable will be converted\n          into a Sequential and used as the augmenter.\n\nsecond : None or imgaug.augmenters.meta.Augmenter or iterable of imgaug.augmenters.meta.Augmenter, optional\n    Augmenter(s) that make up the second of the two branches.\n\n        * If None, then the input images will be reused as the output\n          of the second branch.\n        * If Augmenter, then that augmenter will be used as the branch.\n        * If iterable of Augmenter, then that iterable will be converted\n          into a Sequential and used as the augmenter.\n\nper_channel : bool or float, optional\n    Whether to use the same factor for all channels 
(False)\n    or to sample a new value for each channel (True).\n    If this value is a float ``p``, then for ``p`` percent of all images\n    `per_channel` will be treated as True, otherwise as False.\n\nsize_px_max : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional\n    The noise is generated in a low resolution environment.\n    This parameter defines the maximum size of that environment (in\n    pixels). The environment is initialized at the same size as the input\n    image and then downscaled, so that no side exceeds `size_px_max`\n    (aspect ratio is kept).\n\n        * If int, then that number will be used as the size for all\n          iterations.\n        * If tuple of two ints ``(a, b)``, then a value will be sampled\n          per iteration from the discrete range ``[a..b]``.\n        * If a list of ints, then a value will be picked per iteration at\n          random from that list.\n        * If a StochasticParameter, then a value will be sampled from\n          that parameter per iteration.\n\nupscale_method : None or imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n    After generating the noise maps in low resolution environments, they\n    have to be upscaled to the input image size. 
This parameter controls\n    the upscaling method.\n\n        * If None, then either ``nearest`` or ``linear`` or ``cubic`` is picked.\n          Most weight is put on linear, followed by cubic.\n        * If imgaug.ALL, then either ``nearest`` or ``linear`` or ``area`` or ``cubic``\n          is picked per iteration (all same probability).\n        * If string, then that value will be used as the method (must be\n          ``nearest`` or ``linear`` or ``area`` or ``cubic``).\n        * If list of string, then a random value will be picked from that\n          list per iteration.\n        * If StochasticParameter, then a random value will be sampled\n          from that parameter per iteration.\n\niterations : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional\n    How often to repeat the simplex noise generation process per\n    image.\n\n        * If int, then that number will be used as the iterations for all\n          images.\n        * If tuple of two ints ``(a, b)``, then a value will be sampled\n          per image from the discrete range ``[a..b]``.\n        * If a list of ints, then a value will be picked per image at\n          random from that list.\n        * If a StochasticParameter, then a value will be sampled from\n          that parameter per image.\n\naggregation_method : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n    The noise maps (from each iteration) are combined to one noise map\n    using an aggregation process. This parameter defines the method used\n    for that process. 
Valid methods are ``min``, ``max`` or ``avg``,\n    where 'min' combines the noise maps by taking the (elementwise) minimum\n    over all iteration's results, ``max`` the (elementwise) maximum and\n    ``avg`` the (elementwise) average.\n\n        * If imgaug.ALL, then a random value will be picked per image from the\n          valid ones.\n        * If a string, then that value will always be used as the method.\n        * If a list of string, then a random value will be picked from\n          that list per image.\n        * If a StochasticParameter, then a random value will be sampled\n          from that parameter per image.\n\nsigmoid : bool or number, optional\n    Whether to apply a sigmoid function to the final noise maps, resulting\n    in maps that have more extreme values (close to 0.0 or 1.0).\n\n        * If bool, then a sigmoid will always (True) or never (False) be\n          applied.\n        * If a number ``p`` with ``0<=p<=1``, then a sigmoid will be applied to\n          ``p`` percent of all final noise maps.\n\nsigmoid_thresh : None or number or tuple of number or imgaug.parameters.StochasticParameter, optional\n    Threshold of the sigmoid, when applied. Thresholds above zero\n    (e.g. 
5.0) will move the saddle point towards the right, leading to\n    more values close to 0.0.\n\n        * If None, then ``Normal(0, 5.0)`` will be used.\n        * If number, then that threshold will be used for all images.\n        * If tuple of two numbers ``(a, b)``, then a random value will\n          be sampled per image from the range ``[a, b]``.\n        * If StochasticParameter, then a random value will be sampled from\n          that parameter per image.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nExamples\n--------\n>>> aug = iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0))\n\nDetects per image all edges, marks them in a black and white image and\nthen alpha-blends the result with the original image using frequency noise\nmasks.\n\n>>> aug = iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0), upscale_method=\"linear\")\n\nSame as the first example, but uses only (smooth) linear upscaling to\nscale the frequency noise masks to the final image sizes, i.e. no nearest\nneighbour upsampling is used, which would result in rectangles with hard\nedges.\n\n>>> aug = iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0), upscale_method=\"linear\", exponent=-2, sigmoid=False)\n\nSame as the previous example, but also limits the exponent to -2 and\ndeactivates the sigmoid, resulting in cloud-like patterns without sharp\nedges.\n\n>>> aug = iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0), sigmoid_thresh=iap.Normal(10.0, 5.0))\n\nSame as the first example, but uses a threshold for the sigmoid function\nthat is further to the right. This is more conservative, i.e. 
the generated\nnoise masks will be mostly black (values around 0.0), which means that\nmost of the original images (parameter/branch `second`) will be kept,\nrather than using the results of the augmentation (parameter/branch\n`first`).", "id": "f16296:m2"}
{"signature": "def SimplexNoiseAlpha(first=None, second=None, per_channel=False, size_px_max=(<NUM_LIT:2>, <NUM_LIT:16>), upscale_method=None,<EOL>iterations=(<NUM_LIT:1>, <NUM_LIT:3>), aggregation_method=\"<STR_LIT>\", sigmoid=True, sigmoid_thresh=None,<EOL>name=None, deterministic=False, random_state=None):", "body": "upscale_method_default = iap.Choice([\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"], p=[<NUM_LIT>, <NUM_LIT>, <NUM_LIT>])<EOL>sigmoid_thresh_default = iap.Normal(<NUM_LIT:0.0>, <NUM_LIT>)<EOL>noise = iap.SimplexNoise(<EOL>size_px_max=size_px_max,<EOL>upscale_method=upscale_method if upscale_method is not None else upscale_method_default<EOL>)<EOL>if iterations != <NUM_LIT:1>:<EOL><INDENT>noise = iap.IterativeNoiseAggregator(<EOL>noise,<EOL>iterations=iterations,<EOL>aggregation_method=aggregation_method<EOL>)<EOL><DEDENT>if sigmoid is False or (ia.is_single_number(sigmoid) and sigmoid <= <NUM_LIT>):<EOL><INDENT>noise = iap.Sigmoid.create_for_noise(<EOL>noise,<EOL>threshold=sigmoid_thresh if sigmoid_thresh is not None else sigmoid_thresh_default,<EOL>activated=sigmoid<EOL>)<EOL><DEDENT>if name is None:<EOL><INDENT>name = \"<STR_LIT>\" % (ia.caller_name(),)<EOL><DEDENT>return AlphaElementwise(<EOL>factor=noise, first=first, second=second, per_channel=per_channel,<EOL>name=name, deterministic=deterministic, random_state=random_state<EOL>)<EOL>", "docstring": "Augmenter to alpha-blend two image sources using simplex noise alpha masks.\n\nThe alpha masks are sampled using a simplex noise method, roughly creating\nconnected blobs of 1s surrounded by 0s. 
If nearest neighbour upsampling\nis used, these blobs can be rectangular with sharp edges.\n\ndtype support::\n\n    See ``imgaug.augmenters.blend.AlphaElementwise``.\n\nParameters\n----------\nfirst : None or imgaug.augmenters.meta.Augmenter or iterable of imgaug.augmenters.meta.Augmenter, optional\n    Augmenter(s) that make up the first of the two branches.\n\n        * If None, then the input images will be reused as the output\n          of the first branch.\n        * If Augmenter, then that augmenter will be used as the branch.\n        * If iterable of Augmenter, then that iterable will be converted\n          into a Sequential and used as the augmenter.\n\nsecond : None or imgaug.augmenters.meta.Augmenter or iterable of imgaug.augmenters.meta.Augmenter, optional\n    Augmenter(s) that make up the second of the two branches.\n\n        * If None, then the input images will be reused as the output\n          of the second branch.\n        * If Augmenter, then that augmenter will be used as the branch.\n        * If iterable of Augmenter, then that iterable will be converted\n          into a Sequential and used as the augmenter.\n\nper_channel : bool or float, optional\n    Whether to use the same factor for all channels (False)\n    or to sample a new value for each channel (True).\n    If this value is a float ``p``, then for ``p`` percent of all images\n    `per_channel` will be treated as True, otherwise as False.\n\nsize_px_max : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional\n    The simplex noise is always generated in a low resolution environment.\n    This parameter defines the maximum size of that environment (in\n    pixels). 
The environment is initialized at the same size as the input\n    image and then downscaled, so that no side exceeds `size_px_max`\n    (aspect ratio is kept).\n\n        * If int, then that number will be used as the size for all\n          iterations.\n        * If tuple of two ints ``(a, b)``, then a value will be sampled\n          per iteration from the discrete range ``[a..b]``.\n        * If a list of ints, then a value will be picked per iteration at\n          random from that list.\n        * If a StochasticParameter, then a value will be sampled from\n          that parameter per iteration.\n\nupscale_method : None or imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n    After generating the noise maps in low resolution environments, they\n    have to be upscaled to the input image size. This parameter controls\n    the upscaling method.\n\n        * If None, then either ``nearest`` or ``linear`` or ``cubic`` is picked.\n          Most weight is put on linear, followed by cubic.\n        * If ia.ALL, then either ``nearest`` or ``linear`` or ``area`` or ``cubic``\n          is picked per iteration (all same probability).\n        * If string, then that value will be used as the method (must be\n          'nearest' or ``linear`` or ``area`` or ``cubic``).\n        * If list of string, then a random value will be picked from that\n          list per iteration.\n        * If StochasticParameter, then a random value will be sampled\n          from that parameter per iteration.\n\niterations : int or tuple of int or list of int or imgaug.parameters.StochasticParameter, optional\n    How often to repeat the simplex noise generation process per image.\n\n        * If int, then that number will be used as the iterations for all\n          images.\n        * If tuple of two ints ``(a, b)``, then a value will be sampled\n          per image from the discrete range ``[a..b]``.\n        * If a list of ints, then a value will be 
picked per image at\n          random from that list.\n        * If a StochasticParameter, then a value will be sampled from\n          that parameter per image.\n\naggregation_method : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n    The noise maps (from each iteration) are combined to one noise map\n    using an aggregation process. This parameter defines the method used\n    for that process. Valid methods are ``min``, ``max`` or ``avg``,\n    where ``min`` combines the noise maps by taking the (elementwise) minimum\n    over all iteration's results, ``max`` the (elementwise) maximum and\n    ``avg`` the (elementwise) average.\n\n        * If imgaug.ALL, then a random value will be picked per image from the\n          valid ones.\n        * If a string, then that value will always be used as the method.\n        * If a list of string, then a random value will be picked from\n          that list per image.\n        * If a StochasticParameter, then a random value will be sampled\n          from that paramter per image.\n\nsigmoid : bool or number, optional\n    Whether to apply a sigmoid function to the final noise maps, resulting\n    in maps that have more extreme values (close to 0.0 or 1.0).\n\n        * If bool, then a sigmoid will always (True) or never (False) be\n          applied.\n        * If a number ``p`` with ``0<=p<=1``, then a sigmoid will be applied to\n          ``p`` percent of all final noise maps.\n\nsigmoid_thresh : None or number or tuple of number or imgaug.parameters.StochasticParameter, optional\n    Threshold of the sigmoid, when applied. Thresholds above zero\n    (e.g. 
5.0) will move the saddle point towards the right, leading to\n    more values close to 0.0.\n\n        * If None, then ``Normal(0, 5.0)`` will be used.\n        * If number, then that threshold will be used for all images.\n        * If tuple of two numbers ``(a, b)``, then a random value will\n          be sampled per image from the range ``[a, b]``.\n        * If StochasticParameter, then a random value will be sampled from\n          that parameter per image.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nExamples\n--------\n>>> aug = iaa.SimplexNoiseAlpha(iaa.EdgeDetect(1.0))\n\nDetects per image all edges, marks them in a black and white image and\nthen alpha-blends the result with the original image using simplex noise\nmasks.\n\n>>> aug = iaa.SimplexNoiseAlpha(iaa.EdgeDetect(1.0), upscale_method=\"linear\")\n\nSame as the first example, but uses only (smooth) linear upscaling to\nscale the simplex noise masks to the final image sizes, i.e. no nearest\nneighbour upsampling is used, which would result in rectangles with hard\nedges.\n\n>>> aug = iaa.SimplexNoiseAlpha(iaa.EdgeDetect(1.0), sigmoid_thresh=iap.Normal(10.0, 5.0))\n\nSame as the first example, but uses a threshold for the sigmoid function\nthat is further to the right. This is more conservative, i.e. the generated\nnoise masks will be mostly black (values around 0.0), which means that\nmost of the original images (parameter/branch `second`) will be kept,\nrather than using the results of the augmentation (parameter/branch\n`first`).", "id": "f16296:m1"}
{"signature": "@ia.deprecated(alt_func=\"<STR_LIT>\")<EOL>def InColorspace(to_colorspace, from_colorspace=\"<STR_LIT>\", children=None, name=None, deterministic=False,<EOL>random_state=None):", "body": "return WithColorspace(to_colorspace, from_colorspace, children, name, deterministic, random_state)<EOL>", "docstring": "Convert images to another colorspace.", "id": "f16297:m0"}
{"signature": "def Grayscale(alpha=<NUM_LIT:0>, from_colorspace=\"<STR_LIT>\", name=None, deterministic=False, random_state=None):", "body": "if name is None:<EOL><INDENT>name = \"<STR_LIT>\" % (ia.caller_name(),)<EOL><DEDENT>return ChangeColorspace(to_colorspace=ChangeColorspace.GRAY, alpha=alpha, from_colorspace=from_colorspace,<EOL>name=name, deterministic=deterministic, random_state=random_state)<EOL>", "docstring": "Augmenter to convert images to their grayscale versions.\n\nNOTE: Number of output channels is still 3, i.e. this augmenter just \"removes\" color.\n\nTODO check dtype support\n\ndtype support::\n\n    * ``uint8``: yes; fully tested\n    * ``uint16``: ?\n    * ``uint32``: ?\n    * ``uint64``: ?\n    * ``int8``: ?\n    * ``int16``: ?\n    * ``int32``: ?\n    * ``int64``: ?\n    * ``float16``: ?\n    * ``float32``: ?\n    * ``float64``: ?\n    * ``float128``: ?\n    * ``bool``: ?\n\nParameters\n----------\nalpha : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n    The alpha value of the grayscale image when overlayed over the\n    old image. A value close to 1.0 means, that mostly the new grayscale\n    image is visible. 
A value close to 0.0 means, that mostly the\n    old image is visible.\n\n        * If a number, exactly that value will always be used.\n        * If a tuple ``(a, b)``, a random value from the range ``a <= x <= b`` will\n          be sampled per image.\n        * If a list, then a random value will be sampled from that list per image.\n        * If a StochasticParameter, a value will be sampled from the\n          parameter per image.\n\nfrom_colorspace : str, optional\n    The source colorspace (of the input images).\n    Allowed strings are: ``RGB``, ``BGR``, ``GRAY``, ``CIE``, ``YCrCb``, ``HSV``, ``HLS``, ``Lab``, ``Luv``.\n    See :func:`imgaug.augmenters.color.ChangeColorspace.__init__`.\n\nname : None or str, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\ndeterministic : bool, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nrandom_state : None or int or numpy.random.RandomState, optional\n    See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\nExamples\n--------\n>>> aug = iaa.Grayscale(alpha=1.0)\n\ncreates an augmenter that turns images to their grayscale versions.\n\n>>> aug = iaa.Grayscale(alpha=(0.0, 1.0))\n\ncreates an augmenter that turns images to their grayscale versions with\nan alpha value in the range ``0 <= alpha <= 1``. An alpha value of 0.5 would\nmean, that the output image is 50 percent of the input image and 50\npercent of the grayscale image (i.e. 50 percent of color removed).", "id": "f16297:m1"}
{"signature": "@staticmethod<EOL><INDENT>def create_for_noise(other_param, threshold=(-<NUM_LIT:10>, <NUM_LIT:10>), activated=True):<DEDENT>", "body": "return Sigmoid(other_param, threshold, activated, mul=<NUM_LIT:20>, add=-<NUM_LIT:10>)<EOL>", "docstring": "Creates a Sigmoid that is adjusted to be used with noise parameters,\ni.e. with parameters which's output values are in the range [0.0, 1.0].\n\nParameters\n----------\nother_param : imgaug.parameters.StochasticParameter\n    See :func:`imgaug.parameters.Sigmoid.__init__`.\n\nthreshold : number or tuple of number or iterable of number or imgaug.parameters.StochasticParameter,\\\n            optional\n    See :func:`imgaug.parameters.Sigmoid.__init__`.\n\nactivated : bool or number, optional\n    See :func:`imgaug.parameters.Sigmoid.__init__`.\n\nReturns\n-------\nSigmoid\n    A sigmoid adjusted to be used with noise.", "id": "f16298:c25:m1"}
{"signature": "def draw_sample(self, random_state=None):", "body": "return self.draw_samples(<NUM_LIT:1>, random_state=random_state)[<NUM_LIT:0>]<EOL>", "docstring": "Draws a single sample value from this parameter.\n\nParameters\n----------\nrandom_state : None or numpy.random.RandomState, optional\n    A random state to use during the sampling process.\n    If None, the libraries global random state will be used.\n\nReturns\n-------\nany\n    A single sample value.", "id": "f16298:c0:m1"}
{"signature": "def draw_distribution_graph(self, title=None, size=(<NUM_LIT:1000>, <NUM_LIT:1000>), bins=<NUM_LIT:100>):", "body": "<EOL>import matplotlib.pyplot as plt<EOL>points = []<EOL>for _ in sm.xrange(size[<NUM_LIT:0>]):<EOL><INDENT>points.append(self.draw_samples(size[<NUM_LIT:1>:]).flatten())<EOL><DEDENT>points = np.concatenate(points)<EOL>fig = plt.figure()<EOL>fig.add_subplot(<NUM_LIT>)<EOL>ax = fig.gca()<EOL>heights, bins = np.histogram(points, bins=bins)<EOL>heights = heights / sum(heights)<EOL>ax.bar(bins[:-<NUM_LIT:1>], heights, width=(max(bins) - min(bins))/len(bins), color=\"<STR_LIT>\", alpha=<NUM_LIT>)<EOL>if title is None:<EOL><INDENT>title = str(self)<EOL><DEDENT>if title is not False:<EOL><INDENT>title_fragments = [title[i:i+<NUM_LIT:50>] for i in sm.xrange(<NUM_LIT:0>, len(title), <NUM_LIT:50>)]<EOL>ax.set_title(\"<STR_LIT:\\n>\".join(title_fragments))<EOL><DEDENT>fig.tight_layout(pad=<NUM_LIT:0>)<EOL>with tempfile.NamedTemporaryFile(suffix=\"<STR_LIT>\") as f:<EOL><INDENT>fig.savefig(f.name)<EOL>data = imageio.imread(f)[..., <NUM_LIT:0>:<NUM_LIT:3>]<EOL><DEDENT>plt.close()<EOL>return data<EOL>", "docstring": "Generate a plot (image) that shows the parameter's distribution of\nvalues.\n\nParameters\n----------\ntitle : None or False or str, optional\n    Title of the plot. None is automatically replaced by a title\n    derived from ``str(param)``. If set to False, no title will be\n    shown.\n\nsize : tuple of int\n    Number of points to sample. This is always expected to have at\n    least two values. The first defines the number of sampling runs,\n    the second (and further) dimensions define the size assigned\n    to each :func:`imgaug.parameters.StochasticParameter.draw_samples`\n    call. E.g. ``(10, 20, 15)`` will lead to ``10`` calls of\n    ``draw_samples(size=(20, 15))``. 
The results will be merged to a single 1d array.\n\nbins : int\n    Number of bins in the plot histograms.\n\nReturns\n-------\ndata : (H,W,3) ndarray\n    Image of the plot.", "id": "f16298:c0:m20"}
{"signature": "def deepcopy(self):", "body": "return copy_module.deepcopy(self)<EOL>", "docstring": "Create a deep copy of this parameter.\n\nReturns\n-------\nimgaug.parameters.StochasticParameter\n    Deep copy.", "id": "f16298:c0:m19"}
{"signature": "def Negative(other_param, mode=\"<STR_LIT>\", reroll_count_max=<NUM_LIT:2>):", "body": "return ForceSign(<EOL>other_param=other_param,<EOL>positive=False,<EOL>mode=mode,<EOL>reroll_count_max=reroll_count_max<EOL>)<EOL>", "docstring": "Converts another parameter's results to negative values.\n\nParameters\n----------\nother_param : imgaug.parameters.StochasticParameter\n    Other parameter which's sampled values are to be\n    modified.\n\nmode : {'invert', 'reroll'}, optional\n    How to change the signs. Valid values are ``invert`` and ``reroll``.\n    ``invert`` means that wrong signs are simply flipped.\n    ``reroll`` means that all samples with wrong signs are sampled again,\n    optionally many times, until they randomly end up having the correct\n    sign.\n\nreroll_count_max : int, optional\n    If `mode` is set to ``reroll``, this determines how often values may\n    be rerolled before giving up and simply flipping the sign (as in\n    ``mode=\"invert\"``). This shouldn't be set too high, as rerolling is\n    expensive.\n\nExamples\n--------\n>>> param = Negative(Normal(0, 1), mode=\"reroll\")\n\nGenerates a normal distribution that has only negative values.", "id": "f16298:m10"}
{"signature": "def _mark_log_file(self, msg):", "body": "with open('<STR_LIT>', '<STR_LIT:a>') as logfile:<EOL><INDENT>logfile.write(\"<STR_LIT:->\"*<NUM_LIT> + '<STR_LIT:\\n>')<EOL>logfile.write('<STR_LIT>' % (msg, datetime.now()))<EOL>logfile.write(\"<STR_LIT:->\"*<NUM_LIT> + '<STR_LIT:\\n>')<EOL><DEDENT>return<EOL>", "docstring": "A convenience method to mark sections of a continuous log file.", "id": "f16372:c0:m1"}
{"signature": "def project_info(self):", "body": "return self._get('<STR_LIT>')<EOL>", "docstring": "Get json with info about all the projects.", "id": "f16375:c0:m11"}
{"signature": "def _delete(self, sub_url=None, data=None):", "body": "return self._dispatch(\"<STR_LIT>\", sub_url, data)<EOL>", "docstring": "Thin wrapper around _dispatch function using method DELETE", "id": "f16375:c0:m5"}
{"signature": "def _get(self, sub_url=None, data=None):", "body": "return self._dispatch('<STR_LIT:GET>', sub_url, data)<EOL>", "docstring": "Thin wrapper around _dispatch function using method GET", "id": "f16375:c0:m3"}
{"signature": "def _post(self, sub_url=None, data=None):", "body": "return self._dispatch(\"<STR_LIT:POST>\", sub_url, data)<EOL>", "docstring": "Thin wrapper around _dispatch function using method POST", "id": "f16375:c0:m4"}
{"signature": "def _project_create(self):", "body": "return self._post('<STR_LIT>', {\"<STR_LIT>\": self.project_id})<EOL>", "docstring": "Create this project with given.", "id": "f16375:c0:m10"}
{"signature": "def toJSON(self):", "body": "return {\"<STR_LIT>\": self.file_metadata.toJSON(),<EOL>\"<STR_LIT:content>\": self.file_content}<EOL>", "docstring": "Get a JSON dict of the data in this file.", "id": "f16376:c1:m3"}
{"signature": "def get_is_sim_running(self):", "body": "sim_info = self.simulation_info()<EOL>try:<EOL><INDENT>progress_info = sim_info['<STR_LIT>']<EOL>ret = progress_info['<STR_LIT>']<EOL><DEDENT>except KeyError:  <EOL><INDENT>ret = False<EOL><DEDENT>return ret<EOL>", "docstring": "Check if the current simulation is running.", "id": "f16376:c5:m7"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def file_create(self, file_object):<DEDENT>", "body": "", "docstring": "Add a file to the project\n\nfile_object -- a Kappa_common.File", "id": "f16376:c5:m10"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def simulation_start(self, simulation_parameter=None):<DEDENT>", "body": "", "docstring": "Start the simulation from the last parsed model.\n\n        Inputs\n        ------\n        simulation_parameter -- (optional) a kappa_common.SimulationParameter\n            instance. The default is set using the `set_default_sim_param`\n            method.", "id": "f16376:c5:m26"}
{"signature": "def get_default_sim_param(self):", "body": "if self.__default_param is None:<EOL><INDENT>raise KappaError(\"<STR_LIT>\")<EOL><DEDENT>return self.__default_param<EOL>", "docstring": "Get the default SimulationParameter instance.", "id": "f16376:c5:m6"}
{"signature": "def add_model_string(self, model_str, position=<NUM_LIT:1>, file_id=None):", "body": "if file_id is None:<EOL><INDENT>file_id = self.make_unique_id('<STR_LIT>')<EOL><DEDENT>ret_data = self.file_create(File.from_string(model_str, position,<EOL>file_id))<EOL>return ret_data<EOL>", "docstring": "Add a kappa model given in a string to the project.", "id": "f16376:c5:m3"}
{"signature": "@classmethod<EOL><INDENT>def from_file(cls, fpath, position=<NUM_LIT:1>, file_id=None):<DEDENT>", "body": "if file_id is None:<EOL><INDENT>file_id = fpath<EOL><DEDENT>with open(fpath) as f:<EOL><INDENT>code = f.read()<EOL>file_content = str(code)<EOL>file_metadata = FileMetadata(file_id, position)<EOL>return cls(file_metadata, file_content)<EOL><DEDENT>", "docstring": "Convience method to create a kappa file object from a file on disk\n\nInputs\n------\nfpath -- path to the file on disk\nposition -- (default 1) rank among all files of the model while parsing\n    see FileMetadata\nfile_id -- (default = fpath) the file_id that will be used by kappa.", "id": "f16376:c1:m2"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def simulation_info_file_line(self):<DEDENT>", "body": "", "docstring": "Lists files generated by $PRINT during the simulation", "id": "f16376:c5:m21"}
{"signature": "def wait_for_simulation_stop(self, timeout=None):", "body": "start = datetime.now()<EOL>while self.get_is_sim_running():<EOL><INDENT>sleep(<NUM_LIT:0.5>)<EOL>if timeout is not None:<EOL><INDENT>if (datetime.now() - start).seconds >= timeout:<EOL><INDENT>ret = None<EOL>break<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>ret = self.simulation_info()<EOL><DEDENT>return ret<EOL>", "docstring": "Block until the simulation is done or timeout seconds exceeded.\n\n        If the simulation stops before timeout, siminfo is returned.", "id": "f16376:c5:m8"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def simulation_info(self):<DEDENT>", "body": "", "docstring": "Returns state and progress of the simulation", "id": "f16376:c5:m20"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def simulation_file_line(self, file_line_id):<DEDENT>", "body": "", "docstring": "Returns the file file_line_id generated by $PRINT interventions", "id": "f16376:c5:m15"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def analyses_constraints_list(self):<DEDENT>", "body": "", "docstring": "Returns a bunch of invarients on the last parsed model", "id": "f16376:c5:m29"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def analyses_contact_map(self, accuracy=None):<DEDENT>", "body": "", "docstring": "Returns the contact of the last parsed model\n\nInput\n-----\naccuracy -- \\\"high\\\" means take into account reachability from\n   initial state. \\\"low\\\" means don't.", "id": "f16376:c5:m30"}
{"signature": "@classmethod<EOL><INDENT>def from_metadata_list(cls, metadata_list):<DEDENT>", "body": "return map(lambda info: cls(**info), metadata_list)<EOL>", "docstring": "Get metadata objects from a list of metadata.", "id": "f16376:c0:m1"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def analyses_influence_map(self, accuracy=None):<DEDENT>", "body": "", "docstring": "Returns the influence_map of the last parsed model\n\nInput\n-----\naccuracy -- level can be \\\"low\\\", \\\"medium\\\", \\\"high\\\" or \\\"full\\\".\n    Default is medium.", "id": "f16376:c5:m31"}
{"signature": "@classmethod<EOL><INDENT>def from_string(cls, content, position=<NUM_LIT:1>, file_id=None):<DEDENT>", "body": "if file_id is None:<EOL><INDENT>file_id = '<STR_LIT>'<EOL><DEDENT>return cls(FileMetadata(file_id, position), content)<EOL>", "docstring": "Convenience method to create a file from a string.\n\nThis file object's metadata will have the id 'inlined_input'.\n\nInputs\n------\ncontent -- the content of the file (a string).\nposition -- (default 1) rank among all files of the model while parsing\n    see FileMetadata\nfile_id -- (default 'inlined_input') the file_id that will be used by\n    kappa.", "id": "f16376:c1:m1"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def simulation_DIN(self, DIN_id):<DEDENT>", "body": "", "docstring": "Returns a given generated DIN", "id": "f16376:c5:m16"}
{"signature": "def go_str(value):", "body": "io = StringIO.StringIO()<EOL>io.write('<STR_LIT:\">')<EOL>for c in value:<EOL><INDENT>if c in _ESCAPES:<EOL><INDENT>io.write(_ESCAPES[c])<EOL><DEDENT>elif c in _SIMPLE_CHARS:<EOL><INDENT>io.write(c)<EOL><DEDENT>else:<EOL><INDENT>io.write(r'<STR_LIT>'.format(ord(c)))<EOL><DEDENT><DEDENT>io.write('<STR_LIT:\">')<EOL>return io.getvalue()<EOL>", "docstring": "Returns value as a valid Go string literal.", "id": "f16382:m0"}
{"signature": "@contextlib.contextmanager<EOL><INDENT>def indent_block(self, n=<NUM_LIT:1>):<DEDENT>", "body": "self.indent(n)<EOL>yield<EOL>self.dedent(n)<EOL>", "docstring": "A context manager that indents by n on entry and dedents on exit.", "id": "f16382:c4:m2"}
{"signature": "def write_block(self, block_, body):", "body": "self.write('<STR_LIT>')<EOL>with self.indent_block():<EOL><INDENT>self.write('<STR_LIT>')<EOL>self.write('<STR_LIT>')<EOL>for checkpoint in block_.checkpoints:<EOL><INDENT>self.write_tmpl('<STR_LIT>', state=checkpoint)<EOL><DEDENT>self.write('<STR_LIT>')<EOL>self.write('<STR_LIT:}>')<EOL>with self.indent_block(-<NUM_LIT:1>):<EOL><INDENT>self.write(body)<EOL><DEDENT><DEDENT>self.write('<STR_LIT:}>')<EOL>", "docstring": "Outputs the boilerplate necessary for code blocks like functions.\n\n        Args:\n          block_: The Block object representing the code block.\n          body: String containing Go code making up the body of the code block.", "id": "f16382:c4:m4"}
{"signature": "def _import_and_bind(self, imp):", "body": "<EOL>with self.block.alloc_temp() as mod,self.block.alloc_temp('<STR_LIT>') as mod_slice:<EOL><INDENT>self.writer.write_checked_call2(<EOL>mod_slice, '<STR_LIT>', util.go_str(imp.name))<EOL>for binding in imp.bindings:<EOL><INDENT>if binding.bind_type == imputil.Import.MODULE:<EOL><INDENT>self.writer.write('<STR_LIT>'.format(<EOL>mod.name, mod_slice.expr, binding.value))<EOL>self.block.bind_var(self.writer, binding.alias, mod.expr)<EOL><DEDENT>else:<EOL><INDENT>self.writer.write('<STR_LIT>'.format(<EOL>mod.name, mod_slice.expr, imp.name.count('<STR_LIT:.>')))<EOL>with self.block.alloc_temp() as member:<EOL><INDENT>self.writer.write_checked_call2(<EOL>member, '<STR_LIT>',<EOL>mod.expr, self.block.root.intern(binding.value))<EOL>self.block.bind_var(self.writer, binding.alias, member.expr)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "Generates code that imports a module and binds it to a variable.\n\n        Args:\n          imp: Import object representing an import of the form \"import x.y.z\" or\n              \"from x.y import z\". Expects only a single binding.", "id": "f16385:c0:m28"}
{"signature": "def visit_function_inline(self, node):", "body": "<EOL>func_visitor = block.FunctionBlockVisitor(node)<EOL>for child in node.body:<EOL><INDENT>func_visitor.visit(child)<EOL><DEDENT>func_block = block.FunctionBlock(self.block, node.name, func_visitor.vars,<EOL>func_visitor.is_generator)<EOL>visitor = StatementVisitor(func_block, self.future_node)<EOL>with visitor.writer.indent_block():<EOL><INDENT>visitor._visit_each(node.body)  <EOL><DEDENT>result = self.block.alloc_temp()<EOL>with self.block.alloc_temp('<STR_LIT>') as func_args:<EOL><INDENT>args = node.args<EOL>argc = len(args.args)<EOL>self.writer.write('<STR_LIT>'.format(<EOL>func_args.expr, argc))<EOL>defaults = [None] * (argc - len(args.defaults)) + args.defaults<EOL>for i, (a, d) in enumerate(zip(args.args, defaults)):<EOL><INDENT>with self.visit_expr(d) if d else expr.nil_expr as default:<EOL><INDENT>tmpl = '<STR_LIT>'<EOL>self.writer.write_tmpl(tmpl, args=func_args.expr, i=i,<EOL>name=util.go_str(a.arg), default=default.expr)<EOL><DEDENT><DEDENT>flags = []<EOL>if args.vararg:<EOL><INDENT>flags.append('<STR_LIT>')<EOL><DEDENT>if args.kwarg:<EOL><INDENT>flags.append('<STR_LIT>')<EOL><DEDENT>self.writer.write_tmpl(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>result=result.name, name=util.go_str(node.name),<EOL>filename=util.go_str(self.block.root.filename), args=func_args.expr,<EOL>flags='<STR_LIT>'.join(flags) if flags else <NUM_LIT:0>)<EOL>with self.writer.indent_block():<EOL><INDENT>for var in func_block.vars.values():<EOL><INDENT>if var.type != block.Var.TYPE_GLOBAL:<EOL><INDENT>fmt = '<STR_LIT>'<EOL>self.writer.write(fmt.format(<EOL>util.adjust_local_name(var.name), var.init_expr))<EOL><DEDENT><DEDENT>self.writer.write_temp_decls(func_block)<EOL>self.writer.write('<STR_LIT>')<EOL>self.writer.write('<STR_LIT>')<EOL>if func_block.is_generator:<EOL><INDENT>self.writer.write(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL>with 
self.writer.indent_block():<EOL><INDENT>self.writer.write_block(func_block, visitor.writer.getvalue())<EOL>self.writer.write('<STR_LIT>')<EOL><DEDENT>self.writer.write('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.writer.write_block(func_block, visitor.writer.getvalue())<EOL>self.writer.write(textwrap.dedent(\"\"\"<STR_LIT>\"\"\"))<EOL><DEDENT><DEDENT>self.writer.write('<STR_LIT>')<EOL><DEDENT>return result<EOL>", "docstring": "Returns an GeneratedExpr for a function with the given body.", "id": "f16385:c0:m25"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def resolve_name(self, writer, name):<DEDENT>", "body": "pass<EOL>", "docstring": "Returns a GeneratedExpr object for accessing the named var in this block.\n\n        This is overridden in the different concrete block types since name\n        resolution in Python behaves differently depending on where in what kind of\n        block its happening within, e.g. local vars are different than globals.\n\n        Args:\n          writer: Writer object where intermediate calculations will be printed.\n          name: The name of the Python variable.", "id": "f16392:c2:m3"}
{"signature": "def alloc_temp(self, type_='<STR_LIT>'):", "body": "for v in sorted(self.free_temps, key=lambda k: k.name):<EOL><INDENT>if v.type_ == type_:<EOL><INDENT>self.free_temps.remove(v)<EOL>self.used_temps.add(v)<EOL>return v<EOL><DEDENT><DEDENT>self.temp_index += <NUM_LIT:1><EOL>name = '<STR_LIT>'.format(self.temp_index)<EOL>v = expr.GeneratedTempVar(self, name, type_)<EOL>self.used_temps.add(v)<EOL>return v<EOL>", "docstring": "Create a new temporary Go variable having type type_ for this block.", "id": "f16392:c2:m5"}
{"signature": "@abc.abstractmethod<EOL><INDENT>def bind_var(self, writer, name, value):<DEDENT>", "body": "pass<EOL>", "docstring": "Writes Go statements for assigning value to named var in this block.\n\n        This is overridden in the different concrete block types since in Python,\n        binding a variable in, e.g. a function is quite different than binding at\n        global block.\n\n        Args:\n          writer: The Writer object where statements will be written.\n          name: The name of the Python variable.\n          value: A Go expression to assign to the variable.", "id": "f16392:c2:m1"}
{"signature": "def free_temp(self, v):", "body": "self.used_temps.remove(v)<EOL>self.free_temps.add(v)<EOL>", "docstring": "Release the GeneratedTempVar v so it can be reused.", "id": "f16392:c2:m6"}
{"signature": "def _MakeSliceTest(subscript, want):", "body": "def Test(self):<EOL><INDENT>code = textwrap.dedent(\"\"\"<STR_LIT>\"\"\")<EOL>status, output = _GrumpRun(code.format(subscript))<EOL>self.assertEqual(<NUM_LIT:0>, status, output)<EOL>self.assertEqual(want, output.strip())<EOL><DEDENT>return Test<EOL>", "docstring": "Define a test function that evaluates a slice expression.", "id": "f16393:m2"}
{"signature": "def assertFalse(self, expr, msg=None):", "body": "if expr:<EOL><INDENT>msg = self._formatMessage(msg, \"<STR_LIT>\" % safe_repr(expr))<EOL>raise self.failureException(msg)<EOL><DEDENT>", "docstring": "Check that the expression is false.", "id": "f16396:c5:m23"}
{"signature": "def skipIf(condition, reason):", "body": "if condition:<EOL><INDENT>return skip(reason)<EOL><DEDENT>return _id<EOL>", "docstring": "Skip a test if the condition is true.", "id": "f16396:m2"}
{"signature": "def assertLessEqual(self, a, b, msg=None):", "body": "if not a <= b:<EOL><INDENT>standardMsg = '<STR_LIT>' % (safe_repr(a), safe_repr(b))<EOL>self.fail(self._formatMessage(msg, standardMsg))<EOL><DEDENT>", "docstring": "Just like self.assertTrue(a <= b), but with a nicer default message.", "id": "f16396:c5:m48"}
{"signature": "def assertRaises(self, excClass, callableObj=None, *args, **kwargs):", "body": "context = _AssertRaisesContext(excClass, self)<EOL>if callableObj is None:<EOL><INDENT>return context<EOL><DEDENT>with context:<EOL><INDENT>callableObj(*args, **kwargs)<EOL><DEDENT>", "docstring": "Fail unless an exception of class excClass is raised\n           by callableObj when invoked with arguments args and keyword\n           arguments kwargs. If a different type of exception is\n           raised, it will not be caught, and the test case will be\n           deemed to have suffered an error, exactly as for an\n           unexpected exception.\n\n           If called with callableObj omitted or None, will return a\n           context object used like this::\n\n                with self.assertRaises(SomeException):\n                    do_something()\n\n           The context manager keeps a reference to the exception as\n           the 'exception' attribute. This allows you to inspect the\n           exception after the assertion::\n\n               with self.assertRaises(SomeException) as cm:\n                   do_something()\n               the_exception = cm.exception\n               self.assertEqual(the_exception.error_code, 3)", "id": "f16396:c5:m26"}
{"signature": "def assertTrue(self, expr, msg=None):", "body": "if not expr:<EOL><INDENT>msg = self._formatMessage(msg, \"<STR_LIT>\" % safe_repr(expr))<EOL>raise self.failureException(msg)<EOL><DEDENT>", "docstring": "Check that the expression is true.", "id": "f16396:c5:m24"}
{"signature": "def __init__(self, methodName='<STR_LIT>'):", "body": "self._testMethodName = methodName<EOL>self._resultForDoCleanups = None<EOL>try:<EOL><INDENT>testMethod = getattr(self, methodName)<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" %<EOL>(self.__class__, methodName))<EOL><DEDENT>self._cleanups = []<EOL>self._type_equality_funcs = {}<EOL>self.addTypeEqualityFunc(dict, '<STR_LIT>')<EOL>self.addTypeEqualityFunc(list, '<STR_LIT>')<EOL>self.addTypeEqualityFunc(tuple, '<STR_LIT>')<EOL>self.addTypeEqualityFunc(set, '<STR_LIT>')<EOL>self.addTypeEqualityFunc(frozenset, '<STR_LIT>')<EOL>try:<EOL><INDENT>self.addTypeEqualityFunc(str, '<STR_LIT>')<EOL><DEDENT>except NameError:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Create an instance of the class that will use the named test\n           method when executed. Raises a ValueError if the instance does\n           not have a method with the specified name.", "id": "f16396:c5:m0"}
{"signature": "def assertIsNotNone(self, obj, msg=None):", "body": "if obj is None:<EOL><INDENT>standardMsg = '<STR_LIT>'<EOL>self.fail(self._formatMessage(msg, standardMsg))<EOL><DEDENT>", "docstring": "Included for symmetry with assertIsNone.", "id": "f16396:c5:m52"}
{"signature": "def tearDown(self):", "body": "pass<EOL>", "docstring": "Hook method for deconstructing the test fixture after testing it.", "id": "f16396:c5:m4"}
{"signature": "def addTypeEqualityFunc(self, typeobj, function):", "body": "self._type_equality_funcs[typeobj] = function<EOL>", "docstring": "Add a type specific assertEqual style function to compare a type.\n\n        This method is for use by TestCase subclasses that need to register\n        their own type equality functions to provide nicer error messages.\n\n        Args:\n            typeobj: The data type to call this function on when both values\n                    are of the same type in assertEqual().\n            function: The callable taking two arguments and an optional\n                    msg= argument that raises self.failureException with a\n                    useful error message when the two arguments are not equal.", "id": "f16396:c5:m1"}
{"signature": "def debug(self):", "body": "self.setUp()<EOL>getattr(self, self._testMethodName)()<EOL>self.tearDown()<EOL>while self._cleanups:<EOL><INDENT>function, args, kwargs = self._cleanups.pop(-<NUM_LIT:1>)<EOL>function(*args, **kwargs)<EOL><DEDENT>", "docstring": "Run the test without collecting errors in a TestResult", "id": "f16396:c5:m20"}
{"signature": "def fail(self, msg=None):", "body": "raise self.failureException(msg)<EOL>", "docstring": "Fail immediately, with the given message.", "id": "f16396:c5:m22"}
{"signature": "def assertNotEqual(self, first, second, msg=None):", "body": "if not first != second:<EOL><INDENT>msg = self._formatMessage(msg, '<STR_LIT>' % (safe_repr(first),<EOL>safe_repr(second)))<EOL>raise self.failureException(msg)<EOL><DEDENT>", "docstring": "Fail if the two objects are equal as determined by the '!='\n           operator.", "id": "f16396:c5:m30"}
{"signature": "def assertGreaterEqual(self, a, b, msg=None):", "body": "if not a >= b:<EOL><INDENT>standardMsg = '<STR_LIT>' % (safe_repr(a), safe_repr(b))<EOL>self.fail(self._formatMessage(msg, standardMsg))<EOL><DEDENT>", "docstring": "Just like self.assertTrue(a >= b), but with a nicer default message.", "id": "f16396:c5:m50"}
{"signature": "def assertEqual(self, first, second, msg=None):", "body": "assertion_func = self._getAssertEqualityFunc(first, second)<EOL>assertion_func(first, second, msg=msg)<EOL>", "docstring": "Fail if the two objects are unequal as determined by the '=='\n           operator.", "id": "f16396:c5:m29"}
{"signature": "def addCleanup(self, function, *args, **kwargs):", "body": "self._cleanups.append((function, args, kwargs))<EOL>", "docstring": "Add a function, with arguments, to be called when the test is\n        completed. Functions added are called on a LIFO basis and are\n        called after tearDown on test failure or success.\n\n        Cleanup items are called even if setUp fails (unlike tearDown).", "id": "f16396:c5:m2"}
{"signature": "def _baseAssertEqual(self, first, second, msg=None):", "body": "if not first == second:<EOL><INDENT>standardMsg = '<STR_LIT>' % (safe_repr(first), safe_repr(second))<EOL>msg = self._formatMessage(msg, standardMsg)<EOL>raise self.failureException(msg)<EOL><DEDENT>", "docstring": "The default assertEqual implementation, not type specific.", "id": "f16396:c5:m28"}
{"signature": "def assertNotIn(self, member, container, msg=None):", "body": "if member in container:<EOL><INDENT>standardMsg = '<STR_LIT>' % (safe_repr(member),<EOL>safe_repr(container))<EOL>self.fail(self._formatMessage(msg, standardMsg))<EOL><DEDENT>", "docstring": "Just like self.assertTrue(a not in b), but with a nicer default message.", "id": "f16396:c5:m40"}
{"signature": "def assertMultiLineEqual(self, first, second, msg=None):", "body": "self.assertIsInstance(first, str,<EOL>'<STR_LIT>')<EOL>self.assertIsInstance(second, str,<EOL>'<STR_LIT>')<EOL>if first != second:<EOL><INDENT>if (len(first) > self._diffThreshold or<EOL>len(second) > self._diffThreshold):<EOL><INDENT>self._baseAssertEqual(first, second, msg)<EOL><DEDENT>firstlines = first.splitlines(True)<EOL>secondlines = second.splitlines(True)<EOL>if len(firstlines) == <NUM_LIT:1> and first.strip('<STR_LIT:\\r\\n>') == first:<EOL><INDENT>firstlines = [first + '<STR_LIT:\\n>']<EOL>secondlines = [second + '<STR_LIT:\\n>']<EOL><DEDENT>standardMsg = '<STR_LIT>' % (safe_repr(first, True),<EOL>safe_repr(second, True))<EOL>diff = '<STR_LIT:\\n>' + '<STR_LIT>'.join(difflib.ndiff(firstlines, secondlines))<EOL>standardMsg = self._truncateMessage(standardMsg, diff)<EOL>self.fail(self._formatMessage(msg, standardMsg))<EOL><DEDENT>", "docstring": "Assert that two multi-line strings are equal.", "id": "f16396:c5:m46"}
{"signature": "def assertIsNone(self, obj, msg=None):", "body": "if obj is not None:<EOL><INDENT>standardMsg = '<STR_LIT>' % (safe_repr(obj),)<EOL>self.fail(self._formatMessage(msg, standardMsg))<EOL><DEDENT>", "docstring": "Same as self.assertTrue(obj is None), with a nicer default message.", "id": "f16396:c5:m51"}
{"signature": "def assertItemsEqual(self, expected_seq, actual_seq, msg=None):", "body": "first_seq, second_seq = list(expected_seq), list(actual_seq)<EOL>with warnings.catch_warnings():<EOL><INDENT>if sys.py3kwarning:<EOL><INDENT>for _msg in [\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\"]:<EOL><INDENT>warnings.filterwarnings(\"<STR_LIT:ignore>\", _msg, DeprecationWarning)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>first = collections.Counter(first_seq)<EOL>second = collections.Counter(second_seq)<EOL><DEDENT>except TypeError:<EOL><INDENT>differences = _count_diff_all_purpose(first_seq, second_seq)<EOL><DEDENT>else:<EOL><INDENT>if first == second:<EOL><INDENT>return<EOL><DEDENT>differences = _count_diff_hashable(first_seq, second_seq)<EOL><DEDENT><DEDENT>if differences:<EOL><INDENT>standardMsg = '<STR_LIT>'<EOL>lines = ['<STR_LIT>' % diff for diff in differences]<EOL>diffMsg = '<STR_LIT:\\n>'.join(lines)<EOL>standardMsg = self._truncateMessage(standardMsg, diffMsg)<EOL>msg = self._formatMessage(msg, standardMsg)<EOL>self.fail(msg)<EOL><DEDENT>", "docstring": "An unordered sequence specific comparison. It asserts that\n        actual_seq and expected_seq have the same element counts.\n        Equivalent to::\n\n            self.assertEqual(Counter(iter(actual_seq)),\n                             Counter(iter(expected_seq)))\n\n        Asserts that each element has the same count in both sequences.\n        Example:\n            - [0, 1, 1] and [1, 0, 1] compare equal.\n            - [0, 0, 1] and [0, 1] compare unequal.", "id": "f16396:c5:m45"}
{"signature": "def shortDescription(self):", "body": "<EOL>return '<STR_LIT>'<EOL>", "docstring": "Returns a one-line description of the test, or None if no\n        description has been provided.\n\n        The default implementation of this method returns the first line of\n        the specified test method's docstring.", "id": "f16396:c5:m9"}
{"signature": "def assertDictContainsSubset(self, expected, actual, msg=None):", "body": "missing = []<EOL>mismatched = []<EOL>for key, value in expected.items():<EOL><INDENT>if key not in actual:<EOL><INDENT>missing.append(key)<EOL><DEDENT>elif value != actual[key]:<EOL><INDENT>mismatched.append('<STR_LIT>' %<EOL>(safe_repr(key), safe_repr(value),<EOL>safe_repr(actual[key])))<EOL><DEDENT><DEDENT>if not (missing or mismatched):<EOL><INDENT>return<EOL><DEDENT>standardMsg = '<STR_LIT>'<EOL>if missing:<EOL><INDENT>standardMsg = '<STR_LIT>' % '<STR_LIT:U+002C>'.join(safe_repr(m) for m in<EOL>missing)<EOL><DEDENT>if mismatched:<EOL><INDENT>if standardMsg:<EOL><INDENT>standardMsg += '<STR_LIT>'<EOL><DEDENT>standardMsg += '<STR_LIT>' % '<STR_LIT:U+002C>'.join(mismatched)<EOL><DEDENT>self.fail(self._formatMessage(msg, standardMsg))<EOL>", "docstring": "Checks whether actual is a superset of expected.", "id": "f16396:c5:m44"}
{"signature": "def _formatMessage(self, msg, standardMsg):", "body": "if not self.longMessage:<EOL><INDENT>return msg or standardMsg<EOL><DEDENT>if msg is None:<EOL><INDENT>return standardMsg<EOL><DEDENT>try:<EOL><INDENT>return '<STR_LIT>' % (standardMsg, msg)<EOL><DEDENT>except UnicodeDecodeError:<EOL><INDENT>return  '<STR_LIT>' % (safe_repr(standardMsg), safe_repr(msg))<EOL><DEDENT>", "docstring": "Honour the longMessage attribute when generating failure messages.\n        If longMessage is False this means:\n        * Use only an explicit message if it is provided\n        * Otherwise use the standard message for the assert\n\n        If longMessage is True:\n        * Use the standard message\n        * If an explicit message is provided, plus ' : ' and the explicit message", "id": "f16396:c5:m25"}
{"signature": "def setUpClass(cls):", "body": "", "docstring": "Hook method for setting up class fixture before running tests in the class.", "id": "f16396:c5:m5"}
{"signature": "def skipUnless(condition, reason):", "body": "if not condition:<EOL><INDENT>return skip(reason)<EOL><DEDENT>return _id<EOL>", "docstring": "Skip a test unless the condition is true.", "id": "f16396:m3"}
{"signature": "def setdefault(self, key, default=None):", "body": "if key in self:<EOL><INDENT>return self[key]<EOL><DEDENT>self[key] = default<EOL>return default<EOL>", "docstring": "od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od", "id": "f16398:c0:m13"}
{"signature": "def __add__(self, other):", "body": "if not isinstance(other, Counter):<EOL><INDENT>return NotImplemented<EOL><DEDENT>result = Counter()<EOL>for elem, count in self.items():<EOL><INDENT>newcount = count + other[elem]<EOL>if newcount > <NUM_LIT:0>:<EOL><INDENT>result[elem] = newcount<EOL><DEDENT><DEDENT>for elem, count in other.items():<EOL><INDENT>if elem not in self and count > <NUM_LIT:0>:<EOL><INDENT>result[elem] = count<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Add counts from two counters.\n\n        >>> Counter('abbb') + Counter('bcc')\n        Counter({'b': 4, 'c': 2, 'a': 1})", "id": "f16398:c1:m11"}
{"signature": "def __delitem__(self, key, dict_delitem=dict.__delitem__):", "body": "<EOL>dict_delitem(self, key)<EOL>link_prev, link_next, _ = self.__map.pop(key)<EOL>link_prev[<NUM_LIT:1>] = link_next                        <EOL>link_next[<NUM_LIT:0>] = link_prev                        <EOL>", "docstring": "od.__delitem__(y) <==> del od[y]", "id": "f16398:c0:m2"}
{"signature": "def viewkeys(self):", "body": "return KeysView(self)<EOL>", "docstring": "od.viewkeys() -> a set-like object providing a view on od's keys", "id": "f16398:c0:m21"}
{"signature": "def viewitems(self):", "body": "return ItemsView(self)<EOL>", "docstring": "od.viewitems() -> a set-like object providing a view on od's items", "id": "f16398:c0:m23"}
{"signature": "def elements(self):", "body": "<EOL>return _chain.from_iterable(_starmap(_repeat, self.iteritems()))<EOL>", "docstring": "Iterator over elements repeating each as many times as its count.\n\n        >>> c = Counter('ABCABC')\n        >>> sorted(c.elements())\n        ['A', 'A', 'B', 'B', 'C', 'C']\n\n        # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1\n        >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})\n        >>> product = 1\n        >>> for factor in prime_factors.elements():     # loop over factors\n        ...     product *= factor                       # and multiply them\n        >>> product\n        1836\n\n        Note, if an element's count has been set to zero or is a negative\n        number, elements() will ignore it.", "id": "f16398:c1:m3"}
{"signature": "def __sub__(self, other):", "body": "if not isinstance(other, Counter):<EOL><INDENT>return NotImplemented<EOL><DEDENT>result = Counter()<EOL>for elem, count in self.items():<EOL><INDENT>newcount = count - other[elem]<EOL>if newcount > <NUM_LIT:0>:<EOL><INDENT>result[elem] = newcount<EOL><DEDENT><DEDENT>for elem, count in other.items():<EOL><INDENT>if elem not in self and count < <NUM_LIT:0>:<EOL><INDENT>result[elem] = <NUM_LIT:0> - count<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Subtract count, but keep only results with positive counts.\n\n        >>> Counter('abbbc') - Counter('bccd')\n        Counter({'b': 2, 'a': 1})", "id": "f16398:c1:m12"}
{"signature": "def __init__(*args, **kwds):", "body": "if not args:<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>self = args[<NUM_LIT:0>]<EOL>args = args[<NUM_LIT:1>:]<EOL>if len(args) > <NUM_LIT:1>:<EOL><INDENT>raise TypeError('<STR_LIT>' % len(args))<EOL><DEDENT>try:<EOL><INDENT>self.__root<EOL><DEDENT>except AttributeError:<EOL><INDENT>self.__root = root = []                     <EOL>root[:] = [root, root, None]<EOL>self.__map = {}<EOL><DEDENT>self.__update(*args, **kwds)<EOL>", "docstring": "Initialize an ordered dictionary.  The signature is the same as\n        regular dictionaries, but keyword arguments are not recommended because\n        their insertion order is arbitrary.", "id": "f16398:c0:m0"}
{"signature": "def clear(self):", "body": "root = self.__root<EOL>root[:] = [root, root, None]<EOL>self.__map.clear()<EOL>dict.clear(self)<EOL>", "docstring": "od.clear() -> None.  Remove all items from od.", "id": "f16398:c0:m5"}
{"signature": "def __missing__(self, key):", "body": "<EOL>return <NUM_LIT:0><EOL>", "docstring": "The count of elements not in the Counter is zero.", "id": "f16398:c1:m1"}
{"signature": "def __and__(self, other):", "body": "if not isinstance(other, Counter):<EOL><INDENT>return NotImplemented<EOL><DEDENT>result = Counter()<EOL>for elem, count in self.items():<EOL><INDENT>other_count = other[elem]<EOL>newcount = count if count < other_count else other_count<EOL>if newcount > <NUM_LIT:0>:<EOL><INDENT>result[elem] = newcount<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Intersection is the minimum of corresponding counts.\n\n        >>> Counter('abbb') & Counter('bcc')\n        Counter({'b': 1})", "id": "f16398:c1:m14"}
{"signature": "def copy(self):", "body": "return self.__class__(self)<EOL>", "docstring": "Return a shallow copy.", "id": "f16398:c1:m7"}
{"signature": "def saferepr(o):", "body": "return _safe_repr(o, {}, None, <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>", "docstring": "Version of repr() which can handle recursive data structures.", "id": "f16399:m2"}
{"signature": "def format(self, o, context, maxlevels, level):", "body": "return _safe_repr(o, context, maxlevels, level)<EOL>", "docstring": "Format o for a specific context, returning a string\n        and flags indicating whether the representation is 'readable'\n        and whether the o represents a recursive construct.", "id": "f16399:c0:m7"}
{"signature": "def contextmanager(func):", "body": "@wraps(func)<EOL>def helper(*args, **kwds):<EOL><INDENT>return GeneratorContextManager(func(*args, **kwds))<EOL><DEDENT>return helper<EOL>", "docstring": "@contextmanager decorator.\n\n    Typical usage:\n\n        @contextmanager\n        def some_generator(<arguments>):\n            <setup>\n            try:\n                yield <value>\n            finally:\n                <cleanup>\n\n    This makes this:\n\n        with some_generator(<arguments>) as <variable>:\n            <body>\n\n    equivalent to this:\n\n        <setup>\n        try:\n            <variable> = <value>\n            <body>\n        finally:\n            <cleanup>", "id": "f16400:m0"}
{"signature": "def unquote(s):", "body": "if _is_unicode(s):<EOL><INDENT>if '<STR_LIT:%>' not in s:<EOL><INDENT>return s<EOL><DEDENT>bits = _asciire.split(s)<EOL>res = [bits[<NUM_LIT:0>]]<EOL>append = res.append<EOL>for i in range(<NUM_LIT:1>, len(bits), <NUM_LIT:2>):<EOL><INDENT>append(unquote(str(bits[i])).decode('<STR_LIT>'))<EOL>append(bits[i + <NUM_LIT:1>])<EOL><DEDENT>return '<STR_LIT>'.join(res)<EOL><DEDENT>bits = s.split('<STR_LIT:%>')<EOL>if len(bits) == <NUM_LIT:1>:<EOL><INDENT>return s<EOL><DEDENT>res = [bits[<NUM_LIT:0>]]<EOL>append = res.append<EOL>for item in bits[<NUM_LIT:1>:]:<EOL><INDENT>try:<EOL><INDENT>append(_hextochr[item[:<NUM_LIT:2>]])<EOL>append(item[<NUM_LIT:2>:])<EOL><DEDENT>except KeyError:<EOL><INDENT>append('<STR_LIT:%>')<EOL>append(item)<EOL><DEDENT><DEDENT>return '<STR_LIT>'.join(res)<EOL>", "docstring": "unquote('abc%20def') -> 'abc def'.", "id": "f16401:m9"}
{"signature": "def _make(cls, iterable, new=tuple.__new__, len=len):", "body": "result = new(cls, iterable)<EOL>if len(result) != <NUM_LIT:6>:<EOL><INDENT>raise TypeError('<STR_LIT>' % len(result))<EOL><DEDENT>return result<EOL>", "docstring": "Make a new ParseResult object from a sequence or iterable", "id": "f16401:c3:m1"}
{"signature": "def clear_cache():", "body": "_parse_cache.clear()<EOL>", "docstring": "Clear the parse cache.", "id": "f16401:m0"}
{"signature": "def urljoin(base, url, allow_fragments=True):", "body": "if not base:<EOL><INDENT>return url<EOL><DEDENT>if not url:<EOL><INDENT>return base<EOL><DEDENT>bscheme, bnetloc, bpath, bparams, bquery, bfragment =urlparse(base, '<STR_LIT>', allow_fragments)<EOL>scheme, netloc, path, params, query, fragment =urlparse(url, bscheme, allow_fragments)<EOL>if scheme != bscheme or scheme not in uses_relative:<EOL><INDENT>return url<EOL><DEDENT>if scheme in uses_netloc:<EOL><INDENT>if netloc:<EOL><INDENT>return urlunparse((scheme, netloc, path,<EOL>params, query, fragment))<EOL><DEDENT>netloc = bnetloc<EOL><DEDENT>if path[:<NUM_LIT:1>] == '<STR_LIT:/>':<EOL><INDENT>return urlunparse((scheme, netloc, path,<EOL>params, query, fragment))<EOL><DEDENT>if not path and not params:<EOL><INDENT>path = bpath<EOL>params = bparams<EOL>if not query:<EOL><INDENT>query = bquery<EOL><DEDENT>return urlunparse((scheme, netloc, path,<EOL>params, query, fragment))<EOL><DEDENT>segments = bpath.split('<STR_LIT:/>')[:-<NUM_LIT:1>] + path.split('<STR_LIT:/>')<EOL>if segments[-<NUM_LIT:1>] == '<STR_LIT:.>':<EOL><INDENT>segments[-<NUM_LIT:1>] = '<STR_LIT>'<EOL><DEDENT>while '<STR_LIT:.>' in segments:<EOL><INDENT>segments.remove('<STR_LIT:.>')<EOL><DEDENT>while <NUM_LIT:1>:<EOL><INDENT>i = <NUM_LIT:1><EOL>n = len(segments) - <NUM_LIT:1><EOL>while i < n:<EOL><INDENT>if (segments[i] == '<STR_LIT:..>'<EOL>and segments[i-<NUM_LIT:1>] not in ('<STR_LIT>', '<STR_LIT:..>')):<EOL><INDENT>del segments[i-<NUM_LIT:1>:i+<NUM_LIT:1>]<EOL>break<EOL><DEDENT>i = i+<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if segments == ['<STR_LIT>', '<STR_LIT:..>']:<EOL><INDENT>segments[-<NUM_LIT:1>] = '<STR_LIT>'<EOL><DEDENT>elif len(segments) >= <NUM_LIT:2> and segments[-<NUM_LIT:1>] == '<STR_LIT:..>':<EOL><INDENT>segments[-<NUM_LIT:2>:] = ['<STR_LIT>']<EOL><DEDENT>return urlunparse((scheme, netloc, '<STR_LIT:/>'.join(segments),<EOL>params, query, fragment))<EOL>", "docstring": "Join a base URL and a possibly relative URL to form an absolute\n    interpretation of the latter.", "id": "f16401:m7"}
{"signature": "def urlunsplit(data):", "body": "scheme, netloc, url, query, fragment = data<EOL>if netloc or (scheme and scheme in uses_netloc and url[:<NUM_LIT:2>] != '<STR_LIT>'):<EOL><INDENT>if url and url[:<NUM_LIT:1>] != '<STR_LIT:/>': url = '<STR_LIT:/>' + url<EOL>url = '<STR_LIT>' + (netloc or '<STR_LIT>') + url<EOL><DEDENT>if scheme:<EOL><INDENT>url = scheme + '<STR_LIT::>' + url<EOL><DEDENT>if query:<EOL><INDENT>url = url + '<STR_LIT:?>' + query<EOL><DEDENT>if fragment:<EOL><INDENT>url = url + '<STR_LIT:#>' + fragment<EOL><DEDENT>return url<EOL>", "docstring": "Combine the elements of a tuple as returned by urlsplit() into a\n    complete URL as a string. The data argument can be any five-item iterable.\n    This may result in a slightly different, but equivalent URL, if the URL that\n    was parsed originally had unnecessary delimiters (for example, a ? with an\n    empty query; the RFC states that these are equivalent).", "id": "f16401:m6"}
{"signature": "def urlunparse(data):", "body": "scheme, netloc, url, params, query, fragment = data<EOL>if params:<EOL><INDENT>url = \"<STR_LIT>\" % (url, params)<EOL><DEDENT>return urlunsplit((scheme, netloc, url, query, fragment))<EOL>", "docstring": "Put a parsed URL back together again.  This may result in a\n    slightly different, but equivalent URL, if the URL that was parsed\n    originally had redundant delimiters, e.g. a ? with an empty query\n    (the draft states that these are equivalent).", "id": "f16401:m5"}
{"signature": "def parse_qs(qs, keep_blank_values=<NUM_LIT:0>, strict_parsing=<NUM_LIT:0>):", "body": "dict = {}<EOL>for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):<EOL><INDENT>if name in dict:<EOL><INDENT>dict[name].append(value)<EOL><DEDENT>else:<EOL><INDENT>dict[name] = [value]<EOL><DEDENT><DEDENT>return dict<EOL>", "docstring": "Parse a query given as a string argument.\n\n        Arguments:\n\n        qs: percent-encoded query string to be parsed\n\n        keep_blank_values: flag indicating whether blank values in\n            percent-encoded queries should be treated as blank strings.\n            A true value indicates that blanks should be retained as\n            blank strings.  The default false value indicates that\n            blank values are to be ignored and treated as if they were\n            not included.\n\n        strict_parsing: flag indicating what to do with parsing errors.\n            If false (the default), errors are silently ignored.\n            If true, errors raise a ValueError exception.", "id": "f16401:m10"}
{"signature": "def _asdict(self):", "body": "return OrderedDict(list(zip(self._fields, self)))<EOL>", "docstring": "Return a new OrderedDict which maps field names to their values", "id": "f16401:c1:m3"}
{"signature": "def __getstate__(self):", "body": "pass<EOL>", "docstring": "Exclude the OrderedDict from pickling", "id": "f16401:c3:m6"}
{"signature": "def _replace(_self, **kwds):", "body": "result = _self._make(list(map(kwds.pop, ('<STR_LIT>', '<STR_LIT>', '<STR_LIT:path>', '<STR_LIT>', '<STR_LIT>'), _self)))<EOL>if kwds:<EOL><INDENT>raise ValueError('<STR_LIT>' % list(kwds.keys()))<EOL><DEDENT>return result<EOL>", "docstring": "Return a new SplitResult object replacing specified fields with new values", "id": "f16401:c1:m4"}
{"signature": "def decode(input, output, header = <NUM_LIT:0>):", "body": "if a2b_qp is not None:<EOL><INDENT>data = input.read()<EOL>odata = a2b_qp(data, header = header)<EOL>output.write(odata)<EOL>return<EOL><DEDENT>new = '<STR_LIT>'<EOL>while <NUM_LIT:1>:<EOL><INDENT>line = input.readline()<EOL>if not line: break<EOL>i, n = <NUM_LIT:0>, len(line)<EOL>if n > <NUM_LIT:0> and line[n-<NUM_LIT:1>] == '<STR_LIT:\\n>':<EOL><INDENT>partial = <NUM_LIT:0>; n = n-<NUM_LIT:1><EOL>while n > <NUM_LIT:0> and line[n-<NUM_LIT:1>] in \"<STR_LIT>\":<EOL><INDENT>n = n-<NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>partial = <NUM_LIT:1><EOL><DEDENT>while i < n:<EOL><INDENT>c = line[i]<EOL>if c == '<STR_LIT:_>' and header:<EOL><INDENT>new = new + '<STR_LIT:U+0020>'; i = i+<NUM_LIT:1><EOL><DEDENT>elif c != ESCAPE:<EOL><INDENT>new = new + c; i = i+<NUM_LIT:1><EOL><DEDENT>elif i+<NUM_LIT:1> == n and not partial:<EOL><INDENT>partial = <NUM_LIT:1>; break<EOL><DEDENT>elif i+<NUM_LIT:1> < n and line[i+<NUM_LIT:1>] == ESCAPE:<EOL><INDENT>new = new + ESCAPE; i = i+<NUM_LIT:2><EOL><DEDENT>elif i+<NUM_LIT:2> < n and ishex(line[i+<NUM_LIT:1>]) and ishex(line[i+<NUM_LIT:2>]):<EOL><INDENT>new = new + chr(unhex(line[i+<NUM_LIT:1>:i+<NUM_LIT:3>])); i = i+<NUM_LIT:3><EOL><DEDENT>else: <EOL><INDENT>new = new + c; i = i+<NUM_LIT:1><EOL><DEDENT><DEDENT>if not partial:<EOL><INDENT>output.write(new + '<STR_LIT:\\n>')<EOL>new = '<STR_LIT>'<EOL><DEDENT><DEDENT>if new:<EOL><INDENT>output.write(new)<EOL><DEDENT>", "docstring": "Read 'input', apply quoted-printable decoding, and write to 'output'.\n    'input' and 'output' are files with readline() and write() methods.\n    If 'header' is true, decode underscore as space (per RFC 1522).", "id": "f16402:m4"}
{"signature": "def unhex(s):", "body": "bits = <NUM_LIT:0><EOL>for c in s:<EOL><INDENT>if '<STR_LIT:0>' <= c <= '<STR_LIT>':<EOL><INDENT>i = ord('<STR_LIT:0>')<EOL><DEDENT>elif '<STR_LIT:a>' <= c <= '<STR_LIT:f>':<EOL><INDENT>i = ord('<STR_LIT:a>')-<NUM_LIT:10><EOL><DEDENT>elif '<STR_LIT:A>' <= c <= '<STR_LIT:F>':<EOL><INDENT>i = ord('<STR_LIT:A>')-<NUM_LIT:10><EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT>bits = bits*<NUM_LIT:16> + (ord(c) - i)<EOL><DEDENT>return bits<EOL>", "docstring": "Get the integer value of a hexadecimal number.", "id": "f16402:m7"}
{"signature": "def needsquoting(c, quotetabs, header):", "body": "if c in '<STR_LIT>':<EOL><INDENT>return quotetabs<EOL><DEDENT>if c == '<STR_LIT:_>':<EOL><INDENT>return header<EOL><DEDENT>return c == ESCAPE or not ('<STR_LIT:U+0020>' <= c <= '<STR_LIT>')<EOL>", "docstring": "Decide whether a particular character needs to be quoted.\n\n    The 'quotetabs' flag indicates whether embedded tabs and spaces should be\n    quoted.  Note that line-ending tabs and spaces are always encoded, as per\n    RFC 1521.", "id": "f16402:m0"}
{"signature": "def quote(c):", "body": "i = ord(c)<EOL>return ESCAPE + HEX[i//<NUM_LIT:16>] + HEX[i%<NUM_LIT:16>]<EOL>", "docstring": "Quote a single character.", "id": "f16402:m1"}
{"signature": "def encode(input, output, quotetabs, header = <NUM_LIT:0>):", "body": "if b2a_qp is not None:<EOL><INDENT>data = input.read()<EOL>odata = b2a_qp(data, quotetabs = quotetabs, header = header)<EOL>output.write(odata)<EOL>return<EOL><DEDENT>def write(s, output=output, lineEnd='<STR_LIT:\\n>'):<EOL><INDENT>if s and s[-<NUM_LIT:1>:] in '<STR_LIT>':<EOL><INDENT>output.write(s[:-<NUM_LIT:1>] + quote(s[-<NUM_LIT:1>]) + lineEnd)<EOL><DEDENT>elif s == '<STR_LIT:.>':<EOL><INDENT>output.write(quote(s) + lineEnd)<EOL><DEDENT>else:<EOL><INDENT>output.write(s + lineEnd)<EOL><DEDENT><DEDENT>prevline = None<EOL>while <NUM_LIT:1>:<EOL><INDENT>line = input.readline()<EOL>if not line:<EOL><INDENT>break<EOL><DEDENT>outline = []<EOL>stripped = '<STR_LIT>'<EOL>if line[-<NUM_LIT:1>:] == '<STR_LIT:\\n>':<EOL><INDENT>line = line[:-<NUM_LIT:1>]<EOL>stripped = '<STR_LIT:\\n>'<EOL><DEDENT>for c in line:<EOL><INDENT>if needsquoting(c, quotetabs, header):<EOL><INDENT>c = quote(c)<EOL><DEDENT>if header and c == '<STR_LIT:U+0020>':<EOL><INDENT>outline.append('<STR_LIT:_>')<EOL><DEDENT>else:<EOL><INDENT>outline.append(c)<EOL><DEDENT><DEDENT>if prevline is not None:<EOL><INDENT>write(prevline)<EOL><DEDENT>thisline = EMPTYSTRING.join(outline)<EOL>while len(thisline) > MAXLINESIZE:<EOL><INDENT>write(thisline[:MAXLINESIZE-<NUM_LIT:1>], lineEnd='<STR_LIT>')<EOL>thisline = thisline[MAXLINESIZE-<NUM_LIT:1>:]<EOL><DEDENT>prevline = thisline<EOL><DEDENT>if prevline is not None:<EOL><INDENT>write(prevline, lineEnd=stripped)<EOL><DEDENT>", "docstring": "Read 'input', apply quoted-printable encoding, and write to 'output'.\n\n    'input' and 'output' are files with readline() and write() methods.\n    The 'quotetabs' flag indicates whether embedded tabs and spaces should be\n    quoted.  Note that line-ending tabs and spaces are always encoded, as per\n    RFC 1521.\n    The 'header' flag indicates whether we are encoding spaces as _ as per\n    RFC 1522.", "id": "f16402:m2"}
{"signature": "def b16encode(s):", "body": "return binascii.hexlify(s).upper()<EOL>", "docstring": "Encode a string using Base16.\n\n    s is the string to encode.  The encoded string is returned.", "id": "f16403:m9"}
{"signature": "def b64encode(s, altchars=None):", "body": "<EOL>encoded = binascii.b2a_base64(s)[:-<NUM_LIT:1>]<EOL>if altchars is not None:<EOL><INDENT>return encoded.translate(string.maketrans(b'<STR_LIT>', altchars[:<NUM_LIT:2>]))<EOL><DEDENT>return encoded<EOL>", "docstring": "Encode a string using Base64.\n\n    s is the string to encode.  Optional altchars must be a string of at least\n    length 2 (additional characters are ignored) which specifies an\n    alternative alphabet for the '+' and '/' characters.  This allows an\n    application to e.g. generate url or filesystem safe Base64 strings.\n\n    The encoded string is returned.", "id": "f16403:m1"}
{"signature": "def decode(input, output):", "body": "while True:<EOL><INDENT>line = input.readline()<EOL>if not line:<EOL><INDENT>break<EOL><DEDENT>s = binascii.a2b_base64(line)<EOL>output.write(s)<EOL><DEDENT>", "docstring": "Decode a file.", "id": "f16403:m12"}
{"signature": "def b64decode(s, altchars=None):", "body": "if altchars is not None:<EOL><INDENT>s = s.translate(string.maketrans(altchars[:<NUM_LIT:2>], '<STR_LIT>'))<EOL><DEDENT>try:<EOL><INDENT>return binascii.a2b_base64(s)<EOL><DEDENT>except binascii.Error as msg:<EOL><INDENT>raise TypeError(msg)<EOL><DEDENT>", "docstring": "Decode a Base64 encoded string.\n\n    s is the string to decode.  Optional altchars must be a string of at least\n    length 2 (additional characters are ignored) which specifies the\n    alternative alphabet used instead of the '+' and '/' characters.\n\n    The decoded string is returned.  A TypeError is raised if s is\n    incorrectly padded.  Characters that are neither in the normal base-64\n    alphabet nor the alternative alphabet are discarded prior to the padding\n    check.", "id": "f16403:m2"}
{"signature": "def urlsafe_b64encode(s):", "body": "return b64encode(s).translate(_urlsafe_encode_translation)<EOL>", "docstring": "Encode a string using the URL- and filesystem-safe Base64 alphabet.\n\n    Argument s is the string to encode.  The encoded string is returned.  The\n    alphabet uses '-' instead of '+' and '_' instead of '/'.", "id": "f16403:m5"}
{"signature": "def b32encode(s):", "body": "parts = []<EOL>quanta, leftover = divmod(len(s), <NUM_LIT:5>)<EOL>if leftover:<EOL><INDENT>s += ('<STR_LIT>' * (<NUM_LIT:5> - leftover))<EOL>quanta += <NUM_LIT:1><EOL><DEDENT>for i in range(quanta):<EOL><INDENT>c1, c2, c3 = struct.unpack('<STR_LIT>', s[i*<NUM_LIT:5>:(i+<NUM_LIT:1>)*<NUM_LIT:5>])<EOL>c2 += (c1 & <NUM_LIT:1>) << <NUM_LIT:16> <EOL>c3 += (c2 & <NUM_LIT:3>) << <NUM_LIT:8>  <EOL>parts.extend([_b32tab[c1 >> <NUM_LIT:11>],         <EOL>_b32tab[(c1 >> <NUM_LIT:6>) & <NUM_LIT>], <EOL>_b32tab[(c1 >> <NUM_LIT:1>) & <NUM_LIT>], <EOL>_b32tab[c2 >> <NUM_LIT:12>],         <EOL>_b32tab[(c2 >> <NUM_LIT:7>) & <NUM_LIT>], <EOL>_b32tab[(c2 >> <NUM_LIT:2>) & <NUM_LIT>], <EOL>_b32tab[c3 >> <NUM_LIT:5>],          <EOL>_b32tab[c3 & <NUM_LIT>],        <EOL>])<EOL><DEDENT>encoded = EMPTYSTRING.join(parts)<EOL>if leftover == <NUM_LIT:1>:<EOL><INDENT>return encoded[:-<NUM_LIT:6>] + '<STR_LIT>'<EOL><DEDENT>elif leftover == <NUM_LIT:2>:<EOL><INDENT>return encoded[:-<NUM_LIT:4>] + '<STR_LIT>'<EOL><DEDENT>elif leftover == <NUM_LIT:3>:<EOL><INDENT>return encoded[:-<NUM_LIT:3>] + '<STR_LIT>'<EOL><DEDENT>elif leftover == <NUM_LIT:4>:<EOL><INDENT>return encoded[:-<NUM_LIT:1>] + '<STR_LIT:=>'<EOL><DEDENT>return encoded<EOL>", "docstring": "Encode a string using Base32.\n\n    s is the string to encode.  The encoded string is returned.", "id": "f16403:m7"}
{"signature": "def standard_b64decode(s):", "body": "return b64decode(s)<EOL>", "docstring": "Decode a string encoded with the standard Base64 alphabet.\n\n    Argument s is the string to decode.  The decoded string is returned.  A\n    TypeError is raised if the string is incorrectly padded.  Characters that\n    are not in the standard alphabet are discarded prior to the padding\n    check.", "id": "f16403:m4"}
{"signature": "def add_extension(module, name, code):", "body": "code = int(code)<EOL>if not <NUM_LIT:1> <= code <= <NUM_LIT>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>key = (module, name)<EOL>if (_extension_registry.get(key) == code and<EOL>_inverted_registry.get(code) == key):<EOL><INDENT>return <EOL><DEDENT>if key in _extension_registry:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" %<EOL>(key, _extension_registry[key]))<EOL><DEDENT>if code in _inverted_registry:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" %<EOL>(code, _inverted_registry[code]))<EOL><DEDENT>_extension_registry[key] = code<EOL>_inverted_registry[code] = key<EOL>", "docstring": "Register an extension code.", "id": "f16404:m6"}
{"signature": "def remove_extension(module, name, code):", "body": "key = (module, name)<EOL>if (_extension_registry.get(key) != code or<EOL>_inverted_registry.get(code) != key):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" %<EOL>(key, code))<EOL><DEDENT>del _extension_registry[key]<EOL>del _inverted_registry[code]<EOL>if code in _extension_cache:<EOL><INDENT>del _extension_cache[code]<EOL><DEDENT>", "docstring": "Unregister an extension code.  For testing only.", "id": "f16404:m7"}
{"signature": "def join(self):", "body": "self.all_tasks_done.acquire()<EOL>try:<EOL><INDENT>while self.unfinished_tasks:<EOL><INDENT>self.all_tasks_done.wait()<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>self.all_tasks_done.release()<EOL><DEDENT>", "docstring": "Blocks until all items in the Queue have been gotten and processed.\n\n        The count of unfinished tasks goes up whenever an item is added to the\n        queue. The count goes down whenever a consumer thread calls task_done()\n        to indicate the item was retrieved and all work on it is complete.\n\n        When the count of unfinished tasks drops to zero, join() unblocks.", "id": "f16406:c2:m2"}
{"signature": "def task_done(self):", "body": "self.all_tasks_done.acquire()<EOL>try:<EOL><INDENT>unfinished = self.unfinished_tasks - <NUM_LIT:1><EOL>if unfinished <= <NUM_LIT:0>:<EOL><INDENT>if unfinished < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.all_tasks_done.notify_all()<EOL><DEDENT>self.unfinished_tasks = unfinished<EOL><DEDENT>finally:<EOL><INDENT>self.all_tasks_done.release()<EOL><DEDENT>", "docstring": "Indicate that a formerly enqueued task is complete.\n\n        Used by Queue consumer threads.  For each get() used to fetch a task,\n        a subsequent call to task_done() tells the queue that the processing\n        on the task is complete.\n\n        If a join() is currently blocking, it will resume when all items\n        have been processed (meaning that a task_done() call was received\n        for every item that had been put() into the queue).\n\n        Raises a ValueError if called more times than there were items\n        placed in the queue.", "id": "f16406:c2:m1"}
{"signature": "def put(self, item, block=True, timeout=None):", "body": "self.not_full.acquire()<EOL>try:<EOL><INDENT>if self.maxsize > <NUM_LIT:0>:<EOL><INDENT>if not block:<EOL><INDENT>if self._qsize() == self.maxsize:<EOL><INDENT>raise Full<EOL><DEDENT><DEDENT>elif timeout is None:<EOL><INDENT>while self._qsize() == self.maxsize:<EOL><INDENT>self.not_full.wait()<EOL><DEDENT><DEDENT>elif timeout < <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>endtime = _time() + timeout<EOL>while self._qsize() == self.maxsize:<EOL><INDENT>remaining = endtime - _time()<EOL>if remaining <= <NUM_LIT:0.0>:<EOL><INDENT>raise Full<EOL><DEDENT>self.not_full.wait(remaining)<EOL><DEDENT><DEDENT><DEDENT>self._put(item)<EOL>self.unfinished_tasks += <NUM_LIT:1><EOL>self.not_empty.notify()<EOL><DEDENT>finally:<EOL><INDENT>self.not_full.release()<EOL><DEDENT>", "docstring": "Put an item into the queue.\n\n        If optional args 'block' is true and 'timeout' is None (the default),\n        block if necessary until a free slot is available. If 'timeout' is\n        a non-negative number, it blocks at most 'timeout' seconds and raises\n        the Full exception if no free slot was available within that time.\n        Otherwise ('block' is false), put an item on the queue if a free slot\n        is immediately available, else raise the Full exception ('timeout'\n        is ignored in that case).", "id": "f16406:c2:m6"}
{"signature": "def real_quick_ratio(self):", "body": "la, lb = len(self.a), len(self.b)<EOL>return _calculate_ratio(min(la, lb), la + lb)<EOL>", "docstring": "Return an upper bound on ratio() very quickly.\n\n        This isn't defined beyond that it is an upper bound on .ratio(), and\n        is faster to compute than either .ratio() or .quick_ratio().", "id": "f16407:c1:m11"}
{"signature": "def _format_range_unified(start, stop):", "body": "<EOL>beginning = start + <NUM_LIT:1>     <EOL>length = stop - start<EOL>if length == <NUM_LIT:1>:<EOL><INDENT>return '<STR_LIT:%s>' % (beginning)<EOL><DEDENT>if not length:<EOL><INDENT>beginning -= <NUM_LIT:1>        <EOL><DEDENT>return '<STR_LIT>' % (beginning, length)<EOL>", "docstring": "Convert range to the \"ed\" format", "id": "f16407:m6"}
{"signature": "def get_opcodes(self):", "body": "if self.opcodes is not None:<EOL><INDENT>return self.opcodes<EOL><DEDENT>i = j = <NUM_LIT:0><EOL>self.opcodes = answer = []<EOL>for ai, bj, size in self.get_matching_blocks():<EOL><INDENT>tag = '<STR_LIT>'<EOL>if i < ai and j < bj:<EOL><INDENT>tag = '<STR_LIT:replace>'<EOL><DEDENT>elif i < ai:<EOL><INDENT>tag = '<STR_LIT>'<EOL><DEDENT>elif j < bj:<EOL><INDENT>tag = '<STR_LIT>'<EOL><DEDENT>if tag:<EOL><INDENT>answer.append( (tag, i, ai, j, bj) )<EOL><DEDENT>i, j = ai+size, bj+size<EOL>if size:<EOL><INDENT>answer.append( ('<STR_LIT>', ai, i, bj, j) )<EOL><DEDENT><DEDENT>return answer<EOL>", "docstring": "Return list of 5-tuples describing how to turn a into b.\n\n        Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple\n        has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the\n        tuple preceding it, and likewise for j1 == the previous j2.\n\n        The tags are strings, with these meanings:\n\n        'replace':  a[i1:i2] should be replaced by b[j1:j2]\n        'delete':   a[i1:i2] should be deleted.\n                    Note that j1==j2 in this case.\n        'insert':   b[j1:j2] should be inserted at a[i1:i1].\n                    Note that i1==i2 in this case.\n        'equal':    a[i1:i2] == b[j1:j2]\n\n        >>> a = \"qabxcd\"\n        >>> b = \"abycdf\"\n        >>> s = SequenceMatcher(None, a, b)\n        >>> for tag, i1, i2, j1, j2 in s.get_opcodes():\n        ...    print (\"%7s a[%d:%d] (%s) b[%d:%d] (%s)\" %\n        ...           (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))\n         delete a[0:1] (q) b[0:0] ()\n          equal a[1:3] (ab) b[0:2] (ab)\n        replace a[3:4] (x) b[2:3] (y)\n          equal a[4:6] (cd) b[3:5] (cd)\n         insert a[6:6] () b[5:6] (f)", "id": "f16407:c1:m7"}
{"signature": "def __repr__(self):", "body": "return '<STR_LIT>' % self<EOL>", "docstring": "Return a nicely formatted representation string", "id": "f16407:c0:m2"}
{"signature": "def ratio(self):", "body": "matches = reduce(lambda sum, triple: sum + triple[-<NUM_LIT:1>],<EOL>self.get_matching_blocks(), <NUM_LIT:0>)<EOL>return _calculate_ratio(matches, len(self.a) + len(self.b))<EOL>", "docstring": "Return a measure of the sequences' similarity (float in [0,1]).\n\n        Where T is the total number of elements in both sequences, and\n        M is the number of matches, this is 2.0*M / T.\n        Note that this is 1 if the sequences are identical, and 0 if\n        they have nothing in common.\n\n        .ratio() is expensive to compute if you haven't already computed\n        .get_matching_blocks() or .get_opcodes(), in which case you may\n        want to try .quick_ratio() or .real_quick_ratio() first to get an\n        upper bound.\n\n        >>> s = SequenceMatcher(None, \"abcd\", \"bcde\")\n        >>> s.ratio()\n        0.75\n        >>> s.quick_ratio()\n        0.75\n        >>> s.real_quick_ratio()\n        1.0", "id": "f16407:c1:m9"}
{"signature": "def __init__(self, isjunk=None, a='<STR_LIT>', b='<STR_LIT>', autojunk=True):", "body": "<EOL>self.isjunk = isjunk<EOL>self.a = self.b = None<EOL>self.autojunk = autojunk<EOL>self.set_seqs(a, b)<EOL>", "docstring": "Construct a SequenceMatcher.\n\n        Optional arg isjunk is None (the default), or a one-argument\n        function that takes a sequence element and returns true iff the\n        element is junk.  None is equivalent to passing \"lambda x: 0\", i.e.\n        no elements are considered to be junk.  For example, pass\n            lambda x: x in \" \\\\t\"\n        if you're comparing lines as sequences of characters, and don't\n        want to synch up on blanks or hard tabs.\n\n        Optional arg a is the first of two sequences to be compared.  By\n        default, an empty string.  The elements of a must be hashable.  See\n        also .set_seqs() and .set_seq1().\n\n        Optional arg b is the second of two sequences to be compared.  By\n        default, an empty string.  The elements of b must be hashable. See\n        also .set_seqs() and .set_seq2().\n\n        Optional arg autojunk should be set to False to disable the\n        \"automatic junk heuristic\" that treats popular elements as junk\n        (see module documentation for more information).", "id": "f16407:c1:m0"}
{"signature": "def restore(delta, which):", "body": "try:<EOL><INDENT>tag = {<NUM_LIT:1>: \"<STR_LIT>\", <NUM_LIT:2>: \"<STR_LIT>\"}[int(which)]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>% which)<EOL><DEDENT>prefixes = (\"<STR_LIT:U+0020>\", tag)<EOL>for line in delta:<EOL><INDENT>if line[:<NUM_LIT:2>] in prefixes:<EOL><INDENT>yield line[<NUM_LIT:2>:]<EOL><DEDENT><DEDENT>", "docstring": "r\"\"\"\n    Generate one of the two sequences that generated a delta.\n\n    Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract\n    lines originating from file 1 or 2 (parameter `which`), stripping off line\n    prefixes.\n\n    Examples:\n\n    >>> diff = ndiff('one\\ntwo\\nthree\\n'.splitlines(1),\n    ...              'ore\\ntree\\nemu\\n'.splitlines(1))\n    >>> diff = list(diff)\n    >>> print ''.join(restore(diff, 1)),\n    one\n    two\n    three\n    >>> print ''.join(restore(diff, 2)),\n    ore\n    tree\n    emu", "id": "f16407:m12"}
{"signature": "def get_close_matches(word, possibilities, n=<NUM_LIT:3>, cutoff=<NUM_LIT>):", "body": "if not n >  <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (n,))<EOL><DEDENT>if not <NUM_LIT:0.0> <= cutoff <= <NUM_LIT:1.0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (cutoff,))<EOL><DEDENT>result = []<EOL>s = SequenceMatcher()<EOL>s.set_seq2(word)<EOL>for x in possibilities:<EOL><INDENT>s.set_seq1(x)<EOL>if s.real_quick_ratio() >= cutoff ands.quick_ratio() >= cutoff ands.ratio() >= cutoff:<EOL><INDENT>result.append((s.ratio(), x))<EOL><DEDENT><DEDENT>result = heapq.nlargest(n, result)<EOL>return [x for score, x in result]<EOL>", "docstring": "Use SequenceMatcher to return list of the best \"good enough\" matches.\n\n    word is a sequence for which close matches are desired (typically a\n    string).\n\n    possibilities is a list of sequences against which to match word\n    (typically a list of strings).\n\n    Optional arg n (default 3) is the maximum number of close matches to\n    return.  n must be > 0.\n\n    Optional arg cutoff (default 0.6) is a float in [0, 1].  Possibilities\n    that don't score at least that similar to word are ignored.\n\n    The best (no more than n) matches among the possibilities are returned\n    in a list, sorted by similarity score, most similar first.\n\n    >>> get_close_matches(\"appel\", [\"ape\", \"apple\", \"peach\", \"puppy\"])\n    ['apple', 'ape']\n    >>> import keyword as _keyword\n    >>> get_close_matches(\"wheel\", _keyword.kwlist)\n    ['while']\n    >>> get_close_matches(\"apple\", _keyword.kwlist)\n    []\n    >>> get_close_matches(\"accept\", _keyword.kwlist)\n    ['except']", "id": "f16407:m2"}
{"signature": "def _tab_newline_replace(self,fromlines,tolines):", "body": "def expand_tabs(line):<EOL><INDENT>line = line.replace('<STR_LIT:U+0020>','<STR_LIT>')<EOL>line = line.expandtabs(self._tabsize)<EOL>line = line.replace('<STR_LIT:U+0020>','<STR_LIT:\\t>')<EOL>return line.replace('<STR_LIT>','<STR_LIT:U+0020>').rstrip('<STR_LIT:\\n>')<EOL><DEDENT>fromlines = [expand_tabs(line) for line in fromlines]<EOL>tolines = [expand_tabs(line) for line in tolines]<EOL>return fromlines,tolines<EOL>", "docstring": "Returns from/to line lists with tabs expanded and newlines removed.\n\n        Instead of tab characters being replaced by the number of spaces\n        needed to fill in to the next tab stop, this function will fill\n        the space with tab characters.  This is done so that the difference\n        algorithms can identify changes in a file when tabs are replaced by\n        spaces and vice versa.  At the end of the HTML generation, the tab\n        characters will be replaced with a nonbreakable space.", "id": "f16407:c3:m2"}
{"signature": "def _count_leading(line, ch):", "body": "i, n = <NUM_LIT:0>, len(line)<EOL>while i < n and line[i] == ch:<EOL><INDENT>i += <NUM_LIT:1><EOL><DEDENT>return i<EOL>", "docstring": "Return number of `ch` characters at the start of `line`.\n\nExample:\n\n>>> _count_leading('   abc', ' ')\n3", "id": "f16407:m3"}
{"signature": "def _fancy_replace(self, a, alo, ahi, b, blo, bhi):", "body": "<EOL>best_ratio, cutoff = <NUM_LIT>, <NUM_LIT><EOL>cruncher = SequenceMatcher(self.charjunk)<EOL>eqi, eqj = None, None   <EOL>for j in range(blo, bhi):<EOL><INDENT>bj = b[j]<EOL>cruncher.set_seq2(bj)<EOL>for i in range(alo, ahi):<EOL><INDENT>ai = a[i]<EOL>if ai == bj:<EOL><INDENT>if eqi is None:<EOL><INDENT>eqi, eqj = i, j<EOL><DEDENT>continue<EOL><DEDENT>cruncher.set_seq1(ai)<EOL>if cruncher.real_quick_ratio() > best_ratio andcruncher.quick_ratio() > best_ratio andcruncher.ratio() > best_ratio:<EOL><INDENT>best_ratio, best_i, best_j = cruncher.ratio(), i, j<EOL><DEDENT><DEDENT><DEDENT>if best_ratio < cutoff:<EOL><INDENT>if eqi is None:<EOL><INDENT>for line in self._plain_replace(a, alo, ahi, b, blo, bhi):<EOL><INDENT>yield line<EOL><DEDENT>return<EOL><DEDENT>best_i, best_j, best_ratio = eqi, eqj, <NUM_LIT:1.0><EOL><DEDENT>else:<EOL><INDENT>eqi = None<EOL><DEDENT>for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):<EOL><INDENT>yield line<EOL><DEDENT>aelt, belt = a[best_i], b[best_j]<EOL>if eqi is None:<EOL><INDENT>atags = btags = \"<STR_LIT>\"<EOL>cruncher.set_seqs(aelt, belt)<EOL>for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():<EOL><INDENT>la, lb = ai2 - ai1, bj2 - bj1<EOL>if tag == '<STR_LIT:replace>':<EOL><INDENT>atags += '<STR_LIT>' * la<EOL>btags += '<STR_LIT>' * lb<EOL><DEDENT>elif tag == '<STR_LIT>':<EOL><INDENT>atags += '<STR_LIT:->' * la<EOL><DEDENT>elif tag == '<STR_LIT>':<EOL><INDENT>btags += '<STR_LIT:+>' * lb<EOL><DEDENT>elif tag == '<STR_LIT>':<EOL><INDENT>atags += '<STR_LIT:U+0020>' * la<EOL>btags += '<STR_LIT:U+0020>' * lb<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % (tag,))<EOL><DEDENT><DEDENT>for line in self._qformat(aelt, belt, atags, btags):<EOL><INDENT>yield line<EOL><DEDENT><DEDENT>else:<EOL><INDENT>yield '<STR_LIT:U+0020>' + aelt<EOL><DEDENT>for line in self._fancy_helper(a, best_i+<NUM_LIT:1>, ahi, b, best_j+<NUM_LIT:1>, 
bhi):<EOL><INDENT>yield line<EOL><DEDENT>", "docstring": "r\"\"\"\n        When replacing one block of lines with another, search the blocks\n        for *similar* lines; the best-matching pair (if any) is used as a\n        synch point, and intraline difference marking is done on the\n        similar pair. Lots of work, but often worth it.\n\n        Example:\n\n        >>> d = Differ()\n        >>> results = d._fancy_replace(['abcDefghiJkl\\n'], 0, 1,\n        ...                            ['abcdefGhijkl\\n'], 0, 1)\n        >>> print ''.join(results),\n        - abcDefghiJkl\n        ?    ^  ^  ^\n        + abcdefGhijkl\n        ?    ^  ^  ^", "id": "f16407:c2:m4"}
{"signature": "def set_seq2(self, b):", "body": "if b is self.b:<EOL><INDENT>return<EOL><DEDENT>self.b = b<EOL>self.matching_blocks = self.opcodes = None<EOL>self.fullbcount = None<EOL>self.__chain_b()<EOL>", "docstring": "Set the second sequence to be compared.\n\n        The first sequence to be compared is not changed.\n\n        >>> s = SequenceMatcher(None, \"abcd\", \"bcde\")\n        >>> s.ratio()\n        0.75\n        >>> s.set_seq2(\"abcd\")\n        >>> s.ratio()\n        1.0\n        >>>\n\n        SequenceMatcher computes and caches detailed information about the\n        second sequence, so if you want to compare one sequence S against\n        many sequences, use .set_seq2(S) once and call .set_seq1(x)\n        repeatedly for each of the other sequences.\n\n        See also set_seqs() and set_seq1().", "id": "f16407:c1:m3"}
{"signature": "def _asdict(self):", "body": "return OrderedDict(list(zip(self._fields, self)))<EOL>", "docstring": "Return a new OrderedDict which maps field names to their values", "id": "f16407:c0:m3"}
{"signature": "def make_table(self,fromlines,tolines,fromdesc='<STR_LIT>',todesc='<STR_LIT>',context=False,<EOL>numlines=<NUM_LIT:5>):", "body": "<EOL>self._make_prefix()<EOL>fromlines,tolines = self._tab_newline_replace(fromlines,tolines)<EOL>if context:<EOL><INDENT>context_lines = numlines<EOL><DEDENT>else:<EOL><INDENT>context_lines = None<EOL><DEDENT>diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,<EOL>charjunk=self._charjunk)<EOL>if self._wrapcolumn:<EOL><INDENT>diffs = self._line_wrapper(diffs)<EOL><DEDENT>fromlist,tolist,flaglist = self._collect_lines(diffs)<EOL>fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(<EOL>fromlist,tolist,flaglist,context,numlines)<EOL>s = []<EOL>fmt = '<STR_LIT>' +'<STR_LIT>'<EOL>for i in range(len(flaglist)):<EOL><INDENT>if flaglist[i] is None:<EOL><INDENT>if i > <NUM_LIT:0>:<EOL><INDENT>s.append('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>s.append( fmt % (next_id[i],next_href[i],fromlist[i],<EOL>next_href[i],tolist[i]))<EOL><DEDENT><DEDENT>if fromdesc or todesc:<EOL><INDENT>header_row = '<STR_LIT>' % (<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' % fromdesc,<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' % todesc)<EOL><DEDENT>else:<EOL><INDENT>header_row = '<STR_LIT>'<EOL><DEDENT>table = self._table_template % dict(<EOL>data_rows='<STR_LIT>'.join(s),<EOL>header_row=header_row,<EOL>prefix=self._prefix[<NUM_LIT:1>])<EOL>return table.replace('<STR_LIT>','<STR_LIT>').replace('<STR_LIT>','<STR_LIT>').replace('<STR_LIT>','<STR_LIT>').replace('<STR_LIT>','<STR_LIT>').replace('<STR_LIT:\\t>','<STR_LIT>')<EOL>", "docstring": "Returns HTML table of side by side comparison with change highlights\n\n        Arguments:\n        fromlines -- list of \"from\" lines\n        tolines -- list of \"to\" lines\n        fromdesc -- \"from\" file column header string\n        todesc -- \"to\" file column header string\n        context -- set to True for contextual differences (defaults to False\n            which shows full 
differences).\n        numlines -- number of context lines.  When context is set True,\n            controls number of lines displayed before and after the change.\n            When context is False, controls the number of lines to place\n            the \"next\" link anchors before the next change (so click of\n            \"next\" link jumps to just before the change).", "id": "f16407:c3:m9"}
{"signature": "def IS_LINE_JUNK(line, pat=re.compile(r\"<STR_LIT>\").match):", "body": "return pat(line) is not None<EOL>", "docstring": "r\"\"\"\n    Return 1 for ignorable line: iff `line` is blank or contains a single '#'.\n\n    Examples:\n\n    >>> IS_LINE_JUNK('\\n')\n    True\n    >>> IS_LINE_JUNK('  #   \\n')\n    True\n    >>> IS_LINE_JUNK('hello\\n')\n    False", "id": "f16407:m4"}
{"signature": "def _split_line(self,data_list,line_num,text):", "body": "<EOL>if not line_num:<EOL><INDENT>data_list.append((line_num,text))<EOL>return<EOL><DEDENT>size = len(text)<EOL>max = self._wrapcolumn<EOL>if (size <= max) or ((size -(text.count('<STR_LIT>')*<NUM_LIT:3>)) <= max):<EOL><INDENT>data_list.append((line_num,text))<EOL>return<EOL><DEDENT>i = <NUM_LIT:0><EOL>n = <NUM_LIT:0><EOL>mark = '<STR_LIT>'<EOL>while n < max and i < size:<EOL><INDENT>if text[i] == '<STR_LIT>':<EOL><INDENT>i += <NUM_LIT:1><EOL>mark = text[i]<EOL>i += <NUM_LIT:1><EOL><DEDENT>elif text[i] == '<STR_LIT>':<EOL><INDENT>i += <NUM_LIT:1><EOL>mark = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>i += <NUM_LIT:1><EOL>n += <NUM_LIT:1><EOL><DEDENT><DEDENT>line1 = text[:i]<EOL>line2 = text[i:]<EOL>if mark:<EOL><INDENT>line1 = line1 + '<STR_LIT>'<EOL>line2 = '<STR_LIT>' + mark + line2<EOL><DEDENT>data_list.append((line_num,line1))<EOL>self._split_line(data_list,'<STR_LIT:>>',line2)<EOL>", "docstring": "Builds list of text lines by splitting text lines at wrap point\n\n        This function will determine if the input text line needs to be\n        wrapped (split) into separate lines.  If so, the first wrap point\n        will be determined and the first line appended to the output\n        text line list.  This function is used recursively to handle\n        the second part of the split line to further split it.", "id": "f16407:c3:m3"}
{"signature": "def _replace(_self, **kwds):", "body": "result = _self._make(list(map(kwds.pop, ('<STR_LIT:a>', '<STR_LIT:b>', '<STR_LIT:size>'), _self)))<EOL>if kwds:<EOL><INDENT>raise ValueError('<STR_LIT>' % list(kwds.keys()))<EOL><DEDENT>return result<EOL>", "docstring": "Return a new Match object replacing specified fields with new values", "id": "f16407:c0:m4"}
{"signature": "def context_diff(a, b, fromfile='<STR_LIT>', tofile='<STR_LIT>',<EOL>fromfiledate='<STR_LIT>', tofiledate='<STR_LIT>', n=<NUM_LIT:3>, lineterm='<STR_LIT:\\n>'):", "body": "prefix = dict(insert='<STR_LIT>', delete='<STR_LIT>', replace='<STR_LIT>', equal='<STR_LIT:U+0020>')<EOL>started = False<EOL>for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):<EOL><INDENT>if not started:<EOL><INDENT>started = True<EOL>fromdate = '<STR_LIT>' % (fromfiledate) if fromfiledate else '<STR_LIT>'<EOL>todate = '<STR_LIT>' % (tofiledate) if tofiledate else '<STR_LIT>'<EOL>yield '<STR_LIT>' % (fromfile, fromdate, lineterm)<EOL>yield '<STR_LIT>' % (tofile, todate, lineterm)<EOL><DEDENT>first, last = group[<NUM_LIT:0>], group[-<NUM_LIT:1>]<EOL>yield '<STR_LIT>' + lineterm<EOL>file1_range = _format_range_context(first[<NUM_LIT:1>], last[<NUM_LIT:2>])<EOL>yield '<STR_LIT>' % (file1_range, lineterm)<EOL>if any(tag in ('<STR_LIT:replace>', '<STR_LIT>') for tag, _, _, _, _ in group):<EOL><INDENT>for tag, i1, i2, _, _ in group:<EOL><INDENT>if tag != '<STR_LIT>':<EOL><INDENT>for line in a[i1:i2]:<EOL><INDENT>yield prefix[tag] + line<EOL><DEDENT><DEDENT><DEDENT><DEDENT>file2_range = _format_range_context(first[<NUM_LIT:3>], last[<NUM_LIT:4>])<EOL>yield '<STR_LIT>' % (file2_range, lineterm)<EOL>if any(tag in ('<STR_LIT:replace>', '<STR_LIT>') for tag, _, _, _, _ in group):<EOL><INDENT>for tag, _, _, j1, j2 in group:<EOL><INDENT>if tag != '<STR_LIT>':<EOL><INDENT>for line in b[j1:j2]:<EOL><INDENT>yield prefix[tag] + line<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>", "docstring": "r\"\"\"\n    Compare two sequences of lines; generate the delta as a context diff.\n\n    Context diffs are a compact way of showing line changes and a few\n    lines of context.  The number of context lines is set by 'n' which\n    defaults to three.\n\n    By default, the diff control lines (those with *** or ---) are\n    created with a trailing newline.  
This is helpful so that inputs\n    created from file.readlines() result in diffs that are suitable for\n    file.writelines() since both the inputs and outputs have trailing\n    newlines.\n\n    For inputs that do not have trailing newlines, set the lineterm\n    argument to \"\" so that the output will be uniformly newline free.\n\n    The context diff format normally has a header for filenames and\n    modification times.  Any or all of these may be specified using\n    strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.\n    The modification times are normally expressed in the ISO 8601 format.\n    If not specified, the strings default to blanks.\n\n    Example:\n\n    >>> print ''.join(context_diff('one\\ntwo\\nthree\\nfour\\n'.splitlines(1),\n    ...       'zero\\none\\ntree\\nfour\\n'.splitlines(1), 'Original', 'Current')),\n    *** Original\n    --- Current\n    ***************\n    *** 1,4 ****\n      one\n    ! two\n    ! three\n      four\n    --- 1,4 ----\n    + zero\n      one\n    ! tree\n      four", "id": "f16407:m9"}
{"signature": "def _format_line(self,side,flag,linenum,text):", "body": "try:<EOL><INDENT>linenum = '<STR_LIT>' % linenum<EOL>id = '<STR_LIT>' % (self._prefix[side],linenum)<EOL><DEDENT>except TypeError:<EOL><INDENT>id = '<STR_LIT>'<EOL><DEDENT>text=text.replace(\"<STR_LIT:&>\",\"<STR_LIT>\").replace(\"<STR_LIT:>>\",\"<STR_LIT>\").replace(\"<STR_LIT:<>\",\"<STR_LIT>\")<EOL>text = text.replace('<STR_LIT:U+0020>','<STR_LIT>').rstrip()<EOL>return '<STR_LIT>'% (id,linenum,text)<EOL>", "docstring": "Returns HTML markup of \"from\" / \"to\" text lines\n\n        side -- 0 or 1 indicating \"from\" or \"to\" text\n        flag -- indicates if difference on line\n        linenum -- line number (used for line number column)\n        text -- line text to be marked up", "id": "f16407:c3:m6"}
{"signature": "def getlines(filename, module_globals=None):", "body": "if filename in cache:<EOL><INDENT>return cache[filename][<NUM_LIT:2>]<EOL><DEDENT>try:<EOL><INDENT>return updatecache(filename, module_globals)<EOL><DEDENT>except MemoryError:<EOL><INDENT>clearcache()<EOL>return []<EOL><DEDENT>", "docstring": "Get the lines for a file from the cache.\n    Update the cache if it doesn't contain an entry for this file already.", "id": "f16409:m2"}
{"signature": "def updatecache(filename, module_globals=None):", "body": "if filename in cache:<EOL><INDENT>del cache[filename]<EOL><DEDENT>if not filename or (filename.startswith('<STR_LIT:<>') and filename.endswith('<STR_LIT:>>')):<EOL><INDENT>return []<EOL><DEDENT>fullname = filename<EOL>try:<EOL><INDENT>stat = os.stat(fullname)<EOL><DEDENT>except OSError:<EOL><INDENT>basename = filename<EOL>if module_globals and '<STR_LIT>' in module_globals:<EOL><INDENT>name = module_globals.get('<STR_LIT>')<EOL>loader = module_globals['<STR_LIT>']<EOL>get_source = getattr(loader, '<STR_LIT>', None)<EOL>if name and get_source:<EOL><INDENT>try:<EOL><INDENT>data = get_source(name)<EOL><DEDENT>except (ImportError, IOError):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if data is None:<EOL><INDENT>return []<EOL><DEDENT>cache[filename] = (<EOL>len(data), None,<EOL>[line+'<STR_LIT:\\n>' for line in data.splitlines()], fullname<EOL>)<EOL>return cache[filename][<NUM_LIT:2>]<EOL><DEDENT><DEDENT><DEDENT>if os.path.isabs(filename):<EOL><INDENT>return []<EOL><DEDENT>for dirname in sys.path:<EOL><INDENT>try:<EOL><INDENT>fullname = os.path.join(dirname, basename)<EOL><DEDENT>except (TypeError, AttributeError):<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>stat = os.stat(fullname)<EOL>break<EOL><DEDENT>except os.error:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return []<EOL><DEDENT><DEDENT>try:<EOL><INDENT>with open(fullname, '<STR_LIT>') as fp:<EOL><INDENT>lines = fp.readlines()<EOL><DEDENT><DEDENT>except IOError:<EOL><INDENT>return []<EOL><DEDENT>if lines and not lines[-<NUM_LIT:1>].endswith('<STR_LIT:\\n>'):<EOL><INDENT>lines[-<NUM_LIT:1>] += '<STR_LIT:\\n>'<EOL><DEDENT>size, mtime = stat.st_size, stat.st_mtime<EOL>cache[filename] = size, mtime, lines, fullname<EOL>return lines<EOL>", "docstring": "Update a cache entry and return its list of lines.\n    If something's wrong, print a message, discard the cache entry,\n    and return an empty list.", "id": "f16409:m4"}
{"signature": "def clearcache():", "body": "global cache<EOL>cache = {}<EOL>", "docstring": "Clear the cache entirely.", "id": "f16409:m1"}
{"signature": "def checkcache(filename=None):", "body": "if filename is None:<EOL><INDENT>filenames = cache.keys()<EOL><DEDENT>else:<EOL><INDENT>if filename in cache:<EOL><INDENT>filenames = [filename]<EOL><DEDENT>else:<EOL><INDENT>return<EOL><DEDENT><DEDENT>for filename in filenames:<EOL><INDENT>size, mtime, lines, fullname = cache[filename]<EOL>if mtime is None:<EOL><INDENT>continue   <EOL><DEDENT>try:<EOL><INDENT>stat = os.stat(fullname)<EOL><DEDENT>except os.error:<EOL><INDENT>del cache[filename]<EOL>continue<EOL><DEDENT>if size != stat.st_size or mtime != stat.st_mtime:<EOL><INDENT>del cache[filename]<EOL><DEDENT><DEDENT>", "docstring": "Discard cache entries that are out of date.\n    (This is not checked upon each call!)", "id": "f16409:m3"}
{"signature": "def check(self, msg, results):", "body": "m = self.create_message(msg)<EOL>i = <NUM_LIT:0><EOL>for n, a in m.getaddrlist('<STR_LIT:to>') + m.getaddrlist('<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>mn, ma = results[i][<NUM_LIT:0>], results[i][<NUM_LIT:1>]<EOL><DEDENT>except IndexError:<EOL><INDENT>print('<STR_LIT>', repr(n), repr(a))<EOL>continue<EOL><DEDENT>i = i + <NUM_LIT:1><EOL>self.assertEqual(mn, n,<EOL>\"<STR_LIT>\" % (mn, n))<EOL>self.assertEqual(ma, a,<EOL>\"<STR_LIT>\" % (ma, a))<EOL>if mn == n and ma == a:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>print('<STR_LIT>', repr(n), repr(a))<EOL><DEDENT><DEDENT>out = m.getdate('<STR_LIT:date>')<EOL>if out:<EOL><INDENT>self.assertEqual(out,<EOL>(<NUM_LIT>, <NUM_LIT:1>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:0>),<EOL>\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Check addresses and the date.", "id": "f16413:c0:m3"}
{"signature": "@contextlib.contextmanager<EOL>def check_warnings(*filters, **kwargs):", "body": "quiet = kwargs.get('<STR_LIT>')<EOL>if not filters:<EOL><INDENT>filters = ((\"<STR_LIT>\", Warning),)<EOL>if quiet is None:<EOL><INDENT>quiet = True<EOL><DEDENT><DEDENT>return _filterwarnings(filters, quiet)<EOL>", "docstring": "Context manager to silence warnings.\n\n    Accept 2-tuples as positional arguments:\n        (\"message regexp\", WarningCategory)\n\n    Optional argument:\n     - if 'quiet' is True, it does not fail if a filter catches nothing\n        (default True without argument,\n         default False if some filters are defined)\n\n    Without argument, it defaults to:\n        check_warnings((\"\", Warning), quiet=True)", "id": "f16416:m2"}
{"signature": "def _run_suite(suite):", "body": "if verbose:<EOL><INDENT>runner = unittest.TextTestRunner(sys.stdout, verbosity=<NUM_LIT:2>)<EOL><DEDENT>else:<EOL><INDENT>runner = BasicTestRunner()<EOL><DEDENT>result = runner.run(suite)<EOL>if not result.wasSuccessful():<EOL><INDENT>if len(result.errors) == <NUM_LIT:1> and not result.failures:<EOL><INDENT>err = result.errors[<NUM_LIT:0>][<NUM_LIT:1>]<EOL><DEDENT>elif len(result.failures) == <NUM_LIT:1> and not result.errors:<EOL><INDENT>err = result.failures[<NUM_LIT:0>][<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>err = \"<STR_LIT>\"<EOL>if not verbose:<EOL><INDENT>err += \"<STR_LIT>\"<EOL><DEDENT><DEDENT>raise TestFailed(err)<EOL><DEDENT>", "docstring": "Run tests from a unittest.TestSuite-derived class.", "id": "f16416:m6"}
{"signature": "def _reference(self):", "body": "return {<NUM_LIT:1>:<NUM_LIT:2>, \"<STR_LIT>\":\"<STR_LIT>\", \"<STR_LIT>\":(<NUM_LIT:1>,<NUM_LIT:2>,<NUM_LIT:3>)}<EOL>", "docstring": "Return a dictionary of values which are invariant by storage\n        in the object under test.", "id": "f16436:c0:m0"}
{"signature": "def _full_mapping(self, data):", "body": "x = self._empty_mapping()<EOL>for key, value in list(data.items()):<EOL><INDENT>x[key] = value<EOL><DEDENT>return x<EOL>", "docstring": "Return a mapping object with the value contained in data\n        dictionary", "id": "f16436:c0:m2"}
{"signature": "def loadTestsFromNames(self, names, module=None):", "body": "suites = [self.loadTestsFromName(name, module) for name in names]<EOL>return self.suiteClass(suites)<EOL>", "docstring": "Return a suite of all tests cases found using the given sequence\n        of string specifiers. See 'loadTestsFromName()'.", "id": "f16445:c0:m3"}
{"signature": "def loadTestsFromName(self, name, module=None):", "body": "parts = name.split('<STR_LIT:.>')<EOL>if module is None:<EOL><INDENT>parts_copy = parts[:]<EOL>while parts_copy:<EOL><INDENT>try:<EOL><INDENT>module = __import__('<STR_LIT:.>'.join(parts_copy))<EOL>break<EOL><DEDENT>except ImportError:<EOL><INDENT>del parts_copy[-<NUM_LIT:1>]<EOL>if not parts_copy:<EOL><INDENT>raise<EOL><DEDENT><DEDENT><DEDENT>parts = parts[<NUM_LIT:1>:]<EOL><DEDENT>obj = module<EOL>for part in parts:<EOL><INDENT>parent, obj = obj, getattr(obj, part)<EOL><DEDENT>if isinstance(obj, types.ModuleType):<EOL><INDENT>return self.loadTestsFromModule(obj)<EOL><DEDENT>elif isinstance(obj, type) and issubclass(obj, case.TestCase):<EOL><INDENT>return self.loadTestsFromTestCase(obj)<EOL><DEDENT>elif (isinstance(obj, types.UnboundMethodType) and<EOL>isinstance(parent, type) and<EOL>issubclass(parent, case.TestCase)):<EOL><INDENT>name = parts[-<NUM_LIT:1>]<EOL>inst = parent(name)<EOL>return self.suiteClass([inst])<EOL><DEDENT>elif isinstance(obj, suite.TestSuite):<EOL><INDENT>return obj<EOL><DEDENT>elif hasattr(obj, '<STR_LIT>'):<EOL><INDENT>test = obj()<EOL>if isinstance(test, suite.TestSuite):<EOL><INDENT>return test<EOL><DEDENT>elif isinstance(test, case.TestCase):<EOL><INDENT>return self.suiteClass([test])<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" %<EOL>(obj, test))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % obj)<EOL><DEDENT>", "docstring": "Return a suite of all tests cases given a string specifier.\n\n        The name may resolve either to a module, a test case class, a\n        test method within a test case class, or a callable object which\n        returns a TestCase or TestSuite instance.\n\n        The method optionally resolves the names relative to a given module.", "id": "f16445:c0:m2"}
{"signature": "def __subclasscheck__(cls, subclass):", "body": "<EOL>if subclass in cls._abc_cache:<EOL><INDENT>return True<EOL><DEDENT>if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:<EOL><INDENT>cls._abc_negative_cache = _weakrefset.WeakSet()<EOL>cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter<EOL><DEDENT>elif subclass in cls._abc_negative_cache:<EOL><INDENT>return False<EOL><DEDENT>ok = cls.__subclasshook__(subclass)<EOL>if ok is not NotImplemented:<EOL><INDENT>assert isinstance(ok, bool)<EOL>if ok:<EOL><INDENT>cls._abc_cache.add(subclass)<EOL><DEDENT>else:<EOL><INDENT>cls._abc_negative_cache.add(subclass)<EOL><DEDENT>return ok<EOL><DEDENT>if cls in getattr(subclass, '<STR_LIT>', ()):<EOL><INDENT>cls._abc_cache.add(subclass)<EOL>return True<EOL><DEDENT>for rcls in cls._abc_registry:<EOL><INDENT>if issubclass(subclass, rcls):<EOL><INDENT>cls._abc_cache.add(subclass)<EOL>return True<EOL><DEDENT><DEDENT>for scls in cls.__subclasses__():<EOL><INDENT>if issubclass(subclass, scls):<EOL><INDENT>cls._abc_cache.add(subclass)<EOL>return True<EOL><DEDENT><DEDENT>cls._abc_negative_cache.add(subclass)<EOL>return False<EOL>", "docstring": "Override for issubclass(subclass, cls).", "id": "f16447:c1:m4"}
{"signature": "def abstractmethod(funcobj):", "body": "funcobj.__isabstractmethod__ = True<EOL>return funcobj<EOL>", "docstring": "A decorator indicating abstract methods.\n\n    Requires that the metaclass is ABCMeta or derived from it.  A\n    class that has a metaclass derived from ABCMeta cannot be\n    instantiated unless all of its abstract methods are overridden.\n    The abstract methods can be called using any of the normal\n    'super' call mechanisms.\n\n    Usage:\n\n        class C:\n            __metaclass__ = ABCMeta\n            @abstractmethod\n            def my_abstract_method(self, ...):\n                ...", "id": "f16447:m0"}
{"signature": "def _dump_registry(cls, file=None):", "body": "print >> file, \"<STR_LIT>\" % (cls.__module__, cls.__name__)<EOL>print >> file, \"<STR_LIT>\" % ABCMeta._abc_invalidation_counter<EOL>for name in sorted(cls.__dict__.keys()):<EOL><INDENT>if name.startswith(\"<STR_LIT>\"):<EOL><INDENT>value = getattr(cls, name)<EOL>print >> file, \"<STR_LIT>\" % (name, value)<EOL><DEDENT><DEDENT>", "docstring": "Debug helper to print the ABC registry.", "id": "f16447:c1:m2"}
{"signature": "def format_exception_only(etype, value):", "body": "<EOL>if (isinstance(etype, BaseException) or<EOL>isinstance(etype, types.InstanceType) or<EOL>etype is None or type(etype) is str):<EOL><INDENT>return [_format_final_exc_line(etype, value)]<EOL><DEDENT>stype = etype.__name__<EOL>if not issubclass(etype, SyntaxError):<EOL><INDENT>return [_format_final_exc_line(stype, value)]<EOL><DEDENT>lines = []<EOL>try:<EOL><INDENT>msg, (filename, lineno, offset, badline) = value.args<EOL><DEDENT>except Exception:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>filename = filename or \"<STR_LIT>\"<EOL>lines.append('<STR_LIT>' % (filename, lineno))<EOL>if badline is not None:<EOL><INDENT>lines.append('<STR_LIT>' % badline.strip())<EOL>if offset is not None:<EOL><INDENT>caretspace = badline.rstrip('<STR_LIT:\\n>')<EOL>offset = min(len(caretspace), offset) - <NUM_LIT:1><EOL>caretspace = caretspace[:offset].lstrip()<EOL>caretspace = ((c.isspace() and c or '<STR_LIT:U+0020>') for c in caretspace)<EOL>lines.append('<STR_LIT>' % '<STR_LIT>'.join(caretspace))<EOL><DEDENT><DEDENT>value = msg<EOL><DEDENT>lines.append(_format_final_exc_line(stype, value))<EOL>return lines<EOL>", "docstring": "Format the exception part of a traceback.\n\n    The arguments are the exception type and value such as given by\n    sys.last_type and sys.last_value. The return value is a list of\n    strings, each ending in a newline.\n\n    Normally, the list contains a single string; however, for\n    SyntaxError exceptions, it contains several lines that (when\n    printed) display detailed information about where the syntax\n    error occurred.\n\n    The message indicating which exception occurred is always the last\n    string in the list.", "id": "f16448:m8"}
{"signature": "def extract_stack(f=None, limit = None):", "body": "if f is None:<EOL><INDENT>try:<EOL><INDENT>raise ZeroDivisionError<EOL><DEDENT>except ZeroDivisionError:<EOL><INDENT>f = sys.exc_info()[<NUM_LIT:2>].tb_frame.f_back<EOL><DEDENT><DEDENT>if limit is None:<EOL><INDENT>if hasattr(sys, '<STR_LIT>'):<EOL><INDENT>limit = sys.tracebacklimit<EOL><DEDENT><DEDENT>list = []<EOL>n = <NUM_LIT:0><EOL>while f is not None and (limit is None or n < limit):<EOL><INDENT>lineno = f.f_lineno<EOL>co = f.f_code<EOL>filename = co.co_filename<EOL>name = co.co_name<EOL>linecache.checkcache(filename)<EOL>line = linecache.getline(filename, lineno, f.f_globals)<EOL>if line: line = line.strip()<EOL>else: line = None<EOL>list.append((filename, lineno, name, line))<EOL>f = f.f_back<EOL>n = n+<NUM_LIT:1><EOL><DEDENT>list.reverse()<EOL>return list<EOL>", "docstring": "Extract the raw traceback from the current stack frame.\n\n    The return value has the same format as for extract_tb().  The\n    optional 'f' and 'limit' arguments have the same meaning as for\n    print_stack().  Each item in the list is a quadruple (filename,\n    line number, function name, text), and the entries are in order\n    from oldest to newest stack frame.", "id": "f16448:m16"}
{"signature": "def format_exception(etype, value, tb, limit = None):", "body": "if tb:<EOL><INDENT>list = ['<STR_LIT>']<EOL>list = list + format_tb(tb, limit)<EOL><DEDENT>else:<EOL><INDENT>list = []<EOL><DEDENT>list = list + format_exception_only(etype, value)<EOL>return list<EOL>", "docstring": "Format a stack trace and the exception information.\n\n    The arguments have the same meaning as the corresponding arguments\n    to print_exception().  The return value is a list of strings, each\n    ending in a newline and some containing internal newlines.  When\n    these lines are concatenated and printed, exactly the same text is\n    printed as does print_exception().", "id": "f16448:m7"}
{"signature": "def format_stack(f=None, limit=None):", "body": "if f is None:<EOL><INDENT>try:<EOL><INDENT>raise ZeroDivisionError<EOL><DEDENT>except ZeroDivisionError:<EOL><INDENT>f = sys.exc_info()[<NUM_LIT:2>].tb_frame.f_back<EOL><DEDENT><DEDENT>return format_list(extract_stack(f, limit))<EOL>", "docstring": "Shorthand for 'format_list(extract_stack(f, limit))'.", "id": "f16448:m15"}
{"signature": "def print_exception(etype, value, tb, limit=None, file=None):", "body": "if file is None:<EOL><INDENT>file = open('<STR_LIT>', '<STR_LIT:w>')<EOL><DEDENT>if tb:<EOL><INDENT>_print(file, '<STR_LIT>')<EOL>print_tb(tb, limit, file)<EOL><DEDENT>lines = format_exception_only(etype, value)<EOL>for line in lines:<EOL><INDENT>_print(file, line, '<STR_LIT>')<EOL><DEDENT>", "docstring": "Print exception up to 'limit' stack trace entries from 'tb' to 'file'.\n\n    This differs from print_tb() in the following ways: (1) if\n    traceback is not None, it prints a header \"Traceback (most recent\n    call last):\"; (2) it prints the exception type and value after the\n    stack trace; (3) if type is SyntaxError and value has the\n    appropriate format, it prints the line where the syntax error\n    occurred with a caret on the next line indicating the approximate\n    position of the error.", "id": "f16448:m6"}
{"signature": "def _format_final_exc_line(etype, value):", "body": "valuestr = _some_str(value)<EOL>if value is None or not valuestr:<EOL><INDENT>line = \"<STR_LIT>\" % etype<EOL><DEDENT>else:<EOL><INDENT>line = \"<STR_LIT>\" % (etype, valuestr)<EOL><DEDENT>return line<EOL>", "docstring": "Return a list of a single line -- normal case for format_exception_only", "id": "f16448:m9"}
{"signature": "def extract_tb(tb, limit = None):", "body": "if limit is None:<EOL><INDENT>if hasattr(sys, '<STR_LIT>'):<EOL><INDENT>limit = sys.tracebacklimit<EOL><DEDENT><DEDENT>list = []<EOL>n = <NUM_LIT:0><EOL>while tb is not None and (limit is None or n < limit):<EOL><INDENT>f = tb.tb_frame<EOL>lineno = tb.tb_lineno<EOL>co = f.f_code<EOL>filename = co.co_filename<EOL>name = co.co_name<EOL>linecache.checkcache(filename)<EOL>line = linecache.getline(filename, lineno, f.f_globals)<EOL>if line: line = line.strip()<EOL>else: line = None<EOL>list.append((filename, lineno, name, line))<EOL>tb = tb.tb_next<EOL>n = n+<NUM_LIT:1><EOL><DEDENT>return list<EOL>", "docstring": "Return list of up to limit pre-processed entries from traceback.\n\n    This is useful for alternate formatting of stack traces.  If\n    'limit' is omitted or None, all entries are extracted.  A\n    pre-processed stack trace entry is a quadruple (filename, line\n    number, function name, text) representing the information that is\n    usually printed for a stack trace.  The text is a string with\n    leading and trailing whitespace stripped; if the source is not\n    available it is None.", "id": "f16448:m5"}
{"signature": "def insort_right(a, x, lo=<NUM_LIT:0>, hi=None):", "body": "if lo < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if hi is None:<EOL><INDENT>hi = len(a)<EOL><DEDENT>while lo < hi:<EOL><INDENT>mid = (lo+hi)//<NUM_LIT:2><EOL>if x < a[mid]: hi = mid<EOL>else: lo = mid+<NUM_LIT:1><EOL><DEDENT>a.insert(lo, x)<EOL>", "docstring": "Insert item x in list a, and keep it sorted assuming a is sorted.\n\n    If x is already in a, insert it to the right of the rightmost x.\n\n    Optional args lo (default 0) and hi (default len(a)) bound the\n    slice of a to be searched.", "id": "f16449:m0"}
{"signature": "def _split(self, text):", "body": "if isinstance(text, _unicode):<EOL><INDENT>if self.break_on_hyphens:<EOL><INDENT>pat = self.wordsep_re_uni<EOL><DEDENT>else:<EOL><INDENT>pat = self.wordsep_simple_re_uni<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if self.break_on_hyphens:<EOL><INDENT>pat = self.wordsep_re<EOL><DEDENT>else:<EOL><INDENT>pat = self.wordsep_simple_re<EOL><DEDENT><DEDENT>chunks = pat.split(text)<EOL>chunks = [x for x in chunks if x is not None]<EOL>return chunks<EOL>", "docstring": "_split(text : string) -> [string]\n\n        Split the text to wrap into indivisible chunks.  Chunks are\n        not quite the same as words; see _wrap_chunks() for full\n        details.  As an example, the text\n          Look, goof-ball -- use the -b option!\n        breaks into the following chunks:\n          'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',\n          'use', ' ', 'the', ' ', '-b', ' ', 'option!'\n        if break_on_hyphens is True, or in:\n          'Look,', ' ', 'goof-ball', ' ', '--', ' ',\n          'use', ' ', 'the', ' ', '-b', ' ', option!'\n        otherwise.", "id": "f16450:c0:m2"}
{"signature": "def _wrap_chunks(self, chunks):", "body": "lines = []<EOL>if self.width <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % self.width)<EOL><DEDENT>chunks.reverse()<EOL>while chunks:<EOL><INDENT>cur_line = []<EOL>cur_len = <NUM_LIT:0><EOL>if lines:<EOL><INDENT>indent = self.subsequent_indent<EOL><DEDENT>else:<EOL><INDENT>indent = self.initial_indent<EOL><DEDENT>width = self.width - len(indent)<EOL>if self.drop_whitespace and chunks[-<NUM_LIT:1>].strip() == '<STR_LIT>' and lines:<EOL><INDENT>chunks.pop()<EOL><DEDENT>while chunks:<EOL><INDENT>l = len(chunks[-<NUM_LIT:1>])<EOL>if cur_len + l <= width:<EOL><INDENT>cur_line.append(chunks.pop())<EOL>cur_len += l<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if chunks and len(chunks[-<NUM_LIT:1>]) > width:<EOL><INDENT>self._handle_long_word(chunks, cur_line, cur_len, width)<EOL><DEDENT>if self.drop_whitespace and cur_line and cur_line[-<NUM_LIT:1>].strip() == '<STR_LIT>':<EOL><INDENT>cur_line.pop()<EOL><DEDENT>if cur_line:<EOL><INDENT>lines.append(indent + '<STR_LIT>'.join(cur_line))<EOL><DEDENT><DEDENT>return lines<EOL>", "docstring": "_wrap_chunks(chunks : [string]) -> [string]\n\n        Wrap a sequence of text chunks and return a list of lines of\n        length 'self.width' or less.  (If 'break_long_words' is false,\n        some lines may be longer than this.)  Chunks correspond roughly\n        to words and the whitespace between them: each chunk is\n        indivisible (modulo 'break_long_words'), but a line break can\n        come between any two chunks.  Chunks should not have internal\n        whitespace; ie. a chunk is either all whitespace or a \"word\".\n        Whitespace chunks will be removed from the beginning and end of\n        lines, but apart from that whitespace is preserved.", "id": "f16450:c0:m5"}
{"signature": "def _munge_whitespace(self, text):", "body": "if self.expand_tabs:<EOL><INDENT>text = '<STR_LIT:U+0020>'.join(('<STR_LIT:U+0020>'.join(text.split('<STR_LIT:\\n>'))).split('<STR_LIT:\\t>'))<EOL><DEDENT>if self.replace_whitespace:<EOL><INDENT>text = '<STR_LIT:U+0020>'.join('<STR_LIT:U+0020>'.join(text.split('<STR_LIT:\\n>')).split('<STR_LIT:\\t>'))<EOL><DEDENT>return text<EOL>", "docstring": "_munge_whitespace(text : string) -> string\n\n        Munge whitespace in text: expand tabs and convert all other\n        whitespace characters to spaces.  Eg. \" foo\\\\tbar\\\\n\\\\nbaz\"\n        becomes \" foo    bar  baz\".", "id": "f16450:c0:m1"}
{"signature": "def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):", "body": "<EOL>if width < <NUM_LIT:1>:<EOL><INDENT>space_left = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>space_left = width - cur_len<EOL><DEDENT>if self.break_long_words:<EOL><INDENT>cur_line.append(reversed_chunks[-<NUM_LIT:1>][:space_left])<EOL>reversed_chunks[-<NUM_LIT:1>] = reversed_chunks[-<NUM_LIT:1>][space_left:]<EOL><DEDENT>elif not cur_line:<EOL><INDENT>cur_line.append(reversed_chunks.pop())<EOL><DEDENT>", "docstring": "_handle_long_word(chunks : [string],\n                             cur_line : [string],\n                             cur_len : int, width : int)\n\n        Handle a chunk of text (most likely a word, not whitespace) that\n        is too long to fit in any line.", "id": "f16450:c0:m4"}
{"signature": "def _fix_sentence_endings(self, chunks):", "body": "i = <NUM_LIT:0><EOL>patsearch = self.sentence_end_re.search<EOL>while i < len(chunks)-<NUM_LIT:1>:<EOL><INDENT>if chunks[i+<NUM_LIT:1>] == \"<STR_LIT:U+0020>\" and patsearch(chunks[i]):<EOL><INDENT>chunks[i+<NUM_LIT:1>] = \"<STR_LIT:U+0020>\"<EOL>i += <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>i += <NUM_LIT:1><EOL><DEDENT><DEDENT>", "docstring": "_fix_sentence_endings(chunks : [string])\n\n        Correct for sentence endings buried in 'chunks'.  Eg. when the\n        original text contains \"... foo.\\\\nBar ...\", munge_whitespace()\n        and split() will convert that to [..., \"foo.\", \" \", \"Bar\", ...]\n        which has one too few spaces; this method simply changes the one\n        space to two.", "id": "f16450:c0:m3"}
{"signature": "def settrace(func):", "body": "global _trace_hook<EOL>_trace_hook = func<EOL>", "docstring": "Set a trace function for all threads started from the threading module.\n\n    The func will be passed to sys.settrace() for each thread, before its run()\n    method is called.", "id": "f16452:m1"}
{"signature": "def notifyAll(self):", "body": "self.notify(len(self.__waiters))<EOL>", "docstring": "Wake up all threads waiting on this condition.\n\n        If the calling thread has not acquired the lock when this method\n        is called, a RuntimeError is raised.", "id": "f16452:c1:m9"}
{"signature": "def _daemon_getter(self):", "body": "assert self.__initialized, \"<STR_LIT>\"<EOL>return self.__daemonic<EOL>", "docstring": "A boolean value indicating whether this thread is a daemon thread (True) or not (False).\n\n        This must be set before start() is called, otherwise RuntimeError is\n        raised. Its initial value is inherited from the creating thread; the\n        main thread is not a daemon thread and therefore all threads created in\n        the main thread default to daemon = False.\n\n        The entire Python program exits when no alive non-daemon threads are\n        left.", "id": "f16452:c5:m17"}
{"signature": "def run(self):", "body": "try:<EOL><INDENT>if self.__target:<EOL><INDENT>self.__target(*self.__args, **self.__kwargs)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>del self.__target, self.__args, self.__kwargs<EOL><DEDENT>", "docstring": "Method representing the thread's activity.\n\n        You may override this method in a subclass. The standard run() method\n        invokes the callable object passed to the object's constructor as the\n        target argument, if any, with sequential and keyword arguments taken\n        from the args and kwargs arguments, respectively.", "id": "f16452:c5:m6"}
{"signature": "def Semaphore(*args, **kwargs):", "body": "return _Semaphore(*args, **kwargs)<EOL>", "docstring": "A factory function that returns a new semaphore.\n\n    Semaphores manage a counter representing the number of release() calls minus\n    the number of acquire() calls, plus an initial value. The acquire() method\n    blocks if necessary until it can return without making the counter\n    negative. If not given, value defaults to 1.", "id": "f16452:m4"}
{"signature": "def set(self):", "body": "with self.__cond:<EOL><INDENT>self.__flag = True<EOL>self.__cond.notify_all()<EOL><DEDENT>", "docstring": "Set the internal flag to true.\n\n        All threads waiting for the flag to become true are awakened. Threads\n        that call wait() once the flag is true will not block at all.", "id": "f16452:c4:m3"}
{"signature": "def Timer(*args, **kwargs):", "body": "return _Timer(*args, **kwargs)<EOL>", "docstring": "Factory function to create a Timer object.\n\n    Timers call a function after a specified number of seconds:\n\n        t = Timer(30.0, f, args=[], kwargs={})\n        t.start()\n        t.cancel()     # stop the timer's action if it's still waiting", "id": "f16452:m8"}
{"signature": "def enumerate():", "body": "with _active_limbo_lock:<EOL><INDENT>return list(_active.values()) + list(_limbo.values())<EOL><DEDENT>", "docstring": "Return a list of all Thread objects currently alive.\n\n    The list includes daemonic threads, dummy thread objects created by\n    current_thread(), and the main thread. It excludes terminated threads and\n    threads that have not yet been started.", "id": "f16452:m13"}
{"signature": "def isAlive(self):", "body": "assert self.__initialized, \"<STR_LIT>\"<EOL>return self.__started.is_set() and not self.__stopped<EOL>", "docstring": "Return whether the thread is alive.\n\n        This method returns True just before the run() method starts until just\n        after the run() method terminates. The module function enumerate()\n        returns a list of all alive threads.", "id": "f16452:c5:m16"}
{"signature": "def clear(self):", "body": "with self.__cond:<EOL><INDENT>self.__flag = False<EOL><DEDENT>", "docstring": "Reset the internal flag to false.\n\n        Subsequently, threads calling wait() will block until set() is called to\n        set the internal flag to true again.", "id": "f16452:c4:m4"}
{"signature": "def wait(self, timeout=None):", "body": "with self.__cond:<EOL><INDENT>if not self.__flag:<EOL><INDENT>self.__cond.wait(timeout)<EOL><DEDENT>return self.__flag<EOL><DEDENT>", "docstring": "Block until the internal flag is true.\n\n        If the internal flag is true on entry, return immediately. Otherwise,\n        block until another thread calls set() to set the flag to true, or until\n        the optional timeout occurs.\n\n        When the timeout argument is present and not None, it should be a\n        floating point number specifying a timeout for the operation in seconds\n        (or fractions thereof).\n\n        This method returns the internal flag on exit, so it will always return\n        True except if a timeout is given and the operation times out.", "id": "f16452:c4:m5"}
{"signature": "def acquire(self, blocking=<NUM_LIT:1>):", "body": "rc = False<EOL>with self.__cond:<EOL><INDENT>while self.__value == <NUM_LIT:0>:<EOL><INDENT>if not blocking:<EOL><INDENT>break<EOL><DEDENT>if __debug__:<EOL><INDENT>self._note(\"<STR_LIT>\",<EOL>self, blocking, self.__value)<EOL><DEDENT>self.__cond.wait()<EOL><DEDENT>else:<EOL><INDENT>self.__value = self.__value - <NUM_LIT:1><EOL>if __debug__:<EOL><INDENT>self._note(\"<STR_LIT>\",<EOL>self, self.__value)<EOL><DEDENT>rc = True<EOL><DEDENT><DEDENT>return rc<EOL>", "docstring": "Acquire a semaphore, decrementing the internal counter by one.\n\n        When invoked without arguments: if the internal counter is larger than\n        zero on entry, decrement it by one and return immediately. If it is zero\n        on entry, block, waiting until some other thread has called release() to\n        make it larger than zero. This is done with proper interlocking so that\n        if multiple acquire() calls are blocked, release() will wake exactly one\n        of them up. The implementation may pick one at random, so the order in\n        which blocked threads are awakened should not be relied on. There is no\n        return value in this case.\n\n        When invoked with blocking set to true, do the same thing as when called\n        without arguments, and return true.\n\n        When invoked with blocking set to false, do not block. If a call without\n        an argument would block, return false immediately; otherwise, do the\n        same thing as when called without arguments, and return true.", "id": "f16452:c2:m1"}
{"signature": "def index(s, *args):", "body": "return s.index(*args)<EOL>", "docstring": "index(s, sub [,start [,end]]) -> int\n\n    Like find but raises ValueError when the substring is not found.", "id": "f16453:m11"}
{"signature": "def ljust(s, width, *args):", "body": "return s.ljust(width, *args)<EOL>", "docstring": "ljust(s, width[, fillchar]) -> string\n\n    Return a left-justified version of s, in a field of the\n    specified width, padded with spaces as needed.  The string is\n    never truncated.  If specified the fillchar is used instead of spaces.", "id": "f16453:m19"}
{"signature": "def count(s, *args):", "body": "return s.count(*args)<EOL>", "docstring": "count(s, sub[, start[,end]]) -> int\n\n    Return the number of occurrences of substring sub in string\n    s[start:end].  Optional arguments start and end are\n    interpreted as in slice notation.", "id": "f16453:m13"}
{"signature": "def lower(s):", "body": "return s.lower()<EOL>", "docstring": "lower(s) -> string\n\n    Return a copy of the string s converted to lowercase.", "id": "f16453:m2"}
{"signature": "def rstrip(s, chars=None):", "body": "return s.rstrip(chars)<EOL>", "docstring": "rstrip(s [,chars]) -> string\n\n    Return a copy of the string s with trailing whitespace removed.\n    If chars is given and not None, remove characters in chars instead.", "id": "f16453:m7"}
{"signature": "def strip(s, chars=None):", "body": "return s.strip(chars)<EOL>", "docstring": "strip(s [,chars]) -> string\n\n    Return a copy of the string s with leading and trailing\n    whitespace removed.\n    If chars is given and not None, remove characters in chars instead.\n    If chars is unicode, S will be converted to unicode before stripping.", "id": "f16453:m5"}
{"signature": "def rfind(s, *args):", "body": "return s.rfind(*args)<EOL>", "docstring": "rfind(s, sub [,start [,end]]) -> int\n\n    Return the highest index in s where substring sub is found,\n    such that sub is contained within s[start,end].  Optional\n    arguments start and end are interpreted as in slice notation.\n\n    Return -1 on failure.", "id": "f16453:m15"}
{"signature": "def rjust(s, width, *args):", "body": "return s.rjust(width, *args)<EOL>", "docstring": "rjust(s, width[, fillchar]) -> string\n\n    Return a right-justified version of s, in a field of the\n    specified width, padded with spaces as needed.  The string is\n    never truncated.  If specified the fillchar is used instead of spaces.", "id": "f16453:m20"}
{"signature": "def capwords(s, sep=None):", "body": "return (sep or '<STR_LIT:U+0020>').join(x.capitalize() for x in s.split(sep))<EOL>", "docstring": "capwords(s [,sep]) -> string\n\n    Split the argument into words using split, capitalize each\n    word using capitalize, and join the capitalized words using\n    join.  If the optional second argument sep is absent or None,\n    runs of whitespace characters are replaced by a single space\n    and leading and trailing whitespace are removed, otherwise\n    sep is used to split and join the words.", "id": "f16453:m0"}
{"signature": "def translate(s, table, deletions=\"<STR_LIT>\"):", "body": "if deletions or table is None:<EOL><INDENT>return s.translate(table, deletions)<EOL><DEDENT>else:<EOL><INDENT>return s.translate(table + s[:<NUM_LIT:0>])<EOL><DEDENT>", "docstring": "translate(s,table [,deletions]) -> string\n\n    Return a copy of the string s, where all characters occurring\n    in the optional argument deletions are removed, and the\n    remaining characters have been mapped through the given\n    translation table, which must be a string of length 256.  The\n    deletions argument is not allowed for Unicode strings.", "id": "f16453:m24"}
{"signature": "def atol(s, base=<NUM_LIT:10>):", "body": "return _long(s, base)<EOL>", "docstring": "atol(s [,base]) -> long\n\n    Return the long integer represented by the string s in the\n    given base, which defaults to 10.  The string s must consist\n    of one or more digits, possibly preceded by a sign.  If base\n    is 0, it is chosen from the leading characters of s, 0 for\n    octal, 0x or 0X for hexadecimal.  If base is 16, a preceding\n    0x or 0X is accepted.  A trailing L or l is not accepted,\n    unless base is 0.", "id": "f16453:m18"}
{"signature": "def replace(s, old, new, maxreplace=-<NUM_LIT:1>):", "body": "return s.replace(old, new, maxreplace)<EOL>", "docstring": "replace (str, old, new[, maxreplace]) -> string\n\n    Return a copy of string str with all occurrences of substring\n    old replaced by new. If the optional argument maxreplace is\n    given, only the first maxreplace occurrences are replaced.", "id": "f16453:m26"}
{"signature": "def find(s, *args):", "body": "return s.find(*args)<EOL>", "docstring": "find(s, sub [,start [,end]]) -> in\n\n    Return the lowest index in s where substring sub is found,\n    such that sub is contained within s[start,end].  Optional\n    arguments start and end are interpreted as in slice notation.\n\n    Return -1 on failure.", "id": "f16453:m14"}
{"signature": "def split(s, sep=None, maxsplit=-<NUM_LIT:1>):", "body": "return s.split(sep, maxsplit)<EOL>", "docstring": "split(s [,sep [,maxsplit]]) -> list of strings\n\n    Return a list of the words in the string s, using sep as the\n    delimiter string.  If maxsplit is given, splits at no more than\n    maxsplit places (resulting in at most maxsplit+1 words).  If sep\n    is not specified or is None, any whitespace string is a separator.\n\n    (split and splitfields are synonymous)", "id": "f16453:m8"}
{"signature": "def rindex(s, *args):", "body": "return s.rindex(*args)<EOL>", "docstring": "rindex(s, sub [,start [,end]]) -> int\n\n    Like rfind but raises ValueError when the substring is not found.", "id": "f16453:m12"}
{"signature": "def __init__(self):", "body": "self.locked = False<EOL>self.queue = deque()<EOL>", "docstring": "Create a new mutex -- initially unlocked.", "id": "f16454:c0:m0"}
{"signature": "def nlargest(n, iterable, key=None):", "body": "<EOL>if n == <NUM_LIT:1>:<EOL><INDENT>it = iter(iterable)<EOL>head = list(islice(it, <NUM_LIT:1>))<EOL>if not head:<EOL><INDENT>return []<EOL><DEDENT>if key is None:<EOL><INDENT>return [max(chain(head, it))]<EOL><DEDENT>return [max(chain(head, it), key=key)]<EOL><DEDENT>try:<EOL><INDENT>size = len(iterable)<EOL><DEDENT>except (TypeError, AttributeError):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if n >= size:<EOL><INDENT>return sorted(iterable, key=key, reverse=True)[:n]<EOL><DEDENT><DEDENT>if key is None:<EOL><INDENT>it = izip(iterable, count(<NUM_LIT:0>,-<NUM_LIT:1>))                    <EOL>result = _nlargest(n, it)<EOL>return map(itemgetter(<NUM_LIT:0>), result)                   <EOL><DEDENT>in1, in2 = tee(iterable)<EOL>it = izip(imap(key, in1), count(<NUM_LIT:0>,-<NUM_LIT:1>), in2)             <EOL>result = _nlargest(n, it)<EOL>return map(itemgetter(<NUM_LIT:2>), result)                       <EOL>", "docstring": "Find the n largest elements in a dataset.\n\n    Equivalent to:  sorted(iterable, key=key, reverse=True)[:n]", "id": "f16455:m16"}
{"signature": "def nsmallest(n, iterable):", "body": "if n < <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>it = iter(iterable)<EOL>result = list(islice(it, n))<EOL>if not result:<EOL><INDENT>return result<EOL><DEDENT>_heapify_max(result)<EOL>_heappushpop = _heappushpop_max<EOL>for elem in it:<EOL><INDENT>_heappushpop(result, elem)<EOL><DEDENT>result.sort()<EOL>return result<EOL>", "docstring": "Find the n smallest elements in a dataset.\n\n    Equivalent to:  sorted(iterable)[:n]", "id": "f16455:m9"}
{"signature": "def heappush(heap, item):", "body": "heap.append(item)<EOL>_siftdown(heap, <NUM_LIT:0>, len(heap)-<NUM_LIT:1>)<EOL>", "docstring": "Push item onto heap, maintaining the heap invariant.", "id": "f16455:m1"}
{"signature": "def nlargest(n, iterable):", "body": "if n < <NUM_LIT:0>:<EOL><INDENT>return []<EOL><DEDENT>it = iter(iterable)<EOL>result = list(islice(it, n))<EOL>if not result:<EOL><INDENT>return result<EOL><DEDENT>heapify(result)<EOL>_heappushpop = heappushpop<EOL>for elem in it:<EOL><INDENT>_heappushpop(result, elem)<EOL><DEDENT>result.sort(reverse=True)<EOL>return result<EOL>", "docstring": "Find the n largest elements in a dataset.\n\n    Equivalent to:  sorted(iterable, reverse=True)[:n]", "id": "f16455:m8"}
{"signature": "def encode(input, output, encoding):", "body": "if encoding == '<STR_LIT>':<EOL><INDENT>import base64<EOL>return base64.encode(input, output)<EOL><DEDENT>if encoding == '<STR_LIT>':<EOL><INDENT>import quopri<EOL>return quopri.encode(input, output, <NUM_LIT:0>)<EOL><DEDENT>if encoding in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>import uu<EOL>return uu.encode(input, output)<EOL><DEDENT>if encoding in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>return output.write(input.read())<EOL><DEDENT>if encoding in encodetab:<EOL><INDENT>pipethrough(input, encodetab[encoding], output)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % encoding)<EOL><DEDENT>", "docstring": "Encode common content-transfer-encodings (base64, quopri, uuencode).", "id": "f16456:m2"}
{"signature": "def decode(input, output, encoding):", "body": "if encoding == '<STR_LIT>':<EOL><INDENT>import base64<EOL>return base64.decode(input, output)<EOL><DEDENT>if encoding == '<STR_LIT>':<EOL><INDENT>import quopri<EOL>return quopri.decode(input, output)<EOL><DEDENT>if encoding in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>import uu<EOL>return uu.decode(input, output)<EOL><DEDENT>if encoding in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>return output.write(input.read())<EOL><DEDENT>if encoding in decodetab:<EOL><INDENT>pipethrough(input, decodetab[encoding], output)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % encoding)<EOL><DEDENT>", "docstring": "Decode common content-transfer-encodings (base64, quopri, uuencode).", "id": "f16456:m1"}
{"signature": "def unorderable_list_difference(expected, actual, ignore_duplicate=False):", "body": "missing = []<EOL>unexpected = []<EOL>while expected:<EOL><INDENT>item = expected.pop()<EOL>try:<EOL><INDENT>actual.remove(item)<EOL><DEDENT>except ValueError:<EOL><INDENT>missing.append(item)<EOL><DEDENT>if ignore_duplicate:<EOL><INDENT>for lst in expected, actual:<EOL><INDENT>try:<EOL><INDENT>while True:<EOL><INDENT>lst.remove(item)<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if ignore_duplicate:<EOL><INDENT>while actual:<EOL><INDENT>item = actual.pop()<EOL>unexpected.append(item)<EOL>try:<EOL><INDENT>while True:<EOL><INDENT>actual.remove(item)<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return missing, unexpected<EOL><DEDENT>return missing, actual<EOL>", "docstring": "Same behavior as sorted_list_difference but\n    for lists of unorderable items (like dicts).\n\n    As it does a linear search per item (remove) it\n    has O(n*n) performance.", "id": "f16457:m3"}
{"signature": "def __new__(_cls, actual, expected, value):", "body": "return _tuple.__new__(_cls, (actual, expected, value))<EOL>", "docstring": "Create new instance of Mismatch(actual, expected, value)", "id": "f16457:c0:m0"}
{"signature": "def _asdict(self):", "body": "<EOL>return dict(zip(self._fields, self))<EOL>", "docstring": "Return a new OrderedDict which maps field names to their values", "id": "f16457:c0:m3"}
{"signature": "def sorted_list_difference(expected, actual):", "body": "i = j = <NUM_LIT:0><EOL>missing = []<EOL>unexpected = []<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>e = expected[i]<EOL>a = actual[j]<EOL>if e < a:<EOL><INDENT>missing.append(e)<EOL>i += <NUM_LIT:1><EOL>while expected[i] == e:<EOL><INDENT>i += <NUM_LIT:1><EOL><DEDENT><DEDENT>elif e > a:<EOL><INDENT>unexpected.append(a)<EOL>j += <NUM_LIT:1><EOL>while actual[j] == a:<EOL><INDENT>j += <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>i += <NUM_LIT:1><EOL>try:<EOL><INDENT>while expected[i] == e:<EOL><INDENT>i += <NUM_LIT:1><EOL><DEDENT><DEDENT>finally:<EOL><INDENT>j += <NUM_LIT:1><EOL>while actual[j] == a:<EOL><INDENT>j += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>except IndexError:<EOL><INDENT>missing.extend(expected[i:])<EOL>unexpected.extend(actual[j:])<EOL>break<EOL><DEDENT><DEDENT>return missing, unexpected<EOL>", "docstring": "Finds elements in only one or the other of two, sorted input lists.\n\n    Returns a two-element tuple of lists.    The first list contains those\n    elements in the \"expected\" list but not in the \"actual\" list, and the\n    second contains those elements in the \"actual\" list but not in the\n    \"expected\" list.    Duplicate elements in either input list are ignored.", "id": "f16457:m2"}
{"signature": "def _ordered_count(iterable):", "body": "c = {} <EOL>for elem in iterable:<EOL><INDENT>c[elem] = c.get(elem, <NUM_LIT:0>) + <NUM_LIT:1><EOL><DEDENT>return c<EOL>", "docstring": "Return dict of element counts, in the order they were first seen", "id": "f16457:m5"}
{"signature": "def destroy(self):", "body": "OptionContainer.destroy(self)<EOL>for group in self.option_groups:<EOL><INDENT>group.destroy()<EOL><DEDENT>del self.option_list<EOL>del self.option_groups<EOL>del self.formatter<EOL>", "docstring": "Declare that you are done with this OptionParser.  This cleans up\nreference cycles so the OptionParser (and all objects referenced by\nit) can be garbage-collected promptly.  After calling destroy(), the\nOptionParser is unusable.", "id": "f16458:c13:m1"}
{"signature": "def _format_text(self, text):", "body": "text_width = max(self.width - self.current_indent, <NUM_LIT:11>)<EOL>indent = \"<STR_LIT:U+0020>\"*self.current_indent<EOL>return textwrap.fill(text,<EOL>text_width,<EOL>initial_indent=indent,<EOL>subsequent_indent=indent)<EOL>", "docstring": "Format a paragraph of free-form text for inclusion in the\nhelp output at the current indentation level.", "id": "f16458:c6:m8"}
{"signature": "def _match_abbrev(s, wordmap):", "body": "<EOL>if s in wordmap:<EOL><INDENT>return s<EOL><DEDENT>else:<EOL><INDENT>possibilities = [word for word in list(wordmap.keys())<EOL>if word.startswith(s)]<EOL>if len(possibilities) == <NUM_LIT:1>:<EOL><INDENT>return possibilities[<NUM_LIT:0>]<EOL><DEDENT>elif not possibilities:<EOL><INDENT>raise BadOptionError(s)<EOL><DEDENT>else:<EOL><INDENT>possibilities.sort()<EOL>raise AmbiguousOptionError(s, possibilities)<EOL><DEDENT><DEDENT>", "docstring": "_match_abbrev(s : string, wordmap : {string : Option}) -> string\n\n    Return the string key in 'wordmap' for which 's' is an unambiguous\n    abbreviation.  If 's' is found to be ambiguous or doesn't match any of\n    'words', raise BadOptionError.", "id": "f16458:m8"}
{"signature": "def _match_long_opt(self, opt):", "body": "return _match_abbrev(opt, self._long_opt)<EOL>", "docstring": "_match_long_opt(opt : string) -> string\n\n        Determine which long option string 'opt' matches, ie. which one\n        it is an unambiguous abbreviation for.  Raises BadOptionError if\n        'opt' doesn't unambiguously match any long option string.", "id": "f16458:c13:m21"}
{"signature": "def format_option_strings(self, option):", "body": "if option.takes_value():<EOL><INDENT>metavar = option.metavar or option.dest.upper()<EOL>short_opts = [self._short_opt_fmt % (sopt, metavar)<EOL>for sopt in option._short_opts]<EOL>long_opts = [self._long_opt_fmt % (lopt, metavar)<EOL>for lopt in option._long_opts]<EOL><DEDENT>else:<EOL><INDENT>short_opts = option._short_opts<EOL>long_opts = option._long_opts<EOL><DEDENT>if self.short_first:<EOL><INDENT>opts = short_opts + long_opts<EOL><DEDENT>else:<EOL><INDENT>opts = long_opts + short_opts<EOL><DEDENT>return \"<STR_LIT:U+002CU+0020>\".join(opts)<EOL>", "docstring": "Return a comma-separated list of option strings & metavariables.", "id": "f16458:c6:m14"}
{"signature": "def check_values(self, values, args):", "body": "return (values, args)<EOL>", "docstring": "check_values(values : Values, args : [string])\n-> (values : Values, args : [string])\n\nCheck that the supplied option values and leftover arguments are\nvalid.  Returns the option values and leftover arguments\n(possibly adjusted, possibly completely new -- whatever you\nlike).  Default implementation just returns the passed-in\nvalues; subclasses may override as desired.", "id": "f16458:c13:m19"}
{"signature": "def _update_loose(self, dict):", "body": "self.__dict__.update(dict)<EOL>", "docstring": "Update the option values from an arbitrary dictionary,\nusing all keys from the dictionary regardless of whether\nthey have a corresponding attribute in self or not.", "id": "f16458:c10:m4"}
{"signature": "def print_help(self, file=None):", "body": "if file is None:<EOL><INDENT>file = sys.stdout<EOL><DEDENT>encoding = self._get_encoding(file)<EOL>file.write(self.format_help())<EOL>", "docstring": "print_help(file : file = stdout)\n\n        Print an extended help message, listing all options and any\n        help text provided with them, to 'file' (default stdout).", "id": "f16458:c13:m37"}
{"signature": "def print_usage(self, file=None):", "body": "if self.usage:<EOL><INDENT>print(self.get_usage(), file=file)<EOL><DEDENT>", "docstring": "print_usage(file : file = stdout)\n\n        Print the usage message for the current program (self.usage) to\n        'file' (default stdout).  Any occurrence of the string \"%prog\" in\n        self.usage is replaced with the name of the current program\n        (basename of sys.argv[0]).  Does nothing if self.usage is empty\n        or not defined.", "id": "f16458:c13:m30"}
{"signature": "def enter(self, delay, priority, action, argument):", "body": "time = self.timefunc() + delay<EOL>return self.enterabs(time, priority, action, argument)<EOL>", "docstring": "A variant that specifies the time as a relative time.\n        This is actually the more commonly used interface.", "id": "f16459:c1:m2"}
{"signature": "@property<EOL><INDENT>def queue(self):<DEDENT>", "body": "<EOL>events = self._queue[:]<EOL>return map(heapq.heappop, [events]*len(events))<EOL>", "docstring": "An ordered list of upcoming events.\n        Events are named tuples with fields for:\n            time, priority, action, arguments", "id": "f16459:c1:m6"}
{"signature": "def run(self):", "body": "<EOL>q = self._queue<EOL>delayfunc = self.delayfunc<EOL>timefunc = self.timefunc<EOL>pop = heapq.heappop<EOL>while q:<EOL><INDENT>checked_event = q[<NUM_LIT:0>]<EOL>time, priority, action, argument = checked_event.get_fields()<EOL>now = timefunc()<EOL>if now < time:<EOL><INDENT>delayfunc(time - now)<EOL><DEDENT>else:<EOL><INDENT>event = pop(q)<EOL>if event is checked_event:<EOL><INDENT>action(*argument)<EOL>delayfunc(<NUM_LIT:0>)   <EOL><DEDENT>else:<EOL><INDENT>heapq.heappush(q, event)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Execute events until the queue is empty.\n        When there is a positive delay until the first event, the\n        delay function is called and the event is left in the queue;\n        otherwise, the event is removed from the queue and executed\n        (its action function is called, passing it the argument).  If\n        the delay function returns prematurely, it is simply\n        restarted.\n        It is legal for both the delay function and the action\n        function to modify the queue or to raise an exception;\n        exceptions are not caught but the scheduler's state remains\n        well-defined so run() may be called again.\n        A questionable hack is added to allow other threads to run:\n        just after an event is executed, a delay of 0 is executed, to\n        avoid monopolizing the CPU when other threads are also\n        runnable.", "id": "f16459:c1:m5"}
{"signature": "def __len__(self):", "body": "return len(self.dict)<EOL>", "docstring": "Get the number of headers in a message.", "id": "f16460:c0:m15"}
{"signature": "def readheaders(self):", "body": "self.dict = {}<EOL>self.unixfrom = '<STR_LIT>'<EOL>self.headers = lst = []<EOL>self.status = '<STR_LIT>'<EOL>headerseen = \"<STR_LIT>\"<EOL>firstline = <NUM_LIT:1><EOL>startofline = unread = tell = None<EOL>if hasattr(self.fp, '<STR_LIT>'):<EOL><INDENT>unread = self.fp.unread<EOL><DEDENT>elif self.seekable:<EOL><INDENT>tell = self.fp.tell<EOL><DEDENT>while <NUM_LIT:1>:<EOL><INDENT>if tell:<EOL><INDENT>try:<EOL><INDENT>startofline = tell()<EOL><DEDENT>except IOError:<EOL><INDENT>startofline = tell = None<EOL>self.seekable = <NUM_LIT:0><EOL><DEDENT><DEDENT>line = self.fp.readline()<EOL>if not line:<EOL><INDENT>self.status = '<STR_LIT>'<EOL>break<EOL><DEDENT>if firstline and line.startswith('<STR_LIT>'):<EOL><INDENT>self.unixfrom = self.unixfrom + line<EOL>continue<EOL><DEDENT>firstline = <NUM_LIT:0><EOL>if headerseen and line[<NUM_LIT:0>] in '<STR_LIT>':<EOL><INDENT>lst.append(line)<EOL>x = (self.dict[headerseen] + \"<STR_LIT>\" + line.strip())<EOL>self.dict[headerseen] = x.strip()<EOL>continue<EOL><DEDENT>elif self.iscomment(line):<EOL><INDENT>continue<EOL><DEDENT>elif self.islast(line):<EOL><INDENT>break<EOL><DEDENT>headerseen = self.isheader(line)<EOL>if headerseen:<EOL><INDENT>lst.append(line)<EOL>self.dict[headerseen] = line[len(headerseen)+<NUM_LIT:1>:].strip()<EOL>continue<EOL><DEDENT>elif headerseen is not None:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>if not self.dict:<EOL><INDENT>self.status = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>self.status = '<STR_LIT>'<EOL><DEDENT>if unread:<EOL><INDENT>unread(line)<EOL><DEDENT>elif tell:<EOL><INDENT>self.fp.seek(startofline)<EOL><DEDENT>else:<EOL><INDENT>self.status = self.status + '<STR_LIT>'<EOL><DEDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Read header lines.\n\n        Read header lines up to the entirely blank line that terminates them.\n        The (normally blank) line that ends the headers is skipped, but not\n        included in the returned 
list.  If a non-header line ends the headers,\n        (which is an error), an attempt is made to backspace over it; it is\n        never included in the returned list.\n\n        The variable self.status is set to the empty string if all went well,\n        otherwise it is an error message.  The variable self.headers is a\n        completely uninterpreted list of lines contained in the header (so\n        printing them will reproduce the header exactly as it appears in the\n        file).", "id": "f16460:c0:m2"}
{"signature": "def getaddrlist(self, name):", "body": "raw = []<EOL>for h in self.getallmatchingheaders(name):<EOL><INDENT>if h[<NUM_LIT:0>] in '<STR_LIT>':<EOL><INDENT>raw.append(h)<EOL><DEDENT>else:<EOL><INDENT>if raw:<EOL><INDENT>raw.append('<STR_LIT:U+002CU+0020>')<EOL><DEDENT>i = h.find('<STR_LIT::>')<EOL>if i > <NUM_LIT:0>:<EOL><INDENT>addr = h[i+<NUM_LIT:1>:]<EOL><DEDENT>raw.append(addr)<EOL><DEDENT><DEDENT>alladdrs = '<STR_LIT>'.join(raw)<EOL>a = AddressList(alladdrs)<EOL>return a.addresslist<EOL>", "docstring": "Get a list of addresses from a header.\n\n        Retrieves a list of addresses from a header, where each address is a\n        tuple as returned by getaddr().  Scans all named headers, so it works\n        properly with multiple To: or Cc: headers for example.", "id": "f16460:c0:m12"}
{"signature": "def formatdate(timeval=None):", "body": "if timeval is None:<EOL><INDENT>timeval = time.time()<EOL><DEDENT>timeval = time.gmtime(timeval)<EOL>return \"<STR_LIT>\" % (<EOL>(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")[timeval[<NUM_LIT:6>]],<EOL>timeval[<NUM_LIT:2>],<EOL>(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")[timeval[<NUM_LIT:1>]-<NUM_LIT:1>],<EOL>timeval[<NUM_LIT:0>], timeval[<NUM_LIT:3>], timeval[<NUM_LIT:4>], timeval[<NUM_LIT:5>])<EOL>", "docstring": "Returns time format preferred for Internet standards.\n\n    Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123\n\n    According to RFC 1123, day and month names must always be in\n    English.  If not for that, this code could use strftime().  It\n    can't because strftime() honors the locale and could generate\n    non-English names.", "id": "f16460:m7"}
{"signature": "def dump_address_pair(pair):", "body": "if pair[<NUM_LIT:0>]:<EOL><INDENT>return '<STR_LIT:\">' + pair[<NUM_LIT:0>] + '<STR_LIT>' + pair[<NUM_LIT:1>] + '<STR_LIT:>>'<EOL><DEDENT>else:<EOL><INDENT>return pair[<NUM_LIT:1>]<EOL><DEDENT>", "docstring": "Dump a (name, address) pair in a canonicalized form.", "id": "f16460:m3"}
{"signature": "def getfirstmatchingheader(self, name):", "body": "name = name.lower() + '<STR_LIT::>'<EOL>n = len(name)<EOL>lst = []<EOL>hit = <NUM_LIT:0><EOL>for line in self.headers:<EOL><INDENT>if hit:<EOL><INDENT>if not line[:<NUM_LIT:1>].isspace():<EOL><INDENT>break<EOL><DEDENT><DEDENT>elif line[:n].lower() == name:<EOL><INDENT>hit = <NUM_LIT:1><EOL><DEDENT>if hit:<EOL><INDENT>lst.append(line)<EOL><DEDENT><DEDENT>return lst<EOL>", "docstring": "Get the first header line matching name.\n\n        This is similar to getallmatchingheaders, but it returns only the\n        first matching header (and its continuation lines).", "id": "f16460:c0:m7"}
{"signature": "def mktime_tz(data):", "body": "if data[<NUM_LIT:9>] is None:<EOL><INDENT>return time.mktime(data[:<NUM_LIT:8>] + (-<NUM_LIT:1>,))<EOL><DEDENT>else:<EOL><INDENT>t = time.mktime(data[:<NUM_LIT:8>] + (<NUM_LIT:0>,))<EOL>return t - data[<NUM_LIT:9>] - time.timezone<EOL><DEDENT>", "docstring": "Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp.", "id": "f16460:m6"}
{"signature": "def has_key(self, name):", "body": "return name.lower() in self.dict<EOL>", "docstring": "Determine whether a message contains the named header.", "id": "f16460:c0:m20"}
{"signature": "def getquote(self):", "body": "return self.getdelimited('<STR_LIT:\">', '<STR_LIT>', <NUM_LIT:0>)<EOL>", "docstring": "Get a quote-delimited fragment from self's field.", "id": "f16460:c1:m8"}
{"signature": "def __init__(self, fp, seekable = <NUM_LIT:1>):", "body": "if seekable == <NUM_LIT:1>:<EOL><INDENT>try:<EOL><INDENT>fp.tell()<EOL><DEDENT>except (AttributeError, IOError):<EOL><INDENT>seekable = <NUM_LIT:0><EOL><DEDENT><DEDENT>self.fp = fp<EOL>self.seekable = seekable<EOL>self.startofheaders = None<EOL>self.startofbody = None<EOL>if self.seekable:<EOL><INDENT>try:<EOL><INDENT>self.startofheaders = self.fp.tell()<EOL><DEDENT>except IOError:<EOL><INDENT>self.seekable = <NUM_LIT:0><EOL><DEDENT><DEDENT>self.readheaders()<EOL>if self.seekable:<EOL><INDENT>try:<EOL><INDENT>self.startofbody = self.fp.tell()<EOL><DEDENT>except IOError:<EOL><INDENT>self.seekable = <NUM_LIT:0><EOL><DEDENT><DEDENT>", "docstring": "Initialize the class instance and read the headers.", "id": "f16460:c0:m0"}
{"signature": "def isheader(self, line):", "body": "i = line.find('<STR_LIT::>')<EOL>if i > -<NUM_LIT:1>:<EOL><INDENT>return line[:i].lower()<EOL><DEDENT>return None<EOL>", "docstring": "Determine whether a given line is a legal header.\n\n        This method should return the header name, suitably canonicalized.\n        You may override this method in order to use Message parsing on tagged\n        data in RFC 2822-like formats with special header formats.", "id": "f16460:c0:m3"}
{"signature": "def unquote(s):", "body": "if len(s) > <NUM_LIT:1>:<EOL><INDENT>if s.startswith('<STR_LIT:\">') and s.endswith('<STR_LIT:\">'):<EOL><INDENT>return s[<NUM_LIT:1>:-<NUM_LIT:1>].replace('<STR_LIT>', '<STR_LIT:\\\\>').replace('<STR_LIT>', '<STR_LIT:\">')<EOL><DEDENT>if s.startswith('<STR_LIT:<>') and s.endswith('<STR_LIT:>>'):<EOL><INDENT>return s[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>return s<EOL>", "docstring": "Remove quotes from a string.", "id": "f16460:m0"}
{"signature": "def __setitem__(self, name, value):", "body": "del self[name] <EOL>self.dict[name.lower()] = value<EOL>text = name + \"<STR_LIT>\" + value<EOL>for line in text.split(\"<STR_LIT:\\n>\"):<EOL><INDENT>self.headers.append(line + \"<STR_LIT:\\n>\")<EOL><DEDENT>", "docstring": "Set the value of a header.\n\n        Note: This is not a perfect inversion of __getitem__, because any\n        changed headers get stuck at the end of the raw-headers list rather\n        than where the altered header was.", "id": "f16460:c0:m17"}
{"signature": "def getaddress(self):", "body": "self.commentlist = []<EOL>self.gotonext()<EOL>oldpos = self.pos<EOL>oldcl = self.commentlist<EOL>plist = self.getphraselist()<EOL>self.gotonext()<EOL>returnlist = []<EOL>if self.pos >= len(self.field):<EOL><INDENT>if plist:<EOL><INDENT>returnlist = [('<STR_LIT:U+0020>'.join(self.commentlist), plist[<NUM_LIT:0>])]<EOL><DEDENT><DEDENT>elif self.field[self.pos] in '<STR_LIT>':<EOL><INDENT>self.pos = oldpos<EOL>self.commentlist = oldcl<EOL>addrspec = self.getaddrspec()<EOL>returnlist = [('<STR_LIT:U+0020>'.join(self.commentlist), addrspec)]<EOL><DEDENT>elif self.field[self.pos] == '<STR_LIT::>':<EOL><INDENT>returnlist = []<EOL>fieldlen = len(self.field)<EOL>self.pos += <NUM_LIT:1><EOL>while self.pos < len(self.field):<EOL><INDENT>self.gotonext()<EOL>if self.pos < fieldlen and self.field[self.pos] == '<STR_LIT:;>':<EOL><INDENT>self.pos += <NUM_LIT:1><EOL>break<EOL><DEDENT>returnlist = returnlist + self.getaddress()<EOL><DEDENT><DEDENT>elif self.field[self.pos] == '<STR_LIT:<>':<EOL><INDENT>routeaddr = self.getrouteaddr()<EOL>if self.commentlist:<EOL><INDENT>returnlist = [('<STR_LIT:U+0020>'.join(plist) + '<STR_LIT>' +'<STR_LIT:U+0020>'.join(self.commentlist) + '<STR_LIT:)>', routeaddr)]<EOL><DEDENT>else: returnlist = [('<STR_LIT:U+0020>'.join(plist), routeaddr)]<EOL><DEDENT>else:<EOL><INDENT>if plist:<EOL><INDENT>returnlist = [('<STR_LIT:U+0020>'.join(self.commentlist), plist[<NUM_LIT:0>])]<EOL><DEDENT>elif self.field[self.pos] in self.specials:<EOL><INDENT>self.pos += <NUM_LIT:1><EOL><DEDENT><DEDENT>self.gotonext()<EOL>if self.pos < len(self.field) and self.field[self.pos] == '<STR_LIT:U+002C>':<EOL><INDENT>self.pos += <NUM_LIT:1><EOL><DEDENT>return returnlist<EOL>", "docstring": "Parse the next address.", "id": "f16460:c1:m3"}
{"signature": "def getcomment(self):", "body": "return self.getdelimited('<STR_LIT:(>', '<STR_LIT>', <NUM_LIT:1>)<EOL>", "docstring": "Get a parenthesis-delimited fragment from self's field.", "id": "f16460:c1:m9"}
{"signature": "def __init__(self, field):", "body": "self.specials = '<STR_LIT>'<EOL>self.pos = <NUM_LIT:0><EOL>self.LWS = '<STR_LIT>'<EOL>self.CR = '<STR_LIT:\\r\\n>'<EOL>self.atomends = self.specials + self.LWS + self.CR<EOL>self.phraseends = self.atomends.replace('<STR_LIT:.>', '<STR_LIT>')<EOL>self.field = field<EOL>self.commentlist = []<EOL>", "docstring": "Initialize a new instance.\n\n        `field' is an unparsed address header field, containing one or more\n        addresses.", "id": "f16460:c1:m0"}
{"signature": "def getaddr(self, name):", "body": "<EOL>alist = self.getaddrlist(name)<EOL>if alist:<EOL><INDENT>return alist[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return (None, None)<EOL><DEDENT>", "docstring": "Get a single address from a header, as a tuple.\n\n        An example return value:\n        ('Guido van Rossum', 'guido@cwi.nl')", "id": "f16460:c0:m11"}
{"signature": "def items(self):", "body": "return list(self.dict.items())<EOL>", "docstring": "Get all of a message's headers.\n\n        Returns a list of name, value tuples.", "id": "f16460:c0:m25"}
{"signature": "def __getitem__(self, name):", "body": "return self.dict[name.lower()]<EOL>", "docstring": "Get a specific header, as from a dictionary.", "id": "f16460:c0:m16"}
{"signature": "def getdate(self, name):", "body": "try:<EOL><INDENT>data = self[name]<EOL><DEDENT>except KeyError:<EOL><INDENT>return None<EOL><DEDENT>return parsedate(data)<EOL>", "docstring": "Retrieve a date field from a header.\n\n        Retrieves a date field from the named header, returning a tuple\n        compatible with time.mktime().", "id": "f16460:c0:m13"}
{"signature": "def getdomain(self):", "body": "sdlist = []<EOL>while self.pos < len(self.field):<EOL><INDENT>if self.field[self.pos] in self.LWS:<EOL><INDENT>self.pos += <NUM_LIT:1><EOL><DEDENT>elif self.field[self.pos] == '<STR_LIT:(>':<EOL><INDENT>self.commentlist.append(self.getcomment())<EOL><DEDENT>elif self.field[self.pos] == '<STR_LIT:[>':<EOL><INDENT>sdlist.append(self.getdomainliteral())<EOL><DEDENT>elif self.field[self.pos] == '<STR_LIT:.>':<EOL><INDENT>self.pos += <NUM_LIT:1><EOL>sdlist.append('<STR_LIT:.>')<EOL><DEDENT>elif self.field[self.pos] in self.atomends:<EOL><INDENT>break<EOL><DEDENT>else: sdlist.append(self.getatom())<EOL><DEDENT>return '<STR_LIT>'.join(sdlist)<EOL>", "docstring": "Get the complete domain name from an address.", "id": "f16460:c1:m6"}
{"signature": "def getaddrspec(self):", "body": "aslist = []<EOL>self.gotonext()<EOL>while self.pos < len(self.field):<EOL><INDENT>if self.field[self.pos] == '<STR_LIT:.>':<EOL><INDENT>aslist.append('<STR_LIT:.>')<EOL>self.pos += <NUM_LIT:1><EOL><DEDENT>elif self.field[self.pos] == '<STR_LIT:\">':<EOL><INDENT>aslist.append('<STR_LIT>' % self.getquote())<EOL><DEDENT>elif self.field[self.pos] in self.atomends:<EOL><INDENT>break<EOL><DEDENT>else: aslist.append(self.getatom())<EOL>self.gotonext()<EOL><DEDENT>if self.pos >= len(self.field) or self.field[self.pos] != '<STR_LIT:@>':<EOL><INDENT>return '<STR_LIT>'.join(aslist)<EOL><DEDENT>aslist.append('<STR_LIT:@>')<EOL>self.pos += <NUM_LIT:1><EOL>self.gotonext()<EOL>return '<STR_LIT>'.join(aslist) + self.getdomain()<EOL>", "docstring": "Parse an RFC 2822 addr-spec.", "id": "f16460:c1:m5"}
{"signature": "def parseaddr(address):", "body": "a = AddressList(address)<EOL>lst = a.addresslist<EOL>if not lst:<EOL><INDENT>return (None, None)<EOL><DEDENT>return lst[<NUM_LIT:0>]<EOL>", "docstring": "Parse an address into a (realname, mailaddr) tuple.", "id": "f16460:m2"}
{"signature": "def iscomment(self, line):", "body": "return False<EOL>", "docstring": "Determine whether a line should be skipped entirely.\n\n        You may override this method in order to use Message parsing on tagged\n        data in RFC 2822-like formats that support embedded comments or\n        free-text data.", "id": "f16460:c0:m5"}
{"signature": "def quote(s):", "body": "return s.replace('<STR_LIT:\\\\>', '<STR_LIT>').replace('<STR_LIT:\">', '<STR_LIT>')<EOL>", "docstring": "Add quotes around a string.", "id": "f16460:m1"}
{"signature": "def getphraselist(self):", "body": "plist = []<EOL>while self.pos < len(self.field):<EOL><INDENT>if self.field[self.pos] in self.LWS:<EOL><INDENT>self.pos += <NUM_LIT:1><EOL><DEDENT>elif self.field[self.pos] == '<STR_LIT:\">':<EOL><INDENT>plist.append(self.getquote())<EOL><DEDENT>elif self.field[self.pos] == '<STR_LIT:(>':<EOL><INDENT>self.commentlist.append(self.getcomment())<EOL><DEDENT>elif self.field[self.pos] in self.phraseends:<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>plist.append(self.getatom(self.phraseends))<EOL><DEDENT><DEDENT>return plist<EOL>", "docstring": "Parse a sequence of RFC 2822 phrases.\n\n        A phrase is a sequence of words, which are in turn either RFC 2822\n        atoms or quoted-strings.  Phrases are canonicalized by squeezing all\n        runs of continuous whitespace into one space.", "id": "f16460:c1:m12"}
{"signature": "def getdomainliteral(self):", "body": "return '<STR_LIT>' % self.getdelimited('<STR_LIT:[>', '<STR_LIT>', <NUM_LIT:0>)<EOL>", "docstring": "Parse an RFC 2822 domain-literal.", "id": "f16460:c1:m10"}
{"signature": "def gotonext(self):", "body": "while self.pos < len(self.field):<EOL><INDENT>if self.field[self.pos] in self.LWS + '<STR_LIT>':<EOL><INDENT>self.pos = self.pos + <NUM_LIT:1><EOL><DEDENT>elif self.field[self.pos] == '<STR_LIT:(>':<EOL><INDENT>self.commentlist.append(self.getcomment())<EOL><DEDENT>else: break<EOL><DEDENT>", "docstring": "Parse up to the start of the next address.", "id": "f16460:c1:m1"}
{"signature": "def truncate(self, size=None):", "body": "_complain_ifclosed(self.closed)<EOL>if size is None:<EOL><INDENT>size = self.pos<EOL><DEDENT>elif size < <NUM_LIT:0>:<EOL><INDENT>raise IOError(EINVAL, \"<STR_LIT>\")<EOL><DEDENT>elif size < self.pos:<EOL><INDENT>self.pos = size<EOL><DEDENT>self.buf = self.getvalue()[:size]<EOL>self.len = size<EOL>", "docstring": "Truncate the file's size.\n\n        If the optional size argument is present, the file is truncated to\n        (at most) that size. The size defaults to the current position.\n        The current file position is not changed unless the position\n        is beyond the new file size.\n\n        If the specified size exceeds the file's current size, the\n        file remains unchanged.", "id": "f16461:c0:m10"}
{"signature": "def isatty(self):", "body": "_complain_ifclosed(self.closed)<EOL>return False<EOL>", "docstring": "Returns False because StringIO objects are not connected to a\n        tty-like device.", "id": "f16461:c0:m4"}
{"signature": "def flush(self):", "body": "_complain_ifclosed(self.closed)<EOL>", "docstring": "Flush the internal buffer", "id": "f16461:c0:m13"}
{"signature": "def close(self):", "body": "if not self.closed:<EOL><INDENT>self.closed = True<EOL>del self.buf, self.pos<EOL><DEDENT>", "docstring": "Free the memory buffer.", "id": "f16461:c0:m3"}
{"signature": "def tell(self):", "body": "_complain_ifclosed(self.closed)<EOL>return self.pos<EOL>", "docstring": "Return the file's current position.", "id": "f16461:c0:m6"}
{"signature": "def getvalue(self):", "body": "_complain_ifclosed(self.closed)<EOL>if self.buflist:<EOL><INDENT>self.buf += '<STR_LIT>'.join(self.buflist)<EOL>self.buflist = []<EOL><DEDENT>return self.buf<EOL>", "docstring": "Retrieve the entire contents of the \"file\" at any time before\nthe StringIO object's close() method is called.\n\nThe StringIO object can accept either Unicode or 8-bit strings,\nbut mixing the two may take some care. If both are used, 8-bit\nstrings that cannot be interpreted as 7-bit ASCII (that use the\n8th bit) will cause a UnicodeError to be raised when getvalue()\nis called.", "id": "f16461:c0:m14"}
{"signature": "def _guess_quote_and_delimiter(self, data, delimiters):", "body": "matches = []<EOL>for restr in ('<STR_LIT>', <EOL>'<STR_LIT>',   <EOL>'<STR_LIT>',  <EOL>'<STR_LIT>'):                            <EOL><INDENT>regexp = re.compile(restr, re.DOTALL | re.MULTILINE)<EOL>matches = regexp.findall(data)<EOL>if matches:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if not matches:<EOL><INDENT>return ('<STR_LIT>', False, None, <NUM_LIT:0>)<EOL><DEDENT>quotes = {}<EOL>delims = {}<EOL>spaces = <NUM_LIT:0><EOL>for m in matches:<EOL><INDENT>n = regexp.groupindex['<STR_LIT>'] - <NUM_LIT:1><EOL>key = m[n]<EOL>if key:<EOL><INDENT>quotes[key] = quotes.get(key, <NUM_LIT:0>) + <NUM_LIT:1><EOL><DEDENT>try:<EOL><INDENT>n = regexp.groupindex['<STR_LIT>'] - <NUM_LIT:1><EOL>key = m[n]<EOL><DEDENT>except KeyError:<EOL><INDENT>continue<EOL><DEDENT>if key and (delimiters is None or key in delimiters):<EOL><INDENT>delims[key] = delims.get(key, <NUM_LIT:0>) + <NUM_LIT:1><EOL><DEDENT>try:<EOL><INDENT>n = regexp.groupindex['<STR_LIT>'] - <NUM_LIT:1><EOL><DEDENT>except KeyError:<EOL><INDENT>continue<EOL><DEDENT>if m[n]:<EOL><INDENT>spaces += <NUM_LIT:1><EOL><DEDENT><DEDENT>quotechar = reduce(lambda a, b, quotes = quotes:<EOL>(quotes[a] > quotes[b]) and a or b, list(quotes.keys()))<EOL>if delims:<EOL><INDENT>delim = reduce(lambda a, b, delims = delims:<EOL>(delims[a] > delims[b]) and a or b, list(delims.keys()))<EOL>skipinitialspace = delims[delim] == spaces<EOL>if delim == '<STR_LIT:\\n>': <EOL><INDENT>delim = '<STR_LIT>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>delim = '<STR_LIT>'<EOL>skipinitialspace = <NUM_LIT:0><EOL><DEDENT>dq_regexp = re.compile(<EOL>r\"<STR_LIT>\" %{'<STR_LIT>':re.escape(delim), '<STR_LIT>':quotechar}, re.MULTILINE)<EOL>if dq_regexp.search(data):<EOL><INDENT>doublequote = True<EOL><DEDENT>else:<EOL><INDENT>doublequote = False<EOL><DEDENT>return (quotechar, doublequote, delim, skipinitialspace)<EOL>", "docstring": "Looks for text enclosed between two identical 
quotes\n(the probable quotechar) which are preceded and followed\nby the same character (the probable delimiter).\nFor example:\n                 ,'some text',\nThe quote with the most wins, same with the delimiter.\nIf there is no quotechar the delimiter can't be determined\nthis way.", "id": "f16463:c5:m2"}
{"signature": "def glob(pathname):", "body": "return list(iglob(pathname))<EOL>", "docstring": "Return a list of paths matching a pathname pattern.\n\n    The pattern may contain simple shell-style wildcards a la\n    fnmatch. However, unlike fnmatch, filenames starting with a\n    dot are special cases that are not matched by '*' and '?'\n    patterns.", "id": "f16465:m0"}
{"signature": "def getatime(filename):", "body": "return os.stat(filename).st_atime<EOL>", "docstring": "Return the last access time of a file, reported by os.stat().", "id": "f16466:m5"}
{"signature": "def isdir(s):", "body": "try:<EOL><INDENT>st = os.stat(s)<EOL><DEDENT>except os.error:<EOL><INDENT>return False<EOL><DEDENT>return stat.S_ISDIR(st.st_mode)<EOL>", "docstring": "Return true if the pathname refers to an existing directory.", "id": "f16466:m2"}
{"signature": "def commonprefix(m):", "body": "if not m: return '<STR_LIT>'<EOL>s1 = min(m)<EOL>s2 = max(m)<EOL>for i, c in enumerate(s1):<EOL><INDENT>if c != s2[i]:<EOL><INDENT>return s1[:i]<EOL><DEDENT><DEDENT>return s1<EOL>", "docstring": "Given a list of pathnames, returns the longest common leading component", "id": "f16466:m7"}
{"signature": "def isfile(path):", "body": "try:<EOL><INDENT>st = os.stat(path)<EOL><DEDENT>except os.error:<EOL><INDENT>return False<EOL><DEDENT>return stat.S_ISREG(st.st_mode)<EOL>", "docstring": "Test whether a path is a regular file", "id": "f16466:m1"}
{"signature": "def exists(path):", "body": "try:<EOL><INDENT>os.stat(path)<EOL><DEDENT>except os.error:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Test whether a path exists.  Returns False for broken symbolic links", "id": "f16466:m0"}
{"signature": "def getctime(filename):", "body": "return os.stat(filename).st_ctime<EOL>", "docstring": "Return the metadata change time of a file, reported by os.stat().", "id": "f16466:m6"}
{"signature": "def getmtime(filename):", "body": "return os.stat(filename).st_mtime<EOL>", "docstring": "Return the last modification time of a file, reported by os.stat().", "id": "f16466:m4"}
{"signature": "def start_new_thread(function, args, kwargs={}):", "body": "if type(args) != type(tuple()):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if type(kwargs) != type(dict()):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>global _main<EOL>_main = False<EOL>try:<EOL><INDENT>function(*args, **kwargs)<EOL><DEDENT>except SystemExit:<EOL><INDENT>pass<EOL><DEDENT>except:<EOL><INDENT>_traceback.print_exc()<EOL><DEDENT>_main = True<EOL>global _interrupt<EOL>if _interrupt:<EOL><INDENT>_interrupt = False<EOL>raise KeyboardInterrupt<EOL><DEDENT>", "docstring": "Dummy implementation of thread.start_new_thread().\n\n    Compatibility is maintained by making sure that ``args`` is a\n    tuple and ``kwargs`` is a dictionary.  If an exception is raised\n    and it is SystemExit (which can be done by thread.exit()) it is\n    caught and nothing is done; all other exceptions are printed out\n    by using traceback.print_exc().\n\n    If the executed function calls interrupt_main the KeyboardInterrupt will be\n    raised when the function returns.", "id": "f16467:m0"}
{"signature": "def stack_size(size=None):", "body": "if size is not None:<EOL><INDENT>raise error(\"<STR_LIT>\")<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Dummy implementation of thread.stack_size().", "id": "f16467:m4"}
{"signature": "def interrupt_main():", "body": "if _main:<EOL><INDENT>raise KeyboardInterrupt<EOL><DEDENT>else:<EOL><INDENT>global _interrupt<EOL>_interrupt = True<EOL><DEDENT>", "docstring": "Set _interrupt flag to True to have start_new_thread raise\n    KeyboardInterrupt upon exiting.", "id": "f16467:m5"}
{"signature": "def release(self):", "body": "<EOL>if not self.locked_status:<EOL><INDENT>raise error<EOL><DEDENT>self.locked_status = False<EOL>return True<EOL>", "docstring": "Release the dummy lock.", "id": "f16467:c1:m3"}
{"signature": "def get_ident():", "body": "return -<NUM_LIT:1><EOL>", "docstring": "Dummy implementation of thread.get_ident().\n\n    Since this module should only be used when threadmodule is not\n    available, it is safe to assume that the current process is the\n    only thread.  Thus a constant can be safely returned.", "id": "f16467:m2"}
{"signature": "def acquire(self, waitflag=None):", "body": "if waitflag is None or waitflag:<EOL><INDENT>self.locked_status = True<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>if not self.locked_status:<EOL><INDENT>self.locked_status = True<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>", "docstring": "Dummy implementation of acquire().\n\n        For blocking calls, self.locked_status is automatically set to\n        True and returned appropriately based on value of\n        ``waitflag``.  If it is non-blocking, then the value is\n        actually checked and not set if it is already acquired.  This\n        is all done so that threading.Condition's assert statements\n        aren't triggered and throw a little fit.", "id": "f16467:c1:m1"}
{"signature": "def debug(self):", "body": "for test in self:<EOL><INDENT>test.debug()<EOL><DEDENT>", "docstring": "Run the tests without collecting errors in a TestResult", "id": "f16468:c0:m10"}
{"signature": "def printErrors(self):", "body": "", "docstring": "Called by TestRunner after test run", "id": "f16469:c0:m1"}
{"signature": "def stop(self):", "body": "self.shouldStop = True<EOL>", "docstring": "Indicates that the tests should be aborted", "id": "f16469:c0:m15"}
{"signature": "def wasSuccessful(self):", "body": "return len(self.failures) == len(self.errors) == <NUM_LIT:0><EOL>", "docstring": "Tells whether or not this result was a success", "id": "f16469:c0:m14"}
{"signature": "def wraps(wrapped,<EOL>assigned = WRAPPER_ASSIGNMENTS,<EOL>updated = WRAPPER_UPDATES):", "body": "return partial(update_wrapper, wrapped=wrapped,<EOL>assigned=assigned, updated=updated)<EOL>", "docstring": "Decorator factory to apply update_wrapper() to a wrapper function\n\n       Returns a decorator that invokes update_wrapper() with the decorated\n       function as the wrapper argument and the arguments to wraps() as the\n       remaining arguments. Default arguments are as for update_wrapper().\n       This is a convenience function to simplify applying partial() to\n       update_wrapper().", "id": "f16470:m2"}
{"signature": "def total_ordering(cls):", "body": "convert = {<EOL>'<STR_LIT>': [('<STR_LIT>', lambda self, other: not (self < other or self == other)),<EOL>('<STR_LIT>', lambda self, other: self < other or self == other),<EOL>('<STR_LIT>', lambda self, other: not self < other)],<EOL>'<STR_LIT>': [('<STR_LIT>', lambda self, other: not self <= other or self == other),<EOL>('<STR_LIT>', lambda self, other: self <= other and not self == other),<EOL>('<STR_LIT>', lambda self, other: not self <= other)],<EOL>'<STR_LIT>': [('<STR_LIT>', lambda self, other: not (self > other or self == other)),<EOL>('<STR_LIT>', lambda self, other: self > other or self == other),<EOL>('<STR_LIT>', lambda self, other: not self > other)],<EOL>'<STR_LIT>': [('<STR_LIT>', lambda self, other: (not self >= other) or self == other),<EOL>('<STR_LIT>', lambda self, other: self >= other and not self == other),<EOL>('<STR_LIT>', lambda self, other: not self >= other)]<EOL>}<EOL>roots = set(dir(cls)) & set(convert)<EOL>if not roots:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>root = max(roots)       <EOL>for opname, opfunc in convert[root]:<EOL><INDENT>if opname not in roots:<EOL><INDENT>opfunc.__name__ = opname<EOL>opfunc.__doc__ = getattr(int, opname).__doc__<EOL>setattr(cls, opname, opfunc)<EOL><DEDENT><DEDENT>return cls<EOL>", "docstring": "Class decorator that fills in missing ordering methods", "id": "f16470:m3"}
{"signature": "def items(self):", "body": "return [(key, self[key]) for key in self]<EOL>", "docstring": "D.items() -> list of D's (key, value) pairs, as 2-tuples", "id": "f16471:c8:m7"}
{"signature": "def itervalues(self):", "body": "for key in self:<EOL><INDENT>yield self[key]<EOL><DEDENT>", "docstring": "D.itervalues() -> an iterator over the values of D", "id": "f16471:c8:m4"}
{"signature": "def keys(self):", "body": "return list(self)<EOL>", "docstring": "D.keys() -> list of D's keys", "id": "f16471:c8:m6"}
{"signature": "def iteritems(self):", "body": "for key in self:<EOL><INDENT>yield (key, self[key])<EOL><DEDENT>", "docstring": "D.iteritems() -> an iterator over the (key, value) items of D", "id": "f16471:c8:m5"}
{"signature": "def popitem(self):", "body": "try:<EOL><INDENT>key = next(iter(self))<EOL><DEDENT>except StopIteration:<EOL><INDENT>raise KeyError<EOL><DEDENT>value = self[key]<EOL>del self[key]<EOL>return key, value<EOL>", "docstring": "D.popitem() -> (k, v), remove and return some (key, value) pair\n           as a 2-tuple; but raise KeyError if D is empty.", "id": "f16471:c10:m3"}
{"signature": "def iterkeys(self):", "body": "return iter(self)<EOL>", "docstring": "D.iterkeys() -> an iterator over the keys of D", "id": "f16471:c8:m3"}
{"signature": "@abstractmethod<EOL><INDENT>def add(self, value):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Add an element.", "id": "f16471:c7:m0"}
{"signature": "def get(self, key, default=None):", "body": "try:<EOL><INDENT>return self[key]<EOL><DEDENT>except KeyError:<EOL><INDENT>return default<EOL><DEDENT>", "docstring": "D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.", "id": "f16471:c8:m1"}
{"signature": "def reverse(self):", "body": "n = len(self)<EOL>for i in range(n//<NUM_LIT:2>):<EOL><INDENT>self[i], self[n-i-<NUM_LIT:1>] = self[n-i-<NUM_LIT:1>], self[i]<EOL><DEDENT>", "docstring": "S.reverse() -- reverse *IN PLACE*", "id": "f16471:c12:m4"}
{"signature": "def pop(self):", "body": "it = iter(self)<EOL>try:<EOL><INDENT>value = next(it)<EOL><DEDENT>except StopIteration:<EOL><INDENT>raise KeyError<EOL><DEDENT>self.discard(value)<EOL>return value<EOL>", "docstring": "Return the popped value.  Raise KeyError if empty.", "id": "f16471:c7:m3"}
{"signature": "def pop(self, index=-<NUM_LIT:1>):", "body": "v = self[index]<EOL>del self[index]<EOL>return v<EOL>", "docstring": "S.pop([index]) -> item -- remove and return item at index (default last).\n           Raise IndexError if list is empty or index is out of range.", "id": "f16471:c12:m6"}
{"signature": "def setdefault(self, key, default=None):", "body": "try:<EOL><INDENT>return self[key]<EOL><DEDENT>except KeyError:<EOL><INDENT>self[key] = default<EOL><DEDENT>return default<EOL>", "docstring": "D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D", "id": "f16471:c10:m6"}
{"signature": "def _hash(self):", "body": "MAX = sys.maxint<EOL>MASK = <NUM_LIT:2> * MAX + <NUM_LIT:1><EOL>n = len(self)<EOL>h = <NUM_LIT> * (n + <NUM_LIT:1>)<EOL>h &= MASK<EOL>for x in self:<EOL><INDENT>hx = hash(x)<EOL>h ^= (hx ^ (hx << <NUM_LIT:16>) ^ <NUM_LIT>)  * <NUM_LIT><EOL>h &= MASK<EOL><DEDENT>h = h * <NUM_LIT> + <NUM_LIT><EOL>h &= MASK<EOL>if h > MAX:<EOL><INDENT>h -= MASK + <NUM_LIT:1><EOL><DEDENT>if h == -<NUM_LIT:1>:<EOL><INDENT>h = <NUM_LIT><EOL><DEDENT>return h<EOL>", "docstring": "Compute the hash value of a set.\n\n        Note that we don't define __hash__: not all sets are hashable.\n        But if you define a hashable set type, its __hash__ should\n        call this function.\n\n        This must be compatible __eq__.\n\n        All sets ought to compare equal if they contain the same\n        elements, regardless of how they are implemented, and\n        regardless of the order of the elements; so there's not much\n        freedom for __eq__ or __hash__.  We match the algorithm used\n        by the built-in frozenset type.", "id": "f16471:c6:m13"}
{"signature": "@abstractmethod<EOL><INDENT>def insert(self, index, value):<DEDENT>", "body": "raise IndexError<EOL>", "docstring": "S.insert(index, object) -- insert object before index", "id": "f16471:c12:m2"}
{"signature": "@abstractmethod<EOL><INDENT>def discard(self, value):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "Remove an element.  Do not raise an exception if absent.", "id": "f16471:c7:m1"}
{"signature": "@classmethod<EOL><INDENT>def _from_iterable(cls, it):<DEDENT>", "body": "return cls(it)<EOL>", "docstring": "Construct an instance of the class from any iterable input.\n\n        Must override this method if the class constructor signature\n        does not accept an iterable for an input.", "id": "f16471:c6:m6"}
{"signature": "def count(self, value):", "body": "return sum(<NUM_LIT:1> for v in self if v == value)<EOL>", "docstring": "S.count(value) -> integer -- return number of occurrences of value", "id": "f16471:c11:m5"}
{"signature": "def randrange(self, start, stop=None, step=<NUM_LIT:1>, _int=int, _maxwidth=<NUM_LIT:1><<BPF):", "body": "<EOL>istart = _int(start)<EOL>if istart != start:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if stop is None:<EOL><INDENT>if istart > <NUM_LIT:0>:<EOL><INDENT>if istart >= _maxwidth:<EOL><INDENT>return self._randbelow(istart)<EOL><DEDENT>return _int(self.random() * istart)<EOL><DEDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>istop = _int(stop)<EOL>if istop != stop:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>width = istop - istart<EOL>if step == <NUM_LIT:1> and width > <NUM_LIT:0>:<EOL><INDENT>if width >= _maxwidth:<EOL><INDENT>return _int(istart + self._randbelow(width))<EOL><DEDENT>return _int(istart + _int(self.random()*width))<EOL><DEDENT>if step == <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (istart, istop, width))<EOL><DEDENT>istep = _int(step)<EOL>if istep != step:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if istep > <NUM_LIT:0>:<EOL><INDENT>n = (width + istep - <NUM_LIT:1>) // istep<EOL><DEDENT>elif istep < <NUM_LIT:0>:<EOL><INDENT>n = (width + istep + <NUM_LIT:1>) // istep<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if n <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if n >= _maxwidth:<EOL><INDENT>return istart + istep*self._randbelow(n)<EOL><DEDENT>return istart + istep*_int(self.random() * n)<EOL>", "docstring": "Choose a random item from range(start, stop[, step]).\n\n        This fixes the problem with randint() which includes the\n        endpoint; in Python this is usually not what you want.", "id": "f16474:c0:m2"}
{"signature": "def shuffle(self, x, random=None):", "body": "if random is None:<EOL><INDENT>random = self.random<EOL><DEDENT>_int = int<EOL>for i in reversed(range(<NUM_LIT:1>, len(x))):<EOL><INDENT>j = _int(random() * (i+<NUM_LIT:1>))<EOL>x[i], x[j] = x[j], x[i]<EOL><DEDENT>", "docstring": "x, random=random.random -> shuffle list x in place; return None.\n\n        Optional arg random is a 0-argument function returning a random\n        float in [0.0, 1.0); by default, the standard random.random.", "id": "f16474:c0:m5"}
{"signature": "def __init__(self, x=None):", "body": "self.seed(x)<EOL>self.gauss_next = None<EOL>", "docstring": "Initialize an instance.\n\n        Optional argument x controls seeding, as for Random.seed().", "id": "f16474:c0:m0"}
{"signature": "def randint(self, a, b):", "body": "return self.randrange(a, b+<NUM_LIT:1>)<EOL>", "docstring": "Return random integer in range [a, b], including both end points.", "id": "f16474:c0:m3"}
{"signature": "def match(pattern, string, flags=<NUM_LIT:0>):", "body": "return _compile(pattern, flags).match(string)<EOL>", "docstring": "Try to apply the pattern at the start of the string, returning\n    a match object, or None if no match was found.", "id": "f16475:m0"}
{"signature": "def subn(pattern, repl, string, count=<NUM_LIT:0>, flags=<NUM_LIT:0>):", "body": "return _compile(pattern, flags).subn(repl, string, count)<EOL>", "docstring": "Return a 2-tuple containing (new_string, number).\n    new_string is the string obtained by replacing the leftmost\n    non-overlapping occurrences of the pattern in the source\n    string by the replacement repl.  number is the number of\n    substitutions that were made. repl can be either a string or a\n    callable; if a string, backslash escapes in it are processed.\n    If it is a callable, it's passed the match object and must\n    return a replacement string to be used.", "id": "f16475:m3"}
{"signature": "def findall(pattern, string, flags=<NUM_LIT:0>):", "body": "return _compile(pattern, flags).findall(string)<EOL>def finditer(pattern, string, flags=<NUM_LIT:0>):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return _compile(pattern, flags).finditer(string)<EOL><DEDENT>", "docstring": "Return a list of all non-overlapping matches in the string.\n\n    If one or more groups are present in the pattern, return a\n    list of groups; this will be a list of tuples if the pattern\n    has more than one group.\n\n    Empty matches are included in the result.", "id": "f16475:m5"}
{"signature": "def escape(pattern):", "body": "s = list(pattern)<EOL>alphanum = _alphanum<EOL>for i, c in enumerate(pattern):<EOL><INDENT>if c not in alphanum:<EOL><INDENT>if c == \"<STR_LIT>\":<EOL><INDENT>s[i] = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>s[i] = \"<STR_LIT:\\\\>\" + c<EOL><DEDENT><DEDENT><DEDENT>return pattern[:<NUM_LIT:0>].join(s)<EOL>", "docstring": "Escape all non-alphanumeric characters in pattern.", "id": "f16475:m9"}
{"signature": "def compile(pattern, flags=<NUM_LIT:0>):", "body": "return _compile(pattern, flags)<EOL>", "docstring": "Compile a regular expression pattern, returning a pattern object.", "id": "f16475:m6"}
{"signature": "def add_argument(self, *args, **kwargs):", "body": "<EOL>chars = self.prefix_chars<EOL>if not args or len(args) == <NUM_LIT:1> and args[<NUM_LIT:0>][<NUM_LIT:0>] not in chars:<EOL><INDENT>if args and '<STR_LIT>' in kwargs:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>kwargs = self._get_positional_kwargs(*args, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>kwargs = self._get_optional_kwargs(*args, **kwargs)<EOL><DEDENT>if '<STR_LIT:default>' not in kwargs:<EOL><INDENT>dest = kwargs['<STR_LIT>']<EOL>if dest in self._defaults:<EOL><INDENT>kwargs['<STR_LIT:default>'] = self._defaults[dest]<EOL><DEDENT>elif self.argument_default is not None:<EOL><INDENT>kwargs['<STR_LIT:default>'] = self.argument_default<EOL><DEDENT><DEDENT>action_class = self._pop_action_class(kwargs)<EOL>if not _callable(action_class):<EOL><INDENT>raise ValueError('<STR_LIT>' % (action_class,))<EOL><DEDENT>action = action_class(**kwargs)<EOL>type_func = self._registry_get('<STR_LIT:type>', action.type, action.type)<EOL>if not _callable(type_func):<EOL><INDENT>raise ValueError('<STR_LIT>' % (type_func,))<EOL><DEDENT>if hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>try:<EOL><INDENT>self._get_formatter()._format_args(action, None)<EOL><DEDENT>except TypeError:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return self._add_action(action)<EOL>", "docstring": "add_argument(dest, ..., name=value, ...)\nadd_argument(option_string, option_string, ..., name=value, ...)", "id": "f16477:c20:m5"}
{"signature": "def error(self, message):", "body": "self.print_usage(_sys.stderr)<EOL>self.exit(<NUM_LIT:2>, _('<STR_LIT>') % (self.prog, message))<EOL>", "docstring": "error(message: string)\n\n        Prints a usage message incorporating the message to stderr and\n        exits.\n\n        If you override this in a subclass, it should not return -- it\n        should either exit or raise an exception.", "id": "f16477:c23:m28"}
{"signature": "def encode(self, o):", "body": "<EOL>if isinstance(o, str):<EOL><INDENT>if isinstance(o, str):<EOL><INDENT>_encoding = self.encoding<EOL>if (_encoding is not None<EOL>and not (_encoding == '<STR_LIT:utf-8>')):<EOL><INDENT>o = o.decode(_encoding)<EOL><DEDENT><DEDENT>if self.ensure_ascii:<EOL><INDENT>return encode_basestring_ascii(o)<EOL><DEDENT>else:<EOL><INDENT>return encode_basestring(o)<EOL><DEDENT><DEDENT>chunks = self.iterencode(o, _one_shot=True)<EOL>if not isinstance(chunks, (list, tuple)):<EOL><INDENT>chunks = list(chunks)<EOL><DEDENT>return '<STR_LIT>'.join(chunks)<EOL>", "docstring": "Return a JSON string representation of a Python data structure.\n\n        >>> JSONEncoder().encode({\"foo\": [\"bar\", \"baz\"]})\n        '{\"foo\": [\"bar\", \"baz\"]}'", "id": "f16478:c0:m2"}
{"signature": "def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,<EOL>allow_nan=True, cls=None, indent=None, separators=None,<EOL>encoding='<STR_LIT:utf-8>', default=None, sort_keys=False, **kw):", "body": "<EOL>if (not skipkeys and ensure_ascii and<EOL>check_circular and allow_nan and<EOL>cls is None and indent is None and separators is None and<EOL>encoding == '<STR_LIT:utf-8>' and default is None and not sort_keys and not kw):<EOL><INDENT>return _default_encoder.encode(obj)<EOL><DEDENT>if cls is None:<EOL><INDENT>cls = JSONEncoder<EOL><DEDENT>return cls(<EOL>skipkeys=skipkeys, ensure_ascii=ensure_ascii,<EOL>check_circular=check_circular, allow_nan=allow_nan, indent=indent,<EOL>separators=separators, encoding=encoding, default=default,<EOL>sort_keys=sort_keys, **kw).encode(obj)<EOL>", "docstring": "Serialize ``obj`` to a JSON formatted ``str``.\n\n    If ``skipkeys`` is true then ``dict`` keys that are not basic types\n    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)\n    will be skipped instead of raising a ``TypeError``.\n\n\n    If ``ensure_ascii`` is false, all non-ASCII characters are not escaped, and\n    the return value may be a ``unicode`` instance. See ``dump`` for details.\n\n    If ``check_circular`` is false, then the circular reference check\n    for container types will be skipped and a circular reference will\n    result in an ``OverflowError`` (or worse).\n\n    If ``allow_nan`` is false, then it will be a ``ValueError`` to\n    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in\n    strict compliance of the JSON specification, instead of using the\n    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).\n\n    If ``indent`` is a non-negative integer, then JSON array elements and\n    object members will be pretty-printed with that indent level. An indent\n    level of 0 will only insert newlines. ``None`` is the most compact\n    representation.  
Since the default item separator is ``', '``,  the\n    output might include trailing whitespace when ``indent`` is specified.\n    You can use ``separators=(',', ': ')`` to avoid this.\n\n    If ``separators`` is an ``(item_separator, dict_separator)`` tuple\n    then it will be used instead of the default ``(', ', ': ')`` separators.\n    ``(',', ':')`` is the most compact JSON representation.\n\n    ``encoding`` is the character encoding for str instances, default is UTF-8.\n\n    ``default(obj)`` is a function that should return a serializable version\n    of obj or raise TypeError. The default simply raises TypeError.\n\n    If *sort_keys* is ``True`` (default: ``False``), then the output of\n    dictionaries will be sorted by key.\n\n    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the\n    ``.default()`` method to serialize additional types), specify it with\n    the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.", "id": "f16479:m1"}
{"signature": "def decode(self, s, _w=WHITESPACE.match):", "body": "obj, end = self.raw_decode(s, idx=_w(s, <NUM_LIT:0>).end())<EOL>end = _w(s, end).end()<EOL>if end != len(s):<EOL><INDENT>raise ValueError(errmsg(\"<STR_LIT>\", s, end, len(s)))<EOL><DEDENT>return obj<EOL>", "docstring": "Return the Python representation of ``s`` (a ``str`` or ``unicode``\n        instance containing a JSON document)", "id": "f16480:c0:m1"}
{"signature": "def py_scanstring(s, end, encoding=None, strict=True,<EOL>_b=BACKSLASH, _m=STRINGCHUNK.match):", "body": "if encoding is None:<EOL><INDENT>encoding = DEFAULT_ENCODING<EOL><DEDENT>chunks = []<EOL>_append = chunks.append<EOL>begin = end - <NUM_LIT:1><EOL>while <NUM_LIT:1>:<EOL><INDENT>chunk = _m(s, end)<EOL>if chunk is None:<EOL><INDENT>raise ValueError(<EOL>errmsg(\"<STR_LIT>\", s, begin))<EOL><DEDENT>end = chunk.end()<EOL>content, terminator = chunk.groups()<EOL>if content:<EOL><INDENT>if not isinstance(content, unicode):<EOL><INDENT>content = unicode(content, encoding)<EOL><DEDENT>_append(content)<EOL><DEDENT>if terminator == '<STR_LIT:\">':<EOL><INDENT>break<EOL><DEDENT>elif terminator != '<STR_LIT:\\\\>':<EOL><INDENT>if strict:<EOL><INDENT>msg = \"<STR_LIT>\" % (terminator,)<EOL>raise ValueError(errmsg(msg, s, end))<EOL><DEDENT>else:<EOL><INDENT>_append(terminator)<EOL>continue<EOL><DEDENT><DEDENT>try:<EOL><INDENT>esc = s[end]<EOL><DEDENT>except IndexError:<EOL><INDENT>raise ValueError(<EOL>errmsg(\"<STR_LIT>\", s, begin))<EOL><DEDENT>if esc != '<STR_LIT:u>':<EOL><INDENT>try:<EOL><INDENT>char = _b[esc]<EOL><DEDENT>except KeyError:<EOL><INDENT>msg = \"<STR_LIT>\" + repr(esc)<EOL>raise ValueError(errmsg(msg, s, end))<EOL><DEDENT>end += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>uni = _decode_uXXXX(s, end)<EOL>end += <NUM_LIT:5><EOL>if sys.maxunicode > <NUM_LIT> and<NUM_LIT> <= uni <= <NUM_LIT> and s[end:end + <NUM_LIT:2>] == '<STR_LIT>':<EOL><INDENT>uni2 = _decode_uXXXX(s, end + <NUM_LIT:1>)<EOL>if <NUM_LIT> <= uni2 <= <NUM_LIT>:<EOL><INDENT>uni = <NUM_LIT> + (((uni - <NUM_LIT>) << <NUM_LIT:10>) | (uni2 - <NUM_LIT>))<EOL>end += <NUM_LIT:6><EOL><DEDENT><DEDENT>char = unichr(uni)<EOL><DEDENT>_append(char)<EOL><DEDENT>return u'<STR_LIT>'.join(chunks), end<EOL>", "docstring": "Scan the string s for a JSON string. 
End is the index of the\n    character in s after the quote that started the JSON string.\n    Unescapes all valid JSON string escape sequences and raises ValueError\n    on attempt to decode an invalid string. If strict is False then literal\n    control characters are allowed in the string.\n\n    Returns a tuple of the decoded string and the index of the character in s\n    after the end quote.", "id": "f16480:m4"}
{"signature": "def extract(s):", "body": "res = decoder.match(s)<EOL>if res is None: raise NotANumber(s)<EOL>sign, intpart, fraction, exppart = res.group(<NUM_LIT:1>,<NUM_LIT:2>,<NUM_LIT:3>,<NUM_LIT:4>)<EOL>if sign == '<STR_LIT:+>': sign = '<STR_LIT>'<EOL>if fraction: fraction = fraction[<NUM_LIT:1>:]<EOL>if exppart: expo = int(exppart[<NUM_LIT:1>:])<EOL>else: expo = <NUM_LIT:0><EOL>return sign, intpart, fraction, expo<EOL>", "docstring": "Return (sign, intpart, fraction, expo) or raise an exception:\n    sign is '+' or '-'\n    intpart is 0 or more digits beginning with a nonzero\n    fraction is 0 or more digits\n    expo is an integer", "id": "f16481:m0"}
{"signature": "def fix(x, digs):", "body": "if type(x) != type('<STR_LIT>'): x = repr(x)<EOL>try:<EOL><INDENT>sign, intpart, fraction, expo = extract(x)<EOL><DEDENT>except NotANumber:<EOL><INDENT>return x<EOL><DEDENT>intpart, fraction = unexpo(intpart, fraction, expo)<EOL>intpart, fraction = roundfrac(intpart, fraction, digs)<EOL>while intpart and intpart[<NUM_LIT:0>] == '<STR_LIT:0>': intpart = intpart[<NUM_LIT:1>:]<EOL>if intpart == '<STR_LIT>': intpart = '<STR_LIT:0>'<EOL>if digs > <NUM_LIT:0>: return sign + intpart + '<STR_LIT:.>' + fraction<EOL>else: return sign + intpart<EOL>", "docstring": "Format x as [-]ddd.ddd with 'digs' digits after the point\n    and at least one digit before.\n    If digs <= 0, the point is suppressed.", "id": "f16481:m3"}
{"signature": "def _show_warning(message, category, filename, lineno, file=None, line=None):", "body": "if file is None:<EOL><INDENT>file = sys.stderr<EOL>if file is None:<EOL><INDENT>return<EOL><DEDENT><DEDENT>try:<EOL><INDENT>file.write(formatwarning(message, category, filename, lineno, line))<EOL><DEDENT>except (IOError, UnicodeError):<EOL><INDENT>pass <EOL><DEDENT>", "docstring": "Hook to write a warning to a file; replace if you like.", "id": "f16482:m1"}
{"signature": "def formatwarning(message, category, filename, lineno, line=None):", "body": "try:<EOL><INDENT>unicodetype = str<EOL><DEDENT>except NameError:<EOL><INDENT>unicodetype = ()<EOL><DEDENT>try:<EOL><INDENT>message = str(message)<EOL><DEDENT>except UnicodeEncodeError:<EOL><INDENT>pass<EOL><DEDENT>s =  \"<STR_LIT>\" % (lineno, category.__name__, message)<EOL>line = linecache.getline(filename, lineno) if line is None else line<EOL>if line:<EOL><INDENT>line = line.strip()<EOL>if isinstance(s, unicodetype) and isinstance(line, str):<EOL><INDENT>line = str(line, '<STR_LIT>')<EOL><DEDENT>s += \"<STR_LIT>\" % line<EOL><DEDENT>if isinstance(s, unicodetype) and isinstance(filename, str):<EOL><INDENT>enc = sys.getfilesystemencoding()<EOL>if enc:<EOL><INDENT>try:<EOL><INDENT>filename = str(filename, enc)<EOL><DEDENT>except UnicodeDecodeError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>s = \"<STR_LIT>\" % (filename, s)<EOL>return s<EOL>", "docstring": "Function to format a warning the standard way.", "id": "f16482:m2"}
{"signature": "def warn(message, category=None, stacklevel=<NUM_LIT:1>):", "body": "<EOL>if isinstance(message, Warning):<EOL><INDENT>category = message.__class__<EOL><DEDENT>if category is None:<EOL><INDENT>category = UserWarning<EOL><DEDENT>assert issubclass(category, Warning)<EOL>try:<EOL><INDENT>caller = sys._getframe(stacklevel)<EOL><DEDENT>except ValueError:<EOL><INDENT>globals = sys.__dict__<EOL>lineno = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>globals = caller.f_globals<EOL>lineno = caller.f_lineno<EOL><DEDENT>if '<STR_LIT>' in globals:<EOL><INDENT>module = globals['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>module = \"<STR_LIT>\"<EOL><DEDENT>filename = globals.get('<STR_LIT>')<EOL>if filename:<EOL><INDENT>fnl = filename.lower()<EOL>if fnl.endswith((\"<STR_LIT>\", \"<STR_LIT>\")):<EOL><INDENT>filename = filename[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if module == \"<STR_LIT:__main__>\":<EOL><INDENT>try:<EOL><INDENT>filename = sys.argv[<NUM_LIT:0>]<EOL><DEDENT>except AttributeError:<EOL><INDENT>filename = '<STR_LIT:__main__>'<EOL><DEDENT><DEDENT>if not filename:<EOL><INDENT>filename = module<EOL><DEDENT><DEDENT>registry = globals.setdefault(\"<STR_LIT>\", {})<EOL>warn_explicit(message, category, filename, lineno, module, registry,<EOL>globals)<EOL>", "docstring": "Issue a warning, or maybe ignore it or raise an exception.", "id": "f16482:m10"}
{"signature": "def warnpy3k(message, category=None, stacklevel=<NUM_LIT:1>):", "body": "if sys.py3kwarning:<EOL><INDENT>if category is None:<EOL><INDENT>category = DeprecationWarning<EOL><DEDENT>warn(message, category, stacklevel+<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Issue a deprecation warning for Python 3.x related changes.\n\n    Warnings are omitted unless Python is started with the -3 option.", "id": "f16482:m0"}
{"signature": "def simplefilter(action, category=Warning, lineno=<NUM_LIT:0>, append=<NUM_LIT:0>):", "body": "assert action in (\"<STR_LIT:error>\", \"<STR_LIT:ignore>\", \"<STR_LIT>\", \"<STR_LIT:default>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\"), \"<STR_LIT>\" % (action,)<EOL>assert isinstance(lineno, int) and lineno >= <NUM_LIT:0>,\"<STR_LIT>\"<EOL>item = (action, None, category, None, lineno)<EOL>if append:<EOL><INDENT>filters.append(item)<EOL><DEDENT>else:<EOL><INDENT>filters.insert(<NUM_LIT:0>, item)<EOL><DEDENT>", "docstring": "Insert a simple entry into the list of warnings filters (at the front).\n\n    A simple filter matches all modules and messages.\n    'action' -- one of \"error\", \"ignore\", \"always\", \"default\", \"module\",\n                or \"once\"\n    'category' -- a class that the warning must be a subclass of\n    'lineno' -- an integer line number, 0 matches all warnings\n    'append' -- if true, append to the list of filters", "id": "f16482:m4"}
{"signature": "def getopt(args, shortopts, longopts = []):", "body": "opts = []<EOL>if type(longopts) == type(\"<STR_LIT>\"):<EOL><INDENT>longopts = [longopts]<EOL><DEDENT>else:<EOL><INDENT>longopts = list(longopts)<EOL><DEDENT>while args and args[<NUM_LIT:0>].startswith('<STR_LIT:->') and args[<NUM_LIT:0>] != '<STR_LIT:->':<EOL><INDENT>if args[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>args = args[<NUM_LIT:1>:]<EOL>break<EOL><DEDENT>if args[<NUM_LIT:0>].startswith('<STR_LIT>'):<EOL><INDENT>opts, args = do_longs(opts, args[<NUM_LIT:0>][<NUM_LIT:2>:], longopts, args[<NUM_LIT:1>:])<EOL><DEDENT>else:<EOL><INDENT>opts, args = do_shorts(opts, args[<NUM_LIT:0>][<NUM_LIT:1>:], shortopts, args[<NUM_LIT:1>:])<EOL><DEDENT><DEDENT>return opts, args<EOL>", "docstring": "getopt(args, options[, long_options]) -> opts, args\n\n    Parses command line options and parameter list.  args is the\n    argument list to be parsed, without the leading reference to the\n    running program.  Typically, this means \"sys.argv[1:]\".  shortopts\n    is the string of option letters that the script wants to\n    recognize, with options that require an argument followed by a\n    colon (i.e., the same format that Unix getopt() uses).  If\n    specified, longopts is a list of strings with the names of the\n    long options which should be supported.  The leading '--'\n    characters should not be included in the option name.  Options\n    which require an argument should be followed by an equal sign\n    ('=').\n\n    The return value consists of two elements: the first is a list of\n    (option, value) pairs; the second is the list of program arguments\n    left after the option list was stripped (this is a trailing slice\n    of the first argument).  Each option-and-value pair returned has\n    the option as its first element, prefixed with a hyphen (e.g.,\n    '-x'), and the option argument as its second element, or an empty\n    string if the option has no argument.  
The options occur in the\n    list in the same order in which they were found, thus allowing\n    multiple occurrences.  Long and short options may be mixed.", "id": "f16483:m0"}
{"signature": "def translate(pat):", "body": "i, n = <NUM_LIT:0>, len(pat)<EOL>res = '<STR_LIT>'<EOL>while i < n:<EOL><INDENT>c = pat[i]<EOL>i = i+<NUM_LIT:1><EOL>if c == '<STR_LIT:*>':<EOL><INDENT>res = res + '<STR_LIT>'<EOL><DEDENT>elif c == '<STR_LIT:?>':<EOL><INDENT>res = res + '<STR_LIT:.>'<EOL><DEDENT>elif c == '<STR_LIT:[>':<EOL><INDENT>j = i<EOL>if j < n and pat[j] == '<STR_LIT:!>':<EOL><INDENT>j = j+<NUM_LIT:1><EOL><DEDENT>if j < n and pat[j] == '<STR_LIT:]>':<EOL><INDENT>j = j+<NUM_LIT:1><EOL><DEDENT>while j < n and pat[j] != '<STR_LIT:]>':<EOL><INDENT>j = j+<NUM_LIT:1><EOL><DEDENT>if j >= n:<EOL><INDENT>res = res + '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>stuff = pat[i:j].replace('<STR_LIT:\\\\>','<STR_LIT>')<EOL>i = j+<NUM_LIT:1><EOL>if stuff[<NUM_LIT:0>] == '<STR_LIT:!>':<EOL><INDENT>stuff = '<STR_LIT>' + stuff[<NUM_LIT:1>:]<EOL><DEDENT>elif stuff[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>stuff = '<STR_LIT:\\\\>' + stuff<EOL><DEDENT>res = '<STR_LIT>' % (res, stuff)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>res = res + re.escape(c)<EOL><DEDENT><DEDENT>return res + '<STR_LIT>'<EOL>", "docstring": "Translate a shell PATTERN to a regular expression.\n\n    There is no way to quote meta-characters.", "id": "f16486:m4"}
{"signature": "def fnmatchcase(name, pat):", "body": "try:<EOL><INDENT>re_pat = _cache[pat]<EOL><DEDENT>except KeyError:<EOL><INDENT>res = translate(pat)<EOL>if len(_cache) >= _MAXCACHE:<EOL><INDENT>globals()['<STR_LIT>'] = {}<EOL><DEDENT>_cache[pat] = re_pat = re.compile(res)<EOL><DEDENT>return re_pat.match(name) is not None<EOL>", "docstring": "Test whether FILENAME matches PATTERN, including case.\n\n    This is a version of fnmatch() which doesn't case-normalize\n    its arguments.", "id": "f16486:m3"}
{"signature": "def fnmatch(name, pat):", "body": "<EOL>return fnmatchcase(name, pat)<EOL>", "docstring": "Test whether FILENAME matches PATTERN.\n\n    Patterns are Unix shell style:\n\n    *       matches everything\n    ?       matches any single character\n    [seq]   matches any character in seq\n    [!seq]  matches any char not in seq\n\n    An initial period in FILENAME is not special.\n    Both FILENAME and PATTERN are first case-normalized\n    if the operating system requires it.\n    If you don't want this, use fnmatchcase(FILENAME, PATTERN).", "id": "f16486:m1"}
{"signature": "def filter(names, pat):", "body": "import os<EOL>result=[]<EOL>try:<EOL><INDENT>re_pat = _cache[pat]<EOL><DEDENT>except KeyError:<EOL><INDENT>res = translate(pat)<EOL>if len(_cache) >= _MAXCACHE:<EOL><INDENT>globals()['<STR_LIT>'] = {}<EOL><DEDENT>_cache[pat] = re_pat = re.compile(res)<EOL><DEDENT>match = re_pat.match<EOL>if <NUM_LIT:1>:<EOL><INDENT>for name in names:<EOL><INDENT>if match(name):<EOL><INDENT>result.append(name)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for name in names:<EOL><INDENT>if match(os.path.normcase(name)):<EOL><INDENT>result.append(name)<EOL><DEDENT><DEDENT><DEDENT>return result<EOL>", "docstring": "Return the subset of the list NAMES that match PAT", "id": "f16486:m2"}
{"signature": "def calcsize(fmt):", "body": "formatdef, endianness, i = getmode(fmt)<EOL>num = <NUM_LIT:0><EOL>result = <NUM_LIT:0><EOL>while i < len(fmt):<EOL><INDENT>num, i = getNum(fmt, i)<EOL>cur = fmt[i]<EOL>try:<EOL><INDENT>format = formatdef[cur]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise StructError(\"<STR_LIT>\" % cur)<EOL><DEDENT>if num != None:<EOL><INDENT>result += num * format['<STR_LIT:size>']<EOL><DEDENT>else:<EOL><INDENT>result += format['<STR_LIT:size>']<EOL><DEDENT>num = <NUM_LIT:0><EOL>i += <NUM_LIT:1><EOL><DEDENT>return result<EOL>", "docstring": "calcsize(fmt) -> int\n    Return size of C struct described by format string fmt.\n    See struct.__doc__ for more on format strings.", "id": "f16488:m16"}
{"signature": "def float_unpack(Q, size, le):", "body": "if size == <NUM_LIT:8>:<EOL><INDENT>MIN_EXP = -<NUM_LIT>  <EOL>MAX_EXP = <NUM_LIT>   <EOL>MANT_DIG = <NUM_LIT>    <EOL>BITS = <NUM_LIT:64><EOL><DEDENT>elif size == <NUM_LIT:4>:<EOL><INDENT>MIN_EXP = -<NUM_LIT>   <EOL>MAX_EXP = <NUM_LIT>    <EOL>MANT_DIG = <NUM_LIT>    <EOL>BITS = <NUM_LIT:32><EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if Q >> BITS:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>sign = Q >> BITS - <NUM_LIT:1><EOL>exp = (Q & ((<NUM_LIT:1> << BITS - <NUM_LIT:1>) - (<NUM_LIT:1> << MANT_DIG - <NUM_LIT:1>))) >> MANT_DIG - <NUM_LIT:1><EOL>mant = Q & ((<NUM_LIT:1> << MANT_DIG - <NUM_LIT:1>) - <NUM_LIT:1>)<EOL>if exp == MAX_EXP - MIN_EXP + <NUM_LIT:2>:<EOL><INDENT>result = float('<STR_LIT>') if mant else float('<STR_LIT>')<EOL><DEDENT>elif exp == <NUM_LIT:0>:<EOL><INDENT>result = math.ldexp(float(mant), MIN_EXP - MANT_DIG)<EOL><DEDENT>else:<EOL><INDENT>mant += <NUM_LIT:1> << MANT_DIG - <NUM_LIT:1><EOL>result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - <NUM_LIT:1>)<EOL><DEDENT>return -result if sign else result<EOL>", "docstring": "Convert a 32-bit or 64-bit integer created\n    by float_pack into a Python float.", "id": "f16488:m12"}
{"signature": "def unpack(fmt, data):", "body": "formatdef, endianness, i = getmode(fmt)<EOL>j = <NUM_LIT:0><EOL>num = <NUM_LIT:0><EOL>result = []<EOL>length = calcsize(fmt)<EOL>if length != len(data):<EOL><INDENT>raise StructError(\"<STR_LIT>\")<EOL><DEDENT>while i < len(fmt):<EOL><INDENT>num, i = getNum(fmt, i)<EOL>cur = fmt[i]<EOL>i += <NUM_LIT:1><EOL>try:<EOL><INDENT>format = formatdef[cur]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise StructError(\"<STR_LIT>\" % cur)<EOL><DEDENT>if not num:<EOL><INDENT>num = <NUM_LIT:1><EOL><DEDENT>if cur == '<STR_LIT:x>':<EOL><INDENT>j += num<EOL><DEDENT>elif cur == '<STR_LIT:s>':<EOL><INDENT>result.append(data[j:j + num])<EOL>j += num<EOL><DEDENT>elif cur == '<STR_LIT:p>':<EOL><INDENT>n = data[j]<EOL>if n >= num:<EOL><INDENT>n = num - <NUM_LIT:1><EOL><DEDENT>result.append(data[j + <NUM_LIT:1>:j + n + <NUM_LIT:1>])<EOL>j += num<EOL><DEDENT>else:<EOL><INDENT>for n in range(num):<EOL><INDENT>result += [format['<STR_LIT>'](data, j, format['<STR_LIT:size>'], endianness)]<EOL>j += format['<STR_LIT:size>']<EOL><DEDENT><DEDENT><DEDENT>return tuple(result)<EOL>", "docstring": "unpack(fmt, string) -> (v1, v2, ...)\n       Unpack the string, containing packed C structure data, according\n       to fmt.  Requires len(string)==calcsize(fmt).\n       See struct.__doc__ for more on format strings.", "id": "f16488:m18"}
{"signature": "def b2a_qp(data, quotetabs=False, istext=True, header=False):", "body": "MAXLINESIZE = <NUM_LIT><EOL>lf = data.find('<STR_LIT:\\n>')<EOL>crlf = lf > <NUM_LIT:0> and data[lf-<NUM_LIT:1>] == '<STR_LIT:\\r>'<EOL>inp = <NUM_LIT:0><EOL>linelen = <NUM_LIT:0><EOL>odata = []<EOL>while inp < len(data):<EOL><INDENT>c = data[inp]<EOL>if (c > '<STR_LIT>' or<EOL>c == '<STR_LIT:=>' or<EOL>(header and c == '<STR_LIT:_>') or<EOL>(c == '<STR_LIT:.>' and linelen == <NUM_LIT:0> and (inp+<NUM_LIT:1> == len(data) or<EOL>data[inp+<NUM_LIT:1>] == '<STR_LIT:\\n>' or<EOL>data[inp+<NUM_LIT:1>] == '<STR_LIT:\\r>')) or<EOL>(not istext and (c == '<STR_LIT:\\r>' or c == '<STR_LIT:\\n>')) or<EOL>((c == '<STR_LIT:\\t>' or c == '<STR_LIT:U+0020>') and (inp + <NUM_LIT:1> == len(data))) or<EOL>(c <= '<STR_LIT:U+0020>' and c != '<STR_LIT:\\r>' and c != '<STR_LIT:\\n>' and<EOL>(quotetabs or (not quotetabs and (c != '<STR_LIT:\\t>' and c != '<STR_LIT:U+0020>'))))):<EOL><INDENT>linelen += <NUM_LIT:3><EOL>if linelen >= MAXLINESIZE:<EOL><INDENT>odata.append('<STR_LIT:=>')<EOL>if crlf: odata.append('<STR_LIT:\\r>')<EOL>odata.append('<STR_LIT:\\n>')<EOL>linelen = <NUM_LIT:3><EOL><DEDENT>odata.append('<STR_LIT:=>' + two_hex_digits(ord(c)))<EOL>inp += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>if (istext and<EOL>(c == '<STR_LIT:\\n>' or (inp+<NUM_LIT:1> < len(data) and c == '<STR_LIT:\\r>' and<EOL>data[inp+<NUM_LIT:1>] == '<STR_LIT:\\n>'))):<EOL><INDENT>linelen = <NUM_LIT:0><EOL>if (len(odata) > <NUM_LIT:0> and<EOL>(odata[-<NUM_LIT:1>] == '<STR_LIT:U+0020>' or odata[-<NUM_LIT:1>] == '<STR_LIT:\\t>')):<EOL><INDENT>ch = ord(odata[-<NUM_LIT:1>])<EOL>odata[-<NUM_LIT:1>] = '<STR_LIT:=>'<EOL>odata.append(two_hex_digits(ch))<EOL><DEDENT>if crlf: odata.append('<STR_LIT:\\r>')<EOL>odata.append('<STR_LIT:\\n>')<EOL>if c == '<STR_LIT:\\r>':<EOL><INDENT>inp += <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>inp += <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>if (inp + <NUM_LIT:1> < len(data) 
and<EOL>data[inp+<NUM_LIT:1>] != '<STR_LIT:\\n>' and<EOL>(linelen + <NUM_LIT:1>) >= MAXLINESIZE):<EOL><INDENT>odata.append('<STR_LIT:=>')<EOL>if crlf: odata.append('<STR_LIT:\\r>')<EOL>odata.append('<STR_LIT:\\n>')<EOL>linelen = <NUM_LIT:0><EOL><DEDENT>linelen += <NUM_LIT:1><EOL>if header and c == '<STR_LIT:U+0020>':<EOL><INDENT>c = '<STR_LIT:_>'<EOL><DEDENT>odata.append(c)<EOL>inp += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return '<STR_LIT>'.join(odata)<EOL>", "docstring": "quotetabs=True means that tab and space characters are always\n       quoted.\n       istext=False means that \\r and \\n are treated as regular characters\n       header=True encodes space characters with '_' and requires\n       real '_' characters to be quoted.", "id": "f16489:m5"}
{"signature": "def update(self, inBuf):", "body": "leninBuf = len(inBuf)<EOL>index = (self.count[<NUM_LIT:0>] >> <NUM_LIT:3>) & <NUM_LIT><EOL>self.count[<NUM_LIT:0>] = self.count[<NUM_LIT:0>] + (leninBuf << <NUM_LIT:3>)<EOL>if self.count[<NUM_LIT:0>] < (leninBuf << <NUM_LIT:3>):<EOL><INDENT>self.count[<NUM_LIT:1>] = self.count[<NUM_LIT:1>] + <NUM_LIT:1><EOL><DEDENT>self.count[<NUM_LIT:1>] = self.count[<NUM_LIT:1>] + (leninBuf >> <NUM_LIT>)<EOL>partLen = <NUM_LIT:64> - index<EOL>if leninBuf >= partLen:<EOL><INDENT>self.input[index:] = list(inBuf[:partLen])<EOL>self._transform(_bytelist2long(self.input))<EOL>i = partLen<EOL>while i + <NUM_LIT> < leninBuf:<EOL><INDENT>self._transform(_bytelist2long(list(inBuf[i:i + <NUM_LIT:64>])))<EOL>i = i + <NUM_LIT:64><EOL><DEDENT>else:<EOL><INDENT>self.input = list(inBuf[i:leninBuf])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>i = <NUM_LIT:0><EOL>self.input = self.input + list(inBuf)<EOL><DEDENT>", "docstring": "Add to the current message.\n\n        Update the md5 object with the string arg. Repeated calls\n        are equivalent to a single call with the concatenation of all\n        the arguments, i.e. m.update(a); m.update(b) is equivalent\n        to m.update(a+b).\n\n        The hash is immediately calculated for all full blocks. The final\n        calculation is made in digest(). This allows us to keep an\n        intermediate value for the hash, so that we only need to make\n        minimal recalculation if we call update() to add moredata to\n        the hashed string.", "id": "f16490:c0:m3"}
{"signature": "def _rotateLeft(x, n):", "body": "return (x << n) | (x >> (<NUM_LIT:32> - n))<EOL>", "docstring": "Rotate x (32 bit) left n bits circularly.", "id": "f16490:m1"}
{"signature": "def XX(func, a, b, c, d, x, s, ac):", "body": "res = <NUM_LIT:0><EOL>res = res + a + func(b, c, d)<EOL>res = res + x<EOL>res = res + ac<EOL>res = res & <NUM_LIT><EOL>res = _rotateLeft(res, s)<EOL>res = res & <NUM_LIT><EOL>res = res + b<EOL>return res & <NUM_LIT><EOL>", "docstring": "Wrapper for call distribution to functions F, G, H and I.\n\n    This replaces functions FF, GG, HH and II from \"Appl. Crypto.\"\n    Rotation is separate from addition to prevent recomputation\n    (now summed-up in one function).", "id": "f16490:m6"}
{"signature": "def new(arg=None):", "body": "crypto = MD5Type()<EOL>if arg:<EOL><INDENT>crypto.update(arg)<EOL><DEDENT>return crypto<EOL>", "docstring": "Return a new md5 crypto object.\n    If arg is present, the method call update(arg) is made.", "id": "f16490:m7"}
{"signature": "def _transform(self, inp):", "body": "a, b, c, d = A, B, C, D = self.A, self.B, self.C, self.D<EOL>S11, S12, S13, S14 = <NUM_LIT:7>, <NUM_LIT:12>, <NUM_LIT>, <NUM_LIT><EOL>a = XX(F, a, b, c, d, inp[<NUM_LIT:0>], S11, <NUM_LIT>)  <EOL>d = XX(F, d, a, b, c, inp[<NUM_LIT:1>], S12, <NUM_LIT>)  <EOL>c = XX(F, c, d, a, b, inp[<NUM_LIT:2>], S13, <NUM_LIT>)  <EOL>b = XX(F, b, c, d, a, inp[<NUM_LIT:3>], S14, <NUM_LIT>)  <EOL>a = XX(F, a, b, c, d, inp[<NUM_LIT:4>], S11, <NUM_LIT>)  <EOL>d = XX(F, d, a, b, c, inp[<NUM_LIT:5>], S12, <NUM_LIT>)  <EOL>c = XX(F, c, d, a, b, inp[<NUM_LIT:6>], S13, <NUM_LIT>)  <EOL>b = XX(F, b, c, d, a, inp[<NUM_LIT:7>], S14, <NUM_LIT>)  <EOL>a = XX(F, a, b, c, d, inp[<NUM_LIT:8>], S11, <NUM_LIT>)  <EOL>d = XX(F, d, a, b, c, inp[<NUM_LIT:9>], S12, <NUM_LIT>)  <EOL>c = XX(F, c, d, a, b, inp[<NUM_LIT:10>], S13, <NUM_LIT>)  <EOL>b = XX(F, b, c, d, a, inp[<NUM_LIT:11>], S14, <NUM_LIT>)  <EOL>a = XX(F, a, b, c, d, inp[<NUM_LIT:12>], S11, <NUM_LIT>)  <EOL>d = XX(F, d, a, b, c, inp[<NUM_LIT>], S12, <NUM_LIT>)  <EOL>c = XX(F, c, d, a, b, inp[<NUM_LIT>], S13, <NUM_LIT>)  <EOL>b = XX(F, b, c, d, a, inp[<NUM_LIT:15>], S14, <NUM_LIT>)  <EOL>S21, S22, S23, S24 = <NUM_LIT:5>, <NUM_LIT:9>, <NUM_LIT>, <NUM_LIT:20><EOL>a = XX(G, a, b, c, d, inp[<NUM_LIT:1>], S21, <NUM_LIT>)  <EOL>d = XX(G, d, a, b, c, inp[<NUM_LIT:6>], S22, <NUM_LIT>)  <EOL>c = XX(G, c, d, a, b, inp[<NUM_LIT:11>], S23, <NUM_LIT>)  <EOL>b = XX(G, b, c, d, a, inp[<NUM_LIT:0>], S24, <NUM_LIT>)  <EOL>a = XX(G, a, b, c, d, inp[<NUM_LIT:5>], S21, <NUM_LIT>)  <EOL>d = XX(G, d, a, b, c, inp[<NUM_LIT:10>], S22, <NUM_LIT>)  <EOL>c = XX(G, c, d, a, b, inp[<NUM_LIT:15>], S23, <NUM_LIT>)  <EOL>b = XX(G, b, c, d, a, inp[<NUM_LIT:4>], S24, <NUM_LIT>)  <EOL>a = XX(G, a, b, c, d, inp[<NUM_LIT:9>], S21, <NUM_LIT>)  <EOL>d = XX(G, d, a, b, c, inp[<NUM_LIT>], S22, <NUM_LIT>)  <EOL>c = XX(G, c, d, a, b, inp[<NUM_LIT:3>], S23, <NUM_LIT>)  <EOL>b = XX(G, b, c, d, a, inp[<NUM_LIT:8>], S24, <NUM_LIT>)  
<EOL>a = XX(G, a, b, c, d, inp[<NUM_LIT>], S21, <NUM_LIT>)  <EOL>d = XX(G, d, a, b, c, inp[<NUM_LIT:2>], S22, <NUM_LIT>)  <EOL>c = XX(G, c, d, a, b, inp[<NUM_LIT:7>], S23, <NUM_LIT>)  <EOL>b = XX(G, b, c, d, a, inp[<NUM_LIT:12>], S24, <NUM_LIT>)  <EOL>S31, S32, S33, S34 = <NUM_LIT:4>, <NUM_LIT:11>, <NUM_LIT:16>, <NUM_LIT><EOL>a = XX(H, a, b, c, d, inp[<NUM_LIT:5>], S31, <NUM_LIT>)  <EOL>d = XX(H, d, a, b, c, inp[<NUM_LIT:8>], S32, <NUM_LIT>)  <EOL>c = XX(H, c, d, a, b, inp[<NUM_LIT:11>], S33, <NUM_LIT>)  <EOL>b = XX(H, b, c, d, a, inp[<NUM_LIT>], S34, <NUM_LIT>)  <EOL>a = XX(H, a, b, c, d, inp[<NUM_LIT:1>], S31, <NUM_LIT>)  <EOL>d = XX(H, d, a, b, c, inp[<NUM_LIT:4>], S32, <NUM_LIT>)  <EOL>c = XX(H, c, d, a, b, inp[<NUM_LIT:7>], S33, <NUM_LIT>)  <EOL>b = XX(H, b, c, d, a, inp[<NUM_LIT:10>], S34, <NUM_LIT>)  <EOL>a = XX(H, a, b, c, d, inp[<NUM_LIT>], S31, <NUM_LIT>)  <EOL>d = XX(H, d, a, b, c, inp[<NUM_LIT:0>], S32, <NUM_LIT>)  <EOL>c = XX(H, c, d, a, b, inp[<NUM_LIT:3>], S33, <NUM_LIT>)  <EOL>b = XX(H, b, c, d, a, inp[<NUM_LIT:6>], S34, <NUM_LIT>)  <EOL>a = XX(H, a, b, c, d, inp[<NUM_LIT:9>], S31, <NUM_LIT>)  <EOL>d = XX(H, d, a, b, c, inp[<NUM_LIT:12>], S32, <NUM_LIT>)  <EOL>c = XX(H, c, d, a, b, inp[<NUM_LIT:15>], S33, <NUM_LIT>)  <EOL>b = XX(H, b, c, d, a, inp[<NUM_LIT:2>], S34, <NUM_LIT>)  <EOL>S41, S42, S43, S44 = <NUM_LIT:6>, <NUM_LIT:10>, <NUM_LIT:15>, <NUM_LIT><EOL>a = XX(I, a, b, c, d, inp[<NUM_LIT:0>], S41, <NUM_LIT>)  <EOL>d = XX(I, d, a, b, c, inp[<NUM_LIT:7>], S42, <NUM_LIT>)  <EOL>c = XX(I, c, d, a, b, inp[<NUM_LIT>], S43, <NUM_LIT>)  <EOL>b = XX(I, b, c, d, a, inp[<NUM_LIT:5>], S44, <NUM_LIT>)  <EOL>a = XX(I, a, b, c, d, inp[<NUM_LIT:12>], S41, <NUM_LIT>)  <EOL>d = XX(I, d, a, b, c, inp[<NUM_LIT:3>], S42, <NUM_LIT>)  <EOL>c = XX(I, c, d, a, b, inp[<NUM_LIT:10>], S43, <NUM_LIT>)  <EOL>b = XX(I, b, c, d, a, inp[<NUM_LIT:1>], S44, <NUM_LIT>)  <EOL>a = XX(I, a, b, c, d, inp[<NUM_LIT:8>], S41, <NUM_LIT>)  <EOL>d = XX(I, d, a, b, c, inp[<NUM_LIT:15>], S42, 
<NUM_LIT>)  <EOL>c = XX(I, c, d, a, b, inp[<NUM_LIT:6>], S43, <NUM_LIT>)  <EOL>b = XX(I, b, c, d, a, inp[<NUM_LIT>], S44, <NUM_LIT>)  <EOL>a = XX(I, a, b, c, d, inp[<NUM_LIT:4>], S41, <NUM_LIT>)  <EOL>d = XX(I, d, a, b, c, inp[<NUM_LIT:11>], S42, <NUM_LIT>)  <EOL>c = XX(I, c, d, a, b, inp[<NUM_LIT:2>], S43, <NUM_LIT>)  <EOL>b = XX(I, b, c, d, a, inp[<NUM_LIT:9>], S44, <NUM_LIT>)  <EOL>A = (A + a) & <NUM_LIT><EOL>B = (B + b) & <NUM_LIT><EOL>C = (C + c) & <NUM_LIT><EOL>D = (D + d) & <NUM_LIT><EOL>self.A, self.B, self.C, self.D = A, B, C, D<EOL>", "docstring": "Basic MD5 step transforming the digest based on the input.\n\n        Note that if the Mysterious Constants are arranged backwards\n        in little-endian order and decrypted with the DES they produce\n        OCCULT MESSAGES!", "id": "f16490:c0:m2"}
{"signature": "def unregister_dialect(name):", "body": "try:<EOL><INDENT>del _dialects[name]<EOL><DEDENT>except KeyError:<EOL><INDENT>raise Error(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Delete the name/dialect mapping associated with a string name.\\n\n    csv.unregister_dialect(name)", "id": "f16493:m2"}
{"signature": "def list_dialects():", "body": "return list(_dialects)<EOL>", "docstring": "Return a list of all know dialect names\n    names = csv.list_dialects()", "id": "f16493:m4"}
{"signature": "def count_repetitions(self, ctx, maxcount):", "body": "count = <NUM_LIT:0><EOL>real_maxcount = ctx.state.end - ctx.string_position<EOL>if maxcount < real_maxcount and maxcount != MAXREPEAT:<EOL><INDENT>real_maxcount = maxcount<EOL><DEDENT>code_position = ctx.code_position<EOL>string_position = ctx.string_position<EOL>ctx.skip_code(<NUM_LIT:4>)<EOL>reset_position = ctx.code_position<EOL>while count < real_maxcount:<EOL><INDENT>ctx.code_position = reset_position<EOL>self.dispatch(ctx.peek_code(), ctx)<EOL>if ctx.has_matched is False: <EOL><INDENT>break<EOL><DEDENT>count += <NUM_LIT:1><EOL><DEDENT>ctx.has_matched = None<EOL>ctx.code_position = code_position<EOL>ctx.string_position = string_position<EOL>return count<EOL>", "docstring": "Returns the number of repetitions of a single item, starting from the\n        current string position. The code pointer is expected to point to a\n        REPEAT_ONE operation (with the repeated 4 ahead).", "id": "f16494:c7:m33"}
{"signature": "def groups(self, default=None):", "body": "groups = []<EOL>for indices in self.regs[<NUM_LIT:1>:]:<EOL><INDENT>if indices[<NUM_LIT:0>] >= <NUM_LIT:0>:<EOL><INDENT>groups.append(self.string[indices[<NUM_LIT:0>]:indices[<NUM_LIT:1>]])<EOL><DEDENT>else:<EOL><INDENT>groups.append(default)<EOL><DEDENT><DEDENT>return tuple(groups)<EOL>", "docstring": "Returns a tuple containing all the subgroups of the match. The\n        default argument is used for groups that did not participate in the\n        match (defaults to None).", "id": "f16494:c2:m8"}
{"signature": "def search(self, string, pos=<NUM_LIT:0>, endpos=sys.maxsize):", "body": "state = _State(string, pos, endpos, self.flags)<EOL>if state.search(self._code):<EOL><INDENT>return SRE_Match(self, state)<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Scan through string looking for a location where this regular\n        expression produces a match, and return a corresponding MatchObject\n        instance. Return None if no position in the string matches the\n        pattern.", "id": "f16494:c0:m2"}
{"signature": "def check_charset(self, ctx, char):", "body": "self.set_dispatcher.reset(char)<EOL>save_position = ctx.code_position<EOL>result = None<EOL>while result is None:<EOL><INDENT>result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx)<EOL><DEDENT>ctx.code_position = save_position<EOL>return result<EOL>", "docstring": "Checks whether a character matches set of arbitrary length. Assumes\n        the code pointer is at the first member of the set.", "id": "f16494:c7:m32"}
{"signature": "def split(self, string, maxsplit=<NUM_LIT:0>):", "body": "splitlist = []<EOL>state = _State(string, <NUM_LIT:0>, sys.maxsize, self.flags)<EOL>n = <NUM_LIT:0><EOL>last = state.start<EOL>while not maxsplit or n < maxsplit:<EOL><INDENT>state.reset()<EOL>state.string_position = state.start<EOL>if not state.search(self._code):<EOL><INDENT>break<EOL><DEDENT>if state.start == state.string_position: <EOL><INDENT>if last == state.end:                <EOL><INDENT>break<EOL><DEDENT>state.start += <NUM_LIT:1><EOL>continue<EOL><DEDENT>splitlist.append(string[last:state.start])<EOL>if self.groups:<EOL><INDENT>match = SRE_Match(self, state)<EOL>splitlist += (list(match.groups(None)))<EOL><DEDENT>n += <NUM_LIT:1><EOL>last = state.start = state.string_position<EOL><DEDENT>splitlist.append(string[last:state.end])<EOL>return splitlist<EOL>", "docstring": "Split string by the occurrences of pattern.", "id": "f16494:c0:m7"}
{"signature": "def groupdict(self, default=None):", "body": "groupdict = {}<EOL>for key, value in list(self.re.groupindex.items()):<EOL><INDENT>groupdict[key] = self._get_slice(value, default)<EOL><DEDENT>return groupdict<EOL>", "docstring": "Return a dictionary containing all the named subgroups of the match.\n        The default argument is used for groups that did not participate in the\n        match (defaults to None).", "id": "f16494:c2:m9"}
{"signature": "def __repr__(self):", "body": "if self._microsecond != <NUM_LIT:0>:<EOL><INDENT>s = \"<STR_LIT>\" % (self._second, self._microsecond)<EOL><DEDENT>elif self._second != <NUM_LIT:0>:<EOL><INDENT>s = \"<STR_LIT>\" % self._second<EOL><DEDENT>else:<EOL><INDENT>s = \"<STR_LIT>\"<EOL><DEDENT>module = \"<STR_LIT>\" if self.__class__ is time else \"<STR_LIT>\"<EOL>s= \"<STR_LIT>\" % (module + self.__class__.__name__,<EOL>self._hour, self._minute, s)<EOL>if self._tzinfo is not None:<EOL><INDENT>assert s[-<NUM_LIT:1>:] == \"<STR_LIT:)>\"<EOL>s = s[:-<NUM_LIT:1>] + \"<STR_LIT>\" % self._tzinfo + \"<STR_LIT:)>\"<EOL><DEDENT>return s<EOL>", "docstring": "Convert to formal string, for repr().", "id": "f16496:c3:m15"}
{"signature": "@classmethod<EOL><INDENT>def today(cls):<DEDENT>", "body": "t = _time.time()<EOL>return cls.fromtimestamp(t)<EOL>", "docstring": "Construct a date from time.time().", "id": "f16496:c1:m2"}
{"signature": "def _ymd2ord(year, month, day):", "body": "assert <NUM_LIT:1> <= month <= <NUM_LIT:12>, '<STR_LIT>'<EOL>dim = _days_in_month(year, month)<EOL>assert <NUM_LIT:1> <= day <= dim, ('<STR_LIT>' % dim)<EOL>return (_days_before_year(year) +<EOL>_days_before_month(year, month) +<EOL>day)<EOL>", "docstring": "year, month, day -> ordinal, considering 01-Jan-0001 as day 1.", "id": "f16496:m7"}
{"signature": "def replace(self, hour=None, minute=None, second=None, microsecond=None,<EOL>tzinfo=True):", "body": "if hour is None:<EOL><INDENT>hour = self.hour<EOL><DEDENT>if minute is None:<EOL><INDENT>minute = self.minute<EOL><DEDENT>if second is None:<EOL><INDENT>second = self.second<EOL><DEDENT>if microsecond is None:<EOL><INDENT>microsecond = self.microsecond<EOL><DEDENT>if tzinfo is True:<EOL><INDENT>tzinfo = self.tzinfo<EOL><DEDENT>return time.__new__(type(self),<EOL>hour, minute, second, microsecond, tzinfo)<EOL>", "docstring": "Return a new time with new values for the specified fields.", "id": "f16496:c3:m23"}
{"signature": "@property<EOL><INDENT>def hour(self):<DEDENT>", "body": "return self._hour<EOL>", "docstring": "hour (0-23)", "id": "f16496:c4:m1"}
{"signature": "@property<EOL><INDENT>def microseconds(self):<DEDENT>", "body": "return self._microseconds<EOL>", "docstring": "microseconds", "id": "f16496:c0:m9"}
{"signature": "def _is_leap(year):", "body": "return year % <NUM_LIT:4> == <NUM_LIT:0> and (year % <NUM_LIT:100> != <NUM_LIT:0> or year % <NUM_LIT> == <NUM_LIT:0>)<EOL>", "docstring": "year -> 1 if leap year, else 0.", "id": "f16496:m3"}
{"signature": "def tzname(self, dt):", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "datetime -> string name of time zone.", "id": "f16496:c2:m0"}
{"signature": "def _ord2ymd(n):", "body": "<EOL>n -= <NUM_LIT:1><EOL>n400, n = divmod(n, _DI400Y)<EOL>year = n400 * <NUM_LIT> + <NUM_LIT:1>   <EOL>n100, n = divmod(n, _DI100Y)<EOL>n4, n = divmod(n, _DI4Y)<EOL>n1, n = divmod(n, <NUM_LIT>)<EOL>year += n100 * <NUM_LIT:100> + n4 * <NUM_LIT:4> + n1<EOL>if n1 == <NUM_LIT:4> or n100 == <NUM_LIT:4>:<EOL><INDENT>assert n == <NUM_LIT:0><EOL>return year-<NUM_LIT:1>, <NUM_LIT:12>, <NUM_LIT><EOL><DEDENT>leapyear = n1 == <NUM_LIT:3> and (n4 != <NUM_LIT> or n100 == <NUM_LIT:3>)<EOL>assert leapyear == _is_leap(year)<EOL>month = (n + <NUM_LIT:50>) >> <NUM_LIT:5><EOL>preceding = _DAYS_BEFORE_MONTH[month] + (month > <NUM_LIT:2> and leapyear)<EOL>if preceding > n:  <EOL><INDENT>month -= <NUM_LIT:1><EOL>preceding -= _DAYS_IN_MONTH[month] + (month == <NUM_LIT:2> and leapyear)<EOL><DEDENT>n -= preceding<EOL>assert <NUM_LIT:0> <= n < _days_in_month(year, month)<EOL>return year, month, n+<NUM_LIT:1><EOL>", "docstring": "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1.", "id": "f16496:m8"}
{"signature": "def utcoffset(self, dt):", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "datetime -> minutes east of UTC (negative for west of UTC)", "id": "f16496:c2:m1"}
{"signature": "def _days_before_month(year, month):", "body": "assert <NUM_LIT:1> <= month <= <NUM_LIT:12>, '<STR_LIT>'<EOL>return _DAYS_BEFORE_MONTH[month] + (month > <NUM_LIT:2> and _is_leap(year))<EOL>", "docstring": "year, month -> number of days in year preceding first day of month.", "id": "f16496:m6"}
{"signature": "def toordinal(self):", "body": "return _ymd2ord(self._year, self._month, self._day)<EOL>", "docstring": "Return proleptic Gregorian ordinal for the year, month and day.\n\n        January 1 of year 1 is day 1.  Only the year, month and day values\n        contribute to the result.", "id": "f16496:c1:m12"}
{"signature": "def dst(self, dt):", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "datetime -> DST offset in minutes east of UTC.\n\n        Return 0 if DST not in effect.  utcoffset() must include the DST\n        offset.", "id": "f16496:c2:m2"}
{"signature": "def timetz(self):", "body": "return time(self.hour, self.minute, self.second, self.microsecond,<EOL>self._tzinfo)<EOL>", "docstring": "Return the time part, with same tzinfo.", "id": "f16496:c4:m16"}
{"signature": "def utcoffset(self):", "body": "if self._tzinfo is None:<EOL><INDENT>return None<EOL><DEDENT>offset = self._tzinfo.utcoffset(None)<EOL>offset = _check_utc_offset(\"<STR_LIT>\", offset)<EOL>if offset is not None:<EOL><INDENT>offset = timedelta._create(<NUM_LIT:0>, offset * <NUM_LIT>, <NUM_LIT:0>, True)<EOL><DEDENT>return offset<EOL>", "docstring": "Return the timezone offset in minutes east of UTC (negative west of\n        UTC).", "id": "f16496:c3:m18"}
{"signature": "def __sub__(self, other):", "body": "if isinstance(other, date):<EOL><INDENT>days1 = self.toordinal()<EOL>days2 = other.toordinal()<EOL>return timedelta._create(days1 - days2, <NUM_LIT:0>, <NUM_LIT:0>, False)<EOL><DEDENT>if isinstance(other, timedelta):<EOL><INDENT>return self._add_timedelta(other, -<NUM_LIT:1>)<EOL><DEDENT>return NotImplemented<EOL>", "docstring": "Subtract two dates, or a date and a timedelta.", "id": "f16496:c1:m24"}
{"signature": "def isoformat(self, sep='<STR_LIT:T>'):", "body": "s = (\"<STR_LIT>\" % (self._year, self._month, self._day, sep) +<EOL>_format_time(self._hour, self._minute, self._second,<EOL>self._microsecond))<EOL>off = self._utcoffset()<EOL>if off is not None:<EOL><INDENT>if off < <NUM_LIT:0>:<EOL><INDENT>sign = \"<STR_LIT:->\"<EOL>off = -off<EOL><DEDENT>else:<EOL><INDENT>sign = \"<STR_LIT:+>\"<EOL><DEDENT>hh, mm = divmod(off, <NUM_LIT>)<EOL>s += \"<STR_LIT>\" % (sign, hh, mm)<EOL><DEDENT>return s<EOL>", "docstring": "Return the time formatted according to ISO.\n\n        This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if\n        self.microsecond == 0.\n\n        If self.tzinfo is not None, the UTC offset is also attached, giving\n        'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.\n\n        Optional argument sep specifies the separator between date and\n        time, default 'T'.", "id": "f16496:c4:m20"}
{"signature": "@property<EOL><INDENT>def minute(self):<DEDENT>", "body": "return self._minute<EOL>", "docstring": "minute (0-59)", "id": "f16496:c4:m2"}
{"signature": "def utctimetuple(self):", "body": "y, m, d = self.year, self.month, self.day<EOL>hh, mm, ss = self.hour, self.minute, self.second<EOL>offset = self._utcoffset()<EOL>if offset:  <EOL><INDENT>mm -= offset<EOL>y, m, d, hh, mm, ss, _ = _normalize_datetime(<EOL>y, m, d, hh, mm, ss, <NUM_LIT:0>, ignore_overflow=True)<EOL><DEDENT>return _build_struct_time(y, m, d, hh, mm, ss, <NUM_LIT:0>)<EOL>", "docstring": "Return UTC time tuple compatible with time.gmtime().", "id": "f16496:c4:m13"}
{"signature": "@property<EOL><INDENT>def tzinfo(self):<DEDENT>", "body": "return self._tzinfo<EOL>", "docstring": "timezone info object", "id": "f16496:c3:m5"}
{"signature": "def replace(self, year=None, month=None, day=None, hour=None,<EOL>minute=None, second=None, microsecond=None, tzinfo=True):", "body": "if year is None:<EOL><INDENT>year = self.year<EOL><DEDENT>if month is None:<EOL><INDENT>month = self.month<EOL><DEDENT>if day is None:<EOL><INDENT>day = self.day<EOL><DEDENT>if hour is None:<EOL><INDENT>hour = self.hour<EOL><DEDENT>if minute is None:<EOL><INDENT>minute = self.minute<EOL><DEDENT>if second is None:<EOL><INDENT>second = self.second<EOL><DEDENT>if microsecond is None:<EOL><INDENT>microsecond = self.microsecond<EOL><DEDENT>if tzinfo is True:<EOL><INDENT>tzinfo = self.tzinfo<EOL><DEDENT>return datetime.__new__(type(self),<EOL>year, month, day, hour, minute, second,<EOL>microsecond, tzinfo)<EOL>", "docstring": "Return a new datetime with new values for the specified fields.", "id": "f16496:c4:m17"}
{"signature": "def __repr__(self):", "body": "L = [self._year, self._month, self._day,  <EOL>self._hour, self._minute, self._second, self._microsecond]<EOL>if L[-<NUM_LIT:1>] == <NUM_LIT:0>:<EOL><INDENT>del L[-<NUM_LIT:1>]<EOL><DEDENT>if L[-<NUM_LIT:1>] == <NUM_LIT:0>:<EOL><INDENT>del L[-<NUM_LIT:1>]<EOL><DEDENT>s = \"<STR_LIT:U+002CU+0020>\".join(map(str, L))<EOL>module = \"<STR_LIT>\" if self.__class__ is datetime else \"<STR_LIT>\"<EOL>s = \"<STR_LIT>\" % (module + self.__class__.__name__, s)<EOL>if self._tzinfo is not None:<EOL><INDENT>assert s[-<NUM_LIT:1>:] == \"<STR_LIT:)>\"<EOL>s = s[:-<NUM_LIT:1>] + \"<STR_LIT>\" % self._tzinfo + \"<STR_LIT:)>\"<EOL><DEDENT>return s<EOL>", "docstring": "Convert to formal string, for repr().", "id": "f16496:c4:m21"}
{"signature": "def dst(self):", "body": "if self._tzinfo is None:<EOL><INDENT>return None<EOL><DEDENT>offset = self._tzinfo.dst(None)<EOL>offset = _check_utc_offset(\"<STR_LIT>\", offset)<EOL>if offset is not None:<EOL><INDENT>offset = timedelta._create(<NUM_LIT:0>, offset * <NUM_LIT>, <NUM_LIT:0>, True)<EOL><DEDENT>return offset<EOL>", "docstring": "Return 0 if DST is not in effect, or the DST offset (in minutes\n        eastward) if DST is in effect.\n\n        This is purely informational; the DST offset has already been added to\n        the UTC offset returned by utcoffset() if applicable, so there's no\n        need to consult dst() unless you're interested in displaying the DST\n        info.", "id": "f16496:c3:m21"}
{"signature": "def timetuple(self):", "body": "return _build_struct_time(self._year, self._month, self._day,<EOL><NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, -<NUM_LIT:1>)<EOL>", "docstring": "Return local time tuple compatible with time.localtime().", "id": "f16496:c1:m11"}
{"signature": "def __str__(self):", "body": "return self.isoformat(sep='<STR_LIT:U+0020>')<EOL>", "docstring": "Convert to string, for str().", "id": "f16496:c4:m22"}
{"signature": "def isoformat(self):", "body": "<EOL>return \"<STR_LIT>\" % (str(self._year).zfill(<NUM_LIT:4>), str(self._month).zfill(<NUM_LIT:2>), str(self._day).zfill(<NUM_LIT:2>))<EOL>", "docstring": "Return the date formatted according to ISO.\n\n        This is 'YYYY-MM-DD'.\n\n        References:\n        - http://www.w3.org/TR/NOTE-datetime\n        - http://www.cl.cam.ac.uk/~mgk25/iso-time.html", "id": "f16496:c1:m7"}
{"signature": "@property<EOL><INDENT>def second(self):<DEDENT>", "body": "return self._second<EOL>", "docstring": "second (0-59)", "id": "f16496:c4:m3"}
{"signature": "def _days_in_month(year, month):", "body": "assert <NUM_LIT:1> <= month <= <NUM_LIT:12>, month<EOL>if month == <NUM_LIT:2> and _is_leap(year):<EOL><INDENT>return <NUM_LIT><EOL><DEDENT>return _DAYS_IN_MONTH[month]<EOL>", "docstring": "year, month -> number of days in that month in that year.", "id": "f16496:m5"}
{"signature": "def __add__(self, other):", "body": "if isinstance(other, timedelta):<EOL><INDENT>return self._add_timedelta(other, <NUM_LIT:1>)<EOL><DEDENT>return NotImplemented<EOL>", "docstring": "Add a date to a timedelta.", "id": "f16496:c1:m23"}
{"signature": "@classmethod<EOL><INDENT>def now(cls, tz=None):<DEDENT>", "body": "t = _time.time()<EOL>return cls.fromtimestamp(t, tz)<EOL>", "docstring": "Construct a datetime from time.time() and optional time zone info.", "id": "f16496:c4:m9"}
{"signature": "def __add__(self, other):", "body": "if not isinstance(other, timedelta):<EOL><INDENT>return NotImplemented<EOL><DEDENT>return self._add_timedelta(other, <NUM_LIT:1>)<EOL>", "docstring": "Add a datetime and a timedelta.", "id": "f16496:c4:m36"}
{"signature": "def replace(self, year=None, month=None, day=None):", "body": "if year is None:<EOL><INDENT>year = self._year<EOL><DEDENT>if month is None:<EOL><INDENT>month = self._month<EOL><DEDENT>if day is None:<EOL><INDENT>day = self._day<EOL><DEDENT>return date.__new__(type(self), year, month, day)<EOL>", "docstring": "Return a new date with new values for the specified fields.", "id": "f16496:c1:m13"}
{"signature": "def tzname(self):", "body": "if self._tzinfo is None:<EOL><INDENT>return None<EOL><DEDENT>name = self._tzinfo.tzname(self)<EOL>_check_tzname(name)<EOL>return name<EOL>", "docstring": "Return the timezone name.\n\n        Note that the name is 100% informational -- there's no requirement that\n        it mean anything in particular. For example, \"GMT\", \"UTC\", \"-500\",\n        \"-5:00\", \"EDT\", \"US/Eastern\", \"America/New York\" are all valid replies.", "id": "f16496:c4:m25"}
{"signature": "@property<EOL><INDENT>def year(self):<DEDENT>", "body": "return self._year<EOL>", "docstring": "year (1-9999)", "id": "f16496:c1:m8"}
{"signature": "@property<EOL><INDENT>def month(self):<DEDENT>", "body": "return self._month<EOL>", "docstring": "month (1-12)", "id": "f16496:c1:m9"}
{"signature": "@property<EOL><INDENT>def tzinfo(self):<DEDENT>", "body": "return self._tzinfo<EOL>", "docstring": "timezone info object", "id": "f16496:c4:m5"}
{"signature": "def __new__(cls, hour=<NUM_LIT:0>, minute=<NUM_LIT:0>, second=<NUM_LIT:0>, microsecond=<NUM_LIT:0>, tzinfo=None):", "body": "<EOL>hour, minute, second, microsecond = _check_time_fields(<EOL>hour, minute, second, microsecond)<EOL>_check_tzinfo_arg(tzinfo)<EOL>self = object.__new__(cls)<EOL>self._hour = hour<EOL>self._minute = minute<EOL>self._second = second<EOL>self._microsecond = microsecond<EOL>self._tzinfo = tzinfo<EOL>self._hashcode = -<NUM_LIT:1><EOL>return self<EOL>", "docstring": "Constructor.\n\n        Arguments:\n\n        hour, minute (required)\n        second, microsecond (default to zero)\n        tzinfo (default to None)", "id": "f16496:c3:m0"}
{"signature": "@property<EOL><INDENT>def second(self):<DEDENT>", "body": "return self._second<EOL>", "docstring": "second (0-59)", "id": "f16496:c3:m3"}
{"signature": "@classmethod<EOL><INDENT>def fromtimestamp(cls, timestamp, tz=None):<DEDENT>", "body": "_check_tzinfo_arg(tz)<EOL>converter = _time.localtime if tz is None else _time.gmtime<EOL>self = cls._from_timestamp(converter, timestamp, tz)<EOL>if tz is not None:<EOL><INDENT>self = tz.fromutc(self)<EOL><DEDENT>return self<EOL>", "docstring": "Construct a datetime from a POSIX timestamp (like time.time()).\n\n        A timezone info object may be passed in as well.", "id": "f16496:c4:m6"}
{"signature": "def __init__(self):", "body": "<EOL>self.length = <NUM_LIT:0><EOL>self.count = [<NUM_LIT:0>, <NUM_LIT:0>]<EOL>self.input = []<EOL>self.init()<EOL>", "docstring": "Initialisation.", "id": "f16497:c0:m0"}
{"signature": "def _bytelist2longBigEndian(list):", "body": "imax = len(list) // <NUM_LIT:4><EOL>hl = [<NUM_LIT:0>] * imax<EOL>j = <NUM_LIT:0><EOL>i = <NUM_LIT:0><EOL>while i < imax:<EOL><INDENT>b0 = ord(list[j]) << <NUM_LIT><EOL>b1 = ord(list[j+<NUM_LIT:1>]) << <NUM_LIT:16><EOL>b2 = ord(list[j+<NUM_LIT:2>]) << <NUM_LIT:8><EOL>b3 = ord(list[j+<NUM_LIT:3>])<EOL>hl[i] = b0 | b1 | b2 | b3<EOL>i = i+<NUM_LIT:1><EOL>j = j+<NUM_LIT:4><EOL><DEDENT>return hl<EOL>", "docstring": "Transform a list of characters into a list of longs.", "id": "f16497:m1"}
{"signature": "def init(self):", "body": "self.length = <NUM_LIT:0><EOL>self.input = []<EOL>self.H0 = <NUM_LIT><EOL>self.H1 = <NUM_LIT><EOL>self.H2 = <NUM_LIT><EOL>self.H3 = <NUM_LIT><EOL>self.H4 = <NUM_LIT><EOL>", "docstring": "Initialize the message-digest and set all fields to zero.", "id": "f16497:c0:m1"}
{"signature": "def update(self, inBuf):", "body": "leninBuf = len(inBuf)<EOL>index = (self.count[<NUM_LIT:1>] >> <NUM_LIT:3>) & <NUM_LIT><EOL>self.count[<NUM_LIT:1>] = self.count[<NUM_LIT:1>] + (leninBuf << <NUM_LIT:3>)<EOL>if self.count[<NUM_LIT:1>] < (leninBuf << <NUM_LIT:3>):<EOL><INDENT>self.count[<NUM_LIT:0>] = self.count[<NUM_LIT:0>] + <NUM_LIT:1><EOL><DEDENT>self.count[<NUM_LIT:0>] = self.count[<NUM_LIT:0>] + (leninBuf >> <NUM_LIT>)<EOL>partLen = <NUM_LIT:64> - index<EOL>if leninBuf >= partLen:<EOL><INDENT>self.input[index:] = list(inBuf[:partLen])<EOL>self._transform(_bytelist2longBigEndian(self.input))<EOL>i = partLen<EOL>while i + <NUM_LIT> < leninBuf:<EOL><INDENT>self._transform(_bytelist2longBigEndian(list(inBuf[i:i+<NUM_LIT:64>])))<EOL>i = i + <NUM_LIT:64><EOL><DEDENT>else:<EOL><INDENT>self.input = list(inBuf[i:leninBuf])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>i = <NUM_LIT:0><EOL>self.input = self.input + list(inBuf)<EOL><DEDENT>", "docstring": "Add to the current message.\n\n        Update the md5 object with the string arg. Repeated calls\n        are equivalent to a single call with the concatenation of all\n        the arguments, i.e. m.update(a); m.update(b) is equivalent\n        to m.update(a+b).\n\n        The hash is immediately calculated for all full blocks. The final\n        calculation is made in digest(). It will calculate 1-2 blocks,\n        depending on how much padding we have to add. This allows us to\n        keep an intermediate value for the hash, so that we only need to\n        make minimal recalculation if we call update() to add more data\n        to the hashed string.", "id": "f16497:c0:m3"}
{"signature": "def _long2bytesBigEndian(n, blocksize=<NUM_LIT:0>):", "body": "<EOL>s = b'<STR_LIT>'<EOL>pack = struct.pack<EOL>while n > <NUM_LIT:0>:<EOL><INDENT>s = pack('<STR_LIT>', n & <NUM_LIT>) + s<EOL>n = n >> <NUM_LIT:32><EOL><DEDENT>for i in range(len(s)):<EOL><INDENT>if s[i] != '<STR_LIT>':<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>s = '<STR_LIT>'<EOL>i = <NUM_LIT:0><EOL><DEDENT>s = s[i:]<EOL>if blocksize > <NUM_LIT:0> and len(s) % blocksize:<EOL><INDENT>s = (blocksize - len(s) % blocksize) * '<STR_LIT>' + s<EOL><DEDENT>return s<EOL>", "docstring": "Convert a long integer to a byte string.\n\n    If optional blocksize is given and greater than zero, pad the front\n    of the byte string with binary zeros so that the length is a multiple\n    of blocksize.", "id": "f16497:m0"}
{"signature": "def __reduce__(self):", "body": "return (type(self), (self.default_factory,), None, None, self.iteritems())<EOL>", "docstring": "__reduce__ must return a 5-tuple as follows:\n\n   - factory function\n   - tuple of args for the factory function\n   - additional state (here None)\n   - sequence iterator (here None)\n   - dictionary iterator (yielding successive (key, value) pairs\n\n   This API is used by pickle.py and copy.py.", "id": "f16498:c2:m5"}
{"signature": "def render(self, only_line=False, colored=False):", "body": "source_line = self.location.source_line().rstrip(\"<STR_LIT:\\n>\")<EOL>highlight_line = bytearray(re.sub(r\"<STR_LIT>\", \"<STR_LIT:U+0020>\", source_line), \"<STR_LIT:utf-8>\")<EOL>for hilight in self.highlights:<EOL><INDENT>if hilight.line() == self.location.line():<EOL><INDENT>lft, rgt = hilight.column_range()<EOL>highlight_line[lft:rgt] = bytearray(\"<STR_LIT>\", \"<STR_LIT:utf-8>\") * (rgt - lft)<EOL><DEDENT><DEDENT>lft, rgt = self.location.column_range()<EOL>if rgt == lft: <EOL><INDENT>rgt = lft + <NUM_LIT:1><EOL><DEDENT>highlight_line[lft:rgt] = bytearray(\"<STR_LIT>\", \"<STR_LIT:utf-8>\") * (rgt - lft)<EOL>if only_line:<EOL><INDENT>location = \"<STR_LIT>\" % (self.location.source_buffer.name, self.location.line())<EOL><DEDENT>else:<EOL><INDENT>location = str(self.location)<EOL><DEDENT>notes = list(self.notes)<EOL>if self.level != \"<STR_LIT>\":<EOL><INDENT>expanded_location = self.location.expanded_from<EOL>while expanded_location is not None:<EOL><INDENT>notes.insert(<NUM_LIT:0>, Diagnostic(\"<STR_LIT>\",<EOL>\"<STR_LIT>\", {},<EOL>self.location.expanded_from))<EOL>expanded_location = expanded_location.expanded_from<EOL><DEDENT><DEDENT>rendered_notes = reduce(list.__add__, [note.render(only_line, colored)<EOL>for note in notes], [])<EOL>if colored:<EOL><INDENT>if self.level in (\"<STR_LIT:error>\", \"<STR_LIT>\"):<EOL><INDENT>level_color = <NUM_LIT> <EOL><DEDENT>elif self.level == \"<STR_LIT>\":<EOL><INDENT>level_color = <NUM_LIT> <EOL><DEDENT>else: <EOL><INDENT>level_color = <NUM_LIT:30> <EOL><DEDENT>return [<EOL>\"<STR_LIT>\".<EOL>format(location, level_color, self.level, self.message()),<EOL>source_line,<EOL>\"<STR_LIT>\".format(highlight_line.decode(\"<STR_LIT:utf-8>\"))<EOL>] + rendered_notes<EOL><DEDENT>else:<EOL><INDENT>return [<EOL>\"<STR_LIT>\".format(location, self.level, self.message()),<EOL>source_line,<EOL>highlight_line.decode(\"<STR_LIT:utf-8>\")<EOL>] + 
rendered_notes<EOL><DEDENT>", "docstring": "Returns the human-readable location of the diagnostic in the source,\nthe formatted message, the source line corresponding\nto ``location`` and a line emphasizing the problematic\nlocations in the source line using ASCII art, as a list of lines.\nAppends the result of calling :meth:`render` on ``notes``, if any.\n\nFor example: ::\n\n    <input>:1:8-9: error: cannot add integer and string\n    x + (1 + \"a\")\n         ~ ^ ~~~\n\n:param only_line: (bool) If true, only print line number, not line and column range", "id": "f16500:c0:m2"}
{"signature": "@contextmanager<EOL><INDENT>def context(self, *notes):<DEDENT>", "body": "self._appended_notes += notes<EOL>yield<EOL>del self._appended_notes[-len(notes):]<EOL>", "docstring": "A context manager that appends ``note`` to every diagnostic processed by\nthis engine.", "id": "f16500:c2:m2"}
{"signature": "def compare(left, right, compare_locs=False):", "body": "if type(left) != type(right):<EOL><INDENT>return False<EOL><DEDENT>if isinstance(left, ast.AST):<EOL><INDENT>for field in left._fields:<EOL><INDENT>if not compare(getattr(left, field), getattr(right, field)):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>if compare_locs:<EOL><INDENT>for loc in left._locs:<EOL><INDENT>if getattr(left, loc) != getattr(right, loc):<EOL><INDENT>return False<EOL><DEDENT><DEDENT><DEDENT>return True<EOL><DEDENT>elif isinstance(left, list):<EOL><INDENT>if len(left) != len(right):<EOL><INDENT>return False<EOL><DEDENT>for left_elt, right_elt in zip(left, right):<EOL><INDENT>if not compare(left_elt, right_elt):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return left == right<EOL><DEDENT>", "docstring": "An AST comparison function. Returns ``True`` if all fields in\n``left`` are equal to fields in ``right``; if ``compare_locs`` is\ntrue, all locations should match as well.", "id": "f16501:m0"}
{"signature": "def visit(self, obj):", "body": "if isinstance(obj, list):<EOL><INDENT>return list(filter(lambda x: x is not None, map(self.visit, obj)))<EOL><DEDENT>elif isinstance(obj, ast.AST):<EOL><INDENT>return self._visit_one(obj)<EOL><DEDENT>else:<EOL><INDENT>return obj<EOL><DEDENT>", "docstring": "Visit a node or a list of nodes. Other values are ignored", "id": "f16501:c1:m2"}
{"signature": "def generic_visit(self, node):", "body": "for field_name in node._fields:<EOL><INDENT>setattr(node, field_name, self.visit(getattr(node, field_name)))<EOL><DEDENT>return node<EOL>", "docstring": "Called if no explicit visitor function exists for a node.", "id": "f16501:c1:m0"}
{"signature": "@action(Loc(\"<STR_LIT>\"))<EOL><INDENT>def continue_stmt(self, stmt_loc):<DEDENT>", "body": "return ast.Continue(loc=stmt_loc, keyword_loc=stmt_loc)<EOL>", "docstring": "continue_stmt: 'continue", "id": "f16502:c2:m37"}
{"signature": "@action(Seq(Loc(\"<STR_LIT>\"), Tok(\"<STR_LIT>\"), Rule(\"<STR_LIT>\"), Loc(\"<STR_LIT::>\"), Rule(\"<STR_LIT>\")))<EOL><INDENT>def funcdef__26(self, def_loc, ident_tok, args, colon_loc, suite):<DEDENT>", "body": "return ast.FunctionDef(name=ident_tok.value, args=args, returns=None,<EOL>body=suite, decorator_list=[],<EOL>at_locs=[], keyword_loc=def_loc, name_loc=ident_tok.loc,<EOL>colon_loc=colon_loc, arrow_loc=None,<EOL>loc=def_loc.join(suite[-<NUM_LIT:1>].loc))<EOL>", "docstring": "(2.6, 2.7) funcdef: 'def' NAME parameters ':' suite", "id": "f16502:c2:m17"}
{"signature": "@action(Seq(Loc(\"<STR_LIT>\"), List(Tok(\"<STR_LIT>\"), \"<STR_LIT:U+002C>\", trailing=False)))<EOL><INDENT>def nonlocal_stmt(self, nonlocal_loc, names):<DEDENT>", "body": "return ast.Nonlocal(names=list(map(lambda x: x.value, names)),<EOL>name_locs=list(map(lambda x: x.loc, names)),<EOL>keyword_loc=nonlocal_loc, loc=nonlocal_loc.join(names[-<NUM_LIT:1>].loc))<EOL>", "docstring": "(3.0-) nonlocal_stmt: 'nonlocal' NAME (',' NAME)*", "id": "f16502:c2:m57"}
{"signature": "def Opt(inner_rule, loc=None):", "body": "return Alt(inner_rule, Eps(), loc=loc)<EOL>", "docstring": "Shorthand for ``Alt(inner_rule, Eps())``", "id": "f16502:m10"}
{"signature": "@action(Alt(Seq(Star(SeqN(<NUM_LIT:0>, varargslist__26_1, Tok(\"<STR_LIT:U+002C>\"))),<EOL>Alt(varargslist__26_2, varargslist__26_3)),<EOL>Seq(List(varargslist__26_1, \"<STR_LIT:U+002C>\", trailing=True),<EOL>varargslist__26_4)))<EOL><INDENT>def varargslist__26(self, fparams, args):<DEDENT>", "body": "for fparam, default_opt in fparams:<EOL><INDENT>if default_opt:<EOL><INDENT>equals_loc, default = default_opt<EOL>args.equals_locs.append(equals_loc)<EOL>args.defaults.append(default)<EOL><DEDENT>elif len(args.defaults) > <NUM_LIT:0>:<EOL><INDENT>error = diagnostic.Diagnostic(<EOL>\"<STR_LIT>\", \"<STR_LIT>\", {},<EOL>fparam.loc, [args.args[-<NUM_LIT:1>].loc.join(args.defaults[-<NUM_LIT:1>].loc)])<EOL>self.diagnostic_engine.process(error)<EOL><DEDENT>args.args.append(fparam)<EOL><DEDENT>def fparam_loc(fparam, default_opt):<EOL><INDENT>if default_opt:<EOL><INDENT>equals_loc, default = default_opt<EOL>return fparam.loc.join(default.loc)<EOL><DEDENT>else:<EOL><INDENT>return fparam.loc<EOL><DEDENT><DEDENT>if args.loc is None:<EOL><INDENT>args.loc = fparam_loc(*fparams[<NUM_LIT:0>]).join(fparam_loc(*fparams[-<NUM_LIT:1>]))<EOL><DEDENT>elif len(fparams) > <NUM_LIT:0>:<EOL><INDENT>args.loc = args.loc.join(fparam_loc(*fparams[<NUM_LIT:0>]))<EOL><DEDENT>return args<EOL>", "docstring": "(2.6, 2.7)\nvarargslist: ((fpdef ['=' test] ',')*\n              ('*' NAME [',' '**' NAME] | '**' NAME) |\n              fpdef ['=' test] (',' fpdef ['=' test])* [','])", "id": "f16502:c2:m22"}
{"signature": "@action(Alt(Rule(\"<STR_LIT>\"),<EOL>Rule(\"<STR_LIT>\"),<EOL>Rule(\"<STR_LIT>\")))<EOL><INDENT>def arglist(self, args, call):<DEDENT>", "body": "for arg in args:<EOL><INDENT>if isinstance(arg, ast.keyword):<EOL><INDENT>call.keywords.append(arg)<EOL><DEDENT>elif len(call.keywords) > <NUM_LIT:0>:<EOL><INDENT>error = diagnostic.Diagnostic(<EOL>\"<STR_LIT>\", \"<STR_LIT>\", {},<EOL>arg.loc, [call.keywords[-<NUM_LIT:1>].loc])<EOL>self.diagnostic_engine.process(error)<EOL><DEDENT>else:<EOL><INDENT>call.args.append(arg)<EOL><DEDENT><DEDENT>return call<EOL>", "docstring": "arglist: (argument ',')* (argument [','] |\n                                     '*' test (',' argument)* [',' '**' test] |\n                                     '**' test)", "id": "f16502:c2:m113"}
{"signature": "@action(Seq(Rule(\"<STR_LIT>\"), Alt(Rule(\"<STR_LIT>\"), Rule(\"<STR_LIT>\"))))<EOL><INDENT>def decorated(self, decorators, classfuncdef):<DEDENT>", "body": "classfuncdef.at_locs = list(map(lambda x: x[<NUM_LIT:0>], decorators))<EOL>classfuncdef.decorator_list = list(map(lambda x: x[<NUM_LIT:1>], decorators))<EOL>classfuncdef.loc = classfuncdef.loc.join(decorators[<NUM_LIT:0>][<NUM_LIT:0>])<EOL>return classfuncdef<EOL>", "docstring": "decorated: decorators (classdef | funcdef)", "id": "f16502:c2:m16"}
{"signature": "def Newline(loc=None):", "body": "@llrule(loc, lambda parser: [\"<STR_LIT>\"])<EOL>def rule(parser):<EOL><INDENT>result = parser._accept(\"<STR_LIT>\")<EOL>if result is unmatched:<EOL><INDENT>return result<EOL><DEDENT>return []<EOL><DEDENT>return rule<EOL>", "docstring": "A rule that accepts token of kind ``newline`` and returns an empty list.", "id": "f16502:m14"}
{"signature": "@action(Seq(Rule(\"<STR_LIT>\"), Opt(Seq(Loc(\"<STR_LIT>\"), Tok(\"<STR_LIT>\")))))<EOL><INDENT>def dotted_as_name(self, dotted_name, as_name_opt):<DEDENT>", "body": "asname_name = asname_loc = as_loc = None<EOL>dotted_name_loc, dotted_name_name = dotted_name<EOL>loc = dotted_name_loc<EOL>if as_name_opt:<EOL><INDENT>as_loc, asname = as_name_opt<EOL>asname_name = asname.value<EOL>asname_loc = asname.loc<EOL>loc = loc.join(asname.loc)<EOL><DEDENT>return ast.alias(name=dotted_name_name, asname=asname_name,<EOL>loc=loc, name_loc=dotted_name_loc, as_loc=as_loc, asname_loc=asname_loc)<EOL>", "docstring": "dotted_as_name: dotted_name ['as' NAME]", "id": "f16502:c2:m52"}
{"signature": "def Oper(klass, *kinds, **kwargs):", "body": "@action(Seq(*map(Loc, kinds)), loc=kwargs.get(\"<STR_LIT>\", None))<EOL>def rule(parser, *tokens):<EOL><INDENT>return klass(loc=tokens[<NUM_LIT:0>].join(tokens[-<NUM_LIT:1>]))<EOL><DEDENT>return rule<EOL>", "docstring": "A rule that accepts a sequence of tokens of kinds ``kinds`` and returns\nan instance of ``klass`` with ``loc`` encompassing the entire sequence\nor None if the first token is not of ``kinds[0]``.", "id": "f16502:m15"}
{"signature": "@action(Alt(Rule(\"<STR_LIT>\"), Rule(\"<STR_LIT>\"), Rule(\"<STR_LIT>\"),<EOL>Rule(\"<STR_LIT>\"), Rule(\"<STR_LIT>\"), Rule(\"<STR_LIT>\"),<EOL>Rule(\"<STR_LIT>\"), Rule(\"<STR_LIT>\")))<EOL><INDENT>def compound_stmt(self, stmt):<DEDENT>", "body": "return [stmt]<EOL>", "docstring": "compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt |\n                          funcdef | classdef | decorated", "id": "f16502:c2:m59"}
{"signature": "@action(Seq(Loc(\"<STR_LIT>\"), Rule(\"<STR_LIT>\")))<EOL><INDENT>def import_name(self, import_loc, names):<DEDENT>", "body": "return ast.Import(names=names,<EOL>keyword_loc=import_loc, loc=import_loc.join(names[-<NUM_LIT:1>].loc))<EOL>", "docstring": "import_name: 'import' dotted_as_names", "id": "f16502:c2:m42"}
{"signature": "def Eps(value=None, loc=None):", "body": "@llrule(loc, lambda parser: [])<EOL>def rule(parser):<EOL><INDENT>return value<EOL><DEDENT>return rule<EOL>", "docstring": "A rule that accepts no tokens (epsilon) and returns ``value``.", "id": "f16502:m2"}
{"signature": "@action(Seq(Loc(\"<STR_LIT:*>\"), Rule(\"<STR_LIT>\")))<EOL><INDENT>def star_expr__32(self, star_loc, expr):<DEDENT>", "body": "return ast.Starred(value=expr, ctx=None,<EOL>star_loc=star_loc, loc=expr.loc.join(star_loc))<EOL>", "docstring": "(3.0-) star_expr: '*' expr", "id": "f16502:c2:m81"}
{"signature": "def Star(inner_rule, loc=None):", "body": "@llrule(loc, lambda parser: [])<EOL>def rule(parser):<EOL><INDENT>results = []<EOL>while True:<EOL><INDENT>data = parser._save()<EOL>result = inner_rule(parser)<EOL>if result is unmatched:<EOL><INDENT>parser._restore(data, rule=inner_rule)<EOL>return results<EOL><DEDENT>results.append(result)<EOL><DEDENT><DEDENT>return rule<EOL>", "docstring": "A rule that accepts a sequence of tokens satisfying ``inner_rule`` zero or more times,\nand returns the returned values in a :class:`list`.", "id": "f16502:m11"}
{"signature": "@action(Loc(\"<STR_LIT>\"))<EOL><INDENT>def break_stmt(self, stmt_loc):<DEDENT>", "body": "return ast.Break(loc=stmt_loc, keyword_loc=stmt_loc)<EOL>", "docstring": "break_stmt: 'break", "id": "f16502:c2:m36"}
{"signature": "@action(Seq(Loc(\"<STR_LIT>\"), Loc(\"<STR_LIT::>\"), Rule(\"<STR_LIT>\"), Alt(try_stmt_1, try_stmt_2)))<EOL><INDENT>def try_stmt(self, try_loc, try_colon_loc, body, stmt):<DEDENT>", "body": "stmt.keyword_loc, stmt.try_colon_loc, stmt.body =try_loc, try_colon_loc, body<EOL>stmt.loc = stmt.loc.join(try_loc)<EOL>return stmt<EOL>", "docstring": "try_stmt: ('try' ':' suite\n           ((except_clause ':' suite)+\n            ['else' ':' suite]\n            ['finally' ':' suite] |\n            'finally' ':' suite))", "id": "f16502:c2:m65"}
{"signature": "@action(Rule(\"<STR_LIT>\"))<EOL><INDENT>def comparison(self, lhs, rhs):<DEDENT>", "body": "if len(rhs) > <NUM_LIT:0>:<EOL><INDENT>return ast.Compare(left=lhs, ops=list(map(lambda x: x[<NUM_LIT:0>], rhs)),<EOL>comparators=list(map(lambda x: x[<NUM_LIT:1>], rhs)),<EOL>loc=lhs.loc.join(rhs[-<NUM_LIT:1>][<NUM_LIT:1>].loc))<EOL><DEDENT>else:<EOL><INDENT>return lhs<EOL><DEDENT>", "docstring": "(2.6, 2.7) comparison: expr (comp_op expr)*\n(3.0, 3.1) comparison: star_expr (comp_op star_expr)*\n(3.2-) comparison: expr (comp_op expr)*", "id": "f16502:c2:m79"}
{"signature": "def next(self, eof_token=False):", "body": "if len(self.queue) == <NUM_LIT:0>:<EOL><INDENT>self._refill(eof_token)<EOL><DEDENT>return self.queue.pop(<NUM_LIT:0>)<EOL>", "docstring": "Returns token at ``offset`` as a :class:`Token` and advances ``offset``\nto point past the end of the token, where the token has:\n\n- *range* which is a :class:`pythonparser.source.Range` that includes\n  the token but not surrounding whitespace,\n- *kind* which is a string containing one of Python keywords or operators,\n  ``newline``, ``float``, ``int``, ``complex``, ``strbegin``,\n  ``strdata``, ``strend``, ``ident``, ``indent``, ``dedent`` or ``eof``\n  (if ``eof_token`` is True).\n- *value* which is the flags as lowercase string if *kind* is ``strbegin``,\n  the string contents if *kind* is ``strdata``,\n  the numeric value if *kind* is ``float``, ``int`` or ``complex``,\n  the identifier if *kind* is ``ident`` and ``None`` in any other case.\n\n:param eof_token: if true, will return a token with kind ``eof``\n    when the input is exhausted; if false, will raise ``StopIteration``.", "id": "f16503:c1:m1"}
{"signature": "def rewrite(self):", "body": "self._sort()<EOL>self._check()<EOL>rewritten, pos = [], <NUM_LIT:0><EOL>for range, replacement in self.ranges:<EOL><INDENT>rewritten.append(self.buffer.source[pos:range.begin_pos])<EOL>rewritten.append(replacement)<EOL>pos = range.end_pos<EOL><DEDENT>rewritten.append(self.buffer.source[pos:])<EOL>return Buffer(\"<STR_LIT>\".join(rewritten), self.buffer.name, self.buffer.first_line)<EOL>", "docstring": "Return the rewritten source. May raise :class:`RewriterConflict`.", "id": "f16505:c4:m5"}
{"signature": "def line(self):", "body": "line, column = self.source_buffer.decompose_position(self.begin_pos)<EOL>return line<EOL>", "docstring": "Returns the line number of the beginning of this range.", "id": "f16505:c1:m8"}
{"signature": "def join(self, other):", "body": "if self.source_buffer != other.source_buffer:<EOL><INDENT>raise ValueError<EOL><DEDENT>if self.expanded_from == other.expanded_from:<EOL><INDENT>expanded_from = self.expanded_from<EOL><DEDENT>else:<EOL><INDENT>expanded_from = None<EOL><DEDENT>return Range(self.source_buffer,<EOL>min(self.begin_pos, other.begin_pos),<EOL>max(self.end_pos, other.end_pos),<EOL>expanded_from=expanded_from)<EOL>", "docstring": "Returns the smallest possible range spanning both this range and other.\nRaises :exc:`ValueError` if the ranges do not belong to the same\n:class:`Buffer`.", "id": "f16505:c1:m9"}
{"signature": "def insert_after(self, range, text):", "body": "self.replace(range.end(), text)<EOL>", "docstring": "Insert `text` after `range`.", "id": "f16505:c4:m4"}
{"signature": "def __repr__(self):", "body": "return \"<STR_LIT>\" %(self.source_buffer.name, self.begin_pos, self.end_pos, repr(self.expanded_from))<EOL>", "docstring": "Returns a human-readable representation of this range.", "id": "f16505:c1:m1"}
{"signature": "def source_line(self, lineno):", "body": "line_begins = self._extract_line_begins()<EOL>lineno = lineno - self.first_line<EOL>if lineno >= <NUM_LIT:0> and lineno + <NUM_LIT:1> < len(line_begins):<EOL><INDENT>first, last = line_begins[lineno:lineno + <NUM_LIT:2>]<EOL>return self.source[first:last]<EOL><DEDENT>elif lineno >= <NUM_LIT:0> and lineno < len(line_begins):<EOL><INDENT>return self.source[line_begins[-<NUM_LIT:1>]:]<EOL><DEDENT>else:<EOL><INDENT>raise IndexError<EOL><DEDENT>", "docstring": "Returns line ``lineno`` from source, taking ``first_line`` into account,\nor raises :exc:`IndexError` if ``lineno`` is out of range.", "id": "f16505:c0:m2"}
{"signature": "def size(self):", "body": "return self.end_pos - self.begin_pos<EOL>", "docstring": "Returns the amount of characters spanned by the range.", "id": "f16505:c1:m5"}
{"signature": "def chain(self, expanded_from):", "body": "return Range(self.source_buffer, self.begin_pos, self.begin_pos,<EOL>expanded_from=expanded_from)<EOL>", "docstring": "Returns a range identical to this one, but indicating that\nit was expanded from the range `expanded_from`.", "id": "f16505:c1:m2"}
{"signature": "def decompose_position(self, offset):", "body": "line_begins = self._extract_line_begins()<EOL>lineno = bisect.bisect_right(line_begins, offset) - <NUM_LIT:1><EOL>if offset >= <NUM_LIT:0> and offset <= len(self.source):<EOL><INDENT>return lineno + self.first_line, offset - line_begins[lineno]<EOL><DEDENT>else:<EOL><INDENT>raise IndexError<EOL><DEDENT>", "docstring": "Returns a ``line, column`` tuple for a character offset into the source,\norraises :exc:`IndexError` if ``lineno`` is out of range.", "id": "f16505:c0:m3"}
{"signature": "def source_lines(self):", "body": "return [self.source_buffer.source_line(line)<EOL>for line in range(self.line(), self.end().line() + <NUM_LIT:1>)]<EOL>", "docstring": "Returns the lines of source code containing the entirety of this range.", "id": "f16505:c1:m12"}
{"signature": "def end(self):", "body": "return Range(self.source_buffer, self.end_pos, self.end_pos,<EOL>expanded_from=self.expanded_from)<EOL>", "docstring": "Returns a zero-length range located just after the end of this range.", "id": "f16505:c1:m4"}
{"signature": "def remove(self, range):", "body": "self.replace(range, \"<STR_LIT>\")<EOL>", "docstring": "Remove `range`.", "id": "f16505:c4:m2"}
{"signature": "def begin(self):", "body": "return Range(self.source_buffer, self.begin_pos, self.begin_pos,<EOL>expanded_from=self.expanded_from)<EOL>", "docstring": "Returns a zero-length range located just before the beginning of this range.", "id": "f16505:c1:m3"}
{"signature": "def replace(self, range, replacement):", "body": "self.ranges.append((range, replacement))<EOL>", "docstring": "Remove `range` and replace it with string `replacement`.", "id": "f16505:c4:m1"}
{"signature": "def parse_buffer(buffer, mode=\"<STR_LIT>\", flags=[], version=None, engine=None):", "body": "if version is None:<EOL><INDENT>version = sys.version_info[<NUM_LIT:0>:<NUM_LIT:2>]<EOL><DEDENT>if engine is None:<EOL><INDENT>engine = pythonparser_diagnostic.Engine()<EOL><DEDENT>lexer = pythonparser_lexer.Lexer(buffer, version, engine)<EOL>if mode in (\"<STR_LIT>\", \"<STR_LIT>\"):<EOL><INDENT>lexer.interactive = True<EOL><DEDENT>parser = pythonparser_parser.Parser(lexer, version, engine)<EOL>parser.add_flags(flags)<EOL>if mode == \"<STR_LIT>\":<EOL><INDENT>return parser.file_input(), lexer.comments<EOL><DEDENT>elif mode == \"<STR_LIT>\":<EOL><INDENT>return parser.single_input(), lexer.comments<EOL><DEDENT>elif mode == \"<STR_LIT>\":<EOL><INDENT>return parser.eval_input(), lexer.comments<EOL><DEDENT>", "docstring": "Like :meth:`parse`, but accepts a :class:`source.Buffer` instead of\nsource and filename, and returns comments as well.\n\n:see: :meth:`parse`\n:return: (:class:`ast.AST`, list of :class:`source.Comment`)\n    Abstract syntax tree and comments", "id": "f16506:m0"}
{"signature": "def BenchmarkHashStrCached(b):", "body": "h = hash  <EOL>for _ in xrange(b.N):<EOL><INDENT>h('<STR_LIT>')<EOL><DEDENT>", "docstring": "Hashes the same value repeatedly to exercise any hash caching logic.", "id": "f16537:m6"}
{"signature": "def _WriteXmlFile(filename, suite_duration, results):", "body": "xml_file = open(filename, '<STR_LIT:w>')<EOL>xml_file.write('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(sys.argv[<NUM_LIT:0>], len(results), suite_duration))<EOL>for result in results:<EOL><INDENT>xml_file.write('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(result.name, result.duration))<EOL>if result.properties:<EOL><INDENT>xml_file.write('<STR_LIT>')<EOL>for name in result.properties:<EOL><INDENT>value = result.properties[name]<EOL>if isinstance(value, float):<EOL><INDENT>formatted = '<STR_LIT>' % value<EOL><DEDENT>else:<EOL><INDENT>formatted = str(value)<EOL><DEDENT>xml_file.write('<STR_LIT>' %<EOL>(name, formatted))<EOL><DEDENT>xml_file.write('<STR_LIT>')<EOL><DEDENT>xml_file.write('<STR_LIT>')<EOL><DEDENT>xml_file.write('<STR_LIT>')<EOL>xml_file.close()<EOL>", "docstring": "Given a list of _BenchmarkResults, writes XML test results to filename.", "id": "f16547:m2"}
{"signature": "def unicodevalues_asstring(values):", "body": "if not os.environ.get('<STR_LIT>'):<EOL><INDENT>return map(lambda x: '<STR_LIT:%s>' % format(x).strip(), values)<EOL><DEDENT>return map(lambda x: u'<STR_LIT>' % (x, unichr(x)), sorted(values))<EOL>", "docstring": "Return string with unicodenames (unless that is disabled)", "id": "f16556:m2"}
{"signature": "@staticmethod<EOL><INDENT>def get_unicodes(codepoint):<DEDENT>", "body": "result = re.sub('<STR_LIT>', '<STR_LIT>', codepoint.text)<EOL>return Extension.convert_to_list_of_unicodes(result)<EOL>", "docstring": "Return list of unicodes for <scanning-codepoints>", "id": "f16564:c0:m2"}
{"signature": "def _loadNamelistIncludes(item, unique_glyphs, cache):", "body": "includes = item[\"<STR_LIT>\"] = []<EOL>charset = item[\"<STR_LIT>\"] = set() | item[\"<STR_LIT>\"]<EOL>noCharcode = item[\"<STR_LIT>\"] = set() | item[\"<STR_LIT>\"]<EOL>dirname =  os.path.dirname(item[\"<STR_LIT>\"])<EOL>for include in item[\"<STR_LIT>\"][\"<STR_LIT>\"]:<EOL><INDENT>includeFile = os.path.join(dirname, include)<EOL>try:<EOL><INDENT>includedItem = readNamelist(includeFile, unique_glyphs, cache)<EOL><DEDENT>except NamelistRecursionError:<EOL><INDENT>continue<EOL><DEDENT>if includedItem in includes:<EOL><INDENT>continue<EOL><DEDENT>includes.append(includedItem)<EOL>charset |= includedItem[\"<STR_LIT>\"]<EOL>noCharcode |= includedItem[\"<STR_LIT>\"]<EOL><DEDENT>return item<EOL>", "docstring": "Load the includes of an encoding Namelist files.\n\n    This is an implementation detail of readNamelist.", "id": "f16791:m5"}
{"signature": "def codepointsInNamelist(namFilename, unique_glyphs=False, cache=None):", "body": "key = '<STR_LIT>' if not unique_glyphs else '<STR_LIT>'<EOL>internals_dir = os.path.dirname(os.path.abspath(__file__))<EOL>target = os.path.join(internals_dir, namFilename)<EOL>result = readNamelist(target, unique_glyphs, cache)<EOL>return result[key]<EOL>", "docstring": "Returns the set of codepoints contained in a given Namelist file.\n\n    This is a replacement CodepointsInSubset and implements the \"#$ include\"\n    header format.\n\n    Args:\n      namFilename: The path to the  Namelist file.\n      unique_glyphs: Optional, whether to only include glyphs unique to subset.\n    Returns:\n      A set containing the glyphs in the subset.", "id": "f16791:m9"}
{"signature": "def get_orthographies(self, _library=library):", "body": "results = []<EOL>for charset in _library.charsets:<EOL><INDENT>if self._charsets:<EOL><INDENT>cn = getattr(charset, '<STR_LIT>', False)<EOL>abbr = getattr(charset, '<STR_LIT>', False)<EOL>nn = getattr(charset, '<STR_LIT>', False)<EOL>naive = getattr(charset, '<STR_LIT>', False)<EOL>if cn and cn.lower() in self._charsets:<EOL><INDENT>results.append(charset)<EOL><DEDENT>elif nn and nn.lower() in self._charsets:<EOL><INDENT>results.append(charset)<EOL><DEDENT>elif naive and naive.lower() in self._charsets:<EOL><INDENT>results.append(charset)<EOL><DEDENT>elif abbr and abbr.lower() in self._charsets:<EOL><INDENT>results.append(charset)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>results.append(charset)<EOL><DEDENT><DEDENT>for result in results:<EOL><INDENT>yield CharsetInfo(self, result)<EOL><DEDENT>", "docstring": "Returns list of CharsetInfo about supported orthographies", "id": "f16968:c4:m3"}
{"signature": "def getGlyphNames(self):", "body": "return \"<STR_LIT>\"<EOL>", "docstring": "TODO: Fix this so that pyfontaine falls back to using fontTools to get glyph names\nif getGlyphNames() is really needed by the given mode of use.", "id": "f16968:c6:m2"}
{"signature": "@classmethod<EOL><INDENT>def linux(cls, path, argv=None, envp=None, entry_symbol=None, symbolic_files=None, concrete_start='<STR_LIT>', pure_symbolic=False, stdin_size=None, **kwargs):<DEDENT>", "body": "if stdin_size is None:<EOL><INDENT>stdin_size = consts.stdin_size<EOL><DEDENT>try:<EOL><INDENT>return cls(_make_linux(path, argv, envp, entry_symbol, symbolic_files, concrete_start, pure_symbolic, stdin_size), **kwargs)<EOL><DEDENT>except elftools.common.exceptions.ELFError:<EOL><INDENT>raise Exception(f'<STR_LIT>')<EOL><DEDENT>", "docstring": "Constructor for Linux binary analysis.\n\n:param str path: Path to binary to analyze\n:param argv: Arguments to provide to the binary\n:type argv: list[str]\n:param envp: Environment to provide to the binary\n:type envp: dict[str, str]\n:param entry_symbol: Entry symbol to resolve to start execution\n:type envp: str\n:param symbolic_files: Filenames to mark as having symbolic input\n:type symbolic_files: list[str]\n:param str concrete_start: Concrete stdin to use before symbolic input\n:param int stdin_size: symbolic stdin size to use\n:param kwargs: Forwarded to the Manticore constructor\n:return: Manticore instance, initialized with a Linux State\n:rtype: Manticore", "id": "f16971:c0:m2"}
{"signature": "def resolve(self, symbol):", "body": "with open(self.binary_path, '<STR_LIT:rb>') as f:<EOL><INDENT>elffile = ELFFile(f)<EOL>for section in elffile.iter_sections():<EOL><INDENT>if not isinstance(section, SymbolTableSection):<EOL><INDENT>continue<EOL><DEDENT>symbols = section.get_symbol_by_name(symbol)<EOL>if not symbols:<EOL><INDENT>continue<EOL><DEDENT>return symbols[<NUM_LIT:0>].entry['<STR_LIT>']<EOL><DEDENT>raise ValueError(f\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "A helper method used to resolve a symbol name into a memory address when\ninjecting hooks for analysis.\n\n:param symbol: function name to be resolved\n:type symbol: string\n\n:param line: if more functions present, optional line number can be included\n:type line: int or None", "id": "f16971:c0:m9"}
{"signature": "def __repr__(self):", "body": "return f'<STR_LIT>'<EOL>", "docstring": "Returns the string representation of the map mapping.\n\n:rtype: str", "id": "f16973:c4:m8"}
{"signature": "def __init__(self, constraints, symbols=None, *args, **kwargs):", "body": "super().__init__(*args, **kwargs)<EOL>assert isinstance(constraints, ConstraintSet)<EOL>self._constraints = constraints<EOL>if symbols is None:<EOL><INDENT>self._symbols = {}<EOL><DEDENT>else:<EOL><INDENT>self._symbols = dict(symbols)<EOL><DEDENT>", "docstring": "Builds a memory.\n\n:param constraints:  a set of constraints\n:param symbols: Symbolic chunks", "id": "f16973:c11:m0"}
{"signature": "def __iter__(self):", "body": "for page_addr in sorted(self._page2map.keys()):<EOL><INDENT>start = page_addr * self.page_size<EOL>end = start + self.page_size<EOL>for addr in range(start, end):<EOL><INDENT>yield addr<EOL><DEDENT><DEDENT>", "docstring": "Iterate all valid addresses", "id": "f16973:c10:m33"}
{"signature": "def access_ok(self, access):", "body": "for c in access:<EOL><INDENT>if c not in self.perms:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Check if there is enough permissions for access", "id": "f16973:c4:m3"}
{"signature": "def _import_concrete_memory(self, from_addr, to_addr):", "body": "logger.debug(\"<STR_LIT>\".format(from_addr, to_addr, to_addr - from_addr))<EOL>for m in self.maps:<EOL><INDENT>span = interval_intersection(m.start, m.end, from_addr, to_addr)<EOL>if span is None:<EOL><INDENT>continue<EOL><DEDENT>start, stop = span<EOL>for addr in range(start, stop):<EOL><INDENT>if addr in self.backed_by_symbolic_store:<EOL><INDENT>continue<EOL><DEDENT>self.backing_array[addr] = Memory.read(self, addr, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>self.backed_by_symbolic_store.add(addr)<EOL><DEDENT><DEDENT>", "docstring": "for each address in this range need to read from concrete and write to symbolic\nit's possible that there will be invalid/unmapped addresses in this range. need to skip to next map if so\nalso need to mark all of these addresses as now in the symbolic store\n\n:param int from_addr:\n:param int to_addr:\n:return:", "id": "f16973:c12:m5"}
{"signature": "def _try_get_solutions(self, address, size, access, max_solutions=<NUM_LIT>, force=False):", "body": "assert issymbolic(address)<EOL>solutions = solver.get_all_values(self.constraints, address, maxcnt=max_solutions)<EOL>crashing_condition = False<EOL>for base in solutions:<EOL><INDENT>if not self.access_ok(slice(base, base + size), access, force):<EOL><INDENT>crashing_condition = Operators.OR(address == base, crashing_condition)<EOL><DEDENT><DEDENT>if solver.can_be_true(self.constraints, crashing_condition):<EOL><INDENT>raise InvalidSymbolicMemoryAccess(address, access, size, crashing_condition)<EOL><DEDENT>return solutions<EOL>", "docstring": "Try to solve for a symbolic address, checking permissions when reading/writing size bytes.\n\n:param Expression address: The address to solve for\n:param int size: How many bytes to check permissions for\n:param str access: 'r' or 'w'\n:param int max_solutions: Will raise if more solutions are found\n:param force: Whether to ignore permission failure\n:rtype: list", "id": "f16973:c11:m8"}
{"signature": "def __init__(self, start, size, perms, data_init=None, name=None, **kwargs):", "body": "super().__init__(start, size, perms, name, **kwargs)<EOL>self._data = bytearray(size)<EOL>if data_init is not None:<EOL><INDENT>assert len(data_init) <= size, '<STR_LIT>'<EOL>if isinstance(data_init[<NUM_LIT:0>], int):<EOL><INDENT>self._data[<NUM_LIT:0>:len(data_init)] = data_init<EOL><DEDENT>else:<EOL><INDENT>self._data[<NUM_LIT:0>:len(data_init)] = [ord(s) for s in data_init]<EOL><DEDENT><DEDENT>", "docstring": "Builds a concrete anonymous memory map.\n\n:param start: the first valid address of the map.\n:param size: the size of the map.\n:param perms: the access permissions of the map.\n:param data_init: the data to initialize the map.", "id": "f16973:c5:m0"}
{"signature": "def _normalize(c):", "body": "if isinstance(c, int):<EOL><INDENT>return bytes([c])<EOL><DEDENT>elif isinstance(c, str):<EOL><INDENT>return bytes([ord(c)])<EOL><DEDENT>else:<EOL><INDENT>return c<EOL><DEDENT>", "docstring": "Convert a byte-like value into a canonical byte (a value of type 'bytes' of len 1)\n\n:param c:\n:return:", "id": "f16973:m0"}
{"signature": "def __len__(self):", "body": "return self._end - self._start<EOL>", "docstring": "Returns the current size in bytes.", "id": "f16973:c4:m7"}
{"signature": "@abstractmethod<EOL><INDENT>def split(self, address):<DEDENT>", "body": "", "docstring": "Split the current map into two mappings\n\n:param address: The address at which to split the Map.", "id": "f16973:c4:m17"}
{"signature": "def mappings(self):", "body": "result = []<EOL>for m in self.maps:<EOL><INDENT>if isinstance(m, AnonMap):<EOL><INDENT>result.append((m.start, m.end, m.perms, <NUM_LIT:0>, '<STR_LIT>'))<EOL><DEDENT>elif isinstance(m, FileMap):<EOL><INDENT>result.append((m.start, m.end, m.perms, m._offset, m._filename))<EOL><DEDENT>else:<EOL><INDENT>result.append((m.start, m.end, m.perms, <NUM_LIT:0>, m.name))<EOL><DEDENT><DEDENT>return sorted(result)<EOL>", "docstring": "Returns a sorted list of all the mappings for this memory.\n\n:return: a list of mappings.\n:rtype: list", "id": "f16973:c10:m18"}
{"signature": "def __init__(self, addr, size, perms, filename, offset=<NUM_LIT:0>, overlay=None, **kwargs):", "body": "super().__init__(addr, size, perms, **kwargs)<EOL>assert isinstance(offset, int)<EOL>assert offset >= <NUM_LIT:0><EOL>self._filename = filename<EOL>self._offset = offset<EOL>with open(filename, '<STR_LIT:r>') as fileobject:<EOL><INDENT>fileobject.seek(<NUM_LIT:0>, <NUM_LIT:2>)<EOL>file_size = fileobject.tell()<EOL>self._mapped_size = min(size, file_size - offset)<EOL>self._data = mmap(fileobject.fileno(), offset, self._mapped_size)<EOL><DEDENT>if overlay is not None:<EOL><INDENT>self._overlay = dict(overlay)<EOL><DEDENT>else:<EOL><INDENT>self._overlay = dict()<EOL><DEDENT>", "docstring": "Builds a map of memory  initialized with the content of filename.\n\n:param addr: the first valid address of the file map.\n:param size: the size of the file map.\n:param perms: the access permissions of the file map.\n:param filename: the file to map in memory.\n:param offset: the offset into the file where to start the mapping. \\\n        This offset must be a multiple of pagebitsize.", "id": "f16973:c7:m0"}
{"signature": "def write(self, address, value, force=False):", "body": "size = len(value)<EOL>if issymbolic(address):<EOL><INDENT>solutions = self._try_get_solutions(address, size, '<STR_LIT:w>', force=force)<EOL>for offset in range(size):<EOL><INDENT>for base in solutions:<EOL><INDENT>condition = base == address<EOL>self._symbols.setdefault(base + offset, []).append((condition, value[offset]))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for offset in range(size):<EOL><INDENT>if issymbolic(value[offset]):<EOL><INDENT>if not self.access_ok(address + offset, '<STR_LIT:w>', force):<EOL><INDENT>raise InvalidMemoryAccess(address + offset, '<STR_LIT:w>')<EOL><DEDENT>self._symbols[address + offset] = [(True, value[offset])]<EOL><DEDENT>else:<EOL><INDENT>if address + offset in self._symbols:<EOL><INDENT>del self._symbols[address + offset]<EOL><DEDENT>super().write(address + offset, [value[offset]], force)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Write a value at address.\n:param address: The address at which to write\n:type address: int or long or Expression\n:param value: Bytes to write\n:type value: str or list\n:param force: Whether to ignore permissions", "id": "f16973:c11:m7"}
{"signature": "def __init__(self, parent, offset=<NUM_LIT:0>, perms=None, size=None, **kwargs):", "body": "assert isinstance(parent, Map)<EOL>assert offset >= <NUM_LIT:0> and offset < len(parent)<EOL>if size is None:<EOL><INDENT>size = len(parent) - offset<EOL><DEDENT>assert parent.start + offset + size <= parent.end<EOL>if perms is None:<EOL><INDENT>perms = parent.perms<EOL><DEDENT>super().__init__(parent.start + offset, size, perms, **kwargs)<EOL>self._parent = parent<EOL>self._parent.__setitem__ = False<EOL>self._cow = {}<EOL>", "docstring": "A copy on write copy of parent. Writes to the parent after a copy on\nwrite are unspecified.\n\n:param parent: the parent map.\n:param offset: an offset within the parent map from where to create the new map.\n:param perms: Permissions on new mapping, or None if inheriting.\n:param size: the size of the new map or max.", "id": "f16973:c8:m0"}
{"signature": "def _maps_in_range(self, start, end):", "body": "<EOL>addr = start<EOL>while addr < end:<EOL><INDENT>if addr not in self:<EOL><INDENT>addr += self.page_size<EOL><DEDENT>else:<EOL><INDENT>m = self._page2map[self._page(addr)]<EOL>yield m<EOL>addr = m.end<EOL><DEDENT><DEDENT>", "docstring": "Generates the list of maps that overlaps with the range [start:end]", "id": "f16973:c10:m20"}
{"signature": "def push_record_writes(self):", "body": "self._recording_stack.append([])<EOL>", "docstring": "Begin recording all writes. Retrieve all writes with `pop_record_writes()`", "id": "f16973:c10:m27"}
{"signature": "def munmap(self, start, size):", "body": "for addr in range(start, start + size):<EOL><INDENT>if len(self._symbols) == <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT>if addr in self._symbols:<EOL><INDENT>del self._symbols[addr]<EOL><DEDENT><DEDENT>super().munmap(start, size)<EOL>", "docstring": "Deletes the mappings for the specified address range and causes further\nreferences to addresses within the range to generate invalid memory\nreferences.\n\n:param start: the starting address to delete.\n:param size: the length of the unmapping.", "id": "f16973:c11:m5"}
{"signature": "def _get_offset(self, index):", "body": "if not self._in_range(index):<EOL><INDENT>raise IndexError('<STR_LIT>')<EOL><DEDENT>if isinstance(index, slice):<EOL><INDENT>index = slice(index.start - self.start, index.stop - self.start)<EOL><DEDENT>else:<EOL><INDENT>index -= self.start<EOL><DEDENT>return index<EOL>", "docstring": "Translates the index to the internal offsets.\n\nself.start   -> 0\nself.start+1 -> 1\n...\nself.end     -> len(self)", "id": "f16973:c4:m14"}
{"signature": "def read(self, address, size, force=False):", "body": "size = self._get_size(size)<EOL>assert not issymbolic(size)<EOL>if issymbolic(address):<EOL><INDENT>assert solver.check(self.constraints)<EOL>logger.debug(f'<STR_LIT>')<EOL>try:<EOL><INDENT>solutions = self._try_get_solutions(address, size, '<STR_LIT:r>', force=force)<EOL>assert len(solutions) > <NUM_LIT:0><EOL><DEDENT>except TooManySolutions as e:<EOL><INDENT>m, M = solver.minmax(self.constraints, address)<EOL>logger.debug(f'<STR_LIT>')<EOL>crashing_condition = True<EOL>for start, end, perms, offset, name in self.mappings():<EOL><INDENT>if start <= M + size and end >= m:<EOL><INDENT>if '<STR_LIT:r>' in perms:<EOL><INDENT>crashing_condition = Operators.AND(Operators.OR((address + size).ult(start), address.uge(end)), crashing_condition)<EOL><DEDENT><DEDENT><DEDENT>if solver.can_be_true(self.constraints, crashing_condition):<EOL><INDENT>raise InvalidSymbolicMemoryAccess(address, '<STR_LIT:r>', size, crashing_condition)<EOL><DEDENT>logger.info('<STR_LIT>')<EOL>condition = False<EOL>for base in e.solutions:<EOL><INDENT>condition = Operators.OR(address == base, condition)<EOL><DEDENT>from .state import ForkState<EOL>raise ForkState(\"<STR_LIT>\", condition)<EOL><DEDENT>condition = False<EOL>for base in solutions:<EOL><INDENT>condition = Operators.OR(address == base, condition)<EOL><DEDENT>result = []<EOL>for offset in range(size):<EOL><INDENT>for base in solutions:<EOL><INDENT>addr_value = base + offset<EOL>byte = Operators.ORD(self.map_containing(addr_value)[addr_value])<EOL>if addr_value in self._symbols:<EOL><INDENT>for condition, value in self._symbols[addr_value]:<EOL><INDENT>byte = Operators.ITEBV(<NUM_LIT:8>, condition, Operators.ORD(value), byte)<EOL><DEDENT><DEDENT>if len(result) > offset:<EOL><INDENT>result[offset] = Operators.ITEBV(<NUM_LIT:8>, address == base, byte, result[offset])<EOL><DEDENT>else:<EOL><INDENT>result.append(byte)<EOL><DEDENT>assert len(result) == offset + 
<NUM_LIT:1><EOL><DEDENT><DEDENT>return list(map(Operators.CHR, result))<EOL><DEDENT>else:<EOL><INDENT>result = list(map(Operators.ORD, super().read(address, size, force)))<EOL>for offset in range(size):<EOL><INDENT>if address + offset in self._symbols:<EOL><INDENT>for condition, value in self._symbols[address + offset]:<EOL><INDENT>if condition is True:<EOL><INDENT>result[offset] = Operators.ORD(value)<EOL><DEDENT>else:<EOL><INDENT>result[offset] = Operators.ITEBV(<NUM_LIT:8>, condition, Operators.ORD(value), result[offset])<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return list(map(Operators.CHR, result))<EOL><DEDENT>", "docstring": "Read a stream of potentially symbolic bytes from a potentially symbolic\naddress\n\n:param address: Where to read from\n:param size: How many bytes\n:param force: Whether to ignore permissions\n:rtype: list", "id": "f16973:c11:m6"}
{"signature": "def munmap(self, start, size):", "body": "start = self._floor(start)<EOL>end = self._ceil(start + size)<EOL>self.cpu._publish('<STR_LIT>', start, size)<EOL>for m in self._maps_in_range(start, end):<EOL><INDENT>self._del(m)<EOL>head, tail = m.split(start)<EOL>middle, tail = tail.split(end)<EOL>assert middle is not None<EOL>if head:<EOL><INDENT>self._add(head)<EOL><DEDENT>if tail:<EOL><INDENT>self._add(tail)<EOL><DEDENT><DEDENT>self.cpu._publish('<STR_LIT>', start, size)<EOL>logger.debug(f'<STR_LIT>')<EOL>", "docstring": "Deletes the mappings for the specified address range and causes further\nreferences to addresses within the range to generate invalid memory\nreferences.\n\n:param start: the starting address to delete.\n:param size: the length of the unmapping.", "id": "f16973:c10:m21"}
{"signature": "@abstractmethod<EOL><INDENT>def __getitem__(self, index):<DEDENT>", "body": "", "docstring": "Reads a byte from an address or a sequence of bytes from a range of addresses\n\n:param index: the address or slice where to obtain the bytes from.\n:return: the character or sequence at the specified address.\n:rtype: byte or array", "id": "f16973:c4:m15"}
{"signature": "@instruction<EOL><INDENT>def CMOVO(cpu, dest, src):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.OF, src.read(), dest.read()))<EOL>", "docstring": "Conditional move - Overflow.\n\nTests the status flags in the EFLAGS register and moves the source operand\n(second operand) to the destination operand (first operand) if the given\ntest condition is true.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m56"}
{"signature": "@instruction<EOL><INDENT>def STC(cpu):<DEDENT>", "body": "cpu.CF = True<EOL>", "docstring": "Sets CF\n:param cpu: current CPU.", "id": "f16975:c2:m173"}
{"signature": "@instruction<EOL><INDENT>def JNLE(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, Operators.AND(False == cpu.ZF, cpu.SF == cpu.OF), target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if not less or equal.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m136"}
{"signature": "@instruction<EOL><INDENT>def SETNG(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, Operators.OR(cpu.ZF, cpu.SF != cpu.OF), <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if not greater.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m86"}
{"signature": "@instruction<EOL><INDENT>def XADD(cpu, dest, src):<DEDENT>", "body": "MASK = (<NUM_LIT:1> << dest.size) - <NUM_LIT:1><EOL>SIGN_MASK = <NUM_LIT:1> << (dest.size - <NUM_LIT:1>)<EOL>arg0 = dest.read()<EOL>arg1 = src.read()<EOL>temp = (arg1 + arg0) & MASK<EOL>src.write(arg0)<EOL>dest.write(temp)<EOL>tempCF = Operators.OR(Operators.ULT(temp, arg0), Operators.ULT(temp, arg1))<EOL>cpu.CF = tempCF<EOL>cpu.AF = ((arg0 ^ arg1) ^ temp) & <NUM_LIT> != <NUM_LIT:0><EOL>cpu.ZF = temp == <NUM_LIT:0><EOL>cpu.SF = (temp & SIGN_MASK) != <NUM_LIT:0><EOL>cpu.OF = (((arg0 ^ arg1 ^ SIGN_MASK) & (temp ^ arg1)) & SIGN_MASK) != <NUM_LIT:0><EOL>cpu.PF = cpu._calculate_parity_flag(temp)<EOL>", "docstring": "Exchanges and adds.\n\nExchanges the first operand (destination operand) with the second operand\n(source operand), then loads the sum of the two values into the destination\noperand. The destination operand can be a register or a memory location;\nthe source operand is a register.\nThis instruction can be used with a LOCK prefix::\n\n        TEMP  =  SRC + DEST\n        SRC  =  DEST\n        DEST  =  TEMP\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m42"}
{"signature": "@instruction<EOL><INDENT>def CMOVLE(cpu, dest, src):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, Operators.OR(cpu.SF ^ cpu.OF, cpu.ZF), src.read(), dest.read()))<EOL>", "docstring": "Conditional move - Less or equal/not greater.\n\nTests the status flags in the EFLAGS register and moves the source operand\n(second operand) to the destination operand (first operand) if the given\ntest condition is true.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m55"}
{"signature": "@instruction<EOL><INDENT>def JNO(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, False == cpu.OF, target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if not overflow.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m137"}
{"signature": "def _PUNPCKL(cpu, dest, src, item_size):", "body": "assert dest.size == src.size<EOL>size = dest.size<EOL>dest_value = dest.read()<EOL>src_value = src.read()<EOL>mask = (<NUM_LIT:1> << item_size) - <NUM_LIT:1><EOL>res = <NUM_LIT:0><EOL>count = <NUM_LIT:0><EOL>for pos in range(<NUM_LIT:0>, size // item_size):<EOL><INDENT>if count >= size:<EOL><INDENT>break<EOL><DEDENT>item0 = Operators.ZEXTEND((dest_value >> (pos * item_size)) & mask, size)<EOL>item1 = Operators.ZEXTEND((src_value >> (pos * item_size)) & mask, size)<EOL>res |= item0 << count<EOL>count += item_size<EOL>res |= item1 << count<EOL>count += item_size<EOL><DEDENT>dest.write(res)<EOL>", "docstring": "Generic PUNPCKL", "id": "f16975:c2:m190"}
{"signature": "def _PUNPCKH(cpu, dest, src, item_size):", "body": "assert dest.size == src.size<EOL>size = dest.size<EOL>dest_value = dest.read()<EOL>src_value = src.read()<EOL>mask = (<NUM_LIT:1> << item_size) - <NUM_LIT:1><EOL>res = <NUM_LIT:0><EOL>count = <NUM_LIT:0><EOL>for pos in reversed(range(<NUM_LIT:0>, size // item_size)):<EOL><INDENT>if count >= size:<EOL><INDENT>break<EOL><DEDENT>item0 = Operators.ZEXTEND((dest_value >> (pos * item_size)) & mask, size)<EOL>item1 = Operators.ZEXTEND((src_value >> (pos * item_size)) & mask, size)<EOL>res = res << item_size<EOL>res |= item1<EOL>res = res << item_size<EOL>res |= item0<EOL>count += item_size * <NUM_LIT:2><EOL><DEDENT>dest.write(res)<EOL>", "docstring": "Generic PUNPCKH", "id": "f16975:c2:m191"}
{"signature": "@instruction<EOL><INDENT>def ADD(cpu, dest, src):<DEDENT>", "body": "cpu._ADD(dest, src, carry=False)<EOL>", "docstring": "Add.\n\nAdds the first operand (destination operand) and the second operand (source operand)\nand stores the result in the destination operand. When an immediate value is used as\nan operand, it is sign-extended to the length of the destination operand format.\nThe ADD instruction does not distinguish between signed or unsigned operands. Instead,\nthe processor evaluates the result for both data types and sets the OF and CF flags to\nindicate a carry in the signed or unsigned result, respectively. The SF flag indicates\nthe sign of the signed result::\n\n        DEST  =  DEST + SRC;\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m25"}
{"signature": "@instruction<EOL><INDENT>def LSS(cpu, dest, src):<DEDENT>", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Loads far pointer.\n\nLoads a far pointer (segment selector and offset) from the second operand\n(source operand) into a segment register and the first operand (destination\noperand). The source operand specifies a 48-bit or a 32-bit pointer in\nmemory depending on the current setting of the operand-size attribute\n(32 bits or 16 bits, respectively). The instruction opcode and the destination\noperand specify a segment register/general-purpose register pair. The\n16-bit segment selector from the source operand is loaded into the segment\nregister specified with the opcode (DS, SS, ES, FS, or GS). The 32-bit\nor 16-bit offset is loaded into the register specified with the destination\noperand.\nIn 64-bit mode, the instruction's default operation size is 32 bits. Using a\nREX prefix in the form of REX.W promotes operation to specify a source operand\nreferencing an 80-bit pointer (16-bit selector, 64-bit offset) in memory.\nIf one of these instructions is executed in protected mode, additional\ninformation from the segment descriptor pointed to by the segment selector\nin the source operand is loaded in the hidden part of the selected segment\nregister.\nAlso in protected mode, a null selector (values 0000 through 0003) can\nbe loaded into DS, ES, FS, or GS registers without causing a protection\nexception. 
(Any subsequent reference to a segment whose corresponding\nsegment register is loaded with a null selector, causes a general-protection\nexception (#GP) and no memory reference to the segment occurs.)::\n\n        IF ProtectedMode\n        THEN IF SS is loaded\n            THEN IF SegementSelector  =  null\n                THEN #GP(0);\n                FI;\n            ELSE IF Segment selector index is not within descriptor table limits\n                OR Segment selector RPL  CPL\n                OR Access rights indicate nonwritable data segment\n                OR DPL  CPL\n                THEN #GP(selector);\n                FI;\n            ELSE IF Segment marked not present\n                THEN #SS(selector);\n                FI;\n                SS  =  SegmentSelector(SRC);\n                SS  =  SegmentDescriptor([SRC]);\n            ELSE IF DS, ES, FS, or GS is loaded with non-null segment selector\n                THEN IF Segment selector index is not within descriptor table limits\n                    OR Access rights indicate segment neither data nor readable code segment\n                    OR Segment is data or nonconforming-code segment\n                    AND both RPL and CPL > DPL)\n                    THEN #GP(selector);\n                    FI;\n                ELSE IF Segment marked not present\n                    THEN #NP(selector);\n                    FI;\n                    SegmentRegister  =  SegmentSelector(SRC) AND RPL;\n                    SegmentRegister  =  SegmentDescriptor([SRC]);\n                ELSE IF DS, ES, FS, or GS is loaded with a null selector:\n                    SegmentRegister  =  NullSelector;\n                    SegmentRegister(DescriptorValidBit)  =  0; (*hidden flag; not accessible by software*)\n                FI;\n            FI;\n            IF (Real-Address or Virtual-8086 Mode)\n            THEN\n                SegmentRegister  =  SegmentSelector(SRC);\n            FI;\n            DEST  =  
Offset(SRC);", "id": "f16975:c2:m65"}
{"signature": "@instruction<EOL><INDENT>def BTS(cpu, dest, src):<DEDENT>", "body": "if dest.type == '<STR_LIT>':<EOL><INDENT>value = dest.read()<EOL>pos = src.read() % dest.size<EOL>cpu.CF = value & (<NUM_LIT:1> << pos) == <NUM_LIT:1> << pos<EOL>dest.write(value | (<NUM_LIT:1> << pos))<EOL><DEDENT>elif dest.type == '<STR_LIT>':<EOL><INDENT>addr, pos = cpu._getMemoryBit(dest, src)<EOL>base, size, ty = cpu.get_descriptor(cpu.DS)<EOL>addr += base<EOL>value = cpu.read_int(addr, <NUM_LIT:8>)<EOL>cpu.CF = value & (<NUM_LIT:1> << pos) == <NUM_LIT:1> << pos<EOL>value = value | (<NUM_LIT:1> << pos)<EOL>cpu.write_int(addr, value, <NUM_LIT:8>)<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError(f\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Bit test and set.\n\nSelects the bit in a bit string (specified with the first operand, called\nthe bit base) at the bit-position designated by the bit offset operand\n(second operand), stores the value of the bit in the CF flag, and sets\nthe selected bit in the bit string to 1.\n\n:param cpu: current CPU.\n:param dest: bit base operand.\n:param src: bit offset operand.", "id": "f16975:c2:m168"}
{"signature": "@instruction<EOL><INDENT>def RCR(cpu, dest, src):<DEDENT>", "body": "OperandSize = dest.size<EOL>count = src.read()<EOL>countMask = {<NUM_LIT:8>: <NUM_LIT>,<EOL><NUM_LIT:16>: <NUM_LIT>,<EOL><NUM_LIT:32>: <NUM_LIT>,<EOL><NUM_LIT:64>: <NUM_LIT>}[OperandSize]<EOL>tempCount = Operators.ZEXTEND((count & countMask) % (src.size + <NUM_LIT:1>), OperandSize)<EOL>value = dest.read()<EOL>if isinstance(tempCount, int) and tempCount == <NUM_LIT:0>:<EOL><INDENT>new_val = value<EOL>dest.write(new_val)<EOL><DEDENT>else:<EOL><INDENT>carry = Operators.ITEBV(OperandSize, cpu.CF, <NUM_LIT:1>, <NUM_LIT:0>)<EOL>left = value >> (tempCount - <NUM_LIT:1>)<EOL>right = value << (OperandSize - tempCount)<EOL>new_val = (left >> <NUM_LIT:1>) | (carry << (OperandSize - tempCount)) | (right << <NUM_LIT:1>)<EOL>dest.write(new_val)<EOL>cpu.CF = Operators.ITE(tempCount != <NUM_LIT:0>, (left & <NUM_LIT:1>) == <NUM_LIT:1>, cpu.CF)<EOL>s_MSB = ((new_val >> (OperandSize - <NUM_LIT:1>)) & <NUM_LIT>) == <NUM_LIT:1><EOL>s_MSB2 = ((new_val >> (OperandSize - <NUM_LIT:2>)) & <NUM_LIT>) == <NUM_LIT:1><EOL>cpu.OF = Operators.ITE(tempCount == <NUM_LIT:1>,<EOL>s_MSB ^ s_MSB2, cpu.OF)<EOL><DEDENT>", "docstring": "Rotates through carry right (RCR).\n\nShifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the\nsecond operand (count operand) and stores the result in the destination operand. The destination operand can be\na register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in\nthe CL register. In legacy and compatibility mode, the processor restricts the count to a number between 0 and 31\nby masking all the bits in the count operand except the 5 least-significant bits.\n\nRotate through carry right (RCR) instructions shift all the bits toward less significant bit positions, except\nfor the least-significant bit, which is rotated to the most-significant bit location. 
The RCR instruction shifts the\nCF flag into the most-significant bit and shifts the least-significant bit into the CF flag.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: count operand.", "id": "f16975:c2:m152"}
{"signature": "@instruction<EOL><INDENT>def RDTSC(cpu):<DEDENT>", "body": "val = cpu.icount<EOL>cpu.RAX = val & <NUM_LIT><EOL>cpu.RDX = (val >> <NUM_LIT:32>) & <NUM_LIT><EOL>", "docstring": "Reads time-stamp counter.\n\nLoads the current value of the processor's time-stamp counter into the\nEDX:EAX registers.  The time-stamp counter is contained in a 64-bit\nMSR. The high-order 32 bits of the MSR are loaded into the EDX\nregister, and the low-order 32 bits are loaded into the EAX register.\nThe processor increments the time-stamp counter MSR every clock cycle\nand resets it to 0 whenever the processor is reset.\n\n:param cpu: current CPU.", "id": "f16975:c2:m237"}
{"signature": "@instruction<EOL><INDENT>def XORPS(cpu, dest, src):<DEDENT>", "body": "res = dest.write(dest.read() ^ src.read())<EOL>", "docstring": "Performs a bitwise logical OR operation on the source operand (second operand) and the destination operand\n(first operand) and stores the result in the destination operand. The source operand can be an MMX technology\nregister or a 64-bit memory location or it can be an XMM register or a 128-bit memory location. The destination\noperand can be an MMX technology register or an XMM register. Each bit of the result is set to 1 if either\nor both of the corresponding bits of the first and second operands are 1; otherwise, it is set to 0.", "id": "f16975:c2:m249"}
{"signature": "@instruction<EOL><INDENT>def SETNLE(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, Operators.AND(cpu.ZF == False, cpu.SF == cpu.OF), <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if not less or equal.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m89"}
{"signature": "def SHL(cpu, dest, src):", "body": "return cpu.SAL(dest, src)<EOL>", "docstring": "The shift logical left.\n\nThe shift arithmetic left (SAL) and shift logical left (SHL) instructions perform the same operation.\n\n:param cpu: current cpu.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m156"}
{"signature": "@instruction<EOL><INDENT>def JNGE(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, (cpu.SF != cpu.OF), target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if not greater or equal.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m134"}
{"signature": "@instruction<EOL><INDENT>def SUB(cpu, dest, src):<DEDENT>", "body": "cpu._SUB(dest, src, carry=False)<EOL>", "docstring": "Subtract.\n\nSubtracts the second operand (source operand) from the first operand\n(destination operand) and stores the result in the destination operand.\nThe destination operand can be a register or a memory location; the\nsource operand can be an immediate, register, or memory location.\n(However, two memory operands cannot be used in one instruction.) When\nan immediate value is used as an operand, it is sign-extended to the\nlength of the destination operand format.\nThe SUB instruction does not distinguish between signed or unsigned\noperands. Instead, the processor evaluates the result for both\ndata types and sets the OF and CF flags to indicate a borrow in the\nsigned or unsigned result, respectively. The SF flag indicates the sign\nof the signed result::\n\n    DEST  =  DEST - SRC;\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m40"}
{"signature": "@instruction<EOL><INDENT>def SYSENTER(cpu):<DEDENT>", "body": "raise Syscall()<EOL>", "docstring": "Calls to system\n\nExecutes a fast call to a level 0 system procedure or routine\n\n:param cpu: current CPU.", "id": "f16975:c2:m272"}
{"signature": "@instruction<EOL><INDENT>def CMOVG(cpu, dest, src):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, Operators.AND(cpu.ZF == <NUM_LIT:0>, cpu.SF == cpu.OF), src.read(), dest.read()))<EOL>", "docstring": "Conditional move - Greater.\n\nTests the status flags in the EFLAGS register and moves the source operand\n(second operand) to the destination operand (first operand) if the given\ntest condition is true.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m52"}
{"signature": "@instruction<EOL><INDENT>def JPO(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, False == cpu.PF, target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if parity odd.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m144"}
{"signature": "@instruction<EOL><INDENT>def JE(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.ZF, target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if equal.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m122"}
{"signature": "@instruction<EOL><INDENT>def PSHUFD(cpu, op0, op1, op3):<DEDENT>", "body": "size = op0.size<EOL>arg0 = op0.read()<EOL>arg1 = op1.read()<EOL>order = Operators.ZEXTEND(op3.read(), size)<EOL>arg0 = arg0 & <NUM_LIT><EOL>arg0 |= ((arg1 >> (((order >> <NUM_LIT:0>) & <NUM_LIT:3>) * <NUM_LIT:32>)) & <NUM_LIT>)<EOL>arg0 |= ((arg1 >> (((order >> <NUM_LIT:2>) & <NUM_LIT:3>) * <NUM_LIT:32>)) & <NUM_LIT>) << <NUM_LIT:32><EOL>arg0 |= ((arg1 >> (((order >> <NUM_LIT:4>) & <NUM_LIT:3>) * <NUM_LIT:32>)) & <NUM_LIT>) << <NUM_LIT:64><EOL>arg0 |= ((arg1 >> (((order >> <NUM_LIT:6>) & <NUM_LIT:3>) * <NUM_LIT:32>)) & <NUM_LIT>) << <NUM_LIT><EOL>op0.write(arg0)<EOL>", "docstring": "Packed shuffle doublewords.\n\nCopies doublewords from source operand (second operand) and inserts them in the destination operand\n(first operand) at locations selected with the order operand (third operand).\n\n:param cpu: current CPU.\n:param op0: destination operand.\n:param op1: source operand.\n:param op3: order operand.", "id": "f16975:c2:m202"}
{"signature": "@instruction<EOL><INDENT>def SETPO(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.PF == False, <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if parity odd.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m97"}
{"signature": "@instruction<EOL><INDENT>def BTR(cpu, dest, src):<DEDENT>", "body": "if dest.type == '<STR_LIT>':<EOL><INDENT>value = dest.read()<EOL>pos = src.read() % dest.size<EOL>cpu.CF = value & (<NUM_LIT:1> << pos) == <NUM_LIT:1> << pos<EOL>dest.write(value & ~(<NUM_LIT:1> << pos))<EOL><DEDENT>elif dest.type == '<STR_LIT>':<EOL><INDENT>addr, pos = cpu._getMemoryBit(dest, src)<EOL>base, size, ty = cpu.get_descriptor(cpu.DS)<EOL>addr += base<EOL>value = cpu.read_int(addr, <NUM_LIT:8>)<EOL>cpu.CF = value & (<NUM_LIT:1> << pos) == <NUM_LIT:1> << pos<EOL>value = value & ~(<NUM_LIT:1> << pos)<EOL>cpu.write_int(addr, value, <NUM_LIT:8>)<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError(f\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Bit test and reset.\n\nSelects the bit in a bit string (specified with the first operand, called\nthe bit base) at the bit-position designated by the bit offset operand\n(second operand), stores the value of the bit in the CF flag, and clears\nthe selected bit in the bit string to 0.\n\n:param cpu: current CPU.\n:param dest: bit base operand.\n:param src: bit offset operand.", "id": "f16975:c2:m167"}
{"signature": "@instruction<EOL><INDENT>def BTC(cpu, dest, src):<DEDENT>", "body": "if dest.type == '<STR_LIT>':<EOL><INDENT>value = dest.read()<EOL>pos = src.read() % dest.size<EOL>cpu.CF = value & (<NUM_LIT:1> << pos) == <NUM_LIT:1> << pos<EOL>dest.write(value ^ (<NUM_LIT:1> << pos))<EOL><DEDENT>elif dest.type == '<STR_LIT>':<EOL><INDENT>addr, pos = cpu._getMemoryBit(dest, src)<EOL>base, size, ty = cpu.get_descriptor(cpu.DS)<EOL>addr += base<EOL>value = cpu.read_int(addr, <NUM_LIT:8>)<EOL>cpu.CF = value & (<NUM_LIT:1> << pos) == <NUM_LIT:1> << pos<EOL>value = value ^ (<NUM_LIT:1> << pos)<EOL>cpu.write_int(addr, value, <NUM_LIT:8>)<EOL><DEDENT>else:<EOL><INDENT>raise NotImplementedError(f\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Bit test and complement.\n\nSelects the bit in a bit string (specified with the first operand, called\nthe bit base) at the bit-position designated by the bit offset operand\n(second operand), stores the value of the bit in the CF flag, and complements\nthe selected bit in the bit string.\n\n:param cpu: current CPU.\n:param dest: bit base operand.\n:param src: bit offset operand.", "id": "f16975:c2:m166"}
{"signature": "@instruction<EOL><INDENT>def JLE(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, Operators.OR(cpu.ZF, cpu.SF != cpu.OF), target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if less or equal.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m126"}
{"signature": "def __str__(self):", "body": "CHEADER = '<STR_LIT>'<EOL>CBLUE = '<STR_LIT>'<EOL>CGREEN = '<STR_LIT>'<EOL>CWARNING = '<STR_LIT>'<EOL>CFAIL = '<STR_LIT>'<EOL>CEND = '<STR_LIT>'<EOL>pos = <NUM_LIT:0><EOL>result = \"<STR_LIT>\"<EOL>try:<EOL><INDENT>instruction = self.instruction<EOL>result += f\"<STR_LIT>\"<EOL><DEDENT>except BaseException:<EOL><INDENT>result += \"<STR_LIT>\"<EOL><DEDENT>regs = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>for reg_name in regs:<EOL><INDENT>value = self.read_register(reg_name)<EOL>if issymbolic(value):<EOL><INDENT>result += f'<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>result += f\"<STR_LIT>\"<EOL><DEDENT>pos = <NUM_LIT:0><EOL><DEDENT>pos = <NUM_LIT:0><EOL>for reg_name in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>value = self.read_register(reg_name)<EOL>if issymbolic(value):<EOL><INDENT>result += f'<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>result += f'<STR_LIT>'<EOL><DEDENT>pos = <NUM_LIT:0><EOL><DEDENT>for reg_name in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>base, size, ty = self.get_descriptor(self.read_register(reg_name))<EOL>result += f'<STR_LIT>'<EOL><DEDENT>for reg_name in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>value = getattr(self, reg_name)<EOL>result += f'<STR_LIT>'<EOL>pos = <NUM_LIT:0><EOL><DEDENT>return result<EOL>", "docstring": "Returns a string representation of cpu state\n\n:rtype: str\n:return: a string containing the name and current value for all the registers.", "id": "f16975:c8:m1"}
{"signature": "@instruction<EOL><INDENT>def LES(cpu, dest, src):<DEDENT>", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Not implemented.", "id": "f16975:c2:m62"}
{"signature": "@instruction<EOL><INDENT>def CDQE(cpu):<DEDENT>", "body": "cpu.RAX = Operators.SEXTEND(cpu.EAX, <NUM_LIT:32>, <NUM_LIT:64>)<EOL>", "docstring": "RAX = sign-extend of EAX.", "id": "f16975:c2:m233"}
{"signature": "@instruction<EOL><INDENT>def SHRX(cpu, dest, src, count):<DEDENT>", "body": "OperandSize = dest.size<EOL>count = count.read()<EOL>countMask = {<NUM_LIT:8>: <NUM_LIT>,<EOL><NUM_LIT:16>: <NUM_LIT>,<EOL><NUM_LIT:32>: <NUM_LIT>,<EOL><NUM_LIT:64>: <NUM_LIT>}[OperandSize]<EOL>tempCount = Operators.ZEXTEND(count & countMask, dest.size)<EOL>tempDest = value = src.read()<EOL>res = dest.write(Operators.ITEBV(dest.size, tempCount == <NUM_LIT:0>, tempDest, value >> tempCount))<EOL>", "docstring": "The shift arithmetic right.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: count operand.", "id": "f16975:c2:m184"}
{"signature": "@instruction<EOL><INDENT>def PUNPCKLBW(cpu, dest, src):<DEDENT>", "body": "cpu._PUNPCKL(dest, src, <NUM_LIT:8>)<EOL>", "docstring": "Interleaves the low-order bytes of the source and destination operands.\n\nUnpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords)\nof the destination operand (first operand) and source operand (second operand) into the\ndestination operand.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m196"}
{"signature": "@instruction<EOL><INDENT>def PMAXUB(cpu, dest, src):<DEDENT>", "body": "dest_value = dest.read()<EOL>src_value = src.read()<EOL>result = <NUM_LIT:0><EOL>for pos in range(<NUM_LIT:0>, dest.size, <NUM_LIT:8>):<EOL><INDENT>itema = (dest_value >> pos) & <NUM_LIT><EOL>itemb = (src_value >> pos) & <NUM_LIT><EOL>result |= Operators.ITEBV(dest.size, itema > itemb, itema, itemb) << pos<EOL><DEDENT>dest.write(result)<EOL>", "docstring": "PMAXUB: returns maximum of packed unsigned byte integers in the dest operand\n\nPerforms a SIMD compare of the packed unsigned byte in the second source operand\nand the first source operand and returns the maximum value for each pair of\nintegers to the destination operand.\n\nExample :\n$xmm1.v16_int8 = {..., 0xf2, 0xd1}\n$xmm2.v16_int8 = {..., 0xd2, 0xf1}\n# after pmaxub xmm1, xmm2, we get\n$xmm1.v16_int8 = {..., 0xf2, 0xf1}", "id": "f16975:c2:m187"}
{"signature": "@instruction<EOL><INDENT>def SETLE(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, Operators.OR(cpu.ZF, cpu.SF != cpu.OF), <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if less or equal.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m79"}
{"signature": "@instruction<EOL><INDENT>def LAHF(cpu):<DEDENT>", "body": "used_regs = (cpu.SF, cpu.ZF, cpu.AF, cpu.PF, cpu.CF)<EOL>is_expression = any(issymbolic(x) for x in used_regs)<EOL>def make_flag(val, offset):<EOL><INDENT>if is_expression:<EOL><INDENT>return Operators.ITEBV(<NUM_LIT:8>, val,<EOL>BitVecConstant(<NUM_LIT:8>, <NUM_LIT:1> << offset),<EOL>BitVecConstant(<NUM_LIT:8>, <NUM_LIT:0>))<EOL><DEDENT>else:<EOL><INDENT>return val << offset<EOL><DEDENT><DEDENT>cpu.AH = (make_flag(cpu.SF, <NUM_LIT:7>) |<EOL>make_flag(cpu.ZF, <NUM_LIT:6>) |<EOL>make_flag(<NUM_LIT:0>, <NUM_LIT:5>) |<EOL>make_flag(cpu.AF, <NUM_LIT:4>) |<EOL>make_flag(<NUM_LIT:0>, <NUM_LIT:3>) |<EOL>make_flag(cpu.PF, <NUM_LIT:2>) |<EOL>make_flag(<NUM_LIT:1>, <NUM_LIT:1>) |<EOL>make_flag(cpu.CF, <NUM_LIT:0>))<EOL>", "docstring": "Loads status flags into AH register.\n\nMoves the low byte of the EFLAGS register (which includes status flags\nSF, ZF, AF, PF, and CF) to the AH register. Reserved bits 1, 3, and 5\nof the EFLAGS register are set in the AH register::\n\n        AH  =  EFLAGS(SF:ZF:0:AF:0:PF:1:CF);\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m60"}
{"signature": "@instruction<EOL><INDENT>def CMOVP(cpu, dest, src):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.PF, src.read(), dest.read()))<EOL>", "docstring": "Conditional move - Parity/parity even.\n\nTests the status flags in the EFLAGS register and moves the source operand\n(second operand) to the destination operand (first operand) if the given\ntest condition is true.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m50"}
{"signature": "@instruction<EOL><INDENT>def PSLLD(cpu, op0, op1):<DEDENT>", "body": "arg0 = op0.read()<EOL>arg1 = op1.read()<EOL>res = <NUM_LIT:0><EOL>for i in range(<NUM_LIT:0>, op0.size, <NUM_LIT:32>):<EOL><INDENT>res |= ((Operators.EXTRACT(arg0, i, <NUM_LIT:32>) << arg1) & <NUM_LIT>) << i<EOL><DEDENT>op0.write(res)<EOL>", "docstring": "PSLLD: Packed shift left logical with double words\n\nShifts the destination operand (first operand) to the left by the number of bytes specified\nin the count operand (second operand). The empty low-order bytes are cleared (set to all 0s).\nIf the value specified by the count operand is greater than 15, the destination operand is\nset to all 0s. The count operand is an 8-bit immediate.\n\nExample :\n$xmm1.v16_int8 = {..., 0x00000003, 0x00000001}\n# after pslld xmm1, 2, we get\n$xmm1.v16_int8 = {..., 0x0000000c, 0x00000004}", "id": "f16975:c2:m210"}
{"signature": "@instruction<EOL><INDENT>def JG(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, Operators.AND(cpu.ZF == False, cpu.SF == cpu.OF), target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if greater.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m123"}
{"signature": "@instruction<EOL><INDENT>def STD(cpu):<DEDENT>", "body": "cpu.DF = True<EOL>", "docstring": "Sets direction flag.\n\nSets the DF flag in the EFLAGS register. When the DF flag is set to 1, string operations decrement\nthe index registers (ESI and/or EDI)::\n\n    DF  =  1;\n\n:param cpu: current CPU.", "id": "f16975:c2:m171"}
{"signature": "@instruction<EOL><INDENT>def SETA(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, Operators.OR(cpu.CF, cpu.ZF) == False, <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if above.\n\nSets the destination operand to 0 or 1 depending on the settings of the status flags (CF, SF, OF, ZF, and PF, 1, 0) in the\nEFLAGS register. The destination operand points to a byte register or a byte in memory. The condition code suffix\n(cc, 1, 0) indicates the condition being tested for::\n        IF condition\n        THEN\n            DEST = 1;\n        ELSE\n            DEST = 0;\n        FI;\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m70"}
{"signature": "@instruction<EOL><INDENT>def PREFETCHT0(cpu, arg):<DEDENT>", "body": "", "docstring": "Not implemented.\n\nPerforms no operation.", "id": "f16975:c2:m261"}
{"signature": "@instruction<EOL><INDENT>def XGETBV(cpu):<DEDENT>", "body": "<EOL>cpu.EAX, cpu.EDX = <NUM_LIT:7>, <NUM_LIT:0><EOL>", "docstring": "XGETBV instruction.\n\nReads the contents of the extended cont register (XCR) specified in the ECX register into registers EDX:EAX.\nImplemented only for ECX = 0.\n\n:param cpu: current CPU.", "id": "f16975:c2:m14"}
{"signature": "@instruction<EOL><INDENT>def JA(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, Operators.AND(cpu.CF == False, cpu.ZF == False), target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if above.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m114"}
{"signature": "def __init__(self, memory, *args, **kwargs):", "body": "super().__init__(AMD64RegFile(aliases={'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>'}, ),<EOL>memory,<EOL>*args,<EOL>**kwargs)<EOL>", "docstring": "Builds a CPU model.\n:param memory: memory object for this CPU.", "id": "f16975:c8:m0"}
{"signature": "@instruction<EOL><INDENT>def SYSCALL(cpu):<DEDENT>", "body": "cpu.RCX = cpu.RIP<EOL>cpu.R11 = cpu.RFLAGS<EOL>raise Syscall()<EOL>", "docstring": "Calls to interrupt procedure.\n\nThe INT n instruction generates a call to the interrupt or exception handler specified\nwith the destination operand. The INT n instruction is the general mnemonic for executing\na software-generated call to an interrupt handler. The INTO instruction is a special\nmnemonic for calling overflow exception (#OF), interrupt vector number 4. The overflow\ninterrupt checks the OF flag in the EFLAGS register and calls the overflow interrupt handler\nif the OF flag is set to 1.\n\n:param cpu: current CPU.", "id": "f16975:c2:m244"}
{"signature": "@instruction<EOL><INDENT>def SHLD(cpu, dest, src, count):<DEDENT>", "body": "OperandSize = dest.size<EOL>tempCount = Operators.ZEXTEND(count.read(), OperandSize) & (OperandSize - <NUM_LIT:1>)<EOL>arg0 = dest.read()<EOL>arg1 = src.read()<EOL>MASK = ((<NUM_LIT:1> << OperandSize) - <NUM_LIT:1>)<EOL>t0 = (arg0 << tempCount)<EOL>t1 = arg1 >> (OperandSize - tempCount)<EOL>res = Operators.ITEBV(OperandSize, tempCount == <NUM_LIT:0>, arg0, t0 | t1)<EOL>res = res & MASK<EOL>dest.write(res)<EOL>if isinstance(tempCount, int) and tempCount == <NUM_LIT:0>:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>SIGN_MASK = <NUM_LIT:1> << (OperandSize - <NUM_LIT:1>)<EOL>lastbit = <NUM_LIT:0> != ((arg0 << (tempCount - <NUM_LIT:1>)) & SIGN_MASK)<EOL>cpu._set_shiftd_flags(OperandSize, arg0, res, lastbit, tempCount)<EOL><DEDENT>", "docstring": "Double precision shift right.\n\nShifts the first operand (destination operand) to the left the number of bits specified by the third operand\n(count operand). The second operand (source operand) provides bits to shift in from the right (starting with\nthe least significant bit of the destination operand).\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.\n:param count: count operand", "id": "f16975:c2:m161"}
{"signature": "@instruction<EOL><INDENT>def PUSH(cpu, src):<DEDENT>", "body": "<EOL>size = src.size<EOL>v = src.read()<EOL>if size != <NUM_LIT:64> and size != cpu.address_bit_size // <NUM_LIT:2>:<EOL><INDENT>v = Operators.SEXTEND(v, size, cpu.address_bit_size)<EOL>size = cpu.address_bit_size<EOL><DEDENT>cpu.push(v, size)<EOL>", "docstring": "Pushes a value onto the stack.\n\nDecrements the stack pointer and then stores the source operand on the top of the stack.\n\n:param cpu: current CPU.\n:param src: source operand.", "id": "f16975:c2:m103"}
{"signature": "@instruction<EOL><INDENT>def MOVSX(cpu, op0, op1):<DEDENT>", "body": "op0.write(Operators.SEXTEND(op1.read(), op1.size, op0.size))<EOL>", "docstring": "Moves with sign-extension.\n\nCopies the contents of the source operand (register or memory location) to the destination\noperand (register) and sign extends the value to 16::\n\n        OP0  =  SignExtend(OP1);\n\n:param cpu: current CPU.\n:param op0: destination operand.\n:param op1: source operand.", "id": "f16975:c2:m230"}
{"signature": "def _getMemoryBit(cpu, bitbase, bitoffset):", "body": "assert bitbase.type == '<STR_LIT>'<EOL>assert bitbase.size >= bitoffset.size<EOL>addr = bitbase.address()<EOL>offt = Operators.SEXTEND(bitoffset.read(), bitoffset.size, bitbase.size)<EOL>offt_is_neg = offt >= (<NUM_LIT:1> << (bitbase.size - <NUM_LIT:1>))<EOL>offt_in_bytes = offt // <NUM_LIT:8><EOL>bitpos = offt % <NUM_LIT:8><EOL>new_addr = addr + Operators.ITEBV(bitbase.size, offt_is_neg, -offt_in_bytes, offt_in_bytes)<EOL>return (new_addr, bitpos)<EOL>", "docstring": "Calculate address and bit offset given a base address and a bit offset\n            relative to that address (in the form of asm operands)", "id": "f16975:c2:m162"}
{"signature": "@instruction<EOL><INDENT>def SETAE(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.CF == False, <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if above or equal.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m71"}
{"signature": "@instruction<EOL><INDENT>def CMOVZ(cpu, dest, src):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.ZF, src.read(), dest.read()))<EOL>", "docstring": "Conditional move - Equal/zero.\n\nTests the status flags in the EFLAGS register and moves the source operand\n(second operand) to the destination operand (first operand) if the given\ntest condition is true.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m48"}
{"signature": "@instruction<EOL><INDENT>def JNB(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.CF == False, target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if not below.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m129"}
{"signature": "@instruction<EOL><INDENT>def JS(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.SF, target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if sign.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m145"}
{"signature": "@instruction<EOL><INDENT>def SETZ(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.ZF, <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if zero.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m99"}
{"signature": "@instruction<EOL><INDENT>def NEG(cpu, dest):<DEDENT>", "body": "source = dest.read()<EOL>res = dest.write(-source)<EOL>cpu._calculate_logic_flags(dest.size, res)<EOL>cpu.CF = source != <NUM_LIT:0><EOL>cpu.AF = (res & <NUM_LIT>) != <NUM_LIT><EOL>", "docstring": "Two's complement negation.\n\nReplaces the value of operand (the destination operand) with its two's complement.\n(This operation is equivalent to subtracting the operand from 0.) The destination operand is\nlocated in a general-purpose register or a memory location::\n\n        IF DEST  =  0\n        THEN CF  =  0\n        ELSE CF  =  1;\n        FI;\n        DEST  =  - (DEST)\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m38"}
{"signature": "@instruction<EOL><INDENT>def SHRD(cpu, dest, src, count):<DEDENT>", "body": "OperandSize = dest.size<EOL>MASK = ((<NUM_LIT:1> << OperandSize) - <NUM_LIT:1>)<EOL>tempCount = Operators.ZEXTEND(count.read(), OperandSize) & (OperandSize - <NUM_LIT:1>)<EOL>if isinstance(tempCount, int) and tempCount == <NUM_LIT:0>:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>arg0 = dest.read()<EOL>arg1 = src.read()<EOL>res = Operators.ITEBV(OperandSize, tempCount == <NUM_LIT:0>, arg0, (arg0 >> tempCount) | (arg1 << (dest.size - tempCount)))<EOL>res = res & MASK<EOL>dest.write(res)<EOL>lastbit = <NUM_LIT:0> != (arg0 >> (tempCount - <NUM_LIT:1>)) & <NUM_LIT:1><EOL>cpu._set_shiftd_flags(OperandSize, arg0, res, lastbit, tempCount)<EOL><DEDENT>", "docstring": "Double precision shift right.\n\nShifts the first operand (destination operand) to the right the number of bits specified by the third operand\n(count operand). The second operand (source operand) provides bits to shift in from the left (starting with\nthe most significant bit of the destination operand).\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.\n:param count: count operand", "id": "f16975:c2:m160"}
{"signature": "@instruction<EOL><INDENT>def POPCNT(cpu, dest, src):<DEDENT>", "body": "count = <NUM_LIT:0><EOL>source = src.read()<EOL>for i in range(src.size):<EOL><INDENT>count += Operators.ITEBV(dest.size, (source >> i) & <NUM_LIT:1> == <NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:0>)<EOL><DEDENT>dest.write(count)<EOL>cpu.OF = False<EOL>cpu.SF = False<EOL>cpu.AF = False<EOL>cpu.CF = False<EOL>cpu.PF = False<EOL>cpu.ZF = source == <NUM_LIT:0><EOL>", "docstring": "This instruction calculates of number of bits set to 1 in the second\noperand (source) and returns the count in the first operand (a destination\nregister).\nCount = 0;\nFor (i=0; i < OperandSize; i++) {\n    IF (SRC[ i] = 1) // i'th bit\n        THEN Count++;\n    FI;\n}\nDEST = Count;\nFlags Affected\nOF, SF, ZF, AF, CF, PF are all cleared.\nZF is set if SRC = 0, otherwise ZF is cleared", "id": "f16975:c2:m169"}
{"signature": "@instruction<EOL><INDENT>def JNP(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, False == cpu.PF, target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if not parity.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m138"}
{"signature": "@instruction<EOL><INDENT>def PADDD(cpu, op0, op1):<DEDENT>", "body": "arg0 = op0.read()<EOL>arg1 = op1.read()<EOL>res = <NUM_LIT:0><EOL>for i in range(<NUM_LIT:0>, op0.size, <NUM_LIT:32>):<EOL><INDENT>res |= ((Operators.EXTRACT(arg0, i, <NUM_LIT:32>) + Operators.EXTRACT(arg1, i, <NUM_LIT:32>)) & <NUM_LIT>) << i<EOL><DEDENT>op0.write(res)<EOL>", "docstring": "PADDD: Packed add with double words\n\nPerforms a SIMD add of the packed integers from the source operand (second operand)\nand the destination operand (first operand), and stores the packed integer results\nin the destination operand\n\nExample :\n$xmm1.v16_int8 = {..., 0x00000003, 0x00000001}\n$xmm2.v16_int8 = {..., 0x00000004, 0x00000002}\n# after paddd xmm1, xmm2, we get\n$xmm1.v16_int8 = {..., 0x00000007, 0x00000003}", "id": "f16975:c2:m208"}
{"signature": "@instruction<EOL><INDENT>def CMOVNZ(cpu, dest, src):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.ZF == False, src.read(), dest.read()))<EOL>", "docstring": "Conditional move - Not equal/not zero.\n\nTests the status flags in the EFLAGS register and moves the source operand\n(second operand) to the destination operand (first operand) if the given\ntest condition is true.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m49"}
{"signature": "@instruction<EOL><INDENT>def SETO(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.OF, <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if overflow.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m94"}
{"signature": "@instruction<EOL><INDENT>def IMUL(cpu, *operands):<DEDENT>", "body": "dest = operands[<NUM_LIT:0>]<EOL>OperandSize = dest.size<EOL>reg_name_h = {<NUM_LIT:8>: '<STR_LIT>', <NUM_LIT:16>: '<STR_LIT>', <NUM_LIT:32>: '<STR_LIT>', <NUM_LIT:64>: '<STR_LIT>'}[OperandSize]<EOL>reg_name_l = {<NUM_LIT:8>: '<STR_LIT>', <NUM_LIT:16>: '<STR_LIT>', <NUM_LIT:32>: '<STR_LIT>', <NUM_LIT:64>: '<STR_LIT>'}[OperandSize]<EOL>arg0 = dest.read()<EOL>arg1 = None<EOL>arg2 = None<EOL>res = None<EOL>if len(operands) == <NUM_LIT:1>:<EOL><INDENT>arg1 = cpu.read_register(reg_name_l)<EOL>temp = (Operators.SEXTEND(arg0, OperandSize, OperandSize * <NUM_LIT:2>) *<EOL>Operators.SEXTEND(arg1, OperandSize, OperandSize * <NUM_LIT:2>))<EOL>temp = temp & ((<NUM_LIT:1> << (OperandSize * <NUM_LIT:2>)) - <NUM_LIT:1>)<EOL>cpu.write_register(reg_name_l,<EOL>Operators.EXTRACT(temp, <NUM_LIT:0>, OperandSize))<EOL>cpu.write_register(reg_name_h,<EOL>Operators.EXTRACT(temp, OperandSize, OperandSize))<EOL>res = Operators.EXTRACT(temp, <NUM_LIT:0>, OperandSize)<EOL><DEDENT>elif len(operands) == <NUM_LIT:2>:<EOL><INDENT>arg1 = operands[<NUM_LIT:1>].read()<EOL>arg1 = Operators.SEXTEND(arg1, OperandSize, OperandSize * <NUM_LIT:2>)<EOL>temp = Operators.SEXTEND(arg0, OperandSize, OperandSize * <NUM_LIT:2>) * arg1<EOL>temp = temp & ((<NUM_LIT:1> << (OperandSize * <NUM_LIT:2>)) - <NUM_LIT:1>)<EOL>res = dest.write(Operators.EXTRACT(temp, <NUM_LIT:0>, OperandSize))<EOL><DEDENT>else:<EOL><INDENT>arg1 = operands[<NUM_LIT:1>].read()<EOL>arg2 = operands[<NUM_LIT:2>].read()<EOL>temp = (Operators.SEXTEND(arg1, OperandSize, OperandSize * <NUM_LIT:2>) *<EOL>Operators.SEXTEND(arg2, operands[<NUM_LIT:2>].size, OperandSize * <NUM_LIT:2>))<EOL>temp = temp & ((<NUM_LIT:1> << (OperandSize * <NUM_LIT:2>)) - <NUM_LIT:1>)<EOL>res = dest.write(Operators.EXTRACT(temp, <NUM_LIT:0>, OperandSize))<EOL><DEDENT>cpu.CF = (Operators.SEXTEND(res, OperandSize, OperandSize * <NUM_LIT:2>) != temp)<EOL>cpu.OF = cpu.CF<EOL>", "docstring": 
"Signed multiply.\n\nPerforms a signed multiplication of two operands. This instruction has\nthree forms, depending on the number of operands.\n    - One-operand form. This form is identical to that used by the MUL\n    instruction. Here, the source operand (in a general-purpose\n    register or memory location) is multiplied by the value in the AL,\n    AX, or EAX register (depending on the operand size) and the product\n    is stored in the AX, DX:AX, or EDX:EAX registers, respectively.\n    - Two-operand form. With this form the destination operand (the\n    first operand) is multiplied by the source operand (second\n    operand). The destination operand is a general-purpose register and\n    the source operand is an immediate value, a general-purpose\n    register, or a memory location. The product is then stored in the\n    destination operand location.\n    - Three-operand form. This form requires a destination operand (the\n    first operand) and two source operands (the second and the third\n    operands). Here, the first source operand (which can be a\n    general-purpose register or a memory location) is multiplied by the\n    second source operand (an immediate value). The product is then\n    stored in the destination operand (a general-purpose register).\n\nWhen an immediate value is used as an operand, it is sign-extended to\nthe length of the destination operand format. The CF and OF flags are\nset when significant bits are carried into the upper half of the\nresult. The CF and OF flags are cleared when the result fits exactly in\nthe lower half of the result. The three forms of the IMUL instruction\nare similar in that the length of the product is calculated to twice\nthe length of the operands. With the one-operand form, the product is\nstored exactly in the destination. With the two- and three- operand\nforms, however, result is truncated to the length of the destination\nbefore it is stored in the destination register. 
Because of this\ntruncation, the CF or OF flag should be tested to ensure that no\nsignificant bits are lost. The two- and three-operand forms may also be\nused with unsigned operands because the lower half of the product is\nthe same regardless if the operands are signed or unsigned. The CF and\nOF flags, however, cannot be used to determine if the upper half of the\nresult is non-zero::\n\nIF (NumberOfOperands == 1)\nTHEN\n    IF (OperandSize == 8)\n    THEN\n        AX = AL * SRC (* Signed multiplication *)\n        IF AL == AX\n        THEN\n            CF = 0; OF = 0;\n        ELSE\n            CF = 1; OF = 1;\n        FI;\n    ELSE\n        IF OperandSize == 16\n        THEN\n            DX:AX = AX * SRC (* Signed multiplication *)\n            IF sign_extend_to_32 (AX) == DX:AX\n            THEN\n                CF = 0; OF = 0;\n            ELSE\n                CF = 1; OF = 1;\n            FI;\n        ELSE\n            IF OperandSize == 32\n            THEN\n                EDX:EAX = EAX * SRC (* Signed multiplication *)\n                IF EAX == EDX:EAX\n                THEN\n                    CF = 0; OF = 0;\n                ELSE\n                    CF = 1; OF = 1;\n                FI;\n            ELSE (* OperandSize = 64 *)\n                RDX:RAX = RAX * SRC (* Signed multiplication *)\n                IF RAX == RDX:RAX\n                THEN\n                    CF = 0; OF = 0;\n                ELSE\n                   CF = 1; OF = 1;\n                FI;\n            FI;\n        FI;\nELSE\n    IF (NumberOfOperands = 2)\n    THEN\n        temp = DEST * SRC (* Signed multiplication; temp is double DEST size *)\n        DEST = DEST * SRC (* Signed multiplication *)\n        IF temp != DEST\n        THEN\n            CF = 1; OF = 1;\n        ELSE\n            CF = 0; OF = 0;\n        FI;\n    ELSE (* NumberOfOperands = 3 *)\n        DEST = SRC1 * SRC2 (* Signed multiplication *)\n        temp = SRC1 * SRC2 (* Signed multiplication; temp is double 
SRC1 size *)\n        IF temp != DEST\n        THEN\n            CF = 1; OF = 1;\n        ELSE\n            CF = 0; OF = 0;\n        FI;\n    FI;\nFI;\n\n:param cpu: current CPU.\n:param operands: variable list of operands.", "id": "f16975:c2:m35"}
{"signature": "@instruction<EOL><INDENT>def BSR(cpu, dest, src):<DEDENT>", "body": "value = src.read()<EOL>flag = Operators.EXTRACT(value, src.size - <NUM_LIT:1>, <NUM_LIT:1>) == <NUM_LIT:1><EOL>res = <NUM_LIT:0><EOL>for pos in reversed(range(<NUM_LIT:0>, src.size)):<EOL><INDENT>res = Operators.ITEBV(dest.size, flag, res, pos)<EOL>flag = Operators.OR(flag, (Operators.EXTRACT(value, pos, <NUM_LIT:1>) == <NUM_LIT:1>))<EOL><DEDENT>cpu.PF = cpu._calculate_parity_flag(res)<EOL>cpu.ZF = value == <NUM_LIT:0><EOL>dest.write(Operators.ITEBV(dest.size, cpu.ZF, dest.read(), res))<EOL>", "docstring": "Bit scan reverse.\n\nSearches the source operand (second operand) for the most significant\nset bit (1 bit). If a most significant 1 bit is found, its bit index is\nstored in the destination operand (first operand). The source operand\ncan be a register or a memory location; the destination operand is a register.\nThe bit index is an unsigned offset from bit 0 of the source operand.\nIf the contents source operand are 0, the contents of the destination\noperand is undefined::\n\n        IF SRC  =  0\n        THEN\n            ZF  =  1;\n            DEST is undefined;\n        ELSE\n            ZF  =  0;\n            temp  =  OperandSize - 1;\n            WHILE Bit(SRC, temp)  =  0\n            DO\n                temp  =  temp - 1;\n                DEST  =  temp;\n            OD;\n        FI;\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m164"}
{"signature": "@instruction<EOL><INDENT>def PREFETCHT2(cpu, arg):<DEDENT>", "body": "", "docstring": "Not implemented.\n\nPerforms no operation.", "id": "f16975:c2:m263"}
{"signature": "@instruction<EOL><INDENT>def JAE(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.CF == False, target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if above or equal.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m115"}
{"signature": "@instruction<EOL><INDENT>def PSUBB(cpu, dest, src):<DEDENT>", "body": "result = []<EOL>value_a = dest.read()<EOL>value_b = src.read()<EOL>for i in reversed(range(<NUM_LIT:0>, dest.size, <NUM_LIT:8>)):<EOL><INDENT>a = Operators.EXTRACT(value_a, i, <NUM_LIT:8>)<EOL>b = Operators.EXTRACT(value_b, i, <NUM_LIT:8>)<EOL>result.append((a - b) & <NUM_LIT>)<EOL><DEDENT>dest.write(Operators.CONCAT(<NUM_LIT:8> * len(result), *result))<EOL>", "docstring": "Packed subtract.\n\nPerforms a SIMD subtract of the packed integers of the source operand (second operand) from the packed\nintegers of the destination operand (first operand), and stores the packed integer results in the\ndestination operand. The source operand can be an MMX(TM) technology register or a 64-bit memory location,\nor it can be an XMM register or a 128-bit memory location. The destination operand can be an MMX or an XMM\nregister.\nThe PSUBB instruction subtracts packed byte integers. When an individual result is too large or too small\nto be represented in a byte, the result is wrapped around and the low 8 bits are written to the\ndestination element.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m247"}
{"signature": "@instruction<EOL><INDENT>def MOVHPD(cpu, dest, src):<DEDENT>", "body": "if src.size == <NUM_LIT>:<EOL><INDENT>assert dest.size == <NUM_LIT:64><EOL>dest.write(Operators.EXTRACT(src.read(), <NUM_LIT:64>, <NUM_LIT:64>))<EOL><DEDENT>else:<EOL><INDENT>assert src.size == <NUM_LIT:64> and dest.size == <NUM_LIT><EOL>value = Operators.EXTRACT(dest.read(), <NUM_LIT:0>, <NUM_LIT:64>)  <EOL>dest.write(Operators.CONCAT(<NUM_LIT>, src.read(), value))<EOL><DEDENT>", "docstring": "Moves high packed double-precision floating-point value.\n\nMoves a double-precision floating-point value from the source operand (second operand) and the\ndestination operand (first operand). The source and destination operands can be an XMM register\nor a 64-bit memory location. This instruction allows double-precision floating-point values to be moved\nto and from the high quadword of an XMM register and memory. It cannot be used for register to\nregister or memory to memory moves. When the destination operand is an XMM register, the low quadword\nof the register remains unchanged.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m246"}
{"signature": "def LOOPNZ(cpu, target):", "body": "counter_name = {<NUM_LIT:16>: '<STR_LIT>', <NUM_LIT:32>: '<STR_LIT>', <NUM_LIT:64>: '<STR_LIT>'}[cpu.address_bit_size]<EOL>counter = cpu.write_register(counter_name, cpu.read_register(counter_name) - <NUM_LIT:1>)<EOL>cpu.PC = Operators.ITEBV(cpu.address_bit_size, counter != <NUM_LIT:0>, (cpu.PC + target.read()) & ((<NUM_LIT:1> << target.size) - <NUM_LIT:1>), cpu.PC + cpu.instruction.size)<EOL>", "docstring": "Loops if ECX counter is nonzero.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m150"}
{"signature": "@instruction<EOL><INDENT>def AAM(cpu, imm=None):<DEDENT>", "body": "if imm is None:<EOL><INDENT>imm = <NUM_LIT:10><EOL><DEDENT>else:<EOL><INDENT>imm = imm.read()<EOL><DEDENT>cpu.AH = Operators.UDIV(cpu.AL, imm)<EOL>cpu.AL = Operators.UREM(cpu.AL, imm)<EOL>cpu._calculate_logic_flags(<NUM_LIT:8>, cpu.AL)<EOL>", "docstring": "ASCII adjust AX after multiply.\n\nAdjusts the result of the multiplication of two unpacked BCD values\nto create a pair of unpacked (base 10) BCD values. The AX register is\nthe implied source and destination operand for this instruction. The AAM\ninstruction is only useful when it follows a MUL instruction that multiplies\n(binary multiplication) two unpacked BCD values and stores a word result\nin the AX register. The AAM instruction then adjusts the contents of the\nAX register to contain the correct 2-digit unpacked (base 10) BCD result.\n\nThe SF, ZF, and PF flags are set according to the resulting binary value in the AL register.\n\nThis instruction executes as described in compatibility mode and legacy mode.\nIt is not valid in 64-bit mode.::\n\n        tempAL  =  AL;\n        AH  =  tempAL / 10;\n        AL  =  tempAL MOD 10;\n\n:param cpu: current CPU.", "id": "f16975:c2:m22"}
{"signature": "@instruction<EOL><INDENT>def SETNBE(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, Operators.AND(cpu.CF == False, cpu.ZF == False), <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if not below or equal.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m83"}
{"signature": "@instruction<EOL><INDENT>def SETGE(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.SF == cpu.OF, <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if greater or equal.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m77"}
{"signature": "@instruction<EOL><INDENT>def JNL(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, (cpu.SF == cpu.OF), target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if not less.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m135"}
{"signature": "def LOOP(cpu, dest):", "body": "counter_name = {<NUM_LIT:16>: '<STR_LIT>', <NUM_LIT:32>: '<STR_LIT>', <NUM_LIT:64>: '<STR_LIT>'}[cpu.address_bit_size]<EOL>counter = cpu.write_register(counter_name, cpu.read_register(counter_name) - <NUM_LIT:1>)<EOL>cpu.PC = Operators.ITEBV(cpu.address_bit_size, counter == <NUM_LIT:0>, (cpu.PC + dest.read()) & ((<NUM_LIT:1> << dest.size) - <NUM_LIT:1>), cpu.PC + cpu.instruction.size)<EOL>", "docstring": "Loops according to ECX counter.\n\nPerforms a loop operation using the ECX or CX register as a counter.\nEach time the LOOP instruction is executed, the count register is decremented,\nthen checked for 0. If the count is 0, the loop is terminated and program\nexecution continues with the instruction following the LOOP instruction.\nIf the count is not zero, a near jump is performed to the destination\n(target) operand, which is presumably the instruction at the beginning\nof the loop. If the address-size attribute is 32 bits, the ECX register\nis used as the count register; otherwise the CX register is used::\n\n        IF address_bit_size  =  32\n        THEN\n            Count is ECX;\n        ELSE (* address_bit_size  =  16 *)\n            Count is CX;\n        FI;\n        Count  =  Count - 1;\n\n        IF (Count  0)  =  1\n        THEN\n            EIP  =  EIP + SignExtend(DEST);\n            IF OperandSize  =  16\n            THEN\n                EIP  =  EIP AND 0000FFFFH;\n            FI;\n        ELSE\n            Terminate loop and continue program execution at EIP;\n        FI;\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m149"}
{"signature": "@instruction<EOL><INDENT>def JZ(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.ZF, target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if zero.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m146"}
{"signature": "@instruction<EOL><INDENT>def SETB(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.CF, <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if below.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m72"}
{"signature": "@instruction<EOL><INDENT>def SETNB(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.CF == False, <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if not below.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m82"}
{"signature": "@instruction<EOL><INDENT>def INT(cpu, op0):<DEDENT>", "body": "if op0.read() != <NUM_LIT>:<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL><DEDENT>raise Interruption(op0.read())<EOL>", "docstring": "Calls to interrupt procedure.\n\nThe INT n instruction generates a call to the interrupt or exception handler specified\nwith the destination operand. The INT n instruction is the  general mnemonic for executing\na software-generated call to an interrupt handler. The INTO instruction is a special\nmnemonic for calling overflow exception (#OF), interrupt vector number 4. The overflow\ninterrupt checks the OF flag in the EFLAGS register and calls the overflow interrupt handler\nif the OF flag is set to 1.\n\n:param cpu: current CPU.\n:param op0: destination operand.", "id": "f16975:c2:m110"}
{"signature": "@instruction<EOL><INDENT>def INC(cpu, dest):<DEDENT>", "body": "arg0 = dest.read()<EOL>res = dest.write(arg0 + <NUM_LIT:1>)<EOL>res &= (<NUM_LIT:1> << dest.size) - <NUM_LIT:1><EOL>SIGN_MASK = <NUM_LIT:1> << (dest.size - <NUM_LIT:1>)<EOL>cpu.AF = ((arg0 ^ <NUM_LIT:1>) ^ res) & <NUM_LIT> != <NUM_LIT:0><EOL>cpu.ZF = res == <NUM_LIT:0><EOL>cpu.SF = (res & SIGN_MASK) != <NUM_LIT:0><EOL>cpu.OF = res == SIGN_MASK<EOL>cpu.PF = cpu._calculate_parity_flag(res)<EOL>", "docstring": "Increments by 1.\n\nAdds 1 to the destination operand, while preserving the state of the\nCF flag. The destination operand can be a register or a memory location.\nThis instruction allows a loop counter to be updated without disturbing\nthe CF flag. (Use a ADD instruction with an immediate operand of 1 to\nperform an increment operation that does updates the CF flag.)::\n\n        DEST  =  DEST +1;\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m36"}
{"signature": "@instruction<EOL><INDENT>def ROR(cpu, dest, src):<DEDENT>", "body": "OperandSize = dest.size<EOL>count = src.read()<EOL>countMask = {<NUM_LIT:8>: <NUM_LIT>,<EOL><NUM_LIT:16>: <NUM_LIT>,<EOL><NUM_LIT:32>: <NUM_LIT>,<EOL><NUM_LIT:64>: <NUM_LIT>}[OperandSize]<EOL>tempCount = Operators.ZEXTEND((count & countMask) % (OperandSize), OperandSize)<EOL>value = dest.read()<EOL>newValue = (value >> tempCount) | (value << (OperandSize - tempCount))<EOL>dest.write(newValue)<EOL>cpu.CF = Operators.ITE(tempCount != <NUM_LIT:0>, ((newValue >> (OperandSize - <NUM_LIT:1>)) & <NUM_LIT>) == <NUM_LIT:1>, cpu.CF)<EOL>s_MSB = ((newValue >> (OperandSize - <NUM_LIT:1>)) & <NUM_LIT>) == <NUM_LIT:1><EOL>s_MSB2 = ((newValue >> (OperandSize - <NUM_LIT:2>)) & <NUM_LIT>) == <NUM_LIT:1><EOL>cpu.OF = Operators.ITE(tempCount == <NUM_LIT:1>, s_MSB ^ s_MSB2, cpu.OF)<EOL>", "docstring": "Rotates right (ROR).\n\nShifts (rotates) the bits of the first operand (destination operand) the number of bit positions specified in the\nsecond operand (count operand) and stores the result in the destination operand. The destination operand can be\na register or a memory location; the count operand is an unsigned integer that can be an immediate or a value in\nthe CL register. In legacy and compatibility mode, the processor restricts the count to a number between 0 and 31\nby masking all the bits in the count operand except the 5 least-significant bits.\n\nThe rotate right (ROR) instruction shift all the bits toward less significant bit positions, except\nfor the least-significant bit, which is rotated to the most-significant bit location.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: count operand.", "id": "f16975:c2:m154"}
{"signature": "@instruction<EOL><INDENT>def JRCXZ(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.RCX == <NUM_LIT:0>, target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if RCX register is 0.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m121"}
{"signature": "@instruction<EOL><INDENT>def VMOVDQU(cpu, dest, src):<DEDENT>", "body": "<EOL>dest.write(src.read())<EOL>", "docstring": "Move Unaligned Double Quadword\n\nMoves 128 bits of packed integer values from the source operand (second operand)\nto the destination operand (first operand). This instruction can be used to load\nan XMM register from a 128-bit memory location, to store the contents of an XMM\nregister into a 128-bit memory location, or to move data between two XMM registers.\nWhen the source or destination operand is a memory operand, the operand may be\nunaligned on a 16-byte boundary without causing a general-protection exception\n(#GP) to be generated.\n\n    VMOVDQU (VEX.128 encoded version)\n    DEST[127:0] <- SRC[127:0]\n    DEST[VLMAX-1:128] <- 0\n    VMOVDQU (VEX.256 encoded version)\n    DEST[255:0] <- SRC[255:0]", "id": "f16975:c2:m259"}
{"signature": "@instruction<EOL><INDENT>def SETPE(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.PF, <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if parity even.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m96"}
{"signature": "@instruction<EOL><INDENT>def CQO(cpu):<DEDENT>", "body": "res = Operators.SEXTEND(cpu.RAX, <NUM_LIT:64>, <NUM_LIT>)<EOL>cpu.RAX = Operators.EXTRACT(res, <NUM_LIT:0>, <NUM_LIT:64>)<EOL>cpu.RDX = Operators.EXTRACT(res, <NUM_LIT:64>, <NUM_LIT:64>)<EOL>", "docstring": "RDX:RAX = sign-extend of RAX.", "id": "f16975:c2:m232"}
{"signature": "@instruction<EOL><INDENT>def POP(cpu, dest):<DEDENT>", "body": "dest.write(cpu.pop(dest.size))<EOL>", "docstring": "Pops a value from the stack.\n\nLoads the value from the top of the stack to the location specified\nwith the destination operand and then increments the stack pointer.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m102"}
{"signature": "@instruction<EOL><INDENT>def SETBE(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, Operators.OR(cpu.CF, cpu.ZF), <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if below or equal.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m73"}
{"signature": "@instruction<EOL><INDENT>def XCHG(cpu, dest, src):<DEDENT>", "body": "temp = dest.read()<EOL>dest.write(src.read())<EOL>src.write(temp)<EOL>", "docstring": "Exchanges register/memory with register.\n\nExchanges the contents of the destination (first) and source (second)\noperands. The operands can be two general-purpose registers or a register\nand a memory location. If a memory operand is referenced, the processor's\nlocking protocol is automatically implemented for the duration of the\nexchange operation, regardless of the presence or absence of the LOCK\nprefix or of the value of the IOPL.\nThis instruction is useful for implementing semaphores or similar data\nstructures for process synchronization.\nThe XCHG instruction can also be used instead of the BSWAP instruction\nfor 16-bit operands::\n\n        TEMP  =  DEST\n        DEST  =  SRC\n        SRC  =  TEMP\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m100"}
{"signature": "@instruction<EOL><INDENT>def CMP(cpu, src1, src2):<DEDENT>", "body": "arg0 = src1.read()<EOL>arg1 = Operators.SEXTEND(src2.read(), src2.size, src1.size)<EOL>cpu._calculate_CMP_flags(src1.size, arg0 - arg1, arg0, arg1)<EOL>", "docstring": "Compares two operands.\n\nCompares the first source operand with the second source operand and sets the status flags\nin the EFLAGS register according to the results. The comparison is performed by subtracting\nthe second operand from the first operand and then setting the status flags in the same manner\nas the SUB instruction. When an immediate value is used as an operand, it is sign-extended to\nthe length of the first operand::\n\n        temp  =  SRC1 - SignExtend(SRC2);\n        ModifyStatusFlags; (* Modify status flags in the same manner as the SUB instruction*)\n\nThe CF, OF, SF, ZF, AF, and PF flags are set according to the result.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m27"}
{"signature": "@instruction<EOL><INDENT>def PUSHF(cpu):<DEDENT>", "body": "cpu.push(cpu.EFLAGS, <NUM_LIT:16>)<EOL>", "docstring": "Pushes FLAGS register onto the stack.\n\n:param cpu: current CPU.", "id": "f16975:c2:m107"}
{"signature": "@instruction<EOL><INDENT>def PREFETCHT1(cpu, arg):<DEDENT>", "body": "", "docstring": "Not implemented.\n\nPerforms no operation.", "id": "f16975:c2:m262"}
{"signature": "def __init__(self, regfile, memory, *args, **kwargs):", "body": "super().__init__(regfile, memory, *args, **kwargs)<EOL>self._segments = {}<EOL>", "docstring": "Builds a CPU model.\n:param regfile: regfile object for this CPU.\n:param memory: memory object for this CPU.", "id": "f16975:c2:m0"}
{"signature": "@instruction<EOL><INDENT>def CMPXCHG8B(cpu, dest):<DEDENT>", "body": "size = dest.size<EOL>cmp_reg_name_l = {<NUM_LIT:64>: '<STR_LIT>', <NUM_LIT>: '<STR_LIT>'}[size]<EOL>cmp_reg_name_h = {<NUM_LIT:64>: '<STR_LIT>', <NUM_LIT>: '<STR_LIT>'}[size]<EOL>src_reg_name_l = {<NUM_LIT:64>: '<STR_LIT>', <NUM_LIT>: '<STR_LIT>'}[size]<EOL>src_reg_name_h = {<NUM_LIT:64>: '<STR_LIT>', <NUM_LIT>: '<STR_LIT>'}[size]<EOL>cmph = cpu.read_register(cmp_reg_name_h)<EOL>cmpl = cpu.read_register(cmp_reg_name_l)<EOL>srch = cpu.read_register(src_reg_name_h)<EOL>srcl = cpu.read_register(src_reg_name_l)<EOL>cmp0 = Operators.CONCAT(size, cmph, cmpl)<EOL>src0 = Operators.CONCAT(size, srch, srcl)<EOL>arg_dest = dest.read()<EOL>cpu.ZF = arg_dest == cmp0<EOL>dest.write(<EOL>Operators.ITEBV(size, cpu.ZF,<EOL>Operators.CONCAT(size, srch, srcl),<EOL>arg_dest)<EOL>)<EOL>cpu.write_register(cmp_reg_name_l, Operators.ITEBV(size // <NUM_LIT:2>, cpu.ZF, cmpl,<EOL>Operators.EXTRACT(arg_dest, <NUM_LIT:0>, size // <NUM_LIT:2>)))<EOL>cpu.write_register(cmp_reg_name_h, Operators.ITEBV(size // <NUM_LIT:2>, cpu.ZF, cmph,<EOL>Operators.EXTRACT(arg_dest, size // <NUM_LIT:2>, size // <NUM_LIT:2>)))<EOL>", "docstring": "Compares and exchanges bytes.\n\nCompares the 64-bit value in EDX:EAX (or 128-bit value in RDX:RAX if\noperand size is 128 bits) with the operand (destination operand). If\nthe values are equal, the 64-bit value in ECX:EBX (or 128-bit value in\nRCX:RBX) is stored in the destination operand.  
Otherwise, the value in\nthe destination operand is loaded into EDX:EAX (or RDX:RAX)::\n\n        IF (64-Bit Mode and OperandSize = 64)\n        THEN\n            IF (RDX:RAX = DEST)\n            THEN\n                ZF = 1;\n                DEST = RCX:RBX;\n            ELSE\n                ZF = 0;\n                RDX:RAX = DEST;\n            FI\n        ELSE\n            IF (EDX:EAX = DEST)\n            THEN\n                ZF = 1;\n                DEST = ECX:EBX;\n            ELSE\n                ZF = 0;\n                EDX:EAX = DEST;\n            FI;\n        FI;\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m29"}
{"signature": "def __str__(self):", "body": "CHEADER = '<STR_LIT>'<EOL>CBLUE = '<STR_LIT>'<EOL>CGREEN = '<STR_LIT>'<EOL>CWARNING = '<STR_LIT>'<EOL>CFAIL = '<STR_LIT>'<EOL>CEND = '<STR_LIT>'<EOL>pos = <NUM_LIT:0><EOL>result = \"<STR_LIT>\"<EOL>try:<EOL><INDENT>instruction = self.instruction<EOL>result += f\"<STR_LIT>\"<EOL><DEDENT>except BaseException:<EOL><INDENT>result += \"<STR_LIT>\"<EOL><DEDENT>regs = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>for reg_name in regs:<EOL><INDENT>value = self.read_register(reg_name)<EOL>if issymbolic(value):<EOL><INDENT>result += f'<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>result += f'<STR_LIT>'<EOL><DEDENT>pos = <NUM_LIT:0><EOL><DEDENT>pos = <NUM_LIT:0><EOL>for reg_name in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>value = self.read_register(reg_name)<EOL>if issymbolic(value):<EOL><INDENT>result += f'<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>result += f'<STR_LIT>'<EOL><DEDENT>pos = <NUM_LIT:0><EOL><DEDENT>for reg_name in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>base, size, ty = self.get_descriptor(self.read_register(reg_name))<EOL>result += f'<STR_LIT>'<EOL><DEDENT>for reg_name in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>value = getattr(self, reg_name)<EOL>result += f'<STR_LIT>'<EOL>pos = <NUM_LIT:0><EOL><DEDENT>return result<EOL>", "docstring": "Returns a string representation of cpu state\n\n:rtype: str\n:return: a string containing the name and current value for all the registers.", "id": "f16975:c9:m1"}
{"signature": "@instruction<EOL><INDENT>def JECXZ(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.ECX == <NUM_LIT:0>, target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if ECX register is 0.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m120"}
{"signature": "@instruction<EOL><INDENT>def SAL(cpu, dest, src):<DEDENT>", "body": "OperandSize = dest.size<EOL>count = src.read()<EOL>countMask = {<NUM_LIT:8>: <NUM_LIT>,<EOL><NUM_LIT:16>: <NUM_LIT>,<EOL><NUM_LIT:32>: <NUM_LIT>,<EOL><NUM_LIT:64>: <NUM_LIT>}[OperandSize]<EOL>tempCount = Operators.ZEXTEND(count & countMask, dest.size)<EOL>tempDest = value = dest.read()<EOL>res = dest.write(Operators.ITEBV(dest.size, tempCount == <NUM_LIT:0>, tempDest, value << tempCount))<EOL>MASK = (<NUM_LIT:1> << OperandSize) - <NUM_LIT:1><EOL>SIGN_MASK = <NUM_LIT:1> << (OperandSize - <NUM_LIT:1>)<EOL>cpu.CF = Operators.OR(Operators.AND(tempCount == <NUM_LIT:0>, cpu.CF), Operators.AND(tempCount != <NUM_LIT:0>, (tempDest & (<NUM_LIT:1> << (OperandSize - tempCount)) != <NUM_LIT:0>)))<EOL>cpu.OF = Operators.ITE(tempCount != <NUM_LIT:0>, (cpu.CF) ^ (((res >> (OperandSize - <NUM_LIT:1>)) & <NUM_LIT>) == <NUM_LIT:1>), cpu.OF)<EOL>cpu.SF = Operators.OR(Operators.AND(tempCount == <NUM_LIT:0>, cpu.SF), Operators.AND(tempCount != <NUM_LIT:0>, (res & SIGN_MASK) != <NUM_LIT:0>))<EOL>cpu.ZF = Operators.OR(Operators.AND(tempCount == <NUM_LIT:0>, cpu.ZF), Operators.AND(tempCount != <NUM_LIT:0>, res == <NUM_LIT:0>))<EOL>cpu.PF = Operators.OR(Operators.AND(tempCount == <NUM_LIT:0>, cpu.PF), Operators.AND(tempCount != <NUM_LIT:0>, cpu._calculate_parity_flag(res)))<EOL>", "docstring": "The shift arithmetic left.\n\nShifts the bits in the first operand (destination operand) to the left or right by the number of bits specified in the\nsecond operand (count operand). Bits shifted beyond the destination operand boundary are first shifted into the CF\nflag, then discarded. At the end of the shift operation, the CF flag contains the last bit shifted out of the destination\noperand.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: count operand.", "id": "f16975:c2:m155"}
{"signature": "@instruction<EOL><INDENT>def JNE(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, False == cpu.ZF, target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if not equal.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m132"}
{"signature": "@instruction<EOL><INDENT>def PSRLQ(cpu, dest, src):<DEDENT>", "body": "count = src.read()<EOL>count = Operators.ITEBV(src.size, Operators.UGT(count, <NUM_LIT>), <NUM_LIT:64>, count)<EOL>count = Operators.EXTRACT(count, <NUM_LIT:0>, <NUM_LIT:64>)<EOL>if dest.size == <NUM_LIT:64>:<EOL><INDENT>dest.write(dest.read() >> count)<EOL><DEDENT>else:<EOL><INDENT>hi = Operators.EXTRACT(dest.read(), <NUM_LIT:64>, <NUM_LIT:64>) >> count<EOL>low = Operators.EXTRACT(dest.read(), <NUM_LIT:0>, <NUM_LIT:64>) >> count<EOL>dest.write(Operators.CONCAT(<NUM_LIT>, hi, low))<EOL><DEDENT>", "docstring": "Shift Packed Data Right Logical\n\n        Shifts the bits in the individual quadword in the destination operand to the right by\n        the number of bits specified in the count operand . As the bits in the data elements\n        are shifted right, the empty high-order bits are cleared (set to 0). If the value\n        specified by the count operand is greater than  63, then the destination operand is set\n        to all 0s.\n\n        if(OperandSize == 64) {\n                        //PSRLQ instruction with 64-bit operand:\n                        if(Count > 63) Destination[64..0] = 0;\n                        else Destination = ZeroExtend(Destination >> Count);\n                }\n                else {\n                        //PSRLQ instruction with 128-bit operand:\n                        if(Count > 15) Destination[128..0] = 0;\n                        else {\n                                Destination[0..63] = ZeroExtend(Destination[0..63] >> Count);\n                                Destination[64..127] = ZeroExtend(Destination[64..127] >> Count);\n                        }\n                }", "id": "f16975:c2:m269"}
{"signature": "@instruction<EOL><INDENT>def JNBE(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, Operators.AND(cpu.CF == False, cpu.ZF == False), target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if not below or equal.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m130"}
{"signature": "@instruction<EOL><INDENT>def PSRLDQ(cpu, dest, src):<DEDENT>", "body": "<EOL>temp = Operators.EXTRACT(src.read(), <NUM_LIT:0>, <NUM_LIT:8>)<EOL>temp = Operators.ITEBV(src.size, temp > <NUM_LIT:15>, <NUM_LIT:16>, temp)<EOL>dest.write(dest.read() >> (temp * <NUM_LIT:8>))<EOL>", "docstring": "Packed shift right logical double quadword.\n\nShifts the destination operand (first operand) to the right by the number\nof bytes specified in the count operand (second operand). The empty high-order\nbytes are cleared (set to all 0s). If the value specified by the count\noperand is greater than 15, the destination operand is set to all 0s.\nThe destination operand is an XMM register. The count operand is an 8-bit\nimmediate::\n\n    TEMP  =  SRC;\n    if (TEMP > 15) TEMP  =  16;\n    DEST  =  DEST >> (temp * 8);\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: count operand.", "id": "f16975:c2:m226"}
{"signature": "@instruction<EOL><INDENT>def NOT(cpu, dest):<DEDENT>", "body": "res = dest.write(~dest.read())<EOL>", "docstring": "One's complement negation.\n\nPerforms a bitwise NOT operation (each 1 is cleared to 0, and each 0\nis set to 1) on the destination operand and stores the result in the destination\noperand location::\n\n    DEST  =  NOT DEST;\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m17"}
{"signature": "@instruction<EOL><INDENT>def SBB(cpu, dest, src):<DEDENT>", "body": "cpu._SUB(dest, src, carry=True)<EOL>", "docstring": "Integer subtraction with borrow.\n\nAdds the source operand (second operand) and the carry (CF) flag, and\nsubtracts the result from the destination operand (first operand). The\nresult of the subtraction is stored in the destination operand. The\ndestination operand can be a register or a memory location; the source\noperand can be an immediate, a register, or a memory location.\n(However, two memory operands cannot be used in one instruction.) The\nstate of the CF flag represents a borrow from a previous subtraction.\nWhen an immediate value is used as an operand, it is sign-extended to\nthe length of the destination operand format.\nThe SBB instruction does not distinguish between signed or unsigned\noperands. Instead, the processor evaluates the result for both data\ntypes and sets the OF and CF flags to indicate a borrow in the signed\nor unsigned result, respectively. The SF flag indicates the sign of the\nsigned result.  The SBB instruction is usually executed as part of a\nmultibyte or multiword subtraction in which a SUB instruction is\nfollowed by a SBB instruction::\n\n        DEST  =  DEST - (SRC + CF);\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m39"}
{"signature": "@instruction<EOL><INDENT>def SETS(cpu, dest):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.SF, <NUM_LIT:1>, <NUM_LIT:0>))<EOL>", "docstring": "Sets byte if sign.\n\n:param cpu: current CPU.\n:param dest: destination operand.", "id": "f16975:c2:m98"}
{"signature": "@instruction<EOL><INDENT>def PXOR(cpu, dest, src):<DEDENT>", "body": "res = dest.write(dest.read() ^ src.read())<EOL>", "docstring": "Logical exclusive OR.\n\nPerforms a bitwise logical exclusive-OR (XOR) operation on the quadword\nsource (second) and destination (first) operands and stores the result\nin the destination operand location. The source operand can be an MMX(TM)\ntechnology register or a quadword memory location; the destination operand\nmust be an MMX register. Each bit of the result is 1 if the corresponding\nbits of the two operands are different; each bit is 0 if the corresponding\nbits of the operands are the same::\n\n    DEST  =  DEST XOR SRC;\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: quadword source operand.", "id": "f16975:c2:m189"}
{"signature": "@instruction<EOL><INDENT>def JO(cpu, target):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.OF, target.read(), cpu.PC)<EOL>", "docstring": "Jumps short if overflow.\n\n:param cpu: current CPU.\n:param target: destination operand.", "id": "f16975:c2:m141"}
{"signature": "@instruction<EOL><INDENT>def SHR(cpu, dest, src):<DEDENT>", "body": "OperandSize = dest.size<EOL>count = Operators.ZEXTEND(src.read() & (OperandSize - <NUM_LIT:1>), OperandSize)<EOL>value = dest.read()<EOL>res = dest.write(value >> count)  <EOL>MASK = (<NUM_LIT:1> << OperandSize) - <NUM_LIT:1><EOL>SIGN_MASK = <NUM_LIT:1> << (OperandSize - <NUM_LIT:1>)<EOL>if issymbolic(count):<EOL><INDENT>cpu.CF = Operators.ITE(count != <NUM_LIT:0>,<EOL>((value >> Operators.ZEXTEND(count - <NUM_LIT:1>, OperandSize)) & <NUM_LIT:1>) != <NUM_LIT:0>,<EOL>cpu.CF)<EOL><DEDENT>else:<EOL><INDENT>if count != <NUM_LIT:0>:<EOL><INDENT>cpu.CF = Operators.EXTRACT(value, count - <NUM_LIT:1>, <NUM_LIT:1>) != <NUM_LIT:0><EOL><DEDENT><DEDENT>cpu.ZF = Operators.ITE(count != <NUM_LIT:0>, res == <NUM_LIT:0>, cpu.ZF)<EOL>cpu.SF = Operators.ITE(count != <NUM_LIT:0>, (res & SIGN_MASK) != <NUM_LIT:0>, cpu.SF)<EOL>cpu.OF = Operators.ITE(count != <NUM_LIT:0>, ((value >> (OperandSize - <NUM_LIT:1>)) & <NUM_LIT>) == <NUM_LIT:1>, cpu.OF)<EOL>cpu.PF = Operators.ITE(count != <NUM_LIT:0>, cpu._calculate_parity_flag(res), cpu.PF)<EOL>", "docstring": "Shift logical right.\n\nThe shift arithmetic right (SAR) and shift logical right (SHR)\ninstructions shift the bits of the destination operand to the right\n(toward less significant bit locations). For each shift count, the\nleast significant bit of the destination operand is shifted into the CF\nflag, and the most significant bit is either set or cleared depending\non the instruction type. The SHR instruction clears the most\nsignificant bit.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: count operand.", "id": "f16975:c2:m158"}
{"signature": "@instruction<EOL><INDENT>def ADC(cpu, dest, src):<DEDENT>", "body": "cpu._ADD(dest, src, carry=True)<EOL>", "docstring": "Adds with carry.\n\nAdds the destination operand (first operand), the source operand (second operand),\nand the carry (CF) flag and stores the result in the destination operand. The state\nof the CF flag represents a carry from a previous addition. When an immediate value\nis used as an operand, it is sign-extended to the length of the destination operand\nformat. The ADC instruction does not distinguish between signed or unsigned operands.\nInstead, the processor evaluates the result for both data types and sets the OF and CF\nflags to indicate a carry in the signed or unsigned result, respectively. The SF flag\nindicates the sign of the signed result. The ADC instruction is usually executed as\npart of a multibyte or multiword addition in which an ADD instruction is followed by an\nADC instruction::\n\n        DEST  =  DEST + SRC + CF;\n\nThe OF, SF, ZF, AF, CF, and PF flags are set according to the result.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m24"}
{"signature": "@instruction<EOL><INDENT>def CMOVNP(cpu, dest, src):<DEDENT>", "body": "dest.write(Operators.ITEBV(dest.size, cpu.PF == False, src.read(), dest.read()))<EOL>", "docstring": "Conditional move - Not parity/parity odd.\n\nTests the status flags in the EFLAGS register and moves the source operand\n(second operand) to the destination operand (first operand) if the given\ntest condition is true.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m51"}
{"signature": "@instruction<EOL><INDENT>def PALIGNR(cpu, dest, src, offset):<DEDENT>", "body": "dest.write(<EOL>Operators.EXTRACT(<EOL>Operators.CONCAT(dest.size * <NUM_LIT:2>, dest.read(), src.read()),<EOL>offset.read() * <NUM_LIT:8>,<EOL>dest.size))<EOL>", "docstring": "ALIGNR concatenates the destination operand (the first operand) and the source\n            operand (the second operand) into an intermediate composite, shifts the composite\n            at byte granularity to the right by a constant immediate, and extracts the right-\n            aligned result into the destination.", "id": "f16975:c2:m267"}
{"signature": "@instruction<EOL><INDENT>def LDS(cpu, dest, src):<DEDENT>", "body": "raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Not implemented.", "id": "f16975:c2:m61"}
{"signature": "@instruction<EOL><INDENT>def PUNPCKLQDQ(cpu, dest, src):<DEDENT>", "body": "cpu._PUNPCKL(dest, src, <NUM_LIT:64>)<EOL>", "docstring": "Interleaves the low-order quad-words of the source and destination operands.\n\nUnpacks and interleaves the low-order data elements (bytes, words, doublewords, and quadwords)\nof the destination operand (first operand) and source operand (second operand) into the\ndestination operand.\n\n:param cpu: current CPU.\n:param dest: destination operand.\n:param src: source operand.", "id": "f16975:c2:m198"}
{"signature": "@instruction<EOL><INDENT>def CLD(cpu):<DEDENT>", "body": "cpu.DF = False<EOL>", "docstring": "Clears direction flag.\nClears the DF flag in the EFLAGS register. When the DF flag is set to 0, string operations\nincrement the index registers (ESI and/or EDI)::\n\n    DF  =  0;\n\n:param cpu: current CPU.", "id": "f16975:c2:m170"}
{"signature": "def UInt(value, width):", "body": "return GetNBits(value, width)<EOL>", "docstring": "Return integer value of `value` as a bitstring of `width` width.\n\n:param value: The value to convert.\n:type value: int or long or BitVec\n:param int width: The width of the bitstring to consider\n:return: The integer value\n:rtype int or long or BitVec", "id": "f16976:m4"}
{"signature": "def LSL(value, amount, width):", "body": "if amount == <NUM_LIT:0>:<EOL><INDENT>return value<EOL><DEDENT>result, _ = LSL_C(value, amount, width)<EOL>return result<EOL>", "docstring": "The ARM LSL (logical left shift) operation.\n\n:param value: Value to shift\n:type value: int or long or BitVec\n:param int amount: How many bits to shift it.\n:param int width: Width of the value\n:return: Resultant value\n:rtype int or BitVec", "id": "f16976:m6"}
{"signature": "def ROR(value, amount, width):", "body": "if amount == <NUM_LIT:0>:<EOL><INDENT>return value<EOL><DEDENT>result, _ = ROR_C(value, amount, width)<EOL>return result<EOL>", "docstring": "The ARM ROR (rotate right) operation.\n\n:param value: Value to shift\n:type value: int or long or BitVec\n:param int amount: How many bits to rotate it.\n:param int width: Width of the value\n:return: Resultant value\n:rtype int or BitVec", "id": "f16976:m12"}
{"signature": "def Mask(width):", "body": "return (<NUM_LIT:1> << width) - <NUM_LIT:1><EOL>", "docstring": "Return a mask with the low `width` bits set.\n\n:param int width: How many bits to set to 1\n:return: int or long", "id": "f16976:m0"}
{"signature": "def SInt(value, width):", "body": "return Operators.ITEBV(width, Bit(value, width - <NUM_LIT:1>) == <NUM_LIT:1>,<EOL>GetNBits(value, width) - <NUM_LIT:2>**width,<EOL>GetNBits(value, width))<EOL>", "docstring": "Convert a bitstring `value` of `width` bits to a signed integer\nrepresentation.\n\n:param value: The value to convert.\n:type value: int or long or BitVec\n:param int width: The width of the bitstring to consider\n:return: The converted value\n:rtype int or long or BitVec", "id": "f16976:m3"}
{"signature": "def LSR(value, amount, width):", "body": "if amount == <NUM_LIT:0>:<EOL><INDENT>return value<EOL><DEDENT>result, _ = LSR_C(value, amount, width)<EOL>return result<EOL>", "docstring": "The ARM LSR (logical shift right) operation.\n\n:param value: Value to shift\n:type value: int or long or BitVec\n:param int amount: How many bits to shift it.\n:param int width: Width of the value\n:return: Resultant value\n:rtype int or BitVec", "id": "f16976:m8"}
{"signature": "def ASR_C(value, amount, width):", "body": "assert amount <= width<EOL>assert amount > <NUM_LIT:0><EOL>assert amount + width <= width * <NUM_LIT:2><EOL>value = Operators.SEXTEND(value, width, width * <NUM_LIT:2>)<EOL>result = GetNBits(value >> amount, width)<EOL>carry = Bit(value, amount - <NUM_LIT:1>)<EOL>return (result, carry)<EOL>", "docstring": "The ARM ASR_C (arithmetic shift right with carry) operation.\n\n:param value: Value to shift\n:type value: int or long or BitVec\n:param int amount: How many bits to shift it.\n:param int width: Width of the value\n:return: Resultant value and carry result\n:rtype tuple", "id": "f16976:m9"}
{"signature": "def GetNBits(value, nbits):", "body": "<EOL>if isinstance(value, int):<EOL><INDENT>return Operators.EXTRACT(value, <NUM_LIT:0>, nbits)<EOL><DEDENT>elif isinstance(value, BitVec):<EOL><INDENT>if value.size < nbits:<EOL><INDENT>return Operators.ZEXTEND(value, nbits)<EOL><DEDENT>else:<EOL><INDENT>return Operators.EXTRACT(value, <NUM_LIT:0>, nbits)<EOL><DEDENT><DEDENT>", "docstring": "Get the first `nbits` from `value`.\n\n:param value: Source value from which to extract\n:type value: int or long or BitVec\n:param int nbits: How many bits to extract\n:return: Low `nbits` bits of `value`.\n:rtype int or long or BitVec", "id": "f16976:m2"}
{"signature": "@instruction<EOL><INDENT>def MOV(cpu, dest, src):<DEDENT>", "body": "if cpu.mode == cs.CS_MODE_ARM:<EOL><INDENT>result, carry_out = src.read(with_carry=True)<EOL>dest.write(result)<EOL>cpu.set_flags(C=carry_out, N=HighBit(result), Z=(result == <NUM_LIT:0>))<EOL><DEDENT>else:<EOL><INDENT>result = src.read()<EOL>dest.write(result)<EOL>cpu.set_flags(N=HighBit(result), Z=(result == <NUM_LIT:0>))<EOL><DEDENT>", "docstring": "Implement the MOV{S} instruction.\n\nNote: If src operand is PC, temporarily release our logical PC\nview and conform to the spec, which dictates PC = curr instr + 8\n\n:param Armv7Operand dest: The destination operand; register.\n:param Armv7Operand src: The source operand; register or immediate.", "id": "f16977:c4:m24"}
{"signature": "def _shift(cpu, value, _type, amount, carry):", "body": "assert(cs.arm.ARM_SFT_INVALID < _type <= cs.arm.ARM_SFT_RRX_REG)<EOL>if _type in (cs.arm.ARM_SFT_RRX, cs.arm.ARM_SFT_RRX_REG) and amount != <NUM_LIT:1>:<EOL><INDENT>amount = <NUM_LIT:1><EOL><DEDENT>elif _type in range(cs.arm.ARM_SFT_ASR_REG, cs.arm.ARM_SFT_RRX_REG + <NUM_LIT:1>):<EOL><INDENT>if cpu.mode == cs.CS_MODE_THUMB:<EOL><INDENT>src = amount.read()<EOL><DEDENT>else:<EOL><INDENT>src_reg = cpu.instruction.reg_name(amount).upper()<EOL>src = cpu.regfile.read(src_reg)<EOL><DEDENT>amount = Operators.EXTRACT(src, <NUM_LIT:0>, <NUM_LIT:8>)<EOL><DEDENT>if amount == <NUM_LIT:0>:<EOL><INDENT>return value, carry<EOL><DEDENT>width = cpu.address_bit_size<EOL>if _type in (cs.arm.ARM_SFT_ASR, cs.arm.ARM_SFT_ASR_REG):<EOL><INDENT>return ASR_C(value, amount, width)<EOL><DEDENT>elif _type in (cs.arm.ARM_SFT_LSL, cs.arm.ARM_SFT_LSL_REG):<EOL><INDENT>return LSL_C(value, amount, width)<EOL><DEDENT>elif _type in (cs.arm.ARM_SFT_LSR, cs.arm.ARM_SFT_LSR_REG):<EOL><INDENT>return LSR_C(value, amount, width)<EOL><DEDENT>elif _type in (cs.arm.ARM_SFT_ROR, cs.arm.ARM_SFT_ROR_REG):<EOL><INDENT>return ROR_C(value, amount, width)<EOL><DEDENT>elif _type in (cs.arm.ARM_SFT_RRX, cs.arm.ARM_SFT_RRX_REG):<EOL><INDENT>return RRX_C(value, carry, width)<EOL><DEDENT>raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "See Shift() and Shift_C() in the ARM manual", "id": "f16977:c4:m9"}
{"signature": "@instruction<EOL><INDENT>def LDCL(cpu, *operands):<DEDENT>", "body": "", "docstring": "Occasionally used in glibc (longjmp in ld.so). Nop under our execution model.", "id": "f16977:c4:m107"}
{"signature": "@instruction<EOL><INDENT>def CBZ(cpu, op, dest):<DEDENT>", "body": "cpu.PC = Operators.ITEBV(cpu.address_bit_size,<EOL>op.read(), cpu.PC, dest.read())<EOL>", "docstring": "Compare and Branch on Zero compares the value in a register with zero, and conditionally branches forward\na constant value. It does not affect the condition flags.\n\n:param ARMv7Operand op: Specifies the register that contains the first operand.\n:param ARMv7Operand dest:\n    Specifies the label of the instruction that is to be branched to. The assembler calculates the\n    required value of the offset from the PC value of the CBZ instruction to this label, then\n    selects an encoding that will set imm32 to that offset. Allowed offsets are even numbers in\n    the range 0 to 126.", "id": "f16977:c4:m60"}
{"signature": "def __init__(self):", "body": "super().__init__({'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'})<EOL>self._regs = {}<EOL>for reg_name in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self._regs[reg_name] = Register(<NUM_LIT:32>)<EOL><DEDENT>for reg_name in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self._regs[reg_name] = Register(<NUM_LIT:64>)<EOL><DEDENT>self._regs['<STR_LIT>'] = Register(<NUM_LIT:1>)<EOL>self._regs['<STR_LIT>'] = Register(<NUM_LIT:1>)<EOL>self._regs['<STR_LIT>'] = Register(<NUM_LIT:1>)<EOL>self._regs['<STR_LIT>'] = Register(<NUM_LIT:1>)<EOL>self._regs['<STR_LIT>'] = Register(<NUM_LIT:4>)<EOL>self._regs['<STR_LIT>'] = Register(<NUM_LIT:32>)<EOL>", "docstring": "ARM Register file abstraction. GPRs use ints for read/write. APSR\nflags allow writes of bool/{1, 0} but always read bools.", "id": "f16977:c1:m0"}
{"signature": "def _swap_mode(self):", "body": "assert self.mode in (cs.CS_MODE_ARM, cs.CS_MODE_THUMB)<EOL>if self.mode == cs.CS_MODE_ARM:<EOL><INDENT>self.mode = cs.CS_MODE_THUMB<EOL><DEDENT>else:<EOL><INDENT>self.mode = cs.CS_MODE_ARM<EOL><DEDENT>", "docstring": "Toggle between ARM and Thumb mode", "id": "f16977:c4:m6"}
{"signature": "@instruction<EOL><INDENT>def ADDW(cpu, dest, src, add):<DEDENT>", "body": "aligned_pc = (cpu.instruction.address + <NUM_LIT:4>) & <NUM_LIT><EOL>if src.type == '<STR_LIT>' and src.reg in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>src = aligned_pc<EOL><DEDENT>else:<EOL><INDENT>src = src.read()<EOL><DEDENT>dest.write(src + add.read())<EOL>", "docstring": "This instruction adds an immediate value to a register value, and writes the result to the destination register.\nIt doesn't update the condition flags.\n\n:param ARMv7Operand dest: Specifies the destination register. If omitted, this register is the same as src.\n:param ARMv7Operand src:\n    Specifies the register that contains the first operand. If the SP is specified for dest, see ADD (SP plus\n    immediate). If the PC is specified for dest, see ADR.\n:param ARMv7Operand add:\n    Specifies the immediate value to be added to the value obtained from src. The range of allowed values is\n    0-4095.", "id": "f16977:c4:m55"}
{"signature": "def _UXT(cpu, dest, src, src_width):", "body": "val = GetNBits(src.read(), src_width)<EOL>word = Operators.ZEXTEND(val, cpu.address_bit_size)<EOL>dest.write(word)<EOL>", "docstring": "Helper for UXT* family of instructions.\n\n:param ARMv7Operand dest: the destination register; register\n:param ARMv7Operand dest: the source register; register\n:param int src_width: bits to consider of the src operand", "id": "f16977:c4:m31"}
{"signature": "@instruction<EOL><INDENT>def STRD(cpu, src1, src2, dest, offset=None):<DEDENT>", "body": "assert src1.type == '<STR_LIT>'<EOL>assert src2.type == '<STR_LIT>'<EOL>assert dest.type == '<STR_LIT>'<EOL>val1 = src1.read()<EOL>val2 = src2.read()<EOL>writeback = cpu._compute_writeback(dest, offset)<EOL>cpu.write_int(dest.address(), val1, <NUM_LIT:32>)<EOL>cpu.write_int(dest.address() + <NUM_LIT:4>, val2, <NUM_LIT:32>)<EOL>cpu._cs_hack_ldr_str_writeback(dest, offset, writeback)<EOL>", "docstring": "Writes the contents of two registers to memory.", "id": "f16977:c4:m28"}
{"signature": "def _SR(cpu, insn_id, dest, op, *rest):", "body": "assert insn_id in (cs.arm.ARM_INS_ASR, cs.arm.ARM_INS_LSL, cs.arm.ARM_INS_LSR)<EOL>if insn_id == cs.arm.ARM_INS_ASR:<EOL><INDENT>if rest and rest[<NUM_LIT:0>].type == '<STR_LIT>':<EOL><INDENT>srtype = cs.arm.ARM_SFT_ASR<EOL><DEDENT>else:<EOL><INDENT>srtype = cs.arm.ARM_SFT_ASR_REG<EOL><DEDENT><DEDENT>elif insn_id == cs.arm.ARM_INS_LSL:<EOL><INDENT>if rest and rest[<NUM_LIT:0>].type == '<STR_LIT>':<EOL><INDENT>srtype = cs.arm.ARM_SFT_LSL<EOL><DEDENT>else:<EOL><INDENT>srtype = cs.arm.ARM_SFT_LSL_REG<EOL><DEDENT><DEDENT>elif insn_id == cs.arm.ARM_INS_LSR:<EOL><INDENT>if rest and rest[<NUM_LIT:0>].type == '<STR_LIT>':<EOL><INDENT>srtype = cs.arm.ARM_SFT_LSR<EOL><DEDENT>else:<EOL><INDENT>srtype = cs.arm.ARM_SFT_LSR_REG<EOL><DEDENT><DEDENT>carry = cpu.regfile.read('<STR_LIT>')<EOL>if rest and rest[<NUM_LIT:0>].type == '<STR_LIT>':<EOL><INDENT>result, carry = cpu._shift(op.read(), srtype, rest[<NUM_LIT:0>].op.reg, carry)<EOL><DEDENT>elif rest and rest[<NUM_LIT:0>].type == '<STR_LIT>':<EOL><INDENT>amount = rest[<NUM_LIT:0>].read()<EOL>result, carry = cpu._shift(op.read(), srtype, amount, carry)<EOL><DEDENT>elif cpu.mode == cs.CS_MODE_THUMB:<EOL><INDENT>result, carry = cpu._shift(dest.read(), srtype, op, carry)<EOL><DEDENT>else:<EOL><INDENT>result, carry = op.read(with_carry=True)<EOL><DEDENT>dest.write(result)<EOL>cpu.set_flags(N=HighBit(result), Z=(result == <NUM_LIT:0>), C=carry)<EOL>", "docstring": "Notes on Capstone behavior:\n- In ARM mode, _SR reg has `rest`, but _SR imm does not, its baked into `op`.\n- In ARM mode, `lsr r1, r2` will have a `rest[0]`\n- In Thumb mode, `lsr r1, r2` will have an empty `rest`\n- In ARM mode, something like `lsr r1, 3` will not have `rest` and op will be\n    the immediate.", "id": "f16977:c4:m92"}
{"signature": "def _write_APSR(self, apsr):", "body": "V = Operators.EXTRACT(apsr, <NUM_LIT>, <NUM_LIT:1>)<EOL>C = Operators.EXTRACT(apsr, <NUM_LIT>, <NUM_LIT:1>)<EOL>Z = Operators.EXTRACT(apsr, <NUM_LIT:30>, <NUM_LIT:1>)<EOL>N = Operators.EXTRACT(apsr, <NUM_LIT>, <NUM_LIT:1>)<EOL>self.write('<STR_LIT>', V)<EOL>self.write('<STR_LIT>', C)<EOL>self.write('<STR_LIT>', Z)<EOL>self.write('<STR_LIT>', N)<EOL>", "docstring": "Auxiliary function - Writes flags from a full APSR (only 4 msb used)", "id": "f16977:c1:m2"}
{"signature": "@instruction<EOL><INDENT>def STREX(cpu, status, *args):<DEDENT>", "body": "<EOL>status.write(<NUM_LIT:0>)<EOL>return cpu._STR(cpu.address_bit_size, *args)<EOL>", "docstring": "STREX performs a conditional store to memory.\n:param Armv7Operand status: the destination register for the returned status; register", "id": "f16977:c4:m30"}
{"signature": "@instruction<EOL><INDENT>def LDRD(cpu, dest1, dest2, src, offset=None):<DEDENT>", "body": "assert dest1.type == '<STR_LIT>'<EOL>assert dest2.type == '<STR_LIT>'<EOL>assert src.type == '<STR_LIT>'<EOL>mem1 = cpu.read_int(src.address(), <NUM_LIT:32>)<EOL>mem2 = cpu.read_int(src.address() + <NUM_LIT:4>, <NUM_LIT:32>)<EOL>writeback = cpu._compute_writeback(src, offset)<EOL>dest1.write(mem1)<EOL>dest2.write(mem2)<EOL>cpu._cs_hack_ldr_str_writeback(src, offset, writeback)<EOL>", "docstring": "Loads double width data from memory.", "id": "f16977:c4:m27"}
{"signature": "def _STM(cpu, insn_id, base, regs):", "body": "if cpu.instruction.usermode:<EOL><INDENT>raise NotImplementedError(\"<STR_LIT>\")<EOL><DEDENT>increment = insn_id in (cs.arm.ARM_INS_STM, cs.arm.ARM_INS_STMIB)<EOL>after = insn_id in (cs.arm.ARM_INS_STM, cs.arm.ARM_INS_STMDA)<EOL>address = base.read()<EOL>for reg in regs:<EOL><INDENT>if not after:<EOL><INDENT>address += (<NUM_LIT:1> if increment else -<NUM_LIT:1>) * (reg.size // <NUM_LIT:8>)<EOL><DEDENT>cpu.write_int(address, reg.read(), reg.size)<EOL>if after:<EOL><INDENT>address += (<NUM_LIT:1> if increment else -<NUM_LIT:1>) * (reg.size // <NUM_LIT:8>)<EOL><DEDENT><DEDENT>if cpu.instruction.writeback:<EOL><INDENT>base.writeback(address)<EOL><DEDENT>", "docstring": "STM (Store Multiple) stores a non-empty subset (or possibly all) of the general-purpose registers to\nsequential memory locations.\n\n:param int insn_id: should be one of ARM_INS_STM, ARM_INS_STMIB, ARM_INS_STMDA, ARM_INS_STMDB\n:param Armv7Operand base: Specifies the base register.\n:param list[Armv7Operand] regs:\n    Is a list of registers. It specifies the set of registers to be stored by the STM instruction.\n    The registers are stored in sequence, the lowest-numbered register to the lowest\n    memory address (start_address), through to the highest-numbered register to the\n    highest memory address (end_address).", "id": "f16977:c4:m78"}
{"signature": "@property<EOL><INDENT>def type(self):<DEDENT>", "body": "return self.__type<EOL>", "docstring": "Corresponds to capstone's `operand.type` (cs.arm.ARM_OP_*).", "id": "f16977:c0:m1"}
{"signature": "def concrete_emulate(self, insn):", "body": "if not self.emu:<EOL><INDENT>self.emu = ConcreteUnicornEmulator(self)<EOL>self.emu._stop_at = self._break_unicorn_at<EOL><DEDENT>try:<EOL><INDENT>self.emu.emulate(insn)<EOL><DEDENT>except unicorn.UcError as e:<EOL><INDENT>if e.errno == unicorn.UC_ERR_INSN_INVALID:<EOL><INDENT>text_bytes = '<STR_LIT:U+0020>'.join('<STR_LIT>' % x for x in insn.bytes)<EOL>logger.error(\"<STR_LIT>\",<EOL>insn.address, text_bytes, insn.mnemonic, insn.op_str)<EOL><DEDENT>raise InstructionEmulationError(str(e))<EOL><DEDENT>", "docstring": "Start executing in Unicorn from this point until we hit a syscall or reach break_unicorn_at\n\n:param capstone.CsInsn insn: The instruction object to emulate", "id": "f16980:c13:m31"}
{"signature": "def push_bytes(self, data, force=False):", "body": "self.STACK -= len(data)<EOL>self.write_bytes(self.STACK, data, force)<EOL>return self.STACK<EOL>", "docstring": "Write `data` to the stack and decrement the stack pointer accordingly.\n\n:param str data: Data to write\n:param force: whether to ignore memory permissions", "id": "f16980:c13:m20"}
{"signature": "def canonicalize_instruction_name(self, instruction):", "body": "raise NotImplemented<EOL>", "docstring": "Get the semantic name of an instruction.", "id": "f16980:c13:m27"}
{"signature": "def execute(self):", "body": "if issymbolic(self.PC):<EOL><INDENT>raise ConcretizeRegister(self, '<STR_LIT>', policy='<STR_LIT>')<EOL><DEDENT>if not self.memory.access_ok(self.PC, '<STR_LIT:x>'):<EOL><INDENT>raise InvalidMemoryAccess(self.PC, '<STR_LIT:x>')<EOL><DEDENT>self._publish('<STR_LIT>', self.PC)<EOL>insn = self.decode_instruction(self.PC)<EOL>self._last_pc = self.PC<EOL>self._publish('<STR_LIT>', self.PC, insn)<EOL>if insn.address != self.PC:<EOL><INDENT>return<EOL><DEDENT>name = self.canonicalize_instruction_name(insn)<EOL>if logger.level == logging.DEBUG:<EOL><INDENT>logger.debug(self.render_instruction(insn))<EOL>for l in self.render_registers():<EOL><INDENT>register_logger.debug(l)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>if self._concrete and '<STR_LIT>' in name:<EOL><INDENT>self.emu.sync_unicorn_to_manticore()<EOL><DEDENT>if self._concrete and '<STR_LIT>' not in name:<EOL><INDENT>self.emulate(insn)<EOL>if self.PC == self._break_unicorn_at:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self._break_unicorn_at = None<EOL>self._concrete = False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>implementation = getattr(self, name, None)<EOL>if implementation is not None:<EOL><INDENT>implementation(*insn.operands)<EOL><DEDENT>else:<EOL><INDENT>text_bytes = '<STR_LIT:U+0020>'.join('<STR_LIT>' % x for x in insn.bytes)<EOL>logger.warning(\"<STR_LIT>\",<EOL>insn.address, text_bytes, insn.mnemonic, insn.op_str)<EOL>self.backup_emulate(insn)<EOL><DEDENT><DEDENT><DEDENT>except (Interruption, Syscall) as e:<EOL><INDENT>e.on_handled = lambda: self._publish_instruction_as_executed(insn)<EOL>raise e<EOL><DEDENT>else:<EOL><INDENT>self._publish_instruction_as_executed(insn)<EOL><DEDENT>", "docstring": "Decode, and execute one instruction pointed by register PC", "id": "f16980:c13:m28"}
{"signature": "def _raw_read(self, where: int, size=<NUM_LIT:1>) -> bytes:", "body": "map = self.memory.map_containing(where)<EOL>start = map._get_offset(where)<EOL>mapType = type(map)<EOL>if mapType is FileMap:<EOL><INDENT>end = map._get_offset(where + size)<EOL>if end > map._mapped_size:<EOL><INDENT>logger.warning(f\"<STR_LIT>\")<EOL><DEDENT>raw_data = map._data[map._get_offset(where): min(end, map._mapped_size)]<EOL>if len(raw_data) < end:<EOL><INDENT>raw_data += b'<STR_LIT:\\x00>' * (end - len(raw_data))<EOL><DEDENT>data = b'<STR_LIT>'<EOL>for offset in sorted(map._overlay.keys()):<EOL><INDENT>data += raw_data[len(data):offset]<EOL>data += map._overlay[offset]<EOL><DEDENT>data += raw_data[len(data):]<EOL><DEDENT>elif mapType is AnonMap:<EOL><INDENT>data = bytes(map._data[start:start + size])<EOL><DEDENT>else:<EOL><INDENT>data = b'<STR_LIT>'.join(self.memory[where:where + size])<EOL><DEDENT>assert len(data) == size, '<STR_LIT>'<EOL>return data<EOL>", "docstring": "Selects bytes from memory. Attempts to do so faster than via read_bytes.\n\n:param where: address to read from\n:param size: number of bytes to read\n:return: the bytes in memory", "id": "f16980:c13:m14"}
{"signature": "def push_int(self, value, force=False):", "body": "self.STACK -= self.address_bit_size // <NUM_LIT:8><EOL>self.write_int(self.STACK, value, force=force)<EOL>return self.STACK<EOL>", "docstring": "Decrement the stack pointer and write `value` to the stack.\n\n:param int value: The value to write\n:param force: whether to ignore memory permissions\n:return: New stack pointer", "id": "f16980:c13:m22"}
{"signature": "def syscall_number(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Extract the index of the invoked syscall.\n\n:return: int", "id": "f16980:c12:m0"}
{"signature": "def write_int(self, where, expression, size=None, force=False):", "body": "if size is None:<EOL><INDENT>size = self.address_bit_size<EOL><DEDENT>assert size in SANE_SIZES<EOL>self._publish('<STR_LIT>', where, expression, size)<EOL>data = [Operators.CHR(Operators.EXTRACT(expression, offset, <NUM_LIT:8>)) for offset in range(<NUM_LIT:0>, size, <NUM_LIT:8>)]<EOL>self._memory.write(where, data, force)<EOL>self._publish('<STR_LIT>', where, expression, size)<EOL>", "docstring": "Writes int to memory\n\n:param int where: address to write to\n:param expr: value to write\n:type expr: int or BitVec\n:param size: bit size of `expr`\n:param force: whether to ignore memory permissions", "id": "f16980:c13:m13"}
{"signature": "def _wrap_operands(self, operands):", "body": "raise NotImplementedError<EOL>", "docstring": "Private method to decorate an Operand to our needs based on the\nunderlying architecture.\nSee :class:`~manticore.core.cpu.abstractcpu.Operand` class", "id": "f16980:c13:m24"}
{"signature": "@property<EOL><INDENT>def all_registers(self):<DEDENT>", "body": "return self._regfile.all_registers<EOL>", "docstring": "Returns all register names for this CPU. Any register returned can be\naccessed via a `cpu.REG` convenience interface (e.g. `cpu.EAX`) for both\nreading and writing.\n\n:return: valid register names\n:rtype: tuple[str]", "id": "f16980:c13:m5"}
{"signature": "def get_arguments(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Extract model arguments conforming to `convention`. Produces an iterable\nof argument descriptors following the calling convention. A descriptor\nis either a string describing a register, or an address (concrete or\nsymbolic).\n\n:return: iterable returning syscall arguments.\n:rtype: iterable", "id": "f16980:c11:m1"}
{"signature": "def _publish_instruction_as_executed(self, insn):", "body": "self._icount += <NUM_LIT:1><EOL>self._publish('<STR_LIT>', self._last_pc, self.PC, insn)<EOL>", "docstring": "Notify listeners that an instruction has been executed.", "id": "f16980:c13:m29"}
{"signature": "def write_result(self, result):", "body": "raise NotImplementedError<EOL>", "docstring": "Write the result of a model back to the environment.\n\n:param result: result of the model implementation", "id": "f16980:c11:m2"}
{"signature": "def __init__(self, cpu, op):", "body": "assert isinstance(cpu, Cpu)<EOL>self.cpu = cpu<EOL>self.op = op<EOL>self.mem = Operand.MemSpec(self)<EOL>", "docstring": "This encapsulates the arch-independent way to access instruction\noperands and immediates based on the disassembler operand descriptor in\nuse. This class knows how to browse an operand and get its details.\n\nIt also knows how to access the specific Cpu to get the actual values\nfrom memory and registers.\n\n:param Cpu cpu: A Cpu instance\n:param Operand op: An wrapped Instruction Operand\n:type op: X86Op or ArmOp", "id": "f16980:c9:m0"}
{"signature": "def values_from(self, base):", "body": "word_bytes = self._cpu.address_bit_size // <NUM_LIT:8><EOL>while True:<EOL><INDENT>yield base<EOL>base += word_bytes<EOL><DEDENT>", "docstring": "A reusable generator for increasing pointer-sized values from an address\n(usually the stack).", "id": "f16980:c11:m4"}
{"signature": "def _alias(self, register):", "body": "return self._aliases.get(register, register)<EOL>", "docstring": "Get register canonical alias. ex. PC->RIP or PC->R15\n\n:param str register: The register name", "id": "f16980:c10:m1"}
{"signature": "def read_int(self, where, size=None, force=False):", "body": "if size is None:<EOL><INDENT>size = self.address_bit_size<EOL><DEDENT>assert size in SANE_SIZES<EOL>self._publish('<STR_LIT>', where, size)<EOL>data = self._memory.read(where, size // <NUM_LIT:8>, force)<EOL>assert (<NUM_LIT:8> * len(data)) == size<EOL>value = Operators.CONCAT(size, *map(Operators.ORD, reversed(data)))<EOL>self._publish('<STR_LIT>', where, value, size)<EOL>return value<EOL>", "docstring": "Reads int from memory\n\n:param int where: address to read from\n:param size: number of bits to read\n:return: the value read\n:rtype: int or BitVec\n:param force: whether to ignore memory permissions", "id": "f16980:c13:m15"}
{"signature": "def write_register(self, register, value):", "body": "self._publish('<STR_LIT>', register, value)<EOL>value = self._regfile.write(register, value)<EOL>self._publish('<STR_LIT>', register, value)<EOL>return value<EOL>", "docstring": "Dynamic interface for writing cpu registers\n\n:param str register: register name (as listed in `self.all_registers`)\n:param value: register value\n:type value: int or long or Expression", "id": "f16980:c13:m7"}
{"signature": "@property<EOL><INDENT>def canonical_registers(self):<DEDENT>", "body": "raise NotImplementedError<EOL>", "docstring": "List the minimal most beautiful set of registers needed", "id": "f16980:c10:m5"}
{"signature": "def pop_int(self, force=False):", "body": "value = self.read_int(self.STACK, force=force)<EOL>self.STACK += self.address_bit_size // <NUM_LIT:8><EOL>return value<EOL>", "docstring": "Read a value from the stack and increment the stack pointer.\n\n:param force: whether to ignore memory permissions\n:return: Value read", "id": "f16980:c13:m23"}
{"signature": "def pop_bytes(self, nbytes, force=False):", "body": "data = self.read_bytes(self.STACK, nbytes, force=force)<EOL>self.STACK += nbytes<EOL>return data<EOL>", "docstring": "Read `nbytes` from the stack, increment the stack pointer, and return\ndata.\n\n:param int nbytes: How many bytes to read\n:param force: whether to ignore memory permissions\n:return: Data read from the stack", "id": "f16980:c13:m21"}
{"signature": "@abstractmethod<EOL><INDENT>def disassemble_instruction(self, code, pc):<DEDENT>", "body": "", "docstring": "Get next instruction based on the disassembler in use\n\n        :param str code: binary blob to be disassembled\n        :param long pc: program counter", "id": "f16981:c1:m1"}
{"signature": "def invoke_model(self, model):", "body": "self._platform.invoke_model(model, prefix_args=(self,))<EOL>", "docstring": "Invokes a `model`. Modelling can be used to override a function in the target program with a custom\nimplementation.\n\nFor more information on modelling see docs/models.rst\n\nA `model` is a callable whose first argument is a `manticore.native.State` instance.\nIf the following arguments correspond to the arguments of the C function\nbeing modeled. If the `model` models a variadic function, the following argument\nis a generator object, which can be used to access function arguments dynamically.\nThe `model` callable should simply return the value that should be returned by the\nnative function being modeled.f\n\n:param model: callable, model to invoke", "id": "f16982:c0:m3"}
{"signature": "def isvariadic(model):", "body": "return getattr(model, VARIADIC_FUNC_ATTR, False)<EOL>", "docstring": ":param callable model: Function model\n:return: Whether `model` models a variadic function\n:rtype: bool", "id": "f16983:m0"}
{"signature": "def strlen(state, s):", "body": "cpu = state.cpu<EOL>if issymbolic(s):<EOL><INDENT>raise ConcretizeArgument(state.cpu, <NUM_LIT:1>)<EOL><DEDENT>zero_idx = _find_zero(cpu, state.constraints, s)<EOL>ret = zero_idx<EOL>for offset in range(zero_idx - <NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>):<EOL><INDENT>byt = cpu.read_int(s + offset, <NUM_LIT:8>)<EOL>if issymbolic(byt):<EOL><INDENT>ret = ITEBV(cpu.address_bit_size, byt == <NUM_LIT:0>, offset, ret)<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "strlen symbolic model.\n\nAlgorithm: Walks from end of string not including NULL building ITE tree when current byte is symbolic.\n\n:param State state: current program state\n:param int s: Address of string\n:return: Symbolic strlen result\n:rtype: Expression or int", "id": "f16983:m4"}
{"signature": "@staticmethod<EOL><INDENT>def verbosity(level):<DEDENT>", "body": "log.set_verbosity(level)<EOL>logger.info(f'<STR_LIT>')<EOL>", "docstring": "Convenience interface for setting logging verbosity to one of\n        several predefined logging presets. Valid values: 0-5.", "id": "f16984:c0:m10"}
{"signature": "def shutdown(self):", "body": "self._executor.shutdown()<EOL>", "docstring": "Gracefully terminate the currently-executing run. Typically called from within\na :func:`~hook`.", "id": "f16984:c0:m25"}
{"signature": "def is_shutdown(self):", "body": "return self._executor.is_shutdown()<EOL>", "docstring": "Returns True if shutdown was requested", "id": "f16984:c0:m26"}
{"signature": "def migrate(self, expression, name_migration_map=None):", "body": "if name_migration_map is None:<EOL><INDENT>name_migration_map = {}<EOL><DEDENT>object_migration_map = {}<EOL>foreign_vars = itertools.filterfalse(self.is_declared, get_variables(expression))<EOL>for foreign_var in foreign_vars:<EOL><INDENT>if foreign_var.name in name_migration_map:<EOL><INDENT>migrated_name = name_migration_map[foreign_var.name]<EOL>native_var = self.get_variable(migrated_name)<EOL>assert native_var is not None, \"<STR_LIT>\"<EOL>object_migration_map[foreign_var] = native_var<EOL><DEDENT>else:<EOL><INDENT>migrated_name = foreign_var.name<EOL>if migrated_name in self._declarations:<EOL><INDENT>migrated_name = self._make_unique_name(f'<STR_LIT>')<EOL><DEDENT>if isinstance(foreign_var, Bool):<EOL><INDENT>new_var = self.new_bool(name=migrated_name)<EOL><DEDENT>elif isinstance(foreign_var, BitVec):<EOL><INDENT>new_var = self.new_bitvec(foreign_var.size, name=migrated_name)<EOL><DEDENT>elif isinstance(foreign_var, Array):<EOL><INDENT>new_var = self.new_array(index_max=foreign_var.index_max, index_bits=foreign_var.index_bits, value_bits=foreign_var.value_bits, name=migrated_name).array<EOL><DEDENT>else:<EOL><INDENT>raise NotImplemented(f\"<STR_LIT>\")<EOL><DEDENT>object_migration_map[foreign_var] = new_var<EOL>name_migration_map[foreign_var.name] = new_var.name<EOL><DEDENT><DEDENT>migrated_expression = replace(expression, object_migration_map)<EOL>return migrated_expression<EOL>", "docstring": "Migrate an expression created for a different constraint set to self.\n            Returns an expression that can be used with this constraintSet\n\n            All the foreign variables used in the expression are replaced by\n            variables of this constraint set. 
If the variable was replaced before\n            the replacement is taken from the provided migration map.\n\n            The migration mapping is updated with new replacements.\n\n            :param expression: the potentially foreign expression\n            :param name_migration_map: mapping of already migrated variables. maps from string name of foreign variable to its currently existing migrated string name. this is updated during this migration.\n            :return: a migrated expression where all the variables are local. name_migration_map is updated", "id": "f16985:c0:m18"}
{"signature": "def get_declared_variables(self):", "body": "return self._declarations.values()<EOL>", "docstring": "Returns the variable expressions of this constraint set", "id": "f16985:c0:m10"}
{"signature": "def is_declared(self, expression_var):", "body": "if not isinstance(expression_var, Variable):<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT>return any(expression_var is x for x in self.get_declared_variables())<EOL>", "docstring": "True if expression_var is declared in this constraint set", "id": "f16985:c0:m17"}
{"signature": "def __str__(self):", "body": "return self.to_string()<EOL>", "docstring": "Returns a smtlib representation of the current state", "id": "f16985:c0:m15"}
{"signature": "def new_array(self, index_bits=<NUM_LIT:32>, name=None, index_max=None, value_bits=<NUM_LIT:8>, taint=frozenset(), avoid_collisions=False, default=None):", "body": "if name is None:<EOL><INDENT>name = '<STR_LIT:A>'<EOL>avoid_collisions = True<EOL><DEDENT>if avoid_collisions:<EOL><INDENT>name = self._make_unique_name(name)<EOL><DEDENT>if not avoid_collisions and name in self._declarations:<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT>var = self._declare(ArrayVariable(index_bits, index_max, value_bits, name, taint=taint))<EOL>return ArrayProxy(var, default=default)<EOL>", "docstring": "Declares a free symbolic array of value_bits long bitvectors in the constraint store.\n            :param index_bits: size in bits for the array indexes one of [32, 64]\n            :param value_bits: size in bits for the array values\n            :param name: try to assign name to internal variable representation,\n                         if not unique, a numeric nonce will be appended\n            :param index_max: upper limit for indexes on this array (#FIXME)\n            :param avoid_collisions: potentially avoid_collisions the variable to avoid name collisions if True\n            :param default: default for not initialized values\n            :return: a fresh ArrayProxy", "id": "f16985:c0:m21"}
{"signature": "def get_variable(self, name):", "body": "return self._declarations.get(name)<EOL>", "docstring": "Returns the variable declared under name or None if it does not exists", "id": "f16985:c0:m11"}
{"signature": "def min(self, constraints, X: BitVec, M=<NUM_LIT>):", "body": "assert isinstance(X, BitVec)<EOL>return self.optimize(constraints, X, '<STR_LIT>', M)<EOL>", "docstring": "Iteratively finds the minimum value for a symbol within given constraints.\n\n:param constraints: constraints that the expression must fulfil\n:param X: a symbol or expression\n:param M: maximum number of iterations allowed", "id": "f16986:c0:m8"}
{"signature": "def __init__(self):", "body": "super().__init__()<EOL>self._proc: Popen = None<EOL>self._command = f'<STR_LIT>'<EOL>self._init = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>]<EOL>self._get_value_fmt = (RE_GET_EXPR_VALUE_FMT, <NUM_LIT:16>)<EOL>self.debug = False<EOL>self._received_version = None<EOL>self.version = self._solver_version()<EOL>self.support_maximize = False<EOL>self.support_minimize = False<EOL>self.support_reset = True<EOL>logger.debug('<STR_LIT>', self.version)<EOL>if self.version >= Version(<NUM_LIT:4>, <NUM_LIT:5>, <NUM_LIT:0>):<EOL><INDENT>self.support_maximize = False<EOL>self.support_minimize = False<EOL>self.support_reset = False<EOL><DEDENT>elif self.version >= Version(<NUM_LIT:4>, <NUM_LIT:4>, <NUM_LIT:1>):<EOL><INDENT>self.support_maximize = True<EOL>self.support_minimize = True<EOL>self.support_reset = False<EOL><DEDENT>else:<EOL><INDENT>logger.debug('<STR_LIT>')<EOL><DEDENT>", "docstring": "Build a Z3 solver instance.\nThis is implemented using an external z3 solver (via a subprocess).\nSee https://github.com/Z3Prover/z3", "id": "f16986:c1:m0"}
{"signature": "def _push(self):", "body": "self._send('<STR_LIT>')<EOL>", "docstring": "Pushes and save the current constraint store and state.", "id": "f16986:c1:m14"}
{"signature": "def _start_proc(self):", "body": "assert '<STR_LIT>' not in dir(self) or self._proc is None<EOL>try:<EOL><INDENT>self._proc = Popen(shlex.split(self._command), stdin=PIPE, stdout=PIPE, bufsize=<NUM_LIT:0>, universal_newlines=True)<EOL><DEDENT>except OSError as e:<EOL><INDENT>print(e, \"<STR_LIT>\")<EOL>raise Z3NotFoundError  <EOL><DEDENT>for cfg in self._init:<EOL><INDENT>self._send(cfg)<EOL><DEDENT>", "docstring": "Spawns z3 solver process", "id": "f16986:c1:m2"}
{"signature": "def optimize(self, constraints, X, operation, M=<NUM_LIT>):", "body": "raise Exception(\"<STR_LIT>\")<EOL>", "docstring": "Iteratively finds the maximum or minimal value for the operation\n(Normally Operators.UGT or Operators.ULT)\n\n:param constraints: the constraints set\n:param X: a symbol or expression\n:param M: maximum number of iterations allowed", "id": "f16986:c0:m1"}
{"signature": "def get_all_values(self, constraints, x, maxcnt=<NUM_LIT>, silent=False):", "body": "raise Exception(\"<STR_LIT>\")<EOL>", "docstring": "Returns a list with all the possible values for the symbol x", "id": "f16986:c0:m5"}
{"signature": "def _getvalue(self, expression):", "body": "if not issymbolic(expression):<EOL><INDENT>return expression<EOL><DEDENT>assert isinstance(expression, Variable)<EOL>if isinstance(expression, Array):<EOL><INDENT>result = bytearray()<EOL>for c in expression:<EOL><INDENT>expression_str = translate_to_smtlib(c)<EOL>self._send('<STR_LIT>' % expression_str)<EOL>response = self._recv()<EOL>result.append(int('<STR_LIT>'.format(response.split(expression_str)[<NUM_LIT:1>][<NUM_LIT:3>:-<NUM_LIT:2>]), <NUM_LIT:16>))<EOL><DEDENT>return bytes(result)<EOL><DEDENT>else:<EOL><INDENT>self._send('<STR_LIT>' % expression.name)<EOL>ret = self._recv()<EOL>assert ret.startswith('<STR_LIT>') and ret.endswith('<STR_LIT>'), ret<EOL>if isinstance(expression, Bool):<EOL><INDENT>return {'<STR_LIT:true>': True, '<STR_LIT:false>': False}[ret[<NUM_LIT:2>:-<NUM_LIT:2>].split('<STR_LIT:U+0020>')[<NUM_LIT:1>]]<EOL><DEDENT>elif isinstance(expression, BitVec):<EOL><INDENT>pattern, base = self._get_value_fmt<EOL>m = pattern.match(ret)<EOL>expr, value = m.group('<STR_LIT>'), m.group('<STR_LIT:value>')<EOL>return int(value, base)<EOL><DEDENT><DEDENT>raise NotImplementedError(\"<STR_LIT>\")<EOL>", "docstring": "Ask the solver for one possible assignment for given expression using current set of constraints.\nThe current set of expressions must be sat.\n\nNOTE: This is an internal method: it uses the current solver state (set of constraints!).", "id": "f16986:c1:m13"}
{"signature": "def _pop(self):", "body": "self._send('<STR_LIT>')<EOL>", "docstring": "Recall the last pushed constraint store and state.", "id": "f16986:c1:m15"}
{"signature": "def _send(self, cmd: str):", "body": "logger.debug('<STR_LIT>', cmd)<EOL>try:<EOL><INDENT>self._proc.stdout.flush()<EOL>self._proc.stdin.write(f'<STR_LIT>')<EOL><DEDENT>except IOError as e:<EOL><INDENT>raise SolverError(str(e))<EOL><DEDENT>", "docstring": "Send a string to the solver.\n\n:param cmd: a SMTLIBv2 command (ex. (check-sat))", "id": "f16986:c1:m8"}
{"signature": "def _reset(self, constraints=None):", "body": "if self._proc is None:<EOL><INDENT>self._start_proc()<EOL><DEDENT>else:<EOL><INDENT>if self.support_reset:<EOL><INDENT>self._send(\"<STR_LIT>\")<EOL>for cfg in self._init:<EOL><INDENT>self._send(cfg)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._stop_proc()<EOL>self._start_proc()<EOL><DEDENT><DEDENT>if constraints is not None:<EOL><INDENT>self._send(constraints)<EOL><DEDENT>", "docstring": "Auxiliary method to reset the smtlib external solver to initial defaults", "id": "f16986:c1:m7"}
{"signature": "def _stop_proc(self):", "body": "if self._proc is None:<EOL><INDENT>return<EOL><DEDENT>if self._proc.returncode is None:<EOL><INDENT>try:<EOL><INDENT>self._send(\"<STR_LIT>\")<EOL><DEDENT>except (SolverError, IOError) as e:<EOL><INDENT>logger.debug(str(e))<EOL><DEDENT>finally:<EOL><INDENT>try:<EOL><INDENT>self._proc.stdin.close()<EOL><DEDENT>except IOError as e:<EOL><INDENT>logger.debug(str(e))<EOL><DEDENT>try:<EOL><INDENT>self._proc.stdout.close()<EOL><DEDENT>except IOError as e:<EOL><INDENT>logger.debug(str(e))<EOL><DEDENT>self._proc.kill()<EOL>self._proc.wait()<EOL><DEDENT><DEDENT>self._proc: Popen = None<EOL>", "docstring": "Stops the z3 solver process by:\n- sending an exit command to it,\n- sending a SIGKILL signal,\n- waiting till the process terminates (so we don't leave a zombie process)", "id": "f16986:c1:m3"}
{"signature": "def _solver_version(self) -> Version:", "body": "self._reset()<EOL>if self._received_version is None:<EOL><INDENT>self._send('<STR_LIT>')<EOL>self._received_version = self._recv()<EOL><DEDENT>key, version = shlex.split(self._received_version[<NUM_LIT:1>:-<NUM_LIT:1>])<EOL>return Version(*map(int, version.split('<STR_LIT:.>')))<EOL>", "docstring": "If we fail to parse the version, we assume z3's output has changed, meaning it's a newer\nversion than what's used now, and therefore ok.\n\nAnticipated version_cmd_output format: 'Z3 version 4.4.2'\n                                       'Z3 version 4.4.5 - 64 bit - build hashcode $Z3GITHASH'", "id": "f16986:c1:m1"}
{"signature": "def can_be_true(self, constraints, expression) -> bool:", "body": "raise Exception(\"<STR_LIT>\")<EOL>", "docstring": "Check if given expression could be valid", "id": "f16986:c0:m3"}
{"signature": "def visit_ArraySelect(self, expression, *operands):", "body": "arr, index = operands<EOL>if isinstance(arr, ArrayVariable):<EOL><INDENT>return<EOL><DEDENT>if isinstance(index, BitVecConstant):<EOL><INDENT>ival = index.value<EOL>while isinstance(arr, ArrayStore) and isinstance(arr._operands[<NUM_LIT:1>], BitVecConstant) and arr._operands[<NUM_LIT:1>]._value != ival:<EOL><INDENT>arr = arr._operands[<NUM_LIT:0>]  <EOL><DEDENT><DEDENT>if isinstance(index, BitVecConstant) and isinstance(arr, ArrayStore) and isinstance(arr.index, BitVecConstant) and arr.index.value == index.value:<EOL><INDENT>return arr.value<EOL><DEDENT>else:<EOL><INDENT>if arr is not expression.array:<EOL><INDENT>return arr.select(index)<EOL><DEDENT><DEDENT>", "docstring": "ArraySelect (ArrayStore((ArrayStore(x0,v0) ...),xn, vn), x0)\n                -> v0", "id": "f16987:c6:m13"}
{"signature": "def visit_BitVecOr(self, expression, *operands):", "body": "left = expression.operands[<NUM_LIT:0>]<EOL>right = expression.operands[<NUM_LIT:1>]<EOL>if isinstance(right, BitVecConstant):<EOL><INDENT>if right.value == <NUM_LIT:0>:<EOL><INDENT>return left<EOL><DEDENT>elif right.value == left.mask:<EOL><INDENT>return right<EOL><DEDENT>elif isinstance(left, BitVecOr):<EOL><INDENT>left_left = left.operands[<NUM_LIT:0>]<EOL>left_right = left.operands[<NUM_LIT:1>]<EOL>if isinstance(right, Constant):<EOL><INDENT>return BitVecOr(left_left, (left_right | right), taint=expression.taint)<EOL><DEDENT><DEDENT><DEDENT>elif isinstance(left, BitVecConstant):<EOL><INDENT>return BitVecOr(right, left, taint=expression.taint)<EOL><DEDENT>", "docstring": "a | 0 => a\n            0 | a => a\n            0xffffffff & a => 0xffffffff\n            a & 0xffffffff => 0xffffffff", "id": "f16987:c6:m10"}
{"signature": "def _method(self, expression, *args):", "body": "assert expression.__class__.__mro__[-<NUM_LIT:1>] is object<EOL>for cls in expression.__class__.__mro__:<EOL><INDENT>sort = cls.__name__<EOL>methodname = '<STR_LIT>' % sort<EOL>method = getattr(self, methodname, None)<EOL>if method is not None:<EOL><INDENT>method(expression, *args)<EOL>return<EOL><DEDENT><DEDENT>return<EOL>", "docstring": "Overload Visitor._method because we want to stop to iterate over the\nvisit_ functions as soon as a valid visit_ function is found", "id": "f16987:c4:m3"}
{"signature": "def visit_BitVecShiftLeft(self, expression, *operands):", "body": "left = expression.operands[<NUM_LIT:0>]<EOL>right = expression.operands[<NUM_LIT:1>]<EOL>if isinstance(right, BitVecConstant):<EOL><INDENT>if right.value == <NUM_LIT:0>:<EOL><INDENT>return left<EOL><DEDENT>elif right.value >= right.size:<EOL><INDENT>return left<EOL><DEDENT><DEDENT>", "docstring": "a << 0 => a                       remove zero\n            a << ct => 0 if ct > sizeof(a)    remove big constant shift", "id": "f16987:c6:m12"}
{"signature": "def visit_BitVecAnd(self, expression, *operands):", "body": "left = expression.operands[<NUM_LIT:0>]<EOL>right = expression.operands[<NUM_LIT:1>]<EOL>if isinstance(right, BitVecConstant):<EOL><INDENT>if right.value == <NUM_LIT:0>:<EOL><INDENT>return right<EOL><DEDENT>elif right.value == right.mask:<EOL><INDENT>return left<EOL><DEDENT>elif isinstance(left, BitVecAnd):<EOL><INDENT>left_left = left.operands[<NUM_LIT:0>]<EOL>left_right = left.operands[<NUM_LIT:1>]<EOL>if isinstance(right, Constant):<EOL><INDENT>return BitVecAnd(left_left, left_right & right, taint=expression.taint)<EOL><DEDENT><DEDENT>elif isinstance(left, BitVecOr):<EOL><INDENT>left_left = left.operands[<NUM_LIT:0>]<EOL>left_right = left.operands[<NUM_LIT:1>]<EOL>return BitVecOr(right & left_left, right & left_right, taint=expression.taint)<EOL><DEDENT><DEDENT>elif isinstance(left, BitVecConstant):<EOL><INDENT>return BitVecAnd(right, left, taint=expression.taint)<EOL><DEDENT>", "docstring": "ct & x => x & ct                move constants to the right\n            a & 0 => 0                      remove zero\n            a & 0xffffffff => a             remove full mask\n            (b & ct2) & ct => b & (ct&ct2)  associative property\n            (a & (b | c) => a&b | a&c       distribute over |", "id": "f16987:c6:m11"}
{"signature": "@contextmanager<EOL><INDENT>def save_stream(self, key, binary=False):<DEDENT>", "body": "mode = '<STR_LIT:wb>' if binary else '<STR_LIT:w>'<EOL>with open(os.path.join(self.uri, key), mode) as f:<EOL><INDENT>yield f<EOL><DEDENT>", "docstring": "Yield a file object representing `key`\n\n:param str key: The file to save to\n:param bool binary: Whether we should treat it as binary\n:return:", "id": "f16991:c2:m1"}
{"signature": "def __init__(self, desc=None):", "body": "self._named_key_prefix = '<STR_LIT:test>'<EOL>self._descriptor = desc<EOL>self._store = Store.fromdescriptor(desc)<EOL>self._last_id = <NUM_LIT:0><EOL>self._id_gen = manager().Value('<STR_LIT:i>', self._last_id)<EOL>self._lock = manager().Condition(manager().RLock())<EOL>", "docstring": "Create an object capable of producing Manticore output.\n\n:param desc: A descriptor ('type:uri') of where to write output.", "id": "f16991:c6:m0"}
{"signature": "def save_value(self, key, value):", "body": "return self._client.set(key, value)<EOL>", "docstring": "Save an arbitrary, serializable `value` under `key`.\n\n:param str key: A string identifier under which to store the value.\n:param value: A serializable value\n:return:", "id": "f16991:c4:m1"}
{"signature": "def rm(self, key):", "body": "path = os.path.join(self.uri, key)<EOL>os.remove(path)<EOL>", "docstring": "Remove file identified by `key`.\n\n:param str key: The file to delete", "id": "f16991:c2:m3"}
{"signature": "def load_value(self, key):", "body": "return self._client.get(key)<EOL>", "docstring": "Load an arbitrary value identified by `key`.\n\n:param str key: The key that identifies the value\n:return: The loaded value", "id": "f16991:c4:m2"}
{"signature": "def sync(f):", "body": "def new_function(self, *args, **kw):<EOL><INDENT>self._lock.acquire()<EOL>try:<EOL><INDENT>return f(self, *args, **kw)<EOL><DEDENT>finally:<EOL><INDENT>self._lock.release()<EOL><DEDENT><DEDENT>return new_function<EOL>", "docstring": "Synchronization decorator.", "id": "f16991:m1"}
{"signature": "@contextmanager<EOL><INDENT>def _named_stream(self, name, binary=False):<DEDENT>", "body": "with self._store.save_stream(self._named_key(name), binary=binary) as s:<EOL><INDENT>yield s<EOL><DEDENT>", "docstring": "Create an indexed output stream i.e. 'test_00000001.name'\n\n:param name: Identifier for the stream\n:return: A context-managed stream-like object", "id": "f16991:c6:m7"}
{"signature": "def save_value(self, key, value):", "body": "with self.save_stream(key) as s:<EOL><INDENT>s.write(value)<EOL><DEDENT>", "docstring": "Save an arbitrary, serializable `value` under `key`.\n\n:param str key: A string identifier under which to store the value.\n:param value: A serializable value\n:return:", "id": "f16991:c1:m2"}
{"signature": "def __init__(self, uri=None):", "body": "<EOL>import redis<EOL>hostname, port = uri.split('<STR_LIT::>')<EOL>self._client = redis.StrictRedis(host=hostname, port=int(port), db=<NUM_LIT:0>)<EOL>super().__init__(uri)<EOL>", "docstring": ":param uri: A url for redis", "id": "f16991:c4:m0"}
{"signature": "@contextmanager<EOL><INDENT>def load_stream(self, key, binary=False):<DEDENT>", "body": "value = self.load_value(key, binary=binary)<EOL>yield io.BytesIO(value) if binary else io.StringIO(value)<EOL>", "docstring": "Return a managed file-like object from which the calling code can read\npreviously-serialized data.\n\n:param key:\n:return: A managed stream-like object", "id": "f16991:c1:m5"}
{"signature": "def constrain(self, constraint):", "body": "constraint = self.migrate_expression(constraint)<EOL>self._constraints.add(constraint)<EOL>", "docstring": "Constrain state.\n\n        :param manticore.core.smtlib.Bool constraint: Constraint to add", "id": "f16992:c5:m11"}
{"signature": "def solve_minmax(self, expr):", "body": "if isinstance(expr, int):<EOL><INDENT>return expr<EOL><DEDENT>expr = self.migrate_expression(expr)<EOL>return self._solver.minmax(self._constraints, expr)<EOL>", "docstring": "Solves a symbolic :class:`~manticore.core.smtlib.expression.Expression` into\nits minimum and maximun solution. Only defined for bitvects.\n\n:param manticore.core.smtlib.Expression expr: Symbolic value to solve\n:return: Concrete value\n:rtype: list[int]", "id": "f16992:c5:m25"}
{"signature": "def solve_one(self, expr, constrain=False):", "body": "expr = self.migrate_expression(expr)<EOL>value = self._solver.get_value(self._constraints, expr)<EOL>if constrain:<EOL><INDENT>self.constrain(expr == value)<EOL><DEDENT>if isinstance(value, bytearray):<EOL><INDENT>value = bytes(value)<EOL><DEDENT>return value<EOL>", "docstring": "Concretize a symbolic :class:`~manticore.core.smtlib.expression.Expression` into\none solution.\n\n:param manticore.core.smtlib.Expression expr: Symbolic value to concretize\n:param bool constrain: If True, constrain expr to concretized value\n:return: Concrete value\n:rtype: int", "id": "f16992:c5:m21"}
{"signature": "def symbolicate_buffer(self, data, label='<STR_LIT>', wildcard='<STR_LIT:+>', string=False, taint=frozenset()):", "body": "if wildcard in data:<EOL><INDENT>size = len(data)<EOL>symb = self._constraints.new_array(name=label, index_max=size, taint=taint, avoid_collisions=True)<EOL>self._input_symbols.append(symb)<EOL>tmp = []<EOL>for i in range(size):<EOL><INDENT>if data[i] == wildcard:<EOL><INDENT>tmp.append(symb[i])<EOL><DEDENT>else:<EOL><INDENT>tmp.append(data[i])<EOL><DEDENT><DEDENT>data = tmp<EOL><DEDENT>if string:<EOL><INDENT>for b in data:<EOL><INDENT>if issymbolic(b):<EOL><INDENT>self._constraints.add(b != <NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>assert b != <NUM_LIT:0><EOL><DEDENT><DEDENT><DEDENT>return data<EOL>", "docstring": "Mark parts of a buffer as symbolic (demarked by the wildcard byte)\n\n        :param str data: The string to symbolicate. If no wildcard bytes are provided,\n                this is the identity function on the first argument.\n        :param str label: The label to assign to the value\n        :param str wildcard: The byte that is considered a wildcard\n        :param bool string: Ensure bytes returned can not be NULL\n        :param taint: Taint identifier of the symbolicated data\n        :type taint: tuple or frozenset\n\n        :return: If data does not contain any wildcard bytes, data itself. Otherwise,\n            a list of values derived from data. Non-wildcard bytes are kept as\n            is, wildcard bytes are replaced by Expression objects.", "id": "f16992:c5:m27"}
{"signature": "def solve_n(self, expr, nsolves):", "body": "expr = self.migrate_expression(expr)<EOL>return self._solver.get_all_values(self._constraints, expr, nsolves, silent=True)<EOL>", "docstring": "Concretize a symbolic :class:`~manticore.core.smtlib.expression.Expression` into\n`nsolves` solutions.\n\n:param manticore.core.smtlib.Expression expr: Symbolic value to concretize\n:return: Concrete value\n:rtype: list[int]", "id": "f16992:c5:m22"}
{"signature": "def will_start_run_callback(self, state):", "body": "logger.info('<STR_LIT>')<EOL>", "docstring": "Called once at the beginning of the run.\n            state is the initial root state", "id": "f16993:c7:m5"}
{"signature": "def on_register(self):", "body": "pass<EOL>", "docstring": "Called by parent manticore on registration", "id": "f16993:c0:m3"}
{"signature": "def __init__(self):", "body": "super().__init__()<EOL>self.last_dict = {}<EOL>self.current_pc = None<EOL>self.context_key = '<STR_LIT>'<EOL>", "docstring": "Record a detailed execution trace", "id": "f16993:c2:m0"}
{"signature": "def _dict_diff(d1, d2):", "body": "d = {}<EOL>for key in set(d1).intersection(set(d2)):<EOL><INDENT>if d2[key] != d1[key]:<EOL><INDENT>d[key] = d2[key]<EOL><DEDENT><DEDENT>for key in set(d2).difference(set(d1)):<EOL><INDENT>d[key] = d2[key]<EOL><DEDENT>return d<EOL>", "docstring": "Produce a dict that includes all the keys in d2 that represent different values in d1, as well as values that\naren't in d1.\n\n:param dict d1: First dict\n:param dict d2: Dict to compare with\n:rtype: dict", "id": "f16993:m0"}
{"signature": "def choice(self, state_ids):", "body": "raise NotImplementedError<EOL>", "docstring": "Select a state id from state_ids.\n            self.context has a dict mapping state_ids -> summarize(state)", "id": "f16994:c0:m4"}
{"signature": "def run(self):", "body": "<EOL>current_state = None<EOL>current_state_id = None<EOL>with WithKeyboardInterruptAs(self.shutdown):<EOL><INDENT>self._notify_start_run()<EOL>logger.debug(\"<STR_LIT>\", os.getpid())<EOL>solver = Z3Solver()<EOL>while not self.is_shutdown():<EOL><INDENT>try:  <EOL><INDENT>try:  <EOL><INDENT>if current_state is None:<EOL><INDENT>with self._lock:<EOL><INDENT>self._notify_stop_run()<EOL>try:<EOL><INDENT>current_state_id = self.get()<EOL>if current_state_id is not None:<EOL><INDENT>self._publish('<STR_LIT>', current_state_id)<EOL>current_state = self._workspace.load_state(current_state_id)<EOL>self.forward_events_from(current_state, True)<EOL>self._publish('<STR_LIT>', current_state, current_state_id)<EOL>logger.info(\"<STR_LIT>\", current_state_id)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>self._notify_start_run()<EOL><DEDENT><DEDENT><DEDENT>if current_state is None:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>break<EOL><DEDENT>assert current_state is not None<EOL>assert current_state.constraints is current_state.platform.constraints<EOL>while not self.is_shutdown():<EOL><INDENT>if not current_state.execute():<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._publish('<STR_LIT>', current_state, current_state_id, TerminateState('<STR_LIT>'))<EOL>current_state = None<EOL><DEDENT><DEDENT>except Concretize as e:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>current_state = self.fork(current_state, e.expression, e.policy, e.setstate)<EOL><DEDENT>except TerminateState as e:<EOL><INDENT>self._publish('<STR_LIT>', current_state, current_state_id, e)<EOL>logger.debug(\"<STR_LIT>\")<EOL>if e.testcase:<EOL><INDENT>self._publish('<STR_LIT>', current_state, message=str(e))<EOL><DEDENT>current_state = None<EOL><DEDENT>except SolverError as e:<EOL><INDENT>import traceback<EOL>trace = traceback.format_exc()<EOL>logger.error(\"<STR_LIT>\", str(e), trace)<EOL>self._publish('<STR_LIT>', current_state, current_state_id, e)<EOL>if 
solver.check(current_state.constraints):<EOL><INDENT>self._publish('<STR_LIT>', current_state, message=\"<STR_LIT>\" + str(e))<EOL><DEDENT>current_state = None<EOL><DEDENT><DEDENT>except (Exception, AssertionError) as e:<EOL><INDENT>import traceback<EOL>trace = traceback.format_exc()<EOL>logger.error(\"<STR_LIT>\", str(e), trace)<EOL>self._publish('<STR_LIT>', current_state, current_state_id, e)<EOL>current_state = None<EOL><DEDENT><DEDENT>assert current_state is None or self.is_shutdown()<EOL>self._notify_stop_run()<EOL><DEDENT>", "docstring": "Entry point of the Executor; called by workers to start analysis.", "id": "f16994:c4:m15"}
{"signature": "def list(self):", "body": "return list(self._states)<EOL>", "docstring": "Returns the list of states ids currently queued", "id": "f16994:c4:m13"}
{"signature": "def enqueue(self, state):", "body": "<EOL>state_id = self._workspace.save_state(state)<EOL>self.put(state_id)<EOL>self._publish('<STR_LIT>', state_id, state)<EOL>return state_id<EOL>", "docstring": "Enqueue state.\nSave state on storage, assigns an id to it, then add it to the\npriority queue", "id": "f16994:c4:m4"}
{"signature": "@sync<EOL><INDENT>def put(self, state_id):<DEDENT>", "body": "self._states.append(state_id)<EOL>self._lock.notify_all()<EOL>return state_id<EOL>", "docstring": "Enqueue it for processing", "id": "f16994:c4:m11"}
{"signature": "def is_shutdown(self):", "body": "return self._shutdown.is_set()<EOL>", "docstring": "Returns True if shutdown was requested", "id": "f16994:c4:m10"}
{"signature": "@property<EOL><INDENT>def running(self):<DEDENT>", "body": "return self._running.value<EOL>", "docstring": "Report an estimate  of how many workers are currently running", "id": "f16994:c4:m8"}
{"signature": "@sync<EOL><INDENT>def get(self):<DEDENT>", "body": "<EOL>if self.is_shutdown():<EOL><INDENT>return None<EOL><DEDENT>while len(self._states) == <NUM_LIT:0>:<EOL><INDENT>if self.running == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>if self.is_shutdown():<EOL><INDENT>return None<EOL><DEDENT>logger.debug(\"<STR_LIT>\")<EOL>self._lock.wait()<EOL><DEDENT>state_id = self._policy.choice(list(self._states))<EOL>if state_id is None:<EOL><INDENT>return None<EOL><DEDENT>del self._states[self._states.index(state_id)]<EOL>return state_id<EOL>", "docstring": "Dequeue a state with the max priority", "id": "f16994:c4:m12"}
{"signature": "def p_expression_term(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "expression : term", "id": "f16995:m17"}
{"signature": "def p_expression_and(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>] & p[<NUM_LIT:3>]<EOL>", "docstring": "expression : expression AND expression", "id": "f16995:m10"}
{"signature": "def p_term_num(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "term : NUMBER", "id": "f16995:m19"}
{"signature": "def p_expression_derefseg(p):", "body": "size = sizes[p[<NUM_LIT:1>]]<EOL>address = p[<NUM_LIT:6>]<EOL>seg = functions['<STR_LIT>'](p[<NUM_LIT:3>])<EOL>base, limit, _ = functions['<STR_LIT>'](seg)<EOL>address = base + address<EOL>char_list = functions['<STR_LIT>'](address, size)<EOL>value = Operators.CONCAT(<NUM_LIT:8> * len(char_list), *reversed(map(Operators.ORD, char_list)))<EOL>p[<NUM_LIT:0>] = value<EOL>", "docstring": "expression : TYPE PTR SEGMENT COLOM LBRAKET expression RBRAKET", "id": "f16995:m16"}
{"signature": "def p_expression_deref(p):", "body": "size = sizes[p[<NUM_LIT:1>]]<EOL>address = p[<NUM_LIT:4>]<EOL>char_list = functions['<STR_LIT>'](address, size)<EOL>value = Operators.CONCAT(<NUM_LIT:8> * len(char_list), *reversed(map(Operators.ORD, char_list)))<EOL>p[<NUM_LIT:0>] = value<EOL>", "docstring": "expression : TYPE PTR LBRAKET expression RBRAKET", "id": "f16995:m15"}
{"signature": "def p_expression_eq(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>] == p[<NUM_LIT:3>]<EOL>", "docstring": "expression : expression EQ expression", "id": "f16995:m21"}
{"signature": "def p_expression_gt(p):", "body": "<EOL>p[<NUM_LIT:0>] = Operators.UGT(p[<NUM_LIT:1>], p[<NUM_LIT:3>])<EOL>", "docstring": "expression : expression GT expression", "id": "f16995:m27"}
{"signature": "def p_expression_or(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>] | p[<NUM_LIT:3>]<EOL>", "docstring": "expression : expression OR expression", "id": "f16995:m11"}
{"signature": "def p_expression_mul(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>] * p[<NUM_LIT:3>]<EOL>", "docstring": "expression : expression TIMES expression", "id": "f16995:m7"}
{"signature": "def p_expression_div(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>] // p[<NUM_LIT:3>]<EOL>", "docstring": "expression : expression DIVIDE expression", "id": "f16995:m6"}
{"signature": "def p_factor_expr(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:2>]<EOL>", "docstring": "expression : LPAREN expression RPAREN", "id": "f16995:m18"}
{"signature": "def p_expression_lt(p):", "body": "<EOL>p[<NUM_LIT:0>] = Operators.ULT(p[<NUM_LIT:1>], p[<NUM_LIT:3>])<EOL>", "docstring": "expression : expression LT expression", "id": "f16995:m25"}
{"signature": "def p_expression_minus(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>] - p[<NUM_LIT:3>]<EOL>", "docstring": "expression : expression MINUS expression", "id": "f16995:m9"}
{"signature": "def p_expression_plus(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>] + p[<NUM_LIT:3>]<EOL>", "docstring": "expression : expression PLUS expression", "id": "f16995:m8"}
{"signature": "def t_newline(t):", "body": "t.lexer.lineno += len(t.value)<EOL>", "docstring": "r'\\n+", "id": "f16995:m1"}
{"signature": "def p_expression_ge(p):", "body": "<EOL>p[<NUM_LIT:0>] = Operators.UGE(p[<NUM_LIT:1>], p[<NUM_LIT:3>])<EOL>", "docstring": "expression : expression GE expression", "id": "f16995:m28"}
{"signature": "def p_expression_lor(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>] or p[<NUM_LIT:3>]<EOL>", "docstring": "expression : expression LOR expression", "id": "f16995:m23"}
{"signature": "def p_expression_lnot(p):", "body": "p[<NUM_LIT:0>] = not p[<NUM_LIT:1>]<EOL>", "docstring": "expression : LNOT expression", "id": "f16995:m24"}
{"signature": "def new_address(self):", "body": "all_addresses = self._all_addresses()<EOL>while True:<EOL><INDENT>new_address = random.randint(<NUM_LIT:100>, pow(<NUM_LIT:2>, <NUM_LIT>))<EOL>if new_address not in all_addresses:<EOL><INDENT>return new_address<EOL><DEDENT><DEDENT>", "docstring": "Create a fresh 160bit address", "id": "f16997:c0:m42"}
{"signature": "def preconstraint_for_call_transaction(self, address: Union[int, EVMAccount], data: Array,<EOL>value: Optional[Union[int, Expression]] = None,<EOL>contract_metadata: Optional[SolidityMetadata] = None) -> BoolOperation:", "body": "if isinstance(address, EVMAccount):<EOL><INDENT>address = int(address)<EOL><DEDENT>if not isinstance(address, int):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if not issymbolic(data):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if contract_metadata is None:<EOL><INDENT>contract_metadata = self.metadata.get(address)<EOL>if contract_metadata is None:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>selectors = contract_metadata.function_selectors<EOL>if not selectors or len(data) <= <NUM_LIT:4>:<EOL><INDENT>return BoolConstant(True)<EOL><DEDENT>symbolic_selector = data[:<NUM_LIT:4>]<EOL>value_is_symbolic = issymbolic(value)<EOL>constraint = None<EOL>for selector in selectors:<EOL><INDENT>c = symbolic_selector == selector<EOL>if value_is_symbolic and not contract_metadata.get_abi(selector)['<STR_LIT>']:<EOL><INDENT>c = Operators.AND(c, value == <NUM_LIT:0>)<EOL><DEDENT>if constraint is None:<EOL><INDENT>constraint = c<EOL><DEDENT>else:<EOL><INDENT>constraint = Operators.OR(constraint, c)<EOL><DEDENT><DEDENT>return constraint<EOL>", "docstring": "Returns a constraint that excludes combinations of value and data that would cause an exception in the EVM\n            contract dispatcher.\n            :param address: address of the contract to call\n            :param value: balance to be transferred (optional)\n            :param data: symbolic transaction data\n            :param contract_metadata: SolidityMetadata for the contract (optional)", "id": "f16997:c0:m47"}
{"signature": "def __init__(self, procs=<NUM_LIT:10>, workspace_url: str=None, policy: str='<STR_LIT>'):", "body": "self._accounts = dict()<EOL>self._serializer = PickleSerializer()<EOL>self._config_procs = procs<EOL>constraints = ConstraintSet()<EOL>world = evm.EVMWorld(constraints)<EOL>initial_state = State(constraints, world)<EOL>super().__init__(initial_state, workspace_url=workspace_url, policy=policy)<EOL>self.constraints = ConstraintSet()<EOL>self.detectors = {}<EOL>self.metadata: Dict[int, SolidityMetadata] = {}<EOL>self.context['<STR_LIT>'] = {}<EOL>self.context['<STR_LIT>']['<STR_LIT>'] = set()<EOL>self.context['<STR_LIT>']['<STR_LIT>'] = set()<EOL>self.context['<STR_LIT>']['<STR_LIT>'] = <NUM_LIT:0><EOL>self.context['<STR_LIT>']['<STR_LIT>'] = dict()<EOL>self.context['<STR_LIT>']['<STR_LIT>'] = set()<EOL>self._executor.subscribe('<STR_LIT>', self._load_state_callback)<EOL>self._executor.subscribe('<STR_LIT>', self._terminate_state_callback)<EOL>self._executor.subscribe('<STR_LIT>', self._did_evm_execute_instruction_callback)<EOL>self._executor.subscribe('<STR_LIT>', self._did_evm_read_code)<EOL>self._executor.subscribe('<STR_LIT>', self._on_symbolic_sha3_callback)<EOL>self._executor.subscribe('<STR_LIT>', self._on_concrete_sha3_callback)<EOL>self.subscribe('<STR_LIT>', self._generate_testcase_callback)<EOL>", "docstring": "A Manticore EVM manager\n:param procs:, number of workers to use in the exploration\n:param workspace_url: workspace folder name\n:param policy: scheduling priority", "id": "f16997:c0:m13"}
{"signature": "def count_terminated_states(self):", "body": "return len(self._terminated_state_ids)<EOL>", "docstring": "Terminated states count", "id": "f16997:c0:m24"}
{"signature": "def _make_symbolic_arguments(self, ty):", "body": "<EOL>default_string_size = <NUM_LIT:32><EOL>default_array_size = <NUM_LIT:32><EOL>if ty[<NUM_LIT:0>] in ('<STR_LIT:int>', '<STR_LIT>'):<EOL><INDENT>result = self.make_symbolic_value()<EOL><DEDENT>elif ty[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>result = self.make_symbolic_buffer(size=ty[<NUM_LIT:1>])<EOL><DEDENT>elif ty[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>address = self.make_symbolic_value()<EOL>func_id = self.make_symbolic_buffer(size=<NUM_LIT:4>)<EOL>result = (address, func_id)<EOL><DEDENT>elif ty[<NUM_LIT:0>] in ('<STR_LIT>', '<STR_LIT:string>'):<EOL><INDENT>result = self.make_symbolic_buffer(size=default_string_size)<EOL><DEDENT>elif ty[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>result = ()<EOL>for ty_i in ty[<NUM_LIT:1>]:<EOL><INDENT>result += (self._make_symbolic_arguments(ty_i), )<EOL><DEDENT><DEDENT>elif ty[<NUM_LIT:0>] == '<STR_LIT>':<EOL><INDENT>result = []<EOL>rep = ty[<NUM_LIT:1>]<EOL>if rep is None:<EOL><INDENT>rep = default_array_size<EOL><DEDENT>for _ in range(rep):<EOL><INDENT>result.append(self._make_symbolic_arguments(ty[<NUM_LIT:2>]))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise NotImplemented<EOL><DEDENT>return result<EOL>", "docstring": "This makes a tuple of symbols to be used as arguments of type ty", "id": "f16997:c0:m35"}
{"signature": "@property<EOL><INDENT>def _running_state_ids(self):<DEDENT>", "body": "with self.locked_context('<STR_LIT>') as context:<EOL><INDENT>if self.initial_state is not None:<EOL><INDENT>return (-<NUM_LIT:1>,) + tuple(context['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>return tuple(context['<STR_LIT>'])<EOL><DEDENT><DEDENT>", "docstring": "IDs of the running states", "id": "f16997:c0:m16"}
{"signature": "def count_running_states(self):", "body": "return len(self._running_state_ids)<EOL>", "docstring": "Running states count", "id": "f16997:c0:m23"}
{"signature": "def human_transactions(self, state_id=None):", "body": "state = self.load(state_id)<EOL>return state.platform.human_transactions<EOL>", "docstring": "Transactions list for state `state_id`", "id": "f16997:c0:m33"}
{"signature": "def unregister_detector(self, d):", "body": "if not isinstance(d, (Detector, str)):<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>name = d<EOL>if isinstance(d, Detector):<EOL><INDENT>name = d.name<EOL><DEDENT>if name not in self.detectors:<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>d = self.detectors[name]<EOL>del self.detectors[name]<EOL>self.unregister_plugin(d)<EOL>", "docstring": "Unregisters a detector. This will invoke detector's `on_unregister` callback.\nShall be called after `.finalize` - otherwise, finalize won't add detector's finding to `global.findings`.", "id": "f16997:c0:m60"}
{"signature": "def flagged(flag):", "body": "return '<STR_LIT>' if flag else '<STR_LIT>'<EOL>", "docstring": "Return special character denoting concretization happened.", "id": "f16997:m0"}
{"signature": "@staticmethod<EOL><INDENT>def _compile(source_code, contract_name, libraries=None, solc_bin=None, solc_remaps=[], working_dir=None):<DEDENT>", "body": "if isinstance(source_code, str):<EOL><INDENT>with tempfile.NamedTemporaryFile('<STR_LIT>') as temp:<EOL><INDENT>temp.write(source_code)<EOL>temp.flush()<EOL>output, warnings = ManticoreEVM._run_solc(temp, solc_bin, solc_remaps, working_dir=working_dir)<EOL><DEDENT><DEDENT>elif isinstance(source_code, io.IOBase):<EOL><INDENT>output, warnings = ManticoreEVM._run_solc(source_code, solc_bin, solc_remaps, working_dir=working_dir)<EOL>source_code.seek(<NUM_LIT:0>)<EOL>source_code = source_code.read()<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(f'<STR_LIT>')<EOL><DEDENT>contracts = output.get('<STR_LIT>', [])<EOL>if len(contracts) != <NUM_LIT:1> and contract_name is None:<EOL><INDENT>raise EthereumError(f'<STR_LIT>')<EOL><DEDENT>name, contract = None, None<EOL>if contract_name is None:<EOL><INDENT>name, contract = list(contracts.items())[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>for n, c in contracts.items():<EOL><INDENT>if n == contract_name or n.split(\"<STR_LIT::>\")[<NUM_LIT:1>] == contract_name:<EOL><INDENT>name, contract = n, c<EOL>break<EOL><DEDENT><DEDENT><DEDENT>if name is None:<EOL><INDENT>raise ValueError(f'<STR_LIT>')<EOL><DEDENT>name = name.split('<STR_LIT::>')[<NUM_LIT:1>]<EOL>if contract['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>raise EthereumError('<STR_LIT>')<EOL><DEDENT>bytecode = ManticoreEVM._link(contract['<STR_LIT>'], libraries)<EOL>srcmap = contract['<STR_LIT>'].split('<STR_LIT:;>')<EOL>srcmap_runtime = contract['<STR_LIT>'].split('<STR_LIT:;>')<EOL>hashes = {str(x): str(y) for x, y in contract['<STR_LIT>'].items()}<EOL>abi = json.loads(contract['<STR_LIT>'])<EOL>runtime = ManticoreEVM._link(contract['<STR_LIT>'], libraries)<EOL>return name, source_code, bytecode, runtime, srcmap, srcmap_runtime, hashes, abi, warnings<EOL>", "docstring": "Compile a Solidity contract, used 
internally\n\n            :param source_code: solidity source as either a string or a file handle\n            :param contract_name: a string with the name of the contract to analyze\n            :param libraries: an itemizable of pairs (library_name, address)\n            :param solc_bin: path to solc binary\n            :param solc_remaps: solc import remaps\n            :param working_dir: working directory for solc compilation (defaults to current)\n            :return: name, source_code, bytecode, srcmap, srcmap_runtime, hashes\n            :return: name, source_code, bytecode, runtime, srcmap, srcmap_runtime, hashes, abi, warnings", "id": "f16997:c0:m7"}
{"signature": "@property<EOL><INDENT>def world(self):<DEDENT>", "body": "return self.get_world()<EOL>", "docstring": "The world instance or None if there is more than one state", "id": "f16997:c0:m14"}
{"signature": "def create_account(self, balance=<NUM_LIT:0>, address=None, code=None, name=None):", "body": "<EOL>if not self.count_running_states():<EOL><INDENT>raise NoAliveStates<EOL><DEDENT>if name is None:<EOL><INDENT>if code is None:<EOL><INDENT>name = self._get_uniq_name(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>name = self._get_uniq_name(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if name in self._accounts:<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(balance, int):<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>if isinstance(code, str):<EOL><INDENT>code = bytes(code, \"<STR_LIT:utf-8>\")<EOL><DEDENT>if code is not None and not isinstance(code, (bytes, Array)):<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>if address is None:<EOL><INDENT>address = self.new_address()<EOL><DEDENT>if not isinstance(address, int):<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>assert address is not None<EOL>if address in map(int, self.accounts.values()):<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>for state in self.running_states:<EOL><INDENT>world = state.platform<EOL>if '<STR_LIT>' in state.context:<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>if address in world.accounts:<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>world.create_account(address, balance, code=code, storage=None)<EOL><DEDENT>self._accounts[name] = EVMAccount(address, manticore=self, name=name)<EOL>return self.accounts[name]<EOL>", "docstring": "Low level creates an account. This won't generate a transaction.\n\n            :param balance: balance to be set on creation (optional)\n            :type balance: int or BitVecVariable\n            :param address: the address for the new account (optional)\n            :type address: int\n            :param code: the runtime code for the new account (None means normal account), str or bytes (optional)\n            :param name: a global account name eg. 
for use as reference in the reports (optional)\n            :return: an EVMAccount", "id": "f16997:c0:m44"}
{"signature": "def global_coverage(self, account):", "body": "account_address = int(account)<EOL>runtime_bytecode = None<EOL>for state in self.all_states:<EOL><INDENT>world = state.platform<EOL>if account_address in world:<EOL><INDENT>code = world.get_code(account_address)<EOL>runtime_bytecode = state.solve_one(code)<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>with self.locked_context('<STR_LIT>') as coverage:<EOL><INDENT>seen = {off for addr, off in coverage if addr == account_address}<EOL><DEDENT>return calculate_coverage(runtime_bytecode, seen)<EOL>", "docstring": "Returns code coverage for the contract on `account_address`.\n            This sums up all the visited code lines from any of the explored\n            states.", "id": "f16997:c0:m68"}
{"signature": "def finalize(self):", "body": "logger.debug(\"<STR_LIT>\", self.count_states())<EOL>def finalizer(state_id):<EOL><INDENT>state_id = self._terminate_state_id(state_id)<EOL>st = self.load(state_id)<EOL>logger.debug(\"<STR_LIT>\", state_id)<EOL>last_tx = st.platform.last_transaction<EOL>message = last_tx.result if last_tx else '<STR_LIT>'<EOL>self._publish_generate_testcase(st, message=message)<EOL><DEDENT>def worker_finalize(q):<EOL><INDENT>try:<EOL><INDENT>while True:<EOL><INDENT>finalizer(q.get_nowait())<EOL><DEDENT><DEDENT>except EmptyQueue:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>q = Queue()<EOL>for state_id in self._all_state_ids:<EOL><INDENT>if state_id == -<NUM_LIT:1>:<EOL><INDENT>finalizer(-<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>q.put(state_id)<EOL><DEDENT><DEDENT>report_workers = []<EOL>for _ in range(self._config_procs):<EOL><INDENT>proc = Process(target=worker_finalize, args=(q,))<EOL>proc.start()<EOL>report_workers.append(proc)<EOL><DEDENT>for proc in report_workers:<EOL><INDENT>proc.join()<EOL><DEDENT>if len(self.global_findings):<EOL><INDENT>with self._output.save_stream('<STR_LIT>') as global_findings:<EOL><INDENT>for address, pc, finding, at_init in self.global_findings:<EOL><INDENT>global_findings.write('<STR_LIT>' % finding)<EOL>write_findings(global_findings, '<STR_LIT:U+0020>', address, pc, at_init)<EOL>md = self.get_metadata(address)<EOL>if md is not None:<EOL><INDENT>source_code_snippet = md.get_source_for(pc, runtime=not at_init)<EOL>global_findings.write('<STR_LIT>')<EOL>global_findings.write('<STR_LIT:U+0020>'.join(source_code_snippet.splitlines(True)))<EOL>global_findings.write('<STR_LIT:\\n>')<EOL><DEDENT><DEDENT><DEDENT><DEDENT>self._save_run_data()<EOL>with self._output.save_stream('<STR_LIT>') as global_summary:<EOL><INDENT>global_summary.write(\"<STR_LIT>\")<EOL>for address in self.contract_accounts.values():<EOL><INDENT>global_summary.write(\"<STR_LIT>\".format(int(address), self.global_coverage(address)))<EOL>md = 
self.get_metadata(address)<EOL>if md is not None and len(md.warnings) > <NUM_LIT:0>:<EOL><INDENT>global_summary.write('<STR_LIT>' % md.name)<EOL>global_summary.write(md.warnings)<EOL><DEDENT><DEDENT><DEDENT>for address, md in self.metadata.items():<EOL><INDENT>with self._output.save_stream('<STR_LIT>' % md.name) as global_src:<EOL><INDENT>global_src.write(md.source_code)<EOL><DEDENT>with self._output.save_stream('<STR_LIT>' % md.name, binary=True) as global_runtime_bytecode:<EOL><INDENT>global_runtime_bytecode.write(md.runtime_bytecode)<EOL><DEDENT>with self._output.save_stream('<STR_LIT>' % md.name, binary=True) as global_init_bytecode:<EOL><INDENT>global_init_bytecode.write(md.init_bytecode)<EOL><DEDENT>with self._output.save_stream('<STR_LIT>' % md.name) as global_runtime_asm:<EOL><INDENT>runtime_bytecode = md.runtime_bytecode<EOL>with self.locked_context('<STR_LIT>') as seen:<EOL><INDENT>count, total = <NUM_LIT:0>, <NUM_LIT:0><EOL>for i in EVMAsm.disassemble_all(runtime_bytecode):<EOL><INDENT>if (address, i.pc) in seen:<EOL><INDENT>count += <NUM_LIT:1><EOL>global_runtime_asm.write('<STR_LIT:*>')<EOL><DEDENT>else:<EOL><INDENT>global_runtime_asm.write('<STR_LIT:U+0020>')<EOL><DEDENT>global_runtime_asm.write('<STR_LIT>' % (i.pc, i))<EOL>total += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>with self._output.save_stream('<STR_LIT>' % md.name) as global_init_asm:<EOL><INDENT>with self.locked_context('<STR_LIT>') as seen:<EOL><INDENT>count, total = <NUM_LIT:0>, <NUM_LIT:0><EOL>for i in EVMAsm.disassemble_all(md.init_bytecode):<EOL><INDENT>if (address, i.pc) in seen:<EOL><INDENT>count += <NUM_LIT:1><EOL>global_init_asm.write('<STR_LIT:*>')<EOL><DEDENT>else:<EOL><INDENT>global_init_asm.write('<STR_LIT:U+0020>')<EOL><DEDENT>global_init_asm.write('<STR_LIT>' % (i.pc, i))<EOL>total += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>with self._output.save_stream('<STR_LIT>' % md.name) as f:<EOL><INDENT>with self.locked_context('<STR_LIT>') as seen:<EOL><INDENT>visited = set((o for (a, 
o) in seen if a == address))<EOL>for o in sorted(visited):<EOL><INDENT>f.write('<STR_LIT>' % o)<EOL><DEDENT><DEDENT><DEDENT>with self._output.save_stream('<STR_LIT>' % md.name) as f:<EOL><INDENT>with self.locked_context('<STR_LIT>') as seen:<EOL><INDENT>visited = set()<EOL>for (a, o) in seen:<EOL><INDENT>if a == address:<EOL><INDENT>visited.add(o)<EOL><DEDENT><DEDENT>for o in sorted(visited):<EOL><INDENT>f.write('<STR_LIT>' % o)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>for state_id in self._all_state_ids:<EOL><INDENT>if state_id != -<NUM_LIT:1>:<EOL><INDENT>self._executor._workspace.rm_state(state_id)<EOL><DEDENT><DEDENT>with self.locked_context('<STR_LIT>') as eth_context:<EOL><INDENT>eth_context['<STR_LIT>'] = set()<EOL>eth_context['<STR_LIT>'] = set()<EOL><DEDENT>", "docstring": "Terminate and generate testcases for all currently alive states (contract states that cleanly executed\nto a STOP or RETURN in the last symbolic transaction).", "id": "f16997:c0:m67"}
{"signature": "def _did_evm_read_code(self, state, offset, size):", "body": "with self.locked_context('<STR_LIT>', set) as code_data:<EOL><INDENT>for i in range(offset, offset + size):<EOL><INDENT>code_data.add((state.platform.current_vm.address, i))<EOL><DEDENT><DEDENT>", "docstring": "INTERNAL USE", "id": "f16997:c0:m57"}
{"signature": "def _on_symbolic_sha3_callback(self, state, data, known_hashes):", "body": "assert issymbolic(data), '<STR_LIT>'<EOL>with self.locked_context('<STR_LIT>') as context:<EOL><INDENT>known_sha3 = context.get('<STR_LIT>', None)<EOL>if known_sha3 is None:<EOL><INDENT>known_sha3 = set()<EOL><DEDENT>sha3_states = context.get('<STR_LIT>', [])<EOL>results = []<EOL>known_hashes_cond = False<EOL>for key, value in known_sha3:<EOL><INDENT>assert not issymbolic(key), \"<STR_LIT>\"<EOL>cond = key == data<EOL>if not state.can_be_true(cond):<EOL><INDENT>continue<EOL><DEDENT>results.append((key, value))<EOL>known_hashes_cond = Operators.OR(cond, known_hashes_cond)<EOL><DEDENT>if not results:<EOL><INDENT>data_concrete = state.solve_one(data)<EOL>s = sha3.keccak_256(data_concrete)<EOL>data_hash = int(s.hexdigest(), <NUM_LIT:16>)<EOL>results.append((data_concrete, data_hash))<EOL>known_hashes_cond = data_concrete == data<EOL>known_sha3.add((data_concrete, data_hash))<EOL><DEDENT>not_known_hashes_cond = Operators.NOT(known_hashes_cond)<EOL>with state as temp_state:<EOL><INDENT>if temp_state.can_be_true(not_known_hashes_cond):<EOL><INDENT>temp_state.constrain(not_known_hashes_cond)<EOL>state_id = self._executor._workspace.save_state(temp_state)<EOL>sha3_states[state_id] = [hsh for buf, hsh in known_sha3]<EOL><DEDENT><DEDENT>context['<STR_LIT>'] = sha3_states<EOL>if not state.can_be_true(known_hashes_cond):<EOL><INDENT>raise TerminateState(\"<STR_LIT>\")<EOL><DEDENT>state.constrain(known_hashes_cond)<EOL>known_hashes.update(results)<EOL><DEDENT>", "docstring": "INTERNAL USE", "id": "f16997:c0:m52"}
{"signature": "@property<EOL><INDENT>def terminated_states(self):<DEDENT>", "body": "for state_id in self._terminated_state_ids:<EOL><INDENT>state = self.load(state_id)<EOL>yield state<EOL>self.save(state, state_id=state_id, final=True)<EOL><DEDENT>", "docstring": "Iterates over the terminated states.\n\nSee also `running_states`.", "id": "f16997:c0:m20"}
{"signature": "@property<EOL><INDENT>def _terminated_state_ids(self):<DEDENT>", "body": "with self.locked_context('<STR_LIT>') as context:<EOL><INDENT>return tuple(context['<STR_LIT>'])<EOL><DEDENT>", "docstring": "IDs of the terminated states", "id": "f16997:c0:m17"}
{"signature": "def _all_addresses(self):", "body": "ret = set()<EOL>for state in self.running_states:<EOL><INDENT>ret |= set(state.platform.accounts)<EOL><DEDENT>return ret<EOL>", "docstring": "Returns all addresses in all running states", "id": "f16997:c0:m41"}
{"signature": "@property<EOL><INDENT>def running_states(self):<DEDENT>", "body": "for state_id in self._running_state_ids:<EOL><INDENT>state = self.load(state_id)<EOL>yield state<EOL>self.save(state, state_id=state_id)<EOL><DEDENT>", "docstring": "Iterates over running states giving the possibility to change state data.\n\nThe state data change must be done in a loop, e.g. `for state in running_states: ...`\nas we re-save the state when the generator comes back to the function.\n\nThis means it is not possible to change the state used by Manticore with `states = list(m.running_states)`.", "id": "f16997:c0:m19"}
{"signature": "def count_states(self):", "body": "return len(self._all_state_ids)<EOL>", "docstring": "Total states count", "id": "f16997:c0:m22"}
{"signature": "def make_symbolic_value(self, nbits=<NUM_LIT>, name=None):", "body": "avoid_collisions = False<EOL>if name is None:<EOL><INDENT>name = '<STR_LIT>'<EOL>avoid_collisions = True<EOL><DEDENT>return self.constraints.new_bitvec(nbits, name=name, avoid_collisions=avoid_collisions)<EOL>", "docstring": "Creates a symbolic value, normally a uint256, to be used in transactions.\n            You can operate on it normally and add constraints to manticore.constraints\n            via manticore.constrain(constraint_expression)\n\n            Example use::\n\n                symbolic_value = m.make_symbolic_value()\n                m.constrain(symbolic_value > 100)\n                m.constrain(symbolic_value < 1000)\n                m.transaction(caller=attacker_account,\n                                address=contract_account,\n                                data=data,\n                                value=symbolic_value )", "id": "f16997:c0:m1"}
{"signature": "def calculate_coverage(runtime_bytecode, seen):", "body": "count, total = <NUM_LIT:0>, <NUM_LIT:0><EOL>bytecode = SolidityMetadata._without_metadata(runtime_bytecode)<EOL>for i in EVMAsm.disassemble_all(bytecode):<EOL><INDENT>if i.pc in seen:<EOL><INDENT>count += <NUM_LIT:1><EOL><DEDENT>total += <NUM_LIT:1><EOL><DEDENT>if total == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>return count * <NUM_LIT> / total<EOL>", "docstring": "Calculates what percentage of runtime_bytecode has been seen", "id": "f16997:m2"}
{"signature": "@property<EOL><INDENT>def _all_state_ids(self):<DEDENT>", "body": "return self._running_state_ids + self._terminated_state_ids<EOL>", "docstring": "IDs of the all states\n\n            Note: state with id -1 is already in memory and it is not backed on the storage", "id": "f16997:c0:m18"}
{"signature": "def get_world(self, state_id=None):", "body": "state = self.load(state_id)<EOL>if state is None:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return state.platform<EOL><DEDENT>", "docstring": "Returns the evm world of `state_id` state.", "id": "f16997:c0:m27"}
{"signature": "def register_detector(self, d):", "body": "if not isinstance(d, Detector):<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>if d.name in self.detectors:<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>self.detectors[d.name] = d<EOL>self.register_plugin(d)<EOL>return d.name<EOL>", "docstring": "Unregisters a plugin. This will invoke detector's `on_unregister` callback.\nShall be called after `.finalize`.", "id": "f16997:c0:m59"}
{"signature": "@staticmethod<EOL><INDENT>def compile(source_code, contract_name=None, libraries=None, runtime=False, solc_bin=None, solc_remaps=[]):<DEDENT>", "body": "name, source_code, init_bytecode, runtime_bytecode, srcmap, srcmap_runtime, hashes, abi, warnings = ManticoreEVM._compile(source_code, contract_name, libraries, solc_bin, solc_remaps)<EOL>if runtime:<EOL><INDENT>return runtime_bytecode<EOL><DEDENT>return init_bytecode<EOL>", "docstring": "Get initialization bytecode from a Solidity source code", "id": "f16997:c0:m4"}
{"signature": "def load(self, state_id=None):", "body": "state = None<EOL>if state_id is None:<EOL><INDENT>state_count = self.count_running_states()<EOL>if state_count == <NUM_LIT:1>:<EOL><INDENT>state_id = self._running_state_ids[<NUM_LIT:0>]<EOL><DEDENT>elif state_count == <NUM_LIT:0>:<EOL><INDENT>raise NoAliveStates<EOL><DEDENT>else:<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if state_id == -<NUM_LIT:1>:<EOL><INDENT>state = self.initial_state<EOL><DEDENT>else:<EOL><INDENT>state = self._executor._workspace.load_state(state_id, delete=False)<EOL>self._executor.forward_events_from(state, True)<EOL><DEDENT>return state<EOL>", "docstring": "Load one of the running or final states.\n\n            :param state_id: If None it assumes there is a single running state\n            :type state_id: int or None", "id": "f16997:c0:m51"}
{"signature": "def _transaction(self, sort, caller, value=<NUM_LIT:0>, address=None, data=None, gaslimit=None, price=<NUM_LIT:1>):", "body": "if gaslimit is None:<EOL><INDENT>gaslimit = cfg.defaultgas<EOL><DEDENT>if isinstance(address, EVMAccount):<EOL><INDENT>address = int(address)<EOL><DEDENT>if isinstance(caller, EVMAccount):<EOL><INDENT>caller = int(caller)<EOL><DEDENT>if data is None:<EOL><INDENT>data = bytearray(b\"<STR_LIT>\")<EOL><DEDENT>if isinstance(data, (str, bytes)):<EOL><INDENT>data = bytearray(data)<EOL><DEDENT>if not isinstance(data, (bytearray, Array)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(caller, (int, BitVec)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(value, (int, BitVec)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(address, (int, BitVec)):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if not isinstance(price, int):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if sort not in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if sort == '<STR_LIT>':<EOL><INDENT>if len(data) == <NUM_LIT:0>:<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>assert address is not None<EOL>assert caller is not None<EOL>if not self.count_running_states():<EOL><INDENT>raise NoAliveStates<EOL><DEDENT>for state in self.running_states:<EOL><INDENT>world = state.platform<EOL>if '<STR_LIT>' in state.context:<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>if address is None:<EOL><INDENT>if issymbolic(caller):<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>address = world.new_address(caller)<EOL><DEDENT>caller_migrated, address_migrated, value_migrated, data_migrated = self._migrate_tx_expressions(state, caller, address, value, data)<EOL>if sort == '<STR_LIT>':<EOL><INDENT>if address in world.accounts:<EOL><INDENT>raise 
EthereumError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>state.context['<STR_LIT>'] = (sort, caller_migrated, address_migrated, value_migrated, data_migrated, gaslimit, price)<EOL><DEDENT>self.run(procs=self._config_procs)<EOL>return address<EOL>", "docstring": "Initiates a transaction\n\n            :param caller: caller account\n            :type caller: int or EVMAccount\n            :param int address: the address for the transaction (optional)\n            :param value: value to be transferred\n            :param price: the price of gas for this transaction. Mostly unused.\n            :type value: int or BitVecVariable\n            :param str data: initializing evm bytecode and arguments or transaction call data\n            :param gaslimit: gas budget\n            :rtype: EVMAccount", "id": "f16997:c0:m46"}
{"signature": "def last_return(self, state_id=None):", "body": "state = self.load(state_id)<EOL>return state.platform.last_transaction.return_data<EOL>", "docstring": "Last returned buffer for state `state_id`", "id": "f16997:c0:m31"}
{"signature": "def transaction(self, caller, address, value, data, gas=None):", "body": "self._transaction('<STR_LIT>', caller, value=value, address=address, data=data, gaslimit=gas)<EOL>", "docstring": "Issue a symbolic transaction in all running states\n\n            :param caller: the address of the account sending the transaction\n            :type caller: int or EVMAccount\n            :param address: the address of the contract to call\n            :type address: int or EVMAccount\n            :param value: balance to be transfered on creation\n            :type value: int or BitVecVariable\n            :param data: initial data\n            :param gas: gas budget\n            :raises NoAliveStates: if there are no alive states to execute", "id": "f16997:c0:m43"}
{"signature": "def __init__(self, regexp=r'<STR_LIT>', mutability='<STR_LIT>', depth='<STR_LIT>', fallback=False, include=True, **kwargs):", "body": "super().__init__(**kwargs)<EOL>depth = depth.lower()<EOL>if depth not in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>raise ValueError<EOL><DEDENT>mutability = mutability.lower()<EOL>if mutability not in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>raise ValueError<EOL><DEDENT>self._regexp = regexp<EOL>self._mutability = mutability<EOL>self._depth = depth<EOL>self._fallback = fallback<EOL>self._include = include<EOL>", "docstring": "Constrain input based on function metadata. Include or avoid functions selected by the specified criteria.\n\nExamples:\n#Do not explore any human transactions that end up calling a constant function\nno_human_constant = FilterFunctions(depth='human', mutability='constant', include=False)\n\n#At human tx depth only accept synthetic check functions\nonly_tests = FilterFunctions(regexp=r'mcore_.*', depth='human', include=False)\n\n:param regexp: a regular expression over the name of the function '.*' will match all functions\n:param mutability: mutable, constant or both will match functions declared in the abi to be of such class\n:param depth: match functions in internal transactions, in human initiated transactions or in both types\n:param fallback: if True include the fallback function. Hash will be 00000000 for it\n:param include: if False exclude the selected functions, if True include them", "id": "f16998:c0:m0"}
{"signature": "def __init__(self, only_human=True, **kwargs):", "body": "super().__init__(**kwargs)<EOL>self._only_human = only_human<EOL>", "docstring": "Detects INVALID instructions.\n\nINVALID instructions are originally designated to signal exceptional code.\nAs in practice the INVALID instruction is used in different ways this\ndetector may Generate a great deal of false positives.\n\n:param only_human: if True report only INVALID at depth 0 transactions", "id": "f16999:c5:m0"}
{"signature": "def add_finding(self, state, address, pc, finding, at_init, constraint=True):", "body": "if issymbolic(pc):<EOL><INDENT>pc = simplify(pc)<EOL><DEDENT>if isinstance(pc, Constant):<EOL><INDENT>pc = pc.value<EOL><DEDENT>if not isinstance(pc, int):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.get_findings(state).add((address, pc, finding, at_init, constraint))<EOL>with self.locked_global_findings() as gf:<EOL><INDENT>gf.add((address, pc, finding, at_init))<EOL><DEDENT>logger.warning(finding)<EOL>", "docstring": "Logs a finding at specified contract and assembler line.\n:param state: current state\n:param address: contract address of the finding\n:param pc: program counter of the finding\n:param at_init: true if executing the constructor\n:param finding: textual description of the finding\n:param constraint: finding is considered reproducible only when constraint is True", "id": "f16999:c1:m4"}
{"signature": "@staticmethod<EOL><INDENT>def _unsigned_sub_overflow(state, a, b):<DEDENT>", "body": "cond = Operators.UGT(b, a)<EOL>return cond<EOL>", "docstring": "Sign extend the value to 512 bits and check the result can be represented\n in 256. Following there is a 32 bit excerpt of this condition:\n\na  -  b   ffffffff bfffffff 80000001 00000000 00000001 3ffffffff 7fffffff\nffffffff     True     True     True    False     True     True     True\nbfffffff     True     True     True    False    False     True     True\n80000001     True     True     True    False    False     True     True\n00000000    False    False    False    False    False     True    False\n00000001     True    False    False    False    False     True    False\nffffffff     True     True     True     True     True     True     True\n7fffffff     True     True     True    False    False     True    False", "id": "f16999:c8:m2"}
{"signature": "@staticmethod<EOL><INDENT>def _in_user_func(state):<DEDENT>", "body": "<EOL>in_function = state.context.get('<STR_LIT>', False)<EOL>prev_tx_count = state.context.get('<STR_LIT>', <NUM_LIT:0>)<EOL>curr_tx_count = len(state.platform.transactions)<EOL>new_human_tx = prev_tx_count != curr_tx_count<EOL>if in_function and not new_human_tx:<EOL><INDENT>return True<EOL><DEDENT>in_function = len(state.solve_n(state.platform.current_transaction.data[:<NUM_LIT:4>], <NUM_LIT:2>)) == <NUM_LIT:1><EOL>state.context['<STR_LIT>'] = in_function<EOL>state.context['<STR_LIT>'] = curr_tx_count<EOL>return in_function<EOL>", "docstring": ":param state: current state\n:return: whether the current execution is in a user-defined function or not.\n\nNOTE / TODO / FIXME: As this may produce false postives, this is not in the base `Detector` class.\nIt should be fixed at some point and moved there. See below.\n\nThe first 4 bytes of tx data is keccak256 hash of the function signature that is called by given tx.\n\nAll transactions start within Solidity dispatcher function: it takes passed hash and dispatches\nthe execution to given function based on it.\n\nSo: if we are in the dispatcher, *and contract have some functions* one of the first four tx data bytes\nwill effectively have more than one solutions.\n\nBUT if contract have only a fallback function, the equation below may return more solutions when we are\nin a dispatcher function.  <--- because of that, we warn that the detector is not that stable\nfor contracts with only a fallback function.", "id": "f16999:c13:m1"}
{"signature": "@property<EOL><INDENT>def function_signatures(self) -> Iterable[str]:<DEDENT>", "body": "return self._function_signatures_by_selector.values()<EOL>", "docstring": "The signatures of all normal contract functions.", "id": "f17001:c0:m19"}
{"signature": "@property<EOL><INDENT>def fallback_function_selector(self) -> bytes:<DEDENT>", "body": "return self._fallback_function_selector<EOL>", "docstring": "A function selector not associated with any of the non-fallback contract functions.\n\n        This selector is almost always ``b'\\0\\0\\0\\0'``.", "id": "f17001:c0:m22"}
{"signature": "def get_constructor_arguments(self) -> str:", "body": "item = self._constructor_abi_item<EOL>return '<STR_LIT>' if item is None else self.tuple_signature_for_components(item['<STR_LIT>'])<EOL>", "docstring": "Returns the tuple type signature for the arguments of the contract constructor.", "id": "f17001:c0:m3"}
{"signature": "def get_source_for(self, asm_offset, runtime=True):", "body": "srcmap = self.get_srcmap(runtime)<EOL>try:<EOL><INDENT>beg, size, _, _ = srcmap[asm_offset]<EOL><DEDENT>except KeyError:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>output = '<STR_LIT>'<EOL>nl = self.source_code[:beg].count('<STR_LIT:\\n>') + <NUM_LIT:1><EOL>snippet = self.source_code[beg:beg + size]<EOL>for l in snippet.split('<STR_LIT:\\n>'):<EOL><INDENT>output += '<STR_LIT>' % (nl, l)<EOL>nl += <NUM_LIT:1><EOL><DEDENT>return output<EOL>", "docstring": "Solidity source code snippet related to `asm_pos` evm bytecode offset.\n            If runtime is False, initialization bytecode source map is used", "id": "f17001:c0:m8"}
{"signature": "def get_func_name(self, hsh: bytes) -> str:", "body": "if not isinstance(hsh, (bytes, bytearray)):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>sig = self._function_signatures_by_selector.get(hsh)<EOL>return '<STR_LIT>' if sig is None else sig[:sig.find('<STR_LIT:(>')]<EOL>", "docstring": "Returns the name of the normal function with the selector ``hsh``,\n        or ``'{fallback}'`` if no such function exists.", "id": "f17001:c0:m16"}
{"signature": "@property<EOL><INDENT>def has_non_default_fallback_function(self) -> bool:<DEDENT>", "body": "return self._fallback_function_abi_item is not None<EOL>", "docstring": "Indicates whether the contract has an explicitly defined fallback function.", "id": "f17001:c0:m21"}
{"signature": "def __init__(self, name, source_code, init_bytecode, runtime_bytecode, srcmap, srcmap_runtime, hashes, abi, warnings):", "body": "self.name = name<EOL>if isinstance(source_code, bytes):<EOL><INDENT>source_code = source_code.decode()<EOL><DEDENT>self.source_code = source_code<EOL>self._init_bytecode = init_bytecode<EOL>self._runtime_bytecode = runtime_bytecode<EOL>self._function_signatures_by_selector = {bytes.fromhex(sel): sig for sig, sel in hashes.items()}<EOL>fallback_selector = b'<STR_LIT>'<EOL>while fallback_selector in self._function_signatures_by_selector:<EOL><INDENT>fallback_selector = (int.from_bytes(fallback_selector, '<STR_LIT>') + <NUM_LIT:1>).to_bytes(<NUM_LIT:4>, '<STR_LIT>')<EOL><DEDENT>self._fallback_function_selector = fallback_selector<EOL>self._constructor_abi_item = None<EOL>self._fallback_function_abi_item = None<EOL>function_items = {}<EOL>event_items = {}<EOL>for item in abi:<EOL><INDENT>type = item['<STR_LIT:type>']<EOL>if type == '<STR_LIT>':<EOL><INDENT>signature = self.function_signature_for_name_and_inputs(item['<STR_LIT:name>'], item['<STR_LIT>'])<EOL>function_items[signature] = item<EOL><DEDENT>elif type == '<STR_LIT>':<EOL><INDENT>signature = self.function_signature_for_name_and_inputs(item['<STR_LIT:name>'], item['<STR_LIT>'])<EOL>event_items[signature] = item<EOL><DEDENT>elif type == '<STR_LIT>':<EOL><INDENT>assert not self._constructor_abi_item, \"<STR_LIT>\"<EOL>self._constructor_abi_item = item<EOL><DEDENT>elif type == '<STR_LIT>':<EOL><INDENT>assert not self._fallback_function_abi_item, \"<STR_LIT>\"<EOL>self._fallback_function_abi_item = item<EOL><DEDENT><DEDENT>self._function_abi_items_by_signature = function_items<EOL>self._event_abi_items_by_signature = event_items<EOL>self.warnings = warnings<EOL>self.srcmap_runtime = self.__build_source_map(self.runtime_bytecode, srcmap_runtime)<EOL>self.srcmap = self.__build_source_map(self.init_bytecode, srcmap)<EOL>", "docstring": "Contract metadata for Solidity-based 
contracts", "id": "f17001:c0:m2"}
{"signature": "def get_func_argument_types(self, hsh: bytes):", "body": "if not isinstance(hsh, (bytes, bytearray)):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>sig = self._function_signatures_by_selector.get(hsh)<EOL>return '<STR_LIT>' if sig is None else sig[sig.find('<STR_LIT:(>'):]<EOL>", "docstring": "Returns the tuple type signature for the arguments of the function associated with the selector ``hsh``.\n\n        If no normal contract function has the specified selector,\n        the empty tuple type signature ``'()'`` is returned.", "id": "f17001:c0:m14"}
{"signature": "@property<EOL><INDENT>@deprecated(\"<STR_LIT>\")<EOL>def functions(self) -> Tuple[str, ...]:<DEDENT>", "body": "return (*self._function_signatures_by_selector.values(), '<STR_LIT>')<EOL>", "docstring": "The signatures of all normal contract functions, plus the ``'{fallback}()'`` pseudo-signature.", "id": "f17001:c0:m20"}
{"signature": "@property<EOL><INDENT>def constructor_abi(self) -> Dict[str, Any]:<DEDENT>", "body": "item = self._constructor_abi_item<EOL>if item:<EOL><INDENT>return dict(item)<EOL><DEDENT>return {'<STR_LIT>': [], '<STR_LIT>': False, '<STR_LIT>': '<STR_LIT>', '<STR_LIT:type>': '<STR_LIT>'}<EOL>", "docstring": "Returns a copy of the Solidity JSON ABI item for the contract constructor.\n\n        The content of the returned dict is described at https://solidity.readthedocs.io/en/latest/abi-spec.html#json_", "id": "f17001:c0:m12"}
{"signature": "@staticmethod<EOL><INDENT>def tuple_signature_for_components(components: Sequence[Mapping[str, Any]]) -> str:<DEDENT>", "body": "ts = []<EOL>for c in components:<EOL><INDENT>t: str = c['<STR_LIT:type>']<EOL>if t.startswith('<STR_LIT>'):<EOL><INDENT>assert len(t) == <NUM_LIT:5> or t[<NUM_LIT:5>] == '<STR_LIT:[>'<EOL>t = SolidityMetadata.tuple_signature_for_components(c['<STR_LIT>']) + t[<NUM_LIT:5>:]<EOL><DEDENT>ts.append(t)<EOL><DEDENT>return f'<STR_LIT>'<EOL>", "docstring": "Equivalent to ``function_signature_for_name_and_inputs('', components)``.", "id": "f17001:c0:m1"}
{"signature": "@property<EOL><INDENT>def has_non_default_constructor(self) -> bool:<DEDENT>", "body": "return self._fallback_function_abi_item is not None<EOL>", "docstring": "Indicates whether the contract has an explicitly defined constructor.", "id": "f17001:c0:m11"}
{"signature": "@property<EOL><INDENT>def name_(self):<DEDENT>", "body": "return self._name<EOL>", "docstring": "This is named this way to avoid naming collisions with Solidity functions/data,\nsince EVMContract inherits this.", "id": "f17003:c0:m2"}
{"signature": "def __init__(self, default_caller=None, **kwargs):", "body": "super().__init__(**kwargs)<EOL>self.__default_caller = default_caller<EOL>self.__hashes = {}<EOL>self.__initialized = False<EOL>", "docstring": "Encapsulates a contract account.\n\n:param default_caller: the default caller address for any transaction", "id": "f17003:c1:m0"}
{"signature": "def __init__(self, address=None, manticore=None, name=None):", "body": "self._manticore = manticore<EOL>self._address = address<EOL>self._name = name<EOL>", "docstring": "Encapsulates an account.\n\n:param address: the address of this account\n:type address: 160 bit long integer\n:param manticore: the controlling Manticore", "id": "f17003:c0:m0"}
{"signature": "def t_UINT(t):", "body": "t.value = ('<STR_LIT>', <NUM_LIT>)<EOL>return t<EOL>", "docstring": "r\"uint", "id": "f17004:m4"}
{"signature": "def p_tuple(p):", "body": "p[<NUM_LIT:0>] = ('<STR_LIT>', p[<NUM_LIT:2>])<EOL>", "docstring": "T : LPAREN TL RPAREN", "id": "f17004:m19"}
{"signature": "def p_dynamic_type(p):", "body": "reps = None<EOL>base_type = p[<NUM_LIT:1>]<EOL>p[<NUM_LIT:0>] = ('<STR_LIT>', reps, base_type)<EOL>", "docstring": "T : T LBRAKET RBRAKET", "id": "f17004:m21"}
{"signature": "def t_BOOL(t):", "body": "t.value = ('<STR_LIT>', <NUM_LIT:8>)<EOL>return t<EOL>", "docstring": "r\"bool", "id": "f17004:m3"}
{"signature": "def t_INTN(t):", "body": "size = int(t.lexer.lexmatch.group('<STR_LIT:size>'))<EOL>t.value = ('<STR_LIT:int>', size)<EOL>return t<EOL>", "docstring": "r\"int(?P<size>256|248|240|232|224|216|208|200|192|184|176|168|160|152|144|136|128|120|112|104|96|88|80|72|64|56|48|40|32|24|16|8)", "id": "f17004:m5"}
{"signature": "def p_dynamic_fixed_type(p):", "body": "reps = int(p[<NUM_LIT:3>])<EOL>base_type = p[<NUM_LIT:1>]<EOL>p[<NUM_LIT:0>] = ('<STR_LIT>', reps, base_type)<EOL>", "docstring": "T : T LBRAKET NUMBER RBRAKET", "id": "f17004:m22"}
{"signature": "def t_BYTES(t):", "body": "t.value = ('<STR_LIT>',)<EOL>return t<EOL>", "docstring": "r\"bytes", "id": "f17004:m12"}
{"signature": "def t_INT(t):", "body": "t.value = ('<STR_LIT:int>', <NUM_LIT>)<EOL>return t<EOL>", "docstring": "r\"int", "id": "f17004:m6"}
{"signature": "def t_FIXEDMN(t):", "body": "M = int(t.lexer.lexmatch.group('<STR_LIT:M>'))<EOL>N = int(t.lexer.lexmatch.group('<STR_LIT:N>'))<EOL>t.value = (\"<STR_LIT>\", M, N)<EOL>return t<EOL>", "docstring": "r\"^fixed(?P<M>256|248|240|232|224|216|208|200|192|184|176|168|160|152|144|136|128|120|112|104|96|88|80|72|64|56|48|40|32|24|16|8)x(?P<N>80|79|78|77|76|75|74|73|72|71|70|69|68|67|66|65|64|63|62|61|60|59|58|57|56|55|54|53|52|51|50|49|48|47|46|45|44|43|42|41|40|39|38|37|36|35|34|33|32|31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10|9|8|7|6|5|4|3|2|1)", "id": "f17004:m7"}
{"signature": "def t_UFIXED(t):", "body": "t.value = ('<STR_LIT>', <NUM_LIT>, <NUM_LIT>)<EOL>return t<EOL>", "docstring": "r\"ufixed", "id": "f17004:m10"}
{"signature": "def t_UINTN(t):", "body": "size = int(t.lexer.lexmatch.group('<STR_LIT:size>'))<EOL>t.value = ('<STR_LIT>', size)<EOL>return t<EOL>", "docstring": "r\"uint(?P<size>256|248|240|232|224|216|208|200|192|184|176|168|160|152|144|136|128|120|112|104|96|88|80|72|64|56|48|40|32|24|16|8)", "id": "f17004:m1"}
{"signature": "def t_BYTESM(t):", "body": "size = int(t.lexer.lexmatch.group('<STR_LIT>'))<EOL>t.value = ('<STR_LIT>', size)<EOL>return t<EOL>", "docstring": "r\"bytes(?P<nbytes>32|31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10|9|8|7|6|5|4|3|2|1)", "id": "f17004:m11"}
{"signature": "def p_basic_type(p):", "body": "p[<NUM_LIT:0>] = p[<NUM_LIT:1>]<EOL>", "docstring": "T : UINTN\nT : UINT\nT : INTN\nT : INT\nT : ADDRESS\nT : BOOL\nT : FIXEDMN\nT : UFIXEDMN\nT : FIXED\nT : UFIXED\nT : BYTESM\nT : FUNCTION\nT : BYTES\nT : STRING", "id": "f17004:m16"}
{"signature": "def t_FUNCTION(t):", "body": "t.value = ('<STR_LIT>',)<EOL>return t<EOL>", "docstring": "r\"function", "id": "f17004:m14"}
{"signature": "@staticmethod<EOL><INDENT>def function_selector(method_name_and_signature):<DEDENT>", "body": "s = sha3.keccak_256()<EOL>s.update(method_name_and_signature.encode())<EOL>return bytes(s.digest()[:<NUM_LIT:4>])<EOL>", "docstring": "Makes a function hash id from a method signature", "id": "f17007:c0:m8"}
{"signature": "@staticmethod<EOL><INDENT>def _serialize_uint(value, size=<NUM_LIT:32>, padding=<NUM_LIT:0>):<DEDENT>", "body": "if size <= <NUM_LIT:0> or size > <NUM_LIT:32>:<EOL><INDENT>raise ValueError<EOL><DEDENT>from .account import EVMAccount  <EOL>if not isinstance(value, (int, BitVec, EVMAccount)):<EOL><INDENT>raise ValueError<EOL><DEDENT>if issymbolic(value):<EOL><INDENT>bytes = ArrayVariable(index_bits=<NUM_LIT>, index_max=<NUM_LIT:32>, value_bits=<NUM_LIT:8>, name='<STR_LIT>'.format(uuid.uuid1()))<EOL>if value.size <= size * <NUM_LIT:8>:<EOL><INDENT>value = Operators.ZEXTEND(value, size * <NUM_LIT:8>)<EOL><DEDENT>else:<EOL><INDENT>value = Operators.EXTRACT(value, <NUM_LIT:0>, size * <NUM_LIT:8>)<EOL><DEDENT>bytes = ArrayProxy(bytes.write_BE(padding, value, size))<EOL><DEDENT>else:<EOL><INDENT>value = int(value)<EOL>bytes = bytearray()<EOL>for _ in range(padding):<EOL><INDENT>bytes.append(<NUM_LIT:0>)<EOL><DEDENT>for position in reversed(range(size)):<EOL><INDENT>bytes.append(Operators.EXTRACT(value, position * <NUM_LIT:8>, <NUM_LIT:8>))<EOL><DEDENT><DEDENT>assert len(bytes) == size + padding<EOL>return bytes<EOL>", "docstring": "Translates a python integral or a BitVec into a 32 byte string, MSB first", "id": "f17007:c0:m11"}
{"signature": "@staticmethod<EOL><INDENT>def _deserialize_uint(data, nbytes=<NUM_LIT:32>, padding=<NUM_LIT:0>, offset=<NUM_LIT:0>):<DEDENT>", "body": "assert isinstance(data, (bytearray, Array))<EOL>value = ABI._readBE(data, nbytes, padding=True, offset=offset)<EOL>value = Operators.ZEXTEND(value, (nbytes + padding) * <NUM_LIT:8>)<EOL>return value<EOL>", "docstring": "Read a `nbytes` bytes long big endian unsigned integer from `data` starting at `offset`\n\n:param data: sliceable buffer; symbolic buffer of Eth ABI encoded data\n:param nbytes: number of bytes to read starting from least significant byte\n:rtype: int or Expression", "id": "f17007:c0:m14"}
{"signature": "@staticmethod<EOL><INDENT>def _type_size(ty):<DEDENT>", "body": "if ty[<NUM_LIT:0>] in ('<STR_LIT:int>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>return <NUM_LIT:32><EOL><DEDENT>elif ty[<NUM_LIT:0>] in ('<STR_LIT>'):<EOL><INDENT>result = <NUM_LIT:0><EOL>for ty_i in ty[<NUM_LIT:1>]:<EOL><INDENT>result += ABI._type_size(ty_i)<EOL><DEDENT>return result<EOL><DEDENT>elif ty[<NUM_LIT:0>] in ('<STR_LIT>'):<EOL><INDENT>rep = ty[<NUM_LIT:1>]<EOL>result = <NUM_LIT:32>  <EOL>return result<EOL><DEDENT>elif ty[<NUM_LIT:0>] in ('<STR_LIT>', '<STR_LIT:string>'):<EOL><INDENT>result = <NUM_LIT:32>  <EOL>return result<EOL><DEDENT>raise ValueError<EOL>", "docstring": "Calculate `static` type size", "id": "f17007:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def function_call(type_spec, *args):<DEDENT>", "body": "m = re.match(r\"<STR_LIT>\", type_spec)<EOL>if not m:<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>ABI._check_and_warn_num_args(type_spec, *args)<EOL>result = ABI.function_selector(type_spec)  <EOL>result += ABI.serialize(m.group('<STR_LIT:type>'), *args)<EOL>return result<EOL>", "docstring": "Build transaction data from function signature and arguments", "id": "f17007:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def _deserialize_int(data, nbytes=<NUM_LIT:32>, padding=<NUM_LIT:0>):<DEDENT>", "body": "assert isinstance(data, (bytearray, Array))<EOL>value = ABI._readBE(data, nbytes, padding=True)<EOL>value = Operators.SEXTEND(value, nbytes * <NUM_LIT:8>, (nbytes + padding) * <NUM_LIT:8>)<EOL>if not issymbolic(value):<EOL><INDENT>if value & (<NUM_LIT:1> << (nbytes * <NUM_LIT:8> - <NUM_LIT:1>)):<EOL><INDENT>value = -(((~value) + <NUM_LIT:1>) & ((<NUM_LIT:1> << (nbytes * <NUM_LIT:8>)) - <NUM_LIT:1>))<EOL><DEDENT><DEDENT>return value<EOL>", "docstring": "Read a `nbytes` bytes long big endian signed integer from `data` starting at `offset`\n\n:param data: sliceable buffer; symbolic buffer of Eth ABI encoded data\n:param nbytes: number of bytes to read starting from least significant byte\n:rtype: int or Expression", "id": "f17007:c0:m15"}
{"signature": "def get_config_keys():", "body": "global _groups<EOL>for group_name, group in _groups.items():<EOL><INDENT>for key in group:<EOL><INDENT>yield f\"<STR_LIT>\"<EOL><DEDENT><DEDENT>", "docstring": "Return an iterable covering all defined keys so far", "id": "f17008:m6"}
{"signature": "def parse_config(f):", "body": "try:<EOL><INDENT>c = yaml.safe_load(f)<EOL>for section_name, section in c.items():<EOL><INDENT>group = get_group(section_name)<EOL>for key, val in section.items():<EOL><INDENT>group.update(key)<EOL>setattr(group, key, val)<EOL><DEDENT><DEDENT><DEDENT>except Exception:<EOL><INDENT>raise ConfigError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Load an yml-formatted configuration from file stream |f|\n\n:param file f: Where to read the config.", "id": "f17008:m2"}
{"signature": "def update(self, name: str, value=None, default=None, description: str=None):", "body": "if name in self._vars:<EOL><INDENT>description = description or self._vars[name].description<EOL>default = default or self._vars[name].default<EOL><DEDENT>elif name == '<STR_LIT:name>':<EOL><INDENT>raise ConfigError(\"<STR_LIT>\")<EOL><DEDENT>v = _Var(name, description=description, default=default, defined=False)<EOL>v.value = value<EOL>self._vars[name] = v<EOL>", "docstring": "Like add, but can tolerate existing values; also updates the value.\n\nMostly used for setting fields from imported INI files and modified CLI flags.", "id": "f17008:c2:m3"}
{"signature": "def temp_vals(self) -> \"<STR_LIT>\":", "body": "return _TemporaryGroup(self)<EOL>", "docstring": "Returns a contextmanager that can be used to set temporary config variables.\nE.g.:\ngroup.var = 123\n\nwith group.temp_vals():\n    group.var = 456\n    # var is 456\n\n# group.var is back to 123", "id": "f17008:c2:m11"}
{"signature": "def updated_vars(self):", "body": "return filter(lambda x: x.was_set, self._vars.values())<EOL>", "docstring": "Return all vars that were explicitly set or updated with new values.", "id": "f17008:c2:m5"}
{"signature": "def get_description(self, name: str) -> str:", "body": "if name not in self._vars:<EOL><INDENT>raise ConfigError(f\"<STR_LIT>\")<EOL><DEDENT>return self._vars[name].description<EOL>", "docstring": "Return the description, or a help string of variable identified by |name|.", "id": "f17008:c2:m4"}
{"signature": "def save(f):", "body": "global _groups<EOL>c = {}<EOL>for group_name, group in _groups.items():<EOL><INDENT>section = {var.name: var.value for var in group.updated_vars()}<EOL>if not section:<EOL><INDENT>continue<EOL><DEDENT>c[group_name] = section<EOL><DEDENT>yaml.safe_dump(c, f, line_break=True)<EOL>", "docstring": "Save current config state to an yml file stream identified by |f|\n\n:param f: where to write the config file", "id": "f17008:m1"}
{"signature": "def output_detectors(detector_classes):", "body": "detectors_list = []<EOL>for detector in detector_classes:<EOL><INDENT>argument = detector.ARGUMENT<EOL>help_info = detector.HELP<EOL>impact = detector.IMPACT<EOL>confidence = classification_txt[detector.CONFIDENCE]<EOL>detectors_list.append((argument, help_info, impact, confidence))<EOL><DEDENT>table = PrettyTable([\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\"])<EOL>detectors_list = sorted(detectors_list, key=lambda element: (element[<NUM_LIT:2>], element[<NUM_LIT:3>], element[<NUM_LIT:0>]))<EOL>idx = <NUM_LIT:1><EOL>for (argument, help_info, impact, confidence) in detectors_list:<EOL><INDENT>table.add_row([idx, argument, help_info, classification_txt[impact], confidence])<EOL>idx = idx + <NUM_LIT:1><EOL><DEDENT>print(table)<EOL>", "docstring": "Copied from\nhttps://github.com/trailofbits/slither/blob/563d5118298e4cae7f0ea5f2a531f0dcdcebd64d/slither/utils/command_line.py", "id": "f17009:m0"}
{"signature": "def summarized_name(self, name):", "body": "components = name.split('<STR_LIT:.>')<EOL>prefix = '<STR_LIT:.>'.join(c[<NUM_LIT:0>] for c in components[:-<NUM_LIT:1>])<EOL>return f'<STR_LIT>'<EOL>", "docstring": "Produce a summarized record name\n  i.e. manticore.core.executor -> m.c.executor", "id": "f17010:c0:m0"}
{"signature": "def _step(self, instruction):", "body": "logger.debug(\"<STR_LIT>\"<EOL>% (instruction.address, instruction.mnemonic, instruction.op_str))<EOL>registers = set(self._cpu.canonical_registers)<EOL>if self._cpu.arch == CS_ARCH_X86:<EOL><INDENT>registers -= set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>registers.add('<STR_LIT>')<EOL>registers -= set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>registers |= set(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL><DEDENT>for reg in registers:<EOL><INDENT>val = self._cpu.read_register(reg)<EOL>if issymbolic(val):<EOL><INDENT>from ..native.cpu.abstractcpu import ConcretizeRegister<EOL>raise ConcretizeRegister(self._cpu, reg, \"<STR_LIT>\",<EOL>policy='<STR_LIT>')<EOL><DEDENT>self._emu.reg_write(self._to_unicorn_id(reg), val)<EOL><DEDENT>instruction = self._cpu.decode_instruction(self._cpu.PC)<EOL>text_bytes = self._cpu.read_bytes(self._cpu.PC, instruction.size)<EOL>self._emu.mem_write(self._cpu.PC, b'<STR_LIT>'.join(text_bytes))<EOL>self._emu.hook_add(UC_HOOK_MEM_READ_UNMAPPED, self._hook_unmapped)<EOL>self._emu.hook_add(UC_HOOK_MEM_WRITE_UNMAPPED, self._hook_unmapped)<EOL>self._emu.hook_add(UC_HOOK_MEM_FETCH_UNMAPPED, self._hook_unmapped)<EOL>self._emu.hook_add(UC_HOOK_MEM_READ, self._hook_xfer_mem)<EOL>self._emu.hook_add(UC_HOOK_MEM_WRITE, self._hook_xfer_mem)<EOL>self._emu.hook_add(UC_HOOK_INTR, self._interrupt)<EOL>saved_PC = self._cpu.PC<EOL>try:<EOL><INDENT>pc = self._cpu.PC<EOL>if self._cpu.arch == CS_ARCH_ARM and self._uc_mode == UC_MODE_THUMB:<EOL><INDENT>pc |= 
<NUM_LIT:1><EOL><DEDENT>self._emu.emu_start(pc, self._cpu.PC + instruction.size, count=<NUM_LIT:1>)<EOL><DEDENT>except UcError as e:<EOL><INDENT>if not self._should_try_again:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>if self._should_try_again:<EOL><INDENT>return<EOL><DEDENT>if logger.isEnabledFor(logging.DEBUG):<EOL><INDENT>logger.debug(\"<STR_LIT:=>\" * <NUM_LIT:10>)<EOL>for register in self._cpu.canonical_registers:<EOL><INDENT>logger.debug(<EOL>f\"<STR_LIT>\"<EOL>f\"<STR_LIT>\"<EOL>f\"<STR_LIT>\"<EOL>)<EOL><DEDENT>logger.debug(\"<STR_LIT:>>\" * <NUM_LIT:10>)<EOL><DEDENT>for reg in registers:<EOL><INDENT>val = self._emu.reg_read(self._to_unicorn_id(reg))<EOL>self._cpu.write_register(reg, val)<EOL><DEDENT>mu_pc = self.get_unicorn_pc()<EOL>if saved_PC == mu_pc:<EOL><INDENT>self._cpu.PC = saved_PC + instruction.size<EOL><DEDENT>if self._to_raise:<EOL><INDENT>raise self._to_raise<EOL><DEDENT>return<EOL>", "docstring": "A single attempt at executing an instruction.", "id": "f17015:c0:m9"}
{"signature": "def forward_events_to(self, sink, include_source=False):", "body": "assert isinstance(sink, Eventful), f'<STR_LIT>'<EOL>self._forwards[sink] = include_source<EOL>", "docstring": "This forwards signal to sink", "id": "f17017:c1:m11"}
{"signature": "@classmethod<EOL><INDENT>def all_events(cls):<DEDENT>", "body": "all_evts = set()<EOL>for cls, evts in cls.__all_events__.items():<EOL><INDENT>all_evts.update(evts)<EOL><DEDENT>return all_evts<EOL>", "docstring": "Return all events that all subclasses have so far registered to publish.", "id": "f17017:c1:m0"}
{"signature": "def emulate(self, instruction):", "body": "<EOL>while True:<EOL><INDENT>self._should_try_again = False<EOL>self._to_raise = None<EOL>self._step(instruction)<EOL>if not self._should_try_again:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Wrapper that runs the _step function in a loop while handling exceptions", "id": "f17018:c0:m12"}
{"signature": "def update_segment(self, selector, base, size, perms):", "body": "logger.info(\"<STR_LIT>\", selector, base, size, perms)<EOL>if selector == <NUM_LIT>:<EOL><INDENT>self.set_fs(base)<EOL><DEDENT>else:<EOL><INDENT>logger.error(\"<STR_LIT>\", selector)<EOL><DEDENT>", "docstring": "Only useful for setting FS right now.", "id": "f17018:c0:m17"}
{"signature": "def _hook_syscall(self, uc, data):", "body": "logger.debug(f\"<STR_LIT>\")<EOL>self.sync_unicorn_to_manticore()<EOL>from ..native.cpu.abstractcpu import Syscall<EOL>self._to_raise = Syscall()<EOL>uc.emu_stop()<EOL>", "docstring": "Unicorn hook that transfers control to Manticore so it can execute the syscall", "id": "f17018:c0:m7"}
{"signature": "def protect_memory_callback(self, start, size, perms):", "body": "logger.info(f\"<STR_LIT>\")<EOL>self._emu.mem_protect(start, size, convert_permissions(perms))<EOL>", "docstring": "Set memory protections in Unicorn correctly", "id": "f17018:c0:m5"}
{"signature": "def hr_size(num, suffix='<STR_LIT:B>') -> str:", "body": "for unit in '<STR_LIT>':<EOL><INDENT>if abs(num) < <NUM_LIT>:<EOL><INDENT>return \"<STR_LIT>\" % (num, unit if unit != '<STR_LIT:U+0020>' else '<STR_LIT>', suffix)<EOL><DEDENT>num /= <NUM_LIT><EOL><DEDENT>return \"<STR_LIT>\" % (num, '<STR_LIT:Y>', suffix)<EOL>", "docstring": "Human-readable data size\nFrom https://stackoverflow.com/a/1094933\n:param num: number of bytes\n:param suffix: Optional size specifier\n:return: Formatted string", "id": "f17018:m1"}
{"signature": "def map_memory_callback(self, address, size, perms, name, offset, result):", "body": "logger.info('<STR_LIT:U+0020>'.join((\"<STR_LIT>\",<EOL>hex(address) if type(address) is int else \"<STR_LIT>\",<EOL>hr_size(size), \"<STR_LIT:->\",<EOL>perms, \"<STR_LIT:->\",<EOL>f\"<STR_LIT>\", \"<STR_LIT>\",<EOL>hex(result))))<EOL>self._emu.mem_map(address, size, convert_permissions(perms))<EOL>self.copy_memory(address, size)<EOL>", "docstring": "Catches did_map_memory and copies the mapping into Manticore", "id": "f17018:c0:m3"}
{"signature": "def write_back_register(self, reg, val):", "body": "if self.write_backs_disabled:<EOL><INDENT>return<EOL><DEDENT>if issymbolic(val):<EOL><INDENT>logger.warning(\"<STR_LIT>\")<EOL>return<EOL><DEDENT>if reg in self.flag_registers:<EOL><INDENT>self._emu.reg_write(self._to_unicorn_id('<STR_LIT>'), self._cpu.read_register('<STR_LIT>'))<EOL>return<EOL><DEDENT>self._emu.reg_write(self._to_unicorn_id(reg), val)<EOL>", "docstring": "Sync register state from Manticore -> Unicorn", "id": "f17018:c0:m16"}
{"signature": "def JUMPDEST(self):", "body": "", "docstring": "Mark a valid destination for jumps", "id": "f17019:c16:m110"}
{"signature": "@concretized_args(account='<STR_LIT>')<EOL><INDENT>def EXTCODECOPY(self, account, address, offset, size):<DEDENT>", "body": "extbytecode = self.world.get_code(account)<EOL>self._allocate(address + size)<EOL>for i in range(size):<EOL><INDENT>if offset + i < len(extbytecode):<EOL><INDENT>self._store(address + i, extbytecode[offset + i])<EOL><DEDENT>else:<EOL><INDENT>self._store(address + i, <NUM_LIT:0>)<EOL><DEDENT><DEDENT>", "docstring": "Copy an account's code to memory", "id": "f17019:c16:m85"}
{"signature": "def _top(self, n=<NUM_LIT:0>):", "body": "if len(self.stack) - n < <NUM_LIT:0>:<EOL><INDENT>raise StackUnderflow()<EOL><DEDENT>return self.stack[n - <NUM_LIT:1>]<EOL>", "docstring": "Read a value from the top of the stack without removing it", "id": "f17019:c16:m17"}
{"signature": "def _push(self, value):", "body": "assert isinstance(value, int) or isinstance(value, BitVec) and value.size == <NUM_LIT><EOL>if len(self.stack) >= <NUM_LIT>:<EOL><INDENT>raise StackOverflow()<EOL><DEDENT>if isinstance(value, int):<EOL><INDENT>value = value & TT256M1<EOL><DEDENT>value = simplify(value)<EOL>if isinstance(value, Constant) and not value.taint:<EOL><INDENT>value = value.value<EOL><DEDENT>self.stack.append(value)<EOL>", "docstring": "Push into the stack\n\n      ITEM0\n      ITEM1\n      ITEM2\nsp->  {empty}", "id": "f17019:c16:m16"}
{"signature": "@transact<EOL><INDENT>def CREATE(self, value, offset, size):<DEDENT>", "body": "address = self.world.create_account(address=EVMWorld.calculate_new_address(sender=self.address, nonce=self.world.get_nonce(self.address)))<EOL>self.world.start_transaction('<STR_LIT>',<EOL>address,<EOL>data=self.read_buffer(offset, size),<EOL>caller=self.address,<EOL>value=value,<EOL>gas=self.gas)<EOL>raise StartTx()<EOL>", "docstring": "Create a new account with associated code", "id": "f17019:c16:m117"}
{"signature": "def STOP(self):", "body": "raise EndTx('<STR_LIT>')<EOL>", "docstring": "Halts execution", "id": "f17019:c16:m41"}
{"signature": "def MSTORE8(self, address, value):", "body": "if istainted(self.pc):<EOL><INDENT>for taint in get_taints(self.pc):<EOL><INDENT>value = taint_with(value, taint)<EOL><DEDENT><DEDENT>self._allocate(address, <NUM_LIT:1>)<EOL>self._store(address, Operators.EXTRACT(value, <NUM_LIT:0>, <NUM_LIT:8>), <NUM_LIT:1>)<EOL>", "docstring": "Save byte to memory", "id": "f17019:c16:m101"}
{"signature": "def XOR(self, a, b):", "body": "return a ^ b<EOL>", "docstring": "Bitwise XOR operation", "id": "f17019:c16:m62"}
{"signature": "def __reduce__(self):", "body": "return (self.__class__, (self.sort, self.address, self.price, self.data, self.caller, self.value, self.gas, self.depth, self.result, self.return_data))<EOL>", "docstring": "Implements serialization/pickle", "id": "f17019:c0:m11"}
{"signature": "def MOD(self, a, b):", "body": "try:<EOL><INDENT>result = Operators.ITEBV(<NUM_LIT>, b == <NUM_LIT:0>, <NUM_LIT:0>, a % b)<EOL><DEDENT>except ZeroDivisionError:<EOL><INDENT>result = <NUM_LIT:0><EOL><DEDENT>return result<EOL>", "docstring": "Modulo remainder operation", "id": "f17019:c16:m47"}
{"signature": "def BYTE(self, offset, value):", "body": "offset = Operators.ITEBV(<NUM_LIT>, offset < <NUM_LIT:32>, (<NUM_LIT> - offset) * <NUM_LIT:8>, <NUM_LIT>)<EOL>return Operators.ZEXTEND(Operators.EXTRACT(value, offset, <NUM_LIT:8>), <NUM_LIT>)<EOL>", "docstring": "Retrieve single byte from word", "id": "f17019:c16:m64"}
{"signature": "def has_storage(self, address):", "body": "storage = self._world_state[address]['<STR_LIT>']<EOL>array = storage.array<EOL>while not isinstance(array, ArrayVariable):<EOL><INDENT>if isinstance(array, ArrayStore):<EOL><INDENT>return True<EOL><DEDENT>array = array.array<EOL><DEDENT>return False<EOL>", "docstring": "True if something has been written to the storage.\nNote that if a slot has been erased from the storage this function may\nlose any meaning.", "id": "f17019:c17:m28"}
{"signature": "def set_storage_data(self, storage_address, offset, value):", "body": "self._world_state[storage_address]['<STR_LIT>'][offset] = value<EOL>", "docstring": "Writes a value to a storage slot in specified account\n\n:param storage_address: an account address\n:param offset: the storage slot to use.\n:type offset: int or BitVec\n:param value: the value to write\n:type value: int or BitVec", "id": "f17019:c17:m26"}
{"signature": "def EXP(self, base, exponent):", "body": "<EOL>return pow(base, exponent, TT256)<EOL>", "docstring": "Exponential operation\nThe zero-th power of zero 0^0 is defined to be one", "id": "f17019:c16:m52"}
{"signature": "@property<EOL><INDENT>def last_human_transaction(self):<DEDENT>", "body": "for tx in reversed(self.transactions):<EOL><INDENT>if tx.depth == <NUM_LIT:0>:<EOL><INDENT>return tx<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Last completed human transaction", "id": "f17019:c17:m15"}
{"signature": "@property<EOL><INDENT>def current_transaction(self):<DEDENT>", "body": "try:<EOL><INDENT>tx, _, _, _, _ = self._callstack[-<NUM_LIT:1>]<EOL>if tx.result is not None:<EOL><INDENT>return None<EOL><DEDENT>return tx<EOL><DEDENT>except IndexError:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "current tx", "id": "f17019:c17:m18"}
{"signature": "def GT(self, a, b):", "body": "return Operators.ITEBV(<NUM_LIT>, Operators.UGT(a, b), <NUM_LIT:1>, <NUM_LIT:0>)<EOL>", "docstring": "Greater-than comparison", "id": "f17019:c16:m55"}
{"signature": "def SWAP(self, *operands):", "body": "a = operands[<NUM_LIT:0>]<EOL>b = operands[-<NUM_LIT:1>]<EOL>return (b,) + operands[<NUM_LIT:1>:-<NUM_LIT:1>] + (a,)<EOL>", "docstring": "Exchange 1st and 2nd stack items", "id": "f17019:c16:m113"}
{"signature": "def ADD(self, a, b):", "body": "return a + b<EOL>", "docstring": "Addition operation", "id": "f17019:c16:m42"}
{"signature": "def JUMP(self, dest):", "body": "self.pc = dest<EOL>self._set_check_jmpdest()<EOL>", "docstring": "Alter the program counter", "id": "f17019:c16:m105"}
{"signature": "def _rollback(self):", "body": "last_pc, last_gas, last_instruction, last_arguments, fee, allocated = self._checkpoint_data<EOL>self._push_arguments(last_arguments)<EOL>self._gas = last_gas<EOL>self._pc = last_pc<EOL>self._allocated = allocated<EOL>self._checkpoint_data = None<EOL>", "docstring": "Revert the stack, gas, pc and memory allocation so it looks like before executing the instruction", "id": "f17019:c16:m28"}
{"signature": "def SSTORE(self, offset, value):", "body": "storage_address = self.address<EOL>self._publish('<STR_LIT>', storage_address, offset, value)<EOL>if istainted(self.pc):<EOL><INDENT>for taint in get_taints(self.pc):<EOL><INDENT>value = taint_with(value, taint)<EOL><DEDENT><DEDENT>self.world.set_storage_data(storage_address, offset, value)<EOL>self._publish('<STR_LIT>', storage_address, offset, value)<EOL>", "docstring": "Save word to storage", "id": "f17019:c16:m104"}
{"signature": "def COINBASE(self):", "body": "return self.world.block_coinbase()<EOL>", "docstring": "Get the block's beneficiary address", "id": "f17019:c16:m90"}
{"signature": "def DIFFICULTY(self):", "body": "return self.world.block_difficulty()<EOL>", "docstring": "Get the block's difficulty", "id": "f17019:c16:m93"}
{"signature": "def _get_memfee(self, address, size=<NUM_LIT:1>):", "body": "if not issymbolic(size) and size == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>address = self.safe_add(address, size)<EOL>allocated = self.allocated<EOL>GMEMORY = <NUM_LIT:3><EOL>GQUADRATICMEMDENOM = <NUM_LIT>  <EOL>old_size = Operators.ZEXTEND(Operators.UDIV(self.safe_add(allocated, <NUM_LIT>), <NUM_LIT:32>), <NUM_LIT>)<EOL>new_size = Operators.ZEXTEND(Operators.UDIV(self.safe_add(address, <NUM_LIT>), <NUM_LIT:32>), <NUM_LIT>)<EOL>old_totalfee = self.safe_mul(old_size, GMEMORY) + Operators.UDIV(self.safe_mul(old_size, old_size), GQUADRATICMEMDENOM)<EOL>new_totalfee = self.safe_mul(new_size, GMEMORY) + Operators.UDIV(self.safe_mul(new_size, new_size), GQUADRATICMEMDENOM)<EOL>memfee = new_totalfee - old_totalfee<EOL>flag = Operators.UGT(new_totalfee, old_totalfee)<EOL>return Operators.ITEBV(<NUM_LIT>, size == <NUM_LIT:0>, <NUM_LIT:0>, Operators.ITEBV(<NUM_LIT>, flag, memfee, <NUM_LIT:0>))<EOL>", "docstring": "This calculates the amount of extra gas needed for accessing to\npreviously unused memory.\n\n:param address: base memory offset\n:param size: size of the memory access", "id": "f17019:c16:m7"}
{"signature": "def start_transaction(self, sort, address, price=None, data=None, caller=None, value=<NUM_LIT:0>, gas=<NUM_LIT>):", "body": "assert self._pending_transaction is None, \"<STR_LIT>\"<EOL>self._pending_transaction = PendingTransaction(sort, address, price, data, caller, value, gas)<EOL>", "docstring": "Initiate a transaction\n:param sort: the type of transaction. CREATE or CALL or DELEGATECALL\n:param address: the address of the account which owns the code that is executing.\n:param price: the price of gas in the transaction that originated this execution.\n:param data: the byte array that is the input data to this execution\n:param caller: the address of the account which caused the code to be executing. A 160-bit code used for identifying Accounts\n:param value: the value, in Wei, passed to this account as part of the same procedure as execution. One Ether is defined as being 10**18 Wei.\n:param bytecode: the byte array that is the machine code to be executed.\n:param gas: gas budget for this transaction.", "id": "f17019:c17:m60"}
{"signature": "def _checkpoint(self):", "body": "<EOL>if self._checkpoint_data is None:<EOL><INDENT>if not self._published_pre_instruction_events:<EOL><INDENT>self._published_pre_instruction_events = True<EOL>self._publish('<STR_LIT>', self.pc)<EOL>self._publish('<STR_LIT>', self.pc, self.instruction)<EOL>self._publish('<STR_LIT>', self.instruction, self._top_arguments())<EOL><DEDENT>pc = self.pc<EOL>instruction = self.instruction<EOL>old_gas = self.gas<EOL>allocated = self._allocated<EOL>arguments = self._pop_arguments()<EOL>fee = self._calculate_gas(*arguments)<EOL>self._checkpoint_data = (pc, old_gas, instruction, arguments, fee, allocated)<EOL><DEDENT>return self._checkpoint_data<EOL>", "docstring": "Save and/or get a state checkpoint previous to current instruction", "id": "f17019:c16:m27"}
{"signature": "def DUP(self, *operands):", "body": "return (operands[-<NUM_LIT:1>],) + operands<EOL>", "docstring": "Duplicate stack item", "id": "f17019:c16:m112"}
{"signature": "def PUSH(self, value):", "body": "return value<EOL>", "docstring": "Place 1 to 32 bytes item on stack", "id": "f17019:c16:m111"}
{"signature": "def OR(self, a, b):", "body": "return a | b<EOL>", "docstring": "Bitwise OR operation", "id": "f17019:c16:m61"}
{"signature": "def MSTORE(self, address, value):", "body": "if istainted(self.pc):<EOL><INDENT>for taint in get_taints(self.pc):<EOL><INDENT>value = taint_with(value, taint)<EOL><DEDENT><DEDENT>self._allocate(address, <NUM_LIT:32>)<EOL>self._store(address, value, <NUM_LIT:32>)<EOL>", "docstring": "Save word to memory", "id": "f17019:c16:m99"}
{"signature": "def dump(self, stream, state, mevm, conc_tx=None):", "body": "from ..ethereum import ABI  <EOL>from ..ethereum.manticore import flagged<EOL>is_something_symbolic = False<EOL>if conc_tx is None:<EOL><INDENT>conc_tx = self.concretize(state)<EOL><DEDENT>stream.write(\"<STR_LIT>\" % (self.sort, self.depth))<EOL>caller_solution = conc_tx.caller<EOL>caller_name = mevm.account_name(caller_solution)<EOL>stream.write(\"<STR_LIT>\" % (caller_name, caller_solution, flagged(issymbolic(self.caller))))<EOL>address_solution = conc_tx.address<EOL>address_name = mevm.account_name(address_solution)<EOL>stream.write(\"<STR_LIT>\" % (address_name, address_solution, flagged(issymbolic(self.address))))<EOL>stream.write(\"<STR_LIT>\" % (conc_tx.value, flagged(issymbolic(self.value))))<EOL>stream.write(\"<STR_LIT>\" % (conc_tx.gas, flagged(issymbolic(self.gas))))<EOL>tx_data = conc_tx.data<EOL>stream.write(\"<STR_LIT>\".format(binascii.hexlify(tx_data).decode(), flagged(issymbolic(self.data))))<EOL>if self.return_data is not None:<EOL><INDENT>return_data = conc_tx.return_data<EOL>stream.write(\"<STR_LIT>\".format(binascii.hexlify(return_data).decode(), flagged(issymbolic(self.return_data))))<EOL><DEDENT>metadata = mevm.get_metadata(self.address)<EOL>if self.sort == '<STR_LIT>':<EOL><INDENT>if metadata is not None:<EOL><INDENT>conc_args_data = conc_tx.data[len(metadata._init_bytecode):]<EOL>arguments = ABI.deserialize(metadata.get_constructor_arguments(), conc_args_data)<EOL>is_argument_symbolic = any(map(issymbolic, arguments))  <EOL>stream.write('<STR_LIT>')<EOL>stream.write(\"<STR_LIT>\")<EOL>stream.write('<STR_LIT:U+002C>'.join(map(repr, map(state.solve_one, arguments))))  <EOL>stream.write('<STR_LIT>' % (self.result, flagged(is_argument_symbolic)))<EOL><DEDENT><DEDENT>if self.sort == '<STR_LIT>':<EOL><INDENT>if metadata is not None:<EOL><INDENT>calldata = conc_tx.data<EOL>is_calldata_symbolic = issymbolic(self.data)<EOL>function_id = calldata[:<NUM_LIT:4>]  
<EOL>signature = metadata.get_func_signature(function_id)<EOL>function_name = metadata.get_func_name(function_id)<EOL>if signature:<EOL><INDENT>_, arguments = ABI.deserialize(signature, calldata)<EOL><DEDENT>else:<EOL><INDENT>arguments = (calldata,)<EOL><DEDENT>return_data = None<EOL>if self.result == '<STR_LIT>':<EOL><INDENT>ret_types = metadata.get_func_return_types(function_id)<EOL>return_data = conc_tx.return_data<EOL>return_values = ABI.deserialize(ret_types, return_data)  <EOL><DEDENT>is_return_symbolic = issymbolic(self.return_data)<EOL>stream.write('<STR_LIT:\\n>')<EOL>stream.write(\"<STR_LIT>\")<EOL>stream.write(\"<STR_LIT>\" % function_name)<EOL>stream.write('<STR_LIT:U+002C>'.join(map(repr, arguments)))<EOL>stream.write('<STR_LIT>' % (self.result, flagged(is_calldata_symbolic)))<EOL>if return_data is not None:<EOL><INDENT>if len(return_values) == <NUM_LIT:1>:<EOL><INDENT>return_values = return_values[<NUM_LIT:0>]<EOL><DEDENT>stream.write('<STR_LIT>' % (return_values, flagged(is_return_symbolic)))<EOL><DEDENT>is_something_symbolic = is_calldata_symbolic or is_return_symbolic<EOL><DEDENT><DEDENT>stream.write('<STR_LIT>')<EOL>return is_something_symbolic<EOL>", "docstring": "Concretize and write a human readable version of the transaction into the stream. Used during testcase\ngeneration.\n\n:param stream: Output stream to write to. Typically a file.\n:param manticore.ethereum.State state: state that the tx exists in\n:param manticore.ethereum.ManticoreEVM mevm: manticore instance\n:return:", "id": "f17019:c0:m3"}
{"signature": "def SLT(self, a, b):", "body": "<EOL>s0, s1 = to_signed(a), to_signed(b)<EOL>return Operators.ITEBV(<NUM_LIT>, s0 < s1, <NUM_LIT:1>, <NUM_LIT:0>)<EOL>", "docstring": "Signed less-than comparison", "id": "f17019:c16:m56"}
{"signature": "def block_hash(self, block_number=None, force_recent=True):", "body": "if block_number is None:<EOL><INDENT>block_number = self.block_number() - <NUM_LIT:1><EOL><DEDENT>value = sha3.keccak_256((repr(block_number) + '<STR_LIT>').encode()).hexdigest()<EOL>value = int(value, <NUM_LIT:16>)<EOL>if force_recent:<EOL><INDENT>bnmax = Operators.ITEBV(<NUM_LIT>, self.block_number() > <NUM_LIT>, <NUM_LIT>, self.block_number())<EOL>value = Operators.ITEBV(<NUM_LIT>, Operators.OR(block_number >= self.block_number(), block_number < bnmax), <NUM_LIT:0>, value)<EOL><DEDENT>return value<EOL>", "docstring": "Calculates a block's hash\n:param block_number: the block number for which to calculate the hash, defaulting to the most recent block\n:param force_recent: if True (the default) return zero for any block that is in the future or older than 256 blocks\n:return: the block hash", "id": "f17019:c17:m50"}
{"signature": "@concretized_args(account='<STR_LIT>')<EOL><INDENT>def EXTCODESIZE(self, account):<DEDENT>", "body": "return len(self.world.get_code(account))<EOL>", "docstring": "Get size of an account's code", "id": "f17019:c16:m83"}
{"signature": "def JUMPI(self, dest, cond):", "body": "self.pc = Operators.ITEBV(<NUM_LIT>, cond != <NUM_LIT:0>, dest, self.pc + self.instruction.size)<EOL>self._set_check_jmpdest(cond != <NUM_LIT:0>)<EOL>", "docstring": "Conditionally alter the program counter", "id": "f17019:c16:m106"}
{"signature": "def EXP_gas(self, base, exponent):", "body": "EXP_SUPPLEMENTAL_GAS = <NUM_LIT:10>   <EOL>def nbytes(e):<EOL><INDENT>result = <NUM_LIT:0><EOL>for i in range(<NUM_LIT:32>):<EOL><INDENT>result = Operators.ITEBV(<NUM_LIT>, Operators.EXTRACT(e, i * <NUM_LIT:8>, <NUM_LIT:8>) != <NUM_LIT:0>, i + <NUM_LIT:1>, result)<EOL><DEDENT>return result<EOL><DEDENT>return EXP_SUPPLEMENTAL_GAS * nbytes(exponent)<EOL>", "docstring": "Calculate extra gas fee", "id": "f17019:c16:m51"}
{"signature": "def _check_jmpdest(self):", "body": "should_check_jumpdest = self._check_jumpdest<EOL>if issymbolic(should_check_jumpdest):<EOL><INDENT>should_check_jumpdest_solutions = solver.get_all_values(self.constraints, should_check_jumpdest)<EOL>if len(should_check_jumpdest_solutions) != <NUM_LIT:1>:<EOL><INDENT>raise EthereumError(\"<STR_LIT>\")<EOL><DEDENT>should_check_jumpdest = should_check_jumpdest_solutions[<NUM_LIT:0>]<EOL><DEDENT>if should_check_jumpdest:<EOL><INDENT>self._check_jumpdest = False<EOL>pc = self.pc.value if isinstance(self.pc, Constant) else self.pc<EOL>if pc not in self._valid_jumpdests:<EOL><INDENT>raise InvalidOpcode()<EOL><DEDENT><DEDENT>", "docstring": "If the previous instruction was a JUMP/JUMPI and the conditional was\nTrue, this checks that the current instruction must be a JUMPDEST.\n\nHere, if symbolic, the conditional `self._check_jumpdest` would be\nalready constrained to a single concrete value.", "id": "f17019:c16:m30"}
{"signature": "def _pop(self):", "body": "if len(self.stack) == <NUM_LIT:0>:<EOL><INDENT>raise StackUnderflow()<EOL><DEDENT>return self.stack.pop()<EOL>", "docstring": "Pop a value from the stack", "id": "f17019:c16:m18"}
{"signature": "@property<EOL><INDENT>def last_transaction(self):<DEDENT>", "body": "if len(self.transactions):<EOL><INDENT>return self.transactions[-<NUM_LIT:1>]<EOL><DEDENT>return None<EOL>", "docstring": "Last completed transaction", "id": "f17019:c17:m14"}
{"signature": "def read_code(self, address, size=<NUM_LIT:1>):", "body": "assert address < len(self.bytecode)<EOL>value = self.bytecode[address:address + size]<EOL>if len(value) < size:<EOL><INDENT>value += '<STR_LIT:\\x00>' * (size - len(value))  <EOL><DEDENT>return value<EOL>", "docstring": "Read size byte from bytecode.\nIf less than size bytes are available result will be pad with \\x00", "id": "f17019:c16:m12"}
{"signature": "def GASPRICE(self):", "body": "return self.world.tx_gasprice()<EOL>", "docstring": "Get price of gas in current environment", "id": "f17019:c16:m82"}
{"signature": "def new_address(self, sender=None, nonce=None):", "body": "if sender is not None and nonce is None:<EOL><INDENT>nonce = self.get_nonce(sender)<EOL><DEDENT>new_address = self.calculate_new_address(sender, nonce)<EOL>if sender is None and new_address in self:<EOL><INDENT>return self.new_address(sender, nonce)<EOL><DEDENT>return new_address<EOL>", "docstring": "Create a fresh 160bit address", "id": "f17019:c17:m54"}
{"signature": "def ORIGIN(self):", "body": "return Operators.ZEXTEND(self.world.tx_origin(), <NUM_LIT>)<EOL>", "docstring": "Get execution origination address", "id": "f17019:c16:m71"}
{"signature": "def _close(self, fd):", "body": "self.files[fd] = None<EOL>", "docstring": "Closes a file descriptor\n:rtype: int\n:param fd: the file descriptor to close.\n:return: C{0} on success.", "id": "f17020:c4:m10"}
{"signature": "def signal_receive(self, fd):", "body": "connections = self.connections<EOL>if connections(fd) and self.twait[connections(fd)]:<EOL><INDENT>procid = random.sample(self.twait[connections(fd)], <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>self.awake(procid)<EOL><DEDENT>", "docstring": "Awake one process waiting to receive data on fd", "id": "f17020:c4:m25"}
{"signature": "def _read_string(self, cpu, buf):", "body": "filename = \"<STR_LIT>\"<EOL>for i in range(<NUM_LIT:0>, <NUM_LIT>):<EOL><INDENT>c = Operators.CHR(cpu.read_int(buf + i, <NUM_LIT:8>))<EOL>if c == '<STR_LIT:\\x00>':<EOL><INDENT>break<EOL><DEDENT>filename += c<EOL><DEDENT>return filename<EOL>", "docstring": "Reads a null terminated concrete buffer form memory\n:todo: FIX. move to cpu or memory", "id": "f17020:c4:m7"}
{"signature": "def _dup(self, fd):", "body": "return self._open(self.files[fd])<EOL>", "docstring": "Duplicates a file descriptor\n:rtype: int\n:param fd: the file descriptor to close.\n:return: C{0} on success.", "id": "f17020:c4:m11"}
{"signature": "def execute(self):", "body": "try:<EOL><INDENT>self.current.execute()<EOL>self.clocks += <NUM_LIT:1><EOL>if self.clocks % <NUM_LIT> == <NUM_LIT:0>:<EOL><INDENT>self.check_timers()<EOL>self.sched()<EOL><DEDENT><DEDENT>except Interruption as e:<EOL><INDENT>if e.N != <NUM_LIT>:<EOL><INDENT>raise<EOL><DEDENT>try:<EOL><INDENT>self.int80(self.current)<EOL><DEDENT>except RestartSyscall:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Execute one cpu instruction in the current thread (only one supported).\n:rtype: bool\n:return: C{True}\n\n:todo: This is where we could implement a simple schedule.", "id": "f17020:c4:m28"}
{"signature": "def sys_receive(self, cpu, fd, buf, count, rx_bytes):", "body": "data = '<STR_LIT>'<EOL>if count != <NUM_LIT:0>:<EOL><INDENT>if not self._is_open(fd):<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>return Decree.CGC_EBADF<EOL><DEDENT>if buf not in cpu.memory:  <EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>return Decree.CGC_EFAULT<EOL><DEDENT>if fd > <NUM_LIT:2> and self.files[fd].is_empty():<EOL><INDENT>cpu.PC -= cpu.instruction.size<EOL>self.wait([fd], [], None)<EOL>raise RestartSyscall()<EOL><DEDENT>data = self.files[fd].receive(count)<EOL>self.syscall_trace.append((\"<STR_LIT>\", fd, data))<EOL>cpu.write_bytes(buf, data)<EOL>self.signal_receive(fd)<EOL><DEDENT>if rx_bytes:<EOL><INDENT>if rx_bytes not in cpu.memory:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>return Decree.CGC_EFAULT<EOL><DEDENT>cpu.write_int(rx_bytes, len(data), <NUM_LIT:32>)<EOL><DEDENT>logger.info(\"<STR_LIT>\" % (fd, buf, count, rx_bytes, repr(data)[:min(count, <NUM_LIT:10>)], len(data)))<EOL>return <NUM_LIT:0><EOL>", "docstring": "receive - receive bytes from a file descriptor\n\n            The receive system call reads up to count bytes from file descriptor fd to the\n            buffer pointed to by buf. If count is zero, receive returns 0 and optionally\n            sets *rx_bytes to zero.\n\n            :param cpu: current CPU.\n            :param fd: a valid file descriptor\n            :param buf: a memory buffer\n            :param count: max number of bytes to receive\n            :param rx_bytes: if valid, points to the actual number of bytes received\n            :return: 0            Success\n                     EBADF        fd is not a valid file descriptor or is not open\n                     EFAULT       buf or rx_bytes points to an invalid address.", "id": "f17020:c4:m15"}
{"signature": "def sys_fdwait(self, cpu, nfds, readfds, writefds, timeout, readyfds):", "body": "logger.debug(\"<STR_LIT>\" % (nfds, readfds, writefds, timeout, readyfds))<EOL>if timeout:<EOL><INDENT>if timeout not in cpu.memory:  <EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>return Decree.CGC_EFAULT<EOL><DEDENT><DEDENT>if readyfds:<EOL><INDENT>if readyfds not in cpu.memory:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>return Decree.CGC_EFAULT<EOL><DEDENT><DEDENT>writefds_wait = set()<EOL>writefds_ready = set()<EOL>fds_bitsize = (nfds + <NUM_LIT:7>) & ~<NUM_LIT:7><EOL>if writefds:<EOL><INDENT>if writefds not in cpu.memory:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>return Decree.CGC_EFAULT<EOL><DEDENT>bits = cpu.read_int(writefds, fds_bitsize)<EOL>for fd in range(nfds):<EOL><INDENT>if (bits & <NUM_LIT:1> << fd):<EOL><INDENT>if self.files[fd].is_full():<EOL><INDENT>writefds_wait.add(fd)<EOL><DEDENT>else:<EOL><INDENT>writefds_ready.add(fd)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>readfds_wait = set()<EOL>readfds_ready = set()<EOL>if readfds:<EOL><INDENT>if readfds not in cpu.memory:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>return Decree.CGC_EFAULT<EOL><DEDENT>bits = cpu.read_int(readfds, fds_bitsize)<EOL>for fd in range(nfds):<EOL><INDENT>if (bits & <NUM_LIT:1> << fd):<EOL><INDENT>if self.files[fd].is_empty():<EOL><INDENT>readfds_wait.add(fd)<EOL><DEDENT>else:<EOL><INDENT>readfds_ready.add(fd)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>n = len(readfds_ready) + len(writefds_ready)<EOL>if n == <NUM_LIT:0>:<EOL><INDENT>if timeout != <NUM_LIT:0>:<EOL><INDENT>seconds = cpu.read_int(timeout, <NUM_LIT:32>)<EOL>microseconds = cpu.read_int(timeout + <NUM_LIT:4>, <NUM_LIT:32>)<EOL>logger.info(\"<STR_LIT>\", repr(<EOL>list(readfds_wait)), repr(list(writefds_wait)), microseconds + <NUM_LIT:1000> * seconds)<EOL>to = microseconds + <NUM_LIT:1000> * seconds<EOL><DEDENT>else:<EOL><INDENT>to = None<EOL>logger.info(\"<STR_LIT>\",<EOL>repr(list(readfds_wait)), 
repr(list(writefds_wait)))<EOL><DEDENT>cpu.PC -= cpu.instruction.size<EOL>self.wait(readfds_wait, writefds_wait, to)<EOL>raise RestartSyscall()  <EOL><DEDENT>if readfds:<EOL><INDENT>bits = <NUM_LIT:0><EOL>for fd in readfds_ready:<EOL><INDENT>bits |= <NUM_LIT:1> << fd<EOL><DEDENT>for byte in range(<NUM_LIT:0>, nfds, <NUM_LIT:8>):<EOL><INDENT>cpu.write_int(readfds, (bits >> byte) & <NUM_LIT>, <NUM_LIT:8>)<EOL><DEDENT><DEDENT>if writefds:<EOL><INDENT>bits = <NUM_LIT:0><EOL>for fd in writefds_ready:<EOL><INDENT>bits |= <NUM_LIT:1> << fd<EOL><DEDENT>for byte in range(<NUM_LIT:0>, nfds, <NUM_LIT:8>):<EOL><INDENT>cpu.write_int(writefds, (bits >> byte) & <NUM_LIT>, <NUM_LIT:8>)<EOL><DEDENT><DEDENT>logger.info(\"<STR_LIT>\", readyfds)<EOL>if readyfds:<EOL><INDENT>cpu.write_int(readyfds, n, <NUM_LIT:32>)<EOL><DEDENT>self.syscall_trace.append((\"<STR_LIT>\", -<NUM_LIT:1>, None))<EOL>return <NUM_LIT:0><EOL>", "docstring": "fdwait - wait for file descriptors to become ready", "id": "f17020:c4:m19"}
{"signature": "def int80(self, cpu):", "body": "syscalls = {<NUM_LIT>: self.sys_terminate,<EOL><NUM_LIT>: self.sys_transmit,<EOL><NUM_LIT>: self.sys_receive,<EOL><NUM_LIT>: self.sys_fdwait,<EOL><NUM_LIT>: self.sys_allocate,<EOL><NUM_LIT>: self.sys_deallocate,<EOL><NUM_LIT>: self.sys_random,<EOL>}<EOL>if cpu.EAX not in syscalls.keys():<EOL><INDENT>raise TerminateState(f\"<STR_LIT>\")<EOL><DEDENT>func = syscalls[cpu.EAX]<EOL>logger.debug(\"<STR_LIT>\", func.__name__, func.__code__.co_argcount)<EOL>nargs = func.__code__.co_argcount<EOL>args = [cpu, cpu.EBX, cpu.ECX, cpu.EDX, cpu.ESI, cpu.EDI, cpu.EBP]<EOL>cpu.EAX = func(*args[:nargs - <NUM_LIT:1>])<EOL>", "docstring": "32 bit dispatcher.\n:param cpu: current CPU.\n_terminate, transmit, receive, fdwait, allocate, deallocate and random", "id": "f17020:c4:m20"}
{"signature": "def __init__(self, constraints, programs, symbolic_random=None):", "body": "self.random = <NUM_LIT:0><EOL>self._constraints = constraints<EOL>super().__init__(programs)<EOL>", "docstring": "Builds a symbolic extension of a Decree OS\n:param constraints: a constraint set\n:param cpus: CPU for this platform\n:param mem: memory for this platform", "id": "f17020:c5:m0"}
{"signature": "def sys_transmit(self, cpu, fd, buf, count, tx_bytes):", "body": "data = []<EOL>if count != <NUM_LIT:0>:<EOL><INDENT>if not self._is_open(fd):<EOL><INDENT>logger.error(\"<STR_LIT>\", fd)<EOL>return Decree.CGC_EBADF<EOL><DEDENT>if buf not in cpu.memory or (buf + count) not in cpu.memory:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Decree.CGC_EFAULT<EOL><DEDENT>if fd > <NUM_LIT:2> and self.files[fd].is_full():<EOL><INDENT>cpu.PC -= cpu.instruction.size<EOL>self.wait([], [fd], None)<EOL>raise RestartSyscall()<EOL><DEDENT>for i in range(<NUM_LIT:0>, count):<EOL><INDENT>value = Operators.CHR(cpu.read_int(buf + i, <NUM_LIT:8>))<EOL>if not isinstance(value, str):<EOL><INDENT>logger.debug(\"<STR_LIT>\", fd)<EOL><DEDENT>data.append(value)<EOL><DEDENT>self.files[fd].transmit(data)<EOL>logger.info(\"<STR_LIT>\" % (fd, buf, count, tx_bytes, '<STR_LIT>'.join([str(x) for x in data])))<EOL>self.syscall_trace.append((\"<STR_LIT>\", fd, data))<EOL>self.signal_transmit(fd)<EOL><DEDENT>if tx_bytes:<EOL><INDENT>if tx_bytes not in cpu.memory:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return Decree.CGC_EFAULT<EOL><DEDENT>cpu.write_int(tx_bytes, len(data), <NUM_LIT:32>)<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "transmit - send bytes through a file descriptor\n          The  transmit system call writes up to count bytes from the buffer pointed\n          to by buf to the file descriptor fd. 
If count is zero, transmit returns 0\n          and optionally sets *tx_bytes to zero.\n\n          :param cpu           current CPU\n          :param fd            a valid file descriptor\n          :param buf           a memory buffer\n          :param count         number of bytes to send\n          :param tx_bytes      if valid, points to the actual number of bytes transmitted\n          :return: 0            Success\n                   EBADF        fd is not a valid file descriptor or is not open.\n                   EFAULT       buf or tx_bytes points to an invalid address.", "id": "f17020:c4:m16"}
{"signature": "def sys_receive(self, cpu, fd, buf, count, rx_bytes):", "body": "if issymbolic(fd):<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>cpu.PC = cpu.PC - cpu.instruction.size<EOL>raise SymbolicSyscallArgument(cpu, <NUM_LIT:0>)<EOL><DEDENT>if issymbolic(buf):<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>cpu.PC = cpu.PC - cpu.instruction.size<EOL>raise SymbolicSyscallArgument(cpu, <NUM_LIT:1>)<EOL><DEDENT>if issymbolic(count):<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>cpu.PC = cpu.PC - cpu.instruction.size<EOL>raise SymbolicSyscallArgument(cpu, <NUM_LIT:2>)<EOL><DEDENT>if issymbolic(rx_bytes):<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>cpu.PC = cpu.PC - cpu.instruction.size<EOL>raise SymbolicSyscallArgument(cpu, <NUM_LIT:3>)<EOL><DEDENT>return super().sys_receive(cpu, fd, buf, count, rx_bytes)<EOL>", "docstring": "Symbolic version of Decree.sys_receive", "id": "f17020:c5:m6"}
{"signature": "def sys_lseek(self, fd, offset, whence):", "body": "signed_offset = self._to_signed_dword(offset)<EOL>try:<EOL><INDENT>return self._get_fd(fd).seek(signed_offset, whence)<EOL><DEDENT>except FdError as e:<EOL><INDENT>logger.info((\"<STR_LIT>\"<EOL>\"<STR_LIT>\"))<EOL>return -e.err<EOL><DEDENT>", "docstring": "lseek - reposition read/write file offset\n\nThe lseek() function repositions the file offset of the open file description associated\nwith the file descriptor fd to the argument offset according to the directive whence\n\n\n:param fd: a valid file descriptor\n:param offset: the offset in bytes\n:param whence: SEEK_SET: The file offset is set to offset bytes.\n               SEEK_CUR: The file offset is set to its current location plus offset bytes.\n               SEEK_END: The file offset is set to the size of the file plus offset bytes.\n\n:return: offset from file beginning, or EBADF (fd is not a valid file descriptor or is not open)", "id": "f17024:c9:m29"}
{"signature": "def sys_geteuid(self):", "body": "return <NUM_LIT:1000><EOL>", "docstring": "Gets user identity.\n:rtype: int\n\n:return: This call returns C{1000} for all the users.", "id": "f17024:c9:m63"}
{"signature": "def sys_acct(self, path):", "body": "logger.debug(\"<STR_LIT>\")<EOL>return -<NUM_LIT:1><EOL>", "docstring": "System call not implemented.\n:rtype: int\n\n:return: C{-1}", "id": "f17024:c9:m71"}
{"signature": "def wait(self, readfds, writefds, timeout):", "body": "logger.debug(\"<STR_LIT>\")<EOL>logger.debug(f\"<STR_LIT>\")<EOL>logger.debug(f\"<STR_LIT>\")<EOL>logger.debug(f\"<STR_LIT>\")<EOL>logger.debug(f\"<STR_LIT>\")<EOL>logger.debug(f\"<STR_LIT>\")<EOL>logger.debug(f\"<STR_LIT>\")<EOL>for fd in readfds:<EOL><INDENT>self.rwait[fd].add(self._current)<EOL><DEDENT>for fd in writefds:<EOL><INDENT>self.twait[fd].add(self._current)<EOL><DEDENT>if timeout is not None:<EOL><INDENT>self.timers[self._current] = self.clocks + timeout<EOL><DEDENT>procid = self._current<EOL>next_index = (self.running.index(procid) + <NUM_LIT:1>) % len(self.running)<EOL>self._current = self.running[next_index]<EOL>logger.debug(f\"<STR_LIT>\")<EOL>logger.debug(f\"<STR_LIT>\")<EOL>self.running.remove(procid)<EOL>if self._current not in self.running:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>self._current = None<EOL>self.check_timers()<EOL><DEDENT>", "docstring": "Wait for file descriptors or timeout.\n            Adds the current process in the correspondent waiting list and\n            yield the cpu to another running process.", "id": "f17024:c9:m102"}
{"signature": "def sys_exit(self, error_code):", "body": "return self.sys_exit_group(error_code)<EOL>", "docstring": "Wrapper for sys_exit_group", "id": "f17024:c9:m72"}
{"signature": "def __init__(self, program, argv=None, envp=None, disasm='<STR_LIT>', **kwargs):", "body": "super().__init__(path=program, **kwargs)<EOL>self.program = program<EOL>self.clocks = <NUM_LIT:0><EOL>self.files = []<EOL>self._closed_files = []<EOL>self.syscall_trace = []<EOL>self.programs = program<EOL>self.disasm = disasm<EOL>self.envp = envp<EOL>self.argv = argv<EOL>self._rlimits = {<EOL>resource.RLIMIT_NOFILE: (<NUM_LIT>, <NUM_LIT>),<EOL>resource.RLIMIT_STACK: (<NUM_LIT> * <NUM_LIT>, <NUM_LIT:0>)<EOL>}<EOL>if program is not None:<EOL><INDENT>self.elf = ELFFile(open(program, '<STR_LIT:rb>'))<EOL>self.arch = {'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>'}[self.elf.get_machine_arch()]<EOL>self._init_cpu(self.arch)<EOL>self._init_std_fds()<EOL>self._execve(program, argv, envp)<EOL><DEDENT>", "docstring": "Builds a Linux OS platform\n:param string program: The path to ELF binary\n:param string disasm: Disassembler to be used\n:param list argv: The argv array; not including binary.\n:param list envp: The ENV variables.\n:ivar files: List of active file descriptors\n:type files: list[Socket] or list[File]", "id": "f17024:c9:m0"}
{"signature": "def sys_rt_sigaction(self, signum, act, oldact):", "body": "return self.sys_sigaction(signum, act, oldact)<EOL>", "docstring": "Wrapper for sys_sigaction", "id": "f17024:c9:m47"}
{"signature": "def tell(self):", "body": "return self.pos<EOL>", "docstring": "Returns the read/write file offset\n:rtype: int\n:return: the read/write file offset.", "id": "f17024:c6:m3"}
{"signature": "def sys_getuid(self):", "body": "return <NUM_LIT:1000><EOL>", "docstring": "Gets user identity.\n:rtype: int\n\n:return: this call returns C{1000} for all the users.", "id": "f17024:c9:m61"}
{"signature": "def write(self, data):", "body": "size = min(len(data), self.max_size - self.pos)<EOL>for i in range(self.pos, self.pos + size):<EOL><INDENT>self.array[i] = data[i - self.pos]<EOL><DEDENT>", "docstring": "Writes the symbolic bytes in C{data} onto the file.", "id": "f17024:c6:m6"}
{"signature": "def sys_fstat64(self, fd, buf):", "body": "try:<EOL><INDENT>stat = self._get_fd(fd).stat()<EOL><DEDENT>except FdError as e:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>return -e.err<EOL><DEDENT>def add(width, val):<EOL><INDENT>fformat = {<NUM_LIT:2>: '<STR_LIT:H>', <NUM_LIT:4>: '<STR_LIT:L>', <NUM_LIT:8>: '<STR_LIT>'}[width]<EOL>return struct.pack('<STR_LIT:<>' + fformat, val)<EOL><DEDENT>def to_timespec(ts):<EOL><INDENT>return struct.pack('<STR_LIT>', int(ts), int(ts % <NUM_LIT:1> * <NUM_LIT>))<EOL><DEDENT>bufstat = add(<NUM_LIT:8>, stat.st_dev)        <EOL>bufstat += add(<NUM_LIT:8>, stat.st_ino)        <EOL>bufstat += add(<NUM_LIT:4>, stat.st_mode)       <EOL>bufstat += add(<NUM_LIT:4>, stat.st_nlink)      <EOL>bufstat += add(<NUM_LIT:4>, stat.st_uid)        <EOL>bufstat += add(<NUM_LIT:4>, stat.st_gid)        <EOL>bufstat += add(<NUM_LIT:8>, stat.st_rdev)       <EOL>bufstat += add(<NUM_LIT:8>, <NUM_LIT:0>)                  <EOL>bufstat += add(<NUM_LIT:8>, stat.st_size)       <EOL>bufstat += add(<NUM_LIT:4>, stat.st_blksize)    <EOL>bufstat += add(<NUM_LIT:4>, <NUM_LIT:0>)                  <EOL>bufstat += add(<NUM_LIT:8>, stat.st_blocks)     <EOL>bufstat += to_timespec(stat.st_atime)  <EOL>bufstat += to_timespec(stat.st_mtime)  <EOL>bufstat += to_timespec(stat.st_ctime)  <EOL>bufstat += add(<NUM_LIT:4>, <NUM_LIT:0>)                   <EOL>bufstat += add(<NUM_LIT:4>, <NUM_LIT:0>)                   <EOL>self.current.write_bytes(buf, bufstat)<EOL>return <NUM_LIT:0><EOL>", "docstring": "Determines information about a file based on its file descriptor (for Linux 64 bits).\n:rtype: int\n:param fd: the file descriptor of the file that is being inquired.\n:param buf: a buffer where data about the file will be stored.\n:return: C{0} on success, EBADF when called with bad fd\n:todo: Fix device number.", "id": "f17024:c9:m111"}
{"signature": "def _open(self, f):", "body": "if None in self.files:<EOL><INDENT>fd = self.files.index(None)<EOL>self.files[fd] = f<EOL><DEDENT>else:<EOL><INDENT>fd = len(self.files)<EOL>self.files.append(f)<EOL><DEDENT>return fd<EOL>", "docstring": "Adds a file descriptor to the current file descriptor list\n\n:rtype: int\n:param f: the file descriptor to add.\n:return: the index of the file descriptor in the file descr. list", "id": "f17024:c9:m19"}
{"signature": "def execute(self):", "body": "try:<EOL><INDENT>self.current.execute()<EOL>self.clocks += <NUM_LIT:1><EOL>if self.clocks % <NUM_LIT> == <NUM_LIT:0>:<EOL><INDENT>self.check_timers()<EOL>self.sched()<EOL><DEDENT><DEDENT>except (Interruption, Syscall) as e:<EOL><INDENT>try:<EOL><INDENT>self.syscall()<EOL>if hasattr(e, '<STR_LIT>'):<EOL><INDENT>e.on_handled()<EOL><DEDENT><DEDENT>except RestartSyscall:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Execute one cpu instruction in the current thread (only one supported).\n:rtype: bool\n:return: C{True}\n\n:todo: This is where we could implement a simple schedule.", "id": "f17024:c9:m108"}
{"signature": "def sys_exit_group(self, error_code):", "body": "return self._exit(f\"<STR_LIT>\")<EOL>", "docstring": "Exits all threads in a process\n:raises Exception: 'Finished'", "id": "f17024:c9:m73"}
{"signature": "def awake(self, procid):", "body": "logger.debug(f\"<STR_LIT>\")<EOL>for wait_list in self.rwait:<EOL><INDENT>if procid in wait_list:<EOL><INDENT>wait_list.remove(procid)<EOL><DEDENT><DEDENT>for wait_list in self.twait:<EOL><INDENT>if procid in wait_list:<EOL><INDENT>wait_list.remove(procid)<EOL><DEDENT><DEDENT>self.timers[procid] = None<EOL>self.running.append(procid)<EOL>if self._current is None:<EOL><INDENT>self._current = procid<EOL><DEDENT>", "docstring": "Remove procid from waitlists and reestablish it in the running list", "id": "f17024:c9:m103"}
{"signature": "def sys_writev(self, fd, iov, count):", "body": "cpu = self.current<EOL>ptrsize = cpu.address_bit_size<EOL>sizeof_iovec = <NUM_LIT:2> * (ptrsize // <NUM_LIT:8>)<EOL>total = <NUM_LIT:0><EOL>try:<EOL><INDENT>write_fd = self._get_fd(fd)<EOL><DEDENT>except FdError as e:<EOL><INDENT>logger.error(f\"<STR_LIT>\")<EOL>return -e.err<EOL><DEDENT>for i in range(<NUM_LIT:0>, count):<EOL><INDENT>buf = cpu.read_int(iov + i * sizeof_iovec, ptrsize)<EOL>size = cpu.read_int(iov + i * sizeof_iovec + (sizeof_iovec // <NUM_LIT:2>), ptrsize)<EOL>data = [Operators.CHR(cpu.read_int(buf + i, <NUM_LIT:8>)) for i in range(size)]<EOL>data = self._transform_write_data(data)<EOL>write_fd.write(data)<EOL>self.syscall_trace.append((\"<STR_LIT>\", fd, data))<EOL>total += size<EOL><DEDENT>return total<EOL>", "docstring": "Works just like C{sys_write} except that multiple buffers are written out.\n:rtype: int\n\n:param fd: the file descriptor of the file to write.\n:param iov: the buffer where the the bytes to write are taken.\n:param count: amount of C{iov} buffers to write into the file.\n:return: the amount of bytes written in total.", "id": "f17024:c9:m66"}
{"signature": "def sys_mmap(self, address, size, prot, flags, fd, offset):", "body": "if address == <NUM_LIT:0>:<EOL><INDENT>address = None<EOL><DEDENT>cpu = self.current<EOL>if flags & <NUM_LIT>:<EOL><INDENT>cpu.memory.munmap(address, size)<EOL><DEDENT>perms = perms_from_protflags(prot)<EOL>if flags & <NUM_LIT>:<EOL><INDENT>result = cpu.memory.mmap(address, size, perms)<EOL><DEDENT>elif fd == <NUM_LIT:0>:<EOL><INDENT>assert offset == <NUM_LIT:0><EOL>result = cpu.memory.mmap(address, size, perms)<EOL>data = self.files[fd].read(size)<EOL>cpu.write_bytes(result, data)<EOL><DEDENT>else:<EOL><INDENT>result = cpu.memory.mmapFile(address, size, perms, self.files[fd].name, offset)<EOL><DEDENT>actually_mapped = f'<STR_LIT>'<EOL>if address is None or result != address:<EOL><INDENT>address = address or <NUM_LIT:0><EOL>actually_mapped += f'<STR_LIT>'<EOL><DEDENT>if flags & <NUM_LIT> != <NUM_LIT:0> and result != address:<EOL><INDENT>cpu.memory.munmap(result, size)<EOL>result = -<NUM_LIT:1><EOL><DEDENT>return result<EOL>", "docstring": "Creates a new mapping in the virtual address space of the calling process.\n:rtype: int\n\n:param address: the starting address for the new mapping. 
This address is used as hint unless the\n                flag contains C{MAP_FIXED}.\n:param size: the length of the mapping.\n:param prot: the desired memory protection of the mapping.\n:param flags: determines whether updates to the mapping are visible to other\n              processes mapping the same region, and whether updates are carried\n              through to the underlying file.\n:param fd: the contents of a file mapping are initialized using C{size} bytes starting at\n           offset C{offset} in the file referred to by the file descriptor C{fd}.\n:param offset: the contents of a file mapping are initialized using C{size} bytes starting at\n               offset C{offset} in the file referred to by the file descriptor C{fd}.\n:return:\n        - C{-1} in case you use C{MAP_FIXED} in the flags and the mapping can not be place at the desired address.\n        - the address of the new mapping (that must be the same as address in case you included C{MAP_FIXED} in flags).\n:todo: handle exception.", "id": "f17024:c9:m58"}
{"signature": "def _dup(self, fd):", "body": "return self._open(self.files[fd])<EOL>", "docstring": "Duplicates a file descriptor\n:rtype: int\n:param fd: the file descriptor to duplicate.\n:return: C{0} on success.", "id": "f17024:c9:m21"}
{"signature": "def _close(self, fd):", "body": "try:<EOL><INDENT>self.files[fd].close()<EOL>self._closed_files.append(self.files[fd])  <EOL>self.files[fd] = None<EOL><DEDENT>except IndexError:<EOL><INDENT>raise FdError(f\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Removes a file descriptor from the file descriptor list\n:rtype: int\n:param fd: the file descriptor to close.\n:return: C{0} on success.", "id": "f17024:c9:m20"}
{"signature": "def sys_rename(self, oldnamep, newnamep):", "body": "oldname = self.current.read_string(oldnamep)<EOL>newname = self.current.read_string(newnamep)<EOL>ret = <NUM_LIT:0><EOL>try:<EOL><INDENT>os.rename(oldname, newname)<EOL><DEDENT>except OSError as e:<EOL><INDENT>ret = -e.errno<EOL><DEDENT>return ret<EOL>", "docstring": "Rename filename `oldnamep` to `newnamep`.\n\n:param int oldnamep: pointer to oldname\n:param int newnamep: pointer to newname", "id": "f17024:c9:m41"}
{"signature": "def __setstate__(self, state):", "body": "super().__setstate__(state)<EOL>self.input = Socket()<EOL>self.input.buffer = state['<STR_LIT:input>']<EOL>self.output = Socket()<EOL>self.output.buffer = state['<STR_LIT>']<EOL>self.files = []<EOL>for ty, file_or_buffer in state['<STR_LIT>']:<EOL><INDENT>if ty == '<STR_LIT>':<EOL><INDENT>f = Socket()<EOL>f.buffer = file_or_buffer<EOL>self.files.append(f)<EOL><DEDENT>else:<EOL><INDENT>self.files.append(file_or_buffer)<EOL><DEDENT><DEDENT>self.files[<NUM_LIT:0>].peer = self.output<EOL>self.files[<NUM_LIT:1>].peer = self.output<EOL>self.files[<NUM_LIT:2>].peer = self.output<EOL>self._closed_files = state['<STR_LIT>']<EOL>self.input.peer = self.files[<NUM_LIT:0>]<EOL>self._rlimits = state['<STR_LIT>']<EOL>self.procs = state['<STR_LIT>']<EOL>self._current = state['<STR_LIT>']<EOL>self.running = state['<STR_LIT>']<EOL>self.rwait = state['<STR_LIT>']<EOL>self.twait = state['<STR_LIT>']<EOL>self.timers = state['<STR_LIT>']<EOL>self.clocks = state['<STR_LIT>']<EOL>self.syscall_trace = state['<STR_LIT>']<EOL>self.argv = state['<STR_LIT>']<EOL>self.envp = state['<STR_LIT>']<EOL>self.base = state['<STR_LIT>']<EOL>self.elf_bss = state['<STR_LIT>']<EOL>self.end_code = state['<STR_LIT>']<EOL>self.end_data = state['<STR_LIT>']<EOL>self.elf_brk = state['<STR_LIT>']<EOL>self.brk = state['<STR_LIT>']<EOL>self.auxv = state['<STR_LIT>']<EOL>self.program = state['<STR_LIT>']<EOL>self._function_abi = state['<STR_LIT>']<EOL>self._syscall_abi = state['<STR_LIT>']<EOL>self._uname_machine = state['<STR_LIT>']<EOL>if '<STR_LIT>' in state:<EOL><INDENT>self._arm_tls_memory = state['<STR_LIT>']<EOL><DEDENT>for proc in self.procs:<EOL><INDENT>self.forward_events_from(proc)<EOL><DEDENT>", "docstring": ":todo: some asserts\n:todo: fix deps? (last line)", "id": "f17024:c9:m12"}
{"signature": "def sys_mprotect(self, start, size, prot):", "body": "perms = perms_from_protflags(prot)<EOL>ret = self.current.memory.mprotect(start, size, perms)<EOL>return <NUM_LIT:0><EOL>", "docstring": "Sets protection on a region of memory. Changes protection for the calling process's\nmemory page(s) containing any part of the address range in the interval [C{start}, C{start}+C{size}-1].\n:rtype: int\n\n:param start: the starting address to change the permissions.\n:param size: the size of the portion of memory to change the permissions.\n:param prot: the new access permission for the memory.\n:return: C{0} on success.", "id": "f17024:c9:m59"}
{"signature": "def signal_transmit(self, fd):", "body": "connection = self.connections(fd)<EOL>if connection is None or connection >= len(self.rwait):<EOL><INDENT>return<EOL><DEDENT>procs = self.rwait[connection]<EOL>if procs:<EOL><INDENT>procid = random.sample(procs, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>self.awake(procid)<EOL><DEDENT>", "docstring": "Awake one process waiting to transmit data on fd", "id": "f17024:c9:m106"}
{"signature": "def sys_newstat(self, fd, buf):", "body": "return self.sys_stat64(fd, buf)<EOL>", "docstring": "Wrapper for stat64()", "id": "f17024:c9:m112"}
{"signature": "@classmethod<EOL><INDENT>def empty_platform(cls, arch):<DEDENT>", "body": "platform = cls(None)<EOL>platform._init_cpu(arch)<EOL>platform._init_std_fds()<EOL>return platform<EOL>", "docstring": "Create a platform without an ELF loaded.\n\n:param str arch: The architecture of the new platform\n:rtype: Linux", "id": "f17024:c9:m4"}
{"signature": "def sys_getcwd(self, buf, size):", "body": "try:<EOL><INDENT>current_dir = os.getcwd()<EOL>length = len(current_dir) + <NUM_LIT:1><EOL>if size > <NUM_LIT:0> and size < length:<EOL><INDENT>logger.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>return -errno.ERANGE<EOL><DEDENT>if not self.current.memory.access_ok(slice(buf, buf + length), '<STR_LIT:w>'):<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>return -errno.EFAULT<EOL><DEDENT>self.current.write_string(buf, current_dir)<EOL>logger.debug(f\"<STR_LIT>\")<EOL>return length<EOL><DEDENT>except OSError as e:<EOL><INDENT>return -e.errno<EOL><DEDENT>", "docstring": "getcwd - Get the current working directory\n:param int buf: Pointer to dest array\n:param size: size in bytes of the array pointed to by the buf\n:return: buf (Success), or 0", "id": "f17024:c9:m28"}
{"signature": "def check_timers(self):", "body": "if self._current is None:<EOL><INDENT>advance = min([self.clocks] + [x for x in self.timers if x is not None]) + <NUM_LIT:1><EOL>logger.debug(f\"<STR_LIT>\")<EOL>self.clocks = advance<EOL><DEDENT>for procid in range(len(self.timers)):<EOL><INDENT>if self.timers[procid] is not None:<EOL><INDENT>if self.clocks > self.timers[procid]:<EOL><INDENT>self.procs[procid].PC += self.procs[procid].instruction.size<EOL>self.awake(procid)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Awake process if timer has expired", "id": "f17024:c9:m107"}
{"signature": "def sys_write(self, fd, buf, count):", "body": "data: bytes = bytes()<EOL>cpu = self.current<EOL>if count != <NUM_LIT:0>:<EOL><INDENT>try:<EOL><INDENT>write_fd = self._get_fd(fd)<EOL><DEDENT>except FdError as e:<EOL><INDENT>logger.error(f\"<STR_LIT>\")<EOL>return -e.err<EOL><DEDENT>if buf not in cpu.memory or buf + count not in cpu.memory:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>return -errno.EFAULT<EOL><DEDENT>if fd > <NUM_LIT:2> and write_fd.is_full():<EOL><INDENT>cpu.PC -= cpu.instruction.size<EOL>self.wait([], [fd], None)<EOL>raise RestartSyscall()<EOL><DEDENT>data: MixedSymbolicBuffer = cpu.read_bytes(buf, count)<EOL>data: bytes = self._transform_write_data(data)<EOL>write_fd.write(data)<EOL>for line in data.split(b'<STR_LIT:\\n>'):<EOL><INDENT>line = line.decode('<STR_LIT>')  <EOL>logger.debug(f\"<STR_LIT>\")<EOL><DEDENT>self.syscall_trace.append((\"<STR_LIT>\", fd, data))<EOL>self.signal_transmit(fd)<EOL><DEDENT>return len(data)<EOL>", "docstring": "write - send bytes through a file descriptor\n          The write system call writes up to count bytes from the buffer pointed\n          to by buf to the file descriptor fd. If count is zero, write returns 0\n          and optionally sets *tx_bytes to zero.\n\n          :param fd            a valid file descriptor\n          :param buf           a memory buffer\n          :param count         number of bytes to send\n          :return: 0          Success\n                    EBADF      fd is not a valid file descriptor or is not open.\n                    EFAULT     buf or tx_bytes points to an invalid address.", "id": "f17024:c9:m31"}
{"signature": "def sys_fsync(self, fd):", "body": "ret = <NUM_LIT:0><EOL>try:<EOL><INDENT>self.files[fd].sync()<EOL><DEDENT>except IndexError:<EOL><INDENT>ret = -errno.EBADF<EOL><DEDENT>except FdError:<EOL><INDENT>ret = -errno.EINVAL<EOL><DEDENT>return ret<EOL>", "docstring": "Synchronize a file's in-core state with that on disk.", "id": "f17024:c9:m42"}
{"signature": "def sys_open(self, buf, flags, mode):", "body": "filename = self.current.read_string(buf)<EOL>try:<EOL><INDENT>f = self._sys_open_get_file(filename, flags)<EOL>logger.debug(f\"<STR_LIT>\")<EOL><DEDENT>except IOError as e:<EOL><INDENT>logger.warning(f\"<STR_LIT>\")<EOL>return -e.errno if e.errno is not None else -errno.EINVAL<EOL><DEDENT>return self._open(f)<EOL>", "docstring": ":param buf: address of zero-terminated pathname\n:param flags: file access bits\n:param mode: file permission mode", "id": "f17024:c9:m39"}
{"signature": "def sys_getrandom(self, buf, size, flags):", "body": "GRND_NONBLOCK = <NUM_LIT><EOL>GRND_RANDOM = <NUM_LIT><EOL>if size == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>if buf not in self.current.memory:<EOL><INDENT>logger.info(\"<STR_LIT>\")<EOL>return -errno.EFAULT<EOL><DEDENT>if flags & ~(GRND_NONBLOCK | GRND_RANDOM):<EOL><INDENT>return -errno.EINVAL<EOL><DEDENT>self.current.write_bytes(buf, '<STR_LIT:\\x00>' * size)<EOL>return size<EOL>", "docstring": "The getrandom system call fills the buffer with random bytes of buflen.\nThe source of random (/dev/random or /dev/urandom) is decided based on\nthe flags value.\n\nManticore's implementation simply fills a buffer with zeroes -- choosing\ndeterminism over true randomness.\n\n:param buf: address of buffer to be filled with random bytes\n:param size: number of random bytes\n:param flags: source of random (/dev/random or /dev/urandom)\n:return: number of bytes copied to buf", "id": "f17024:c9:m97"}
{"signature": "def sys_mmap2(self, address, size, prot, flags, fd, offset):", "body": "return self.sys_mmap(address, size, prot, flags, fd, offset * <NUM_LIT>)<EOL>", "docstring": "Creates a new mapping in the virtual address space of the calling process.\n:rtype: int\n:param address: the starting address for the new mapping. This address is used as hint unless the\n                flag contains C{MAP_FIXED}.\n:param size: the length of the mapping.\n:param prot: the desired memory protection of the mapping.\n:param flags: determines whether updates to the mapping are visible to other\n              processes mapping the same region, and whether updates are carried\n              through to the underlying file.\n:param fd: the contents of a file mapping are initialized using C{size} bytes starting at\n           offset C{offset} in the file referred to by the file descriptor C{fd}.\n:param offset: the contents of a file mapping are initialized using C{size} bytes starting at\n               offset C{offset}*0x1000 in the file referred to by the file descriptor C{fd}.\n:return:\n    - C{-1} In case you use C{MAP_FIXED} in the flags and the mapping can not be place at the desired address.\n    - the address of the new mapping.", "id": "f17024:c9:m57"}
{"signature": "def _transform_write_data(self, data: T) -> T:", "body": "return data<EOL>", "docstring": "Implement in subclass to transform data written by write(2)/writev(2)\nNop by default.", "id": "f17024:c9:m24"}
{"signature": "def sys_readv(self, fd, iov, count):", "body": "cpu = self.current<EOL>ptrsize = cpu.address_bit_size<EOL>sizeof_iovec = <NUM_LIT:2> * (ptrsize // <NUM_LIT:8>)<EOL>total = <NUM_LIT:0><EOL>for i in range(<NUM_LIT:0>, count):<EOL><INDENT>buf = cpu.read_int(iov + i * sizeof_iovec, ptrsize)<EOL>size = cpu.read_int(iov + i * sizeof_iovec + (sizeof_iovec // <NUM_LIT:2>),<EOL>ptrsize)<EOL>data = self.files[fd].read(size)<EOL>total += len(data)<EOL>cpu.write_bytes(buf, data)<EOL>self.syscall_trace.append((\"<STR_LIT>\", fd, data))<EOL><DEDENT>return total<EOL>", "docstring": "Works just like C{sys_read} except that data is read into multiple buffers.\n:rtype: int\n\n:param fd: the file descriptor of the file to read.\n:param iov: the buffer where the the bytes to read are stored.\n:param count: amount of C{iov} buffers to read from the file.\n:return: the amount of bytes read in total.", "id": "f17024:c9:m65"}
{"signature": "def syscall(self):", "body": "index = self._syscall_abi.syscall_number()<EOL>try:<EOL><INDENT>table = getattr(linux_syscalls, self.current.machine)<EOL>name = table.get(index, None)<EOL>implementation = getattr(self, name)<EOL><DEDENT>except (AttributeError, KeyError):<EOL><INDENT>if name is not None:<EOL><INDENT>raise SyscallNotImplemented(index, name)<EOL><DEDENT>else:<EOL><INDENT>raise Exception(f\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return self._syscall_abi.invoke(implementation)<EOL>", "docstring": "Syscall dispatcher.", "id": "f17024:c9:m98"}
{"signature": "def sys_access(self, buf, mode):", "body": "filename = b'<STR_LIT>'<EOL>for i in range(<NUM_LIT:0>, <NUM_LIT:255>):<EOL><INDENT>c = Operators.CHR(self.current.read_int(buf + i, <NUM_LIT:8>))<EOL>if c == b'<STR_LIT:\\x00>':<EOL><INDENT>break<EOL><DEDENT>filename += c<EOL><DEDENT>if os.access(filename, mode):<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT>", "docstring": "Checks real user's permissions for a file\n:rtype: int\n\n:param buf: a buffer containing the pathname to the file to check its permissions.\n:param mode: the access permissions to check.\n:return:\n    -  C{0} if the calling process can access the file in the desired mode.\n    - C{-1} if the calling process can not access the file in the desired mode.", "id": "f17024:c9:m33"}
{"signature": "def correspond(self, text):", "body": "self.stdin.write(text)<EOL>self.stdin.flush()<EOL>str_buffer = '<STR_LIT>'<EOL>while not str_buffer.endswith(self.prompt):<EOL><INDENT>str_buffer += self.stdout.read(<NUM_LIT:1>)<EOL><DEDENT>return str_buffer<EOL>", "docstring": "Communicate with the child process without closing stdin.", "id": "f17055:c0:m1"}
{"signature": "def __init__(self, prg, prompt='<STR_LIT>'):", "body": "self.prompt = prompt<EOL>subprocess.Popen.__init__(self, ['<STR_LIT>', prg], stdin=subprocess.PIPE, stdout=subprocess.PIPE , stderr=subprocess.STDOUT)<EOL>", "docstring": "Construct interactive Popen.", "id": "f17055:c0:m0"}
{"signature": "def input_from_cons(constupl, datas):", "body": "def make_chr(c):<EOL><INDENT>try:<EOL><INDENT>return chr(c)<EOL><DEDENT>except Exception:<EOL><INDENT>return c<EOL><DEDENT><DEDENT>newset = constraints_to_constraintset(constupl)<EOL>ret = '<STR_LIT>'<EOL>for data in datas:<EOL><INDENT>for c in data:<EOL><INDENT>ret += make_chr(solver.get_value(newset, c))<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "solve bytes in |datas| based on", "id": "f17135:m6"}
{"signature": "def flip(constraint):", "body": "equal = copy.copy(constraint)<EOL>assert len(equal.operands) == <NUM_LIT:2><EOL>ite, forcepc = equal.operands<EOL>assert isinstance(ite, BitVecITE) and isinstance(forcepc, BitVecConstant)<EOL>assert len(ite.operands) == <NUM_LIT:3><EOL>cond, iifpc, eelsepc = ite.operands<EOL>assert isinstance(iifpc, BitVecConstant) and isinstance(eelsepc, BitVecConstant)<EOL>equal._operands= (equal.operands[<NUM_LIT:0>], eelsepc if forcepc.value == iifpc.value else iifpc)<EOL>return equal<EOL>", "docstring": "flips a constraint (Equal)\n\n(Equal (BitVecITE Cond IfC ElseC) IfC)\n    ->\n(Equal (BitVecITE Cond IfC ElseC) ElseC)", "id": "f17135:m2"}
{"signature": "def contains(new, olds):", "body": "return any(eq(new, old) for old in olds)<EOL>", "docstring": "__contains__ operator using the `eq` function", "id": "f17135:m9"}
{"signature": "def constraints_are_sat(cons):", "body": "return solver.check(constraints_to_constraintset(cons))<EOL>", "docstring": "Whether constraints are sat", "id": "f17135:m11"}
{"signature": "def perm(lst, func):", "body": "for i in range(<NUM_LIT:1>, <NUM_LIT:2>**len(lst)):<EOL><INDENT>yield [func(item) if (<NUM_LIT:1><<j)&i else item for (j, item) in enumerate(lst)]<EOL><DEDENT>", "docstring": "Produce permutations of `lst`, where permutations are mutated by `func`. Used for flipping constraints. highly\n    possible that returned constraints can be unsat this does it blindly, without any attention to the constraints\n    themselves\n\n    Considering lst as a list of constraints, e.g.\n\n      [ C1, C2, C3 ]\n\n    we'd like to consider scenarios of all possible permutations of flipped constraints, excluding the original list.\n    So we'd like to generate:\n\n      [ func(C1),      C2 ,       C3 ],\n      [      C1 , func(C2),       C3 ],\n      [ func(C1), func(C2),       C3 ],\n      [      C1 ,      C2 ,  func(C3)],\n      .. etc\n\n    This is effectively treating the list of constraints as a bitmask of width len(lst) and counting up, skipping the\n    0th element (unmodified array).\n\n    The code below yields lists of constraints permuted as above by treating list indeces as bitmasks from 1 to\n     2**len(lst) and applying func to all the set bit offsets.", "id": "f17135:m4"}
{"signature": "def visualize(self):", "body": "if os.path.isfile(self.workspace):<EOL><INDENT>t = threading.Thread(target=self.highlight_from_file,<EOL>args=(self.workspace,))<EOL><DEDENT>elif os.path.isdir(self.workspace):<EOL><INDENT>t = threading.Thread(target=self.highlight_from_dir,<EOL>args=(self.workspace,))<EOL><DEDENT>t.start()<EOL>", "docstring": "Given a Manticore workspace, or trace file, highlight the basic blocks.", "id": "f17153:c1:m1"}
{"signature": "def sync_svc(state):", "body": "syscall = state.cpu.R7 <EOL>name = linux_syscalls.armv7[syscall]<EOL>logger.debug(f\"<STR_LIT>\")<EOL>try:<EOL><INDENT>if '<STR_LIT>' in name:<EOL><INDENT>returned = gdb.getR('<STR_LIT>')<EOL>logger.debug(f\"<STR_LIT>\")<EOL>state.cpu.write_register('<STR_LIT>', returned)<EOL><DEDENT>if '<STR_LIT>' in name:<EOL><INDENT>return<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>for reg in state.cpu.canonical_registers:<EOL><INDENT>print(f'<STR_LIT>')<EOL><DEDENT>raise<EOL><DEDENT>", "docstring": "Mirror some service calls in manticore. Happens after qemu executed a SVC\ninstruction, but before manticore did.", "id": "f17155:m7"}
{"signature": "def binary_arch(binary):", "body": "with open(binary, '<STR_LIT:rb>') as f:<EOL><INDENT>elffile = ELFFile(f)<EOL>if elffile['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>", "docstring": "helper method for determining binary architecture\n\n:param binary: str for binary to introspect.\n:rtype bool: True for x86_64, False otherwise", "id": "f17156:m0"}
{"signature": "def binary_symbols(binary):", "body": "def substr_after(string, delim):<EOL><INDENT>return string.partition(delim)[<NUM_LIT:2>]<EOL><DEDENT>with open(binary, '<STR_LIT:rb>') as f:<EOL><INDENT>elffile = ELFFile(f)<EOL>for section in elffile.iter_sections():<EOL><INDENT>if not isinstance(section, SymbolTableSection):<EOL><INDENT>continue<EOL><DEDENT>symbols = [sym.name for sym in section.iter_symbols() if sym]<EOL>return [substr_after(name, PREPEND_SYM) for name in symbols<EOL>if name.startswith(PREPEND_SYM)]<EOL><DEDENT><DEDENT>", "docstring": "helper method for getting all binary symbols with SANDSHREW_ prepended.\nWe do this in order to provide the symbols Manticore should hook on to\nperform main analysis.\n\n:param binary: str for binary to instrospect.\n:rtype list: list of symbols from binary", "id": "f17156:m1"}
{"signature": "def __init__(self, entries=None):", "body": "self.entries = []<EOL>self.entry_keys = {}<EOL>self.by_key = {}<EOL>self.callbacks = []<EOL>if entries is None:<EOL><INDENT>entries = sys.path<EOL><DEDENT>for entry in entries:<EOL><INDENT>self.add_entry(entry)<EOL><DEDENT>", "docstring": "Create working set from list of path entries (default=sys.path)", "id": "f17162:c6:m0"}
{"signature": "def resolve(self, requirements, env=None, installer=None):", "body": "requirements = list(requirements)[::-<NUM_LIT:1>]  <EOL>processed = {}  <EOL>best = {}  <EOL>to_activate = []<EOL>while requirements:<EOL><INDENT>req = requirements.pop(<NUM_LIT:0>)   <EOL>if req in processed:<EOL><INDENT>continue<EOL><DEDENT>dist = best.get(req.key)<EOL>if dist is None:<EOL><INDENT>dist = self.by_key.get(req.key)<EOL>if dist is None:<EOL><INDENT>if env is None:<EOL><INDENT>env = Environment(self.entries)<EOL><DEDENT>dist = best[req.key] = env.best_match(req, self, installer)<EOL>if dist is None:<EOL><INDENT>raise DistributionNotFound(req)  <EOL><DEDENT><DEDENT>to_activate.append(dist)<EOL><DEDENT>if dist not in req:<EOL><INDENT>raise VersionConflict(dist,req) <EOL><DEDENT>requirements.extend(dist.requires(req.extras)[::-<NUM_LIT:1>])<EOL>processed[req] = True<EOL><DEDENT>return to_activate<EOL>", "docstring": "List all distributions needed to (recursively) meet `requirements`\n\n        `requirements` must be a sequence of ``Requirement`` objects.  `env`,\n        if supplied, should be an ``Environment`` instance.  If\n        not supplied, it defaults to all distributions available within any\n        entry or distribution in the working set.  `installer`, if supplied,\n        will be invoked with each requirement that cannot be met by an\n        already-installed distribution; it should return a ``Distribution`` or\n        ``None``.", "id": "f17162:c6:m8"}
{"signature": "def resource_listdir(resource_name):", "body": "", "docstring": "List of resource names in the directory (like ``os.listdir()``)", "id": "f17162:c5:m5"}
{"signature": "def normalize_path(filename):", "body": "return os.path.normcase(os.path.realpath(filename))<EOL>", "docstring": "Normalize a file/dir name for comparison purposes", "id": "f17162:m30"}
{"signature": "def postprocess(self, tempname, filename):", "body": "if os.name == '<STR_LIT>':<EOL><INDENT>mode = ((os.stat(tempname).st_mode) | <NUM_LIT>) & <NUM_LIT><EOL>os.chmod(tempname, mode)<EOL><DEDENT>", "docstring": "Perform any platform-specific postprocessing of `tempname`\n\n        This is where Mac header rewrites should be done; other platforms don't\n        have anything special they should do.\n\n        Resource providers should call this method ONLY after successfully\n        extracting a compressed resource.  They must NOT call it on resources\n        that are already in the filesystem.\n\n        `tempname` is the current (temporary) name of the file, and `filename`\n        is the name it will be renamed to by the caller after this routine\n        returns.", "id": "f17162:c9:m9"}
{"signature": "def safe_name(name):", "body": "return re.sub('<STR_LIT>', '<STR_LIT:->', name)<EOL>", "docstring": "Convert an arbitrary string to a standard distribution name\n\n    Any runs of non-alphanumeric/. characters are replaced with a single '-'.", "id": "f17162:m13"}
{"signature": "def resource_listdir(self, package_or_requirement, resource_name):", "body": "return get_provider(package_or_requirement).resource_listdir(<EOL>resource_name<EOL>)<EOL>", "docstring": "List the contents of the named resource directory", "id": "f17162:c9:m6"}
{"signature": "def register_loader_type(loader_type, provider_factory):", "body": "_provider_factories[loader_type] = provider_factory<EOL>", "docstring": "Register `provider_factory` to make providers for `loader_type`\n\n    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,\n    and `provider_factory` is a function that, passed a *module* object,\n    returns an ``IResourceProvider`` for that module.", "id": "f17162:m1"}
{"signature": "def insert_on(self, path, loc = None):", "body": "loc = loc or self.location<EOL>if not loc:<EOL><INDENT>return<EOL><DEDENT>if path is sys.path:<EOL><INDENT>self.check_version_conflict()<EOL><DEDENT>nloc = _normalize_cached(loc)<EOL>bdir = os.path.dirname(nloc)<EOL>npath= list(map(_normalize_cached, path))<EOL>bp = None<EOL>for p, item in enumerate(npath):<EOL><INDENT>if item==nloc:<EOL><INDENT>break<EOL><DEDENT>elif item==bdir and self.precedence==EGG_DIST:<EOL><INDENT>path.insert(p, loc)<EOL>npath.insert(p, nloc)<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>path.append(loc)<EOL>return<EOL><DEDENT>while <NUM_LIT:1>:<EOL><INDENT>try:<EOL><INDENT>np = npath.index(nloc, p+<NUM_LIT:1>)<EOL><DEDENT>except ValueError:<EOL><INDENT>break<EOL><DEDENT>else:<EOL><INDENT>del npath[np], path[np]<EOL>p = np  <EOL><DEDENT><DEDENT>return<EOL>", "docstring": "Insert self.location in path before its nearest parent directory", "id": "f17162:c21:m20"}
{"signature": "def run_script(dist_spec, script_name):", "body": "ns = sys._getframe(<NUM_LIT:1>).f_globals<EOL>name = ns['<STR_LIT>']<EOL>ns.clear()<EOL>ns['<STR_LIT>'] = name<EOL>require(dist_spec)[<NUM_LIT:0>].run_script(script_name, ns)<EOL>", "docstring": "Locate distribution `dist_spec` and run its `script_name` script", "id": "f17162:m7"}
{"signature": "def declare_namespace(packageName):", "body": "imp.acquire_lock()<EOL>try:<EOL><INDENT>if packageName in _namespace_packages:<EOL><INDENT>return<EOL><DEDENT>path, parent = sys.path, None<EOL>if '<STR_LIT:.>' in packageName:<EOL><INDENT>parent = '<STR_LIT:.>'.join(packageName.split('<STR_LIT:.>')[:-<NUM_LIT:1>])<EOL>declare_namespace(parent)<EOL>__import__(parent)<EOL>try:<EOL><INDENT>path = sys.modules[parent].__path__<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise TypeError(\"<STR_LIT>\", parent)<EOL><DEDENT><DEDENT>_namespace_packages.setdefault(parent,[]).append(packageName)<EOL>_namespace_packages.setdefault(packageName,[])<EOL>for path_item in path:<EOL><INDENT>_handle_ns(packageName, path_item)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>imp.release_lock()<EOL><DEDENT>", "docstring": "Declare that package 'packageName' is a namespace package", "id": "f17162:m26"}
{"signature": "def resource_string(self, package_or_requirement, resource_name):", "body": "return get_provider(package_or_requirement).get_resource_string(<EOL>self, resource_name<EOL>)<EOL>", "docstring": "Return specified resource as a string", "id": "f17162:c9:m5"}
{"signature": "def _get_mro(cls):", "body": "if not isinstance(cls,type):<EOL><INDENT>class cls(cls,object): pass<EOL>return cls.__mro__[<NUM_LIT:1>:]<EOL><DEDENT>return cls.__mro__<EOL>", "docstring": "Get an mro for a type or classic class", "id": "f17162:m39"}
{"signature": "def __init__(self, project_name, specs, extras):", "body": "self.unsafe_name, project_name = project_name, safe_name(project_name)<EOL>self.project_name, self.key = project_name, project_name.lower()<EOL>index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]<EOL>index.sort()<EOL>self.specs = [(op,ver) for parsed,trans,op,ver in index]<EOL>self.index, self.extras = index, tuple(map(safe_extra,extras))<EOL>self.hashCmp = (<EOL>self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),<EOL>frozenset(self.extras)<EOL>)<EOL>self.__hash = hash(self.hashCmp)<EOL>", "docstring": "DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!", "id": "f17162:c22:m0"}
{"signature": "def yield_lines(strs):", "body": "if isinstance(strs,str):<EOL><INDENT>for s in strs.splitlines():<EOL><INDENT>s = s.strip()<EOL>if s and not s.startswith('<STR_LIT:#>'):     <EOL><INDENT>yield s<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for ss in strs:<EOL><INDENT>for s in yield_lines(ss):<EOL><INDENT>yield s<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Yield non-empty/non-comment lines of a ``basestring`` or sequence", "id": "f17162:m33"}
{"signature": "def has_metadata(name):", "body": "", "docstring": "Does the package's distribution contain the named metadata?", "id": "f17162:c4:m0"}
{"signature": "def to_filename(name):", "body": "return name.replace('<STR_LIT:->','<STR_LIT:_>')<EOL>", "docstring": "Convert a project or version name to its filename-escaped form\n\n    Any '-' characters are currently replaced with '_'.", "id": "f17162:m16"}
{"signature": "def activate(self,path=None):", "body": "if path is None: path = sys.path<EOL>self.insert_on(path)<EOL>if path is sys.path:<EOL><INDENT>fixup_namespace_packages(self.location)<EOL>list(map(declare_namespace, self._get_metadata('<STR_LIT>')))<EOL><DEDENT>", "docstring": "Ensure distribution is importable on `path` (default=sys.path)", "id": "f17162:c21:m10"}
{"signature": "def parse_map(cls, data, dist=None):", "body": "if isinstance(data,dict):<EOL><INDENT>data = list(data.items())<EOL><DEDENT>else:<EOL><INDENT>data = split_sections(data)<EOL><DEDENT>maps = {}<EOL>for group, lines in data:<EOL><INDENT>if group is None:<EOL><INDENT>if not lines:<EOL><INDENT>continue<EOL><DEDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>group = group.strip()<EOL>if group in maps:<EOL><INDENT>raise ValueError(\"<STR_LIT>\", group)<EOL><DEDENT>maps[group] = cls.parse_group(group, lines, dist)<EOL><DEDENT>return maps<EOL>", "docstring": "Parse a map of entry point groups", "id": "f17162:c20:m7"}
{"signature": "def get_entry_info(dist, group, name):", "body": "return get_distribution(dist).get_entry_info(group, name)<EOL>", "docstring": "Return the EntryPoint object for `group`+`name`, or ``None``", "id": "f17162:m11"}
{"signature": "def get_resource_stream(manager, resource_name):", "body": "", "docstring": "Return a readable file-like object for `resource_name`\n\n        `manager` must be an ``IResourceManager``", "id": "f17162:c5:m1"}
{"signature": "def add(self,dist):", "body": "if self.can_add(dist) and dist.has_version():<EOL><INDENT>dists = self._distmap.setdefault(dist.key,[])<EOL>if dist not in dists:<EOL><INDENT>dists.append(dist)<EOL>if dist.key in self._cache:<EOL><INDENT>_sort_dists(self._cache[dist.key])<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Add `dist` if we ``can_add()`` it and it isn't already added", "id": "f17162:c7:m5"}
{"signature": "def get_entry_info(self, group, name):", "body": "return self.get_entry_map(group).get(name)<EOL>", "docstring": "Return the EntryPoint object for `group`+`name`, or ``None``", "id": "f17162:c21:m19"}
{"signature": "def resource_filename(self, package_or_requirement, resource_name):", "body": "return get_provider(package_or_requirement).get_resource_filename(<EOL>self, resource_name<EOL>)<EOL>", "docstring": "Return a true filesystem path for specified resource", "id": "f17162:c9:m3"}
{"signature": "def compatible_platforms(provided,required):", "body": "if provided is None or required is None or provided==required:<EOL><INDENT>return True     <EOL><DEDENT>reqMac = macosVersionString.match(required)<EOL>if reqMac:<EOL><INDENT>provMac = macosVersionString.match(provided)<EOL>if not provMac:<EOL><INDENT>provDarwin = darwinVersionString.match(provided)<EOL>if provDarwin:<EOL><INDENT>dversion = int(provDarwin.group(<NUM_LIT:1>))<EOL>macosversion = \"<STR_LIT>\" % (reqMac.group(<NUM_LIT:1>), reqMac.group(<NUM_LIT:2>))<EOL>if dversion == <NUM_LIT:7> and macosversion >= \"<STR_LIT>\" or dversion == <NUM_LIT:8> and macosversion >= \"<STR_LIT>\":<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False    <EOL><DEDENT>if provMac.group(<NUM_LIT:1>) != reqMac.group(<NUM_LIT:1>) or provMac.group(<NUM_LIT:3>) != reqMac.group(<NUM_LIT:3>):<EOL><INDENT>return False<EOL><DEDENT>if int(provMac.group(<NUM_LIT:2>)) > int(reqMac.group(<NUM_LIT:2>)):<EOL><INDENT>return False<EOL><DEDENT>return True<EOL><DEDENT>return False<EOL>", "docstring": "Can code for the `provided` platform run on the `required` platform?\n\n    Returns true if either platform is ``None``, or the platforms are equal.\n\n    XXX Needs compatibility checks for Linux and other unixy OSes.", "id": "f17162:m6"}
{"signature": "def get_importer(path_item):", "body": "try:<EOL><INDENT>importer = sys.path_importer_cache[path_item]<EOL><DEDENT>except KeyError:<EOL><INDENT>for hook in sys.path_hooks:<EOL><INDENT>try:<EOL><INDENT>importer = hook(path_item)<EOL><DEDENT>except ImportError:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>importer = None<EOL><DEDENT><DEDENT>sys.path_importer_cache.setdefault(path_item,importer)<EOL>if importer is None:<EOL><INDENT>try:<EOL><INDENT>importer = ImpWrapper(path_item)<EOL><DEDENT>except ImportError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return importer<EOL>", "docstring": "Retrieve a PEP 302 \"importer\" for the given path item\n\n    If there is no importer, this returns a wrapper around the builtin import\n    machinery.  The returned importer is only cached if it was created by a\n    path hook.", "id": "f17162:m17"}
{"signature": "def load_entry_point(dist, group, name):", "body": "return get_distribution(dist).load_entry_point(group, name)<EOL>", "docstring": "Return `name` entry point of `group` for `dist` or raise ImportError", "id": "f17162:m9"}
{"signature": "def find(self, req):", "body": "dist = self.by_key.get(req.key)<EOL>if dist is not None and dist not in req:<EOL><INDENT>raise VersionConflict(dist,req)     <EOL><DEDENT>else:<EOL><INDENT>return dist<EOL><DEDENT>", "docstring": "Find a distribution matching requirement `req`\n\n        If there is an active distribution for the requested project, this\n        returns it as long as it meets the version requirement specified by\n        `req`.  But, if there is an active distribution for the project and it\n        does *not* meet the `req` requirement, ``VersionConflict`` is raised.\n        If there is no active distribution for the requested project, ``None``\n        is returned.", "id": "f17162:c6:m3"}
{"signature": "def has_resource(resource_name):", "body": "", "docstring": "Does the package contain the named resource?", "id": "f17162:c5:m3"}
{"signature": "def ensure_directory(path):", "body": "dirname = os.path.dirname(path)<EOL>if not os.path.isdir(dirname):<EOL><INDENT>os.makedirs(dirname)<EOL><DEDENT>", "docstring": "Ensure that the parent directory of `path` exists", "id": "f17162:m41"}
{"signature": "def parse_requirements(strs):", "body": "<EOL>lines = iter(yield_lines(strs))<EOL>def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):<EOL><INDENT>items = []<EOL>while not TERMINATOR(line,p):<EOL><INDENT>if CONTINUE(line,p):<EOL><INDENT>try:<EOL><INDENT>line = next(lines); p = <NUM_LIT:0><EOL><DEDENT>except StopIteration:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT><DEDENT>match = ITEM(line,p)<EOL>if not match:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"+item_name+\"<STR_LIT>\",line,\"<STR_LIT>\",line[p:])<EOL><DEDENT>items.append(match.group(*groups))<EOL>p = match.end()<EOL>match = COMMA(line,p)<EOL>if match:<EOL><INDENT>p = match.end() <EOL><DEDENT>elif not TERMINATOR(line,p):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\",line,\"<STR_LIT>\",line[p:]<EOL>)<EOL><DEDENT><DEDENT>match = TERMINATOR(line,p)<EOL>if match: p = match.end()   <EOL>return line, p, items<EOL><DEDENT>for line in lines:<EOL><INDENT>match = DISTRO(line)<EOL>if not match:<EOL><INDENT>raise ValueError(\"<STR_LIT>\", line)<EOL><DEDENT>project_name = match.group(<NUM_LIT:1>)<EOL>p = match.end()<EOL>extras = []<EOL>match = OBRACKET(line,p)<EOL>if match:<EOL><INDENT>p = match.end()<EOL>line, p, extras = scan_list(<EOL>DISTRO, CBRACKET, line, p, (<NUM_LIT:1>,), \"<STR_LIT>\"<EOL>)<EOL><DEDENT>line, p, specs = scan_list(VERSION,LINE_END,line,p,(<NUM_LIT:1>,<NUM_LIT:2>),\"<STR_LIT>\")<EOL>specs = [(op,safe_version(val)) for op,val in specs]<EOL>yield Requirement(project_name, specs, extras)<EOL><DEDENT>", "docstring": "Yield ``Requirement`` objects for each specification in `strs`\n\n    `strs` must be an instance of ``basestring``, or a (possibly-nested)\n    iterable thereof.", "id": "f17162:m37"}
{"signature": "def __getattr__(self,attr):", "body": "if attr.startswith('<STR_LIT:_>'):<EOL><INDENT>raise AttributeError(attr)<EOL><DEDENT>return getattr(self._provider, attr)<EOL>", "docstring": "Delegate all unrecognized public attributes to .metadata provider", "id": "f17162:c21:m14"}
{"signature": "def metadata_listdir(name):", "body": "", "docstring": "List of metadata names in the directory (like ``os.listdir()``)", "id": "f17162:c4:m4"}
{"signature": "def get_provider(moduleOrReq):", "body": "if isinstance(moduleOrReq,Requirement):<EOL><INDENT>return working_set.find(moduleOrReq) or require(str(moduleOrReq))[<NUM_LIT:0>]<EOL><DEDENT>try:<EOL><INDENT>module = sys.modules[moduleOrReq]<EOL><DEDENT>except KeyError:<EOL><INDENT>__import__(moduleOrReq)<EOL>module = sys.modules[moduleOrReq]<EOL><DEDENT>loader = getattr(module, '<STR_LIT>', None)<EOL>return _find_adapter(_provider_factories, loader)(module)<EOL>", "docstring": "Return an IResourceProvider for the named module or requirement", "id": "f17162:m2"}
{"signature": "def add(self, dist, entry=None, insert=True):", "body": "if insert:<EOL><INDENT>dist.insert_on(self.entries, entry)<EOL><DEDENT>if entry is None:<EOL><INDENT>entry = dist.location<EOL><DEDENT>keys = self.entry_keys.setdefault(entry,[])<EOL>keys2 = self.entry_keys.setdefault(dist.location,[])<EOL>if dist.key in self.by_key:<EOL><INDENT>return      <EOL><DEDENT>self.by_key[dist.key] = dist<EOL>if dist.key not in keys:<EOL><INDENT>keys.append(dist.key)<EOL><DEDENT>if dist.key not in keys2:<EOL><INDENT>keys2.append(dist.key)<EOL><DEDENT>self._added_new(dist)<EOL>", "docstring": "Add `dist` to working set, associated with `entry`\n\n        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.\n        On exit from this routine, `entry` is added to the end of the working\n        set's ``.entries`` (if it wasn't already present).\n\n        `dist` is only added to the working set if it's for a project that\n        doesn't already have a distribution in the set.  If it's added, any\n        callbacks registered with the ``subscribe()`` method will be called.", "id": "f17162:c6:m7"}
{"signature": "def metadata_isdir(name):", "body": "", "docstring": "Is the named metadata a directory?  (like ``os.path.isdir()``)", "id": "f17162:c4:m3"}
{"signature": "def resource_exists(self, package_or_requirement, resource_name):", "body": "return get_provider(package_or_requirement).has_resource(resource_name)<EOL>", "docstring": "Does the named resource exist?", "id": "f17162:c9:m1"}
{"signature": "def require(self, *requirements):", "body": "needed = self.resolve(parse_requirements(requirements))<EOL>for dist in needed:<EOL><INDENT>self.add(dist)<EOL><DEDENT>return needed<EOL>", "docstring": "Ensure that distributions matching `requirements` are activated\n\n        `requirements` must be a string or a (possibly-nested) sequence\n        thereof, specifying the distributions and versions required.  The\n        return value is a sequence of the distributions that needed to be\n        activated to fulfill the requirements; all relevant distributions are\n        included, even if they were already activated in this working set.", "id": "f17162:c6:m10"}
{"signature": "def __contains__(self,dist):", "body": "return self.by_key.get(dist.key) == dist<EOL>", "docstring": "True if `dist` is the active distribution for its project", "id": "f17162:c6:m2"}
{"signature": "def _find_adapter(registry, ob):", "body": "for t in _get_mro(getattr(ob, '<STR_LIT>', type(ob))):<EOL><INDENT>if t in registry:<EOL><INDENT>return registry[t]<EOL><DEDENT><DEDENT>", "docstring": "Return an adapter factory for `ob` from `registry`", "id": "f17162:m40"}
{"signature": "def __iadd__(self, other):", "body": "if isinstance(other,Distribution):<EOL><INDENT>self.add(other)<EOL><DEDENT>elif isinstance(other,Environment):<EOL><INDENT>for project in other:<EOL><INDENT>for dist in other[project]:<EOL><INDENT>self.add(dist)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" % (other,))<EOL><DEDENT>return self<EOL>", "docstring": "In-place addition of a distribution or environment", "id": "f17162:c7:m9"}
{"signature": "def __init__(self, importer):", "body": "self.zipinfo = zipimport._zip_directory_cache[importer.archive]<EOL>self.zip_pre = importer.archive+os.sep<EOL>self.loader = importer<EOL>if importer.prefix:<EOL><INDENT>self.module_path = os.path.join(importer.archive, importer.prefix)<EOL><DEDENT>else:<EOL><INDENT>self.module_path = importer.archive<EOL><DEDENT>self._setup_prefix()<EOL>", "docstring": "Create a metadata provider from a zipimporter", "id": "f17162:c17:m0"}
{"signature": "def resource_isdir(resource_name):", "body": "", "docstring": "Is the named resource a directory?  (like ``os.path.isdir()``)", "id": "f17162:c5:m4"}
{"signature": "def __iter__(self):", "body": "seen = {}<EOL>for item in self.entries:<EOL><INDENT>for key in self.entry_keys[item]:<EOL><INDENT>if key not in seen:<EOL><INDENT>seen[key]=<NUM_LIT:1><EOL>yield self.by_key[key]<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Yield distributions for non-duplicate projects in the working set\n\n        The yield order is the order in which the items' path entries were\n        added to the working set.", "id": "f17162:c6:m6"}
{"signature": "def get_metadata_lines(name):", "body": "", "docstring": "Yield named metadata resource as list of non-blank non-comment lines\n\n       Leading and trailing whitespace is stripped from each line, and lines\n       with ``#`` as the first non-blank character are omitted.", "id": "f17162:c4:m2"}
{"signature": "def localize(self, dt, is_dst=False):", "body": "if dt.tzinfo is not None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return dt.replace(tzinfo=self)<EOL>", "docstring": "Convert naive time to local time", "id": "f17165:c1:m4"}
{"signature": "def timezone(zone):", "body": "if zone.upper() == '<STR_LIT>':<EOL><INDENT>return utc<EOL><DEDENT>try:<EOL><INDENT>zone = zone.encode('<STR_LIT>')<EOL><DEDENT>except UnicodeEncodeError:<EOL><INDENT>raise UnknownTimeZoneError(zone)<EOL><DEDENT>zone = _unmunge_zone(zone)<EOL>if zone not in _tzinfo_cache:<EOL><INDENT>if zone in all_timezones_set:<EOL><INDENT>_tzinfo_cache[zone] = build_tzinfo(zone, open_resource(zone))<EOL><DEDENT>else:<EOL><INDENT>raise UnknownTimeZoneError(zone)<EOL><DEDENT><DEDENT>return _tzinfo_cache[zone]<EOL>", "docstring": "r''' Return a datetime.tzinfo implementation for the given timezone \n\n    >>> from datetime import datetime, timedelta\n    >>> utc = timezone('UTC')\n    >>> eastern = timezone('US/Eastern')\n    >>> eastern.zone\n    'US/Eastern'\n    >>> timezone(u'US/Eastern') is eastern\n    True\n    >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)\n    >>> loc_dt = utc_dt.astimezone(eastern)\n    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'\n    >>> loc_dt.strftime(fmt)\n    '2002-10-27 01:00:00 EST (-0500)'\n    >>> (loc_dt - timedelta(minutes=10)).strftime(fmt)\n    '2002-10-27 00:50:00 EST (-0500)'\n    >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)\n    '2002-10-27 01:50:00 EDT (-0400)'\n    >>> (loc_dt + timedelta(minutes=10)).strftime(fmt)\n    '2002-10-27 01:10:00 EST (-0500)'\n\n    Raises UnknownTimeZoneError if passed an unknown zone.\n\n    >>> timezone('Asia/Shangri-La')\n    Traceback (most recent call last):\n    ...\n    UnknownTimeZoneError: 'Asia/Shangri-La'\n\n    >>> timezone(u'\\N{TRADE MARK SIGN}')\n    Traceback (most recent call last):\n    ...\n    UnknownTimeZoneError: u'\\u2122'", "id": "f17165:m1"}
{"signature": "def FixedOffset(offset, _tzinfos = {}):", "body": "if offset == <NUM_LIT:0>:<EOL><INDENT>return UTC<EOL><DEDENT>info = _tzinfos.get(offset)<EOL>if info is None:<EOL><INDENT>info = _tzinfos.setdefault(offset, _FixedOffset(offset))<EOL><DEDENT>return info<EOL>", "docstring": "return a fixed-offset timezone based off a number of minutes.\n\n        >>> one = FixedOffset(-330)\n        >>> one\n        pytz.FixedOffset(-330)\n        >>> one.utcoffset(datetime.datetime.now())\n        datetime.timedelta(-1, 66600)\n\n        >>> two = FixedOffset(1380)\n        >>> two\n        pytz.FixedOffset(1380)\n        >>> two.utcoffset(datetime.datetime.now())\n        datetime.timedelta(0, 82800)\n\n    The datetime.timedelta must be between the range of -1 and 1 day,\n    non-inclusive.\n\n        >>> FixedOffset(1440)\n        Traceback (most recent call last):\n        ...\n        ValueError: ('absolute offset is too large', 1440)\n\n        >>> FixedOffset(-1440)\n        Traceback (most recent call last):\n        ...\n        ValueError: ('absolute offset is too large', -1440)\n\n    An offset of 0 is special-cased to return UTC.\n\n        >>> FixedOffset(0) is UTC\n        True\n\n    There should always be only one instance of a FixedOffset per timedelta.\n    This should be true for multiple creation calls.\n\n        >>> FixedOffset(-330) is one\n        True\n        >>> FixedOffset(1380) is two\n        True\n\n    It should also be true for pickling.\n\n        >>> import pickle\n        >>> pickle.loads(pickle.dumps(one)) is one\n        True\n        >>> pickle.loads(pickle.dumps(two)) is two\n        True", "id": "f17165:m6"}
{"signature": "def memorized_timedelta(seconds):", "body": "try:<EOL><INDENT>return _timedelta_cache[seconds]<EOL><DEDENT>except KeyError:<EOL><INDENT>delta = timedelta(seconds=seconds)<EOL>_timedelta_cache[seconds] = delta<EOL>return delta<EOL><DEDENT>", "docstring": "Create only one instance of each distinct timedelta", "id": "f17166:m0"}
{"signature": "def memorized_datetime(seconds):", "body": "try:<EOL><INDENT>return _datetime_cache[seconds]<EOL><DEDENT>except KeyError:<EOL><INDENT>dt = _epoch + timedelta(seconds=seconds)<EOL>_datetime_cache[seconds] = dt<EOL>return dt<EOL><DEDENT>", "docstring": "Create only one instance of each distinct datetime", "id": "f17166:m1"}
{"signature": "def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):", "body": "<EOL>tz = pytz.timezone(zone)<EOL>if utcoffset is None:<EOL><INDENT>return tz<EOL><DEDENT>utcoffset = memorized_timedelta(utcoffset)<EOL>dstoffset = memorized_timedelta(dstoffset)<EOL>try:<EOL><INDENT>return tz._tzinfos[(utcoffset, dstoffset, tzname)]<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>for localized_tz in list(tz._tzinfos.values()):<EOL><INDENT>if (localized_tz._utcoffset == utcoffset<EOL>and localized_tz._dst == dstoffset):<EOL><INDENT>return localized_tz<EOL><DEDENT><DEDENT>inf = (utcoffset, dstoffset, tzname)<EOL>tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)<EOL>return tz._tzinfos[inf]<EOL>", "docstring": "Factory function for unpickling pytz tzinfo instances.\n\n    This is shared for both StaticTzInfo and DstTzInfo instances, because\n    database changes could cause a zones implementation to switch between\n    these two base classes and we can't break pickles on a pytz version\n    upgrade.", "id": "f17166:m4"}
{"signature": "def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,<EOL>pivot, sizes, fill_empty, flip):", "body": "<EOL>spacing = length * sizes.get('<STR_LIT>', <NUM_LIT>)<EOL>full_height = length * sizes.get('<STR_LIT>', <NUM_LIT>)<EOL>full_width = length * sizes.get('<STR_LIT:width>', <NUM_LIT>)<EOL>empty_rad = length * sizes.get('<STR_LIT>', <NUM_LIT>)<EOL>pivot_points = dict(tip=<NUM_LIT:0.0>, middle=-length/<NUM_LIT>)<EOL>if flip: full_height = -full_height<EOL>endx = <NUM_LIT:0.0><EOL>endy = pivot_points[pivot.lower()]<EOL>angles = -(ma.arctan2(v, u) + np.pi/<NUM_LIT:2>)<EOL>circ = CirclePolygon((<NUM_LIT:0>,<NUM_LIT:0>), radius=empty_rad).get_verts()<EOL>if fill_empty:<EOL><INDENT>empty_barb = circ<EOL><DEDENT>else:<EOL><INDENT>empty_barb = np.concatenate((circ, circ[::-<NUM_LIT:1>]))<EOL><DEDENT>barb_list = []<EOL>for index, angle in np.ndenumerate(angles):<EOL><INDENT>if empty_flag[index]:<EOL><INDENT>barb_list.append(empty_barb)<EOL>continue<EOL><DEDENT>poly_verts = [(endx, endy)]<EOL>offset = length<EOL>for i in range(nflags[index]):<EOL><INDENT>if offset != length: offset += spacing / <NUM_LIT><EOL>poly_verts.extend([[endx, endy + offset],<EOL>[endx + full_height, endy - full_width/<NUM_LIT:2> + offset],<EOL>[endx, endy - full_width + offset]])<EOL>offset -= full_width + spacing<EOL><DEDENT>for i in range(nbarbs[index]):<EOL><INDENT>poly_verts.extend([(endx, endy + offset),<EOL>(endx + full_height, endy + offset + full_width/<NUM_LIT:2>),<EOL>(endx, endy + offset)])<EOL>offset -= spacing<EOL><DEDENT>if half_barb[index]:<EOL><INDENT>if offset == length:<EOL><INDENT>poly_verts.append((endx, endy + offset))<EOL>offset -= <NUM_LIT> * spacing<EOL><DEDENT>poly_verts.extend([(endx, endy + offset),<EOL>(endx + full_height/<NUM_LIT:2>, endy + offset + full_width/<NUM_LIT:4>),<EOL>(endx, endy + offset)])<EOL><DEDENT>poly_verts = transforms.Affine2D().rotate(-angle).transform(<EOL>poly_verts)<EOL>barb_list.append(poly_verts)<EOL><DEDENT>return barb_list<EOL>", "docstring": "This function actually creates the wind barbs.  *u* and *v*\nare components of the vector in the *x* and *y* directions,\nrespectively.\n\n*nflags*, *nbarbs*, and *half_barb*, empty_flag* are,\n*respectively, the number of flags, number of barbs, flag for\n*half a barb, and flag for empty barb, ostensibly obtained\n*from :meth:`_find_tails`.\n\n*length* is the length of the barb staff in points.\n\n*pivot* specifies the point on the barb around which the\nentire barb should be rotated.  Right now, valid options are\n'head' and 'middle'.\n\n*sizes* is a dictionary of coefficients specifying the ratio\nof a given feature to the length of the barb. These features\ninclude:\n\n    - *spacing*: space between features (flags, full/half\n       barbs)\n\n    - *height*: distance from shaft of top of a flag or full\n       barb\n\n    - *width* - width of a flag, twice the width of a full barb\n\n    - *emptybarb* - radius of the circle used for low\n       magnitudes\n\n*fill_empty* specifies whether the circle representing an\nempty barb should be filled or not (this changes the drawing\nof the polygon).\n\n*flip* is a flag indicating whether the features should be flipped to\nthe other side of the barb (useful for winds in the southern\nhemisphere.\n\nThis function returns list of arrays of vertices, defining a polygon for\neach of the wind barbs.  These polygons have been rotated to properly\nalign with the vector direction.", "id": "f17167:c2:m2"}
{"signature": "def _find_tails(self, mag, rounding=True, half=<NUM_LIT:5>, full=<NUM_LIT:10>, flag=<NUM_LIT:50>):", "body": "<EOL>if rounding:<EOL><INDENT>mag = half * (mag / half + <NUM_LIT:0.5>).astype(np.int)<EOL><DEDENT>num_flags = np.floor(mag / flag).astype(np.int)<EOL>mag = np.mod(mag, flag)<EOL>num_barb = np.floor(mag / full).astype(np.int)<EOL>mag = np.mod(mag, full)<EOL>half_flag = mag >= half<EOL>empty_flag = ~(half_flag | (num_flags > <NUM_LIT:0>) | (num_barb > <NUM_LIT:0>))<EOL>return num_flags, num_barb, half_flag, empty_flag<EOL>", "docstring": "Find how many of each of the tail pieces is necessary.  Flag\nspecifies the increment for a flag, barb for a full barb, and half for\nhalf a barb. Mag should be the magnitude of a vector (ie. >= 0).\n\nThis returns a tuple of:\n\n    (*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)\n\n*half_flag* is a boolean whether half of a barb is needed,\nsince there should only ever be one half on a given\nbarb. *empty_flag* flag is an array of flags to easily tell if\na barb is empty (too low to plot any barbs/flags.", "id": "f17167:c2:m1"}
{"signature": "def set_offsets(self, xy):", "body": "self.x = xy[:,<NUM_LIT:0>]<EOL>self.y = xy[:,<NUM_LIT:1>]<EOL>x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(), self.u,<EOL>self.v)<EOL>xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))<EOL>collections.PolyCollection.set_offsets(self, xy)<EOL>", "docstring": "Set the offsets for the barb polygons.  This saves the offets passed in\nand actually sets version masked as appropriate for the existing U/V\ndata. *offsets* should be a sequence.\n\nACCEPTS: sequence of pairs of floats", "id": "f17167:c2:m5"}
{"signature": "def _init(self):", "body": "<EOL>if True: <EOL><INDENT>trans = self._set_transform()<EOL>ax = self.ax<EOL>sx, sy = trans.inverted().transform_point(<EOL>(ax.bbox.width, ax.bbox.height))<EOL>self.span = sx<EOL>sn = max(<NUM_LIT:8>, min(<NUM_LIT>, math.sqrt(self.N)))<EOL>if self.width is None:<EOL><INDENT>self.width = <NUM_LIT> * self.span / sn<EOL><DEDENT><DEDENT>", "docstring": "initialization delayed until first draw;\n        allow time for axes setup.", "id": "f17167:c1:m2"}
{"signature": "def twiny(ax=None):", "body": "if ax is None:<EOL><INDENT>ax=gca()<EOL><DEDENT>ax1 = ax.twiny()<EOL>draw_if_interactive()<EOL>return ax1<EOL>", "docstring": "Make a second axes overlay *ax* (or the current axes if *ax* is\n*None*) sharing the yaxis.  The ticks for *ax2* will be placed on\nthe top, and the *ax2* instance is returned.", "id": "f17168:m34"}
{"signature": "def polar(*args, **kwargs):", "body": "ax = gca(polar=True)<EOL>ret = ax.plot(*args, **kwargs)<EOL>draw_if_interactive()<EOL>return ret<EOL>", "docstring": "call signature::\n\n  polar(theta, r, **kwargs)\n\nMake a polar plot.  Multiple *theta*, *r* arguments are supported,\nwith format strings, as in :func:`~matplotlib.pyplot.plot`.", "id": "f17168:m58"}
{"signature": "def gray():", "body": "rc('<STR_LIT:image>', cmap='<STR_LIT>')<EOL>im = gci()<EOL>if im is not None:<EOL><INDENT>im.set_cmap(cm.gray)<EOL><DEDENT>draw_if_interactive()<EOL>", "docstring": "set the default colormap to gray and apply to current image if any.\nSee help(colormaps) for more information", "id": "f17168:m112"}
{"signature": "def flag():", "body": "rc('<STR_LIT:image>', cmap='<STR_LIT>')<EOL>im = gci()<EOL>if im is not None:<EOL><INDENT>im.set_cmap(cm.flag)<EOL><DEDENT>draw_if_interactive()<EOL>", "docstring": "set the default colormap to flag and apply to current image if any.\nSee help(colormaps) for more information", "id": "f17168:m111"}
{"signature": "def yscale(*args, **kwargs):", "body": "ax = gca()<EOL>ret = ax.set_yscale(*args, **kwargs)<EOL>draw_if_interactive()<EOL>return ret<EOL>", "docstring": "call signature::\n\n  xscale(scale, **kwargs)\n\nSet the scaling for the y-axis: %(scale)s\n\nDifferent keywords may be accepted, depending on the scale:\n\n%(scale_docs)s", "id": "f17168:m45"}
{"signature": "def subplot(*args, **kwargs):", "body": "fig = gcf()<EOL>a = fig.add_subplot(*args, **kwargs)<EOL>bbox = a.bbox<EOL>byebye = []<EOL>for other in fig.axes:<EOL><INDENT>if other==a: continue<EOL>if bbox.fully_overlaps(other.bbox):<EOL><INDENT>byebye.append(other)<EOL><DEDENT><DEDENT>for ax in byebye: delaxes(ax)<EOL>draw_if_interactive()<EOL>return a<EOL>", "docstring": "Create a subplot command, creating axes with::\n\n  subplot(numRows, numCols, plotNum)\n\nwhere *plotNum* = 1 is the first plot number and increasing *plotNums*\nfill rows first.  max(*plotNum*) == *numRows* * *numCols*\n\nYou can leave out the commas if *numRows* <= *numCols* <=\n*plotNum* < 10, as in::\n\n  subplot(211)    # 2 rows, 1 column, first (upper) plot\n\n``subplot(111)`` is the default axis.\n\nNew subplots that overlap old will delete the old axes.  If you do\nnot want this behavior, use\n:meth:`matplotlib.figure.Figure.add_subplot` or the\n:func:`~matplotlib.pyplot.axes` command.  Eg.::\n\n  from pylab import *\n  plot([1,2,3])  # implicitly creates subplot(111)\n  subplot(211)   # overlaps, subplot(111) is killed\n  plot(rand(12), rand(12))\n  subplot(212, axisbg='y') # creates 2nd subplot with yellow background\n\nKeyword arguments:\n\n  *axisbg*:\n    The background color of the subplot, which can be any valid\n    color specifier.  See :mod:`matplotlib.colors` for more\n    information.\n\n  *polar*:\n    A boolean flag indicating whether the subplot plot should be\n    a polar projection.  Defaults to False.\n\n  *projection*:\n    A string giving the name of a custom projection to be used\n    for the subplot. This projection must have been previously\n    registered. See :func:`matplotlib.projections.register_projection`\n\n.. seealso::\n    :func:`~matplotlib.pyplot.axes`:\n        For additional information on :func:`axes` and\n        :func:`subplot` keyword arguments.\n\n    :file:`examples/pylab_examples/polar_scatter.py`\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/subplot_demo.py", "id": "f17168:m32"}
{"signature": "def figure(num=None, <EOL>figsize   = None, <EOL>dpi       = None, <EOL>facecolor = None, <EOL>edgecolor = None, <EOL>frameon = True,<EOL>FigureClass = Figure,<EOL>**kwargs<EOL>):", "body": "if figsize is None   : figsize   = rcParams['<STR_LIT>']<EOL>if dpi is None       : dpi       = rcParams['<STR_LIT>']<EOL>if facecolor is None : facecolor = rcParams['<STR_LIT>']<EOL>if edgecolor is None : edgecolor = rcParams['<STR_LIT>']<EOL>if num is None:<EOL><INDENT>allnums = [f.num for f in _pylab_helpers.Gcf.get_all_fig_managers()]<EOL>if allnums:<EOL><INDENT>num = max(allnums) + <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>num = <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>num = int(num)  <EOL><DEDENT>figManager = _pylab_helpers.Gcf.get_fig_manager(num)<EOL>if figManager is None:<EOL><INDENT>if get_backend().lower() == '<STR_LIT>':  dpi = <NUM_LIT><EOL>figManager = new_figure_manager(num, figsize=figsize,<EOL>dpi=dpi,<EOL>facecolor=facecolor,<EOL>edgecolor=edgecolor,<EOL>frameon=frameon,<EOL>FigureClass=FigureClass,<EOL>**kwargs)<EOL>def make_active(event):<EOL><INDENT>_pylab_helpers.Gcf.set_active(figManager)<EOL><DEDENT>cid = figManager.canvas.mpl_connect('<STR_LIT>', make_active)<EOL>figManager._cidgcf = cid<EOL>_pylab_helpers.Gcf.set_active(figManager)<EOL>figManager.canvas.figure.number = num<EOL><DEDENT>draw_if_interactive()<EOL>return figManager.canvas.figure<EOL>", "docstring": "call signature::\n\n  figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')\n\n\nCreate a new figure and return a :class:`matplotlib.figure.Figure`\ninstance.  If *num* = *None*, the figure number will be incremented and\na new figure will be created.  The returned figure objects have a\n*number* attribute holding this number.\n\nIf *num* is an integer, and ``figure(num)`` already exists, make it\nactive and return the handle to it.  If ``figure(num)`` does not exist\nit will be created.  Numbering starts at 1, matlab style::\n\n  figure(1)\n\nIf you are creating many figures, make sure you explicitly call \"close\"\non the figures you are not using, because this will enable pylab\nto properly clean up the memory.\n\nOptional keyword arguments:\n\n  =========   =======================================================\n  Keyword     Description\n  =========   =======================================================\n  figsize     width x height in inches; defaults to rc figure.figsize\n  dpi         resolution; defaults to rc figure.dpi\n  facecolor   the background color; defaults to rc figure.facecolor\n  edgecolor   the border color; defaults to rc figure.edgecolor\n  =========   =======================================================\n\nrcParams defines the default values, which can be modified in the\nmatplotlibrc file\n\n*FigureClass* is a :class:`~matplotlib.figure.Figure` or derived\nclass that will be passed on to :meth:`new_figure_manager` in the\nbackends which allows you to hook custom Figure classes into the\npylab interface.  Additional kwargs will be passed on to your\nfigure init function.", "id": "f17168:m11"}
{"signature": "def subplots_adjust(*args, **kwargs):", "body": "fig = gcf()<EOL>fig.subplots_adjust(*args, **kwargs)<EOL>draw_if_interactive()<EOL>", "docstring": "call signature::\n\n  subplots_adjust(left=None, bottom=None, right=None, top=None,\n                  wspace=None, hspace=None)\n\nTune the subplot layout via the\n:class:`matplotlib.figure.SubplotParams` mechanism.  The parameter\nmeanings (and suggested defaults) are::\n\n  left  = 0.125  # the left side of the subplots of the figure\n  right = 0.9    # the right side of the subplots of the figure\n  bottom = 0.1   # the bottom of the subplots of the figure\n  top = 0.9      # the top of the subplots of the figure\n  wspace = 0.2   # the amount of width reserved for blank space between subplots\n  hspace = 0.2   # the amount of height reserved for white space between subplots\n\nThe actual defaults are controlled by the rc file", "id": "f17168:m35"}
{"signature": "def xlim(*args, **kwargs):", "body": "ax = gca()<EOL>ret = ax.set_xlim(*args, **kwargs)<EOL>draw_if_interactive()<EOL>return ret<EOL>", "docstring": "Set/Get the xlimits of the current axes::\n\n  xmin, xmax = xlim()   # return the current xlim\n  xlim( (xmin, xmax) )  # set the xlim to xmin, xmax\n  xlim( xmin, xmax )    # set the xlim to xmin, xmax\n\nIf you do not specify args, you can pass the xmin and xmax as\nkwargs, eg.::\n\n  xlim(xmax=3) # adjust the max leaving min unchanged\n  xlim(xmin=1) # adjust the min leaving max unchanged\n\nThe new axis limits are returned as a length 2 tuple.", "id": "f17168:m42"}
{"signature": "def spring():", "body": "rc('<STR_LIT:image>', cmap='<STR_LIT>')<EOL>im = gci()<EOL>if im is not None:<EOL><INDENT>im.set_cmap(cm.spring)<EOL><DEDENT>draw_if_interactive()<EOL>", "docstring": "set the default colormap to spring and apply to current image if any.\nSee help(colormaps) for more information", "id": "f17168:m118"}
{"signature": "def bone():", "body": "rc('<STR_LIT:image>', cmap='<STR_LIT>')<EOL>im = gci()<EOL>if im is not None:<EOL><INDENT>im.set_cmap(cm.bone)<EOL><DEDENT>draw_if_interactive()<EOL>", "docstring": "set the default colormap to bone and apply to current image if any.\nSee help(colormaps) for more information", "id": "f17168:m108"}
{"signature": "def plotfile(fname, cols=(<NUM_LIT:0>,), plotfuncs=None,<EOL>comments='<STR_LIT:#>', skiprows=<NUM_LIT:0>, checkrows=<NUM_LIT:5>, delimiter='<STR_LIT:U+002C>',<EOL>**kwargs):", "body": "fig = figure()<EOL>if len(cols)<<NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if plotfuncs is None:<EOL><INDENT>plotfuncs = dict()<EOL><DEDENT>r = mlab.csv2rec(fname, comments=comments,<EOL>skiprows=skiprows, checkrows=checkrows, delimiter=delimiter)<EOL>def getname_val(identifier):<EOL><INDENT>'<STR_LIT>'<EOL>if is_string_like(identifier):<EOL><INDENT>return identifier, r[identifier]<EOL><DEDENT>elif is_numlike(identifier):<EOL><INDENT>name = r.dtype.names[int(identifier)]<EOL>return name, r[name]<EOL><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT><DEDENT>xname, x = getname_val(cols[<NUM_LIT:0>])<EOL>if len(cols)==<NUM_LIT:1>:<EOL><INDENT>ax1 = fig.add_subplot(<NUM_LIT:1>,<NUM_LIT:1>,<NUM_LIT:1>)<EOL>funcname = plotfuncs.get(cols[<NUM_LIT:0>], '<STR_LIT>')<EOL>func = getattr(ax1, funcname)<EOL>func(x, **kwargs)<EOL>ax1.set_xlabel(xname)<EOL><DEDENT>else:<EOL><INDENT>N = len(cols)<EOL>for i in range(<NUM_LIT:1>,N):<EOL><INDENT>if i==<NUM_LIT:1>:<EOL><INDENT>ax = ax1 = fig.add_subplot(N-<NUM_LIT:1>,<NUM_LIT:1>,i)<EOL>ax.grid(True)<EOL><DEDENT>else:<EOL><INDENT>ax = fig.add_subplot(N-<NUM_LIT:1>,<NUM_LIT:1>,i, sharex=ax1)<EOL>ax.grid(True)<EOL><DEDENT>yname, y = getname_val(cols[i])<EOL>funcname = plotfuncs.get(cols[i], '<STR_LIT>')<EOL>func = getattr(ax, funcname)<EOL>func(x, y, **kwargs)<EOL>ax.set_ylabel(yname)<EOL>if ax.is_last_row():<EOL><INDENT>ax.set_xlabel(xname)<EOL><DEDENT>else:<EOL><INDENT>ax.set_xlabel('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>if xname=='<STR_LIT:date>':<EOL><INDENT>fig.autofmt_xdate()<EOL><DEDENT>draw_if_interactive()<EOL>", "docstring": "Plot the data in *fname*\n\n*cols* is a sequence of column identifiers to plot.  An identifier\nis either an int or a string.  
If it is an int, it indicates the\ncolumn number.  If it is a string, it indicates the column header.\nmatplotlib will make column headers lower case, replace spaces with\nunderscores, and remove all illegal characters; so ``'Adj Close*'``\nwill have name ``'adj_close'``.\n\n- If len(*cols*) == 1, only that column will be plotted on the *y* axis.\n\n- If len(*cols*) > 1, the first element will be an identifier for\n  data for the *x* axis and the remaining elements will be the\n  column indexes for multiple subplots\n\n*plotfuncs*, if not *None*, is a dictionary mapping identifier to\nan :class:`~matplotlib.axes.Axes` plotting function as a string.\nDefault is 'plot', other choices are 'semilogy', 'fill', 'bar',\netc.  You must use the same type of identifier in the *cols*\nvector as you use in the *plotfuncs* dictionary, eg., integer\ncolumn numbers in both or column names in both.\n\n*comments*, *skiprows*, *checkrows*, and *delimiter* are all passed on to\n:func:`matplotlib.pylab.csv2rec` to load the data into a record array.\n\nkwargs are passed on to plotting functions.\n\nExample usage::\n\n  # plot the 2nd and 4th column against the 1st in two subplots\n  plotfile(fname, (0,1,3))\n\n  # plot using column names; specify an alternate plot type for volume\n  plotfile(fname, ('date', 'volume', 'adj_close'), plotfuncs={'volume': 'semilogy'})", "id": "f17168:m59"}
{"signature": "def hot():", "body": "rc('<STR_LIT:image>', cmap='<STR_LIT>')<EOL>im = gci()<EOL>if im is not None:<EOL><INDENT>im.set_cmap(cm.hot)<EOL><DEDENT>draw_if_interactive()<EOL>", "docstring": "set the default colormap to hot and apply to current image if any.\nSee help(colormaps) for more information", "id": "f17168:m113"}
{"signature": "def ylim(*args, **kwargs):", "body": "ax = gca()<EOL>ret = ax.set_ylim(*args, **kwargs)<EOL>draw_if_interactive()<EOL>return ret<EOL>", "docstring": "Set/Get the ylimits of the current axes::\n\n  ymin, ymax = ylim()   # return the current ylim\n  ylim( (ymin, ymax) )  # set the ylim to ymin, ymax\n  ylim( ymin, ymax )    # set the ylim to ymin, ymax\n\nIf you do not specify args, you can pass the *ymin* and *ymax* as\nkwargs, eg.::\n\n  ylim(ymax=3) # adjust the max leaving min unchanged\n  ylim(ymin=1) # adjust the min leaving max unchanged\n\nThe new axis limits are returned as a length 2 tuple.", "id": "f17168:m43"}
{"signature": "def clim(vmin=None, vmax=None):", "body": "im = gci()<EOL>if im is None:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>im.set_clim(vmin, vmax)<EOL>draw_if_interactive()<EOL>", "docstring": "Set the color limits of the current image\n\nTo apply clim to all axes images do::\n\n  clim(0, 0.5)\n\nIf either *vmin* or *vmax* is None, the image min/max respectively\nwill be used for color scaling.\n\nIf you want to set the clim of multiple images,\nuse, for example::\n\n  for im in gca().get_images():\n      im.set_clim(0, 0.05)", "id": "f17168:m55"}
{"signature": "def summer():", "body": "rc('<STR_LIT:image>', cmap='<STR_LIT>')<EOL>im = gci()<EOL>if im is not None:<EOL><INDENT>im.set_cmap(cm.summer)<EOL><DEDENT>draw_if_interactive()<EOL>", "docstring": "set the default colormap to summer and apply to current image if any.\nSee help(colormaps) for more information", "id": "f17168:m119"}
{"signature": "def yticks(*args, **kwargs):", "body": "ax = gca()<EOL>if len(args)==<NUM_LIT:0>:<EOL><INDENT>locs = ax.get_yticks()<EOL>labels = ax.get_yticklabels()<EOL><DEDENT>elif len(args)==<NUM_LIT:1>:<EOL><INDENT>locs = ax.set_yticks(args[<NUM_LIT:0>])<EOL>labels = ax.get_yticklabels()<EOL><DEDENT>elif len(args)==<NUM_LIT:2>:<EOL><INDENT>locs = ax.set_yticks(args[<NUM_LIT:0>])<EOL>labels = ax.set_yticklabels(args[<NUM_LIT:1>], **kwargs)<EOL><DEDENT>else: raise TypeError('<STR_LIT>')<EOL>if len(kwargs):<EOL><INDENT>for l in labels:<EOL><INDENT>l.update(kwargs)<EOL><DEDENT><DEDENT>draw_if_interactive()<EOL>return ( locs,<EOL>silent_list('<STR_LIT>', labels)<EOL>)<EOL>", "docstring": "Set/Get the ylimits of the current ticklocs and labels::\n\n  # return locs, labels where locs is an array of tick locations and\n  # labels is an array of tick labels.\n  locs, labels = yticks()\n\n  # set the locations of the yticks\n  yticks( arange(6) )\n\n  # set the locations and labels of the yticks\n  yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )\n\nThe keyword args, if any, are :class:`~matplotlib.text.Text`\nproperties.", "id": "f17168:m47"}
{"signature": "def colors():", "body": "pass<EOL>", "docstring": "This is a do nothing function to provide you with help on how\nmatplotlib handles colors.\n\nCommands which take color arguments can use several formats to\nspecify the colors.  For the basic builtin colors, you can use a\nsingle letter\n\n  =====   =======\n  Alias   Color\n  =====   =======\n  'b'     blue\n  'g'     green\n  'r'     red\n  'c'     cyan\n  'm'     magenta\n  'y'     yellow\n  'k'     black\n  'w'     white\n  =====   =======\n\nFor a greater range of colors, you have two options.  You can\nspecify the color using an html hex string, as in::\n\n  color = '#eeefff'\n\nor you can pass an R,G,B tuple, where each of R,G,B are in the\nrange [0,1].\n\nYou can also use any legal html name for a color, for example::\n\n  color = 'red',\n  color = 'burlywood'\n  color = 'chartreuse'\n\nThe example below creates a subplot with a dark\nslate gray background\n\n   subplot(111, axisbg=(0.1843, 0.3098, 0.3098))\n\nHere is an example that creates a pale turqoise title::\n\n  title('Is this the best color?', color='#afeeee')", "id": "f17168:m52"}
{"signature": "def prism():", "body": "rc('<STR_LIT:image>', cmap='<STR_LIT>')<EOL>im = gci()<EOL>if im is not None:<EOL><INDENT>im.set_cmap(cm.prism)<EOL><DEDENT>draw_if_interactive()<EOL>", "docstring": "set the default colormap to prism and apply to current image if any.\nSee help(colormaps) for more information", "id": "f17168:m117"}
{"signature": "def isinteractive():", "body": "return matplotlib.is_interactive()<EOL>", "docstring": "Return the interactive status", "id": "f17168:m3"}
{"signature": "def copper():", "body": "rc('<STR_LIT:image>', cmap='<STR_LIT>')<EOL>im = gci()<EOL>if im is not None:<EOL><INDENT>im.set_cmap(cm.copper)<EOL><DEDENT>draw_if_interactive()<EOL>", "docstring": "set the default colormap to copper and apply to current image if any.\nSee help(colormaps) for more information", "id": "f17168:m110"}
{"signature": "def title(s, *args, **kwargs):", "body": "l =  gca().set_title(s, *args, **kwargs)<EOL>draw_if_interactive()<EOL>return l<EOL>", "docstring": "Set the title of the current axis to *s*.\n\nDefault font override is::\n\n  override = {'fontsize': 'medium',\n              'verticalalignment': 'bottom',\n              'horizontalalignment': 'center'}\n\n.. seealso::\n   :func:`~matplotlib.pyplot.text`:\n       for information on how override and the optional args work.", "id": "f17168:m38"}
{"signature": "def delaxes(*args):", "body": "if not len(args):<EOL><INDENT>ax = gca()<EOL><DEDENT>else:<EOL><INDENT>ax = args[<NUM_LIT:0>]<EOL><DEDENT>ret = gcf().delaxes(ax)<EOL>draw_if_interactive()<EOL>return ret<EOL>", "docstring": "``delaxes(ax)``: remove *ax* from the current figure.  If *ax*\ndoesn't exist, an error will be raised.\n\n``delaxes()``: delete the current axes", "id": "f17168:m30"}
{"signature": "def switch_backend(newbackend):", "body": "close('<STR_LIT:all>')<EOL>global new_figure_manager, draw_if_interactive, show<EOL>matplotlib.use(newbackend, warn=False)<EOL>reload(matplotlib.backends)<EOL>from matplotlib.backends import pylab_setup<EOL>new_figure_manager, draw_if_interactive, show = pylab_setup()<EOL>", "docstring": "Switch the default backend to newbackend.  This feature is\n**experimental**, and is only expected to work switching to an\nimage backend.  Eg, if you have a bunch of PostScript scripts that\nyou want to run from an interactive ipython session, you may want\nto switch to the PS backend before running them to avoid having a\nbunch of GUI windows popup.  If you try to interactively switch\nfrom one GUI backend to another, you will explode.\n\nCalling this command will close all open windows.", "id": "f17168:m2"}
{"signature": "def close(*args):", "body": "if len(args)==<NUM_LIT:0>:<EOL><INDENT>figManager = _pylab_helpers.Gcf.get_active()<EOL>if figManager is None: return<EOL>else:<EOL><INDENT>figManager.canvas.mpl_disconnect(figManager._cidgcf)<EOL>_pylab_helpers.Gcf.destroy(figManager.num)<EOL><DEDENT><DEDENT>elif len(args)==<NUM_LIT:1>:<EOL><INDENT>arg = args[<NUM_LIT:0>]<EOL>if arg=='<STR_LIT:all>':<EOL><INDENT>for manager in _pylab_helpers.Gcf.get_all_fig_managers():<EOL><INDENT>manager.canvas.mpl_disconnect(manager._cidgcf)<EOL>_pylab_helpers.Gcf.destroy(manager.num)<EOL><DEDENT><DEDENT>elif isinstance(arg, int):<EOL><INDENT>_pylab_helpers.Gcf.destroy(arg)<EOL><DEDENT>elif isinstance(arg, Figure):<EOL><INDENT>for manager in _pylab_helpers.Gcf.get_all_fig_managers():<EOL><INDENT>if manager.canvas.figure==arg:<EOL><INDENT>manager.canvas.mpl_disconnect(manager._cidgcf)<EOL>_pylab_helpers.Gcf.destroy(manager.num)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>'%type(arg))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Close a figure window\n\n``close()`` by itself closes the current figure\n\n``close(num)`` closes figure number *num*\n\n``close(h)`` where *h* is a :class:`Figure` instance, closes that figure\n\n``close('all')`` closes all the figure windows", "id": "f17168:m16"}
{"signature": "def thetagrids(*args, **kwargs):", "body": "ax = gca()<EOL>if not isinstance(ax, PolarAxes):<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>if len(args)==<NUM_LIT:0>:<EOL><INDENT>lines = ax.xaxis.get_ticklines()<EOL>labels = ax.xaxis.get_ticklabels()<EOL><DEDENT>else:<EOL><INDENT>lines, labels = ax.set_thetagrids(*args, **kwargs)<EOL><DEDENT>draw_if_interactive()<EOL>return (silent_list('<STR_LIT>', lines),<EOL>silent_list('<STR_LIT>', labels)<EOL>)<EOL>", "docstring": "Set/Get the theta locations of the gridlines and ticklabels.\n\nIf no arguments are passed, return a tuple (*lines*, *labels*)\nwhere *lines* is an array of radial gridlines\n(:class:`~matplotlib.lines.Line2D` instances) and *labels* is an\narray of tick labels (:class:`~matplotlib.text.Text` instances)::\n\n  lines, labels = thetagrids()\n\nOtherwise the syntax is::\n\n  lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)\n\nset the angles at which to place the theta grids (these gridlines\nare equal along the theta dimension).\n\n*angles* is in degrees.\n\n*labels*, if not *None*, is a len(angles) list of strings of the\nlabels to use at each angle.\n\nIf *labels* is *None*, the labels will be ``fmt%angle``.\n\n*frac* is the fraction of the polar axes radius at which to place\nthe label (1 is the edge). Eg. 
1.05 is outside the axes and 0.95\nis inside the axes.\n\nReturn value is a list of tuples (*lines*, *labels*):\n\n  - *lines* are :class:`~matplotlib.lines.Line2D` instances\n\n  - *labels* are :class:`~matplotlib.text.Text` instances.\n\nNote that on input, the *labels* argument is a list of strings,\nand on output it is a list of :class:`~matplotlib.text.Text`\ninstances.\n\nExamples::\n\n  # set the locations of the radial gridlines and labels\n  lines, labels = thetagrids( range(45,360,90) )\n\n  # set the locations and labels of the radial gridlines and labels\n  lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )", "id": "f17168:m49"}
{"signature": "def waitforbuttonpress(*args, **kwargs):", "body": "return gcf().waitforbuttonpress(*args, **kwargs)<EOL>", "docstring": "Blocking call to interact with the figure.\n\nThis will wait for *n* key or mouse clicks from the user and\nreturn a list containing True's for keyboard clicks and False's\nfor mouse clicks.\n\nIf *timeout* is negative, does not timeout.", "id": "f17168:m21"}
{"signature": "def twinx(ax=None):", "body": "if ax is None:<EOL><INDENT>ax=gca()<EOL><DEDENT>ax1 = ax.twinx()<EOL>draw_if_interactive()<EOL>return ax1<EOL>", "docstring": "Make a second axes overlay *ax* (or the current axes if *ax* is\n*None*) sharing the xaxis.  The ticks for *ax2* will be placed on\nthe right, and the *ax2* instance is returned.\n\n.. seealso::\n   :file:`examples/api_examples/two_scales.py`", "id": "f17168:m33"}
{"signature": "def gca(**kwargs):", "body": "ax =  gcf().gca(**kwargs)<EOL>return ax<EOL>", "docstring": "Return the current axis instance.  This can be used to control\naxis properties either using set or the\n:class:`~matplotlib.axes.Axes` methods, for example, setting the\nxaxis range::\n\n  plot(t,s)\n  set(gca(), 'xlim', [0,10])\n\nor::\n\n  plot(t,s)\n  a = gca()\n  a.set_xlim([0,10])", "id": "f17168:m31"}
{"signature": "def iter_segments(self, simplify=None):", "body": "vertices = self.vertices<EOL>if not len(vertices):<EOL><INDENT>return<EOL><DEDENT>codes        = self.codes<EOL>len_vertices = len(vertices)<EOL>isfinite     = np.isfinite<EOL>NUM_VERTICES = self.NUM_VERTICES<EOL>MOVETO       = self.MOVETO<EOL>LINETO       = self.LINETO<EOL>CLOSEPOLY    = self.CLOSEPOLY<EOL>STOP         = self.STOP<EOL>if simplify is not None and self.should_simplify:<EOL><INDENT>polygons = self.to_polygons(None, *simplify)<EOL>for vertices in polygons:<EOL><INDENT>yield vertices[<NUM_LIT:0>], MOVETO<EOL>for v in vertices[<NUM_LIT:1>:]:<EOL><INDENT>yield v, LINETO<EOL><DEDENT><DEDENT><DEDENT>elif codes is None:<EOL><INDENT>if self.has_nonfinite:<EOL><INDENT>next_code = MOVETO<EOL>for v in vertices:<EOL><INDENT>if np.isfinite(v).all():<EOL><INDENT>yield v, next_code<EOL>next_code = LINETO<EOL><DEDENT>else:<EOL><INDENT>next_code = MOVETO<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>yield vertices[<NUM_LIT:0>], MOVETO<EOL>for v in vertices[<NUM_LIT:1>:]:<EOL><INDENT>yield v, LINETO<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>i = <NUM_LIT:0><EOL>was_nan = False<EOL>while i < len_vertices:<EOL><INDENT>code = codes[i]<EOL>if code == CLOSEPOLY:<EOL><INDENT>yield [], code<EOL>i += <NUM_LIT:1><EOL><DEDENT>elif code == STOP:<EOL><INDENT>return<EOL><DEDENT>else:<EOL><INDENT>num_vertices = NUM_VERTICES[int(code)]<EOL>curr_vertices = vertices[i:i+num_vertices].flatten()<EOL>if not isfinite(curr_vertices).all():<EOL><INDENT>was_nan = True<EOL><DEDENT>elif was_nan:<EOL><INDENT>yield curr_vertices[-<NUM_LIT:2>:], MOVETO<EOL>was_nan = False<EOL><DEDENT>else:<EOL><INDENT>yield curr_vertices, code<EOL><DEDENT>i += num_vertices<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Iterates over all of the curve segments in the path.  
Each\niteration returns a 2-tuple (*vertices*, *code*), where\n*vertices* is a sequence of 1 - 3 coordinate pairs, and *code* is\none of the :class:`Path` codes.\n\nIf *simplify* is provided, it must be a tuple (*width*,\n*height*) defining the size of the figure, in native units\n(e.g. pixels or points).  Simplification implies both removing\nadjacent line segments that are very close to parallel, and\nremoving line segments outside of the figure.  The path will\nbe simplified *only* if :attr:`should_simplify` is True, which\nis determined in the constructor by this criteria:\n\n   - No curves\n   - More than 128 vertices", "id": "f17169:c0:m4"}
{"signature": "def __init__(self, vertices, codes=None):", "body": "if ma.isMaskedArray(vertices):<EOL><INDENT>vertices = vertices.astype(np.float_).filled(np.nan)<EOL><DEDENT>else:<EOL><INDENT>vertices = np.asarray(vertices, np.float_)<EOL><DEDENT>if codes is not None:<EOL><INDENT>codes = np.asarray(codes, self.code_type)<EOL>assert codes.ndim == <NUM_LIT:1><EOL>assert len(codes) == len(vertices)<EOL><DEDENT>assert vertices.ndim == <NUM_LIT:2><EOL>assert vertices.shape[<NUM_LIT:1>] == <NUM_LIT:2><EOL>self.should_simplify = (len(vertices) >= <NUM_LIT> and<EOL>(codes is None or np.all(codes <= Path.LINETO)))<EOL>self.has_nonfinite = not np.isfinite(vertices).all()<EOL>self.codes = codes<EOL>self.vertices = vertices<EOL>", "docstring": "Create a new path with the given vertices and codes.\n\n*vertices* is an Nx2 numpy float array, masked array or Python\nsequence.\n\n*codes* is an N-length numpy array or Python sequence of type\n:attr:`matplotlib.path.Path.code_type`.\n\nThese two arrays must have the same length in the first\ndimension.\n\nIf *codes* is None, *vertices* will be treated as a series of\nline segments.\n\nIf *vertices* contains masked values, they will be converted\nto NaNs which are then handled correctly by the Agg\nPathIterator and other consumers of path data, such as\n:meth:`iter_segments`.", "id": "f17169:c0:m0"}
{"signature": "def get_path_collection_extents(*args):", "body": "from transforms import Bbox<EOL>if len(args[<NUM_LIT:1>]) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return Bbox.from_extents(*_get_path_collection_extents(*args))<EOL>", "docstring": "Given a sequence of :class:`Path` objects, returns the bounding\nbox that encapsulates all of them.", "id": "f17169:m0"}
{"signature": "def unit_circle(cls):", "body": "if cls._unit_circle is None:<EOL><INDENT>MAGIC = <NUM_LIT><EOL>SQRTHALF = np.sqrt(<NUM_LIT:0.5>)<EOL>MAGIC45 = np.sqrt((MAGIC*MAGIC) / <NUM_LIT>)<EOL>vertices = np.array(<EOL>[[<NUM_LIT:0.0>, -<NUM_LIT:1.0>],<EOL>[MAGIC, -<NUM_LIT:1.0>],<EOL>[SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],<EOL>[SQRTHALF, -SQRTHALF],<EOL>[SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],<EOL>[<NUM_LIT:1.0>, -MAGIC],<EOL>[<NUM_LIT:1.0>, <NUM_LIT:0.0>],<EOL>[<NUM_LIT:1.0>, MAGIC],<EOL>[SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],<EOL>[SQRTHALF, SQRTHALF],<EOL>[SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],<EOL>[MAGIC, <NUM_LIT:1.0>],<EOL>[<NUM_LIT:0.0>, <NUM_LIT:1.0>],<EOL>[-MAGIC, <NUM_LIT:1.0>],<EOL>[-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],<EOL>[-SQRTHALF, SQRTHALF],<EOL>[-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],<EOL>[-<NUM_LIT:1.0>, MAGIC],<EOL>[-<NUM_LIT:1.0>, <NUM_LIT:0.0>],<EOL>[-<NUM_LIT:1.0>, -MAGIC],<EOL>[-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],<EOL>[-SQRTHALF, -SQRTHALF],<EOL>[-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],<EOL>[-MAGIC, -<NUM_LIT:1.0>],<EOL>[<NUM_LIT:0.0>, -<NUM_LIT:1.0>],<EOL>[<NUM_LIT:0.0>, -<NUM_LIT:1.0>]],<EOL>np.float_)<EOL>codes = cls.CURVE4 * np.ones(<NUM_LIT>)<EOL>codes[<NUM_LIT:0>] = cls.MOVETO<EOL>codes[-<NUM_LIT:1>] = cls.CLOSEPOLY<EOL>cls._unit_circle = Path(vertices, codes)<EOL><DEDENT>return cls._unit_circle<EOL>", "docstring": "(staticmethod) Returns a :class:`Path` of the unit circle.\nThe circle is approximated using cubic Bezier curves.  This\nuses 8 splines around the circle using the approach presented\nhere:\n\n  Lancaster, Don.  `Approximating a Circle or an Ellipse Using Four\n  Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_.", "id": "f17169:c0:m17"}
{"signature": "def get_extents(self, transform=None):", "body": "from transforms import Bbox<EOL>if transform is not None:<EOL><INDENT>transform = transform.frozen()<EOL><DEDENT>return Bbox(get_path_extents(self, transform))<EOL>", "docstring": "Returns the extents (*xmin*, *ymin*, *xmax*, *ymax*) of the\npath.\n\nUnlike computing the extents on the *vertices* alone, this\nalgorithm will take into account the curves and deal with\ncontrol points appropriately.", "id": "f17169:c0:m8"}
{"signature": "def _edges(self, X, Y):", "body": "N = X.shape[<NUM_LIT:0>]<EOL>if self.orientation == '<STR_LIT>':<EOL><INDENT>return [list(zip(X[i], Y[i])) for i in range(<NUM_LIT:1>, N-<NUM_LIT:1>)]<EOL><DEDENT>else:<EOL><INDENT>return [list(zip(Y[i], X[i])) for i in range(<NUM_LIT:1>, N-<NUM_LIT:1>)]<EOL><DEDENT>", "docstring": "Return the separator line segments; helper for _add_solids.", "id": "f17170:c0:m6"}
{"signature": "def _extended_N(self):", "body": "N = self.cmap.N + <NUM_LIT:1><EOL>if self.extend == '<STR_LIT>':<EOL><INDENT>N += <NUM_LIT:2><EOL><DEDENT>elif self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>N += <NUM_LIT:1><EOL><DEDENT>return N<EOL>", "docstring": "Based on the colormap and extend variable, return the\nnumber of boundaries.", "id": "f17170:c0:m13"}
{"signature": "def _config_axes(self, X, Y):", "body": "ax = self.ax<EOL>ax.set_frame_on(False)<EOL>ax.set_navigate(False)<EOL>xy = self._outline(X, Y)<EOL>ax.update_datalim(xy)<EOL>ax.set_xlim(*ax.dataLim.intervalx)<EOL>ax.set_ylim(*ax.dataLim.intervaly)<EOL>self.outline = lines.Line2D(xy[:, <NUM_LIT:0>], xy[:, <NUM_LIT:1>], color=mpl.rcParams['<STR_LIT>'],<EOL>linewidth=mpl.rcParams['<STR_LIT>'])<EOL>ax.add_artist(self.outline)<EOL>self.outline.set_clip_box(None)<EOL>self.outline.set_clip_path(None)<EOL>c = mpl.rcParams['<STR_LIT>']<EOL>self.patch = patches.Polygon(xy, edgecolor=c,<EOL>facecolor=c,<EOL>linewidth=<NUM_LIT>,<EOL>zorder=-<NUM_LIT:1>)<EOL>ax.add_artist(self.patch)<EOL>ticks, ticklabels, offset_string = self._ticker()<EOL>if self.orientation == '<STR_LIT>':<EOL><INDENT>ax.set_xticks([])<EOL>ax.yaxis.set_label_position('<STR_LIT:right>')<EOL>ax.yaxis.set_ticks_position('<STR_LIT:right>')<EOL>ax.set_yticks(ticks)<EOL>ax.set_yticklabels(ticklabels)<EOL>ax.yaxis.get_major_formatter().set_offset_string(offset_string)<EOL><DEDENT>else:<EOL><INDENT>ax.set_yticks([])<EOL>ax.xaxis.set_label_position('<STR_LIT>')<EOL>ax.set_xticks(ticks)<EOL>ax.set_xticklabels(ticklabels)<EOL>ax.xaxis.get_major_formatter().set_offset_string(offset_string)<EOL><DEDENT>", "docstring": "Make an axes patch and outline.", "id": "f17170:c0:m2"}
{"signature": "def _locate(self, x):", "body": "if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)):<EOL><INDENT>b = self._boundaries<EOL>xn = x<EOL>xout = x<EOL><DEDENT>else:<EOL><INDENT>b = self.norm(self._boundaries, clip=False).filled()<EOL>xn = self.norm(x, clip=False).filled()<EOL>in_cond = (xn > -<NUM_LIT>) & (xn < <NUM_LIT>)<EOL>xn = np.compress(in_cond, xn)<EOL>xout = np.compress(in_cond, x)<EOL><DEDENT>y = self._y<EOL>N = len(b)<EOL>ii = np.minimum(np.searchsorted(b, xn), N-<NUM_LIT:1>)<EOL>i0 = np.maximum(ii - <NUM_LIT:1>, <NUM_LIT:0>)<EOL>db = np.take(b, ii) - np.take(b, i0)<EOL>db = np.where(i0==ii, <NUM_LIT:1.0>, db)<EOL>dy = np.take(y, ii) - np.take(y, i0)<EOL>z = np.take(y, i0) + (xn-np.take(b,i0))*dy/db<EOL>return xout, z<EOL>", "docstring": "Given a possible set of color data values, return the ones\nwithin range, together with their corresponding colorbar\ndata coordinates.", "id": "f17170:c0:m17"}
{"signature": "def _mesh(self):", "body": "x = np.array([<NUM_LIT:0.0>, <NUM_LIT:1.0>])<EOL>if self.spacing == '<STR_LIT>':<EOL><INDENT>y = self._uniform_y(self._central_N())<EOL><DEDENT>else:<EOL><INDENT>y = self._proportional_y()<EOL><DEDENT>self._y = y<EOL>X, Y = np.meshgrid(x,y)<EOL>if self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>X[<NUM_LIT:0>,:] = <NUM_LIT:0.5><EOL><DEDENT>if self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>X[-<NUM_LIT:1>,:] = <NUM_LIT:0.5><EOL><DEDENT>return X, Y<EOL>", "docstring": "Return X,Y, the coordinate arrays for the colorbar pcolormesh.\nThese are suitable for a vertical colorbar; swapping and\ntransposition for a horizontal colorbar are done outside\nthis function.", "id": "f17170:c0:m16"}
{"signature": "def _ticker(self):", "body": "locator = self.locator<EOL>formatter = self.formatter<EOL>if locator is None:<EOL><INDENT>if self.boundaries is None:<EOL><INDENT>if isinstance(self.norm, colors.NoNorm):<EOL><INDENT>nv = len(self._values)<EOL>base = <NUM_LIT:1> + int(nv/<NUM_LIT:10>)<EOL>locator = ticker.IndexLocator(base=base, offset=<NUM_LIT:0>)<EOL><DEDENT>elif isinstance(self.norm, colors.BoundaryNorm):<EOL><INDENT>b = self.norm.boundaries<EOL>locator = ticker.FixedLocator(b, nbins=<NUM_LIT:10>)<EOL><DEDENT>elif isinstance(self.norm, colors.LogNorm):<EOL><INDENT>locator = ticker.LogLocator()<EOL><DEDENT>else:<EOL><INDENT>locator = ticker.MaxNLocator()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>b = self._boundaries[self._inside]<EOL>locator = ticker.FixedLocator(b, nbins=<NUM_LIT:10>)<EOL><DEDENT><DEDENT>if isinstance(self.norm, colors.NoNorm):<EOL><INDENT>intv = self._values[<NUM_LIT:0>], self._values[-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>intv = self.vmin, self.vmax<EOL><DEDENT>locator.create_dummy_axis()<EOL>formatter.create_dummy_axis()<EOL>locator.set_view_interval(*intv)<EOL>locator.set_data_interval(*intv)<EOL>formatter.set_view_interval(*intv)<EOL>formatter.set_data_interval(*intv)<EOL>b = np.array(locator())<EOL>b, ticks = self._locate(b)<EOL>formatter.set_locs(b)<EOL>ticklabels = [formatter(t, i) for i, t in enumerate(b)]<EOL>offset_string = formatter.get_offset()<EOL>return ticks, ticklabels, offset_string<EOL>", "docstring": "Return two sequences: ticks (colorbar data locations)\nand ticklabels (strings).", "id": "f17170:c0:m9"}
{"signature": "def _find_range(self):", "body": "b = self._boundaries[self._inside]<EOL>self.vmin = b[<NUM_LIT:0>]<EOL>self.vmax = b[-<NUM_LIT:1>]<EOL>", "docstring": "Set :attr:`vmin` and :attr:`vmax` attributes to the first and\nlast boundary excluding extended end boundaries.", "id": "f17170:c0:m11"}
{"signature": "def _process_values(self, b=None):", "body": "if b is None:<EOL><INDENT>b = self.boundaries<EOL><DEDENT>if b is not None:<EOL><INDENT>self._boundaries = np.asarray(b, dtype=float)<EOL>if self.values is None:<EOL><INDENT>self._values = <NUM_LIT:0.5>*(self._boundaries[:-<NUM_LIT:1>]<EOL>+ self._boundaries[<NUM_LIT:1>:])<EOL>if isinstance(self.norm, colors.NoNorm):<EOL><INDENT>self._values = (self._values + <NUM_LIT>).astype(np.int16)<EOL><DEDENT>return<EOL><DEDENT>self._values = np.array(self.values)<EOL>return<EOL><DEDENT>if self.values is not None:<EOL><INDENT>self._values = np.array(self.values)<EOL>if self.boundaries is None:<EOL><INDENT>b = np.zeros(len(self.values)+<NUM_LIT:1>, '<STR_LIT:d>')<EOL>b[<NUM_LIT:1>:-<NUM_LIT:1>] = <NUM_LIT:0.5>*(self._values[:-<NUM_LIT:1>] - self._values[<NUM_LIT:1>:])<EOL>b[<NUM_LIT:0>] = <NUM_LIT>*b[<NUM_LIT:1>] - b[<NUM_LIT:2>]<EOL>b[-<NUM_LIT:1>] = <NUM_LIT>*b[-<NUM_LIT:2>] - b[-<NUM_LIT:3>]<EOL>self._boundaries = b<EOL>return<EOL><DEDENT>self._boundaries = np.array(self.boundaries)<EOL>return<EOL><DEDENT>if isinstance(self.norm, colors.NoNorm):<EOL><INDENT>b = self._uniform_y(self.cmap.N+<NUM_LIT:1>) * self.cmap.N - <NUM_LIT:0.5><EOL>v = np.zeros((len(b)-<NUM_LIT:1>,), dtype=np.int16)<EOL>v[self._inside] = np.arange(self.cmap.N, dtype=np.int16)<EOL>if self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>v[<NUM_LIT:0>] = -<NUM_LIT:1><EOL><DEDENT>if self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>v[-<NUM_LIT:1>] = self.cmap.N<EOL><DEDENT>self._boundaries = b<EOL>self._values = v<EOL>return<EOL><DEDENT>elif isinstance(self.norm, colors.BoundaryNorm):<EOL><INDENT>b = list(self.norm.boundaries)<EOL>if self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>b = [b[<NUM_LIT:0>]-<NUM_LIT:1>] + b<EOL><DEDENT>if self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>b = b + [b[-<NUM_LIT:1>] + <NUM_LIT:1>]<EOL><DEDENT>b = np.array(b)<EOL>v = np.zeros((len(b)-<NUM_LIT:1>,), dtype=float)<EOL>bi = 
self.norm.boundaries<EOL>v[self._inside] = <NUM_LIT:0.5>*(bi[:-<NUM_LIT:1>] + bi[<NUM_LIT:1>:])<EOL>if self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>v[<NUM_LIT:0>] = b[<NUM_LIT:0>] - <NUM_LIT:1><EOL><DEDENT>if self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>v[-<NUM_LIT:1>] = b[-<NUM_LIT:1>] + <NUM_LIT:1><EOL><DEDENT>self._boundaries = b<EOL>self._values = v<EOL>return<EOL><DEDENT>else:<EOL><INDENT>if not self.norm.scaled():<EOL><INDENT>self.norm.vmin = <NUM_LIT:0><EOL>self.norm.vmax = <NUM_LIT:1><EOL><DEDENT>b = self.norm.inverse(self._uniform_y(self.cmap.N+<NUM_LIT:1>))<EOL>if self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>b[<NUM_LIT:0>] = b[<NUM_LIT:0>] - <NUM_LIT:1><EOL><DEDENT>if self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>b[-<NUM_LIT:1>] = b[-<NUM_LIT:1>] + <NUM_LIT:1><EOL><DEDENT><DEDENT>self._process_values(b)<EOL>", "docstring": "Set the :attr:`_boundaries` and :attr:`_values` attributes\nbased on the input boundaries and values.  Input boundaries\ncan be *self.boundaries* or the argument *b*.", "id": "f17170:c0:m10"}
{"signature": "def add_lines(self, levels, colors, linewidths):", "body": "N = len(levels)<EOL>dummy, y = self._locate(levels)<EOL>if len(y) != N:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>x = np.array([<NUM_LIT:0.0>, <NUM_LIT:1.0>])<EOL>X, Y = np.meshgrid(x,y)<EOL>if self.orientation == '<STR_LIT>':<EOL><INDENT>xy = [list(zip(X[i], Y[i])) for i in range(N)]<EOL><DEDENT>else:<EOL><INDENT>xy = [list(zip(Y[i], X[i])) for i in range(N)]<EOL><DEDENT>col = collections.LineCollection(xy, linewidths=linewidths)<EOL>self.lines = col<EOL>col.set_color(colors)<EOL>self.ax.add_collection(col)<EOL>", "docstring": "Draw lines on the colorbar.", "id": "f17170:c0:m8"}
{"signature": "def _add_solids(self, X, Y, C):", "body": "<EOL>if self.orientation == '<STR_LIT>':<EOL><INDENT>args = (X, Y, C)<EOL><DEDENT>else:<EOL><INDENT>args = (np.transpose(Y), np.transpose(X), np.transpose(C))<EOL><DEDENT>kw = {'<STR_LIT>':self.cmap, '<STR_LIT>':self.norm,<EOL>'<STR_LIT>':'<STR_LIT>', '<STR_LIT>':self.alpha}<EOL>_hold = self.ax.ishold()<EOL>self.ax.hold(True)<EOL>col = self.ax.pcolor(*args, **kw)<EOL>self.ax.hold(_hold)<EOL>self.solids = col<EOL>if self.drawedges:<EOL><INDENT>self.dividers = collections.LineCollection(self._edges(X,Y),<EOL>colors=(mpl.rcParams['<STR_LIT>'],),<EOL>linewidths=(<NUM_LIT:0.5>*mpl.rcParams['<STR_LIT>'],)<EOL>)<EOL>self.ax.add_collection(self.dividers)<EOL><DEDENT>", "docstring": "Draw the colors using :meth:`~matplotlib.axes.Axes.pcolor`;\noptionally add separators.", "id": "f17170:c0:m7"}
{"signature": "def add_lines(self, CS):", "body": "if not isinstance(CS, contour.ContourSet) or CS.filled:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>tcolors = [c[<NUM_LIT:0>] for c in CS.tcolors]<EOL>tlinewidths = [t[<NUM_LIT:0>] for t in CS.tlinewidths]<EOL>ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths)<EOL>", "docstring": "Add the lines from a non-filled\n:class:`~matplotlib.contour.ContourSet` to the colorbar.", "id": "f17170:c1:m1"}
{"signature": "def _auto_set_column_width(self, col, renderer):", "body": "cells = [key for key in self._cells if key[<NUM_LIT:1>] == col]<EOL>width = <NUM_LIT:0><EOL>for cell in cells:<EOL><INDENT>c = self._cells[cell]<EOL>width = max(c.get_required_width(renderer), width)<EOL><DEDENT>for cell in cells:<EOL><INDENT>self._cells[cell].set_width(width)<EOL><DEDENT>", "docstring": "Automagically set width for column.", "id": "f17171:c1:m10"}
{"signature": "def _offset(self, ox, oy):", "body": "for c in self._cells.itervalues():<EOL><INDENT>x, y = c.get_x(), c.get_y()<EOL>c.set_x(x+ox)<EOL>c.set_y(y+oy)<EOL><DEDENT>", "docstring": "Move all the artists by ox,oy (axes coords)", "id": "f17171:c1:m15"}
{"signature": "def _do_cell_alignment(self):", "body": "<EOL>widths = {}<EOL>heights = {}<EOL>for (row, col), cell in self._cells.iteritems():<EOL><INDENT>height = heights.setdefault(row, <NUM_LIT:0.0>)<EOL>heights[row] = max(height, cell.get_height())<EOL>width = widths.setdefault(col, <NUM_LIT:0.0>)<EOL>widths[col] = max(width, cell.get_width())<EOL><DEDENT>xpos = <NUM_LIT:0><EOL>lefts = {}<EOL>cols = widths.keys()<EOL>cols.sort()<EOL>for col in cols:<EOL><INDENT>lefts[col] = xpos<EOL>xpos += widths[col]<EOL><DEDENT>ypos = <NUM_LIT:0><EOL>bottoms = {}<EOL>rows = heights.keys()<EOL>rows.sort()<EOL>rows.reverse()<EOL>for row in rows:<EOL><INDENT>bottoms[row] = ypos<EOL>ypos += heights[row]<EOL><DEDENT>for (row, col), cell in self._cells.iteritems():<EOL><INDENT>cell.set_x(lefts[col])<EOL>cell.set_y(bottoms[row])<EOL><DEDENT>", "docstring": "Calculate row heights and column widths.\n\n        Position cells accordingly.", "id": "f17171:c1:m8"}
{"signature": "def table(ax,<EOL>cellText=None, cellColours=None,<EOL>cellLoc='<STR_LIT:right>', colWidths=None,<EOL>rowLabels=None, rowColours=None, rowLoc='<STR_LIT:left>',<EOL>colLabels=None, colColours=None, colLoc='<STR_LIT>',<EOL>loc='<STR_LIT>', bbox=None):", "body": "<EOL>if cellText is None:<EOL><INDENT>rows = len(cellColours)<EOL>cols = len(cellColours[<NUM_LIT:0>])<EOL>cellText = [['<STR_LIT>'] * rows] * cols<EOL><DEDENT>rows = len(cellText)<EOL>cols = len(cellText[<NUM_LIT:0>])<EOL>for row in cellText:<EOL><INDENT>assert len(row) == cols<EOL><DEDENT>if cellColours is not None:<EOL><INDENT>assert len(cellColours) == rows<EOL>for row in cellColours:<EOL><INDENT>assert len(row) == cols<EOL><DEDENT><DEDENT>else:<EOL><INDENT>cellColours = ['<STR_LIT:w>' * cols] * rows<EOL><DEDENT>if colWidths is None:<EOL><INDENT>colWidths = [<NUM_LIT:1.0>/cols] * cols<EOL><DEDENT>rowLabelWidth = <NUM_LIT:0><EOL>if rowLabels is None:<EOL><INDENT>if rowColours is not None:<EOL><INDENT>rowLabels = ['<STR_LIT>'] * cols<EOL>rowLabelWidth = colWidths[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>elif rowColours is None:<EOL><INDENT>rowColours = '<STR_LIT:w>' * rows<EOL><DEDENT>if rowLabels is not None:<EOL><INDENT>assert len(rowLabels) == rows<EOL><DEDENT>offset = <NUM_LIT:0><EOL>if colLabels is None:<EOL><INDENT>if colColours is not None:<EOL><INDENT>colLabels = ['<STR_LIT>'] * rows<EOL>offset = <NUM_LIT:1><EOL><DEDENT><DEDENT>elif colColours is None:<EOL><INDENT>colColours = '<STR_LIT:w>' * cols<EOL>offset = <NUM_LIT:1><EOL><DEDENT>if rowLabels is not None:<EOL><INDENT>assert len(rowLabels) == rows<EOL><DEDENT>if cellColours is None:<EOL><INDENT>cellColours = ['<STR_LIT:w>' * cols] * rows<EOL><DEDENT>table = Table(ax, loc, bbox)<EOL>height = table._approx_text_height()<EOL>for row in xrange(rows):<EOL><INDENT>for col in xrange(cols):<EOL><INDENT>table.add_cell(row+offset, col,<EOL>width=colWidths[col], 
height=height,<EOL>text=cellText[row][col],<EOL>facecolor=cellColours[row][col],<EOL>loc=cellLoc)<EOL><DEDENT><DEDENT>if colLabels is not None:<EOL><INDENT>for col in xrange(cols):<EOL><INDENT>table.add_cell(<NUM_LIT:0>, col,<EOL>width=colWidths[col], height=height,<EOL>text=colLabels[col], facecolor=colColours[col],<EOL>loc=colLoc)<EOL><DEDENT><DEDENT>if rowLabels is not None:<EOL><INDENT>for row in xrange(rows):<EOL><INDENT>table.add_cell(row+offset, -<NUM_LIT:1>,<EOL>width=rowLabelWidth or <NUM_LIT>, height=height,<EOL>text=rowLabels[row], facecolor=rowColours[row],<EOL>loc=rowLoc)<EOL><DEDENT>if rowLabelWidth == <NUM_LIT:0>:<EOL><INDENT>table.auto_set_column_width(-<NUM_LIT:1>)<EOL><DEDENT><DEDENT>ax.add_table(table)<EOL>return table<EOL>", "docstring": "TABLE(cellText=None, cellColours=None,\n      cellLoc='right', colWidths=None,\n      rowLabels=None, rowColours=None, rowLoc='left',\n      colLabels=None, colColours=None, colLoc='center',\n      loc='bottom', bbox=None)\n\nFactory function to generate a Table instance.\n\nThanks to John Gill for providing the class and table.", "id": "f17171:m0"}
{"signature": "def _get_grid_bbox(self, renderer):", "body": "boxes = [self._cells[pos].get_window_extent(renderer)<EOL>for pos in self._cells.keys()<EOL>if pos[<NUM_LIT:0>] >= <NUM_LIT:0> and pos[<NUM_LIT:1>] >= <NUM_LIT:0>]<EOL>bbox = Bbox.union(boxes)<EOL>return bbox.inverse_transformed(self.get_transform())<EOL>", "docstring": "Get a bbox, in axes co-ordinates for the cells.\n\n        Only include those in the range (0,0) to (maxRow, maxCol)", "id": "f17171:c1:m4"}
{"signature": "def get_children(self):", "body": "return self._cells.values()<EOL>", "docstring": "Return the Artists contained by the table", "id": "f17171:c1:m6"}
{"signature": "def get_texts(self):", "body": "return silent_list('<STR_LIT>', self.texts)<EOL>", "docstring": "return a list of text.Text instance in the legend", "id": "f17172:c0:m15"}
{"signature": "def draw(self, renderer):", "body": "if not self.get_visible(): return<EOL>self._update_legend_box(renderer)<EOL>renderer.open_group('<STR_LIT>')<EOL>if self._loc == <NUM_LIT:0>:<EOL><INDENT>_findoffset = self._findoffset_best<EOL><DEDENT>else:<EOL><INDENT>_findoffset = self._findoffset_loc<EOL><DEDENT>def findoffset(width, height, xdescent, ydescent):<EOL><INDENT>return _findoffset(width, height, xdescent, ydescent, renderer)<EOL><DEDENT>self._legend_box.set_offset(findoffset)<EOL>fontsize = renderer.points_to_pixels(self.fontsize)<EOL>if self._mode in [\"<STR_LIT>\"]:<EOL><INDENT>pad = <NUM_LIT:2>*(self.borderaxespad+self.borderpad)*fontsize<EOL>self._legend_box.set_width(self.parent.bbox.width-pad)<EOL><DEDENT>if self._drawFrame:<EOL><INDENT>bbox = self._legend_box.get_window_extent(renderer)<EOL>self.legendPatch.set_bounds(bbox.x0, bbox.y0,<EOL>bbox.width, bbox.height)<EOL>self.legendPatch.set_mutation_scale(fontsize)<EOL>if self.shadow:<EOL><INDENT>shadow = Shadow(self.legendPatch, <NUM_LIT:2>, -<NUM_LIT:2>)<EOL>shadow.draw(renderer)<EOL><DEDENT>self.legendPatch.draw(renderer)<EOL><DEDENT>self._legend_box.draw(renderer)<EOL>renderer.close_group('<STR_LIT>')<EOL>", "docstring": "Draw everything that belongs to the legend", "id": "f17172:c0:m5"}
{"signature": "def draw_frame(self, b):", "body": "self._drawFrame = b<EOL>", "docstring": "b is a boolean.  Set draw frame to b", "id": "f17172:c0:m10"}
{"signature": "def get_children(self):", "body": "children = []<EOL>if self._legend_box:<EOL><INDENT>children.append(self._legend_box)<EOL><DEDENT>return children<EOL>", "docstring": "return a list of child artists", "id": "f17172:c0:m11"}
{"signature": "def get_window_extent(self):", "body": "return self.legendPatch.get_window_extent()<EOL>", "docstring": "return a extent of the the legend", "id": "f17172:c0:m16"}
{"signature": "def _set_artist_props(self, a):", "body": "a.set_figure(self.figure)<EOL>for c in self.get_children():<EOL><INDENT>c.set_figure(self.figure)<EOL><DEDENT>a.set_transform(self.get_transform())<EOL>", "docstring": "set the boilerplate props for artists added to axes", "id": "f17172:c0:m2"}
{"signature": "def _findoffset_best(self, width, height, xdescent, ydescent, renderer):", "body": "ox, oy = self._find_best_position(width, height, renderer)<EOL>return ox+xdescent, oy+ydescent<EOL>", "docstring": "Heper function to locate the legend at its best position", "id": "f17172:c0:m3"}
{"signature": "def _approx_text_height(self, renderer=None):", "body": "if renderer is None:<EOL><INDENT>return self.fontsize<EOL><DEDENT>else:<EOL><INDENT>return renderer.points_to_pixels(self.fontsize)<EOL><DEDENT>", "docstring": "Return the approximate height of the text. This is used to place\nthe legend handle.", "id": "f17172:c0:m6"}
{"signature": "def get_frame(self):", "body": "return self.legendPatch<EOL>", "docstring": "return the Rectangle instance used to frame the legend", "id": "f17172:c0:m12"}
{"signature": "def set_extent(self, extent):", "body": "self._extent = extent<EOL>xmin, xmax, ymin, ymax = extent<EOL>corners = (xmin, ymin), (xmax, ymax)<EOL>self.axes.update_datalim(corners)<EOL>if self.axes._autoscaleon:<EOL><INDENT>self.axes.set_xlim((xmin, xmax))<EOL>self.axes.set_ylim((ymin, ymax))<EOL><DEDENT>", "docstring": "extent is data axes (left, right, bottom, top) for making image plots", "id": "f17173:c0:m11"}
{"signature": "def write_png(self, fname):", "body": "im = self.make_image()<EOL>rows, cols, buffer = im.as_rgba_str()<EOL>_png.write_png(buffer, cols, rows, fname)<EOL>", "docstring": "Write the image to png file with fname", "id": "f17173:c3:m6"}
{"signature": "def get_filternorm(self):", "body": "return self._filternorm<EOL>", "docstring": "return the filternorm setting", "id": "f17173:c0:m18"}
{"signature": "def set_array(self, A):", "body": "<EOL>self.set_data(A)<EOL>", "docstring": "retained for backwards compatibility - use set_data instead\n\nACCEPTS: numpy array A or PIL Image", "id": "f17173:c0:m10"}
{"signature": "def set_filterrad(self, filterrad):", "body": "r = float(filterrad)<EOL>assert(r><NUM_LIT:0>)<EOL>self._filterrad = r<EOL>", "docstring": "Set the resize filter radius only applicable to some\n        interpolation schemes -- see help for imshow\n\n        ACCEPTS: positive float", "id": "f17173:c0:m19"}
{"signature": "def changed(self):", "body": "self._imcache = None<EOL>self._rgbacache = None<EOL>cm.ScalarMappable.changed(self)<EOL>", "docstring": "Call this whenever the mappable is changed so observers can\nupdate state", "id": "f17173:c0:m4"}
{"signature": "def set_data(self, A, shape=None):", "body": "<EOL>if hasattr(A,'<STR_LIT>'):<EOL><INDENT>self._A = pil_to_array(A)<EOL><DEDENT>elif ma.isMA(A):<EOL><INDENT>self._A = A<EOL><DEDENT>else:<EOL><INDENT>self._A = np.asarray(A) <EOL><DEDENT>if self._A.dtype != np.uint8 and not np.can_cast(self._A.dtype, np.float):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>if (self._A.ndim not in (<NUM_LIT:2>, <NUM_LIT:3>) or<EOL>(self._A.ndim == <NUM_LIT:3> and self._A.shape[-<NUM_LIT:1>] not in (<NUM_LIT:3>, <NUM_LIT:4>))):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>self._imcache =None<EOL>self._rgbacache = None<EOL>self._oldxslice = None<EOL>self._oldyslice = None<EOL>", "docstring": "Set the image array\n\nACCEPTS: numpy/PIL Image A", "id": "f17173:c0:m9"}
{"signature": "def __init__(self, ax,<EOL>cmap = None,<EOL>norm = None,<EOL>interpolation=None,<EOL>origin=None,<EOL>extent=None,<EOL>filternorm=<NUM_LIT:1>,<EOL>filterrad=<NUM_LIT>,<EOL>resample = False,<EOL>**kwargs<EOL>):", "body": "martist.Artist.__init__(self)<EOL>cm.ScalarMappable.__init__(self, norm, cmap)<EOL>if origin is None: origin = rcParams['<STR_LIT>']<EOL>self.origin = origin<EOL>self._extent = extent<EOL>self.set_filternorm(filternorm)<EOL>self.set_filterrad(filterrad)<EOL>self._filterrad = filterrad<EOL>self.set_interpolation(interpolation)<EOL>self.set_resample(resample)<EOL>self.axes = ax<EOL>self._imcache = None<EOL>self.update(kwargs)<EOL>", "docstring": "interpolation and cmap default to their rc settings\n\ncmap is a colors.Colormap instance\nnorm is a colors.Normalize instance to map luminance to 0-1\n\nextent is data axes (left, right, bottom, top) for making image plots\nregistered with data plots.  Default is to label the pixel\ncenters with the zero-based row and column indices.\n\nAdditional kwargs are matplotlib.artist properties", "id": "f17173:c0:m1"}
{"signature": "def generate_fontconfig_pattern(d):", "body": "props = []<EOL>families = '<STR_LIT>'<EOL>size = '<STR_LIT>'<EOL>for key in '<STR_LIT>'.split():<EOL><INDENT>val = getattr(d, '<STR_LIT>' + key)()<EOL>if val is not None and val != []:<EOL><INDENT>if type(val) == list:<EOL><INDENT>val = [value_escape(r'<STR_LIT>', str(x)) for x in val if x is not None]<EOL>if val != []:<EOL><INDENT>val = '<STR_LIT:U+002C>'.join(val)<EOL><DEDENT><DEDENT>props.append(\"<STR_LIT>\" % (key, val))<EOL><DEDENT><DEDENT>return '<STR_LIT>'.join(props)<EOL>", "docstring": "Given a dictionary of key/value pairs, generates a fontconfig\npattern string.", "id": "f17175:m0"}
{"signature": "def post_event(self):", "body": "assert len(self.events)><NUM_LIT:0>, \"<STR_LIT>\"<EOL>if self.events[-<NUM_LIT:1>].name == '<STR_LIT>':<EOL><INDENT>self.key_event()<EOL><DEDENT>else:<EOL><INDENT>self.mouse_event()<EOL><DEDENT>", "docstring": "This will be called to process events", "id": "f17176:c1:m1"}
{"signature": "def button1( self, event ):", "body": "if event.inaxes:<EOL><INDENT>self.add_click(event)<EOL><DEDENT>else: <EOL><INDENT>BlockingInput.pop(self)<EOL><DEDENT>", "docstring": "Will be called for any event involving a button other than\nbutton 2 or 3.  This will add a click if it is inside axes.", "id": "f17176:c1:m4"}
{"signature": "def add_event(self,event):", "body": "self.events.append(event)<EOL>", "docstring": "For base class, this just appends an event to events.", "id": "f17176:c0:m4"}
{"signature": "def pop_event(self,index=-<NUM_LIT:1>):", "body": "self.events.pop(index)<EOL>", "docstring": "This removes an event from the event list.  Defaults to\nremoving last event, but an index can be supplied.  Note that\nthis does not check that there are events, much like the\nnormal pop method.  If not events exist, this will throw an\nexception.", "id": "f17176:c0:m5"}
{"signature": "def pop_click(self,index=-<NUM_LIT:1>):", "body": "self.clicks.pop(index)<EOL>if self.show_clicks:<EOL><INDENT>mark = self.marks.pop(index)<EOL>mark.remove()<EOL>self.fig.canvas.draw()<EOL><DEDENT>", "docstring": "This removes a click from the list of clicks.  Defaults to\nremoving the last click.", "id": "f17176:c1:m8"}
{"signature": "def mouse_event(self):", "body": "event = self.events[-<NUM_LIT:1>]<EOL>button = event.button<EOL>if button == <NUM_LIT:3>:<EOL><INDENT>self.button3(event)<EOL><DEDENT>elif button == <NUM_LIT:2>:<EOL><INDENT>self.button2(event)<EOL><DEDENT>else:<EOL><INDENT>self.button1(event)<EOL><DEDENT>", "docstring": "Process a mouse click event", "id": "f17176:c1:m2"}
{"signature": "def button3(self,event):", "body": "<EOL>BlockingInput.pop(self)<EOL>if self.inline:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>self.cs.pop_label()<EOL>self.cs.ax.figure.canvas.draw()<EOL><DEDENT>", "docstring": "This will be called if button 3 is clicked.  This will remove\na label if not in inline mode.  Unfortunately, if one is doing\ninline labels, then there is currently no way to fix the\nbroken contour - once humpty-dumpty is broken, he can't be put\nback together.  In inline mode, this does nothing.", "id": "f17176:c2:m2"}
{"signature": "def pop(self,index=-<NUM_LIT:1>):", "body": "self.pop_click(index)<EOL>BlockingInput.pop(self,index)<EOL>", "docstring": "This removes a click and the associated event from the object.\nDefaults to removing the last click, but any index can be\nsupplied.", "id": "f17176:c1:m9"}
{"signature": "def __call__(self, n=<NUM_LIT:1>, timeout=<NUM_LIT:30>, show_clicks=True):", "body": "self.show_clicks = show_clicks<EOL>self.clicks      = []<EOL>self.marks       = []<EOL>BlockingInput.__call__(self,n=n,timeout=timeout)<EOL>return self.clicks<EOL>", "docstring": "Blocking call to retrieve n coordinate pairs through mouse\nclicks.", "id": "f17176:c1:m11"}
{"signature": "def __call__(self, timeout=<NUM_LIT:30>):", "body": "self.keyormouse = None<EOL>BlockingInput.__call__(self,n=<NUM_LIT:1>,timeout=timeout)<EOL>return self.keyormouse<EOL>", "docstring": "Blocking call to retrieve a single mouse or key click\nReturns True if key click, False if mouse, or None if timeout", "id": "f17176:c3:m2"}
{"signature": "def set_antialiaseds(self, aa):", "body": "return self.set_antialiased(aa)<EOL>", "docstring": "alias for set_antialiased", "id": "f17178:c0:m23"}
{"signature": "def _prepare_points(self):", "body": "transform = self.get_transform()<EOL>transOffset = self._transOffset<EOL>offsets = self._offsets<EOL>paths = self.get_paths()<EOL>if self.have_units():<EOL><INDENT>paths = []<EOL>for path in self.get_paths():<EOL><INDENT>vertices = path.vertices<EOL>xs, ys = vertices[:, <NUM_LIT:0>], vertices[:, <NUM_LIT:1>]<EOL>xs = self.convert_xunits(xs)<EOL>ys = self.convert_yunits(ys)<EOL>paths.append(mpath.Path(zip(xs, ys), path.codes))<EOL><DEDENT>if len(self._offsets):<EOL><INDENT>xs = self.convert_xunits(self._offsets[:<NUM_LIT:0>])<EOL>ys = self.convert_yunits(self._offsets[:<NUM_LIT:1>])<EOL>offsets = zip(xs, ys)<EOL><DEDENT><DEDENT>offsets = np.asarray(offsets, np.float_)<EOL>if not transform.is_affine:<EOL><INDENT>paths = [transform.transform_path_non_affine(<EOL>path) for path in paths]<EOL>transform = transform.get_affine()<EOL><DEDENT>if not transOffset.is_affine:<EOL><INDENT>offsets = transOffset.transform_non_affine(offsets)<EOL>transOffset = transOffset.get_affine()<EOL><DEDENT>return transform, transOffset, offsets, paths<EOL>", "docstring": "Point prep for drawing and hit testing", "id": "f17178:c0:m7"}
{"signature": "def set_verts(self, verts, closed=True):", "body": "if closed:<EOL><INDENT>self._paths = []<EOL>for xy in verts:<EOL><INDENT>if np.ma.isMaskedArray(xy):<EOL><INDENT>if len(xy) and (xy[<NUM_LIT:0>] != xy[-<NUM_LIT:1>]).any():<EOL><INDENT>xy = np.ma.concatenate([xy, [xy[<NUM_LIT:0>]]])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>xy = np.asarray(xy)<EOL>if len(xy) and (xy[<NUM_LIT:0>] != xy[-<NUM_LIT:1>]).any():<EOL><INDENT>xy = np.concatenate([xy, [xy[<NUM_LIT:0>]]])<EOL><DEDENT><DEDENT>self._paths.append(mpath.Path(xy))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._paths = [mpath.Path(xy) for xy in verts]<EOL><DEDENT>", "docstring": "This allows one to delay initialization of the vertices.", "id": "f17178:c2:m1"}
{"signature": "def __init__(self, verts, sizes=None, closed=True, **kwargs):", "body": "Collection.__init__(self, **kwargs)<EOL>self._sizes = sizes<EOL>self.set_verts(verts, closed)<EOL>", "docstring": "*verts* is a sequence of ( *verts0*, *verts1*, ...) where\n*verts_i* is a sequence of *xy* tuples of vertices, or an\nequivalent :mod:`numpy` array of shape (*nv*, 2).\n\n*sizes* is *None* (default) or a sequence of floats that\nscale the corresponding *verts_i*.  The scaling is applied\nbefore the Artist master transform; if the latter is an identity\ntransform, then the overall scaling is such that if\n*verts_i* specify a unit square, then *sizes_i* is the area\nof that square in points^2.\nIf len(*sizes*) < *nv*, the additional values will be\ntaken cyclically from the array.\n\n*closed*, when *True*, will explicitly close the polygon.\n\n%(Collection)s", "id": "f17178:c2:m0"}
{"signature": "def set_dashes(self, ls):", "body": "return self.set_linestyle(ls)<EOL>", "docstring": "alias for set_linestyle", "id": "f17178:c0:m21"}
{"signature": "def __init__(self, patches, match_original=False, **kwargs):", "body": "if match_original:<EOL><INDENT>def determine_facecolor(patch):<EOL><INDENT>if patch.fill:<EOL><INDENT>return patch.get_facecolor()<EOL><DEDENT>return [<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>]<EOL><DEDENT>facecolors = [determine_facecolor(p) for p in patches]<EOL>edgecolors = [p.get_edgecolor() for p in patches]<EOL>linewidths = [p.get_linewidths() for p in patches]<EOL>antialiaseds = [p.get_antialiased() for p in patches]<EOL>Collection.__init__(<EOL>self,<EOL>edgecolors=edgecolors,<EOL>facecolors=facecolors,<EOL>linewidths=linewidths,<EOL>linestyles='<STR_LIT>',<EOL>antialiaseds=antialiaseds)<EOL><DEDENT>else:<EOL><INDENT>Collection.__init__(self, **kwargs)<EOL><DEDENT>paths = [p.get_transform().transform_path(p.get_path())<EOL>for p in patches]<EOL>self._paths = paths<EOL>", "docstring": "*patches*\n    a sequence of Patch objects.  This list may include\n    a heterogeneous assortment of different patch types.\n\n*match_original*\n    If True, use the colors and linewidths of the original\n    patches.  If False, new colors may be assigned by\n    providing the standard collection arguments, facecolor,\n    edgecolor, linewidths, norm or cmap.\n\nIf any of *edgecolors*, *facecolors*, *linewidths*,\n*antialiaseds* are None, they default to their\n:data:`matplotlib.rcParams` patch setting, in sequence form.\n\nThe use of :class:`~matplotlib.cm.ScalarMappable` is optional.\nIf the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not\nNone (ie a call to set_array has been made), at draw time a\ncall to scalar mappable will be made to set the face colors.", "id": "f17178:c10:m0"}
{"signature": "def update_from(self, other):", "body": "artist.Artist.update_from(self, other)<EOL>self._antialiaseds = other._antialiaseds<EOL>self._edgecolors_original = other._edgecolors_original<EOL>self._edgecolors = other._edgecolors<EOL>self._facecolors_original = other._facecolors_original<EOL>self._facecolors = other._facecolors<EOL>self._linewidths = other._linewidths<EOL>self._linestyles = other._linestyles<EOL>self._pickradius = other._pickradius<EOL>", "docstring": "copy properties from other to self", "id": "f17178:c0:m35"}
{"signature": "def set_lw(self, lw):", "body": "return self.set_linewidth(lw)<EOL>", "docstring": "alias for set_linewidth", "id": "f17178:c0:m18"}
{"signature": "def set_color(self, c):", "body": "self.set_facecolor(c)<EOL>self.set_edgecolor(c)<EOL>", "docstring": "Set both the edgecolor and the facecolor.\n\nACCEPTS: matplotlib color arg or sequence of rgba tuples\n\n.. seealso::\n    :meth:`set_facecolor`, :meth:`set_edgecolor`", "id": "f17178:c0:m24"}
{"signature": "def set_linestyles(self, ls):", "body": "return self.set_linestyle(ls)<EOL>", "docstring": "alias for set_linestyle", "id": "f17178:c0:m20"}
{"signature": "def update_scalarmappable(self):", "body": "if self._A is None:<EOL><INDENT>return<EOL><DEDENT>if self._A.ndim > <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if len(self._facecolors):<EOL><INDENT>self._facecolors = self.to_rgba(self._A, self._alpha)<EOL><DEDENT>else:<EOL><INDENT>self._edgecolors = self.to_rgba(self._A, self._alpha)<EOL><DEDENT>", "docstring": "If the scalar mappable array is not none, update colors\nfrom scalar data", "id": "f17178:c0:m34"}
{"signature": "def set_color(self, c):", "body": "self._edgecolors = _colors.colorConverter.to_rgba_array(c)<EOL>", "docstring": "Set the color(s) of the line collection.  *c* can be a\nmatplotlib color arg (all patches have same color), or a\nsequence or rgba tuples; if it is a sequence the patches will\ncycle through the sequence\n\nACCEPTS: matplotlib color arg or sequence of rgba tuples", "id": "f17178:c7:m4"}
{"signature": "def set_facecolor(self, c):", "body": "if c is None:<EOL><INDENT>c = mpl.rcParams['<STR_LIT>']<EOL><DEDENT>self._facecolors_original = c<EOL>self._facecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)<EOL>", "docstring": "Set the facecolor(s) of the collection.  *c* can be a\nmatplotlib color arg (all patches have same color), or a\nsequence or rgba tuples; if it is a sequence the patches will\ncycle through the sequence\n\nACCEPTS: matplotlib color arg or sequence of rgba tuples", "id": "f17178:c0:m25"}
{"signature": "def get_aa(self):", "body": "return self.get_antialiased()<EOL>", "docstring": "alias for get_antialiased", "id": "f17180:c0:m90"}
{"signature": "def get_pickradius(self):", "body": "return self.pickradius<EOL>", "docstring": "return the pick radius used for containment tests", "id": "f17180:c0:m3"}
{"signature": "def set_mfc(self, val):", "body": "self.set_markerfacecolor(val)<EOL>", "docstring": "alias for set_markerfacecolor", "id": "f17180:c0:m88"}
{"signature": "def get_solid_joinstyle(self):", "body": "return self._solidjoinstyle<EOL>", "docstring": "Get the join style for solid linestyles", "id": "f17180:c0:m101"}
{"signature": "def set_linestyle(self, linestyle):", "body": "<EOL>for ds in flatten([list(k.keys()) for k in (self._drawStyles_l,<EOL>self._drawStyles_s)], is_string_like):<EOL><INDENT>if linestyle.startswith(ds):<EOL><INDENT>self.set_drawstyle(ds)<EOL>if len(linestyle) > len(ds):<EOL><INDENT>linestyle = linestyle[len(ds):]<EOL><DEDENT>else:<EOL><INDENT>linestyle = '<STR_LIT:->'<EOL><DEDENT><DEDENT><DEDENT>if linestyle not in self._lineStyles:<EOL><INDENT>if linestyle in ls_mapper:<EOL><INDENT>linestyle = ls_mapper[linestyle]<EOL><DEDENT>else:<EOL><INDENT>verbose.report('<STR_LIT>' %<EOL>(linestyle, type(linestyle)))<EOL><DEDENT><DEDENT>if linestyle in ['<STR_LIT:U+0020>','<STR_LIT>']:<EOL><INDENT>linestyle = '<STR_LIT:None>'<EOL><DEDENT>self._linestyle = linestyle<EOL>", "docstring": "Set the linestyle of the line (also accepts drawstyles)\n\n\n================    =================\nlinestyle           description\n================    =================\n'-'                 solid\n'--'                dashed\n'-.'                dash_dot\n':'                 dotted\n'None'              draw nothing\n' '                 draw nothing\n''                  draw nothing\n================    =================\n\n'steps' is equivalent to 'steps-pre' and is maintained for\nbackward-compatibility.\n\n.. seealso::\n    :meth:`set_drawstyle`\n\nACCEPTS: [ '-' | '--' | '-.' | ':' | 'None' | ' ' | '' ] and\nany drawstyle in combination with a linestyle, e.g. 'steps--'.", "id": "f17180:c0:m32"}
{"signature": "def get_mec(self):", "body": "return self.get_markeredgecolor()<EOL>", "docstring": "alias for get_markeredgecolor", "id": "f17180:c0:m94"}
{"signature": "def update_from(self, other):", "body": "Artist.update_from(self, other)<EOL>self._linestyle = other._linestyle<EOL>self._linewidth = other._linewidth<EOL>self._color = other._color<EOL>self._markersize = other._markersize<EOL>self._markerfacecolor = other._markerfacecolor<EOL>self._markeredgecolor = other._markeredgecolor<EOL>self._markeredgewidth = other._markeredgewidth<EOL>self._dashSeq = other._dashSeq<EOL>self._dashcapstyle = other._dashcapstyle<EOL>self._dashjoinstyle = other._dashjoinstyle<EOL>self._solidcapstyle = other._solidcapstyle<EOL>self._solidjoinstyle = other._solidjoinstyle<EOL>self._linestyle = other._linestyle<EOL>self._marker = other._marker<EOL>self._drawstyle = other._drawstyle<EOL>", "docstring": "copy properties from other to self", "id": "f17180:c0:m80"}
{"signature": "def set_data(self, *args):", "body": "if len(args)==<NUM_LIT:1>:<EOL><INDENT>x, y = args[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>x, y = args<EOL><DEDENT>not_masked = <NUM_LIT:0><EOL>if not ma.isMaskedArray(x):<EOL><INDENT>x = np.asarray(x)<EOL>not_masked += <NUM_LIT:1><EOL><DEDENT>if not ma.isMaskedArray(y):<EOL><INDENT>y = np.asarray(y)<EOL>not_masked += <NUM_LIT:1><EOL><DEDENT>if (not_masked < <NUM_LIT:2> or<EOL>(x is not self._xorig and<EOL>(x.shape != self._xorig.shape or np.any(x != self._xorig))) or<EOL>(y is not self._yorig and<EOL>(y.shape != self._yorig.shape or np.any(y != self._yorig)))):<EOL><INDENT>self._xorig = x<EOL>self._yorig = y<EOL>self._invalid = True<EOL><DEDENT>", "docstring": "Set the x and y data\n\nACCEPTS: 2D array", "id": "f17180:c0:m8"}
{"signature": "def get_lw(self):", "body": "return self.get_linewidth()<EOL>", "docstring": "alias for get_linewidth", "id": "f17180:c0:m93"}
{"signature": "def set_lw(self, val):", "body": "self.set_linewidth(val)<EOL>", "docstring": "alias for set_linewidth", "id": "f17180:c0:m85"}
{"signature": "def get_mew(self):", "body": "return self.get_markeredgewidth()<EOL>", "docstring": "alias for get_markeredgewidth", "id": "f17180:c0:m95"}
{"signature": "def get_path(self):", "body": "if self._invalid:<EOL><INDENT>self.recache()<EOL><DEDENT>return self._path<EOL>", "docstring": "Return the :class:`~matplotlib.path.Path` object associated\nwith this line.", "id": "f17180:c0:m26"}
{"signature": "def set_dash_joinstyle(self, s):", "body": "s = s.lower()<EOL>if s not in self.validJoin:<EOL><INDENT>raise ValueError('<STR_LIT>' % (s,)<EOL>+ '<STR_LIT>' % (self.validJoin,))<EOL><DEDENT>self._dashjoinstyle = s<EOL>", "docstring": "Set the join style for dashed linestyles\nACCEPTS: ['miter' | 'round' | 'bevel']", "id": "f17180:c0:m98"}
{"signature": "def set_mec(self, val):", "body": "self.set_markeredgecolor(val)<EOL>", "docstring": "alias for set_markeredgecolor", "id": "f17180:c0:m86"}
{"signature": "def get_ydata(self, orig=True):", "body": "if orig:<EOL><INDENT>return self._yorig<EOL><DEDENT>if self._invalid:<EOL><INDENT>self.recache()<EOL><DEDENT>return self._y<EOL>", "docstring": "Return the ydata.\n\nIf *orig* is *True*, return the original data, else the\nprocessed data.", "id": "f17180:c0:m25"}
{"signature": "def set_dashes(self, seq):", "body": "if seq == (None, None) or len(seq)==<NUM_LIT:0>:<EOL><INDENT>self.set_linestyle('<STR_LIT:->')<EOL><DEDENT>else:<EOL><INDENT>self.set_linestyle('<STR_LIT>')<EOL><DEDENT>self._dashSeq = seq<EOL>", "docstring": "Set the dash sequence, sequence of dashes with on off ink in\npoints.  If seq is empty or if seq = (None, None), the\nlinestyle will be set to solid.\n\nACCEPTS: sequence of on/off ink in points", "id": "f17180:c0:m40"}
{"signature": "def get_mfc(self):", "body": "return self.get_markerfacecolor()<EOL>", "docstring": "alias for get_markerfacecolor", "id": "f17180:c0:m96"}
{"signature": "def set_transform(self, t):", "body": "Artist.set_transform(self, t)<EOL>self._invalid = True<EOL>", "docstring": "set the Transformation instance used by this artist\n\nACCEPTS: a :class:`matplotlib.transforms.Transform` instance", "id": "f17180:c0:m10"}
{"signature": "def __init__(self, line):", "body": "if not hasattr(line, '<STR_LIT>'):<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>if line.get_picker() is None:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>self.axes = line.axes<EOL>self.line = line<EOL>self.canvas = self.axes.figure.canvas<EOL>self.cid = self.canvas.mpl_connect('<STR_LIT>', self.onpick)<EOL>self.ind = set()<EOL>", "docstring": "Initialize the class with a :class:`matplotlib.lines.Line2D`\ninstance.  The line should already be added to some\n:class:`matplotlib.axes.Axes` instance and should have the\npicker property set.", "id": "f17180:c1:m0"}
{"signature": "def set_ms(self, val):", "body": "self.set_markersize(val)<EOL>", "docstring": "alias for set_markersize", "id": "f17180:c0:m89"}
{"signature": "def set_c(self, val):", "body": "self.set_color(val)<EOL>", "docstring": "alias for set_color", "id": "f17180:c0:m83"}
{"signature": "def get_dash_capstyle(self):", "body": "return self._dashcapstyle<EOL>", "docstring": "Get the cap style for dashed linestyles", "id": "f17180:c0:m104"}
{"signature": "def get_xdata(self, orig=True):", "body": "if orig:<EOL><INDENT>return self._xorig<EOL><DEDENT>if self._invalid:<EOL><INDENT>self.recache()<EOL><DEDENT>return self._x<EOL>", "docstring": "Return the xdata.\n\nIf *orig* is *True*, return the original data, else the\nprocessed data.", "id": "f17180:c0:m24"}
{"signature": "def set_dash_capstyle(self, s):", "body": "s = s.lower()<EOL>if s not in self.validCap:<EOL><INDENT>raise ValueError('<STR_LIT>' % (s,)<EOL>+ '<STR_LIT>' % (self.validCap,))<EOL><DEDENT>self._dashcapstyle = s<EOL>", "docstring": "Set the cap style for dashed linestyles\n\nACCEPTS: ['butt' | 'round' | 'projecting']", "id": "f17180:c0:m102"}
{"signature": "def set_solid_capstyle(self, s):", "body": "s = s.lower()<EOL>if s not in self.validCap:<EOL><INDENT>raise ValueError('<STR_LIT>' % (s,)<EOL>+ '<STR_LIT>' % (self.validCap,))<EOL><DEDENT>self._solidcapstyle = s<EOL>", "docstring": "Set the cap style for solid linestyles\n\nACCEPTS: ['butt' | 'round' |  'projecting']", "id": "f17180:c0:m103"}
{"signature": "def can_zoom(self):", "body": "return False<EOL>", "docstring": "Return True if this axes support the zoom box", "id": "f17181:c0:m17"}
{"signature": "def set_latitude_grid(self, degrees):", "body": "number = (<NUM_LIT> / degrees) + <NUM_LIT:1><EOL>self.yaxis.set_major_locator(<EOL>FixedLocator(<EOL>np.linspace(-np.pi / <NUM_LIT>, np.pi / <NUM_LIT>, number, True)[<NUM_LIT:1>:-<NUM_LIT:1>]))<EOL>self._latitude_degrees = degrees<EOL>self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))<EOL>", "docstring": "Set the number of degrees between each longitude grid.", "id": "f17181:c0:m14"}
{"signature": "def set_longitude_grid(self, degrees):", "body": "number = (<NUM_LIT> / degrees) + <NUM_LIT:1><EOL>self.xaxis.set_major_locator(<EOL>FixedLocator(<EOL>np.linspace(-np.pi, np.pi, number, True)[<NUM_LIT:1>:-<NUM_LIT:1>]))<EOL>self._logitude_degrees = degrees<EOL>self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))<EOL>", "docstring": "Set the number of degrees between each longitude grid.", "id": "f17181:c0:m13"}
{"signature": "def can_zoom(self):", "body": "return False<EOL>", "docstring": "Return True if this axes support the zoom box", "id": "f17182:c0:m19"}
{"signature": "def set_thetagrids(self, angles, labels=None, frac=None,<EOL>**kwargs):", "body": "angles = npy.asarray(angles, npy.float_)<EOL>self.set_xticks(angles * (npy.pi / <NUM_LIT>))<EOL>if labels is not None:<EOL><INDENT>self.set_xticklabels(labels)<EOL><DEDENT>if frac is not None:<EOL><INDENT>self._theta_label1_position.clear().translate(<NUM_LIT:0.0>, frac)<EOL>self._theta_label2_position.clear().translate(<NUM_LIT:0.0>, <NUM_LIT:1.0> / frac)<EOL><DEDENT>for t in self.xaxis.get_ticklabels():<EOL><INDENT>t.update(kwargs)<EOL><DEDENT>return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()<EOL>", "docstring": "Set the angles at which to place the theta grids (these\ngridlines are equal along the theta dimension).  *angles* is in\ndegrees.\n\n*labels*, if not None, is a ``len(angles)`` list of strings of\nthe labels to use at each angle.\n\nIf *labels* is None, the labels will be ``fmt %% angle``\n\n*frac* is the fraction of the polar axes radius at which to\nplace the label (1 is the edge). Eg. 1.05 is outside the axes\nand 0.95 is inside the axes.\n\nReturn value is a list of tuples (*line*, *label*), where\n*line* is :class:`~matplotlib.lines.Line2D` instances and the\n*label* is :class:`~matplotlib.text.Text` instances.\n\nkwargs are optional text properties for the labels:\n\n%(Text)s\n\nACCEPTS: sequence of floats", "id": "f17182:c0:m13"}
{"signature": "def projection_factory(projection, figure, rect, **kwargs):", "body": "return get_projection_class(projection)(figure, rect, **kwargs)<EOL>", "docstring": "Get a new projection instance.\n\n*projection* is a projection name.\n\n*figure* is a figure to add the axes to.\n\n*rect* is a :class:`~matplotlib.transforms.Bbox` object specifying\nthe location of the axes within the figure.\n\nAny other kwargs are passed along to the specific projection\nconstructor being used.", "id": "f17183:m2"}
{"signature": "def get_projection_class(self, name):", "body": "return self._all_projection_types[name]<EOL>", "docstring": "Get a projection class from its *name*.", "id": "f17183:c0:m2"}
{"signature": "def register(self, *projections):", "body": "for projection in projections:<EOL><INDENT>name = projection.name<EOL>self._all_projection_types[name] = projection<EOL><DEDENT>", "docstring": "Register a new set of projection(s).", "id": "f17183:c0:m1"}
{"signature": "def get_font_config(self):", "body": "if self._rc_cache is None:<EOL><INDENT>self._rc_cache = dict([(k,None) for k in self._rc_cache_keys])<EOL><DEDENT>changed = [par for par in self._rc_cache_keys if rcParams[par] !=self._rc_cache[par]]<EOL>if changed:<EOL><INDENT>if DEBUG: print('<STR_LIT>', changed)<EOL>for k in changed:<EOL><INDENT>if DEBUG:<EOL><INDENT>print('<STR_LIT>' %(k, self._rc_cache[k], rcParams[k]))<EOL><DEDENT>self._rc_cache[k] = copy.deepcopy(rcParams[k])<EOL><DEDENT>if DEBUG: print('<STR_LIT>', self._fontconfig)<EOL>self.__init__()<EOL><DEDENT>if DEBUG: print('<STR_LIT>', self._fontconfig)<EOL>return self._fontconfig<EOL>", "docstring": "Reinitializes self if relevant rcParams on have changed.", "id": "f17185:c0:m2"}
{"signature": "def get_grey(self, tex, fontsize=None, dpi=None):", "body": "key = tex, self.get_font_config(), fontsize, dpi<EOL>alpha = self.grey_arrayd.get(key)<EOL>if alpha is None:<EOL><INDENT>pngfile = self.make_png(tex, fontsize, dpi)<EOL>X = read_png(os.path.join(self.texcache, pngfile))<EOL>if rcParams['<STR_LIT>'] is not None:<EOL><INDENT>hack = rcParams['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>hack = self._dvipng_hack_alpha<EOL><DEDENT>if hack:<EOL><INDENT>alpha = <NUM_LIT:1>-X[:,:,<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>alpha = X[:,:,-<NUM_LIT:1>]<EOL><DEDENT>self.grey_arrayd[key] = alpha<EOL><DEDENT>return alpha<EOL>", "docstring": "returns the alpha channel", "id": "f17185:c0:m11"}
{"signature": "def _get_shell_cmd(self, *args):", "body": "if sys.platform == '<STR_LIT:win32>':<EOL><INDENT>command = ['<STR_LIT:%s>'% os.path.splitdrive(self.texcache)[<NUM_LIT:0>]]<EOL><DEDENT>else:<EOL><INDENT>command = []<EOL><DEDENT>command.extend(args)<EOL>return '<STR_LIT>'.join(command)<EOL>", "docstring": "On windows, changing directories can be complicated by the presence of\nmultiple drives. get_shell_cmd deals with this issue.", "id": "f17185:c0:m5"}
{"signature": "def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:0>)):", "body": "if not fontsize: fontsize = rcParams['<STR_LIT>']<EOL>if not dpi: dpi = rcParams['<STR_LIT>']<EOL>r,g,b = rgb<EOL>key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)<EOL>Z = self.rgba_arrayd.get(key)<EOL>if Z is None:<EOL><INDENT>alpha = self.get_grey(tex, fontsize, dpi)<EOL>Z = np.zeros((alpha.shape[<NUM_LIT:0>], alpha.shape[<NUM_LIT:1>], <NUM_LIT:4>), np.float)<EOL>Z[:,:,<NUM_LIT:0>] = r<EOL>Z[:,:,<NUM_LIT:1>] = g<EOL>Z[:,:,<NUM_LIT:2>] = b<EOL>Z[:,:,<NUM_LIT:3>] = alpha<EOL>self.rgba_arrayd[key] = Z<EOL><DEDENT>return Z<EOL>", "docstring": "Returns latex's rendering of the tex string as an rgba array", "id": "f17185:c0:m12"}
{"signature": "def get_basefile(self, tex, fontsize, dpi=None):", "body": "s = '<STR_LIT>'.join([tex, self.get_font_config(), '<STR_LIT>'%fontsize,<EOL>self.get_custom_preamble(), str(dpi or '<STR_LIT>')])<EOL>bytes = str(s).encode('<STR_LIT:utf-8>')<EOL>return os.path.join(self.texcache, md5(bytes).hexdigest())<EOL>", "docstring": "returns a filename based on a hash of the string, fontsize, and dpi", "id": "f17185:c0:m1"}
{"signature": "def get_font_preamble(self):", "body": "return self._font_preamble<EOL>", "docstring": "returns a string containing font configuration for the tex preamble", "id": "f17185:c0:m3"}
{"signature": "def fully_overlaps(self, other):", "body": "ax1, ay1, ax2, ay2 = self._get_extents()<EOL>bx1, by1, bx2, by2 = other._get_extents()<EOL>if ax2 < ax1:<EOL><INDENT>ax2, ax1 = ax1, ax2<EOL><DEDENT>if ay2 < ay1:<EOL><INDENT>ay2, ay1 = ay1, ay2<EOL><DEDENT>if bx2 < bx1:<EOL><INDENT>bx2, bx1 = bx1, bx2<EOL><DEDENT>if by2 < by1:<EOL><INDENT>by2, by1 = by1, by2<EOL><DEDENT>return not ((bx2 <= ax1) or<EOL>(by2 <= ay1) or<EOL>(bx1 >= ax2) or<EOL>(by1 >= ay2))<EOL>", "docstring": "Returns True if this bounding box overlaps with the given\nbounding box *other*, but not on its edge alone.", "id": "f17186:c1:m30"}
{"signature": "def __radd__(self, other):", "body": "if isinstance(other, Transform):<EOL><INDENT>return composite_transform_factory(other, self)<EOL><DEDENT>raise TypeError(<EOL>\"<STR_LIT>\" % type(other))<EOL>", "docstring": "Composes two transforms together such that *self* is followed\nby *other*.", "id": "f17186:c4:m1"}
{"signature": "def union(bboxes):", "body": "assert(len(bboxes))<EOL>if len(bboxes) == <NUM_LIT:1>:<EOL><INDENT>return bboxes[<NUM_LIT:0>]<EOL><DEDENT>x0 = np.inf<EOL>y0 = np.inf<EOL>x1 = -np.inf<EOL>y1 = -np.inf<EOL>for bbox in bboxes:<EOL><INDENT>points = bbox.get_points()<EOL>xs = points[:, <NUM_LIT:0>]<EOL>ys = points[:, <NUM_LIT:1>]<EOL>x0 = min(x0, np.min(xs))<EOL>y0 = min(y0, np.min(ys))<EOL>x1 = max(x1, np.max(xs))<EOL>y1 = max(y1, np.max(ys))<EOL><DEDENT>return Bbox.from_extents(x0, y0, x1, y1)<EOL>", "docstring": "Return a :class:`Bbox` that contains all of the given bboxes.", "id": "f17186:c1:m45"}
{"signature": "def splity(self, *args):", "body": "boxes = []<EOL>yf = [<NUM_LIT:0>] + list(args) + [<NUM_LIT:1>]<EOL>x0, y0, x1, y1 = self._get_extents()<EOL>h = y1 - y0<EOL>for yf0, yf1 in zip(yf[:-<NUM_LIT:1>], yf[<NUM_LIT:1>:]):<EOL><INDENT>boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]))<EOL><DEDENT>return boxes<EOL>", "docstring": "e.g., ``bbox.splitx(f1, f2, ...)``\n\nReturns a list of new :class:`Bbox` objects formed by\nsplitting the original one with horizontal lines at fractional\npositions *f1*, *f2*, ...", "id": "f17186:c1:m37"}
{"signature": "def containsx(self, x):", "body": "x0, x1 = self.intervalx<EOL>return ((x0 < x1<EOL>and (x >= x0 and x <= x1))<EOL>or (x >= x1 and x <= x0))<EOL>", "docstring": "Returns True if *x* is between or equal to :attr:`x0` and\n:attr:`x1`.", "id": "f17186:c1:m23"}
{"signature": "def transformed(self, transform):", "body": "return Bbox(transform.transform(self.get_points()))<EOL>", "docstring": "Return a new :class:`Bbox` object, statically transformed by\nthe given transform.", "id": "f17186:c1:m31"}
{"signature": "def set(self, child):", "body": "assert child.input_dims == self.input_dims<EOL>assert child.output_dims == self.output_dims<EOL>self._set(child)<EOL>self._invalid = <NUM_LIT:0><EOL>self.invalidate()<EOL>self._invalid = <NUM_LIT:0><EOL>", "docstring": "Replace the current child of this transform with another one.\n\nThe new child must have the same number of input and output\ndimensions as the current child.", "id": "f17186:c5:m4"}
{"signature": "def transform_non_affine(self, values):", "body": "return self.transform(values)<EOL>", "docstring": "Performs only the non-affine part of the transformation.\n\n``transform(values)`` is always equivalent to\n``transform_affine(transform_non_affine(values))``.\n\nIn non-affine transformations, this is generally equivalent to\n``transform(values)``.  In affine transformations, this is\nalways a no-op.\n\nAccepts a numpy array of shape (N x :attr:`input_dims`) and\nreturns a numpy array of shape (N x :attr:`output_dims`).", "id": "f17186:c4:m5"}
{"signature": "def shrunk_to_aspect(self, box_aspect, container = None, fig_aspect = <NUM_LIT:1.0>):", "body": "assert box_aspect > <NUM_LIT:0> and fig_aspect > <NUM_LIT:0><EOL>if container is None:<EOL><INDENT>container = self<EOL><DEDENT>w, h = container.size<EOL>H = w * box_aspect/fig_aspect<EOL>if H <= h:<EOL><INDENT>W = w<EOL><DEDENT>else:<EOL><INDENT>W = h * fig_aspect/box_aspect<EOL>H = h<EOL><DEDENT>return Bbox([self._points[<NUM_LIT:0>],<EOL>self._points[<NUM_LIT:0>] + (W, H)])<EOL>", "docstring": "Return a copy of the :class:`Bbox`, shrunk so that it is as\nlarge as it can be while having the desired aspect ratio,\n*box_aspect*.  If the box coordinates are relative---that\nis, fractions of a larger box such as a figure---then the\nphysical aspect ratio of that figure is specified with\n*fig_aspect*, so that *box_aspect* can also be given as a\nratio of the absolute dimensions, not the relative dimensions.", "id": "f17186:c1:m35"}
{"signature": "def is_unit(self):", "body": "return list(self.get_points().flatten()) == [<NUM_LIT:0.>, <NUM_LIT:0.>, <NUM_LIT:1.>, <NUM_LIT:1.>]<EOL>", "docstring": "Returns True if the :class:`Bbox` is the unit bounding box\nfrom (0, 0) to (1, 1).", "id": "f17186:c1:m2"}
{"signature": "def set_matrix(self, mtx):", "body": "self._mtx = mtx<EOL>self.invalidate()<EOL>", "docstring": "Set the underlying transformation matrix from a 3x3 numpy array::\n\n  a c e\n  b d f\n  0 0 1", "id": "f17186:c8:m5"}
{"signature": "def translated(self, tx, ty):", "body": "return Bbox(self._points + (tx, ty))<EOL>", "docstring": "Return a copy of the :class:`Bbox`, statically translated by\n*tx* and *ty*.", "id": "f17186:c1:m42"}
{"signature": "def get_transformed_points_and_affine(self):", "body": "self._revalidate()<EOL>return self._transformed_points, self.get_affine()<EOL>", "docstring": "Return a copy of the child path, with the non-affine part of\nthe transform already applied, along with the affine part of\nthe path necessary to complete the transformation.  Unlike\n:meth:`get_transformed_path_and_affine`, no interpolation will\nbe performed.", "id": "f17186:c18:m2"}
{"signature": "def __init__(self, boxout):", "body": "assert boxout.is_bbox<EOL>Affine2DBase.__init__(self)<EOL>self._boxout = boxout<EOL>self.set_children(boxout)<EOL>self._mtx = None<EOL>self._inverted = None<EOL>", "docstring": "Create a new :class:`BboxTransformTo` that linearly transforms\npoints from the unit bounding box to *boxout*.", "id": "f17186:c15:m0"}
{"signature": "def transform_path_non_affine(self, path):", "body": "return Path(self.transform_non_affine(path.vertices), path.codes)<EOL>", "docstring": "Returns a copy of path, transformed only by the non-affine\npart of this transform.\n\n*path*: a :class:`~matplotlib.path.Path` instance.\n\n``transform_path(path)`` is equivalent to\n``transform_path_affine(transform_path_non_affine(values))``.", "id": "f17186:c4:m10"}
{"signature": "def set(self, other):", "body": "assert isinstance(other, Affine2DBase)<EOL>self._mtx = other.get_matrix()<EOL>self.invalidate()<EOL>", "docstring": "Set this transformation from the frozen copy of another\n:class:`Affine2DBase` object.", "id": "f17186:c8:m6"}
{"signature": "def transform(self, values):", "body": "raise NotImplementedError()<EOL>", "docstring": "Performs the transformation on the given array of values.\n\nAccepts a numpy array of shape (N x :attr:`input_dims`) and\nreturns a numpy array of shape (N x :attr:`output_dims`).", "id": "f17186:c4:m3"}
{"signature": "def contains(self, x, y):", "body": "return self.containsx(x) and self.containsy(y)<EOL>", "docstring": "Returns *True* if (*x*, *y*) is a coordinate inside the\nbounding box or on its edge.", "id": "f17186:c1:m25"}
{"signature": "def shrunk(self, mx, my):", "body": "w, h = self.size<EOL>return Bbox([self._points[<NUM_LIT:0>],<EOL>self._points[<NUM_LIT:0>] + [mx * w, my * h]])<EOL>", "docstring": "Return a copy of the :class:`Bbox`, shrunk by the factor *mx*\nin the *x* direction and the factor *my* in the *y* direction.\nThe lower left corner of the box remains unchanged.  Normally\n*mx* and *my* will be less than 1, but this is not enforced.", "id": "f17186:c1:m34"}
{"signature": "def set_points(self, points):", "body": "if np.any(self._points != points):<EOL><INDENT>self._points = points<EOL>self.invalidate()<EOL><DEDENT>", "docstring": "Set the points of the bounding box directly from a numpy array\nof the form: [[x0, y0], [x1, y1]].  No error checking is\nperformed, as this method is mainly for internal use.", "id": "f17186:c2:m22"}
{"signature": "def set(self, other):", "body": "if np.any(self._points != other.get_points()):<EOL><INDENT>self._points = other.get_points()<EOL>self.invalidate()<EOL><DEDENT>", "docstring": "Set this bounding box from the \"frozen\" bounds of another\n:class:`Bbox`.", "id": "f17186:c2:m23"}
{"signature": "def __init__(self, path, transform):", "body": "assert isinstance(transform, Transform)<EOL>TransformNode.__init__(self)<EOL>self._path = path<EOL>self._transform = transform<EOL>self.set_children(transform)<EOL>self._transformed_path = None<EOL>self._transformed_points = None<EOL>", "docstring": "Create a new :class:`TransformedPath` from the given\n:class:`~matplotlib.path.Path` and :class:`Transform`.", "id": "f17186:c18:m0"}
{"signature": "def get_affine(self):", "body": "return IdentityTransform()<EOL>", "docstring": "Get the affine part of this transform.", "id": "f17186:c4:m6"}
{"signature": "def inverse_transformed(self, transform):", "body": "return Bbox(transform.inverted().transform(self.get_points()))<EOL>", "docstring": "Return a new :class:`Bbox` object, statically transformed by\nthe inverse of the given transform.", "id": "f17186:c1:m32"}
{"signature": "def get_matrix(self):", "body": "self._invalid = <NUM_LIT:0><EOL>return self._mtx<EOL>", "docstring": "Get the underlying transformation matrix as a 3x3 numpy array::\n\n  a c e\n  b d f\n  0 0 1", "id": "f17186:c8:m4"}
{"signature": "def update_from_path(self, path, ignore=None, updatex=True, updatey=True):", "body": "if ignore is None:<EOL><INDENT>ignore = self._ignore<EOL><DEDENT>if path.vertices.size == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>points, minpos, changed = update_path_extents(<EOL>path, None, self._points, self._minpos, ignore)<EOL>if changed:<EOL><INDENT>self.invalidate()<EOL>if updatex:<EOL><INDENT>self._points[:,<NUM_LIT:0>] = points[:,<NUM_LIT:0>]<EOL>self._minpos[<NUM_LIT:0>] = minpos[<NUM_LIT:0>]<EOL><DEDENT>if updatey:<EOL><INDENT>self._points[:,<NUM_LIT:1>] = points[:,<NUM_LIT:1>]<EOL>self._minpos[<NUM_LIT:1>] = minpos[<NUM_LIT:1>]<EOL><DEDENT><DEDENT>", "docstring": "Update the bounds of the :class:`Bbox` based on the passed in\ndata.  After updating, the bounds will have positive *width*\nand *height*; *x0* and *y0* will be the minimal values.\n\n*path*: a :class:`~matplotlib.path.Path` instance\n\n*ignore*:\n   - when True, ignore the existing bounds of the :class:`Bbox`.\n   - when False, include the existing bounds of the :class:`Bbox`.\n   - when None, use the last value passed to :meth:`ignore`.\n\n*updatex*: when True, update the x values\n\n*updatey*: when True, update the y values", "id": "f17186:c2:m7"}
{"signature": "def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):", "body": "if len(xy) == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>path = Path(xy)<EOL>self.update_from_path(path, ignore=ignore,<EOL>updatex=updatex, updatey=updatey)<EOL>", "docstring": "Update the bounds of the :class:`Bbox` based on the passed in\ndata.  After updating, the bounds will have positive *width*\nand *height*; *x0* and *y0* will be the minimal values.\n\n*xy*: a numpy array of 2D points\n\n*ignore*:\n   - when True, ignore the existing bounds of the :class:`Bbox`.\n   - when False, include the existing bounds of the :class:`Bbox`.\n   - when None, use the last value passed to :meth:`ignore`.\n\n*updatex*: when True, update the x values\n\n*updatey*: when True, update the y values", "id": "f17186:c2:m8"}
{"signature": "def ignore(self, value):", "body": "self._ignore = value<EOL>", "docstring": "Set whether the existing bounds of the box should be ignored\nby subsequent calls to :meth:`update_from_data` or\n:meth:`update_from_data_xy`.\n\n*value*:\n\n   - When True, subsequent calls to :meth:`update_from_data`\n     will ignore the existing bounds of the :class:`Bbox`.\n\n   - When False, subsequent calls to :meth:`update_from_data`\n     will include the existing bounds of the :class:`Bbox`.", "id": "f17186:c2:m5"}
{"signature": "def invalidate(self):", "body": "<EOL>value = (self.is_affine) and self.INVALID_AFFINE or self.INVALID<EOL>if self._invalid == value:<EOL><INDENT>return<EOL><DEDENT>if not len(self._parents):<EOL><INDENT>self._invalid = value<EOL>return<EOL><DEDENT>stack = [self]<EOL>while len(stack):<EOL><INDENT>root = stack.pop()<EOL>if root._invalid != value or root.pass_through:<EOL><INDENT>root._invalid = self.INVALID<EOL>stack.extend(list(root._parents.keys()))<EOL><DEDENT><DEDENT>", "docstring": "Invalidate this :class:`TransformNode` and all of its\nancestors.  Should be called any time the transform changes.", "id": "f17186:c0:m2"}
{"signature": "def __init__(self, matrix = None):", "body": "Affine2DBase.__init__(self)<EOL>if matrix is None:<EOL><INDENT>matrix = np.identity(<NUM_LIT:3>)<EOL><DEDENT>elif DEBUG:<EOL><INDENT>matrix = np.asarray(matrix, np.float_)<EOL>assert matrix.shape == (<NUM_LIT:3>, <NUM_LIT:3>)<EOL><DEDENT>self._mtx = matrix<EOL>self._invalid = <NUM_LIT:0><EOL>", "docstring": "Initialize an Affine transform from a 3x3 numpy float array::\n\n  a c e\n  b d f\n  0 0 1\n\nIf *matrix* is None, initialize with the identity transform.", "id": "f17186:c8:m0"}
{"signature": "def __init__(self, x_transform, y_transform):", "body": "<EOL>Transform.__init__(self)<EOL>self._x = x_transform<EOL>self._y = y_transform<EOL>self.set_children(x_transform, y_transform)<EOL>self._affine = None<EOL>", "docstring": "Create a new \"blended\" transform using *x_transform* to\ntransform the *x*-axis and *y_transform* to transform the\n*y*-axis.\n\nYou will generally not call this constructor directly but use\nthe :func:`blended_transform_factory` function instead, which\ncan determine automatically which kind of blended transform to\ncreate.", "id": "f17186:c10:m0"}
{"signature": "def anchored(self, c, container = None):", "body": "if container is None:<EOL><INDENT>container = self<EOL><DEDENT>l, b, w, h = container.bounds<EOL>if isinstance(c, str):<EOL><INDENT>cx, cy = self.coefs[c]<EOL><DEDENT>else:<EOL><INDENT>cx, cy = c<EOL><DEDENT>L, B, W, H = self.bounds<EOL>return Bbox(self._points +<EOL>[(l + cx * (w-W)) - L,<EOL>(b + cy * (h-H)) - B])<EOL>", "docstring": "Return a copy of the :class:`Bbox`, shifted to position *c*\nwithin a container.\n\n*c*: may be either:\n\n  * a sequence (*cx*, *cy*) where *cx* and *cy* range from 0\n    to 1, where 0 is left or bottom and 1 is right or top\n\n  * a string:\n    - 'C' for centered\n    - 'S' for bottom-center\n    - 'SE' for bottom-left\n    - 'E' for left\n    - etc.\n\nOptional argument *container* is the box within which the\n:class:`Bbox` is positioned; it defaults to the initial\n:class:`Bbox`.", "id": "f17186:c1:m33"}
{"signature": "def __init__(self, bbox, transform):", "body": "assert bbox.is_bbox<EOL>assert isinstance(transform, Transform)<EOL>assert transform.input_dims == <NUM_LIT:2><EOL>assert transform.output_dims == <NUM_LIT:2><EOL>BboxBase.__init__(self)<EOL>self._bbox = bbox<EOL>self._transform = transform<EOL>self.set_children(bbox, transform)<EOL>self._points = None<EOL>", "docstring": "*bbox*: a child :class:`Bbox`\n\n*transform*: a 2D :class:`Transform`", "id": "f17186:c3:m0"}
{"signature": "def transform_point(self, point):", "body": "assert len(point) == self.input_dims<EOL>return self.transform(np.asarray([point]))[<NUM_LIT:0>]<EOL>", "docstring": "A convenience function that returns the transformed copy of a\nsingle point.\n\nThe point is given as a sequence of length :attr:`input_dims`.\nThe transformed point is returned as a sequence of length\n:attr:`output_dims`.", "id": "f17186:c4:m7"}
{"signature": "def __add__(self, other):", "body": "if isinstance(other, Transform):<EOL><INDENT>return composite_transform_factory(self, other)<EOL><DEDENT>raise TypeError(<EOL>\"<STR_LIT>\" % type(other))<EOL>", "docstring": "Composes two transforms together such that *self* is followed\nby *other*.", "id": "f17186:c4:m0"}
{"signature": "def fully_contains(self, x, y):", "body": "return self.fully_containsx(x)and self.fully_containsy(y)<EOL>", "docstring": "Returns True if (*x*, *y*) is a coordinate inside the bounding\nbox, but not on its edge.", "id": "f17186:c1:m29"}
{"signature": "def from_extents(*args):", "body": "points = np.array(args, dtype=np.float_).reshape(<NUM_LIT:2>, <NUM_LIT:2>)<EOL>return Bbox(points)<EOL>", "docstring": "(staticmethod) Create a new Bbox from *left*, *bottom*,\n*right* and *top*.\n\nThe *y*-axis increases upwards.", "id": "f17186:c2:m3"}
{"signature": "def transform_path_affine(self, path):", "body": "return path<EOL>", "docstring": "Returns a copy of path, transformed only by the affine part of\nthis transform.\n\n*path*: a :class:`~matplotlib.path.Path` instance.\n\n``transform_path(path)`` is equivalent to\n``transform_path_affine(transform_path_non_affine(values))``.", "id": "f17186:c4:m9"}
{"signature": "def rotate(self, theta):", "body": "a = np.cos(theta)<EOL>b = np.sin(theta)<EOL>rotate_mtx = np.array(<EOL>[[a, -b, <NUM_LIT:0.0>], [b, a, <NUM_LIT:0.0>], [<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:1.0>]],<EOL>np.float_)<EOL>self._mtx = np.dot(rotate_mtx, self._mtx)<EOL>self.invalidate()<EOL>return self<EOL>", "docstring": "Add a rotation (in radians) to this transform in place.\n\nReturns *self*, so this method can easily be chained with more\ncalls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`\nand :meth:`scale`.", "id": "f17186:c8:m9"}
{"signature": "def identity():", "body": "return Affine2D(np.identity(<NUM_LIT:3>))<EOL>", "docstring": "(staticmethod) Return a new :class:`Affine2D` object that is\nthe identity transform.\n\nUnless this transform will be mutated later on, consider using\nthe faster :class:`IdentityTransform` class instead.", "id": "f17186:c8:m7"}
{"signature": "def get_transform(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Return the :class:`~matplotlib.transforms.Transform` object\nassociated with this scale.", "id": "f17187:c0:m0"}
{"signature": "def register_scale(scale_class):", "body": "_scale_mapping[scale_class.name] = scale_class<EOL>", "docstring": "Register a new kind of scale.\n\n*scale_class* must be a subclass of :class:`ScaleBase`.", "id": "f17187:m3"}
{"signature": "def __init__(self, axis, **kwargs):", "body": "if axis.axis_name == '<STR_LIT:x>':<EOL><INDENT>base = kwargs.pop('<STR_LIT>', <NUM_LIT>)<EOL>linthresh = kwargs.pop('<STR_LIT>', <NUM_LIT>)<EOL>subs = kwargs.pop('<STR_LIT>', None)<EOL><DEDENT>else:<EOL><INDENT>base = kwargs.pop('<STR_LIT>', <NUM_LIT>)<EOL>linthresh = kwargs.pop('<STR_LIT>', <NUM_LIT>)<EOL>subs = kwargs.pop('<STR_LIT>', None)<EOL><DEDENT>self._transform = self.SymmetricalLogTransform(base, linthresh)<EOL>self.base = base<EOL>self.linthresh = linthresh<EOL>self.subs = subs<EOL>", "docstring": "*basex*/*basey*:\n   The base of the logarithm\n\n*linthreshx*/*linthreshy*:\n  The range (-*x*, *x*) within which the plot is linear (to\n  avoid having the plot go to infinity around zero).\n\n*subsx*/*subsy*:\n   Where to place the subticks between each major tick.\n   Should be a sequence of integers.  For example, in a log10\n   scale: ``[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``\n\n   will place 10 logarithmically spaced minor ticks between\n   each major tick.", "id": "f17187:c3:m0"}
{"signature": "def limit_range_for_scale(self, vmin, vmax, minpos):", "body": "return (vmin <= <NUM_LIT:0.0> and minpos or vmin,<EOL>vmax <= <NUM_LIT:0.0> and minpos or vmax)<EOL>", "docstring": "Limit the domain to positive values.", "id": "f17187:c2:m3"}
{"signature": "def get_scale_docs():", "body": "docs = []<EOL>for name in get_scale_names():<EOL><INDENT>scale_class = _scale_mapping[name]<EOL>docs.append(\"<STR_LIT>\" % name)<EOL>docs.append(\"<STR_LIT>\")<EOL>class_docs = dedent(scale_class.__init__.__doc__)<EOL>class_docs = \"<STR_LIT>\".join([\"<STR_LIT>\" %<EOL>x for x in class_docs.split(\"<STR_LIT:\\n>\")])<EOL>docs.append(class_docs)<EOL>docs.append(\"<STR_LIT>\")<EOL><DEDENT>return \"<STR_LIT:\\n>\".join(docs)<EOL>", "docstring": "Helper function for generating docstrings related to scales.", "id": "f17187:m4"}
{"signature": "def get_transform(self):", "body": "return self._transform<EOL>", "docstring": "Return a :class:`~matplotlib.transforms.Transform` instance\nappropriate for the given logarithm base.", "id": "f17187:c2:m2"}
{"signature": "def _mask_non_positives(a):", "body": "mask = a <= <NUM_LIT:0.0><EOL>if mask.any():<EOL><INDENT>return ma.MaskedArray(a, mask=mask)<EOL><DEDENT>return a<EOL>", "docstring": "Return a Numpy masked array where all non-positive values are\nmasked.  If there are no non-positive values, the original array\nis returned.", "id": "f17187:m0"}
{"signature": "def set_clim(self, vmin=None, vmax=None):", "body": "if (vmin is not None and vmax is None and<EOL>cbook.iterable(vmin) and len(vmin)==<NUM_LIT:2>):<EOL><INDENT>vmin, vmax = vmin<EOL><DEDENT>if vmin is not None: self.norm.vmin = vmin<EOL>if vmax is not None: self.norm.vmax = vmax<EOL>self.changed()<EOL>", "docstring": "set the norm limits for image scaling; if *vmin* is a length2\nsequence, interpret it as ``(vmin, vmax)`` which is used to\nsupport setp\n\nACCEPTS: a length 2 sequence of floats", "id": "f17188:c0:m7"}
{"signature": "def get_clim(self):", "body": "return self.norm.vmin, self.norm.vmax<EOL>", "docstring": "return the min, max of the color limits for image scaling", "id": "f17188:c0:m6"}
{"signature": "def autoscale(self):", "body": "if self._A is None:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>self.norm.autoscale(self._A)<EOL>self.changed()<EOL>", "docstring": "Autoscale the scalar limits on the norm instance using the\ncurrent array", "id": "f17188:c0:m10"}
{"signature": "def get_cmap(name=None, lut=None):", "body": "if name is None: name = mpl.rcParams['<STR_LIT>']<EOL>if lut is None: lut = mpl.rcParams['<STR_LIT>']<EOL>assert(name in datad.keys())<EOL>return colors.LinearSegmentedColormap(name,  datad[name], lut)<EOL>", "docstring": "Get a colormap instance, defaulting to rc values if *name* is None", "id": "f17188:m0"}
{"signature": "def changed(self):", "body": "self.callbacksSM.process('<STR_LIT>', self)<EOL>for key in self.update_dict:<EOL><INDENT>self.update_dict[key] = True<EOL><DEDENT>", "docstring": "Call this whenever the mappable is changed to notify all the\ncallbackSM listeners to the 'changed' signal", "id": "f17188:c0:m14"}
{"signature": "def to_rgba(self, x, alpha=<NUM_LIT:1.0>, bytes=False):", "body": "try:<EOL><INDENT>if x.ndim == <NUM_LIT:3>:<EOL><INDENT>if x.shape[<NUM_LIT:2>] == <NUM_LIT:3>:<EOL><INDENT>if x.dtype == np.uint8:<EOL><INDENT>alpha = np.array(alpha*<NUM_LIT:255>, np.uint8)<EOL><DEDENT>m, n = x.shape[:<NUM_LIT:2>]<EOL>xx = np.empty(shape=(m,n,<NUM_LIT:4>), dtype = x.dtype)<EOL>xx[:,:,:<NUM_LIT:3>] = x<EOL>xx[:,:,<NUM_LIT:3>] = alpha<EOL><DEDENT>elif x.shape[<NUM_LIT:2>] == <NUM_LIT:4>:<EOL><INDENT>xx = x<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if bytes and xx.dtype != np.uint8:<EOL><INDENT>xx = (xx * <NUM_LIT:255>).astype(np.uint8)<EOL><DEDENT>return xx<EOL><DEDENT><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>x = ma.asarray(x)<EOL>x = self.norm(x)<EOL>x = self.cmap(x, alpha=alpha, bytes=bytes)<EOL>return x<EOL>", "docstring": "Return a normalized rgba array corresponding to *x*. If *x*\n        is already an rgb array, insert *alpha*; if it is already\n        rgba, return it unchanged. If *bytes* is True, return rgba as\n        4 uint8s instead of 4 floats.", "id": "f17188:c0:m2"}
{"signature": "def get_cmap(self):", "body": "return self.cmap<EOL>", "docstring": "return the colormap", "id": "f17188:c0:m5"}
{"signature": "def set_array(self, A):", "body": "self._A = A<EOL>self.update_dict['<STR_LIT>'] = True<EOL>", "docstring": "Set the image array from numpy array *A*", "id": "f17188:c0:m3"}
{"signature": "def set_norm(self, norm):", "body": "if norm is None: norm = colors.Normalize()<EOL>self.norm = norm<EOL>self.changed()<EOL>", "docstring": "set the normalization instance", "id": "f17188:c0:m9"}
{"signature": "def autoscale_None(self):", "body": "if self._A is None:<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>self.norm.autoscale_None(self._A)<EOL>self.changed()<EOL>", "docstring": "Autoscale the scalar limits on the norm instance using the\ncurrent array, changing only limits that are None", "id": "f17188:c0:m11"}
{"signature": "def get_array(self):", "body": "return self._A<EOL>", "docstring": "Return the array", "id": "f17188:c0:m4"}
{"signature": "def __init__(self, tz=None):", "body": "if tz is None: tz = _get_rc_timezone()<EOL>self.tz = tz<EOL>", "docstring": "*tz* is a :class:`tzinfo` instance.", "id": "f17189:c5:m0"}
{"signature": "def _get_unit(self):", "body": "return <NUM_LIT:1.>/(<NUM_LIT>*<NUM_LIT>*<NUM_LIT>)<EOL>", "docstring": "Return how many days a unit of the locator is; used for\nintelligent autoscaling.", "id": "f17189:c14:m1"}
{"signature": "def axisinfo(unit):", "body": "if unit=='<STR_LIT:date>':<EOL><INDENT>majloc = AutoDateLocator()<EOL>majfmt = AutoDateFormatter(majloc)<EOL>return units.AxisInfo(<EOL>majloc = majloc,<EOL>majfmt = majfmt,<EOL>label='<STR_LIT>',<EOL>)<EOL><DEDENT>else: return None<EOL>", "docstring": "return the unit AxisInfo", "id": "f17189:c15:m0"}
{"signature": "def __init__(self,  byhour=None, interval=<NUM_LIT:1>, tz=None):", "body": "if byhour is None: byhour=list(range(<NUM_LIT>))<EOL>rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,<EOL>byminute=<NUM_LIT:0>, bysecond=<NUM_LIT:0>)<EOL>RRuleLocator.__init__(self, rule, tz)<EOL>", "docstring": "Mark every hour in *byhour*; *byhour* can be an int or sequence.\nDefault is to tick every hour: ``byhour=range(24)``\n\n*interval* is the interval between each iteration.  For\nexample, if ``interval=2``, mark every second occurrence.", "id": "f17189:c12:m0"}
{"signature": "def refresh(self):", "body": "dmin, dmax = self.viewlim_to_dt()<EOL>self._locator = self.get_locator(dmin, dmax)<EOL>", "docstring": "Refresh internal information based on current limits.", "id": "f17189:c7:m3"}
{"signature": "def _to_ordinalf(dt):", "body": "if hasattr(dt, '<STR_LIT>') and dt.tzinfo is not None:<EOL><INDENT>delta = dt.tzinfo.utcoffset(dt)<EOL>if delta is not None:<EOL><INDENT>dt -= delta<EOL><DEDENT><DEDENT>base =  float(dt.toordinal())<EOL>if hasattr(dt, '<STR_LIT>'):<EOL><INDENT>base += (dt.hour/HOURS_PER_DAY + dt.minute/MINUTES_PER_DAY +<EOL>dt.second/SECONDS_PER_DAY + dt.microsecond/MUSECONDS_PER_DAY<EOL>)<EOL><DEDENT>return base<EOL>", "docstring": "Convert :mod:`datetime` to the Gregorian date as UTC float days,\npreserving hours, minutes, seconds and microseconds.  Return value\nis a :func:`float`.", "id": "f17189:m1"}
{"signature": "def _get_unit(self):", "body": "return <NUM_LIT:1.>/(<NUM_LIT>*<NUM_LIT>)<EOL>", "docstring": "Return how many days a unit of the locator is; used for\nintelligent autoscaling.", "id": "f17189:c13:m1"}
{"signature": "def hours(h):", "body": "return h/<NUM_LIT><EOL>", "docstring": "Return hours as days.", "id": "f17189:m17"}
{"signature": "def __init__(self,  byweekday=<NUM_LIT:1>, interval=<NUM_LIT:1>, tz=None):", "body": "o = rrulewrapper(DAILY, byweekday=byweekday,<EOL>interval=interval, **self.hms0d)<EOL>RRuleLocator.__init__(self, o, tz)<EOL>", "docstring": "Mark every weekday in *byweekday*; *byweekday* can be a number or\nsequence.\n\nElements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,\nSU, the constants from :mod:`dateutils.rrule`.\n\n*interval* specifies the number of weeks to skip.  For example,\n``interval=2`` plots every second week.", "id": "f17189:c10:m0"}
{"signature": "def __init__(self, base=<NUM_LIT:1>, month=<NUM_LIT:1>, day=<NUM_LIT:1>, tz=None):", "body": "DateLocator.__init__(self, tz)<EOL>self.base = ticker.Base(base)<EOL>self.replaced = { '<STR_LIT>'  : month,<EOL>'<STR_LIT>'    : day,<EOL>'<STR_LIT>'   : <NUM_LIT:0>,<EOL>'<STR_LIT>' : <NUM_LIT:0>,<EOL>'<STR_LIT>' : <NUM_LIT:0>,<EOL>'<STR_LIT>' : tz<EOL>}<EOL>", "docstring": "Mark years that are multiple of base on a given month and day\n(default jan 1).", "id": "f17189:c8:m0"}
{"signature": "def __init__(self,  bymonthday=None, interval=<NUM_LIT:1>, tz=None):", "body": "if bymonthday is None: bymonthday=list(range(<NUM_LIT:1>,<NUM_LIT:32>))<EOL>o = rrulewrapper(DAILY, bymonthday=bymonthday,<EOL>interval=interval, **self.hms0d)<EOL>RRuleLocator.__init__(self, o, tz)<EOL>", "docstring": "Mark every day in *bymonthday*; *bymonthday* can be an int or\nsequence.\n\nDefault is to tick every day of the month: ``bymonthday=range(1,32)``", "id": "f17189:c11:m0"}
{"signature": "def minutes(m):", "body": "return float(m)/MINUTES_PER_DAY<EOL>", "docstring": "Return minutes as days.", "id": "f17189:m16"}
{"signature": "def num2julian(n):", "body": "if cbook.iterable(n): n = np.asarray(n)<EOL>return n - <NUM_LIT><EOL>", "docstring": "Convert a matplotlib date (or sequence) to a Julian date (or sequence).", "id": "f17189:m6"}
{"signature": "def __init__(self, t, fmt, tz=None):", "body": "if tz is None: tz = _get_rc_timezone()<EOL>self.t = t<EOL>self.fmt = fmt<EOL>self.tz = tz<EOL>", "docstring": "*t* is a sequence of dates (floating point days).  *fmt* is a\n:func:`strftime` format string.", "id": "f17189:c2:m0"}
{"signature": "def __init__(self,  byminute=None, interval=<NUM_LIT:1>, tz=None):", "body": "if byminute is None: byminute=list(range(<NUM_LIT>))<EOL>rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,<EOL>bysecond=<NUM_LIT:0>)<EOL>RRuleLocator.__init__(self, rule, tz)<EOL>", "docstring": "Mark every minute in *byminute*; *byminute* can be an int or\nsequence.  Default is to tick every minute: ``byminute=range(60)``\n\n*interval* is the interval between each iteration.  For\nexample, if ``interval=2``, mark every second occurrence.", "id": "f17189:c13:m0"}
{"signature": "def _get_unit(self):", "body": "return <NUM_LIT:1><EOL>", "docstring": "Return how many days a unit of the locator is; used for\nintelligent autoscaling.", "id": "f17189:c11:m1"}
{"signature": "def __init__(self, fmt, tz=None):", "body": "if tz is None: tz = _get_rc_timezone()<EOL>self.fmt = fmt<EOL>self.tz = tz<EOL>", "docstring": "*fmt* is an :func:`strftime` format string; *tz* is the\n :class:`tzinfo` instance.", "id": "f17189:c1:m0"}
{"signature": "def __call__(self):", "body": "self.refresh()<EOL>return self._locator()<EOL>", "docstring": "Return the locations of the ticks", "id": "f17189:c7:m1"}
{"signature": "def _get_unit(self):", "body": "return <NUM_LIT:7><EOL>", "docstring": "return how many days a unit of the locator is; used for\nintelligent autoscaling.", "id": "f17189:c10:m1"}
{"signature": "def autoscale(self):", "body": "dmin, dmax = self.datalim_to_dt()<EOL>ymin = self.base.le(dmin.year)<EOL>ymax = self.base.ge(dmax.year)<EOL>vmin = dmin.replace(year=ymin, **self.replaced)<EOL>vmax = dmax.replace(year=ymax, **self.replaced)<EOL>vmin = date2num(vmin)<EOL>vmax = date2num(vmax)<EOL>return self.nonsingular(vmin, vmax)<EOL>", "docstring": "Set the view limits to include the data range.", "id": "f17189:c8:m3"}
{"signature": "def weeks(w):", "body": "return w*<NUM_LIT><EOL>", "docstring": "Return weeks as days.", "id": "f17189:m18"}
{"signature": "def __call__(self, s):", "body": "return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:<NUM_LIT:6>]))<EOL>", "docstring": "s : string to be converted\n           return value: a date2num float", "id": "f17189:c0:m1"}
{"signature": "def __call__(self, x, pos=<NUM_LIT:0>):", "body": "ind = int(round(x))<EOL>if ind>=len(self.t) or ind<=<NUM_LIT:0>: return '<STR_LIT>'<EOL>dt = num2date(self.t[ind], self.tz)<EOL>return cbook.unicode_safe(dt.strftime(self.fmt))<EOL>", "docstring": "Return the label for time *x* at position *pos*", "id": "f17189:c2:m1"}
{"signature": "def _get_unit(self):", "body": "return <NUM_LIT:1><EOL>", "docstring": "Return how many days a unit of the locator is; used for\nintelligent autoscaling.", "id": "f17189:c5:m4"}
{"signature": "def get_fontproperties(self):", "body": "return self._fontproperties<EOL>", "docstring": "Return the :class:`~font_manager.FontProperties` object", "id": "f17190:c0:m14"}
{"signature": "def get_dashpad(self):", "body": "return self._dashpad<EOL>", "docstring": "Get the extra spacing between the dash and the text, in canvas units.", "id": "f17190:c1:m13"}
{"signature": "def set_x(self, x):", "body": "self._x = x<EOL>", "docstring": "Set the *x* position of the text\n\nACCEPTS: float", "id": "f17190:c0:m59"}
{"signature": "def set_fontstretch(self, stretch):", "body": "return self.set_stretch(stretch)<EOL>", "docstring": "alias for set_stretch", "id": "f17190:c0:m57"}
{"signature": "def set_color(self, color):", "body": "<EOL>try:<EOL><INDENT>hash(color)<EOL><DEDENT>except TypeError:<EOL><INDENT>color = tuple(color)<EOL><DEDENT>self._color = color<EOL>", "docstring": "Set the foreground color of the text\n\nACCEPTS: any matplotlib color", "id": "f17190:c0:m39"}
{"signature": "def get_window_extent(self, renderer=None):", "body": "self.update_coords(renderer)<EOL>if self.get_dashlength() == <NUM_LIT:0.0>:<EOL><INDENT>return Text.get_window_extent(self, renderer=renderer)<EOL><DEDENT>else:<EOL><INDENT>return self._twd_window_extent<EOL><DEDENT>", "docstring": "Return a :class:`~matplotlib.transforms.Bbox` object bounding\nthe text, in display units.\n\nIn addition to being used internally, this is useful for\nspecifying clickable regions in a png file on a web page.\n\n*renderer* defaults to the _renderer attribute of the text\nobject.  This is not assigned until the first execution of\n:meth:`draw`, so you must use this kwarg if you want\nto call :meth:`get_window_extent` prior to the first\n:meth:`draw`.  For getting web page regions, it is\nsimpler to call the method after saving the figure.", "id": "f17190:c1:m6"}
{"signature": "def get_dashlength(self):", "body": "return self._dashlength<EOL>", "docstring": "Get the length of the dash.", "id": "f17190:c1:m7"}
{"signature": "def set_figure(self, fig):", "body": "Text.set_figure(self, fig)<EOL>self.dashline.set_figure(fig)<EOL>", "docstring": "Set the figure instance the artist belong to.\n\nACCEPTS: a :class:`matplotlib.figure.Figure` instance", "id": "f17190:c1:m22"}
{"signature": "def get_fontstyle(self):", "body": "return self.get_style()<EOL>", "docstring": "alias for get_style", "id": "f17190:c0:m25"}
{"signature": "def set_va(self, align):", "body": "self.set_verticalalignment(align)<EOL>", "docstring": "alias for set_verticalalignment", "id": "f17190:c0:m62"}
{"signature": "def get_stretch(self):", "body": "return self._fontproperties.get_stretch()<EOL>", "docstring": "Get the font stretch as a string or number", "id": "f17190:c0:m28"}
{"signature": "def set_linespacing(self, spacing):", "body": "self._linespacing = spacing<EOL>", "docstring": "Set the line spacing as a multiple of the font size.\nDefault is 1.2.\n\nACCEPTS: float (multiple of font size)", "id": "f17190:c0:m44"}
{"signature": "def set_backgroundcolor(self, color):", "body": "if self._bbox is None:<EOL><INDENT>self._bbox = dict(facecolor=color, edgecolor=color)<EOL><DEDENT>else:<EOL><INDENT>self._bbox.update(dict(facecolor=color))<EOL><DEDENT>", "docstring": "Set the background color of the text by updating the bbox.\n\n.. seealso::\n    :meth:`set_bbox`\n\nACCEPTS: any matplotlib color", "id": "f17190:c0:m38"}
{"signature": "def set_bbox(self, rectprops):", "body": "<EOL>if rectprops is not None and \"<STR_LIT>\" in rectprops:<EOL><INDENT>props = rectprops.copy()<EOL>boxstyle = props.pop(\"<STR_LIT>\")<EOL>bbox_transmuter = props.pop(\"<STR_LIT>\", None)<EOL>self._bbox_patch = FancyBboxPatch((<NUM_LIT:0.>, <NUM_LIT:0.>),<EOL><NUM_LIT:1.>, <NUM_LIT:1.>,<EOL>boxstyle=boxstyle,<EOL>bbox_transmuter=bbox_transmuter,<EOL>transform=mtransforms.IdentityTransform(),<EOL>**props)<EOL>self._bbox = None<EOL><DEDENT>else:<EOL><INDENT>self._bbox_patch = None<EOL>self._bbox = rectprops<EOL><DEDENT>", "docstring": "Draw a bounding box around self.  rectprops are any settable\nproperties for a rectangle, eg facecolor='red', alpha=0.5.\n\n  t.set_bbox(dict(facecolor='red', alpha=0.5))\n\nIf rectprops has \"boxstyle\" key. A FancyBboxPatch\nis initialized with rectprops and will be drawn. The mutation\nscale of the FancyBboxPath is set to the fontsize.\n\nACCEPTS: rectangle prop dict", "id": "f17190:c0:m8"}
{"signature": "def set_ma(self, align):", "body": "self.set_multialignment(align)<EOL>", "docstring": "alias for set_verticalalignment", "id": "f17190:c0:m42"}
{"signature": "def set_multialignment(self, align):", "body": "legal = ('<STR_LIT>', '<STR_LIT:right>', '<STR_LIT:left>')<EOL>if align not in legal:<EOL><INDENT>raise ValueError('<STR_LIT>' % str(legal))<EOL><DEDENT>self._multialignment = align<EOL>", "docstring": "Set the alignment for multiple lines layout.  The layout of the\nbounding box of all the lines is determined bu the horizontalalignment\nand verticalalignment properties, but the multiline text within that\nbox can be\n\nACCEPTS: ['left' | 'right' | 'center' ]", "id": "f17190:c0:m43"}
{"signature": "def update_from(self, other):", "body": "Artist.update_from(self, other)<EOL>self._color = other._color<EOL>self._multialignment = other._multialignment<EOL>self._verticalalignment = other._verticalalignment<EOL>self._horizontalalignment = other._horizontalalignment<EOL>self._fontproperties = other._fontproperties.copy()<EOL>self._rotation = other._rotation<EOL>self._picker = other._picker<EOL>self._linespacing = other._linespacing<EOL>", "docstring": "Copy properties from other to self", "id": "f17190:c0:m6"}
{"signature": "def _get_xy_display(self):", "body": "x, y = self.get_position()<EOL>return self.get_transform().transform_point((x,y))<EOL>", "docstring": "get the (possibly unit converted) transformed x, y in display coords", "id": "f17190:c0:m3"}
{"signature": "def set_fontsize(self, fontsize):", "body": "return self.set_size(fontsize)<EOL>", "docstring": "alias for set_size", "id": "f17190:c0:m53"}
{"signature": "def set_rotation(self, s):", "body": "self._rotation = s<EOL>", "docstring": "Set the rotation of the text\n\nACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]", "id": "f17190:c0:m61"}
{"signature": "def set_weight(self, weight):", "body": "self._fontproperties.set_weight(weight)<EOL>", "docstring": "Set the font weight.\n\nACCEPTS: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ]", "id": "f17190:c0:m54"}
{"signature": "def get_fontweight(self):", "body": "return self.get_weight()<EOL>", "docstring": "alias for get_weight", "id": "f17190:c0:m27"}
{"signature": "def get_rotation(rotation):", "body": "if rotation in ('<STR_LIT>', None):<EOL><INDENT>angle = <NUM_LIT:0.><EOL><DEDENT>elif rotation == '<STR_LIT>':<EOL><INDENT>angle = <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>angle = float(rotation)<EOL><DEDENT>return angle%<NUM_LIT><EOL>", "docstring": "Return the text angle as float.\n\n*rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.", "id": "f17190:m1"}
{"signature": "def set_size(self, fontsize):", "body": "self._fontproperties.set_size(fontsize)<EOL>", "docstring": "Set the font size.  May be either a size string, relative to\nthe default font size, or an absolute font size in points.\n\nACCEPTS: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ]", "id": "f17190:c0:m52"}
{"signature": "def get_fontfamily(self):", "body": "return self.get_family()<EOL>", "docstring": "alias for get_family", "id": "f17190:c0:m17"}
{"signature": "def get_text(self):", "body": "return self._text<EOL>", "docstring": "Get the text as string", "id": "f17190:c0:m34"}
{"signature": "def set_y(self, y):", "body": "self._dashy = float(y)<EOL>", "docstring": "Set the *y* position of the :class:`TextWithDash`.\n\nACCEPTS: float", "id": "f17190:c1:m19"}
{"signature": "def get_dashpush(self):", "body": "return self._dashpush<EOL>", "docstring": "Get the extra spacing between the dash and the specified text\nposition, in canvas units.", "id": "f17190:c1:m15"}
{"signature": "def set_fontweight(self, weight):", "body": "return self.set_weight(weight)<EOL>", "docstring": "alias for set_weight", "id": "f17190:c0:m55"}
{"signature": "def set_y(self, y):", "body": "self._y = y<EOL>", "docstring": "Set the *y* position of the text\n\nACCEPTS: float", "id": "f17190:c0:m60"}
{"signature": "def get_variant(self):", "body": "return self._fontproperties.get_variant()<EOL>", "docstring": "Return the font variant as a string", "id": "f17190:c0:m21"}
{"signature": "def get_fontname(self):", "body": "return self.get_name()<EOL>", "docstring": "alias for get_name", "id": "f17190:c0:m24"}
{"signature": "def draw(self, renderer):", "body": "if renderer is not None:<EOL><INDENT>self._renderer = renderer<EOL><DEDENT>if not self.get_visible(): return<EOL>if self._text=='<STR_LIT>': return<EOL>bbox, info = self._get_layout(renderer)<EOL>trans = self.get_transform()<EOL>posx = float(self.convert_xunits(self._x))<EOL>posy = float(self.convert_yunits(self._y))<EOL>posx, posy = trans.transform_point((posx, posy))<EOL>canvasw, canvash = renderer.get_canvas_width_height()<EOL>if self._bbox_patch:<EOL><INDENT>self._draw_bbox(renderer, posx, posy)<EOL><DEDENT>gc = renderer.new_gc()<EOL>gc.set_foreground(self._color)<EOL>gc.set_alpha(self._alpha)<EOL>gc.set_url(self._url)<EOL>if self.get_clip_on():<EOL><INDENT>gc.set_clip_rectangle(self.clipbox)<EOL><DEDENT>if self._bbox:<EOL><INDENT>bbox_artist(self, renderer, self._bbox)<EOL><DEDENT>angle = self.get_rotation()<EOL>if rcParams['<STR_LIT>']:<EOL><INDENT>for line, wh, x, y in info:<EOL><INDENT>x = x + posx<EOL>y = y + posy<EOL>if renderer.flipy():<EOL><INDENT>y = canvash-y<EOL><DEDENT>clean_line, ismath = self.is_math_text(line)<EOL>renderer.draw_tex(gc, x, y, clean_line,<EOL>self._fontproperties, angle)<EOL><DEDENT>return<EOL><DEDENT>for line, wh, x, y in info:<EOL><INDENT>x = x + posx<EOL>y = y + posy<EOL>if renderer.flipy():<EOL><INDENT>y = canvash-y<EOL><DEDENT>clean_line, ismath = self.is_math_text(line)<EOL>renderer.draw_text(gc, x, y, clean_line,<EOL>self._fontproperties, angle,<EOL>ismath=ismath)<EOL><DEDENT>", "docstring": "Draws the :class:`Text` object to the given *renderer*.", "id": "f17190:c0:m12"}
{"signature": "def get_window_extent(self, renderer=None, dpi=None):", "body": "<EOL>if not self.get_visible(): return Bbox.unit()<EOL>if dpi is not None:<EOL><INDENT>dpi_orig = self.figure.dpi<EOL>self.figure.dpi = dpi<EOL><DEDENT>if self._text == '<STR_LIT>':<EOL><INDENT>tx, ty = self._get_xy_display()<EOL>return Bbox.from_bounds(tx,ty,<NUM_LIT:0>,<NUM_LIT:0>)<EOL><DEDENT>if renderer is not None:<EOL><INDENT>self._renderer = renderer<EOL><DEDENT>if self._renderer is None:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>bbox, info = self._get_layout(self._renderer)<EOL>x, y = self.get_position()<EOL>x, y = self.get_transform().transform_point((x, y))<EOL>bbox = bbox.translated(x, y)<EOL>if dpi is not None:<EOL><INDENT>self.figure.dpi = dpi_orig<EOL><DEDENT>return bbox<EOL>", "docstring": "Return a :class:`~matplotlib.transforms.Bbox` object bounding\nthe text, in display units.\n\nIn addition to being used internally, this is useful for\nspecifying clickable regions in a png file on a web page.\n\n*renderer* defaults to the _renderer attribute of the text\nobject.  This is not assigned until the first execution of\n:meth:`draw`, so you must use this kwarg if you want\nto call :meth:`get_window_extent` prior to the first\n:meth:`draw`.  For getting web page regions, it is\nsimpler to call the method after saving the figure.\n\n*dpi* defaults to self.figure.dpi; the renderer dpi is\nirrelevant.  For the web application, if figure.dpi is not\nthe value used when saving the figure, then the value that\nwas used must be specified as the *dpi* argument.", "id": "f17190:c0:m37"}
{"signature": "def get_position(self):", "body": "x = float(self.convert_xunits(self._dashx))<EOL>y = float(self.convert_yunits(self._dashy))<EOL>return x, y<EOL>", "docstring": "Return the position of the text as a tuple (*x*, *y*)", "id": "f17190:c1:m2"}
{"signature": "def set_font_properties(self, fp):", "body": "self.set_fontproperties(fp)<EOL>", "docstring": "alias for set_fontproperties", "id": "f17190:c0:m67"}
{"signature": "def set_dashpush(self, dp):", "body": "self._dashpush = dp<EOL>", "docstring": "Set the \"push\" of the TextWithDash, which\nis the extra spacing between the beginning\nof the dash and the specified position.\n\nACCEPTS: float (canvas units)", "id": "f17190:c1:m16"}
{"signature": "def get_capheight(self):", "body": "return self._header['<STR_LIT>']<EOL>", "docstring": "Return the cap height as float", "id": "f17191:c0:m16"}
{"signature": "def __init__(self, fh):", "body": "(dhead, dcmetrics_ascii, dcmetrics_name, dkernpairs, dcomposite) =parse_afm(fh)<EOL>self._header = dhead<EOL>self._kern = dkernpairs<EOL>self._metrics = dcmetrics_ascii<EOL>self._metrics_by_name = dcmetrics_name<EOL>self._composite = dcomposite<EOL>", "docstring": "Parse the AFM file in file object *fh*", "id": "f17191:c0:m0"}
{"signature": "def get_name_char(self, c, isord=False):", "body": "if not isord: c=ord(c)<EOL>wx, name, bbox = self._metrics[c]<EOL>return name<EOL>", "docstring": "Get the name of the character, ie, ';' is 'semicolon'", "id": "f17191:c0:m5"}
{"signature": "def get_fullname(self):", "body": "name = self._header.get('<STR_LIT>')<EOL>if name is None: <EOL><INDENT>name = self._header['<STR_LIT>']<EOL><DEDENT>return name<EOL>", "docstring": "Return the font full name, eg, 'Times-Roman", "id": "f17191:c0:m12"}
{"signature": "def get_str_bbox(self, s):", "body": "return self.get_str_bbox_and_descent(s)[:<NUM_LIT:4>]<EOL>", "docstring": "Return the string bounding box", "id": "f17191:c0:m4"}
{"signature": "def _parse_kern_pairs(fh):", "body": "line = fh.readline()<EOL>if not line.startswith('<STR_LIT>'):<EOL><INDENT>raise RuntimeError('<STR_LIT>'%line)<EOL><DEDENT>d = {}<EOL>while <NUM_LIT:1>:<EOL><INDENT>line = fh.readline()<EOL>if not line: break<EOL>line = line.rstrip()<EOL>if len(line)==<NUM_LIT:0>: continue<EOL>if line.startswith('<STR_LIT>'):<EOL><INDENT>fh.readline()  <EOL>return d<EOL><DEDENT>vals = line.split()<EOL>if len(vals)!=<NUM_LIT:4> or vals[<NUM_LIT:0>]!='<STR_LIT>':<EOL><INDENT>raise RuntimeError('<STR_LIT>'%line)<EOL><DEDENT>c1, c2, val = vals[<NUM_LIT:1>], vals[<NUM_LIT:2>], _to_float(vals[<NUM_LIT:3>])<EOL>d[(c1,c2)] = val<EOL><DEDENT>raise RuntimeError('<STR_LIT>')<EOL>", "docstring": "Return a kern pairs dictionary; keys are (*char1*, *char2*) tuples and\nvalues are the kern pair value.  For example, a kern pairs line like\n``KPX A y -50``\n\nwill be represented as::\n\n  d[ ('A', 'y') ] = -50", "id": "f17191:m6"}
{"signature": "def get_vertical_stem_width(self):", "body": "return self._header.get('<STR_LIT>', None)<EOL>", "docstring": "Return the standard vertical stem width as float, or *None* if\nnot specified in AFM file.", "id": "f17191:c0:m20"}
{"signature": "def get_familyname(self):", "body": "name = self._header.get('<STR_LIT>')<EOL>if name is not None:<EOL><INDENT>return name<EOL><DEDENT>name = self.get_fullname()<EOL>extras = r'<STR_LIT>'<EOL>return re.sub(extras, '<STR_LIT>', name)<EOL>", "docstring": "Return the font family name, eg, 'Times", "id": "f17191:c0:m13"}
{"signature": "def _sanity_check(fh):", "body": "<EOL>pos = fh.tell()<EOL>try:<EOL><INDENT>line = fh.readline()<EOL><DEDENT>finally:<EOL><INDENT>fh.seek(pos, <NUM_LIT:0>)<EOL><DEDENT>if not line.startswith('<STR_LIT>'):<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Check if the file at least looks like AFM.\nIf not, raise :exc:`RuntimeError`.", "id": "f17191:m3"}
{"signature": "def get_width_char(self, c, isord=False):", "body": "if not isord: c=ord(c)<EOL>wx, name, bbox = self._metrics[c]<EOL>return wx<EOL>", "docstring": "Get the width of the character from the character metric WX\nfield", "id": "f17191:c0:m6"}
{"signature": "def get_xheight(self):", "body": "return self._header['<STR_LIT>']<EOL>", "docstring": "Return the xheight as float", "id": "f17191:c0:m17"}
{"signature": "def get_angle(self):", "body": "return self._header['<STR_LIT>']<EOL>", "docstring": "Return the fontangle as float", "id": "f17191:c0:m15"}
{"signature": "def get_width_from_char_name(self, name):", "body": "wx, bbox = self._metrics_by_name[name]<EOL>return wx<EOL>", "docstring": "Get the width of the character from a type1 character name", "id": "f17191:c0:m7"}
{"signature": "def string_width_height(self, s):", "body": "if not len(s): return <NUM_LIT:0>,<NUM_LIT:0><EOL>totalw = <NUM_LIT:0><EOL>namelast = None<EOL>miny = <NUM_LIT><EOL>maxy = <NUM_LIT:0><EOL>for c in s:<EOL><INDENT>if c == '<STR_LIT:\\n>': continue<EOL>wx, name, bbox = self._metrics[ord(c)]<EOL>l,b,w,h = bbox<EOL>try: kp = self._kern[ (namelast, name) ]<EOL>except KeyError: kp = <NUM_LIT:0><EOL>totalw += wx + kp<EOL>thismax = b+h<EOL>if thismax>maxy: maxy = thismax<EOL>thismin = b<EOL>if thismin<miny: miny = thismin<EOL><DEDENT>return totalw, maxy-miny<EOL>", "docstring": "Return the string width (including kerning) and string height\nas a (*w*, *h*) tuple.", "id": "f17191:c0:m2"}
{"signature": "def get_fontname(self):", "body": "return self._header['<STR_LIT>']<EOL>", "docstring": "Return the font name, eg, 'Times-Roman", "id": "f17191:c0:m11"}
{"signature": "def _parse_optional(fh):", "body": "optional = {<EOL>'<STR_LIT>' : _parse_kern_pairs,<EOL>'<STR_LIT>' :  _parse_composites,<EOL>}<EOL>d = {'<STR_LIT>':{}, '<STR_LIT>':{}}<EOL>while <NUM_LIT:1>:<EOL><INDENT>line = fh.readline()<EOL>if not line: break<EOL>line = line.rstrip()<EOL>if len(line)==<NUM_LIT:0>: continue<EOL>key = line.split()[<NUM_LIT:0>]<EOL>if key in optional: d[key] = optional[key](fh)<EOL><DEDENT>l = ( d['<STR_LIT>'], d['<STR_LIT>'] )<EOL>return l<EOL>", "docstring": "Parse the optional fields for kern pair data and composites\n\nreturn value is a (*kernDict*, *compositeDict*) which are the\nreturn values from :func:`_parse_kern_pairs`, and\n:func:`_parse_composites` if the data exists, or empty dicts\notherwise", "id": "f17191:m8"}
{"signature": "def find_nearest_contour( self, x, y, indices=None, pixel=True ):", "body": "<EOL>if indices==None:<EOL><INDENT>indices = list(range(len(self.levels)))<EOL><DEDENT>dmin = <NUM_LIT><EOL>conmin = None<EOL>segmin = None<EOL>xmin = None<EOL>ymin = None<EOL>for icon in indices:<EOL><INDENT>con = self.collections[icon]<EOL>paths = con.get_paths()<EOL>for segNum, linepath in enumerate(paths):<EOL><INDENT>lc = linepath.vertices<EOL>if pixel:<EOL><INDENT>lc = self.ax.transData.transform(lc)<EOL><DEDENT>ds = (lc[:,<NUM_LIT:0>]-x)**<NUM_LIT:2> + (lc[:,<NUM_LIT:1>]-y)**<NUM_LIT:2><EOL>d = min( ds )<EOL>if d < dmin:<EOL><INDENT>dmin = d<EOL>conmin = icon<EOL>segmin = segNum<EOL>imin = mpl.mlab.find( ds == d )[<NUM_LIT:0>]<EOL>xmin = lc[imin,<NUM_LIT:0>]<EOL>ymin = lc[imin,<NUM_LIT:1>]<EOL><DEDENT><DEDENT><DEDENT>return (conmin,segmin,imin,xmin,ymin,dmin)<EOL>", "docstring": "Finds contour that is closest to a point.  Defaults to\nmeasuring distance in pixels (screen space - useful for manual\ncontour labeling), but this can be controlled via a keyword\nargument.\n\nReturns a tuple containing the contour, segment, index of\nsegment, x & y of segment point and distance to minimum point.\n\nCall signature::\n\n  conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour(\n             self, x, y, indices=None, pixel=True )\n\nOptional keyword arguments::\n\n*indices*:\n   Indexes of contour levels to consider when looking for\n   nearest point.  Defaults to using all levels.\n\n*pixel*:\n   If *True*, measure distance in pixel space, if not, measure\n   distance in axes space.  Defaults to *True*.", "id": "f17192:c1:m11"}
{"signature": "def get_text(self, lev, fmt):", "body": "if cbook.is_string_like(lev):<EOL><INDENT>return lev<EOL><DEDENT>else:<EOL><INDENT>if isinstance(fmt,dict):<EOL><INDENT>return fmt[lev]<EOL><DEDENT>else:<EOL><INDENT>return fmt%lev<EOL><DEDENT><DEDENT>", "docstring": "get the text of the label", "id": "f17192:c0:m7"}
{"signature": "def get_label_width(self, lev, fmt, fsize):", "body": "if cbook.is_string_like(lev):<EOL><INDENT>lw = (len(lev)) * fsize<EOL><DEDENT>else:<EOL><INDENT>lw = (len(self.get_text(lev,fmt))) * fsize<EOL><DEDENT>return lw<EOL>", "docstring": "get the width of the label in points", "id": "f17192:c0:m4"}
{"signature": "def _process_colors(self):", "body": "self.monochrome = self.cmap.monochrome<EOL>if self.colors is not None:<EOL><INDENT>i0, i1 = <NUM_LIT:0>, len(self.layers)<EOL>if self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>i0 = -<NUM_LIT:1><EOL><DEDENT>if self.extend in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>i1 = i1 + <NUM_LIT:1><EOL><DEDENT>self.cvalues = list(range(i0, i1))<EOL>self.set_norm(colors.NoNorm())<EOL><DEDENT>else:<EOL><INDENT>self.cvalues = self.layers<EOL><DEDENT>if not self.norm.scaled():<EOL><INDENT>self.set_clim(self.vmin, self.vmax)<EOL><DEDENT>if self.extend in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self.norm.clip = False<EOL><DEDENT>self.set_array(self.layers)<EOL>", "docstring": "Color argument processing for contouring.\n\nNote that we base the color mapping on the contour levels,\nnot on the actual range of the Z values.  This means we\ndon't have to worry about bad values in Z, and we always have\nthe full dynamic range available for the selected levels.\n\nThe color is based on the midpoint of the layer, except for\nan extended end layers.", "id": "f17192:c1:m6"}
{"signature": "def _check_xyz(self, args):", "body": "<EOL>x = self.ax.convert_xunits( args[<NUM_LIT:0>] )<EOL>y = self.ax.convert_yunits( args[<NUM_LIT:1>] )<EOL>x = np.asarray(x, dtype=np.float64)<EOL>y = np.asarray(y, dtype=np.float64)<EOL>z = ma.asarray(args[<NUM_LIT:2>], dtype=np.float64)<EOL>if z.ndim != <NUM_LIT:2>:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>else: Ny, Nx = z.shape<EOL>if x.shape == z.shape and y.shape == z.shape:<EOL><INDENT>return x,y,z<EOL><DEDENT>if x.ndim != <NUM_LIT:1> or y.ndim != <NUM_LIT:1>:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>nx, = x.shape<EOL>ny, = y.shape<EOL>if nx != Nx or ny != Ny:<EOL><INDENT>raise TypeError(\"<STR_LIT>\" +<EOL>\"<STR_LIT>\")<EOL><DEDENT>x,y = np.meshgrid(x,y)<EOL>return x,y,z<EOL>", "docstring": "For functions like contour, check that the dimensions\nof the input arrays match; if x and y are 1D, convert\nthem to 2D using meshgrid.\n\nPossible change: I think we should make and use an ArgumentError\nException class (here and elsewhere).", "id": "f17192:c1:m4"}
{"signature": "def pop_label(self,index=-<NUM_LIT:1>):", "body": "self.labelCValues.pop(index)<EOL>t = self.labelTexts.pop(index)<EOL>t.remove()<EOL>", "docstring": "Defaults to removing last label, but any index can be supplied", "id": "f17192:c0:m11"}
{"signature": "def print_label(self, linecontour,labelwidth):", "body": "lcsize = len(linecontour)<EOL>if lcsize > <NUM_LIT:10> * labelwidth:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>xmax = np.amax(linecontour[:,<NUM_LIT:0>])<EOL>xmin = np.amin(linecontour[:,<NUM_LIT:0>])<EOL>ymax = np.amax(linecontour[:,<NUM_LIT:1>])<EOL>ymin = np.amin(linecontour[:,<NUM_LIT:1>])<EOL>lw = labelwidth<EOL>if (xmax - xmin) > <NUM_LIT>* lw or (ymax - ymin) > <NUM_LIT> * lw:<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>", "docstring": "if contours are too short, don't plot a label", "id": "f17192:c0:m1"}
{"signature": "def get_real_label_width( self, lev, fmt, fsize ):", "body": "<EOL>xx = np.mean( np.asarray(self.ax.axis()).reshape(<NUM_LIT:2>,<NUM_LIT:2>), axis=<NUM_LIT:1> )<EOL>t = text.Text( xx[<NUM_LIT:0>], xx[<NUM_LIT:1>] )<EOL>self.set_label_props( t, self.get_text(lev,fmt), '<STR_LIT:k>' )<EOL>bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)<EOL>lw = np.diff(bbox.corners()[<NUM_LIT:0>::<NUM_LIT:2>,<NUM_LIT:0>])[<NUM_LIT:0>]<EOL>return lw<EOL>", "docstring": "This computes actual onscreen label width.\nThis uses some black magic to determine onscreen extent of non-drawn\nlabel.  This magic may not be very robust.", "id": "f17192:c0:m5"}
{"signature": "def set_label_props(self, label, text, color):", "body": "label.set_text(text)<EOL>label.set_color(color)<EOL>label.set_fontproperties(self.labelFontProps)<EOL>label.set_clip_box(self.ax.bbox)<EOL>", "docstring": "set the label properties - color, fontsize, text", "id": "f17192:c0:m6"}
{"signature": "def _autolev(self, z, N):", "body": "if self.locator is None:<EOL><INDENT>if self.logscale:<EOL><INDENT>self.locator = ticker.LogLocator()<EOL><DEDENT>else:<EOL><INDENT>self.locator = ticker.MaxNLocator(N+<NUM_LIT:1>)<EOL><DEDENT><DEDENT>self.locator.create_dummy_axis()<EOL>zmax = self.zmax<EOL>zmin = self.zmin<EOL>self.locator.set_bounds(zmin, zmax)<EOL>lev = self.locator()<EOL>zmargin = (zmax - zmin) * <NUM_LIT> <EOL>if zmax >= lev[-<NUM_LIT:1>]:<EOL><INDENT>lev[-<NUM_LIT:1>] += zmargin<EOL><DEDENT>if zmin <= lev[<NUM_LIT:0>]:<EOL><INDENT>if self.logscale:<EOL><INDENT>lev[<NUM_LIT:0>] = <NUM_LIT> * zmin<EOL><DEDENT>else:<EOL><INDENT>lev[<NUM_LIT:0>] -= zmargin<EOL><DEDENT><DEDENT>self._auto = True<EOL>if self.filled:<EOL><INDENT>return lev<EOL><DEDENT>return lev[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL>", "docstring": "Select contour levels to span the data.\n\nWe need two more levels for filled contours than for\nline contours, because for the latter we need to specify\nthe lower and upper boundary of each range. For example,\na single contour boundary, say at z = 0, requires only\none contour line, but two filled regions, and therefore\nthree levels to provide boundaries for both regions.", "id": "f17192:c1:m2"}
{"signature": "def get_label_coords(self, distances, XX, YY, ysize, lw):", "body": "hysize = int(ysize/<NUM_LIT:2>)<EOL>adist = np.argsort(distances)<EOL>for ind in adist:<EOL><INDENT>x, y = XX[ind][hysize], YY[ind][hysize]<EOL>if self.too_close(x,y, lw):<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>return x,y, ind<EOL><DEDENT><DEDENT>ind = adist[<NUM_LIT:0>]<EOL>x, y = XX[ind][hysize], YY[ind][hysize]<EOL>return x,y, ind<EOL>", "docstring": "labels are ploted at a location with the smallest\n        dispersion of the contour from a straight line\n        unless there's another label nearby, in which case\n        the second best place on the contour is picked up\n        if there's no good place a label isplotted at the\n        beginning of the contour", "id": "f17192:c0:m3"}
{"signature": "def get_state(self):", "body": "return self._state_stack[-<NUM_LIT:1>]<EOL>", "docstring": "Get the current :class:`State` of the parser.", "id": "f17193:c44:m3"}
{"signature": "def render(self, x, y):", "body": "self.font_output.render_glyph(<EOL>x, y,<EOL>self.font, self.font_class, self.c, self.fontsize, self.dpi)<EOL>", "docstring": "Render the character to the canvas", "id": "f17193:c20:m5"}
{"signature": "def get_underline_thickness(self, font, fontsize, dpi):", "body": "raise NotImplementedError()<EOL>", "docstring": "Get the line thickness that matches the given font.  Used as a\nbase unit for drawing lines such as in a fraction or radical.", "id": "f17193:c8:m8"}
{"signature": "def set_canvas_size(self, w, h, d):", "body": "self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)<EOL>self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)<EOL>", "docstring": "Set the size of the buffer used to render the math expression.\nOnly really necessary for the bitmap backends.", "id": "f17193:c8:m4"}
{"signature": "def get_xheight(self, font, fontsize, dpi):", "body": "raise NotImplementedError()<EOL>", "docstring": "Get the xheight for the given *font* and *fontsize*.", "id": "f17193:c8:m7"}
{"signature": "def to_rgba(self, texstr, color='<STR_LIT>', dpi=<NUM_LIT>, fontsize=<NUM_LIT>):", "body": "x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)<EOL>r, g, b = mcolors.colorConverter.to_rgb(color)<EOL>RGBA = np.zeros((x.shape[<NUM_LIT:0>], x.shape[<NUM_LIT:1>], <NUM_LIT:4>), dtype=np.uint8)<EOL>RGBA[:,:,<NUM_LIT:0>] = int(<NUM_LIT:255>*r)<EOL>RGBA[:,:,<NUM_LIT:1>] = int(<NUM_LIT:255>*g)<EOL>RGBA[:,:,<NUM_LIT:2>] = int(<NUM_LIT:255>*b)<EOL>RGBA[:,:,<NUM_LIT:3>] = x<EOL>return RGBA, depth<EOL>", "docstring": "*texstr*\n    A valid mathtext string, eg r'IQ: $\\sigma_i=15$'\n\n*color*\n    Any matplotlib color argument\n\n*dpi*\n    The dots-per-inch to render the text\n\n*fontsize*\n    The font size in points\n\nReturns a tuple (*array*, *depth*)\n\n  - *array* is an NxM uint8 alpha ubyte mask array of\n    rasterized tex.\n\n  - depth is the offset of the baseline from the bottom of the\n    image in pixels.", "id": "f17193:c45:m3"}
{"signature": "def get_kern(self, font1, fontclass1, sym1, fontsize1,<EOL>font2, fontclass2, sym2, fontsize2, dpi):", "body": "return <NUM_LIT:0.><EOL>", "docstring": "Get the kerning distance for font between *sym1* and *sym2*.\n\n*fontX*: one of the TeX font names::\n\n  tt, it, rm, cal, sf, bf or default (non-math)\n\n*fontclassX*: TODO\n\n*symX*: a symbol in raw TeX form. e.g. '1', 'x' or '\\sigma'\n\n*fontsizeX*: the fontsize in points\n\n*dpi*: the current dots-per-inch", "id": "f17193:c8:m2"}
{"signature": "def parse(self, s, dpi = <NUM_LIT>, prop = None):", "body": "if prop is None:<EOL><INDENT>prop = FontProperties()<EOL><DEDENT>cacheKey = (s, dpi, hash(prop))<EOL>result = self._cache.get(cacheKey)<EOL>if result is not None:<EOL><INDENT>return result<EOL><DEDENT>if self._output == '<STR_LIT>' and rcParams['<STR_LIT>']:<EOL><INDENT>font_output = StandardPsFonts(prop)<EOL><DEDENT>else:<EOL><INDENT>backend = self._backend_mapping[self._output]()<EOL>fontset = rcParams['<STR_LIT>']<EOL>fontset_class = self._font_type_mapping.get(fontset.lower())<EOL>if fontset_class is not None:<EOL><INDENT>font_output = fontset_class(prop, backend)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT><DEDENT>fontsize = prop.get_size_in_points()<EOL>if self._parser is None:<EOL><INDENT>self.__class__._parser = Parser()<EOL><DEDENT>box = self._parser.parse(s, font_output, fontsize, dpi)<EOL>font_output.set_canvas_size(box.width, box.height, box.depth)<EOL>result = font_output.get_results(box)<EOL>self._cache[cacheKey] = result<EOL>self._parser.clear()<EOL>font_output.destroy()<EOL>font_output.mathtext_backend.fonts_object = None<EOL>font_output.mathtext_backend = None<EOL>return result<EOL>", "docstring": "Parse the given math expression *s* at the given *dpi*.  If\n*prop* is provided, it is a\n:class:`~matplotlib.font_manager.FontProperties` object\nspecifying the \"default\" font to use in the math expression,\nused for all non-math text.\n\nThe results are cached, so multiple calls to :meth:`parse`\nwith the same expression should be fast.", "id": "f17193:c45:m1"}
{"signature": "def grow(self):", "body": "self.size -= <NUM_LIT:1><EOL>", "docstring": "Grows one level larger.  There is no limit to how big\nsomething can get.", "id": "f17193:c16:m5"}
{"signature": "def get_used_characters(self):", "body": "return self.used_characters<EOL>", "docstring": "Get the set of characters that were used in the math\nexpression.  Used by backends that need to subset fonts so\nthey know which glyphs to include.", "id": "f17193:c8:m9"}
{"signature": "def hpack(self, w=<NUM_LIT:0.>, m='<STR_LIT>'):", "body": "<EOL>h = <NUM_LIT:0.><EOL>d = <NUM_LIT:0.><EOL>x = <NUM_LIT:0.><EOL>total_stretch = [<NUM_LIT:0.>] * <NUM_LIT:4><EOL>total_shrink = [<NUM_LIT:0.>] * <NUM_LIT:4><EOL>for p in self.children:<EOL><INDENT>if isinstance(p, Char):<EOL><INDENT>x += p.width<EOL>h = max(h, p.height)<EOL>d = max(d, p.depth)<EOL><DEDENT>elif isinstance(p, Box):<EOL><INDENT>x += p.width<EOL>if not isinf(p.height) and not isinf(p.depth):<EOL><INDENT>s = getattr(p, '<STR_LIT>', <NUM_LIT:0.>)<EOL>h = max(h, p.height - s)<EOL>d = max(d, p.depth + s)<EOL><DEDENT><DEDENT>elif isinstance(p, Glue):<EOL><INDENT>glue_spec = p.glue_spec<EOL>x += glue_spec.width<EOL>total_stretch[glue_spec.stretch_order] += glue_spec.stretch<EOL>total_shrink[glue_spec.shrink_order] += glue_spec.shrink<EOL><DEDENT>elif isinstance(p, Kern):<EOL><INDENT>x += p.width<EOL><DEDENT><DEDENT>self.height = h<EOL>self.depth = d<EOL>if m == '<STR_LIT>':<EOL><INDENT>w += x<EOL><DEDENT>self.width = w<EOL>x = w - x<EOL>if x == <NUM_LIT:0.>:<EOL><INDENT>self.glue_sign = <NUM_LIT:0><EOL>self.glue_order = <NUM_LIT:0><EOL>self.glue_ratio = <NUM_LIT:0.><EOL>return<EOL><DEDENT>if x > <NUM_LIT:0.>:<EOL><INDENT>self._set_glue(x, <NUM_LIT:1>, total_stretch, \"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>self._set_glue(x, -<NUM_LIT:1>, total_shrink, \"<STR_LIT>\")<EOL><DEDENT>", "docstring": "The main duty of :meth:`hpack` is to compute the dimensions of\nthe resulting boxes, and to adjust the glue if one of those\ndimensions is pre-specified.  
The computed sizes normally\nenclose all of the material inside the new box; but some items\nmay stick out if negative glue is used, if the box is\noverfull, or if a ``\\\\vbox`` includes other boxes that have\nbeen shifted left.\n\n  - *w*: specifies a width\n\n  - *m*: is either 'exactly' or 'additional'.\n\nThus, ``hpack(w, 'exactly')`` produces a box whose width is\nexactly *w*, while ``hpack(w, 'additional')`` yields a box\nwhose width is the natural width plus *w*.  The default values\nproduce a box with the natural width.", "id": "f17193:c23:m2"}
{"signature": "def render_glyph(self, ox, oy, info):", "body": "raise NotImplementedError()<EOL>", "docstring": "Draw a glyph described by *info* to the reference point (*ox*,\n*oy*).", "id": "f17193:c0:m2"}
{"signature": "def get_results(self, box):", "body": "raise NotImplementedError()<EOL>", "docstring": "Return a backend-specific tuple to return to the backend after\nall processing is done.", "id": "f17193:c0:m4"}
{"signature": "def to_mask(self, texstr, dpi=<NUM_LIT>, fontsize=<NUM_LIT>):", "body": "assert(self._output==\"<STR_LIT>\")<EOL>prop = FontProperties(size=fontsize)<EOL>ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)<EOL>x = ftimage.as_array()<EOL>return x, depth<EOL>", "docstring": "*texstr*\n    A valid mathtext string, eg r'IQ: $\\sigma_i=15$'\n\n*dpi*\n    The dots-per-inch to render the text\n\n*fontsize*\n    The font size in points\n\nReturns a tuple (*array*, *depth*)\n\n  - *array* is an NxM uint8 alpha ubyte mask array of\n    rasterized tex.\n\n  - depth is the offset of the baseline from the bottom of the\n    image in pixels.", "id": "f17193:c45:m2"}
{"signature": "def __init__(self, output):", "body": "self._output = output.lower()<EOL>self._cache = maxdict(<NUM_LIT:50>)<EOL>", "docstring": "Create a MathTextParser for the given backend *output*.", "id": "f17193:c45:m0"}
{"signature": "def parse(self, s, fonts_object, fontsize, dpi):", "body": "self._state_stack = [self.State(fonts_object, '<STR_LIT:default>', '<STR_LIT>', fontsize, dpi)]<EOL>try:<EOL><INDENT>self._expression.parseString(s)<EOL><DEDENT>except ParseException as err:<EOL><INDENT>raise ValueError(\"<STR_LIT:\\n>\".join([<EOL>\"<STR_LIT>\",<EOL>err.line,<EOL>\"<STR_LIT:U+0020>\" * (err.column - <NUM_LIT:1>) + \"<STR_LIT>\",<EOL>str(err)]))<EOL><DEDENT>return self._expr<EOL>", "docstring": "Parse expression *s* using the given *fonts_object* for\noutput, at the given *fontsize* and *dpi*.\n\nReturns the parse tree of :class:`Node` instances.", "id": "f17193:c44:m2"}
{"signature": "def get_sized_alternatives_for_symbol(self, fontname, sym):", "body": "return [(fontname, sym)]<EOL>", "docstring": "Override if your font provides multiple sizes of the same\nsymbol.  Should return a list of symbols matching *sym* in\nvarious sizes.  The expression renderer will select the most\nappropriate size for a given situation from this list.", "id": "f17193:c8:m11"}
{"signature": "def get_kerning(self, next):", "body": "advance = self._metrics.advance - self.width<EOL>kern = <NUM_LIT:0.><EOL>if isinstance(next, Char):<EOL><INDENT>kern = self.font_output.get_kern(<EOL>self.font, self.font_class, self.c, self.fontsize,<EOL>next.font, next.font_class, next.c, next.fontsize,<EOL>self.dpi)<EOL><DEDENT>return advance + kern<EOL>", "docstring": "Return the amount of kerning between this and the given\ncharacter.  Called when characters are strung together into\n:class:`Hlist` to create :class:`Kern` nodes.", "id": "f17193:c20:m4"}
{"signature": "def Error(msg):", "body": "def raise_error(s, loc, toks):<EOL><INDENT>raise ParseFatalException(msg + \"<STR_LIT:\\n>\" + s)<EOL><DEDENT>empty = Empty()<EOL>empty.setParseAction(raise_error)<EOL>return empty<EOL>", "docstring": "Helper class to raise parser errors.", "id": "f17193:m3"}
{"signature": "def _get_info (self, fontname, font_class, sym, fontsize, dpi):", "body": "key = fontname, sym, fontsize, dpi<EOL>tup = self.glyphd.get(key)<EOL>if tup is not None:<EOL><INDENT>return tup<EOL><DEDENT>if (fontname == '<STR_LIT>' and<EOL>(len(sym) > <NUM_LIT:1> or<EOL>not unicodedata.category(str(sym)).startswith(\"<STR_LIT:L>\"))):<EOL><INDENT>fontname = '<STR_LIT>'<EOL><DEDENT>found_symbol = False<EOL>if sym in latex_to_standard:<EOL><INDENT>fontname, num = latex_to_standard[sym]<EOL>glyph = chr(num)<EOL>found_symbol = True<EOL><DEDENT>elif len(sym) == <NUM_LIT:1>:<EOL><INDENT>glyph = sym<EOL>num = ord(glyph)<EOL>found_symbol = True<EOL><DEDENT>else:<EOL><INDENT>warn(\"<STR_LIT>\" % sym,<EOL>MathTextWarning)<EOL><DEDENT>slanted = (fontname == '<STR_LIT>')<EOL>font = self._get_font(fontname)<EOL>if found_symbol:<EOL><INDENT>try:<EOL><INDENT>symbol_name = font.get_name_char(glyph)<EOL><DEDENT>except KeyError:<EOL><INDENT>warn(\"<STR_LIT>\" %<EOL>(font.postscript_name, sym),<EOL>MathTextWarning)<EOL>found_symbol = False<EOL><DEDENT><DEDENT>if not found_symbol:<EOL><INDENT>glyph = sym = '<STR_LIT:?>'<EOL>num = ord(glyph)<EOL>symbol_name = font.get_name_char(glyph)<EOL><DEDENT>offset = <NUM_LIT:0><EOL>scale = <NUM_LIT> * fontsize<EOL>xmin, ymin, xmax, ymax = [val * scale<EOL>for val in font.get_bbox_char(glyph)]<EOL>metrics = Bunch(<EOL>advance  = font.get_width_char(glyph) * scale,<EOL>width    = font.get_width_char(glyph) * scale,<EOL>height   = font.get_height_char(glyph) * scale,<EOL>xmin = xmin,<EOL>xmax = xmax,<EOL>ymin = ymin+offset,<EOL>ymax = ymax+offset,<EOL>iceberg = ymax + offset,<EOL>slanted = slanted<EOL>)<EOL>self.glyphd[key] = Bunch(<EOL>font            = font,<EOL>fontsize        = fontsize,<EOL>postscript_name = font.get_fontname(),<EOL>metrics         = metrics,<EOL>symbol_name     = symbol_name,<EOL>num             = num,<EOL>glyph           = glyph,<EOL>offset          = offset<EOL>)<EOL>return self.glyphd[key]<EOL>", "docstring": 
"load the cmfont, metrics and glyph with caching", "id": "f17193:c14:m2"}
{"signature": "def set_canvas_size(self, w, h, d):", "body": "self.width  = w<EOL>self.height = h<EOL>self.depth  = d<EOL>", "docstring": "Dimension the drawing canvas", "id": "f17193:c0:m1"}
{"signature": "def to_png(self, filename, texstr, color='<STR_LIT>', dpi=<NUM_LIT>, fontsize=<NUM_LIT>):", "body": "rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)<EOL>numrows, numcols, tmp = rgba.shape<EOL>_png.write_png(rgba.tostring(), numcols, numrows, filename)<EOL>return depth<EOL>", "docstring": "Writes a tex expression to a PNG file.\n\nReturns the offset of the baseline from the bottom of the\nimage in pixels.\n\n*filename*\n    A writable filename or fileobject\n\n*texstr*\n    A valid mathtext string, eg r'IQ: $\\sigma_i=15$'\n\n*color*\n    A valid matplotlib color argument\n\n*dpi*\n    The dots-per-inch to render the text\n\n*fontsize*\n    The font size in points\n\nReturns the offset of the baseline from the bottom of the\nimage in pixels.", "id": "f17193:c45:m4"}
{"signature": "def render_filled_rect(self, x1, y1, x2, y2):", "body": "raise NotImplementedError()<EOL>", "docstring": "Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).", "id": "f17193:c0:m3"}
{"signature": "def index_bar(ax, vals,<EOL>facecolor='<STR_LIT:b>', edgecolor='<STR_LIT:l>',<EOL>width=<NUM_LIT:4>, alpha=<NUM_LIT:1.0>, ):", "body": "facecolors = (colorConverter.to_rgba(facecolor, alpha),)<EOL>edgecolors = (colorConverter.to_rgba(edgecolor, alpha),)<EOL>right = width/<NUM_LIT><EOL>left = -width/<NUM_LIT><EOL>bars = [ ( (left, <NUM_LIT:0>), (left, v), (right, v), (right, <NUM_LIT:0>)) for v in vals if v != -<NUM_LIT:1> ]<EOL>sx = ax.figure.dpi * (<NUM_LIT:1.0>/<NUM_LIT>)  <EOL>sy = ax.bbox.height / ax.viewLim.height<EOL>barTransform = Affine2D().scale(sx,sy)<EOL>offsetsBars = [ (i, <NUM_LIT:0>) for i,v in enumerate(vals) if v != -<NUM_LIT:1> ]<EOL>barCollection = PolyCollection(bars,<EOL>facecolors   = facecolors,<EOL>edgecolors   = edgecolors,<EOL>antialiaseds = (<NUM_LIT:0>,),<EOL>linewidths   = (<NUM_LIT:0.5>,),<EOL>offsets      = offsetsBars,<EOL>transOffset  = ax.transData,<EOL>)<EOL>barCollection.set_transform(barTransform)<EOL>minpy, maxx = (<NUM_LIT:0>, len(offsetsBars))<EOL>miny = <NUM_LIT:0><EOL>maxy = max([v for v in vals if v!=-<NUM_LIT:1>])<EOL>corners = (minpy, miny), (maxx, maxy)<EOL>ax.update_datalim(corners)<EOL>ax.autoscale_view()<EOL>ax.add_collection(barCollection)<EOL>return barCollection<EOL>", "docstring": "Add a bar collection graph with height vals (-1 is missing).\n\nax          : an Axes instance to plot to\nwidth       : the bar width in points\nalpha       : bar transparency", "id": "f17194:m10"}
{"signature": "def fetch_historical_yahoo(ticker, date1, date2, cachename=None):", "body": "ticker = ticker.upper()<EOL>d1 = (date1.month-<NUM_LIT:1>, date1.day, date1.year)<EOL>d2 = (date2.month-<NUM_LIT:1>, date2.day, date2.year)<EOL>urlFmt = '<STR_LIT>'<EOL>url =  urlFmt % (d1[<NUM_LIT:0>], d1[<NUM_LIT:1>], d1[<NUM_LIT:2>],<EOL>d2[<NUM_LIT:0>], d2[<NUM_LIT:1>], d2[<NUM_LIT:2>], ticker)<EOL>if cachename is None:<EOL><INDENT>cachename = os.path.join(cachedir, md5(url).hexdigest())<EOL><DEDENT>if os.path.exists(cachename):<EOL><INDENT>fh = file(cachename)<EOL>verbose.report('<STR_LIT>'%(cachename, ticker))<EOL><DEDENT>else:<EOL><INDENT>if not os.path.isdir(cachedir): os.mkdir(cachedir)<EOL>fh = file(cachename, '<STR_LIT:w>')<EOL>fh.write(urlopen(url).read())<EOL>fh.close()<EOL>verbose.report('<STR_LIT>'%(ticker, cachename))<EOL>fh = file(cachename, '<STR_LIT:r>')<EOL><DEDENT>return fh<EOL>", "docstring": "Fetch historical data for ticker between date1 and date2.  date1 and\ndate2 are datetime instances\n\nEx:\nfh = fetch_historical_yahoo('^GSPC', d1, d2)\n\ncachename is the name of the local file cache.  If None, will\ndefault to the md5 hash or the url (which incorporates the ticker\nand date range)\n\na file handle is returned", "id": "f17194:m1"}
{"signature": "def candlestick2(ax, opens, closes, highs, lows, width=<NUM_LIT:4>,<EOL>colorup='<STR_LIT:k>', colordown='<STR_LIT:r>',<EOL>alpha=<NUM_LIT>,<EOL>):", "body": "<EOL>delta = width/<NUM_LIT><EOL>barVerts = [ ( (i-delta, open), (i-delta, close), (i+delta, close), (i+delta, open) ) for i, open, close in zip(range(len(opens)), opens, closes) if open != -<NUM_LIT:1> and close!=-<NUM_LIT:1> ]<EOL>rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(range(len(lows)), lows, highs) if low != -<NUM_LIT:1> ]<EOL>r,g,b = colorConverter.to_rgb(colorup)<EOL>colorup = r,g,b,alpha<EOL>r,g,b = colorConverter.to_rgb(colordown)<EOL>colordown = r,g,b,alpha<EOL>colord = { True : colorup,<EOL>False : colordown,<EOL>}<EOL>colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-<NUM_LIT:1> and close !=-<NUM_LIT:1>]<EOL>assert(len(barVerts)==len(rangeSegments))<EOL>useAA = <NUM_LIT:0>,  <EOL>lw = <NUM_LIT:0.5>,   <EOL>rangeCollection = LineCollection(rangeSegments,<EOL>colors       = ( (<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:1>), ),<EOL>linewidths   = lw,<EOL>antialiaseds = useAA,<EOL>)<EOL>barCollection = PolyCollection(barVerts,<EOL>facecolors   = colors,<EOL>edgecolors   = ( (<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:1>), ),<EOL>antialiaseds = useAA,<EOL>linewidths   = lw,<EOL>)<EOL>minx, maxx = <NUM_LIT:0>, len(rangeSegments)<EOL>miny = min([low for low in lows if low !=-<NUM_LIT:1>])<EOL>maxy = max([high for high in highs if high != -<NUM_LIT:1>])<EOL>corners = (minx, miny), (maxx, maxy)<EOL>ax.update_datalim(corners)<EOL>ax.autoscale_view()<EOL>ax.add_collection(barCollection)<EOL>ax.add_collection(rangeCollection)<EOL>return rangeCollection, barCollection<EOL>", "docstring": "Represent the open, close as a bar line and high low range as a\nvertical line.\n\n\nax          : an Axes instance to plot to\nwidth       : the bar width in points\ncolorup     : the color of the lines where close >= open\ncolordown   : the color of the 
lines where close <  open\nalpha       : bar transparency\n\nreturn value is lineCollection, barCollection", "id": "f17194:m6"}
{"signature": "def plot_day_summary(ax, quotes, ticksize=<NUM_LIT:3>,<EOL>colorup='<STR_LIT:k>', colordown='<STR_LIT:r>',<EOL>):", "body": "lines = []<EOL>for q in quotes:<EOL><INDENT>t, open, close, high, low = q[:<NUM_LIT:5>]<EOL>if close>=open : color = colorup<EOL>else           : color = colordown<EOL>vline = Line2D(<EOL>xdata=(t, t), ydata=(low, high),<EOL>color=color,<EOL>antialiased=False,   <EOL>)<EOL>oline = Line2D(<EOL>xdata=(t, t), ydata=(open, open),<EOL>color=color,<EOL>antialiased=False,<EOL>marker=TICKLEFT,<EOL>markersize=ticksize,<EOL>)<EOL>cline = Line2D(<EOL>xdata=(t, t), ydata=(close, close),<EOL>color=color,<EOL>antialiased=False,<EOL>markersize=ticksize,<EOL>marker=TICKRIGHT)<EOL>lines.extend((vline, oline, cline))<EOL>ax.add_line(vline)<EOL>ax.add_line(oline)<EOL>ax.add_line(cline)<EOL><DEDENT>ax.autoscale_view()<EOL>return lines<EOL>", "docstring": "quotes is a list of (time, open, close, high, low, ...) tuples\n\nRepresent the time, open, close, high, low as a vertical line\nranging from low to high.  The left tick is the open and the right\ntick is the close.\n\ntime must be in float date format - see date2num\n\nax          : an Axes instance to plot to\nticksize    : open/close tick marker in points\ncolorup     : the color of the lines where close >= open\ncolordown   : the color of the lines where close <  open\nreturn value is a list of lines added", "id": "f17194:m3"}
{"signature": "def candlestick(ax, quotes, width=<NUM_LIT>, colorup='<STR_LIT:k>', colordown='<STR_LIT:r>',<EOL>alpha=<NUM_LIT:1.0>):", "body": "OFFSET = width/<NUM_LIT><EOL>lines = []<EOL>patches = []<EOL>for q in quotes:<EOL><INDENT>t, open, close, high, low = q[:<NUM_LIT:5>]<EOL>if close>=open :<EOL><INDENT>color = colorup<EOL>lower = open<EOL>height = close-open<EOL><DEDENT>else           :<EOL><INDENT>color = colordown<EOL>lower = close<EOL>height = open-close<EOL><DEDENT>vline = Line2D(<EOL>xdata=(t, t), ydata=(low, high),<EOL>color='<STR_LIT:k>',<EOL>linewidth=<NUM_LIT:0.5>,<EOL>antialiased=True,<EOL>)<EOL>rect = Rectangle(<EOL>xy    = (t-OFFSET, lower),<EOL>width = width,<EOL>height = height,<EOL>facecolor = color,<EOL>edgecolor = color,<EOL>)<EOL>rect.set_alpha(alpha)<EOL>lines.append(vline)<EOL>patches.append(rect)<EOL>ax.add_line(vline)<EOL>ax.add_patch(rect)<EOL><DEDENT>ax.autoscale_view()<EOL>return lines, patches<EOL>", "docstring": "quotes is a list of (time, open, close, high, low, ...)  tuples.\nAs long as the first 5 elements of the tuples are these values,\nthe tuple can be as long as you want (eg it may store volume).\n\ntime must be in float days format - see date2num\n\nPlot the time, open, close, high, low as a vertical line ranging\nfrom low to high.  Use a rectangular bar to represent the\nopen-close span.  If close >= open, use colorup to color the bar,\notherwise use colordown\n\nax          : an Axes instance to plot to\nwidth       : fraction of a day for the rectangle width\ncolorup     : the color of the rectangle where close >= open\ncolordown   : the color of the rectangle where close <  open\nalpha       : the rectangle alpha level\n\nreturn value is lines, patches where lines is a list of lines\nadded and patches is a list of the rectangle patches added", "id": "f17194:m4"}
{"signature": "def volume_overlay2(ax, closes, volumes,<EOL>colorup='<STR_LIT:k>', colordown='<STR_LIT:r>',<EOL>width=<NUM_LIT:4>, alpha=<NUM_LIT:1.0>):", "body": "return volume_overlay(ax,closes[:-<NUM_LIT:1>],closes[<NUM_LIT:1>:],volumes[<NUM_LIT:1>:],colorup,colordown,width,alpha)<EOL>", "docstring": "Add a volume overlay to the current axes.  The closes are used to\ndetermine the color of the bar.  -1 is missing.  If a value is\nmissing on one it must be missing on all\n\nax          : an Axes instance to plot to\nwidth       : the bar width in points\ncolorup     : the color of the lines where close >= open\ncolordown   : the color of the lines where close <  open\nalpha       : bar transparency\n\nnb: first point is not displayed - it is used only for choosing the\nright color", "id": "f17194:m8"}
{"signature": "def parse_yahoo_historical(fh, asobject=False, adjusted=True):", "body": "results = []<EOL>lines = fh.readlines()<EOL>datefmt = None<EOL>for line in lines[<NUM_LIT:1>:]:<EOL><INDENT>vals = line.split('<STR_LIT:U+002C>')<EOL>if len(vals)!=<NUM_LIT:7>: continue<EOL>datestr = vals[<NUM_LIT:0>]<EOL>if datefmt is None:<EOL><INDENT>try:<EOL><INDENT>datefmt = '<STR_LIT>'<EOL>dt = datetime.date(*time.strptime(datestr, datefmt)[:<NUM_LIT:3>])<EOL><DEDENT>except ValueError:<EOL><INDENT>datefmt = '<STR_LIT>'  <EOL><DEDENT><DEDENT>dt = datetime.date(*time.strptime(datestr, datefmt)[:<NUM_LIT:3>])<EOL>d = date2num(dt)<EOL>open, high, low, close =  [float(val) for val in vals[<NUM_LIT:1>:<NUM_LIT:5>]]<EOL>volume = int(vals[<NUM_LIT:5>])<EOL>if adjusted:<EOL><INDENT>aclose = float(vals[<NUM_LIT:6>])<EOL>m = aclose/close<EOL>open *= m<EOL>high *= m<EOL>low *= m<EOL>close = aclose<EOL><DEDENT>results.append((d, open, close, high, low, volume))<EOL><DEDENT>results.reverse()<EOL>if asobject:<EOL><INDENT>if len(results)==<NUM_LIT:0>: return None<EOL>else:<EOL><INDENT>date, open, close, high, low, volume = list(map(np.asarray, list(zip(*results))))<EOL><DEDENT>return Bunch(date=date, open=open, close=close, high=high, low=low, volume=volume)<EOL><DEDENT>else:<EOL><INDENT>return results<EOL><DEDENT>", "docstring": "Parse the historical data in file handle fh from yahoo finance and return\nresults as a list of\n\nd, open, close, high, low, volume\n\nwhere d is a floating poing representation of date, as returned by date2num\n\nif adjust=True, use adjusted prices", "id": "f17194:m0"}
{"signature": "def suppress( self ):", "body": "return Suppress( self )<EOL>", "docstring": "Suppresses the output of this ParserElement; useful to keep punctuation from\n           cluttering up returned output.", "id": "f17196:c8:m37"}
{"signature": "def validate( self, validateTrace=[] ):", "body": "self.checkRecursion( [] )<EOL>", "docstring": "Check defined expressions for valid structure, check for infinite recursive definitions.", "id": "f17196:c8:m48"}
{"signature": "def getName(self):", "body": "if self.__name:<EOL><INDENT>return self.__name<EOL><DEDENT>elif self.__parent:<EOL><INDENT>par = self.__parent()<EOL>if par:<EOL><INDENT>return par.__lookup(self)<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>elif (len(self) == <NUM_LIT:1> and<EOL>len(self.__tokdict) == <NUM_LIT:1> and<EOL>list(self.__tokdict.values())[<NUM_LIT:0>][<NUM_LIT:0>][<NUM_LIT:1>] in (<NUM_LIT:0>,-<NUM_LIT:1>)):<EOL><INDENT>return list(self.__tokdict.keys())[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Returns the results name for this token expression.", "id": "f17196:c7:m27"}
{"signature": "def __ror__(self, other ):", "body": "if isinstance( other, str ):<EOL><INDENT>other = Literal( other )<EOL><DEDENT>if not isinstance( other, ParserElement ):<EOL><INDENT>warnings.warn(\"<STR_LIT>\" % type(other),<EOL>SyntaxWarning, stacklevel=<NUM_LIT:2>)<EOL>return None<EOL><DEDENT>return other | self<EOL>", "docstring": "Implementation of | operator when left operand is not a ParserElement", "id": "f17196:c8:m30"}
{"signature": "def col (loc,strg):", "body": "return (loc<len(strg) and strg[loc] == '<STR_LIT:\\n>') and <NUM_LIT:1> or loc - strg.rfind(\"<STR_LIT:\\n>\", <NUM_LIT:0>, loc)<EOL>", "docstring": "Returns current column within a string, counting newlines as line separators.\n   The first column is number 1.\n\n   Note: the default parsing behavior is to expand tabs in the input string\n   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information\n   on parsing strings containing <TAB>s, and suggested methods to maintain a\n   consistent view of the parsed string, the parse location, and line and column\n   positions within the parsed string.", "id": "f17196:m1"}
{"signature": "def leaveWhitespace( self ):", "body": "self.skipWhitespace = False<EOL>return self<EOL>", "docstring": "Disables the skipping of whitespace before matching the characters in the\n           ParserElement's defined pattern.  This is normally only used internally by\n           the pyparsing module, but may be needed in some whitespace-sensitive grammars.", "id": "f17196:c8:m38"}
{"signature": "def asList( self ):", "body": "out = []<EOL>for res in self.__toklist:<EOL><INDENT>if isinstance(res,ParseResults):<EOL><INDENT>out.append( res.asList() )<EOL><DEDENT>else:<EOL><INDENT>out.append( res )<EOL><DEDENT><DEDENT>return out<EOL>", "docstring": "Returns the parse results as a nested list of matching tokens, all converted to strings.", "id": "f17196:c7:m22"}
{"signature": "def dump(self,indent='<STR_LIT>',depth=<NUM_LIT:0>):", "body": "out = []<EOL>out.append( indent+_ustr(self.asList()) )<EOL>keys = list(self.items())<EOL>keys.sort()<EOL>for k,v in keys:<EOL><INDENT>if out:<EOL><INDENT>out.append('<STR_LIT:\\n>')<EOL><DEDENT>out.append( \"<STR_LIT>\" % (indent,('<STR_LIT:U+0020>'*depth), k) )<EOL>if isinstance(v,ParseResults):<EOL><INDENT>if list(v.keys()):<EOL><INDENT>out.append( v.dump(indent,depth+<NUM_LIT:1>) )<EOL><DEDENT>else:<EOL><INDENT>out.append(_ustr(v))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>out.append(_ustr(v))<EOL><DEDENT><DEDENT>return \"<STR_LIT>\".join(out)<EOL>", "docstring": "Diagnostic method for listing out the contents of a ParseResults.\n           Accepts an optional indent argument so that this string can be embedded\n           in a nested display of other data.", "id": "f17196:c7:m28"}
{"signature": "def srange(s):", "body": "try:<EOL><INDENT>return \"<STR_LIT>\".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])<EOL><DEDENT>except:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>", "docstring": "r\"\"\"Helper to easily define string ranges for use in Word construction.  Borrows\n       syntax from regexp '[]' string range definitions::\n          srange(\"[0-9]\")   -> \"0123456789\"\n          srange(\"[a-z]\")   -> \"abcdefghijklmnopqrstuvwxyz\"\n          srange(\"[a-z$_]\") -> \"abcdefghijklmnopqrstuvwxyz$_\"\n       The input string must be enclosed in []'s, and the returned string is the expanded\n       character set joined into a single string.\n       The values enclosed in the []'s may be::\n          a single character\n          an escaped character with a leading backslash (such as \\- or \\])\n          an escaped hex character with a leading '\\0x' (\\0x21, which is a '!' character)\n          an escaped octal character with a leading '\\0' (\\041, which is a '!' character)\n          a range of any of the above, separated by a dash ('a-z', etc.)\n          any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)", "id": "f17196:m17"}
{"signature": "def oneOf( strs, caseless=False, useRegex=True ):", "body": "if caseless:<EOL><INDENT>isequal = ( lambda a,b: a.upper() == b.upper() )<EOL>masks = ( lambda a,b: b.upper().startswith(a.upper()) )<EOL>parseElementClass = CaselessLiteral<EOL><DEDENT>else:<EOL><INDENT>isequal = ( lambda a,b: a == b )<EOL>masks = ( lambda a,b: b.startswith(a) )<EOL>parseElementClass = Literal<EOL><DEDENT>if isinstance(strs,(list,tuple)):<EOL><INDENT>symbols = strs[:]<EOL><DEDENT>elif isinstance(strs,str):<EOL><INDENT>symbols = strs.split()<EOL><DEDENT>else:<EOL><INDENT>warnings.warn(\"<STR_LIT>\",<EOL>SyntaxWarning, stacklevel=<NUM_LIT:2>)<EOL><DEDENT>i = <NUM_LIT:0><EOL>while i < len(symbols)-<NUM_LIT:1>:<EOL><INDENT>cur = symbols[i]<EOL>for j,other in enumerate(symbols[i+<NUM_LIT:1>:]):<EOL><INDENT>if ( isequal(other, cur) ):<EOL><INDENT>del symbols[i+j+<NUM_LIT:1>]<EOL>break<EOL><DEDENT>elif ( masks(cur, other) ):<EOL><INDENT>del symbols[i+j+<NUM_LIT:1>]<EOL>symbols.insert(i,other)<EOL>cur = other<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>i += <NUM_LIT:1><EOL><DEDENT><DEDENT>if not caseless and useRegex:<EOL><INDENT>try:<EOL><INDENT>if len(symbols)==len(\"<STR_LIT>\".join(symbols)):<EOL><INDENT>return Regex( \"<STR_LIT>\" % \"<STR_LIT>\".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )<EOL><DEDENT>else:<EOL><INDENT>return Regex( \"<STR_LIT:|>\".join( [ re.escape(sym) for sym in symbols] ) )<EOL><DEDENT><DEDENT>except:<EOL><INDENT>warnings.warn(\"<STR_LIT>\",<EOL>SyntaxWarning, stacklevel=<NUM_LIT:2>)<EOL><DEDENT><DEDENT>return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )<EOL>", "docstring": "Helper to quickly define a set of alternative Literals, and makes sure to do\n       longest-first testing when there is a conflict, regardless of the input order,\n       but returns a MatchFirst for best performance.\n\n       Parameters:\n        - strs - a string of space-delimited literals, or a list of string literals\n        - caseless - 
(default=False) - treat all literals as caseless\n        - useRegex - (default=True) - as an optimization, will generate a Regex\n          object; otherwise, will generate a MatchFirst object (if caseless=True, or\n          if creating a Regex raises an exception)", "id": "f17196:m15"}
{"signature": "def __rxor__(self, other ):", "body": "if isinstance( other, str ):<EOL><INDENT>other = Literal( other )<EOL><DEDENT>if not isinstance( other, ParserElement ):<EOL><INDENT>warnings.warn(\"<STR_LIT>\" % type(other),<EOL>SyntaxWarning, stacklevel=<NUM_LIT:2>)<EOL>return None<EOL><DEDENT>return other ^ self<EOL>", "docstring": "Implementation of ^ operator when left operand is not a ParserElement", "id": "f17196:c8:m32"}
{"signature": "def __add__(self, other ):", "body": "if isinstance( other, str ):<EOL><INDENT>other = Literal( other )<EOL><DEDENT>if not isinstance( other, ParserElement ):<EOL><INDENT>warnings.warn(\"<STR_LIT>\" % type(other),<EOL>SyntaxWarning, stacklevel=<NUM_LIT:2>)<EOL>return None<EOL><DEDENT>return And( [ self, other ] )<EOL>", "docstring": "Implementation of + operator - returns And", "id": "f17196:c8:m23"}
{"signature": "def asDict( self ):", "body": "return dict( list(self.items()) )<EOL>", "docstring": "Returns the named parse results as dictionary.", "id": "f17196:c7:m23"}
{"signature": "def parseFile( self, file_or_filename ):", "body": "try:<EOL><INDENT>file_contents = file_or_filename.read()<EOL><DEDENT>except AttributeError:<EOL><INDENT>f = open(file_or_filename, \"<STR_LIT:rb>\")<EOL>file_contents = f.read()<EOL>f.close()<EOL><DEDENT>return self.parseString(file_contents)<EOL>", "docstring": "Execute the parse expression on the given file or filename.\n           If a filename is specified (instead of a file object),\n           the entire file is opened, read, and closed before parsing.", "id": "f17196:c8:m49"}
{"signature": "def matchPreviousLiteral(expr):", "body": "rep = Forward()<EOL>def copyTokenToRepeater(s,l,t):<EOL><INDENT>if t:<EOL><INDENT>if len(t) == <NUM_LIT:1>:<EOL><INDENT>rep << t[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>tflat = _flatten(t.asList())<EOL>rep << And( [ Literal(tt) for tt in tflat ] )<EOL><DEDENT><DEDENT>else:<EOL><INDENT>rep << Empty()<EOL><DEDENT><DEDENT>expr.addParseAction(copyTokenToRepeater, callDuringTry=True)<EOL>return rep<EOL>", "docstring": "Helper to define an expression that is indirectly defined from\n       the tokens matched in a previous expression, that is, it looks\n       for a 'repeat' of a previous expression.  For example::\n           first = Word(nums)\n           second = matchPreviousLiteral(first)\n           matchExpr = first + \":\" + second\n       will match \"1:1\", but not \"1:2\".  Because this matches a\n       previous literal, will also match the leading \"1:1\" in \"1:10\".\n       If this is not desired, use matchPreviousExpr.\n       Do *not* use with packrat parsing enabled.", "id": "f17196:m12"}
{"signature": "def __sub__(self, other):", "body": "if isinstance( other, str ):<EOL><INDENT>other = Literal( other )<EOL><DEDENT>if not isinstance( other, ParserElement ):<EOL><INDENT>warnings.warn(\"<STR_LIT>\" % type(other),<EOL>SyntaxWarning, stacklevel=<NUM_LIT:2>)<EOL>return None<EOL><DEDENT>return And( [ self, And._ErrorStop(), other ] )<EOL>", "docstring": "Implementation of - operator, returns And with error stop", "id": "f17196:c8:m25"}
{"signature": "def __or__(self, other ):", "body": "if isinstance( other, str ):<EOL><INDENT>other = Literal( other )<EOL><DEDENT>if not isinstance( other, ParserElement ):<EOL><INDENT>warnings.warn(\"<STR_LIT>\" % type(other),<EOL>SyntaxWarning, stacklevel=<NUM_LIT:2>)<EOL>return None<EOL><DEDENT>return MatchFirst( [ self, other ] )<EOL>", "docstring": "Implementation of | operator - returns MatchFirst", "id": "f17196:c8:m29"}
{"signature": "def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):", "body": "super(QuotedString,self).__init__()<EOL>quoteChar = quoteChar.strip()<EOL>if len(quoteChar) == <NUM_LIT:0>:<EOL><INDENT>warnings.warn(\"<STR_LIT>\",SyntaxWarning,stacklevel=<NUM_LIT:2>)<EOL>raise SyntaxError()<EOL><DEDENT>if endQuoteChar is None:<EOL><INDENT>endQuoteChar = quoteChar<EOL><DEDENT>else:<EOL><INDENT>endQuoteChar = endQuoteChar.strip()<EOL>if len(endQuoteChar) == <NUM_LIT:0>:<EOL><INDENT>warnings.warn(\"<STR_LIT>\",SyntaxWarning,stacklevel=<NUM_LIT:2>)<EOL>raise SyntaxError()<EOL><DEDENT><DEDENT>self.quoteChar = quoteChar<EOL>self.quoteCharLen = len(quoteChar)<EOL>self.firstQuoteChar = quoteChar[<NUM_LIT:0>]<EOL>self.endQuoteChar = endQuoteChar<EOL>self.endQuoteCharLen = len(endQuoteChar)<EOL>self.escChar = escChar<EOL>self.escQuote = escQuote<EOL>self.unquoteResults = unquoteResults<EOL>if multiline:<EOL><INDENT>self.flags = re.MULTILINE | re.DOTALL<EOL>self.pattern = r'<STR_LIT>' %( re.escape(self.quoteChar),<EOL>_escapeRegexRangeChars(self.endQuoteChar[<NUM_LIT:0>]),<EOL>(escChar is not None and _escapeRegexRangeChars(escChar) or '<STR_LIT>') )<EOL><DEDENT>else:<EOL><INDENT>self.flags = <NUM_LIT:0><EOL>self.pattern = r'<STR_LIT>' %( re.escape(self.quoteChar),<EOL>_escapeRegexRangeChars(self.endQuoteChar[<NUM_LIT:0>]),<EOL>(escChar is not None and _escapeRegexRangeChars(escChar) or '<STR_LIT>') )<EOL><DEDENT>if len(self.endQuoteChar) > <NUM_LIT:1>:<EOL><INDENT>self.pattern += (<EOL>'<STR_LIT>' + '<STR_LIT>'.join([\"<STR_LIT>\" % (re.escape(self.endQuoteChar[:i]),<EOL>_escapeRegexRangeChars(self.endQuoteChar[i]))<EOL>for i in range(len(self.endQuoteChar)-<NUM_LIT:1>,<NUM_LIT:0>,-<NUM_LIT:1>)]) + '<STR_LIT:)>'<EOL>)<EOL><DEDENT>if escQuote:<EOL><INDENT>self.pattern += (r'<STR_LIT>' % re.escape(escQuote))<EOL><DEDENT>if escChar:<EOL><INDENT>self.pattern += (r'<STR_LIT>' % 
re.escape(escChar))<EOL>self.escCharReplacePattern = re.escape(self.escChar)+\"<STR_LIT>\"<EOL><DEDENT>self.pattern += (r'<STR_LIT>' % re.escape(self.endQuoteChar))<EOL>try:<EOL><INDENT>self.re = re.compile(self.pattern, self.flags)<EOL>self.reString = self.pattern<EOL><DEDENT>except sre_constants.error:<EOL><INDENT>warnings.warn(\"<STR_LIT>\" % self.pattern,<EOL>SyntaxWarning, stacklevel=<NUM_LIT:2>)<EOL>raise<EOL><DEDENT>self.name = _ustr(self)<EOL>self.errmsg = \"<STR_LIT>\" + self.name<EOL>self.mayIndexError = False<EOL>self.mayReturnEmpty = True<EOL>", "docstring": "Defined with the following parameters:\n - quoteChar - string of one or more characters defining the quote delimiting string\n - escChar - character to escape quotes, typically backslash (default=None)\n - escQuote - special quote sequence to escape an embedded quote string (such as SQL's \"\" to escape an embedded \") (default=None)\n - multiline - boolean indicating whether quotes can span multiple lines (default=False)\n - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)\n - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)", "id": "f17196:c18:m0"}
{"signature": "def setDefaultKeywordChars( chars ):", "body": "Keyword.DEFAULT_KEYWORD_CHARS = chars<EOL>", "docstring": "Overrides the default Keyword chars", "id": "f17196:c13:m3"}
{"signature": "def traceParseAction(f):", "body": "f = ParserElement._normalizeParseActionArgs(f)<EOL>def z(*paArgs):<EOL><INDENT>thisFunc = f.__name__<EOL>s,l,t = paArgs[-<NUM_LIT:3>:]<EOL>if len(paArgs)><NUM_LIT:3>:<EOL><INDENT>thisFunc = paArgs[<NUM_LIT:0>].__class__.__name__ + '<STR_LIT:.>' + thisFunc<EOL><DEDENT>sys.stderr.write( \"<STR_LIT>\" % (thisFunc,line(l,s),l,t) )<EOL>try:<EOL><INDENT>ret = f(*paArgs)<EOL><DEDENT>except Exception as exc:<EOL><INDENT>sys.stderr.write( \"<STR_LIT>\" % (thisFunc,exc) )<EOL>raise<EOL><DEDENT>sys.stderr.write( \"<STR_LIT>\" % (thisFunc,ret) )<EOL>return ret<EOL><DEDENT>try:<EOL><INDENT>z.__name__ = f.__name__<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>return z<EOL>", "docstring": "Decorator for debugging parse actions.", "id": "f17196:m8"}
{"signature": "def keys( self ):", "body": "return list(self.__tokdict.keys())<EOL>", "docstring": "Returns all named result keys.", "id": "f17196:c7:m10"}
{"signature": "def setName( self, name ):", "body": "self.name = name<EOL>self.errmsg = \"<STR_LIT>\" + self.name<EOL>if hasattr(self,\"<STR_LIT>\"):<EOL><INDENT>self.exception.msg = self.errmsg<EOL><DEDENT>return self<EOL>", "docstring": "Define name for this expression, for use in debugging.", "id": "f17196:c8:m3"}
{"signature": "def makeXMLTags(tagStr):", "body": "return _makeTags( tagStr, True )<EOL>", "docstring": "Helper to construct opening and closing tag expressions for XML, given a tag name", "id": "f17196:m27"}
{"signature": "def keepOriginalText(s,startLoc,t):", "body": "try:<EOL><INDENT>endloc = getTokensEndLoc()<EOL><DEDENT>except ParseException:<EOL><INDENT>raise ParseFatalException(\"<STR_LIT>\")<EOL><DEDENT>del t[:]<EOL>t += ParseResults(s[startLoc:endloc])<EOL>return t<EOL>", "docstring": "Helper parse action to preserve original parsed text,\n       overriding any nested parse actions.", "id": "f17196:m23"}
{"signature": "def transformString( self, instring ):", "body": "out = []<EOL>lastE = <NUM_LIT:0><EOL>self.keepTabs = True<EOL>for t,s,e in self.scanString( instring ):<EOL><INDENT>out.append( instring[lastE:s] )<EOL>if t:<EOL><INDENT>if isinstance(t,ParseResults):<EOL><INDENT>out += t.asList()<EOL><DEDENT>elif isinstance(t,list):<EOL><INDENT>out += t<EOL><DEDENT>else:<EOL><INDENT>out.append(t)<EOL><DEDENT><DEDENT>lastE = e<EOL><DEDENT>out.append(instring[lastE:])<EOL>return \"<STR_LIT>\".join(map(_ustr,out))<EOL>", "docstring": "Extension to scanString, to modify matching text with modified tokens that may\n           be returned from a parse action.  To use transformString, define a grammar and\n           attach a parse action to it that modifies the returned token list.\n           Invoking transformString() on a target string will then scan for matches,\n           and replace the matched text patterns according to the logic in the parse\n           action.  transformString() returns the resulting transformed string.", "id": "f17196:c8:m21"}
{"signature": "def setDebug( self, flag=True ):", "body": "if flag:<EOL><INDENT>self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )<EOL><DEDENT>else:<EOL><INDENT>self.debug = False<EOL><DEDENT>return self<EOL>", "docstring": "Enable display of debugging messages while doing pattern matching.\n           Set flag to True to enable, False to disable.", "id": "f17196:c8:m43"}
{"signature": "def __xor__(self, other ):", "body": "if isinstance( other, str ):<EOL><INDENT>other = Literal( other )<EOL><DEDENT>if not isinstance( other, ParserElement ):<EOL><INDENT>warnings.warn(\"<STR_LIT>\" % type(other),<EOL>SyntaxWarning, stacklevel=<NUM_LIT:2>)<EOL>return None<EOL><DEDENT>return Or( [ self, other ] )<EOL>", "docstring": "Implementation of ^ operator - returns Or", "id": "f17196:c8:m31"}
{"signature": "def delimitedList( expr, delim=\"<STR_LIT:U+002C>\", combine=False ):", "body": "dlName = _ustr(expr)+\"<STR_LIT>\"+_ustr(delim)+\"<STR_LIT:U+0020>\"+_ustr(expr)+\"<STR_LIT>\"<EOL>if combine:<EOL><INDENT>return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)<EOL><DEDENT>else:<EOL><INDENT>return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)<EOL><DEDENT>", "docstring": "Helper to define a delimited list of expressions - the delimiter defaults to ','.\n       By default, the list elements and delimiters can have intervening whitespace, and\n       comments, but this can be overridden by passing 'combine=True' in the constructor.\n       If combine is set to True, the matching tokens are returned as a single token\n       string, with the delimiters included; otherwise, the matching tokens are returned\n       as a list of tokens, with the delimiters suppressed.", "id": "f17196:m9"}
{"signature": "def setResultsName( self, name, listAllMatches=False ):", "body": "newself = self.copy()<EOL>newself.resultsName = name<EOL>newself.modalResults = not listAllMatches<EOL>return newself<EOL>", "docstring": "Define name for referencing matching tokens as a nested attribute\n           of the returned parse results.\n           NOTE: this returns a *copy* of the original ParserElement object;\n           this is so that the client can define a basic element, such as an\n           integer, and reference it in multiple places with different names.", "id": "f17196:c8:m4"}
{"signature": "def getTokensEndLoc():", "body": "import inspect<EOL>fstack = inspect.stack()<EOL>try:<EOL><INDENT>for f in fstack[<NUM_LIT:2>:]:<EOL><INDENT>if f[<NUM_LIT:3>] == \"<STR_LIT>\":<EOL><INDENT>endloc = f[<NUM_LIT:0>].f_locals[\"<STR_LIT>\"]<EOL>return endloc<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ParseFatalException(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>del fstack<EOL><DEDENT>", "docstring": "Method to be called from within a parse action to determine the end\n       location of the parsed tokens.", "id": "f17196:m24"}
{"signature": "def values( self ):", "body": "return [ v[-<NUM_LIT:1>][<NUM_LIT:0>] for v in list(self.__tokdict.values()) ]<EOL>", "docstring": "Returns all named result values.", "id": "f17196:c7:m15"}
{"signature": "def asXML( self, doctag=None, namedItemsOnly=False, indent=\"<STR_LIT>\", formatted=True ):", "body": "nl = \"<STR_LIT:\\n>\"<EOL>out = []<EOL>namedItems = dict( [ (v[<NUM_LIT:1>],k) for (k,vlist) in list(self.__tokdict.items())<EOL>for v in vlist ] )<EOL>nextLevelIndent = indent + \"<STR_LIT:U+0020>\"<EOL>if not formatted:<EOL><INDENT>indent = \"<STR_LIT>\"<EOL>nextLevelIndent = \"<STR_LIT>\"<EOL>nl = \"<STR_LIT>\"<EOL><DEDENT>selfTag = None<EOL>if doctag is not None:<EOL><INDENT>selfTag = doctag<EOL><DEDENT>else:<EOL><INDENT>if self.__name:<EOL><INDENT>selfTag = self.__name<EOL><DEDENT><DEDENT>if not selfTag:<EOL><INDENT>if namedItemsOnly:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>selfTag = \"<STR_LIT>\"<EOL><DEDENT><DEDENT>out += [ nl, indent, \"<STR_LIT:<>\", selfTag, \"<STR_LIT:>>\" ]<EOL>worklist = self.__toklist<EOL>for i,res in enumerate(worklist):<EOL><INDENT>if isinstance(res,ParseResults):<EOL><INDENT>if i in namedItems:<EOL><INDENT>out += [ res.asXML(namedItems[i],<EOL>namedItemsOnly and doctag is None,<EOL>nextLevelIndent,<EOL>formatted)]<EOL><DEDENT>else:<EOL><INDENT>out += [ res.asXML(None,<EOL>namedItemsOnly and doctag is None,<EOL>nextLevelIndent,<EOL>formatted)]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>resTag = None<EOL>if i in namedItems:<EOL><INDENT>resTag = namedItems[i]<EOL><DEDENT>if not resTag:<EOL><INDENT>if namedItemsOnly:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>resTag = \"<STR_LIT>\"<EOL><DEDENT><DEDENT>xmlBodyText = xml.sax.saxutils.escape(_ustr(res))<EOL>out += [ nl, nextLevelIndent, \"<STR_LIT:<>\", resTag, \"<STR_LIT:>>\",<EOL>xmlBodyText,<EOL>\"<STR_LIT>\", resTag, \"<STR_LIT:>>\" ]<EOL><DEDENT><DEDENT>out += [ nl, indent, \"<STR_LIT>\", selfTag, \"<STR_LIT:>>\" ]<EOL>return \"<STR_LIT>\".join(out)<EOL>", "docstring": "Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.", "id": "f17196:c7:m25"}
{"signature": "def setDefaultWhitespaceChars( chars ):", "body": "ParserElement.DEFAULT_WHITE_CHARS = chars<EOL>", "docstring": "Overrides the default whitespace chars", "id": "f17196:c8:m0"}
{"signature": "def makeHTMLTags(tagStr):", "body": "return _makeTags( tagStr, False )<EOL>", "docstring": "Helper to construct opening and closing tag expressions for HTML, given a tag name", "id": "f17196:m26"}
{"signature": "def enablePackrat():", "body": "if not ParserElement._packratEnabled:<EOL><INDENT>ParserElement._packratEnabled = True<EOL>ParserElement._parse = ParserElement._parseCache<EOL><DEDENT>", "docstring": "Enables \"packrat\" parsing, which adds memoizing to the parsing logic.\n           Repeated parse attempts at the same string location (which happens\n           often in many complex grammars) can immediately return a cached value,\n           instead of re-executing parsing/validating code.  Memoizing is done of\n           both valid results and parsing exceptions.\n\n           This speedup may break existing programs that use parse actions that\n           have side-effects.  For this reason, packrat parsing is disabled when\n           you first import pyparsing.  To activate the packrat feature, your\n           program must call the class method ParserElement.enablePackrat().  If\n           your program uses psyco to \"compile as you go\", you must call\n           enablePackrat before calling psyco.full().  If you do not do this,\n           Python will crash.  For best results, call enablePackrat() immediately\n           after importing pyparsing.", "id": "f17196:c8:m18"}
{"signature": "def items( self ):", "body": "return [(k,self[k]) for k in self.__tokdict]<EOL>", "docstring": "Returns all named result keys and values as a list of tuples.", "id": "f17196:c7:m14"}
{"signature": "def markInputline( self, markerString = \"<STR_LIT>\" ):", "body": "line_str = self.line<EOL>line_column = self.column - <NUM_LIT:1><EOL>if markerString:<EOL><INDENT>line_str = \"<STR_LIT>\".join( [line_str[:line_column],<EOL>markerString, line_str[line_column:]])<EOL><DEDENT>return line_str.strip()<EOL>", "docstring": "Extracts the exception line from the input string, and marks\n           the location of the exception with a special symbol.", "id": "f17196:c1:m4"}
{"signature": "def withAttribute(*args,**attrDict):", "body": "if args:<EOL><INDENT>attrs = args[:]<EOL><DEDENT>else:<EOL><INDENT>attrs = list(attrDict.items())<EOL><DEDENT>attrs = [(k,v) for k,v in attrs]<EOL>def pa(s,l,tokens):<EOL><INDENT>for attrName,attrValue in attrs:<EOL><INDENT>if attrName not in tokens:<EOL><INDENT>raise ParseException(s,l,\"<STR_LIT>\" + attrName)<EOL><DEDENT>if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:<EOL><INDENT>raise ParseException(s,l,\"<STR_LIT>\" %<EOL>(attrName, tokens[attrName], attrValue))<EOL><DEDENT><DEDENT><DEDENT>return pa<EOL>", "docstring": "Helper to create a validating parse action to be used with start tags created\n       with makeXMLTags or makeHTMLTags. Use withAttribute to qualify a starting tag\n       with a required attribute value, to avoid false matches on common tags such as\n       <TD> or <DIV>.\n\n       Call withAttribute with a series of attribute names and values. Specify the list\n       of filter attributes names and values as:\n        - keyword arguments, as in (class=\"Customer\",align=\"right\"), or\n        - a list of name-value tuples, as in ( (\"ns1:class\", \"Customer\"), (\"ns2:align\",\"right\") )\n       For attribute names with a namespace prefix, you must use the second form.  Attribute\n       names are matched insensitive to upper/lower case.\n\n       To verify that the attribute exists, but without specifying a value, pass\n       withAttribute.ANY_VALUE as the value.", "id": "f17196:m28"}
{"signature": "def parseString( self, instring, parseAll=False ):", "body": "ParserElement.resetCache()<EOL>if not self.streamlined:<EOL><INDENT>self.streamline()<EOL><DEDENT>for e in self.ignoreExprs:<EOL><INDENT>e.streamline()<EOL><DEDENT>if not self.keepTabs:<EOL><INDENT>instring = instring.expandtabs()<EOL><DEDENT>loc, tokens = self._parse( instring, <NUM_LIT:0> )<EOL>if parseAll:<EOL><INDENT>StringEnd()._parse( instring, loc )<EOL><DEDENT>return tokens<EOL>", "docstring": "Execute the parse expression with the given string.\n           This is the main interface to the client code, once the complete\n           expression has been built.\n\n           If you want the grammar to require that the entire input string be\n           successfully parsed, then set parseAll to True (equivalent to ending\n           the grammar with StringEnd()).\n\n           Note: parseString implicitly calls expandtabs() on the input string,\n           in order to report proper column numbers in parse actions.\n           If the input string contains tabs and\n           the grammar uses parse actions that use the loc argument to index into the\n           string being parsed, you can ensure you have a consistent view of the input\n           string by:\n            - calling parseWithTabs on your grammar before calling parseString\n              (see L{I{parseWithTabs}<parseWithTabs>})\n            - define your parse action using the full (s,loc,toks) signature, and\n              reference the input string using the parse action's s argument\n            - explictly expand the tabs in your input string before calling\n              parseString", "id": "f17196:c8:m19"}
{"signature": "def countedArray( expr ):", "body": "arrayExpr = Forward()<EOL>def countFieldParseAction(s,l,t):<EOL><INDENT>n = int(t[<NUM_LIT:0>])<EOL>arrayExpr << (n and Group(And([expr]*n)) or Group(empty))<EOL>return []<EOL><DEDENT>return ( Word(nums).setName(\"<STR_LIT>\").setParseAction(countFieldParseAction, callDuringTry=True) + arrayExpr )<EOL>", "docstring": "Helper to define a counted list of expressions.\n       This helper defines a pattern of the form::\n           integer expr expr expr...\n       where the leading integer tells how many expr expressions follow.\n       The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.", "id": "f17196:m10"}
{"signature": "def line( loc, strg ):", "body": "lastCR = strg.rfind(\"<STR_LIT:\\n>\", <NUM_LIT:0>, loc)<EOL>nextCR = strg.find(\"<STR_LIT:\\n>\", loc)<EOL>if nextCR > <NUM_LIT:0>:<EOL><INDENT>return strg[lastCR+<NUM_LIT:1>:nextCR]<EOL><DEDENT>else:<EOL><INDENT>return strg[lastCR+<NUM_LIT:1>:]<EOL><DEDENT>", "docstring": "Returns the line of text containing loc within a string, counting newlines as line separators.", "id": "f17196:m3"}
{"signature": "def get(self, key, defaultValue=None):", "body": "if key in self:<EOL><INDENT>return self[key]<EOL><DEDENT>else:<EOL><INDENT>return defaultValue<EOL><DEDENT>", "docstring": "Returns named result matching the given key, or if there is no\n           such name, then returns the given defaultValue or None if no\n           defaultValue is specified.", "id": "f17196:c7:m12"}
{"signature": "def setDebugActions( self, startAction, successAction, exceptionAction ):", "body": "self.debugActions = (startAction or _defaultStartDebugAction,<EOL>successAction or _defaultSuccessDebugAction,<EOL>exceptionAction or _defaultExceptionDebugAction)<EOL>self.debug = True<EOL>return self<EOL>", "docstring": "Enable display of debugging messages while doing pattern matching.", "id": "f17196:c8:m42"}
{"signature": "def copy( self ):", "body": "ret = ParseResults( self.__toklist )<EOL>ret.__tokdict = self.__tokdict.copy()<EOL>ret.__parent = self.__parent<EOL>ret.__accumNames.update( self.__accumNames )<EOL>ret.__name = self.__name<EOL>return ret<EOL>", "docstring": "Returns a new copy of a ParseResults object.", "id": "f17196:c7:m24"}
{"signature": "def leaveWhitespace( self ):", "body": "self.skipWhitespace = False<EOL>self.exprs = [ e.copy() for e in self.exprs ]<EOL>for e in self.exprs:<EOL><INDENT>e.leaveWhitespace()<EOL><DEDENT>return self<EOL>", "docstring": "Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on\n           all contained expressions.", "id": "f17196:c29:m3"}
{"signature": "def __init__( self, pattern, flags=<NUM_LIT:0>):", "body": "super(Regex,self).__init__()<EOL>if len(pattern) == <NUM_LIT:0>:<EOL><INDENT>warnings.warn(\"<STR_LIT>\",<EOL>SyntaxWarning, stacklevel=<NUM_LIT:2>)<EOL><DEDENT>self.pattern = pattern<EOL>self.flags = flags<EOL>try:<EOL><INDENT>self.re = re.compile(self.pattern, self.flags)<EOL>self.reString = self.pattern<EOL><DEDENT>except sre_constants.error:<EOL><INDENT>warnings.warn(\"<STR_LIT>\" % pattern,<EOL>SyntaxWarning, stacklevel=<NUM_LIT:2>)<EOL>raise<EOL><DEDENT>self.name = _ustr(self)<EOL>self.errmsg = \"<STR_LIT>\" + self.name<EOL>self.mayIndexError = False<EOL>self.mayReturnEmpty = True<EOL>", "docstring": "The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags.", "id": "f17196:c17:m0"}
{"signature": "def lineno(loc,strg):", "body": "return strg.count(\"<STR_LIT:\\n>\",<NUM_LIT:0>,loc) + <NUM_LIT:1><EOL>", "docstring": "Returns current line number within a string, counting newlines as line separators.\n   The first line is number 1.\n\n   Note: the default parsing behavior is to expand tabs in the input string\n   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information\n   on parsing strings containing <TAB>s, and suggested methods to maintain a\n   consistent view of the parsed string, the parse location, and line and column\n   positions within the parsed string.", "id": "f17196:m2"}
{"signature": "def set_mutation_aspect(self, aspect):", "body": "self._mutation_aspect=aspect<EOL>", "docstring": "Set the aspect ratio of the bbox mutation.\n\nACCEPTS: float", "id": "f17197:c16:m5"}
{"signature": "def set_connectionstyle(self, connectionstyle, **kw):", "body": "if connectionstyle==None:<EOL><INDENT>return ConnectionStyle.pprint_styles()<EOL><DEDENT>if isinstance(connectionstyle, ConnectionStyle._Base):<EOL><INDENT>self._connector = connectionstyle<EOL><DEDENT>elif callable(connectionstyle):<EOL><INDENT>self._connector = connectionstyle<EOL><DEDENT>else:<EOL><INDENT>self._connector = ConnectionStyle(connectionstyle, **kw)<EOL><DEDENT>", "docstring": "Set the connection style.\n\n*connectionstyle* can be a string with connectionstyle name with optional\n comma-separated attributes. Alternatively, the attrs can\n be probided as keywords.\n\n set_connectionstyle(\"arc,angleA=0,armA=30,rad=10\")\n set_connectionstyle(\"arc\", angleA=0,armA=30,rad=10)\n\nOld attrs simply are forgotten.\n\nWithout argument (or with connectionstyle=None), return\navailable styles as a list of strings.", "id": "f17197:c19:m5"}
{"signature": "def set_x(self, x):", "body": "self._x = x<EOL>", "docstring": "Set the left coord of the rectangle\n\nACCEPTS: float", "id": "f17197:c2:m11"}
{"signature": "def __init__(self, xy, width, height, angle=<NUM_LIT:0.0>, theta1=<NUM_LIT:0.0>, theta2=<NUM_LIT>, **kwargs):", "body": "fill = kwargs.pop('<STR_LIT>')<EOL>if fill:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>kwargs['<STR_LIT>'] = False<EOL>Ellipse.__init__(self, xy, width, height, angle, **kwargs)<EOL>self.theta1 = theta1<EOL>self.theta2 = theta2<EOL>", "docstring": "The following args are supported:\n\n*xy*\n  center of ellipse\n\n*width*\n  length of horizontal axis\n\n*height*\n  length of vertical axis\n\n*angle*\n  rotation in degrees (anti-clockwise)\n\n*theta1*\n  starting angle of the arc in degrees\n\n*theta2*\n  ending angle of the arc in degrees\n\nIf *theta1* and *theta2* are not provided, the arc will form a\ncomplete ellipse.\n\nValid kwargs are:\n\n%(Patch)s", "id": "f17197:c13:m1"}
{"signature": "def __init__(self, xy, radius=<NUM_LIT:5>,<EOL>resolution=<NUM_LIT:20>,  <EOL>**kwargs):", "body": "RegularPolygon.__init__(self, xy,<EOL>resolution,<EOL>radius,<EOL>orientation=<NUM_LIT:0>,<EOL>**kwargs)<EOL>", "docstring": "Create a circle at *xy* = (*x*, *y*) with given *radius*.\nThis circle is approximated by a regular polygon with\n*resolution* sides.  For a smoother circle drawn with splines,\nsee :class:`~matplotlib.patches.Circle`.\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c10:m1"}
{"signature": "def __init__(self, xy, width, height,<EOL>boxstyle=\"<STR_LIT>\",<EOL>bbox_transmuter=None,<EOL>mutation_scale=<NUM_LIT:1.>,<EOL>mutation_aspect=None,<EOL>**kwargs):", "body": "Patch.__init__(self, **kwargs)<EOL>self._x = xy[<NUM_LIT:0>]<EOL>self._y = xy[<NUM_LIT:1>]<EOL>self._width = width<EOL>self._height = height<EOL>if boxstyle == \"<STR_LIT>\":<EOL><INDENT>if bbox_transmuter is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self._bbox_transmuter = bbox_transmuter<EOL><DEDENT>else:<EOL><INDENT>self.set_boxstyle(boxstyle)<EOL><DEDENT>self._mutation_scale=mutation_scale<EOL>self._mutation_aspect=mutation_aspect<EOL>", "docstring": "*xy* = lower left corner\n\n*width*, *height*\n\n*boxstyle* determines what kind of fancy box will be drawn. It\ncan be a string of the style name with a comma separated\nattribute, or an instance of :class:`BoxStyle`. Following box\nstyles are available.\n\n%(AvailableBoxstyles)s\n\n*mutation_scale* : a value with which attributes of boxstyle\n(e.g., pad) will be scaled. default=1.\n\n*mutation_aspect* : The height of the rectangle will be\nsqueezed by this value before the mutation and the mutated\nbox will be stretched by the inverse of it. default=None.\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c16:m1"}
{"signature": "def set_ec(self, color):", "body": "return self.set_edgecolor(color)<EOL>", "docstring": "alias for set_edgecolor", "id": "f17197:c0:m16"}
{"signature": "def set_bounds(self, *args):", "body": "if len(args)==<NUM_LIT:0>:<EOL><INDENT>l,b,w,h = args[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>l,b,w,h = args<EOL><DEDENT>self._x = l<EOL>self._y = b<EOL>self._width = w<EOL>self._height = h<EOL>", "docstring": "Set the bounds of the rectangle: l,b,w,h\n\nACCEPTS: (left, bottom, width, height)", "id": "f17197:c16:m17"}
{"signature": "def get_width(self):", "body": "return self._width<EOL>", "docstring": "Return the width of the  rectangle", "id": "f17197:c16:m11"}
{"signature": "def get_transform(self):", "body": "return self.get_patch_transform() + artist.Artist.get_transform(self)<EOL>", "docstring": "Return the :class:`~matplotlib.transforms.Transform` applied\nto the :class:`Patch`.", "id": "f17197:c0:m5"}
{"signature": "def set_y(self, y):", "body": "self._y = y<EOL>", "docstring": "Set the bottom coord of the rectangle\n\nACCEPTS: float", "id": "f17197:c16:m14"}
{"signature": "def draw(self, renderer):", "body": "if not self.get_visible(): return<EOL>gc = renderer.new_gc()<EOL>if cbook.is_string_like(self._edgecolor) and self._edgecolor.lower()=='<STR_LIT:none>':<EOL><INDENT>gc.set_linewidth(<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>gc.set_foreground(self._edgecolor)<EOL>gc.set_linewidth(self._linewidth)<EOL>gc.set_linestyle(self._linestyle)<EOL><DEDENT>gc.set_antialiased(self._antialiased)<EOL>self._set_gc_clip(gc)<EOL>gc.set_capstyle('<STR_LIT>')<EOL>gc.set_url(self._url)<EOL>gc.set_snap(self._snap)<EOL>if (not self.fill or self._facecolor is None or<EOL>(cbook.is_string_like(self._facecolor) and self._facecolor.lower()=='<STR_LIT:none>')):<EOL><INDENT>rgbFace = None<EOL>gc.set_alpha(<NUM_LIT:1.0>)<EOL><DEDENT>else:<EOL><INDENT>r, g, b, a = colors.colorConverter.to_rgba(self._facecolor, self._alpha)<EOL>rgbFace = (r, g, b)<EOL>gc.set_alpha(a)<EOL><DEDENT>if self._hatch:<EOL><INDENT>gc.set_hatch(self._hatch )<EOL><DEDENT>path = self.get_path()<EOL>transform = self.get_transform()<EOL>tpath = transform.transform_path_non_affine(path)<EOL>affine = transform.get_affine()<EOL>renderer.draw_path(gc, tpath, affine, rgbFace)<EOL>", "docstring": "Draw the :class:`Patch` to the given *renderer*.", "id": "f17197:c0:m27"}
{"signature": "def __patch__init__(self,<EOL>edgecolor=None,<EOL>facecolor=None,<EOL>linewidth=None,<EOL>linestyle=None,<EOL>antialiased = None,<EOL>hatch = None,<EOL>fill=True,<EOL>**kwargs<EOL>):", "body": "artist.Artist.__init__(self)<EOL>if linewidth is None: linewidth = mpl.rcParams['<STR_LIT>']<EOL>if linestyle is None: linestyle = \"<STR_LIT>\"<EOL>if antialiased is None: antialiased = mpl.rcParams['<STR_LIT>']<EOL>self.set_edgecolor(edgecolor)<EOL>self.set_facecolor(facecolor)<EOL>self.set_linewidth(linewidth)<EOL>self.set_linestyle(linestyle)<EOL>self.set_antialiased(antialiased)<EOL>self.set_hatch(hatch)<EOL>self.fill = fill<EOL>self._combined_transform = transforms.IdentityTransform()<EOL>if len(kwargs): artist.setp(self, **kwargs)<EOL>", "docstring": "The following kwarg properties are supported\n\n%(Patch)s", "id": "f17197:m0"}
{"signature": "def __init__(self, figure, xytip, xybase, width=<NUM_LIT:4>, frac=<NUM_LIT:0.1>, headwidth=<NUM_LIT:12>, **kwargs):", "body": "self.figure = figure<EOL>self.xytip = xytip<EOL>self.xybase = xybase<EOL>self.width = width<EOL>self.frac = frac<EOL>self.headwidth = headwidth<EOL>Patch.__init__(self, **kwargs)<EOL>", "docstring": "Constructor arguments:\n\n*xytip*\n  (*x*, *y*) location of arrow tip\n\n*xybase*\n  (*x*, *y*) location the arrow base mid point\n\n*figure*\n  The :class:`~matplotlib.figure.Figure` instance\n  (fig.dpi)\n\n*width*\n  The width of the arrow in points\n\n*frac*\n  The fraction of the arrow length occupied by the head\n\n*headwidth*\n  The width of the base of the arrow head in points\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c9:m1"}
{"signature": "def get_path(self):", "body": "_path = self.get_path_in_displaycoord()<EOL>return self.get_transform().inverted().transform_path(_path)<EOL>", "docstring": "return the path of the arrow in the data coordinate. Use\nget_path_in_displaycoord() method to retrieve the arrow path\nin the display coord.", "id": "f17197:c19:m13"}
{"signature": "def get_verts(self):", "body": "trans = self.get_transform()<EOL>path = self.get_path()<EOL>polygons = path.to_polygons(trans)<EOL>if len(polygons):<EOL><INDENT>return polygons[<NUM_LIT:0>]<EOL><DEDENT>return []<EOL>", "docstring": "Return a copy of the vertices used in this patch\n\nIf the patch contains B\u00e9zier curves, the curves will be\ninterpolated by line segments.  To access the curves as\ncurves, use :meth:`get_path`.", "id": "f17197:c0:m1"}
{"signature": "def set_fc(self, color):", "body": "return self.set_facecolor(color)<EOL>", "docstring": "alias for set_facecolor", "id": "f17197:c0:m18"}
{"signature": "def set_height(self, h):", "body": "self._height = h<EOL>", "docstring": "Set the height of the rectangle\n\nACCEPTS: float", "id": "f17197:c2:m15"}
{"signature": "def contains(self, mouseevent):", "body": "<EOL>if callable(self._contains): return self._contains(self,mouseevent)<EOL>inside = self.get_path().contains_point(<EOL>(mouseevent.x, mouseevent.y), self.get_transform())<EOL>return inside, {}<EOL>", "docstring": "Test whether the mouse event occurred in the patch.\n\n        Returns T/F, {}", "id": "f17197:c0:m2"}
{"signature": "def __init__(self, xy, radius=<NUM_LIT:5>, **kwargs):", "body": "if '<STR_LIT>' in kwargs:<EOL><INDENT>import warnings<EOL>warnings.warn('<STR_LIT>', DeprecationWarning)<EOL>kwargs.pop('<STR_LIT>')<EOL><DEDENT>self.radius = radius<EOL>Ellipse.__init__(self, xy, radius*<NUM_LIT:2>, radius*<NUM_LIT:2>, **kwargs)<EOL>", "docstring": "Create true circle at center *xy* = (*x*, *y*) with given\n*radius*.  Unlike :class:`~matplotlib.patches.CirclePolygon`\nwhich is a polygonal approximation, this uses B\u00e9zier splines\nand is much closer to a scale-free circle.\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c12:m1"}
{"signature": "def __init__( self, x, y, dx, dy, width=<NUM_LIT:1.0>, **kwargs ):", "body": "Patch.__init__(self, **kwargs)<EOL>L = np.sqrt(dx**<NUM_LIT:2>+dy**<NUM_LIT:2>) or <NUM_LIT:1> <EOL>cx = float(dx)/L<EOL>sx = float(dy)/L<EOL>trans1 = transforms.Affine2D().scale(L, width)<EOL>trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, <NUM_LIT:0.0>, <NUM_LIT:0.0>)<EOL>trans3 = transforms.Affine2D().translate(x, y)<EOL>trans = trans1 + trans2 + trans3<EOL>self._patch_transform = trans.frozen()<EOL>", "docstring": "Draws an arrow, starting at (*x*, *y*), direction and length\ngiven by (*dx*, *dy*) the width of the arrow is scaled by *width*.\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c7:m1"}
{"signature": "def set_y(self, y):", "body": "self._y = y<EOL>", "docstring": "Set the bottom coord of the rectangle\n\nACCEPTS: float", "id": "f17197:c2:m12"}
{"signature": "def __init__(self, center, r, theta1, theta2, width=None, **kwargs):", "body": "Patch.__init__(self, **kwargs)<EOL>self.center = center<EOL>self.r,self.width = r,width<EOL>self.theta1,self.theta2 = theta1,theta2<EOL>delta=theta2-theta1<EOL>if abs((theta2-theta1) - <NUM_LIT>) <= <NUM_LIT>:<EOL><INDENT>theta1,theta2 = <NUM_LIT:0>,<NUM_LIT><EOL>connector = Path.MOVETO<EOL><DEDENT>else:<EOL><INDENT>connector = Path.LINETO<EOL><DEDENT>arc = Path.arc(theta1,theta2)<EOL>if width is not None:<EOL><INDENT>v1 = arc.vertices<EOL>v2 = arc.vertices[::-<NUM_LIT:1>]*float(r-width)/r<EOL>v = np.vstack([v1,v2,v1[<NUM_LIT:0>,:],(<NUM_LIT:0>,<NUM_LIT:0>)])<EOL>c = np.hstack([arc.codes,arc.codes,connector,Path.CLOSEPOLY])<EOL>c[len(arc.codes)]=connector<EOL><DEDENT>else:<EOL><INDENT>v = np.vstack([arc.vertices,[(<NUM_LIT:0>,<NUM_LIT:0>),arc.vertices[<NUM_LIT:0>,:],(<NUM_LIT:0>,<NUM_LIT:0>)]])<EOL>c = np.hstack([arc.codes,[connector,connector,Path.CLOSEPOLY]])<EOL><DEDENT>v *= r<EOL>v += np.asarray(center)<EOL>self._path = Path(v,c)<EOL>self._patch_transform = transforms.IdentityTransform()<EOL>", "docstring": "Draw a wedge centered at *x*, *y* center with radius *r* that\nsweeps *theta1* to *theta2* (in degrees).  If *width* is given,\nthen a partial wedge is drawn from inner radius *r* - *width*\nto outer radius *r*.\n\nValid kwargs are:\n\n%(Patch)s", "id": "f17197:c6:m1"}
{"signature": "def set_arrowstyle(self, arrowstyle=None, **kw):", "body": "if arrowstyle==None:<EOL><INDENT>return ArrowStyle.pprint_styles()<EOL><DEDENT>if isinstance(arrowstyle, ConnectionStyle._Base):<EOL><INDENT>self._arrow_transmuter = arrowstyle<EOL><DEDENT>else:<EOL><INDENT>self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)<EOL><DEDENT>", "docstring": "Set the arrow style.\n\n*arrowstyle* can be a string with arrowstyle name with optional\n comma-separated attributes. Alternatively, the attrs can\n be provided as keywords.\n\n set_arrowstyle(\"Fancy,head_length=0.2\")\n set_arrowstyle(\"fancy\", head_length=0.2)\n\nOld attrs simply are forgotten.\n\nWithout argument (or with arrowstyle=None), return\navailable box styles as a list of strings.", "id": "f17197:c19:m7"}
{"signature": "def get_fill(self):", "body": "return self.fill<EOL>", "docstring": "return whether fill is set", "id": "f17197:c0:m24"}
{"signature": "def get_y(self):", "body": "return self._y<EOL>", "docstring": "Return the bottom coord of the rectangle", "id": "f17197:c2:m7"}
{"signature": "def get_path(self):", "body": "_path = self.get_boxstyle()(self._x, self._y,<EOL>self._width, self._height,<EOL>self.get_mutation_scale(),<EOL>self.get_mutation_aspect())<EOL>return _path<EOL>", "docstring": "Return the mutated path of the rectangle", "id": "f17197:c16:m8"}
{"signature": "def get_xy(self):", "body": "return self._x, self._y<EOL>", "docstring": "Return the left and bottom coords of the rectangle", "id": "f17197:c2:m8"}
{"signature": "def set_width(self, w):", "body": "self._width = w<EOL>", "docstring": "Set the width of the rectangle\n\nACCEPTS: float", "id": "f17197:c2:m14"}
{"signature": "def __init__(self, patch, ox, oy, props=None, **kwargs):", "body": "Patch.__init__(self)<EOL>self.patch = patch<EOL>self.props = props<EOL>self._ox, self._oy = ox, oy<EOL>self._update_transform()<EOL>self._update()<EOL>", "docstring": "Create a shadow of the given *patch* offset by *ox*, *oy*.\n*props*, if not *None*, is a patch property update dictionary.\nIf *None*, the shadow will have the same color as the face,\nbut darkened.\n\nkwargs are\n%(Patch)s", "id": "f17197:c1:m1"}
{"signature": "def __init__(self, xy, width, height, angle=<NUM_LIT:0.0>, **kwargs):", "body": "Patch.__init__(self, **kwargs)<EOL>self.center = xy<EOL>self.width, self.height = width, height<EOL>self.angle = angle<EOL>self._path = Path.unit_circle()<EOL>self._patch_transform = transforms.IdentityTransform()<EOL>", "docstring": "*xy*\n  center of ellipse\n\n*width*\n  length of horizontal axis\n\n*height*\n  length of vertical axis\n\n*angle*\n  rotation in degrees (anti-clockwise)\n\nValid kwargs are:\n%(Patch)s", "id": "f17197:c11:m1"}
{"signature": "def get_path(self):", "body": "return Path.unit_rectangle()<EOL>", "docstring": "Return the vertices of the rectangle", "id": "f17197:c2:m2"}
{"signature": "def bbox_artist(artist, renderer, props=None, fill=True):", "body": "if props is None: props = {}<EOL>props = props.copy() <EOL>pad = props.pop('<STR_LIT>', <NUM_LIT:4>)<EOL>pad = renderer.points_to_pixels(pad)<EOL>bbox = artist.get_window_extent(renderer)<EOL>l,b,w,h = bbox.bounds<EOL>l-=pad/<NUM_LIT><EOL>b-=pad/<NUM_LIT><EOL>w+=pad<EOL>h+=pad<EOL>r = Rectangle(xy=(l,b),<EOL>width=w,<EOL>height=h,<EOL>fill=fill,<EOL>)<EOL>r.set_transform(transforms.IdentityTransform())<EOL>r.set_clip_on( False )<EOL>r.update(props)<EOL>r.draw(renderer)<EOL>", "docstring": "This is a debug function to draw a rectangle around the bounding\nbox returned by\n:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,\nto test whether the artist is returning the correct bbox.\n\n*props* is a dict of rectangle props with the additional property\n'pad' that sets the padding around the bbox in points.", "id": "f17197:m1"}
{"signature": "def get_extents(self):", "body": "return self.get_path().get_extents(self.get_transform())<EOL>", "docstring": "Return a :class:`~matplotlib.transforms.Bbox` object defining\nthe axis-aligned extents of the :class:`Patch`.", "id": "f17197:c0:m4"}
{"signature": "def set_facecolor(self, color):", "body": "if color is None: color = mpl.rcParams['<STR_LIT>']<EOL>self._facecolor = color<EOL>", "docstring": "Set the patch face color\n\nACCEPTS: mpl color spec, or None for default, or 'none' for no color", "id": "f17197:c0:m17"}
{"signature": "def set_linewidth(self, w):", "body": "if w is None: w = mpl.rcParams['<STR_LIT>']<EOL>self._linewidth = w<EOL>", "docstring": "Set the patch linewidth in points\n\nACCEPTS: float or None for default", "id": "f17197:c0:m19"}
{"signature": "def set_ls(self, ls):", "body": "return self.set_linestyle(ls)<EOL>", "docstring": "alias for set_linestyle", "id": "f17197:c0:m22"}
{"signature": "def get_height(self):", "body": "return self._height<EOL>", "docstring": "Return the height of the rectangle", "id": "f17197:c16:m12"}
{"signature": "def get_linestyle(self):", "body": "return self._linestyle<EOL>", "docstring": "Return the linestyle.  Will be one of ['solid' | 'dashed' |\n'dashdot' | 'dotted']", "id": "f17197:c0:m12"}
{"signature": "@classmethod<EOL><INDENT>def pprint_styles(klass):<DEDENT>", "body": "return _pprint_styles(klass._style_list)<EOL>", "docstring": "A class method which returns a string of the available styles.", "id": "f17197:c14:m2"}
{"signature": "def set_linestyle(self, ls):", "body": "if ls is None: ls = \"<STR_LIT>\"<EOL>self._linestyle = ls<EOL>", "docstring": "Set the patch linestyle\n\nACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted']", "id": "f17197:c0:m21"}
{"signature": "def get_edgecolor(self):", "body": "return self._edgecolor<EOL>", "docstring": "Return the edge color of the :class:`Patch`.", "id": "f17197:c0:m9"}
{"signature": "def set_mutation_scale(self, scale):", "body": "self._mutation_scale=scale<EOL>", "docstring": "Set the mutation scale.\n\nACCEPTS: float", "id": "f17197:c16:m3"}
{"signature": "def _update_patch_transform(self):", "body": "x = self.convert_xunits(self._x)<EOL>y = self.convert_yunits(self._y)<EOL>width = self.convert_xunits(self._width)<EOL>height = self.convert_yunits(self._height)<EOL>bbox = transforms.Bbox.from_bounds(x, y, width, height)<EOL>self._rect_transform = transforms.BboxTransformTo(bbox)<EOL>", "docstring": "NOTE: This cannot be called until after this has been added\n                 to an Axes, otherwise unit conversion will fail. This\n                 makes it very important to call the accessor method and\n                 not directly access the transformation member variable.", "id": "f17197:c2:m3"}
{"signature": "def get_boxstyle(self):", "body": "return self._bbox_transmuter<EOL>", "docstring": "Return the boxstyle object", "id": "f17197:c16:m7"}
{"signature": "def update_from(self, other):", "body": "artist.Artist.update_from(self, other)<EOL>self.set_edgecolor(other.get_edgecolor())<EOL>self.set_facecolor(other.get_facecolor())<EOL>self.set_fill(other.get_fill())<EOL>self.set_hatch(other.get_hatch())<EOL>self.set_linewidth(other.get_linewidth())<EOL>self.set_linestyle(other.get_linestyle())<EOL>self.set_transform(other.get_data_transform())<EOL>self.set_figure(other.get_figure())<EOL>self.set_alpha(other.get_alpha())<EOL>", "docstring": "Updates this :class:`Patch` from the properties of *other*.", "id": "f17197:c0:m3"}
{"signature": "def __init__(self, path, **kwargs):", "body": "Patch.__init__(self, **kwargs)<EOL>self._path = path<EOL>", "docstring": "*path* is a :class:`matplotlib.path.Path` object.\n\nValid kwargs are:\n%(Patch)s\n\n.. seealso::\n    :class:`Patch`:\n        For additional kwargs", "id": "f17197:c4:m1"}
{"signature": "def get_mutation_scale(self):", "body": "return self._mutation_scale<EOL>", "docstring": "Return the mutation scale.", "id": "f17197:c16:m4"}
{"signature": "def set_patchA(self, patchA):", "body": "self.patchA = patchA<EOL>", "docstring": "set the begin patch.", "id": "f17197:c19:m3"}
{"signature": "def get_x(self):", "body": "return self._x<EOL>", "docstring": "Return the left coord of the rectangle", "id": "f17197:c16:m9"}
{"signature": "def __init__(self, xy, closed=True, **kwargs):", "body": "Patch.__init__(self, **kwargs)<EOL>xy = np.asarray(xy, np.float_)<EOL>self._path = Path(xy)<EOL>self.set_closed(closed)<EOL>", "docstring": "*xy* is a numpy array with shape Nx2.\n\nIf *closed* is *True*, the polygon will be closed so the\nstarting and ending points are the same.\n\nValid kwargs are:\n%(Patch)s\n\n.. seealso::\n    :class:`Patch`:\n        For additional kwargs", "id": "f17197:c5:m1"}
{"signature": "def get_mutation_aspect(self):", "body": "return self._mutation_aspect<EOL>", "docstring": "Return the aspect ratio of the bbox mutation.", "id": "f17197:c16:m6"}
{"signature": "def set_ticks(self, ticks, minor=False):", "body": "<EOL>ticks = self.convert_units(ticks)<EOL>if len(ticks) > <NUM_LIT:1>:<EOL><INDENT>xleft, xright = self.get_view_interval()<EOL>if xright > xleft:<EOL><INDENT>self.set_view_interval(min(ticks), max(ticks))<EOL><DEDENT>else:<EOL><INDENT>self.set_view_interval(max(ticks), min(ticks))<EOL><DEDENT><DEDENT>if minor:<EOL><INDENT>self.set_minor_locator(mticker.FixedLocator(ticks))<EOL>return self.get_minor_ticks(len(ticks))<EOL><DEDENT>else:<EOL><INDENT>self.set_major_locator( mticker.FixedLocator(ticks) )<EOL>return self.get_major_ticks(len(ticks))<EOL><DEDENT>", "docstring": "Set the locations of the tick marks from sequence ticks\n\nACCEPTS: sequence of floats", "id": "f17198:c4:m54"}
{"signature": "def set_label_position(self, position):", "body": "assert position == '<STR_LIT>' or position == '<STR_LIT>'<EOL>if position == '<STR_LIT>':<EOL><INDENT>self.label.set_verticalalignment('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.label.set_verticalalignment('<STR_LIT>')<EOL><DEDENT>self.label_position=position<EOL>", "docstring": "Set the label position (top or bottom)\n\nACCEPTS: [ 'top' | 'bottom' ]", "id": "f17198:c5:m5"}
{"signature": "def _get_text2(self):", "body": "<EOL>trans, vert, horiz = self.axes.get_xaxis_text2_transform(self._pad)<EOL>t = mtext.Text(<EOL>x=<NUM_LIT:0>, y=<NUM_LIT:1>,<EOL>fontproperties=font_manager.FontProperties(size=rcParams['<STR_LIT>']),<EOL>color=rcParams['<STR_LIT>'],<EOL>verticalalignment=vert,<EOL>horizontalalignment=horiz,<EOL>)<EOL>t.set_transform(trans)<EOL>self._set_artist_props(t)<EOL>return t<EOL>", "docstring": "Get the default Text 2 instance", "id": "f17198:c1:m1"}
{"signature": "def _get_text1(self):", "body": "<EOL>trans, vert, horiz = self.axes.get_yaxis_text1_transform(self._pad)<EOL>t = mtext.Text(<EOL>x=<NUM_LIT:0>, y=<NUM_LIT:0>,<EOL>fontproperties=font_manager.FontProperties(size=rcParams['<STR_LIT>']),<EOL>color=rcParams['<STR_LIT>'],<EOL>verticalalignment=vert,<EOL>horizontalalignment=horiz,<EOL>)<EOL>t.set_transform(trans)<EOL>self._set_artist_props(t)<EOL>return t<EOL>", "docstring": "Get the default Text instance", "id": "f17198:c2:m0"}
{"signature": "def contains(self,mouseevent):", "body": "if callable(self._contains): return self._contains(self,mouseevent)<EOL>x,y = mouseevent.x,mouseevent.y<EOL>try:<EOL><INDENT>trans = self.axes.transAxes.inverted()<EOL>xaxes,yaxes = trans.transform_point((x,y))<EOL><DEDENT>except ValueError:<EOL><INDENT>return False, {}<EOL><DEDENT>l,b = self.axes.transAxes.transform_point((<NUM_LIT:0>,<NUM_LIT:0>))<EOL>r,t = self.axes.transAxes.transform_point((<NUM_LIT:1>,<NUM_LIT:1>))<EOL>inaxis = xaxes>=<NUM_LIT:0> and xaxes<=<NUM_LIT:1> and (<EOL>(y<b and y>b-self.pickradius) or<EOL>(y>t and y<t+self.pickradius))<EOL>return inaxis, {}<EOL>", "docstring": "Test whether the mouse event occurred in the x axis.", "id": "f17198:c5:m0"}
{"signature": "def set_label1(self, s):", "body": "self.label1.set_text(s)<EOL>", "docstring": "Set the text of ticklabel\n\nACCEPTS: str", "id": "f17198:c0:m14"}
{"signature": "def _update_axisinfo(self):", "body": "if self.converter is None:<EOL><INDENT>return<EOL><DEDENT>info = self.converter.axisinfo(self.units)<EOL>if info is None:<EOL><INDENT>return<EOL><DEDENT>if info.majloc is not None and self.major.locator!=info.majloc:<EOL><INDENT>self.set_major_locator(info.majloc)<EOL><DEDENT>if info.minloc is not None and self.minor.locator!=info.minloc:<EOL><INDENT>self.set_minor_locator(info.minloc)<EOL><DEDENT>if info.majfmt is not None and self.major.formatter!=info.majfmt:<EOL><INDENT>self.set_major_formatter(info.majfmt)<EOL><DEDENT>if info.minfmt is not None and self.minor.formatter!=info.minfmt:<EOL><INDENT>self.set_minor_formatter(info.minfmt)<EOL><DEDENT>if info.label is not None:<EOL><INDENT>label = self.get_label()<EOL>label.set_text(info.label)<EOL><DEDENT>", "docstring": "check the axis converter for the stored units to see if the\naxis info needs to be updated", "id": "f17198:c4:m43"}
{"signature": "def get_view_interval(self):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "return the view Interval instance for the axis this tick is ticking", "id": "f17198:c0:m17"}
{"signature": "def get_data_interval(self):", "body": "return self.axes.dataLim.intervalx<EOL>", "docstring": "return the Interval instance for this axis data limits", "id": "f17198:c5:m16"}
{"signature": "def set_major_formatter(self, formatter):", "body": "self.major.formatter = formatter<EOL>formatter.set_axis(self)<EOL>", "docstring": "Set the formatter of the major ticker\n\nACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance", "id": "f17198:c4:m48"}
{"signature": "def _get_tick2line(self):", "body": "pass<EOL>", "docstring": "Get the default line2D instance for tick2", "id": "f17198:c0:m10"}
{"signature": "def get_data_interval(self):", "body": "return self.axes.dataLim.intervaly<EOL>", "docstring": "return the Interval instance for this axis data limits", "id": "f17198:c2:m9"}
{"signature": "def get_pad(self):", "body": "return self._pad<EOL>", "docstring": "Get the value of the tick label pad in points", "id": "f17198:c0:m6"}
{"signature": "def _get_tick2line(self):", "body": "<EOL>l = mlines.Line2D( xdata=(<NUM_LIT:0>,), ydata=(<NUM_LIT:1>,),<EOL>color='<STR_LIT:k>',<EOL>linestyle = '<STR_LIT:None>',<EOL>marker = self._xtickmarkers[<NUM_LIT:1>],<EOL>markersize=self._size,<EOL>)<EOL>l.set_transform(self.axes.get_xaxis_transform())<EOL>self._set_artist_props(l)<EOL>return l<EOL>", "docstring": "Get the default line2D instance", "id": "f17198:c1:m3"}
{"signature": "def set_ticklabels(self, ticklabels, *args, **kwargs):", "body": "<EOL>minor = kwargs.pop('<STR_LIT>', False)<EOL>if minor:<EOL><INDENT>self.set_minor_formatter(mticker.FixedFormatter(ticklabels))<EOL>ticks = self.get_minor_ticks()<EOL><DEDENT>else:<EOL><INDENT>self.set_major_formatter( mticker.FixedFormatter(ticklabels) )<EOL>ticks = self.get_major_ticks()<EOL><DEDENT>self.set_major_formatter( mticker.FixedFormatter(ticklabels) )<EOL>ret = []<EOL>for i, tick in enumerate(ticks):<EOL><INDENT>if i<len(ticklabels):<EOL><INDENT>tick.label1.set_text(ticklabels[i])<EOL>ret.append(tick.label1)<EOL><DEDENT>tick.label1.update(kwargs)<EOL><DEDENT>return ret<EOL>", "docstring": "Set the text values of the tick labels. Return a list of Text\ninstances.  Use *kwarg* *minor=True* to select minor ticks.\n\nACCEPTS: sequence of strings", "id": "f17198:c4:m53"}
{"signature": "def get_data_interval(self):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "return the Interval instance for this axis data limits", "id": "f17198:c4:m12"}
{"signature": "def cla(self):", "body": "self.set_major_locator(mticker.AutoLocator())<EOL>self.set_major_formatter(mticker.ScalarFormatter())<EOL>self.set_minor_locator(mticker.NullLocator())<EOL>self.set_minor_formatter(mticker.NullFormatter())<EOL>self.callbacks = cbook.CallbackRegistry(('<STR_LIT>', '<STR_LIT>'))<EOL>self._gridOnMajor = rcParams['<STR_LIT>']<EOL>self._gridOnMinor = False<EOL>self.label.set_text('<STR_LIT>')<EOL>self._set_artist_props(self.label)<EOL>cbook.popall(self.majorTicks)<EOL>cbook.popall(self.minorTicks)<EOL>self.majorTicks.extend([self._get_tick(major=True)])<EOL>self.minorTicks.extend([self._get_tick(major=False)])<EOL>self._lastNumMajorTicks = <NUM_LIT:1><EOL>self._lastNumMinorTicks = <NUM_LIT:1><EOL>self.converter = None<EOL>self.units = None<EOL>self.set_units(None)<EOL>", "docstring": "clear the current axis", "id": "f17198:c4:m8"}
{"signature": "def _update_offset_text_position(self, bboxes, bboxes2):", "body": "x,y = self.offsetText.get_position()<EOL>if not len(bboxes):<EOL><INDENT>bottom = self.axes.bbox.ymin<EOL><DEDENT>else:<EOL><INDENT>bbox = mtransforms.Bbox.union(bboxes)<EOL>bottom = bbox.y0<EOL><DEDENT>self.offsetText.set_position((x, bottom-self.OFFSETTEXTPAD*self.figure.dpi/<NUM_LIT>))<EOL>", "docstring": "Update the offset_text position based on the sequence of bounding\nboxes of all the ticklabels", "id": "f17198:c5:m7"}
{"signature": "def get_ticks_position(self):", "body": "majt=self.majorTicks[<NUM_LIT:0>]<EOL>mT=self.minorTicks[<NUM_LIT:0>]<EOL>majorRight=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On<EOL>minorRight=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On<EOL>if majorRight and minorRight: return '<STR_LIT:right>'<EOL>majorLeft=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)<EOL>minorLeft=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)<EOL>if majorLeft and minorLeft: return '<STR_LIT:left>'<EOL>majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)<EOL>minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)<EOL>if majorDefault and minorDefault: return '<STR_LIT:default>'<EOL>return '<STR_LIT>'<EOL>", "docstring": "Return the ticks position (left, right, both or unknown)", "id": "f17198:c6:m13"}
{"signature": "def get_text_heights(self, renderer):", "body": "bbox, bbox2 = self.get_ticklabel_extents(renderer)<EOL>padPixels = self.majorTicks[<NUM_LIT:0>].get_pad_pixels()<EOL>above = <NUM_LIT:0.0><EOL>if bbox2.height:<EOL><INDENT>above += bbox2.height + padPixels<EOL><DEDENT>below = <NUM_LIT:0.0><EOL>if bbox.height:<EOL><INDENT>below += bbox.height + padPixels<EOL><DEDENT>if self.get_label_position() == '<STR_LIT>':<EOL><INDENT>above += self.label.get_window_extent(renderer).height + padPixels<EOL><DEDENT>else:<EOL><INDENT>below += self.label.get_window_extent(renderer).height + padPixels<EOL><DEDENT>return above, below<EOL>", "docstring": "Returns the amount of space one should reserve for text\nabove and below the axes.  Returns a tuple (above, below)", "id": "f17198:c5:m8"}
{"signature": "def get_major_formatter(self):", "body": "return self.major.formatter<EOL>", "docstring": "Get the formatter of the major ticker", "id": "f17198:c4:m37"}
{"signature": "def _update_label_position(self, bboxes, bboxes2):", "body": "if not self._autolabelpos: return<EOL>x,y = self.label.get_position()<EOL>if self.label_position == '<STR_LIT:left>':<EOL><INDENT>if not len(bboxes):<EOL><INDENT>left = self.axes.bbox.xmin<EOL><DEDENT>else:<EOL><INDENT>bbox = mtransforms.Bbox.union(bboxes)<EOL>left = bbox.x0<EOL><DEDENT>self.label.set_position( (left-self.LABELPAD*self.figure.dpi/<NUM_LIT>, y))<EOL><DEDENT>else:<EOL><INDENT>if not len(bboxes2):<EOL><INDENT>right = self.axes.bbox.xmax<EOL><DEDENT>else:<EOL><INDENT>bbox = mtransforms.Bbox.union(bboxes2)<EOL>right = bbox.x1<EOL><DEDENT>self.label.set_position( (right+self.LABELPAD*self.figure.dpi/<NUM_LIT>, y))<EOL><DEDENT>", "docstring": "Update the label position based on the sequence of bounding\nboxes of all the ticklabels", "id": "f17198:c6:m6"}
{"signature": "def get_minorticklabels(self):", "body": "ticks = self.get_minor_ticks()<EOL>labels1 = [tick.label1 for tick in ticks if tick.label1On]<EOL>labels2 = [tick.label2 for tick in ticks if tick.label2On]<EOL>return cbook.silent_list('<STR_LIT>', labels1+labels2)<EOL>", "docstring": "Return a list of Text instances for the minor ticklabels", "id": "f17198:c4:m25"}
{"signature": "def set_pad(self, val):", "body": "self._pad = val<EOL>", "docstring": "Set the tick label pad in points\n\nACCEPTS: float", "id": "f17198:c0:m5"}
{"signature": "def get_major_ticks(self, numticks=None):", "body": "if numticks is None:<EOL><INDENT>numticks = len(self.get_major_locator()())<EOL><DEDENT>if len(self.majorTicks) < numticks:<EOL><INDENT>for i in range(numticks - len(self.majorTicks)):<EOL><INDENT>tick = self._get_tick(major=True)<EOL>self.majorTicks.append(tick)<EOL><DEDENT><DEDENT>if self._lastNumMajorTicks < numticks:<EOL><INDENT>protoTick = self.majorTicks[<NUM_LIT:0>]<EOL>for i in range(self._lastNumMajorTicks, len(self.majorTicks)):<EOL><INDENT>tick = self.majorTicks[i]<EOL>if self._gridOnMajor: tick.gridOn = True<EOL>self._copy_tick_props(protoTick, tick)<EOL><DEDENT><DEDENT>self._lastNumMajorTicks = numticks<EOL>ticks = self.majorTicks[:numticks]<EOL>return ticks<EOL>", "docstring": "get the tick instances; grow as necessary", "id": "f17198:c4:m39"}
{"signature": "def draw(self, renderer, *args, **kwargs):", "body": "ticklabelBoxes = []<EOL>ticklabelBoxes2 = []<EOL>if not self.get_visible(): return<EOL>renderer.open_group(__name__)<EOL>interval = self.get_view_interval()<EOL>for tick, loc, label in self.iter_ticks():<EOL><INDENT>if tick is None: continue<EOL>if not mtransforms.interval_contains(interval, loc): continue<EOL>tick.update_position(loc)<EOL>tick.set_label1(label)<EOL>tick.set_label2(label)<EOL>tick.draw(renderer)<EOL>if tick.label1On and tick.label1.get_visible():<EOL><INDENT>extent = tick.label1.get_window_extent(renderer)<EOL>ticklabelBoxes.append(extent)<EOL><DEDENT>if tick.label2On and tick.label2.get_visible():<EOL><INDENT>extent = tick.label2.get_window_extent(renderer)<EOL>ticklabelBoxes2.append(extent)<EOL><DEDENT><DEDENT>self._update_label_position(ticklabelBoxes, ticklabelBoxes2)<EOL>self.label.draw(renderer)<EOL>self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)<EOL>self.offsetText.set_text( self.major.formatter.get_offset() )<EOL>self.offsetText.draw(renderer)<EOL>if <NUM_LIT:0>: <EOL><INDENT>for tick in majorTicks:<EOL><INDENT>label = tick.label1<EOL>mpatches.bbox_artist(label, renderer)<EOL><DEDENT>mpatches.bbox_artist(self.label, renderer)<EOL><DEDENT>renderer.close_group(__name__)<EOL>", "docstring": "Draw the axis lines, grid lines, tick lines and labels", "id": "f17198:c4:m17"}
{"signature": "def _update_label_position(self, bboxes, bboxes2):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Update the label position based on the sequence of bounding\nboxes of all the ticklabels", "id": "f17198:c4:m55"}
{"signature": "def _get_tick1line(self):", "body": "pass<EOL>", "docstring": "Get the default line2D instance for tick1", "id": "f17198:c0:m9"}
{"signature": "def set_major_locator(self, locator):", "body": "self.major.locator = locator<EOL>locator.set_axis(self)<EOL>", "docstring": "Set the locator of the major ticker\n\nACCEPTS: a :class:`~matplotlib.ticker.Locator` instance", "id": "f17198:c4:m50"}
{"signature": "def get_view_interval(self):", "body": "return self.axes.viewLim.intervalx<EOL>", "docstring": "return the Interval instance for this axis view limits", "id": "f17198:c5:m13"}
{"signature": "def get_offset_text(self):", "body": "return self.offsetText<EOL>", "docstring": "Return the axis offsetText as a Text instance", "id": "f17198:c4:m22"}
{"signature": "def get_minorticklocs(self):", "body": "return self.minor.locator()<EOL>", "docstring": "Get the minor tick locations in data coordinates as a numpy array", "id": "f17198:c4:m31"}
{"signature": "def get_minorticklines(self):", "body": "lines = []<EOL>ticks = self.get_minor_ticks()<EOL>for tick in ticks:<EOL><INDENT>lines.append(tick.tick1line)<EOL>lines.append(tick.tick2line)<EOL><DEDENT>return cbook.silent_list('<STR_LIT>', lines)<EOL>", "docstring": "Return the minor tick lines as a list of Line2D instances", "id": "f17198:c4:m28"}
{"signature": "def set_minor_locator(self, locator):", "body": "self.minor.locator = locator<EOL>locator.set_axis(self)<EOL>", "docstring": "Set the locator of the minor ticker\n\nACCEPTS: a :class:`~matplotlib.ticker.Locator` instance", "id": "f17198:c4:m51"}
{"signature": "def set_minor_formatter(self, formatter):", "body": "self.minor.formatter = formatter<EOL>formatter.set_axis(self)<EOL>", "docstring": "Set the formatter of the minor ticker\n\nACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance", "id": "f17198:c4:m49"}
{"signature": "def _update_label_position(self, bboxes, bboxes2):", "body": "if not self._autolabelpos: return<EOL>x,y = self.label.get_position()<EOL>if self.label_position == '<STR_LIT>':<EOL><INDENT>if not len(bboxes):<EOL><INDENT>bottom = self.axes.bbox.ymin<EOL><DEDENT>else:<EOL><INDENT>bbox = mtransforms.Bbox.union(bboxes)<EOL>bottom = bbox.y0<EOL><DEDENT>self.label.set_position( (x, bottom - self.LABELPAD*self.figure.dpi / <NUM_LIT>))<EOL><DEDENT>else:<EOL><INDENT>if not len(bboxes2):<EOL><INDENT>top = self.axes.bbox.ymax<EOL><DEDENT>else:<EOL><INDENT>bbox = mtransforms.Bbox.union(bboxes2)<EOL>top = bbox.y1<EOL><DEDENT>self.label.set_position( (x, top+self.LABELPAD*self.figure.dpi / <NUM_LIT>))<EOL><DEDENT>", "docstring": "Update the label position based on the sequence of bounding\nboxes of all the ticklabels", "id": "f17198:c5:m6"}
{"signature": "def get_ticklabel_extents(self, renderer):", "body": "ticklabelBoxes = []<EOL>ticklabelBoxes2 = []<EOL>interval = self.get_view_interval()<EOL>for tick, loc, label in self.iter_ticks():<EOL><INDENT>if tick is None: continue<EOL>if not mtransforms.interval_contains(interval, loc): continue<EOL>tick.update_position(loc)<EOL>tick.set_label1(label)<EOL>tick.set_label2(label)<EOL>if tick.label1On and tick.label1.get_visible():<EOL><INDENT>extent = tick.label1.get_window_extent(renderer)<EOL>ticklabelBoxes.append(extent)<EOL><DEDENT>if tick.label2On and tick.label2.get_visible():<EOL><INDENT>extent = tick.label2.get_window_extent(renderer)<EOL>ticklabelBoxes2.append(extent)<EOL><DEDENT><DEDENT>if len(ticklabelBoxes):<EOL><INDENT>bbox = mtransforms.Bbox.union(ticklabelBoxes)<EOL><DEDENT>else:<EOL><INDENT>bbox = mtransforms.Bbox.from_extents(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>)<EOL><DEDENT>if len(ticklabelBoxes2):<EOL><INDENT>bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)<EOL><DEDENT>else:<EOL><INDENT>bbox2 = mtransforms.Bbox.from_extents(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>)<EOL><DEDENT>return bbox, bbox2<EOL>", "docstring": "Get the extents of the tick labels on either side\nof the axes.", "id": "f17198:c4:m16"}
{"signature": "def tick_right(self):", "body": "self.set_ticks_position('<STR_LIT:right>')<EOL>", "docstring": "use ticks only on right", "id": "f17198:c6:m11"}
{"signature": "def _get_tick2line(self):", "body": "<EOL>l = mlines.Line2D( (<NUM_LIT:1>,), (<NUM_LIT:0>,), color='<STR_LIT:k>',<EOL>marker = self._ytickmarkers[<NUM_LIT:1>],<EOL>linestyle = '<STR_LIT:None>',<EOL>markersize=self._size,<EOL>)<EOL>l.set_transform(self.axes.get_yaxis_transform())<EOL>self._set_artist_props(l)<EOL>return l<EOL>", "docstring": "Get the default line2D instance", "id": "f17198:c2:m3"}
{"signature": "def tick_left(self):", "body": "self.set_ticks_position('<STR_LIT:left>')<EOL>", "docstring": "use ticks only on left", "id": "f17198:c6:m12"}
{"signature": "def __init__(self, axes, loc, label,<EOL>size   = None,  <EOL>gridOn = None, <EOL>tick1On = True,<EOL>tick2On = True,<EOL>label1On = True,<EOL>label2On = False,<EOL>major = True,<EOL>):", "body": "artist.Artist.__init__(self)<EOL>if gridOn is None: gridOn = rcParams['<STR_LIT>']<EOL>self.set_figure(axes.figure)<EOL>self.axes = axes<EOL>name = self.__name__.lower()<EOL>if size is None:<EOL><INDENT>if major:<EOL><INDENT>size = rcParams['<STR_LIT>'%name]<EOL>pad = rcParams['<STR_LIT>'%name]<EOL><DEDENT>else:<EOL><INDENT>size = rcParams['<STR_LIT>'%name]<EOL>pad = rcParams['<STR_LIT>'%name]<EOL><DEDENT><DEDENT>self._tickdir = rcParams['<STR_LIT>'%name]<EOL>if self._tickdir == '<STR_LIT>':<EOL><INDENT>self._xtickmarkers = (mlines.TICKUP, mlines.TICKDOWN)<EOL>self._ytickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)<EOL>self._pad = pad<EOL><DEDENT>else:<EOL><INDENT>self._xtickmarkers = (mlines.TICKDOWN, mlines.TICKUP)<EOL>self._ytickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)<EOL>self._pad = pad + size<EOL><DEDENT>self._loc = loc<EOL>self._size = size<EOL>self.tick1line = self._get_tick1line()<EOL>self.tick2line = self._get_tick2line()<EOL>self.gridline = self._get_gridline()<EOL>self.label1 = self._get_text1()<EOL>self.label = self.label1  <EOL>self.label2 = self._get_text2()<EOL>self.gridOn = gridOn<EOL>self.tick1On = tick1On<EOL>self.tick2On = tick2On<EOL>self.label1On = label1On<EOL>self.label2On = label2On<EOL>self.update_position(loc)<EOL>", "docstring": "bbox is the Bound2D bounding box in display coords of the Axes\nloc is the tick location in data coords\nsize is the tick size in relative, axes coords", "id": "f17198:c0:m0"}
{"signature": "def tick_top(self):", "body": "self.set_ticks_position('<STR_LIT>')<EOL>", "docstring": "use ticks only on top", "id": "f17198:c5:m10"}
{"signature": "def get_view_interval(self):", "body": "return self.axes.viewLim.intervaly<EOL>", "docstring": "return the Interval instance for this axis view limits", "id": "f17198:c2:m6"}
{"signature": "def _get_gridline(self):", "body": "pass<EOL>", "docstring": "Get the default grid Line2d instance for this tick", "id": "f17198:c0:m11"}
{"signature": "def get_view_interval(self):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "return the Interval instance for this axis view limits", "id": "f17198:c4:m10"}
{"signature": "def _update_offset_text_position(self, bboxes, bboxes2):", "body": "x,y = self.offsetText.get_position()<EOL>top = self.axes.bbox.ymax<EOL>self.offsetText.set_position((x, top+self.OFFSETTEXTPAD*self.figure.dpi/<NUM_LIT>))<EOL>", "docstring": "Update the offset_text position based on the sequence of bounding\nboxes of all the ticklabels", "id": "f17198:c6:m7"}
{"signature": "def get_minor_formatter(self):", "body": "return self.minor.formatter<EOL>", "docstring": "Get the formatter of the minor ticker", "id": "f17198:c4:m38"}
{"signature": "def get_ticklines(self, minor=False):", "body": "if minor:<EOL><INDENT>return self.get_minorticklines()<EOL><DEDENT>return self.get_majorticklines()<EOL>", "docstring": "Return the tick lines as a list of Line2D instances", "id": "f17198:c4:m29"}
{"signature": "def _get_text2(self):", "body": "<EOL>trans, vert, horiz = self.axes.get_yaxis_text2_transform(self._pad)<EOL>t = mtext.Text(<EOL>x=<NUM_LIT:1>, y=<NUM_LIT:0>,<EOL>fontproperties=font_manager.FontProperties(size=rcParams['<STR_LIT>']),<EOL>color=rcParams['<STR_LIT>'],<EOL>verticalalignment=vert,<EOL>horizontalalignment=horiz,<EOL>)<EOL>t.set_transform(trans)<EOL>self._set_artist_props(t)<EOL>return t<EOL>", "docstring": "Get the default Text instance", "id": "f17198:c2:m1"}
{"signature": "def __init__(self, axes, pickradius=<NUM_LIT:15>):", "body": "artist.Artist.__init__(self)<EOL>self.set_figure(axes.figure)<EOL>self.axes = axes<EOL>self.major = Ticker()<EOL>self.minor = Ticker()<EOL>self.callbacks = cbook.CallbackRegistry(('<STR_LIT>', '<STR_LIT>'))<EOL>self._autolabelpos = True<EOL>self.label = self._get_label()<EOL>self.offsetText = self._get_offset_text()<EOL>self.majorTicks = []<EOL>self.minorTicks = []<EOL>self.pickradius = pickradius<EOL>self.cla()<EOL>self.set_scale('<STR_LIT>')<EOL>", "docstring": "Init the axis with the parent Axes instance", "id": "f17198:c4:m1"}
{"signature": "def get_minor_ticks(self, numticks=None):", "body": "if numticks is None:<EOL><INDENT>numticks = len(self.get_minor_locator()())<EOL><DEDENT>if len(self.minorTicks) < numticks:<EOL><INDENT>for i in range(numticks - len(self.minorTicks)):<EOL><INDENT>tick = self._get_tick(major=False)<EOL>self.minorTicks.append(tick)<EOL><DEDENT><DEDENT>if self._lastNumMinorTicks < numticks:<EOL><INDENT>protoTick = self.minorTicks[<NUM_LIT:0>]<EOL>for i in range(self._lastNumMinorTicks, len(self.minorTicks)):<EOL><INDENT>tick = self.minorTicks[i]<EOL>if self._gridOnMinor: tick.gridOn = True<EOL>self._copy_tick_props(protoTick, tick)<EOL><DEDENT><DEDENT>self._lastNumMinorTicks = numticks<EOL>ticks = self.minorTicks[:numticks]<EOL>return ticks<EOL>", "docstring": "get the minor tick instances; grow as necessary", "id": "f17198:c4:m40"}
{"signature": "def contains(self,mouseevent):", "body": "if callable(self._contains): return self._contains(self,mouseevent)<EOL>x,y = mouseevent.x,mouseevent.y<EOL>try:<EOL><INDENT>trans = self.axes.transAxes.inverted()<EOL>xaxes,yaxes = trans.transform_point((x,y))<EOL><DEDENT>except ValueError:<EOL><INDENT>return False, {}<EOL><DEDENT>l,b = self.axes.transAxes.transform_point((<NUM_LIT:0>,<NUM_LIT:0>))<EOL>r,t = self.axes.transAxes.transform_point((<NUM_LIT:1>,<NUM_LIT:1>))<EOL>inaxis = yaxes>=<NUM_LIT:0> and yaxes<=<NUM_LIT:1> and (<EOL>(x<l and x>l-self.pickradius) or<EOL>(x>r and x<r+self.pickradius))<EOL>return inaxis, {}<EOL>", "docstring": "Test whether the mouse event occurred in the y axis.\n\n        Returns *True* | *False*", "id": "f17198:c6:m0"}
{"signature": "def slice2gridspec(key):", "body": "if ((len(key) != <NUM_LIT:2>) or<EOL>(not isinstance(key[<NUM_LIT:0>], slice)) or<EOL>(not isinstance(key[<NUM_LIT:1>], slice))):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>x0 = key[<NUM_LIT:1>].start<EOL>x1 = key[<NUM_LIT:1>].stop<EOL>xstep = key[<NUM_LIT:1>].step<EOL>if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>xstep = int(xstep.imag)<EOL>y0 = key[<NUM_LIT:0>].start<EOL>y1 = key[<NUM_LIT:0>].stop<EOL>ystep = key[<NUM_LIT:0>].step<EOL>if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>ystep = int(ystep.imag)<EOL>return x0, x1, xstep, y0, y1, ystep<EOL>", "docstring": "Convert a 2-tuple of slices to start,stop,steps for x and y.\n\n    key -- (slice(ystart,ystop,ystep), slice(xtart, xstop, xstep))\n\n    For now, the only accepted step values are imaginary integers (interpreted\n    in the same way numpy.mgrid, etc. do).", "id": "f17199:m0"}
{"signature": "def nn_interpolator(self, z, default_value=np.nan):", "body": "z = np.asarray(z, dtype=np.float64)<EOL>if z.shape != self.old_shape:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if self.j_unique is not None:<EOL><INDENT>z = z[self.j_unique]<EOL><DEDENT>return NNInterpolator(self, z, default_value)<EOL>", "docstring": "Get an object which can interpolate within the convex hull by\n        the natural neighbors method.\n\n        z -- an array of floats giving the known function values at each point\n          in the triangulation.", "id": "f17202:c1:m4"}
{"signature": "def _compute_convex_hull(self):", "body": "border = (self.triangle_neighbors == -<NUM_LIT:1>)<EOL>edges = {}<EOL>edges.update(dict(zip(self.triangle_nodes[border[:,<NUM_LIT:0>]][:,<NUM_LIT:1>],<EOL>self.triangle_nodes[border[:,<NUM_LIT:0>]][:,<NUM_LIT:2>])))<EOL>edges.update(dict(zip(self.triangle_nodes[border[:,<NUM_LIT:1>]][:,<NUM_LIT:2>],<EOL>self.triangle_nodes[border[:,<NUM_LIT:1>]][:,<NUM_LIT:0>])))<EOL>edges.update(dict(zip(self.triangle_nodes[border[:,<NUM_LIT:2>]][:,<NUM_LIT:0>],<EOL>self.triangle_nodes[border[:,<NUM_LIT:2>]][:,<NUM_LIT:1>])))<EOL>hull = list(edges.popitem())<EOL>while edges:<EOL><INDENT>hull.append(edges.pop(hull[-<NUM_LIT:1>]))<EOL><DEDENT>hull.pop()<EOL>return hull<EOL>", "docstring": "Extract the convex hull from the triangulation information.\n\n        The output will be a list of point_id's in counter-clockwise order\n        forming the convex hull of the data set.", "id": "f17202:c1:m2"}
{"signature": "def node_graph(self):", "body": "g = {}<EOL>for i, j in self.edge_db:<EOL><INDENT>s = g.setdefault(i, set())<EOL>s.add(j)<EOL>s = g.setdefault(j, set())<EOL>s.add(i)<EOL><DEDENT>return g<EOL>", "docstring": "Return a graph of node_id's pointing to node_id's.\n\n        The arcs of the graph correspond to the edges in the triangulation.\n\n        {node_id: set([node_id, ...]), ...}", "id": "f17202:c1:m8"}
{"signature": "def linear_interpolator(self, z, default_value=np.nan):", "body": "z = np.asarray(z, dtype=np.float64)<EOL>if z.shape != self.old_shape:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>if self.j_unique is not None:<EOL><INDENT>z = z[self.j_unique]<EOL><DEDENT>return LinearInterpolator(self, z, default_value)<EOL>", "docstring": "Get an object which can interpolate within the convex hull by\n        assigning a plane to each triangle.\n\n        z -- an array of floats giving the known function values at each point\n          in the triangulation.", "id": "f17202:c1:m3"}
{"signature": "def _collapse_duplicate_points(self):", "body": "<EOL>j_sorted = np.lexsort(keys=(self.x, self.y))<EOL>mask_unique = np.hstack([<EOL>True,<EOL>(np.diff(self.x[j_sorted]) != <NUM_LIT:0>) | (np.diff(self.y[j_sorted]) != <NUM_LIT:0>),<EOL>])<EOL>return j_sorted[mask_unique]<EOL>", "docstring": "Generate index array that picks out unique x,y points.\n\n        This appears to be required by the underlying delaunay triangulation\n        code.", "id": "f17202:c1:m1"}
{"signature": "def is_numlike(x):", "body": "if iterable(x):<EOL><INDENT>for thisx in x:<EOL><INDENT>return is_numlike(thisx)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return is_numlike(x)<EOL><DEDENT>", "docstring": "The matplotlib datalim, autoscaling, locators etc work with\nscalars which are the units converted to floats given the\ncurrent unit.  The converter may be passed these floats, or\narrays of them, even when units are set.  Derived conversion\ninterfaces may opt to pass plain-ol unitless numbers through\nthe conversion interface and this is a helper function for\nthem.", "id": "f17205:c1:m3"}
{"signature": "def convert(obj, unit):", "body": "return obj<EOL>", "docstring": "convert obj using unit.  If obj is a sequence, return the\nconverted sequence.  The ouput must be a sequence of scalars\nthat can be used by the numpy array layer", "id": "f17205:c1:m2"}
{"signature": "def __init__(self, majloc=None, minloc=None,<EOL>majfmt=None, minfmt=None, label=None):", "body": "self.majloc = majloc<EOL>self.minloc = minloc<EOL>self.majfmt = majfmt<EOL>self.minfmt = minfmt<EOL>self.label = label<EOL>", "docstring": "majloc and minloc: TickLocators for the major and minor ticks\nmajfmt and minfmt: TickFormatters for the major and minor ticks\nlabel: the default axis label\n\nIf any of the above are None, the axis will simply use the default", "id": "f17205:c0:m0"}
{"signature": "def get_converter(self, x):", "body": "if not len(self): return None <EOL>converter = None<EOL>classx = getattr(x, '<STR_LIT>', None)<EOL>if classx is not None:<EOL><INDENT>converter = self.get(classx)<EOL><DEDENT>if converter is None and iterable(x):<EOL><INDENT>if isinstance(x, np.ndarray) and x.dtype != np.object:<EOL><INDENT>return None<EOL><DEDENT>for thisx in x:<EOL><INDENT>converter = self.get_converter( thisx )<EOL>return converter<EOL><DEDENT><DEDENT>return converter<EOL>", "docstring": "get the converter interface instance for x, or None", "id": "f17205:c2:m1"}
{"signature": "def get_backend():", "body": "return rcParams['<STR_LIT>']<EOL>", "docstring": "Returns the current backend", "id": "f17206:m20"}
{"signature": "def _get_data_path():", "body": "if '<STR_LIT>' in os.environ:<EOL><INDENT>path = os.environ['<STR_LIT>']<EOL>if not os.path.isdir(path):<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>return path<EOL><DEDENT>path = os.sep.join([os.path.dirname(__file__), '<STR_LIT>'])<EOL>if os.path.isdir(path): return path<EOL>import matplotlib.afm<EOL>path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), '<STR_LIT>'])<EOL>if os.path.isdir(path): return path<EOL>if getattr(sys,'<STR_LIT>',None):<EOL><INDENT>path = os.path.join(os.path.split(sys.path[<NUM_LIT:0>])[<NUM_LIT:0>], '<STR_LIT>')<EOL>if os.path.isdir(path): return path<EOL>else:<EOL><INDENT>path = os.path.join(os.path.split(os.path.split(sys.path[<NUM_LIT:0>])[<NUM_LIT:0>])[<NUM_LIT:0>],<EOL>'<STR_LIT>')<EOL><DEDENT>if os.path.isdir(path): return path<EOL>else:<EOL><INDENT>path = os.path.join(sys.path[<NUM_LIT:0>], '<STR_LIT>')<EOL>if os.path.isdir(path): return path<EOL><DEDENT><DEDENT>raise RuntimeError('<STR_LIT>')<EOL>", "docstring": "get the path to matplotlib data", "id": "f17206:m11"}
{"signature": "def get_example_data(fname):", "body": "datadir = os.path.join(get_data_path(), '<STR_LIT>')<EOL>fullpath = os.path.join(datadir, fname)<EOL>if not os.path.exists(fullpath):<EOL><INDENT>raise IOError('<STR_LIT>'%(<EOL>fname, datadir))<EOL><DEDENT>return file(fullpath, '<STR_LIT:rb>')<EOL>", "docstring": "return a filehandle to one of the example files in mpl-data/example\n\n*fname*\n    the name of one of the files in mpl-data/example", "id": "f17206:m13"}
{"signature": "def rcdefaults():", "body": "rcParams.update(rcParamsDefault)<EOL>", "docstring": "Restore the default rc params - the ones that were created at\nmatplotlib load time.", "id": "f17206:m18"}
{"signature": "def _get_home():", "body": "path='<STR_LIT>'<EOL>try:<EOL><INDENT>path=os.path.expanduser(\"<STR_LIT>\")<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>if not os.path.isdir(path):<EOL><INDENT>for evar in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>try:<EOL><INDENT>path = os.environ[evar]<EOL>if os.path.isdir(path):<EOL><INDENT>break<EOL><DEDENT><DEDENT>except: pass<EOL><DEDENT><DEDENT>if path:<EOL><INDENT>return path<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>", "docstring": "Find user's home directory if possible.\n    Otherwise raise error.\n\n    :see:  http://mail.python.org/pipermail/python-list/2005-February/263921.html", "id": "f17206:m9"}
{"signature": "def compare_versions(a, b):", "body": "if a:<EOL><INDENT>a = distutils.version.LooseVersion(a)<EOL>b = distutils.version.LooseVersion(b)<EOL>if a>=b: return True<EOL>else: return False<EOL><DEDENT>else: return False<EOL>", "docstring": "return True if a is greater than or equal to b", "id": "f17206:m6"}
{"signature": "def rc_params(fail_on_error=False):", "body": "fname = matplotlib_fname()<EOL>if not os.path.exists(fname):<EOL><INDENT>message = '<STR_LIT>'<EOL>ret = RcParams([ (key, default) for key, (default, converter) indefaultParams.items() ])<EOL>warnings.warn(message)<EOL>return ret<EOL><DEDENT>cnt = <NUM_LIT:0><EOL>rc_temp = {}<EOL>for line in file(fname):<EOL><INDENT>cnt += <NUM_LIT:1><EOL>strippedline = line.split('<STR_LIT:#>',<NUM_LIT:1>)[<NUM_LIT:0>].strip()<EOL>if not strippedline: continue<EOL>tup = strippedline.split('<STR_LIT::>',<NUM_LIT:1>)<EOL>if len(tup) !=<NUM_LIT:2>:<EOL><INDENT>warnings.warn('<STR_LIT>'%(cnt, line, fname))<EOL>continue<EOL><DEDENT>key, val = tup<EOL>key = key.strip()<EOL>val = val.strip()<EOL>if key in rc_temp:<EOL><INDENT>warnings.warn('<STR_LIT>'%(fname,cnt))<EOL><DEDENT>rc_temp[key] = (val, line, cnt)<EOL><DEDENT>ret = RcParams([ (key, default) for key, (default, converter) indefaultParams.items() ])<EOL>for key in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if key in rc_temp:<EOL><INDENT>val, line, cnt = rc_temp.pop(key)<EOL>if fail_on_error:<EOL><INDENT>ret[key] = val <EOL><DEDENT>else:<EOL><INDENT>try: ret[key] = val <EOL>except Exception as msg:<EOL><INDENT>warnings.warn('<STR_LIT>' % (val, cnt, line, fname, msg))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>verbose.set_level(ret['<STR_LIT>'])<EOL>verbose.set_fileo(ret['<STR_LIT>'])<EOL>for key, (val, line, cnt) in rc_temp.items():<EOL><INDENT>if key in defaultParams:<EOL><INDENT>if fail_on_error:<EOL><INDENT>ret[key] = val <EOL><DEDENT>else:<EOL><INDENT>try: ret[key] = val <EOL>except Exception as msg:<EOL><INDENT>warnings.warn('<STR_LIT>' % (val, cnt, line, fname, msg))<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>print(\"\"\"<STR_LIT>\"\"\" % (key, cnt, fname), file=sys.stderr)<EOL><DEDENT><DEDENT>if ret['<STR_LIT>'] is None:<EOL><INDENT>ret['<STR_LIT>'] = get_data_path()<EOL><DEDENT>if not ret['<STR_LIT>'] == ['<STR_LIT>']:<EOL><INDENT>verbose.report(", "docstring": "Return 
the default params updated from the values in the rc file", "id": "f17206:m16"}
{"signature": "def _is_writable_dir(p):", "body": "try: p + '<STR_LIT>'  <EOL>except TypeError: return False<EOL>try:<EOL><INDENT>t = tempfile.TemporaryFile(dir=p)<EOL>t.write('<STR_LIT:1>')<EOL>t.close()<EOL><DEDENT>except OSError: return False<EOL>else: return True<EOL>", "docstring": "p is a string pointing to a putative writable dir -- return True p\nis such a string, else False", "id": "f17206:m1"}
{"signature": "def matplotlib_fname():", "body": "oldname = os.path.join( os.getcwd(), '<STR_LIT>')<EOL>if os.path.exists(oldname):<EOL><INDENT>print(\"\"\"<STR_LIT>\"\"\", file=sys.stderr)<EOL>shutil.move('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>home = get_home()<EOL>oldname = os.path.join( home, '<STR_LIT>')<EOL>if os.path.exists(oldname):<EOL><INDENT>configdir = get_configdir()<EOL>newname = os.path.join(configdir, '<STR_LIT>')<EOL>print(\"\"\"<STR_LIT>\"\"\"%(oldname, newname), file=sys.stderr)<EOL>shutil.move(oldname, newname)<EOL><DEDENT>fname = os.path.join( os.getcwd(), '<STR_LIT>')<EOL>if os.path.exists(fname): return fname<EOL>if '<STR_LIT>' in os.environ:<EOL><INDENT>path =  os.environ['<STR_LIT>']<EOL>if os.path.exists(path):<EOL><INDENT>fname = os.path.join(path, '<STR_LIT>')<EOL>if os.path.exists(fname):<EOL><INDENT>return fname<EOL><DEDENT><DEDENT><DEDENT>fname = os.path.join(get_configdir(), '<STR_LIT>')<EOL>if os.path.exists(fname): return fname<EOL>path =  get_data_path() <EOL>fname = os.path.join(path, '<STR_LIT>')<EOL>if not os.path.exists(fname):<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL><DEDENT>return fname<EOL>", "docstring": "Return the path to the rc file\n\nSearch order:\n\n * current working dir\n * environ var MATPLOTLIBRC\n * HOME/.matplotlib/matplotlibrc\n * MATPLOTLIBDATA/matplotlibrc", "id": "f17206:m15"}
{"signature": "def _get_configdir():", "body": "configdir = os.environ.get('<STR_LIT>')<EOL>if configdir is not None:<EOL><INDENT>if not _is_writable_dir(configdir):<EOL><INDENT>raise RuntimeError('<STR_LIT>'%configdir)<EOL><DEDENT>return configdir<EOL><DEDENT>h = get_home()<EOL>p = os.path.join(get_home(), '<STR_LIT>')<EOL>if os.path.exists(p):<EOL><INDENT>if not _is_writable_dir(p):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"% (h, h))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if not _is_writable_dir(h):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"%h)<EOL><DEDENT>os.mkdir(p)<EOL><DEDENT>return p<EOL>", "docstring": "Return the string representing the configuration dir.\n\ndefault is HOME/.matplotlib.  you can override this with the\nMPLCONFIGDIR environment variable", "id": "f17206:m10"}
{"signature": "def set_alpha(self, alpha):", "body": "self._alpha = alpha<EOL>", "docstring": "Set the alpha value used for blending - not supported on\nall backends", "id": "f17207:c1:m14"}
{"signature": "def switch_backends(self, FigureCanvasClass):", "body": "newCanvas = FigureCanvasClass(self.figure)<EOL>return newCanvas<EOL>", "docstring": "instantiate an instance of FigureCanvasClass\n\nThis is used for backend switching, eg, to instantiate a\nFigureCanvasPS from a FigureCanvasGTK.  Note, deep copying is\nnot done, so any changes to one of the instances (eg, setting\nfigure size or line props), will be reflected in the other", "id": "f17207:c10:m35"}
{"signature": "def set_snap(self, snap):", "body": "self._snap = snap<EOL>", "docstring": "Sets the snap setting which may be:\n\n  * True: snap vertices to the nearest pixel center\n\n  * False: leave vertices as-is\n\n  * None: (auto) If the path contains only rectilinear line\n    segments, round to the nearest pixel center", "id": "f17207:c1:m26"}
{"signature": "def forward(self, *args):", "body": "self._views.forward()<EOL>self._positions.forward()<EOL>self.set_history_buttons()<EOL>self._update_view()<EOL>", "docstring": "move forward in the view lim stack", "id": "f17207:c13:m5"}
{"signature": "def _iter_collection_raw_paths(self, master_transform, paths, all_transforms):", "body": "Npaths      = len(paths)<EOL>Ntransforms = len(all_transforms)<EOL>N           = max(Npaths, Ntransforms)<EOL>if Npaths == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>transform = transforms.IdentityTransform()<EOL>for i in range(N):<EOL><INDENT>path = paths[i % Npaths]<EOL>if Ntransforms:<EOL><INDENT>transform = all_transforms[i % Ntransforms]<EOL><DEDENT>yield path, transform + master_transform<EOL><DEDENT>", "docstring": "This is a helper method (along with :meth:`_iter_collection`) to make\nit easier to write a space-efficent :meth:`draw_path_collection`\nimplementation in a backend.\n\nThis method yields all of the base path/transform\ncombinations, given a master transform, a list of paths and\nlist of transforms.\n\nThe arguments should be exactly what is passed in to\n:meth:`draw_path_collection`.\n\nThe backend should take each yielded path and transform and\ncreate an object that can be referenced (reused) later.", "id": "f17207:c0:m7"}
{"signature": "def flipy(self):", "body": "return True<EOL>", "docstring": "Return true if y small numbers are top for renderer Is used\nfor drawing text (:mod:`matplotlib.text`) and images\n(:mod:`matplotlib.image`) only", "id": "f17207:c0:m14"}
{"signature": "def release(self, event):", "body": "pass<EOL>", "docstring": "this will be called whenever mouse button is released", "id": "f17207:c13:m14"}
{"signature": "def set_clip_rectangle(self, rectangle):", "body": "self._cliprect = rectangle<EOL>", "docstring": "Set the clip rectangle with sequence (left, bottom, width, height)", "id": "f17207:c1:m17"}
{"signature": "def key_release_event(self, key, guiEvent=None):", "body": "s = '<STR_LIT>'<EOL>event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)<EOL>self.callbacks.process(s, event)<EOL>self._key = None<EOL>", "docstring": "This method will be call all functions connected to the\n'key_release_event' with a :class:`KeyEvent`", "id": "f17207:c10:m9"}
{"signature": "def draw(self, *args, **kwargs):", "body": "pass<EOL>", "docstring": "Render the :class:`~matplotlib.figure.Figure`", "id": "f17207:c10:m18"}
{"signature": "def drag_pan(self, event):", "body": "for a, ind in self._xypress:<EOL><INDENT>a.drag_pan(self._button_pressed, event.key, event.x, event.y)<EOL><DEDENT>self.dynamic_update()<EOL>", "docstring": "the drag callback in pan/zoom mode", "id": "f17207:c13:m16"}
{"signature": "def set_linestyle(self, style):", "body": "try:<EOL><INDENT>offset, dashes = self.dashd[style]<EOL><DEDENT>except:<EOL><INDENT>raise ValueError('<STR_LIT>' % style)<EOL><DEDENT>self._linestyle = style<EOL>self.set_dashes(offset, dashes)<EOL>", "docstring": "Set the linestyle to be one of ('solid', 'dashed', 'dashdot',\n'dotted').", "id": "f17207:c1:m24"}
{"signature": "def enter_notify_event(self, guiEvent=None):", "body": "event = Event('<STR_LIT>', self, guiEvent)<EOL>self.callbacks.process('<STR_LIT>', event)<EOL>", "docstring": "Backend derived classes should call this function when entering\ncanvas\n\n*guiEvent*\n    the native UI event that generated the mpl event", "id": "f17207:c10:m16"}
{"signature": "def get_url(self):", "body": "return self._url<EOL>", "docstring": "returns a url if one is set, None otherwise", "id": "f17207:c1:m12"}
{"signature": "def mpl_disconnect(self, cid):", "body": "return self.callbacks.disconnect(cid)<EOL>", "docstring": "disconnect callback id cid\n\nExample usage::\n\n    cid = canvas.mpl_connect('button_press_event', on_press)\n    #...later\n    canvas.mpl_disconnect(cid)", "id": "f17207:c10:m37"}
{"signature": "def idle_event(self, guiEvent=None):", "body": "s = '<STR_LIT>'<EOL>event = IdleEvent(s, self, guiEvent=guiEvent)<EOL>self.callbacks.process(s, event)<EOL>", "docstring": "call when GUI is idle", "id": "f17207:c10:m17"}
{"signature": "def zoom(self, *args):", "body": "if self._active == '<STR_LIT>':<EOL><INDENT>self._active = None<EOL><DEDENT>else:<EOL><INDENT>self._active = '<STR_LIT>'<EOL><DEDENT>if self._idPress is not None:<EOL><INDENT>self._idPress=self.canvas.mpl_disconnect(self._idPress)<EOL>self.mode = '<STR_LIT>'<EOL><DEDENT>if self._idRelease is not None:<EOL><INDENT>self._idRelease=self.canvas.mpl_disconnect(self._idRelease)<EOL>self.mode = '<STR_LIT>'<EOL><DEDENT>if  self._active:<EOL><INDENT>self._idPress = self.canvas.mpl_connect('<STR_LIT>', self.press_zoom)<EOL>self._idRelease = self.canvas.mpl_connect('<STR_LIT>', self.release_zoom)<EOL>self.mode = '<STR_LIT>'<EOL>self.canvas.widgetlock(self)<EOL><DEDENT>else:<EOL><INDENT>self.canvas.widgetlock.release(self)<EOL><DEDENT>for a in self.canvas.figure.get_axes():<EOL><INDENT>a.set_navigate_mode(self._active)<EOL><DEDENT>self.set_message(self.mode)<EOL>", "docstring": "activate zoom to rect mode", "id": "f17207:c13:m23"}
{"signature": "def copy_properties(self, gc):", "body": "self._alpha = gc._alpha<EOL>self._antialiased = gc._antialiased<EOL>self._capstyle = gc._capstyle<EOL>self._cliprect = gc._cliprect<EOL>self._clippath = gc._clippath<EOL>self._dashes = gc._dashes<EOL>self._joinstyle = gc._joinstyle<EOL>self._linestyle = gc._linestyle<EOL>self._linewidth = gc._linewidth<EOL>self._rgb = gc._rgb<EOL>self._hatch = gc._hatch<EOL>self._url = gc._url<EOL>self._snap = gc._snap<EOL>", "docstring": "Copy properties from gc to self", "id": "f17207:c1:m1"}
{"signature": "def set_capstyle(self, cs):", "body": "if cs in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self._capstyle = cs<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % cs)<EOL><DEDENT>", "docstring": "Set the capstyle as a string in ('butt', 'round', 'projecting')", "id": "f17207:c1:m16"}
{"signature": "def key_press_event(self, key, guiEvent=None):", "body": "self._key = key<EOL>s = '<STR_LIT>'<EOL>event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)<EOL>self.callbacks.process(s, event)<EOL>", "docstring": "This method will be call all functions connected to the\n'key_press_event' with a :class:`KeyEvent`", "id": "f17207:c10:m8"}
{"signature": "def open_group(self, s):", "body": "pass<EOL>", "docstring": "Open a grouping element with label *s*. Is only currently used by\n:mod:`~matplotlib.backends.backend_svg`", "id": "f17207:c0:m1"}
{"signature": "def set_linewidth(self, w):", "body": "self._linewidth = w<EOL>", "docstring": "Set the linewidth in points", "id": "f17207:c1:m23"}
{"signature": "def resize_event(self):", "body": "s = '<STR_LIT>'<EOL>event = ResizeEvent(s, self)<EOL>self.callbacks.process(s, event)<EOL>", "docstring": "This method will be call all functions connected to the\n'resize_event' with a :class:`ResizeEvent`", "id": "f17207:c10:m7"}
{"signature": "def pick_event(self, mouseevent, artist, **kwargs):", "body": "s = '<STR_LIT>'<EOL>event = PickEvent(s, self, mouseevent, artist, **kwargs)<EOL>self.callbacks.process(s, event)<EOL>", "docstring": "This method will be called by artists who are picked and will\nfire off :class:`PickEvent` callbacks registered listeners", "id": "f17207:c10:m10"}
{"signature": "def back(self, *args):", "body": "self._views.back()<EOL>self._positions.back()<EOL>self.set_history_buttons()<EOL>self._update_view()<EOL>", "docstring": "move back up the view lim stack", "id": "f17207:c13:m2"}
{"signature": "def set_graylevel(self, frac):", "body": "self._rgb = (frac, frac, frac)<EOL>", "docstring": "Set the foreground color to be a gray level with *frac*", "id": "f17207:c1:m21"}
{"signature": "def new_gc(self):", "body": "return GraphicsContextBase()<EOL>", "docstring": "Return an instance of a :class:`GraphicsContextBase`", "id": "f17207:c0:m18"}
{"signature": "def set_hatch(self, hatch):", "body": "self._hatch = hatch<EOL>", "docstring": "Sets the hatch style for filling", "id": "f17207:c1:m27"}
{"signature": "def release_pan(self, event):", "body": "self.canvas.mpl_disconnect(self._idDrag)<EOL>self._idDrag=self.canvas.mpl_connect('<STR_LIT>', self.mouse_move)<EOL>for a, ind in self._xypress:<EOL><INDENT>a.end_pan()<EOL><DEDENT>if not self._xypress: return<EOL>self._xypress = []<EOL>self._button_pressed=None<EOL>self.push_current()<EOL>self.release(event)<EOL>self.draw()<EOL>", "docstring": "the release mouse button callback in pan/zoom mode", "id": "f17207:c13:m15"}
{"signature": "def onRemove(self, ev):", "body": "def sort_artists(artists):<EOL><INDENT>L = [ (h.zorder, h) for h in artists ]<EOL>L.sort()<EOL>return [ h for zorder, h in L ]<EOL><DEDENT>under = sort_artists(self.figure.hitlist(ev))<EOL>h = None<EOL>if under: h = under[-<NUM_LIT:1>]<EOL>while h:<EOL><INDENT>print(\"<STR_LIT>\",h)<EOL>if h.remove():<EOL><INDENT>self.draw_idle()<EOL>break<EOL><DEDENT>parent = None<EOL>for p in under:<EOL><INDENT>if h in p.get_children():<EOL><INDENT>parent = p<EOL>break<EOL><DEDENT><DEDENT>h = parent<EOL><DEDENT>", "docstring": "Mouse event processor which removes the top artist\nunder the cursor.  Connect this to the 'mouse_press_event'\nusing::\n\n    canvas.mpl_connect('mouse_press_event',canvas.onRemove)", "id": "f17207:c10:m1"}
{"signature": "def resize(self, w, h):", "body": "pass<EOL>", "docstring": "set the canvas size in pixels", "id": "f17207:c10:m5"}
{"signature": "def get_joinstyle(self):", "body": "return self._joinstyle<EOL>", "docstring": "Return the line join style as one of ('miter', 'round', 'bevel')", "id": "f17207:c1:m8"}
{"signature": "def onHilite(self, ev):", "body": "if not hasattr(self,'<STR_LIT>'): self._active = dict()<EOL>under = self.figure.hitlist(ev)<EOL>enter = [a for a in under if a not in self._active]<EOL>leave = [a for a in self._active if a not in under]<EOL>print(\"<STR_LIT>\",\"<STR_LIT:U+0020>\".join([str(x) for x in under]))<EOL>for a in leave:<EOL><INDENT>if hasattr(a,'<STR_LIT>'):<EOL><INDENT>a.set_color(self._active[a])<EOL><DEDENT>elif hasattr(a,'<STR_LIT>'):<EOL><INDENT>a.set_edgecolor(self._active[a][<NUM_LIT:0>])<EOL>a.set_facecolor(self._active[a][<NUM_LIT:1>])<EOL><DEDENT>del self._active[a]<EOL><DEDENT>for a in enter:<EOL><INDENT>if hasattr(a,'<STR_LIT>'):<EOL><INDENT>self._active[a] = a.get_color()<EOL><DEDENT>elif hasattr(a,'<STR_LIT>'):<EOL><INDENT>self._active[a] = (a.get_edgecolor(),a.get_facecolor())<EOL><DEDENT>else: self._active[a] = None<EOL><DEDENT>for a in enter:<EOL><INDENT>if hasattr(a,'<STR_LIT>'):<EOL><INDENT>a.set_color('<STR_LIT>')<EOL><DEDENT>elif hasattr(a,'<STR_LIT>'):<EOL><INDENT>a.set_edgecolor('<STR_LIT>')<EOL>a.set_facecolor('<STR_LIT>')<EOL><DEDENT>else: self._active[a] = None<EOL><DEDENT>self.draw_idle()<EOL>", "docstring": "Mouse event processor which highlights the artists\nunder the cursor.  Connect this to the 'motion_notify_event'\nusing::\n\n    canvas.mpl_connect('motion_notify_event',canvas.onHilite)", "id": "f17207:c10:m2"}
{"signature": "def press_zoom(self, event):", "body": "if event.button == <NUM_LIT:1>:<EOL><INDENT>self._button_pressed=<NUM_LIT:1><EOL><DEDENT>elif  event.button == <NUM_LIT:3>:<EOL><INDENT>self._button_pressed=<NUM_LIT:3><EOL><DEDENT>else:<EOL><INDENT>self._button_pressed=None<EOL>return<EOL><DEDENT>x, y = event.x, event.y<EOL>if self._views.empty(): self.push_current()<EOL>self._xypress=[]<EOL>for i, a in enumerate(self.canvas.figure.get_axes()):<EOL><INDENT>if x is not None and y is not None and a.in_axes(event)and a.get_navigate() and a.can_zoom():<EOL><INDENT>self._xypress.append(( x, y, a, i, a.viewLim.frozen(), a.transData.frozen()))<EOL><DEDENT><DEDENT>self.press(event)<EOL>", "docstring": "the press mouse button in zoom to rect mode callback", "id": "f17207:c13:m12"}
{"signature": "def draw_quad_mesh(self, master_transform, cliprect, clippath,<EOL>clippath_trans, meshWidth, meshHeight, coordinates,<EOL>offsets, offsetTrans, facecolors, antialiased,<EOL>showedges):", "body": "from matplotlib.collections import QuadMesh<EOL>paths = QuadMesh.convert_mesh_to_paths(<EOL>meshWidth, meshHeight, coordinates)<EOL>if showedges:<EOL><INDENT>edgecolors = np.array([[<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:1.0>]], np.float_)<EOL>linewidths = np.array([<NUM_LIT:1.0>], np.float_)<EOL><DEDENT>else:<EOL><INDENT>edgecolors = facecolors<EOL>linewidths = np.array([<NUM_LIT:0.0>], np.float_)<EOL><DEDENT>return self.draw_path_collection(<EOL>master_transform, cliprect, clippath, clippath_trans,<EOL>paths, [], offsets, offsetTrans, facecolors, edgecolors,<EOL>linewidths, [], [antialiased], [None])<EOL>", "docstring": "This provides a fallback implementation of\n:meth:`draw_quad_mesh` that generates paths and then calls\n:meth:`draw_path_collection`.", "id": "f17207:c0:m6"}
{"signature": "def get_image_magnification(self):", "body": "return <NUM_LIT:1.0><EOL>", "docstring": "Get the factor by which to magnify images passed to :meth:`draw_image`.\nAllows a backend to have images at a different resolution to other\nartists.", "id": "f17207:c0:m9"}
{"signature": "def draw_event(self, renderer):", "body": "s = '<STR_LIT>'<EOL>event = DrawEvent(s, self, renderer)<EOL>self.callbacks.process(s, event)<EOL>", "docstring": "This method will be call all functions connected to the\n'draw_event' with a :class:`DrawEvent`", "id": "f17207:c10:m6"}
{"signature": "def flush_events(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Flush the GUI events for the figure. Implemented only for\nbackends with GUIs.", "id": "f17207:c10:m38"}
{"signature": "def press_pan(self, event):", "body": "if event.button == <NUM_LIT:1>:<EOL><INDENT>self._button_pressed=<NUM_LIT:1><EOL><DEDENT>elif  event.button == <NUM_LIT:3>:<EOL><INDENT>self._button_pressed=<NUM_LIT:3><EOL><DEDENT>else:<EOL><INDENT>self._button_pressed=None<EOL>return<EOL><DEDENT>x, y = event.x, event.y<EOL>if self._views.empty(): self.push_current()<EOL>self._xypress=[]<EOL>for i, a in enumerate(self.canvas.figure.get_axes()):<EOL><INDENT>if x is not None and y is not None and a.in_axes(event) and a.get_navigate():<EOL><INDENT>a.start_pan(x, y, event.button)<EOL>self._xypress.append((a, i))<EOL>self.canvas.mpl_disconnect(self._idDrag)<EOL>self._idDrag=self.canvas.mpl_connect('<STR_LIT>', self.drag_pan)<EOL><DEDENT><DEDENT>self.press(event)<EOL>", "docstring": "the press mouse button in pan/zoom mode callback", "id": "f17207:c13:m11"}
{"signature": "def motion_notify_event(self, x, y, guiEvent=None):", "body": "self._lastx, self._lasty = x, y<EOL>s = '<STR_LIT>'<EOL>event = MouseEvent(s, self, x, y, self._button, self._key,<EOL>guiEvent=guiEvent)<EOL>self.callbacks.process(s, event)<EOL>", "docstring": "Backend derived classes should call this function on any\nmotion-notify-event.\n\n*x*\n    the canvas coordinates where 0=left\n\n*y*\n    the canvas coordinates where 0=bottom\n\n*guiEvent*\n    the native UI event that generated the mpl event\n\n\nThis method will be call all functions connected to the\n'motion_notify_event' with a :class:`MouseEvent` instance.", "id": "f17207:c10:m14"}
{"signature": "def update(self):", "body": "self._views.clear()<EOL>self._positions.clear()<EOL>self.set_history_buttons()<EOL>", "docstring": "reset the axes stack", "id": "f17207:c13:m22"}
{"signature": "def button_press_event(self, x, y, button, guiEvent=None):", "body": "self._button = button<EOL>s = '<STR_LIT>'<EOL>mouseevent = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)<EOL>self.callbacks.process(s, mouseevent)<EOL>", "docstring": "Backend derived classes should call this function on any mouse\nbutton press.  x,y are the canvas coords: 0,0 is lower, left.\nbutton and key are as defined in :class:`MouseEvent`.\n\nThis method will be call all functions connected to the\n'button_press_event' with a :class:`MouseEvent` instance.", "id": "f17207:c10:m12"}
{"signature": "def get_width_height(self):", "body": "return int(self.figure.bbox.width), int(self.figure.bbox.height)<EOL>", "docstring": "return the figure width and height in points or pixels\n(depending on the backend), truncated to integers", "id": "f17207:c10:m21"}
{"signature": "def draw_path_collection(self, master_transform, cliprect, clippath,<EOL>clippath_trans, paths, all_transforms, offsets,<EOL>offsetTrans, facecolors, edgecolors, linewidths,<EOL>linestyles, antialiaseds, urls):", "body": "path_ids = []<EOL>for path, transform in self._iter_collection_raw_paths(<EOL>master_transform, paths, all_transforms):<EOL><INDENT>path_ids.append((path, transform))<EOL><DEDENT>for xo, yo, path_id, gc, rgbFace in self._iter_collection(<EOL>path_ids, cliprect, clippath, clippath_trans,<EOL>offsets, offsetTrans, facecolors, edgecolors,<EOL>linewidths, linestyles, antialiaseds, urls):<EOL><INDENT>path, transform = path_id<EOL>transform = transforms.Affine2D(transform.get_matrix()).translate(xo, yo)<EOL>self.draw_path(gc, path, transform, rgbFace)<EOL><DEDENT>", "docstring": "Draws a collection of paths, selecting drawing properties from\nthe lists *facecolors*, *edgecolors*, *linewidths*,\n*linestyles* and *antialiaseds*. *offsets* is a list of\noffsets to apply to each of the paths.  The offsets in\n*offsets* are first transformed by *offsetTrans* before\nbeing applied.\n\nThis provides a fallback implementation of\n:meth:`draw_path_collection` that makes multiple calls to\ndraw_path.  Some backends may want to override this in order\nto render each set of path data only once, and then reference\nthat path multiple times with the different offsets, colors,\nstyles etc.  The generator methods\n:meth:`_iter_collection_raw_paths` and\n:meth:`_iter_collection` are provided to help with (and\nstandardize) the implementation across backends.  It is highly\nrecommended to use those generators, so that changes to the\nbehavior of :meth:`draw_path_collection` can be made globally.", "id": "f17207:c0:m5"}
{"signature": "def start_event_loop_default(self,timeout=<NUM_LIT:0>):", "body": "str = \"<STR_LIT>\"<EOL>str += \"<STR_LIT>\"<EOL>warnings.warn(str,DeprecationWarning)<EOL>if timeout <= <NUM_LIT:0>: timeout = np.inf<EOL>timestep = <NUM_LIT><EOL>counter = <NUM_LIT:0><EOL>self._looping = True<EOL>while self._looping and counter*timestep < timeout:<EOL><INDENT>self.flush_events()<EOL>time.sleep(timestep)<EOL>counter += <NUM_LIT:1><EOL><DEDENT>", "docstring": "Start an event loop.  This is used to start a blocking event\nloop so that interactive functions, such as ginput and\nwaitforbuttonpress, can wait for events.  This should not be\nconfused with the main GUI event loop, which is always running\nand has nothing to do with this.\n\nThis function provides default event loop functionality based\non time.sleep that is meant to be used until event loop\nfunctions for each of the GUI backends can be written.  As\nsuch, it throws a deprecated warning.\n\nCall signature::\n\n    start_event_loop_default(self,timeout=0)\n\nThis call blocks until a callback function triggers\nstop_event_loop() or *timeout* is reached.  If *timeout* is\n<=0, never timeout.", "id": "f17207:c10:m41"}
{"signature": "def points_to_pixels(self, points):", "body": "return points<EOL>", "docstring": "Convert points to display units\n\n*points*\n    a float or a numpy array of float\n\nreturn points converted to pixels\n\nYou need to override this function (unless your backend\ndoesn't have a dpi, eg, postscript or svg).  Some imaging\nsystems assume some value for pixels per inch::\n\n    points to pixels = points * pixels_per_inch/72.0 * dpi/72.0", "id": "f17207:c0:m19"}
{"signature": "def draw_path(self, gc, path, transform, rgbFace=None):", "body": "raise NotImplementedError<EOL>", "docstring": "Draws a :class:`~matplotlib.path.Path` instance using the\ngiven affine transform.", "id": "f17207:c0:m3"}
{"signature": "def get_texmanager(self):", "body": "if self._texmanager is None:<EOL><INDENT>from matplotlib.texmanager import TexManager<EOL>self._texmanager = TexManager()<EOL><DEDENT>return self._texmanager<EOL>", "docstring": "return the :class:`matplotlib.texmanager.TexManager` instance", "id": "f17207:c0:m16"}
{"signature": "def get_capstyle(self):", "body": "return self._capstyle<EOL>", "docstring": "Return the capstyle as a string in ('butt', 'round', 'projecting')", "id": "f17207:c1:m4"}
{"signature": "def draw_cursor(self, event):", "body": "pass<EOL>", "docstring": "Draw a cursor in the event.axes if inaxes is not None.  Use\nnative GUI drawing for efficiency if possible", "id": "f17207:c10:m20"}
{"signature": "def _dispatch(self, byte):", "body": "if <NUM_LIT:0> <= byte <= <NUM_LIT>: self._set_char(byte)<EOL>elif byte == <NUM_LIT>: self._set_char(self._arg(<NUM_LIT:1>))<EOL>elif byte == <NUM_LIT>: self._set_char(self._arg(<NUM_LIT:2>))<EOL>elif byte == <NUM_LIT>: self._set_char(self._arg(<NUM_LIT:3>))<EOL>elif byte == <NUM_LIT>: self._set_char(self._arg(<NUM_LIT:4>, True))<EOL>elif byte == <NUM_LIT>: self._set_rule(self._arg(<NUM_LIT:4>, True), self._arg(<NUM_LIT:4>, True))<EOL>elif byte == <NUM_LIT>: self._put_char(self._arg(<NUM_LIT:1>))<EOL>elif byte == <NUM_LIT>: self._put_char(self._arg(<NUM_LIT:2>))<EOL>elif byte == <NUM_LIT>: self._put_char(self._arg(<NUM_LIT:3>))<EOL>elif byte == <NUM_LIT>: self._put_char(self._arg(<NUM_LIT:4>, True))<EOL>elif byte == <NUM_LIT>: self._put_rule(self._arg(<NUM_LIT:4>, True), self._arg(<NUM_LIT:4>, True))<EOL>elif byte == <NUM_LIT>: self._nop()<EOL>elif byte == <NUM_LIT>: self._bop(*[self._arg(<NUM_LIT:4>, True) for i in range(<NUM_LIT:11>)])<EOL>elif byte == <NUM_LIT>: self._eop()<EOL>elif byte == <NUM_LIT>: self._push()<EOL>elif byte == <NUM_LIT>: self._pop()<EOL>elif byte == <NUM_LIT>: self._right(self._arg(<NUM_LIT:1>, True))<EOL>elif byte == <NUM_LIT>: self._right(self._arg(<NUM_LIT:2>, True))<EOL>elif byte == <NUM_LIT>: self._right(self._arg(<NUM_LIT:3>, True))<EOL>elif byte == <NUM_LIT>: self._right(self._arg(<NUM_LIT:4>, True))<EOL>elif byte == <NUM_LIT>: self._right_w(None)<EOL>elif byte == <NUM_LIT>: self._right_w(self._arg(<NUM_LIT:1>, True))<EOL>elif byte == <NUM_LIT>: self._right_w(self._arg(<NUM_LIT:2>, True))<EOL>elif byte == <NUM_LIT>: self._right_w(self._arg(<NUM_LIT:3>, True))<EOL>elif byte == <NUM_LIT>: self._right_w(self._arg(<NUM_LIT:4>, True))<EOL>elif byte == <NUM_LIT>: self._right_x(None)<EOL>elif byte == <NUM_LIT>: self._right_x(self._arg(<NUM_LIT:1>, True))<EOL>elif byte == <NUM_LIT>: self._right_x(self._arg(<NUM_LIT:2>, True))<EOL>elif byte == <NUM_LIT>: self._right_x(self._arg(<NUM_LIT:3>, 
True))<EOL>elif byte == <NUM_LIT>: self._right_x(self._arg(<NUM_LIT:4>, True))<EOL>elif byte == <NUM_LIT>: self._down(self._arg(<NUM_LIT:1>, True))<EOL>elif byte == <NUM_LIT>: self._down(self._arg(<NUM_LIT:2>, True))<EOL>elif byte == <NUM_LIT>: self._down(self._arg(<NUM_LIT:3>, True))<EOL>elif byte == <NUM_LIT>: self._down(self._arg(<NUM_LIT:4>, True))<EOL>elif byte == <NUM_LIT>: self._down_y(None)<EOL>elif byte == <NUM_LIT>: self._down_y(self._arg(<NUM_LIT:1>, True))<EOL>elif byte == <NUM_LIT>: self._down_y(self._arg(<NUM_LIT:2>, True))<EOL>elif byte == <NUM_LIT>: self._down_y(self._arg(<NUM_LIT:3>, True))<EOL>elif byte == <NUM_LIT>: self._down_y(self._arg(<NUM_LIT:4>, True))<EOL>elif byte == <NUM_LIT>: self._down_z(None)<EOL>elif byte == <NUM_LIT>: self._down_z(self._arg(<NUM_LIT:1>, True))<EOL>elif byte == <NUM_LIT>: self._down_z(self._arg(<NUM_LIT:2>, True))<EOL>elif byte == <NUM_LIT>: self._down_z(self._arg(<NUM_LIT:3>, True))<EOL>elif byte == <NUM_LIT>: self._down_z(self._arg(<NUM_LIT:4>, True))<EOL>elif <NUM_LIT> <= byte <= <NUM_LIT>: self._fnt_num(byte-<NUM_LIT>)<EOL>elif byte == <NUM_LIT>: self._fnt_num(self._arg(<NUM_LIT:1>))<EOL>elif byte == <NUM_LIT>: self._fnt_num(self._arg(<NUM_LIT:2>))<EOL>elif byte == <NUM_LIT>: self._fnt_num(self._arg(<NUM_LIT:3>))<EOL>elif byte == <NUM_LIT>: self._fnt_num(self._arg(<NUM_LIT:4>, True))<EOL>elif <NUM_LIT> <= byte <= <NUM_LIT>:<EOL><INDENT>len = self._arg(byte-<NUM_LIT>)<EOL>special = self.file.read(len)<EOL>self._xxx(special)<EOL><DEDENT>elif <NUM_LIT> <= byte <= <NUM_LIT>:<EOL><INDENT>k = self._arg(byte-<NUM_LIT>, byte==<NUM_LIT>)<EOL>c, s, d, a, l = [ self._arg(x) for x in (<NUM_LIT:4>, <NUM_LIT:4>, <NUM_LIT:4>, <NUM_LIT:1>, <NUM_LIT:1>) ]<EOL>n = self.file.read(a+l)<EOL>self._fnt_def(k, c, s, d, a, l, n)<EOL><DEDENT>elif byte == <NUM_LIT>:<EOL><INDENT>i, num, den, mag, k = [ self._arg(x) for x in (<NUM_LIT:1>, <NUM_LIT:4>, <NUM_LIT:4>, <NUM_LIT:4>, <NUM_LIT:1>) ]<EOL>x = self.file.read(k)<EOL>self._pre(i, num, 
den, mag, x)<EOL><DEDENT>elif byte == <NUM_LIT>: self._post()<EOL>elif byte == <NUM_LIT>: self._post_post()<EOL>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"%byte)<EOL><DEDENT>", "docstring": "Based on the opcode \"byte\", read the correct kinds of\narguments from the dvi file and call the method implementing\nthat opcode with those arguments.", "id": "f17208:c0:m6"}
{"signature": "def clean(self):", "body": "mapping = self._mapping<EOL>for key, val in list(mapping.items()):<EOL><INDENT>if key() is None:<EOL><INDENT>del mapping[key]<EOL>val.remove(key)<EOL><DEDENT><DEDENT>", "docstring": "Clean dead weak references from the dictionary", "id": "f17209:c20:m2"}
{"signature": "def onetrue(seq):", "body": "if not len(seq): return False<EOL>for val in seq:<EOL><INDENT>if val: return True<EOL><DEDENT>return False<EOL>", "docstring": "Return *True* if one element of *seq* is *True*.  It *seq* is\nempty, return *False*.", "id": "f17209:m24"}
{"signature": "def get_split_ind(seq, N):", "body": "sLen = <NUM_LIT:0><EOL>for (word, ind) in zip(seq, list(range(len(seq)))):<EOL><INDENT>sLen += len(word) + <NUM_LIT:1>  <EOL>if sLen>=N: return ind<EOL><DEDENT>return len(seq)<EOL>", "docstring": "*seq* is a list of words.  Return the index into seq such that::\n\n    len(' '.join(seq[:ind])<=N", "id": "f17209:m15"}
{"signature": "def listFiles(root, patterns='<STR_LIT:*>', recurse=<NUM_LIT:1>, return_folders=<NUM_LIT:0>):", "body": "import os.path, fnmatch<EOL>pattern_list = patterns.split('<STR_LIT:;>')<EOL>class Bunch:<EOL><INDENT>def __init__(self, **kwds): self.__dict__.update(kwds)<EOL><DEDENT>arg = Bunch(recurse=recurse, pattern_list=pattern_list,<EOL>return_folders=return_folders, results=[])<EOL>def visit(arg, dirname, files):<EOL><INDENT>for name in files:<EOL><INDENT>fullname = os.path.normpath(os.path.join(dirname, name))<EOL>if arg.return_folders or os.path.isfile(fullname):<EOL><INDENT>for pattern in arg.pattern_list:<EOL><INDENT>if fnmatch.fnmatch(name, pattern):<EOL><INDENT>arg.results.append(fullname)<EOL>break<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if not arg.recurse: files[:]=[]<EOL><DEDENT>os.path.walk(root, visit, arg)<EOL>return arg.results<EOL>", "docstring": "Recursively list files\n\nfrom Parmar and Martelli in the Python Cookbook", "id": "f17209:m18"}
{"signature": "def unmasked_index_ranges(mask, compressed = True):", "body": "mask = mask.reshape(mask.size)<EOL>m = np.concatenate(((<NUM_LIT:1>,), mask, (<NUM_LIT:1>,)))<EOL>indices = np.arange(len(mask) + <NUM_LIT:1>)<EOL>mdif = m[<NUM_LIT:1>:] - m[:-<NUM_LIT:1>]<EOL>i0 = np.compress(mdif == -<NUM_LIT:1>, indices)<EOL>i1 = np.compress(mdif == <NUM_LIT:1>, indices)<EOL>assert len(i0) == len(i1)<EOL>if len(i1) == <NUM_LIT:0>:<EOL><INDENT>return None  <EOL><DEDENT>if not compressed:<EOL><INDENT>return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=<NUM_LIT:1>)<EOL><DEDENT>seglengths = i1 - i0<EOL>breakpoints = np.cumsum(seglengths)<EOL>ic0 = np.concatenate(((<NUM_LIT:0>,), breakpoints[:-<NUM_LIT:1>]))<EOL>ic1 = breakpoints<EOL>return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=<NUM_LIT:1>)<EOL>", "docstring": "Find index ranges where *mask* is *False*.\n\n*mask* will be flattened if it is not already 1-D.\n\nReturns Nx2 :class:`numpy.ndarray` with each row the start and stop\nindices for slices of the compressed :class:`numpy.ndarray`\ncorresponding to each of *N* uninterrupted runs of unmasked\nvalues.  If optional argument *compressed* is *False*, it returns\nthe start and stop indices into the original :class:`numpy.ndarray`,\nnot the compressed :class:`numpy.ndarray`.  Returns *None* if there\nare no unmasked values.\n\nExample::\n\n  y = ma.array(np.arange(5), mask = [0,0,1,0,0])\n  ii = unmasked_index_ranges(ma.getmaskarray(y))\n  # returns array [[0,2,] [2,4,]]\n\n  y.compressed()[ii[1,0]:ii[1,1]]\n  # returns array [3,4,]\n\n  ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)\n  # returns array [[0, 2], [3, 5]]\n\n  y.filled()[ii[1,0]:ii[1,1]]\n  # returns array [3,4,]\n\nPrior to the transforms refactoring, this was used to support\nmasked arrays in Line2D.", "id": "f17209:m37"}
{"signature": "def clear(self):", "body": "self._pos = -<NUM_LIT:1><EOL>self._elements = []<EOL>", "docstring": "empty the stack", "id": "f17209:c18:m7"}
{"signature": "def pieces(seq, num=<NUM_LIT:2>):", "body": "start = <NUM_LIT:0><EOL>while <NUM_LIT:1>:<EOL><INDENT>item = seq[start:start+num]<EOL>if not len(item): break<EOL>yield item<EOL>start += num<EOL><DEDENT>", "docstring": "Break up the *seq* into *num* tuples", "id": "f17209:m20"}
{"signature": "def print_cycles(objects, outstream=sys.stdout, show_progress=False):", "body": "import gc<EOL>from types import FrameType<EOL>def print_path(path):<EOL><INDENT>for i, step in enumerate(path):<EOL><INDENT>next = path[(i + <NUM_LIT:1>) % len(path)]<EOL>outstream.write(\"<STR_LIT>\" % str(type(step)))<EOL>if isinstance(step, dict):<EOL><INDENT>for key, val in list(step.items()):<EOL><INDENT>if val is next:<EOL><INDENT>outstream.write(\"<STR_LIT>\" % repr(key))<EOL>break<EOL><DEDENT>if key is next:<EOL><INDENT>outstream.write(\"<STR_LIT>\" % repr(val))<EOL>break<EOL><DEDENT><DEDENT><DEDENT>elif isinstance(step, list):<EOL><INDENT>outstream.write(\"<STR_LIT>\" % step.index(next))<EOL><DEDENT>elif isinstance(step, tuple):<EOL><INDENT>outstream.write(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>outstream.write(repr(step))<EOL><DEDENT>outstream.write(\"<STR_LIT>\")<EOL><DEDENT>outstream.write(\"<STR_LIT:\\n>\")<EOL><DEDENT>def recurse(obj, start, all, current_path):<EOL><INDENT>if show_progress:<EOL><INDENT>outstream.write(\"<STR_LIT>\" % len(all))<EOL><DEDENT>all[id(obj)] = None<EOL>referents = gc.get_referents(obj)<EOL>for referent in referents:<EOL><INDENT>if referent is start:<EOL><INDENT>print_path(current_path)<EOL><DEDENT>elif referent is objects or isinstance(referent, FrameType):<EOL><INDENT>continue<EOL><DEDENT>elif id(referent) not in all:<EOL><INDENT>recurse(referent, start, all, current_path + [obj])<EOL><DEDENT><DEDENT><DEDENT>for obj in objects:<EOL><INDENT>outstream.write(\"<STR_LIT>\" % (obj,))<EOL>recurse(obj, obj, { }, [])<EOL><DEDENT>", "docstring": "*objects*\n    A list of objects to find cycles in.  It is often useful to\n    pass in gc.garbage to find the cycles that are preventing some\n    objects from being garbage collected.\n\n*outstream*\n    The stream for output.\n\n*show_progress*\n    If True, print the number of objects reached as they are found.", "id": "f17209:m33"}
{"signature": "def _make_regex(self):", "body": "return re.compile(\"<STR_LIT:|>\".join(map(re.escape, list(self.keys()))))<EOL>", "docstring": "Build re object based on the keys of the current dictionary", "id": "f17209:c13:m0"}
{"signature": "def get(self):", "body": "return self.data<EOL>", "docstring": "Return a list of elements from the oldest to the newest.", "id": "f17209:c16:m2"}
{"signature": "def push(self, o):", "body": "self._elements = self._elements[:self._pos+<NUM_LIT:1>]<EOL>self._elements.append(o)<EOL>self._pos = len(self._elements)-<NUM_LIT:1><EOL>return self()<EOL>", "docstring": "push object onto stack at current position - all elements\noccurring later than the current position are discarded", "id": "f17209:c18:m4"}
{"signature": "def get_recursive_filelist(args):", "body": "files = []<EOL>for arg in args:<EOL><INDENT>if os.path.isfile(arg):<EOL><INDENT>files.append(arg)<EOL>continue<EOL><DEDENT>if os.path.isdir(arg):<EOL><INDENT>newfiles = listFiles(arg, recurse=<NUM_LIT:1>, return_folders=<NUM_LIT:1>)<EOL>files.extend(newfiles)<EOL><DEDENT><DEDENT>return [f for f in files if not os.path.islink(f)]<EOL>", "docstring": "Recurs all the files and dirs in *args* ignoring symbolic links\nand return the files as a list of strings", "id": "f17209:m19"}
{"signature": "def report_memory(i=<NUM_LIT:0>):  ", "body": "pid = os.getpid()<EOL>if sys.platform=='<STR_LIT>':<EOL><INDENT>a2 = os.popen('<STR_LIT>' % pid).readlines()<EOL>mem = int(a2[-<NUM_LIT:1>].strip())<EOL><DEDENT>elif sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>a2 = os.popen('<STR_LIT>' % pid).readlines()<EOL>mem = int(a2[<NUM_LIT:1>].split()[<NUM_LIT:1>])<EOL><DEDENT>elif sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>a2 = os.popen('<STR_LIT>' % pid).readlines()<EOL>mem = int(a2[<NUM_LIT:1>].split()[<NUM_LIT:0>])<EOL><DEDENT>return mem<EOL>", "docstring": "return the memory consumed by process", "id": "f17209:m30"}
{"signature": "def is_sequence_of_strings(obj):", "body": "if not iterable(obj): return False<EOL>if is_string_like(obj): return False<EOL>for o in obj:<EOL><INDENT>if not is_string_like(o): return False<EOL><DEDENT>return True<EOL>", "docstring": "Returns true if *obj* is iterable and contains strings", "id": "f17209:m5"}
{"signature": "def joined(self, a, b):", "body": "self.clean()<EOL>mapping = self._mapping<EOL>try:<EOL><INDENT>return mapping[ref(a)] is mapping[ref(b)]<EOL><DEDENT>except KeyError:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Returns True if *a* and *b* are members of the same set.", "id": "f17209:c20:m4"}
{"signature": "def is_numlike(obj):", "body": "try: obj+<NUM_LIT:1><EOL>except TypeError: return False<EOL>else: return True<EOL>", "docstring": "return true if *obj* looks like a number", "id": "f17209:m8"}
{"signature": "def bubble(self, o):", "body": "if o not in self._elements:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>old = self._elements[:]<EOL>self.clear()<EOL>bubbles = []<EOL>for thiso in old:<EOL><INDENT>if thiso==o: bubbles.append(thiso)<EOL>else: self.push(thiso)<EOL><DEDENT>for thiso in bubbles:<EOL><INDENT>self.push(o)<EOL><DEDENT>return o<EOL>", "docstring": "raise *o* to the top of the stack and return *o*.  *o* must be\nin the stack", "id": "f17209:c18:m8"}
{"signature": "def __init__(self, signals):", "body": "self.signals = set(signals)<EOL>self.callbacks = dict([(s, dict()) for s in signals])<EOL>self._cid = <NUM_LIT:0><EOL>", "docstring": "*signals* is a sequence of valid signals", "id": "f17209:c6:m0"}
{"signature": "def to_filehandle(fname, flag='<STR_LIT:r>', return_opened=False):", "body": "if is_string_like(fname):<EOL><INDENT>if fname.endswith('<STR_LIT>'):<EOL><INDENT>import gzip<EOL>fh = gzip.open(fname, flag)<EOL><DEDENT>else:<EOL><INDENT>fh = file(fname, flag)<EOL><DEDENT>opened = True<EOL><DEDENT>elif hasattr(fname, '<STR_LIT>'):<EOL><INDENT>fh = fname<EOL>opened = False<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if return_opened:<EOL><INDENT>return fh, opened<EOL><DEDENT>return fh<EOL>", "docstring": "*fname* can be a filename or a file handle.  Support for gzipped\nfiles is automatic, if the filename ends in .gz.  *flag* is a\nread/write flag for :func:`file`", "id": "f17209:m9"}
{"signature": "def delete_masked_points(*args):", "body": "if not len(args):<EOL><INDENT>return ()<EOL><DEDENT>if (is_string_like(args[<NUM_LIT:0>]) or not iterable(args[<NUM_LIT:0>])):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>nrecs = len(args[<NUM_LIT:0>])<EOL>margs = []<EOL>seqlist = [False] * len(args)<EOL>for i, x in enumerate(args):<EOL><INDENT>if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:<EOL><INDENT>seqlist[i] = True<EOL>if ma.isMA(x):<EOL><INDENT>if x.ndim > <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else:<EOL><INDENT>x = np.asarray(x)<EOL><DEDENT><DEDENT>margs.append(x)<EOL><DEDENT>masks = []    <EOL>for i, x in enumerate(margs):<EOL><INDENT>if seqlist[i]:<EOL><INDENT>if x.ndim > <NUM_LIT:1>:<EOL><INDENT>continue  <EOL><DEDENT>if ma.isMA(x):<EOL><INDENT>masks.append(~ma.getmaskarray(x))  <EOL>xd = x.data<EOL><DEDENT>else:<EOL><INDENT>xd = x<EOL><DEDENT>try:<EOL><INDENT>mask = np.isfinite(xd)<EOL>if isinstance(mask, np.ndarray):<EOL><INDENT>masks.append(mask)<EOL><DEDENT><DEDENT>except: <EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>if len(masks):<EOL><INDENT>mask = reduce(np.logical_and, masks)<EOL>igood = mask.nonzero()[<NUM_LIT:0>]<EOL>if len(igood) < nrecs:<EOL><INDENT>for i, x in enumerate(margs):<EOL><INDENT>if seqlist[i]:<EOL><INDENT>margs[i] = x.take(igood, axis=<NUM_LIT:0>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>for i, x in enumerate(margs):<EOL><INDENT>if seqlist[i] and ma.isMA(x):<EOL><INDENT>margs[i] = x.filled()<EOL><DEDENT><DEDENT>return margs<EOL>", "docstring": "Find all masked and/or non-finite points in a set of arguments,\nand return the arguments with only the unmasked points remaining.\n\nArguments can be in any of 5 categories:\n\n1) 1-D masked arrays\n2) 1-D ndarrays\n3) ndarrays with more than one dimension\n4) other non-string iterables\n5) anything else\n\nThe first argument must be in one of the first four categories;\nany argument with a length differing from that of 
the first\nargument (and hence anything in category 5) then will be\npassed through unchanged.\n\nMasks are obtained from all arguments of the correct length\nin categories 1, 2, and 4; a point is bad if masked in a masked\narray or if it is a nan or inf.  No attempt is made to\nextract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`\ndoes not yield a Boolean array.\n\nAll input arguments that are not passed unchanged are returned\nas ndarrays after removing the points or rows corresponding to\nmasks in any of the arguments.\n\nA vastly simpler version of this function was originally\nwritten as a helper for Axes.scatter().", "id": "f17209:m36"}
{"signature": "def flatten(seq, scalarp=is_scalar_or_string):", "body": "for item in seq:<EOL><INDENT>if scalarp(item): yield item<EOL>else:<EOL><INDENT>for subitem in flatten(item, scalarp):<EOL><INDENT>yield subitem<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "this generator flattens nested containers such as\n\n>>> l=( ('John', 'Hunter'), (1,23), [[[[42,(5,23)]]]])\n\nso that\n\n>>> for i in flatten(l): print i,\nJohn Hunter 1 23 42 5 23\n\nBy: Composite of Holger Krekel and Luther Blissett\nFrom: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294\nand Recipe 1.12 in cookbook", "id": "f17209:m11"}
{"signature": "def unique(x):", "body": "return list(dict([ (val, <NUM_LIT:1>) for val in x]).keys())<EOL>", "docstring": "Return a list of unique elements of *x*", "id": "f17209:m2"}
{"signature": "def forward(self):", "body": "N = len(self._elements)<EOL>if self._pos<N-<NUM_LIT:1>: self._pos += <NUM_LIT:1><EOL>return self()<EOL>", "docstring": "move the position forward and return the current element", "id": "f17209:c18:m2"}
{"signature": "def popall(seq):", "body": "for i in range(len(seq)): seq.pop()<EOL>", "docstring": "empty a list", "id": "f17209:m27"}
{"signature": "def __call__(self, match):", "body": "return self[match.group(<NUM_LIT:0>)]<EOL>", "docstring": "Handler invoked for each regex *match*", "id": "f17209:c13:m1"}
{"signature": "def alltrue(seq):", "body": "if not len(seq): return False<EOL>for val in seq:<EOL><INDENT>if not val: return False<EOL><DEDENT>return True<EOL>", "docstring": "Return *True* if all elements of *seq* evaluate to *True*.  If\n*seq* is empty, return *False*.", "id": "f17209:m23"}
{"signature": "def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):", "body": "<EOL>warnings.warn('<STR_LIT>', DeprecationWarning)<EOL>import matplotlib.mlab as mlab<EOL>return mlab.quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y)<EOL>", "docstring": "This function has been moved to matplotlib.mlab -- please import\nit from there", "id": "f17209:m44"}
{"signature": "def is_closed_polygon(X):", "body": "<EOL>warnings.warn('<STR_LIT>', DeprecationWarning)<EOL>import matplotlib.mlab as mlab<EOL>return mlab.is_closed_polygon(X)<EOL>", "docstring": "This function has been moved to matplotlib.mlab -- please import\nit from there", "id": "f17209:m43"}
{"signature": "def soundex(name, len=<NUM_LIT:4>):", "body": "<EOL>soundex_digits = '<STR_LIT>'<EOL>sndx = '<STR_LIT>'<EOL>fc = '<STR_LIT>'<EOL>for c in name.upper():<EOL><INDENT>if c.isalpha():<EOL><INDENT>if not fc: fc = c   <EOL>d = soundex_digits[ord(c)-ord('<STR_LIT:A>')]<EOL>if not sndx or (d != sndx[-<NUM_LIT:1>]):<EOL><INDENT>sndx += d<EOL><DEDENT><DEDENT><DEDENT>sndx = fc + sndx[<NUM_LIT:1>:]<EOL>sndx = sndx.replace('<STR_LIT:0>', '<STR_LIT>')<EOL>return (sndx + (len * '<STR_LIT:0>'))[:len]<EOL>", "docstring": "soundex module conforming to Odell-Russell algorithm", "id": "f17209:m12"}
{"signature": "def vector_lengths( X, P=<NUM_LIT>, axis=None ):", "body": "<EOL>warnings.warn('<STR_LIT>', DeprecationWarning)<EOL>import matplotlib.mlab as mlab<EOL>return mlab.vector_lengths( X, P=<NUM_LIT>, axis=axis )<EOL>", "docstring": "This function has been moved to matplotlib.mlab -- please import\nit from there", "id": "f17209:m40"}
{"signature": "def get_siblings(self, a):", "body": "self.clean()<EOL>siblings = self._mapping.get(ref(a), [ref(a)])<EOL>return [x() for x in siblings]<EOL>", "docstring": "Returns all of the items joined with *a*, including itself.", "id": "f17209:c20:m6"}
{"signature": "def less_simple_linear_interpolation( x, y, xi, extrap=False ):", "body": "<EOL>warnings.warn('<STR_LIT>', DeprecationWarning)<EOL>import matplotlib.mlab as mlab<EOL>return mlab.less_simple_linear_interpolation( x, y, xi, extrap=extrap )<EOL>", "docstring": "This function has been moved to matplotlib.mlab -- please import\nit from there", "id": "f17209:m38"}
{"signature": "def is_string_like(obj):", "body": "if isinstance(obj, str): return True<EOL>if ma.isMaskedArray(obj):<EOL><INDENT>if obj.ndim == <NUM_LIT:0> and obj.dtype.kind in '<STR_LIT>':<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>try: obj + '<STR_LIT>'<EOL>except (TypeError, ValueError): return False<EOL>return True<EOL>", "docstring": "Return True if *obj* looks like a string", "id": "f17209:m4"}
{"signature": "def iterable(obj):", "body": "try: len(obj)<EOL>except: return False<EOL>return True<EOL>", "docstring": "return true if *obj* is iterable", "id": "f17209:m3"}
{"signature": "def get_window_extent(self, *args, **kwargs):", "body": "return self.bbox<EOL>", "docstring": "get the figure bounding box in display space; kwargs are void", "id": "f17210:c1:m7"}
{"signature": "def set_size_inches(self, *args, **kwargs):", "body": "forward = kwargs.get('<STR_LIT>', False)<EOL>if len(args)==<NUM_LIT:1>:<EOL><INDENT>w,h = args[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>w,h = args<EOL><DEDENT>dpival = self.dpi<EOL>self.bbox_inches.p1 = w, h<EOL>if forward:<EOL><INDENT>dpival = self.dpi<EOL>canvasw = w*dpival<EOL>canvash = h*dpival<EOL>manager = getattr(self.canvas, '<STR_LIT>', None)<EOL>if manager is not None:<EOL><INDENT>manager.resize(int(canvasw), int(canvash))<EOL><DEDENT><DEDENT>", "docstring": "set_size_inches(w,h, forward=False)\n\nSet the figure size in inches\n\nUsage::\n\n     fig.set_size_inches(w,h)  # OR\n     fig.set_size_inches((w,h) )\n\noptional kwarg *forward=True* will cause the canvas size to be\nautomatically updated; eg you can resize the figure window\nfrom the shell\n\nWARNING: forward=True is broken on all backends except GTK*\nand WX*\n\nACCEPTS: a w,h tuple with w,h in inches", "id": "f17210:c1:m13"}
{"signature": "def clear(self):", "body": "self.clf()<EOL>", "docstring": "Clear the figure -- synonym for fig.clf", "id": "f17210:c1:m32"}
{"signature": "def _make_key(self, *args, **kwargs):", "body": "def fixitems(items):<EOL><INDENT>ret = []<EOL>for k, v in items:<EOL><INDENT>if iterable(v): v = tuple(v)<EOL>ret.append((k,v))<EOL><DEDENT>return tuple(ret)<EOL><DEDENT>def fixlist(args):<EOL><INDENT>ret = []<EOL>for a in args:<EOL><INDENT>if iterable(a): a = tuple(a)<EOL>ret.append(a)<EOL><DEDENT>return tuple(ret)<EOL><DEDENT>key = fixlist(args), fixitems(kwargs.items())<EOL>return key<EOL>", "docstring": "make a hashable key out of args and kwargs", "id": "f17210:c1:m28"}
{"signature": "def __init__(self, left=None, bottom=None, right=None, top=None,<EOL>wspace=None, hspace=None):", "body": "self.validate = True<EOL>self.update(left, bottom, right, top, wspace, hspace)<EOL>", "docstring": "All dimensions are fraction of the figure width or height.\nAll values default to their rc params\n\nThe following attributes are available\n\n*left*  = 0.125\n    the left side of the subplots of the figure\n*right* = 0.9\n    the right side of the subplots of the figure\n*bottom* = 0.1\n    the bottom of the subplots of the figure\n*top* = 0.9\n    the top of the subplots of the figure\n*wspace* = 0.2\n    the amount of width reserved for blank space between subplots\n*hspace* = 0.2\n    the amount of height reserved for white space between subplots\n*validate*\n    make sure the params are in a legal state (*left*<*right*, etc)", "id": "f17210:c0:m0"}
{"signature": "def set_facecolor(self, color):", "body": "self.patch.set_facecolor(color)<EOL>", "docstring": "Set the face color of the Figure rectangle\n\nACCEPTS: any matplotlib color - see help(colors)", "id": "f17210:c1:m22"}
{"signature": "def get_dpi(self):", "body": "return self.dpi<EOL>", "docstring": "Return the dpi as a float", "id": "f17210:c1:m19"}
{"signature": "def clf(self):", "body": "self.suppressComposite = None<EOL>self.callbacks = cbook.CallbackRegistry(('<STR_LIT>', ))<EOL>for ax in tuple(self.axes):  <EOL><INDENT>ax.cla()<EOL>self.delaxes(ax)         <EOL><DEDENT>toolbar = getattr(self.canvas, '<STR_LIT>', None)<EOL>if toolbar is not None:<EOL><INDENT>toolbar.update()<EOL><DEDENT>self._axstack.clear()<EOL>self._seen = {}<EOL>self.artists = []<EOL>self.lines = []<EOL>self.patches = []<EOL>self.texts=[]<EOL>self.images = []<EOL>self.legends = []<EOL>self._axobservers = []<EOL>", "docstring": "Clear the figure", "id": "f17210:c1:m31"}
{"signature": "def gca(self, **kwargs):", "body": "ax = self._axstack()<EOL>if ax is not None:<EOL><INDENT>ispolar = kwargs.get('<STR_LIT>', False)<EOL>projection = kwargs.get('<STR_LIT>', None)<EOL>if ispolar:<EOL><INDENT>if projection is not None and projection != '<STR_LIT>':<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\" +<EOL>\"<STR_LIT>\" %<EOL>projection)<EOL><DEDENT>projection = '<STR_LIT>'<EOL><DEDENT>projection_class = get_projection_class(projection)<EOL>if isinstance(ax, projection_class):<EOL><INDENT>return ax<EOL><DEDENT><DEDENT>return self.add_subplot(<NUM_LIT>, **kwargs)<EOL>", "docstring": "Return the current axes, creating one if necessary\n\nThe following kwargs are supported\n%(Axes)s", "id": "f17210:c1:m39"}
{"signature": "def set_canvas(self, canvas):", "body": "self.canvas = canvas<EOL>", "docstring": "Set the canvas the contains the figure\n\nACCEPTS: a FigureCanvas instance", "id": "f17210:c1:m9"}
{"signature": "def set_edgecolor(self, color):", "body": "self.patch.set_edgecolor(color)<EOL>", "docstring": "Set the edge color of the Figure rectangle\n\nACCEPTS: any matplotlib color - see help(colors)", "id": "f17210:c1:m21"}
{"signature": "def suptitle(self, t, **kwargs):", "body": "x = kwargs.pop('<STR_LIT:x>', <NUM_LIT:0.5>)<EOL>y = kwargs.pop('<STR_LIT:y>', <NUM_LIT>)<EOL>if ('<STR_LIT>' not in kwargs) and ('<STR_LIT>' not in kwargs):<EOL><INDENT>kwargs['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if ('<STR_LIT>' not in kwargs) and ('<STR_LIT>' not in kwargs):<EOL><INDENT>kwargs['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>t = self.text(x, y, t, **kwargs)<EOL>return t<EOL>", "docstring": "Add a centered title to the figure.\n\nkwargs are :class:`matplotlib.text.Text` properties.  Using figure\ncoordinates, the defaults are:\n\n  - *x* = 0.5\n      the x location of text in figure coords\n\n  - *y* = 0.98\n      the y location of the text in figure coords\n\n  - *horizontalalignment* = 'center'\n      the horizontal alignment of the text\n\n  - *verticalalignment* = 'top'\n      the vertical alignment of the text\n\nA :class:`matplotlib.text.Text` instance is returned.\n\nExample::\n\n  fig.subtitle('this is the figure title', fontsize=12)", "id": "f17210:c1:m8"}
{"signature": "def waitforbuttonpress(self, timeout=-<NUM_LIT:1>):", "body": "blocking_input = BlockingKeyMouseInput(self)<EOL>return blocking_input(timeout=timeout)<EOL>", "docstring": "call signature::\n\n  waitforbuttonpress(self, timeout=-1)\n\nBlocking call to interact with the figure.\n\nThis will return True is a key was pressed, False if a mouse\nbutton was pressed and None if *timeout* was reached without\neither being pressed.\n\nIf *timeout* is negative, does not timeout.", "id": "f17210:c1:m46"}
{"signature": "def savefig(self, *args, **kwargs):", "body": "for key in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if key not in kwargs:<EOL><INDENT>kwargs[key] = rcParams['<STR_LIT>'%key]<EOL><DEDENT><DEDENT>transparent = kwargs.pop('<STR_LIT>', False)<EOL>if transparent:<EOL><INDENT>original_figure_alpha = self.patch.get_alpha()<EOL>self.patch.set_alpha(<NUM_LIT:0.0>)<EOL>original_axes_alpha = []<EOL>for ax in self.axes:<EOL><INDENT>patch = ax.patch<EOL>original_axes_alpha.append(patch.get_alpha())<EOL>patch.set_alpha(<NUM_LIT:0.0>)<EOL><DEDENT><DEDENT>self.canvas.print_figure(*args, **kwargs)<EOL>if transparent:<EOL><INDENT>self.patch.set_alpha(original_figure_alpha)<EOL>for ax, alpha in zip(self.axes, original_axes_alpha):<EOL><INDENT>ax.patch.set_alpha(alpha)<EOL><DEDENT><DEDENT>", "docstring": "call signature::\n\n  savefig(fname, dpi=None, facecolor='w', edgecolor='w',\n          orientation='portrait', papertype=None, format=None,\n          transparent=False):\n\nSave the current figure.\n\nThe output formats available depend on the backend being used.\n\nArguments:\n\n  *fname*:\n    A string containing a path to a filename, or a Python file-like object.\n\n    If *format* is *None* and *fname* is a string, the output\n    format is deduced from the extension of the filename.\n\nKeyword arguments:\n\n  *dpi*: [ None | scalar > 0 ]\n    The resolution in dots per inch.  If *None* it will default to\n    the value ``savefig.dpi`` in the matplotlibrc file.\n\n  *facecolor*, *edgecolor*:\n    the colors of the figure rectangle\n\n  *orientation*: [ 'landscape' | 'portrait' ]\n    not supported on all backends; currently only on postscript output\n\n  *papertype*:\n    One of 'letter', 'legal', 'executive', 'ledger', 'a0' through\n    'a10', 'b0' through 'b10'. Only supported for postscript\n    output.\n\n  *format*:\n    One of the file extensions supported by the active\n    backend.  
Most backends support png, pdf, ps, eps and svg.\n\n  *transparent*:\n    If *True*, the figure patch and axes patches will all be\n    transparent.  This is useful, for example, for displaying\n    a plot on top of a colored background on a web page.  The\n    transparency of these patches will be restored to their\n    original values upon exit of this function.", "id": "f17210:c1:m42"}
{"signature": "def figimage(self, X,<EOL>xo=<NUM_LIT:0>,<EOL>yo=<NUM_LIT:0>,<EOL>alpha=<NUM_LIT:1.0>,<EOL>norm=None,<EOL>cmap=None,<EOL>vmin=None,<EOL>vmax=None,<EOL>origin=None):", "body": "if not self._hold: self.clf()<EOL>im = FigureImage(self, cmap, norm, xo, yo, origin)<EOL>im.set_array(X)<EOL>im.set_alpha(alpha)<EOL>if norm is None:<EOL><INDENT>im.set_clim(vmin, vmax)<EOL><DEDENT>self.images.append(im)<EOL>return im<EOL>", "docstring": "call signatures::\n\n  figimage(X, **kwargs)\n\nadds a non-resampled array *X* to the figure.\n\n::\n\n  figimage(X, xo, yo)\n\nwith pixel offsets *xo*, *yo*,\n\n*X* must be a float array:\n\n* If *X* is MxN, assume luminance (grayscale)\n* If *X* is MxNx3, assume RGB\n* If *X* is MxNx4, assume RGBA\n\nOptional keyword arguments:\n\n  =========   ==========================================================\n  Keyword     Description\n  =========   ==========================================================\n  xo or yo    An integer, the *x* and *y* image offset in pixels\n  cmap        a :class:`matplotlib.cm.ColorMap` instance, eg cm.jet.\n              If None, default to the rc ``image.cmap`` value\n  norm        a :class:`matplotlib.colors.Normalize` instance. The\n              default is normalization().  This scales luminance -> 0-1\n  vmin|vmax   are used to scale a luminance image to 0-1.  If either is\n              None, the min and max of the luminance values will be\n              used.  Note if you pass a norm instance, the settings for\n              *vmin* and *vmax* will be ignored.\n  alpha       the alpha blending value, default is 1.0\n  origin      [ 'upper' | 'lower' ] Indicates where the [0,0] index of\n              the array is in the upper left or lower left corner of\n              the axes. 
Defaults to the rc image.origin value\n  =========   ==========================================================\n\nfigimage complements the axes image\n(:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled\nto fit the current axes.  If you want a resampled image to\nfill the entire figure, you can define an\n:class:`~matplotlib.axes.Axes` with size [0,1,0,1].\n\nAn :class:`matplotlib.image.FigureImage` instance is returned.\n\n.. plot:: mpl_examples/pylab_examples/figimage_demo.py", "id": "f17210:c1:m11"}
{"signature": "def hold(self, b=None):", "body": "if b is None: self._hold = not self._hold<EOL>else: self._hold = b<EOL>", "docstring": "Set the hold state.  If hold is None (default), toggle the\nhold state.  Else set the hold state to boolean value b.\n\nEg::\n\n    hold()      # toggle hold\n    hold(True)  # hold is on\n    hold(False) # hold is off", "id": "f17210:c1:m10"}
{"signature": "def get_frameon(self):", "body": "return self.frameon<EOL>", "docstring": "get the boolean indicating frameon", "id": "f17210:c1:m20"}
{"signature": "def autofmt_xdate(self, bottom=<NUM_LIT>, rotation=<NUM_LIT:30>, ha='<STR_LIT:right>'):", "body": "allsubplots = np.alltrue([hasattr(ax, '<STR_LIT>') for ax in self.axes])<EOL>if len(self.axes)==<NUM_LIT:1>:<EOL><INDENT>for label in ax.get_xticklabels():<EOL><INDENT>label.set_ha(ha)<EOL>label.set_rotation(rotation)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if allsubplots:<EOL><INDENT>for ax in self.get_axes():<EOL><INDENT>if ax.is_last_row():<EOL><INDENT>for label in ax.get_xticklabels():<EOL><INDENT>label.set_ha(ha)<EOL>label.set_rotation(rotation)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for label in ax.get_xticklabels():<EOL><INDENT>label.set_visible(False)<EOL><DEDENT>ax.set_xlabel('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if allsubplots:<EOL><INDENT>self.subplots_adjust(bottom=bottom)<EOL><DEDENT>", "docstring": "Date ticklabels often overlap, so it is useful to rotate them\nand right align them.  Also, a common use case is a number of\nsubplots with shared xaxes where the x-axis is date data.  The\nticklabels are often long, and it helps to rotate them on the\nbottom subplot and turn them off on other subplots, as well as\nturn off xlabels.\n\n*bottom*\n    the bottom of the subplots for :meth:`subplots_adjust`\n*rotation*\n    the rotation of the xtick labels\n*ha*\n    the horizontal alignment of the xticklabels", "id": "f17210:c1:m4"}
{"signature": "def add_axes(self, *args, **kwargs):", "body": "key = self._make_key(*args, **kwargs)<EOL>if key in self._seen:<EOL><INDENT>ax = self._seen[key]<EOL>self.sca(ax)<EOL>return ax<EOL><DEDENT>if not len(args): return<EOL>if isinstance(args[<NUM_LIT:0>], Axes):<EOL><INDENT>a = args[<NUM_LIT:0>]<EOL>assert(a.get_figure() is self)<EOL><DEDENT>else:<EOL><INDENT>rect = args[<NUM_LIT:0>]<EOL>ispolar = kwargs.pop('<STR_LIT>', False)<EOL>projection = kwargs.pop('<STR_LIT>', None)<EOL>if ispolar:<EOL><INDENT>if projection is not None and projection != '<STR_LIT>':<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\" +<EOL>\"<STR_LIT>\" %<EOL>projection)<EOL><DEDENT>projection = '<STR_LIT>'<EOL><DEDENT>a = projection_factory(projection, self, rect, **kwargs)<EOL><DEDENT>self.axes.append(a)<EOL>self._axstack.push(a)<EOL>self.sca(a)<EOL>self._seen[key] = a<EOL>return a<EOL>", "docstring": "Add an a axes with axes rect [*left*, *bottom*, *width*,\n*height*] where all quantities are in fractions of figure\nwidth and height.  kwargs are legal\n:class:`~matplotlib.axes.Axes` kwargs plus *projection* which\nsets the projection type of the axes.  (For backward\ncompatibility, ``polar=True`` may also be provided, which is\nequivalent to ``projection='polar'``).  Valid values for\n*projection* are: %(list)s.  Some of these projections support\nadditional kwargs, which may be provided to :meth:`add_axes`::\n\n    rect = l,b,w,h\n    fig.add_axes(rect)\n    fig.add_axes(rect, frameon=False, axisbg='g')\n    fig.add_axes(rect, polar=True)\n    fig.add_axes(rect, projection='polar')\n    fig.add_axes(ax)   # add an Axes instance\n\nIf the figure already has an axes with the same parameters,\nthen it will simply make that axes current and return it.  If\nyou do not want this behavior, eg. you want to force the\ncreation of a new axes, you must use a unique set of args and\nkwargs.  The axes :attr:`~matplotlib.axes.Axes.label`\nattribute has been exposed for this purpose.  
Eg., if you want\ntwo axes that are otherwise identical to be added to the\nfigure, make sure you give them unique labels::\n\n    fig.add_axes(rect, label='axes1')\n    fig.add_axes(rect, label='axes2')\n\nThe :class:`~matplotlib.axes.Axes` instance will be returned.\n\nThe following kwargs are supported:\n\n%(Axes)s", "id": "f17210:c1:m29"}
{"signature": "def add_axobserver(self, func):", "body": "self._axobservers.append(func)<EOL>", "docstring": "whenever the axes state change, func(self) will be called", "id": "f17210:c1:m41"}
{"signature": "def embedTTF(self, filename, characters):", "body": "font = FT2Font(str(filename))<EOL>fonttype = rcParams['<STR_LIT>']<EOL>def cvt(length, upe=font.units_per_EM, nearest=True):<EOL><INDENT>\"<STR_LIT>\"<EOL>value = length / upe * <NUM_LIT:1000><EOL>if nearest: return round(value)<EOL>if value < <NUM_LIT:0>: return floor(value)<EOL>else: return ceil(value)<EOL><DEDENT>def embedTTFType3(font, characters, descriptor):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>widthsObject = self.reserveObject('<STR_LIT>')<EOL>fontdescObject = self.reserveObject('<STR_LIT>')<EOL>fontdictObject = self.reserveObject('<STR_LIT>')<EOL>charprocsObject = self.reserveObject('<STR_LIT>')<EOL>differencesArray = []<EOL>firstchar, lastchar = <NUM_LIT:0>, <NUM_LIT:255><EOL>bbox = [cvt(x, nearest=False) for x in font.bbox]<EOL>fontdict = {<EOL>'<STR_LIT>'            : Name('<STR_LIT>'),<EOL>'<STR_LIT>'        : ps_name,<EOL>'<STR_LIT>'       : firstchar,<EOL>'<STR_LIT>'        : lastchar,<EOL>'<STR_LIT>'  : fontdescObject,<EOL>'<STR_LIT>'         : Name('<STR_LIT>'),<EOL>'<STR_LIT:Name>'            : descriptor['<STR_LIT>'],<EOL>'<STR_LIT>'        : bbox,<EOL>'<STR_LIT>'      : [ <NUM_LIT>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT>, <NUM_LIT:0>, <NUM_LIT:0> ],<EOL>'<STR_LIT>'       : charprocsObject,<EOL>'<STR_LIT>'        : {<EOL>'<STR_LIT>'        : Name('<STR_LIT>'),<EOL>'<STR_LIT>' : differencesArray},<EOL>'<STR_LIT>'          : widthsObject<EOL>}<EOL>from encodings import cp1252<EOL>if hasattr(cp1252, '<STR_LIT>'):<EOL><INDENT>def decode_char(charcode):<EOL><INDENT>return cp1252.decoding_map[charcode] or <NUM_LIT:0><EOL><DEDENT><DEDENT>else:<EOL><INDENT>def decode_char(charcode):<EOL><INDENT>return ord(cp1252.decoding_table[charcode])<EOL><DEDENT><DEDENT>def get_char_width(charcode):<EOL><INDENT>str = decode_char(charcode)<EOL>width = font.load_char(str, flags=LOAD_NO_SCALE|LOAD_NO_HINTING).horiAdvance<EOL>return cvt(width)<EOL><DEDENT>widths = [ get_char_width(charcode) for charcode in 
range(firstchar, lastchar+<NUM_LIT:1>) ]<EOL>descriptor['<STR_LIT>'] = max(widths)<EOL>cmap = font.get_charmap()<EOL>glyph_ids = []<EOL>differences = []<EOL>multi_byte_chars = set()<EOL>for c in characters:<EOL><INDENT>ccode = c<EOL>gind = cmap.get(ccode) or <NUM_LIT:0><EOL>glyph_ids.append(gind)<EOL>glyph_name = font.get_glyph_name(gind)<EOL>if ccode <= <NUM_LIT:255>:<EOL><INDENT>differences.append((ccode, glyph_name))<EOL><DEDENT>else:<EOL><INDENT>multi_byte_chars.add(glyph_name)<EOL><DEDENT><DEDENT>differences.sort()<EOL>last_c = -<NUM_LIT:2><EOL>for c, name in differences:<EOL><INDENT>if c != last_c + <NUM_LIT:1>:<EOL><INDENT>differencesArray.append(c)<EOL><DEDENT>differencesArray.append(Name(name))<EOL>last_c = c<EOL><DEDENT>rawcharprocs = ttconv.get_pdf_charprocs(filename, glyph_ids)<EOL>charprocs = {}<EOL>charprocsRef = {}<EOL>for charname, stream in list(rawcharprocs.items()):<EOL><INDENT>charprocDict = { '<STR_LIT>': len(stream) }<EOL>if charname in multi_byte_chars:<EOL><INDENT>charprocDict['<STR_LIT>'] = Name('<STR_LIT>')<EOL>charprocDict['<STR_LIT>'] = Name('<STR_LIT>')<EOL>charprocDict['<STR_LIT>'] = bbox<EOL>stream = stream[stream.find(\"<STR_LIT>\") + <NUM_LIT:2>:]<EOL><DEDENT>charprocObject = self.reserveObject('<STR_LIT>')<EOL>self.beginStream(charprocObject.id, None, charprocDict)<EOL>self.currentstream.write(stream)<EOL>self.endStream()<EOL>if charname in multi_byte_chars:<EOL><INDENT>name = self._get_xobject_symbol_name(filename, charname)<EOL>self.multi_byte_charprocs[name] = charprocObject<EOL><DEDENT>else:<EOL><INDENT>charprocs[charname] = charprocObject<EOL><DEDENT><DEDENT>self.writeObject(fontdictObject, fontdict)<EOL>self.writeObject(fontdescObject, descriptor)<EOL>self.writeObject(widthsObject, widths)<EOL>self.writeObject(charprocsObject, charprocs)<EOL>return fontdictObject<EOL><DEDENT>def embedTTFType42(font, characters, descriptor):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>fontdescObject = 
self.reserveObject('<STR_LIT>')<EOL>cidFontDictObject = self.reserveObject('<STR_LIT>')<EOL>type0FontDictObject = self.reserveObject('<STR_LIT>')<EOL>cidToGidMapObject = self.reserveObject('<STR_LIT>')<EOL>fontfileObject = self.reserveObject('<STR_LIT>')<EOL>wObject = self.reserveObject('<STR_LIT>')<EOL>toUnicodeMapObject = self.reserveObject('<STR_LIT>')<EOL>cidFontDict = {<EOL>'<STR_LIT>'           : Name('<STR_LIT>'),<EOL>'<STR_LIT>'        : Name('<STR_LIT>'),<EOL>'<STR_LIT>'       : ps_name,<EOL>'<STR_LIT>'  : {<EOL>'<STR_LIT>'   : '<STR_LIT>',<EOL>'<STR_LIT>'   : '<STR_LIT>',<EOL>'<STR_LIT>' : <NUM_LIT:0> },<EOL>'<STR_LIT>' : fontdescObject,<EOL>'<STR_LIT>'              : wObject,<EOL>'<STR_LIT>'    : cidToGidMapObject<EOL>}<EOL>type0FontDict = {<EOL>'<STR_LIT>'            : Name('<STR_LIT>'),<EOL>'<STR_LIT>'         : Name('<STR_LIT>'),<EOL>'<STR_LIT>'        : ps_name,<EOL>'<STR_LIT>'        : Name('<STR_LIT>'),<EOL>'<STR_LIT>' : [cidFontDictObject],<EOL>'<STR_LIT>'       : toUnicodeMapObject<EOL>}<EOL>descriptor['<STR_LIT>'] = fontfileObject<EOL>length1Object = self.reserveObject('<STR_LIT>')<EOL>self.beginStream(<EOL>fontfileObject.id,<EOL>self.reserveObject('<STR_LIT>'),<EOL>{'<STR_LIT>': length1Object})<EOL>fontfile = open(filename, '<STR_LIT:rb>')<EOL>length1 = <NUM_LIT:0><EOL>while True:<EOL><INDENT>data = fontfile.read(<NUM_LIT>)<EOL>if not data: break<EOL>length1 += len(data)<EOL>self.currentstream.write(data)<EOL><DEDENT>fontfile.close()<EOL>self.endStream()<EOL>self.writeObject(length1Object, length1)<EOL>cid_to_gid_map = ['<STR_LIT>'] * <NUM_LIT><EOL>cmap = font.get_charmap()<EOL>unicode_mapping = []<EOL>widths = []<EOL>max_ccode = <NUM_LIT:0><EOL>for c in characters:<EOL><INDENT>ccode = c<EOL>gind = cmap.get(ccode) or <NUM_LIT:0><EOL>glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)<EOL>widths.append((ccode, glyph.horiAdvance / <NUM_LIT:6>))<EOL>if ccode < <NUM_LIT>:<EOL><INDENT>cid_to_gid_map[ccode] = chr(gind)<EOL><DEDENT>max_ccode = 
max(ccode, max_ccode)<EOL><DEDENT>widths.sort()<EOL>cid_to_gid_map = cid_to_gid_map[:max_ccode + <NUM_LIT:1>]<EOL>last_ccode = -<NUM_LIT:2><EOL>w = []<EOL>max_width = <NUM_LIT:0><EOL>unicode_groups = []<EOL>for ccode, width in widths:<EOL><INDENT>if ccode != last_ccode + <NUM_LIT:1>:<EOL><INDENT>w.append(ccode)<EOL>w.append([width])<EOL>unicode_groups.append([ccode, ccode])<EOL><DEDENT>else:<EOL><INDENT>w[-<NUM_LIT:1>].append(width)<EOL>unicode_groups[-<NUM_LIT:1>][<NUM_LIT:1>] = ccode<EOL><DEDENT>max_width = max(max_width, width)<EOL>last_ccode = ccode<EOL><DEDENT>unicode_bfrange = []<EOL>for start, end in unicode_groups:<EOL><INDENT>unicode_bfrange.append(<EOL>\"<STR_LIT>\" %<EOL>(start, end,<EOL>\"<STR_LIT:U+0020>\".join([\"<STR_LIT>\" % x for x in range(start, end+<NUM_LIT:1>)])))<EOL><DEDENT>unicode_cmap = (self._identityToUnicodeCMap %<EOL>(len(unicode_groups),<EOL>\"<STR_LIT:\\n>\".join(unicode_bfrange)))<EOL>cid_to_gid_map = \"<STR_LIT>\".join(cid_to_gid_map).encode(\"<STR_LIT>\")<EOL>self.beginStream(cidToGidMapObject.id,<EOL>None,<EOL>{'<STR_LIT>':  len(cid_to_gid_map)})<EOL>self.currentstream.write(cid_to_gid_map)<EOL>self.endStream()<EOL>self.beginStream(toUnicodeMapObject.id,<EOL>None,<EOL>{'<STR_LIT>': unicode_cmap})<EOL>self.currentstream.write(unicode_cmap)<EOL>self.endStream()<EOL>descriptor['<STR_LIT>'] = max_width<EOL>self.writeObject(cidFontDictObject, cidFontDict)<EOL>self.writeObject(type0FontDictObject, type0FontDict)<EOL>self.writeObject(fontdescObject, descriptor)<EOL>self.writeObject(wObject, w)<EOL>return type0FontDictObject<EOL><DEDENT>ps_name = Name(font.get_sfnt()[(<NUM_LIT:1>,<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:6>)])<EOL>pclt = font.get_sfnt_table('<STR_LIT>')or { '<STR_LIT>': <NUM_LIT:0>, '<STR_LIT>': <NUM_LIT:0> }<EOL>post = font.get_sfnt_table('<STR_LIT>')or { '<STR_LIT>': (<NUM_LIT:0>,<NUM_LIT:0>) }<EOL>ff = font.face_flags<EOL>sf = font.style_flags<EOL>flags = <NUM_LIT:0><EOL>symbolic = False <EOL>if ff & FIXED_WIDTH: flags |= 
<NUM_LIT:1> << <NUM_LIT:0><EOL>if <NUM_LIT:0>: flags |= <NUM_LIT:1> << <NUM_LIT:1> <EOL>if symbolic: flags |= <NUM_LIT:1> << <NUM_LIT:2><EOL>else: flags |= <NUM_LIT:1> << <NUM_LIT:5><EOL>if sf & ITALIC: flags |= <NUM_LIT:1> << <NUM_LIT:6><EOL>if <NUM_LIT:0>: flags |= <NUM_LIT:1> << <NUM_LIT:16> <EOL>if <NUM_LIT:0>: flags |= <NUM_LIT:1> << <NUM_LIT> <EOL>if <NUM_LIT:0>: flags |= <NUM_LIT:1> << <NUM_LIT> <EOL>descriptor = {<EOL>'<STR_LIT>'        : Name('<STR_LIT>'),<EOL>'<STR_LIT>'    : ps_name,<EOL>'<STR_LIT>'       : flags,<EOL>'<STR_LIT>'    : [ cvt(x, nearest=False) for x in font.bbox ],<EOL>'<STR_LIT>'      : cvt(font.ascender, nearest=False),<EOL>'<STR_LIT>'     : cvt(font.descender, nearest=False),<EOL>'<STR_LIT>'   : cvt(pclt['<STR_LIT>'], nearest=False),<EOL>'<STR_LIT>'     : cvt(pclt['<STR_LIT>']),<EOL>'<STR_LIT>' : post['<STR_LIT>'][<NUM_LIT:1>], <EOL>'<STR_LIT>'       : <NUM_LIT:0> <EOL>}<EOL>if is_opentype_cff_font(filename):<EOL><INDENT>fonttype = <NUM_LIT><EOL>warnings.warn((\"<STR_LIT>\" +<EOL>\"<STR_LIT>\") %<EOL>os.path.basename(filename))<EOL><DEDENT>if fonttype == <NUM_LIT:3>:<EOL><INDENT>return embedTTFType3(font, characters, descriptor)<EOL><DEDENT>elif fonttype == <NUM_LIT>:<EOL><INDENT>return embedTTFType42(font, characters, descriptor)<EOL><DEDENT>", "docstring": "Embed the TTF font from the named file into the document.", "id": "f17211:c4:m11"}
{"signature": "def track_characters(self, font, s):", "body": "if isinstance(font, str):<EOL><INDENT>fname = font<EOL><DEDENT>else:<EOL><INDENT>fname = font.fname<EOL><DEDENT>realpath, stat_key = get_realpath_and_stat(fname)<EOL>used_characters = self.used_characters.setdefault(<EOL>stat_key, (realpath, set()))<EOL>used_characters[<NUM_LIT:1>].update([ord(x) for x in s])<EOL>", "docstring": "Keeps track of which characters are required from\n        each font.", "id": "f17211:c5:m4"}
{"signature": "def write(self, data):", "body": "if self.compressobj is None:<EOL><INDENT>self.file.write(data)<EOL><DEDENT>else:<EOL><INDENT>compressed = self.compressobj.compress(data)<EOL>self.file.write(compressed)<EOL><DEDENT>", "docstring": "Write some data on the stream.", "id": "f17211:c3:m3"}
{"signature": "def markerObject(self, path, trans, fillp, lw):", "body": "key = (path, trans, fillp is not None, lw)<EOL>result = self.markers.get(key)<EOL>if result is None:<EOL><INDENT>name = Name('<STR_LIT>' % len(self.markers))<EOL>ob = self.reserveObject('<STR_LIT>' % len(self.markers))<EOL>self.markers[key] = (name, ob, path, trans, fillp, lw)<EOL><DEDENT>else:<EOL><INDENT>name = result[<NUM_LIT:0>]<EOL><DEDENT>return name<EOL>", "docstring": "Return name of a marker XObject representing the given path.", "id": "f17211:c4:m19"}
{"signature": "def pdfRepr(obj):", "body": "<EOL>if hasattr(obj, '<STR_LIT>'):<EOL><INDENT>return obj.pdfRepr()<EOL><DEDENT>elif isinstance(obj, float):<EOL><INDENT>if not npy.isfinite(obj):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>r = \"<STR_LIT>\" % obj<EOL>return r.rstrip('<STR_LIT:0>').rstrip('<STR_LIT:.>')<EOL><DEDENT>elif isinstance(obj, int):<EOL><INDENT>return \"<STR_LIT>\" % obj<EOL><DEDENT>elif is_string_like(obj):<EOL><INDENT>return '<STR_LIT:(>' + _string_escape_regex.sub(r'<STR_LIT>', obj) + '<STR_LIT:)>'<EOL><DEDENT>elif isinstance(obj, dict):<EOL><INDENT>r = [\"<STR_LIT>\"]<EOL>r.extend([\"<STR_LIT>\" % (Name(key).pdfRepr(), pdfRepr(val))<EOL>for key, val in list(obj.items())])<EOL>r.append(\"<STR_LIT>\")<EOL>return fill(r)<EOL><DEDENT>elif isinstance(obj, (list, tuple)):<EOL><INDENT>r = [\"<STR_LIT:[>\"]<EOL>r.extend([pdfRepr(val) for val in obj])<EOL>r.append(\"<STR_LIT:]>\")<EOL>return fill(r)<EOL><DEDENT>elif isinstance(obj, bool):<EOL><INDENT>return ['<STR_LIT:false>', '<STR_LIT:true>'][obj]<EOL><DEDENT>elif obj is None:<EOL><INDENT>return '<STR_LIT:null>'<EOL><DEDENT>elif isinstance(obj, datetime):<EOL><INDENT>r = obj.strftime('<STR_LIT>')<EOL>if time.daylight: z = time.altzone<EOL>else: z = time.timezone<EOL>if z == <NUM_LIT:0>: r += '<STR_LIT>'<EOL>elif z < <NUM_LIT:0>: r += \"<STR_LIT>\" % ((-z)//<NUM_LIT>, (-z)%<NUM_LIT>)<EOL>else: r += \"<STR_LIT>\" % (z//<NUM_LIT>, z%<NUM_LIT>)<EOL>return pdfRepr(r)<EOL><DEDENT>elif isinstance(obj, BboxBase):<EOL><INDENT>return fill([pdfRepr(val) for val in obj.bounds])<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\"% type(obj))<EOL><DEDENT>", "docstring": "Map Python objects to PDF syntax.", "id": "f17211:m1"}
{"signature": "def writeTrailer(self):", "body": "self.write(\"<STR_LIT>\")<EOL>self.write(pdfRepr(<EOL>{'<STR_LIT>': self.nextObject,<EOL>'<STR_LIT>': self.rootObject,<EOL>'<STR_LIT>': self.infoObject }))<EOL>self.write(\"<STR_LIT>\" % self.startxref)<EOL>", "docstring": "Write out the PDF trailer.", "id": "f17211:c4:m27"}
{"signature": "def copy_properties(self, other):", "body": "GraphicsContextBase.copy_properties(self, other)<EOL>self._fillcolor = other._fillcolor<EOL>", "docstring": "Copy properties of other into self.", "id": "f17211:c6:m18"}
{"signature": "def fontName(self, fontprop):", "body": "if is_string_like(fontprop):<EOL><INDENT>filename = fontprop<EOL><DEDENT>elif rcParams['<STR_LIT>']:<EOL><INDENT>filename = findfont(fontprop, fontext='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>filename = findfont(fontprop)<EOL><DEDENT>Fx = self.fontNames.get(filename)<EOL>if Fx is None:<EOL><INDENT>Fx = Name('<STR_LIT>' % self.nextFont)<EOL>self.fontNames[filename] = Fx<EOL>self.nextFont += <NUM_LIT:1><EOL><DEDENT>return Fx<EOL>", "docstring": "Select a font based on fontprop and return a name suitable for\nOp.selectfont. If fontprop is a string, it will be interpreted\nas the filename of the font.", "id": "f17211:c4:m6"}
{"signature": "def _flush(self):", "body": "if self.compressobj is not None:<EOL><INDENT>compressed = self.compressobj.flush()<EOL>self.file.write(compressed)<EOL>self.compressobj = None<EOL><DEDENT>", "docstring": "Flush the compression object.", "id": "f17211:c3:m4"}
{"signature": "def zoomx(self, button, direction):", "body": "for a in self._active:<EOL><INDENT>a.xaxis.zoom(direction)<EOL><DEDENT>self.canvas.draw()<EOL>return True<EOL>", "docstring": "zoomx in direction", "id": "f17212:c3:m9"}
{"signature": "def show(mainloop=True):", "body": "for manager in Gcf.get_all_fig_managers():<EOL><INDENT>manager.window.show()<EOL><DEDENT>if mainloop and gtk.main_level() == <NUM_LIT:0> andlen(Gcf.get_all_fig_managers())><NUM_LIT:0>:<EOL><INDENT>gtk.main()<EOL><DEDENT>", "docstring": "Show all the figures and enter the gtk main loop\nThis should be the last line of your script", "id": "f17212:m3"}
{"signature": "def pany(self, button, direction):", "body": "for a in self._active:<EOL><INDENT>a.yaxis.pan(direction)<EOL><DEDENT>self.canvas.draw()<EOL>return True<EOL>", "docstring": "pany in direction", "id": "f17212:c3:m8"}
{"signature": "def draw_if_interactive():", "body": "if matplotlib.is_interactive():<EOL><INDENT>figManager =  Gcf.get_active()<EOL>if figManager is not None:<EOL><INDENT>figManager.canvas.draw()<EOL><DEDENT><DEDENT>", "docstring": "Is called after every pylab drawing command", "id": "f17212:m2"}
{"signature": "def _renderer_init(self):", "body": "self._renderer = RendererGDK (self, self.figure.dpi)<EOL>", "docstring": "Override by GTK backends to select a different renderer\n        Renderer should provide the methods:\n            set_pixmap ()\n            set_width_height ()\n        that are used by\n            _render_figure() / _pixmap_prepare()", "id": "f17212:c0:m14"}
{"signature": "def get_active_linestyle(self):", "body": "ind = self.cbox_linestyles.get_active()<EOL>ls = self.linestyles[ind]<EOL>return ls<EOL>", "docstring": "get the active lineinestyle", "id": "f17212:c4:m3"}
{"signature": "def on_combobox_lineprops_changed(self, item):", "body": "if not self._inited: return<EOL>self._updateson = False<EOL>line = self.get_active_line()<EOL>ls = line.get_linestyle()<EOL>if ls is None: ls = '<STR_LIT:None>'<EOL>self.cbox_linestyles.set_active(self.linestyled[ls])<EOL>marker = line.get_marker()<EOL>if marker is None: marker = '<STR_LIT:None>'<EOL>self.cbox_markers.set_active(self.markerd[marker])<EOL>r,g,b = colorConverter.to_rgb(line.get_color())<EOL>color = gtk.gdk.Color(*[int(val*<NUM_LIT>) for val in (r,g,b)])<EOL>button = self.wtree.get_widget('<STR_LIT>')<EOL>button.set_color(color)<EOL>r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())<EOL>color = gtk.gdk.Color(*[int(val*<NUM_LIT>) for val in (r,g,b)])<EOL>button = self.wtree.get_widget('<STR_LIT>')<EOL>button.set_color(color)<EOL>self._updateson = True<EOL>", "docstring": "update the widgets from the active line", "id": "f17212:c4:m6"}
{"signature": "def _update(self):", "body": "if not self._inited or not self._updateson: return<EOL>line = self.get_active_line()<EOL>ls = self.get_active_linestyle()<EOL>marker = self.get_active_marker()<EOL>line.set_linestyle(ls)<EOL>line.set_marker(marker)<EOL>button = self.wtree.get_widget('<STR_LIT>')<EOL>color = button.get_color()<EOL>r, g, b = [val/<NUM_LIT> for val in (color.red, color.green, color.blue)]<EOL>line.set_color((r,g,b))<EOL>button = self.wtree.get_widget('<STR_LIT>')<EOL>color = button.get_color()<EOL>r, g, b = [val/<NUM_LIT> for val in (color.red, color.green, color.blue)]<EOL>line.set_markerfacecolor((r,g,b))<EOL>line.figure.canvas.draw()<EOL>", "docstring": "update the active line props from the widgets", "id": "f17212:c4:m5"}
{"signature": "def __init__(self, canvas, window):", "body": "gtk.Toolbar.__init__(self)<EOL>self.canvas = canvas<EOL>self.win    = window<EOL>self.set_style(gtk.TOOLBAR_ICONS)<EOL>if gtk.pygtk_version >= (<NUM_LIT:2>,<NUM_LIT:4>,<NUM_LIT:0>):<EOL><INDENT>self._create_toolitems_2_4()<EOL>self.update = self._update_2_4<EOL>self.fileselect = FileChooserDialog(<EOL>title='<STR_LIT>',<EOL>parent=self.win,<EOL>filetypes=self.canvas.get_supported_filetypes(),<EOL>default_filetype=self.canvas.get_default_filetype())<EOL><DEDENT>else:<EOL><INDENT>self._create_toolitems_2_2()<EOL>self.update = self._update_2_2<EOL>self.fileselect = FileSelection(title='<STR_LIT>',<EOL>parent=self.win)<EOL><DEDENT>self.show_all()<EOL>self.update()<EOL>", "docstring": "figManager is the FigureManagerGTK instance that contains the\ntoolbar, with attributes figure, window and drawingArea", "id": "f17212:c3:m0"}
{"signature": "def expose_event(self, widget, event):", "body": "if _debug: print('<STR_LIT>' % fn_name())<EOL>if GTK_WIDGET_DRAWABLE(self):<EOL><INDENT>if self._need_redraw:<EOL><INDENT>x, y, w, h = self.allocation<EOL>self._pixmap_prepare (w, h)<EOL>self._render_figure(self._pixmap, w, h)<EOL>self._need_redraw = False<EOL><DEDENT>x, y, w, h = event.area<EOL>self.window.draw_drawable (self.style.fg_gc[self.state],<EOL>self._pixmap, x, y, x, y, w, h)<EOL><DEDENT>return False<EOL>", "docstring": "Expose_event for all GTK backends. Should not be overridden.", "id": "f17212:c0:m17"}
{"signature": "def panx(self, button, direction):", "body": "for a in self._active:<EOL><INDENT>a.xaxis.pan(direction)<EOL><DEDENT>self.canvas.draw()<EOL>return True<EOL>", "docstring": "panx in direction", "id": "f17212:c3:m7"}
{"signature": "def paintEvent( self, e ):", "body": "<EOL>if DEBUG: print('<STR_LIT>', self,self.get_width_height())<EOL>if type(self.replot) is bool: <EOL><INDENT>if self.replot:<EOL><INDENT>FigureCanvasAgg.draw(self)<EOL><DEDENT>if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian:<EOL><INDENT>stringBuffer = self.renderer._renderer.tostring_bgra()<EOL><DEDENT>else:<EOL><INDENT>stringBuffer = self.renderer._renderer.tostring_argb()<EOL><DEDENT>qImage = QtGui.QImage(stringBuffer, self.renderer.width,<EOL>self.renderer.height,<EOL>QtGui.QImage.Format_ARGB32)<EOL>p = QtGui.QPainter(self)<EOL>p.drawPixmap(QtCore.QPoint(<NUM_LIT:0>, <NUM_LIT:0>), QtGui.QPixmap.fromImage(qImage))<EOL>if self.drawRect:<EOL><INDENT>p.setPen( QtGui.QPen( QtCore.Qt.black, <NUM_LIT:1>, QtCore.Qt.DotLine ) )<EOL>p.drawRect( self.rect[<NUM_LIT:0>], self.rect[<NUM_LIT:1>], self.rect[<NUM_LIT:2>], self.rect[<NUM_LIT:3>] )<EOL><DEDENT>p.end()<EOL><DEDENT>else:<EOL><INDENT>bbox = self.replot<EOL>l, b, r, t = bbox.extents<EOL>w = int(r) - int(l)<EOL>h = int(t) - int(b)<EOL>t = int(b) + h<EOL>reg = self.copy_from_bbox(bbox)<EOL>stringBuffer = reg.to_string_argb()<EOL>qImage = QtGui.QImage(stringBuffer, w, h, QtGui.QImage.Format_ARGB32)<EOL>pixmap = QtGui.QPixmap.fromImage(qImage)<EOL>p = QtGui.QPainter( self )<EOL>p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)<EOL>p.end()<EOL><DEDENT>self.replot = False<EOL>self.drawRect = False<EOL>", "docstring": "Draw to the Agg backend and then copy the image to the qt.drawable.\nIn Qt, all drawing should be done inside of here when a widget is\nshown onscreen.", "id": "f17213:c2:m3"}
{"signature": "def draw( self ):", "body": "if DEBUG: print(\"<STR_LIT>\", self)<EOL>self.replot = True<EOL>FigureCanvasAgg.draw(self)<EOL>self.update()<EOL>QtGui.qApp.processEvents()<EOL>", "docstring": "Draw the figure when xwindows is ready for the update", "id": "f17213:c2:m4"}
{"signature": "def new_figure_manager(num, *args, **kwargs): ", "body": "if _debug: print('<STR_LIT>' % (self.__class__.__name__, _fn_name()))<EOL>FigureClass = kwargs.pop('<STR_LIT>', Figure)<EOL>thisFig = FigureClass(*args, **kwargs)<EOL>canvas  = FigureCanvasCairo(thisFig)<EOL>manager = FigureManagerBase(canvas, num)<EOL>return manager<EOL>", "docstring": "Create a new figure manager instance", "id": "f17214:m1"}
{"signature": "def scroll_event_windows(self, event):", "body": "<EOL>w = event.widget.winfo_containing(event.x_root, event.y_root)<EOL>if w == self._tkcanvas:<EOL><INDENT>x = event.x_root - w.winfo_rootx()<EOL>y = event.y_root - w.winfo_rooty()<EOL>y = self.figure.bbox.height - y<EOL>step = event.delta/<NUM_LIT><EOL>FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)<EOL><DEDENT>", "docstring": "MouseWheel event processor", "id": "f17215:c0:m10"}
{"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "_focus = windowing.FocusManager()<EOL>FigureClass = kwargs.pop('<STR_LIT>', Figure)<EOL>figure = FigureClass(*args, **kwargs)<EOL>window = Tk.Tk()<EOL>canvas = FigureCanvasTkAgg(figure, master=window)<EOL>figManager = FigureManagerTkAgg(canvas, num, window)<EOL>if matplotlib.is_interactive():<EOL><INDENT>figManager.show()<EOL><DEDENT>return figManager<EOL>", "docstring": "Create a new figure manager instance", "id": "f17215:m5"}
{"signature": "def dynamic_update(self):", "body": "<EOL>self.canvas.draw_idle()<EOL>", "docstring": "update drawing area only if idle", "id": "f17215:c4:m12"}
{"signature": "def draw_idle(self):", "body": "d = self._idle<EOL>self._idle = False<EOL>def idle_draw(*args):<EOL><INDENT>self.draw()<EOL>self._idle = True<EOL><DEDENT>if d:<EOL><INDENT>self._tkcanvas.after_idle(idle_draw)<EOL><DEDENT>", "docstring": "update drawing area only if idle", "id": "f17215:c0:m4"}
{"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "<EOL>FigureClass = kwargs.pop('<STR_LIT>', Figure)<EOL>thisFig = FigureClass(*args, **kwargs)<EOL>canvas = FigureCanvasTemplate(thisFig)<EOL>manager = FigureManagerTemplate(canvas, num)<EOL>return manager<EOL>", "docstring": "Create a new figure manager instance", "id": "f17216:m2"}
{"signature": "def print_foo(self, filename, *args, **kwargs):", "body": "pass<EOL>", "docstring": "Write out format foo.  The dpi, facecolor and edgecolor are restored\nto their original values after this call, so you don't need to\nsave and restore them.", "id": "f17216:c2:m1"}
{"signature": "def draw_if_interactive():", "body": "if matplotlib.is_interactive():<EOL><INDENT>figManager =  Gcf.get_active()<EOL>if figManager != None:<EOL><INDENT>figManager.canvas.draw()<EOL><DEDENT><DEDENT>", "docstring": "Is called after every pylab drawing command", "id": "f17217:m1"}
{"signature": "def new_figure_manager( num, *args, **kwargs ):", "body": "FigureClass = kwargs.pop('<STR_LIT>', Figure)<EOL>thisFig = FigureClass( *args, **kwargs )<EOL>canvas = FigureCanvasQT( thisFig )<EOL>manager = FigureManagerQT( canvas, num )<EOL>return manager<EOL>", "docstring": "Create a new figure manager instance", "id": "f17217:m4"}
{"signature": "def exception_handler( type, value, tb ):", "body": "msg = '<STR_LIT>'<EOL>if hasattr(value, '<STR_LIT:filename>') and value.filename != None:<EOL><INDENT>msg = value.filename + '<STR_LIT>'<EOL><DEDENT>if hasattr(value, '<STR_LIT>') and value.strerror != None:<EOL><INDENT>msg += value.strerror<EOL><DEDENT>else:<EOL><INDENT>msg += str(value)<EOL><DEDENT>if len( msg ) : error_msg_qt( msg )<EOL>", "docstring": "Handle uncaught exceptions\n    It does not catch SystemExit", "id": "f17217:m6"}
{"signature": "def show():", "body": "for manager in Gcf.get_all_fig_managers():<EOL><INDENT>manager.window.show()<EOL><DEDENT>if DEBUG: print('<STR_LIT>')<EOL>figManager =  Gcf.get_active()<EOL>if figManager != None:<EOL><INDENT>figManager.canvas.draw()<EOL><DEDENT>if _create_qApp.qAppCreatedHere:<EOL><INDENT>qt.qApp.exec_loop()<EOL><DEDENT>", "docstring": "Show all the figures and enter the qt main loop\nThis should be the last line of your script", "id": "f17217:m3"}
{"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "if _debug: print('<STR_LIT>' % fn_name())<EOL>FigureClass = kwargs.pop('<STR_LIT>', Figure)<EOL>thisFig = FigureClass(*args, **kwargs)<EOL>canvas = FigureCanvasGTKCairo(thisFig)<EOL>return FigureManagerGTK(canvas, num)<EOL>", "docstring": "Create a new figure manager instance", "id": "f17218:m0"}
{"signature": "def _renderer_init(self):", "body": "if _debug: print('<STR_LIT>' % (self.__class__.__name__, _fn_name()))<EOL>self._renderer = RendererGTKCairo (self.figure.dpi)<EOL>", "docstring": "Override to use cairo (rather than GDK) renderer", "id": "f17218:c1:m0"}
{"signature": "def start_rasterizing(self):", "body": "if self._rasterizing == <NUM_LIT:0>:<EOL><INDENT>self._raster_renderer = self._raster_renderer_class(<EOL>self._width*self.dpi, self._height*self.dpi, self.dpi)<EOL>self._set_current_renderer(self._raster_renderer)<EOL><DEDENT>self._rasterizing += <NUM_LIT:1><EOL>", "docstring": "Enter \"raster\" mode.  All subsequent drawing commands (until\nstop_rasterizing is called) will be drawn with the raster\nbackend.\n\nIf start_rasterizing is called multiple times before\nstop_rasterizing is called, this method has no effect.", "id": "f17219:c0:m2"}
{"signature": "def __init__(self, width, height, dpi, vector_renderer, raster_renderer_class=None):", "body": "if raster_renderer_class is None:<EOL><INDENT>raster_renderer_class = RendererAgg<EOL><DEDENT>self._raster_renderer_class = raster_renderer_class<EOL>self._width = width<EOL>self._height = height<EOL>self.dpi = dpi<EOL>assert not vector_renderer.option_image_nocomposite()<EOL>self._vector_renderer = vector_renderer<EOL>self._raster_renderer = None<EOL>self._rasterizing = <NUM_LIT:0><EOL>self._set_current_renderer(vector_renderer)<EOL>", "docstring": "width: The width of the canvas in logical units\n\nheight: The height of the canvas in logical units\n\ndpi: The dpi of the canvas\n\nvector_renderer: An instance of a subclass of RendererBase\nthat will be used for the vector drawing.\n\nraster_renderer_class: The renderer class to use for the\nraster drawing.  If not provided, this will use the Agg\nbackend (which is currently the only viable option anyway.)", "id": "f17219:c0:m0"}
{"signature": "def stop_rasterizing(self):", "body": "self._rasterizing -= <NUM_LIT:1><EOL>if self._rasterizing == <NUM_LIT:0>:<EOL><INDENT>self._set_current_renderer(self._vector_renderer)<EOL>width, height = self._width * self.dpi, self._height * self.dpi<EOL>buffer, bounds = self._raster_renderer.tostring_rgba_minimized()<EOL>l, b, w, h = bounds<EOL>if w > <NUM_LIT:0> and h > <NUM_LIT:0>:<EOL><INDENT>image = frombuffer(buffer, w, h, True)<EOL>image.is_grayscale = False<EOL>image.flipud_out()<EOL>self._renderer.draw_image(l, height - b - h, image, None)<EOL><DEDENT>self._raster_renderer = None<EOL>self._rasterizing = False<EOL><DEDENT>", "docstring": "Exit \"raster\" mode.  All of the drawing that was done since\nthe last start_rasterizing command will be copied to the\nvector backend by calling draw_image.\n\nIf stop_rasterizing is called multiple times before\nstart_rasterizing is called, this method has no effect.", "id": "f17219:c0:m3"}
{"signature": "def _WX28_clipped_agg_as_bitmap(agg, bbox):", "body": "l, b, width, height = bbox.get_bounds()<EOL>r = l + width<EOL>t = b + height<EOL>srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),<EOL>agg.buffer_rgba(<NUM_LIT:0>, <NUM_LIT:0>))<EOL>srcDC = wx.MemoryDC()<EOL>srcDC.SelectObject(srcBmp)<EOL>destBmp = wx.EmptyBitmap(int(width), int(height))<EOL>destDC = wx.MemoryDC()<EOL>destDC.SelectObject(destBmp)<EOL>destDC.BeginDrawing()<EOL>x = int(l)<EOL>y = int(int(agg.height) - t)<EOL>destDC.Blit(<NUM_LIT:0>, <NUM_LIT:0>, int(width), int(height), srcDC, x, y)<EOL>destDC.EndDrawing()<EOL>srcDC.SelectObject(wx.NullBitmap)<EOL>destDC.SelectObject(wx.NullBitmap)<EOL>return destBmp<EOL>", "docstring": "Convert the region of a the agg buffer bounded by bbox to a wx.Bitmap.\n\nNote: agg must be a backend_agg.RendererAgg instance.", "id": "f17220:m6"}
{"signature": "def blit(self, bbox=None):", "body": "if bbox is None:<EOL><INDENT>self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)<EOL>self.gui_repaint()<EOL>return<EOL><DEDENT>l, b, w, h = bbox.bounds<EOL>r = l + w<EOL>t = b + h<EOL>x = int(l)<EOL>y = int(self.bitmap.GetHeight() - t)<EOL>srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)<EOL>srcDC = wx.MemoryDC()<EOL>srcDC.SelectObject(srcBmp)<EOL>destDC = wx.MemoryDC()<EOL>destDC.SelectObject(self.bitmap)<EOL>destDC.BeginDrawing()<EOL>destDC.Blit(x, y, int(w), int(h), srcDC, x, y)<EOL>destDC.EndDrawing()<EOL>destDC.SelectObject(wx.NullBitmap)<EOL>srcDC.SelectObject(wx.NullBitmap)<EOL>self.gui_repaint()<EOL>", "docstring": "Transfer the region of the agg buffer defined by bbox to the display.\nIf bbox is None, the entire buffer is transferred.", "id": "f17220:c1:m1"}
{"signature": "def _py_WX28_convert_agg_to_wx_image(agg, bbox):", "body": "if bbox is None:<EOL><INDENT>image = wx.EmptyImage(int(agg.width), int(agg.height))<EOL>image.SetData(agg.tostring_rgb())<EOL>return image<EOL><DEDENT>else:<EOL><INDENT>return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))<EOL><DEDENT>", "docstring": "Convert the region of the agg buffer bounded by bbox to a wx.Image.  If\nbbox is None, the entire buffer is converted.\n\nNote: agg must be a backend_agg.RendererAgg instance.", "id": "f17220:m4"}
{"signature": "def _use_accelerator(state):", "body": "global _convert_agg_to_wx_image<EOL>global _convert_agg_to_wx_bitmap<EOL>if getattr(wx, '<STR_LIT>', '<STR_LIT>')[<NUM_LIT:0>:<NUM_LIT:3>] < '<STR_LIT>':<EOL><INDENT>if state and _wxagg is not None:<EOL><INDENT>_convert_agg_to_wx_image  = _wxagg.convert_agg_to_wx_image<EOL>_convert_agg_to_wx_bitmap = _wxagg.convert_agg_to_wx_bitmap<EOL><DEDENT>else:<EOL><INDENT>_convert_agg_to_wx_image  = _py_convert_agg_to_wx_image<EOL>_convert_agg_to_wx_bitmap = _py_convert_agg_to_wx_bitmap<EOL><DEDENT><DEDENT>else:<EOL><INDENT>_convert_agg_to_wx_image  = _py_WX28_convert_agg_to_wx_image<EOL>_convert_agg_to_wx_bitmap = _py_WX28_convert_agg_to_wx_bitmap<EOL><DEDENT>", "docstring": "Enable or disable the WXAgg accelerator, if it is present and is also\ncompatible with whatever version of wxPython is in use.", "id": "f17220:m7"}
{"signature": "def _py_convert_agg_to_wx_bitmap(agg, bbox):", "body": "if bbox is None:<EOL><INDENT>return wx.BitmapFromImage(_py_convert_agg_to_wx_image(agg, None))<EOL><DEDENT>else:<EOL><INDENT>return _clipped_image_as_bitmap(<EOL>_py_convert_agg_to_wx_image(agg, None),<EOL>bbox)<EOL><DEDENT>", "docstring": "Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.  If\nbbox is None, the entire buffer is converted.\n\nNote: agg must be a backend_agg.RendererAgg instance.", "id": "f17220:m2"}
{"signature": "def ishow():", "body": "for manager in Gcf.get_all_fig_managers():<EOL><INDENT>manager.show()<EOL><DEDENT>if show._needmain:<EOL><INDENT>_thread.start_new_thread(Fltk_run_interactive,())<EOL><DEDENT>show._needmain = False<EOL>", "docstring": "Show all the figures and enter the fltk mainloop in another thread\nThis allows to keep hand in interractive python session\nWarning: does not work under windows\nThis should be the last line of your script", "id": "f17221:m3"}
{"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "FigureClass = kwargs.pop('<STR_LIT>', Figure)<EOL>figure = FigureClass(*args, **kwargs)<EOL>window = Fltk.Fl_Double_Window(<NUM_LIT:10>,<NUM_LIT:10>,<NUM_LIT:30>,<NUM_LIT:30>)<EOL>canvas = FigureCanvasFltkAgg(figure)<EOL>window.end()<EOL>window.show()<EOL>window.make_current()<EOL>figManager = FigureManagerFltkAgg(canvas, num, window)<EOL>if matplotlib.is_interactive():<EOL><INDENT>figManager.show()<EOL><DEDENT>return figManager<EOL>", "docstring": "Create a new figure manager instance", "id": "f17221:m5"}
{"signature": "def show():", "body": "for manager in Gcf.get_all_fig_managers():<EOL><INDENT>manager.show()<EOL><DEDENT>if show._needmain:<EOL><INDENT>Fltk.Fl.run()<EOL>show._needmain = False<EOL><DEDENT>", "docstring": "Show all the figures and enter the fltk mainloop\n\nThis should be the last line of your script", "id": "f17221:m4"}
{"signature": "def new_figure_manager( num, *args, **kwargs ):", "body": "if DEBUG: print('<STR_LIT>')<EOL>FigureClass = kwargs.pop('<STR_LIT>', Figure)<EOL>thisFig = FigureClass( *args, **kwargs )<EOL>canvas = FigureCanvasQTAgg( thisFig )<EOL>return FigureManagerQTAgg( canvas, num )<EOL>", "docstring": "Create a new figure manager instance", "id": "f17222:m0"}
{"signature": "def rgb_to_gdk_color(self, rgb):", "body": "try:<EOL><INDENT>return self._cached[tuple(rgb)]<EOL><DEDENT>except KeyError:<EOL><INDENT>color = self._cached[tuple(rgb)] =self._cmap.alloc_color(<EOL>int(rgb[<NUM_LIT:0>]*<NUM_LIT>),int(rgb[<NUM_LIT:1>]*<NUM_LIT>),int(rgb[<NUM_LIT:2>]*<NUM_LIT>))<EOL>return color<EOL><DEDENT>", "docstring": "rgb - an RGB tuple (three 0.0-1.0 values)\nreturn an allocated gtk.gdk.Color", "id": "f17223:c1:m1"}
{"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "if __debug__: verbose.report('<STR_LIT>',<EOL>'<STR_LIT>')<EOL>FigureClass = kwargs.pop('<STR_LIT>', Figure)<EOL>thisFig = FigureClass(*args, **kwargs)<EOL>canvas = FigureCanvasAgg(thisFig)<EOL>manager = FigureManagerBase(canvas, num)<EOL>return manager<EOL>", "docstring": "Create a new figure manager instance", "id": "f17226:m0"}
{"signature": "def set_linewidth(self, w):", "body": "DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:1>, self)<EOL>self.select()<EOL>if w><NUM_LIT:0> and w<<NUM_LIT:1>: w = <NUM_LIT:1><EOL>GraphicsContextBase.set_linewidth(self, w)<EOL>lw = int(self.renderer.points_to_pixels(self._linewidth))<EOL>if lw==<NUM_LIT:0>: lw = <NUM_LIT:1><EOL>self._pen.SetWidth(lw)<EOL>self.gfx_ctx.SetPen(self._pen)<EOL>self.unselect()<EOL>", "docstring": "Set the line width.", "id": "f17227:c2:m5"}
{"signature": "def _create_menu(self):", "body": "DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:1>, self)<EOL>self._menu = MenuButtonWx(self)<EOL>self.AddControl(self._menu)<EOL>self.AddSeparator()<EOL>", "docstring": "Creates the 'menu' - implemented as a button which opens a\npop-up menu since wxPython does not allow a menu as a control", "id": "f17227:c9:m1"}
{"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "<EOL>DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:3>, None)<EOL>_create_wx_app()<EOL>FigureClass = kwargs.pop('<STR_LIT>', Figure)<EOL>fig = FigureClass(*args, **kwargs)<EOL>frame = FigureFrameWx(num, fig)<EOL>figmgr = frame.get_figure_manager()<EOL>if matplotlib.is_interactive():<EOL><INDENT>figmgr.frame.Show()<EOL><DEDENT>return figmgr<EOL>", "docstring": "Create a new figure manager instance", "id": "f17227:m7"}
{"signature": "def __init__(self, bitmap, dpi):", "body": "DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:1>, self)<EOL>if wx.VERSION_STRING < \"<STR_LIT>\":<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>self.width  = bitmap.GetWidth()<EOL>self.height = bitmap.GetHeight()<EOL>self.bitmap = bitmap<EOL>self.fontd = {}<EOL>self.dpi = dpi<EOL>self.gc = None<EOL>", "docstring": "Initialise a wxWindows renderer instance.", "id": "f17227:c1:m0"}
{"signature": "def _do_nothing(self, d):", "body": "pass<EOL>", "docstring": "A NULL event handler - does nothing whatsoever", "id": "f17227:c9:m10"}
{"signature": "def draw(self, drawDC=None):", "body": "DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:1>, self)<EOL>self.renderer = RendererWx(self.bitmap, self.figure.dpi)<EOL>self.figure.draw(self.renderer)<EOL>self._isDrawn = True<EOL>self.gui_repaint(drawDC=drawDC)<EOL>", "docstring": "Render the figure using RendererWx instance renderer, or using a\npreviously defined renderer if none is specified.", "id": "f17227:c3:m10"}
{"signature": "def _load_bitmap(filename):", "body": "basedir = os.path.join(rcParams['<STR_LIT>'],'<STR_LIT>')<EOL>bmpFilename = os.path.normpath(os.path.join(basedir, filename))<EOL>if not os.path.exists(bmpFilename):<EOL><INDENT>raise IOError('<STR_LIT>'%bmpFilename)<EOL><DEDENT>bmp = wx.Bitmap(bmpFilename)<EOL>return bmp<EOL>", "docstring": "Load a bitmap file from the backends/images subdirectory in which the\nmatplotlib library is installed. The filename parameter should not\ncontain any path information as this is determined automatically.\n\nReturns a wx.Bitmap object", "id": "f17227:m8"}
{"signature": "def Printer_Setup2(self, event=None):", "body": "if hasattr(self, '<STR_LIT>'):<EOL><INDENT>data = wx.PageSetupDialogData()<EOL>data.SetPrintData(self.printerData)<EOL><DEDENT>else:<EOL><INDENT>data = wx.PageSetupDialogData()<EOL><DEDENT>data.SetMarginTopLeft( (<NUM_LIT:15>, <NUM_LIT:15>) )<EOL>data.SetMarginBottomRight( (<NUM_LIT:15>, <NUM_LIT:15>) )<EOL>dlg = wx.PageSetupDialog(self, data)<EOL>if dlg.ShowModal() == wx.ID_OK:<EOL><INDENT>data = dlg.GetPageSetupData()<EOL>tl = data.GetMarginTopLeft()<EOL>br = data.GetMarginBottomRight()<EOL><DEDENT>self.printerData = wx.PrintData(data.GetPrintData())<EOL>dlg.Destroy()<EOL>", "docstring": "set up figure for printing.  Using the standard wx Printer\n        Setup Dialog.", "id": "f17227:c3:m5"}
{"signature": "def stop_event_loop(self, event=None):", "body": "if hasattr(self,'<STR_LIT>'):<EOL><INDENT>if self._event_loop.IsRunning():<EOL><INDENT>self._event_loop.Exit()<EOL><DEDENT>del self._event_loop<EOL><DEDENT>", "docstring": "Stop an event loop.  This is used to stop a blocking event\nloop so that interactive functions, such as ginput and\nwaitforbuttonpress, can wait for events.\n\nCall signature::\n\nstop_event_loop_default(self)", "id": "f17227:c3:m13"}
{"signature": "def draw_idle(self):", "body": "DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:1>, self)<EOL>self._isDrawn = False  <EOL>if hasattr(self,'<STR_LIT>'):<EOL><INDENT>self._idletimer.Restart(IDLE_DELAY)<EOL><DEDENT>else:<EOL><INDENT>self._idletimer = wx.FutureCall(IDLE_DELAY,self._onDrawIdle)<EOL><DEDENT>", "docstring": "Delay rendering until the GUI is idle.", "id": "f17227:c3:m8"}
{"signature": "def get_last_control(self):", "body": "return self._lastControl<EOL>", "docstring": "Returns the identity of the last toolbar button pressed.", "id": "f17227:c9:m4"}
{"signature": "def Printer_Preview(self, event=None):", "body": "po1  = PrintoutWx(self, width=self.printer_width,<EOL>margin=self.printer_margin)<EOL>po2  = PrintoutWx(self, width=self.printer_width,<EOL>margin=self.printer_margin)<EOL>self.preview = wx.PrintPreview(po1,po2,self.printerData)<EOL>if not self.preview.Ok():  print(\"<STR_LIT>\")<EOL>self.preview.SetZoom(<NUM_LIT:50>)<EOL>frameInst= self<EOL>while not isinstance(frameInst, wx.Frame):<EOL><INDENT>frameInst= frameInst.GetParent()<EOL><DEDENT>frame = wx.PreviewFrame(self.preview, frameInst, \"<STR_LIT>\")<EOL>frame.Initialize()<EOL>frame.SetPosition(self.GetPosition())<EOL>frame.SetSize((<NUM_LIT>,<NUM_LIT>))<EOL>frame.Centre(wx.BOTH)<EOL>frame.Show(True)<EOL>self.gui_repaint()<EOL>", "docstring": "generate Print Preview with wx Print mechanism", "id": "f17227:c3:m6"}
{"signature": "def updateButtonText(self, lst):", "body": "axis_txt = '<STR_LIT>'<EOL>for e in lst:<EOL><INDENT>axis_txt += '<STR_LIT>' % (e+<NUM_LIT:1>)<EOL><DEDENT>self.SetLabel(\"<STR_LIT>\" % axis_txt[:-<NUM_LIT:1>])<EOL>", "docstring": "Update the list of selected axes in the menu button", "id": "f17227:c6:m8"}
{"signature": "def draw_text(self, gc, x, y, s, prop, angle, ismath):", "body": "if ismath: s = self.strip_math(s)<EOL>DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:1>, self)<EOL>gc.select()<EOL>self.handle_clip_rectangle(gc)<EOL>gfx_ctx = gc.gfx_ctx<EOL>font = self.get_wx_font(s, prop)<EOL>color = gc.get_wxcolour(gc.get_rgb())<EOL>gfx_ctx.SetFont(font, color)<EOL>w, h, d = self.get_text_width_height_descent(s, prop, ismath)<EOL>x = int(x)<EOL>y = int(y-h)<EOL>if angle == <NUM_LIT:0.0>:<EOL><INDENT>gfx_ctx.DrawText(s, x, y)<EOL><DEDENT>else:<EOL><INDENT>rads = angle / <NUM_LIT> * math.pi<EOL>xo = h * math.sin(rads)<EOL>yo = h * math.cos(rads)<EOL>gfx_ctx.DrawRotatedText(s, x - xo, y - yo, rads)<EOL><DEDENT>gc.unselect()<EOL>", "docstring": "Render the matplotlib.text.Text instance\nNone)", "id": "f17227:c1:m9"}
{"signature": "def get_text_width_height_descent(self, s, prop, ismath):", "body": "<EOL>if ismath: s = self.strip_math(s)<EOL>if self.gc is None:<EOL><INDENT>gc = self.new_gc()<EOL><DEDENT>else:<EOL><INDENT>gc = self.gc<EOL><DEDENT>gfx_ctx = gc.gfx_ctx<EOL>font = self.get_wx_font(s, prop)<EOL>gfx_ctx.SetFont(font, wx.BLACK)<EOL>w, h, descent, leading = gfx_ctx.GetFullTextExtent(s)<EOL>return w, h, descent<EOL>", "docstring": "get the width and height in display coords of the string s\nwith FontPropertry prop", "id": "f17227:c1:m3"}
{"signature": "def update(self):", "body": "DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:1>, self)<EOL>self._axes = self.canvas.figure.get_axes()<EOL>self._menu.updateAxes(len(self._axes))<EOL>", "docstring": "Update the toolbar menu - called when (e.g.) a new subplot or axes are added", "id": "f17227:c9:m9"}
{"signature": "def _onSize(self, evt):", "body": "DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:2>, self)<EOL>self._width, self._height = self.GetClientSize()<EOL>self.bitmap =wx.EmptyBitmap(self._width, self._height)<EOL>self._isDrawn = False<EOL>if self._width <= <NUM_LIT:1> or self._height <= <NUM_LIT:1>: return <EOL>dpival = self.figure.dpi<EOL>winch = self._width/dpival<EOL>hinch = self._height/dpival<EOL>self.figure.set_size_inches(winch, hinch)<EOL>self.Refresh(eraseBackground=False)<EOL>", "docstring": "Called when wxEventSize is generated.\n\nIn this application we attempt to resize to fit the window, so it\nis better to take the performance hit and redraw the whole window.", "id": "f17227:c3:m27"}
{"signature": "def _onKeyUp(self, evt):", "body": "key = self._get_key(evt)<EOL>evt.Skip()<EOL>FigureCanvasBase.key_release_event(self, key, guiEvent=evt)<EOL>", "docstring": "Release key.", "id": "f17227:c3:m31"}
{"signature": "def _onEraseBackground(self, evt):", "body": "pass<EOL>", "docstring": "Called when window is redrawn; since we are blitting the entire\nimage, we can leave this blank to suppress flicker.", "id": "f17227:c3:m26"}
{"signature": "def _onMenuItemSelected(self, evt):", "body": "current = self._menu.IsChecked(evt.GetId())<EOL>if current:<EOL><INDENT>new = False<EOL><DEDENT>else:<EOL><INDENT>new = True<EOL><DEDENT>self._menu.Check(evt.GetId(), new)<EOL>self._toolbar.set_active(self.getActiveAxes())<EOL>evt.Skip()<EOL>", "docstring": "Called whenever one of the specific axis menu items is selected", "id": "f17227:c6:m5"}
{"signature": "def set_joinstyle(self, js):", "body": "DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:1>, self)<EOL>self.select()<EOL>GraphicsContextBase.set_joinstyle(self, js)<EOL>self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle])<EOL>self.gfx_ctx.SetPen(self._pen)<EOL>self.unselect()<EOL>", "docstring": "Set the join style to be one of ('miter', 'round', 'bevel')", "id": "f17227:c2:m7"}
{"signature": "def get_wxcolour(self, color):", "body": "DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:1>, self)<EOL>if len(color) == <NUM_LIT:3>:<EOL><INDENT>r, g, b = color<EOL>r *= <NUM_LIT:255><EOL>g *= <NUM_LIT:255><EOL>b *= <NUM_LIT:255><EOL>return wx.Colour(red=int(r), green=int(g), blue=int(b))<EOL><DEDENT>else:<EOL><INDENT>r, g, b, a = color<EOL>r *= <NUM_LIT:255><EOL>g *= <NUM_LIT:255><EOL>b *= <NUM_LIT:255><EOL>a *= <NUM_LIT:255><EOL>return wx.Colour(red=int(r), green=int(g), blue=int(b), alpha=int(a))<EOL><DEDENT>", "docstring": "return a wx.Colour from RGB format", "id": "f17227:c2:m9"}
{"signature": "def Printer_Init(self):", "body": "self.printerData = wx.PrintData()<EOL>self.printerData.SetPaperId(wx.PAPER_LETTER)<EOL>self.printerData.SetPrintMode(wx.PRINT_MODE_PRINTER)<EOL>self.printerPageData= wx.PageSetupDialogData()<EOL>self.printerPageData.SetMarginBottomRight((<NUM_LIT>,<NUM_LIT>))<EOL>self.printerPageData.SetMarginTopLeft((<NUM_LIT>,<NUM_LIT>))<EOL>self.printerPageData.SetPrintData(self.printerData)<EOL>self.printer_width = <NUM_LIT><EOL>self.printer_margin= <NUM_LIT:0.5><EOL>", "docstring": "initialize printer settings using wx methods", "id": "f17227:c3:m3"}
{"signature": "def unselect(self):", "body": "if sys.platform=='<STR_LIT:win32>':<EOL><INDENT>self.dc.SelectObject(wx.NullBitmap)<EOL>self.IsSelected = False<EOL><DEDENT>", "docstring": "Select a Null bitmasp into this wxDC instance", "id": "f17227:c2:m2"}
{"signature": "def _onLeftButtonDown(self, evt):", "body": "x = evt.GetX()<EOL>y = self.figure.bbox.height - evt.GetY()<EOL>evt.Skip()<EOL>self.CaptureMouse()<EOL>FigureCanvasBase.button_press_event(self, x, y, <NUM_LIT:1>, guiEvent=evt)<EOL>", "docstring": "Start measuring on an axis.", "id": "f17227:c3:m34"}
{"signature": "def set_capstyle(self, cs):", "body": "DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:1>, self)<EOL>self.select()<EOL>GraphicsContextBase.set_capstyle(self, cs)<EOL>self._pen.SetCap(GraphicsContextWx._capd[self._capstyle])<EOL>self.gfx_ctx.SetPen(self._pen)<EOL>self.unselect()<EOL>", "docstring": "Set the capstyle as a string in ('butt', 'round', 'projecting')", "id": "f17227:c2:m6"}
{"signature": "def _handleSelectAllAxes(self, evt):", "body": "if len(self._axisId) == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>for i in range(len(self._axisId)):<EOL><INDENT>self._menu.Check(self._axisId[i], True)<EOL><DEDENT>self._toolbar.set_active(self.getActiveAxes())<EOL>evt.Skip()<EOL>", "docstring": "Called when the 'select all axes' menu item is selected.", "id": "f17227:c6:m3"}
{"signature": "def select(self):", "body": "if sys.platform=='<STR_LIT:win32>':<EOL><INDENT>self.dc.SelectObject(self.bitmap)<EOL>self.IsSelected = True<EOL><DEDENT>", "docstring": "Select the current bitmap into this wxDC instance", "id": "f17227:c2:m1"}
{"signature": "def show():", "body": "DEBUG_MSG(\"<STR_LIT>\", <NUM_LIT:3>, None)<EOL>for figwin in Gcf.get_all_fig_managers():<EOL><INDENT>figwin.frame.Show()<EOL><DEDENT>if show._needmain and not matplotlib.is_interactive():<EOL><INDENT>wxapp = wx.GetApp()<EOL>if wxapp is not None:<EOL><INDENT>imlr = getattr(wxapp, '<STR_LIT>', lambda: False)<EOL>if not imlr():<EOL><INDENT>wxapp.MainLoop()<EOL><DEDENT><DEDENT>show._needmain = False<EOL><DEDENT>", "docstring": "Current implementation assumes that matplotlib is executed in a PyCrust\nshell. It appears to be possible to execute wxPython applications from\nwithin a PyCrust without having to ensure that wxPython has been created\nin a secondary thread (e.g. SciPy gui_thread).\n\nUnfortunately, gui_thread seems to introduce a number of further\ndependencies on SciPy modules, which I do not wish to introduce\ninto the backend at this point. If there is a need I will look\ninto this in a later release.", "id": "f17227:m6"}
{"signature": "def _onEnter(self, evt):", "body": "FigureCanvasBase.enter_notify_event(self, guiEvent = evt)<EOL>", "docstring": "Mouse has entered the window.", "id": "f17227:c3:m39"}
{"signature": "def getActiveAxes(self):", "body": "active = []<EOL>for i in range(len(self._axisId)):<EOL><INDENT>if self._menu.IsChecked(self._axisId[i]):<EOL><INDENT>active.append(i)<EOL><DEDENT><DEDENT>return active<EOL>", "docstring": "Return a list of the selected axes.", "id": "f17227:c6:m7"}
{"signature": "def new_figure_manager( num, *args, **kwargs ):", "body": "thisFig = Figure( *args, **kwargs )<EOL>canvas = FigureCanvasQT( thisFig )<EOL>manager = FigureManagerQT( canvas, num )<EOL>return manager<EOL>", "docstring": "Create a new figure manager instance", "id": "f17228:m4"}
{"signature": "def resize(self, width, height):", "body": "self.window.resize(width, height)<EOL>", "docstring": "set the canvas size in pixels", "id": "f17228:c1:m3"}
{"signature": "def set_handle(self,type,handle):", "body": "if self.lastHandle[type] != handle:<EOL><INDENT>self.emf.SelectObject(handle)<EOL>self.lastHandle[type]=handle<EOL><DEDENT>", "docstring": "Update the EMF file with the current handle, but only if it\nisn't the same as the last one.  Don't want to flood the file\nwith duplicate info.", "id": "f17229:c3:m15"}
{"signature": "def draw_image(self, x, y, im, bbox):", "body": "<EOL>pass<EOL>", "docstring": "Draw the Image instance into the current axes; x is the\ndistance in pixels from the left hand side of the canvas. y is\nthe distance from the origin.  That is, if origin is upper, y\nis the distance from top.  If origin is lower, y is the\ndistance from bottom\n\nbbox is a matplotlib.transforms.BBox instance for clipping, or\nNone", "id": "f17229:c3:m3"}
{"signature": "def select_pen(self, gc):", "body": "pen=EMFPen(self.emf,gc)<EOL>key=hash(pen)<EOL>handle=self._fontHandle.get(key)<EOL>if handle is None:<EOL><INDENT>handle=pen.get_handle()<EOL>self._fontHandle[key]=handle<EOL><DEDENT>if debugHandle: print(\"<STR_LIT>\" % handle)<EOL>self.set_handle(\"<STR_LIT>\",handle)<EOL>if pen.style != pyemf.PS_NULL:<EOL><INDENT>return pen<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Select a pen that includes the color, line width and line\nstyle.  Return the pen if it will draw a line, or None if the\npen won't produce any output (i.e. the style is PS_NULL)", "id": "f17229:c3:m18"}
{"signature": "def draw_lines(self, gc, x, y):", "body": "if debugPrint: print(\"<STR_LIT>\" % len(str(x)))<EOL>if self.select_pen(gc):<EOL><INDENT>points = [(int(x[i]), int(self.height-y[i])) for i in range(len(x))]<EOL>self.emf.Polyline(points)<EOL><DEDENT>", "docstring": "x and y are equal length arrays, draw lines connecting each\npoint in x, y", "id": "f17229:c3:m5"}
{"signature": "def _get_font_ttf(self, prop):", "body": "key = hash(prop)<EOL>font = _fontd.get(key)<EOL>if font is None:<EOL><INDENT>fname = findfont(prop)<EOL>if debugText: print(\"<STR_LIT>\" % fname)<EOL>font = FT2Font(str(fname))<EOL>_fontd[key] = font<EOL><DEDENT>font.clear()<EOL>size = prop.get_size_in_points()<EOL>font.set_size(size, self.dpi)<EOL>return font<EOL>", "docstring": "get the true type font properties, used because EMFs on\nwindows will use true type fonts.", "id": "f17229:c3:m20"}
{"signature": "def __init__(self, outfile, width, height, dpi):", "body": "self.outfile = outfile<EOL>self._cached = {}<EOL>self._fontHandle = {}<EOL>self.lastHandle = {'<STR_LIT>':-<NUM_LIT:1>, '<STR_LIT>':-<NUM_LIT:1>, '<STR_LIT>':-<NUM_LIT:1>}<EOL>self.emf=pyemf.EMF(width,height,dpi,'<STR_LIT>')<EOL>self.width=int(width*dpi)<EOL>self.height=int(height*dpi)<EOL>self.dpi = dpi<EOL>self.pointstodpi = dpi/<NUM_LIT><EOL>self.hackPointsForMathExponent = <NUM_LIT><EOL>self.emf.SetBkMode(pyemf.TRANSPARENT)<EOL>self.emf.SetTextAlign( pyemf.TA_BOTTOM|pyemf.TA_LEFT)<EOL>if debugPrint: print(\"<STR_LIT>\" % (self.width,self.height,outfile,dpi))<EOL>", "docstring": "Initialize the renderer with a gd image instance", "id": "f17229:c3:m0"}
{"signature": "def show():", "body": "for manager in Gcf.get_all_fig_managers():<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "For image backends - is not required\nFor GUI backends - show() is usually the last line of a pylab script and\ntells the backend that it is time to draw.  In interactive mode, this may\nbe a do nothing func.  See the GTK backend for an example of how to handle\ninteractive versus batch mode", "id": "f17229:m1"}
{"signature": "def draw_text(self, gc, x, y, s, prop, angle, ismath=False):", "body": "if debugText: print(\"<STR_LIT>\" % (x,y,angle,s))<EOL>if ismath:<EOL><INDENT>self.draw_math_text(gc,x,y,s,prop,angle)<EOL><DEDENT>else:<EOL><INDENT>self.draw_plain_text(gc,x,y,s,prop,angle)<EOL><DEDENT>", "docstring": "Draw the text.Text instance s at x,y (display coords) with font\nproperties instance prop at angle in degrees, using GraphicsContext gc\n\n**backend implementers note**\n\nWhen you are trying to determine if you have gotten your bounding box\nright (which is what enables the text layout/alignment to work\nproperly), it helps to change the line in text.py\n\n          if 0: bbox_artist(self, renderer)\n\nto if 1, and then the actual bounding box will be blotted along with\nyour text.", "id": "f17229:c3:m9"}
{"signature": "def new_figure_manager(num, *args, **kwargs):", "body": "<EOL>FigureClass = kwargs.pop('<STR_LIT>', Figure)<EOL>thisFig = FigureClass(*args, **kwargs)<EOL>canvas = FigureCanvasEMF(thisFig)<EOL>manager = FigureManagerEMF(canvas, num)<EOL>return manager<EOL>", "docstring": "Create a new figure manager instance", "id": "f17229:m2"}
{"signature": "def draw_point(self, gc, x, y):", "body": "if debugPrint: print(\"<STR_LIT>\" % (x,y))<EOL>pen=EMFPen(self.emf,gc)<EOL>self.emf.SetPixel(int(x),int(self.height-y),(pen.r,pen.g,pen.b))<EOL>", "docstring": "Draw a single point at x,y\nWhere 'point' is a device-unit point (or pixel), not a matplotlib point", "id": "f17229:c3:m6"}
{"signature": "def show():", "body": "<EOL>_macosx.show()<EOL>", "docstring": "Show all the figures and enter the Cocoa mainloop.\n    This function will not return until all windows are closed or\n    the interpreter exits.", "id": "f17230:m0"}
{"signature": "def draw_if_interactive():", "body": "figManager =  Gcf.get_active()<EOL>if figManager is not None:<EOL><INDENT>figManager.canvas.invalidate()<EOL><DEDENT>", "docstring": "For performance reasons, we don't want to redraw the figure after\neach draw command. Instead, we mark the figure as invalid, so that\nit will be redrawn as soon as the event loop resumes via PyOS_InputHook.\nThis function should be called after each draw event, even if\nmatplotlib is not running interactively.", "id": "f17230:m1"}
{"signature": "def _get_style(self, gc, rgbFace):", "body": "if rgbFace is None:<EOL><INDENT>fill = '<STR_LIT:none>'<EOL><DEDENT>else:<EOL><INDENT>fill = rgb2hex(rgbFace[:<NUM_LIT:3>])<EOL><DEDENT>offset, seq = gc.get_dashes()<EOL>if seq is None:<EOL><INDENT>dashes = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>dashes = '<STR_LIT>' % (<EOL>'<STR_LIT:U+002C>'.join(['<STR_LIT>'%val for val in seq]), offset)<EOL><DEDENT>linewidth = gc.get_linewidth()<EOL>if linewidth:<EOL><INDENT>return '<STR_LIT>''<STR_LIT>' % (<EOL>fill,<EOL>rgb2hex(gc.get_rgb()[:<NUM_LIT:3>]),<EOL>linewidth,<EOL>gc.get_joinstyle(),<EOL>_capstyle_d[gc.get_capstyle()],<EOL>dashes,<EOL>gc.get_alpha(),<EOL>)<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>' % (fill,<EOL>gc.get_alpha(),<EOL>)<EOL><DEDENT>", "docstring": "return the style string.\nstyle is generated from the GraphicsContext, rgbFace and clippath", "id": "f17232:c0:m3"}
{"signature": "def _draw_mathtext(self, gc, x, y, s, prop, angle):", "body": "width, height, descent, svg_elements, used_characters =self.mathtext_parser.parse(s, <NUM_LIT>, prop)<EOL>svg_glyphs = svg_elements.svg_glyphs<EOL>svg_rects = svg_elements.svg_rects<EOL>color = rgb2hex(gc.get_rgb()[:<NUM_LIT:3>])<EOL>write = self._svgwriter.write<EOL>style = \"<STR_LIT>\" % color<EOL>if rcParams['<STR_LIT>']:<EOL><INDENT>new_chars = []<EOL>for font, fontsize, thetext, new_x, new_y_mtc, metrics in svg_glyphs:<EOL><INDENT>path = self._add_char_def(font, thetext)<EOL>if path is not None:<EOL><INDENT>new_chars.append(path)<EOL><DEDENT><DEDENT>if len(new_chars):<EOL><INDENT>write('<STR_LIT>')<EOL>for path in new_chars:<EOL><INDENT>write(path)<EOL><DEDENT>write('<STR_LIT>')<EOL><DEDENT>svg = ['<STR_LIT>' % style]<EOL>if angle != <NUM_LIT:0>:<EOL><INDENT>svg.append('<STR_LIT>'<EOL>% (x,y,-angle) )<EOL><DEDENT>else:<EOL><INDENT>svg.append('<STR_LIT>' % (x, y))<EOL><DEDENT>svg.append('<STR_LIT>')<EOL>for font, fontsize, thetext, new_x, new_y_mtc, metrics in svg_glyphs:<EOL><INDENT>charid = self._get_char_def_id(font, thetext)<EOL>svg.append('<STR_LIT>' %<EOL>(charid, new_x, -new_y_mtc, fontsize / self.FONT_SCALE))<EOL><DEDENT>svg.append('<STR_LIT>')<EOL><DEDENT>else: <EOL><INDENT>svg = ['<STR_LIT>' % (style, x, y)]<EOL>if angle != <NUM_LIT:0>:<EOL><INDENT>svg.append('<STR_LIT>'<EOL>% (x,y,-angle,-x,-y) ) <EOL><DEDENT>svg.append('<STR_LIT>')<EOL>curr_x,curr_y = <NUM_LIT:0.0>,<NUM_LIT:0.0><EOL>for font, fontsize, thetext, new_x, new_y_mtc, metrics in svg_glyphs:<EOL><INDENT>new_y = - new_y_mtc<EOL>style = \"<STR_LIT>\" % (fontsize, font.family_name)<EOL>svg.append('<STR_LIT>' % style)<EOL>xadvance = metrics.advance<EOL>svg.append('<STR_LIT>' % xadvance)<EOL>dx = new_x - curr_x<EOL>if dx != <NUM_LIT:0.0>:<EOL><INDENT>svg.append('<STR_LIT>' % dx)<EOL><DEDENT>dy = new_y - curr_y<EOL>if dy != <NUM_LIT:0.0>:<EOL><INDENT>svg.append('<STR_LIT>' % dy)<EOL><DEDENT>thetext = 
escape_xml_text(thetext)<EOL>svg.append('<STR_LIT>' % thetext)<EOL>curr_x = new_x + xadvance<EOL>curr_y = new_y<EOL><DEDENT>svg.append('<STR_LIT>')<EOL><DEDENT>if len(svg_rects):<EOL><INDENT>style = \"<STR_LIT>\" % color<EOL>svg.append('<STR_LIT>' % style)<EOL>if angle != <NUM_LIT:0>:<EOL><INDENT>svg.append('<STR_LIT>'<EOL>% (x,y,-angle) )<EOL><DEDENT>else:<EOL><INDENT>svg.append('<STR_LIT>' % (x, y))<EOL><DEDENT>svg.append('<STR_LIT>')<EOL>for x, y, width, height in svg_rects:<EOL><INDENT>svg.append('<STR_LIT>' % (x, -y + height, width, height))<EOL><DEDENT>svg.append(\"<STR_LIT>\")<EOL><DEDENT>self.open_group(\"<STR_LIT>\")<EOL>write ('<STR_LIT>'.join(svg))<EOL>self.close_group(\"<STR_LIT>\")<EOL>", "docstring": "Draw math text using matplotlib.mathtext", "id": "f17232:c0:m17"}
{"signature": "def draw_mathtext(self, gc,<EOL>x, y, s, prop, angle):", "body": "if debugPS:<EOL><INDENT>self._pswriter.write(\"<STR_LIT>\")<EOL><DEDENT>width, height, descent, pswriter, used_characters =self.mathtext_parser.parse(s, <NUM_LIT>, prop)<EOL>self.merge_used_characters(used_characters)<EOL>self.set_color(*gc.get_rgb())<EOL>thetext = pswriter.getvalue()<EOL>ps =", "docstring": "Draw the math text using matplotlib.mathtext", "id": "f17234:c0:m30"}
{"signature": "def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):", "body": "if debugPS: self._pswriter.write('<STR_LIT>')<EOL>write = self._pswriter.write<EOL>if rgbFace:<EOL><INDENT>if rgbFace[<NUM_LIT:0>]==rgbFace[<NUM_LIT:1>] and rgbFace[<NUM_LIT:0>]==rgbFace[<NUM_LIT:2>]:<EOL><INDENT>ps_color = '<STR_LIT>' % rgbFace[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>ps_color = '<STR_LIT>' % rgbFace<EOL><DEDENT><DEDENT>ps_cmd = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'] <EOL>ps_cmd.append(self._convert_path(marker_path, marker_trans))<EOL>if rgbFace:<EOL><INDENT>ps_cmd.extend(['<STR_LIT>', ps_color, '<STR_LIT>', '<STR_LIT>'])<EOL><DEDENT>ps_cmd.extend(['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>tpath = trans.transform_path(path)<EOL>for vertices, code in tpath.iter_segments():<EOL><INDENT>if len(vertices):<EOL><INDENT>x, y = vertices[-<NUM_LIT:2>:]<EOL>ps_cmd.append(\"<STR_LIT>\" % (x, y))<EOL><DEDENT><DEDENT>ps = '<STR_LIT:\\n>'.join(ps_cmd)<EOL>self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)<EOL>", "docstring": "Draw the markers defined by path at each of the positions in x\nand y.  path coordinates are points, x and y coords will be\ntransformed by the transform", "id": "f17234:c0:m24"}
{"signature": "def quote_ps_string(s):", "body": "s=s.replace(\"<STR_LIT:\\\\>\", \"<STR_LIT>\")<EOL>s=s.replace(\"<STR_LIT:(>\", \"<STR_LIT>\")<EOL>s=s.replace(\"<STR_LIT:)>\", \"<STR_LIT>\")<EOL>s=s.replace(\"<STR_LIT:'>\", \"<STR_LIT>\")<EOL>s=s.replace(\"<STR_LIT>\", \"<STR_LIT>\")<EOL>s=re.sub(r\"<STR_LIT>\", lambda x: r\"<STR_LIT>\"%ord(x.group()), s)<EOL>return s<EOL>", "docstring": "Quote dangerous characters of S for use in a PostScript string constant.", "id": "f17234:m4"}
{"signature": "def seq_allequal(seq1, seq2):", "body": "if seq1 is None:<EOL><INDENT>return seq2 is None<EOL><DEDENT>if seq2 is None:<EOL><INDENT>return False<EOL><DEDENT>if len(seq1) != len(seq2): return False<EOL>return npy.alltrue(npy.equal(seq1, seq2))<EOL>", "docstring": "seq1 and seq2 are either None or sequences or numerix arrays\nReturn True if both are None or both are seqs with identical\nelements", "id": "f17234:m5"}
{"signature": "def set_hatch(self, hatch):", "body": "hatches = {'<STR_LIT>':<NUM_LIT:0>, '<STR_LIT>':<NUM_LIT:0>, '<STR_LIT>':<NUM_LIT:0>, '<STR_LIT>':<NUM_LIT:0>}<EOL>for letter in hatch:<EOL><INDENT>if   (letter == '<STR_LIT:/>'):    hatches['<STR_LIT>'] += <NUM_LIT:1><EOL>elif (letter == '<STR_LIT:\\\\>'):   hatches['<STR_LIT>'] += <NUM_LIT:1><EOL>elif (letter == '<STR_LIT:|>'):    hatches['<STR_LIT>']  += <NUM_LIT:1><EOL>elif (letter == '<STR_LIT:->'):    hatches['<STR_LIT>'] += <NUM_LIT:1><EOL>elif (letter == '<STR_LIT:+>'):<EOL><INDENT>hatches['<STR_LIT>'] += <NUM_LIT:1><EOL>hatches['<STR_LIT>'] += <NUM_LIT:1><EOL><DEDENT>elif (letter.lower() == '<STR_LIT:x>'):<EOL><INDENT>hatches['<STR_LIT>'] += <NUM_LIT:1><EOL>hatches['<STR_LIT>'] += <NUM_LIT:1><EOL><DEDENT><DEDENT>def do_hatch(angle, density):<EOL><INDENT>if (density == <NUM_LIT:0>): return \"<STR_LIT>\"<EOL>return", "docstring": "hatch can be one of:\n    /   - diagonal hatching\n    \\   - back diagonal\n    |   - vertical\n    -   - horizontal\n    +   - crossed\n    X   - crossed diagonal\n\nletters can be combined, in which case all the specified\nhatchings are done\n\nif same letter repeats, it increases the density of hatching\nin that direction", "id": "f17234:c0:m9"}
{"signature": "def get_canvas_width_height(self):", "body": "return self.width, self.height<EOL>", "docstring": "return the canvas width and height in display coords", "id": "f17234:c0:m10"}
{"signature": "def get_image_magnification(self):", "body": "return self.image_magnification<EOL>", "docstring": "Get the factor by which to magnify images passed to draw_image.\nAllows a backend to have images at a different resolution to other\nartists.", "id": "f17234:c0:m19"}
{"signature": "def draw_unicode(self, gc, x, y, s, prop, angle):", "body": "if rcParams['<STR_LIT>']:<EOL><INDENT>self.set_color(*gc.get_rgb())<EOL>font = self._get_font_afm(prop)<EOL>fontname = font.get_fontname()<EOL>fontsize = prop.get_size_in_points()<EOL>scale = <NUM_LIT>*fontsize<EOL>thisx = <NUM_LIT:0><EOL>thisy = font.get_str_bbox_and_descent(s)[<NUM_LIT:4>] * scale<EOL>last_name = None<EOL>lines = []<EOL>for c in s:<EOL><INDENT>name = uni2type1.get(ord(c), '<STR_LIT>')<EOL>try:<EOL><INDENT>width = font.get_width_from_char_name(name)<EOL><DEDENT>except KeyError:<EOL><INDENT>name = '<STR_LIT>'<EOL>width = font.get_width_char('<STR_LIT:?>')<EOL><DEDENT>if last_name is not None:<EOL><INDENT>kern = font.get_kern_dist_from_name(last_name, name)<EOL><DEDENT>else:<EOL><INDENT>kern = <NUM_LIT:0><EOL><DEDENT>last_name = name<EOL>thisx += kern * scale<EOL>lines.append('<STR_LIT>'%(thisx, thisy, name))<EOL>thisx += width * scale<EOL><DEDENT>thetext = \"<STR_LIT:\\n>\".join(lines)<EOL>ps = \"\"\"<STR_LIT>\"\"\"gsave<EOL><DEDENT>y)f translate<EOL>f rotate<EOL>t)s<EOL>cals()<EOL>self._pswriter.write(ps)<EOL>", "docstring": "draw a unicode string.  ps doesn't have unicode support, so\n        we have to do this the hard way", "id": "f17234:c0:m29"}
{"signature": "def disconnect(self, cid):", "body": "try: del self.observers[cid]<EOL>except KeyError: pass<EOL>", "docstring": "remove the observer with connection id cid", "id": "f17235:c4:m3"}
{"signature": "def update(self):", "body": "if self.useblit:<EOL><INDENT>if self.background is not None:<EOL><INDENT>self.canvas.restore_region(self.background)<EOL><DEDENT>self.ax.draw_artist(self.rect)<EOL>self.canvas.blit(self.ax.bbox)<EOL><DEDENT>else:<EOL><INDENT>self.canvas.draw_idle()<EOL><DEDENT>return False<EOL>", "docstring": "draw using newfangled blit or oldfangled draw depending on useblit", "id": "f17235:c9:m6"}
{"signature": "def isowner(self, o):", "body": "return self._owner is o<EOL>", "docstring": "o owns the lock", "id": "f17235:c0:m4"}
{"signature": "def ignore(self, event):", "body": "<EOL>if not self.active:<EOL><INDENT>return True<EOL><DEDENT>if not self.canvas.widgetlock.available(self):<EOL><INDENT>return True<EOL><DEDENT>if self.eventpress == None:<EOL><INDENT>return event.inaxes!= self.ax<EOL><DEDENT>return  (event.inaxes!=self.ax or<EOL>event.button != self.eventpress.button)<EOL>", "docstring": "return True if event should be ignored", "id": "f17235:c11:m2"}
{"signature": "def disconnect(self, cid):", "body": "try: del self.observers[cid]<EOL>except KeyError: pass<EOL>", "docstring": "remove the observer with connection id cid", "id": "f17235:c2:m4"}
{"signature": "def on_clicked(self, func):", "body": "cid = self.cnt<EOL>self.observers[cid] = func<EOL>self.cnt += <NUM_LIT:1><EOL>return cid<EOL>", "docstring": "When the button is clicked, call this func with button label\n\nA connection id is returned which can be used to disconnect", "id": "f17235:c5:m2"}
{"signature": "def _update(self, event):", "body": "if event.button !=<NUM_LIT:1>: return<EOL>if event.inaxes != self.ax: return<EOL>val = event.xdata<EOL>if not self.closedmin and val<=self.valmin: return<EOL>if not self.closedmax and val>=self.valmax: return<EOL>if self.slidermin is not None:<EOL><INDENT>if val<=self.slidermin.val: return<EOL><DEDENT>if self.slidermax is not None:<EOL><INDENT>if val>=self.slidermax.val: return<EOL><DEDENT>self.set_val(val)<EOL>", "docstring": "update the slider position", "id": "f17235:c3:m1"}
{"signature": "def update(self):", "body": "if self.useblit:<EOL><INDENT>if self.background is not None:<EOL><INDENT>self.canvas.restore_region(self.background)<EOL><DEDENT>self.ax.draw_artist(self.to_draw)<EOL>self.canvas.blit(self.ax.bbox)<EOL><DEDENT>else:<EOL><INDENT>self.canvas.draw_idle()<EOL><DEDENT>return False<EOL>", "docstring": "draw using newfangled blit or oldfangled draw depending on useblit", "id": "f17235:c11:m5"}
{"signature": "def press(self, event):", "body": "if self.ignore(event): return<EOL>self.buttonDown = True<EOL>self.rect.set_visible(self.visible)<EOL>if self.direction == '<STR_LIT>':<EOL><INDENT>self.pressv = event.xdata<EOL><DEDENT>else:<EOL><INDENT>self.pressv = event.ydata<EOL><DEDENT>return False<EOL>", "docstring": "on button press event", "id": "f17235:c9:m4"}
{"signature": "def disconnect(self, cid):", "body": "try: del self.observers[cid]<EOL>except KeyError: pass<EOL>", "docstring": "remove the observer with connection id cid", "id": "f17235:c5:m3"}
{"signature": "def __call__(self, o):", "body": "if not self.available(o):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self._owner = o<EOL>", "docstring": "reserve the lock for o", "id": "f17235:c0:m1"}
{"signature": "def onmove(self, event):", "body": "if self.pressv is None or self.ignore(event): return<EOL>x, y = event.xdata, event.ydata<EOL>self.prev = x, y<EOL>if self.direction == '<STR_LIT>':<EOL><INDENT>v = x<EOL><DEDENT>else:<EOL><INDENT>v = y<EOL><DEDENT>minv, maxv = v, self.pressv<EOL>if minv>maxv: minv, maxv = maxv, minv<EOL>if self.direction == '<STR_LIT>':<EOL><INDENT>self.rect.set_x(minv)<EOL>self.rect.set_width(maxv-minv)<EOL><DEDENT>else:<EOL><INDENT>self.rect.set_y(minv)<EOL>self.rect.set_height(maxv-minv)<EOL><DEDENT>if self.onmove_callback is not None:<EOL><INDENT>vmin = self.pressv<EOL>if self.direction == '<STR_LIT>':<EOL><INDENT>vmax = event.xdata or self.prev[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>vmax = event.ydata or self.prev[<NUM_LIT:1>]<EOL><DEDENT>if vmin>vmax: vmin, vmax = vmax, vmin<EOL>self.onmove_callback(vmin, vmax)<EOL><DEDENT>self.update()<EOL>return False<EOL>", "docstring": "on motion notify event", "id": "f17235:c9:m7"}
{"signature": "def __init__(self, ax, onselect, direction, minspan=None, useblit=False, rectprops=None, onmove_callback=None):", "body": "if rectprops is None:<EOL><INDENT>rectprops = dict(facecolor='<STR_LIT>', alpha=<NUM_LIT:0.5>)<EOL><DEDENT>assert direction in ['<STR_LIT>', '<STR_LIT>'], '<STR_LIT>'<EOL>self.direction = direction<EOL>self.ax = None<EOL>self.canvas = None<EOL>self.visible = True<EOL>self.cids=[]<EOL>self.rect = None<EOL>self.background = None<EOL>self.pressv = None<EOL>self.rectprops = rectprops<EOL>self.onselect = onselect<EOL>self.onmove_callback = onmove_callback<EOL>self.useblit = useblit<EOL>self.minspan = minspan<EOL>self.buttonDown = False<EOL>self.prev = (<NUM_LIT:0>, <NUM_LIT:0>)<EOL>self.new_axes(ax)<EOL>", "docstring": "Create a span selector in ax.  When a selection is made, clear\nthe span and call onselect with\n\n  onselect(vmin, vmax)\n\nand clear the span.\n\ndirection must be 'horizontal' or 'vertical'\n\nIf minspan is not None, ignore events smaller than minspan\n\nThe span rect is drawn with rectprops; default\n  rectprops = dict(facecolor='red', alpha=0.5)\n\nset the visible attribute to False if you want to turn off\nthe functionality of the span selector", "id": "f17235:c9:m0"}
{"signature": "def get_active(self):", "body": "return self.active<EOL>", "docstring": "to get status of active mode (boolean variable)", "id": "f17235:c11:m8"}
{"signature": "def __init__(self, ax, label, valmin, valmax, valinit=<NUM_LIT:0.5>, valfmt='<STR_LIT>',<EOL>closedmin=True, closedmax=True, slidermin=None, slidermax=None,<EOL>dragging=True, **kwargs):", "body": "self.ax = ax<EOL>self.valmin = valmin<EOL>self.valmax = valmax<EOL>self.val = valinit<EOL>self.valinit = valinit<EOL>self.poly = ax.axvspan(valmin,valinit,<NUM_LIT:0>,<NUM_LIT:1>, **kwargs)<EOL>self.vline = ax.axvline(valinit,<NUM_LIT:0>,<NUM_LIT:1>, color='<STR_LIT:r>', lw=<NUM_LIT:1>)<EOL>self.valfmt=valfmt<EOL>ax.set_yticks([])<EOL>ax.set_xlim((valmin, valmax))<EOL>ax.set_xticks([])<EOL>ax.set_navigate(False)<EOL>ax.figure.canvas.mpl_connect('<STR_LIT>', self._update)<EOL>if dragging:<EOL><INDENT>ax.figure.canvas.mpl_connect('<STR_LIT>', self._update)<EOL><DEDENT>self.label = ax.text(-<NUM_LIT>, <NUM_LIT:0.5>, label, transform=ax.transAxes,<EOL>verticalalignment='<STR_LIT>',<EOL>horizontalalignment='<STR_LIT:right>')<EOL>self.valtext = ax.text(<NUM_LIT>, <NUM_LIT:0.5>, valfmt%valinit,<EOL>transform=ax.transAxes,<EOL>verticalalignment='<STR_LIT>',<EOL>horizontalalignment='<STR_LIT:left>')<EOL>self.cnt = <NUM_LIT:0><EOL>self.observers = {}<EOL>self.closedmin = closedmin<EOL>self.closedmax = closedmax<EOL>self.slidermin = slidermin<EOL>self.slidermax = slidermax<EOL>", "docstring": "Create a slider from valmin to valmax in axes ax;\n\nvalinit -  the slider initial position\n\nlabel - the slider label\n\nvalfmt - used to format the slider value\n\nclosedmin and closedmax - indicate whether the slider interval is closed\n\nslidermin and slidermax - be used to contrain the value of\n  this slider to the values of other sliders.\n\nadditional kwargs are passed on to self.poly which is the\nmatplotlib.patches.Rectangle which draws the slider.  See the\nmatplotlib.patches.Rectangle documentation for legal property\nnames (eg facecolor, edgecolor, alpha, ...)", "id": "f17235:c3:m0"}
{"signature": "def __init__(self, targetfig, toolfig):", "body": "self.targetfig = targetfig<EOL>toolfig.subplots_adjust(left=<NUM_LIT>, right=<NUM_LIT>)<EOL>class toolbarfmt:<EOL><INDENT>def __init__(self, slider):<EOL><INDENT>self.slider = slider<EOL><DEDENT>def __call__(self, x, y):<EOL><INDENT>fmt = '<STR_LIT>'%(self.slider.label.get_text(), self.slider.valfmt)<EOL>return fmt%x<EOL><DEDENT><DEDENT>self.axleft = toolfig.add_subplot(<NUM_LIT>)<EOL>self.axleft.set_title('<STR_LIT>')<EOL>self.axleft.set_navigate(False)<EOL>self.sliderleft = Slider(self.axleft, '<STR_LIT:left>', <NUM_LIT:0>, <NUM_LIT:1>, targetfig.subplotpars.left, closedmax=False)<EOL>self.sliderleft.on_changed(self.funcleft)<EOL>self.axbottom = toolfig.add_subplot(<NUM_LIT>)<EOL>self.axbottom.set_navigate(False)<EOL>self.sliderbottom = Slider(self.axbottom, '<STR_LIT>', <NUM_LIT:0>, <NUM_LIT:1>, targetfig.subplotpars.bottom, closedmax=False)<EOL>self.sliderbottom.on_changed(self.funcbottom)<EOL>self.axright = toolfig.add_subplot(<NUM_LIT>)<EOL>self.axright.set_navigate(False)<EOL>self.sliderright = Slider(self.axright, '<STR_LIT:right>', <NUM_LIT:0>, <NUM_LIT:1>, targetfig.subplotpars.right, closedmin=False)<EOL>self.sliderright.on_changed(self.funcright)<EOL>self.axtop = toolfig.add_subplot(<NUM_LIT>)<EOL>self.axtop.set_navigate(False)<EOL>self.slidertop = Slider(self.axtop, '<STR_LIT>', <NUM_LIT:0>, <NUM_LIT:1>, targetfig.subplotpars.top, closedmin=False)<EOL>self.slidertop.on_changed(self.functop)<EOL>self.axwspace = toolfig.add_subplot(<NUM_LIT>)<EOL>self.axwspace.set_navigate(False)<EOL>self.sliderwspace = Slider(self.axwspace, '<STR_LIT>', <NUM_LIT:0>, <NUM_LIT:1>, targetfig.subplotpars.wspace, closedmax=False)<EOL>self.sliderwspace.on_changed(self.funcwspace)<EOL>self.axhspace = toolfig.add_subplot(<NUM_LIT>)<EOL>self.axhspace.set_navigate(False)<EOL>self.sliderhspace = Slider(self.axhspace, '<STR_LIT>', <NUM_LIT:0>, <NUM_LIT:1>, targetfig.subplotpars.hspace, 
closedmax=False)<EOL>self.sliderhspace.on_changed(self.funchspace)<EOL>self.sliderleft.slidermax = self.sliderright<EOL>self.sliderright.slidermin = self.sliderleft<EOL>self.sliderbottom.slidermax = self.slidertop<EOL>self.slidertop.slidermin = self.sliderbottom<EOL>bax = toolfig.add_axes([<NUM_LIT>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT>])<EOL>self.buttonreset = Button(bax, '<STR_LIT>')<EOL>sliders = (self.sliderleft, self.sliderbottom, self.sliderright,<EOL>self.slidertop, self.sliderwspace, self.sliderhspace, )<EOL>def func(event):<EOL><INDENT>thisdrawon = self.drawon<EOL>self.drawon = False<EOL>bs = []<EOL>for slider in sliders:<EOL><INDENT>bs.append(slider.drawon)<EOL>slider.drawon = False<EOL><DEDENT>for slider in sliders:<EOL><INDENT>slider.reset()<EOL><DEDENT>for slider, b in zip(sliders, bs):<EOL><INDENT>slider.drawon = b<EOL><DEDENT>self.drawon = thisdrawon<EOL>if self.drawon:<EOL><INDENT>toolfig.canvas.draw()<EOL>self.targetfig.canvas.draw()<EOL><DEDENT><DEDENT>validate = toolfig.subplotpars.validate<EOL>toolfig.subplotpars.validate = False<EOL>self.buttonreset.on_clicked(func)<EOL>toolfig.subplotpars.validate = validate<EOL>", "docstring": "targetfig is the figure to adjust\n\ntoolfig is the figure to embed the the subplot tool into.  If\nNone, a default pylab figure will be created.  If you are\nusing this from the GUI", "id": "f17235:c6:m0"}
{"signature": "def disconnect(self, cid):", "body": "try: del self.observers[cid]<EOL>except KeyError: pass<EOL>", "docstring": "remove the observer with connection id cid", "id": "f17235:c3:m4"}
{"signature": "def set_active(self, active):", "body": "self.active = active<EOL>", "docstring": "Use this to activate / deactivate the RectangleSelector\n\n            from your program with an boolean variable 'active'.", "id": "f17235:c11:m7"}
{"signature": "def release(self, event):", "body": "if self.eventpress is None or self.ignore(event): return<EOL>self.to_draw.set_visible(False)<EOL>self.canvas.draw()<EOL>self.eventrelease = event<EOL>if self.spancoords=='<STR_LIT:data>':<EOL><INDENT>xmin, ymin = self.eventpress.xdata, self.eventpress.ydata<EOL>xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata<EOL><DEDENT>elif self.spancoords=='<STR_LIT>':<EOL><INDENT>xmin, ymin = self.eventpress.x, self.eventpress.y<EOL>xmax, ymax = self.eventrelease.x, self.eventrelease.y<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if xmin>xmax: xmin, xmax = xmax, xmin<EOL>if ymin>ymax: ymin, ymax = ymax, ymin<EOL>spanx = xmax - xmin<EOL>spany = ymax - ymin<EOL>xproblems = self.minspanx is not None and spanx<self.minspanx<EOL>yproblems = self.minspany is not None and spany<self.minspany<EOL>if (self.drawtype=='<STR_LIT>')  and (xproblems or  yproblems):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"     <EOL>return                 <EOL><DEDENT>if (self.drawtype=='<STR_LIT>') and (xproblems and yproblems):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"    <EOL>return                 <EOL><DEDENT>self.onselect(self.eventpress, self.eventrelease)<EOL>self.eventpress = None                <EOL>self.eventrelease = None              <EOL>return False<EOL>", "docstring": "on button release event", "id": "f17235:c11:m4"}
{"signature": "def validate_fonttype(s):", "body": "fonttypes = { '<STR_LIT>':    <NUM_LIT:3>,<EOL>'<STR_LIT>': <NUM_LIT> }<EOL>try:<EOL><INDENT>fonttype = validate_int(s)<EOL><DEDENT>except ValueError:<EOL><INDENT>if s.lower() in list(fonttypes.keys()):<EOL><INDENT>return fonttypes[s.lower()]<EOL><DEDENT>raise ValueError('<STR_LIT>' % list(fonttypes.keys()))<EOL><DEDENT>else:<EOL><INDENT>if fonttype not in list(fonttypes.values()):<EOL><INDENT>raise ValueError('<STR_LIT>' % list(fonttypes.values()))<EOL><DEDENT>return fonttype<EOL><DEDENT>", "docstring": "confirm that this is a Postscript of PDF font type that we know how to convert to", "id": "f17236:m5"}
{"signature": "def validate_int(s):", "body": "try: return int(s)<EOL>except ValueError:<EOL><INDENT>raise ValueError('<STR_LIT>' % s)<EOL><DEDENT>", "docstring": "convert s to int or raise", "id": "f17236:m4"}
{"signature": "def validate_bool_maybe_none(b):", "body": "if type(b) is str:<EOL><INDENT>b = b.lower()<EOL><DEDENT>if b=='<STR_LIT:none>': return None<EOL>if b in ('<STR_LIT:t>', '<STR_LIT:y>', '<STR_LIT:yes>', '<STR_LIT>', '<STR_LIT:true>', '<STR_LIT:1>', <NUM_LIT:1>, True): return True<EOL>elif b in ('<STR_LIT:f>', '<STR_LIT:n>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:false>', '<STR_LIT:0>', <NUM_LIT:0>, False): return False<EOL>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % b)<EOL><DEDENT>", "docstring": "Convert b to a boolean or raise", "id": "f17236:m2"}
{"signature": "def validate_stringlist(s):", "body": "if type(s) is str:<EOL><INDENT>return [ v.strip() for v in s.split('<STR_LIT:U+002C>') ]<EOL><DEDENT>else:<EOL><INDENT>assert type(s) in [list,tuple]<EOL>return [ str(v) for v in s ]<EOL><DEDENT>", "docstring": "return a list", "id": "f17236:m9"}
{"signature": "def validate_path_exists(s):", "body": "if os.path.exists(s): return s<EOL>else:<EOL><INDENT>raise RuntimeError('<STR_LIT>'%s)<EOL><DEDENT>", "docstring": "If s is a path, return s, else False", "id": "f17236:m0"}
{"signature": "def approx_real(x):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Deprecated - needs clean room implementation", "id": "f17237:m77"}
{"signature": "def vander(*args, **kwargs):", "body": "warnings.warn(\"<STR_LIT>\", DeprecationWarning)<EOL>return np.vander(*args, **kwargs)<EOL>", "docstring": "*X* = vander(*x*, *N* = *None*)\n\nThe Vandermonde matrix of vector *x*.  The *i*-th column of *X* is the\nthe *i*-th power of *x*.  *N* is the maximum power to compute; if *N* is\n*None* it defaults to len(*x*).", "id": "f17237:m21"}
{"signature": "def update_datalim_to_current(self):", "body": "if self.dataLim is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>x, y = self.asarrays()<EOL>self.dataLim.update_numerix(x, y, True)<EOL>", "docstring": "Update the *datalim* in the current data in the fifo.", "id": "f17237:c0:m5"}
{"signature": "def rank(x):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Deprecated - see :func:`numpy.rank`", "id": "f17237:m74"}
{"signature": "def __init__(self, nmax):", "body": "self._xa = np.zeros((nmax,), np.float_)<EOL>self._ya = np.zeros((nmax,), np.float_)<EOL>self._xs = np.zeros((nmax,), np.float_)<EOL>self._ys = np.zeros((nmax,), np.float_)<EOL>self._ind = <NUM_LIT:0><EOL>self._nmax = nmax<EOL>self.dataLim = None<EOL>self.callbackd = {}<EOL>", "docstring": "Buffer up to *nmax* points.", "id": "f17237:c0:m0"}
{"signature": "def window_hanning(x):", "body": "return np.hanning(len(x))*x<EOL>", "docstring": "return x times the hanning window of len(x)", "id": "f17237:m5"}
{"signature": "def l1norm(a):", "body": "return np.sum(np.absolute(a))<EOL>", "docstring": "Return the *l1* norm of *a*, flattened out.\n\nImplemented as a separate function (not a call to :func:`norm` for speed).", "id": "f17237:m59"}
{"signature": "def find(condition):", "body": "res, = np.nonzero(np.ravel(condition))<EOL>return res<EOL>", "docstring": "Return the indices where ravel(condition) is true", "id": "f17237:m28"}
{"signature": "def cohere(x, y, NFFT=<NUM_LIT>, Fs=<NUM_LIT:2>, detrend=detrend_none, window=window_hanning,<EOL>noverlap=<NUM_LIT:0>, pad_to=None, sides='<STR_LIT:default>', scale_by_freq=None):", "body": "if len(x)<<NUM_LIT:2>*NFFT:<EOL><INDENT>raise ValueError(_coh_error)<EOL><DEDENT>Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,<EOL>scale_by_freq)<EOL>Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,<EOL>scale_by_freq)<EOL>Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,<EOL>scale_by_freq)<EOL>Cxy = np.divide(np.absolute(Pxy)**<NUM_LIT:2>, Pxx*Pyy)<EOL>Cxy.shape = (len(f),)<EOL>return Cxy, f<EOL>", "docstring": "The coherence between *x* and *y*.  Coherence is the normalized\ncross spectral density:\n\n.. math::\n\n    C_{xy} = \\\\frac{|P_{xy}|^2}{P_{xx}P_{yy}}\n\n*x*, *y*\n    Array or sequence containing the data\n%(PSD)s\nThe return value is the tuple (*Cxy*, *f*), where *f* are the\nfrequencies of the coherence vector. For cohere, scaling the\nindividual densities by the sampling frequency has no effect, since\nthe factors cancel out.\n\n.. seealso::\n    :func:`psd` and :func:`csd`:\n        For information about the methods used to compute\n        :math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.", "id": "f17237:m17"}
{"signature": "def detrend_mean(x):", "body": "return x - x.mean()<EOL>", "docstring": "Return x minus the mean(x)", "id": "f17237:m10"}
{"signature": "def movavg(x,n):", "body": "w = np.empty((n,), dtype=np.float_)<EOL>w[:] = <NUM_LIT:1.0>/n<EOL>return np.convolve(x, w, mode='<STR_LIT>')<EOL>", "docstring": "Compute the len(*n*) moving average of *x*.", "id": "f17237:m45"}
{"signature": "def norm(x,y=<NUM_LIT:2>):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Deprecated - see :func:`numpy.linalg.norm`", "id": "f17237:m72"}
{"signature": "def fromfunction_kw(function, dimensions, **kwargs):", "body": "warnings.warn(\"<STR_LIT>\", DeprecationWarning)<EOL>return np.fromfunction(function, dimensions, **kwargs)<EOL>", "docstring": "Drop-in replacement for :func:`numpy.fromfunction`.\n\nAllows passing keyword arguments to the desired function.\n\nCall it as (keywords are optional)::\n\n  fromfunction_kw(MyFunction, dimensions, keywords)\n\nThe function ``MyFunction`` is responsible for handling the\ndictionary of keywords it will receive.", "id": "f17237:m70"}
{"signature": "def safe_isinf(x):", "body": "if cbook.is_string_like(x):<EOL><INDENT>return False<EOL><DEDENT>try: b = np.isinf(x)<EOL>except NotImplementedError: return False<EOL>except TypeError: return False<EOL>else: return b<EOL>", "docstring": ":func:`numpy.isinf` for arbitrary types", "id": "f17237:m79"}
{"signature": "def conv(x, y, mode=<NUM_LIT:2>):", "body": "warnings.warn(\"<STR_LIT>\", DeprecationWarning)<EOL>return np.convolve(x,y,mode)<EOL>", "docstring": "convolve x with y", "id": "f17237:m7"}
{"signature": "def dist(x,y):", "body": "d = x-y<EOL>return np.sqrt(np.dot(d,d))<EOL>", "docstring": "Return the distance between two points.", "id": "f17237:m40"}
{"signature": "def levypdf(x, gamma, alpha):", "body": "N = len(x)<EOL>if N%<NUM_LIT:2> != <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>' +'<STR_LIT>')<EOL><DEDENT>dx = x[<NUM_LIT:1>]-x[<NUM_LIT:0>]<EOL>f = <NUM_LIT:1>/(N*dx)*np.arange(-N/<NUM_LIT:2>, N/<NUM_LIT:2>, np.float_)<EOL>ind = np.concatenate([np.arange(N/<NUM_LIT:2>, N, int),<EOL>np.arange(<NUM_LIT:0>, N/<NUM_LIT:2>, int)])<EOL>df = f[<NUM_LIT:1>]-f[<NUM_LIT:0>]<EOL>cfl = exp(-gamma*np.absolute(<NUM_LIT:2>*pi*f)**alpha)<EOL>px = np.fft.fft(np.take(cfl,ind)*df).astype(np.float_)<EOL>return np.take(px, ind)<EOL>", "docstring": "Returm the levy pdf evaluated at *x* for params *gamma*, *alpha*", "id": "f17237:m27"}
{"signature": "def cohere_pairs( X, ij, NFFT=<NUM_LIT>, Fs=<NUM_LIT:2>, detrend=detrend_none,<EOL>window=window_hanning, noverlap=<NUM_LIT:0>,<EOL>preferSpeedOverMemory=True,<EOL>progressCallback=donothing_callback,<EOL>returnPxx=False):", "body": "numRows, numCols = X.shape<EOL>if numRows < NFFT:<EOL><INDENT>tmp = X<EOL>X = np.zeros( (NFFT, numCols), X.dtype)<EOL>X[:numRows,:] = tmp<EOL>del tmp<EOL><DEDENT>numRows, numCols = X.shape<EOL>seen = {}<EOL>for i,j in ij:<EOL><INDENT>seen[i]=<NUM_LIT:1>; seen[j] = <NUM_LIT:1><EOL><DEDENT>allColumns = list(seen.keys())<EOL>Ncols = len(allColumns)<EOL>del seen<EOL>if np.iscomplexobj(X): numFreqs = NFFT<EOL>else: numFreqs = NFFT//<NUM_LIT:2>+<NUM_LIT:1><EOL>if cbook.iterable(window):<EOL><INDENT>assert(len(window) == NFFT)<EOL>windowVals = window<EOL><DEDENT>else:<EOL><INDENT>windowVals = window(np.ones((NFFT,), typecode(X)))<EOL><DEDENT>ind = list(range(<NUM_LIT:0>, numRows-NFFT+<NUM_LIT:1>, NFFT-noverlap))<EOL>numSlices = len(ind)<EOL>FFTSlices = {}<EOL>FFTConjSlices = {}<EOL>Pxx = {}<EOL>slices = list(range(numSlices))<EOL>normVal = norm(windowVals)**<NUM_LIT:2><EOL>for iCol in allColumns:<EOL><INDENT>progressCallback(i/Ncols, '<STR_LIT>')<EOL>Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)<EOL>for iSlice in slices:<EOL><INDENT>thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]<EOL>thisSlice = windowVals*detrend(thisSlice)<EOL>Slices[iSlice,:] = fft(thisSlice)[:numFreqs]<EOL><DEDENT>FFTSlices[iCol] = Slices<EOL>if preferSpeedOverMemory:<EOL><INDENT>FFTConjSlices[iCol] = conjugate(Slices)<EOL><DEDENT>Pxx[iCol] = np.divide(np.mean(absolute(Slices)**<NUM_LIT:2>), normVal)<EOL><DEDENT>del Slices, ind, windowVals<EOL>Cxy = {}<EOL>Phase = {}<EOL>count = <NUM_LIT:0><EOL>N = len(ij)<EOL>for i,j in ij:<EOL><INDENT>count +=<NUM_LIT:1><EOL>if count%<NUM_LIT:10>==<NUM_LIT:0>:<EOL><INDENT>progressCallback(count/N, '<STR_LIT>')<EOL><DEDENT>if preferSpeedOverMemory:<EOL><INDENT>Pxy = FFTSlices[i] * 
FFTConjSlices[j]<EOL><DEDENT>else:<EOL><INDENT>Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])<EOL><DEDENT>if numSlices><NUM_LIT:1>: Pxy = np.mean(Pxy)<EOL>Pxy = np.divide(Pxy, normVal)<EOL>Cxy[(i,j)] = np.divide(np.absolute(Pxy)**<NUM_LIT:2>, Pxx[i]*Pxx[j])<EOL>Phase[(i,j)] =  np.arctan2(Pxy.imag, Pxy.real)<EOL><DEDENT>freqs = Fs/NFFT*np.arange(numFreqs)<EOL>if returnPxx:<EOL><INDENT>return Cxy, Phase, freqs, Pxx<EOL><DEDENT>else:<EOL><INDENT>return Cxy, Phase, freqs<EOL><DEDENT>", "docstring": "Cxy, Phase, freqs = cohere_pairs(X, ij, ...)\n\nCompute the coherence for all pairs in *ij*.  *X* is a\n(*numSamples*, *numCols*) numpy array.  *ij* is a list of tuples\n(*i*, *j*).  Each tuple is a pair of indexes into the columns of *X*\nfor which you want to compute coherence.  For example, if *X* has 64\ncolumns, and you want to compute all nonredundant pairs, define *ij*\nas::\n\n  ij = []\n  for i in range(64):\n      for j in range(i+1,64):\n          ij.append( (i, j) )\n\nThe other function arguments, except for *preferSpeedOverMemory*\n(see below), are explained in the help string of :func:`psd`.\n\nReturn value is a tuple (*Cxy*, *Phase*, *freqs*).\n\n  - *Cxy*: a dictionary of (*i*, *j*) tuples -> coherence vector for that\n    pair.  I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``.  Number of\n    dictionary keys is ``len(ij)``.\n\n  - *Phase*: a dictionary of phases of the cross spectral density at\n    each frequency for each pair.  The keys are ``(i,j)``.\n\n  - *freqs*: a vector of frequencies, equal in length to either\n    the coherence or phase vectors for any (*i*, *j*) key..  Eg,\n    to make a coherence Bode plot::\n\n      subplot(211)\n      plot( freqs, Cxy[(12,19)])\n      subplot(212)\n      plot( freqs, Phase[(12,19)])\n\nFor a large number of pairs, :func:`cohere_pairs` can be much more\nefficient than just calling :func:`cohere` for each pair, because\nit caches most of the intensive computations.  
If *N* is the\nnumber of pairs, this function is O(N) for most of the heavy\nlifting, whereas calling cohere for each pair is\nO(N\\N{SUPERSCRIPT TWO}).  However, because of the caching, it is\nalso more memory intensive, making 2 additional complex arrays\nwith approximately the same number of elements as *X*.\n\nThe parameter *preferSpeedOverMemory*, if *False*, limits the\ncaching by only making one, rather than two, complex cache arrays.\nThis is useful if memory becomes critical.  Even when\n*preferSpeedOverMemory* is *False*, :func:`cohere_pairs` will\nstill give significant performace gains over calling\n:func:`cohere` for each pair, and will use subtantially less\nmemory than if *preferSpeedOverMemory* is *True*.  In my tests\nwith a (43000, 64) array over all non-redundant pairs,\n*preferSpeedOverMemory* = *True* delivered a 33% performace boost\non a 1.7GHZ Athlon with 512MB RAM compared with\n*preferSpeedOverMemory* = *False*.  But both solutions were more\nthan 10x faster than naievly crunching all possible pairs through\ncohere.\n\n.. seealso::\n    :file:`test/cohere_pairs_test.py` in the src tree:\n        For an example script that shows that this\n        :func:`cohere_pairs` and :func:`cohere` give the same\n        results for a given pair.", "id": "f17237:m23"}
{"signature": "def rec_drop_fields(rec, names):", "body": "names = set(names)<EOL>Nr = len(rec)<EOL>newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names<EOL>if name not in names])<EOL>newrec = np.empty(Nr, dtype=newdtype)<EOL>for field in newdtype.names:<EOL><INDENT>newrec[field] = rec[field]<EOL><DEDENT>return rec_view(newrec)<EOL>", "docstring": "Return a new numpy record array with fields in *names* dropped.", "id": "f17237:m83"}
{"signature": "def binary_repr(number, max_length = <NUM_LIT>):", "body": "<EOL>shifts = list(map (operator.rshift, max_length * [number],list(range(max_length - <NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>))))<EOL>digits = list(map (operator.mod, shifts, max_length * [<NUM_LIT:2>]))<EOL>if not digits.count (<NUM_LIT:1>): return <NUM_LIT:0><EOL>digits = digits [digits.index (<NUM_LIT:1>):]<EOL>return '<STR_LIT>'.join (map (repr, digits)).replace('<STR_LIT:L>','<STR_LIT>')<EOL>", "docstring": "Return the binary representation of the input *number* as a\nstring.\n\nThis is more efficient than using :func:`base_repr` with base 2.\n\nIncrease the value of max_length for very large numbers. Note that\non 32-bit machines, 2**1023 is the largest integer power of 2\nwhich can be converted to a Python float.", "id": "f17237:m66"}
{"signature": "def longest_ones(x):", "body": "return longest_contiguous_ones(x)<EOL>", "docstring": "alias for longest_contiguous_ones", "id": "f17237:m31"}
{"signature": "def rk4(derivs, y0, t):", "body": "try: Ny = len(y0)<EOL>except TypeError:<EOL><INDENT>yout = np.zeros( (len(t),), np.float_)<EOL><DEDENT>else:<EOL><INDENT>yout = np.zeros( (len(t), Ny), np.float_)<EOL><DEDENT>yout[<NUM_LIT:0>] = y0<EOL>i = <NUM_LIT:0><EOL>for i in np.arange(len(t)-<NUM_LIT:1>):<EOL><INDENT>thist = t[i]<EOL>dt = t[i+<NUM_LIT:1>] - thist<EOL>dt2 = dt/<NUM_LIT><EOL>y0 = yout[i]<EOL>k1 = np.asarray(derivs(y0, thist))<EOL>k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))<EOL>k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))<EOL>k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))<EOL>yout[i+<NUM_LIT:1>] = y0 + dt/<NUM_LIT>*(k1 + <NUM_LIT:2>*k2 + <NUM_LIT:2>*k3 + k4)<EOL><DEDENT>return yout<EOL>", "docstring": "Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.\nThis is a toy implementation which may be useful if you find\nyourself stranded on a system w/o scipy.  Otherwise use\n:func:`scipy.integrate`.\n\n*y0*\n    initial state vector\n\n*t*\n    sample times\n\n*derivs*\n    returns the derivative of the system and has the\n    signature ``dy = derivs(yi, ti)``\n\n\nExample 1 ::\n\n    ## 2D system\n\n    def derivs6(x,t):\n        d1 =  x[0] + 2*x[1]\n        d2 =  -3*x[0] + 4*x[1]\n        return (d1, d2)\n    dt = 0.0005\n    t = arange(0.0, 2.0, dt)\n    y0 = (1,2)\n    yout = rk4(derivs6, y0, t)\n\nExample 2::\n\n    ## 1D system\n    alpha = 2\n    def derivs(x,t):\n        return -alpha*x + exp(-t)\n\n    y0 = 1\n    yout = rk4(derivs, y0, t)\n\n\nIf you have access to scipy, you should probably be using the\nscipy.integrate tools rather than this function.", "id": "f17237:m36"}
{"signature": "def stineman_interp(xi,x,y,yp=None):", "body": "<EOL>x=np.asarray(x, np.float_)<EOL>y=np.asarray(y, np.float_)<EOL>assert x.shape == y.shape<EOL>N=len(y)<EOL>if yp is None:<EOL><INDENT>yp = slopes(x,y)<EOL><DEDENT>else:<EOL><INDENT>yp=np.asarray(yp, np.float_)<EOL><DEDENT>xi=np.asarray(xi, np.float_)<EOL>yi=np.zeros(xi.shape, np.float_)<EOL>dx = x[<NUM_LIT:1>:] - x[:-<NUM_LIT:1>]<EOL>dy = y[<NUM_LIT:1>:] - y[:-<NUM_LIT:1>]<EOL>s = dy/dx  <EOL>idx = np.searchsorted(x[<NUM_LIT:1>:-<NUM_LIT:1>], xi)<EOL>sidx = s.take(idx)<EOL>xidx = x.take(idx)<EOL>yidx = y.take(idx)<EOL>xidxp1 = x.take(idx+<NUM_LIT:1>)<EOL>yo = yidx + sidx * (xi - xidx)<EOL>dy1 = (yp.take(idx)- sidx) * (xi - xidx)       <EOL>dy2 = (yp.take(idx+<NUM_LIT:1>)-sidx) * (xi - xidxp1) <EOL>dy1dy2 = dy1*dy2<EOL>yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+<NUM_LIT:1>,<EOL>((<NUM_LIT:2>*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),<EOL><NUM_LIT:0.0>,<EOL><NUM_LIT:1>/(dy1+dy2),))<EOL>return yi<EOL>", "docstring": "Given data vectors *x* and *y*, the slope vector *yp* and a new\nabscissa vector *xi*, the function :func:`stineman_interp` uses\nStineman interpolation to calculate a vector *yi* corresponding to\n*xi*.\n\nHere's an example that generates a coarse sine curve, then\ninterpolates over a finer abscissa::\n\n  x = linspace(0,2*pi,20);  y = sin(x); yp = cos(x)\n  xi = linspace(0,2*pi,40);\n  yi = stineman_interp(xi,x,y,yp);\n  plot(x,y,'o',xi,yi)\n\nThe interpolation method is described in the article A\nCONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell\nW. Stineman. 
The article appeared in the July 1980 issue of\nCreative Computing with a note from the editor stating that while\nthey were:\n\n  not an academic journal but once in a while something serious\n  and original comes in adding that this was\n  \"apparently a real solution\" to a well known problem.\n\nFor *yp* = *None*, the routine automatically determines the slopes\nusing the :func:`slopes` routine.\n\n*x* is assumed to be sorted in increasing order.\n\nFor values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine\ntries an extrapolation.  The relevance of the data obtained from\nthis, of course, is questionable...\n\nOriginal implementation by Halldor Bjornsson, Icelandic\nMeteorolocial Office, March 2006 halldor at vedur.is\n\nCompletely reworked and optimized for Python by Norbert Nemec,\nInstitute of Theoretical Physics, University or Regensburg, April\n2006 Norbert.Nemec at physik.uni-regensburg.de", "id": "f17237:m95"}
{"signature": "def distances_along_curve( X ):", "body": "X = np.diff( X, axis=<NUM_LIT:0> )<EOL>return vector_lengths(X,axis=<NUM_LIT:1>)<EOL>", "docstring": "Computes the distance between a set of successive points in *N* dimensions.\n\nWhere *X* is an *M* x *N* array or matrix.  The distances between\nsuccessive rows is computed.  Distance is the standard Euclidean\ndistance.", "id": "f17237:m102"}
{"signature": "def diagonal_matrix(diag):", "body": "warnings.warn(\"<STR_LIT>\", DeprecationWarning)<EOL>return np.diag(diag)<EOL>", "docstring": "Return square diagonal matrix whose non-zero elements are given by the\ninput array.", "id": "f17237:m63"}
{"signature": "def orth(A):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Deprecated - needs clean room implementation", "id": "f17237:m73"}
{"signature": "def rec_append_fields(rec, names, arrs, dtypes=None):", "body": "if (not cbook.is_string_like(names) and cbook.iterable(names)and len(names) and cbook.is_string_like(names[<NUM_LIT:0>])):<EOL><INDENT>if len(names) != len(arrs):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>else: <EOL><INDENT>names = [names]<EOL>arrs = [arrs]<EOL><DEDENT>arrs = list(map(np.asarray, arrs))<EOL>if dtypes is None:<EOL><INDENT>dtypes = [a.dtype for a in arrs]<EOL><DEDENT>elif not cbook.iterable(dtypes):<EOL><INDENT>dtypes = [dtypes]<EOL><DEDENT>if len(arrs) != len(dtypes):<EOL><INDENT>if len(dtypes) == <NUM_LIT:1>:<EOL><INDENT>dtypes = dtypes * len(arrs)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>newdtype = np.dtype(rec.dtype.descr + list(zip(names, dtypes)))<EOL>newrec = np.empty(rec.shape, dtype=newdtype)<EOL>for field in rec.dtype.fields:<EOL><INDENT>newrec[field] = rec[field]<EOL><DEDENT>for name, arr in zip(names, arrs):<EOL><INDENT>newrec[name] = arr<EOL><DEDENT>return rec_view(newrec)<EOL>", "docstring": "Return a new record array with field names populated with data\nfrom arrays in *arrs*.  If appending a single field, then *names*,\n*arrs* and *dtypes* do not have to be lists. They can just be the\nvalues themselves.", "id": "f17237:m82"}
{"signature": "def asarrays(self):", "body": "if self._ind<self._nmax:<EOL><INDENT>return self._xs[:self._ind], self._ys[:self._ind]<EOL><DEDENT>ind = self._ind % self._nmax<EOL>self._xa[:self._nmax-ind] = self._xs[ind:]<EOL>self._xa[self._nmax-ind:] = self._xs[:ind]<EOL>self._ya[:self._nmax-ind] = self._ys[ind:]<EOL>self._ya[self._nmax-ind:] = self._ys[:ind]<EOL>return self._xa, self._ya<EOL>", "docstring": "Return *x* and *y* as arrays; their length will be the len of\ndata added or *nmax*.", "id": "f17237:c0:m4"}
{"signature": "def dist_point_to_segment(p, s0, s1):", "body": "p = np.asarray(p, np.float_)<EOL>s0 = np.asarray(s0, np.float_)<EOL>s1 = np.asarray(s1, np.float_)<EOL>v = s1 - s0<EOL>w = p - s0<EOL>c1 = np.dot(w,v);<EOL>if ( c1 <= <NUM_LIT:0> ):<EOL><INDENT>return dist(p, s0);<EOL><DEDENT>c2 = np.dot(v,v)<EOL>if ( c2 <= c1 ):<EOL><INDENT>return dist(p, s1);<EOL><DEDENT>b = c1 / c2<EOL>pb = s0 + b * v;<EOL>return dist(p, pb)<EOL>", "docstring": "Get the distance of a point to a segment.\n\n  *p*, *s0*, *s1* are *xy* sequences\n\nThis algorithm from\nhttp://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment", "id": "f17237:m41"}
{"signature": "def segments_intersect(s1, s2):", "body": "(x1, y1), (x2, y2) = s1<EOL>(x3, y3), (x4, y4) = s2<EOL>den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))<EOL>n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))<EOL>n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))<EOL>if den == <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>u1 = n1/den<EOL>u2 = n2/den<EOL>return <NUM_LIT:0.0> <= u1 <= <NUM_LIT:1.0> and <NUM_LIT:0.0> <= u2 <= <NUM_LIT:1.0><EOL>", "docstring": "Return *True* if *s1* and *s2* intersect.\n*s1* and *s2* are defined as::\n\n  s1: (x1, y1), (x2, y2)\n  s2: (x3, y3), (x4, y4)", "id": "f17237:m42"}
{"signature": "def contiguous_regions(mask):", "body": "in_region = None<EOL>boundaries = []<EOL>for i, val in enumerate(mask):<EOL><INDENT>if in_region is None and val:<EOL><INDENT>in_region = i<EOL><DEDENT>elif in_region is not None and not val:<EOL><INDENT>boundaries.append((in_region, i))<EOL>in_region = None<EOL><DEDENT><DEDENT>if in_region is not None:<EOL><INDENT>boundaries.append((in_region, i+<NUM_LIT:1>))<EOL><DEDENT>return boundaries<EOL>", "docstring": "return a list of (ind0, ind1) such that mask[ind0:ind1].all() is\nTrue and we cover all such regions\n\nTODO: this is a pure python implementation which probably has a much faster numpy impl", "id": "f17237:m100"}
{"signature": "def rms_flat(a):", "body": "return np.sqrt(np.mean(np.absolute(a)**<NUM_LIT:2>))<EOL>", "docstring": "Return the root mean square of all the elements of *a*, flattened out.", "id": "f17237:m58"}
{"signature": "def last(self):", "body": "if self._ind==<NUM_LIT:0>: return None, None<EOL>ind = (self._ind-<NUM_LIT:1>) % self._nmax<EOL>return self._xs[ind], self._ys[ind]<EOL>", "docstring": "Get the last *x*, *y* or *None*.  *None* if no data set.", "id": "f17237:c0:m3"}
{"signature": "def rec2csv(r, fname, delimiter='<STR_LIT:U+002C>', formatd=None, missing='<STR_LIT>',<EOL>missingd=None):", "body": "if missingd is None:<EOL><INDENT>missingd = dict()<EOL><DEDENT>def with_mask(func):<EOL><INDENT>def newfunc(val, mask, mval):<EOL><INDENT>if mask:<EOL><INDENT>return mval<EOL><DEDENT>else:<EOL><INDENT>return func(val)<EOL><DEDENT><DEDENT>return newfunc<EOL><DEDENT>formatd = get_formatd(r, formatd)<EOL>funcs = []<EOL>for i, name in enumerate(r.dtype.names):<EOL><INDENT>funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))<EOL><DEDENT>fh, opened = cbook.to_filehandle(fname, '<STR_LIT:w>', return_opened=True)<EOL>writer = csv.writer(fh, delimiter=delimiter)<EOL>header = r.dtype.names<EOL>writer.writerow(header)<EOL>mvals = []<EOL>for name in header:<EOL><INDENT>mvals.append(missingd.get(name, missing))<EOL><DEDENT>ismasked = False<EOL>if len(r):<EOL><INDENT>row = r[<NUM_LIT:0>]<EOL>ismasked = hasattr(row, '<STR_LIT>')<EOL><DEDENT>for row in r:<EOL><INDENT>if ismasked:<EOL><INDENT>row, rowmask = row.item(), row._fieldmask.item()<EOL><DEDENT>else:<EOL><INDENT>rowmask = [False] * len(row)<EOL><DEDENT>writer.writerow([func(val, mask, mval) for func, val, mask, mval<EOL>in zip(funcs, row, rowmask, mvals)])<EOL><DEDENT>if opened:<EOL><INDENT>fh.close()<EOL><DEDENT>", "docstring": "Save the data from numpy recarray *r* into a\ncomma-/space-/tab-delimited file.  The record array dtype names\nwill be used for column headers.\n\n*fname*: can be a filename or a file handle.  Support for gzipped\n  files is automatic, if the filename ends in '.gz'\n\n.. seealso::\n    :func:`csv2rec`:\n        For information about *missing* and *missingd*, which can\n        be used to fill in masked values into your CSV file.", "id": "f17237:m91"}
{"signature": "def stineman_interp(xi,x,y,yp=None):", "body": "<EOL>x=np.asarray(x, np.float_)<EOL>y=np.asarray(y, np.float_)<EOL>assert x.shape == y.shape<EOL>N=len(y)<EOL>if yp is None:<EOL><INDENT>yp = slopes(x,y)<EOL><DEDENT>else:<EOL><INDENT>yp=np.asarray(yp, np.float_)<EOL><DEDENT>xi=np.asarray(xi, np.float_)<EOL>yi=np.zeros(xi.shape, np.float_)<EOL>dx = x[<NUM_LIT:1>:] - x[:-<NUM_LIT:1>]<EOL>dy = y[<NUM_LIT:1>:] - y[:-<NUM_LIT:1>]<EOL>s = dy/dx  <EOL>idx = np.searchsorted(x[<NUM_LIT:1>:-<NUM_LIT:1>], xi)<EOL>sidx = s.take(idx)<EOL>xidx = x.take(idx)<EOL>yidx = y.take(idx)<EOL>xidxp1 = x.take(idx+<NUM_LIT:1>)<EOL>yo = yidx + sidx * (xi - xidx)<EOL>dy1 = (yp.take(idx)- sidx) * (xi - xidx)       <EOL>dy2 = (yp.take(idx+<NUM_LIT:1>)-sidx) * (xi - xidxp1) <EOL>dy1dy2 = dy1*dy2<EOL>yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+<NUM_LIT:1>,<EOL>((<NUM_LIT:2>*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),<EOL><NUM_LIT:0.0>,<EOL><NUM_LIT:1>/(dy1+dy2),))<EOL>return yi<EOL>", "docstring": "STINEMAN_INTERP Well behaved data interpolation.  Given data\nvectors X and Y, the slope vector YP and a new abscissa vector XI\nthe function stineman_interp(xi,x,y,yp) uses Stineman\ninterpolation to calculate a vector YI corresponding to XI.\n\nHere's an example that generates a coarse sine curve, then\ninterpolates over a finer abscissa:\n\n  x = linspace(0,2*pi,20);  y = sin(x); yp = cos(x)\n  xi = linspace(0,2*pi,40);\n  yi = stineman_interp(xi,x,y,yp);\n  plot(x,y,'o',xi,yi)\n\nThe interpolation method is described in the article A\nCONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell\nW. Stineman. 
The article appeared in the July 1980 issue of\nCreative Computing with a note from the editor stating that while\nthey were\n\n  not an academic journal but once in a while something serious\n  and original comes in adding that this was\n  \"apparently a real solution\" to a well known problem.\n\nFor yp=None, the routine automatically determines the slopes using\nthe \"slopes\" routine.\n\nX is assumed to be sorted in increasing order\n\nFor values xi[j] < x[0] or xi[j] > x[-1], the routine tries a\nextrapolation.  The relevance of the data obtained from this, of\ncourse, questionable...\n\noriginal implementation by Halldor Bjornsson, Icelandic\nMeteorolocial Office, March 2006 halldor at vedur.is\n\ncompletely reworked and optimized for Python by Norbert Nemec,\nInstitute of Theoretical Physics, University or Regensburg, April\n2006 Norbert.Nemec at physik.uni-regensburg.de", "id": "f17237:m49"}
{"signature": "def isvector(X):", "body": "return np.prod(X.shape)==np.max(X.shape)<EOL>", "docstring": "Like the Matlab (TM) function with the same name, returns *True*\nif the supplied numpy array or matrix *X* looks like a vector,\nmeaning it has a one non-singleton axis (i.e., it can have\nmultiple axes, but all must have length 1, except for one of\nthem).\n\nIf you just want to see if the array has 1 axis, use X.ndim == 1.", "id": "f17237:m69"}
{"signature": "def amap(fn,*args):", "body": "return np.array(list(map(fn,*args)))<EOL>", "docstring": "amap(function, sequence[, sequence, ...]) -> array.\n\nWorks like :func:`map`, but it returns an array.  This is just a\nconvenient shorthand for ``numpy.array(map(...))``.", "id": "f17237:m54"}
{"signature": "def log2(x,ln2 = math.log(<NUM_LIT>)):", "body": "try:<EOL><INDENT>bin_n = binary_repr(x)[<NUM_LIT:1>:]<EOL><DEDENT>except (AssertionError,TypeError):<EOL><INDENT>return math.log(x)/ln2<EOL><DEDENT>else:<EOL><INDENT>if '<STR_LIT:1>' in bin_n:<EOL><INDENT>return math.log(x)/ln2<EOL><DEDENT>else:<EOL><INDENT>return len(bin_n)<EOL><DEDENT><DEDENT>", "docstring": "Return the log(*x*) in base 2.\n\nThis is a _slow_ function but which is guaranteed to return the correct\ninteger value if the input is an integer exact power of 2.", "id": "f17237:m67"}
{"signature": "def sqrtm(x):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Deprecated - needs clean room implementation", "id": "f17237:m75"}
{"signature": "def safe_isnan(x):", "body": "if cbook.is_string_like(x):<EOL><INDENT>return False<EOL><DEDENT>try: b = np.isnan(x)<EOL>except NotImplementedError: return False<EOL>except TypeError: return False<EOL>else: return b<EOL>", "docstring": ":func:`numpy.isnan` for arbitrary types", "id": "f17237:m78"}
{"signature": "def longest_contiguous_ones(x):", "body": "x = np.ravel(x)<EOL>if len(x)==<NUM_LIT:0>:<EOL><INDENT>return np.array([])<EOL><DEDENT>ind = (x==<NUM_LIT:0>).nonzero()[<NUM_LIT:0>]<EOL>if len(ind)==<NUM_LIT:0>:<EOL><INDENT>return np.arange(len(x))<EOL><DEDENT>if len(ind)==len(x):<EOL><INDENT>return np.array([])<EOL><DEDENT>y = np.zeros( (len(x)+<NUM_LIT:2>,), x.dtype)<EOL>y[<NUM_LIT:1>:-<NUM_LIT:1>] = x<EOL>dif = np.diff(y)<EOL>up = (dif ==  <NUM_LIT:1>).nonzero()[<NUM_LIT:0>];<EOL>dn = (dif == -<NUM_LIT:1>).nonzero()[<NUM_LIT:0>];<EOL>i = (dn-up == max(dn - up)).nonzero()[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>ind = np.arange(up[i], dn[i])<EOL>return ind<EOL>", "docstring": "Return the indices of the longest stretch of contiguous ones in *x*,\nassuming *x* is a vector of zeros and ones.  If there are two\nequally long stretches, pick the first.", "id": "f17237:m30"}
{"signature": "def prepca(P, frac=<NUM_LIT:0>):", "body": "U,s,v = np.linalg.svd(P)<EOL>varEach = s**<NUM_LIT:2>/P.shape[<NUM_LIT:1>]<EOL>totVar = varEach.sum()<EOL>fracVar = varEach/totVar<EOL>ind = slice((fracVar>=frac).sum())<EOL>Trans = U[:,ind].transpose()<EOL>Pcomponents = np.dot(Trans,P)<EOL>return Pcomponents, Trans, fracVar[ind]<EOL>", "docstring": "Compute the principal components of *P*.  *P* is a (*numVars*,\n*numObs*) array.  *frac* is the minimum fraction of variance that a\ncomponent must contain to be included.\n\nReturn value is a tuple of the form (*Pcomponents*, *Trans*,\n*fracVar*) where:\n\n  - *Pcomponents* : a (numVars, numObs) array\n\n  - *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* *\n     *P*\n\n  - *fracVar* : the fraction of the variance accounted for by each\n     component returned\n\nA similar function of the same name was in the Matlab (TM)\nR13 Neural Network Toolbox but is not found in later versions;\nits successor seems to be called \"processpcs\".", "id": "f17237:m32"}
{"signature": "def poly_below(ymin, xs, ys):", "body": "return poly_between(xs, ys, xmin)<EOL>", "docstring": "given a arrays *xs* and *ys*, return the vertices of a polygon\nthat has a scalar lower bound *ymin* and an upper bound at the *ys*.\n\nintended for use with Axes.fill, eg::\n\n  xv, yv = poly_below(0, x, y)\n  ax.fill(xv, yv)", "id": "f17237:m51"}
{"signature": "def center_matrix(M, dim=<NUM_LIT:0>):", "body": "M = np.asarray(M, np.float_)<EOL>if dim:<EOL><INDENT>M = (M - M.mean(axis=<NUM_LIT:0>)) / M.std(axis=<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>M = (M - M.mean(axis=<NUM_LIT:1>)[:,np.newaxis])<EOL>M = M / M.std(axis=<NUM_LIT:1>)[:,np.newaxis]<EOL><DEDENT>return M<EOL>", "docstring": "Return the matrix *M* with each row having zero mean and unit std.\n\nIf *dim* = 1 operate on columns instead of rows.  (*dim* is\nopposite to the numpy axis kwarg.)", "id": "f17237:m35"}
{"signature": "def semilogx(self, *args, **kwargs):", "body": "if not self._hold: self.cla()<EOL>d = {'<STR_LIT>': kwargs.pop( '<STR_LIT>', <NUM_LIT:10>),<EOL>'<STR_LIT>': kwargs.pop( '<STR_LIT>', None),<EOL>}<EOL>self.set_xscale('<STR_LIT>', **d)<EOL>b =  self._hold<EOL>self._hold = True <EOL>l = self.plot(*args, **kwargs)<EOL>self._hold = b    <EOL>return l<EOL>", "docstring": "call signature::\n\n  semilogx(*args, **kwargs)\n\nMake a plot with log scaling on the *x* axis.\n\n:func:`semilogx` supports all the keyword arguments of\n:func:`~matplotlib.pyplot.plot` and\n:meth:`matplotlib.axes.Axes.set_xscale`.\n\nNotable keyword arguments:\n\n  *basex*: scalar > 1\n    base of the *x* logarithm\n\n  *subsx*: [ None | sequence ]\n    The location of the minor xticks; *None* defaults to\n    autosubs, which depend on the number of decades in the\n    plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for\n    details.\n\nThe remaining valid kwargs are\n:class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\n.. seealso::\n    :meth:`loglog`:\n        For example code and figure", "id": "f17238:c1:m141"}
{"signature": "def yaxis_date(self, tz=None):", "body": "ymin, ymax = self.dataLim.intervaly<EOL>if ymin==<NUM_LIT:0.>:<EOL><INDENT>dmax = today = datetime.date.today()<EOL>dmin = today-datetime.timedelta(days=<NUM_LIT:10>)<EOL>self._process_unit_info(ydata=(dmin, dmax))<EOL>dmin, dmax = self.convert_yunits([dmin, dmax])<EOL>self.viewLim.intervaly = dmin, dmax<EOL>self.dataLim.intervaly = dmin, dmax<EOL><DEDENT>locator = self.yaxis.get_major_locator()<EOL>if not isinstance(locator, mdates.DateLocator):<EOL><INDENT>locator = mdates.AutoDateLocator(tz)<EOL>self.yaxis.set_major_locator(locator)<EOL><DEDENT>if self.viewLim.intervaly[<NUM_LIT:0>]==<NUM_LIT:0.>:<EOL><INDENT>self.viewLim.intervaly = tuple(self.dataLim.intervaly)<EOL><DEDENT>locator.refresh()<EOL>formatter = self.xaxis.get_major_formatter()<EOL>if not isinstance(formatter, mdates.DateFormatter):<EOL><INDENT>formatter = mdates.AutoDateFormatter(locator, tz)<EOL>self.yaxis.set_major_formatter(formatter)<EOL><DEDENT>", "docstring": "Sets up y-axis ticks and labels that treat the y data as dates.\n\n        *tz* is the time zone to use in labeling dates.  Defaults to rc value.", "id": "f17238:c1:m104"}
{"signature": "def relim(self):", "body": "<EOL>self.dataLim.ignore(True)<EOL>self.ignore_existing_data_limits = True<EOL>for line in self.lines:<EOL><INDENT>self._update_line_limits(line)<EOL><DEDENT>for p in self.patches:<EOL><INDENT>self._update_patch_limits(p)<EOL><DEDENT>", "docstring": "recompute the data limits based on current artists", "id": "f17238:c1:m51"}
{"signature": "def get_legend(self):", "body": "return self.legend_<EOL>", "docstring": "Return the legend.Legend instance, or None if no legend is defined", "id": "f17238:c1:m34"}
{"signature": "def set_xticks(self, ticks, minor=False):", "body": "return self.xaxis.set_ticks(ticks, minor=minor)<EOL>", "docstring": "Set the x ticks with list of *ticks*\n\nACCEPTS: sequence of floats", "id": "f17238:c1:m84"}
{"signature": "def pcolor(self, *args, **kwargs):", "body": "if not self._hold: self.cla()<EOL>alpha = kwargs.pop('<STR_LIT>', <NUM_LIT:1.0>)<EOL>norm = kwargs.pop('<STR_LIT>', None)<EOL>cmap = kwargs.pop('<STR_LIT>', None)<EOL>vmin = kwargs.pop('<STR_LIT>', None)<EOL>vmax = kwargs.pop('<STR_LIT>', None)<EOL>shading = kwargs.pop('<STR_LIT>', '<STR_LIT>')<EOL>X, Y, C = self._pcolorargs('<STR_LIT>', *args)<EOL>Ny, Nx = X.shape<EOL>C = ma.asarray(C)<EOL>X = ma.asarray(X)<EOL>Y = ma.asarray(Y)<EOL>mask = ma.getmaskarray(X)+ma.getmaskarray(Y)<EOL>xymask = mask[<NUM_LIT:0>:-<NUM_LIT:1>,<NUM_LIT:0>:-<NUM_LIT:1>]+mask[<NUM_LIT:1>:,<NUM_LIT:1>:]+mask[<NUM_LIT:0>:-<NUM_LIT:1>,<NUM_LIT:1>:]+mask[<NUM_LIT:1>:,<NUM_LIT:0>:-<NUM_LIT:1>]<EOL>mask = ma.getmaskarray(C)[<NUM_LIT:0>:Ny-<NUM_LIT:1>,<NUM_LIT:0>:Nx-<NUM_LIT:1>]+xymask<EOL>newaxis = np.newaxis<EOL>compress = np.compress<EOL>ravelmask = (mask==<NUM_LIT:0>).ravel()<EOL>X1 = compress(ravelmask, ma.filled(X[<NUM_LIT:0>:-<NUM_LIT:1>,<NUM_LIT:0>:-<NUM_LIT:1>]).ravel())<EOL>Y1 = compress(ravelmask, ma.filled(Y[<NUM_LIT:0>:-<NUM_LIT:1>,<NUM_LIT:0>:-<NUM_LIT:1>]).ravel())<EOL>X2 = compress(ravelmask, ma.filled(X[<NUM_LIT:1>:,<NUM_LIT:0>:-<NUM_LIT:1>]).ravel())<EOL>Y2 = compress(ravelmask, ma.filled(Y[<NUM_LIT:1>:,<NUM_LIT:0>:-<NUM_LIT:1>]).ravel())<EOL>X3 = compress(ravelmask, ma.filled(X[<NUM_LIT:1>:,<NUM_LIT:1>:]).ravel())<EOL>Y3 = compress(ravelmask, ma.filled(Y[<NUM_LIT:1>:,<NUM_LIT:1>:]).ravel())<EOL>X4 = compress(ravelmask, ma.filled(X[<NUM_LIT:0>:-<NUM_LIT:1>,<NUM_LIT:1>:]).ravel())<EOL>Y4 = compress(ravelmask, ma.filled(Y[<NUM_LIT:0>:-<NUM_LIT:1>,<NUM_LIT:1>:]).ravel())<EOL>npoly = len(X1)<EOL>xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],<EOL>X2[:,newaxis], Y2[:,newaxis],<EOL>X3[:,newaxis], Y3[:,newaxis],<EOL>X4[:,newaxis], Y4[:,newaxis],<EOL>X1[:,newaxis], Y1[:,newaxis]),<EOL>axis=<NUM_LIT:1>)<EOL>verts = xy.reshape((npoly, <NUM_LIT:5>, <NUM_LIT:2>))<EOL>C = compress(ravelmask, 
ma.filled(C[<NUM_LIT:0>:Ny-<NUM_LIT:1>,<NUM_LIT:0>:Nx-<NUM_LIT:1>]).ravel())<EOL>if shading == '<STR_LIT>':<EOL><INDENT>edgecolors = (<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:0>,<NUM_LIT:1>),<EOL>linewidths = (<NUM_LIT>,)<EOL><DEDENT>else:<EOL><INDENT>edgecolors = '<STR_LIT>'<EOL>linewidths = (<NUM_LIT:1.0>,)<EOL><DEDENT>kwargs.setdefault('<STR_LIT>', edgecolors)<EOL>kwargs.setdefault('<STR_LIT>', (<NUM_LIT:0>,))<EOL>kwargs.setdefault('<STR_LIT>', linewidths)<EOL>collection = mcoll.PolyCollection(verts, **kwargs)<EOL>collection.set_alpha(alpha)<EOL>collection.set_array(C)<EOL>if norm is not None: assert(isinstance(norm, mcolors.Normalize))<EOL>if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))<EOL>collection.set_cmap(cmap)<EOL>collection.set_norm(norm)<EOL>if vmin is not None or vmax is not None:<EOL><INDENT>collection.set_clim(vmin, vmax)<EOL><DEDENT>else:<EOL><INDENT>collection.autoscale_None()<EOL><DEDENT>self.grid(False)<EOL>x = X.compressed()<EOL>y = Y.compressed()<EOL>minx = np.amin(x)<EOL>maxx = np.amax(x)<EOL>miny = np.amin(y)<EOL>maxy = np.amax(y)<EOL>corners = (minx, miny), (maxx, maxy)<EOL>self.update_datalim( corners)<EOL>self.autoscale_view()<EOL>self.add_collection(collection)<EOL>return collection<EOL>", "docstring": "call signatures::\n\n  pcolor(C, **kwargs)\n  pcolor(X, Y, C, **kwargs)\n\nCreate a pseudocolor plot of a 2-D array.\n\n*C* is the array of color values.\n\n*X* and *Y*, if given, specify the (*x*, *y*) coordinates of\nthe colored quadrilaterals; the quadrilateral for C[i,j] has\ncorners at::\n\n  (X[i,   j],   Y[i,   j]),\n  (X[i,   j+1], Y[i,   j+1]),\n  (X[i+1, j],   Y[i+1, j]),\n  (X[i+1, j+1], Y[i+1, j+1]).\n\nIdeally the dimensions of *X* and *Y* should be one greater\nthan those of *C*; if the dimensions are the same, then the\nlast row and column of *C* will be ignored.\n\nNote that the the column index corresponds to the\n*x*-coordinate, and the row index corresponds to *y*; for\ndetails, see the :ref:`Grid 
Orientation\n<axes-pcolor-grid-orientation>` section below.\n\nIf either or both of *X* and *Y* are 1-D arrays or column vectors,\nthey will be expanded as needed into the appropriate 2-D arrays,\nmaking a rectangular grid.\n\n*X*, *Y* and *C* may be masked arrays.  If either C[i, j], or one\nof the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],\n[i, j+1],[i+1, j+1]) is masked, nothing is plotted.\n\nKeyword arguments:\n\n  *cmap*: [ None | Colormap ]\n    A :class:`matplotlib.cm.Colormap` instance. If *None*, use\n    rc settings.\n\n  norm: [ None | Normalize ]\n    An :class:`matplotlib.colors.Normalize` instance is used\n    to scale luminance data to 0,1. If *None*, defaults to\n    :func:`normalize`.\n\n  *vmin*/*vmax*: [ None | scalar ]\n    *vmin* and *vmax* are used in conjunction with *norm* to\n    normalize luminance data.  If either are *None*, the min\n    and max of the color array *C* is used.  If you pass a\n    *norm* instance, *vmin* and *vmax* will be ignored.\n\n  *shading*: [ 'flat' | 'faceted' ]\n    If 'faceted', a black grid is drawn around each rectangle; if\n    'flat', edges are not drawn. Default is 'flat', contrary to\n    Matlab(TM).\n\n    This kwarg is deprecated; please use 'edgecolors' instead:\n      * shading='flat' -- edgecolors='None'\n      * shading='faceted  -- edgecolors='k'\n\n  *edgecolors*: [ None | 'None' | color | color sequence]\n    If *None*, the rc setting is used by default.\n\n    If 'None', edges will not be visible.\n\n    An mpl color or sequence of colors will set the edge color\n\n  *alpha*: 0 <= scalar <= 1\n    the alpha blending value\n\nReturn value is a :class:`matplotlib.collection.Collection`\ninstance.\n\n.. 
_axes-pcolor-grid-orientation:\n\nThe grid orientation follows the Matlab(TM) convention: an\narray *C* with shape (*nrows*, *ncolumns*) is plotted with\nthe column number as *X* and the row number as *Y*, increasing\nup; hence it is plotted the way the array would be printed,\nexcept that the *Y* axis is reversed.  That is, *C* is taken\nas *C*(*y*, *x*).\n\nSimilarly for :func:`~matplotlib.pyplot.meshgrid`::\n\n  x = np.arange(5)\n  y = np.arange(3)\n  X, Y = meshgrid(x,y)\n\nis equivalent to:\n\n  X = array([[0, 1, 2, 3, 4],\n             [0, 1, 2, 3, 4],\n             [0, 1, 2, 3, 4]])\n\n  Y = array([[0, 0, 0, 0, 0],\n             [1, 1, 1, 1, 1],\n             [2, 2, 2, 2, 2]])\n\nso if you have::\n\n  C = rand( len(x), len(y))\n\nthen you need::\n\n  pcolor(X, Y, C.T)\n\nor::\n\n  pcolor(C.T)\n\nMatlab :func:`pcolor` always discards the last row and column\nof *C*, but matplotlib displays the last row and column if *X* and\n*Y* are not specified, or if *X* and *Y* have one more row and\ncolumn than *C*.\n\nkwargs can be used to control the\n:class:`~matplotlib.collection.PolyCollection` properties:\n\n%(PolyCollection)s", "id": "f17238:c1:m164"}
{"signature": "def get_xmajorticklabels(self):", "body": "return cbook.silent_list('<STR_LIT>',<EOL>self.xaxis.get_majorticklabels())<EOL>", "docstring": "Get the xtick labels as a list of Text instances", "id": "f17238:c1:m85"}
{"signature": "def set_default_color_cycle(clist):", "body": "_process_plot_var_args.defaultColors = clist[:]<EOL>rcParams['<STR_LIT>'] = clist[<NUM_LIT:0>]<EOL>", "docstring": "Change the default cycle of colors that will be used by the plot\ncommand.  This must be called before creating the\n:class:`Axes` to which it will apply; it will\napply to all future axes.\n\n*clist* is a sequence of mpl color specifiers", "id": "f17238:m1"}
{"signature": "def axhspan(self, ymin, ymax, xmin=<NUM_LIT:0>, xmax=<NUM_LIT:1>, **kwargs):", "body": "trans = mtransforms.blended_transform_factory(<EOL>self.transAxes, self.transData)<EOL>self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )<EOL>xmin, xmax = self.convert_xunits( [xmin, xmax] )<EOL>ymin, ymax = self.convert_yunits( [ymin, ymax] )<EOL>verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)<EOL>p = mpatches.Polygon(verts, **kwargs)<EOL>p.set_transform(trans)<EOL>p.x_isdata = False<EOL>self.add_patch(p)<EOL>return p<EOL>", "docstring": "call signature::\n\n  axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)\n\nAxis Horizontal Span.\n\n*y* coords are in data units and *x* coords are in axes (relative\n0-1) units.\n\nDraw a horizontal span (rectangle) from *ymin* to *ymax*.\nWith the default values of *xmin* = 0 and *xmax* = 1, this\nalways spans the xrange, regardless of the xlim settings, even\nif you change them, eg. with the :meth:`set_xlim` command.\nThat is, the horizontal extent is in axes coords: 0=left,\n0.5=middle, 1.0=right but the *y* location is in data\ncoordinates.\n\nReturn value is a :class:`matplotlib.patches.Polygon`\ninstance.\n\nExamples:\n\n* draw a gray rectangle from *y* = 0.25-0.75 that spans the\n  horizontal extent of the axes\n\n    >>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)\n\nValid kwargs are :class:`~matplotlib.patches.Polygon` properties:\n\n%(Polygon)s\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/axhspan_demo.py", "id": "f17238:c1:m134"}
{"signature": "def vlines(self, x, ymin, ymax, colors='<STR_LIT:k>', linestyles='<STR_LIT>',<EOL>label='<STR_LIT>', **kwargs):", "body": "if kwargs.get('<STR_LIT>') is not None:<EOL><INDENT>raise DeprecationWarning('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)<EOL>x = self.convert_xunits( x )<EOL>ymin = self.convert_yunits( ymin )<EOL>ymax = self.convert_yunits( ymax )<EOL>if not iterable(x): x = [x]<EOL>if not iterable(ymin): ymin = [ymin]<EOL>if not iterable(ymax): ymax = [ymax]<EOL>x = np.asarray(x)<EOL>ymin = np.asarray(ymin)<EOL>ymax = np.asarray(ymax)<EOL>if len(ymin)==<NUM_LIT:1>:<EOL><INDENT>ymin = np.resize( ymin, x.shape )<EOL><DEDENT>if len(ymax)==<NUM_LIT:1>:<EOL><INDENT>ymax = np.resize( ymax, x.shape )<EOL><DEDENT>if len(ymin)!=len(x):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if len(ymax)!=len(x):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>Y = np.array([ymin, ymax]).T<EOL>verts = [ ((thisx, thisymin), (thisx, thisymax))<EOL>for thisx, (thisymin, thisymax) in zip(x,Y)]<EOL>coll = mcoll.LineCollection(verts, colors=colors,<EOL>linestyles=linestyles, label=label)<EOL>self.add_collection(coll)<EOL>coll.update(kwargs)<EOL>minx = min( x )<EOL>maxx = max( x )<EOL>miny = min( min(ymin), min(ymax) )<EOL>maxy = max( max(ymin), max(ymax) )<EOL>corners = (minx, miny), (maxx, maxy)<EOL>self.update_datalim(corners)<EOL>self.autoscale_view()<EOL>return coll<EOL>", "docstring": "call signature::\n\n  vlines(x, ymin, ymax, color='k', linestyles='solid')\n\nPlot vertical lines at each *x* from *ymin* to *ymax*.  *ymin*\nor *ymax* can be scalars or len(*x*) numpy arrays.  
If they are\nscalars, then the respective values are constant, else the\nheights of the lines are determined by *ymin* and *ymax*.\n\n*colors*\n  a line collections color args, either a single color\n  or a len(*x*) list of colors\n\n*linestyles*\n\n  one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]\n\nReturns the :class:`matplotlib.collections.LineCollection`\nthat was added.\n\nkwargs are :class:`~matplotlib.collections.LineCollection` properties:\n\n%(LineCollection)s", "id": "f17238:c1:m137"}
{"signature": "def get_frame_on(self):", "body": "return self._frameon<EOL>", "docstring": "Get whether the axes rectangle patch is drawn", "id": "f17238:c1:m65"}
{"signature": "def can_zoom(self):", "body": "return True<EOL>", "docstring": "Return *True* if this axes support the zoom box", "id": "f17238:c1:m108"}
{"signature": "def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):", "body": "return self.yaxis.set_ticklabels(labels, fontdict,<EOL>minor=minor, **kwargs)<EOL>", "docstring": "call signature::\n\n  set_yticklabels(labels, fontdict=None, minor=False, **kwargs)\n\nSet the ytick labels with list of strings *labels*.  Return a list of\n:class:`~matplotlib.text.Text` instances.\n\n*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.\nValid properties are\n%(Text)s\n\nACCEPTS: sequence of strings", "id": "f17238:c1:m102"}
{"signature": "def table(self, **kwargs):", "body": "return mtable.table(self, **kwargs)<EOL>", "docstring": "call signature::\n\n  table(cellText=None, cellColours=None,\n        cellLoc='right', colWidths=None,\n        rowLabels=None, rowColours=None, rowLoc='left',\n        colLabels=None, colColours=None, colLoc='center',\n        loc='bottom', bbox=None):\n\nAdd a table to the current axes.  Returns a\n:class:`matplotlib.table.Table` instance.  For finer grained\ncontrol over tables, use the :class:`~matplotlib.table.Table`\nclass and add it to the axes with\n:meth:`~matplotlib.axes.Axes.add_table`.\n\nThanks to John Gill for providing the class and table.\n\nkwargs control the :class:`~matplotlib.table.Table`\nproperties:\n\n%(Table)s", "id": "f17238:c1:m170"}
{"signature": "def loglog(self, *args, **kwargs):", "body": "if not self._hold: self.cla()<EOL>dx = {'<STR_LIT>': kwargs.pop('<STR_LIT>', <NUM_LIT:10>),<EOL>'<STR_LIT>': kwargs.pop('<STR_LIT>', None),<EOL>}<EOL>dy = {'<STR_LIT>': kwargs.pop('<STR_LIT>', <NUM_LIT:10>),<EOL>'<STR_LIT>': kwargs.pop('<STR_LIT>', None),<EOL>}<EOL>self.set_xscale('<STR_LIT>', **dx)<EOL>self.set_yscale('<STR_LIT>', **dy)<EOL>b =  self._hold<EOL>self._hold = True <EOL>l = self.plot(*args, **kwargs)<EOL>self._hold = b    <EOL>return l<EOL>", "docstring": "call signature::\n\n  loglog(*args, **kwargs)\n\nMake a plot with log scaling on the *x* and *y* axis.\n\n:func:`~matplotlib.pyplot.loglog` supports all the keyword\narguments of :func:`~matplotlib.pyplot.plot` and\n:meth:`matplotlib.axes.Axes.set_xscale` /\n:meth:`matplotlib.axes.Axes.set_yscale`.\n\nNotable keyword arguments:\n\n  *basex*/*basey*: scalar > 1\n    base of the *x*/*y* logarithm\n\n  *subsx*/*subsy*: [ None | sequence ]\n    the location of the minor *x*/*y* ticks; *None* defaults\n    to autosubs, which depend on the number of decades in the\n    plot; see :meth:`matplotlib.axes.Axes.set_xscale` /\n    :meth:`matplotlib.axes.Axes.set_yscale` for details\n\nThe remaining valid kwargs are\n:class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/log_demo.py", "id": "f17238:c1:m140"}
{"signature": "def axhline(self, y=<NUM_LIT:0>, xmin=<NUM_LIT:0>, xmax=<NUM_LIT:1>, **kwargs):", "body": "ymin, ymax = self.get_ybound()<EOL>yy = self.convert_yunits( y )<EOL>scaley = (yy<ymin) or (yy>ymax)<EOL>trans = mtransforms.blended_transform_factory(<EOL>self.transAxes, self.transData)<EOL>l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)<EOL>l.x_isdata = False<EOL>self.add_line(l)<EOL>self.autoscale_view(scalex=False, scaley=scaley)<EOL>return l<EOL>", "docstring": "call signature::\n\n  axhline(y=0, xmin=0, xmax=1, **kwargs)\n\nAxis Horizontal Line\n\nDraw a horizontal line at *y* from *xmin* to *xmax*.  With the\ndefault values of *xmin* = 0 and *xmax* = 1, this line will\nalways span the horizontal extent of the axes, regardless of\nthe xlim settings, even if you change them, eg. with the\n:meth:`set_xlim` command.  That is, the horizontal extent is\nin axes coords: 0=left, 0.5=middle, 1.0=right but the *y*\nlocation is in data coordinates.\n\nReturn value is the :class:`~matplotlib.lines.Line2D`\ninstance.  kwargs are the same as kwargs to plot, and can be\nused to control the line properties.  Eg.,\n\n* draw a thick red hline at *y* = 0 that spans the xrange\n\n    >>> axhline(linewidth=4, color='r')\n\n* draw a default hline at *y* = 1 that spans the xrange\n\n    >>> axhline(y=1)\n\n* draw a default hline at *y* = .5 that spans the the middle half of\n  the xrange\n\n    >>> axhline(y=.5, xmin=0.25, xmax=0.75)\n\nValid kwargs are :class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\n.. seealso::\n    :meth:`axhspan`:\n        for example plot and source code", "id": "f17238:c1:m132"}
{"signature": "def set_cursor_props(self, *args):", "body": "if len(args)==<NUM_LIT:1>:<EOL><INDENT>lw, c = args[<NUM_LIT:0>]<EOL><DEDENT>elif len(args)==<NUM_LIT:2>:<EOL><INDENT>lw, c = args<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>c =mcolors.colorConverter.to_rgba(c)<EOL>self._cursorProps = lw, c<EOL>", "docstring": "Set the cursor property as::\n\n  ax.set_cursor_props(linewidth, color)\n\nor::\n\n  ax.set_cursor_props((linewidth, color))\n\nACCEPTS: a (*float*, *color*) tuple", "id": "f17238:c1:m117"}
{"signature": "def semilogy(self, *args, **kwargs):", "body": "if not self._hold: self.cla()<EOL>d = {'<STR_LIT>': kwargs.pop('<STR_LIT>', <NUM_LIT:10>),<EOL>'<STR_LIT>': kwargs.pop('<STR_LIT>', None),<EOL>}<EOL>self.set_yscale('<STR_LIT>', **d)<EOL>b =  self._hold<EOL>self._hold = True <EOL>l = self.plot(*args, **kwargs)<EOL>self._hold = b    <EOL>return l<EOL>", "docstring": "call signature::\n\n  semilogy(*args, **kwargs)\n\nMake a plot with log scaling on the *y* axis.\n\n:func:`semilogy` supports all the keyword arguments of\n:func:`~matplotlib.pylab.plot` and\n:meth:`matplotlib.axes.Axes.set_yscale`.\n\nNotable keyword arguments:\n\n  *basey*: scalar > 1\n    Base of the *y* logarithm\n\n  *subsy*: [ None | sequence ]\n    The location of the minor yticks; *None* defaults to\n    autosubs, which depend on the number of decades in the\n    plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for\n    details.\n\nThe remaining valid kwargs are\n:class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\n.. seealso::\n    :meth:`loglog`:\n        For example code and figure", "id": "f17238:c1:m142"}
{"signature": "def set_axis_bgcolor(self, color):", "body": "self._axisbg = color<EOL>self.patch.set_facecolor(color)<EOL>", "docstring": "set the axes background color\n\nACCEPTS: any matplotlib color - see\n:func:`~matplotlib.pyplot.colors`", "id": "f17238:c1:m74"}
{"signature": "def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):", "body": "if xmax is None and iterable(xmin):<EOL><INDENT>xmin,xmax = xmin<EOL><DEDENT>self._process_unit_info(xdata=(xmin, xmax))<EOL>if xmin is not None:<EOL><INDENT>xmin = self.convert_xunits(xmin)<EOL><DEDENT>if xmax is not None:<EOL><INDENT>xmax = self.convert_xunits(xmax)<EOL><DEDENT>old_xmin,old_xmax = self.get_xlim()<EOL>if xmin is None: xmin = old_xmin<EOL>if xmax is None: xmax = old_xmax<EOL>xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)<EOL>xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)<EOL>self.viewLim.intervalx = (xmin, xmax)<EOL>if emit:<EOL><INDENT>self.callbacks.process('<STR_LIT>', self)<EOL>for other in self._shared_x_axes.get_siblings(self):<EOL><INDENT>if other is not self:<EOL><INDENT>other.set_xlim(self.viewLim.intervalx, emit=False)<EOL>if (other.figure != self.figure and<EOL>other.figure.canvas is not None):<EOL><INDENT>other.figure.canvas.draw_idle()<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return xmin, xmax<EOL>", "docstring": "call signature::\n\n  set_xlim(self, *args, **kwargs)\n\nSet the limits for the xaxis\n\nReturns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]\n\nExamples::\n\n  set_xlim((valmin, valmax))\n  set_xlim(valmin, valmax)\n  set_xlim(xmin=1) # xmax unchanged\n  set_xlim(xmax=1) # xmin unchanged\n\nKeyword arguments:\n\n  *ymin*: scalar\n    the min of the ylim\n  *ymax*: scalar\n    the max of the ylim\n  *emit*: [ True | False ]\n    notify observers of lim change\n\nACCEPTS: len(2) sequence of floats", "id": "f17238:c1:m80"}
{"signature": "def get_xaxis_text2_transform(self, pad_points):", "body": "return (self._xaxis_transform +<EOL>mtransforms.ScaledTranslation(<NUM_LIT:0>, pad_points / <NUM_LIT>,<EOL>self.figure.dpi_scale_trans),<EOL>\"<STR_LIT>\", \"<STR_LIT>\")<EOL>", "docstring": "Get the transformation used for drawing the secondary x-axis\nlabels, which will add the given amount of padding (in points)\nbetween the axes and the label.  The x-direction is in data\ncoordinates and the y-direction is in axis coordinates.\nReturns a 3-tuple of the form::\n\n  (transform, valign, halign)\n\nwhere *valign* and *halign* are requested alignments for the\ntext.\n\n.. note::\n    This transformation is primarily used by the\n    :class:`~matplotlib.axis.Axis` class, and is meant to be\n    overridden by new kinds of projections that may need to\n    place axis elements in different locations.", "id": "f17238:c1:m8"}
{"signature": "def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):", "body": "return self.xaxis.set_ticklabels(labels, fontdict,<EOL>minor=minor, **kwargs)<EOL>", "docstring": "call signature::\n\n  set_xticklabels(labels, fontdict=None, minor=False, **kwargs)\n\nSet the xtick labels with list of strings *labels*. Return a\nlist of axis text instances.\n\n*kwargs* set the :class:`~matplotlib.text.Text` properties.\nValid properties are\n%(Text)s\n\nACCEPTS: sequence of strings", "id": "f17238:c1:m88"}
{"signature": "def _process_plot_format(fmt):", "body": "linestyle = None<EOL>marker = None<EOL>color = None<EOL>try:<EOL><INDENT>color = mcolors.colorConverter.to_rgb(fmt)<EOL>return linestyle, marker, color     <EOL><DEDENT>except ValueError:<EOL><INDENT>pass                                <EOL><DEDENT>if fmt.find('<STR_LIT>')>=<NUM_LIT:0>:<EOL><INDENT>linestyle = '<STR_LIT>'<EOL>fmt = fmt.replace('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>if fmt.find('<STR_LIT>')>=<NUM_LIT:0>:<EOL><INDENT>linestyle = '<STR_LIT>'<EOL>fmt = fmt.replace('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>if fmt.find('<STR_LIT:U+0020>')>=<NUM_LIT:0>:<EOL><INDENT>linestyle = '<STR_LIT:None>'<EOL>fmt = fmt.replace('<STR_LIT:U+0020>', '<STR_LIT>')<EOL><DEDENT>chars = [c for c in fmt]<EOL>for c in chars:<EOL><INDENT>if c in mlines.lineStyles:<EOL><INDENT>if linestyle is not None:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' % fmt)<EOL><DEDENT>linestyle = c<EOL><DEDENT>elif c in mlines.lineMarkers:<EOL><INDENT>if marker is not None:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' % fmt)<EOL><DEDENT>marker = c<EOL><DEDENT>elif c in mcolors.colorConverter.colors:<EOL><INDENT>if color is not None:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' % fmt)<EOL><DEDENT>color = c<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>' % c)<EOL><DEDENT><DEDENT>if linestyle is None and marker is None:<EOL><INDENT>linestyle = rcParams['<STR_LIT>']<EOL><DEDENT>if linestyle is None:<EOL><INDENT>linestyle = '<STR_LIT:None>'<EOL><DEDENT>if marker is None:<EOL><INDENT>marker = '<STR_LIT:None>'<EOL><DEDENT>return linestyle, marker, color<EOL>", "docstring": "Process a matlab(TM) style color/line style format string.  Return a\n(*linestyle*, *color*) tuple as a result of the processing.  Default\nvalues are ('-', 'b').  Example format strings include:\n\n* 'ko': black circles\n* '.b': blue dots\n* 'r--': red dashed lines\n\n.. 
seealso::\n    :func:`~matplotlib.Line2D.lineStyles` and\n    :func:`~matplotlib.pyplot.colors`:\n        for all possible styles and color format string.", "id": "f17238:m0"}
{"signature": "def get_shared_y_axes(self):", "body": "return self._shared_y_axes<EOL>", "docstring": "Return a copy of the shared axes Grouper object for y axes", "id": "f17238:c1:m174"}
{"signature": "def set_aspect(self, aspect, adjustable=None, anchor=None):", "body": "if aspect in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self._aspect = '<STR_LIT>'<EOL><DEDENT>elif aspect == '<STR_LIT>':<EOL><INDENT>self._aspect = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>self._aspect = float(aspect) <EOL><DEDENT>if adjustable is not None:<EOL><INDENT>self.set_adjustable(adjustable)<EOL><DEDENT>if anchor is not None:<EOL><INDENT>self.set_anchor(anchor)<EOL><DEDENT>", "docstring": "*aspect*\n\n  ========   ================================================\n  value      description\n  ========   ================================================\n  'auto'     automatic; fill position rectangle with data\n  'normal'   same as 'auto'; deprecated\n  'equal'    same scaling from data to plot units for x and y\n   num       a circle will be stretched such that the height\n             is num times the width. aspect=1 is the same as\n             aspect='equal'.\n  ========   ================================================\n\n*adjustable*\n\n  =========   ============================\n  value       description\n  =========   ============================\n  'box'       change physical size of axes\n  'datalim'   change xlim or ylim\n  =========   ============================\n\n*anchor*\n\n  =====   =====================\n  value   description\n  =====   =====================\n  'C'     centered\n  'SW'    lower left corner\n  'S'     middle of bottom edge\n  'SE'    lower right corner\n  etc.\n  =====   =====================", "id": "f17238:c1:m24"}
{"signature": "def set_position(self, pos, which='<STR_LIT>'):", "body": "if not isinstance(pos, mtransforms.BboxBase):<EOL><INDENT>pos = mtransforms.Bbox.from_bounds(*pos)<EOL><DEDENT>if which in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self._position.set(pos)<EOL><DEDENT>if which in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self._originalPosition.set(pos)<EOL><DEDENT>", "docstring": "Set the axes position with::\n\n  pos = [left, bottom, width, height]\n\nin relative 0,1 coords, or *pos* can be a\n:class:`~matplotlib.transforms.Bbox`\n\nThere are two position variables: one which is ultimately\nused, but which may be modified by :meth:`apply_aspect`, and a\nsecond which is the starting point for :meth:`apply_aspect`.\n\n\nOptional keyword arguments:\n  *which*\n\n    ==========   ====================\n    value        description\n    ==========   ====================\n    'active'     to change the first\n    'original'   to change the second\n    'both'       to change both\n    ==========   ====================", "id": "f17238:c1:m14"}
{"signature": "def set_color_cycle(self, clist):", "body": "self._get_lines.set_color_cycle(clist)<EOL>", "docstring": "Set the color cycle for any future plot commands on this Axes.\n\nclist is a list of mpl color specifiers.", "id": "f17238:c1:m20"}
{"signature": "def fill(self, *args, **kwargs):", "body": "if not self._hold: self.cla()<EOL>patches = []<EOL>for poly in self._get_patches_for_fill(*args, **kwargs):<EOL><INDENT>self.add_patch( poly )<EOL>patches.append( poly )<EOL><DEDENT>self.autoscale_view()<EOL>return patches<EOL>", "docstring": "call signature::\n\n  fill(*args, **kwargs)\n\nPlot filled polygons.  *args* is a variable length argument,\nallowing for multiple *x*, *y* pairs with an optional color\nformat string; see :func:`~matplotlib.pyplot.plot` for details\non the argument parsing.  For example, to plot a polygon with\nvertices at *x*, *y* in blue.::\n\n  ax.fill(x,y, 'b' )\n\nAn arbitrary number of *x*, *y*, *color* groups can be specified::\n\n  ax.fill(x1, y1, 'g', x2, y2, 'r')\n\nReturn value is a list of :class:`~matplotlib.patches.Patch`\ninstances that were added.\n\nThe same color strings that :func:`~matplotlib.pyplot.plot`\nsupports are supported by the fill format string.\n\nIf you would like to fill below a curve, eg. shade a region\nbetween 0 and *y* along *x*, use :meth:`fill_between`\n\nThe *closed* kwarg will close the polygon when *True* (default).\n\nkwargs control the Polygon properties:\n\n%(Polygon)s\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/fill_demo.py", "id": "f17238:c1:m160"}
{"signature": "def add_line(self, line):", "body": "self._set_artist_props(line)<EOL>line.set_clip_path(self.patch)<EOL>self._update_line_limits(line)<EOL>if not line.get_label():<EOL><INDENT>line.set_label('<STR_LIT>'%len(self.lines))<EOL><DEDENT>self.lines.append(line)<EOL>line._remove_method = lambda h: self.lines.remove(h)<EOL>", "docstring": "Add a :class:`~matplotlib.lines.Line2D` to the list of plot\nlines", "id": "f17238:c1:m46"}
{"signature": "def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):", "body": "if ymax is None and iterable(ymin):<EOL><INDENT>ymin,ymax = ymin<EOL><DEDENT>if ymin is not None:<EOL><INDENT>ymin = self.convert_yunits(ymin)<EOL><DEDENT>if ymax is not None:<EOL><INDENT>ymax = self.convert_yunits(ymax)<EOL><DEDENT>old_ymin,old_ymax = self.get_ylim()<EOL>if ymin is None: ymin = old_ymin<EOL>if ymax is None: ymax = old_ymax<EOL>ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)<EOL>ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)<EOL>self.viewLim.intervaly = (ymin, ymax)<EOL>if emit:<EOL><INDENT>self.callbacks.process('<STR_LIT>', self)<EOL>for other in self._shared_y_axes.get_siblings(self):<EOL><INDENT>if other is not self:<EOL><INDENT>other.set_ylim(self.viewLim.intervaly, emit=False)<EOL>if (other.figure != self.figure and<EOL>other.figure.canvas is not None):<EOL><INDENT>other.figure.canvas.draw_idle()<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return ymin, ymax<EOL>", "docstring": "call signature::\n\n  set_ylim(self, *args, **kwargs):\n\nSet the limits for the yaxis; v = [ymin, ymax]::\n\n  set_ylim((valmin, valmax))\n  set_ylim(valmin, valmax)\n  set_ylim(ymin=1) # ymax unchanged\n  set_ylim(ymax=1) # ymin unchanged\n\nKeyword arguments:\n\n  *ymin*: scalar\n    the min of the ylim\n  *ymax*: scalar\n    the max of the ylim\n  *emit*: [ True | False ]\n    notify observers of lim change\n\nReturns the current ylimits as a length 2 tuple\n\nACCEPTS: len(2) sequence of floats", "id": "f17238:c1:m94"}
{"signature": "def twinx(self):", "body": "ax2 = self.figure.add_axes(self.get_position(True), sharex=self,<EOL>frameon=False)<EOL>ax2.yaxis.tick_right()<EOL>ax2.yaxis.set_label_position('<STR_LIT:right>')<EOL>self.yaxis.tick_left()<EOL>return ax2<EOL>", "docstring": "call signature::\n\n  ax = twinx()\n\ncreate a twin of Axes for generating a plot with a sharex\nx-axis but independent y axis.  The y-axis of self will have\nticks on left and the returned axes will have ticks on the\nright", "id": "f17238:c1:m171"}
{"signature": "def set_ybound(self, lower=None, upper=None):", "body": "if upper is None and iterable(lower):<EOL><INDENT>lower,upper = lower<EOL><DEDENT>old_lower,old_upper = self.get_ybound()<EOL>if lower is None: lower = old_lower<EOL>if upper is None: upper = old_upper<EOL>if self.yaxis_inverted():<EOL><INDENT>if lower < upper:<EOL><INDENT>self.set_ylim(upper, lower)<EOL><DEDENT>else:<EOL><INDENT>self.set_ylim(lower, upper)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if lower < upper:<EOL><INDENT>self.set_ylim(lower, upper)<EOL><DEDENT>else:<EOL><INDENT>self.set_ylim(upper, lower)<EOL><DEDENT><DEDENT>", "docstring": "Set the lower and upper numerical bounds of the y-axis.\n           This method will honor axes inversion regardless of parameter order.", "id": "f17238:c1:m92"}
{"signature": "def set_xbound(self, lower=None, upper=None):", "body": "if upper is None and iterable(lower):<EOL><INDENT>lower,upper = lower<EOL><DEDENT>old_lower,old_upper = self.get_xbound()<EOL>if lower is None: lower = old_lower<EOL>if upper is None: upper = old_upper<EOL>if self.xaxis_inverted():<EOL><INDENT>if lower < upper:<EOL><INDENT>self.set_xlim(upper, lower)<EOL><DEDENT>else:<EOL><INDENT>self.set_xlim(lower, upper)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if lower < upper:<EOL><INDENT>self.set_xlim(lower, upper)<EOL><DEDENT>else:<EOL><INDENT>self.set_xlim(upper, lower)<EOL><DEDENT><DEDENT>", "docstring": "Set the lower and upper numerical bounds of the x-axis.\nThis method will honor axes inversion regardless of parameter order.", "id": "f17238:c1:m78"}
{"signature": "def stem(self, x, y, linefmt='<STR_LIT>', markerfmt='<STR_LIT>', basefmt='<STR_LIT>'):", "body": "remember_hold=self._hold<EOL>if not self._hold: self.cla()<EOL>self.hold(True)<EOL>markerline, = self.plot(x, y, markerfmt)<EOL>stemlines = []<EOL>for thisx, thisy in zip(x, y):<EOL><INDENT>l, = self.plot([thisx,thisx], [<NUM_LIT:0>, thisy], linefmt)<EOL>stemlines.append(l)<EOL><DEDENT>baseline, = self.plot([np.amin(x), np.amax(x)], [<NUM_LIT:0>,<NUM_LIT:0>], basefmt)<EOL>self.hold(remember_hold)<EOL>return markerline, stemlines, baseline<EOL>", "docstring": "call signature::\n\n  stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')\n\nA stem plot plots vertical lines (using *linefmt*) at each *x*\nlocation from the baseline to *y*, and places a marker there\nusing *markerfmt*.  A horizontal line at 0 is is plotted using\n*basefmt*.\n\nReturn value is a tuple (*markerline*, *stemlines*,\n*baseline*).\n\n.. seealso::\n    `this document`__ for details\n\n    :file:`examples/pylab_examples/stem_plot.py`:\n        for a demo\n\n__ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html", "id": "f17238:c1:m150"}
{"signature": "def get_navigate(self):", "body": "return self._navigate<EOL>", "docstring": "Get whether the axes responds to navigation commands", "id": "f17238:c1:m109"}
{"signature": "def get_yticklines(self):", "body": "return cbook.silent_list('<STR_LIT>', self.yaxis.get_ticklines())<EOL>", "docstring": "Get the ytick lines as a list of Line2D instances", "id": "f17238:c1:m42"}
{"signature": "def change_geometry(self, numrows, numcols, num):", "body": "self._rows = numrows<EOL>self._cols = numcols<EOL>self._num = num-<NUM_LIT:1><EOL>self.update_params()<EOL>self.set_position(self.figbox)<EOL>", "docstring": "change subplot geometry, eg. from 1,1,1 to 2,2,3", "id": "f17238:c2:m2"}
{"signature": "def set_ylabel(self, ylabel, fontdict=None, **kwargs):", "body": "label = self.yaxis.get_label()<EOL>label.set_text(ylabel)<EOL>if fontdict is not None: label.update(fontdict)<EOL>label.update(kwargs)<EOL>return label<EOL>", "docstring": "call signature::\n\n  set_ylabel(ylabel, fontdict=None, **kwargs)\n\nSet the label for the yaxis\n\nValid kwargs are Text properties:\n%(Text)s\nACCEPTS: str\n\n.. seealso::\n    :meth:`text`:\n        for information on how override and the optional args work", "id": "f17238:c1:m129"}
{"signature": "def start_pan(self, x, y, button):", "body": "self._pan_start = cbook.Bunch(<EOL>lim           = self.viewLim.frozen(),<EOL>trans         = self.transData.frozen(),<EOL>trans_inverse = self.transData.inverted().frozen(),<EOL>bbox          = self.bbox.frozen(),<EOL>x             = x,<EOL>y             = y<EOL>)<EOL>", "docstring": "Called when a pan operation has started.\n\n*x*, *y* are the mouse coordinates in display coords.\nbutton is the mouse button number:\n\n* 1: LEFT\n* 2: MIDDLE\n* 3: RIGHT\n\n.. note::\n    Intended to be overridden by new projection types.", "id": "f17238:c1:m113"}
{"signature": "def hlines(self, y, xmin, xmax, colors='<STR_LIT:k>', linestyles='<STR_LIT>',<EOL>label='<STR_LIT>', **kwargs):", "body": "if kwargs.get('<STR_LIT>') is not None:<EOL><INDENT>raise DeprecationWarning('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>y = self.convert_yunits( y )<EOL>xmin = self.convert_xunits( xmin )<EOL>xmax = self.convert_xunits( xmax )<EOL>if not iterable(y): y = [y]<EOL>if not iterable(xmin): xmin = [xmin]<EOL>if not iterable(xmax): xmax = [xmax]<EOL>y = np.asarray(y)<EOL>xmin = np.asarray(xmin)<EOL>xmax = np.asarray(xmax)<EOL>if len(xmin)==<NUM_LIT:1>:<EOL><INDENT>xmin = np.resize( xmin, y.shape )<EOL><DEDENT>if len(xmax)==<NUM_LIT:1>:<EOL><INDENT>xmax = np.resize( xmax, y.shape )<EOL><DEDENT>if len(xmin)!=len(y):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if len(xmax)!=len(y):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>verts = [ ((thisxmin, thisy), (thisxmax, thisy))<EOL>for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]<EOL>coll = mcoll.LineCollection(verts, colors=colors,<EOL>linestyles=linestyles, label=label)<EOL>self.add_collection(coll)<EOL>coll.update(kwargs)<EOL>minx = min(xmin.min(), xmax.min())<EOL>maxx = max(xmin.max(), xmax.max())<EOL>miny = y.min()<EOL>maxy = y.max()<EOL>corners = (minx, miny), (maxx, maxy)<EOL>self.update_datalim(corners)<EOL>self.autoscale_view()<EOL>return coll<EOL>", "docstring": "call signature::\n\n  hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)\n\nPlot horizontal lines at each *y* from *xmin* to *xmax*.\n\nReturns the :class:`~matplotlib.collections.LineCollection`\nthat was added.\n\nRequired arguments:\n\n  *y*:\n    a 1-D numpy array or iterable.\n\n  *xmin* and *xmax*:\n    can be scalars or ``len(x)`` numpy arrays.  
If they are\n    scalars, then the respective values are constant, else the\n    widths of the lines are determined by *xmin* and *xmax*.\n\nOptional keyword arguments:\n\n  *colors*:\n    a line collections color argument, either a single color\n    or a ``len(y)`` list of colors\n\n  *linestyles*:\n    [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/hline_demo.py", "id": "f17238:c1:m136"}
{"signature": "def draw_artist(self, a):", "body": "assert self._cachedRenderer is not None<EOL>a.draw(self._cachedRenderer)<EOL>", "docstring": "This method can only be used after an initial draw which\ncaches the renderer.  It is used to efficiently update Axes\ndata (axis ticks, labels, etc are not updated)", "id": "f17238:c1:m61"}
{"signature": "def get_xticklabels(self, minor=False):", "body": "return cbook.silent_list('<STR_LIT>',<EOL>self.xaxis.get_ticklabels(minor=minor))<EOL>", "docstring": "Get the xtick labels as a list of Text instances", "id": "f17238:c1:m87"}
{"signature": "def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):", "body": "if self.xaxis is None or self.yaxis is None: return<EOL>if xdata is not None:<EOL><INDENT>if not self.xaxis.have_units():<EOL><INDENT>self.xaxis.update_units(xdata)<EOL><DEDENT><DEDENT>if ydata is not None:<EOL><INDENT>if not self.yaxis.have_units():<EOL><INDENT>self.yaxis.update_units(ydata)<EOL><DEDENT><DEDENT>if kwargs is not None:<EOL><INDENT>xunits = kwargs.pop( '<STR_LIT>', self.xaxis.units)<EOL>if xunits!=self.xaxis.units:<EOL><INDENT>self.xaxis.set_units(xunits)<EOL>if xdata is not None:<EOL><INDENT>self.xaxis.update_units(xdata)<EOL><DEDENT><DEDENT>yunits = kwargs.pop('<STR_LIT>', self.yaxis.units)<EOL>if yunits!=self.yaxis.units:<EOL><INDENT>self.yaxis.set_units(yunits)<EOL>if ydata is not None:<EOL><INDENT>self.yaxis.update_units(ydata)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "look for unit *kwargs* and update the axis instances as necessary", "id": "f17238:c1:m55"}
{"signature": "def ishold(self):", "body": "return self._hold<EOL>", "docstring": "return the HOLD status of the axes", "id": "f17238:c1:m21"}
{"signature": "def __pick(self, x, y, trans=None, among=None):", "body": "<EOL>if trans is not None:<EOL><INDENT>xywin = trans.transform_point((x,y))<EOL><DEDENT>else:<EOL><INDENT>xywin = x,y<EOL><DEDENT>def dist_points(p1, p2):<EOL><INDENT>'<STR_LIT>'<EOL>x1, y1 = p1<EOL>x2, y2 = p2<EOL>return math.sqrt((x1-x2)**<NUM_LIT:2>+(y1-y2)**<NUM_LIT:2>)<EOL><DEDENT>def dist_x_y(p1, x, y):<EOL><INDENT>'<STR_LIT>'<EOL>x1, y1 = p1<EOL>return min(np.sqrt((x-x1)**<NUM_LIT:2>+(y-y1)**<NUM_LIT:2>))<EOL><DEDENT>def dist(a):<EOL><INDENT>if isinstance(a, Text):<EOL><INDENT>bbox = a.get_window_extent()<EOL>l,b,w,h = bbox.bounds<EOL>verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)<EOL>xt, yt = list(zip(*verts))<EOL><DEDENT>elif isinstance(a, Patch):<EOL><INDENT>path = a.get_path()<EOL>tverts = a.get_transform().transform_path(path)<EOL>xt, yt = list(zip(*tverts))<EOL><DEDENT>elif isinstance(a, mlines.Line2D):<EOL><INDENT>xdata = a.get_xdata(orig=False)<EOL>ydata = a.get_ydata(orig=False)<EOL>xt, yt = a.get_transform().numerix_x_y(xdata, ydata)<EOL><DEDENT>return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))<EOL><DEDENT>artists = self.lines + self.patches + self.texts<EOL>if callable(among):<EOL><INDENT>artists = list(filter(test, artists))<EOL><DEDENT>elif iterable(among):<EOL><INDENT>amongd = dict([(k,<NUM_LIT:1>) for k in among])<EOL>artists = [a for a in artists if a in amongd]<EOL><DEDENT>elif among is None:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not len(artists): return None<EOL>ds = [ (dist(a),a) for a in artists]<EOL>ds.sort()<EOL>return ds[<NUM_LIT:0>][<NUM_LIT:1>]<EOL>", "docstring": "Return the artist under point that is closest to the *x*, *y*.\nIf *trans* is *None*, *x*, and *y* are in window coords,\n(0,0 = lower left).  
Otherwise, *trans* is a\n:class:`~matplotlib.transforms.Transform` that specifies the\ncoordinate system of *x*, *y*.\n\nThe selection of artists from amongst which the pick function\nfinds an artist can be narrowed using the optional keyword\nargument *among*. If provided, this should be either a sequence\nof permitted artists or a function taking an artist as its\nargument and returning a true value if and only if that artist\ncan be selected.\n\nNote this algorithm calculates distance to the vertices of the\npolygon, so if you want to pick a patch, click on the edge!", "id": "f17238:c1:m123"}
{"signature": "def set_xscale(self, value, **kwargs):", "body": "self.xaxis.set_scale(value, **kwargs)<EOL>self.autoscale_view()<EOL>self._update_transScale()<EOL>", "docstring": "call signature::\n\n  set_xscale(value)\n\nSet the scaling of the x-axis: %(scale)s\n\nACCEPTS: [%(scale)s]\n\nDifferent kwargs are accepted, depending on the scale:\n%(scale_docs)s", "id": "f17238:c1:m82"}
{"signature": "def set_anchor(self, anchor):", "body": "if anchor in list(mtransforms.Bbox.coefs.keys()) or len(anchor) == <NUM_LIT:2>:<EOL><INDENT>self._anchor = anchor<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' %<EOL>'<STR_LIT:U+002CU+0020>'.join(list(mtransforms.BBox.coefs.keys())))<EOL><DEDENT>", "docstring": "*anchor*\n\n  =====  ============\n  value  description\n  =====  ============\n  'C'    Center\n  'SW'   bottom left\n  'S'    bottom\n  'SE'   bottom right\n  'E'    right\n  'NE'   top right\n  'N'    top\n  'NW'   top left\n  'W'    left\n  =====  ============", "id": "f17238:c1:m28"}
{"signature": "def get_images(self):", "body": "return cbook.silent_list('<STR_LIT>', self.images)<EOL>", "docstring": "return a list of Axes images contained by the Axes", "id": "f17238:c1:m35"}
{"signature": "def get_ybound(self):", "body": "left, right = self.get_ylim()<EOL>if left < right:<EOL><INDENT>return left, right<EOL><DEDENT>else:<EOL><INDENT>return right, left<EOL><DEDENT>", "docstring": "Return y-axis numerical bounds in the form of lowerBound < upperBound", "id": "f17238:c1:m91"}
{"signature": "def set_adjustable(self, adjustable):", "body": "if adjustable in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if self in self._shared_x_axes or self in self._shared_y_axes:<EOL><INDENT>if adjustable == '<STR_LIT>':<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>self._adjustable = adjustable<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>", "docstring": "ACCEPTS: [ 'box' | 'datalim' ]", "id": "f17238:c1:m26"}
{"signature": "def step(self, x, y, *args, **kwargs):", "body": "where = kwargs.pop('<STR_LIT>', '<STR_LIT>')<EOL>if where not in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>kwargs['<STR_LIT>'] = '<STR_LIT>' + where<EOL>return self.plot(x, y, *args, **kwargs)<EOL>", "docstring": "call signature::\n\n  step(x, y, *args, **kwargs)\n\nMake a step plot. Additional keyword args to :func:`step` are the same\nas those for :func:`~matplotlib.pyplot.plot`.\n\n*x* and *y* must be 1-D sequences, and it is assumed, but not checked,\nthat *x* is uniformly increasing.\n\nKeyword arguments:\n\n*where*: [ 'pre' | 'post' | 'mid'  ]\n  If 'pre', the interval from x[i] to x[i+1] has level y[i]\n\n  If 'post', that interval has level y[i+1]\n\n  If 'mid', the jumps in *y* occur half-way between the\n  *x*-values.", "id": "f17238:c1:m146"}
{"signature": "def matshow(self, Z, **kwargs):", "body": "Z = np.asarray(Z)<EOL>nr, nc = Z.shape<EOL>extent = [-<NUM_LIT:0.5>, nc-<NUM_LIT:0.5>, nr-<NUM_LIT:0.5>, -<NUM_LIT:0.5>]<EOL>kw = {'<STR_LIT>': extent,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>'}          <EOL>kw.update(kwargs)<EOL>im = self.imshow(Z, **kw)<EOL>self.title.set_y(<NUM_LIT>)<EOL>self.xaxis.tick_top()<EOL>self.xaxis.set_ticks_position('<STR_LIT>')<EOL>self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=<NUM_LIT:9>,<EOL>steps=[<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:5>, <NUM_LIT:10>],<EOL>integer=True))<EOL>self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=<NUM_LIT:9>,<EOL>steps=[<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:5>, <NUM_LIT:10>],<EOL>integer=True))<EOL>return im<EOL>", "docstring": "Plot a matrix or array as an image.\n\nThe matrix will be shown the way it would be printed,\nwith the first row at the top.  Row and column numbering\nis zero-based.\n\nArgument:\n    *Z*   anything that can be interpreted as a 2-D array\n\nkwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.\n:meth:`matshow` sets defaults for *extent*, *origin*,\n*interpolation*, and *aspect*; use care in overriding the\n*extent* and *origin* kwargs, because they interact.  (Also,\nif you want to change them, you probably should be using\nimshow directly in your own version of matshow.)\n\nReturns: an :class:`matplotlib.image.AxesImage` instance.", "id": "f17238:c1:m181"}
{"signature": "def get_frame(self):", "body": "warnings.warn('<STR_LIT>', DeprecationWarning)<EOL>return self.patch<EOL>", "docstring": "Return the axes Rectangle frame", "id": "f17238:c1:m33"}
{"signature": "def get_geometry(self):", "body": "return self._rows, self._cols, self._num+<NUM_LIT:1><EOL>", "docstring": "get the subplot geometry, eg 2,2,3", "id": "f17238:c2:m1"}
{"signature": "def cohere(self, x, y, NFFT=<NUM_LIT>, Fs=<NUM_LIT:2>, Fc=<NUM_LIT:0>, detrend=mlab.detrend_none,<EOL>window=mlab.window_hanning, noverlap=<NUM_LIT:0>, pad_to=None,<EOL>sides='<STR_LIT:default>', scale_by_freq=None, **kwargs):", "body": "if not self._hold: self.cla()<EOL>cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,<EOL>scale_by_freq)<EOL>freqs += Fc<EOL>self.plot(freqs, cxy, **kwargs)<EOL>self.set_xlabel('<STR_LIT>')<EOL>self.set_ylabel('<STR_LIT>')<EOL>self.grid(True)<EOL>return cxy, freqs<EOL>", "docstring": "call signature::\n\n  cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,\n         window = mlab.window_hanning, noverlap=0, pad_to=None,\n         sides='default', scale_by_freq=None, **kwargs)\n\ncohere the coherence between *x* and *y*.  Coherence is the normalized\ncross spectral density:\n\n.. math::\n\n  C_{xy} = \\\\frac{|P_{xy}|^2}{P_{xx}P_{yy}}\n\n%(PSD)s\n\n  *Fc*: integer\n    The center frequency of *x* (defaults to 0), which offsets\n    the x extents of the plot to reflect the frequency range used\n    when a signal is acquired and then filtered and downsampled to\n    baseband.\n\nThe return value is a tuple (*Cxy*, *f*), where *f* are the\nfrequencies of the coherence vector.\n\nkwargs are applied to the lines.\n\nReferences:\n\n  * Bendat & Piersol -- Random Data: Analysis and Measurement\n    Procedures, John Wiley & Sons (1986)\n\nkwargs control the :class:`~matplotlib.lines.Line2D`\nproperties of the coherence plot:\n\n%(Line2D)s\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/cohere_demo.py", "id": "f17238:c1:m178"}
{"signature": "def _update_patch_limits(self, patch):", "body": "<EOL>if (isinstance(patch, mpatches.Rectangle) and<EOL>(patch.get_width()==<NUM_LIT:0> or patch.get_height()==<NUM_LIT:0>)):<EOL><INDENT>return<EOL><DEDENT>vertices = patch.get_path().vertices<EOL>if vertices.size > <NUM_LIT:0>:<EOL><INDENT>xys = patch.get_patch_transform().transform(vertices)<EOL>if patch.get_data_transform() != self.transData:<EOL><INDENT>transform = (patch.get_data_transform() +<EOL>self.transData.inverted())<EOL>xys = transform.transform(xys)<EOL><DEDENT>self.update_datalim(xys, updatex=patch.x_isdata,<EOL>updatey=patch.y_isdata)<EOL><DEDENT>", "docstring": "update the data limits for patch *p*", "id": "f17238:c1:m49"}
{"signature": "def update_params(self):", "body": "rows = self._rows<EOL>cols = self._cols<EOL>num = self._num<EOL>pars = self.figure.subplotpars<EOL>left = pars.left<EOL>right = pars.right<EOL>bottom = pars.bottom<EOL>top = pars.top<EOL>wspace = pars.wspace<EOL>hspace = pars.hspace<EOL>totWidth = right-left<EOL>totHeight = top-bottom<EOL>figH = totHeight/(rows + hspace*(rows-<NUM_LIT:1>))<EOL>sepH = hspace*figH<EOL>figW = totWidth/(cols + wspace*(cols-<NUM_LIT:1>))<EOL>sepW = wspace*figW<EOL>rowNum, colNum =  divmod(num, cols)<EOL>figBottom = top - (rowNum+<NUM_LIT:1>)*figH - rowNum*sepH<EOL>figLeft = left + colNum*(figW + sepW)<EOL>self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,<EOL>figW, figH)<EOL>self.rowNum = rowNum<EOL>self.colNum = colNum<EOL>self.numRows = rows<EOL>self.numCols = cols<EOL>if <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>', rows, cols, num)<EOL>print('<STR_LIT>', left, bottom, right, top)<EOL>print('<STR_LIT>', self.figBottom)<EOL>print('<STR_LIT>', self.figLeft)<EOL>print('<STR_LIT>', self.figW)<EOL>print('<STR_LIT>', self.figH)<EOL>print('<STR_LIT>', self.rowNum)<EOL>print('<STR_LIT>', self.colNum)<EOL>print('<STR_LIT>', self.numRows)<EOL>print('<STR_LIT>', self.numCols)<EOL><DEDENT>", "docstring": "update the subplot position from fig.subplotpars", "id": "f17238:c2:m3"}
{"signature": "def in_axes(self, mouseevent):", "body": "return self.patch.contains(mouseevent)[<NUM_LIT:0>]<EOL>", "docstring": "return *True* if the given *mouseevent* (in display coords)\nis in the Axes", "id": "f17238:c1:m56"}
{"signature": "def get_xgridlines(self):", "body": "return cbook.silent_list('<STR_LIT>', self.xaxis.get_gridlines())<EOL>", "docstring": "Get the x grid lines as a list of Line2D instances", "id": "f17238:c1:m38"}
{"signature": "def get_xbound(self):", "body": "left, right = self.get_xlim()<EOL>if left < right:<EOL><INDENT>return left, right<EOL><DEDENT>else:<EOL><INDENT>return right, left<EOL><DEDENT>", "docstring": "Returns the x-axis numerical bounds where::\n\n  lowerBound < upperBound", "id": "f17238:c1:m77"}
{"signature": "def get_axis_bgcolor(self):", "body": "return self._axisbg<EOL>", "docstring": "Return the axis background color", "id": "f17238:c1:m73"}
{"signature": "def get_xlabel(self):", "body": "label = self.xaxis.get_label()<EOL>return label.get_text()<EOL>", "docstring": "Get the xlabel text string.", "id": "f17238:c1:m126"}
{"signature": "def imshow(self, X, cmap=None, norm=None, aspect=None,<EOL>interpolation=None, alpha=<NUM_LIT:1.0>, vmin=None, vmax=None,<EOL>origin=None, extent=None, shape=None, filternorm=<NUM_LIT:1>,<EOL>filterrad=<NUM_LIT>, imlim=None, resample=None, url=None, **kwargs):", "body": "if not self._hold: self.cla()<EOL>if norm is not None: assert(isinstance(norm, mcolors.Normalize))<EOL>if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))<EOL>if aspect is None: aspect = rcParams['<STR_LIT>']<EOL>self.set_aspect(aspect)<EOL>im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,<EOL>filternorm=filternorm,<EOL>filterrad=filterrad, resample=resample, **kwargs)<EOL>im.set_data(X)<EOL>im.set_alpha(alpha)<EOL>self._set_artist_props(im)<EOL>im.set_clip_path(self.patch)<EOL>if vmin is not None or vmax is not None:<EOL><INDENT>im.set_clim(vmin, vmax)<EOL><DEDENT>else:<EOL><INDENT>im.autoscale_None()<EOL><DEDENT>im.set_url(url)<EOL>xmin, xmax, ymin, ymax = im.get_extent()<EOL>corners = (xmin, ymin), (xmax, ymax)<EOL>self.update_datalim(corners)<EOL>if self._autoscaleon:<EOL><INDENT>self.set_xlim((xmin, xmax))<EOL>self.set_ylim((ymin, ymax))<EOL><DEDENT>self.images.append(im)<EOL>return im<EOL>", "docstring": "call signature::\n\n  imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,\n         alpha=1.0, vmin=None, vmax=None, origin=None, extent=None,\n         **kwargs)\n\nDisplay the image in *X* to current axes.  *X* may be a float\narray, a uint8 array or a PIL image. 
If *X* is an array, *X*\ncan have the following shapes:\n\n* MxN -- luminance (grayscale, float array only)\n* MxNx3 -- RGB (float or uint8 array)\n* MxNx4 -- RGBA (float or uint8 array)\n\nThe value for each component of MxNx3 and MxNx4 float arrays should be\nin the range 0.0 to 1.0; MxN float arrays may be normalised.\n\nAn :class:`matplotlib.image.AxesImage` instance is returned.\n\nKeyword arguments:\n\n  *cmap*: [ None | Colormap ]\n    A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet.\n    If *None*, default to rc ``image.cmap`` value.\n\n    *cmap* is ignored when *X* has RGB(A) information\n\n  *aspect*: [ None | 'auto' | 'equal' | scalar ]\n    If 'auto', changes the image aspect ratio to match that of the axes\n\n    If 'equal', and *extent* is *None*, changes the axes\n    aspect ratio to match that of the image. If *extent* is\n    not *None*, the axes aspect ratio is changed to match that\n    of the extent.\n\n    If *None*, default to rc ``image.aspect`` value.\n\n  *interpolation*:\n\n    Acceptable values are *None*, 'nearest', 'bilinear',\n      'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',\n      'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',\n      'bessel', 'mitchell', 'sinc', 'lanczos',\n\n\n    If *interpolation* is *None*, default to rc\n    ``image.interpolation``. See also the *filternorm* and\n    *filterrad* parameters\n\n  *norm*: [ None | Normalize ]\n    An :class:`matplotlib.colors.Normalize` instance; if\n    *None*, default is ``normalization()``.  This scales\n    luminance -> 0-1\n\n    *norm* is only used for an MxN float array.\n\n  *vmin*/*vmax*: [ None | scalar ]\n    Used to scale a luminance image to 0-1.  If either is\n    *None*, the min and max of the luminance values will be\n    used.  
Note if *norm* is not *None*, the settings for\n    *vmin* and *vmax* will be ignored.\n\n  *alpha*: scalar\n    The alpha blending value, between 0 (transparent) and 1 (opaque)\n\n  *origin*: [ None | 'upper' | 'lower' ]\n    Place the [0,0] index of the array in the upper left or lower left\n    corner of the axes. If *None*, default to rc ``image.origin``.\n\n  *extent*: [ None | scalars (left, right, bottom, top) ]\n    Eata values of the axes.  The default assigns zero-based row,\n    column indices to the *x*, *y* centers of the pixels.\n\n  *shape*: [ None | scalars (columns, rows) ]\n    For raw buffer images\n\n  *filternorm*:\n    A parameter for the antigrain image resize filter.  From the\n    antigrain documentation, if *filternorm* = 1, the filter normalizes\n    integer values and corrects the rounding errors. It doesn't do\n    anything with the source floating point values, it corrects only\n    integers according to the rule of 1.0 which means that any sum of\n    pixel weights must be equal to 1.0.  So, the filter function must\n    produce a graph of the proper shape.\n\n  *filterrad*:\n    The filter radius for filters that have a radius\n    parameter, i.e. when interpolation is one of: 'sinc',\n    'lanczos' or 'blackman'\n\nAdditional kwargs are :class:`~matplotlib.artist.Artist` properties:\n\n%(Artist)s\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/image_demo.py", "id": "f17238:c1:m162"}
{"signature": "def get_yaxis(self):", "body": "return self.yaxis<EOL>", "docstring": "Return the YAxis instance", "id": "f17238:c1:m40"}
{"signature": "def get_yminorticklabels(self):", "body": "return cbook.silent_list('<STR_LIT>',<EOL>self.yaxis.get_minorticklabels())<EOL>", "docstring": "Get the xtick labels as a list of Text instances", "id": "f17238:c1:m100"}
{"signature": "def boxplot(self, x, notch=<NUM_LIT:0>, sym='<STR_LIT>', vert=<NUM_LIT:1>, whis=<NUM_LIT>,<EOL>positions=None, widths=None):", "body": "if not self._hold: self.cla()<EOL>holdStatus = self._hold<EOL>whiskers, caps, boxes, medians, fliers = [], [], [], [], []<EOL>if hasattr(x, '<STR_LIT>'):<EOL><INDENT>if len(x.shape) == <NUM_LIT:1>:<EOL><INDENT>if hasattr(x[<NUM_LIT:0>], '<STR_LIT>'):<EOL><INDENT>x = list(x)<EOL><DEDENT>else:<EOL><INDENT>x = [x,]<EOL><DEDENT><DEDENT>elif len(x.shape) == <NUM_LIT:2>:<EOL><INDENT>nr, nc = x.shape<EOL>if nr == <NUM_LIT:1>:<EOL><INDENT>x = [x]<EOL><DEDENT>elif nc == <NUM_LIT:1>:<EOL><INDENT>x = [x.ravel()]<EOL><DEDENT>else:<EOL><INDENT>x = [x[:,i] for i in range(nc)]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if not hasattr(x[<NUM_LIT:0>], '<STR_LIT>'):<EOL><INDENT>x = [x]<EOL><DEDENT>col = len(x)<EOL>if positions is None:<EOL><INDENT>positions = list(range(<NUM_LIT:1>, col + <NUM_LIT:1>))<EOL><DEDENT>if widths is None:<EOL><INDENT>distance = max(positions) - min(positions)<EOL>widths = min(<NUM_LIT>*max(distance,<NUM_LIT:1.0>), <NUM_LIT:0.5>)<EOL><DEDENT>if isinstance(widths, float) or isinstance(widths, int):<EOL><INDENT>widths = np.ones((col,), float) * widths<EOL><DEDENT>self.hold(True)<EOL>for i,pos in enumerate(positions):<EOL><INDENT>d = np.ravel(x[i])<EOL>row = len(d)<EOL>q1, med, q3 = mlab.prctile(d,[<NUM_LIT>,<NUM_LIT:50>,<NUM_LIT>])<EOL>iq = q3 - q1<EOL>hi_val = q3 + whis*iq<EOL>wisk_hi = np.compress( d <= hi_val , d )<EOL>if len(wisk_hi) == <NUM_LIT:0>:<EOL><INDENT>wisk_hi = q3<EOL><DEDENT>else:<EOL><INDENT>wisk_hi = max(wisk_hi)<EOL><DEDENT>lo_val = q1 - whis*iq<EOL>wisk_lo = np.compress( d >= lo_val, d )<EOL>if len(wisk_lo) == <NUM_LIT:0>:<EOL><INDENT>wisk_lo = q1<EOL><DEDENT>else:<EOL><INDENT>wisk_lo = min(wisk_lo)<EOL><DEDENT>flier_hi = []<EOL>flier_lo = []<EOL>flier_hi_x = []<EOL>flier_lo_x = []<EOL>if len(sym) != <NUM_LIT:0>:<EOL><INDENT>flier_hi = 
np.compress( d > wisk_hi, d )<EOL>flier_lo = np.compress( d < wisk_lo, d )<EOL>flier_hi_x = np.ones(flier_hi.shape[<NUM_LIT:0>]) * pos<EOL>flier_lo_x = np.ones(flier_lo.shape[<NUM_LIT:0>]) * pos<EOL><DEDENT>box_x_min = pos - widths[i] * <NUM_LIT:0.5><EOL>box_x_max = pos + widths[i] * <NUM_LIT:0.5><EOL>wisk_x = np.ones(<NUM_LIT:2>) * pos<EOL>cap_x_min = pos - widths[i] * <NUM_LIT><EOL>cap_x_max = pos + widths[i] * <NUM_LIT><EOL>cap_x = [cap_x_min, cap_x_max]<EOL>med_y = [med, med]<EOL>if notch == <NUM_LIT:0>:<EOL><INDENT>box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]<EOL>box_y = [q1, q1, q3, q3, q1 ]<EOL>med_x = [box_x_min, box_x_max]<EOL><DEDENT>else:<EOL><INDENT>notch_max = med + <NUM_LIT>*iq/np.sqrt(row)<EOL>notch_min = med - <NUM_LIT>*iq/np.sqrt(row)<EOL>if notch_max > q3:<EOL><INDENT>notch_max = q3<EOL><DEDENT>if notch_min < q1:<EOL><INDENT>notch_min = q1<EOL><DEDENT>box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,<EOL>box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,<EOL>box_x_min ]<EOL>box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,<EOL>med, notch_min, q1]<EOL>med_x = [cap_x_min, cap_x_max]<EOL>med_y = [med, med]<EOL><DEDENT>if vert:<EOL><INDENT>def doplot(*args):<EOL><INDENT>return self.plot(*args)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>def doplot(*args):<EOL><INDENT>shuffled = []<EOL>for i in range(<NUM_LIT:0>, len(args), <NUM_LIT:3>):<EOL><INDENT>shuffled.extend([args[i+<NUM_LIT:1>], args[i], args[i+<NUM_LIT:2>]])<EOL><DEDENT>return self.plot(*shuffled)<EOL><DEDENT><DEDENT>whiskers.extend(doplot(wisk_x, [q1, wisk_lo], '<STR_LIT>',<EOL>wisk_x, [q3, wisk_hi], '<STR_LIT>'))<EOL>caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], '<STR_LIT>',<EOL>cap_x, [wisk_lo, wisk_lo], '<STR_LIT>'))<EOL>boxes.extend(doplot(box_x, box_y, '<STR_LIT>'))<EOL>medians.extend(doplot(med_x, med_y, '<STR_LIT>'))<EOL>fliers.extend(doplot(flier_hi_x, flier_hi, sym,<EOL>flier_lo_x, flier_lo, sym))<EOL><DEDENT>if <NUM_LIT:1> == 
vert:<EOL><INDENT>setticks, setlim = self.set_xticks, self.set_xlim<EOL><DEDENT>else:<EOL><INDENT>setticks, setlim = self.set_yticks, self.set_ylim<EOL><DEDENT>newlimits = min(positions)-<NUM_LIT:0.5>, max(positions)+<NUM_LIT:0.5><EOL>setlim(newlimits)<EOL>setticks(positions)<EOL>self.hold(holdStatus)<EOL>return dict(whiskers=whiskers, caps=caps, boxes=boxes,<EOL>medians=medians, fliers=fliers)<EOL>", "docstring": "call signature::\n\n  boxplot(x, notch=0, sym='+', vert=1, whis=1.5,\n          positions=None, widths=None)\n\nMake a box and whisker plot for each column of *x* or each\nvector in sequence *x*.  The box extends from the lower to\nupper quartile values of the data, with a line at the median.\nThe whiskers extend from the box to show the range of the\ndata.  Flier points are those past the end of the whiskers.\n\n- *notch* = 0 (default) produces a rectangular box plot.\n- *notch* = 1 will produce a notched box plot\n\n*sym* (default 'b+') is the default symbol for flier points.\nEnter an empty string ('') if you don't want to show fliers.\n\n- *vert* = 1 (default) makes the boxes vertical.\n- *vert* = 0 makes horizontal boxes.  This seems goofy, but\n  that's how Matlab did it.\n\n*whis* (default 1.5) defines the length of the whiskers as\na function of the inner quartile range.  They extend to the\nmost extreme data point within ( ``whis*(75%-25%)`` ) data range.\n\n*positions* (default 1,2,...,n) sets the horizontal positions of\nthe boxes. The ticks and limits are automatically set to match\nthe positions.\n\n*widths* is either a scalar or a vector and sets the width of\neach box. The default is 0.5, or ``0.15*(distance between extreme\npositions)`` if that is smaller.\n\n*x* is an array or a sequence of vectors.\n\nReturns a dictionary mapping each component of the boxplot\nto a list of the :class:`matplotlib.lines.Line2D`\ninstances created.\n\n**Example:**\n\n.. plot:: pyplots/boxplot_demo.py", "id": "f17238:c1:m153"}
{"signature": "def twiny(self):", "body": "ax2 = self.figure.add_axes(self.get_position(True), sharey=self,<EOL>frameon=False)<EOL>ax2.xaxis.tick_top()<EOL>ax2.xaxis.set_label_position('<STR_LIT>')<EOL>self.xaxis.tick_bottom()<EOL>return ax2<EOL>", "docstring": "call signature::\n\n  ax = twiny()\n\ncreate a twin of Axes for generating a plot with a shared\ny-axis but independent x axis.  The x-axis of self will have\nticks on bottom and the returned axes will have ticks on the\ntop", "id": "f17238:c1:m172"}
{"signature": "def hexbin(self, x, y, C = None, gridsize = <NUM_LIT:100>, bins = None,<EOL>xscale = '<STR_LIT>', yscale = '<STR_LIT>',<EOL>cmap=None, norm=None, vmin=None, vmax=None,<EOL>alpha=<NUM_LIT:1.0>, linewidths=None, edgecolors='<STR_LIT:none>',<EOL>reduce_C_function = np.mean,<EOL>**kwargs):", "body": "if not self._hold: self.cla()<EOL>self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)<EOL>x, y, C = cbook.delete_masked_points(x, y, C)<EOL>if iterable(gridsize):<EOL><INDENT>nx, ny = gridsize<EOL><DEDENT>else:<EOL><INDENT>nx = gridsize<EOL>ny = int(nx/math.sqrt(<NUM_LIT:3>))<EOL><DEDENT>x = np.array(x, float)<EOL>y = np.array(y, float)<EOL>if xscale=='<STR_LIT>':<EOL><INDENT>x = np.log10(x)<EOL><DEDENT>if yscale=='<STR_LIT>':<EOL><INDENT>y = np.log10(y)<EOL><DEDENT>xmin = np.amin(x)<EOL>xmax = np.amax(x)<EOL>ymin = np.amin(y)<EOL>ymax = np.amax(y)<EOL>padding = <NUM_LIT> * (xmax - xmin)<EOL>xmin -= padding<EOL>xmax += padding<EOL>sx = (xmax-xmin) / nx<EOL>sy = (ymax-ymin) / ny<EOL>x = (x-xmin)/sx<EOL>y = (y-ymin)/sy<EOL>ix1 = np.round(x).astype(int)<EOL>iy1 = np.round(y).astype(int)<EOL>ix2 = np.floor(x).astype(int)<EOL>iy2 = np.floor(y).astype(int)<EOL>nx1 = nx + <NUM_LIT:1><EOL>ny1 = ny + <NUM_LIT:1><EOL>nx2 = nx<EOL>ny2 = ny<EOL>n = nx1*ny1+nx2*ny2<EOL>d1 = (x-ix1)**<NUM_LIT:2> + <NUM_LIT> * (y-iy1)**<NUM_LIT:2><EOL>d2 = (x-ix2-<NUM_LIT:0.5>)**<NUM_LIT:2> + <NUM_LIT> * (y-iy2-<NUM_LIT:0.5>)**<NUM_LIT:2><EOL>bdist = (d1<d2)<EOL>if C is None:<EOL><INDENT>accum = np.zeros(n)<EOL>lattice1 = accum[:nx1*ny1]<EOL>lattice2 = accum[nx1*ny1:]<EOL>lattice1.shape = (nx1,ny1)<EOL>lattice2.shape = (nx2,ny2)<EOL>for i in range(len(x)):<EOL><INDENT>if bdist[i]:<EOL><INDENT>lattice1[ix1[i], iy1[i]]+=<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>lattice2[ix2[i], iy2[i]]+=<NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>lattice1 = np.empty((nx1,ny1),dtype=object)<EOL>for i in range(nx1):<EOL><INDENT>for j in range(ny1):<EOL><INDENT>lattice1[i,j] = 
[]<EOL><DEDENT><DEDENT>lattice2 = np.empty((nx2,ny2),dtype=object)<EOL>for i in range(nx2):<EOL><INDENT>for j in range(ny2):<EOL><INDENT>lattice2[i,j] = []<EOL><DEDENT><DEDENT>for i in range(len(x)):<EOL><INDENT>if bdist[i]:<EOL><INDENT>lattice1[ix1[i], iy1[i]].append( C[i] )<EOL><DEDENT>else:<EOL><INDENT>lattice2[ix2[i], iy2[i]].append( C[i] )<EOL><DEDENT><DEDENT>for i in range(nx1):<EOL><INDENT>for j in range(ny1):<EOL><INDENT>vals = lattice1[i,j]<EOL>if len(vals):<EOL><INDENT>lattice1[i,j] = reduce_C_function( vals )<EOL><DEDENT>else:<EOL><INDENT>lattice1[i,j] = np.nan<EOL><DEDENT><DEDENT><DEDENT>for i in range(nx2):<EOL><INDENT>for j in range(ny2):<EOL><INDENT>vals = lattice2[i,j]<EOL>if len(vals):<EOL><INDENT>lattice2[i,j] = reduce_C_function( vals )<EOL><DEDENT>else:<EOL><INDENT>lattice2[i,j] = np.nan<EOL><DEDENT><DEDENT><DEDENT>accum = np.hstack((<EOL>lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))<EOL>good_idxs = ~np.isnan(accum)<EOL><DEDENT>px = xmin + sx * np.array([ <NUM_LIT:0.5>, <NUM_LIT:0.5>, <NUM_LIT:0.0>, -<NUM_LIT:0.5>, -<NUM_LIT:0.5>,  <NUM_LIT:0.0>])<EOL>py = ymin + sy * np.array([-<NUM_LIT:0.5>, <NUM_LIT:0.5>, <NUM_LIT:1.0>,  <NUM_LIT:0.5>, -<NUM_LIT:0.5>, -<NUM_LIT:1.0>]) / <NUM_LIT><EOL>polygons = np.zeros((<NUM_LIT:6>, n, <NUM_LIT:2>), float)<EOL>polygons[:,:nx1*ny1,<NUM_LIT:0>] = np.repeat(np.arange(nx1), ny1)<EOL>polygons[:,:nx1*ny1,<NUM_LIT:1>] = np.tile(np.arange(ny1), nx1)<EOL>polygons[:,nx1*ny1:,<NUM_LIT:0>] = np.repeat(np.arange(nx2) + <NUM_LIT:0.5>, ny2)<EOL>polygons[:,nx1*ny1:,<NUM_LIT:1>] = np.tile(np.arange(ny2), nx2) + <NUM_LIT:0.5><EOL>if C is not None:<EOL><INDENT>polygons = polygons[:,good_idxs,:]<EOL>accum = accum[good_idxs]<EOL><DEDENT>polygons = np.transpose(polygons, axes=[<NUM_LIT:1>,<NUM_LIT:0>,<NUM_LIT:2>])<EOL>polygons[:,:,<NUM_LIT:0>] *= sx<EOL>polygons[:,:,<NUM_LIT:1>] *= sy<EOL>polygons[:,:,<NUM_LIT:0>] += px<EOL>polygons[:,:,<NUM_LIT:1>] += py<EOL>if 
xscale=='<STR_LIT>':<EOL><INDENT>polygons[:,:,<NUM_LIT:0>] = <NUM_LIT:10>**(polygons[:,:,<NUM_LIT:0>])<EOL>xmin = <NUM_LIT:10>**xmin<EOL>xmax = <NUM_LIT:10>**xmax<EOL>self.set_xscale('<STR_LIT>')<EOL><DEDENT>if yscale=='<STR_LIT>':<EOL><INDENT>polygons[:,:,<NUM_LIT:1>] = <NUM_LIT:10>**(polygons[:,:,<NUM_LIT:1>])<EOL>ymin = <NUM_LIT:10>**ymin<EOL>ymax = <NUM_LIT:10>**ymax<EOL>self.set_yscale('<STR_LIT>')<EOL><DEDENT>if edgecolors=='<STR_LIT:none>':<EOL><INDENT>edgecolors = '<STR_LIT>'<EOL><DEDENT>collection = mcoll.PolyCollection(<EOL>polygons,<EOL>edgecolors = edgecolors,<EOL>linewidths = linewidths,<EOL>transOffset = self.transData,<EOL>)<EOL>if bins=='<STR_LIT>':<EOL><INDENT>accum = np.log10(accum+<NUM_LIT:1>)<EOL><DEDENT>elif bins!=None:<EOL><INDENT>if not iterable(bins):<EOL><INDENT>minimum, maximum = min(accum), max(accum)<EOL>bins-=<NUM_LIT:1> <EOL>bins = minimum + (maximum-minimum)*np.arange(bins)/bins<EOL><DEDENT>bins = np.sort(bins)<EOL>accum = bins.searchsorted(accum)<EOL><DEDENT>if norm is not None: assert(isinstance(norm, mcolors.Normalize))<EOL>if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))<EOL>collection.set_array(accum)<EOL>collection.set_cmap(cmap)<EOL>collection.set_norm(norm)<EOL>collection.set_alpha(alpha)<EOL>collection.update(kwargs)<EOL>if vmin is not None or vmax is not None:<EOL><INDENT>collection.set_clim(vmin, vmax)<EOL><DEDENT>else:<EOL><INDENT>collection.autoscale_None()<EOL><DEDENT>corners = ((xmin, ymin), (xmax, ymax))<EOL>self.update_datalim( corners)<EOL>self.autoscale_view()<EOL>self.add_collection(collection)<EOL>return collection<EOL>", "docstring": "call signature::\n\n  hexbin(x, y, C = None, gridsize = 100, bins = None,\n         xscale = 'linear', yscale = 'linear',\n         cmap=None, norm=None, vmin=None, vmax=None,\n         alpha=1.0, linewidths=None, edgecolors='none'\n         reduce_C_function = np.mean,\n         **kwargs)\n\nMake a hexagonal binning plot of *x* versus *y*, where *x*,\n*y* are 1-D 
sequences of the same length, *N*. If *C* is None\n(the default), this is a histogram of the number of occurences\nof the observations at (x[i],y[i]).\n\nIf *C* is specified, it specifies values at the coordinate\n(x[i],y[i]). These values are accumulated for each hexagonal\nbin and then reduced according to *reduce_C_function*, which\ndefaults to numpy's mean function (np.mean). (If *C* is\nspecified, it must also be a 1-D sequence of the same length\nas *x* and *y*.)\n\n*x*, *y* and/or *C* may be masked arrays, in which case only\nunmasked points will be plotted.\n\nOptional keyword arguments:\n\n  *gridsize*: [ 100 | integer ]\n    The number of hexagons in the *x*-direction, default is\n    100. The corresponding number of hexagons in the\n    *y*-direction is chosen such that the hexagons are\n    approximately regular. Alternatively, gridsize can be a\n    tuple with two elements specifying the number of hexagons\n    in the *x*-direction and the *y*-direction.\n\n  *bins*: [ None | 'log' | integer | sequence ]\n    If *None*, no binning is applied; the color of each hexagon\n    directly corresponds to its count value.\n\n    If 'log', use a logarithmic scale for the color\n    map. Internally, :math:`log_{10}(i+1)` is used to\n    determine the hexagon color.\n\n    If an integer, divide the counts in the specified number\n    of bins, and color the hexagons accordingly.\n\n    If a sequence of values, the values of the lower bound of\n    the bins to be used.\n\n  *xscale*: [ 'linear' | 'log' ]\n    Use a linear or log10 scale on the horizontal axis.\n\n  *scale*: [ 'linear' | 'log' ]\n    Use a linear or log10 scale on the vertical axis.\n\nOther keyword arguments controlling color mapping and normalization\narguments:\n\n  *cmap*: [ None | Colormap ]\n    a :class:`matplotlib.cm.Colormap` instance. 
If *None*,\n    defaults to rc ``image.cmap``.\n\n  *norm*: [ None | Normalize ]\n    :class:`matplotlib.colors.Normalize` instance is used to\n    scale luminance data to 0,1.\n\n  *vmin*/*vmax*: scalar\n    *vmin* and *vmax* are used in conjunction with *norm* to normalize\n    luminance data.  If either are *None*, the min and max of the color\n    array *C* is used.  Note if you pass a norm instance, your settings\n    for *vmin* and *vmax* will be ignored.\n\n  *alpha*: scalar\n    the alpha value for the patches\n\n  *linewidths*: [ None | scalar ]\n    If *None*, defaults to rc lines.linewidth. Note that this\n    is a tuple, and if you set the linewidths argument you\n    must set it as a sequence of floats, as required by\n    :class:`~matplotlib.collections.RegularPolyCollection`.\n\nOther keyword arguments controlling the Collection properties:\n\n  *edgecolors*: [ None | mpl color | color sequence ]\n    If 'none', draws the edges in the same color as the fill color.\n    This is the default, as it avoids unsightly unpainted pixels\n    between the hexagons.\n\n    If *None*, draws the outlines in the default color.\n\n    If a matplotlib color arg or sequence of rgba tuples, draws the\n    outlines in the specified color.\n\nHere are the standard descriptions of all the\n:class:`~matplotlib.collections.Collection` kwargs:\n\n%(Collection)s\n\nThe return value is a\n:class:`~matplotlib.collections.PolyCollection` instance; use\n:meth:`~matplotlib.collection.PolyCollection.get_array` on\nthis :class:`~matplotlib.collections.PolyCollection` to get\nthe counts in each hexagon.\n\n**Example:**\n\n.. plot:: mpl_examples/pylab_examples/hexbin_demo.py", "id": "f17238:c1:m155"}
{"signature": "def end_pan(self):", "body": "del self._pan_start<EOL>", "docstring": "Called when a pan operation completes (when the mouse button\nis up.)\n\n.. note::\n    Intended to be overridden by new projection types.", "id": "f17238:c1:m114"}
{"signature": "def get_xaxis(self):", "body": "return self.xaxis<EOL>", "docstring": "Return the XAxis instance", "id": "f17238:c1:m37"}
{"signature": "def disconnect(self, cid):", "body": "raise DeprecationWarning('<STR_LIT>'<EOL>'<STR_LIT>')<EOL>", "docstring": "disconnect from the Axes event.", "id": "f17238:c1:m119"}
{"signature": "def get_ymajorticklabels(self):", "body": "return cbook.silent_list('<STR_LIT>',<EOL>self.yaxis.get_majorticklabels())<EOL>", "docstring": "Get the xtick labels as a list of Text instances", "id": "f17238:c1:m99"}
{"signature": "def text(self, x, y, s, fontdict=None,<EOL>withdash=False, **kwargs):", "body": "default = {<EOL>'<STR_LIT>' : '<STR_LIT>',<EOL>'<STR_LIT>' : '<STR_LIT:left>',<EOL>'<STR_LIT>' : self.transData,<EOL>}<EOL>if withdash:<EOL><INDENT>t = mtext.TextWithDash(<EOL>x=x, y=y, text=s,<EOL>)<EOL><DEDENT>else:<EOL><INDENT>t = mtext.Text(<EOL>x=x, y=y, text=s,<EOL>)<EOL><DEDENT>self._set_artist_props(t)<EOL>t.update(default)<EOL>if fontdict is not None: t.update(fontdict)<EOL>t.update(kwargs)<EOL>self.texts.append(t)<EOL>t._remove_method = lambda h: self.texts.remove(h)<EOL>if '<STR_LIT>' in kwargs:  t.set_clip_box(self.bbox)<EOL>return t<EOL>", "docstring": "call signature::\n\n   text(x, y, s, fontdict=None, **kwargs)\n\n Add text in string *s* to axis at location *x*, *y*, data\n coordinates.\n\n Keyword arguments:\n\n   *fontdict*:\n     A dictionary to override the default text properties.\n     If *fontdict* is *None*, the defaults are determined by your rc\n     parameters.\n\n   *withdash*: [ False | True ]\n     Creates a :class:`~matplotlib.text.TextWithDash` instance\n     instead of a :class:`~matplotlib.text.Text` instance.\n\n Individual keyword arguments can be used to override any given\n parameter::\n\n     text(x, y, s, fontsize=12)\n\n The default transform specifies that text is in data coords,\n alternatively, you can specify text in axis coords (0,0 is\n lower-left and 1,1 is upper-right).  The example below places\n text in the center of the axes::\n\n     text(0.5, 0.5,'matplotlib',\n          horizontalalignment='center',\n          verticalalignment='center',\n          transform = ax.transAxes)\n\nYou can put a rectangular box around the text instance (eg. to\nset a background color) by using the keyword *bbox*.  *bbox* is\na dictionary of :class:`matplotlib.patches.Rectangle`\nproperties.  
For example::\n\n  text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))\n\nValid kwargs are :class:`matplotlib.text.Text` properties:\n\n%(Text)s", "id": "f17238:c1:m130"}
{"signature": "def set_axisbelow(self, b):", "body": "self._axisbelow = b<EOL>", "docstring": "Set whether the axis ticks and gridlines are above or below most artists\n\nACCEPTS: [ *True* | *False* ]", "id": "f17238:c1:m68"}
{"signature": "def drag_pan(self, button, key, x, y):", "body": "def format_deltas(key, dx, dy):<EOL><INDENT>if key=='<STR_LIT>':<EOL><INDENT>if(abs(dx)>abs(dy)):<EOL><INDENT>dy = dx<EOL><DEDENT>else:<EOL><INDENT>dx = dy<EOL><DEDENT><DEDENT>elif key=='<STR_LIT:x>':<EOL><INDENT>dy = <NUM_LIT:0><EOL><DEDENT>elif key=='<STR_LIT:y>':<EOL><INDENT>dx = <NUM_LIT:0><EOL><DEDENT>elif key=='<STR_LIT>':<EOL><INDENT>if <NUM_LIT:2>*abs(dx) < abs(dy):<EOL><INDENT>dx=<NUM_LIT:0><EOL><DEDENT>elif <NUM_LIT:2>*abs(dy) < abs(dx):<EOL><INDENT>dy=<NUM_LIT:0><EOL><DEDENT>elif(abs(dx)>abs(dy)):<EOL><INDENT>dy=dy/abs(dy)*abs(dx)<EOL><DEDENT>else:<EOL><INDENT>dx=dx/abs(dx)*abs(dy)<EOL><DEDENT><DEDENT>return (dx,dy)<EOL><DEDENT>p = self._pan_start<EOL>dx = x - p.x<EOL>dy = y - p.y<EOL>if dx == <NUM_LIT:0> and dy == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>if button == <NUM_LIT:1>:<EOL><INDENT>dx, dy = format_deltas(key, dx, dy)<EOL>result = p.bbox.translated(-dx, -dy).transformed(p.trans_inverse)<EOL><DEDENT>elif button == <NUM_LIT:3>:<EOL><INDENT>try:<EOL><INDENT>dx = -dx / float(self.bbox.width)<EOL>dy = -dy / float(self.bbox.height)<EOL>dx, dy = format_deltas(key, dx, dy)<EOL>if self.get_aspect() != '<STR_LIT>':<EOL><INDENT>dx = <NUM_LIT:0.5> * (dx + dy)<EOL>dy = dx<EOL><DEDENT>alpha = np.power(<NUM_LIT>, (dx, dy))<EOL>start = p.trans_inverse.transform_point((p.x, p.y))<EOL>lim_points = p.lim.get_points()<EOL>result = start + alpha * (lim_points - start)<EOL>result = mtransforms.Bbox(result)<EOL><DEDENT>except OverflowError:<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL>return<EOL><DEDENT><DEDENT>self.set_xlim(*result.intervalx)<EOL>self.set_ylim(*result.intervaly)<EOL>", "docstring": "Called when the mouse moves during a pan operation.\n\n*button* is the mouse button number:\n\n* 1: LEFT\n* 2: MIDDLE\n* 3: RIGHT\n\n*key* is a \"shift\" key\n\n*x*, *y* are the mouse coordinates in display coords.\n\n.. 
note::\n    Intended to be overridden by new projection types.", "id": "f17238:c1:m115"}
{"signature": "def pie(self, x, explode=None, labels=None, colors=None,<EOL>autopct=None, pctdistance=<NUM_LIT>, shadow=False,<EOL>labeldistance=<NUM_LIT>):", "body": "self.set_frame_on(False)<EOL>x = np.asarray(x).astype(np.float32)<EOL>sx = float(x.sum())<EOL>if sx><NUM_LIT:1>: x = np.divide(x,sx)<EOL>if labels is None: labels = ['<STR_LIT>']*len(x)<EOL>if explode is None: explode = [<NUM_LIT:0>]*len(x)<EOL>assert(len(x)==len(labels))<EOL>assert(len(x)==len(explode))<EOL>if colors is None: colors = ('<STR_LIT:b>', '<STR_LIT:g>', '<STR_LIT:r>', '<STR_LIT:c>', '<STR_LIT:m>', '<STR_LIT:y>', '<STR_LIT:k>', '<STR_LIT:w>')<EOL>center = <NUM_LIT:0>,<NUM_LIT:0><EOL>radius = <NUM_LIT:1><EOL>theta1 = <NUM_LIT:0><EOL>i = <NUM_LIT:0><EOL>texts = []<EOL>slices = []<EOL>autotexts = []<EOL>for frac, label, expl in cbook.safezip(x,labels, explode):<EOL><INDENT>x, y = center<EOL>theta2 = theta1 + frac<EOL>thetam = <NUM_LIT:2>*math.pi*<NUM_LIT:0.5>*(theta1+theta2)<EOL>x += expl*math.cos(thetam)<EOL>y += expl*math.sin(thetam)<EOL>w = mpatches.Wedge((x,y), radius, <NUM_LIT>*theta1, <NUM_LIT>*theta2,<EOL>facecolor=colors[i%len(colors)])<EOL>slices.append(w)<EOL>self.add_patch(w)<EOL>w.set_label(label)<EOL>if shadow:<EOL><INDENT>shad = mpatches.Shadow(w, -<NUM_LIT>, -<NUM_LIT>,<EOL>)<EOL>shad.set_zorder(<NUM_LIT>*w.get_zorder())<EOL>self.add_patch(shad)<EOL><DEDENT>xt = x + labeldistance*radius*math.cos(thetam)<EOL>yt = y + labeldistance*radius*math.sin(thetam)<EOL>label_alignment = xt > <NUM_LIT:0> and '<STR_LIT:left>' or '<STR_LIT:right>'<EOL>t = self.text(xt, yt, label,<EOL>size=rcParams['<STR_LIT>'],<EOL>horizontalalignment=label_alignment,<EOL>verticalalignment='<STR_LIT>')<EOL>texts.append(t)<EOL>if autopct is not None:<EOL><INDENT>xt = x + pctdistance*radius*math.cos(thetam)<EOL>yt = y + pctdistance*radius*math.sin(thetam)<EOL>if is_string_like(autopct):<EOL><INDENT>s = autopct%(<NUM_LIT>*frac)<EOL><DEDENT>elif callable(autopct):<EOL><INDENT>s = 
autopct(<NUM_LIT>*frac)<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(<EOL>'<STR_LIT>')<EOL><DEDENT>t = self.text(xt, yt, s,<EOL>horizontalalignment='<STR_LIT>',<EOL>verticalalignment='<STR_LIT>')<EOL>autotexts.append(t)<EOL><DEDENT>theta1 = theta2<EOL>i += <NUM_LIT:1><EOL><DEDENT>self.set_xlim((-<NUM_LIT>, <NUM_LIT>))<EOL>self.set_ylim((-<NUM_LIT>, <NUM_LIT>))<EOL>self.set_xticks([])<EOL>self.set_yticks([])<EOL>if autopct is None: return slices, texts<EOL>else: return slices, texts, autotexts<EOL>", "docstring": "r\"\"\"\n        call signature::\n\n          pie(x, explode=None, labels=None,\n              colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),\n              autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)\n\n        Make a pie chart of array *x*.  The fractional area of each\n        wedge is given by x/sum(x).  If sum(x) <= 1, then the values\n        of x give the fractional area directly and the array will not\n        be normalized.\n\n        Keyword arguments:\n\n          *explode*: [ None | len(x) sequence ]\n            If not *None*, is a len(*x*) array which specifies the\n            fraction of the radius with which to offset each wedge.\n\n          *colors*: [ None | color sequence ]\n            A sequence of matplotlib color args through which the pie chart\n            will cycle.\n\n          *labels*: [ None | len(x) sequence of strings ]\n            A sequence of strings providing the labels for each wedge\n\n          *autopct*: [ None | format string | format function ]\n            If not *None*, is a string or function used to label the\n            wedges with their numeric value.  The label will be placed inside\n            the wedge.  If it is a format string, the label will be ``fmt%pct``.\n            If it is a function, it will be called.\n\n          *pctdistance*: scalar\n            The ratio between the center of each pie slice and the\n            start of the text generated by *autopct*.  
Ignored if\n            *autopct* is *None*; default is 0.6.\n\n          *labeldistance*: scalar\n            The radial distance at which the pie labels are drawn\n\n          *shadow*: [ False | True ]\n            Draw a shadow beneath the pie.\n\n        The pie chart will probably look best if the figure and axes are\n        square.  Eg.::\n\n          figure(figsize=(8,8))\n          ax = axes([0.1, 0.1, 0.8, 0.8])\n\n        Return value:\n          If *autopct* is None, return the tuple (*patches*, *texts*):\n\n            - *patches* is a sequence of\n              :class:`matplotlib.patches.Wedge` instances\n\n            - *texts* is a list of the label\n              :class:`matplotlib.text.Text` instances.\n\n          If *autopct* is not *None*, return the tuple (*patches*,\n          *texts*, *autotexts*), where *patches* and *texts* are as\n          above, and *autotexts* is a list of\n          :class:`~matplotlib.text.Text` instances for the numeric\n          labels.", "id": "f17238:c1:m151"}
{"signature": "def get_cursor_props(self):", "body": "return self._cursorProps<EOL>", "docstring": "return the cursor propertiess as a (*linewidth*, *color*)\ntuple, where *linewidth* is a float and *color* is an RGBA\ntuple", "id": "f17238:c1:m116"}
{"signature": "def set_axis_on(self):", "body": "self.axison = True<EOL>", "docstring": "turn on the axis", "id": "f17238:c1:m72"}
{"signature": "def __init__(self, fig, *args, **kwargs):", "body": "self.figure = fig<EOL>if len(args)==<NUM_LIT:1>:<EOL><INDENT>s = str(args[<NUM_LIT:0>])<EOL>if len(s) != <NUM_LIT:3>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>rows, cols, num = list(map(int, s))<EOL><DEDENT>elif len(args)==<NUM_LIT:3>:<EOL><INDENT>rows, cols, num = args<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(  '<STR_LIT>')<EOL><DEDENT>total = rows*cols<EOL>num -= <NUM_LIT:1>    <EOL>if num >= total:<EOL><INDENT>raise ValueError( '<STR_LIT>')<EOL><DEDENT>self._rows = rows<EOL>self._cols = cols<EOL>self._num = num<EOL>self.update_params()<EOL>self._axes_class.__init__(self, fig, self.figbox, **kwargs)<EOL>", "docstring": "*fig* is a :class:`matplotlib.figure.Figure` instance.\n\n*args* is the tuple (*numRows*, *numCols*, *plotNum*), where\nthe array of subplots in the figure has dimensions *numRows*,\n*numCols*, and where *plotNum* is the number of the subplot\nbeing created.  *plotNum* starts at 1 in the upper left\ncorner and increases to the right.\n\nIf *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the\ndecimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.", "id": "f17238:c2:m0"}
{"signature": "def get_shared_x_axes(self):", "body": "return self._shared_x_axes<EOL>", "docstring": "Return a copy of the shared axes Grouper object for x axes", "id": "f17238:c1:m173"}
{"signature": "def _init_axis(self):", "body": "self.xaxis = maxis.XAxis(self)<EOL>self.yaxis = maxis.YAxis(self)<EOL>self._update_transScale()<EOL>", "docstring": "move this out of __init__ because non-separable axes don't use it", "id": "f17238:c1:m3"}
{"signature": "def get_position(self, original=False):", "body": "if original:<EOL><INDENT>return self._originalPosition.frozen()<EOL><DEDENT>else:<EOL><INDENT>return self._position.frozen()<EOL><DEDENT>", "docstring": "Return the a copy of the axes rectangle as a Bbox", "id": "f17238:c1:m13"}
{"signature": "def plot(self, *args, **kwargs):", "body": "scalex = kwargs.pop( '<STR_LIT>', True)<EOL>scaley = kwargs.pop( '<STR_LIT>', True)<EOL>if not self._hold: self.cla()<EOL>lines = []<EOL>for line in self._get_lines(*args, **kwargs):<EOL><INDENT>self.add_line(line)<EOL>lines.append(line)<EOL><DEDENT>self.autoscale_view(scalex=scalex, scaley=scaley)<EOL>return lines<EOL>", "docstring": "Plot lines and/or markers to the\n:class:`~matplotlib.axes.Axes`.  *args* is a variable length\nargument, allowing for multiple *x*, *y* pairs with an\noptional format string.  For example, each of the following is\nlegal::\n\n    plot(x, y)         # plot x and y using default line style and color\n    plot(x, y, 'bo')   # plot x and y using blue circle markers\n    plot(y)            # plot y using x as index array 0..N-1\n    plot(y, 'r+')      # ditto, but with red plusses\n\nIf *x* and/or *y* is 2-dimensional, then the corresponding columns\nwill be plotted.\n\nAn arbitrary number of *x*, *y*, *fmt* groups can be\nspecified, as in::\n\n    a.plot(x1, y1, 'g^', x2, y2, 'g-')\n\nReturn value is a list of lines that were added.\n\nThe following format string characters are accepted to control\nthe line style or marker:\n\n================    ===============================\ncharacter           description\n================    ===============================\n'-'                 solid line style\n'--'                dashed line style\n'-.'                dash-dot line style\n':'                 dotted line style\n'.'                 
point marker\n','                 pixel marker\n'o'                 circle marker\n'v'                 triangle_down marker\n'^'                 triangle_up marker\n'<'                 triangle_left marker\n'>'                 triangle_right marker\n'1'                 tri_down marker\n'2'                 tri_up marker\n'3'                 tri_left marker\n'4'                 tri_right marker\n's'                 square marker\n'p'                 pentagon marker\n'*'                 star marker\n'h'                 hexagon1 marker\n'H'                 hexagon2 marker\n'+'                 plus marker\n'x'                 x marker\n'D'                 diamond marker\n'd'                 thin_diamond marker\n'|'                 vline marker\n'_'                 hline marker\n================    ===============================\n\n\nThe following color abbreviations are supported:\n\n==========  ========\ncharacter   color\n==========  ========\n'b'         blue\n'g'         green\n'r'         red\n'c'         cyan\n'm'         magenta\n'y'         yellow\n'k'         black\n'w'         white\n==========  ========\n\nIn addition, you can specify colors in many weird and\nwonderful ways, including full names (``'green'``), hex\nstrings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or\ngrayscale intensities as a string (``'0.8'``).  Of these, the\nstring specifications can be used in place of a ``fmt`` group,\nbut the tuple forms can be used only as ``kwargs``.\n\nLine styles and colors are combined in a single format string, as in\n``'bo'`` for blue circles.\n\nThe *kwargs* can be used to set line properties (any property that has\na ``set_*`` method).  You can use this to set a line label (for auto\nlegends), linewidth, anitialising, marker face color, etc.  
Here is an\nexample::\n\n    plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)\n    plot([1,2,3], [1,4,9], 'rs',  label='line 2')\n    axis([0, 4, 0, 10])\n    legend()\n\nIf you make multiple lines with one plot command, the kwargs\napply to all those lines, e.g.::\n\n    plot(x1, y1, x2, y2, antialised=False)\n\nNeither line will be antialiased.\n\nYou do not need to use format strings, which are just\nabbreviations.  All of the line properties can be controlled\nby keyword arguments.  For example, you can set the color,\nmarker, linestyle, and markercolor with::\n\n    plot(x, y, color='green', linestyle='dashed', marker='o',\n         markerfacecolor='blue', markersize=12).  See\n         :class:`~matplotlib.lines.Line2D` for details.\n\nThe kwargs are :class:`~matplotlib.lines.Line2D` properties:\n\n%(Line2D)s\n\nkwargs *scalex* and *scaley*, if defined, are passed on to\n:meth:`~matplotlib.axes.Axes.autoscale_view` to determine\nwhether the *x* and *y* axes are autoscaled; the default is\n*True*.", "id": "f17238:c1:m138"}
{"signature": "def __init__(self, base, offset):", "body": "self._base = base<EOL>self.offset = offset<EOL>", "docstring": "place ticks on the i-th data points where (i-offset)%base==0", "id": "f17239:c12:m0"}
{"signature": "def __init__(self, seq):", "body": "self.seq = seq<EOL>self.offset_string = '<STR_LIT>'<EOL>", "docstring": "seq is a sequence of strings.  For positions `i<len(seq)` return\n*seq[i]* regardless of *x*.  Otherwise return ''", "id": "f17239:c3:m0"}
{"signature": "def decade_down(x, base=<NUM_LIT:10>):", "body": "lx = math.floor(math.log(x)/math.log(base))<EOL>return base**lx<EOL>", "docstring": "floor x to the nearest lower decade", "id": "f17239:m2"}
{"signature": "def __call__(self, x, pos=None):", "body": "return self.fmt % x<EOL>", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c5:m1"}
{"signature": "def __call__(self):", "body": "vmin, vmax = self.axis.get_view_interval()<EOL>if vmax<vmin:<EOL><INDENT>vmin, vmax = vmax, vmin<EOL><DEDENT>vmin = self._base.ge(vmin)<EOL>base = self._base.get_base()<EOL>n = (vmax - vmin + <NUM_LIT>*base)//base<EOL>locs = vmin + np.arange(n+<NUM_LIT:1>) * base<EOL>return locs<EOL>", "docstring": "Return the locations of the ticks", "id": "f17239:c17:m1"}
{"signature": "def __call__(self):", "body": "dmin, dmax = self.axis.get_data_interval()<EOL>return np.arange(dmin + self.offset, dmax+<NUM_LIT:1>, self._base)<EOL>", "docstring": "Return the locations of the ticks", "id": "f17239:c12:m1"}
{"signature": "def fix_minus(self, s):", "body": "return s<EOL>", "docstring": "some classes may want to replace a hyphen for minus with the\nproper unicode symbol as described `here\n<http://sourceforge.net/tracker/index.php?func=detail&aid=1962574&group_id=80706&atid=560720>`_.\nThe default is to do nothing\n\nNote, if you use this method, eg in :meth`format_data` or\ncall, you probably don't want to use it for\n:meth:`format_data_short` since the toolbar uses this for\ninterative coord reporting and I doubt we can expect GUIs\nacross platforms will handle the unicode correctly.  So for\nnow the classes that override :meth:`fix_minus` should have an\nexplicit :meth:`format_data_short` method", "id": "f17239:c1:m5"}
{"signature": "def lt(self, x):", "body": "d,m = divmod(x, self._base)<EOL>if closeto(m,<NUM_LIT:0>) and not closeto(m/self._base,<NUM_LIT:1>):<EOL><INDENT>return (d-<NUM_LIT:1>)*self._base<EOL><DEDENT>return d*self._base<EOL>", "docstring": "return the largest multiple of base < x", "id": "f17239:c16:m1"}
{"signature": "def autoscale(self):", "body": "return self.view_limits(*self.axis.get_view_interval())<EOL>", "docstring": "autoscale the view limits", "id": "f17239:c11:m2"}
{"signature": "def pan(self, numsteps):", "body": "ticks = self()<EOL>numticks = len(ticks)<EOL>vmin, vmax = self.axis.get_view_interval()<EOL>vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = <NUM_LIT>)<EOL>if numticks><NUM_LIT:2>:<EOL><INDENT>step = numsteps*abs(ticks[<NUM_LIT:0>]-ticks[<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>d = abs(vmax-vmin)<EOL>step = numsteps*d/<NUM_LIT><EOL><DEDENT>vmin += step<EOL>vmax += step<EOL>self.axis.set_view_interval(vmin, vmax, ignore=True)<EOL>", "docstring": "Pan numticks (can be positive or negative)", "id": "f17239:c11:m3"}
{"signature": "def label_minor(self,labelOnlyBase):", "body": "self.labelOnlyBase=labelOnlyBase<EOL>", "docstring": "switch on/off minor ticks labeling", "id": "f17239:c8:m2"}
{"signature": "def __call__(self):", "body": "self.refresh()<EOL>return self._locator()<EOL>", "docstring": "Return the locations of the ticks", "id": "f17239:c22:m1"}
{"signature": "def decade_up(x, base=<NUM_LIT:10>):", "body": "lx = math.ceil(math.log(x)/math.log(base))<EOL>return base**lx<EOL>", "docstring": "ceil x to the nearest higher decade", "id": "f17239:m3"}
{"signature": "def __init__(self, transform, subs=[<NUM_LIT:1.0>]):", "body": "self._transform = transform<EOL>self._subs = subs<EOL>self.numticks = <NUM_LIT:15><EOL>", "docstring": "place ticks on the location= base**i*subs[j]", "id": "f17239:c20:m0"}
{"signature": "def __call__(self, x, pos=None):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Return the format for tick val x at position pos; pos=None indicated unspecified", "id": "f17239:c1:m0"}
{"signature": "def __init__(self, numticks = None, presets=None):", "body": "self.numticks = numticks<EOL>if presets is None:<EOL><INDENT>self.presets = {}<EOL><DEDENT>else:<EOL><INDENT>self.presets = presets<EOL><DEDENT>", "docstring": "Use presets to set locs based on lom.  A dict mapping vmin, vmax->locs", "id": "f17239:c15:m0"}
{"signature": "def view_limits(self, vmin, vmax):", "body": "if vmax<vmin:<EOL><INDENT>vmin, vmax = vmax, vmin<EOL><DEDENT>if vmin==vmax:<EOL><INDENT>vmin-=<NUM_LIT:1><EOL>vmax+=<NUM_LIT:1><EOL><DEDENT>exponent, remainder = divmod(math.log10(vmax - vmin), <NUM_LIT:1>)<EOL>if remainder < <NUM_LIT:0.5>:<EOL><INDENT>exponent -= <NUM_LIT:1><EOL><DEDENT>scale = <NUM_LIT:10>**(-exponent)<EOL>vmin = math.floor(scale*vmin)/scale<EOL>vmax = math.ceil(scale*vmax)/scale<EOL>return mtransforms.nonsingular(vmin, vmax)<EOL>", "docstring": "Try to choose the view limits intelligently", "id": "f17239:c15:m3"}
{"signature": "def __call__(self, x, pos=None):", "body": "if len(self.locs)==<NUM_LIT:0>:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>s = self.pprint_val(x)<EOL>return self.fix_minus(s)<EOL><DEDENT>", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c7:m2"}
{"signature": "def __call__(self, x, pos=None):", "body": "b = self._base<EOL>if x == <NUM_LIT:0>:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>sign = np.sign(x)<EOL>fx = math.log(abs(x))/math.log(b)<EOL>isDecade = self.is_decade(fx)<EOL>usetex = rcParams['<STR_LIT>']<EOL>if sign == -<NUM_LIT:1>:<EOL><INDENT>sign_string = '<STR_LIT:->'<EOL><DEDENT>else:<EOL><INDENT>sign_string = '<STR_LIT>'<EOL><DEDENT>if not isDecade and self.labelOnlyBase: s = '<STR_LIT>'<EOL>elif not isDecade:<EOL><INDENT>if usetex:<EOL><INDENT>s = r'<STR_LIT>'% (sign_string, b, fx)<EOL><DEDENT>else:<EOL><INDENT>s = '<STR_LIT>'% (sign_string, b, fx)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if usetex:<EOL><INDENT>s = r'<STR_LIT>'% (sign_string, b, self.nearest_long(fx))<EOL><DEDENT>else:<EOL><INDENT>s = r'<STR_LIT>'% (sign_string, b, self.nearest_long(fx))<EOL><DEDENT><DEDENT>return s<EOL>", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c10:m0"}
{"signature": "def set_locs(self, locs):", "body": "self.locs = locs<EOL>if len(self.locs) > <NUM_LIT:0>:<EOL><INDENT>vmin, vmax = self.axis.get_view_interval()<EOL>d = abs(vmax-vmin)<EOL>if self._useOffset: self._set_offset(d)<EOL>self._set_orderOfMagnitude(d)<EOL>self._set_format()<EOL><DEDENT>", "docstring": "set the locations of the ticks", "id": "f17239:c7:m8"}
{"signature": "def __call__(self):", "body": "if self.nbins is None:<EOL><INDENT>return self.locs<EOL><DEDENT>step = max(int(<NUM_LIT> + len(self.locs) / float(self.nbins)), <NUM_LIT:1>)<EOL>return self.locs[::step]<EOL>", "docstring": "Return the locations of the ticks", "id": "f17239:c13:m1"}
{"signature": "def __call__(self):", "body": "vmin, vmax = self.axis.get_view_interval()<EOL>vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander = <NUM_LIT>)<EOL>if vmax<vmin:<EOL><INDENT>vmin, vmax = vmax, vmin<EOL><DEDENT>if (vmin, vmax) in self.presets:<EOL><INDENT>return self.presets[(vmin, vmax)]<EOL><DEDENT>if self.numticks is None:<EOL><INDENT>self._set_numticks()<EOL><DEDENT>if self.numticks==<NUM_LIT:0>: return []<EOL>ticklocs = np.linspace(vmin, vmax, self.numticks)<EOL>return ticklocs<EOL>", "docstring": "Return the locations of the ticks", "id": "f17239:c15:m1"}
{"signature": "def get_locator(self, d):", "body": "d = abs(d)<EOL>if d<=<NUM_LIT:0>:<EOL><INDENT>locator = MultipleLocator(<NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>try: ld = math.log10(d)<EOL>except OverflowError:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>fld = math.floor(ld)<EOL>base = <NUM_LIT:10>**fld<EOL>if   d >= <NUM_LIT:5>*base : ticksize = base<EOL>elif d >= <NUM_LIT:2>*base : ticksize = base/<NUM_LIT><EOL>else             : ticksize = base/<NUM_LIT><EOL>locator = MultipleLocator(ticksize)<EOL><DEDENT>return locator<EOL>", "docstring": "pick the best locator based on a distance", "id": "f17239:c22:m4"}
{"signature": "def format_data_short(self,value):", "body": "return self.format_data(value)<EOL>", "docstring": "return a short string version", "id": "f17239:c1:m2"}
{"signature": "def __call__(self, x, pos=None):", "body": "xmin, xmax = self.axis.get_view_interval()<EOL>d = abs(xmax - xmin)<EOL>return self.pprint_val(x,d)<EOL>", "docstring": "Return the format for tick val *x* at position *pos*", "id": "f17239:c6:m0"}
{"signature": "def __call__(self):", "body": "b=self._base<EOL>vmin, vmax = self.axis.get_view_interval()<EOL>if vmin <= <NUM_LIT:0.0>:<EOL><INDENT>vmin = self.axis.get_minpos()<EOL>if vmin <= <NUM_LIT:0.0>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\")<EOL><DEDENT><DEDENT>vmin = math.log(vmin)/math.log(b)<EOL>vmax = math.log(vmax)/math.log(b)<EOL>if vmax<vmin:<EOL><INDENT>vmin, vmax = vmax, vmin<EOL><DEDENT>numdec = math.floor(vmax)-math.ceil(vmin)<EOL>if self._subs is None: <EOL><INDENT>if numdec><NUM_LIT:10>: subs = np.array([<NUM_LIT:1.0>])<EOL>elif numdec><NUM_LIT:6>: subs = np.arange(<NUM_LIT>, b, <NUM_LIT>)<EOL>else: subs = np.arange(<NUM_LIT>, b)<EOL><DEDENT>else:<EOL><INDENT>subs = self._subs<EOL><DEDENT>stride = <NUM_LIT:1><EOL>while numdec/stride+<NUM_LIT:1> > self.numticks:<EOL><INDENT>stride += <NUM_LIT:1><EOL><DEDENT>decades = np.arange(math.floor(vmin),<EOL>math.ceil(vmax)+stride, stride)<EOL>if len(subs) > <NUM_LIT:1> or (len(subs == <NUM_LIT:1>) and subs[<NUM_LIT:0>] != <NUM_LIT:1.0>):<EOL><INDENT>ticklocs = []<EOL>for decadeStart in b**decades:<EOL><INDENT>ticklocs.extend( subs*decadeStart )<EOL><DEDENT><DEDENT>else:<EOL><INDENT>ticklocs = b**decades<EOL><DEDENT>return np.array(ticklocs)<EOL>", "docstring": "Return the locations of the ticks", "id": "f17239:c19:m4"}
{"signature": "def __call__(self):", "body": "raise NotImplementedError('<STR_LIT>')<EOL>", "docstring": "Return the locations of the ticks", "id": "f17239:c11:m0"}
{"signature": "def ge(self, x):", "body": "d,m = divmod(x, self._base)<EOL>if closeto(m,<NUM_LIT:0>) and not closeto(m/self._base,<NUM_LIT:1>):<EOL><INDENT>return d*self._base<EOL><DEDENT>return (d+<NUM_LIT:1>)*self._base<EOL>", "docstring": "return the smallest multiple of base >= x", "id": "f17239:c16:m4"}
{"signature": "def get_intersection(cx1, cy1, cos_t1, sin_t1,<EOL>cx2, cy2, cos_t2, sin_t2):", "body": "<EOL>line1_rhs = sin_t1 * cx1 - cos_t1 * cy1<EOL>line2_rhs = sin_t2 * cx2 - cos_t2 * cy2<EOL>a, b = sin_t1, -cos_t1<EOL>c, d = sin_t2, -cos_t2<EOL>ad_bc = a*d-b*c<EOL>if ad_bc == <NUM_LIT:0.>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>a_, b_ = d, -b<EOL>c_, d_ = -c, a<EOL>a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]<EOL>x = a_* line1_rhs + b_ * line2_rhs<EOL>y = c_* line1_rhs + d_ * line2_rhs<EOL>return x, y<EOL>", "docstring": "return a intersecting point between a line through (cx1, cy1)\n    and having angle t1 and a line through (cx2, cy2) and angle t2.", "id": "f17240:m0"}
{"signature": "def make_wedged_bezier2(bezier2, length, shrink_factor=<NUM_LIT:0.5>):", "body": "xx1, yy1 = bezier2[<NUM_LIT:2>]<EOL>xx2, yy2 = bezier2[<NUM_LIT:1>]<EOL>xx3, yy3 = bezier2[<NUM_LIT:0>]<EOL>cx, cy = xx3, yy3<EOL>x0, y0 = xx2, yy2<EOL>dist = sqrt((x0-cx)**<NUM_LIT:2> + (y0-cy)**<NUM_LIT:2>)<EOL>cos_t, sin_t = (x0-cx)/dist, (y0-cy)/dist,<EOL>x1, y1, x2, y2 = get_normal_points(cx, cy, cos_t, sin_t, length)<EOL>xx12, yy12 = (xx1+xx2)/<NUM_LIT>, (yy1+yy2)/<NUM_LIT>, <EOL>xx23, yy23 = (xx2+xx3)/<NUM_LIT>, (yy2+yy3)/<NUM_LIT>, <EOL>dist = sqrt((xx12-xx23)**<NUM_LIT:2> + (yy12-yy23)**<NUM_LIT:2>)<EOL>cos_t, sin_t = (xx12-xx23)/dist, (yy12-yy23)/dist,<EOL>xm1, ym1, xm2, ym2 = get_normal_points(xx2, yy2, cos_t, sin_t, length*shrink_factor)<EOL>l_plus = [(x1, y1), (xm1, ym1), (xx1, yy1)]<EOL>l_minus = [(x2, y2), (xm2, ym2), (xx1, yy1)]<EOL>return l_plus, l_minus<EOL>", "docstring": "Being similar to get_parallels, returns\ncontrol points of two quadrativ bezier lines having a width roughly parralel to given\none separated by *width*.", "id": "f17240:m11"}
{"signature": "def point_at_t(self, t):", "body": "one_minus_t_powers = np.power(<NUM_LIT:1.>-t, self._orders)[::-<NUM_LIT:1>]<EOL>t_powers = np.power(t, self._orders)<EOL>tt = one_minus_t_powers * t_powers<EOL>_x = sum(tt * self._px)<EOL>_y = sum(tt * self._py)<EOL>return _x, _y<EOL>", "docstring": "evaluate a point at t", "id": "f17240:c0:m1"}
{"signature": "def get_parallels(bezier2, width):", "body": "<EOL>c1x, c1y = bezier2[<NUM_LIT:0>]<EOL>cmx, cmy = bezier2[<NUM_LIT:1>]<EOL>c2x, c2y = bezier2[<NUM_LIT:2>]<EOL>cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)<EOL>cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)<EOL>c1x_left, c1y_left, c1x_right, c1y_right =get_normal_points(c1x, c1y, cos_t1, sin_t1, width)<EOL>c2x_left, c2y_left, c2x_right, c2y_right =get_normal_points(c2x, c2y, cos_t2, sin_t2, width)<EOL>cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1, sin_t1,<EOL>c2x_left, c2y_left, cos_t2, sin_t2)<EOL>cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1, sin_t1,<EOL>c2x_right, c2y_right, cos_t2, sin_t2)<EOL>path_left = [(c1x_left, c1y_left), (cmx_left, cmy_left), (c2x_left, c2y_left)]<EOL>path_right = [(c1x_right, c1y_right), (cmx_right, cmy_right), (c2x_right, c2y_right)]<EOL>return path_left, path_right<EOL>", "docstring": "Given the quadraitc bezier control points *bezier2*, returns\ncontrol points of quadrativ bezier lines roughly parralel to given\none separated by *width*.", "id": "f17240:m10"}
{"signature": "def make_wedged_bezier2(bezier2, width, w1=<NUM_LIT:1.>, wm=<NUM_LIT:0.5>, w2=<NUM_LIT:0.>):", "body": "<EOL>c1x, c1y = bezier2[<NUM_LIT:0>]<EOL>cmx, cmy = bezier2[<NUM_LIT:1>]<EOL>c3x, c3y = bezier2[<NUM_LIT:2>]<EOL>cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)<EOL>cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)<EOL>c1x_left, c1y_left, c1x_right, c1y_right =get_normal_points(c1x, c1y, cos_t1, sin_t1, width*w1)<EOL>c3x_left, c3y_left, c3x_right, c3y_right =get_normal_points(c3x, c3y, cos_t2, sin_t2, width*w2)<EOL>c12x, c12y = (c1x+cmx)*<NUM_LIT>, (c1y+cmy)*<NUM_LIT><EOL>c23x, c23y = (cmx+c3x)*<NUM_LIT>, (cmy+c3y)*<NUM_LIT> <EOL>c123x, c123y = (c12x+c23x)*<NUM_LIT>, (c12y+c23y)*<NUM_LIT><EOL>cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)<EOL>c123x_left, c123y_left, c123x_right, c123y_right =get_normal_points(c123x, c123y, cos_t123, sin_t123, width*wm)<EOL>path_left = find_control_points(c1x_left, c1y_left,<EOL>c123x_left, c123y_left,<EOL>c3x_left, c3y_left)<EOL>path_right = find_control_points(c1x_right, c1y_right,<EOL>c123x_right, c123y_right,<EOL>c3x_right, c3y_right)<EOL>return path_left, path_right<EOL>", "docstring": "Being similar to get_parallels, returns\ncontrol points of two quadrativ bezier lines having a width roughly parralel to given\none separated by *width*.", "id": "f17240:m13"}
{"signature": "def split_bezier_intersecting_with_closedpath(bezier,<EOL>inside_closedpath, <EOL>tolerence=<NUM_LIT>):", "body": "bz = BezierSegment(bezier)<EOL>bezier_point_at_t = bz.point_at_t<EOL>t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,<EOL>inside_closedpath, <EOL>tolerence=tolerence)<EOL>_left, _right = split_de_casteljau(bezier, (t0+t1)/<NUM_LIT>)<EOL>return _left, _right<EOL>", "docstring": "bezier : control points of the bezier segment\ninside_closedpath : a function which returns true if the point is inside the path", "id": "f17240:m5"}
{"signature": "def __init__(self, control_points):", "body": "_o = len(control_points)<EOL>self._orders = np.arange(_o)<EOL>_coeff = BezierSegment._binom_coeff[_o - <NUM_LIT:1>]<EOL>_control_points = np.asarray(control_points)<EOL>xx = _control_points[:,<NUM_LIT:0>]<EOL>yy = _control_points[:,<NUM_LIT:1>]<EOL>self._px = xx * _coeff<EOL>self._py = yy * _coeff<EOL>", "docstring": "*control_points* : location of contol points. It needs have a\n shpae of n * 2, where n is the order of the bezier line. 1<=\n n <= 3 is supported.", "id": "f17240:c0:m0"}
{"signature": "def get_normal_points(cx, cy, cos_t, sin_t, length):", "body": "if length == <NUM_LIT:0.>:<EOL><INDENT>return cx, cy, cx, cy<EOL><DEDENT>cos_t1, sin_t1 = sin_t, -cos_t<EOL>cos_t2, sin_t2 = -sin_t, cos_t<EOL>x1, y1 = length*cos_t1 + cx, length*sin_t1 + cy<EOL>x2, y2 = length*cos_t2 + cx, length*sin_t2 + cy<EOL>return x1, y1, x2, y2<EOL>", "docstring": "For a line passing through (*cx*, *cy*) and having a angle *t*,\nreturn locations of the two points located along its perpendicular line at the distance of *length*.", "id": "f17240:m1"}
{"signature": "def set_over(self, color = '<STR_LIT:k>', alpha = <NUM_LIT:1.0>):", "body": "self._rgba_over = colorConverter.to_rgba(color, alpha)<EOL>if self._isinit: self._set_extremes()<EOL>", "docstring": "Set color to be used for high out-of-range values.\n           Requires norm.clip = False", "id": "f17241:c1:m4"}
{"signature": "def is_color_like(c):", "body": "try:<EOL><INDENT>colorConverter.to_rgb(c)<EOL>return True<EOL><DEDENT>except ValueError:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Return *True* if *c* can be converted to *RGB*", "id": "f17241:m0"}
{"signature": "def __init__(self, colors, name = '<STR_LIT>', N = None):", "body": "self.colors = colors<EOL>self.monochrome = False  <EOL>if N is None:<EOL><INDENT>N = len(self.colors)<EOL><DEDENT>else:<EOL><INDENT>if cbook.is_string_like(self.colors):<EOL><INDENT>self.colors = [self.colors] * N<EOL>self.monochrome = True<EOL><DEDENT>elif cbook.iterable(self.colors):<EOL><INDENT>self.colors = list(self.colors) <EOL>if len(self.colors) == <NUM_LIT:1>:<EOL><INDENT>self.monochrome = True<EOL><DEDENT>if len(self.colors) < N:<EOL><INDENT>self.colors = list(self.colors) * N<EOL><DEDENT>del(self.colors[N:])<EOL><DEDENT>else:<EOL><INDENT>try: gray = float(self.colors)<EOL>except TypeError: pass<EOL>else:  self.colors = [gray] * N<EOL>self.monochrome = True<EOL><DEDENT><DEDENT>Colormap.__init__(self, name, N)<EOL>", "docstring": "Make a colormap from a list of colors.\n\n*colors*\n    a list of matplotlib color specifications,\n    or an equivalent Nx3 floating point array (*N* rgb values)\n*name*\n    a string to identify the colormap\n*N*\n    the number of entries in the map.  The default is *None*,\n    in which case there is one colormap entry for each\n    element in the list of colors.  If::\n\n        N < len(colors)\n\n    the list will be truncated at *N*.  If::\n\n        N > len(colors)\n\n    the list will be extended by repetition.", "id": "f17241:c3:m0"}
{"signature": "def to_rgba(self, arg, alpha=None):", "body": "try:<EOL><INDENT>if not cbook.is_string_like(arg) and cbook.iterable(arg):<EOL><INDENT>if len(arg) == <NUM_LIT:4>:<EOL><INDENT>if [x for x in arg if (float(x) < <NUM_LIT:0>) or  (x > <NUM_LIT:1>)]:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if alpha is None:<EOL><INDENT>return tuple(arg)<EOL><DEDENT>if alpha < <NUM_LIT:0.0> or alpha > <NUM_LIT:1.0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>return arg[<NUM_LIT:0>], arg[<NUM_LIT:1>], arg[<NUM_LIT:2>], arg[<NUM_LIT:3>] * alpha<EOL><DEDENT>r,g,b = arg[:<NUM_LIT:3>]<EOL>if [x for x in (r,g,b) if (float(x) < <NUM_LIT:0>) or  (x > <NUM_LIT:1>)]:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>r,g,b = self.to_rgb(arg)<EOL><DEDENT>if alpha is None:<EOL><INDENT>alpha = <NUM_LIT:1.0><EOL><DEDENT>return r,g,b,alpha<EOL><DEDENT>except (TypeError, ValueError) as exc:<EOL><INDENT>raise ValueError('<STR_LIT>' % (str(arg), exc))<EOL><DEDENT>", "docstring": "Returns an *RGBA* tuple of four floats from 0-1.\n\nFor acceptable values of *arg*, see :meth:`to_rgb`.\nIf *arg* is an *RGBA* sequence and *alpha* is not *None*,\n*alpha* will replace the original *A*.", "id": "f17241:c0:m1"}
{"signature": "def __init__(self, name, N=<NUM_LIT>):", "body": "self.name = name<EOL>self.N = N<EOL>self._rgba_bad = (<NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>, <NUM_LIT:0.0>) <EOL>self._rgba_under = None<EOL>self._rgba_over = None<EOL>self._i_under = N<EOL>self._i_over = N+<NUM_LIT:1><EOL>self._i_bad = N+<NUM_LIT:2><EOL>self._isinit = False<EOL>", "docstring": "Public class attributes:\n    :attr:`N` : number of rgb quantization levels\n    :attr:`name` : name of colormap", "id": "f17241:c1:m0"}
{"signature": "def to_rgb(self, arg):", "body": "try: return self.cache[arg]<EOL>except KeyError: pass<EOL>except TypeError: <EOL><INDENT>arg = tuple(arg)<EOL>try: return self.cache[arg]<EOL>except KeyError: pass<EOL>except TypeError:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'<EOL>% (str(arg),))<EOL><DEDENT><DEDENT>try:<EOL><INDENT>if cbook.is_string_like(arg):<EOL><INDENT>color = self.colors.get(arg, None)<EOL>if color is None:<EOL><INDENT>str1 = cnames.get(arg, arg)<EOL>if str1.startswith('<STR_LIT:#>'):<EOL><INDENT>color = hex2color(str1)<EOL><DEDENT>else:<EOL><INDENT>fl = float(arg)<EOL>if fl < <NUM_LIT:0> or fl > <NUM_LIT:1>:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>color = tuple([fl]*<NUM_LIT:3>)<EOL><DEDENT><DEDENT><DEDENT>elif cbook.iterable(arg):<EOL><INDENT>if len(arg) > <NUM_LIT:4> or len(arg) < <NUM_LIT:3>:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'%len(arg))<EOL><DEDENT>color = tuple(arg[:<NUM_LIT:3>])<EOL>if [x for x in color if (float(x) < <NUM_LIT:0>) or  (x > <NUM_LIT:1>)]:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self.cache[arg] = color<EOL><DEDENT>except (KeyError, ValueError, TypeError) as exc:<EOL><INDENT>raise ValueError('<STR_LIT>' % (str(arg), exc))<EOL><DEDENT>return color<EOL>", "docstring": "Returns an *RGB* tuple of three floats from 0-1.\n\n*arg* can be an *RGB* or *RGBA* sequence or a string in any of\nseveral forms:\n\n    1) a letter from the set 'rgbcmykw'\n    2) a hex color string, like '#00FFFF'\n    3) a standard name, like 'aqua'\n    4) a float, like '0.4', indicating gray on a 0-1 scale\n\nif *arg* is *RGBA*, the *A* will simply be discarded.", "id": "f17241:c0:m0"}
{"signature": "def makeMappingArray(N, data):", "body": "try:<EOL><INDENT>adata = np.array(data)<EOL><DEDENT>except:<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>shape = adata.shape<EOL>if len(shape) != <NUM_LIT:2> and shape[<NUM_LIT:1>] != <NUM_LIT:3>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>x  = adata[:,<NUM_LIT:0>]<EOL>y0 = adata[:,<NUM_LIT:1>]<EOL>y1 = adata[:,<NUM_LIT:2>]<EOL>if x[<NUM_LIT:0>] != <NUM_LIT:0.> or x[-<NUM_LIT:1>] != <NUM_LIT:1.0>:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>if np.sometrue(np.sort(x)-x):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\")<EOL><DEDENT>x = x * (N-<NUM_LIT:1>)<EOL>lut = np.zeros((N,), np.float)<EOL>xind = np.arange(float(N))<EOL>ind = np.searchsorted(x, xind)[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL>lut[<NUM_LIT:1>:-<NUM_LIT:1>] = ( ((xind[<NUM_LIT:1>:-<NUM_LIT:1>] - x[ind-<NUM_LIT:1>]) / (x[ind] - x[ind-<NUM_LIT:1>]))<EOL>* (y0[ind] - y1[ind-<NUM_LIT:1>]) + y1[ind-<NUM_LIT:1>])<EOL>lut[<NUM_LIT:0>] = y1[<NUM_LIT:0>]<EOL>lut[-<NUM_LIT:1>] = y0[-<NUM_LIT:1>]<EOL>np.clip(lut, <NUM_LIT:0.0>, <NUM_LIT:1.0>)<EOL>return lut<EOL>", "docstring": "Create an *N* -element 1-d lookup table\n\n    *data* represented by a list of x,y0,y1 mapping correspondences.\n    Each element in this list represents how a value between 0 and 1\n    (inclusive) represented by x is mapped to a corresponding value\n    between 0 and 1 (inclusive). The two values of y are to allow\n    for discontinuous mapping functions (say as might be found in a\n    sawtooth) where y0 represents the value of y for values of x\n    <= to that given, and y1 is the value to be used for x > than\n    that given). The list must start with x=0, end with x=1, and\n    all values of x must be in increasing order. 
Values between\n    the given mapping points are determined by simple linear interpolation.\n\n    The function returns an array \"result\" where ``result[x*(N-1)]``\n    gives the closest value for values of x between 0 and 1.", "id": "f17241:m3"}
{"signature": "def get_extent(self, renderer):", "body": "return self.width, self.height, self.xdescent, self.ydescent<EOL>", "docstring": "Return with, height, xdescent, ydescent of box", "id": "f17242:c4:m6"}
{"signature": "def set_width(self, width):", "body": "self.width = width<EOL>", "docstring": "Set the width\n\naccepts float", "id": "f17242:c0:m4"}
{"signature": "def draw(self, renderer):", "body": "for c in self._children:<EOL><INDENT>c.draw(renderer)<EOL><DEDENT>bbox_artist(self, renderer, fill=False, props=dict(pad=<NUM_LIT:0.>))<EOL>", "docstring": "Draw the children", "id": "f17242:c4:m8"}
{"signature": "def set_offset(self, xy):", "body": "self._offset = xy<EOL>", "docstring": "Set the offset\n\naccepts x, y, tuple, or a callable object.", "id": "f17242:c0:m2"}
{"signature": "def get_offset(self):", "body": "return self._offset<EOL>", "docstring": "return offset of the container.", "id": "f17242:c5:m7"}
{"signature": "def get_minimumdescent(self):", "body": "return self._minimumdescent<EOL>", "docstring": "get minimumdescent.", "id": "f17242:c5:m4"}
{"signature": "def score_stretch(self, stretch1, stretch2):", "body": "try:<EOL><INDENT>stretchval1 = int(stretch1)<EOL><DEDENT>except ValueError:<EOL><INDENT>stretchval1 = stretch_dict.get(stretch1, <NUM_LIT>)<EOL><DEDENT>try:<EOL><INDENT>stretchval2 = int(stretch2)<EOL><DEDENT>except ValueError:<EOL><INDENT>stretchval2 = stretch_dict.get(stretch2, <NUM_LIT>)<EOL><DEDENT>return abs(stretchval1 - stretchval2) / <NUM_LIT><EOL>", "docstring": "Returns a match score between *stretch1* and *stretch2*.\n\nThe result is the absolute value of the difference between the\nCSS numeric values of *stretch1* and *stretch2*, normalized\nbetween 0.0 and 1.0.", "id": "f17243:c2:m9"}
{"signature": "def get_variant(self):", "body": "if self._variant is None:<EOL><INDENT>return rcParams['<STR_LIT>']<EOL><DEDENT>return self._variant<EOL>", "docstring": "Return the font variant.  Values are: 'normal' or\n'small-caps'.", "id": "f17243:c1:m7"}
{"signature": "def copy(self):", "body": "return FontProperties(_init = self)<EOL>", "docstring": "Return a deep copy of self", "id": "f17243:c1:m22"}
{"signature": "def findSystemFonts(fontpaths=None, fontext='<STR_LIT>'):", "body": "fontfiles = {}<EOL>fontexts = get_fontext_synonyms(fontext)<EOL>if fontpaths is None:<EOL><INDENT>if sys.platform == '<STR_LIT:win32>':<EOL><INDENT>fontdir = win32FontDirectory()<EOL>fontpaths = [fontdir]<EOL>for f in win32InstalledFonts(fontdir):<EOL><INDENT>base, ext = os.path.splitext(f)<EOL>if len(ext)><NUM_LIT:1> and ext[<NUM_LIT:1>:].lower() in fontexts:<EOL><INDENT>fontfiles[f] = <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>fontpaths = x11FontDirectory()<EOL>if sys.platform == '<STR_LIT>':<EOL><INDENT>for f in OSXInstalledFonts(fontext=fontext):<EOL><INDENT>fontfiles[f] = <NUM_LIT:1><EOL><DEDENT><DEDENT>for f in get_fontconfig_fonts(fontext):<EOL><INDENT>fontfiles[f] = <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>elif isinstance(fontpaths, str):<EOL><INDENT>fontpaths = [fontpaths]<EOL><DEDENT>for path in fontpaths:<EOL><INDENT>files = []<EOL>for ext in fontexts:<EOL><INDENT>files.extend(glob.glob(os.path.join(path, '<STR_LIT>'+ext)))<EOL>files.extend(glob.glob(os.path.join(path, '<STR_LIT>'+ext.upper())))<EOL><DEDENT>for fname in files:<EOL><INDENT>fontfiles[os.path.abspath(fname)] = <NUM_LIT:1><EOL><DEDENT><DEDENT>return [fname for fname in list(fontfiles.keys()) if os.path.exists(fname)]<EOL>", "docstring": "Search for fonts in the specified font paths.  If no paths are\ngiven, will use a standard set of system paths, as well as the\nlist of fonts tracked by fontconfig if fontconfig is installed and\navailable.  A list of TrueType fonts are returned by default with\nAFM fonts as an option.", "id": "f17243:m7"}
{"signature": "def OSXFontDirectory():", "body": "fontpaths = []<EOL>def add(arg,directory,files):<EOL><INDENT>fontpaths.append(directory)<EOL><DEDENT>for fontdir in OSXFontDirectories:<EOL><INDENT>try:<EOL><INDENT>if os.path.isdir(fontdir):<EOL><INDENT>os.path.walk(fontdir, add, None)<EOL><DEDENT><DEDENT>except (IOError, OSError, TypeError, ValueError):<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return fontpaths<EOL>", "docstring": "Return the system font directories for OS X.  This is done by\nstarting at the list of hardcoded paths in\n:attr:`OSXFontDirectories` and returning all nested directories\nwithin them.", "id": "f17243:m3"}
{"signature": "def x11FontDirectory():", "body": "fontpaths = []<EOL>def add(arg,directory,files):<EOL><INDENT>fontpaths.append(directory)<EOL><DEDENT>for fontdir in X11FontDirectories:<EOL><INDENT>try:<EOL><INDENT>if os.path.isdir(fontdir):<EOL><INDENT>os.path.walk(fontdir, add, None)<EOL><DEDENT><DEDENT>except (IOError, OSError, TypeError, ValueError):<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return fontpaths<EOL>", "docstring": "Return the system font directories for X11.  This is done by\nstarting at the list of hardcoded paths in\n:attr:`X11FontDirectories` and returning all nested directories\nwithin them.", "id": "f17243:m5"}
{"signature": "def get_stretch(self):", "body": "if self._stretch is None:<EOL><INDENT>return rcParams['<STR_LIT>']<EOL><DEDENT>return self._stretch<EOL>", "docstring": "Return the font stretch or width.  Options are: 'ultra-condensed',\n'extra-condensed', 'condensed', 'semi-condensed', 'normal',\n'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.", "id": "f17243:c1:m9"}
{"signature": "def set_default_size(self, size):", "body": "self.default_size = size<EOL>", "docstring": "Set the default font size in points.  The initial value is set\nby ``font.size`` in rc.", "id": "f17243:c2:m4"}
{"signature": "def get_weight(self):", "body": "if self._weight is None:<EOL><INDENT>return rcParams['<STR_LIT>']<EOL><DEDENT>return self._weight<EOL>", "docstring": "Set the font weight.  Options are: A numeric value in the\nrange 0-1000 or one of 'light', 'normal', 'regular', 'book',\n'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',\n'heavy', 'extra bold', 'black'", "id": "f17243:c1:m8"}
{"signature": "def score_size(self, size1, size2):", "body": "if size2 == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>try:<EOL><INDENT>sizeval1 = float(size1)<EOL><DEDENT>except ValueError:<EOL><INDENT>sizeval1 = self.default_size * font_scalings(size1)<EOL><DEDENT>try:<EOL><INDENT>sizeval2 = float(size2)<EOL><DEDENT>except ValueError:<EOL><INDENT>return <NUM_LIT:1.0><EOL><DEDENT>return abs(sizeval1 - sizeval2) / <NUM_LIT><EOL>", "docstring": "Returns a match score between *size1* and *size2*.\n\nIf *size2* (the size specified in the font file) is 'scalable', this\nfunction always returns 0.0, since any font size can be generated.\n\nOtherwise, the result is the absolute distance between *size1* and\n*size2*, normalized so that the usual range of font sizes (6pt -\n72pt) will lie between 0.0 and 1.0.", "id": "f17243:c2:m11"}
{"signature": "def weight_as_number(weight):", "body": "if isinstance(weight, str):<EOL><INDENT>try:<EOL><INDENT>weight = weight_dict[weight.lower()]<EOL><DEDENT>except KeyError:<EOL><INDENT>weight = <NUM_LIT><EOL><DEDENT><DEDENT>elif weight in range(<NUM_LIT:100>, <NUM_LIT:1000>, <NUM_LIT:100>):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return weight<EOL>", "docstring": "Return the weight property as a numeric value.  String values\nare converted to their corresponding numeric value.", "id": "f17243:m8"}
{"signature": "def score_family(self, families, family2):", "body": "for i, family1 in enumerate(families):<EOL><INDENT>if family1.lower() in font_family_aliases:<EOL><INDENT>if family1 == '<STR_LIT>':<EOL><INDENT>family1 == '<STR_LIT>'<EOL><DEDENT>options = rcParams['<STR_LIT>' + family1]<EOL>if family2 in options:<EOL><INDENT>idx = options.index(family2)<EOL>return <NUM_LIT:0.1> * (float(idx) / len(options))<EOL><DEDENT><DEDENT>elif family1.lower() == family2.lower():<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT><DEDENT>return <NUM_LIT:1.0><EOL>", "docstring": "Returns a match score between the list of font families in\n*families* and the font family name *family2*.\n\nAn exact match anywhere in the list returns 0.0.\n\nA match by generic font name will return 0.1.\n\nNo match will return 1.0.", "id": "f17243:c2:m6"}
{"signature": "def pickle_dump(data, filename):", "body": "fh = open(filename, '<STR_LIT:w>')<EOL>try:<EOL><INDENT>pickle.dump(data, fh)<EOL><DEDENT>finally:<EOL><INDENT>fh.close()<EOL><DEDENT>", "docstring": "Equivalent to pickle.dump(data, open(filename, 'w'))\nbut closes the file to prevent filehandle leakage.", "id": "f17243:m13"}
{"signature": "def set_variant(self, variant):", "body": "if variant not in ('<STR_LIT>', '<STR_LIT>', None):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self._variant = variant<EOL>", "docstring": "Set the font variant.  Values are: 'normal' or 'small-caps'.", "id": "f17243:c1:m16"}
{"signature": "def score_variant(self, variant1, variant2):", "body": "if variant1 == variant2:<EOL><INDENT>return <NUM_LIT:0.0><EOL><DEDENT>else:<EOL><INDENT>return <NUM_LIT:1.0><EOL><DEDENT>", "docstring": "Returns a match score between *variant1* and *variant2*.\n\nAn exact match returns 0.0, otherwise 1.0.", "id": "f17243:c2:m8"}
{"signature": "def createFontList(fontfiles, fontext='<STR_LIT>'):", "body": "fontlist = []<EOL>seen = {}<EOL>for fpath in fontfiles:<EOL><INDENT>verbose.report('<STR_LIT>' % (fpath), '<STR_LIT>')<EOL>fname = os.path.split(fpath)[<NUM_LIT:1>]<EOL>if fname in seen:  continue<EOL>else: seen[fname] = <NUM_LIT:1><EOL>if fontext == '<STR_LIT>':<EOL><INDENT>try:<EOL><INDENT>fh = open(fpath, '<STR_LIT:r>')<EOL><DEDENT>except:<EOL><INDENT>verbose.report(\"<STR_LIT>\" % fpath)<EOL>continue<EOL><DEDENT>try:<EOL><INDENT>try:<EOL><INDENT>font = afm.AFM(fh)<EOL><DEDENT>finally:<EOL><INDENT>fh.close()<EOL><DEDENT><DEDENT>except RuntimeError:<EOL><INDENT>verbose.report(\"<STR_LIT>\"%fpath)<EOL>continue<EOL><DEDENT>prop = afmFontProperty(fpath, font)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>font = ft2font.FT2Font(str(fpath))<EOL><DEDENT>except RuntimeError:<EOL><INDENT>verbose.report(\"<STR_LIT>\"%fpath)<EOL>continue<EOL><DEDENT>except UnicodeError:<EOL><INDENT>verbose.report(\"<STR_LIT>\")<EOL>continue<EOL><DEDENT>try: prop = ttfFontProperty(font)<EOL>except: continue<EOL><DEDENT>fontlist.append(prop)<EOL><DEDENT>return fontlist<EOL>", "docstring": "A function to create a font lookup list.  The default is to create\na list of TrueType fonts.  An AFM font list can optionally be\ncreated.", "id": "f17243:m11"}
{"signature": "def get_fontconfig_fonts(fontext='<STR_LIT>'):", "body": "try:<EOL><INDENT>import subprocess<EOL><DEDENT>except ImportError:<EOL><INDENT>return {}<EOL><DEDENT>fontext = get_fontext_synonyms(fontext)<EOL>fontfiles = {}<EOL>status, output = subprocess.getstatusoutput(\"<STR_LIT>\")<EOL>if status == <NUM_LIT:0>:<EOL><INDENT>for line in output.split('<STR_LIT:\\n>'):<EOL><INDENT>fname = line.split('<STR_LIT::>')[<NUM_LIT:0>]<EOL>if (os.path.splitext(fname)[<NUM_LIT:1>][<NUM_LIT:1>:] in fontext and<EOL>os.path.exists(fname)):<EOL><INDENT>fontfiles[fname] = <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return fontfiles<EOL>", "docstring": "Grab a list of all the fonts that are being tracked by fontconfig\nby making a system call to ``fc-list``.  This is an easy way to\ngrab all of the fonts the user wants to be made available to\napplications, without needing knowing where all of them reside.", "id": "f17243:m6"}
{"signature": "def set_weight(self, weight):", "body": "if weight is not None:<EOL><INDENT>try:<EOL><INDENT>weight = int(weight)<EOL>if weight < <NUM_LIT:0> or weight > <NUM_LIT:1000>:<EOL><INDENT>raise ValueError()<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>if weight not in weight_dict:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT>self._weight = weight<EOL>", "docstring": "Set the font weight.  May be either a numeric value in the\nrange 0-1000 or one of 'ultralight', 'light', 'normal',\n'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',\n'demi', 'bold', 'heavy', 'extra bold', 'black'", "id": "f17243:c1:m17"}
{"signature": "def get_fontconfig_pattern(self):", "body": "return generate_fontconfig_pattern(self)<EOL>", "docstring": "Get a fontconfig pattern suitable for looking up the font as\nspecified with fontconfig's ``fc-match`` utility.\n\nSee the documentation on `fontconfig patterns\n<http://www.fontconfig.org/fontconfig-user.html>`_.\n\nThis support does not require fontconfig to be installed or\nsupport for it to be enabled.  We are merely borrowing its\npattern syntax for use here.", "id": "f17243:c1:m13"}
{"signature": "def is_opentype_cff_font(filename):", "body": "if os.path.splitext(filename)[<NUM_LIT:1>].lower() == '<STR_LIT>':<EOL><INDENT>result = _is_opentype_cff_font_cache.get(filename)<EOL>if result is None:<EOL><INDENT>fd = open(filename, '<STR_LIT:rb>')<EOL>tag = fd.read(<NUM_LIT:4>)<EOL>fd.close()<EOL>result = (tag == '<STR_LIT>')<EOL>_is_opentype_cff_font_cache[filename] = result<EOL><DEDENT>return result<EOL><DEDENT>return False<EOL>", "docstring": "Returns True if the given font is a Postscript Compact Font Format\nFont embedded in an OpenType wrapper.  Used by the PostScript and\nPDF backends that can not subset these fonts.", "id": "f17243:m15"}
{"signature": "def all(a, axis=None):", "body": "if axis is None:<EOL><INDENT>return alltrue(ravel(a))<EOL><DEDENT>else:<EOL><INDENT>return alltrue(a, axis)<EOL><DEDENT>", "docstring": "Numpy-compatible version of all()", "id": "f17245:m1"}
{"signature": "def isnan(a):", "body": "return reshape(array([_isnan(i) for i in ravel(a)],'<STR_LIT:b>'), shape(a))<EOL>", "docstring": "y = isnan(x) returns True where x is Not-A-Number", "id": "f17245:m0"}
{"signature": "def _import_fail_message(module, version):", "body": "_dict = { \"<STR_LIT>\" : which[<NUM_LIT:0>],<EOL>\"<STR_LIT>\" : module,<EOL>\"<STR_LIT>\" : version + module<EOL>}<EOL>print(\"\"\"<STR_LIT>\"\"\" % _dict)<EOL>", "docstring": "Prints a message when the array package specific version of an extension\n    fails to import correctly.", "id": "f17251:m0"}
{"signature": "def Matrix(data, typecode=None, copy=<NUM_LIT:1>, savespace=<NUM_LIT:0>):", "body": "if isinstance(data, type(\"<STR_LIT>\")):<EOL><INDENT>raise TypeError(\"<STR_LIT>\")<EOL><DEDENT>a = fromlist(data, type=typecode)<EOL>if a.rank == <NUM_LIT:0>:<EOL><INDENT>a.shape = (<NUM_LIT:1>,<NUM_LIT:1>)<EOL><DEDENT>elif a.rank == <NUM_LIT:1>:<EOL><INDENT>a.shape = (<NUM_LIT:1>,) + a.shape<EOL><DEDENT>a.__class__ = _Matrix<EOL>return a<EOL>", "docstring": "Matrix constructs new matrices from 2D nested lists of numbers", "id": "f17253:m1"}
{"signature": "def all(a, axis=None):", "body": "if axis is None:<EOL><INDENT>return _all(a)<EOL><DEDENT>return alltrue(a, axis)<EOL>", "docstring": "Numpy-compatible version of all()", "id": "f17253:m0"}
{"signature": "def set_visible(self, b):", "body": "self._visible = b<EOL>self.pchanged()<EOL>", "docstring": "Set the artist's visiblity.\n\nACCEPTS: [True | False]", "id": "f17255:c0:m43"}
{"signature": "def get_clip_path(self):", "body": "return self._clippath<EOL>", "docstring": "Return artist clip path", "id": "f17255:c0:m36"}
{"signature": "def getp(o, property=None):", "body": "insp = ArtistInspector(o)<EOL>if property is None:<EOL><INDENT>ret = insp.pprint_getters()<EOL>print('<STR_LIT:\\n>'.join(ret))<EOL>return<EOL><DEDENT>func = getattr(o, '<STR_LIT>' + property)<EOL>return func()<EOL>", "docstring": "Return the value of handle property.  property is an optional string\nfor the property you want to return\n\nExample usage::\n\n    getp(o)  # get all the object properties\n    getp(o, 'linestyle')  # get the linestyle property\n\n*o* is a :class:`Artist` instance, eg\n:class:`~matplotllib.lines.Line2D` or an instance of a\n:class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.\nIf the *property* is 'somename', this function returns\n\n  o.get_somename()\n\n:func:`getp` can be used to query all the gettable properties with\n``getp(o)``. Many properties have aliases for shorter typing, e.g.\n'lw' is an alias for 'linewidth'.  In the output, aliases and full\nproperty names will be listed as:\n\n  property or alias = value\n\ne.g.:\n\n  linewidth or lw = 2", "id": "f17255:m0"}
{"signature": "def setp(h, *args, **kwargs):", "body": "insp = ArtistInspector(h)<EOL>if len(kwargs)==<NUM_LIT:0> and len(args)==<NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT:\\n>'.join(insp.pprint_setters()))<EOL>return<EOL><DEDENT>if len(kwargs)==<NUM_LIT:0> and len(args)==<NUM_LIT:1>:<EOL><INDENT>print(insp.pprint_setters(prop=args[<NUM_LIT:0>]))<EOL>return<EOL><DEDENT>if not cbook.iterable(h): h = [h]<EOL>else: h = cbook.flatten(h)<EOL>if len(args)%<NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>funcvals = []<EOL>for i in range(<NUM_LIT:0>, len(args)-<NUM_LIT:1>, <NUM_LIT:2>):<EOL><INDENT>funcvals.append((args[i], args[i+<NUM_LIT:1>]))<EOL><DEDENT>funcvals.extend(list(kwargs.items()))<EOL>ret = []<EOL>for o in h:<EOL><INDENT>for s, val in funcvals:<EOL><INDENT>s = s.lower()<EOL>funcName = \"<STR_LIT>\"%s<EOL>func = getattr(o,funcName)<EOL>ret.extend( [func(val)] )<EOL><DEDENT><DEDENT>return [x for x in cbook.flatten(ret)]<EOL>", "docstring": "matplotlib supports the use of :func:`setp` (\"set property\") and\n:func:`getp` to set and get object properties, as well as to do\nintrospection on the object.  For example, to set the linestyle of a\nline to be dashed, you can do::\n\n  >>> line, = plot([1,2,3])\n  >>> setp(line, linestyle='--')\n\nIf you want to know the valid types of arguments, you can provide the\nname of the property you want to set without a value::\n\n  >>> setp(line, 'linestyle')\n      linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]\n\nIf you want to see all the properties that can be set, and their\npossible values, you can do::\n\n  >>> setp(line)\n      ... long output listing omitted\n\n:func:`setp` operates on a single instance or a list of instances.\nIf you are in query mode introspecting the possible values, only\nthe first instance in the sequence is used.  When actually setting\nvalues, all the instances will be set.  
E.g., suppose you have a\nlist of two lines, the following will make both lines thicker and\nred::\n\n  >>> x = arange(0,1.0,0.01)\n  >>> y1 = sin(2*pi*x)\n  >>> y2 = sin(4*pi*x)\n  >>> lines = plot(x, y1, x, y2)\n  >>> setp(lines, linewidth=2, color='r')\n\n:func:`setp` works with the matlab(TM) style string/value pairs or\nwith python kwargs.  For example, the following are equivalent::\n\n  >>> setp(lines, 'linewidth', 2, 'color', r')  # matlab style\n\n  >>> setp(lines, linewidth=2, color='r')       # python style", "id": "f17255:m1"}
{"signature": "def set_clip_box(self, clipbox):", "body": "self.clipbox = clipbox<EOL>self.pchanged()<EOL>", "docstring": "Set the artist's clip :class:`~matplotlib.transforms.Bbox`.\n\nACCEPTS: a :class:`matplotlib.transforms.Bbox` instance", "id": "f17255:c0:m29"}
{"signature": "def get_picker(self):", "body": "return self._picker<EOL>", "docstring": "Return the picker object used by this artist", "id": "f17255:c0:m21"}
{"signature": "def set_snap(self, snap):", "body": "self._snap = snap<EOL>", "docstring": "Sets the snap setting which may be:\n\n  * True: snap vertices to the nearest pixel center\n\n  * False: leave vertices as-is\n\n  * None: (auto) If the path contains only rectilinear line\n    segments, round to the nearest pixel center\n\nOnly supported by the Agg backends.", "id": "f17255:c0:m26"}
{"signature": "def set_figure(self, fig):", "body": "self.figure = fig<EOL>self.pchanged()<EOL>", "docstring": "Set the :class:`~matplotlib.figure.Figure` instance the artist\nbelongs to.\n\nACCEPTS: a :class:`matplotlib.figure.Figure` instance", "id": "f17255:c0:m28"}
{"signature": "def set_clip_on(self, b):", "body": "self._clipon = b<EOL>self.pchanged()<EOL>", "docstring": "Set whether artist uses clipping.\n\nACCEPTS: [True | False]", "id": "f17255:c0:m38"}
{"signature": "def get_zorder(self):", "body": "return self.zorder<EOL>", "docstring": "Return the :class:`Artist`'s zorder.", "id": "f17255:c0:m48"}
{"signature": "def get_url(self):", "body": "return self._url<EOL>", "docstring": "Returns the url", "id": "f17255:c0:m23"}
{"signature": "def pchanged(self):", "body": "for oid, func in list(self._propobservers.items()):<EOL><INDENT>func(self)<EOL><DEDENT>", "docstring": "Fire an event when property changed, calling all of the\nregistered callbacks.", "id": "f17255:c0:m9"}
{"signature": "def set_axes(self, axes):", "body": "self.axes = axes<EOL>", "docstring": "Set the :class:`~matplotlib.axes.Axes` instance in which the\nartist resides, if any.\n\nACCEPTS: an :class:`~matplotlib.axes.Axes` instance", "id": "f17255:c0:m5"}
{"signature": "def __init__(self, o):", "body": "if cbook.iterable(o) and len(o): o = o[<NUM_LIT:0>]<EOL>self.oorig = o<EOL>if not isinstance(o, type):<EOL><INDENT>o = type(o)<EOL><DEDENT>self.o = o<EOL>self.aliasd = self.get_aliases()<EOL>", "docstring": "Initialize the artist inspector with an\n:class:`~matplotlib.artist.Artist` or sequence of\n:class:`Artists`.  If a sequence is used, we assume it is a\nhomogeneous sequence (all :class:`Artists` are of the same\ntype) and it is your responsibility to make sure this is so.", "id": "f17255:c1:m0"}
{"signature": "def get_transform(self):", "body": "if self._transform is None:<EOL><INDENT>self._transform = IdentityTransform()<EOL><DEDENT>return self._transform<EOL>", "docstring": "Return the :class:`~matplotlib.transforms.Transform`\ninstance used by this artist.", "id": "f17255:c0:m12"}
{"signature": "def set_animated(self, b):", "body": "self._animated = b<EOL>self.pchanged()<EOL>", "docstring": "Set the artist's animation state.\n\nACCEPTS: [True | False]", "id": "f17255:c0:m44"}
{"signature": "def get_visible(self):", "body": "return self._visible<EOL>", "docstring": "Return the artist's visiblity", "id": "f17255:c0:m32"}
{"signature": "def remove_callback(self, oid):", "body": "try: del self._propobservers[oid]<EOL>except KeyError: pass<EOL>", "docstring": "Remove a callback based on its *id*.\n\n.. seealso::\n    :meth:`add_callback`", "id": "f17255:c0:m8"}
{"signature": "def get_snap(self):", "body": "return self._snap<EOL>", "docstring": "Returns the snap setting which may be:\n\n  * True: snap vertices to the nearest pixel center\n\n  * False: leave vertices as-is\n\n  * None: (auto) If the path contains only rectilinear line\n    segments, round to the nearest pixel center\n\nOnly supported by the Agg backends.", "id": "f17255:c0:m25"}
{"signature": "def aliased_name(self, s):", "body": "if s in self.aliasd:<EOL><INDENT>return s + '<STR_LIT>'.join(['<STR_LIT>' % x for x in list(self.aliasd[s].keys())])<EOL><DEDENT>else:<EOL><INDENT>return s<EOL><DEDENT>", "docstring": "return 'PROPNAME or alias' if *s* has an alias, else return\nPROPNAME.\n\nE.g. for the line markerfacecolor property, which has an\nalias, return 'markerfacecolor or mfc' and for the transform\nproperty, which does not, return 'transform'", "id": "f17255:c1:m6"}
{"signature": "def set_picker(self, picker):", "body": "self._picker = picker<EOL>", "docstring": "Set the epsilon for picking used by this artist\n\n*picker* can be one of the following:\n\n  * *None*: picking is disabled for this artist (default)\n\n  * A boolean: if *True* then picking will be enabled and the\n    artist will fire a pick event if the mouse event is over\n    the artist\n\n  * A float: if picker is a number it is interpreted as an\n    epsilon tolerance in points and the artist will fire\n    off an event if it's data is within epsilon of the mouse\n    event.  For some artists like lines and patch collections,\n    the artist may provide additional data to the pick event\n    that is generated, e.g. the indices of the data within\n    epsilon of the pick event\n\n  * A function: if picker is callable, it is a user supplied\n    function which determines whether the artist is hit by the\n    mouse event::\n\n      hit, props = picker(artist, mouseevent)\n\n    to determine the hit test.  if the mouse event is over the\n    artist, return *hit=True* and props is a dictionary of\n    properties you want added to the PickEvent attributes.\n\nACCEPTS: [None|float|boolean|callable]", "id": "f17255:c0:m20"}
{"signature": "def set(self, **kwargs):", "body": "ret = []<EOL>for k,v in list(kwargs.items()):<EOL><INDENT>k = k.lower()<EOL>funcName = \"<STR_LIT>\"%k<EOL>func = getattr(self,funcName)<EOL>ret.extend( [func(v)] )<EOL><DEDENT>return ret<EOL>", "docstring": "A tkstyle set command, pass *kwargs* to set properties", "id": "f17255:c0:m51"}
{"signature": "def get_setters(self):", "body": "return [prop for prop, target in self._get_setters_and_targets()]<EOL>", "docstring": "Get the attribute strings with setters for object.  Eg., for a line,\nreturn ``['markerfacecolor', 'linewidth', ....]``.", "id": "f17255:c1:m4"}
{"signature": "def get_valid_values(self, attr):", "body": "name = '<STR_LIT>'%attr<EOL>if not hasattr(self.o, name):<EOL><INDENT>raise AttributeError('<STR_LIT>'%(self.o,name))<EOL><DEDENT>func = getattr(self.o, name)<EOL>docstring = func.__doc__<EOL>if docstring is None: return '<STR_LIT>'<EOL>if docstring.startswith('<STR_LIT>'):<EOL><INDENT>return None<EOL><DEDENT>match = self._get_valid_values_regex.search(docstring)<EOL>if match is not None:<EOL><INDENT>return match.group(<NUM_LIT:1>).replace('<STR_LIT:\\n>', '<STR_LIT:U+0020>')<EOL><DEDENT>return '<STR_LIT>'<EOL>", "docstring": "Get the legal arguments for the setter associated with *attr*.\n\nThis is done by querying the docstring of the function *set_attr*\nfor a line that begins with ACCEPTS:\n\nEg., for a line linestyle, return\n[ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]", "id": "f17255:c1:m2"}
{"signature": "def get_contains(self):", "body": "return self._contains<EOL>", "docstring": "Return the _contains test used by the artist, or *None* for default.", "id": "f17255:c0:m17"}
{"signature": "def get_animated(self):", "body": "return self._animated<EOL>", "docstring": "Return the artist's animated state", "id": "f17255:c0:m33"}
{"signature": "def findobj(self, match=None):", "body": "if match is None: <EOL><INDENT>def matchfunc(x): return True<EOL><DEDENT>elif cbook.issubclass_safe(match, Artist):<EOL><INDENT>def matchfunc(x):<EOL><INDENT>return isinstance(x, match)<EOL><DEDENT><DEDENT>elif callable(match):<EOL><INDENT>matchfunc = match<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>artists = []<EOL>for c in self.get_children():<EOL><INDENT>if matchfunc(c):<EOL><INDENT>artists.append(c)<EOL><DEDENT>artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])<EOL><DEDENT>if matchfunc(self):<EOL><INDENT>artists.append(self)<EOL><DEDENT>return artists<EOL>", "docstring": "pyplot signature:\n  findobj(o=gcf(), match=None)\n\nRecursively find all :class:matplotlib.artist.Artist instances\ncontained in self.\n\n*match* can be\n\n  - None: return all objects contained in artist (including artist)\n\n  - function with signature ``boolean = match(artist)`` used to filter matches\n\n  - class instance: eg Line2D.  Only return artists of class type\n\n.. plot:: mpl_examples/pylab_examples/findobj_demo.py", "id": "f17255:c0:m52"}
{"signature": "def pprint_getters(self):", "body": "o = self.oorig<EOL>getters = [name for name in dir(o)<EOL>if name.startswith('<STR_LIT>')<EOL>and callable(getattr(o, name))]<EOL>getters.sort()<EOL>lines = []<EOL>for name in getters:<EOL><INDENT>func = getattr(o, name)<EOL>if self.is_alias(func): continue<EOL>try: val = func()<EOL>except: continue<EOL>if getattr(val, '<STR_LIT>', ()) != () and len(val)><NUM_LIT:6>:<EOL><INDENT>s = str(val[:<NUM_LIT:6>]) + '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>s = str(val)<EOL><DEDENT>s = s.replace('<STR_LIT:\\n>', '<STR_LIT:U+0020>')<EOL>if len(s)><NUM_LIT:50>:<EOL><INDENT>s = s[:<NUM_LIT:50>] + '<STR_LIT>'<EOL><DEDENT>name = self.aliased_name(name[<NUM_LIT:4>:])<EOL>lines.append('<STR_LIT>' %(name, s))<EOL><DEDENT>return lines<EOL>", "docstring": "Return the getters and actual values as list of strings.", "id": "f17255:c1:m10"}
{"signature": "def get_figure(self):", "body": "return self.figure<EOL>", "docstring": "Return the :class:`~matplotlib.figure.Figure` instance the\nartist belongs to.", "id": "f17255:c0:m27"}
{"signature": "def _getAttributes(obj):", "body": "if isinstance(obj, dict):<EOL><INDENT>attrs = obj<EOL><DEDENT>elif hasattr(obj, \"<STR_LIT>\"):<EOL><INDENT>attrs = {attr: getattr(obj, attr) for attr in obj.__slots__}<EOL><DEDENT>elif hasattr(obj, \"<STR_LIT>\"):<EOL><INDENT>attrs = obj.__dict__<EOL><DEDENT>testParams = SERIALIZABLE_SUBCLASSES[obj.__class__.__name__]<EOL>if \"<STR_LIT>\" in testParams:<EOL><INDENT>for f in testParams[\"<STR_LIT>\"]:<EOL><INDENT>if f in attrs:<EOL><INDENT>del attrs[f]<EOL><DEDENT><DEDENT><DEDENT>return attrs<EOL>", "docstring": "Get all attributes of the given object", "id": "f17263:m1"}
{"signature": "def _allSubclasses(cls):", "body": "return cls.__subclasses__() + [<EOL>g for s in cls.__subclasses__() for g in _allSubclasses(s)<EOL>]<EOL>", "docstring": "Get all subclasses\n:param cls: The class to get subclasses from\n:return: list with all subclasses", "id": "f17263:m0"}
{"signature": "def overlapsForUnrelatedAreas(n, w, radius, repetitions=<NUM_LIT:100>, verbose=False):", "body": "return overlapsForRelativeAreas(n, w, np.array([<NUM_LIT:0>, <NUM_LIT:0>]), radius,<EOL>dPosition=np.array([<NUM_LIT:0>, radius * <NUM_LIT:10>]),<EOL>num=repetitions, verbose=verbose)<EOL>", "docstring": "Return overlaps between an encoding and other, unrelated encodings", "id": "f17266:m3"}
{"signature": "def computeOverlap(x, y):", "body": "return (x & y).sum()<EOL>", "docstring": "Given two binary arrays, compute their overlap. The overlap is the number\nof bits where x[i] and y[i] are both 1", "id": "f17269:m0"}
{"signature": "def _getSimplePatterns(numOnes, numPatterns):", "body": "numCols = numOnes * numPatterns<EOL>p = []<EOL>for i in range(numPatterns):<EOL><INDENT>x = np.zeros(numCols, dtype='<STR_LIT>')<EOL>x[i*numOnes:(i + <NUM_LIT:1>)*numOnes] = <NUM_LIT:1><EOL>p.append(x)<EOL><DEDENT>return p<EOL>", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n    bits on. There are numPatterns*numOnes bits in the vector. These patterns\n    are used as elements of sequences when building up a training set.", "id": "f17280:m1"}
{"signature": "@staticmethod<EOL><INDENT>def _addSampleData(origData=None, numSamples=<NUM_LIT>, spikeValue=<NUM_LIT:1.0>,<EOL>spikePeriod=<NUM_LIT:20>):<DEDENT>", "body": "if origData is None:<EOL><INDENT>origData = []<EOL><DEDENT>if len(origData) > <NUM_LIT:0>:<EOL><INDENT>lastDate = origData[-<NUM_LIT:1>][<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>lastDate = datetime.datetime(<NUM_LIT>, <NUM_LIT:2>, <NUM_LIT:3>)<EOL><DEDENT>dateList = _getDateList(numSamples, lastDate)<EOL>data = copy.copy(origData)<EOL>for idx, date in enumerate(dateList):<EOL><INDENT>if (spikePeriod > <NUM_LIT:0>) and ( (idx + <NUM_LIT:1>) % spikePeriod == <NUM_LIT:0>):<EOL><INDENT>data.append([date, idx, spikeValue])<EOL><DEDENT>else:<EOL><INDENT>data.append([date, idx, <NUM_LIT:0.0>])<EOL><DEDENT><DEDENT>return data<EOL>", "docstring": "Add sample anomaly data to the existing data list and return it.\nNote: this does not modify the original data list\nNote 2: here we just add in increasing integers as the metric value", "id": "f17282:c0:m1"}
{"signature": "def frequency(self,<EOL>n=<NUM_LIT:15>,<EOL>w=<NUM_LIT:7>,<EOL>columnDimensions = <NUM_LIT>,<EOL>numActiveColumnsPerInhArea = <NUM_LIT>,<EOL>stimulusThreshold = <NUM_LIT:0>,<EOL>spSeed = <NUM_LIT:1>,<EOL>spVerbosity = <NUM_LIT:0>,<EOL>numColors = <NUM_LIT:2>,<EOL>seed=<NUM_LIT>,<EOL>minVal=<NUM_LIT:0>,<EOL>maxVal=<NUM_LIT:10>,<EOL>encoder = '<STR_LIT>',<EOL>forced=True):", "body": "print(\"<STR_LIT>\")<EOL>print(encoder, '<STR_LIT>', '<STR_LIT>', seed, '<STR_LIT>', numColors, '<STR_LIT>')<EOL>spImpl = SpatialPooler(<EOL>columnDimensions=(columnDimensions, <NUM_LIT:1>),<EOL>inputDimensions=(<NUM_LIT:1>, n),<EOL>potentialRadius=n/<NUM_LIT:2>,<EOL>numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,<EOL>spVerbosity=spVerbosity,<EOL>stimulusThreshold=stimulusThreshold,<EOL>potentialPct=<NUM_LIT:0.5>,<EOL>seed=spSeed,<EOL>globalInhibition=True,<EOL>)<EOL>rnd.seed(seed)<EOL>numpy.random.seed(seed)<EOL>colors = []<EOL>coincs = []<EOL>reUsedCoincs = []<EOL>spOutput = []<EOL>patterns = set([])<EOL>if encoder=='<STR_LIT>':<EOL><INDENT>enc = scalar.ScalarEncoder(name='<STR_LIT>', w=w, n=n, minval=minVal,<EOL>maxval=maxVal, periodic=False, forced=True) <EOL>for y in range(numColors):<EOL><INDENT>temp = enc.encode(rnd.random()*maxVal)<EOL>colors.append(numpy.array(temp, dtype=numpy.uint32))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for y in range(numColors):<EOL><INDENT>sdr = numpy.zeros(n, dtype=numpy.uint32)<EOL>sdr[rnd.sample(range(n), w)] = <NUM_LIT:1><EOL>colors.append(sdr)<EOL><DEDENT><DEDENT>print('<STR_LIT>', numColors, '<STR_LIT>')<EOL>startTime = time.time()<EOL>for i in range(numColors):<EOL><INDENT>spInput = colors[i]<EOL>onCells = numpy.zeros(columnDimensions, dtype=numpy.uint32)<EOL>spImpl.compute(spInput, True, onCells)<EOL>spOutput.append(onCells.tolist())<EOL>activeCoincIndices = set(onCells.nonzero()[<NUM_LIT:0>])<EOL>reUsed = activeCoincIndices.intersection(patterns)<EOL>if len(reUsed) == <NUM_LIT:0>:<EOL><INDENT>coincs.append((i, 
activeCoincIndices, colors[i]))<EOL><DEDENT>else:<EOL><INDENT>reUsedCoincs.append((i, activeCoincIndices, colors[i]))<EOL><DEDENT>patterns.update(activeCoincIndices)<EOL>if (i + <NUM_LIT:1>) % <NUM_LIT:100> == <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>', i + <NUM_LIT:1>)<EOL>print(\"<STR_LIT>\" % (time.time() - startTime))<EOL>print(len(reUsedCoincs), \"<STR_LIT>\")<EOL><DEDENT><DEDENT>summ = []<EOL>for z in coincs:<EOL><INDENT>summ.append(sum([len(z[<NUM_LIT:1>].intersection(y[<NUM_LIT:1>])) for y in reUsedCoincs]))<EOL><DEDENT>zeros = len([x for x in summ if x==<NUM_LIT:0>])<EOL>factor = max(summ)*len(summ)/sum(summ)<EOL>if len(reUsed) < <NUM_LIT:10>:<EOL><INDENT>self.assertLess(factor, <NUM_LIT>,<EOL>\"<STR_LIT>\" % (<EOL>factor, <NUM_LIT>))<EOL>self.assertLess(zeros, <NUM_LIT>*len(summ),<EOL>\"<STR_LIT>\" % (<EOL>zeros, <NUM_LIT>*len(summ)))<EOL><DEDENT>else:<EOL><INDENT>self.assertLess(factor, <NUM_LIT:8>,<EOL>\"<STR_LIT>\" % (<EOL>factor, <NUM_LIT:8>))<EOL>self.assertLess(zeros, <NUM_LIT:12>,<EOL>\"<STR_LIT>\" % (<EOL>zeros, <NUM_LIT:12>))<EOL><DEDENT>", "docstring": "Helper function that tests whether the SP predicts the most\n        frequent record", "id": "f17285:c0:m4"}
{"signature": "def _computeOverlap(x, y):", "body": "return ((x + y) == <NUM_LIT:2>).sum()<EOL>", "docstring": "Given two binary arrays, compute their overlap. The overlap is the number\nof bits where x[i] and y[i] are both 1", "id": "f17289:m0"}
{"signature": "def boostTestLoop(self, imp):", "body": "self.sp = CreateSP(imp, self.params)<EOL>self.spImplementation = imp<EOL>self.winningIteration.fill(<NUM_LIT:0>)<EOL>self.lastSDR = {}<EOL>self.boostTestPhase1()<EOL>self.boostTestPhase2()<EOL>self.boostTestPhase3()<EOL>self.boostTestPhase4()<EOL>", "docstring": "Main test loop.", "id": "f17289:c0:m7"}
{"signature": "def verifySDRProperties(self):", "body": "<EOL>self.assertTrue(_areAllSDRsUnique(self.lastSDR), \"<STR_LIT>\")<EOL>self.assertGreater(_computeOverlap(self.lastSDR[<NUM_LIT:0>], self.lastSDR[<NUM_LIT:1>]), <NUM_LIT:9>,<EOL>\"<STR_LIT>\")<EOL>for i in [<NUM_LIT:2>, <NUM_LIT:3>, <NUM_LIT:4>]:<EOL><INDENT>for j in range(<NUM_LIT:5>):<EOL><INDENT>if (i!=j):<EOL><INDENT>self.assertLess(_computeOverlap(self.lastSDR[i], self.lastSDR[j]),<EOL><NUM_LIT>, \"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Verify that all SDRs have the properties desired for this test.\n\nThe bounds for checking overlap are set fairly loosely here since there is\nsome variance due to randomness and the artificial parameters used in this\ntest.", "id": "f17289:c0:m2"}
{"signature": "def checkCell0(tm):", "body": "for c in range(tm.numberOfCols):<EOL><INDENT>assert tm.getNumSegmentsInCell(c, <NUM_LIT:0>) == <NUM_LIT:0><EOL><DEDENT>", "docstring": "Check that cell 0 has no incoming segments", "id": "f17295:m0"}
{"signature": "def setVerbosity(verbosity, tm, tmPy):", "body": "tm.cells4.setVerbosity(verbosity)<EOL>tm.verbosity = verbosity<EOL>tmPy.verbosity = verbosity<EOL>", "docstring": "Set verbosity levels of the TM's", "id": "f17295:m1"}
{"signature": "def basicTest(self):", "body": "<EOL>tm = BacktrackingTMCPP(numberOfCols=<NUM_LIT:10>, cellsPerColumn=<NUM_LIT:3>,<EOL>initialPerm=<NUM_LIT>, connectedPerm= <NUM_LIT>,<EOL>minThreshold=<NUM_LIT:2>, newSynapseCount=<NUM_LIT:5>,<EOL>permanenceInc=<NUM_LIT>, permanenceDec= <NUM_LIT>,<EOL>permanenceMax=<NUM_LIT:1>, globalDecay=<NUM_LIT>,<EOL>activationThreshold=<NUM_LIT:4>, doPooling=False,<EOL>segUpdateValidDuration=<NUM_LIT:5>, seed=SEED,<EOL>verbosity=VERBOSITY)<EOL>tm.retrieveLearningStates = True<EOL>tm.makeCells4Ephemeral = False<EOL>pickle.dump(tm, open(\"<STR_LIT>\", \"<STR_LIT:wb>\"))<EOL>tm2 = pickle.load(open(\"<STR_LIT>\"))<EOL>self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY, checkStates=False))<EOL>for i in range(<NUM_LIT:5>):<EOL><INDENT>x = numpy.zeros(tm.numberOfCols, dtype='<STR_LIT>')<EOL>_RGEN.initializeUInt32Array(x, <NUM_LIT:2>)<EOL>tm.learn(x)<EOL><DEDENT>tm.reset()<EOL>tm.makeCells4Ephemeral = False<EOL>pickle.dump(tm, open(\"<STR_LIT>\", \"<STR_LIT:wb>\"))<EOL>tm2 = pickle.load(open(\"<STR_LIT>\"))<EOL>self.assertTrue(fdrutils.tmDiff2(tm, tm2, VERBOSITY))<EOL>patterns = numpy.zeros((<NUM_LIT:4>, tm.numberOfCols), dtype='<STR_LIT>')<EOL>for i in range(<NUM_LIT:4>):<EOL><INDENT>_RGEN.initializeUInt32Array(patterns[i], <NUM_LIT:2>)<EOL><DEDENT>for i in range(<NUM_LIT:10>):<EOL><INDENT>x = numpy.zeros(tm.numberOfCols, dtype='<STR_LIT>')<EOL>_RGEN.initializeUInt32Array(x, <NUM_LIT:2>)<EOL>tm.infer(x)<EOL>if i > <NUM_LIT:0>:<EOL><INDENT>tm._checkPrediction(patterns)<EOL><DEDENT><DEDENT>", "docstring": "Basic test (creation, pickling, basic run of learning and inference)", "id": "f17295:c0:m0"}
{"signature": "def _createNetwork():", "body": "network = Network()<EOL>network.addRegion('<STR_LIT>', '<STR_LIT>', '<STR_LIT:{}>')<EOL>network.addRegion('<STR_LIT>', '<STR_LIT>', '<STR_LIT:{}>')<EOL>_createSensorToClassifierLinks(network, '<STR_LIT>', '<STR_LIT>')<EOL>sensorRegion = network.regions['<STR_LIT>'].getSelf()<EOL>encoderParams = {'<STR_LIT>': {'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:1>,<EOL>'<STR_LIT:name>': '<STR_LIT>',<EOL>'<STR_LIT:type>': '<STR_LIT>'}}<EOL>encoder = MultiEncoder()<EOL>encoder.addMultipleEncoders(encoderParams)<EOL>sensorRegion.encoder = encoder<EOL>testDir = os.path.dirname(os.path.abspath(__file__))<EOL>inputFile = os.path.join(testDir, '<STR_LIT>', '<STR_LIT>')<EOL>dataSource = FileRecordStream(streamID=inputFile)<EOL>sensorRegion.dataSource = dataSource<EOL>network.regions['<STR_LIT>'].setParameter('<STR_LIT>', '<STR_LIT>')<EOL>return network<EOL>", "docstring": "Create a network with a RecordSensor region and a SDRClassifier region", "id": "f17305:m0"}
{"signature": "def _createNetwork():", "body": "network = Network()<EOL>network.addRegion('<STR_LIT>', '<STR_LIT>', '<STR_LIT:{}>')<EOL>sensorRegion = network.regions['<STR_LIT>'].getSelf()<EOL>encoderParams = {'<STR_LIT>': {'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': <NUM_LIT>,<EOL>'<STR_LIT>': <NUM_LIT:1>,<EOL>'<STR_LIT:name>': '<STR_LIT>',<EOL>'<STR_LIT:type>': '<STR_LIT>'}}<EOL>encoder = MultiEncoder()<EOL>encoder.addMultipleEncoders(encoderParams)<EOL>sensorRegion.encoder = encoder<EOL>testDir = os.path.dirname(os.path.abspath(__file__))<EOL>inputFile = os.path.join(testDir, '<STR_LIT>', '<STR_LIT>')<EOL>dataSource = FileRecordStream(streamID=inputFile)<EOL>sensorRegion.dataSource = dataSource<EOL>network.regions['<STR_LIT>'].setParameter('<STR_LIT>', '<STR_LIT>')<EOL>return network<EOL>", "docstring": "Create network with one RecordSensor region.", "id": "f17306:m0"}
{"signature": "def setUp(self):", "body": "self.files = {}<EOL>with tempfile.NamedTemporaryFile(<EOL>prefix='<STR_LIT>', delete=False) as outp:<EOL><INDENT>self.addCleanup(os.remove, outp.name)<EOL>with open(resource_filename(__name__, '<STR_LIT>')) as inp:<EOL><INDENT>outp.write(inp.read())<EOL><DEDENT>self.files['<STR_LIT>'] = outp.name<EOL><DEDENT>with tempfile.NamedTemporaryFile(<EOL>prefix='<STR_LIT>', delete=False) as outp:<EOL><INDENT>self.addCleanup(os.remove, outp.name)<EOL>with open(resource_filename(__name__, '<STR_LIT>')) as inp:<EOL><INDENT>outp.write(inp.read())<EOL><DEDENT>self.files['<STR_LIT>'] = outp.name<EOL><DEDENT>", "docstring": "configuration.Configuration relies on static methods\n        which load files by name.  Since we need to be able to run tests and\n        potentially change the content of those files between tests without\n        interfering with one another and with the system configuration, this\n        setUp() function will allocate temporary files used only during the using\n        conf/nupic-default.xml and conf/nupic-site.xml (relative to the unit tests)\n        as templates.", "id": "f17311:c0:m0"}
{"signature": "def _getTempFileName():", "body": "handle = tempfile.NamedTemporaryFile(prefix='<STR_LIT:test>', suffix='<STR_LIT>', dir='<STR_LIT:.>')<EOL>filename = handle.name<EOL>handle.close()<EOL>return filename<EOL>", "docstring": "Creates unique file name that starts with 'test' and ends with '.txt'.", "id": "f17319:m0"}
{"signature": "@classmethod<EOL><INDENT>def setUpClass(cls):<DEDENT>", "body": "for example in cls.examples:<EOL><INDENT>predictionGenerator = _getPredictionsGenerator(cls.examplesDir, example)<EOL>for prediction in predictionGenerator(MAX_PREDICTIONS):<EOL><INDENT>cls.oneStepPredictions[example].append(prediction[<NUM_LIT:0>])<EOL>cls.oneStepConfidences[example].append(prediction[<NUM_LIT:1>])<EOL>cls.fiveStepPredictions[example].append(prediction[<NUM_LIT:2>])<EOL>cls.fiveStepConfidences[example].append(prediction[<NUM_LIT:3>])<EOL><DEDENT><DEDENT>", "docstring": "Get the predictions and prediction confidences for all examples.", "id": "f17338:c0:m0"}
{"signature": "def _getPredictionsGenerator(examplesDir, exampleName):", "body": "sys.path.insert(<NUM_LIT:0>, os.path.join(examplesDir, exampleName))<EOL>modName = \"<STR_LIT>\" % exampleName<EOL>mod = __import__(modName, fromlist=[\"<STR_LIT>\"])<EOL>return getattr(mod, \"<STR_LIT>\")<EOL>", "docstring": "Get predictions generator for one of the quick-start example. \n\n.. note::\n\n  The examples are not part of the nupic package so we need to manually \n  append the example module path to syspath.\n\n:param examplesDir: \n  (str) path to the example parent directory.\n:param exampleName: \n  (str) name of the example. E.g: \"opf\", \"network\", \"algo\".\n:return predictionsGenerator: \n  (function) predictions generator functions.", "id": "f17338:m0"}
{"signature": "def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =<NUM_LIT:100>, nSequences =[<NUM_LIT:2>],<EOL>pctShared = <NUM_LIT>, seqGenMode = '<STR_LIT>',<EOL>shouldFail = False):", "body": "print(\"<STR_LIT>\")<EOL>nFailed = <NUM_LIT:0><EOL>subsequenceStartPos = <NUM_LIT:10><EOL>assert subsequenceStartPos < sequenceLength<EOL>for numSequences in nSequences:<EOL><INDENT>print(\"<STR_LIT>\",sequenceLength, end='<STR_LIT:U+0020>')<EOL>print(\"<STR_LIT>\",cellsPerColumn,\"<STR_LIT>\",nTests,\"<STR_LIT>\", numCols)<EOL>print(\"<STR_LIT>\",numSequences, \"<STR_LIT>\", pctShared, end='<STR_LIT:U+0020>')<EOL>print(\"<STR_LIT>\", seqGenMode)<EOL>for k in range(nTests): <EOL><INDENT>trainingSet = buildTrainingSet(numSequences = numSequences,<EOL>sequenceLength = sequenceLength,<EOL>pctShared = pctShared, seqGenMode = seqGenMode,<EOL>subsequenceStartPos = subsequenceStartPos,<EOL>numCols = numCols,<EOL>minOnes = <NUM_LIT>, maxOnes = <NUM_LIT>)<EOL>print(\"<STR_LIT>\")<EOL>numFailures3, numStrictErrors3, numPerfect3, tm3 =_testSequence(trainingSet,<EOL>nTrainingReps = <NUM_LIT:10>,<EOL>numberOfCols = numCols,<EOL>cellsPerColumn = cellsPerColumn,<EOL>initialPerm = <NUM_LIT>,<EOL>connectedPerm = <NUM_LIT>,<EOL>minThreshold = <NUM_LIT:12>,<EOL>permanenceInc = <NUM_LIT>,<EOL>permanenceDec = <NUM_LIT:0.1>,<EOL>permanenceMax = <NUM_LIT:1>,<EOL>globalDecay = <NUM_LIT>,<EOL>newSynapseCount = <NUM_LIT:15>,<EOL>activationThreshold = <NUM_LIT:12>,<EOL>doPooling = False,<EOL>shouldFail = shouldFail)<EOL>print(\"<STR_LIT>\")<EOL>numFailures, numStrictErrors, numPerfect, tm2 =_testSequence(trainingSet,<EOL>nTrainingReps = <NUM_LIT:2>,<EOL>numberOfCols = numCols,<EOL>cellsPerColumn = cellsPerColumn,<EOL>initialPerm = <NUM_LIT>,<EOL>connectedPerm = <NUM_LIT>,<EOL>minThreshold = <NUM_LIT:12>,<EOL>permanenceInc = <NUM_LIT>,<EOL>permanenceDec = <NUM_LIT:0>,<EOL>permanenceMax = <NUM_LIT:1>,<EOL>globalDecay = <NUM_LIT>,<EOL>newSynapseCount = <NUM_LIT:15>,<EOL>activationThreshold = 
<NUM_LIT:12>,<EOL>doPooling = False,<EOL>shouldFail = shouldFail)<EOL>print(\"<STR_LIT>\")<EOL>numFailures1, numStrictErrors1, numPerfect1, tm1 =_testSequence(trainingSet,<EOL>nTrainingReps = <NUM_LIT:1>,<EOL>numberOfCols = numCols,<EOL>cellsPerColumn = cellsPerColumn,<EOL>initialPerm = <NUM_LIT>,<EOL>connectedPerm = <NUM_LIT>,<EOL>minThreshold = <NUM_LIT:12>,<EOL>permanenceInc = <NUM_LIT>,<EOL>permanenceDec = <NUM_LIT:0>,<EOL>permanenceMax = <NUM_LIT:1>,<EOL>globalDecay = <NUM_LIT>,<EOL>newSynapseCount = <NUM_LIT:15>,<EOL>activationThreshold = <NUM_LIT:12>,<EOL>doPooling = False,<EOL>shouldFail = shouldFail)<EOL>segmentInfo1 = tm1.getSegmentInfo()<EOL>segmentInfo2 = tm2.getSegmentInfo()<EOL>if (abs(segmentInfo1[<NUM_LIT:0>] - segmentInfo2[<NUM_LIT:0>]) > <NUM_LIT:3>) or(abs(segmentInfo1[<NUM_LIT:1>] - segmentInfo2[<NUM_LIT:1>]) > <NUM_LIT:3>*<NUM_LIT:15>) :<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(segmentInfo1)<EOL>print(segmentInfo2)<EOL>print(tm3.getSegmentInfo())<EOL>tm3.trimSegments()<EOL>print(tm3.getSegmentInfo())<EOL>print(\"<STR_LIT>\")<EOL>print(numFailures1, numStrictErrors1, numPerfect1)<EOL>print(numFailures, numStrictErrors, numPerfect)<EOL>print(numFailures3, numStrictErrors3, numPerfect3)<EOL>numFailures += <NUM_LIT:1><EOL><DEDENT>if numFailures == <NUM_LIT:0> and not shouldFailor numFailures > <NUM_LIT:0> and shouldFail:<EOL><INDENT>print(\"<STR_LIT>\", end='<STR_LIT:U+0020>')<EOL>if shouldFail:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>print()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>nFailed = nFailed + <NUM_LIT:1><EOL>print(\"<STR_LIT>\", numFailures)<EOL>print(\"<STR_LIT>\", numStrictErrors)<EOL>print(\"<STR_LIT>\", numPerfect)<EOL><DEDENT><DEDENT><DEDENT>return nFailed<EOL>", "docstring": "Still need to test:\n    Two overlapping sequences. OK to get new segments but check that we can\n    get correct high order prediction after multiple reps.", "id": "f17345:m19"}
{"signature": "def buildHL0bTrainingSet(numOnes=<NUM_LIT:5>):", "body": "numPatterns = <NUM_LIT><EOL>p = getSimplePatterns(numOnes, numPatterns)<EOL>s = []<EOL>s.append(p[rgen.randint(<NUM_LIT:5>,numPatterns)])<EOL>for i in range(<NUM_LIT:50>):<EOL><INDENT>r = rgen.randint(<NUM_LIT:5>,numPatterns)<EOL>print(r, end='<STR_LIT:U+0020>')<EOL>s.append(p[r])<EOL>if rgen.binomial(<NUM_LIT:1>, <NUM_LIT:0.5>) > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\", end='<STR_LIT:U+0020>')<EOL>s.append(p[<NUM_LIT:0>])<EOL>s.append(p[<NUM_LIT:1>])<EOL>s.append(p[<NUM_LIT:2>])<EOL>s.append(p[<NUM_LIT:4>])<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\", end='<STR_LIT:U+0020>')<EOL>s.append(p[<NUM_LIT:1>])<EOL>s.append(p[<NUM_LIT:2>])<EOL>s.append(p[<NUM_LIT:3>])<EOL><DEDENT>r = rgen.randint(<NUM_LIT:5>,numPatterns)<EOL>s.append(p[r])<EOL>print(r, end='<STR_LIT:U+0020>')<EOL><DEDENT>print()<EOL>return ([s], [ [p[<NUM_LIT:0>], p[<NUM_LIT:1>], p[<NUM_LIT:2>], p[<NUM_LIT:4>]],  [p[<NUM_LIT:1>], p[<NUM_LIT:2>], p[<NUM_LIT:3>]] ])<EOL>", "docstring": "Simple sequences for HL0b. Each pattern in the sequence has a series of 1's\n    in a specific set of columns.\n      There are 23 patterns, p0 to p22.\n      The sequences we want to learn are p1->p2->p3 and p0->p1->p2->p4.\n      We create a very long sequence consisting of these two sub-sequences\n      intermixed with noise, such as:\n            N N p0 p1 p2 p4 N N p1 p2 p3 N N p1 p2 p3\n      N is randomly chosen from p5 to p22", "id": "f17345:m8"}
{"signature": "def getSimplePatterns(numOnes, numPatterns):", "body": "numCols = numOnes * numPatterns<EOL>p = []<EOL>for i in range(numPatterns):<EOL><INDENT>x = numpy.zeros(numCols, dtype='<STR_LIT>')<EOL>x[i*numOnes:(i+<NUM_LIT:1>)*numOnes] = <NUM_LIT:1><EOL>p.append(x)<EOL><DEDENT>return p<EOL>", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n    bits on. There are numPatterns*numOnes bits in the vector.", "id": "f17345:m4"}
{"signature": "def buildSequencePool(numSequences = <NUM_LIT:10>,<EOL>seqLen = [<NUM_LIT:2>,<NUM_LIT:3>,<NUM_LIT:4>],<EOL>numPatterns = <NUM_LIT:5>,<EOL>numOnBitsPerPattern = <NUM_LIT:3>,<EOL>patternOverlap = <NUM_LIT:0>,<EOL>**kwargs<EOL>):", "body": "<EOL>patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)<EOL>numCols = len(patterns[<NUM_LIT:0>])<EOL>trainingSequences = []<EOL>for i in range(numSequences):<EOL><INDENT>sequence = []<EOL>length = random.choice(seqLen)<EOL>for j in range(length):<EOL><INDENT>patIdx = random.choice(range(numPatterns))<EOL>sequence.append(patterns[patIdx])<EOL><DEDENT>trainingSequences.append(sequence)<EOL><DEDENT>if VERBOSITY >= <NUM_LIT:3>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>printAllTrainingSequences(trainingSequences)<EOL><DEDENT>return (numCols, trainingSequences)<EOL>", "docstring": "Create a bunch of sequences of various lengths, all built from\n    a fixed set of patterns.\n\n    Parameters:\n    -----------------------------------------------------\n    numSequences:         Number of training sequences to generate\n    seqLen:               List of possible sequence lengths\n    numPatterns:          How many possible patterns there are to use within\n                            sequences\n    numOnBitsPerPattern:  Number of ON bits in each TM input pattern\n    patternOverlap:       Max number of bits of overlap between any 2 patterns\n    retval:               (numCols, trainingSequences)\n                            numCols - width of the patterns\n                            trainingSequences - a list of training sequences", "id": "f17347:m4"}
{"signature": "def createTMs(includeCPP = True,<EOL>includePy = True,<EOL>numCols = <NUM_LIT:100>,<EOL>cellsPerCol = <NUM_LIT:4>,<EOL>activationThreshold = <NUM_LIT:3>,<EOL>minThreshold = <NUM_LIT:3>,<EOL>newSynapseCount = <NUM_LIT:3>,<EOL>initialPerm = <NUM_LIT>,<EOL>permanenceInc = <NUM_LIT:0.1>,<EOL>permanenceDec = <NUM_LIT:0.0>,<EOL>globalDecay = <NUM_LIT:0.0>,<EOL>pamLength = <NUM_LIT:0>,<EOL>checkSynapseConsistency = True,<EOL>maxInfBacktrack = <NUM_LIT:0>,<EOL>maxLrnBacktrack = <NUM_LIT:0>,<EOL>**kwargs<EOL>):", "body": "<EOL>connectedPerm = <NUM_LIT:0.5><EOL>tms = dict()<EOL>if includeCPP:<EOL><INDENT>if VERBOSITY >= <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>cpp_tm = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = cellsPerCol,<EOL>initialPerm = initialPerm, connectedPerm = connectedPerm,<EOL>minThreshold = minThreshold, newSynapseCount = newSynapseCount,<EOL>permanenceInc = permanenceInc, permanenceDec = permanenceDec,<EOL>activationThreshold = activationThreshold,<EOL>globalDecay = globalDecay, burnIn = <NUM_LIT:1>,<EOL>seed=SEED, verbosity=VERBOSITY,<EOL>checkSynapseConsistency = checkSynapseConsistency,<EOL>collectStats = True,<EOL>pamLength = pamLength,<EOL>maxInfBacktrack = maxInfBacktrack,<EOL>maxLrnBacktrack = maxLrnBacktrack,<EOL>)<EOL>cpp_tm.retrieveLearningStates = True<EOL>tms['<STR_LIT>'] = cpp_tm<EOL><DEDENT>if includePy:<EOL><INDENT>if VERBOSITY >= <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>py_tm = BacktrackingTM(numberOfCols = numCols, cellsPerColumn = cellsPerCol,<EOL>initialPerm = initialPerm, connectedPerm = connectedPerm,<EOL>minThreshold = minThreshold, newSynapseCount = newSynapseCount,<EOL>permanenceInc = permanenceInc, permanenceDec = permanenceDec,<EOL>activationThreshold = activationThreshold,<EOL>globalDecay = globalDecay, burnIn = <NUM_LIT:1>,<EOL>seed=SEED, verbosity=VERBOSITY,<EOL>collectStats = True,<EOL>pamLength = pamLength,<EOL>maxInfBacktrack = maxInfBacktrack,<EOL>maxLrnBacktrack = 
maxLrnBacktrack,<EOL>)<EOL>tms['<STR_LIT>'] = py_tm<EOL><DEDENT>return tms<EOL>", "docstring": "Create one or more TM instances, placing each into a dict keyed by\n    name.\n\n    Parameters:\n    ------------------------------------------------------------------\n    retval:   tms - dict of TM instances", "id": "f17347:m5"}
{"signature": "def printOneTrainingVector(x):", "body": "print('<STR_LIT>'.join('<STR_LIT:1>' if k != <NUM_LIT:0> else '<STR_LIT:.>' for k in x))<EOL>", "docstring": "Print a single vector succinctly.", "id": "f17347:m0"}
{"signature": "def getSimplePatterns(numOnes, numPatterns, patternOverlap=<NUM_LIT:0>):", "body": "assert (patternOverlap < numOnes)<EOL>numNewBitsInEachPattern = numOnes - patternOverlap<EOL>numCols = numNewBitsInEachPattern * numPatterns + patternOverlap<EOL>p = []<EOL>for i in range(numPatterns):<EOL><INDENT>x = numpy.zeros(numCols, dtype='<STR_LIT>')<EOL>startBit = i*numNewBitsInEachPattern<EOL>nextStartBit = startBit + numOnes<EOL>x[startBit:nextStartBit] = <NUM_LIT:1><EOL>p.append(x)<EOL><DEDENT>return p<EOL>", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n    bits on. The amount of overlap between consecutive patterns is\n    configurable, via the patternOverlap parameter.\n\n    Parameters:\n    -----------------------------------------------------------------------\n    numOnes:        Number of bits ON in each pattern\n    numPatterns:    Number of unique patterns to generate\n    patternOverlap: Number of bits of overlap between each successive pattern\n    retval:         patterns", "id": "f17347:m2"}
{"signature": "def assertNoTMDiffs(tms):", "body": "if len(tms) == <NUM_LIT:1>:<EOL><INDENT>return<EOL><DEDENT>if len(tms) > <NUM_LIT:2>:<EOL><INDENT>raise \"<STR_LIT>\"<EOL><DEDENT>same = fdrutils.tmDiff2(*list(tms.values()), verbosity=VERBOSITY)<EOL>assert(same)<EOL>return<EOL>", "docstring": "Check for diffs among the TM instances in the passed in tms dict and\nraise an assert if any are detected\n\nParameters:\n---------------------------------------------------------------------\ntms:                  dict of TM instances", "id": "f17347:m6"}
{"signature": "def _createTMs(numCols, cellsPerColumn=<NUM_LIT:4>, checkSynapseConsistency=True):", "body": "<EOL>minThreshold = <NUM_LIT:4><EOL>activationThreshold = <NUM_LIT:4><EOL>newSynapseCount = <NUM_LIT:5><EOL>initialPerm = <NUM_LIT><EOL>connectedPerm = <NUM_LIT:0.5><EOL>permanenceInc = <NUM_LIT:0.1><EOL>permanenceDec = <NUM_LIT><EOL>globalDecay = <NUM_LIT:0.0><EOL>if VERBOSITY > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>cppTm = BacktrackingTMCPP(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,<EOL>initialPerm=initialPerm, connectedPerm=connectedPerm,<EOL>minThreshold=minThreshold, newSynapseCount=newSynapseCount,<EOL>permanenceInc=permanenceInc, permanenceDec=permanenceDec,<EOL>activationThreshold=activationThreshold,<EOL>globalDecay=globalDecay, burnIn=<NUM_LIT:1>,<EOL>seed=SEED, verbosity=VERBOSITY,<EOL>checkSynapseConsistency=checkSynapseConsistency,<EOL>pamLength=<NUM_LIT:1000>)<EOL>if VERBOSITY > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>pyTm = BacktrackingTM(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,<EOL>initialPerm=initialPerm, connectedPerm=connectedPerm,<EOL>minThreshold=minThreshold, newSynapseCount=newSynapseCount,<EOL>permanenceInc=permanenceInc, permanenceDec=permanenceDec,<EOL>activationThreshold=activationThreshold,<EOL>globalDecay=globalDecay, burnIn=<NUM_LIT:1>,<EOL>seed=SEED, verbosity=VERBOSITY,<EOL>pamLength=<NUM_LIT:1000>)<EOL>return cppTm, pyTm<EOL>", "docstring": "Create TM and BacktrackingTMCPP instances with identical parameters.", "id": "f17350:m2"}
{"signature": "def _getSimplePatterns(numOnes, numPatterns):", "body": "numCols = numOnes * numPatterns<EOL>p = []<EOL>for i in range(numPatterns):<EOL><INDENT>x = numpy.zeros(numCols, dtype='<STR_LIT>')<EOL>x[i*numOnes:(i+<NUM_LIT:1>)*numOnes] = <NUM_LIT:1><EOL>p.append(x)<EOL><DEDENT>return p<EOL>", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n    bits on. There are numPatterns*numOnes bits in the vector.", "id": "f17350:m0"}
{"signature": "def _computeTMMetric(tm=None, sequences=None, useResets=True, verbosity=<NUM_LIT:1>):", "body": "datasetScore = <NUM_LIT:0><EOL>numPredictions = <NUM_LIT:0><EOL>tm.resetStats()<EOL>for seqIdx, seq in enumerate(sequences):<EOL><INDENT>if useResets:<EOL><INDENT>tm.reset()<EOL><DEDENT>seq = numpy.array(seq, dtype='<STR_LIT>')<EOL>if verbosity > <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>for i, inputPattern in enumerate(seq):<EOL><INDENT>if verbosity > <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT>\" % (seqIdx, i), end='<STR_LIT:U+0020>')<EOL>print(\"<STR_LIT>\", inputPattern)<EOL><DEDENT>y = tm.infer(inputPattern)<EOL>if verbosity > <NUM_LIT:2>:<EOL><INDENT>stats = tm.getStats()<EOL>if stats['<STR_LIT>'] > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\", stats['<STR_LIT>'])<EOL><DEDENT><DEDENT>if verbosity > <NUM_LIT:3>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>predOut = numpy.sum(tm.predictedState['<STR_LIT:t>'], axis=<NUM_LIT:1>)<EOL>actOut  = numpy.sum(tm.activeState['<STR_LIT:t>'], axis=<NUM_LIT:1>)<EOL>outout  = numpy.sum(y.reshape(tm.activeState['<STR_LIT:t>'].shape), axis=<NUM_LIT:1>)<EOL>print(\"<STR_LIT>\", predOut.nonzero())<EOL>print(\"<STR_LIT>\", actOut.nonzero())<EOL>print(\"<STR_LIT>\", inputPattern.nonzero())<EOL>print(\"<STR_LIT>\", outout.nonzero())<EOL><DEDENT><DEDENT><DEDENT>stats = tm.getStats()<EOL>datasetScore = stats['<STR_LIT>']<EOL>numPredictions = stats['<STR_LIT>']<EOL>print(\"<STR_LIT>\", datasetScore, end='<STR_LIT:U+0020>')<EOL>print(\"<STR_LIT>\", numPredictions)<EOL>return datasetScore, numPredictions<EOL>", "docstring": "Given a trained TM and a list of sequences, compute the temporal memory\n    performance metric on those sequences.\n\n    Parameters:\n    ===========\n    tm:               A trained temporal memory.\n    sequences:        A list of sequences. 
Each sequence is a list of numpy\n                      vectors.\n    useResets:        If True, the TM's reset method will be called before the\n                      the start of each new sequence.\n    verbosity:        An integer controlling the level of printouts. The higher\n                      the number the more debug printouts.\n\n    Return value:\n    ============\n    The following pair is returned:  (score, numPredictions)\n\n    score:            The average prediction score per pattern.\n    numPredictions:   The total number of predictions that were made.", "id": "f17350:m3"}
{"signature": "def _printOneTrainingVector(self, x):", "body": "print('<STR_LIT>'.join('<STR_LIT:1>' if k != <NUM_LIT:0> else '<STR_LIT:.>' for k in x))<EOL>", "docstring": "Print a single vector succinctly.", "id": "f17351:c0:m1"}
{"signature": "def _getSimplePatterns(self, numOnes, numPatterns):", "body": "numCols = numOnes * numPatterns<EOL>p = []<EOL>for i in range(numPatterns):<EOL><INDENT>x = numpy.zeros(numCols, dtype='<STR_LIT>')<EOL>x[i*numOnes:(i+<NUM_LIT:1>)*numOnes] = <NUM_LIT:1><EOL>p.append(x)<EOL><DEDENT>return p<EOL>", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n        bits on. There are numPatterns*numOnes bits in the vector. These patterns\n        are used as elements of sequences when building up a training set.", "id": "f17351:c0:m5"}
{"signature": "def _setVerbosity(self, verbosity, tm, tmPy):", "body": "tm.cells4.setVerbosity(verbosity)<EOL>tm.verbosity = verbosity<EOL>tmPy.verbosity = verbosity<EOL>", "docstring": "Set verbosity level on the TM", "id": "f17351:c0:m3"}
{"signature": "def _createTMs(self, numCols, fixedResources=False,<EOL>checkSynapseConsistency = True):", "body": "<EOL>minThreshold = <NUM_LIT:4><EOL>activationThreshold = <NUM_LIT:8><EOL>newSynapseCount = <NUM_LIT:15><EOL>initialPerm = <NUM_LIT><EOL>connectedPerm = <NUM_LIT:0.5><EOL>permanenceInc = <NUM_LIT:0.1><EOL>permanenceDec = <NUM_LIT><EOL>if fixedResources:<EOL><INDENT>permanenceDec = <NUM_LIT:0.1><EOL>maxSegmentsPerCell = <NUM_LIT:5><EOL>maxSynapsesPerSegment = <NUM_LIT:15><EOL>globalDecay = <NUM_LIT:0><EOL>maxAge = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>permanenceDec = <NUM_LIT><EOL>maxSegmentsPerCell = -<NUM_LIT:1><EOL>maxSynapsesPerSegment = -<NUM_LIT:1><EOL>globalDecay = <NUM_LIT><EOL>maxAge = <NUM_LIT:1><EOL><DEDENT>if g_testCPPTM:<EOL><INDENT>if g_options.verbosity > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>cppTM = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = <NUM_LIT:4>,<EOL>initialPerm = initialPerm, connectedPerm = connectedPerm,<EOL>minThreshold = minThreshold,<EOL>newSynapseCount = newSynapseCount,<EOL>permanenceInc = permanenceInc,<EOL>permanenceDec = permanenceDec,<EOL>activationThreshold = activationThreshold,<EOL>globalDecay = globalDecay, maxAge=maxAge, burnIn = <NUM_LIT:1>,<EOL>seed=g_options.seed, verbosity=g_options.verbosity,<EOL>checkSynapseConsistency = checkSynapseConsistency,<EOL>pamLength = <NUM_LIT:1000>,<EOL>maxSegmentsPerCell = maxSegmentsPerCell,<EOL>maxSynapsesPerSegment = maxSynapsesPerSegment,<EOL>)<EOL>cppTM.retrieveLearningStates = True<EOL><DEDENT>else:<EOL><INDENT>cppTM = None<EOL><DEDENT>if g_options.verbosity > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>pyTM = BacktrackingTM(numberOfCols = numCols, cellsPerColumn = <NUM_LIT:4>,<EOL>initialPerm = initialPerm,<EOL>connectedPerm = connectedPerm,<EOL>minThreshold = minThreshold,<EOL>newSynapseCount = newSynapseCount,<EOL>permanenceInc = permanenceInc,<EOL>permanenceDec = permanenceDec,<EOL>activationThreshold = 
activationThreshold,<EOL>globalDecay = globalDecay, maxAge=maxAge, burnIn = <NUM_LIT:1>,<EOL>seed=g_options.seed, verbosity=g_options.verbosity,<EOL>pamLength = <NUM_LIT:1000>,<EOL>maxSegmentsPerCell = maxSegmentsPerCell,<EOL>maxSynapsesPerSegment = maxSynapsesPerSegment,<EOL>)<EOL>return cppTM, pyTM<EOL>", "docstring": "Create an instance of the appropriate temporal memory. We isolate\n        all parameters as constants specified here.", "id": "f17351:c0:m4"}
{"signature": "def getNumTestPatterns(short=<NUM_LIT:0>):", "body": "if short==<NUM_LIT:0>:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\")<EOL>numPatterns = numpy.random.randint(<NUM_LIT>, <NUM_LIT>)<EOL>numClasses = numpy.random.randint(<NUM_LIT:50>, <NUM_LIT>)<EOL><DEDENT>elif short==<NUM_LIT:1>:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\")<EOL>numPatterns = numpy.random.randint(<NUM_LIT>, <NUM_LIT>)<EOL>numClasses = numpy.random.randint(<NUM_LIT:50>, <NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\")<EOL>numPatterns = numpy.random.randint(<NUM_LIT>, <NUM_LIT>)<EOL>numClasses = numpy.random.randint(<NUM_LIT:30>, <NUM_LIT:1000>)<EOL><DEDENT>LOGGER.info(\"<STR_LIT>\", numPatterns)<EOL>LOGGER.info(\"<STR_LIT>\", numClasses)<EOL>return numPatterns, numClasses<EOL>", "docstring": "Return the number of patterns and classes the test should use.", "id": "f17354:m2"}
{"signature": "def runTestKNNClassifier(self, short = <NUM_LIT:0>):", "body": "failures = \"<STR_LIT>\"<EOL>if short != <NUM_LIT:2>:<EOL><INDENT>numpy.random.seed(<NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>seed_value = int(time.time())<EOL>numpy.random.seed(seed_value)<EOL>LOGGER.info('<STR_LIT>', seed_value)<EOL>f = open('<STR_LIT>', '<STR_LIT:a>')<EOL>f.write(str(seed_value))<EOL>f.write('<STR_LIT:\\n>')<EOL>f.close()<EOL><DEDENT>failures += simulateKMoreThanOne()<EOL>LOGGER.info(\"<STR_LIT>\")<EOL>numPatterns, numClasses = getNumTestPatterns(short)<EOL>patternSize = <NUM_LIT:100><EOL>patterns = numpy.random.rand(numPatterns, patternSize)<EOL>patternDict = dict()<EOL>testDict = dict()<EOL>for i in xrange(numPatterns):<EOL><INDENT>patternDict[i] = dict()<EOL>patternDict[i]['<STR_LIT>'] = patterns[i]<EOL>patternDict[i]['<STR_LIT>'] = numpy.random.randint(<NUM_LIT:0>, numClasses-<NUM_LIT:1>)<EOL>testDict[i] = copy.deepcopy(patternDict[i])<EOL>testDict[i]['<STR_LIT>'][:int(<NUM_LIT>*patternSize)] = numpy.random.rand()<EOL>testDict[i]['<STR_LIT>'] = None<EOL><DEDENT>LOGGER.info(\"<STR_LIT>\")<EOL>knn = KNNClassifier(k=<NUM_LIT:1>)<EOL>failures += simulateClassifier(knn, patternDict,\"<STR_LIT>\")<EOL>LOGGER.info(\"<STR_LIT>\")<EOL>knnL1 = KNNClassifier(k=<NUM_LIT:1>, distanceNorm=<NUM_LIT:1.0>)<EOL>failures += simulateClassifier(knnL1, patternDict,\"<STR_LIT>\")<EOL>LOGGER.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>knnExact = KNNClassifier(k=<NUM_LIT:1>, exact=True)<EOL>failures += simulateClassifier(knnExact,<EOL>patternDict,<EOL>\"<STR_LIT>\",<EOL>testDict=testDict)<EOL>numPatterns, numClasses = getNumTestPatterns(short)<EOL>patterns = (numpy.random.rand(numPatterns, <NUM_LIT>) > <NUM_LIT>).astype(RealNumpyDType)<EOL>patternDict = dict()<EOL>for i in patterns:<EOL><INDENT>iString = str(i.tolist())<EOL>if not patternDict.has_key(iString):<EOL><INDENT>randCategory = numpy.random.randint(<NUM_LIT:0>, numClasses-<NUM_LIT:1>)<EOL>patternDict[iString] = 
dict()<EOL>patternDict[iString]['<STR_LIT>'] = i<EOL>patternDict[iString]['<STR_LIT>'] = randCategory<EOL><DEDENT><DEDENT>LOGGER.info(\"<STR_LIT>\")<EOL>knnDense = KNNClassifier(k=<NUM_LIT:1>)<EOL>failures += simulateClassifier(knnDense, patternDict,\"<STR_LIT>\")<EOL>self.assertEqual(len(failures), <NUM_LIT:0>,<EOL>\"<STR_LIT>\" + failures)<EOL>if short == <NUM_LIT:2>:<EOL><INDENT>f = open('<STR_LIT>', '<STR_LIT:a>')<EOL>f.write('<STR_LIT>')<EOL>f.close()<EOL><DEDENT>", "docstring": "Test the KNN classifier in this module. short can be:\n            0 (short), 1 (medium), or 2 (long)", "id": "f17354:c0:m0"}
{"signature": "def simulateKMoreThanOne():", "body": "failures = \"<STR_LIT>\"<EOL>LOGGER.info(\"<STR_LIT>\")<EOL>knn = KNNClassifier(k=<NUM_LIT:3>)<EOL>v = numpy.zeros((<NUM_LIT:6>, <NUM_LIT:2>))<EOL>v[<NUM_LIT:0>] = [<NUM_LIT:1.0>, <NUM_LIT:0.0>]<EOL>v[<NUM_LIT:1>] = [<NUM_LIT:1.0>, <NUM_LIT>]<EOL>v[<NUM_LIT:2>] = [<NUM_LIT:1.0>, <NUM_LIT>]<EOL>v[<NUM_LIT:3>] = [<NUM_LIT:1.0>, <NUM_LIT>]<EOL>v[<NUM_LIT:4>] = [<NUM_LIT:1.0>, <NUM_LIT>]<EOL>v[<NUM_LIT:5>] = [<NUM_LIT:1.0>, <NUM_LIT>]<EOL>knn.learn(v[<NUM_LIT:0>], <NUM_LIT:0>)<EOL>knn.learn(v[<NUM_LIT:1>], <NUM_LIT:0>)<EOL>knn.learn(v[<NUM_LIT:2>], <NUM_LIT:0>)<EOL>knn.learn(v[<NUM_LIT:3>], <NUM_LIT:1>)<EOL>knn.learn(v[<NUM_LIT:4>], <NUM_LIT:1>)<EOL>knn.learn(v[<NUM_LIT:5>], <NUM_LIT:1>)<EOL>winner, _inferenceResult, _dist, _categoryDist = knn.infer(v[<NUM_LIT:0>])<EOL>if winner != <NUM_LIT:0>:<EOL><INDENT>failures += \"<STR_LIT>\"<EOL><DEDENT>winner, _inferenceResult, _dist, _categoryDist = knn.infer(v[<NUM_LIT:2>])<EOL>if winner != <NUM_LIT:0>:<EOL><INDENT>failures += \"<STR_LIT>\"<EOL><DEDENT>winner, _inferenceResult, _dist, _categoryDist = knn.infer(v[<NUM_LIT:3>])<EOL>if winner != <NUM_LIT:0>:<EOL><INDENT>failures += \"<STR_LIT>\"<EOL><DEDENT>winner, _inferenceResult, _dist, _categoryDist = knn.infer(v[<NUM_LIT:5>])<EOL>if winner != <NUM_LIT:1>:<EOL><INDENT>failures += \"<STR_LIT>\"<EOL><DEDENT>if len(failures) == <NUM_LIT:0>:<EOL><INDENT>LOGGER.info(\"<STR_LIT>\")<EOL><DEDENT>return failures<EOL>", "docstring": "A small test with k=3", "id": "f17354:m0"}
{"signature": "def _setupTempDirectory(filename):", "body": "tmpDir = tempfile.mkdtemp()<EOL>tmpFileName = os.path.join(tmpDir, os.path.basename(filename))<EOL>return tmpDir, tmpFileName<EOL>", "docstring": "Create a temp directory, and return path to filename in that directory", "id": "f17359:m1"}
{"signature": "def runBaseDescriptionAndPermutations(self, expDesc, hsVersion, maxModels=<NUM_LIT:2>):", "body": "<EOL>self.getModules(expDesc, hsVersion=hsVersion)<EOL>permutationsPyPath = os.path.join(g_myEnv.testOutDir, \"<STR_LIT>\")<EOL>args = [g_myEnv.testOutDir]<EOL>from nupic.frameworks.opf.experiment_runner import runExperiment<EOL>LOGGER.info(\"<STR_LIT>\")<EOL>LOGGER.info(\"<STR_LIT>\")<EOL>LOGGER.info(\"<STR_LIT>\")<EOL>LOGGER.info(\"<STR_LIT>\")<EOL>runExperiment(args)<EOL>jobParams = {'<STR_LIT>' : generatePersistentJobGUID(),<EOL>'<STR_LIT>': permutationsPyPath,<EOL>'<STR_LIT>': hsVersion,<EOL>}<EOL>if maxModels is not None:<EOL><INDENT>jobParams['<STR_LIT>'] = maxModels<EOL><DEDENT>args = ['<STR_LIT>', '<STR_LIT>' % (json.dumps(jobParams))]<EOL>self.resetExtraLogItems()<EOL>self.addExtraLogItem({'<STR_LIT>':jobParams})<EOL>LOGGER.info(\"<STR_LIT>\")<EOL>LOGGER.info(\"<STR_LIT>\")<EOL>LOGGER.info(\"<STR_LIT>\")<EOL>LOGGER.info(\"<STR_LIT>\")<EOL>jobID = hypersearch_worker.main(args)<EOL>cjDAO = ClientJobsDAO.get()<EOL>models = cjDAO.modelsGetUpdateCounters(jobID)<EOL>modelIDs = [model.modelId for model in models]<EOL>results = cjDAO.modelsGetResultAndStatus(modelIDs)<EOL>if maxModels is not None:<EOL><INDENT>self.assertEqual(len(results), maxModels, \"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (maxModels, len(results)))<EOL><DEDENT>for result in results:<EOL><INDENT>self.assertEqual(result.completionReason, cjDAO.CMPL_REASON_EOF,<EOL>\"<STR_LIT>\" % (result.completionMsg))<EOL><DEDENT>return results<EOL>", "docstring": "This does the following:\n\n        1.) Calls ExpGenerator to generate a base description file and permutations\n        file from expDescription.\n\n        2.) Verifies that description.py and permutations.py are valid python\n        modules that can be loaded\n\n        3.) Runs the base description.py as an experiment using OPF RunExperiment.\n\n        4.) 
Runs a Hypersearch using the generated permutations.py by passing it\n        to HypersearchWorker.\n\n        Parameters:\n        -------------------------------------------------------------------\n        expDesc:       JSON format experiment description\n        hsVersion:     which version of hypersearch to use ('v2'; 'v1' was dropped)\n        retval:        list of model results", "id": "f17360:c1:m6"}
{"signature": "def getModules(self, expDesc, hsVersion='<STR_LIT>'):", "body": "<EOL>shutil.rmtree(g_myEnv.testOutDir, ignore_errors=True)<EOL>args = [<EOL>\"<STR_LIT>\" % (json.dumps(expDesc)),<EOL>\"<STR_LIT>\" % (g_myEnv.testOutDir),<EOL>\"<STR_LIT>\" % (hsVersion)<EOL>]<EOL>self.addExtraLogItem({'<STR_LIT:args>':args})<EOL>experiment_generator.expGenerator(args)<EOL>descriptionPyPath = os.path.join(g_myEnv.testOutDir, \"<STR_LIT>\")<EOL>permutationsPyPath = os.path.join(g_myEnv.testOutDir, \"<STR_LIT>\")<EOL>return (self.checkPythonScript(descriptionPyPath),<EOL>self.checkPythonScript(permutationsPyPath))<EOL>", "docstring": "This does the following:\n\n        1.) Calls ExpGenerator to generate a base description file and permutations\n        file from expDescription.\n\n        2.) Verifies that description.py and permutations.py are valid python\n        modules that can be loaded\n\n        3.) Returns the loaded base description module and permutations module\n\n        Parameters:\n        -------------------------------------------------------------------\n        expDesc:       JSON format experiment description\n        hsVersion:     which version of hypersearch to use ('v2'; 'v1' was dropped)\n        retval:        (baseModule, permutationsModule)", "id": "f17360:c1:m5"}
{"signature": "def setUp(self):", "body": "global g_myEnv<EOL>if not g_myEnv:<EOL><INDENT>params = type('<STR_LIT>', (object,), {'<STR_LIT>' : resource_filename(\"<STR_LIT>\", \"<STR_LIT>\")})<EOL>g_myEnv = MyTestEnvironment(params)<EOL><DEDENT>", "docstring": "Method called to prepare the test fixture. This is called by the\n        unittest framework immediately before calling the test method; any exception\n        raised by this method will be considered an error rather than a test\n        failure. The default implementation does nothing.", "id": "f17360:c1:m1"}
{"signature": "def _getTestList():", "body": "suiteNames = ['<STR_LIT>']<EOL>testNames = []<EOL>for suite in suiteNames:<EOL><INDENT>for f in dir(eval(suite)):<EOL><INDENT>if f.startswith('<STR_LIT:test>'):<EOL><INDENT>testNames.append('<STR_LIT>' % (suite, f))<EOL><DEDENT><DEDENT><DEDENT>return testNames<EOL>", "docstring": "Get the list of tests that can be run from this module", "id": "f17360:m2"}
{"signature": "def _openOpfPredictionCsvFile(self, filepath):", "body": "<EOL>csvReader = self._openCsvFile(filepath)<EOL>names = next(csvReader)<EOL>_types = next(csvReader)<EOL>_specials = next(csvReader)<EOL>return (csvReader, names)<EOL>", "docstring": "Open an OPF prediction CSV file and advance it to the first data row\n\n        Returns:      the tuple (csvReader, fieldNames), where 'csvReader' is the\n                      csv reader object, and 'fieldNames' is a sequence of field\n                      names.", "id": "f17363:c0:m2"}
{"signature": "def shortDescription(self):", "body": "return None<EOL>", "docstring": "Override to force unittest framework to use test method names instead\n        of docstrings in the report.", "id": "f17372:c1:m2"}
{"signature": "def __executePositiveRunExperimentTest(self,<EOL>runnerPath,<EOL>experimentDirPath,<EOL>customOptions=[],<EOL>short=False):", "body": "<EOL>command = [<EOL>\"<STR_LIT>\",<EOL>runnerPath,<EOL>experimentDirPath,<EOL>]<EOL>command.extend(customOptions)<EOL>if short:<EOL><INDENT>command.append(\"<STR_LIT>\")<EOL><DEDENT>self.addExtraLogItem({'<STR_LIT>':command})<EOL>r = _executeExternalCmdAndReapOutputs(command)<EOL>self.addExtraLogItem({'<STR_LIT:result>':r})<EOL>_debugOut((\"<STR_LIT>\") % (command, r))<EOL>self.assertEqual(r['<STR_LIT>'], <NUM_LIT:0>,<EOL>(\"<STR_LIT>\") %(runnerPath, r['<STR_LIT>'],))<EOL>self.resetExtraLogItems()<EOL>return r<EOL>", "docstring": "Executes a positive RunExperiment.py test and performs\n        basic validation\n\n        runnerPath:         experiment running (LPF or OPF RunExperiment.py path)\n\n        experimentDirPath:  directory containing the description.py file of interest\n\n        short:              if True, attempt to run the experiment with --testMode\n                            flag turned on, which causes all inference and training\n                            iteration counts to be overridden with small counts.\n                            NOTE: if the (possibly aggregated) dataset has fewer\n                            rows than the count overrides, then an LPF experiment\n                            will fail.\n\n        Returns:            result from _executeExternalCmdAndReapOutputs", "id": "f17372:c1:m4"}
{"signature": "def whoisCallersCaller():", "body": "import inspect<EOL>frameObj = inspect.stack()[<NUM_LIT:2>][<NUM_LIT:0>]<EOL>return inspect.getframeinfo(frameObj)<EOL>", "docstring": "Returns: Traceback namedtuple for our caller's caller", "id": "f17372:m2"}
{"signature": "def getAllExperimentDirectories(excludedExperiments=[]):", "body": "excludedDirectories = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>excludedDirectories.extend(excludedExperiments)<EOL>return getAllDirectoriesWithFile(<EOL>path=\"<STR_LIT>\",<EOL>filename=\"<STR_LIT>\",<EOL>excludeDirs=excludedDirectories)<EOL>", "docstring": "Experiment directories are the directories with a description.py file", "id": "f17376:m1"}
{"signature": "def _getTempFileName():", "body": "handle = tempfile.NamedTemporaryFile(prefix=\"<STR_LIT:test>\", suffix=\"<STR_LIT>\", dir=\"<STR_LIT:.>\")<EOL>filename = handle.name<EOL>handle.close()<EOL>return filename<EOL>", "docstring": "Creates unique test csv file name.", "id": "f17377:m0"}
{"signature": "def _getTempFileName():", "body": "handle = tempfile.NamedTemporaryFile(prefix=\"<STR_LIT:test>\", suffix=\"<STR_LIT>\", dir=\"<STR_LIT:.>\")<EOL>filename = handle.name<EOL>handle.close()<EOL>return filename<EOL>", "docstring": "Creates unique test csv file name.", "id": "f17378:m0"}
{"signature": "def _aggregate(input, options, output, timeFieldName):", "body": "aggregator = Aggregator(aggregationInfo=options, <EOL>inputFields=input.getFields(),<EOL>timeFieldName=timeFieldName)<EOL>while True:<EOL><INDENT>inRecord = input.getNextRecord()<EOL>print(\"<STR_LIT>\", inRecord)<EOL>(outRecord, aggBookmark) = aggregator.next(record = inRecord, <EOL>curInputBookmark = None)<EOL>print(\"<STR_LIT>\", outRecord)<EOL>if outRecord is not None:<EOL><INDENT>output.appendRecord(outRecord, None)<EOL><DEDENT>if inRecord is None and outRecord is None:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Aggregate the input stream and write aggregated records to the output\n    stream", "id": "f17379:m0"}
{"signature": "def permutationFilter(perm):", "body": "<EOL>return True<EOL>", "docstring": "This function can be used to selectively filter out specific permutation\n    combinations. It is called by RunPermutations for every possible permutation\n    of the variables in the permutations dict. It should return True for valid a\n    combination of permutation values and False for an invalid one.\n\n    Parameters:\n    ---------------------------------------------------------\n    perm: dict of one possible combination of name:value\n          pairs chosen from permutations.", "id": "f17383:m1"}
{"signature": "def dummyModelParams(perm):", "body": "errScore = <NUM_LIT:50><EOL>errScore += abs(perm['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>'] - <NUM_LIT>)<EOL>errScore += abs(perm['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT:n>'] - <NUM_LIT>)<EOL>if perm['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT:address>'] is not None:<EOL><INDENT>errScore -= <NUM_LIT:20><EOL><DEDENT>if perm['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>'] is not None:<EOL><INDENT>errScore -= <NUM_LIT:10><EOL><DEDENT>waitTime = None<EOL>if eval(os.environ.get('<STR_LIT>', '<STR_LIT:False>')):<EOL><INDENT>if perm['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>'] is not None:<EOL><INDENT>waitTime = <NUM_LIT><EOL><DEDENT><DEDENT>dummyModelParams = dict(<EOL>metricValue = errScore,<EOL>iterations = int(os.environ.get('<STR_LIT>', '<STR_LIT:1>')),<EOL>waitTime = waitTime,<EOL>sysExitModelRange = os.environ.get('<STR_LIT>',<EOL>None),<EOL>errModelRange = os.environ.get('<STR_LIT>',<EOL>None),<EOL>jobFailErr = bool(os.environ.get('<STR_LIT>', False))<EOL>)<EOL>return dummyModelParams<EOL>", "docstring": "This function can be used for Hypersearch algorithm development. When\n    present, Hypersearch doesn't actually run the CLA model in the OPF, but\n    instead runs a dummy model. This function returns the dummy model params that\n    will be used. See the OPFDummyModelRunner class source code (in\n    nupic.swarming.ModelRunner) for a description of the schema\n    for the dummy model params.", "id": "f17385:m0"}
{"signature": "def permutationFilter(perm):", "body": "<EOL>return True<EOL>", "docstring": "This function can be used to selectively filter out specific permutation\n    combinations. It is called by RunPermutations for every possible permutation\n    of the variables in the permutations dict. It should return True for valid a\n    combination of permutation values and False for an invalid one.\n\n    Parameters:\n    ---------------------------------------------------------\n    perm: dict of one possible combination of name:value\n          pairs chosen from permutations.", "id": "f17387:m0"}
{"signature": "def permutationFilter(perm):", "body": "<EOL>if perm['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>'] > <NUM_LIT>:<EOL><INDENT>return False;<EOL><DEDENT>return True<EOL>", "docstring": "This function can be used to selectively filter out specific permutation\n    combinations. It is called by RunPermutations for every possible permutation\n    of the variables in the permutations dict. It should return True for valid a\n    combination of permutation values and False for an invalid one.\n\n    Parameters:\n    ---------------------------------------------------------\n    perm: dict of one possible combination of name:value\n          pairs chosen from permutations.", "id": "f17393:m0"}
{"signature": "def permutationFilter(perm):", "body": "<EOL>return True<EOL>", "docstring": "This function can be used to selectively filter out specific permutation\n    combinations. It is called by RunPermutations for every possible permutation\n    of the variables in the permutations dict. It should return True for valid a\n    combination of permutation values and False for an invalid one.\n\n    Parameters:\n    ---------------------------------------------------------\n    perm: dict of one possible combination of name:value\n          pairs chosen from permutations.", "id": "f17397:m0"}
{"signature": "def permutationFilter(perm):", "body": "<EOL>return True<EOL>", "docstring": "This function can be used to selectively filter out specific permutation\n    combinations. It is called by RunPermutations for every possible permutation\n    of the variables in the permutations dict. It should return True for valid a\n    combination of permutation values and False for an invalid one.\n\n    Parameters:\n    ---------------------------------------------------------\n    perm: dict of one possible combination of name:value\n          pairs chosen from permutations.", "id": "f17399:m1"}
{"signature": "def permutationFilter(perm):", "body": "<EOL>return True<EOL>", "docstring": "This function can be used to selectively filter out specific permutation\n    combinations. It is called by RunPermutations for every possible permutation\n    of the variables in the permutations dict. It should return True for valid a\n    combination of permutation values and False for an invalid one.\n\n    Parameters:\n    ---------------------------------------------------------\n    perm: dict of one possible combination of name:value\n          pairs chosen from permutations.", "id": "f17401:m0"}
{"signature": "def dummyModelParams(perm):", "body": "<EOL>errScore = <NUM_LIT:50><EOL>if perm['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>'] is not None:<EOL><INDENT>errScore += <NUM_LIT:10><EOL><DEDENT>if perm['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT:address>'] is not None:<EOL><INDENT>errScore -= <NUM_LIT:10><EOL><DEDENT>if perm['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>'] is not None:<EOL><INDENT>errScore -= <NUM_LIT:20><EOL><DEDENT>if perm['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']is not None:<EOL><INDENT>errScore -= <NUM_LIT:30><EOL><DEDENT>if perm['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']is not None:<EOL><INDENT>errScore -= <NUM_LIT><EOL><DEDENT>dummyModelParams = dict(<EOL>metricValue = errScore,<EOL>iterations = int(os.environ.get('<STR_LIT>', '<STR_LIT:1>')),<EOL>waitTime = None,<EOL>sysExitModelRange = os.environ.get('<STR_LIT>',<EOL>None),<EOL>errModelRange = os.environ.get('<STR_LIT>',<EOL>None),<EOL>jobFailErr = bool(os.environ.get('<STR_LIT>', False))<EOL>)<EOL>return dummyModelParams<EOL>", "docstring": "This function can be used for Hypersearch algorithm development. When\n    present, Hypersearch doesn't actually run the CLA model in the OPF, but\n    instead runs a dummy model. This function returns the dummy model params that\n    will be used. See the OPFDummyModelRunner class source code (in\n    nupic.swarming.ModelRunner) for a description of the schema\n    for the dummy model params.", "id": "f17403:m0"}
{"signature": "def permutationFilter(perm):", "body": "<EOL>limit = int(os.environ.get('<STR_LIT>', <NUM_LIT>))<EOL>if perm['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']['<STR_LIT>'] > limit:<EOL><INDENT>return False;<EOL><DEDENT>return True<EOL>", "docstring": "This function can be used to selectively filter out specific permutation\n    combinations. It is called by RunPermutations for every possible permutation\n    of the variables in the permutations dict. It should return True for valid a\n    combination of permutation values and False for an invalid one.\n\n    Parameters:\n    ---------------------------------------------------------\n    perm: dict of one possible combination of name:value\n          pairs chosen from permutations.", "id": "f17407:m1"}
{"signature": "def dummyModelParams(perm):", "body": "errScore = <NUM_LIT:50><EOL>waitTime = <NUM_LIT><EOL>dummyModelParams = dict(<EOL>metricValue = errScore,<EOL>iterations = int(os.environ.get('<STR_LIT>', '<STR_LIT:5>')),<EOL>waitTime = waitTime,<EOL>sysExitModelRange = os.environ.get('<STR_LIT>',<EOL>None),<EOL>delayModelRange = os.environ.get('<STR_LIT>',<EOL>None),<EOL>errModelRange = os.environ.get('<STR_LIT>',<EOL>None),<EOL>jobFailErr = bool(os.environ.get('<STR_LIT>', False))<EOL>)<EOL>return dummyModelParams<EOL>", "docstring": "This function can be used for Hypersearch algorithm development. When\n    present, Hypersearch doesn't actually run the CLA model in the OPF, but instead run\n    a dummy model. This function returns the dummy model params that will be\n    used. See the OPFDummyModelRunner class source code (in\n    nupic.swarming.ModelRunner) for a description of the schema for\n    the dummy model params.", "id": "f17407:m0"}
{"signature": "def _runPermutationsCluster(self, jobParams, loggingLevel=logging.INFO,<EOL>maxNumWorkers=<NUM_LIT:4>, env=None,<EOL>waitForCompletion=True, ignoreErrModels=False,<EOL>timeoutSec=DEFAULT_JOB_TIMEOUT_SEC):", "body": "print()<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>if env is not None and len(env) > <NUM_LIT:0>:<EOL><INDENT>envItems = []<EOL>for (key, value) in env.items():<EOL><INDENT>if (sys.platform.startswith('<STR_LIT>')):<EOL><INDENT>envItems.append(\"<STR_LIT>\" % (key, value))<EOL><DEDENT>else:<EOL><INDENT>envItems.append(\"<STR_LIT>\" % (key, value))<EOL><DEDENT><DEDENT>if (sys.platform.startswith('<STR_LIT>')):<EOL><INDENT>envStr = \"<STR_LIT>\" % ('<STR_LIT>'.join(envItems))<EOL><DEDENT>else:<EOL><INDENT>envStr = \"<STR_LIT>\" % ('<STR_LIT:;>'.join(envItems))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>envStr = '<STR_LIT>'<EOL><DEDENT>cmdLine = '<STR_LIT>''<STR_LIT>'% (envStr, loggingLevel)<EOL>cjDAO = ClientJobsDAO.get()<EOL>jobID = cjDAO.jobInsert(client='<STR_LIT:test>', cmdLine=cmdLine,<EOL>params=json.dumps(jobParams),<EOL>minimumWorkers=<NUM_LIT:1>, maximumWorkers=maxNumWorkers,<EOL>jobType = cjDAO.JOB_TYPE_HS)<EOL>workerCmdLine = '<STR_LIT>''<STR_LIT>'% (envStr, jobID, loggingLevel)<EOL>workers = self._launchWorkers(cmdLine=workerCmdLine, numWorkers=maxNumWorkers)<EOL>print(\"<STR_LIT>\" % (jobID))<EOL>print(\"<STR_LIT>\" % (maxNumWorkers),cmdLine)<EOL>if not waitForCompletion:<EOL><INDENT>return (jobID, None, None)<EOL><DEDENT>if timeoutSec is None:<EOL><INDENT>timeout=DEFAULT_JOB_TIMEOUT_SEC<EOL><DEDENT>else:<EOL><INDENT>timeout=timeoutSec<EOL><DEDENT>startTime = time.time()<EOL>lastUpdate = time.time()<EOL>lastCompleted = <NUM_LIT:0><EOL>lastCompletedWithError = <NUM_LIT:0><EOL>lastCompletedAsOrphan = <NUM_LIT:0><EOL>lastStarted = <NUM_LIT:0><EOL>lastJobStatus = \"<STR_LIT>\"<EOL>lastJobResults = None<EOL>lastActiveSwarms = None<EOL>lastEngStatus = None<EOL>modelIDs = []<EOL>print(\"<STR_LIT>\" % 
(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"))<EOL>print(\"<STR_LIT>\")<EOL>while (lastJobStatus != ClientJobsDAO.STATUS_COMPLETED)and (time.time() - lastUpdate < timeout):<EOL><INDENT>printUpdate = False<EOL>if g_myEnv.options.verbosity == <NUM_LIT:0>:<EOL><INDENT>time.sleep(<NUM_LIT:0.5>)<EOL><DEDENT>jobInfo = self._getJobInfo(cjDAO, workers, jobID)<EOL>if jobInfo.status != lastJobStatus:<EOL><INDENT>if jobInfo.status == ClientJobsDAO.STATUS_RUNNINGand lastJobStatus != ClientJobsDAO.STATUS_RUNNING:<EOL><INDENT>print(\"<STR_LIT>\"% (jobInfo.jobId))<EOL><DEDENT>lastJobStatus = jobInfo.status<EOL>printUpdate = True<EOL><DEDENT>if g_myEnv.options.verbosity >= <NUM_LIT:1>:<EOL><INDENT>if jobInfo.engWorkerState is not None:<EOL><INDENT>activeSwarms = json.loads(jobInfo.engWorkerState)['<STR_LIT>']<EOL>if activeSwarms != lastActiveSwarms:<EOL><INDENT>print(\"<STR_LIT>\", '<STR_LIT>'.join(activeSwarms))<EOL>lastActiveSwarms = activeSwarms<EOL>print()<EOL><DEDENT><DEDENT>if jobInfo.results != lastJobResults:<EOL><INDENT>print(\"<STR_LIT>\", jobInfo.results, \"<STR_LIT>\")<EOL>lastJobResults = jobInfo.results<EOL><DEDENT>if jobInfo.engStatus != lastEngStatus:<EOL><INDENT>print('<STR_LIT>' % jobInfo.engStatus)<EOL>print()<EOL>lastEngStatus = jobInfo.engStatus<EOL><DEDENT><DEDENT>modelCounters = cjDAO.modelsGetUpdateCounters(jobID)<EOL>if len(modelCounters) != lastStarted:<EOL><INDENT>modelIDs = [x.modelId for x in modelCounters]<EOL>lastStarted = len(modelCounters)<EOL>printUpdate = True<EOL><DEDENT>if len(modelIDs) > <NUM_LIT:0>:<EOL><INDENT>completed = <NUM_LIT:0><EOL>completedWithError = <NUM_LIT:0><EOL>completedAsOrphan = <NUM_LIT:0><EOL>infos = cjDAO.modelsGetResultAndStatus(modelIDs)<EOL>for info in infos:<EOL><INDENT>if info.status == ClientJobsDAO.STATUS_COMPLETED:<EOL><INDENT>completed += <NUM_LIT:1><EOL>if info.completionReason == ClientJobsDAO.CMPL_REASON_ERROR:<EOL><INDENT>completedWithError += <NUM_LIT:1><EOL><DEDENT>if 
info.completionReason == ClientJobsDAO.CMPL_REASON_ORPHAN:<EOL><INDENT>completedAsOrphan += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>if completed != lastCompletedor completedWithError != lastCompletedWithErroror completedAsOrphan != lastCompletedAsOrphan:<EOL><INDENT>lastCompleted = completed<EOL>lastCompletedWithError = completedWithError<EOL>lastCompletedAsOrphan = completedAsOrphan<EOL>printUpdate = True<EOL><DEDENT><DEDENT>if printUpdate:<EOL><INDENT>lastUpdate = time.time()<EOL>if g_myEnv.options.verbosity >= <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\", end='<STR_LIT:U+0020>')<EOL><DEDENT>print(\"<STR_LIT>\" % (lastJobStatus, lastStarted,<EOL>lastCompleted,<EOL>lastCompletedWithError,<EOL>lastCompletedAsOrphan))<EOL><DEDENT><DEDENT>print(\"<STR_LIT>\" % (lastJobStatus, lastStarted,<EOL>lastCompleted,<EOL>lastCompletedWithError,<EOL>lastCompletedAsOrphan))<EOL>jobInfo = self._getJobInfo(cjDAO, workers, jobID)<EOL>if not ignoreErrModels:<EOL><INDENT>self.assertEqual (jobInfo.completionReason,<EOL>ClientJobsDAO.CMPL_REASON_SUCCESS)<EOL><DEDENT>models = cjDAO.modelsGetUpdateCounters(jobID)<EOL>modelIDs = [model.modelId for model in models]<EOL>if len(modelIDs) > <NUM_LIT:0>:<EOL><INDENT>results = cjDAO.modelsGetResultAndStatus(modelIDs)<EOL><DEDENT>else:<EOL><INDENT>results = []<EOL><DEDENT>metricResults = []<EOL>for result in results:<EOL><INDENT>if result.results is not None:<EOL><INDENT>metricResults.append(list(json.loads(result.results)[<NUM_LIT:1>].values())[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>metricResults.append(None)<EOL><DEDENT>if not ignoreErrModels:<EOL><INDENT>self.assertNotEqual(result.completionReason, cjDAO.CMPL_REASON_ERROR,<EOL>\"<STR_LIT>\" % (result.completionMsg))<EOL><DEDENT><DEDENT>return (jobID, jobInfo, results, metricResults)<EOL>", "docstring": "Given a prepared, filled in jobParams for a hypersearch, this starts\n        the job, waits for it to complete, and returns the results for all\n        models.\n\n        Parameters:\n  
      -------------------------------------------------------------------\n        jobParams:        filled in job params for a hypersearch\n        loggingLevel:    logging level to use in the Hypersearch worker\n        maxNumWorkers:    max # of worker processes to use\n        env:             if not None, this is a dict of environment variables\n                            that should be sent to each worker process. These can\n                            aid in re-using the same description/permutations file\n                            for different tests.\n        waitForCompletion: If True, wait for job to complete before returning\n                           If False, then return resultsInfoForAllModels and\n                           metricResults will be None\n        ignoreErrModels:  If true, ignore erred models\n        retval:          (jobID, jobInfo, resultsInfoForAllModels, metricResults)", "id": "f17408:c1:m9"}
{"signature": "def setUp(self):", "body": "pass<EOL>", "docstring": "Method called to prepare the test fixture. This is called by the\n        unittest framework immediately before calling the test method; any exception\n        raised by this method will be considered an error rather than a test\n        failure. The default implementation does nothing.", "id": "f17408:c1:m0"}
{"signature": "def _runPermutationsLocal(self, jobParams, loggingLevel=logging.INFO,<EOL>env=None, waitForCompletion=True,<EOL>continueJobId=None, ignoreErrModels=False):", "body": "print()<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>if env is not None:<EOL><INDENT>saveEnvState = copy.deepcopy(os.environ)<EOL>os.environ.update(env)<EOL><DEDENT>cjDAO = ClientJobsDAO.get()<EOL>if continueJobId is None:<EOL><INDENT>jobID = cjDAO.jobInsert(client='<STR_LIT:test>', cmdLine='<STR_LIT>',<EOL>params=json.dumps(jobParams),<EOL>alreadyRunning=True, minimumWorkers=<NUM_LIT:1>, maximumWorkers=<NUM_LIT:1>,<EOL>jobType = cjDAO.JOB_TYPE_HS)<EOL><DEDENT>else:<EOL><INDENT>jobID = continueJobId<EOL><DEDENT>args = ['<STR_LIT>', '<STR_LIT>' % (jobID),<EOL>'<STR_LIT>' % (loggingLevel)]<EOL>if continueJobId is None:<EOL><INDENT>args.append('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>hypersearch_worker.main(args)<EOL><DEDENT>except SystemExit:<EOL><INDENT>pass<EOL><DEDENT>except:<EOL><INDENT>raise<EOL><DEDENT>if env is not None:<EOL><INDENT>os.environ = saveEnvState<EOL><DEDENT>models = cjDAO.modelsGetUpdateCounters(jobID)<EOL>modelIDs = [model.modelId for model in models]<EOL>if len(modelIDs) > <NUM_LIT:0>:<EOL><INDENT>results = cjDAO.modelsGetResultAndStatus(modelIDs)<EOL><DEDENT>else:<EOL><INDENT>results = []<EOL><DEDENT>metricResults = []<EOL>for result in results:<EOL><INDENT>if result.results is not None:<EOL><INDENT>metricResults.append(list(json.loads(result.results)[<NUM_LIT:1>].values())[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>metricResults.append(None)<EOL><DEDENT>if not ignoreErrModels:<EOL><INDENT>self.assertNotEqual(result.completionReason, cjDAO.CMPL_REASON_ERROR,<EOL>\"<STR_LIT>\" % (result.completionMsg))<EOL><DEDENT><DEDENT>jobInfo = cjDAO.jobInfo(jobID)<EOL>return (jobID, jobInfo, results, metricResults)<EOL>", "docstring": "This runs permutations on the given experiment using just 1 worker\n        in the current process\n\n        
Parameters:\n        -------------------------------------------------------------------\n        jobParams:        filled in job params for a hypersearch\n        loggingLevel:    logging level to use in the Hypersearch worker\n        env:             if not None, this is a dict of environment variables\n                            that should be sent to each worker process. These can\n                            aid in re-using the same description/permutations file\n                            for different tests.\n        waitForCompletion: If True, wait for job to complete before returning\n                           If False, then return resultsInfoForAllModels and\n                           metricResults will be None\n        continueJobId:    If not None, then this is the JobId of a job we want\n                          to continue working on with another worker.\n        ignoreErrModels:  If true, ignore erred models\n        retval:          (jobId, jobInfo, resultsInfoForAllModels, metricResults)", "id": "f17408:c1:m8"}
{"signature": "def _getJobInfo(self, cjDAO, workers, jobID):", "body": "<EOL>jobInfo = cjDAO.jobInfo(jobID)<EOL>runningCount = <NUM_LIT:0><EOL>for worker in workers:<EOL><INDENT>retCode = worker.poll()<EOL>if retCode is None:<EOL><INDENT>runningCount += <NUM_LIT:1><EOL><DEDENT><DEDENT>if runningCount > <NUM_LIT:0>:<EOL><INDENT>status = ClientJobsDAO.STATUS_RUNNING<EOL><DEDENT>else:<EOL><INDENT>status = ClientJobsDAO.STATUS_COMPLETED<EOL><DEDENT>jobInfo = jobInfo._replace(status=status)<EOL>if status == ClientJobsDAO.STATUS_COMPLETED:<EOL><INDENT>jobInfo = jobInfo._replace(<EOL>completionReason=ClientJobsDAO.CMPL_REASON_SUCCESS)<EOL><DEDENT>return jobInfo<EOL>", "docstring": "Return the job info for a job\n\n        Parameters:\n        -----------------------------------------------\n        cjDAO:   client jobs database instance\n        workers: list of workers for this job\n        jobID:   which job ID\n\n        retval: job info", "id": "f17408:c1:m6"}
{"signature": "def tearDown(self):", "body": "<EOL>self.resetExtraLogItems()<EOL>", "docstring": "Method called immediately after the test method has been called and the\n        result recorded. This is called even if the test method raised an exception,\n        so the implementation in subclasses may need to be particularly careful\n        about checking internal state. Any exception raised by this method will be\n        considered an error rather than a test failure. This method will only be\n        called if the setUp() succeeds, regardless of the outcome of the test\n        method. The default implementation does nothing.", "id": "f17408:c1:m1"}
{"signature": "def getHypersearchWinningModelID(jobID):", "body": "cjDAO = ClientJobsDAO.get()<EOL>jobResults = cjDAO.jobGetFields(jobID, ['<STR_LIT>'])[<NUM_LIT:0>]<EOL>print(\"<STR_LIT>\" % (jobResults,))<EOL>jobResults = json.loads(jobResults)<EOL>return jobResults['<STR_LIT>']<EOL>", "docstring": "Parameters:\n-------------------------------------------------------------------\njobID:            jobID of successfully-completed Hypersearch job\n\nretval:           modelID of the winning model", "id": "f17408:m0"}
{"signature": "def _executeExternalCmdAndReapStdout(args):", "body": "_debugOut((\"<STR_LIT>\") %(args,))<EOL>p = subprocess.Popen(args,<EOL>env=os.environ,<EOL>stdout=subprocess.PIPE,<EOL>stderr=subprocess.PIPE)<EOL>_debugOut((\"<STR_LIT>\") % (args,))<EOL>(stdoutData, stderrData) = p.communicate()<EOL>_debugOut((\"<STR_LIT>\" +\"<STR_LIT>\") %(args, p.returncode, type(stdoutData), stdoutData, stderrData))<EOL>result = dict(<EOL>exitStatus = p.returncode,<EOL>stdoutData = stdoutData,<EOL>stderrData = stderrData,<EOL>)<EOL>_debugOut((\"<STR_LIT>\") %(args, pprint.pformat(result, indent=<NUM_LIT:4>)))<EOL>return result<EOL>", "docstring": "args:     Args list as defined for the args parameter in subprocess.Popen()\n\nReturns:  result dicionary:\n            {\n              'exitStatus':<exit-status-of-external-command>,\n              'stdoutData':\"string\",\n              'stderrData':\"string\"\n            }", "id": "f17408:m1"}
{"signature": "def _printTestHeader(self):", "body": "print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\" % (self.__class__, self._testMethodName))<EOL>", "docstring": "Print out what test we are running", "id": "f17408:c1:m3"}
{"signature": "@classmethod<EOL><INDENT>def _processArgs(cls):<DEDENT>", "body": "helpString =\"\"\"<STR_LIT>\"\"\"<EOL>allTests = _getTestList()<EOL>for test in allTests:<EOL><INDENT>helpString += \"<STR_LIT>\" % (test)<EOL><DEDENT>parser = OptionParser(helpString,conflict_handler=\"<STR_LIT>\")<EOL>parser.add_option(\"<STR_LIT>\", default=<NUM_LIT:0>, type=\"<STR_LIT:int>\",<EOL>help=\"<STR_LIT>\")<EOL>parser.add_option(\"<STR_LIT>\", action=\"<STR_LIT:store_true>\", default=False,<EOL>help=\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>parser.add_option(\"<STR_LIT>\", action=\"<STR_LIT:store>\", type=\"<STR_LIT:int>\",<EOL>default=logging.INFO,<EOL>help=\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>parser.add_option(\"<STR_LIT>\", dest=\"<STR_LIT>\", default=<NUM_LIT:2>, type='<STR_LIT:int>',<EOL>help=(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"))<EOL>return parser.parse_args(args=cls.args)<EOL>", "docstring": "Parse our command-line args/options and strip them from sys.argv\nReturns the tuple (parsedOptions, remainingArgs)", "id": "f17408:c6:m0"}
{"signature": "@classmethod<EOL><INDENT>def consumeArgs(cls):<DEDENT>", "body": "return cls._processArgs()[<NUM_LIT:1>]<EOL>", "docstring": "Consumes the test arguments and returns the remaining arguments meant\n        for unittest.man", "id": "f17408:c6:m2"}
{"signature": "def shortDescription(self):", "body": "return None<EOL>", "docstring": "Override to force unittest framework to use test method names instead\n        of docstrings in the report.", "id": "f17408:c1:m2"}
{"signature": "def __updateProcessCounter(self):", "body": "newcounter = <NUM_LIT:0><EOL>for job in self.__procs:<EOL><INDENT>if job.is_alive():<EOL><INDENT>newcounter+=<NUM_LIT:1><EOL><DEDENT><DEDENT>self.__numRunningProcs = newcounter<EOL>return newcounter<EOL>", "docstring": "Function that iterates through the running Processes\n        and counts the number of processes that are currently alive.\n        Sets numRunningProcs to this count", "id": "f17409:c0:m2"}
{"signature": "def runJobs(self, maxJobs):", "body": "jobsrunning = self.__numRunningProcs<EOL>if(maxJobs > <NUM_LIT:1>):<EOL><INDENT>jobsindx = <NUM_LIT:0><EOL>while(jobsindx<len(self.testQ) or jobsrunning><NUM_LIT:0>):<EOL><INDENT>if(jobsindx<len(self.testQ) and jobsrunning<maxJobs):<EOL><INDENT>curJob = self.testQ[jobsindx]<EOL>p = Process(target = curJob[<NUM_LIT:0>], args = curJob[<NUM_LIT:1>])<EOL>p.start()<EOL>self.__procs.append(p)<EOL>jobsindx+=<NUM_LIT:1><EOL><DEDENT>if jobsrunning >= maxJobs:<EOL><INDENT>time.sleep(<NUM_LIT:30>)<EOL>print (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>elif jobsindx == len(self.testQ):<EOL><INDENT>time.sleep(<NUM_LIT:30>)<EOL>print(\"<STR_LIT>\")<EOL><DEDENT>jobsrunning = self.__updateProcessCounter()<EOL>for proc in self.__procs:<EOL><INDENT>if proc.exitcode == <NUM_LIT:1>:<EOL><INDENT>self.cancelJobs()<EOL>assert False, (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT><DEDENT>try:<EOL><INDENT>while True:<EOL><INDENT>result = self.__resultQ.get(True, <NUM_LIT:5>)<EOL>self.assertBenchmarks(result)<EOL><DEDENT><DEDENT>except Empty:<EOL><INDENT>pass<EOL><DEDENT>", "docstring": "Function that launched Hypersearch benchmark jobs.\n        Runs jobs contained in self.testQ, until maxJobs are running\n        in parallel at which point it waits until some jobs finish.", "id": "f17409:c0:m4"}
{"signature": "def cancelJobs(self):", "body": "print(\"<STR_LIT>\")<EOL>for proc in self.__procs:<EOL><INDENT>if not proc.is_alive():<EOL><INDENT>proc.terminate()<EOL><DEDENT><DEDENT>print(\"<STR_LIT>\")<EOL>", "docstring": "Function that cancels all the jobs in the\n        process queue.", "id": "f17409:c0:m3"}
{"signature": "def benchmarkHotGym(self):", "body": "<EOL>dataPath = os.path.join(self.datadir, \"<STR_LIT>\", \"<STR_LIT>\")<EOL>streamDef = dict(<EOL>version=<NUM_LIT:1>,<EOL>info=\"<STR_LIT>\",<EOL>streams=[<EOL>dict(source=\"<STR_LIT>\" % (dataPath),<EOL>info=\"<STR_LIT>\",<EOL>columns=[\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"],<EOL>last_record=self.splits['<STR_LIT>'],)<EOL>],<EOL>aggregation={<EOL>'<STR_LIT>' : <NUM_LIT:1>,<EOL>'<STR_LIT>' : [<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>]<EOL>},<EOL>)<EOL>expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()<EOL>expDesc[\"<STR_LIT>\"][\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>expDesc.update({<EOL>\"<STR_LIT>\": streamDef,<EOL>\"<STR_LIT>\": [<EOL>{ \"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT>\"<EOL>},<EOL>{ \"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT:float>\",<EOL>\"<STR_LIT>\":  <NUM_LIT>,<EOL>\"<STR_LIT>\":  <NUM_LIT>,<EOL>},<EOL>{ \"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT:string>\",<EOL>},<EOL>],<EOL>\"<STR_LIT>\": self.__recordsToProcess,<EOL>})<EOL>expdir = os.path.join(self.outdir,  \"<STR_LIT>\")<EOL>self.generateModules(expDesc,  expdir)<EOL>self.descriptions[\"<STR_LIT>\"]=(expdir, expDesc)<EOL>return expdir<EOL>", "docstring": "Try running a basic experiment and permutations.", "id": "f17409:c0:m31"}
{"signature": "def benchmarkTwoVars(self):", "body": "<EOL>dataPath = os.path.join(self.datadir, \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\")<EOL>streamDef = dict(<EOL>version=<NUM_LIT:1>,<EOL>info=\"<STR_LIT>\",<EOL>streams=[<EOL>dict(source=\"<STR_LIT>\" % (dataPath),<EOL>info=\"<STR_LIT>\",<EOL>columns=[\"<STR_LIT>\",\"<STR_LIT>\"],<EOL>last_record=self.splits['<STR_LIT>'],),<EOL>],<EOL>)<EOL>expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()<EOL>expDesc[\"<STR_LIT>\"][\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>expDesc.update({<EOL>\"<STR_LIT>\": streamDef,<EOL>\"<STR_LIT>\": [<EOL>{ \"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT:int>\",<EOL>\"<STR_LIT>\":  -<NUM_LIT:10>,<EOL>\"<STR_LIT>\":  <NUM_LIT>,<EOL>},<EOL>{ \"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT:int>\",<EOL>\"<STR_LIT>\":  -<NUM_LIT:10>,<EOL>\"<STR_LIT>\":  <NUM_LIT>,<EOL>},<EOL>],<EOL>\"<STR_LIT>\": self.__recordsToProcess,<EOL>})<EOL>expdir = os.path.join(self.outdir,  \"<STR_LIT>\")<EOL>self.generateModules(expDesc,  expdir)<EOL>self.descriptions[\"<STR_LIT>\"]=(expdir, expDesc)<EOL>return expdir<EOL>", "docstring": "Try running a basic experiment and permutations", "id": "f17409:c0:m33"}
{"signature": "def setUpExportDicts(self):", "body": "ret = []<EOL>if self.maxBranchings is None:<EOL><INDENT>self.maxBranchings = [None]<EOL><DEDENT>else:<EOL><INDENT>self.maxBranchings = self.maxBranchings.split('<STR_LIT:U+002C>')<EOL><DEDENT>if self.maxParticles is None:<EOL><INDENT>self.maxParticles = [None]<EOL><DEDENT>else:<EOL><INDENT>self.maxParticles = self.maxParticles.split(\"<STR_LIT:U+002C>\")<EOL><DEDENT>for branch in self.maxBranchings:<EOL><INDENT>for part in self.maxParticles:<EOL><INDENT>curdict = dict()<EOL>if not branch is None:<EOL><INDENT>curdict[self.BRANCHING_PROP] = branch<EOL><DEDENT>if not part is None:<EOL><INDENT>curdict[self.PARTICLE_PROP] = part<EOL><DEDENT>ret+=[curdict]<EOL><DEDENT><DEDENT>return ret<EOL>", "docstring": "Setup up a dict of branchings and particles", "id": "f17409:c0:m19"}
{"signature": "def benchmarkTwoVarsSquare(self):", "body": "<EOL>dataPath = os.path.join(self.datadir, \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\")<EOL>streamDef = dict(<EOL>version=<NUM_LIT:1>,<EOL>info=\"<STR_LIT>\",<EOL>streams=[<EOL>dict(source=\"<STR_LIT>\" % (dataPath),<EOL>info=\"<STR_LIT>\",<EOL>columns=[\"<STR_LIT>\",\"<STR_LIT>\"],<EOL>last_record=self.splits['<STR_LIT>']),<EOL>],<EOL>)<EOL>expDesc = OPFBenchmarkRunner.EXP_COMMON.copy()<EOL>expDesc[\"<STR_LIT>\"][\"<STR_LIT>\"] = \"<STR_LIT>\"<EOL>expDesc.update({<EOL>\"<STR_LIT>\": streamDef,<EOL>\"<STR_LIT>\": [<EOL>{ \"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT:int>\",<EOL>\"<STR_LIT>\":  -<NUM_LIT:10>,<EOL>\"<STR_LIT>\":  <NUM_LIT>,<EOL>},<EOL>{ \"<STR_LIT>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": \"<STR_LIT:int>\",<EOL>\"<STR_LIT>\":  -<NUM_LIT:10>,<EOL>\"<STR_LIT>\":  <NUM_LIT>,<EOL>}<EOL>],<EOL>\"<STR_LIT>\": self.__recordsToProcess,<EOL>})<EOL>expdir = os.path.join(self.outdir,  \"<STR_LIT>\")<EOL>self.generateModules(expDesc,  expdir)<EOL>self.descriptions[\"<STR_LIT>\"]=(expdir, expDesc)<EOL>return expdir<EOL>", "docstring": "Try running a basic experiment and permutations", "id": "f17409:c0:m37"}
{"signature": "def runNetwork(network, numRecords, writer):", "body": "sensorRegion = network.regions[_RECORD_SENSOR]<EOL>l1SpRegion = network.regions[_L1_SPATIAL_POOLER]<EOL>l1TpRegion = network.regions[_L1_TEMPORAL_MEMORY]<EOL>l1Classifier = network.regions[_L1_CLASSIFIER]<EOL>l2SpRegion = network.regions[_L2_SPATIAL_POOLER]<EOL>l2TpRegion = network.regions[_L2_TEMPORAL_MEMORY]<EOL>l2Classifier = network.regions[_L2_CLASSIFIER]<EOL>l1PreviousPredictedColumns = []<EOL>l2PreviousPredictedColumns = []<EOL>l1PreviousPrediction = None<EOL>l2PreviousPrediction = None<EOL>l1ErrorSum = <NUM_LIT:0.0><EOL>l2ErrorSum = <NUM_LIT:0.0><EOL>for record in range(numRecords):<EOL><INDENT>network.run(<NUM_LIT:1>)<EOL>actual = float(sensorRegion.getOutputData(\"<STR_LIT>\")[<NUM_LIT:0>])<EOL>l1Predictions = l1Classifier.getOutputData(\"<STR_LIT>\")<EOL>l1Probabilities = l1Classifier.getOutputData(\"<STR_LIT>\")<EOL>l1Prediction = l1Predictions[l1Probabilities.argmax()]<EOL>if l1PreviousPrediction is not None:<EOL><INDENT>l1ErrorSum += math.fabs(l1PreviousPrediction - actual)<EOL><DEDENT>l1PreviousPrediction = l1Prediction<EOL>l2Predictions = l2Classifier.getOutputData(\"<STR_LIT>\")<EOL>l2Probabilities = l2Classifier.getOutputData(\"<STR_LIT>\")<EOL>l2Prediction = l2Predictions[l2Probabilities.argmax()]<EOL>if l2PreviousPrediction is not None:<EOL><INDENT>l2ErrorSum += math.fabs(l2PreviousPrediction - actual)<EOL><DEDENT>l2PreviousPrediction = l2Prediction<EOL>l1AnomalyScore = l1TpRegion.getOutputData(\"<STR_LIT>\")[<NUM_LIT:0>]<EOL>l2AnomalyScore = l2TpRegion.getOutputData(\"<STR_LIT>\")[<NUM_LIT:0>]<EOL>writer.writerow((record, actual, l1PreviousPrediction, l1AnomalyScore, l2PreviousPrediction, l2AnomalyScore))<EOL>l1PredictedColumns = l1TpRegion.getOutputData(\"<STR_LIT>\").nonzero()[<NUM_LIT:0>]<EOL>l1PreviousPredictedColumns = copy.deepcopy(l1PredictedColumns)<EOL>l2PredictedColumns = l2TpRegion.getOutputData(\"<STR_LIT>\").nonzero()[<NUM_LIT:0>]<EOL>l2PreviousPredictedColumns 
= copy.deepcopy(l2PredictedColumns)<EOL><DEDENT>if numRecords > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\" % (l1ErrorSum / (numRecords - <NUM_LIT:1>)))<EOL>print(\"<STR_LIT>\" % (l2ErrorSum / (numRecords - <NUM_LIT:1>)))<EOL><DEDENT>", "docstring": "Runs specified Network writing the ensuing anomaly\nscores to writer.\n\n@param network: The Network instance to be run\n@param writer: A csv.writer used to write to output file.", "id": "f17412:m5"}
{"signature": "def createNetwork(dataSource):", "body": "network = Network()<EOL>network.addRegion(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>json.dumps({\"<STR_LIT>\": _VERBOSITY}))<EOL>sensor = network.regions[\"<STR_LIT>\"].getSelf()<EOL>sensor.encoder = createEncoder()<EOL>sensor.dataSource = dataSource<EOL>SP_PARAMS[\"<STR_LIT>\"] = sensor.encoder.getWidth()<EOL>network.addRegion(\"<STR_LIT>\", \"<STR_LIT>\", json.dumps(SP_PARAMS))<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>srcOutput=\"<STR_LIT>\", destInput=\"<STR_LIT>\")<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>srcOutput=\"<STR_LIT>\", destInput=\"<STR_LIT>\")<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>srcOutput=\"<STR_LIT>\", destInput=\"<STR_LIT>\")<EOL>network.addRegion(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>json.dumps(TM_PARAMS))<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>srcOutput=\"<STR_LIT>\", destInput=\"<STR_LIT>\")<EOL>network.addRegion(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>json.dumps({}))<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", srcOutput=\"<STR_LIT>\", destInput=\"<STR_LIT>\")<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>srcOutput=\"<STR_LIT>\", destInput=\"<STR_LIT>\")<EOL>spatialPoolerRegion = network.regions[\"<STR_LIT>\"]<EOL>spatialPoolerRegion.setParameter(\"<STR_LIT>\", True)<EOL>spatialPoolerRegion.setParameter(\"<STR_LIT>\", False)<EOL>temporalPoolerRegion = network.regions[\"<STR_LIT>\"]<EOL>temporalPoolerRegion.setParameter(\"<STR_LIT>\", True)<EOL>temporalPoolerRegion.setParameter(\"<STR_LIT>\", True)<EOL>temporalPoolerRegion.setParameter(\"<STR_LIT>\", True)<EOL>temporalPoolerRegion.setParameter(\"<STR_LIT>\", 
True)<EOL>return network<EOL>", "docstring": "Create the Network instance.\n\n    The network has a sensor region reading data from `dataSource` and passing\n    the encoded representation to an SPRegion. The SPRegion output is passed to\n    a TMRegion.\n\n    :param dataSource: a RecordStream instance to get data from\n    :returns: a Network instance ready to run", "id": "f17413:m1"}
{"signature": "def createTemporalAnomaly(recordParams, spatialParams=_SP_PARAMS,<EOL>temporalParams=_TM_PARAMS,<EOL>verbosity=_VERBOSITY):", "body": "inputFilePath = recordParams[\"<STR_LIT>\"]<EOL>scalarEncoderArgs = recordParams[\"<STR_LIT>\"]<EOL>dateEncoderArgs = recordParams[\"<STR_LIT>\"]<EOL>scalarEncoder = ScalarEncoder(**scalarEncoderArgs)<EOL>dateEncoder = DateEncoder(**dateEncoderArgs)<EOL>encoder = MultiEncoder()<EOL>encoder.addEncoder(scalarEncoderArgs[\"<STR_LIT:name>\"], scalarEncoder)<EOL>encoder.addEncoder(dateEncoderArgs[\"<STR_LIT:name>\"], dateEncoder)<EOL>network = Network()<EOL>network.addRegion(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>json.dumps({\"<STR_LIT>\": verbosity}))<EOL>sensor = network.regions[\"<STR_LIT>\"].getSelf()<EOL>sensor.encoder = encoder<EOL>sensor.dataSource = FileRecordStream(streamID=inputFilePath)<EOL>spatialParams[\"<STR_LIT>\"] = sensor.encoder.getWidth()<EOL>network.addRegion(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>json.dumps(spatialParams))<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>srcOutput=\"<STR_LIT>\", destInput=\"<STR_LIT>\")<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>srcOutput=\"<STR_LIT>\", destInput=\"<STR_LIT>\")<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>srcOutput=\"<STR_LIT>\", destInput=\"<STR_LIT>\")<EOL>network.addRegion(\"<STR_LIT>\", \"<STR_LIT>\",<EOL>json.dumps(temporalParams))<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>network.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>srcOutput=\"<STR_LIT>\", destInput=\"<STR_LIT>\")<EOL>spatialPoolerRegion = network.regions[\"<STR_LIT>\"]<EOL>spatialPoolerRegion.setParameter(\"<STR_LIT>\", True)<EOL>spatialPoolerRegion.setParameter(\"<STR_LIT>\", False)<EOL>temporalPoolerRegion = 
network.regions[\"<STR_LIT>\"]<EOL>temporalPoolerRegion.setParameter(\"<STR_LIT>\", True)<EOL>temporalPoolerRegion.setParameter(\"<STR_LIT>\", True)<EOL>temporalPoolerRegion.setParameter(\"<STR_LIT>\", True)<EOL>temporalPoolerRegion.setParameter(\"<STR_LIT>\", True)<EOL>return network<EOL>", "docstring": "Generates a Network with connected RecordSensor, SP, TM.\n\n    This function takes care of generating regions and the canonical links.\n    The network has a sensor region reading data from a specified input and\n    passing the encoded representation to an SPRegion.\n    The SPRegion output is passed to a TMRegion.\n\n    Note: this function returns a network that needs to be initialized. This\n    allows the user to extend the network by adding further regions and\n    connections.\n\n    :param recordParams: a dict with parameters for creating RecordSensor region.\n    :param spatialParams: a dict with parameters for creating SPRegion.\n    :param temporalParams: a dict with parameters for creating TMRegion.\n    :param verbosity: an integer representing how chatty the network will be.", "id": "f17415:m0"}
{"signature": "def computeAccuracy(model, size, top):", "body": "accuracy = []<EOL>filename = os.path.join(os.path.dirname(__file__), \"<STR_LIT>\")<EOL>with zipfile.ZipFile(filename) as archive:<EOL><INDENT>with archive.open(\"<STR_LIT>\") as datafile:<EOL><INDENT>for _ in range(<NUM_LIT:7>):<EOL><INDENT>next(datafile)<EOL><DEDENT>for _ in range(LEARNING_RECORDS):<EOL><INDENT>next(datafile)<EOL><DEDENT>for _ in range(size):<EOL><INDENT>pages = readUserSession(datafile)<EOL>model.resetSequenceStates()<EOL>for i in range(len(pages) - <NUM_LIT:1>):<EOL><INDENT>result = model.run({\"<STR_LIT>\": pages[i]})<EOL>inferences = result.inferences[\"<STR_LIT>\"][<NUM_LIT:1>]<EOL>predicted = sorted(list(inferences.items()), key=itemgetter(<NUM_LIT:1>), reverse=True)[:top]<EOL>accuracy.append(<NUM_LIT:1> if pages[i + <NUM_LIT:1>] in list(zip(*predicted))[<NUM_LIT:0>] else <NUM_LIT:0>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return np.mean(accuracy)<EOL>", "docstring": "Compute prediction accuracy by checking if the next page in the sequence is\nwithin the top N predictions calculated by the model\nArgs:\n  model: HTM model\n  size: Sample size\n  top: top N predictions to use\n\nReturns: Probability the next page in the sequence is within the top N\n         predicted pages", "id": "f17417:m0"}
{"signature": "def filter(perm):", "body": "if perm['<STR_LIT>'] != perm['<STR_LIT>']:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "This function can be used to selectively filter out specific permutation\n    combinations. It is called for every possible permutation of the variables\n    in the permutations dict. It should return True for valid a combination of \n    permutation values and False for an invalid one. \n\n    Parameters:\n    ---------------------------------------------------------\n    perm: dict of one possible combination of name:value \n                  pairs chosen from permutations.", "id": "f17432:m0"}
{"signature": "def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):", "body": "lines = activeCoincsFile.readlines()<EOL>inputs = encodingsFile.readlines()<EOL>w = len(inputs[<NUM_LIT:0>].split('<STR_LIT:U+0020>'))-<NUM_LIT:1><EOL>patterns = set([])<EOL>encodings = set([])<EOL>coincs = []    <EOL>reUsedCoincs = []<EOL>firstLine = inputs[<NUM_LIT:0>].split('<STR_LIT:U+0020>')<EOL>size = int(firstLine.pop(<NUM_LIT:0>))<EOL>spOutput = np.zeros((len(lines),<NUM_LIT>))<EOL>inputBits = np.zeros((len(lines),w))<EOL>print('<STR_LIT>', size)<EOL>print('<STR_LIT>', len(lines), '<STR_LIT:\\n>')<EOL>print('<STR_LIT>', w)<EOL>count = <NUM_LIT:0><EOL>for x in range(len(lines)):<EOL><INDENT>inputSpace = []     <EOL>spBUout = [int(z) for z in lines[x].split('<STR_LIT:U+0020>')]  <EOL>spBUout.pop(<NUM_LIT:0>)         <EOL>temp = set(spBUout)<EOL>spOutput[x]=spBUout<EOL>input = [int(z) for z in inputs[x].split('<STR_LIT:U+0020>')]    <EOL>input.pop(<NUM_LIT:0>)   <EOL>tempInput = set(input)<EOL>inputBits[x]=input<EOL>for m in range(size):<EOL><INDENT>if m in tempInput:<EOL><INDENT>inputSpace.append(m)<EOL><DEDENT>else:<EOL><INDENT>inputSpace.append('<STR_LIT:|>')  <EOL><DEDENT><DEDENT>repeatedBits = tempInput.intersection(encodings)    <EOL>reUsed = temp.intersection(patterns)  <EOL>if len(reUsed)==<NUM_LIT:0>:<EOL><INDENT>coincs.append((count,temp,repeatedBits,inputSpace, tempInput))  <EOL><DEDENT>else:<EOL><INDENT>reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput))<EOL><DEDENT>patterns=patterns.union(temp)   <EOL>encodings = encodings.union(tempInput)<EOL>count +=<NUM_LIT:1><EOL><DEDENT>overlap = {}<EOL>overlapVal = <NUM_LIT:0><EOL>seen = []<EOL>seen = (printOverlaps(coincs, coincs, seen))<EOL>print(len(seen), '<STR_LIT>')<EOL>seen = printOverlaps(reUsedCoincs, coincs, seen)<EOL>Summ=[]<EOL>for z in coincs:<EOL><INDENT>c=<NUM_LIT:0><EOL>for y in reUsedCoincs:<EOL><INDENT>c += 
len(z[<NUM_LIT:1>].intersection(y[<NUM_LIT:1>]))<EOL><DEDENT>Summ.append(c)<EOL><DEDENT>print('<STR_LIT>', Summ)<EOL>for m in range(<NUM_LIT:3>):<EOL><INDENT>displayLimit = min(<NUM_LIT>, len(spOutput[m*<NUM_LIT:200>:]))<EOL>if displayLimit><NUM_LIT:0>:<EOL><INDENT>drawFile(dataset, np.zeros([len(inputBits[:(m+<NUM_LIT:1>)*displayLimit]),len(inputBits[:(m+<NUM_LIT:1>)*displayLimit])]), inputBits[:(m+<NUM_LIT:1>)*displayLimit], spOutput[:(m+<NUM_LIT:1>)*displayLimit], w, m+<NUM_LIT:1>)<EOL><DEDENT>else: <EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>pyl.show()<EOL>", "docstring": "Mirror Image Visualization: Shows the encoding space juxtaposed against the\n    coincidence space. The encoding space is the bottom-up sensory encoding and\n    the coincidence space depicts the corresponding activation of coincidences in\n    the SP. Hence, the mirror image visualization is a visual depiction of the\n    mapping of SP cells to the input representations.\n\n    Note:\n    * The files spBUOut and sensorBUOut are assumed to be in the output format\n    used for LPF experiment outputs.\n    * BU outputs for some sample datasets are provided. Specify the name of the\n    dataset as an option while running this script.", "id": "f17441:m0"}
{"signature": "def generatePlot(outputs, origData):", "body": "PLOT_PRECISION = <NUM_LIT:100><EOL>distribMatrix = np.zeros((PLOT_PRECISION+<NUM_LIT:1>,PLOT_PRECISION+<NUM_LIT:1>))<EOL>outputSize = len(outputs)<EOL>for i in range(<NUM_LIT:0>,outputSize):<EOL><INDENT>for j in range(i+<NUM_LIT:1>,outputSize):<EOL><INDENT>in1 = outputs[i]<EOL>in2 = outputs[j]<EOL>dist = (abs(in1-in2) > <NUM_LIT:0.1>)<EOL>intDist = int(dist.sum()/<NUM_LIT:2>+<NUM_LIT:0.1>)<EOL>orig1 = origData[i]<EOL>orig2 = origData[j]<EOL>origDist = (abs(orig1-orig2) > <NUM_LIT:0.1>)<EOL>intOrigDist = int(origDist.sum()/<NUM_LIT:2>+<NUM_LIT:0.1>)<EOL>if intDist < <NUM_LIT:2> and intOrigDist > <NUM_LIT:10>:<EOL><INDENT>print('<STR_LIT>' % (i, j, intDist))<EOL>print('<STR_LIT>' % intOrigDist)<EOL><DEDENT>x = int(PLOT_PRECISION*intDist/<NUM_LIT>)<EOL>y = int(PLOT_PRECISION*intOrigDist/<NUM_LIT>)<EOL>if distribMatrix[x, y] < <NUM_LIT:0.1>:<EOL><INDENT>distribMatrix[x, y] = <NUM_LIT:3><EOL><DEDENT>else:<EOL><INDENT>if distribMatrix[x, y] < <NUM_LIT:10>:<EOL><INDENT>distribMatrix[x, y] += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>distribMatrix[<NUM_LIT:4>, <NUM_LIT:50>] = <NUM_LIT:3><EOL>distribMatrix[<NUM_LIT:4>, <NUM_LIT>] = <NUM_LIT:4><EOL>distribMatrix[<NUM_LIT:4>, <NUM_LIT>] = <NUM_LIT:5><EOL>distribMatrix[<NUM_LIT:4>, <NUM_LIT>] = <NUM_LIT:6><EOL>distribMatrix[<NUM_LIT:4>, <NUM_LIT>] = <NUM_LIT:7><EOL>distribMatrix[<NUM_LIT:4>, <NUM_LIT>] = <NUM_LIT:8><EOL>distribMatrix[<NUM_LIT:4>, <NUM_LIT>] = <NUM_LIT:9><EOL>distribMatrix[<NUM_LIT:4>, <NUM_LIT:64>] = <NUM_LIT:10><EOL>return distribMatrix<EOL>", "docstring": "Generates a table where each cell represent a frequency of pairs\n    as described below.\n    x coordinate is the % difference between input records (origData list),\n    y coordinate is the % difference between corresponding output records.", "id": "f17442:m0"}
{"signature": "def generateRandomInput(numRecords, elemSize = <NUM_LIT>, numSet = <NUM_LIT>):", "body": "inputs = []<EOL>for _ in range(numRecords):<EOL><INDENT>input = np.zeros(elemSize, dtype=realDType)<EOL>for _ in range(<NUM_LIT:0>,numSet):<EOL><INDENT>ind = np.random.random_integers(<NUM_LIT:0>, elemSize-<NUM_LIT:1>, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>input[ind] = <NUM_LIT:1><EOL><DEDENT>while abs(input.sum() - numSet) > <NUM_LIT:0.1>:<EOL><INDENT>ind = np.random.random_integers(<NUM_LIT:0>, elemSize-<NUM_LIT:1>, <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>input[ind] = <NUM_LIT:1><EOL><DEDENT>inputs.append(input)<EOL><DEDENT>return inputs<EOL>", "docstring": "Generates a set of input record\n\n    Params:\n            numRecords - how many records to generate\n            elemSize - the size of each record (num 0s or 1s)\n            numSet - how many 1s in each record\n\n    Returns: a list of inputs", "id": "f17442:m1"}
{"signature": "def getModelParamsFromName(gymName):", "body": "importName = \"<STR_LIT>\" % (<EOL>gymName.replace(\"<STR_LIT:U+0020>\", \"<STR_LIT:_>\").replace(\"<STR_LIT:->\", \"<STR_LIT:_>\")<EOL>)<EOL>print(\"<STR_LIT>\" % importName)<EOL>try:<EOL><INDENT>importedModelParams = importlib.import_module(importName).MODEL_PARAMS<EOL><DEDENT>except ImportError:<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>% gymName)<EOL><DEDENT>return importedModelParams<EOL>", "docstring": "Given a gym name, assumes a matching model params python module exists within\nthe model_params directory and attempts to import it.\n:param gymName: Gym name, used to guess the model params module name.\n:return: OPF Model params dictionary", "id": "f17447:m1"}
{"signature": "def _generateCategory(filename=\"<STR_LIT>\", numSequences=<NUM_LIT:2>, elementsPerSeq=<NUM_LIT:1>, <EOL>numRepeats=<NUM_LIT:10>, resets=False):", "body": "<EOL>scriptDir = os.path.dirname(__file__)<EOL>pathname = os.path.join(scriptDir, '<STR_LIT>', filename)<EOL>print(\"<STR_LIT>\" % (pathname))<EOL>fields = [('<STR_LIT>', '<STR_LIT:int>', '<STR_LIT:R>'), ('<STR_LIT>', '<STR_LIT:int>', '<STR_LIT:C>'),<EOL>('<STR_LIT>', '<STR_LIT:string>', '<STR_LIT>')]  <EOL>outFile = FileRecordStream(pathname, write=True, fields=fields)<EOL>sequences = []<EOL>for i in range(numSequences):<EOL><INDENT>seq = [x for x in range(i*elementsPerSeq, (i+<NUM_LIT:1>)*elementsPerSeq)]<EOL>sequences.append(seq)<EOL><DEDENT>seqIdxs = []<EOL>for i in range(numRepeats):<EOL><INDENT>seqIdxs += list(range(numSequences))<EOL><DEDENT>random.shuffle(seqIdxs)<EOL>for seqIdx in seqIdxs:<EOL><INDENT>reset = int(resets)<EOL>seq = sequences[seqIdx]<EOL>for x in seq:<EOL><INDENT>outFile.appendRecord([reset, str(seqIdx), str(x)])<EOL>reset = <NUM_LIT:0><EOL><DEDENT><DEDENT>outFile.close()<EOL>", "docstring": "Generate a simple dataset. This contains a bunch of non-overlapping\n    sequences. \n\n    Parameters:\n    ----------------------------------------------------\n    filename:       name of the file to produce, including extension. It will\n                    be created in a 'datasets' sub-directory within the \n                    directory containing this script. \n    numSequences:   how many sequences to generate\n    elementsPerSeq: length of each sequence\n    numRepeats:     how many times to repeat each sequence in the output \n    resets:         if True, turn on reset at start of each sequence", "id": "f17465:m0"}
{"signature": "def _generateSimple(filename=\"<STR_LIT>\", numSequences=<NUM_LIT:1>, elementsPerSeq=<NUM_LIT:3>, <EOL>numRepeats=<NUM_LIT:10>):", "body": "<EOL>scriptDir = os.path.dirname(__file__)<EOL>pathname = os.path.join(scriptDir, '<STR_LIT>', filename)<EOL>print(\"<STR_LIT>\" % (pathname))<EOL>fields = [('<STR_LIT>', '<STR_LIT>', '<STR_LIT:T>'), <EOL>('<STR_LIT>', '<STR_LIT:string>', '<STR_LIT>'),  <EOL>('<STR_LIT>', '<STR_LIT:float>', '<STR_LIT>')]  <EOL>outFile = FileRecordStream(pathname, write=True, fields=fields)<EOL>sequences = []<EOL>for i in range(numSequences):<EOL><INDENT>seq = [x for x in range(i*elementsPerSeq, (i+<NUM_LIT:1>)*elementsPerSeq)]<EOL>sequences.append(seq)<EOL><DEDENT>seqIdxs = []<EOL>for i in range(numRepeats):<EOL><INDENT>seqIdxs += list(range(numSequences))<EOL><DEDENT>random.shuffle(seqIdxs)<EOL>timestamp = datetime.datetime(year=<NUM_LIT>, month=<NUM_LIT:1>, day=<NUM_LIT:1>, hour=<NUM_LIT:0>, minute=<NUM_LIT:0>,<EOL>second=<NUM_LIT:0>)<EOL>timeDelta = datetime.timedelta(hours=<NUM_LIT:1>)<EOL>for seqIdx in seqIdxs:<EOL><INDENT>seq = sequences[seqIdx]<EOL>for x in seq:<EOL><INDENT>outFile.appendRecord([timestamp, str(x), x])<EOL>timestamp += timeDelta<EOL><DEDENT><DEDENT>for seqIdx in seqIdxs:<EOL><INDENT>seq = sequences[seqIdx]<EOL>for i,x in enumerate(seq):<EOL><INDENT>if i != <NUM_LIT:1>:<EOL><INDENT>outFile.appendRecord([timestamp, str(x), x])<EOL><DEDENT>timestamp += timeDelta<EOL><DEDENT><DEDENT>for seqIdx in seqIdxs:<EOL><INDENT>seq = sequences[seqIdx]<EOL>for i,x in enumerate(seq):<EOL><INDENT>if i != <NUM_LIT:1>:<EOL><INDENT>outFile.appendRecord([timestamp, str(x), x])<EOL><DEDENT>timestamp += timeDelta<EOL><DEDENT><DEDENT>for seqIdx in seqIdxs:<EOL><INDENT>seq = sequences[seqIdx]<EOL>for x in seq:<EOL><INDENT>outFile.appendRecord([timestamp, str(x), x])<EOL>timestamp += timeDelta<EOL><DEDENT><DEDENT>outFile.close()<EOL>", "docstring": "Generate a simple dataset. This contains a bunch of non-overlapping\n    sequences. 
\n\n    At the end of the dataset, we introduce missing records so that test\n    code can insure that the model didn't get confused by them. \n\n    Parameters:\n    ----------------------------------------------------\n    filename:       name of the file to produce, including extension. It will\n                    be created in a 'datasets' sub-directory within the \n                    directory containing this script. \n    numSequences:   how many sequences to generate\n    elementsPerSeq: length of each sequence\n    numRepeats:     how many times to repeat each sequence in the output", "id": "f17493:m0"}
{"signature": "def permutationFilter(perm):", "body": "<EOL>return True<EOL>", "docstring": "This function can be used to selectively filter out specific permutation\n    combinations. It is called by RunPermutations for every possible permutation\n    of the variables in the permutations dict. It should return True for valid a\n    combination of permutation values and False for an invalid one.\n\n    Parameters:\n    ---------------------------------------------------------\n    perm: dict of one possible combination of name:value\n          pairs chosen from permutations.", "id": "f17499:m0"}
{"signature": "def _generateFirstOrder0():", "body": "<EOL>numCategories = <NUM_LIT:5><EOL>initProb = numpy.zeros(numCategories)<EOL>initProb[<NUM_LIT:0>] = <NUM_LIT:1.0><EOL>firstOrder = dict()<EOL>firstOrder['<STR_LIT:0>'] = numpy.array([<NUM_LIT:0>, <NUM_LIT:0.1>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT>])<EOL>firstOrder['<STR_LIT:1>'] = numpy.array([<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT>, <NUM_LIT>, <NUM_LIT:0>])<EOL>firstOrder['<STR_LIT:2>'] = numpy.array([<NUM_LIT:1.0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>])<EOL>firstOrder['<STR_LIT:3>'] = numpy.array([<NUM_LIT:1.0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>])<EOL>firstOrder['<STR_LIT:4>'] = numpy.array([<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0.5>, <NUM_LIT:0.5>, <NUM_LIT:0>])<EOL>secondOrder = None<EOL>categoryList = ['<STR_LIT>' % x for x in range(<NUM_LIT:5>)]<EOL>return (initProb, firstOrder, secondOrder, <NUM_LIT:3>, categoryList)<EOL>", "docstring": "Generate the initial, first order, and second order transition\n    probabilities for 'probability0'. For this model, we generate the following\n    set of sequences:\n\n      .1   .75\n    0----1-----2\n     \\    \\   \n      \\    \\  .25\n       \\    \\-----3\n        \\\n         \\ .9     .5 \n          \\--- 4--------- 2\n                \\\n                 \\   .5\n                  \\---------3   \n\n\n\n\n    Parameters:\n    ----------------------------------------------------------------------\n    retval: (initProb, firstOrder, secondOrder, seqLen)\n              initProb:     Initial probability for each category. This is a vector\n                              of length len(categoryList).\n              firstOrder:   A dictionary of the 1st order probabilities. The key\n                              is the 1st element of the sequence, the value is\n                              the probability of each 2nd element given the first. \n              secondOrder:  A dictionary of the 2nd order probabilities. 
The key\n                              is the first 2 elements of the sequence, the value is\n                              the probability of each possible 3rd element given the \n                              first two. \n              seqLen:       Desired length of each sequence. The 1st element will\n                            be generated using the initProb, the 2nd element by the\n                            firstOrder table, and the 3rd and all successive \n                            elements by the secondOrder table. \n              categoryList:  list of category names to use\n\n\n    Here is an example of some return values when there are 3 categories\n    initProb:         [0.7, 0.2, 0.1]\n\n    firstOrder:       {'[0]': [0.3, 0.3, 0.4],\n                       '[1]': [0.3, 0.3, 0.4],\n                       '[2]': [0.3, 0.3, 0.4]}\n\n    secondOrder:      {'[0,0]': [0.3, 0.3, 0.4],\n                       '[0,1]': [0.3, 0.3, 0.4],\n                       '[0,2]': [0.3, 0.3, 0.4],\n                       '[1,0]': [0.3, 0.3, 0.4],\n                       '[1,1]': [0.3, 0.3, 0.4],\n                       '[1,2]': [0.3, 0.3, 0.4],\n                       '[2,0]': [0.3, 0.3, 0.4],\n                       '[2,1]': [0.3, 0.3, 0.4],\n                       '[2,2]': [0.3, 0.3, 0.4]}", "id": "f17515:m2"}
{"signature": "def _generateFileFromProb(filename, numRecords, categoryList, initProb, <EOL>firstOrderProb, secondOrderProb, seqLen, numNoise=<NUM_LIT:0>, resetsEvery=None):", "body": "<EOL>print(\"<STR_LIT>\" % (filename))<EOL>fields = [('<STR_LIT>', '<STR_LIT:int>', '<STR_LIT:R>'), <EOL>('<STR_LIT>', '<STR_LIT:string>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT:float>', '<STR_LIT>')]<EOL>scriptDir = os.path.dirname(__file__)<EOL>pathname = os.path.join(scriptDir, '<STR_LIT>', filename)<EOL>outFile = FileRecordStream(pathname, write=True, fields=fields)<EOL>initCumProb = initProb.cumsum()<EOL>firstOrderCumProb = dict()<EOL>for (key,value) in firstOrderProb.items():<EOL><INDENT>firstOrderCumProb[key] = value.cumsum()<EOL><DEDENT>if secondOrderProb is not None:<EOL><INDENT>secondOrderCumProb = dict()<EOL>for (key,value) in secondOrderProb.items():<EOL><INDENT>secondOrderCumProb[key] = value.cumsum()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>secondOrderCumProb = None<EOL><DEDENT>elementsInSeq = []<EOL>numElementsSinceReset = <NUM_LIT:0><EOL>maxCatIdx = len(categoryList) - <NUM_LIT:1><EOL>for _ in range(numRecords):<EOL><INDENT>if numElementsSinceReset == <NUM_LIT:0>:<EOL><INDENT>reset = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>reset = <NUM_LIT:0><EOL><DEDENT>rand = numpy.random.rand()<EOL>if secondOrderCumProb is None:<EOL><INDENT>if len(elementsInSeq) == <NUM_LIT:0>:<EOL><INDENT>catIdx = numpy.searchsorted(initCumProb, rand)<EOL><DEDENT>elif len(elementsInSeq) >= <NUM_LIT:1> and(seqLen is None or len(elementsInSeq) < seqLen-numNoise):<EOL><INDENT>catIdx = numpy.searchsorted(firstOrderCumProb[str(elementsInSeq[-<NUM_LIT:1>])], <EOL>rand)<EOL><DEDENT>else:   <EOL><INDENT>catIdx = numpy.random.randint(len(categoryList))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if len(elementsInSeq) == <NUM_LIT:0>:<EOL><INDENT>catIdx = numpy.searchsorted(initCumProb, rand)<EOL><DEDENT>elif len(elementsInSeq) == <NUM_LIT:1>:<EOL><INDENT>catIdx = 
numpy.searchsorted(firstOrderCumProb[str(elementsInSeq)], rand)<EOL><DEDENT>elif (len(elementsInSeq) >=<NUM_LIT:2>) and(seqLen is None or len(elementsInSeq) < seqLen-numNoise):<EOL><INDENT>catIdx = numpy.searchsorted(secondOrderCumProb[str(elementsInSeq[-<NUM_LIT:2>:])], rand)<EOL><DEDENT>else:   <EOL><INDENT>catIdx = numpy.random.randint(len(categoryList))<EOL><DEDENT><DEDENT>catIdx = min(maxCatIdx, catIdx)<EOL>outFile.appendRecord([reset, categoryList[catIdx], catIdx])    <EOL>elementsInSeq.append(catIdx)<EOL>numElementsSinceReset += <NUM_LIT:1><EOL>if resetsEvery is not None and numElementsSinceReset == resetsEvery:<EOL><INDENT>numElementsSinceReset = <NUM_LIT:0><EOL>elementsInSeq = []<EOL><DEDENT>if seqLen is not None and (len(elementsInSeq) == seqLen+numNoise):<EOL><INDENT>elementsInSeq = []<EOL><DEDENT><DEDENT>outFile.close()<EOL>", "docstring": "Generate a set of records reflecting a set of probabilities.\n\n    Parameters:\n    ----------------------------------------------------------------\n    filename:         name of .csv file to generate\n    numRecords:       number of records to generate\n    categoryList:     list of category names\n    initProb:         Initial probability for each category. This is a vector\n                        of length len(categoryList).\n    firstOrderProb:   A dictionary of the 1st order probabilities. The key\n                        is the 1st element of the sequence, the value is\n                        the probability of each 2nd element given the first. \n    secondOrderProb:  A dictionary of the 2nd order probabilities. The key\n                        is the first 2 elements of the sequence, the value is\n                        the probability of each possible 3rd element given the \n                        first two. If this is None, then the sequences will be\n                        first order only. \n    seqLen:           Desired length of each sequence. 
The 1st element will\n                        be generated using the initProb, the 2nd element by the\n                        firstOrder table, and the 3rd and all successive \n                        elements by the secondOrder table. None means infinite\n                        length. \n    numNoise:         Number of noise elements to place between each \n                        sequence. The noise elements are evenly distributed from \n                        all categories. \n    resetsEvery:      If not None, generate a reset every N records\n\n\n    Here is an example of some parameters:\n\n    categoryList:     ['cat1', 'cat2', 'cat3']\n\n    initProb:         [0.7, 0.2, 0.1]\n\n    firstOrderProb:   {'[0]': [0.3, 0.3, 0.4],\n                       '[1]': [0.3, 0.3, 0.4],\n                       '[2]': [0.3, 0.3, 0.4]}\n\n    secondOrderProb:  {'[0,0]': [0.3, 0.3, 0.4],\n                       '[0,1]': [0.3, 0.3, 0.4],\n                       '[0,2]': [0.3, 0.3, 0.4],\n                       '[1,0]': [0.3, 0.3, 0.4],\n                       '[1,1]': [0.3, 0.3, 0.4],\n                       '[1,2]': [0.3, 0.3, 0.4],\n                       '[2,0]': [0.3, 0.3, 0.4],\n                       '[2,1]': [0.3, 0.3, 0.4],\n                       '[2,2]': [0.3, 0.3, 0.4]}", "id": "f17515:m3"}
{"signature": "def chunk(l, n):", "body": "newn = int(<NUM_LIT:1.0> * len(l) / n + <NUM_LIT:0.5>)<EOL>for i in range(<NUM_LIT:0>, n-<NUM_LIT:1>):<EOL><INDENT>yield l[i*newn:i*newn+newn]<EOL><DEDENT>yield l[n*newn-newn:]<EOL>", "docstring": "Yield n successive chunks from l.", "id": "f17518:m3"}
{"signature": "def slice_sampler(px, N = <NUM_LIT:1>, x = None):", "body": "values = np.zeros(N, dtype=np.int)<EOL>samples = np.arange(len(px))<EOL>px = np.array(px) / (<NUM_LIT:1.>*sum(px))<EOL>u = uniform(<NUM_LIT:0>, max(px))<EOL>for n in range(N):<EOL><INDENT>included = px>=u<EOL>choice = random.sample(list(range(np.sum(included))), <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>values[n] = samples[included][choice]<EOL>u = uniform(<NUM_LIT:0>, px[included][choice])<EOL><DEDENT>if x:<EOL><INDENT>if len(x) == len(px):<EOL><INDENT>x=np.array(x)<EOL>values = x[values]<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return values<EOL>", "docstring": "Provides samples from a user-defined distribution.\n\nslice_sampler(px, N = 1, x = None)\n\nInputs:\npx = A discrete probability distribution.\nN  = Number of samples to return, default is 1\nx  = Optional list/array of observation values to return, where prob(x) = px.\n\nOutputs:\nIf x=None (default) or if len(x) != len(px), it will return an array of integers\nbetween 0 and len(px)-1. If x is supplied, it will return the\nsamples from x according to the distribution px.", "id": "f17518:m6"}
{"signature": "def resetVector(x1, x2):", "body": "size = len(x1)<EOL>for i in range(size):<EOL><INDENT>x2[i] = x1[i]<EOL><DEDENT>", "docstring": "Copies the contents of vector x1 into vector x2.\n\n@param x1 (array) binary vector to be copied\n@param x2 (array) binary vector where x1 is copied", "id": "f17529:m2"}
{"signature": "def percentOverlap(x1, x2, size):", "body": "nonZeroX1 = np.count_nonzero(x1)<EOL>nonZeroX2 = np.count_nonzero(x2)<EOL>minX1X2 = min(nonZeroX1, nonZeroX2)<EOL>percentOverlap = <NUM_LIT:0><EOL>if minX1X2 > <NUM_LIT:0>:<EOL><INDENT>percentOverlap = float(np.dot(x1, x2))/float(minX1X2)<EOL><DEDENT>return percentOverlap<EOL>", "docstring": "Computes the percentage of overlap between vectors x1 and x2.\n\n@param x1   (array) binary vector\n@param x2   (array) binary vector\n@param size (int)   length of binary vectors\n\n@return percentOverlap (float) percentage overlap between x1 and x2", "id": "f17529:m0"}
{"signature": "def buildTrainingSet(numSequences = <NUM_LIT:2>,<EOL>sequenceLength = <NUM_LIT:100>,<EOL>pctShared = <NUM_LIT>,<EOL>seqGenMode = '<STR_LIT>',<EOL>subsequenceStartPos = <NUM_LIT:10>,<EOL>numCols = <NUM_LIT:100>,<EOL>minOnes=<NUM_LIT>,<EOL>maxOnes = <NUM_LIT>,<EOL>disjointConsecutive =True):", "body": "<EOL>colSet = set(range(numCols))<EOL>if '<STR_LIT>' in seqGenMode:<EOL><INDENT>assert '<STR_LIT>' in seqGenMode and '<STR_LIT>' not in seqGenMode<EOL><DEDENT>if '<STR_LIT>' in seqGenMode or numSequences == <NUM_LIT:1>:<EOL><INDENT>pctShared = <NUM_LIT:0.0><EOL><DEDENT>if '<STR_LIT>' not in seqGenMode and '<STR_LIT>' not in seqGenMode:<EOL><INDENT>sharedSequenceLength = int(pctShared*sequenceLength)<EOL><DEDENT>elif '<STR_LIT>' in seqGenMode:<EOL><INDENT>sharedSequenceLength = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>sharedSequenceLength = <NUM_LIT:0><EOL><DEDENT>assert sharedSequenceLength + subsequenceStartPos < sequenceLength<EOL>sharedSequence = []<EOL>for i in range(sharedSequenceLength):<EOL><INDENT>if disjointConsecutive and i > <NUM_LIT:0>:<EOL><INDENT>x = generatePattern(numCols, minOnes, maxOnes, colSet,<EOL>sharedSequence[i-<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>x = generatePattern(numCols, minOnes, maxOnes, colSet)<EOL><DEDENT>sharedSequence.append(x)<EOL><DEDENT>trainingSequences = []<EOL>if '<STR_LIT>' not in seqGenMode:<EOL><INDENT>trailingLength = sequenceLength - sharedSequenceLength - subsequenceStartPos<EOL><DEDENT>else:<EOL><INDENT>trailingLength = sequenceLength - sharedSequenceLength<EOL><DEDENT>for k,s in enumerate(range(numSequences)):<EOL><INDENT>if len(trainingSequences) > <NUM_LIT:0> and '<STR_LIT>' in seqGenMode:<EOL><INDENT>r = list(range(subsequenceStartPos))+ list(range(subsequenceStartPos + sharedSequenceLength, sequenceLength))<EOL>rgen.shuffle(r)<EOL>r = r[:subsequenceStartPos]+ list(range(subsequenceStartPos, subsequenceStartPos + sharedSequenceLength))+ r[subsequenceStartPos:]<EOL>sequence = 
[trainingSequences[k-<NUM_LIT:1>][j] for j in r]<EOL><DEDENT>else:<EOL><INDENT>sequence = []<EOL>if '<STR_LIT>' not in seqGenMode:<EOL><INDENT>for i in range(subsequenceStartPos):<EOL><INDENT>if disjointConsecutive and i > <NUM_LIT:0>:<EOL><INDENT>x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>x = generatePattern(numCols, minOnes, maxOnes, colSet)<EOL><DEDENT>sequence.append(x)<EOL><DEDENT><DEDENT>if '<STR_LIT>' in seqGenMode and '<STR_LIT>' not in seqGenMode:<EOL><INDENT>sequence.extend(sharedSequence)<EOL><DEDENT>for i in range(trailingLength):<EOL><INDENT>if disjointConsecutive and i > <NUM_LIT:0>:<EOL><INDENT>x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>x = generatePattern(numCols, minOnes, maxOnes, colSet)<EOL><DEDENT>sequence.append(x)<EOL><DEDENT><DEDENT>assert len(sequence) == sequenceLength<EOL>trainingSequences.append(sequence)<EOL><DEDENT>assert len(trainingSequences) == numSequences<EOL>if VERBOSITY >= <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>pprint.pprint(trainingSequences)<EOL><DEDENT>if sharedSequenceLength > <NUM_LIT:0>:<EOL><INDENT>return (trainingSequences, subsequenceStartPos + sharedSequenceLength)<EOL><DEDENT>else:<EOL><INDENT>return (trainingSequences, -<NUM_LIT:1>)<EOL><DEDENT>", "docstring": "Build random high order test sequences.\n\n    Parameters:\n    --------------------------------------------\n    numSequences:           The number of sequences created.\n    sequenceLength:         The length of each sequence.\n    pctShared:              The percentage of sequenceLength that is shared across\n                            every sequence. If sequenceLength is 100 and pctShared\n                            is 0.2, then a subsequence consisting of 20 patterns\n                            will be in every sequence. 
Can also be the keyword\n                            'one pattern', in which case a single time step is\n                            shared.\n    seqGenMode:             What kind of sequence to generate. If contains 'shared'\n                            generates shared subsequence. If contains 'no shared',\n                            does not generate any shared subsequence. If contains\n                            'shuffle', will use common patterns shuffle among the\n                            different sequences. If contains 'beginning', will\n                            place shared subsequence at the beginning.\n    subsequenceStartPos:    The position where the shared subsequence starts\n    numCols:                Number of columns in each pattern.\n    minOnes:                The minimum number of 1's in each pattern.\n    maxOnes:                The maximum number of 1's in each pattern.\n    disjointConsecutive:    Whether to generate disjoint consecutive patterns or not.", "id": "f17531:m3"}
{"signature": "def buildHL0bTrainingSet(numOnes=<NUM_LIT:5>):", "body": "numPatterns = <NUM_LIT><EOL>p = getSimplePatterns(numOnes, numPatterns)<EOL>s = []<EOL>s.append(p[rgen.randint(<NUM_LIT:5>,numPatterns)])<EOL>for _ in range(<NUM_LIT:50>):<EOL><INDENT>r = rgen.randint(<NUM_LIT:5>,numPatterns)<EOL>print(r, end='<STR_LIT:U+0020>')<EOL>s.append(p[r])<EOL>if rgen.binomial(<NUM_LIT:1>, <NUM_LIT:0.5>) > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\", end='<STR_LIT:U+0020>')<EOL>s.append(p[<NUM_LIT:0>])<EOL>s.append(p[<NUM_LIT:1>])<EOL>s.append(p[<NUM_LIT:2>])<EOL>s.append(p[<NUM_LIT:4>])<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\", end='<STR_LIT:U+0020>')<EOL>s.append(p[<NUM_LIT:1>])<EOL>s.append(p[<NUM_LIT:2>])<EOL>s.append(p[<NUM_LIT:3>])<EOL><DEDENT>r = rgen.randint(<NUM_LIT:5>,numPatterns)<EOL>s.append(p[r])<EOL>print(r, end='<STR_LIT:U+0020>')<EOL><DEDENT>print()<EOL>return ([s], [ [p[<NUM_LIT:0>], p[<NUM_LIT:1>], p[<NUM_LIT:2>], p[<NUM_LIT:4>]],  [p[<NUM_LIT:1>], p[<NUM_LIT:2>], p[<NUM_LIT:3>]] ])<EOL>", "docstring": "Simple sequences for HL0b. Each pattern in the sequence has a series of 1's\n    in a specific set of columns.\n      There are 23 patterns, p0 to p22.\n      The sequences we want to learn are p1->p2->p3 and p0->p1->p2->p4.\n      We create a very long sequence consisting of these two sub-sequences\n      intermixed with noise, such as:\n            N N p0 p1 p2 p4 N N p1 p2 p3 N N p1 p2 p3\n      N is randomly chosen from p5 to p22", "id": "f17531:m8"}
{"signature": "def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =<NUM_LIT:100>, nSequences =[<NUM_LIT:2>],<EOL>pctShared = <NUM_LIT>, seqGenMode = '<STR_LIT>',<EOL>shouldFail = False):", "body": "print(\"<STR_LIT>\")<EOL>nFailed = <NUM_LIT:0><EOL>subsequenceStartPos = <NUM_LIT:10><EOL>assert subsequenceStartPos < sequenceLength<EOL>for numSequences in nSequences:<EOL><INDENT>print(\"<STR_LIT>\",sequenceLength, end='<STR_LIT:U+0020>')<EOL>print(\"<STR_LIT>\",cellsPerColumn,\"<STR_LIT>\",nTests,\"<STR_LIT>\", numCols)<EOL>print(\"<STR_LIT>\",numSequences, \"<STR_LIT>\", pctShared, end='<STR_LIT:U+0020>')<EOL>print(\"<STR_LIT>\", seqGenMode)<EOL>for _ in range(nTests): <EOL><INDENT>trainingSet = buildTrainingSet(numSequences = numSequences,<EOL>sequenceLength = sequenceLength,<EOL>pctShared = pctShared, seqGenMode = seqGenMode,<EOL>subsequenceStartPos = subsequenceStartPos,<EOL>numCols = numCols,<EOL>minOnes = <NUM_LIT>, maxOnes = <NUM_LIT>)<EOL>print(\"<STR_LIT>\")<EOL>numFailures3, numStrictErrors3, numPerfect3, tm3 =testSequence(trainingSet,<EOL>nTrainingReps = <NUM_LIT:10>,<EOL>numberOfCols = numCols,<EOL>cellsPerColumn = cellsPerColumn,<EOL>initialPerm = <NUM_LIT>,<EOL>connectedPerm = <NUM_LIT>,<EOL>minThreshold = <NUM_LIT:12>,<EOL>permanenceInc = <NUM_LIT>,<EOL>permanenceDec = <NUM_LIT:0.1>,<EOL>permanenceMax = <NUM_LIT:1>,<EOL>globalDecay = <NUM_LIT>,<EOL>newSynapseCount = <NUM_LIT:15>,<EOL>activationThreshold = <NUM_LIT:12>,<EOL>doPooling = False,<EOL>shouldFail = shouldFail)<EOL>print(\"<STR_LIT>\")<EOL>numFailures, numStrictErrors, numPerfect, tm2 =testSequence(trainingSet,<EOL>nTrainingReps = <NUM_LIT:2>,<EOL>numberOfCols = numCols,<EOL>cellsPerColumn = cellsPerColumn,<EOL>initialPerm = <NUM_LIT>,<EOL>connectedPerm = <NUM_LIT>,<EOL>minThreshold = <NUM_LIT:12>,<EOL>permanenceInc = <NUM_LIT>,<EOL>permanenceDec = <NUM_LIT:0>,<EOL>permanenceMax = <NUM_LIT:1>,<EOL>globalDecay = <NUM_LIT>,<EOL>newSynapseCount = <NUM_LIT:15>,<EOL>activationThreshold = 
<NUM_LIT:12>,<EOL>doPooling = False,<EOL>shouldFail = shouldFail)<EOL>print(\"<STR_LIT>\")<EOL>numFailures1, numStrictErrors1, numPerfect1, tm1 =testSequence(trainingSet,<EOL>nTrainingReps = <NUM_LIT:1>,<EOL>numberOfCols = numCols,<EOL>cellsPerColumn = cellsPerColumn,<EOL>initialPerm = <NUM_LIT>,<EOL>connectedPerm = <NUM_LIT>,<EOL>minThreshold = <NUM_LIT:12>,<EOL>permanenceInc = <NUM_LIT>,<EOL>permanenceDec = <NUM_LIT:0>,<EOL>permanenceMax = <NUM_LIT:1>,<EOL>globalDecay = <NUM_LIT>,<EOL>newSynapseCount = <NUM_LIT:15>,<EOL>activationThreshold = <NUM_LIT:12>,<EOL>doPooling = False,<EOL>shouldFail = shouldFail)<EOL>segmentInfo1 = tm1.getSegmentInfo()<EOL>segmentInfo2 = tm2.getSegmentInfo()<EOL>if (abs(segmentInfo1[<NUM_LIT:0>] - segmentInfo2[<NUM_LIT:0>]) > <NUM_LIT:3>) or(abs(segmentInfo1[<NUM_LIT:1>] - segmentInfo2[<NUM_LIT:1>]) > <NUM_LIT:3>*<NUM_LIT:15>) :<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(segmentInfo1)<EOL>print(segmentInfo2)<EOL>print(tm3.getSegmentInfo())<EOL>tm3.trimSegments()<EOL>print(tm3.getSegmentInfo())<EOL>print(\"<STR_LIT>\")<EOL>print(numFailures1, numStrictErrors1, numPerfect1)<EOL>print(numFailures, numStrictErrors, numPerfect)<EOL>print(numFailures3, numStrictErrors3, numPerfect3)<EOL>numFailures += <NUM_LIT:1><EOL><DEDENT>if numFailures == <NUM_LIT:0> and not shouldFailor numFailures > <NUM_LIT:0> and shouldFail:<EOL><INDENT>print(\"<STR_LIT>\", end='<STR_LIT:U+0020>')<EOL>if shouldFail:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>print()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>nFailed = nFailed + <NUM_LIT:1><EOL>print(\"<STR_LIT>\", numFailures)<EOL>print(\"<STR_LIT>\", numStrictErrors)<EOL>print(\"<STR_LIT>\", numPerfect)<EOL><DEDENT><DEDENT><DEDENT>return nFailed<EOL>", "docstring": "Still need to test:\n    Two overlapping sequences. OK to get new segments but check that we can\n    get correct high order prediction after multiple reps.", "id": "f17531:m19"}
{"signature": "def buildHL0aTrainingSet(numOnes=<NUM_LIT:5>):", "body": "numPatterns = <NUM_LIT><EOL>p = getSimplePatterns(numOnes, numPatterns)<EOL>s = []<EOL>s.append(p[rgen.randint(<NUM_LIT:3>,<NUM_LIT>)])<EOL>for _ in range(<NUM_LIT:20>):<EOL><INDENT>s.append(p[rgen.randint(<NUM_LIT:3>,<NUM_LIT>)])<EOL>s.append(p[<NUM_LIT:0>])<EOL>s.append(p[<NUM_LIT:1>])<EOL>s.append(p[<NUM_LIT:2>])<EOL>s.append(p[rgen.randint(<NUM_LIT:3>,<NUM_LIT>)])<EOL><DEDENT>return ([s], [[p[<NUM_LIT:0>], p[<NUM_LIT:1>], p[<NUM_LIT:2>]]])<EOL>", "docstring": "Simple sequences for HL0. Each pattern in the sequence has a series of 1's\n    in a specific set of columns.\n      There are 23 patterns, p0 to p22.\n      The sequence we want to learn is p0->p1->p2\n      We create a very long sequence consisting of N N p0 p1 p2 N N p0 p1 p2\n      N is randomly chosen from p3 to p22", "id": "f17531:m7"}
{"signature": "def findAcceptablePatterns(tm, t, whichSequence, trainingSequences, nAcceptable = <NUM_LIT:1>):", "body": "<EOL>upTo = t + <NUM_LIT:2> <EOL>if tm.doPooling:<EOL><INDENT>upTo += min(tm.segUpdateValidDuration, nAcceptable)<EOL><DEDENT>assert upTo <= len(trainingSequences[whichSequence])<EOL>acceptablePatterns = []<EOL>if len(trainingSequences) == <NUM_LIT:2> and(trainingSequences[<NUM_LIT:0>][<NUM_LIT:0>] == trainingSequences[<NUM_LIT:1>][<NUM_LIT:0>]).all():<EOL><INDENT>if (trainingSequences[<NUM_LIT:0>][t] == trainingSequences[<NUM_LIT:1>][t]).all()and (trainingSequences[<NUM_LIT:0>][t+<NUM_LIT:1>] != trainingSequences[<NUM_LIT:1>][t+<NUM_LIT:1>]).any():<EOL><INDENT>acceptablePatterns.append(trainingSequences[<NUM_LIT:0>][t+<NUM_LIT:1>])<EOL>acceptablePatterns.append(trainingSequences[<NUM_LIT:1>][t+<NUM_LIT:1>])<EOL><DEDENT><DEDENT>acceptablePatterns += [trainingSequences[whichSequence][t]for t in range(t,upTo)]<EOL>return acceptablePatterns<EOL>", "docstring": "Tries to infer the set of acceptable patterns for prediction at the given\ntime step and for the give sequence. Acceptable patterns are: the current one,\nplus a certain number of patterns after timeStep, in the sequence that the TM\nis currently tracking. Any other pattern is not acceptable.\n\nTODO:\n====\n- Doesn't work for noise cases.\n- Might run in trouble if shared subsequence at the beginning.\n\nParameters:\n==========\ntm                       the whole TM, so that we can look at its parameters\nt                        the current time step\nwhichSequence            the sequence we are currently tracking\ntrainingSequences        all the training sequences\nnAcceptable              the number of steps forward from the current timeStep\n                         we are willing to consider acceptable. 
In the case of\n                         pooling, it is less than or equal to the min of the\n                         number of training reps and the segUpdateValidDuration\n                         parameter of the TM, depending on the test case.\n                         The default value is 1, because by default, the pattern\n                         after the current one should always be predictable.\n\nReturn value:\n============\nacceptablePatterns       A list of acceptable patterns for prediction.", "id": "f17531:m10"}
{"signature": "def buildSequencePool(numSequences = <NUM_LIT:10>,<EOL>seqLen = [<NUM_LIT:2>,<NUM_LIT:3>,<NUM_LIT:4>],<EOL>numPatterns = <NUM_LIT:5>,<EOL>numOnBitsPerPattern = <NUM_LIT:3>,<EOL>patternOverlap = <NUM_LIT:0>,<EOL>**kwargs<EOL>):", "body": "<EOL>patterns = getSimplePatterns(numOnBitsPerPattern, numPatterns, patternOverlap)<EOL>numCols = len(patterns[<NUM_LIT:0>])<EOL>trainingSequences = []<EOL>for _ in range(numSequences):<EOL><INDENT>sequence = []<EOL>length = random.choice(seqLen)<EOL>for _ in range(length):<EOL><INDENT>patIdx = random.choice(range(numPatterns))<EOL>sequence.append(patterns[patIdx])<EOL><DEDENT>trainingSequences.append(sequence)<EOL><DEDENT>if VERBOSITY >= <NUM_LIT:3>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>printAllTrainingSequences(trainingSequences)<EOL><DEDENT>return (numCols, trainingSequences)<EOL>", "docstring": "Create a bunch of sequences of various lengths, all built from\n    a fixed set of patterns.\n\n    Parameters:\n    -----------------------------------------------------\n    numSequences:         Number of training sequences to generate\n    seqLen:               List of possible sequence lengths\n    numPatterns:          How many possible patterns there are to use within\n                            sequences\n    numOnBitsPerPattern:  Number of ON bits in each TM input pattern\n    patternOverlap:       Max number of bits of overlap between any 2 patterns\n    retval:               (numCols, trainingSequences)\n                            numCols - width of the patterns\n                            trainingSequences - a list of training sequences", "id": "f17532:m4"}
{"signature": "def printOneTrainingVector(x):", "body": "print('<STR_LIT>'.join('<STR_LIT:1>' if k != <NUM_LIT:0> else '<STR_LIT:.>' for k in x))<EOL>", "docstring": "Print a single vector succinctly.", "id": "f17532:m0"}
{"signature": "def getSimplePatterns(numOnes, numPatterns, patternOverlap=<NUM_LIT:0>):", "body": "assert (patternOverlap < numOnes)<EOL>numNewBitsInEachPattern = numOnes - patternOverlap<EOL>numCols = numNewBitsInEachPattern * numPatterns + patternOverlap<EOL>p = []<EOL>for i in range(numPatterns):<EOL><INDENT>x = numpy.zeros(numCols, dtype='<STR_LIT>')<EOL>startBit = i*numNewBitsInEachPattern<EOL>nextStartBit = startBit + numOnes<EOL>x[startBit:nextStartBit] = <NUM_LIT:1><EOL>p.append(x)<EOL><DEDENT>return p<EOL>", "docstring": "Very simple patterns. Each pattern has numOnes consecutive\n    bits on. The amount of overlap between consecutive patterns is\n    configurable, via the patternOverlap parameter.\n\n    Parameters:\n    -----------------------------------------------------------------------\n    numOnes:        Number of bits ON in each pattern\n    numPatterns:    Number of unique patterns to generate\n    patternOverlap: Number of bits of overlap between each successive pattern\n    retval:         patterns", "id": "f17532:m2"}
{"signature": "def createTMs(includeCPP = True,<EOL>includePy = True,<EOL>numCols = <NUM_LIT:100>,<EOL>cellsPerCol = <NUM_LIT:4>,<EOL>activationThreshold = <NUM_LIT:3>,<EOL>minThreshold = <NUM_LIT:3>,<EOL>newSynapseCount = <NUM_LIT:3>,<EOL>initialPerm = <NUM_LIT>,<EOL>permanenceInc = <NUM_LIT:0.1>,<EOL>permanenceDec = <NUM_LIT:0.0>,<EOL>globalDecay = <NUM_LIT:0.0>,<EOL>pamLength = <NUM_LIT:0>,<EOL>checkSynapseConsistency = True,<EOL>maxInfBacktrack = <NUM_LIT:0>,<EOL>maxLrnBacktrack = <NUM_LIT:0>,<EOL>**kwargs<EOL>):", "body": "<EOL>connectedPerm = <NUM_LIT:0.5><EOL>tms = dict()<EOL>if includeCPP:<EOL><INDENT>if VERBOSITY >= <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>cpp_tm = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = cellsPerCol,<EOL>initialPerm = initialPerm, connectedPerm = connectedPerm,<EOL>minThreshold = minThreshold, newSynapseCount = newSynapseCount,<EOL>permanenceInc = permanenceInc, permanenceDec = permanenceDec,<EOL>activationThreshold = activationThreshold,<EOL>globalDecay = globalDecay, burnIn = <NUM_LIT:1>,<EOL>seed=SEED, verbosity=VERBOSITY,<EOL>checkSynapseConsistency = checkSynapseConsistency,<EOL>collectStats = True,<EOL>pamLength = pamLength,<EOL>maxInfBacktrack = maxInfBacktrack,<EOL>maxLrnBacktrack = maxLrnBacktrack,<EOL>)<EOL>cpp_tm.retrieveLearningStates = True<EOL>tms['<STR_LIT>'] = cpp_tm<EOL><DEDENT>if includePy:<EOL><INDENT>if VERBOSITY >= <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>py_tm = BacktrackingTM(numberOfCols = numCols,<EOL>cellsPerColumn = cellsPerCol,<EOL>initialPerm = initialPerm,<EOL>connectedPerm = connectedPerm,<EOL>minThreshold = minThreshold,<EOL>newSynapseCount = newSynapseCount,<EOL>permanenceInc = permanenceInc,<EOL>permanenceDec = permanenceDec,<EOL>activationThreshold = activationThreshold,<EOL>globalDecay = globalDecay, burnIn = <NUM_LIT:1>,<EOL>seed=SEED, verbosity=VERBOSITY,<EOL>collectStats = True,<EOL>pamLength = pamLength,<EOL>maxInfBacktrack = 
maxInfBacktrack,<EOL>maxLrnBacktrack = maxLrnBacktrack,<EOL>)<EOL>tms['<STR_LIT>'] = py_tm<EOL><DEDENT>return tms<EOL>", "docstring": "Create one or more TM instances, placing each into a dict keyed by\n    name.\n\n    Parameters:\n    ------------------------------------------------------------------\n    retval:   tms - dict of TM instances", "id": "f17532:m5"}
{"signature": "def _createTMs(self, numCols, fixedResources=False,<EOL>checkSynapseConsistency = True):", "body": "<EOL>minThreshold = <NUM_LIT:4><EOL>activationThreshold = <NUM_LIT:8><EOL>newSynapseCount = <NUM_LIT:15><EOL>initialPerm = <NUM_LIT><EOL>connectedPerm = <NUM_LIT:0.5><EOL>permanenceInc = <NUM_LIT:0.1><EOL>permanenceDec = <NUM_LIT><EOL>if fixedResources:<EOL><INDENT>permanenceDec = <NUM_LIT:0.1><EOL>maxSegmentsPerCell = <NUM_LIT:5><EOL>maxSynapsesPerSegment = <NUM_LIT:15><EOL>globalDecay = <NUM_LIT:0><EOL>maxAge = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>permanenceDec = <NUM_LIT><EOL>maxSegmentsPerCell = -<NUM_LIT:1><EOL>maxSynapsesPerSegment = -<NUM_LIT:1><EOL>globalDecay = <NUM_LIT><EOL>maxAge = <NUM_LIT:1><EOL><DEDENT>if g_testCPPTM:<EOL><INDENT>if g_options.verbosity > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>cppTM = BacktrackingTMCPP(numberOfCols = numCols, cellsPerColumn = <NUM_LIT:4>,<EOL>initialPerm = initialPerm, connectedPerm = connectedPerm,<EOL>minThreshold = minThreshold,<EOL>newSynapseCount = newSynapseCount,<EOL>permanenceInc = permanenceInc,<EOL>permanenceDec = permanenceDec,<EOL>activationThreshold = activationThreshold,<EOL>globalDecay = globalDecay, maxAge=maxAge, burnIn = <NUM_LIT:1>,<EOL>seed=g_options.seed, verbosity=g_options.verbosity,<EOL>checkSynapseConsistency = checkSynapseConsistency,<EOL>pamLength = <NUM_LIT:1000>,<EOL>maxSegmentsPerCell = maxSegmentsPerCell,<EOL>maxSynapsesPerSegment = maxSynapsesPerSegment,<EOL>)<EOL>cppTM.retrieveLearningStates = True<EOL><DEDENT>else:<EOL><INDENT>cppTM = None<EOL><DEDENT>if g_options.verbosity > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>pyTM = BacktrackingTM(numberOfCols = numCols,<EOL>cellsPerColumn = <NUM_LIT:4>,<EOL>initialPerm = initialPerm,<EOL>connectedPerm = connectedPerm,<EOL>minThreshold = minThreshold,<EOL>newSynapseCount = newSynapseCount,<EOL>permanenceInc = permanenceInc,<EOL>permanenceDec = permanenceDec,<EOL>activationThreshold = 
activationThreshold,<EOL>globalDecay = globalDecay, maxAge=maxAge, burnIn = <NUM_LIT:1>,<EOL>seed=g_options.seed, verbosity=g_options.verbosity,<EOL>pamLength = <NUM_LIT:1000>,<EOL>maxSegmentsPerCell = maxSegmentsPerCell,<EOL>maxSynapsesPerSegment = maxSynapsesPerSegment,<EOL>)<EOL>return cppTM, pyTM<EOL>", "docstring": "Create an instance of the appropriate temporal memory. We isolate\n        all parameters as constants specified here.", "id": "f17534:c0:m4"}
{"signature": "def _buildSL2TrainingSet(self, numOnes=<NUM_LIT:10>, numRepetitions= <NUM_LIT:10>):", "body": "numPatterns = <NUM_LIT:5><EOL>numCols = <NUM_LIT:2> * numPatterns * numOnes<EOL>halfCols = numPatterns * numOnes<EOL>numNoiseBits = numOnes<EOL>p = self._getSimplePatterns(numOnes, numPatterns)<EOL>numSequences = <NUM_LIT:3><EOL>indices = [<EOL>[<NUM_LIT:0>, <NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>, <NUM_LIT:4>],<EOL>[<NUM_LIT:4>, <NUM_LIT:3>, <NUM_LIT:2>, <NUM_LIT:1>, <NUM_LIT:0>],<EOL>[<NUM_LIT:2>, <NUM_LIT:0>, <NUM_LIT:4>, <NUM_LIT:1>, <NUM_LIT:3>],<EOL>]<EOL>trainingSequences = []<EOL>for i in range(numRepetitions*numSequences):<EOL><INDENT>sequence = []<EOL>for j in range(numPatterns):<EOL><INDENT>v = numpy.zeros(numCols, dtype='<STR_LIT>')<EOL>v[<NUM_LIT:0>:halfCols] = p[indices[i % numSequences][j]]<EOL>noiseIndices = (self._rgen.permutation(halfCols)<EOL>+ halfCols)[<NUM_LIT:0>:numNoiseBits]<EOL>v[noiseIndices] = <NUM_LIT:1><EOL>sequence.append(v)<EOL><DEDENT>trainingSequences.append(sequence)<EOL><DEDENT>testSequences = []<EOL>for i in range(numSequences):<EOL><INDENT>sequence = []<EOL>for j in range(numPatterns):<EOL><INDENT>v = numpy.zeros(numCols, dtype='<STR_LIT>')<EOL>v[<NUM_LIT:0>:halfCols] = p[indices[i % numSequences][j]]<EOL>sequence.append(v)<EOL><DEDENT>testSequences.append(sequence)<EOL><DEDENT>if g_options.verbosity > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>self._printAllTrainingSequences(trainingSequences)<EOL>print(\"<STR_LIT>\")<EOL>self._printAllTrainingSequences(testSequences)<EOL><DEDENT>return (trainingSequences, testSequences)<EOL>", "docstring": "Three simple sequences, composed of the same 5 static patterns. The left\n        half of the vector contains the pattern elements, each with numOnes\n        consecutive bits. 
The right half contains numOnes random bits.\n\n        Sequence 1 is: p0, p1, p2, p3, p4\n        Sequence 2 is: p4, p3, p2, p1, p0\n        Sequence 3 is: p2, p0, p4, p1, p3\n\n        The function returns a pair:\n\n        trainingSequences:    A list containing numRepetitions instances of the\n                              above sequences\n        testSequence:         Clean test sequences with no noise on the right half", "id": "f17534:c0:m7"}
{"signature": "@staticmethod<EOL><INDENT>def getSpecFromType(nodeType):<DEDENT>", "body": "return Spec(engine_internal.Region.getSpecFromType(nodeType))<EOL>", "docstring": "@doc:place_holder(Region.getSpecFromType)", "id": "f17536:c5:m3"}
{"signature": "@staticmethod<EOL><INDENT>def unregisterRegion(regionName):<DEDENT>", "body": "engine_internal.Network.unregisterPyRegion(regionName)<EOL>", "docstring": "Unregisters a region from the internal list of regions\n\n:param str regionName: The name of the region to unregister\n    (ex: regionName=regionClass.__name__)", "id": "f17536:c6:m17"}
{"signature": "def __hash__(self):", "body": "return self._region.__hash__()<EOL>", "docstring": "Hash a region", "id": "f17536:c5:m13"}
{"signature": "def _getDimensions(self):", "body": "return Dimensions(tuple(self._region.getDimensions()))<EOL>", "docstring": "Dimensions of the region", "id": "f17536:c5:m11"}
{"signature": "def setPhases(self, name, phases):", "body": "phases = engine_internal.UInt32Set(phases)<EOL>engine_internal.Network.setPhases(self, name, phases)<EOL>", "docstring": "@doc:place_holder(Network.setPhases)", "id": "f17536:c6:m4"}
{"signature": "def addRegion(self, name, nodeType, nodeParams):", "body": "engine_internal.Network.addRegion(self, name, nodeType, nodeParams)<EOL>return self._getRegions()[name]<EOL>", "docstring": "@doc:place_holder(Network.addRegion)", "id": "f17536:c6:m2"}
{"signature": "def __ne__(self, other):", "body": "return self._region != other._region<EOL>", "docstring": "Compare regions", "id": "f17536:c5:m15"}
{"signature": "def run(self, n):", "body": "<EOL>engine_internal.Network.run(self, n)<EOL>", "docstring": "@doc:place_holder(Network.run)", "id": "f17536:c6:m5"}
{"signature": "def _get(self, method):", "body": "return getattr(self._region, method)()<EOL>", "docstring": "Auto forwarding of properties to get methods of internal region", "id": "f17536:c5:m19"}
{"signature": "def getRegionsByType(self, regionClass):", "body": "regions = []<EOL>for region in list(self.regions.values()):<EOL><INDENT>if type(region.getSelf()) is regionClass:<EOL><INDENT>regions.append(region)<EOL><DEDENT><DEDENT>return regions<EOL>", "docstring": "Gets all region instances of a given class\n(for example, nupic.regions.sp_region.SPRegion).", "id": "f17536:c6:m15"}
{"signature": "def __eq__(self, other):", "body": "return self._region == other._region<EOL>", "docstring": "Compare regions", "id": "f17536:c5:m14"}
{"signature": "def _getRegions(self):", "body": "def makeRegion(name, r):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>r = Region(r, self)<EOL>return r<EOL><DEDENT>regions = CollectionWrapper(engine_internal.Network.getRegions(self), makeRegion)<EOL>return regions<EOL>", "docstring": "Get the collection of regions in a network\n\n        This is a tricky one. The collection of regions returned from\n        from the internal network is a collection of internal regions.\n        The desired collection is a collelcion of net.Region objects\n        that also points to this network (net.network) and not to\n        the internal network. To achieve that a CollectionWrapper\n        class is used with a custom makeRegion() function (see bellow)\n        as a value wrapper. The CollectionWrapper class wraps each value in the\n        original collection with the result of the valueWrapper.", "id": "f17536:c6:m1"}
{"signature": "def getCallbacks(self, *args, **kwargs):", "body": "engine_internal.Network.getCallbacks(self, *args, **kwargs)<EOL>", "docstring": "@doc:place_holder(Network.getCallbacks)", "id": "f17536:c6:m8"}
{"signature": "def getParameter(self, paramName):", "body": "(setter, getter) = self._getParameterMethods(paramName)<EOL>if getter is None:<EOL><INDENT>import exceptions<EOL>raise exceptions.Exception(<EOL>\"<STR_LIT>\"<EOL>% (paramName, self.name, self.type))<EOL><DEDENT>return getter(paramName)<EOL>", "docstring": "Get parameter value", "id": "f17536:c5:m17"}
{"signature": "def Array(dtype, size=None, ref=False):", "body": "def getArrayType(self):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return self._dtype<EOL><DEDENT>if ref:<EOL><INDENT>assert size is None<EOL><DEDENT>index = basicTypes.index(dtype)<EOL>if index == -<NUM_LIT:1>:<EOL><INDENT>raise Exception('<STR_LIT>' + dtype)<EOL><DEDENT>if size and size <= <NUM_LIT:0>:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>suffix = '<STR_LIT>' if ref else '<STR_LIT>'<EOL>arrayFactory = getattr(engine_internal, dtype + suffix)<EOL>arrayFactory.getType = getArrayType<EOL>if size:<EOL><INDENT>a = arrayFactory(size)<EOL><DEDENT>else:<EOL><INDENT>a = arrayFactory()<EOL><DEDENT>a._dtype = basicTypes[index]<EOL>return a<EOL>", "docstring": "Factory function that creates typed Array or ArrayRef objects\n\n    dtype - the data type of the array (as string).\n      Supported types are: Byte, Int16, UInt16, Int32, UInt32, Int64, UInt64, Real32, Real64\n\n    size - the size of the array. Must be positive integer.", "id": "f17536:m1"}
{"signature": "@classmethod<EOL><INDENT>def _orderForCoordinate(cls, coordinate):<DEDENT>", "body": "seed = cls._hashCoordinate(coordinate)<EOL>rng = Random(seed)<EOL>return rng.getReal64()<EOL>", "docstring": "Returns the order for a coordinate.\n\n@param coordinate (numpy.array) Coordinate\n@return (float) A value in the interval [0, 1), representing the\n                order of the coordinate", "id": "f17537:c0:m8"}
{"signature": "@staticmethod<EOL><INDENT>def _neighbors(coordinate, radius):<DEDENT>", "body": "ranges = (xrange(n-radius, n+radius+<NUM_LIT:1>) for n in coordinate.tolist())<EOL>return numpy.array(list(itertools.product(*ranges)))<EOL>", "docstring": "Returns coordinates around given coordinate, within given radius.\nIncludes given coordinate.\n\n@param coordinate (numpy.array) N-dimensional integer coordinate\n@param radius (int) Radius around `coordinate`\n\n@return (numpy.array) List of coordinates", "id": "f17537:c0:m5"}
{"signature": "def encodeIntoArray(self, inputData, output):", "body": "(coordinate, radius) = inputData<EOL>assert isinstance(radius, int), (\"<STR_LIT>\"<EOL>.format(radius, type(radius)))<EOL>neighbors = self._neighbors(coordinate, radius)<EOL>winners = self._topWCoordinates(neighbors, self.w)<EOL>bitFn = lambda coordinate: self._bitForCoordinate(coordinate, self.n)<EOL>indices = numpy.array([bitFn(w) for w in winners])<EOL>output[:] = <NUM_LIT:0><EOL>output[indices] = <NUM_LIT:1><EOL>", "docstring": "See `nupic.encoders.base.Encoder` for more information.\n\n@param inputData (tuple) Contains coordinate (numpy.array, N-dimensional\n                         integer coordinate) and radius (int)\n@param output (numpy.array) Stores encoded SDR in this numpy array", "id": "f17537:c0:m4"}
{"signature": "@classmethod<EOL><INDENT>def _bitForCoordinate(cls, coordinate, n):<DEDENT>", "body": "seed = cls._hashCoordinate(coordinate)<EOL>rng = Random(seed)<EOL>return rng.getUInt32(n)<EOL>", "docstring": "Maps the coordinate to a bit in the SDR.\n\n@param coordinate (numpy.array) Coordinate\n@param n (int) The number of available bits in the SDR\n@return (int) The index to a bit in the SDR", "id": "f17537:c0:m9"}
{"signature": "def getScalars(self, input):", "body": "return numpy.array(self.getEncodedValues(input))<EOL>", "docstring": "See method description in :meth:`~.nupic.encoders.base.Encoder.getScalars`.\n\n:param input: (datetime) representing the time being encoded\n\n:returns: A numpy array of the corresponding scalar values in the following\n          order: season, dayOfWeek, weekend, holiday, timeOfDay. Some of\n          these fields might be omitted if they were not specified in the\n          encoder.", "id": "f17539:c0:m4"}
{"signature": "def getScalarNames(self, parentFieldName='<STR_LIT>'):", "body": "names = []<EOL>def _formFieldName(encoder):<EOL><INDENT>if parentFieldName == '<STR_LIT>':<EOL><INDENT>return encoder.name<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>' % (parentFieldName, encoder.name)<EOL><DEDENT><DEDENT>if self.seasonEncoder is not None:<EOL><INDENT>names.append(_formFieldName(self.seasonEncoder))<EOL><DEDENT>if self.dayOfWeekEncoder is not None:<EOL><INDENT>names.append(_formFieldName(self.dayOfWeekEncoder))<EOL><DEDENT>if self.customDaysEncoder is not None:<EOL><INDENT>names.append(_formFieldName(self.customDaysEncoder))<EOL><DEDENT>if self.weekendEncoder is not None:<EOL><INDENT>names.append(_formFieldName(self.weekendEncoder))<EOL><DEDENT>if self.holidayEncoder is not None:<EOL><INDENT>names.append(_formFieldName(self.holidayEncoder))<EOL><DEDENT>if self.timeOfDayEncoder is not None:<EOL><INDENT>names.append(_formFieldName(self.timeOfDayEncoder))<EOL><DEDENT>return names<EOL>", "docstring": "See method description in base.py", "id": "f17539:c0:m2"}
{"signature": "def getBucketIndices(self, input):", "body": "if input == SENTINEL_VALUE_FOR_MISSING_DATA:<EOL><INDENT>return [None] * len(self.encoders)<EOL><DEDENT>else:<EOL><INDENT>assert isinstance(input, datetime.datetime)<EOL>scalars = self.getScalars(input)<EOL>result = []<EOL>for i in xrange(len(self.encoders)):<EOL><INDENT>(name, encoder, offset) = self.encoders[i]<EOL>result.extend(encoder.getBucketIndices(scalars[i]))<EOL><DEDENT>return result<EOL><DEDENT>", "docstring": "See method description in base.py", "id": "f17539:c0:m5"}
{"signature": "def encodeIntoArray(self, value, output):", "body": "denseInput = numpy.zeros(output.shape)<EOL>try:<EOL><INDENT>denseInput[value] = <NUM_LIT:1><EOL><DEDENT>except IndexError:<EOL><INDENT>if isinstance(value, numpy.ndarray):<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\".format(<EOL>value.dtype))<EOL><DEDENT>raise<EOL><DEDENT>super(SparsePassThroughEncoder, self).encodeIntoArray(denseInput, output)<EOL>", "docstring": "See method description in base.py", "id": "f17540:c0:m1"}
{"signature": "def getScalars(self, input):", "body": "if input == SENTINEL_VALUE_FOR_MISSING_DATA:<EOL><INDENT>return numpy.array([None])<EOL><DEDENT>else:<EOL><INDENT>return numpy.array([self.categoryToIndex.get(input, <NUM_LIT:0>)])<EOL><DEDENT>", "docstring": "See method description in base.py", "id": "f17541:c0:m4"}
{"signature": "def topDownCompute(self, encoded):", "body": "encoderResult = self.encoder.topDownCompute(encoded)[<NUM_LIT:0>]<EOL>value = encoderResult.value<EOL>categoryIndex = int(round(value))<EOL>category = self.indexToCategory[categoryIndex]<EOL>return EncoderResult(value=category, scalar=categoryIndex,<EOL>encoding=encoderResult.encoding)<EOL>", "docstring": "See the function description in base.py", "id": "f17541:c0:m11"}
{"signature": "def getBucketValues(self):", "body": "if self._bucketValues is None:<EOL><INDENT>numBuckets = len(self.encoder.getBucketValues())<EOL>self._bucketValues = []<EOL>for bucketIndex in range(numBuckets):<EOL><INDENT>self._bucketValues.append(self.getBucketInfo([bucketIndex])[<NUM_LIT:0>].value)<EOL><DEDENT><DEDENT>return self._bucketValues<EOL>", "docstring": "See the function description in base.py", "id": "f17541:c0:m9"}
{"signature": "def getBucketIndices(self, input):", "body": "<EOL>if input == SENTINEL_VALUE_FOR_MISSING_DATA:<EOL><INDENT>return [None]<EOL><DEDENT>else:<EOL><INDENT>return self.encoder.getBucketIndices(self.categoryToIndex.get(input, <NUM_LIT:0>))<EOL><DEDENT>", "docstring": "See method description in base.py", "id": "f17541:c0:m5"}
{"signature": "def encodeIntoArray(self, inputData, output):", "body": "raise NotImplementedError()<EOL>", "docstring": "Encodes inputData and puts the encoded value into the numpy output array,\nwhich is a 1-D array of length returned by :meth:`.getWidth`.\n\n.. note:: The numpy output array is reused, so clear it before updating it.\n\n:param inputData: Data to encode. This should be validated by the encoder.\n:param output: numpy 1-D array of same length returned by\n       :meth:`.getWidth`.", "id": "f17542:c0:m1"}
{"signature": "def getBucketIndices(self, inputData):", "body": "retVals = []<EOL>if self.encoders is not None:<EOL><INDENT>for (name, encoder, offset) in self.encoders:<EOL><INDENT>values = encoder.getBucketIndices(self._getInputValue(inputData, name))<EOL>retVals.extend(values)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>assert False, \"<STR_LIT>\"\"<STR_LIT>\"<EOL><DEDENT>return retVals<EOL>", "docstring": "Returns an array containing the sub-field bucket indices for each sub-field\nof the inputData. To get the associated field names for each of the buckets,\ncall :meth:`.getScalarNames`.\n\n:param inputData: The data from the source. This is typically an object with\n             members.\n:return: array of bucket indices", "id": "f17542:c0:m12"}
{"signature": "def getScalars(self, inputData):", "body": "retVals = numpy.array([])<EOL>if self.encoders is not None:<EOL><INDENT>for (name, encoder, offset) in self.encoders:<EOL><INDENT>values = encoder.getScalars(self._getInputValue(inputData, name))<EOL>retVals = numpy.hstack((retVals, values))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>retVals = numpy.hstack((retVals, inputData))<EOL><DEDENT>return retVals<EOL>", "docstring": "Returns a numpy array containing the sub-field scalar value(s) for\neach sub-field of the ``inputData``. To get the associated field names for\neach of the scalar values, call :meth:`.getScalarNames()`.\n\nFor a simple scalar encoder, the scalar value is simply the input unmodified.\nFor category encoders, it is the scalar representing the category string\nthat is passed in. For the datetime encoder, the scalar value is the\nthe number of seconds since epoch.\n\nThe intent of the scalar representation of a sub-field is to provide a\nbaseline for measuring error differences. You can compare the scalar value\nof the inputData with the scalar value returned from :meth:`.topDownCompute`\non a top-down representation to evaluate prediction accuracy, for example.\n\n:param inputData: The data from the source. This is typically an object with\n             members\n:return: array of scalar values", "id": "f17542:c0:m10"}
{"signature": "def closenessScores(self, expValues, actValues, fractional=True):", "body": "<EOL>if self.encoders is None:<EOL><INDENT>err = abs(expValues[<NUM_LIT:0>] - actValues[<NUM_LIT:0>])<EOL>if fractional:<EOL><INDENT>denom = max(expValues[<NUM_LIT:0>], actValues[<NUM_LIT:0>])<EOL>if denom == <NUM_LIT:0>:<EOL><INDENT>denom = <NUM_LIT:1.0><EOL><DEDENT>closeness = <NUM_LIT:1.0> - float(err)/denom<EOL>if closeness < <NUM_LIT:0>:<EOL><INDENT>closeness = <NUM_LIT:0><EOL><DEDENT><DEDENT>else:<EOL><INDENT>closeness = err<EOL><DEDENT>return numpy.array([closeness])<EOL><DEDENT>scalarIdx = <NUM_LIT:0><EOL>retVals = numpy.array([])<EOL>for (name, encoder, offset) in self.encoders:<EOL><INDENT>values = encoder.closenessScores(expValues[scalarIdx:], actValues[scalarIdx:],<EOL>fractional=fractional)<EOL>scalarIdx += len(values)<EOL>retVals = numpy.hstack((retVals, values))<EOL><DEDENT>return retVals<EOL>", "docstring": "Compute closeness scores between the expected scalar value(s) and actual\nscalar value(s). The expected scalar values are typically those obtained\nfrom the :meth:`.getScalars` method. The actual scalar values are typically\nthose returned from :meth:`.topDownCompute`.\n\nThis method returns one closeness score for each value in expValues (or\nactValues which must be the same length). The closeness score ranges from\n0 to 1.0, 1.0 being a perfect match and 0 being the worst possible match.\n\nIf this encoder is a simple, single field encoder, then it will expect\njust 1 item in each of the ``expValues`` and ``actValues`` arrays.\nMulti-encoders will expect 1 item per sub-encoder.\n\nEach encoder type can define it's own metric for closeness. For example,\na category encoder may return either 1 or 0, if the scalar matches exactly\nor not. 
A scalar encoder might return a percentage match, etc.\n\n:param expValues: Array of expected scalar values, typically obtained from\n                 :meth:`.getScalars`\n:param actValues: Array of actual values, typically obtained from\n                 :meth:`.topDownCompute`\n\n:return: Array of closeness scores, one per item in expValues (or\n         actValues).", "id": "f17542:c0:m24"}
{"signature": "def _isSequence(obj):", "body": "mType = type(obj)<EOL>return mType is list or mType is tuple<EOL>", "docstring": "Helper function to determine if a function is a list or sequence.", "id": "f17542:m0"}
{"signature": "def getBucketValues(self):", "body": "raise Exception(\"<STR_LIT>\")<EOL>", "docstring": "**Must be overridden by subclasses.**\n\nReturns a list of items, one for each bucket defined by this encoder.\nEach item is the value assigned to that bucket, this is the same as the\n:attr:`.EncoderResult.value` that would be returned by\n:meth:`.getBucketInfo` for that bucket and is in the same format as the\ninput that would be passed to :meth:`.encode`.\n\nThis call is faster than calling :meth:`.getBucketInfo` on each bucket\nindividually if all you need are the bucket values.\n\n:return: list of items, each item representing the bucket value for that\n         bucket.", "id": "f17542:c0:m21"}
{"signature": "def decode(self, encoded, parentFieldName='<STR_LIT>'):", "body": "fieldsDict = dict()<EOL>fieldsOrder = []<EOL>if parentFieldName == '<STR_LIT>':<EOL><INDENT>parentName = self.name<EOL><DEDENT>else:<EOL><INDENT>parentName = \"<STR_LIT>\" % (parentFieldName, self.name)<EOL><DEDENT>if self.encoders is not None:<EOL><INDENT>for i in range(len(self.encoders)):<EOL><INDENT>(name, encoder, offset) = self.encoders[i]<EOL>if i < len(self.encoders)-<NUM_LIT:1>:<EOL><INDENT>nextOffset = self.encoders[i+<NUM_LIT:1>][<NUM_LIT:2>]<EOL><DEDENT>else:<EOL><INDENT>nextOffset = self.width<EOL><DEDENT>fieldOutput = encoded[offset:nextOffset]<EOL>(subFieldsDict, subFieldsOrder) = encoder.decode(fieldOutput,<EOL>parentFieldName=parentName)<EOL>fieldsDict.update(subFieldsDict)<EOL>fieldsOrder.extend(subFieldsOrder)<EOL><DEDENT><DEDENT>return (fieldsDict, fieldsOrder)<EOL>", "docstring": "Takes an encoded output and does its best to work backwards and generate\nthe input that would have generated it.\n\nIn cases where the encoded output contains more ON bits than an input\nwould have generated, this routine will return one or more ranges of inputs\nwhich, if their encoded outputs were ORed together, would produce the\ntarget output. This behavior makes this method suitable for doing things\nlike generating a description of a learned coincidence in the SP, which\nin many cases might be a union of one or more inputs.\n\nIf instead, you want to figure the *most likely* single input scalar value\nthat would have generated a specific encoded output, use the\n:meth:`.topDownCompute` method.\n\nIf you want to pretty print the return value from this method, use the\n:meth:`.decodedToStr` method.\n\n:param encoded:      The encoded output that you want decode\n:param parentFieldName: The name of the encoder which is our parent. 
This name\n       is prefixed to each of the field names within this encoder to form the\n       keys of the dict() in the retval.\n\n:return: tuple(``fieldsDict``, ``fieldOrder``)\n\n          ``fieldsDict`` is a dict() where the keys represent field names\n          (only 1 if this is a simple encoder, > 1 if this is a multi\n          or date encoder) and the values are the result of decoding each\n          field. If there are  no bits in encoded that would have been\n          generated by a field, it won't be present in the dict. The\n          key of each entry in the dict is formed by joining the passed in\n          parentFieldName with the child encoder name using a '.'.\n\n          Each 'value' in ``fieldsDict`` consists of (ranges, desc), where\n          ranges is a list of one or more (minVal, maxVal) ranges of\n          input that would generate bits in the encoded output and 'desc'\n          is a pretty print description of the ranges. For encoders like\n          the category encoder, the 'desc' will contain the category\n          names that correspond to the scalar values included in the\n          ranges.\n\n          ``fieldOrder`` is a list of the keys from ``fieldsDict``, in the\n          same order as the fields appear in the encoded output.\n\n          TODO: when we switch to Python 2.7 or 3.x, use OrderedDict\n\nExample retvals for a scalar encoder:\n\n.. code-block:: python\n\n   {'amount':  ( [[1,3], [7,10]], '1-3, 7-10' )}\n   {'amount':  ( [[2.5,2.5]],     '2.5'       )}\n\nExample retval for a category encoder:\n\n.. code-block:: python\n\n   {'country': ( [[1,1], [5,6]], 'US, GB, ES' )}\n\nExample retval for a multi encoder:\n\n.. code-block:: python\n\n   {'amount':  ( [[2.5,2.5]],     '2.5'       ),\n    'country': ( [[1,1], [5,6]],  'US, GB, ES' )}", "id": "f17542:c0:m19"}
{"signature": "def getWidth(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Should return the output width, in bits.\n\n        :return: (int) output width in bits", "id": "f17542:c0:m0"}
{"signature": "def encodedBitDescription(self, bitOffset, formatted=False):", "body": "<EOL>(prevFieldName, prevFieldOffset) = (None, None)<EOL>description = self.getDescription()<EOL>for i in range(len(description)):<EOL><INDENT>(name, offset) = description[i]<EOL>if formatted:<EOL><INDENT>offset = offset + i<EOL>if bitOffset == offset-<NUM_LIT:1>:<EOL><INDENT>prevFieldName = \"<STR_LIT>\"<EOL>prevFieldOffset = bitOffset<EOL>break<EOL><DEDENT><DEDENT>if bitOffset < offset:<EOL><INDENT>break<EOL><DEDENT>(prevFieldName, prevFieldOffset) = (name, offset)<EOL><DEDENT>width = self.getDisplayWidth() if formatted else self.getWidth()<EOL>if prevFieldOffset is None or bitOffset > self.getWidth():<EOL><INDENT>raise IndexError(\"<STR_LIT>\" % width)<EOL><DEDENT>return (prevFieldName, bitOffset - prevFieldOffset)<EOL>", "docstring": "Return a description of the given bit in the encoded output.\nThis will include the field name and the offset within the field.\n\n:param bitOffset:      Offset of the bit to get the description of\n:param formatted:      If True, the bitOffset is w.r.t. formatted output,\n                      which includes separators\n:return:             tuple(``fieldName``, ``offsetWithinField``)", "id": "f17542:c0:m16"}
{"signature": "def getEncoderList(self):", "body": "if hasattr(self, '<STR_LIT>') andself._flattenedEncoderList is not None:<EOL><INDENT>return self._flattenedEncoderList<EOL><DEDENT>encoders = []<EOL>if self.encoders is not None:<EOL><INDENT>for (name, encoder, offset) in self.encoders:<EOL><INDENT>subEncoders = encoder.getEncoderList()<EOL>encoders.extend(subEncoders)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>encoders.append(self)<EOL><DEDENT>self._flattenedEncoderList = encoders<EOL>return encoders<EOL>", "docstring": ":return: a reference to each sub-encoder in this encoder. They are\n         returned in the same order as they are for :meth:`.getScalarNames`\n         and :meth:`.getScalars`.", "id": "f17542:c0:m9"}
{"signature": "def setFieldStats(self, fieldName, fieldStatistics):", "body": "pass<EOL>", "docstring": "This method is called by the model to set the statistics like min and\nmax for the underlying encoders if this information is available.\n\n:param fieldName: name of the field this encoder is encoding, provided by\n      :class:`~.nupic.encoders.multi.MultiEncoder`.\n\n:param fieldStatistics: dictionary of dictionaries with the first level being\n      the fieldname and the second index the statistic ie:\n      ``fieldStatistics['pounds']['min']``", "id": "f17542:c0:m3"}
{"signature": "def _getInputValue(self, obj, fieldName):", "body": "if isinstance(obj, dict):<EOL><INDENT>if not fieldName in obj:<EOL><INDENT>knownFields = \"<STR_LIT:U+002CU+0020>\".join(<EOL>key for key in list(obj.keys()) if not key.startswith(\"<STR_LIT:_>\")<EOL>)<EOL>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (<EOL>fieldName, knownFields, fieldName<EOL>)<EOL>)<EOL><DEDENT>return obj[fieldName]<EOL><DEDENT>else:<EOL><INDENT>return getattr(obj, fieldName)<EOL><DEDENT>", "docstring": "Gets the value of a given field from the input record", "id": "f17542:c0:m8"}
{"signature": "def getEncodedValues(self, inputData):", "body": "retVals = []<EOL>if self.encoders is not None:<EOL><INDENT>for name, encoders, offset in self.encoders:<EOL><INDENT>values = encoders.getEncodedValues(self._getInputValue(inputData, name))<EOL>if _isSequence(values):<EOL><INDENT>retVals.extend(values)<EOL><DEDENT>else:<EOL><INDENT>retVals.append(values)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if _isSequence(inputData):<EOL><INDENT>retVals.extend(inputData)<EOL><DEDENT>else:<EOL><INDENT>retVals.append(inputData)<EOL><DEDENT><DEDENT>return tuple(retVals)<EOL>", "docstring": "Returns the input in the same format as is returned by\n:meth:`.topDownCompute`. For most encoder types, this is the same as the\ninput data. For instance, for scalar and category types, this corresponds to\nthe numeric and string values, respectively, from the inputs. For datetime\nencoders, this returns the list of scalars for each of the sub-fields\n(timeOfDay, dayOfWeek, etc.)\n\nThis method is essentially the same as :meth:`.getScalars` except that it\nreturns strings.\n\n:param inputData: The input data in the format it is received from the data\n                  source\n\n:return: A list of values, in the same format and in the same order as they\n         are returned by :meth:`.topDownCompute`.", "id": "f17542:c0:m11"}
{"signature": "def scalarsToStr(self, scalarValues, scalarNames=None):", "body": "if scalarNames is None:<EOL><INDENT>scalarNames = self.getScalarNames()<EOL><DEDENT>desc = '<STR_LIT>'<EOL>for (name, value) in zip(scalarNames, scalarValues):<EOL><INDENT>if len(desc) > <NUM_LIT:0>:<EOL><INDENT>desc += \"<STR_LIT>\" % (name, value)<EOL><DEDENT>else:<EOL><INDENT>desc += \"<STR_LIT>\" % (name, value)<EOL><DEDENT><DEDENT>return desc<EOL>", "docstring": "Return a pretty print string representing the return values from\n:meth:`.getScalars` and :meth:`.getScalarNames`.\n\n:param scalarValues: input values to encode to string\n:param scalarNames: optional input of scalar names to convert. If None, gets\n                   scalar names from :meth:`.getScalarNames`\n:return: string representation of scalar values", "id": "f17542:c0:m13"}
{"signature": "def setLearning(self, learningEnabled):", "body": "<EOL>if hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>self._learningEnabled = learningEnabled<EOL><DEDENT>", "docstring": "Set whether learning is enabled.\n\n        :param learningEnabled: (bool) whether learning should be enabled", "id": "f17542:c0:m2"}
{"signature": "def getBucketInfo(self, buckets):", "body": "<EOL>if self.encoders is None:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>retVals = []<EOL>bucketOffset = <NUM_LIT:0><EOL>for i in range(len(self.encoders)):<EOL><INDENT>(name, encoder, offset) = self.encoders[i]<EOL>if encoder.encoders is not None:<EOL><INDENT>nextBucketOffset = bucketOffset + len(encoder.encoders)<EOL><DEDENT>else:<EOL><INDENT>nextBucketOffset = bucketOffset + <NUM_LIT:1><EOL><DEDENT>bucketIndices = buckets[bucketOffset:nextBucketOffset]<EOL>values = encoder.getBucketInfo(bucketIndices)<EOL>retVals.extend(values)<EOL>bucketOffset = nextBucketOffset<EOL><DEDENT>return retVals<EOL>", "docstring": "Returns a list of :class:`.EncoderResult` namedtuples describing the inputs\nfor each sub-field that correspond to the bucket indices passed in\n``buckets``. To get the associated field names for each of the values, call\n:meth:`.getScalarNames`.\n\n:param buckets: The list of bucket indices, one for each sub-field encoder.\n               These bucket indices for example may have been retrieved\n               from the :meth:`.getBucketIndices` call.\n:return: A list of :class:`.EncoderResult`.", "id": "f17542:c0:m22"}
{"signature": "def getDecoderOutputFieldTypes(self):", "body": "if hasattr(self, '<STR_LIT>') andself._flattenedFieldTypeList is not None:<EOL><INDENT>return self._flattenedFieldTypeList<EOL><DEDENT>fieldTypes = []<EOL>for (name, encoder, offset) in self.encoders:<EOL><INDENT>subTypes = encoder.getDecoderOutputFieldTypes()<EOL>fieldTypes.extend(subTypes)<EOL><DEDENT>self._flattenedFieldTypeList = fieldTypes<EOL>return fieldTypes<EOL>", "docstring": "Returns a sequence of field types corresponding to the elements in the\ndecoded output field array.  The types are defined by\n:class:`~nupic.data.field_meta.FieldMetaType`.\n\n:return: list of :class:`~nupic.data.field_meta.FieldMetaType` objects", "id": "f17542:c0:m6"}
{"signature": "def _getScaledValue(self, inpt):", "body": "if inpt == SENTINEL_VALUE_FOR_MISSING_DATA:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>val = inpt<EOL>if val < self.minval:<EOL><INDENT>val = self.minval<EOL><DEDENT>elif val > self.maxval:<EOL><INDENT>val = self.maxval<EOL><DEDENT>scaledVal = math.log10(val)<EOL>return scaledVal<EOL><DEDENT>", "docstring": "Convert the input, which is in normal space, into log space", "id": "f17543:c0:m4"}
{"signature": "def getBucketIndices(self, inpt):", "body": "<EOL>scaledVal = self._getScaledValue(inpt)<EOL>if scaledVal is None:<EOL><INDENT>return [None]<EOL><DEDENT>else:<EOL><INDENT>return self.encoder.getBucketIndices(scaledVal)<EOL><DEDENT>", "docstring": "See the function description in base.py", "id": "f17543:c0:m5"}
{"signature": "def topDownCompute(self, encoded):", "body": "scaledResult = self.encoder.topDownCompute(encoded)[<NUM_LIT:0>]<EOL>scaledValue = scaledResult.value<EOL>value = math.pow(<NUM_LIT:10>, scaledValue)<EOL>return EncoderResult(value=value, scalar=value,<EOL>encoding = scaledResult.encoding)<EOL>", "docstring": "See the function description in base.py", "id": "f17543:c0:m10"}
{"signature": "def addEncoder(self, name, encoder):", "body": "self.encoders.append((name, encoder, self.width))<EOL>for d in encoder.getDescription():<EOL><INDENT>self.description.append((d[<NUM_LIT:0>], d[<NUM_LIT:1>] + self.width))<EOL><DEDENT>self.width += encoder.getWidth()<EOL>", "docstring": "Adds one encoder.\n\n:param name: (string) name of encoder, should be unique\n:param encoder: (:class:`.Encoder`) the encoder to add", "id": "f17544:c0:m2"}
{"signature": "def encodeIntoArray(self, inputVal, outputVal):", "body": "if len(inputVal) != len(outputVal):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (<EOL>len(inputVal), len(outputVal)))<EOL><DEDENT>if self.w is not None and sum(inputVal) != self.w:<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (<EOL>sum(inputVal), self.w))<EOL><DEDENT>outputVal[:] = inputVal[:]<EOL>if self.verbosity >= <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT>\", inputVal, \"<STR_LIT>\", outputVal)<EOL>print(\"<STR_LIT>\", self.decodedToStr(self.decode(outputVal)))<EOL><DEDENT>", "docstring": "See method description in base.py", "id": "f17546:c0:m6"}
{"signature": "def closenessScores(self, expValues, actValues, **kwargs):", "body": "ratio = <NUM_LIT:1.0><EOL>esum = int(expValues.sum())<EOL>asum = int(actValues.sum())<EOL>if asum > esum:<EOL><INDENT>diff = asum - esum<EOL>if diff < esum:<EOL><INDENT>ratio = <NUM_LIT:1> - diff/float(esum)<EOL><DEDENT>else:<EOL><INDENT>ratio = <NUM_LIT:1>/float(diff)<EOL><DEDENT><DEDENT>olap = expValues & actValues<EOL>osum = int(olap.sum())<EOL>if esum == <NUM_LIT:0>:<EOL><INDENT>r = <NUM_LIT:0.0><EOL><DEDENT>else:<EOL><INDENT>r = osum/float(esum)<EOL><DEDENT>r = r * ratio<EOL>return numpy.array([r])<EOL>", "docstring": "Does a bitwise compare of the two bitmaps and returns a fractonal\nvalue between 0 and 1 of how similar they are.\n\n- ``1`` => identical\n- ``0`` => no overlaping bits\n\n``kwargs`` will have the keyword \"fractional\", which is assumed by this\nencoder.", "id": "f17546:c0:m10"}
{"signature": "def getScalars(self, input):", "body": "return numpy.array([<NUM_LIT:0>])<EOL>", "docstring": "See method description in base.py", "id": "f17546:c0:m4"}
{"signature": "def getBucketIndices(self, input):", "body": "return [<NUM_LIT:0>]<EOL>", "docstring": "See method description in base.py", "id": "f17546:c0:m5"}
{"signature": "def radiusForSpeed(self, speed):", "body": "overlap = <NUM_LIT><EOL>coordinatesPerTimestep = speed * self.timestep / self.scale<EOL>radius = int(round(float(coordinatesPerTimestep) / <NUM_LIT:2> * overlap))<EOL>minRadius = int(math.ceil((math.sqrt(self.w) - <NUM_LIT:1>) / <NUM_LIT:2>))<EOL>return max(radius, minRadius)<EOL>", "docstring": "Returns radius for given speed.\n\nTries to get the encodings of consecutive readings to be\nadjacent with some overlap.\n\n:param: speed (float) Speed (in meters per second)\n:returns: (int) Radius for given speed", "id": "f17547:c0:m5"}
{"signature": "def getScalars(self, inputData):", "body": "return numpy.array([<NUM_LIT:0>] * len(self.getDescription()))<EOL>", "docstring": "See `nupic.encoders.base.Encoder` for more information.", "id": "f17547:c0:m2"}
{"signature": "def encodeIntoArray(self, inputData, output):", "body": "altitude = None<EOL>if len(inputData) == <NUM_LIT:4>:<EOL><INDENT>(speed, longitude, latitude, altitude) = inputData<EOL><DEDENT>else:<EOL><INDENT>(speed, longitude, latitude) = inputData<EOL><DEDENT>coordinate = self.coordinateForPosition(longitude, latitude, altitude)<EOL>radius = self.radiusForSpeed(speed)<EOL>super(GeospatialCoordinateEncoder, self).encodeIntoArray(<EOL>(coordinate, radius), output)<EOL>", "docstring": "See `nupic.encoders.base.Encoder` for more information.\n\n:param: inputData (tuple) Contains speed (float), longitude (float),\n                         latitude (float), altitude (float)\n:param: output (numpy.array) Stores encoded SDR in this numpy array", "id": "f17547:c0:m3"}
{"signature": "def _getTopDownMapping(self):", "body": "<EOL>if self._topDownMappingM is None:<EOL><INDENT>if self.periodic:<EOL><INDENT>self._topDownValues = numpy.arange(self.minval + self.resolution / <NUM_LIT>,<EOL>self.maxval,<EOL>self.resolution)<EOL><DEDENT>else:<EOL><INDENT>self._topDownValues = numpy.arange(self.minval,<EOL>self.maxval + self.resolution / <NUM_LIT>,<EOL>self.resolution)<EOL><DEDENT>numCategories = len(self._topDownValues)<EOL>self._topDownMappingM = SM32(numCategories, self.n)<EOL>outputSpace = numpy.zeros(self.n, dtype=GetNTAReal())<EOL>for i in range(numCategories):<EOL><INDENT>value = self._topDownValues[i]<EOL>value = max(value, self.minval)<EOL>value = min(value, self.maxval)<EOL>self.encodeIntoArray(value, outputSpace, learn=False)<EOL>self._topDownMappingM.setRowFromDense(i, outputSpace)<EOL><DEDENT><DEDENT>return self._topDownMappingM<EOL>", "docstring": "Return the interal _topDownMappingM matrix used for handling the\n        bucketInfo() and topDownCompute() methods. This is a matrix, one row per\n        category (bucket) where each row contains the encoded output for that\n        category.", "id": "f17548:c0:m12"}
{"signature": "def topDownCompute(self, encoded):", "body": "<EOL>topDownMappingM = self._getTopDownMapping()<EOL>category = topDownMappingM.rightVecProd(encoded).argmax()<EOL>return self.getBucketInfo([category])<EOL>", "docstring": "See the function description in base.py", "id": "f17548:c0:m15"}
{"signature": "def _checkReasonableSettings(self):", "body": "if self.w < <NUM_LIT>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % self.w)<EOL><DEDENT>", "docstring": "(helper function) check if the settings are reasonable for SP to work", "id": "f17548:c0:m2"}
{"signature": "def getBucketValues(self):", "body": "<EOL>if self._bucketValues is None:<EOL><INDENT>topDownMappingM = self._getTopDownMapping()<EOL>numBuckets = topDownMappingM.nRows()<EOL>self._bucketValues = []<EOL>for bucketIdx in range(numBuckets):<EOL><INDENT>self._bucketValues.append(self.getBucketInfo([bucketIdx])[<NUM_LIT:0>].value)<EOL><DEDENT><DEDENT>return self._bucketValues<EOL>", "docstring": "See the function description in base.py", "id": "f17548:c0:m13"}
{"signature": "def getBucketInfo(self, buckets):", "body": "<EOL>topDownMappingM = self._getTopDownMapping()<EOL>category = buckets[<NUM_LIT:0>]<EOL>encoding = self._topDownMappingM.getRow(category)<EOL>if self.periodic:<EOL><INDENT>inputVal = (self.minval + (self.resolution / <NUM_LIT>) +<EOL>(category * self.resolution))<EOL><DEDENT>else:<EOL><INDENT>inputVal = self.minval + (category * self.resolution)<EOL><DEDENT>return [EncoderResult(value=inputVal, scalar=inputVal, encoding=encoding)]<EOL>", "docstring": "See the function description in base.py", "id": "f17548:c0:m14"}
{"signature": "def closenessScores(self, expValues, actValues, fractional=True):", "body": "expValue = expValues[<NUM_LIT:0>]<EOL>actValue = actValues[<NUM_LIT:0>]<EOL>if self.periodic:<EOL><INDENT>expValue = expValue % self.maxval<EOL>actValue = actValue % self.maxval<EOL><DEDENT>err = abs(expValue - actValue)<EOL>if self.periodic:<EOL><INDENT>err = min(err, self.maxval - err)<EOL><DEDENT>if fractional:<EOL><INDENT>pctErr = float(err) / (self.maxval - self.minval)<EOL>pctErr = min(<NUM_LIT:1.0>, pctErr)<EOL>closeness = <NUM_LIT:1.0> - pctErr<EOL><DEDENT>else:<EOL><INDENT>closeness = err<EOL><DEDENT>return numpy.array([closeness])<EOL>", "docstring": "See the function description in base.py", "id": "f17548:c0:m16"}
{"signature": "def _initEncoder(self, w, minval, maxval, n, radius, resolution):", "body": "if n != <NUM_LIT:0>:<EOL><INDENT>if (radius !=<NUM_LIT:0> or resolution != <NUM_LIT:0>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>assert n > w<EOL>self.n = n<EOL>if (minval is not None and maxval is not None):<EOL><INDENT>if not self.periodic:<EOL><INDENT>self.resolution = float(self.rangeInternal) / (self.n - self.w)<EOL><DEDENT>else:<EOL><INDENT>self.resolution = float(self.rangeInternal) / (self.n)<EOL><DEDENT>self.radius = self.w * self.resolution<EOL>if self.periodic:<EOL><INDENT>self.range = self.rangeInternal<EOL><DEDENT>else:<EOL><INDENT>self.range = self.rangeInternal + self.resolution<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if radius != <NUM_LIT:0>:<EOL><INDENT>if (resolution != <NUM_LIT:0>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>self.radius = radius<EOL>self.resolution = float(self.radius) / w<EOL><DEDENT>elif resolution != <NUM_LIT:0>:<EOL><INDENT>self.resolution = float(resolution)<EOL>self.radius = self.resolution * self.w<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>if (minval is not None and maxval is not None):<EOL><INDENT>if self.periodic:<EOL><INDENT>self.range = self.rangeInternal<EOL><DEDENT>else:<EOL><INDENT>self.range = self.rangeInternal + self.resolution<EOL><DEDENT>nfloat = self.w * (self.range / self.radius) + <NUM_LIT:2> * self.padding<EOL>self.n = int(math.ceil(nfloat))<EOL><DEDENT><DEDENT>", "docstring": "(helper function)  There are three different ways of thinking about the representation.\n         Handle each case here.", "id": "f17548:c0:m1"}
{"signature": "def topDownCompute(self, encoded):", "body": "if self.ncategories==<NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>topDownMappingM = self._getTopDownMapping()<EOL>categoryIndex = topDownMappingM.rightVecProd(encoded).argmax()<EOL>category = self.categories[categoryIndex]<EOL>encoding = topDownMappingM.getRow(categoryIndex)<EOL>return EncoderResult(value=category, scalar=categoryIndex, encoding=encoding)<EOL>", "docstring": "See the function description in base.py", "id": "f17551:c0:m16"}
{"signature": "def closenessScores(self, expValues, actValues, fractional=True):", "body": "expValue = expValues[<NUM_LIT:0>]<EOL>actValue = actValues[<NUM_LIT:0>]<EOL>if expValue == actValue:<EOL><INDENT>closeness = <NUM_LIT:1.0><EOL><DEDENT>else:<EOL><INDENT>closeness = <NUM_LIT:0.0><EOL><DEDENT>if not fractional:<EOL><INDENT>closeness = <NUM_LIT:1.0> - closeness<EOL><DEDENT>return numpy.array([closeness])<EOL>", "docstring": "See the function description in base.py\n\n        kwargs will have the keyword \"fractional\", which is ignored by this encoder", "id": "f17551:c0:m17"}
{"signature": "def getScalars(self, input):", "body": "if input == SENTINEL_VALUE_FOR_MISSING_DATA:<EOL><INDENT>return numpy.array([<NUM_LIT:0>])<EOL><DEDENT>index = self.categoryToIndex.get(input, None)<EOL>if index is None:<EOL><INDENT>if self._learningEnabled:<EOL><INDENT>self._addCategory(input)<EOL>index = self.ncategories - <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>index = <NUM_LIT:0><EOL><DEDENT><DEDENT>return numpy.array([index])<EOL>", "docstring": "See method description in base.py", "id": "f17551:c0:m9"}
{"signature": "def _initializeBucketMap(self, maxBuckets, offset):", "body": "<EOL>self._maxBuckets = maxBuckets<EOL>self.minIndex = self._maxBuckets / <NUM_LIT:2><EOL>self.maxIndex = self._maxBuckets / <NUM_LIT:2><EOL>self._offset = offset<EOL>self.bucketMap = {}<EOL>def _permutation(n):<EOL><INDENT>r = numpy.arange(n, dtype=numpy.uint32)<EOL>self.random.shuffle(r)<EOL>return r<EOL><DEDENT>self.bucketMap[self.minIndex] = _permutation(self.n)[<NUM_LIT:0>:self.w]<EOL>self.numTries = <NUM_LIT:0><EOL>", "docstring": "Initialize the bucket map assuming the given number of maxBuckets.", "id": "f17552:c0:m15"}
{"signature": "def _newRepresentationOK(self, newRep, newIndex):", "body": "if newRep.size != self.w:<EOL><INDENT>return False<EOL><DEDENT>if (newIndex < self.minIndex-<NUM_LIT:1>) or (newIndex > self.maxIndex+<NUM_LIT:1>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>newRepBinary = numpy.array([False]*self.n)<EOL>newRepBinary[newRep] = True<EOL>midIdx = self._maxBuckets/<NUM_LIT:2><EOL>runningOverlap = self._countOverlap(self.bucketMap[self.minIndex], newRep)<EOL>if not self._overlapOK(self.minIndex, newIndex, overlap=runningOverlap):<EOL><INDENT>return False<EOL><DEDENT>for i in range(self.minIndex+<NUM_LIT:1>, midIdx+<NUM_LIT:1>):<EOL><INDENT>newBit = (i-<NUM_LIT:1>)%self.w<EOL>if newRepBinary[self.bucketMap[i-<NUM_LIT:1>][newBit]]:<EOL><INDENT>runningOverlap -= <NUM_LIT:1><EOL><DEDENT>if newRepBinary[self.bucketMap[i][newBit]]:<EOL><INDENT>runningOverlap += <NUM_LIT:1><EOL><DEDENT>if not self._overlapOK(i, newIndex, overlap=runningOverlap):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>for i in range(midIdx+<NUM_LIT:1>, self.maxIndex+<NUM_LIT:1>):<EOL><INDENT>newBit = i%self.w<EOL>if newRepBinary[self.bucketMap[i-<NUM_LIT:1>][newBit]]:<EOL><INDENT>runningOverlap -= <NUM_LIT:1><EOL><DEDENT>if newRepBinary[self.bucketMap[i][newBit]]:<EOL><INDENT>runningOverlap += <NUM_LIT:1><EOL><DEDENT>if not self._overlapOK(i, newIndex, overlap=runningOverlap):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>", "docstring": "Return True if this new candidate representation satisfies all our overlap\nrules. Since we know that neighboring representations differ by at most\none bit, we compute running overlaps.", "id": "f17552:c0:m11"}
{"signature": "def getBucketIndices(self, x):", "body": "if ((isinstance(x, float) and math.isnan(x)) or<EOL>x == SENTINEL_VALUE_FOR_MISSING_DATA):<EOL><INDENT>return [None]<EOL><DEDENT>if self._offset is None:<EOL><INDENT>self._offset = x<EOL><DEDENT>bucketIdx = (<EOL>(self._maxBuckets/<NUM_LIT:2>) + int(round((x - self._offset) / self.resolution))<EOL>)<EOL>if bucketIdx < <NUM_LIT:0>:<EOL><INDENT>bucketIdx = <NUM_LIT:0><EOL><DEDENT>elif bucketIdx >= self._maxBuckets:<EOL><INDENT>bucketIdx = self._maxBuckets-<NUM_LIT:1><EOL><DEDENT>return [bucketIdx]<EOL>", "docstring": "See method description in base.py", "id": "f17552:c0:m6"}
{"signature": "def _countOverlapIndices(self, i, j):", "body": "if i in self.bucketMap and j in self.bucketMap:<EOL><INDENT>iRep = self.bucketMap[i]<EOL>jRep = self.bucketMap[j]<EOL>return self._countOverlap(iRep, jRep)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Return the overlap between bucket indices i and j", "id": "f17552:c0:m12"}
{"signature": "def getWidth(self):", "body": "return self.n<EOL>", "docstring": "See method description in base.py", "id": "f17552:c0:m4"}
{"signature": "def mapBucketIndexToNonZeroBits(self, index):", "body": "if index < <NUM_LIT:0>:<EOL><INDENT>index = <NUM_LIT:0><EOL><DEDENT>if index >= self._maxBuckets:<EOL><INDENT>index = self._maxBuckets-<NUM_LIT:1><EOL><DEDENT>if index not in self.bucketMap:<EOL><INDENT>if self.verbosity >= <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT>\", index)<EOL><DEDENT>self._createBucket(index)<EOL><DEDENT>return self.bucketMap[index]<EOL>", "docstring": "Given a bucket index, return the list of non-zero bits. If the bucket\nindex does not exist, it is created. If the index falls outside our range\nwe clip it.\n\n:param index The bucket index to get non-zero bits for.\n@returns numpy array of indices of non-zero bits for specified index.", "id": "f17552:c0:m7"}
{"signature": "def _seed(self, seed=-<NUM_LIT:1>):", "body": "if seed != -<NUM_LIT:1>:<EOL><INDENT>self.random = NupicRandom(seed)<EOL><DEDENT>else:<EOL><INDENT>self.random = NupicRandom()<EOL><DEDENT>", "docstring": "Initialize the random seed", "id": "f17552:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def _countOverlap(rep1, rep2):<DEDENT>", "body": "overlap = <NUM_LIT:0><EOL>for e in rep1:<EOL><INDENT>if e in rep2:<EOL><INDENT>overlap += <NUM_LIT:1><EOL><DEDENT><DEDENT>return overlap<EOL>", "docstring": "Return the overlap between two representations. rep1 and rep2 are lists of\nnon-zero indices.", "id": "f17552:c0:m13"}
{"signature": "def _newRepresentation(self, index, newIndex):", "body": "newRepresentation = self.bucketMap[index].copy()<EOL>ri = newIndex % self.w<EOL>newBit = self.random.getUInt32(self.n)<EOL>newRepresentation[ri] = newBit<EOL>while newBit in self.bucketMap[index] ornot self._newRepresentationOK(newRepresentation, newIndex):<EOL><INDENT>self.numTries += <NUM_LIT:1><EOL>newBit = self.random.getUInt32(self.n)<EOL>newRepresentation[ri] = newBit<EOL><DEDENT>return newRepresentation<EOL>", "docstring": "Return a new representation for newIndex that overlaps with the\nrepresentation at index by exactly w-1 bits", "id": "f17552:c0:m10"}
{"signature": "def getBucketInfo(self, buckets):", "body": "if self.minval is None or self.maxval is None:<EOL><INDENT>return [EncoderResult(value=<NUM_LIT:0>, scalar=<NUM_LIT:0>,<EOL>encoding=numpy.zeros(self.n))]<EOL><DEDENT>return super(AdaptiveScalarEncoder, self).getBucketInfo(buckets)<EOL>", "docstring": "[overrides nupic.encoders.scalar.ScalarEncoder.getBucketInfo]", "id": "f17553:c0:m6"}
{"signature": "def topDownCompute(self, encoded):", "body": "if self.minval is None or self.maxval is None:<EOL><INDENT>return [EncoderResult(value=<NUM_LIT:0>, scalar=<NUM_LIT:0>,<EOL>encoding=numpy.zeros(self.n))]<EOL><DEDENT>return super(AdaptiveScalarEncoder, self).topDownCompute(encoded)<EOL>", "docstring": "[overrides nupic.encoders.scalar.ScalarEncoder.topDownCompute]", "id": "f17553:c0:m7"}
{"signature": "def _setMinAndMax(self, input, learn):", "body": "self.slidingWindow.next(input)<EOL>if self.minval is None and self.maxval is None:<EOL><INDENT>self.minval = input<EOL>self.maxval = input+<NUM_LIT:1>   <EOL>self._setEncoderParams()<EOL><DEDENT>elif learn:<EOL><INDENT>sorted = self.slidingWindow.getSlidingWindow()<EOL>sorted.sort()<EOL>minOverWindow = sorted[<NUM_LIT:0>]<EOL>maxOverWindow = sorted[len(sorted)-<NUM_LIT:1>]<EOL>if minOverWindow < self.minval:<EOL><INDENT>if self.verbosity >= <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT>\"% (self.name, input, self.minval, minOverWindow))<EOL><DEDENT>self.minval = minOverWindow       <EOL>self._setEncoderParams()<EOL><DEDENT>if maxOverWindow > self.maxval:<EOL><INDENT>if self.verbosity >= <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT>\"% (self.name, input, self.maxval, maxOverWindow))<EOL><DEDENT>self.maxval = maxOverWindow       <EOL>self._setEncoderParams()<EOL><DEDENT><DEDENT>", "docstring": "Potentially change the minval and maxval using input.\n**The learn flag is currently not supported by cla regions.**", "id": "f17553:c0:m3"}
{"signature": "def _setEncoderParams(self):", "body": "self.rangeInternal = float(self.maxval - self.minval)<EOL>self.resolution = float(self.rangeInternal) / (self.n - self.w)<EOL>self.radius = self.w * self.resolution<EOL>self.range = self.rangeInternal + self.resolution<EOL>self.nInternal = self.n - <NUM_LIT:2> * self.padding<EOL>self._bucketValues = None<EOL>", "docstring": "Set the radius, resolution and range. These values are updated when minval\nand/or maxval change.", "id": "f17553:c0:m1"}
{"signature": "def _trackInstanceAndCheckForConcurrencyViolation(self):", "body": "global g_max_concurrency, g_max_concurrency_raise_exception<EOL>assert g_max_concurrency is not None<EOL>assert self not in self._clsOutstandingInstances, repr(self)<EOL>self._creationTracebackString = traceback.format_stack()<EOL>if self._clsNumOutstanding >= g_max_concurrency:<EOL><INDENT>errorMsg = (\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\") % (<EOL>self._clsNumOutstanding, g_max_concurrency, self,<EOL>len(self._clsOutstandingInstances), self._clsOutstandingInstances,)<EOL>self._logger.error(errorMsg)<EOL>if g_max_concurrency_raise_exception:<EOL><INDENT>raise ConcurrencyExceededError(errorMsg)<EOL><DEDENT><DEDENT>self._clsOutstandingInstances.add(self)<EOL>self._addedToInstanceSet = True<EOL>return<EOL>", "docstring": "Check for concurrency violation and add self to\n        _clsOutstandingInstances.\n\n        ASSUMPTION: Called from constructor BEFORE _clsNumOutstanding is\n        incremented", "id": "f17554:c2:m5"}
{"signature": "def acquireConnection(self):", "body": "raise NotImplementedError()<EOL>", "docstring": "Get a Connection instance.\n\n        Parameters:\n        ----------------------------------------------------------------\n        retval:       A ConnectionWrapper instance.\n                        Caller is responsible for calling the  ConnectionWrapper\n                        instance's release() method to release resources.", "id": "f17554:c3:m1"}
{"signature": "def _getLogger(cls, logLevel=None):", "body": "logger = logging.getLogger(<EOL>\"<STR_LIT:.>\".join(['<STR_LIT>', _MODULE_NAME, cls.__name__]))<EOL>if logLevel is not None:<EOL><INDENT>logger.setLevel(logLevel)<EOL><DEDENT>return logger<EOL>", "docstring": "Gets a logger for the given class in this module", "id": "f17554:m3"}
{"signature": "def release(self):", "body": "self._logger.debug(\"<STR_LIT>\", self)<EOL>if self._addedToInstanceSet:<EOL><INDENT>try:<EOL><INDENT>self._clsOutstandingInstances.remove(self)<EOL><DEDENT>except:<EOL><INDENT>self._logger.exception(<EOL>\"<STR_LIT>\", self)<EOL>raise<EOL><DEDENT><DEDENT>self._releaser(dbConn=self.dbConn, cursor=self.cursor)<EOL>self.__class__._clsNumOutstanding -= <NUM_LIT:1><EOL>assert self._clsNumOutstanding >= <NUM_LIT:0>,\"<STR_LIT>\" % (self._clsNumOutstanding,)<EOL>self._releaser = None<EOL>self.cursor = None<EOL>self.dbConn = None<EOL>self._creationTracebackString = None<EOL>self._addedToInstanceSet = False<EOL>self._logger = None<EOL>return<EOL>", "docstring": "Release the database connection and cursor\n\n        The receiver of the Connection instance MUST call this method in order\n        to reclaim resources", "id": "f17554:c2:m4"}
{"signature": "def _releaseConnection(self, dbConn, cursor):", "body": "self._logger.debug(\"<STR_LIT>\")<EOL>cursor.close()<EOL>return<EOL>", "docstring": "Release database connection and cursor; passed as a callback to\n        ConnectionWrapper", "id": "f17554:c4:m3"}
{"signature": "def _releaseConnection(self, dbConn, cursor):", "body": "self._logger.debug(\"<STR_LIT>\")<EOL>cursor.close()<EOL>dbConn.close()<EOL>return<EOL>", "docstring": "Release database connection and cursor; passed as a callback to\n        ConnectionWrapper", "id": "f17554:c6:m3"}
{"signature": "@classmethod<EOL><INDENT>def get(cls):<DEDENT>", "body": "if cls._connectionPolicy is None:<EOL><INDENT>logger = _getLogger(cls)<EOL>logger.info(\"<STR_LIT>\",<EOL>cls._connectionPolicyInstanceProvider)<EOL>cls._connectionPolicy = cls._connectionPolicyInstanceProvider()<EOL>logger.debug(\"<STR_LIT>\", cls._connectionPolicy)<EOL><DEDENT>return cls._connectionPolicy.acquireConnection()<EOL>", "docstring": "Acquire a ConnectionWrapper instance that represents a connection\n        to the SQL server per nupic.cluster.database.* configuration settings.\n\n        NOTE: caller is responsible for calling the ConnectionWrapper instance's\n        release() method after using the connection in order to release resources.\n        Better yet, use the returned ConnectionWrapper instance in a Context Manager\n        statement for automatic invocation of release():\n        Example:\n            # If using Jython 2.5.x, first import with_statement at the very top of\n            your script (don't need this import for Jython/Python 2.6.x and later):\n            from __future__ import with_statement\n            # Then:\n            from nupic.database.Connection import ConnectionFactory\n            # Then use it like this\n            with ConnectionFactory.get() as conn:\n              conn.cursor.execute(\"SELECT ...\")\n              conn.cursor.fetchall()\n              conn.cursor.execute(\"INSERT ...\")\n\n        WARNING: DO NOT close the underlying connection or cursor as it may be\n        shared by other modules in your process.  ConnectionWrapper's release()\n        method will do the right thing.\n\n        Parameters:\n        ----------------------------------------------------------------\n        retval:       A ConnectionWrapper instance. NOTE: Caller is responsible\n                        for releasing resources as described above.", "id": "f17554:c1:m0"}
{"signature": "def __init__(self):", "body": "self._logger = _getLogger(self.__class__)<EOL>self._logger.debug(\"<STR_LIT>\")<EOL>self._pool = PooledDB(**_getCommonSteadyDBArgsDict())<EOL>self._logger.info(\"<STR_LIT>\", self.__class__.__name__)<EOL>return<EOL>", "docstring": "Consruct an instance. The instance's open() method must be\n        called to make it ready for acquireConnection() calls.", "id": "f17554:c5:m0"}
{"signature": "@classmethod<EOL><INDENT>def _createDefaultPolicy(cls):<DEDENT>", "body": "logger = _getLogger(cls)<EOL>logger.debug(<EOL>\"<STR_LIT>\",<EOL>platform.system(), pymysql.VERSION)<EOL>if platform.system() == \"<STR_LIT>\":<EOL><INDENT>policy = SingleSharedConnectionPolicy()<EOL><DEDENT>else:<EOL><INDENT>policy = PooledConnectionPolicy()<EOL><DEDENT>return policy<EOL>", "docstring": "[private] Create the default database connection policy instance\n\n        Parameters:\n        ----------------------------------------------------------------\n        retval:            The default database connection policy instance", "id": "f17554:c1:m3"}
{"signature": "def acquireConnection(self):", "body": "self._logger.debug(\"<STR_LIT>\")<EOL>self._conn._ping_check()<EOL>connWrap = ConnectionWrapper(dbConn=self._conn,<EOL>cursor=self._conn.cursor(),<EOL>releaser=self._releaseConnection,<EOL>logger=self._logger)<EOL>return connWrap<EOL>", "docstring": "Get a Connection instance.\n\n        Parameters:\n        ----------------------------------------------------------------\n        retval:       A ConnectionWrapper instance. NOTE: Caller\n                        is responsible for calling the  ConnectionWrapper\n                        instance's release() method or use it in a context manager\n                        expression (with ... as:) to release resources.", "id": "f17554:c4:m2"}
{"signature": "def close(self):", "body": "self._logger.info(\"<STR_LIT>\")<EOL>if self._pool is not None:<EOL><INDENT>self._pool.close()<EOL>self._pool = None<EOL><DEDENT>else:<EOL><INDENT>self._logger.warning(<EOL>\"<STR_LIT>\")<EOL><DEDENT>return<EOL>", "docstring": "Close the policy instance and its database connection pool.", "id": "f17554:c5:m1"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def jobIncrementIntField(self, jobID, fieldName, increment=<NUM_LIT:1>,<EOL>useConnectionID=False):<DEDENT>", "body": "<EOL>dbFieldName = self._jobs.pubToDBNameDict[fieldName]<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>query = '<STR_LIT>''<STR_LIT>'% (self.jobsTableName, dbFieldName, dbFieldName)<EOL>sqlParams = [increment, jobID]<EOL>if useConnectionID:<EOL><INDENT>query += '<STR_LIT>'<EOL>sqlParams.append(self._connectionID)<EOL><DEDENT>result = conn.cursor.execute(query, sqlParams)<EOL><DEDENT>if result != <NUM_LIT:1>:<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\"\"<STR_LIT>\" % (<EOL>dbFieldName, jobID, self._connectionID, result, query))<EOL><DEDENT>", "docstring": "Incremet the value of 1 field in a job by increment. The 'fieldName' is\n        the public name of the field (camelBack, not the lower_case_only form as\n        stored in the DB).\n\n        This method is used for example by HypersearcWorkers to update the\n        engWorkerState field periodically. By qualifying on curValue, it insures\n        that only 1 worker at a time is elected to perform the next scheduled\n        periodic sweep of the models.\n\n        Parameters:\n        ----------------------------------------------------------------\n        jobID:        jobID of the job record to modify\n        fieldName:    public field name of the field\n        increment:    increment is added to the current value of the field", "id": "f17555:c1:m48"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>def __init__(self):<DEDENT>", "body": "self._logger = _LOGGER<EOL>assert (ClientJobsDAO._instance is None)<EOL>self.dbName = self._getDBName()<EOL>self._jobs = self._JobsTableInfo()<EOL>self._jobs.tableName = '<STR_LIT>' % (self.dbName)<EOL>self._models = self._ModelsTableInfo()<EOL>self._models.tableName = '<STR_LIT>' % (self.dbName)<EOL>self._connectionID = None<EOL>", "docstring": "Instantiate a ClientJobsDAO instance.\n\n        Parameters:\n        ----------------------------------------------------------------", "id": "f17555:c1:m5"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def modelsClearAll(self):<DEDENT>", "body": "self._logger.info('<STR_LIT>',<EOL>self.modelsTableName)<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>query = '<STR_LIT>' % (self.modelsTableName)<EOL>conn.cursor.execute(query)<EOL><DEDENT>", "docstring": "Delete all models from the models table\n\n        Parameters:\n        ----------------------------------------------------------------", "id": "f17555:c1:m50"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def jobSetCompleted(self, jobID, completionReason, completionMsg,<EOL>useConnectionID = True):<DEDENT>", "body": "<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'% (self.jobsTableName,)<EOL>sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg,<EOL>jobID]<EOL>if useConnectionID:<EOL><INDENT>query += '<STR_LIT>'<EOL>sqlParams.append(self._connectionID)<EOL><DEDENT>result = conn.cursor.execute(query, sqlParams)<EOL>if result != <NUM_LIT:1>:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (jobID))<EOL><DEDENT><DEDENT>", "docstring": "Change the status on the given job to completed\n\n        Parameters:\n        ----------------------------------------------------------------\n        job:                 jobID of the job to mark as completed\n        completionReason:    completionReason string\n        completionMsg:       completionMsg string\n\n        useConnectionID: True if the connection id of the calling function\n        must be the same as the connection that created the job. Set\n        to False for hypersearch workers", "id": "f17555:c1:m35"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def modelSetFields(self, modelID, fields, ignoreUnchanged = False):<DEDENT>", "body": "<EOL>assignmentExpressions = '<STR_LIT:U+002C>'.join(<EOL>'<STR_LIT>' % (self._models.pubToDBNameDict[f],) for f in fields.keys())<EOL>assignmentValues = list(fields.values())<EOL>query = '<STR_LIT>''<STR_LIT>'% (self.modelsTableName, assignmentExpressions)<EOL>sqlParams = assignmentValues + [modelID]<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>numAffectedRows = conn.cursor.execute(query, sqlParams)<EOL>self._logger.debug(\"<STR_LIT>\",<EOL>numAffectedRows, query, sqlParams)<EOL><DEDENT>if numAffectedRows != <NUM_LIT:1> and not ignoreUnchanged:<EOL><INDENT>raise RuntimeError(<EOL>(\"<STR_LIT>\"<EOL>\"<STR_LIT>\") % (<EOL>fields, modelID, self._connectionID, numAffectedRows, query,<EOL>sqlParams,))<EOL><DEDENT>", "docstring": "Change the values of 1 or more fields in a model. Here, 'fields' is a\n        dict with the name/value pairs to change. The names are the public names of\n        the fields (camelBack, not the lower_case_only form as stored in the DB).\n\n        Parameters:\n        ----------------------------------------------------------------\n        jobID:     jobID of the job record\n\n        fields:    dictionary of fields to change\n\n        ignoreUnchanged: The default behavior is to throw a\n        RuntimeError if no rows are affected. This could either be\n        because:\n          1) Because there was no matching modelID\n          2) or if the data to update matched the data in the DB exactly.\n\n        Set this parameter to True if you expect case 2 and wish to\n        supress the error.", "id": "f17555:c1:m56"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def jobGetCancellingJobs(self,):<DEDENT>", "body": "with ConnectionFactory.get() as conn:<EOL><INDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>'% (self.jobsTableName,)<EOL>conn.cursor.execute(query, [self.STATUS_COMPLETED])<EOL>rows = conn.cursor.fetchall()<EOL><DEDENT>return tuple(r[<NUM_LIT:0>] for r in rows)<EOL>", "docstring": "Look through the jobs table and get the list of running jobs whose\n        cancel field is true.\n\n        Parameters:\n        ----------------------------------------------------------------\n        retval:      A (possibly empty) sequence of running job IDs with cancel field\n                      set to true", "id": "f17555:c1:m29"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>def jobCancel(self, jobID):<DEDENT>", "body": "self._logger.info('<STR_LIT>', jobID)<EOL>self.jobSetFields(jobID, {\"<STR_LIT>\" : True}, useConnectionID=False)<EOL>", "docstring": "Cancel the given job. This will update the cancel field in the\n        jobs table and will result in the job being cancelled.\n\n        Parameters:\n        ----------------------------------------------------------------\n        jobID:                 jobID of the job to mark as completed\n\n        to False for hypersearch workers", "id": "f17555:c1:m36"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def jobSetFieldIfEqual(self, jobID, fieldName, newValue, curValue):<DEDENT>", "body": "<EOL>dbFieldName = self._jobs.pubToDBNameDict[fieldName]<EOL>conditionValue = []<EOL>if isinstance(curValue, bool):<EOL><INDENT>conditionExpression = '<STR_LIT>' % (<EOL>dbFieldName, {True:'<STR_LIT>', False:'<STR_LIT>'}[curValue])<EOL><DEDENT>elif curValue is None:<EOL><INDENT>conditionExpression = '<STR_LIT>' % (dbFieldName,)<EOL><DEDENT>else:<EOL><INDENT>conditionExpression = '<STR_LIT>' % (dbFieldName,)<EOL>conditionValue.append(curValue)<EOL><DEDENT>query = '<STR_LIT>''<STR_LIT>'% (self.jobsTableName, dbFieldName, conditionExpression)<EOL>sqlParams = [newValue, jobID] + conditionValue<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>result = conn.cursor.execute(query, sqlParams)<EOL><DEDENT>return (result == <NUM_LIT:1>)<EOL>", "docstring": "Change the value of 1 field in a job to 'newValue', but only if the\n        current value matches 'curValue'. The 'fieldName' is the public name of\n        the field (camelBack, not the lower_case_only form as stored in the DB).\n\n        This method is used for example by HypersearcWorkers to update the\n        engWorkerState field periodically. By qualifying on curValue, it insures\n        that only 1 worker at a time is elected to perform the next scheduled\n        periodic sweep of the models.\n\n        Parameters:\n        ----------------------------------------------------------------\n        jobID:        jobID of the job record to modify\n        fieldName:    public field name of the field\n        newValue:     new value of the field to set\n        curValue:     current value to qualify against\n\n        retval:       True if we successfully modified the field\n                      False if curValue did not match", "id": "f17555:c1:m47"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def jobSetStatus(self, jobID, status, useConnectionID=True,):<DEDENT>", "body": "<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>'% (self.jobsTableName,)<EOL>sqlParams = [status, jobID]<EOL>if useConnectionID:<EOL><INDENT>query += '<STR_LIT>'<EOL>sqlParams.append(self._connectionID)<EOL><DEDENT>result = conn.cursor.execute(query, sqlParams)<EOL>if result != <NUM_LIT:1>:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (<EOL>jobID, status))<EOL><DEDENT><DEDENT>", "docstring": "Change the status on the given job\n\n        Parameters:\n        ----------------------------------------------------------------\n        job:        jobID of the job to change status\n        status:     new status string (ClientJobsDAO.STATUS_xxxxx)\n\n        useConnectionID: True if the connection id of the calling function\n        must be the same as the connection that created the job. Set\n        to False for hypersearch workers", "id": "f17555:c1:m34"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def getActiveJobCountForClientInfo(self, clientInfo):<DEDENT>", "body": "with ConnectionFactory.get() as conn:<EOL><INDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>' %  self.jobsTableName<EOL>conn.cursor.execute(query, [clientInfo, self.STATUS_COMPLETED])<EOL>activeJobCount = conn.cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>return activeJobCount<EOL>", "docstring": "Return the number of jobs for the given clientInfo and a status that is\n        not completed.", "id": "f17555:c1:m38"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>def modelInsertAndStart(self, jobID, params, paramsHash, particleHash=None):<DEDENT>", "body": "<EOL>if particleHash is None:<EOL><INDENT>particleHash = paramsHash<EOL><DEDENT>paramsHash = self._normalizeHash(paramsHash)<EOL>particleHash = self._normalizeHash(particleHash)<EOL>def findExactMatchNoRetries(conn):<EOL><INDENT>return self._getOneMatchingRowNoRetries(<EOL>self._models, conn,<EOL>{'<STR_LIT>':jobID, '<STR_LIT>':paramsHash,<EOL>'<STR_LIT>':particleHash},<EOL>['<STR_LIT>', '<STR_LIT>'])<EOL><DEDENT>@g_retrySQL<EOL>def findExactMatchWithRetries():<EOL><INDENT>with ConnectionFactory.get() as conn:<EOL><INDENT>return findExactMatchNoRetries(conn)<EOL><DEDENT><DEDENT>row = findExactMatchWithRetries()<EOL>if row is not None:<EOL><INDENT>return (row[<NUM_LIT:0>], False)<EOL><DEDENT>@g_retrySQL<EOL>def insertModelWithRetries():<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'% (self.modelsTableName,)<EOL>sqlParams = (jobID, params, self.STATUS_RUNNING, paramsHash,<EOL>particleHash, self._connectionID)<EOL>try:<EOL><INDENT>numRowsAffected = conn.cursor.execute(query, sqlParams)<EOL><DEDENT>except Exception as e:<EOL><INDENT>if \"<STR_LIT>\" not in str(e):<EOL><INDENT>raise<EOL><DEDENT>self._logger.info('<STR_LIT>'<EOL>'<STR_LIT>',<EOL>jobID, paramsHash.encode('<STR_LIT>'),<EOL>particleHash.encode('<STR_LIT>'), e)<EOL><DEDENT>else:<EOL><INDENT>if numRowsAffected == <NUM_LIT:1>:<EOL><INDENT>conn.cursor.execute('<STR_LIT>')<EOL>modelID = conn.cursor.fetchall()[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>if modelID != <NUM_LIT:0>:<EOL><INDENT>return (modelID, True)<EOL><DEDENT>else:<EOL><INDENT>self._logger.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>jobID, paramsHash, particleHash)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._logger.error(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>numRowsAffected, jobID, paramsHash, 
particleHash)<EOL><DEDENT><DEDENT>row = findExactMatchNoRetries(conn)<EOL>if row is not None:<EOL><INDENT>(modelID, connectionID) = row<EOL>return (modelID, connectionID == self._connectionID)<EOL><DEDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'% (self.modelsTableName,)<EOL>sqlParams = [jobID, paramsHash, particleHash]<EOL>numRowsFound = conn.cursor.execute(query, sqlParams)<EOL>assert numRowsFound == <NUM_LIT:1>, (<EOL>'<STR_LIT>'<EOL>'<STR_LIT>') % (jobID, paramsHash, particleHash, numRowsFound)<EOL>(modelID,) = conn.cursor.fetchall()[<NUM_LIT:0>]<EOL>return (modelID, False)<EOL><DEDENT><DEDENT>return insertModelWithRetries()<EOL>", "docstring": "Insert a new unique model (based on params) into the model table in the\n        \"running\" state. This will return two things: whether or not the model was\n        actually inserted (i.e. that set of params isn't already in the table) and\n        the modelID chosen for that set of params. Even if the model was not\n        inserted by this call (it was already there) the modelID of the one already\n        inserted is returned.\n\n        Parameters:\n        ----------------------------------------------------------------\n        jobID:            jobID of the job to add models for\n        params:           params for this model\n        paramsHash        hash of the params, generated by the worker\n        particleHash      hash of the particle info (for PSO). If not provided,\n                          then paramsHash will be used.\n\n        retval:           (modelID, wasInserted)\n                          modelID: the model ID for this set of params\n                          wasInserted: True if this call ended up inserting the\n                          new model. False if this set of params was already in\n                          the model table.", "id": "f17555:c1:m51"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>def modelsGetFields(self, modelIDs, fields):<DEDENT>", "body": "assert len(fields) >= <NUM_LIT:1>, '<STR_LIT>'<EOL>isSequence = isinstance(modelIDs, self._SEQUENCE_TYPES)<EOL>if isSequence:<EOL><INDENT>assert len(modelIDs) >=<NUM_LIT:1>, '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>modelIDs = [modelIDs]<EOL><DEDENT>rows = self._getMatchingRowsWithRetries(<EOL>self._models, dict(model_id=modelIDs),<EOL>['<STR_LIT>'] + [self._models.pubToDBNameDict[f] for f in fields])<EOL>if len(rows) < len(modelIDs):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % (<EOL>(set(modelIDs) - set(r[<NUM_LIT:0>] for r in rows)),))<EOL><DEDENT>if not isSequence:<EOL><INDENT>return list(rows[<NUM_LIT:0>][<NUM_LIT:1>:])<EOL><DEDENT>return [(r[<NUM_LIT:0>], list(r[<NUM_LIT:1>:])) for r in rows]<EOL>", "docstring": "Fetch the values of 1 or more fields from a sequence of model records.\n        Here, 'fields' is a list with the names of the fields to fetch. The names\n        are the public names of the fields (camelBack, not the lower_case_only form\n        as stored in the DB).\n\n        WARNING!!!: The order of the results are NOT necessarily in the same order\n        as the order of the model IDs passed in!!!\n\n\n        Parameters:\n        ----------------------------------------------------------------\n        modelIDs:      A single modelID or sequence of modelIDs\n        fields:        A list  of fields to return\n\n        Returns:  If modelIDs is a sequence:\n                    a list of tuples->(modelID, [field1, field2,...])\n                  If modelIDs is a single modelID:\n                    a list of field values->[field1, field2,...]", "id": "f17555:c1:m53"}
{"signature": "def _insertOrGetUniqueJobNoRetries(<EOL>self, conn, client, cmdLine, jobHash, clientInfo, clientKey, params,<EOL>minimumWorkers, maximumWorkers, jobType, priority, alreadyRunning):", "body": "assert len(client) <= self.CLIENT_MAX_LEN, \"<STR_LIT>\" + repr(client)<EOL>assert cmdLine, \"<STR_LIT>\" + repr(cmdLine)<EOL>assert len(jobHash) == self.HASH_MAX_LEN, \"<STR_LIT>\" % len(jobHash)<EOL>if alreadyRunning:<EOL><INDENT>initStatus = self.STATUS_TESTMODE<EOL><DEDENT>else:<EOL><INDENT>initStatus = self.STATUS_NOTSTARTED<EOL><DEDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'% (self.jobsTableName,)<EOL>sqlParams = (initStatus, client, clientInfo, clientKey, cmdLine, params,<EOL>jobHash, minimumWorkers, maximumWorkers, priority, jobType)<EOL>numRowsInserted = conn.cursor.execute(query, sqlParams)<EOL>jobID = <NUM_LIT:0><EOL>if numRowsInserted == <NUM_LIT:1>:<EOL><INDENT>conn.cursor.execute('<STR_LIT>')<EOL>jobID = conn.cursor.fetchall()[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>if jobID == <NUM_LIT:0>:<EOL><INDENT>self._logger.warn(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>jobType, client, _abbreviate(clientInfo, <NUM_LIT:32>), clientKey, jobHash,<EOL>cmdLine)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>assert numRowsInserted == <NUM_LIT:0>, repr(numRowsInserted)<EOL><DEDENT>if jobID == <NUM_LIT:0>:<EOL><INDENT>row = self._getOneMatchingRowNoRetries(<EOL>self._jobs, conn, dict(client=client, job_hash=jobHash), ['<STR_LIT>'])<EOL>assert row is not None<EOL>assert len(row) == <NUM_LIT:1>, '<STR_LIT>' + repr(len(row))<EOL>jobID = row[<NUM_LIT:0>]<EOL><DEDENT>if alreadyRunning:<EOL><INDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'% (self.jobsTableName,)<EOL>conn.cursor.execute(query, (self._connectionID, jobID))<EOL><DEDENT>return jobID<EOL>", "docstring": "Attempt to insert a row with the given parameters into the jobs table.\n        Return jobID of the inserted row, or of an existing row with matching\n        
client/jobHash key.\n\n        The combination of client and jobHash are expected to be unique (enforced\n        by a unique index on the two columns).\n\n        NOTE: It's possibe that this or another process (on this or another machine)\n         already inserted a row with matching client/jobHash key (e.g.,\n         StreamMgr). This may also happen undetected by this function due to a\n         partially-successful insert operation (e.g., row inserted, but then\n         connection was lost while reading response) followed by retries either of\n         this function or in SteadyDB module.\n\n        Parameters:\n        ----------------------------------------------------------------\n        conn:            Owned connection acquired from ConnectionFactory.get()\n        client:          Name of the client submitting the job\n        cmdLine:         Command line to use to launch each worker process; must be\n                          a non-empty string\n        jobHash:         unique hash of this job. The caller must insure that this,\n                          together with client, uniquely identifies this job request\n                          for the purposes of detecting duplicates.\n        clientInfo:      JSON encoded dict of client specific information.\n        clientKey:       Foreign key.\n        params:          JSON encoded dict of the parameters for the job. 
This\n                          can be fetched out of the database by the worker processes\n                          based on the jobID.\n        minimumWorkers:  minimum number of workers design at a time.\n        maximumWorkers:  maximum number of workers desired at a time.\n        priority:        Job scheduling priority; 0 is the default priority (\n                          ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are\n                          higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),\n                          and negative values are lower priority (down to\n                          ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will\n                          be scheduled to run at the expense of the lower-priority\n                          jobs, and higher-priority job tasks will preempt those\n                          with lower priority if there is inadequate supply of\n                          scheduling slots. Excess lower priority job tasks will\n                          starve as long as slot demand exceeds supply. Most jobs\n                          should be scheduled with DEFAULT_JOB_PRIORITY. System jobs\n                          that must run at all cost, such as Multi-Model-Master,\n                          should be scheduled with MAX_JOB_PRIORITY.\n        alreadyRunning:  Used for unit test purposes only. This inserts the job\n                          in the running state. It is used when running a worker\n                          in standalone mode without hadoop- it gives it a job\n                          record to work with.\n\n        retval:           jobID of the inserted jobs row, or of an existing jobs row\n                           with matching client/jobHash key", "id": "f17555:c1:m16"}
{"signature": "@g_retrySQL<EOL><INDENT>def _startJobWithRetries(self, jobID):<DEDENT>", "body": "with ConnectionFactory.get() as conn:<EOL><INDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'% (self.jobsTableName,)<EOL>sqlParams = [self.STATUS_RUNNING, self._connectionID,<EOL>jobID, self.STATUS_NOTSTARTED]<EOL>numRowsUpdated = conn.cursor.execute(query, sqlParams)<EOL>if numRowsUpdated != <NUM_LIT:1>:<EOL><INDENT>self._logger.warn('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>', numRowsUpdated)<EOL><DEDENT><DEDENT>return<EOL>", "docstring": "Place the given job in STATUS_RUNNING mode; the job is expected to be\n        STATUS_NOTSTARTED.\n\n        NOTE: this function was factored out of jobStartNext because it's also\n         needed for testing (e.g., test_client_jobs_dao.py)", "id": "f17555:c1:m23"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def jobInfoWithModels(self, jobID):<DEDENT>", "body": "<EOL>combinedResults = None<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>query = '<STR_LIT:U+0020>'.join([<EOL>'<STR_LIT>' % (self.jobsTableName, self.modelsTableName),<EOL>'<STR_LIT>' % self.jobsTableName,<EOL>'<STR_LIT>' % self.modelsTableName,<EOL>'<STR_LIT>'])<EOL>conn.cursor.execute(query, (jobID,))<EOL>if conn.cursor.rowcount > <NUM_LIT:0>:<EOL><INDENT>combinedResults = [<EOL>ClientJobsDAO._combineResults(<EOL>result, self._jobs.jobInfoNamedTuple,<EOL>self._models.modelInfoNamedTuple<EOL>) for result in conn.cursor.fetchall()]<EOL><DEDENT><DEDENT>if combinedResults is not None:<EOL><INDENT>return combinedResults<EOL><DEDENT>raise RuntimeError(\"<STR_LIT>\" % (jobID))<EOL>", "docstring": "Get all info about a job, with model details, if available.\n\n        Parameters:\n        ----------------------------------------------------------------\n        job:    jobID of the job to query\n        retval: A sequence of two-tuples if the jobID exists in the jobs\n                 table (exeption is raised if it doesn't exist). Each two-tuple\n                 contains an instance of jobInfoNamedTuple as the first element and\n                 an instance of modelInfoNamedTuple as the second element. NOTE: In\n                 the case where there are no matching model rows, a sequence of one\n                 two-tuple will still be returned, but the modelInfoNamedTuple\n                 fields will be None, and the jobInfoNamedTuple fields will be\n                 populated.", "id": "f17555:c1:m32"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def modelUpdateResults(self, modelID, results=None, metricValue =None,<EOL>numRecords=None):<DEDENT>", "body": "assignmentExpressions = ['<STR_LIT>',<EOL>'<STR_LIT>']<EOL>assignmentValues = []<EOL>if results is not None:<EOL><INDENT>assignmentExpressions.append('<STR_LIT>')<EOL>assignmentValues.append(results)<EOL><DEDENT>if numRecords is not None:<EOL><INDENT>assignmentExpressions.append('<STR_LIT>')<EOL>assignmentValues.append(numRecords)<EOL><DEDENT>if metricValue is not None and (metricValue==metricValue):<EOL><INDENT>assignmentExpressions.append('<STR_LIT>')<EOL>assignmentValues.append(float(metricValue))<EOL><DEDENT>query = '<STR_LIT>''<STR_LIT>'% (self.modelsTableName, '<STR_LIT:U+002C>'.join(assignmentExpressions))<EOL>sqlParams = assignmentValues + [modelID, self._connectionID]<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>numRowsAffected = conn.cursor.execute(query, sqlParams)<EOL><DEDENT>if numRowsAffected != <NUM_LIT:1>:<EOL><INDENT>raise InvalidConnectionException(<EOL>(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\") % (modelID,self._connectionID, numRowsAffected,))<EOL><DEDENT>", "docstring": "Update the results string, and/or num_records fields of\n        a model. This will fail if the model does not currently belong to this\n        client (connection_id doesn't match).\n\n        Parameters:\n        ----------------------------------------------------------------\n        modelID:      model ID of model to modify\n        results:      new results, or None to ignore\n        metricValue:  the value of the metric being optimized, or None to ignore\n        numRecords:   new numRecords, or None to ignore", "id": "f17555:c1:m60"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def modelsGetFieldsForJob(self, jobID, fields, ignoreKilled=False):<DEDENT>", "body": "assert len(fields) >= <NUM_LIT:1>, '<STR_LIT>'<EOL>dbFields = [self._models.pubToDBNameDict[x] for x in fields]<EOL>dbFieldsStr = '<STR_LIT:U+002C>'.join(dbFields)<EOL>query = '<STR_LIT>''<STR_LIT>'% (dbFieldsStr, self.modelsTableName)<EOL>sqlParams = [jobID]<EOL>if ignoreKilled:<EOL><INDENT>query += '<STR_LIT>'<EOL>sqlParams.append(self.CMPL_REASON_KILLED)<EOL><DEDENT>with ConnectionFactory.get() as conn:<EOL><INDENT>conn.cursor.execute(query, sqlParams)<EOL>rows = conn.cursor.fetchall()<EOL><DEDENT>if rows is None:<EOL><INDENT>self._logger.error(\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>query, traceback.format_exc())<EOL><DEDENT>return [(r[<NUM_LIT:0>], list(r[<NUM_LIT:1>:])) for r in rows]<EOL>", "docstring": "Gets the specified fields for all the models for a single job. This is\n        similar to modelsGetFields\n\n        Parameters:\n        ----------------------------------------------------------------\n        jobID:              jobID for the models to be searched\n        fields:             A list  of fields to return\n        ignoreKilled:       (True/False). If True, this will ignore models that\n                            have been killed\n\n        Returns: a (possibly empty) list of tuples as follows\n          [\n            (model_id1, [field1, ..., fieldn]),\n            (model_id2, [field1, ..., fieldn]),\n            (model_id3, [field1, ..., fieldn])\n                        ...\n          ]\n\n        NOTE: since there is a window of time between a job getting inserted into\n         jobs table and the job's worker(s) starting up and creating models, an\n         empty-list result is one of the normal outcomes.", "id": "f17555:c1:m54"}
{"signature": "def _abbreviate(text, threshold):", "body": "if text is not None and len(text) > threshold:<EOL><INDENT>text = text[:threshold] + \"<STR_LIT>\"<EOL><DEDENT>return text<EOL>", "docstring": "Abbreviate the given text to threshold chars and append an ellipsis if its\n    length exceeds threshold; used for logging;\n\n    NOTE: the resulting text could be longer than threshold due to the ellipsis", "id": "f17555:m0"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>def jobsGetFields(self, jobIDs, fields, requireAll=True):<DEDENT>", "body": "assert isinstance(jobIDs, self._SEQUENCE_TYPES)<EOL>assert len(jobIDs) >=<NUM_LIT:1><EOL>rows = self._getMatchingRowsWithRetries(<EOL>self._jobs, dict(job_id=jobIDs),<EOL>['<STR_LIT>'] + [self._jobs.pubToDBNameDict[x] for x in fields])<EOL>if requireAll and len(rows) < len(jobIDs):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % (<EOL>(set(jobIDs) - set(r[<NUM_LIT:0>] for r in rows)),))<EOL><DEDENT>return [(r[<NUM_LIT:0>], list(r[<NUM_LIT:1>:])) for r in rows]<EOL>", "docstring": "Fetch the values of 1 or more fields from a sequence of job records.\n        Here, 'fields' is a sequence (list or tuple) with the names of the fields to\n        fetch. The names are the public names of the fields (camelBack, not the\n        lower_case_only form as stored in the DB).\n\n        WARNING!!!: The order of the results are NOT necessarily in the same order as\n        the order of the job IDs passed in!!!\n\n        Parameters:\n        ----------------------------------------------------------------\n        jobIDs:        A sequence of jobIDs\n        fields:        A list  of fields to return for each jobID\n\n        Returns:      A list of tuples->(jobID, [field1, field2,...])", "id": "f17555:c1:m45"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def modelSetCompleted(self, modelID, completionReason, completionMsg,<EOL>cpuTime=<NUM_LIT:0>, useConnectionID=True):<DEDENT>", "body": "if completionMsg is None:<EOL><INDENT>completionMsg = '<STR_LIT>'<EOL><DEDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'% (self.modelsTableName,)<EOL>sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg,<EOL>cpuTime, modelID]<EOL>if useConnectionID:<EOL><INDENT>query += \"<STR_LIT>\"<EOL>sqlParams.append(self._connectionID)<EOL><DEDENT>with ConnectionFactory.get() as conn:<EOL><INDENT>numRowsAffected = conn.cursor.execute(query, sqlParams)<EOL><DEDENT>if numRowsAffected != <NUM_LIT:1>:<EOL><INDENT>raise InvalidConnectionException(<EOL>(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\") % (modelID, self._connectionID, numRowsAffected))<EOL><DEDENT>", "docstring": "Mark a model as completed, with the given completionReason and\n        completionMsg. This will fail if the model does not currently belong to this\n        client (connection_id doesn't match).\n\n        Parameters:\n        ----------------------------------------------------------------\n        modelID:             model ID of model to modify\n        completionReason:    completionReason string\n        completionMsg:       completionMsg string\n        cpuTime:             amount of CPU time spent on this model\n        useConnectionID:     True if the connection id of the calling function\n                              must be the same as the connection that created the\n                              job. Set to True for hypersearch workers, which use\n                              this mechanism for orphaned model detection.", "id": "f17555:c1:m62"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>def jobInfo(self, jobID):<DEDENT>", "body": "row = self._getOneMatchingRowWithRetries(<EOL>self._jobs, dict(job_id=jobID),<EOL>[self._jobs.pubToDBNameDict[n]<EOL>for n in self._jobs.jobInfoNamedTuple._fields])<EOL>if row is None:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % (jobID))<EOL><DEDENT>return self._jobs.jobInfoNamedTuple._make(row)<EOL>", "docstring": "Get all info about a job\n\n        Parameters:\n        ----------------------------------------------------------------\n        job:    jobID of the job to query\n        retval:  namedtuple containing the job info.", "id": "f17555:c1:m33"}
{"signature": "def _getOneMatchingRowNoRetries(self, tableInfo, conn, fieldsToMatch,<EOL>selectFieldNames):", "body": "rows = self._getMatchingRowsNoRetries(tableInfo, conn, fieldsToMatch,<EOL>selectFieldNames, maxRows=<NUM_LIT:1>)<EOL>if rows:<EOL><INDENT>assert len(rows) == <NUM_LIT:1>, repr(len(rows))<EOL>result = rows[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>result = None<EOL><DEDENT>return result<EOL>", "docstring": "Return a single matching row with the requested field values from the\n        the requested table or None if nothing matched.\n\n        tableInfo:       Table information: a ClientJobsDAO._TableInfoBase  instance\n        conn:            Owned connection acquired from ConnectionFactory.get()\n        fieldsToMatch:   Dictionary of internal fieldName/value mappings that\n                         identify the desired rows. If a value is an instance of\n                         ClientJobsDAO._SEQUENCE_TYPES (list/set/tuple), then the\n                         operator 'IN' will be used in the corresponding SQL\n                         predicate; if the value is bool: \"IS TRUE/FALSE\"; if the\n                         value is None: \"IS NULL\"; '=' will be used for all other\n                         cases.\n        selectFieldNames:\n                         list of fields to return, using internal field names\n\n        retval:          A sequence of field values of the matching row in the order\n                          of the given field names; or None if there was no match.", "id": "f17555:c1:m13"}
{"signature": "def getConnectionID(self):", "body": "return self._connectionID<EOL>", "docstring": "Return our connection ID. This can be used for worker identification\n        purposes.\n\n        NOTE: the actual MySQL connection ID used in queries may change from time\n         to time if connection is re-acquired (e.g., upon MySQL server restart) or\n         when more than one entry from the connection pool has been used (e.g.,\n         multi-threaded apps)", "id": "f17555:c1:m18"}
{"signature": "@g_retrySQL<EOL><INDENT>def _getOneMatchingRowWithRetries(self, tableInfo, fieldsToMatch,<EOL>selectFieldNames):<DEDENT>", "body": "with ConnectionFactory.get() as conn:<EOL><INDENT>return self._getOneMatchingRowNoRetries(tableInfo, conn, fieldsToMatch,<EOL>selectFieldNames)<EOL><DEDENT>", "docstring": "Like _getOneMatchingRowNoRetries(), but with retries on transient MySQL\n        failures", "id": "f17555:c1:m14"}
{"signature": "def _resumeJobNoRetries(self, conn, jobID, alreadyRunning):", "body": "<EOL>if alreadyRunning:<EOL><INDENT>initStatus = self.STATUS_TESTMODE<EOL><DEDENT>else:<EOL><INDENT>initStatus = self.STATUS_NOTSTARTED<EOL><DEDENT>assignments = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>]<EOL>assignmentValues = [initStatus]<EOL>if alreadyRunning:<EOL><INDENT>assignments += ['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>']<EOL>assignmentValues.append(self._connectionID)<EOL><DEDENT>else:<EOL><INDENT>assignments += ['<STR_LIT>', '<STR_LIT>']<EOL><DEDENT>assignments = '<STR_LIT:U+002CU+0020>'.join(assignments)<EOL>query = '<STR_LIT>''<STR_LIT>'% (self.jobsTableName, assignments)<EOL>sqlParams = assignmentValues + [jobID, self.STATUS_COMPLETED]<EOL>numRowsAffected = conn.cursor.execute(query, sqlParams)<EOL>assert numRowsAffected <= <NUM_LIT:1>, repr(numRowsAffected)<EOL>if numRowsAffected == <NUM_LIT:0>:<EOL><INDENT>self._logger.info(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\", jobID)<EOL><DEDENT>return<EOL>", "docstring": "Resumes processing of an existing job that is presently in the\n        STATUS_COMPLETED state.\n\n        NOTE: this is primarily for resuming suspended Production and Stream Jobs; DO\n         NOT use it on Hypersearch jobs.\n\n        This prepares an existing job entry to resume processing. The CJM is always\n        periodically sweeping the jobs table and when it finds a job that is ready\n        to run, it will proceed to start it up on Hadoop.\n\n        Parameters:\n        ----------------------------------------------------------------\n        conn:            Owned connection acquired from ConnectionFactory.get()\n        jobID:          jobID of the job to resume\n        alreadyRunning: Used for unit test purposes only. 
This inserts the job\n                         in the running state. It is used when running a worker\n                         in standalone mode without hadoop.\n\n        raises:         Throws a RuntimeError if no rows are affected. This could\n                        either be because:\n                          1) Because there was not matching jobID\n                          2) or if the status of the job was not STATUS_COMPLETED.\n\n        retval:            nothing", "id": "f17555:c1:m17"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>def _initTables(self, cursor, deleteOldVersions, recreate):<DEDENT>", "body": "<EOL>if deleteOldVersions:<EOL><INDENT>self._logger.info(<EOL>\"<STR_LIT>\",<EOL>traceback.format_stack())<EOL>for i in range(self._DB_VERSION):<EOL><INDENT>cursor.execute('<STR_LIT>' %<EOL>(self.__getDBNameForVersion(i),))<EOL><DEDENT><DEDENT>if recreate:<EOL><INDENT>self._logger.info(<EOL>\"<STR_LIT>\",<EOL>self.dbName, traceback.format_stack())<EOL>cursor.execute('<STR_LIT>' % (self.dbName))<EOL><DEDENT>cursor.execute('<STR_LIT>' % (self.dbName))<EOL>cursor.execute('<STR_LIT>' % (self.dbName))<EOL>output = cursor.fetchall()<EOL>tableNames = [x[<NUM_LIT:0>] for x in output]<EOL>if '<STR_LIT>' not in tableNames:<EOL><INDENT>self._logger.info(\"<STR_LIT>\", self.jobsTableName)<EOL>fields = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' % (self.CLIENT_MAX_LEN),<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' % (self.HASH_MAX_LEN),<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'  %self.CMPL_REASON_SUCCESS,<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' % self.DEFAULT_JOB_PRIORITY,<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'  %self.CLEAN_NOT_DONE,<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>]<EOL>options = [<EOL>'<STR_LIT>',<EOL>]<EOL>query = '<STR_LIT>' %(self.jobsTableName, '<STR_LIT:U+002C>'.join(fields), '<STR_LIT:U+002C>'.join(options))<EOL>cursor.execute(query)<EOL><DEDENT>if '<STR_LIT>' not in tableNames:<EOL><INDENT>self._logger.info(\"<STR_LIT>\", self.modelsTableName)<EOL>fields = 
[<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' % (self.HASH_MAX_LEN),<EOL>'<STR_LIT>' % (self.HASH_MAX_LEN),<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>]<EOL>options = [<EOL>'<STR_LIT>',<EOL>]<EOL>query = '<STR_LIT>' %(self.modelsTableName, '<STR_LIT:U+002C>'.join(fields), '<STR_LIT:U+002C>'.join(options))<EOL>cursor.execute(query)<EOL><DEDENT>cursor.execute('<STR_LIT>' % (self.jobsTableName))<EOL>fields = cursor.fetchall()<EOL>self._jobs.dbFieldNames = [str(field[<NUM_LIT:0>]) for field in fields]<EOL>cursor.execute('<STR_LIT>' % (self.modelsTableName))<EOL>fields = cursor.fetchall()<EOL>self._models.dbFieldNames = [str(field[<NUM_LIT:0>]) for field in fields]<EOL>self._jobs.publicFieldNames = [self._columnNameDBToPublic(x)<EOL>for x in self._jobs.dbFieldNames]<EOL>self._models.publicFieldNames = [self._columnNameDBToPublic(x)<EOL>for x in self._models.dbFieldNames]<EOL>self._jobs.pubToDBNameDict = dict(<EOL>list(zip(self._jobs.publicFieldNames, self._jobs.dbFieldNames)))<EOL>self._jobs.dbToPubNameDict = dict(<EOL>list(zip(self._jobs.dbFieldNames, self._jobs.publicFieldNames)))<EOL>self._models.pubToDBNameDict = dict(<EOL>list(zip(self._models.publicFieldNames, self._models.dbFieldNames)))<EOL>self._models.dbToPubNameDict = dict(<EOL>list(zip(self._models.dbFieldNames, self._models.publicFieldNames)))<EOL>self._models.modelInfoNamedTuple = collections.namedtuple(<EOL>'<STR_LIT>', self._models.publicFieldNames)<EOL>self._jobs.jobInfoNamedTuple = collections.namedtuple(<EOL>'<STR_LIT>', self._jobs.publicFieldNames)<EOL>return<EOL>", "docstring": "Initialize tables, if needed\n\n        Parameters:\n 
       ----------------------------------------------------------------\n        cursor:              SQL cursor\n        deleteOldVersions:   if true, delete any old versions of the DB left\n                              on the server\n        recreate:            if true, recreate the database from scratch even\n                              if it already exists.", "id": "f17555:c1:m10"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def getJobs(self, fields=[]):<DEDENT>", "body": "<EOL>dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]<EOL>dbFieldsStr = '<STR_LIT:U+002C>'.join(['<STR_LIT>'] + dbFields)<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>query = '<STR_LIT>' % (dbFieldsStr, self.jobsTableName)<EOL>conn.cursor.execute(query)<EOL>rows = conn.cursor.fetchall()<EOL><DEDENT>return rows<EOL>", "docstring": "Fetch jobIDs for jobs in the table with optional fields", "id": "f17555:c1:m42"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def connect(self, deleteOldVersions=False, recreate=False):<DEDENT>", "body": "<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>self._initTables(cursor=conn.cursor, deleteOldVersions=deleteOldVersions,<EOL>recreate=recreate)<EOL>conn.cursor.execute('<STR_LIT>')<EOL>self._connectionID = conn.cursor.fetchall()[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>self._logger.info(\"<STR_LIT>\", self._connectionID)<EOL><DEDENT>return<EOL>", "docstring": "Locate the current version of the jobs DB or create a new one, and\n        optionally delete old versions laying around. If desired, this method\n        can be called at any time to re-create the tables from scratch, delete\n        old versions of the database, etc.\n\n        Parameters:\n        ----------------------------------------------------------------\n        deleteOldVersions:   if true, delete any old versions of the DB left\n                              on the server\n        recreate:            if true, recreate the database from scratch even\n                              if it already exists.", "id": "f17555:c1:m9"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>def jobGetModelIDs(self, jobID):<DEDENT>", "body": "rows = self._getMatchingRowsWithRetries(self._models, dict(job_id=jobID),<EOL>['<STR_LIT>'])<EOL>return [r[<NUM_LIT:0>] for r in rows]<EOL>", "docstring": "Fetch all the modelIDs that correspond to a given jobID; empty sequence\n        if none", "id": "f17555:c1:m37"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>def modelsInfo(self, modelIDs):<DEDENT>", "body": "assert isinstance(modelIDs, self._SEQUENCE_TYPES), (<EOL>\"<STR_LIT>\") % (type(modelIDs),)<EOL>assert modelIDs, \"<STR_LIT>\"<EOL>rows = self._getMatchingRowsWithRetries(<EOL>self._models, dict(model_id=modelIDs),<EOL>[self._models.pubToDBNameDict[f]<EOL>for f in self._models.modelInfoNamedTuple._fields])<EOL>results = [self._models.modelInfoNamedTuple._make(r) for r in rows]<EOL>assert len(results) == len(modelIDs), \"<STR_LIT>\" % (<EOL>set(modelIDs) - set(r.modelId for r in results))<EOL>return results<EOL>", "docstring": "Get ALL info for a set of models\n\n        WARNING!!!: The order of the results are NOT necessarily in the same order as\n        the order of the model IDs passed in!!!\n\n        Parameters:\n        ----------------------------------------------------------------\n        modelIDs:    list of model IDs\n        retval:      list of nametuples containing all the fields stored for each\n                        model.", "id": "f17555:c1:m52"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>def modelsGetResultAndStatus(self, modelIDs):<DEDENT>", "body": "assert isinstance(modelIDs, self._SEQUENCE_TYPES), (<EOL>\"<STR_LIT>\") % type(modelIDs)<EOL>assert len(modelIDs) >= <NUM_LIT:1>, \"<STR_LIT>\"<EOL>rows = self._getMatchingRowsWithRetries(<EOL>self._models, {'<STR_LIT>' : modelIDs},<EOL>[self._models.pubToDBNameDict[f]<EOL>for f in self._models.getResultAndStatusNamedTuple._fields])<EOL>assert len(rows) == len(modelIDs), \"<STR_LIT>\" % (<EOL>(set(modelIDs) - set(r[<NUM_LIT:0>] for r in rows)),)<EOL>return [self._models.getResultAndStatusNamedTuple._make(r) for r in rows]<EOL>", "docstring": "Get the results string and other status fields for a set of models.\n\n        WARNING!!!: The order of the results are NOT necessarily in the same order\n        as the order of the model IDs passed in!!!\n\n        For each model, this returns a tuple containing:\n         (modelID, results, status, updateCounter, numRecords, completionReason,\n             completionMsg, engParamsHash\n\n        Parameters:\n        ----------------------------------------------------------------\n        modelIDs:    list of model IDs\n        retval:      list of result tuples. Each tuple contains:\n                        (modelID, results, status, updateCounter, numRecords,\n                          completionReason, completionMsg, engParamsHash)", "id": "f17555:c1:m58"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def jobReactivateRunningJobs(self):<DEDENT>", "body": "<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>'% (self.jobsTableName,)<EOL>conn.cursor.execute(query, [self._connectionID, self.STATUS_RUNNING])<EOL><DEDENT>return<EOL>", "docstring": "Look through the jobs table and reactivate all that are already in the\n        running state by setting their _eng_allocate_new_workers fields to True;\n        used by Nupic Scheduler as part of its failure-recovery procedure.", "id": "f17555:c1:m25"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>@g_retrySQL<EOL>def getActiveJobCountForClientKey(self, clientKey):<DEDENT>", "body": "with ConnectionFactory.get() as conn:<EOL><INDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>' %  self.jobsTableName<EOL>conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED])<EOL>activeJobCount = conn.cursor.fetchone()[<NUM_LIT:0>]<EOL><DEDENT>return activeJobCount<EOL>", "docstring": "Return the number of jobs for the given clientKey and a status that is\n        not completed.", "id": "f17555:c1:m39"}
{"signature": "@logExceptions(_LOGGER)<EOL><INDENT>def jobInsertUnique(self, client, cmdLine, jobHash, clientInfo='<STR_LIT>',<EOL>clientKey='<STR_LIT>', params='<STR_LIT>', minimumWorkers=<NUM_LIT:0>,<EOL>maximumWorkers=<NUM_LIT:0>, jobType='<STR_LIT>',<EOL>priority=DEFAULT_JOB_PRIORITY):<DEDENT>", "body": "assert cmdLine, \"<STR_LIT>\" + repr(cmdLine)<EOL>@g_retrySQL<EOL>def insertUniqueWithRetries():<EOL><INDENT>jobHashValue = self._normalizeHash(jobHash)<EOL>jobID = None<EOL>with ConnectionFactory.get() as conn:<EOL><INDENT>row = self._getOneMatchingRowNoRetries(<EOL>self._jobs, conn, dict(client=client, job_hash=jobHashValue),<EOL>['<STR_LIT>', '<STR_LIT:status>'])<EOL>if row is not None:<EOL><INDENT>(jobID, status) = row<EOL>if status == self.STATUS_COMPLETED:<EOL><INDENT>query = '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'% (self.jobsTableName,)<EOL>sqlParams = (clientInfo, clientKey, cmdLine, params,<EOL>minimumWorkers, maximumWorkers, priority,<EOL>jobType, jobID, self.STATUS_COMPLETED)<EOL>numRowsUpdated = conn.cursor.execute(query, sqlParams)<EOL>assert numRowsUpdated <= <NUM_LIT:1>, repr(numRowsUpdated)<EOL>if numRowsUpdated == <NUM_LIT:0>:<EOL><INDENT>self._logger.info(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\", jobID)<EOL><DEDENT>self._resumeJobNoRetries(conn, jobID, alreadyRunning=False)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>jobID = self._insertOrGetUniqueJobNoRetries(<EOL>conn, client=client, cmdLine=cmdLine, jobHash=jobHashValue,<EOL>clientInfo=clientInfo, clientKey=clientKey, params=params,<EOL>minimumWorkers=minimumWorkers, maximumWorkers=maximumWorkers,<EOL>jobType=jobType, priority=priority, alreadyRunning=False)<EOL><DEDENT>return jobID<EOL><DEDENT><DEDENT>try:<EOL><INDENT>jobID = insertUniqueWithRetries()<EOL><DEDENT>except:<EOL><INDENT>self._logger.exception(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>jobType, client, _abbreviate(clientInfo, <NUM_LIT>), clientKey, 
jobHash,<EOL>cmdLine)<EOL>raise<EOL><DEDENT>else:<EOL><INDENT>self._logger.info(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>jobID, jobType, client, _abbreviate(clientInfo, <NUM_LIT>), clientKey,<EOL>jobHash, cmdLine)<EOL><DEDENT>return jobID<EOL>", "docstring": "Add an entry to the jobs table for a new job request, but only if the\n        same job, by the same client is not already running. If the job is already\n        running, or queued up to run, this call does nothing. If the job does not\n        exist in the jobs table or has completed, it will be inserted and/or started\n        up again.\n\n        This method is called by clients, like StreamMgr, that wish to only start up\n        a job if it hasn't already been started up.\n\n        Parameters:\n        ----------------------------------------------------------------\n        client:          Name of the client submitting the job\n        cmdLine:         Command line to use to launch each worker process; must be\n                          a non-empty string\n        jobHash:         unique hash of this job. The client must insure that this\n                          uniquely identifies this job request for the purposes\n                          of detecting duplicates.\n        clientInfo:      JSON encoded dict of client specific information.\n        clientKey:       Foreign key.\n        params:          JSON encoded dict of the parameters for the job. This\n                          can be fetched out of the database by the worker processes\n                          based on the jobID.\n        minimumWorkers:  minimum number of workers design at a time.\n        maximumWorkers:  maximum number of workers desired at a time.\n        jobType:         The type of job that this is. This should be one of the\n                          JOB_TYPE_XXXX enums. 
This is needed to allow a standard\n                          way of recognizing a job's function and capabilities.\n        priority:        Job scheduling priority; 0 is the default priority (\n                          ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are\n                          higher priority (up to ClientJobsDAO.MAX_JOB_PRIORITY),\n                          and negative values are lower priority (down to\n                          ClientJobsDAO.MIN_JOB_PRIORITY). Higher-priority jobs will\n                          be scheduled to run at the expense of the lower-priority\n                          jobs, and higher-priority job tasks will preempt those\n                          with lower priority if there is inadequate supply of\n                          scheduling slots. Excess lower priority job tasks will\n                          starve as long as slot demand exceeds supply. Most jobs\n                          should be scheduled with DEFAULT_JOB_PRIORITY. System jobs\n                          that must run at all cost, such as Multi-Model-Master,\n                          should be scheduled with MAX_JOB_PRIORITY.\n\n        retval:          jobID of the newly inserted or existing job.", "id": "f17555:c1:m22"}
{"signature": "def tailProbability(x, distributionParams):", "body": "if \"<STR_LIT>\" not in distributionParams or \"<STR_LIT>\" not in distributionParams:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>if x < distributionParams[\"<STR_LIT>\"]:<EOL><INDENT>xp = <NUM_LIT:2> * distributionParams[\"<STR_LIT>\"] - x<EOL>return tailProbability(xp, distributionParams)<EOL><DEDENT>z = (x - distributionParams[\"<STR_LIT>\"]) / distributionParams[\"<STR_LIT>\"]<EOL>return <NUM_LIT:0.5> * math.erfc(z/<NUM_LIT>)<EOL>", "docstring": "Given the normal distribution specified by the mean and standard deviation\nin distributionParams, return the probability of getting samples further\nfrom the mean. For values above the mean, this is the probability of getting\nsamples > x and for values below the mean, the probability of getting\nsamples < x. This is the Q-function: the tail probability of the normal distribution.\n\n:param distributionParams: dict with 'mean' and 'stdev' of the distribution", "id": "f17556:m6"}
{"signature": "@staticmethod<EOL><INDENT>def computeLogLikelihood(likelihood):<DEDENT>", "body": "<EOL>return math.log(<NUM_LIT> - likelihood) / -<NUM_LIT><EOL>", "docstring": "Compute a log scale representation of the likelihood value. Since the\nlikelihood computations return low probabilities that often go into four 9's\nor five 9's, a log value is more useful for visualization, thresholding,\netc.", "id": "f17556:c0:m3"}
{"signature": "def estimateNormal(sampleData, performLowerBoundCheck=True):", "body": "params = {<EOL>\"<STR_LIT:name>\": \"<STR_LIT>\",<EOL>\"<STR_LIT>\": numpy.mean(sampleData),<EOL>\"<STR_LIT>\": numpy.var(sampleData),<EOL>}<EOL>if performLowerBoundCheck:<EOL><INDENT>if params[\"<STR_LIT>\"] < <NUM_LIT>:<EOL><INDENT>params[\"<STR_LIT>\"] = <NUM_LIT><EOL><DEDENT>if params[\"<STR_LIT>\"] < <NUM_LIT>:<EOL><INDENT>params[\"<STR_LIT>\"] = <NUM_LIT><EOL><DEDENT><DEDENT>if params[\"<STR_LIT>\"] > <NUM_LIT:0>:<EOL><INDENT>params[\"<STR_LIT>\"] = math.sqrt(params[\"<STR_LIT>\"])<EOL><DEDENT>else:<EOL><INDENT>params[\"<STR_LIT>\"] = <NUM_LIT:0><EOL><DEDENT>return params<EOL>", "docstring": ":param sampleData:\n:type sampleData: Numpy array.\n:param performLowerBoundCheck:\n:type performLowerBoundCheck: bool\n:returns: A dict containing the parameters of a normal distribution based on\n    the ``sampleData``.", "id": "f17556:m4"}
{"signature": "def estimateAnomalyLikelihoods(anomalyScores,<EOL>averagingWindow=<NUM_LIT:10>,<EOL>skipRecords=<NUM_LIT:0>,<EOL>verbosity=<NUM_LIT:0>):", "body": "if verbosity > <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\", len(anomalyScores))<EOL>print(\"<STR_LIT>\", skipRecords)<EOL>print(\"<STR_LIT>\", anomalyScores[<NUM_LIT:0>:min(<NUM_LIT:20>, len(anomalyScores))])<EOL><DEDENT>if len(anomalyScores) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>aggRecordList, historicalValues, total =  _anomalyScoreMovingAverage(<EOL>anomalyScores,<EOL>windowSize = averagingWindow,<EOL>verbosity = verbosity)<EOL>s = [r[<NUM_LIT:2>] for r in aggRecordList]<EOL>dataValues = numpy.array(s)<EOL>if len(aggRecordList) <= skipRecords:<EOL><INDENT>distributionParams = nullDistribution(verbosity = verbosity)<EOL><DEDENT>else:<EOL><INDENT>distributionParams = estimateNormal(dataValues[skipRecords:])<EOL>s = [r[<NUM_LIT:1>] for r in aggRecordList]<EOL>if all([isinstance(r[<NUM_LIT:1>], numbers.Number) for r in aggRecordList]):<EOL><INDENT>metricValues = numpy.array(s)<EOL>metricDistribution = estimateNormal(metricValues[skipRecords:],<EOL>performLowerBoundCheck=False)<EOL>if metricDistribution[\"<STR_LIT>\"] < <NUM_LIT>:<EOL><INDENT>distributionParams = nullDistribution(verbosity = verbosity)<EOL><DEDENT><DEDENT><DEDENT>likelihoods = numpy.array(dataValues, dtype=float)<EOL>for i, s in enumerate(dataValues):<EOL><INDENT>likelihoods[i] = tailProbability(s, distributionParams)<EOL><DEDENT>filteredLikelihoods = numpy.array(<EOL>_filterLikelihoods(likelihoods) )<EOL>params = {<EOL>\"<STR_LIT>\":       distributionParams,<EOL>\"<STR_LIT>\": {<EOL>\"<STR_LIT>\": historicalValues,<EOL>\"<STR_LIT>\":            total,<EOL>\"<STR_LIT>\":       averagingWindow,<EOL>},<EOL>\"<STR_LIT>\":<EOL>list(likelihoods[-min(averagingWindow, len(likelihoods)):]),<EOL>}<EOL>if verbosity > 
<NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(params)<EOL>print(\"<STR_LIT>\", len(likelihoods))<EOL>print(\"<STR_LIT>\", (<EOL>filteredLikelihoods[<NUM_LIT:0>:min(<NUM_LIT:20>, len(filteredLikelihoods))] ))<EOL>print(\"<STR_LIT>\")<EOL><DEDENT>return (filteredLikelihoods, aggRecordList, params)<EOL>", "docstring": "Given a series of anomaly scores, compute the likelihood for each score. This\nfunction should be called once on a bunch of historical anomaly scores for an\ninitial estimate of the distribution. It should be called again every so often\n(say every 50 records) to update the estimate.\n\n:param anomalyScores: a list of records. Each record is a list with the\n                      following three elements: [timestamp, value, score]\n\n                      Example::\n\n                          [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]\n\n                      For best results, the list should be between 1000\n                      and 10,000 records\n:param averagingWindow: integer number of records to average over\n:param skipRecords: integer specifying number of records to skip when\n                    estimating distributions. If skip records are >=\n                    len(anomalyScores), a very broad distribution is returned\n                    that makes everything pretty likely.\n:param verbosity: integer controlling extent of printouts for debugging\n\n                    0 = none\n                    1 = occasional information\n                    2 = print every record\n\n:returns: 3-tuple consisting of:\n\n          - likelihoods\n\n            numpy array of likelihoods, one for each aggregated point\n\n          - avgRecordList\n\n            list of averaged input records\n\n          - params\n\n            a small JSON dict that contains the state of the estimator", "id": "f17556:m0"}
{"signature": "def anomalyProbability(self, value, anomalyScore, timestamp=None):", "body": "if timestamp is None:<EOL><INDENT>timestamp = self._iteration<EOL><DEDENT>dataPoint = (timestamp, value, anomalyScore)<EOL>if self._iteration < self._probationaryPeriod:<EOL><INDENT>likelihood = <NUM_LIT:0.5><EOL><DEDENT>else:<EOL><INDENT>if ( (self._distribution is None) or<EOL>(self._iteration % self._reestimationPeriod == <NUM_LIT:0>) ):<EOL><INDENT>numSkipRecords = self._calcSkipRecords(<EOL>numIngested=self._iteration,<EOL>windowSize=self._historicalScores.maxlen,<EOL>learningPeriod=self._learningPeriod)<EOL>_, _, self._distribution = estimateAnomalyLikelihoods(<EOL>self._historicalScores,<EOL>skipRecords=numSkipRecords)<EOL><DEDENT>likelihoods, _, self._distribution = updateAnomalyLikelihoods(<EOL>[dataPoint],<EOL>self._distribution)<EOL>likelihood = <NUM_LIT:1.0> - likelihoods[<NUM_LIT:0>]<EOL><DEDENT>self._historicalScores.append(dataPoint)<EOL>self._iteration += <NUM_LIT:1><EOL>return likelihood<EOL>", "docstring": "Compute the probability that the current value plus anomaly score represents\nan anomaly given the historical distribution of anomaly scores. The closer\nthe number is to 1, the higher the chance it is an anomaly.\n\n:param value: the current metric (\"raw\") input value, eg. \"orange\", or\n               '21.2' (deg. Celsius), ...\n:param anomalyScore: the current anomaly score\n:param timestamp: [optional] timestamp of the ocurrence,\n                   default (None) results in using iteration step.\n:returns: the anomalyLikelihood for this record.", "id": "f17556:c0:m8"}
{"signature": "def _filterLikelihoods(likelihoods,<EOL>redThreshold=<NUM_LIT>, yellowThreshold=<NUM_LIT>):", "body": "redThreshold    = <NUM_LIT:1.0> - redThreshold<EOL>yellowThreshold = <NUM_LIT:1.0> - yellowThreshold<EOL>filteredLikelihoods = [likelihoods[<NUM_LIT:0>]]<EOL>for i, v in enumerate(likelihoods[<NUM_LIT:1>:]):<EOL><INDENT>if v <= redThreshold:<EOL><INDENT>if likelihoods[i] > redThreshold:<EOL><INDENT>filteredLikelihoods.append(v)<EOL><DEDENT>else:<EOL><INDENT>filteredLikelihoods.append(yellowThreshold)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>filteredLikelihoods.append(v)<EOL><DEDENT><DEDENT>return filteredLikelihoods<EOL>", "docstring": "Filter the list of raw (pre-filtered) likelihoods so that we only preserve\nsharp increases in likelihood. 'likelihoods' can be a numpy array of floats or\na list of floats.\n\n:returns: A new list of floats likelihoods containing the filtered values.", "id": "f17556:m2"}
{"signature": "def generateSimpleCoincMatrix(nCoinc=<NUM_LIT:10>, length=<NUM_LIT>, activity=<NUM_LIT:50>):", "body": "assert nCoinc*activity<=length, \"<STR_LIT>\"<EOL>coincMatrix = SM32(<NUM_LIT:0>, length)<EOL>coinc = numpy.zeros(length, dtype='<STR_LIT>')<EOL>for i in range(nCoinc):<EOL><INDENT>coinc[:] = <NUM_LIT:0><EOL>coinc[i*activity:(i+<NUM_LIT:1>)*activity] = <NUM_LIT:1><EOL>coincMatrix.addRow(coinc)<EOL><DEDENT>return coincMatrix<EOL>", "docstring": "Generate a non overlapping coincidence matrix. This is used to generate random\ninputs to the temporal learner and to compare the predicted output against.\n\nIt generates a matrix of nCoinc rows, each row has length 'length' and has\na total of 'activity' bits on.\n\nParameters:\n-----------------------------------------------\nnCoinc:        the number of rows to generate\nlength:        the length of each row\nactivity:      the number of ones to put into each row.", "id": "f17557:m7"}
{"signature": "def sameSegment(seg1, seg2):", "body": "result = True<EOL>for field in [<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>, <NUM_LIT:4>, <NUM_LIT:5>, <NUM_LIT:6>]:<EOL><INDENT>if abs(seg1[<NUM_LIT:0>][field] - seg2[<NUM_LIT:0>][field]) > <NUM_LIT>:<EOL><INDENT>result = False<EOL><DEDENT><DEDENT>if len(seg1[<NUM_LIT:1>:]) != len(seg2[<NUM_LIT:1>:]):<EOL><INDENT>result = False<EOL><DEDENT>for syn in seg2[<NUM_LIT:1>:]:<EOL><INDENT>if syn[<NUM_LIT:2>] <= <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>result = False<EOL><DEDENT><DEDENT>if result == True:<EOL><INDENT>for syn in seg1[<NUM_LIT:1>:]:<EOL><INDENT>if syn[<NUM_LIT:2>] <= <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>result = False<EOL><DEDENT>res = sameSynapse(syn, seg2[<NUM_LIT:1>:])<EOL>if res == False:<EOL><INDENT>result = False<EOL><DEDENT><DEDENT><DEDENT>return result<EOL>", "docstring": "Return True if seg1 and seg2 are identical, ignoring order of synapses", "id": "f17557:m13"}
{"signature": "def generateSimpleSequences(nCoinc=<NUM_LIT:10>, seqLength=[<NUM_LIT:5>,<NUM_LIT:6>,<NUM_LIT:7>], nSeq=<NUM_LIT:100>):", "body": "coincList = list(range(nCoinc))<EOL>seqList  = []<EOL>for i in range(nSeq):<EOL><INDENT>if max(seqLength) <= nCoinc:<EOL><INDENT>seqList.append(random.sample(coincList, random.choice(seqLength)))<EOL><DEDENT>else:<EOL><INDENT>len = random.choice(seqLength)<EOL>seq = []<EOL>for x in range(len):<EOL><INDENT>seq.append(random.choice(coincList))<EOL><DEDENT>seqList.append(seq)<EOL><DEDENT><DEDENT>return seqList<EOL>", "docstring": "Generate a set of simple sequences. The elements of the sequences will be\nintegers from 0 to 'nCoinc'-1. The length of each sequence will be\nrandomly chosen from the 'seqLength' list.\n\nParameters:\n-----------------------------------------------\nnCoinc:      the number of elements available to use in the sequences\nseqLength:   a list of possible sequence lengths. The length of each\n             sequence will be randomly chosen from here.\nnSeq:        The number of sequences to generate\n\nretval:      a list of sequences. Each sequence is itself a list\n             containing the coincidence indices for that sequence.", "id": "f17557:m4"}
{"signature": "def tmDiff(tm1, tm2, verbosity = <NUM_LIT:0>, relaxSegmentTests =True):", "body": "<EOL>if sameTMParams(tm1, tm2) == False:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>result = True<EOL>if (tm1.activeState['<STR_LIT:t>'] != tm2.activeState['<STR_LIT:t>']).any():<EOL><INDENT>print('<STR_LIT>', numpy.where(tm1.activeState['<STR_LIT:t>'] != tm2.activeState['<STR_LIT:t>']))<EOL>result = False<EOL><DEDENT>if (tm1.predictedState['<STR_LIT:t>'] - tm2.predictedState['<STR_LIT:t>']).any():<EOL><INDENT>print('<STR_LIT>', numpy.where(tm1.predictedState['<STR_LIT:t>'] != tm2.predictedState['<STR_LIT:t>']))<EOL>result = False<EOL><DEDENT>if tm1.getNumSegments() != tm2.getNumSegments():<EOL><INDENT>print(\"<STR_LIT>\", tm1.getNumSegments(), tm2.getNumSegments())<EOL>result = False<EOL><DEDENT>if tm1.getNumSynapses() != tm2.getNumSynapses():<EOL><INDENT>print(\"<STR_LIT>\", tm1.getNumSynapses(), tm2.getNumSynapses())<EOL>tm1.printCells()<EOL>tm2.printCells()<EOL>result = False<EOL><DEDENT>for c in range(tm1.numberOfCols):<EOL><INDENT>for i in range(tm2.cellsPerColumn):<EOL><INDENT>if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i):<EOL><INDENT>print(\"<STR_LIT>\",c,i, end='<STR_LIT:U+0020>')<EOL>print(tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i))<EOL>result = False<EOL><DEDENT><DEDENT><DEDENT>if result == True and not relaxSegmentTests:<EOL><INDENT>for c in range(tm1.numberOfCols):<EOL><INDENT>for i in range(tm2.cellsPerColumn):<EOL><INDENT>nSegs = tm1.getNumSegmentsInCell(c, i)<EOL>for segIdx in range(nSegs):<EOL><INDENT>tm1seg = tm1.getSegmentOnCell(c, i, segIdx)<EOL>res = False<EOL>for tm2segIdx in range(nSegs):<EOL><INDENT>tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx)<EOL>if sameSegment(tm1seg, tm2seg) == True:<EOL><INDENT>res = True<EOL>break<EOL><DEDENT><DEDENT>if res == False:<EOL><INDENT>print(\"<STR_LIT>\",c,i)<EOL>if verbosity >= <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>tm1.printCell(c, 
i)<EOL>print(\"<STR_LIT>\")<EOL>tm2.printCell(c, i)<EOL><DEDENT>result = False<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if result == True and (verbosity > <NUM_LIT:1>):<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return result<EOL>", "docstring": "Given two TM instances, list the difference between them and returns False\nif there is a difference. This function checks the major parameters. If this\npasses (and checkLearn is true) it checks the number of segments on\neach cell. If this passes, checks each synapse on each segment.\nWhen comparing C++ and Py, the segments are usually in different orders in the\ncells. tmDiff ignores segment order when comparing TM's.", "id": "f17557:m14"}
{"signature": "def populationStability(vectors, numSamples=None):", "body": "<EOL>numVectors = len(vectors)<EOL>if numSamples is None:<EOL><INDENT>numSamples = numVectors-<NUM_LIT:1><EOL>countOn = list(range(numVectors-<NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>countOn = numpy.random.randint(<NUM_LIT:0>, numVectors-<NUM_LIT:1>, numSamples)<EOL><DEDENT>sigmap = <NUM_LIT:0.0><EOL>for i in countOn:<EOL><INDENT>match = checkMatch(vectors[i], vectors[i+<NUM_LIT:1>], sparse=False)<EOL>if match[<NUM_LIT:1>] != <NUM_LIT:0>:<EOL><INDENT>sigmap += float(match[<NUM_LIT:0>])/match[<NUM_LIT:1>]<EOL><DEDENT><DEDENT>return sigmap / numSamples<EOL>", "docstring": "Returns the stability for the population averaged over multiple time steps\n\nParameters:\n-----------------------------------------------\nvectors:          the vectors for which the stability is calculated\nnumSamples        the number of time steps where stability is counted\n\nAt each time step, count the fraction of the active elements which are stable\nfrom the previous step\nAverage all the fraction", "id": "f17557:m25"}
{"signature": "def vectorsFromSeqList(seqList, patternMatrix):", "body": "totalLen = <NUM_LIT:0><EOL>for seq in seqList:<EOL><INDENT>totalLen += len(seq)<EOL><DEDENT>vectors = numpy.zeros((totalLen, patternMatrix.shape[<NUM_LIT:1>]), dtype='<STR_LIT:bool>')<EOL>vecOffset = <NUM_LIT:0><EOL>for seq in seqList:<EOL><INDENT>seq = numpy.array(seq, dtype='<STR_LIT>')<EOL>for idx,coinc in enumerate(seq):<EOL><INDENT>vectors[vecOffset] = patternMatrix.getRow(int(coinc))<EOL>vecOffset += <NUM_LIT:1><EOL><DEDENT><DEDENT>return vectors<EOL>", "docstring": "Convert a list of sequences of pattern indices, and a pattern lookup table\n    into a an array of patterns\n\nParameters:\n-----------------------------------------------\nseq:            the sequence, given as indices into the patternMatrix\npatternMatrix:  a SparseMatrix contaning the possible patterns used in\n                        the sequence.", "id": "f17557:m10"}
{"signature": "def tmDiff2(tm1, tm2, verbosity = <NUM_LIT:0>, relaxSegmentTests =True,<EOL>checkLearn = True, checkStates = True):", "body": "<EOL>if sameTMParams(tm1, tm2) == False:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>tm1Label = \"<STR_LIT>\" % tm1.__class__.__name__<EOL>tm2Label = \"<STR_LIT>\" % tm2.__class__.__name__<EOL>result = True<EOL>if checkStates:<EOL><INDENT>if (tm1.infActiveState['<STR_LIT:t>'] != tm2.infActiveState['<STR_LIT:t>']).any():<EOL><INDENT>print('<STR_LIT>', numpy.where(tm1.infActiveState['<STR_LIT:t>'] != tm2.infActiveState['<STR_LIT:t>']))<EOL>result = False<EOL><DEDENT>if (tm1.infPredictedState['<STR_LIT:t>'] - tm2.infPredictedState['<STR_LIT:t>']).any():<EOL><INDENT>print('<STR_LIT>', numpy.where(tm1.infPredictedState['<STR_LIT:t>'] != tm2.infPredictedState['<STR_LIT:t>']))<EOL>result = False<EOL><DEDENT>if checkLearn and (tm1.lrnActiveState['<STR_LIT:t>'] - tm2.lrnActiveState['<STR_LIT:t>']).any():<EOL><INDENT>print('<STR_LIT>', numpy.where(tm1.lrnActiveState['<STR_LIT:t>'] != tm2.lrnActiveState['<STR_LIT:t>']))<EOL>result = False<EOL><DEDENT>if checkLearn and (tm1.lrnPredictedState['<STR_LIT:t>'] - tm2.lrnPredictedState['<STR_LIT:t>']).any():<EOL><INDENT>print('<STR_LIT>', numpy.where(tm1.lrnPredictedState['<STR_LIT:t>'] != tm2.lrnPredictedState['<STR_LIT:t>']))<EOL>result = False<EOL><DEDENT>if checkLearn and abs(tm1.getAvgLearnedSeqLength() - tm2.getAvgLearnedSeqLength()) > <NUM_LIT>:<EOL><INDENT>print(\"<STR_LIT>\", end='<STR_LIT:U+0020>')<EOL>print(tm1.getAvgLearnedSeqLength(), \"<STR_LIT>\", tm2.getAvgLearnedSeqLength())<EOL>result = False<EOL><DEDENT><DEDENT>if tm1.getNumSegments() != tm2.getNumSegments():<EOL><INDENT>print(\"<STR_LIT>\", tm1.getNumSegments(), tm2.getNumSegments())<EOL>result = False<EOL><DEDENT>if tm1.getNumSynapses() != tm2.getNumSynapses():<EOL><INDENT>print(\"<STR_LIT>\", tm1.getNumSynapses(), tm2.getNumSynapses())<EOL>if verbosity >= <NUM_LIT:3>:<EOL><INDENT>print(\"<STR_LIT>\" % 
tm1Label, end='<STR_LIT:U+0020>')<EOL>tm1.printCells()<EOL>print(\"<STR_LIT>\" % tm2Label, end='<STR_LIT:U+0020>')<EOL>tm2.printCells()<EOL><DEDENT><DEDENT>for c in range(tm1.numberOfCols):<EOL><INDENT>for i in range(tm2.cellsPerColumn):<EOL><INDENT>if tm1.getNumSegmentsInCell(c, i) != tm2.getNumSegmentsInCell(c, i):<EOL><INDENT>print(\"<STR_LIT>\",c,i, end='<STR_LIT:U+0020>')<EOL>print(tm1.getNumSegmentsInCell(c, i), tm2.getNumSegmentsInCell(c, i))<EOL>result = False<EOL><DEDENT><DEDENT><DEDENT>if result == True and not relaxSegmentTests and checkLearn:<EOL><INDENT>for c in range(tm1.numberOfCols):<EOL><INDENT>for i in range(tm2.cellsPerColumn):<EOL><INDENT>nSegs = tm1.getNumSegmentsInCell(c, i)<EOL>for segIdx in range(nSegs):<EOL><INDENT>tm1seg = tm1.getSegmentOnCell(c, i, segIdx)<EOL>res = False<EOL>for tm2segIdx in range(nSegs):<EOL><INDENT>tm2seg = tm2.getSegmentOnCell(c, i, tm2segIdx)<EOL>if sameSegment(tm1seg, tm2seg) == True:<EOL><INDENT>res = True<EOL>break<EOL><DEDENT><DEDENT>if res == False:<EOL><INDENT>print(\"<STR_LIT>\",c,i)<EOL>result = False<EOL>if verbosity >= <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\" % tm1Label, end='<STR_LIT:U+0020>')<EOL>tm1.printCell(c, i)<EOL>print(\"<STR_LIT>\" % tm2Label, end='<STR_LIT:U+0020>')<EOL>tm2.printCell(c, i)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if result == True and (verbosity > <NUM_LIT:1>):<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return result<EOL>", "docstring": "Given two TM instances, list the difference between them and returns False\nif there is a difference. This function checks the major parameters. If this\npasses (and checkLearn is true) it checks the number of segments on each cell.\nIf this passes, checks each synapse on each segment.\nWhen comparing C++ and Py, the segments are usually in different orders in the\ncells. 
tmDiff ignores segment order when comparing TM's.\n\nIf checkLearn is True, will check learn states as well as all the segments\n\nIf checkStates is True, will check the various state arrays", "id": "f17557:m15"}
{"signature": "def generateSequences(nPatterns=<NUM_LIT:10>, patternLen=<NUM_LIT>, patternActivity=<NUM_LIT:50>,<EOL>hubs=[<NUM_LIT:2>,<NUM_LIT:6>],  seqLength=[<NUM_LIT:5>,<NUM_LIT:6>,<NUM_LIT:7>],<EOL>nSimpleSequences=<NUM_LIT:50>,  nHubSequences=<NUM_LIT:50>):", "body": "<EOL>patterns = generateCoincMatrix(nCoinc=nPatterns, length=patternLen,<EOL>activity=patternActivity)<EOL>seqList =  generateSimpleSequences(nCoinc=nPatterns, seqLength=seqLength,<EOL>nSeq=nSimpleSequences) +generateHubSequences(nCoinc=nPatterns, hubs=hubs, seqLength=seqLength,<EOL>nSeq=nHubSequences)<EOL>return (seqList, patterns)<EOL>", "docstring": "Generate a set of simple and hub sequences. A simple sequence contains\na randomly chosen set of elements from 0 to 'nCoinc-1'. A hub sequence\nalways contains a hub element in the middle of it.\n\nParameters:\n-----------------------------------------------\nnPatterns:        the number of patterns to use in the sequences.\npatternLen:       The number of elements in each pattern\npatternActivity:  The number of elements that should be active in\n                      each pattern\nhubs:             which of the elements will be used as hubs.\nseqLength:        a list of possible sequence lengths. The length of each\n                      sequence will be randomly chosen from here.\nnSimpleSequences: The number of simple sequences to generate\nnHubSequences:    The number of hub sequences to generate\n\nretval:           (seqList, patterns)\n                  seqList: a list of sequences. Each sequence is itself a list\n                                containing the input pattern indices for that sequence.\n                  patterns: the input patterns used in the seqList.", "id": "f17557:m8"}
{"signature": "def _fillInOnTimes(vector, durations):", "body": "<EOL>nonzeros = numpy.array(vector).nonzero()[<NUM_LIT:0>]<EOL>if len(nonzeros) == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>if len(nonzeros) == <NUM_LIT:1>:<EOL><INDENT>durations[nonzeros[<NUM_LIT:0>]] = <NUM_LIT:1><EOL>return<EOL><DEDENT>prev = nonzeros[<NUM_LIT:0>]<EOL>onTime = <NUM_LIT:1><EOL>onStartIdx = prev<EOL>endIdx = nonzeros[-<NUM_LIT:1>]<EOL>for idx in nonzeros[<NUM_LIT:1>:]:<EOL><INDENT>if idx != prev+<NUM_LIT:1>:<EOL><INDENT>durations[onStartIdx:onStartIdx+onTime] = list(range(<NUM_LIT:1>,onTime+<NUM_LIT:1>))<EOL>onTime       = <NUM_LIT:1><EOL>onStartIdx = idx<EOL><DEDENT>else:<EOL><INDENT>onTime += <NUM_LIT:1><EOL><DEDENT>prev = idx<EOL><DEDENT>durations[onStartIdx:onStartIdx+onTime] = list(range(<NUM_LIT:1>,onTime+<NUM_LIT:1>))<EOL>", "docstring": "Helper function used by averageOnTimePerTimestep. 'durations' is a vector\nwhich must be the same len as vector. For each \"on\" in vector, it fills in\nthe corresponding element of duration with the duration of that \"on\" signal\nup until that time\n\nParameters:\n-----------------------------------------------\nvector:     vector of output values over time\ndurations:  vector same length as 'vector', initialized to 0's.\n            This is filled in with the durations of each 'on\" signal.\n\nExample:\nvector:     11100000001100000000011111100000\ndurations:  12300000001200000000012345600000", "id": "f17557:m20"}
{"signature": "def generateCoincMatrix(nCoinc=<NUM_LIT:10>, length=<NUM_LIT>, activity=<NUM_LIT:50>):", "body": "coincMatrix0 = SM32(int(nCoinc), int(length))<EOL>theOnes = numpy.array([<NUM_LIT:1.0>] * activity, dtype=numpy.float32)<EOL>for rowIdx in range(nCoinc):<EOL><INDENT>coinc = numpy.array(random.sample(range(length),<EOL>activity), dtype=numpy.uint32)<EOL>coinc.sort()<EOL>coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes)<EOL><DEDENT>coincMatrix = SM32(int(nCoinc), int(length))<EOL>coincMatrix.initializeWithFixedNNZR(activity)<EOL>return coincMatrix0<EOL>", "docstring": "Generate a coincidence matrix. This is used to generate random inputs to the\ntemporal learner and to compare the predicted output against.\n\nIt generates a matrix of nCoinc rows, each row has length 'length' and has\na total of 'activity' bits on.\n\nParameters:\n-----------------------------------------------\nnCoinc:        the number of rows to generate\nlength:        the length of each row\nactivity:      the number of ones to put into each row.", "id": "f17557:m2"}
{"signature": "def percentOutputsStableOverNTimeSteps(vectors, numSamples=None):", "body": "<EOL>totalSamples = len(vectors)<EOL>windowSize = numSamples<EOL>numWindows = <NUM_LIT:0><EOL>pctStable = <NUM_LIT:0><EOL>for wStart in range(<NUM_LIT:0>, totalSamples-windowSize+<NUM_LIT:1>):<EOL><INDENT>data = vectors[wStart:wStart+windowSize]<EOL>outputSums = data.sum(axis=<NUM_LIT:0>)<EOL>stableOutputs = (outputSums == windowSize).sum()<EOL>samplePctStable = float(stableOutputs) / data[<NUM_LIT:0>].sum()<EOL>print(samplePctStable)<EOL>pctStable += samplePctStable<EOL>numWindows += <NUM_LIT:1><EOL><DEDENT>return float(pctStable) / numWindows<EOL>", "docstring": "Returns the percent of the outputs that remain completely stable over\nN time steps.\n\nParameters:\n-----------------------------------------------\nvectors:        the vectors for which the stability is calculated\nnumSamples:     the number of time steps where stability is counted\n\nFor each window of numSamples, count how many outputs are active during\nthe entire window.", "id": "f17557:m26"}
{"signature": "def sameTMParams(tp1, tp2):", "body": "result = True<EOL>for param in [\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>if getattr(tp1, param) != getattr(tp2,param):<EOL><INDENT>print(param,\"<STR_LIT>\")<EOL>print(getattr(tp1, param), \"<STR_LIT>\", getattr(tp2,param))<EOL>result = False<EOL><DEDENT><DEDENT>return result<EOL>", "docstring": "Given two TM instances, see if any parameters are different.", "id": "f17557:m11"}
{"signature": "def predictionExtent(inputs, resets, outputs, minOverlapPct=<NUM_LIT>):", "body": "<EOL>predCounts = None<EOL>predTotal = <NUM_LIT:0><EOL>nSamples = len(outputs)<EOL>predTotalNotLimited = <NUM_LIT:0><EOL>nSamplesNotLimited = <NUM_LIT:0><EOL>nCols = len(inputs[<NUM_LIT:0>])<EOL>nCellsPerCol = len(outputs[<NUM_LIT:0>]) // nCols<EOL>for idx in range(nSamples):<EOL><INDENT>activeCols = outputs[idx].reshape(nCols, nCellsPerCol).max(axis=<NUM_LIT:1>)<EOL>steps = <NUM_LIT:0><EOL>while (idx+steps+<NUM_LIT:1> < nSamples) and (resets[idx+steps+<NUM_LIT:1>] == <NUM_LIT:0>):<EOL><INDENT>overlap = numpy.logical_and(inputs[idx+steps+<NUM_LIT:1>], activeCols)<EOL>overlapPct = <NUM_LIT> * float(overlap.sum()) / inputs[idx+steps+<NUM_LIT:1>].sum()<EOL>if overlapPct >= minOverlapPct:<EOL><INDENT>steps += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>predCounts = _accumulateFrequencyCounts([steps], predCounts)<EOL>predTotal += steps<EOL>if resets[idx] or((idx+steps+<NUM_LIT:1> < nSamples) and (not resets[idx+steps+<NUM_LIT:1>])):<EOL><INDENT>predTotalNotLimited += steps<EOL>nSamplesNotLimited += <NUM_LIT:1><EOL><DEDENT><DEDENT>return (float(predTotal) / nSamples,<EOL>float(predTotalNotLimited) / nSamplesNotLimited,<EOL>predCounts)<EOL>", "docstring": "Computes the predictive ability of a temporal memory (TM). This routine returns\na value which is the average number of time steps of prediction provided\nby the TM. It accepts as input the inputs, outputs, and resets provided to\nthe TM as well as a 'minOverlapPct' used to evalulate whether or not a\nprediction is a good enough match to the actual input.\n\nThe 'outputs' are the pooling outputs of the TM. This routine treats each output\nas a \"manifold\" that includes the active columns that should be present in the\nnext N inputs. It then looks at each successive input and sees if it's active\ncolumns are within the manifold. 
For each output sample, it computes how\nmany time steps it can go forward on the input before the input overlap with\nthe manifold is less then 'minOverlapPct'. It returns the average number of\ntime steps calculated for each output.\n\nParameters:\n-----------------------------------------------\ninputs:          The inputs to the TM. Row 0 contains the inputs from time\n                 step 0, row 1 from time step 1, etc.\nresets:          The reset input to the TM. Element 0 contains the reset from\n                 time step 0, element 1 from time step 1, etc.\noutputs:         The pooling outputs from the TM. Row 0 contains the outputs\n                 from time step 0, row 1 from time step 1, etc.\nminOverlapPct:   How much each input's columns must overlap with the pooling\n                 output's columns to be considered a valid prediction.\n\nretval:          (Average number of time steps of prediction over all output\n                   samples,\n                  Average number of time steps of prediction when we aren't\n                   cut short by the end of the sequence,\n                  List containing frequency counts of each encountered\n                   prediction time)", "id": "f17557:m29"}
{"signature": "def spDiff(SP1,SP2):", "body": "if(len(SP1._masterConnectedM)!=len(SP2._masterConnectedM)):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>if(len(SP1._masterPotentialM)!=len(SP2._masterPotentialM)):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>if(len(SP1._masterPermanenceM)!=len(SP2._masterPermanenceM)):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>for i in range(<NUM_LIT:0>,len(SP1._masterConnectedM)):<EOL><INDENT>connected1 = SP1._masterConnectedM[i]<EOL>connected2 = SP2._masterConnectedM[i]<EOL>if(connected1!=connected2):<EOL><INDENT>print(\"<STR_LIT>\"  % (i))<EOL>return False<EOL><DEDENT>permanences1 = SP1._masterPermanenceM[i];<EOL>permanences2 = SP2._masterPermanenceM[i];<EOL>if(permanences1!=permanences2):<EOL><INDENT>print(\"<STR_LIT>\" % (i))<EOL>return False<EOL><DEDENT>potential1 = SP1._masterPotentialM[i];<EOL>potential2 = SP2._masterPotentialM[i];<EOL>if(potential1!=potential2):<EOL><INDENT>print(\"<STR_LIT>\" % (i))<EOL>return False<EOL><DEDENT><DEDENT>if(not numpy.array_equal(SP1._firingBoostFactors,SP2._firingBoostFactors)):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>if(not numpy.array_equal(SP1._dutyCycleAfterInh,SP2._dutyCycleAfterInh)):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>if(not numpy.array_equal(SP1._dutyCycleBeforeInh,SP2._dutyCycleBeforeInh)):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>return False<EOL><DEDENT>print(\"<STR_LIT>\")<EOL>return True<EOL>", "docstring": "Function that compares two spatial pooler instances. 
Compares the\nstatic variables between the two poolers to make sure that they are equivalent.\n\nParameters\n-----------------------------------------\nSP1 first spatial pooler to be compared\n\nSP2 second spatial pooler to be compared\n\nTo establish equality, this function does the following:\n\n1.Compares the connected synapse matrices for each coincidence\n\n2.Compare the potential synapse matrices for each coincidence\n\n3.Compare the permanence matrices for each coincidence\n\n4.Compare the firing boosts between the two poolers.\n\n5.Compare the duty cycles before and after inhibition for both poolers", "id": "f17557:m16"}
{"signature": "def generateHubSequences(nCoinc=<NUM_LIT:10>, hubs = [<NUM_LIT:2>,<NUM_LIT:6>], seqLength=[<NUM_LIT:5>,<NUM_LIT:6>,<NUM_LIT:7>], nSeq=<NUM_LIT:100>):", "body": "coincList = list(range(nCoinc))<EOL>for hub in hubs:<EOL><INDENT>coincList.remove(hub)<EOL><DEDENT>seqList = []<EOL>for i in range(nSeq):<EOL><INDENT>length = random.choice(seqLength)-<NUM_LIT:1><EOL>seq = random.sample(coincList,length)<EOL>seq.insert(length//<NUM_LIT:2>, random.choice(hubs))<EOL>seqList.append(seq)<EOL><DEDENT>return seqList<EOL>", "docstring": "Generate a set of hub sequences. These are sequences which contain a hub\nelement in the middle. The elements of the sequences will be integers\nfrom 0 to 'nCoinc'-1. The hub elements will only appear in the middle of\neach sequence. The length of each sequence will be randomly chosen from the\n'seqLength' list.\n\nParameters:\n-----------------------------------------------\nnCoinc:        the number of elements available to use in the sequences\nhubs:          which of the elements will be used as hubs.\nseqLength:     a list of possible sequence lengths. The length of each\n                      sequence will be randomly chosen from here.\nnSeq:          The number of sequences to generate\n\nretval:        a list of sequences. Each sequence is itself a list\n              containing the coincidence indices for that sequence.", "id": "f17557:m5"}
{"signature": "def _listOfOnTimesInVec(vector):", "body": "<EOL>durations = []<EOL>numOnTimes   = <NUM_LIT:0><EOL>totalOnTime = <NUM_LIT:0><EOL>nonzeros = numpy.array(vector).nonzero()[<NUM_LIT:0>]<EOL>if len(nonzeros) == <NUM_LIT:0>:<EOL><INDENT>return (<NUM_LIT:0>, <NUM_LIT:0>, [])<EOL><DEDENT>if len(nonzeros) == <NUM_LIT:1>:<EOL><INDENT>return (<NUM_LIT:1>, <NUM_LIT:1>, [<NUM_LIT:1>])<EOL><DEDENT>prev = nonzeros[<NUM_LIT:0>]<EOL>onTime = <NUM_LIT:1><EOL>endIdx = nonzeros[-<NUM_LIT:1>]<EOL>for idx in nonzeros[<NUM_LIT:1>:]:<EOL><INDENT>if idx != prev+<NUM_LIT:1>:<EOL><INDENT>totalOnTime += onTime<EOL>numOnTimes  += <NUM_LIT:1><EOL>durations.append(onTime)<EOL>onTime       = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>onTime += <NUM_LIT:1><EOL><DEDENT>prev = idx<EOL><DEDENT>totalOnTime += onTime<EOL>numOnTimes  += <NUM_LIT:1><EOL>durations.append(onTime)<EOL>return (totalOnTime, numOnTimes, durations)<EOL>", "docstring": "Returns 3 things for a vector:\n  * the total on time\n  * the number of runs\n  * a list of the durations of each run.\n\nParameters:\n-----------------------------------------------\ninput stream: 11100000001100000000011111100000\nreturn value: (11, 3, [3, 2, 6])", "id": "f17557:m19"}
{"signature": "def getCentreAndSpreadOffsets(spaceShape,<EOL>spreadShape,<EOL>stepSize=<NUM_LIT:1>):", "body": "from nupic.math.cross import cross<EOL>shape = spaceShape<EOL>if shape[<NUM_LIT:0>] == <NUM_LIT:1> and shape[<NUM_LIT:1>] == <NUM_LIT:1>:<EOL><INDENT>centerOffsets = [(<NUM_LIT:0>,<NUM_LIT:0>)]<EOL><DEDENT>else:<EOL><INDENT>xMin = -<NUM_LIT:1> * (shape[<NUM_LIT:1>] // <NUM_LIT:2>)<EOL>xMax = xMin + shape[<NUM_LIT:1>] - <NUM_LIT:1><EOL>xPositions = list(range(stepSize * xMin, stepSize * xMax + <NUM_LIT:1>, stepSize))<EOL>yMin = -<NUM_LIT:1> * (shape[<NUM_LIT:0>] // <NUM_LIT:2>)<EOL>yMax = yMin + shape[<NUM_LIT:0>] - <NUM_LIT:1><EOL>yPositions = list(range(stepSize * yMin, stepSize * yMax + <NUM_LIT:1>, stepSize))<EOL>centerOffsets = list(cross(yPositions, xPositions))<EOL><DEDENT>numCenterOffsets = len(centerOffsets)<EOL>print(\"<STR_LIT>\", centerOffsets)<EOL>shape = spreadShape<EOL>if shape[<NUM_LIT:0>] == <NUM_LIT:1> and shape[<NUM_LIT:1>] == <NUM_LIT:1>:<EOL><INDENT>spreadOffsets = [(<NUM_LIT:0>,<NUM_LIT:0>)]<EOL><DEDENT>else:<EOL><INDENT>xMin = -<NUM_LIT:1> * (shape[<NUM_LIT:1>] // <NUM_LIT:2>)<EOL>xMax = xMin + shape[<NUM_LIT:1>] - <NUM_LIT:1><EOL>xPositions = list(range(stepSize * xMin, stepSize * xMax + <NUM_LIT:1>, stepSize))<EOL>yMin = -<NUM_LIT:1> * (shape[<NUM_LIT:0>] // <NUM_LIT:2>)<EOL>yMax = yMin + shape[<NUM_LIT:0>] - <NUM_LIT:1><EOL>yPositions = list(range(stepSize * yMin, stepSize * yMax + <NUM_LIT:1>, stepSize))<EOL>spreadOffsets = list(cross(yPositions, xPositions))<EOL>spreadOffsets.remove((<NUM_LIT:0>,<NUM_LIT:0>))<EOL>spreadOffsets.insert(<NUM_LIT:0>, (<NUM_LIT:0>,<NUM_LIT:0>))<EOL><DEDENT>numSpreadOffsets = len(spreadOffsets)<EOL>print(\"<STR_LIT>\", spreadOffsets)<EOL>return centerOffsets, spreadOffsets<EOL>", "docstring": "Generates centre offsets and spread offsets for block-mode based training\nregimes - star, cross, block.\n\n  Parameters:\n  -----------------------------------------------\n  spaceShape:   The (height, width) 
of the 2-D space to explore. This\n                sets the number of center-points.\n  spreadShape:  The shape (height, width) of the area around each center-point\n                to explore.\n  stepSize:     The step size. How big each step is, in pixels. This controls\n                *both* the spacing of the center-points within the block and the\n                points we explore around each center-point\n  retval:       (centreOffsets, spreadOffsets)", "id": "f17557:m30"}
{"signature": "def topDownCompute(self, topDownIn=None):", "body": "output = numpy.zeros(self.numberOfColumns())<EOL>columns = [self.columnForCell(idx) for idx in self.getPredictiveCells()]<EOL>output[columns] = <NUM_LIT:1><EOL>return output<EOL>", "docstring": "(From `backtracking_tm.py`)\nTop-down compute - generate expected input given output of the TM\n\n@param topDownIn top down input from the level above us\n\n@returns best estimate of the TM input that would have generated bottomUpOut.", "id": "f17559:c4:m3"}
{"signature": "@classmethod<EOL><INDENT>def read(cls, proto):<DEDENT>", "body": "tm = super(TMShimMixin, cls).read(proto)<EOL>tm.infActiveState = {\"<STR_LIT:t>\": None}<EOL>return tm<EOL>", "docstring": "Intercepts TemporalMemory deserialization request in order to initialize\n`self.infActiveState`\n\n@param proto (DynamicStructBuilder) Proto object\n\n@return (TemporalMemory) TemporalMemory shim instance", "id": "f17559:c1:m1"}
{"signature": "def topDownCompute(self, topDownIn=None):", "body": "output = numpy.zeros(self.numberOfColumns())<EOL>columns = [self.columnForCell(idx) for idx in self.getPredictiveCells()]<EOL>output[columns] = <NUM_LIT:1><EOL>return output<EOL>", "docstring": "(From `backtracking_tm.py`)\nTop-down compute - generate expected input given output of the TM\n\n@param topDownIn top down input from the level above us\n\n@returns best estimate of the TM input that would have generated bottomUpOut.", "id": "f17559:c1:m3"}
{"signature": "def compute(self, bottomUpInput, enableLearn, computeInfOutput=None):", "body": "super(TMShimMixin, self).compute(set(bottomUpInput.nonzero()[<NUM_LIT:0>]),<EOL>learn=enableLearn)<EOL>numberOfCells = self.numberOfCells()<EOL>activeState = numpy.zeros(numberOfCells)<EOL>activeState[self.getActiveCells()] = <NUM_LIT:1><EOL>self.infActiveState[\"<STR_LIT:t>\"] = activeState<EOL>output = numpy.zeros(numberOfCells)<EOL>output[self.getPredictiveCells()] = <NUM_LIT:1><EOL>output[self.getActiveCells()] = <NUM_LIT:1><EOL>return output<EOL>", "docstring": "(From `backtracking_tm.py`)\nHandle one compute, possibly learning.\n\n@param bottomUpInput     The bottom-up input, typically from a spatial pooler\n@param enableLearn       If true, perform learning\n@param computeInfOutput  If None, default behavior is to disable the inference\n                         output when enableLearn is on.\n                         If true, compute the inference output\n                         If false, do not compute the inference output", "id": "f17559:c1:m2"}
{"signature": "def compute(self, recordNum, patternNZ, classification, learn, infer):", "body": "if self.verbosity >= <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\", learn)<EOL>print(\"<STR_LIT>\", recordNum)<EOL>print(\"<STR_LIT>\" % len(patternNZ), patternNZ)<EOL>print(\"<STR_LIT>\", classification)<EOL><DEDENT>if len(self._patternNZHistory) > <NUM_LIT:0>:<EOL><INDENT>if recordNum < self._patternNZHistory[-<NUM_LIT:1>][<NUM_LIT:0>]:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if len(self._patternNZHistory) == <NUM_LIT:0> orrecordNum > self._patternNZHistory[-<NUM_LIT:1>][<NUM_LIT:0>]:<EOL><INDENT>self._patternNZHistory.append((recordNum, patternNZ))<EOL><DEDENT>retval = {}<EOL>if max(patternNZ) > self._maxInputIdx:<EOL><INDENT>newMaxInputIdx = max(patternNZ)<EOL>for nSteps in self.steps:<EOL><INDENT>self._weightMatrix[nSteps] = numpy.concatenate((<EOL>self._weightMatrix[nSteps],<EOL>numpy.zeros(shape=(newMaxInputIdx-self._maxInputIdx,<EOL>self._maxBucketIdx+<NUM_LIT:1>))), axis=<NUM_LIT:0>)<EOL><DEDENT>self._maxInputIdx = int(newMaxInputIdx)<EOL><DEDENT>if classification is not None:<EOL><INDENT>if type(classification[\"<STR_LIT>\"]) is not list:<EOL><INDENT>bucketIdxList = [classification[\"<STR_LIT>\"]]<EOL>actValueList = [classification[\"<STR_LIT>\"]]<EOL>numCategory = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>bucketIdxList = classification[\"<STR_LIT>\"]<EOL>actValueList = classification[\"<STR_LIT>\"]<EOL>numCategory = len(classification[\"<STR_LIT>\"])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if learn:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>actValueList = None<EOL>bucketIdxList = None<EOL><DEDENT>if infer:<EOL><INDENT>retval = self.infer(patternNZ, actValueList)<EOL><DEDENT>if learn and classification[\"<STR_LIT>\"] is not None:<EOL><INDENT>for categoryI in range(numCategory):<EOL><INDENT>bucketIdx = bucketIdxList[categoryI]<EOL>actValue = actValueList[categoryI]<EOL>if bucketIdx > self._maxBucketIdx:<EOL><INDENT>for nSteps 
in self.steps:<EOL><INDENT>self._weightMatrix[nSteps] = numpy.concatenate((<EOL>self._weightMatrix[nSteps],<EOL>numpy.zeros(shape=(self._maxInputIdx+<NUM_LIT:1>,<EOL>bucketIdx-self._maxBucketIdx))), axis=<NUM_LIT:1>)<EOL><DEDENT>self._maxBucketIdx = int(bucketIdx)<EOL><DEDENT>while self._maxBucketIdx > len(self._actualValues) - <NUM_LIT:1>:<EOL><INDENT>self._actualValues.append(None)<EOL><DEDENT>if self._actualValues[bucketIdx] is None:<EOL><INDENT>self._actualValues[bucketIdx] = actValue<EOL><DEDENT>else:<EOL><INDENT>if (isinstance(actValue, int) or<EOL>isinstance(actValue, float) or<EOL>isinstance(actValue, int)):<EOL><INDENT>self._actualValues[bucketIdx] = ((<NUM_LIT:1.0> - self.actValueAlpha)<EOL>* self._actualValues[bucketIdx]<EOL>+ self.actValueAlpha * actValue)<EOL><DEDENT>else:<EOL><INDENT>self._actualValues[bucketIdx] = actValue<EOL><DEDENT><DEDENT><DEDENT>for (learnRecordNum, learnPatternNZ) in self._patternNZHistory:<EOL><INDENT>error = self._calculateError(recordNum, bucketIdxList)<EOL>nSteps = recordNum - learnRecordNum<EOL>if nSteps in self.steps:<EOL><INDENT>for bit in learnPatternNZ:<EOL><INDENT>self._weightMatrix[nSteps][bit, :] += self.alpha * error[nSteps]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if infer and self.verbosity >= <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\", retval[\"<STR_LIT>\"])<EOL>for (nSteps, votes) in list(retval.items()):<EOL><INDENT>if nSteps == \"<STR_LIT>\":<EOL><INDENT>continue<EOL><DEDENT>print(\"<STR_LIT>\" % (nSteps), _pFormatArray(votes))<EOL>bestBucketIdx = votes.argmax()<EOL>print((\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (bestBucketIdx,<EOL>retval[\"<STR_LIT>\"][bestBucketIdx])))<EOL><DEDENT>print()<EOL><DEDENT>return retval<EOL>", "docstring": "Process one input sample.\n\nThis method is called by outer loop code outside the nupic-engine. 
We\nuse this instead of the nupic engine compute() because our inputs and\noutputs aren't fixed size vectors of reals.\n\n\n:param recordNum: Record number of this input pattern. Record numbers\n  normally increase sequentially by 1 each time unless there are missing\n  records in the dataset. Knowing this information insures that we don't get\n  confused by missing records.\n\n:param patternNZ: List of the active indices from the output below. When the\n  input is from TemporalMemory, this list should be the indices of the\n  active cells.\n\n:param classification: Dict of the classification information where:\n\n  - bucketIdx: list of indices of the encoder bucket\n  - actValue: list of actual values going into the encoder\n\n  Classification could be None for inference mode.\n:param learn: (bool) if true, learn this sample\n:param infer: (bool) if true, perform inference\n\n:return:    Dict containing inference results, there is one entry for each\n            step in self.steps, where the key is the number of steps, and\n            the value is an array containing the relative likelihood for\n            each bucketIdx starting from bucketIdx 0.\n\n            There is also an entry containing the average actual value to\n            use for each bucket. The key is 'actualValues'.\n\n            for example:\n\n            .. code-block:: python\n\n               {1 :             [0.1, 0.3, 0.2, 0.7],\n                 4 :             [0.2, 0.4, 0.3, 0.5],\n                 'actualValues': [1.5, 3,5, 5,5, 7.6],\n               }", "id": "f17560:c0:m1"}
{"signature": "def _pFormatArray(array_, fmt=\"<STR_LIT>\"):", "body": "return \"<STR_LIT>\" + \"<STR_LIT:U+0020>\".join(fmt % x for x in array_) + \"<STR_LIT>\"<EOL>", "docstring": "Return a string with pretty-print of a numpy array using the given format\n    for each element", "id": "f17560:m0"}
{"signature": "def _calculateError(self, recordNum, bucketIdxList):", "body": "error = dict()<EOL>targetDist = numpy.zeros(self._maxBucketIdx + <NUM_LIT:1>)<EOL>numCategories = len(bucketIdxList)<EOL>for bucketIdx in bucketIdxList:<EOL><INDENT>targetDist[bucketIdx] = <NUM_LIT:1.0>/numCategories<EOL><DEDENT>for (learnRecordNum, learnPatternNZ) in self._patternNZHistory:<EOL><INDENT>nSteps = recordNum - learnRecordNum<EOL>if nSteps in self.steps:<EOL><INDENT>predictDist = self.inferSingleStep(learnPatternNZ,<EOL>self._weightMatrix[nSteps])<EOL>error[nSteps] = targetDist - predictDist<EOL><DEDENT><DEDENT>return error<EOL>", "docstring": "Calculate error signal\n\n:param bucketIdxList: list of encoder buckets\n\n:return: dict containing error. The key is the number of steps\n         The value is a numpy array of error at the output layer", "id": "f17560:c0:m7"}
{"signature": "def setSynPermBelowStimulusInc(self, synPermBelowStimulusInc):", "body": "self._synPermBelowStimulusInc = synPermBelowStimulusInc<EOL>", "docstring": "Sets the permanence increment amount for columns that have not been\nrecently active.\n\n:param synPermBelowStimulusInc: (float) value to set.", "id": "f17561:c4:m38"}
{"signature": "def getMinPctOverlapDutyCycles(self):", "body": "return self._minPctOverlapDutyCycles<EOL>", "docstring": ":returns: (float) the minimum tolerated overlaps, given as percent of\n          neighbors overlap score", "id": "f17561:c4:m41"}
{"signature": "def setMinOverlapDutyCycles(self, minOverlapDutyCycles):", "body": "self._minOverlapDutyCycles[:] = minOverlapDutyCycles[:]<EOL>", "docstring": "Sets the minimum overlap duty cycles for all columns. \n``minOverlapDutyCycles`` size must match the number of columns.\n\n:param minOverlapDutyCycles: (iter) value to set.", "id": "f17561:c4:m50"}
{"signature": "def setLocalAreaDensity(self, localAreaDensity):", "body": "assert(localAreaDensity > <NUM_LIT:0> and localAreaDensity <= <NUM_LIT:1>)<EOL>self._localAreaDensity = localAreaDensity<EOL>self._numActiveColumnsPerInhArea = <NUM_LIT:0><EOL>", "docstring": "Sets the local area density. Invalidates the 'numActiveColumnsPerInhArea'\nparameter\n\n:param localAreaDensity: (float) value to set", "id": "f17561:c4:m14"}
{"signature": "def setUpdatePeriod(self, updatePeriod):", "body": "self._updatePeriod = updatePeriod<EOL>", "docstring": ":param updatePeriod: (int) The period at which active duty cycles are \n       updated.", "id": "f17561:c4:m30"}
{"signature": "def _mapPotential(self, index):", "body": "centerInput = self._mapColumn(index)<EOL>columnInputs = self._getInputNeighborhood(centerInput).astype(uintType)<EOL>numPotential = int(columnInputs.size * self._potentialPct + <NUM_LIT:0.5>)<EOL>selectedInputs = numpy.empty(numPotential, dtype=uintType)<EOL>self._random.sample(columnInputs, selectedInputs)<EOL>potential = numpy.zeros(self._numInputs, dtype=uintType)<EOL>potential[selectedInputs] = <NUM_LIT:1><EOL>return potential<EOL>", "docstring": "Maps a column to its input bits. This method encapsulates the topology of\nthe region. It takes the index of the column as an argument and determines\nwhat are the indices of the input vector that are located within the\ncolumn's potential pool. The return value is a list containing the indices\nof the input bits. The current implementation of the base class only\nsupports a 1 dimensional topology of columns with a 1 dimensional topology\nof inputs. To extend this class to support 2-D topology you will need to\noverride this method. Examples of the expected output of this method:\n* If the potentialRadius is greater than or equal to the largest input\n  dimension then each column connects to all of the inputs.\n* If the topology is one dimensional, the input space is divided up evenly\n  among the columns and each column is centered over its share of the\n  inputs.  If the potentialRadius is 5, then each column connects to the\n  input it is centered above as well as the 5 inputs to the left of that\n  input and the five inputs to the right of that input, wrapping around if\n  wrapAround=True.\n* If the topology is two dimensional, the input space is again divided up\n  evenly among the columns and each column is centered above its share of\n  the inputs.  
If the potentialRadius is 5, the column connects to a square\n  that has 11 inputs on a side and is centered on the input that the column\n  is centered above.\n\nParameters:\n----------------------------\n:param index:   The index identifying a column in the permanence, potential\n                and connectivity matrices.", "id": "f17561:c4:m78"}
{"signature": "def _initPermNonConnected(self):", "body": "p = self._synPermConnected * self._random.getReal64()<EOL>p = int(p*<NUM_LIT>) / <NUM_LIT><EOL>return p<EOL>", "docstring": "Returns a randomly generated permanence value for a synapses that is to be\ninitialized in a non-connected state.", "id": "f17561:c4:m75"}
{"signature": "def setStimulusThreshold(self, stimulusThreshold):", "body": "self._stimulusThreshold = stimulusThreshold<EOL>", "docstring": ":param stimulusThreshold: (float) value to set.", "id": "f17561:c4:m16"}
{"signature": "def _isUpdateRound(self):", "body": "return (self._iterationNum % self._updatePeriod) == <NUM_LIT:0><EOL>", "docstring": "returns true if enough rounds have passed to warrant updates of\nduty cycles", "id": "f17561:c4:m89"}
{"signature": "def getSynPermBelowStimulusInc(self):", "body": "return self._synPermBelowStimulusInc<EOL>", "docstring": ":returns: (float) the permanence increment amount for columns that have not \n          been recently active.", "id": "f17561:c4:m37"}
{"signature": "def _inhibitColumnsLocal(self, overlaps, density):", "body": "activeArray = numpy.zeros(self._numColumns, dtype=\"<STR_LIT:bool>\")<EOL>for column, overlap in enumerate(overlaps):<EOL><INDENT>if overlap >= self._stimulusThreshold:<EOL><INDENT>neighborhood = self._getColumnNeighborhood(column)<EOL>neighborhoodOverlaps = overlaps[neighborhood]<EOL>numBigger = numpy.count_nonzero(neighborhoodOverlaps > overlap)<EOL>ties = numpy.where(neighborhoodOverlaps == overlap)<EOL>tiedNeighbors = neighborhood[ties]<EOL>numTiesLost = numpy.count_nonzero(activeArray[tiedNeighbors])<EOL>numActive = int(<NUM_LIT:0.5> + density * len(neighborhood))<EOL>if numBigger + numTiesLost < numActive:<EOL><INDENT>activeArray[column] = True<EOL><DEDENT><DEDENT><DEDENT>return activeArray.nonzero()[<NUM_LIT:0>]<EOL>", "docstring": "Performs local inhibition. Local inhibition is performed on a column by\ncolumn basis. Each column observes the overlaps of its neighbors and is\nselected if its overlap score is within the top 'numActive' in its local\nneighborhood. At most half of the columns in a local neighborhood are\nallowed to be active. Columns with an overlap score below the\n'stimulusThreshold' are always inhibited.\n\n:param overlaps: an array containing the overlap score for each  column.\n                The overlap score for a column is defined as the number\n                of synapses in a \"connected state\" (connected synapses)\n                that are connected to input bits which are turned on.\n:param density: The fraction of columns to survive inhibition. This\n                value is only an intended target. Since the surviving\n                columns are picked in a local fashion, the exact fraction\n                of surviving columns is likely to vary.\n@return list with indices of the winning columns", "id": "f17561:c4:m88"}
{"signature": "def __getitem__(self, columnIndex):", "body": "return super(_SparseMatrixCorticalColumnAdapter, self).getRow(columnIndex)<EOL>", "docstring": "Wraps getRow() such that instances may be indexed by columnIndex.", "id": "f17561:c1:m0"}
{"signature": "def _getColumnNeighborhood(self, centerColumn):", "body": "if self._wrapAround:<EOL><INDENT>return topology.wrappingNeighborhood(centerColumn,<EOL>self._inhibitionRadius,<EOL>self._columnDimensions)<EOL><DEDENT>else:<EOL><INDENT>return topology.neighborhood(centerColumn,<EOL>self._inhibitionRadius,<EOL>self._columnDimensions)<EOL><DEDENT>", "docstring": "Gets a neighborhood of columns.\n\nSimply calls topology.neighborhood or topology.wrappingNeighborhood\n\nA subclass can insert different topology behavior by overriding this method.\n\n:param centerColumn (int)\nThe center of the neighborhood.\n\n@returns (1D numpy array of integers)\nThe columns in the neighborhood.", "id": "f17561:c4:m90"}
{"signature": "def getPotentialRadius(self):", "body": "return self._potentialRadius<EOL>", "docstring": ":returns: (float) the potential radius", "id": "f17561:c4:m5"}
{"signature": "def setGlobalInhibition(self, globalInhibition):", "body": "self._globalInhibition = globalInhibition<EOL>", "docstring": ":param globalInhibition: (bool) value to set.", "id": "f17561:c4:m10"}
{"signature": "def compute(self, inputVector, learn, activeArray):", "body": "if not isinstance(inputVector, numpy.ndarray):<EOL><INDENT>raise TypeError(\"<STR_LIT>\" %<EOL>str(type(inputVector)))<EOL><DEDENT>if inputVector.size != self._numInputs:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\" % (<EOL>inputVector.size, self._numInputs))<EOL><DEDENT>self._updateBookeepingVars(learn)<EOL>inputVector = numpy.array(inputVector, dtype=realDType)<EOL>inputVector.reshape(-<NUM_LIT:1>)<EOL>self._overlaps = self._calculateOverlap(inputVector)<EOL>if learn:<EOL><INDENT>self._boostedOverlaps = self._boostFactors * self._overlaps<EOL><DEDENT>else:<EOL><INDENT>self._boostedOverlaps = self._overlaps<EOL><DEDENT>activeColumns = self._inhibitColumns(self._boostedOverlaps)<EOL>if learn:<EOL><INDENT>self._adaptSynapses(inputVector, activeColumns)<EOL>self._updateDutyCycles(self._overlaps, activeColumns)<EOL>self._bumpUpWeakColumns()<EOL>self._updateBoostFactors()<EOL>if self._isUpdateRound():<EOL><INDENT>self._updateInhibitionRadius()<EOL>self._updateMinDutyCycles()<EOL><DEDENT><DEDENT>activeArray.fill(<NUM_LIT:0>)<EOL>activeArray[activeColumns] = <NUM_LIT:1><EOL>", "docstring": "This is the primary public method of the SpatialPooler class. This\nfunction takes a input vector and outputs the indices of the active columns.\nIf 'learn' is set to True, this method also updates the permanences of the\ncolumns.\n\n:param inputVector: A numpy array of 0's and 1's that comprises the input\n    to the spatial pooler. The array will be treated as a one dimensional\n    array, therefore the dimensions of the array do not have to match the\n    exact dimensions specified in the class constructor. In fact, even a\n    list would suffice. The number of input bits in the vector must,\n    however, match the number of bits specified by the call to the\n    constructor. 
Therefore there must be a '0' or '1' in the array for\n    every input bit.\n:param learn: A boolean value indicating whether learning should be\n    performed. Learning entails updating the  permanence values of the\n    synapses, and hence modifying the 'state' of the model. Setting\n    learning to 'off' freezes the SP and has many uses. For example, you\n    might want to feed in various inputs and examine the resulting SDR's.\n:param activeArray: An array whose size is equal to the number of columns.\n    Before the function returns this array will be populated with 1's at\n    the indices of the active columns, and 0's everywhere else.", "id": "f17561:c4:m59"}
{"signature": "def getStimulusThreshold(self):", "body": "return self._stimulusThreshold<EOL>", "docstring": ":returns: (int) the stimulus threshold", "id": "f17561:c4:m15"}
{"signature": "def setPotentialPct(self, potentialPct):", "body": "self._potentialPct = potentialPct<EOL>", "docstring": ":param potentialPct: (float) value to set", "id": "f17561:c4:m8"}
{"signature": "def _updateDutyCycles(self, overlaps, activeColumns):", "body": "overlapArray = numpy.zeros(self._numColumns, dtype=realDType)<EOL>activeArray = numpy.zeros(self._numColumns, dtype=realDType)<EOL>overlapArray[overlaps > <NUM_LIT:0>] = <NUM_LIT:1><EOL>activeArray[activeColumns] = <NUM_LIT:1><EOL>period = self._dutyCyclePeriod<EOL>if (period > self._iterationNum):<EOL><INDENT>period = self._iterationNum<EOL><DEDENT>self._overlapDutyCycles = self._updateDutyCyclesHelper(<EOL>self._overlapDutyCycles,<EOL>overlapArray,<EOL>period<EOL>)<EOL>self._activeDutyCycles = self._updateDutyCyclesHelper(<EOL>self._activeDutyCycles,<EOL>activeArray,<EOL>period<EOL>)<EOL>", "docstring": "Updates the duty cycles for each column. The OVERLAP duty cycle is a moving\naverage of the number of inputs which overlapped with the each column. The\nACTIVITY duty cycles is a moving average of the frequency of activation for\neach column.\n\nParameters:\n----------------------------\n:param overlaps:\n                An array containing the overlap score for each column.\n                The overlap score for a column is defined as the number\n                of synapses in a \"connected state\" (connected synapses)\n                that are connected to input bits which are turned on.\n:param activeColumns:\n                An array containing the indices of the active columns,\n                the sparse set of columns which survived inhibition", "id": "f17561:c4:m64"}
{"signature": "def _updateBookeepingVars(self, learn):", "body": "self._iterationNum += <NUM_LIT:1><EOL>if learn:<EOL><INDENT>self._iterationLearnNum += <NUM_LIT:1><EOL><DEDENT>", "docstring": "Updates counter instance variables each round.\n\nParameters:\n----------------------------\n:param learn:   a boolean value indicating whether learning should be\n                performed. Learning entails updating the  permanence\n                values of the synapses, and hence modifying the 'state'\n                of the model. setting learning to 'off' might be useful\n                for indicating separate training vs. testing sets.", "id": "f17561:c4:m83"}
{"signature": "def _updateBoostFactorsLocal(self):", "body": "<EOL>targetDensity = numpy.zeros(self._numColumns, dtype=realDType)<EOL>for i in range(self._numColumns):<EOL><INDENT>maskNeighbors = self._getColumnNeighborhood(i)<EOL>targetDensity[i] = numpy.mean(self._activeDutyCycles[maskNeighbors])<EOL><DEDENT>self._boostFactors = numpy.exp(<EOL>(targetDensity - self._activeDutyCycles) * self._boostStrength)<EOL>", "docstring": "Update boost factors when local inhibition is used", "id": "f17561:c4:m82"}
{"signature": "def _adaptSynapses(self, inputVector, activeColumns):", "body": "inputIndices = numpy.where(inputVector > <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>permChanges = numpy.zeros(self._numInputs, dtype=realDType)<EOL>permChanges.fill(-<NUM_LIT:1> * self._synPermInactiveDec)<EOL>permChanges[inputIndices] = self._synPermActiveInc<EOL>for columnIndex in activeColumns:<EOL><INDENT>perm = self._permanences[columnIndex]<EOL>maskPotential = numpy.where(self._potentialPools[columnIndex] > <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>perm[maskPotential] += permChanges[maskPotential]<EOL>self._updatePermanencesForColumn(perm, columnIndex, raisePerm=True)<EOL><DEDENT>", "docstring": "The primary method in charge of learning. Adapts the permanence values of\nthe synapses based on the input vector, and the chosen columns after\ninhibition round. Permanence values are increased for synapses connected to\ninput bits that are turned on, and decreased for synapses connected to\ninputs bits that are turned off.\n\nParameters:\n----------------------------\n:param inputVector:\n                A numpy array of 0's and 1's that comprises the input to\n                the spatial pooler. There exists an entry in the array\n                for every input bit.\n:param activeColumns:\n                An array containing the indices of the columns that\n                survived inhibition.", "id": "f17561:c4:m70"}
{"signature": "def getPermanence(self, columnIndex, permanence):", "body": "assert(columnIndex < self._numColumns)<EOL>permanence[:] = self._permanences[columnIndex]<EOL>", "docstring": "Returns the permanence values for a given column. ``permanence`` size\nmust match the number of inputs.\n\n:param columnIndex: (int) column index to get permanence for.\n:param permanence: (list) will be overwritten with permanences.", "id": "f17561:c4:m53"}
{"signature": "def getIterationNum(self):", "body": "return self._iterationNum<EOL>", "docstring": ":returns: the iteration number", "id": "f17561:c4:m23"}
{"signature": "def getNumActiveColumnsPerInhArea(self):", "body": "return self._numActiveColumnsPerInhArea<EOL>", "docstring": ":returns: (float) the number of active columns per inhibition area. Returns \n          a value less than 0 if parameter is unused.", "id": "f17561:c4:m11"}
{"signature": "def _updateInhibitionRadius(self):", "body": "if self._globalInhibition:<EOL><INDENT>self._inhibitionRadius = int(self._columnDimensions.max())<EOL>return<EOL><DEDENT>avgConnectedSpan = numpy.average(<EOL>[self._avgConnectedSpanForColumnND(i)<EOL>for i in range(self._numColumns)]<EOL>)<EOL>columnsPerInput = self._avgColumnsPerInput()<EOL>diameter = avgConnectedSpan * columnsPerInput<EOL>radius = (diameter - <NUM_LIT:1>) / <NUM_LIT><EOL>radius = max(<NUM_LIT:1.0>, radius)<EOL>self._inhibitionRadius = int(radius + <NUM_LIT:0.5>)<EOL>", "docstring": "Update the inhibition radius. The inhibition radius is a measure of the\nsquare (or hypersquare) of columns that each a column is \"connected to\"\non average. Since columns are are not connected to each other directly, we\ndetermine this quantity by first figuring out how many *inputs* a column is\nconnected to, and then multiplying it by the total number of columns that\nexist for each input. For multiple dimension the aforementioned\ncalculations are averaged over all dimensions of inputs and columns. This\nvalue is meaningless if global inhibition is enabled.", "id": "f17561:c4:m65"}
{"signature": "def getInhibitionRadius(self):", "body": "return self._inhibitionRadius<EOL>", "docstring": ":returns: (int) the inhibition radius", "id": "f17561:c4:m17"}
{"signature": "def _seed(self, seed=-<NUM_LIT:1>):", "body": "if seed != -<NUM_LIT:1>:<EOL><INDENT>self._random = NupicRandom(seed)<EOL><DEDENT>else:<EOL><INDENT>self._random = NupicRandom()<EOL><DEDENT>", "docstring": "Initialize the random seed", "id": "f17561:c4:m92"}
{"signature": "def setActiveDutyCycles(self, activeDutyCycles):", "body": "self._activeDutyCycles[:] = activeDutyCycles<EOL>", "docstring": "Sets the activity duty cycles for all columns. ``activeDutyCycles`` size \nmust match the number of columns.\n\n:param activeDutyCycles: (list) value to set.", "id": "f17561:c4:m48"}
{"signature": "def setSynPermActiveInc(self, synPermActiveInc):", "body": "self._synPermActiveInc = synPermActiveInc<EOL>", "docstring": "Sets the permanence increment amount for active synapses.\n\n:param synPermActiveInc: (float) value to set.", "id": "f17561:c4:m34"}
{"signature": "def __setstate__(self, state):", "body": "<EOL>if state['<STR_LIT>'] < <NUM_LIT:2>:<EOL><INDENT>state['<STR_LIT>'] = True<EOL><DEDENT>if state['<STR_LIT>'] < <NUM_LIT:3>:<EOL><INDENT>state['<STR_LIT>'] = numpy.zeros(self._numColumns, dtype=realDType)<EOL>state['<STR_LIT>'] = numpy.zeros(self._numColumns, dtype=realDType)<EOL><DEDENT>state['<STR_LIT>'] = VERSION<EOL>self.__dict__.update(state)<EOL>", "docstring": "Initialize class properties from stored values.", "id": "f17561:c4:m93"}
{"signature": "def update(self, columnIndex, vector):", "body": "return super(_SparseMatrixCorticalColumnAdapter, self).setRowFromDense(<EOL>columnIndex, vector<EOL>)<EOL>", "docstring": "Wraps setRowFromDense()", "id": "f17561:c1:m2"}
{"signature": "def getNumInputs(self):", "body": "return self._numInputs<EOL>", "docstring": ":returns: (int) the total number of inputs.", "id": "f17561:c4:m4"}
{"signature": "def setBoostStrength(self, boostStrength):", "body": "self._boostStrength = boostStrength<EOL>", "docstring": "Sets the maximum boost value.\n:param boostStrength: (float) value to set", "id": "f17561:c4:m22"}
{"signature": "def setSpVerbosity(self, spVerbosity):", "body": "self._spVerbosity = spVerbosity<EOL>", "docstring": ":param spVerbosity: (int) value to set, larger is more verbose.", "id": "f17561:c4:m28"}
{"signature": "def _bumpUpWeakColumns(self):", "body": "weakColumns = numpy.where(self._overlapDutyCycles<EOL>< self._minOverlapDutyCycles)[<NUM_LIT:0>]<EOL>for columnIndex in weakColumns:<EOL><INDENT>perm = self._permanences[columnIndex].astype(realDType)<EOL>maskPotential = numpy.where(self._potentialPools[columnIndex] > <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>perm[maskPotential] += self._synPermBelowStimulusInc<EOL>self._updatePermanencesForColumn(perm, columnIndex, raisePerm=False)<EOL><DEDENT>", "docstring": "This method increases the permanence values of synapses of columns whose\nactivity level has been too low. Such columns are identified by having an\noverlap duty cycle that drops too much below those of their peers. The\npermanence values for such columns are increased.", "id": "f17561:c4:m71"}
{"signature": "def _updateBoostFactorsGlobal(self):", "body": "<EOL>if (self._localAreaDensity > <NUM_LIT:0>):<EOL><INDENT>targetDensity = self._localAreaDensity<EOL><DEDENT>else:<EOL><INDENT>inhibitionArea = ((<NUM_LIT:2> * self._inhibitionRadius + <NUM_LIT:1>)<EOL>** self._columnDimensions.size)<EOL>inhibitionArea = min(self._numColumns, inhibitionArea)<EOL>targetDensity = float(self._numActiveColumnsPerInhArea) / inhibitionArea<EOL>targetDensity = min(targetDensity, <NUM_LIT:0.5>)<EOL><DEDENT>self._boostFactors = numpy.exp(<EOL>(targetDensity - self._activeDutyCycles) * self._boostStrength)<EOL>", "docstring": "Update boost factors when global inhibition is used", "id": "f17561:c4:m81"}
{"signature": "def setMinPctOverlapDutyCycles(self, minPctOverlapDutyCycles):", "body": "self._minPctOverlapDutyCycles = minPctOverlapDutyCycles<EOL>", "docstring": "Sets the minimum tolerated activity duty cycle, given as percent of\nneighbors' activity duty cycle.\n\n:param minPctOverlapDutyCycles: (float) value to set.", "id": "f17561:c4:m42"}
{"signature": "def setPotentialRadius(self, potentialRadius):", "body": "self._potentialRadius = potentialRadius<EOL>", "docstring": ":param potentialRadius: (float) value to set", "id": "f17561:c4:m6"}
{"signature": "def getSynPermInactiveDec(self):", "body": "return self._synPermInactiveDec<EOL>", "docstring": ":returns: (float) the permanence decrement amount for inactive synapses.", "id": "f17561:c4:m35"}
{"signature": "def _avgConnectedSpanForColumn1D(self, columnIndex):", "body": "assert(self._inputDimensions.size == <NUM_LIT:1>)<EOL>connected = self._connectedSynapses[columnIndex].nonzero()[<NUM_LIT:0>]<EOL>if connected.size == <NUM_LIT:0>:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>return max(connected) - min(connected) + <NUM_LIT:1><EOL><DEDENT>", "docstring": "The range of connected synapses for column. This is used to\ncalculate the inhibition radius. This variation of the function only\nsupports a 1 dimensional column topology.\n\nParameters:\n----------------------------\n:param columnIndex:   The index identifying a column in the permanence,\n                      potential and connectivity matrices", "id": "f17561:c4:m67"}
{"signature": "def getActiveDutyCycles(self, activeDutyCycles):", "body": "activeDutyCycles[:] = self._activeDutyCycles[:]<EOL>", "docstring": "Gets the activity duty cycles for all columns. Input list will be \noverwritten.\n\n:param activeDutyCycles: (list) size must match number of columns.", "id": "f17561:c4:m47"}
{"signature": "def saveToFile(self, filePath):", "body": "self.cells4.saveToFile(filePath)<EOL>", "docstring": "Save Cells4 state to a file. File can be loaded with :meth:`loadFromFile`.", "id": "f17562:c0:m8"}
{"signature": "def _slowIsSegmentActive(self, seg, timeStep):", "body": "numSyn = seg.size()<EOL>numActiveSyns = <NUM_LIT:0><EOL>for synIdx in range(numSyn):<EOL><INDENT>if seg.getPermanence(synIdx) < self.connectedPerm:<EOL><INDENT>continue<EOL><DEDENT>sc, si = self.getColCellIdx(seg.getSrcCellIdx(synIdx))<EOL>if self.infActiveState[timeStep][sc, si]:<EOL><INDENT>numActiveSyns += <NUM_LIT:1><EOL>if numActiveSyns >= self.activationThreshold:<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT>return numActiveSyns >= self.activationThreshold<EOL>", "docstring": "A segment is active if it has >= activationThreshold connected\nsynapses that are active due to infActiveState.", "id": "f17562:c0:m21"}
{"signature": "def _getEphemeralMembers(self):", "body": "e = BacktrackingTM._getEphemeralMembers(self)<EOL>if self.makeCells4Ephemeral:<EOL><INDENT>e.extend(['<STR_LIT>'])<EOL><DEDENT>return e<EOL>", "docstring": "List of our member variables that we don't need to be saved", "id": "f17562:c0:m6"}
{"signature": "def reset(self):", "body": "if self.verbosity >= <NUM_LIT:3>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>self._setStatePointers()<EOL>self.cells4.reset()<EOL>BacktrackingTM.reset(self)<EOL>", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.reset`.", "id": "f17562:c0:m16"}
{"signature": "def _initEphemerals(self):", "body": "BacktrackingTM._initEphemerals(self)<EOL>self.allocateStatesInCPP = False<EOL>self.retrieveLearningStates = False<EOL>if self.makeCells4Ephemeral:<EOL><INDENT>self._initCells4()<EOL><DEDENT>", "docstring": "Initialize all ephemeral members after being restored to a pickled state.", "id": "f17562:c0:m7"}
{"signature": "def _copyAllocatedStates(self):", "body": "<EOL>if self.verbosity > <NUM_LIT:1> or self.retrieveLearningStates:<EOL><INDENT>(activeT, activeT1, predT, predT1) = self.cells4.getLearnStates()<EOL>self.lrnActiveState['<STR_LIT>'] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn))<EOL>self.lrnActiveState['<STR_LIT:t>'] = activeT.reshape((self.numberOfCols, self.cellsPerColumn))<EOL>self.lrnPredictedState['<STR_LIT>'] = predT1.reshape((self.numberOfCols, self.cellsPerColumn))<EOL>self.lrnPredictedState['<STR_LIT:t>'] = predT.reshape((self.numberOfCols, self.cellsPerColumn))<EOL><DEDENT>if self.allocateStatesInCPP:<EOL><INDENT>assert False<EOL>(activeT, activeT1, predT, predT1, colConfidenceT, colConfidenceT1, confidenceT,<EOL>confidenceT1) = self.cells4.getStates()<EOL>self.cellConfidence['<STR_LIT:t>'] = confidenceT.reshape((self.numberOfCols, self.cellsPerColumn))<EOL>self.cellConfidence['<STR_LIT>'] = confidenceT1.reshape((self.numberOfCols, self.cellsPerColumn))<EOL>self.colConfidence['<STR_LIT:t>'] = colConfidenceT.reshape(self.numberOfCols)<EOL>self.colConfidence['<STR_LIT>'] = colConfidenceT1.reshape(self.numberOfCols)<EOL>self.infActiveState['<STR_LIT>'] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn))<EOL>self.infActiveState['<STR_LIT:t>'] = activeT.reshape((self.numberOfCols, self.cellsPerColumn))<EOL>self.infPredictedState['<STR_LIT>'] = predT1.reshape((self.numberOfCols, self.cellsPerColumn))<EOL>self.infPredictedState['<STR_LIT:t>'] = predT.reshape((self.numberOfCols, self.cellsPerColumn))<EOL><DEDENT>", "docstring": "If state is allocated in CPP, copy over the data into our numpy arrays.", "id": "f17562:c0:m14"}
{"signature": "def _extractCallingMethodArgs():", "body": "import inspect<EOL>import copy<EOL>callingFrame = inspect.stack()[<NUM_LIT:1>][<NUM_LIT:0>]<EOL>argNames, _, _, frameLocalVarDict = inspect.getargvalues(callingFrame)<EOL>argNames.remove(\"<STR_LIT>\")<EOL>args = copy.copy(frameLocalVarDict)<EOL>for varName in frameLocalVarDict:<EOL><INDENT>if varName not in argNames:<EOL><INDENT>args.pop(varName)<EOL><DEDENT><DEDENT>return args<EOL>", "docstring": "Returns args dictionary from the calling method", "id": "f17562:m0"}
{"signature": "def getNumSegmentsInCell(self, c, i):", "body": "return self.cells4.nSegmentsOnCell(c,i)<EOL>", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getNumSegmentsInCell`.", "id": "f17562:c0:m28"}
{"signature": "def write(self, proto):", "body": "<EOL>super(BacktrackingTMCPP, self).write(proto.baseTM)<EOL>self.cells4.write(proto.cells4)<EOL>proto.makeCells4Ephemeral = self.makeCells4Ephemeral<EOL>proto.seed = self.seed<EOL>proto.checkSynapseConsistency = self.checkSynapseConsistency<EOL>proto.initArgs = json.dumps(self._initArgsDict)<EOL>", "docstring": "Populate serialization proto instance.\n\n        :param proto: (BacktrackingTMCppProto) the proto instance to populate", "id": "f17562:c0:m2"}
{"signature": "def getSegmentInfo(self, collectActiveData = False):", "body": "<EOL>assert collectActiveData == False<EOL>nSegments, nSynapses = self.getNumSegments(), self.cells4.nSynapses()<EOL>distSegSizes, distNSegsPerCell = {}, {}<EOL>nActiveSegs, nActiveSynapses = <NUM_LIT:0>, <NUM_LIT:0><EOL>distPermValues = {}   <EOL>numAgeBuckets = <NUM_LIT:20><EOL>distAges = []<EOL>ageBucketSize = int((self.iterationIdx+<NUM_LIT:20>) / <NUM_LIT:20>)<EOL>for i in range(numAgeBuckets):<EOL><INDENT>distAges.append(['<STR_LIT>' % (i*ageBucketSize, (i+<NUM_LIT:1>)*ageBucketSize-<NUM_LIT:1>), <NUM_LIT:0>])<EOL><DEDENT>for c in range(self.numberOfCols):<EOL><INDENT>for i in range(self.cellsPerColumn):<EOL><INDENT>nSegmentsThisCell = self.getNumSegmentsInCell(c,i)<EOL>if nSegmentsThisCell > <NUM_LIT:0>:<EOL><INDENT>if nSegmentsThisCell in distNSegsPerCell:<EOL><INDENT>distNSegsPerCell[nSegmentsThisCell] += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>distNSegsPerCell[nSegmentsThisCell] = <NUM_LIT:1><EOL><DEDENT>segList = self.cells4.getNonEmptySegList(c,i)<EOL>for segIdx in range(nSegmentsThisCell):<EOL><INDENT>seg = self.getSegmentOnCell(c, i, segIdx)<EOL>nSynapsesThisSeg = len(seg) - <NUM_LIT:1><EOL>if nSynapsesThisSeg > <NUM_LIT:0>:<EOL><INDENT>if nSynapsesThisSeg in distSegSizes:<EOL><INDENT>distSegSizes[nSynapsesThisSeg] += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>distSegSizes[nSynapsesThisSeg] = <NUM_LIT:1><EOL><DEDENT>for syn in seg[<NUM_LIT:1>:]:<EOL><INDENT>p = int(syn[<NUM_LIT:2>]*<NUM_LIT:10>)<EOL>if p in distPermValues:<EOL><INDENT>distPermValues[p] += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>distPermValues[p] = <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>segObj = self.cells4.getSegment(c, i, segList[segIdx])<EOL>age = self.iterationIdx - segObj.getLastActiveIteration()<EOL>ageBucket = int(age/ageBucketSize)<EOL>distAges[ageBucket][<NUM_LIT:1>] += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT><DEDENT>return (nSegments, nSynapses, nActiveSegs, nActiveSynapses,distSegSizes, 
distNSegsPerCell, distPermValues, distAges)<EOL>", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentInfo`.", "id": "f17562:c0:m29"}
{"signature": "def getSegmentOnCell(self, c, i, segIdx):", "body": "segList = self.cells4.getNonEmptySegList(c,i)<EOL>seg = self.cells4.getSegment(c, i, segList[segIdx])<EOL>numSyn = seg.size()<EOL>assert numSyn != <NUM_LIT:0><EOL>result = []<EOL>result.append([int(segIdx), bool(seg.isSequenceSegment()),<EOL>seg.getPositiveActivations(),<EOL>seg.getTotalActivations(), seg.getLastActiveIteration(),<EOL>seg.getLastPosDutyCycle(),<EOL>seg.getLastPosDutyCycleIteration()])<EOL>for s in range(numSyn):<EOL><INDENT>sc, si = self.getColCellIdx(seg.getSrcCellIdx(s))<EOL>result.append([int(sc), int(si), seg.getPermanence(s)])<EOL><DEDENT>return result<EOL>", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getSegmentOnCell`.", "id": "f17562:c0:m25"}
{"signature": "def getNumSegments(self):", "body": "return self.cells4.nSegments()<EOL>", "docstring": "Overrides :meth:`nupic.algorithms.backtracking_tm.BacktrackingTM.getNumSegments`.", "id": "f17562:c0:m26"}
{"signature": "def __setstate__(self, state):", "body": "self.__dict__.update(state)<EOL>if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._mode = Anomaly.MODE_PURE<EOL><DEDENT>if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._movingAverage = None<EOL><DEDENT>if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._binaryThreshold = None<EOL><DEDENT>", "docstring": "deserialization", "id": "f17563:c0:m4"}
{"signature": "def compute(self, activeColumns, predictedColumns,<EOL>inputValue=None, timestamp=None):", "body": "<EOL>anomalyScore = computeRawAnomalyScore(activeColumns, predictedColumns)<EOL>if self._mode == Anomaly.MODE_PURE:<EOL><INDENT>score = anomalyScore<EOL><DEDENT>elif self._mode == Anomaly.MODE_LIKELIHOOD:<EOL><INDENT>if inputValue is None:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>probability = self._likelihood.anomalyProbability(<EOL>inputValue, anomalyScore, timestamp)<EOL>score = <NUM_LIT:1> - probability<EOL><DEDENT>elif self._mode == Anomaly.MODE_WEIGHTED:<EOL><INDENT>probability = self._likelihood.anomalyProbability(<EOL>inputValue, anomalyScore, timestamp)<EOL>score = anomalyScore * (<NUM_LIT:1> - probability)<EOL><DEDENT>if self._movingAverage is not None:<EOL><INDENT>score = self._movingAverage.next(score)<EOL><DEDENT>if self._binaryThreshold is not None:<EOL><INDENT>if score >= self._binaryThreshold:<EOL><INDENT>score = <NUM_LIT:1.0><EOL><DEDENT>else:<EOL><INDENT>score = <NUM_LIT:0.0><EOL><DEDENT><DEDENT>return score<EOL>", "docstring": "Compute the anomaly score as the percent of active columns not predicted.\n\n        :param activeColumns: array of active column indices\n        :param predictedColumns: array of columns indices predicted in this step\n                                 (used for anomaly in step T+1)\n        :param inputValue: (optional) value of current input to encoders\n                                      (eg \"cat\" for category encoder)\n                                      (used in anomaly-likelihood)\n        :param timestamp: (optional) date timestamp when the sample occured\n                                     (used in anomaly-likelihood)\n        :returns: the computed anomaly score; float 0..1", "id": "f17563:c0:m1"}
{"signature": "def freeNSynapses(self, numToFree, inactiveSynapseIndices, verbosity= <NUM_LIT:0>):", "body": "<EOL>assert (numToFree <= len(self.syns))<EOL>if (verbosity >= <NUM_LIT:4>):<EOL><INDENT>print(\"<STR_LIT>\", numToFree, end='<STR_LIT:U+0020>')<EOL>print(\"<STR_LIT>\", end='<STR_LIT:U+0020>')<EOL>for i in inactiveSynapseIndices:<EOL><INDENT>print(self.syns[i][<NUM_LIT:0>:<NUM_LIT:2>], end='<STR_LIT:U+0020>')<EOL><DEDENT>print()<EOL><DEDENT>if len(inactiveSynapseIndices) > <NUM_LIT:0>:<EOL><INDENT>perms = numpy.array([self.syns[i][<NUM_LIT:2>] for i in inactiveSynapseIndices])<EOL>candidates = numpy.array(inactiveSynapseIndices)[<EOL>perms.argsort()[<NUM_LIT:0>:numToFree]]<EOL>candidates = list(candidates)<EOL><DEDENT>else:<EOL><INDENT>candidates = []<EOL><DEDENT>if len(candidates) < numToFree:<EOL><INDENT>activeSynIndices = [i for i in range(len(self.syns))<EOL>if i not in inactiveSynapseIndices]<EOL>perms = numpy.array([self.syns[i][<NUM_LIT:2>] for i in activeSynIndices])<EOL>moreToFree = numToFree - len(candidates)<EOL>moreCandidates = numpy.array(activeSynIndices)[<EOL>perms.argsort()[<NUM_LIT:0>:moreToFree]]<EOL>candidates += list(moreCandidates)<EOL><DEDENT>if verbosity >= <NUM_LIT:4>:<EOL><INDENT>print(\"<STR_LIT>\" % (<EOL>len(candidates)), candidates)<EOL>print(\"<STR_LIT>\", end='<STR_LIT:U+0020>')<EOL>self.debugPrint()<EOL><DEDENT>synsToDelete = [self.syns[i] for i in candidates]<EOL>for syn in synsToDelete:<EOL><INDENT>self.syns.remove(syn)<EOL><DEDENT>if verbosity >= <NUM_LIT:4>:<EOL><INDENT>print(\"<STR_LIT>\", end='<STR_LIT:U+0020>')<EOL>self.debugPrint()<EOL><DEDENT>", "docstring": "Free up some synapses in this segment. We always free up inactive\n        synapses (lowest permanence freed up first) before we start to free up\n        active ones.\n\n        :param numToFree              number of synapses to free up\n        :param inactiveSynapseIndices list of the inactive synapse indices.", "id": "f17565:c1:m10"}
{"signature": "def _setRandomState(self, state):", "body": "self._random = pickle.loads(state)<EOL>", "docstring": "@internal Set the random number state.\n\n        This is used during unit testing to generate repeatable results.", "id": "f17565:c0:m17"}
{"signature": "def _addToSegmentUpdates(self, c, i, segUpdate):", "body": "<EOL>if segUpdate is None or len(segUpdate.activeSynapses) == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>key = (c, i) <EOL>if key in self.segmentUpdates:<EOL><INDENT>self.segmentUpdates[key] += [(self.lrnIterationIdx, segUpdate)]<EOL><DEDENT>else:<EOL><INDENT>self.segmentUpdates[key] = [(self.lrnIterationIdx, segUpdate)]<EOL><DEDENT>", "docstring": "Store a dated potential segment update. The \"date\" (iteration index) is used\nlater to determine whether the update is too old and should be forgotten.\nThis is controlled by parameter ``segUpdateValidDuration``.\n\n:param c: TODO: document\n:param i: TODO: document\n:param segUpdate: TODO: document", "id": "f17565:c0:m40"}
{"signature": "def printSegmentUpdates(self):", "body": "print(\"<STR_LIT>\", len(self.segmentUpdates))<EOL>for key, updateList in self.segmentUpdates.items():<EOL><INDENT>c, i = key[<NUM_LIT:0>], key[<NUM_LIT:1>]<EOL>print(c, i, updateList)<EOL><DEDENT>", "docstring": "TODO: document\n\n:return:", "id": "f17565:c0:m31"}
{"signature": "def write(self, proto):", "body": "proto.version = TM_VERSION<EOL>self._random.write(proto.random)<EOL>proto.numberOfCols = self.numberOfCols<EOL>proto.cellsPerColumn = self.cellsPerColumn<EOL>proto.initialPerm = float(self.initialPerm)<EOL>proto.connectedPerm = float(self.connectedPerm)<EOL>proto.minThreshold = self.minThreshold<EOL>proto.newSynapseCount = self.newSynapseCount<EOL>proto.permanenceInc = float(self.permanenceInc)<EOL>proto.permanenceDec = float(self.permanenceDec)<EOL>proto.permanenceMax = float(self.permanenceMax)<EOL>proto.globalDecay = float(self.globalDecay)<EOL>proto.activationThreshold = self.activationThreshold<EOL>proto.doPooling = self.doPooling<EOL>proto.segUpdateValidDuration = self.segUpdateValidDuration<EOL>proto.burnIn = self.burnIn<EOL>proto.collectStats = self.collectStats<EOL>proto.verbosity = self.verbosity<EOL>proto.pamLength = self.pamLength<EOL>proto.maxAge = self.maxAge<EOL>proto.maxInfBacktrack = self.maxInfBacktrack<EOL>proto.maxLrnBacktrack = self.maxLrnBacktrack<EOL>proto.maxSeqLength = self.maxSeqLength<EOL>proto.maxSegmentsPerCell = self.maxSegmentsPerCell<EOL>proto.maxSynapsesPerSegment = self.maxSynapsesPerSegment<EOL>proto.outputType = self.outputType<EOL>proto.activeColumns = self.activeColumns<EOL>cellListProto = proto.init(\"<STR_LIT>\", len(self.cells))<EOL>for i, columnSegments in enumerate(self.cells):<EOL><INDENT>columnSegmentsProto = cellListProto.init(i, len(columnSegments))<EOL>for j, cellSegments in enumerate(columnSegments):<EOL><INDENT>cellSegmentsProto = columnSegmentsProto.init(j, len(cellSegments))<EOL>for k, segment in enumerate(cellSegments):<EOL><INDENT>segment.write(cellSegmentsProto[k])<EOL><DEDENT><DEDENT><DEDENT>proto.lrnIterationIdx = self.lrnIterationIdx<EOL>proto.iterationIdx = self.iterationIdx<EOL>proto.segID = self.segID<EOL>if self.currentOutput is None:<EOL><INDENT>proto.currentOutput.none = None<EOL><DEDENT>else:<EOL><INDENT>proto.currentOutput.list = 
self.currentOutput.tolist()<EOL><DEDENT>proto.pamCounter = self.pamCounter<EOL>proto.collectSequenceStats = self.collectSequenceStats<EOL>proto.resetCalled = self.resetCalled<EOL>proto.avgInputDensity = self.avgInputDensity or -<NUM_LIT:1.0><EOL>proto.learnedSeqLength = self.learnedSeqLength<EOL>proto.avgLearnedSeqLength = self.avgLearnedSeqLength<EOL>proto.prevLrnPatterns = self._prevLrnPatterns<EOL>proto.prevInfPatterns = self._prevInfPatterns<EOL>segmentUpdatesListProto = proto.init(\"<STR_LIT>\",<EOL>len(self.segmentUpdates))<EOL>for i, (key, updates) in enumerate(self.segmentUpdates.items()):<EOL><INDENT>cellSegmentUpdatesProto = segmentUpdatesListProto[i]<EOL>cellSegmentUpdatesProto.columnIdx = key[<NUM_LIT:0>]<EOL>cellSegmentUpdatesProto.cellIdx = key[<NUM_LIT:1>]<EOL>segmentUpdatesProto = cellSegmentUpdatesProto.init(\"<STR_LIT>\",<EOL>len(updates))<EOL>for j, (lrnIterationIdx, segmentUpdate) in enumerate(updates):<EOL><INDENT>segmentUpdateWrapperProto = segmentUpdatesProto[j]<EOL>segmentUpdateWrapperProto.lrnIterationIdx = lrnIterationIdx<EOL>segmentUpdate.write(segmentUpdateWrapperProto.segmentUpdate)<EOL><DEDENT><DEDENT>proto.cellConfidenceT = self.cellConfidence[\"<STR_LIT:t>\"].tolist()<EOL>proto.cellConfidenceT1 = self.cellConfidence[\"<STR_LIT>\"].tolist()<EOL>proto.cellConfidenceCandidate = self.cellConfidence[\"<STR_LIT>\"].tolist()<EOL>proto.colConfidenceT = self.colConfidence[\"<STR_LIT:t>\"].tolist()<EOL>proto.colConfidenceT1 = self.colConfidence[\"<STR_LIT>\"].tolist()<EOL>proto.colConfidenceCandidate = self.colConfidence[\"<STR_LIT>\"].tolist()<EOL>proto.lrnActiveStateT = self.lrnActiveState[\"<STR_LIT:t>\"].tolist()<EOL>proto.lrnActiveStateT1 = self.lrnActiveState[\"<STR_LIT>\"].tolist()<EOL>proto.infActiveStateT = self.infActiveState[\"<STR_LIT:t>\"].tolist()<EOL>proto.infActiveStateT1 = self.infActiveState[\"<STR_LIT>\"].tolist()<EOL>proto.infActiveStateBackup = self.infActiveState[\"<STR_LIT>\"].tolist()<EOL>proto.infActiveStateCandidate = 
self.infActiveState[\"<STR_LIT>\"].tolist()<EOL>proto.lrnPredictedStateT = self.lrnPredictedState[\"<STR_LIT:t>\"].tolist()<EOL>proto.lrnPredictedStateT1 = self.lrnPredictedState[\"<STR_LIT>\"].tolist()<EOL>proto.infPredictedStateT = self.infPredictedState[\"<STR_LIT:t>\"].tolist()<EOL>proto.infPredictedStateT1 = self.infPredictedState[\"<STR_LIT>\"].tolist()<EOL>proto.infPredictedStateBackup = self.infPredictedState[\"<STR_LIT>\"].tolist()<EOL>proto.infPredictedStateCandidate = self.infPredictedState[\"<STR_LIT>\"].tolist()<EOL>proto.consolePrinterVerbosity = self.consolePrinterVerbosity<EOL>", "docstring": "Populate serialization proto instance.\n\n        :param proto: (BacktrackingTMProto) the proto instance to populate", "id": "f17565:c0:m6"}
{"signature": "def infer(self, bottomUpInput):", "body": "return self.compute(bottomUpInput, enableLearn=False)<EOL>", "docstring": "TODO: document\n\n:param bottomUpInput: \n:return:", "id": "f17565:c0:m61"}
{"signature": "def printInput(self, x):", "body": "print(\"<STR_LIT>\")<EOL>for c in range(self.numberOfCols):<EOL><INDENT>print(int(x[c]), end='<STR_LIT:U+0020>')<EOL><DEDENT>print()<EOL>", "docstring": "TODO: document\n\n:param x: \n:return:", "id": "f17565:c0:m27"}
{"signature": "def debugPrint(self):", "body": "<EOL>print(\"<STR_LIT>\" % (self.segID), end='<STR_LIT:U+0020>')<EOL>if self.isSequenceSeg:<EOL><INDENT>print(\"<STR_LIT:True>\", end='<STR_LIT:U+0020>')<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT:False>\", end='<STR_LIT:U+0020>')<EOL><DEDENT>print(\"<STR_LIT>\" % (self.dutyCycle(readOnly=True)), end='<STR_LIT:U+0020>')<EOL>print(\"<STR_LIT>\" % (self.positiveActivations,<EOL>self.totalActivations), end='<STR_LIT:U+0020>')<EOL>print(\"<STR_LIT>\" % (self.tm.lrnIterationIdx - self.lastActiveIteration), end='<STR_LIT:U+0020>')<EOL>sortedSyns = sorted(self.syns)<EOL>for _, synapse in enumerate(sortedSyns):<EOL><INDENT>print(\"<STR_LIT>\" % (synapse[<NUM_LIT:0>], synapse[<NUM_LIT:1>], synapse[<NUM_LIT:2>]), end='<STR_LIT:U+0020>')<EOL><DEDENT>print()<EOL>", "docstring": "Print segment information for verbose messaging and debugging.\n        This uses the following format:\n\n         ID:54413 True 0.64801 (24/36) 101 [9,1]0.75 [10,1]0.75 [11,1]0.75\n\n        where:\n          54413 - is the unique segment id\n          True - is sequence segment\n          0.64801 - moving average duty cycle\n          (24/36) - (numPositiveActivations / numTotalActivations)\n          101 - age, number of iterations since last activated\n          [9,1]0.75 - synapse from column 9, cell #1, strength 0.75\n          [10,1]0.75 - synapse from column 10, cell #1, strength 0.75\n          [11,1]0.75 - synapse from column 11, cell #1, strength 0.75", "id": "f17565:c1:m7"}
{"signature": "def _trimSegmentsInCell(self, colIdx, cellIdx, segList, minPermanence,<EOL>minNumSyns):", "body": "<EOL>if minPermanence is None:<EOL><INDENT>minPermanence = self.connectedPerm<EOL><DEDENT>if minNumSyns is None:<EOL><INDENT>minNumSyns = self.activationThreshold<EOL><DEDENT>nSegsRemoved, nSynsRemoved = <NUM_LIT:0>, <NUM_LIT:0><EOL>segsToDel = [] <EOL>for segment in segList:<EOL><INDENT>synsToDel = [syn for syn in segment.syns if syn[<NUM_LIT:2>] < minPermanence]<EOL>if len(synsToDel) == len(segment.syns):<EOL><INDENT>segsToDel.append(segment) <EOL><DEDENT>else:<EOL><INDENT>if len(synsToDel) > <NUM_LIT:0>:<EOL><INDENT>for syn in synsToDel: <EOL><INDENT>segment.syns.remove(syn)<EOL>nSynsRemoved += <NUM_LIT:1><EOL><DEDENT><DEDENT>if len(segment.syns) < minNumSyns:<EOL><INDENT>segsToDel.append(segment)<EOL><DEDENT><DEDENT><DEDENT>nSegsRemoved += len(segsToDel)<EOL>for seg in segsToDel: <EOL><INDENT>self._cleanUpdatesList(colIdx, cellIdx, seg)<EOL>self.cells[colIdx][cellIdx].remove(seg)<EOL>nSynsRemoved += len(seg.syns)<EOL><DEDENT>return nSegsRemoved, nSynsRemoved<EOL>", "docstring": "This method goes through a list of segments for a given cell and\ndeletes all synapses whose permanence is less than minPermanence and deletes\nany segments that have less than minNumSyns synapses remaining.\n\n:param colIdx        Column index\n:param cellIdx       Cell index within the column\n:param segList       List of segment references\n:param minPermanence Any syn whose permamence is 0 or < minPermanence will\n                     be deleted.\n:param minNumSyns    Any segment with less than minNumSyns synapses remaining\n                     in it will be deleted.\n\n:returns: tuple (numSegsRemoved, numSynsRemoved)", "id": "f17565:c0:m65"}
{"signature": "def _learnBacktrack(self):", "body": "<EOL>numPrevPatterns = len(self._prevLrnPatterns) - <NUM_LIT:1><EOL>if numPrevPatterns <= <NUM_LIT:0>:<EOL><INDENT>if self.verbosity >= <NUM_LIT:3>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>return False<EOL><DEDENT>badPatterns = []<EOL>inSequence = False<EOL>for startOffset in range(<NUM_LIT:0>, numPrevPatterns):<EOL><INDENT>inSequence = self._learnBacktrackFrom(startOffset, readOnly=True)<EOL>if inSequence:<EOL><INDENT>break<EOL><DEDENT>badPatterns.append(startOffset)<EOL><DEDENT>if not inSequence:<EOL><INDENT>if self.verbosity >= <NUM_LIT:3>:<EOL><INDENT>print (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>self._prevLrnPatterns = []<EOL>return False<EOL><DEDENT>if self.verbosity >= <NUM_LIT:3>:<EOL><INDENT>print((\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (numPrevPatterns - startOffset),<EOL>self._prevLrnPatterns[startOffset]))<EOL><DEDENT>self._learnBacktrackFrom(startOffset, readOnly=False)<EOL>for i in range(numPrevPatterns):<EOL><INDENT>if i in badPatterns or i <= startOffset:<EOL><INDENT>if self.verbosity >= <NUM_LIT:3>:<EOL><INDENT>print((\"<STR_LIT>\",<EOL>self._prevLrnPatterns[<NUM_LIT:0>]))<EOL><DEDENT>self._prevLrnPatterns.pop(<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return numPrevPatterns - startOffset<EOL>", "docstring": "This \"backtracks\" our learning state, trying to see if we can lock onto\nthe current set of inputs by assuming the sequence started up to N steps\nago on start cells.\n\nThis will adjust @ref lrnActiveState['t'] if it does manage to lock on to a\nsequence that started earlier.\n\n:returns:          >0 if we managed to lock on to a sequence that started\n                  earlier. 
The value returned is how many steps in the\n                  past we locked on.\n                  If 0 is returned, the caller needs to change active\n                  state to start on start cells.\n\nHow it works:\n-------------------------------------------------------------------\nThis method gets called from updateLearningState when we detect either of\nthe following two conditions:\n\n#. Our PAM counter (@ref pamCounter) expired\n#. We reached the max allowed learned sequence length\n\nEither of these two conditions indicate that we want to start over on start\ncells.\n\nRather than start over on start cells on the current input, we can\naccelerate learning by backtracking a few steps ago and seeing if perhaps\na sequence we already at least partially know already started.\n\nThis updates/modifies:\n    - @ref lrnActiveState['t']\n\nThis trashes:\n    - @ref lrnActiveState['t-1']\n    - @ref lrnPredictedState['t']\n    - @ref lrnPredictedState['t-1']", "id": "f17565:c0:m56"}
{"signature": "def trimSegments(self, minPermanence=None, minNumSyns=None):", "body": "<EOL>if minPermanence is None:<EOL><INDENT>minPermanence = self.connectedPerm<EOL><DEDENT>if minNumSyns is None:<EOL><INDENT>minNumSyns = self.activationThreshold<EOL><DEDENT>totalSegsRemoved, totalSynsRemoved = <NUM_LIT:0>, <NUM_LIT:0><EOL>for c, i in itertools.product(range(self.numberOfCols),<EOL>range(self.cellsPerColumn)):<EOL><INDENT>(segsRemoved, synsRemoved) = self._trimSegmentsInCell(<EOL>colIdx=c, cellIdx=i, segList=self.cells[c][i],<EOL>minPermanence=minPermanence, minNumSyns=minNumSyns)<EOL>totalSegsRemoved += segsRemoved<EOL>totalSynsRemoved += synsRemoved<EOL><DEDENT>if self.verbosity >= <NUM_LIT:5>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>self.printCells(predictedOnly=False)<EOL><DEDENT>return totalSegsRemoved, totalSynsRemoved<EOL>", "docstring": "This method deletes all synapses whose permanence is less than\nminPermanence and deletes any segments that have less than\nminNumSyns synapses remaining.\n\n:param minPermanence: (float) Any syn whose permanence is 0 or < \n       ``minPermanence``  will be deleted. If None is passed in, then \n       ``self.connectedPerm`` is used.\n:param minNumSyns: (int) Any segment with less than ``minNumSyns`` synapses \n       remaining in it will be deleted. If None is passed in, then \n       ``self.activationThreshold`` is used.\n:returns: (tuple) ``numSegsRemoved``, ``numSynsRemoved``", "id": "f17565:c0:m66"}
{"signature": "def _processSegmentUpdates(self, activeColumns):", "body": "<EOL>removeKeys = []<EOL>trimSegments = []<EOL>for key, updateList in self.segmentUpdates.items():<EOL><INDENT>c, i = key[<NUM_LIT:0>], key[<NUM_LIT:1>]<EOL>if c in activeColumns:<EOL><INDENT>action = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>if self.doPooling and self.lrnPredictedState['<STR_LIT:t>'][c, i] == <NUM_LIT:1>:<EOL><INDENT>action = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>action = '<STR_LIT>'<EOL><DEDENT><DEDENT>updateListKeep = []<EOL>if action != '<STR_LIT>':<EOL><INDENT>for (createDate, segUpdate) in updateList:<EOL><INDENT>if self.verbosity >= <NUM_LIT:4>:<EOL><INDENT>print(\"<STR_LIT>\", self.lrnIterationIdx, end='<STR_LIT:U+0020>')<EOL>print(segUpdate)<EOL><DEDENT>if self.lrnIterationIdx - createDate > self.segUpdateValidDuration:<EOL><INDENT>continue<EOL><DEDENT>if action == '<STR_LIT>':<EOL><INDENT>trimSegment = self._adaptSegment(segUpdate)<EOL>if trimSegment:<EOL><INDENT>trimSegments.append((segUpdate.columnIdx, segUpdate.cellIdx,<EOL>segUpdate.segment))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>updateListKeep.append((createDate, segUpdate))<EOL><DEDENT><DEDENT><DEDENT>self.segmentUpdates[key] = updateListKeep<EOL>if len(updateListKeep) == <NUM_LIT:0>:<EOL><INDENT>removeKeys.append(key)<EOL><DEDENT><DEDENT>for key in removeKeys:<EOL><INDENT>self.segmentUpdates.pop(key)<EOL><DEDENT>for (c, i, segment) in trimSegments:<EOL><INDENT>self._trimSegmentsInCell(c, i, [segment], minPermanence = <NUM_LIT>,<EOL>minNumSyns = <NUM_LIT:0>)<EOL><DEDENT>", "docstring": "Go through the list of accumulated segment updates and process them\nas follows:\n\nif the segment update is too old, remove the update\nelse if the cell received bottom-up, update its permanences\nelse if it's still being predicted, leave it in the queue\nelse remove it.\n\n:param activeColumns TODO: document", "id": "f17565:c0:m77"}
{"signature": "def _checkPrediction(self, patternNZs, output=None, colConfidence=None,<EOL>details=False):", "body": "<EOL>numPatterns = len(patternNZs)<EOL>orAll = set()<EOL>orAll = orAll.union(*patternNZs)<EOL>if output is None:<EOL><INDENT>assert self.currentOutput is not None<EOL>output = self.currentOutput<EOL><DEDENT>output = set(output.sum(axis=<NUM_LIT:1>).nonzero()[<NUM_LIT:0>])<EOL>totalExtras = len(output.difference(orAll))<EOL>totalMissing = len(orAll.difference(output))<EOL>if colConfidence is None:<EOL><INDENT>colConfidence = self.colConfidence['<STR_LIT:t>']<EOL><DEDENT>confidences = []<EOL>for i in range(numPatterns):<EOL><INDENT>positivePredictionSum = colConfidence[patternNZs[i]].sum()<EOL>positiveColumnCount   = len(patternNZs[i])<EOL>totalPredictionSum    = colConfidence.sum()<EOL>totalColumnCount      = len(colConfidence)<EOL>negativePredictionSum = totalPredictionSum - positivePredictionSum<EOL>negativeColumnCount   = totalColumnCount   - positiveColumnCount<EOL>if positiveColumnCount != <NUM_LIT:0>:<EOL><INDENT>positivePredictionScore = positivePredictionSum<EOL><DEDENT>else:<EOL><INDENT>positivePredictionScore = <NUM_LIT:0.0><EOL><DEDENT>if negativeColumnCount != <NUM_LIT:0>:<EOL><INDENT>negativePredictionScore = negativePredictionSum<EOL><DEDENT>else:<EOL><INDENT>negativePredictionScore = <NUM_LIT:0.0><EOL><DEDENT>currentSum = negativePredictionScore + positivePredictionScore<EOL>if currentSum > <NUM_LIT:0>:<EOL><INDENT>positivePredictionScore *= <NUM_LIT:1.0>/currentSum<EOL>negativePredictionScore *= <NUM_LIT:1.0>/currentSum<EOL><DEDENT>predictionScore = positivePredictionScore - negativePredictionScore<EOL>confidences.append((predictionScore,<EOL>positivePredictionScore,<EOL>negativePredictionScore))<EOL><DEDENT>if details:<EOL><INDENT>missingPatternBits = [set(pattern).difference(output)<EOL>for pattern in patternNZs]<EOL>return (totalExtras, totalMissing, confidences, missingPatternBits)<EOL><DEDENT>else:<EOL><INDENT>return 
(totalExtras, totalMissing, confidences)<EOL><DEDENT>", "docstring": "This function produces goodness-of-match scores for a set of input patterns,\nby checking for their presence in the current and predicted output of the\nTM. Returns a global count of the number of extra and missing bits, the\nconfidence scores for each input pattern, and (if requested) the\nbits in each input pattern that were not present in the TM's prediction.\n\n:param patternNZs a list of input patterns that we want to check for. Each\n                  element is a list of the non-zeros in that pattern.\n:param output     The output of the TM. If not specified, then use the\n                  TM's current output. This can be specified if you are\n                  trying to check the prediction metric for an output from\n                  the past.\n:param colConfidence The column confidences. If not specified, then use the\n                     TM's current self.colConfidence. This can be specified if you\n                     are trying to check the prediction metrics for an output\n                     from the past.\n:param details    if True, also include details of missing bits per pattern.\n\n:returns:  list containing:\n\n          [\n            totalExtras,\n            totalMissing,\n            [conf_1, conf_2, ...],\n            [missing1, missing2, ...]\n          ]\n\n@retval totalExtras a global count of the number of 'extras', i.e. bits that\n                    are on in the current output but not in the or of all the\n                    passed in patterns\n@retval totalMissing a global count of all the missing bits, i.e. 
the bits\n                     that are on in the or of the patterns, but not in the\n                     current output\n@retval conf_i the confidence score for the i'th pattern inpatternsToCheck\n               This consists of 3 items as a tuple:\n               (predictionScore, posPredictionScore, negPredictionScore)\n@retval missing_i the bits in the i'th pattern that were missing\n                  in the output. This list is only returned if details is\n                  True.", "id": "f17565:c0:m69"}
{"signature": "def _getTPDynamicStateVariableNames(self):", "body": "return [\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>]<EOL>", "docstring": "Any newly added dynamic states in the TM should be added to this list.\n\nParameters:\n--------------------------------------------\nretval:       The list of names of TM dynamic state variables.", "id": "f17565:c0:m46"}
{"signature": "def _getBestMatchingSegment(self, c, i, activeState):", "body": "maxActivity, which = self.minThreshold, -<NUM_LIT:1><EOL>for j, s in enumerate(self.cells[c][i]):<EOL><INDENT>activity = self._getSegmentActivityLevel(s, activeState,<EOL>connectedSynapsesOnly=False)<EOL>if activity >= maxActivity:<EOL><INDENT>maxActivity, which = activity, j<EOL><DEDENT><DEDENT>if which == -<NUM_LIT:1>:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return self.cells[c][i][which]<EOL><DEDENT>", "docstring": "For the given cell, find the segment with the largest number of active\nsynapses. This routine is aggressive in finding the best match. The\npermanence value of synapses is allowed to be below connectedPerm. The number\nof active synapses is allowed to be below activationThreshold, but must be\nabove minThreshold. The routine returns the segment index. If no segments are\nfound, then an index of -1 is returned.\n\n:param c TODO: document\n:param i TODO: document\n:param activeState TODO: document", "id": "f17565:c0:m73"}
{"signature": "def _getTPDynamicState(self,):", "body": "tpDynamicState = dict()<EOL>for variableName in self._getTPDynamicStateVariableNames():<EOL><INDENT>tpDynamicState[variableName] = copy.deepcopy(self.__dict__[variableName])<EOL><DEDENT>return tpDynamicState<EOL>", "docstring": "Parameters:\n--------------------------------------------\nretval:       A dict with all the dynamic state variable names as keys and\n              their values at this instant as values.", "id": "f17565:c0:m47"}
{"signature": "def printCells(self, predictedOnly=False):", "body": "if predictedOnly:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>print(\"<STR_LIT>\", self.activationThreshold, end='<STR_LIT:U+0020>')<EOL>print(\"<STR_LIT>\", self.minThreshold, end='<STR_LIT:U+0020>')<EOL>print(\"<STR_LIT>\", self.connectedPerm)<EOL>for c in range(self.numberOfCols):<EOL><INDENT>for i in range(self.cellsPerColumn):<EOL><INDENT>if not predictedOnly or self.infPredictedState['<STR_LIT:t>'][c, i]:<EOL><INDENT>self.printCell(c, i, predictedOnly)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "TODO: document\n\n:param predictedOnly: \n:return:", "id": "f17565:c0:m33"}
{"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()<EOL>for ephemeralMemberName in self._getEphemeralMembers():<EOL><INDENT>state.pop(ephemeralMemberName, None)<EOL><DEDENT>state['<STR_LIT>'] = self._getRandomState()<EOL>state['<STR_LIT:version>'] = TM_VERSION<EOL>return state<EOL>", "docstring": "@internal\n        Return serializable state.  This function will return a version of the\n        __dict__ with all \"ephemeral\" members stripped out.  \"Ephemeral\" members\n        are defined as those that do not need to be (nor should be) stored\n        in any kind of persistent file (e.g., NuPIC network XML file.)", "id": "f17565:c0:m3"}
{"signature": "def _setTPDynamicState(self, tpDynamicState):", "body": "for variableName in self._getTPDynamicStateVariableNames():<EOL><INDENT>self.__dict__[variableName] = tpDynamicState.pop(variableName)<EOL><DEDENT>", "docstring": "Set all the dynamic state variables from the <tpDynamicState> dict.\n\n<tpDynamicState> dict has all the dynamic state variable names as keys and\ntheir values at this instant as values.\n\nWe set the dynamic state variables in the tm object with these items.", "id": "f17565:c0:m48"}
{"signature": "def _getBestMatchingCell(self, c, activeState, minThreshold):", "body": "<EOL>bestActivityInCol = minThreshold<EOL>bestSegIdxInCol = -<NUM_LIT:1><EOL>bestCellInCol = -<NUM_LIT:1><EOL>for i in range(self.cellsPerColumn):<EOL><INDENT>maxSegActivity = <NUM_LIT:0><EOL>maxSegIdx = <NUM_LIT:0><EOL>for j, s in enumerate(self.cells[c][i]):<EOL><INDENT>activity = self._getSegmentActivityLevel(s, activeState)<EOL>if activity > maxSegActivity:<EOL><INDENT>maxSegActivity = activity<EOL>maxSegIdx = j<EOL><DEDENT><DEDENT>if maxSegActivity >= bestActivityInCol:<EOL><INDENT>bestActivityInCol = maxSegActivity<EOL>bestSegIdxInCol = maxSegIdx<EOL>bestCellInCol = i<EOL><DEDENT><DEDENT>if bestCellInCol == -<NUM_LIT:1>:<EOL><INDENT>return (None, None, None)<EOL><DEDENT>else:<EOL><INDENT>return (bestCellInCol, self.cells[c][bestCellInCol][bestSegIdxInCol],<EOL>bestActivityInCol)<EOL><DEDENT>", "docstring": "Find weakly activated cell in column with at least minThreshold active\nsynapses.\n\n:param c            which column to look at\n:param activeState  the active cells\n:param minThreshold minimum number of synapses required\n\n:returns: tuple (cellIdx, segment, numActiveSynapses)", "id": "f17565:c0:m72"}
{"signature": "def _getRandomState(self):", "body": "return pickle.dumps(self._random)<EOL>", "docstring": "@internal\n        Return the random number state.\n\n        This is used during unit testing to generate repeatable results.", "id": "f17565:c0:m16"}
{"signature": "def _updateStatsInferEnd(self, stats, bottomUpNZ, predictedState,<EOL>colConfidence):", "body": "<EOL>if not self.collectStats:<EOL><INDENT>return<EOL><DEDENT>stats['<STR_LIT>'] += <NUM_LIT:1><EOL>(numExtra2, numMissing2, confidences2) = self._checkPrediction(<EOL>patternNZs=[bottomUpNZ], output=predictedState,<EOL>colConfidence=colConfidence)<EOL>predictionScore, positivePredictionScore, negativePredictionScore = (<EOL>confidences2[<NUM_LIT:0>])<EOL>stats['<STR_LIT>'] = float(predictionScore)<EOL>stats['<STR_LIT>'] = <NUM_LIT:1.0> - float(positivePredictionScore)<EOL>stats['<STR_LIT>'] = float(negativePredictionScore)<EOL>stats['<STR_LIT>'] = numMissing2<EOL>stats['<STR_LIT>'] = numExtra2<EOL>if stats['<STR_LIT>'] <= self.burnIn:<EOL><INDENT>return<EOL><DEDENT>stats['<STR_LIT>'] += <NUM_LIT:1><EOL>numExpected = max(<NUM_LIT:1.0>, float(len(bottomUpNZ)))<EOL>stats['<STR_LIT>'] += numMissing2<EOL>stats['<STR_LIT>'] += numExtra2<EOL>stats['<STR_LIT>'] += <NUM_LIT> * numExtra2 / numExpected<EOL>stats['<STR_LIT>'] += <NUM_LIT> * numMissing2 / numExpected<EOL>stats['<STR_LIT>'] += float(predictionScore)<EOL>stats['<STR_LIT>'] += <NUM_LIT:1.0> - float(positivePredictionScore)<EOL>stats['<STR_LIT>'] += float(negativePredictionScore)<EOL>if self.collectSequenceStats:<EOL><INDENT>cc = self.cellConfidence['<STR_LIT>'] * self.infActiveState['<STR_LIT:t>']<EOL>sconf = cc.sum(axis=<NUM_LIT:1>)<EOL>for c in range(self.numberOfCols):<EOL><INDENT>if sconf[c] > <NUM_LIT:0>:<EOL><INDENT>cc[c, :] /= sconf[c]<EOL><DEDENT><DEDENT>self._internalStats['<STR_LIT>'] += cc<EOL><DEDENT>", "docstring": "Called at the end of learning and inference, this routine will update\na number of stats in our _internalStats dictionary, including our computed\nprediction score.\n\n:param stats            internal stats dictionary\n:param bottomUpNZ       list of the active bottom-up inputs\n:param predictedState   The columns we predicted on the last time step (should\n                      
  match the current bottomUpNZ in the best case)\n:param colConfidence    Column confidences we determined on the last time step", "id": "f17565:c0:m21"}
{"signature": "def printStates(self, printPrevious = True, printLearnState = True):", "body": "def formatRow(var, i):<EOL><INDENT>s = '<STR_LIT>'<EOL>for c in range(self.numberOfCols):<EOL><INDENT>if c > <NUM_LIT:0> and c % <NUM_LIT:10> == <NUM_LIT:0>:<EOL><INDENT>s += '<STR_LIT:U+0020>'<EOL><DEDENT>s += str(var[c, i])<EOL><DEDENT>s += '<STR_LIT:U+0020>'<EOL>return s<EOL><DEDENT>print(\"<STR_LIT>\")<EOL>for i in range(self.cellsPerColumn):<EOL><INDENT>if printPrevious:<EOL><INDENT>print(formatRow(self.infActiveState['<STR_LIT>'], i), end='<STR_LIT:U+0020>')<EOL><DEDENT>print(formatRow(self.infActiveState['<STR_LIT:t>'], i))<EOL><DEDENT>print(\"<STR_LIT>\")<EOL>for i in range(self.cellsPerColumn):<EOL><INDENT>if printPrevious:<EOL><INDENT>print(formatRow(self.infPredictedState['<STR_LIT>'], i), end='<STR_LIT:U+0020>')<EOL><DEDENT>print(formatRow(self.infPredictedState['<STR_LIT:t>'], i))<EOL><DEDENT>if printLearnState:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>for i in range(self.cellsPerColumn):<EOL><INDENT>if printPrevious:<EOL><INDENT>print(formatRow(self.lrnActiveState['<STR_LIT>'], i), end='<STR_LIT:U+0020>')<EOL><DEDENT>print(formatRow(self.lrnActiveState['<STR_LIT:t>'], i))<EOL><DEDENT>print(\"<STR_LIT>\")<EOL>for i in range(self.cellsPerColumn):<EOL><INDENT>if printPrevious:<EOL><INDENT>print(formatRow(self.lrnPredictedState['<STR_LIT>'], i), end='<STR_LIT:U+0020>')<EOL><DEDENT>print(formatRow(self.lrnPredictedState['<STR_LIT:t>'], i))<EOL><DEDENT><DEDENT>", "docstring": "TODO: document\n\n:param printPrevious: \n:param printLearnState: \n:return:", "id": "f17565:c0:m25"}
{"signature": "def _learnPhase1(self, activeColumns, readOnly=False):", "body": "<EOL>self.lrnActiveState['<STR_LIT:t>'].fill(<NUM_LIT:0>)<EOL>numUnpredictedColumns = <NUM_LIT:0><EOL>for c in activeColumns:<EOL><INDENT>predictingCells = numpy.where(self.lrnPredictedState['<STR_LIT>'][c] == <NUM_LIT:1>)[<NUM_LIT:0>]<EOL>numPredictedCells = len(predictingCells)<EOL>assert numPredictedCells <= <NUM_LIT:1><EOL>if numPredictedCells == <NUM_LIT:1>:<EOL><INDENT>i = predictingCells[<NUM_LIT:0>]<EOL>self.lrnActiveState['<STR_LIT:t>'][c, i] = <NUM_LIT:1><EOL>continue<EOL><DEDENT>numUnpredictedColumns += <NUM_LIT:1><EOL>if readOnly:<EOL><INDENT>continue<EOL><DEDENT>i, s, numActive = self._getBestMatchingCell(<EOL>c, self.lrnActiveState['<STR_LIT>'], self.minThreshold)<EOL>if s is not None and s.isSequenceSegment():<EOL><INDENT>if self.verbosity >= <NUM_LIT:4>:<EOL><INDENT>print(\"<STR_LIT>\", c)<EOL><DEDENT>self.lrnActiveState['<STR_LIT:t>'][c, i] = <NUM_LIT:1><EOL>segUpdate = self._getSegmentActiveSynapses(<EOL>c, i, s, self.lrnActiveState['<STR_LIT>'], newSynapses = True)<EOL>s.totalActivations += <NUM_LIT:1><EOL>trimSegment = self._adaptSegment(segUpdate)<EOL>if trimSegment:<EOL><INDENT>self._trimSegmentsInCell(c, i, [s], minPermanence = <NUM_LIT>,<EOL>minNumSyns = <NUM_LIT:0>)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>i = self._getCellForNewSegment(c)<EOL>if (self.verbosity >= <NUM_LIT:4>):<EOL><INDENT>print(\"<STR_LIT>\", c, end='<STR_LIT:U+0020>')<EOL>print(\"<STR_LIT>\", i)<EOL><DEDENT>self.lrnActiveState['<STR_LIT:t>'][c, i] = <NUM_LIT:1><EOL>segUpdate = self._getSegmentActiveSynapses(<EOL>c, i, None, self.lrnActiveState['<STR_LIT>'], newSynapses=True)<EOL>segUpdate.sequenceSegment = True <EOL>self._adaptSegment(segUpdate)  <EOL><DEDENT><DEDENT>numBottomUpColumns = len(activeColumns)<EOL>if numUnpredictedColumns < numBottomUpColumns / <NUM_LIT:2>:<EOL><INDENT>return True   <EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>", "docstring": "Compute the learning active 
state given the predicted state and\nthe bottom-up input.\n\n:param activeColumns list of active bottom-ups\n:param readOnly      True if being called from backtracking logic.\n                     This tells us not to increment any segment\n                     duty cycles or queue up any updates.\n:returns: True if the current input was sufficiently predicted, OR\n         if we started over on startCells. False indicates that the current\n         input was NOT predicted, well enough to consider it as \"inSequence\"\n\nThis looks at:\n    - @ref lrnActiveState['t-1']\n    - @ref lrnPredictedState['t-1']\n\nThis modifies:\n    - @ref lrnActiveState['t']\n    - @ref lrnActiveState['t-1']", "id": "f17565:c0:m57"}
{"signature": "def printConfidence(self, aState, maxCols = <NUM_LIT:20>):", "body": "def formatFPRow(var, i):<EOL><INDENT>s = '<STR_LIT>'<EOL>for c in range(min(maxCols, self.numberOfCols)):<EOL><INDENT>if c > <NUM_LIT:0> and c % <NUM_LIT:10> == <NUM_LIT:0>:<EOL><INDENT>s += '<STR_LIT:U+0020>'<EOL><DEDENT>s += '<STR_LIT>' % var[c, i]<EOL><DEDENT>s += '<STR_LIT:U+0020>'<EOL>return s<EOL><DEDENT>for i in range(self.cellsPerColumn):<EOL><INDENT>print(formatFPRow(aState, i))<EOL><DEDENT>", "docstring": "Print a floating point array that is the same shape as activeState.\n\n:param aState: TODO: document\n:param maxCols: TODO: document", "id": "f17565:c0:m23"}
{"signature": "def dutyCycle(self, active=False, readOnly=False):", "body": "<EOL>if self.tm.lrnIterationIdx <= self.dutyCycleTiers[<NUM_LIT:1>]:<EOL><INDENT>dutyCycle = float(self.positiveActivations)/ self.tm.lrnIterationIdx<EOL>if not readOnly:<EOL><INDENT>self._lastPosDutyCycleIteration = self.tm.lrnIterationIdx<EOL>self._lastPosDutyCycle = dutyCycle<EOL><DEDENT>return dutyCycle<EOL><DEDENT>age = self.tm.lrnIterationIdx - self._lastPosDutyCycleIteration<EOL>if age == <NUM_LIT:0> and not active:<EOL><INDENT>return self._lastPosDutyCycle<EOL><DEDENT>for tierIdx in range(len(self.dutyCycleTiers)-<NUM_LIT:1>, <NUM_LIT:0>, -<NUM_LIT:1>):<EOL><INDENT>if self.tm.lrnIterationIdx > self.dutyCycleTiers[tierIdx]:<EOL><INDENT>alpha = self.dutyCycleAlphas[tierIdx]<EOL>break<EOL><DEDENT><DEDENT>dutyCycle = pow(<NUM_LIT:1.0>-alpha, age) * self._lastPosDutyCycle<EOL>if active:<EOL><INDENT>dutyCycle += alpha<EOL><DEDENT>if not readOnly:<EOL><INDENT>self._lastPosDutyCycleIteration = self.tm.lrnIterationIdx<EOL>self._lastPosDutyCycle = dutyCycle<EOL><DEDENT>return dutyCycle<EOL>", "docstring": "Compute/update and return the positive activations duty cycle of\n        this segment. This is a measure of how often this segment is\n        providing good predictions.\n\n        :param active   True if segment just provided a good prediction\n\n        :param readOnly If True, compute the updated duty cycle, but don't change\n                   the cached value. This is used by debugging print statements.\n\n        :returns: The duty cycle, a measure of how often this segment is\n        providing good predictions.\n\n        **NOTE:** This method relies on different schemes to compute the duty cycle\n        based on how much history we have. 
In order to support this tiered\n        approach **IT MUST BE CALLED ON EVERY SEGMENT AT EACH DUTY CYCLE TIER**\n        (@ref dutyCycleTiers).\n\n        When we don't have a lot of history yet (first tier), we simply return\n        number of positive activations / total number of iterations\n\n        After a certain number of iterations have accumulated, it converts into\n        a moving average calculation, which is updated only when requested\n        since it can be a bit expensive to compute on every iteration (it uses\n        the pow() function).\n\n        The duty cycle is computed as follows:\n\n            dc[t] = (1-alpha) * dc[t-1] + alpha * value[t]\n\n        If the value[t] has been 0 for a number of steps in a row, you can apply\n        all of the updates at once using:\n\n            dc[t] = (1-alpha)^(t-lastT) * dc[lastT]\n\n        We use the alphas and tiers as defined in @ref dutyCycleAlphas and\n        @ref dutyCycleTiers.", "id": "f17565:c1:m6"}
{"signature": "@classmethod<EOL><INDENT>def read(cls, proto):<DEDENT>", "body": "assert proto.version == TM_VERSION<EOL>obj = object.__new__(cls)<EOL>obj._random = Random()<EOL>obj._random.read(proto.random)<EOL>obj.numberOfCols = int(proto.numberOfCols)<EOL>obj.cellsPerColumn = int(proto.cellsPerColumn)<EOL>obj._numberOfCells = obj.numberOfCols * obj.cellsPerColumn<EOL>obj.initialPerm = numpy.float32(proto.initialPerm)<EOL>obj.connectedPerm = numpy.float32(proto.connectedPerm)<EOL>obj.minThreshold = int(proto.minThreshold)<EOL>obj.newSynapseCount = int(proto.newSynapseCount)<EOL>obj.permanenceInc = numpy.float32(proto.permanenceInc)<EOL>obj.permanenceDec = numpy.float32(proto.permanenceDec)<EOL>obj.permanenceMax = numpy.float32(proto.permanenceMax)<EOL>obj.globalDecay = numpy.float32(proto.globalDecay)<EOL>obj.activationThreshold = int(proto.activationThreshold)<EOL>obj.doPooling = proto.doPooling<EOL>obj.segUpdateValidDuration = int(proto.segUpdateValidDuration)<EOL>obj.burnIn = int(proto.burnIn)<EOL>obj.collectStats = proto.collectStats<EOL>obj.verbosity = int(proto.verbosity)<EOL>obj.pamLength = int(proto.pamLength)<EOL>obj.maxAge = int(proto.maxAge)<EOL>obj.maxInfBacktrack = int(proto.maxInfBacktrack)<EOL>obj.maxLrnBacktrack = int(proto.maxLrnBacktrack)<EOL>obj.maxSeqLength = int(proto.maxSeqLength)<EOL>obj.maxSegmentsPerCell = proto.maxSegmentsPerCell<EOL>obj.maxSynapsesPerSegment = proto.maxSynapsesPerSegment<EOL>obj.outputType = proto.outputType<EOL>obj.activeColumns = [int(col) for col in proto.activeColumns]<EOL>obj.cells = [[] for _ in range(len(proto.cells))]<EOL>for columnSegments, columnSegmentsProto in zip(obj.cells, proto.cells):<EOL><INDENT>columnSegments.extend([[] for _ in range(len(columnSegmentsProto))])<EOL>for cellSegments, cellSegmentsProto in zip(columnSegments,<EOL>columnSegmentsProto):<EOL><INDENT>for segmentProto in cellSegmentsProto:<EOL><INDENT>segment = Segment.read(segmentProto, 
obj)<EOL>cellSegments.append(segment)<EOL><DEDENT><DEDENT><DEDENT>obj.lrnIterationIdx = int(proto.lrnIterationIdx)<EOL>obj.iterationIdx = int(proto.iterationIdx)<EOL>obj.segID = int(proto.segID)<EOL>obj.pamCounter = int(proto.pamCounter)<EOL>obj.collectSequenceStats = proto.collectSequenceStats<EOL>obj.resetCalled = proto.resetCalled<EOL>avgInputDensity = proto.avgInputDensity<EOL>if avgInputDensity < <NUM_LIT:0.0>:<EOL><INDENT>obj.avgInputDensity = None<EOL><DEDENT>else:<EOL><INDENT>obj.avgInputDensity = avgInputDensity<EOL><DEDENT>obj.learnedSeqLength = int(proto.learnedSeqLength)<EOL>obj.avgLearnedSeqLength = proto.avgLearnedSeqLength<EOL>obj._initEphemerals()<EOL>if proto.currentOutput.which() == \"<STR_LIT:none>\":<EOL><INDENT>obj.currentOutput = None<EOL><DEDENT>else:<EOL><INDENT>obj.currentOutput = numpy.array(proto.currentOutput.list,<EOL>dtype='<STR_LIT>')<EOL><DEDENT>for pattern in proto.prevLrnPatterns:<EOL><INDENT>obj.prevLrnPatterns.append([v for v in pattern])<EOL><DEDENT>for pattern in proto.prevInfPatterns:<EOL><INDENT>obj.prevInfPatterns.append([v for v in pattern])<EOL><DEDENT>for cellWrapperProto in proto.segmentUpdates:<EOL><INDENT>key = (cellWrapperProto.columnIdx, cellWrapperProto.cellIdx)<EOL>value = []<EOL>for updateWrapperProto in cellWrapperProto.segmentUpdates:<EOL><INDENT>segmentUpdate = SegmentUpdate.read(updateWrapperProto.segmentUpdate, obj)<EOL>value.append((int(updateWrapperProto.lrnIterationIdx), segmentUpdate))<EOL><DEDENT>obj.segmentUpdates[key] = value<EOL><DEDENT>numpy.copyto(obj.cellConfidence[\"<STR_LIT:t>\"], proto.cellConfidenceT)<EOL>numpy.copyto(obj.cellConfidence[\"<STR_LIT>\"], proto.cellConfidenceT1)<EOL>numpy.copyto(obj.cellConfidence[\"<STR_LIT>\"],<EOL>proto.cellConfidenceCandidate)<EOL>numpy.copyto(obj.colConfidence[\"<STR_LIT:t>\"], proto.colConfidenceT)<EOL>numpy.copyto(obj.colConfidence[\"<STR_LIT>\"], proto.colConfidenceT1)<EOL>numpy.copyto(obj.colConfidence[\"<STR_LIT>\"], 
proto.colConfidenceCandidate)<EOL>numpy.copyto(obj.lrnActiveState[\"<STR_LIT:t>\"], proto.lrnActiveStateT)<EOL>numpy.copyto(obj.lrnActiveState[\"<STR_LIT>\"], proto.lrnActiveStateT1)<EOL>numpy.copyto(obj.infActiveState[\"<STR_LIT:t>\"], proto.infActiveStateT)<EOL>numpy.copyto(obj.infActiveState[\"<STR_LIT>\"], proto.infActiveStateT1)<EOL>numpy.copyto(obj.infActiveState[\"<STR_LIT>\"], proto.infActiveStateBackup)<EOL>numpy.copyto(obj.infActiveState[\"<STR_LIT>\"],<EOL>proto.infActiveStateCandidate)<EOL>numpy.copyto(obj.lrnPredictedState[\"<STR_LIT:t>\"], proto.lrnPredictedStateT)<EOL>numpy.copyto(obj.lrnPredictedState[\"<STR_LIT>\"], proto.lrnPredictedStateT1)<EOL>numpy.copyto(obj.infPredictedState[\"<STR_LIT:t>\"], proto.infPredictedStateT)<EOL>numpy.copyto(obj.infPredictedState[\"<STR_LIT>\"], proto.infPredictedStateT1)<EOL>numpy.copyto(obj.infPredictedState[\"<STR_LIT>\"],<EOL>proto.infPredictedStateBackup)<EOL>numpy.copyto(obj.infPredictedState[\"<STR_LIT>\"],<EOL>proto.infPredictedStateCandidate)<EOL>obj.consolePrinterVerbosity = int(proto.consolePrinterVerbosity)<EOL>return obj<EOL>", "docstring": "Deserialize from proto instance.\n\n        :param proto: (BacktrackingTMProto) the proto instance to read from", "id": "f17565:c0:m7"}
{"signature": "def printColConfidence(self, aState, maxCols = <NUM_LIT:20>):", "body": "def formatFPRow(var):<EOL><INDENT>s = '<STR_LIT>'<EOL>for c in range(min(maxCols, self.numberOfCols)):<EOL><INDENT>if c > <NUM_LIT:0> and c % <NUM_LIT:10> == <NUM_LIT:0>:<EOL><INDENT>s += '<STR_LIT:U+0020>'<EOL><DEDENT>s += '<STR_LIT>' % var[c]<EOL><DEDENT>s += '<STR_LIT:U+0020>'<EOL>return s<EOL><DEDENT>print(formatFPRow(aState))<EOL>", "docstring": "Print up to maxCols number from a flat floating point array.\n\n:param aState: TODO: document\n:param maxCols: TODO: document", "id": "f17565:c0:m24"}
{"signature": "def segmentPositionSortKey(self, segment):", "body": "return segment.cell + (segment._ordinal / float(self._nextSegmentOrdinal))<EOL>", "docstring": "Return a numeric key for sorting this segment. This can be used with the \npython built-in ``sorted()`` function.\n\n:param segment: (:class:`Segment`) within this :class:`Connections` \n       instance.\n:returns: (float) A numeric key for sorting.", "id": "f17567:c3:m18"}
{"signature": "def __ne__(self, other):", "body": "return not self.__eq__(other)<EOL>", "docstring": "Non-equality operator for Connections instances.\nChecks if two instances are not functionally identical\n\n:param other: (:class:`Connections`) Connections instance to compare to", "id": "f17567:c3:m23"}
{"signature": "def destroySynapse(self, synapse):", "body": "self._numSynapses -= <NUM_LIT:1><EOL>self._removeSynapseFromPresynapticMap(synapse)<EOL>synapse.segment._synapses.remove(synapse)<EOL>", "docstring": "Destroys a synapse.\n\n:param synapse: (:class:`Synapse`) synapse to destroy", "id": "f17567:c3:m13"}
{"signature": "def dataForSynapse(self, synapse):", "body": "return synapse<EOL>", "docstring": "Returns the data for a synapse.\n\n.. note:: This method exists to match the interface of the C++ Connections. \n   This allows tests and tools to inspect the connections using a common \n   interface.\n\n:param synapse: (:class:`Synapse`)\n:returns: Synapse data", "id": "f17567:c3:m3"}
{"signature": "def updateSynapsePermanence(self, synapse, permanence):", "body": "synapse.permanence = permanence<EOL>", "docstring": "Updates the permanence for a synapse.\n\n:param synapse: (class:`Synapse`) to be updated.\n:param permanence: (float) New permanence.", "id": "f17567:c3:m14"}
{"signature": "def computeActivity(self, activePresynapticCells, connectedPermanence):", "body": "numActiveConnectedSynapsesForSegment = [<NUM_LIT:0>] * self._nextFlatIdx<EOL>numActivePotentialSynapsesForSegment = [<NUM_LIT:0>] * self._nextFlatIdx<EOL>threshold = connectedPermanence - EPSILON<EOL>for cell in activePresynapticCells:<EOL><INDENT>for synapse in self._synapsesForPresynapticCell[cell]:<EOL><INDENT>flatIdx = synapse.segment.flatIdx<EOL>numActivePotentialSynapsesForSegment[flatIdx] += <NUM_LIT:1><EOL>if synapse.permanence > threshold:<EOL><INDENT>numActiveConnectedSynapsesForSegment[flatIdx] += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>return (numActiveConnectedSynapsesForSegment,<EOL>numActivePotentialSynapsesForSegment)<EOL>", "docstring": "Compute each segment's number of active synapses for a given input.\nIn the returned lists, a segment's active synapse count is stored at index\n``segment.flatIdx``.\n\n:param activePresynapticCells: (iter) Active cells.\n:param connectedPermanence: (float) Permanence threshold for a synapse to be \n       considered connected\n\n:returns: (tuple) (``numActiveConnectedSynapsesForSegment`` [list],\n                  ``numActivePotentialSynapsesForSegment`` [list])", "id": "f17567:c3:m15"}
{"signature": "def synapsesForSegment(self, segment):", "body": "return segment._synapses<EOL>", "docstring": "Returns the synapses on a segment.\n\n:param segment: (int) Segment index\n:returns: (set) Synapse objects representing synapses on the given segment.", "id": "f17567:c3:m2"}
{"signature": "def segmentForFlatIdx(self, flatIdx):", "body": "return self._segmentForFlatIdx[flatIdx]<EOL>", "docstring": "Get the segment with the specified flatIdx.\n\n:param flatIdx: (int) The segment's flattened list index.\n\n:returns: (:class:`Segment`)", "id": "f17567:c3:m6"}
{"signature": "def __eq__(self, other):", "body": "return (self.segment.cell == other.segment.cell and<EOL>self.presynapticCell == other.presynapticCell and<EOL>abs(self.permanence - other.permanence) < EPSILON)<EOL>", "docstring": "Explicitly implement this for unit testing. Allow floating point\n        differences for synapse permanence.", "id": "f17567:c1:m1"}
{"signature": "def binSearch(arr, val):", "body": "i = bisect_left(arr, val)<EOL>if i != len(arr) and arr[i] == val:<EOL><INDENT>return i<EOL><DEDENT>return -<NUM_LIT:1><EOL>", "docstring": "Function for running binary search on a sorted list.\n\n:param arr: (list) a sorted list of integers to search\n:param val: (int)  a integer to search for in the sorted array\n:returns: (int) the index of the element if it is found and -1 otherwise.", "id": "f17567:m0"}
{"signature": "def __eq__(self, other):", "body": "return (self.cell == other.cell and<EOL>(sorted(self._synapses, key=lambda x: x._ordinal) ==<EOL>sorted(other._synapses, key=lambda x: x._ordinal)))<EOL>", "docstring": "Explicitly implement this for unit testing. The flatIdx is not designed\n        to be consistent after serialize / deserialize, and the synapses might not\n        enumerate in the same order.", "id": "f17567:c0:m1"}
{"signature": "def numSynapses(self, segment=None):", "body": "if segment is not None:<EOL><INDENT>return len(segment._synapses)<EOL><DEDENT>return self._numSynapses<EOL>", "docstring": "Returns the number of Synapses.\n\n:param segment: (:class:`Segment`) Optional parameter to get the number of \n       synapses on a segment.\n\n:returns: (int) Number of synapses on all segments if segment is not \n          specified, or on a specified segment.", "id": "f17567:c3:m17"}
{"signature": "def destroySegment(self, segment):", "body": "<EOL>for synapse in segment._synapses:<EOL><INDENT>self._removeSynapseFromPresynapticMap(synapse)<EOL><DEDENT>self._numSynapses -= len(segment._synapses)<EOL>segments = self._cells[segment.cell]._segments<EOL>i = segments.index(segment)<EOL>del segments[i]<EOL>self._freeFlatIdxs.append(segment.flatIdx)<EOL>self._segmentForFlatIdx[segment.flatIdx] = None<EOL>", "docstring": "Destroys a segment.\n\n:param segment: (:class:`Segment`) representing the segment to be destroyed.", "id": "f17567:c3:m10"}
{"signature": "def getSegment(self, cell, idx):", "body": "return self._cells[cell]._segments[idx]<EOL>", "docstring": "Returns a :class:`Segment` object of the specified segment using data from \nthe ``self._cells`` array.\n\n:param cell: (int) cell index\n:param idx:  (int) segment index on a cell\n:returns: (:class:`Segment`) Segment object with index idx on the specified cell", "id": "f17567:c3:m5"}
{"signature": "def _calcDistance(self, inputPattern, distanceNorm=None):", "body": "if distanceNorm is None:<EOL><INDENT>distanceNorm = self.distanceNorm<EOL><DEDENT>if self.useSparseMemory:<EOL><INDENT>if self._protoSizes is None:<EOL><INDENT>self._protoSizes = self._Memory.rowSums()<EOL><DEDENT>overlapsWithProtos = self._Memory.rightVecSumAtNZ(inputPattern)<EOL>inputPatternSum = inputPattern.sum()<EOL>if self.distanceMethod == \"<STR_LIT>\":<EOL><INDENT>dist = inputPattern.sum() - overlapsWithProtos<EOL><DEDENT>elif self.distanceMethod == \"<STR_LIT>\":<EOL><INDENT>dist = inputPatternSum - overlapsWithProtos<EOL>if inputPatternSum > <NUM_LIT:0>:<EOL><INDENT>dist /= inputPatternSum<EOL><DEDENT><DEDENT>elif self.distanceMethod == \"<STR_LIT>\":<EOL><INDENT>overlapsWithProtos /= self._protoSizes<EOL>dist = <NUM_LIT:1.0> - overlapsWithProtos<EOL><DEDENT>elif self.distanceMethod == \"<STR_LIT>\":<EOL><INDENT>maxVal = numpy.maximum(self._protoSizes, inputPatternSum)<EOL>if maxVal.all() > <NUM_LIT:0>:<EOL><INDENT>overlapsWithProtos /= maxVal<EOL><DEDENT>dist = <NUM_LIT:1.0> - overlapsWithProtos<EOL><DEDENT>elif self.distanceMethod == \"<STR_LIT>\":<EOL><INDENT>dist = self._Memory.vecLpDist(self.distanceNorm, inputPattern)<EOL>distMax = dist.max()<EOL>if distMax > <NUM_LIT:0>:<EOL><INDENT>dist /= distMax<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" %<EOL>self.distanceMethod)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if self.distanceMethod == \"<STR_LIT>\":<EOL><INDENT>dist = numpy.power(numpy.abs(self._M - inputPattern), self.distanceNorm)<EOL>dist = dist.sum(<NUM_LIT:1>)<EOL>dist = numpy.power(dist, <NUM_LIT:1.0>/self.distanceNorm)<EOL>dist /= dist.max()<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError (\"<STR_LIT>\")<EOL><DEDENT><DEDENT>return dist<EOL>", "docstring": "Calculate the distances from inputPattern to all stored patterns. 
All\n        distances are between 0.0 and 1.0\n\n        :param inputPattern The pattern from which distances to all other patterns\n            are calculated\n\n        :param distanceNorm Degree of the distance norm", "id": "f17568:c0:m24"}
{"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()<EOL>return state<EOL>", "docstring": "Return serializable state.\n\n        This function will return a version of the __dict__.", "id": "f17568:c0:m35"}
{"signature": "def getPartitionIdKeys(self):", "body": "return list(self._partitionIdMap.keys())<EOL>", "docstring": ":returns: a list containing unique (non-None) partition Ids (just the keys)", "id": "f17568:c0:m20"}
{"signature": "def _labeledInput(activeInputs, cellsPerCol=<NUM_LIT:32>):", "body": "if cellsPerCol == <NUM_LIT:0>:<EOL><INDENT>cellsPerCol = <NUM_LIT:1><EOL><DEDENT>cols = activeInputs.size / cellsPerCol<EOL>activeInputs = activeInputs.reshape(cols, cellsPerCol)<EOL>(cols, cellIdxs) = activeInputs.nonzero()<EOL>if len(cols) == <NUM_LIT:0>:<EOL><INDENT>return \"<STR_LIT>\"<EOL><DEDENT>items = [\"<STR_LIT>\" % (len(cols))]<EOL>prevCol = -<NUM_LIT:1><EOL>for (col,cellIdx) in zip(cols, cellIdxs):<EOL><INDENT>if col != prevCol:<EOL><INDENT>if prevCol != -<NUM_LIT:1>:<EOL><INDENT>items.append(\"<STR_LIT>\")<EOL><DEDENT>items.append(\"<STR_LIT>\" % col)<EOL>prevCol = col<EOL><DEDENT>items.append(\"<STR_LIT>\" % cellIdx)<EOL><DEDENT>items.append(\"<STR_LIT:]>\")<EOL>return \"<STR_LIT:U+0020>\".join(items)<EOL>", "docstring": "Print the list of [column, cellIdx] indices for each of the active\n    cells in activeInputs.", "id": "f17568:m0"}
{"signature": "def getPartitionIdList(self):", "body": "return self._partitionIdList<EOL>", "docstring": ":returns: a list of complete partition id objects", "id": "f17568:c0:m18"}
{"signature": "def _finalizeSVD(self, numSVDDims=None):", "body": "if numSVDDims is not None:<EOL><INDENT>self.numSVDDims = numSVDDims<EOL><DEDENT>if self.numSVDDims==\"<STR_LIT>\":<EOL><INDENT>if self.fractionOfMax is not None:<EOL><INDENT>self.numSVDDims = self.getAdaptiveSVDDims(self._s, self.fractionOfMax)<EOL><DEDENT>else:<EOL><INDENT>self.numSVDDims = self.getAdaptiveSVDDims(self._s)<EOL><DEDENT><DEDENT>if self._vt.shape[<NUM_LIT:0>] < self.numSVDDims:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>print (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\", self._vt.shape[<NUM_LIT:0>])<EOL>print(\"<STR_LIT>\")<EOL>self.numSVDDims = self._vt.shape[<NUM_LIT:0>]<EOL><DEDENT>self._vt = self._vt[:self.numSVDDims]<EOL>if len(self._vt) == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>self._Memory = numpy.zeros((self._numPatterns,self.numSVDDims))<EOL>self._M = self._Memory<EOL>self.useSparseMemory = False<EOL>for i in range(self._numPatterns):<EOL><INDENT>self._Memory[i] = numpy.dot(self._vt, self._a[i])<EOL><DEDENT>self._a = None<EOL>", "docstring": "Called by finalizeLearning(). This will project all the patterns onto the\nSVD eigenvectors.\n:param numSVDDims: (int) number of egeinvectors used for projection.\n:return:", "id": "f17568:c0:m29"}
{"signature": "def getDistances(self, inputPattern):", "body": "dist = self._getDistances(inputPattern)<EOL>return (dist, self._categoryList)<EOL>", "docstring": "Return the distances between the input pattern and all other\n        stored patterns.\n\n        :param inputPattern: pattern to check distance with\n\n        :returns: (distances, categories) numpy arrays of the same length.\n            - overlaps: an integer overlap amount for each category\n            - categories: category index for each element of distances", "id": "f17568:c0:m11"}
{"signature": "def removeCategory(self, categoryToRemove):", "body": "removedRows = <NUM_LIT:0><EOL>if self._Memory is None:<EOL><INDENT>return removedRows<EOL><DEDENT>catToRemove = float(categoryToRemove)<EOL>rowsToRemove = [k for k, catID in enumerate(self._categoryList)if catID == catToRemove]<EOL>self._removeRows(rowsToRemove)<EOL>assert catToRemove not in self._categoryList<EOL>", "docstring": "There are two caveats. First, this is a potentially slow operation. Second,\npattern indices will shift if patterns before them are removed.\n\n:param categoryToRemove: Category label to remove", "id": "f17568:c0:m6"}
{"signature": "def remapCategories(self, mapping):", "body": "categoryArray = numpy.array(self._categoryList)<EOL>newCategoryArray = numpy.zeros(categoryArray.shape[<NUM_LIT:0>])<EOL>newCategoryArray.fill(-<NUM_LIT:1>)<EOL>for i in range(len(mapping)):<EOL><INDENT>newCategoryArray[categoryArray==i] = mapping[i]<EOL><DEDENT>self._categoryList = list(newCategoryArray)<EOL>", "docstring": "Change the category indices.\n\n        Used by the Network Builder to keep the category indices in sync with the\n        ImageSensor categoryInfo when the user renames or removes categories.\n\n        :param mapping: List of new category indices. For example, mapping=[2,0,1]\n            would change all vectors of category 0 to be category 2, category 1 to\n            0, and category 2 to 1", "id": "f17568:c0:m30"}
{"signature": "def doIteration(self):", "body": "self._iterationIdx += <NUM_LIT:1><EOL>", "docstring": "Utility method to increment the iteration index. Intended for models that\ndon't learn each timestep.", "id": "f17568:c0:m8"}
{"signature": "def closestTrainingPattern(self, inputPattern, cat):", "body": "dist = self._getDistances(inputPattern)<EOL>sorted = dist.argsort()<EOL>for patIdx in sorted:<EOL><INDENT>patternCat = self._categoryList[patIdx]<EOL>if patternCat == cat:<EOL><INDENT>if self.useSparseMemory:<EOL><INDENT>closestPattern = self._Memory.getRow(int(patIdx))<EOL><DEDENT>else:<EOL><INDENT>closestPattern = self._M[patIdx]<EOL><DEDENT>return closestPattern<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Returns the closest training pattern to inputPattern that belongs to\n        category \"cat\".\n\n        :param inputPattern: The pattern whose closest neighbor is sought\n\n        :param cat: The required category of closest neighbor\n\n        :returns: A dense version of the closest training pattern, or None if no\n            such patterns exist", "id": "f17568:c0:m14"}
{"signature": "def getPartitionId(self, i):", "body": "if (i < <NUM_LIT:0>) or (i >= self._numPatterns):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>partitionId = self._partitionIdList[i]<EOL>if partitionId == numpy.inf:<EOL><INDENT>return None<EOL><DEDENT>else:<EOL><INDENT>return partitionId<EOL><DEDENT>", "docstring": "Gets the partition id given an index.\n\n:param i: index of partition\n:returns: the partition id associated with pattern i. Returns None if no id\n    is associated with it.", "id": "f17568:c0:m17"}
{"signature": "def clear(self):", "body": "self._Memory = None<EOL>self._numPatterns = <NUM_LIT:0><EOL>self._M = None<EOL>self._categoryList = []<EOL>self._partitionIdList = []<EOL>self._partitionIdMap = {}<EOL>self._finishedLearning = False<EOL>self._iterationIdx = -<NUM_LIT:1><EOL>if self.maxStoredPatterns > <NUM_LIT:0>:<EOL><INDENT>assert self.useSparseMemory, (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>self.fixedCapacity = True<EOL>self._categoryRecencyList = []<EOL><DEDENT>else:<EOL><INDENT>self.fixedCapacity = False<EOL><DEDENT>self._protoSizes = None<EOL>self._s = None<EOL>self._vt = None<EOL>self._nc = None<EOL>self._mean = None<EOL>self._specificIndexTraining = False<EOL>self._nextTrainingIndices = None<EOL>", "docstring": "Clears the state of the KNNClassifier.", "id": "f17568:c0:m1"}
{"signature": "def getAdaptiveSVDDims(self, singularValues, fractionOfMax=<NUM_LIT>):", "body": "v = singularValues/singularValues[<NUM_LIT:0>]<EOL>idx = numpy.where(v<fractionOfMax)[<NUM_LIT:0>]<EOL>if len(idx):<EOL><INDENT>print(\"<STR_LIT>\", idx[<NUM_LIT:0>], \"<STR_LIT>\", len(v))<EOL>return idx[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\", len(v)-<NUM_LIT:1>, \"<STR_LIT>\", len(v))<EOL>return len(v)-<NUM_LIT:1><EOL><DEDENT>", "docstring": "Compute the number of eigenvectors (singularValues) to keep.\n\n:param singularValues:\n:param fractionOfMax:\n:return:", "id": "f17568:c0:m28"}
{"signature": "def transferCoincidences(network, fromElementName, toElementName):", "body": "coincidenceHandle = getLockedHandle(<EOL>runtimeElement=network.getElement(fromElementName),<EOL>expression=\"<STR_LIT>\"<EOL>)<EOL>network.getElement(toElementName).setParameter(\"<STR_LIT>\",<EOL>coincidenceHandle)<EOL>", "docstring": "Gets the coincidence matrix from one element and sets it on\nanother element\n(using locked handles, a la nupic.bindings.research.lockHandle).\n\nTODO: Generalize to more node types, parameter name pairs, etc.\n\nDoes not work across processes.", "id": "f17569:m2"}
{"signature": "def importAndRunFunction(<EOL>path,<EOL>moduleName,<EOL>funcName,<EOL>**keywords<EOL>):", "body": "import sys<EOL>originalPath = sys.path<EOL>try:<EOL><INDENT>augmentedPath = [path] + sys.path<EOL>sys.path = augmentedPath<EOL>func = getattr(__import__(moduleName, fromlist=[funcName]), funcName)<EOL>sys.path = originalPath<EOL><DEDENT>except:<EOL><INDENT>sys.path = originalPath<EOL>raise<EOL><DEDENT>return func(**keywords)<EOL>", "docstring": "Run a named function specified by a filesystem path, module name\nand function name.\n\nReturns the value returned by the imported function.\n\nUse this when access is needed to code that has\nnot been added to a package accessible from the ordinary Python\npath. Encapsulates the multiple lines usually needed to\nsafely manipulate and restore the Python path.\n\nParameters\n----------\npath: filesystem path\nPath to the directory where the desired module is stored.\nThis will be used to temporarily augment the Python path.\n\nmoduleName: basestring\nName of the module, without trailing extension, where the desired\nfunction is stored. This module should be in the directory specified\nwith path.\n\nfuncName: basestring\nName of the function to import and call.\n\nkeywords:\nKeyword arguments to be passed to the imported function.", "id": "f17569:m0"}
{"signature": "def write(self, proto):", "body": "super(TemporalMemoryShim, self).write(proto.baseTM)<EOL>proto.connections.write(self.connections)<EOL>proto.predictiveCells = self.predictiveCells<EOL>", "docstring": "Populate serialization proto instance.\n\n        :param proto: (TemporalMemoryShimProto) the proto instance to populate", "id": "f17570:c0:m4"}
{"signature": "def compute(self, activeColumns, learn=True):", "body": "bottomUpInput = numpy.zeros(self.numberOfCols, dtype=dtype)<EOL>bottomUpInput[list(activeColumns)] = <NUM_LIT:1><EOL>super(TemporalMemoryShim, self).compute(bottomUpInput,<EOL>enableLearn=learn,<EOL>enableInference=True)<EOL>predictedState = self.getPredictedState()<EOL>self.predictiveCells = set(numpy.flatnonzero(predictedState))<EOL>", "docstring": "Feeds input record through TM, performing inference and learning.\nUpdates member variables with new state.\n\n@param activeColumns (set) Indices of active columns in `t`", "id": "f17570:c0:m1"}
{"signature": "def write(self, proto):", "body": "<EOL>proto.columnDimensions = list(self.columnDimensions)<EOL>proto.cellsPerColumn = self.cellsPerColumn<EOL>proto.activationThreshold = self.activationThreshold<EOL>proto.initialPermanence = round(self.initialPermanence, EPSILON_ROUND)<EOL>proto.connectedPermanence = round(self.connectedPermanence, EPSILON_ROUND)<EOL>proto.minThreshold = self.minThreshold<EOL>proto.maxNewSynapseCount = self.maxNewSynapseCount<EOL>proto.permanenceIncrement = round(self.permanenceIncrement, EPSILON_ROUND)<EOL>proto.permanenceDecrement = round(self.permanenceDecrement, EPSILON_ROUND)<EOL>proto.predictedSegmentDecrement = self.predictedSegmentDecrement<EOL>proto.maxSegmentsPerCell = self.maxSegmentsPerCell<EOL>proto.maxSynapsesPerSegment = self.maxSynapsesPerSegment<EOL>self.connections.write(proto.connections)<EOL>self._random.write(proto.random)<EOL>proto.activeCells = list(self.activeCells)<EOL>proto.winnerCells = list(self.winnerCells)<EOL>protoActiveSegments = proto.init(\"<STR_LIT>\", len(self.activeSegments))<EOL>for i, segment in enumerate(self.activeSegments):<EOL><INDENT>protoActiveSegments[i].cell = segment.cell<EOL>idx = self.connections.segmentsForCell(segment.cell).index(segment)<EOL>protoActiveSegments[i].idxOnCell = idx<EOL><DEDENT>protoMatchingSegments = proto.init(\"<STR_LIT>\",<EOL>len(self.matchingSegments))<EOL>for i, segment in enumerate(self.matchingSegments):<EOL><INDENT>protoMatchingSegments[i].cell = segment.cell<EOL>idx = self.connections.segmentsForCell(segment.cell).index(segment)<EOL>protoMatchingSegments[i].idxOnCell = idx<EOL><DEDENT>protoNumActivePotential = proto.init(<EOL>\"<STR_LIT>\",<EOL>len(self.numActivePotentialSynapsesForSegment))<EOL>for i, numActivePotentialSynapses in enumerate(<EOL>self.numActivePotentialSynapsesForSegment):<EOL><INDENT>segment = self.connections.segmentForFlatIdx(i)<EOL>if segment is not None:<EOL><INDENT>protoNumActivePotential[i].cell = segment.cell<EOL>idx = 
self.connections.segmentsForCell(segment.cell).index(segment)<EOL>protoNumActivePotential[i].idxOnCell = idx<EOL>protoNumActivePotential[i].number = numActivePotentialSynapses<EOL><DEDENT><DEDENT>proto.iteration = self.iteration<EOL>protoLastUsedIteration = proto.init(<EOL>\"<STR_LIT>\",<EOL>len(self.numActivePotentialSynapsesForSegment))<EOL>for i, lastUsed in enumerate(self.lastUsedIterationForSegment):<EOL><INDENT>segment = self.connections.segmentForFlatIdx(i)<EOL>if segment is not None:<EOL><INDENT>protoLastUsedIteration[i].cell = segment.cell<EOL>idx = self.connections.segmentsForCell(segment.cell).index(segment)<EOL>protoLastUsedIteration[i].idxOnCell = idx<EOL>protoLastUsedIteration[i].number = lastUsed<EOL><DEDENT><DEDENT>", "docstring": "Writes serialized data to proto object.\n\n:param proto: (DynamicStructBuilder) Proto object", "id": "f17571:c0:m48"}
{"signature": "def activatePredictedColumn(self, column, columnActiveSegments,<EOL>columnMatchingSegments, prevActiveCells,<EOL>prevWinnerCells, learn):", "body": "return self._activatePredictedColumn(<EOL>self.connections, self._random,<EOL>columnActiveSegments, prevActiveCells, prevWinnerCells,<EOL>self.numActivePotentialSynapsesForSegment,<EOL>self.maxNewSynapseCount, self.initialPermanence,<EOL>self.permanenceIncrement, self.permanenceDecrement,<EOL>self.maxSynapsesPerSegment, learn)<EOL>", "docstring": "Determines which cells in a predicted column should be added to winner cells\nlist, and learns on the segments that correctly predicted this column.\n\n:param column: (int) Index of bursting column.\n\n:param columnActiveSegments: (iter) Active segments in this column.\n\n:param columnMatchingSegments: (iter) Matching segments in this column.\n\n:param prevActiveCells: (list) Active cells in ``t-1``.\n\n:param prevWinnerCells: (list) Winner cells in ``t-1``.\n\n:param learn: (bool) If true, grow and reinforce synapses.\n\n:returns: (list) A list of predicted cells that will be added to \n          active cells and winner cells.", "id": "f17571:c0:m6"}
{"signature": "def cellsForColumn(self, column):", "body": "self._validateColumn(column)<EOL>start = self.cellsPerColumn * column<EOL>end = start + self.cellsPerColumn<EOL>return range(start, end)<EOL>", "docstring": "Returns the indices of cells that belong to a column.\n\n:param column: (int) Column index\n\n:returns: (list) Cell indices", "id": "f17571:c0:m19"}
{"signature": "def setPermanenceDecrement(self, permanenceDecrement):", "body": "self.permanenceDecrement = permanenceDecrement<EOL>", "docstring": "Sets the permanence decrement.\n\n:param permanenceDecrement: (float) The permanence decrement.", "id": "f17571:c0:m41"}
{"signature": "def mapCellsToColumns(self, cells):", "body": "cellsForColumns = defaultdict(set)<EOL>for cell in cells:<EOL><INDENT>column = self.columnForCell(cell)<EOL>cellsForColumns[column].add(cell)<EOL><DEDENT>return cellsForColumns<EOL>", "docstring": "Maps cells to the columns they belong to.\n\n:param cells: (set) Cells\n\n:returns: (dict) Mapping from columns to their cells in `cells`", "id": "f17571:c0:m22"}
{"signature": "def getActiveSegments(self):", "body": "return self.activeSegments<EOL>", "docstring": "Returns the active segments.\n\n:returns: (list) Active segments", "id": "f17571:c0:m26"}
{"signature": "@staticmethod<EOL><INDENT>def connectionsFactory(*args, **kwargs):<DEDENT>", "body": "return Connections(*args, **kwargs)<EOL>", "docstring": "Create a :class:`~nupic.algorithms.connections.Connections` instance.  \n:class:`TemporalMemory` subclasses may override this method to choose a \ndifferent :class:`~nupic.algorithms.connections.Connections` implementation, \nor to augment the instance otherwise returned by the default \n:class:`~nupic.algorithms.connections.Connections` implementation.\n\nSee :class:`~nupic.algorithms.connections.Connections` for constructor \nsignature and usage.\n\n:returns: :class:`~nupic.algorithms.connections.Connections` instance", "id": "f17571:c0:m1"}
{"signature": "def getConnectedPermanence(self):", "body": "return self.connectedPermanence<EOL>", "docstring": "Get the connected permanence.\n\n:returns: (float) The connected permanence.", "id": "f17571:c0:m44"}
{"signature": "@staticmethod<EOL><INDENT>def getCellIndex(cell):<DEDENT>", "body": "return cell<EOL>", "docstring": "Returns the index of the cell.\n\n:param cell: (int) cell to find the index of", "id": "f17571:c0:m56"}
{"signature": "def getMaxSynapsesPerSegment(self):", "body": "return self.maxSynapsesPerSegment<EOL>", "docstring": "Get the maximum number of synapses per segment.\n\n:returns: (int) max number of synapses per segment", "id": "f17571:c0:m47"}
{"signature": "def getActivationThreshold(self):", "body": "return self.activationThreshold<EOL>", "docstring": "Returns the activation threshold.\n\n:returns: (int) The activation threshold.", "id": "f17571:c0:m30"}
{"signature": "@classmethod<EOL><INDENT>def read(cls, proto):<DEDENT>", "body": "tm = object.__new__(cls)<EOL>tm.columnDimensions = tuple(proto.columnDimensions)<EOL>tm.cellsPerColumn = int(proto.cellsPerColumn)<EOL>tm.activationThreshold = int(proto.activationThreshold)<EOL>tm.initialPermanence = round(proto.initialPermanence, EPSILON_ROUND)<EOL>tm.connectedPermanence = round(proto.connectedPermanence, EPSILON_ROUND)<EOL>tm.minThreshold = int(proto.minThreshold)<EOL>tm.maxNewSynapseCount = int(proto.maxNewSynapseCount)<EOL>tm.permanenceIncrement = round(proto.permanenceIncrement, EPSILON_ROUND)<EOL>tm.permanenceDecrement = round(proto.permanenceDecrement, EPSILON_ROUND)<EOL>tm.predictedSegmentDecrement = round(proto.predictedSegmentDecrement,<EOL>EPSILON_ROUND)<EOL>tm.maxSegmentsPerCell = int(proto.maxSegmentsPerCell)<EOL>tm.maxSynapsesPerSegment = int(proto.maxSynapsesPerSegment)<EOL>tm.connections = Connections.read(proto.connections)<EOL>tm._random = Random()<EOL>tm._random.read(proto.random)<EOL>tm.activeCells = [int(x) for x in proto.activeCells]<EOL>tm.winnerCells = [int(x) for x in proto.winnerCells]<EOL>flatListLength = tm.connections.segmentFlatListLength()<EOL>tm.numActiveConnectedSynapsesForSegment = [<NUM_LIT:0>] * flatListLength<EOL>tm.numActivePotentialSynapsesForSegment = [<NUM_LIT:0>] * flatListLength<EOL>tm.lastUsedIterationForSegment = [<NUM_LIT:0>] * flatListLength<EOL>tm.activeSegments = []<EOL>tm.matchingSegments = []<EOL>for protoSegment in proto.activeSegments:<EOL><INDENT>tm.activeSegments.append(<EOL>tm.connections.getSegment(protoSegment.cell,<EOL>protoSegment.idxOnCell))<EOL><DEDENT>for protoSegment in proto.matchingSegments:<EOL><INDENT>tm.matchingSegments.append(<EOL>tm.connections.getSegment(protoSegment.cell,<EOL>protoSegment.idxOnCell))<EOL><DEDENT>for protoSegment in proto.numActivePotentialSynapsesForSegment:<EOL><INDENT>segment = 
tm.connections.getSegment(protoSegment.cell,<EOL>protoSegment.idxOnCell)<EOL>tm.numActivePotentialSynapsesForSegment[segment.flatIdx] = (<EOL>int(protoSegment.number))<EOL><DEDENT>tm.iteration = long(proto.iteration)<EOL>for protoSegment in proto.lastUsedIterationForSegment:<EOL><INDENT>segment = tm.connections.getSegment(protoSegment.cell,<EOL>protoSegment.idxOnCell)<EOL>tm.lastUsedIterationForSegment[segment.flatIdx] = (<EOL>long(protoSegment.number))<EOL><DEDENT>return tm<EOL>", "docstring": "Reads deserialized data from proto object.\n\n:param proto: (DynamicStructBuilder) Proto object\n\n:returns: (:class:TemporalMemory) TemporalMemory instance", "id": "f17571:c0:m50"}
{"signature": "def compute(self, activeColumns, learn=True):", "body": "self.activateCells(sorted(activeColumns), learn)<EOL>self.activateDendrites(learn)<EOL>", "docstring": "Perform one time step of the Temporal Memory algorithm.\n\nThis method calls :meth:`activateCells`, then calls \n:meth:`activateDendrites`. Using :class:`TemporalMemory` via its \n:meth:`compute` method ensures that you'll always be able to call \n:meth:`getPredictiveCells` to get predictions for the next time step.\n\n:param activeColumns: (iter) Indices of active columns.\n\n:param learn: (bool) Whether or not learning is enabled.", "id": "f17571:c0:m2"}
{"signature": "def reset(self):", "body": "self.activeCells = []<EOL>self.winnerCells = []<EOL>self.activeSegments = []<EOL>self.matchingSegments = []<EOL>", "docstring": "Indicates the start of a new sequence. Clears any predictions and makes sure\nsynapses don't grow to the currently active cells in the next time step.", "id": "f17571:c0:m5"}
{"signature": "def _validateCell(self, cell):", "body": "if cell >= self.numberOfCells() or cell < <NUM_LIT:0>:<EOL><INDENT>raise IndexError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Raises an error if cell index is invalid.\n\n:param cell: (int) Cell index", "id": "f17571:c0:m54"}
{"signature": "def getCellsPerColumn(self):", "body": "return self.cellsPerColumn<EOL>", "docstring": "Returns the number of cells per column.\n\n:returns: (int) The number of cells per column.", "id": "f17571:c0:m28"}
{"signature": "@classmethod<EOL><INDENT>def _leastUsedCell(cls, random, cells, connections):<DEDENT>", "body": "leastUsedCells = []<EOL>minNumSegments = float(\"<STR_LIT>\")<EOL>for cell in cells:<EOL><INDENT>numSegments = connections.numSegments(cell)<EOL>if numSegments < minNumSegments:<EOL><INDENT>minNumSegments = numSegments<EOL>leastUsedCells = []<EOL><DEDENT>if numSegments == minNumSegments:<EOL><INDENT>leastUsedCells.append(cell)<EOL><DEDENT><DEDENT>i = random.getUInt32(len(leastUsedCells))<EOL>return leastUsedCells[i]<EOL>", "docstring": "Gets the cell with the smallest number of segments.\nBreak ties randomly.\n\n:param random: (Object)\nRandom number generator. Gets mutated.\n\n:param cells: (list)\nIndices of cells.\n\n:param connections: (Object)\nConnections instance for the TM.\n\n:returns: (int) Cell index.", "id": "f17571:c0:m15"}
{"signature": "def numberOfCells(self):", "body": "return self.numberOfColumns() * self.cellsPerColumn<EOL>", "docstring": "Returns the number of cells in this layer.\n\n:returns: (int) Number of cells", "id": "f17571:c0:m21"}
{"signature": "def punishPredictedColumn(self, column, columnActiveSegments,<EOL>columnMatchingSegments, prevActiveCells,<EOL>prevWinnerCells):", "body": "self._punishPredictedColumn(<EOL>self.connections, columnMatchingSegments, prevActiveCells,<EOL>self.predictedSegmentDecrement)<EOL>", "docstring": "Punishes the Segments that incorrectly predicted a column to be active.\n\n:param column: (int) Index of bursting column.\n\n:param columnActiveSegments: (iter) Active segments for this column, or None \n       if there aren't any.\n\n:param columnMatchingSegments: (iter) Matching segments for this column, or \n       None if there aren't any.\n\n:param prevActiveCells: (list) Active cells in ``t-1``.\n\n:param prevWinnerCells: (list) Winner cells in ``t-1``.", "id": "f17571:c0:m8"}
{"signature": "@classmethod<EOL><INDENT>def getCellIndices(cls, cells):<DEDENT>", "body": "return [cls.getCellIndex(c) for c in cells]<EOL>", "docstring": "Returns the indices of the cells passed in.\n\n:param cells: (list) cells to find the indices of", "id": "f17571:c0:m55"}
{"signature": "def _validateColumn(self, column):", "body": "if column >= self.numberOfColumns() or column < <NUM_LIT:0>:<EOL><INDENT>raise IndexError(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Raises an error if column index is invalid.\n\n:param column: (int) Column index", "id": "f17571:c0:m53"}
{"signature": "def setInitialPermanence(self, initialPermanence):", "body": "self.initialPermanence = initialPermanence<EOL>", "docstring": "Sets the initial permanence.\n\n:param initialPermanence: (float) The initial permanence.", "id": "f17571:c0:m33"}
{"signature": "def mmClearHistory(self):", "body": "self._mmTraces = {}<EOL>self._mmData = {}<EOL>", "docstring": "Clears the stored history.", "id": "f17572:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def mmPrettyPrintTraces(traces, breakOnResets=None):<DEDENT>", "body": "assert len(traces) > <NUM_LIT:0>, \"<STR_LIT>\"<EOL>table = PrettyTable([\"<STR_LIT:#>\"] + [trace.prettyPrintTitle() for trace in traces])<EOL>for i in xrange(len(traces[<NUM_LIT:0>].data)):<EOL><INDENT>if breakOnResets and breakOnResets.data[i]:<EOL><INDENT>table.add_row([\"<STR_LIT>\"] * (len(traces) + <NUM_LIT:1>))<EOL><DEDENT>table.add_row([i] +<EOL>[trace.prettyPrintDatum(trace.data[i]) for trace in traces])<EOL><DEDENT>return table.get_string().encode(\"<STR_LIT:utf-8>\")<EOL>", "docstring": "Returns pretty-printed table of traces.\n\n@param traces (list) Traces to print in table\n@param breakOnResets (BoolsTrace) Trace of resets to break table on\n\n@return (string) Pretty-printed table of traces.", "id": "f17572:c0:m2"}
{"signature": "def __init__(self, monitor, title, show=True):", "body": "self._monitor = monitor<EOL>self._title = title<EOL>self._fig = self._initFigure()<EOL>self._show = show<EOL>if self._show:<EOL><INDENT>plt.ion()<EOL>plt.show()<EOL><DEDENT>", "docstring": "@param monitor (MonitorMixinBase) Monitor Mixin instance that generated\n                                  this plot\n\n@param title  (string)            Plot title", "id": "f17573:c0:m0"}
{"signature": "def addGraph(self, data, position=<NUM_LIT>, xlabel=None, ylabel=None):", "body": "ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)<EOL>ax.plot(data)<EOL>plt.draw()<EOL>", "docstring": "Adds a graph to the plot's figure.\n\n        @param data See matplotlib.Axes.plot documentation.\n        @param position A 3-digit number. The first two digits define a 2D grid\n                where subplots may be added. The final digit specifies the nth grid\n                location for the added subplot\n        @param xlabel text to be displayed on the x-axis\n        @param ylabel text to be displayed on the y-axis", "id": "f17573:c0:m3"}
{"signature": "def add2DArray(self, data, position=<NUM_LIT>, xlabel=None, ylabel=None, cmap=None,<EOL>aspect=\"<STR_LIT>\", interpolation=\"<STR_LIT>\", name=None):", "body": "if cmap is None:<EOL><INDENT>cmap = cm.Greys<EOL><DEDENT>ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)<EOL>ax.imshow(data, cmap=cmap, aspect=aspect, interpolation=interpolation)<EOL>if self._show:<EOL><INDENT>plt.draw()<EOL><DEDENT>if name is not None:<EOL><INDENT>if not os.path.exists(\"<STR_LIT>\"):<EOL><INDENT>os.mkdir(\"<STR_LIT>\")<EOL><DEDENT>plt.savefig(\"<STR_LIT>\".format(name=name), bbox_inches=\"<STR_LIT>\",<EOL>figsize=(<NUM_LIT:8>, <NUM_LIT:6>), dpi=<NUM_LIT>)<EOL><DEDENT>", "docstring": "Adds an image to the plot's figure.\n\n        @param data a 2D array. See matplotlib.Axes.imshow documentation.\n        @param position A 3-digit number. The first two digits define a 2D grid\n                where subplots may be added. The final digit specifies the nth grid\n                location for the added subplot\n        @param xlabel text to be displayed on the x-axis\n        @param ylabel text to be displayed on the y-axis\n        @param cmap color map used in the rendering\n        @param aspect how aspect ratio is handled during resize\n        @param interpolation interpolation method", "id": "f17573:c0:m5"}
{"signature": "def mmGetTraceResets(self):", "body": "return self._mmTraces[\"<STR_LIT>\"]<EOL>", "docstring": "@return (Trace) Trace of resets", "id": "f17574:c0:m6"}
{"signature": "def mmGetTracePredictedActiveColumns(self):", "body": "self._mmComputeTransitionTraces()<EOL>return self._mmTraces[\"<STR_LIT>\"]<EOL>", "docstring": "@return (Trace) Trace of predicted => active columns", "id": "f17574:c0:m9"}
{"signature": "def mmGetTracePredictedInactiveColumns(self):", "body": "self._mmComputeTransitionTraces()<EOL>return self._mmTraces[\"<STR_LIT>\"]<EOL>", "docstring": "@return (Trace) Trace of predicted => inactive columns", "id": "f17574:c0:m10"}
{"signature": "def mmGetMetricSequencesPredictedActiveCellsShared(self):", "body": "self._mmComputeTransitionTraces()<EOL>numSequencesForCell = defaultdict(lambda: <NUM_LIT:0>)<EOL>for predictedActiveCells in (<EOL>self._mmData[\"<STR_LIT>\"].values()):<EOL><INDENT>for cell in predictedActiveCells:<EOL><INDENT>numSequencesForCell[cell] += <NUM_LIT:1><EOL><DEDENT><DEDENT>return Metric(self,<EOL>\"<STR_LIT>\",<EOL>numSequencesForCell.values())<EOL>", "docstring": "Metric for number of sequences each predicted => active cell appears in\n\nNote: This metric is flawed when it comes to high-order sequences.\n\n@return (Metric) metric", "id": "f17574:c0:m14"}
{"signature": "def mmGetCellActivityPlot(self, title=\"<STR_LIT>\", showReset=False,<EOL>resetShading=<NUM_LIT>, activityType=\"<STR_LIT>\"):", "body": "if activityType == \"<STR_LIT>\":<EOL><INDENT>self._mmComputeTransitionTraces()<EOL><DEDENT>cellTrace = copy.deepcopy(self._mmTraces[activityType].data)<EOL>for i in xrange(len(cellTrace)):<EOL><INDENT>cellTrace[i] = self.getCellIndices(cellTrace[i])<EOL><DEDENT>return self.mmGetCellTracePlot(cellTrace, self.numberOfCells(),<EOL>activityType, title, showReset,<EOL>resetShading)<EOL>", "docstring": "Returns plot of the cell activity.\n\n@param title        (string)  an optional title for the figure\n\n@param showReset    (bool)    if true, the first set of cell activities\n                              after a reset will have a gray background\n\n@param resetShading (float)   if showReset is true, this float specifies the\n                              intensity of the reset background with 0.0\n                              being white and 1.0 being black\n\n@param activityType (string)  The type of cell activity to display. Valid\n                              types include \"activeCells\",\n                              \"predictiveCells\", \"predictedCells\",\n                              and \"predictedActiveCells\"\n\n@return (Plot) plot", "id": "f17574:c0:m23"}
{"signature": "def mmGetTraceSequenceLabels(self):", "body": "return self._mmTraces[\"<STR_LIT>\"]<EOL>", "docstring": "@return (Trace) Trace of sequence labels", "id": "f17574:c0:m5"}
{"signature": "def mmGetTracePredictiveCells(self):", "body": "return self._mmTraces[\"<STR_LIT>\"]<EOL>", "docstring": "@return (Trace) Trace of predictive cells", "id": "f17574:c0:m2"}
{"signature": "def mmGetMetricFromTrace(self, trace):", "body": "return Metric.createFromTrace(trace.makeCountsTrace(),<EOL>excludeResets=self.mmGetTraceResets())<EOL>", "docstring": "Convenience method to compute a metric over an indices trace, excluding\nresets.\n\n@param (IndicesTrace) Trace of indices\n\n@return (Metric) Metric over trace excluding resets", "id": "f17574:c0:m12"}
{"signature": "def mmGetMetricSequencesPredictedActiveCellsPerColumn(self):", "body": "self._mmComputeTransitionTraces()<EOL>numCellsPerColumn = []<EOL>for predictedActiveCells in (<EOL>self._mmData[\"<STR_LIT>\"].values()):<EOL><INDENT>cellsForColumn = self.mapCellsToColumns(predictedActiveCells)<EOL>numCellsPerColumn += [len(x) for x in cellsForColumn.values()]<EOL><DEDENT>return Metric(self,<EOL>\"<STR_LIT>\",<EOL>numCellsPerColumn)<EOL>", "docstring": "Metric for number of predicted => active cells per column for each sequence\n\n@return (Metric) metric", "id": "f17574:c0:m13"}
{"signature": "def mmGetTraceActiveColumns(self):", "body": "return self._mmTraces[\"<STR_LIT>\"]<EOL>", "docstring": "@return (Trace) Trace of active columns", "id": "f17574:c0:m1"}
{"signature": "def mmGetTracePredictedInactiveCells(self):", "body": "self._mmComputeTransitionTraces()<EOL>return self._mmTraces[\"<STR_LIT>\"]<EOL>", "docstring": "@return (Trace) Trace of predicted => inactive cells", "id": "f17574:c0:m8"}
{"signature": "def mmPrettyPrintConnections(self):", "body": "text = \"<STR_LIT>\"<EOL>text += (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>text += \"<STR_LIT>\"<EOL>columns = range(self.numberOfColumns())<EOL>for column in columns:<EOL><INDENT>cells = self.cellsForColumn(column)<EOL>for cell in cells:<EOL><INDENT>segmentDict = dict()<EOL>for seg in self.connections.segmentsForCell(cell):<EOL><INDENT>synapseList = []<EOL>for synapse in self.connections.synapsesForSegment(seg):<EOL><INDENT>synapseData = self.connections.dataForSynapse(synapse)<EOL>synapseList.append(<EOL>(synapseData.presynapticCell, synapseData.permanence))<EOL><DEDENT>synapseList.sort()<EOL>synapseStringList = [\"<STR_LIT>\".format(sourceCell, permanence) for<EOL>sourceCell, permanence in synapseList]<EOL>segmentDict[seg] = \"<STR_LIT>\".format(\"<STR_LIT:U+0020>\".join(synapseStringList))<EOL><DEDENT>text += (\"<STR_LIT>\".format(<EOL>column, cell,<EOL>len(segmentDict.values()),<EOL>\"<STR_LIT>\".format(\"<STR_LIT:U+002CU+0020>\".join(segmentDict.values()))))<EOL><DEDENT>if column < len(columns) - <NUM_LIT:1>:  <EOL><INDENT>text += \"<STR_LIT:\\n>\"<EOL><DEDENT><DEDENT>text += \"<STR_LIT>\"<EOL>return text<EOL>", "docstring": "Pretty print the connections in the temporal memory.\n\nTODO: Use PrettyTable.\n\n@return (string) Pretty-printed text", "id": "f17574:c0:m15"}
{"signature": "def makeCumCountsTrace(self):", "body": "trace = CountsTrace(self.monitor, \"<STR_LIT>\".format(self.title))<EOL>countsTrace = self.makeCountsTrace()<EOL>def accumulate(iterator):<EOL><INDENT>total = <NUM_LIT:0><EOL>for item in iterator:<EOL><INDENT>total += item<EOL>yield total<EOL><DEDENT><DEDENT>trace.data = list(accumulate(countsTrace.data))<EOL>return trace<EOL>", "docstring": "@return (CountsTrace) A new Trace made up of cumulative counts of this\ntrace's indices.", "id": "f17576:c1:m1"}
{"signature": "@staticmethod<EOL><INDENT>def prettyPrintDatum(datum):<DEDENT>", "body": "return str(datum) if datum is not None else \"<STR_LIT>\"<EOL>", "docstring": "@param datum (object) Datum from `self.data` to pretty-print\n\n@return (string) Pretty-printed datum", "id": "f17576:c0:m2"}
{"signature": "def makeCountsTrace(self):", "body": "trace = CountsTrace(self.monitor, \"<STR_LIT>\".format(self.title))<EOL>trace.data = [len(indices) for indices in self.data]<EOL>return trace<EOL>", "docstring": "@return (CountsTrace) A new Trace made up of counts of this trace's indices.", "id": "f17576:c1:m0"}
{"signature": "def POST(self, name):", "body": "global g_models<EOL>data = json.loads(web.data())<EOL>modelParams = data[\"<STR_LIT>\"]<EOL>predictedFieldName = data[\"<STR_LIT>\"]<EOL>if name in g_models.keys():<EOL><INDENT>raise web.badrequest(\"<STR_LIT>\" % name)<EOL><DEDENT>model = ModelFactory.create(modelParams)<EOL>model.enableInference({'<STR_LIT>': predictedFieldName})<EOL>g_models[name] = model<EOL>return json.dumps({\"<STR_LIT:success>\": name})<EOL>", "docstring": "/models/{name}\n\nschema:\n{\n  \"modelParams\": dict containing model parameters\n  \"predictedFieldName\": str\n}\n\nreturns:\n{\"success\":name}", "id": "f17578:c0:m1"}
{"signature": "def getParticleBest(self, particleId):", "body": "return self._particleBest.get(particleId, (None, None))<EOL>", "docstring": "Return the best score and position for a given particle. The position\n        is given as a dict, with varName:varPosition items in it.\n\n        Parameters:\n        ---------------------------------------------------------------------\n        particleId:    which particle\n        retval:        (bestResult, bestPosition)", "id": "f17579:c0:m14"}
{"signature": "def __init__(self, searchParams, workerID=None, cjDAO=None, jobID=None,<EOL>logLevel=None):", "body": "<EOL>self.logger = logging.getLogger(\"<STR_LIT:.>\".join( ['<STR_LIT>',<EOL>self.__class__.__module__, self.__class__.__name__]))<EOL>if logLevel is not None:<EOL><INDENT>self.logger.setLevel(logLevel)<EOL><DEDENT>random.seed(<NUM_LIT>)<EOL>self._searchParams = searchParams<EOL>self._workerID = workerID<EOL>self._cjDAO = cjDAO<EOL>self._jobID = jobID<EOL>self.logger.info(\"<STR_LIT>\" % (pprint.pformat(<EOL>clippedObj(searchParams))))<EOL>self._createCheckpoints = self._searchParams.get('<STR_LIT>',<EOL>False)<EOL>self._maxModels = self._searchParams.get('<STR_LIT>', None)<EOL>if self._maxModels == -<NUM_LIT:1>:<EOL><INDENT>self._maxModels = None<EOL><DEDENT>self._predictionCacheMaxRecords = self._searchParams.get('<STR_LIT>', None)<EOL>self._speculativeParticles = self._searchParams.get('<STR_LIT>',<EOL>bool(int(Configuration.get(<EOL>'<STR_LIT>'))))<EOL>self._speculativeWaitSecondsMax = float(Configuration.get(<EOL>'<STR_LIT>'))<EOL>self._maxBranching= int(Configuration.get(<EOL>'<STR_LIT>'))<EOL>self._minFieldContribution= float(Configuration.get(<EOL>'<STR_LIT>'))<EOL>self._jobCancelled = False<EOL>if '<STR_LIT>' in self._searchParams:<EOL><INDENT>useTerminators = self._searchParams['<STR_LIT>']<EOL>useTerminators = str(int(useTerminators))<EOL>Configuration.set('<STR_LIT>', useTerminators)<EOL>Configuration.set('<STR_LIT>', useTerminators)<EOL>Configuration.set('<STR_LIT>', useTerminators)<EOL><DEDENT>if '<STR_LIT>' in os.environ:<EOL><INDENT>self._maxModels = int(os.environ['<STR_LIT>'])<EOL><DEDENT>self._dummyModel = self._searchParams.get('<STR_LIT>', None)<EOL>self._tempDir = None<EOL>try:<EOL><INDENT>if '<STR_LIT:description>' in self._searchParams:<EOL><INDENT>if ('<STR_LIT>' in self._searchParams or<EOL>'<STR_LIT>' in self._searchParams or<EOL>'<STR_LIT>' in self._searchParams):<EOL><INDENT>raise 
RuntimeError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>searchParamObj = self._searchParams<EOL>anomalyParams = searchParamObj['<STR_LIT:description>'].get('<STR_LIT>',<EOL>dict())<EOL>if anomalyParams is None:<EOL><INDENT>anomalyParams = dict()<EOL><DEDENT>if (('<STR_LIT>' not in anomalyParams) or<EOL>(anomalyParams['<STR_LIT>'] is None)):<EOL><INDENT>streamDef = self._getStreamDef(searchParamObj['<STR_LIT:description>'])<EOL>from nupic.data.stream_reader import StreamReader<EOL>try:<EOL><INDENT>streamReader = StreamReader(streamDef, isBlocking=False,<EOL>maxTimeout=<NUM_LIT:0>, eofOnTimeout=True)<EOL>anomalyParams['<STR_LIT>'] =streamReader.getDataRowCount()<EOL><DEDENT>except Exception:<EOL><INDENT>anomalyParams['<STR_LIT>'] = None<EOL><DEDENT>self._searchParams['<STR_LIT:description>']['<STR_LIT>'] = anomalyParams<EOL><DEDENT>outDir = self._tempDir = tempfile.mkdtemp()<EOL>expGenerator([<EOL>'<STR_LIT>' % (<EOL>json.dumps(self._searchParams['<STR_LIT:description>'])),<EOL>'<STR_LIT>',<EOL>'<STR_LIT>' % (outDir)])<EOL>permutationsScript = os.path.join(outDir, '<STR_LIT>')<EOL><DEDENT>elif '<STR_LIT>' in self._searchParams:<EOL><INDENT>if ('<STR_LIT:description>' in self._searchParams or<EOL>'<STR_LIT>' in self._searchParams or<EOL>'<STR_LIT>' in self._searchParams):<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>permutationsScript = self._searchParams['<STR_LIT>']<EOL><DEDENT>elif '<STR_LIT>' in self._searchParams:<EOL><INDENT>if ('<STR_LIT:description>' in self._searchParams or<EOL>'<STR_LIT>' in self._searchParams):<EOL><INDENT>raise RuntimeError(<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>assert ('<STR_LIT>' in self._searchParams)<EOL>outDir = self._tempDir = tempfile.mkdtemp()<EOL>permutationsScript = os.path.join(outDir, '<STR_LIT>')<EOL>fd = open(permutationsScript, '<STR_LIT:w>')<EOL>fd.write(self._searchParams['<STR_LIT>'])<EOL>fd.close()<EOL>fd = 
open(os.path.join(outDir, '<STR_LIT>'), '<STR_LIT:w>')<EOL>fd.write(self._searchParams['<STR_LIT>'])<EOL>fd.close()<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError (\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>self._basePath = os.path.dirname(permutationsScript)<EOL>self._baseDescription = open(os.path.join(self._basePath,<EOL>'<STR_LIT>')).read()<EOL>self._baseDescriptionHash = hashlib.md5(self._baseDescription).digest()<EOL>modelDescription, _ = helpers.loadExperiment(self._basePath)<EOL>self._readPermutationsFile(permutationsScript, modelDescription)<EOL>if self._cjDAO is not None:<EOL><INDENT>updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,<EOL>fieldName='<STR_LIT>',<EOL>curValue=None,<EOL>newValue = self._baseDescription)<EOL>if updated:<EOL><INDENT>permContents = open(permutationsScript).read()<EOL>self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,<EOL>fieldName='<STR_LIT>',<EOL>curValue=None,<EOL>newValue = permContents)<EOL><DEDENT><DEDENT>if self._dummyModelParamsFunc is not None:<EOL><INDENT>if self._dummyModel is None:<EOL><INDENT>self._dummyModel = dict()<EOL><DEDENT><DEDENT>if self.logger.getEffectiveLevel() <= logging.DEBUG:<EOL><INDENT>msg = io.StringIO()<EOL>print(\"<STR_LIT>\", file=msg)<EOL>info = dict()<EOL>for key in ['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>info[key] = getattr(self, key)<EOL><DEDENT>print(pprint.pformat(info), file=msg)<EOL>self.logger.debug(msg.getvalue())<EOL>msg.close()<EOL><DEDENT>self._resultsDB = ResultsDB(self)<EOL>self._swarmTerminator = SwarmTerminator()<EOL>self._hsState = None<EOL>self._maxUniqueModelAttempts = int(Configuration.get(<EOL>'<STR_LIT>'))<EOL>self._modelOrphanIntervalSecs = float(Configuration.get(<EOL>'<STR_LIT>'))<EOL>self._maxPctErrModels = float(Configuration.get(<EOL>'<STR_LIT>'))<EOL><DEDENT>except:<EOL><INDENT>if self._tempDir is not None:<EOL><INDENT>shutil.rmtree(self._tempDir)<EOL>self._tempDir = 
None<EOL><DEDENT>raise<EOL><DEDENT>return<EOL>", "docstring": "Instantiate the HyperseachV2 instance.\n\n        Parameters:\n        ----------------------------------------------------------------------\n        searchParams:   a dict of the job's search parameters. The format is:\n\n          persistentJobGUID:  REQUIRED.\n                              Persistent, globally-unique identifier for this job\n                              for use in constructing persistent model checkpoint\n                              keys. MUST be compatible with S3 key-naming rules, but\n                              MUST NOT contain forward slashes. This GUID is\n                              expected to retain its global uniqueness across\n                              clusters and cluster software updates (unlike the\n                              record IDs in the Engine's jobs table, which recycle\n                              upon table schema change and software update). In the\n                              future, this may also be instrumental for checkpoint\n                              garbage collection.\n\n          permutationsPyFilename:\n                              OPTIONAL - path to permutations.py file\n          permutationsPyContents:\n                              OPTIONAL - JSON encoded string with\n                                          contents of permutations.py file\n          descriptionPyContents:\n                              OPTIONAL - JSON encoded string with\n                                          contents of base description.py file\n          description:        OPTIONAL - JSON description of the search\n          createCheckpoints:  OPTIONAL - Whether to create checkpoints\n          useTerminators      OPTIONAL - True of False (default config.xml). 
When set\n                                         to False, the model and swarm terminators\n                                         are disabled\n          maxModels:          OPTIONAL - max # of models to generate\n                                        NOTE: This is a deprecated location for this\n                                        setting. Now, it should be specified through\n                                        the maxModels variable within the permutations\n                                        file, or maxModels in the JSON description\n          dummyModel:         OPTIONAL - Either (True/False) or a dict of parameters\n                                         for a dummy model. If this key is absent,\n                                         a real model is trained.\n                                         See utils.py/OPFDummyModel runner for the\n                                         schema of the dummy parameters\n          speculativeParticles OPTIONAL - True or False (default obtained from\n                                         nupic.hypersearch.speculative.particles.default\n                                         configuration property). See note below.\n\n          NOTE: The caller must provide just ONE of the following to describe the\n          hypersearch:\n                1.) permutationsPyFilename\n            OR  2.) permutationsPyContents & permutationsPyContents\n            OR  3.) description\n\n          The schema for the description element can be found at:\n           \"py/nupic/frameworks/opf/expGenerator/experimentDescriptionSchema.json\"\n\n          NOTE about speculativeParticles: If true (not 0), hypersearch workers will\n          go ahead and create and run particles in subsequent sprints and\n          generations before the current generation or sprint has been completed. 
If\n          false, a worker will wait in a sleep loop until the current generation or\n          sprint has finished before choosing the next particle position or going\n          into the next sprint. When true, the best model can be found faster, but\n          results are less repeatable due to the randomness of when each worker\n          completes each particle. This property can be overridden via the\n          speculativeParticles element of the Hypersearch job params.\n\n\n        workerID:   our unique Hypersearch worker ID\n\n        cjDAO:      ClientJobsDB Data Access Object\n        jobID:      job ID for this hypersearch job\n        logLevel:   override logging level to this value, if not None", "id": "f17579:c1:m0"}
{"signature": "def close(self):", "body": "if self._tempDir is not None and os.path.isdir(self._tempDir):<EOL><INDENT>self.logger.debug(\"<STR_LIT>\", self._tempDir)<EOL>shutil.rmtree(self._tempDir)<EOL>self._tempDir = None<EOL><DEDENT>return<EOL>", "docstring": "Deletes temporary system objects/files.", "id": "f17579:c1:m3"}
{"signature": "def getComplexVariableLabelLookupDict(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Generates a lookup dictionary of permutation variables whose values\n        are too complex for labels, so that artificial labels have to be generated\n        for them.\n\n        Parameters:\n        ---------------------------------------------------------\n        retval:       A look-up dictionary of permutation\n                      variables whose values are too complex for labels, so\n                      artificial labels were generated instead (e.g., \"Choice0\",\n                      \"Choice1\", etc.); the key is the name of the complex variable\n                      and the value is:\n                        dict(labels=<list_of_labels>, values=<list_of_values>).", "id": "f17579:c1:m8"}
{"signature": "def _hsStatePeriodicUpdate(self, exhaustedSwarmId=None):", "body": "if self._hsState is None:<EOL><INDENT>self._hsState =  HsState(self)<EOL><DEDENT>self._hsState.readStateFromDB()<EOL>completedSwarms = set()<EOL>if exhaustedSwarmId is not None:<EOL><INDENT>self.logger.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (exhaustedSwarmId))<EOL>(particles, _, _, _, _) = self._resultsDB.getParticleInfos(<EOL>swarmId=exhaustedSwarmId, matured=False)<EOL>if len(particles) > <NUM_LIT:0>:<EOL><INDENT>exhaustedSwarmStatus = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>exhaustedSwarmStatus = '<STR_LIT>'<EOL><DEDENT><DEDENT>if self._killUselessSwarms:<EOL><INDENT>self._hsState.killUselessSwarms()<EOL><DEDENT>completingSwarms = self._hsState.getCompletingSwarms()<EOL>for swarmId in completingSwarms:<EOL><INDENT>(particles, _, _, _, _) = self._resultsDB.getParticleInfos(<EOL>swarmId=swarmId, matured=False)<EOL>if len(particles) == <NUM_LIT:0>:<EOL><INDENT>completedSwarms.add(swarmId)<EOL><DEDENT><DEDENT>completedSwarmGens = self._resultsDB.getMaturedSwarmGenerations()<EOL>priorCompletedSwarms = self._hsState.getCompletedSwarms()<EOL>for (swarmId, genIdx, errScore) in completedSwarmGens:<EOL><INDENT>if swarmId in priorCompletedSwarms:<EOL><INDENT>continue<EOL><DEDENT>completedList = self._swarmTerminator.recordDataPoint(<EOL>swarmId=swarmId, generation=genIdx, errScore=errScore)<EOL>statusMsg = \"<STR_LIT>\"\"<STR_LIT>\" % (genIdx, swarmId, errScore)<EOL>if len(completedList) > <NUM_LIT:0>:<EOL><INDENT>statusMsg = \"<STR_LIT>\" % (statusMsg, completedList)<EOL><DEDENT>self.logger.info(statusMsg)<EOL>self._cjDAO.jobSetFields (jobID=self._jobID,<EOL>fields=dict(engStatus=statusMsg),<EOL>useConnectionID=False,<EOL>ignoreUnchanged=True)<EOL>if '<STR_LIT>' in os.environ:<EOL><INDENT>while True:<EOL><INDENT>resultsStr = self._cjDAO.jobGetFields(self._jobID, ['<STR_LIT>'])[<NUM_LIT:0>]<EOL>if resultsStr is None:<EOL><INDENT>results = 
{}<EOL><DEDENT>else:<EOL><INDENT>results = json.loads(resultsStr)<EOL><DEDENT>if not '<STR_LIT>' in results:<EOL><INDENT>results['<STR_LIT>'] = {}<EOL><DEDENT>for swarm in completedList:<EOL><INDENT>if swarm not in results['<STR_LIT>']:<EOL><INDENT>results['<STR_LIT>'][swarm] = (genIdx,<EOL>self._swarmTerminator.swarmScores[swarm])<EOL><DEDENT><DEDENT>newResultsStr = json.dumps(results)<EOL>if newResultsStr == resultsStr:<EOL><INDENT>break<EOL><DEDENT>updated = self._cjDAO.jobSetFieldIfEqual(jobID=self._jobID,<EOL>fieldName='<STR_LIT>',<EOL>curValue=resultsStr,<EOL>newValue = json.dumps(results))<EOL>if updated:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>if len(completedList) > <NUM_LIT:0>:<EOL><INDENT>for name in completedList:<EOL><INDENT>self.logger.info(\"<STR_LIT>\"<EOL>\"<STR_LIT:%s>\" % (name, genIdx, errScore))<EOL><DEDENT>completedSwarms = completedSwarms.union(completedList)<EOL><DEDENT><DEDENT>if len(completedSwarms)==<NUM_LIT:0> and (exhaustedSwarmId is None):<EOL><INDENT>return<EOL><DEDENT>while True:<EOL><INDENT>if exhaustedSwarmId is not None:<EOL><INDENT>self._hsState.setSwarmState(exhaustedSwarmId, exhaustedSwarmStatus)<EOL><DEDENT>for swarmId in completedSwarms:<EOL><INDENT>self._hsState.setSwarmState(swarmId, '<STR_LIT>')<EOL><DEDENT>if not self._hsState.isDirty():<EOL><INDENT>return<EOL><DEDENT>success = self._hsState.writeStateToDB()<EOL>if success:<EOL><INDENT>jobResultsStr = self._cjDAO.jobGetFields(self._jobID, ['<STR_LIT>'])[<NUM_LIT:0>]<EOL>if jobResultsStr is not None:<EOL><INDENT>jobResults = json.loads(jobResultsStr)<EOL>bestModelId = jobResults.get('<STR_LIT>', None)<EOL><DEDENT>else:<EOL><INDENT>bestModelId = None<EOL><DEDENT>for swarmId in list(completedSwarms):<EOL><INDENT>(_, modelIds, _, _, _) = self._resultsDB.getParticleInfos(<EOL>swarmId=swarmId, completed=False)<EOL>if bestModelId in modelIds:<EOL><INDENT>modelIds.remove(bestModelId)<EOL><DEDENT>if len(modelIds) == 
<NUM_LIT:0>:<EOL><INDENT>continue<EOL><DEDENT>self.logger.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (swarmId,<EOL>str(modelIds)))<EOL>for modelId in modelIds:<EOL><INDENT>self._cjDAO.modelSetFields(modelId,<EOL>dict(engStop=ClientJobsDAO.STOP_REASON_KILLED),<EOL>ignoreUnchanged = True)<EOL><DEDENT><DEDENT>return<EOL><DEDENT>self._hsState.readStateFromDB()<EOL>self.logger.debug(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (pprint.pformat(self._hsState._state, indent=<NUM_LIT:4>)))<EOL><DEDENT>", "docstring": "Periodically, check to see if we should remove a certain field combination\nfrom evaluation (because it is doing so poorly) or move on to the next\nsprint (add in more fields).\n\nThis method is called from _getCandidateParticleAndSwarm(), which is called\nright before we try and create a new model to run.\n\nParameters:\n-----------------------------------------------------------------------\nremoveSwarmId:     If not None, force a change to the current set of active\n                  swarms by removing this swarm. This is used in situations\n                  where we can't find any new unique models to create in\n                  this swarm. In these situations, we update the hypersearch\n                  state regardless of the timestamp of the last time another\n                  worker updated it.", "id": "f17579:c1:m11"}
{"signature": "def runModel(self, modelID, jobID, modelParams, modelParamsHash,<EOL>jobsDAO, modelCheckpointGUID):", "body": "<EOL>if not self._createCheckpoints:<EOL><INDENT>modelCheckpointGUID = None<EOL><DEDENT>self._resultsDB.update(modelID=modelID,<EOL>modelParams=modelParams,<EOL>modelParamsHash=modelParamsHash,<EOL>metricResult = None,<EOL>completed = False,<EOL>completionReason = None,<EOL>matured = False,<EOL>numRecords = <NUM_LIT:0>)<EOL>structuredParams = modelParams['<STR_LIT>']<EOL>if self.logger.getEffectiveLevel() <= logging.DEBUG:<EOL><INDENT>self.logger.debug(\"<STR_LIT>\" %(pprint.pformat(modelParams, indent=<NUM_LIT:4>), modelID))<EOL><DEDENT>cpuTimeStart = time.clock()<EOL>logLevel = self.logger.getEffectiveLevel()<EOL>try:<EOL><INDENT>if self._dummyModel is None or self._dummyModel is False:<EOL><INDENT>(cmpReason, cmpMsg) = runModelGivenBaseAndParams(<EOL>modelID=modelID,<EOL>jobID=jobID,<EOL>baseDescription=self._baseDescription,<EOL>params=structuredParams,<EOL>predictedField=self._predictedField,<EOL>reportKeys=self._reportKeys,<EOL>optimizeKey=self._optimizeKey,<EOL>jobsDAO=jobsDAO,<EOL>modelCheckpointGUID=modelCheckpointGUID,<EOL>logLevel=logLevel,<EOL>predictionCacheMaxRecords=self._predictionCacheMaxRecords)<EOL><DEDENT>else:<EOL><INDENT>dummyParams = dict(self._dummyModel)<EOL>dummyParams['<STR_LIT>'] = structuredParams<EOL>if self._dummyModelParamsFunc is not None:<EOL><INDENT>permInfo = dict(structuredParams)<EOL>permInfo ['<STR_LIT>'] = modelParams['<STR_LIT>']['<STR_LIT>']<EOL>dummyParams.update(self._dummyModelParamsFunc(permInfo))<EOL><DEDENT>(cmpReason, cmpMsg) = 
runDummyModel(<EOL>modelID=modelID,<EOL>jobID=jobID,<EOL>params=dummyParams,<EOL>predictedField=self._predictedField,<EOL>reportKeys=self._reportKeys,<EOL>optimizeKey=self._optimizeKey,<EOL>jobsDAO=jobsDAO,<EOL>modelCheckpointGUID=modelCheckpointGUID,<EOL>logLevel=logLevel,<EOL>predictionCacheMaxRecords=self._predictionCacheMaxRecords)<EOL><DEDENT>jobsDAO.modelSetCompleted(modelID,<EOL>completionReason = cmpReason,<EOL>completionMsg = cmpMsg,<EOL>cpuTime = time.clock() - cpuTimeStart)<EOL><DEDENT>except InvalidConnectionException as e:<EOL><INDENT>self.logger.warn(\"<STR_LIT:%s>\", e)<EOL><DEDENT>", "docstring": "Run the given model.\n\n        This runs the model described by 'modelParams'. Periodically, it updates\n        the results seen on the model to the model database using the databaseAO\n        (database Access Object) methods.\n\n        Parameters:\n        -------------------------------------------------------------------------\n        modelID:             ID of this model in models table\n\n        jobID:               ID for this hypersearch job in the jobs table\n\n        modelParams:         parameters of this specific model\n                             modelParams is a dictionary containing the name/value\n                             pairs of each variable we are permuting over. Note that\n                             variables within an encoder spec have their name\n                             structure as:\n                               <encoderName>.<encodrVarName>\n\n        modelParamsHash:     hash of modelParamValues\n\n        jobsDAO              jobs data access object - the interface to the jobs\n                              database where model information is stored\n\n        modelCheckpointGUID: A persistent, globally-unique identifier for\n                              constructing the model checkpoint key", "id": "f17579:c1:m17"}
{"signature": "def getExpectedNumModels(self):", "body": "return -<NUM_LIT:1><EOL>", "docstring": "Computes the number of models that are expected to complete as part of\n        this instances's HyperSearch.\n\n        NOTE: This is compute-intensive for HyperSearches with a huge number of\n        combinations.\n\n        NOTE/TODO:  THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the\n                    benefit of perutations_runner.py for use in progress\n                    reporting.\n\n        Parameters:\n        ---------------------------------------------------------\n        retval:       The total number of expected models, if known; -1 if unknown", "id": "f17579:c1:m5"}
{"signature": "def getModelNames(self):", "body": "return None<EOL>", "docstring": "Generates a list of model names that are expected to complete as part of\n        this instances's HyperSearch.\n\n        NOTE: This is compute-intensive for HyperSearches with a huge number of\n        combinations.\n\n        NOTE/TODO:  THIS ONLY WORKS FOR RONOMATIC: This method is exposed for the\n                    benefit of perutations_runner.py.\n\n        Parameters:\n        ---------------------------------------------------------\n        retval:       List of model names for this HypersearchV2 instance, or\n                      None of not applicable", "id": "f17579:c1:m6"}
{"signature": "def numModels(self, swarmId=None, includeHidden=False):", "body": "<EOL>if includeHidden:<EOL><INDENT>if swarmId is None:<EOL><INDENT>return len(self._allResults)<EOL><DEDENT>else:<EOL><INDENT>return len(self._swarmIdToIndexes.get(swarmId, []))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if swarmId is None:<EOL><INDENT>entries = self._allResults<EOL><DEDENT>else:<EOL><INDENT>entries = [self._allResults[entryIdx]<EOL>for entryIdx in self._swarmIdToIndexes.get(swarmId,[])]<EOL><DEDENT>return len([entry for entry in entries if not entry['<STR_LIT>']])<EOL><DEDENT>", "docstring": "Return the total # of models we have in our database (if swarmId is\n        None) or in a specific swarm.\n\n        Parameters:\n        ---------------------------------------------------------------------\n        swarmId:        A string representation of the sorted list of encoders\n                        in this swarm. For example '__address_encoder.__gym_encoder'\n        includeHidden:  If False, this will only return the number of models\n                        that are not hidden (i.e. orphanned, etc.)\n        retval:  numModels", "id": "f17579:c0:m6"}
{"signature": "def getOptimizationMetricInfo(self):", "body": "return (self._optimizeKey, self._maximize)<EOL>", "docstring": "Retrives the optimization key name and optimization function.\n\n        Parameters:\n        ---------------------------------------------------------\n        retval:       (optimizationMetricKey, maximize)\n                      optimizationMetricKey: which report key to optimize for\n                      maximize: True if we should try and maximize the optimizeKey\n                        metric. False if we should minimize it.", "id": "f17579:c1:m9"}
{"signature": "def firstNonFullGeneration(self, swarmId, minNumParticles):", "body": "if not swarmId in self._swarmNumParticlesPerGeneration:<EOL><INDENT>return None<EOL><DEDENT>numPsPerGen = self._swarmNumParticlesPerGeneration[swarmId]<EOL>numPsPerGen = numpy.array(numPsPerGen)<EOL>firstNonFull = numpy.where(numPsPerGen < minNumParticles)[<NUM_LIT:0>]<EOL>if len(firstNonFull) == <NUM_LIT:0>:<EOL><INDENT>return len(numPsPerGen)<EOL><DEDENT>else:<EOL><INDENT>return firstNonFull[<NUM_LIT:0>]<EOL><DEDENT>", "docstring": "Return the generation index of the first generation in the given\n        swarm that does not have numParticles particles in it, either still in the\n        running state or completed. This does not include orphaned particles.\n\n        Parameters:\n        ---------------------------------------------------------------------\n        swarmId:  A string representation of the sorted list of encoders in this\n                     swarm. For example '__address_encoder.__gym_encoder'\n        minNumParticles: minium number of partices required for a full\n                      generation.\n\n        retval:  generation index, or None if no particles at all.", "id": "f17579:c0:m12"}
{"signature": "def _getStreamDef(self, modelDescription):", "body": "<EOL>aggregationPeriod = {<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>}<EOL>aggFunctionsDict = {}<EOL>if '<STR_LIT>' in modelDescription['<STR_LIT>']:<EOL><INDENT>for key in list(aggregationPeriod.keys()):<EOL><INDENT>if key in modelDescription['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>aggregationPeriod[key] = modelDescription['<STR_LIT>']['<STR_LIT>'][key]<EOL><DEDENT><DEDENT>if '<STR_LIT>' in modelDescription['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>for (fieldName, func) in modelDescription['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>aggFunctionsDict[fieldName] = str(func)<EOL><DEDENT><DEDENT><DEDENT>hasAggregation = False<EOL>for v in list(aggregationPeriod.values()):<EOL><INDENT>if v != <NUM_LIT:0>:<EOL><INDENT>hasAggregation = True<EOL>break<EOL><DEDENT><DEDENT>aggFunctionList = list(aggFunctionsDict.items())<EOL>aggregationInfo = dict(aggregationPeriod)<EOL>aggregationInfo['<STR_LIT>'] = aggFunctionList<EOL>streamDef = copy.deepcopy(modelDescription['<STR_LIT>'])<EOL>streamDef['<STR_LIT>'] = copy.deepcopy(aggregationInfo)<EOL>return streamDef<EOL>", "docstring": "Generate stream definition based on", "id": "f17579:c1:m1"}
{"signature": "def __init__(self, hsObj):", "body": "self._hsObj = hsObj<EOL>self._allResults = []<EOL>self._errModels = set()<EOL>self._numErrModels = <NUM_LIT:0><EOL>self._completedModels = set()<EOL>self._numCompletedModels = <NUM_LIT:0><EOL>self._modelIDToIdx = dict()<EOL>self._bestResult = numpy.inf<EOL>self._bestModelID = None<EOL>self._swarmBestOverall = dict()<EOL>self._swarmNumParticlesPerGeneration = dict()<EOL>self._modifiedSwarmGens = set()<EOL>self._maturedSwarmGens = set()<EOL>self._particleBest = dict()<EOL>self._particleLatestGenIdx = dict()<EOL>self._swarmIdToIndexes = dict()<EOL>self._paramsHashToIndexes = dict()<EOL>", "docstring": "Instantiate our results database\n\n        Parameters:\n        --------------------------------------------------------------------\n        hsObj:        Reference to the HypersearchV2 instance", "id": "f17579:c0:m0"}
{"signature": "def getParticleInfos(self, swarmId=None, genIdx=None, completed=None,<EOL>matured=None, lastDescendent=False):", "body": "<EOL>if swarmId is not None:<EOL><INDENT>entryIdxs = self._swarmIdToIndexes.get(swarmId, [])<EOL><DEDENT>else:<EOL><INDENT>entryIdxs = list(range(len(self._allResults)))<EOL><DEDENT>if len(entryIdxs) == <NUM_LIT:0>:<EOL><INDENT>return ([], [], [], [], [])<EOL><DEDENT>particleStates = []<EOL>modelIds = []<EOL>errScores = []<EOL>completedFlags = []<EOL>maturedFlags = []<EOL>for idx in entryIdxs:<EOL><INDENT>entry = self._allResults[idx]<EOL>if swarmId is not None:<EOL><INDENT>assert (not entry['<STR_LIT>'])<EOL><DEDENT>modelParams = entry['<STR_LIT>']<EOL>isCompleted = entry['<STR_LIT>']<EOL>isMatured = entry['<STR_LIT>']<EOL>particleState = modelParams['<STR_LIT>']<EOL>particleGenIdx = particleState['<STR_LIT>']<EOL>particleId = particleState['<STR_LIT:id>']<EOL>if genIdx is not None and particleGenIdx != genIdx:<EOL><INDENT>continue<EOL><DEDENT>if completed is not None and (completed != isCompleted):<EOL><INDENT>continue<EOL><DEDENT>if matured is not None and (matured != isMatured):<EOL><INDENT>continue<EOL><DEDENT>if lastDescendentand (self._particleLatestGenIdx[particleId] != particleGenIdx):<EOL><INDENT>continue<EOL><DEDENT>particleStates.append(particleState)<EOL>modelIds.append(entry['<STR_LIT>'])<EOL>errScores.append(entry['<STR_LIT>'])<EOL>completedFlags.append(isCompleted)<EOL>maturedFlags.append(isMatured)<EOL><DEDENT>return (particleStates, modelIds, errScores, completedFlags, maturedFlags)<EOL>", "docstring": "Return a list of particleStates for all particles we know about in\n        the given swarm, their model Ids, and metric results.\n\n        Parameters:\n        ---------------------------------------------------------------------\n        swarmId:  A string representation of the sorted list of encoders in this\n                     swarm. 
For example '__address_encoder.__gym_encoder'\n\n        genIdx:  If not None, only return particles at this specific generation\n                      index.\n\n        completed:   If not None, only return particles of the given state (either\n                    completed if 'completed' is True, or running if 'completed'\n                    is false\n\n        matured:   If not None, only return particles of the given state (either\n                    matured if 'matured' is True, or not matured if 'matured'\n                    is false. Note that any model which has completed is also\n                    considered matured.\n\n        lastDescendent: If True, only return particles that are the last descendent,\n                    that is, the highest generation index for a given particle Id\n\n        retval:  (particleStates, modelIds, errScores, completed, matured)\n                  particleStates: list of particleStates\n                  modelIds: list of modelIds\n                  errScores: list of errScores, numpy.inf is plugged in\n                                  if we don't have a result yet\n                  completed: list of completed booleans\n                  matured: list of matured booleans", "id": "f17579:c0:m9"}
{"signature": "def _checkForOrphanedModels (self):", "body": "self.logger.debug(\"<STR_LIT>\" %(self._modelOrphanIntervalSecs))<EOL>while True:<EOL><INDENT>orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID,<EOL>self._modelOrphanIntervalSecs)<EOL>if orphanedModelId is None:<EOL><INDENT>return<EOL><DEDENT>self.logger.info(\"<STR_LIT>\" % (orphanedModelId))<EOL>for attempt in range(<NUM_LIT:100>):<EOL><INDENT>paramsHash = hashlib.md5(\"<STR_LIT>\" % (orphanedModelId,<EOL>attempt)).digest()<EOL>particleHash = hashlib.md5(\"<STR_LIT>\" % (orphanedModelId,<EOL>attempt)).digest()<EOL>try:<EOL><INDENT>self._cjDAO.modelSetFields(orphanedModelId,<EOL>dict(engParamsHash=paramsHash,<EOL>engParticleHash=particleHash))<EOL>success = True<EOL><DEDENT>except:<EOL><INDENT>success = False<EOL><DEDENT>if success:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if not success:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>self._cjDAO.modelSetCompleted(modelID=orphanedModelId,<EOL>completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN,<EOL>completionMsg=\"<STR_LIT>\")<EOL>self._resultsDB.update(modelID=orphanedModelId,<EOL>modelParams=None,<EOL>modelParamsHash=paramsHash,<EOL>metricResult=None,<EOL>completed = True,<EOL>completionReason = ClientJobsDAO.CMPL_REASON_ORPHAN,<EOL>matured = True,<EOL>numRecords = <NUM_LIT:0>)<EOL><DEDENT>", "docstring": "If there are any models that haven't been updated in a while, consider\n        them dead, and mark them as hidden in our resultsDB. We also change the\n        paramsHash and particleHash of orphaned models so that we can\n        re-generate that particle and/or model again if we desire.\n\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:", "id": "f17579:c1:m10"}
{"signature": "def agitate(self):", "body": "<EOL>self._velocity *= <NUM_LIT> / self._inertia<EOL>maxV = (self.max - self.min)/<NUM_LIT:2><EOL>if self._velocity > maxV:<EOL><INDENT>self._velocity = maxV<EOL><DEDENT>elif self._velocity < -maxV:<EOL><INDENT>self._velocity = -maxV<EOL><DEDENT>if self._position == self.max and self._velocity > <NUM_LIT:0>:<EOL><INDENT>self._velocity *= -<NUM_LIT:1><EOL><DEDENT>if self._position == self.min and self._velocity < <NUM_LIT:0>:<EOL><INDENT>self._velocity *= -<NUM_LIT:1><EOL><DEDENT>", "docstring": "See comments in base class.", "id": "f17581:c1:m5"}
{"signature": "def __repr__(self):", "body": "return (\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (<EOL>self.min, self.max, self.stepSize, self.getPosition(),<EOL>self._position, self._velocity, self._bestPosition,<EOL>self._bestResult))<EOL>", "docstring": "See comments in base class.", "id": "f17581:c1:m1"}
{"signature": "def newPosition(self, globalBestPosition, rng):", "body": "<EOL>lb=float(Configuration.get(\"<STR_LIT>\"))<EOL>ub=float(Configuration.get(\"<STR_LIT>\"))<EOL>self._velocity = (self._velocity * self._inertia + rng.uniform(lb, ub) *<EOL>self._cogRate * (self._bestPosition - self.getPosition()))<EOL>if globalBestPosition is not None:<EOL><INDENT>self._velocity += rng.uniform(lb, ub) * self._socRate * (<EOL>globalBestPosition - self.getPosition())<EOL><DEDENT>self._position += self._velocity<EOL>self._position = max(self.min, self._position)<EOL>self._position = min(self.max, self._position)<EOL>return self.getPosition()<EOL>", "docstring": "See comments in base class.", "id": "f17581:c1:m6"}
{"signature": "def getDict(self, encoderName, flattenedChosenValues):", "body": "encoder = dict(fieldname=self.fieldName,<EOL>name=self.name)<EOL>for encoderArg, value in self.kwArgs.items():<EOL><INDENT>if isinstance(value, PermuteVariable):<EOL><INDENT>value = flattenedChosenValues[\"<STR_LIT>\" % (encoderName, encoderArg)]<EOL><DEDENT>encoder[encoderArg] = value<EOL><DEDENT>if '<STR_LIT:.>' in self.encoderClass:<EOL><INDENT>(encoder['<STR_LIT:type>'], argName) = self.encoderClass.split('<STR_LIT:.>')<EOL>argValue = (encoder['<STR_LIT:w>'], encoder['<STR_LIT>'])<EOL>encoder[argName] = argValue<EOL>encoder.pop('<STR_LIT:w>')<EOL>encoder.pop('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>encoder['<STR_LIT:type>'] = self.encoderClass<EOL><DEDENT>return encoder<EOL>", "docstring": "Return a dict that can be used to construct this encoder. This dict\n        can be passed directly to the addMultipleEncoders() method of the\n        multi encoder.\n\n        Parameters:\n        ----------------------------------------------------------------------\n        encoderName:            name of the encoder\n        flattenedChosenValues:  dict of the flattened permutation variables. Any\n                                  variables within this dict whose key starts\n                                  with encoderName will be substituted for\n                                  encoder constructor args which are being\n                                  permuted over.", "id": "f17581:c4:m2"}
{"signature": "def getState(self):", "body": "raise NotImplementedError<EOL>", "docstring": "Return the current state of this particle. This is used for\n        communicating our state into a model record entry so that it can be\n        instantiated on another worker.", "id": "f17581:c0:m1"}
{"signature": "def pushAwayFrom(self, otherPositions, rng):", "body": "<EOL>if self.max == self.min:<EOL><INDENT>return<EOL><DEDENT>numPositions = len(otherPositions) * <NUM_LIT:4><EOL>if numPositions == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>stepSize = float(self.max-self.min) / numPositions<EOL>positions = numpy.arange(self.min, self.max + stepSize, stepSize)<EOL>numPositions = len(positions)<EOL>weights = numpy.zeros(numPositions)<EOL>maxDistanceSq = -<NUM_LIT:1> * (stepSize ** <NUM_LIT:2>)<EOL>for pos in otherPositions:<EOL><INDENT>distances = pos - positions<EOL>varWeights = numpy.exp(numpy.power(distances, <NUM_LIT:2>) / maxDistanceSq)<EOL>weights += varWeights<EOL><DEDENT>positionIdx = weights.argmin()<EOL>self._position = positions[positionIdx]<EOL>self._bestPosition = self.getPosition()<EOL>self._velocity *= rng.choice([<NUM_LIT:1>, -<NUM_LIT:1>])<EOL>", "docstring": "See comments in base class.", "id": "f17581:c1:m7"}
{"signature": "def setState(self, state):", "body": "self._positionIdx = self.choices.index(state['<STR_LIT>'])<EOL>self._bestPositionIdx = self.choices.index(state['<STR_LIT>'])<EOL>self._bestResult = state['<STR_LIT>']<EOL>", "docstring": "See comments in base class.", "id": "f17581:c3:m3"}
{"signature": "def setState(self, state):", "body": "raise NotImplementedError<EOL>", "docstring": "Set the current state of this particle. This is counterpart to getState.", "id": "f17581:c0:m2"}
{"signature": "def __init__(self, min, max, stepSize=None, inertia=None, cogRate=None,<EOL>socRate=None):", "body": "super(PermuteFloat, self).__init__()<EOL>self.min = min<EOL>self.max = max<EOL>self.stepSize = stepSize<EOL>self._position = (self.max + self.min) / <NUM_LIT><EOL>self._velocity = (self.max - self.min) / <NUM_LIT><EOL>self._inertia = (float(Configuration.get(\"<STR_LIT>\"))<EOL>if inertia is None else inertia)<EOL>self._cogRate = (float(Configuration.get(\"<STR_LIT>\"))<EOL>if cogRate is None else cogRate)<EOL>self._socRate = (float(Configuration.get(\"<STR_LIT>\"))<EOL>if socRate is None else socRate)<EOL>self._bestPosition = self.getPosition()<EOL>self._bestResult = None<EOL>", "docstring": "Construct a variable that permutes over floating point values using\n        the Particle Swarm Optimization (PSO) algorithm. See descriptions of\n        PSO (i.e. http://en.wikipedia.org/wiki/Particle_swarm_optimization)\n        for references to the inertia, cogRate, and socRate parameters.\n\n        Parameters:\n        -----------------------------------------------------------------------\n        min:          min allowed value of position\n        max:          max allowed value of position\n        stepSize:     if not None, the position must be at min + N * stepSize,\n                        where N is an integer\n        inertia:      The inertia for the particle.\n        cogRate:      This parameter controls how much the particle is affected\n                        by its distance from it's local best position\n        socRate:      This parameter controls how much the particle is affected\n                        by its distance from the global best position", "id": "f17581:c1:m0"}
{"signature": "def __repr__(self):", "body": "return (\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (<EOL>self.min, self.max, self.stepSize, self.getPosition(),<EOL>self._position, self._velocity, self._bestPosition,<EOL>self._bestResult))<EOL>", "docstring": "See comments in base class.", "id": "f17581:c2:m1"}
{"signature": "def run(self):", "body": "<EOL>self.verbosity = <NUM_LIT:0><EOL>self._testValidPositions(varClass=PermuteFloat, minValue=<NUM_LIT>,<EOL>maxValue=<NUM_LIT>, stepSize=<NUM_LIT:0.5>)<EOL>self._testValidPositions(varClass=PermuteInt, minValue=<NUM_LIT:2>,<EOL>maxValue=<NUM_LIT:11>, stepSize=<NUM_LIT:3>)<EOL>self._testValidPositions(varClass=PermuteInt, minValue=<NUM_LIT:2>,<EOL>maxValue=<NUM_LIT:11>, stepSize=<NUM_LIT:1>)<EOL>self._testConvergence(varClass=PermuteFloat, minValue=<NUM_LIT>,<EOL>maxValue=<NUM_LIT>, targetValue=<NUM_LIT>)<EOL>self._testConvergence(varClass=PermuteFloat, minValue=<NUM_LIT>,<EOL>maxValue=<NUM_LIT>, targetValue=<NUM_LIT>)<EOL>self._testConvergence(varClass=PermuteFloat, minValue=<NUM_LIT>,<EOL>maxValue=<NUM_LIT>, targetValue=<NUM_LIT>)<EOL>self._testConvergence(varClass=PermuteInt, minValue=<NUM_LIT:1>,<EOL>maxValue=<NUM_LIT:20>, targetValue=<NUM_LIT>)<EOL>self._testConvergence(varClass=PermuteInt, minValue=<NUM_LIT:1>,<EOL>maxValue=<NUM_LIT:20>, targetValue=<NUM_LIT:1>)<EOL>self._testChoices()<EOL>", "docstring": "Run unit tests on this module.", "id": "f17581:c5:m3"}
{"signature": "def getPosition(self):", "body": "raise NotImplementedError<EOL>", "docstring": "for int vars, returns position to nearest int\n\n        Parameters:\n        --------------------------------------------------------------\n        retval:     current position", "id": "f17581:c0:m3"}
{"signature": "def newPosition(self, whichVars=None):<EOL>", "body": "<EOL>globalBestPosition = None<EOL>if self._hsObj._speculativeParticles:<EOL><INDENT>genIdx = self.genIdx<EOL><DEDENT>else:<EOL><INDENT>genIdx = self.genIdx - <NUM_LIT:1><EOL><DEDENT>if genIdx >= <NUM_LIT:0>:<EOL><INDENT>(bestModelId, _) = self._resultsDB.bestModelIdAndErrScore(self.swarmId,<EOL>genIdx)<EOL>if bestModelId is not None:<EOL><INDENT>(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(<EOL>bestModelId)<EOL>globalBestPosition = Particle.getPositionFromState(particleState)<EOL><DEDENT><DEDENT>for (varName, var) in self.permuteVars.iteritems():<EOL><INDENT>if whichVars is not None and varName not in whichVars:<EOL><INDENT>continue<EOL><DEDENT>if globalBestPosition is None:<EOL><INDENT>var.newPosition(None, self._rng)<EOL><DEDENT>else:<EOL><INDENT>var.newPosition(globalBestPosition[varName], self._rng)<EOL><DEDENT><DEDENT>position = self.getPosition()<EOL>if self.logger.getEffectiveLevel() <= logging.DEBUG:<EOL><INDENT>msg = StringIO.StringIO()<EOL>print >> msg, \"<STR_LIT>\" % (pprint.pformat(position,<EOL>indent=<NUM_LIT:4>))<EOL>print >> msg, \"<STR_LIT>\"<EOL>for (varName, var) in self.permuteVars.iteritems():<EOL><INDENT>print >> msg, \"<STR_LIT>\" % (varName, str(var))<EOL><DEDENT>self.logger.debug(msg.getvalue())<EOL>msg.close()<EOL><DEDENT>return position<EOL>", "docstring": "Choose a new position based on results obtained so far from all other\n        particles.\n\n        Parameters:\n        --------------------------------------------------------------\n        whichVars:       If not None, only move these variables\n        retval:               new position", "id": "f17585:c0:m9"}
{"signature": "def initStateFrom(self, particleId, particleState, newBest):", "body": "<EOL>if newBest:<EOL><INDENT>(bestResult, bestPosition) = self._resultsDB.getParticleBest(particleId)<EOL><DEDENT>else:<EOL><INDENT>bestResult = bestPosition = None<EOL><DEDENT>varStates = particleState['<STR_LIT>']<EOL>for varName in varStates.keys():<EOL><INDENT>varState = copy.deepcopy(varStates[varName])<EOL>if newBest:<EOL><INDENT>varState['<STR_LIT>'] = bestResult<EOL><DEDENT>if bestPosition is not None:<EOL><INDENT>varState['<STR_LIT>'] = bestPosition[varName]<EOL><DEDENT>self.permuteVars[varName].setState(varState)<EOL><DEDENT>", "docstring": "Init all of our variable positions, velocities, and optionally the best\n        result and best position from the given particle.\n\n        If newBest is true, we get the best result and position for this new\n        generation from the resultsDB, This is used when evoloving a particle\n        because the bestResult and position as stored in was the best AT THE TIME\n        THAT PARTICLE STARTED TO RUN and does not include the best since that\n        particle completed.", "id": "f17585:c0:m3"}
{"signature": "def getPosition(self):", "body": "result = dict()<EOL>for (varName, value) in self.permuteVars.iteritems():<EOL><INDENT>result[varName] = value.getPosition()<EOL><DEDENT>return result<EOL>", "docstring": "Return the position of this particle. This returns a dict() of key\n        value pairs where each key is the name of the flattened permutation\n        variable and the value is its chosen value.\n\n        Parameters:\n        --------------------------------------------------------------\n        retval:     dict() of flattened permutation choices", "id": "f17585:c0:m6"}
{"signature": "def agitate(self):", "body": "for (varName, var) in self.permuteVars.iteritems():<EOL><INDENT>var.agitate()<EOL><DEDENT>self.newPosition()<EOL>", "docstring": "Agitate this particle so that it is likely to go to a new position.\n        Every time agitate is called, the particle is jiggled an even greater\n        amount.\n\n        Parameters:\n        --------------------------------------------------------------\n        retval:               None", "id": "f17585:c0:m8"}
{"signature": "def bestModelInCompletedSprint(self, sprintIdx):", "body": "sprintInfo = self._state['<STR_LIT>'][sprintIdx]<EOL>return (sprintInfo['<STR_LIT>'],<EOL>sprintInfo['<STR_LIT>'])<EOL>", "docstring": "Return the best model ID and it's errScore from the given sprint.\n        If the sprint has not completed yet, the bestModelID will be None.\n\n        Parameters:\n        ---------------------------------------------------------------------\n        retval:   (modelId, errScore)", "id": "f17586:c0:m14"}
{"signature": "def __init__(self, hsObj):", "body": "<EOL>self._hsObj = hsObj<EOL>self.logger = self._hsObj.logger<EOL>self._state = None<EOL>self._priorStateJSON = None<EOL>self._dirty = False<EOL>self.readStateFromDB()<EOL>", "docstring": "Create our state object.\n\n        Parameters:\n        ---------------------------------------------------------------------\n        hsObj:     Reference to the HypersesarchV2 instance\n        cjDAO:     ClientJobsDAO instance\n        logger:    logger to use\n        jobID:     our JobID", "id": "f17586:c0:m0"}
{"signature": "def writeStateToDB(self):", "body": "<EOL>if not self._dirty:<EOL><INDENT>return True<EOL><DEDENT>self._state['<STR_LIT>'] = time.time()<EOL>newStateJSON = json.dumps(self._state)<EOL>success = self._hsObj._cjDAO.jobSetFieldIfEqual(self._hsObj._jobID,<EOL>'<STR_LIT>', str(newStateJSON), str(self._priorStateJSON))<EOL>if success:<EOL><INDENT>self.logger.debug(\"<STR_LIT>\" %(pprint.pformat(self._state, indent=<NUM_LIT:4>)))<EOL>self._priorStateJSON = newStateJSON<EOL><DEDENT>else:<EOL><INDENT>self.logger.debug(\"<STR_LIT>\" %(pprint.pformat(self._state, indent=<NUM_LIT:4>)))<EOL>self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,<EOL>['<STR_LIT>'])[<NUM_LIT:0>]<EOL>self._state =  json.loads(self._priorStateJSON)<EOL>self.logger.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (pprint.pformat(self._state, indent=<NUM_LIT:4>)))<EOL><DEDENT>return success<EOL>", "docstring": "Update the state in the job record with our local changes (if any).\n        If we don't have the latest state in our priorStateJSON, then re-load\n        in the latest state and return False. If we were successful writing out\n        our changes, return True\n\n        Parameters:\n        ---------------------------------------------------------------------\n        retval:    True if we were successful writing out our changes\n                   False if our priorState is not the latest that was in the DB.\n                   In this case, we will re-load our state from the DB", "id": "f17586:c0:m4"}
{"signature": "def setSwarmState(self, swarmId, newStatus):", "body": "assert (newStatus in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'])<EOL>swarmInfo = self._state['<STR_LIT>'][swarmId]<EOL>if swarmInfo['<STR_LIT:status>'] == newStatus:<EOL><INDENT>return<EOL><DEDENT>if swarmInfo['<STR_LIT:status>'] == '<STR_LIT>' and newStatus == '<STR_LIT>':<EOL><INDENT>return<EOL><DEDENT>self._dirty = True<EOL>swarmInfo['<STR_LIT:status>'] = newStatus<EOL>if newStatus == '<STR_LIT>':<EOL><INDENT>(modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)<EOL>swarmInfo['<STR_LIT>'] = modelId<EOL>swarmInfo['<STR_LIT>'] = errScore<EOL><DEDENT>if newStatus != '<STR_LIT>' and swarmId in self._state['<STR_LIT>']:<EOL><INDENT>self._state['<STR_LIT>'].remove(swarmId)<EOL><DEDENT>if newStatus=='<STR_LIT>':<EOL><INDENT>self._hsObj.killSwarmParticles(swarmId)<EOL><DEDENT>sprintIdx = swarmInfo['<STR_LIT>']<EOL>self.isSprintActive(sprintIdx)<EOL>sprintInfo = self._state['<STR_LIT>'][sprintIdx]<EOL>statusCounts = dict(active=<NUM_LIT:0>, completing=<NUM_LIT:0>, completed=<NUM_LIT:0>, killed=<NUM_LIT:0>)<EOL>bestModelIds = []<EOL>bestErrScores = []<EOL>for info in self._state['<STR_LIT>'].values():<EOL><INDENT>if info['<STR_LIT>'] != sprintIdx:<EOL><INDENT>continue<EOL><DEDENT>statusCounts[info['<STR_LIT:status>']] += <NUM_LIT:1><EOL>if info['<STR_LIT:status>'] == '<STR_LIT>':<EOL><INDENT>bestModelIds.append(info['<STR_LIT>'])<EOL>bestErrScores.append(info['<STR_LIT>'])<EOL><DEDENT><DEDENT>if statusCounts['<STR_LIT>'] > <NUM_LIT:0>:<EOL><INDENT>sprintStatus = '<STR_LIT>'<EOL><DEDENT>elif statusCounts['<STR_LIT>'] > <NUM_LIT:0>:<EOL><INDENT>sprintStatus = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>sprintStatus = '<STR_LIT>'<EOL><DEDENT>sprintInfo['<STR_LIT:status>'] = sprintStatus<EOL>if sprintStatus == '<STR_LIT>':<EOL><INDENT>if len(bestErrScores) > <NUM_LIT:0>:<EOL><INDENT>whichIdx = numpy.array(bestErrScores).argmin()<EOL>sprintInfo['<STR_LIT>'] = 
bestModelIds[whichIdx]<EOL>sprintInfo['<STR_LIT>'] = bestErrScores[whichIdx]<EOL><DEDENT>else:<EOL><INDENT>sprintInfo['<STR_LIT>'] = <NUM_LIT:0><EOL>sprintInfo['<STR_LIT>'] = numpy.inf<EOL><DEDENT>bestPrior = numpy.inf<EOL>for idx in range(sprintIdx):<EOL><INDENT>if self._state['<STR_LIT>'][idx]['<STR_LIT:status>'] == '<STR_LIT>':<EOL><INDENT>(_, errScore) = self.bestModelInCompletedSprint(idx)<EOL>if errScore is None:<EOL><INDENT>errScore = numpy.inf<EOL><DEDENT><DEDENT>else:<EOL><INDENT>errScore = numpy.inf<EOL><DEDENT>if errScore < bestPrior:<EOL><INDENT>bestPrior = errScore<EOL><DEDENT><DEDENT>if sprintInfo['<STR_LIT>'] >= bestPrior:<EOL><INDENT>self._state['<STR_LIT>'] = sprintIdx-<NUM_LIT:1><EOL><DEDENT>if self._state['<STR_LIT>'] is not Noneand not self.anyGoodSprintsActive():<EOL><INDENT>self._state['<STR_LIT>'] = True<EOL><DEDENT><DEDENT>", "docstring": "Change the given swarm's state to 'newState'. If 'newState' is\n        'completed', then bestModelId and bestErrScore must be provided.\n\n        Parameters:\n        ---------------------------------------------------------------------\n        swarmId:      swarm Id\n        newStatus:    new status, either 'active', 'completing', 'completed', or\n                        'killed'", "id": "f17586:c0:m16"}
{"signature": "def killUselessSwarms(self):", "body": "<EOL>numExistingSprints = len(self._state['<STR_LIT>'])<EOL>if self._hsObj._searchType == HsSearchType.legacyTemporal:<EOL><INDENT>if numExistingSprints <= <NUM_LIT:2>:<EOL><INDENT>return<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if numExistingSprints <= <NUM_LIT:1>:<EOL><INDENT>return<EOL><DEDENT><DEDENT>completedSwarms = self.getCompletedSwarms()<EOL>completedSwarms = [(swarm, self._state[\"<STR_LIT>\"][swarm],<EOL>self._state[\"<STR_LIT>\"][swarm][\"<STR_LIT>\"])for swarm in completedSwarms]<EOL>completedMatrix = [[] for i in range(numExistingSprints)]<EOL>for swarm in completedSwarms:<EOL><INDENT>completedMatrix[swarm[<NUM_LIT:1>][\"<STR_LIT>\"]].append(swarm)<EOL><DEDENT>for sprint in completedMatrix:<EOL><INDENT>sprint.sort(key=itemgetter(<NUM_LIT:2>))<EOL><DEDENT>activeSwarms = self.getActiveSwarms()<EOL>activeSwarms.extend(self.getCompletingSwarms())<EOL>activeSwarms = [(swarm, self._state[\"<STR_LIT>\"][swarm],<EOL>self._state[\"<STR_LIT>\"][swarm][\"<STR_LIT>\"])for swarm in activeSwarms]<EOL>activeMatrix = [[] for i in range(numExistingSprints)]<EOL>for swarm in activeSwarms:<EOL><INDENT>activeMatrix[swarm[<NUM_LIT:1>][\"<STR_LIT>\"]].append(swarm)<EOL><DEDENT>for sprint in activeMatrix:<EOL><INDENT>sprint.sort(key=itemgetter(<NUM_LIT:2>))<EOL><DEDENT>toKill = []<EOL>for i in range(<NUM_LIT:1>, numExistingSprints):<EOL><INDENT>for swarm in activeMatrix[i]:<EOL><INDENT>curSwarmEncoders = swarm[<NUM_LIT:0>].split(\"<STR_LIT:.>\")<EOL>if(len(activeMatrix[i-<NUM_LIT:1>])==<NUM_LIT:0>):<EOL><INDENT>if i==<NUM_LIT:2> and (self._hsObj._tryAll3FieldCombinations orself._hsObj._tryAll3FieldCombinationsWTimestamps):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>bestInPrevious = completedMatrix[i-<NUM_LIT:1>][<NUM_LIT:0>]<EOL>bestEncoders = bestInPrevious[<NUM_LIT:0>].split('<STR_LIT:.>')<EOL>for encoder in bestEncoders:<EOL><INDENT>if not encoder in 
curSwarmEncoders:<EOL><INDENT>toKill.append(swarm)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if len(toKill) > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\" + str(toKill))<EOL><DEDENT>for swarm in toKill:<EOL><INDENT>self.setSwarmState(swarm[<NUM_LIT:0>], \"<STR_LIT>\")<EOL><DEDENT>return<EOL>", "docstring": "See if we can kill off some speculative swarms. If an earlier sprint\n        has finally completed, we can now tell which fields should *really* be present\n        in the sprints we've already started due to speculation, and kill off the\n        swarms that should not have been included.", "id": "f17586:c0:m19"}
{"signature": "def getFieldContributions(self):", "body": "<EOL>if self._hsObj._fixedFields is not None:<EOL><INDENT>return dict(), dict()<EOL><DEDENT>predictedEncoderName = self._hsObj._predictedFieldEncoder<EOL>fieldScores = []<EOL>for swarmId, info in self._state['<STR_LIT>'].items():<EOL><INDENT>encodersUsed = swarmId.split('<STR_LIT:.>')<EOL>if len(encodersUsed) != <NUM_LIT:1>:<EOL><INDENT>continue<EOL><DEDENT>field = self.getEncoderNameFromKey(encodersUsed[<NUM_LIT:0>])<EOL>bestScore = info['<STR_LIT>']<EOL>if bestScore is None:<EOL><INDENT>(_modelId, bestScore) =self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)<EOL><DEDENT>fieldScores.append((bestScore, field))<EOL><DEDENT>if self._hsObj._searchType == HsSearchType.legacyTemporal:<EOL><INDENT>assert(len(fieldScores)==<NUM_LIT:1>)<EOL>(baseErrScore, baseField) = fieldScores[<NUM_LIT:0>]<EOL>for swarmId, info in self._state['<STR_LIT>'].items():<EOL><INDENT>encodersUsed = swarmId.split('<STR_LIT:.>')<EOL>if len(encodersUsed) != <NUM_LIT:2>:<EOL><INDENT>continue<EOL><DEDENT>fields = [self.getEncoderNameFromKey(name) for name in encodersUsed]<EOL>fields.remove(baseField)<EOL>fieldScores.append((info['<STR_LIT>'], fields[<NUM_LIT:0>]))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>fieldScores.sort(reverse=True)<EOL>if self._hsObj._maxBranching > <NUM_LIT:0>and len(fieldScores) > self._hsObj._maxBranching:<EOL><INDENT>baseErrScore = fieldScores[-self._hsObj._maxBranching-<NUM_LIT:1>][<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>baseErrScore = fieldScores[<NUM_LIT:0>][<NUM_LIT:0>]<EOL><DEDENT><DEDENT>pctFieldContributionsDict = dict()<EOL>absFieldContributionsDict = dict()<EOL>if baseErrScore is not None:<EOL><INDENT>if abs(baseErrScore) < <NUM_LIT>:<EOL><INDENT>baseErrScore = <NUM_LIT><EOL><DEDENT>for (errScore, field) in fieldScores:<EOL><INDENT>if errScore is not None:<EOL><INDENT>pctBetter = (baseErrScore - errScore) * <NUM_LIT> / baseErrScore<EOL><DEDENT>else:<EOL><INDENT>pctBetter = <NUM_LIT:0.0><EOL>errScore = 
baseErrScore   <EOL><DEDENT>pctFieldContributionsDict[field] = pctBetter<EOL>absFieldContributionsDict[field] = baseErrScore - errScore<EOL><DEDENT><DEDENT>self.logger.debug(\"<STR_LIT>\" % (pctFieldContributionsDict))<EOL>return pctFieldContributionsDict, absFieldContributionsDict<EOL>", "docstring": "Return the field contributions statistics.\n\n        Parameters:\n        ---------------------------------------------------------------------\n        retval:   Dictionary where the keys are the field names and the values\n                    are how much each field contributed to the best score.", "id": "f17586:c0:m7"}
{"signature": "def getNonKilledSwarms(self, sprintIdx):", "body": "swarmIds = []<EOL>for swarmId, info in self._state['<STR_LIT>'].items():<EOL><INDENT>if info['<STR_LIT>'] == sprintIdx and info['<STR_LIT:status>'] != '<STR_LIT>':<EOL><INDENT>swarmIds.append(swarmId)<EOL><DEDENT><DEDENT>return swarmIds<EOL>", "docstring": "Return the list of swarms in the given sprint that were not killed.\n        This is called when we are trying to figure out which encoders to carry\n        forward to the next sprint. We don't want to carry forward encoder\n        combintations which were obviously bad (in killed swarms).\n\n        Parameters:\n        ---------------------------------------------------------------------\n        retval:   list of active swarm Ids in the given sprint", "id": "f17586:c0:m10"}
{"signature": "def isSprintCompleted(self, sprintIdx):", "body": "numExistingSprints = len(self._state['<STR_LIT>'])<EOL>if sprintIdx >= numExistingSprints:<EOL><INDENT>return False<EOL><DEDENT>return (self._state['<STR_LIT>'][sprintIdx]['<STR_LIT:status>'] == '<STR_LIT>')<EOL>", "docstring": "Return True if the given sprint has completed.", "id": "f17586:c0:m18"}
{"signature": "def bestModelInSprint(self, sprintIdx):", "body": "<EOL>swarms = self.getAllSwarms(sprintIdx)<EOL>bestModelId = None<EOL>bestErrScore = numpy.inf<EOL>for swarmId in swarms:<EOL><INDENT>(modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)<EOL>if errScore < bestErrScore:<EOL><INDENT>bestModelId = modelId<EOL>bestErrScore = errScore<EOL><DEDENT><DEDENT>return (bestModelId, bestErrScore)<EOL>", "docstring": "Return the best model ID and it's errScore from the given sprint,\n        which may still be in progress. This returns the best score from all models\n        in the sprint which have matured so far.\n\n        Parameters:\n        ---------------------------------------------------------------------\n        retval:   (modelId, errScore)", "id": "f17586:c0:m15"}
{"signature": "def getCompletedSwarms(self):", "body": "swarmIds = []<EOL>for swarmId, info in self._state['<STR_LIT>'].items():<EOL><INDENT>if info['<STR_LIT:status>'] == '<STR_LIT>':<EOL><INDENT>swarmIds.append(swarmId)<EOL><DEDENT><DEDENT>return swarmIds<EOL>", "docstring": "Return the list of all completed swarms.\n\n        Parameters:\n        ---------------------------------------------------------------------\n        retval:   list of active swarm Ids", "id": "f17586:c0:m11"}
{"signature": "def getCompletingSwarms(self):", "body": "swarmIds = []<EOL>for swarmId, info in self._state['<STR_LIT>'].items():<EOL><INDENT>if info['<STR_LIT:status>'] == '<STR_LIT>':<EOL><INDENT>swarmIds.append(swarmId)<EOL><DEDENT><DEDENT>return swarmIds<EOL>", "docstring": "Return the list of all completing swarms.\n\n        Parameters:\n        ---------------------------------------------------------------------\n        retval:   list of active swarm Ids", "id": "f17586:c0:m12"}
{"signature": "def critical(self, msg, *args, **kwargs):", "body": "self._baseLogger.critical(self, self.getExtendedMsg(msg), *args, **kwargs)<EOL>", "docstring": "Log 'msg % args' with severity 'CRITICAL'.\n\nTo pass exception information, use the keyword argument exc_info with\na true value, e.g.\n\nlogger.critical(\"Houston, we have a %s\", \"major disaster\", exc_info=1)", "id": "f17587:c0:m7"}
{"signature": "def debug(self, msg, *args, **kwargs):", "body": "self._baseLogger.debug(self, self.getExtendedMsg(msg), *args, **kwargs)<EOL>", "docstring": "Log 'msg % args' with severity 'DEBUG'.\n\nTo pass exception information, use the keyword argument exc_info with\na true value, e.g.\n\nlogger.debug(\"Houston, we have a %s\", \"thorny problem\", exc_info=1)", "id": "f17587:c0:m3"}
{"signature": "def error(self, msg, *args, **kwargs):", "body": "self._baseLogger.error(self, self.getExtendedMsg(msg), *args, **kwargs)<EOL>", "docstring": "Log 'msg % args' with severity 'ERROR'.\n\nTo pass exception information, use the keyword argument exc_info with\na true value, e.g.\n\nlogger.error(\"Houston, we have a %s\", \"major problem\", exc_info=1)", "id": "f17587:c0:m6"}
{"signature": "def log(self, level, msg, *args, **kwargs):", "body": "self._baseLogger.log(self, level, self.getExtendedMsg(msg), *args,<EOL>**kwargs)<EOL>", "docstring": "Log 'msg % args' with the integer severity 'level'.\n\nTo pass exception information, use the keyword argument exc_info with\na true value, e.g.\n\nlogger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)", "id": "f17587:c0:m8"}
{"signature": "@classmethod<EOL><INDENT>def getString(cls, prop):<DEDENT>", "body": "if cls._properties is None:<EOL><INDENT>cls._readStdConfigFiles()<EOL><DEDENT>envValue = os.environ.get(\"<STR_LIT>\" % (cls.envPropPrefix,<EOL>prop.replace('<STR_LIT:.>', '<STR_LIT:_>')), None)<EOL>if envValue is not None:<EOL><INDENT>return envValue<EOL><DEDENT>return cls._properties[prop]<EOL>", "docstring": "Retrieve the requested property as a string. If property does not exist,\n        then KeyError will be raised.\n\n        Parameters:\n        ----------------------------------------------------------------\n        prop:        name of the property\n        retval:      property value as a string", "id": "f17591:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def getConfigPaths(cls):<DEDENT>", "body": "configPaths = []<EOL>if cls._configPaths is not None:<EOL><INDENT>return cls._configPaths<EOL><DEDENT>else:<EOL><INDENT>if '<STR_LIT>' in os.environ:<EOL><INDENT>configVar = os.environ['<STR_LIT>']<EOL>configPaths = configVar.split(os.pathsep)<EOL><DEDENT>return configPaths<EOL><DEDENT>", "docstring": "Return the list of paths to search for configuration files.\n\n        Parameters:\n        ----------------------------------------------------------------\n        retval:    list of paths.", "id": "f17591:c0:m11"}
{"signature": "@classmethod<EOL><INDENT>def dict(cls):<DEDENT>", "body": "if cls._properties is None:<EOL><INDENT>cls._readStdConfigFiles()<EOL><DEDENT>result = dict(cls._properties)<EOL>keys = list(os.environ.keys())<EOL>replaceKeys = [x for x in keys if x.startswith(cls.envPropPrefix)]<EOL>for envKey in replaceKeys:<EOL><INDENT>key = envKey[len(cls.envPropPrefix):]<EOL>key = key.replace('<STR_LIT:_>', '<STR_LIT:.>')<EOL>result[key] = os.environ[envKey]<EOL><DEDENT>return result<EOL>", "docstring": "Return a dict containing all of the configuration properties\n\n        Parameters:\n        ----------------------------------------------------------------\n        retval:      dict containing all configuration properties.", "id": "f17591:c0:m6"}
{"signature": "@classmethod<EOL><INDENT>def clear(cls):<DEDENT>", "body": "cls._properties = None<EOL>cls._configPaths = None<EOL>", "docstring": "Clear out the entire configuration.", "id": "f17591:c0:m9"}
{"signature": "@classmethod<EOL><INDENT>def findConfigFile(cls, filename):<DEDENT>", "body": "paths = cls.getConfigPaths()<EOL>for p in paths:<EOL><INDENT>testPath = os.path.join(p, filename)<EOL>if os.path.isfile(testPath):<EOL><INDENT>return os.path.join(p, filename)<EOL><DEDENT><DEDENT>", "docstring": "Search the configuration path (specified via the NTA_CONF_PATH\n        environment variable) for the given filename. If found, return the complete\n        path to the file.\n\n        Parameters:\n        ----------------------------------------------------------------\n        filename:  name of file to locate", "id": "f17591:c0:m10"}
{"signature": "@classmethod<EOL><INDENT>def setCustomProperty(cls, propertyName, value):<DEDENT>", "body": "cls.setCustomProperties({propertyName: value})<EOL>", "docstring": "Set a single custom setting and persist it to the custom\n        configuration store.\n\n        Parameters:\n        ----------------------------------------------------------------\n        propertyName: string containing the name of the property to get\n        value: value to set the property to", "id": "f17591:c1:m1"}
{"signature": "@classmethod<EOL><INDENT>def setCustomProperties(cls, properties):<DEDENT>", "body": "_getLogger().info(\"<STR_LIT>\",<EOL>properties, traceback.format_stack())<EOL>_CustomConfigurationFileWrapper.edit(properties)<EOL>for propertyName, value in properties.items():<EOL><INDENT>cls.set(propertyName, value)<EOL><DEDENT>", "docstring": "Set multiple custom properties and persist them to the custom\n        configuration store.\n\n        Parameters:\n        ----------------------------------------------------------------\n        properties: a dict of property name/value pairs to set", "id": "f17591:c1:m2"}
{"signature": "@classmethod<EOL><INDENT>def _readStdConfigFiles(cls):<DEDENT>", "body": "<EOL>cls.readConfigFile(DEFAULT_CONFIG)<EOL>cls.readConfigFile(USER_CONFIG)<EOL>", "docstring": "Read in all standard configuration files", "id": "f17591:c0:m13"}
{"signature": "@classmethod<EOL><INDENT>def loadCustomConfig(cls):<DEDENT>", "body": "cls.readConfigFile(_CustomConfigurationFileWrapper.customFileName)<EOL>", "docstring": "Loads custom configuration settings from their persistent storage.\n        DO NOT CALL THIS: It's typically not necessary to call this method\n        directly - see NOTE below.\n\n        NOTE: this method exists *solely* for the benefit of prepare_conf.py, which\n        needs to load configuration files selectively.", "id": "f17591:c1:m5"}
{"signature": "@classmethod<EOL><INDENT>def getPath(cls):<DEDENT>", "body": "if cls._path is None:<EOL><INDENT>cls._setPath()<EOL><DEDENT>return cls._path<EOL>", "docstring": "Get the path of the custom configuration file", "id": "f17591:c2:m4"}
{"signature": "@classmethod<EOL><INDENT>def getCustomDict(cls):<DEDENT>", "body": "if not os.path.exists(cls.getPath()):<EOL><INDENT>return dict()<EOL><DEDENT>properties = Configuration._readConfigFile(os.path.basename(<EOL>cls.getPath()), os.path.dirname(cls.getPath()))<EOL>values = dict()<EOL>for propName in properties:<EOL><INDENT>if '<STR_LIT:value>' in properties[propName]:<EOL><INDENT>values[propName] = properties[propName]['<STR_LIT:value>']<EOL><DEDENT><DEDENT>return values<EOL>", "docstring": "Returns a dict of all temporary values in custom configuration file", "id": "f17591:c2:m1"}
{"signature": "@classmethod<EOL><INDENT>def clear(cls):<DEDENT>", "body": "<EOL>super(Configuration, cls).clear()<EOL>_CustomConfigurationFileWrapper.clear(persistent=False)<EOL>", "docstring": "Clear all configuration properties from in-memory cache, but do NOT\n        alter the custom configuration file. Used in unit-testing.", "id": "f17591:c1:m3"}
{"signature": "def run(self):", "body": "self._logger.debug(\"<STR_LIT>\" % (self._modelID))<EOL>periodic = self._initPeriodicActivities()<EOL>self._optimizedMetricLabel = self._optimizeKeyPattern<EOL>self._reportMetricLabels = [self._optimizeKeyPattern]<EOL>if self._iterations >= <NUM_LIT:0>:<EOL><INDENT>iterTracker = iter(xrange(self._iterations))<EOL><DEDENT>else:<EOL><INDENT>iterTracker = iter(itertools.count())<EOL><DEDENT>doSysExit = False<EOL>if self._sysExitModelRange is not None:<EOL><INDENT>modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)<EOL>modelIDs = [x[<NUM_LIT:0>] for x in modelAndCounters]<EOL>modelIDs.sort()<EOL>(beg,end) = self._sysExitModelRange<EOL>if self._modelID in modelIDs[int(beg):int(end)]:<EOL><INDENT>doSysExit = True<EOL><DEDENT><DEDENT>if self._delayModelRange is not None:<EOL><INDENT>modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)<EOL>modelIDs = [x[<NUM_LIT:0>] for x in modelAndCounters]<EOL>modelIDs.sort()<EOL>(beg,end) = self._delayModelRange<EOL>if self._modelID in modelIDs[int(beg):int(end)]:<EOL><INDENT>time.sleep(<NUM_LIT:10>)<EOL><DEDENT><DEDENT>if self._errModelRange is not None:<EOL><INDENT>modelAndCounters = self._jobsDAO.modelsGetUpdateCounters(self._jobID)<EOL>modelIDs = [x[<NUM_LIT:0>] for x in modelAndCounters]<EOL>modelIDs.sort()<EOL>(beg,end) = self._errModelRange<EOL>if self._modelID in modelIDs[int(beg):int(end)]:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if self._delay is not None:<EOL><INDENT>time.sleep(self._delay)<EOL><DEDENT>self._currentRecordIndex = <NUM_LIT:0><EOL>while True:<EOL><INDENT>if self._isKilled:<EOL><INDENT>break<EOL><DEDENT>if self._isCanceled:<EOL><INDENT>break<EOL><DEDENT>if self._isMature:<EOL><INDENT>if not self._isBestModel:<EOL><INDENT>self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED<EOL>break<EOL><DEDENT>else:<EOL><INDENT>self._cmpReason = 
self._jobsDAO.CMPL_REASON_EOF<EOL><DEDENT><DEDENT>try:<EOL><INDENT>self._currentRecordIndex = next(iterTracker)<EOL><DEDENT>except StopIteration:<EOL><INDENT>break<EOL><DEDENT>self._writePrediction(ModelResult(None, None, None, None))<EOL>periodic.tick()<EOL>if self.__shouldSysExit(self._currentRecordIndex):<EOL><INDENT>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if self._busyWaitTime is not None:<EOL><INDENT>time.sleep(self._busyWaitTime)<EOL>self.__computeWaitTime()<EOL><DEDENT>if doSysExit:<EOL><INDENT>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>if self._jobFailErr:<EOL><INDENT>raise utils.JobFailException(\"<STR_LIT>\",<EOL>\"<STR_LIT>\")<EOL><DEDENT><DEDENT>if self._doFinalize:<EOL><INDENT>if not self._makeCheckpoint:<EOL><INDENT>self._model = None<EOL><DEDENT>if self._finalDelay is not None:<EOL><INDENT>time.sleep(self._finalDelay)<EOL><DEDENT>self._finalize()<EOL><DEDENT>self._logger.info(\"<STR_LIT>\"% (self._modelID))<EOL>return (self._cmpReason, None)<EOL>", "docstring": "Runs the given OPF task against the given Model instance", "id": "f17592:c0:m4"}
{"signature": "def _computModelDelay(self):", "body": "<EOL>if self._params['<STR_LIT>'] is not Noneand self._params['<STR_LIT>'] is not None:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>if self._sleepModelRange is not None:<EOL><INDENT>range, delay = self._sleepModelRange.split('<STR_LIT::>')<EOL>delay = float(delay)<EOL>range = map(int, range.split('<STR_LIT:U+002C>'))<EOL>modelIDs = self._jobsDAO.jobGetModelIDs(self._jobID)<EOL>modelIDs.sort()<EOL>range[<NUM_LIT:1>] = min(range[<NUM_LIT:1>], len(modelIDs))<EOL>if self._modelID in modelIDs[range[<NUM_LIT:0>]:range[<NUM_LIT:1>]]:<EOL><INDENT>self._delay = delay<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self._delay = self._params['<STR_LIT>']<EOL><DEDENT>", "docstring": "Computes the amount of time (if any) to delay the run of this model.\n        This can be determined by two mutually exclusive parameters:\n        delay and sleepModelRange.\n\n        'delay' specifies the number of seconds a model should be delayed. If a list\n        is specified, the appropriate amount of delay is determined by using the\n        model's modelIndex property.\n\n        However, this doesn't work when testing orphaned models, because the\n        modelIndex will be the same for every recovery attempt. Therefore, every\n        recovery attempt will also be delayed and potentially orphaned.\n\n        'sleepModelRange' doesn't use the modelIndex property for a model, but rather\n        sees which order the model is in the database, and uses that to determine\n        whether or not a model should be delayed.", "id": "f17592:c0:m2"}
{"signature": "def __init__(self,<EOL>modelID,<EOL>jobID,<EOL>params,<EOL>predictedField,<EOL>reportKeyPatterns,<EOL>optimizeKeyPattern,<EOL>jobsDAO,<EOL>modelCheckpointGUID,<EOL>logLevel=None,<EOL>predictionCacheMaxRecords=None):", "body": "super(OPFDummyModelRunner, self).__init__(modelID=modelID,<EOL>jobID=jobID,<EOL>predictedField=predictedField,<EOL>experimentDir=None,<EOL>reportKeyPatterns=reportKeyPatterns,<EOL>optimizeKeyPattern=optimizeKeyPattern,<EOL>jobsDAO=jobsDAO,<EOL>modelCheckpointGUID=modelCheckpointGUID,<EOL>logLevel=logLevel,<EOL>predictionCacheMaxRecords=None)<EOL>self._predictionCacheMaxRecords = predictionCacheMaxRecords<EOL>self._streamDef = copy.deepcopy(self._DUMMY_STREAMDEF)<EOL>self._params = copy.deepcopy(self._DEFAULT_PARAMS)<EOL>if '<STR_LIT>' in paramsand '<STR_LIT>' in params['<STR_LIT>']:<EOL><INDENT>self.modelIndex=params['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>self.modelIndex = OPFDummyModelRunner.modelIndex<EOL>OPFDummyModelRunner.modelIndex += <NUM_LIT:1><EOL><DEDENT>self._loadDummyModelParameters(params)<EOL>self._logger.debug(\"<STR_LIT>\", self._params)<EOL>self._busyWaitTime = self._params['<STR_LIT>']<EOL>self._iterations = self._params['<STR_LIT>']<EOL>self._doFinalize = self._params['<STR_LIT>']<EOL>self._delay = self._params['<STR_LIT>']<EOL>self._sleepModelRange = self._params['<STR_LIT>']<EOL>self._makeCheckpoint = self._params['<STR_LIT>']<EOL>self._finalDelay = self._params['<STR_LIT>']<EOL>self._exitAfter = self._params['<STR_LIT>']<EOL>self.randomizeWait = self._params['<STR_LIT>']<EOL>if self._busyWaitTime is not None:<EOL><INDENT>self.__computeWaitTime()<EOL><DEDENT>if self._params['<STR_LIT>'] is not Noneand self._params['<STR_LIT>'] is not None:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"\"<STR_LIT>\")<EOL><DEDENT>self.metrics = None<EOL>self.metricValue = None<EOL>if self._params['<STR_LIT>'] is not None:<EOL><INDENT>self.metrics = eval(self._params['<STR_LIT>'])<EOL><DEDENT>elif 
self._params['<STR_LIT>'] is not None:<EOL><INDENT>self.metricValue = float(self._params['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>self.metrics = OPFDummyModelRunner.metrics[<NUM_LIT:0>]<EOL><DEDENT>if self._params['<STR_LIT>'] is not None:<EOL><INDENT>self._model = self.__createModel(self._params['<STR_LIT>'])<EOL>self.__fieldInfo = self._model.getFieldInfo()<EOL><DEDENT>self._sysExitModelRange = self._params['<STR_LIT>']<EOL>if self._sysExitModelRange is not None:<EOL><INDENT>self._sysExitModelRange = [int(x) for x in self._sysExitModelRange.split('<STR_LIT:U+002C>')]<EOL><DEDENT>self._delayModelRange = self._params['<STR_LIT>']<EOL>if self._delayModelRange is not None:<EOL><INDENT>self._delayModelRange = [int(x) for x in self._delayModelRange.split('<STR_LIT:U+002C>')]<EOL><DEDENT>self._errModelRange = self._params['<STR_LIT>']<EOL>if self._errModelRange is not None:<EOL><INDENT>self._errModelRange = [int(x) for x in self._errModelRange.split('<STR_LIT:U+002C>')]<EOL><DEDENT>self._computModelDelay()<EOL>self._jobFailErr = self._params['<STR_LIT>']<EOL>self._logger.debug(\"<STR_LIT>\", self._modelID, self._params)<EOL>", "docstring": "Parameters:\n-------------------------------------------------------------------------\nmodelID:    ID of this model in the models table\n\njobID:\n\nparams:     a dictionary of parameters for this dummy model. The\n            possible keys are:\n\n              delay:          OPTIONAL-This specifies the amount of time\n                              (in seconds) that the experiment should wait\n                              before STARTING to process records. 
This is\n                              useful for simulating workers that start/end\n                              at different times\n\n              finalDelay:     OPTIONAL-This specifies the amount of time\n                              (in seconds) that the experiment should wait\n                              before it conducts its finalization operations.\n                              These operations include checking if the model\n                              is the best model, and writing out checkpoints.\n\n              waitTime:       OPTIONAL-The amount of time (in seconds)\n                              to wait in a busy loop to simulate\n                              computation time on EACH ITERATION\n\n              randomizeWait:  OPTIONAL-([0.0-1.0] ). Default:None\n                              If set to a value, the above specified\n                              wait time will be randomly be dithered by\n                              +/- <randomizeWait>% of the specfied value.\n                              For example, if randomizeWait=0.2, the wait\n                              time will be dithered by +/- 20% of its value.\n\n              iterations:     OPTIONAL-How many iterations to run the model\n                              for. -1 means run forever (default=1)\n\n              metricFunctions: OPTIONAL-A list of single argument functions\n                               serialized as strings, which return the metric\n                               value given the record number.\n\n                               Mutually exclusive with metricValue\n\n              metricValue:    OPTIONAL-A single value to use for the metric\n                              value (used to debug hypersearch).\n\n                              Mutually exclusive with metricFunctions\n\n              finalize:       OPTIONAL-(True/False). 
Default:True\n                              When False, this will prevent the model from\n                              recording it's metrics and performing other\n                              functions that it usually performs after the\n                              model has finished running\n\n              permutationParams: A dict containing the instances of all the\n                                  variables being permuted over\n\n              experimentDirectory: REQUIRED-An absolute path to a directory\n                                   with a valid description.py file.\n\n                                   NOTE: This does not actually affect the\n                                   running of the model or the metrics\n                                   produced. It is required to create certain\n                                   objects (such as the output stream)\n\n              makeCheckpoint:     True to actually write a checkpoint out to\n                                  disk (default: False)\n\n              sysExitModelRange: A string containing two integers 'firstIdx,\n                              endIdx'. When present, if we are running the\n                              firstIdx'th model up to but not including the\n                              endIdx'th model, then do a sys.exit() while\n                              running the model. This causes the worker to\n                              exit, simulating an orphaned model.\n\n              delayModelRange: A string containing two integers 'firstIdx,\n                              endIdx'. When present, if we are running the\n                              firstIdx'th model up to but not including the\n                              endIdx'th model, then do a delay of 10 sec.\n                              while running the model. 
This causes the\n                              worker to run slower and for some other worker\n                              to think the model should be orphaned.\n\n              exitAfter:      The number of iterations after which the model\n                              should perform a sys exit. This is an\n                              alternative way of creating an orphaned model\n                              that use's the dummmy model's modelIndex\n                              instead of the modelID\n\n              errModelRange: A string containing two integers 'firstIdx,\n                              endIdx'. When present, if we are running the\n                              firstIdx'th model up to but not including the\n                              endIdx'th model, then raise an exception while\n                              running the model. This causes the model to\n                              fail with a CMPL_REASON_ERROR reason\n\n              sleepModelRange: A string containing 3 integers 'firstIdx,\n                              endIdx: delay'. When present, if we are running\n                              the firstIdx'th model up to but not including\n                              the endIdx'th model, then sleep for delay\n                              seconds at the beginning of the run.\n\n              jobFailErr: If true, model will raise a JobFailException\n                          which should cause the job to be marked as\n                          failed and immediately cancel all other workers.\n\npredictedField:     Name of the input field for which this model is being\n                    optimized\n\nreportKeyPatterns:  list of items from the results dict to include in\n                    the report. 
These can be regular expressions.\noptimizeKeyPattern: Which report item, if any, we will be optimizing for.\n                    This can also be a regular expression, but is an error\n                    if it matches more than one key from the experiment's\n                    results.\n\njobsDAO:            Jobs data access object - the interface to the\n                    jobs database which has the model's table.\n\nmodelCheckpointGUID:\n                    A persistent, globally-unique identifier for\n                    constructing the model checkpoint key\n\nlogLevel:           override logging level to this value, if not None\npredictionCacheMaxRecords:\n                    Maximum number of records for the prediction output cache.\n                    Pass None for the default value.", "id": "f17592:c0:m0"}
{"signature": "def _createPredictionLogger(self):", "body": "class DummyLogger:<EOL><INDENT>def writeRecord(self, record): pass<EOL>def writeRecords(self, records, progressCB): pass<EOL>def close(self): pass<EOL><DEDENT>self._predictionLogger = DummyLogger()<EOL>", "docstring": "Creates the model's PredictionLogger object, which is an interface to write\nmodel results to a permanent storage location", "id": "f17592:c0:m7"}
{"signature": "def _finalize(self):", "body": "self._logger.info(<EOL>\"<STR_LIT>\",<EOL>self._modelID, self._currentRecordIndex + <NUM_LIT:1>)<EOL>self._updateModelDBResults()<EOL>if not self._isKilled:<EOL><INDENT>self.__updateJobResults()<EOL><DEDENT>else:<EOL><INDENT>self.__deleteOutputCache(self._modelID)<EOL><DEDENT>if self._predictionLogger:<EOL><INDENT>self._predictionLogger.close()<EOL><DEDENT>if self._inputSource: <EOL><INDENT>self._inputSource.close()<EOL><DEDENT>", "docstring": "Run final activities after a model has run. These include recording and\n        logging the final score", "id": "f17593:c0:m3"}
{"signature": "def __deleteOutputCache(self, modelID):", "body": "<EOL>if modelID == self._modelID and self._predictionLogger is not None:<EOL><INDENT>self._predictionLogger.close()<EOL>del self.__predictionCache<EOL>self._predictionLogger = None<EOL>self.__predictionCache = None<EOL><DEDENT>", "docstring": "Deletes the output cache associated with the given modelID. This actually\nclears up the resources associated with the cache, rather than deleting all\nthe records in the cache\n\nParameters:\n-----------------------------------------------------------------------\nmodelID:      The id of the model whose output cache is being deleted", "id": "f17593:c0:m18"}
{"signature": "def __createModelCheckpoint(self):", "body": "if self._model is None or self._modelCheckpointGUID is None:<EOL><INDENT>return<EOL><DEDENT>if self._predictionLogger is None:<EOL><INDENT>self._createPredictionLogger()<EOL><DEDENT>predictions = io.StringIO()<EOL>self._predictionLogger.checkpoint(<EOL>checkpointSink=predictions,<EOL>maxRows=int(Configuration.get('<STR_LIT>')))<EOL>self._model.save(os.path.join(self._experimentDir, str(self._modelCheckpointGUID)))<EOL>self._jobsDAO.modelSetFields(modelID,<EOL>{'<STR_LIT>':str(self._modelCheckpointGUID)},<EOL>ignoreUnchanged=True)<EOL>self._logger.info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\", self._modelID, checkpointID)<EOL>return<EOL>", "docstring": "Create a checkpoint from the current model, and store it in a dir named\n        after checkpoint GUID, and finally store the GUID in the Models DB", "id": "f17593:c0:m4"}
{"signature": "def __updateJobResultsPeriodic(self):", "body": "if self._isBestModelStored and not self._isBestModel:<EOL><INDENT>return<EOL><DEDENT>while True:<EOL><INDENT>jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['<STR_LIT>'])[<NUM_LIT:0>]<EOL>if jobResultsStr is None:<EOL><INDENT>jobResults = {}<EOL><DEDENT>else:<EOL><INDENT>self._isBestModelStored = True<EOL>if not self._isBestModel:<EOL><INDENT>return<EOL><DEDENT>jobResults = json.loads(jobResultsStr)<EOL><DEDENT>bestModel = jobResults.get('<STR_LIT>', None)<EOL>bestMetric = jobResults.get('<STR_LIT>', None)<EOL>isSaved = jobResults.get('<STR_LIT>', False)<EOL>if (bestModel is not None) and (self._modelID != bestModel):<EOL><INDENT>self._isBestModel = False<EOL>return<EOL><DEDENT>self.__flushPredictionCache()<EOL>self._jobsDAO.modelUpdateTimestamp(self._modelID)<EOL>metrics = self._getMetrics()<EOL>jobResults['<STR_LIT>'] = self._modelID<EOL>jobResults['<STR_LIT>'] = metrics[self._optimizedMetricLabel]<EOL>jobResults['<STR_LIT>'] = metrics<EOL>jobResults['<STR_LIT>'] = False<EOL>newResults = json.dumps(jobResults)<EOL>isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID,<EOL>fieldName='<STR_LIT>',<EOL>curValue=jobResultsStr,<EOL>newValue=newResults)<EOL>if isUpdated or (not isUpdated and newResults==jobResultsStr):<EOL><INDENT>self._isBestModel = True<EOL>break<EOL><DEDENT><DEDENT>", "docstring": "Periodic check to see if this is the best model. This should only have an\neffect if this is the *first* model to report its progress", "id": "f17593:c0:m12"}
{"signature": "def handleWarningSignal(self, signum, frame):", "body": "self._isInterrupted.set()<EOL>", "docstring": "Handles a \"warning signal\" from the scheduler. This is received when the\nscheduler is about to kill the current process so that the worker can be\nallocated to another job.\n\nRight now, this function just sets the current model to the \"Orphaned\" state\nin the models table so that another worker can eventually re-run this model\n\nParameters:\n-----------------------------------------------------------------------", "id": "f17593:c0:m22"}
{"signature": "def _getMetricLabels(self):", "body": "return self.__metricMgr.getMetricLabels()<EOL>", "docstring": "Returns:  A list of labels that correspond to metrics being computed", "id": "f17593:c0:m8"}
{"signature": "def __deleteModelCheckpoint(self, modelID):", "body": "checkpointID =self._jobsDAO.modelsGetFields(modelID, ['<STR_LIT>'])[<NUM_LIT:0>]<EOL>if checkpointID is None:<EOL><INDENT>return<EOL><DEDENT>try:<EOL><INDENT>shutil.rmtree(os.path.join(self._experimentDir, str(self._modelCheckpointGUID)))<EOL><DEDENT>except:<EOL><INDENT>self._logger.warn(\"<STR_LIT>\"\"<STR_LIT>\",<EOL>checkpointID)<EOL>return<EOL><DEDENT>self._jobsDAO.modelSetFields(modelID,<EOL>{'<STR_LIT>':None},<EOL>ignoreUnchanged=True)<EOL>return<EOL>", "docstring": "Delete the stored checkpoint for the specified modelID. This function is\ncalled if the current model is now the best model, making the old model's\ncheckpoint obsolete\n\nParameters:\n-----------------------------------------------------------------------\nmodelID:      The modelID for the checkpoint to delete. This is NOT the\n              unique checkpointID", "id": "f17593:c0:m5"}
{"signature": "def __getOptimizedMetricLabel(self):", "body": "matchingKeys = matchPatterns([self._optimizeKeyPattern],<EOL>self._getMetricLabels())<EOL>if len(matchingKeys) == <NUM_LIT:0>:<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" %(self._optimizeKeyPattern, self._getMetricLabels()))<EOL><DEDENT>elif len(matchingKeys) > <NUM_LIT:1>:<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (self._optimizeKeyPattern, matchingKeys))<EOL><DEDENT>return matchingKeys[<NUM_LIT:0>]<EOL>", "docstring": "Get the label for the metric being optimized. This function also caches\n        the label in the instance variable self._optimizedMetricLabel\n\n        Parameters:\n        -----------------------------------------------------------------------\n        metricLabels:   A sequence of all the labels being computed for this model\n\n        Returns:        The label for the metric being optimized over", "id": "f17593:c0:m7"}
{"signature": "def __flushPredictionCache(self):", "body": "if not self.__predictionCache:<EOL><INDENT>return<EOL><DEDENT>if self._predictionLogger is None:<EOL><INDENT>self._createPredictionLogger()<EOL><DEDENT>startTime = time.time()<EOL>self._predictionLogger.writeRecords(self.__predictionCache,<EOL>progressCB=self.__writeRecordsCallback)<EOL>self._logger.info(\"<STR_LIT>\",<EOL>len(self.__predictionCache), time.time() - startTime)<EOL>self.__predictionCache.clear()<EOL>", "docstring": "Writes the contents of this model's in-memory prediction cache to a permanent\nstore via the prediction output stream instance", "id": "f17593:c0:m17"}
{"signature": "def _createPredictionLogger(self):", "body": "<EOL>self._predictionLogger = BasicPredictionLogger(<EOL>fields=self._model.getFieldInfo(),<EOL>experimentDir=self._experimentDir,<EOL>label = \"<STR_LIT>\",<EOL>inferenceType=self._model.getInferenceType())<EOL>if self.__loggedMetricPatterns:<EOL><INDENT>metricLabels = self.__metricMgr.getMetricLabels()<EOL>loggedMetrics = matchPatterns(self.__loggedMetricPatterns, metricLabels)<EOL>self._predictionLogger.setLoggedMetrics(loggedMetrics)<EOL><DEDENT>", "docstring": "Creates the model's PredictionLogger object, which is an interface to write\nmodel results to a permanent storage location", "id": "f17593:c0:m6"}
{"signature": "def __checkCancelation(self):", "body": "<EOL>print(\"<STR_LIT>\", file=sys.stderr)<EOL>jobCancel = self._jobsDAO.jobGetFields(self._jobID, ['<STR_LIT>'])[<NUM_LIT:0>]<EOL>if jobCancel:<EOL><INDENT>self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED<EOL>self._isCanceled = True<EOL>self._logger.info(\"<STR_LIT>\",<EOL>self._modelID, self._jobID)<EOL><DEDENT>else:<EOL><INDENT>stopReason = self._jobsDAO.modelsGetFields(self._modelID, ['<STR_LIT>'])[<NUM_LIT:0>]<EOL>if stopReason is None:<EOL><INDENT>pass<EOL><DEDENT>elif stopReason == ClientJobsDAO.STOP_REASON_KILLED:<EOL><INDENT>self._cmpReason = ClientJobsDAO.CMPL_REASON_KILLED<EOL>self._isKilled = True<EOL>self._logger.info(\"<STR_LIT>\",<EOL>self._modelID)<EOL><DEDENT>elif stopReason == ClientJobsDAO.STOP_REASON_STOPPED:<EOL><INDENT>self._cmpReason = ClientJobsDAO.CMPL_REASON_STOPPED<EOL>self._isCanceled = True<EOL>self._logger.info(\"<STR_LIT>\", self._modelID)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError (\"<STR_LIT>\" % (stopReason))<EOL><DEDENT><DEDENT>", "docstring": "Check if the cancelation flag has been set for this model\n        in the Model DB", "id": "f17593:c0:m20"}
{"signature": "def run(self):", "body": "<EOL>descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(<EOL>self._experimentDir)<EOL>expIface = helpers.getExperimentDescriptionInterfaceFromModule(<EOL>descriptionPyModule)<EOL>expIface.normalizeStreamSources()<EOL>modelDescription = expIface.getModelDescription()<EOL>self._modelControl = expIface.getModelControl()<EOL>streamDef = self._modelControl['<STR_LIT>']<EOL>from nupic.data.stream_reader import StreamReader<EOL>readTimeout = <NUM_LIT:0><EOL>self._inputSource = StreamReader(streamDef, isBlocking=False,<EOL>maxTimeout=readTimeout)<EOL>fieldStats = self._getFieldStats()<EOL>self._model = ModelFactory.create(modelDescription)<EOL>self._model.setFieldStatistics(fieldStats)<EOL>self._model.enableLearning()<EOL>self._model.enableInference(self._modelControl.get(\"<STR_LIT>\", None))<EOL>self.__metricMgr = MetricsManager(self._modelControl.get('<STR_LIT>',None),<EOL>self._model.getFieldInfo(),<EOL>self._model.getInferenceType())<EOL>self.__loggedMetricPatterns = self._modelControl.get(\"<STR_LIT>\", [])<EOL>self._optimizedMetricLabel = self.__getOptimizedMetricLabel()<EOL>self._reportMetricLabels = matchPatterns(self._reportKeyPatterns,<EOL>self._getMetricLabels())<EOL>self._periodic = self._initPeriodicActivities()<EOL>numIters = self._modelControl.get('<STR_LIT>', -<NUM_LIT:1>)<EOL>learningOffAt = None<EOL>iterationCountInferOnly = self._modelControl.get('<STR_LIT>', <NUM_LIT:0>)<EOL>if iterationCountInferOnly == -<NUM_LIT:1>:<EOL><INDENT>self._model.disableLearning()<EOL><DEDENT>elif iterationCountInferOnly > <NUM_LIT:0>:<EOL><INDENT>assert numIters > iterationCountInferOnly, \"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\"<EOL>learningOffAt = numIters - iterationCountInferOnly<EOL><DEDENT>self.__runTaskMainLoop(numIters, learningOffAt=learningOffAt)<EOL>self._finalize()<EOL>return (self._cmpReason, None)<EOL>", "docstring": "Runs the OPF Model\n\n        Parameters:\n        
-------------------------------------------------------------------------\n        retval:  (completionReason, completionMsg)\n                  where completionReason is one of the ClientJobsDAO.CMPL_REASON_XXX\n                    equates.", "id": "f17593:c0:m1"}
{"signature": "def __updateJobResults(self):", "body": "isSaved = False<EOL>while True:<EOL><INDENT>self._isBestModel, jobResults, jobResultsStr =self.__checkIfBestCompletedModel()<EOL>if self._isBestModel:<EOL><INDENT>if not isSaved:<EOL><INDENT>self.__flushPredictionCache()<EOL>self._jobsDAO.modelUpdateTimestamp(self._modelID)<EOL>self.__createModelCheckpoint()<EOL>self._jobsDAO.modelUpdateTimestamp(self._modelID)<EOL>isSaved = True<EOL><DEDENT>prevBest = jobResults.get('<STR_LIT>', None)<EOL>prevWasSaved = jobResults.get('<STR_LIT>', False)<EOL>if prevBest == self._modelID:<EOL><INDENT>assert not prevWasSaved<EOL><DEDENT>metrics = self._getMetrics()<EOL>jobResults['<STR_LIT>'] = self._modelID<EOL>jobResults['<STR_LIT>'] = metrics[self._optimizedMetricLabel]<EOL>jobResults['<STR_LIT>'] = metrics<EOL>jobResults['<STR_LIT>'] = True<EOL>isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID,<EOL>fieldName='<STR_LIT>',<EOL>curValue=jobResultsStr,<EOL>newValue=json.dumps(jobResults))<EOL>if isUpdated:<EOL><INDENT>if prevWasSaved:<EOL><INDENT>self.__deleteOutputCache(prevBest)<EOL>self._jobsDAO.modelUpdateTimestamp(self._modelID)<EOL>self.__deleteModelCheckpoint(prevBest)<EOL>self._jobsDAO.modelUpdateTimestamp(self._modelID)<EOL><DEDENT>self._logger.info(\"<STR_LIT>\", self._modelID)<EOL>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.__deleteOutputCache(self._modelID)<EOL>self._jobsDAO.modelUpdateTimestamp(self._modelID)<EOL>self.__deleteModelCheckpoint(self._modelID)<EOL>self._jobsDAO.modelUpdateTimestamp(self._modelID)<EOL>break<EOL><DEDENT><DEDENT>", "docstring": "Check if this is the best model\nIf so:\n  1) Write it's checkpoint\n  2) Record this model as the best\n  3) Delete the previous best's output cache\nOtherwise:\n  1) Delete our output cache", "id": "f17593:c0:m14"}
{"signature": "@staticmethod<EOL><INDENT>def getMaxDelay(inferences):<DEDENT>", "body": "maxDelay = <NUM_LIT:0><EOL>for inferenceElement, inference in inferences.iteritems():<EOL><INDENT>if isinstance(inference, dict):<EOL><INDENT>for key in inference.iterkeys():<EOL><INDENT>maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement,<EOL>key),<EOL>maxDelay)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement),<EOL>maxDelay)<EOL><DEDENT><DEDENT>return maxDelay<EOL>", "docstring": "Returns the maximum delay for the InferenceElements in the inference\ndictionary\n\nParameters:\n-----------------------------------------------------------------------\ninferences:   A dictionary where the keys are InferenceElements", "id": "f17595:c0:m3"}
{"signature": "@staticmethod<EOL><INDENT>def getTemporalDelay(inferenceElement, key=None):<DEDENT>", "body": "<EOL>if inferenceElement in (InferenceElement.prediction,<EOL>InferenceElement.encodings):<EOL><INDENT>return <NUM_LIT:1><EOL><DEDENT>if inferenceElement in (InferenceElement.anomalyScore,<EOL>InferenceElement.anomalyLabel,<EOL>InferenceElement.classification,<EOL>InferenceElement.classConfidences):<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>if inferenceElement in (InferenceElement.multiStepPredictions,<EOL>InferenceElement.multiStepBestPredictions):<EOL><INDENT>return int(key)<EOL><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Returns the number of records that elapse between when an inference is\n        made and when the corresponding input record will appear. For example, a\n        multistep prediction for 3 timesteps out will have a delay of 3\n\n\n        Parameters:\n        -----------------------------------------------------------------------\n\n        inferenceElement:   The InferenceElement value being delayed\n        key:                If the inference is a dictionary type, this specifies\n                            key for the sub-inference that is being delayed", "id": "f17595:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def isTemporal(inferenceType):<DEDENT>", "body": "if InferenceType.__temporalInferenceTypes is None:<EOL><INDENT>InferenceType.__temporalInferenceTypes =set([InferenceType.TemporalNextStep,<EOL>InferenceType.TemporalClassification,<EOL>InferenceType.TemporalAnomaly,<EOL>InferenceType.TemporalMultiStep,<EOL>InferenceType.NontemporalMultiStep])<EOL><DEDENT>return inferenceType in InferenceType.__temporalInferenceTypes<EOL>", "docstring": "Returns True if the inference type is 'temporal', i.e. requires a\n        temporal memory in the network.", "id": "f17595:c1:m0"}
{"signature": "@staticmethod<EOL><INDENT>def getInputElement(inferenceElement):<DEDENT>", "body": "return InferenceElement.__inferenceInputMap.get(inferenceElement, None)<EOL>", "docstring": "Get the sensor input element that corresponds to the given inference\n        element. This is mainly used for metrics and prediction logging", "id": "f17595:c0:m0"}
{"signature": "def __init__(self,  jobID, jobsDAO, logLevel = None):", "body": "self._jobID = jobID<EOL>self._cjDB = jobsDAO<EOL>self._lastUpdateAttemptTime = <NUM_LIT:0><EOL>initLogging(verbose = True)<EOL>self.logger = logging.getLogger(\"<STR_LIT:.>\".join( ['<STR_LIT>',<EOL>self.__class__.__module__, self.__class__.__name__]))<EOL>if logLevel is not None:<EOL><INDENT>self.logger.setLevel(logLevel)<EOL><DEDENT>self.logger.info(\"<STR_LIT>\" % str(jobID))<EOL>", "docstring": "TODO: Documentation", "id": "f17596:c0:m0"}
{"signature": "def updateResultsForJob(self, forceUpdate=True):", "body": "updateInterval = time.time() - self._lastUpdateAttemptTime<EOL>if updateInterval < self._MIN_UPDATE_INTERVAL and not forceUpdate:<EOL><INDENT>return<EOL><DEDENT>self.logger.info(\"<STR_LIT>\"\"<STR_LIT>\"%(self._jobID,<EOL>time.time(),<EOL>self._lastUpdateAttemptTime))<EOL>timestampUpdated = self._cjDB.jobUpdateSelectionSweep(self._jobID,<EOL>self._MIN_UPDATE_INTERVAL)<EOL>if not timestampUpdated:<EOL><INDENT>self.logger.info(\"<STR_LIT>\"\"<STR_LIT>\"%(self._jobID, self._lastUpdateAttemptTime))<EOL>if not forceUpdate:<EOL><INDENT>return<EOL><DEDENT><DEDENT>self._lastUpdateAttemptTime = time.time()<EOL>self.logger.info(\"<STR_LIT>\"%(self._jobID, self._lastUpdateAttemptTime))<EOL>minUpdateRecords = self._MIN_UPDATE_THRESHOLD<EOL>jobResults = self._getJobResults()<EOL>if forceUpdate or jobResults is None:<EOL><INDENT>minUpdateRecords = <NUM_LIT:0><EOL><DEDENT>candidateIDs, bestMetric = self._cjDB.modelsGetCandidates(self._jobID, minUpdateRecords)<EOL>self.logger.info(\"<STR_LIT>\"%(candidateIDs, bestMetric, self._jobID))<EOL>if len(candidateIDs) == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>self._jobUpdateCandidate(candidateIDs[<NUM_LIT:0>], bestMetric, results=jobResults)<EOL>", "docstring": "Chooses the best model for a given job.\n\n        Parameters\n        -----------------------------------------------------------------------\n        forceUpdate:  (True/False). If True, the update will ignore all the\n                      restrictions on the minimum time to update and the minimum\n                      number of records to update. This should typically only be\n                      set to true if the model has completed running", "id": "f17596:c0:m1"}
{"signature": "def _matchReportKeys(reportKeyREs=[], allReportKeys=[]):", "body": "matchingReportKeys = []<EOL>for keyRE in reportKeyREs:<EOL><INDENT>matchObj = re.compile(keyRE)<EOL>found = False<EOL>for keyName in allReportKeys:<EOL><INDENT>match = matchObj.match(keyName)<EOL>if match and match.end() == len(keyName):<EOL><INDENT>matchingReportKeys.append(keyName)<EOL>found = True<EOL><DEDENT><DEDENT>if not found:<EOL><INDENT>raise _BadKeyError(keyRE)<EOL><DEDENT><DEDENT>return matchingReportKeys<EOL>", "docstring": "Extract all items from the 'allKeys' list whose key matches one of the regular\nexpressions passed in 'reportKeys'.\n\nParameters:\n----------------------------------------------------------------------------\nreportKeyREs:     List of regular expressions\nallReportKeys:    List of all keys\n\nretval:         list of keys from allReportKeys that match the regular expressions\n                  in 'reportKeyREs'\n                If an invalid regular expression was included in 'reportKeys',\n                  then BadKeyError() is raised", "id": "f17597:m4"}
{"signature": "def _getReportItem(itemName, results):", "body": "subKeys = itemName.split('<STR_LIT::>')<EOL>subResults = results<EOL>for subKey in subKeys:<EOL><INDENT>subResults = subResults[subKey]<EOL><DEDENT>return subResults<EOL>", "docstring": "Get a specific item by name out of the results dict.\n\nThe format of itemName is a string of dictionary keys separated by colons,\neach key being one level deeper into the results dict. For example,\n'key1:key2' would fetch results['key1']['key2'].\n\nIf itemName is not found in results, then None is returned", "id": "f17597:m5"}
{"signature": "def loadJsonValueFromFile(inputFilePath):", "body": "with open(inputFilePath) as fileObj:<EOL><INDENT>value = json.load(fileObj)<EOL><DEDENT>return value<EOL>", "docstring": "Loads a json value from a file and converts it to the corresponding python\n    object.\n\n    inputFilePath:\n                    Path of the json file;\n\n    Returns:\n                    python value that represents the loaded json value", "id": "f17597:m17"}
{"signature": "def _quoteAndEscape(string):", "body": "assert type(string) in (str,)<EOL>return pprint.pformat(string)<EOL>", "docstring": "string:   input string (ascii or unicode)\n\nReturns:  a quoted string with characters that are represented in python via\n          escape sequences converted to those escape sequences", "id": "f17597:m7"}
{"signature": "def generatePersistentJobGUID():", "body": "return \"<STR_LIT>\" + str(uuid.uuid1())<EOL>", "docstring": "Generates a \"persistentJobGUID\" value.\n\n    Parameters:\n    ----------------------------------------------------------------------\n    retval:          A persistentJobGUID value", "id": "f17597:m11"}
{"signature": "def filterResults(allResults, reportKeys, optimizeKey=None):", "body": "<EOL>optimizeDict = dict()<EOL>allReportKeys = set()<EOL>_appendReportKeys(keys=allReportKeys, prefix='<STR_LIT>', results=allResults)<EOL>matchingKeys = _matchReportKeys(reportKeys, allReportKeys)<EOL>reportDict = dict()<EOL>for keyName in matchingKeys:<EOL><INDENT>value = _getReportItem(keyName, allResults)<EOL>reportDict[keyName] = value<EOL><DEDENT>if optimizeKey is not None:<EOL><INDENT>matchingKeys = _matchReportKeys([optimizeKey], allReportKeys)<EOL>if len(matchingKeys) == <NUM_LIT:0>:<EOL><INDENT>raise _BadKeyError(optimizeKey)<EOL><DEDENT>elif len(matchingKeys) > <NUM_LIT:1>:<EOL><INDENT>raise _BadOptimizeKeyError(optimizeKey, matchingKeys)<EOL><DEDENT>optimizeKeyFullName = matchingKeys[<NUM_LIT:0>]<EOL>value = _getReportItem(optimizeKeyFullName, allResults)<EOL>optimizeDict[optimizeKeyFullName] = value<EOL>reportDict[optimizeKeyFullName] = value<EOL><DEDENT>return(reportDict, optimizeDict)<EOL>", "docstring": "Given the complete set of results generated by an experiment (passed in\n    'results'), filter out and return only the ones the caller wants, as\n    specified through 'reportKeys' and 'optimizeKey'.\n\n    A report key is a string of key names separated by colons, each key being one\n    level deeper into the experiment results dict. For example, 'key1:key2'.\n\n\n    Parameters:\n    -------------------------------------------------------------------------\n    results:             dict of all results generated by an experiment\n    reportKeys:          list of items from the results dict to include in\n                         the report. These can be regular expressions.\n    optimizeKey:         Which report item, if any, we will be optimizing for. 
This can\n                         also be a regular expression, but is an error if it matches\n                         more than one key from the experiment's results.\n    retval:  (reportDict, optimizeDict)\n                reportDict: a dictionary of the metrics named by desiredReportKeys\n                optimizeDict: A dictionary containing 1 item: the full name and\n                      value of the metric identified by the optimizeKey", "id": "f17597:m6"}
{"signature": "def _paramsFileHead():", "body": "str = getCopyrightHead() +\"<EOL>his file defines parameters for a prediction experiment.<EOL><INDENT>IMPORTANT!!!<EOL><DEDENT>is params file is dynamically generated by the RunExperimentPermutations<EOL>ript. Any changes made manually will be over-written the next time<EOL>nExperimentPermutations is run!!!<EOL><INDENT>nupic.frameworks.opf.exp_description_helpers import importBaseDescription<EOL><DEDENT>e sub-experiment configuration<EOL>ig ={<EOL>return str<EOL>", "docstring": "This is the first portion of every sub-experiment params file we generate. Between\nthe head and the tail are the experiment specific options.", "id": "f17597:m1"}
{"signature": "def tick(self):", "body": "<EOL>for act in self.__activities:<EOL><INDENT>if not act.iteratorHolder[<NUM_LIT:0>]:<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>next(act.iteratorHolder[<NUM_LIT:0>])<EOL><DEDENT>except StopIteration:<EOL><INDENT>act.cb()<EOL>if act.repeating:<EOL><INDENT>act.iteratorHolder[<NUM_LIT:0>] = iter(range(act.period))<EOL><DEDENT>else:<EOL><INDENT>act.iteratorHolder[<NUM_LIT:0>] = None<EOL><DEDENT><DEDENT><DEDENT>return True<EOL>", "docstring": "Activity tick handler; services all activities\n\n        Returns:      True if controlling iterator says it's okay to keep going;\n                      False to stop", "id": "f17597:c2:m1"}
{"signature": "def __init__(self, nupicJobID):", "body": "super(_HyperSearchJob, self).__init__(nupicJobID)<EOL>self.__expectedNumModels = None<EOL>", "docstring": "Parameters:\n----------------------------------------------------------------------\nnupicJobID:      Nupic Client JobID of a HyperSearch job\nretval:         nothing", "id": "f17598:c6:m0"}
{"signature": "def getOptimizationMetrics(self):", "body": "return self.__unwrapResults().optimizationMetrics<EOL>", "docstring": "Retrieves a dictionary of metrics designated for optimization\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:         a dictionary of optimization metrics that were collected\n                        for the model or an empty dictionary if there aren't any.", "id": "f17598:c9:m10"}
{"signature": "def __init__(self,<EOL>hyperSearchJob,<EOL>metricsKeys,<EOL>searchVar,<EOL>outputDirAbsPath,<EOL>outputLabel,<EOL>replaceReport):", "body": "self.__searchJob = hyperSearchJob<EOL>self.__searchJobID = hyperSearchJob.getJobID()<EOL>self.__sortedMetricsKeys = sorted(metricsKeys)<EOL>self.__outputDirAbsPath = os.path.abspath(outputDirAbsPath)<EOL>self.__outputLabel = outputLabel<EOL>self.__replaceReport = replaceReport<EOL>self.__sortedVariableNames=searchVar<EOL>self.__csvFileObj = None<EOL>self.__reportCSVPath = None<EOL>self.__backupCSVPath = None<EOL>", "docstring": "Parameters:\n----------------------------------------------------------------------\nhyperSearchJob: _HyperSearchJob instance\nmetricsKeys:    sequence of report metrics key names to include in report\noutputDirAbsPath:\n                Directory for creating report CSV file (absolute path)\noutputLabel:    A string label to incorporate into report CSV file name\nreplaceReport:  True to replace existing report csv, if any; False to\n                append to existing report csv, if any\nretval:         nothing", "id": "f17598:c3:m0"}
{"signature": "def _engineServicesRunning():", "body": "process = subprocess.Popen([\"<STR_LIT>\", \"<STR_LIT>\"], stdout=subprocess.PIPE)<EOL>stdout = process.communicate()[<NUM_LIT:0>]<EOL>result = process.returncode<EOL>if result != <NUM_LIT:0>:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>running = False<EOL>for line in stdout.split(\"<STR_LIT:\\n>\"):<EOL><INDENT>if \"<STR_LIT>\" in line and \"<STR_LIT>\" in line:<EOL><INDENT>running = True<EOL>break<EOL><DEDENT><DEDENT>return running<EOL>", "docstring": "Return true if the engine services are running", "id": "f17598:m5"}
{"signature": "def getEndTime(self):", "body": "assert self.isFinished(), \"<STR_LIT>\" % self<EOL>return \"<STR_LIT:%s>\" % self.__rawInfo.endTime<EOL>", "docstring": "Returns model evaluation end time.\n\n        NOTE: it's an error to call this method if isFinished() would return False.\n\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:         model evaluation end time", "id": "f17598:c9:m19"}
{"signature": "@classmethod<EOL><INDENT>def loadSavedHyperSearchJob(cls, permWorkDir, outputLabel):<DEDENT>", "body": "jobID = cls.__loadHyperSearchJobID(permWorkDir=permWorkDir,<EOL>outputLabel=outputLabel)<EOL>searchJob = _HyperSearchJob(nupicJobID=jobID)<EOL>return searchJob<EOL>", "docstring": "Instantiates a _HyperSearchJob instance from info saved in file\n\n        Parameters:\n        ----------------------------------------------------------------------\n        permWorkDir: Directory path for saved jobID file\n        outputLabel: Label string for incorporating into file name for saved jobID\n        retval:      _HyperSearchJob instance; raises exception if not found", "id": "f17598:c1:m10"}
{"signature": "def finalize(self):", "body": "if self.__csvFileObj is not None:<EOL><INDENT>self.__csvFileObj.close()<EOL>self.__csvFileObj = None<EOL>print(\"<STR_LIT>\" % (self.__reportCSVPath,))<EOL>if self.__backupCSVPath:<EOL><INDENT>print(\"<STR_LIT>\" %(self.__backupCSVPath,))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Close file and print report/backup csv file paths\n\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:         nothing", "id": "f17598:c3:m2"}
{"signature": "@classmethod<EOL><INDENT>def generateReport(cls,<EOL>options,<EOL>replaceReport,<EOL>hyperSearchJob,<EOL>metricsKeys):<DEDENT>", "body": "<EOL>if hyperSearchJob is None:<EOL><INDENT>hyperSearchJob = cls.loadSavedHyperSearchJob(<EOL>permWorkDir=options[\"<STR_LIT>\"],<EOL>outputLabel=options[\"<STR_LIT>\"])<EOL><DEDENT>modelIDs = hyperSearchJob.queryModelIDs()<EOL>bestModel = None<EOL>metricstmp = set()<EOL>searchVar = set()<EOL>for modelInfo in _iterModels(modelIDs):<EOL><INDENT>if modelInfo.isFinished():<EOL><INDENT>vars = list(modelInfo.getParamLabels().keys())<EOL>searchVar.update(vars)<EOL>metrics = modelInfo.getReportMetrics()<EOL>metricstmp.update(list(metrics.keys()))<EOL><DEDENT><DEDENT>if metricsKeys is None:<EOL><INDENT>metricsKeys = metricstmp<EOL><DEDENT>reportWriter = _ReportCSVWriter(hyperSearchJob=hyperSearchJob,<EOL>metricsKeys=metricsKeys,<EOL>searchVar=searchVar,<EOL>outputDirAbsPath=options[\"<STR_LIT>\"],<EOL>outputLabel=options[\"<STR_LIT>\"],<EOL>replaceReport=replaceReport)<EOL>modelStats = _ModelStats()<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>searchParams = hyperSearchJob.getParams()<EOL>(optimizationMetricKey, maximizeMetric) = (<EOL>_PermutationUtils.getOptimizationMetricInfo(searchParams))<EOL>formatStr = None<EOL>foundMetricsKeySet = set(metricsKeys)<EOL>sortedMetricsKeys = []<EOL>jobInfo = _clientJobsDB().jobInfo(hyperSearchJob.getJobID())<EOL>if jobInfo.cancel == <NUM_LIT:1>:<EOL><INDENT>raise Exception(jobInfo.workerCompletionMsg)<EOL><DEDENT>try:<EOL><INDENT>results = json.loads(jobInfo.results)<EOL><DEDENT>except Exception as e:<EOL><INDENT>print(\"<STR_LIT>\"\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\", jobInfo)<EOL>print(\"<STR_LIT>\", jobInfo.results)<EOL>print(\"<STR_LIT>\", e)<EOL>raise<EOL><DEDENT>bestModelNum = results[\"<STR_LIT>\"]<EOL>bestModelIterIndex = None<EOL>totalWallTime = <NUM_LIT:0><EOL>totalRecords = <NUM_LIT:0><EOL>scoreModelIDDescList = []<EOL>for (i, modelInfo) in 
enumerate(_iterModels(modelIDs)):<EOL><INDENT>reportWriter.emit(modelInfo)<EOL>totalRecords+=modelInfo.getNumRecords()<EOL>format = \"<STR_LIT>\"<EOL>startTime = modelInfo.getStartTime()<EOL>if modelInfo.isFinished():<EOL><INDENT>endTime = modelInfo.getEndTime()<EOL>st = datetime.strptime(startTime, format)<EOL>et = datetime.strptime(endTime, format)<EOL>totalWallTime+=(et-st).seconds<EOL><DEDENT>modelStats.update(modelInfo)<EOL>expDesc = modelInfo.getModelDescription()<EOL>reportMetrics = modelInfo.getReportMetrics()<EOL>optimizationMetrics = modelInfo.getOptimizationMetrics()<EOL>if modelInfo.getModelID() == bestModelNum:<EOL><INDENT>bestModel = modelInfo<EOL>bestModelIterIndex=i<EOL>bestMetric = list(optimizationMetrics.values())[<NUM_LIT:0>]<EOL><DEDENT>if optimizationMetrics:<EOL><INDENT>assert len(optimizationMetrics) == <NUM_LIT:1>, (<EOL>\"<STR_LIT>\" % (<EOL>len(optimizationMetrics), optimizationMetrics, modelInfo))<EOL><DEDENT>if modelInfo.getCompletionReason().isEOF():<EOL><INDENT>scoreModelIDDescList.append((list(optimizationMetrics.values())[<NUM_LIT:0>],<EOL>modelInfo.getModelID(),<EOL>modelInfo.getGeneratedDescriptionFile(),<EOL>modelInfo.getParamLabels()))<EOL><DEDENT>print(\"<STR_LIT>\" % (i, modelInfo, expDesc))<EOL>if (modelInfo.isFinished() and<EOL>not (modelInfo.getCompletionReason().isStopped or<EOL>modelInfo.getCompletionReason().isEOF())):<EOL><INDENT>print(\"<STR_LIT>\" % modelInfo.getCompletionMsg())<EOL><DEDENT>if reportMetrics:<EOL><INDENT>foundMetricsKeySet.update(iter(reportMetrics.keys()))<EOL>if len(sortedMetricsKeys) != len(foundMetricsKeySet):<EOL><INDENT>sortedMetricsKeys = sorted(foundMetricsKeySet)<EOL>maxKeyLen = max([len(k) for k in sortedMetricsKeys])<EOL>formatStr = \"<STR_LIT>\" % (maxKeyLen+<NUM_LIT:2>)<EOL><DEDENT>for key in sortedMetricsKeys:<EOL><INDENT>if key in reportMetrics:<EOL><INDENT>if key == optimizationMetricKey:<EOL><INDENT>m = \"<STR_LIT>\" % reportMetrics[key]<EOL><DEDENT>else:<EOL><INDENT>m = \"<STR_LIT>\" 
% reportMetrics[key]<EOL><DEDENT>print(formatStr % (key+\"<STR_LIT::>\"), m)<EOL><DEDENT><DEDENT>print()<EOL><DEDENT><DEDENT>print(\"<STR_LIT>\")<EOL>if len(modelIDs) > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\" % (<EOL>len(modelIDs),<EOL>(\"<STR_LIT>\"<EOL>if (modelStats.numCompletedKilled + modelStats.numCompletedEOF) ==<EOL>len(modelIDs)<EOL>else \"<STR_LIT>\" % (<EOL>len(modelIDs) - (<EOL>modelStats.numCompletedKilled + modelStats.numCompletedEOF +<EOL>modelStats.numCompletedStopped)))))<EOL>if modelStats.numStatusOther > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\" % (<EOL>modelStats.numStatusOther))<EOL><DEDENT>print(\"<STR_LIT>\" % modelStats.numStatusWaitingToStart)<EOL>print(\"<STR_LIT>\" % modelStats.numStatusRunning)<EOL>print(\"<STR_LIT>\" % modelStats.numStatusCompleted)<EOL>if modelStats.numCompletedOther > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\" % (<EOL>modelStats.numCompletedOther))<EOL><DEDENT>print(\"<STR_LIT>\" % modelStats.numCompletedEOF)<EOL>print(\"<STR_LIT>\" % modelStats.numCompletedStopped)<EOL>print(\"<STR_LIT>\" % modelStats.numCompletedOrphaned)<EOL>print(\"<STR_LIT>\" % modelStats.numCompletedKilled)<EOL>print(\"<STR_LIT>\" % modelStats.numCompletedError)<EOL>assert modelStats.numStatusOther == <NUM_LIT:0>, \"<STR_LIT>\" % (<EOL>modelStats.numStatusOther)<EOL>assert modelStats.numCompletedOther == <NUM_LIT:0>, \"<STR_LIT>\" % (<EOL>modelStats.numCompletedOther)<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>print()<EOL>global gCurrentSearch<EOL>jobStatus = hyperSearchJob.getJobStatus(gCurrentSearch._workers)<EOL>jobResults = jobStatus.getResults()<EOL>if \"<STR_LIT>\" in jobResults:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>pprint.pprint(jobResults[\"<STR_LIT>\"], indent=<NUM_LIT:4>)<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>if bestModel is not None:<EOL><INDENT>maxKeyLen = max([len(k) for k in sortedMetricsKeys])<EOL>maxKeyLen = max(maxKeyLen, len(optimizationMetricKey))<EOL>formatStr = 
\"<STR_LIT>\" % (maxKeyLen+<NUM_LIT:2>)<EOL>bestMetricValue = list(bestModel.getOptimizationMetrics().values())[<NUM_LIT:0>]<EOL>optimizationMetricName = list(bestModel.getOptimizationMetrics().keys())[<NUM_LIT:0>]<EOL>print()<EOL>print(\"<STR_LIT>\" % (<EOL>optimizationMetricName, maximizeMetric))<EOL>print(\"<STR_LIT>\" % (<EOL>bestModelIterIndex, bestModel, bestModel.getModelDescription()))<EOL>print(formatStr % (optimizationMetricName+\"<STR_LIT::>\"), bestMetricValue)<EOL>print()<EOL>print(\"<STR_LIT>\"  % totalRecords)<EOL>print()<EOL>print(\"<STR_LIT>\" % totalWallTime)<EOL>hsJobParams = hyperSearchJob.getParams()<EOL><DEDENT>if options[\"<STR_LIT>\"] > <NUM_LIT:0>:<EOL><INDENT>print(\"<STR_LIT>\" % (<EOL>options[\"<STR_LIT>\"]))<EOL>scoreModelIDDescList.sort()<EOL>scoreModelIDDescList = scoreModelIDDescList[<EOL><NUM_LIT:0>:options[\"<STR_LIT>\"]]<EOL>i = -<NUM_LIT:1><EOL>for (score, modelID, description, paramLabels) in scoreModelIDDescList:<EOL><INDENT>i += <NUM_LIT:1><EOL>outDir = os.path.join(options[\"<STR_LIT>\"], \"<STR_LIT>\" % (i))<EOL>print(\"<STR_LIT>\" %(modelID, outDir))<EOL>if not os.path.exists(outDir):<EOL><INDENT>os.makedirs(outDir)<EOL><DEDENT>base_description_path = os.path.join(options[\"<STR_LIT>\"],<EOL>\"<STR_LIT>\")<EOL>base_description_relpath = os.path.relpath(base_description_path,<EOL>start=outDir)<EOL>description = description.replace(<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\" % base_description_relpath)<EOL>fd = open(os.path.join(outDir, \"<STR_LIT>\"), \"<STR_LIT:wb>\")<EOL>fd.write(description)<EOL>fd.close()<EOL>fd = open(os.path.join(outDir, \"<STR_LIT>\"), \"<STR_LIT:wb>\")<EOL>writer = csv.writer(fd)<EOL>colNames = list(paramLabels.keys())<EOL>colNames.sort()<EOL>writer.writerow(colNames)<EOL>row = [paramLabels[x] for x in colNames]<EOL>writer.writerow(row)<EOL>fd.close()<EOL>print(\"<STR_LIT>\")<EOL>mod = imp.load_source(\"<STR_LIT:description>\", os.path.join(outDir,<EOL>\"<STR_LIT>\"))<EOL>model_description = 
mod.descriptionInterface.getModelDescription()<EOL>fd = open(os.path.join(outDir, \"<STR_LIT>\"), \"<STR_LIT:wb>\")<EOL>fd.write(\"<STR_LIT>\" % (getCopyrightHead(),<EOL>pprint.pformat(model_description)))<EOL>fd.close()<EOL><DEDENT>print()<EOL><DEDENT>reportWriter.finalize()<EOL>return model_description<EOL>", "docstring": "Prints all available results in the given HyperSearch job and emits\n        model information to the permutations report csv.\n\n        The job may be completed or still in progress.\n\n        Parameters:\n        ----------------------------------------------------------------------\n        options:        NupicRunPermutations options dict\n        replaceReport:  True to replace existing report csv, if any; False to\n                        append to existing report csv, if any\n        hyperSearchJob: _HyperSearchJob instance; if None, will get it from saved\n                        jobID, if any\n        metricsKeys:    sequence of report metrics key names to include in report;\n                        if None, will pre-scan all modelInfos to generate a complete\n                        list of metrics key names.\n        retval:         model parameters", "id": "f17598:c1:m9"}
{"signature": "def _escape(s):", "body": "assert isinstance(s, str),\"<STR_LIT>\" % (type(str), type(s), s)<EOL>s = s.replace(\"<STR_LIT:\\\\>\", \"<STR_LIT>\")<EOL>s = s.replace(\"<STR_LIT:\\n>\", \"<STR_LIT>\")<EOL>s = s.replace(\"<STR_LIT:\\t>\", \"<STR_LIT>\")<EOL>s = s.replace(\"<STR_LIT:U+002C>\", \"<STR_LIT:\\t>\")<EOL>return s<EOL>", "docstring": "Escape commas, tabs, newlines and dashes in a string\n\n    Commas are encoded as tabs", "id": "f17598:m4"}
{"signature": "@classmethod<EOL><INDENT>def getOptimizationMetricInfo(cls, searchJobParams):<DEDENT>", "body": "if searchJobParams[\"<STR_LIT>\"] == \"<STR_LIT>\":<EOL><INDENT>search = HypersearchV2(searchParams=searchJobParams)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" %(searchJobParams[\"<STR_LIT>\"]))<EOL><DEDENT>info = search.getOptimizationMetricInfo()<EOL>return info<EOL>", "docstring": "Retrieves the optimization key name and optimization function.\n\n        Parameters:\n        ---------------------------------------------------------\n        searchJobParams:\n                        Parameter for passing as the searchParams arg to\n                        Hypersearch constructor.\n        retval:       (optimizationMetricKey, maximize)\n                      optimizationMetricKey: which report key to optimize for\n                      maximize: True if we should try and maximize the optimizeKey\n                        metric. False if we should minimize it.", "id": "f17598:c8:m0"}
{"signature": "def __init__(self, options):", "body": "self.__cjDAO = _clientJobsDB()<EOL>self._options = options<EOL>self.__searchJob = None<EOL>self.__foundMetrcsKeySet = set()<EOL>self._workers = None<EOL>return<EOL>", "docstring": "Parameters:\n----------------------------------------------------------------------\noptions:        NupicRunPermutations options dict\nretval:         nothing", "id": "f17598:c1:m0"}
{"signature": "def __unwrapResults(self):", "body": "if self.__cachedResults is None:<EOL><INDENT>if self.__rawInfo.results is not None:<EOL><INDENT>resultList = json.loads(self.__rawInfo.results)<EOL>assert len(resultList) == <NUM_LIT:2>,\"<STR_LIT>\" % (<EOL>len(resultList), resultList)<EOL>self.__cachedResults = self.ModelResults(<EOL>reportMetrics=resultList[<NUM_LIT:0>],<EOL>optimizationMetrics=resultList[<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>self.__cachedResults = self.ModelResults(<EOL>reportMetrics={},<EOL>optimizationMetrics={})<EOL><DEDENT><DEDENT>return self.__cachedResults<EOL>", "docstring": "Unwraps self.__rawInfo.results and caches it in self.__cachedResults;\n        Returns the unwrapped params\n\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:         ModelResults namedtuple instance", "id": "f17598:c9:m12"}
{"signature": "def isWaitingToStart(self):", "body": "waiting = (self.__rawInfo.status == self.__nupicModelStatus_notStarted)<EOL>return waiting<EOL>", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval:       True if the job has not been started yet", "id": "f17598:c9:m13"}
{"signature": "def getParams(self):", "body": "return self.__params<EOL>", "docstring": "Semi-private method for retrieving the job-specific params\n\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:         Job params dict corresponding to the JSON params value\n                        returned by ClientJobsDAO.jobInfo()", "id": "f17598:c4:m4"}
{"signature": "def peekSearchJob(self):", "body": "assert self.__searchJob is not None<EOL>return self.__searchJob<EOL>", "docstring": "Retrieves the runner's _HyperSearchJob instance; NOTE: only available\n        after run().\n\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:         _HyperSearchJob instance or None", "id": "f17598:c1:m6"}
{"signature": "def __startSearch(self):", "body": "<EOL>params = _ClientJobUtils.makeSearchJobParamsDict(options=self._options,<EOL>forRunning=True)<EOL>if self._options[\"<STR_LIT:action>\"] == \"<STR_LIT>\":<EOL><INDENT>args = [sys.argv[<NUM_LIT:0>], \"<STR_LIT>\" % (json.dumps(params))]<EOL>print()<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\")<EOL>jobID = hypersearch_worker.main(args)<EOL><DEDENT>else:<EOL><INDENT>cmdLine = _setUpExports(self._options[\"<STR_LIT>\"])<EOL>cmdLine += \"<STR_LIT>\"<EOL>maxWorkers = self._options[\"<STR_LIT>\"]<EOL>jobID = self.__cjDAO.jobInsert(<EOL>client=\"<STR_LIT>\",<EOL>cmdLine=cmdLine,<EOL>params=json.dumps(params),<EOL>minimumWorkers=<NUM_LIT:1>,<EOL>maximumWorkers=maxWorkers,<EOL>jobType=self.__cjDAO.JOB_TYPE_HS)<EOL>cmdLine = \"<STR_LIT>\"\"<STR_LIT>\" % (jobID)<EOL>self._launchWorkers(cmdLine, maxWorkers)<EOL><DEDENT>searchJob = _HyperSearchJob(jobID)<EOL>self.__saveHyperSearchJobID(<EOL>permWorkDir=self._options[\"<STR_LIT>\"],<EOL>outputLabel=self._options[\"<STR_LIT>\"],<EOL>hyperSearchJob=searchJob)<EOL>if self._options[\"<STR_LIT:action>\"] == \"<STR_LIT>\":<EOL><INDENT>print(\"<STR_LIT>\" % (jobID))<EOL><DEDENT>else:<EOL><INDENT>print(\"<STR_LIT>\" % (jobID))<EOL>_emit(Verbosity.DEBUG,<EOL>\"<STR_LIT>\" % (cmdLine,))<EOL><DEDENT>return searchJob<EOL>", "docstring": "Starts HyperSearch as a worker or runs it inline for the \"dryRun\" action\n\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:         the new _HyperSearchJob instance representing the\n                        HyperSearch job", "id": "f17598:c1:m5"}
{"signature": "def queryModelIDs(self):", "body": "jobID = self.getJobID()<EOL>modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(jobID)<EOL>modelIDs = tuple(x[<NUM_LIT:0>] for x in modelCounterPairs)<EOL>return modelIDs<EOL>", "docstring": "Queries DB for model IDs of all currently instantiated models\n        associated with this HyperSearch job.\n\n        See also: _iterModels()\n\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:         A sequence of Nupic modelIDs", "id": "f17598:c6:m1"}
{"signature": "def monitorSearchJob(self):", "body": "assert self.__searchJob is not None<EOL>jobID = self.__searchJob.getJobID()<EOL>startTime = time.time()<EOL>lastUpdateTime = datetime.now()<EOL>expectedNumModels = self.__searchJob.getExpectedNumModels(<EOL>searchMethod = self._options[\"<STR_LIT>\"])<EOL>lastNumFinished = <NUM_LIT:0><EOL>finishedModelIDs = set()<EOL>finishedModelStats = _ModelStats()<EOL>lastWorkerState = None<EOL>lastJobResults = None<EOL>lastModelMilestones = None<EOL>lastEngStatus = None<EOL>hyperSearchFinished = False<EOL>while not hyperSearchFinished:<EOL><INDENT>jobInfo = self.__searchJob.getJobStatus(self._workers)<EOL>hyperSearchFinished = jobInfo.isFinished()<EOL>modelIDs = self.__searchJob.queryModelIDs()<EOL>_emit(Verbosity.DEBUG,<EOL>\"<STR_LIT>\" % (<EOL>len(modelIDs), len(finishedModelIDs)))<EOL>if len(modelIDs) > <NUM_LIT:0>:<EOL><INDENT>checkModelIDs = []<EOL>for modelID in modelIDs:<EOL><INDENT>if modelID not in finishedModelIDs:<EOL><INDENT>checkModelIDs.append(modelID)<EOL><DEDENT><DEDENT>del modelIDs<EOL>if checkModelIDs:<EOL><INDENT>_emit(Verbosity.DEBUG,<EOL>\"<STR_LIT>\" % (len(checkModelIDs)))<EOL>errorCompletionMsg = None<EOL>for (i, modelInfo) in enumerate(_iterModels(checkModelIDs)):<EOL><INDENT>_emit(Verbosity.DEBUG,<EOL>\"<STR_LIT>\" % (i, modelInfo))<EOL>if modelInfo.isFinished():<EOL><INDENT>finishedModelIDs.add(modelInfo.getModelID())<EOL>finishedModelStats.update(modelInfo)<EOL>if (modelInfo.getCompletionReason().isError() and<EOL>not errorCompletionMsg):<EOL><INDENT>errorCompletionMsg = modelInfo.getCompletionMsg()<EOL><DEDENT>metrics = modelInfo.getReportMetrics()<EOL>self.__foundMetrcsKeySet.update(list(metrics.keys()))<EOL><DEDENT><DEDENT><DEDENT>numFinished = len(finishedModelIDs)<EOL>if numFinished != lastNumFinished:<EOL><INDENT>lastNumFinished = numFinished<EOL>if expectedNumModels is None:<EOL><INDENT>expModelsStr = \"<STR_LIT>\"<EOL><DEDENT>else:<EOL><INDENT>expModelsStr = \"<STR_LIT>\" % 
(expectedNumModels)<EOL><DEDENT>stats = finishedModelStats<EOL>print((\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (<EOL>jobID,<EOL>numFinished,<EOL>expModelsStr,<EOL>(stats.numCompletedEOF+stats.numCompletedStopped),<EOL>\"<STR_LIT>\" if stats.numCompletedEOF else \"<STR_LIT>\",<EOL>stats.numCompletedEOF,<EOL>\"<STR_LIT>\" if stats.numCompletedStopped else \"<STR_LIT>\",<EOL>stats.numCompletedStopped,<EOL>\"<STR_LIT>\" if stats.numCompletedKilled else \"<STR_LIT>\",<EOL>stats.numCompletedKilled,<EOL>\"<STR_LIT>\" if stats.numCompletedError else \"<STR_LIT:error>\",<EOL>stats.numCompletedError,<EOL>\"<STR_LIT>\" if stats.numCompletedError else \"<STR_LIT>\",<EOL>stats.numCompletedOrphaned,<EOL>\"<STR_LIT>\" if stats.numCompletedOther else \"<STR_LIT>\",<EOL>stats.numCompletedOther)))<EOL>if errorCompletionMsg:<EOL><INDENT>print(\"<STR_LIT>\" % errorCompletionMsg)<EOL><DEDENT><DEDENT>workerState = jobInfo.getWorkerState()<EOL>if workerState != lastWorkerState:<EOL><INDENT>print(\"<STR_LIT>\" % (pprint.pformat(workerState,<EOL>indent=<NUM_LIT:4>)))<EOL>lastWorkerState = workerState<EOL><DEDENT>jobResults = jobInfo.getResults()<EOL>if jobResults != lastJobResults:<EOL><INDENT>print(\"<STR_LIT>\"% (pprint.pformat(jobResults, indent=<NUM_LIT:4>), time.time()-startTime))<EOL>lastJobResults = jobResults<EOL><DEDENT>modelMilestones = jobInfo.getModelMilestones()<EOL>if modelMilestones != lastModelMilestones:<EOL><INDENT>print(\"<STR_LIT>\" % (<EOL>pprint.pformat(modelMilestones, indent=<NUM_LIT:4>)))<EOL>lastModelMilestones = modelMilestones<EOL><DEDENT>engStatus = jobInfo.getEngStatus()<EOL>if engStatus != lastEngStatus:<EOL><INDENT>print(\"<STR_LIT>\" % (engStatus))<EOL>lastEngStatus = engStatus<EOL><DEDENT><DEDENT>if not hyperSearchFinished:<EOL><INDENT>if self._options[\"<STR_LIT>\"] != None:<EOL><INDENT>if ((datetime.now() - lastUpdateTime) 
><EOL>timedelta(minutes=self._options[\"<STR_LIT>\"])):<EOL><INDENT>print(\"<STR_LIT>\")<EOL>self.__cjDAO.jobCancel(jobID)<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>time.sleep(<NUM_LIT:1>)<EOL><DEDENT><DEDENT>modelIDs = self.__searchJob.queryModelIDs()<EOL>print(\"<STR_LIT>\" % len(modelIDs))<EOL>print(\"<STR_LIT>\")<EOL>jobInfo = self.__searchJob.getJobStatus(self._workers)<EOL>print(\"<STR_LIT>\" % (jobInfo.getWorkerCompletionMsg()))<EOL>", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval:         nothing", "id": "f17598:c1:m3"}
{"signature": "def __repr__(self):", "body": "return \"<STR_LIT>\" % (self.__class__.__name__, self.__nupicJobID)<EOL>", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval:         representation of this _NupicJob instance", "id": "f17598:c4:m1"}
{"signature": "def getDiscoveredMetricsKeys(self):", "body": "return tuple(self.__foundMetrcsKeySet)<EOL>", "docstring": "Returns a tuple of all metrics keys discovered while running HyperSearch.\n\n        NOTE: This is an optimization so that our client may\n            use this info for generating the report csv file without having\n            to pre-scan all modelInfos\n\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:         Tuple of metrics keys discovered while running\n                        HyperSearch;", "id": "f17598:c1:m7"}
{"signature": "def getParamLabels(self):", "body": "params = self.__unwrapParams()<EOL>if \"<STR_LIT>\" in params:<EOL><INDENT>retval = dict()<EOL>queue = [(pair, retval) for pair in<EOL>params[\"<STR_LIT>\"][\"<STR_LIT>\"].items()]<EOL>while len(queue) > <NUM_LIT:0>:<EOL><INDENT>pair, output = queue.pop()<EOL>k, v = pair<EOL>if (\"<STR_LIT>\" in v and \"<STR_LIT>\" in v and<EOL>\"<STR_LIT>\" in v):<EOL><INDENT>output[k] = v[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>if k not in output:<EOL><INDENT>output[k] = dict()<EOL><DEDENT>queue.extend((pair, output[k]) for pair in v.items())<EOL><DEDENT><DEDENT>return retval<EOL><DEDENT>", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval:         a dictionary of model parameter labels. For each entry\n                the key is the name of the parameter and the value\n                is the value chosen for it.", "id": "f17598:c9:m7"}
{"signature": "def getNumRecords(self):", "body": "return self.__rawInfo.numRecords<EOL>", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval:         The number of records processed by the model.", "id": "f17598:c9:m6"}
{"signature": "def getStartTime(self):", "body": "assert not self.isWaitingToStart(), \"<STR_LIT>\" % self<EOL>return \"<STR_LIT:%s>\" % self.__rawInfo.startTime<EOL>", "docstring": "Returns model evaluation start time.\n\n        NOTE: it's an error to call this method if isWaitingToStart() would\n        return True.\n\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:         model evaluation start time", "id": "f17598:c9:m18"}
{"signature": "def __repr__(self):", "body": "return (\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (<EOL>\"<STR_LIT>\",<EOL>self.__rawInfo.jobId,<EOL>self.__rawInfo.modelId,<EOL>self.__rawInfo.status,<EOL>self.__rawInfo.completionReason,<EOL>self.__rawInfo.updateCounter,<EOL>self.__rawInfo.numRecords))<EOL>", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval:         Representation of this _NupicModelInfo instance.", "id": "f17598:c9:m1"}
{"signature": "@classmethod<EOL><INDENT>def printModels(cls, options):<DEDENT>", "body": "print(\"<STR_LIT>\")<EOL>searchParams = _ClientJobUtils.makeSearchJobParamsDict(options=options)<EOL>", "docstring": "Prints a listing of experiments that would take place without\n        actually executing them.\n\n        Parameters:\n        ----------------------------------------------------------------------\n        options:        NupicRunPermutations options dict\n        retval:         nothing", "id": "f17598:c1:m8"}
{"signature": "def getJobStatus(self, workers):", "body": "jobInfo = self.JobStatus(self.__nupicJobID, workers)<EOL>return jobInfo<EOL>", "docstring": "Parameters:\n----------------------------------------------------------------------\nworkers:  If this job was launched outside of the nupic job engine, then this\n           is an array of subprocess Popen instances, one for each worker\nretval:         _NupicJob.JobStatus instance", "id": "f17598:c4:m2"}
{"signature": "def runWithConfig(swarmConfig, options,<EOL>outDir=None, outputLabel=\"<STR_LIT:default>\",<EOL>permWorkDir=None, verbosity=<NUM_LIT:1>):", "body": "global g_currentVerbosityLevel<EOL>g_currentVerbosityLevel = verbosity<EOL>if outDir is None:<EOL><INDENT>outDir = os.getcwd()<EOL><DEDENT>if permWorkDir is None:<EOL><INDENT>permWorkDir = os.getcwd()<EOL><DEDENT>_checkOverwrite(options, outDir)<EOL>_generateExpFilesFromSwarmDescription(swarmConfig, outDir)<EOL>options[\"<STR_LIT>\"] = swarmConfig<EOL>options[\"<STR_LIT>\"] = outputLabel<EOL>options[\"<STR_LIT>\"] = outDir<EOL>options[\"<STR_LIT>\"] = permWorkDir<EOL>runOptions = _injectDefaultOptions(options)<EOL>_validateOptions(runOptions)<EOL>return _runAction(runOptions)<EOL>", "docstring": "Starts a swarm, given an dictionary configuration.\n@param swarmConfig {dict} A complete [swarm description](http://nupic.docs.numenta.org/0.7.0.dev0/guides/swarming/running.html#the-swarm-description) object.\n@param outDir {string} Optional path to write swarm details (defaults to\n                       current working directory).\n@param outputLabel {string} Optional label for output (defaults to \"default\").\n@param permWorkDir {string} Optional location of working directory (defaults\n                            to current working directory).\n@param verbosity {int} Optional (1,2,3) increasing verbosity of output.\n\n@returns {object} Model parameters", "id": "f17598:m12"}
{"signature": "def _clientJobsDB():", "body": "return cjdao.ClientJobsDAO.get()<EOL>", "docstring": "Returns: The shared cjdao.ClientJobsDAO instance", "id": "f17598:m17"}
{"signature": "def getJobID(self):", "body": "return self.__nupicJobID<EOL>", "docstring": "Semi-private method for retrieving the jobId\n\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:         Nupic Client JobID of this _NupicJob instance", "id": "f17598:c4:m3"}
{"signature": "def __init__(self, rawInfo):", "body": "self.__rawInfo = rawInfo<EOL>self.__cachedResults = None<EOL>assert self.__rawInfo.params is not None<EOL>self.__cachedParams = None<EOL>", "docstring": "Parameters:\n----------------------------------------------------------------------\nrawInfo:        A single model information element as returned by\n                ClientJobsDAO.modelsInfo()\nretval:         nothing.", "id": "f17598:c9:m0"}
{"signature": "def getGeneratedDescriptionFile(self):", "body": "return self.__rawInfo.genDescription<EOL>", "docstring": "Parameters:\n----------------------------------------------------------------------\nretval:         Contents of the sub-experiment description file for\n                    this model", "id": "f17598:c9:m5"}
{"signature": "def __init__(self, nupicJobID):", "body": "self.__nupicJobID = nupicJobID<EOL>jobInfo = _clientJobsDB().jobInfo(nupicJobID)<EOL>assert jobInfo is not None, \"<STR_LIT>\" % nupicJobID<EOL>assert jobInfo.jobId == nupicJobID, \"<STR_LIT>\" % (jobInfo.jobId, nupicJobID)<EOL>_emit(Verbosity.DEBUG, \"<STR_LIT>\" % pprint.pformat(jobInfo, indent=<NUM_LIT:4>))<EOL>if jobInfo.params is not None:<EOL><INDENT>self.__params = json.loads(jobInfo.params)<EOL><DEDENT>else:<EOL><INDENT>self.__params = None<EOL><DEDENT>", "docstring": "_NupicJob constructor\n\n        Parameters:\n        ----------------------------------------------------------------------\n        retval:         Nupic Client JobID of the job", "id": "f17598:c4:m0"}
{"signature": "def _launchWorkers(self, cmdLine, numWorkers):", "body": "self._workers = []<EOL>for i in range(numWorkers):<EOL><INDENT>stdout = tempfile.NamedTemporaryFile(delete=False)<EOL>stderr = tempfile.NamedTemporaryFile(delete=False)<EOL>p = subprocess.Popen(cmdLine, bufsize=<NUM_LIT:1>, env=os.environ, shell=True,<EOL>stdin=None, stdout=stdout, stderr=stderr)<EOL>p._stderr_file = stderr<EOL>p._stdout_file = stdout<EOL>self._workers.append(p)<EOL><DEDENT>", "docstring": "Launch worker processes to execute the given command line\n\n        Parameters:\n        -----------------------------------------------\n        cmdLine: The command line for each worker\n        numWorkers: number of workers to launch", "id": "f17598:c1:m4"}
{"signature": "def main(argv):", "body": "parser = OptionParser(helpString)<EOL>parser.add_option(\"<STR_LIT>\", action=\"<STR_LIT:store>\", type=\"<STR_LIT:int>\", default=None,<EOL>help=\"<STR_LIT>\")<EOL>parser.add_option(\"<STR_LIT>\", action=\"<STR_LIT:store>\", type=\"<STR_LIT:str>\", default=None,<EOL>help=(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"))<EOL>parser.add_option(\"<STR_LIT>\", action=\"<STR_LIT:store>\", type=\"<STR_LIT:str>\", default=None,<EOL>help=(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"))<EOL>parser.add_option(\"<STR_LIT>\", action=\"<STR_LIT:store>\", default=None,<EOL>help=\"<STR_LIT>\"\"<STR_LIT>\"\"<STR_LIT>\")<EOL>parser.add_option(\"<STR_LIT>\", action=\"<STR_LIT:store_true>\", default=False,<EOL>help=\"<STR_LIT>\")<EOL>parser.add_option(\"<STR_LIT>\", action=\"<STR_LIT:store_true>\", default=False,<EOL>help=\"<STR_LIT>\")<EOL>parser.add_option(\"<STR_LIT>\", action=\"<STR_LIT:store>\", type=\"<STR_LIT:int>\", default=None,<EOL>help=\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL>(options, args) = parser.parse_args(argv[<NUM_LIT:1>:])<EOL>if len(args) != <NUM_LIT:0>:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" %(args))<EOL><DEDENT>if (options.jobID and options.params):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>if (options.jobID is None and options.params is None):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>initLogging(verbose=True)<EOL>hst = HypersearchWorker(options, argv[<NUM_LIT:1>:])<EOL>if options.params is None:<EOL><INDENT>try:<EOL><INDENT>jobID = hst.run()<EOL><DEDENT>except Exception as e:<EOL><INDENT>jobID = options.jobID<EOL>msg = io.StringIO()<EOL>print(\"<STR_LIT>\" %(ErrorCodes.hypersearchLogicErr, e), file=msg)<EOL>traceback.print_exc(None, msg)<EOL>completionReason = ClientJobsDAO.CMPL_REASON_ERROR<EOL>completionMsg = msg.getvalue()<EOL>hst.logger.error(completionMsg)<EOL>jobsDAO = ClientJobsDAO.get()<EOL>workerCmpReason = jobsDAO.jobGetFields(options.jobID,<EOL>['<STR_LIT>'])[<NUM_LIT:0>]<EOL>if 
workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:<EOL><INDENT>jobsDAO.jobSetFields(options.jobID, fields=dict(<EOL>cancel=True,<EOL>workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,<EOL>workerCompletionMsg = completionMsg),<EOL>useConnectionID=False,<EOL>ignoreUnchanged=True)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>jobID = None<EOL>completionReason = ClientJobsDAO.CMPL_REASON_SUCCESS<EOL>completionMsg = \"<STR_LIT>\"<EOL>try:<EOL><INDENT>jobID = hst.run()<EOL><DEDENT>except Exception as e:<EOL><INDENT>jobID = hst._options.jobID<EOL>completionReason = ClientJobsDAO.CMPL_REASON_ERROR<EOL>completionMsg = \"<STR_LIT>\" % (e,)<EOL>raise<EOL><DEDENT>finally:<EOL><INDENT>if jobID is not None:<EOL><INDENT>cjDAO = ClientJobsDAO.get()<EOL>cjDAO.jobSetCompleted(jobID=jobID,<EOL>completionReason=completionReason,<EOL>completionMsg=completionMsg)<EOL><DEDENT><DEDENT><DEDENT>return jobID<EOL>", "docstring": "The main function of the HypersearchWorker script. This parses the command\nline arguments, instantiates a HypersearchWorker instance, and then\nruns it.\n\nParameters:\n----------------------------------------------------------------------\nretval:     jobID of the job we ran. This is used by unit test code\n              when calling this working using the --params command\n              line option (which tells this worker to insert the job\n              itself).", "id": "f17599:m0"}
{"signature": "def getSwarmModelParams(modelID):", "body": "<EOL>cjDAO = ClientJobsDAO.get()<EOL>(jobID, description) = cjDAO.modelsGetFields(<EOL>modelID,<EOL>[\"<STR_LIT>\", \"<STR_LIT>\"])<EOL>(baseDescription,) = cjDAO.jobGetFields(jobID, [\"<STR_LIT>\"])<EOL>descriptionDirectory = tempfile.mkdtemp()<EOL>try:<EOL><INDENT>baseDescriptionFilePath = os.path.join(descriptionDirectory, \"<STR_LIT>\")<EOL>with open(baseDescriptionFilePath, mode=\"<STR_LIT:wb>\") as f:<EOL><INDENT>f.write(baseDescription)<EOL><DEDENT>descriptionFilePath = os.path.join(descriptionDirectory, \"<STR_LIT>\")<EOL>with open(descriptionFilePath, mode=\"<STR_LIT:wb>\") as f:<EOL><INDENT>f.write(description)<EOL><DEDENT>expIface = helpers.getExperimentDescriptionInterfaceFromModule(<EOL>helpers.loadExperimentDescriptionScriptFromDir(descriptionDirectory))<EOL>return json.dumps(<EOL>dict(<EOL>modelConfig=expIface.getModelDescription(),<EOL>inferenceArgs=expIface.getModelControl().get(\"<STR_LIT>\", None)))<EOL><DEDENT>finally:<EOL><INDENT>shutil.rmtree(descriptionDirectory, ignore_errors=True)<EOL><DEDENT>", "docstring": "Retrieve the Engine-level model params from a Swarm model\n\n    Args:\n      modelID - Engine-level model ID of the Swarm model\n\n    Returns:\n      JSON-encoded string containing Model Params", "id": "f17600:m1"}
{"signature": "def createAndStartSwarm(client, clientInfo=\"<STR_LIT>\", clientKey=\"<STR_LIT>\", params=\"<STR_LIT>\",<EOL>minimumWorkers=None, maximumWorkers=None,<EOL>alreadyRunning=False):", "body": "if minimumWorkers is None:<EOL><INDENT>minimumWorkers = Configuration.getInt(<EOL>\"<STR_LIT>\")<EOL><DEDENT>if maximumWorkers is None:<EOL><INDENT>maximumWorkers = Configuration.getInt(<EOL>\"<STR_LIT>\")<EOL><DEDENT>return ClientJobsDAO.get().jobInsert(<EOL>client=client,<EOL>cmdLine=\"<STR_LIT>\",<EOL>clientInfo=clientInfo,<EOL>clientKey=clientKey,<EOL>alreadyRunning=alreadyRunning,<EOL>params=params,<EOL>minimumWorkers=minimumWorkers,<EOL>maximumWorkers=maximumWorkers,<EOL>jobType=ClientJobsDAO.JOB_TYPE_HS)<EOL>", "docstring": "Create and start a swarm job.\n\n    Args:\n      client - A string identifying the calling client. There is a small limit\n          for the length of the value. See ClientJobsDAO.CLIENT_MAX_LEN.\n      clientInfo - JSON encoded dict of client specific information.\n      clientKey - Foreign key. Limited in length, see ClientJobsDAO._initTables.\n      params - JSON encoded dict of the parameters for the job. This can be\n          fetched out of the database by the worker processes based on the jobID.\n      minimumWorkers - The minimum workers to allocate to the swarm. Set to None\n          to use the default.\n      maximumWorkers - The maximum workers to allocate to the swarm. Set to None\n          to use the swarm default. Set to 0 to use the maximum scheduler value.\n      alreadyRunning - Insert a job record for an already running process. Used\n          for testing.", "id": "f17600:m0"}
{"signature": "def _getPredictedField(options):", "body": "if not options['<STR_LIT>'] ornot options['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>return None, None<EOL><DEDENT>predictedField = options['<STR_LIT>']['<STR_LIT>']<EOL>predictedFieldInfo = None<EOL>includedFields = options['<STR_LIT>']<EOL>for info in includedFields:<EOL><INDENT>if info['<STR_LIT>'] == predictedField:<EOL><INDENT>predictedFieldInfo = info<EOL>break<EOL><DEDENT><DEDENT>if predictedFieldInfo is None:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\" % predictedField<EOL>)<EOL><DEDENT>predictedFieldType = predictedFieldInfo['<STR_LIT>']<EOL>return predictedField, predictedFieldType<EOL>", "docstring": "Gets the predicted field and it's datatype from the options dictionary\n\n    Returns: (predictedFieldName, predictedFieldType)", "id": "f17601:m22"}
{"signature": "def _generateExtraMetricSpecs(options):", "body": "_metricSpecSchema = {'<STR_LIT>': {}}<EOL>results = []<EOL>for metric in options['<STR_LIT>']:<EOL><INDENT>for propertyName in list(_metricSpecSchema['<STR_LIT>'].keys()):<EOL><INDENT>_getPropertyValue(_metricSpecSchema, propertyName, metric)<EOL><DEDENT>specString, label = _generateMetricSpecString(<EOL>field=metric['<STR_LIT>'],<EOL>metric=metric['<STR_LIT>'],<EOL>params=metric['<STR_LIT>'],<EOL>inferenceElement=metric['<STR_LIT>'],<EOL>returnLabel=True)<EOL>if metric['<STR_LIT>']:<EOL><INDENT>options['<STR_LIT>'].append(label)<EOL><DEDENT>results.append(specString)<EOL><DEDENT>return results<EOL>", "docstring": "Generates the non-default metrics specified by the expGenerator params", "id": "f17601:m21"}
{"signature": "def _isString(obj):", "body": "return type(obj) in (str,)<EOL>", "docstring": "returns whether or not the object is a string", "id": "f17601:m5"}
{"signature": "def _makeUsageErrorStr(errorString, usageString):", "body": "return \"<STR_LIT>\" % (errorString, usageString)<EOL>", "docstring": "Combines an error string and usage string into a regular format, so they\n    all look consistent.", "id": "f17601:m0"}
{"signature": "def _generateEncoderChoicesV1(fieldInfo):", "body": "width = <NUM_LIT:7><EOL>fieldName = fieldInfo['<STR_LIT>']<EOL>fieldType = fieldInfo['<STR_LIT>']<EOL>encoderChoicesList = []<EOL>if fieldType in ['<STR_LIT:float>', '<STR_LIT:int>']:<EOL><INDENT>aggFunction = '<STR_LIT>'<EOL>encoders = [None]<EOL>for n in (<NUM_LIT>, <NUM_LIT:50>, <NUM_LIT>, <NUM_LIT>):<EOL><INDENT>encoder = dict(type='<STR_LIT>', name=fieldName, fieldname=fieldName,<EOL>n=n, w=width, clipInput=True,space=\"<STR_LIT>\")<EOL>if '<STR_LIT>' in fieldInfo:<EOL><INDENT>encoder['<STR_LIT>'] = fieldInfo['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in fieldInfo:<EOL><INDENT>encoder['<STR_LIT>'] = fieldInfo['<STR_LIT>']<EOL><DEDENT>encoders.append(encoder)<EOL><DEDENT>encoderChoicesList.append(encoders)<EOL><DEDENT>elif fieldType == '<STR_LIT:string>':<EOL><INDENT>aggFunction = '<STR_LIT>'<EOL>encoders = [None]<EOL>encoder = dict(type='<STR_LIT>', name=fieldName,<EOL>fieldname=fieldName, n=<NUM_LIT:100>, w=width)<EOL>encoders.append(encoder)<EOL>encoderChoicesList.append(encoders)<EOL><DEDENT>elif fieldType == '<STR_LIT>':<EOL><INDENT>aggFunction = '<STR_LIT>'<EOL>encoders = [None]<EOL>for radius in (<NUM_LIT:1>, <NUM_LIT:8>):<EOL><INDENT>encoder = dict(type='<STR_LIT>', name='<STR_LIT>' % (fieldName),<EOL>fieldname=fieldName, timeOfDay=(width, radius))<EOL>encoders.append(encoder)<EOL><DEDENT>encoderChoicesList.append(encoders)<EOL>encoders = [None]<EOL>for radius in (<NUM_LIT:1>, <NUM_LIT:3>):<EOL><INDENT>encoder = dict(type='<STR_LIT>', name='<STR_LIT>' % (fieldName),<EOL>fieldname=fieldName, dayOfWeek=(width, radius))<EOL>encoders.append(encoder)<EOL><DEDENT>encoderChoicesList.append(encoders)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % (fieldType))<EOL><DEDENT>return (encoderChoicesList, aggFunction)<EOL>", "docstring": "Return a list of possible encoder parameter combinations for the given\n    field and the default aggregation function to use. 
Each parameter combination\n    is a dict defining the parameters for the encoder. Here is an example\n    return value for the encoderChoicesList:\n\n     [\n       None,\n       {'fieldname':'timestamp',\n        'name': 'timestamp_timeOfDay',\n        'type':'DateEncoder'\n        'dayOfWeek': (7,1)\n        },\n       {'fieldname':'timestamp',\n        'name': 'timestamp_timeOfDay',\n        'type':'DateEncoder'\n        'dayOfWeek': (7,3)\n        },\n    ],\n\n    Parameters:\n    --------------------------------------------------\n    fieldInfo:      item from the 'includedFields' section of the\n                      description JSON object\n\n    retval:  (encoderChoicesList, aggFunction)\n               encoderChoicesList: a list of encoder choice lists for this field.\n                 Most fields will generate just 1 encoder choice list.\n                 DateTime fields can generate 2 or more encoder choice lists,\n                   one for dayOfWeek, one for timeOfDay, etc.\n               aggFunction: name of aggregation function to use for this\n                             field type", "id": "f17601:m11"}
{"signature": "def _generatePermEncoderStr(options, encoderDict):", "body": "permStr = \"<STR_LIT>\"<EOL>if encoderDict.get('<STR_LIT>', False):<EOL><INDENT>permStr = \"<STR_LIT>\"<EOL>for key, value in list(encoderDict.items()):<EOL><INDENT>if key == \"<STR_LIT:name>\":<EOL><INDENT>continue<EOL><DEDENT>if key == '<STR_LIT:n>' and encoderDict['<STR_LIT:type>'] != '<STR_LIT>':<EOL><INDENT>permStr += \"<STR_LIT>\" % (encoderDict[\"<STR_LIT:w>\"] + <NUM_LIT:7>,<EOL>encoderDict[\"<STR_LIT:w>\"] + <NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>if issubclass(type(value), str):<EOL><INDENT>permStr += \"<STR_LIT>\" % (key, value)<EOL><DEDENT>else:<EOL><INDENT>permStr += \"<STR_LIT>\" % (key, value)<EOL><DEDENT><DEDENT><DEDENT>permStr += \"<STR_LIT:)>\"<EOL><DEDENT>else:<EOL><INDENT>if encoderDict[\"<STR_LIT:type>\"] in [\"<STR_LIT>\", \"<STR_LIT>\",<EOL>\"<STR_LIT>\", \"<STR_LIT>\"]:<EOL><INDENT>permStr = \"<STR_LIT>\"<EOL>for key, value in list(encoderDict.items()):<EOL><INDENT>if key == \"<STR_LIT>\":<EOL><INDENT>key = \"<STR_LIT>\"<EOL><DEDENT>elif key == \"<STR_LIT:type>\":<EOL><INDENT>key = \"<STR_LIT>\"<EOL><DEDENT>elif key == \"<STR_LIT:name>\":<EOL><INDENT>continue<EOL><DEDENT>if key == \"<STR_LIT:n>\":<EOL><INDENT>permStr += \"<STR_LIT>\" % (encoderDict[\"<STR_LIT:w>\"] + <NUM_LIT:1>,<EOL>encoderDict[\"<STR_LIT:w>\"] + <NUM_LIT>)<EOL><DEDENT>elif key == \"<STR_LIT>\":<EOL><INDENT>if value and not \"<STR_LIT>\" in encoderDict:<EOL><INDENT>permStr += \"<STR_LIT>\"% (_quoteAndEscape(\"<STR_LIT>\"), _quoteAndEscape(\"<STR_LIT>\"))<EOL><DEDENT>encoderDict.pop(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>if issubclass(type(value), str):<EOL><INDENT>permStr += \"<STR_LIT>\" % (key, value)<EOL><DEDENT>else:<EOL><INDENT>permStr += \"<STR_LIT>\" % (key, value)<EOL><DEDENT><DEDENT><DEDENT>permStr += \"<STR_LIT:)>\"<EOL><DEDENT>elif encoderDict[\"<STR_LIT:type>\"] in [\"<STR_LIT>\"]:<EOL><INDENT>permStr = \"<STR_LIT>\"<EOL>for key, value in list(encoderDict.items()):<EOL><INDENT>if 
key == \"<STR_LIT>\":<EOL><INDENT>key = \"<STR_LIT>\"<EOL><DEDENT>elif key == \"<STR_LIT:type>\":<EOL><INDENT>key = \"<STR_LIT>\"<EOL><DEDENT>elif key == \"<STR_LIT:name>\":<EOL><INDENT>continue<EOL><DEDENT>if issubclass(type(value), str):<EOL><INDENT>permStr += \"<STR_LIT>\" % (key, value)<EOL><DEDENT>else:<EOL><INDENT>permStr += \"<STR_LIT>\" % (key, value)<EOL><DEDENT><DEDENT>permStr += \"<STR_LIT:)>\"<EOL><DEDENT>elif encoderDict[\"<STR_LIT:type>\"] in [\"<STR_LIT>\"]:<EOL><INDENT>permStr = \"<STR_LIT>\"<EOL>for key, value in list(encoderDict.items()):<EOL><INDENT>if key == \"<STR_LIT>\":<EOL><INDENT>key = \"<STR_LIT>\"<EOL><DEDENT>elif key == \"<STR_LIT:type>\":<EOL><INDENT>continue<EOL><DEDENT>elif key == \"<STR_LIT:name>\":<EOL><INDENT>continue<EOL><DEDENT>if key == \"<STR_LIT>\":<EOL><INDENT>permStr += \"<STR_LIT>\" % (encoderDict[\"<STR_LIT:type>\"])<EOL>permStr += \"<STR_LIT>\"<EOL>permStr += \"<STR_LIT>\" % (value[<NUM_LIT:0>])<EOL><DEDENT>elif key == \"<STR_LIT>\":<EOL><INDENT>permStr += \"<STR_LIT>\" % (encoderDict[\"<STR_LIT:type>\"])<EOL>permStr += \"<STR_LIT>\"<EOL>permStr += \"<STR_LIT>\" % (value[<NUM_LIT:0>])<EOL><DEDENT>elif key == \"<STR_LIT>\":<EOL><INDENT>permStr += \"<STR_LIT>\" % (encoderDict[\"<STR_LIT:type>\"])<EOL>permStr += \"<STR_LIT>\"<EOL>permStr += \"<STR_LIT>\" % (value)<EOL><DEDENT>else:<EOL><INDENT>if issubclass(type(value), str):<EOL><INDENT>permStr += \"<STR_LIT>\" % (key, value)<EOL><DEDENT>else:<EOL><INDENT>permStr += \"<STR_LIT>\" % (key, value)<EOL><DEDENT><DEDENT><DEDENT>permStr += \"<STR_LIT:)>\"<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" %(encoderDict[\"<STR_LIT:type>\"]))<EOL><DEDENT><DEDENT>return permStr<EOL>", "docstring": "Generate the string that defines the permutations to apply for a given\n    encoder.\n\n    Parameters:\n    -----------------------------------------------------------------------\n    options: experiment params\n    encoderDict: the encoder dict, which gets placed into the 
description.py\n\n\n    For example, if the encoderDict contains:\n        'consumption':     {\n                  'clipInput': True,\n                  'fieldname': u'consumption',\n                  'n': 100,\n                  'name': u'consumption',\n                  'type': 'AdaptiveScalarEncoder',\n                  'w': 21},\n\n    The return string will contain:\n      \"PermuteEncoder(fieldName='consumption',\n                      encoderClass='AdaptiveScalarEncoder',\n                      w=21,\n                      n=PermuteInt(28, 521),\n                      clipInput=True)\"", "id": "f17601:m13"}
{"signature": "def _generateMetricSpecs(options):", "body": "inferenceType = options['<STR_LIT>']<EOL>inferenceArgs = options['<STR_LIT>']<EOL>predictionSteps = inferenceArgs['<STR_LIT>']<EOL>metricWindow = options['<STR_LIT>']<EOL>if metricWindow is None:<EOL><INDENT>metricWindow = int(Configuration.get(\"<STR_LIT>\"))<EOL><DEDENT>metricSpecStrings = []<EOL>optimizeMetricLabel = \"<STR_LIT>\"<EOL>metricSpecStrings.extend(_generateExtraMetricSpecs(options))<EOL>optimizeMetricSpec = None<EOL>if options['<STR_LIT>']:<EOL><INDENT>assert len(predictionSteps) == <NUM_LIT:1><EOL>predictionSteps = ['<STR_LIT>']<EOL><DEDENT>if inferenceType in (InferenceType.TemporalNextStep,<EOL>InferenceType.TemporalAnomaly,<EOL>InferenceType.TemporalMultiStep,<EOL>InferenceType.NontemporalMultiStep,<EOL>InferenceType.NontemporalClassification,<EOL>'<STR_LIT>'):<EOL><INDENT>predictedFieldName, predictedFieldType = _getPredictedField(options)<EOL>isCategory = _isCategory(predictedFieldType)<EOL>metricNames = ('<STR_LIT>',) if isCategory else ('<STR_LIT>', '<STR_LIT>')<EOL>trivialErrorMetric = '<STR_LIT>' if isCategory else '<STR_LIT>'<EOL>oneGramErrorMetric = '<STR_LIT>' if isCategory else '<STR_LIT>'<EOL>movingAverageBaselineName = '<STR_LIT>' if isCategory else '<STR_LIT>'<EOL>for metricName in metricNames:<EOL><INDENT>metricSpec, metricLabel =_generateMetricSpecString(field=predictedFieldName,<EOL>inferenceElement=InferenceElement.multiStepBestPredictions,<EOL>metric='<STR_LIT>',<EOL>params={'<STR_LIT>': metricName,<EOL>'<STR_LIT>':metricWindow,<EOL>'<STR_LIT>': predictionSteps},<EOL>returnLabel=True)<EOL>metricSpecStrings.append(metricSpec)<EOL><DEDENT>if options[\"<STR_LIT>\"] is not None :<EOL><INDENT>metricParams = dict(options[\"<STR_LIT>\"])<EOL>metricParams['<STR_LIT>'] = '<STR_LIT>'<EOL>metricParams['<STR_LIT>'] = predictionSteps<EOL>if not \"<STR_LIT>\" in metricParams:<EOL><INDENT>metricParams[\"<STR_LIT>\"] = metricWindow<EOL><DEDENT>metricSpec, metricLabel 
=_generateMetricSpecString(field=predictedFieldName,<EOL>inferenceElement=InferenceElement.multiStepPredictions,<EOL>metric=\"<STR_LIT>\",<EOL>params=metricParams,<EOL>returnLabel=True)<EOL>metricSpecStrings.append(metricSpec)<EOL><DEDENT>optimizeMetricSpec = metricSpec<EOL>metricLabel = metricLabel.replace('<STR_LIT:[>', '<STR_LIT>')<EOL>metricLabel = metricLabel.replace('<STR_LIT:]>', '<STR_LIT>')<EOL>optimizeMetricLabel = metricLabel<EOL>if options[\"<STR_LIT>\"] is not None :<EOL><INDENT>optimizeMetricLabel = \"<STR_LIT>\"<EOL><DEDENT>if options[\"<STR_LIT>\"]and inferenceType != InferenceType.NontemporalClassification:<EOL><INDENT>for steps in predictionSteps:<EOL><INDENT>metricSpecStrings.append(<EOL>_generateMetricSpecString(field=predictedFieldName,<EOL>inferenceElement=InferenceElement.prediction,<EOL>metric=\"<STR_LIT>\",<EOL>params={'<STR_LIT>':metricWindow,<EOL>\"<STR_LIT>\":trivialErrorMetric,<EOL>'<STR_LIT>': steps})<EOL>)<EOL>if isCategory:<EOL><INDENT>metricSpecStrings.append(<EOL>_generateMetricSpecString(field=predictedFieldName,<EOL>inferenceElement=InferenceElement.prediction,<EOL>metric=movingAverageBaselineName,<EOL>params={'<STR_LIT>':metricWindow<EOL>,\"<STR_LIT>\":\"<STR_LIT>\",<EOL>\"<STR_LIT>\":<NUM_LIT:200>,<EOL>\"<STR_LIT>\": steps})<EOL>)<EOL><DEDENT>else :<EOL><INDENT>metricSpecStrings.append(<EOL>_generateMetricSpecString(field=predictedFieldName,<EOL>inferenceElement=InferenceElement.prediction,<EOL>metric=movingAverageBaselineName,<EOL>params={'<STR_LIT>':metricWindow<EOL>,\"<STR_LIT>\":\"<STR_LIT>\",<EOL>\"<STR_LIT>\":<NUM_LIT:200>,<EOL>\"<STR_LIT>\": steps})<EOL>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif inferenceType in (InferenceType.TemporalClassification):<EOL><INDENT>metricName = '<STR_LIT>'<EOL>trivialErrorMetric = '<STR_LIT>'<EOL>oneGramErrorMetric = '<STR_LIT>'<EOL>movingAverageBaselineName = '<STR_LIT>'<EOL>optimizeMetricSpec, optimizeMetricLabel 
=_generateMetricSpecString(inferenceElement=InferenceElement.classification,<EOL>metric=metricName,<EOL>params={'<STR_LIT>':metricWindow},<EOL>returnLabel=True)<EOL>metricSpecStrings.append(optimizeMetricSpec)<EOL>if options[\"<STR_LIT>\"]:<EOL><INDENT>if inferenceType == InferenceType.TemporalClassification:<EOL><INDENT>metricSpecStrings.append(<EOL>_generateMetricSpecString(inferenceElement=InferenceElement.classification,<EOL>metric=\"<STR_LIT>\",<EOL>params={'<STR_LIT>':metricWindow,<EOL>\"<STR_LIT>\":trivialErrorMetric})<EOL>)<EOL>metricSpecStrings.append(<EOL>_generateMetricSpecString(inferenceElement=InferenceElement.classification,<EOL>metric=\"<STR_LIT>\",<EOL>params={'<STR_LIT>':metricWindow,<EOL>\"<STR_LIT>\":oneGramErrorMetric})<EOL>)<EOL>metricSpecStrings.append(<EOL>_generateMetricSpecString(inferenceElement=InferenceElement.classification,<EOL>metric=movingAverageBaselineName,<EOL>params={'<STR_LIT>':metricWindow<EOL>,\"<STR_LIT>\":\"<STR_LIT>\",<EOL>\"<STR_LIT>\":<NUM_LIT:200>})<EOL>)<EOL><DEDENT><DEDENT>if not options[\"<STR_LIT>\"] == None :<EOL><INDENT>if not \"<STR_LIT>\" in options[\"<STR_LIT>\"]:<EOL><INDENT>options[\"<STR_LIT>\"][\"<STR_LIT>\"] = metricWindow<EOL><DEDENT>optimizeMetricSpec = _generateMetricSpecString(<EOL>inferenceElement=InferenceElement.classification,<EOL>metric=\"<STR_LIT>\",<EOL>params=options[\"<STR_LIT>\"])<EOL>optimizeMetricLabel = \"<STR_LIT>\"<EOL>metricSpecStrings.append(optimizeMetricSpec)<EOL><DEDENT><DEDENT>if options['<STR_LIT>']:<EOL><INDENT>for i in range(len(metricSpecStrings)):<EOL><INDENT>metricSpecStrings[i] = metricSpecStrings[i].replace(<EOL>\"<STR_LIT>\", \"<STR_LIT>\")<EOL><DEDENT>optimizeMetricLabel = optimizeMetricLabel.replace(<EOL>\"<STR_LIT>\", \"<STR_LIT>\")<EOL><DEDENT>return metricSpecStrings, optimizeMetricLabel<EOL>", "docstring": "Generates the Metrics for a given InferenceType\n\n    Parameters:\n    -------------------------------------------------------------------------\n    options: 
ExpGenerator options\n    retval: (metricsList, optimizeMetricLabel)\n              metricsList: list of metric string names\n              optimizeMetricLabel: Name of the metric which to optimize over", "id": "f17601:m20"}
{"signature": "def _generateFileFromTemplates(templateFileNames, outputFilePath,<EOL>replacementDict):", "body": "<EOL>installPath = os.path.dirname(__file__)<EOL>outputFile = open(outputFilePath, \"<STR_LIT:w>\")<EOL>outputLines = []<EOL>inputLines = []<EOL>firstFile = True<EOL>for templateFileName in templateFileNames:<EOL><INDENT>if not firstFile:<EOL><INDENT>inputLines.extend([os.linesep]*<NUM_LIT:2>)<EOL><DEDENT>firstFile = False<EOL>inputFilePath = os.path.join(installPath, templateFileName)<EOL>inputFile = open(inputFilePath)<EOL>inputLines.extend(inputFile.readlines())<EOL>inputFile.close()<EOL><DEDENT>print(\"<STR_LIT>\", len(inputLines), \"<STR_LIT>\")<EOL>for line in inputLines:<EOL><INDENT>tempLine = line<EOL>for k, v in replacementDict.items():<EOL><INDENT>if v is None:<EOL><INDENT>v = \"<STR_LIT:None>\"<EOL><DEDENT>tempLine = re.sub(k, v, tempLine)<EOL><DEDENT>outputFile.write(tempLine)<EOL><DEDENT>outputFile.close()<EOL>", "docstring": "Generates a file by applying token replacements to the given template\n    file\n\n    templateFileName:\n                    A list of template file names; these files are assumed to be in\n                    the same directory as the running experiment_generator.py script.\n                    ExpGenerator will perform the substitution and concanetate\n                    the files in the order they are specified\n\n    outputFilePath: Absolute path of the output file\n\n    replacementDict:\n                    A dictionary of token/replacement pairs", "id": "f17601:m10"}
{"signature": "def _generateMetricSpecString(inferenceElement, metric,<EOL>params=None, field=None,<EOL>returnLabel=False):", "body": "metricSpecArgs = dict(metric=metric,<EOL>field=field,<EOL>params=params,<EOL>inferenceElement=inferenceElement)<EOL>metricSpecAsString = \"<STR_LIT>\" %'<STR_LIT:U+002CU+0020>'.join(['<STR_LIT>' % (item[<NUM_LIT:0>],item[<NUM_LIT:1>])<EOL>for item in metricSpecArgs.items()])<EOL>if not returnLabel:<EOL><INDENT>return metricSpecAsString<EOL><DEDENT>spec = MetricSpec(**metricSpecArgs)<EOL>metricLabel = spec.getLabel()<EOL>return metricSpecAsString, metricLabel<EOL>", "docstring": "Generates the string representation of a MetricSpec object, and returns\n    the metric key associated with the metric.\n\n\n    Parameters:\n    -----------------------------------------------------------------------\n    inferenceElement:\n      An InferenceElement value that indicates which part of the inference this\n      metric is computed on\n\n    metric:\n      The type of the metric being computed (e.g. aae, avg_error)\n\n    params:\n      A dictionary of parameters for the metric. The keys are the parameter names\n      and the values should be the parameter values (e.g. window=200)\n\n    field:\n      The name of the field for which this metric is being computed\n\n    returnLabel:\n      If True, returns the label of the MetricSpec that was generated", "id": "f17601:m9"}
{"signature": "def _quoteAndEscape(string):", "body": "assert _isString(string)<EOL>return pprint.pformat(string)<EOL>", "docstring": "string:   input string (ascii or unicode)\n\nReturns:  a quoted string with characters that are represented in python via\n          escape sequences converted to those escape sequences", "id": "f17601:m6"}
{"signature": "def _getPropertyValue(schema, propertyName, options):", "body": "if propertyName not in options:<EOL><INDENT>paramsSchema = schema['<STR_LIT>'][propertyName]<EOL>if '<STR_LIT:default>' in paramsSchema:<EOL><INDENT>options[propertyName] = paramsSchema['<STR_LIT:default>']<EOL><DEDENT>else:<EOL><INDENT>options[propertyName] = None<EOL><DEDENT><DEDENT>", "docstring": "Checks to see if property is specified in 'options'. If not, reads the\n    default value from the schema", "id": "f17601:m16"}
{"signature": "def _isInt(x, precision = <NUM_LIT>):", "body": "xInt = int(round(x))<EOL>return (abs(x - xInt) < precision * x, xInt)<EOL>", "docstring": "Return (isInt, intValue) for a given floating point number.\n\nParameters:\n----------------------------------------------------------------------\nx:  floating point number to evaluate\nprecision: desired precision\nretval:   (isInt, intValue)\n          isInt: True if x is close enough to an integer value\n          intValue: x as an integer", "id": "f17601:m4"}
{"signature": "def _generateEncoderStringsV2(includedFields, options):", "body": "width = <NUM_LIT><EOL>encoderDictsList = []<EOL>if options['<STR_LIT>'] in [\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\",<EOL>\"<STR_LIT>\"]:<EOL><INDENT>classifierOnlyField = options['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>classifierOnlyField = None<EOL><DEDENT>for fieldInfo in includedFields:<EOL><INDENT>fieldName = fieldInfo['<STR_LIT>']<EOL>fieldType = fieldInfo['<STR_LIT>']<EOL>if fieldType in ['<STR_LIT:float>', '<STR_LIT:int>']:<EOL><INDENT>runDelta = fieldInfo.get(\"<STR_LIT>\", False)<EOL>if runDelta or \"<STR_LIT>\" in fieldInfo:<EOL><INDENT>encoderDict = dict(type='<STR_LIT>', name=fieldName,<EOL>fieldname=fieldName, n=<NUM_LIT:100>, w=width, clipInput=True)<EOL>if runDelta:<EOL><INDENT>encoderDict[\"<STR_LIT>\"] = True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>encoderDict = dict(type='<STR_LIT>', name=fieldName,<EOL>fieldname=fieldName, n=<NUM_LIT:100>, w=width, clipInput=True)<EOL><DEDENT>if '<STR_LIT>' in fieldInfo:<EOL><INDENT>encoderDict['<STR_LIT>'] = fieldInfo['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in fieldInfo:<EOL><INDENT>encoderDict['<STR_LIT>'] = fieldInfo['<STR_LIT>']<EOL><DEDENT>if ('<STR_LIT>' in fieldInfo and '<STR_LIT>' in fieldInfo)and (encoderDict['<STR_LIT:type>'] == '<STR_LIT>'):<EOL><INDENT>encoderDict['<STR_LIT:type>'] = '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT>' in fieldInfo:<EOL><INDENT>encoderDict['<STR_LIT:type>'] = fieldInfo['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in fieldInfo:<EOL><INDENT>encoderDict['<STR_LIT>'] = fieldInfo['<STR_LIT>']<EOL><DEDENT>encoderDictsList.append(encoderDict)<EOL><DEDENT>elif fieldType == '<STR_LIT:string>':<EOL><INDENT>encoderDict = dict(type='<STR_LIT>', name=fieldName,<EOL>fieldname=fieldName, n=<NUM_LIT:100>+width, w=width)<EOL>if '<STR_LIT>' in fieldInfo:<EOL><INDENT>encoderDict['<STR_LIT:type>'] = fieldInfo['<STR_LIT>']<EOL><DEDENT>encoderDictsList.append(encoderDict)<EOL><DEDENT>elif fieldType == 
'<STR_LIT>':<EOL><INDENT>encoderDict = dict(type='<STR_LIT>', name='<STR_LIT>' % (fieldName),<EOL>fieldname=fieldName, timeOfDay=(width, <NUM_LIT:1>))<EOL>if '<STR_LIT>' in fieldInfo:<EOL><INDENT>encoderDict['<STR_LIT:type>'] = fieldInfo['<STR_LIT>']<EOL><DEDENT>encoderDictsList.append(encoderDict)<EOL>encoderDict = dict(type='<STR_LIT>', name='<STR_LIT>' % (fieldName),<EOL>fieldname=fieldName, dayOfWeek=(width, <NUM_LIT:1>))<EOL>if '<STR_LIT>' in fieldInfo:<EOL><INDENT>encoderDict['<STR_LIT:type>'] = fieldInfo['<STR_LIT>']<EOL><DEDENT>encoderDictsList.append(encoderDict)<EOL>encoderDict = dict(type='<STR_LIT>', name='<STR_LIT>' % (fieldName),<EOL>fieldname=fieldName, weekend=(width))<EOL>if '<STR_LIT>' in fieldInfo:<EOL><INDENT>encoderDict['<STR_LIT:type>'] = fieldInfo['<STR_LIT>']<EOL><DEDENT>encoderDictsList.append(encoderDict)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % (fieldType))<EOL><DEDENT>if fieldName == classifierOnlyField:<EOL><INDENT>clEncoderDict = dict(encoderDict)<EOL>clEncoderDict['<STR_LIT>'] = True<EOL>clEncoderDict['<STR_LIT:name>'] = '<STR_LIT>'<EOL>encoderDictsList.append(clEncoderDict)<EOL>if options[\"<STR_LIT>\"][\"<STR_LIT>\"] == \"<STR_LIT>\":<EOL><INDENT>encoderDictsList.remove(encoderDict)<EOL><DEDENT><DEDENT><DEDENT>if options.get('<STR_LIT>') is not None:<EOL><INDENT>tempList=[]<EOL>for encoderDict in encoderDictsList:<EOL><INDENT>if encoderDict['<STR_LIT:name>'] in options['<STR_LIT>']:<EOL><INDENT>tempList.append(encoderDict)<EOL><DEDENT><DEDENT>encoderDictsList = tempList<EOL><DEDENT>encoderSpecsList = []<EOL>permEncoderChoicesList = []<EOL>for encoderDict in encoderDictsList:<EOL><INDENT>if encoderDict['<STR_LIT:name>'].find('<STR_LIT:\\\\>') >= <NUM_LIT:0>:<EOL><INDENT>raise _ExpGeneratorException(\"<STR_LIT>\")<EOL><DEDENT>for c in _ILLEGAL_FIELDNAME_CHARACTERS:<EOL><INDENT>if encoderDict['<STR_LIT:name>'].find(c) >= <NUM_LIT:0>:<EOL><INDENT>raise _ExpGeneratorException(\"<STR_LIT>\"  %(c, 
encoderDict['<STR_LIT:name>']))<EOL><DEDENT><DEDENT>constructorStr = _generatePermEncoderStr(options, encoderDict)<EOL>encoderKey = _quoteAndEscape(encoderDict['<STR_LIT:name>'])<EOL>encoderSpecsList.append(\"<STR_LIT>\" % (<EOL>encoderKey,<EOL><NUM_LIT:2>*_ONE_INDENT,<EOL>pprint.pformat(encoderDict, indent=<NUM_LIT:2>*_INDENT_STEP)))<EOL>permEncoderChoicesList.append(\"<STR_LIT>\" % (encoderKey, constructorStr))<EOL><DEDENT>encoderSpecsStr = '<STR_LIT>'.join(encoderSpecsList)<EOL>permEncoderChoicesStr = '<STR_LIT:\\n>'.join(permEncoderChoicesList)<EOL>permEncoderChoicesStr = _indentLines(permEncoderChoicesStr, <NUM_LIT:1>,<EOL>indentFirstLine=True)<EOL>return (encoderSpecsStr, permEncoderChoicesStr)<EOL>", "docstring": "Generate and return the following encoder related substitution variables:\n\n    encoderSpecsStr:\n      For the base description file, this string defines the default\n      encoding dicts for each encoder. For example:\n\n           __gym_encoder = {   'fieldname': 'gym',\n            'n': 13,\n            'name': 'gym',\n            'type': 'SDRCategoryEncoder',\n            'w': 7},\n          __address_encoder = {   'fieldname': 'address',\n            'n': 13,\n            'name': 'address',\n            'type': 'SDRCategoryEncoder',\n            'w': 7}\n\n\n    permEncoderChoicesStr:\n      For the permutations file, this defines the possible\n      encoder dicts for each encoder. 
For example:\n\n          '__gym_encoder' : PermuteEncoder('gym', 'SDRCategoryEncoder', w=7,\n              n=100),\n\n          '__address_encoder' : PermuteEncoder('address', 'SDRCategoryEncoder',\n                w=7, n=100),\n\n          '__timestamp_dayOfWeek_encoder' : PermuteEncoder('timestamp',\n              'DateEncoder.timeOfDay', w=7, radius=PermuteChoices([1, 8])),\n\n          '__consumption_encoder': PermuteEncoder('consumption', 'AdaptiveScalarEncoder',\n              w=7, n=PermuteInt(13, 500, 20), minval=0,\n              maxval=PermuteInt(100, 300, 25)),\n\n\n\n    Parameters:\n    --------------------------------------------------\n    includedFields:  item from the 'includedFields' section of the\n                      description JSON object. This is a list of dicts, each\n                      dict defining the field name, type, and optional min\n                      and max values.\n\n    retval:  (encoderSpecsStr permEncoderChoicesStr)", "id": "f17601:m14"}
{"signature": "def _handleDescriptionOption(cmdArgStr, outDir, usageStr, hsVersion,<EOL>claDescriptionTemplateFile):", "body": "<EOL>try:<EOL><INDENT>args = json.loads(cmdArgStr)<EOL><DEDENT>except Exception as e:<EOL><INDENT>raise _InvalidCommandArgException(<EOL>_makeUsageErrorStr(<EOL>(\"<STR_LIT>\" +\"<STR_LIT>\") % (str(e), cmdArgStr), usageStr))<EOL><DEDENT>filesDescription = _generateExperiment(args, outDir, hsVersion=hsVersion,<EOL>claDescriptionTemplateFile = claDescriptionTemplateFile)<EOL>pprint.pprint(filesDescription)<EOL>return<EOL>", "docstring": "Parses and validates the --description option args and executes the\nrequest\n\nParameters:\n-----------------------------------------------------------------------\ncmdArgStr:  JSON string compatible with _gExperimentDescriptionSchema\noutDir:     where to place generated experiment files\nusageStr:   program usage string\nhsVersion:  which version of hypersearch permutations file to generate, can\n              be 'v1' or 'v2'\nclaDescriptionTemplateFile: Filename containing the template description\nretval:     nothing", "id": "f17601:m2"}
{"signature": "def coordinatesFromIndex(index, dimensions):", "body": "coordinates = [<NUM_LIT:0>] * len(dimensions)<EOL>shifted = index<EOL>for i in xrange(len(dimensions) - <NUM_LIT:1>, <NUM_LIT:0>, -<NUM_LIT:1>):<EOL><INDENT>coordinates[i] = shifted % dimensions[i]<EOL>shifted = shifted / dimensions[i]<EOL><DEDENT>coordinates[<NUM_LIT:0>] = shifted<EOL>return coordinates<EOL>", "docstring": "Translate an index into coordinates, using the given coordinate system.\n\nSimilar to ``numpy.unravel_index``.\n\n:param index: (int) The index of the point. The coordinates are expressed as a \n       single index by using the dimensions as a mixed radix definition. For \n       example, in dimensions 42x10, the point [1, 4] is index \n       1*420 + 4*10 = 460.\n\n:param dimensions (list of ints) The coordinate system.\n\n:returns: (list) of coordinates of length ``len(dimensions)``.", "id": "f17602:m0"}
{"signature": "def wrappingNeighborhood(centerIndex, radius, dimensions):", "body": "centerPosition = coordinatesFromIndex(centerIndex, dimensions)<EOL>intervals = []<EOL>for i, dimension in enumerate(dimensions):<EOL><INDENT>left = centerPosition[i] - radius<EOL>right = min(centerPosition[i] + radius,<EOL>left + dimensions[i] - <NUM_LIT:1>)<EOL>interval = [v % dimension for v in xrange(left, right + <NUM_LIT:1>)]<EOL>intervals.append(interval)<EOL><DEDENT>coords = numpy.array(list(itertools.product(*intervals)))<EOL>return numpy.ravel_multi_index(coords.T, dimensions)<EOL>", "docstring": "Like :meth:`neighborhood`, except that the neighborhood isn't truncated when \nit's near an edge. It wraps around to the other side.\n\n:param centerIndex: (int) The index of the point. The coordinates are \n       expressed as a single index by using the dimensions as a mixed radix \n       definition. For example, in dimensions 42x10, the point [1, 4] is index \n       1*420 + 4*10 = 460.\n\n:param radius: (int) The radius of this neighborhood about the \n       ``centerIndex``.\n\n:param dimensions: (indexable sequence) The dimensions of the world outside \n       this neighborhood.\n\n:returns: (numpy array) The points in the neighborhood, including \n          ``centerIndex``.", "id": "f17602:m3"}
{"signature": "def propose(self, current, r):", "body": "curLambda = current + self.offset<EOL>x, logProb = PoissonDistribution(curLambda).sample(r)<EOL>logBackward = PoissonDistribution(x+self.offset).logDensity(current)<EOL>return x, logProb, logBackward<EOL>", "docstring": "Generates a random sample from the Poisson probability distribution with\n        with location and scale parameter equal to the current value (passed in).\n        Returns the value of the random sample, the log of the probability of\n        sampling that value, and the log of the probability of sampling the current\n        value if the roles of the new sample and the current sample were reversed\n        (the log of the backward proposal probability).", "id": "f17603:c7:m1"}
{"signature": "def propose(self, current, r):", "body": "stay = (r.uniform(<NUM_LIT:0>, <NUM_LIT:1>) < self.kernel)<EOL>if stay:<EOL><INDENT>logKernel = numpy.log(self.kernel)<EOL>return current, logKernel, logKernel<EOL><DEDENT>else: <EOL><INDENT>curIndex = self.keyMap[current]<EOL>ri = r.randint(<NUM_LIT:0>, self.nKeys-<NUM_LIT:1>)<EOL>logKernel = numpy.log(<NUM_LIT:1.0> - self.kernel)<EOL>lp = logKernel + self.logp<EOL>if ri < curIndex: return self.keys[ri], lp, lp<EOL>else: return self.keys[ri+<NUM_LIT:1>], lp, lp<EOL><DEDENT>", "docstring": "Generates a random sample from the discrete probability distribution and\n        returns its value, the log of the probability of sampling that value and the\n        log of the probability of sampling the current value (passed in).", "id": "f17603:c5:m1"}
{"signature": "def normalize(lx):", "body": "lx = numpy.asarray(lx)<EOL>base = lx.max()<EOL>x = numpy.exp(lx - base)<EOL>result = x / x.sum()<EOL>conventional = (numpy.exp(lx) / numpy.exp(lx).sum())<EOL>assert similar(result, conventional)<EOL>return result<EOL>", "docstring": "Accepts log-values as input, exponentiates them,\nnormalizes and returns the result.\nHandles underflow by rescaling so that the largest values is exactly 1.0.", "id": "f17604:m3"}
{"signature": "def logDiffExp(A, B, out=None):", "body": "if out is None:<EOL><INDENT>out = numpy.zeros(A.shape)<EOL><DEDENT>indicator1 = A >= B<EOL>assert indicator1.all(), \"<STR_LIT>\"<EOL>out[indicator1] = A[indicator1] + numpy.log(<NUM_LIT:1> - numpy.exp(B[indicator1]-A[indicator1]))<EOL>return out<EOL>", "docstring": "returns log(exp(A) - exp(B)). A and B are numpy arrays. values in A should be\n    greater than or equal to corresponding values in B", "id": "f17604:m7"}
{"signature": "def lscsum(lx, epsilon=None):", "body": "lx = numpy.asarray(lx)<EOL>base = lx.max()<EOL>if numpy.isinf(base):<EOL><INDENT>return base<EOL><DEDENT>if (epsilon is not None) and (base < epsilon):<EOL><INDENT>return epsilon<EOL><DEDENT>x = numpy.exp(lx - base)<EOL>ssum = x.sum()<EOL>result = numpy.log(ssum) + base<EOL>return result<EOL>", "docstring": "Accepts log-values as input, exponentiates them, computes the sum,\nthen converts the sum back to log-space and returns the result.\nHandles underflow by rescaling so that the largest values is exactly 1.0.", "id": "f17604:m1"}
{"signature": "def lnsum0(lx):", "body": "lx = numpy.asarray(lx)<EOL>base = lx.max()<EOL>x = numpy.exp(lx - base)<EOL>ssum = x.sum(<NUM_LIT:0>)<EOL>normalized = nsum0(lx)<EOL>result = numpy.log(normalized)<EOL>conventional = numpy.log(numpy.exp(lx).sum(<NUM_LIT:0>) / numpy.exp(lx).sum())<EOL>assert similar(result, conventional)<EOL>return result<EOL>", "docstring": "Accepts log-values as input, exponentiates them, sums down the rows\n(first dimension), normalizes, then converts the sum back to\nlog-space and returns the result.\nHandles underflow by rescaling so that the largest values is exactly 1.0.", "id": "f17604:m5"}
{"signature": "def MultiArgMax(x):", "body": "m = x.max()<EOL>return (i for i, v in enumerate(x) if v == m)<EOL>", "docstring": "Get tuple (actually a generator) of indices where the max value of\narray x occurs. Requires that x have a max() method, as x.max()\n(in the case of NumPy) is much faster than max(x).\nFor a simpler, faster argmax when there is only a single maximum entry,\nor when knowing only the first index where the maximum occurs,\ncall argmax() on a NumPy array.\n\n:param x: Any sequence that has a max() method.\n:returns: Generator with the indices where the max value occurs.", "id": "f17606:m2"}
{"signature": "def numRows(self):", "body": "if self.hist_: return self.hist_.nRows()<EOL>else: return <NUM_LIT:0><EOL>", "docstring": "Gets the number of rows in the histogram.\n\n        :returns: Integer number of rows.", "id": "f17606:c0:m1"}
{"signature": "def MultiIndicator(pos, size, dtype):", "body": "x = numpy.zeros(size, dtype=dtype)<EOL>if hasattr(pos, '<STR_LIT>'):<EOL><INDENT>for i in pos: x[i] = <NUM_LIT:1><EOL><DEDENT>else: x[pos] = <NUM_LIT:1><EOL>return x<EOL>", "docstring": "Returns an array of length size and type dtype that is everywhere 0,\nexcept in the indices listed in sequence pos.\n\n:param pos:   A single integer or sequence of integers that specify\n       the position of ones to be set.\n:param size:  The total size of the array to be returned.\n:param dtype: The element type (compatible with NumPy array())\n       of the array to be returned.\n:returns: An array of length size and element type dtype.", "id": "f17606:m6"}
{"signature": "def Indicator(pos, size, dtype):", "body": "x = numpy.zeros(size, dtype=dtype)<EOL>x[pos] = <NUM_LIT:1><EOL>return x<EOL>", "docstring": "Returns an array of length size and type dtype that is everywhere 0,\nexcept in the index in pos.\n\n:param pos: (int) specifies the position of the one entry that will be set.\n:param size: (int) The total size of the array to be returned.\n:param dtype: The element type (compatible with NumPy array())\n       of the array to be returned.\n:returns: (list) of length ``size`` and element type ``dtype``.", "id": "f17606:m1"}
{"signature": "def clean_outcpd(self):", "body": "m = self.hist_.toDense()<EOL>for j in xrange(m.shape[<NUM_LIT:1>]): <EOL><INDENT>cmax = m[:,j].max()<EOL>if cmax:<EOL><INDENT>m[:,j] = numpy.array(m[:,j] == cmax, dtype=dtype)<EOL><DEDENT><DEDENT>self.hack_ = SparseMatrix(<NUM_LIT:0>, self.hist_.nCols())<EOL>for i in xrange(m.shape[<NUM_LIT:0>]):<EOL><INDENT>self.hack_.addRow(m[i,:])<EOL><DEDENT>", "docstring": "Hack to act like clean_outcpd on zeta1.TopLevelNode.\n        Take the max element in each to column, set it to 1, and set all the\n        other elements to 0.\n        Only called by inferRowMaxProd() and only needed if an updateRow()\n        has been called since the last clean_outcpd().", "id": "f17606:c0:m9"}
{"signature": "def sample(self, rgen):", "body": "x = rgen.poisson(self.lambdaParameter)<EOL>return x, self.logDensity(x)<EOL>", "docstring": "Generates a random sample from the Poisson probability distribution and\n        returns its value and the log of the probability of sampling that value.", "id": "f17608:c4:m1"}
{"signature": "def logFactorial(x):", "body": "return lgamma(x + <NUM_LIT:1.0>)<EOL>", "docstring": "Approximation to the log of the factorial function.", "id": "f17608:m0"}
{"signature": "def sample(self, rgen):", "body": "rf = rgen.uniform(<NUM_LIT:0>, self.sum)<EOL>index = bisect.bisect(self.cdf, rf)<EOL>return self.keys[index], numpy.log(self.pmf[index])<EOL>", "docstring": "Generates a random sample from the discrete probability distribution\n        and returns its value and the log of the probability of sampling that value.", "id": "f17608:c2:m1"}
{"signature": "def AreaUnderCurve(x, y):", "body": "<EOL>if x.shape[<NUM_LIT:0>] != y.shape[<NUM_LIT:0>]:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>% (x.shape, y.shape))<EOL><DEDENT>if x.shape[<NUM_LIT:0>] < <NUM_LIT:2>:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>' % x.shape)<EOL><DEDENT>order = np.argsort(x)<EOL>x = x[order]<EOL>y = y[order]<EOL>h = np.diff(x)<EOL>area = np.sum(h * (y[<NUM_LIT:1>:] + y[:-<NUM_LIT:1>])) / <NUM_LIT><EOL>return area<EOL>", "docstring": "Compute Area Under the Curve (AUC) using the trapezoidal rule\n\n    Parameters\n    ----------\n    x : array, shape = [n]\n        x coordinates\n\n    y : array, shape = [n]\n        y coordinates\n\n    Returns\n    -------\n    auc : float\n\n    Examples\n    --------\n    >>> import numpy as np\n    >>> from sklearn import metrics\n    >>> y = np.array([1, 1, 2, 2])\n    >>> pred = np.array([0.1, 0.4, 0.35, 0.8])\n    >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred)\n    >>> metrics.auc(fpr, tpr)\n    0.75", "id": "f17609:m1"}
{"signature": "def dcross(**keywords):", "body": "keys = keywords.keys()<EOL>sequences = [keywords[key] for key in keys]<EOL>wheels = map(iter, sequences)<EOL>digits = [it.next( ) for it in wheels]<EOL>while True:<EOL><INDENT>yield dict(zip(keys, digits))<EOL>for i in range(len(digits)-<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>):<EOL><INDENT>try:<EOL><INDENT>digits[i] = wheels[i].next( )<EOL>break<EOL><DEDENT>except StopIteration:<EOL><INDENT>wheels[i] = iter(sequences[i])<EOL>digits[i] = wheels[i].next( )<EOL><DEDENT><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "Similar to cross(), but generates output dictionaries instead of tuples.", "id": "f17610:m2"}
{"signature": "def cross(*sequences):", "body": "<EOL>wheels = map(iter, sequences)<EOL>digits = [it.next( ) for it in wheels]<EOL>while True:<EOL><INDENT>yield tuple(digits)<EOL>for i in range(len(digits)-<NUM_LIT:1>, -<NUM_LIT:1>, -<NUM_LIT:1>):<EOL><INDENT>try:<EOL><INDENT>digits[i] = wheels[i].next( )<EOL>break<EOL><DEDENT>except StopIteration:<EOL><INDENT>wheels[i] = iter(sequences[i])<EOL>digits[i] = wheels[i].next( )<EOL><DEDENT><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>", "docstring": "From: http://book.opensourceproject.org.cn/lamp/python/pythoncook2/opensource/0596007973/pythoncook2-chp-19-sect-9.html", "id": "f17610:m1"}
{"signature": "def next(self, newValue):", "body": "newAverage, self.slidingWindow, self.total = self.compute(<EOL>self.slidingWindow, self.total, newValue, self.windowSize)<EOL>return newAverage<EOL>", "docstring": "Instance method wrapper around compute.", "id": "f17611:c0:m2"}
{"signature": "def processClubConsumption(f, clubs):", "body": "try:<EOL><INDENT>line = next(f)<EOL>assert line.endswith('<STR_LIT>')<EOL>valid_times = list(range(<NUM_LIT>))<EOL>t = <NUM_LIT:0> <EOL>club = None<EOL>clubName = None<EOL>lastDate = None<EOL>while True:<EOL><INDENT>assert t in valid_times<EOL>consumption = <NUM_LIT:0><EOL>for x in range(<NUM_LIT:4>):<EOL><INDENT>line = f.next()[:-<NUM_LIT:1>]<EOL>fields = line.split('<STR_LIT:U+002C>')<EOL>assert len(fields) == <NUM_LIT:4><EOL>for i, field in enumerate(fields):<EOL><INDENT>assert field[<NUM_LIT:0>] == '<STR_LIT:\">' and field[-<NUM_LIT:1>] == '<STR_LIT:\">'<EOL>fields[i] = field[<NUM_LIT:1>:-<NUM_LIT:1>]<EOL><DEDENT>name = fields[<NUM_LIT:1>]<EOL>partialNames = ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>for pn in partialNames:<EOL><INDENT>if pn in name:<EOL><INDENT>name = pn<EOL><DEDENT><DEDENT>if name != clubName:<EOL><INDENT>clubName = name<EOL>club = clubs[name]<EOL><DEDENT>tokens = fields[<NUM_LIT:2>].split()<EOL>if len(tokens) == <NUM_LIT:1>:<EOL><INDENT>assert consumption == <NUM_LIT:0> and t == <NUM_LIT:0><EOL><DEDENT>date = tokens[<NUM_LIT:0>]<EOL>consumption += float(fields[<NUM_LIT:3>])<EOL><DEDENT>club.updateRecord(date, t, consumption)<EOL>t += <NUM_LIT:1><EOL>t %= <NUM_LIT><EOL><DEDENT><DEDENT>except StopIteration:<EOL><INDENT>return<EOL><DEDENT>", "docstring": "Process the consumption a club\n\n    - Skip the header line\n    - Iterate over lines\n      - Read 4 records at a time\n        - Parse each line: club, date, time, consumption\n        - Get club object from dictionary if needed\n        - Aggregate consumption\n      - Call club.processConsumption() with data", "id": "f17614:m1"}
{"signature": "def _generateModel0(numCategories):", "body": "<EOL>initProb = numpy.zeros(numCategories)<EOL>initProb[<NUM_LIT:0>] = <NUM_LIT:0.5><EOL>initProb[<NUM_LIT:4>] = <NUM_LIT:0.5><EOL>firstOrder = dict()<EOL>for catIdx in range(numCategories):<EOL><INDENT>key = str([catIdx])<EOL>probs = numpy.ones(numCategories) / numCategories<EOL>if catIdx == <NUM_LIT:0> or catIdx == <NUM_LIT:4>:<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT:1>] = <NUM_LIT:1.0>    <EOL><DEDENT>firstOrder[key] = probs<EOL><DEDENT>secondOrder = dict()<EOL>for firstIdx in range(numCategories):<EOL><INDENT>for secondIdx in range(numCategories):<EOL><INDENT>key = str([firstIdx, secondIdx])<EOL>probs = numpy.ones(numCategories) / numCategories<EOL>if key == str([<NUM_LIT:0>,<NUM_LIT:1>]):<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT:2>] = <NUM_LIT>   <EOL>probs[<NUM_LIT:3>] = <NUM_LIT>   <EOL><DEDENT>elif key == str([<NUM_LIT:4>,<NUM_LIT:1>]):<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT:2>] = <NUM_LIT>   <EOL>probs[<NUM_LIT:3>] = <NUM_LIT>   <EOL><DEDENT>secondOrder[key] = probs<EOL><DEDENT><DEDENT>return (initProb, firstOrder, secondOrder, <NUM_LIT:3>)<EOL>", "docstring": "Generate the initial, first order, and second order transition\n    probabilities for 'model0'. For this model, we generate the following\n    set of sequences:\n\n    1-2-3   (4X)\n    1-2-4   (1X)\n    5-2-3   (1X)\n    5-2-4   (4X)\n\n\n    Parameters:\n    ----------------------------------------------------------------------\n    numCategories:      Number of categories\n    retval: (initProb, firstOrder, secondOrder, seqLen)\n              initProb:     Initial probability for each category. This is a vector\n                              of length len(categoryList).\n              firstOrder:   A dictionary of the 1st order probabilities. 
The key\n                              is the 1st element of the sequence, the value is\n                              the probability of each 2nd element given the first. \n              secondOrder:  A dictionary of the 2nd order probabilities. The key\n                              is the first 2 elements of the sequence, the value is\n                              the probability of each possible 3rd element given the \n                              first two. \n              seqLen:       Desired length of each sequence. The 1st element will\n                            be generated using the initProb, the 2nd element by the\n                            firstOrder table, and the 3rd and all successive \n                            elements by the secondOrder table. \n\n\n    Here is an example of some return values:\n    initProb:         [0.7, 0.2, 0.1]\n\n    firstOrder:       {'[0]': [0.3, 0.3, 0.4],\n                       '[1]': [0.3, 0.3, 0.4],\n                       '[2]': [0.3, 0.3, 0.4]}\n\n    secondOrder:      {'[0,0]': [0.3, 0.3, 0.4],\n                       '[0,1]': [0.3, 0.3, 0.4],\n                       '[0,2]': [0.3, 0.3, 0.4],\n                       '[1,0]': [0.3, 0.3, 0.4],\n                       '[1,1]': [0.3, 0.3, 0.4],\n                       '[1,2]': [0.3, 0.3, 0.4],\n                       '[2,0]': [0.3, 0.3, 0.4],\n                       '[2,1]': [0.3, 0.3, 0.4],\n                       '[2,2]': [0.3, 0.3, 0.4]}", "id": "f17616:m0"}
{"signature": "def _generateModel1(numCategories):", "body": "<EOL>initProb = numpy.zeros(numCategories)<EOL>initProb[<NUM_LIT:0>] = <NUM_LIT:0.5><EOL>initProb[<NUM_LIT:1>] = <NUM_LIT:0.5><EOL>firstOrder = dict()<EOL>for catIdx in range(numCategories):<EOL><INDENT>key = str([catIdx])<EOL>probs = numpy.ones(numCategories) / numCategories<EOL>if catIdx == <NUM_LIT:0> or catIdx == <NUM_LIT:1>:<EOL><INDENT>indices = numpy.array([<NUM_LIT:10>,<NUM_LIT:11>,<NUM_LIT:12>,<NUM_LIT>,<NUM_LIT>])<EOL>probs.fill(<NUM_LIT:0>)<EOL>probs[indices] = <NUM_LIT:1.0>    <EOL>probs /= probs.sum()<EOL><DEDENT>firstOrder[key] = probs<EOL><DEDENT>secondOrder = dict()<EOL>for firstIdx in range(numCategories):<EOL><INDENT>for secondIdx in range(numCategories):<EOL><INDENT>key = str([firstIdx, secondIdx])<EOL>probs = numpy.ones(numCategories) / numCategories<EOL>if key == str([<NUM_LIT:0>,<NUM_LIT:10>]):<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT:15>] = <NUM_LIT:1><EOL><DEDENT>elif key == str([<NUM_LIT:0>,<NUM_LIT:11>]):<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT:16>] = <NUM_LIT:1><EOL><DEDENT>elif key == str([<NUM_LIT:0>,<NUM_LIT:12>]):<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT>] = <NUM_LIT:1><EOL><DEDENT>elif key == str([<NUM_LIT:0>,<NUM_LIT>]):<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT>] = <NUM_LIT:1><EOL><DEDENT>elif key == str([<NUM_LIT:0>,<NUM_LIT>]):<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT>] = <NUM_LIT:1><EOL><DEDENT>elif key == str([<NUM_LIT:1>,<NUM_LIT:10>]):<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT:20>] = <NUM_LIT:1><EOL><DEDENT>elif key == str([<NUM_LIT:1>,<NUM_LIT:11>]):<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT>] = <NUM_LIT:1><EOL><DEDENT>elif key == str([<NUM_LIT:1>,<NUM_LIT:12>]):<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT>] = <NUM_LIT:1><EOL><DEDENT>elif key == str([<NUM_LIT:1>,<NUM_LIT>]):<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT>] = 
<NUM_LIT:1><EOL><DEDENT>elif key == str([<NUM_LIT:1>,<NUM_LIT>]):<EOL><INDENT>probs.fill(<NUM_LIT:0>)<EOL>probs[<NUM_LIT>] = <NUM_LIT:1><EOL><DEDENT>secondOrder[key] = probs<EOL><DEDENT><DEDENT>return (initProb, firstOrder, secondOrder, <NUM_LIT:3>)<EOL>", "docstring": "Generate the initial, first order, and second order transition\n    probabilities for 'model1'. For this model, we generate the following\n    set of sequences:\n\n    0-10-15 (1X)\n    0-11-16 (1X)\n    0-12-17 (1X)\n    0-13-18 (1X)\n    0-14-19 (1X)\n\n    1-10-20 (1X)\n    1-11-21 (1X)\n    1-12-22 (1X)\n    1-13-23 (1X)\n    1-14-24 (1X)\n\n\n    Parameters:\n    ----------------------------------------------------------------------\n    numCategories:      Number of categories\n    retval: (initProb, firstOrder, secondOrder, seqLen)\n              initProb:     Initial probability for each category. This is a vector\n                              of length len(categoryList).\n              firstOrder:   A dictionary of the 1st order probabilities. The key\n                              is the 1st element of the sequence, the value is\n                              the probability of each 2nd element given the first. \n              secondOrder:  A dictionary of the 2nd order probabilities. The key\n                              is the first 2 elements of the sequence, the value is\n                              the probability of each possible 3rd element given the \n                              first two. \n              seqLen:       Desired length of each sequence. The 1st element will\n                            be generated using the initProb, the 2nd element by the\n                            firstOrder table, and the 3rd and all successive \n                            elements by the secondOrder table. 
\n\n\n    Here is an example of some return values:\n    initProb:         [0.7, 0.2, 0.1]\n\n    firstOrder:       {'[0]': [0.3, 0.3, 0.4],\n                       '[1]': [0.3, 0.3, 0.4],\n                       '[2]': [0.3, 0.3, 0.4]}\n\n    secondOrder:      {'[0,0]': [0.3, 0.3, 0.4],\n                       '[0,1]': [0.3, 0.3, 0.4],\n                       '[0,2]': [0.3, 0.3, 0.4],\n                       '[1,0]': [0.3, 0.3, 0.4],\n                       '[1,1]': [0.3, 0.3, 0.4],\n                       '[1,2]': [0.3, 0.3, 0.4],\n                       '[2,0]': [0.3, 0.3, 0.4],\n                       '[2,1]': [0.3, 0.3, 0.4],\n                       '[2,2]': [0.3, 0.3, 0.4]}", "id": "f17616:m1"}
{"signature": "def _generateFile(filename, data):", "body": "<EOL>print(\"<STR_LIT>\" % (filename))<EOL>numRecords, numFields = data.shape<EOL>fields = [('<STR_LIT>'%(i+<NUM_LIT:1>), '<STR_LIT:float>', '<STR_LIT>') for i in range(numFields)]<EOL>outFile = File(filename, fields)<EOL>for i in range(numRecords):<EOL><INDENT>outFile.write(data[i].tolist())<EOL><DEDENT>outFile.close()<EOL>", "docstring": "Parameters:\n----------------------------------------------------------------\nfilename:         name of .csv file to generate", "id": "f17617:m4"}
{"signature": "@staticmethod<EOL><INDENT>def getSchema():<DEDENT>", "body": "return TMRegionProto<EOL>", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSchema`.", "id": "f17618:c0:m12"}
{"signature": "def _initEphemerals(self):", "body": "self._sequencePos = <NUM_LIT:0><EOL>self._fpLogTPOutput = None<EOL>self.logPathOutput = None<EOL>", "docstring": "Initialize all ephemerals used by derived classes.", "id": "f17618:c0:m19"}
{"signature": "def _getAdditionalSpecs(temporalImp, kwargs={}):", "body": "typeNames = {int: '<STR_LIT>', float: '<STR_LIT>', str: '<STR_LIT>', bool: '<STR_LIT:bool>', tuple: '<STR_LIT>'}<EOL>def getArgType(arg):<EOL><INDENT>t = typeNames.get(type(arg), '<STR_LIT>')<EOL>count = <NUM_LIT:0> if t == '<STR_LIT>' else <NUM_LIT:1><EOL>if t == '<STR_LIT>':<EOL><INDENT>t = typeNames.get(type(arg[<NUM_LIT:0>]), '<STR_LIT>')<EOL>count = len(arg)<EOL><DEDENT>if t == '<STR_LIT:bool>':<EOL><INDENT>t = '<STR_LIT>'<EOL><DEDENT>return (t, count)<EOL><DEDENT>def getConstraints(arg):<EOL><INDENT>t = typeNames.get(type(arg), '<STR_LIT>')<EOL>if t == '<STR_LIT>':<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif t == '<STR_LIT:bool>':<EOL><INDENT>return '<STR_LIT:bool>'<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT><DEDENT>TemporalClass = _getTPClass(temporalImp)<EOL>tArgTuples = _buildArgs(TemporalClass.__init__)<EOL>temporalSpec = {}<EOL>for argTuple in tArgTuples:<EOL><INDENT>d = dict(<EOL>description=argTuple[<NUM_LIT:1>],<EOL>accessMode='<STR_LIT>',<EOL>dataType=getArgType(argTuple[<NUM_LIT:2>])[<NUM_LIT:0>],<EOL>count=getArgType(argTuple[<NUM_LIT:2>])[<NUM_LIT:1>],<EOL>constraints=getConstraints(argTuple[<NUM_LIT:2>]))<EOL>temporalSpec[argTuple[<NUM_LIT:0>]] = 
d<EOL><DEDENT>temporalSpec.update(dict(<EOL>columnCount=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>'),<EOL>cellsPerColumn=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>'),<EOL>inputWidth=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>'),<EOL>predictedSegmentDecrement=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>'),<EOL>orColumnOutputs=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>'),<EOL>cellsSavePath=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>temporalImp=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>))<EOL>otherSpec = 
dict(<EOL>learningMode=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>defaultValue=True,<EOL>constraints='<STR_LIT:bool>'),<EOL>inferenceMode=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>defaultValue=False,<EOL>constraints='<STR_LIT:bool>'),<EOL>computePredictedActiveCellIndices=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>defaultValue=False,<EOL>constraints='<STR_LIT:bool>'),<EOL>anomalyMode=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>defaultValue=False,<EOL>constraints='<STR_LIT:bool>'),<EOL>topDownMode=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>defaultValue=False,<EOL>constraints='<STR_LIT:bool>'),<EOL>activeOutputCount=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>'),<EOL>storeDenseOutput=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>'),<EOL>logPathOutput=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>)<EOL>return temporalSpec, otherSpec<EOL>", "docstring": "Build the additional specs in three groups (for the inspector)\n\n    Use the type of the default argument to set the Spec type, defaulting\n    to 'Byte' for None and complex types\n\n    Determines the spatial parameters based on the selected implementation.\n    It defaults to TemporalMemory.\n    Determines the temporal parameters based on the temporalImp", "id": "f17618:m2"}
{"signature": "def __setstate__(self, state):", "body": "if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self.storeDenseOutput = False<EOL><DEDENT>if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self.computePredictedActiveCellIndices = False<EOL><DEDENT>self.__dict__.update(state)<EOL>self._loaded = True<EOL>self._initialize()<EOL>", "docstring": "Set the state of ourself from a serialized state.", "id": "f17618:c0:m18"}
{"signature": "@classmethod<EOL><INDENT>def readFromProto(cls, proto):<DEDENT>", "body": "instance = cls(proto.columnCount, proto.inputWidth, proto.cellsPerColumn)<EOL>instance.temporalImp = proto.temporalImp<EOL>instance.learningMode = proto.learningMode<EOL>instance.inferenceMode = proto.inferenceMode<EOL>instance.anomalyMode = proto.anomalyMode<EOL>instance.topDownMode = proto.topDownMode<EOL>instance.computePredictedActiveCellIndices = (<EOL>proto.computePredictedActiveCellIndices)<EOL>instance.orColumnOutputs = proto.orColumnOutputs<EOL>if instance.temporalImp == \"<STR_LIT>\":<EOL><INDENT>tmProto = proto.backtrackingTM<EOL><DEDENT>elif instance.temporalImp == \"<STR_LIT>\":<EOL><INDENT>tmProto = proto.backtrackingTMCpp<EOL><DEDENT>elif instance.temporalImp == \"<STR_LIT>\":<EOL><INDENT>tmProto = proto.temporalMemory<EOL><DEDENT>elif instance.temporalImp == \"<STR_LIT>\":<EOL><INDENT>tmProto = proto.temporalMemory<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(<EOL>\"<STR_LIT>\".format(<EOL>instance.temporalImp))<EOL><DEDENT>instance._tfdr = _getTPClass(proto.temporalImp).read(tmProto)<EOL>return instance<EOL>", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`.\n\nRead state from proto object.\n\n:param proto: TMRegionProto capnproto object", "id": "f17618:c0:m14"}
{"signature": "@classmethod<EOL><INDENT>def getBaseSpec(cls):<DEDENT>", "body": "spec = dict(<EOL>description=TMRegion.__doc__,<EOL>singleNodeOnly=True,<EOL>inputs=dict(<EOL>bottomUpIn=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>required=True,<EOL>regionLevel=False,<EOL>isDefaultInput=True,<EOL>requireSplitterMap=False),<EOL>resetIn=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>required=False,<EOL>regionLevel=True,<EOL>isDefaultInput=False,<EOL>requireSplitterMap=False),<EOL>sequenceIdIn=dict(<EOL>description=\"<STR_LIT>\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>required=False,<EOL>regionLevel=True,<EOL>isDefaultInput=False,<EOL>requireSplitterMap=False),<EOL>),<EOL>outputs=dict(<EOL>bottomUpOut=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>regionLevel=True,<EOL>isDefaultOutput=True),<EOL>topDownOut=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>regionLevel=True,<EOL>isDefaultOutput=False),<EOL>activeCells=dict(<EOL>description=\"<STR_LIT>\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>regionLevel=True,<EOL>isDefaultOutput=False),<EOL>predictedActiveCells=dict(<EOL>description=\"<STR_LIT>\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>regionLevel=True,<EOL>isDefaultOutput=False),<EOL>anomalyScore = dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>regionLevel=True,<EOL>isDefaultOutput=False),<EOL>lrnActiveStateT = 
dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>regionLevel=True,<EOL>isDefaultOutput=False),<EOL>),<EOL>parameters=dict(<EOL>breakPdb=dict(<EOL>description='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>breakKomodo=dict(<EOL>description='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>),<EOL>commands = {}<EOL>)<EOL>return spec<EOL>", "docstring": "Doesn't include the spatial, temporal and other parameters\n\n:returns: (dict) the base Spec for TMRegion.", "id": "f17618:c0:m5"}
{"signature": "def _compute(self, inputs, outputs):", "body": "<EOL>if self._tfdr is None:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>self._conditionalBreak()<EOL>self._iterations += <NUM_LIT:1><EOL>buInputVector = inputs['<STR_LIT>']<EOL>resetSignal = False<EOL>if '<STR_LIT>' in inputs:<EOL><INDENT>assert len(inputs['<STR_LIT>']) == <NUM_LIT:1><EOL>if inputs['<STR_LIT>'][<NUM_LIT:0>] != <NUM_LIT:0>:<EOL><INDENT>self._tfdr.reset()<EOL>self._sequencePos = <NUM_LIT:0>  <EOL><DEDENT><DEDENT>if self.computePredictedActiveCellIndices:<EOL><INDENT>prevPredictedState = self._tfdr.getPredictedState().reshape(-<NUM_LIT:1>).astype('<STR_LIT>')<EOL><DEDENT>if self.anomalyMode:<EOL><INDENT>prevPredictedColumns = self._tfdr.topDownCompute().copy().nonzero()[<NUM_LIT:0>]<EOL><DEDENT>tpOutput = self._tfdr.compute(buInputVector, self.learningMode, self.inferenceMode)<EOL>self._sequencePos += <NUM_LIT:1><EOL>if self.orColumnOutputs:<EOL><INDENT>tpOutput= tpOutput.reshape(self.columnCount,<EOL>self.cellsPerColumn).max(axis=<NUM_LIT:1>)<EOL><DEDENT>if self._fpLogTPOutput:<EOL><INDENT>output = tpOutput.reshape(-<NUM_LIT:1>)<EOL>outputNZ = tpOutput.nonzero()[<NUM_LIT:0>]<EOL>outStr = \"<STR_LIT:U+0020>\".join([\"<STR_LIT>\" % int(token) for token in outputNZ])<EOL>print(output.size, outStr, file=self._fpLogTPOutput)<EOL><DEDENT>outputs['<STR_LIT>'][:] = tpOutput.flat<EOL>if self.topDownMode:<EOL><INDENT>outputs['<STR_LIT>'][:] = self._tfdr.topDownCompute().copy()<EOL><DEDENT>if self.anomalyMode:<EOL><INDENT>activeLearnCells = self._tfdr.getLearnActiveStateT()<EOL>size = activeLearnCells.shape[<NUM_LIT:0>] * activeLearnCells.shape[<NUM_LIT:1>]<EOL>outputs['<STR_LIT>'][:] = activeLearnCells.reshape(size)<EOL>activeColumns = buInputVector.nonzero()[<NUM_LIT:0>]<EOL>outputs['<STR_LIT>'][:] = anomaly.computeRawAnomalyScore(<EOL>activeColumns, prevPredictedColumns)<EOL><DEDENT>if self.computePredictedActiveCellIndices:<EOL><INDENT>activeState = 
self._tfdr._getActiveState().reshape(-<NUM_LIT:1>).astype('<STR_LIT>')<EOL>activeIndices = numpy.where(activeState != <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>predictedIndices= numpy.where(prevPredictedState != <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>predictedActiveIndices = numpy.intersect1d(activeIndices, predictedIndices)<EOL>outputs[\"<STR_LIT>\"].fill(<NUM_LIT:0>)<EOL>outputs[\"<STR_LIT>\"][activeIndices] = <NUM_LIT:1><EOL>outputs[\"<STR_LIT>\"].fill(<NUM_LIT:0>)<EOL>outputs[\"<STR_LIT>\"][predictedActiveIndices] = <NUM_LIT:1><EOL><DEDENT>", "docstring": "Run one iteration of TMRegion's compute", "id": "f17618:c0:m4"}
{"signature": "def _getEphemeralMembersAll(self):", "body": "return self._getEphemeralMembersBase() + self._getEphemeralMembers()<EOL>", "docstring": "Returns a concatenated list of both the standard base class\nephemeral members, as well as any additional ephemeral members\n(e.g., file handles, etc.).", "id": "f17618:c0:m22"}
{"signature": "@classmethod<EOL><INDENT>def getSpec(cls):<DEDENT>", "body": "spec = cls.getBaseSpec()<EOL>t, o = _getAdditionalSpecs(temporalImp=gDefaultTemporalImp)<EOL>spec['<STR_LIT>'].update(t)<EOL>spec['<STR_LIT>'].update(o)<EOL>return spec<EOL>", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSpec`.\n\nThe parameters collection is constructed based on the parameters specified\nby the various components (spatialSpec, temporalSpec and otherSpec)", "id": "f17618:c0:m6"}
{"signature": "def _getTPClass(temporalImp):", "body": "if temporalImp == '<STR_LIT>':<EOL><INDENT>return backtracking_tm.BacktrackingTM<EOL><DEDENT>elif temporalImp == '<STR_LIT>':<EOL><INDENT>return backtracking_tm_cpp.BacktrackingTMCPP<EOL><DEDENT>elif temporalImp == '<STR_LIT>':<EOL><INDENT>return backtracking_tm_shim.TMShim<EOL><DEDENT>elif temporalImp == '<STR_LIT>':<EOL><INDENT>return backtracking_tm_shim.TMCPPShim<EOL><DEDENT>elif temporalImp == '<STR_LIT>':<EOL><INDENT>return backtracking_tm_shim.MonitoredTMShim<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (temporalImp))<EOL><DEDENT>", "docstring": "Return the class corresponding to the given temporalImp string", "id": "f17618:m0"}
{"signature": "def getParameterArray(self, name, index, a):", "body": "p = self.getParameter(name)<EOL>if (not hasattr(p, '<STR_LIT>')):<EOL><INDENT>raise Exception(\"<STR_LIT>\" % name)<EOL><DEDENT>if len(p) >  <NUM_LIT:0>:<EOL><INDENT>a[:] = p[:]<EOL><DEDENT>", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`.", "id": "f17618:c0:m27"}
{"signature": "def _getEphemeralMembersBase(self):", "body": "return [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>]<EOL>", "docstring": "Returns list of all ephemeral members.", "id": "f17618:c0:m21"}
{"signature": "def getParameter(self, name, index=-<NUM_LIT:1>):", "body": "<EOL>return PyRegion.getParameter(self, name, index)<EOL>", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.", "id": "f17619:c0:m4"}
{"signature": "def initialize(self):", "body": "if self._sdrClassifier is None:<EOL><INDENT>self._sdrClassifier = SDRClassifierFactory.create(<EOL>steps=self.stepsList,<EOL>alpha=self.alpha,<EOL>verbosity=self.verbosity,<EOL>implementation=self.implementation,<EOL>)<EOL><DEDENT>", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.initialize`.\n\nIs called once by NuPIC before the first call to compute().\nInitializes self._sdrClassifier if it is not already initialized.", "id": "f17619:c0:m2"}
{"signature": "def getAlgorithmInstance(self):", "body": "return self._sdrClassifier<EOL>", "docstring": ":returns: (:class:`nupic.regions.sdr_classifier_region.SDRClassifierRegion`)", "id": "f17619:c0:m3"}
{"signature": "def customCompute(self, recordNum, patternNZ, classification):", "body": "<EOL>if not hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>self._computeFlag = False<EOL><DEDENT>if self._computeFlag:<EOL><INDENT>warnings.simplefilter('<STR_LIT:error>', DeprecationWarning)<EOL>warnings.warn(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>DeprecationWarning)<EOL><DEDENT>return self._sdrClassifier.compute(recordNum,<EOL>patternNZ,<EOL>classification,<EOL>self.learningMode,<EOL>self.inferenceMode)<EOL>", "docstring": "Just return the inference value from one input sample. The actual\nlearning happens in compute() -- if, and only if learning is enabled --\nwhich is called when you run the network.\n\n.. warning:: This method is deprecated and exists only to maintain backward \n   compatibility. This method is deprecated, and will be removed. Use \n   :meth:`nupic.engine.Network.run` instead, which will call \n   :meth:`~nupic.regions.sdr_classifier_region.compute`.\n\n:param recordNum: (int) Record number of the input sample.\n:param patternNZ: (list) of the active indices from the output below\n:param classification: (dict) of the classification information:\n\n       * ``bucketIdx``: index of the encoder bucket\n       * ``actValue``:  actual value going into the encoder\n\n:returns: (dict) containing inference results, one entry for each step in\n          ``self.steps``. The key is the number of steps, the value is an\n          array containing the relative likelihood for each ``bucketIdx``\n          starting from 0.\n\n          For example:\n\n          :: \n\n            {'actualValues': [0.0, 1.0, 2.0, 3.0]\n              1 : [0.1, 0.3, 0.2, 0.7]\n              4 : [0.2, 0.4, 0.3, 0.5]}", "id": "f17619:c0:m10"}
{"signature": "@staticmethod<EOL><INDENT>def getSchema():<DEDENT>", "body": "return SDRClassifierRegionProto<EOL>", "docstring": ":returns: the pycapnp proto type that the class uses for serialization.", "id": "f17619:c0:m6"}
{"signature": "def compute(self, inputs, outputs):", "body": "<EOL>self._computeFlag = True<EOL>patternNZ = inputs[\"<STR_LIT>\"].nonzero()[<NUM_LIT:0>]<EOL>if self.learningMode:<EOL><INDENT>categories = [category for category in inputs[\"<STR_LIT>\"]<EOL>if category >= <NUM_LIT:0>]<EOL>if len(categories) > <NUM_LIT:0>:<EOL><INDENT>bucketIdxList = []<EOL>actValueList = []<EOL>for category in categories:<EOL><INDENT>bucketIdxList.append(int(category))<EOL>if \"<STR_LIT>\" not in inputs:<EOL><INDENT>actValueList.append(int(category))<EOL><DEDENT>else:<EOL><INDENT>actValueList.append(float(inputs[\"<STR_LIT>\"]))<EOL><DEDENT><DEDENT>classificationIn = {\"<STR_LIT>\": bucketIdxList,<EOL>\"<STR_LIT>\": actValueList}<EOL><DEDENT>else:<EOL><INDENT>if \"<STR_LIT>\" not in inputs:<EOL><INDENT>raise KeyError(\"<STR_LIT>\")<EOL><DEDENT>if \"<STR_LIT>\" not in inputs:<EOL><INDENT>raise KeyError(\"<STR_LIT>\")<EOL><DEDENT>classificationIn = {\"<STR_LIT>\": int(inputs[\"<STR_LIT>\"]),<EOL>\"<STR_LIT>\": float(inputs[\"<STR_LIT>\"])}<EOL><DEDENT><DEDENT>else:<EOL><INDENT>classificationIn = {\"<STR_LIT>\": <NUM_LIT:0>, \"<STR_LIT>\": <NUM_LIT:0>}<EOL><DEDENT>clResults = self._sdrClassifier.compute(recordNum=self.recordNum,<EOL>patternNZ=patternNZ,<EOL>classification=classificationIn,<EOL>learn=self.learningMode,<EOL>infer=self.inferenceMode)<EOL>if clResults is not None and len(clResults) > <NUM_LIT:0>:<EOL><INDENT>outputs['<STR_LIT>'][:len(clResults[\"<STR_LIT>\"])] =clResults[\"<STR_LIT>\"]<EOL>for step in self.stepsList:<EOL><INDENT>stepIndex = self.stepsList.index(step)<EOL>categoryOut = clResults[\"<STR_LIT>\"][clResults[step].argmax()]<EOL>outputs['<STR_LIT>'][stepIndex] = categoryOut<EOL>stepProbabilities = clResults[step]<EOL>for categoryIndex in xrange(self.maxCategoryCount):<EOL><INDENT>flatIndex = categoryIndex + stepIndex * self.maxCategoryCount<EOL>if categoryIndex < len(stepProbabilities):<EOL><INDENT>outputs['<STR_LIT>'][flatIndex] 
=stepProbabilities[categoryIndex]<EOL><DEDENT>else:<EOL><INDENT>outputs['<STR_LIT>'][flatIndex] = <NUM_LIT:0.0><EOL><DEDENT><DEDENT><DEDENT><DEDENT>self.recordNum += <NUM_LIT:1><EOL>", "docstring": "Process one input sample.\nThis method is called by the runtime engine.\n\n:param inputs: (dict) mapping region input names to numpy.array values\n:param outputs: (dict) mapping region output names to numpy.arrays that \n       should be populated with output values by this method", "id": "f17619:c0:m9"}
{"signature": "def _finishSphering(self):", "body": "<EOL>self._normOffset = self._samples.mean(axis=<NUM_LIT:0>) * -<NUM_LIT:1.0><EOL>self._samples += self._normOffset<EOL>variance = self._samples.var(axis=<NUM_LIT:0>)<EOL>variance[numpy.where(variance == <NUM_LIT:0.0>)] = <NUM_LIT:1.0><EOL>self._normScale  = <NUM_LIT:1.0> / numpy.sqrt(variance)<EOL>self._samples *= self._normScale<EOL>for sampleIndex in range(len(self._labels)):<EOL><INDENT>self._knn.learn(self._samples[sampleIndex],<EOL>self._labels[sampleIndex],<EOL>self._partitions[sampleIndex])<EOL><DEDENT>", "docstring": "Compute normalization constants for each feature dimension\nbased on the collected training samples.  Then normalize our\ntraining samples using these constants (so that each input\ndimension has mean and variance of zero and one, respectively.)\nThen feed these \"sphered\" training samples into the underlying\nSVM model.", "id": "f17620:c0:m29"}
{"signature": "def handleLogInput(self, inputs):", "body": "if self._tapFileIn is not None:<EOL><INDENT>for input in inputs:<EOL><INDENT>for k in range(len(input)):<EOL><INDENT>print(input[k], end='<STR_LIT:U+0020>', file=self._tapFileIn)<EOL><DEDENT>print(file=self._tapFileIn)<EOL><DEDENT><DEDENT>", "docstring": "Write inputs to output tap file.\n\n:param inputs: (iter) some inputs.", "id": "f17620:c0:m19"}
{"signature": "def getParameter(self, name, index=-<NUM_LIT:1>):", "body": "if name == \"<STR_LIT>\":<EOL><INDENT>return self._knn._numPatterns<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._getPatternMatrix()<EOL><DEDENT>elif name == \"<STR_LIT:k>\":<EOL><INDENT>return self._knn.k<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._knn.distanceNorm<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._knn.distanceMethod<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._knn.distThreshold<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._knn.binarizationThreshold<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._knn.doBinarization<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._knn.useSparseMemory<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._knn.sparseThreshold<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._knn.numWinners<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._knn.relativeThreshold<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>v = self._knn.numSVDSamples<EOL>return v if v is not None else <NUM_LIT:0><EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>v = self._knn.numSVDDims<EOL>return v if v is not None else <NUM_LIT:0><EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>v = self._knn.fractionOfMax<EOL>return v if v is not None else <NUM_LIT:0><EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._useAuxiliary<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._justUseAuxiliary<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._doSphering<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self._knn.cellsPerCol<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>return self.maxStoredPatterns<EOL><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>return self._knn._categoryRecencyList<EOL><DEDENT>else:<EOL><INDENT>return PyRegion.getParameter(self, name, 
index)<EOL><DEDENT>", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getParameter`.", "id": "f17620:c0:m13"}
{"signature": "def compute(self, inputs, outputs):", "body": "<EOL>if self._useAuxiliary is None:<EOL><INDENT>self._useAuxiliary = False<EOL><DEDENT>if self._firstComputeCall:<EOL><INDENT>self._firstComputeCall = False<EOL>if self._useAuxiliary:<EOL><INDENT>if self._justUseAuxiliary == True:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT><DEDENT><DEDENT>inputVector = inputs['<STR_LIT>']<EOL>if self._useAuxiliary==True:<EOL><INDENT>auxVector = inputs['<STR_LIT>']<EOL>if auxVector.dtype != numpy.float32:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>if self._justUseAuxiliary == True:<EOL><INDENT>inputVector = inputs['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>inputVector = numpy.concatenate([inputVector, inputs['<STR_LIT>']])<EOL><DEDENT><DEDENT>self.handleLogInput([inputVector])<EOL>assert \"<STR_LIT>\" in inputs, \"<STR_LIT>\"<EOL>categories = inputs['<STR_LIT>']<EOL>if \"<STR_LIT>\" in inputs:<EOL><INDENT>assert len(inputs[\"<STR_LIT>\"]) == <NUM_LIT:1>, \"<STR_LIT>\"<EOL>partInput = inputs['<STR_LIT>']<EOL>assert len(partInput) == <NUM_LIT:1>, \"<STR_LIT>\"<EOL>partition = int(partInput[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>partition = None<EOL><DEDENT>if self.inferenceMode:<EOL><INDENT>categoriesOut = outputs['<STR_LIT>']<EOL>probabilitiesOut = outputs['<STR_LIT>']<EOL>if self._doSphering:<EOL><INDENT>inputVector = (inputVector + self._normOffset) * self._normScale<EOL><DEDENT>nPrototypes = <NUM_LIT:0><EOL>if \"<STR_LIT>\" in outputs:<EOL><INDENT>bestPrototypeIndicesOut = outputs[\"<STR_LIT>\"]<EOL>nPrototypes = len(bestPrototypeIndicesOut)<EOL><DEDENT>winner, inference, protoScores, categoryDistances =self._knn.infer(inputVector, partitionId=partition)<EOL>if not self.keepAllDistances:<EOL><INDENT>self._protoScores = protoScores<EOL><DEDENT>else:<EOL><INDENT>if self._protoScores is None:<EOL><INDENT>self._protoScores = numpy.zeros((<NUM_LIT:1>, protoScores.shape[<NUM_LIT:0>]),<EOL>protoScores.dtype)<EOL>self._protoScores[<NUM_LIT:0>,:] = 
protoScores<EOL>self._protoScoreCount = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>if self._protoScoreCount == self._protoScores.shape[<NUM_LIT:0>]:<EOL><INDENT>newProtoScores = numpy.zeros((self._protoScores.shape[<NUM_LIT:0>] * <NUM_LIT:2>,<EOL>self._protoScores.shape[<NUM_LIT:1>]),<EOL>self._protoScores.dtype)<EOL>newProtoScores[:self._protoScores.shape[<NUM_LIT:0>],:] = self._protoScores<EOL>self._protoScores = newProtoScores<EOL><DEDENT>self._protoScores[self._protoScoreCount,:] = protoScores<EOL>self._protoScoreCount += <NUM_LIT:1><EOL><DEDENT><DEDENT>self._categoryDistances = categoryDistances<EOL>if self.outputProbabilitiesByDist:<EOL><INDENT>scores = <NUM_LIT:1.0> - self._categoryDistances<EOL><DEDENT>else:<EOL><INDENT>scores = inference<EOL><DEDENT>total = scores.sum()<EOL>if total == <NUM_LIT:0>:<EOL><INDENT>numScores = len(scores)<EOL>probabilities = numpy.ones(numScores) / numScores<EOL><DEDENT>else:<EOL><INDENT>probabilities = scores / total<EOL><DEDENT>nout = min(len(categoriesOut), len(inference))<EOL>categoriesOut.fill(<NUM_LIT:0>)<EOL>categoriesOut[<NUM_LIT:0>:nout] = inference[<NUM_LIT:0>:nout]<EOL>probabilitiesOut.fill(<NUM_LIT:0>)<EOL>probabilitiesOut[<NUM_LIT:0>:nout] = probabilities[<NUM_LIT:0>:nout]<EOL>if self.verbosity >= <NUM_LIT:1>:<EOL><INDENT>print(\"<STR_LIT>\", categoriesOut[<NUM_LIT:0>:nout])<EOL>print(\"<STR_LIT>\", probabilitiesOut[<NUM_LIT:0>:nout])<EOL><DEDENT>if self._scanInfo is not None:<EOL><INDENT>self._scanResults = [tuple(inference[:nout])]<EOL><DEDENT>for category in categories:<EOL><INDENT>if category >= <NUM_LIT:0>:<EOL><INDENT>dims = max(int(category)+<NUM_LIT:1>, len(inference))<EOL>oldDims = len(self.confusion)<EOL>if oldDims < dims:<EOL><INDENT>confusion = numpy.zeros((dims, dims))<EOL>confusion[<NUM_LIT:0>:oldDims, <NUM_LIT:0>:oldDims] = self.confusion<EOL>self.confusion = confusion<EOL><DEDENT>self.confusion[inference.argmax(), int(category)] += <NUM_LIT:1><EOL><DEDENT><DEDENT>if nPrototypes > 
<NUM_LIT:1>:<EOL><INDENT>bestPrototypeIndicesOut.fill(<NUM_LIT:0>)<EOL>if categoryDistances is not None:<EOL><INDENT>indices = categoryDistances.argsort()<EOL>nout = min(len(indices), nPrototypes)<EOL>bestPrototypeIndicesOut[<NUM_LIT:0>:nout] = indices[<NUM_LIT:0>:nout]<EOL><DEDENT><DEDENT>elif nPrototypes == <NUM_LIT:1>:<EOL><INDENT>if (categoryDistances is not None) and len(categoryDistances):<EOL><INDENT>bestPrototypeIndicesOut[<NUM_LIT:0>] = categoryDistances.argmin()<EOL><DEDENT>else:<EOL><INDENT>bestPrototypeIndicesOut[<NUM_LIT:0>] = <NUM_LIT:0><EOL><DEDENT><DEDENT>self.handleLogOutput(inference)<EOL><DEDENT>if self.learningMode:<EOL><INDENT>if (self.acceptanceProbability < <NUM_LIT:1.0>) and(self._rgen.getReal64() > self.acceptanceProbability):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>for category in categories:<EOL><INDENT>if category >= <NUM_LIT:0>:<EOL><INDENT>if self._doSphering:<EOL><INDENT>self._storeSample(inputVector, category, partition)<EOL><DEDENT>else:<EOL><INDENT>self._knn.learn(inputVector, category, partition)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>self._epoch += <NUM_LIT:1><EOL>", "docstring": "Process one input sample. This method is called by the runtime engine.\n\n.. note:: the number of input categories may vary, but the array size is \n   fixed to the max number of categories allowed (by a lower region), so \n   \"unused\" indices of the input category array are filled with -1s.\n\nTODO: confusion matrix does not support multi-label classification\n\n:param inputs: (dict) mapping region input names to numpy.array values\n:param outputs: (dict) mapping region output names to numpy.arrays that \n       should be populated with output values by this method", "id": "f17620:c0:m22"}
{"signature": "def _finishLearning(self):", "body": "if self._doSphering:<EOL><INDENT>self._finishSphering()<EOL><DEDENT>self._knn.finishLearning()<EOL>self._accuracy = None<EOL>", "docstring": "Does nothing. Kept here for API compatibility", "id": "f17620:c0:m28"}
{"signature": "@classmethod<EOL><INDENT>def getSpec(cls):<DEDENT>", "body": "ns = dict(<EOL>description=KNNClassifierRegion.__doc__,<EOL>singleNodeOnly=True,<EOL>inputs=dict(<EOL>categoryIn=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>required=True,<EOL>regionLevel=True,<EOL>isDefaultInput=False,<EOL>requireSplitterMap=False),<EOL>bottomUpIn=dict(<EOL>description='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>required=True,<EOL>regionLevel=False,<EOL>isDefaultInput=True,<EOL>requireSplitterMap=False),<EOL>partitionIn=dict(<EOL>description='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>required=True,<EOL>regionLevel=True,<EOL>isDefaultInput=False,<EOL>requireSplitterMap=False),<EOL>auxDataIn=dict(<EOL>description='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>required=False,<EOL>regionLevel=True,<EOL>isDefaultInput=False,<EOL>requireSplitterMap=False)<EOL>),<EOL>outputs=dict(<EOL>categoriesOut=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>regionLevel=True,<EOL>isDefaultOutput=True),<EOL>bestPrototypeIndices=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>regionLevel=True,<EOL>isDefaultOutput=False),<EOL>categoryProbabilitiesOut=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>regionLevel=True,<EOL>isDefaultOutput=True),<EOL>),<EOL>parameters=dict(<EOL>learningMode=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=<NUM_LIT:1>,<EOL>accessMode='<STR_LIT>'),<EOL>inferenceMode=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>de
faultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>acceptanceProbability=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:1.0>,<EOL>accessMode='<STR_LIT>'), <EOL>confusion=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:2>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=None,<EOL>accessMode='<STR_LIT>'),<EOL>activeOutputCount=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>categoryCount=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=None,<EOL>accessMode='<STR_LIT>'),<EOL>patternCount=dict(<EOL>description='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=None,<EOL>accessMode='<STR_LIT>'),<EOL>patternMatrix=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=None,<EOL>accessMode='<STR_LIT>'),<EOL>k=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:1>,<EOL>accessMode='<STR_LIT>'),<EOL>maxCategoryCount=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:2>,<EOL>accessMode='<STR_LIT>'),<EOL>distanceNorm=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT>,<EOL>accessMode='<STR_LIT>'),<EOL>distanceMethod=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<
STR_LIT>',<EOL>dataType=\"<STR_LIT>\",<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>defaultValue='<STR_LIT>',<EOL>accessMode='<STR_LIT>'),<EOL>outputProbabilitiesByDist=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>distThreshold=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:0.0>,<EOL>accessMode='<STR_LIT>'),<EOL>inputThresh=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:0.5>,<EOL>accessMode='<STR_LIT>'),<EOL>doBinarization=dict(<EOL>description='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>useSparseMemory=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:1>,<EOL>accessMode='<STR_LIT>'),<EOL>minSparsity=dict(<EOL>description=\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:0.0>,<EOL>accessMode='<STR_LIT>'),<EOL>sparseThreshold=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:0.0>,<EOL>accessMode='<STR_LIT>'),<EOL>relativeThreshold=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>winn
erCount=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>doSphering=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>SVDSampleCount=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>SVDDimCount=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=-<NUM_LIT:1>,<EOL>accessMode='<STR_LIT>'),<EOL>fractionOfMax=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>useAuxiliary=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>justUseAuxiliary=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>verbosity=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:0> 
,<EOL>accessMode='<STR_LIT>'),<EOL>keepAllDistances=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=None,<EOL>accessMode='<STR_LIT>'),<EOL>replaceDuplicates=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=None,<EOL>accessMode='<STR_LIT>'),<EOL>cellsPerCol=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>maxStoredPatterns=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>',<EOL>defaultValue=-<NUM_LIT:1>,<EOL>accessMode='<STR_LIT>'),<EOL>),<EOL>commands=dict()<EOL>)<EOL>return ns<EOL>", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.", "id": "f17620:c0:m0"}
{"signature": "def removeCategory(self, categoryToRemove):", "body": "return self._knn.removeCategory(categoryToRemove)<EOL>", "docstring": "Removes a category.\n\n:param categoryToRemove: (string) label to remove", "id": "f17620:c0:m24"}
{"signature": "def getAlgorithmInstance(self):", "body": "return self._knn<EOL>", "docstring": ":returns: (:class:`~nupic.algorithms.knn_classifier.KNNClassifier`)", "id": "f17620:c0:m12"}
{"signature": "def setParameter(self, name, index, value):", "body": "if name == \"<STR_LIT>\":<EOL><INDENT>self.learningMode = bool(int(value))<EOL>self._epoch = <NUM_LIT:0><EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>self._epoch = <NUM_LIT:0><EOL>if int(value) and not self.inferenceMode:<EOL><INDENT>self._finishLearning()<EOL><DEDENT>self.inferenceMode = bool(int(value))<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>self._knn.distanceNorm = value<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>self._knn.distanceMethod = value<EOL><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>self.keepAllDistances = bool(value)<EOL>if not self.keepAllDistances:<EOL><INDENT>if self._protoScores is not None and self._protoScores.shape[<NUM_LIT:0>] > <NUM_LIT:1>:<EOL><INDENT>self._protoScores = self._protoScores[-<NUM_LIT:1>,:]<EOL><DEDENT>if self._protoScores is not None:<EOL><INDENT>self._protoScoreCount = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>self._protoScoreCount = <NUM_LIT:0><EOL><DEDENT><DEDENT><DEDENT>elif name == \"<STR_LIT>\":<EOL><INDENT>self.verbosity = value<EOL>self._knn.verbosity = value<EOL><DEDENT>else:<EOL><INDENT>return PyRegion.setParameter(self, name, index, value)<EOL><DEDENT>", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.setParameter`.", "id": "f17620:c0:m14"}
{"signature": "def _getEphemeralAttributes(self):", "body": "return ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>']<EOL>", "docstring": "List of attributes to not save with serialized state.", "id": "f17620:c0:m2"}
{"signature": "def getOutputElementCount(self, name):", "body": "if name == '<STR_LIT>':<EOL><INDENT>return self.maxCategoryCount<EOL><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>return self.maxCategoryCount<EOL><DEDENT>elif name == '<STR_LIT>':<EOL><INDENT>return self._bestPrototypeIndexCount if self._bestPrototypeIndexCount else <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>' + name)<EOL><DEDENT>", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.", "id": "f17620:c0:m31"}
{"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()<EOL>state['<STR_LIT>'] = self._knn.__getstate__()<EOL>del state['<STR_LIT>']<EOL>for field in self._getEphemeralAttributes():<EOL><INDENT>del state[field]<EOL><DEDENT>return state<EOL>", "docstring": "Get serializable state.", "id": "f17620:c0:m5"}
{"signature": "def enableTap(self, tapPath):", "body": "self._tapFileIn = open(tapPath + '<STR_LIT>', '<STR_LIT:w>')<EOL>self._tapFileOut = open(tapPath + '<STR_LIT>', '<STR_LIT:w>')<EOL>", "docstring": "Begin writing output tap files.\n\n:param tapPath: (string) base name of the output tap files to write.", "id": "f17620:c0:m17"}
{"signature": "def handleLogOutput(self, output):", "body": "<EOL>if self._tapFileOut is not None:<EOL><INDENT>for k in range(len(output)):<EOL><INDENT>print(output[k], end='<STR_LIT:U+0020>', file=self._tapFileOut)<EOL><DEDENT>print(file=self._tapFileOut)<EOL><DEDENT>", "docstring": "Write outputs to output tap file.\n\n:param outputs: (iter) some outputs.", "id": "f17620:c0:m20"}
{"signature": "def __setstate__(self, state):", "body": "if '<STR_LIT:version>' not in state:<EOL><INDENT>self.__dict__.update(state)<EOL><DEDENT>elif state['<STR_LIT:version>'] == <NUM_LIT:1>:<EOL><INDENT>if \"<STR_LIT>\" in state:<EOL><INDENT>state.pop(\"<STR_LIT>\")<EOL><DEDENT>knnState = state['<STR_LIT>']<EOL>del state['<STR_LIT>']<EOL>self.__dict__.update(state)<EOL>self._initEphemerals()<EOL>self._knn.__setstate__(knnState)<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>self.version = KNNClassifierRegion.__VERSION__<EOL>", "docstring": "Set state from serialized state.", "id": "f17620:c0:m4"}
{"signature": "def invariant(self):", "body": "<EOL>assert isinstance(self.description, str)<EOL>assert isinstance(self.singleNodeOnly, bool)<EOL>assert isinstance(self.inputs, dict)<EOL>assert isinstance(self.outputs, dict)<EOL>assert isinstance(self.parameters, dict)<EOL>assert isinstance(self.commands, dict)<EOL>hasDefaultInput = False<EOL>for k, v in self.inputs.items():<EOL><INDENT>assert isinstance(k, str)<EOL>assert isinstance(v, InputSpec)<EOL>v.invariant()<EOL>if v.isDefaultInput:<EOL><INDENT>assert not hasDefaultInput<EOL>hasDefaultInput = True<EOL><DEDENT><DEDENT>hasDefaultOutput = False<EOL>for k, v in self.outputs.items():<EOL><INDENT>assert isinstance(k, str)<EOL>assert isinstance(v, OutputSpec)<EOL>v.invariant()<EOL>if v.isDefaultOutput:<EOL><INDENT>assert not hasDefaultOutput<EOL>hasDefaultOutput = True<EOL><DEDENT><DEDENT>for k, v in self.parameters.items():<EOL><INDENT>assert isinstance(k, str)<EOL>assert isinstance(v, ParameterSpec)<EOL>v.invariant()<EOL><DEDENT>for k, v in self.commands.items():<EOL><INDENT>assert isinstance(k, str)<EOL>assert isinstance(v, CommandSpec)<EOL>v.invariant()<EOL><DEDENT>", "docstring": "Verify the validity of the node spec object\n\n        The type of each sub-object is verified and then\n        the validity of each node spec item is verified by calling\n        it invariant() method. It also makes sure that there is at most\n        one default input and one default output.", "id": "f17621:c4:m1"}
{"signature": "def getDefaultSPImp():", "body": "return '<STR_LIT>'<EOL>", "docstring": "Return the default spatial pooler implementation for this region.", "id": "f17623:m0"}
{"signature": "def _doTopDownInfer(self, topDownInput = None):", "body": "return None, None<EOL>", "docstring": "Do one iteration of top-down inference.\n\nParameters:\n--------------------------------------------\ntdInput:      Top-down input\n\nretval:     (spatialTopDownOut, temporalTopDownOut)\n              spatialTopDownOut is the top down output computed only from the SP,\n                using it's current bottom-up output.\n              temporalTopDownOut is the top down output computed from the topDown in\n               of the level above us.", "id": "f17623:c0:m7"}
{"signature": "def _getEphemeralMembersAll(self):", "body": "return self._getEphemeralMembersBase() + self._getEphemeralMembers()<EOL>", "docstring": "Returns a concatenated list of both the standard base class\nephemeral members, as well as any additional ephemeral members\n(e.g., file handles, etc.).", "id": "f17623:c0:m21"}
{"signature": "@classmethod<EOL><INDENT>def getBaseSpec(cls):<DEDENT>", "body": "spec = dict(<EOL>description=SPRegion.__doc__,<EOL>singleNodeOnly=True,<EOL>inputs=dict(<EOL>bottomUpIn=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>required=True,<EOL>regionLevel=False,<EOL>isDefaultInput=True,<EOL>requireSplitterMap=False),<EOL>resetIn=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>required=False,<EOL>regionLevel=True,<EOL>isDefaultInput=False,<EOL>requireSplitterMap=False),<EOL>topDownIn=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>required = False,<EOL>regionLevel=True,<EOL>isDefaultInput=False,<EOL>requireSplitterMap=False),<EOL>sequenceIdIn=dict(<EOL>description=\"<STR_LIT>\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>required=False,<EOL>regionLevel=True,<EOL>isDefaultInput=False,<EOL>requireSplitterMap=False),<EOL>),<EOL>outputs=dict(<EOL>bottomUpOut=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>regionLevel=True,<EOL>isDefaultOutput=True),<EOL>topDownOut=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>regionLevel=True,<EOL>isDefaultOutput=False),<EOL>spatialTopDownOut = dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>regionLevel=True,<EOL>isDefaultOutput=False),<EOL>temporalTopDownOut = dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>regionLevel=True,<EOL>isDefaultOutput=False),<EOL>anomalyScore = 
dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>regionLevel=True,<EOL>isDefaultOutput=False),<EOL>),<EOL>parameters=dict(<EOL>breakPdb=dict(<EOL>description='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>breakKomodo=dict(<EOL>description='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>',<EOL>defaultValue=<NUM_LIT:0>,<EOL>accessMode='<STR_LIT>'),<EOL>),<EOL>)<EOL>return spec<EOL>", "docstring": "Doesn't include the spatial, temporal and other parameters\n\n:returns: (dict) The base Spec for SPRegion.", "id": "f17623:c0:m8"}
{"signature": "def getSPClass(spatialImp):", "body": "if spatialImp == '<STR_LIT>':<EOL><INDENT>return PYSpatialPooler<EOL><DEDENT>elif spatialImp == '<STR_LIT>':<EOL><INDENT>return CPPSpatialPooler<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (spatialImp))<EOL><DEDENT>", "docstring": "Return the class corresponding to the given spatialImp string", "id": "f17623:m1"}
{"signature": "def initialize(self):", "body": "<EOL>self._spatialPoolerOutput = numpy.zeros(self.columnCount,<EOL>dtype=GetNTAReal())<EOL>self._spatialPoolerInput = numpy.zeros((<NUM_LIT:1>, self.inputWidth),<EOL>dtype=GetNTAReal())<EOL>self._allocateSpatialFDR(None)<EOL>", "docstring": "Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.initialize`.", "id": "f17623:c0:m2"}
{"signature": "def compute(self, inputs, outputs):", "body": "<EOL>if False and self.learningModeand self._iterations > <NUM_LIT:0> and self._iterations <= <NUM_LIT:10>:<EOL><INDENT>import hotshot<EOL>if self._iterations == <NUM_LIT:10>:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>stats = hotshot.stats.load(\"<STR_LIT>\")<EOL>stats.strip_dirs()<EOL>stats.sort_stats('<STR_LIT:time>', '<STR_LIT>')<EOL>stats.print_stats()<EOL><DEDENT>if self._profileObj is None:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>if os.path.exists('<STR_LIT>'):<EOL><INDENT>os.remove('<STR_LIT>')<EOL><DEDENT>self._profileObj = hotshot.Profile(\"<STR_LIT>\", <NUM_LIT:1>, <NUM_LIT:1>)<EOL><DEDENT>self._profileObj.runcall(self._compute, *[inputs, outputs])<EOL><DEDENT>else:<EOL><INDENT>self._compute(inputs, outputs)<EOL><DEDENT>", "docstring": "Run one iteration, profiling it if requested.\n\n:param inputs: (dict) mapping region input names to numpy.array values\n:param outputs: (dict) mapping region output names to numpy.arrays that \n       should be populated with output values by this method", "id": "f17623:c0:m4"}
{"signature": "def _doBottomUpCompute(self, rfInput, resetSignal):", "body": "<EOL>self._conditionalBreak()<EOL>self._spatialPoolerInput = rfInput.reshape(-<NUM_LIT:1>)<EOL>assert(rfInput.shape[<NUM_LIT:0>] == <NUM_LIT:1>)<EOL>inputVector = numpy.array(rfInput[<NUM_LIT:0>]).astype('<STR_LIT>')<EOL>outputVector = numpy.zeros(self._sfdr.getNumColumns()).astype('<STR_LIT>')<EOL>self._sfdr.compute(inputVector, self.learningMode, outputVector)<EOL>self._spatialPoolerOutput[:] = outputVector[:]<EOL>if self._fpLogSP:<EOL><INDENT>output = self._spatialPoolerOutput.reshape(-<NUM_LIT:1>)<EOL>outputNZ = output.nonzero()[<NUM_LIT:0>]<EOL>outStr = \"<STR_LIT:U+0020>\".join([\"<STR_LIT>\" % int(token) for token in outputNZ])<EOL>print(output.size, outStr, file=self._fpLogSP)<EOL><DEDENT>if self._fpLogSPInput:<EOL><INDENT>output = rfInput.reshape(-<NUM_LIT:1>)<EOL>outputNZ = output.nonzero()[<NUM_LIT:0>]<EOL>outStr = \"<STR_LIT:U+0020>\".join([\"<STR_LIT>\" % int(token) for token in outputNZ])<EOL>print(output.size, outStr, file=self._fpLogSPInput)<EOL><DEDENT>return self._spatialPoolerOutput<EOL>", "docstring": "Do one iteration of inference and/or learning and return the result\n\nParameters:\n--------------------------------------------\nrfInput:      Input vector. Shape is: (1, inputVectorLen).\nresetSignal:  True if reset is asserted", "id": "f17623:c0:m6"}
{"signature": "def _getAdditionalSpecs(spatialImp, kwargs={}):", "body": "typeNames = {int: '<STR_LIT>', float: '<STR_LIT>', str: '<STR_LIT>', bool: '<STR_LIT:bool>', tuple: '<STR_LIT>'}<EOL>def getArgType(arg):<EOL><INDENT>t = typeNames.get(type(arg), '<STR_LIT>')<EOL>count = <NUM_LIT:0> if t == '<STR_LIT>' else <NUM_LIT:1><EOL>if t == '<STR_LIT>':<EOL><INDENT>t = typeNames.get(type(arg[<NUM_LIT:0>]), '<STR_LIT>')<EOL>count = len(arg)<EOL><DEDENT>if t == '<STR_LIT:bool>':<EOL><INDENT>t = '<STR_LIT>'<EOL><DEDENT>return (t, count)<EOL><DEDENT>def getConstraints(arg):<EOL><INDENT>t = typeNames.get(type(arg), '<STR_LIT>')<EOL>if t == '<STR_LIT>':<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif t == '<STR_LIT:bool>':<EOL><INDENT>return '<STR_LIT:bool>'<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT><DEDENT>SpatialClass = getSPClass(spatialImp)<EOL>sArgTuples = _buildArgs(SpatialClass.__init__)<EOL>spatialSpec = {}<EOL>for argTuple in sArgTuples:<EOL><INDENT>d = dict(<EOL>description=argTuple[<NUM_LIT:1>],<EOL>accessMode='<STR_LIT>',<EOL>dataType=getArgType(argTuple[<NUM_LIT:2>])[<NUM_LIT:0>],<EOL>count=getArgType(argTuple[<NUM_LIT:2>])[<NUM_LIT:1>],<EOL>constraints=getConstraints(argTuple[<NUM_LIT:2>]))<EOL>spatialSpec[argTuple[<NUM_LIT:0>]] = 
d<EOL><DEDENT>spatialSpec.update(dict(<EOL>columnCount=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>'),<EOL>inputWidth=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>'),<EOL>spInputNonZeros=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>spOutputNonZeros=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>spOverlapDistribution=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>sparseCoincidenceMatrix=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>denseOutput=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>spLearningStatsStr=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>spatialImp=dict(<EOL>description=\"\"\"<STR_LIT>\"\"\",<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>))<EOL>otherSpec = 
dict(<EOL>learningMode=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>'),<EOL>inferenceMode=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>'),<EOL>anomalyMode=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>'),<EOL>topDownMode=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT:bool>'),<EOL>activeOutputCount=dict(<EOL>description='<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:1>,<EOL>constraints='<STR_LIT>'),<EOL>logPathInput=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>logPathOutput=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>logPathOutputDense=dict(<EOL>description='<STR_LIT>'<EOL>'<STR_LIT>',<EOL>accessMode='<STR_LIT>',<EOL>dataType='<STR_LIT>',<EOL>count=<NUM_LIT:0>,<EOL>constraints='<STR_LIT>'),<EOL>)<EOL>return spatialSpec, otherSpec<EOL>", "docstring": "Build the additional specs in three groups (for the inspector)\n\n    Use the type of the default argument to set the Spec type, defaulting\n    to 'Byte' for None and complex types\n\n    Determines the spatial parameters based on the selected implementation.\n    It defaults to SpatialPooler.", "id": "f17623:m3"}
{"signature": "def __setstate__(self, state):", "body": "self.__dict__.update(state)<EOL>self._loaded = True<EOL>if not hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>self.SpatialClass = self._sfdr.__class__<EOL><DEDENT>self._initializeEphemeralMembers()<EOL>self._allocateSpatialFDR(None)<EOL>", "docstring": "Set the state of ourself from a serialized state.", "id": "f17623:c0:m17"}
{"signature": "def _getEphemeralMembersBase(self):", "body": "return [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>]<EOL>", "docstring": "Returns list of all ephemeral members.", "id": "f17623:c0:m20"}
{"signature": "def _labelToCategoryNumber(self, label):", "body": "if label not in self.saved_categories:<EOL><INDENT>self.saved_categories.append(label)<EOL><DEDENT>return pow(<NUM_LIT:2>, self.saved_categories.index(label))<EOL>", "docstring": "Since the KNN Classifier stores categories as numbers, we must store each\nlabel as a number. This method converts from a label to a unique number.\nEach label is assigned a unique bit so multiple labels may be assigned to\na single record.", "id": "f17624:c0:m14"}
{"signature": "def getLabels(self, start=None, end=None):", "body": "if len(self._recordsCache) == <NUM_LIT:0>:<EOL><INDENT>return {<EOL>'<STR_LIT>': False,<EOL>'<STR_LIT>': []<EOL>}<EOL><DEDENT>try:<EOL><INDENT>start = int(start)<EOL><DEDENT>except Exception:<EOL><INDENT>start = <NUM_LIT:0><EOL><DEDENT>try:<EOL><INDENT>end = int(end)<EOL><DEDENT>except Exception:<EOL><INDENT>end = self._recordsCache[-<NUM_LIT:1>].ROWID<EOL><DEDENT>if end <= start:<EOL><INDENT>raise HTMPredictionModelInvalidRangeError(\"<STR_LIT>\",<EOL>debugInfo={<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': start,<EOL>'<STR_LIT>': end<EOL>},<EOL>'<STR_LIT>': len(self._recordsCache)<EOL>})<EOL><DEDENT>results = {<EOL>'<STR_LIT>': False,<EOL>'<STR_LIT>': []<EOL>}<EOL>ROWIDX = numpy.array(<EOL>self._knnclassifier.getParameter('<STR_LIT>'))<EOL>validIdx = numpy.where((ROWIDX >= start) & (ROWIDX < end))[<NUM_LIT:0>].tolist()<EOL>categories = self._knnclassifier.getCategoryList()<EOL>for idx in validIdx:<EOL><INDENT>row = dict(<EOL>ROWID=int(ROWIDX[idx]),<EOL>labels=self._categoryToLabelList(categories[idx]))<EOL>results['<STR_LIT>'].append(row)<EOL><DEDENT>return results<EOL>", "docstring": "Get the labels on classified points within range start to end. Not inclusive\nof end.\n\n:returns: (dict) with format:\n\n  ::\n\n    {\n      'isProcessing': boolean,\n      'recordLabels': list of results\n    }\n\n  ``isProcessing`` - currently always false as recalculation blocks; used if\n  reprocessing of records is still being performed;\n\n  Each item in ``recordLabels`` is of format:\n\n  ::\n\n    {\n      'ROWID': id of the row,\n      'labels': list of strings\n    }", "id": "f17624:c0:m18"}
{"signature": "def getLabelResults(self):", "body": "return self.labelResults<EOL>", "docstring": "Get the labels of the previously computed record.\n\n:returns: (list) of strings representing the classification labels", "id": "f17624:c0:m6"}
{"signature": "def compute(self, inputs, outputs):", "body": "record = self._constructClassificationRecord(inputs)<EOL>if record.ROWID >= self.getParameter('<STR_LIT>'):<EOL><INDENT>self._classifyState(record)<EOL><DEDENT>self._recordsCache.append(record)<EOL>while len(self._recordsCache) > self.cacheSize:<EOL><INDENT>self._recordsCache.pop(<NUM_LIT:0>)<EOL><DEDENT>self.labelResults = record.anomalyLabel<EOL>self._iteration += <NUM_LIT:1><EOL>", "docstring": "Process one input sample.\nThis method is called by the runtime engine.", "id": "f17624:c0:m5"}
{"signature": "def getOutputElementCount(self, name):", "body": "if name == '<STR_LIT>':<EOL><INDENT>return self._maxLabelOutputs<EOL><DEDENT>else:<EOL><INDENT>raise Exception(\"<STR_LIT>\")<EOL><DEDENT>", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getOutputElementCount`.", "id": "f17624:c0:m27"}
{"signature": "def _deleteRangeFromKNN(self, start=<NUM_LIT:0>, end=None):", "body": "prototype_idx = numpy.array(<EOL>self._knnclassifier.getParameter('<STR_LIT>'))<EOL>if end is None:<EOL><INDENT>end = prototype_idx.max() + <NUM_LIT:1><EOL><DEDENT>idsIdxToDelete = numpy.logical_and(prototype_idx >= start,<EOL>prototype_idx < end)<EOL>idsToDelete = prototype_idx[idsIdxToDelete]<EOL>nProtos = self._knnclassifier._knn._numPatterns<EOL>self._knnclassifier._knn.removeIds(idsToDelete.tolist())<EOL>assert self._knnclassifier._knn._numPatterns == nProtos - len(idsToDelete)<EOL>", "docstring": "Removes any stored records within the range from start to\nend. Noninclusive of end.\n\nparameters\n------------\nstart - integer representing the ROWID of the start of the deletion range,\nend - integer representing the ROWID of the end of the deletion range,\n  if None, it will default to end.", "id": "f17624:c0:m12"}
{"signature": "def addLabel(self, start, end, labelName):", "body": "if len(self._recordsCache) == <NUM_LIT:0>:<EOL><INDENT>raise HTMPredictionModelInvalidRangeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>try:<EOL><INDENT>start = int(start)<EOL><DEDENT>except Exception:<EOL><INDENT>start = <NUM_LIT:0><EOL><DEDENT>try:<EOL><INDENT>end = int(end)<EOL><DEDENT>except Exception:<EOL><INDENT>end = int(self._recordsCache[-<NUM_LIT:1>].ROWID)<EOL><DEDENT>startID = self._recordsCache[<NUM_LIT:0>].ROWID<EOL>clippedStart = max(<NUM_LIT:0>, start - startID)<EOL>clippedEnd = max(<NUM_LIT:0>, min( len( self._recordsCache) , end - startID))<EOL>if clippedEnd <= clippedStart:<EOL><INDENT>raise HTMPredictionModelInvalidRangeError(\"<STR_LIT>\",<EOL>debugInfo={<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': start,<EOL>'<STR_LIT>': end<EOL>},<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': clippedStart,<EOL>'<STR_LIT>': clippedEnd<EOL>},<EOL>'<STR_LIT>': {<EOL>'<STR_LIT>': startID,<EOL>'<STR_LIT>': self._recordsCache[len(self._recordsCache)-<NUM_LIT:1>].ROWID<EOL>},<EOL>'<STR_LIT>': len(self._recordsCache)<EOL>})<EOL><DEDENT>for state in self._recordsCache[clippedStart:clippedEnd]:<EOL><INDENT>if labelName not in state.anomalyLabel:<EOL><INDENT>state.anomalyLabel.append(labelName)<EOL>state.setByUser = True<EOL>self._addRecordToKNN(state)<EOL><DEDENT><DEDENT>assert len(self.saved_categories) > <NUM_LIT:0><EOL>for state in self._recordsCache[clippedEnd:]:<EOL><INDENT>self._classifyState(state)<EOL><DEDENT>", "docstring": "Add the label labelName to each record with record ROWID in range from\n``start`` to ``end``, noninclusive of end.\n\nThis will recalculate all points from end to the last record stored in the\ninternal cache of this classifier.\n\n:param start: (int) start index \n:param end: (int) end index (noninclusive)\n:param labelName: (string) label name", "id": "f17624:c0:m19"}
{"signature": "def _classifyState(self, state):", "body": "<EOL>if state.ROWID < self.getParameter('<STR_LIT>'):<EOL><INDENT>if not state.setByUser:<EOL><INDENT>state.anomalyLabel = []<EOL>self._deleteRecordsFromKNN([state])<EOL><DEDENT>return<EOL><DEDENT>label = KNNAnomalyClassifierRegion.AUTO_THRESHOLD_CLASSIFIED_LABEL<EOL>autoLabel = label + KNNAnomalyClassifierRegion.AUTO_TAG<EOL>newCategory = self._recomputeRecordFromKNN(state)<EOL>labelList = self._categoryToLabelList(newCategory)<EOL>if state.setByUser:<EOL><INDENT>if label in state.anomalyLabel:<EOL><INDENT>state.anomalyLabel.remove(label)<EOL><DEDENT>if autoLabel in state.anomalyLabel:<EOL><INDENT>state.anomalyLabel.remove(autoLabel)<EOL><DEDENT>labelList.extend(state.anomalyLabel)<EOL><DEDENT>if state.anomalyScore >= self.getParameter('<STR_LIT>'):<EOL><INDENT>labelList.append(label)<EOL><DEDENT>elif label in labelList:<EOL><INDENT>ind = labelList.index(label)<EOL>labelList[ind] = autoLabel<EOL><DEDENT>labelList = list(set(labelList))<EOL>if label in labelList and autoLabel in labelList:<EOL><INDENT>labelList.remove(autoLabel)<EOL><DEDENT>if state.anomalyLabel == labelList:<EOL><INDENT>return<EOL><DEDENT>state.anomalyLabel = labelList<EOL>if state.anomalyLabel == []:<EOL><INDENT>self._deleteRecordsFromKNN([state])<EOL><DEDENT>else:<EOL><INDENT>self._addRecordToKNN(state)<EOL><DEDENT>", "docstring": "Reclassifies given state.", "id": "f17624:c0:m8"}
{"signature": "def _recomputeRecordFromKNN(self, record):", "body": "inputs = {<EOL>\"<STR_LIT>\": [None],<EOL>\"<STR_LIT>\": self._getStateAnomalyVector(record),<EOL>}<EOL>outputs = {\"<STR_LIT>\": numpy.zeros((<NUM_LIT:1>,)),<EOL>\"<STR_LIT>\":numpy.zeros((<NUM_LIT:1>,)),<EOL>\"<STR_LIT>\":numpy.zeros((<NUM_LIT:1>,))}<EOL>classifier_indexes = numpy.array(<EOL>self._knnclassifier.getParameter('<STR_LIT>'))<EOL>valid_idx = numpy.where(<EOL>(classifier_indexes >= self.getParameter('<STR_LIT>')) &<EOL>(classifier_indexes < record.ROWID)<EOL>)[<NUM_LIT:0>].tolist()<EOL>if len(valid_idx) == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>self._knnclassifier.setParameter('<STR_LIT>', None, True)<EOL>self._knnclassifier.setParameter('<STR_LIT>', None, False)<EOL>self._knnclassifier.compute(inputs, outputs)<EOL>self._knnclassifier.setParameter('<STR_LIT>', None, True)<EOL>classifier_distances = self._knnclassifier.getLatestDistances()<EOL>valid_distances = classifier_distances[valid_idx]<EOL>if valid_distances.min() <= self._classificationMaxDist:<EOL><INDENT>classifier_indexes_prev = classifier_indexes[valid_idx]<EOL>rowID = classifier_indexes_prev[valid_distances.argmin()]<EOL>indexID = numpy.where(classifier_indexes == rowID)[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>category = self._knnclassifier.getCategoryList()[indexID]<EOL>return category<EOL><DEDENT>return None<EOL>", "docstring": "returns the classified labeling of record", "id": "f17624:c0:m13"}
{"signature": "def _addRecordToKNN(self, record):", "body": "knn = self._knnclassifier._knn<EOL>prototype_idx = self._knnclassifier.getParameter('<STR_LIT>')<EOL>category = self._labelListToCategoryNumber(record.anomalyLabel)<EOL>if record.ROWID in prototype_idx:<EOL><INDENT>knn.prototypeSetCategory(record.ROWID, category)<EOL>return<EOL><DEDENT>pattern = self._getStateAnomalyVector(record)<EOL>rowID = record.ROWID<EOL>knn.learn(pattern, category, rowID=rowID)<EOL>", "docstring": "Adds the record to the KNN classifier.", "id": "f17624:c0:m10"}
{"signature": "def _constructClassificationRecord(self, inputs):", "body": "<EOL>allSPColumns = inputs[\"<STR_LIT>\"]<EOL>activeSPColumns = allSPColumns.nonzero()[<NUM_LIT:0>]<EOL>score = anomaly.computeRawAnomalyScore(activeSPColumns,<EOL>self._prevPredictedColumns)<EOL>spSize = len(allSPColumns)<EOL>allTPCells = inputs['<STR_LIT>']<EOL>tpSize = len(inputs['<STR_LIT>'])<EOL>classificationVector = numpy.array([])<EOL>if self.classificationVectorType == <NUM_LIT:1>:<EOL><INDENT>classificationVector = numpy.zeros(tpSize)<EOL>activeCellMatrix = inputs[\"<STR_LIT>\"].reshape(tpSize, <NUM_LIT:1>)<EOL>activeCellIdx = numpy.where(activeCellMatrix > <NUM_LIT:0>)[<NUM_LIT:0>]<EOL>if activeCellIdx.shape[<NUM_LIT:0>] > <NUM_LIT:0>:<EOL><INDENT>classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = <NUM_LIT:1><EOL><DEDENT><DEDENT>elif self.classificationVectorType == <NUM_LIT:2>:<EOL><INDENT>classificationVector = numpy.zeros(spSize+spSize)<EOL>if activeSPColumns.shape[<NUM_LIT:0>] > <NUM_LIT:0>:<EOL><INDENT>classificationVector[activeSPColumns] = <NUM_LIT:1.0><EOL><DEDENT>errorColumns = numpy.setdiff1d(self._prevPredictedColumns,<EOL>activeSPColumns)<EOL>if errorColumns.shape[<NUM_LIT:0>] > <NUM_LIT:0>:<EOL><INDENT>errorColumnIndexes = ( numpy.array(errorColumns, dtype=numpy.uint16) +<EOL>spSize )<EOL>classificationVector[errorColumnIndexes] = <NUM_LIT:1.0><EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise TypeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (self.classificationVectorType))<EOL><DEDENT>numPredictedCols = len(self._prevPredictedColumns)<EOL>predictedColumns = allTPCells.nonzero()[<NUM_LIT:0>]<EOL>self._prevPredictedColumns = copy.deepcopy(predictedColumns)<EOL>if self._anomalyVectorLength is None:<EOL><INDENT>self._anomalyVectorLength = len(classificationVector)<EOL><DEDENT>result = _CLAClassificationRecord(<EOL>ROWID=self._iteration, 
<EOL>anomalyScore=score,<EOL>anomalyVector=classificationVector.nonzero()[<NUM_LIT:0>].tolist(),<EOL>anomalyLabel=[]<EOL>)<EOL>return result<EOL>", "docstring": "Construct a _HTMClassificationRecord based on the state of the model\npassed in through the inputs.\n\nTypes for self.classificationVectorType:\n  1 - TM active cells in learn state\n  2 - SP columns concatenated with error from TM column predictions and SP", "id": "f17624:c0:m9"}
{"signature": "def _getStateAnomalyVector(self, state):", "body": "vector = numpy.zeros(self._anomalyVectorLength)<EOL>vector[state.anomalyVector] = <NUM_LIT:1><EOL>return vector<EOL>", "docstring": "Returns a state's anomaly vertor converting it from spare to dense", "id": "f17624:c0:m17"}
{"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()<EOL>state['<STR_LIT>'] = state['<STR_LIT>'].__getstate__()<EOL>state.pop('<STR_LIT>')<EOL>return state<EOL>", "docstring": "Return serializable state.  This function will return a version of the\n__dict__ with all \"ephemeral\" members stripped out.  \"Ephemeral\" members\nare defined as those that do not need to be (nor should be) stored\nin any kind of persistent file (e.g., NuPIC network XML file.)", "id": "f17624:c0:m24"}
{"signature": "def setIdentityPolicyInstance(self, identityPolicyObj):", "body": "assert not self.identityPolicy<EOL>assert isinstance(identityPolicyObj, RegionIdentityPolicyBase)<EOL>self.identityPolicy = identityPolicyObj<EOL>return<EOL>", "docstring": "TestRegion command that sets identity policy instance.  The instance\n        MUST be derived from TestRegion's RegionIdentityPolicyBase class.\n\n        Users MUST set the identity instance BEFORE running the network\n\n        Exception: AssertionError if identity policy instance has already been set\n                   or if the passed-in instance is not derived from\n                   RegionIdentityPolicyBase.", "id": "f17625:c1:m10"}
{"signature": "def initialize(self):", "body": "self.identityPolicy.initialize(self)<EOL>_debugOut(self.identityPolicy.getName())<EOL>", "docstring": "Called by network after all links have been set up", "id": "f17625:c1:m2"}
{"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()<EOL>state.pop('<STR_LIT>')<EOL>return state<EOL>", "docstring": "Return serializable state.  This function will return a version of the\n__dict__ with all \"ephemeral\" members stripped out.  \"Ephemeral\" members\nare defined as those that do not need to be (nor should be) stored\nin any kind of persistent file (e.g., NuPIC network XML file.)", "id": "f17625:c1:m15"}
{"signature": "@abstractmethod<EOL><INDENT>def compute(self, inputs, outputs):<DEDENT>", "body": "", "docstring": "Perform the main computation\n\n        This method is called in each iteration for each phase the node supports.\n\n        Called from the scope of the region's PyRegion.compute() method.\n\n        inputs: dict of numpy arrays (one per input)\n        outputs: dict of numpy arrays (one per output)", "id": "f17625:c0:m1"}
{"signature": "def __setstate__(self, state):", "body": "assert '<STR_LIT>' not in state<EOL>self.__dict__.update(state)<EOL>self.__constructEphemeralInstanceVars()<EOL>return<EOL>", "docstring": "Set the state of ourself from a serialized state.", "id": "f17625:c1:m16"}
{"signature": "def whois_callers_caller():", "body": "import inspect<EOL>frameObj = inspect.stack()[<NUM_LIT:2>][<NUM_LIT:0>]<EOL>return inspect.getframeinfo(frameObj)<EOL>", "docstring": "Returns: Traceback namedtuple for our caller's caller", "id": "f17625:m1"}
{"signature": "def getParameter(self, parameterName, index=-<NUM_LIT:1>):", "body": "assert not (parameterName in self.__dict__ and parameterName in self.ephemeral)<EOL>if parameterName in self.ephemeral:<EOL><INDENT>assert parameterName not in self.__dict__<EOL>return self.ephemeral[parameterName]<EOL><DEDENT>else:<EOL><INDENT>return super(PyRegion, self).getParameter(parameterName, index)<EOL><DEDENT>", "docstring": "Get the value of a NodeSpec parameter. Most parameters are handled\nautomatically by PyRegion's parameter get mechanism. The ones that need\nspecial treatment are explicitly handled here.", "id": "f17625:c1:m8"}
{"signature": "def compute(self, inputs, outputs):", "body": "<EOL>self.identityPolicy.compute(inputs, outputs)<EOL>_debugOut((\"<STR_LIT>\") %(self.identityPolicy.getName(),inputs, outputs))<EOL>return<EOL>", "docstring": "Run one iteration of the region's compute.\n\nThe guts of the compute are contained in the _compute() call so that\nwe can profile it if requested.", "id": "f17625:c1:m3"}
{"signature": "@abstractmethod<EOL><INDENT>def getOutputElementCount(self, name):<DEDENT>", "body": "", "docstring": "Return the number of elements in the given output of the region\n\n        Called from the scope of the region's PyRegion.getOutputElementCount() method.\n\n        name: the name of the output", "id": "f17625:c0:m2"}
{"signature": "@staticmethod<EOL><INDENT>def getSchema():<DEDENT>", "body": "return RecordSensorProto<EOL>", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSchema`.", "id": "f17628:c0:m13"}
{"signature": "def compute(self, inputs, outputs):", "body": "if not self.topDownMode:<EOL><INDENT>data = self.getNextRecord()<EOL>reset = data[\"<STR_LIT>\"]<EOL>sequenceId = data[\"<STR_LIT>\"]<EOL>categories = data[\"<STR_LIT>\"]<EOL>self.encoder.encodeIntoArray(data, outputs[\"<STR_LIT>\"])<EOL>if self.predictedField is not None and self.predictedField != \"<STR_LIT>\":<EOL><INDENT>allEncoders = list(self.encoder.encoders)<EOL>if self.disabledEncoder is not None:<EOL><INDENT>allEncoders.extend(self.disabledEncoder.encoders)<EOL><DEDENT>encoders = [e for e in allEncoders<EOL>if e[<NUM_LIT:0>] == self.predictedField]<EOL>if len(encoders) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % self.predictedField)<EOL><DEDENT>else:<EOL><INDENT>encoder = encoders[<NUM_LIT:0>][<NUM_LIT:1>]<EOL><DEDENT>actualValue = data[self.predictedField]<EOL>outputs[\"<STR_LIT>\"][:] = encoder.getBucketIndices(actualValue)<EOL>if isinstance(actualValue, str):<EOL><INDENT>outputs[\"<STR_LIT>\"][:] = encoder.getBucketIndices(actualValue)<EOL><DEDENT>else:<EOL><INDENT>outputs[\"<STR_LIT>\"][:] = actualValue<EOL><DEDENT><DEDENT>outputs[\"<STR_LIT>\"][:] = self.encoder.getScalars(data)<EOL>self._outputValues[\"<STR_LIT>\"] = self.encoder.getEncodedValues(data)<EOL>encoders = self.encoder.getEncoderList()<EOL>prevOffset = <NUM_LIT:0><EOL>sourceEncodings = []<EOL>bitData = outputs[\"<STR_LIT>\"]<EOL>for encoder in encoders:<EOL><INDENT>nextOffset = prevOffset + encoder.getWidth()<EOL>sourceEncodings.append(bitData[prevOffset:nextOffset])<EOL>prevOffset = nextOffset<EOL><DEDENT>self._outputValues['<STR_LIT>'] = sourceEncodings<EOL>for filter in self.postEncodingFilters:<EOL><INDENT>filter.process(encoder=self.encoder, data=outputs['<STR_LIT>'])<EOL><DEDENT>outputs['<STR_LIT>'][<NUM_LIT:0>] = reset<EOL>outputs['<STR_LIT>'][<NUM_LIT:0>] = sequenceId<EOL>self.populateCategoriesOut(categories, outputs['<STR_LIT>'])<EOL>if self.verbosity >= <NUM_LIT:1>:<EOL><INDENT>if 
self._iterNum == <NUM_LIT:0>:<EOL><INDENT>self.encoder.pprintHeader(prefix=\"<STR_LIT>\")<EOL><DEDENT>if reset:<EOL><INDENT>print(\"<STR_LIT>\" % sequenceId)<EOL><DEDENT>if self.verbosity >= <NUM_LIT:2>:<EOL><INDENT>print()<EOL><DEDENT><DEDENT>if self.verbosity >= <NUM_LIT:1>:<EOL><INDENT>self.encoder.pprint(outputs[\"<STR_LIT>\"], prefix=\"<STR_LIT>\" % (self._iterNum))<EOL>scalarValues = self.encoder.getScalars(data)<EOL>nz = outputs[\"<STR_LIT>\"].nonzero()[<NUM_LIT:0>]<EOL>print(\"<STR_LIT>\" % (len(nz)), nz)<EOL>print(\"<STR_LIT>\", self.encoder.scalarsToStr(scalarValues))<EOL><DEDENT>if self.verbosity >= <NUM_LIT:2>:<EOL><INDENT>print(\"<STR_LIT>\", str(data))<EOL><DEDENT>if self.verbosity >= <NUM_LIT:3>:<EOL><INDENT>decoded = self.encoder.decode(outputs[\"<STR_LIT>\"])<EOL>print(\"<STR_LIT>\", self.encoder.decodedToStr(decoded))<EOL><DEDENT>self._iterNum += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>spatialTopDownIn = inputs['<STR_LIT>']<EOL>spatialTopDownOut = self.encoder.topDownCompute(spatialTopDownIn)<EOL>values = [elem.value for elem in spatialTopDownOut]<EOL>scalars = [elem.scalar for elem in spatialTopDownOut]<EOL>encodings = [elem.encoding for elem in spatialTopDownOut]<EOL>self._outputValues['<STR_LIT>'] = values<EOL>outputs['<STR_LIT>'][:] = numpy.array(scalars)<EOL>self._outputValues['<STR_LIT>'] = encodings<EOL>temporalTopDownIn = inputs['<STR_LIT>']<EOL>temporalTopDownOut = self.encoder.topDownCompute(temporalTopDownIn)<EOL>values = [elem.value for elem in temporalTopDownOut]<EOL>scalars = [elem.scalar for elem in temporalTopDownOut]<EOL>encodings = [elem.encoding for elem in temporalTopDownOut]<EOL>self._outputValues['<STR_LIT>'] = values<EOL>outputs['<STR_LIT>'][:] = numpy.array(scalars)<EOL>self._outputValues['<STR_LIT>'] = encodings<EOL>assert len(spatialTopDownOut) == len(temporalTopDownOut), (<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT:size>\")<EOL><DEDENT>", "docstring": "Get a record from the dataSource and encode it.\n\nOverrides 
:meth:`nupic.bindings.regions.PyRegion.PyRegion.compute`.", "id": "f17628:c0:m8"}
{"signature": "def setParameter(self, parameterName, index, parameterValue):", "body": "if parameterName == '<STR_LIT>':<EOL><INDENT>self.topDownMode = parameterValue<EOL><DEDENT>elif parameterName == '<STR_LIT>':<EOL><INDENT>self.predictedField = parameterValue<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>' + parameterName)<EOL><DEDENT>", "docstring": "Set the value of a Spec parameter. Most parameters are handled\nautomatically by PyRegion's parameter set mechanism. The ones that need\nspecial treatment are explicitly handled here.", "id": "f17628:c0:m12"}
{"signature": "def populateCategoriesOut(self, categories, output):", "body": "if categories[<NUM_LIT:0>] is None:<EOL><INDENT>output[:] = -<NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>for i, cat in enumerate(categories[:len(output)]):<EOL><INDENT>output[i] = cat<EOL><DEDENT>output[len(categories):] = -<NUM_LIT:1><EOL><DEDENT>", "docstring": "Populate the output array with the category indices.\n\n.. note:: Non-categories are represented with ``-1``.\n\n:param categories: (list) of category strings\n:param output: (list) category output, will be overwritten", "id": "f17628:c0:m7"}
{"signature": "@classmethod<EOL><INDENT>def readFromProto(cls, proto):<DEDENT>", "body": "instance = cls()<EOL>instance.encoder = MultiEncoder.read(proto.encoder)<EOL>if proto.disabledEncoder is not None:<EOL><INDENT>instance.disabledEncoder = MultiEncoder.read(proto.disabledEncoder)<EOL><DEDENT>instance.topDownMode = bool(proto.topDownMode)<EOL>instance.verbosity = proto.verbosity<EOL>instance.numCategories = proto.numCategories<EOL>return instance<EOL>", "docstring": "Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.readFromProto`.", "id": "f17628:c0:m15"}
{"signature": "def setSensedValue(self, value):", "body": "self._sensedValue = value<EOL>", "docstring": ":param value: will be encoded when this region does a compute.", "id": "f17629:c0:m6"}
{"signature": "def getSensedValue(self):", "body": "return self._sensedValue<EOL>", "docstring": ":return: sensed value", "id": "f17629:c0:m5"}
{"signature": "def Enum(*args, **kwargs):", "body": "def getLabel(cls, val):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return cls.__labels[val]<EOL><DEDENT>def validate(cls, val):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return val in cls.__values<EOL><DEDENT>def getValues(cls):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return list(cls.__values)<EOL><DEDENT>def getLabels(cls):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return list(cls.__labels.values())<EOL><DEDENT>def getValue(cls, label):<EOL><INDENT>\"\"\"<STR_LIT>\"\"\"<EOL>return cls.__labels[label]<EOL><DEDENT>for arg in list(args)+kwargs.keys():<EOL><INDENT>if type(arg) is not str:<EOL><INDENT>raise TypeError(\"<STR_LIT>\".format(arg))<EOL><DEDENT>if not __isidentifier(arg):<EOL><INDENT>raise ValueError(\"<STR_LIT>\"\"<STR_LIT>\".format(arg))<EOL><DEDENT><DEDENT>kwargs.update(zip(args, args))<EOL>newType = type(\"<STR_LIT>\", (object,), kwargs)<EOL>newType.__labels = dict( (v,k) for k,v in kwargs.iteritems())<EOL>newType.__values = set(newType.__labels.keys())<EOL>newType.getLabel = functools.partial(getLabel, newType)<EOL>newType.validate = functools.partial(validate, newType)<EOL>newType.getValues = functools.partial(getValues, newType)<EOL>newType.getLabels = functools.partial(getLabels, newType)<EOL>newType.getValue = functools.partial(getValue, newType)<EOL>return newType<EOL>", "docstring": "Utility function for creating enumerations in python\n\nExample Usage:\n  >> Color = Enum(\"Red\", \"Green\", \"Blue\", \"Magenta\")\n  >> print Color.Red\n  >> 0\n  >> print Color.Green\n  >> 1\n  >> print Color.Blue\n  >> 2\n  >> print Color.Magenta\n  >> 3\n  >> Color.Violet\n  >> 'violet'\n  >> Color.getLabel(Color.Red)\n  >> 'Red'\n  >> Color.getLabel(2)\n  >> 'Blue'", "id": "f17633:m1"}
{"signature": "def groupby2(*args):", "body": "generatorList = [] <EOL>if len(args) % <NUM_LIT:2> == <NUM_LIT:1>:<EOL><INDENT>raise ValueError(\"<STR_LIT>\")<EOL><DEDENT>advanceList = []<EOL>for i in xrange(<NUM_LIT:0>, len(args), <NUM_LIT:2>):<EOL><INDENT>listn = args[i]<EOL>fn = args[i + <NUM_LIT:1>]<EOL>if listn is not None:<EOL><INDENT>generatorList.append(groupby(listn, fn))<EOL>advanceList.append(True) <EOL><DEDENT>else:<EOL><INDENT>generatorList.append(None)<EOL>advanceList.append(False)<EOL><DEDENT><DEDENT>n = len(generatorList)<EOL>nextList = [None] * n<EOL>while True:<EOL><INDENT>for i in xrange(n):<EOL><INDENT>if advanceList[i]:<EOL><INDENT>try:<EOL><INDENT>nextList[i] = generatorList[i].next()<EOL><DEDENT>except StopIteration:<EOL><INDENT>nextList[i] = None<EOL><DEDENT><DEDENT><DEDENT>if all(entry is None for entry in nextList):<EOL><INDENT>break<EOL><DEDENT>minKeyVal = min(nextVal[<NUM_LIT:0>] for nextVal in nextList<EOL>if nextVal is not None)<EOL>retGroups = [minKeyVal]<EOL>for i in xrange(n):<EOL><INDENT>if nextList[i] is not None and nextList[i][<NUM_LIT:0>] == minKeyVal:<EOL><INDENT>retGroups.append(nextList[i][<NUM_LIT:1>])<EOL>advanceList[i] = True<EOL><DEDENT>else:<EOL><INDENT>advanceList[i] = False<EOL>retGroups.append(None)<EOL><DEDENT><DEDENT>yield tuple(retGroups)<EOL><DEDENT>", "docstring": "Like itertools.groupby, with the following additions:\n\n    - Supports multiple sequences. Instead of returning (k, g), each iteration\n      returns (k, g0, g1, ...), with one `g` for each input sequence. The value of\n      each `g` is either a non-empty iterator or `None`.\n    - It treats the value `None` as an empty sequence. So you can make subsequent\n      calls to groupby2 on any `g` value.\n\n    .. note:: Read up on groupby here:\n           https://docs.python.org/dev/library/itertools.html#itertools.groupby\n\n    :param args: (list) Parameters alternating between sorted lists and their\n                        respective key functions. 
The lists should be sorted with\n                        respect to their key function.\n\n    :returns: (tuple) A n + 1 dimensional tuple, where the first element is the\n                    key of the iteration, and the other n entries are groups of\n                    objects that share this key. Each group corresponds to the an\n                    input sequence. `groupby2` is a generator that returns a tuple\n                    for every iteration. If an input sequence has no members with\n                    the current key, None is returned in place of a generator.", "id": "f17634:m0"}
{"signature": "def addExtraLogItem(self, item):", "body": "self.__logItems.append(item)<EOL>return<EOL>", "docstring": "Add an item to the log items list for the currently running session.\n        Our self.myAssertXXXXXX wrappers add the current items to the msg that is\n        passed to the unittest's assertXXXXX methods.  The extra info will show up\n        in test results if the test fails.", "id": "f17635:c0:m3"}
{"signature": "def printBanner(self, msg, *args):", "body": "print()<EOL>print(\"<STR_LIT>\")<EOL>print(msg % args)<EOL>print(\"<STR_LIT>\" % (datetime.utcnow(), self,), file=sys.stdout)<EOL>print(\"<STR_LIT>\")<EOL>sys.stdout.flush()<EOL>return<EOL>", "docstring": "Print out a banner", "id": "f17635:c0:m2"}
{"signature": "def assertEqual(self, first, second, msg=None):", "body": "unittest.TestCase.assertEqual(self, first, second, self.__wrapMsg(msg))<EOL>return<EOL>", "docstring": "unittest.TestCase.assertEqual override; adds extra log items to msg", "id": "f17635:c0:m6"}
{"signature": "def convertSP(pySp, newSeed):", "body": "columnDim = pySp._columnDimensions<EOL>inputDim = pySp._inputDimensions<EOL>numInputs = pySp.getNumInputs()<EOL>numColumns = pySp.getNumColumns()<EOL>cppSp = CPPSpatialPooler(inputDim, columnDim)<EOL>cppSp.setPotentialRadius(pySp.getPotentialRadius())<EOL>cppSp.setPotentialPct(pySp.getPotentialPct())<EOL>cppSp.setGlobalInhibition(pySp.getGlobalInhibition())<EOL>numActiveColumnsPerInhArea = pySp.getNumActiveColumnsPerInhArea()<EOL>localAreaDensity = pySp.getLocalAreaDensity()<EOL>if (numActiveColumnsPerInhArea > <NUM_LIT:0>):<EOL><INDENT>cppSp.setNumActiveColumnsPerInhArea(numActiveColumnsPerInhArea)<EOL><DEDENT>else:<EOL><INDENT>cppSp.setLocalAreaDensity(localAreaDensity)<EOL><DEDENT>cppSp.setStimulusThreshold(pySp.getStimulusThreshold())<EOL>cppSp.setInhibitionRadius(pySp.getInhibitionRadius())<EOL>cppSp.setDutyCyclePeriod(pySp.getDutyCyclePeriod())<EOL>cppSp.setBoostStrength(pySp.getBoostStrength())<EOL>cppSp.setIterationNum(pySp.getIterationNum())<EOL>cppSp.setIterationLearnNum(pySp.getIterationLearnNum())<EOL>cppSp.setSpVerbosity(pySp.getSpVerbosity())<EOL>cppSp.setUpdatePeriod(pySp.getUpdatePeriod())<EOL>cppSp.setSynPermTrimThreshold(pySp.getSynPermTrimThreshold())<EOL>cppSp.setSynPermActiveInc(pySp.getSynPermActiveInc())<EOL>cppSp.setSynPermInactiveDec(pySp.getSynPermInactiveDec())<EOL>cppSp.setSynPermBelowStimulusInc(pySp.getSynPermBelowStimulusInc())<EOL>cppSp.setSynPermConnected(pySp.getSynPermConnected())<EOL>cppSp.setMinPctOverlapDutyCycles(pySp.getMinPctOverlapDutyCycles())<EOL>boostFactors = numpy.zeros(numColumns).astype(realType)<EOL>pySp.getBoostFactors(boostFactors)<EOL>cppSp.setBoostFactors(boostFactors)<EOL>overlapDuty = numpy.zeros(numColumns).astype(realType)<EOL>pySp.getOverlapDutyCycles(overlapDuty)<EOL>cppSp.setOverlapDutyCycles(overlapDuty)<EOL>activeDuty = 
numpy.zeros(numColumns).astype(realType)<EOL>pySp.getActiveDutyCycles(activeDuty)<EOL>cppSp.setActiveDutyCycles(activeDuty)<EOL>minOverlapDuty = numpy.zeros(numColumns).astype(realType)<EOL>pySp.getMinOverlapDutyCycles(minOverlapDuty)<EOL>cppSp.setMinOverlapDutyCycles(minOverlapDuty)<EOL>for i in range(numColumns):<EOL><INDENT>potential = numpy.zeros(numInputs).astype(uintType)<EOL>pySp.getPotential(i, potential)<EOL>cppSp.setPotential(i, potential)<EOL>perm = numpy.zeros(numInputs).astype(realType)<EOL>pySp.getPermanence(i, perm)<EOL>cppSp.setPermanence(i, perm)<EOL><DEDENT>pySp._random = NupicRandom(newSeed)<EOL>cppSp.seed_(newSeed)<EOL>return cppSp<EOL>", "docstring": "Given an instance of a python spatial_pooler return an instance of the CPP\nspatial_pooler with identical parameters.", "id": "f17637:m3"}
{"signature": "def CreateSP(imp, params):", "body": "if (imp == \"<STR_LIT>\"):<EOL><INDENT>spClass = PySpatialPooler<EOL><DEDENT>elif (imp == \"<STR_LIT>\"):<EOL><INDENT>spClass = CPPSpatialPooler<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>print(params)<EOL>sp = spClass(**params)<EOL>return sp<EOL>", "docstring": "Helper class for creating an instance of the appropriate spatial pooler using\ngiven parameters.\n\nParameters:\n----------------------------\nimp:       Either 'py' or 'cpp' for creating the appropriate instance.\nparams:    A dict for overriding constructor parameters. The keys must\n           correspond to contructor parameter names.\n\nReturns the SP object.", "id": "f17637:m4"}
{"signature": "def getSeed():", "body": "seed = int((time.time()%<NUM_LIT>)*<NUM_LIT:10>)<EOL>print(\"<STR_LIT>\", seed, \"<STR_LIT>\", end='<STR_LIT:U+0020>')<EOL>callStack = traceback.extract_stack(limit=<NUM_LIT:3>)<EOL>print(callStack[<NUM_LIT:0>][<NUM_LIT:2>], \"<STR_LIT>\", callStack[<NUM_LIT:0>][<NUM_LIT:1>], \"<STR_LIT>\", callStack[<NUM_LIT:1>][<NUM_LIT:2>])<EOL>return seed<EOL>", "docstring": "Generate and log a 32-bit compatible seed value.", "id": "f17637:m2"}
{"signature": "def convertPermanences(sourceSP, destSP):", "body": "numColumns = sourceSP.getNumColumns()<EOL>numInputs = sourceSP.getNumInputs()<EOL>for i in range(numColumns):<EOL><INDENT>potential = numpy.zeros(numInputs).astype(uintType)<EOL>sourceSP.getPotential(i, potential)<EOL>destSP.setPotential(i, potential)<EOL>perm = numpy.zeros(numInputs).astype(realType)<EOL>sourceSP.getPermanence(i, perm)<EOL>destSP.setPermanence(i, perm)<EOL><DEDENT>", "docstring": "Transfer the permanences from source to dest SP's. This is used in test\nroutines to counteract some drift between implementations.\nWe assume the two SP's have identical configurations/parameters.", "id": "f17637:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def getPatternMachine(self):<DEDENT>", "body": "", "docstring": "Implement this method to provide the pattern machine.", "id": "f17638:c0:m1"}
{"signature": "def init(self, overrides=None):", "body": "params = self._computeTMParams(overrides)<EOL>class MonitoredTemporalMemory(TemporalMemoryMonitorMixin,<EOL>self.getTMClass()): pass<EOL>self.tm = MonitoredTemporalMemory(**params)<EOL>", "docstring": "Initialize Temporal Memory, and other member variables.\n\n:param overrides: overrides for default Temporal Memory parameters", "id": "f17638:c0:m4"}
{"signature": "@abstractmethod<EOL><INDENT>def getTMClass(self):<DEDENT>", "body": "", "docstring": "Implement this method to specify the Temporal Memory class.", "id": "f17638:c0:m0"}
{"signature": "def title(s=None, additional='<STR_LIT>', stream=sys.stdout):", "body": "if s is None:<EOL><INDENT>callable_name, file_name, class_name = getCallerInfo(<NUM_LIT:2>)<EOL>s = callable_name<EOL>if class_name is not None:<EOL><INDENT>s = class_name + '<STR_LIT:.>' + callable_name<EOL><DEDENT><DEDENT>lines = (s + additional).split('<STR_LIT:\\n>')<EOL>length = max(len(line) for line in lines)<EOL>print >> stream, '<STR_LIT:->' * length<EOL>print >> stream, s + additional<EOL>print >> stream, '<STR_LIT:->' * length<EOL>", "docstring": "Utility function to display nice titles\n\n    It automatically extracts the name of the function/method it is called from\n    and you can add additional text. title() will then print the name\n    of the function/method and the additional text surrounded by tow lines\n    of dashes. If you don't want the name of the function, you can provide\n    alternative text (regardless of the additional text)\n\n    :param s: (string) text to display, uses the function name and arguments by\n           default\n    :param additional: (string) extra text to display (not needed if s is not\n           None)\n    :param stream: (stream) the stream to print to. Ny default goes to standard\n           output\n\n    Examples:\n\n    .. code-block:: python\n\n      def foo():\n        title()\n\n    will display:\n\n    .. code-block:: text\n\n      ---\n      foo\n      ---\n\n    .. code-block:: python\n\n      def foo():\n        title(additional='(), this is cool!!!')\n\n    will display:\n\n    .. code-block:: text\n\n      ----------------------\n      foo(), this is cool!!!\n      ----------------------\n\n    .. code-block:: python\n\n      def foo():\n        title('No function name here!')\n\n    will display:\n\n    .. code-block:: text\n\n      ----------------------\n      No function name here!\n      ----------------------", "id": "f17640:m1"}
{"signature": "def getArgumentDescriptions(f):", "body": "<EOL>argspec = inspect.getargspec(f)<EOL>docstring = f.__doc__<EOL>descriptions = {}<EOL>if docstring:<EOL><INDENT>lines = docstring.split('<STR_LIT:\\n>')<EOL>i = <NUM_LIT:0><EOL>while i < len(lines):<EOL><INDENT>stripped = lines[i].lstrip()<EOL>if not stripped:<EOL><INDENT>i += <NUM_LIT:1><EOL>continue<EOL><DEDENT>indentLevel = lines[i].index(stripped[<NUM_LIT:0>])<EOL>firstWord = stripped.split()[<NUM_LIT:0>]<EOL>if firstWord.endswith('<STR_LIT::>'):<EOL><INDENT>firstWord = firstWord[:-<NUM_LIT:1>]<EOL><DEDENT>if firstWord in argspec.args:<EOL><INDENT>argName = firstWord<EOL>restOfLine = stripped[len(firstWord)+<NUM_LIT:1>:].strip()<EOL>argLines = [restOfLine]<EOL>i += <NUM_LIT:1><EOL>while i < len(lines):<EOL><INDENT>stripped = lines[i].lstrip()<EOL>if not stripped:<EOL><INDENT>break<EOL><DEDENT>if lines[i].index(stripped[<NUM_LIT:0>]) <= indentLevel:<EOL><INDENT>break<EOL><DEDENT>argLines.append(lines[i].strip())<EOL>i += <NUM_LIT:1><EOL><DEDENT>descriptions[argName] = '<STR_LIT:U+0020>'.join(argLines)<EOL><DEDENT>else:<EOL><INDENT>i += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>args = []<EOL>if argspec.defaults:<EOL><INDENT>defaultCount = len(argspec.defaults)<EOL><DEDENT>else:<EOL><INDENT>defaultCount = <NUM_LIT:0><EOL><DEDENT>nonDefaultArgCount = len(argspec.args) - defaultCount<EOL>for i, argName in enumerate(argspec.args):<EOL><INDENT>if i >= nonDefaultArgCount:<EOL><INDENT>defaultValue = argspec.defaults[i - nonDefaultArgCount]<EOL>args.append((argName, descriptions.get(argName, \"<STR_LIT>\"), defaultValue))<EOL><DEDENT>else:<EOL><INDENT>args.append((argName, descriptions.get(argName, \"<STR_LIT>\")))<EOL><DEDENT><DEDENT>return args<EOL>", "docstring": "Get the arguments, default values, and argument descriptions for a function.\n\nParses the argument descriptions out of the function docstring, using a\nformat something lke this:\n\n::\n\n  [junk]\n  argument_name:     description...\n    
description...\n    description...\n  [junk]\n  [more arguments]\n\nIt will find an argument as long as the exact argument name starts the line.\nIt will then strip a trailing colon, if present, then strip the rest of the\nline and use it to start the description. It will then strip and append any\nsubsequent lines with a greater indent level than the original argument name.\n\n:param f: (function) to inspect\n:returns: (list of tuples) (``argName``, ``argDescription``, ``defaultValue``)\n  If an argument has no default value, the tuple is only two elements long (as\n  ``None`` cannot be used, since it could be a default value itself).", "id": "f17640:m2"}
{"signature": "def aggregationDivide(dividend, divisor):", "body": "<EOL>dividendMonthSec = aggregationToMonthsSeconds(dividend)<EOL>divisorMonthSec = aggregationToMonthsSeconds(divisor)<EOL>if (dividendMonthSec['<STR_LIT>'] != <NUM_LIT:0> and divisorMonthSec['<STR_LIT>'] != <NUM_LIT:0>)or (dividendMonthSec['<STR_LIT>'] != <NUM_LIT:0> and divisorMonthSec['<STR_LIT>'] != <NUM_LIT:0>):<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>if dividendMonthSec['<STR_LIT>'] > <NUM_LIT:0>:<EOL><INDENT>return float(dividendMonthSec['<STR_LIT>']) / divisor['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>return float(dividendMonthSec['<STR_LIT>']) / divisorMonthSec['<STR_LIT>']<EOL><DEDENT>", "docstring": "Return the result from dividing two dicts that represent date and time.\n\nBoth dividend and divisor are dicts that contain one or more of the following\nkeys: 'years', 'months', 'weeks', 'days', 'hours', 'minutes', seconds',\n'milliseconds', 'microseconds'.\n\nFor example:\n\n::\n\n  aggregationDivide({'hours': 4}, {'minutes': 15}) == 16\n\n:param dividend: (dict) The numerator, as a dict representing a date and time\n:param divisor: (dict) the denominator, as a dict representing a date and time\n:returns: (float) number of times divisor goes into dividend", "id": "f17640:m6"}
{"signature": "@classmethod<EOL><INDENT>def getString(cls, prop):<DEDENT>", "body": "if cls._properties is None:<EOL><INDENT>cls._readStdConfigFiles()<EOL><DEDENT>envValue = os.environ.get(\"<STR_LIT>\" % (cls.envPropPrefix,<EOL>prop.replace('<STR_LIT:.>', '<STR_LIT:_>')), None)<EOL>if envValue is not None:<EOL><INDENT>return envValue<EOL><DEDENT>return cls._properties[prop]<EOL>", "docstring": "Retrieve the requested property as a string. If property does not exist,\n        then KeyError will be raised.\n\n        :param prop: (string) name of the property\n        :raises: KeyError\n        :returns: (string) property value", "id": "f17641:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def getInt(cls, prop):<DEDENT>", "body": "return int(cls.getString(prop))<EOL>", "docstring": "Retrieve the requested property and return it as an int. If property\n        does not exist, then KeyError will be raised.\n\n        :param prop: (string) name of the property\n        :returns: (int) property value", "id": "f17641:c0:m2"}
{"signature": "@classmethod<EOL><INDENT>def dict(cls):<DEDENT>", "body": "if cls._properties is None:<EOL><INDENT>cls._readStdConfigFiles()<EOL><DEDENT>result = dict(cls._properties)<EOL>keys = os.environ.keys()<EOL>replaceKeys = filter(lambda x: x.startswith(cls.envPropPrefix),<EOL>keys)<EOL>for envKey in replaceKeys:<EOL><INDENT>key = envKey[len(cls.envPropPrefix):]<EOL>key = key.replace('<STR_LIT:_>', '<STR_LIT:.>')<EOL>result[key] = os.environ[envKey]<EOL><DEDENT>return result<EOL>", "docstring": "Return a dict containing all of the configuration properties\n\n        :returns: (dict) containing all configuration properties.", "id": "f17641:c0:m6"}
{"signature": "@classmethod<EOL><INDENT>def _readConfigFile(cls, filename, path=None):<DEDENT>", "body": "outputProperties = dict()<EOL>if path is None:<EOL><INDENT>filePath = cls.findConfigFile(filename)<EOL><DEDENT>else:<EOL><INDENT>filePath = os.path.join(path, filename)<EOL><DEDENT>try:<EOL><INDENT>if filePath is not None:<EOL><INDENT>try:<EOL><INDENT>_getLogger().debug(\"<STR_LIT>\", filePath)<EOL>with open(filePath, '<STR_LIT:r>') as inp:<EOL><INDENT>contents = inp.read()<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\" % filePath)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>contents = resource_string(\"<STR_LIT>\", filename)<EOL><DEDENT>except Exception as resourceException:<EOL><INDENT>if filename in [USER_CONFIG, CUSTOM_CONFIG]:<EOL><INDENT>contents = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>raise resourceException<EOL><DEDENT><DEDENT><DEDENT>elements = ElementTree.XML(contents)<EOL>if elements.tag != '<STR_LIT>':<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (elements.tag))<EOL><DEDENT>propertyElements = elements.findall('<STR_LIT>')<EOL>for propertyItem in propertyElements:<EOL><INDENT>propInfo = dict()<EOL>propertyAttributes = list(propertyItem)<EOL>for propertyAttribute in propertyAttributes:<EOL><INDENT>propInfo[propertyAttribute.tag] = propertyAttribute.text<EOL><DEDENT>name = propInfo.get('<STR_LIT:name>', None)<EOL>if '<STR_LIT:value>' in propInfo and propInfo['<STR_LIT:value>'] is None:<EOL><INDENT>value = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>value = propInfo.get('<STR_LIT:value>', None)<EOL>if value is None:<EOL><INDENT>if '<STR_LIT>' in propInfo:<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (str(propInfo)))<EOL><DEDENT><DEDENT><DEDENT>restOfValue = value<EOL>value = '<STR_LIT>'<EOL>while True:<EOL><INDENT>pos = restOfValue.find('<STR_LIT>')<EOL>if pos == -<NUM_LIT:1>:<EOL><INDENT>value += 
restOfValue<EOL>break<EOL><DEDENT>value += restOfValue[<NUM_LIT:0>:pos]<EOL>varTailPos = restOfValue.find('<STR_LIT:}>', pos)<EOL>if varTailPos == -<NUM_LIT:1>:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (restOfValue))<EOL><DEDENT>varname = restOfValue[pos+<NUM_LIT:6>:varTailPos]<EOL>if varname not in os.environ:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (varname))<EOL><DEDENT>envVarValue = os.environ[varname]<EOL>value += envVarValue<EOL>restOfValue = restOfValue[varTailPos+<NUM_LIT:1>:]<EOL><DEDENT>if name is None:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (str(propInfo)))<EOL><DEDENT>propInfo['<STR_LIT:value>'] = value<EOL>outputProperties[name] = propInfo<EOL><DEDENT>return outputProperties<EOL><DEDENT>except Exception:<EOL><INDENT>_getLogger().exception(\"<STR_LIT>\",<EOL>filePath)<EOL>raise<EOL><DEDENT>", "docstring": "Parse the given XML file and return a dict describing the file.\n\n        :param filename: (string) name of XML file to parse (no path)\n        :param path: (string) path of the XML file. If None, then use the standard\n               configuration search path.\n        :returns: (dict) with each property as a key and a dict of all the\n               property's attributes as value", "id": "f17641:c0:m8"}
{"signature": "@classmethod<EOL><INDENT>def getBool(cls, prop):<DEDENT>", "body": "value = cls.getInt(prop)<EOL>if value not in (<NUM_LIT:0>, <NUM_LIT:1>):<EOL><INDENT>raise ValueError(\"<STR_LIT>\" % (<EOL>value, prop))<EOL><DEDENT>return bool(value)<EOL>", "docstring": "Retrieve the requested property and return it as a bool. If property\n        does not exist, then KeyError will be raised. If the property value is\n        neither 0 nor 1, then ValueError will be raised\n\n        :param prop: (string) name of the property\n        :raises: KeyError, ValueError\n        :returns: (bool) property value", "id": "f17641:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def readConfigFile(cls, filename, path=None):<DEDENT>", "body": "properties = cls._readConfigFile(filename, path)<EOL>if cls._properties is None:<EOL><INDENT>cls._properties = dict()<EOL><DEDENT>for name in properties:<EOL><INDENT>if '<STR_LIT:value>' in properties[name]:<EOL><INDENT>cls._properties[name] = properties[name]['<STR_LIT:value>']<EOL><DEDENT><DEDENT>", "docstring": "Parse the given XML file and store all properties it describes.\n\n        :param filename: (string) name of XML file to parse (no path)\n        :param path: (string) path of the XML file. If None, then use the standard\n                      configuration search path.", "id": "f17641:c0:m7"}
{"signature": "@classmethod<EOL><INDENT>def clear(cls):<DEDENT>", "body": "<EOL>super(Configuration, cls).clear()<EOL>_CustomConfigurationFileWrapper.clear(persistent=False)<EOL>", "docstring": "Clear all configuration properties from in-memory cache, but do NOT alter \nthe custom configuration file. Used in unit-testing.", "id": "f17642:c0:m3"}
{"signature": "@classmethod<EOL><INDENT>def resetCustomConfig(cls):<DEDENT>", "body": "_getLogger().info(\"<STR_LIT>\"<EOL>\"<STR_LIT>\", traceback.format_stack())<EOL>super(Configuration, cls).clear()<EOL>_CustomConfigurationFileWrapper.clear(persistent=True)<EOL>", "docstring": "Clear all custom configuration settings and delete the persistent custom \nconfiguration store.", "id": "f17642:c0:m4"}
{"signature": "@classmethod<EOL><INDENT>def loadCustomConfig(cls):<DEDENT>", "body": "cls.readConfigFile(_CustomConfigurationFileWrapper.customFileName)<EOL>", "docstring": "Loads custom configuration settings from their persistent storage.\n\n.. warning :: DO NOT CALL THIS: It's typically not necessary to call this \n   method directly. This method exists *solely* for the benefit of \n   ``prepare_conf.py``, which needs to load configuration files selectively.", "id": "f17642:c0:m5"}
{"signature": "@classmethod<EOL><INDENT>def _setPath(cls):<DEDENT>", "body": "cls._path = os.path.join(os.environ['<STR_LIT>'],<EOL>cls.customFileName)<EOL>", "docstring": "Sets the path of the custom configuration file", "id": "f17642:c1:m3"}
{"signature": "@classmethod<EOL><INDENT>def getCustomDict(cls):<DEDENT>", "body": "return _CustomConfigurationFileWrapper.getCustomDict()<EOL>", "docstring": "returns: (dict) containing all custom configuration properties.", "id": "f17642:c0:m0"}
{"signature": "@classmethod<EOL><INDENT>def setCustomProperty(cls, propertyName, value):<DEDENT>", "body": "cls.setCustomProperties({propertyName : value})<EOL>", "docstring": "Set a single custom setting and persist it to the custom configuration \nstore.\n\n:param propertyName: (string) containing the name of the property to get\n:param value: (object) value to set the property to", "id": "f17642:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def getPath(cls):<DEDENT>", "body": "if cls._path is None:<EOL><INDENT>cls._setPath()<EOL><DEDENT>return cls._path<EOL>", "docstring": "Get the path of the custom configuration file", "id": "f17642:c1:m4"}
{"signature": "def logEntryExit(getLoggerCallback=logging.getLogger,<EOL>entryExitLogLevel=logging.DEBUG, logArgs=False,<EOL>logTraceback=False):", "body": "def entryExitLoggingDecorator(func):<EOL><INDENT>@functools.wraps(func)<EOL>def entryExitLoggingWrap(*args, **kwargs):<EOL><INDENT>if entryExitLogLevel is None:<EOL><INDENT>enabled = False<EOL><DEDENT>else:<EOL><INDENT>logger = getLoggerCallback()<EOL>enabled = logger.isEnabledFor(entryExitLogLevel)<EOL><DEDENT>if not enabled:<EOL><INDENT>return func(*args, **kwargs)<EOL><DEDENT>funcName = str(func)<EOL>if logArgs:<EOL><INDENT>argsRepr = '<STR_LIT:U+002CU+0020>'.join(<EOL>[repr(a) for a in args] +<EOL>['<STR_LIT>' % (k,v,) for k,v in kwargs.items()])<EOL><DEDENT>else:<EOL><INDENT>argsRepr = '<STR_LIT>'<EOL><DEDENT>logger.log(<EOL>entryExitLogLevel, \"<STR_LIT>\", funcName, argsRepr,<EOL>'<STR_LIT>' if not logTraceback else '<STR_LIT>' + repr(traceback.format_stack()))<EOL>try:<EOL><INDENT>return func(*args, **kwargs)<EOL><DEDENT>finally:<EOL><INDENT>logger.log(<EOL>entryExitLogLevel, \"<STR_LIT>\", funcName, argsRepr,<EOL>'<STR_LIT>' if not logTraceback else '<STR_LIT>' + repr(traceback.format_stack()))<EOL><DEDENT><DEDENT>return entryExitLoggingWrap<EOL><DEDENT>return entryExitLoggingDecorator<EOL>", "docstring": "Returns a closure suitable for use as function/method decorator for\n    logging entry/exit of function/method.\n\n    getLoggerCallback:    user-supplied callback function that takes no args and\n                            returns the logger instance to use for logging.\n    entryExitLogLevel:    Log level for logging entry/exit of decorated function;\n                            e.g., logging.DEBUG; pass None to disable entry/exit\n                            logging.\n    logArgs:              If True, also log args\n    logTraceback:         If True, also log Traceback information\n\n    Usage Examples:\n      NOTE: logging must be initialized *before* any loggers are created, else\n        
there will be no output; see nupic.support.initLogging()\n\n      @logEntryExit()\n      def myFunctionBar():\n          ...\n\n\n      @logEntryExit(logTraceback=True)\n      @logExceptions()\n      def myFunctionGamma():\n          ...\n          raise RuntimeError(\"something bad happened\")\n          ...", "id": "f17643:m1"}
{"signature": "def logExceptions(logger=None):", "body": "logger = (logger if logger is not None else logging.getLogger(__name__))<EOL>def exceptionLoggingDecorator(func):<EOL><INDENT>@functools.wraps(func)<EOL>def exceptionLoggingWrap(*args, **kwargs):<EOL><INDENT>try:<EOL><INDENT>return func(*args, **kwargs)<EOL><DEDENT>except:<EOL><INDENT>logger.exception(<EOL>\"<STR_LIT>\",<EOL>sys.exc_info()[<NUM_LIT:1>], func, '<STR_LIT>'.join(traceback.format_stack()), )<EOL>raise<EOL><DEDENT><DEDENT>return exceptionLoggingWrap<EOL><DEDENT>return exceptionLoggingDecorator<EOL>", "docstring": "Returns a closure suitable for use as function/method decorator for\n    logging exceptions that leave the scope of the decorated function. Exceptions\n    are logged at ERROR level.\n\n    logger:    user-supplied logger instance. Defaults to logging.getLogger.\n\n    Usage Example:\n      NOTE: logging must be initialized *before* any loggers are created, else\n        there will be no output; see nupic.support.initLogging()\n\n      @logExceptions()\n      def myFunctionFoo():\n          ...\n          raise RuntimeError(\"something bad happened\")\n          ...", "id": "f17643:m0"}
{"signature": "def isCompleted(self):", "body": "return self._recordStore.isCompleted()<EOL>", "docstring": ":returns: True if all records have been read.", "id": "f17645:c1:m18"}
{"signature": "def getFieldNames(self):", "body": "return [f.name for f in self._streamFields]<EOL>", "docstring": "Returns all fields in all inputs (list of plain names).\n\n.. note:: currently, only one input is supported", "id": "f17645:c1:m11"}
{"signature": "def getAggregationMonthsAndSeconds(self):", "body": "return self._aggMonthsAndSeconds<EOL>", "docstring": "Returns the aggregation period of the record stream as a dict\n        containing 'months' and 'seconds'. The months is always an integer and\n        seconds is a floating point. Only one is allowed to be non-zero at a\n        time.\n\n        Will return the aggregation period from this call. This call is\n        used by the :meth:`nupic.data.record_stream.RecordStream.getNextRecordDict`\n        method to assign a record number to a record given its timestamp and the\n        aggregation interval.\n\n        :returns: aggregationPeriod (as a dict) where:\n\n                  - ``months``: number of months in aggregation period\n                  - ``seconds``: number of seconds in aggregation period\n                    (as a float)", "id": "f17645:c1:m7"}
{"signature": "def setCompleted(self, completed=True):", "body": "<EOL>self._recordStore.setCompleted(completed)<EOL>", "docstring": "Marks the stream completed (True or False)\n\n:param completed: (bool) is completed or not", "id": "f17645:c1:m19"}
{"signature": "def getNextRecordIdx(self):", "body": "return self._recordCount<EOL>", "docstring": ":returns: the index of the record that will be read next from\n          :meth:`getNextRecord`.", "id": "f17645:c1:m5"}
{"signature": "def recordsExistAfter(self, bookmark):", "body": "return self._recordStore.recordsExistAfter(bookmark)<EOL>", "docstring": ":returns: True if there are records left after the  bookmark.", "id": "f17645:c1:m6"}
{"signature": "def getNextRecord(self):", "body": "<EOL>while True:<EOL><INDENT>if self._sourceLastRecordIdx is not None  andself._recordStore.getNextRecordIdx() >= self._sourceLastRecordIdx:<EOL><INDENT>preAggValues = None                             <EOL>bookmark = self._recordStore.getBookmark()<EOL><DEDENT>else:<EOL><INDENT>preAggValues = self._recordStore.getNextRecord()<EOL>bookmark = self._recordStore.getBookmark()<EOL><DEDENT>if preAggValues == ():  <EOL><INDENT>if self._eofOnTimeout:<EOL><INDENT>preAggValues = None  <EOL><DEDENT>else:<EOL><INDENT>return preAggValues  <EOL><DEDENT><DEDENT>self._logger.debug('<STR_LIT>',<EOL>self._recordStore.getNextRecordIdx()-<NUM_LIT:1>, preAggValues)<EOL>(fieldValues, aggBookmark) = self._aggregator.next(preAggValues, bookmark)<EOL>if fieldValues is not None:<EOL><INDENT>self._aggBookmark = aggBookmark<EOL><DEDENT>if preAggValues is None and fieldValues is None:<EOL><INDENT>return None<EOL><DEDENT>if fieldValues is not None:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if self._needFieldsFiltering:<EOL><INDENT>values = []<EOL>srcDict = dict(zip(self._recordStoreFieldNames, fieldValues))<EOL>for name in self._streamFieldNames:<EOL><INDENT>values.append(srcDict[name])<EOL><DEDENT>fieldValues = values<EOL><DEDENT>if self._writer is not None:<EOL><INDENT>self._writer.appendRecord(fieldValues)<EOL><DEDENT>self._recordCount += <NUM_LIT:1><EOL>self._logger.debug('<STR_LIT>'<EOL>'<STR_LIT>',<EOL>self._recordCount-<NUM_LIT:1>, fieldValues, self._aggBookmark)<EOL>return fieldValues<EOL>", "docstring": "Returns combined data from all sources (values only).\n\n        :returns: None on EOF; empty sequence on timeout.", "id": "f17645:c1:m3"}
{"signature": "def dictDiff(da, db):", "body": "different = False<EOL>resultDict = dict()<EOL>resultDict['<STR_LIT>'] = set(da) - set(db)<EOL>if resultDict['<STR_LIT>']:<EOL><INDENT>different = True<EOL><DEDENT>resultDict['<STR_LIT>'] = set(db) - set(da)<EOL>if resultDict['<STR_LIT>']:<EOL><INDENT>different = True<EOL><DEDENT>resultDict['<STR_LIT>'] = []<EOL>for key in (set(da) - resultDict['<STR_LIT>']):<EOL><INDENT>comparisonResult = da[key] == db[key]<EOL>if isinstance(comparisonResult, bool):<EOL><INDENT>isEqual = comparisonResult<EOL><DEDENT>else:<EOL><INDENT>isEqual = comparisonResult.all()<EOL><DEDENT>if not isEqual:<EOL><INDENT>resultDict['<STR_LIT>'].append(key)<EOL>different = True<EOL><DEDENT><DEDENT>assert (((resultDict['<STR_LIT>'] or resultDict['<STR_LIT>'] or<EOL>resultDict['<STR_LIT>']) and different) or not different)<EOL>return resultDict if different else None<EOL>", "docstring": "Compares two python dictionaries at the top level and return differences\n\n    da:             first dictionary\n    db:             second dictionary\n\n    Returns:        None if dictionaries test equal; otherwise returns a\n                    dictionary as follows:\n                    {\n                      'inAButNotInB':\n                          <sequence of keys that are in da but not in db>\n                      'inBButNotInA':\n                          <sequence of keys that are in db but not in da>\n                      'differentValues':\n                          <sequence of keys whose corresponding values differ\n                           between da and db>\n                    }", "id": "f17646:m6"}
{"signature": "def rApply(d, f):", "body": "remainingDicts = [(d, ())]<EOL>while len(remainingDicts) > <NUM_LIT:0>:<EOL><INDENT>current, prevKeys = remainingDicts.pop()<EOL>for k, v in current.items():<EOL><INDENT>keys = prevKeys + (k,)<EOL>if isinstance(v, dict):<EOL><INDENT>remainingDicts.insert(<NUM_LIT:0>, (v, keys))<EOL><DEDENT>else:<EOL><INDENT>f(v, keys)<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Recursively applies f to the values in dict d.\n\n    Args:\n      d: The dict to recurse over.\n      f: A function to apply to values in d that takes the value and a list of\n          keys from the root of the dict to the value.", "id": "f17646:m1"}
{"signature": "def sort(filename, key, outputFile, fields=None, watermark=<NUM_LIT> * <NUM_LIT> * <NUM_LIT:100>):", "body": "if fields is not None:<EOL><INDENT>assert set(key).issubset(set([f[<NUM_LIT:0>] for f in fields]))<EOL><DEDENT>with FileRecordStream(filename) as f:<EOL><INDENT>if fields:<EOL><INDENT>fieldNames = [ff[<NUM_LIT:0>] for ff in fields]<EOL>indices = [f.getFieldNames().index(name) for name in fieldNames]<EOL>assert len(indices) == len(fields)<EOL><DEDENT>else:<EOL><INDENT>fileds = f.getFields()<EOL>fieldNames = f.getFieldNames()<EOL>indices = None<EOL><DEDENT>key = [fieldNames.index(name) for name in key]<EOL>chunk = <NUM_LIT:0><EOL>records = []<EOL>for i, r in enumerate(f):<EOL><INDENT>if indices:<EOL><INDENT>temp = []<EOL>for i in indices:<EOL><INDENT>temp.append(r[i])<EOL><DEDENT>r = temp<EOL><DEDENT>records.append(r)<EOL>available_memory = psutil.avail_phymem()<EOL>if available_memory < watermark:<EOL><INDENT>_sortChunk(records, key, chunk, fields)<EOL>records = []<EOL>chunk += <NUM_LIT:1><EOL><DEDENT><DEDENT>if len(records) > <NUM_LIT:0>:<EOL><INDENT>_sortChunk(records, key, chunk, fields)<EOL>chunk += <NUM_LIT:1><EOL><DEDENT>_mergeFiles(key, chunk, outputFile, fields)<EOL><DEDENT>", "docstring": "Sort a potentially big file\n\n    filename - the input file (standard File format)\n    key - a list of field names to sort by\n    outputFile - the name of the output file\n    fields - a list of fields that should be included (all fields if None)\n    watermark - when available memory goes bellow the watermark create a new chunk\n\n    sort() works by reading as records from the file into memory\n    and calling _sortChunk() on each chunk. In the process it gets\n    rid of unneeded fields if any. 
Once all the chunks have been sorted and\n    written to chunk files it calls _merge() to merge all the chunks into a\n    single sorted file.\n\n    Note, that sort() gets a key that contains field names, which it converts\n    into field indices for _sortChunk() becuase _sortChunk() doesn't need to know\n    the field name.\n\n    sort() figures out by itself how many chunk files to use by reading records\n    from the file until the low watermark value of availabel memory is hit and\n    then it sorts the current records, generates a chunk file, clears the sorted\n    records and starts on a new chunk.\n\n    The key field names are turned into indices", "id": "f17647:m0"}
{"signature": "def _mergeFiles(key, chunkCount, outputFile, fields):", "body": "title()<EOL>files = [FileRecordStream('<STR_LIT>' % i) for i in range(chunkCount)]<EOL>with FileRecordStream(outputFile, write=True, fields=fields) as o:<EOL><INDENT>files = [FileRecordStream('<STR_LIT>' % i) for i in range(chunkCount)]<EOL>records = [f.getNextRecord() for f in files]<EOL>while not all(r is None for r in records):<EOL><INDENT>indices = [i for i,r in enumerate(records) if r is not None]<EOL>records = [records[i] for i in indices]<EOL>files = [files[i] for i in indices]<EOL>r = min(records, key=itemgetter(*key))<EOL>o.appendRecord(r)<EOL>index = records.index(r)<EOL>records[index] = files[index].getNextRecord()<EOL><DEDENT><DEDENT>for i, f in enumerate(files):<EOL><INDENT>f.close()<EOL>os.remove('<STR_LIT>' % i)<EOL><DEDENT>", "docstring": "Merge sorted chunk files into a sorted output file\n\n    chunkCount - the number of available chunk files\n    outputFile the name of the sorted output file\n\n    _mergeFiles()", "id": "f17647:m2"}
{"signature": "def clearStats(self):", "body": "self._stats = None<EOL>", "docstring": "Resets stats collected so far.", "id": "f17648:c0:m13"}
{"signature": "def rewind(self):", "body": "<EOL>super(FileRecordStream, self).rewind()<EOL>self.close()<EOL>self._file = open(self._filename, self._mode)<EOL>self._reader = csv.reader(self._file, dialect=\"<STR_LIT>\")<EOL>self._reader.next()<EOL>self._reader.next()<EOL>self._reader.next()<EOL>self._recordCount = <NUM_LIT:0><EOL>", "docstring": "Put us back at the beginning of the file again.", "id": "f17648:c0:m4"}
{"signature": "def _updateSequenceInfo(self, r):", "body": "<EOL>newSequence = False<EOL>sequenceId = (r[self._sequenceIdIdx]<EOL>if self._sequenceIdIdx is not None else None)<EOL>if sequenceId != self._currSequence:<EOL><INDENT>if sequenceId in self._sequences:<EOL><INDENT>raise Exception('<STR_LIT>' %(sequenceId, r))<EOL><DEDENT>self._sequences.add(self._currSequence)<EOL>self._currSequence = sequenceId<EOL>if self._resetIdx:<EOL><INDENT>assert r[self._resetIdx] == <NUM_LIT:1><EOL><DEDENT>newSequence = True<EOL><DEDENT>else:<EOL><INDENT>reset = False<EOL>if self._resetIdx:<EOL><INDENT>reset = r[self._resetIdx]<EOL>if reset == <NUM_LIT:1>:<EOL><INDENT>newSequence = True<EOL><DEDENT><DEDENT><DEDENT>if not newSequence:<EOL><INDENT>if self._timeStampIdx and self._currTime is not None:<EOL><INDENT>t = r[self._timeStampIdx]<EOL>if t < self._currTime:<EOL><INDENT>raise Exception('<STR_LIT>' % r)<EOL><DEDENT><DEDENT><DEDENT>if self._timeStampIdx:<EOL><INDENT>self._currTime = r[self._timeStampIdx]<EOL><DEDENT>", "docstring": "Keep track of sequence and make sure time goes forward\n\n        Check if the current record is the beginning of a new sequence\n        A new sequence starts in 2 cases:\n\n        1. The sequence id changed (if there is a sequence id field)\n        2. The reset field is 1 (if there is a reset field)\n\n        Note that if there is no sequenceId field or resetId field then the entire\n        dataset is technically one big sequence. The function will not return True\n        for the first record in this case. This is Ok because it is important to\n        detect new sequences only when there are multiple sequences in the file.", "id": "f17648:c0:m20"}
{"signature": "def _getTotalLineCount(self):", "body": "<EOL>if self._mode == self._FILE_WRITE_MODE:<EOL><INDENT>self._file.flush()<EOL><DEDENT>return sum(<NUM_LIT:1> for line in open(self._filename, self._FILE_READ_MODE))<EOL>", "docstring": "Returns:  count of ALL lines in dataset, including header lines", "id": "f17648:c0:m22"}
{"signature": "def isCompleted(self):", "body": "return True<EOL>", "docstring": "Not implemented. CSV file is always considered completed.", "id": "f17648:c0:m16"}
{"signature": "def __exit__(self, yupe, value, traceback):", "body": "self.close()<EOL>", "docstring": "Context guard - exit\n\n        Ensures that the file is always closed at the end of the 'with' block.\n        Lets exceptions propagate.", "id": "f17648:c0:m28"}
{"signature": "def next(self):", "body": "record = self.getNextRecord()<EOL>if record is None:<EOL><INDENT>raise StopIteration<EOL><DEDENT>return record<EOL>", "docstring": "Implement the iterator protocol", "id": "f17648:c0:m30"}
{"signature": "def _getStartRow(self, bookmark):", "body": "bookMarkDict = json.loads(bookmark)<EOL>realpath = os.path.realpath(self._filename)<EOL>bookMarkFile = bookMarkDict.get('<STR_LIT>', None)<EOL>if bookMarkFile != realpath:<EOL><INDENT>print (\"<STR_LIT>\"<EOL>\"<STR_LIT>\") % (<EOL>realpath, bookMarkDict)<EOL>return <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>return bookMarkDict['<STR_LIT>']<EOL><DEDENT>", "docstring": "Extracts start row from the bookmark information", "id": "f17648:c0:m21"}
{"signature": "def getFieldNames(self):", "body": "return [f.name for f in self._fields]<EOL>", "docstring": ":returns: (list) field names associated with the data.", "id": "f17648:c0:m18"}
{"signature": "def getNextRecord(self, useCache=True):", "body": "assert self._file is not None<EOL>assert self._mode == self._FILE_READ_MODE<EOL>try:<EOL><INDENT>line = self._reader.next()<EOL><DEDENT>except StopIteration:<EOL><INDENT>if self.rewindAtEOF:<EOL><INDENT>if self._recordCount == <NUM_LIT:0>:<EOL><INDENT>raise Exception(\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % self._filename)<EOL><DEDENT>self.rewind()<EOL>line = self._reader.next()<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT><DEDENT>self._recordCount += <NUM_LIT:1><EOL>record = []<EOL>for i, f in enumerate(line):<EOL><INDENT>if f in self._missingValues:<EOL><INDENT>record.append(SENTINEL_VALUE_FOR_MISSING_DATA)<EOL><DEDENT>else:<EOL><INDENT>record.append(self._adapters[i](f))<EOL><DEDENT><DEDENT>return record<EOL>", "docstring": "Returns next available data record from the file.\n\n        :returns: a data row (a list or tuple) if available; None, if no more\n                  records in the table (End of Stream - EOS); empty sequence (list\n                  or tuple) when timing out while waiting for the next record.", "id": "f17648:c0:m5"}
{"signature": "def getBookmark(self):", "body": "if self._write and self._recordCount==<NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>rowDict = dict(filepath=os.path.realpath(self._filename),<EOL>currentRow=self._recordCount)<EOL>return json.dumps(rowDict)<EOL>", "docstring": "Gets a bookmark or anchor to the current position.\n\n:returns: an anchor to the current position in the data. Passing this\n          anchor to a constructor makes the current position to be the first\n          returned record.", "id": "f17648:c0:m8"}
{"signature": "def getDataRowCount(self):", "body": "numLines = self._getTotalLineCount()<EOL>if numLines == <NUM_LIT:0>:<EOL><INDENT>assert self._mode == self._FILE_WRITE_MODE and self._recordCount == <NUM_LIT:0><EOL>numDataRows = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>numDataRows = numLines - self._NUM_HEADER_ROWS<EOL><DEDENT>assert numDataRows >= <NUM_LIT:0><EOL>return numDataRows<EOL>", "docstring": ":returns: (int) count of data rows in dataset (excluding header lines)", "id": "f17648:c0:m24"}
{"signature": "def getError(self):", "body": "return None<EOL>", "docstring": "Not implemented. CSV file version does not provide storage for the error\ninformation", "id": "f17648:c0:m14"}
{"signature": "def getNextRecordIdx(self):", "body": "return self._recordCount<EOL>", "docstring": ":returns: (int) the index of the record that will be read next from\n          :meth:`~.FileRecordStream.getNextRecord`.", "id": "f17648:c0:m23"}
{"signature": "def __iter__(self):", "body": "return self<EOL>", "docstring": "Support for the iterator protocol. Return itself", "id": "f17648:c0:m29"}
{"signature": "def _getFuncPtrAndParams(self, funcName):", "body": "params = None<EOL>if isinstance(funcName, str):<EOL><INDENT>if funcName == '<STR_LIT>':<EOL><INDENT>fp = _aggr_sum<EOL><DEDENT>elif funcName == '<STR_LIT>':<EOL><INDENT>fp = _aggr_first<EOL><DEDENT>elif funcName == '<STR_LIT>':<EOL><INDENT>fp = _aggr_last<EOL><DEDENT>elif funcName == '<STR_LIT>':<EOL><INDENT>fp = _aggr_mean<EOL><DEDENT>elif funcName == '<STR_LIT>':<EOL><INDENT>fp = max<EOL><DEDENT>elif funcName == '<STR_LIT>':<EOL><INDENT>fp = min<EOL><DEDENT>elif funcName == '<STR_LIT>':<EOL><INDENT>fp = _aggr_mode<EOL><DEDENT>elif funcName.startswith('<STR_LIT>'):<EOL><INDENT>fp = _aggr_weighted_mean<EOL>paramsName = funcName[<NUM_LIT:6>:]<EOL>params = [f[<NUM_LIT:0>] for f in self._inputFields].index(paramsName)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>fp = funcName<EOL><DEDENT>return (fp, params)<EOL>", "docstring": "Given the name of an aggregation function, returns the function pointer\n        and param.\n\n        Parameters:\n        ------------------------------------------------------------------------\n        funcName:  a string (name of function) or funcPtr\n        retval:   (funcPtr, param)", "id": "f17650:c0:m2"}
{"signature": "def _getEndTime(self, t):", "body": "assert isinstance(t, datetime.datetime)<EOL>if self._aggTimeDelta:<EOL><INDENT>return t + self._aggTimeDelta<EOL><DEDENT>else:<EOL><INDENT>year = t.year + self._aggYears + (t.month - <NUM_LIT:1> + self._aggMonths) / <NUM_LIT:12><EOL>month = (t.month - <NUM_LIT:1> + self._aggMonths) % <NUM_LIT:12> + <NUM_LIT:1><EOL>return t.replace(year=year, month=month)<EOL><DEDENT>", "docstring": "Add the aggregation period to the input time t and return a datetime object\n\n        Years and months are handled as aspecial case due to leap years\n        and months with different number of dates. They can't be converted\n        to a strict timedelta because a period of 3 months will have different\n        durations actually. The solution is to just add the years and months\n        fields directly to the current time.\n\n        Other periods are converted to timedelta and just added to current time.", "id": "f17650:c0:m1"}
{"signature": "def generateDataset(aggregationInfo, inputFilename, outputFilename=None):", "body": "<EOL>inputFullPath = resource_filename(\"<STR_LIT>\", inputFilename)<EOL>inputObj = FileRecordStream(inputFullPath)<EOL>aggregator = Aggregator(aggregationInfo=aggregationInfo,<EOL>inputFields=inputObj.getFields())<EOL>if aggregator.isNullAggregation():<EOL><INDENT>return inputFullPath<EOL><DEDENT>if outputFilename is None:<EOL><INDENT>outputFilename = '<STR_LIT>' %os.path.splitext(os.path.basename(inputFullPath))[<NUM_LIT:0>]<EOL>timePeriods = '<STR_LIT>''<STR_LIT>'<EOL>for k in timePeriods.split():<EOL><INDENT>if aggregationInfo.get(k, <NUM_LIT:0>) > <NUM_LIT:0>:<EOL><INDENT>outputFilename += '<STR_LIT>' % (k, aggregationInfo[k])<EOL><DEDENT><DEDENT>outputFilename += '<STR_LIT>'<EOL>outputFilename = os.path.join(os.path.dirname(inputFullPath), outputFilename)<EOL><DEDENT>lockFilePath = outputFilename + '<STR_LIT>'<EOL>if os.path.isfile(outputFilename) oros.path.isfile(lockFilePath):<EOL><INDENT>while os.path.isfile(lockFilePath):<EOL><INDENT>print('<STR_LIT>' %lockFilePath)<EOL>time.sleep(<NUM_LIT:1>)<EOL><DEDENT>return outputFilename<EOL><DEDENT>lockFD = open(lockFilePath, '<STR_LIT:w>')<EOL>outputObj = FileRecordStream(streamID=outputFilename, write=True,<EOL>fields=inputObj.getFields())<EOL>while True:<EOL><INDENT>inRecord = inputObj.getNextRecord()<EOL>(aggRecord, aggBookmark) = aggregator.next(inRecord, None)<EOL>if aggRecord is None and inRecord is None:<EOL><INDENT>break<EOL><DEDENT>if aggRecord is not None:<EOL><INDENT>outputObj.appendRecord(aggRecord)<EOL><DEDENT><DEDENT>return outputFilename<EOL>", "docstring": "Generate a dataset of aggregated values\n\n    Parameters:\n    ----------------------------------------------------------------------------\n    aggregationInfo: a dictionary that contains the following entries\n      - fields: a list of pairs. Each pair is a field name and an\n        aggregation function (e.g. sum). 
The function will be used to aggregate\n        multiple values during the aggregation period.\n\n    aggregation period: 0 or more of unit=value fields; allowed units are:\n          [years months] |\n          [weeks days hours minutes seconds milliseconds microseconds]\n          NOTE: years and months are mutually-exclusive with the other units.\n                See getEndTime() and _aggregate() for more details.\n          Example1: years=1, months=6,\n          Example2: hours=1, minutes=30,\n          If none of the period fields are specified or if all that are specified\n          have values of 0, then aggregation will be suppressed, and the given\n          inputFile parameter value will be returned.\n\n    inputFilename: filename of the input dataset within examples/prediction/data\n\n    outputFilename: name for the output file. If not given, a name will be\n          generated based on the input filename and the aggregation params\n\n    retval: Name of the generated output file. This will be the same as the input\n        file name if no aggregation needed to be performed\n\n\n\n    If the input file contained a time field, sequence id field or reset field\n    that were not specified in aggregationInfo fields, those fields will be\n    added automatically with the following rules:\n\n    1. The order will be R, S, T, rest of the fields\n    2. The aggregation function for all will be to pick the first: lambda x: x[0]\n\n      Returns: the path of the aggregated data file if aggregation was performed\n        (in the same directory as the given input file); if aggregation did not\n        need to be performed, then the given inputFile argument value is returned.", "id": "f17650:m8"}
{"signature": "def _createAggregateRecord(self):", "body": "record = []<EOL>for i, (fieldIdx, aggFP, paramIdx) in enumerate(self._fields):<EOL><INDENT>if aggFP is None: <EOL><INDENT>continue<EOL><DEDENT>values = self._slice[i]<EOL>refIndex = None<EOL>if paramIdx is not None:<EOL><INDENT>record.append(aggFP(values, self._slice[paramIdx]))<EOL><DEDENT>else:<EOL><INDENT>record.append(aggFP(values))<EOL><DEDENT><DEDENT>return record<EOL>", "docstring": "Generate the aggregated output record\n\n        Parameters:\n        ------------------------------------------------------------------------\n        retval: outputRecord", "id": "f17650:c0:m3"}
{"signature": "def getFilename(aggregationInfo, inputFile):", "body": "<EOL>inputFile = resource_filename(\"<STR_LIT>\", inputFile)<EOL>a = defaultdict(lambda: <NUM_LIT:0>, aggregationInfo)<EOL>outputDir = os.path.dirname(inputFile)<EOL>outputFile = '<STR_LIT>' % os.path.splitext(os.path.basename(inputFile))[<NUM_LIT:0>]<EOL>noAggregation = True<EOL>timePeriods = '<STR_LIT>''<STR_LIT>'<EOL>for k in timePeriods.split():<EOL><INDENT>if a[k] > <NUM_LIT:0>:<EOL><INDENT>noAggregation = False<EOL>outputFile += '<STR_LIT>' % (k, a[k])<EOL><DEDENT><DEDENT>if noAggregation:<EOL><INDENT>return inputFile<EOL><DEDENT>outputFile += '<STR_LIT>'<EOL>outputFile = os.path.join(outputDir, outputFile)<EOL>return outputFile<EOL>", "docstring": "Generate the filename for aggregated dataset\n\n    The filename is based on the input filename and the\n    aggregation period.\n\n    Returns the inputFile if no aggregation required (aggregation\n    info has all 0's)", "id": "f17650:m9"}
{"signature": "@abstractmethod<EOL><INDENT>def close(self):<DEDENT>", "body": "", "docstring": "Close the stream", "id": "f17651:c1:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def getStats(self):<DEDENT>", "body": "", "docstring": ":returns: storage stats (like min and max values of the fields).", "id": "f17651:c1:m12"}
{"signature": "def getNextRecordDict(self):", "body": "values = self.getNextRecord()<EOL>if values is None:<EOL><INDENT>return None<EOL><DEDENT>if not values:<EOL><INDENT>return dict()<EOL><DEDENT>if self._modelRecordEncoder is None:<EOL><INDENT>self._modelRecordEncoder = ModelRecordEncoder(<EOL>fields=self.getFields(),<EOL>aggregationPeriod=self.getAggregationMonthsAndSeconds())<EOL><DEDENT>return self._modelRecordEncoder.encode(values)<EOL>", "docstring": "Returns next available data record from the storage as a dict, with the\n        keys being the field names. This also adds in some meta fields:\n\n          - ``_category``: The value from the category field (if any)\n          - ``_reset``: True if the reset field was True (if any)\n          - ``_sequenceId``: the value from the sequenceId field (if any)", "id": "f17651:c1:m4"}
{"signature": "def getSequenceIdFieldIdx(self):", "body": "return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.sequence)<EOL>", "docstring": ":returns: (int) index of the ``sequenceId`` field.", "id": "f17651:c1:m24"}
{"signature": "@abstractmethod<EOL><INDENT>def setCompleted(self, completed):<DEDENT>", "body": "", "docstring": "Marks the stream completed.\n\n:param completed: (bool) is completed?", "id": "f17651:c1:m19"}
{"signature": "def rewind(self):", "body": "self._sequenceId = -<NUM_LIT:1><EOL>", "docstring": "Put us back at the beginning of the file again", "id": "f17651:c0:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def appendRecords(self, records, progressCB=None):<DEDENT>", "body": "", "docstring": "Saves multiple records in the underlying storage. Should be implemented in\nsubclasses.\n\n:param records: (list) of objects to store\n:param progressCB: (func) called after each appension", "id": "f17651:c1:m8"}
{"signature": "def _getFieldIndexBySpecial(fields, special):", "body": "for i, field in enumerate(fields):<EOL><INDENT>if field.special == special:<EOL><INDENT>return i<EOL><DEDENT><DEDENT>return None<EOL>", "docstring": "Return index of the field matching the field meta special value.\n    :param fields: sequence of nupic.data.fieldmeta.FieldMetaInfo objects\n      representing the fields of a stream\n    :param special: one of the special field attribute values from\n      nupic.data.fieldmeta.FieldMetaSpecial\n    :returns: first zero-based index of the field tagged with the target field\n      meta special attribute; None if no such field", "id": "f17651:m0"}
{"signature": "def encode(self, inputRow):", "body": "<EOL>result = dict(zip(self._fieldNames, inputRow))<EOL>if self._categoryFieldIndex is not None:<EOL><INDENT>if isinstance(inputRow[self._categoryFieldIndex], int):<EOL><INDENT>result['<STR_LIT>'] = [inputRow[self._categoryFieldIndex]]<EOL><DEDENT>else:<EOL><INDENT>result['<STR_LIT>'] = (inputRow[self._categoryFieldIndex]<EOL>if inputRow[self._categoryFieldIndex]<EOL>else [None])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>result['<STR_LIT>'] = [None]<EOL><DEDENT>if self._resetFieldIndex is not None:<EOL><INDENT>result['<STR_LIT>'] = int(bool(inputRow[self._resetFieldIndex]))<EOL><DEDENT>else:<EOL><INDENT>result['<STR_LIT>'] = <NUM_LIT:0><EOL><DEDENT>if self._learningFieldIndex is not None:<EOL><INDENT>result['<STR_LIT>'] = int(bool(inputRow[self._learningFieldIndex]))<EOL><DEDENT>result['<STR_LIT>'] = None<EOL>if self._timestampFieldIndex is not None:<EOL><INDENT>result['<STR_LIT>'] = inputRow[self._timestampFieldIndex]<EOL>result['<STR_LIT>'] = self._computeTimestampRecordIdx(<EOL>inputRow[self._timestampFieldIndex])<EOL><DEDENT>else:<EOL><INDENT>result['<STR_LIT>'] = None<EOL><DEDENT>hasReset = self._resetFieldIndex is not None<EOL>hasSequenceId = self._sequenceFieldIndex is not None<EOL>if hasReset and not hasSequenceId:<EOL><INDENT>if result['<STR_LIT>']:<EOL><INDENT>self._sequenceId += <NUM_LIT:1><EOL><DEDENT>sequenceId = self._sequenceId<EOL><DEDENT>elif not hasReset and hasSequenceId:<EOL><INDENT>sequenceId = inputRow[self._sequenceFieldIndex]<EOL>result['<STR_LIT>'] = int(sequenceId != self._sequenceId)<EOL>self._sequenceId = sequenceId<EOL><DEDENT>elif hasReset and hasSequenceId:<EOL><INDENT>sequenceId = inputRow[self._sequenceFieldIndex]<EOL><DEDENT>else:<EOL><INDENT>sequenceId = <NUM_LIT:0><EOL><DEDENT>if sequenceId is not None:<EOL><INDENT>result['<STR_LIT>'] = hash(sequenceId)<EOL><DEDENT>else:<EOL><INDENT>result['<STR_LIT>'] = None<EOL><DEDENT>return result<EOL>", "docstring": "Encodes the given input 
row as a dict, with the\n        keys being the field names. This also adds in some meta fields:\n          '_category': The value from the category field (if any)\n          '_reset': True if the reset field was True (if any)\n          '_sequenceId': the value from the sequenceId field (if any)\n\n        :param inputRow: sequence of values corresponding to a single input metric\n          data row\n        :rtype: dict", "id": "f17651:c0:m2"}
{"signature": "def getResetFieldIdx(self):", "body": "return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.reset)<EOL>", "docstring": ":returns: (int) index of the ``reset`` field; ``None`` if no such field.", "id": "f17651:c1:m22"}
{"signature": "def getTimestampFieldIdx(self):", "body": "return _getFieldIndexBySpecial(self.getFields(), FieldMetaSpecial.timestamp)<EOL>", "docstring": ":returns: (int) index of the ``timestamp`` field.", "id": "f17651:c1:m23"}
{"signature": "def _computeTimestampRecordIdx(self, recordTS):", "body": "if self._aggregationPeriod is None:<EOL><INDENT>return None<EOL><DEDENT>if self._aggregationPeriod['<STR_LIT>'] > <NUM_LIT:0>:<EOL><INDENT>assert self._aggregationPeriod['<STR_LIT>'] == <NUM_LIT:0><EOL>result = int(<EOL>(recordTS.year * <NUM_LIT:12> + (recordTS.month-<NUM_LIT:1>)) /<EOL>self._aggregationPeriod['<STR_LIT>'])<EOL><DEDENT>elif self._aggregationPeriod['<STR_LIT>'] > <NUM_LIT:0>:<EOL><INDENT>delta = recordTS - datetime.datetime(year=<NUM_LIT:1>, month=<NUM_LIT:1>, day=<NUM_LIT:1>)<EOL>deltaSecs = delta.days * <NUM_LIT> * <NUM_LIT> * <NUM_LIT>+ delta.seconds+ delta.microseconds / <NUM_LIT><EOL>result = int(deltaSecs / self._aggregationPeriod['<STR_LIT>'])<EOL><DEDENT>else:<EOL><INDENT>result = None<EOL><DEDENT>return result<EOL>", "docstring": "Give the timestamp of a record (a datetime object), compute the record's\n        timestamp index - this is the timestamp divided by the aggregation period.\n\n\n        Parameters:\n        ------------------------------------------------------------------------\n        recordTS:  datetime instance\n        retval:    record timestamp index, or None if no aggregation period", "id": "f17651:c0:m3"}
{"signature": "def __init__(self, fields, aggregationPeriod=None):", "body": "if not fields:<EOL><INDENT>raise ValueError('<STR_LIT>' % (fields,))<EOL><DEDENT>self._fields = fields<EOL>self._aggregationPeriod = aggregationPeriod<EOL>self._sequenceId = -<NUM_LIT:1><EOL>self._fieldNames = tuple(f.name for f in fields)<EOL>self._categoryFieldIndex = _getFieldIndexBySpecial(<EOL>fields,<EOL>FieldMetaSpecial.category)<EOL>self._resetFieldIndex = _getFieldIndexBySpecial(<EOL>fields,<EOL>FieldMetaSpecial.reset)<EOL>self._sequenceFieldIndex = _getFieldIndexBySpecial(<EOL>fields,<EOL>FieldMetaSpecial.sequence)<EOL>self._timestampFieldIndex = _getFieldIndexBySpecial(<EOL>fields,<EOL>FieldMetaSpecial.timestamp)<EOL>self._learningFieldIndex = _getFieldIndexBySpecial(<EOL>fields,<EOL>FieldMetaSpecial.learning)<EOL>", "docstring": ":param fields: non-empty sequence of nupic.data.fieldmeta.FieldMetaInfo\n  objects corresponding to fields in input rows.\n:param aggregationPeriod: (dict) aggregation period of the record stream \n  containing 'months' and 'seconds'. The months is always an integer\n  and seconds is a floating point. Only one is allowed to be non-zero at a\n  time. If there is no aggregation associated with the stream, pass None.\n  Typically, a raw file or hbase stream will NOT have any aggregation info,\n  but subclasses of RecordStreamIface, like StreamReader, will and will\n  provide the aggregation period. This is used by the encode method to\n  assign a record number to a record given its timestamp and the aggregation\n  interval.", "id": "f17651:c0:m0"}
{"signature": "def getFieldMin(self, fieldName):", "body": "stats = self.getStats()<EOL>if stats == None:<EOL><INDENT>return None<EOL><DEDENT>minValues = stats.get('<STR_LIT>', None)<EOL>if minValues == None:<EOL><INDENT>return None<EOL><DEDENT>index = self.getFieldNames().index(fieldName)<EOL>return minValues[index]<EOL>", "docstring": "If underlying implementation does not support min/max stats collection,\nor if a field type does not support min/max (non scalars), the return\nvalue will be None.\n\n:param fieldName: (string) name of field to get min\n:returns: current minimum value for the field ``fieldName``.", "id": "f17651:c1:m13"}
{"signature": "@abstractmethod<EOL><INDENT>def getFieldNames(self):<DEDENT>", "body": "", "docstring": ":returns: (list) of field names associated with the data.", "id": "f17651:c1:m20"}
{"signature": "def getFieldMax(self, fieldName):", "body": "stats = self.getStats()<EOL>if stats == None:<EOL><INDENT>return None<EOL><DEDENT>maxValues = stats.get('<STR_LIT>', None)<EOL>if maxValues == None:<EOL><INDENT>return None<EOL><DEDENT>index = self.getFieldNames().index(fieldName)<EOL>return maxValues[index]<EOL>", "docstring": "If underlying implementation does not support min/max stats collection,\nor if a field type does not support min/max (non scalars), the return\nvalue will be None.\n\n:param fieldName: (string) name of field to get max\n:returns: current maximum value for the field ``fieldName``.", "id": "f17651:c1:m14"}
{"signature": "@abstractmethod<EOL><INDENT>def getFields(self):<DEDENT>", "body": "", "docstring": ":returns: (list) of :class:`nupic.data.fieldmeta.FieldMetaInfo` objects for\n    each field in the stream. Might be None, if that information is provided\n    externally (through the `Stream Definition <stream-def.html>`_, \n    for example).", "id": "f17651:c1:m21"}
{"signature": "@abstractmethod<EOL><INDENT>def getNextRecordIdx(self):<DEDENT>", "body": "", "docstring": ":returns: (int) index of the record that will be read next from\n          :meth:`getNextRecord`", "id": "f17651:c1:m6"}
{"signature": "@abstractmethod<EOL><INDENT>def setTimeout(self, timeout):<DEDENT>", "body": "", "docstring": "Set the read timeout in seconds\n\n:param timeout: (int or floating point)", "id": "f17651:c1:m27"}
{"signature": "def generateStats(filename, maxSamples = None,):", "body": "<EOL>statsCollectorMapping = {'<STR_LIT:float>':    FloatStatsCollector,<EOL>'<STR_LIT:int>':      IntStatsCollector,<EOL>'<STR_LIT:string>':   StringStatsCollector,<EOL>'<STR_LIT>': DateTimeStatsCollector,<EOL>'<STR_LIT:bool>':     BoolStatsCollector,<EOL>}<EOL>filename = resource_filename(\"<STR_LIT>\", filename)<EOL>print(\"<STR_LIT:*>\"*<NUM_LIT>)<EOL>print(\"<STR_LIT>\" % (filename,))<EOL>dataFile = FileRecordStream(filename)<EOL>statsCollectors = []<EOL>for fieldName, fieldType, fieldSpecial in dataFile.getFields():<EOL><INDENT>statsCollector =statsCollectorMapping[fieldType](fieldName, fieldType, fieldSpecial)<EOL>statsCollectors.append(statsCollector)<EOL><DEDENT>if maxSamples is None:<EOL><INDENT>maxSamples = <NUM_LIT><EOL><DEDENT>for i in range(maxSamples):<EOL><INDENT>record = dataFile.getNextRecord()<EOL>if record is None:<EOL><INDENT>break<EOL><DEDENT>for i, value in enumerate(record):<EOL><INDENT>statsCollectors[i].addValue(value)<EOL><DEDENT><DEDENT>stats = {}<EOL>for statsCollector in statsCollectors:<EOL><INDENT>statsCollector.getStats(stats)<EOL><DEDENT>if dataFile.getResetFieldIdx() is not None:<EOL><INDENT>resetFieldName,_,_ = dataFile.getFields()[dataFile.reset]<EOL>stats.pop(resetFieldName)<EOL><DEDENT>if VERBOSITY > <NUM_LIT:0>:<EOL><INDENT>pprint.pprint(stats)<EOL><DEDENT>return stats<EOL>", "docstring": "Collect statistics for each of the fields in the user input data file and\nreturn a stats dict object.\n\nParameters:\n------------------------------------------------------------------------------\nfilename:             The path and name of the data file.\nmaxSamples:           Upper bound on the number of rows to be processed\nretval:               A dictionary of dictionaries. 
The top level keys are the\n                      field names and the corresponding values are the statistics\n                      collected for the individual file.\n                      Example:\n                      {\n                        'consumption':{'min':0,'max':90,'mean':50,...},\n                        'gym':{'numDistinctCategories':10,...},\n                        ...\n                       }", "id": "f17653:m0"}
{"signature": "def getStats(self, stats):", "body": "BaseStatsCollector.getStats(self, stats)<EOL>sortedNumberList = sorted(self.valueList)<EOL>listLength = len(sortedNumberList)<EOL>min = sortedNumberList[<NUM_LIT:0>]<EOL>max = sortedNumberList[-<NUM_LIT:1>]<EOL>mean = numpy.mean(self.valueList)<EOL>median = sortedNumberList[int(<NUM_LIT:0.5>*listLength)]<EOL>percentile1st = sortedNumberList[int(<NUM_LIT>*listLength)]<EOL>percentile99th = sortedNumberList[int(<NUM_LIT>*listLength)]<EOL>differenceList =[(cur - prev) for prev, cur in zip(list(self.valueSet)[:-<NUM_LIT:1>],<EOL>list(self.valueSet)[<NUM_LIT:1>:])]<EOL>if min > max:<EOL><INDENT>print(self.fieldname, min, max, '<STR_LIT>')<EOL><DEDENT>meanResolution = numpy.mean(differenceList)<EOL>stats[self.fieldname]['<STR_LIT>'] = min<EOL>stats[self.fieldname]['<STR_LIT>'] = max<EOL>stats[self.fieldname]['<STR_LIT>'] = mean<EOL>stats[self.fieldname]['<STR_LIT>'] = median<EOL>stats[self.fieldname]['<STR_LIT>'] = percentile1st<EOL>stats[self.fieldname]['<STR_LIT>'] = percentile99th<EOL>stats[self.fieldname]['<STR_LIT>'] = meanResolution<EOL>passData = True<EOL>if passData:<EOL><INDENT>stats[self.fieldname]['<STR_LIT:data>'] = self.valueList<EOL><DEDENT>if VERBOSITY > <NUM_LIT:2>:<EOL><INDENT>print('<STR_LIT>')<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\", min)<EOL>print(\"<STR_LIT>\", max)<EOL>print(\"<STR_LIT>\", mean)<EOL>print(\"<STR_LIT>\", median)<EOL>print(\"<STR_LIT>\", percentile1st)<EOL>print(\"<STR_LIT>\", percentile99th)<EOL>print('<STR_LIT>')<EOL>print(\"<STR_LIT>\")<EOL>print(\"<STR_LIT>\", meanResolution)<EOL><DEDENT>if VERBOSITY > <NUM_LIT:3>:<EOL><INDENT>print('<STR_LIT>')<EOL>print(\"<STR_LIT>\")<EOL>counts, bins = numpy.histogram(self.valueList, new=True)<EOL>print(\"<STR_LIT>\", counts.tolist())<EOL>print(\"<STR_LIT>\", bins.tolist())<EOL><DEDENT>", "docstring": "Override of getStats()  in BaseStatsCollector\n\n            stats: A dictionary where all the stats are\n            outputted", 
"id": "f17653:c2:m0"}
{"signature": "def match(self, record):", "body": "for field, meta in self.filterDict.iteritems():<EOL><INDENT>index = meta['<STR_LIT:index>']<EOL>categories = meta['<STR_LIT>']<EOL>for category in categories:<EOL><INDENT>if not record:<EOL><INDENT>continue<EOL><DEDENT>if record[index].find(category) != -<NUM_LIT:1>:<EOL><INDENT>'''<STR_LIT>'''<EOL>return True<EOL><DEDENT><DEDENT><DEDENT>return False<EOL>", "docstring": "Returns True if the record matches any of the provided filters", "id": "f17655:c0:m1"}
{"signature": "@classmethod<EOL><INDENT>def isValid(cls, fieldDataType):<DEDENT>", "body": "return fieldDataType in cls._ALL<EOL>", "docstring": "Check a candidate value whether it's one of the valid field data types\n\n        :param fieldDataType: (string) candidate field data type\n        :returns: True if the candidate value is a legitimate field data type value;\n                  False if not", "id": "f17656:c1:m0"}
{"signature": "def escape(s):", "body": "if s is None:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>assert isinstance(s, basestring),\"<STR_LIT>\" % (basestring, type(s), s)<EOL>s = s.replace('<STR_LIT:\\\\>', '<STR_LIT>')<EOL>s = s.replace('<STR_LIT:\\n>', '<STR_LIT>')<EOL>s = s.replace('<STR_LIT:\\t>', '<STR_LIT>')<EOL>s = s.replace('<STR_LIT:U+002C>', '<STR_LIT:\\t>')<EOL>return s<EOL>", "docstring": "Escape commas, tabs, newlines and dashes in a string\n\nCommas are encoded as tabs.\n\n:param s: (string) to escape\n:returns: (string) escaped string", "id": "f17657:m6"}
{"signature": "def serializeTimestampNoMS(t):", "body": "return t.strftime(DATETIME_FORMATS[<NUM_LIT:2>])<EOL>", "docstring": "Turns a datetime object into a string ignoring milliseconds.\n\n:param t: (datetime.datetime)\n:return: (string) in default format (see \n         :const:`~nupic.data.utils.DATETIME_FORMATS` [2])", "id": "f17657:m2"}
{"signature": "def intOrNone(i):", "body": "if i.strip() == '<STR_LIT:None>' or i.strip() == '<STR_LIT>':<EOL><INDENT>return None<EOL><DEDENT>return int(i)<EOL>", "docstring": "Tries to convert input to a int input or returns ``None``.\n\n:param f: (object) thing to convert to a int\n:return: (int or ``None``)", "id": "f17657:m5"}
{"signature": "def parseBool(s):", "body": "l = s.lower()<EOL>if l in (\"<STR_LIT:true>\", \"<STR_LIT:t>\", \"<STR_LIT:1>\"):<EOL><INDENT>return True<EOL><DEDENT>if l in (\"<STR_LIT:false>\", \"<STR_LIT:f>\", \"<STR_LIT:0>\"):<EOL><INDENT>return False<EOL><DEDENT>raise Exception(\"<STR_LIT>\" % s)<EOL>", "docstring": "String to boolean\n\n:param s: (string)\n:return: (bool)", "id": "f17657:m3"}
{"signature": "def floatOrNone(f):", "body": "if f == '<STR_LIT:None>':<EOL><INDENT>return None<EOL><DEDENT>return float(f)<EOL>", "docstring": "Tries to convert input to a float input or returns ``None``.\n\n:param f: (object) thing to convert to a float\n:return: (float or ``None``)", "id": "f17657:m4"}
{"signature": "def numbersForBit(self, bit):", "body": "if bit >= self._n:<EOL><INDENT>raise IndexError(\"<STR_LIT>\")<EOL><DEDENT>numbers = set()<EOL>for index, pattern in self._patterns.items():<EOL><INDENT>if bit in pattern:<EOL><INDENT>numbers.add(index)<EOL><DEDENT><DEDENT>return numbers<EOL>", "docstring": "Return the set of pattern numbers that match a bit.\n\n@param bit (int) Index of bit\n\n@return (set) Indices of numbers", "id": "f17658:c0:m3"}
{"signature": "def prettyPrintPattern(self, bits, verbosity=<NUM_LIT:1>):", "body": "numberMap = self.numberMapForBits(bits)<EOL>text = \"<STR_LIT>\"<EOL>numberList = []<EOL>numberItems = sorted(iter(numberMap.items()),<EOL>key=lambda number_bits: len(number_bits[<NUM_LIT:1>]),<EOL>reverse=True)<EOL>for number, bits in numberItems:<EOL><INDENT>if verbosity > <NUM_LIT:2>:<EOL><INDENT>strBits = [str(n) for n in bits]<EOL>numberText = \"<STR_LIT>\".format(number, \"<STR_LIT:U+002C>\".join(strBits))<EOL><DEDENT>elif verbosity > <NUM_LIT:1>:<EOL><INDENT>numberText = \"<STR_LIT>\".format(number, len(bits))<EOL><DEDENT>else:<EOL><INDENT>numberText = str(number)<EOL><DEDENT>numberList.append(numberText)<EOL><DEDENT>text += \"<STR_LIT>\".format(\"<STR_LIT:U+002CU+0020>\".join(numberList))<EOL>return text<EOL>", "docstring": "Pretty print a pattern.\n\n@param bits      (set) Indices of on bits\n@param verbosity (int) Verbosity level\n\n@return (string) Pretty-printed text", "id": "f17658:c0:m5"}
{"signature": "def numberMapForBits(self, bits):", "body": "numberMap = dict()<EOL>for bit in bits:<EOL><INDENT>numbers = self.numbersForBit(bit)<EOL>for number in numbers:<EOL><INDENT>if not number in numberMap:<EOL><INDENT>numberMap[number] = set()<EOL><DEDENT>numberMap[number].add(bit)<EOL><DEDENT><DEDENT>return numberMap<EOL>", "docstring": "Return a map from number to matching on bits,\nfor all numbers that match a set of bits.\n\n@param bits (set) Indices of bits\n\n@return (dict) Mapping from number => on bits.", "id": "f17658:c0:m4"}
{"signature": "def _generate(self):", "body": "candidates = np.array(list(range(self._n)), np.uint32)<EOL>for i in range(self._num):<EOL><INDENT>self._random.shuffle(candidates)<EOL>pattern = candidates[<NUM_LIT:0>:self._getW()]<EOL>self._patterns[i] = set(pattern)<EOL><DEDENT>", "docstring": "Generates set of random patterns.", "id": "f17658:c0:m6"}
{"signature": "def _getW(self):", "body": "w = self._w<EOL>if type(w) is list:<EOL><INDENT>return w[self._random.getUInt32(len(w))]<EOL><DEDENT>else:<EOL><INDENT>return w<EOL><DEDENT>", "docstring": "Gets a value of `w` for use in generating a pattern.", "id": "f17658:c0:m7"}
{"signature": "def generateFromNumbers(self, numbers):", "body": "sequence = []<EOL>for number in numbers:<EOL><INDENT>if number == None:<EOL><INDENT>sequence.append(number)<EOL><DEDENT>else:<EOL><INDENT>pattern = self.patternMachine.get(number)<EOL>sequence.append(pattern)<EOL><DEDENT><DEDENT>return sequence<EOL>", "docstring": "Generate a sequence from a list of numbers.\n\nNote: Any `None` in the list of numbers is considered a reset.\n\n@param numbers (list) List of numbers\n\n@return (list) Generated sequence", "id": "f17659:c0:m1"}
{"signature": "def generateNumbers(self, numSequences, sequenceLength, sharedRange=None):", "body": "numbers = []<EOL>if sharedRange:<EOL><INDENT>sharedStart, sharedEnd = sharedRange<EOL>sharedLength = sharedEnd - sharedStart<EOL>sharedNumbers = range(numSequences * sequenceLength,<EOL>numSequences * sequenceLength + sharedLength)<EOL><DEDENT>for i in xrange(numSequences):<EOL><INDENT>start = i * sequenceLength<EOL>newNumbers = np.array(range(start, start + sequenceLength), np.uint32)<EOL>self._random.shuffle(newNumbers)<EOL>newNumbers = list(newNumbers)<EOL>if sharedRange is not None:<EOL><INDENT>newNumbers[sharedStart:sharedEnd] = sharedNumbers<EOL><DEDENT>numbers += newNumbers<EOL>numbers.append(None)<EOL><DEDENT>return numbers<EOL>", "docstring": "@param numSequences   (int)   Number of sequences to return,\n                              separated by None\n@param sequenceLength (int)   Length of each sequence\n@param sharedRange    (tuple) (start index, end index) indicating range of\n                              shared subsequence in each sequence\n                              (None if no shared subsequences)\n@return (list) Numbers representing sequences", "id": "f17659:c0:m4"}
{"signature": "def addValues(self, values):", "body": "for v in values:<EOL><INDENT>self.addValue(v)<EOL><DEDENT>", "docstring": "Add values to the field", "id": "f17660:c1:m2"}
{"signature": "def saveRecords(self, path='<STR_LIT>'):", "body": "numRecords = self.fields[<NUM_LIT:0>].numRecords<EOL>assert (all(field.numRecords==numRecords for field in self.fields))<EOL>import csv<EOL>with open(path+'<STR_LIT>', '<STR_LIT:wb>') as f:<EOL><INDENT>writer = csv.writer(f)<EOL>writer.writerow(self.getAllFieldNames())<EOL>writer.writerow(self.getAllDataTypes())<EOL>writer.writerow(self.getAllFlags())<EOL>writer.writerows(self.getAllRecords())<EOL><DEDENT>if self.verbosity><NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>', numRecords,'<STR_LIT>',path,'<STR_LIT>')<EOL><DEDENT>", "docstring": "Export all the records into a csv file in numenta format.\n\n        Example header format:\n        fieldName1    fieldName2    fieldName3\n        date          string        float\n        T             S\n\n        Parameters:\n        --------------------------------------------------------------------\n        path:      Relative path of the file to which the records are to be exported", "id": "f17660:c0:m25"}
{"signature": "def addValuesToField(self, i, numValues):", "body": "assert(len(self.fields)>i)<EOL>values = [self.addValueToField(i) for n in range(numValues)]<EOL>return values<EOL>", "docstring": "Add values to the field i.", "id": "f17660:c0:m14"}
{"signature": "def getAllDataTypes(self):", "body": "dataTypes = [field.dataType for field in self.fields]<EOL>return dataTypes<EOL>", "docstring": "Returns data types for all fields", "id": "f17660:c0:m23"}
{"signature": "def encodeRecord(self, record, toBeAdded=True):", "body": "encoding=[self.fields[i].encodeValue(record[i], toBeAdded) for i inrange(len(self.fields))]<EOL>return encoding<EOL>", "docstring": "Encode a record as a sparse distributed representation\n        Parameters:\n        --------------------------------------------------------------------\n        record:        Record to be encoded\n        toBeAdded:     Whether the encodings corresponding to the record are added to\n                       the corresponding fields", "id": "f17660:c0:m11"}
{"signature": "def addMultipleFields(self, fieldsInfo):", "body": "assert all(x in field for x in ['<STR_LIT:name>', '<STR_LIT>', '<STR_LIT>'] for fieldin fieldsInfo)<EOL>for spec in fieldsInfo:<EOL><INDENT>self.addField(spec.pop('<STR_LIT:name>'), spec.pop('<STR_LIT>'), spec.pop('<STR_LIT>'))<EOL><DEDENT>", "docstring": "Add multiple fields to the dataset.\n        Parameters:\n        -------------------------------------------------------------------\n        fieldsInfo:       A list of dictionaries, containing a field name, specs for\n                          the data classes and encoder params for the corresponding\n                          field.", "id": "f17660:c0:m4"}
{"signature": "def addValueToField(self, i, value=None):", "body": "assert(len(self.fields)>i)<EOL>if value is None:<EOL><INDENT>value = self.fields[i].dataClass.getNext()<EOL>self.fields[i].addValue(value)<EOL>return value<EOL><DEDENT>else: self.fields[i].addValue(value)<EOL>", "docstring": "Add 'value' to the field i.\n        Parameters:\n        --------------------------------------------------------------------\n        value:       value to be added\n        i:           value is added to field i", "id": "f17660:c0:m13"}
{"signature": "def addField(self, name, fieldParams, encoderParams):", "body": "assert fieldParams is not None and'<STR_LIT:type>' in fieldParams<EOL>dataClassName = fieldParams.pop('<STR_LIT:type>')<EOL>try:<EOL><INDENT>dataClass=eval(dataClassName)(fieldParams)<EOL><DEDENT>except TypeError as e:<EOL><INDENT>print((\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\" % (dataClass, fieldParams)))<EOL>raise<EOL><DEDENT>encoderParams['<STR_LIT>']=dataClass<EOL>encoderParams['<STR_LIT>']=dataClassName<EOL>fieldIndex = self.defineField(name, encoderParams)<EOL>", "docstring": "Add a single field to the dataset.\n        Parameters:\n        -------------------------------------------------------------------\n        name:             The user-specified name of the field\n        fieldSpec:        A list of one or more dictionaries specifying parameters\n                          to be used for dataClass initialization. Each dict must\n                          contain the key 'type' that specifies a distribution for\n                          the values in this field\n        encoderParams:    Parameters for the field encoder", "id": "f17660:c0:m3"}
{"signature": "def addValue(self, value):", "body": "self.values.append(value)<EOL>self.numRecords+=<NUM_LIT:1><EOL>", "docstring": "Add value to the field", "id": "f17660:c1:m3"}
{"signature": "def getTotaln(self):", "body": "n = sum([field.n for field in self.fields])<EOL>return n<EOL>", "docstring": "Returns the cumulative n for all the fields in the dataset", "id": "f17660:c0:m17"}
{"signature": "def __init__(self, name, encoderSpec):", "body": "self.name=name<EOL>self.n, self.w = (<NUM_LIT:100>, <NUM_LIT:15>)<EOL>self.encoderType,self.dataType,self.dataClassName  = (None, None, None)<EOL>self.flag='<STR_LIT>'<EOL>self.isPredictedField=False<EOL>if encoderSpec is not None:<EOL><INDENT>if '<STR_LIT:n>' in encoderSpec: self.n = encoderSpec.pop('<STR_LIT:n>')<EOL>if '<STR_LIT:w>' in encoderSpec: self.w = encoderSpec.pop('<STR_LIT:w>')<EOL>if '<STR_LIT>' in encoderSpec: self.flag = encoderSpec.pop('<STR_LIT>')<EOL>if '<STR_LIT>' in encoderSpec: self.isPredictedField= encoderSpec.pop('<STR_LIT>')<EOL>if '<STR_LIT>' in encoderSpec: self.dataClass= encoderSpec.pop('<STR_LIT>')<EOL>if '<STR_LIT>' in encoderSpec: self.dataClassName= encoderSpec.pop('<STR_LIT>')<EOL>if '<STR_LIT>' in encoderSpec: self.dataType = encoderSpec.pop('<STR_LIT>')<EOL>if '<STR_LIT>' in encoderSpec: self.encoderType= encoderSpec.pop('<STR_LIT>')<EOL><DEDENT>if self.dataType is None and self.encoderType is None:<EOL><INDENT>raise RuntimeError('<STR_LIT>')<EOL><DEDENT>assert(self.dataType is not None or self.encoderType is not None)<EOL>if self.dataType is None or self.encoderType is None:<EOL><INDENT>self._setTypes(encoderSpec)<EOL><DEDENT>self._initializeEncoders(encoderSpec)<EOL>self.encodings=[]<EOL>self.values=[]<EOL>self.numRecords=<NUM_LIT:0><EOL>self.numEncodings=<NUM_LIT:0><EOL>", "docstring": "Initialize a field with various parameters such as n, w, flag, dataType,\n        encoderType, and tag predicted field.", "id": "f17660:c1:m0"}
{"signature": "def defineField(self, name, encoderParams=None):", "body": "self.fields.append(_field(name, encoderParams))<EOL>return len(self.fields)-<NUM_LIT:1><EOL>", "docstring": "Initialize field using relevant encoder parameters.\n        Parameters:\n        -------------------------------------------------------------------\n        name:                 Field name\n        encoderParams:        Parameters for the encoder.\n\n        Returns the index of the field", "id": "f17660:c0:m5"}
{"signature": "def encodeAllRecords(self, records=None, toBeAdded=True):", "body": "if records is None:<EOL><INDENT>records = self.getAllRecords()<EOL><DEDENT>if self.verbosity><NUM_LIT:0>: print('<STR_LIT>', len(records), '<STR_LIT>')<EOL>encodings = [self.encodeRecord(record, toBeAdded) for record in records]<EOL>return encodings<EOL>", "docstring": "Encodes a list of records.\n        Parameters:\n        --------------------------------------------------------------------\n        records:      One or more records. (i,j)th element of this 2D array\n                      specifies the value at field j of record i.\n                      If unspecified, records previously generated and stored are\n                      used.\n        toBeAdded:    Whether the encodings corresponding to the record are added to\n                      the corresponding fields", "id": "f17660:c0:m12"}
{"signature": "def getAllFieldNames(self):", "body": "names = [field.name for field in self.fields]<EOL>return names<EOL>", "docstring": "Returns all field names", "id": "f17660:c0:m21"}
{"signature": "def copy(reader, writer, start, stop, insertLocation=None, tsCol=None):", "body": "assert stop >= start<EOL>startRows = []<EOL>copyRows = []<EOL>ts = None<EOL>inc = None<EOL>if tsCol is None:<EOL><INDENT>tsCol = reader.getTimestampFieldIdx()<EOL><DEDENT>for i, row in enumerate(reader):<EOL><INDENT>if ts is None:<EOL><INDENT>ts = row[tsCol]<EOL><DEDENT>elif inc is None:<EOL><INDENT>inc = row[tsCol] - ts<EOL><DEDENT>if i >= start and i <= stop:<EOL><INDENT>copyRows.append(row)<EOL><DEDENT>startRows.append(row)<EOL><DEDENT>if insertLocation is None:<EOL><INDENT>insertLocation = stop + <NUM_LIT:1><EOL><DEDENT>startRows[insertLocation:insertLocation] = copyRows<EOL>for row in startRows:<EOL><INDENT>row[tsCol] = ts<EOL>writer.appendRecord(row)<EOL>ts += inc<EOL><DEDENT>", "docstring": "Copies a range of values to a new location in the data set.\n\n    Args:\n      reader: A FileRecordStream object with input data.\n      writer: A FileRecordStream object to write output data to.\n      start: The first row in the range to copy.\n      stop: The last row in the range to copy.\n      insertLocation: The location to insert the copied range. If not specified,\n          the range is inserted immediately following itself.", "id": "f17661:m2"}
{"signature": "def sample(reader, writer, n, start=None, stop=None, tsCol=None,<EOL>writeSampleOnly=True):", "body": "rows = list(reader)<EOL>if tsCol is not None:<EOL><INDENT>ts = rows[<NUM_LIT:0>][tsCol]<EOL>inc = rows[<NUM_LIT:1>][tsCol] - ts<EOL><DEDENT>if start is None:<EOL><INDENT>start = <NUM_LIT:0><EOL><DEDENT>if stop is None:<EOL><INDENT>stop = len(rows) - <NUM_LIT:1><EOL><DEDENT>initialN = stop - start + <NUM_LIT:1><EOL>numDeletes =  initialN - n<EOL>for i in range(numDeletes):<EOL><INDENT>delIndex = random.randint(start, stop - i)<EOL>del rows[delIndex]<EOL><DEDENT>if writeSampleOnly:<EOL><INDENT>rows = rows[start:start + n]<EOL><DEDENT>if tsCol is not None:<EOL><INDENT>ts = rows[<NUM_LIT:0>][tsCol]<EOL><DEDENT>for row in rows:<EOL><INDENT>if tsCol is not None:<EOL><INDENT>row[tsCol] = ts<EOL>ts += inc<EOL><DEDENT>writer.appendRecord(row)<EOL><DEDENT>", "docstring": "Samples n rows.\n\n    Args:\n      reader: A FileRecordStream object with input data.\n      writer: A FileRecordStream object to write output data to.\n      n: The number of elements to sample.\n      start: The first row in the range to sample from.\n      stop: The last row in the range to sample from.\n      tsCol: If specified, the timestamp column to update.\n      writeSampleOnly: If False, the rows before start are written before the\n          sample and the rows after stop are written after the sample.", "id": "f17661:m3"}
{"signature": "def __init__(self):", "body": "", "docstring": "A distribution is a set of values with certain statistical properties\n\n        Methods/properties that must be implemented by subclasses\n        - getNext() -- Returns the next value for the distribution\n        - getData(n) -- Returns n values for the distribution\n        - getDescription() -- Returns a dict of parameters pertinent to the\n          distribution, if any as well as state variables.", "id": "f17663:c0:m0"}
{"signature": "def getData(self, n):", "body": "records = [self.getNext() for x in range(n)]<EOL>return records<EOL>", "docstring": "Returns the next n values for the distribution as a list.", "id": "f17663:c0:m2"}
{"signature": "def _cacheSequenceInfoType(self):", "body": "hasReset = self.resetFieldName is not None<EOL>hasSequenceId = self.sequenceIdFieldName is not None<EOL>if hasReset and not hasSequenceId:<EOL><INDENT>self._sequenceInfoType = self.SEQUENCEINFO_RESET_ONLY<EOL>self._prevSequenceId = <NUM_LIT:0><EOL><DEDENT>elif not hasReset and hasSequenceId:<EOL><INDENT>self._sequenceInfoType = self.SEQUENCEINFO_SEQUENCEID_ONLY<EOL>self._prevSequenceId = None<EOL><DEDENT>elif hasReset and hasSequenceId:<EOL><INDENT>self._sequenceInfoType = self.SEQUENCEINFO_BOTH<EOL><DEDENT>else:<EOL><INDENT>self._sequenceInfoType = self.SEQUENCEINFO_NONE<EOL><DEDENT>", "docstring": "Figure out whether reset, sequenceId,\n        both or neither are present in the data.\n        Compute once instead of every time.\n\n        Taken from filesource.py", "id": "f17664:c0:m1"}
{"signature": "def shift(self, modelResult):", "body": "inferencesToWrite = {}<EOL>if self._inferenceBuffer is None:<EOL><INDENT>maxDelay = InferenceElement.getMaxDelay(modelResult.inferences)<EOL>self._inferenceBuffer = collections.deque(maxlen=maxDelay + <NUM_LIT:1>)<EOL><DEDENT>self._inferenceBuffer.appendleft(copy.deepcopy(modelResult.inferences))<EOL>for inferenceElement, inference in modelResult.inferences.iteritems():<EOL><INDENT>if isinstance(inference, dict):<EOL><INDENT>inferencesToWrite[inferenceElement] = {}<EOL>for key, _ in inference.iteritems():<EOL><INDENT>delay = InferenceElement.getTemporalDelay(inferenceElement, key)<EOL>if len(self._inferenceBuffer) > delay:<EOL><INDENT>prevInference = self._inferenceBuffer[delay][inferenceElement][key]<EOL>inferencesToWrite[inferenceElement][key] = prevInference<EOL><DEDENT>else:<EOL><INDENT>inferencesToWrite[inferenceElement][key] = None<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>delay = InferenceElement.getTemporalDelay(inferenceElement)<EOL>if len(self._inferenceBuffer) > delay:<EOL><INDENT>inferencesToWrite[inferenceElement] = (<EOL>self._inferenceBuffer[delay][inferenceElement])<EOL><DEDENT>else:<EOL><INDENT>if type(inference) in (list, tuple):<EOL><INDENT>inferencesToWrite[inferenceElement] = [None] * len(inference)<EOL><DEDENT>else:<EOL><INDENT>inferencesToWrite[inferenceElement] = None<EOL><DEDENT><DEDENT><DEDENT><DEDENT>shiftedResult = ModelResult(rawInput=modelResult.rawInput,<EOL>sensorInput=modelResult.sensorInput,<EOL>inferences=inferencesToWrite,<EOL>metrics=modelResult.metrics,<EOL>predictedFieldIdx=modelResult.predictedFieldIdx,<EOL>predictedFieldName=modelResult.predictedFieldName)<EOL>return shiftedResult<EOL>", "docstring": "Shift the model result and return the new instance.\n\n        Queues up the T(i+1) prediction value and emits a T(i)\n        input/prediction pair, if possible. 
E.g., if the previous T(i-1)\n        iteration was learn-only, then we would not have a T(i) prediction in our\n        FIFO and would not be able to emit a meaningful input/prediction pair.\n\n        :param modelResult: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult`\n                            instance to shift.\n        :return: A :class:`~.nupic.frameworks.opf.opf_utils.ModelResult` instance that\n                 has been shifted", "id": "f17665:c0:m1"}
{"signature": "def loadJsonValueFromFile(inputFilePath):", "body": "with open(inputFilePath) as fileObj:<EOL><INDENT>value = json.load(fileObj)<EOL><DEDENT>return value<EOL>", "docstring": "Loads a json value from a file and converts it to the corresponding python\n    object.\n\n    inputFilePath:\n                    Path of the json file;\n\n    Returns:\n                    python value that represents the loaded json value", "id": "f17666:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def write(self, proto):<DEDENT>", "body": "pass<EOL>", "docstring": "Write obj instance to Cap'n Proto object\n\n.. warning: This is an abstract method.  Per abc protocol, attempts to\n            subclass without overriding will fail.\n\n:param proto: Cap'n Proto obj", "id": "f17667:c0:m2"}
{"signature": "def writeToFile(self, f, packed=True):", "body": "<EOL>schema = self.getSchema()<EOL>proto = schema.new_message()<EOL>self.write(proto)<EOL>if packed:<EOL><INDENT>proto.write_packed(f)<EOL><DEDENT>else:<EOL><INDENT>proto.write(f)<EOL><DEDENT>", "docstring": "Write serialized object to file.\n\n:param f: output file\n:param packed: If true, will pack contents.", "id": "f17667:c0:m4"}
{"signature": "def mostLikely(self, pred):", "body": "if len(pred) == <NUM_LIT:1>:<EOL><INDENT>return list(pred.keys())[<NUM_LIT:0>]<EOL><DEDENT>mostLikelyOutcome = None<EOL>maxProbability = <NUM_LIT:0><EOL>for prediction, probability in list(pred.items()):<EOL><INDENT>if probability > maxProbability:<EOL><INDENT>mostLikelyOutcome = prediction<EOL>maxProbability = probability<EOL><DEDENT><DEDENT>return mostLikelyOutcome<EOL>", "docstring": "Helper function to return a scalar value representing the most\n            likely outcome given a probability distribution", "id": "f17676:c12:m7"}
{"signature": "@classmethod<EOL><INDENT>def getInferenceTypeFromLabel(cls, label):<DEDENT>", "body": "infType, _, _= label.partition(cls._LABEL_SEPARATOR)<EOL>if not InferenceType.validate(infType):<EOL><INDENT>return None<EOL><DEDENT>return infType<EOL>", "docstring": "Extracts the PredictionKind (temporal vs. nontemporal) from the given\nmetric label.\n\n:param label: (string) for a metric spec generated by \n       :meth:`getMetricLabel`\n\n:returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)", "id": "f17676:c0:m3"}
{"signature": "def addInstance(self, groundTruth, prediction, record = None, result = None):", "body": "self.value = self.avg(prediction)<EOL>", "docstring": "Compute and store metric value", "id": "f17676:c10:m1"}
{"signature": "def __init__(self, windowSize = None):", "body": "self._windowSize = windowSize<EOL>self._countDict = dict()<EOL>self._history = deque([])<EOL>", "docstring": ":param windowSize:             The number of values that are used to compute the\n                        moving average", "id": "f17676:c1:m0"}
{"signature": "def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result = None):", "body": "<EOL>if self.disabled:<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>if historyBuffer is not None:<EOL><INDENT>historyBuffer.append((groundTruth, prediction[<NUM_LIT:0>]))<EOL>if len(historyBuffer) > self.spec.params[\"<STR_LIT>\"] :<EOL><INDENT>historyBuffer.popleft()<EOL><DEDENT><DEDENT>return <NUM_LIT:0><EOL>", "docstring": "Accumulate history of groundTruth and \"prediction\" values.\n\nFor this metric, groundTruth is the actual category and \"prediction\" is a\ndict containing one top-level item with a key of 0 (meaning this is the\n0-step classificaton) and a value which is another dict, which contains the\nprobability for each category as output from the classifier. For example,\nthis is what \"prediction\" would be if the classifier said that category 0\nhad a 0.6 probability and category 1 had a 0.4 probability: {0:0.6, 1: 0.4}", "id": "f17676:c18:m0"}
{"signature": "def __init__(self, metricSpec):", "body": "<EOL>self.id = None<EOL>self.verbosity = <NUM_LIT:0><EOL>self.window = -<NUM_LIT:1><EOL>self.history = None<EOL>self.accumulatedError = <NUM_LIT:0><EOL>self.aggregateError = None<EOL>self.steps = <NUM_LIT:0><EOL>self.spec = metricSpec<EOL>self.disabled = False<EOL>self._predictionSteps = [<NUM_LIT:0>]<EOL>self._groundTruthHistory = deque([])<EOL>self._subErrorMetrics = None<EOL>self._maxRecords = None<EOL>if metricSpec is not None and metricSpec.params is not None:<EOL><INDENT>self.id = metricSpec.params.get('<STR_LIT:id>', None)<EOL>self._predictionSteps = metricSpec.params.get('<STR_LIT>', [<NUM_LIT:0>])<EOL>if not hasattr(self._predictionSteps, '<STR_LIT>'):<EOL><INDENT>self._predictionSteps = [self._predictionSteps]<EOL><DEDENT>self.verbosity = metricSpec.params.get('<STR_LIT>', <NUM_LIT:0>)<EOL>self._maxRecords = metricSpec.params.get('<STR_LIT>', None)<EOL>if '<STR_LIT>' in metricSpec.params:<EOL><INDENT>assert metricSpec.params['<STR_LIT>'] >= <NUM_LIT:1><EOL>self.history = deque([])<EOL>self.window = metricSpec.params['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' in metricSpec.params:<EOL><INDENT>self._subErrorMetrics = []<EOL>for step in self._predictionSteps:<EOL><INDENT>subSpec = copy.deepcopy(metricSpec)<EOL>subSpec.params.pop('<STR_LIT>', None)<EOL>subSpec.params.pop('<STR_LIT>')<EOL>subSpec.metric = metricSpec.params['<STR_LIT>']<EOL>self._subErrorMetrics.append(getModule(subSpec))<EOL><DEDENT><DEDENT><DEDENT>", "docstring": "Initialize this metric\n\n        If the params contains the key 'errorMetric', then that is the name of\n        another metric to which we will pass a modified groundTruth and prediction\n        to from our addInstance() method. For example, we may compute a moving mean\n        on the groundTruth and then pass that to the AbsoluteAveError metric", "id": "f17676:c3:m2"}
{"signature": "@abstractmethod<EOL><INDENT>def getMetric(self):<DEDENT>", "body": "", "docstring": "``stats`` is expected to contain further information relevant to the given \nmetric, for example the number of timesteps represented in the current \nmeasurement. All stats are implementation defined, and ``stats`` can be \n``None``.\n\n:returns: (dict) representing data from the metric\n   ::\n\n       {value : <current measurement>, \"stats\" : {<stat> : <value> ...}}", "id": "f17676:c2:m2"}
{"signature": "def aggregate(self, accumulatedError, historyBuffer, steps):", "body": "", "docstring": "Updates the final aggregated score error given the prediction and the ground \ntruth.\n\n:param accumulatedError: The total accumulated score from the previous\n       predictions (possibly over some finite window)\n\n:param historyBuffer: A buffer of the last <self.window> ground truth values\n       that have been observed. If ``historyBuffer`` = None,  it means that \n       no history is being kept.\n\n:param steps: (int) The total number of (groundTruth, prediction) pairs that \n       have been passed to the metric. This does not include pairs where \n       ``groundTruth = SENTINEL_VALUE_FOR_MISSING_DATA``\n\n:returns: The new aggregate (final) error measure.", "id": "f17676:c3:m1"}
{"signature": "def accumulate(self, groundTruth, prediction, accumulatedError, historyBuffer, result):", "body": "", "docstring": "Updates the accumulated error given the prediction and the\nground truth.\n\n:param groundTruth: Actual value that is observed for the current timestep\n\n:param prediction: Value predicted by the network for the given timestep\n\n:param accumulatedError: The total accumulated score from the previous\n       predictions (possibly over some finite window)\n\n:param historyBuffer: A buffer of the last <self.window> ground truth values\n       that have been observed.\n\n       If historyBuffer = None,  it means that no history is being kept.\n\n:param result: An ModelResult class (see opf_utils.py), used for advanced\n       metric calculation (e.g., MetricNegativeLogLikelihood)\n\n:returns: The new accumulated error. That is:\n\n    .. code-block:: python\n\n       self.accumulatedError = self.accumulate(\n         groundTruth, predictions, accumulatedError\n       )\n\n    ``historyBuffer`` should also be updated in this method.\n    ``self.spec.params[\"window\"]`` indicates the maximum size of the window.", "id": "f17676:c3:m0"}
{"signature": "def getMetric(self):", "body": "return {\"<STR_LIT:value>\": self.value}<EOL>", "docstring": "Return the metric value", "id": "f17676:c10:m2"}
{"signature": "@abstractmethod<EOL><INDENT>def addInstance(self, groundTruth, prediction, record = None, result = None):<DEDENT>", "body": "", "docstring": "Add one instance consisting of ground truth and a prediction.\n\n:param groundTruth:\n  The actual measured value at the current timestep\n\n:param prediction:\n  The value predicted by the network at the current timestep\n\n:param record: the raw input record as fed to \n       :meth:`~nupic.frameworks.opf.model.Model.run` by the user. The \n       typical usage is to feed a record to that method and get a \n       :class:`~nupic.frameworks.opf.opf_utils.ModelResult`. Then you pass \n       :class:`~nupic.frameworks.opf.opf_utils.ModelResult`.rawInput into \n       this function as the record parameter.\n\n:param result: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`) the\n       result of running a row of data through an OPF model\n\n:returns:\n    The average error as computed over the metric's window size", "id": "f17676:c2:m1"}
{"signature": "def htmPredictionModelControlDisableSPLearningCb(htmPredictionModel):", "body": "assert isinstance(htmPredictionModel, HTMPredictionModel)<EOL>htmPredictionModel._getSPRegion().setParameter('<STR_LIT>', False)<EOL>return<EOL>", "docstring": "Disables learning in the HTMPredictionModel's Spatial Pooler, while\n    retaining the ability to re-enable SP learning in the future.\n\n    See also: htmPredictionModelControlEnableSPLearningCb.\n    See also: model_callbacks.modelControlFinishLearningCb.\n\n    htmPredictionModel:  pointer to a HTMPredictionModel instance\n\n    Returns: nothing", "id": "f17677:m1"}
{"signature": "def __init__(self, filePath):", "body": "self.__filePath = filePath<EOL>return<EOL>", "docstring": "filePath: path of file where TM __init__ args are to be saved", "id": "f17677:c1:m0"}
{"signature": "def htmPredictionModelControlDisableTPLearningCb(htmPredictionModel):", "body": "assert isinstance(htmPredictionModel, HTMPredictionModel)<EOL>htmPredictionModel._getTPRegion().setParameter('<STR_LIT>', False)<EOL>return<EOL>", "docstring": "Disables learning in the HTMPredictionModel's Temporal Pooler, while\n    retaining the ability to re-enable TM learning in the future.\n\n    See also: htmPredictionModelControlEnableTPLearningCb.\n    See also: model_callbacks.modelControlFinishLearningCb.\n\n    htmPredictionModel:  pointer to a HTMPredictionModel instance\n\n    Returns: nothing", "id": "f17677:m3"}
{"signature": "def _isCheckpointDir(checkpointDir):", "body": "lastSegment = os.path.split(checkpointDir)[<NUM_LIT:1>]<EOL>if lastSegment[<NUM_LIT:0>] == '<STR_LIT:.>':<EOL><INDENT>return False<EOL><DEDENT>if not checkpointDir.endswith(g_defaultCheckpointExtension):<EOL><INDENT>return False<EOL><DEDENT>if not os.path.isdir(checkpointDir):<EOL><INDENT>return False<EOL><DEDENT>return True<EOL>", "docstring": "Return true iff checkpointDir appears to be a checkpoint directory.", "id": "f17678:m10"}
{"signature": "def _runExperimentImpl(options, model=None):", "body": "json_helpers.validate(options.privateOptions,<EOL>schemaDict=g_parsedPrivateCommandLineOptionsSchema)<EOL>experimentDir = options.experimentDir<EOL>descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(<EOL>experimentDir)<EOL>expIface = helpers.getExperimentDescriptionInterfaceFromModule(<EOL>descriptionPyModule)<EOL>if options.privateOptions['<STR_LIT>']:<EOL><INDENT>_printAvailableCheckpoints(experimentDir)<EOL>return None<EOL><DEDENT>experimentTasks = expIface.getModelControl().get('<STR_LIT>', [])<EOL>if (len(experimentTasks) == <NUM_LIT:0> and<EOL>expIface.getModelControl()['<STR_LIT>'] == OpfEnvironment.Nupic):<EOL><INDENT>expIface.convertNupicEnvToOPF()<EOL>experimentTasks = expIface.getModelControl().get('<STR_LIT>', [])<EOL><DEDENT>expIface.normalizeStreamSources()<EOL>newSerialization = options.privateOptions['<STR_LIT>']<EOL>if options.privateOptions['<STR_LIT>']:<EOL><INDENT>print(\"<STR_LIT>\")<EOL>for label in [t['<STR_LIT>'] for t in experimentTasks]:<EOL><INDENT>print(\"<STR_LIT:\\t>\", label)<EOL><DEDENT>return None<EOL><DEDENT>if options.privateOptions['<STR_LIT>']:<EOL><INDENT>assert model is None<EOL>checkpointName = options.privateOptions['<STR_LIT>']<EOL>model = ModelFactory.loadFromCheckpoint(<EOL>savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName),<EOL>newSerialization=newSerialization)<EOL><DEDENT>elif model is not None:<EOL><INDENT>print(\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>modelDescription = expIface.getModelDescription()<EOL>model = ModelFactory.create(modelDescription)<EOL><DEDENT>if options.privateOptions['<STR_LIT>']:<EOL><INDENT>checkpointName = options.privateOptions['<STR_LIT>']<EOL>_saveModel(model=model,<EOL>experimentDir=experimentDir,<EOL>checkpointLabel=checkpointName,<EOL>newSerialization=newSerialization)<EOL>return model<EOL><DEDENT>taskIndexList = list(range(len(experimentTasks)))<EOL>customTaskExecutionLabelsList 
= options.privateOptions['<STR_LIT>']<EOL>if customTaskExecutionLabelsList:<EOL><INDENT>taskLabelsList = [t['<STR_LIT>'] for t in experimentTasks]<EOL>taskLabelsSet = set(taskLabelsList)<EOL>customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)<EOL>assert customTaskExecutionLabelsSet.issubset(taskLabelsSet),(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\") % (customTaskExecutionLabelsSet - taskLabelsSet,<EOL>customTaskExecutionLabelsList)<EOL>taskIndexList = [taskLabelsList.index(label) for label in<EOL>customTaskExecutionLabelsList]<EOL>print(\"<STR_LIT>\" % [taskLabelsList[i] for<EOL>i in taskIndexList])<EOL><DEDENT>for taskIndex in taskIndexList:<EOL><INDENT>task = experimentTasks[taskIndex]<EOL>taskRunner = _TaskRunner(model=model,<EOL>task=task,<EOL>cmdOptions=options)<EOL>taskRunner.run()<EOL>del taskRunner<EOL>if options.privateOptions['<STR_LIT>']:<EOL><INDENT>_saveModel(model=model,<EOL>experimentDir=experimentDir,<EOL>checkpointLabel=task['<STR_LIT>'],<EOL>newSerialization=newSerialization)<EOL><DEDENT><DEDENT>return model<EOL>", "docstring": "Creates and runs the experiment\n\n    Args:\n      options: namedtuple ParseCommandLineOptionsResult\n      model: For testing: may pass in an existing OPF Model instance\n          to use instead of creating a new one.\n\n    Returns: reference to OPFExperiment instance that was constructed (this\n        is provided to aid with debugging) or None, if none was\n        created.", "id": "f17678:m5"}
{"signature": "def run(self):", "body": "self.__logger.debug(\"<STR_LIT>\", self.__task['<STR_LIT>'])<EOL>if self.__cmdOptions.privateOptions['<STR_LIT>']:<EOL><INDENT>numIters = <NUM_LIT:10><EOL><DEDENT>else:<EOL><INDENT>numIters = self.__task['<STR_LIT>']<EOL><DEDENT>if numIters >= <NUM_LIT:0>:<EOL><INDENT>iterTracker = iter(range(numIters))<EOL><DEDENT>else:<EOL><INDENT>iterTracker = iter(itertools.count())<EOL><DEDENT>periodic = PeriodicActivityMgr(<EOL>requestedActivities=self._createPeriodicActivities())<EOL>self.__model.resetSequenceStates()<EOL>self.__taskDriver.setup()<EOL>while True:<EOL><INDENT>try:<EOL><INDENT>next(iterTracker)<EOL><DEDENT>except StopIteration:<EOL><INDENT>break<EOL><DEDENT>try:<EOL><INDENT>inputRecord = next(self.__datasetReader)<EOL><DEDENT>except StopIteration:<EOL><INDENT>break<EOL><DEDENT>result = self.__taskDriver.handleInputRecord(inputRecord=inputRecord)<EOL>if InferenceElement.encodings in result.inferences:<EOL><INDENT>result.inferences.pop(InferenceElement.encodings)<EOL><DEDENT>self.__predictionLogger.writeRecord(result)<EOL>periodic.tick()<EOL><DEDENT>self._getAndEmitExperimentMetrics(final=True)<EOL>self.__taskDriver.finalize()<EOL>self.__model.resetSequenceStates()<EOL>", "docstring": "Runs a single experiment task", "id": "f17678:c0:m2"}
{"signature": "def _reportCommandLineUsageErrorAndExit(parser, message):", "body": "print(parser.get_usage())<EOL>print(message)<EOL>sys.exit(<NUM_LIT:1>)<EOL>", "docstring": "Report usage error and exit program with error indication.", "id": "f17678:m4"}
{"signature": "def tick(self):", "body": "<EOL>for act in self.__activities:<EOL><INDENT>if not act.iteratorHolder[<NUM_LIT:0>]:<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>next(act.iteratorHolder[<NUM_LIT:0>])<EOL><DEDENT>except StopIteration:<EOL><INDENT>act.cb()<EOL>if act.repeating:<EOL><INDENT>act.iteratorHolder[<NUM_LIT:0>] = iter(range(act.period-<NUM_LIT:1>))<EOL><DEDENT>else:<EOL><INDENT>act.iteratorHolder[<NUM_LIT:0>] = None<EOL><DEDENT><DEDENT><DEDENT>return True<EOL>", "docstring": "Activity tick handler; services all activities\n\n        Returns:\n          True if controlling iterator says it's okay to keep going;\n          False to stop", "id": "f17678:c1:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def finishLearning(self):<DEDENT>", "body": "", "docstring": "Place the model in a permanent \"finished learning\" mode.\n        In such a mode the model will not be able to learn from subsequent input\n        records.\n\n        .. note:: Upon completion of this command, learning may not be resumed on\n           the given instance of the model (e.g., the implementation may optimize\n           itself by pruning data structures that are necessary for learning).", "id": "f17679:c0:m2"}
{"signature": "def __init__(self, inferenceType=None, proto=None):", "body": "assert inferenceType is not None and proto is None or (<EOL>inferenceType is None and proto is not None), (<EOL>\"<STR_LIT>\")<EOL>if proto is None:<EOL><INDENT>self._numPredictions = <NUM_LIT:0><EOL>self.__inferenceType =  inferenceType<EOL>self.__learningEnabled = True<EOL>self.__inferenceEnabled = True<EOL>self.__inferenceArgs = {}<EOL><DEDENT>else:<EOL><INDENT>self._numPredictions = proto.numPredictions<EOL>inferenceType = str(proto.inferenceType)<EOL>inferenceType = inferenceType[:<NUM_LIT:1>].upper() + inferenceType[<NUM_LIT:1>:]<EOL>self.__inferenceType = InferenceType.getValue(inferenceType)<EOL>self.__learningEnabled = proto.learningEnabled<EOL>self.__inferenceEnabled = proto.inferenceEnabled<EOL>self.__inferenceArgs = json.loads(proto.inferenceArgs)<EOL><DEDENT>", "docstring": ":param opf_utils.InferenceType inferenceType: mutually-exclusive with proto\n                                              arg\n:param proto: capnp ModelProto message reader for deserializing;\n              mutually-exclusive with the other constructor args.", "id": "f17679:c0:m0"}
{"signature": "@staticmethod<EOL><INDENT>def _getModelCheckpointFilePath(checkpointDir):<DEDENT>", "body": "path = os.path.join(checkpointDir, \"<STR_LIT>\")<EOL>path = os.path.abspath(path)<EOL>return path<EOL>", "docstring": "Return the absolute path of the model's checkpoint file.\n\n        :param checkpointDir: (string)\n               Directory of where the experiment is to be or was saved\n        :returns: (string) An absolute path.", "id": "f17679:c0:m17"}
{"signature": "def run(self, inputRecord):", "body": "<EOL>predictionNumber = self._numPredictions<EOL>self._numPredictions += <NUM_LIT:1><EOL>result = opf_utils.ModelResult(predictionNumber=predictionNumber,<EOL>rawInput=inputRecord)<EOL>return result<EOL>", "docstring": "Run one iteration of this model.\n\n:param inputRecord: (object)\n       A record object formatted according to\n       :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or\n       :meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`\n       result format.\n:returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)\n         An ModelResult namedtuple. The contents of ModelResult.inferences\n         depends on the the specific inference type of this model, which\n         can be queried by :meth:`.getInferenceType`.", "id": "f17679:c0:m1"}
{"signature": "@staticmethod<EOL><INDENT>def _getModelExtraDataDir(saveModelDir):<DEDENT>", "body": "path = os.path.join(saveModelDir, \"<STR_LIT>\")<EOL>path = os.path.abspath(path)<EOL>return path<EOL>", "docstring": "Return the absolute path to the directory where the model's own\n        \"extra data\" are stored (i.e., data that's too big for pickling).\n\n        :param saveModelDir: (string)\n               Directory of where the experiment is to be or was saved\n        :returns: (string) An absolute path.", "id": "f17679:c0:m28"}
{"signature": "@classmethod<EOL><INDENT>def load(cls, savedModelDir):<DEDENT>", "body": "logger = opf_utils.initLogger(cls)<EOL>logger.debug(\"<STR_LIT>\", savedModelDir)<EOL>modelPickleFilePath = Model._getModelPickleFilePath(savedModelDir)<EOL>with open(modelPickleFilePath, '<STR_LIT:rb>') as modelPickleFile:<EOL><INDENT>logger.debug(\"<STR_LIT>\")<EOL>model = pickle.load(modelPickleFile)<EOL>logger.debug(\"<STR_LIT>\")<EOL><DEDENT>model._deSerializeExtraData(<EOL>extraDataDir=Model._getModelExtraDataDir(savedModelDir))<EOL>logger.debug(\"<STR_LIT>\")<EOL>return model<EOL>", "docstring": "Load saved model.\n\n        :param savedModelDir: (string)\n               Directory of where the experiment is to be or was saved\n        :returns: (:class:`Model`) The loaded model instance", "id": "f17679:c0:m25"}
{"signature": "@staticmethod<EOL><INDENT>def _getModelPickleFilePath(saveModelDir):<DEDENT>", "body": "path = os.path.join(saveModelDir, \"<STR_LIT>\")<EOL>path = os.path.abspath(path)<EOL>return path<EOL>", "docstring": "Return the absolute path of the model's pickle file.\n\n        :param saveModelDir: (string)\n               Directory of where the experiment is to be or was saved\n        :returns: (string) An absolute path.", "id": "f17679:c0:m27"}
{"signature": "def write(self, proto):", "body": "raise NotImplementedError()<EOL>", "docstring": "Write state to proto object.\n\n        The type of proto is determined by :meth:`getSchema`.", "id": "f17679:c0:m21"}
{"signature": "@classmethod<EOL><INDENT>def readFromCheckpoint(cls, checkpointDir):<DEDENT>", "body": "checkpointPath = cls._getModelCheckpointFilePath(checkpointDir)<EOL>with open(checkpointPath, '<STR_LIT:r>') as f:<EOL><INDENT>proto = cls.getSchema().read(f,<EOL>traversal_limit_in_words=_TRAVERSAL_LIMIT_IN_WORDS)<EOL><DEDENT>model = cls.read(proto)<EOL>return model<EOL>", "docstring": "Deserializes model from checkpointDir using capnproto", "id": "f17679:c0:m19"}
{"signature": "@abstractmethod<EOL><INDENT>def getRuntimeStats(self):<DEDENT>", "body": "", "docstring": "Get runtime statistics specific to this model, i.e.\n        ``activeCellOverlapAvg``.\n\n        :returns: (dict) A {statistic names: stats} dictionary", "id": "f17679:c0:m6"}
{"signature": "def getInferenceType(self):", "body": "return self.__inferenceType<EOL>", "docstring": "Return the InferenceType of this model.\n        This is immutable.\n\n        :returns: :class:`~nupic.frameworks.opf.opf_utils.InferenceType`", "id": "f17679:c0:m8"}
{"signature": "def getInferenceArgs(self):", "body": "return self.__inferenceArgs<EOL>", "docstring": "Return the dict of arguments for the current inference mode.\n\n        :returns: (dict) The arguments of the inference mode", "id": "f17679:c0:m13"}
{"signature": "def disableLearning(self):", "body": "self.__learningEnabled = False<EOL>return<EOL>", "docstring": "Turn Learning off for the current model.", "id": "f17679:c0:m10"}
{"signature": "def _getLogger(self):", "body": "return self.__logger<EOL>", "docstring": "Get the logger for this object. This is a protected method that is used\n        by the Model to access the logger created by the subclass\n\n        return:\n          A logging.Logger object. Should not be None", "id": "f17680:c1:m31"}
{"signature": "def __setstate__(self, state):", "body": "self.__dict__.update(state)<EOL>self.__restoringFromState = True<EOL>self.__logger = initLogger(self)<EOL>if not hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>self.__restoringFromV1 = True<EOL>self._hasSP = True<EOL>if self.__temporalNetInfo is not None:<EOL><INDENT>self._Model__inferenceType = InferenceType.TemporalNextStep<EOL>self._netInfo = self.__temporalNetInfo<EOL>self._hasTP = True<EOL><DEDENT>else:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\")<EOL><DEDENT>self._Model__inferenceArgs = {}<EOL>self._Model__learningEnabled = True<EOL>self._Model__inferenceEnabled = True<EOL>self.__dict__.pop(\"<STR_LIT>\", None)<EOL>self.__dict__.pop(\"<STR_LIT>\", None)<EOL>self.__dict__.pop(\"<STR_LIT>\", None)<EOL><DEDENT>if not hasattr(self, \"<STR_LIT>\"):<EOL><INDENT>self._hasSP = False<EOL>self._hasTP = False<EOL>if self.__encoderNetInfo is not None:<EOL><INDENT>self._netInfo = self.__encoderNetInfo<EOL><DEDENT>elif self.__nonTemporalNetInfo is not None:<EOL><INDENT>self._netInfo = self.__nonTemporalNetInfo<EOL>self._hasSP = True<EOL><DEDENT>else:<EOL><INDENT>self._netInfo = self.__temporalNetInfo<EOL>self._hasSP = True<EOL>self._hasTP = True<EOL><DEDENT>self.__dict__.pop(\"<STR_LIT>\", None)<EOL>self.__dict__.pop(\"<STR_LIT>\", None)<EOL>self.__dict__.pop(\"<STR_LIT>\", None)<EOL><DEDENT>self._classifierInputEncoder = None<EOL>if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._minLikelihoodThreshold = DEFAULT_LIKELIHOOD_THRESHOLD<EOL><DEDENT>if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._maxPredictionsPerStep = DEFAULT_MAX_PREDICTIONS_PER_STEP<EOL><DEDENT>if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._hasCL = (self._getClassifierRegion() is not None)<EOL><DEDENT>self.__logger.debug(\"<STR_LIT>\" % self.__class__.__name__)<EOL>", "docstring": "Set the state of ourself from a serialized state.\n\nSee also: _deSerializeExtraData", "id": "f17680:c1:m42"}
{"signature": "def _handleSDRClassifierMultiStep(self, patternNZ,<EOL>inputTSRecordIdx,<EOL>rawInput):", "body": "inferenceArgs = self.getInferenceArgs()<EOL>predictedFieldName = inferenceArgs.get('<STR_LIT>', None)<EOL>if predictedFieldName is None:<EOL><INDENT>raise ValueError(<EOL>\"<STR_LIT>\"<EOL>)<EOL><DEDENT>self._predictedFieldName = predictedFieldName<EOL>classifier = self._getClassifierRegion()<EOL>if not self._hasCL or classifier is None:<EOL><INDENT>return {}<EOL><DEDENT>sensor = self._getSensorRegion()<EOL>minLikelihoodThreshold = self._minLikelihoodThreshold<EOL>maxPredictionsPerStep = self._maxPredictionsPerStep<EOL>needLearning = self.isLearningEnabled()<EOL>inferences = {}<EOL>if self._classifierInputEncoder is None:<EOL><INDENT>if predictedFieldName is None:<EOL><INDENT>raise RuntimeError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>\"<STR_LIT>\")<EOL><DEDENT>encoderList = sensor.getSelf().encoder.getEncoderList()<EOL>self._numFields = len(encoderList)<EOL>fieldNames = sensor.getSelf().encoder.getScalarNames()<EOL>if predictedFieldName in fieldNames:<EOL><INDENT>self._predictedFieldIdx = fieldNames.index(predictedFieldName)<EOL><DEDENT>else:<EOL><INDENT>self._predictedFieldIdx = None<EOL><DEDENT>if sensor.getSelf().disabledEncoder is not None:<EOL><INDENT>encoderList = sensor.getSelf().disabledEncoder.getEncoderList()<EOL><DEDENT>else:<EOL><INDENT>encoderList = []<EOL><DEDENT>if len(encoderList) >= <NUM_LIT:1>:<EOL><INDENT>fieldNames = sensor.getSelf().disabledEncoder.getScalarNames()<EOL>self._classifierInputEncoder = encoderList[fieldNames.index(<EOL>predictedFieldName)]<EOL><DEDENT>else:<EOL><INDENT>encoderList = sensor.getSelf().encoder.getEncoderList()<EOL>self._classifierInputEncoder = encoderList[self._predictedFieldIdx]<EOL><DEDENT><DEDENT>if not predictedFieldName in rawInput:<EOL><INDENT>raise ValueError(\"<STR_LIT>\"<EOL>\"<STR_LIT>\"<EOL>% predictedFieldName)<EOL><DEDENT>absoluteValue = rawInput[predictedFieldName]<EOL>bucketIdx = 
self._classifierInputEncoder.getBucketIndices(absoluteValue)[<NUM_LIT:0>]<EOL>if isinstance(self._classifierInputEncoder, DeltaEncoder):<EOL><INDENT>if not hasattr(self,\"<STR_LIT>\"):<EOL><INDENT>self._ms_prevVal = absoluteValue<EOL><DEDENT>prevValue = self._ms_prevVal<EOL>self._ms_prevVal = absoluteValue<EOL>actualValue = absoluteValue - prevValue<EOL><DEDENT>else:<EOL><INDENT>actualValue = absoluteValue<EOL><DEDENT>if isinstance(actualValue, float) and math.isnan(actualValue):<EOL><INDENT>actualValue = SENTINEL_VALUE_FOR_MISSING_DATA<EOL><DEDENT>classifier.setParameter('<STR_LIT>', True)<EOL>classifier.setParameter('<STR_LIT>', needLearning)<EOL>classificationIn = {'<STR_LIT>': bucketIdx,<EOL>'<STR_LIT>': actualValue}<EOL>if inputTSRecordIdx is not None:<EOL><INDENT>recordNum = inputTSRecordIdx<EOL><DEDENT>else:<EOL><INDENT>recordNum = self.__numRunCalls<EOL><DEDENT>clResults = classifier.getSelf().customCompute(recordNum=recordNum,<EOL>patternNZ=patternNZ,<EOL>classification=classificationIn)<EOL>predictionSteps = classifier.getParameter('<STR_LIT>')<EOL>predictionSteps = [int(x) for x in predictionSteps.split('<STR_LIT:U+002C>')]<EOL>inferences[InferenceElement.multiStepPredictions] = dict()<EOL>inferences[InferenceElement.multiStepBestPredictions] = dict()<EOL>inferences[InferenceElement.multiStepBucketLikelihoods] = dict()<EOL>for steps in predictionSteps:<EOL><INDENT>likelihoodsVec = clResults[steps]<EOL>bucketValues = clResults['<STR_LIT>']<EOL>likelihoodsDict = dict()<EOL>bestActValue = None<EOL>bestProb = None<EOL>for (actValue, prob) in zip(bucketValues, likelihoodsVec):<EOL><INDENT>if actValue in likelihoodsDict:<EOL><INDENT>likelihoodsDict[actValue] += prob<EOL><DEDENT>else:<EOL><INDENT>likelihoodsDict[actValue] = prob<EOL><DEDENT>if bestProb is None or likelihoodsDict[actValue] > bestProb:<EOL><INDENT>bestProb = likelihoodsDict[actValue]<EOL>bestActValue = actValue<EOL><DEDENT><DEDENT>likelihoodsDict = 
HTMPredictionModel._removeUnlikelyPredictions(<EOL>likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep)<EOL>bucketLikelihood = {}<EOL>for k in likelihoodsDict.keys():<EOL><INDENT>bucketLikelihood[self._classifierInputEncoder.getBucketIndices(k)[<NUM_LIT:0>]] = (<EOL>likelihoodsDict[k])<EOL><DEDENT>if isinstance(self._classifierInputEncoder, DeltaEncoder):<EOL><INDENT>if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._ms_predHistories = dict()<EOL><DEDENT>predHistories = self._ms_predHistories<EOL>if not steps in predHistories:<EOL><INDENT>predHistories[steps] = deque()<EOL><DEDENT>predHistory = predHistories[steps]<EOL>sumDelta = sum(predHistory)<EOL>offsetDict = dict()<EOL>for (k, v) in likelihoodsDict.iteritems():<EOL><INDENT>if k is not None:<EOL><INDENT>offsetDict[absoluteValue+float(k)+sumDelta] = v<EOL><DEDENT><DEDENT>bucketLikelihoodOffset = {}<EOL>for k in offsetDict.keys():<EOL><INDENT>bucketLikelihoodOffset[self._classifierInputEncoder.getBucketIndices(k)[<NUM_LIT:0>]] = (<EOL>offsetDict[k])<EOL><DEDENT>if bestActValue is not None:<EOL><INDENT>predHistory.append(bestActValue)<EOL><DEDENT>if len(predHistory) >= steps:<EOL><INDENT>predHistory.popleft()<EOL><DEDENT>if len(offsetDict)><NUM_LIT:0>:<EOL><INDENT>inferences[InferenceElement.multiStepPredictions][steps] = offsetDict<EOL>inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihoodOffset<EOL><DEDENT>else:<EOL><INDENT>inferences[InferenceElement.multiStepPredictions][steps] = likelihoodsDict<EOL>inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihood<EOL><DEDENT>if bestActValue is None:<EOL><INDENT>inferences[InferenceElement.multiStepBestPredictions][steps] = None<EOL><DEDENT>else:<EOL><INDENT>inferences[InferenceElement.multiStepBestPredictions][steps] = (<EOL>absoluteValue + sumDelta + bestActValue)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>inferences[InferenceElement.multiStepPredictions][steps] = 
(<EOL>likelihoodsDict)<EOL>inferences[InferenceElement.multiStepBestPredictions][steps] = (<EOL>bestActValue)<EOL>inferences[InferenceElement.multiStepBucketLikelihoods][steps] = (<EOL>bucketLikelihood)<EOL><DEDENT><DEDENT>return inferences<EOL>", "docstring": "Handle the CLA Classifier compute logic when implementing multi-step\n        prediction. This is where the patternNZ is associated with one of the\n        other fields from the dataset 0 to N steps in the future. This method is\n        used by each type of network (encoder only, SP only, SP +TM) to handle the\n        compute logic through the CLA Classifier. It fills in the inference dict with\n        the results of the compute.\n\n        Parameters:\n        -------------------------------------------------------------------\n        patternNZ: The input to the CLA Classifier as a list of active input indices\n        inputTSRecordIdx: The index of the record as computed from the timestamp\n                      and aggregation interval. This normally increments by 1\n                      each time unless there are missing records. If there is no\n                      aggregation interval or timestamp in the data, this will be\n                      None.\n        rawInput:   The raw input to the sensor, as a dict.", "id": "f17680:c1:m27"}
{"signature": "def _getDataSource(self):", "body": "return self._getSensorRegion().getSelf().dataSource<EOL>", "docstring": "Returns: data source that we installed in sensor region", "id": "f17680:c1:m39"}
{"signature": "def __init__(self, net, statsCollectors):", "body": "self.net = net<EOL>self.statsCollectors = statsCollectors<EOL>return<EOL>", "docstring": "net:          The CLA Network instance\nstatsCollectors:\n              Sequence of 0 or more CLAStatistic-based instances", "id": "f17680:c0:m0"}
{"signature": "def _getClassifierOnlyEncoder(self):", "body": "return  self._getSensorRegion().getSelf().disabledEncoder<EOL>", "docstring": "Returns:  sensor region's encoder that is sent only to the classifier,\n            not to the bottom of the network", "id": "f17680:c1:m38"}
{"signature": "def __createHTMNetwork(self, sensorParams, spEnable, spParams, tmEnable,<EOL>tmParams, clEnable, clParams, anomalyParams):", "body": "<EOL>n = Network()<EOL>n.addRegion(\"<STR_LIT>\", \"<STR_LIT>\", json.dumps(dict(verbosity=sensorParams['<STR_LIT>'])))<EOL>sensor = n.regions['<STR_LIT>'].getSelf()<EOL>enabledEncoders = copy.deepcopy(sensorParams['<STR_LIT>'])<EOL>for name, params in enabledEncoders.items():<EOL><INDENT>if params is not None:<EOL><INDENT>classifierOnly = params.pop('<STR_LIT>', False)<EOL>if classifierOnly:<EOL><INDENT>enabledEncoders.pop(name)<EOL><DEDENT><DEDENT><DEDENT>disabledEncoders = copy.deepcopy(sensorParams['<STR_LIT>'])<EOL>for name, params in disabledEncoders.items():<EOL><INDENT>if params is None:<EOL><INDENT>disabledEncoders.pop(name)<EOL><DEDENT>else:<EOL><INDENT>classifierOnly = params.pop('<STR_LIT>', False)<EOL>if not classifierOnly:<EOL><INDENT>disabledEncoders.pop(name)<EOL><DEDENT><DEDENT><DEDENT>encoder = MultiEncoder(enabledEncoders)<EOL>sensor.encoder = encoder<EOL>sensor.disabledEncoder = MultiEncoder(disabledEncoders)<EOL>sensor.dataSource = DataBuffer()<EOL>prevRegion = \"<STR_LIT>\"<EOL>prevRegionWidth = encoder.getWidth()<EOL>if spEnable:<EOL><INDENT>spParams = spParams.copy()<EOL>spParams['<STR_LIT>'] = prevRegionWidth<EOL>self.__logger.debug(\"<STR_LIT>\" % spParams)<EOL>n.addRegion(\"<STR_LIT>\", \"<STR_LIT>\", json.dumps(spParams))<EOL>n.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>n.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", srcOutput=\"<STR_LIT>\",<EOL>destInput=\"<STR_LIT>\")<EOL>n.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", srcOutput=\"<STR_LIT>\",<EOL>destInput=\"<STR_LIT>\")<EOL>n.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", srcOutput=\"<STR_LIT>\",<EOL>destInput=\"<STR_LIT>\")<EOL>prevRegion = \"<STR_LIT>\"<EOL>prevRegionWidth = spParams['<STR_LIT>']<EOL><DEDENT>if tmEnable:<EOL><INDENT>tmParams = 
tmParams.copy()<EOL>if prevRegion == '<STR_LIT>':<EOL><INDENT>tmParams['<STR_LIT>'] = tmParams['<STR_LIT>'] = prevRegionWidth<EOL><DEDENT>else:<EOL><INDENT>assert tmParams['<STR_LIT>'] == prevRegionWidth<EOL>tmParams['<STR_LIT>'] = tmParams['<STR_LIT>']<EOL><DEDENT>self.__logger.debug(\"<STR_LIT>\" % tmParams)<EOL>n.addRegion(\"<STR_LIT>\", \"<STR_LIT>\", json.dumps(tmParams))<EOL>n.link(prevRegion, \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL>if prevRegion != \"<STR_LIT>\":<EOL><INDENT>n.link(\"<STR_LIT>\", prevRegion, \"<STR_LIT>\", \"<STR_LIT>\", srcOutput=\"<STR_LIT>\",<EOL>destInput=\"<STR_LIT>\")<EOL><DEDENT>else:<EOL><INDENT>n.link(\"<STR_LIT>\", prevRegion, \"<STR_LIT>\", \"<STR_LIT>\", srcOutput=\"<STR_LIT>\",<EOL>destInput=\"<STR_LIT>\")<EOL><DEDENT>n.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", srcOutput=\"<STR_LIT>\",<EOL>destInput=\"<STR_LIT>\")<EOL>prevRegion = \"<STR_LIT>\"<EOL>prevRegionWidth = tmParams['<STR_LIT>']<EOL><DEDENT>if clEnable and clParams is not None:<EOL><INDENT>clParams = clParams.copy()<EOL>clRegionName = clParams.pop('<STR_LIT>')<EOL>self.__logger.debug(\"<STR_LIT>\" % (clRegionName,<EOL>clParams))<EOL>n.addRegion(\"<STR_LIT>\", \"<STR_LIT>\" % str(clRegionName), json.dumps(clParams))<EOL>if str(clRegionName) == \"<STR_LIT>\":<EOL><INDENT>n.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", srcOutput=\"<STR_LIT>\",<EOL>destInput=\"<STR_LIT>\")<EOL>n.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", srcOutput=\"<STR_LIT>\",<EOL>destInput=\"<STR_LIT>\")<EOL><DEDENT>n.link(\"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\", srcOutput=\"<STR_LIT>\",<EOL>destInput=\"<STR_LIT>\")<EOL>n.link(prevRegion, \"<STR_LIT>\", \"<STR_LIT>\", \"<STR_LIT>\")<EOL><DEDENT>if self.getInferenceType() == InferenceType.TemporalAnomaly:<EOL><INDENT>anomalyClParams = dict(<EOL>trainRecords=anomalyParams.get('<STR_LIT>', None),<EOL>cacheSize=anomalyParams.get('<STR_LIT>', 
None)<EOL>)<EOL>self._addAnomalyClassifierRegion(n, anomalyClParams, spEnable, tmEnable)<EOL><DEDENT>n.initialize()<EOL>return NetworkInfo(net=n, statsCollectors=[])<EOL>", "docstring": "Create a CLA network and return it.\n\n        description:  HTMPredictionModel description dictionary (TODO: define schema)\n        Returns:      NetworkInfo instance;", "id": "f17680:c1:m40"}
{"signature": "def _getSensorRegion(self):", "body": "return self._netInfo.net.regions['<STR_LIT>']<EOL>", "docstring": "Returns reference to the network's Sensor region", "id": "f17680:c1:m34"}
{"signature": "def _getClassifierRegion(self):", "body": "if (self._netInfo.net is not None and<EOL>\"<STR_LIT>\" in self._netInfo.net.regions):<EOL><INDENT>return self._netInfo.net.regions[\"<STR_LIT>\"]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>", "docstring": "Returns reference to the network's Classifier region", "id": "f17680:c1:m35"}
{"signature": "def __init__(self,<EOL>sensorParams={},<EOL>inferenceType=InferenceType.TemporalNextStep,<EOL>spEnable=True,<EOL>spParams={},<EOL>trainSPNetOnlyIfRequested=False,<EOL>tmEnable=True,<EOL>tmParams={},<EOL>clEnable=True,<EOL>clParams={},<EOL>anomalyParams={},<EOL>minLikelihoodThreshold=DEFAULT_LIKELIHOOD_THRESHOLD,<EOL>maxPredictionsPerStep=DEFAULT_MAX_PREDICTIONS_PER_STEP,<EOL>network=None,<EOL>baseProto=None):", "body": "if not inferenceType in self.__supportedInferenceKindSet:<EOL><INDENT>raise ValueError(\"<STR_LIT>\".format(self.__class__, inferenceType))<EOL><DEDENT>if baseProto is None:<EOL><INDENT>super(HTMPredictionModel, self).__init__(inferenceType)<EOL><DEDENT>else:<EOL><INDENT>super(HTMPredictionModel, self).__init__(proto=baseProto)<EOL><DEDENT>self.__restoringFromState = False<EOL>self.__restoringFromV1 = False<EOL>self.__logger = initLogger(self)<EOL>self.__logger.debug(\"<STR_LIT>\" % self.__myClassName)<EOL>self._minLikelihoodThreshold = minLikelihoodThreshold<EOL>self._maxPredictionsPerStep = maxPredictionsPerStep<EOL>self.__spLearningEnabled = bool(spEnable)<EOL>self.__tpLearningEnabled = bool(tmEnable)<EOL>if not InferenceType.isTemporal(self.getInferenceType())or self.getInferenceType() == InferenceType.NontemporalMultiStep:<EOL><INDENT>tmEnable = False<EOL><DEDENT>self._netInfo = None<EOL>self._hasSP = spEnable<EOL>self._hasTP = tmEnable<EOL>self._hasCL = clEnable<EOL>self._classifierInputEncoder = None<EOL>self._predictedFieldIdx = None<EOL>self._predictedFieldName = None<EOL>self._numFields = None<EOL>if network is not None:<EOL><INDENT>self._netInfo = NetworkInfo(net=network, statsCollectors=[])<EOL><DEDENT>else:<EOL><INDENT>self._netInfo = self.__createHTMNetwork(<EOL>sensorParams, spEnable, spParams, tmEnable, tmParams, clEnable,<EOL>clParams, anomalyParams)<EOL><DEDENT>if self.getInferenceType() == InferenceType.NontemporalAnomaly:<EOL><INDENT>self._getSPRegion().setParameter(\"<STR_LIT>\", True)<EOL><DEDENT>if 
self.getInferenceType() == InferenceType.TemporalAnomaly:<EOL><INDENT>self._getTPRegion().setParameter(\"<STR_LIT>\", True)<EOL><DEDENT>self.__trainSPNetOnlyIfRequested = trainSPNetOnlyIfRequested<EOL>self.__numRunCalls = <NUM_LIT:0><EOL>self.__finishedLearning = False<EOL>self.__logger.debug(\"<STR_LIT>\" % self.__class__.__name__)<EOL>self._input = None<EOL>return<EOL>", "docstring": ":param network: if not None, the deserialized nupic.engine.Network instance\n                to use instead of creating a new Network\n:param baseProto: if not None, capnp ModelProto message reader for\n                  deserializing; supersedes inferenceType", "id": "f17680:c1:m0"}
{"signature": "def getRuntimeStats(self):", "body": "ret = {\"<STR_LIT>\" : self.__numRunCalls}<EOL>temporalStats = dict()<EOL>if self._hasTP:<EOL><INDENT>for stat in self._netInfo.statsCollectors:<EOL><INDENT>sdict = stat.getStats()<EOL>temporalStats.update(sdict)<EOL><DEDENT><DEDENT>ret[InferenceType.getLabel(InferenceType.TemporalNextStep)] = temporalStats<EOL>return ret<EOL>", "docstring": "Only returns data for a stat called ``numRunCalls``.\n:return:", "id": "f17680:c1:m29"}
{"signature": "@classmethod<EOL><INDENT>def read(cls, proto):<DEDENT>", "body": "obj = object.__new__(cls)<EOL>super(HTMPredictionModel, obj).__init__(proto=proto.modelBase)<EOL>obj._minLikelihoodThreshold = round(proto.minLikelihoodThreshold,<EOL>EPSILON_ROUND)<EOL>obj._maxPredictionsPerStep = proto.maxPredictionsPerStep<EOL>network = Network.read(proto.network)<EOL>obj._hasSP = (\"<STR_LIT>\" in network.regions)<EOL>obj._hasTP = (\"<STR_LIT>\" in network.regions)<EOL>obj._hasCL = (\"<STR_LIT>\" in network.regions)<EOL>obj._netInfo = NetworkInfo(net=network, statsCollectors=[])<EOL>obj.__spLearningEnabled = bool(proto.spLearningEnabled)<EOL>obj.__tpLearningEnabled = bool(proto.tpLearningEnabled)<EOL>obj.__numRunCalls = proto.numRunCalls<EOL>obj._classifierInputEncoder = None<EOL>if proto.predictedFieldIdx.which() == \"<STR_LIT:none>\":<EOL><INDENT>obj._predictedFieldIdx = None<EOL><DEDENT>else:<EOL><INDENT>obj._predictedFieldIdx = proto.predictedFieldIdx.value<EOL><DEDENT>if proto.predictedFieldName.which() == \"<STR_LIT:none>\":<EOL><INDENT>obj._predictedFieldName = None<EOL><DEDENT>else:<EOL><INDENT>obj._predictedFieldName = proto.predictedFieldName.value<EOL><DEDENT>obj._numFields = proto.numFields<EOL>if proto.numFields.which() == \"<STR_LIT:none>\":<EOL><INDENT>obj._numFields = None<EOL><DEDENT>else:<EOL><INDENT>obj._numFields = proto.numFields.value<EOL><DEDENT>obj.__trainSPNetOnlyIfRequested = proto.trainSPNetOnlyIfRequested<EOL>obj.__finishedLearning = proto.finishedLearning<EOL>obj._input = None<EOL>sensor = network.regions['<STR_LIT>'].getSelf()<EOL>sensor.dataSource = DataBuffer()<EOL>network.initialize()<EOL>obj.__logger = initLogger(obj)<EOL>obj.__logger.debug(\"<STR_LIT>\" % obj.__myClassName)<EOL>obj.__restoringFromState = False<EOL>obj.__restoringFromV1 = False<EOL>return obj<EOL>", "docstring": ":param proto: capnp HTMPredictionModelProto message reader", "id": "f17680:c1:m45"}
{"signature": "def _deSerializeExtraData(self, extraDataDir):", "body": "assert self.__restoringFromState<EOL>assert (self._netInfo.net is None), \"<STR_LIT>\"<EOL>stateDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)<EOL>self.__logger.debug(<EOL>\"<STR_LIT>\", self)<EOL>self._netInfo.net = Network(stateDir)<EOL>self.__logger.debug(<EOL>\"<STR_LIT>\", self)<EOL>self._netInfo.net.initialize()<EOL>if self.getInferenceType() == InferenceType.TemporalAnomaly:<EOL><INDENT>classifierType = self._getAnomalyClassifier().getSelf().__class__.__name__<EOL>if classifierType is '<STR_LIT>':<EOL><INDENT>anomalyClParams = dict(<EOL>trainRecords=self._classifier_helper._autoDetectWaitRecords,<EOL>cacheSize=self._classifier_helper._history_length,<EOL>)<EOL>spEnable = (self._getSPRegion() is not None)<EOL>tmEnable = True<EOL>knnRegion = self._getAnomalyClassifier().getSelf()<EOL>self._addAnomalyClassifierRegion(self._netInfo.net, anomalyClParams,<EOL>spEnable, tmEnable)<EOL>self._getAnomalyClassifier().getSelf()._iteration = self.__numRunCalls<EOL>self._getAnomalyClassifier().getSelf()._recordsCache = (<EOL>self._classifier_helper.saved_states)<EOL>self._getAnomalyClassifier().getSelf().saved_categories = (<EOL>self._classifier_helper.saved_categories)<EOL>self._getAnomalyClassifier().getSelf()._knnclassifier = knnRegion<EOL>self._getTPRegion().setParameter('<STR_LIT>', True)<EOL>del self._classifier_helper<EOL>self._netInfo.net.initialize()<EOL><DEDENT><DEDENT>self.__restoringFromState = False<EOL>self.__logger.debug(\"<STR_LIT>\", self)<EOL>return<EOL>", "docstring": "[virtual method override] This method is called during deserialization\n        (after __setstate__) with an external directory path that can be used to\n        bypass pickle for loading large binary states.\n\n        extraDataDir:\n                      Model's extra data directory path", "id": "f17680:c1:m47"}
{"signature": "@requireAnomalyModel<EOL><INDENT>def setAnomalyParameter(self, param, value):<DEDENT>", "body": "self._getAnomalyClassifier().setParameter(param, value)<EOL>", "docstring": "Set a parameter of the anomaly classifier within this model.\n\n:param param: (string) name of parameter to set\n:param value: (object) value to set", "id": "f17680:c1:m9"}
{"signature": "def __init__(self, experimentDir, label, inferenceType,<EOL>fields, metricNames=None, checkpointSource=None):", "body": "<EOL>self.__experimentDir = experimentDir<EOL>self.__inferenceType = inferenceType<EOL>self.__inputFieldsMeta = tuple(copy.deepcopy(fields))<EOL>self.__numInputFields = len(self.__inputFieldsMeta)<EOL>self.__label = label<EOL>if metricNames is not None:<EOL><INDENT>metricNames.sort()<EOL><DEDENT>self.__metricNames = metricNames<EOL>self.__outputFieldsMeta = []<EOL>self._rawInputNames = []<EOL>self.__datasetPath = None<EOL>self.__dataset = None<EOL>self.__checkpointCache = None<EOL>if checkpointSource is not None:<EOL><INDENT>checkpointSource.seek(<NUM_LIT:0>)<EOL>self.__checkpointCache = io.StringIO()<EOL>shutil.copyfileobj(checkpointSource, self.__checkpointCache)<EOL><DEDENT>return<EOL>", "docstring": "Constructor\n\n        experimentDir:\n                      experiment directory path that contains description.py\n\n        label:        A label string to incorporate into the filename.\n\n\n        inferenceElements:\n\n\n        inferenceType:\n                      An constant from opf_utils.InferenceType for the\n                      requested prediction writer\n\n        fields:       a non-empty sequence of nupic.data.fieldmeta.FieldMetaInfo\n                      representing fields that will be emitted to this prediction\n                      writer\n\n        metricNames:  OPTIONAL - A list of metric names that well be emiited by this\n                      prediction writer\n\n        checkpointSource:\n                      If not None, a File-like object containing the\n                      previously-checkpointed predictions for setting the initial\n                      contents of this PredictionOutputStream.  Will be copied\n                      before returning, if needed.", "id": "f17681:c5:m0"}
{"signature": "@abstractmethod<EOL><INDENT>def close(self):<DEDENT>", "body": "", "docstring": "Closes the writer (e.g., close the underlying file)", "id": "f17681:c2:m0"}
{"signature": "def checkpoint(self, checkpointSink, maxRows):", "body": "checkpointSink.truncate()<EOL>if self.__dataset is None:<EOL><INDENT>if self.__checkpointCache is not None:<EOL><INDENT>self.__checkpointCache.seek(<NUM_LIT:0>)<EOL>shutil.copyfileobj(self.__checkpointCache, checkpointSink)<EOL>checkpointSink.flush()<EOL>return<EOL><DEDENT>else:<EOL><INDENT>return<EOL><DEDENT><DEDENT>self.__dataset.flush()<EOL>totalDataRows = self.__dataset.getDataRowCount()<EOL>if totalDataRows == <NUM_LIT:0>:<EOL><INDENT>return<EOL><DEDENT>reader = FileRecordStream(self.__datasetPath, missingValues=[])<EOL>writer = csv.writer(checkpointSink)<EOL>writer.writerow(reader.getFieldNames())<EOL>numToWrite = min(maxRows, totalDataRows)<EOL>numRowsToSkip = totalDataRows - numToWrite<EOL>for i in range(numRowsToSkip):<EOL><INDENT>next(reader)<EOL><DEDENT>numWritten = <NUM_LIT:0><EOL>while True:<EOL><INDENT>row = reader.getNextRecord()<EOL>if row is None:<EOL><INDENT>break;<EOL><DEDENT>row =  [str(element) for element in row]<EOL>writer.writerow(row)<EOL>numWritten +=<NUM_LIT:1><EOL><DEDENT>assert numWritten == numToWrite,\"<STR_LIT>\" % (numWritten, numToWrite)<EOL>checkpointSink.flush()<EOL>return<EOL>", "docstring": "[virtual method override] Save a checkpoint of the prediction output\n        stream. The checkpoint comprises up to maxRows of the most recent inference\n        records.\n\n        Parameters:\n        ----------------------------------------------------------------------\n        checkpointSink:     A File-like object where predictions checkpoint data, if\n                            any, will be stored.\n        maxRows:            Maximum number of most recent inference rows\n                            to checkpoint.", "id": "f17681:c5:m7"}
{"signature": "@abstractmethod<EOL><INDENT>def emitPeriodicMetrics(self, metrics):<DEDENT>", "body": "", "docstring": "Emits periodic metrics to stdout in JSON.\n\n        :param metrics: A list of metrics as returned by\n              :meth:`nupic.frameworks.opf.opf_task_driver.OPFTaskDriver.getMetrics`.", "id": "f17681:c0:m0"}
{"signature": "@abstractmethod<EOL><INDENT>def next(self):<DEDENT>", "body": "", "docstring": ":returns:     The next record from the dataset.  The returned record object\n              is of the same structure as returned by\n              :meth:`nupic.data.record_stream.RecordStreamIface.getNextRecord`.\n              Returns ``None`` if the next record is not available yet.\n\n:raises: (StopIteration) if a hard \"end of file\" has been reached\n              and no more records will be forthcoming.", "id": "f17681:c1:m1"}
{"signature": "@abstractmethod<EOL><INDENT>def emitFinalMetrics(self, metrics):<DEDENT>", "body": "", "docstring": "Emits final metrics.\n\n        .. note:: the intention is that the final metrics may go to a different\n                  place (e.g., csv file) versus :meth:`emitPeriodicMetrics`\n                  (e.g., stdout)\n\n        :param metrics: A list of metrics as returned by\n              :meth:`nupic.frameworks.opf.opf_task_driver.OPFTaskDriver.getMetrics`.", "id": "f17681:c0:m1"}
{"signature": "def close(self):", "body": "if self.__dataset:<EOL><INDENT>self.__dataset.close()<EOL><DEDENT>self.__dataset = None<EOL>return<EOL>", "docstring": "[virtual method override] Closes the writer (e.g., close the underlying\n        file)", "id": "f17681:c5:m3"}
{"signature": "@abstractmethod<EOL><INDENT>def writeRecords(self, modelResults, progressCB=None):<DEDENT>", "body": "", "docstring": "Same as :meth:`writeRecord`, but emits multiple rows in one shot.\n\n:param modelResults: (list) of\n       :class:`nupic.frameworks.opf.opf_utils.ModelResult` objects, each\n       represents one record.\n:param progressCB: (func) optional callback method that will be called after\n       each batch of records is written.", "id": "f17683:c0:m2"}
